[
  {
    "path": "L2TermComputation3.py",
    "content": "#make sure to set env vars first: \n#export PYTHONPATH=/home/lsmith/software/caffe-5-1-17/python/:$PYTHONPATH\n\nimport sys\nimport subprocess\nimport os\nimport math\nimport numpy as np\n\nos.environ['GLOG_minloglevel'] = '2' \n#must be done before importing caffe; prevents tons of screen output during net load\n\nimport caffe\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\n\nnetProto = '/home/lsmith/software/caffe-5-1-17/examples/largeLR/architectures/Resnet56Cifar.prototxt'\npresnapshot = 'examples/largeLR/snapshots/cifar20kResnet56CLR3SS5k_iter_'\npostsnapshot = '.caffemodel'\n\nmovingAvg = 0.1\nalpha = 0.1\ninc = 1\n#for i in range(10, 9990, 10):\nfor i in range(inc, 298, inc):\n\n  net1Snapshot = presnapshot + `i` + postsnapshot\n  net2Snapshot = presnapshot + `i+inc` + postsnapshot\n  net3Snapshot = presnapshot + `i+2*inc` + postsnapshot\n\n  if i < 5000:\n    LR = 0.1 + 2.9*i/5000\n  else:\n    LR = 3.0 - 2.9*(i-5000)/5000   \n\n\n  #print('Loading net 1.')\n  sys.stdout.flush()\n  net1 = caffe.Net(netProto, net1Snapshot, caffe.TEST)\n  #print('Loading net 2.')\n  sys.stdout.flush()\n  net2 = caffe.Net(netProto, net2Snapshot, caffe.TEST)\n  #print('Loading net 3.')\n  sys.stdout.flush()\n  net3 = caffe.Net(netProto, net3Snapshot, caffe.TEST)\n\n  topTerm = 0\n  for param in net2.params: #over each layer\n\tfor paramLayer in range(0, len(net2.params[param])): #over each parameter in layer (weight, bias, etc)\n  \t\ttopTerm += np.sum(map(lambda x: abs(x), net2.params[param][paramLayer].data - net1.params[param][paramLayer].data))\n\n  botTerm = 0\n  for param in net1.params: #over each layer\n\tfor paramLayer in range(0, len(net1.params[param])): #over each parameter in layer (weight, bias, etc)\n\t\tbotTerm += np.sum(map(lambda x: abs(x), 2*net2.params[param][paramLayer].data - net1.params[param][paramLayer].data - net3.params[param][paramLayer].data))\n\n  movingAvg = (1-alpha)*movingAvg + LR*alpha*topTerm/botTerm\n#  
print(math.sqrt(topTerm/botTerm)/10 )\n  print(movingAvg, LR, topTerm, botTerm)\n\n\n"
  },
  {
    "path": "L2TermComputation4.py",
    "content": "#make sure to set env vars first: \n#export PYTHONPATH=/home/lsmith/software/caffe-5-1-17/python/:$PYTHONPATH\n\nimport sys\nimport subprocess\nimport os\nimport math\nimport numpy as np\n\nos.environ['GLOG_minloglevel'] = '2' \n#must be done before importing caffe; prevents tons of screen output during net load\n\nimport caffe\ncaffe.set_mode_gpu()\ncaffe.set_device(0)\n\nnetProto = '/home/lsmith/software/caffe-5-1-17/examples/largeLR/architectures/Resnet56Cifar.prototxt'\npresnapshot = 'examples/largeLR/snapshots/lr1/lr1Resnet56_iter_'\npostsnapshot = '.caffemodel'\n\nmovingAvg = 0.1\nalpha = 0.1\nLR = 0.35\ninc = 10\nfor i in range(inc, 9980, inc):\n\n\n  net1Snapshot = presnapshot + `i` + postsnapshot\n  net2Snapshot = presnapshot + `i+inc` + postsnapshot\n  net3Snapshot = presnapshot + `i+2*inc` + postsnapshot\n\n  #print('Loading net 1.')\n  sys.stdout.flush()\n  net1 = caffe.Net(netProto, net1Snapshot, caffe.TEST)\n  #print('Loading net 2.')\n  sys.stdout.flush()\n  net2 = caffe.Net(netProto, net2Snapshot, caffe.TEST)\n  #print('Loading net 3.')\n  sys.stdout.flush()\n  net3 = caffe.Net(netProto, net3Snapshot, caffe.TEST)\n\n\n  topTerm = 0\n  for param in net2.params: #over each layer\n\tfor paramLayer in range(0, len(net2.params[param])): #over each parameter in layer (weight, bias, etc)\n  \t\ttopTerm += np.sum(map(lambda x: abs(x), net2.params[param][paramLayer].data - net1.params[param][paramLayer].data))\n\n  botTerm = 0\n  for param in net1.params: #over each layer\n\tfor paramLayer in range(0, len(net1.params[param])): #over each parameter in layer (weight, bias, etc)\n\t\tbotTerm += np.sum(map(lambda x: abs(x), 2*net2.params[param][paramLayer].data - net1.params[param][paramLayer].data - net3.params[param][paramLayer].data))\n\n  movingAvg = (1-alpha)*movingAvg + LR*alpha*topTerm/botTerm\n#  print(math.sqrt(topTerm/botTerm)/10 )\n  print(movingAvg, LR, topTerm, botTerm)\n\n\n"
  },
  {
    "path": "README.md",
    "content": "# super-convergence\n\nHere are the Caffe files of our recent work: \nSmith, Leslie N. and Nicholay Topin \"Super-Convergence: Very Fast Training of Residual Networks Using Large Learning Rates\"  arXiv preprint arXiv:1708.07120 (2017).  Please read the paper for details. In addition, see the paper \"Cyclical Learning Rates for Training Neural Networks\" at https://arxiv.org/pdf/1506.01186.pdf for instructions on implementing cyclical learning rates in Caffe.\n\nNote: I just uploaded the outputs from my own replication to the new results folder.  These outputs should be helpful references for replicating the figures in the paper.\n\n\nInstructions:\n\nTo simplify the replication of the figures in the paper, a shell script x.sh is included, which we used to replicate our experiments and create the figures in the paper.  This execution script shows the changes to each file needed for each run.  Below we spell out these changes.\n\nFrom caffe home directory:\n./build/tools/caffe train --solver=$SOLVER -gpu=all\n\n\nAs provided, this solver file trains the CLR network from Figure 1a. Changes must be made to reproduce other experiments, as listed below. \n\n\tFig. 1a:\n\t\tLR=0.35:\n\t\t$SOLVER should be the provided \"solver.prototxt\". \t\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\ttest_iter: 200\n\t\t\ttest_interval: 100\n\t\t\tdisplay: 100\n\t\t\tlr_policy: \"multistep\"\n\t\t\tstepvalue: 50000\n\t\t\tstepvalue: 70000\n\t\t\tbase_lr: 0.35\n\t\t\tgamma: 0.1\n\t\t\tmax_iter: 80000\n\t\t\tweight_decay: 1e-4\n\t\t\tmomentum: 0.9\n\n\t\tCLR=0.1-3.0:\n\t\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 5000\n\t\t\tmax_iter: 10000\n\t\t\tweight_decay: 1e-4\n\t\t\tmomentum: 0.9\n\n\tFig. 1b:\n\t$SOLVER should be the provided \"clrsolver.prototxt\". 
\n\t\tStepsize=10k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 10000\n\t\t\tmax_iter: 20000\n\t\tStepsize=5k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 5000\n\t\t\tmax_iter: 10000\n\t\tStepsize=3k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 3000\n\t\t\tmax_iter: 6000\n\t\tStepsize=1k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 1000\n\t\t\tmax_iter: 2000\n\t\t\t\n\tFig. 2a:\n\t\tFigure reproduced from Smith [2017] with permission.\n\n\tFig. 2b:\n\t$SOLVER should be the provided \"lrRangeSolver.prototxt\". \n\t\tMax Iter=5k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 5000\n\t\t\tmax_iter: 5000\n\t\tMax Iter=20k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 20000\n\t\t\tmax_iter: 20000\n\t\tMax Iter=100k:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 100000\n\t\t\tmax_iter: 100000\n\n\tFig. 3a:\n\t\tFigure reproduced from Goodfellow et al. [2014] with permission.\n\t\t\n\tFig. 3b:\n\t\tFigure reproduced from Goodfellow et al. [2014] with permission.\n\n\tFig. 4a:\n\t$SOLVER should be the provided \"lrRangeSolver.prototxt\". \n\t\tSingle network:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 1.5\n\t\t\tstepsize: 20000\n\t\t\tmax_iter: 20000\n\n\tFig. 
4b:\n\t\tResnet-20:\n\t\t\tnet: \".../Resnet20Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 1.5\n\t\t\tstepsize: 20000\n\t\t\tmax_iter: 20000\n\t\tResnet-110:\n\t\t\tnet: \".../Resnet110Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 1.5\n\t\t\tstepsize: 20000\n\t\t\tmax_iter: 20000\n\n\tFig. 5a:\n\t\tNeed to write out snapshots every iteration for 300 iterations\n\t\tModify L2TermComputation3.py:\n\t\t\tnetProto = \n\t\t\tpresnapshot = \n\t\t\tpostsnapshot = \n\n\n\tFig. 5b:\n\t\tNeed to write out snapshots every 10th iteration for 10000 iterations\n\t\tModify L2TermComputation4.py:\n\t\t\tnetProto = \n\t\t\tpresnapshot = \n\t\t\tpostsnapshot = \n\n\tFig. 6a:\n\t\tSame solver settings as Fig. 1a. \n\t\tTraining LMDB (or other source) listed within architecture must be re-made with fewer samples.\n\n\tFig. 6b:\n\t\t$SOLVER should be the provided \"solver.prototxt\". \t\n\t\tResnet-110 LR=0.35:\n\t\t\tnet: \".../Resnet110Cifar.prototxt\"\n\t\t\tlr_policy: \"multistep\"\n\t\t\tstepvalue: 50000\n\t\t\tstepvalue: 70000\n\t\t\tbase_lr: 0.35\n\t\t\tgamma: 0.1\n\t\t\tmax_iter: 80000\n\t\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\tResnet-110 CLR=0.1-3 SS=10k:\n\t\t\tnet: \".../Resnet110Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3\n\t\t\tstepsize: 10000\n\t\t\tmax_iter: 20000\n\t\t$SOLVER should be the provided \"solver.prototxt\". \t\n\t\tResnet-20 LR=0.35:\n\t\t\tnet: \".../Resnet20Cifar.prototxt\"\n\t\t\tlr_policy: \"multistep\"\n\t\t\tstepvalue: 50000\n\t\t\tstepvalue: 70000\n\t\t\tbase_lr: 0.35\n\t\t\tgamma: 0.1\n\t\t\tmax_iter: 80000\n\t\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\tResnet-20 CLR=0.1-3 SS=10k:\n\t\t\tnet: \".../Resnet20Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3\n\t\t\tstepsize: 10000\n\t\t\tmax_iter: 20000\n\n\tFig. 
7a:\n\tdataset used by network must be changed to CIFAR-100\n\t$SOLVER should be the provided \"lrRangeSolver.prototxt\". \n\t\tSingle network:\n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 20000\n\t\t\tmax_iter: 20000\n\n\tFig. 7b:\n\tdataset used by network must be changed to CIFAR-100\n\t\tLR=0.35:\n\t\t$SOLVER should be the provided \"solver.prototxt\". \n\t\t\tnet: \".../Resnet56Cifar.prototxt\" \n\t\t\tlr_policy: \"multistep\"\n\t\t\tstepvalue: 50000\n\t\t\tstepvalue: 70000\n\t\t\tbase_lr: 0.35\n\t\t\tgamma: 0.1\n\t\t\tmax_iter: 80000\n\t\tCLR=0.1-3 SS=5k:\n\t\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\t\tnet: \".../Resnet56Cifar.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0.1\n\t\t\tmax_lr: 3.0\n\t\t\tstepsize: 5000\n\t\t\tmax_iter: 10000\n\t\t\n\tFig. 8a:\n\t$SOLVER should be the provided \"solver.prototxt\". \n\t\tLR=0.35:\n\t\tAll use solver settings from LR=0.35 in Fig. 1a, but with solver type changed.\n\t\t\ttype: \"Nesterov\"\n\t\t\ttype: \"AdaDelta\"\n\t\t\ttype: \"AdaGrad\"  and remove momentum\n\t\t\ttype: \"Adam\"     and base_lr:  0.0035\n\n\tFig. 8b:\n\t\tLR=0.35:\n\t\t$SOLVER should be the provided \"solver.prototxt\". \n\t\tSame solver settings as Fig. 9a, but with:\n\t\t\ttype: \"Nesterov\"\n\t\tCLR=0.1-3 SS=5k:\n\t\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\tSame solver settings as Fig. 1a, but with:\n\t\t\ttype: \"Nesterov\"\n\n\tFig. 9a:\n\t\tSame solver settings as CLR=0.1-3.0 in Fig. 1a, but with batchSize changed within architecture.\n\t\t\n\tFig. 9b:\n\t\tSame solver settings as CLR=0.1-3.0 in Fig. 1a, but with dropout ratio changed within architecture.\n\n\tFig. 10a:\n\t\tSame solver settings as CLR=0.1-3.0 in Fig. 1a, but with momentum changed.\n\t\t\n\tFig. 10b:\n\t\tSame solver settings as CLR=0.1-3.0 in Fig. 1a, but with weight_decay changed.\n\t\t\n\tFig. 
11a:\n\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\tSingle network:\n\t\t\tnet: \".../bottleneckResnet56.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 0.61\n\t\t\tstepsize: 50000\n\t\t\tmax_iter: 50000\n\n\tFig. 11b:\n\t$SOLVER should be the provided \"clrsolver.prototxt\". \n\t\tSingle network:\n\t\t\tnet: \".../ResNeXt56.prototxt\"\n\t\t\tlr_policy: \"triangular\"\n\t\t\tbase_lr: 0\n\t\t\tmax_lr: 0.7\n\t\t\tstepsize: 50000\n\t\t\tmax_iter: 50000\n\t\t\n"
  },
  {
    "path": "Results/clr3SS5kCifar100Fig8",
    "content": "I0817 16:12:34.704705 17316 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:12:34.707029 17316 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:12:34.708253 17316 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:12:34.709466 17316 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:12:34.710683 17316 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:12:34.712116 17316 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:12:34.713343 17316 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:12:34.714570 17316 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:12:34.715796 17316 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:12:35.130897 17316 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kCifar100Fig8\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:12:35.135243 17316 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:12:35.152686 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:35.152760 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:35.153841 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:12:35.153901 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:12:35.153923 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:12:35.153944 17316 net.cpp:322] The 
NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:12:35.153962 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:12:35.153980 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:12:35.153997 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:12:35.154016 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:12:35.154036 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:12:35.154053 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:12:35.154083 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:12:35.154099 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:12:35.154119 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:12:35.154136 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:12:35.154156 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:12:35.154175 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:12:35.154192 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:12:35.154211 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:12:35.154229 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:12:35.154248 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:12:35.154279 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:12:35.154299 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:12:35.154322 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:12:35.154342 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:12:35.154361 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:12:35.154376 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:12:35.154394 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:12:35.154410 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:12:35.154428 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:12:35.154448 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:12:35.154467 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:12:35.154485 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:12:35.154503 17316 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:12:35.154520 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:12:35.154538 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:12:35.154556 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:12:35.154574 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:12:35.154592 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:12:35.154609 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:12:35.154626 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:12:35.154650 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:12:35.154667 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:12:35.154685 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:12:35.154701 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:12:35.154721 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:12:35.154739 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:12:35.154757 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:12:35.154773 17316 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:12:35.154790 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:12:35.154808 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:12:35.154824 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:12:35.154850 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:12:35.154870 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:12:35.154888 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:12:35.154906 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:12:35.154922 17316 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:12:35.156669 17316 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n  
  bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:12:35.158743 17316 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:12:35.160488 17316 net.cpp:100] Creating Layer dataLayer\nI0817 16:12:35.160590 17316 net.cpp:408] dataLayer -> data_top\nI0817 16:12:35.160792 17316 net.cpp:408] dataLayer -> label\nI0817 16:12:35.160910 17316 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:12:35.174528 17321 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_train_lmdb\nI0817 16:12:35.194567 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:35.201597 17316 net.cpp:150] Setting up dataLayer\nI0817 16:12:35.201661 17316 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:12:35.201673 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.201679 17316 net.cpp:165] Memory required for data: 1536500\nI0817 16:12:35.201694 17316 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:12:35.201707 17316 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:12:35.201715 17316 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:12:35.201736 17316 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:12:35.201752 17316 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:12:35.201823 17316 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:12:35.201838 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.201843 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.201848 17316 net.cpp:165] Memory required for data: 1537500\nI0817 16:12:35.201853 17316 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:12:35.201918 17316 net.cpp:100] Creating Layer pre_conv\nI0817 16:12:35.201931 17316 net.cpp:434] pre_conv <- data_top\nI0817 16:12:35.201941 17316 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:12:35.203752 17316 net.cpp:150] Setting up pre_conv\nI0817 16:12:35.203773 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.203778 17316 net.cpp:165] Memory required for data: 9729500\nI0817 16:12:35.203843 17316 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:12:35.203913 17316 net.cpp:100] Creating Layer pre_bn\nI0817 16:12:35.203927 17316 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:12:35.203939 17316 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:12:35.204074 17322 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:35.204270 17316 net.cpp:150] Setting up pre_bn\nI0817 16:12:35.204288 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.204294 17316 net.cpp:165] Memory required for data: 17921500\nI0817 16:12:35.204311 17316 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:35.204358 17316 net.cpp:100] Creating Layer pre_scale\nI0817 16:12:35.204367 17316 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:12:35.204388 17316 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:12:35.204557 17316 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:35.204816 17316 net.cpp:150] Setting up pre_scale\nI0817 16:12:35.204835 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.204840 17316 net.cpp:165] Memory required for data: 26113500\nI0817 16:12:35.204851 17316 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:12:35.204895 17316 net.cpp:100] Creating Layer pre_relu\nI0817 16:12:35.204903 17316 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:12:35.204911 17316 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:12:35.204922 17316 net.cpp:150] Setting up pre_relu\nI0817 16:12:35.204929 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.204934 17316 
net.cpp:165] Memory required for data: 34305500\nI0817 16:12:35.204939 17316 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:12:35.204953 17316 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:12:35.204959 17316 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:12:35.204967 17316 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:12:35.204977 17316 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:12:35.205027 17316 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:12:35.205039 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.205046 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.205051 17316 net.cpp:165] Memory required for data: 50689500\nI0817 16:12:35.205056 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:12:35.205075 17316 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:12:35.205082 17316 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:12:35.205094 17316 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:12:35.205415 17316 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:12:35.205430 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.205435 17316 net.cpp:165] Memory required for data: 58881500\nI0817 16:12:35.205448 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:12:35.205459 17316 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:12:35.205466 17316 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:12:35.205476 17316 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:12:35.205713 17316 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:12:35.205726 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.205731 17316 net.cpp:165] Memory required for data: 67073500\nI0817 16:12:35.205741 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 
16:12:35.205754 17316 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:12:35.205760 17316 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:12:35.205768 17316 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.205818 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:35.205956 17316 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:12:35.205970 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.205974 17316 net.cpp:165] Memory required for data: 75265500\nI0817 16:12:35.205983 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:12:35.206002 17316 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:12:35.206009 17316 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:12:35.206017 17316 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.206027 17316 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:12:35.206032 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.206037 17316 net.cpp:165] Memory required for data: 83457500\nI0817 16:12:35.206043 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:12:35.206058 17316 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:12:35.206073 17316 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:12:35.206094 17316 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:12:35.206413 17316 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:12:35.206428 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.206434 17316 net.cpp:165] Memory required for data: 91649500\nI0817 16:12:35.206442 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:12:35.206451 17316 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:12:35.206457 17316 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:12:35.206465 17316 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:12:35.206703 17316 net.cpp:150] Setting up 
L1_b1_cbr2_bn\nI0817 16:12:35.206717 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.206722 17316 net.cpp:165] Memory required for data: 99841500\nI0817 16:12:35.206739 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:35.206748 17316 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:12:35.206758 17316 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:12:35.206765 17316 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.206820 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:35.206957 17316 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:12:35.206970 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.206975 17316 net.cpp:165] Memory required for data: 108033500\nI0817 16:12:35.206984 17316 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:12:35.207039 17316 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:12:35.207051 17316 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:12:35.207059 17316 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:12:35.207083 17316 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:12:35.207156 17316 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:12:35.207171 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207176 17316 net.cpp:165] Memory required for data: 116225500\nI0817 16:12:35.207182 17316 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:12:35.207195 17316 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:12:35.207201 17316 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:12:35.207208 17316 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.207217 17316 net.cpp:150] Setting up L1_b1_relu\nI0817 16:12:35.207224 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207229 17316 net.cpp:165] Memory required for data: 124417500\nI0817 16:12:35.207234 
17316 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.207243 17316 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.207249 17316 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:12:35.207257 17316 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:35.207265 17316 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:35.207310 17316 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.207322 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207329 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207340 17316 net.cpp:165] Memory required for data: 140801500\nI0817 16:12:35.207346 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:12:35.207361 17316 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:12:35.207367 17316 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:35.207376 17316 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:12:35.207682 17316 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:12:35.207697 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207702 17316 net.cpp:165] Memory required for data: 148993500\nI0817 16:12:35.207711 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:12:35.207726 17316 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:12:35.207731 17316 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:12:35.207741 17316 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:12:35.207978 17316 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:12:35.207990 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.207995 17316 net.cpp:165] Memory required for data: 157185500\nI0817 16:12:35.208005 17316 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:35.208014 17316 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:12:35.208020 17316 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:12:35.208027 17316 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.208092 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:35.208230 17316 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:12:35.208242 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.208248 17316 net.cpp:165] Memory required for data: 165377500\nI0817 16:12:35.208257 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:12:35.208266 17316 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:12:35.208271 17316 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:12:35.208283 17316 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.208293 17316 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:12:35.208300 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.208305 17316 net.cpp:165] Memory required for data: 173569500\nI0817 16:12:35.208310 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:12:35.208323 17316 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:12:35.208329 17316 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:12:35.208340 17316 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:12:35.208643 17316 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:12:35.208657 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.208662 17316 net.cpp:165] Memory required for data: 181761500\nI0817 16:12:35.208670 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:12:35.208683 17316 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:12:35.208688 17316 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:12:35.208698 17316 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:12:35.208932 17316 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:12:35.208945 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.208950 17316 net.cpp:165] Memory required for data: 189953500\nI0817 16:12:35.208966 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:35.208978 17316 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:12:35.208984 17316 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:12:35.208992 17316 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.209043 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:35.209194 17316 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:12:35.209208 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.209213 17316 net.cpp:165] Memory required for data: 198145500\nI0817 16:12:35.209223 17316 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:12:35.209239 17316 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:12:35.209245 17316 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:12:35.209252 17316 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:35.209262 17316 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:12:35.209295 17316 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:12:35.209306 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.209311 17316 net.cpp:165] Memory required for data: 206337500\nI0817 16:12:35.209316 17316 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:12:35.209323 17316 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:12:35.209328 17316 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:12:35.209336 17316 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:12:35.209344 17316 net.cpp:150] Setting up L1_b2_relu\nI0817 16:12:35.209352 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:12:35.209355 17316 net.cpp:165] Memory required for data: 214529500\nI0817 16:12:35.209360 17316 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.209372 17316 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.209378 17316 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:12:35.209385 17316 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:35.209395 17316 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:35.209435 17316 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.209450 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.209456 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.209461 17316 net.cpp:165] Memory required for data: 230913500\nI0817 16:12:35.209466 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:12:35.209477 17316 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:12:35.209483 17316 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:35.209492 17316 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:12:35.209794 17316 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:12:35.209807 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.209812 17316 net.cpp:165] Memory required for data: 239105500\nI0817 16:12:35.209821 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:12:35.209833 17316 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:12:35.209839 17316 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:12:35.209847 17316 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:12:35.210093 17316 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:12:35.210108 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:12:35.210113 17316 net.cpp:165] Memory required for data: 247297500\nI0817 16:12:35.210122 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:35.210134 17316 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:12:35.210140 17316 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:12:35.210149 17316 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.210199 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:35.210338 17316 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:12:35.210351 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.210356 17316 net.cpp:165] Memory required for data: 255489500\nI0817 16:12:35.210364 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:12:35.210372 17316 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:12:35.210378 17316 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:12:35.210388 17316 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.210398 17316 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:12:35.210412 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.210417 17316 net.cpp:165] Memory required for data: 263681500\nI0817 16:12:35.210422 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:12:35.210436 17316 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:12:35.210443 17316 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:12:35.210450 17316 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:12:35.210757 17316 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:12:35.210770 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.210775 17316 net.cpp:165] Memory required for data: 271873500\nI0817 16:12:35.210783 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:12:35.210798 17316 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:12:35.210804 17316 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:12:35.210813 17316 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:12:35.211047 17316 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:12:35.211061 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211081 17316 net.cpp:165] Memory required for data: 280065500\nI0817 16:12:35.211091 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:35.211103 17316 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:12:35.211109 17316 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:12:35.211117 17316 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.211172 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:35.211308 17316 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:12:35.211320 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211325 17316 net.cpp:165] Memory required for data: 288257500\nI0817 16:12:35.211334 17316 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:12:35.211344 17316 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:12:35.211349 17316 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:12:35.211356 17316 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:35.211367 17316 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:12:35.211400 17316 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:12:35.211410 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211414 17316 net.cpp:165] Memory required for data: 296449500\nI0817 16:12:35.211419 17316 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:12:35.211426 17316 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:12:35.211432 17316 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:12:35.211439 17316 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.211447 17316 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:12:35.211454 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211459 17316 net.cpp:165] Memory required for data: 304641500\nI0817 16:12:35.211463 17316 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.211479 17316 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.211485 17316 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:12:35.211493 17316 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:35.211501 17316 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:35.211546 17316 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.211558 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211565 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211570 17316 net.cpp:165] Memory required for data: 321025500\nI0817 16:12:35.211575 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:12:35.211585 17316 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:12:35.211592 17316 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:35.211611 17316 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:12:35.211920 17316 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:12:35.211935 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.211940 17316 net.cpp:165] Memory required for data: 329217500\nI0817 16:12:35.211948 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:12:35.211958 17316 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:12:35.211964 17316 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:12:35.211971 17316 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:12:35.212218 17316 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:12:35.212231 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.212236 17316 net.cpp:165] Memory required for data: 337409500\nI0817 16:12:35.212247 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:35.212260 17316 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:12:35.212265 17316 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:12:35.212273 17316 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.212332 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:35.212471 17316 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:12:35.212484 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.212489 17316 net.cpp:165] Memory required for data: 345601500\nI0817 16:12:35.212498 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:12:35.212507 17316 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:12:35.212512 17316 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:12:35.212522 17316 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.212532 17316 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:12:35.212538 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.212543 17316 net.cpp:165] Memory required for data: 353793500\nI0817 16:12:35.212548 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:12:35.212563 17316 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:12:35.212568 17316 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:12:35.212576 17316 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:12:35.212882 17316 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:12:35.212895 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.212900 17316 net.cpp:165] Memory required for data: 361985500\nI0817 16:12:35.212909 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:12:35.212920 17316 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:12:35.212927 17316 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:12:35.212935 17316 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:12:35.213181 17316 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:12:35.213196 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213201 17316 net.cpp:165] Memory required for data: 370177500\nI0817 16:12:35.213212 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:35.213223 17316 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:12:35.213229 17316 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:12:35.213239 17316 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.213292 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:35.213434 17316 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:12:35.213448 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213452 17316 net.cpp:165] Memory required for data: 378369500\nI0817 16:12:35.213461 17316 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:12:35.213472 17316 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:12:35.213479 17316 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:12:35.213485 17316 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:35.213493 17316 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:12:35.213536 17316 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:12:35.213546 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213551 17316 net.cpp:165] Memory required for data: 386561500\nI0817 16:12:35.213555 17316 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:12:35.213563 17316 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:12:35.213569 17316 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:12:35.213579 17316 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:12:35.213588 17316 net.cpp:150] Setting up L1_b4_relu\nI0817 16:12:35.213595 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213600 17316 net.cpp:165] Memory required for data: 394753500\nI0817 16:12:35.213604 17316 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.213613 17316 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.213618 17316 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:12:35.213624 17316 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:35.213634 17316 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:35.213682 17316 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.213695 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213701 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.213706 17316 net.cpp:165] Memory required for data: 411137500\nI0817 16:12:35.213711 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:12:35.213721 17316 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:12:35.213727 17316 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:35.213739 17316 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:12:35.214046 17316 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:12:35.214058 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.214069 17316 net.cpp:165] Memory required for data: 419329500\nI0817 16:12:35.214092 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:12:35.214105 17316 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:12:35.214112 17316 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:12:35.214119 17316 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:12:35.214359 17316 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:12:35.214371 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.214376 17316 net.cpp:165] Memory required for data: 427521500\nI0817 16:12:35.214387 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:35.214399 17316 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:12:35.214406 17316 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:12:35.214413 17316 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.214465 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:35.214603 17316 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:12:35.214615 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.214620 17316 net.cpp:165] Memory required for data: 435713500\nI0817 16:12:35.214629 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:12:35.214637 17316 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:12:35.214643 17316 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:12:35.214653 17316 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.214663 17316 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:12:35.214670 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.214675 17316 net.cpp:165] Memory required for data: 443905500\nI0817 16:12:35.214680 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:12:35.214694 17316 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:12:35.214700 17316 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:12:35.214718 17316 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:12:35.215028 17316 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:12:35.215041 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215046 17316 net.cpp:165] Memory required for data: 452097500\nI0817 16:12:35.215055 17316 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:12:35.215070 17316 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:12:35.215077 17316 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:12:35.215085 17316 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:12:35.215330 17316 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:12:35.215343 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215348 17316 net.cpp:165] Memory required for data: 460289500\nI0817 16:12:35.215358 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:35.215370 17316 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:12:35.215376 17316 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:12:35.215384 17316 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.215440 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:35.215579 17316 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:12:35.215591 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215596 17316 net.cpp:165] Memory required for data: 468481500\nI0817 16:12:35.215605 17316 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:12:35.215615 17316 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:12:35.215620 17316 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:12:35.215627 17316 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:35.215638 17316 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:12:35.215668 17316 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:12:35.215680 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215685 17316 net.cpp:165] Memory required for data: 476673500\nI0817 16:12:35.215690 17316 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:12:35.215698 17316 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:12:35.215704 17316 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:12:35.215711 17316 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.215720 17316 net.cpp:150] Setting up L1_b5_relu\nI0817 16:12:35.215728 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215731 17316 net.cpp:165] Memory required for data: 484865500\nI0817 16:12:35.215736 17316 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.215746 17316 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.215751 17316 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:12:35.215759 17316 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:35.215768 17316 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:35.215813 17316 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.215824 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215831 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.215837 17316 net.cpp:165] Memory required for data: 501249500\nI0817 16:12:35.215842 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:12:35.215852 17316 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:12:35.215858 17316 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:35.215870 17316 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:12:35.216186 17316 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:12:35.216199 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.216204 17316 net.cpp:165] Memory required for data: 509441500\nI0817 16:12:35.216220 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:12:35.216229 17316 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:12:35.216235 17316 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:12:35.216243 17316 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:12:35.216482 17316 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:12:35.216495 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.216500 17316 net.cpp:165] Memory required for data: 517633500\nI0817 16:12:35.216511 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:35.216523 17316 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:12:35.216529 17316 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:12:35.216536 17316 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.216591 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:35.216737 17316 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:12:35.216750 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.216755 17316 net.cpp:165] Memory required for data: 525825500\nI0817 16:12:35.216764 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:12:35.216773 17316 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:12:35.216778 17316 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:12:35.216789 17316 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.216799 17316 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:12:35.216805 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.216810 17316 net.cpp:165] Memory required for data: 534017500\nI0817 16:12:35.216814 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:12:35.216830 17316 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:12:35.216835 17316 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:12:35.216843 17316 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:12:35.217161 17316 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:12:35.217175 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217180 17316 
net.cpp:165] Memory required for data: 542209500\nI0817 16:12:35.217190 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:12:35.217201 17316 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:12:35.217208 17316 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:12:35.217216 17316 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:12:35.217453 17316 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:12:35.217465 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217470 17316 net.cpp:165] Memory required for data: 550401500\nI0817 16:12:35.217480 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:35.217489 17316 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:12:35.217495 17316 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:12:35.217505 17316 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.217558 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:35.217700 17316 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:12:35.217711 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217716 17316 net.cpp:165] Memory required for data: 558593500\nI0817 16:12:35.217725 17316 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:12:35.217742 17316 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:12:35.217748 17316 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:12:35.217756 17316 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:35.217767 17316 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:12:35.217797 17316 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:12:35.217808 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217813 17316 net.cpp:165] Memory required for data: 566785500\nI0817 16:12:35.217820 17316 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:12:35.217840 17316 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:12:35.217846 17316 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:12:35.217854 17316 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.217862 17316 net.cpp:150] Setting up L1_b6_relu\nI0817 16:12:35.217870 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217875 17316 net.cpp:165] Memory required for data: 574977500\nI0817 16:12:35.217880 17316 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.217886 17316 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.217891 17316 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:12:35.217900 17316 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:35.217908 17316 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:35.217954 17316 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.217967 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217973 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.217978 17316 net.cpp:165] Memory required for data: 591361500\nI0817 16:12:35.217983 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:12:35.217998 17316 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:12:35.218003 17316 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:35.218013 17316 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:12:35.218333 17316 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:12:35.218348 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.218353 17316 net.cpp:165] Memory required for data: 599553500\nI0817 16:12:35.218361 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:12:35.218377 17316 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:12:35.218384 17316 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:12:35.218394 17316 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:12:35.218631 17316 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:12:35.218647 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.218652 17316 net.cpp:165] Memory required for data: 607745500\nI0817 16:12:35.218662 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:35.218672 17316 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:12:35.218677 17316 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:12:35.218685 17316 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.218737 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:35.218876 17316 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:12:35.218888 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.218894 17316 net.cpp:165] Memory required for data: 615937500\nI0817 16:12:35.218902 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:12:35.218914 17316 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:12:35.218919 17316 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:12:35.218930 17316 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.218940 17316 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:12:35.218947 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.218952 17316 net.cpp:165] Memory required for data: 624129500\nI0817 16:12:35.218957 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:12:35.218967 17316 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:12:35.218973 17316 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:12:35.218984 17316 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:12:35.219296 17316 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:12:35.219310 17316 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.219316 17316 net.cpp:165] Memory required for data: 632321500\nI0817 16:12:35.219332 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:12:35.219341 17316 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:12:35.219347 17316 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:12:35.219358 17316 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:12:35.219599 17316 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:12:35.219612 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.219617 17316 net.cpp:165] Memory required for data: 640513500\nI0817 16:12:35.219627 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:35.219640 17316 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:12:35.219645 17316 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:12:35.219653 17316 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.219707 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:35.219846 17316 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:12:35.219859 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.219864 17316 net.cpp:165] Memory required for data: 648705500\nI0817 16:12:35.219873 17316 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:12:35.219887 17316 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:12:35.219894 17316 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:12:35.219902 17316 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:35.219909 17316 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:12:35.219943 17316 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:12:35.219954 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.219959 17316 net.cpp:165] Memory required for data: 656897500\nI0817 16:12:35.219964 17316 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:12:35.219971 17316 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:12:35.219977 17316 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:12:35.219987 17316 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.219996 17316 net.cpp:150] Setting up L1_b7_relu\nI0817 16:12:35.220003 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.220008 17316 net.cpp:165] Memory required for data: 665089500\nI0817 16:12:35.220013 17316 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.220021 17316 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.220026 17316 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:12:35.220033 17316 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:35.220042 17316 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:35.220094 17316 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.220106 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.220113 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.220118 17316 net.cpp:165] Memory required for data: 681473500\nI0817 16:12:35.220124 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:12:35.220134 17316 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:12:35.220141 17316 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:35.220154 17316 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:12:35.220463 17316 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:12:35.220477 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.220482 17316 net.cpp:165] Memory required for data: 689665500\nI0817 16:12:35.220491 17316 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:12:35.220501 17316 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:12:35.220507 17316 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:12:35.220520 17316 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:12:35.220767 17316 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:12:35.220783 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.220789 17316 net.cpp:165] Memory required for data: 697857500\nI0817 16:12:35.220799 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:35.220808 17316 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:12:35.220814 17316 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:12:35.220821 17316 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.220873 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:35.221019 17316 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:12:35.221031 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.221036 17316 net.cpp:165] Memory required for data: 706049500\nI0817 16:12:35.221045 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:12:35.221056 17316 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:12:35.221062 17316 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:12:35.221076 17316 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.221086 17316 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:12:35.221093 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.221097 17316 net.cpp:165] Memory required for data: 714241500\nI0817 16:12:35.221102 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:12:35.221117 17316 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:12:35.221122 17316 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:12:35.221133 17316 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:12:35.221448 17316 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:12:35.221462 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.221467 17316 net.cpp:165] Memory required for data: 722433500\nI0817 16:12:35.221475 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:12:35.221484 17316 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:12:35.221490 17316 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:12:35.221501 17316 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:12:35.221747 17316 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:12:35.221760 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.221765 17316 net.cpp:165] Memory required for data: 730625500\nI0817 16:12:35.221776 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:35.221788 17316 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:12:35.221794 17316 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:12:35.221802 17316 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.221854 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:35.221993 17316 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:12:35.222007 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222012 17316 net.cpp:165] Memory required for data: 738817500\nI0817 16:12:35.222020 17316 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:12:35.222033 17316 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:12:35.222038 17316 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:12:35.222045 17316 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:35.222054 17316 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:12:35.222095 17316 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:12:35.222107 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222112 17316 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:12:35.222117 17316 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:12:35.222126 17316 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:12:35.222131 17316 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:12:35.222141 17316 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.222151 17316 net.cpp:150] Setting up L1_b8_relu\nI0817 16:12:35.222157 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222168 17316 net.cpp:165] Memory required for data: 755201500\nI0817 16:12:35.222174 17316 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.222182 17316 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.222187 17316 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:12:35.222198 17316 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:35.222211 17316 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:35.222280 17316 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.222306 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222316 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222326 17316 net.cpp:165] Memory required for data: 771585500\nI0817 16:12:35.222334 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:12:35.222352 17316 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:12:35.222363 17316 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:35.222383 17316 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:12:35.222721 17316 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:12:35.222738 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.222743 17316 net.cpp:165] Memory required for data: 
779777500\nI0817 16:12:35.222753 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:12:35.222764 17316 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:12:35.222771 17316 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:12:35.222782 17316 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:12:35.223024 17316 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:12:35.223037 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.223042 17316 net.cpp:165] Memory required for data: 787969500\nI0817 16:12:35.223052 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:35.223062 17316 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:12:35.223075 17316 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:12:35.223083 17316 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.223140 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:35.223284 17316 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:12:35.223296 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.223301 17316 net.cpp:165] Memory required for data: 796161500\nI0817 16:12:35.223310 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:12:35.223321 17316 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:12:35.223327 17316 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:12:35.223335 17316 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.223345 17316 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:12:35.223351 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.223356 17316 net.cpp:165] Memory required for data: 804353500\nI0817 16:12:35.223361 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:12:35.223376 17316 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:12:35.223381 17316 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:12:35.223392 17316 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:12:35.223713 17316 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:12:35.223727 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.223732 17316 net.cpp:165] Memory required for data: 812545500\nI0817 16:12:35.223742 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:12:35.223753 17316 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:12:35.223760 17316 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:12:35.223768 17316 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:12:35.224021 17316 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:12:35.224035 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.224040 17316 net.cpp:165] Memory required for data: 820737500\nI0817 16:12:35.224077 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:35.224089 17316 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:12:35.224097 17316 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:12:35.224113 17316 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.224166 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:35.224304 17316 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:12:35.224318 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.224323 17316 net.cpp:165] Memory required for data: 828929500\nI0817 16:12:35.224331 17316 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:12:35.224344 17316 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:12:35.224350 17316 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:12:35.224357 17316 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:35.224365 17316 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:12:35.224395 17316 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:12:35.224406 17316 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:12:35.224409 17316 net.cpp:165] Memory required for data: 837121500\nI0817 16:12:35.224414 17316 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:12:35.224426 17316 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:12:35.224431 17316 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:12:35.224438 17316 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.224447 17316 net.cpp:150] Setting up L1_b9_relu\nI0817 16:12:35.224454 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.224458 17316 net.cpp:165] Memory required for data: 845313500\nI0817 16:12:35.224463 17316 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.224475 17316 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.224480 17316 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:12:35.224488 17316 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:35.224498 17316 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:35.224542 17316 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.224553 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.224560 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.224565 17316 net.cpp:165] Memory required for data: 861697500\nI0817 16:12:35.224570 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:12:35.224584 17316 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:12:35.224591 17316 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:35.224601 17316 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:12:35.224913 17316 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:12:35.224926 17316 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:12:35.224931 17316 net.cpp:165] Memory required for data: 863745500\nI0817 16:12:35.224941 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:12:35.224952 17316 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:12:35.224959 17316 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:12:35.224967 17316 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:12:35.225210 17316 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:12:35.225230 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.225235 17316 net.cpp:165] Memory required for data: 865793500\nI0817 16:12:35.225246 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:35.225255 17316 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:12:35.225268 17316 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:12:35.225277 17316 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.225330 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:35.225476 17316 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:12:35.225489 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.225494 17316 net.cpp:165] Memory required for data: 867841500\nI0817 16:12:35.225503 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:12:35.225512 17316 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:12:35.225517 17316 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:12:35.225528 17316 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.225538 17316 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:12:35.225544 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.225549 17316 net.cpp:165] Memory required for data: 869889500\nI0817 16:12:35.225554 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:12:35.225567 17316 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:12:35.225574 17316 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:12:35.225582 17316 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:12:35.225896 17316 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:12:35.225909 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.225914 17316 net.cpp:165] Memory required for data: 871937500\nI0817 16:12:35.225924 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:12:35.225935 17316 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:12:35.225942 17316 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:12:35.225950 17316 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:12:35.226199 17316 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:12:35.226213 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.226218 17316 net.cpp:165] Memory required for data: 873985500\nI0817 16:12:35.226228 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:35.226240 17316 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:12:35.226246 17316 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:12:35.226254 17316 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.226310 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:35.226454 17316 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:12:35.226466 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.226471 17316 net.cpp:165] Memory required for data: 876033500\nI0817 16:12:35.226480 17316 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:12:35.226495 17316 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:12:35.226502 17316 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:35.226513 17316 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:12:35.226598 17316 net.cpp:150] Setting up L2_b1_pool\nI0817 16:12:35.226613 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.226617 17316 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:12:35.226624 17316 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:12:35.226636 17316 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:12:35.226644 17316 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:12:35.226650 17316 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:12:35.226657 17316 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:12:35.226691 17316 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:12:35.226699 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.226704 17316 net.cpp:165] Memory required for data: 880129500\nI0817 16:12:35.226709 17316 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:12:35.226717 17316 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:12:35.226723 17316 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:12:35.226733 17316 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.226750 17316 net.cpp:150] Setting up L2_b1_relu\nI0817 16:12:35.226758 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.226763 17316 net.cpp:165] Memory required for data: 882177500\nI0817 16:12:35.226768 17316 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:12:35.226814 17316 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:12:35.226826 17316 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:12:35.229197 17316 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:12:35.229216 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.229223 17316 net.cpp:165] Memory required for data: 884225500\nI0817 16:12:35.229228 17316 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:12:35.229238 17316 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:12:35.229243 17316 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:12:35.229251 17316 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:12:35.229262 17316 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:12:35.229341 17316 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:12:35.229356 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.229360 17316 net.cpp:165] Memory required for data: 888321500\nI0817 16:12:35.229367 17316 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.229374 17316 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.229382 17316 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:12:35.229391 17316 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:35.229403 17316 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:35.229450 17316 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.229461 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.229468 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.229473 17316 net.cpp:165] Memory required for data: 896513500\nI0817 16:12:35.229478 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:12:35.229493 17316 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:12:35.229501 17316 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:35.229511 17316 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:12:35.230980 17316 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:12:35.230998 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.231003 17316 net.cpp:165] Memory required for data: 900609500\nI0817 16:12:35.231012 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:12:35.231025 17316 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:12:35.231032 17316 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:12:35.231043 17316 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:12:35.231297 17316 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:12:35.231310 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.231315 17316 net.cpp:165] Memory required for data: 904705500\nI0817 16:12:35.231326 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:35.231335 17316 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:12:35.231343 17316 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:12:35.231350 17316 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.231406 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:35.231583 17316 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:12:35.231597 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.231602 17316 net.cpp:165] Memory required for data: 908801500\nI0817 16:12:35.231611 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:12:35.231619 17316 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:12:35.231626 17316 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:12:35.231637 17316 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.231654 17316 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:12:35.231662 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.231667 17316 net.cpp:165] Memory required for data: 912897500\nI0817 16:12:35.231672 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:12:35.231688 17316 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:12:35.231694 17316 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:12:35.231703 17316 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:12:35.232172 17316 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:12:35.232185 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232190 17316 net.cpp:165] Memory required for data: 916993500\nI0817 16:12:35.232199 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:12:35.232213 17316 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:12:35.232219 17316 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:12:35.232228 17316 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:12:35.232473 17316 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:12:35.232489 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232494 17316 net.cpp:165] Memory required for data: 921089500\nI0817 16:12:35.232506 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:35.232514 17316 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:12:35.232520 17316 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:12:35.232527 17316 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.232583 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:35.232728 17316 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:12:35.232741 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232746 17316 net.cpp:165] Memory required for data: 925185500\nI0817 16:12:35.232755 17316 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:12:35.232764 17316 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:12:35.232770 17316 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:12:35.232777 17316 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:35.232789 17316 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:12:35.232815 17316 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:12:35.232825 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232828 17316 net.cpp:165] Memory required for data: 929281500\nI0817 16:12:35.232834 17316 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:12:35.232842 17316 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:12:35.232848 17316 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:12:35.232858 17316 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:12:35.232868 17316 net.cpp:150] Setting up L2_b2_relu\nI0817 16:12:35.232875 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232879 17316 net.cpp:165] Memory required for data: 933377500\nI0817 16:12:35.232884 17316 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.232892 17316 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.232897 17316 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:12:35.232904 17316 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:35.232913 17316 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:35.232961 17316 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.232972 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232980 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.232985 17316 net.cpp:165] Memory required for data: 941569500\nI0817 16:12:35.232990 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:12:35.233011 17316 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:12:35.233018 17316 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:35.233027 17316 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:12:35.233494 17316 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:12:35.233508 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.233513 17316 net.cpp:165] Memory required for data: 945665500\nI0817 16:12:35.233522 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:12:35.233534 17316 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:12:35.233541 17316 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:12:35.233549 17316 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:12:35.233791 17316 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:12:35.233808 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.233813 17316 net.cpp:165] Memory required for data: 949761500\nI0817 16:12:35.233824 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:35.233832 17316 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:12:35.233839 17316 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:12:35.233845 17316 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.233901 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:35.234048 17316 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:12:35.234061 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.234072 17316 net.cpp:165] Memory required for data: 953857500\nI0817 16:12:35.234081 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:12:35.234096 17316 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:12:35.234102 17316 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:12:35.234112 17316 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.234122 17316 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:12:35.234128 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.234133 17316 net.cpp:165] Memory required for data: 957953500\nI0817 16:12:35.234138 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:12:35.234153 17316 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:12:35.234158 17316 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:12:35.234166 17316 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:12:35.234619 17316 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:12:35.234633 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.234638 17316 net.cpp:165] Memory required for data: 962049500\nI0817 16:12:35.234647 17316 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:12:35.234657 17316 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:12:35.234663 17316 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:12:35.234673 17316 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:12:35.234920 17316 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:12:35.234933 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.234938 17316 net.cpp:165] Memory required for data: 966145500\nI0817 16:12:35.234948 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:35.234962 17316 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:12:35.234969 17316 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:12:35.234977 17316 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.235031 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:35.235184 17316 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:12:35.235198 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235203 17316 net.cpp:165] Memory required for data: 970241500\nI0817 16:12:35.235213 17316 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:12:35.235224 17316 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:12:35.235230 17316 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:12:35.235237 17316 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:35.235255 17316 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:12:35.235285 17316 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:12:35.235293 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235298 17316 net.cpp:165] Memory required for data: 974337500\nI0817 16:12:35.235303 17316 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:12:35.235324 17316 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:12:35.235330 17316 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:12:35.235337 17316 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.235347 17316 net.cpp:150] Setting up L2_b3_relu\nI0817 16:12:35.235354 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235358 17316 net.cpp:165] Memory required for data: 978433500\nI0817 16:12:35.235364 17316 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.235374 17316 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.235380 17316 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:12:35.235388 17316 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:35.235396 17316 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:35.235445 17316 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.235456 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235463 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235467 17316 net.cpp:165] Memory required for data: 986625500\nI0817 16:12:35.235473 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:12:35.235484 17316 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:12:35.235491 17316 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:35.235502 17316 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:12:35.235962 17316 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:12:35.235976 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.235981 17316 net.cpp:165] Memory required for data: 990721500\nI0817 16:12:35.235991 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:12:35.235999 17316 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:12:35.236006 17316 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:12:35.236016 17316 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:12:35.236269 17316 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:12:35.236284 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.236289 17316 net.cpp:165] Memory required for data: 994817500\nI0817 16:12:35.236299 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:35.236310 17316 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:12:35.236317 17316 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:12:35.236325 17316 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.236380 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:35.236528 17316 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:12:35.236541 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.236546 17316 net.cpp:165] Memory required for data: 998913500\nI0817 16:12:35.236555 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:12:35.236567 17316 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:12:35.236572 17316 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:12:35.236580 17316 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.236589 17316 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:12:35.236619 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.236625 17316 net.cpp:165] Memory required for data: 1003009500\nI0817 16:12:35.236631 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:12:35.236654 17316 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:12:35.236660 17316 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:12:35.236671 17316 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:12:35.237134 17316 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:12:35.237149 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237154 17316 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:12:35.237164 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:12:35.237172 17316 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:12:35.237179 17316 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:12:35.237186 17316 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:12:35.237431 17316 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:12:35.237443 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237448 17316 net.cpp:165] Memory required for data: 1011201500\nI0817 16:12:35.237458 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:35.237468 17316 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:12:35.237473 17316 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:12:35.237483 17316 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.237538 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:35.237684 17316 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:12:35.237697 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237702 17316 net.cpp:165] Memory required for data: 1015297500\nI0817 16:12:35.237711 17316 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:12:35.237720 17316 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:12:35.237726 17316 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:12:35.237732 17316 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:35.237743 17316 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:12:35.237771 17316 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:12:35.237782 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237787 17316 net.cpp:165] Memory required for data: 1019393500\nI0817 16:12:35.237793 17316 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:12:35.237800 17316 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:12:35.237807 17316 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:12:35.237813 17316 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:12:35.237823 17316 net.cpp:150] Setting up L2_b4_relu\nI0817 16:12:35.237829 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237833 17316 net.cpp:165] Memory required for data: 1023489500\nI0817 16:12:35.237838 17316 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.237848 17316 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.237854 17316 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:12:35.237861 17316 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:35.237871 17316 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:35.237915 17316 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.237929 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237936 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.237941 17316 net.cpp:165] Memory required for data: 1031681500\nI0817 16:12:35.237946 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:12:35.237957 17316 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:12:35.237963 17316 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:35.237972 17316 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:12:35.238443 17316 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:12:35.238469 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.238476 17316 net.cpp:165] Memory required for data: 1035777500\nI0817 16:12:35.238484 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:12:35.238493 17316 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:12:35.238500 17316 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:12:35.238512 17316 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:12:35.238757 17316 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:12:35.238770 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.238775 17316 net.cpp:165] Memory required for data: 1039873500\nI0817 16:12:35.238785 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:35.238797 17316 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:12:35.238804 17316 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:12:35.238812 17316 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.238864 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:35.239012 17316 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:12:35.239024 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.239029 17316 net.cpp:165] Memory required for data: 1043969500\nI0817 16:12:35.239038 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:12:35.239047 17316 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:12:35.239053 17316 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:12:35.239068 17316 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.239079 17316 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:12:35.239086 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.239091 17316 net.cpp:165] Memory required for data: 1048065500\nI0817 16:12:35.239096 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:12:35.239110 17316 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:12:35.239117 17316 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:12:35.239125 17316 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:12:35.239581 17316 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:12:35.239595 17316 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.239600 17316 net.cpp:165] Memory required for data: 1052161500\nI0817 16:12:35.239609 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:12:35.239621 17316 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:12:35.239629 17316 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:12:35.239636 17316 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:12:35.239878 17316 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:12:35.239892 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.239897 17316 net.cpp:165] Memory required for data: 1056257500\nI0817 16:12:35.239907 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:35.239915 17316 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:12:35.239922 17316 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:12:35.239933 17316 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.239986 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:35.240146 17316 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:12:35.240159 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240164 17316 net.cpp:165] Memory required for data: 1060353500\nI0817 16:12:35.240173 17316 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:12:35.240182 17316 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:12:35.240188 17316 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:12:35.240195 17316 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:35.240206 17316 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:12:35.240233 17316 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:12:35.240242 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240253 17316 net.cpp:165] Memory required for data: 1064449500\nI0817 16:12:35.240258 17316 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:12:35.240269 17316 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:12:35.240276 17316 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:12:35.240283 17316 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.240293 17316 net.cpp:150] Setting up L2_b5_relu\nI0817 16:12:35.240299 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240303 17316 net.cpp:165] Memory required for data: 1068545500\nI0817 16:12:35.240309 17316 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.240319 17316 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.240324 17316 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:12:35.240332 17316 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:35.240341 17316 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:35.240386 17316 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.240401 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240407 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240412 17316 net.cpp:165] Memory required for data: 1076737500\nI0817 16:12:35.240417 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:12:35.240428 17316 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:12:35.240435 17316 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:35.240444 17316 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:12:35.240906 17316 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:12:35.240921 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.240926 17316 net.cpp:165] Memory required for data: 1080833500\nI0817 16:12:35.240934 17316 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:12:35.240947 17316 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:12:35.240952 17316 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:12:35.240960 17316 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:12:35.241216 17316 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:12:35.241230 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.241235 17316 net.cpp:165] Memory required for data: 1084929500\nI0817 16:12:35.241245 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:35.241253 17316 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:12:35.241259 17316 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:12:35.241271 17316 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.241325 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:35.241472 17316 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:12:35.241484 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.241489 17316 net.cpp:165] Memory required for data: 1089025500\nI0817 16:12:35.241498 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:12:35.241506 17316 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:12:35.241513 17316 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:12:35.241523 17316 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.241533 17316 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:12:35.241539 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.241544 17316 net.cpp:165] Memory required for data: 1093121500\nI0817 16:12:35.241549 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:12:35.241562 17316 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:12:35.241569 17316 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:12:35.241577 17316 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:12:35.242039 17316 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:12:35.242059 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.242303 17316 net.cpp:165] Memory required for data: 1097217500\nI0817 16:12:35.242316 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:12:35.242328 17316 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:12:35.242336 17316 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:12:35.242343 17316 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:12:35.242590 17316 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:12:35.242604 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.242609 17316 net.cpp:165] Memory required for data: 1101313500\nI0817 16:12:35.242619 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:35.242628 17316 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:12:35.242635 17316 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:12:35.242645 17316 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.242700 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:35.242847 17316 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:12:35.242861 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.242866 17316 net.cpp:165] Memory required for data: 1105409500\nI0817 16:12:35.242874 17316 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:12:35.242883 17316 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:12:35.242889 17316 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:12:35.242897 17316 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:35.242908 17316 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:12:35.242934 17316 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:12:35.242944 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.242947 17316 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:12:35.242954 17316 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:12:35.242964 17316 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:12:35.242970 17316 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:12:35.242977 17316 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.242987 17316 net.cpp:150] Setting up L2_b6_relu\nI0817 16:12:35.242993 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.242998 17316 net.cpp:165] Memory required for data: 1113601500\nI0817 16:12:35.243003 17316 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.243010 17316 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.243016 17316 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:12:35.243026 17316 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:35.243036 17316 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:35.243085 17316 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.243100 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.243108 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.243113 17316 net.cpp:165] Memory required for data: 1121793500\nI0817 16:12:35.243118 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:12:35.243129 17316 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:12:35.243136 17316 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:35.243145 17316 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:12:35.243608 17316 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:12:35.243623 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.243628 17316 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:12:35.243636 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:12:35.243649 17316 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:12:35.243661 17316 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:12:35.243670 17316 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:12:35.243924 17316 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:12:35.243937 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.243942 17316 net.cpp:165] Memory required for data: 1129985500\nI0817 16:12:35.243952 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:35.243962 17316 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:12:35.243968 17316 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:12:35.243978 17316 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.244035 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:35.244189 17316 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:12:35.244202 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.244207 17316 net.cpp:165] Memory required for data: 1134081500\nI0817 16:12:35.244216 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:12:35.244225 17316 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:12:35.244231 17316 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:12:35.244243 17316 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.244253 17316 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:12:35.244261 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.244266 17316 net.cpp:165] Memory required for data: 1138177500\nI0817 16:12:35.244271 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:12:35.244284 17316 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:12:35.244290 17316 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:12:35.244299 17316 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:12:35.244765 17316 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:12:35.244779 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.244784 17316 net.cpp:165] Memory required for data: 1142273500\nI0817 16:12:35.244793 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:12:35.244805 17316 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:12:35.244812 17316 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:12:35.244820 17316 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:12:35.245074 17316 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:12:35.245086 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.245091 17316 net.cpp:165] Memory required for data: 1146369500\nI0817 16:12:35.245102 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:35.245111 17316 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:12:35.245117 17316 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:12:35.245126 17316 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.245185 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:35.245334 17316 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:12:35.245349 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.245354 17316 net.cpp:165] Memory required for data: 1150465500\nI0817 16:12:35.245363 17316 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:12:35.245373 17316 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:12:35.245379 17316 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:12:35.245386 17316 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:35.245393 17316 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:12:35.245424 17316 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:12:35.245434 17316 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:35.245437 17316 net.cpp:165] Memory required for data: 1154561500\nI0817 16:12:35.245442 17316 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:12:35.245450 17316 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:12:35.245456 17316 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:12:35.245473 17316 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.245483 17316 net.cpp:150] Setting up L2_b7_relu\nI0817 16:12:35.245491 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.245496 17316 net.cpp:165] Memory required for data: 1158657500\nI0817 16:12:35.245501 17316 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.245507 17316 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.245512 17316 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:12:35.245522 17316 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:35.245532 17316 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:35.245578 17316 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.245589 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.245595 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.245600 17316 net.cpp:165] Memory required for data: 1166849500\nI0817 16:12:35.245605 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:12:35.245620 17316 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:12:35.245626 17316 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:35.245635 17316 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:12:35.246114 17316 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:12:35.246129 17316 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:35.246134 17316 net.cpp:165] Memory required for data: 1170945500\nI0817 16:12:35.246143 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:12:35.246155 17316 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:12:35.246161 17316 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:12:35.246170 17316 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:12:35.246421 17316 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:12:35.246434 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.246439 17316 net.cpp:165] Memory required for data: 1175041500\nI0817 16:12:35.246449 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:35.246459 17316 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:12:35.246464 17316 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:12:35.246474 17316 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.246531 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:35.246686 17316 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:12:35.246700 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.246704 17316 net.cpp:165] Memory required for data: 1179137500\nI0817 16:12:35.246713 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:12:35.246721 17316 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:12:35.246727 17316 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:12:35.246734 17316 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.246747 17316 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:12:35.246754 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.246759 17316 net.cpp:165] Memory required for data: 1183233500\nI0817 16:12:35.246764 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:12:35.246775 17316 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:12:35.246781 17316 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:12:35.246793 17316 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:12:35.247268 17316 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:12:35.247283 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.247288 17316 net.cpp:165] Memory required for data: 1187329500\nI0817 16:12:35.247298 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:12:35.247309 17316 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:12:35.247323 17316 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:12:35.247335 17316 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:12:35.247584 17316 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:12:35.247597 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.247602 17316 net.cpp:165] Memory required for data: 1191425500\nI0817 16:12:35.247613 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:35.247622 17316 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:12:35.247627 17316 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:12:35.247635 17316 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.247694 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:35.247839 17316 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:12:35.247854 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.247859 17316 net.cpp:165] Memory required for data: 1195521500\nI0817 16:12:35.247869 17316 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:12:35.247877 17316 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:12:35.247884 17316 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:12:35.247890 17316 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:35.247897 17316 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:12:35.247927 17316 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:12:35.247936 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.247941 17316 net.cpp:165] Memory required for data: 1199617500\nI0817 16:12:35.247946 17316 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:12:35.247954 17316 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:12:35.247959 17316 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:12:35.247969 17316 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.247979 17316 net.cpp:150] Setting up L2_b8_relu\nI0817 16:12:35.247987 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.247992 17316 net.cpp:165] Memory required for data: 1203713500\nI0817 16:12:35.247995 17316 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.248003 17316 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.248008 17316 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:12:35.248018 17316 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:35.248041 17316 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:35.248097 17316 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.248111 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.248116 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.248121 17316 net.cpp:165] Memory required for data: 1211905500\nI0817 16:12:35.248126 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:12:35.248142 17316 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:12:35.248148 17316 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:35.248162 17316 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:12:35.248625 17316 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:12:35.248639 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.248644 17316 net.cpp:165] Memory required for data: 1216001500\nI0817 16:12:35.248653 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:12:35.248667 17316 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:12:35.248674 17316 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:12:35.248685 17316 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:12:35.248930 17316 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:12:35.248944 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.248949 17316 net.cpp:165] Memory required for data: 1220097500\nI0817 16:12:35.248966 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:35.248975 17316 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:12:35.248981 17316 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:12:35.248989 17316 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.249047 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:35.249202 17316 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:12:35.249215 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.249220 17316 net.cpp:165] Memory required for data: 1224193500\nI0817 16:12:35.249229 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:12:35.249240 17316 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:12:35.249246 17316 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:12:35.249254 17316 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.249264 17316 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:12:35.249270 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.249275 17316 net.cpp:165] Memory required for data: 1228289500\nI0817 16:12:35.249280 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:12:35.249294 17316 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:12:35.249300 17316 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:12:35.249312 17316 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:12:35.249770 17316 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:12:35.249784 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.249789 17316 net.cpp:165] Memory required for data: 1232385500\nI0817 16:12:35.249799 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:12:35.249809 17316 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:12:35.249816 17316 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:12:35.249827 17316 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:12:35.250082 17316 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:12:35.250097 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250102 17316 net.cpp:165] Memory required for data: 1236481500\nI0817 16:12:35.250143 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:35.250159 17316 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:12:35.250166 17316 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:12:35.250174 17316 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.250229 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:35.250399 17316 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:12:35.250411 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250416 17316 net.cpp:165] Memory required for data: 1240577500\nI0817 16:12:35.250425 17316 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:12:35.250434 17316 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:12:35.250442 17316 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:12:35.250448 17316 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:35.250460 17316 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:12:35.250488 17316 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:12:35.250501 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250506 17316 net.cpp:165] Memory required for data: 1244673500\nI0817 16:12:35.250511 17316 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:12:35.250519 17316 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:12:35.250525 17316 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:12:35.250532 17316 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.250541 17316 net.cpp:150] Setting up L2_b9_relu\nI0817 16:12:35.250548 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250552 17316 net.cpp:165] Memory required for data: 1248769500\nI0817 16:12:35.250564 17316 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.250576 17316 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.250581 17316 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:12:35.250589 17316 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:35.250598 17316 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:35.250650 17316 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.250663 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250669 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.250674 17316 net.cpp:165] Memory required for data: 1256961500\nI0817 16:12:35.250679 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:12:35.250690 17316 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:12:35.250697 17316 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:35.250710 17316 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:12:35.251185 17316 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:12:35.251200 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.251205 17316 net.cpp:165] Memory required for data: 1257985500\nI0817 16:12:35.251214 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:12:35.251224 17316 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:12:35.251230 17316 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:12:35.251240 17316 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:12:35.251502 17316 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:12:35.251518 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.251523 17316 net.cpp:165] Memory required for data: 1259009500\nI0817 16:12:35.251533 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:35.251543 17316 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:12:35.251549 17316 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:12:35.251556 17316 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.251610 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:35.251763 17316 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:12:35.251776 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.251781 17316 net.cpp:165] Memory required for data: 1260033500\nI0817 16:12:35.251791 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:12:35.251797 17316 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:12:35.251803 17316 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:12:35.251814 17316 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.251824 17316 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:12:35.251832 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.251835 17316 net.cpp:165] Memory required for data: 1261057500\nI0817 16:12:35.251840 17316 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:12:35.251857 17316 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:12:35.251863 17316 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:12:35.251873 17316 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:12:35.252353 17316 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:12:35.252367 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.252372 17316 net.cpp:165] Memory required for data: 1262081500\nI0817 16:12:35.252382 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:12:35.252395 17316 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:12:35.252401 17316 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:12:35.252413 17316 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:12:35.252666 17316 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:12:35.252679 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.252684 17316 net.cpp:165] Memory required for data: 1263105500\nI0817 16:12:35.252701 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:35.252710 17316 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:12:35.252717 17316 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:12:35.252727 17316 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.252784 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:35.252936 17316 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:12:35.252949 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.252954 17316 net.cpp:165] Memory required for data: 1264129500\nI0817 16:12:35.252964 17316 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:12:35.252972 17316 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:12:35.252979 17316 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:35.252990 17316 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:12:35.253027 17316 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:12:35.253037 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.253041 17316 net.cpp:165] Memory required for data: 1265153500\nI0817 16:12:35.253047 17316 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:12:35.253056 17316 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:12:35.253062 17316 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:12:35.253074 17316 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:12:35.253085 17316 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:12:35.253118 17316 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:12:35.253129 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.253132 17316 net.cpp:165] Memory required for data: 1266177500\nI0817 16:12:35.253137 17316 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:12:35.253145 17316 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:12:35.253151 17316 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:12:35.253159 17316 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.253167 17316 net.cpp:150] Setting up L3_b1_relu\nI0817 16:12:35.253175 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.253178 17316 net.cpp:165] Memory required for data: 1267201500\nI0817 16:12:35.253183 17316 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:12:35.253196 17316 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:12:35.253203 17316 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:12:35.254449 17316 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:12:35.254467 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.254473 17316 net.cpp:165] Memory required for data: 1268225500\nI0817 16:12:35.254479 17316 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:12:35.254488 17316 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:12:35.254494 17316 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:12:35.254503 17316 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:12:35.254513 17316 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:12:35.254552 17316 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:12:35.254566 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.254572 17316 net.cpp:165] Memory required for data: 1270273500\nI0817 16:12:35.254577 17316 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.254585 17316 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.254591 17316 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:12:35.254598 17316 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:35.254608 17316 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:35.254658 17316 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.254669 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.254676 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.254688 17316 net.cpp:165] Memory required for data: 1274369500\nI0817 16:12:35.254694 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:12:35.254709 17316 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:12:35.254716 17316 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:35.254725 17316 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:12:35.256726 17316 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:12:35.256742 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.256747 17316 net.cpp:165] Memory required for data: 1276417500\nI0817 16:12:35.256757 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:12:35.256767 17316 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:12:35.256774 17316 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:12:35.256785 17316 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:12:35.257050 17316 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:12:35.257068 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.257074 17316 net.cpp:165] Memory required for data: 1278465500\nI0817 16:12:35.257086 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:35.257094 17316 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:12:35.257100 17316 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:12:35.257108 17316 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.257169 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:35.257324 17316 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:12:35.257340 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.257345 17316 net.cpp:165] Memory required for data: 1280513500\nI0817 16:12:35.257355 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:12:35.257364 17316 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:12:35.257369 17316 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:12:35.257376 17316 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.257386 17316 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:12:35.257393 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.257398 17316 net.cpp:165] Memory required for data: 1282561500\nI0817 16:12:35.257403 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:12:35.257417 17316 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:12:35.257424 17316 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:12:35.257432 17316 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:12:35.258453 17316 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:12:35.258468 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.258473 17316 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:12:35.258482 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:12:35.258494 17316 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:12:35.258502 17316 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:12:35.258512 17316 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:12:35.258770 17316 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:12:35.258782 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.258787 17316 net.cpp:165] Memory required for data: 1286657500\nI0817 16:12:35.258798 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:35.258806 17316 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:12:35.258812 17316 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:12:35.258823 17316 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.258880 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:35.259033 17316 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:12:35.259047 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.259052 17316 net.cpp:165] Memory required for data: 1288705500\nI0817 16:12:35.259060 17316 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:12:35.259078 17316 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:12:35.259093 17316 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:12:35.259100 17316 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:35.259109 17316 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:12:35.259146 17316 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:12:35.259156 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.259161 17316 net.cpp:165] Memory required for data: 1290753500\nI0817 16:12:35.259166 17316 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:12:35.259173 17316 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:12:35.259179 17316 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:12:35.259189 17316 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:12:35.259199 17316 net.cpp:150] Setting up L3_b2_relu\nI0817 16:12:35.259207 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.259210 17316 net.cpp:165] Memory required for data: 1292801500\nI0817 16:12:35.259215 17316 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.259222 17316 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.259227 17316 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:12:35.259235 17316 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:35.259244 17316 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:35.259294 17316 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.259305 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.259312 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.259316 17316 net.cpp:165] Memory required for data: 1296897500\nI0817 16:12:35.259321 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:12:35.259333 17316 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:12:35.259340 17316 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:35.259351 17316 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:12:35.260375 17316 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:12:35.260390 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.260395 17316 net.cpp:165] Memory required for data: 1298945500\nI0817 16:12:35.260403 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:12:35.260416 17316 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:12:35.260421 17316 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:12:35.260430 17316 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:12:35.260718 17316 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:12:35.260732 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.260737 17316 net.cpp:165] Memory required for data: 1300993500\nI0817 16:12:35.260748 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:35.260756 17316 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:12:35.260763 17316 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:12:35.260771 17316 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.260830 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:35.260982 17316 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:12:35.260996 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.261001 17316 net.cpp:165] Memory required for data: 1303041500\nI0817 16:12:35.261011 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:12:35.261019 17316 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:12:35.261025 17316 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:12:35.261032 17316 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.261042 17316 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:12:35.261049 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.261060 17316 net.cpp:165] Memory required for data: 1305089500\nI0817 16:12:35.261071 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:12:35.261086 17316 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:12:35.261093 17316 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:12:35.261102 17316 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:12:35.262162 17316 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:12:35.262178 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.262183 17316 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:12:35.262192 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:12:35.262202 17316 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:12:35.262208 17316 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:12:35.262221 17316 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:12:35.262485 17316 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:12:35.262498 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.262503 17316 net.cpp:165] Memory required for data: 1309185500\nI0817 16:12:35.262513 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:35.262522 17316 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:12:35.262528 17316 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:12:35.262539 17316 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.262598 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:35.262754 17316 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:12:35.262768 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.262773 17316 net.cpp:165] Memory required for data: 1311233500\nI0817 16:12:35.262781 17316 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:12:35.262794 17316 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:12:35.262801 17316 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:12:35.262809 17316 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:35.262816 17316 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:12:35.262853 17316 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:12:35.262864 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.262869 17316 net.cpp:165] Memory required for data: 1313281500\nI0817 16:12:35.262876 17316 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:12:35.262883 17316 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:12:35.262889 17316 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:12:35.262899 17316 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.262909 17316 net.cpp:150] Setting up L3_b3_relu\nI0817 16:12:35.262917 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.262920 17316 net.cpp:165] Memory required for data: 1315329500\nI0817 16:12:35.262925 17316 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.262933 17316 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.262938 17316 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:12:35.262945 17316 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:35.262955 17316 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:35.263003 17316 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.263015 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.263022 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.263026 17316 net.cpp:165] Memory required for data: 1319425500\nI0817 16:12:35.263031 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:12:35.263043 17316 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:12:35.263049 17316 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:35.263062 17316 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:12:35.264091 17316 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:12:35.264106 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.264111 17316 net.cpp:165] Memory required for data: 1321473500\nI0817 16:12:35.264120 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:12:35.264132 17316 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:12:35.264139 17316 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:12:35.264147 17316 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:12:35.264417 17316 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:12:35.264430 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.264436 17316 net.cpp:165] Memory required for data: 1323521500\nI0817 16:12:35.264446 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:35.264454 17316 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:12:35.264461 17316 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:12:35.264468 17316 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.264528 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:35.264684 17316 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:12:35.264699 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.264705 17316 net.cpp:165] Memory required for data: 1325569500\nI0817 16:12:35.264714 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:12:35.264721 17316 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:12:35.264729 17316 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:12:35.264735 17316 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.264745 17316 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:12:35.264752 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.264756 17316 net.cpp:165] Memory required for data: 1327617500\nI0817 16:12:35.264761 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:12:35.264776 17316 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:12:35.264782 17316 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:12:35.264793 17316 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:12:35.265826 17316 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:12:35.265841 17316 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:12:35.265846 17316 net.cpp:165] Memory required for data: 1329665500\nI0817 16:12:35.265854 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:12:35.265866 17316 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:12:35.265873 17316 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:12:35.265882 17316 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:12:35.266155 17316 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:12:35.266170 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266175 17316 net.cpp:165] Memory required for data: 1331713500\nI0817 16:12:35.266185 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:35.266197 17316 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:12:35.266204 17316 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:12:35.266212 17316 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.266273 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:35.266435 17316 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:12:35.266448 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266453 17316 net.cpp:165] Memory required for data: 1333761500\nI0817 16:12:35.266461 17316 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:12:35.266474 17316 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:12:35.266480 17316 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:12:35.266487 17316 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:35.266495 17316 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:12:35.266530 17316 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:12:35.266547 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266552 17316 net.cpp:165] Memory required for data: 1335809500\nI0817 16:12:35.266557 17316 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:12:35.266566 17316 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:12:35.266571 17316 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:12:35.266582 17316 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:12:35.266592 17316 net.cpp:150] Setting up L3_b4_relu\nI0817 16:12:35.266598 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266602 17316 net.cpp:165] Memory required for data: 1337857500\nI0817 16:12:35.266607 17316 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.266615 17316 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.266620 17316 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:12:35.266628 17316 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:35.266638 17316 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:35.266686 17316 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.266698 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266705 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.266710 17316 net.cpp:165] Memory required for data: 1341953500\nI0817 16:12:35.266715 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:12:35.266731 17316 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:12:35.266737 17316 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:35.266746 17316 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:12:35.267769 17316 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:12:35.267784 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.267789 17316 net.cpp:165] Memory required for data: 1344001500\nI0817 16:12:35.267798 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:12:35.267810 17316 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:12:35.267817 17316 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:12:35.267825 17316 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:12:35.269131 17316 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:12:35.269148 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.269153 17316 net.cpp:165] Memory required for data: 1346049500\nI0817 16:12:35.269165 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:35.269177 17316 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:12:35.269184 17316 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:12:35.269193 17316 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.269258 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:35.269418 17316 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:12:35.269430 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.269436 17316 net.cpp:165] Memory required for data: 1348097500\nI0817 16:12:35.269445 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:12:35.269456 17316 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:12:35.269462 17316 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:12:35.269470 17316 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.269480 17316 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:12:35.269487 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.269491 17316 net.cpp:165] Memory required for data: 1350145500\nI0817 16:12:35.269496 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:12:35.269511 17316 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:12:35.269517 17316 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:12:35.269529 17316 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:12:35.271581 17316 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:12:35.271598 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.271605 17316 net.cpp:165] Memory required for data: 1352193500\nI0817 16:12:35.271615 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:12:35.271626 17316 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:12:35.271634 17316 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:12:35.271642 17316 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:12:35.271903 17316 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:12:35.271916 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.271921 17316 net.cpp:165] Memory required for data: 1354241500\nI0817 16:12:35.271932 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:35.271941 17316 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:12:35.271948 17316 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:12:35.271958 17316 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.272017 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:35.272177 17316 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:12:35.272192 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.272197 17316 net.cpp:165] Memory required for data: 1356289500\nI0817 16:12:35.272205 17316 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:12:35.272218 17316 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:12:35.272225 17316 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:12:35.272233 17316 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:35.272240 17316 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:12:35.272275 17316 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:12:35.272286 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.272291 17316 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:12:35.272296 17316 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:12:35.272305 17316 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:12:35.272310 17316 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:12:35.272320 17316 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.272330 17316 net.cpp:150] Setting up L3_b5_relu\nI0817 16:12:35.272337 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.272342 17316 net.cpp:165] Memory required for data: 1360385500\nI0817 16:12:35.272347 17316 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.272354 17316 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.272359 17316 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:12:35.272367 17316 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:35.272377 17316 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:35.272423 17316 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.272435 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.272441 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.272446 17316 net.cpp:165] Memory required for data: 1364481500\nI0817 16:12:35.272451 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:12:35.272462 17316 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:12:35.272469 17316 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:35.272483 17316 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:12:35.273496 17316 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:12:35.273511 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.273516 17316 net.cpp:165] Memory required for data: 1366529500\nI0817 16:12:35.273525 
17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:12:35.273537 17316 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:12:35.273552 17316 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:12:35.273561 17316 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:12:35.273819 17316 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:12:35.273833 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.273838 17316 net.cpp:165] Memory required for data: 1368577500\nI0817 16:12:35.273847 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:35.273856 17316 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:12:35.273862 17316 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:12:35.273870 17316 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.273929 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:35.274086 17316 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:12:35.274099 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.274104 17316 net.cpp:165] Memory required for data: 1370625500\nI0817 16:12:35.274113 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:12:35.274121 17316 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:12:35.274127 17316 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:12:35.274134 17316 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.274144 17316 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:12:35.274152 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.274155 17316 net.cpp:165] Memory required for data: 1372673500\nI0817 16:12:35.274160 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:12:35.274176 17316 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:12:35.274183 17316 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:12:35.274194 17316 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:12:35.275204 17316 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:12:35.275219 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.275224 17316 net.cpp:165] Memory required for data: 1374721500\nI0817 16:12:35.275233 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:12:35.275245 17316 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:12:35.275252 17316 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:12:35.275260 17316 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:12:35.275518 17316 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:12:35.275532 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.275537 17316 net.cpp:165] Memory required for data: 1376769500\nI0817 16:12:35.275547 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:35.275560 17316 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:12:35.275568 17316 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:12:35.275574 17316 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.275635 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:35.275789 17316 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:12:35.275801 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.275806 17316 net.cpp:165] Memory required for data: 1378817500\nI0817 16:12:35.275815 17316 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:12:35.275827 17316 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:12:35.275835 17316 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:12:35.275841 17316 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:35.275849 17316 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:12:35.275884 17316 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:12:35.275895 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.275900 
17316 net.cpp:165] Memory required for data: 1380865500\nI0817 16:12:35.275905 17316 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:12:35.275913 17316 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:12:35.275919 17316 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:12:35.275936 17316 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.275946 17316 net.cpp:150] Setting up L3_b6_relu\nI0817 16:12:35.275954 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.275959 17316 net.cpp:165] Memory required for data: 1382913500\nI0817 16:12:35.275964 17316 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.275971 17316 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.275976 17316 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:12:35.275984 17316 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:35.275993 17316 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:35.276042 17316 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.276054 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.276062 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.276072 17316 net.cpp:165] Memory required for data: 1387009500\nI0817 16:12:35.276077 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:12:35.276091 17316 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:12:35.276098 17316 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:35.276108 17316 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:12:35.277127 17316 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:12:35.277142 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.277146 17316 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:12:35.277156 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:12:35.277168 17316 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:12:35.277174 17316 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:12:35.277184 17316 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:12:35.277444 17316 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:12:35.277456 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.277462 17316 net.cpp:165] Memory required for data: 1391105500\nI0817 16:12:35.277472 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:35.277482 17316 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:12:35.277487 17316 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:12:35.277495 17316 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.277554 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:35.277714 17316 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:12:35.277727 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.277732 17316 net.cpp:165] Memory required for data: 1393153500\nI0817 16:12:35.277740 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:12:35.277773 17316 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:12:35.277783 17316 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:12:35.277791 17316 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.277801 17316 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:12:35.277808 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.277813 17316 net.cpp:165] Memory required for data: 1395201500\nI0817 16:12:35.277818 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:12:35.277833 17316 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:12:35.277839 17316 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:12:35.277848 
17316 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:12:35.278875 17316 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:12:35.278890 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.278895 17316 net.cpp:165] Memory required for data: 1397249500\nI0817 16:12:35.278904 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:12:35.278916 17316 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:12:35.278929 17316 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:12:35.278939 17316 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:12:35.279211 17316 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:12:35.279225 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.279230 17316 net.cpp:165] Memory required for data: 1399297500\nI0817 16:12:35.279240 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:35.279249 17316 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:12:35.279255 17316 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:12:35.279263 17316 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.279323 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:35.279474 17316 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:12:35.279489 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.279495 17316 net.cpp:165] Memory required for data: 1401345500\nI0817 16:12:35.279505 17316 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:12:35.279513 17316 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:12:35.279520 17316 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:12:35.279526 17316 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:35.279537 17316 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:12:35.279570 17316 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:12:35.279579 17316 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:12:35.279583 17316 net.cpp:165] Memory required for data: 1403393500\nI0817 16:12:35.279589 17316 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:12:35.279600 17316 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:12:35.279606 17316 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:12:35.279613 17316 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.279623 17316 net.cpp:150] Setting up L3_b7_relu\nI0817 16:12:35.279629 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.279634 17316 net.cpp:165] Memory required for data: 1405441500\nI0817 16:12:35.279639 17316 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.279652 17316 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.279659 17316 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:12:35.279666 17316 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:35.279675 17316 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:35.279721 17316 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.279736 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.279742 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.279747 17316 net.cpp:165] Memory required for data: 1409537500\nI0817 16:12:35.279752 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:12:35.279763 17316 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:12:35.279769 17316 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:35.279779 17316 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:12:35.280796 17316 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:12:35.280810 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:12:35.280815 17316 net.cpp:165] Memory required for data: 1411585500\nI0817 16:12:35.280824 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:12:35.280836 17316 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:12:35.280843 17316 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:12:35.280851 17316 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:12:35.281119 17316 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:12:35.281132 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.281137 17316 net.cpp:165] Memory required for data: 1413633500\nI0817 16:12:35.281154 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:35.281168 17316 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:12:35.281175 17316 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:12:35.281185 17316 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.281245 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:35.281404 17316 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:12:35.281416 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.281421 17316 net.cpp:165] Memory required for data: 1415681500\nI0817 16:12:35.281430 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:12:35.281438 17316 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:12:35.281445 17316 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:12:35.281455 17316 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.281466 17316 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:12:35.281472 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.281476 17316 net.cpp:165] Memory required for data: 1417729500\nI0817 16:12:35.281481 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:12:35.281497 17316 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:12:35.281502 17316 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:12:35.281512 17316 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:12:35.282547 17316 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:12:35.282563 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.282568 17316 net.cpp:165] Memory required for data: 1419777500\nI0817 16:12:35.282577 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:12:35.282590 17316 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:12:35.282598 17316 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:12:35.282606 17316 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:12:35.282874 17316 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:12:35.282887 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.282892 17316 net.cpp:165] Memory required for data: 1421825500\nI0817 16:12:35.282902 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:35.282912 17316 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:12:35.282917 17316 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:12:35.282925 17316 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.282984 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:35.283149 17316 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:12:35.283161 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.283166 17316 net.cpp:165] Memory required for data: 1423873500\nI0817 16:12:35.283175 17316 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:12:35.283185 17316 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:12:35.283191 17316 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:12:35.283197 17316 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:35.283208 17316 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:12:35.283242 17316 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:12:35.283254 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.283259 17316 net.cpp:165] Memory required for data: 1425921500\nI0817 16:12:35.283264 17316 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:12:35.283272 17316 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:12:35.283278 17316 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:12:35.283285 17316 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.283294 17316 net.cpp:150] Setting up L3_b8_relu\nI0817 16:12:35.283301 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.283306 17316 net.cpp:165] Memory required for data: 1427969500\nI0817 16:12:35.283310 17316 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.283327 17316 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.283334 17316 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:12:35.283341 17316 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:35.283351 17316 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:35.283399 17316 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.283411 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.283417 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.283422 17316 net.cpp:165] Memory required for data: 1432065500\nI0817 16:12:35.283427 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:12:35.283438 17316 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:12:35.283444 17316 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:35.283457 17316 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:12:35.285547 17316 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:12:35.285564 
17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.285570 17316 net.cpp:165] Memory required for data: 1434113500\nI0817 16:12:35.285579 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:12:35.285595 17316 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:12:35.285603 17316 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:12:35.285614 17316 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:12:35.285878 17316 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:12:35.285892 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.285897 17316 net.cpp:165] Memory required for data: 1436161500\nI0817 16:12:35.285908 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:35.285917 17316 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:12:35.285923 17316 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:12:35.285934 17316 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.285992 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:35.286160 17316 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:12:35.286175 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.286180 17316 net.cpp:165] Memory required for data: 1438209500\nI0817 16:12:35.286190 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:12:35.286197 17316 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:12:35.286203 17316 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:12:35.286213 17316 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.286223 17316 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:12:35.286231 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.286236 17316 net.cpp:165] Memory required for data: 1440257500\nI0817 16:12:35.286240 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:12:35.286255 17316 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:12:35.286262 17316 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:12:35.286270 17316 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:12:35.287292 17316 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:12:35.287307 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.287312 17316 net.cpp:165] Memory required for data: 1442305500\nI0817 16:12:35.287322 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:12:35.287333 17316 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:12:35.287340 17316 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:12:35.287348 17316 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:12:35.287613 17316 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:12:35.287626 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.287631 17316 net.cpp:165] Memory required for data: 1444353500\nI0817 16:12:35.287649 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:35.287662 17316 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:12:35.287668 17316 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:12:35.287680 17316 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.287739 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:35.287897 17316 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:12:35.287910 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.287915 17316 net.cpp:165] Memory required for data: 1446401500\nI0817 16:12:35.287925 17316 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:12:35.287933 17316 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:12:35.287940 17316 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:12:35.287947 17316 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:35.287958 17316 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:12:35.287992 17316 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:12:35.288003 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.288007 17316 net.cpp:165] Memory required for data: 1448449500\nI0817 16:12:35.288013 17316 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:12:35.288024 17316 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:12:35.288030 17316 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:12:35.288038 17316 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.288048 17316 net.cpp:150] Setting up L3_b9_relu\nI0817 16:12:35.288054 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.288058 17316 net.cpp:165] Memory required for data: 1450497500\nI0817 16:12:35.288069 17316 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:12:35.288079 17316 net.cpp:100] Creating Layer post_pool\nI0817 16:12:35.288084 17316 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:12:35.288092 17316 net.cpp:408] post_pool -> post_pool\nI0817 16:12:35.288131 17316 net.cpp:150] Setting up post_pool\nI0817 16:12:35.288144 17316 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:12:35.288149 17316 net.cpp:165] Memory required for data: 1450529500\nI0817 16:12:35.288154 17316 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:12:35.288240 17316 net.cpp:100] Creating Layer post_FC\nI0817 16:12:35.288254 17316 net.cpp:434] post_FC <- post_pool\nI0817 16:12:35.288264 17316 net.cpp:408] post_FC -> post_FC_top\nI0817 16:12:35.288564 17316 net.cpp:150] Setting up post_FC\nI0817 16:12:35.288579 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.288585 17316 net.cpp:165] Memory required for data: 1450579500\nI0817 16:12:35.288595 17316 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:12:35.288604 17316 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:12:35.288609 17316 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:12:35.288620 17316 
net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:12:35.288631 17316 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:12:35.288678 17316 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:12:35.288693 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.288700 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.288704 17316 net.cpp:165] Memory required for data: 1450679500\nI0817 16:12:35.288710 17316 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:12:35.288753 17316 net.cpp:100] Creating Layer accuracy\nI0817 16:12:35.288764 17316 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:12:35.288772 17316 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:12:35.288780 17316 net.cpp:408] accuracy -> accuracy\nI0817 16:12:35.288822 17316 net.cpp:150] Setting up accuracy\nI0817 16:12:35.288836 17316 net.cpp:157] Top shape: (1)\nI0817 16:12:35.288841 17316 net.cpp:165] Memory required for data: 1450679504\nI0817 16:12:35.288846 17316 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:35.288861 17316 net.cpp:100] Creating Layer loss\nI0817 16:12:35.288868 17316 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:12:35.288875 17316 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:12:35.288887 17316 net.cpp:408] loss -> loss\nI0817 16:12:35.288933 17316 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:35.289113 17316 net.cpp:150] Setting up loss\nI0817 16:12:35.289127 17316 net.cpp:157] Top shape: (1)\nI0817 16:12:35.289134 17316 net.cpp:160]     with loss weight 1\nI0817 16:12:35.289207 17316 net.cpp:165] Memory required for data: 1450679508\nI0817 16:12:35.289216 17316 net.cpp:226] loss needs backward computation.\nI0817 16:12:35.289223 17316 net.cpp:228] accuracy does not need backward computation.\nI0817 16:12:35.289229 17316 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:12:35.289234 
17316 net.cpp:226] post_FC needs backward computation.\nI0817 16:12:35.289239 17316 net.cpp:226] post_pool needs backward computation.\nI0817 16:12:35.289244 17316 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:12:35.289249 17316 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.289255 17316 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.289260 17316 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.289265 17316 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.289270 17316 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.289275 17316 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.289280 17316 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.289285 17316 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.289290 17316 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:12:35.289296 17316 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:12:35.289301 17316 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.289306 17316 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.289311 17316 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.289317 17316 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.289322 17316 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.289327 17316 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:12:35.289332 17316 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.289337 17316 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:12:35.289342 17316 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:12:35.289347 17316 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:12:35.289353 
17316 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0817 16:12:35.289358 17316 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.289363 17316 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.289368 17316 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.289374 17316 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.289378 17316 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.289383 17316 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.289388 17316 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.289394 17316 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:12:35.289399 17316 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:12:35.289404 17316 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.289410 17316 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.289415 17316 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.289424 17316 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.289436 17316 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.289443 17316 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.289448 17316 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.289453 17316 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.289458 17316 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:12:35.289463 17316 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:12:35.289469 17316 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:12:35.289474 17316 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.289479 17316 net.cpp:226] L3_b5_cbr2_bn needs backward 
computation.\nI0817 16:12:35.289484 17316 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.289489 17316 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.289494 17316 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.289499 17316 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.289505 17316 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.289510 17316 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:12:35.289515 17316 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:12:35.289520 17316 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.289526 17316 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.289531 17316 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.289536 17316 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.289541 17316 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.289546 17316 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.289551 17316 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.289556 17316 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.289562 17316 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:12:35.289567 17316 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:12:35.289572 17316 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:12:35.289578 17316 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.289583 17316 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.289589 17316 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.289594 17316 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.289599 17316 net.cpp:226] 
L3_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.289604 17316 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.289609 17316 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.289614 17316 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:12:35.289620 17316 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:12:35.289625 17316 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.289630 17316 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.289636 17316 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.289641 17316 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.289646 17316 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.289651 17316 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.289656 17316 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.289661 17316 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.289667 17316 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:12:35.289672 17316 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:12:35.289679 17316 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:12:35.289688 17316 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:12:35.289695 17316 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.289701 17316 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:12:35.289710 17316 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.289716 17316 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:12:35.289721 17316 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.289726 17316 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.289732 17316 
net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:12:35.289737 17316 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.289742 17316 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.289748 17316 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:12:35.289753 17316 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:12:35.289759 17316 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.289765 17316 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.289770 17316 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.289777 17316 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.289782 17316 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.289786 17316 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.289791 17316 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.289798 17316 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.289803 17316 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:12:35.289808 17316 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:12:35.289813 17316 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.289819 17316 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.289824 17316 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.289830 17316 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.289835 17316 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.289840 17316 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:12:35.289846 17316 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.289851 17316 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 
16:12:35.289857 17316 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:12:35.289862 17316 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:12:35.289867 17316 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:12:35.289873 17316 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.289878 17316 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.289885 17316 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.289890 17316 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.289894 17316 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.289899 17316 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.289906 17316 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.289911 17316 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:12:35.289916 17316 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:12:35.289921 17316 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.289927 17316 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.289932 17316 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.289937 17316 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.289943 17316 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.289948 17316 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.289958 17316 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.289964 17316 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.289970 17316 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:12:35.289975 17316 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:12:35.289980 17316 net.cpp:226] 
L2_b5_sum_eltwise needs backward computation.\nI0817 16:12:35.289986 17316 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.289993 17316 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:12:35.289997 17316 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.290002 17316 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.290009 17316 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.290014 17316 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.290019 17316 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.290024 17316 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:12:35.290030 17316 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:12:35.290035 17316 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.290040 17316 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.290045 17316 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.290051 17316 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.290056 17316 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.290061 17316 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.290073 17316 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.290081 17316 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.290086 17316 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:12:35.290096 17316 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:12:35.290102 17316 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:12:35.290107 17316 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.290113 17316 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.290119 
17316 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.290124 17316 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.290130 17316 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.290135 17316 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.290141 17316 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.290148 17316 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:12:35.290153 17316 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:12:35.290158 17316 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.290164 17316 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.290170 17316 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.290175 17316 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.290181 17316 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.290186 17316 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.290192 17316 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.290197 17316 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.290204 17316 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:12:35.290210 17316 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:12:35.290216 17316 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:12:35.290221 17316 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:12:35.290227 17316 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.290238 17316 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:12:35.290244 17316 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.290251 17316 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 
16:12:35.290256 17316 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.290261 17316 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.290267 17316 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:12:35.290272 17316 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.290278 17316 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.290284 17316 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:12:35.290289 17316 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:12:35.290295 17316 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.290302 17316 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.290307 17316 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.290313 17316 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.290318 17316 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.290323 17316 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.290329 17316 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.290334 17316 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.290340 17316 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:12:35.290346 17316 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:12:35.290351 17316 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.290359 17316 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.290364 17316 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.290369 17316 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.290374 17316 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.290380 17316 net.cpp:226] L1_b8_cbr1_scale needs backward 
computation.\nI0817 16:12:35.290386 17316 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.290391 17316 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:12:35.290397 17316 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:12:35.290403 17316 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:12:35.290410 17316 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:12:35.290416 17316 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.290421 17316 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.290427 17316 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.290433 17316 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.290438 17316 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.290444 17316 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.290451 17316 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.290457 17316 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:12:35.290462 17316 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:12:35.290468 17316 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.290474 17316 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.290479 17316 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.290485 17316 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.290491 17316 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.290496 17316 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.290503 17316 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.290513 17316 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.290519 17316 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:12:35.290525 17316 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:12:35.290531 17316 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:12:35.290537 17316 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.290544 17316 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:12:35.290549 17316 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.290555 17316 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.290560 17316 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.290565 17316 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.290571 17316 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.290577 17316 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:12:35.290582 17316 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:12:35.290588 17316 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.290594 17316 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.290599 17316 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.290606 17316 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.290611 17316 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.290616 17316 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.290622 17316 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.290628 17316 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.290633 17316 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:12:35.290639 17316 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:12:35.290645 17316 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:12:35.290652 17316 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.290657 17316 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.290663 17316 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.290668 17316 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.290674 17316 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.290679 17316 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.290685 17316 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.290691 17316 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:12:35.290697 17316 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:12:35.290702 17316 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.290709 17316 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.290715 17316 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.290720 17316 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.290726 17316 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.290732 17316 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.290737 17316 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.290743 17316 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.290750 17316 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:12:35.290755 17316 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:12:35.290761 17316 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.290767 17316 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.290773 17316 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:12:35.290779 17316 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.290791 17316 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.290798 17316 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:12:35.290804 17316 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.290810 17316 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.290817 17316 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:12:35.290822 17316 net.cpp:226] pre_relu needs backward computation.\nI0817 16:12:35.290827 17316 net.cpp:226] pre_scale needs backward computation.\nI0817 16:12:35.290832 17316 net.cpp:226] pre_bn needs backward computation.\nI0817 16:12:35.290838 17316 net.cpp:226] pre_conv needs backward computation.\nI0817 16:12:35.290845 17316 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:12:35.290853 17316 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:12:35.290858 17316 net.cpp:270] This network produces output accuracy\nI0817 16:12:35.290863 17316 net.cpp:270] This network produces output loss\nI0817 16:12:35.291232 17316 net.cpp:283] Network initialization done.\nI0817 16:12:35.300501 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:35.300542 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:35.300602 17316 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:12:35.300985 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:12:35.301003 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:12:35.301014 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:12:35.301023 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:12:35.301033 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:12:35.301043 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:12:35.301051 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:12:35.301060 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:12:35.301079 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:12:35.301089 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:12:35.301097 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:12:35.301105 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:12:35.301115 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:12:35.301123 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:12:35.301132 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:12:35.301141 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:12:35.301149 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:12:35.301158 17316 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:12:35.301167 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:12:35.301187 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:12:35.301196 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:12:35.301204 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:12:35.301218 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:12:35.301226 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:12:35.301235 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:12:35.301242 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:12:35.301252 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:12:35.301260 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:12:35.301268 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:12:35.301277 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:12:35.301286 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:12:35.301295 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:12:35.301304 17316 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:12:35.301311 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:12:35.301321 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:12:35.301329 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:12:35.301337 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:12:35.301347 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:12:35.301355 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:12:35.301363 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:12:35.301375 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:12:35.301383 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:12:35.301393 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:12:35.301400 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:12:35.301409 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:12:35.301417 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:12:35.301426 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:12:35.301434 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:12:35.301443 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:12:35.301460 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:12:35.301470 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:12:35.301478 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:12:35.301487 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:12:35.301496 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:12:35.301506 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:12:35.301512 17316 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:12:35.303161 17316 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer 
{\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n 
   kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"\nI0817 16:12:35.304760 17316 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:12:35.305003 17316 net.cpp:100] Creating Layer dataLayer\nI0817 16:12:35.305027 17316 net.cpp:408] dataLayer -> data_top\nI0817 16:12:35.305042 17316 net.cpp:408] dataLayer -> label\nI0817 16:12:35.305054 17316 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:12:35.318161 17323 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_test_lmdb\nI0817 16:12:35.318554 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:35.325896 17316 net.cpp:150] Setting up dataLayer\nI0817 16:12:35.325958 17316 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:12:35.325971 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.325976 17316 net.cpp:165] Memory required for data: 1536500\nI0817 16:12:35.325984 17316 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:12:35.325996 17316 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:12:35.326002 17316 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:12:35.326010 17316 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:12:35.326026 17316 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:12:35.326165 17316 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:12:35.326179 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.326189 17316 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:35.326195 17316 net.cpp:165] Memory required for data: 1537500\nI0817 16:12:35.326200 17316 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:12:35.326216 17316 net.cpp:100] Creating Layer pre_conv\nI0817 16:12:35.326223 17316 net.cpp:434] pre_conv <- data_top\nI0817 16:12:35.326234 17316 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:12:35.326725 17316 net.cpp:150] Setting up pre_conv\nI0817 16:12:35.326750 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.326758 17316 net.cpp:165] Memory required for data: 9729500\nI0817 16:12:35.326773 17316 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:12:35.326789 17316 net.cpp:100] Creating Layer pre_bn\nI0817 16:12:35.326795 17316 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:12:35.326803 17316 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:12:35.327132 17316 net.cpp:150] Setting up pre_bn\nI0817 16:12:35.327147 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.327155 17316 net.cpp:165] Memory required for data: 17921500\nI0817 16:12:35.327172 17316 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:35.327181 17316 net.cpp:100] Creating Layer pre_scale\nI0817 16:12:35.327188 17316 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:12:35.327198 17316 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:12:35.327286 17316 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:35.327484 17316 net.cpp:150] Setting up pre_scale\nI0817 16:12:35.327500 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.327507 17316 net.cpp:165] Memory required for data: 26113500\nI0817 16:12:35.327517 17316 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:12:35.327524 17316 net.cpp:100] Creating Layer pre_relu\nI0817 16:12:35.327530 17316 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:12:35.327551 17316 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:12:35.327564 17316 net.cpp:150] Setting up pre_relu\nI0817 16:12:35.327571 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.327576 17316 net.cpp:165] Memory required for data: 
34305500\nI0817 16:12:35.327581 17316 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:12:35.327589 17316 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:12:35.327594 17316 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:12:35.327618 17316 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:12:35.327630 17316 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:12:35.327687 17316 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:12:35.327699 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.327706 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.327711 17316 net.cpp:165] Memory required for data: 50689500\nI0817 16:12:35.327716 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:12:35.327728 17316 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:12:35.327733 17316 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:12:35.327778 17316 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:12:35.328182 17316 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:12:35.328200 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.328205 17316 net.cpp:165] Memory required for data: 58881500\nI0817 16:12:35.328218 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:12:35.328235 17316 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:12:35.328243 17316 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:12:35.328251 17316 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:12:35.328824 17316 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:12:35.328841 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.328850 17316 net.cpp:165] Memory required for data: 67073500\nI0817 16:12:35.328861 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:35.328871 17316 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:12:35.328876 17316 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:12:35.328884 17316 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.328948 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:35.329139 17316 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:12:35.329154 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.329161 17316 net.cpp:165] Memory required for data: 75265500\nI0817 16:12:35.329179 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:12:35.329190 17316 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:12:35.329196 17316 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:12:35.329206 17316 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.329216 17316 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:12:35.329226 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.329231 17316 net.cpp:165] Memory required for data: 83457500\nI0817 16:12:35.329236 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:12:35.329247 17316 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:12:35.329253 17316 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:12:35.329267 17316 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:12:35.329619 17316 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:12:35.329633 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.329638 17316 net.cpp:165] Memory required for data: 91649500\nI0817 16:12:35.329648 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:12:35.329658 17316 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:12:35.329663 17316 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:12:35.329675 17316 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:12:35.329982 17316 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:12:35.329998 17316 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330003 17316 net.cpp:165] Memory required for data: 99841500\nI0817 16:12:35.330024 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:35.330034 17316 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:12:35.330039 17316 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:12:35.330047 17316 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.330113 17316 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:35.330269 17316 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:12:35.330283 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330288 17316 net.cpp:165] Memory required for data: 108033500\nI0817 16:12:35.330297 17316 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:12:35.330309 17316 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:12:35.330315 17316 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:12:35.330322 17316 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:12:35.330332 17316 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:12:35.330366 17316 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:12:35.330375 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330380 17316 net.cpp:165] Memory required for data: 116225500\nI0817 16:12:35.330385 17316 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:12:35.330396 17316 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:12:35.330402 17316 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:12:35.330410 17316 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.330418 17316 net.cpp:150] Setting up L1_b1_relu\nI0817 16:12:35.330425 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330430 17316 net.cpp:165] Memory required for data: 124417500\nI0817 16:12:35.330435 17316 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.330443 17316 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.330449 17316 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:12:35.330456 17316 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:35.330466 17316 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:35.330513 17316 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:35.330533 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330541 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330546 17316 net.cpp:165] Memory required for data: 140801500\nI0817 16:12:35.330551 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:12:35.330565 17316 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:12:35.330571 17316 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:35.330580 17316 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:12:35.330925 17316 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:12:35.330940 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.330946 17316 net.cpp:165] Memory required for data: 148993500\nI0817 16:12:35.330955 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:12:35.330967 17316 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:12:35.330973 17316 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:12:35.330981 17316 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:12:35.331276 17316 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:12:35.331291 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.331296 17316 net.cpp:165] Memory required for data: 157185500\nI0817 16:12:35.331307 17316 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:12:35.331316 17316 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:12:35.331322 17316 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:12:35.331329 17316 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.331390 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:35.331807 17316 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:12:35.331830 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.331838 17316 net.cpp:165] Memory required for data: 165377500\nI0817 16:12:35.331854 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:12:35.331869 17316 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:12:35.331879 17316 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:12:35.331897 17316 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.331915 17316 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:12:35.331926 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.331933 17316 net.cpp:165] Memory required for data: 173569500\nI0817 16:12:35.331944 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:12:35.331969 17316 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:12:35.331979 17316 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:12:35.331992 17316 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:12:35.332435 17316 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:12:35.332451 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.332459 17316 net.cpp:165] Memory required for data: 181761500\nI0817 16:12:35.332469 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:12:35.332482 17316 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:12:35.332489 17316 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:12:35.332497 17316 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:12:35.332818 17316 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:12:35.332836 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.332841 17316 net.cpp:165] Memory required for data: 189953500\nI0817 16:12:35.332859 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:35.332870 17316 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:12:35.332876 17316 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:12:35.332890 17316 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.332967 17316 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:35.333154 17316 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:12:35.333171 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333178 17316 net.cpp:165] Memory required for data: 198145500\nI0817 16:12:35.333194 17316 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:12:35.333204 17316 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:12:35.333210 17316 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:12:35.333220 17316 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:35.333230 17316 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:12:35.333272 17316 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:12:35.333282 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333287 17316 net.cpp:165] Memory required for data: 206337500\nI0817 16:12:35.333292 17316 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:12:35.333303 17316 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:12:35.333310 17316 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:12:35.333317 17316 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:12:35.333328 17316 net.cpp:150] Setting up L1_b2_relu\nI0817 16:12:35.333334 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333338 17316 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:12:35.333343 17316 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.333353 17316 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.333359 17316 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:12:35.333366 17316 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:35.333379 17316 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:35.333433 17316 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:35.333447 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333457 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333462 17316 net.cpp:165] Memory required for data: 230913500\nI0817 16:12:35.333467 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:12:35.333478 17316 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:12:35.333485 17316 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:35.333497 17316 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:12:35.333885 17316 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:12:35.333902 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.333907 17316 net.cpp:165] Memory required for data: 239105500\nI0817 16:12:35.333916 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:12:35.333930 17316 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:12:35.333937 17316 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:12:35.333945 17316 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:12:35.334260 17316 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:12:35.334275 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.334281 17316 net.cpp:165] Memory required for data: 
247297500\nI0817 16:12:35.334293 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:35.334302 17316 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:12:35.334308 17316 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:12:35.334319 17316 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.334389 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:35.334626 17316 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:12:35.334643 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.334648 17316 net.cpp:165] Memory required for data: 255489500\nI0817 16:12:35.334661 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:12:35.334671 17316 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:12:35.334676 17316 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:12:35.334683 17316 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.334700 17316 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:12:35.334708 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.334712 17316 net.cpp:165] Memory required for data: 263681500\nI0817 16:12:35.334717 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:12:35.334740 17316 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:12:35.334748 17316 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:12:35.334758 17316 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:12:35.335175 17316 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:12:35.335192 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.335198 17316 net.cpp:165] Memory required for data: 271873500\nI0817 16:12:35.335207 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:12:35.335228 17316 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:12:35.335235 17316 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:12:35.335245 17316 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:12:35.335544 17316 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:12:35.335558 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.335563 17316 net.cpp:165] Memory required for data: 280065500\nI0817 16:12:35.335574 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:35.335582 17316 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:12:35.335588 17316 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:12:35.335602 17316 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.335669 17316 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:35.335849 17316 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:12:35.335868 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.335873 17316 net.cpp:165] Memory required for data: 288257500\nI0817 16:12:35.335882 17316 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:12:35.335891 17316 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:12:35.335898 17316 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:12:35.335906 17316 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:35.335913 17316 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:12:35.335954 17316 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:12:35.335968 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.335973 17316 net.cpp:165] Memory required for data: 296449500\nI0817 16:12:35.335978 17316 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:12:35.335988 17316 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:12:35.335994 17316 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:12:35.336004 17316 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.336015 17316 net.cpp:150] Setting up L1_b3_relu\nI0817 16:12:35.336022 17316 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:12:35.336026 17316 net.cpp:165] Memory required for data: 304641500\nI0817 16:12:35.336031 17316 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.336041 17316 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.336047 17316 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:12:35.336055 17316 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:35.336078 17316 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:35.336136 17316 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:35.336154 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.336164 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.336169 17316 net.cpp:165] Memory required for data: 321025500\nI0817 16:12:35.336175 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:12:35.336186 17316 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:12:35.336201 17316 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:35.336213 17316 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:12:35.336627 17316 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:12:35.336642 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.336649 17316 net.cpp:165] Memory required for data: 329217500\nI0817 16:12:35.336659 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:12:35.336673 17316 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:12:35.336678 17316 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:12:35.336704 17316 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:12:35.337013 17316 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:12:35.337028 17316 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:12:35.337033 17316 net.cpp:165] Memory required for data: 337409500\nI0817 16:12:35.337047 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:35.337056 17316 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:12:35.337062 17316 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:12:35.337081 17316 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.337148 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:35.337324 17316 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:12:35.337342 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.337347 17316 net.cpp:165] Memory required for data: 345601500\nI0817 16:12:35.337357 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:12:35.337364 17316 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:12:35.337370 17316 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:12:35.337378 17316 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.337388 17316 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:12:35.337393 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.337402 17316 net.cpp:165] Memory required for data: 353793500\nI0817 16:12:35.337407 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:12:35.337421 17316 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:12:35.337427 17316 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:12:35.337451 17316 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:12:35.337898 17316 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:12:35.337913 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.337918 17316 net.cpp:165] Memory required for data: 361985500\nI0817 16:12:35.337929 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:12:35.337941 17316 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:12:35.337949 17316 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:12:35.337959 17316 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:12:35.338279 17316 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:12:35.338296 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338301 17316 net.cpp:165] Memory required for data: 370177500\nI0817 16:12:35.338312 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:35.338323 17316 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:12:35.338330 17316 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:12:35.338341 17316 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.338405 17316 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:35.338582 17316 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:12:35.338601 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338606 17316 net.cpp:165] Memory required for data: 378369500\nI0817 16:12:35.338615 17316 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:12:35.338629 17316 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:12:35.338635 17316 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:12:35.338642 17316 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:35.338659 17316 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:12:35.338707 17316 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:12:35.338718 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338723 17316 net.cpp:165] Memory required for data: 386561500\nI0817 16:12:35.338729 17316 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:12:35.338737 17316 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:12:35.338745 17316 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:12:35.338757 17316 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:12:35.338766 17316 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:12:35.338773 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338779 17316 net.cpp:165] Memory required for data: 394753500\nI0817 16:12:35.338786 17316 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.338793 17316 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.338798 17316 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:12:35.338809 17316 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:35.338819 17316 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:35.338872 17316 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:35.338886 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338894 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.338898 17316 net.cpp:165] Memory required for data: 411137500\nI0817 16:12:35.338904 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:12:35.338918 17316 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:12:35.338928 17316 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:35.338937 17316 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:12:35.339386 17316 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:12:35.339402 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.339407 17316 net.cpp:165] Memory required for data: 419329500\nI0817 16:12:35.339431 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:12:35.339442 17316 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:12:35.339452 17316 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:12:35.339462 17316 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:12:35.339768 17316 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:12:35.339781 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.339787 17316 net.cpp:165] Memory required for data: 427521500\nI0817 16:12:35.339802 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:35.339810 17316 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:12:35.339817 17316 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:12:35.339824 17316 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.339887 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:35.340073 17316 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:12:35.340087 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.340092 17316 net.cpp:165] Memory required for data: 435713500\nI0817 16:12:35.340106 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:12:35.340116 17316 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:12:35.340123 17316 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:12:35.340131 17316 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.340144 17316 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:12:35.340152 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.340157 17316 net.cpp:165] Memory required for data: 443905500\nI0817 16:12:35.340162 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:12:35.340183 17316 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:12:35.340189 17316 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:12:35.340204 17316 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:12:35.340590 17316 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:12:35.340603 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.340608 17316 net.cpp:165] Memory required for data: 452097500\nI0817 16:12:35.340617 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:12:35.340631 17316 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:12:35.340636 17316 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:12:35.340644 17316 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:12:35.340919 17316 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:12:35.340932 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.340937 17316 net.cpp:165] Memory required for data: 460289500\nI0817 16:12:35.340948 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:35.340956 17316 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:12:35.340962 17316 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:12:35.340970 17316 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.341030 17316 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:35.341280 17316 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:12:35.341300 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341305 17316 net.cpp:165] Memory required for data: 468481500\nI0817 16:12:35.341315 17316 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:12:35.341325 17316 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:12:35.341331 17316 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:12:35.341337 17316 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:35.341348 17316 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:12:35.341385 17316 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:12:35.341398 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341403 17316 net.cpp:165] Memory required for data: 476673500\nI0817 16:12:35.341408 17316 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:12:35.341420 17316 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:12:35.341426 17316 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:12:35.341434 17316 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.341444 17316 net.cpp:150] Setting up L1_b5_relu\nI0817 16:12:35.341450 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341455 17316 net.cpp:165] Memory required for data: 484865500\nI0817 16:12:35.341459 17316 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.341467 17316 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.341472 17316 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:12:35.341480 17316 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:35.341488 17316 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:35.341538 17316 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:35.341550 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341557 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341562 17316 net.cpp:165] Memory required for data: 501249500\nI0817 16:12:35.341567 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:12:35.341580 17316 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:12:35.341588 17316 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:35.341596 17316 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:12:35.341943 17316 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:12:35.341956 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.341969 17316 net.cpp:165] Memory required for data: 509441500\nI0817 16:12:35.341979 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:12:35.341991 17316 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:12:35.341997 17316 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:12:35.342008 17316 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:12:35.342288 17316 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:12:35.342301 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.342306 17316 net.cpp:165] Memory required for data: 517633500\nI0817 16:12:35.342316 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:35.342325 17316 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:12:35.342331 17316 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:12:35.342339 17316 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.342406 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:35.342568 17316 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:12:35.342581 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.342586 17316 net.cpp:165] Memory required for data: 525825500\nI0817 16:12:35.342595 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:12:35.342603 17316 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:12:35.342612 17316 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:12:35.342620 17316 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.342629 17316 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:12:35.342636 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.342641 17316 net.cpp:165] Memory required for data: 534017500\nI0817 16:12:35.342645 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:12:35.342659 17316 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:12:35.342665 17316 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:12:35.342676 17316 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:12:35.343037 17316 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:12:35.343051 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.343056 17316 net.cpp:165] Memory required for data: 542209500\nI0817 16:12:35.343072 17316 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:12:35.343086 17316 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:12:35.343092 17316 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:12:35.343101 17316 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:12:35.343464 17316 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:12:35.343484 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.343494 17316 net.cpp:165] Memory required for data: 550401500\nI0817 16:12:35.343513 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:35.343526 17316 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:12:35.343538 17316 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:12:35.343551 17316 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.343614 17316 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:35.343775 17316 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:12:35.343788 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.343793 17316 net.cpp:165] Memory required for data: 558593500\nI0817 16:12:35.343802 17316 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:12:35.343823 17316 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:12:35.343830 17316 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:12:35.343837 17316 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:35.343845 17316 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:12:35.343883 17316 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:12:35.343894 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.343897 17316 net.cpp:165] Memory required for data: 566785500\nI0817 16:12:35.343910 17316 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:12:35.343919 17316 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:12:35.343925 17316 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:12:35.343932 17316 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.343941 17316 net.cpp:150] Setting up L1_b6_relu\nI0817 16:12:35.343950 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.343953 17316 net.cpp:165] Memory required for data: 574977500\nI0817 16:12:35.343958 17316 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.343966 17316 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.343971 17316 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:12:35.343981 17316 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:35.343991 17316 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:35.344038 17316 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:35.344048 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.344053 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.344058 17316 net.cpp:165] Memory required for data: 591361500\nI0817 16:12:35.344069 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:12:35.344084 17316 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:12:35.344090 17316 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:35.344099 17316 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:12:35.344455 17316 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:12:35.344470 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.344475 17316 net.cpp:165] Memory required for data: 599553500\nI0817 16:12:35.344483 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:12:35.344492 17316 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:12:35.344501 17316 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:12:35.344509 17316 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:12:35.344780 17316 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:12:35.344794 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.344799 17316 net.cpp:165] Memory required for data: 607745500\nI0817 16:12:35.344810 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:35.344817 17316 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:12:35.344823 17316 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:12:35.344835 17316 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.344893 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:35.345051 17316 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:12:35.345072 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.345077 17316 net.cpp:165] Memory required for data: 615937500\nI0817 16:12:35.345086 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:12:35.345094 17316 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:12:35.345100 17316 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:12:35.345108 17316 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.345118 17316 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:12:35.345124 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.345129 17316 net.cpp:165] Memory required for data: 624129500\nI0817 16:12:35.345134 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:12:35.345147 17316 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:12:35.345154 17316 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:12:35.345165 17316 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:12:35.345535 17316 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:12:35.345548 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.345559 17316 
net.cpp:165] Memory required for data: 632321500\nI0817 16:12:35.345569 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:12:35.345582 17316 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:12:35.345588 17316 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:12:35.345598 17316 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:12:35.345867 17316 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:12:35.345880 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.345885 17316 net.cpp:165] Memory required for data: 640513500\nI0817 16:12:35.345896 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:35.345903 17316 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:12:35.345909 17316 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:12:35.345917 17316 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.345978 17316 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:35.346143 17316 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:12:35.346156 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346163 17316 net.cpp:165] Memory required for data: 648705500\nI0817 16:12:35.346171 17316 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:12:35.346184 17316 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:12:35.346189 17316 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:12:35.346196 17316 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:35.346204 17316 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:12:35.346241 17316 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:12:35.346253 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346258 17316 net.cpp:165] Memory required for data: 656897500\nI0817 16:12:35.346263 17316 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:12:35.346271 17316 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:12:35.346277 17316 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:12:35.346287 17316 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.346297 17316 net.cpp:150] Setting up L1_b7_relu\nI0817 16:12:35.346303 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346308 17316 net.cpp:165] Memory required for data: 665089500\nI0817 16:12:35.346313 17316 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.346319 17316 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.346325 17316 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:12:35.346335 17316 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:35.346345 17316 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:35.346392 17316 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:35.346403 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346410 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346415 17316 net.cpp:165] Memory required for data: 681473500\nI0817 16:12:35.346421 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:12:35.346434 17316 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:12:35.346441 17316 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:35.346449 17316 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:12:35.346803 17316 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:12:35.346818 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.346823 17316 net.cpp:165] Memory required for data: 689665500\nI0817 16:12:35.346832 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:12:35.346843 17316 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:12:35.346850 17316 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:12:35.346868 17316 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:12:35.347154 17316 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:12:35.347168 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.347173 17316 net.cpp:165] Memory required for data: 697857500\nI0817 16:12:35.347183 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:35.347192 17316 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:12:35.347198 17316 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:12:35.347205 17316 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.347267 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:35.347453 17316 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:12:35.347467 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.347473 17316 net.cpp:165] Memory required for data: 706049500\nI0817 16:12:35.347482 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:12:35.347493 17316 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:12:35.347499 17316 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:12:35.347506 17316 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.347517 17316 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:12:35.347523 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.347528 17316 net.cpp:165] Memory required for data: 714241500\nI0817 16:12:35.347533 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:12:35.347548 17316 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:12:35.347553 17316 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:12:35.347564 17316 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:12:35.347923 17316 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:12:35.347937 17316 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.347942 17316 net.cpp:165] Memory required for data: 722433500\nI0817 16:12:35.347950 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:12:35.347964 17316 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:12:35.347970 17316 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:12:35.347980 17316 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:12:35.348258 17316 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:12:35.348273 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348278 17316 net.cpp:165] Memory required for data: 730625500\nI0817 16:12:35.348287 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:35.348296 17316 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:12:35.348302 17316 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:12:35.348310 17316 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.348371 17316 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:35.348527 17316 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:12:35.348541 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348546 17316 net.cpp:165] Memory required for data: 738817500\nI0817 16:12:35.348556 17316 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:12:35.348563 17316 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:12:35.348575 17316 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:12:35.348582 17316 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:35.348590 17316 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:12:35.348624 17316 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:12:35.348636 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348641 17316 net.cpp:165] Memory required for data: 747009500\nI0817 16:12:35.348646 17316 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:12:35.348657 17316 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:12:35.348664 17316 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:12:35.348672 17316 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.348690 17316 net.cpp:150] Setting up L1_b8_relu\nI0817 16:12:35.348696 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348701 17316 net.cpp:165] Memory required for data: 755201500\nI0817 16:12:35.348706 17316 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.348712 17316 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.348717 17316 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:12:35.348726 17316 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:35.348734 17316 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:35.348784 17316 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:35.348796 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348803 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.348808 17316 net.cpp:165] Memory required for data: 771585500\nI0817 16:12:35.348812 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:12:35.348826 17316 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:12:35.348832 17316 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:35.348841 17316 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:12:35.349212 17316 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:12:35.349230 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.349236 17316 net.cpp:165] Memory required for data: 779777500\nI0817 16:12:35.349244 17316 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:12:35.349253 17316 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:12:35.349259 17316 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:12:35.349267 17316 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:12:35.349555 17316 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:12:35.349570 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.349575 17316 net.cpp:165] Memory required for data: 787969500\nI0817 16:12:35.349586 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:35.349596 17316 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:12:35.349603 17316 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:12:35.349611 17316 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.349673 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:35.349833 17316 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:12:35.349848 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.349853 17316 net.cpp:165] Memory required for data: 796161500\nI0817 16:12:35.349861 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:12:35.349869 17316 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:12:35.349875 17316 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:12:35.349887 17316 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.349897 17316 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:12:35.349905 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.349910 17316 net.cpp:165] Memory required for data: 804353500\nI0817 16:12:35.349915 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:12:35.349927 17316 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:12:35.349933 17316 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:12:35.349941 17316 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:12:35.350301 17316 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:12:35.350314 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.350319 17316 net.cpp:165] Memory required for data: 812545500\nI0817 16:12:35.350328 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:12:35.350342 17316 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:12:35.350349 17316 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:12:35.350364 17316 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:12:35.350644 17316 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:12:35.350658 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.350663 17316 net.cpp:165] Memory required for data: 820737500\nI0817 16:12:35.350697 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:35.350709 17316 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:12:35.350716 17316 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:12:35.350728 17316 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.350785 17316 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:35.350944 17316 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:12:35.350956 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.350961 17316 net.cpp:165] Memory required for data: 828929500\nI0817 16:12:35.350970 17316 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:12:35.350978 17316 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:12:35.350988 17316 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:12:35.350999 17316 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:35.351008 17316 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:12:35.351047 17316 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:12:35.351058 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.351063 17316 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:12:35.351075 17316 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:12:35.351083 17316 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:12:35.351089 17316 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:12:35.351099 17316 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.351109 17316 net.cpp:150] Setting up L1_b9_relu\nI0817 16:12:35.351116 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.351120 17316 net.cpp:165] Memory required for data: 845313500\nI0817 16:12:35.351125 17316 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.351132 17316 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.351137 17316 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:12:35.351147 17316 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:35.351157 17316 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:35.351205 17316 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:35.351217 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.351223 17316 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:35.351227 17316 net.cpp:165] Memory required for data: 861697500\nI0817 16:12:35.351233 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:12:35.351246 17316 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:12:35.351253 17316 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:35.351261 17316 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:12:35.351624 17316 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:12:35.351639 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.351644 17316 net.cpp:165] Memory required for data: 
863745500\nI0817 16:12:35.351652 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:12:35.351663 17316 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:12:35.351670 17316 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:12:35.351678 17316 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:12:35.351948 17316 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:12:35.351960 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.351965 17316 net.cpp:165] Memory required for data: 865793500\nI0817 16:12:35.351982 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:35.351990 17316 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:12:35.351997 17316 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:12:35.352007 17316 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.352073 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:35.352234 17316 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:12:35.352247 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.352252 17316 net.cpp:165] Memory required for data: 867841500\nI0817 16:12:35.352262 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:12:35.352268 17316 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:12:35.352274 17316 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:12:35.352283 17316 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.352294 17316 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:12:35.352301 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.352306 17316 net.cpp:165] Memory required for data: 869889500\nI0817 16:12:35.352311 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:12:35.352321 17316 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:12:35.352330 17316 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:12:35.352339 17316 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:12:35.352689 17316 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:12:35.352704 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.352708 17316 net.cpp:165] Memory required for data: 871937500\nI0817 16:12:35.352717 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:12:35.352728 17316 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:12:35.352735 17316 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:12:35.352746 17316 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:12:35.353008 17316 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:12:35.353020 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.353025 17316 net.cpp:165] Memory required for data: 873985500\nI0817 16:12:35.353035 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:35.353044 17316 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:12:35.353050 17316 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:12:35.353057 17316 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.353124 17316 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:35.353293 17316 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:12:35.353312 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.353317 17316 net.cpp:165] Memory required for data: 876033500\nI0817 16:12:35.353327 17316 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:12:35.353337 17316 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:12:35.353343 17316 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:35.353350 17316 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:12:35.353385 17316 net.cpp:150] Setting up L2_b1_pool\nI0817 16:12:35.353397 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.353402 17316 net.cpp:165] Memory required for data: 878081500\nI0817 16:12:35.353407 17316 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:12:35.353416 17316 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:12:35.353422 17316 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:12:35.353428 17316 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:12:35.353440 17316 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:12:35.353473 17316 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:12:35.353482 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.353487 17316 net.cpp:165] Memory required for data: 880129500\nI0817 16:12:35.353492 17316 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:12:35.353503 17316 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:12:35.353515 17316 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:12:35.353523 17316 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.353533 17316 net.cpp:150] Setting up L2_b1_relu\nI0817 16:12:35.353540 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.353544 17316 net.cpp:165] Memory required for data: 882177500\nI0817 16:12:35.353549 17316 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:12:35.353559 17316 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:12:35.353569 17316 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:12:35.355829 17316 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:12:35.355849 17316 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:35.355854 17316 net.cpp:165] Memory required for data: 884225500\nI0817 16:12:35.355859 17316 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:12:35.355870 17316 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:12:35.355875 17316 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:12:35.355883 17316 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:12:35.355895 17316 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:12:35.355937 17316 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:12:35.355952 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.355958 17316 net.cpp:165] Memory required for data: 888321500\nI0817 16:12:35.355963 17316 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.355971 17316 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.355976 17316 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:12:35.355988 17316 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:35.355998 17316 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:35.356046 17316 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:35.356061 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.356075 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.356079 17316 net.cpp:165] Memory required for data: 896513500\nI0817 16:12:35.356084 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:12:35.356096 17316 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:12:35.356103 17316 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:35.356115 17316 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:12:35.356622 17316 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:12:35.356637 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.356642 17316 net.cpp:165] Memory required for data: 900609500\nI0817 16:12:35.356652 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:12:35.356663 17316 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:12:35.356669 17316 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:12:35.356680 17316 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:12:35.356945 17316 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:12:35.356961 17316 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:12:35.356967 17316 net.cpp:165] Memory required for data: 904705500\nI0817 16:12:35.356977 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:35.356987 17316 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:12:35.356993 17316 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:12:35.357002 17316 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.357060 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:35.357228 17316 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:12:35.357241 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.357246 17316 net.cpp:165] Memory required for data: 908801500\nI0817 16:12:35.357255 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:12:35.357264 17316 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:12:35.357277 17316 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:12:35.357288 17316 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.357298 17316 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:12:35.357306 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.357311 17316 net.cpp:165] Memory required for data: 912897500\nI0817 16:12:35.357316 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:12:35.357328 17316 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:12:35.357336 17316 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:12:35.357343 17316 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:12:35.357836 17316 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:12:35.357851 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.357856 17316 net.cpp:165] Memory required for data: 916993500\nI0817 16:12:35.357866 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:12:35.357877 17316 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:12:35.357884 
17316 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:12:35.357892 17316 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:12:35.358162 17316 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:12:35.358176 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358181 17316 net.cpp:165] Memory required for data: 921089500\nI0817 16:12:35.358191 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:35.358203 17316 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:12:35.358209 17316 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:12:35.358217 17316 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.358275 17316 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:35.358433 17316 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:12:35.358446 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358451 17316 net.cpp:165] Memory required for data: 925185500\nI0817 16:12:35.358460 17316 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:12:35.358471 17316 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:12:35.358479 17316 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:12:35.358485 17316 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:35.358495 17316 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:12:35.358525 17316 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:12:35.358533 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358537 17316 net.cpp:165] Memory required for data: 929281500\nI0817 16:12:35.358543 17316 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:12:35.358551 17316 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:12:35.358556 17316 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:12:35.358566 17316 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:12:35.358577 17316 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:12:35.358583 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358588 17316 net.cpp:165] Memory required for data: 933377500\nI0817 16:12:35.358592 17316 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.358599 17316 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.358604 17316 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:12:35.358613 17316 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:35.358621 17316 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:35.358672 17316 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:35.358685 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358691 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.358696 17316 net.cpp:165] Memory required for data: 941569500\nI0817 16:12:35.358707 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:12:35.358722 17316 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:12:35.358728 17316 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:35.358738 17316 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:12:35.359244 17316 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:12:35.359259 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.359264 17316 net.cpp:165] Memory required for data: 945665500\nI0817 16:12:35.359273 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:12:35.359287 17316 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:12:35.359293 17316 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:12:35.359302 17316 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:12:35.359568 17316 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:12:35.359585 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.359589 17316 net.cpp:165] Memory required for data: 949761500\nI0817 16:12:35.359599 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:35.359608 17316 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:12:35.359614 17316 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:12:35.359622 17316 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.359679 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:35.359841 17316 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:12:35.359855 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.359860 17316 net.cpp:165] Memory required for data: 953857500\nI0817 16:12:35.359869 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:12:35.359876 17316 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:12:35.359882 17316 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:12:35.359894 17316 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.359902 17316 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:12:35.359910 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.359915 17316 net.cpp:165] Memory required for data: 957953500\nI0817 16:12:35.359920 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:12:35.359935 17316 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:12:35.359941 17316 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:12:35.359949 17316 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:12:35.360440 17316 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:12:35.360455 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.360460 17316 net.cpp:165] Memory required for data: 962049500\nI0817 16:12:35.360469 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:12:35.360478 17316 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:12:35.360484 17316 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:12:35.360496 17316 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:12:35.360793 17316 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:12:35.360807 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.360812 17316 net.cpp:165] Memory required for data: 966145500\nI0817 16:12:35.360823 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:35.360837 17316 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:12:35.360844 17316 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:12:35.360852 17316 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.360909 17316 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:35.361073 17316 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:12:35.361086 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361091 17316 net.cpp:165] Memory required for data: 970241500\nI0817 16:12:35.361104 17316 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:12:35.361115 17316 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:12:35.361129 17316 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:12:35.361136 17316 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:35.361146 17316 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:12:35.361176 17316 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:12:35.361186 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361191 17316 net.cpp:165] Memory required for data: 974337500\nI0817 16:12:35.361199 17316 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:12:35.361222 17316 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:12:35.361227 17316 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:12:35.361235 17316 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.361244 17316 net.cpp:150] Setting up L2_b3_relu\nI0817 16:12:35.361251 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361256 17316 net.cpp:165] Memory required for data: 978433500\nI0817 16:12:35.361261 17316 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.361271 17316 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.361277 17316 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:12:35.361284 17316 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:35.361294 17316 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:35.361346 17316 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:35.361358 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361366 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361371 17316 net.cpp:165] Memory required for data: 986625500\nI0817 16:12:35.361376 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:12:35.361387 17316 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:12:35.361392 17316 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:35.361404 17316 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:12:35.361922 17316 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:12:35.361937 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.361943 17316 net.cpp:165] Memory required for data: 990721500\nI0817 16:12:35.361951 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:12:35.361960 17316 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:12:35.361966 17316 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:12:35.361981 17316 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:12:35.362260 17316 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:12:35.362274 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.362279 17316 net.cpp:165] Memory required for data: 994817500\nI0817 16:12:35.362290 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:35.362303 17316 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:12:35.362309 17316 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:12:35.362318 17316 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.362375 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:35.362537 17316 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:12:35.362550 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.362555 17316 net.cpp:165] Memory required for data: 998913500\nI0817 16:12:35.362565 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:12:35.362572 17316 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:12:35.362578 17316 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:12:35.362588 17316 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.362598 17316 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:12:35.362606 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.362610 17316 net.cpp:165] Memory required for data: 1003009500\nI0817 16:12:35.362623 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:12:35.362637 17316 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:12:35.362644 17316 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:12:35.362655 17316 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:12:35.363149 17316 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:12:35.363163 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.363169 17316 net.cpp:165] Memory required for data: 1007105500\nI0817 16:12:35.363178 17316 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:12:35.363188 17316 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:12:35.363193 17316 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:12:35.363201 17316 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:12:35.363467 17316 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:12:35.363481 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.363484 17316 net.cpp:165] Memory required for data: 1011201500\nI0817 16:12:35.363495 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:35.363503 17316 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:12:35.363509 17316 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:12:35.363523 17316 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.363579 17316 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:35.363740 17316 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:12:35.363754 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.363759 17316 net.cpp:165] Memory required for data: 1015297500\nI0817 16:12:35.363766 17316 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:12:35.363775 17316 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:12:35.363781 17316 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:12:35.363788 17316 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:35.363801 17316 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:12:35.363831 17316 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:12:35.363843 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.363848 17316 net.cpp:165] Memory required for data: 1019393500\nI0817 16:12:35.363853 17316 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:12:35.363862 17316 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:12:35.363867 17316 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:12:35.363874 17316 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:12:35.363883 17316 net.cpp:150] Setting up L2_b4_relu\nI0817 16:12:35.363890 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.363894 17316 net.cpp:165] Memory required for data: 1023489500\nI0817 16:12:35.363899 17316 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.363909 17316 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.363914 17316 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:12:35.363922 17316 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:35.363931 17316 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:35.363978 17316 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:35.363993 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.364001 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.364004 17316 net.cpp:165] Memory required for data: 1031681500\nI0817 16:12:35.364009 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:12:35.364020 17316 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:12:35.364027 17316 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:35.364035 17316 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:12:35.364538 17316 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:12:35.364559 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.364564 17316 net.cpp:165] Memory required for data: 1035777500\nI0817 16:12:35.364573 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:12:35.364583 17316 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:12:35.364588 17316 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:12:35.364599 17316 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:12:35.364866 17316 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:12:35.364879 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.364884 17316 net.cpp:165] Memory required for data: 1039873500\nI0817 16:12:35.364894 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:35.364907 17316 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:12:35.364912 17316 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:12:35.364920 17316 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.364975 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:35.365140 17316 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:12:35.365154 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.365159 17316 net.cpp:165] Memory required for data: 1043969500\nI0817 16:12:35.365167 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:12:35.365175 17316 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:12:35.365181 17316 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:12:35.365192 17316 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.365202 17316 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:12:35.365209 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.365213 17316 net.cpp:165] Memory required for data: 1048065500\nI0817 16:12:35.365218 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:12:35.365231 17316 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:12:35.365238 17316 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:12:35.365247 17316 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:12:35.365731 17316 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:12:35.365746 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.365751 17316 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:12:35.365759 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:12:35.365772 17316 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:12:35.365778 17316 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:12:35.365787 17316 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:12:35.366056 17316 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:12:35.366075 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366080 17316 net.cpp:165] Memory required for data: 1056257500\nI0817 16:12:35.366091 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:35.366101 17316 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:12:35.366106 17316 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:12:35.366117 17316 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.366176 17316 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:35.366338 17316 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:12:35.366351 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366356 17316 net.cpp:165] Memory required for data: 1060353500\nI0817 16:12:35.366365 17316 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:12:35.366374 17316 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:12:35.366380 17316 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:12:35.366387 17316 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:35.366397 17316 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:12:35.366425 17316 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:12:35.366441 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366446 17316 net.cpp:165] Memory required for data: 1064449500\nI0817 16:12:35.366452 17316 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:12:35.366462 17316 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:12:35.366469 17316 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:12:35.366477 17316 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.366485 17316 net.cpp:150] Setting up L2_b5_relu\nI0817 16:12:35.366492 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366497 17316 net.cpp:165] Memory required for data: 1068545500\nI0817 16:12:35.366503 17316 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.366511 17316 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.366518 17316 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:12:35.366524 17316 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:35.366534 17316 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:35.366583 17316 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:35.366597 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366605 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.366610 17316 net.cpp:165] Memory required for data: 1076737500\nI0817 16:12:35.366614 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:12:35.366626 17316 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:12:35.366632 17316 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:35.366641 17316 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:12:35.368149 17316 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:12:35.368165 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.368171 17316 net.cpp:165] Memory required for data: 1080833500\nI0817 16:12:35.368180 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:12:35.368193 17316 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:12:35.368201 17316 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:12:35.368211 17316 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:12:35.368486 17316 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:12:35.368499 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.368505 17316 net.cpp:165] Memory required for data: 1084929500\nI0817 16:12:35.368515 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:35.368525 17316 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:12:35.368530 17316 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:12:35.368537 17316 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.368599 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:35.368759 17316 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:12:35.368772 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.368777 17316 net.cpp:165] Memory required for data: 1089025500\nI0817 16:12:35.368788 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:12:35.368798 17316 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:12:35.368804 17316 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:12:35.368813 17316 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.368821 17316 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:12:35.368829 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.368834 17316 net.cpp:165] Memory required for data: 1093121500\nI0817 16:12:35.368839 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:12:35.368852 17316 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:12:35.368858 17316 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:12:35.368867 17316 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:12:35.369374 17316 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:12:35.369388 17316 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.369393 17316 net.cpp:165] Memory required for data: 1097217500\nI0817 16:12:35.369402 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:12:35.369415 17316 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:12:35.369421 17316 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:12:35.369429 17316 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:12:35.369699 17316 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:12:35.369714 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.369719 17316 net.cpp:165] Memory required for data: 1101313500\nI0817 16:12:35.369730 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:35.369740 17316 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:12:35.369745 17316 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:12:35.369753 17316 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.369810 17316 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:35.369972 17316 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:12:35.369985 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.369990 17316 net.cpp:165] Memory required for data: 1105409500\nI0817 16:12:35.369999 17316 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:12:35.370008 17316 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:12:35.370014 17316 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:12:35.370021 17316 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:35.370033 17316 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:12:35.370060 17316 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:12:35.370079 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.370085 17316 net.cpp:165] Memory required for data: 1109505500\nI0817 16:12:35.370090 17316 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:12:35.370103 17316 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:12:35.370110 17316 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:12:35.370117 17316 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.370126 17316 net.cpp:150] Setting up L2_b6_relu\nI0817 16:12:35.370134 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.370138 17316 net.cpp:165] Memory required for data: 1113601500\nI0817 16:12:35.370143 17316 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.370151 17316 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.370157 17316 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:12:35.370163 17316 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:35.370172 17316 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:35.370226 17316 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:35.370239 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.370245 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.370250 17316 net.cpp:165] Memory required for data: 1121793500\nI0817 16:12:35.370255 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:12:35.370270 17316 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:12:35.370276 17316 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:35.370286 17316 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:12:35.370779 17316 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:12:35.370792 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.370797 17316 net.cpp:165] Memory required for data: 1125889500\nI0817 16:12:35.370805 17316 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:12:35.370824 17316 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:12:35.370832 17316 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:12:35.370843 17316 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:12:35.371119 17316 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:12:35.371131 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.371136 17316 net.cpp:165] Memory required for data: 1129985500\nI0817 16:12:35.371147 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:35.371155 17316 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:12:35.371161 17316 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:12:35.371170 17316 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.371232 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:35.371388 17316 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:12:35.371402 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.371407 17316 net.cpp:165] Memory required for data: 1134081500\nI0817 16:12:35.371417 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:12:35.371424 17316 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:12:35.371430 17316 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:12:35.371440 17316 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.371450 17316 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:12:35.371457 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.371462 17316 net.cpp:165] Memory required for data: 1138177500\nI0817 16:12:35.371467 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:12:35.371481 17316 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:12:35.371487 17316 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:12:35.371496 17316 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:12:35.371980 17316 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:12:35.371994 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.371999 17316 net.cpp:165] Memory required for data: 1142273500\nI0817 16:12:35.372009 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:12:35.372020 17316 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:12:35.372027 17316 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:12:35.372035 17316 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:12:35.372314 17316 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:12:35.372330 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372335 17316 net.cpp:165] Memory required for data: 1146369500\nI0817 16:12:35.372346 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:35.372355 17316 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:12:35.372361 17316 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:12:35.372370 17316 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.372426 17316 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:35.372583 17316 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:12:35.372596 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372601 17316 net.cpp:165] Memory required for data: 1150465500\nI0817 16:12:35.372609 17316 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:12:35.372618 17316 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:12:35.372624 17316 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:12:35.372632 17316 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:35.372643 17316 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:12:35.372673 17316 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:12:35.372681 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372686 17316 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:12:35.372691 17316 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:12:35.372699 17316 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:12:35.372712 17316 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:12:35.372722 17316 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.372732 17316 net.cpp:150] Setting up L2_b7_relu\nI0817 16:12:35.372740 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372745 17316 net.cpp:165] Memory required for data: 1158657500\nI0817 16:12:35.372750 17316 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.372756 17316 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.372761 17316 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:12:35.372769 17316 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:35.372778 17316 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:35.372829 17316 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:35.372841 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372848 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.372853 17316 net.cpp:165] Memory required for data: 1166849500\nI0817 16:12:35.372858 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:12:35.372872 17316 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:12:35.372879 17316 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:35.372887 17316 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:12:35.373379 17316 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:12:35.373394 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.373399 17316 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:12:35.373407 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:12:35.373420 17316 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:12:35.373426 17316 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:12:35.373435 17316 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:12:35.373708 17316 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:12:35.373723 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.373728 17316 net.cpp:165] Memory required for data: 1175041500\nI0817 16:12:35.373739 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:35.373747 17316 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:12:35.373754 17316 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:12:35.373761 17316 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.373819 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:35.373980 17316 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:12:35.373993 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.373998 17316 net.cpp:165] Memory required for data: 1179137500\nI0817 16:12:35.374006 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:12:35.374014 17316 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:12:35.374022 17316 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:12:35.374032 17316 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.374042 17316 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:12:35.374048 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.374053 17316 net.cpp:165] Memory required for data: 1183233500\nI0817 16:12:35.374058 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:12:35.374078 17316 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:12:35.374084 17316 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:12:35.374094 17316 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:12:35.374603 17316 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:12:35.374619 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.374624 17316 net.cpp:165] Memory required for data: 1187329500\nI0817 16:12:35.374632 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:12:35.374652 17316 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:12:35.374660 17316 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:12:35.374668 17316 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:12:35.374938 17316 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:12:35.374950 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.374955 17316 net.cpp:165] Memory required for data: 1191425500\nI0817 16:12:35.374965 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:35.374977 17316 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:12:35.374984 17316 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:12:35.374991 17316 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.375049 17316 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:35.375214 17316 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:12:35.375228 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.375233 17316 net.cpp:165] Memory required for data: 1195521500\nI0817 16:12:35.375242 17316 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:12:35.375254 17316 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:12:35.375262 17316 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:12:35.375268 17316 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:35.375278 17316 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:12:35.375308 17316 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:12:35.375316 17316 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:35.375321 17316 net.cpp:165] Memory required for data: 1199617500\nI0817 16:12:35.375326 17316 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:12:35.375334 17316 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:12:35.375340 17316 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:12:35.375350 17316 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.375360 17316 net.cpp:150] Setting up L2_b8_relu\nI0817 16:12:35.375367 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.375371 17316 net.cpp:165] Memory required for data: 1203713500\nI0817 16:12:35.375376 17316 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.375383 17316 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.375389 17316 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:12:35.375396 17316 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:35.375419 17316 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:35.375476 17316 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:35.375489 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.375496 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.375501 17316 net.cpp:165] Memory required for data: 1211905500\nI0817 16:12:35.375506 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:12:35.375517 17316 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:12:35.375524 17316 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:35.375536 17316 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:12:35.376031 17316 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:12:35.376045 17316 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:35.376050 17316 net.cpp:165] Memory required for data: 1216001500\nI0817 16:12:35.376060 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:12:35.376075 17316 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:12:35.376081 17316 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:12:35.376093 17316 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:12:35.376390 17316 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:12:35.376410 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.376415 17316 net.cpp:165] Memory required for data: 1220097500\nI0817 16:12:35.376427 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:35.376440 17316 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:12:35.376446 17316 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:12:35.376453 17316 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.376513 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:35.376672 17316 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:12:35.376684 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.376689 17316 net.cpp:165] Memory required for data: 1224193500\nI0817 16:12:35.376699 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:12:35.376709 17316 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:12:35.376716 17316 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:12:35.376724 17316 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.376734 17316 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:12:35.376740 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.376744 17316 net.cpp:165] Memory required for data: 1228289500\nI0817 16:12:35.376749 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:12:35.376763 17316 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:12:35.376770 17316 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:12:35.376781 17316 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:12:35.378268 17316 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:12:35.378285 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.378291 17316 net.cpp:165] Memory required for data: 1232385500\nI0817 16:12:35.378301 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:12:35.378314 17316 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:12:35.378321 17316 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:12:35.378334 17316 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:12:35.378599 17316 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:12:35.378615 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.378620 17316 net.cpp:165] Memory required for data: 1236481500\nI0817 16:12:35.378669 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:35.378684 17316 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:12:35.378691 17316 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:12:35.378700 17316 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.378758 17316 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:35.378911 17316 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:12:35.378923 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.378928 17316 net.cpp:165] Memory required for data: 1240577500\nI0817 16:12:35.378937 17316 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:12:35.378947 17316 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:12:35.378952 17316 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:12:35.378960 17316 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:35.378971 17316 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:12:35.378999 17316 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:12:35.379011 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.379016 17316 net.cpp:165] Memory required for data: 1244673500\nI0817 16:12:35.379022 17316 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:12:35.379030 17316 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:12:35.379035 17316 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:12:35.379042 17316 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.379052 17316 net.cpp:150] Setting up L2_b9_relu\nI0817 16:12:35.379060 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.379077 17316 net.cpp:165] Memory required for data: 1248769500\nI0817 16:12:35.379083 17316 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.379094 17316 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.379101 17316 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:12:35.379108 17316 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:35.379118 17316 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:35.379173 17316 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:35.379184 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.379191 17316 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:35.379195 17316 net.cpp:165] Memory required for data: 1256961500\nI0817 16:12:35.379201 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:12:35.379214 17316 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:12:35.379220 17316 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:35.379231 17316 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:12:35.379731 17316 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:12:35.379746 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.379751 17316 net.cpp:165] Memory required for data: 1257985500\nI0817 16:12:35.379760 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:12:35.379770 17316 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:12:35.379776 17316 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:12:35.379787 17316 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:12:35.380059 17316 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:12:35.380081 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.380087 17316 net.cpp:165] Memory required for data: 1259009500\nI0817 16:12:35.380098 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:35.380107 17316 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:12:35.380113 17316 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:12:35.380121 17316 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.380179 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:35.380343 17316 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:12:35.380357 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.380362 17316 net.cpp:165] Memory required for data: 1260033500\nI0817 16:12:35.380370 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:12:35.380378 17316 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:12:35.380384 17316 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:12:35.380395 17316 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:35.380405 17316 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:12:35.380412 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.380417 17316 net.cpp:165] Memory required for data: 1261057500\nI0817 16:12:35.380421 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:12:35.380435 17316 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:12:35.380441 17316 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:12:35.380450 17316 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:12:35.380944 17316 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:12:35.380959 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.380964 17316 net.cpp:165] Memory required for data: 1262081500\nI0817 16:12:35.380972 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:12:35.380985 17316 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:12:35.380991 17316 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:12:35.381002 17316 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:12:35.381283 17316 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:12:35.381304 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.381309 17316 net.cpp:165] Memory required for data: 1263105500\nI0817 16:12:35.381320 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:35.381328 17316 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:12:35.381335 17316 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:12:35.381342 17316 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:12:35.381405 17316 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:35.381572 17316 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:12:35.381584 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.381589 17316 net.cpp:165] Memory required for data: 1264129500\nI0817 16:12:35.381598 17316 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:12:35.381608 17316 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:12:35.381614 17316 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:35.381626 17316 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:12:35.381664 17316 net.cpp:150] Setting up L3_b1_pool\nI0817 16:12:35.381673 17316 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:12:35.381677 17316 net.cpp:165] Memory required for data: 1265153500\nI0817 16:12:35.381683 17316 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:12:35.381691 17316 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:12:35.381697 17316 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:12:35.381705 17316 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:12:35.381714 17316 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:12:35.381747 17316 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:12:35.381757 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.381762 17316 net.cpp:165] Memory required for data: 1266177500\nI0817 16:12:35.381767 17316 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:12:35.381773 17316 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:12:35.381779 17316 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:12:35.381786 17316 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:12:35.381795 17316 net.cpp:150] Setting up L3_b1_relu\nI0817 16:12:35.381803 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.381806 17316 net.cpp:165] Memory required for data: 1267201500\nI0817 16:12:35.381811 17316 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:12:35.381825 17316 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:12:35.381834 17316 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:12:35.383072 17316 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:12:35.383092 17316 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:35.383098 17316 net.cpp:165] Memory required for data: 1268225500\nI0817 16:12:35.383105 17316 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:12:35.383113 17316 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:12:35.383119 17316 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:12:35.383127 17316 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:12:35.383137 17316 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:12:35.383180 17316 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:12:35.383191 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.383200 17316 net.cpp:165] Memory required for data: 1270273500\nI0817 16:12:35.383205 17316 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.383213 17316 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.383219 17316 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:12:35.383226 17316 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:35.383236 17316 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:35.383292 17316 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:35.383304 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.383318 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.383323 17316 net.cpp:165] Memory required for data: 1274369500\nI0817 16:12:35.383328 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:12:35.383343 17316 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:12:35.383350 17316 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:35.383359 17316 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:12:35.384409 17316 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:12:35.384424 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.384429 17316 net.cpp:165] Memory required for data: 1276417500\nI0817 16:12:35.384439 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:12:35.384451 17316 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:12:35.384459 17316 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:12:35.384469 17316 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:12:35.384748 17316 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:12:35.384762 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.384766 17316 net.cpp:165] Memory required for data: 1278465500\nI0817 16:12:35.384778 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:35.384786 17316 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:12:35.384793 17316 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:12:35.384804 17316 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.384865 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:35.385027 17316 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:12:35.385040 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.385046 17316 net.cpp:165] Memory required for data: 1280513500\nI0817 16:12:35.385054 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:12:35.385068 17316 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:12:35.385076 17316 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:12:35.385087 17316 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:35.385097 17316 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:12:35.385103 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.385108 17316 net.cpp:165] Memory required for data: 1282561500\nI0817 16:12:35.385113 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:12:35.385128 17316 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:12:35.385133 17316 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:12:35.385145 17316 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:12:35.386190 17316 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:12:35.386205 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.386210 17316 net.cpp:165] Memory required for data: 1284609500\nI0817 16:12:35.386219 17316 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:12:35.386229 17316 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:12:35.386235 17316 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:12:35.386246 17316 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:12:35.386518 17316 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:12:35.386533 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.386538 17316 net.cpp:165] Memory required for data: 1286657500\nI0817 16:12:35.386548 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:35.386559 17316 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:12:35.386564 17316 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:12:35.386572 17316 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:12:35.386631 17316 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:35.386797 17316 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:12:35.386809 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.386814 17316 net.cpp:165] Memory required for data: 1288705500\nI0817 16:12:35.386831 17316 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:12:35.386843 17316 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:12:35.386850 17316 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:12:35.386857 17316 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:35.386865 17316 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:12:35.386903 17316 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:12:35.386914 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.386919 17316 net.cpp:165] Memory required for data: 1290753500\nI0817 16:12:35.386924 17316 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:12:35.386932 17316 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:12:35.386939 17316 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:12:35.386945 17316 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:12:35.386955 17316 net.cpp:150] Setting up L3_b2_relu\nI0817 16:12:35.386961 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.386966 17316 net.cpp:165] Memory required for data: 1292801500\nI0817 16:12:35.386971 17316 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.386978 17316 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.386983 17316 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:12:35.386993 17316 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:35.387003 17316 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:35.387050 17316 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:35.387061 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.387073 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.387079 17316 net.cpp:165] Memory required for data: 1296897500\nI0817 16:12:35.387084 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:12:35.387099 17316 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:12:35.387105 17316 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:35.387115 17316 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:12:35.388165 17316 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:12:35.388180 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.388185 17316 net.cpp:165] Memory required for data: 1298945500\nI0817 16:12:35.388195 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:12:35.388203 17316 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:12:35.388213 17316 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:12:35.388221 17316 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:12:35.388490 17316 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:12:35.388504 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.388509 17316 net.cpp:165] Memory required for data: 1300993500\nI0817 16:12:35.388520 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:35.388528 17316 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:12:35.388535 17316 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:12:35.388545 17316 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.388603 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:35.388762 17316 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:12:35.388774 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.388779 17316 net.cpp:165] Memory required for data: 1303041500\nI0817 16:12:35.388788 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:12:35.388799 17316 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:12:35.388806 17316 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:12:35.388813 17316 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:35.388823 17316 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:12:35.388837 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.388842 17316 net.cpp:165] Memory required for data: 1305089500\nI0817 16:12:35.388847 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:12:35.388864 17316 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:12:35.388870 17316 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:12:35.388881 17316 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:12:35.389926 17316 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:12:35.389940 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.389946 17316 net.cpp:165] Memory required for data: 1307137500\nI0817 16:12:35.389955 17316 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:12:35.389964 17316 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:12:35.389971 17316 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:12:35.389982 17316 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:12:35.390259 17316 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:12:35.390276 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390282 17316 net.cpp:165] Memory required for data: 1309185500\nI0817 16:12:35.390292 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:35.390301 17316 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:12:35.390308 17316 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:12:35.390316 17316 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:12:35.390377 17316 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:35.390538 17316 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:12:35.390550 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390555 17316 net.cpp:165] Memory required for data: 1311233500\nI0817 16:12:35.390565 17316 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:12:35.390578 17316 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:12:35.390583 17316 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:12:35.390591 17316 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:35.390599 17316 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:12:35.390635 17316 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:12:35.390646 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390651 17316 net.cpp:165] Memory required for data: 1313281500\nI0817 16:12:35.390656 17316 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:12:35.390666 17316 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:12:35.390671 17316 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:12:35.390678 17316 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:12:35.390687 17316 net.cpp:150] Setting up L3_b3_relu\nI0817 16:12:35.390694 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390699 17316 net.cpp:165] Memory required for data: 1315329500\nI0817 16:12:35.390704 17316 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.390712 17316 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.390717 17316 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:12:35.390727 17316 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:35.390738 17316 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:35.390784 17316 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:35.390794 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390801 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.390806 17316 net.cpp:165] Memory required for data: 1319425500\nI0817 16:12:35.390811 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:12:35.390825 17316 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:12:35.390832 17316 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:35.390848 17316 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:12:35.391901 17316 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:12:35.391916 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.391921 17316 net.cpp:165] Memory required for data: 1321473500\nI0817 16:12:35.391930 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:12:35.391943 17316 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:12:35.391950 17316 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:12:35.391958 17316 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:12:35.392236 17316 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:12:35.392249 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.392254 17316 net.cpp:165] Memory required for data: 1323521500\nI0817 16:12:35.392264 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:35.392277 17316 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:12:35.392282 17316 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:12:35.392290 17316 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.392351 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:35.392511 17316 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:12:35.392524 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.392529 17316 net.cpp:165] Memory required for data: 1325569500\nI0817 16:12:35.392539 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:12:35.392549 17316 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:12:35.392556 17316 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:12:35.392563 17316 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:35.392573 17316 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:12:35.392580 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.392585 17316 net.cpp:165] Memory required for data: 1327617500\nI0817 16:12:35.392590 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:12:35.392603 17316 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:12:35.392611 17316 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:12:35.392621 17316 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:12:35.394644 17316 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:12:35.394661 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.394666 17316 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:12:35.394676 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:12:35.394690 17316 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:12:35.394696 17316 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:12:35.394706 17316 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:12:35.394979 17316 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:12:35.394991 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.394996 17316 net.cpp:165] Memory required for data: 1331713500\nI0817 16:12:35.395007 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:35.395016 17316 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:12:35.395023 17316 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:12:35.395033 17316 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:12:35.395100 17316 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:35.395263 17316 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:12:35.395277 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.395282 17316 net.cpp:165] Memory required for data: 1333761500\nI0817 16:12:35.395290 17316 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:12:35.395303 17316 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:12:35.395309 17316 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:12:35.395316 17316 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:35.395324 17316 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:12:35.395368 17316 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:12:35.395380 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.395383 17316 net.cpp:165] Memory required for data: 1335809500\nI0817 16:12:35.395390 17316 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:12:35.395397 17316 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:12:35.395403 17316 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:12:35.395414 17316 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:12:35.395424 17316 net.cpp:150] Setting up L3_b4_relu\nI0817 16:12:35.395431 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.395436 17316 net.cpp:165] Memory required for data: 1337857500\nI0817 16:12:35.395440 17316 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.395448 17316 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.395453 17316 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:12:35.395462 17316 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:35.395470 17316 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:35.395519 17316 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:35.395531 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.395539 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.395543 17316 net.cpp:165] Memory required for data: 1341953500\nI0817 16:12:35.395548 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:12:35.395560 17316 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:12:35.395566 17316 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:35.395578 17316 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:12:35.396608 17316 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:12:35.396623 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.396628 17316 net.cpp:165] Memory required for data: 1344001500\nI0817 16:12:35.396637 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:12:35.396651 17316 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:12:35.396657 17316 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:12:35.396666 17316 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:12:35.396939 17316 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:12:35.396951 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.396956 17316 net.cpp:165] Memory required for data: 1346049500\nI0817 16:12:35.396966 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:35.396975 17316 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:12:35.396982 17316 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:12:35.396989 17316 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.397050 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:35.397222 17316 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:12:35.397235 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.397240 17316 net.cpp:165] Memory required for data: 1348097500\nI0817 16:12:35.397249 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:12:35.397258 17316 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:12:35.397264 17316 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:12:35.397271 17316 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:35.397281 17316 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:12:35.397289 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.397292 17316 net.cpp:165] Memory required for data: 1350145500\nI0817 16:12:35.397297 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:12:35.397312 17316 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:12:35.397320 17316 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:12:35.397337 17316 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:12:35.398373 17316 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:12:35.398388 17316 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:12:35.398393 17316 net.cpp:165] Memory required for data: 1352193500\nI0817 16:12:35.398402 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:12:35.398414 17316 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:12:35.398422 17316 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:12:35.398430 17316 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:12:35.398694 17316 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:12:35.398707 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.398712 17316 net.cpp:165] Memory required for data: 1354241500\nI0817 16:12:35.398722 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:35.398736 17316 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:12:35.398743 17316 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:12:35.398751 17316 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:12:35.398813 17316 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:35.398969 17316 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:12:35.398983 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.398988 17316 net.cpp:165] Memory required for data: 1356289500\nI0817 16:12:35.398996 17316 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:12:35.399008 17316 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:12:35.399014 17316 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:12:35.399021 17316 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:35.399029 17316 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:12:35.399070 17316 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:12:35.399083 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.399088 17316 net.cpp:165] Memory required for data: 1358337500\nI0817 16:12:35.399093 17316 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:12:35.399101 17316 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:12:35.399107 17316 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:12:35.399118 17316 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:12:35.399128 17316 net.cpp:150] Setting up L3_b5_relu\nI0817 16:12:35.399135 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.399140 17316 net.cpp:165] Memory required for data: 1360385500\nI0817 16:12:35.399145 17316 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.399152 17316 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.399158 17316 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:12:35.399165 17316 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:35.399175 17316 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:35.399227 17316 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:35.399240 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.399245 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.399250 17316 net.cpp:165] Memory required for data: 1364481500\nI0817 16:12:35.399255 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:12:35.399269 17316 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:12:35.399276 17316 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:35.399286 17316 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:12:35.400310 17316 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:12:35.400324 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.400329 17316 net.cpp:165] Memory required for data: 1366529500\nI0817 16:12:35.400346 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:12:35.400359 
17316 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:12:35.400367 17316 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:12:35.400375 17316 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:12:35.400650 17316 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:12:35.400663 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.400668 17316 net.cpp:165] Memory required for data: 1368577500\nI0817 16:12:35.400679 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:35.400688 17316 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:12:35.400694 17316 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:12:35.400702 17316 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.400764 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:35.400929 17316 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:12:35.400941 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.400946 17316 net.cpp:165] Memory required for data: 1370625500\nI0817 16:12:35.400955 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:12:35.400964 17316 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:12:35.400970 17316 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:12:35.400980 17316 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:35.400991 17316 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:12:35.400998 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.401002 17316 net.cpp:165] Memory required for data: 1372673500\nI0817 16:12:35.401007 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:12:35.401021 17316 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:12:35.401027 17316 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:12:35.401036 17316 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:12:35.402067 17316 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:12:35.402082 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402087 17316 net.cpp:165] Memory required for data: 1374721500\nI0817 16:12:35.402096 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:12:35.402109 17316 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:12:35.402115 17316 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:12:35.402124 17316 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:12:35.402401 17316 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:12:35.402415 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402420 17316 net.cpp:165] Memory required for data: 1376769500\nI0817 16:12:35.402431 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:35.402441 17316 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:12:35.402448 17316 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:12:35.402456 17316 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:12:35.402518 17316 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:35.402681 17316 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:12:35.402694 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402699 17316 net.cpp:165] Memory required for data: 1378817500\nI0817 16:12:35.402709 17316 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:12:35.402720 17316 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:12:35.402727 17316 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:12:35.402734 17316 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:35.402745 17316 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:12:35.402778 17316 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:12:35.402791 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402794 17316 net.cpp:165] Memory required for data: 1380865500\nI0817 16:12:35.402801 17316 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:12:35.402812 17316 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:12:35.402824 17316 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:12:35.402832 17316 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:12:35.402842 17316 net.cpp:150] Setting up L3_b6_relu\nI0817 16:12:35.402848 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402853 17316 net.cpp:165] Memory required for data: 1382913500\nI0817 16:12:35.402858 17316 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.402865 17316 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.402871 17316 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:12:35.402878 17316 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:35.402889 17316 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:35.402940 17316 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:35.402951 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402958 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.402962 17316 net.cpp:165] Memory required for data: 1387009500\nI0817 16:12:35.402967 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:12:35.402982 17316 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:12:35.402988 17316 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:35.402998 17316 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:12:35.404021 17316 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:12:35.404036 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.404042 17316 net.cpp:165] Memory required for data: 1389057500\nI0817 16:12:35.404050 17316 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:12:35.404067 17316 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:12:35.404075 17316 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:12:35.404088 17316 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:12:35.404357 17316 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:12:35.404371 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.404376 17316 net.cpp:165] Memory required for data: 1391105500\nI0817 16:12:35.404386 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:35.404394 17316 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:12:35.404402 17316 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:12:35.404408 17316 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.404471 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:35.404634 17316 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:12:35.404646 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.404651 17316 net.cpp:165] Memory required for data: 1393153500\nI0817 16:12:35.404660 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:12:35.404695 17316 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:12:35.404705 17316 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:12:35.404712 17316 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:35.404722 17316 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:12:35.404729 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.404734 17316 net.cpp:165] Memory required for data: 1395201500\nI0817 16:12:35.404739 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:12:35.404753 17316 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:12:35.404760 17316 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:12:35.404769 17316 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:12:35.405803 17316 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:12:35.405818 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.405823 17316 net.cpp:165] Memory required for data: 1397249500\nI0817 16:12:35.405833 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:12:35.405853 17316 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:12:35.405859 17316 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:12:35.405869 17316 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:12:35.406152 17316 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:12:35.406164 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406169 17316 net.cpp:165] Memory required for data: 1399297500\nI0817 16:12:35.406180 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:35.406189 17316 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:12:35.406195 17316 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:12:35.406203 17316 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:12:35.406265 17316 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:35.406426 17316 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:12:35.406440 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406445 17316 net.cpp:165] Memory required for data: 1401345500\nI0817 16:12:35.406453 17316 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:12:35.406462 17316 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:12:35.406468 17316 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:12:35.406476 17316 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:35.406486 17316 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:12:35.406520 17316 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:12:35.406535 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406540 17316 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:12:35.406545 17316 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:12:35.406554 17316 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:12:35.406560 17316 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:12:35.406568 17316 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:12:35.406576 17316 net.cpp:150] Setting up L3_b7_relu\nI0817 16:12:35.406584 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406589 17316 net.cpp:165] Memory required for data: 1405441500\nI0817 16:12:35.406594 17316 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.406605 17316 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.406610 17316 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:12:35.406618 17316 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:35.406628 17316 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:35.406677 17316 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:35.406689 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406697 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.406700 17316 net.cpp:165] Memory required for data: 1409537500\nI0817 16:12:35.406705 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:12:35.406718 17316 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:12:35.406723 17316 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:35.406735 17316 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:12:35.408746 17316 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:12:35.408763 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.408769 17316 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:12:35.408778 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:12:35.408792 17316 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:12:35.408798 17316 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:12:35.408809 17316 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:12:35.409090 17316 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:12:35.409111 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.409116 17316 net.cpp:165] Memory required for data: 1413633500\nI0817 16:12:35.409127 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:35.409137 17316 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:12:35.409142 17316 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:12:35.409155 17316 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.409219 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:35.409382 17316 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:12:35.409395 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.409400 17316 net.cpp:165] Memory required for data: 1415681500\nI0817 16:12:35.409410 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:12:35.409418 17316 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:12:35.409425 17316 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:12:35.409437 17316 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:35.409447 17316 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:12:35.409456 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.409459 17316 net.cpp:165] Memory required for data: 1417729500\nI0817 16:12:35.409464 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:12:35.409478 17316 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:12:35.409485 17316 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:12:35.409493 17316 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:12:35.410522 17316 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:12:35.410537 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.410542 17316 net.cpp:165] Memory required for data: 1419777500\nI0817 16:12:35.410552 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:12:35.410563 17316 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:12:35.410570 17316 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:12:35.410578 17316 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:12:35.410853 17316 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:12:35.410866 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.410871 17316 net.cpp:165] Memory required for data: 1421825500\nI0817 16:12:35.410881 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:35.410892 17316 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:12:35.410899 17316 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:12:35.410909 17316 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:12:35.410967 17316 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:35.411136 17316 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:12:35.411149 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.411154 17316 net.cpp:165] Memory required for data: 1423873500\nI0817 16:12:35.411164 17316 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:12:35.411172 17316 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:12:35.411180 17316 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:12:35.411186 17316 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:35.411197 17316 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:12:35.411231 17316 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:12:35.411242 17316 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:12:35.411247 17316 net.cpp:165] Memory required for data: 1425921500\nI0817 16:12:35.411252 17316 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:12:35.411264 17316 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:12:35.411272 17316 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:12:35.411278 17316 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:12:35.411288 17316 net.cpp:150] Setting up L3_b8_relu\nI0817 16:12:35.411294 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.411299 17316 net.cpp:165] Memory required for data: 1427969500\nI0817 16:12:35.411310 17316 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.411319 17316 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.411324 17316 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:12:35.411331 17316 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:35.411341 17316 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:35.411392 17316 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:35.411406 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.411412 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.411417 17316 net.cpp:165] Memory required for data: 1432065500\nI0817 16:12:35.411422 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:12:35.411435 17316 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:12:35.411442 17316 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:35.411451 17316 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:12:35.412487 17316 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:12:35.412503 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:12:35.412508 17316 net.cpp:165] Memory required for data: 1434113500\nI0817 16:12:35.412516 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:12:35.412530 17316 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:12:35.412537 17316 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:12:35.412549 17316 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:12:35.412822 17316 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:12:35.412834 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.412839 17316 net.cpp:165] Memory required for data: 1436161500\nI0817 16:12:35.412849 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:35.412858 17316 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:12:35.412864 17316 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:12:35.412875 17316 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.412936 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:35.413103 17316 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:12:35.413117 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.413122 17316 net.cpp:165] Memory required for data: 1438209500\nI0817 16:12:35.413131 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:12:35.413139 17316 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:12:35.413146 17316 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:12:35.413156 17316 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:35.413166 17316 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:12:35.413173 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.413177 17316 net.cpp:165] Memory required for data: 1440257500\nI0817 16:12:35.413182 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:12:35.413197 17316 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:12:35.413203 17316 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:12:35.413213 17316 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:12:35.414239 17316 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:12:35.414254 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.414259 17316 net.cpp:165] Memory required for data: 1442305500\nI0817 16:12:35.414268 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:12:35.414278 17316 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:12:35.414284 17316 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:12:35.414295 17316 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:12:35.414569 17316 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:12:35.414584 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.414599 17316 net.cpp:165] Memory required for data: 1444353500\nI0817 16:12:35.414610 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:35.414619 17316 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:12:35.414625 17316 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:12:35.414633 17316 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:12:35.414692 17316 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:35.414856 17316 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:12:35.414870 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.414875 17316 net.cpp:165] Memory required for data: 1446401500\nI0817 16:12:35.414883 17316 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:12:35.414892 17316 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:12:35.414904 17316 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:12:35.414911 17316 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:35.414919 17316 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:12:35.414957 17316 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:12:35.414968 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.414973 17316 net.cpp:165] Memory required for data: 1448449500\nI0817 16:12:35.414978 17316 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:12:35.414985 17316 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:12:35.414991 17316 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:12:35.414999 17316 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:12:35.415009 17316 net.cpp:150] Setting up L3_b9_relu\nI0817 16:12:35.415015 17316 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:35.415019 17316 net.cpp:165] Memory required for data: 1450497500\nI0817 16:12:35.415024 17316 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:12:35.415032 17316 net.cpp:100] Creating Layer post_pool\nI0817 16:12:35.415038 17316 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:12:35.415048 17316 net.cpp:408] post_pool -> post_pool\nI0817 16:12:35.415091 17316 net.cpp:150] Setting up post_pool\nI0817 16:12:35.415102 17316 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:12:35.415107 17316 net.cpp:165] Memory required for data: 1450529500\nI0817 16:12:35.415112 17316 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:12:35.415123 17316 net.cpp:100] Creating Layer post_FC\nI0817 16:12:35.415129 17316 net.cpp:434] post_FC <- post_pool\nI0817 16:12:35.415138 17316 net.cpp:408] post_FC -> post_FC_top\nI0817 16:12:35.415354 17316 net.cpp:150] Setting up post_FC\nI0817 16:12:35.415369 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.415374 17316 net.cpp:165] Memory required for data: 1450579500\nI0817 16:12:35.415382 17316 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:12:35.415390 17316 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:12:35.415396 17316 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:12:35.415407 17316 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:12:35.415417 17316 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:12:35.415468 17316 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:12:35.415482 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.415489 17316 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:35.415493 17316 net.cpp:165] Memory required for data: 1450679500\nI0817 16:12:35.415499 17316 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:12:35.415508 17316 net.cpp:100] Creating Layer accuracy\nI0817 16:12:35.415513 17316 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:12:35.415520 17316 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:12:35.415529 17316 net.cpp:408] accuracy -> accuracy\nI0817 16:12:35.415540 17316 net.cpp:150] Setting up accuracy\nI0817 16:12:35.415547 17316 net.cpp:157] Top shape: (1)\nI0817 16:12:35.415558 17316 net.cpp:165] Memory required for data: 1450679504\nI0817 16:12:35.415565 17316 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:35.415572 17316 net.cpp:100] Creating Layer loss\nI0817 16:12:35.415577 17316 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:12:35.415585 17316 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:12:35.415594 17316 net.cpp:408] loss -> loss\nI0817 16:12:35.415606 17316 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:35.415741 17316 net.cpp:150] Setting up loss\nI0817 16:12:35.415755 17316 net.cpp:157] Top shape: (1)\nI0817 16:12:35.415760 17316 net.cpp:160]     with loss weight 1\nI0817 16:12:35.415776 17316 net.cpp:165] Memory required for data: 1450679508\nI0817 16:12:35.415782 17316 net.cpp:226] loss needs backward computation.\nI0817 16:12:35.415788 17316 net.cpp:228] accuracy does not need backward computation.\nI0817 16:12:35.415794 17316 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:12:35.415799 17316 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:12:35.415804 17316 net.cpp:226] post_pool needs backward computation.\nI0817 16:12:35.415809 17316 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:12:35.415814 17316 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.415820 17316 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.415825 17316 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.415830 17316 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.415835 17316 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.415840 17316 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.415844 17316 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.415849 17316 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.415855 17316 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:12:35.415860 17316 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:12:35.415865 17316 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.415870 17316 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.415875 17316 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.415880 17316 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.415886 17316 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.415890 17316 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:12:35.415895 17316 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.415900 17316 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:12:35.415906 17316 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:12:35.415911 17316 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:12:35.415916 17316 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:12:35.415922 17316 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.415927 17316 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.415932 17316 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.415937 17316 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.415942 17316 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.415947 17316 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.415956 17316 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.415961 17316 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:12:35.415966 17316 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:12:35.415972 17316 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.415977 17316 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.415982 17316 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.415994 17316 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.415999 17316 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.416004 17316 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.416009 17316 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.416014 17316 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.416020 17316 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:12:35.416025 17316 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:12:35.416030 17316 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:12:35.416036 17316 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.416041 17316 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:12:35.416046 17316 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.416051 17316 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.416056 17316 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.416061 17316 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.416074 17316 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.416079 17316 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:12:35.416085 17316 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:12:35.416090 17316 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.416095 17316 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.416100 17316 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.416106 17316 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.416111 17316 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.416116 17316 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.416121 17316 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.416126 17316 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.416132 17316 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:12:35.416137 17316 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:12:35.416142 17316 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:12:35.416148 17316 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.416153 17316 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.416158 17316 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.416163 17316 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.416168 17316 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.416173 
17316 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.416178 17316 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.416184 17316 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:12:35.416189 17316 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:12:35.416194 17316 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.416200 17316 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.416205 17316 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.416211 17316 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.416216 17316 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.416221 17316 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.416226 17316 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.416231 17316 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.416236 17316 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:12:35.416245 17316 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:12:35.416257 17316 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:12:35.416262 17316 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:12:35.416268 17316 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.416275 17316 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:12:35.416280 17316 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.416285 17316 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:12:35.416290 17316 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.416296 17316 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.416301 17316 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:12:35.416306 17316 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.416311 17316 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.416316 17316 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:12:35.416322 17316 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:12:35.416327 17316 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.416332 17316 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.416338 17316 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.416343 17316 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.416348 17316 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.416354 17316 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.416359 17316 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.416364 17316 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.416370 17316 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:12:35.416376 17316 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:12:35.416381 17316 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.416388 17316 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.416393 17316 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.416399 17316 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.416404 17316 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.416409 17316 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:12:35.416414 17316 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.416419 17316 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:12:35.416424 17316 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:12:35.416430 17316 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:12:35.416435 17316 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:12:35.416441 17316 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.416446 17316 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.416451 17316 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.416457 17316 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.416462 17316 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.416467 17316 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.416472 17316 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.416478 17316 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:12:35.416483 17316 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:12:35.416489 17316 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.416496 17316 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.416501 17316 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.416505 17316 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.416515 17316 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.416522 17316 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.416527 17316 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.416532 17316 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.416538 17316 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:12:35.416543 17316 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:12:35.416548 17316 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:12:35.416555 17316 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.416560 17316 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:12:35.416566 17316 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.416571 17316 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.416576 17316 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.416582 17316 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.416587 17316 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.416592 17316 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:12:35.416599 17316 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:12:35.416604 17316 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.416615 17316 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.416620 17316 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.416625 17316 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.416631 17316 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.416636 17316 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.416641 17316 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.416647 17316 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.416652 17316 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:12:35.416658 17316 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:12:35.416664 17316 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:12:35.416671 17316 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.416676 17316 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.416682 17316 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.416687 17316 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.416692 17316 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.416697 17316 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.416703 17316 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.416709 17316 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:12:35.416714 17316 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:12:35.416720 17316 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.416726 17316 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.416731 17316 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.416738 17316 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.416743 17316 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.416749 17316 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.416754 17316 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.416759 17316 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.416764 17316 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:12:35.416769 17316 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:12:35.416776 17316 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:12:35.416786 17316 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:12:35.416792 17316 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.416798 17316 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:12:35.416805 17316 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.416810 17316 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:12:35.416815 17316 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.416821 17316 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.416826 17316 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:12:35.416832 17316 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.416837 17316 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.416843 17316 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:12:35.416848 17316 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:12:35.416854 17316 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:12:35.416860 17316 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:12:35.416867 17316 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:12:35.416872 17316 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:12:35.416877 17316 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:12:35.416882 17316 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:12:35.416888 17316 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:12:35.416893 17316 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:12:35.416899 17316 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:12:35.416905 17316 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:12:35.416910 17316 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:12:35.416918 17316 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:12:35.416923 17316 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:12:35.416929 17316 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:12:35.416934 17316 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:12:35.416939 17316 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:12:35.416945 17316 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:12:35.416951 17316 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:12:35.416956 17316 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:12:35.416962 17316 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:12:35.416968 17316 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:12:35.416975 17316 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:12:35.416980 17316 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:12:35.416985 17316 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:12:35.416991 17316 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:12:35.416997 17316 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:12:35.417002 17316 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:12:35.417008 17316 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:12:35.417013 17316 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:12:35.417019 17316 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:12:35.417026 17316 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:12:35.417032 17316 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:12:35.417037 17316 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:12:35.417043 17316 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:12:35.417048 17316 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:12:35.417054 17316 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:12:35.417069 17316 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:12:35.417076 17316 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:12:35.417083 17316 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:12:35.417088 17316 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:12:35.417094 17316 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:12:35.417101 17316 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:12:35.417107 17316 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:12:35.417112 17316 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:12:35.417119 17316 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:12:35.417124 17316 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:12:35.417129 17316 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:12:35.417135 17316 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:12:35.417141 17316 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:12:35.417147 17316 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:12:35.417153 17316 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:12:35.417160 17316 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:12:35.417165 17316 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:12:35.417171 17316 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:12:35.417177 17316 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:12:35.417183 17316 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:12:35.417188 17316 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:12:35.417194 17316 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:12:35.417201 17316 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:12:35.417207 17316 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:12:35.417212 17316 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:12:35.417218 17316 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:12:35.417223 17316 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:12:35.417229 17316 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:12:35.417235 17316 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:12:35.417240 17316 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:12:35.417246 17316 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:12:35.417253 17316 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:12:35.417258 17316 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:12:35.417264 17316 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:12:35.417269 17316 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:12:35.417276 17316 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:12:35.417281 17316 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:12:35.417287 17316 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:12:35.417294 17316 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:12:35.417299 17316 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:12:35.417304 17316 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:12:35.417310 17316 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:12:35.417320 17316 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:12:35.417325 17316 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:12:35.417331 17316 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:12:35.417338 17316 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:12:35.417345 17316 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:12:35.417356 17316 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:12:35.417363 17316 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:12:35.417369 17316 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:12:35.417374 17316 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:12:35.417381 17316 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:12:35.417387 17316 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:12:35.417392 17316 net.cpp:226] pre_relu needs backward computation.\nI0817 16:12:35.417398 17316 net.cpp:226] pre_scale needs backward computation.\nI0817 16:12:35.417403 17316 net.cpp:226] pre_bn needs backward computation.\nI0817 16:12:35.417409 17316 net.cpp:226] pre_conv needs backward computation.\nI0817 16:12:35.417415 17316 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:12:35.417423 17316 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:12:35.417428 17316 net.cpp:270] This network produces output accuracy\nI0817 16:12:35.417434 17316 net.cpp:270] This network produces output loss\nI0817 16:12:35.417763 17316 net.cpp:283] Network initialization done.\nI0817 16:12:35.418776 17316 solver.cpp:60] Solver scaffolding done.\nI0817 16:12:35.642032 17316 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:12:35.992676 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:35.992728 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:35.999651 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:36.229543 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:36.229629 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:36.264252 17316 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:12:36.264335 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:36.699312 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:36.699364 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:36.707370 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:36.960682 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:36.960793 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:37.012042 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:37.012151 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:37.510960 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:37.511010 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:37.519688 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:37.789361 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:37.789494 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:37.859902 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:37.860030 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:37.943449 17316 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:12:38.410665 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:38.410718 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:12:38.419941 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:38.716820 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:38.717011 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:38.807410 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:38.807590 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:39.430629 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:39.430683 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:39.441151 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:39.759758 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:39.759943 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:39.871558 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:39.871739 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:40.554841 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:40.554893 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:40.566355 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:40.903260 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:40.903468 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:41.034996 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:41.035202 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:12:41.787928 17316 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:41.787981 17316 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:41.800245 17316 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:41.875707 17343 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:41.947392 17340 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:42.269703 17316 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:42.269935 17316 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:42.418059 17316 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:42.418290 17316 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:42.585450 17316 parallel.cpp:425] Starting Optimization\nI0817 16:12:42.586624 17316 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:12:42.586639 17316 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:12:42.591423 17316 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:14:02.900388 17316 solver.cpp:404]     Test net output #0: accuracy = 0.01008\nI0817 16:14:02.900678 17316 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:14:07.028239 17316 solver.cpp:228] Iteration 0, loss = 7.06628\nI0817 16:14:07.028280 17316 solver.cpp:244]     Train net output #0: accuracy = 0.008\nI0817 16:14:07.028296 17316 solver.cpp:244]     Train net output #1: loss = 7.06628 (* 1 = 7.06628 loss)\nI0817 16:14:07.028447 17316 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:16:25.893121 17316 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:17:45.582994 17316 solver.cpp:404]     Test net output #0: accuracy = 0.02244\nI0817 16:17:45.583243 17316 solver.cpp:404]     Test net output #1: loss = 4.48804 (* 1 = 4.48804 loss)\nI0817 16:17:46.878653 17316 
solver.cpp:228] Iteration 100, loss = 4.34922\nI0817 16:17:46.878695 17316 solver.cpp:244]     Train net output #0: accuracy = 0.024\nI0817 16:17:46.878710 17316 solver.cpp:244]     Train net output #1: loss = 4.34922 (* 1 = 4.34922 loss)\nI0817 16:17:46.986950 17316 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:20:05.352684 17316 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:21:25.751339 17316 solver.cpp:404]     Test net output #0: accuracy = 0.07372\nI0817 16:21:25.751570 17316 solver.cpp:404]     Test net output #1: loss = 4.08298 (* 1 = 4.08298 loss)\nI0817 16:21:27.061007 17316 solver.cpp:228] Iteration 200, loss = 3.70084\nI0817 16:21:27.061045 17316 solver.cpp:244]     Train net output #0: accuracy = 0.136\nI0817 16:21:27.061062 17316 solver.cpp:244]     Train net output #1: loss = 3.70084 (* 1 = 3.70084 loss)\nI0817 16:21:27.163717 17316 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:23:45.525071 17316 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:25:05.974598 17316 solver.cpp:404]     Test net output #0: accuracy = 0.15216\nI0817 16:25:05.974812 17316 solver.cpp:404]     Test net output #1: loss = 3.68821 (* 1 = 3.68821 loss)\nI0817 16:25:07.284833 17316 solver.cpp:228] Iteration 300, loss = 2.96586\nI0817 16:25:07.284873 17316 solver.cpp:244]     Train net output #0: accuracy = 0.264\nI0817 16:25:07.284890 17316 solver.cpp:244]     Train net output #1: loss = 2.96586 (* 1 = 2.96586 loss)\nI0817 16:25:07.379189 17316 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:27:25.822001 17316 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:28:46.277124 17316 solver.cpp:404]     Test net output #0: accuracy = 0.21132\nI0817 16:28:46.277372 17316 solver.cpp:404]     Test net output #1: loss = 3.44946 (* 1 = 3.44946 loss)\nI0817 16:28:47.588418 17316 solver.cpp:228] Iteration 400, loss = 2.59911\nI0817 16:28:47.588459 17316 solver.cpp:244]     Train net output #0: accuracy = 0.32\nI0817 16:28:47.588475 17316 
solver.cpp:244]     Train net output #1: loss = 2.59911 (* 1 = 2.59911 loss)\nI0817 16:28:47.688683 17316 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:31:06.168406 17316 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:32:26.611166 17316 solver.cpp:404]     Test net output #0: accuracy = 0.34472\nI0817 16:32:26.611371 17316 solver.cpp:404]     Test net output #1: loss = 2.56119 (* 1 = 2.56119 loss)\nI0817 16:32:27.921392 17316 solver.cpp:228] Iteration 500, loss = 2.07894\nI0817 16:32:27.921438 17316 solver.cpp:244]     Train net output #0: accuracy = 0.384\nI0817 16:32:27.921455 17316 solver.cpp:244]     Train net output #1: loss = 2.07894 (* 1 = 2.07894 loss)\nI0817 16:32:28.017721 17316 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:34:46.447536 17316 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:36:06.880224 17316 solver.cpp:404]     Test net output #0: accuracy = 0.36488\nI0817 16:36:06.880442 17316 solver.cpp:404]     Test net output #1: loss = 2.44763 (* 1 = 2.44763 loss)\nI0817 16:36:08.189961 17316 solver.cpp:228] Iteration 600, loss = 1.83606\nI0817 16:36:08.190002 17316 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI0817 16:36:08.190018 17316 solver.cpp:244]     Train net output #1: loss = 1.83606 (* 1 = 1.83606 loss)\nI0817 16:36:08.281867 17316 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:38:26.626169 17316 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:39:47.056195 17316 solver.cpp:404]     Test net output #0: accuracy = 0.37444\nI0817 16:39:47.056447 17316 solver.cpp:404]     Test net output #1: loss = 2.52433 (* 1 = 2.52433 loss)\nI0817 16:39:48.366039 17316 solver.cpp:228] Iteration 700, loss = 1.522\nI0817 16:39:48.366081 17316 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0817 16:39:48.366097 17316 solver.cpp:244]     Train net output #1: loss = 1.522 (* 1 = 1.522 loss)\nI0817 16:39:48.468279 17316 sgd_solver.cpp:166] Iteration 700, lr = 0.506\nI0817 
16:42:06.897330 17316 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:43:27.258314 17316 solver.cpp:404]     Test net output #0: accuracy = 0.38744\nI0817 16:43:27.258533 17316 solver.cpp:404]     Test net output #1: loss = 2.56841 (* 1 = 2.56841 loss)\nI0817 16:43:28.569155 17316 solver.cpp:228] Iteration 800, loss = 1.44976\nI0817 16:43:28.569198 17316 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0817 16:43:28.569216 17316 solver.cpp:244]     Train net output #1: loss = 1.44976 (* 1 = 1.44976 loss)\nI0817 16:43:28.672142 17316 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:45:47.068387 17316 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:47:07.428768 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3676\nI0817 16:47:07.429004 17316 solver.cpp:404]     Test net output #1: loss = 2.51093 (* 1 = 2.51093 loss)\nI0817 16:47:08.739199 17316 solver.cpp:228] Iteration 900, loss = 1.31402\nI0817 16:47:08.739243 17316 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0817 16:47:08.739259 17316 solver.cpp:244]     Train net output #1: loss = 1.31402 (* 1 = 1.31402 loss)\nI0817 16:47:08.839239 17316 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:49:27.356088 17316 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:50:47.721223 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3886\nI0817 16:50:47.721464 17316 solver.cpp:404]     Test net output #1: loss = 2.56431 (* 1 = 2.56431 loss)\nI0817 16:50:49.031644 17316 solver.cpp:228] Iteration 1000, loss = 1.31718\nI0817 16:50:49.031687 17316 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 16:50:49.031704 17316 solver.cpp:244]     Train net output #1: loss = 1.31718 (* 1 = 1.31718 loss)\nI0817 16:50:49.123486 17316 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:53:07.497593 17316 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:54:27.866760 17316 solver.cpp:404]     Test net output #0: accuracy = 
0.43392\nI0817 16:54:27.866946 17316 solver.cpp:404]     Test net output #1: loss = 2.20027 (* 1 = 2.20027 loss)\nI0817 16:54:29.177417 17316 solver.cpp:228] Iteration 1100, loss = 0.927856\nI0817 16:54:29.177460 17316 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 16:54:29.177476 17316 solver.cpp:244]     Train net output #1: loss = 0.927856 (* 1 = 0.927856 loss)\nI0817 16:54:29.272066 17316 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:56:47.833343 17316 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:58:08.201625 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3868\nI0817 16:58:08.201830 17316 solver.cpp:404]     Test net output #1: loss = 2.78613 (* 1 = 2.78613 loss)\nI0817 16:58:09.512558 17316 solver.cpp:228] Iteration 1200, loss = 1.10479\nI0817 16:58:09.512604 17316 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 16:58:09.512622 17316 solver.cpp:244]     Train net output #1: loss = 1.10479 (* 1 = 1.10479 loss)\nI0817 16:58:09.611670 17316 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 17:00:28.214846 17316 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:01:48.577978 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3934\nI0817 17:01:48.578197 17316 solver.cpp:404]     Test net output #1: loss = 2.61919 (* 1 = 2.61919 loss)\nI0817 17:01:49.887583 17316 solver.cpp:228] Iteration 1300, loss = 1.15291\nI0817 17:01:49.887630 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 17:01:49.887647 17316 solver.cpp:244]     Train net output #1: loss = 1.15291 (* 1 = 1.15291 loss)\nI0817 17:01:49.991814 17316 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 17:04:08.470667 17316 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:05:28.835770 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42612\nI0817 17:05:28.836019 17316 solver.cpp:404]     Test net output #1: loss = 2.49349 (* 1 = 2.49349 loss)\nI0817 17:05:30.146029 17316 
solver.cpp:228] Iteration 1400, loss = 1.013\nI0817 17:05:30.146073 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 17:05:30.146090 17316 solver.cpp:244]     Train net output #1: loss = 1.013 (* 1 = 1.013 loss)\nI0817 17:05:30.247174 17316 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:07:48.801084 17316 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:09:09.163636 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4176\nI0817 17:09:09.163875 17316 solver.cpp:404]     Test net output #1: loss = 2.4475 (* 1 = 2.4475 loss)\nI0817 17:09:10.473673 17316 solver.cpp:228] Iteration 1500, loss = 1.01646\nI0817 17:09:10.473716 17316 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0817 17:09:10.473732 17316 solver.cpp:244]     Train net output #1: loss = 1.01646 (* 1 = 1.01646 loss)\nI0817 17:09:10.565836 17316 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:11:29.018635 17316 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:12:49.378216 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3416\nI0817 17:12:49.378459 17316 solver.cpp:404]     Test net output #1: loss = 3.25369 (* 1 = 3.25369 loss)\nI0817 17:12:50.688735 17316 solver.cpp:228] Iteration 1600, loss = 1.03386\nI0817 17:12:50.688777 17316 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0817 17:12:50.688793 17316 solver.cpp:244]     Train net output #1: loss = 1.03386 (* 1 = 1.03386 loss)\nI0817 17:12:50.783701 17316 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:15:09.065208 17316 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:16:29.520877 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42708\nI0817 17:16:29.521131 17316 solver.cpp:404]     Test net output #1: loss = 2.53106 (* 1 = 2.53106 loss)\nI0817 17:16:30.832109 17316 solver.cpp:228] Iteration 1700, loss = 0.853143\nI0817 17:16:30.832155 17316 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 17:16:30.832178 17316 
solver.cpp:244]     Train net output #1: loss = 0.853143 (* 1 = 0.853143 loss)\nI0817 17:16:30.933609 17316 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:18:49.248076 17316 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:20:09.707868 17316 solver.cpp:404]     Test net output #0: accuracy = 0.39904\nI0817 17:20:09.708130 17316 solver.cpp:404]     Test net output #1: loss = 2.82767 (* 1 = 2.82767 loss)\nI0817 17:20:11.017999 17316 solver.cpp:228] Iteration 1800, loss = 0.90951\nI0817 17:20:11.018043 17316 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:20:11.018074 17316 solver.cpp:244]     Train net output #1: loss = 0.90951 (* 1 = 0.90951 loss)\nI0817 17:20:11.115372 17316 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:22:29.493206 17316 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:23:49.955454 17316 solver.cpp:404]     Test net output #0: accuracy = 0.367\nI0817 17:23:49.955703 17316 solver.cpp:404]     Test net output #1: loss = 2.90623 (* 1 = 2.90623 loss)\nI0817 17:23:51.265499 17316 solver.cpp:228] Iteration 1900, loss = 0.961539\nI0817 17:23:51.265547 17316 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:23:51.265570 17316 solver.cpp:244]     Train net output #1: loss = 0.961539 (* 1 = 0.961539 loss)\nI0817 17:23:51.367743 17316 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:26:09.800163 17316 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:27:30.253325 17316 solver.cpp:404]     Test net output #0: accuracy = 0.38144\nI0817 17:27:30.253576 17316 solver.cpp:404]     Test net output #1: loss = 2.78984 (* 1 = 2.78984 loss)\nI0817 17:27:31.563464 17316 solver.cpp:228] Iteration 2000, loss = 1.008\nI0817 17:27:31.563508 17316 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 17:27:31.563532 17316 solver.cpp:244]     Train net output #1: loss = 1.008 (* 1 = 1.008 loss)\nI0817 17:27:31.666100 17316 sgd_solver.cpp:166] Iteration 2000, lr = 
1.26\nI0817 17:29:50.199156 17316 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:31:10.644987 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3932\nI0817 17:31:10.645246 17316 solver.cpp:404]     Test net output #1: loss = 2.82237 (* 1 = 2.82237 loss)\nI0817 17:31:11.958855 17316 solver.cpp:228] Iteration 2100, loss = 0.993467\nI0817 17:31:11.958905 17316 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 17:31:11.958928 17316 solver.cpp:244]     Train net output #1: loss = 0.993467 (* 1 = 0.993467 loss)\nI0817 17:31:12.057942 17316 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:33:30.451916 17316 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:34:50.907961 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42692\nI0817 17:34:50.908190 17316 solver.cpp:404]     Test net output #1: loss = 2.42764 (* 1 = 2.42764 loss)\nI0817 17:34:52.220116 17316 solver.cpp:228] Iteration 2200, loss = 0.922489\nI0817 17:34:52.220165 17316 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 17:34:52.220180 17316 solver.cpp:244]     Train net output #1: loss = 0.922489 (* 1 = 0.922489 loss)\nI0817 17:34:52.319964 17316 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:37:10.778789 17316 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:38:31.238680 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42732\nI0817 17:38:31.238929 17316 solver.cpp:404]     Test net output #1: loss = 2.48752 (* 1 = 2.48752 loss)\nI0817 17:38:32.549829 17316 solver.cpp:228] Iteration 2300, loss = 1.06318\nI0817 17:38:32.549877 17316 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:38:32.549901 17316 solver.cpp:244]     Train net output #1: loss = 1.06318 (* 1 = 1.06318 loss)\nI0817 17:38:32.647205 17316 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:40:51.043733 17316 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:42:11.496111 17316 solver.cpp:404]     Test net output 
#0: accuracy = 0.39148\nI0817 17:42:11.496356 17316 solver.cpp:404]     Test net output #1: loss = 2.93732 (* 1 = 2.93732 loss)\nI0817 17:42:12.806876 17316 solver.cpp:228] Iteration 2400, loss = 0.928678\nI0817 17:42:12.806921 17316 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 17:42:12.806938 17316 solver.cpp:244]     Train net output #1: loss = 0.928678 (* 1 = 0.928678 loss)\nI0817 17:42:12.901413 17316 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:44:31.405014 17316 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:45:51.847695 17316 solver.cpp:404]     Test net output #0: accuracy = 0.465\nI0817 17:45:51.847945 17316 solver.cpp:404]     Test net output #1: loss = 2.37045 (* 1 = 2.37045 loss)\nI0817 17:45:53.157649 17316 solver.cpp:228] Iteration 2500, loss = 0.905101\nI0817 17:45:53.157694 17316 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:45:53.157711 17316 solver.cpp:244]     Train net output #1: loss = 0.905101 (* 1 = 0.905101 loss)\nI0817 17:45:53.254252 17316 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:48:11.768934 17316 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:49:32.214098 17316 solver.cpp:404]     Test net output #0: accuracy = 0.40428\nI0817 17:49:32.214360 17316 solver.cpp:404]     Test net output #1: loss = 2.78919 (* 1 = 2.78919 loss)\nI0817 17:49:33.524345 17316 solver.cpp:228] Iteration 2600, loss = 0.988198\nI0817 17:49:33.524392 17316 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 17:49:33.524410 17316 solver.cpp:244]     Train net output #1: loss = 0.988198 (* 1 = 0.988198 loss)\nI0817 17:49:33.626298 17316 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:51:52.120139 17316 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:53:12.560504 17316 solver.cpp:404]     Test net output #0: accuracy = 0.3592\nI0817 17:53:12.560717 17316 solver.cpp:404]     Test net output #1: loss = 3.08249 (* 1 = 3.08249 loss)\nI0817 
17:53:13.870244 17316 solver.cpp:228] Iteration 2700, loss = 0.976773\nI0817 17:53:13.870285 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 17:53:13.870301 17316 solver.cpp:244]     Train net output #1: loss = 0.976773 (* 1 = 0.976773 loss)\nI0817 17:53:13.965157 17316 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:55:32.397617 17316 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:56:52.827651 17316 solver.cpp:404]     Test net output #0: accuracy = 0.45972\nI0817 17:56:52.827888 17316 solver.cpp:404]     Test net output #1: loss = 2.32354 (* 1 = 2.32354 loss)\nI0817 17:56:54.137480 17316 solver.cpp:228] Iteration 2800, loss = 0.860786\nI0817 17:56:54.137522 17316 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 17:56:54.137538 17316 solver.cpp:244]     Train net output #1: loss = 0.860786 (* 1 = 0.860786 loss)\nI0817 17:56:54.240222 17316 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:59:12.744817 17316 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:00:33.181355 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42028\nI0817 18:00:33.181591 17316 solver.cpp:404]     Test net output #1: loss = 2.45981 (* 1 = 2.45981 loss)\nI0817 18:00:34.491541 17316 solver.cpp:228] Iteration 2900, loss = 1.03493\nI0817 18:00:34.491586 17316 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 18:00:34.491605 17316 solver.cpp:244]     Train net output #1: loss = 1.03493 (* 1 = 1.03493 loss)\nI0817 18:00:34.583070 17316 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 18:02:53.069725 17316 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:04:13.504472 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4184\nI0817 18:04:13.504714 17316 solver.cpp:404]     Test net output #1: loss = 2.44034 (* 1 = 2.44034 loss)\nI0817 18:04:14.814433 17316 solver.cpp:228] Iteration 3000, loss = 0.895171\nI0817 18:04:14.814479 17316 solver.cpp:244]     Train net output #0: 
accuracy = 0.76\nI0817 18:04:14.814496 17316 solver.cpp:244]     Train net output #1: loss = 0.895171 (* 1 = 0.895171 loss)\nI0817 18:04:14.915199 17316 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:06:33.403297 17316 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:07:53.838124 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4666\nI0817 18:07:53.838380 17316 solver.cpp:404]     Test net output #1: loss = 2.10508 (* 1 = 2.10508 loss)\nI0817 18:07:55.147750 17316 solver.cpp:228] Iteration 3100, loss = 0.904657\nI0817 18:07:55.147796 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 18:07:55.147812 17316 solver.cpp:244]     Train net output #1: loss = 0.904657 (* 1 = 0.904657 loss)\nI0817 18:07:55.244340 17316 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:10:13.597650 17316 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:11:34.036844 17316 solver.cpp:404]     Test net output #0: accuracy = 0.33456\nI0817 18:11:34.037055 17316 solver.cpp:404]     Test net output #1: loss = 3.13308 (* 1 = 3.13308 loss)\nI0817 18:11:35.346336 17316 solver.cpp:228] Iteration 3200, loss = 0.954389\nI0817 18:11:35.346377 17316 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 18:11:35.346393 17316 solver.cpp:244]     Train net output #1: loss = 0.954389 (* 1 = 0.954389 loss)\nI0817 18:11:35.442843 17316 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:13:53.660850 17316 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:15:14.111308 17316 solver.cpp:404]     Test net output #0: accuracy = 0.36404\nI0817 18:15:14.111522 17316 solver.cpp:404]     Test net output #1: loss = 2.93878 (* 1 = 2.93878 loss)\nI0817 18:15:15.420394 17316 solver.cpp:228] Iteration 3300, loss = 1.15977\nI0817 18:15:15.420428 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 18:15:15.420444 17316 solver.cpp:244]     Train net output #1: loss = 1.15977 (* 1 = 1.15977 loss)\nI0817 18:15:15.514713 
17316 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:17:33.536720 17316 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:18:53.974027 17316 solver.cpp:404]     Test net output #0: accuracy = 0.37044\nI0817 18:18:53.974225 17316 solver.cpp:404]     Test net output #1: loss = 2.86953 (* 1 = 2.86953 loss)\nI0817 18:18:55.284109 17316 solver.cpp:228] Iteration 3400, loss = 0.96757\nI0817 18:18:55.284154 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 18:18:55.284175 17316 solver.cpp:244]     Train net output #1: loss = 0.96757 (* 1 = 0.96757 loss)\nI0817 18:18:55.380779 17316 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:21:13.524605 17316 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:22:33.959961 17316 solver.cpp:404]     Test net output #0: accuracy = 0.41584\nI0817 18:22:33.960212 17316 solver.cpp:404]     Test net output #1: loss = 2.58079 (* 1 = 2.58079 loss)\nI0817 18:22:35.270345 17316 solver.cpp:228] Iteration 3500, loss = 0.964388\nI0817 18:22:35.270380 17316 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 18:22:35.270395 17316 solver.cpp:244]     Train net output #1: loss = 0.964388 (* 1 = 0.964388 loss)\nI0817 18:22:35.368871 17316 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:24:53.466733 17316 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:26:13.892331 17316 solver.cpp:404]     Test net output #0: accuracy = 0.46796\nI0817 18:26:13.892580 17316 solver.cpp:404]     Test net output #1: loss = 2.31654 (* 1 = 2.31654 loss)\nI0817 18:26:15.202674 17316 solver.cpp:228] Iteration 3600, loss = 0.891594\nI0817 18:26:15.202720 17316 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 18:26:15.202738 17316 solver.cpp:244]     Train net output #1: loss = 0.891594 (* 1 = 0.891594 loss)\nI0817 18:26:15.299263 17316 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 18:28:33.759284 17316 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 
18:29:54.193814 17316 solver.cpp:404]     Test net output #0: accuracy = 0.43532\nI0817 18:29:54.194053 17316 solver.cpp:404]     Test net output #1: loss = 2.41029 (* 1 = 2.41029 loss)\nI0817 18:29:55.503928 17316 solver.cpp:228] Iteration 3700, loss = 1.13638\nI0817 18:29:55.503973 17316 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 18:29:55.503989 17316 solver.cpp:244]     Train net output #1: loss = 1.13638 (* 1 = 1.13638 loss)\nI0817 18:29:55.596829 17316 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:32:14.121778 17316 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:33:34.563040 17316 solver.cpp:404]     Test net output #0: accuracy = 0.36664\nI0817 18:33:34.563251 17316 solver.cpp:404]     Test net output #1: loss = 3.09367 (* 1 = 3.09367 loss)\nI0817 18:33:35.873538 17316 solver.cpp:228] Iteration 3800, loss = 1.15045\nI0817 18:33:35.873582 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 18:33:35.873598 17316 solver.cpp:244]     Train net output #1: loss = 1.15045 (* 1 = 1.15045 loss)\nI0817 18:33:35.972151 17316 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:35:54.414232 17316 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:37:14.852017 17316 solver.cpp:404]     Test net output #0: accuracy = 0.38008\nI0817 18:37:14.852267 17316 solver.cpp:404]     Test net output #1: loss = 2.81167 (* 1 = 2.81167 loss)\nI0817 18:37:16.162567 17316 solver.cpp:228] Iteration 3900, loss = 0.97317\nI0817 18:37:16.162609 17316 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 18:37:16.162626 17316 solver.cpp:244]     Train net output #1: loss = 0.97317 (* 1 = 0.97317 loss)\nI0817 18:37:16.261742 17316 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:39:34.662256 17316 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:40:55.098343 17316 solver.cpp:404]     Test net output #0: accuracy = 0.36336\nI0817 18:40:55.098587 17316 solver.cpp:404]     Test net output #1: 
loss = 2.85381 (* 1 = 2.85381 loss)\nI0817 18:40:56.408516 17316 solver.cpp:228] Iteration 4000, loss = 1.0551\nI0817 18:40:56.408558 17316 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 18:40:56.408573 17316 solver.cpp:244]     Train net output #1: loss = 1.0551 (* 1 = 1.0551 loss)\nI0817 18:40:56.501874 17316 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:43:14.841547 17316 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:44:35.276113 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4402\nI0817 18:44:35.276353 17316 solver.cpp:404]     Test net output #1: loss = 2.22706 (* 1 = 2.22706 loss)\nI0817 18:44:36.586127 17316 solver.cpp:228] Iteration 4100, loss = 1.11243\nI0817 18:44:36.586172 17316 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 18:44:36.586189 17316 solver.cpp:244]     Train net output #1: loss = 1.11243 (* 1 = 1.11243 loss)\nI0817 18:44:36.687099 17316 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:46:55.129505 17316 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:48:15.567559 17316 solver.cpp:404]     Test net output #0: accuracy = 0.36156\nI0817 18:48:15.567806 17316 solver.cpp:404]     Test net output #1: loss = 2.91597 (* 1 = 2.91597 loss)\nI0817 18:48:16.878367 17316 solver.cpp:228] Iteration 4200, loss = 1.0578\nI0817 18:48:16.878412 17316 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 18:48:16.878427 17316 solver.cpp:244]     Train net output #1: loss = 1.0578 (* 1 = 1.0578 loss)\nI0817 18:48:16.974656 17316 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:50:35.386548 17316 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:51:55.823750 17316 solver.cpp:404]     Test net output #0: accuracy = 0.46152\nI0817 18:51:55.823998 17316 solver.cpp:404]     Test net output #1: loss = 2.10685 (* 1 = 2.10685 loss)\nI0817 18:51:57.134080 17316 solver.cpp:228] Iteration 4300, loss = 1.20095\nI0817 18:51:57.134122 17316 solver.cpp:244]    
 Train net output #0: accuracy = 0.664\nI0817 18:51:57.134138 17316 solver.cpp:244]     Train net output #1: loss = 1.20095 (* 1 = 1.20095 loss)\nI0817 18:51:57.231724 17316 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:54:15.717382 17316 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:55:36.154090 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42464\nI0817 18:55:36.154336 17316 solver.cpp:404]     Test net output #1: loss = 2.33911 (* 1 = 2.33911 loss)\nI0817 18:55:37.464781 17316 solver.cpp:228] Iteration 4400, loss = 1.12858\nI0817 18:55:37.464823 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 18:55:37.464839 17316 solver.cpp:244]     Train net output #1: loss = 1.12858 (* 1 = 1.12858 loss)\nI0817 18:55:37.560827 17316 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:57:55.947182 17316 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:59:16.315125 17316 solver.cpp:404]     Test net output #0: accuracy = 0.41836\nI0817 18:59:16.315367 17316 solver.cpp:404]     Test net output #1: loss = 2.45605 (* 1 = 2.45605 loss)\nI0817 18:59:17.625669 17316 solver.cpp:228] Iteration 4500, loss = 1.08185\nI0817 18:59:17.625711 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 18:59:17.625727 17316 solver.cpp:244]     Train net output #1: loss = 1.08185 (* 1 = 1.08185 loss)\nI0817 18:59:17.720929 17316 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 19:01:36.248730 17316 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:02:56.613049 17316 solver.cpp:404]     Test net output #0: accuracy = 0.43244\nI0817 19:02:56.613303 17316 solver.cpp:404]     Test net output #1: loss = 2.36722 (* 1 = 2.36722 loss)\nI0817 19:02:57.923660 17316 solver.cpp:228] Iteration 4600, loss = 1.17052\nI0817 19:02:57.923702 17316 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 19:02:57.923718 17316 solver.cpp:244]     Train net output #1: loss = 1.17052 (* 1 = 1.17052 loss)\nI0817 
19:02:58.025229 17316 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 19:05:16.536306 17316 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:06:36.897922 17316 solver.cpp:404]     Test net output #0: accuracy = 0.46092\nI0817 19:06:36.898155 17316 solver.cpp:404]     Test net output #1: loss = 2.06739 (* 1 = 2.06739 loss)\nI0817 19:06:38.207873 17316 solver.cpp:228] Iteration 4700, loss = 1.25058\nI0817 19:06:38.207918 17316 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 19:06:38.207934 17316 solver.cpp:244]     Train net output #1: loss = 1.25058 (* 1 = 1.25058 loss)\nI0817 19:06:38.307320 17316 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:08:56.912549 17316 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:10:17.280097 17316 solver.cpp:404]     Test net output #0: accuracy = 0.37636\nI0817 19:10:17.280344 17316 solver.cpp:404]     Test net output #1: loss = 2.95844 (* 1 = 2.95844 loss)\nI0817 19:10:18.590538 17316 solver.cpp:228] Iteration 4800, loss = 1.19267\nI0817 19:10:18.590582 17316 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 19:10:18.590598 17316 solver.cpp:244]     Train net output #1: loss = 1.19267 (* 1 = 1.19267 loss)\nI0817 19:10:18.688351 17316 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:12:37.225626 17316 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:13:57.604697 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44524\nI0817 19:13:57.604951 17316 solver.cpp:404]     Test net output #1: loss = 2.33288 (* 1 = 2.33288 loss)\nI0817 19:13:58.916189 17316 solver.cpp:228] Iteration 4900, loss = 1.06376\nI0817 19:13:58.916232 17316 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0817 19:13:58.916249 17316 solver.cpp:244]     Train net output #1: loss = 1.06376 (* 1 = 1.06376 loss)\nI0817 19:13:59.018584 17316 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:16:17.410567 17316 solver.cpp:337] Iteration 5000, Testing net 
(#0)\nI0817 19:17:37.801414 17316 solver.cpp:404]     Test net output #0: accuracy = 0.39428\nI0817 19:17:37.801643 17316 solver.cpp:404]     Test net output #1: loss = 2.77298 (* 1 = 2.77298 loss)\nI0817 19:17:39.112679 17316 solver.cpp:228] Iteration 5000, loss = 1.25907\nI0817 19:17:39.112720 17316 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 19:17:39.112736 17316 solver.cpp:244]     Train net output #1: loss = 1.25907 (* 1 = 1.25907 loss)\nI0817 19:17:39.202410 17316 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:19:57.659524 17316 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:21:18.034783 17316 solver.cpp:404]     Test net output #0: accuracy = 0.32656\nI0817 19:21:18.035027 17316 solver.cpp:404]     Test net output #1: loss = 3.28229 (* 1 = 3.28229 loss)\nI0817 19:21:19.346880 17316 solver.cpp:228] Iteration 5100, loss = 1.13819\nI0817 19:21:19.346925 17316 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 19:21:19.346941 17316 solver.cpp:244]     Train net output #1: loss = 1.13819 (* 1 = 1.13819 loss)\nI0817 19:21:19.436929 17316 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:23:37.951913 17316 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:24:58.320912 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42852\nI0817 19:24:58.321144 17316 solver.cpp:404]     Test net output #1: loss = 2.52918 (* 1 = 2.52918 loss)\nI0817 19:24:59.632297 17316 solver.cpp:228] Iteration 5200, loss = 1.16877\nI0817 19:24:59.632341 17316 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 19:24:59.632359 17316 solver.cpp:244]     Train net output #1: loss = 1.16877 (* 1 = 1.16877 loss)\nI0817 19:24:59.736894 17316 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:27:18.296202 17316 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:28:38.670542 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42172\nI0817 19:28:38.670760 17316 solver.cpp:404]     Test net 
output #1: loss = 2.50905 (* 1 = 2.50905 loss)\nI0817 19:28:39.981724 17316 solver.cpp:228] Iteration 5300, loss = 1.23145\nI0817 19:28:39.981771 17316 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 19:28:39.981796 17316 solver.cpp:244]     Train net output #1: loss = 1.23145 (* 1 = 1.23145 loss)\nI0817 19:28:40.078151 17316 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:30:58.573287 17316 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:32:19.040163 17316 solver.cpp:404]     Test net output #0: accuracy = 0.39748\nI0817 19:32:19.040411 17316 solver.cpp:404]     Test net output #1: loss = 2.55159 (* 1 = 2.55159 loss)\nI0817 19:32:20.350810 17316 solver.cpp:228] Iteration 5400, loss = 1.06143\nI0817 19:32:20.350855 17316 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 19:32:20.350879 17316 solver.cpp:244]     Train net output #1: loss = 1.06143 (* 1 = 1.06143 loss)\nI0817 19:32:20.454228 17316 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:34:38.980790 17316 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:35:59.445804 17316 solver.cpp:404]     Test net output #0: accuracy = 0.47092\nI0817 19:35:59.446072 17316 solver.cpp:404]     Test net output #1: loss = 2.22081 (* 1 = 2.22081 loss)\nI0817 19:36:00.755669 17316 solver.cpp:228] Iteration 5500, loss = 1.31161\nI0817 19:36:00.755714 17316 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0817 19:36:00.755739 17316 solver.cpp:244]     Train net output #1: loss = 1.31161 (* 1 = 1.31161 loss)\nI0817 19:36:00.856086 17316 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:38:19.143447 17316 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:39:39.601447 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42728\nI0817 19:39:39.601707 17316 solver.cpp:404]     Test net output #1: loss = 2.3787 (* 1 = 2.3787 loss)\nI0817 19:39:40.912055 17316 solver.cpp:228] Iteration 5600, loss = 1.18944\nI0817 19:39:40.912099 17316 
solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0817 19:39:40.912124 17316 solver.cpp:244]     Train net output #1: loss = 1.18944 (* 1 = 1.18944 loss)\nI0817 19:39:41.008474 17316 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:41:59.476908 17316 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:43:19.953413 17316 solver.cpp:404]     Test net output #0: accuracy = 0.38504\nI0817 19:43:19.953662 17316 solver.cpp:404]     Test net output #1: loss = 2.91743 (* 1 = 2.91743 loss)\nI0817 19:43:21.263434 17316 solver.cpp:228] Iteration 5700, loss = 1.14281\nI0817 19:43:21.263479 17316 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 19:43:21.263504 17316 solver.cpp:244]     Train net output #1: loss = 1.14281 (* 1 = 1.14281 loss)\nI0817 19:43:21.362933 17316 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:45:39.838096 17316 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:47:00.303751 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42632\nI0817 19:47:00.303992 17316 solver.cpp:404]     Test net output #1: loss = 2.49323 (* 1 = 2.49323 loss)\nI0817 19:47:01.614991 17316 solver.cpp:228] Iteration 5800, loss = 0.882342\nI0817 19:47:01.615039 17316 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 19:47:01.615064 17316 solver.cpp:244]     Train net output #1: loss = 0.882342 (* 1 = 0.882342 loss)\nI0817 19:47:01.715587 17316 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:49:20.071899 17316 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:50:40.527395 17316 solver.cpp:404]     Test net output #0: accuracy = 0.45776\nI0817 19:50:40.527652 17316 solver.cpp:404]     Test net output #1: loss = 2.26744 (* 1 = 2.26744 loss)\nI0817 19:50:41.838662 17316 solver.cpp:228] Iteration 5900, loss = 0.824859\nI0817 19:50:41.838711 17316 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 19:50:41.838734 17316 solver.cpp:244]     Train net output #1: loss = 0.824859 (* 1 
= 0.824859 loss)\nI0817 19:50:41.940538 17316 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:53:00.371794 17316 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:54:20.838692 17316 solver.cpp:404]     Test net output #0: accuracy = 0.43592\nI0817 19:54:20.838944 17316 solver.cpp:404]     Test net output #1: loss = 2.52433 (* 1 = 2.52433 loss)\nI0817 19:54:22.150559 17316 solver.cpp:228] Iteration 6000, loss = 0.992\nI0817 19:54:22.150606 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 19:54:22.150630 17316 solver.cpp:244]     Train net output #1: loss = 0.992 (* 1 = 0.992 loss)\nI0817 19:54:22.249063 17316 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:56:40.659637 17316 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:58:01.113196 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44064\nI0817 19:58:01.113420 17316 solver.cpp:404]     Test net output #1: loss = 2.52549 (* 1 = 2.52549 loss)\nI0817 19:58:02.424052 17316 solver.cpp:228] Iteration 6100, loss = 0.991934\nI0817 19:58:02.424096 17316 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 19:58:02.424120 17316 solver.cpp:244]     Train net output #1: loss = 0.991934 (* 1 = 0.991934 loss)\nI0817 19:58:02.518534 17316 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 20:00:20.924448 17316 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:01:41.383707 17316 solver.cpp:404]     Test net output #0: accuracy = 0.35668\nI0817 20:01:41.383956 17316 solver.cpp:404]     Test net output #1: loss = 3.01674 (* 1 = 3.01674 loss)\nI0817 20:01:42.694136 17316 solver.cpp:228] Iteration 6200, loss = 0.998225\nI0817 20:01:42.694180 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 20:01:42.694203 17316 solver.cpp:244]     Train net output #1: loss = 0.998225 (* 1 = 0.998225 loss)\nI0817 20:01:42.785689 17316 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 20:04:01.194880 17316 solver.cpp:337] Iteration 6300, 
Testing net (#0)\nI0817 20:05:21.655402 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44996\nI0817 20:05:21.655653 17316 solver.cpp:404]     Test net output #1: loss = 2.32983 (* 1 = 2.32983 loss)\nI0817 20:05:22.965826 17316 solver.cpp:228] Iteration 6300, loss = 0.912314\nI0817 20:05:22.965870 17316 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 20:05:22.965894 17316 solver.cpp:244]     Train net output #1: loss = 0.912314 (* 1 = 0.912314 loss)\nI0817 20:05:23.068094 17316 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:07:41.536867 17316 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:09:01.984910 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42544\nI0817 20:09:01.985162 17316 solver.cpp:404]     Test net output #1: loss = 2.47428 (* 1 = 2.47428 loss)\nI0817 20:09:03.295100 17316 solver.cpp:228] Iteration 6400, loss = 0.873551\nI0817 20:09:03.295147 17316 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 20:09:03.295171 17316 solver.cpp:244]     Train net output #1: loss = 0.873551 (* 1 = 0.873551 loss)\nI0817 20:09:03.388203 17316 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:11:21.767783 17316 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:12:42.236562 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44792\nI0817 20:12:42.236779 17316 solver.cpp:404]     Test net output #1: loss = 2.28108 (* 1 = 2.28108 loss)\nI0817 20:12:43.546499 17316 solver.cpp:228] Iteration 6500, loss = 1.0381\nI0817 20:12:43.546545 17316 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 20:12:43.546571 17316 solver.cpp:244]     Train net output #1: loss = 1.0381 (* 1 = 1.0381 loss)\nI0817 20:12:43.641405 17316 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 20:15:01.992384 17316 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:16:22.438016 17316 solver.cpp:404]     Test net output #0: accuracy = 0.45532\nI0817 20:16:22.438277 17316 solver.cpp:404] 
    Test net output #1: loss = 2.30636 (* 1 = 2.30636 loss)\nI0817 20:16:23.749372 17316 solver.cpp:228] Iteration 6600, loss = 0.813534\nI0817 20:16:23.749418 17316 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 20:16:23.749444 17316 solver.cpp:244]     Train net output #1: loss = 0.813534 (* 1 = 0.813534 loss)\nI0817 20:16:23.849025 17316 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:18:42.430447 17316 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:20:02.893687 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4806\nI0817 20:20:02.893934 17316 solver.cpp:404]     Test net output #1: loss = 2.19087 (* 1 = 2.19087 loss)\nI0817 20:20:04.205160 17316 solver.cpp:228] Iteration 6700, loss = 0.946483\nI0817 20:20:04.205206 17316 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 20:20:04.205230 17316 solver.cpp:244]     Train net output #1: loss = 0.946483 (* 1 = 0.946483 loss)\nI0817 20:20:04.295713 17316 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:22:22.768123 17316 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:23:43.225319 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42204\nI0817 20:23:43.225589 17316 solver.cpp:404]     Test net output #1: loss = 2.60601 (* 1 = 2.60601 loss)\nI0817 20:23:44.536583 17316 solver.cpp:228] Iteration 6800, loss = 1.0651\nI0817 20:23:44.536629 17316 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 20:23:44.536653 17316 solver.cpp:244]     Train net output #1: loss = 1.0651 (* 1 = 1.0651 loss)\nI0817 20:23:44.638443 17316 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:26:03.131487 17316 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:27:23.581034 17316 solver.cpp:404]     Test net output #0: accuracy = 0.47296\nI0817 20:27:23.581284 17316 solver.cpp:404]     Test net output #1: loss = 2.09708 (* 1 = 2.09708 loss)\nI0817 20:27:24.892124 17316 solver.cpp:228] Iteration 6900, loss = 0.721124\nI0817 
20:27:24.892170 17316 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 20:27:24.892194 17316 solver.cpp:244]     Train net output #1: loss = 0.721124 (* 1 = 0.721124 loss)\nI0817 20:27:24.997352 17316 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:29:43.534492 17316 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:31:03.981659 17316 solver.cpp:404]     Test net output #0: accuracy = 0.42892\nI0817 20:31:03.981873 17316 solver.cpp:404]     Test net output #1: loss = 2.50523 (* 1 = 2.50523 loss)\nI0817 20:31:05.293308 17316 solver.cpp:228] Iteration 7000, loss = 0.736245\nI0817 20:31:05.293359 17316 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 20:31:05.293382 17316 solver.cpp:244]     Train net output #1: loss = 0.736245 (* 1 = 0.736245 loss)\nI0817 20:31:05.383746 17316 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:33:23.853382 17316 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:34:44.293409 17316 solver.cpp:404]     Test net output #0: accuracy = 0.49816\nI0817 20:34:44.293634 17316 solver.cpp:404]     Test net output #1: loss = 2.10113 (* 1 = 2.10113 loss)\nI0817 20:34:45.604588 17316 solver.cpp:228] Iteration 7100, loss = 0.850543\nI0817 20:34:45.604635 17316 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 20:34:45.604658 17316 solver.cpp:244]     Train net output #1: loss = 0.850543 (* 1 = 0.850543 loss)\nI0817 20:34:45.706187 17316 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:37:04.143920 17316 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:38:24.603240 17316 solver.cpp:404]     Test net output #0: accuracy = 0.41604\nI0817 20:38:24.603484 17316 solver.cpp:404]     Test net output #1: loss = 2.74143 (* 1 = 2.74143 loss)\nI0817 20:38:25.914180 17316 solver.cpp:228] Iteration 7200, loss = 0.765914\nI0817 20:38:25.914227 17316 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 20:38:25.914252 17316 solver.cpp:244]     Train net 
output #1: loss = 0.765914 (* 1 = 0.765914 loss)\nI0817 20:38:26.012110 17316 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:40:44.490396 17316 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:42:04.949096 17316 solver.cpp:404]     Test net output #0: accuracy = 0.45748\nI0817 20:42:04.949307 17316 solver.cpp:404]     Test net output #1: loss = 2.45352 (* 1 = 2.45352 loss)\nI0817 20:42:06.260807 17316 solver.cpp:228] Iteration 7300, loss = 0.722713\nI0817 20:42:06.260852 17316 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 20:42:06.260877 17316 solver.cpp:244]     Train net output #1: loss = 0.722713 (* 1 = 0.722713 loss)\nI0817 20:42:06.362491 17316 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:44:24.900384 17316 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:45:45.353713 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44644\nI0817 20:45:45.353972 17316 solver.cpp:404]     Test net output #1: loss = 2.51836 (* 1 = 2.51836 loss)\nI0817 20:45:46.665160 17316 solver.cpp:228] Iteration 7400, loss = 0.870452\nI0817 20:45:46.665205 17316 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 20:45:46.665228 17316 solver.cpp:244]     Train net output #1: loss = 0.870452 (* 1 = 0.870452 loss)\nI0817 20:45:46.764358 17316 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:48:05.301455 17316 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:49:25.754611 17316 solver.cpp:404]     Test net output #0: accuracy = 0.44368\nI0817 20:49:25.754812 17316 solver.cpp:404]     Test net output #1: loss = 2.77759 (* 1 = 2.77759 loss)\nI0817 20:49:27.065464 17316 solver.cpp:228] Iteration 7500, loss = 0.602738\nI0817 20:49:27.065510 17316 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 20:49:27.065536 17316 solver.cpp:244]     Train net output #1: loss = 0.602738 (* 1 = 0.602738 loss)\nI0817 20:49:27.168925 17316 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 
20:51:45.711462 17316 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:53:06.164899 17316 solver.cpp:404]     Test net output #0: accuracy = 0.39316\nI0817 20:53:06.165125 17316 solver.cpp:404]     Test net output #1: loss = 3.00828 (* 1 = 3.00828 loss)\nI0817 20:53:07.476086 17316 solver.cpp:228] Iteration 7600, loss = 0.607367\nI0817 20:53:07.476132 17316 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 20:53:07.476155 17316 solver.cpp:244]     Train net output #1: loss = 0.607367 (* 1 = 0.607367 loss)\nI0817 20:53:07.578949 17316 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:55:26.156003 17316 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:56:46.604833 17316 solver.cpp:404]     Test net output #0: accuracy = 0.51452\nI0817 20:56:46.605079 17316 solver.cpp:404]     Test net output #1: loss = 2.19994 (* 1 = 2.19994 loss)\nI0817 20:56:47.915058 17316 solver.cpp:228] Iteration 7700, loss = 0.566538\nI0817 20:56:47.915104 17316 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:56:47.915127 17316 solver.cpp:244]     Train net output #1: loss = 0.566538 (* 1 = 0.566538 loss)\nI0817 20:56:48.019559 17316 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:59:06.490257 17316 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:00:26.928642 17316 solver.cpp:404]     Test net output #0: accuracy = 0.49536\nI0817 21:00:26.928890 17316 solver.cpp:404]     Test net output #1: loss = 2.36244 (* 1 = 2.36244 loss)\nI0817 21:00:28.239027 17316 solver.cpp:228] Iteration 7800, loss = 0.634017\nI0817 21:00:28.239073 17316 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 21:00:28.239097 17316 solver.cpp:244]     Train net output #1: loss = 0.634017 (* 1 = 0.634017 loss)\nI0817 21:00:28.333513 17316 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 21:02:46.780603 17316 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:04:07.234191 17316 solver.cpp:404]     Test net output #0: 
accuracy = 0.49072\nI0817 21:04:07.234452 17316 solver.cpp:404]     Test net output #1: loss = 2.29507 (* 1 = 2.29507 loss)\nI0817 21:04:08.544561 17316 solver.cpp:228] Iteration 7900, loss = 0.566647\nI0817 21:04:08.544605 17316 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 21:04:08.544630 17316 solver.cpp:244]     Train net output #1: loss = 0.566647 (* 1 = 0.566647 loss)\nI0817 21:04:08.646353 17316 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 21:06:27.121232 17316 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:07:47.577738 17316 solver.cpp:404]     Test net output #0: accuracy = 0.50392\nI0817 21:07:47.577980 17316 solver.cpp:404]     Test net output #1: loss = 2.36191 (* 1 = 2.36191 loss)\nI0817 21:07:48.887699 17316 solver.cpp:228] Iteration 8000, loss = 0.632225\nI0817 21:07:48.887743 17316 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 21:07:48.887768 17316 solver.cpp:244]     Train net output #1: loss = 0.632225 (* 1 = 0.632225 loss)\nI0817 21:07:48.980679 17316 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:10:07.500643 17316 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:11:27.958556 17316 solver.cpp:404]     Test net output #0: accuracy = 0.51052\nI0817 21:11:27.958784 17316 solver.cpp:404]     Test net output #1: loss = 2.46884 (* 1 = 2.46884 loss)\nI0817 21:11:29.268618 17316 solver.cpp:228] Iteration 8100, loss = 0.513861\nI0817 21:11:29.268663 17316 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 21:11:29.268687 17316 solver.cpp:244]     Train net output #1: loss = 0.513861 (* 1 = 0.513861 loss)\nI0817 21:11:29.366065 17316 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:13:47.766168 17316 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:15:08.142426 17316 solver.cpp:404]     Test net output #0: accuracy = 0.47804\nI0817 21:15:08.142685 17316 solver.cpp:404]     Test net output #1: loss = 2.65695 (* 1 = 2.65695 loss)\nI0817 21:15:09.452828 
17316 solver.cpp:228] Iteration 8200, loss = 0.462292\nI0817 21:15:09.452873 17316 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 21:15:09.452896 17316 solver.cpp:244]     Train net output #1: loss = 0.462292 (* 1 = 0.462292 loss)\nI0817 21:15:09.544775 17316 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:17:27.969732 17316 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:18:48.348541 17316 solver.cpp:404]     Test net output #0: accuracy = 0.46788\nI0817 21:18:48.348804 17316 solver.cpp:404]     Test net output #1: loss = 2.65382 (* 1 = 2.65382 loss)\nI0817 21:18:49.659929 17316 solver.cpp:228] Iteration 8300, loss = 0.642512\nI0817 21:18:49.659973 17316 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 21:18:49.659997 17316 solver.cpp:244]     Train net output #1: loss = 0.642512 (* 1 = 0.642512 loss)\nI0817 21:18:49.755403 17316 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:21:08.269682 17316 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:22:28.646070 17316 solver.cpp:404]     Test net output #0: accuracy = 0.5216\nI0817 21:22:28.646338 17316 solver.cpp:404]     Test net output #1: loss = 2.26871 (* 1 = 2.26871 loss)\nI0817 21:22:29.956526 17316 solver.cpp:228] Iteration 8400, loss = 0.53602\nI0817 21:22:29.956568 17316 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 21:22:29.956583 17316 solver.cpp:244]     Train net output #1: loss = 0.53602 (* 1 = 0.53602 loss)\nI0817 21:22:30.047034 17316 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:24:48.536792 17316 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:26:08.899556 17316 solver.cpp:404]     Test net output #0: accuracy = 0.4996\nI0817 21:26:08.899775 17316 solver.cpp:404]     Test net output #1: loss = 2.50999 (* 1 = 2.50999 loss)\nI0817 21:26:10.209311 17316 solver.cpp:228] Iteration 8500, loss = 0.442455\nI0817 21:26:10.209352 17316 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 
21:26:10.209367 17316 solver.cpp:244]     Train net output #1: loss = 0.442455 (* 1 = 0.442455 loss)\nI0817 21:26:10.305824 17316 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:28:28.762303 17316 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:29:49.121666 17316 solver.cpp:404]     Test net output #0: accuracy = 0.53444\nI0817 21:29:49.121886 17316 solver.cpp:404]     Test net output #1: loss = 2.22094 (* 1 = 2.22094 loss)\nI0817 21:29:50.431802 17316 solver.cpp:228] Iteration 8600, loss = 0.40633\nI0817 21:29:50.431843 17316 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 21:29:50.431859 17316 solver.cpp:244]     Train net output #1: loss = 0.40633 (* 1 = 0.40633 loss)\nI0817 21:29:50.523190 17316 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:32:09.026741 17316 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:33:29.386178 17316 solver.cpp:404]     Test net output #0: accuracy = 0.51652\nI0817 21:33:29.386430 17316 solver.cpp:404]     Test net output #1: loss = 2.5069 (* 1 = 2.5069 loss)\nI0817 21:33:30.697221 17316 solver.cpp:228] Iteration 8700, loss = 0.3581\nI0817 21:33:30.697264 17316 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 21:33:30.697280 17316 solver.cpp:244]     Train net output #1: loss = 0.3581 (* 1 = 0.3581 loss)\nI0817 21:33:30.801317 17316 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:35:49.373276 17316 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:37:09.756469 17316 solver.cpp:404]     Test net output #0: accuracy = 0.5342\nI0817 21:37:09.756701 17316 solver.cpp:404]     Test net output #1: loss = 2.29812 (* 1 = 2.29812 loss)\nI0817 21:37:11.067483 17316 solver.cpp:228] Iteration 8800, loss = 0.411852\nI0817 21:37:11.067522 17316 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 21:37:11.067544 17316 solver.cpp:244]     Train net output #1: loss = 0.411852 (* 1 = 0.411852 loss)\nI0817 21:37:11.161238 17316 sgd_solver.cpp:166] 
Iteration 8800, lr = 0.796\nI0817 21:39:29.594125 17316 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:40:49.971683 17316 solver.cpp:404]     Test net output #0: accuracy = 0.51832\nI0817 21:40:49.971895 17316 solver.cpp:404]     Test net output #1: loss = 2.53389 (* 1 = 2.53389 loss)\nI0817 21:40:51.282815 17316 solver.cpp:228] Iteration 8900, loss = 0.316828\nI0817 21:40:51.282861 17316 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:40:51.282884 17316 solver.cpp:244]     Train net output #1: loss = 0.316828 (* 1 = 0.316828 loss)\nI0817 21:40:51.376792 17316 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:43:09.823806 17316 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:44:30.216351 17316 solver.cpp:404]     Test net output #0: accuracy = 0.52896\nI0817 21:44:30.216617 17316 solver.cpp:404]     Test net output #1: loss = 2.74308 (* 1 = 2.74308 loss)\nI0817 21:44:31.527276 17316 solver.cpp:228] Iteration 9000, loss = 0.291093\nI0817 21:44:31.527328 17316 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 21:44:31.527354 17316 solver.cpp:244]     Train net output #1: loss = 0.291093 (* 1 = 0.291093 loss)\nI0817 21:44:31.619582 17316 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:46:49.983597 17316 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:48:10.436141 17316 solver.cpp:404]     Test net output #0: accuracy = 0.52372\nI0817 21:48:10.436386 17316 solver.cpp:404]     Test net output #1: loss = 2.46187 (* 1 = 2.46187 loss)\nI0817 21:48:11.747056 17316 solver.cpp:228] Iteration 9100, loss = 0.309366\nI0817 21:48:11.747104 17316 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 21:48:11.747128 17316 solver.cpp:244]     Train net output #1: loss = 0.309366 (* 1 = 0.309366 loss)\nI0817 21:48:11.844573 17316 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:50:30.391145 17316 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:51:50.839481 17316 
solver.cpp:404]     Test net output #0: accuracy = 0.50892\nI0817 21:51:50.839701 17316 solver.cpp:404]     Test net output #1: loss = 2.81515 (* 1 = 2.81515 loss)\nI0817 21:51:52.151758 17316 solver.cpp:228] Iteration 9200, loss = 0.240005\nI0817 21:51:52.151794 17316 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:51:52.151809 17316 solver.cpp:244]     Train net output #1: loss = 0.240005 (* 1 = 0.240005 loss)\nI0817 21:51:52.243943 17316 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:54:10.660111 17316 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:55:31.104604 17316 solver.cpp:404]     Test net output #0: accuracy = 0.5942\nI0817 21:55:31.104823 17316 solver.cpp:404]     Test net output #1: loss = 1.98868 (* 1 = 1.98868 loss)\nI0817 21:55:32.415405 17316 solver.cpp:228] Iteration 9300, loss = 0.306593\nI0817 21:55:32.415443 17316 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 21:55:32.415459 17316 solver.cpp:244]     Train net output #1: loss = 0.306593 (* 1 = 0.306593 loss)\nI0817 21:55:32.518270 17316 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:57:51.000159 17316 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:59:11.425489 17316 solver.cpp:404]     Test net output #0: accuracy = 0.58504\nI0817 21:59:11.425732 17316 solver.cpp:404]     Test net output #1: loss = 2.20105 (* 1 = 2.20105 loss)\nI0817 21:59:12.736315 17316 solver.cpp:228] Iteration 9400, loss = 0.133999\nI0817 21:59:12.736358 17316 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:59:12.736374 17316 solver.cpp:244]     Train net output #1: loss = 0.133999 (* 1 = 0.133999 loss)\nI0817 21:59:12.834774 17316 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 22:01:31.320868 17316 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:02:51.763527 17316 solver.cpp:404]     Test net output #0: accuracy = 0.59864\nI0817 22:02:51.763787 17316 solver.cpp:404]     Test net output #1: loss = 2.0832 
(* 1 = 2.0832 loss)\nI0817 22:02:53.074620 17316 solver.cpp:228] Iteration 9500, loss = 0.207673\nI0817 22:02:53.074654 17316 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:02:53.074669 17316 solver.cpp:244]     Train net output #1: loss = 0.207673 (* 1 = 0.207673 loss)\nI0817 22:02:53.178139 17316 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 22:05:11.766629 17316 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:06:32.198163 17316 solver.cpp:404]     Test net output #0: accuracy = 0.6184\nI0817 22:06:32.198377 17316 solver.cpp:404]     Test net output #1: loss = 2.04836 (* 1 = 2.04836 loss)\nI0817 22:06:33.509163 17316 solver.cpp:228] Iteration 9600, loss = 0.101533\nI0817 22:06:33.509199 17316 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:06:33.509214 17316 solver.cpp:244]     Train net output #1: loss = 0.101534 (* 1 = 0.101534 loss)\nI0817 22:06:33.611841 17316 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:08:52.316073 17316 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:10:12.755339 17316 solver.cpp:404]     Test net output #0: accuracy = 0.64968\nI0817 22:10:12.755587 17316 solver.cpp:404]     Test net output #1: loss = 1.8247 (* 1 = 1.8247 loss)\nI0817 22:10:14.066520 17316 solver.cpp:228] Iteration 9700, loss = 0.026426\nI0817 22:10:14.066565 17316 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:10:14.066581 17316 solver.cpp:244]     Train net output #1: loss = 0.0264261 (* 1 = 0.0264261 loss)\nI0817 22:10:14.169028 17316 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:12:32.598492 17316 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:13:53.042626 17316 solver.cpp:404]     Test net output #0: accuracy = 0.67124\nI0817 22:13:53.042881 17316 solver.cpp:404]     Test net output #1: loss = 1.64488 (* 1 = 1.64488 loss)\nI0817 22:13:54.353662 17316 solver.cpp:228] Iteration 9800, loss = 0.0237104\nI0817 22:13:54.353706 17316 solver.cpp:244]    
 Train net output #0: accuracy = 0.992\nI0817 22:13:54.353723 17316 solver.cpp:244]     Train net output #1: loss = 0.0237105 (* 1 = 0.0237105 loss)\nI0817 22:13:54.452744 17316 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:16:12.983973 17316 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:17:33.433831 17316 solver.cpp:404]     Test net output #0: accuracy = 0.68136\nI0817 22:17:33.434067 17316 solver.cpp:404]     Test net output #1: loss = 1.58979 (* 1 = 1.58979 loss)\nI0817 22:17:34.744240 17316 solver.cpp:228] Iteration 9900, loss = 0.00710779\nI0817 22:17:34.744284 17316 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:17:34.744300 17316 solver.cpp:244]     Train net output #1: loss = 0.00710787 (* 1 = 0.00710787 loss)\nI0817 22:17:34.845578 17316 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:19:53.293653 17316 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kCifar100Fig8_iter_10000.caffemodel\nI0817 22:19:53.503865 17316 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kCifar100Fig8_iter_10000.solverstate\nI0817 22:19:53.941906 17316 solver.cpp:317] Iteration 10000, loss = 0.0057193\nI0817 22:19:53.941946 17316 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:21:14.384752 17316 solver.cpp:404]     Test net output #0: accuracy = 0.682\nI0817 22:21:14.385005 17316 solver.cpp:404]     Test net output #1: loss = 1.59289 (* 1 = 1.59289 loss)\nI0817 22:21:14.385017 17316 solver.cpp:322] Optimization Done.\nI0817 22:21:19.589154 17316 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kFig1a",
    "content": "I0817 16:02:51.154722 17550 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:02:51.157384 17550 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:02:51.158605 17550 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:02:51.159819 17550 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:02:51.161043 17550 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:02:51.162274 17550 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:02:51.163516 17550 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:02:51.164749 17550 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:02:51.165985 17550 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:02:51.582952 17550 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kFig1a\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:02:51.587083 17550 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:02:51.605506 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:51.605587 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:51.606643 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:02:51.606698 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:02:51.606724 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:02:51.606744 17550 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:02:51.606763 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:02:51.606781 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:02:51.606799 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:02:51.606819 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:02:51.606839 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:02:51.606858 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:02:51.606878 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:02:51.606894 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:02:51.606914 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:02:51.606932 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:02:51.606953 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:02:51.606971 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:02:51.606989 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:02:51.607008 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:02:51.607028 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:02:51.607046 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:02:51.607076 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:02:51.607096 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:02:51.607120 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:02:51.607141 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:02:51.607169 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:02:51.607187 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:02:51.607205 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:02:51.607223 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:02:51.607240 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:02:51.607260 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:02:51.607280 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:02:51.607298 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:02:51.607317 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:02:51.607333 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:02:51.607353 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:02:51.607370 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:02:51.607391 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:02:51.607409 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:02:51.607429 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:02:51.607445 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:02:51.607471 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:02:51.607487 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:02:51.607506 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:02:51.607523 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:02:51.607543 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:02:51.607563 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:02:51.607580 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:02:51.607597 17550 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:02:51.607615 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:02:51.607633 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:02:51.607650 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:02:51.607678 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:02:51.607699 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:02:51.607717 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:02:51.607738 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:02:51.607754 17550 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:02:51.609508 17550 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0817 16:02:51.611572 17550 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:02:51.612761 17550 net.cpp:100] Creating Layer dataLayer\nI0817 16:02:51.612833 17550 net.cpp:408] dataLayer -> data_top\nI0817 16:02:51.613028 17550 net.cpp:408] dataLayer -> label\nI0817 16:02:51.613157 17550 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:02:51.623277 17555 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:02:51.645619 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:51.653071 17550 net.cpp:150] Setting up dataLayer\nI0817 16:02:51.653153 17550 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:02:51.653175 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.653185 17550 net.cpp:165] Memory required for data: 1536500\nI0817 16:02:51.653210 17550 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:02:51.653232 17550 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:02:51.653247 17550 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:02:51.653285 17550 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:02:51.653311 17550 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:02:51.653421 17550 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:02:51.653445 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.653458 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.653467 17550 net.cpp:165] Memory required for data: 1537500\nI0817 16:02:51.653478 17550 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:02:51.653555 17550 
net.cpp:100] Creating Layer pre_conv\nI0817 16:02:51.653570 17550 net.cpp:434] pre_conv <- data_top\nI0817 16:02:51.653587 17550 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:02:51.655395 17550 net.cpp:150] Setting up pre_conv\nI0817 16:02:51.655421 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.655431 17550 net.cpp:165] Memory required for data: 9729500\nI0817 16:02:51.655513 17550 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:02:51.655623 17550 net.cpp:100] Creating Layer pre_bn\nI0817 16:02:51.655640 17550 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:02:51.655661 17550 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:02:51.655724 17556 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:51.656008 17550 net.cpp:150] Setting up pre_bn\nI0817 16:02:51.656031 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.656040 17550 net.cpp:165] Memory required for data: 17921500\nI0817 16:02:51.656077 17550 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:51.656143 17550 net.cpp:100] Creating Layer pre_scale\nI0817 16:02:51.656165 17550 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:02:51.656185 17550 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:02:51.656396 17550 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:51.656678 17550 net.cpp:150] Setting up pre_scale\nI0817 16:02:51.656702 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.656713 17550 net.cpp:165] Memory required for data: 26113500\nI0817 16:02:51.656733 17550 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:02:51.656792 17550 net.cpp:100] Creating Layer pre_relu\nI0817 16:02:51.656807 17550 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:02:51.656822 17550 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:02:51.656841 17550 net.cpp:150] Setting up pre_relu\nI0817 16:02:51.656857 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.656865 17550 net.cpp:165] Memory required for data: 
34305500\nI0817 16:02:51.656875 17550 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:02:51.656895 17550 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:02:51.656908 17550 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:02:51.656924 17550 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:02:51.656944 17550 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:02:51.657029 17550 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:02:51.657050 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.657064 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.657073 17550 net.cpp:165] Memory required for data: 50689500\nI0817 16:02:51.657084 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:02:51.657104 17550 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:02:51.657116 17550 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:02:51.657138 17550 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:02:51.657515 17550 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:02:51.657534 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.657544 17550 net.cpp:165] Memory required for data: 58881500\nI0817 16:02:51.657567 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:02:51.657588 17550 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:02:51.657599 17550 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:02:51.657620 17550 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:02:51.657905 17550 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:02:51.657925 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.657935 17550 net.cpp:165] Memory required for data: 67073500\nI0817 16:02:51.657958 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:51.657979 17550 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:02:51.657991 17550 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:02:51.658006 17550 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.658088 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:51.658274 17550 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:02:51.658294 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.658303 17550 net.cpp:165] Memory required for data: 75265500\nI0817 16:02:51.658323 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:02:51.658351 17550 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:02:51.658363 17550 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:02:51.658387 17550 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.658406 17550 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:02:51.658421 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.658430 17550 net.cpp:165] Memory required for data: 83457500\nI0817 16:02:51.658440 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:02:51.658468 17550 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:02:51.658480 17550 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:02:51.658502 17550 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:02:51.658854 17550 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:02:51.658874 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.658884 17550 net.cpp:165] Memory required for data: 91649500\nI0817 16:02:51.658900 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:02:51.658918 17550 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:02:51.658929 17550 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:02:51.658946 17550 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:02:51.659225 17550 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:02:51.659245 17550 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.659255 17550 net.cpp:165] Memory required for data: 99841500\nI0817 16:02:51.659286 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:51.659306 17550 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:02:51.659322 17550 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:02:51.659344 17550 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.659436 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:51.659612 17550 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:02:51.659631 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.659641 17550 net.cpp:165] Memory required for data: 108033500\nI0817 16:02:51.659659 17550 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:02:51.659728 17550 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:02:51.659742 17550 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:02:51.659754 17550 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:02:51.659775 17550 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:02:51.659867 17550 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:02:51.659885 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.659895 17550 net.cpp:165] Memory required for data: 116225500\nI0817 16:02:51.659905 17550 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:02:51.659925 17550 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:02:51.659939 17550 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:02:51.659951 17550 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.659971 17550 net.cpp:150] Setting up L1_b1_relu\nI0817 16:02:51.659986 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.659996 17550 net.cpp:165] Memory required for data: 124417500\nI0817 16:02:51.660006 17550 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.660022 17550 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.660032 17550 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:02:51.660048 17550 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:51.660068 17550 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:51.660153 17550 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.660174 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.660187 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.660207 17550 net.cpp:165] Memory required for data: 140801500\nI0817 16:02:51.660220 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:02:51.660245 17550 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:02:51.660259 17550 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:51.660277 17550 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:02:51.660621 17550 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:02:51.660640 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.660650 17550 net.cpp:165] Memory required for data: 148993500\nI0817 16:02:51.660668 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:02:51.660691 17550 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:02:51.660704 17550 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:02:51.660722 17550 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:02:51.660990 17550 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:02:51.661010 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.661020 17550 net.cpp:165] Memory required for data: 157185500\nI0817 16:02:51.661042 17550 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:02:51.661058 17550 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:02:51.661069 17550 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:02:51.661084 17550 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.661177 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:02:51.661348 17550 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:02:51.661366 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.661376 17550 net.cpp:165] Memory required for data: 165377500\nI0817 16:02:51.661394 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:02:51.661412 17550 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:02:51.661422 17550 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:02:51.661444 17550 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.661464 17550 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:02:51.661478 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.661489 17550 net.cpp:165] Memory required for data: 173569500\nI0817 16:02:51.661497 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:02:51.661521 17550 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:02:51.661535 17550 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:02:51.661556 17550 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:02:51.661906 17550 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:02:51.661924 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.661934 17550 net.cpp:165] Memory required for data: 181761500\nI0817 16:02:51.661952 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:02:51.661973 17550 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:02:51.661985 17550 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:02:51.662003 17550 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:02:51.662278 17550 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:02:51.662297 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.662307 17550 net.cpp:165] Memory required for data: 189953500\nI0817 16:02:51.662336 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:51.662358 17550 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:02:51.662370 17550 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:02:51.662386 17550 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.662475 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:51.662653 17550 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:02:51.662672 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.662681 17550 net.cpp:165] Memory required for data: 198145500\nI0817 16:02:51.662699 17550 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:02:51.662725 17550 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:02:51.662739 17550 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:02:51.662751 17550 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:51.662771 17550 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:02:51.662827 17550 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:02:51.662847 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.662858 17550 net.cpp:165] Memory required for data: 206337500\nI0817 16:02:51.662868 17550 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:02:51.662881 17550 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:02:51.662892 17550 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:02:51.662906 17550 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.662925 17550 net.cpp:150] Setting up L1_b2_relu\nI0817 16:02:51.662940 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.662948 17550 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:02:51.662958 17550 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.662979 17550 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.662989 17550 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:02:51.663004 17550 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:51.663024 17550 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:51.663099 17550 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.663125 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.663139 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.663161 17550 net.cpp:165] Memory required for data: 230913500\nI0817 16:02:51.663173 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:02:51.663194 17550 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:02:51.663205 17550 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:51.663223 17550 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:02:51.663568 17550 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:02:51.663586 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.663596 17550 net.cpp:165] Memory required for data: 239105500\nI0817 16:02:51.663614 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:02:51.663636 17550 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:02:51.663648 17550 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:02:51.663664 17550 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:02:51.663935 17550 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:02:51.663954 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.663964 17550 net.cpp:165] Memory required for data: 
247297500\nI0817 16:02:51.663985 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:51.664006 17550 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:02:51.664018 17550 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:02:51.664033 17550 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.664115 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:51.664295 17550 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:02:51.664314 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.664324 17550 net.cpp:165] Memory required for data: 255489500\nI0817 16:02:51.664343 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:02:51.664358 17550 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:02:51.664369 17550 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:02:51.664388 17550 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.664408 17550 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:02:51.664433 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.664443 17550 net.cpp:165] Memory required for data: 263681500\nI0817 16:02:51.664454 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:02:51.664479 17550 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:02:51.664491 17550 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:02:51.664510 17550 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:02:51.664851 17550 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:02:51.664870 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.664880 17550 net.cpp:165] Memory required for data: 271873500\nI0817 16:02:51.664897 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:02:51.664923 17550 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:02:51.664937 17550 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:02:51.664954 17550 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:02:51.665235 17550 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:02:51.665254 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.665264 17550 net.cpp:165] Memory required for data: 280065500\nI0817 16:02:51.665285 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:51.665307 17550 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:02:51.665319 17550 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:02:51.665335 17550 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.665421 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:51.665596 17550 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:02:51.665614 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.665624 17550 net.cpp:165] Memory required for data: 288257500\nI0817 16:02:51.665642 17550 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:02:51.665659 17550 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:02:51.665670 17550 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:02:51.665683 17550 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:51.665704 17550 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:02:51.665760 17550 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:02:51.665778 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.665788 17550 net.cpp:165] Memory required for data: 296449500\nI0817 16:02:51.665798 17550 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:02:51.665813 17550 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:02:51.665825 17550 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:02:51.665839 17550 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.665858 17550 net.cpp:150] Setting up L1_b3_relu\nI0817 16:02:51.665874 17550 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:02:51.665882 17550 net.cpp:165] Memory required for data: 304641500\nI0817 16:02:51.665892 17550 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.665911 17550 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.665923 17550 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:02:51.665940 17550 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:51.665959 17550 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:51.666039 17550 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.666061 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.666075 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.666085 17550 net.cpp:165] Memory required for data: 321025500\nI0817 16:02:51.666095 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:02:51.666115 17550 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:02:51.666127 17550 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:51.666167 17550 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:02:51.666530 17550 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:02:51.666550 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.666559 17550 net.cpp:165] Memory required for data: 329217500\nI0817 16:02:51.666577 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:02:51.666594 17550 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:02:51.666605 17550 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:02:51.666621 17550 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:02:51.666895 17550 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:02:51.666914 17550 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:02:51.666924 17550 net.cpp:165] Memory required for data: 337409500\nI0817 16:02:51.666945 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:51.666967 17550 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:02:51.666980 17550 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:02:51.666996 17550 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.667083 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:51.667263 17550 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:02:51.667282 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.667291 17550 net.cpp:165] Memory required for data: 345601500\nI0817 16:02:51.667309 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:02:51.667325 17550 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:02:51.667335 17550 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:02:51.667354 17550 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.667374 17550 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:02:51.667389 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.667398 17550 net.cpp:165] Memory required for data: 353793500\nI0817 16:02:51.667408 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:02:51.667433 17550 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:02:51.667445 17550 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:02:51.667462 17550 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:02:51.667814 17550 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:02:51.667834 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.667842 17550 net.cpp:165] Memory required for data: 361985500\nI0817 16:02:51.667861 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:02:51.667886 17550 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:02:51.667901 17550 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:02:51.667917 17550 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:02:51.668193 17550 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:02:51.668213 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.668223 17550 net.cpp:165] Memory required for data: 370177500\nI0817 16:02:51.668242 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:51.668262 17550 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:02:51.668274 17550 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:02:51.668294 17550 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.668382 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:51.668557 17550 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:02:51.668576 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.668586 17550 net.cpp:165] Memory required for data: 378369500\nI0817 16:02:51.668604 17550 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:02:51.668624 17550 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:02:51.668637 17550 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:02:51.668650 17550 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:51.668666 17550 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:02:51.668735 17550 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:02:51.668752 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.668762 17550 net.cpp:165] Memory required for data: 386561500\nI0817 16:02:51.668773 17550 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:02:51.668787 17550 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:02:51.668799 17550 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:02:51.668818 17550 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.668838 17550 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:02:51.668853 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.668862 17550 net.cpp:165] Memory required for data: 394753500\nI0817 16:02:51.668871 17550 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.668886 17550 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.668897 17550 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:02:51.668913 17550 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:51.668932 17550 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:51.669011 17550 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.669030 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.669044 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.669052 17550 net.cpp:165] Memory required for data: 411137500\nI0817 16:02:51.669062 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:02:51.669082 17550 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:02:51.669095 17550 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:51.669118 17550 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:02:51.669481 17550 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:02:51.669499 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.669509 17550 net.cpp:165] Memory required for data: 419329500\nI0817 16:02:51.669545 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:02:51.669564 17550 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:02:51.669575 17550 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:02:51.669596 17550 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:02:51.669881 17550 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:02:51.669900 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.669909 17550 net.cpp:165] Memory required for data: 427521500\nI0817 16:02:51.669929 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:51.669950 17550 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:02:51.669962 17550 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:02:51.669978 17550 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.670066 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:51.670249 17550 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:02:51.670267 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.670277 17550 net.cpp:165] Memory required for data: 435713500\nI0817 16:02:51.670295 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:02:51.670310 17550 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:02:51.670321 17550 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:02:51.670341 17550 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.670361 17550 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:02:51.670375 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.670385 17550 net.cpp:165] Memory required for data: 443905500\nI0817 16:02:51.670395 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:02:51.670418 17550 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:02:51.670431 17550 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:02:51.670462 17550 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:02:51.670824 17550 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:02:51.670843 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.670853 17550 net.cpp:165] Memory required for data: 452097500\nI0817 16:02:51.670871 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:02:51.670888 17550 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:02:51.670899 17550 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:02:51.670915 17550 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:02:51.671200 17550 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:02:51.671221 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.671231 17550 net.cpp:165] Memory required for data: 460289500\nI0817 16:02:51.671252 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:51.671272 17550 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:02:51.671284 17550 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:02:51.671300 17550 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.671386 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:51.671561 17550 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:02:51.671581 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.671591 17550 net.cpp:165] Memory required for data: 468481500\nI0817 16:02:51.671609 17550 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:02:51.671627 17550 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:02:51.671638 17550 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:02:51.671650 17550 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:51.671671 17550 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:02:51.671725 17550 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:02:51.671747 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.671757 17550 net.cpp:165] Memory required for data: 476673500\nI0817 16:02:51.671767 17550 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:02:51.671783 17550 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:02:51.671794 17550 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:02:51.671808 17550 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.671826 17550 net.cpp:150] Setting up L1_b5_relu\nI0817 16:02:51.671840 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.671850 17550 net.cpp:165] Memory required for data: 484865500\nI0817 16:02:51.671860 17550 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.671878 17550 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.671890 17550 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:02:51.671905 17550 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:51.671926 17550 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:51.672001 17550 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.672020 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.672034 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.672044 17550 net.cpp:165] Memory required for data: 501249500\nI0817 16:02:51.672053 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:02:51.672073 17550 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:02:51.672086 17550 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:51.672107 17550 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:02:51.672464 17550 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:02:51.672483 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.672493 17550 net.cpp:165] Memory required for data: 509441500\nI0817 16:02:51.672520 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:02:51.672539 17550 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:02:51.672551 17550 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:02:51.672567 17550 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:02:51.672849 17550 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:02:51.672869 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.672879 17550 net.cpp:165] Memory required for data: 517633500\nI0817 16:02:51.672901 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:51.672924 17550 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:02:51.672935 17550 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:02:51.672951 17550 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.673040 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:51.673225 17550 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:02:51.673244 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.673254 17550 net.cpp:165] Memory required for data: 525825500\nI0817 16:02:51.673272 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:02:51.673288 17550 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:02:51.673300 17550 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:02:51.673318 17550 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.673338 17550 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:02:51.673353 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.673364 17550 net.cpp:165] Memory required for data: 534017500\nI0817 16:02:51.673373 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:02:51.673398 17550 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:02:51.673411 17550 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:02:51.673429 17550 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:02:51.673781 17550 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:02:51.673801 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.673810 17550 net.cpp:165] Memory required for data: 542209500\nI0817 16:02:51.673828 17550 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:02:51.673849 17550 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:02:51.673862 17550 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:02:51.673878 17550 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:02:51.674161 17550 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:02:51.674180 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.674190 17550 net.cpp:165] Memory required for data: 550401500\nI0817 16:02:51.674211 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:51.674228 17550 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:02:51.674239 17550 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:02:51.674259 17550 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.674345 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:51.674523 17550 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:02:51.674542 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.674552 17550 net.cpp:165] Memory required for data: 558593500\nI0817 16:02:51.674569 17550 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:02:51.674597 17550 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:02:51.674612 17550 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:02:51.674625 17550 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:51.674644 17550 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:02:51.674700 17550 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:02:51.674717 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.674727 17550 net.cpp:165] Memory required for data: 566785500\nI0817 16:02:51.674738 17550 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:02:51.674768 17550 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:02:51.674782 17550 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:02:51.674796 17550 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.674815 17550 net.cpp:150] Setting up L1_b6_relu\nI0817 16:02:51.674830 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.674840 17550 net.cpp:165] Memory required for data: 574977500\nI0817 16:02:51.674850 17550 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.674865 17550 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.674875 17550 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:02:51.674890 17550 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:51.674909 17550 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:51.674991 17550 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.675011 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.675025 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.675035 17550 net.cpp:165] Memory required for data: 591361500\nI0817 16:02:51.675045 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:02:51.675071 17550 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:02:51.675084 17550 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:51.675102 17550 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:02:51.675464 17550 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:02:51.675484 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.675494 17550 net.cpp:165] Memory required for data: 599553500\nI0817 16:02:51.675511 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:02:51.675534 17550 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:02:51.675547 17550 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:02:51.675565 17550 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:02:51.675838 17550 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:02:51.675861 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.675871 17550 net.cpp:165] Memory required for data: 607745500\nI0817 16:02:51.675892 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:51.675909 17550 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:02:51.675920 17550 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:02:51.675935 17550 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.676023 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:51.676203 17550 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:02:51.676221 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.676231 17550 net.cpp:165] Memory required for data: 615937500\nI0817 16:02:51.676249 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:02:51.676268 17550 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:02:51.676281 17550 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:02:51.676302 17550 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.676322 17550 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:02:51.676337 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.676347 17550 net.cpp:165] Memory required for data: 624129500\nI0817 16:02:51.676357 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:02:51.676378 17550 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:02:51.676390 17550 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:02:51.676411 17550 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:02:51.676759 17550 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:02:51.676779 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.676789 17550 
net.cpp:165] Memory required for data: 632321500\nI0817 16:02:51.676815 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:02:51.676834 17550 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:02:51.676846 17550 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:02:51.676869 17550 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:02:51.677161 17550 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:02:51.677181 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.677191 17550 net.cpp:165] Memory required for data: 640513500\nI0817 16:02:51.677211 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:51.677233 17550 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:02:51.677247 17550 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:02:51.677263 17550 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.677346 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:51.677521 17550 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:02:51.677541 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.677551 17550 net.cpp:165] Memory required for data: 648705500\nI0817 16:02:51.677568 17550 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:02:51.677589 17550 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:02:51.677603 17550 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:02:51.677616 17550 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:51.677633 17550 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:02:51.677695 17550 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:02:51.677714 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.677723 17550 net.cpp:165] Memory required for data: 656897500\nI0817 16:02:51.677733 17550 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:02:51.677748 17550 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:02:51.677759 17550 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:02:51.677779 17550 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.677799 17550 net.cpp:150] Setting up L1_b7_relu\nI0817 16:02:51.677814 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.677824 17550 net.cpp:165] Memory required for data: 665089500\nI0817 16:02:51.677834 17550 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.677846 17550 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.677857 17550 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:02:51.677873 17550 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:51.677892 17550 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:51.677973 17550 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.677992 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.678006 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.678015 17550 net.cpp:165] Memory required for data: 681473500\nI0817 16:02:51.678025 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:02:51.678046 17550 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:02:51.678059 17550 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:51.678081 17550 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:02:51.678444 17550 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:02:51.678463 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.678472 17550 net.cpp:165] Memory required for data: 689665500\nI0817 16:02:51.678491 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:02:51.678508 17550 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:02:51.678520 17550 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:02:51.678544 17550 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:02:51.678830 17550 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:02:51.678861 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.678871 17550 net.cpp:165] Memory required for data: 697857500\nI0817 16:02:51.678894 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:51.678910 17550 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:02:51.678920 17550 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:02:51.678936 17550 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.679025 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:51.679213 17550 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:02:51.679231 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.679241 17550 net.cpp:165] Memory required for data: 706049500\nI0817 16:02:51.679260 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:02:51.679280 17550 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:02:51.679291 17550 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:02:51.679306 17550 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.679325 17550 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:02:51.679340 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.679349 17550 net.cpp:165] Memory required for data: 714241500\nI0817 16:02:51.679359 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:02:51.679383 17550 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:02:51.679397 17550 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:02:51.679419 17550 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:02:51.679774 17550 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:02:51.679792 17550 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.679801 17550 net.cpp:165] Memory required for data: 722433500\nI0817 16:02:51.679819 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:02:51.679836 17550 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:02:51.679847 17550 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:02:51.679868 17550 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:02:51.680163 17550 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:02:51.680183 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.680193 17550 net.cpp:165] Memory required for data: 730625500\nI0817 16:02:51.680215 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:51.680236 17550 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:02:51.680248 17550 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:02:51.680265 17550 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.680348 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:51.680524 17550 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:02:51.680543 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.680553 17550 net.cpp:165] Memory required for data: 738817500\nI0817 16:02:51.680572 17550 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:02:51.680593 17550 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:02:51.680605 17550 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:02:51.680619 17550 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:51.680634 17550 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:02:51.680693 17550 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:02:51.680711 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.680719 17550 net.cpp:165] Memory required for data: 747009500\nI0817 16:02:51.680729 17550 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:02:51.680744 17550 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:02:51.680757 17550 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:02:51.680775 17550 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.680796 17550 net.cpp:150] Setting up L1_b8_relu\nI0817 16:02:51.680810 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.680829 17550 net.cpp:165] Memory required for data: 755201500\nI0817 16:02:51.680840 17550 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.680852 17550 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.680863 17550 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:02:51.680879 17550 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:51.680899 17550 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:51.680979 17550 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.680999 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.681012 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.681021 17550 net.cpp:165] Memory required for data: 771585500\nI0817 16:02:51.681031 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:02:51.681052 17550 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:02:51.681064 17550 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:51.681087 17550 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:02:51.681462 17550 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:02:51.681483 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.681491 17550 net.cpp:165] Memory required for data: 779777500\nI0817 16:02:51.681510 17550 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:02:51.681535 17550 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:02:51.681548 17550 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:02:51.681571 17550 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:02:51.681860 17550 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:02:51.681879 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.681890 17550 net.cpp:165] Memory required for data: 787969500\nI0817 16:02:51.681910 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:51.681929 17550 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:02:51.681941 17550 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:02:51.681955 17550 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.682047 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:51.682234 17550 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:02:51.682253 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.682262 17550 net.cpp:165] Memory required for data: 796161500\nI0817 16:02:51.682279 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:02:51.682299 17550 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:02:51.682312 17550 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:02:51.682327 17550 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.682345 17550 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:02:51.682360 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.682369 17550 net.cpp:165] Memory required for data: 804353500\nI0817 16:02:51.682379 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:02:51.682404 17550 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:02:51.682417 17550 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:02:51.682438 17550 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:02:51.682803 17550 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:02:51.682822 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.682832 17550 net.cpp:165] Memory required for data: 812545500\nI0817 16:02:51.682850 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:02:51.682871 17550 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:02:51.682884 17550 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:02:51.682901 17550 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:02:51.683203 17550 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:02:51.683223 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.683233 17550 net.cpp:165] Memory required for data: 820737500\nI0817 16:02:51.683284 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:51.683301 17550 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:02:51.683313 17550 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:02:51.683333 17550 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.683421 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:51.683599 17550 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:02:51.683619 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.683629 17550 net.cpp:165] Memory required for data: 828929500\nI0817 16:02:51.683646 17550 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:02:51.683668 17550 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:02:51.683681 17550 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:02:51.683696 17550 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:51.683710 17550 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:02:51.683763 17550 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:02:51.683781 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.683791 17550 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:02:51.683801 17550 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:02:51.683815 17550 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:02:51.683826 17550 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:02:51.683850 17550 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.683871 17550 net.cpp:150] Setting up L1_b9_relu\nI0817 16:02:51.683886 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.683895 17550 net.cpp:165] Memory required for data: 845313500\nI0817 16:02:51.683905 17550 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.683926 17550 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.683938 17550 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:02:51.683954 17550 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:51.683974 17550 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:51.684056 17550 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.684074 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.684087 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.684096 17550 net.cpp:165] Memory required for data: 861697500\nI0817 16:02:51.684106 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:02:51.684129 17550 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:02:51.684144 17550 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:51.684170 17550 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:02:51.684535 17550 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:02:51.684554 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.684563 17550 net.cpp:165] Memory required for data: 
863745500\nI0817 16:02:51.684581 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:02:51.684603 17550 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:02:51.684615 17550 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:02:51.684633 17550 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:02:51.684919 17550 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:02:51.684953 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.684964 17550 net.cpp:165] Memory required for data: 865793500\nI0817 16:02:51.684986 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:51.685003 17550 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:02:51.685022 17550 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:02:51.685039 17550 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.685132 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:51.685318 17550 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:02:51.685336 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.685346 17550 net.cpp:165] Memory required for data: 867841500\nI0817 16:02:51.685365 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:02:51.685380 17550 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:02:51.685391 17550 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:02:51.685411 17550 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.685432 17550 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:02:51.685447 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.685457 17550 net.cpp:165] Memory required for data: 869889500\nI0817 16:02:51.685467 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:02:51.685492 17550 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:02:51.685505 17550 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:02:51.685523 17550 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:02:51.685883 17550 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:02:51.685901 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.685911 17550 net.cpp:165] Memory required for data: 871937500\nI0817 16:02:51.685927 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:02:51.685952 17550 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:02:51.685966 17550 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:02:51.685983 17550 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:02:51.686250 17550 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:02:51.686269 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.686278 17550 net.cpp:165] Memory required for data: 873985500\nI0817 16:02:51.686298 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:51.686318 17550 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:02:51.686331 17550 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:02:51.686347 17550 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.686439 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:51.686619 17550 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:02:51.686637 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.686647 17550 net.cpp:165] Memory required for data: 876033500\nI0817 16:02:51.686664 17550 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:02:51.686688 17550 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:02:51.686702 17550 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:51.686723 17550 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:02:51.686830 17550 net.cpp:150] Setting up L2_b1_pool\nI0817 16:02:51.686849 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.686859 17550 net.cpp:165] Memory required for data: 878081500\nI0817 16:02:51.686869 17550 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:02:51.686885 17550 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:02:51.686897 17550 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:02:51.686915 17550 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:02:51.686933 17550 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:02:51.686988 17550 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:02:51.687008 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.687018 17550 net.cpp:165] Memory required for data: 880129500\nI0817 16:02:51.687028 17550 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:02:51.687042 17550 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:02:51.687053 17550 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:02:51.687072 17550 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.687101 17550 net.cpp:150] Setting up L2_b1_relu\nI0817 16:02:51.687117 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.687126 17550 net.cpp:165] Memory required for data: 882177500\nI0817 16:02:51.687137 17550 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:02:51.687207 17550 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:02:51.687227 17550 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:02:51.689756 17550 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:02:51.689779 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.689790 17550 net.cpp:165] Memory required for data: 884225500\nI0817 16:02:51.689800 17550 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:02:51.689816 17550 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:02:51.689828 17550 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:02:51.689841 17550 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:02:51.689862 17550 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:02:51.689972 17550 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:02:51.689993 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.690003 17550 net.cpp:165] Memory required for data: 888321500\nI0817 16:02:51.690013 17550 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.690029 17550 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.690040 17550 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:02:51.690060 17550 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:51.690081 17550 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:51.690177 17550 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.690196 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.690210 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.690219 17550 net.cpp:165] Memory required for data: 896513500\nI0817 16:02:51.690229 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:02:51.690254 17550 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:02:51.690268 17550 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:51.690286 17550 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:02:51.691815 17550 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:02:51.691838 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.691848 17550 net.cpp:165] Memory required for data: 900609500\nI0817 16:02:51.691866 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:02:51.691889 17550 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:02:51.691900 17550 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:02:51.691922 17550 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:02:51.692212 17550 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:02:51.692235 17550 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:02:51.692246 17550 net.cpp:165] Memory required for data: 904705500\nI0817 16:02:51.692268 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:51.692286 17550 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:02:51.692297 17550 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:02:51.692314 17550 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.692410 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:51.692595 17550 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:02:51.692615 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.692623 17550 net.cpp:165] Memory required for data: 908801500\nI0817 16:02:51.692642 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:02:51.692658 17550 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:02:51.692670 17550 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:02:51.692689 17550 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.692719 17550 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:02:51.692736 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.692746 17550 net.cpp:165] Memory required for data: 912897500\nI0817 16:02:51.692756 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:02:51.692781 17550 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:02:51.692795 17550 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:02:51.692813 17550 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:02:51.693331 17550 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:02:51.693351 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.693361 17550 net.cpp:165] Memory required for data: 916993500\nI0817 16:02:51.693379 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:02:51.693401 17550 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:02:51.693414 
17550 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:02:51.693431 17550 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:02:51.693713 17550 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:02:51.693732 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.693742 17550 net.cpp:165] Memory required for data: 921089500\nI0817 16:02:51.693763 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:51.693784 17550 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:02:51.693796 17550 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:02:51.693812 17550 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.693898 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:51.694080 17550 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:02:51.694100 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.694109 17550 net.cpp:165] Memory required for data: 925185500\nI0817 16:02:51.694128 17550 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:02:51.694154 17550 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:02:51.694169 17550 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:02:51.694183 17550 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:51.694205 17550 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:02:51.694252 17550 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:02:51.694270 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.694279 17550 net.cpp:165] Memory required for data: 929281500\nI0817 16:02:51.694290 17550 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:02:51.694304 17550 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:02:51.694315 17550 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:02:51.694335 17550 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.694356 17550 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:02:51.694370 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.694380 17550 net.cpp:165] Memory required for data: 933377500\nI0817 16:02:51.694391 17550 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.694404 17550 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.694416 17550 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:02:51.694432 17550 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:51.694450 17550 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:51.694533 17550 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.694552 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.694566 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.694576 17550 net.cpp:165] Memory required for data: 941569500\nI0817 16:02:51.694586 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:02:51.694619 17550 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:02:51.694633 17550 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:51.694651 17550 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:02:51.695176 17550 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:02:51.695197 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.695206 17550 net.cpp:165] Memory required for data: 945665500\nI0817 16:02:51.695225 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:02:51.695246 17550 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:02:51.695260 17550 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:02:51.695277 17550 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:02:51.695555 17550 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:02:51.695578 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.695588 17550 net.cpp:165] Memory required for data: 949761500\nI0817 16:02:51.695610 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:51.695627 17550 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:02:51.695639 17550 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:02:51.695655 17550 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.695745 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:51.695929 17550 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:02:51.695947 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.695957 17550 net.cpp:165] Memory required for data: 953857500\nI0817 16:02:51.695976 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:02:51.695991 17550 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:02:51.696002 17550 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:02:51.696022 17550 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.696041 17550 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:02:51.696056 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.696068 17550 net.cpp:165] Memory required for data: 957953500\nI0817 16:02:51.696079 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:02:51.696104 17550 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:02:51.696117 17550 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:02:51.696135 17550 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:02:51.696640 17550 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:02:51.696660 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.696668 17550 net.cpp:165] Memory required for data: 962049500\nI0817 16:02:51.696687 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:02:51.696704 17550 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:02:51.696717 17550 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:02:51.696738 17550 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:02:51.697024 17550 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:02:51.697043 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697052 17550 net.cpp:165] Memory required for data: 966145500\nI0817 16:02:51.697074 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:51.697095 17550 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:02:51.697108 17550 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:02:51.697124 17550 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.697216 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:51.697402 17550 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:02:51.697422 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697432 17550 net.cpp:165] Memory required for data: 970241500\nI0817 16:02:51.697449 17550 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:02:51.697470 17550 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:02:51.697484 17550 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:02:51.697496 17550 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:51.697527 17550 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:02:51.697577 17550 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:02:51.697595 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697605 17550 net.cpp:165] Memory required for data: 974337500\nI0817 16:02:51.697615 17550 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:02:51.697648 17550 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:02:51.697661 17550 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:02:51.697676 17550 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.697695 17550 net.cpp:150] Setting up L2_b3_relu\nI0817 16:02:51.697711 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697721 17550 net.cpp:165] Memory required for data: 978433500\nI0817 16:02:51.697731 17550 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.697755 17550 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.697767 17550 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:02:51.697783 17550 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:51.697803 17550 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:51.697890 17550 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.697907 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697921 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.697930 17550 net.cpp:165] Memory required for data: 986625500\nI0817 16:02:51.697942 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:02:51.697962 17550 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:02:51.697974 17550 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:51.697998 17550 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:02:51.698513 17550 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:02:51.698534 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.698542 17550 net.cpp:165] Memory required for data: 990721500\nI0817 16:02:51.698560 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:02:51.698578 17550 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:02:51.698590 17550 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:02:51.698611 17550 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:02:51.698894 17550 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:02:51.698912 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.698922 17550 net.cpp:165] Memory required for data: 994817500\nI0817 16:02:51.698945 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:51.698966 17550 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:02:51.698978 17550 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:02:51.698994 17550 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.699079 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:51.699267 17550 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:02:51.699286 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.699295 17550 net.cpp:165] Memory required for data: 998913500\nI0817 16:02:51.699313 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:02:51.699329 17550 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:02:51.699340 17550 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:02:51.699359 17550 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.699380 17550 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:02:51.699395 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.699405 17550 net.cpp:165] Memory required for data: 1003009500\nI0817 16:02:51.699415 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:02:51.699448 17550 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:02:51.699462 17550 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:02:51.699484 17550 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:02:51.699995 17550 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:02:51.700014 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.700023 17550 net.cpp:165] Memory required for data: 1007105500\nI0817 16:02:51.700042 17550 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:02:51.700059 17550 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:02:51.700070 17550 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:02:51.700086 17550 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:02:51.700378 17550 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:02:51.700397 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.700407 17550 net.cpp:165] Memory required for data: 1011201500\nI0817 16:02:51.700428 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:51.700444 17550 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:02:51.700456 17550 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:02:51.700476 17550 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.700562 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:51.700759 17550 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:02:51.700778 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.700788 17550 net.cpp:165] Memory required for data: 1015297500\nI0817 16:02:51.700806 17550 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:02:51.700824 17550 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:02:51.700834 17550 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:02:51.700847 17550 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:51.700868 17550 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:02:51.700915 17550 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:02:51.700947 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.700958 17550 net.cpp:165] Memory required for data: 1019393500\nI0817 16:02:51.700968 17550 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:02:51.700981 17550 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:02:51.700994 17550 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:02:51.701009 17550 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.701025 17550 net.cpp:150] Setting up L2_b4_relu\nI0817 16:02:51.701040 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.701050 17550 net.cpp:165] Memory required for data: 1023489500\nI0817 16:02:51.701063 17550 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.701083 17550 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.701094 17550 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:02:51.701109 17550 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:51.701129 17550 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:51.701216 17550 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.701242 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.701257 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.701267 17550 net.cpp:165] Memory required for data: 1031681500\nI0817 16:02:51.701277 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:02:51.701297 17550 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:02:51.701309 17550 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:51.701328 17550 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:02:51.701845 17550 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:02:51.701876 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.701886 17550 net.cpp:165] Memory required for data: 1035777500\nI0817 16:02:51.701905 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:02:51.701921 17550 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:02:51.701932 17550 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:02:51.701954 17550 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:02:51.702250 17550 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:02:51.702267 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.702276 17550 net.cpp:165] Memory required for data: 1039873500\nI0817 16:02:51.702297 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:51.702318 17550 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:02:51.702332 17550 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:02:51.702347 17550 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.702428 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:51.702622 17550 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:02:51.702641 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.702649 17550 net.cpp:165] Memory required for data: 1043969500\nI0817 16:02:51.702667 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:02:51.702682 17550 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:02:51.702693 17550 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:02:51.702713 17550 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.702733 17550 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:02:51.702747 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.702759 17550 net.cpp:165] Memory required for data: 1048065500\nI0817 16:02:51.702769 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:02:51.702793 17550 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:02:51.702806 17550 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:02:51.702822 17550 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:02:51.703342 17550 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:02:51.703362 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.703372 17550 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:02:51.703388 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:02:51.703410 17550 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:02:51.703423 17550 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:02:51.703440 17550 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:02:51.703723 17550 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:02:51.703742 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.703752 17550 net.cpp:165] Memory required for data: 1056257500\nI0817 16:02:51.703773 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:51.703789 17550 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:02:51.703801 17550 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:02:51.703821 17550 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.703909 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:51.704097 17550 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:02:51.704115 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.704125 17550 net.cpp:165] Memory required for data: 1060353500\nI0817 16:02:51.704144 17550 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:02:51.704169 17550 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:02:51.704181 17550 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:02:51.704195 17550 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:51.704216 17550 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:02:51.704264 17550 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:02:51.704282 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.704301 17550 net.cpp:165] Memory required for data: 1064449500\nI0817 16:02:51.704313 17550 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:02:51.704332 17550 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:02:51.704345 17550 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:02:51.704360 17550 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.704378 17550 net.cpp:150] Setting up L2_b5_relu\nI0817 16:02:51.704393 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.704402 17550 net.cpp:165] Memory required for data: 1068545500\nI0817 16:02:51.704412 17550 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.704432 17550 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.704443 17550 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:02:51.704458 17550 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:51.704478 17550 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:51.704558 17550 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.704586 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.704601 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.704610 17550 net.cpp:165] Memory required for data: 1076737500\nI0817 16:02:51.704619 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:02:51.704639 17550 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:02:51.704653 17550 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:51.704670 17550 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:02:51.705193 17550 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:02:51.705214 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.705224 17550 net.cpp:165] Memory required for data: 1080833500\nI0817 16:02:51.705241 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:02:51.705263 17550 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:02:51.705276 17550 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:02:51.705293 17550 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:02:51.705580 17550 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:02:51.705600 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.705610 17550 net.cpp:165] Memory required for data: 1084929500\nI0817 16:02:51.705631 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:51.705648 17550 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:02:51.705660 17550 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:02:51.705680 17550 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.705776 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:51.705960 17550 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:02:51.705978 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.705988 17550 net.cpp:165] Memory required for data: 1089025500\nI0817 16:02:51.706007 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:02:51.706022 17550 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:02:51.706032 17550 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:02:51.706051 17550 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.706073 17550 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:02:51.706086 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.706095 17550 net.cpp:165] Memory required for data: 1093121500\nI0817 16:02:51.706104 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:02:51.706128 17550 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:02:51.706142 17550 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:02:51.706167 17550 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:02:51.706684 17550 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:02:51.706712 17550 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.706722 17550 net.cpp:165] Memory required for data: 1097217500\nI0817 16:02:51.706739 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:02:51.706761 17550 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:02:51.706774 17550 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:02:51.706791 17550 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:02:51.707070 17550 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:02:51.707089 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707098 17550 net.cpp:165] Memory required for data: 1101313500\nI0817 16:02:51.707120 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:51.707137 17550 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:02:51.707155 17550 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:02:51.707178 17550 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.707274 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:51.707458 17550 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:02:51.707478 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707486 17550 net.cpp:165] Memory required for data: 1105409500\nI0817 16:02:51.707505 17550 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:02:51.707521 17550 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:02:51.707532 17550 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:02:51.707545 17550 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:51.707566 17550 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:02:51.707613 17550 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:02:51.707630 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707640 17550 net.cpp:165] Memory required for data: 1109505500\nI0817 16:02:51.707651 17550 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:02:51.707670 17550 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:02:51.707684 17550 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:02:51.707698 17550 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.707717 17550 net.cpp:150] Setting up L2_b6_relu\nI0817 16:02:51.707732 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707741 17550 net.cpp:165] Memory required for data: 1113601500\nI0817 16:02:51.707751 17550 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.707765 17550 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.707777 17550 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:02:51.707797 17550 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:51.707818 17550 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:51.707896 17550 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.707916 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707931 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.707940 17550 net.cpp:165] Memory required for data: 1121793500\nI0817 16:02:51.707950 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:02:51.707974 17550 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:02:51.707988 17550 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:51.708006 17550 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:02:51.708528 17550 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:02:51.708547 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.708556 17550 net.cpp:165] Memory required for data: 1125889500\nI0817 16:02:51.708575 17550 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:02:51.708596 17550 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:02:51.708608 17550 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:02:51.708634 17550 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:02:51.708933 17550 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:02:51.708952 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.708962 17550 net.cpp:165] Memory required for data: 1129985500\nI0817 16:02:51.708984 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:51.709000 17550 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:02:51.709012 17550 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:02:51.709033 17550 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.709121 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:51.709316 17550 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:02:51.709336 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.709344 17550 net.cpp:165] Memory required for data: 1134081500\nI0817 16:02:51.709363 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:02:51.709378 17550 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:02:51.709389 17550 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:02:51.709411 17550 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.709431 17550 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:02:51.709447 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.709457 17550 net.cpp:165] Memory required for data: 1138177500\nI0817 16:02:51.709466 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:02:51.709492 17550 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:02:51.709506 17550 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:02:51.709522 17550 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:02:51.710029 17550 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:02:51.710052 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.710062 17550 net.cpp:165] Memory required for data: 1142273500\nI0817 16:02:51.710079 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:02:51.710101 17550 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:02:51.710114 17550 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:02:51.710130 17550 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:02:51.710415 17550 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:02:51.710434 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.710444 17550 net.cpp:165] Memory required for data: 1146369500\nI0817 16:02:51.710464 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:51.710480 17550 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:02:51.710492 17550 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:02:51.710508 17550 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.710602 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:51.710788 17550 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:02:51.710811 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.710821 17550 net.cpp:165] Memory required for data: 1150465500\nI0817 16:02:51.710840 17550 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:02:51.710857 17550 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:02:51.710868 17550 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:02:51.710883 17550 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:51.710899 17550 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:02:51.710950 17550 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:02:51.710968 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.710979 17550 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:02:51.710989 17550 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:02:51.711004 17550 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:02:51.711014 17550 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:02:51.711033 17550 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.711061 17550 net.cpp:150] Setting up L2_b7_relu\nI0817 16:02:51.711078 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.711088 17550 net.cpp:165] Memory required for data: 1158657500\nI0817 16:02:51.711097 17550 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.711112 17550 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.711123 17550 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:02:51.711144 17550 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:51.711174 17550 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:51.711251 17550 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.711269 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.711282 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.711292 17550 net.cpp:165] Memory required for data: 1166849500\nI0817 16:02:51.711302 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:02:51.711328 17550 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:02:51.711340 17550 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:51.711359 17550 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:02:51.711885 17550 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:02:51.711905 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.711915 17550 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:02:51.711932 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:02:51.711953 17550 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:02:51.711966 17550 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:02:51.711983 17550 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:02:51.712287 17550 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:02:51.712306 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.712316 17550 net.cpp:165] Memory required for data: 1175041500\nI0817 16:02:51.712337 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:51.712354 17550 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:02:51.712365 17550 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:02:51.712385 17550 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.712476 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:51.712664 17550 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:02:51.712683 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.712693 17550 net.cpp:165] Memory required for data: 1179137500\nI0817 16:02:51.712712 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:02:51.712726 17550 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:02:51.712738 17550 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:02:51.712751 17550 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.712771 17550 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:02:51.712785 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.712795 17550 net.cpp:165] Memory required for data: 1183233500\nI0817 16:02:51.712805 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:02:51.712831 17550 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:02:51.712843 17550 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:02:51.712865 17550 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:02:51.713475 17550 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:02:51.713500 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.713505 17550 net.cpp:165] Memory required for data: 1187329500\nI0817 16:02:51.713516 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:02:51.713532 17550 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:02:51.713548 17550 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:02:51.713562 17550 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:02:51.713824 17550 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:02:51.713838 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.713843 17550 net.cpp:165] Memory required for data: 1191425500\nI0817 16:02:51.713855 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:51.713863 17550 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:02:51.713871 17550 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:02:51.713878 17550 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.713954 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:51.714110 17550 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:02:51.714126 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.714133 17550 net.cpp:165] Memory required for data: 1195521500\nI0817 16:02:51.714141 17550 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:02:51.714150 17550 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:02:51.714156 17550 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:02:51.714164 17550 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:51.714171 17550 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:02:51.714203 17550 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:02:51.714215 17550 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:51.714220 17550 net.cpp:165] Memory required for data: 1199617500\nI0817 16:02:51.714224 17550 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:02:51.714232 17550 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:02:51.714238 17550 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:02:51.714248 17550 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.714258 17550 net.cpp:150] Setting up L2_b8_relu\nI0817 16:02:51.714265 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.714270 17550 net.cpp:165] Memory required for data: 1203713500\nI0817 16:02:51.714274 17550 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.714282 17550 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.714287 17550 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:02:51.714298 17550 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:51.714323 17550 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:51.714370 17550 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.714383 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.714390 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.714395 17550 net.cpp:165] Memory required for data: 1211905500\nI0817 16:02:51.714399 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:02:51.714414 17550 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:02:51.714421 17550 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:51.714434 17550 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:02:51.714910 17550 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:02:51.714923 17550 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:51.714929 17550 net.cpp:165] Memory required for data: 1216001500\nI0817 16:02:51.714943 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:02:51.714956 17550 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:02:51.714962 17550 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:02:51.714973 17550 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:02:51.715226 17550 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:02:51.715240 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.715245 17550 net.cpp:165] Memory required for data: 1220097500\nI0817 16:02:51.715262 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:51.715272 17550 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:02:51.715278 17550 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:02:51.715286 17550 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.715346 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:51.715498 17550 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:02:51.715512 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.715515 17550 net.cpp:165] Memory required for data: 1224193500\nI0817 16:02:51.715524 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:02:51.715535 17550 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:02:51.715543 17550 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:02:51.715549 17550 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.715559 17550 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:02:51.715566 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.715571 17550 net.cpp:165] Memory required for data: 1228289500\nI0817 16:02:51.715576 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:02:51.715590 17550 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:02:51.715595 17550 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:02:51.715606 17550 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:02:51.716089 17550 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:02:51.716104 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716109 17550 net.cpp:165] Memory required for data: 1232385500\nI0817 16:02:51.716117 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:02:51.716130 17550 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:02:51.716136 17550 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:02:51.716147 17550 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:02:51.716399 17550 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:02:51.716415 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716420 17550 net.cpp:165] Memory required for data: 1236481500\nI0817 16:02:51.716464 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:51.716480 17550 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:02:51.716486 17550 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:02:51.716495 17550 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.716552 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:51.716699 17550 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:02:51.716712 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716717 17550 net.cpp:165] Memory required for data: 1240577500\nI0817 16:02:51.716727 17550 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:02:51.716735 17550 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:02:51.716742 17550 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:02:51.716748 17550 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:51.716759 17550 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:02:51.716787 17550 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:02:51.716800 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716805 17550 net.cpp:165] Memory required for data: 1244673500\nI0817 16:02:51.716810 17550 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:02:51.716819 17550 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:02:51.716825 17550 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:02:51.716831 17550 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.716840 17550 net.cpp:150] Setting up L2_b9_relu\nI0817 16:02:51.716847 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716851 17550 net.cpp:165] Memory required for data: 1248769500\nI0817 16:02:51.716856 17550 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.716873 17550 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.716879 17550 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:02:51.716887 17550 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:51.716897 17550 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:51.716965 17550 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.716979 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716984 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.716989 17550 net.cpp:165] Memory required for data: 1256961500\nI0817 16:02:51.716995 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:02:51.717006 17550 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:02:51.717012 17550 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:51.717025 17550 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:02:51.717504 17550 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:02:51.717519 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.717523 17550 net.cpp:165] Memory required for data: 1257985500\nI0817 16:02:51.717531 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:02:51.717540 17550 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:02:51.717547 17550 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:02:51.717558 17550 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:02:51.717828 17550 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:02:51.717844 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.717849 17550 net.cpp:165] Memory required for data: 1259009500\nI0817 16:02:51.717859 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:51.717869 17550 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:02:51.717875 17550 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:02:51.717882 17550 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.717945 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:51.718104 17550 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:02:51.718117 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.718122 17550 net.cpp:165] Memory required for data: 1260033500\nI0817 16:02:51.718132 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:02:51.718139 17550 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:02:51.718145 17550 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:02:51.718155 17550 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.718165 17550 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:02:51.718173 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.718176 17550 net.cpp:165] Memory required for data: 1261057500\nI0817 16:02:51.718183 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:02:51.718196 17550 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:02:51.718202 17550 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:02:51.718211 17550 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:02:51.718684 17550 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:02:51.718698 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.718703 17550 net.cpp:165] Memory required for data: 1262081500\nI0817 16:02:51.718713 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:02:51.718726 17550 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:02:51.718732 17550 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:02:51.718744 17550 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:02:51.719008 17550 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:02:51.719022 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.719027 17550 net.cpp:165] Memory required for data: 1263105500\nI0817 16:02:51.719044 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:51.719053 17550 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:02:51.719059 17550 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:02:51.719070 17550 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.719127 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:51.719285 17550 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:02:51.719298 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.719303 17550 net.cpp:165] Memory required for data: 1264129500\nI0817 16:02:51.719311 17550 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:02:51.719321 17550 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:02:51.719327 17550 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:51.719338 17550 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:02:51.719377 17550 net.cpp:150] Setting up L3_b1_pool\nI0817 16:02:51.719388 17550 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:02:51.719393 17550 net.cpp:165] Memory required for data: 1265153500\nI0817 16:02:51.719398 17550 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:02:51.719408 17550 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:02:51.719413 17550 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:02:51.719419 17550 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:02:51.719429 17550 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:02:51.719462 17550 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:02:51.719471 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.719476 17550 net.cpp:165] Memory required for data: 1266177500\nI0817 16:02:51.719481 17550 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:02:51.719488 17550 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:02:51.719494 17550 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:02:51.719501 17550 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.719511 17550 net.cpp:150] Setting up L3_b1_relu\nI0817 16:02:51.719517 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.719521 17550 net.cpp:165] Memory required for data: 1267201500\nI0817 16:02:51.719527 17550 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:02:51.719538 17550 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:02:51.719547 17550 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:02:51.720820 17550 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:02:51.720839 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.720844 17550 net.cpp:165] Memory required for data: 1268225500\nI0817 16:02:51.720849 17550 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:02:51.720860 17550 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:02:51.720865 17550 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:02:51.720872 17550 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:02:51.720883 17550 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:02:51.720924 17550 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:02:51.720944 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.720950 17550 net.cpp:165] Memory required for data: 1270273500\nI0817 16:02:51.720957 17550 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.720964 17550 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.720970 17550 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:02:51.720978 17550 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:51.720988 17550 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:51.721042 17550 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.721055 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.721061 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.721073 17550 net.cpp:165] Memory required for data: 1274369500\nI0817 16:02:51.721079 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:02:51.721094 17550 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:02:51.721101 17550 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:51.721110 17550 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:02:51.723109 17550 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:02:51.723126 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.723131 17550 net.cpp:165] Memory required for data: 1276417500\nI0817 16:02:51.723141 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:02:51.723151 17550 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:02:51.723158 17550 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:02:51.723170 17550 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:02:51.723439 17550 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:02:51.723453 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.723457 17550 net.cpp:165] Memory required for data: 1278465500\nI0817 16:02:51.723469 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:51.723477 17550 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:02:51.723484 17550 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:02:51.723491 17550 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.723552 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:51.723707 17550 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:02:51.723721 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.723726 17550 net.cpp:165] Memory required for data: 1280513500\nI0817 16:02:51.723733 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:02:51.723744 17550 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:02:51.723752 17550 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:02:51.723758 17550 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.723768 17550 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:02:51.723775 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.723780 17550 net.cpp:165] Memory required for data: 1282561500\nI0817 16:02:51.723785 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:02:51.723799 17550 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:02:51.723806 17550 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:02:51.723814 17550 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:02:51.724838 17550 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:02:51.724853 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.724858 17550 net.cpp:165] Memory required for data: 1284609500\nI0817 16:02:51.724867 17550 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:02:51.724879 17550 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:02:51.724886 17550 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:02:51.724897 17550 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:02:51.725172 17550 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:02:51.725188 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725193 17550 net.cpp:165] Memory required for data: 1286657500\nI0817 16:02:51.725203 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:51.725211 17550 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:02:51.725219 17550 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:02:51.725229 17550 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.725286 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:51.725443 17550 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:02:51.725456 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725461 17550 net.cpp:165] Memory required for data: 1288705500\nI0817 16:02:51.725471 17550 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:02:51.725482 17550 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:02:51.725497 17550 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:02:51.725503 17550 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:51.725512 17550 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:02:51.725550 17550 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:02:51.725559 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725564 17550 net.cpp:165] Memory required for data: 1290753500\nI0817 16:02:51.725569 17550 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:02:51.725577 17550 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:02:51.725584 17550 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:02:51.725594 17550 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.725603 17550 net.cpp:150] Setting up L3_b2_relu\nI0817 16:02:51.725610 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725615 17550 net.cpp:165] Memory required for data: 1292801500\nI0817 16:02:51.725620 17550 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.725627 17550 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.725632 17550 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:02:51.725639 17550 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:51.725649 17550 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:51.725703 17550 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.725715 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725723 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.725728 17550 net.cpp:165] Memory required for data: 1296897500\nI0817 16:02:51.725733 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:02:51.725744 17550 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:02:51.725749 17550 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:51.725761 17550 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:02:51.726783 17550 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:02:51.726799 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.726804 17550 net.cpp:165] Memory required for data: 1298945500\nI0817 16:02:51.726812 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:02:51.726822 17550 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:02:51.726828 17550 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:02:51.726840 17550 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:02:51.727113 17550 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:02:51.727128 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.727133 17550 net.cpp:165] Memory required for data: 1300993500\nI0817 16:02:51.727143 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:51.727151 17550 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:02:51.727157 17550 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:02:51.727165 17550 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.727226 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:51.727380 17550 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:02:51.727396 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.727401 17550 net.cpp:165] Memory required for data: 1303041500\nI0817 16:02:51.727409 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:02:51.727417 17550 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:02:51.727423 17550 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:02:51.727432 17550 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.727442 17550 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:02:51.727448 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.727459 17550 net.cpp:165] Memory required for data: 1305089500\nI0817 16:02:51.727464 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:02:51.727479 17550 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:02:51.727485 17550 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:02:51.727494 17550 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:02:51.728523 17550 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:02:51.728538 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.728543 17550 net.cpp:165] Memory required for data: 1307137500\nI0817 16:02:51.728552 17550 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:02:51.728564 17550 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:02:51.728570 17550 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:02:51.728582 17550 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:02:51.728849 17550 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:02:51.728863 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.728868 17550 net.cpp:165] Memory required for data: 1309185500\nI0817 16:02:51.728878 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:51.728886 17550 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:02:51.728893 17550 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:02:51.728902 17550 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.728971 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:51.729133 17550 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:02:51.729146 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.729151 17550 net.cpp:165] Memory required for data: 1311233500\nI0817 16:02:51.729161 17550 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:02:51.729173 17550 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:02:51.729180 17550 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:02:51.729187 17550 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:51.729195 17550 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:02:51.729233 17550 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:02:51.729243 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.729249 17550 net.cpp:165] Memory required for data: 1313281500\nI0817 16:02:51.729254 17550 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:02:51.729261 17550 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:02:51.729266 17550 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:02:51.729276 17550 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.729286 17550 net.cpp:150] Setting up L3_b3_relu\nI0817 16:02:51.729293 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.729298 17550 net.cpp:165] Memory required for data: 1315329500\nI0817 16:02:51.729303 17550 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.729310 17550 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.729316 17550 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:02:51.729323 17550 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:51.729334 17550 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:51.729383 17550 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.729394 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.729401 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.729405 17550 net.cpp:165] Memory required for data: 1319425500\nI0817 16:02:51.729410 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:02:51.729421 17550 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:02:51.729429 17550 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:51.729440 17550 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:02:51.730470 17550 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:02:51.730484 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.730489 17550 net.cpp:165] Memory required for data: 1321473500\nI0817 16:02:51.730499 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:02:51.730510 17550 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:02:51.730517 17550 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:02:51.730526 17550 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:02:51.730798 17550 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:02:51.730810 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.730814 17550 net.cpp:165] Memory required for data: 1323521500\nI0817 16:02:51.730824 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:51.730834 17550 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:02:51.730839 17550 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:02:51.730847 17550 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.730911 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:51.731076 17550 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:02:51.731093 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.731098 17550 net.cpp:165] Memory required for data: 1325569500\nI0817 16:02:51.731107 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:02:51.731115 17550 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:02:51.731122 17550 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:02:51.731129 17550 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.731139 17550 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:02:51.731147 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.731150 17550 net.cpp:165] Memory required for data: 1327617500\nI0817 16:02:51.731155 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:02:51.731169 17550 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:02:51.731175 17550 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:02:51.731186 17550 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:02:51.732219 17550 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:02:51.732234 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.732239 17550 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:02:51.732247 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:02:51.732259 17550 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:02:51.732266 17550 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:02:51.732275 17550 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:02:51.732542 17550 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:02:51.732555 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.732560 17550 net.cpp:165] Memory required for data: 1331713500\nI0817 16:02:51.732570 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:51.732583 17550 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:02:51.732589 17550 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:02:51.732596 17550 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.732656 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:51.732818 17550 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:02:51.732831 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.732836 17550 net.cpp:165] Memory required for data: 1333761500\nI0817 16:02:51.732846 17550 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:02:51.732856 17550 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:02:51.732863 17550 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:02:51.732870 17550 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:51.732878 17550 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:02:51.732914 17550 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:02:51.732930 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.732935 17550 net.cpp:165] Memory required for data: 1335809500\nI0817 16:02:51.732946 17550 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:02:51.732964 17550 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:02:51.732969 17550 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:02:51.732980 17550 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.732990 17550 net.cpp:150] Setting up L3_b4_relu\nI0817 16:02:51.732996 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.733001 17550 net.cpp:165] Memory required for data: 1337857500\nI0817 16:02:51.733006 17550 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.733012 17550 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.733018 17550 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:02:51.733026 17550 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:51.733034 17550 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:51.733088 17550 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.733100 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.733108 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.733111 17550 net.cpp:165] Memory required for data: 1341953500\nI0817 16:02:51.733116 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:02:51.733127 17550 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:02:51.733134 17550 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:51.733145 17550 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:02:51.734176 17550 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:02:51.734191 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.734196 17550 net.cpp:165] Memory required for data: 1344001500\nI0817 16:02:51.734205 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:02:51.734217 17550 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:02:51.734225 17550 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:02:51.734233 17550 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:02:51.735504 17550 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:02:51.735522 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.735527 17550 net.cpp:165] Memory required for data: 1346049500\nI0817 16:02:51.735538 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:51.735551 17550 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:02:51.735558 17550 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:02:51.735566 17550 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.735630 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:51.735786 17550 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:02:51.735800 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.735805 17550 net.cpp:165] Memory required for data: 1348097500\nI0817 16:02:51.735813 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:02:51.735826 17550 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:02:51.735831 17550 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:02:51.735839 17550 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.735849 17550 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:02:51.735857 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.735860 17550 net.cpp:165] Memory required for data: 1350145500\nI0817 16:02:51.735865 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:02:51.735883 17550 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:02:51.735889 17550 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:02:51.735900 17550 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:02:51.737918 17550 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:02:51.737936 17550 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:51.737946 17550 net.cpp:165] Memory required for data: 1352193500\nI0817 16:02:51.737957 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:02:51.737967 17550 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:02:51.737977 17550 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:02:51.737985 17550 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:02:51.738246 17550 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:02:51.738260 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738265 17550 net.cpp:165] Memory required for data: 1354241500\nI0817 16:02:51.738275 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:51.738283 17550 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:02:51.738289 17550 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:02:51.738301 17550 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.738358 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:51.738514 17550 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:02:51.738528 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738533 17550 net.cpp:165] Memory required for data: 1356289500\nI0817 16:02:51.738543 17550 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:02:51.738554 17550 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:02:51.738561 17550 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:02:51.738569 17550 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:51.738576 17550 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:02:51.738612 17550 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:02:51.738623 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738628 17550 net.cpp:165] Memory required for data: 1358337500\nI0817 16:02:51.738633 17550 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:02:51.738641 17550 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:02:51.738646 17550 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:02:51.738659 17550 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.738669 17550 net.cpp:150] Setting up L3_b5_relu\nI0817 16:02:51.738677 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738680 17550 net.cpp:165] Memory required for data: 1360385500\nI0817 16:02:51.738685 17550 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.738692 17550 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.738698 17550 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:02:51.738705 17550 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:51.738714 17550 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:51.738762 17550 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.738775 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738780 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.738785 17550 net.cpp:165] Memory required for data: 1364481500\nI0817 16:02:51.738790 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:02:51.738801 17550 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:02:51.738807 17550 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:51.738819 17550 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:02:51.739836 17550 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:02:51.739851 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.739856 17550 net.cpp:165] Memory required for data: 1366529500\nI0817 16:02:51.739866 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:02:51.739877 
17550 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:02:51.739892 17550 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:02:51.739902 17550 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:02:51.740167 17550 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:02:51.740181 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.740186 17550 net.cpp:165] Memory required for data: 1368577500\nI0817 16:02:51.740196 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:51.740206 17550 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:02:51.740212 17550 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:02:51.740219 17550 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.740278 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:51.740430 17550 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:02:51.740445 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.740450 17550 net.cpp:165] Memory required for data: 1370625500\nI0817 16:02:51.740459 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:02:51.740468 17550 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:02:51.740473 17550 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:02:51.740480 17550 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.740490 17550 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:02:51.740497 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.740501 17550 net.cpp:165] Memory required for data: 1372673500\nI0817 16:02:51.740506 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:02:51.740522 17550 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:02:51.740528 17550 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:02:51.740540 17550 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:02:51.741560 17550 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:02:51.741575 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.741580 17550 net.cpp:165] Memory required for data: 1374721500\nI0817 16:02:51.741590 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:02:51.741601 17550 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:02:51.741607 17550 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:02:51.741616 17550 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:02:51.741879 17550 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:02:51.741892 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.741897 17550 net.cpp:165] Memory required for data: 1376769500\nI0817 16:02:51.741907 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:51.741920 17550 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:02:51.741926 17550 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:02:51.741935 17550 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.742000 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:51.742159 17550 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:02:51.742172 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.742177 17550 net.cpp:165] Memory required for data: 1378817500\nI0817 16:02:51.742187 17550 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:02:51.742197 17550 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:02:51.742204 17550 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:02:51.742211 17550 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:51.742219 17550 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:02:51.742254 17550 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:02:51.742265 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.742270 17550 net.cpp:165] Memory required for data: 1380865500\nI0817 16:02:51.742275 17550 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:02:51.742283 17550 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:02:51.742288 17550 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:02:51.742305 17550 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.742316 17550 net.cpp:150] Setting up L3_b6_relu\nI0817 16:02:51.742323 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.742327 17550 net.cpp:165] Memory required for data: 1382913500\nI0817 16:02:51.742332 17550 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.742339 17550 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.742346 17550 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:02:51.742352 17550 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:51.742362 17550 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:51.742411 17550 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.742424 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.742430 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.742434 17550 net.cpp:165] Memory required for data: 1387009500\nI0817 16:02:51.742439 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:02:51.742453 17550 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:02:51.742460 17550 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:51.742468 17550 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:02:51.743484 17550 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:02:51.743497 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.743502 17550 net.cpp:165] Memory required for data: 1389057500\nI0817 16:02:51.743511 17550 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:02:51.743523 17550 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:02:51.743530 17550 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:02:51.743538 17550 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:02:51.743799 17550 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:02:51.743811 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.743816 17550 net.cpp:165] Memory required for data: 1391105500\nI0817 16:02:51.743826 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:51.743835 17550 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:02:51.743842 17550 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:02:51.743849 17550 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.743911 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:51.744076 17550 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:02:51.744089 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.744094 17550 net.cpp:165] Memory required for data: 1393153500\nI0817 16:02:51.744103 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:02:51.744139 17550 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:02:51.744148 17550 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:02:51.744156 17550 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.744168 17550 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:02:51.744174 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.744179 17550 net.cpp:165] Memory required for data: 1395201500\nI0817 16:02:51.744184 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:02:51.744199 17550 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:02:51.744204 17550 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:02:51.744213 17550 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:02:51.745239 17550 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:02:51.745254 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.745259 17550 net.cpp:165] Memory required for data: 1397249500\nI0817 16:02:51.745266 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:02:51.745280 17550 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:02:51.745291 17550 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:02:51.745301 17550 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:02:51.745568 17550 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:02:51.745581 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.745586 17550 net.cpp:165] Memory required for data: 1399297500\nI0817 16:02:51.745596 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:51.745605 17550 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:02:51.745612 17550 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:02:51.745620 17550 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.745679 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:51.745833 17550 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:02:51.745848 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.745854 17550 net.cpp:165] Memory required for data: 1401345500\nI0817 16:02:51.745862 17550 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:02:51.745872 17550 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:02:51.745878 17550 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:02:51.745885 17550 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:51.745893 17550 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:02:51.745931 17550 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:02:51.745949 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.745954 17550 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:02:51.745967 17550 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:02:51.745977 17550 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:02:51.745985 17550 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:02:51.745991 17550 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.746001 17550 net.cpp:150] Setting up L3_b7_relu\nI0817 16:02:51.746007 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.746012 17550 net.cpp:165] Memory required for data: 1405441500\nI0817 16:02:51.746017 17550 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.746028 17550 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.746034 17550 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:02:51.746042 17550 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:51.746050 17550 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:51.746098 17550 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.746111 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.746119 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.746124 17550 net.cpp:165] Memory required for data: 1409537500\nI0817 16:02:51.746129 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:02:51.746140 17550 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:02:51.746145 17550 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:51.746155 17550 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:02:51.747179 17550 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:02:51.747192 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.747197 17550 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:02:51.747206 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:02:51.747218 17550 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:02:51.747225 17550 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:02:51.747234 17550 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:02:51.747498 17550 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:02:51.747511 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.747515 17550 net.cpp:165] Memory required for data: 1413633500\nI0817 16:02:51.747532 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:51.747545 17550 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:02:51.747552 17550 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:02:51.747562 17550 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.747619 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:51.747779 17550 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:02:51.747792 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.747797 17550 net.cpp:165] Memory required for data: 1415681500\nI0817 16:02:51.747805 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:02:51.747813 17550 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:02:51.747819 17550 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:02:51.747830 17550 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.747840 17550 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:02:51.747848 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.747853 17550 net.cpp:165] Memory required for data: 1417729500\nI0817 16:02:51.747858 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:02:51.747870 17550 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:02:51.747877 17550 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:02:51.747885 17550 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:02:51.748903 17550 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:02:51.748919 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.748924 17550 net.cpp:165] Memory required for data: 1419777500\nI0817 16:02:51.748932 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:02:51.748952 17550 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:02:51.748960 17550 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:02:51.748970 17550 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:02:51.749230 17550 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:02:51.749243 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.749248 17550 net.cpp:165] Memory required for data: 1421825500\nI0817 16:02:51.749258 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:51.749266 17550 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:02:51.749272 17550 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:02:51.749279 17550 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.749338 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:51.749496 17550 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:02:51.749510 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.749514 17550 net.cpp:165] Memory required for data: 1423873500\nI0817 16:02:51.749523 17550 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:02:51.749532 17550 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:02:51.749538 17550 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:02:51.749546 17550 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:51.749557 17550 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:02:51.749589 17550 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:02:51.749603 17550 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:51.749608 17550 net.cpp:165] Memory required for data: 1425921500\nI0817 16:02:51.749614 17550 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:02:51.749621 17550 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:02:51.749627 17550 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:02:51.749634 17550 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.749644 17550 net.cpp:150] Setting up L3_b8_relu\nI0817 16:02:51.749650 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.749655 17550 net.cpp:165] Memory required for data: 1427969500\nI0817 16:02:51.749660 17550 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.749676 17550 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.749682 17550 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:02:51.749691 17550 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:51.749701 17550 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:51.749745 17550 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.749760 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.749768 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.749771 17550 net.cpp:165] Memory required for data: 1432065500\nI0817 16:02:51.749776 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:02:51.749788 17550 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:02:51.749794 17550 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:51.749804 17550 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:02:51.751814 17550 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:02:51.751832 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:02:51.751837 17550 net.cpp:165] Memory required for data: 1434113500\nI0817 16:02:51.751847 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:02:51.751859 17550 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:02:51.751866 17550 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:02:51.751878 17550 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:02:51.752146 17550 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:02:51.752161 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.752166 17550 net.cpp:165] Memory required for data: 1436161500\nI0817 16:02:51.752176 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:51.752184 17550 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:02:51.752192 17550 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:02:51.752202 17550 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.752259 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:51.752418 17550 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:02:51.752430 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.752435 17550 net.cpp:165] Memory required for data: 1438209500\nI0817 16:02:51.752450 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:02:51.752459 17550 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:02:51.752465 17550 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:02:51.752475 17550 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.752486 17550 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:02:51.752493 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.752497 17550 net.cpp:165] Memory required for data: 1440257500\nI0817 16:02:51.752502 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:02:51.752516 17550 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:02:51.752522 17550 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:02:51.752530 17550 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:02:51.753554 17550 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:02:51.753569 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.753574 17550 net.cpp:165] Memory required for data: 1442305500\nI0817 16:02:51.753583 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:02:51.753595 17550 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:02:51.753602 17550 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:02:51.753610 17550 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:02:51.753875 17550 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:02:51.753888 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.753893 17550 net.cpp:165] Memory required for data: 1444353500\nI0817 16:02:51.753913 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:51.753926 17550 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:02:51.753932 17550 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:02:51.753949 17550 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.754009 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:51.754170 17550 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:02:51.754184 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.754187 17550 net.cpp:165] Memory required for data: 1446401500\nI0817 16:02:51.754196 17550 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:02:51.754205 17550 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:02:51.754212 17550 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:02:51.754220 17550 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:51.754230 17550 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:02:51.754263 17550 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:02:51.754274 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.754279 17550 net.cpp:165] Memory required for data: 1448449500\nI0817 16:02:51.754286 17550 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:02:51.754297 17550 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:02:51.754303 17550 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:02:51.754310 17550 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.754320 17550 net.cpp:150] Setting up L3_b9_relu\nI0817 16:02:51.754328 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.754333 17550 net.cpp:165] Memory required for data: 1450497500\nI0817 16:02:51.754336 17550 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:02:51.754344 17550 net.cpp:100] Creating Layer post_pool\nI0817 16:02:51.754350 17550 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:02:51.754357 17550 net.cpp:408] post_pool -> post_pool\nI0817 16:02:51.754395 17550 net.cpp:150] Setting up post_pool\nI0817 16:02:51.754406 17550 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:02:51.754411 17550 net.cpp:165] Memory required for data: 1450529500\nI0817 16:02:51.754416 17550 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:02:51.754510 17550 net.cpp:100] Creating Layer post_FC\nI0817 16:02:51.754523 17550 net.cpp:434] post_FC <- post_pool\nI0817 16:02:51.754532 17550 net.cpp:408] post_FC -> post_FC_top\nI0817 16:02:51.754778 17550 net.cpp:150] Setting up post_FC\nI0817 16:02:51.754794 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.754799 17550 net.cpp:165] Memory required for data: 1450534500\nI0817 16:02:51.754808 17550 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:02:51.754817 17550 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:02:51.754823 17550 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:02:51.754834 17550 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:02:51.754845 17550 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:02:51.754891 17550 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:02:51.754905 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.754912 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.754917 17550 net.cpp:165] Memory required for data: 1450544500\nI0817 16:02:51.754922 17550 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:02:51.754972 17550 net.cpp:100] Creating Layer accuracy\nI0817 16:02:51.754986 17550 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:02:51.754992 17550 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:02:51.755000 17550 net.cpp:408] accuracy -> accuracy\nI0817 16:02:51.755044 17550 net.cpp:150] Setting up accuracy\nI0817 16:02:51.755058 17550 net.cpp:157] Top shape: (1)\nI0817 16:02:51.755062 17550 net.cpp:165] Memory required for data: 1450544504\nI0817 16:02:51.755067 17550 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:51.755084 17550 net.cpp:100] Creating Layer loss\nI0817 16:02:51.755090 17550 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:02:51.755098 17550 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:02:51.755108 17550 net.cpp:408] loss -> loss\nI0817 16:02:51.755156 17550 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:51.755319 17550 net.cpp:150] Setting up loss\nI0817 16:02:51.755333 17550 net.cpp:157] Top shape: (1)\nI0817 16:02:51.755339 17550 net.cpp:160]     with loss weight 1\nI0817 16:02:51.755414 17550 net.cpp:165] Memory required for data: 1450544508\nI0817 16:02:51.755424 17550 net.cpp:226] loss needs backward computation.\nI0817 16:02:51.755430 17550 net.cpp:228] accuracy does not need backward computation.\nI0817 16:02:51.755436 17550 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:02:51.755442 17550 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:02:51.755447 17550 net.cpp:226] post_pool needs backward computation.\nI0817 16:02:51.755451 17550 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:02:51.755456 17550 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.755461 17550 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.755466 17550 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.755471 17550 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.755476 17550 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.755481 17550 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.755486 17550 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.755491 17550 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.755496 17550 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:02:51.755501 17550 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:02:51.755506 17550 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.755511 17550 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.755515 17550 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.755522 17550 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.755527 17550 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.755530 17550 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:02:51.755535 17550 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.755540 17550 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.755547 17550 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:02:51.755551 17550 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:02:51.755556 17550 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:02:51.755561 17550 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.755566 17550 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.755571 17550 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.755576 17550 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.755581 17550 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.755586 17550 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.755591 17550 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.755596 17550 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:02:51.755601 17550 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:02:51.755606 17550 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.755611 17550 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.755616 17550 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.755621 17550 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.755630 17550 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.755642 17550 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.755647 17550 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.755652 17550 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.755658 17550 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:02:51.755663 17550 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:02:51.755668 17550 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:02:51.755673 17550 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.755678 17550 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.755684 17550 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.755689 17550 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.755694 17550 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.755698 17550 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.755704 17550 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.755709 17550 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:02:51.755714 17550 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:02:51.755719 17550 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.755725 17550 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.755729 17550 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.755735 17550 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.755740 17550 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.755744 17550 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.755749 17550 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.755754 17550 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.755760 17550 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:02:51.755765 17550 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:02:51.755770 17550 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:02:51.755775 17550 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.755780 17550 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.755785 17550 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.755790 17550 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.755795 17550 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.755800 
17550 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.755805 17550 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.755810 17550 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:02:51.755815 17550 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:02:51.755820 17550 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.755826 17550 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.755831 17550 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.755837 17550 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.755842 17550 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.755847 17550 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.755851 17550 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.755857 17550 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.755862 17550 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:02:51.755867 17550 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:02:51.755873 17550 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:02:51.755878 17550 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:02:51.755888 17550 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.755894 17550 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:02:51.755903 17550 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.755908 17550 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.755914 17550 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.755919 17550 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.755924 17550 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:02:51.755929 17550 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.755935 17550 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.755949 17550 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:02:51.755954 17550 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:02:51.755959 17550 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.755965 17550 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.755970 17550 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.755976 17550 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.755981 17550 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.755986 17550 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.755991 17550 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.755997 17550 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.756002 17550 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:02:51.756008 17550 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:02:51.756013 17550 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.756019 17550 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.756024 17550 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.756029 17550 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.756034 17550 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.756041 17550 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:02:51.756045 17550 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.756052 17550 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.756057 17550 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:02:51.756062 17550 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:02:51.756067 17550 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:02:51.756073 17550 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.756078 17550 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.756084 17550 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.756089 17550 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.756094 17550 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.756099 17550 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.756105 17550 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.756110 17550 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:02:51.756116 17550 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:02:51.756121 17550 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.756127 17550 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.756132 17550 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.756137 17550 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.756144 17550 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.756148 17550 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.756160 17550 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.756165 17550 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.756171 17550 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:02:51.756176 17550 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:02:51.756182 17550 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:02:51.756188 17550 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.756193 17550 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.756199 17550 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.756204 17550 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.756209 17550 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.756214 17550 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.756219 17550 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.756225 17550 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:02:51.756230 17550 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:02:51.756235 17550 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.756242 17550 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.756247 17550 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.756253 17550 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.756258 17550 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.756263 17550 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.756268 17550 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.756273 17550 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.756279 17550 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:02:51.756284 17550 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:02:51.756289 17550 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:02:51.756299 17550 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.756304 17550 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.756310 17550 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.756315 17550 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.756320 17550 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.756326 17550 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.756331 17550 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.756337 17550 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:02:51.756342 17550 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:02:51.756347 17550 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.756353 17550 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.756359 17550 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.756364 17550 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.756369 17550 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.756374 17550 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.756381 17550 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.756386 17550 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.756392 17550 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:02:51.756397 17550 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:02:51.756403 17550 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:02:51.756408 17550 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:02:51.756413 17550 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.756424 17550 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:02:51.756431 17550 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.756436 17550 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.756441 17550 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.756448 17550 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.756453 17550 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:02:51.756458 17550 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.756464 17550 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.756469 17550 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:02:51.756474 17550 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:02:51.756479 17550 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.756485 17550 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.756490 17550 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.756496 17550 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.756501 17550 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.756507 17550 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.756513 17550 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.756518 17550 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.756525 17550 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:02:51.756530 17550 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:02:51.756534 17550 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.756541 17550 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.756546 17550 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.756551 17550 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.756557 17550 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.756562 17550 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:02:51.756567 17550 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.756573 17550 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.756578 17550 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:02:51.756584 17550 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:02:51.756590 17550 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:02:51.756597 17550 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.756602 17550 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.756608 17550 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.756613 17550 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.756618 17550 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.756623 17550 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.756628 17550 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.756634 17550 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:02:51.756640 17550 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:02:51.756645 17550 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.756652 17550 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.756657 17550 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.756664 17550 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.756669 17550 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.756673 17550 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.756680 17550 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.756690 17550 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.756695 17550 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:02:51.756701 17550 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:02:51.756706 17550 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:02:51.756712 17550 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.756718 17550 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.756723 17550 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.756729 17550 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.756734 17550 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.756739 17550 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.756745 17550 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.756752 17550 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:02:51.756757 17550 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:02:51.756762 17550 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.756768 17550 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.756774 17550 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.756779 17550 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.756785 17550 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.756790 17550 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.756796 17550 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.756801 17550 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.756808 17550 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:02:51.756814 17550 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:02:51.756819 17550 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:02:51.756825 17550 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.756831 17550 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.756836 17550 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.756842 17550 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.756847 17550 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.756852 17550 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.756858 17550 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.756865 17550 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:02:51.756870 17550 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:02:51.756875 17550 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.756881 17550 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.756887 17550 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.756893 17550 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.756898 17550 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.756904 17550 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.756909 17550 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.756916 17550 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.756922 17550 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:02:51.756927 17550 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:02:51.756932 17550 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.756944 17550 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.756952 17550 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.756958 17550 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.756968 17550 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.756974 17550 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:02:51.756979 17550 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.756991 17550 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.756997 17550 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:02:51.757002 17550 net.cpp:226] pre_relu needs backward computation.\nI0817 16:02:51.757009 17550 net.cpp:226] pre_scale needs backward computation.\nI0817 16:02:51.757014 17550 net.cpp:226] pre_bn needs backward computation.\nI0817 16:02:51.757019 17550 net.cpp:226] pre_conv needs backward computation.\nI0817 16:02:51.757025 17550 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:02:51.757032 17550 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:02:51.757036 17550 net.cpp:270] This network produces output accuracy\nI0817 16:02:51.757043 17550 net.cpp:270] This network produces output loss\nI0817 16:02:51.757412 17550 net.cpp:283] Network initialization done.\nI0817 16:02:51.766893 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:51.766935 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:51.767010 17550 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:02:51.767390 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:02:51.767408 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:02:51.767419 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:02:51.767427 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:02:51.767437 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:02:51.767446 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:02:51.767455 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:02:51.767463 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:02:51.767472 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:02:51.767482 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:02:51.767490 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:02:51.767498 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:02:51.767508 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:02:51.767515 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:02:51.767524 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:02:51.767532 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:02:51.767541 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:02:51.767549 17550 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:02:51.767558 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:02:51.767578 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:02:51.767588 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:02:51.767596 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:02:51.767609 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:02:51.767618 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:02:51.767627 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:02:51.767634 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:02:51.767643 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:02:51.767652 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:02:51.767660 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:02:51.767668 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:02:51.767678 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:02:51.767685 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:02:51.767694 17550 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:02:51.767702 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:02:51.767711 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:02:51.767719 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:02:51.767729 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:02:51.767737 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:02:51.767745 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:02:51.767753 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:02:51.767765 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:02:51.767774 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:02:51.767782 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:02:51.767791 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:02:51.767799 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:02:51.767808 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:02:51.767817 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:02:51.767824 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:02:51.767833 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:02:51.767841 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:02:51.767858 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:02:51.767868 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:02:51.767876 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:02:51.767885 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:02:51.767894 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:02:51.767901 17550 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:02:51.769544 17550 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:02:51.771142 17550 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:02:51.771395 17550 net.cpp:100] Creating Layer dataLayer\nI0817 16:02:51.771417 17550 net.cpp:408] dataLayer -> data_top\nI0817 16:02:51.771433 17550 net.cpp:408] dataLayer -> label\nI0817 16:02:51.771445 17550 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:02:51.781525 17557 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:02:51.781776 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:51.789803 17550 net.cpp:150] Setting up dataLayer\nI0817 16:02:51.789824 17550 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:02:51.789832 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.789837 17550 net.cpp:165] Memory required for data: 1536500\nI0817 16:02:51.789844 17550 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:02:51.789856 17550 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:02:51.789862 17550 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:02:51.789883 17550 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:02:51.789898 17550 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:02:51.789973 17550 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:02:51.789991 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.789999 17550 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:51.790004 17550 net.cpp:165] Memory required for data: 1537500\nI0817 16:02:51.790009 17550 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:02:51.790029 17550 net.cpp:100] Creating Layer pre_conv\nI0817 16:02:51.790035 17550 net.cpp:434] pre_conv <- data_top\nI0817 16:02:51.790045 17550 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:02:51.790488 17550 net.cpp:150] Setting up pre_conv\nI0817 16:02:51.790514 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.790521 17550 net.cpp:165] Memory required for data: 9729500\nI0817 16:02:51.790535 17550 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:02:51.790549 17550 net.cpp:100] Creating Layer pre_bn\nI0817 16:02:51.790555 17550 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:02:51.790563 17550 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:02:51.790910 17550 net.cpp:150] Setting up pre_bn\nI0817 16:02:51.790925 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.790930 17550 net.cpp:165] Memory required for data: 17921500\nI0817 16:02:51.790952 17550 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:51.790969 17550 net.cpp:100] Creating Layer pre_scale\nI0817 16:02:51.790978 17550 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:02:51.790989 17550 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:02:51.791074 17550 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:51.791283 17550 net.cpp:150] Setting up pre_scale\nI0817 16:02:51.791297 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.791302 17550 net.cpp:165] Memory required for data: 26113500\nI0817 16:02:51.791312 17550 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:02:51.791321 17550 net.cpp:100] Creating Layer pre_relu\nI0817 16:02:51.791326 17550 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:02:51.791337 17550 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:02:51.791350 17550 net.cpp:150] Setting up pre_relu\nI0817 16:02:51.791358 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.791363 17550 net.cpp:165] Memory required for data: 
34305500\nI0817 16:02:51.791368 17550 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:02:51.791384 17550 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:02:51.791390 17550 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:02:51.791398 17550 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:02:51.791407 17550 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:02:51.791488 17550 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:02:51.791502 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.791508 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.791513 17550 net.cpp:165] Memory required for data: 50689500\nI0817 16:02:51.791518 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:02:51.791532 17550 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:02:51.791538 17550 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:02:51.791550 17550 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:02:51.791952 17550 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:02:51.791970 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.791975 17550 net.cpp:165] Memory required for data: 58881500\nI0817 16:02:51.791986 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:02:51.792003 17550 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:02:51.792009 17550 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:02:51.792021 17550 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:02:51.792616 17550 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:02:51.792632 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.792639 17550 net.cpp:165] Memory required for data: 67073500\nI0817 16:02:51.792649 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:51.792661 17550 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:02:51.792667 17550 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:02:51.792676 17550 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.792742 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:51.792932 17550 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:02:51.792950 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.792955 17550 net.cpp:165] Memory required for data: 75265500\nI0817 16:02:51.792976 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:02:51.792989 17550 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:02:51.792995 17550 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:02:51.793006 17550 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.793020 17550 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:02:51.793027 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.793032 17550 net.cpp:165] Memory required for data: 83457500\nI0817 16:02:51.793037 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:02:51.793051 17550 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:02:51.793056 17550 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:02:51.793067 17550 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:02:51.793479 17550 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:02:51.793495 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.793503 17550 net.cpp:165] Memory required for data: 91649500\nI0817 16:02:51.793512 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:02:51.793521 17550 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:02:51.793529 17550 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:02:51.793541 17550 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:02:51.793845 17550 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:02:51.793859 17550 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.793864 17550 net.cpp:165] Memory required for data: 99841500\nI0817 16:02:51.793885 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:51.793895 17550 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:02:51.793901 17550 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:02:51.793910 17550 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.793983 17550 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:51.794168 17550 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:02:51.794184 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794190 17550 net.cpp:165] Memory required for data: 108033500\nI0817 16:02:51.794199 17550 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:02:51.794211 17550 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:02:51.794217 17550 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:02:51.794224 17550 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:02:51.794234 17550 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:02:51.794272 17550 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:02:51.794284 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794291 17550 net.cpp:165] Memory required for data: 116225500\nI0817 16:02:51.794296 17550 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:02:51.794307 17550 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:02:51.794313 17550 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:02:51.794323 17550 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.794333 17550 net.cpp:150] Setting up L1_b1_relu\nI0817 16:02:51.794340 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794345 17550 net.cpp:165] Memory required for data: 124417500\nI0817 16:02:51.794350 17550 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.794363 17550 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.794369 17550 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:02:51.794376 17550 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:51.794386 17550 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:51.794445 17550 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:51.794456 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794476 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794481 17550 net.cpp:165] Memory required for data: 140801500\nI0817 16:02:51.794486 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:02:51.794500 17550 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:02:51.794507 17550 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:51.794517 17550 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:02:51.794924 17550 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:02:51.794945 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.794951 17550 net.cpp:165] Memory required for data: 148993500\nI0817 16:02:51.794960 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:02:51.794973 17550 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:02:51.794980 17550 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:02:51.794987 17550 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:02:51.795357 17550 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:02:51.795372 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.795377 17550 net.cpp:165] Memory required for data: 157185500\nI0817 16:02:51.795388 17550 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:02:51.795397 17550 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:02:51.795406 17550 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:02:51.795415 17550 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.795568 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:02:51.795773 17550 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:02:51.795786 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.795795 17550 net.cpp:165] Memory required for data: 165377500\nI0817 16:02:51.795805 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:02:51.795814 17550 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:02:51.795819 17550 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:02:51.795840 17550 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.795852 17550 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:02:51.795859 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.795864 17550 net.cpp:165] Memory required for data: 173569500\nI0817 16:02:51.795873 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:02:51.795887 17550 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:02:51.795893 17550 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:02:51.795902 17550 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:02:51.796319 17550 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:02:51.796334 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.796339 17550 net.cpp:165] Memory required for data: 181761500\nI0817 16:02:51.796347 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:02:51.796360 17550 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:02:51.796366 17550 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:02:51.796378 17550 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:02:51.796696 17550 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:02:51.796722 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.796730 17550 net.cpp:165] Memory required for data: 189953500\nI0817 16:02:51.796746 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:51.796756 17550 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:02:51.796763 17550 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:02:51.796777 17550 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.796844 17550 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:51.797041 17550 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:02:51.797055 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797060 17550 net.cpp:165] Memory required for data: 198145500\nI0817 16:02:51.797077 17550 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:02:51.797092 17550 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:02:51.797099 17550 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:02:51.797106 17550 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:51.797114 17550 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:02:51.797160 17550 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:02:51.797174 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797179 17550 net.cpp:165] Memory required for data: 206337500\nI0817 16:02:51.797184 17550 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:02:51.797194 17550 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:02:51.797199 17550 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:02:51.797205 17550 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.797215 17550 net.cpp:150] Setting up L1_b2_relu\nI0817 16:02:51.797221 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797226 17550 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:02:51.797235 17550 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.797242 17550 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.797247 17550 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:02:51.797257 17550 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:51.797271 17550 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:51.797325 17550 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:51.797338 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797343 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797348 17550 net.cpp:165] Memory required for data: 230913500\nI0817 16:02:51.797353 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:02:51.797377 17550 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:02:51.797385 17550 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:51.797396 17550 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:02:51.797798 17550 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:02:51.797816 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.797821 17550 net.cpp:165] Memory required for data: 239105500\nI0817 16:02:51.797829 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:02:51.797838 17550 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:02:51.797848 17550 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:02:51.797857 17550 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:02:51.798213 17550 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:02:51.798228 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.798233 17550 net.cpp:165] Memory required for data: 
247297500\nI0817 16:02:51.798243 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:51.798252 17550 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:02:51.798261 17550 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:02:51.798274 17550 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.798447 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:51.798640 17550 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:02:51.798655 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.798660 17550 net.cpp:165] Memory required for data: 255489500\nI0817 16:02:51.798669 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:02:51.798681 17550 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:02:51.798689 17550 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:02:51.798697 17550 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.798715 17550 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:02:51.798725 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.798730 17550 net.cpp:165] Memory required for data: 263681500\nI0817 16:02:51.798737 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:02:51.798753 17550 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:02:51.798758 17550 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:02:51.798771 17550 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:02:51.799190 17550 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:02:51.799206 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.799212 17550 net.cpp:165] Memory required for data: 271873500\nI0817 16:02:51.799223 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:02:51.799239 17550 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:02:51.799247 17550 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:02:51.799257 17550 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:02:51.799579 17550 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:02:51.799597 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.799602 17550 net.cpp:165] Memory required for data: 280065500\nI0817 16:02:51.799612 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:51.799621 17550 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:02:51.799628 17550 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:02:51.799635 17550 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.799710 17550 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:51.799892 17550 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:02:51.799906 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.799911 17550 net.cpp:165] Memory required for data: 288257500\nI0817 16:02:51.799919 17550 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:02:51.799932 17550 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:02:51.799950 17550 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:02:51.799959 17550 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:51.799968 17550 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:02:51.800007 17550 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:02:51.800019 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.800025 17550 net.cpp:165] Memory required for data: 296449500\nI0817 16:02:51.800030 17550 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:02:51.800038 17550 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:02:51.800043 17550 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:02:51.800053 17550 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.800063 17550 net.cpp:150] Setting up L1_b3_relu\nI0817 16:02:51.800071 17550 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:02:51.800074 17550 net.cpp:165] Memory required for data: 304641500\nI0817 16:02:51.800079 17550 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.800086 17550 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.800091 17550 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:02:51.800101 17550 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:51.800112 17550 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:51.800159 17550 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:51.800170 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.800176 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.800181 17550 net.cpp:165] Memory required for data: 321025500\nI0817 16:02:51.800186 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:02:51.800200 17550 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:02:51.800213 17550 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:51.800222 17550 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:02:51.800578 17550 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:02:51.800593 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.800598 17550 net.cpp:165] Memory required for data: 329217500\nI0817 16:02:51.800607 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:02:51.800621 17550 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:02:51.800626 17550 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:02:51.800640 17550 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:02:51.800943 17550 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:02:51.800958 17550 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:02:51.800963 17550 net.cpp:165] Memory required for data: 337409500\nI0817 16:02:51.800974 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:51.800982 17550 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:02:51.800988 17550 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:02:51.800997 17550 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.801057 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:51.801214 17550 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:02:51.801229 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.801232 17550 net.cpp:165] Memory required for data: 345601500\nI0817 16:02:51.801241 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:02:51.801254 17550 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:02:51.801260 17550 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:02:51.801267 17550 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.801276 17550 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:02:51.801283 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.801287 17550 net.cpp:165] Memory required for data: 353793500\nI0817 16:02:51.801292 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:02:51.801306 17550 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:02:51.801311 17550 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:02:51.801322 17550 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:02:51.801672 17550 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:02:51.801687 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.801692 17550 net.cpp:165] Memory required for data: 361985500\nI0817 16:02:51.801700 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:02:51.801712 17550 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:02:51.801718 17550 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:02:51.801726 17550 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:02:51.802007 17550 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:02:51.802022 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802027 17550 net.cpp:165] Memory required for data: 370177500\nI0817 16:02:51.802037 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:51.802045 17550 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:02:51.802052 17550 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:02:51.802062 17550 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.802120 17550 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:51.802279 17550 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:02:51.802296 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802301 17550 net.cpp:165] Memory required for data: 378369500\nI0817 16:02:51.802310 17550 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:02:51.802320 17550 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:02:51.802325 17550 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:02:51.802331 17550 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:51.802346 17550 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:02:51.802384 17550 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:02:51.802393 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802398 17550 net.cpp:165] Memory required for data: 386561500\nI0817 16:02:51.802403 17550 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:02:51.802410 17550 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:02:51.802419 17550 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:02:51.802426 17550 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.802435 17550 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:02:51.802443 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802446 17550 net.cpp:165] Memory required for data: 394753500\nI0817 16:02:51.802451 17550 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.802459 17550 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.802464 17550 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:02:51.802474 17550 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:51.802484 17550 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:51.802528 17550 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:51.802541 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802547 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802551 17550 net.cpp:165] Memory required for data: 411137500\nI0817 16:02:51.802556 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:02:51.802570 17550 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:02:51.802577 17550 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:51.802585 17550 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:02:51.802947 17550 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:02:51.802961 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.802966 17550 net.cpp:165] Memory required for data: 419329500\nI0817 16:02:51.802989 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:02:51.803001 17550 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:02:51.803007 17550 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:02:51.803015 17550 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:02:51.803285 17550 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:02:51.803298 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.803304 17550 net.cpp:165] Memory required for data: 427521500\nI0817 16:02:51.803314 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:51.803323 17550 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:02:51.803329 17550 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:02:51.803338 17550 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.803396 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:51.803553 17550 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:02:51.803567 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.803572 17550 net.cpp:165] Memory required for data: 435713500\nI0817 16:02:51.803581 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:02:51.803591 17550 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:02:51.803598 17550 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:02:51.803606 17550 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.803616 17550 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:02:51.803622 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.803627 17550 net.cpp:165] Memory required for data: 443905500\nI0817 16:02:51.803632 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:02:51.803652 17550 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:02:51.803658 17550 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:02:51.803670 17550 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:02:51.804038 17550 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:02:51.804052 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804057 17550 net.cpp:165] Memory required for data: 452097500\nI0817 16:02:51.804065 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:02:51.804106 17550 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:02:51.804117 17550 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:02:51.804129 17550 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:02:51.804407 17550 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:02:51.804420 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804425 17550 net.cpp:165] Memory required for data: 460289500\nI0817 16:02:51.804436 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:51.804445 17550 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:02:51.804450 17550 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:02:51.804457 17550 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.804519 17550 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:51.804677 17550 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:02:51.804689 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804694 17550 net.cpp:165] Memory required for data: 468481500\nI0817 16:02:51.804703 17550 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:02:51.804711 17550 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:02:51.804721 17550 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:02:51.804728 17550 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:51.804735 17550 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:02:51.804774 17550 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:02:51.804785 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804790 17550 net.cpp:165] Memory required for data: 476673500\nI0817 16:02:51.804795 17550 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:02:51.804802 17550 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:02:51.804808 17550 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:02:51.804817 17550 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.804827 17550 net.cpp:150] Setting up L1_b5_relu\nI0817 16:02:51.804834 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804838 17550 net.cpp:165] Memory required for data: 484865500\nI0817 16:02:51.804843 17550 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.804849 17550 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.804854 17550 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:02:51.804862 17550 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:51.804872 17550 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:51.804926 17550 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:51.804944 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804951 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.804955 17550 net.cpp:165] Memory required for data: 501249500\nI0817 16:02:51.804961 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:02:51.804976 17550 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:02:51.804982 17550 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:51.804991 17550 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:02:51.805351 17550 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:02:51.805366 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.805377 17550 net.cpp:165] Memory required for data: 509441500\nI0817 16:02:51.805385 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:02:51.805397 17550 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:02:51.805404 17550 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:02:51.805414 17550 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:02:51.805690 17550 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:02:51.805703 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.805708 17550 net.cpp:165] Memory required for data: 517633500\nI0817 16:02:51.805718 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:51.805727 17550 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:02:51.805733 17550 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:02:51.805740 17550 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.805802 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:51.805975 17550 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:02:51.805989 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.805994 17550 net.cpp:165] Memory required for data: 525825500\nI0817 16:02:51.806004 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:02:51.806015 17550 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:02:51.806020 17550 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:02:51.806027 17550 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.806037 17550 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:02:51.806044 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.806049 17550 net.cpp:165] Memory required for data: 534017500\nI0817 16:02:51.806053 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:02:51.806066 17550 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:02:51.806072 17550 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:02:51.806083 17550 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:02:51.806437 17550 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:02:51.806450 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.806455 17550 net.cpp:165] Memory required for data: 542209500\nI0817 16:02:51.806463 17550 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:02:51.806475 17550 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:02:51.806483 17550 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:02:51.806490 17550 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:02:51.806764 17550 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:02:51.806777 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.806782 17550 net.cpp:165] Memory required for data: 550401500\nI0817 16:02:51.806792 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:51.806802 17550 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:02:51.806807 17550 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:02:51.806814 17550 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.806874 17550 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:51.807044 17550 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:02:51.807056 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807061 17550 net.cpp:165] Memory required for data: 558593500\nI0817 16:02:51.807070 17550 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:02:51.807088 17550 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:02:51.807096 17550 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:02:51.807102 17550 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:51.807113 17550 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:02:51.807148 17550 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:02:51.807158 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807163 17550 net.cpp:165] Memory required for data: 566785500\nI0817 16:02:51.807175 17550 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:02:51.807183 17550 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:02:51.807189 17550 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:02:51.807196 17550 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.807205 17550 net.cpp:150] Setting up L1_b6_relu\nI0817 16:02:51.807212 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807216 17550 net.cpp:165] Memory required for data: 574977500\nI0817 16:02:51.807221 17550 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.807231 17550 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.807236 17550 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:02:51.807243 17550 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:51.807253 17550 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:51.807301 17550 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:51.807317 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807323 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807327 17550 net.cpp:165] Memory required for data: 591361500\nI0817 16:02:51.807332 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:02:51.807343 17550 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:02:51.807349 17550 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:51.807358 17550 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:02:51.807714 17550 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:02:51.807729 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.807734 17550 net.cpp:165] Memory required for data: 599553500\nI0817 16:02:51.807742 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:02:51.807754 17550 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:02:51.807760 17550 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:02:51.807768 17550 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:02:51.808053 17550 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:02:51.808068 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.808073 17550 net.cpp:165] Memory required for data: 607745500\nI0817 16:02:51.808084 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:51.808091 17550 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:02:51.808097 17550 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:02:51.808109 17550 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.808166 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:51.808327 17550 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:02:51.808341 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.808346 17550 net.cpp:165] Memory required for data: 615937500\nI0817 16:02:51.808354 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:02:51.808362 17550 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:02:51.808368 17550 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:02:51.808378 17550 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.808388 17550 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:02:51.808395 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.808399 17550 net.cpp:165] Memory required for data: 624129500\nI0817 16:02:51.808404 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:02:51.808418 17550 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:02:51.808423 17550 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:02:51.808432 17550 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:02:51.808796 17550 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:02:51.808811 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.808822 17550 
net.cpp:165] Memory required for data: 632321500\nI0817 16:02:51.808831 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:02:51.808840 17550 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:02:51.808850 17550 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:02:51.808858 17550 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:02:51.809135 17550 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:02:51.809149 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809154 17550 net.cpp:165] Memory required for data: 640513500\nI0817 16:02:51.809165 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:51.809173 17550 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:02:51.809180 17550 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:02:51.809190 17550 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.809248 17550 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:51.809409 17550 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:02:51.809427 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809432 17550 net.cpp:165] Memory required for data: 648705500\nI0817 16:02:51.809440 17550 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:02:51.809450 17550 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:02:51.809456 17550 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:02:51.809463 17550 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:51.809470 17550 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:02:51.809507 17550 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:02:51.809520 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809525 17550 net.cpp:165] Memory required for data: 656897500\nI0817 16:02:51.809530 17550 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:02:51.809540 17550 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:02:51.809545 17550 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:02:51.809552 17550 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.809561 17550 net.cpp:150] Setting up L1_b7_relu\nI0817 16:02:51.809568 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809572 17550 net.cpp:165] Memory required for data: 665089500\nI0817 16:02:51.809577 17550 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.809587 17550 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.809592 17550 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:02:51.809599 17550 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:51.809608 17550 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:51.809655 17550 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:51.809669 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809676 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.809681 17550 net.cpp:165] Memory required for data: 681473500\nI0817 16:02:51.809686 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:02:51.809697 17550 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:02:51.809703 17550 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:51.809711 17550 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:02:51.810081 17550 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:02:51.810096 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.810101 17550 net.cpp:165] Memory required for data: 689665500\nI0817 16:02:51.810111 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:02:51.810122 17550 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:02:51.810128 17550 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:02:51.810143 17550 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:02:51.810421 17550 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:02:51.810436 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.810441 17550 net.cpp:165] Memory required for data: 697857500\nI0817 16:02:51.810451 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:51.810458 17550 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:02:51.810464 17550 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:02:51.810475 17550 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.810533 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:51.810693 17550 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:02:51.810706 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.810711 17550 net.cpp:165] Memory required for data: 706049500\nI0817 16:02:51.810720 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:02:51.810727 17550 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:02:51.810734 17550 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:02:51.810741 17550 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.810750 17550 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:02:51.810757 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.810762 17550 net.cpp:165] Memory required for data: 714241500\nI0817 16:02:51.810767 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:02:51.810781 17550 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:02:51.810786 17550 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:02:51.810797 17550 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:02:51.811163 17550 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:02:51.811177 17550 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.811182 17550 net.cpp:165] Memory required for data: 722433500\nI0817 16:02:51.811190 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:02:51.811203 17550 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:02:51.811209 17550 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:02:51.811219 17550 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:02:51.811493 17550 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:02:51.811506 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.811511 17550 net.cpp:165] Memory required for data: 730625500\nI0817 16:02:51.811522 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:51.811529 17550 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:02:51.811535 17550 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:02:51.811545 17550 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.811604 17550 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:51.811764 17550 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:02:51.811782 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.811787 17550 net.cpp:165] Memory required for data: 738817500\nI0817 16:02:51.811796 17550 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:02:51.811805 17550 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:02:51.811811 17550 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:02:51.811817 17550 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:51.811825 17550 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:02:51.811862 17550 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:02:51.811873 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.811878 17550 net.cpp:165] Memory required for data: 747009500\nI0817 16:02:51.811883 17550 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:02:51.811892 17550 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:02:51.811900 17550 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:02:51.811908 17550 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.811923 17550 net.cpp:150] Setting up L1_b8_relu\nI0817 16:02:51.811931 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.811935 17550 net.cpp:165] Memory required for data: 755201500\nI0817 16:02:51.811946 17550 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.811954 17550 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.811959 17550 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:02:51.811970 17550 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:51.811980 17550 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:51.812028 17550 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:51.812037 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.812044 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.812048 17550 net.cpp:165] Memory required for data: 771585500\nI0817 16:02:51.812053 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:02:51.812067 17550 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:02:51.812073 17550 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:51.812083 17550 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:02:51.812450 17550 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:02:51.812467 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.812472 17550 net.cpp:165] Memory required for data: 779777500\nI0817 16:02:51.812481 17550 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:02:51.812490 17550 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:02:51.812496 17550 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:02:51.812507 17550 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:02:51.812785 17550 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:02:51.812798 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.812803 17550 net.cpp:165] Memory required for data: 787969500\nI0817 16:02:51.812813 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:51.812825 17550 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:02:51.812831 17550 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:02:51.812839 17550 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.812897 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:51.813067 17550 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:02:51.813081 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.813086 17550 net.cpp:165] Memory required for data: 796161500\nI0817 16:02:51.813096 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:02:51.813105 17550 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:02:51.813112 17550 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:02:51.813119 17550 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.813129 17550 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:02:51.813136 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.813140 17550 net.cpp:165] Memory required for data: 804353500\nI0817 16:02:51.813146 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:02:51.813160 17550 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:02:51.813166 17550 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:02:51.813177 17550 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:02:51.813531 17550 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:02:51.813544 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.813549 17550 net.cpp:165] Memory required for data: 812545500\nI0817 16:02:51.813558 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:02:51.813567 17550 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:02:51.813573 17550 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:02:51.813593 17550 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:02:51.813875 17550 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:02:51.813889 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.813894 17550 net.cpp:165] Memory required for data: 820737500\nI0817 16:02:51.813926 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:51.813936 17550 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:02:51.813948 17550 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:02:51.813966 17550 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.814025 17550 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:51.814185 17550 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:02:51.814198 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.814203 17550 net.cpp:165] Memory required for data: 828929500\nI0817 16:02:51.814213 17550 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:02:51.814221 17550 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:02:51.814227 17550 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:02:51.814234 17550 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:51.814244 17550 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:02:51.814280 17550 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:02:51.814293 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.814298 17550 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:02:51.814303 17550 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:02:51.814311 17550 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:02:51.814316 17550 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:02:51.814323 17550 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.814332 17550 net.cpp:150] Setting up L1_b9_relu\nI0817 16:02:51.814339 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.814344 17550 net.cpp:165] Memory required for data: 845313500\nI0817 16:02:51.814348 17550 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.814358 17550 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.814364 17550 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:02:51.814371 17550 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:51.814380 17550 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:51.814429 17550 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:51.814445 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.814451 17550 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:51.814455 17550 net.cpp:165] Memory required for data: 861697500\nI0817 16:02:51.814460 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:02:51.814471 17550 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:02:51.814477 17550 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:51.814486 17550 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:02:51.814846 17550 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:02:51.814862 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.814868 17550 net.cpp:165] Memory required for data: 
863745500\nI0817 16:02:51.814877 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:02:51.814885 17550 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:02:51.814891 17550 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:02:51.814899 17550 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:02:51.815181 17550 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:02:51.815194 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.815199 17550 net.cpp:165] Memory required for data: 865793500\nI0817 16:02:51.815209 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:51.815224 17550 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:02:51.815232 17550 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:02:51.815243 17550 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.815301 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:51.815469 17550 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:02:51.815481 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.815486 17550 net.cpp:165] Memory required for data: 867841500\nI0817 16:02:51.815495 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:02:51.815503 17550 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:02:51.815510 17550 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:02:51.815520 17550 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.815529 17550 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:02:51.815536 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.815541 17550 net.cpp:165] Memory required for data: 869889500\nI0817 16:02:51.815546 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:02:51.815558 17550 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:02:51.815564 17550 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:02:51.815572 17550 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:02:51.815932 17550 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:02:51.815950 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.815956 17550 net.cpp:165] Memory required for data: 871937500\nI0817 16:02:51.815964 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:02:51.815978 17550 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:02:51.815984 17550 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:02:51.815991 17550 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:02:51.816262 17550 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:02:51.816275 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.816280 17550 net.cpp:165] Memory required for data: 873985500\nI0817 16:02:51.816290 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:51.816299 17550 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:02:51.816305 17550 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:02:51.816315 17550 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.816375 17550 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:51.816537 17550 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:02:51.816550 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.816555 17550 net.cpp:165] Memory required for data: 876033500\nI0817 16:02:51.816563 17550 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:02:51.816573 17550 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:02:51.816581 17550 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:51.816591 17550 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:02:51.816622 17550 net.cpp:150] Setting up L2_b1_pool\nI0817 16:02:51.816632 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.816637 17550 net.cpp:165] Memory required for data: 878081500\nI0817 16:02:51.816642 17550 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:02:51.816653 17550 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:02:51.816658 17550 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:02:51.816665 17550 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:02:51.816673 17550 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:02:51.816709 17550 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:02:51.816717 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.816722 17550 net.cpp:165] Memory required for data: 880129500\nI0817 16:02:51.816727 17550 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:02:51.816735 17550 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:02:51.816746 17550 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:02:51.816754 17550 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.816763 17550 net.cpp:150] Setting up L2_b1_relu\nI0817 16:02:51.816771 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.816776 17550 net.cpp:165] Memory required for data: 882177500\nI0817 16:02:51.816779 17550 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:02:51.816789 17550 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:02:51.816799 17550 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:02:51.819068 17550 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:02:51.819087 17550 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:51.819095 17550 net.cpp:165] Memory required for data: 884225500\nI0817 16:02:51.819102 17550 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:02:51.819110 17550 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:02:51.819118 17550 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:02:51.819124 17550 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:02:51.819133 17550 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:02:51.819180 17550 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:02:51.819191 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.819196 17550 net.cpp:165] Memory required for data: 888321500\nI0817 16:02:51.819201 17550 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.819209 17550 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.819216 17550 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:02:51.819226 17550 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:51.819236 17550 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:51.819289 17550 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:51.819300 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.819308 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.819311 17550 net.cpp:165] Memory required for data: 896513500\nI0817 16:02:51.819316 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:02:51.819330 17550 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:02:51.819337 17550 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:51.819346 17550 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:02:51.819851 17550 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:02:51.819866 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.819871 17550 net.cpp:165] Memory required for data: 900609500\nI0817 16:02:51.819880 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:02:51.819892 17550 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:02:51.819900 17550 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:02:51.819911 17550 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:02:51.820195 17550 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:02:51.820209 17550 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:02:51.820214 17550 net.cpp:165] Memory required for data: 904705500\nI0817 16:02:51.820225 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:51.820233 17550 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:02:51.820240 17550 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:02:51.820247 17550 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.820308 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:51.820471 17550 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:02:51.820483 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.820488 17550 net.cpp:165] Memory required for data: 908801500\nI0817 16:02:51.820497 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:02:51.820504 17550 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:02:51.820511 17550 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:02:51.820529 17550 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.820540 17550 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:02:51.820547 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.820551 17550 net.cpp:165] Memory required for data: 912897500\nI0817 16:02:51.820556 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:02:51.820569 17550 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:02:51.820575 17550 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:02:51.820585 17550 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:02:51.821095 17550 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:02:51.821110 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821115 17550 net.cpp:165] Memory required for data: 916993500\nI0817 16:02:51.821125 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:02:51.821135 17550 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:02:51.821142 
17550 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:02:51.821151 17550 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:02:51.821416 17550 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:02:51.821432 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821437 17550 net.cpp:165] Memory required for data: 921089500\nI0817 16:02:51.821447 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:51.821456 17550 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:02:51.821462 17550 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:02:51.821470 17550 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.821528 17550 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:51.821691 17550 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:02:51.821704 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821709 17550 net.cpp:165] Memory required for data: 925185500\nI0817 16:02:51.821718 17550 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:02:51.821727 17550 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:02:51.821733 17550 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:02:51.821740 17550 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:51.821751 17550 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:02:51.821780 17550 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:02:51.821789 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821794 17550 net.cpp:165] Memory required for data: 929281500\nI0817 16:02:51.821799 17550 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:02:51.821810 17550 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:02:51.821815 17550 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:02:51.821822 17550 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.821831 17550 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:02:51.821838 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821843 17550 net.cpp:165] Memory required for data: 933377500\nI0817 16:02:51.821847 17550 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.821854 17550 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.821861 17550 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:02:51.821867 17550 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:51.821877 17550 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:51.821928 17550 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:51.821944 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821952 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.821957 17550 net.cpp:165] Memory required for data: 941569500\nI0817 16:02:51.821969 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:02:51.821982 17550 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:02:51.821990 17550 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:51.821998 17550 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:02:51.822502 17550 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:02:51.822517 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.822522 17550 net.cpp:165] Memory required for data: 945665500\nI0817 16:02:51.822530 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:02:51.822542 17550 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:02:51.822548 17550 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:02:51.822557 17550 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:02:51.822826 17550 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:02:51.822842 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.822847 17550 net.cpp:165] Memory required for data: 949761500\nI0817 16:02:51.822857 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:51.822866 17550 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:02:51.822872 17550 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:02:51.822880 17550 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.822942 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:51.823107 17550 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:02:51.823119 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.823124 17550 net.cpp:165] Memory required for data: 953857500\nI0817 16:02:51.823133 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:02:51.823141 17550 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:02:51.823148 17550 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:02:51.823158 17550 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.823168 17550 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:02:51.823174 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.823179 17550 net.cpp:165] Memory required for data: 957953500\nI0817 16:02:51.823184 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:02:51.823199 17550 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:02:51.823204 17550 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:02:51.823213 17550 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:02:51.823704 17550 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:02:51.823717 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.823722 17550 net.cpp:165] Memory required for data: 962049500\nI0817 16:02:51.823731 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:02:51.823742 17550 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:02:51.823750 17550 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:02:51.823757 17550 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:02:51.824038 17550 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:02:51.824051 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824056 17550 net.cpp:165] Memory required for data: 966145500\nI0817 16:02:51.824067 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:51.824081 17550 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:02:51.824089 17550 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:02:51.824095 17550 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.824154 17550 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:51.824312 17550 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:02:51.824326 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824331 17550 net.cpp:165] Memory required for data: 970241500\nI0817 16:02:51.824339 17550 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:02:51.824350 17550 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:02:51.824363 17550 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:02:51.824371 17550 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:51.824381 17550 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:02:51.824411 17550 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:02:51.824422 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824427 17550 net.cpp:165] Memory required for data: 974337500\nI0817 16:02:51.824432 17550 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:02:51.824453 17550 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:02:51.824460 17550 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:02:51.824467 17550 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.824476 17550 net.cpp:150] Setting up L2_b3_relu\nI0817 16:02:51.824483 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824488 17550 net.cpp:165] Memory required for data: 978433500\nI0817 16:02:51.824493 17550 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.824503 17550 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.824509 17550 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:02:51.824517 17550 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:51.824527 17550 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:51.824579 17550 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:51.824590 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824597 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.824602 17550 net.cpp:165] Memory required for data: 986625500\nI0817 16:02:51.824606 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:02:51.824617 17550 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:02:51.824623 17550 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:51.824635 17550 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:02:51.825139 17550 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:02:51.825155 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.825160 17550 net.cpp:165] Memory required for data: 990721500\nI0817 16:02:51.825167 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:02:51.825177 17550 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:02:51.825183 17550 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:02:51.825196 17550 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:02:51.825562 17550 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:02:51.825583 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.825592 17550 net.cpp:165] Memory required for data: 994817500\nI0817 16:02:51.825613 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:51.825630 17550 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:02:51.825640 17550 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:02:51.825652 17550 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.825716 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:51.825883 17550 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:02:51.825896 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.825901 17550 net.cpp:165] Memory required for data: 998913500\nI0817 16:02:51.825911 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:02:51.825920 17550 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:02:51.825927 17550 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:02:51.825934 17550 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.825953 17550 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:02:51.825960 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.825965 17550 net.cpp:165] Memory required for data: 1003009500\nI0817 16:02:51.825978 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:02:51.825994 17550 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:02:51.825999 17550 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:02:51.826010 17550 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:02:51.826514 17550 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:02:51.826529 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.826534 17550 net.cpp:165] Memory required for data: 1007105500\nI0817 16:02:51.826541 17550 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:02:51.826551 17550 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:02:51.826557 17550 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:02:51.826565 17550 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:02:51.826835 17550 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:02:51.826848 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.826853 17550 net.cpp:165] Memory required for data: 1011201500\nI0817 16:02:51.826864 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:51.826872 17550 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:02:51.826879 17550 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:02:51.826890 17550 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.826956 17550 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:51.827122 17550 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:02:51.827136 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827141 17550 net.cpp:165] Memory required for data: 1015297500\nI0817 16:02:51.827149 17550 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:02:51.827158 17550 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:02:51.827164 17550 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:02:51.827172 17550 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:51.827183 17550 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:02:51.827210 17550 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:02:51.827224 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827229 17550 net.cpp:165] Memory required for data: 1019393500\nI0817 16:02:51.827240 17550 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:02:51.827247 17550 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:02:51.827253 17550 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:02:51.827260 17550 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.827270 17550 net.cpp:150] Setting up L2_b4_relu\nI0817 16:02:51.827276 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827281 17550 net.cpp:165] Memory required for data: 1023489500\nI0817 16:02:51.827286 17550 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.827296 17550 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.827301 17550 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:02:51.827308 17550 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:51.827318 17550 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:51.827368 17550 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:51.827380 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827388 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827391 17550 net.cpp:165] Memory required for data: 1031681500\nI0817 16:02:51.827396 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:02:51.827407 17550 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:02:51.827414 17550 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:51.827425 17550 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:02:51.827944 17550 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:02:51.827958 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.827965 17550 net.cpp:165] Memory required for data: 1035777500\nI0817 16:02:51.827972 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:02:51.827982 17550 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:02:51.827988 17550 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:02:51.827999 17550 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:02:51.828271 17550 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:02:51.828284 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.828289 17550 net.cpp:165] Memory required for data: 1039873500\nI0817 16:02:51.828299 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:51.828310 17550 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:02:51.828316 17550 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:02:51.828325 17550 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.828382 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:51.828552 17550 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:02:51.828564 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.828569 17550 net.cpp:165] Memory required for data: 1043969500\nI0817 16:02:51.828578 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:02:51.828586 17550 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:02:51.828593 17550 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:02:51.828603 17550 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.828613 17550 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:02:51.828620 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.828624 17550 net.cpp:165] Memory required for data: 1048065500\nI0817 16:02:51.828629 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:02:51.828642 17550 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:02:51.828649 17550 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:02:51.828660 17550 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:02:51.829159 17550 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:02:51.829174 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.829180 17550 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:02:51.829187 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:02:51.829196 17550 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:02:51.829203 17550 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:02:51.829211 17550 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:02:51.829483 17550 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:02:51.829495 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.829500 17550 net.cpp:165] Memory required for data: 1056257500\nI0817 16:02:51.829510 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:51.829519 17550 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:02:51.829525 17550 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:02:51.829536 17550 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.829594 17550 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:51.829758 17550 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:02:51.829771 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.829777 17550 net.cpp:165] Memory required for data: 1060353500\nI0817 16:02:51.829784 17550 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:02:51.829794 17550 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:02:51.829800 17550 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:02:51.829807 17550 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:51.829818 17550 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:02:51.829846 17550 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:02:51.829864 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.829869 17550 net.cpp:165] Memory required for data: 1064449500\nI0817 16:02:51.829874 17550 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:02:51.829883 17550 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:02:51.829890 17550 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:02:51.829896 17550 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.829906 17550 net.cpp:150] Setting up L2_b5_relu\nI0817 16:02:51.829913 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.829918 17550 net.cpp:165] Memory required for data: 1068545500\nI0817 16:02:51.829922 17550 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.829932 17550 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.829944 17550 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:02:51.829952 17550 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:51.829962 17550 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:51.830013 17550 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:51.830027 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.830034 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.830039 17550 net.cpp:165] Memory required for data: 1076737500\nI0817 16:02:51.830044 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:02:51.830054 17550 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:02:51.830061 17550 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:51.830070 17550 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:02:51.830571 17550 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:02:51.830585 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.830590 17550 net.cpp:165] Memory required for data: 1080833500\nI0817 16:02:51.830598 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:02:51.830610 17550 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:02:51.830616 17550 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:02:51.830626 17550 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:02:51.830894 17550 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:02:51.830909 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.830914 17550 net.cpp:165] Memory required for data: 1084929500\nI0817 16:02:51.830924 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:51.830934 17550 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:02:51.830945 17550 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:02:51.830955 17550 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.831014 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:51.831176 17550 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:02:51.831189 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.831194 17550 net.cpp:165] Memory required for data: 1089025500\nI0817 16:02:51.831203 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:02:51.831212 17550 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:02:51.831218 17550 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:02:51.831228 17550 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.831238 17550 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:02:51.831244 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.831249 17550 net.cpp:165] Memory required for data: 1093121500\nI0817 16:02:51.831254 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:02:51.831267 17550 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:02:51.831274 17550 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:02:51.831281 17550 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:02:51.831784 17550 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:02:51.831797 17550 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.831802 17550 net.cpp:165] Memory required for data: 1097217500\nI0817 16:02:51.831811 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:02:51.831825 17550 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:02:51.831830 17550 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:02:51.831838 17550 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:02:51.832115 17550 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:02:51.832129 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832134 17550 net.cpp:165] Memory required for data: 1101313500\nI0817 16:02:51.832144 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:51.832154 17550 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:02:51.832159 17550 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:02:51.832170 17550 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.832228 17550 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:51.832386 17550 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:02:51.832399 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832403 17550 net.cpp:165] Memory required for data: 1105409500\nI0817 16:02:51.832412 17550 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:02:51.832422 17550 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:02:51.832427 17550 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:02:51.832434 17550 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:51.832445 17550 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:02:51.832473 17550 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:02:51.832482 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832486 17550 net.cpp:165] Memory required for data: 1109505500\nI0817 16:02:51.832491 17550 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:02:51.832502 17550 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:02:51.832509 17550 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:02:51.832515 17550 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.832525 17550 net.cpp:150] Setting up L2_b6_relu\nI0817 16:02:51.832531 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832536 17550 net.cpp:165] Memory required for data: 1113601500\nI0817 16:02:51.832540 17550 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.832551 17550 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.832556 17550 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:02:51.832563 17550 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:51.832573 17550 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:51.832620 17550 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:51.832636 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832643 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.832648 17550 net.cpp:165] Memory required for data: 1121793500\nI0817 16:02:51.832653 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:02:51.832664 17550 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:02:51.832670 17550 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:51.832679 17550 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:02:51.834188 17550 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:02:51.834206 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.834211 17550 net.cpp:165] Memory required for data: 1125889500\nI0817 16:02:51.834221 17550 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:02:51.834241 17550 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:02:51.834249 17550 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:02:51.834259 17550 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:02:51.834533 17550 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:02:51.834547 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.834552 17550 net.cpp:165] Memory required for data: 1129985500\nI0817 16:02:51.834563 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:51.834570 17550 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:02:51.834578 17550 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:02:51.834584 17550 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.834646 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:51.834806 17550 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:02:51.834820 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.834825 17550 net.cpp:165] Memory required for data: 1134081500\nI0817 16:02:51.834833 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:02:51.834841 17550 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:02:51.834851 17550 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:02:51.834858 17550 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.834868 17550 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:02:51.834875 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.834880 17550 net.cpp:165] Memory required for data: 1138177500\nI0817 16:02:51.834885 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:02:51.834898 17550 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:02:51.834905 17550 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:02:51.834913 17550 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:02:51.835427 17550 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:02:51.835441 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.835446 17550 net.cpp:165] Memory required for data: 1142273500\nI0817 16:02:51.835455 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:02:51.835467 17550 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:02:51.835474 17550 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:02:51.835482 17550 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:02:51.835757 17550 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:02:51.835772 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.835777 17550 net.cpp:165] Memory required for data: 1146369500\nI0817 16:02:51.835788 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:51.835796 17550 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:02:51.835803 17550 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:02:51.835809 17550 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.835867 17550 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:51.836040 17550 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:02:51.836053 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836058 17550 net.cpp:165] Memory required for data: 1150465500\nI0817 16:02:51.836067 17550 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:02:51.836076 17550 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:02:51.836082 17550 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:02:51.836089 17550 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:51.836100 17550 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:02:51.836129 17550 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:02:51.836138 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836143 17550 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:02:51.836148 17550 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:02:51.836159 17550 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:02:51.836171 17550 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:02:51.836179 17550 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.836189 17550 net.cpp:150] Setting up L2_b7_relu\nI0817 16:02:51.836196 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836200 17550 net.cpp:165] Memory required for data: 1158657500\nI0817 16:02:51.836205 17550 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.836212 17550 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.836217 17550 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:02:51.836225 17550 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:51.836235 17550 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:51.836287 17550 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:51.836299 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836307 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836310 17550 net.cpp:165] Memory required for data: 1166849500\nI0817 16:02:51.836315 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:02:51.836329 17550 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:02:51.836336 17550 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:51.836345 17550 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:02:51.836833 17550 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:02:51.836846 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.836851 17550 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:02:51.836860 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:02:51.836872 17550 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:02:51.836879 17550 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:02:51.836887 17550 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:02:51.837167 17550 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:02:51.837183 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.837188 17550 net.cpp:165] Memory required for data: 1175041500\nI0817 16:02:51.837199 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:51.837208 17550 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:02:51.837214 17550 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:02:51.837221 17550 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.837281 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:51.837443 17550 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:02:51.837456 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.837460 17550 net.cpp:165] Memory required for data: 1179137500\nI0817 16:02:51.837469 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:02:51.837477 17550 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:02:51.837483 17550 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:02:51.837493 17550 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.837503 17550 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:02:51.837510 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.837515 17550 net.cpp:165] Memory required for data: 1183233500\nI0817 16:02:51.837520 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:02:51.837533 17550 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:02:51.837539 17550 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:02:51.837548 17550 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:02:51.838052 17550 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:02:51.838065 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838070 17550 net.cpp:165] Memory required for data: 1187329500\nI0817 16:02:51.838079 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:02:51.838098 17550 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:02:51.838104 17550 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:02:51.838114 17550 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:02:51.838392 17550 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:02:51.838405 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838410 17550 net.cpp:165] Memory required for data: 1191425500\nI0817 16:02:51.838420 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:51.838433 17550 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:02:51.838438 17550 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:02:51.838446 17550 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.838506 17550 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:51.838668 17550 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:02:51.838680 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838685 17550 net.cpp:165] Memory required for data: 1195521500\nI0817 16:02:51.838695 17550 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:02:51.838706 17550 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:02:51.838713 17550 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:02:51.838719 17550 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:51.838732 17550 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:02:51.838760 17550 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:02:51.838773 17550 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:51.838776 17550 net.cpp:165] Memory required for data: 1199617500\nI0817 16:02:51.838783 17550 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:02:51.838789 17550 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:02:51.838795 17550 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:02:51.838805 17550 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.838815 17550 net.cpp:150] Setting up L2_b8_relu\nI0817 16:02:51.838822 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838826 17550 net.cpp:165] Memory required for data: 1203713500\nI0817 16:02:51.838831 17550 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.838838 17550 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.838843 17550 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:02:51.838851 17550 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:51.838873 17550 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:51.838932 17550 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:51.838950 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838956 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.838961 17550 net.cpp:165] Memory required for data: 1211905500\nI0817 16:02:51.838969 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:02:51.838981 17550 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:02:51.838987 17550 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:51.839000 17550 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:02:51.839498 17550 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:02:51.839512 17550 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:51.839517 17550 net.cpp:165] Memory required for data: 1216001500\nI0817 16:02:51.839526 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:02:51.839535 17550 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:02:51.839541 17550 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:02:51.839552 17550 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:02:51.839834 17550 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:02:51.839854 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.839859 17550 net.cpp:165] Memory required for data: 1220097500\nI0817 16:02:51.839869 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:51.839880 17550 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:02:51.839887 17550 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:02:51.839895 17550 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.839962 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:51.840127 17550 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:02:51.840139 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.840144 17550 net.cpp:165] Memory required for data: 1224193500\nI0817 16:02:51.840153 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:02:51.840164 17550 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:02:51.840171 17550 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:02:51.840178 17550 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.840191 17550 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:02:51.840198 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.840204 17550 net.cpp:165] Memory required for data: 1228289500\nI0817 16:02:51.840209 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:02:51.840219 17550 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:02:51.840224 17550 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:02:51.840235 17550 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:02:51.841742 17550 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:02:51.841758 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.841763 17550 net.cpp:165] Memory required for data: 1232385500\nI0817 16:02:51.841773 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:02:51.841786 17550 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:02:51.841794 17550 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:02:51.841804 17550 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:02:51.842084 17550 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:02:51.842097 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842103 17550 net.cpp:165] Memory required for data: 1236481500\nI0817 16:02:51.842149 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:51.842164 17550 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:02:51.842170 17550 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:02:51.842178 17550 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.842242 17550 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:51.842396 17550 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:02:51.842409 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842414 17550 net.cpp:165] Memory required for data: 1240577500\nI0817 16:02:51.842422 17550 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:02:51.842432 17550 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:02:51.842438 17550 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:02:51.842448 17550 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:51.842456 17550 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:02:51.842484 17550 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:02:51.842496 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842501 17550 net.cpp:165] Memory required for data: 1244673500\nI0817 16:02:51.842506 17550 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:02:51.842514 17550 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:02:51.842520 17550 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:02:51.842527 17550 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.842536 17550 net.cpp:150] Setting up L2_b9_relu\nI0817 16:02:51.842543 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842555 17550 net.cpp:165] Memory required for data: 1248769500\nI0817 16:02:51.842561 17550 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.842571 17550 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.842577 17550 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:02:51.842586 17550 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:51.842594 17550 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:51.842649 17550 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:51.842661 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842669 17550 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:51.842672 17550 net.cpp:165] Memory required for data: 1256961500\nI0817 16:02:51.842677 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:02:51.842689 17550 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:02:51.842695 17550 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:51.842707 17550 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:02:51.843217 17550 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:02:51.843232 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.843237 17550 net.cpp:165] Memory required for data: 1257985500\nI0817 16:02:51.843246 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:02:51.843255 17550 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:02:51.843262 17550 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:02:51.843273 17550 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:02:51.843555 17550 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:02:51.843571 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.843576 17550 net.cpp:165] Memory required for data: 1259009500\nI0817 16:02:51.843587 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:51.843596 17550 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:02:51.843603 17550 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:02:51.843611 17550 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.843669 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:51.843835 17550 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:02:51.843849 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.843854 17550 net.cpp:165] Memory required for data: 1260033500\nI0817 16:02:51.843863 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:02:51.843871 17550 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:02:51.843881 17550 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:02:51.843888 17550 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:51.843899 17550 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:02:51.843905 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.843909 17550 net.cpp:165] Memory required for data: 1261057500\nI0817 16:02:51.843914 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:02:51.843927 17550 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:02:51.843933 17550 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:02:51.843948 17550 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:02:51.844445 17550 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:02:51.844460 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.844465 17550 net.cpp:165] Memory required for data: 1262081500\nI0817 16:02:51.844473 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:02:51.844485 17550 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:02:51.844492 17550 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:02:51.844503 17550 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:02:51.844781 17550 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:02:51.844800 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.844805 17550 net.cpp:165] Memory required for data: 1263105500\nI0817 16:02:51.844816 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:51.844825 17550 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:02:51.844831 17550 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:02:51.844846 17550 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:02:51.844904 17550 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:51.845078 17550 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:02:51.845093 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.845098 17550 net.cpp:165] Memory required for data: 1264129500\nI0817 16:02:51.845106 17550 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:02:51.845115 17550 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:02:51.845121 17550 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:51.845134 17550 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:02:51.845172 17550 net.cpp:150] Setting up L3_b1_pool\nI0817 16:02:51.845183 17550 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:02:51.845188 17550 net.cpp:165] Memory required for data: 1265153500\nI0817 16:02:51.845194 17550 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:02:51.845202 17550 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:02:51.845209 17550 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:02:51.845216 17550 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:02:51.845227 17550 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:02:51.845260 17550 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:02:51.845269 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.845274 17550 net.cpp:165] Memory required for data: 1266177500\nI0817 16:02:51.845279 17550 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:02:51.845286 17550 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:02:51.845293 17550 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:02:51.845299 17550 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:02:51.845309 17550 net.cpp:150] Setting up L3_b1_relu\nI0817 16:02:51.845315 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.845320 17550 net.cpp:165] Memory required for data: 1267201500\nI0817 16:02:51.845325 17550 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:02:51.845337 17550 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:02:51.845345 17550 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:02:51.846626 17550 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:02:51.846650 17550 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:51.846660 17550 net.cpp:165] Memory required for data: 1268225500\nI0817 16:02:51.846668 17550 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:02:51.846678 17550 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:02:51.846683 17550 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:02:51.846690 17550 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:02:51.846701 17550 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:02:51.846746 17550 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:02:51.846761 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.846766 17550 net.cpp:165] Memory required for data: 1270273500\nI0817 16:02:51.846772 17550 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.846779 17550 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.846786 17550 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:02:51.846793 17550 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:51.846802 17550 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:51.846861 17550 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:51.846873 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.846889 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.846894 17550 net.cpp:165] Memory required for data: 1274369500\nI0817 16:02:51.846899 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:02:51.846912 17550 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:02:51.846920 17550 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:51.846930 17550 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:02:51.847986 17550 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:02:51.848001 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.848006 17550 net.cpp:165] Memory required for data: 1276417500\nI0817 16:02:51.848016 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:02:51.848028 17550 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:02:51.848036 17550 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:02:51.848047 17550 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:02:51.848322 17550 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:02:51.848335 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.848340 17550 net.cpp:165] Memory required for data: 1278465500\nI0817 16:02:51.848351 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:51.848361 17550 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:02:51.848366 17550 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:02:51.848377 17550 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.848436 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:51.848600 17550 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:02:51.848613 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.848618 17550 net.cpp:165] Memory required for data: 1280513500\nI0817 16:02:51.848628 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:02:51.848639 17550 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:02:51.848644 17550 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:02:51.848652 17550 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:51.848662 17550 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:02:51.848670 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.848675 17550 net.cpp:165] Memory required for data: 1282561500\nI0817 16:02:51.848678 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:02:51.848692 17550 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:02:51.848698 17550 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:02:51.848709 17550 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:02:51.849763 17550 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:02:51.849777 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.849783 17550 net.cpp:165] Memory required for data: 1284609500\nI0817 16:02:51.849792 17550 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:02:51.849802 17550 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:02:51.849807 17550 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:02:51.849818 17550 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:02:51.850101 17550 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:02:51.850118 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850123 17550 net.cpp:165] Memory required for data: 1286657500\nI0817 16:02:51.850133 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:51.850142 17550 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:02:51.850149 17550 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:02:51.850157 17550 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:02:51.850217 17550 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:51.850384 17550 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:02:51.850397 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850402 17550 net.cpp:165] Memory required for data: 1288705500\nI0817 16:02:51.850419 17550 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:02:51.850431 17550 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:02:51.850438 17550 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:02:51.850445 17550 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:51.850453 17550 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:02:51.850491 17550 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:02:51.850502 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850507 17550 net.cpp:165] Memory required for data: 1290753500\nI0817 16:02:51.850512 17550 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:02:51.850519 17550 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:02:51.850525 17550 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:02:51.850533 17550 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:02:51.850541 17550 net.cpp:150] Setting up L3_b2_relu\nI0817 16:02:51.850548 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850553 17550 net.cpp:165] Memory required for data: 1292801500\nI0817 16:02:51.850558 17550 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.850565 17550 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.850570 17550 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:02:51.850581 17550 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:51.850591 17550 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:51.850638 17550 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:51.850649 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850656 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.850661 17550 net.cpp:165] Memory required for data: 1296897500\nI0817 16:02:51.850666 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:02:51.850680 17550 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:02:51.850687 17550 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:51.850697 17550 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:02:51.851744 17550 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:02:51.851759 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.851764 17550 net.cpp:165] Memory required for data: 1298945500\nI0817 16:02:51.851773 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:02:51.851785 17550 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:02:51.851793 17550 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:02:51.851800 17550 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:02:51.852077 17550 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:02:51.852090 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.852095 17550 net.cpp:165] Memory required for data: 1300993500\nI0817 16:02:51.852105 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:51.852118 17550 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:02:51.852124 17550 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:02:51.852131 17550 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.852190 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:51.852351 17550 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:02:51.852363 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.852368 17550 net.cpp:165] Memory required for data: 1303041500\nI0817 16:02:51.852377 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:02:51.852388 17550 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:02:51.852394 17550 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:02:51.852402 17550 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:51.852412 17550 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:02:51.852425 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.852432 17550 net.cpp:165] Memory required for data: 1305089500\nI0817 16:02:51.852435 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:02:51.852452 17550 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:02:51.852458 17550 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:02:51.852469 17550 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:02:51.853526 17550 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:02:51.853541 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.853546 17550 net.cpp:165] Memory required for data: 1307137500\nI0817 16:02:51.853555 17550 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:02:51.853564 17550 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:02:51.853571 17550 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:02:51.853582 17550 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:02:51.853857 17550 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:02:51.853874 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.853880 17550 net.cpp:165] Memory required for data: 1309185500\nI0817 16:02:51.853890 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:51.853899 17550 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:02:51.853905 17550 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:02:51.853914 17550 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:02:51.853981 17550 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:51.854140 17550 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:02:51.854153 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.854158 17550 net.cpp:165] Memory required for data: 1311233500\nI0817 16:02:51.854166 17550 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:02:51.854178 17550 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:02:51.854185 17550 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:02:51.854192 17550 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:51.854200 17550 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:02:51.854238 17550 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:02:51.854249 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.854254 17550 net.cpp:165] Memory required for data: 1313281500\nI0817 16:02:51.854259 17550 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:02:51.854266 17550 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:02:51.854272 17550 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:02:51.854279 17550 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:02:51.854288 17550 net.cpp:150] Setting up L3_b3_relu\nI0817 16:02:51.854295 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.854300 17550 net.cpp:165] Memory required for data: 1315329500\nI0817 16:02:51.854305 17550 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.854311 17550 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.854316 17550 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:02:51.854327 17550 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:51.854337 17550 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:51.854385 17550 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:51.854396 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.854403 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.854408 17550 net.cpp:165] Memory required for data: 1319425500\nI0817 16:02:51.854413 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:02:51.854425 17550 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:02:51.854432 17550 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:51.854449 17550 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:02:51.855509 17550 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:02:51.855525 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.855530 17550 net.cpp:165] Memory required for data: 1321473500\nI0817 16:02:51.855538 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:02:51.855551 17550 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:02:51.855557 17550 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:02:51.855566 17550 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:02:51.855835 17550 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:02:51.855849 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.855854 17550 net.cpp:165] Memory required for data: 1323521500\nI0817 16:02:51.855864 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:51.855875 17550 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:02:51.855881 17550 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:02:51.855890 17550 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.855960 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:51.856124 17550 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:02:51.856137 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.856142 17550 net.cpp:165] Memory required for data: 1325569500\nI0817 16:02:51.856151 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:02:51.856163 17550 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:02:51.856169 17550 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:02:51.856176 17550 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:51.856186 17550 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:02:51.856194 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.856197 17550 net.cpp:165] Memory required for data: 1327617500\nI0817 16:02:51.856202 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:02:51.856216 17550 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:02:51.856222 17550 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:02:51.856233 17550 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:02:51.858263 17550 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:02:51.858280 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.858285 17550 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:02:51.858294 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:02:51.858307 17550 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:02:51.858314 17550 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:02:51.858324 17550 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:02:51.858603 17550 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:02:51.858615 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.858620 17550 net.cpp:165] Memory required for data: 1331713500\nI0817 16:02:51.858630 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:51.858642 17550 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:02:51.858649 17550 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:02:51.858656 17550 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:02:51.858719 17550 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:51.858885 17550 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:02:51.858896 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.858901 17550 net.cpp:165] Memory required for data: 1333761500\nI0817 16:02:51.858911 17550 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:02:51.858922 17550 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:02:51.858929 17550 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:02:51.858937 17550 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:51.858952 17550 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:02:51.858997 17550 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:02:51.859011 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.859016 17550 net.cpp:165] Memory required for data: 1335809500\nI0817 16:02:51.859021 17550 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:02:51.859030 17550 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:02:51.859035 17550 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:02:51.859045 17550 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:02:51.859055 17550 net.cpp:150] Setting up L3_b4_relu\nI0817 16:02:51.859062 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.859067 17550 net.cpp:165] Memory required for data: 1337857500\nI0817 16:02:51.859071 17550 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.859079 17550 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.859084 17550 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:02:51.859091 17550 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:51.859100 17550 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:51.859151 17550 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:51.859163 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.859169 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.859174 17550 net.cpp:165] Memory required for data: 1341953500\nI0817 16:02:51.859179 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:02:51.859194 17550 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:02:51.859200 17550 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:51.859210 17550 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:02:51.860249 17550 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:02:51.860263 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.860270 17550 net.cpp:165] Memory required for data: 1344001500\nI0817 16:02:51.860278 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:02:51.860291 17550 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:02:51.860297 17550 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:02:51.860306 17550 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:02:51.860582 17550 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:02:51.860595 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.860600 17550 net.cpp:165] Memory required for data: 1346049500\nI0817 16:02:51.860610 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:51.860620 17550 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:02:51.860625 17550 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:02:51.860633 17550 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.860695 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:51.860862 17550 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:02:51.860874 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.860879 17550 net.cpp:165] Memory required for data: 1348097500\nI0817 16:02:51.860888 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:02:51.860896 17550 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:02:51.860903 17550 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:02:51.860910 17550 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:51.860924 17550 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:02:51.860931 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.860942 17550 net.cpp:165] Memory required for data: 1350145500\nI0817 16:02:51.860947 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:02:51.860967 17550 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:02:51.860973 17550 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:02:51.860991 17550 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:02:51.862027 17550 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:02:51.862041 17550 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:51.862046 17550 net.cpp:165] Memory required for data: 1352193500\nI0817 16:02:51.862056 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:02:51.862067 17550 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:02:51.862074 17550 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:02:51.862082 17550 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:02:51.862349 17550 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:02:51.862362 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862367 17550 net.cpp:165] Memory required for data: 1354241500\nI0817 16:02:51.862377 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:51.862390 17550 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:02:51.862397 17550 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:02:51.862406 17550 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:02:51.862467 17550 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:51.862628 17550 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:02:51.862643 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862646 17550 net.cpp:165] Memory required for data: 1356289500\nI0817 16:02:51.862655 17550 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:02:51.862668 17550 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:02:51.862674 17550 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:02:51.862681 17550 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:51.862691 17550 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:02:51.862725 17550 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:02:51.862737 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862741 17550 net.cpp:165] Memory required for data: 1358337500\nI0817 16:02:51.862747 17550 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:02:51.862757 17550 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:02:51.862764 17550 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:02:51.862771 17550 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:02:51.862781 17550 net.cpp:150] Setting up L3_b5_relu\nI0817 16:02:51.862787 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862792 17550 net.cpp:165] Memory required for data: 1360385500\nI0817 16:02:51.862797 17550 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.862803 17550 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.862809 17550 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:02:51.862817 17550 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:51.862826 17550 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:51.862879 17550 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:51.862890 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862896 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.862901 17550 net.cpp:165] Memory required for data: 1364481500\nI0817 16:02:51.862906 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:02:51.862920 17550 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:02:51.862926 17550 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:51.862936 17550 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:02:51.863973 17550 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:02:51.863988 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.863993 17550 net.cpp:165] Memory required for data: 1366529500\nI0817 16:02:51.864001 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:02:51.864020 
17550 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:02:51.864027 17550 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:02:51.864037 17550 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:02:51.864313 17550 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:02:51.864326 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.864331 17550 net.cpp:165] Memory required for data: 1368577500\nI0817 16:02:51.864341 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:51.864351 17550 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:02:51.864356 17550 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:02:51.864364 17550 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.864426 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:51.864593 17550 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:02:51.864605 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.864610 17550 net.cpp:165] Memory required for data: 1370625500\nI0817 16:02:51.864619 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:02:51.864627 17550 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:02:51.864634 17550 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:02:51.864644 17550 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:51.864653 17550 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:02:51.864660 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.864665 17550 net.cpp:165] Memory required for data: 1372673500\nI0817 16:02:51.864670 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:02:51.864684 17550 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:02:51.864691 17550 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:02:51.864698 17550 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:02:51.865736 17550 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:02:51.865751 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.865756 17550 net.cpp:165] Memory required for data: 1374721500\nI0817 16:02:51.865764 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:02:51.865777 17550 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:02:51.865783 17550 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:02:51.865792 17550 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:02:51.866080 17550 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:02:51.866093 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866098 17550 net.cpp:165] Memory required for data: 1376769500\nI0817 16:02:51.866109 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:51.866122 17550 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:02:51.866128 17550 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:02:51.866135 17550 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:02:51.866196 17550 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:51.866359 17550 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:02:51.866371 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866376 17550 net.cpp:165] Memory required for data: 1378817500\nI0817 16:02:51.866385 17550 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:02:51.866397 17550 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:02:51.866405 17550 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:02:51.866411 17550 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:51.866421 17550 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:02:51.866456 17550 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:02:51.866467 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866472 17550 net.cpp:165] Memory required for data: 1380865500\nI0817 16:02:51.866477 17550 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:02:51.866488 17550 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:02:51.866502 17550 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:02:51.866509 17550 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:02:51.866519 17550 net.cpp:150] Setting up L3_b6_relu\nI0817 16:02:51.866526 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866530 17550 net.cpp:165] Memory required for data: 1382913500\nI0817 16:02:51.866535 17550 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.866542 17550 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.866547 17550 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:02:51.866555 17550 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:51.866564 17550 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:51.866618 17550 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:51.866631 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866636 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.866641 17550 net.cpp:165] Memory required for data: 1387009500\nI0817 16:02:51.866647 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:02:51.866659 17550 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:02:51.866667 17550 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:51.866675 17550 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:02:51.867710 17550 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:02:51.867725 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.867730 17550 net.cpp:165] Memory required for data: 1389057500\nI0817 16:02:51.867739 17550 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:02:51.867751 17550 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:02:51.867758 17550 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:02:51.867769 17550 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:02:51.868049 17550 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:02:51.868062 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.868067 17550 net.cpp:165] Memory required for data: 1391105500\nI0817 16:02:51.868077 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:51.868086 17550 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:02:51.868093 17550 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:02:51.868103 17550 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.868165 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:51.868333 17550 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:02:51.868346 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.868351 17550 net.cpp:165] Memory required for data: 1393153500\nI0817 16:02:51.868360 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:02:51.868394 17550 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:02:51.868403 17550 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:02:51.868412 17550 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:51.868422 17550 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:02:51.868429 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.868434 17550 net.cpp:165] Memory required for data: 1395201500\nI0817 16:02:51.868439 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:02:51.868453 17550 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:02:51.868460 17550 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:02:51.868468 17550 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:02:51.869510 17550 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:02:51.869524 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.869529 17550 net.cpp:165] Memory required for data: 1397249500\nI0817 16:02:51.869539 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:02:51.869559 17550 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:02:51.869566 17550 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:02:51.869575 17550 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:02:51.869849 17550 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:02:51.869863 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.869868 17550 net.cpp:165] Memory required for data: 1399297500\nI0817 16:02:51.869879 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:51.869887 17550 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:02:51.869894 17550 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:02:51.869901 17550 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:02:51.869973 17550 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:51.870138 17550 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:02:51.870151 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.870157 17550 net.cpp:165] Memory required for data: 1401345500\nI0817 16:02:51.870165 17550 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:02:51.870174 17550 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:02:51.870182 17550 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:02:51.870188 17550 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:51.870199 17550 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:02:51.870234 17550 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:02:51.870249 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.870254 17550 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:02:51.870260 17550 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:02:51.870266 17550 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:02:51.870272 17550 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:02:51.870280 17550 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:02:51.870290 17550 net.cpp:150] Setting up L3_b7_relu\nI0817 16:02:51.870296 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.870301 17550 net.cpp:165] Memory required for data: 1405441500\nI0817 16:02:51.870306 17550 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.870318 17550 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.870324 17550 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:02:51.870332 17550 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:51.870340 17550 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:51.870391 17550 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:51.870402 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.870409 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.870414 17550 net.cpp:165] Memory required for data: 1409537500\nI0817 16:02:51.870419 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:02:51.870429 17550 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:02:51.870436 17550 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:51.870448 17550 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:02:51.872485 17550 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:02:51.872503 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.872509 17550 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:02:51.872517 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:02:51.872530 17550 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:02:51.872537 17550 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:02:51.872548 17550 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:02:51.872825 17550 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:02:51.872838 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.872851 17550 net.cpp:165] Memory required for data: 1413633500\nI0817 16:02:51.872862 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:51.872871 17550 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:02:51.872879 17550 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:02:51.872889 17550 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.872957 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:51.873122 17550 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:02:51.873136 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.873141 17550 net.cpp:165] Memory required for data: 1415681500\nI0817 16:02:51.873149 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:02:51.873158 17550 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:02:51.873164 17550 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:02:51.873178 17550 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:51.873188 17550 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:02:51.873196 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.873200 17550 net.cpp:165] Memory required for data: 1417729500\nI0817 16:02:51.873205 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:02:51.873219 17550 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:02:51.873225 17550 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:02:51.873236 17550 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:02:51.874274 17550 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:02:51.874289 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.874294 17550 net.cpp:165] Memory required for data: 1419777500\nI0817 16:02:51.874303 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:02:51.874313 17550 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:02:51.874320 17550 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:02:51.874330 17550 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:02:51.874603 17550 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:02:51.874616 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.874621 17550 net.cpp:165] Memory required for data: 1421825500\nI0817 16:02:51.874631 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:51.874644 17550 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:02:51.874650 17550 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:02:51.874657 17550 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:02:51.874716 17550 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:51.874878 17550 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:02:51.874891 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.874897 17550 net.cpp:165] Memory required for data: 1423873500\nI0817 16:02:51.874905 17550 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:02:51.874915 17550 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:02:51.874922 17550 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:02:51.874928 17550 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:51.874946 17550 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:02:51.874982 17550 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:02:51.874994 17550 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:51.875000 17550 net.cpp:165] Memory required for data: 1425921500\nI0817 16:02:51.875005 17550 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:02:51.875015 17550 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:02:51.875021 17550 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:02:51.875028 17550 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:02:51.875038 17550 net.cpp:150] Setting up L3_b8_relu\nI0817 16:02:51.875046 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.875049 17550 net.cpp:165] Memory required for data: 1427969500\nI0817 16:02:51.875066 17550 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.875073 17550 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.875079 17550 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:02:51.875087 17550 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:51.875097 17550 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:51.875147 17550 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:51.875159 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.875166 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.875170 17550 net.cpp:165] Memory required for data: 1432065500\nI0817 16:02:51.875175 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:02:51.875191 17550 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:02:51.875197 17550 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:51.875207 17550 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:02:51.876241 17550 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:02:51.876256 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:02:51.876261 17550 net.cpp:165] Memory required for data: 1434113500\nI0817 16:02:51.876269 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:02:51.876284 17550 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:02:51.876291 17550 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:02:51.876302 17550 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:02:51.876575 17550 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:02:51.876588 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.876593 17550 net.cpp:165] Memory required for data: 1436161500\nI0817 16:02:51.876605 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:51.876612 17550 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:02:51.876618 17550 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:02:51.876629 17550 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.876691 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:51.876852 17550 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:02:51.876865 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.876870 17550 net.cpp:165] Memory required for data: 1438209500\nI0817 16:02:51.876879 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:02:51.876890 17550 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:02:51.876898 17550 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:02:51.876904 17550 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:51.876914 17550 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:02:51.876921 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.876926 17550 net.cpp:165] Memory required for data: 1440257500\nI0817 16:02:51.876930 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:02:51.876950 17550 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:02:51.876958 17550 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:02:51.876969 17550 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:02:51.878005 17550 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:02:51.878018 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.878023 17550 net.cpp:165] Memory required for data: 1442305500\nI0817 16:02:51.878032 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:02:51.878041 17550 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:02:51.878048 17550 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:02:51.878062 17550 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:02:51.878334 17550 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:02:51.878350 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.878365 17550 net.cpp:165] Memory required for data: 1444353500\nI0817 16:02:51.878376 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:51.878386 17550 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:02:51.878391 17550 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:02:51.878399 17550 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:02:51.878458 17550 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:51.878619 17550 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:02:51.878633 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.878638 17550 net.cpp:165] Memory required for data: 1446401500\nI0817 16:02:51.878646 17550 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:02:51.878659 17550 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:02:51.878666 17550 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:02:51.878674 17550 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:51.878681 17550 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:02:51.878718 17550 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:02:51.878731 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.878736 17550 net.cpp:165] Memory required for data: 1448449500\nI0817 16:02:51.878741 17550 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:02:51.878747 17550 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:02:51.878753 17550 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:02:51.878760 17550 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:02:51.878770 17550 net.cpp:150] Setting up L3_b9_relu\nI0817 16:02:51.878777 17550 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:51.878782 17550 net.cpp:165] Memory required for data: 1450497500\nI0817 16:02:51.878787 17550 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:02:51.878794 17550 net.cpp:100] Creating Layer post_pool\nI0817 16:02:51.878800 17550 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:02:51.878811 17550 net.cpp:408] post_pool -> post_pool\nI0817 16:02:51.878847 17550 net.cpp:150] Setting up post_pool\nI0817 16:02:51.878856 17550 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:02:51.878861 17550 net.cpp:165] Memory required for data: 1450529500\nI0817 16:02:51.878866 17550 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:02:51.878876 17550 net.cpp:100] Creating Layer post_FC\nI0817 16:02:51.878882 17550 net.cpp:434] post_FC <- post_pool\nI0817 16:02:51.878895 17550 net.cpp:408] post_FC -> post_FC_top\nI0817 16:02:51.879070 17550 net.cpp:150] Setting up post_FC\nI0817 16:02:51.879083 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.879088 17550 net.cpp:165] Memory required for data: 1450534500\nI0817 16:02:51.879097 17550 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:02:51.879106 17550 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:02:51.879112 17550 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:02:51.879122 17550 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:02:51.879132 17550 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:02:51.879186 17550 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:02:51.879197 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.879204 17550 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:51.879209 17550 net.cpp:165] Memory required for data: 1450544500\nI0817 16:02:51.879214 17550 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:02:51.879221 17550 net.cpp:100] Creating Layer accuracy\nI0817 16:02:51.879227 17550 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:02:51.879235 17550 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:02:51.879241 17550 net.cpp:408] accuracy -> accuracy\nI0817 16:02:51.879253 17550 net.cpp:150] Setting up accuracy\nI0817 16:02:51.879261 17550 net.cpp:157] Top shape: (1)\nI0817 16:02:51.879272 17550 net.cpp:165] Memory required for data: 1450544504\nI0817 16:02:51.879277 17550 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:51.879286 17550 net.cpp:100] Creating Layer loss\nI0817 16:02:51.879292 17550 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:02:51.879297 17550 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:02:51.879307 17550 net.cpp:408] loss -> loss\nI0817 16:02:51.879320 17550 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:51.879444 17550 net.cpp:150] Setting up loss\nI0817 16:02:51.879456 17550 net.cpp:157] Top shape: (1)\nI0817 16:02:51.879462 17550 net.cpp:160]     with loss weight 1\nI0817 16:02:51.879478 17550 net.cpp:165] Memory required for data: 1450544508\nI0817 16:02:51.879484 17550 net.cpp:226] loss needs backward computation.\nI0817 16:02:51.879490 17550 net.cpp:228] accuracy does not need backward computation.\nI0817 16:02:51.879496 17550 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:02:51.879503 17550 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:02:51.879508 17550 net.cpp:226] post_pool needs backward computation.\nI0817 16:02:51.879513 17550 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:02:51.879518 17550 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.879523 17550 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.879528 17550 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.879532 17550 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.879537 17550 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.879542 17550 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.879547 17550 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.879552 17550 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.879557 17550 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:02:51.879562 17550 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:02:51.879567 17550 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.879572 17550 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.879577 17550 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.879582 17550 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.879587 17550 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.879592 17550 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:02:51.879597 17550 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.879602 17550 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.879607 17550 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:02:51.879612 17550 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:02:51.879617 17550 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:02:51.879626 17550 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.879631 17550 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.879637 17550 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.879642 17550 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.879647 17550 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.879652 17550 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.879657 17550 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.879662 17550 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:02:51.879667 17550 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:02:51.879672 17550 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.879678 17550 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.879683 17550 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.879693 17550 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.879699 17550 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.879704 17550 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.879709 17550 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.879714 17550 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.879719 17550 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:02:51.879724 17550 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:02:51.879729 17550 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:02:51.879735 17550 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.879740 17550 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.879745 17550 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.879750 17550 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.879755 17550 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.879760 17550 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.879765 17550 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.879771 17550 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:02:51.879776 17550 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:02:51.879781 17550 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.879787 17550 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.879792 17550 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.879797 17550 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.879802 17550 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.879807 17550 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.879812 17550 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.879817 17550 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.879823 17550 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:02:51.879828 17550 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:02:51.879833 17550 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:02:51.879838 17550 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.879843 17550 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.879849 17550 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.879854 17550 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.879859 17550 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.879863 
17550 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.879869 17550 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.879874 17550 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:02:51.879880 17550 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:02:51.879885 17550 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.879890 17550 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.879895 17550 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.879901 17550 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.879909 17550 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.879914 17550 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.879920 17550 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.879925 17550 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.879930 17550 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:02:51.879936 17550 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:02:51.879956 17550 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:02:51.879961 17550 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:02:51.879966 17550 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.879972 17550 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:02:51.879977 17550 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.879982 17550 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.879988 17550 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.879993 17550 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.879998 17550 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:02:51.880003 17550 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.880009 17550 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.880014 17550 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:02:51.880019 17550 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:02:51.880024 17550 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.880030 17550 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.880035 17550 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.880041 17550 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.880046 17550 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.880051 17550 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.880056 17550 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.880062 17550 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.880067 17550 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:02:51.880074 17550 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:02:51.880079 17550 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.880084 17550 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.880089 17550 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.880095 17550 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.880100 17550 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.880105 17550 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:02:51.880110 17550 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.880115 17550 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.880121 17550 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:02:51.880126 17550 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:02:51.880132 17550 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:02:51.880138 17550 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.880143 17550 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.880148 17550 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.880154 17550 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.880159 17550 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.880164 17550 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.880169 17550 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.880175 17550 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:02:51.880180 17550 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:02:51.880187 17550 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.880192 17550 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.880198 17550 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.880203 17550 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.880208 17550 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.880218 17550 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.880223 17550 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.880229 17550 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.880234 17550 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:02:51.880240 17550 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:02:51.880245 17550 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:02:51.880251 17550 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.880256 17550 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.880262 17550 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.880267 17550 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.880272 17550 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.880277 17550 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.880283 17550 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.880293 17550 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:02:51.880300 17550 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:02:51.880304 17550 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.880311 17550 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.880316 17550 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.880321 17550 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.880327 17550 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.880332 17550 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.880338 17550 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.880343 17550 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.880349 17550 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:02:51.880354 17550 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:02:51.880360 17550 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:02:51.880367 17550 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.880372 17550 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.880378 17550 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.880383 17550 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.880388 17550 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.880393 17550 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.880398 17550 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.880404 17550 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:02:51.880410 17550 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:02:51.880415 17550 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.880421 17550 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.880426 17550 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.880432 17550 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.880439 17550 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.880444 17550 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.880448 17550 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.880455 17550 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.880460 17550 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:02:51.880465 17550 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:02:51.880472 17550 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:02:51.880477 17550 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:02:51.880487 17550 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.880494 17550 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:02:51.880499 17550 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.880506 17550 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.880511 17550 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.880517 17550 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.880522 17550 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:02:51.880528 17550 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.880533 17550 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.880539 17550 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:02:51.880545 17550 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:02:51.880550 17550 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:02:51.880556 17550 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:02:51.880561 17550 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:02:51.880568 17550 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:02:51.880573 17550 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:02:51.880578 17550 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:02:51.880584 17550 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:02:51.880589 17550 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:02:51.880594 17550 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:02:51.880600 17550 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:02:51.880606 17550 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:02:51.880612 17550 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:02:51.880617 17550 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:02:51.880623 17550 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:02:51.880630 17550 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:02:51.880635 17550 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:02:51.880640 17550 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:02:51.880645 17550 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:02:51.880651 17550 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:02:51.880657 17550 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:02:51.880662 17550 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:02:51.880668 17550 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:02:51.880674 17550 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:02:51.880679 17550 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:02:51.880686 17550 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:02:51.880690 17550 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:02:51.880697 17550 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:02:51.880702 17550 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:02:51.880707 17550 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:02:51.880713 17550 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:02:51.880718 17550 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:02:51.880725 17550 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:02:51.880730 17550 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:02:51.880736 17550 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:02:51.880741 17550 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:02:51.880748 17550 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:02:51.880758 17550 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:02:51.880764 17550 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:02:51.880769 17550 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:02:51.880775 17550 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:02:51.880781 17550 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:02:51.880787 17550 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:02:51.880794 17550 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:02:51.880798 17550 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:02:51.880805 17550 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:02:51.880810 17550 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:02:51.880815 17550 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:02:51.880821 17550 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:02:51.880827 17550 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:02:51.880833 17550 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:02:51.880838 17550 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:02:51.880846 17550 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:02:51.880851 17550 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:02:51.880856 17550 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:02:51.880862 17550 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:02:51.880867 17550 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:02:51.880873 17550 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:02:51.880878 17550 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:02:51.880884 17550 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:02:51.880890 17550 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:02:51.880897 17550 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:02:51.880903 17550 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:02:51.880908 17550 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:02:51.880914 17550 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:02:51.880919 17550 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:02:51.880925 17550 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:02:51.880930 17550 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:02:51.880941 17550 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:02:51.880949 17550 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:02:51.880954 17550 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:02:51.880960 17550 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:02:51.880967 17550 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:02:51.880972 17550 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:02:51.880978 17550 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:02:51.880987 17550 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:02:51.880993 17550 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:02:51.881000 17550 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:02:51.881006 17550 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:02:51.881011 17550 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:02:51.881017 17550 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:02:51.881023 17550 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:02:51.881029 17550 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:02:51.881036 17550 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:02:51.881047 17550 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:02:51.881054 17550 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:02:51.881059 17550 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:02:51.881065 17550 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:02:51.881072 17550 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:02:51.881078 17550 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:02:51.881083 17550 net.cpp:226] pre_relu needs backward computation.\nI0817 16:02:51.881088 17550 net.cpp:226] pre_scale needs backward computation.\nI0817 16:02:51.881093 17550 net.cpp:226] pre_bn needs backward computation.\nI0817 16:02:51.881099 17550 net.cpp:226] pre_conv needs backward computation.\nI0817 16:02:51.881106 17550 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:02:51.881114 17550 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:02:51.881117 17550 net.cpp:270] This network produces output accuracy\nI0817 16:02:51.881124 17550 net.cpp:270] This network produces output loss\nI0817 16:02:51.881450 17550 net.cpp:283] Network initialization done.\nI0817 16:02:51.882447 17550 solver.cpp:60] Solver scaffolding done.\nI0817 16:02:52.107537 17550 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:02:52.468231 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:52.468315 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:52.475468 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:52.695858 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:52.695974 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:52.730353 17550 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:02:52.730468 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:53.184435 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:53.184487 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:53.192353 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:53.438977 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:53.439088 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:53.490857 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:53.490969 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:54.004320 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:54.004372 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:54.013233 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:54.286218 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:54.286348 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:54.357522 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:54.357652 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:54.441594 17550 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:02:54.916570 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:54.916623 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:02:54.926154 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:55.228592 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:55.228811 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:55.319595 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:55.319772 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:55.952842 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:55.952916 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:55.963263 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:56.283182 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:56.283397 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:56.395391 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:56.395597 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:57.095948 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:57.096010 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:57.107014 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:57.445842 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:57.446090 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:57.577662 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:57.577898 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:02:58.355334 17550 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:58.355412 17550 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:58.368863 17550 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:58.410606 17556 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:58.464435 17565 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:58.800778 17550 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:58.801070 17550 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:58.952679 17550 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:58.952950 17550 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:59.124590 17550 parallel.cpp:425] Starting Optimization\nI0817 16:02:59.126509 17550 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:02:59.126530 17550 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:02:59.130903 17550 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:04:18.529222 17550 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:04:18.529506 17550 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:04:22.542944 17550 solver.cpp:228] Iteration 0, loss = 4.328\nI0817 16:04:22.542984 17550 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:04:22.543001 17550 solver.cpp:244]     Train net output #1: loss = 4.328 (* 1 = 4.328 loss)\nI0817 16:04:22.604670 17550 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:06:38.543889 17550 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:07:57.759919 17550 solver.cpp:404]     Test net output #0: accuracy = 0.21488\nI0817 16:07:57.760133 17550 solver.cpp:404]     Test net output #1: loss = 2.15539 (* 1 = 2.15539 loss)\nI0817 16:07:59.055047 17550 
solver.cpp:228] Iteration 100, loss = 1.90421\nI0817 16:07:59.055088 17550 solver.cpp:244]     Train net output #0: accuracy = 0.272\nI0817 16:07:59.055104 17550 solver.cpp:244]     Train net output #1: loss = 1.90421 (* 1 = 1.90421 loss)\nI0817 16:07:59.162585 17550 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:10:15.448798 17550 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:11:34.707803 17550 solver.cpp:404]     Test net output #0: accuracy = 0.39836\nI0817 16:11:34.708057 17550 solver.cpp:404]     Test net output #1: loss = 1.59503 (* 1 = 1.59503 loss)\nI0817 16:11:36.004034 17550 solver.cpp:228] Iteration 200, loss = 1.49856\nI0817 16:11:36.004076 17550 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0817 16:11:36.004091 17550 solver.cpp:244]     Train net output #1: loss = 1.49856 (* 1 = 1.49856 loss)\nI0817 16:11:36.109378 17550 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:13:52.202064 17550 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:15:11.500473 17550 solver.cpp:404]     Test net output #0: accuracy = 0.49056\nI0817 16:15:11.500731 17550 solver.cpp:404]     Test net output #1: loss = 1.43277 (* 1 = 1.43277 loss)\nI0817 16:15:12.796511 17550 solver.cpp:228] Iteration 300, loss = 1.16427\nI0817 16:15:12.796552 17550 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0817 16:15:12.796568 17550 solver.cpp:244]     Train net output #1: loss = 1.16427 (* 1 = 1.16427 loss)\nI0817 16:15:12.901424 17550 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:17:29.158596 17550 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:18:48.426842 17550 solver.cpp:404]     Test net output #0: accuracy = 0.56324\nI0817 16:18:48.427099 17550 solver.cpp:404]     Test net output #1: loss = 1.41045 (* 1 = 1.41045 loss)\nI0817 16:18:49.723040 17550 solver.cpp:228] Iteration 400, loss = 0.832359\nI0817 16:18:49.723083 17550 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 16:18:49.723098 
17550 solver.cpp:244]     Train net output #1: loss = 0.832359 (* 1 = 0.832359 loss)\nI0817 16:18:49.829357 17550 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:21:05.982477 17550 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:22:25.250708 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6658\nI0817 16:22:25.250972 17550 solver.cpp:404]     Test net output #1: loss = 1.0527 (* 1 = 1.0527 loss)\nI0817 16:22:26.546646 17550 solver.cpp:228] Iteration 500, loss = 0.74426\nI0817 16:22:26.546685 17550 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 16:22:26.546700 17550 solver.cpp:244]     Train net output #1: loss = 0.74426 (* 1 = 0.74426 loss)\nI0817 16:22:26.651034 17550 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:24:42.739855 17550 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:26:02.001731 17550 solver.cpp:404]     Test net output #0: accuracy = 0.73128\nI0817 16:26:02.001983 17550 solver.cpp:404]     Test net output #1: loss = 0.807122 (* 1 = 0.807122 loss)\nI0817 16:26:03.297708 17550 solver.cpp:228] Iteration 600, loss = 0.572253\nI0817 16:26:03.297749 17550 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 16:26:03.297765 17550 solver.cpp:244]     Train net output #1: loss = 0.572253 (* 1 = 0.572253 loss)\nI0817 16:26:03.400477 17550 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:28:19.476117 17550 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:29:38.741566 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75152\nI0817 16:29:38.741803 17550 solver.cpp:404]     Test net output #1: loss = 0.759734 (* 1 = 0.759734 loss)\nI0817 16:29:40.037418 17550 solver.cpp:228] Iteration 700, loss = 0.519497\nI0817 16:29:40.037461 17550 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:29:40.037477 17550 solver.cpp:244]     Train net output #1: loss = 0.519497 (* 1 = 0.519497 loss)\nI0817 16:29:40.144093 17550 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0817 16:31:56.273069 17550 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:33:15.514528 17550 solver.cpp:404]     Test net output #0: accuracy = 0.74088\nI0817 16:33:15.514786 17550 solver.cpp:404]     Test net output #1: loss = 0.80794 (* 1 = 0.80794 loss)\nI0817 16:33:16.810151 17550 solver.cpp:228] Iteration 800, loss = 0.584998\nI0817 16:33:16.810192 17550 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 16:33:16.810209 17550 solver.cpp:244]     Train net output #1: loss = 0.584998 (* 1 = 0.584998 loss)\nI0817 16:33:16.914577 17550 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:35:33.092196 17550 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:36:52.331403 17550 solver.cpp:404]     Test net output #0: accuracy = 0.73992\nI0817 16:36:52.331646 17550 solver.cpp:404]     Test net output #1: loss = 0.837011 (* 1 = 0.837011 loss)\nI0817 16:36:53.627372 17550 solver.cpp:228] Iteration 900, loss = 0.39595\nI0817 16:36:53.627413 17550 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:36:53.627429 17550 solver.cpp:244]     Train net output #1: loss = 0.39595 (* 1 = 0.39595 loss)\nI0817 16:36:53.735702 17550 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:39:09.943569 17550 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:40:29.168404 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77324\nI0817 16:40:29.168637 17550 solver.cpp:404]     Test net output #1: loss = 0.73508 (* 1 = 0.73508 loss)\nI0817 16:40:30.463542 17550 solver.cpp:228] Iteration 1000, loss = 0.331968\nI0817 16:40:30.463584 17550 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 16:40:30.463599 17550 solver.cpp:244]     Train net output #1: loss = 0.331968 (* 1 = 0.331968 loss)\nI0817 16:40:30.569860 17550 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:42:46.610572 17550 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:44:05.835876 17550 solver.cpp:404]     Test net output #0: 
accuracy = 0.77528\nI0817 16:44:05.836115 17550 solver.cpp:404]     Test net output #1: loss = 0.724737 (* 1 = 0.724737 loss)\nI0817 16:44:07.132519 17550 solver.cpp:228] Iteration 1100, loss = 0.302828\nI0817 16:44:07.132558 17550 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:44:07.132575 17550 solver.cpp:244]     Train net output #1: loss = 0.302828 (* 1 = 0.302828 loss)\nI0817 16:44:07.240643 17550 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:46:23.514380 17550 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:47:42.709537 17550 solver.cpp:404]     Test net output #0: accuracy = 0.70016\nI0817 16:47:42.709787 17550 solver.cpp:404]     Test net output #1: loss = 1.05348 (* 1 = 1.05348 loss)\nI0817 16:47:44.005794 17550 solver.cpp:228] Iteration 1200, loss = 0.243298\nI0817 16:47:44.005834 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:47:44.005851 17550 solver.cpp:244]     Train net output #1: loss = 0.243298 (* 1 = 0.243298 loss)\nI0817 16:47:44.116003 17550 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:50:00.266695 17550 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:51:19.474261 17550 solver.cpp:404]     Test net output #0: accuracy = 0.78576\nI0817 16:51:19.474509 17550 solver.cpp:404]     Test net output #1: loss = 0.674913 (* 1 = 0.674913 loss)\nI0817 16:51:20.770069 17550 solver.cpp:228] Iteration 1300, loss = 0.299847\nI0817 16:51:20.770108 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:51:20.770123 17550 solver.cpp:244]     Train net output #1: loss = 0.299847 (* 1 = 0.299847 loss)\nI0817 16:51:20.878896 17550 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:53:37.062969 17550 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:54:56.272586 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77592\nI0817 16:54:56.272821 17550 solver.cpp:404]     Test net output #1: loss = 0.746969 (* 1 = 0.746969 loss)\nI0817 
16:54:57.568086 17550 solver.cpp:228] Iteration 1400, loss = 0.215567\nI0817 16:54:57.568125 17550 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:54:57.568141 17550 solver.cpp:244]     Train net output #1: loss = 0.215567 (* 1 = 0.215567 loss)\nI0817 16:54:57.669657 17550 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 16:57:13.795236 17550 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 16:58:33.000932 17550 solver.cpp:404]     Test net output #0: accuracy = 0.7504\nI0817 16:58:33.001157 17550 solver.cpp:404]     Test net output #1: loss = 0.898782 (* 1 = 0.898782 loss)\nI0817 16:58:34.297158 17550 solver.cpp:228] Iteration 1500, loss = 0.383072\nI0817 16:58:34.297199 17550 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 16:58:34.297214 17550 solver.cpp:244]     Train net output #1: loss = 0.383072 (* 1 = 0.383072 loss)\nI0817 16:58:34.410221 17550 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:00:50.573071 17550 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:02:09.812053 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76836\nI0817 17:02:09.812314 17550 solver.cpp:404]     Test net output #1: loss = 0.85454 (* 1 = 0.85454 loss)\nI0817 17:02:11.108186 17550 solver.cpp:228] Iteration 1600, loss = 0.239666\nI0817 17:02:11.108224 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:02:11.108240 17550 solver.cpp:244]     Train net output #1: loss = 0.239666 (* 1 = 0.239666 loss)\nI0817 17:02:11.212538 17550 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:04:27.256711 17550 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:05:46.493762 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77404\nI0817 17:05:46.494010 17550 solver.cpp:404]     Test net output #1: loss = 0.832404 (* 1 = 0.832404 loss)\nI0817 17:05:47.789861 17550 solver.cpp:228] Iteration 1700, loss = 0.232719\nI0817 17:05:47.789901 17550 solver.cpp:244]     Train net output #0: 
accuracy = 0.944\nI0817 17:05:47.789918 17550 solver.cpp:244]     Train net output #1: loss = 0.232719 (* 1 = 0.232719 loss)\nI0817 17:05:47.893460 17550 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:08:04.131605 17550 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:09:23.362634 17550 solver.cpp:404]     Test net output #0: accuracy = 0.79356\nI0817 17:09:23.362890 17550 solver.cpp:404]     Test net output #1: loss = 0.744617 (* 1 = 0.744617 loss)\nI0817 17:09:24.659149 17550 solver.cpp:228] Iteration 1800, loss = 0.259407\nI0817 17:09:24.659193 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:09:24.659209 17550 solver.cpp:244]     Train net output #1: loss = 0.259407 (* 1 = 0.259407 loss)\nI0817 17:09:24.765426 17550 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:11:40.988533 17550 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:13:00.193508 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75208\nI0817 17:13:00.193765 17550 solver.cpp:404]     Test net output #1: loss = 0.930149 (* 1 = 0.930149 loss)\nI0817 17:13:01.489665 17550 solver.cpp:228] Iteration 1900, loss = 0.220252\nI0817 17:13:01.489707 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:13:01.489722 17550 solver.cpp:244]     Train net output #1: loss = 0.220252 (* 1 = 0.220252 loss)\nI0817 17:13:01.590005 17550 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:15:17.633919 17550 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:16:36.837407 17550 solver.cpp:404]     Test net output #0: accuracy = 0.72024\nI0817 17:16:36.837668 17550 solver.cpp:404]     Test net output #1: loss = 1.03199 (* 1 = 1.03199 loss)\nI0817 17:16:38.134377 17550 solver.cpp:228] Iteration 2000, loss = 0.246496\nI0817 17:16:38.134419 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:16:38.134434 17550 solver.cpp:244]     Train net output #1: loss = 0.246496 (* 1 = 0.246496 loss)\nI0817 
17:16:38.238831 17550 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:18:54.225044 17550 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:20:13.432845 17550 solver.cpp:404]     Test net output #0: accuracy = 0.59616\nI0817 17:20:13.433111 17550 solver.cpp:404]     Test net output #1: loss = 2.11936 (* 1 = 2.11936 loss)\nI0817 17:20:14.729074 17550 solver.cpp:228] Iteration 2100, loss = 0.265511\nI0817 17:20:14.729115 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:20:14.729130 17550 solver.cpp:244]     Train net output #1: loss = 0.265511 (* 1 = 0.265511 loss)\nI0817 17:20:14.839445 17550 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:22:31.005122 17550 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:23:50.213690 17550 solver.cpp:404]     Test net output #0: accuracy = 0.79544\nI0817 17:23:50.213950 17550 solver.cpp:404]     Test net output #1: loss = 0.668015 (* 1 = 0.668015 loss)\nI0817 17:23:51.509951 17550 solver.cpp:228] Iteration 2200, loss = 0.149383\nI0817 17:23:51.509999 17550 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:23:51.510015 17550 solver.cpp:244]     Train net output #1: loss = 0.149383 (* 1 = 0.149383 loss)\nI0817 17:23:51.611524 17550 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:26:08.034189 17550 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:27:27.240474 17550 solver.cpp:404]     Test net output #0: accuracy = 0.751\nI0817 17:27:27.240734 17550 solver.cpp:404]     Test net output #1: loss = 0.880678 (* 1 = 0.880678 loss)\nI0817 17:27:28.536448 17550 solver.cpp:228] Iteration 2300, loss = 0.255625\nI0817 17:27:28.536490 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:27:28.536506 17550 solver.cpp:244]     Train net output #1: loss = 0.255625 (* 1 = 0.255625 loss)\nI0817 17:27:28.646054 17550 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:29:45.107554 17550 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0817 17:31:04.347379 17550 solver.cpp:404]     Test net output #0: accuracy = 0.69396\nI0817 17:31:04.347635 17550 solver.cpp:404]     Test net output #1: loss = 1.21683 (* 1 = 1.21683 loss)\nI0817 17:31:05.643376 17550 solver.cpp:228] Iteration 2400, loss = 0.305993\nI0817 17:31:05.643419 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:31:05.643435 17550 solver.cpp:244]     Train net output #1: loss = 0.305993 (* 1 = 0.305993 loss)\nI0817 17:31:05.746829 17550 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:33:21.940798 17550 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:34:41.177649 17550 solver.cpp:404]     Test net output #0: accuracy = 0.78356\nI0817 17:34:41.177907 17550 solver.cpp:404]     Test net output #1: loss = 0.715806 (* 1 = 0.715806 loss)\nI0817 17:34:42.473757 17550 solver.cpp:228] Iteration 2500, loss = 0.241807\nI0817 17:34:42.473800 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:34:42.473816 17550 solver.cpp:244]     Train net output #1: loss = 0.241807 (* 1 = 0.241807 loss)\nI0817 17:34:42.580117 17550 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:36:59.065030 17550 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:38:18.279816 17550 solver.cpp:404]     Test net output #0: accuracy = 0.78612\nI0817 17:38:18.280053 17550 solver.cpp:404]     Test net output #1: loss = 0.771067 (* 1 = 0.771067 loss)\nI0817 17:38:19.576510 17550 solver.cpp:228] Iteration 2600, loss = 0.228788\nI0817 17:38:19.576555 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:38:19.576570 17550 solver.cpp:244]     Train net output #1: loss = 0.228788 (* 1 = 0.228788 loss)\nI0817 17:38:19.680876 17550 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:40:36.157438 17550 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:41:55.391191 17550 solver.cpp:404]     Test net output #0: accuracy = 0.782\nI0817 17:41:55.391448 17550 solver.cpp:404]     
Test net output #1: loss = 0.779361 (* 1 = 0.779361 loss)\nI0817 17:41:56.686846 17550 solver.cpp:228] Iteration 2700, loss = 0.255467\nI0817 17:41:56.686887 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:41:56.686902 17550 solver.cpp:244]     Train net output #1: loss = 0.255467 (* 1 = 0.255467 loss)\nI0817 17:41:56.787431 17550 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:44:12.835914 17550 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:45:32.025843 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76948\nI0817 17:45:32.026113 17550 solver.cpp:404]     Test net output #1: loss = 0.81994 (* 1 = 0.81994 loss)\nI0817 17:45:33.320952 17550 solver.cpp:228] Iteration 2800, loss = 0.227404\nI0817 17:45:33.320997 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:45:33.321014 17550 solver.cpp:244]     Train net output #1: loss = 0.227404 (* 1 = 0.227404 loss)\nI0817 17:45:33.434005 17550 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:47:49.890902 17550 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:49:09.081755 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6864\nI0817 17:49:09.082015 17550 solver.cpp:404]     Test net output #1: loss = 1.3752 (* 1 = 1.3752 loss)\nI0817 17:49:10.377089 17550 solver.cpp:228] Iteration 2900, loss = 0.202077\nI0817 17:49:10.377132 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:49:10.377148 17550 solver.cpp:244]     Train net output #1: loss = 0.202077 (* 1 = 0.202077 loss)\nI0817 17:49:10.486104 17550 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:51:26.607842 17550 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:52:45.821101 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75792\nI0817 17:52:45.821357 17550 solver.cpp:404]     Test net output #1: loss = 0.974053 (* 1 = 0.974053 loss)\nI0817 17:52:47.117103 17550 solver.cpp:228] Iteration 3000, loss = 0.35354\nI0817 
17:52:47.117146 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:52:47.117162 17550 solver.cpp:244]     Train net output #1: loss = 0.353541 (* 1 = 0.353541 loss)\nI0817 17:52:47.226640 17550 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 17:55:03.721396 17550 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 17:56:22.946117 17550 solver.cpp:404]     Test net output #0: accuracy = 0.7718\nI0817 17:56:22.946374 17550 solver.cpp:404]     Test net output #1: loss = 0.770692 (* 1 = 0.770692 loss)\nI0817 17:56:24.241693 17550 solver.cpp:228] Iteration 3100, loss = 0.29309\nI0817 17:56:24.241735 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:56:24.241751 17550 solver.cpp:244]     Train net output #1: loss = 0.29309 (* 1 = 0.29309 loss)\nI0817 17:56:24.345803 17550 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 17:58:40.811030 17550 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:00:00.040074 17550 solver.cpp:404]     Test net output #0: accuracy = 0.7826\nI0817 18:00:00.040334 17550 solver.cpp:404]     Test net output #1: loss = 0.693655 (* 1 = 0.693655 loss)\nI0817 18:00:01.335698 17550 solver.cpp:228] Iteration 3200, loss = 0.229777\nI0817 18:00:01.335738 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:00:01.335753 17550 solver.cpp:244]     Train net output #1: loss = 0.229777 (* 1 = 0.229777 loss)\nI0817 18:00:01.440701 17550 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:02:17.960917 17550 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:03:37.185461 17550 solver.cpp:404]     Test net output #0: accuracy = 0.56728\nI0817 18:03:37.185729 17550 solver.cpp:404]     Test net output #1: loss = 2.49743 (* 1 = 2.49743 loss)\nI0817 18:03:38.482117 17550 solver.cpp:228] Iteration 3300, loss = 0.301569\nI0817 18:03:38.482157 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:03:38.482173 17550 solver.cpp:244]     Train net 
output #1: loss = 0.301569 (* 1 = 0.301569 loss)\nI0817 18:03:38.582753 17550 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:05:54.997922 17550 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:07:14.225772 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6942\nI0817 18:07:14.226017 17550 solver.cpp:404]     Test net output #1: loss = 1.06162 (* 1 = 1.06162 loss)\nI0817 18:07:15.522009 17550 solver.cpp:228] Iteration 3400, loss = 0.269571\nI0817 18:07:15.522048 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:07:15.522063 17550 solver.cpp:244]     Train net output #1: loss = 0.269571 (* 1 = 0.269571 loss)\nI0817 18:07:15.624840 17550 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:09:32.057222 17550 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:10:51.291012 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6834\nI0817 18:10:51.291249 17550 solver.cpp:404]     Test net output #1: loss = 1.05432 (* 1 = 1.05432 loss)\nI0817 18:10:52.586705 17550 solver.cpp:228] Iteration 3500, loss = 0.252588\nI0817 18:10:52.586745 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:10:52.586760 17550 solver.cpp:244]     Train net output #1: loss = 0.252588 (* 1 = 0.252588 loss)\nI0817 18:10:52.688526 17550 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:13:08.734474 17550 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:14:28.099512 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6866\nI0817 18:14:28.099782 17550 solver.cpp:404]     Test net output #1: loss = 1.19811 (* 1 = 1.19811 loss)\nI0817 18:14:29.395593 17550 solver.cpp:228] Iteration 3600, loss = 0.24641\nI0817 18:14:29.395637 17550 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:14:29.395659 17550 solver.cpp:244]     Train net output #1: loss = 0.24641 (* 1 = 0.24641 loss)\nI0817 18:14:29.503597 17550 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 18:16:45.946228 
17550 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:18:05.300601 17550 solver.cpp:404]     Test net output #0: accuracy = 0.74716\nI0817 18:18:05.300863 17550 solver.cpp:404]     Test net output #1: loss = 0.84334 (* 1 = 0.84334 loss)\nI0817 18:18:06.597270 17550 solver.cpp:228] Iteration 3700, loss = 0.305775\nI0817 18:18:06.597316 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:18:06.597339 17550 solver.cpp:244]     Train net output #1: loss = 0.305775 (* 1 = 0.305775 loss)\nI0817 18:18:06.706953 17550 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:20:22.812770 17550 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:21:42.082687 17550 solver.cpp:404]     Test net output #0: accuracy = 0.67328\nI0817 18:21:42.082955 17550 solver.cpp:404]     Test net output #1: loss = 1.37571 (* 1 = 1.37571 loss)\nI0817 18:21:43.380530 17550 solver.cpp:228] Iteration 3800, loss = 0.326161\nI0817 18:21:43.380573 17550 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:21:43.380589 17550 solver.cpp:244]     Train net output #1: loss = 0.326161 (* 1 = 0.326161 loss)\nI0817 18:21:43.477684 17550 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:23:59.605398 17550 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:25:18.955476 17550 solver.cpp:404]     Test net output #0: accuracy = 0.69252\nI0817 18:25:18.955740 17550 solver.cpp:404]     Test net output #1: loss = 1.23146 (* 1 = 1.23146 loss)\nI0817 18:25:20.251440 17550 solver.cpp:228] Iteration 3900, loss = 0.338426\nI0817 18:25:20.251485 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:25:20.251510 17550 solver.cpp:244]     Train net output #1: loss = 0.338427 (* 1 = 0.338427 loss)\nI0817 18:25:20.367019 17550 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:27:36.709888 17550 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:28:56.047109 17550 solver.cpp:404]     Test net output #0: accuracy = 
0.75168\nI0817 18:28:56.047370 17550 solver.cpp:404]     Test net output #1: loss = 0.944291 (* 1 = 0.944291 loss)\nI0817 18:28:57.343683 17550 solver.cpp:228] Iteration 4000, loss = 0.283363\nI0817 18:28:57.343729 17550 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:28:57.343755 17550 solver.cpp:244]     Train net output #1: loss = 0.283363 (* 1 = 0.283363 loss)\nI0817 18:28:57.452890 17550 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:31:13.754884 17550 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:32:33.069181 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77944\nI0817 18:32:33.069449 17550 solver.cpp:404]     Test net output #1: loss = 0.724286 (* 1 = 0.724286 loss)\nI0817 18:32:34.365823 17550 solver.cpp:228] Iteration 4100, loss = 0.379216\nI0817 18:32:34.365869 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:32:34.365893 17550 solver.cpp:244]     Train net output #1: loss = 0.379217 (* 1 = 0.379217 loss)\nI0817 18:32:34.469449 17550 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:34:50.472564 17550 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:36:09.839064 17550 solver.cpp:404]     Test net output #0: accuracy = 0.577\nI0817 18:36:09.839332 17550 solver.cpp:404]     Test net output #1: loss = 2.25313 (* 1 = 2.25313 loss)\nI0817 18:36:11.136188 17550 solver.cpp:228] Iteration 4200, loss = 0.335264\nI0817 18:36:11.136235 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:36:11.136258 17550 solver.cpp:244]     Train net output #1: loss = 0.335264 (* 1 = 0.335264 loss)\nI0817 18:36:11.235678 17550 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:38:27.710671 17550 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:39:47.085834 17550 solver.cpp:404]     Test net output #0: accuracy = 0.7464\nI0817 18:39:47.086094 17550 solver.cpp:404]     Test net output #1: loss = 0.857794 (* 1 = 0.857794 loss)\nI0817 18:39:48.382235 
17550 solver.cpp:228] Iteration 4300, loss = 0.396371\nI0817 18:39:48.382280 17550 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:39:48.382304 17550 solver.cpp:244]     Train net output #1: loss = 0.396371 (* 1 = 0.396371 loss)\nI0817 18:39:48.482223 17550 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:42:04.704540 17550 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:43:23.980262 17550 solver.cpp:404]     Test net output #0: accuracy = 0.72588\nI0817 18:43:23.980516 17550 solver.cpp:404]     Test net output #1: loss = 0.978441 (* 1 = 0.978441 loss)\nI0817 18:43:25.277590 17550 solver.cpp:228] Iteration 4400, loss = 0.202088\nI0817 18:43:25.277637 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:43:25.277662 17550 solver.cpp:244]     Train net output #1: loss = 0.202088 (* 1 = 0.202088 loss)\nI0817 18:43:25.386888 17550 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:45:41.551069 17550 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:47:00.783357 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6814\nI0817 18:47:00.783625 17550 solver.cpp:404]     Test net output #1: loss = 1.20484 (* 1 = 1.20484 loss)\nI0817 18:47:02.079413 17550 solver.cpp:228] Iteration 4500, loss = 0.308643\nI0817 18:47:02.079457 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:47:02.079480 17550 solver.cpp:244]     Train net output #1: loss = 0.308643 (* 1 = 0.308643 loss)\nI0817 18:47:02.189970 17550 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 18:49:18.495569 17550 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:50:37.839592 17550 solver.cpp:404]     Test net output #0: accuracy = 0.7888\nI0817 18:50:37.839845 17550 solver.cpp:404]     Test net output #1: loss = 0.674492 (* 1 = 0.674492 loss)\nI0817 18:50:39.136078 17550 solver.cpp:228] Iteration 4600, loss = 0.419253\nI0817 18:50:39.136124 17550 solver.cpp:244]     Train net output #0: accuracy = 
0.856\nI0817 18:50:39.136150 17550 solver.cpp:244]     Train net output #1: loss = 0.419253 (* 1 = 0.419253 loss)\nI0817 18:50:39.249469 17550 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 18:52:55.547039 17550 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 18:54:14.923509 17550 solver.cpp:404]     Test net output #0: accuracy = 0.66908\nI0817 18:54:14.923776 17550 solver.cpp:404]     Test net output #1: loss = 1.23021 (* 1 = 1.23021 loss)\nI0817 18:54:16.220957 17550 solver.cpp:228] Iteration 4700, loss = 0.316523\nI0817 18:54:16.221004 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:54:16.221030 17550 solver.cpp:244]     Train net output #1: loss = 0.316523 (* 1 = 0.316523 loss)\nI0817 18:54:16.328918 17550 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 18:56:32.434136 17550 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 18:57:51.837090 17550 solver.cpp:404]     Test net output #0: accuracy = 0.69448\nI0817 18:57:51.837358 17550 solver.cpp:404]     Test net output #1: loss = 1.01192 (* 1 = 1.01192 loss)\nI0817 18:57:53.133921 17550 solver.cpp:228] Iteration 4800, loss = 0.3777\nI0817 18:57:53.133970 17550 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:57:53.133994 17550 solver.cpp:244]     Train net output #1: loss = 0.3777 (* 1 = 0.3777 loss)\nI0817 18:57:53.242543 17550 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:00:09.818565 17550 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:01:29.156033 17550 solver.cpp:404]     Test net output #0: accuracy = 0.61028\nI0817 19:01:29.156296 17550 solver.cpp:404]     Test net output #1: loss = 1.61371 (* 1 = 1.61371 loss)\nI0817 19:01:30.452160 17550 solver.cpp:228] Iteration 4900, loss = 0.320621\nI0817 19:01:30.452211 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:01:30.452234 17550 solver.cpp:244]     Train net output #1: loss = 0.320621 (* 1 = 0.320621 loss)\nI0817 19:01:30.554095 17550 
sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:03:46.652704 17550 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:05:05.992590 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75092\nI0817 19:05:05.992844 17550 solver.cpp:404]     Test net output #1: loss = 0.809487 (* 1 = 0.809487 loss)\nI0817 19:05:07.289346 17550 solver.cpp:228] Iteration 5000, loss = 0.317774\nI0817 19:05:07.289392 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:05:07.289418 17550 solver.cpp:244]     Train net output #1: loss = 0.317774 (* 1 = 0.317774 loss)\nI0817 19:05:07.400178 17550 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:07:23.681455 17550 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:08:43.021260 17550 solver.cpp:404]     Test net output #0: accuracy = 0.63496\nI0817 19:08:43.021523 17550 solver.cpp:404]     Test net output #1: loss = 1.32452 (* 1 = 1.32452 loss)\nI0817 19:08:44.318599 17550 solver.cpp:228] Iteration 5100, loss = 0.311928\nI0817 19:08:44.318646 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:08:44.318670 17550 solver.cpp:244]     Train net output #1: loss = 0.311928 (* 1 = 0.311928 loss)\nI0817 19:08:44.420974 17550 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:11:00.443533 17550 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:12:19.765044 17550 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI0817 19:12:19.765298 17550 solver.cpp:404]     Test net output #1: loss = 1.0708 (* 1 = 1.0708 loss)\nI0817 19:12:21.060520 17550 solver.cpp:228] Iteration 5200, loss = 0.366929\nI0817 19:12:21.060564 17550 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 19:12:21.060580 17550 solver.cpp:244]     Train net output #1: loss = 0.366929 (* 1 = 0.366929 loss)\nI0817 19:12:21.163229 17550 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:14:37.371754 17550 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 
19:15:56.571108 17550 solver.cpp:404]     Test net output #0: accuracy = 0.73356\nI0817 19:15:56.571377 17550 solver.cpp:404]     Test net output #1: loss = 0.880117 (* 1 = 0.880117 loss)\nI0817 19:15:57.866681 17550 solver.cpp:228] Iteration 5300, loss = 0.366798\nI0817 19:15:57.866725 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:15:57.866740 17550 solver.cpp:244]     Train net output #1: loss = 0.366799 (* 1 = 0.366799 loss)\nI0817 19:15:57.975499 17550 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:18:14.222594 17550 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:19:33.417960 17550 solver.cpp:404]     Test net output #0: accuracy = 0.62364\nI0817 19:19:33.418221 17550 solver.cpp:404]     Test net output #1: loss = 1.36195 (* 1 = 1.36195 loss)\nI0817 19:19:34.715075 17550 solver.cpp:228] Iteration 5400, loss = 0.305687\nI0817 19:19:34.715117 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:19:34.715133 17550 solver.cpp:244]     Train net output #1: loss = 0.305687 (* 1 = 0.305687 loss)\nI0817 19:19:34.831324 17550 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:21:50.915918 17550 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:23:10.149698 17550 solver.cpp:404]     Test net output #0: accuracy = 0.65632\nI0817 19:23:10.149965 17550 solver.cpp:404]     Test net output #1: loss = 1.43155 (* 1 = 1.43155 loss)\nI0817 19:23:11.445279 17550 solver.cpp:228] Iteration 5500, loss = 0.314186\nI0817 19:23:11.445322 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:23:11.445338 17550 solver.cpp:244]     Train net output #1: loss = 0.314186 (* 1 = 0.314186 loss)\nI0817 19:23:11.555691 17550 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:25:27.606525 17550 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:26:46.827862 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75328\nI0817 19:26:46.828135 17550 solver.cpp:404]     Test net 
output #1: loss = 0.744091 (* 1 = 0.744091 loss)\nI0817 19:26:48.123376 17550 solver.cpp:228] Iteration 5600, loss = 0.192202\nI0817 19:26:48.123419 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:26:48.123435 17550 solver.cpp:244]     Train net output #1: loss = 0.192203 (* 1 = 0.192203 loss)\nI0817 19:26:48.227440 17550 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:29:04.342351 17550 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:30:23.567473 17550 solver.cpp:404]     Test net output #0: accuracy = 0.6278\nI0817 19:30:23.567734 17550 solver.cpp:404]     Test net output #1: loss = 1.2907 (* 1 = 1.2907 loss)\nI0817 19:30:24.863445 17550 solver.cpp:228] Iteration 5700, loss = 0.329318\nI0817 19:30:24.863485 17550 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:30:24.863500 17550 solver.cpp:244]     Train net output #1: loss = 0.329318 (* 1 = 0.329318 loss)\nI0817 19:30:24.966203 17550 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:32:41.103426 17550 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:34:00.329198 17550 solver.cpp:404]     Test net output #0: accuracy = 0.73276\nI0817 19:34:00.329470 17550 solver.cpp:404]     Test net output #1: loss = 0.949433 (* 1 = 0.949433 loss)\nI0817 19:34:01.624900 17550 solver.cpp:228] Iteration 5800, loss = 0.190877\nI0817 19:34:01.624939 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:34:01.624954 17550 solver.cpp:244]     Train net output #1: loss = 0.190877 (* 1 = 0.190877 loss)\nI0817 19:34:01.726954 17550 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:36:17.826177 17550 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:37:37.059635 17550 solver.cpp:404]     Test net output #0: accuracy = 0.66716\nI0817 19:37:37.059878 17550 solver.cpp:404]     Test net output #1: loss = 1.10005 (* 1 = 1.10005 loss)\nI0817 19:37:38.356751 17550 solver.cpp:228] Iteration 5900, loss = 0.335368\nI0817 
19:37:38.356792 17550 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 19:37:38.356808 17550 solver.cpp:244]     Train net output #1: loss = 0.335368 (* 1 = 0.335368 loss)\nI0817 19:37:38.456550 17550 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:39:54.614511 17550 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:41:13.909781 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75612\nI0817 19:41:13.910037 17550 solver.cpp:404]     Test net output #1: loss = 0.773674 (* 1 = 0.773674 loss)\nI0817 19:41:15.206382 17550 solver.cpp:228] Iteration 6000, loss = 0.263529\nI0817 19:41:15.206421 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:41:15.206437 17550 solver.cpp:244]     Train net output #1: loss = 0.26353 (* 1 = 0.26353 loss)\nI0817 19:41:15.309926 17550 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:43:31.316736 17550 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:44:50.631165 17550 solver.cpp:404]     Test net output #0: accuracy = 0.71084\nI0817 19:44:50.631438 17550 solver.cpp:404]     Test net output #1: loss = 1.05902 (* 1 = 1.05902 loss)\nI0817 19:44:51.928303 17550 solver.cpp:228] Iteration 6100, loss = 0.257212\nI0817 19:44:51.928344 17550 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:44:51.928359 17550 solver.cpp:244]     Train net output #1: loss = 0.257212 (* 1 = 0.257212 loss)\nI0817 19:44:52.032501 17550 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 19:47:08.125154 17550 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:48:27.372714 17550 solver.cpp:404]     Test net output #0: accuracy = 0.63668\nI0817 19:48:27.372987 17550 solver.cpp:404]     Test net output #1: loss = 1.51686 (* 1 = 1.51686 loss)\nI0817 19:48:28.668557 17550 solver.cpp:228] Iteration 6200, loss = 0.283416\nI0817 19:48:28.668596 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:48:28.668612 17550 solver.cpp:244]     Train net 
output #1: loss = 0.283416 (* 1 = 0.283416 loss)\nI0817 19:48:28.773457 17550 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 19:50:44.839849 17550 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 19:52:04.092499 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI0817 19:52:04.092741 17550 solver.cpp:404]     Test net output #1: loss = 0.803108 (* 1 = 0.803108 loss)\nI0817 19:52:05.388672 17550 solver.cpp:228] Iteration 6300, loss = 0.17363\nI0817 19:52:05.388712 17550 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:52:05.388727 17550 solver.cpp:244]     Train net output #1: loss = 0.173631 (* 1 = 0.173631 loss)\nI0817 19:52:05.493960 17550 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 19:54:21.562934 17550 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 19:55:40.767752 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76496\nI0817 19:55:40.768013 17550 solver.cpp:404]     Test net output #1: loss = 0.758337 (* 1 = 0.758337 loss)\nI0817 19:55:42.064728 17550 solver.cpp:228] Iteration 6400, loss = 0.230323\nI0817 19:55:42.064770 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:55:42.064787 17550 solver.cpp:244]     Train net output #1: loss = 0.230323 (* 1 = 0.230323 loss)\nI0817 19:55:42.174804 17550 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 19:57:58.309979 17550 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 19:59:17.521234 17550 solver.cpp:404]     Test net output #0: accuracy = 0.73476\nI0817 19:59:17.521499 17550 solver.cpp:404]     Test net output #1: loss = 0.822474 (* 1 = 0.822474 loss)\nI0817 19:59:18.817780 17550 solver.cpp:228] Iteration 6500, loss = 0.217201\nI0817 19:59:18.817823 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:59:18.817840 17550 solver.cpp:244]     Train net output #1: loss = 0.217202 (* 1 = 0.217202 loss)\nI0817 19:59:18.923027 17550 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 
20:01:34.919001 17550 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:02:54.163162 17550 solver.cpp:404]     Test net output #0: accuracy = 0.78804\nI0817 20:02:54.163444 17550 solver.cpp:404]     Test net output #1: loss = 0.779794 (* 1 = 0.779794 loss)\nI0817 20:02:55.458927 17550 solver.cpp:228] Iteration 6600, loss = 0.200826\nI0817 20:02:55.458967 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:02:55.458984 17550 solver.cpp:244]     Train net output #1: loss = 0.200826 (* 1 = 0.200826 loss)\nI0817 20:02:55.553833 17550 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:05:11.673773 17550 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:06:30.958992 17550 solver.cpp:404]     Test net output #0: accuracy = 0.82312\nI0817 20:06:30.959259 17550 solver.cpp:404]     Test net output #1: loss = 0.597341 (* 1 = 0.597341 loss)\nI0817 20:06:32.255851 17550 solver.cpp:228] Iteration 6700, loss = 0.370896\nI0817 20:06:32.255892 17550 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 20:06:32.255908 17550 solver.cpp:244]     Train net output #1: loss = 0.370896 (* 1 = 0.370896 loss)\nI0817 20:06:32.364337 17550 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:08:48.503438 17550 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:10:07.730154 17550 solver.cpp:404]     Test net output #0: accuracy = 0.66676\nI0817 20:10:07.730432 17550 solver.cpp:404]     Test net output #1: loss = 1.15601 (* 1 = 1.15601 loss)\nI0817 20:10:09.026484 17550 solver.cpp:228] Iteration 6800, loss = 0.263201\nI0817 20:10:09.026523 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:10:09.026540 17550 solver.cpp:244]     Train net output #1: loss = 0.263201 (* 1 = 0.263201 loss)\nI0817 20:10:09.133752 17550 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:12:25.216717 17550 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:13:44.441095 17550 solver.cpp:404]     Test net output 
#0: accuracy = 0.63944\nI0817 20:13:44.441373 17550 solver.cpp:404]     Test net output #1: loss = 1.63949 (* 1 = 1.63949 loss)\nI0817 20:13:45.737462 17550 solver.cpp:228] Iteration 6900, loss = 0.185979\nI0817 20:13:45.737504 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:13:45.737519 17550 solver.cpp:244]     Train net output #1: loss = 0.18598 (* 1 = 0.18598 loss)\nI0817 20:13:45.847239 17550 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:16:01.894353 17550 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:17:21.218575 17550 solver.cpp:404]     Test net output #0: accuracy = 0.80832\nI0817 20:17:21.218842 17550 solver.cpp:404]     Test net output #1: loss = 0.623322 (* 1 = 0.623322 loss)\nI0817 20:17:22.514653 17550 solver.cpp:228] Iteration 7000, loss = 0.19515\nI0817 20:17:22.514693 17550 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:17:22.514708 17550 solver.cpp:244]     Train net output #1: loss = 0.19515 (* 1 = 0.19515 loss)\nI0817 20:17:22.619652 17550 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:19:38.682821 17550 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:20:58.009708 17550 solver.cpp:404]     Test net output #0: accuracy = 0.79148\nI0817 20:20:58.009960 17550 solver.cpp:404]     Test net output #1: loss = 0.705675 (* 1 = 0.705675 loss)\nI0817 20:20:59.305346 17550 solver.cpp:228] Iteration 7100, loss = 0.27206\nI0817 20:20:59.305387 17550 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:20:59.305402 17550 solver.cpp:244]     Train net output #1: loss = 0.27206 (* 1 = 0.27206 loss)\nI0817 20:20:59.407968 17550 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:23:15.435611 17550 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:24:34.797477 17550 solver.cpp:404]     Test net output #0: accuracy = 0.82096\nI0817 20:24:34.797742 17550 solver.cpp:404]     Test net output #1: loss = 0.611968 (* 1 = 0.611968 loss)\nI0817 
20:24:36.094359 17550 solver.cpp:228] Iteration 7200, loss = 0.306417\nI0817 20:24:36.094400 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 20:24:36.094416 17550 solver.cpp:244]     Train net output #1: loss = 0.306417 (* 1 = 0.306417 loss)\nI0817 20:24:36.203625 17550 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:26:52.306876 17550 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:28:11.646469 17550 solver.cpp:404]     Test net output #0: accuracy = 0.75584\nI0817 20:28:11.646728 17550 solver.cpp:404]     Test net output #1: loss = 0.947494 (* 1 = 0.947494 loss)\nI0817 20:28:12.941390 17550 solver.cpp:228] Iteration 7300, loss = 0.145859\nI0817 20:28:12.941429 17550 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:28:12.941445 17550 solver.cpp:244]     Train net output #1: loss = 0.14586 (* 1 = 0.14586 loss)\nI0817 20:28:13.044960 17550 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:30:29.046232 17550 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:31:48.266440 17550 solver.cpp:404]     Test net output #0: accuracy = 0.64244\nI0817 20:31:48.266680 17550 solver.cpp:404]     Test net output #1: loss = 1.92191 (* 1 = 1.92191 loss)\nI0817 20:31:49.562644 17550 solver.cpp:228] Iteration 7400, loss = 0.223724\nI0817 20:31:49.562682 17550 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:31:49.562697 17550 solver.cpp:244]     Train net output #1: loss = 0.223724 (* 1 = 0.223724 loss)\nI0817 20:31:49.664373 17550 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:34:05.854771 17550 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:35:25.088975 17550 solver.cpp:404]     Test net output #0: accuracy = 0.81192\nI0817 20:35:25.089256 17550 solver.cpp:404]     Test net output #1: loss = 0.621509 (* 1 = 0.621509 loss)\nI0817 20:35:26.385669 17550 solver.cpp:228] Iteration 7500, loss = 0.215953\nI0817 20:35:26.385710 17550 solver.cpp:244]     Train net output #0: 
accuracy = 0.912\nI0817 20:35:26.385726 17550 solver.cpp:244]     Train net output #1: loss = 0.215953 (* 1 = 0.215953 loss)\nI0817 20:35:26.490321 17550 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 20:37:42.549849 17550 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:39:01.834911 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76992\nI0817 20:39:01.835196 17550 solver.cpp:404]     Test net output #1: loss = 0.92355 (* 1 = 0.92355 loss)\nI0817 20:39:03.132091 17550 solver.cpp:228] Iteration 7600, loss = 0.153647\nI0817 20:39:03.132131 17550 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:39:03.132148 17550 solver.cpp:244]     Train net output #1: loss = 0.153647 (* 1 = 0.153647 loss)\nI0817 20:39:03.236863 17550 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:41:19.429564 17550 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:42:38.696995 17550 solver.cpp:404]     Test net output #0: accuracy = 0.72488\nI0817 20:42:38.697263 17550 solver.cpp:404]     Test net output #1: loss = 0.975702 (* 1 = 0.975702 loss)\nI0817 20:42:39.992918 17550 solver.cpp:228] Iteration 7700, loss = 0.227976\nI0817 20:42:39.992957 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:42:39.992972 17550 solver.cpp:244]     Train net output #1: loss = 0.227976 (* 1 = 0.227976 loss)\nI0817 20:42:40.098603 17550 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:44:56.270773 17550 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:46:15.518072 17550 solver.cpp:404]     Test net output #0: accuracy = 0.76968\nI0817 20:46:15.518345 17550 solver.cpp:404]     Test net output #1: loss = 0.899389 (* 1 = 0.899389 loss)\nI0817 20:46:16.813678 17550 solver.cpp:228] Iteration 7800, loss = 0.102739\nI0817 20:46:16.813719 17550 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:46:16.813735 17550 solver.cpp:244]     Train net output #1: loss = 0.102739 (* 1 = 0.102739 loss)\nI0817 
20:46:16.913506 17550 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 20:48:32.958353 17550 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 20:49:52.197355 17550 solver.cpp:404]     Test net output #0: accuracy = 0.80552\nI0817 20:49:52.197614 17550 solver.cpp:404]     Test net output #1: loss = 0.743873 (* 1 = 0.743873 loss)\nI0817 20:49:53.492698 17550 solver.cpp:228] Iteration 7900, loss = 0.150274\nI0817 20:49:53.492738 17550 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:49:53.492753 17550 solver.cpp:244]     Train net output #1: loss = 0.150274 (* 1 = 0.150274 loss)\nI0817 20:49:53.599596 17550 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 20:52:09.652813 17550 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 20:53:28.990013 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77372\nI0817 20:53:28.990281 17550 solver.cpp:404]     Test net output #1: loss = 0.855636 (* 1 = 0.855636 loss)\nI0817 20:53:30.286356 17550 solver.cpp:228] Iteration 8000, loss = 0.150834\nI0817 20:53:30.286396 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:53:30.286412 17550 solver.cpp:244]     Train net output #1: loss = 0.150834 (* 1 = 0.150834 loss)\nI0817 20:53:30.387531 17550 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 20:55:46.579170 17550 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 20:57:05.939451 17550 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI0817 20:57:05.939716 17550 solver.cpp:404]     Test net output #1: loss = 0.750828 (* 1 = 0.750828 loss)\nI0817 20:57:07.235044 17550 solver.cpp:228] Iteration 8100, loss = 0.242566\nI0817 20:57:07.235083 17550 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 20:57:07.235098 17550 solver.cpp:244]     Train net output #1: loss = 0.242566 (* 1 = 0.242566 loss)\nI0817 20:57:07.345155 17550 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 20:59:23.412688 17550 solver.cpp:337] Iteration 8200, Testing 
net (#0)\nI0817 21:00:42.782325 17550 solver.cpp:404]     Test net output #0: accuracy = 0.84676\nI0817 21:00:42.782585 17550 solver.cpp:404]     Test net output #1: loss = 0.499943 (* 1 = 0.499943 loss)\nI0817 21:00:44.078172 17550 solver.cpp:228] Iteration 8200, loss = 0.0969617\nI0817 21:00:44.078220 17550 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:00:44.078245 17550 solver.cpp:244]     Train net output #1: loss = 0.0969619 (* 1 = 0.0969619 loss)\nI0817 21:00:44.180101 17550 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:03:00.275164 17550 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:04:19.546403 17550 solver.cpp:404]     Test net output #0: accuracy = 0.77144\nI0817 21:04:19.546653 17550 solver.cpp:404]     Test net output #1: loss = 0.878993 (* 1 = 0.878993 loss)\nI0817 21:04:20.842139 17550 solver.cpp:228] Iteration 8300, loss = 0.100883\nI0817 21:04:20.842182 17550 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:04:20.842212 17550 solver.cpp:244]     Train net output #1: loss = 0.100883 (* 1 = 0.100883 loss)\nI0817 21:04:20.949712 17550 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:06:36.900327 17550 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:07:56.228818 17550 solver.cpp:404]     Test net output #0: accuracy = 0.84648\nI0817 21:07:56.229068 17550 solver.cpp:404]     Test net output #1: loss = 0.621039 (* 1 = 0.621039 loss)\nI0817 21:07:57.525542 17550 solver.cpp:228] Iteration 8400, loss = 0.130066\nI0817 21:07:57.525586 17550 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:07:57.525611 17550 solver.cpp:244]     Train net output #1: loss = 0.130066 (* 1 = 0.130066 loss)\nI0817 21:07:57.625262 17550 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:10:13.776988 17550 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:11:32.997139 17550 solver.cpp:404]     Test net output #0: accuracy = 0.84228\nI0817 21:11:32.997385 17550 
solver.cpp:404]     Test net output #1: loss = 0.55602 (* 1 = 0.55602 loss)\nI0817 21:11:34.292546 17550 solver.cpp:228] Iteration 8500, loss = 0.0573213\nI0817 21:11:34.292589 17550 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:11:34.292613 17550 solver.cpp:244]     Train net output #1: loss = 0.0573215 (* 1 = 0.0573215 loss)\nI0817 21:11:34.399005 17550 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:13:50.411872 17550 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:15:09.693645 17550 solver.cpp:404]     Test net output #0: accuracy = 0.8474\nI0817 21:15:09.693914 17550 solver.cpp:404]     Test net output #1: loss = 0.596656 (* 1 = 0.596656 loss)\nI0817 21:15:10.991149 17550 solver.cpp:228] Iteration 8600, loss = 0.0871383\nI0817 21:15:10.991194 17550 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:15:10.991219 17550 solver.cpp:244]     Train net output #1: loss = 0.0871385 (* 1 = 0.0871385 loss)\nI0817 21:15:11.092279 17550 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:17:27.153604 17550 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:18:46.502846 17550 solver.cpp:404]     Test net output #0: accuracy = 0.86708\nI0817 21:18:46.503118 17550 solver.cpp:404]     Test net output #1: loss = 0.484488 (* 1 = 0.484488 loss)\nI0817 21:18:47.799160 17550 solver.cpp:228] Iteration 8700, loss = 0.0518973\nI0817 21:18:47.799206 17550 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:18:47.799230 17550 solver.cpp:244]     Train net output #1: loss = 0.0518975 (* 1 = 0.0518975 loss)\nI0817 21:18:47.907392 17550 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:21:04.005930 17550 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:22:23.323211 17550 solver.cpp:404]     Test net output #0: accuracy = 0.85732\nI0817 21:22:23.323489 17550 solver.cpp:404]     Test net output #1: loss = 0.542622 (* 1 = 0.542622 loss)\nI0817 21:22:24.623421 17550 solver.cpp:228] 
Iteration 8800, loss = 0.0784891\nI0817 21:22:24.623463 17550 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:22:24.623479 17550 solver.cpp:244]     Train net output #1: loss = 0.0784893 (* 1 = 0.0784893 loss)\nI0817 21:22:24.718160 17550 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:24:40.862640 17550 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:26:00.108470 17550 solver.cpp:404]     Test net output #0: accuracy = 0.85912\nI0817 21:26:00.108727 17550 solver.cpp:404]     Test net output #1: loss = 0.535482 (* 1 = 0.535482 loss)\nI0817 21:26:01.404362 17550 solver.cpp:228] Iteration 8900, loss = 0.155663\nI0817 21:26:01.404407 17550 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:26:01.404429 17550 solver.cpp:244]     Train net output #1: loss = 0.155664 (* 1 = 0.155664 loss)\nI0817 21:26:01.509922 17550 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:28:17.740319 17550 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:29:36.946604 17550 solver.cpp:404]     Test net output #0: accuracy = 0.85644\nI0817 21:29:36.946879 17550 solver.cpp:404]     Test net output #1: loss = 0.567108 (* 1 = 0.567108 loss)\nI0817 21:29:38.242892 17550 solver.cpp:228] Iteration 9000, loss = 0.0560352\nI0817 21:29:38.242935 17550 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:29:38.242959 17550 solver.cpp:244]     Train net output #1: loss = 0.0560354 (* 1 = 0.0560354 loss)\nI0817 21:29:38.354332 17550 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:31:54.504179 17550 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:33:13.734172 17550 solver.cpp:404]     Test net output #0: accuracy = 0.889\nI0817 21:33:13.734454 17550 solver.cpp:404]     Test net output #1: loss = 0.420259 (* 1 = 0.420259 loss)\nI0817 21:33:15.030783 17550 solver.cpp:228] Iteration 9100, loss = 0.0260743\nI0817 21:33:15.030827 17550 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 
21:33:15.030851 17550 solver.cpp:244]     Train net output #1: loss = 0.0260745 (* 1 = 0.0260745 loss)\nI0817 21:33:15.133747 17550 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:35:31.346809 17550 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:36:50.541934 17550 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0817 21:36:50.542229 17550 solver.cpp:404]     Test net output #1: loss = 0.458421 (* 1 = 0.458421 loss)\nI0817 21:36:51.837146 17550 solver.cpp:228] Iteration 9200, loss = 0.1051\nI0817 21:36:51.837193 17550 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:36:51.837218 17550 solver.cpp:244]     Train net output #1: loss = 0.1051 (* 1 = 0.1051 loss)\nI0817 21:36:51.943745 17550 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:39:08.086524 17550 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:40:27.335466 17550 solver.cpp:404]     Test net output #0: accuracy = 0.89172\nI0817 21:40:27.335747 17550 solver.cpp:404]     Test net output #1: loss = 0.451215 (* 1 = 0.451215 loss)\nI0817 21:40:28.632040 17550 solver.cpp:228] Iteration 9300, loss = 0.0264369\nI0817 21:40:28.632082 17550 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:40:28.632107 17550 solver.cpp:244]     Train net output #1: loss = 0.0264371 (* 1 = 0.0264371 loss)\nI0817 21:40:28.739727 17550 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:42:44.848719 17550 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:44:04.137275 17550 solver.cpp:404]     Test net output #0: accuracy = 0.87296\nI0817 21:44:04.137547 17550 solver.cpp:404]     Test net output #1: loss = 0.551783 (* 1 = 0.551783 loss)\nI0817 21:44:05.433873 17550 solver.cpp:228] Iteration 9400, loss = 0.0146245\nI0817 21:44:05.433917 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:44:05.433940 17550 solver.cpp:244]     Train net output #1: loss = 0.0146247 (* 1 = 0.0146247 loss)\nI0817 21:44:05.546442 17550 
sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 21:46:21.783685 17550 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 21:47:41.146232 17550 solver.cpp:404]     Test net output #0: accuracy = 0.9052\nI0817 21:47:41.146518 17550 solver.cpp:404]     Test net output #1: loss = 0.406049 (* 1 = 0.406049 loss)\nI0817 21:47:42.443806 17550 solver.cpp:228] Iteration 9500, loss = 0.00597212\nI0817 21:47:42.443851 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:47:42.443874 17550 solver.cpp:244]     Train net output #1: loss = 0.00597233 (* 1 = 0.00597233 loss)\nI0817 21:47:42.557922 17550 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 21:49:58.727128 17550 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 21:51:17.994335 17550 solver.cpp:404]     Test net output #0: accuracy = 0.9138\nI0817 21:51:17.994609 17550 solver.cpp:404]     Test net output #1: loss = 0.358968 (* 1 = 0.358968 loss)\nI0817 21:51:19.291512 17550 solver.cpp:228] Iteration 9600, loss = 0.00107883\nI0817 21:51:19.291560 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:51:19.291584 17550 solver.cpp:244]     Train net output #1: loss = 0.00107904 (* 1 = 0.00107904 loss)\nI0817 21:51:19.395026 17550 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 21:53:35.723574 17550 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 21:54:54.992435 17550 solver.cpp:404]     Test net output #0: accuracy = 0.92488\nI0817 21:54:54.992712 17550 solver.cpp:404]     Test net output #1: loss = 0.31298 (* 1 = 0.31298 loss)\nI0817 21:54:56.289197 17550 solver.cpp:228] Iteration 9700, loss = 0.000591491\nI0817 21:54:56.289244 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:54:56.289269 17550 solver.cpp:244]     Train net output #1: loss = 0.000591699 (* 1 = 0.000591699 loss)\nI0817 21:54:56.398182 17550 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 21:57:12.479706 17550 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 
21:58:31.848784 17550 solver.cpp:404]     Test net output #0: accuracy = 0.92492\nI0817 21:58:31.849041 17550 solver.cpp:404]     Test net output #1: loss = 0.313974 (* 1 = 0.313974 loss)\nI0817 21:58:33.145417 17550 solver.cpp:228] Iteration 9800, loss = 0.000373439\nI0817 21:58:33.145464 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:58:33.145488 17550 solver.cpp:244]     Train net output #1: loss = 0.000373647 (* 1 = 0.000373647 loss)\nI0817 21:58:33.246358 17550 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:00:49.431331 17550 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:02:08.768226 17550 solver.cpp:404]     Test net output #0: accuracy = 0.92476\nI0817 22:02:08.768501 17550 solver.cpp:404]     Test net output #1: loss = 0.306059 (* 1 = 0.306059 loss)\nI0817 22:02:10.065716 17550 solver.cpp:228] Iteration 9900, loss = 0.000533901\nI0817 22:02:10.065762 17550 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:02:10.065786 17550 solver.cpp:244]     Train net output #1: loss = 0.000534109 (* 1 = 0.000534109 loss)\nI0817 22:02:10.170817 17550 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:04:26.312650 17550 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kFig1a_iter_10000.caffemodel\nI0817 22:04:26.564766 17550 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kFig1a_iter_10000.solverstate\nI0817 22:04:26.997815 17550 solver.cpp:317] Iteration 10000, loss = 0.000332805\nI0817 22:04:26.997856 17550 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:05:46.308986 17550 solver.cpp:404]     Test net output #0: accuracy = 0.9244\nI0817 22:05:46.309272 17550 solver.cpp:404]     Test net output #1: loss = 0.308216 (* 1 = 0.308216 loss)\nI0817 22:05:46.309289 17550 solver.cpp:322] Optimization Done.\nI0817 22:05:51.624128 17550 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kMom85Fig11",
    "content": "I0821 06:45:44.116225  1322 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:45:44.118945  1322 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:45:44.120142  1322 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:45:44.121330  1322 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:45:44.122524  1322 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:45:44.124091  1322 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:45:44.125326  1322 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:45:44.126557  1322 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:45:44.127790  1322 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:45:44.544370  1322 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.85\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kMom85Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:45:44.547897  1322 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:45:44.560734  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:44.560817  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:44.561921  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:45:44.561982  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:45:44.562008  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:45:44.562029  1322 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:45:44.562048  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:45:44.562067  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:45:44.562084  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:45:44.562101  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:45:44.562120  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:45:44.562139  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:45:44.562157  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:45:44.562175  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:45:44.562192  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:45:44.562211  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:45:44.562230  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:45:44.562248  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:45:44.562268  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:45:44.562285  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:45:44.562304  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:45:44.562322  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:45:44.562357  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:45:44.562376  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:45:44.562402  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:45:44.562418  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:45:44.562438  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:45:44.562453  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:45:44.562472  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:45:44.562489  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:45:44.562506  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:45:44.562526  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:45:44.562546  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:45:44.562563  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:45:44.562582  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:45:44.562598  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:45:44.562618  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:45:44.562635  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:45:44.562655  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:45:44.562672  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:45:44.562691  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:45:44.562708  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:45:44.562732  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:45:44.562749  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:45:44.562767  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:45:44.562785  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:45:44.562805  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:45:44.562834  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:45:44.562853  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:45:44.562870  1322 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:45:44.562888  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:45:44.562904  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:45:44.562922  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:45:44.562949  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:45:44.562968  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:45:44.562986  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:45:44.563006  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:45:44.563022  1322 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:45:44.564787  1322 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:45:44.566951  1322 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:45:44.568725  1322 net.cpp:100] Creating Layer dataLayer\nI0821 06:45:44.568802  1322 net.cpp:408] dataLayer -> data_top\nI0821 06:45:44.569036  1322 net.cpp:408] dataLayer -> label\nI0821 06:45:44.569169  1322 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:45:44.580819  1327 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:45:44.629539  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:44.636646  1322 net.cpp:150] Setting up dataLayer\nI0821 06:45:44.636710  1322 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:45:44.636724  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.636730  1322 net.cpp:165] Memory required for data: 1536500\nI0821 06:45:44.636746  1322 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:45:44.636762  1322 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:45:44.636770  1322 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:45:44.636790  1322 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:45:44.636806  1322 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:45:44.636884  1322 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:45:44.636900  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.636907  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.636912  1322 net.cpp:165] Memory required for data: 1537500\nI0821 06:45:44.636917  1322 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:45:44.636982  1322 
net.cpp:100] Creating Layer pre_conv\nI0821 06:45:44.636994  1322 net.cpp:434] pre_conv <- data_top\nI0821 06:45:44.637007  1322 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:45:44.640036  1328 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:44.640056  1322 net.cpp:150] Setting up pre_conv\nI0821 06:45:44.640076  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.640082  1322 net.cpp:165] Memory required for data: 9729500\nI0821 06:45:44.640156  1322 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:45:44.641803  1322 net.cpp:100] Creating Layer pre_bn\nI0821 06:45:44.641818  1322 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:45:44.641834  1322 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:45:44.642149  1322 net.cpp:150] Setting up pre_bn\nI0821 06:45:44.642165  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.642170  1322 net.cpp:165] Memory required for data: 17921500\nI0821 06:45:44.642189  1322 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:44.642249  1322 net.cpp:100] Creating Layer pre_scale\nI0821 06:45:44.642261  1322 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:45:44.642274  1322 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:45:44.642454  1322 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:44.643393  1322 net.cpp:150] Setting up pre_scale\nI0821 06:45:44.643409  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.643415  1322 net.cpp:165] Memory required for data: 26113500\nI0821 06:45:44.643425  1322 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:45:44.643474  1322 net.cpp:100] Creating Layer pre_relu\nI0821 06:45:44.643486  1322 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:45:44.643497  1322 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:45:44.643510  1322 net.cpp:150] Setting up pre_relu\nI0821 06:45:44.643517  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.643522  1322 net.cpp:165] Memory required for data: 
34305500\nI0821 06:45:44.643527  1322 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:45:44.643535  1322 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:45:44.643540  1322 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:45:44.643550  1322 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:45:44.643560  1322 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:45:44.643610  1322 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:45:44.643621  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.643628  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.643632  1322 net.cpp:165] Memory required for data: 50689500\nI0821 06:45:44.643637  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:45:44.643651  1322 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:45:44.643656  1322 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:45:44.643667  1322 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:45:44.643983  1322 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:45:44.643997  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.644003  1322 net.cpp:165] Memory required for data: 58881500\nI0821 06:45:44.644016  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:45:44.644035  1322 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:45:44.644042  1322 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:45:44.644052  1322 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:45:44.644291  1322 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:45:44.644315  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.644320  1322 net.cpp:165] Memory required for data: 67073500\nI0821 06:45:44.644330  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:44.644340  1322 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:45:44.644345  1322 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:45:44.644353  1322 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.644403  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:44.644541  1322 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:45:44.644553  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.644558  1322 net.cpp:165] Memory required for data: 75265500\nI0821 06:45:44.644567  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:45:44.644585  1322 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:45:44.644592  1322 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:45:44.644601  1322 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.644611  1322 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:45:44.644618  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.644623  1322 net.cpp:165] Memory required for data: 83457500\nI0821 06:45:44.644629  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:45:44.644639  1322 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:45:44.644644  1322 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:45:44.644657  1322 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:45:44.644968  1322 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:45:44.644981  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.644986  1322 net.cpp:165] Memory required for data: 91649500\nI0821 06:45:44.644995  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:45:44.645005  1322 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:45:44.645010  1322 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:45:44.645022  1322 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:45:44.645251  1322 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:45:44.645264  1322 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645269  1322 net.cpp:165] Memory required for data: 99841500\nI0821 06:45:44.645287  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:44.645295  1322 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:45:44.645300  1322 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:45:44.645308  1322 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.645365  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:44.645504  1322 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:45:44.645516  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645521  1322 net.cpp:165] Memory required for data: 108033500\nI0821 06:45:44.645531  1322 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:45:44.645582  1322 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:45:44.645594  1322 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:45:44.645601  1322 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:45:44.645613  1322 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:45:44.645685  1322 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:45:44.645700  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645705  1322 net.cpp:165] Memory required for data: 116225500\nI0821 06:45:44.645711  1322 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:45:44.645723  1322 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:45:44.645730  1322 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:45:44.645736  1322 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.645746  1322 net.cpp:150] Setting up L1_b1_relu\nI0821 06:45:44.645753  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645758  1322 net.cpp:165] Memory required for data: 124417500\nI0821 06:45:44.645763  1322 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.645772  1322 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.645777  1322 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:45:44.645784  1322 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:44.645794  1322 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:44.645844  1322 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.645856  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645864  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.645875  1322 net.cpp:165] Memory required for data: 140801500\nI0821 06:45:44.645880  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:45:44.645895  1322 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:45:44.645901  1322 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:44.645910  1322 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:45:44.646216  1322 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:45:44.646230  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.646235  1322 net.cpp:165] Memory required for data: 148993500\nI0821 06:45:44.646245  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:45:44.646256  1322 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:45:44.646262  1322 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:45:44.646275  1322 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:45:44.646510  1322 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:45:44.646524  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.646528  1322 net.cpp:165] Memory required for data: 157185500\nI0821 06:45:44.646539  1322 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:45:44.646548  1322 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:45:44.646554  1322 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:45:44.646564  1322 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.646615  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:45:44.646749  1322 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:45:44.646764  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.646769  1322 net.cpp:165] Memory required for data: 165377500\nI0821 06:45:44.646777  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:45:44.646785  1322 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:45:44.646790  1322 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:45:44.646798  1322 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.646807  1322 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:45:44.646814  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.646818  1322 net.cpp:165] Memory required for data: 173569500\nI0821 06:45:44.646823  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:45:44.646845  1322 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:45:44.646852  1322 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:45:44.646865  1322 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:45:44.647167  1322 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:45:44.647181  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647186  1322 net.cpp:165] Memory required for data: 181761500\nI0821 06:45:44.647195  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:45:44.647207  1322 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:45:44.647213  1322 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:45:44.647223  1322 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:45:44.647466  1322 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:45:44.647480  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647485  1322 net.cpp:165] Memory required for data: 189953500\nI0821 06:45:44.647500  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:44.647513  1322 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:45:44.647519  1322 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:45:44.647527  1322 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.647583  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:44.647719  1322 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:45:44.647732  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647737  1322 net.cpp:165] Memory required for data: 198145500\nI0821 06:45:44.647745  1322 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:45:44.647761  1322 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:45:44.647768  1322 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:45:44.647774  1322 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:44.647785  1322 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:45:44.647819  1322 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:45:44.647835  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647840  1322 net.cpp:165] Memory required for data: 206337500\nI0821 06:45:44.647845  1322 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:45:44.647852  1322 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:45:44.647857  1322 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:45:44.647868  1322 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.647877  1322 net.cpp:150] Setting up L1_b2_relu\nI0821 06:45:44.647884  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647889  1322 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:45:44.647894  1322 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.647902  1322 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.647905  1322 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:45:44.647913  1322 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:44.647922  1322 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:44.647967  1322 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.647979  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647985  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.647989  1322 net.cpp:165] Memory required for data: 230913500\nI0821 06:45:44.647994  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:45:44.648005  1322 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:45:44.648011  1322 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:44.648023  1322 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:45:44.648322  1322 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:45:44.648336  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.648341  1322 net.cpp:165] Memory required for data: 239105500\nI0821 06:45:44.648350  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:45:44.648360  1322 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:45:44.648365  1322 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:45:44.648375  1322 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:45:44.648615  1322 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:45:44.648627  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.648633  1322 net.cpp:165] Memory required for data: 
247297500\nI0821 06:45:44.648643  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:44.648654  1322 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:45:44.648660  1322 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:45:44.648669  1322 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.648718  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:44.648864  1322 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:45:44.648878  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.648883  1322 net.cpp:165] Memory required for data: 255489500\nI0821 06:45:44.648892  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:45:44.648900  1322 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:45:44.648905  1322 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:45:44.648916  1322 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.648926  1322 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:45:44.648939  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.648944  1322 net.cpp:165] Memory required for data: 263681500\nI0821 06:45:44.648949  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:45:44.648963  1322 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:45:44.648969  1322 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:45:44.648977  1322 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:45:44.649304  1322 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:45:44.649318  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.649323  1322 net.cpp:165] Memory required for data: 271873500\nI0821 06:45:44.649333  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:45:44.649346  1322 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:45:44.649353  1322 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:45:44.649364  1322 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:45:44.649598  1322 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:45:44.649611  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.649616  1322 net.cpp:165] Memory required for data: 280065500\nI0821 06:45:44.649626  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:44.649638  1322 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:45:44.649644  1322 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:45:44.649652  1322 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.649703  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:44.649849  1322 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:45:44.649863  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.649868  1322 net.cpp:165] Memory required for data: 288257500\nI0821 06:45:44.649876  1322 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:45:44.649888  1322 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:45:44.649894  1322 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:45:44.649901  1322 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:44.649909  1322 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:45:44.649943  1322 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:45:44.649955  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.649960  1322 net.cpp:165] Memory required for data: 296449500\nI0821 06:45:44.649966  1322 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:45:44.649973  1322 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:45:44.649979  1322 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:45:44.649989  1322 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.649998  1322 net.cpp:150] Setting up L1_b3_relu\nI0821 06:45:44.650005  1322 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:45:44.650010  1322 net.cpp:165] Memory required for data: 304641500\nI0821 06:45:44.650015  1322 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.650022  1322 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.650027  1322 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:45:44.650034  1322 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:44.650043  1322 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:44.650087  1322 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.650099  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.650105  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.650110  1322 net.cpp:165] Memory required for data: 321025500\nI0821 06:45:44.650115  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:45:44.650126  1322 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:45:44.650131  1322 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:44.650151  1322 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:45:44.650460  1322 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:45:44.650473  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.650477  1322 net.cpp:165] Memory required for data: 329217500\nI0821 06:45:44.650486  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:45:44.650496  1322 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:45:44.650501  1322 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:45:44.650512  1322 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:45:44.650748  1322 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:45:44.650761  1322 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:45:44.650766  1322 net.cpp:165] Memory required for data: 337409500\nI0821 06:45:44.650776  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:44.650789  1322 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:45:44.650794  1322 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:45:44.650801  1322 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.650866  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:44.651011  1322 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:45:44.651024  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.651029  1322 net.cpp:165] Memory required for data: 345601500\nI0821 06:45:44.651038  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:45:44.651049  1322 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:45:44.651054  1322 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:45:44.651062  1322 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.651072  1322 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:45:44.651078  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.651083  1322 net.cpp:165] Memory required for data: 353793500\nI0821 06:45:44.651087  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:45:44.651101  1322 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:45:44.651108  1322 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:45:44.651120  1322 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:45:44.651423  1322 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:45:44.651437  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.651443  1322 net.cpp:165] Memory required for data: 361985500\nI0821 06:45:44.651450  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:45:44.651459  1322 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:45:44.651465  1322 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:45:44.651473  1322 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:45:44.651712  1322 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:45:44.651726  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.651731  1322 net.cpp:165] Memory required for data: 370177500\nI0821 06:45:44.651743  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:44.651752  1322 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:45:44.651757  1322 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:45:44.651767  1322 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.651819  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:44.651963  1322 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:45:44.651976  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.651981  1322 net.cpp:165] Memory required for data: 378369500\nI0821 06:45:44.651990  1322 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:45:44.652003  1322 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:45:44.652007  1322 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:45:44.652014  1322 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:44.652025  1322 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:45:44.652062  1322 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:45:44.652072  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652077  1322 net.cpp:165] Memory required for data: 386561500\nI0821 06:45:44.652082  1322 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:45:44.652094  1322 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:45:44.652099  1322 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:45:44.652107  1322 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.652117  1322 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:45:44.652123  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652127  1322 net.cpp:165] Memory required for data: 394753500\nI0821 06:45:44.652132  1322 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.652139  1322 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.652145  1322 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:45:44.652153  1322 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:44.652161  1322 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:44.652206  1322 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.652217  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652225  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652230  1322 net.cpp:165] Memory required for data: 411137500\nI0821 06:45:44.652235  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:45:44.652247  1322 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:45:44.652253  1322 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:44.652262  1322 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:45:44.652570  1322 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:45:44.652582  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652587  1322 net.cpp:165] Memory required for data: 419329500\nI0821 06:45:44.652612  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:45:44.652624  1322 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:45:44.652631  1322 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:45:44.652639  1322 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:45:44.652887  1322 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:45:44.652904  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.652909  1322 net.cpp:165] Memory required for data: 427521500\nI0821 06:45:44.652920  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:44.652930  1322 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:45:44.652935  1322 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:45:44.652942  1322 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.652994  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:44.653133  1322 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:45:44.653146  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.653151  1322 net.cpp:165] Memory required for data: 435713500\nI0821 06:45:44.653159  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:45:44.653170  1322 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:45:44.653177  1322 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:45:44.653183  1322 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.653193  1322 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:45:44.653200  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.653204  1322 net.cpp:165] Memory required for data: 443905500\nI0821 06:45:44.653209  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:45:44.653223  1322 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:45:44.653229  1322 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:45:44.653247  1322 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:45:44.653554  1322 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:45:44.653568  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.653573  1322 net.cpp:165] Memory required for data: 452097500\nI0821 06:45:44.653583  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:45:44.653592  1322 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:45:44.653597  1322 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:45:44.653610  1322 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:45:44.653852  1322 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:45:44.653865  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.653870  1322 net.cpp:165] Memory required for data: 460289500\nI0821 06:45:44.653880  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:44.653892  1322 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:45:44.653898  1322 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:45:44.653906  1322 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.653959  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:44.654101  1322 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:45:44.654114  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654119  1322 net.cpp:165] Memory required for data: 468481500\nI0821 06:45:44.654129  1322 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:45:44.654140  1322 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:45:44.654146  1322 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:45:44.654153  1322 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:44.654160  1322 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:45:44.654194  1322 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:45:44.654204  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654208  1322 net.cpp:165] Memory required for data: 476673500\nI0821 06:45:44.654213  1322 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:45:44.654222  1322 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:45:44.654227  1322 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:45:44.654237  1322 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.654247  1322 net.cpp:150] Setting up L1_b5_relu\nI0821 06:45:44.654253  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654258  1322 net.cpp:165] Memory required for data: 484865500\nI0821 06:45:44.654263  1322 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.654269  1322 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.654274  1322 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:45:44.654281  1322 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:44.654290  1322 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:44.654335  1322 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.654346  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654353  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654357  1322 net.cpp:165] Memory required for data: 501249500\nI0821 06:45:44.654362  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:45:44.654372  1322 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:45:44.654378  1322 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:44.654391  1322 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:45:44.654696  1322 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:45:44.654711  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.654716  1322 net.cpp:165] Memory required for data: 509441500\nI0821 06:45:44.654731  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:45:44.654739  1322 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:45:44.654745  1322 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:45:44.654757  1322 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:45:44.655000  1322 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:45:44.655014  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.655019  1322 net.cpp:165] Memory required for data: 517633500\nI0821 06:45:44.655030  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:44.655041  1322 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:45:44.655046  1322 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:45:44.655055  1322 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.655105  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:44.655244  1322 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:45:44.655256  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.655261  1322 net.cpp:165] Memory required for data: 525825500\nI0821 06:45:44.655270  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:45:44.655283  1322 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:45:44.655289  1322 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:45:44.655297  1322 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.655306  1322 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:45:44.655313  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.655318  1322 net.cpp:165] Memory required for data: 534017500\nI0821 06:45:44.655323  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:45:44.655338  1322 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:45:44.655344  1322 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:45:44.655354  1322 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:45:44.655668  1322 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:45:44.655683  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.655686  1322 net.cpp:165] Memory required for data: 542209500\nI0821 06:45:44.655695  1322 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:45:44.655704  1322 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:45:44.655710  1322 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:45:44.655719  1322 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:45:44.655966  1322 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:45:44.655978  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.655983  1322 net.cpp:165] Memory required for data: 550401500\nI0821 06:45:44.655994  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:44.656005  1322 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:45:44.656011  1322 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:45:44.656019  1322 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.656075  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:44.656213  1322 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:45:44.656225  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656230  1322 net.cpp:165] Memory required for data: 558593500\nI0821 06:45:44.656239  1322 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:45:44.656257  1322 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:45:44.656265  1322 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:45:44.656271  1322 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:44.656280  1322 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:45:44.656311  1322 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:45:44.656322  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656327  1322 net.cpp:165] Memory required for data: 566785500\nI0821 06:45:44.656332  1322 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:45:44.656350  1322 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:45:44.656358  1322 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:45:44.656364  1322 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.656373  1322 net.cpp:150] Setting up L1_b6_relu\nI0821 06:45:44.656381  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656385  1322 net.cpp:165] Memory required for data: 574977500\nI0821 06:45:44.656390  1322 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.656397  1322 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.656402  1322 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:45:44.656409  1322 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:44.656419  1322 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:44.656464  1322 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.656476  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656482  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656486  1322 net.cpp:165] Memory required for data: 591361500\nI0821 06:45:44.656491  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:45:44.656505  1322 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:45:44.656512  1322 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:44.656520  1322 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:45:44.656836  1322 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:45:44.656852  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.656857  1322 net.cpp:165] Memory required for data: 599553500\nI0821 06:45:44.656865  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:45:44.656877  1322 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:45:44.656883  1322 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:45:44.656891  1322 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:45:44.657136  1322 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:45:44.657150  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.657155  1322 net.cpp:165] Memory required for data: 607745500\nI0821 06:45:44.657165  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:44.657173  1322 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:45:44.657179  1322 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:45:44.657186  1322 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.657241  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:44.657379  1322 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:45:44.657392  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.657397  1322 net.cpp:165] Memory required for data: 615937500\nI0821 06:45:44.657405  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:45:44.657413  1322 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:45:44.657419  1322 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:45:44.657430  1322 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.657439  1322 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:45:44.657446  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.657451  1322 net.cpp:165] Memory required for data: 624129500\nI0821 06:45:44.657456  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:45:44.657470  1322 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:45:44.657476  1322 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:45:44.657485  1322 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:45:44.657793  1322 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:45:44.657807  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.657812  1322 
net.cpp:165] Memory required for data: 632321500\nI0821 06:45:44.657833  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:45:44.657846  1322 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:45:44.657853  1322 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:45:44.657861  1322 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:45:44.658100  1322 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:45:44.658116  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658121  1322 net.cpp:165] Memory required for data: 640513500\nI0821 06:45:44.658131  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:44.658140  1322 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:45:44.658146  1322 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:45:44.658154  1322 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.658206  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:44.658350  1322 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:45:44.658363  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658368  1322 net.cpp:165] Memory required for data: 648705500\nI0821 06:45:44.658377  1322 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:45:44.658388  1322 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:45:44.658394  1322 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:45:44.658402  1322 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:44.658412  1322 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:45:44.658443  1322 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:45:44.658454  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658459  1322 net.cpp:165] Memory required for data: 656897500\nI0821 06:45:44.658464  1322 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:45:44.658475  1322 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:45:44.658481  1322 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:45:44.658488  1322 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.658499  1322 net.cpp:150] Setting up L1_b7_relu\nI0821 06:45:44.658504  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658509  1322 net.cpp:165] Memory required for data: 665089500\nI0821 06:45:44.658514  1322 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.658521  1322 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.658526  1322 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:45:44.658534  1322 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:44.658543  1322 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:44.658589  1322 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.658601  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658607  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658612  1322 net.cpp:165] Memory required for data: 681473500\nI0821 06:45:44.658617  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:45:44.658630  1322 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:45:44.658637  1322 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:44.658645  1322 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:45:44.658965  1322 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:45:44.658979  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.658984  1322 net.cpp:165] Memory required for data: 689665500\nI0821 06:45:44.658993  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:45:44.659004  1322 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:45:44.659010  1322 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:45:44.659019  1322 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:45:44.659271  1322 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:45:44.659286  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.659291  1322 net.cpp:165] Memory required for data: 697857500\nI0821 06:45:44.659301  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:44.659309  1322 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:45:44.659314  1322 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:45:44.659322  1322 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.659377  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:44.659523  1322 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:45:44.659535  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.659540  1322 net.cpp:165] Memory required for data: 706049500\nI0821 06:45:44.659549  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:45:44.659557  1322 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:45:44.659562  1322 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:45:44.659574  1322 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.659582  1322 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:45:44.659590  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.659595  1322 net.cpp:165] Memory required for data: 714241500\nI0821 06:45:44.659598  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:45:44.659610  1322 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:45:44.659615  1322 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:45:44.659626  1322 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:45:44.659950  1322 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:45:44.659965  1322 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.659970  1322 net.cpp:165] Memory required for data: 722433500\nI0821 06:45:44.659977  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:45:44.659986  1322 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:45:44.659992  1322 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:45:44.660004  1322 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:45:44.660248  1322 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:45:44.660264  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660269  1322 net.cpp:165] Memory required for data: 730625500\nI0821 06:45:44.660279  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:44.660289  1322 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:45:44.660295  1322 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:45:44.660302  1322 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.660356  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:44.660495  1322 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:45:44.660508  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660512  1322 net.cpp:165] Memory required for data: 738817500\nI0821 06:45:44.660521  1322 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:45:44.660533  1322 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:45:44.660539  1322 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:45:44.660547  1322 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:44.660557  1322 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:45:44.660588  1322 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:45:44.660598  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660601  1322 net.cpp:165] Memory required for data: 747009500\nI0821 06:45:44.660607  1322 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:45:44.660617  1322 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:45:44.660624  1322 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:45:44.660630  1322 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.660640  1322 net.cpp:150] Setting up L1_b8_relu\nI0821 06:45:44.660646  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660657  1322 net.cpp:165] Memory required for data: 755201500\nI0821 06:45:44.660662  1322 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.660670  1322 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.660675  1322 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:45:44.660682  1322 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:44.660691  1322 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:44.660737  1322 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.660749  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660756  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.660760  1322 net.cpp:165] Memory required for data: 771585500\nI0821 06:45:44.660765  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:45:44.660779  1322 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:45:44.660785  1322 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:44.660795  1322 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:45:44.661123  1322 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:45:44.661139  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.661144  1322 net.cpp:165] Memory required for data: 779777500\nI0821 06:45:44.661154  1322 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:45:44.661165  1322 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:45:44.661171  1322 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:45:44.661180  1322 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:45:44.661427  1322 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:45:44.661442  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.661447  1322 net.cpp:165] Memory required for data: 787969500\nI0821 06:45:44.661456  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:44.661465  1322 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:45:44.661471  1322 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:45:44.661481  1322 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.661535  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:44.661679  1322 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:45:44.661694  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.661698  1322 net.cpp:165] Memory required for data: 796161500\nI0821 06:45:44.661706  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:45:44.661715  1322 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:45:44.661720  1322 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:45:44.661728  1322 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.661737  1322 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:45:44.661744  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.661748  1322 net.cpp:165] Memory required for data: 804353500\nI0821 06:45:44.661753  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:45:44.661767  1322 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:45:44.661773  1322 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:45:44.661784  1322 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:45:44.662111  1322 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:45:44.662125  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662130  1322 net.cpp:165] Memory required for data: 812545500\nI0821 06:45:44.662139  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:45:44.662151  1322 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:45:44.662158  1322 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:45:44.662168  1322 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:45:44.662412  1322 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:45:44.662426  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662431  1322 net.cpp:165] Memory required for data: 820737500\nI0821 06:45:44.662462  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:44.662474  1322 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:45:44.662480  1322 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:45:44.662488  1322 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.662544  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:44.662689  1322 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:45:44.662701  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662706  1322 net.cpp:165] Memory required for data: 828929500\nI0821 06:45:44.662715  1322 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:45:44.662724  1322 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:45:44.662730  1322 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:45:44.662737  1322 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:44.662744  1322 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:45:44.662775  1322 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:45:44.662784  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662789  1322 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:45:44.662794  1322 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:45:44.662806  1322 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:45:44.662813  1322 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:45:44.662820  1322 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.662835  1322 net.cpp:150] Setting up L1_b9_relu\nI0821 06:45:44.662847  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662852  1322 net.cpp:165] Memory required for data: 845313500\nI0821 06:45:44.662856  1322 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.662864  1322 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.662869  1322 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:45:44.662876  1322 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:44.662885  1322 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:44.662933  1322 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.662945  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662951  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.662955  1322 net.cpp:165] Memory required for data: 861697500\nI0821 06:45:44.662961  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:45:44.662974  1322 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:45:44.662981  1322 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:44.662989  1322 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:45:44.663311  1322 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:45:44.663324  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.663329  1322 net.cpp:165] Memory required for data: 
863745500\nI0821 06:45:44.663338  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:45:44.663349  1322 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:45:44.663357  1322 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:45:44.663367  1322 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:45:44.663605  1322 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:45:44.663617  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.663622  1322 net.cpp:165] Memory required for data: 865793500\nI0821 06:45:44.663633  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:44.663641  1322 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:45:44.663655  1322 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:45:44.663662  1322 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.663720  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:44.663866  1322 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:45:44.663879  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.663884  1322 net.cpp:165] Memory required for data: 867841500\nI0821 06:45:44.663893  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:45:44.663904  1322 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:45:44.663910  1322 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:45:44.663918  1322 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.663926  1322 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:45:44.663933  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.663938  1322 net.cpp:165] Memory required for data: 869889500\nI0821 06:45:44.663942  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:45:44.663956  1322 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:45:44.663962  1322 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:45:44.663971  1322 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:45:44.664294  1322 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:45:44.664307  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.664312  1322 net.cpp:165] Memory required for data: 871937500\nI0821 06:45:44.664320  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:45:44.664332  1322 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:45:44.664338  1322 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:45:44.664347  1322 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:45:44.664592  1322 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:45:44.664607  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.664611  1322 net.cpp:165] Memory required for data: 873985500\nI0821 06:45:44.664623  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:44.664630  1322 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:45:44.664636  1322 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:45:44.664644  1322 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.664698  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:44.664852  1322 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:45:44.664865  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.664870  1322 net.cpp:165] Memory required for data: 876033500\nI0821 06:45:44.664880  1322 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:45:44.664892  1322 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:45:44.664898  1322 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:44.664911  1322 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:45:44.665000  1322 net.cpp:150] Setting up L2_b1_pool\nI0821 06:45:44.665014  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.665020  1322 net.cpp:165] Memory required for data: 878081500\nI0821 06:45:44.665025  1322 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:45:44.665038  1322 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:45:44.665045  1322 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:45:44.665052  1322 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:45:44.665060  1322 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:45:44.665093  1322 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:45:44.665102  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.665107  1322 net.cpp:165] Memory required for data: 880129500\nI0821 06:45:44.665112  1322 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:45:44.665119  1322 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:45:44.665125  1322 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:45:44.665135  1322 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.665153  1322 net.cpp:150] Setting up L2_b1_relu\nI0821 06:45:44.665159  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.665164  1322 net.cpp:165] Memory required for data: 882177500\nI0821 06:45:44.665169  1322 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:45:44.665225  1322 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:45:44.665240  1322 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:45:44.667611  1322 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:45:44.667629  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.667635  1322 net.cpp:165] Memory required for data: 884225500\nI0821 06:45:44.667641  1322 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:45:44.667659  1322 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:45:44.667665  1322 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:45:44.667672  1322 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:45:44.667680  1322 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:45:44.667763  1322 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:45:44.667778  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.667783  1322 net.cpp:165] Memory required for data: 888321500\nI0821 06:45:44.667789  1322 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.667798  1322 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.667804  1322 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:45:44.667814  1322 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:44.667825  1322 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:44.667882  1322 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.667896  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.667904  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.667908  1322 net.cpp:165] Memory required for data: 896513500\nI0821 06:45:44.667914  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:45:44.667925  1322 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:45:44.667932  1322 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:44.667942  1322 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:45:44.669386  1322 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:45:44.669404  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.669409  1322 net.cpp:165] Memory required for data: 900609500\nI0821 06:45:44.669419  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:45:44.669431  1322 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:45:44.669438  1322 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:45:44.669450  1322 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:45:44.669693  1322 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:45:44.669706  1322 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:45:44.669711  1322 net.cpp:165] Memory required for data: 904705500\nI0821 06:45:44.669723  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:44.669731  1322 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:45:44.669737  1322 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:45:44.669745  1322 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.669803  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:44.669955  1322 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:45:44.669970  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.669975  1322 net.cpp:165] Memory required for data: 908801500\nI0821 06:45:44.669983  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:45:44.669994  1322 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:45:44.670001  1322 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:45:44.670008  1322 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.670027  1322 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:45:44.670034  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.670038  1322 net.cpp:165] Memory required for data: 912897500\nI0821 06:45:44.670043  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:45:44.670058  1322 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:45:44.670064  1322 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:45:44.670075  1322 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:45:44.670539  1322 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:45:44.670554  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.670558  1322 net.cpp:165] Memory required for data: 916993500\nI0821 06:45:44.670567  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:45:44.670580  1322 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:45:44.670586  
1322 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:45:44.670598  1322 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:45:44.670850  1322 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:45:44.670863  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.670868  1322 net.cpp:165] Memory required for data: 921089500\nI0821 06:45:44.670879  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:44.670888  1322 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:45:44.670893  1322 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:45:44.670902  1322 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.670959  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:44.671103  1322 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:45:44.671115  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671120  1322 net.cpp:165] Memory required for data: 925185500\nI0821 06:45:44.671129  1322 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:45:44.671141  1322 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:45:44.671147  1322 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:45:44.671154  1322 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:44.671162  1322 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:45:44.671190  1322 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:45:44.671198  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671203  1322 net.cpp:165] Memory required for data: 929281500\nI0821 06:45:44.671208  1322 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:45:44.671219  1322 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:45:44.671226  1322 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:45:44.671232  1322 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.671241  1322 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:45:44.671248  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671253  1322 net.cpp:165] Memory required for data: 933377500\nI0821 06:45:44.671258  1322 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.671265  1322 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.671272  1322 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:45:44.671278  1322 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:44.671288  1322 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:44.671334  1322 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.671346  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671352  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671357  1322 net.cpp:165] Memory required for data: 941569500\nI0821 06:45:44.671362  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:45:44.671383  1322 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:45:44.671391  1322 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:44.671401  1322 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:45:44.671871  1322 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:45:44.671886  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.671891  1322 net.cpp:165] Memory required for data: 945665500\nI0821 06:45:44.671900  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:45:44.671912  1322 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:45:44.671919  1322 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:45:44.671931  1322 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:45:44.672174  1322 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:45:44.672188  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.672193  1322 net.cpp:165] Memory required for data: 949761500\nI0821 06:45:44.672202  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:44.672211  1322 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:45:44.672216  1322 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:45:44.672224  1322 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.672282  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:44.672430  1322 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:45:44.672442  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.672447  1322 net.cpp:165] Memory required for data: 953857500\nI0821 06:45:44.672456  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:45:44.672467  1322 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:45:44.672473  1322 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:45:44.672482  1322 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.672490  1322 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:45:44.672497  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.672502  1322 net.cpp:165] Memory required for data: 957953500\nI0821 06:45:44.672508  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:45:44.672521  1322 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:45:44.672528  1322 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:45:44.672536  1322 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:45:44.673002  1322 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:45:44.673015  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673020  1322 net.cpp:165] Memory required for data: 962049500\nI0821 06:45:44.673029  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:45:44.673041  1322 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:45:44.673048  1322 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:45:44.673056  1322 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:45:44.673303  1322 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:45:44.673319  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673324  1322 net.cpp:165] Memory required for data: 966145500\nI0821 06:45:44.673334  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:44.673343  1322 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:45:44.673349  1322 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:45:44.673357  1322 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.673411  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:44.673560  1322 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:45:44.673573  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673578  1322 net.cpp:165] Memory required for data: 970241500\nI0821 06:45:44.673586  1322 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:45:44.673595  1322 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:45:44.673601  1322 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:45:44.673609  1322 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:44.673630  1322 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:45:44.673658  1322 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:45:44.673668  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673672  1322 net.cpp:165] Memory required for data: 974337500\nI0821 06:45:44.673677  1322 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:45:44.673701  1322 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:45:44.673707  1322 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:45:44.673714  1322 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.673724  1322 net.cpp:150] Setting up L2_b3_relu\nI0821 06:45:44.673732  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673735  1322 net.cpp:165] Memory required for data: 978433500\nI0821 06:45:44.673741  1322 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.673748  1322 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.673753  1322 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:45:44.673760  1322 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:44.673770  1322 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:44.673818  1322 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.673835  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673842  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.673846  1322 net.cpp:165] Memory required for data: 986625500\nI0821 06:45:44.673852  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:45:44.673863  1322 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:45:44.673869  1322 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:44.673882  1322 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:45:44.674341  1322 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:45:44.674355  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.674360  1322 net.cpp:165] Memory required for data: 990721500\nI0821 06:45:44.674370  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:45:44.674378  1322 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:45:44.674384  1322 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:45:44.674397  1322 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:45:44.674643  1322 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:45:44.674656  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.674661  1322 net.cpp:165] Memory required for data: 994817500\nI0821 06:45:44.674671  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:44.674684  1322 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:45:44.674690  1322 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:45:44.674696  1322 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.674751  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:44.674903  1322 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:45:44.674916  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.674921  1322 net.cpp:165] Memory required for data: 998913500\nI0821 06:45:44.674931  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:45:44.674942  1322 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:45:44.674947  1322 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:45:44.674955  1322 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.674964  1322 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:45:44.674974  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.674979  1322 net.cpp:165] Memory required for data: 1003009500\nI0821 06:45:44.674984  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:45:44.675002  1322 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:45:44.675009  1322 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:45:44.675019  1322 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:45:44.675475  1322 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:45:44.675490  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.675495  1322 net.cpp:165] Memory required for data: 1007105500\nI0821 06:45:44.675503  1322 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:45:44.675513  1322 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:45:44.675519  1322 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:45:44.675532  1322 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:45:44.675775  1322 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:45:44.675786  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.675791  1322 net.cpp:165] Memory required for data: 1011201500\nI0821 06:45:44.675801  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:44.675812  1322 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:45:44.675819  1322 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:45:44.675827  1322 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.675887  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:44.676033  1322 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:45:44.676045  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676050  1322 net.cpp:165] Memory required for data: 1015297500\nI0821 06:45:44.676059  1322 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:45:44.676074  1322 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:45:44.676079  1322 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:45:44.676086  1322 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:44.676095  1322 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:45:44.676125  1322 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:45:44.676133  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676138  1322 net.cpp:165] Memory required for data: 1019393500\nI0821 06:45:44.676143  1322 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:45:44.676151  1322 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:45:44.676156  1322 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:45:44.676167  1322 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.676177  1322 net.cpp:150] Setting up L2_b4_relu\nI0821 06:45:44.676183  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676187  1322 net.cpp:165] Memory required for data: 1023489500\nI0821 06:45:44.676192  1322 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.676199  1322 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.676204  1322 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:45:44.676213  1322 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:44.676221  1322 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:44.676267  1322 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.676278  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676285  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676290  1322 net.cpp:165] Memory required for data: 1031681500\nI0821 06:45:44.676295  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:45:44.676306  1322 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:45:44.676311  1322 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:44.676323  1322 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:45:44.676779  1322 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:45:44.676800  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.676805  1322 net.cpp:165] Memory required for data: 1035777500\nI0821 06:45:44.676813  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:45:44.676822  1322 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:45:44.676833  1322 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:45:44.676846  1322 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:45:44.677095  1322 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:45:44.677109  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.677114  1322 net.cpp:165] Memory required for data: 1039873500\nI0821 06:45:44.677124  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:44.677135  1322 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:45:44.677141  1322 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:45:44.677150  1322 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.677203  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:44.677351  1322 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:45:44.677363  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.677368  1322 net.cpp:165] Memory required for data: 1043969500\nI0821 06:45:44.677377  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:45:44.677388  1322 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:45:44.677395  1322 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:45:44.677402  1322 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.677412  1322 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:45:44.677419  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.677423  1322 net.cpp:165] Memory required for data: 1048065500\nI0821 06:45:44.677428  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:45:44.677443  1322 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:45:44.677448  1322 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:45:44.677459  1322 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:45:44.677919  1322 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:45:44.677934  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.677939  1322 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:45:44.677947  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:45:44.677958  1322 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:45:44.677963  1322 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:45:44.677974  1322 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:45:44.678223  1322 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:45:44.678236  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678241  1322 net.cpp:165] Memory required for data: 1056257500\nI0821 06:45:44.678252  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:44.678263  1322 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:45:44.678269  1322 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:45:44.678277  1322 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.678333  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:44.678479  1322 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:45:44.678491  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678496  1322 net.cpp:165] Memory required for data: 1060353500\nI0821 06:45:44.678505  1322 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:45:44.678515  1322 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:45:44.678521  1322 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:45:44.678530  1322 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:44.678539  1322 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:45:44.678565  1322 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:45:44.678577  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678591  1322 net.cpp:165] Memory required for data: 1064449500\nI0821 06:45:44.678596  1322 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:45:44.678604  1322 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:45:44.678609  1322 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:45:44.678617  1322 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.678627  1322 net.cpp:150] Setting up L2_b5_relu\nI0821 06:45:44.678633  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678637  1322 net.cpp:165] Memory required for data: 1068545500\nI0821 06:45:44.678642  1322 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.678653  1322 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.678658  1322 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:45:44.678665  1322 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:44.678675  1322 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:44.678722  1322 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.678735  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678740  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.678745  1322 net.cpp:165] Memory required for data: 1076737500\nI0821 06:45:44.678750  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:45:44.678761  1322 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:45:44.678767  1322 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:44.678779  1322 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:45:44.679249  1322 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:45:44.679262  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.679267  1322 net.cpp:165] Memory required for data: 1080833500\nI0821 06:45:44.679276  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:45:44.679286  1322 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:45:44.679292  1322 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:45:44.679303  1322 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:45:44.679554  1322 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:45:44.679567  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.679572  1322 net.cpp:165] Memory required for data: 1084929500\nI0821 06:45:44.679584  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:44.679594  1322 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:45:44.679600  1322 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:45:44.679608  1322 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.679662  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:44.679813  1322 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:45:44.679826  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.679836  1322 net.cpp:165] Memory required for data: 1089025500\nI0821 06:45:44.679846  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:45:44.679857  1322 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:45:44.679863  1322 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:45:44.679870  1322 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.679880  1322 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:45:44.679888  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.679893  1322 net.cpp:165] Memory required for data: 1093121500\nI0821 06:45:44.679896  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:45:44.679913  1322 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:45:44.679919  1322 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:45:44.679929  1322 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:45:44.680388  1322 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:45:44.680408  1322 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.680414  1322 net.cpp:165] Memory required for data: 1097217500\nI0821 06:45:44.680423  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:45:44.680433  1322 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:45:44.680438  1322 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:45:44.680446  1322 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:45:44.680696  1322 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:45:44.680707  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.680712  1322 net.cpp:165] Memory required for data: 1101313500\nI0821 06:45:44.680722  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:44.680732  1322 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:45:44.680737  1322 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:45:44.680748  1322 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.680804  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:44.680958  1322 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:45:44.680970  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.680975  1322 net.cpp:165] Memory required for data: 1105409500\nI0821 06:45:44.680984  1322 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:45:44.680994  1322 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:45:44.680999  1322 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:45:44.681005  1322 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:44.681016  1322 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:45:44.681043  1322 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:45:44.681056  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.681061  1322 net.cpp:165] Memory required for data: 1109505500\nI0821 06:45:44.681066  1322 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:45:44.681073  1322 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:45:44.681079  1322 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:45:44.681087  1322 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.681095  1322 net.cpp:150] Setting up L2_b6_relu\nI0821 06:45:44.681102  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.681107  1322 net.cpp:165] Memory required for data: 1113601500\nI0821 06:45:44.681112  1322 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.681121  1322 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.681128  1322 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:45:44.681134  1322 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:44.681144  1322 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:44.681190  1322 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.681202  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.681210  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.681213  1322 net.cpp:165] Memory required for data: 1121793500\nI0821 06:45:44.681218  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:45:44.681229  1322 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:45:44.681236  1322 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:44.681247  1322 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:45:44.681712  1322 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:45:44.681725  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.681730  1322 net.cpp:165] Memory required for data: 1125889500\nI0821 06:45:44.681740  1322 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:45:44.681748  1322 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:45:44.681761  1322 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:45:44.681773  1322 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:45:44.682031  1322 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:45:44.682045  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.682050  1322 net.cpp:165] Memory required for data: 1129985500\nI0821 06:45:44.682060  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:44.682072  1322 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:45:44.682078  1322 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:45:44.682086  1322 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.682144  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:44.682296  1322 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:45:44.682308  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.682313  1322 net.cpp:165] Memory required for data: 1134081500\nI0821 06:45:44.682323  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:45:44.682330  1322 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:45:44.682337  1322 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:45:44.682348  1322 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.682358  1322 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:45:44.682364  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.682368  1322 net.cpp:165] Memory required for data: 1138177500\nI0821 06:45:44.682374  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:45:44.682387  1322 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:45:44.682394  1322 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:45:44.682404  1322 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:45:44.682873  1322 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:45:44.682888  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.682893  1322 net.cpp:165] Memory required for data: 1142273500\nI0821 06:45:44.682901  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:45:44.682910  1322 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:45:44.682916  1322 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:45:44.682925  1322 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:45:44.683172  1322 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:45:44.683185  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683189  1322 net.cpp:165] Memory required for data: 1146369500\nI0821 06:45:44.683200  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:44.683208  1322 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:45:44.683214  1322 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:45:44.683226  1322 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.683284  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:44.683429  1322 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:45:44.683441  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683446  1322 net.cpp:165] Memory required for data: 1150465500\nI0821 06:45:44.683455  1322 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:45:44.683465  1322 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:45:44.683470  1322 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:45:44.683477  1322 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:44.683487  1322 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:45:44.683514  1322 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:45:44.683523  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683528  1322 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:45:44.683533  1322 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:45:44.683544  1322 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:45:44.683550  1322 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:45:44.683564  1322 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.683574  1322 net.cpp:150] Setting up L2_b7_relu\nI0821 06:45:44.683581  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683585  1322 net.cpp:165] Memory required for data: 1158657500\nI0821 06:45:44.683590  1322 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.683600  1322 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.683606  1322 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:45:44.683614  1322 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:44.683624  1322 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:44.683668  1322 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.683682  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683689  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.683694  1322 net.cpp:165] Memory required for data: 1166849500\nI0821 06:45:44.683699  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:45:44.683710  1322 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:45:44.683717  1322 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:44.683725  1322 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:45:44.684203  1322 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:45:44.684218  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.684223  1322 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:45:44.684232  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:45:44.684244  1322 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:45:44.684250  1322 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:45:44.684259  1322 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:45:44.684511  1322 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:45:44.684525  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.684530  1322 net.cpp:165] Memory required for data: 1175041500\nI0821 06:45:44.684540  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:44.684548  1322 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:45:44.684554  1322 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:45:44.684566  1322 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.684622  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:44.684772  1322 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:45:44.684785  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.684790  1322 net.cpp:165] Memory required for data: 1179137500\nI0821 06:45:44.684799  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:45:44.684808  1322 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:45:44.684814  1322 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:45:44.684824  1322 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.684841  1322 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:45:44.684849  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.684854  1322 net.cpp:165] Memory required for data: 1183233500\nI0821 06:45:44.684859  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:45:44.684873  1322 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:45:44.684880  1322 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:45:44.684888  1322 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:45:44.685351  1322 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:45:44.685364  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.685369  1322 net.cpp:165] Memory required for data: 1187329500\nI0821 06:45:44.685379  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:45:44.685390  1322 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:45:44.685405  1322 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:45:44.685413  1322 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:45:44.685670  1322 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:45:44.685683  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.685688  1322 net.cpp:165] Memory required for data: 1191425500\nI0821 06:45:44.685698  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:44.685708  1322 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:45:44.685714  1322 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:45:44.685724  1322 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.685781  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:44.686113  1322 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:45:44.686130  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.686134  1322 net.cpp:165] Memory required for data: 1195521500\nI0821 06:45:44.686143  1322 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:45:44.686153  1322 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:45:44.686159  1322 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:45:44.686167  1322 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:44.686179  1322 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:45:44.686208  1322 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:45:44.686218  1322 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:44.686223  1322 net.cpp:165] Memory required for data: 1199617500\nI0821 06:45:44.686228  1322 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:45:44.686239  1322 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:45:44.686244  1322 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:45:44.686251  1322 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.686261  1322 net.cpp:150] Setting up L2_b8_relu\nI0821 06:45:44.686269  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.686272  1322 net.cpp:165] Memory required for data: 1203713500\nI0821 06:45:44.686277  1322 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.686285  1322 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.686290  1322 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:45:44.686300  1322 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:44.686322  1322 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:44.686369  1322 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.686381  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.686388  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.686393  1322 net.cpp:165] Memory required for data: 1211905500\nI0821 06:45:44.686398  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:45:44.686413  1322 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:45:44.686419  1322 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:44.686432  1322 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:45:44.686913  1322 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:45:44.686928  1322 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:44.686933  1322 net.cpp:165] Memory required for data: 1216001500\nI0821 06:45:44.686942  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:45:44.686954  1322 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:45:44.686961  1322 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:45:44.686969  1322 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:45:44.687216  1322 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:45:44.687229  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.687234  1322 net.cpp:165] Memory required for data: 1220097500\nI0821 06:45:44.687252  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:44.687260  1322 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:45:44.687268  1322 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:45:44.687278  1322 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.687335  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:44.687490  1322 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:45:44.687503  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.687508  1322 net.cpp:165] Memory required for data: 1224193500\nI0821 06:45:44.687517  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:45:44.687525  1322 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:45:44.687531  1322 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:45:44.687539  1322 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.687548  1322 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:45:44.687556  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.687561  1322 net.cpp:165] Memory required for data: 1228289500\nI0821 06:45:44.687564  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:45:44.687578  1322 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:45:44.687585  1322 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:45:44.687597  1322 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:45:44.688072  1322 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:45:44.688086  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688091  1322 net.cpp:165] Memory required for data: 1232385500\nI0821 06:45:44.688100  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:45:44.688112  1322 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:45:44.688119  1322 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:45:44.688130  1322 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:45:44.688382  1322 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:45:44.688395  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688400  1322 net.cpp:165] Memory required for data: 1236481500\nI0821 06:45:44.688446  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:44.688457  1322 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:45:44.688463  1322 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:45:44.688472  1322 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.688534  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:44.688686  1322 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:45:44.688699  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688704  1322 net.cpp:165] Memory required for data: 1240577500\nI0821 06:45:44.688714  1322 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:45:44.688725  1322 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:45:44.688731  1322 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:45:44.688738  1322 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:44.688747  1322 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:45:44.688777  1322 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:45:44.688787  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688791  1322 net.cpp:165] Memory required for data: 1244673500\nI0821 06:45:44.688796  1322 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:45:44.688804  1322 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:45:44.688810  1322 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:45:44.688820  1322 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.688835  1322 net.cpp:150] Setting up L2_b9_relu\nI0821 06:45:44.688843  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688848  1322 net.cpp:165] Memory required for data: 1248769500\nI0821 06:45:44.688853  1322 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.688870  1322 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.688876  1322 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:45:44.688886  1322 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:44.688897  1322 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:44.688946  1322 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.688958  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688966  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.688969  1322 net.cpp:165] Memory required for data: 1256961500\nI0821 06:45:44.688974  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:45:44.688987  1322 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:45:44.688992  1322 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:44.689004  1322 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:45:44.689474  1322 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:45:44.689489  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.689493  1322 net.cpp:165] Memory required for data: 1257985500\nI0821 06:45:44.689502  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:45:44.689512  1322 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:45:44.689517  1322 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:45:44.689528  1322 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:45:44.689793  1322 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:45:44.689806  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.689811  1322 net.cpp:165] Memory required for data: 1259009500\nI0821 06:45:44.689821  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:44.689836  1322 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:45:44.689842  1322 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:45:44.689851  1322 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.689910  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:44.690062  1322 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:45:44.690078  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.690083  1322 net.cpp:165] Memory required for data: 1260033500\nI0821 06:45:44.690093  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:45:44.690100  1322 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:45:44.690106  1322 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:45:44.690114  1322 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.690124  1322 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:45:44.690129  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.690135  1322 net.cpp:165] Memory required for data: 1261057500\nI0821 06:45:44.690138  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:45:44.690152  1322 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:45:44.690158  1322 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:45:44.690167  1322 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:45:44.690641  1322 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:45:44.690655  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.690660  1322 net.cpp:165] Memory required for data: 1262081500\nI0821 06:45:44.690670  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:45:44.690681  1322 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:45:44.690687  1322 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:45:44.690698  1322 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:45:44.690966  1322 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:45:44.690979  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.690984  1322 net.cpp:165] Memory required for data: 1263105500\nI0821 06:45:44.691001  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:44.691011  1322 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:45:44.691017  1322 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:45:44.691027  1322 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.691084  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:44.691241  1322 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:45:44.691253  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.691258  1322 net.cpp:165] Memory required for data: 1264129500\nI0821 06:45:44.691267  1322 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:45:44.691279  1322 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:45:44.691287  1322 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:44.691294  1322 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:45:44.691331  1322 net.cpp:150] Setting up L3_b1_pool\nI0821 06:45:44.691341  1322 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:45:44.691346  1322 net.cpp:165] Memory required for data: 1265153500\nI0821 06:45:44.691351  1322 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:45:44.691359  1322 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:45:44.691365  1322 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:45:44.691372  1322 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:45:44.691382  1322 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:45:44.691413  1322 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:45:44.691423  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.691427  1322 net.cpp:165] Memory required for data: 1266177500\nI0821 06:45:44.691432  1322 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:45:44.691440  1322 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:45:44.691445  1322 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:45:44.691455  1322 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.691465  1322 net.cpp:150] Setting up L3_b1_relu\nI0821 06:45:44.691473  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.691478  1322 net.cpp:165] Memory required for data: 1267201500\nI0821 06:45:44.691483  1322 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:45:44.691491  1322 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:45:44.691498  1322 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:45:44.692733  1322 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:45:44.692751  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.692756  1322 net.cpp:165] Memory required for data: 1268225500\nI0821 06:45:44.692762  1322 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:45:44.692771  1322 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:45:44.692777  1322 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:45:44.692785  1322 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:45:44.692795  1322 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:45:44.692845  1322 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:45:44.692857  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.692862  1322 net.cpp:165] Memory required for data: 1270273500\nI0821 06:45:44.692867  1322 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.692875  1322 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.692881  1322 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:45:44.692893  1322 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:44.692903  1322 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:44.692950  1322 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.692961  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.692968  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.692983  1322 net.cpp:165] Memory required for data: 1274369500\nI0821 06:45:44.692988  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:45:44.693003  1322 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:45:44.693009  1322 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:44.693019  1322 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:45:44.695029  1322 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:45:44.695046  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.695051  1322 net.cpp:165] Memory required for data: 1276417500\nI0821 06:45:44.695060  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:45:44.695073  1322 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:45:44.695080  1322 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:45:44.695089  1322 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:45:44.695354  1322 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:45:44.695367  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.695372  1322 net.cpp:165] Memory required for data: 1278465500\nI0821 06:45:44.695384  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:44.695392  1322 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:45:44.695399  1322 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:45:44.695406  1322 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.695467  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:44.695623  1322 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:45:44.695636  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.695641  1322 net.cpp:165] Memory required for data: 1280513500\nI0821 06:45:44.695650  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:45:44.695658  1322 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:45:44.695664  1322 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:45:44.695672  1322 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.695682  1322 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:45:44.695688  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.695693  1322 net.cpp:165] Memory required for data: 1282561500\nI0821 06:45:44.695698  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:45:44.695713  1322 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:45:44.695719  1322 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:45:44.695730  1322 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:45:44.696758  1322 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:45:44.696774  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.696779  1322 net.cpp:165] Memory required for data: 1284609500\nI0821 06:45:44.696787  1322 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:45:44.696799  1322 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:45:44.696806  1322 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:45:44.696815  1322 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:45:44.697085  1322 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:45:44.697098  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697103  1322 net.cpp:165] Memory required for data: 1286657500\nI0821 06:45:44.697114  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:44.697126  1322 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:45:44.697132  1322 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:45:44.697141  1322 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.697201  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:44.697357  1322 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:45:44.697371  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697376  1322 net.cpp:165] Memory required for data: 1288705500\nI0821 06:45:44.697384  1322 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:45:44.697396  1322 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:45:44.697410  1322 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:45:44.697418  1322 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:44.697429  1322 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:45:44.697464  1322 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:45:44.697474  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697479  1322 net.cpp:165] Memory required for data: 1290753500\nI0821 06:45:44.697484  1322 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:45:44.697496  1322 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:45:44.697502  1322 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:45:44.697510  1322 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.697520  1322 net.cpp:150] Setting up L3_b2_relu\nI0821 06:45:44.697526  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697530  1322 net.cpp:165] Memory required for data: 1292801500\nI0821 06:45:44.697535  1322 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.697542  1322 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.697548  1322 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:45:44.697556  1322 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:44.697566  1322 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:44.697614  1322 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.697625  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697633  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.697636  1322 net.cpp:165] Memory required for data: 1296897500\nI0821 06:45:44.697643  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:45:44.697656  1322 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:45:44.697662  1322 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:44.697672  1322 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:45:44.698698  1322 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:45:44.698712  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.698717  1322 net.cpp:165] Memory required for data: 1298945500\nI0821 06:45:44.698725  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:45:44.698737  1322 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:45:44.698745  1322 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:45:44.698753  1322 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:45:44.699028  1322 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:45:44.699041  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.699046  1322 net.cpp:165] Memory required for data: 1300993500\nI0821 06:45:44.699056  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:44.699065  1322 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:45:44.699071  1322 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:45:44.699079  1322 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.699142  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:44.699298  1322 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:45:44.699311  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.699316  1322 net.cpp:165] Memory required for data: 1303041500\nI0821 06:45:44.699326  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:45:44.699333  1322 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:45:44.699340  1322 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:45:44.699350  1322 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.699360  1322 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:45:44.699368  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.699379  1322 net.cpp:165] Memory required for data: 1305089500\nI0821 06:45:44.699384  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:45:44.699398  1322 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:45:44.699405  1322 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:45:44.699414  1322 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:45:44.700438  1322 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:45:44.700453  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.700458  1322 net.cpp:165] Memory required for data: 1307137500\nI0821 06:45:44.700466  1322 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:45:44.700479  1322 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:45:44.700485  1322 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:45:44.700494  1322 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:45:44.700775  1322 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:45:44.700788  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.700793  1322 net.cpp:165] Memory required for data: 1309185500\nI0821 06:45:44.700803  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:44.700815  1322 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:45:44.700821  1322 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:45:44.700835  1322 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.700897  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:44.701061  1322 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:45:44.701073  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.701078  1322 net.cpp:165] Memory required for data: 1311233500\nI0821 06:45:44.701087  1322 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:45:44.701099  1322 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:45:44.701107  1322 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:45:44.701113  1322 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:44.701124  1322 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:45:44.701159  1322 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:45:44.701170  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.701174  1322 net.cpp:165] Memory required for data: 1313281500\nI0821 06:45:44.701180  1322 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:45:44.701191  1322 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:45:44.701197  1322 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:45:44.701205  1322 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.701215  1322 net.cpp:150] Setting up L3_b3_relu\nI0821 06:45:44.701221  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.701225  1322 net.cpp:165] Memory required for data: 1315329500\nI0821 06:45:44.701231  1322 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.701237  1322 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.701243  1322 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:45:44.701251  1322 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:44.701261  1322 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:44.701309  1322 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.701320  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.701328  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.701331  1322 net.cpp:165] Memory required for data: 1319425500\nI0821 06:45:44.701336  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:45:44.701350  1322 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:45:44.701357  1322 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:44.701366  1322 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:45:44.702394  1322 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:45:44.702409  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.702414  1322 net.cpp:165] Memory required for data: 1321473500\nI0821 06:45:44.702424  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:45:44.702435  1322 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:45:44.702441  1322 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:45:44.702452  1322 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:45:44.702723  1322 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:45:44.702736  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.702741  1322 net.cpp:165] Memory required for data: 1323521500\nI0821 06:45:44.702751  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:44.702760  1322 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:45:44.702766  1322 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:45:44.702775  1322 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.702841  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:44.703001  1322 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:45:44.703013  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.703018  1322 net.cpp:165] Memory required for data: 1325569500\nI0821 06:45:44.703027  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:45:44.703035  1322 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:45:44.703042  1322 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:45:44.703052  1322 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.703061  1322 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:45:44.703068  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.703073  1322 net.cpp:165] Memory required for data: 1327617500\nI0821 06:45:44.703078  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:45:44.703094  1322 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:45:44.703099  1322 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:45:44.703107  1322 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:45:44.704145  1322 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:45:44.704160  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.704165  1322 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:45:44.704174  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:45:44.704186  1322 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:45:44.704193  1322 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:45:44.704201  1322 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:45:44.704475  1322 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:45:44.704488  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.704493  1322 net.cpp:165] Memory required for data: 1331713500\nI0821 06:45:44.704504  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:44.704515  1322 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:45:44.704521  1322 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:45:44.704530  1322 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.704591  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:44.704754  1322 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:45:44.704767  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.704772  1322 net.cpp:165] Memory required for data: 1333761500\nI0821 06:45:44.704782  1322 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:45:44.704793  1322 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:45:44.704799  1322 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:45:44.704807  1322 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:44.704818  1322 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:45:44.704856  1322 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:45:44.704874  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.704879  1322 net.cpp:165] Memory required for data: 1335809500\nI0821 06:45:44.704885  1322 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:45:44.704895  1322 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:45:44.704901  1322 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:45:44.704908  1322 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.704918  1322 net.cpp:150] Setting up L3_b4_relu\nI0821 06:45:44.704926  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.704931  1322 net.cpp:165] Memory required for data: 1337857500\nI0821 06:45:44.704934  1322 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.704942  1322 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.704947  1322 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:45:44.704954  1322 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:44.704964  1322 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:44.705014  1322 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.705026  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.705034  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.705037  1322 net.cpp:165] Memory required for data: 1341953500\nI0821 06:45:44.705042  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:45:44.705057  1322 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:45:44.705063  1322 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:44.705072  1322 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:45:44.706101  1322 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:45:44.706117  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.706121  1322 net.cpp:165] Memory required for data: 1344001500\nI0821 06:45:44.706130  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:45:44.706142  1322 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:45:44.706149  1322 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:45:44.706161  1322 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:45:44.707419  1322 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:45:44.707437  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.707442  1322 net.cpp:165] Memory required for data: 1346049500\nI0821 06:45:44.707453  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:44.707465  1322 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:45:44.707473  1322 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:45:44.707480  1322 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.707547  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:44.707710  1322 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:45:44.707722  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.707727  1322 net.cpp:165] Memory required for data: 1348097500\nI0821 06:45:44.707736  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:45:44.707744  1322 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:45:44.707751  1322 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:45:44.707762  1322 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.707772  1322 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:45:44.707779  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.707783  1322 net.cpp:165] Memory required for data: 1350145500\nI0821 06:45:44.707789  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:45:44.707800  1322 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:45:44.707806  1322 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:45:44.707818  1322 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:45:44.709820  1322 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:45:44.709843  1322 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:44.709849  1322 net.cpp:165] Memory required for data: 1352193500\nI0821 06:45:44.709858  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:45:44.709872  1322 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:45:44.709878  1322 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:45:44.709887  1322 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:45:44.710150  1322 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:45:44.710165  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710170  1322 net.cpp:165] Memory required for data: 1354241500\nI0821 06:45:44.710180  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:44.710191  1322 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:45:44.710198  1322 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:45:44.710206  1322 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.710265  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:44.710418  1322 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:45:44.710431  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710436  1322 net.cpp:165] Memory required for data: 1356289500\nI0821 06:45:44.710445  1322 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:45:44.710458  1322 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:45:44.710464  1322 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:45:44.710471  1322 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:44.710482  1322 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:45:44.710515  1322 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:45:44.710526  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710531  1322 net.cpp:165] Memory required for data: 1358337500\nI0821 06:45:44.710536  1322 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:45:44.710547  1322 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:45:44.710553  1322 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:45:44.710561  1322 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.710571  1322 net.cpp:150] Setting up L3_b5_relu\nI0821 06:45:44.710577  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710582  1322 net.cpp:165] Memory required for data: 1360385500\nI0821 06:45:44.710587  1322 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.710594  1322 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.710599  1322 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:45:44.710606  1322 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:44.710615  1322 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:44.710664  1322 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.710675  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710681  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.710686  1322 net.cpp:165] Memory required for data: 1364481500\nI0821 06:45:44.710691  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:45:44.710705  1322 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:45:44.710711  1322 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:44.710721  1322 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:45:44.711735  1322 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:45:44.711750  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.711755  1322 net.cpp:165] Memory required for data: 1366529500\nI0821 06:45:44.711763  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:45:44.711776  
1322 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:45:44.711789  1322 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:45:44.711802  1322 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:45:44.712066  1322 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:45:44.712080  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.712085  1322 net.cpp:165] Memory required for data: 1368577500\nI0821 06:45:44.712095  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:44.712105  1322 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:45:44.712110  1322 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:45:44.712121  1322 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.712179  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:44.712332  1322 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:45:44.712344  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.712349  1322 net.cpp:165] Memory required for data: 1370625500\nI0821 06:45:44.712358  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:45:44.712366  1322 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:45:44.712373  1322 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:45:44.712384  1322 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.712394  1322 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:45:44.712401  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.712406  1322 net.cpp:165] Memory required for data: 1372673500\nI0821 06:45:44.712410  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:45:44.712425  1322 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:45:44.712431  1322 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:45:44.712440  1322 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:45:44.713450  1322 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:45:44.713465  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.713470  1322 net.cpp:165] Memory required for data: 1374721500\nI0821 06:45:44.713479  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:45:44.713491  1322 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:45:44.713498  1322 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:45:44.713507  1322 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:45:44.713764  1322 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:45:44.713778  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.713783  1322 net.cpp:165] Memory required for data: 1376769500\nI0821 06:45:44.713793  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:44.713805  1322 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:45:44.713812  1322 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:45:44.713820  1322 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.713886  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:44.714047  1322 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:45:44.714061  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.714066  1322 net.cpp:165] Memory required for data: 1378817500\nI0821 06:45:44.714074  1322 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:45:44.714083  1322 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:45:44.714090  1322 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:45:44.714097  1322 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:44.714108  1322 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:45:44.714141  1322 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:45:44.714154  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.714159  1322 net.cpp:165] Memory required for data: 1380865500\nI0821 06:45:44.714164  1322 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:45:44.714174  1322 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:45:44.714180  1322 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:45:44.714193  1322 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.714205  1322 net.cpp:150] Setting up L3_b6_relu\nI0821 06:45:44.714211  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.714216  1322 net.cpp:165] Memory required for data: 1382913500\nI0821 06:45:44.714221  1322 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.714228  1322 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.714233  1322 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:45:44.714241  1322 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:44.714251  1322 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:44.714299  1322 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.714311  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.714318  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.714323  1322 net.cpp:165] Memory required for data: 1387009500\nI0821 06:45:44.714328  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:45:44.714341  1322 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:45:44.714347  1322 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:44.714356  1322 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:45:44.715368  1322 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:45:44.715382  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.715386  1322 net.cpp:165] Memory required for data: 1389057500\nI0821 06:45:44.715396  1322 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:45:44.715409  1322 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:45:44.715415  1322 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:45:44.715426  1322 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:45:44.715683  1322 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:45:44.715695  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.715700  1322 net.cpp:165] Memory required for data: 1391105500\nI0821 06:45:44.715710  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:44.715719  1322 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:45:44.715725  1322 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:45:44.715736  1322 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.715793  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:44.715951  1322 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:45:44.715965  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.715970  1322 net.cpp:165] Memory required for data: 1393153500\nI0821 06:45:44.715978  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:45:44.716013  1322 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:45:44.716022  1322 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:45:44.716030  1322 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.716042  1322 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:45:44.716048  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.716053  1322 net.cpp:165] Memory required for data: 1395201500\nI0821 06:45:44.716058  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:45:44.716073  1322 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:45:44.716078  1322 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:45:44.716087  1322 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:45:44.717126  1322 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:45:44.717140  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717145  1322 net.cpp:165] Memory required for data: 1397249500\nI0821 06:45:44.717154  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:45:44.717166  1322 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:45:44.717180  1322 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:45:44.717191  1322 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:45:44.717453  1322 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:45:44.717468  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717471  1322 net.cpp:165] Memory required for data: 1399297500\nI0821 06:45:44.717483  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:44.717491  1322 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:45:44.717499  1322 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:45:44.717509  1322 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.717567  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:44.717721  1322 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:45:44.717734  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717738  1322 net.cpp:165] Memory required for data: 1401345500\nI0821 06:45:44.717747  1322 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:45:44.717756  1322 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:45:44.717762  1322 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:45:44.717769  1322 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:44.717780  1322 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:45:44.717813  1322 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:45:44.717826  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717838  1322 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:45:44.717842  1322 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:45:44.717851  1322 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:45:44.717856  1322 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:45:44.717864  1322 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.717874  1322 net.cpp:150] Setting up L3_b7_relu\nI0821 06:45:44.717880  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717885  1322 net.cpp:165] Memory required for data: 1405441500\nI0821 06:45:44.717890  1322 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.717901  1322 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.717907  1322 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:45:44.717914  1322 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:44.717924  1322 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:44.717973  1322 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.717985  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717993  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.717996  1322 net.cpp:165] Memory required for data: 1409537500\nI0821 06:45:44.718001  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:45:44.718013  1322 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:45:44.718019  1322 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:44.718030  1322 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:45:44.719049  1322 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:45:44.719063  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.719069  1322 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:45:44.719077  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:45:44.719086  1322 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:45:44.719092  1322 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:45:44.719105  1322 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:45:44.719373  1322 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:45:44.719389  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.719394  1322 net.cpp:165] Memory required for data: 1413633500\nI0821 06:45:44.719411  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:44.719420  1322 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:45:44.719426  1322 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:45:44.719434  1322 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.719496  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:44.719653  1322 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:45:44.719666  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.719671  1322 net.cpp:165] Memory required for data: 1415681500\nI0821 06:45:44.719679  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:45:44.719691  1322 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:45:44.719696  1322 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:45:44.719704  1322 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.719713  1322 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:45:44.719720  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.719725  1322 net.cpp:165] Memory required for data: 1417729500\nI0821 06:45:44.719730  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:45:44.719744  1322 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:45:44.719750  1322 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:45:44.719758  1322 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:45:44.720777  1322 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:45:44.720793  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.720798  1322 net.cpp:165] Memory required for data: 1419777500\nI0821 06:45:44.720806  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:45:44.720820  1322 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:45:44.720826  1322 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:45:44.720844  1322 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:45:44.721105  1322 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:45:44.721118  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.721123  1322 net.cpp:165] Memory required for data: 1421825500\nI0821 06:45:44.721134  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:44.721143  1322 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:45:44.721148  1322 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:45:44.721159  1322 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.721217  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:44.721375  1322 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:45:44.721387  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.721392  1322 net.cpp:165] Memory required for data: 1423873500\nI0821 06:45:44.721401  1322 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:45:44.721410  1322 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:45:44.721417  1322 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:45:44.721424  1322 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:44.721436  1322 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:45:44.721472  1322 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:45:44.721482  1322 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:44.721487  1322 net.cpp:165] Memory required for data: 1425921500\nI0821 06:45:44.721493  1322 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:45:44.721500  1322 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:45:44.721505  1322 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:45:44.721515  1322 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.721525  1322 net.cpp:150] Setting up L3_b8_relu\nI0821 06:45:44.721532  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.721537  1322 net.cpp:165] Memory required for data: 1427969500\nI0821 06:45:44.721541  1322 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.721555  1322 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.721561  1322 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:45:44.721570  1322 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:44.721578  1322 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:44.721628  1322 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.721639  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.721647  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.721650  1322 net.cpp:165] Memory required for data: 1432065500\nI0821 06:45:44.721655  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:45:44.721666  1322 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:45:44.721673  1322 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:44.721684  1322 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:45:44.723685  1322 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:45:44.723702  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:45:44.723707  1322 net.cpp:165] Memory required for data: 1434113500\nI0821 06:45:44.723717  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:45:44.723731  1322 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:45:44.723737  1322 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:45:44.723745  1322 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:45:44.724017  1322 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:45:44.724030  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.724035  1322 net.cpp:165] Memory required for data: 1436161500\nI0821 06:45:44.724046  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:44.724061  1322 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:45:44.724066  1322 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:45:44.724074  1322 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.724134  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:44.724294  1322 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:45:44.724306  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.724311  1322 net.cpp:165] Memory required for data: 1438209500\nI0821 06:45:44.724321  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:45:44.724333  1322 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:45:44.724339  1322 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:45:44.724345  1322 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.724355  1322 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:45:44.724362  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.724367  1322 net.cpp:165] Memory required for data: 1440257500\nI0821 06:45:44.724372  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:45:44.724386  1322 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:45:44.724392  1322 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:45:44.724403  1322 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:45:44.725430  1322 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:45:44.725445  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.725450  1322 net.cpp:165] Memory required for data: 1442305500\nI0821 06:45:44.725459  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:45:44.725468  1322 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:45:44.725476  1322 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:45:44.725486  1322 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:45:44.725750  1322 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:45:44.725765  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.725771  1322 net.cpp:165] Memory required for data: 1444353500\nI0821 06:45:44.725790  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:44.725798  1322 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:45:44.725805  1322 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:45:44.725812  1322 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.725878  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:44.726035  1322 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:45:44.726048  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.726053  1322 net.cpp:165] Memory required for data: 1446401500\nI0821 06:45:44.726063  1322 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:45:44.726074  1322 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:45:44.726081  1322 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:45:44.726089  1322 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:44.726096  1322 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:45:44.726135  1322 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:45:44.726147  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.726151  1322 net.cpp:165] Memory required for data: 1448449500\nI0821 06:45:44.726157  1322 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:45:44.726164  1322 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:45:44.726171  1322 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:45:44.726177  1322 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.726187  1322 net.cpp:150] Setting up L3_b9_relu\nI0821 06:45:44.726194  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.726198  1322 net.cpp:165] Memory required for data: 1450497500\nI0821 06:45:44.726203  1322 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:45:44.726212  1322 net.cpp:100] Creating Layer post_pool\nI0821 06:45:44.726217  1322 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:45:44.726228  1322 net.cpp:408] post_pool -> post_pool\nI0821 06:45:44.726263  1322 net.cpp:150] Setting up post_pool\nI0821 06:45:44.726274  1322 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:45:44.726279  1322 net.cpp:165] Memory required for data: 1450529500\nI0821 06:45:44.726284  1322 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:45:44.726380  1322 net.cpp:100] Creating Layer post_FC\nI0821 06:45:44.726393  1322 net.cpp:434] post_FC <- post_pool\nI0821 06:45:44.726408  1322 net.cpp:408] post_FC -> post_FC_top\nI0821 06:45:44.726670  1322 net.cpp:150] Setting up post_FC\nI0821 06:45:44.726686  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.726691  1322 net.cpp:165] Memory required for data: 1450534500\nI0821 06:45:44.726699  1322 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:45:44.726711  1322 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:45:44.726718  1322 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:45:44.726727  1322 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:45:44.726737  1322 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:45:44.726789  1322 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:45:44.726801  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.726809  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.726814  1322 net.cpp:165] Memory required for data: 1450544500\nI0821 06:45:44.726819  1322 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:45:44.726871  1322 net.cpp:100] Creating Layer accuracy\nI0821 06:45:44.726883  1322 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:45:44.726891  1322 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:45:44.726899  1322 net.cpp:408] accuracy -> accuracy\nI0821 06:45:44.726946  1322 net.cpp:150] Setting up accuracy\nI0821 06:45:44.726959  1322 net.cpp:157] Top shape: (1)\nI0821 06:45:44.726964  1322 net.cpp:165] Memory required for data: 1450544504\nI0821 06:45:44.726969  1322 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:44.726989  1322 net.cpp:100] Creating Layer loss\nI0821 06:45:44.726995  1322 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:45:44.727003  1322 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:45:44.727010  1322 net.cpp:408] loss -> loss\nI0821 06:45:44.728176  1322 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:44.728346  1322 net.cpp:150] Setting up loss\nI0821 06:45:44.728361  1322 net.cpp:157] Top shape: (1)\nI0821 06:45:44.728368  1322 net.cpp:160]     with loss weight 1\nI0821 06:45:44.728448  1322 net.cpp:165] Memory required for data: 1450544508\nI0821 06:45:44.728456  1322 net.cpp:226] loss needs backward computation.\nI0821 06:45:44.728463  1322 net.cpp:228] accuracy does not need backward computation.\nI0821 06:45:44.728469  1322 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:45:44.728474  1322 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:45:44.728479  1322 net.cpp:226] post_pool needs backward computation.\nI0821 06:45:44.728484  1322 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:45:44.728489  1322 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.728494  1322 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.728499  1322 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.728505  1322 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.728510  1322 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.728514  1322 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.728519  1322 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.728524  1322 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.728529  1322 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:45:44.728534  1322 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:45:44.728539  1322 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.728545  1322 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.728549  1322 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.728554  1322 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.728564  1322 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.728569  1322 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:45:44.728574  1322 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.728579  1322 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.728585  1322 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:45:44.728590  1322 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:45:44.728595  1322 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:45:44.728601  1322 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.728606  1322 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.728611  1322 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.728616  1322 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.728621  1322 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.728626  1322 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.728631  1322 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.728636  1322 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:45:44.728641  1322 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:45:44.728646  1322 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.728652  1322 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.728657  1322 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.728662  1322 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.728667  1322 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.728679  1322 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.728684  1322 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.728690  1322 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.728695  1322 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:45:44.728700  1322 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:45:44.728705  1322 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:45:44.728711  1322 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.728716  1322 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.728721  1322 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.728726  1322 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.728731  1322 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.728736  1322 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.728741  1322 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.728747  1322 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:45:44.728752  1322 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:45:44.728757  1322 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.728762  1322 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.728767  1322 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.728773  1322 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.728778  1322 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.728783  1322 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.728787  1322 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.728793  1322 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.728798  1322 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:45:44.728803  1322 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:45:44.728808  1322 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:45:44.728814  1322 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.728819  1322 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.728824  1322 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.728834  1322 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.728840  1322 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.728847 
 1322 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.728852  1322 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.728860  1322 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:45:44.728865  1322 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:45:44.728871  1322 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.728878  1322 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.728883  1322 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.728888  1322 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.728893  1322 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.728899  1322 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.728904  1322 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.728909  1322 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.728914  1322 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:45:44.728919  1322 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:45:44.728926  1322 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:45:44.728930  1322 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:45:44.728941  1322 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.728948  1322 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:45:44.728953  1322 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.728958  1322 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.728965  1322 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.728970  1322 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.728974  1322 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:45:44.728979  1322 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.728986  1322 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.728991  1322 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:45:44.728996  1322 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:45:44.729001  1322 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.729007  1322 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.729012  1322 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.729017  1322 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.729022  1322 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.729028  1322 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.729033  1322 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.729038  1322 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.729044  1322 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:45:44.729049  1322 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:45:44.729054  1322 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.729060  1322 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.729065  1322 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.729071  1322 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.729076  1322 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.729081  1322 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:45:44.729086  1322 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.729091  1322 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.729097  1322 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:45:44.729102  1322 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:45:44.729107  1322 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:45:44.729113  1322 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.729118  1322 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.729125  1322 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.729130  1322 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.729135  1322 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.729140  1322 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.729145  1322 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.729151  1322 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:45:44.729156  1322 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:45:44.729161  1322 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.729166  1322 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.729171  1322 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.729177  1322 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.729182  1322 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.729187  1322 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.729197  1322 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.729203  1322 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.729209  1322 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:45:44.729214  1322 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:45:44.729219  1322 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:45:44.729228  1322 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.729234  1322 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.729239  1322 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.729245  1322 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.729250  1322 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.729255  1322 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.729261  1322 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.729266  1322 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:45:44.729272  1322 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:45:44.729277  1322 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.729284  1322 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.729288  1322 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.729295  1322 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.729300  1322 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.729305  1322 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.729310  1322 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.729315  1322 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.729321  1322 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:45:44.729326  1322 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:45:44.729332  1322 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:45:44.729338  1322 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.729343  1322 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.729349  1322 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.729354  1322 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.729359  1322 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.729364  1322 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.729370  1322 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.729375  1322 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:45:44.729382  1322 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:45:44.729387  1322 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.729393  1322 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.729398  1322 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.729404  1322 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.729409  1322 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.729414  1322 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.729419  1322 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.729425  1322 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.729430  1322 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:45:44.729436  1322 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:45:44.729444  1322 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:45:44.729449  1322 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:45:44.729454  1322 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.729465  1322 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:45:44.729470  1322 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.729476  1322 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.729482  1322 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.729488  1322 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.729493  1322 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:45:44.729499  1322 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.729504  1322 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.729511  1322 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:45:44.729516  1322 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:45:44.729522  1322 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.729528  1322 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.729533  1322 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.729539  1322 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.729544  1322 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.729549  1322 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.729554  1322 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.729560  1322 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.729567  1322 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:45:44.729571  1322 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:45:44.729578  1322 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.729583  1322 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.729588  1322 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.729594  1322 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.729599  1322 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.729605  1322 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:45:44.729610  1322 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.729616  1322 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.729621  1322 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:45:44.729627  1322 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:45:44.729634  1322 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:45:44.729640  1322 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.729645  1322 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.729650  1322 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.729656  1322 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.729661  1322 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.729667  1322 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.729672  1322 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.729678  1322 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:45:44.729683  1322 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:45:44.729689  1322 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.729696  1322 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.729701  1322 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.729707  1322 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.729713  1322 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.729718  1322 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.729723  1322 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.729734  1322 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.729740  1322 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:45:44.729746  1322 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:45:44.729753  1322 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:45:44.729758  1322 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.729763  1322 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.729769  1322 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.729775  1322 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.729780  1322 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.729786  1322 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.729791  1322 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.729797  1322 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:45:44.729804  1322 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:45:44.729809  1322 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.729815  1322 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.729821  1322 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.729826  1322 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.729837  1322 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.729843  1322 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.729848  1322 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.729854  1322 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.729861  1322 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:45:44.729866  1322 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:45:44.729872  1322 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:45:44.729878  1322 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.729883  1322 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.729889  1322 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.729894  1322 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.729900  1322 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.729905  1322 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.729914  1322 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.729920  1322 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:45:44.729926  1322 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:45:44.729933  1322 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.729938  1322 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.729944  1322 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.729950  1322 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.729955  1322 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.729961  1322 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.729966  1322 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.729972  1322 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.729979  1322 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:45:44.729984  1322 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:45:44.729990  1322 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.729996  1322 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.730002  1322 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.730008  1322 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.730020  1322 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.730024  1322 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:45:44.730031  1322 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.730036  1322 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.730042  1322 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:45:44.730047  1322 net.cpp:226] pre_relu needs backward computation.\nI0821 06:45:44.730052  1322 net.cpp:226] pre_scale needs backward computation.\nI0821 06:45:44.730057  1322 net.cpp:226] pre_bn needs backward computation.\nI0821 06:45:44.730063  1322 net.cpp:226] pre_conv needs backward computation.\nI0821 06:45:44.730070  1322 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:45:44.730077  1322 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:45:44.730080  1322 net.cpp:270] This network produces output accuracy\nI0821 06:45:44.730087  1322 net.cpp:270] This network produces output loss\nI0821 06:45:44.730451  1322 net.cpp:283] Network initialization done.\nI0821 06:45:44.740183  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:44.740226  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:44.740288  1322 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:45:44.740675  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:45:44.740694  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:45:44.740705  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:45:44.740713  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:45:44.740723  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:45:44.740732  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:45:44.740741  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:45:44.740749  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:45:44.740758  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:45:44.740767  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:45:44.740777  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:45:44.740784  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:45:44.740793  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:45:44.740803  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:45:44.740811  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:45:44.740819  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:45:44.740836  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:45:44.740846  1322 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:45:44.740855  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:45:44.740875  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:45:44.740885  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:45:44.740892  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:45:44.740905  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:45:44.740913  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:45:44.740922  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:45:44.740931  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:45:44.740938  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:45:44.740947  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:45:44.740955  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:45:44.740963  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:45:44.740973  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:45:44.740981  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:45:44.740990  1322 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:45:44.740998  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:45:44.741008  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:45:44.741015  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:45:44.741024  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:45:44.741032  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:45:44.741041  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:45:44.741050  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:45:44.741060  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:45:44.741070  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:45:44.741077  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:45:44.741086  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:45:44.741094  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:45:44.741102  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:45:44.741111  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:45:44.741118  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:45:44.741127  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:45:44.741135  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:45:44.741152  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:45:44.741160  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:45:44.741169  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:45:44.741178  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:45:44.741186  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:45:44.741194  1322 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:45:44.742833  1322 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:45:44.744611  1322 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:45:44.744880  1322 net.cpp:100] Creating Layer dataLayer\nI0821 06:45:44.744899  1322 net.cpp:408] dataLayer -> data_top\nI0821 06:45:44.744918  1322 net.cpp:408] dataLayer -> label\nI0821 06:45:44.744930  1322 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:45:44.757446  1329 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:45:44.757728  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:44.764966  1322 net.cpp:150] Setting up dataLayer\nI0821 06:45:44.764988  1322 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:45:44.764997  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.765002  1322 net.cpp:165] Memory required for data: 1536500\nI0821 06:45:44.765007  1322 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:45:44.765035  1322 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:45:44.765045  1322 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:45:44.765054  1322 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:45:44.765066  1322 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:45:44.765213  1322 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:45:44.765233  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.765239  1322 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:44.765244  1322 net.cpp:165] Memory required for data: 1537500\nI0821 06:45:44.765250  1322 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:45:44.765278  1322 net.cpp:100] Creating Layer pre_conv\nI0821 06:45:44.765286  1322 net.cpp:434] pre_conv <- data_top\nI0821 06:45:44.765298  1322 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:45:44.765733  1322 net.cpp:150] Setting up pre_conv\nI0821 06:45:44.765759  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.765765  1322 net.cpp:165] Memory required for data: 9729500\nI0821 06:45:44.765781  1322 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:45:44.765791  1322 net.cpp:100] Creating Layer pre_bn\nI0821 06:45:44.765796  1322 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:45:44.765808  1322 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:45:44.766121  1322 net.cpp:150] Setting up pre_bn\nI0821 06:45:44.766135  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.766140  1322 net.cpp:165] Memory required for data: 17921500\nI0821 06:45:44.766156  1322 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:44.766189  1322 net.cpp:100] Creating Layer pre_scale\nI0821 06:45:44.766198  1322 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:45:44.766207  1322 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:45:44.766276  1322 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:44.766523  1322 net.cpp:150] Setting up pre_scale\nI0821 06:45:44.766537  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.766542  1322 net.cpp:165] Memory required for data: 26113500\nI0821 06:45:44.766552  1322 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:45:44.766563  1322 net.cpp:100] Creating Layer pre_relu\nI0821 06:45:44.766569  1322 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:45:44.766579  1322 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:45:44.766590  1322 net.cpp:150] Setting up pre_relu\nI0821 06:45:44.766602  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.766605  1322 net.cpp:165] Memory required for data: 
34305500\nI0821 06:45:44.766613  1322 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:45:44.766620  1322 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:45:44.766625  1322 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:45:44.766633  1322 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:45:44.766643  1322 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:45:44.766698  1322 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:45:44.766710  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.766717  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.766721  1322 net.cpp:165] Memory required for data: 50689500\nI0821 06:45:44.766726  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:45:44.766747  1322 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:45:44.766757  1322 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:45:44.766767  1322 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:45:44.767168  1322 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:45:44.767182  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.767187  1322 net.cpp:165] Memory required for data: 58881500\nI0821 06:45:44.767202  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:45:44.767220  1322 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:45:44.767225  1322 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:45:44.767235  1322 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:45:44.767557  1322 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:45:44.767573  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.767578  1322 net.cpp:165] Memory required for data: 67073500\nI0821 06:45:44.767590  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:44.767598  1322 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:45:44.767603  1322 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:45:44.767616  1322 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.767688  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:44.767881  1322 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:45:44.767895  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.767901  1322 net.cpp:165] Memory required for data: 75265500\nI0821 06:45:44.767921  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:45:44.767933  1322 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:45:44.767940  1322 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:45:44.767947  1322 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.767962  1322 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:45:44.767969  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.767974  1322 net.cpp:165] Memory required for data: 83457500\nI0821 06:45:44.767979  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:45:44.767993  1322 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:45:44.767999  1322 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:45:44.768010  1322 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:45:44.768607  1322 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:45:44.768622  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.768627  1322 net.cpp:165] Memory required for data: 91649500\nI0821 06:45:44.768636  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:45:44.768648  1322 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:45:44.768654  1322 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:45:44.768666  1322 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:45:44.769001  1322 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:45:44.769016  1322 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769021  1322 net.cpp:165] Memory required for data: 99841500\nI0821 06:45:44.769037  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:44.769045  1322 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:45:44.769054  1322 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:45:44.769067  1322 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.769136  1322 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:44.769326  1322 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:45:44.769343  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769349  1322 net.cpp:165] Memory required for data: 108033500\nI0821 06:45:44.769358  1322 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:45:44.769371  1322 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:45:44.769379  1322 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:45:44.769387  1322 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:45:44.769394  1322 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:45:44.769434  1322 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:45:44.769448  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769453  1322 net.cpp:165] Memory required for data: 116225500\nI0821 06:45:44.769457  1322 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:45:44.769464  1322 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:45:44.769470  1322 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:45:44.769480  1322 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.769490  1322 net.cpp:150] Setting up L1_b1_relu\nI0821 06:45:44.769498  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769502  1322 net.cpp:165] Memory required for data: 124417500\nI0821 06:45:44.769508  1322 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.769520  1322 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.769525  1322 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:45:44.769536  1322 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:44.769546  1322 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:44.769599  1322 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:44.769613  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769630  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.769635  1322 net.cpp:165] Memory required for data: 140801500\nI0821 06:45:44.769644  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:45:44.769659  1322 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:45:44.769665  1322 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:44.769675  1322 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:45:44.770097  1322 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:45:44.770114  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.770119  1322 net.cpp:165] Memory required for data: 148993500\nI0821 06:45:44.770128  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:45:44.770140  1322 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:45:44.770149  1322 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:45:44.770162  1322 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:45:44.770483  1322 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:45:44.770499  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.770505  1322 net.cpp:165] Memory required for data: 157185500\nI0821 06:45:44.770515  1322 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:45:44.770524  1322 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:45:44.770532  1322 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:45:44.770545  1322 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.770612  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:45:44.770794  1322 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:45:44.770813  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.770818  1322 net.cpp:165] Memory required for data: 165377500\nI0821 06:45:44.770828  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:45:44.770840  1322 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:45:44.770846  1322 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:45:44.770854  1322 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.770866  1322 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:45:44.770874  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.770879  1322 net.cpp:165] Memory required for data: 173569500\nI0821 06:45:44.770884  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:45:44.770900  1322 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:45:44.770910  1322 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:45:44.770923  1322 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:45:44.771512  1322 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:45:44.771528  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.771536  1322 net.cpp:165] Memory required for data: 181761500\nI0821 06:45:44.771545  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:45:44.771559  1322 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:45:44.771565  1322 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:45:44.771580  1322 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:45:44.771905  1322 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:45:44.771922  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.771929  1322 net.cpp:165] Memory required for data: 189953500\nI0821 06:45:44.771945  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:44.771960  1322 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:45:44.771967  1322 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:45:44.771976  1322 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.772050  1322 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:44.772233  1322 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:45:44.772246  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.772251  1322 net.cpp:165] Memory required for data: 198145500\nI0821 06:45:44.772271  1322 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:45:44.772284  1322 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:45:44.772291  1322 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:45:44.772297  1322 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:44.772310  1322 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:45:44.772349  1322 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:45:44.772362  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.772367  1322 net.cpp:165] Memory required for data: 206337500\nI0821 06:45:44.772373  1322 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:45:44.772382  1322 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:45:44.772392  1322 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:45:44.772403  1322 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.772411  1322 net.cpp:150] Setting up L1_b2_relu\nI0821 06:45:44.772419  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.772424  1322 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:45:44.772428  1322 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.772439  1322 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.772446  1322 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:45:44.772456  1322 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:44.772466  1322 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:44.772523  1322 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:44.772536  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.772542  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.772547  1322 net.cpp:165] Memory required for data: 230913500\nI0821 06:45:44.772552  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:45:44.772562  1322 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:45:44.772568  1322 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:44.772583  1322 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:45:44.772999  1322 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:45:44.773012  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.773018  1322 net.cpp:165] Memory required for data: 239105500\nI0821 06:45:44.773030  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:45:44.773041  1322 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:45:44.773046  1322 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:45:44.773054  1322 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:45:44.773363  1322 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:45:44.773377  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.773382  1322 net.cpp:165] Memory required for data: 
247297500\nI0821 06:45:44.773396  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:44.773408  1322 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:45:44.773414  1322 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:45:44.773422  1322 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.773502  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:44.773696  1322 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:45:44.773718  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.773723  1322 net.cpp:165] Memory required for data: 255489500\nI0821 06:45:44.773732  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:45:44.773741  1322 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:45:44.773746  1322 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:45:44.773757  1322 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.773775  1322 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:45:44.773782  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.773790  1322 net.cpp:165] Memory required for data: 263681500\nI0821 06:45:44.773795  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:45:44.773809  1322 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:45:44.773816  1322 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:45:44.773833  1322 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:45:44.774412  1322 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:45:44.774427  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.774432  1322 net.cpp:165] Memory required for data: 271873500\nI0821 06:45:44.774442  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:45:44.774463  1322 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:45:44.774472  1322 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:45:44.774482  1322 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:45:44.774794  1322 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:45:44.774811  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.774816  1322 net.cpp:165] Memory required for data: 280065500\nI0821 06:45:44.774827  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:44.774839  1322 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:45:44.774845  1322 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:45:44.774858  1322 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.774927  1322 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:44.775130  1322 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:45:44.775143  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.775148  1322 net.cpp:165] Memory required for data: 288257500\nI0821 06:45:44.775161  1322 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:45:44.775171  1322 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:45:44.775177  1322 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:45:44.775183  1322 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:44.775197  1322 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:45:44.775236  1322 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:45:44.775249  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.775254  1322 net.cpp:165] Memory required for data: 296449500\nI0821 06:45:44.775260  1322 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:45:44.775267  1322 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:45:44.775272  1322 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:45:44.775280  1322 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.775291  1322 net.cpp:150] Setting up L1_b3_relu\nI0821 06:45:44.775300  1322 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:45:44.775305  1322 net.cpp:165] Memory required for data: 304641500\nI0821 06:45:44.775310  1322 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.775319  1322 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.775324  1322 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:45:44.775332  1322 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:44.775341  1322 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:44.775393  1322 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:44.775405  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.775413  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.775416  1322 net.cpp:165] Memory required for data: 321025500\nI0821 06:45:44.775421  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:45:44.775432  1322 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:45:44.775445  1322 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:44.775457  1322 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:45:44.775845  1322 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:45:44.775861  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.775866  1322 net.cpp:165] Memory required for data: 329217500\nI0821 06:45:44.775876  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:45:44.775884  1322 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:45:44.775890  1322 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:45:44.775898  1322 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:45:44.776170  1322 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:45:44.776183  1322 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:45:44.776188  1322 net.cpp:165] Memory required for data: 337409500\nI0821 06:45:44.776198  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:44.776207  1322 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:45:44.776212  1322 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:45:44.776226  1322 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.776285  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:44.776449  1322 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:45:44.776464  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.776469  1322 net.cpp:165] Memory required for data: 345601500\nI0821 06:45:44.776479  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:45:44.776486  1322 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:45:44.776492  1322 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:45:44.776499  1322 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.776510  1322 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:45:44.776516  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.776520  1322 net.cpp:165] Memory required for data: 353793500\nI0821 06:45:44.776525  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:45:44.776538  1322 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:45:44.776545  1322 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:45:44.776556  1322 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:45:44.776916  1322 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:45:44.776932  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.776937  1322 net.cpp:165] Memory required for data: 361985500\nI0821 06:45:44.776944  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:45:44.776957  1322 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:45:44.776963  1322 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:45:44.776974  1322 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:45:44.777246  1322 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:45:44.777261  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777264  1322 net.cpp:165] Memory required for data: 370177500\nI0821 06:45:44.777276  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:44.777287  1322 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:45:44.777292  1322 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:45:44.777300  1322 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.777359  1322 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:44.777518  1322 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:45:44.777530  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777535  1322 net.cpp:165] Memory required for data: 378369500\nI0821 06:45:44.777544  1322 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:45:44.777552  1322 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:45:44.777559  1322 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:45:44.777565  1322 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:44.777583  1322 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:45:44.777618  1322 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:45:44.777632  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777637  1322 net.cpp:165] Memory required for data: 386561500\nI0821 06:45:44.777642  1322 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:45:44.777649  1322 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:45:44.777655  1322 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:45:44.777662  1322 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.777671  1322 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:45:44.777678  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777683  1322 net.cpp:165] Memory required for data: 394753500\nI0821 06:45:44.777688  1322 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.777699  1322 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.777705  1322 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:45:44.777714  1322 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:44.777724  1322 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:44.777772  1322 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:44.777784  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777791  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.777796  1322 net.cpp:165] Memory required for data: 411137500\nI0821 06:45:44.777801  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:45:44.777811  1322 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:45:44.777817  1322 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:44.777835  1322 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:45:44.778190  1322 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:45:44.778205  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.778209  1322 net.cpp:165] Memory required for data: 419329500\nI0821 06:45:44.778229  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:45:44.778241  1322 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:45:44.778249  1322 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:45:44.778256  1322 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:45:44.778528  1322 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:45:44.778542  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.778547  1322 net.cpp:165] Memory required for data: 427521500\nI0821 06:45:44.778558  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:44.778565  1322 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:45:44.778571  1322 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:45:44.778583  1322 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.778640  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:44.778797  1322 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:45:44.778810  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.778815  1322 net.cpp:165] Memory required for data: 435713500\nI0821 06:45:44.778825  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:45:44.778838  1322 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:45:44.778846  1322 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:45:44.778852  1322 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.778865  1322 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:45:44.778873  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.778877  1322 net.cpp:165] Memory required for data: 443905500\nI0821 06:45:44.778882  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:45:44.778899  1322 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:45:44.778908  1322 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:45:44.778918  1322 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:45:44.779269  1322 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:45:44.779284  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.779289  1322 net.cpp:165] Memory required for data: 452097500\nI0821 06:45:44.779299  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:45:44.779326  1322 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:45:44.779335  1322 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:45:44.779347  1322 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:45:44.779623  1322 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:45:44.779636  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.779641  1322 net.cpp:165] Memory required for data: 460289500\nI0821 06:45:44.779652  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:44.779660  1322 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:45:44.779666  1322 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:45:44.779677  1322 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.779737  1322 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:44.779902  1322 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:45:44.779919  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.779924  1322 net.cpp:165] Memory required for data: 468481500\nI0821 06:45:44.779933  1322 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:45:44.779942  1322 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:45:44.779948  1322 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:45:44.779955  1322 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:44.779963  1322 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:45:44.780000  1322 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:45:44.780011  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780016  1322 net.cpp:165] Memory required for data: 476673500\nI0821 06:45:44.780021  1322 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:45:44.780032  1322 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:45:44.780038  1322 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:45:44.780045  1322 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.780055  1322 net.cpp:150] Setting up L1_b5_relu\nI0821 06:45:44.780061  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780066  1322 net.cpp:165] Memory required for data: 484865500\nI0821 06:45:44.780071  1322 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.780077  1322 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.780082  1322 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:45:44.780094  1322 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:44.780104  1322 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:44.780151  1322 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:44.780165  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780172  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780177  1322 net.cpp:165] Memory required for data: 501249500\nI0821 06:45:44.780182  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:45:44.780192  1322 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:45:44.780199  1322 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:44.780207  1322 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:45:44.780570  1322 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:45:44.780585  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780596  1322 net.cpp:165] Memory required for data: 509441500\nI0821 06:45:44.780606  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:45:44.780617  1322 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:45:44.780624  1322 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:45:44.780632  1322 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:45:44.780915  1322 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:45:44.780927  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.780932  1322 net.cpp:165] Memory required for data: 517633500\nI0821 06:45:44.780942  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:44.780951  1322 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:45:44.780956  1322 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:45:44.780969  1322 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.781030  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:44.781208  1322 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:45:44.781225  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.781230  1322 net.cpp:165] Memory required for data: 525825500\nI0821 06:45:44.781239  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:45:44.781249  1322 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:45:44.781255  1322 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:45:44.781261  1322 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.781270  1322 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:45:44.781277  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.781282  1322 net.cpp:165] Memory required for data: 534017500\nI0821 06:45:44.781287  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:45:44.781301  1322 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:45:44.781306  1322 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:45:44.781317  1322 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:45:44.781672  1322 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:45:44.781687  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.781692  1322 net.cpp:165] Memory required for data: 542209500\nI0821 06:45:44.781699  1322 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:45:44.781711  1322 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:45:44.781718  1322 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:45:44.781729  1322 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:45:44.782012  1322 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:45:44.782025  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782032  1322 net.cpp:165] Memory required for data: 550401500\nI0821 06:45:44.782042  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:44.782050  1322 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:45:44.782057  1322 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:45:44.782064  1322 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.782125  1322 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:44.782282  1322 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:45:44.782296  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782300  1322 net.cpp:165] Memory required for data: 558593500\nI0821 06:45:44.782310  1322 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:45:44.782331  1322 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:45:44.782338  1322 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:45:44.782346  1322 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:44.782353  1322 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:45:44.782392  1322 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:45:44.782402  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782407  1322 net.cpp:165] Memory required for data: 566785500\nI0821 06:45:44.782418  1322 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:45:44.782426  1322 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:45:44.782433  1322 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:45:44.782443  1322 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.782451  1322 net.cpp:150] Setting up L1_b6_relu\nI0821 06:45:44.782459  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782464  1322 net.cpp:165] Memory required for data: 574977500\nI0821 06:45:44.782467  1322 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.782474  1322 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.782480  1322 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:45:44.782486  1322 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:44.782496  1322 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:44.782549  1322 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:44.782562  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782567  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782572  1322 net.cpp:165] Memory required for data: 591361500\nI0821 06:45:44.782577  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:45:44.782588  1322 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:45:44.782593  1322 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:44.782606  1322 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:45:44.782973  1322 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:45:44.782986  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.782992  1322 net.cpp:165] Memory required for data: 599553500\nI0821 06:45:44.783000  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:45:44.783010  1322 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:45:44.783015  1322 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:45:44.783046  1322 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:45:44.783329  1322 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:45:44.783344  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.783349  1322 net.cpp:165] Memory required for data: 607745500\nI0821 06:45:44.783359  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:44.783370  1322 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:45:44.783376  1322 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:45:44.783385  1322 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.783443  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:44.783604  1322 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:45:44.783618  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.783623  1322 net.cpp:165] Memory required for data: 615937500\nI0821 06:45:44.783632  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:45:44.783639  1322 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:45:44.783645  1322 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:45:44.783656  1322 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.783666  1322 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:45:44.783674  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.783677  1322 net.cpp:165] Memory required for data: 624129500\nI0821 06:45:44.783682  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:45:44.783695  1322 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:45:44.783701  1322 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:45:44.783712  1322 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:45:44.784077  1322 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:45:44.784091  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784103  1322 
net.cpp:165] Memory required for data: 632321500\nI0821 06:45:44.784112  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:45:44.784121  1322 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:45:44.784127  1322 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:45:44.784137  1322 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:45:44.784415  1322 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:45:44.784427  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784432  1322 net.cpp:165] Memory required for data: 640513500\nI0821 06:45:44.784442  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:44.784453  1322 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:45:44.784461  1322 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:45:44.784467  1322 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.784529  1322 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:44.784692  1322 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:45:44.784704  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784709  1322 net.cpp:165] Memory required for data: 648705500\nI0821 06:45:44.784718  1322 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:45:44.784728  1322 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:45:44.784734  1322 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:45:44.784740  1322 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:44.784754  1322 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:45:44.784788  1322 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:45:44.784803  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784808  1322 net.cpp:165] Memory required for data: 656897500\nI0821 06:45:44.784813  1322 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:45:44.784821  1322 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:45:44.784826  1322 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:45:44.784839  1322 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.784848  1322 net.cpp:150] Setting up L1_b7_relu\nI0821 06:45:44.784855  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784860  1322 net.cpp:165] Memory required for data: 665089500\nI0821 06:45:44.784865  1322 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.784875  1322 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.784880  1322 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:45:44.784888  1322 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:44.784898  1322 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:44.784948  1322 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:44.784960  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784966  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.784971  1322 net.cpp:165] Memory required for data: 681473500\nI0821 06:45:44.784976  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:45:44.784986  1322 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:45:44.784992  1322 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:44.785004  1322 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:45:44.785360  1322 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:45:44.785374  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.785379  1322 net.cpp:165] Memory required for data: 689665500\nI0821 06:45:44.785388  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:45:44.785396  1322 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:45:44.785403  1322 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:45:44.785418  1322 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:45:44.785699  1322 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:45:44.785712  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.785717  1322 net.cpp:165] Memory required for data: 697857500\nI0821 06:45:44.785727  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:44.785738  1322 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:45:44.785745  1322 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:45:44.785753  1322 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.785815  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:44.786157  1322 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:45:44.786173  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.786178  1322 net.cpp:165] Memory required for data: 706049500\nI0821 06:45:44.786187  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:45:44.786195  1322 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:45:44.786202  1322 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:45:44.786212  1322 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.786223  1322 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:45:44.786231  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.786234  1322 net.cpp:165] Memory required for data: 714241500\nI0821 06:45:44.786239  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:45:44.786253  1322 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:45:44.786259  1322 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:45:44.786267  1322 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:45:44.786628  1322 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:45:44.786643  1322 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.786648  1322 net.cpp:165] Memory required for data: 722433500\nI0821 06:45:44.786656  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:45:44.786669  1322 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:45:44.786674  1322 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:45:44.786682  1322 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:45:44.786970  1322 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:45:44.786984  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.786989  1322 net.cpp:165] Memory required for data: 730625500\nI0821 06:45:44.787000  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:44.787009  1322 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:45:44.787014  1322 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:45:44.787025  1322 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.787086  1322 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:44.787246  1322 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:45:44.787259  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787264  1322 net.cpp:165] Memory required for data: 738817500\nI0821 06:45:44.787273  1322 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:45:44.787282  1322 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:45:44.787288  1322 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:45:44.787295  1322 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:44.787307  1322 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:45:44.787340  1322 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:45:44.787355  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787360  1322 net.cpp:165] Memory required for data: 747009500\nI0821 06:45:44.787365  1322 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:45:44.787374  1322 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:45:44.787379  1322 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:45:44.787385  1322 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.787402  1322 net.cpp:150] Setting up L1_b8_relu\nI0821 06:45:44.787410  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787415  1322 net.cpp:165] Memory required for data: 755201500\nI0821 06:45:44.787420  1322 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.787428  1322 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.787434  1322 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:45:44.787442  1322 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:44.787451  1322 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:44.787503  1322 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:44.787513  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787520  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787525  1322 net.cpp:165] Memory required for data: 771585500\nI0821 06:45:44.787529  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:45:44.787540  1322 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:45:44.787546  1322 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:44.787559  1322 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:45:44.787935  1322 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:45:44.787950  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.787955  1322 net.cpp:165] Memory required for data: 779777500\nI0821 06:45:44.787963  1322 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:45:44.787976  1322 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:45:44.787981  1322 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:45:44.787991  1322 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:45:44.788267  1322 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:45:44.788280  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.788285  1322 net.cpp:165] Memory required for data: 787969500\nI0821 06:45:44.788295  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:44.788305  1322 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:45:44.788311  1322 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:45:44.788318  1322 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.788378  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:44.788542  1322 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:45:44.788554  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.788559  1322 net.cpp:165] Memory required for data: 796161500\nI0821 06:45:44.788568  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:45:44.788575  1322 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:45:44.788581  1322 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:45:44.788592  1322 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.788602  1322 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:45:44.788609  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.788614  1322 net.cpp:165] Memory required for data: 804353500\nI0821 06:45:44.788619  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:45:44.788628  1322 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:45:44.788635  1322 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:45:44.788645  1322 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:45:44.789013  1322 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:45:44.789027  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789032  1322 net.cpp:165] Memory required for data: 812545500\nI0821 06:45:44.789041  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:45:44.789050  1322 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:45:44.789057  1322 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:45:44.789074  1322 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:45:44.789352  1322 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:45:44.789371  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789376  1322 net.cpp:165] Memory required for data: 820737500\nI0821 06:45:44.789404  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:44.789415  1322 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:45:44.789422  1322 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:45:44.789429  1322 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.789490  1322 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:44.789649  1322 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:45:44.789661  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789666  1322 net.cpp:165] Memory required for data: 828929500\nI0821 06:45:44.789675  1322 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:45:44.789683  1322 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:45:44.789690  1322 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:45:44.789695  1322 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:44.789707  1322 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:45:44.789741  1322 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:45:44.789750  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789755  1322 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:45:44.789760  1322 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:45:44.789767  1322 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:45:44.789773  1322 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:45:44.789783  1322 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.789793  1322 net.cpp:150] Setting up L1_b9_relu\nI0821 06:45:44.789799  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789804  1322 net.cpp:165] Memory required for data: 845313500\nI0821 06:45:44.789808  1322 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.789815  1322 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.789820  1322 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:45:44.789836  1322 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:44.789849  1322 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:44.789899  1322 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:44.789911  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789917  1322 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:44.789922  1322 net.cpp:165] Memory required for data: 861697500\nI0821 06:45:44.789927  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:45:44.789938  1322 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:45:44.789944  1322 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:44.789957  1322 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:45:44.790316  1322 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:45:44.790329  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.790334  1322 net.cpp:165] Memory required for data: 
863745500\nI0821 06:45:44.790343  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:45:44.790352  1322 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:45:44.790359  1322 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:45:44.790369  1322 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:45:44.790637  1322 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:45:44.790650  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.790655  1322 net.cpp:165] Memory required for data: 865793500\nI0821 06:45:44.790665  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:44.790685  1322 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:45:44.790693  1322 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:45:44.790700  1322 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.790760  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:44.790928  1322 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:45:44.790942  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.790947  1322 net.cpp:165] Memory required for data: 867841500\nI0821 06:45:44.790956  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:45:44.790966  1322 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:45:44.790972  1322 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:45:44.790979  1322 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.790989  1322 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:45:44.790997  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.791000  1322 net.cpp:165] Memory required for data: 869889500\nI0821 06:45:44.791005  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:45:44.791018  1322 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:45:44.791024  1322 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:45:44.791035  1322 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:45:44.791388  1322 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:45:44.791402  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.791407  1322 net.cpp:165] Memory required for data: 871937500\nI0821 06:45:44.791415  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:45:44.791424  1322 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:45:44.791430  1322 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:45:44.791441  1322 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:45:44.791708  1322 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:45:44.791721  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.791726  1322 net.cpp:165] Memory required for data: 873985500\nI0821 06:45:44.791736  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:44.791748  1322 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:45:44.791754  1322 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:45:44.791761  1322 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.791821  1322 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:44.791986  1322 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:45:44.791999  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.792004  1322 net.cpp:165] Memory required for data: 876033500\nI0821 06:45:44.792013  1322 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:45:44.792028  1322 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:45:44.792035  1322 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:44.792043  1322 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:45:44.792076  1322 net.cpp:150] Setting up L2_b1_pool\nI0821 06:45:44.792085  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.792090  1322 net.cpp:165] Memory required for data: 878081500\nI0821 06:45:44.792095  1322 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:45:44.792104  1322 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:45:44.792109  1322 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:45:44.792115  1322 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:45:44.792126  1322 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:45:44.792160  1322 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:45:44.792170  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.792174  1322 net.cpp:165] Memory required for data: 880129500\nI0821 06:45:44.792179  1322 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:45:44.792186  1322 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:45:44.792199  1322 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:45:44.792207  1322 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.792217  1322 net.cpp:150] Setting up L2_b1_relu\nI0821 06:45:44.792223  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.792228  1322 net.cpp:165] Memory required for data: 882177500\nI0821 06:45:44.792232  1322 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:45:44.792245  1322 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:45:44.792254  1322 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:45:44.794466  1322 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:45:44.794483  1322 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:44.794489  1322 net.cpp:165] Memory required for data: 884225500\nI0821 06:45:44.794495  1322 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:45:44.794504  1322 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:45:44.794510  1322 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:45:44.794517  1322 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:45:44.794528  1322 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:45:44.794571  1322 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:45:44.794586  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.794592  1322 net.cpp:165] Memory required for data: 888321500\nI0821 06:45:44.794597  1322 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.794605  1322 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.794611  1322 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:45:44.794618  1322 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:44.794628  1322 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:44.794682  1322 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:44.794694  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.794700  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.794705  1322 net.cpp:165] Memory required for data: 896513500\nI0821 06:45:44.794710  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:45:44.794724  1322 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:45:44.794731  1322 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:44.794740  1322 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:45:44.795259  1322 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:45:44.795274  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.795279  1322 net.cpp:165] Memory required for data: 900609500\nI0821 06:45:44.795289  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:45:44.795298  1322 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:45:44.795305  1322 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:45:44.795315  1322 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:45:44.795585  1322 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:45:44.795598  1322 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:45:44.795603  1322 net.cpp:165] Memory required for data: 904705500\nI0821 06:45:44.795614  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:44.795622  1322 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:45:44.795629  1322 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:45:44.795636  1322 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.795698  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:44.795866  1322 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:45:44.795882  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.795887  1322 net.cpp:165] Memory required for data: 908801500\nI0821 06:45:44.795897  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:45:44.795904  1322 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:45:44.795918  1322 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:45:44.795927  1322 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.795936  1322 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:45:44.795944  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.795948  1322 net.cpp:165] Memory required for data: 912897500\nI0821 06:45:44.795953  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:45:44.795967  1322 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:45:44.795974  1322 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:45:44.795984  1322 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:45:44.796484  1322 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:45:44.796499  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.796504  1322 net.cpp:165] Memory required for data: 916993500\nI0821 06:45:44.796511  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:45:44.796524  1322 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:45:44.796530  
1322 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:45:44.796541  1322 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:45:44.796803  1322 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:45:44.796816  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.796821  1322 net.cpp:165] Memory required for data: 921089500\nI0821 06:45:44.796838  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:44.796846  1322 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:45:44.796852  1322 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:45:44.796859  1322 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.796921  1322 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:44.797080  1322 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:45:44.797093  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797098  1322 net.cpp:165] Memory required for data: 925185500\nI0821 06:45:44.797106  1322 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:45:44.797118  1322 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:45:44.797125  1322 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:45:44.797132  1322 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:44.797140  1322 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:45:44.797169  1322 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:45:44.797178  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797183  1322 net.cpp:165] Memory required for data: 929281500\nI0821 06:45:44.797188  1322 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:45:44.797199  1322 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:45:44.797204  1322 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:45:44.797211  1322 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.797220  1322 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:45:44.797227  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797232  1322 net.cpp:165] Memory required for data: 933377500\nI0821 06:45:44.797236  1322 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.797245  1322 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.797250  1322 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:45:44.797256  1322 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:44.797266  1322 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:44.797315  1322 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:44.797327  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797333  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797338  1322 net.cpp:165] Memory required for data: 941569500\nI0821 06:45:44.797349  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:45:44.797363  1322 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:45:44.797370  1322 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:44.797379  1322 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:45:44.797883  1322 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:45:44.797897  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.797902  1322 net.cpp:165] Memory required for data: 945665500\nI0821 06:45:44.797911  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:45:44.797922  1322 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:45:44.797930  1322 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:45:44.797940  1322 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:45:44.798204  1322 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:45:44.798218  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.798223  1322 net.cpp:165] Memory required for data: 949761500\nI0821 06:45:44.798233  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:44.798241  1322 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:45:44.798247  1322 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:45:44.798255  1322 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.798316  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:44.798471  1322 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:45:44.798485  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.798490  1322 net.cpp:165] Memory required for data: 953857500\nI0821 06:45:44.798498  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:45:44.798508  1322 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:45:44.798514  1322 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:45:44.798522  1322 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.798532  1322 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:45:44.798538  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.798543  1322 net.cpp:165] Memory required for data: 957953500\nI0821 06:45:44.798548  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:45:44.798562  1322 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:45:44.798568  1322 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:45:44.798578  1322 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:45:44.799077  1322 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:45:44.799093  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799098  1322 net.cpp:165] Memory required for data: 962049500\nI0821 06:45:44.799105  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:45:44.799118  1322 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:45:44.799124  1322 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:45:44.799135  1322 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:45:44.799402  1322 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:45:44.799418  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799423  1322 net.cpp:165] Memory required for data: 966145500\nI0821 06:45:44.799433  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:44.799443  1322 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:45:44.799448  1322 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:45:44.799456  1322 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.799516  1322 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:44.799672  1322 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:45:44.799685  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799690  1322 net.cpp:165] Memory required for data: 970241500\nI0821 06:45:44.799700  1322 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:45:44.799708  1322 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:45:44.799727  1322 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:45:44.799734  1322 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:44.799742  1322 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:45:44.799772  1322 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:45:44.799780  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799785  1322 net.cpp:165] Memory required for data: 974337500\nI0821 06:45:44.799790  1322 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:45:44.799811  1322 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:45:44.799818  1322 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:45:44.799825  1322 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.799841  1322 net.cpp:150] Setting up L2_b3_relu\nI0821 06:45:44.799849  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799854  1322 net.cpp:165] Memory required for data: 978433500\nI0821 06:45:44.799860  1322 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.799866  1322 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.799872  1322 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:45:44.799880  1322 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:44.799888  1322 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:44.799942  1322 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:44.799953  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799960  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.799964  1322 net.cpp:165] Memory required for data: 986625500\nI0821 06:45:44.799970  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:45:44.799981  1322 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:45:44.799988  1322 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:44.799999  1322 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:45:44.800498  1322 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:45:44.800513  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.800518  1322 net.cpp:165] Memory required for data: 990721500\nI0821 06:45:44.800525  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:45:44.800539  1322 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:45:44.800544  1322 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:45:44.800552  1322 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:45:44.800822  1322 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:45:44.800843  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.800848  1322 net.cpp:165] Memory required for data: 994817500\nI0821 06:45:44.800858  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:44.800869  1322 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:45:44.800875  1322 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:45:44.800884  1322 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.800943  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:44.801102  1322 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:45:44.801115  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.801120  1322 net.cpp:165] Memory required for data: 998913500\nI0821 06:45:44.801129  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:45:44.801141  1322 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:45:44.801148  1322 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:45:44.801158  1322 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.801167  1322 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:45:44.801174  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.801179  1322 net.cpp:165] Memory required for data: 1003009500\nI0821 06:45:44.801192  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:45:44.801203  1322 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:45:44.801208  1322 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:45:44.801220  1322 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:45:44.801717  1322 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:45:44.801731  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.801736  1322 net.cpp:165] Memory required for data: 1007105500\nI0821 06:45:44.801745  1322 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:45:44.801754  1322 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:45:44.801760  1322 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:45:44.801771  1322 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:45:44.802045  1322 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:45:44.802059  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802064  1322 net.cpp:165] Memory required for data: 1011201500\nI0821 06:45:44.802074  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:44.802088  1322 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:45:44.802093  1322 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:45:44.802101  1322 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.802158  1322 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:44.802320  1322 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:45:44.802332  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802337  1322 net.cpp:165] Memory required for data: 1015297500\nI0821 06:45:44.802346  1322 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:45:44.802357  1322 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:45:44.802364  1322 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:45:44.802371  1322 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:44.802379  1322 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:45:44.802409  1322 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:45:44.802419  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802424  1322 net.cpp:165] Memory required for data: 1019393500\nI0821 06:45:44.802429  1322 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:45:44.802436  1322 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:45:44.802443  1322 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:45:44.802453  1322 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.802462  1322 net.cpp:150] Setting up L2_b4_relu\nI0821 06:45:44.802469  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802474  1322 net.cpp:165] Memory required for data: 1023489500\nI0821 06:45:44.802479  1322 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.802486  1322 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.802491  1322 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:45:44.802498  1322 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:44.802508  1322 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:44.802558  1322 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:44.802570  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802577  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.802582  1322 net.cpp:165] Memory required for data: 1031681500\nI0821 06:45:44.802587  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:45:44.802597  1322 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:45:44.802603  1322 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:44.802615  1322 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:45:44.803127  1322 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:45:44.803141  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.803146  1322 net.cpp:165] Memory required for data: 1035777500\nI0821 06:45:44.803155  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:45:44.803164  1322 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:45:44.803170  1322 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:45:44.803181  1322 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:45:44.803453  1322 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:45:44.803467  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.803472  1322 net.cpp:165] Memory required for data: 1039873500\nI0821 06:45:44.803483  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:44.803493  1322 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:45:44.803500  1322 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:45:44.803508  1322 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.803566  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:44.803730  1322 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:45:44.803742  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.803747  1322 net.cpp:165] Memory required for data: 1043969500\nI0821 06:45:44.803755  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:45:44.803766  1322 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:45:44.803772  1322 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:45:44.803781  1322 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.803789  1322 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:45:44.803797  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.803802  1322 net.cpp:165] Memory required for data: 1048065500\nI0821 06:45:44.803809  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:45:44.803820  1322 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:45:44.803825  1322 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:45:44.803843  1322 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:45:44.804337  1322 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:45:44.804350  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.804355  1322 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:45:44.804363  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:45:44.804373  1322 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:45:44.804379  1322 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:45:44.804390  1322 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:45:44.804659  1322 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:45:44.804672  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.804677  1322 net.cpp:165] Memory required for data: 1056257500\nI0821 06:45:44.804687  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:44.804699  1322 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:45:44.804705  1322 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:45:44.804713  1322 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.804770  1322 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:44.804935  1322 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:45:44.804949  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.804953  1322 net.cpp:165] Memory required for data: 1060353500\nI0821 06:45:44.804962  1322 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:45:44.804975  1322 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:45:44.804980  1322 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:45:44.804987  1322 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:44.804996  1322 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:45:44.805029  1322 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:45:44.805045  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.805050  1322 net.cpp:165] Memory required for data: 1064449500\nI0821 06:45:44.805055  1322 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:45:44.805063  1322 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:45:44.805069  1322 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:45:44.805079  1322 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.805088  1322 net.cpp:150] Setting up L2_b5_relu\nI0821 06:45:44.805095  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.805100  1322 net.cpp:165] Memory required for data: 1068545500\nI0821 06:45:44.805104  1322 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.805111  1322 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.805116  1322 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:45:44.805124  1322 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:44.805133  1322 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:44.805186  1322 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:44.805197  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.805204  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.805208  1322 net.cpp:165] Memory required for data: 1076737500\nI0821 06:45:44.805214  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:45:44.805224  1322 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:45:44.805232  1322 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:44.805243  1322 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:45:44.805743  1322 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:45:44.805757  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.805763  1322 net.cpp:165] Memory required for data: 1080833500\nI0821 06:45:44.805771  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:45:44.805779  1322 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:45:44.805786  1322 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:45:44.805799  1322 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:45:44.806076  1322 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:45:44.806088  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.806093  1322 net.cpp:165] Memory required for data: 1084929500\nI0821 06:45:44.806104  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:44.806115  1322 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:45:44.806123  1322 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:45:44.806129  1322 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.806187  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:44.806347  1322 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:45:44.806360  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.806365  1322 net.cpp:165] Memory required for data: 1089025500\nI0821 06:45:44.806373  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:45:44.806385  1322 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:45:44.806391  1322 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:45:44.806398  1322 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.806408  1322 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:45:44.806416  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.806419  1322 net.cpp:165] Memory required for data: 1093121500\nI0821 06:45:44.806424  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:45:44.806438  1322 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:45:44.806444  1322 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:45:44.806454  1322 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:45:44.806960  1322 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:45:44.806975  1322 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.806980  1322 net.cpp:165] Memory required for data: 1097217500\nI0821 06:45:44.806988  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:45:44.806998  1322 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:45:44.807004  1322 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:45:44.807015  1322 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:45:44.807286  1322 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:45:44.807301  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807304  1322 net.cpp:165] Memory required for data: 1101313500\nI0821 06:45:44.807314  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:44.807325  1322 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:45:44.807332  1322 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:45:44.807340  1322 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.807394  1322 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:44.807550  1322 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:45:44.807564  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807569  1322 net.cpp:165] Memory required for data: 1105409500\nI0821 06:45:44.807577  1322 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:45:44.807586  1322 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:45:44.807592  1322 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:45:44.807605  1322 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:44.807613  1322 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:45:44.807641  1322 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:45:44.807653  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807658  1322 net.cpp:165] Memory required for data: 1109505500\nI0821 06:45:44.807663  1322 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:45:44.807672  1322 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:45:44.807677  1322 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:45:44.807684  1322 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.807693  1322 net.cpp:150] Setting up L2_b6_relu\nI0821 06:45:44.807700  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807704  1322 net.cpp:165] Memory required for data: 1113601500\nI0821 06:45:44.807709  1322 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.807719  1322 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.807724  1322 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:45:44.807732  1322 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:44.807741  1322 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:44.807791  1322 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:44.807803  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807809  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.807814  1322 net.cpp:165] Memory required for data: 1121793500\nI0821 06:45:44.807819  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:45:44.807835  1322 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:45:44.807842  1322 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:44.807857  1322 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:45:44.809332  1322 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:45:44.809350  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.809355  1322 net.cpp:165] Memory required for data: 1125889500\nI0821 06:45:44.809363  1322 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:45:44.809386  1322 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:45:44.809392  1322 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:45:44.809401  1322 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:45:44.809667  1322 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:45:44.809680  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.809686  1322 net.cpp:165] Memory required for data: 1129985500\nI0821 06:45:44.809696  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:44.809705  1322 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:45:44.809711  1322 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:45:44.809718  1322 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.809782  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:44.809945  1322 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:45:44.809962  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.809967  1322 net.cpp:165] Memory required for data: 1134081500\nI0821 06:45:44.809976  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:45:44.809984  1322 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:45:44.809990  1322 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:45:44.809998  1322 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.810008  1322 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:45:44.810014  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.810019  1322 net.cpp:165] Memory required for data: 1138177500\nI0821 06:45:44.810024  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:45:44.810037  1322 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:45:44.810044  1322 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:45:44.810055  1322 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:45:44.810539  1322 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:45:44.810552  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.810557  1322 net.cpp:165] Memory required for data: 1142273500\nI0821 06:45:44.810565  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:45:44.810577  1322 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:45:44.810585  1322 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:45:44.810595  1322 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:45:44.810871  1322 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:45:44.810885  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.810890  1322 net.cpp:165] Memory required for data: 1146369500\nI0821 06:45:44.810900  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:44.810909  1322 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:45:44.810915  1322 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:45:44.810923  1322 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.810986  1322 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:44.811141  1322 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:45:44.811153  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811158  1322 net.cpp:165] Memory required for data: 1150465500\nI0821 06:45:44.811167  1322 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:45:44.811179  1322 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:45:44.811187  1322 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:45:44.811193  1322 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:44.811202  1322 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:45:44.811229  1322 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:45:44.811239  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811242  1322 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:45:44.811247  1322 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:45:44.811259  1322 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:45:44.811272  1322 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:45:44.811280  1322 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.811290  1322 net.cpp:150] Setting up L2_b7_relu\nI0821 06:45:44.811296  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811301  1322 net.cpp:165] Memory required for data: 1158657500\nI0821 06:45:44.811306  1322 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.811313  1322 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.811319  1322 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:45:44.811326  1322 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:44.811336  1322 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:44.811386  1322 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:44.811398  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811405  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811410  1322 net.cpp:165] Memory required for data: 1166849500\nI0821 06:45:44.811415  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:45:44.811429  1322 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:45:44.811435  1322 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:44.811444  1322 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:45:44.811946  1322 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:45:44.811961  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.811966  1322 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:45:44.811975  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:45:44.811987  1322 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:45:44.811995  1322 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:45:44.812005  1322 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:45:44.812273  1322 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:45:44.812286  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.812290  1322 net.cpp:165] Memory required for data: 1175041500\nI0821 06:45:44.812301  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:44.812310  1322 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:45:44.812316  1322 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:45:44.812325  1322 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.812386  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:44.812542  1322 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:45:44.812558  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.812563  1322 net.cpp:165] Memory required for data: 1179137500\nI0821 06:45:44.812572  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:45:44.812579  1322 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:45:44.812587  1322 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:45:44.812593  1322 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.812602  1322 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:45:44.812610  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.812614  1322 net.cpp:165] Memory required for data: 1183233500\nI0821 06:45:44.812619  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:45:44.812633  1322 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:45:44.812639  1322 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:45:44.812650  1322 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:45:44.813149  1322 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:45:44.813163  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.813169  1322 net.cpp:165] Memory required for data: 1187329500\nI0821 06:45:44.813177  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:45:44.813196  1322 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:45:44.813204  1322 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:45:44.813215  1322 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:45:44.813489  1322 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:45:44.813503  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.813508  1322 net.cpp:165] Memory required for data: 1191425500\nI0821 06:45:44.813518  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:44.813526  1322 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:45:44.813532  1322 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:45:44.813540  1322 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.813601  1322 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:44.813760  1322 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:45:44.813773  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.813778  1322 net.cpp:165] Memory required for data: 1195521500\nI0821 06:45:44.813786  1322 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:45:44.813798  1322 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:45:44.813805  1322 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:45:44.813812  1322 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:44.813820  1322 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:45:44.813854  1322 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:45:44.813865  1322 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:44.813869  1322 net.cpp:165] Memory required for data: 1199617500\nI0821 06:45:44.813875  1322 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:45:44.813886  1322 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:45:44.813892  1322 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:45:44.813899  1322 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.813910  1322 net.cpp:150] Setting up L2_b8_relu\nI0821 06:45:44.813916  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.813920  1322 net.cpp:165] Memory required for data: 1203713500\nI0821 06:45:44.813925  1322 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.813932  1322 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.813938  1322 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:45:44.813946  1322 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:44.813971  1322 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:44.814024  1322 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:44.814038  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.814043  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.814049  1322 net.cpp:165] Memory required for data: 1211905500\nI0821 06:45:44.814054  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:45:44.814067  1322 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:45:44.814074  1322 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:44.814083  1322 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:45:44.814581  1322 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:45:44.814595  1322 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:44.814600  1322 net.cpp:165] Memory required for data: 1216001500\nI0821 06:45:44.814610  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:45:44.814621  1322 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:45:44.814628  1322 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:45:44.814640  1322 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:45:44.814925  1322 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:45:44.814949  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.814954  1322 net.cpp:165] Memory required for data: 1220097500\nI0821 06:45:44.814965  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:44.814972  1322 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:45:44.814980  1322 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:45:44.814987  1322 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.815049  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:44.815214  1322 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:45:44.815227  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.815232  1322 net.cpp:165] Memory required for data: 1224193500\nI0821 06:45:44.815241  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:45:44.815249  1322 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:45:44.815255  1322 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:45:44.815266  1322 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.815275  1322 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:45:44.815282  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.815287  1322 net.cpp:165] Memory required for data: 1228289500\nI0821 06:45:44.815292  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:45:44.815305  1322 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:45:44.815312  1322 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:45:44.815320  1322 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:45:44.817028  1322 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:45:44.817045  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817050  1322 net.cpp:165] Memory required for data: 1232385500\nI0821 06:45:44.817059  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:45:44.817072  1322 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:45:44.817080  1322 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:45:44.817090  1322 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:45:44.817358  1322 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:45:44.817371  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817376  1322 net.cpp:165] Memory required for data: 1236481500\nI0821 06:45:44.817425  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:44.817438  1322 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:45:44.817445  1322 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:45:44.817453  1322 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.817517  1322 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:44.817674  1322 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:45:44.817687  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817692  1322 net.cpp:165] Memory required for data: 1240577500\nI0821 06:45:44.817701  1322 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:45:44.817713  1322 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:45:44.817720  1322 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:45:44.817728  1322 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:44.817739  1322 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:45:44.817766  1322 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:45:44.817776  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817780  1322 net.cpp:165] Memory required for data: 1244673500\nI0821 06:45:44.817785  1322 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:45:44.817793  1322 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:45:44.817800  1322 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:45:44.817809  1322 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.817819  1322 net.cpp:150] Setting up L2_b9_relu\nI0821 06:45:44.817826  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817844  1322 net.cpp:165] Memory required for data: 1248769500\nI0821 06:45:44.817850  1322 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.817858  1322 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.817863  1322 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:45:44.817874  1322 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:44.817884  1322 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:44.817937  1322 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:44.817950  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817955  1322 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:44.817960  1322 net.cpp:165] Memory required for data: 1256961500\nI0821 06:45:44.817965  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:45:44.817977  1322 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:45:44.817983  1322 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:44.817996  1322 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:45:44.818493  1322 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:45:44.818508  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.818513  1322 net.cpp:165] Memory required for data: 1257985500\nI0821 06:45:44.818522  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:45:44.818534  1322 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:45:44.818542  1322 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:45:44.818549  1322 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:45:44.818833  1322 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:45:44.818847  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.818852  1322 net.cpp:165] Memory required for data: 1259009500\nI0821 06:45:44.818863  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:44.818872  1322 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:45:44.818878  1322 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:45:44.818886  1322 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.818948  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:44.819109  1322 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:45:44.819125  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.819130  1322 net.cpp:165] Memory required for data: 1260033500\nI0821 06:45:44.819139  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:45:44.819149  1322 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:45:44.819154  1322 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:45:44.819161  1322 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:44.819170  1322 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:45:44.819177  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.819182  1322 net.cpp:165] Memory required for data: 1261057500\nI0821 06:45:44.819187  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:45:44.819201  1322 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:45:44.819207  1322 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:45:44.819216  1322 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:45:44.819710  1322 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:45:44.819723  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.819728  1322 net.cpp:165] Memory required for data: 1262081500\nI0821 06:45:44.819736  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:45:44.819746  1322 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:45:44.819756  1322 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:45:44.819764  1322 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:45:44.820044  1322 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:45:44.820065  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.820070  1322 net.cpp:165] Memory required for data: 1263105500\nI0821 06:45:44.820080  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:44.820089  1322 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:45:44.820096  1322 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:45:44.820106  1322 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:45:44.820166  1322 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:44.820333  1322 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:45:44.820346  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.820351  1322 net.cpp:165] Memory required for data: 1264129500\nI0821 06:45:44.820360  1322 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:45:44.820376  1322 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:45:44.820384  1322 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:44.820391  1322 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:45:44.820430  1322 net.cpp:150] Setting up L3_b1_pool\nI0821 06:45:44.820441  1322 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:45:44.820446  1322 net.cpp:165] Memory required for data: 1265153500\nI0821 06:45:44.820451  1322 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:45:44.820461  1322 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:45:44.820466  1322 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:45:44.820477  1322 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:45:44.820484  1322 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:45:44.820518  1322 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:45:44.820528  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.820533  1322 net.cpp:165] Memory required for data: 1266177500\nI0821 06:45:44.820538  1322 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:45:44.820545  1322 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:45:44.820551  1322 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:45:44.820562  1322 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:45:44.820572  1322 net.cpp:150] Setting up L3_b1_relu\nI0821 06:45:44.820580  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.820583  1322 net.cpp:165] Memory required for data: 1267201500\nI0821 06:45:44.820588  1322 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:45:44.820598  1322 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:45:44.820605  1322 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:45:44.821815  1322 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:45:44.821841  1322 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:44.821846  1322 net.cpp:165] Memory required for data: 1268225500\nI0821 06:45:44.821852  1322 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:45:44.821861  1322 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:45:44.821868  1322 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:45:44.821876  1322 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:45:44.821887  1322 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:45:44.821933  1322 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:45:44.821944  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.821949  1322 net.cpp:165] Memory required for data: 1270273500\nI0821 06:45:44.821954  1322 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.821961  1322 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.821967  1322 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:45:44.821979  1322 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:44.821988  1322 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:44.822041  1322 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:44.822055  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.822069  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.822074  1322 net.cpp:165] Memory required for data: 1274369500\nI0821 06:45:44.822080  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:45:44.822091  1322 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:45:44.822098  1322 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:44.822108  1322 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:45:44.823160  1322 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:45:44.823175  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.823180  1322 net.cpp:165] Memory required for data: 1276417500\nI0821 06:45:44.823189  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:45:44.823201  1322 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:45:44.823209  1322 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:45:44.823216  1322 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:45:44.823493  1322 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:45:44.823506  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.823511  1322 net.cpp:165] Memory required for data: 1278465500\nI0821 06:45:44.823521  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:44.823534  1322 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:45:44.823539  1322 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:45:44.823547  1322 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.823614  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:44.823777  1322 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:45:44.823791  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.823796  1322 net.cpp:165] Memory required for data: 1280513500\nI0821 06:45:44.823804  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:45:44.823815  1322 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:45:44.823822  1322 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:45:44.823837  1322 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:44.823849  1322 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:45:44.823856  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.823861  1322 net.cpp:165] Memory required for data: 1282561500\nI0821 06:45:44.823866  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:45:44.823878  1322 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:45:44.823884  1322 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:45:44.823894  1322 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:45:44.824934  1322 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:45:44.824949  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.824954  1322 net.cpp:165] Memory required for data: 1284609500\nI0821 06:45:44.824962  1322 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:45:44.824975  1322 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:45:44.824981  1322 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:45:44.824990  1322 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:45:44.825263  1322 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:45:44.825275  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825280  1322 net.cpp:165] Memory required for data: 1286657500\nI0821 06:45:44.825290  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:44.825299  1322 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:45:44.825305  1322 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:45:44.825314  1322 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:45:44.825376  1322 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:44.825534  1322 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:45:44.825549  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825556  1322 net.cpp:165] Memory required for data: 1288705500\nI0821 06:45:44.825572  1322 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:45:44.825580  1322 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:45:44.825587  1322 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:45:44.825594  1322 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:44.825603  1322 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:45:44.825639  1322 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:45:44.825651  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825656  1322 net.cpp:165] Memory required for data: 1290753500\nI0821 06:45:44.825661  1322 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:45:44.825670  1322 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:45:44.825675  1322 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:45:44.825682  1322 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:45:44.825692  1322 net.cpp:150] Setting up L3_b2_relu\nI0821 06:45:44.825698  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825703  1322 net.cpp:165] Memory required for data: 1292801500\nI0821 06:45:44.825708  1322 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.825717  1322 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.825723  1322 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:45:44.825731  1322 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:44.825742  1322 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:44.825788  1322 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:44.825805  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825812  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.825817  1322 net.cpp:165] Memory required for data: 1296897500\nI0821 06:45:44.825822  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:45:44.825839  1322 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:45:44.825846  1322 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:44.825855  1322 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:45:44.826906  1322 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:45:44.826921  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.826926  1322 net.cpp:165] Memory required for data: 1298945500\nI0821 06:45:44.826936  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:45:44.826947  1322 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:45:44.826954  1322 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:45:44.826962  1322 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:45:44.827232  1322 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:45:44.827245  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.827250  1322 net.cpp:165] Memory required for data: 1300993500\nI0821 06:45:44.827260  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:44.827272  1322 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:45:44.827280  1322 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:45:44.827287  1322 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.827350  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:44.827512  1322 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:45:44.827525  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.827530  1322 net.cpp:165] Memory required for data: 1303041500\nI0821 06:45:44.827539  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:45:44.827548  1322 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:45:44.827554  1322 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:45:44.827564  1322 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:44.827574  1322 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:45:44.827589  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.827594  1322 net.cpp:165] Memory required for data: 1305089500\nI0821 06:45:44.827599  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:45:44.827610  1322 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:45:44.827615  1322 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:45:44.827626  1322 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:45:44.828663  1322 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:45:44.828678  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.828683  1322 net.cpp:165] Memory required for data: 1307137500\nI0821 06:45:44.828692  1322 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:45:44.828706  1322 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:45:44.828711  1322 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:45:44.828721  1322 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:45:44.829000  1322 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:45:44.829015  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829020  1322 net.cpp:165] Memory required for data: 1309185500\nI0821 06:45:44.829030  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:44.829038  1322 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:45:44.829044  1322 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:45:44.829052  1322 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:45:44.829118  1322 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:44.829277  1322 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:45:44.829293  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829298  1322 net.cpp:165] Memory required for data: 1311233500\nI0821 06:45:44.829306  1322 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:45:44.829315  1322 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:45:44.829321  1322 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:45:44.829329  1322 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:44.829339  1322 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:45:44.829375  1322 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:45:44.829385  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829391  1322 net.cpp:165] Memory required for data: 1313281500\nI0821 06:45:44.829396  1322 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:45:44.829406  1322 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:45:44.829412  1322 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:45:44.829421  1322 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:45:44.829429  1322 net.cpp:150] Setting up L3_b3_relu\nI0821 06:45:44.829437  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829440  1322 net.cpp:165] Memory required for data: 1315329500\nI0821 06:45:44.829445  1322 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.829455  1322 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.829461  1322 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:45:44.829468  1322 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:44.829478  1322 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:44.829525  1322 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:44.829540  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829546  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.829550  1322 net.cpp:165] Memory required for data: 1319425500\nI0821 06:45:44.829555  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:45:44.829566  1322 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:45:44.829573  1322 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:44.829589  1322 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:45:44.830643  1322 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:45:44.830658  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.830663  1322 net.cpp:165] Memory required for data: 1321473500\nI0821 06:45:44.830672  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:45:44.830684  1322 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:45:44.830691  1322 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:45:44.830699  1322 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:45:44.830981  1322 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:45:44.830996  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.831001  1322 net.cpp:165] Memory required for data: 1323521500\nI0821 06:45:44.831010  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:44.831022  1322 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:45:44.831028  1322 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:45:44.831039  1322 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.831102  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:44.831266  1322 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:45:44.831279  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.831284  1322 net.cpp:165] Memory required for data: 1325569500\nI0821 06:45:44.831292  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:45:44.831300  1322 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:45:44.831306  1322 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:45:44.831317  1322 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:44.831327  1322 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:45:44.831334  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.831338  1322 net.cpp:165] Memory required for data: 1327617500\nI0821 06:45:44.831343  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:45:44.831357  1322 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:45:44.831363  1322 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:45:44.831372  1322 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:45:44.833369  1322 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:45:44.833386  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.833392  1322 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:45:44.833401  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:45:44.833415  1322 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:45:44.833421  1322 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:45:44.833429  1322 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:45:44.833708  1322 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:45:44.833720  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.833724  1322 net.cpp:165] Memory required for data: 1331713500\nI0821 06:45:44.833735  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:44.833747  1322 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:45:44.833753  1322 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:45:44.833765  1322 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:45:44.833824  1322 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:44.834004  1322 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:45:44.834017  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.834022  1322 net.cpp:165] Memory required for data: 1333761500\nI0821 06:45:44.834031  1322 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:45:44.834041  1322 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:45:44.834048  1322 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:45:44.834055  1322 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:44.834066  1322 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:45:44.834110  1322 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:45:44.834118  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.834123  1322 net.cpp:165] Memory required for data: 1335809500\nI0821 06:45:44.834128  1322 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:45:44.834141  1322 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:45:44.834146  1322 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:45:44.834153  1322 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:45:44.834163  1322 net.cpp:150] Setting up L3_b4_relu\nI0821 06:45:44.834170  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.834175  1322 net.cpp:165] Memory required for data: 1337857500\nI0821 06:45:44.834180  1322 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.834187  1322 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.834192  1322 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:45:44.834200  1322 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:44.834210  1322 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:44.834260  1322 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:44.834273  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.834280  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.834285  1322 net.cpp:165] Memory required for data: 1341953500\nI0821 06:45:44.834290  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:45:44.834305  1322 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:45:44.834311  1322 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:44.834321  1322 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:45:44.835346  1322 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:45:44.835361  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.835366  1322 net.cpp:165] Memory required for data: 1344001500\nI0821 06:45:44.835376  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:45:44.835388  1322 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:45:44.835394  1322 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:45:44.835407  1322 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:45:44.835677  1322 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:45:44.835690  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.835695  1322 net.cpp:165] Memory required for data: 1346049500\nI0821 06:45:44.835705  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:44.835714  1322 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:45:44.835721  1322 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:45:44.835731  1322 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.835791  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:44.835959  1322 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:45:44.835973  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.835978  1322 net.cpp:165] Memory required for data: 1348097500\nI0821 06:45:44.835986  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:45:44.835994  1322 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:45:44.836001  1322 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:45:44.836012  1322 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:44.836024  1322 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:45:44.836030  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.836035  1322 net.cpp:165] Memory required for data: 1350145500\nI0821 06:45:44.836040  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:45:44.836053  1322 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:45:44.836060  1322 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:45:44.836078  1322 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:45:44.837106  1322 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:45:44.837124  1322 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:44.837129  1322 net.cpp:165] Memory required for data: 1352193500\nI0821 06:45:44.837138  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:45:44.837147  1322 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:45:44.837153  1322 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:45:44.837162  1322 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:45:44.837431  1322 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:45:44.837445  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837450  1322 net.cpp:165] Memory required for data: 1354241500\nI0821 06:45:44.837460  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:44.837473  1322 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:45:44.837479  1322 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:45:44.837489  1322 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:45:44.837549  1322 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:44.837710  1322 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:45:44.837723  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837728  1322 net.cpp:165] Memory required for data: 1356289500\nI0821 06:45:44.837736  1322 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:45:44.837745  1322 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:45:44.837751  1322 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:45:44.837759  1322 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:44.837770  1322 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:45:44.837803  1322 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:45:44.837818  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837823  1322 net.cpp:165] Memory required for data: 1358337500\nI0821 06:45:44.837833  1322 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:45:44.837844  1322 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:45:44.837851  1322 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:45:44.837857  1322 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:45:44.837867  1322 net.cpp:150] Setting up L3_b5_relu\nI0821 06:45:44.837874  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837879  1322 net.cpp:165] Memory required for data: 1360385500\nI0821 06:45:44.837883  1322 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.837890  1322 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.837896  1322 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:45:44.837903  1322 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:44.837913  1322 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:44.837963  1322 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:44.837975  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837981  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.837986  1322 net.cpp:165] Memory required for data: 1364481500\nI0821 06:45:44.837991  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:45:44.838006  1322 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:45:44.838012  1322 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:44.838021  1322 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:45:44.839051  1322 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:45:44.839066  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.839071  1322 net.cpp:165] Memory required for data: 1366529500\nI0821 06:45:44.839079  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:45:44.839098  
1322 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:45:44.839105  1322 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:45:44.839118  1322 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:45:44.839392  1322 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:45:44.839406  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.839409  1322 net.cpp:165] Memory required for data: 1368577500\nI0821 06:45:44.839421  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:44.839428  1322 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:45:44.839434  1322 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:45:44.839445  1322 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.839506  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:44.839668  1322 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:45:44.839680  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.839685  1322 net.cpp:165] Memory required for data: 1370625500\nI0821 06:45:44.839695  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:45:44.839704  1322 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:45:44.839710  1322 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:45:44.839720  1322 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:44.839730  1322 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:45:44.839737  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.839742  1322 net.cpp:165] Memory required for data: 1372673500\nI0821 06:45:44.839746  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:45:44.839761  1322 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:45:44.839766  1322 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:45:44.839777  1322 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:45:44.840808  1322 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:45:44.840823  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.840833  1322 net.cpp:165] Memory required for data: 1374721500\nI0821 06:45:44.840842  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:45:44.840852  1322 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:45:44.840858  1322 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:45:44.840869  1322 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:45:44.841146  1322 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:45:44.841162  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841167  1322 net.cpp:165] Memory required for data: 1376769500\nI0821 06:45:44.841178  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:44.841187  1322 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:45:44.841193  1322 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:45:44.841202  1322 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:45:44.841260  1322 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:44.841424  1322 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:45:44.841437  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841441  1322 net.cpp:165] Memory required for data: 1378817500\nI0821 06:45:44.841450  1322 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:45:44.841459  1322 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:45:44.841470  1322 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:45:44.841476  1322 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:44.841485  1322 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:45:44.841523  1322 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:45:44.841536  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841539  1322 net.cpp:165] Memory required for data: 1380865500\nI0821 06:45:44.841545  1322 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:45:44.841553  1322 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:45:44.841567  1322 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:45:44.841574  1322 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:45:44.841584  1322 net.cpp:150] Setting up L3_b6_relu\nI0821 06:45:44.841591  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841595  1322 net.cpp:165] Memory required for data: 1382913500\nI0821 06:45:44.841600  1322 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.841608  1322 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.841612  1322 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:45:44.841624  1322 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:44.841634  1322 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:44.841681  1322 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:44.841693  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841699  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.841704  1322 net.cpp:165] Memory required for data: 1387009500\nI0821 06:45:44.841709  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:45:44.841725  1322 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:45:44.841732  1322 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:44.841742  1322 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:45:44.842777  1322 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:45:44.842792  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.842797  1322 net.cpp:165] Memory required for data: 1389057500\nI0821 06:45:44.842805  1322 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:45:44.842815  1322 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:45:44.842821  1322 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:45:44.842839  1322 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:45:44.843111  1322 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:45:44.843123  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.843128  1322 net.cpp:165] Memory required for data: 1391105500\nI0821 06:45:44.843138  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:44.843147  1322 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:45:44.843153  1322 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:45:44.843165  1322 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.843227  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:44.843387  1322 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:45:44.843400  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.843405  1322 net.cpp:165] Memory required for data: 1393153500\nI0821 06:45:44.843415  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:45:44.843453  1322 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:45:44.843462  1322 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:45:44.843471  1322 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:44.843480  1322 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:45:44.843488  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.843492  1322 net.cpp:165] Memory required for data: 1395201500\nI0821 06:45:44.843498  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:45:44.843509  1322 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:45:44.843515  1322 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:45:44.843523  1322 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:45:44.844555  1322 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:45:44.844570  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.844575  1322 net.cpp:165] Memory required for data: 1397249500\nI0821 06:45:44.844584  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:45:44.844604  1322 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:45:44.844611  1322 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:45:44.844622  1322 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:45:44.844897  1322 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:45:44.844909  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.844914  1322 net.cpp:165] Memory required for data: 1399297500\nI0821 06:45:44.844925  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:44.844933  1322 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:45:44.844940  1322 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:45:44.844950  1322 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:45:44.845012  1322 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:44.845173  1322 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:45:44.845186  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.845191  1322 net.cpp:165] Memory required for data: 1401345500\nI0821 06:45:44.845201  1322 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:45:44.845212  1322 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:45:44.845218  1322 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:45:44.845227  1322 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:44.845234  1322 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:45:44.845271  1322 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:45:44.845283  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.845288  1322 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:45:44.845293  1322 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:45:44.845301  1322 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:45:44.845307  1322 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:45:44.845317  1322 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:45:44.845327  1322 net.cpp:150] Setting up L3_b7_relu\nI0821 06:45:44.845335  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.845340  1322 net.cpp:165] Memory required for data: 1405441500\nI0821 06:45:44.845343  1322 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.845351  1322 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.845356  1322 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:45:44.845365  1322 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:44.845373  1322 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:44.845424  1322 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:44.845437  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.845443  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.845448  1322 net.cpp:165] Memory required for data: 1409537500\nI0821 06:45:44.845453  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:45:44.845463  1322 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:45:44.845469  1322 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:44.845481  1322 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:45:44.847489  1322 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:45:44.847506  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.847512  1322 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:45:44.847522  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:45:44.847533  1322 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:45:44.847540  1322 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:45:44.847549  1322 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:45:44.847826  1322 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:45:44.847844  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.847856  1322 net.cpp:165] Memory required for data: 1413633500\nI0821 06:45:44.847868  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:44.847879  1322 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:45:44.847887  1322 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:45:44.847894  1322 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.847960  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:44.848124  1322 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:45:44.848137  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.848142  1322 net.cpp:165] Memory required for data: 1415681500\nI0821 06:45:44.848151  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:45:44.848162  1322 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:45:44.848168  1322 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:45:44.848176  1322 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:44.848186  1322 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:45:44.848193  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.848197  1322 net.cpp:165] Memory required for data: 1417729500\nI0821 06:45:44.848202  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:45:44.848217  1322 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:45:44.848222  1322 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:45:44.848234  1322 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:45:44.849264  1322 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:45:44.849279  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.849284  1322 net.cpp:165] Memory required for data: 1419777500\nI0821 06:45:44.849293  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:45:44.849303  1322 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:45:44.849309  1322 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:45:44.849323  1322 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:45:44.849601  1322 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:45:44.849614  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.849619  1322 net.cpp:165] Memory required for data: 1421825500\nI0821 06:45:44.849629  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:44.849638  1322 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:45:44.849644  1322 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:45:44.849653  1322 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:45:44.849714  1322 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:44.849882  1322 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:45:44.849898  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.849903  1322 net.cpp:165] Memory required for data: 1423873500\nI0821 06:45:44.849912  1322 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:45:44.849922  1322 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:45:44.849928  1322 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:45:44.849936  1322 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:44.849943  1322 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:45:44.849982  1322 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:45:44.849993  1322 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:44.849998  1322 net.cpp:165] Memory required for data: 1425921500\nI0821 06:45:44.850003  1322 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:45:44.850010  1322 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:45:44.850016  1322 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:45:44.850024  1322 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:45:44.850033  1322 net.cpp:150] Setting up L3_b8_relu\nI0821 06:45:44.850040  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.850045  1322 net.cpp:165] Memory required for data: 1427969500\nI0821 06:45:44.850056  1322 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.850064  1322 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.850069  1322 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:45:44.850080  1322 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:44.850090  1322 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:44.850137  1322 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:44.850148  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.850155  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.850160  1322 net.cpp:165] Memory required for data: 1432065500\nI0821 06:45:44.850165  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:45:44.850180  1322 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:45:44.850188  1322 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:44.850196  1322 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:45:44.851224  1322 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:45:44.851239  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:45:44.851244  1322 net.cpp:165] Memory required for data: 1434113500\nI0821 06:45:44.851253  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:45:44.851264  1322 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:45:44.851271  1322 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:45:44.851280  1322 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:45:44.851559  1322 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:45:44.851572  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.851577  1322 net.cpp:165] Memory required for data: 1436161500\nI0821 06:45:44.851588  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:44.851599  1322 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:45:44.851605  1322 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:45:44.851613  1322 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.851678  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:44.851845  1322 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:45:44.851860  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.851864  1322 net.cpp:165] Memory required for data: 1438209500\nI0821 06:45:44.851872  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:45:44.851883  1322 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:45:44.851891  1322 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:45:44.851900  1322 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:44.851910  1322 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:45:44.851917  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.851922  1322 net.cpp:165] Memory required for data: 1440257500\nI0821 06:45:44.851927  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:45:44.851938  1322 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:45:44.851943  1322 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:45:44.851954  1322 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:45:44.852984  1322 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:45:44.852998  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.853003  1322 net.cpp:165] Memory required for data: 1442305500\nI0821 06:45:44.853013  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:45:44.853026  1322 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:45:44.853034  1322 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:45:44.853042  1322 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:45:44.853317  1322 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:45:44.853330  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.853341  1322 net.cpp:165] Memory required for data: 1444353500\nI0821 06:45:44.853353  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:44.853361  1322 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:45:44.853368  1322 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:45:44.853375  1322 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:45:44.853440  1322 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:44.853597  1322 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:45:44.853617  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.853622  1322 net.cpp:165] Memory required for data: 1446401500\nI0821 06:45:44.853631  1322 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:45:44.853641  1322 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:45:44.853647  1322 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:45:44.853654  1322 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:44.853662  1322 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:45:44.853700  1322 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:45:44.853711  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.853716  1322 net.cpp:165] Memory required for data: 1448449500\nI0821 06:45:44.853721  1322 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:45:44.853729  1322 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:45:44.853735  1322 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:45:44.853742  1322 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:45:44.853752  1322 net.cpp:150] Setting up L3_b9_relu\nI0821 06:45:44.853759  1322 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:44.853763  1322 net.cpp:165] Memory required for data: 1450497500\nI0821 06:45:44.853768  1322 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:45:44.853780  1322 net.cpp:100] Creating Layer post_pool\nI0821 06:45:44.853785  1322 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:45:44.853793  1322 net.cpp:408] post_pool -> post_pool\nI0821 06:45:44.853834  1322 net.cpp:150] Setting up post_pool\nI0821 06:45:44.853845  1322 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:45:44.853850  1322 net.cpp:165] Memory required for data: 1450529500\nI0821 06:45:44.853855  1322 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:45:44.853868  1322 net.cpp:100] Creating Layer post_FC\nI0821 06:45:44.853873  1322 net.cpp:434] post_FC <- post_pool\nI0821 06:45:44.853885  1322 net.cpp:408] post_FC -> post_FC_top\nI0821 06:45:44.854055  1322 net.cpp:150] Setting up post_FC\nI0821 06:45:44.854068  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.854074  1322 net.cpp:165] Memory required for data: 1450534500\nI0821 06:45:44.854082  1322 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:45:44.854094  1322 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:45:44.854100  1322 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:45:44.854107  1322 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:45:44.854120  1322 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:45:44.854171  1322 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:45:44.854182  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.854189  1322 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:44.854193  1322 net.cpp:165] Memory required for data: 1450544500\nI0821 06:45:44.854198  1322 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:45:44.854207  1322 net.cpp:100] Creating Layer accuracy\nI0821 06:45:44.854213  1322 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:45:44.854220  1322 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:45:44.854228  1322 net.cpp:408] accuracy -> accuracy\nI0821 06:45:44.854241  1322 net.cpp:150] Setting up accuracy\nI0821 06:45:44.854249  1322 net.cpp:157] Top shape: (1)\nI0821 06:45:44.854259  1322 net.cpp:165] Memory required for data: 1450544504\nI0821 06:45:44.854265  1322 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:44.854275  1322 net.cpp:100] Creating Layer loss\nI0821 06:45:44.854281  1322 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:45:44.854288  1322 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:45:44.854295  1322 net.cpp:408] loss -> loss\nI0821 06:45:44.854307  1322 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:44.854434  1322 net.cpp:150] Setting up loss\nI0821 06:45:44.854449  1322 net.cpp:157] Top shape: (1)\nI0821 06:45:44.854454  1322 net.cpp:160]     with loss weight 1\nI0821 06:45:44.854470  1322 net.cpp:165] Memory required for data: 1450544508\nI0821 06:45:44.854476  1322 net.cpp:226] loss needs backward computation.\nI0821 06:45:44.854482  1322 net.cpp:228] accuracy does not need backward computation.\nI0821 06:45:44.854488  1322 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:45:44.854495  1322 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:45:44.854499  1322 net.cpp:226] post_pool needs backward computation.\nI0821 06:45:44.854504  1322 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:45:44.854509  1322 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.854514  1322 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.854519  1322 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.854524  1322 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.854529  1322 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.854533  1322 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.854538  1322 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.854543  1322 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.854548  1322 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:45:44.854553  1322 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:45:44.854558  1322 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.854564  1322 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.854569  1322 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.854574  1322 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.854579  1322 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.854584  1322 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:45:44.854589  1322 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.854594  1322 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.854599  1322 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:45:44.854604  1322 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:45:44.854609  1322 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:45:44.854614  1322 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.854619  1322 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.854624  1322 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.854629  1322 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.854635  1322 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.854640  1322 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.854645  1322 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.854650  1322 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:45:44.854655  1322 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:45:44.854660  1322 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.854665  1322 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.854671  1322 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.854682  1322 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.854687  1322 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.854692  1322 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.854697  1322 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.854703  1322 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.854708  1322 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:45:44.854713  1322 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:45:44.854718  1322 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:45:44.854724  1322 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.854729  1322 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.854734  1322 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.854740  1322 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.854745  1322 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.854749  1322 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.854755  1322 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.854760  1322 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:45:44.854766  1322 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:45:44.854771  1322 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.854779  1322 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.854785  1322 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.854790  1322 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.854795  1322 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.854800  1322 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.854806  1322 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.854811  1322 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.854816  1322 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:45:44.854821  1322 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:45:44.854826  1322 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:45:44.854838  1322 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.854845  1322 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.854849  1322 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.854854  1322 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.854861  1322 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.854866 
 1322 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.854871  1322 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.854876  1322 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:45:44.854882  1322 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:45:44.854887  1322 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.854892  1322 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.854897  1322 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.854903  1322 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.854908  1322 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.854913  1322 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.854918  1322 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.854923  1322 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.854929  1322 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:45:44.854934  1322 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:45:44.854946  1322 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:45:44.854951  1322 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:45:44.854957  1322 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.854964  1322 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:45:44.854969  1322 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.854974  1322 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.854979  1322 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.854984  1322 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.854990  1322 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:45:44.854995  1322 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.855000  1322 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.855005  1322 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:45:44.855010  1322 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:45:44.855016  1322 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.855022  1322 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.855027  1322 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.855032  1322 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.855038  1322 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.855043  1322 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.855048  1322 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.855053  1322 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.855059  1322 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:45:44.855064  1322 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:45:44.855070  1322 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.855077  1322 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.855082  1322 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.855087  1322 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.855093  1322 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.855098  1322 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:45:44.855103  1322 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.855108  1322 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.855114  1322 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:45:44.855120  1322 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:45:44.855125  1322 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:45:44.855131  1322 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.855136  1322 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.855141  1322 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.855151  1322 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.855157  1322 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.855162  1322 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.855168  1322 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.855175  1322 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:45:44.855180  1322 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:45:44.855185  1322 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.855191  1322 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.855196  1322 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.855201  1322 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.855207  1322 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.855217  1322 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.855223  1322 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.855229  1322 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.855234  1322 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:45:44.855240  1322 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:45:44.855245  1322 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:45:44.855252  1322 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.855257  1322 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.855262  1322 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.855268  1322 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.855273  1322 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.855278  1322 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.855283  1322 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.855289  1322 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:45:44.855294  1322 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:45:44.855300  1322 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.855306  1322 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.855311  1322 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.855317  1322 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.855322  1322 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.855329  1322 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.855334  1322 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.855340  1322 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.855345  1322 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:45:44.855350  1322 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:45:44.855355  1322 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:45:44.855362  1322 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.855367  1322 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.855373  1322 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.855378  1322 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.855384  1322 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.855389  1322 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.855396  1322 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.855401  1322 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:45:44.855406  1322 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:45:44.855412  1322 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.855418  1322 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.855423  1322 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.855429  1322 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.855434  1322 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.855440  1322 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.855445  1322 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.855451  1322 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.855456  1322 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:45:44.855463  1322 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:45:44.855469  1322 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:45:44.855474  1322 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:45:44.855484  1322 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.855491  1322 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:45:44.855496  1322 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.855502  1322 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.855509  1322 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.855515  1322 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.855520  1322 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:45:44.855525  1322 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.855530  1322 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.855536  1322 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:45:44.855542  1322 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:45:44.855547  1322 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:45:44.855553  1322 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:45:44.855559  1322 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:45:44.855564  1322 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:45:44.855571  1322 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:45:44.855576  1322 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:45:44.855581  1322 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:45:44.855587  1322 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:45:44.855592  1322 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:45:44.855598  1322 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:45:44.855603  1322 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:45:44.855610  1322 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:45:44.855617  1322 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:45:44.855621  1322 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:45:44.855628  1322 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:45:44.855633  1322 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:45:44.855638  1322 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:45:44.855644  1322 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:45:44.855649  1322 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:45:44.855655  1322 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:45:44.855661  1322 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:45:44.855667  1322 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:45:44.855674  1322 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:45:44.855679  1322 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:45:44.855684  1322 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:45:44.855690  1322 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:45:44.855695  1322 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:45:44.855701  1322 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:45:44.855707  1322 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:45:44.855713  1322 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:45:44.855718  1322 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:45:44.855726  1322 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:45:44.855731  1322 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:45:44.855736  1322 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:45:44.855742  1322 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:45:44.855747  1322 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:45:44.855757  1322 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:45:44.855763  1322 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:45:44.855769  1322 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:45:44.855775  1322 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:45:44.855782  1322 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:45:44.855787  1322 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:45:44.855792  1322 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:45:44.855798  1322 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:45:44.855804  1322 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:45:44.855809  1322 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:45:44.855815  1322 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:45:44.855820  1322 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:45:44.855826  1322 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:45:44.855837  1322 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:45:44.855844  1322 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:45:44.855854  1322 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:45:44.855859  1322 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:45:44.855865  1322 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:45:44.855871  1322 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:45:44.855877  1322 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:45:44.855882  1322 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:45:44.855888  1322 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:45:44.855895  1322 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:45:44.855901  1322 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:45:44.855906  1322 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:45:44.855913  1322 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:45:44.855918  1322 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:45:44.855924  1322 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:45:44.855931  1322 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:45:44.855937  1322 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:45:44.855942  1322 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:45:44.855947  1322 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:45:44.855953  1322 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:45:44.855959  1322 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:45:44.855965  1322 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:45:44.855971  1322 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:45:44.855978  1322 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:45:44.855983  1322 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:45:44.855989  1322 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:45:44.855994  1322 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:45:44.856000  1322 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:45:44.856006  1322 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:45:44.856012  1322 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:45:44.856017  1322 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:45:44.856024  1322 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:45:44.856030  1322 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:45:44.856035  1322 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:45:44.856048  1322 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:45:44.856055  1322 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:45:44.856060  1322 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:45:44.856066  1322 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:45:44.856072  1322 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:45:44.856078  1322 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:45:44.856083  1322 net.cpp:226] pre_relu needs backward computation.\nI0821 06:45:44.856089  1322 net.cpp:226] pre_scale needs backward computation.\nI0821 06:45:44.856094  1322 net.cpp:226] pre_bn needs backward computation.\nI0821 06:45:44.856099  1322 net.cpp:226] pre_conv needs backward computation.\nI0821 06:45:44.856106  1322 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:45:44.856113  1322 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:45:44.856117  1322 net.cpp:270] This network produces output accuracy\nI0821 06:45:44.856124  1322 net.cpp:270] This network produces output loss\nI0821 06:45:44.856451  1322 net.cpp:283] Network initialization done.\nI0821 06:45:44.857458  1322 solver.cpp:60] Solver scaffolding done.\nI0821 06:45:45.078279  1322 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:45:45.434201  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:45.434272  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:45.441529  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:45.672832  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:45.672924  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:45.707324  1322 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:45:45.707404  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:46.153261  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:46.153345  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:46.161286  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:46.406219  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:46.406354  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:46.458402  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:46.458537  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:46.982973  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:46.983029  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:46.991497  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:47.255676  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:47.255805  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:47.325784  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:47.325919  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:47.408339  1322 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:45:47.887912  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:47.887990  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:45:47.897665  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:48.184541  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:48.184731  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:48.275851  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:48.276038  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:48.926133  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:48.926216  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:48.936743  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:49.257308  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:49.257536  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:49.369606  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:49.369822  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:50.090446  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:50.090522  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:50.101999  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:50.445216  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:50.445471  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:50.577230  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:50.577476  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:45:51.367943  1322 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:51.368006  1322 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:51.380623  1322 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:51.427232  1349 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:51.480139  1328 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:51.794675  1322 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:51.794956  1322 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:51.946218  1322 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:51.946485  1322 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:52.118022  1322 parallel.cpp:425] Starting Optimization\nI0821 06:45:52.119841  1322 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:45:52.119868  1322 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:45:52.123916  1322 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:47:13.859014  1322 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:47:13.859310  1322 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:47:17.949518  1322 solver.cpp:228] Iteration 0, loss = 3.4719\nI0821 06:47:17.949571  1322 solver.cpp:244]     Train net output #0: accuracy = 0.096\nI0821 06:47:17.949589  1322 solver.cpp:244]     Train net output #1: loss = 3.4719 (* 1 = 3.4719 loss)\nI0821 06:47:17.949750  1322 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:49:35.466806  1322 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:50:57.388710  1322 solver.cpp:404]     Test net output #0: accuracy = 0.44236\nI0821 06:50:57.389008  1322 solver.cpp:404]     Test net output #1: loss = 1.55819 (* 1 = 1.55819 loss)\nI0821 06:50:58.710096  1322 
solver.cpp:228] Iteration 100, loss = 1.2425\nI0821 06:50:58.710151  1322 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0821 06:50:58.710168  1322 solver.cpp:244]     Train net output #1: loss = 1.2425 (* 1 = 1.2425 loss)\nI0821 06:50:58.796037  1322 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:53:16.154793  1322 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:54:38.072284  1322 solver.cpp:404]     Test net output #0: accuracy = 0.56204\nI0821 06:54:38.072561  1322 solver.cpp:404]     Test net output #1: loss = 1.22864 (* 1 = 1.22864 loss)\nI0821 06:54:39.393896  1322 solver.cpp:228] Iteration 200, loss = 0.871915\nI0821 06:54:39.393950  1322 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0821 06:54:39.393967  1322 solver.cpp:244]     Train net output #1: loss = 0.871915 (* 1 = 0.871915 loss)\nI0821 06:54:39.476297  1322 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 06:56:56.825026  1322 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 06:58:18.716042  1322 solver.cpp:404]     Test net output #0: accuracy = 0.5902\nI0821 06:58:18.716325  1322 solver.cpp:404]     Test net output #1: loss = 1.3408 (* 1 = 1.3408 loss)\nI0821 06:58:20.037920  1322 solver.cpp:228] Iteration 300, loss = 0.778692\nI0821 06:58:20.037976  1322 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 06:58:20.037992  1322 solver.cpp:244]     Train net output #1: loss = 0.778692 (* 1 = 0.778692 loss)\nI0821 06:58:20.118435  1322 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:00:37.396327  1322 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:01:59.277076  1322 solver.cpp:404]     Test net output #0: accuracy = 0.65724\nI0821 07:01:59.277348  1322 solver.cpp:404]     Test net output #1: loss = 1.06019 (* 1 = 1.06019 loss)\nI0821 07:02:00.599812  1322 solver.cpp:228] Iteration 400, loss = 0.633324\nI0821 07:02:00.599864  1322 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 07:02:00.599879  
1322 solver.cpp:244]     Train net output #1: loss = 0.633324 (* 1 = 0.633324 loss)\nI0821 07:02:00.684023  1322 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:04:17.935041  1322 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:05:39.806064  1322 solver.cpp:404]     Test net output #0: accuracy = 0.737\nI0821 07:05:39.806345  1322 solver.cpp:404]     Test net output #1: loss = 0.84585 (* 1 = 0.84585 loss)\nI0821 07:05:41.128484  1322 solver.cpp:228] Iteration 500, loss = 0.557278\nI0821 07:05:41.128536  1322 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 07:05:41.128552  1322 solver.cpp:244]     Train net output #1: loss = 0.557278 (* 1 = 0.557278 loss)\nI0821 07:05:41.213225  1322 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:07:58.440665  1322 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:09:20.302783  1322 solver.cpp:404]     Test net output #0: accuracy = 0.6874\nI0821 07:09:20.303091  1322 solver.cpp:404]     Test net output #1: loss = 1.02948 (* 1 = 1.02948 loss)\nI0821 07:09:21.625195  1322 solver.cpp:228] Iteration 600, loss = 0.412397\nI0821 07:09:21.625250  1322 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 07:09:21.625267  1322 solver.cpp:244]     Train net output #1: loss = 0.412397 (* 1 = 0.412397 loss)\nI0821 07:09:21.706725  1322 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:11:38.924726  1322 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:13:00.773689  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7172\nI0821 07:13:00.773968  1322 solver.cpp:404]     Test net output #1: loss = 0.938845 (* 1 = 0.938845 loss)\nI0821 07:13:02.095597  1322 solver.cpp:228] Iteration 700, loss = 0.337059\nI0821 07:13:02.095649  1322 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 07:13:02.095666  1322 solver.cpp:244]     Train net output #1: loss = 0.337059 (* 1 = 0.337059 loss)\nI0821 07:13:02.184448  1322 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0821 07:15:19.520380  1322 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:16:41.508333  1322 solver.cpp:404]     Test net output #0: accuracy = 0.74572\nI0821 07:16:41.508656  1322 solver.cpp:404]     Test net output #1: loss = 0.789542 (* 1 = 0.789542 loss)\nI0821 07:16:42.831288  1322 solver.cpp:228] Iteration 800, loss = 0.318813\nI0821 07:16:42.831341  1322 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 07:16:42.831357  1322 solver.cpp:244]     Train net output #1: loss = 0.318813 (* 1 = 0.318813 loss)\nI0821 07:16:42.917096  1322 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:19:00.166616  1322 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:20:22.031860  1322 solver.cpp:404]     Test net output #0: accuracy = 0.65804\nI0821 07:20:22.032160  1322 solver.cpp:404]     Test net output #1: loss = 1.24921 (* 1 = 1.24921 loss)\nI0821 07:20:23.353830  1322 solver.cpp:228] Iteration 900, loss = 0.347024\nI0821 07:20:23.353883  1322 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:20:23.353899  1322 solver.cpp:244]     Train net output #1: loss = 0.347024 (* 1 = 0.347024 loss)\nI0821 07:20:23.438391  1322 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:22:40.863824  1322 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:24:02.735930  1322 solver.cpp:404]     Test net output #0: accuracy = 0.70812\nI0821 07:24:02.736258  1322 solver.cpp:404]     Test net output #1: loss = 0.975334 (* 1 = 0.975334 loss)\nI0821 07:24:04.059612  1322 solver.cpp:228] Iteration 1000, loss = 0.215514\nI0821 07:24:04.059664  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 07:24:04.059680  1322 solver.cpp:244]     Train net output #1: loss = 0.215514 (* 1 = 0.215514 loss)\nI0821 07:24:04.146328  1322 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:26:21.437830  1322 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:27:43.310602  1322 solver.cpp:404]     Test net 
output #0: accuracy = 0.80276\nI0821 07:27:43.310901  1322 solver.cpp:404]     Test net output #1: loss = 0.653668 (* 1 = 0.653668 loss)\nI0821 07:27:44.632982  1322 solver.cpp:228] Iteration 1100, loss = 0.253223\nI0821 07:27:44.633043  1322 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:27:44.633059  1322 solver.cpp:244]     Train net output #1: loss = 0.253223 (* 1 = 0.253223 loss)\nI0821 07:27:44.721297  1322 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:30:02.072295  1322 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:31:23.945583  1322 solver.cpp:404]     Test net output #0: accuracy = 0.78364\nI0821 07:31:23.945863  1322 solver.cpp:404]     Test net output #1: loss = 0.688506 (* 1 = 0.688506 loss)\nI0821 07:31:25.268265  1322 solver.cpp:228] Iteration 1200, loss = 0.297279\nI0821 07:31:25.268318  1322 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:31:25.268333  1322 solver.cpp:244]     Train net output #1: loss = 0.297279 (* 1 = 0.297279 loss)\nI0821 07:31:25.353782  1322 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:33:42.672579  1322 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:35:04.553666  1322 solver.cpp:404]     Test net output #0: accuracy = 0.79432\nI0821 07:35:04.553949  1322 solver.cpp:404]     Test net output #1: loss = 0.67437 (* 1 = 0.67437 loss)\nI0821 07:35:05.877180  1322 solver.cpp:228] Iteration 1300, loss = 0.215681\nI0821 07:35:05.877233  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 07:35:05.877248  1322 solver.cpp:244]     Train net output #1: loss = 0.215681 (* 1 = 0.215681 loss)\nI0821 07:35:05.960422  1322 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:37:23.223237  1322 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:38:45.088723  1322 solver.cpp:404]     Test net output #0: accuracy = 0.76336\nI0821 07:38:45.089004  1322 solver.cpp:404]     Test net output #1: loss = 0.917429 (* 1 = 0.917429 
loss)\nI0821 07:38:46.412181  1322 solver.cpp:228] Iteration 1400, loss = 0.213972\nI0821 07:38:46.412231  1322 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 07:38:46.412246  1322 solver.cpp:244]     Train net output #1: loss = 0.213972 (* 1 = 0.213972 loss)\nI0821 07:38:46.503782  1322 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:41:03.745219  1322 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:42:25.620576  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI0821 07:42:25.620856  1322 solver.cpp:404]     Test net output #1: loss = 0.799433 (* 1 = 0.799433 loss)\nI0821 07:42:26.943076  1322 solver.cpp:228] Iteration 1500, loss = 0.270158\nI0821 07:42:26.943125  1322 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 07:42:26.943141  1322 solver.cpp:244]     Train net output #1: loss = 0.270158 (* 1 = 0.270158 loss)\nI0821 07:42:27.031054  1322 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:44:44.294790  1322 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:46:06.177830  1322 solver.cpp:404]     Test net output #0: accuracy = 0.56036\nI0821 07:46:06.178115  1322 solver.cpp:404]     Test net output #1: loss = 2.61997 (* 1 = 2.61997 loss)\nI0821 07:46:07.500918  1322 solver.cpp:228] Iteration 1600, loss = 0.184897\nI0821 07:46:07.500967  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 07:46:07.500983  1322 solver.cpp:244]     Train net output #1: loss = 0.184897 (* 1 = 0.184897 loss)\nI0821 07:46:07.583730  1322 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:48:24.842180  1322 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:49:46.720883  1322 solver.cpp:404]     Test net output #0: accuracy = 0.69532\nI0821 07:49:46.721194  1322 solver.cpp:404]     Test net output #1: loss = 1.34713 (* 1 = 1.34713 loss)\nI0821 07:49:48.044071  1322 solver.cpp:228] Iteration 1700, loss = 0.25536\nI0821 07:49:48.044119  1322 solver.cpp:244]     Train net 
output #0: accuracy = 0.92\nI0821 07:49:48.044136  1322 solver.cpp:244]     Train net output #1: loss = 0.25536 (* 1 = 0.25536 loss)\nI0821 07:49:48.129598  1322 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:52:05.334053  1322 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:53:27.210057  1322 solver.cpp:404]     Test net output #0: accuracy = 0.79996\nI0821 07:53:27.210364  1322 solver.cpp:404]     Test net output #1: loss = 0.766297 (* 1 = 0.766297 loss)\nI0821 07:53:28.532105  1322 solver.cpp:228] Iteration 1800, loss = 0.152793\nI0821 07:53:28.532152  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 07:53:28.532168  1322 solver.cpp:244]     Train net output #1: loss = 0.152793 (* 1 = 0.152793 loss)\nI0821 07:53:28.614836  1322 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:55:45.794066  1322 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 07:57:07.678386  1322 solver.cpp:404]     Test net output #0: accuracy = 0.78808\nI0821 07:57:07.678669  1322 solver.cpp:404]     Test net output #1: loss = 0.839694 (* 1 = 0.839694 loss)\nI0821 07:57:08.999672  1322 solver.cpp:228] Iteration 1900, loss = 0.234224\nI0821 07:57:08.999719  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 07:57:08.999734  1322 solver.cpp:244]     Train net output #1: loss = 0.234224 (* 1 = 0.234224 loss)\nI0821 07:57:09.080771  1322 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 07:59:26.371302  1322 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 08:00:48.245261  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0821 08:00:48.245563  1322 solver.cpp:404]     Test net output #1: loss = 1.19935 (* 1 = 1.19935 loss)\nI0821 08:00:49.566108  1322 solver.cpp:228] Iteration 2000, loss = 0.150829\nI0821 08:00:49.566157  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:00:49.566174  1322 solver.cpp:244]     Train net output #1: loss = 0.150829 (* 1 = 0.150829 loss)\nI0821 
08:00:49.652195  1322 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:03:06.913297  1322 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:04:28.790696  1322 solver.cpp:404]     Test net output #0: accuracy = 0.74372\nI0821 08:04:28.791013  1322 solver.cpp:404]     Test net output #1: loss = 1.10799 (* 1 = 1.10799 loss)\nI0821 08:04:30.112007  1322 solver.cpp:228] Iteration 2100, loss = 0.165837\nI0821 08:04:30.112058  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:04:30.112074  1322 solver.cpp:244]     Train net output #1: loss = 0.165837 (* 1 = 0.165837 loss)\nI0821 08:04:30.199859  1322 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:06:47.551380  1322 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:08:09.424743  1322 solver.cpp:404]     Test net output #0: accuracy = 0.71904\nI0821 08:08:09.425114  1322 solver.cpp:404]     Test net output #1: loss = 1.24939 (* 1 = 1.24939 loss)\nI0821 08:08:10.745828  1322 solver.cpp:228] Iteration 2200, loss = 0.110407\nI0821 08:08:10.745879  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 08:08:10.745894  1322 solver.cpp:244]     Train net output #1: loss = 0.110407 (* 1 = 0.110407 loss)\nI0821 08:08:10.832995  1322 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:10:28.109297  1322 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:11:49.997414  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75668\nI0821 08:11:49.997715  1322 solver.cpp:404]     Test net output #1: loss = 0.929739 (* 1 = 0.929739 loss)\nI0821 08:11:51.318840  1322 solver.cpp:228] Iteration 2300, loss = 0.144312\nI0821 08:11:51.318888  1322 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:11:51.318903  1322 solver.cpp:244]     Train net output #1: loss = 0.144312 (* 1 = 0.144312 loss)\nI0821 08:11:51.404875  1322 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:14:08.645408  1322 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0821 08:15:30.528151  1322 solver.cpp:404]     Test net output #0: accuracy = 0.81536\nI0821 08:15:30.528455  1322 solver.cpp:404]     Test net output #1: loss = 0.6102 (* 1 = 0.6102 loss)\nI0821 08:15:31.849635  1322 solver.cpp:228] Iteration 2400, loss = 0.101786\nI0821 08:15:31.849683  1322 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 08:15:31.849699  1322 solver.cpp:244]     Train net output #1: loss = 0.101786 (* 1 = 0.101786 loss)\nI0821 08:15:31.937305  1322 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:17:49.158440  1322 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:19:11.038444  1322 solver.cpp:404]     Test net output #0: accuracy = 0.82192\nI0821 08:19:11.038746  1322 solver.cpp:404]     Test net output #1: loss = 0.678425 (* 1 = 0.678425 loss)\nI0821 08:19:12.359635  1322 solver.cpp:228] Iteration 2500, loss = 0.138963\nI0821 08:19:12.359684  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:19:12.359699  1322 solver.cpp:244]     Train net output #1: loss = 0.138963 (* 1 = 0.138963 loss)\nI0821 08:19:12.444875  1322 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:21:29.732674  1322 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:22:51.603991  1322 solver.cpp:404]     Test net output #0: accuracy = 0.66192\nI0821 08:22:51.604290  1322 solver.cpp:404]     Test net output #1: loss = 1.42447 (* 1 = 1.42447 loss)\nI0821 08:22:52.925122  1322 solver.cpp:228] Iteration 2600, loss = 0.190246\nI0821 08:22:52.925171  1322 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:22:52.925186  1322 solver.cpp:244]     Train net output #1: loss = 0.190246 (* 1 = 0.190246 loss)\nI0821 08:22:53.011675  1322 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:25:10.235332  1322 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:26:32.107156  1322 solver.cpp:404]     Test net output #0: accuracy = 0.68592\nI0821 08:26:32.107466  1322 solver.cpp:404]     
Test net output #1: loss = 1.3617 (* 1 = 1.3617 loss)\nI0821 08:26:33.428545  1322 solver.cpp:228] Iteration 2700, loss = 0.166088\nI0821 08:26:33.428593  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:26:33.428609  1322 solver.cpp:244]     Train net output #1: loss = 0.166088 (* 1 = 0.166088 loss)\nI0821 08:26:33.513839  1322 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:28:50.900511  1322 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:30:12.779080  1322 solver.cpp:404]     Test net output #0: accuracy = 0.55464\nI0821 08:30:12.779373  1322 solver.cpp:404]     Test net output #1: loss = 2.33381 (* 1 = 2.33381 loss)\nI0821 08:30:14.101109  1322 solver.cpp:228] Iteration 2800, loss = 0.163224\nI0821 08:30:14.101166  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:30:14.101183  1322 solver.cpp:244]     Train net output #1: loss = 0.163224 (* 1 = 0.163224 loss)\nI0821 08:30:14.181562  1322 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:32:31.416429  1322 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:33:53.296111  1322 solver.cpp:404]     Test net output #0: accuracy = 0.73168\nI0821 08:33:53.296396  1322 solver.cpp:404]     Test net output #1: loss = 0.972032 (* 1 = 0.972032 loss)\nI0821 08:33:54.618029  1322 solver.cpp:228] Iteration 2900, loss = 0.270358\nI0821 08:33:54.618080  1322 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 08:33:54.618096  1322 solver.cpp:244]     Train net output #1: loss = 0.270358 (* 1 = 0.270358 loss)\nI0821 08:33:54.705971  1322 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:36:12.162065  1322 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:37:34.029227  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75404\nI0821 08:37:34.029530  1322 solver.cpp:404]     Test net output #1: loss = 0.965939 (* 1 = 0.965939 loss)\nI0821 08:37:35.351137  1322 solver.cpp:228] Iteration 3000, loss = 0.185276\nI0821 
08:37:35.351189  1322 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:37:35.351204  1322 solver.cpp:244]     Train net output #1: loss = 0.185276 (* 1 = 0.185276 loss)\nI0821 08:37:35.437981  1322 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:39:52.938663  1322 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:41:14.811842  1322 solver.cpp:404]     Test net output #0: accuracy = 0.81468\nI0821 08:41:14.812113  1322 solver.cpp:404]     Test net output #1: loss = 0.661267 (* 1 = 0.661267 loss)\nI0821 08:41:16.133262  1322 solver.cpp:228] Iteration 3100, loss = 0.140937\nI0821 08:41:16.133313  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:41:16.133329  1322 solver.cpp:244]     Train net output #1: loss = 0.140937 (* 1 = 0.140937 loss)\nI0821 08:41:16.225885  1322 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:43:33.897325  1322 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:44:55.797592  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7666\nI0821 08:44:55.797878  1322 solver.cpp:404]     Test net output #1: loss = 0.813033 (* 1 = 0.813033 loss)\nI0821 08:44:57.120082  1322 solver.cpp:228] Iteration 3200, loss = 0.202987\nI0821 08:44:57.120132  1322 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:44:57.120148  1322 solver.cpp:244]     Train net output #1: loss = 0.202987 (* 1 = 0.202987 loss)\nI0821 08:44:57.212390  1322 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:47:14.918300  1322 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:48:36.798774  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75756\nI0821 08:48:36.799082  1322 solver.cpp:404]     Test net output #1: loss = 0.922124 (* 1 = 0.922124 loss)\nI0821 08:48:38.121778  1322 solver.cpp:228] Iteration 3300, loss = 0.216427\nI0821 08:48:38.121829  1322 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:48:38.121845  1322 solver.cpp:244]     Train net 
output #1: loss = 0.216428 (* 1 = 0.216428 loss)\nI0821 08:48:38.206610  1322 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:50:55.761487  1322 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:52:17.655966  1322 solver.cpp:404]     Test net output #0: accuracy = 0.775\nI0821 08:52:17.656268  1322 solver.cpp:404]     Test net output #1: loss = 0.781393 (* 1 = 0.781393 loss)\nI0821 08:52:18.978129  1322 solver.cpp:228] Iteration 3400, loss = 0.208974\nI0821 08:52:18.978180  1322 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:52:18.978195  1322 solver.cpp:244]     Train net output #1: loss = 0.208974 (* 1 = 0.208974 loss)\nI0821 08:52:19.070600  1322 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:54:36.644568  1322 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:55:58.539391  1322 solver.cpp:404]     Test net output #0: accuracy = 0.69768\nI0821 08:55:58.539674  1322 solver.cpp:404]     Test net output #1: loss = 1.29443 (* 1 = 1.29443 loss)\nI0821 08:55:59.861558  1322 solver.cpp:228] Iteration 3500, loss = 0.242345\nI0821 08:55:59.861610  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:55:59.861626  1322 solver.cpp:244]     Train net output #1: loss = 0.242345 (* 1 = 0.242345 loss)\nI0821 08:55:59.945628  1322 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 08:58:17.481674  1322 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 08:59:39.366582  1322 solver.cpp:404]     Test net output #0: accuracy = 0.81164\nI0821 08:59:39.366878  1322 solver.cpp:404]     Test net output #1: loss = 0.649832 (* 1 = 0.649832 loss)\nI0821 08:59:40.687677  1322 solver.cpp:228] Iteration 3600, loss = 0.174332\nI0821 08:59:40.687726  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:59:40.687742  1322 solver.cpp:244]     Train net output #1: loss = 0.174332 (* 1 = 0.174332 loss)\nI0821 08:59:40.779266  1322 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0821 
09:01:58.351730  1322 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:03:20.321560  1322 solver.cpp:404]     Test net output #0: accuracy = 0.78048\nI0821 09:03:20.321846  1322 solver.cpp:404]     Test net output #1: loss = 0.814365 (* 1 = 0.814365 loss)\nI0821 09:03:21.645062  1322 solver.cpp:228] Iteration 3700, loss = 0.177983\nI0821 09:03:21.645114  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 09:03:21.645138  1322 solver.cpp:244]     Train net output #1: loss = 0.177983 (* 1 = 0.177983 loss)\nI0821 09:03:21.731624  1322 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:05:39.283862  1322 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:07:01.323123  1322 solver.cpp:404]     Test net output #0: accuracy = 0.82616\nI0821 09:07:01.323417  1322 solver.cpp:404]     Test net output #1: loss = 0.581596 (* 1 = 0.581596 loss)\nI0821 09:07:02.645231  1322 solver.cpp:228] Iteration 3800, loss = 0.14222\nI0821 09:07:02.645283  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:07:02.645306  1322 solver.cpp:244]     Train net output #1: loss = 0.14222 (* 1 = 0.14222 loss)\nI0821 09:07:02.728126  1322 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:09:20.258185  1322 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:10:42.203868  1322 solver.cpp:404]     Test net output #0: accuracy = 0.77244\nI0821 09:10:42.204170  1322 solver.cpp:404]     Test net output #1: loss = 0.791488 (* 1 = 0.791488 loss)\nI0821 09:10:43.526608  1322 solver.cpp:228] Iteration 3900, loss = 0.206343\nI0821 09:10:43.526661  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:10:43.526684  1322 solver.cpp:244]     Train net output #1: loss = 0.206343 (* 1 = 0.206343 loss)\nI0821 09:10:43.616030  1322 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:13:00.867812  1322 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:14:22.849547  1322 solver.cpp:404]     Test net output #0: 
accuracy = 0.7106\nI0821 09:14:22.849864  1322 solver.cpp:404]     Test net output #1: loss = 1.15105 (* 1 = 1.15105 loss)\nI0821 09:14:24.171905  1322 solver.cpp:228] Iteration 4000, loss = 0.254892\nI0821 09:14:24.171957  1322 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 09:14:24.171983  1322 solver.cpp:244]     Train net output #1: loss = 0.254892 (* 1 = 0.254892 loss)\nI0821 09:14:24.253707  1322 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:16:41.436352  1322 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:18:03.396287  1322 solver.cpp:404]     Test net output #0: accuracy = 0.74996\nI0821 09:18:03.396569  1322 solver.cpp:404]     Test net output #1: loss = 0.997601 (* 1 = 0.997601 loss)\nI0821 09:18:04.718627  1322 solver.cpp:228] Iteration 4100, loss = 0.162172\nI0821 09:18:04.718675  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 09:18:04.718693  1322 solver.cpp:244]     Train net output #1: loss = 0.162172 (* 1 = 0.162172 loss)\nI0821 09:18:04.801369  1322 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:20:22.026732  1322 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:21:44.025323  1322 solver.cpp:404]     Test net output #0: accuracy = 0.69208\nI0821 09:21:44.025609  1322 solver.cpp:404]     Test net output #1: loss = 1.26272 (* 1 = 1.26272 loss)\nI0821 09:21:45.347951  1322 solver.cpp:228] Iteration 4200, loss = 0.236797\nI0821 09:21:45.348002  1322 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:21:45.348026  1322 solver.cpp:244]     Train net output #1: loss = 0.236797 (* 1 = 0.236797 loss)\nI0821 09:21:45.428436  1322 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:24:02.601761  1322 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:25:24.396666  1322 solver.cpp:404]     Test net output #0: accuracy = 0.80512\nI0821 09:25:24.396957  1322 solver.cpp:404]     Test net output #1: loss = 0.70079 (* 1 = 0.70079 loss)\nI0821 
09:25:25.718771  1322 solver.cpp:228] Iteration 4300, loss = 0.190128\nI0821 09:25:25.718824  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 09:25:25.718842  1322 solver.cpp:244]     Train net output #1: loss = 0.190128 (* 1 = 0.190128 loss)\nI0821 09:25:25.802278  1322 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:27:43.023191  1322 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:29:04.563827  1322 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0821 09:29:04.564054  1322 solver.cpp:404]     Test net output #1: loss = 1.10672 (* 1 = 1.10672 loss)\nI0821 09:29:05.884892  1322 solver.cpp:228] Iteration 4400, loss = 0.298863\nI0821 09:29:05.884943  1322 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:29:05.884960  1322 solver.cpp:244]     Train net output #1: loss = 0.298863 (* 1 = 0.298863 loss)\nI0821 09:29:05.974038  1322 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:31:23.185641  1322 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:32:44.767156  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7324\nI0821 09:32:44.767447  1322 solver.cpp:404]     Test net output #1: loss = 1.05358 (* 1 = 1.05358 loss)\nI0821 09:32:46.093771  1322 solver.cpp:228] Iteration 4500, loss = 0.295915\nI0821 09:32:46.093819  1322 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 09:32:46.093842  1322 solver.cpp:244]     Train net output #1: loss = 0.295915 (* 1 = 0.295915 loss)\nI0821 09:32:46.176992  1322 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:35:03.366714  1322 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:36:25.205415  1322 solver.cpp:404]     Test net output #0: accuracy = 0.72192\nI0821 09:36:25.205651  1322 solver.cpp:404]     Test net output #1: loss = 1.0018 (* 1 = 1.0018 loss)\nI0821 09:36:26.526685  1322 solver.cpp:228] Iteration 4600, loss = 0.206136\nI0821 09:36:26.526738  1322 solver.cpp:244]     Train net output #0: 
accuracy = 0.928\nI0821 09:36:26.526762  1322 solver.cpp:244]     Train net output #1: loss = 0.206136 (* 1 = 0.206136 loss)\nI0821 09:36:26.609863  1322 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:38:43.834954  1322 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:40:05.324983  1322 solver.cpp:404]     Test net output #0: accuracy = 0.6824\nI0821 09:40:05.325227  1322 solver.cpp:404]     Test net output #1: loss = 1.2229 (* 1 = 1.2229 loss)\nI0821 09:40:06.645524  1322 solver.cpp:228] Iteration 4700, loss = 0.287672\nI0821 09:40:06.645576  1322 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 09:40:06.645599  1322 solver.cpp:244]     Train net output #1: loss = 0.287673 (* 1 = 0.287673 loss)\nI0821 09:40:06.737841  1322 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:42:23.977820  1322 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:43:45.334831  1322 solver.cpp:404]     Test net output #0: accuracy = 0.71072\nI0821 09:43:45.335119  1322 solver.cpp:404]     Test net output #1: loss = 1.11129 (* 1 = 1.11129 loss)\nI0821 09:43:46.656131  1322 solver.cpp:228] Iteration 4800, loss = 0.259258\nI0821 09:43:46.656185  1322 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:43:46.656209  1322 solver.cpp:244]     Train net output #1: loss = 0.259258 (* 1 = 0.259258 loss)\nI0821 09:43:46.741940  1322 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:46:03.995246  1322 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:47:25.411921  1322 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI0821 09:47:25.412159  1322 solver.cpp:404]     Test net output #1: loss = 0.712305 (* 1 = 0.712305 loss)\nI0821 09:47:26.733029  1322 solver.cpp:228] Iteration 4900, loss = 0.322623\nI0821 09:47:26.733089  1322 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:47:26.733114  1322 solver.cpp:244]     Train net output #1: loss = 0.322623 (* 1 = 0.322623 loss)\nI0821 
09:47:26.821192  1322 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 09:49:43.922528  1322 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:51:04.870545  1322 solver.cpp:404]     Test net output #0: accuracy = 0.71572\nI0821 09:51:04.870815  1322 solver.cpp:404]     Test net output #1: loss = 1.29879 (* 1 = 1.29879 loss)\nI0821 09:51:06.189659  1322 solver.cpp:228] Iteration 5000, loss = 0.191167\nI0821 09:51:06.189705  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 09:51:06.189730  1322 solver.cpp:244]     Train net output #1: loss = 0.191167 (* 1 = 0.191167 loss)\nI0821 09:51:06.275600  1322 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:53:23.415350  1322 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:54:44.341543  1322 solver.cpp:404]     Test net output #0: accuracy = 0.72012\nI0821 09:54:44.341809  1322 solver.cpp:404]     Test net output #1: loss = 1.22016 (* 1 = 1.22016 loss)\nI0821 09:54:45.659818  1322 solver.cpp:228] Iteration 5100, loss = 0.250881\nI0821 09:54:45.659857  1322 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 09:54:45.659873  1322 solver.cpp:244]     Train net output #1: loss = 0.250881 (* 1 = 0.250881 loss)\nI0821 09:54:45.751781  1322 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 09:57:02.897256  1322 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 09:58:23.812789  1322 solver.cpp:404]     Test net output #0: accuracy = 0.76392\nI0821 09:58:23.813071  1322 solver.cpp:404]     Test net output #1: loss = 0.856488 (* 1 = 0.856488 loss)\nI0821 09:58:25.130887  1322 solver.cpp:228] Iteration 5200, loss = 0.21538\nI0821 09:58:25.130930  1322 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:58:25.130946  1322 solver.cpp:244]     Train net output #1: loss = 0.21538 (* 1 = 0.21538 loss)\nI0821 09:58:25.224422  1322 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 10:00:42.337213  1322 solver.cpp:337] Iteration 5300, Testing net 
(#0)\nI0821 10:02:03.256520  1322 solver.cpp:404]     Test net output #0: accuracy = 0.6478\nI0821 10:02:03.256736  1322 solver.cpp:404]     Test net output #1: loss = 1.4631 (* 1 = 1.4631 loss)\nI0821 10:02:04.574604  1322 solver.cpp:228] Iteration 5300, loss = 0.194016\nI0821 10:02:04.574637  1322 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:02:04.574652  1322 solver.cpp:244]     Train net output #1: loss = 0.194016 (* 1 = 0.194016 loss)\nI0821 10:02:04.663065  1322 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:04:21.731477  1322 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:05:42.648291  1322 solver.cpp:404]     Test net output #0: accuracy = 0.70544\nI0821 10:05:42.648561  1322 solver.cpp:404]     Test net output #1: loss = 1.187 (* 1 = 1.187 loss)\nI0821 10:05:43.966784  1322 solver.cpp:228] Iteration 5400, loss = 0.255729\nI0821 10:05:43.966825  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:05:43.966840  1322 solver.cpp:244]     Train net output #1: loss = 0.255729 (* 1 = 0.255729 loss)\nI0821 10:05:44.060127  1322 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:08:01.144737  1322 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:09:22.072563  1322 solver.cpp:404]     Test net output #0: accuracy = 0.70712\nI0821 10:09:22.072829  1322 solver.cpp:404]     Test net output #1: loss = 1.16302 (* 1 = 1.16302 loss)\nI0821 10:09:23.390447  1322 solver.cpp:228] Iteration 5500, loss = 0.157\nI0821 10:09:23.390491  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:09:23.390507  1322 solver.cpp:244]     Train net output #1: loss = 0.157 (* 1 = 0.157 loss)\nI0821 10:09:23.478058  1322 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:11:40.560061  1322 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:13:01.487857  1322 solver.cpp:404]     Test net output #0: accuracy = 0.74684\nI0821 10:13:01.488112  1322 solver.cpp:404]     Test net output 
#1: loss = 0.953932 (* 1 = 0.953932 loss)\nI0821 10:13:02.805979  1322 solver.cpp:228] Iteration 5600, loss = 0.161982\nI0821 10:13:02.806021  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:13:02.806037  1322 solver.cpp:244]     Train net output #1: loss = 0.161983 (* 1 = 0.161983 loss)\nI0821 10:13:02.893919  1322 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:15:19.903244  1322 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:16:40.830399  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75936\nI0821 10:16:40.830659  1322 solver.cpp:404]     Test net output #1: loss = 0.842647 (* 1 = 0.842647 loss)\nI0821 10:16:42.148636  1322 solver.cpp:228] Iteration 5700, loss = 0.219793\nI0821 10:16:42.148669  1322 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 10:16:42.148684  1322 solver.cpp:244]     Train net output #1: loss = 0.219793 (* 1 = 0.219793 loss)\nI0821 10:16:42.241829  1322 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:18:59.392122  1322 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:20:20.319839  1322 solver.cpp:404]     Test net output #0: accuracy = 0.78776\nI0821 10:20:20.320096  1322 solver.cpp:404]     Test net output #1: loss = 0.769657 (* 1 = 0.769657 loss)\nI0821 10:20:21.637624  1322 solver.cpp:228] Iteration 5800, loss = 0.335705\nI0821 10:20:21.637667  1322 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 10:20:21.637683  1322 solver.cpp:244]     Train net output #1: loss = 0.335706 (* 1 = 0.335706 loss)\nI0821 10:20:21.725605  1322 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:22:38.749680  1322 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:23:59.665518  1322 solver.cpp:404]     Test net output #0: accuracy = 0.77996\nI0821 10:23:59.665771  1322 solver.cpp:404]     Test net output #1: loss = 0.805164 (* 1 = 0.805164 loss)\nI0821 10:24:00.983201  1322 solver.cpp:228] Iteration 5900, loss = 0.145419\nI0821 
10:24:00.983233  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:24:00.983247  1322 solver.cpp:244]     Train net output #1: loss = 0.145419 (* 1 = 0.145419 loss)\nI0821 10:24:01.075085  1322 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:26:18.155555  1322 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:27:39.064695  1322 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0821 10:27:39.064965  1322 solver.cpp:404]     Test net output #1: loss = 1.1156 (* 1 = 1.1156 loss)\nI0821 10:27:40.383427  1322 solver.cpp:228] Iteration 6000, loss = 0.0982199\nI0821 10:27:40.383469  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:27:40.383486  1322 solver.cpp:244]     Train net output #1: loss = 0.0982201 (* 1 = 0.0982201 loss)\nI0821 10:27:40.474985  1322 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:29:57.597230  1322 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:31:18.503877  1322 solver.cpp:404]     Test net output #0: accuracy = 0.79652\nI0821 10:31:18.504148  1322 solver.cpp:404]     Test net output #1: loss = 0.758731 (* 1 = 0.758731 loss)\nI0821 10:31:19.821671  1322 solver.cpp:228] Iteration 6100, loss = 0.135496\nI0821 10:31:19.821703  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:31:19.821717  1322 solver.cpp:244]     Train net output #1: loss = 0.135496 (* 1 = 0.135496 loss)\nI0821 10:31:19.911851  1322 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:33:37.019655  1322 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:34:57.929353  1322 solver.cpp:404]     Test net output #0: accuracy = 0.76764\nI0821 10:34:57.929622  1322 solver.cpp:404]     Test net output #1: loss = 0.833091 (* 1 = 0.833091 loss)\nI0821 10:34:59.247117  1322 solver.cpp:228] Iteration 6200, loss = 0.126674\nI0821 10:34:59.247159  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:34:59.247175  1322 solver.cpp:244]     Train net 
output #1: loss = 0.126674 (* 1 = 0.126674 loss)\nI0821 10:34:59.336225  1322 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:37:16.442565  1322 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:38:37.351137  1322 solver.cpp:404]     Test net output #0: accuracy = 0.70348\nI0821 10:38:37.351390  1322 solver.cpp:404]     Test net output #1: loss = 1.16964 (* 1 = 1.16964 loss)\nI0821 10:38:38.669471  1322 solver.cpp:228] Iteration 6300, loss = 0.208697\nI0821 10:38:38.669514  1322 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 10:38:38.669531  1322 solver.cpp:244]     Train net output #1: loss = 0.208697 (* 1 = 0.208697 loss)\nI0821 10:38:38.762523  1322 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:40:55.849738  1322 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:42:16.768985  1322 solver.cpp:404]     Test net output #0: accuracy = 0.76324\nI0821 10:42:16.769234  1322 solver.cpp:404]     Test net output #1: loss = 0.851786 (* 1 = 0.851786 loss)\nI0821 10:42:18.087189  1322 solver.cpp:228] Iteration 6400, loss = 0.160517\nI0821 10:42:18.087234  1322 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:42:18.087249  1322 solver.cpp:244]     Train net output #1: loss = 0.160517 (* 1 = 0.160517 loss)\nI0821 10:42:18.175139  1322 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:44:35.186518  1322 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:45:56.105017  1322 solver.cpp:404]     Test net output #0: accuracy = 0.81464\nI0821 10:45:56.105289  1322 solver.cpp:404]     Test net output #1: loss = 0.651561 (* 1 = 0.651561 loss)\nI0821 10:45:57.423207  1322 solver.cpp:228] Iteration 6500, loss = 0.125891\nI0821 10:45:57.423240  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:45:57.423256  1322 solver.cpp:244]     Train net output #1: loss = 0.125891 (* 1 = 0.125891 loss)\nI0821 10:45:57.513696  1322 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 
10:48:14.503298  1322 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:49:35.413357  1322 solver.cpp:404]     Test net output #0: accuracy = 0.73672\nI0821 10:49:35.413622  1322 solver.cpp:404]     Test net output #1: loss = 1.09423 (* 1 = 1.09423 loss)\nI0821 10:49:36.732514  1322 solver.cpp:228] Iteration 6600, loss = 0.102348\nI0821 10:49:36.732549  1322 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:49:36.732565  1322 solver.cpp:244]     Train net output #1: loss = 0.102348 (* 1 = 0.102348 loss)\nI0821 10:49:36.821115  1322 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:51:53.867599  1322 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:53:14.779814  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0821 10:53:14.780084  1322 solver.cpp:404]     Test net output #1: loss = 0.999696 (* 1 = 0.999696 loss)\nI0821 10:53:16.097633  1322 solver.cpp:228] Iteration 6700, loss = 0.0996803\nI0821 10:53:16.097671  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:53:16.097687  1322 solver.cpp:244]     Train net output #1: loss = 0.0996804 (* 1 = 0.0996804 loss)\nI0821 10:53:16.188403  1322 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:55:33.333307  1322 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 10:56:54.240217  1322 solver.cpp:404]     Test net output #0: accuracy = 0.74916\nI0821 10:56:54.240481  1322 solver.cpp:404]     Test net output #1: loss = 1.06561 (* 1 = 1.06561 loss)\nI0821 10:56:55.558221  1322 solver.cpp:228] Iteration 6800, loss = 0.0955752\nI0821 10:56:55.558269  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:56:55.558295  1322 solver.cpp:244]     Train net output #1: loss = 0.0955753 (* 1 = 0.0955753 loss)\nI0821 10:56:55.647070  1322 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 10:59:12.635885  1322 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 11:00:33.549304  1322 solver.cpp:404]     Test net 
output #0: accuracy = 0.81388\nI0821 11:00:33.549556  1322 solver.cpp:404]     Test net output #1: loss = 0.696111 (* 1 = 0.696111 loss)\nI0821 11:00:34.868088  1322 solver.cpp:228] Iteration 6900, loss = 0.1305\nI0821 11:00:34.868126  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:00:34.868149  1322 solver.cpp:244]     Train net output #1: loss = 0.1305 (* 1 = 0.1305 loss)\nI0821 11:00:34.959733  1322 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:02:52.014914  1322 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:04:12.926456  1322 solver.cpp:404]     Test net output #0: accuracy = 0.75116\nI0821 11:04:12.926713  1322 solver.cpp:404]     Test net output #1: loss = 1.23891 (* 1 = 1.23891 loss)\nI0821 11:04:14.244968  1322 solver.cpp:228] Iteration 7000, loss = 0.077638\nI0821 11:04:14.245007  1322 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:04:14.245029  1322 solver.cpp:244]     Train net output #1: loss = 0.0776381 (* 1 = 0.0776381 loss)\nI0821 11:04:14.334444  1322 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:06:31.380655  1322 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:07:52.292986  1322 solver.cpp:404]     Test net output #0: accuracy = 0.77484\nI0821 11:07:52.293221  1322 solver.cpp:404]     Test net output #1: loss = 0.974786 (* 1 = 0.974786 loss)\nI0821 11:07:53.612108  1322 solver.cpp:228] Iteration 7100, loss = 0.100026\nI0821 11:07:53.612155  1322 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:07:53.612179  1322 solver.cpp:244]     Train net output #1: loss = 0.100026 (* 1 = 0.100026 loss)\nI0821 11:07:53.702554  1322 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:10:10.780128  1322 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:11:31.683161  1322 solver.cpp:404]     Test net output #0: accuracy = 0.79944\nI0821 11:11:31.683415  1322 solver.cpp:404]     Test net output #1: loss = 0.901052 (* 1 = 0.901052 loss)\nI0821 
11:11:33.001293  1322 solver.cpp:228] Iteration 7200, loss = 0.0924932\nI0821 11:11:33.001332  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:11:33.001355  1322 solver.cpp:244]     Train net output #1: loss = 0.0924934 (* 1 = 0.0924934 loss)\nI0821 11:11:33.086571  1322 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:13:50.216289  1322 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:15:11.121220  1322 solver.cpp:404]     Test net output #0: accuracy = 0.71632\nI0821 11:15:11.121490  1322 solver.cpp:404]     Test net output #1: loss = 1.51682 (* 1 = 1.51682 loss)\nI0821 11:15:12.439270  1322 solver.cpp:228] Iteration 7300, loss = 0.141877\nI0821 11:15:12.439308  1322 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:15:12.439332  1322 solver.cpp:244]     Train net output #1: loss = 0.141877 (* 1 = 0.141877 loss)\nI0821 11:15:12.532474  1322 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:17:29.663645  1322 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:18:50.575603  1322 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0821 11:18:50.575846  1322 solver.cpp:404]     Test net output #1: loss = 0.851624 (* 1 = 0.851624 loss)\nI0821 11:18:51.894819  1322 solver.cpp:228] Iteration 7400, loss = 0.132775\nI0821 11:18:51.894866  1322 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 11:18:51.894891  1322 solver.cpp:244]     Train net output #1: loss = 0.132776 (* 1 = 0.132776 loss)\nI0821 11:18:51.984956  1322 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:21:09.056470  1322 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:22:29.984871  1322 solver.cpp:404]     Test net output #0: accuracy = 0.77752\nI0821 11:22:29.985142  1322 solver.cpp:404]     Test net output #1: loss = 0.876408 (* 1 = 0.876408 loss)\nI0821 11:22:31.303058  1322 solver.cpp:228] Iteration 7500, loss = 0.0723023\nI0821 11:22:31.303102  1322 solver.cpp:244]     Train net 
output #0: accuracy = 0.984\nI0821 11:22:31.303125  1322 solver.cpp:244]     Train net output #1: loss = 0.0723025 (* 1 = 0.0723025 loss)\nI0821 11:22:31.393752  1322 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:24:48.445849  1322 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:26:09.349331  1322 solver.cpp:404]     Test net output #0: accuracy = 0.77096\nI0821 11:26:09.349599  1322 solver.cpp:404]     Test net output #1: loss = 0.983315 (* 1 = 0.983315 loss)\nI0821 11:26:10.667387  1322 solver.cpp:228] Iteration 7600, loss = 0.215027\nI0821 11:26:10.667426  1322 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 11:26:10.667449  1322 solver.cpp:244]     Train net output #1: loss = 0.215027 (* 1 = 0.215027 loss)\nI0821 11:26:10.756922  1322 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:28:27.851047  1322 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:29:48.817181  1322 solver.cpp:404]     Test net output #0: accuracy = 0.7876\nI0821 11:29:48.817451  1322 solver.cpp:404]     Test net output #1: loss = 0.875252 (* 1 = 0.875252 loss)\nI0821 11:29:50.136561  1322 solver.cpp:228] Iteration 7700, loss = 0.0862918\nI0821 11:29:50.136600  1322 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:29:50.136623  1322 solver.cpp:244]     Train net output #1: loss = 0.086292 (* 1 = 0.086292 loss)\nI0821 11:29:50.227437  1322 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:32:07.395234  1322 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:33:28.216929  1322 solver.cpp:404]     Test net output #0: accuracy = 0.8286\nI0821 11:33:28.217196  1322 solver.cpp:404]     Test net output #1: loss = 0.663512 (* 1 = 0.663512 loss)\nI0821 11:33:29.535672  1322 solver.cpp:228] Iteration 7800, loss = 0.128546\nI0821 11:33:29.535712  1322 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:33:29.535734  1322 solver.cpp:244]     Train net output #1: loss = 0.128547 (* 1 = 0.128547 
loss)\nI0821 11:33:29.622890  1322 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:35:46.641747  1322 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 11:37:07.469233  1322 solver.cpp:404]     Test net output #0: accuracy = 0.83612\nI0821 11:37:07.469499  1322 solver.cpp:404]     Test net output #1: loss = 0.665841 (* 1 = 0.665841 loss)\nI0821 11:37:08.788236  1322 solver.cpp:228] Iteration 7900, loss = 0.0951145\nI0821 11:37:08.788276  1322 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:37:08.788300  1322 solver.cpp:244]     Train net output #1: loss = 0.0951147 (* 1 = 0.0951147 loss)\nI0821 11:37:08.880020  1322 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:39:25.922386  1322 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:40:46.747755  1322 solver.cpp:404]     Test net output #0: accuracy = 0.84164\nI0821 11:40:46.748019  1322 solver.cpp:404]     Test net output #1: loss = 0.671788 (* 1 = 0.671788 loss)\nI0821 11:40:48.066272  1322 solver.cpp:228] Iteration 8000, loss = 0.159754\nI0821 11:40:48.066309  1322 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:40:48.066332  1322 solver.cpp:244]     Train net output #1: loss = 0.159754 (* 1 = 0.159754 loss)\nI0821 11:40:48.156100  1322 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:43:05.230772  1322 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:44:26.064450  1322 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0821 11:44:26.064716  1322 solver.cpp:404]     Test net output #1: loss = 0.759007 (* 1 = 0.759007 loss)\nI0821 11:44:27.382933  1322 solver.cpp:228] Iteration 8100, loss = 0.036451\nI0821 11:44:27.382971  1322 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:44:27.382993  1322 solver.cpp:244]     Train net output #1: loss = 0.0364512 (* 1 = 0.0364512 loss)\nI0821 11:44:27.472400  1322 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:46:44.592686  1322 solver.cpp:337] 
Iteration 8200, Testing net (#0)\nI0821 11:48:05.424098  1322 solver.cpp:404]     Test net output #0: accuracy = 0.8146\nI0821 11:48:05.424365  1322 solver.cpp:404]     Test net output #1: loss = 0.817489 (* 1 = 0.817489 loss)\nI0821 11:48:06.743041  1322 solver.cpp:228] Iteration 8200, loss = 0.0873584\nI0821 11:48:06.743083  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:48:06.743108  1322 solver.cpp:244]     Train net output #1: loss = 0.0873586 (* 1 = 0.0873586 loss)\nI0821 11:48:06.831825  1322 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:50:23.901041  1322 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:51:44.719938  1322 solver.cpp:404]     Test net output #0: accuracy = 0.845\nI0821 11:51:44.720163  1322 solver.cpp:404]     Test net output #1: loss = 0.605305 (* 1 = 0.605305 loss)\nI0821 11:51:46.038772  1322 solver.cpp:228] Iteration 8300, loss = 0.0630538\nI0821 11:51:46.038811  1322 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:51:46.038836  1322 solver.cpp:244]     Train net output #1: loss = 0.063054 (* 1 = 0.063054 loss)\nI0821 11:51:46.121976  1322 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:54:03.134953  1322 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:55:23.962452  1322 solver.cpp:404]     Test net output #0: accuracy = 0.821\nI0821 11:55:23.962723  1322 solver.cpp:404]     Test net output #1: loss = 0.78567 (* 1 = 0.78567 loss)\nI0821 11:55:25.281216  1322 solver.cpp:228] Iteration 8400, loss = 0.0695597\nI0821 11:55:25.281255  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:55:25.281278  1322 solver.cpp:244]     Train net output #1: loss = 0.0695599 (* 1 = 0.0695599 loss)\nI0821 11:55:25.367132  1322 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 11:57:42.370064  1322 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 11:59:03.193156  1322 solver.cpp:404]     Test net output #0: accuracy = 0.83452\nI0821 
11:59:03.193423  1322 solver.cpp:404]     Test net output #1: loss = 0.746865 (* 1 = 0.746865 loss)\nI0821 11:59:04.512078  1322 solver.cpp:228] Iteration 8500, loss = 0.0718908\nI0821 11:59:04.512130  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:59:04.512154  1322 solver.cpp:244]     Train net output #1: loss = 0.0718911 (* 1 = 0.0718911 loss)\nI0821 11:59:04.599788  1322 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 12:01:21.743613  1322 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:02:42.584753  1322 solver.cpp:404]     Test net output #0: accuracy = 0.85936\nI0821 12:02:42.585005  1322 solver.cpp:404]     Test net output #1: loss = 0.610084 (* 1 = 0.610084 loss)\nI0821 12:02:43.903167  1322 solver.cpp:228] Iteration 8600, loss = 0.0291216\nI0821 12:02:43.903213  1322 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:02:43.903229  1322 solver.cpp:244]     Train net output #1: loss = 0.0291218 (* 1 = 0.0291218 loss)\nI0821 12:02:43.996418  1322 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:05:01.002035  1322 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:06:21.891315  1322 solver.cpp:404]     Test net output #0: accuracy = 0.8174\nI0821 12:06:21.891573  1322 solver.cpp:404]     Test net output #1: loss = 0.844198 (* 1 = 0.844198 loss)\nI0821 12:06:23.209380  1322 solver.cpp:228] Iteration 8700, loss = 0.100726\nI0821 12:06:23.209427  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:06:23.209444  1322 solver.cpp:244]     Train net output #1: loss = 0.100726 (* 1 = 0.100726 loss)\nI0821 12:06:23.294147  1322 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:08:40.302382  1322 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:10:01.215129  1322 solver.cpp:404]     Test net output #0: accuracy = 0.83624\nI0821 12:10:01.215348  1322 solver.cpp:404]     Test net output #1: loss = 0.760223 (* 1 = 0.760223 loss)\nI0821 12:10:02.533965  1322 
solver.cpp:228] Iteration 8800, loss = 0.0460566\nI0821 12:10:02.533998  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:10:02.534013  1322 solver.cpp:244]     Train net output #1: loss = 0.0460568 (* 1 = 0.0460568 loss)\nI0821 12:10:02.623610  1322 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:12:19.635076  1322 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:13:40.535401  1322 solver.cpp:404]     Test net output #0: accuracy = 0.86576\nI0821 12:13:40.535621  1322 solver.cpp:404]     Test net output #1: loss = 0.60309 (* 1 = 0.60309 loss)\nI0821 12:13:41.853041  1322 solver.cpp:228] Iteration 8900, loss = 0.0311236\nI0821 12:13:41.853088  1322 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:13:41.853106  1322 solver.cpp:244]     Train net output #1: loss = 0.0311238 (* 1 = 0.0311238 loss)\nI0821 12:13:41.939069  1322 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:15:58.912752  1322 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:17:19.828635  1322 solver.cpp:404]     Test net output #0: accuracy = 0.85932\nI0821 12:17:19.828910  1322 solver.cpp:404]     Test net output #1: loss = 0.632329 (* 1 = 0.632329 loss)\nI0821 12:17:21.148365  1322 solver.cpp:228] Iteration 9000, loss = 0.0606288\nI0821 12:17:21.148401  1322 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:17:21.148416  1322 solver.cpp:244]     Train net output #1: loss = 0.0606291 (* 1 = 0.0606291 loss)\nI0821 12:17:21.237915  1322 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:19:38.213219  1322 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:20:59.130791  1322 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0821 12:20:59.131057  1322 solver.cpp:404]     Test net output #1: loss = 0.529328 (* 1 = 0.529328 loss)\nI0821 12:21:00.449106  1322 solver.cpp:228] Iteration 9100, loss = 0.0170213\nI0821 12:21:00.449151  1322 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 12:21:00.449168  1322 solver.cpp:244]     Train net output #1: loss = 0.0170215 (* 1 = 0.0170215 loss)\nI0821 12:21:00.539103  1322 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:23:17.502588  1322 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 12:24:38.422345  1322 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0821 12:24:38.422616  1322 solver.cpp:404]     Test net output #1: loss = 0.545237 (* 1 = 0.545237 loss)\nI0821 12:24:39.740727  1322 solver.cpp:228] Iteration 9200, loss = 0.0367138\nI0821 12:24:39.740773  1322 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:24:39.740789  1322 solver.cpp:244]     Train net output #1: loss = 0.0367141 (* 1 = 0.0367141 loss)\nI0821 12:24:39.833340  1322 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:26:56.833060  1322 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:28:17.740916  1322 solver.cpp:404]     Test net output #0: accuracy = 0.90996\nI0821 12:28:17.741163  1322 solver.cpp:404]     Test net output #1: loss = 0.426535 (* 1 = 0.426535 loss)\nI0821 12:28:19.058877  1322 solver.cpp:228] Iteration 9300, loss = 0.000572544\nI0821 12:28:19.058914  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:28:19.058929  1322 solver.cpp:244]     Train net output #1: loss = 0.000572801 (* 1 = 0.000572801 loss)\nI0821 12:28:19.142735  1322 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:30:36.138571  1322 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:31:57.044803  1322 solver.cpp:404]     Test net output #0: accuracy = 0.91736\nI0821 12:31:57.045083  1322 solver.cpp:404]     Test net output #1: loss = 0.367017 (* 1 = 0.367017 loss)\nI0821 12:31:58.362555  1322 solver.cpp:228] Iteration 9400, loss = 0.000898958\nI0821 12:31:58.362588  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:31:58.362603  1322 solver.cpp:244]     Train net output #1: loss = 0.000899214 (* 1 = 0.000899214 loss)\nI0821 
12:31:58.452205  1322 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 12:34:15.592057  1322 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:35:36.502838  1322 solver.cpp:404]     Test net output #0: accuracy = 0.9206\nI0821 12:35:36.503131  1322 solver.cpp:404]     Test net output #1: loss = 0.324351 (* 1 = 0.324351 loss)\nI0821 12:35:37.820999  1322 solver.cpp:228] Iteration 9500, loss = 0.000323606\nI0821 12:35:37.821043  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:35:37.821059  1322 solver.cpp:244]     Train net output #1: loss = 0.000323862 (* 1 = 0.000323862 loss)\nI0821 12:35:37.911483  1322 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:37:55.359727  1322 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:39:16.279230  1322 solver.cpp:404]     Test net output #0: accuracy = 0.92116\nI0821 12:39:16.279510  1322 solver.cpp:404]     Test net output #1: loss = 0.327694 (* 1 = 0.327694 loss)\nI0821 12:39:17.596771  1322 solver.cpp:228] Iteration 9600, loss = 0.000298181\nI0821 12:39:17.596814  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:39:17.596832  1322 solver.cpp:244]     Train net output #1: loss = 0.000298437 (* 1 = 0.000298437 loss)\nI0821 12:39:17.693153  1322 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:41:35.131109  1322 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:42:56.056310  1322 solver.cpp:404]     Test net output #0: accuracy = 0.923\nI0821 12:42:56.056596  1322 solver.cpp:404]     Test net output #1: loss = 0.314734 (* 1 = 0.314734 loss)\nI0821 12:42:57.374047  1322 solver.cpp:228] Iteration 9700, loss = 0.000402135\nI0821 12:42:57.374089  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:42:57.374105  1322 solver.cpp:244]     Train net output #1: loss = 0.000402391 (* 1 = 0.000402391 loss)\nI0821 12:42:57.464370  1322 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:45:14.875877  1322 solver.cpp:337] Iteration 
9800, Testing net (#0)\nI0821 12:46:35.809911  1322 solver.cpp:404]     Test net output #0: accuracy = 0.92252\nI0821 12:46:35.810189  1322 solver.cpp:404]     Test net output #1: loss = 0.319676 (* 1 = 0.319676 loss)\nI0821 12:46:37.128165  1322 solver.cpp:228] Iteration 9800, loss = 0.000306993\nI0821 12:46:37.128206  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:46:37.128222  1322 solver.cpp:244]     Train net output #1: loss = 0.000307249 (* 1 = 0.000307249 loss)\nI0821 12:46:37.216609  1322 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:48:54.566385  1322 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:50:15.492763  1322 solver.cpp:404]     Test net output #0: accuracy = 0.92304\nI0821 12:50:15.493041  1322 solver.cpp:404]     Test net output #1: loss = 0.308704 (* 1 = 0.308704 loss)\nI0821 12:50:16.811318  1322 solver.cpp:228] Iteration 9900, loss = 0.000418927\nI0821 12:50:16.811352  1322 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:50:16.811367  1322 solver.cpp:244]     Train net output #1: loss = 0.000419183 (* 1 = 0.000419183 loss)\nI0821 12:50:16.902658  1322 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:52:34.229420  1322 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kMom85Fig11_iter_10000.caffemodel\nI0821 12:52:34.447582  1322 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kMom85Fig11_iter_10000.solverstate\nI0821 12:52:34.888617  1322 solver.cpp:317] Iteration 10000, loss = 0.0003799\nI0821 12:52:34.888656  1322 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:53:55.820403  1322 solver.cpp:404]     Test net output #0: accuracy = 0.92288\nI0821 12:53:55.820680  1322 solver.cpp:404]     Test net output #1: loss = 0.316364 (* 1 = 0.316364 loss)\nI0821 12:53:55.820693  1322 solver.cpp:322] Optimization Done.\nI0821 12:54:01.115838  1322 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kMom8Fig11",
    "content": "I0821 06:46:13.904450 32352 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:46:13.906965 32352 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:46:13.908185 32352 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:46:13.909399 32352 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:46:13.910615 32352 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:46:13.911849 32352 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:46:13.913079 32352 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:46:13.914311 32352 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:46:13.915544 32352 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:46:14.335017 32352 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.8\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kMom8Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:46:14.338481 32352 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:46:14.355836 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:14.355911 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:14.356984 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:46:14.357040 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:46:14.357066 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:46:14.357087 32352 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:46:14.357106 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:46:14.357125 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:46:14.357142 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:46:14.357161 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:46:14.357182 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:46:14.357200 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:46:14.357219 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:46:14.357234 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:46:14.357254 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:46:14.357272 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:46:14.357291 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:46:14.357309 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:46:14.357327 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:46:14.357345 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:46:14.357363 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:46:14.357381 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:46:14.357416 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:46:14.357435 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:46:14.357460 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:46:14.357478 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:46:14.357496 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:46:14.357511 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:46:14.357529 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:46:14.357544 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:46:14.357561 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:46:14.357579 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:46:14.357599 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:46:14.357617 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:46:14.357635 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:46:14.357658 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:46:14.357681 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:46:14.357699 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:46:14.357719 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:46:14.357736 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:46:14.357755 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:46:14.357772 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:46:14.357797 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:46:14.357815 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:46:14.357831 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:46:14.357849 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:46:14.357867 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:46:14.357885 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:46:14.357903 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:46:14.357920 32352 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:46:14.357940 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:46:14.357956 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:46:14.357973 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:46:14.358001 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:46:14.358022 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:46:14.358042 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:46:14.358060 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:46:14.358075 32352 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:46:14.359838 32352 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:46:14.361923 32352 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:46:14.363124 32352 net.cpp:100] Creating Layer dataLayer\nI0821 06:46:14.363201 32352 net.cpp:408] dataLayer -> data_top\nI0821 06:46:14.363390 32352 net.cpp:408] dataLayer -> label\nI0821 06:46:14.363508 32352 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:46:14.373512 32357 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:46:14.396286 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:14.404170 32352 net.cpp:150] Setting up dataLayer\nI0821 06:46:14.404239 32352 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:46:14.404256 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.404263 32352 net.cpp:165] Memory required for data: 1536500\nI0821 06:46:14.404276 32352 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:46:14.404291 32352 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:46:14.404307 32352 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:46:14.404325 32352 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:46:14.404340 32352 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:46:14.404403 32352 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:46:14.404417 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.404423 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.404428 32352 net.cpp:165] Memory required for data: 1537500\nI0821 06:46:14.404433 32352 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:46:14.404497 32352 
net.cpp:100] Creating Layer pre_conv\nI0821 06:46:14.404508 32352 net.cpp:434] pre_conv <- data_top\nI0821 06:46:14.404521 32352 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:46:14.406410 32352 net.cpp:150] Setting up pre_conv\nI0821 06:46:14.406430 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.406436 32352 net.cpp:165] Memory required for data: 9729500\nI0821 06:46:14.406497 32352 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:46:14.406564 32352 net.cpp:100] Creating Layer pre_bn\nI0821 06:46:14.406574 32352 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:46:14.406584 32352 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:46:14.406973 32352 net.cpp:150] Setting up pre_bn\nI0821 06:46:14.406991 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.406997 32352 net.cpp:165] Memory required for data: 17921500\nI0821 06:46:14.407016 32352 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:46:14.407068 32352 net.cpp:100] Creating Layer pre_scale\nI0821 06:46:14.407078 32352 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:46:14.407088 32352 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:46:14.407253 32358 blocking_queue.cpp:50] Waiting for data\nI0821 06:46:14.407260 32352 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:46:14.407521 32352 net.cpp:150] Setting up pre_scale\nI0821 06:46:14.407537 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.407543 32352 net.cpp:165] Memory required for data: 26113500\nI0821 06:46:14.407553 32352 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:46:14.407598 32352 net.cpp:100] Creating Layer pre_relu\nI0821 06:46:14.407608 32352 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:46:14.407621 32352 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:46:14.407632 32352 net.cpp:150] Setting up pre_relu\nI0821 06:46:14.407640 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.407645 32352 net.cpp:165] Memory required for data: 
34305500\nI0821 06:46:14.407655 32352 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:46:14.407665 32352 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:46:14.407670 32352 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:46:14.407680 32352 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:46:14.407690 32352 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:46:14.407733 32352 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:46:14.407745 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.407752 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.407757 32352 net.cpp:165] Memory required for data: 50689500\nI0821 06:46:14.407763 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:46:14.407781 32352 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:46:14.407788 32352 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:46:14.407796 32352 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:46:14.408119 32352 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:46:14.408135 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.408140 32352 net.cpp:165] Memory required for data: 58881500\nI0821 06:46:14.408154 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:46:14.408167 32352 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:46:14.408174 32352 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:46:14.408181 32352 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:46:14.408409 32352 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:46:14.408422 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.408427 32352 net.cpp:165] Memory required for data: 67073500\nI0821 06:46:14.408438 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:46:14.408450 32352 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:46:14.408457 32352 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:46:14.408464 32352 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.408514 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:46:14.408656 32352 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:46:14.408669 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.408675 32352 net.cpp:165] Memory required for data: 75265500\nI0821 06:46:14.408684 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:46:14.408700 32352 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:46:14.408706 32352 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:46:14.408716 32352 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.408726 32352 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:46:14.408735 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.408738 32352 net.cpp:165] Memory required for data: 83457500\nI0821 06:46:14.408743 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:46:14.408757 32352 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:46:14.408762 32352 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:46:14.408771 32352 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:46:14.409078 32352 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:46:14.409092 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409097 32352 net.cpp:165] Memory required for data: 91649500\nI0821 06:46:14.409106 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:46:14.409117 32352 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:46:14.409123 32352 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:46:14.409132 32352 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:46:14.409363 32352 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:46:14.409376 32352 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409382 32352 net.cpp:165] Memory required for data: 99841500\nI0821 06:46:14.409396 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:46:14.409409 32352 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:46:14.409415 32352 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:46:14.409422 32352 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.409477 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:46:14.409616 32352 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:46:14.409631 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409636 32352 net.cpp:165] Memory required for data: 108033500\nI0821 06:46:14.409643 32352 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:46:14.409699 32352 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:46:14.409713 32352 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:46:14.409720 32352 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:46:14.409732 32352 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:46:14.409807 32352 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:46:14.409822 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409828 32352 net.cpp:165] Memory required for data: 116225500\nI0821 06:46:14.409834 32352 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:46:14.409842 32352 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:46:14.409847 32352 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:46:14.409858 32352 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.409868 32352 net.cpp:150] Setting up L1_b1_relu\nI0821 06:46:14.409875 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409879 32352 net.cpp:165] Memory required for data: 124417500\nI0821 06:46:14.409884 32352 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.409893 32352 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.409898 32352 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:46:14.409905 32352 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:46:14.409914 32352 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:46:14.409960 32352 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.409971 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409977 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.409991 32352 net.cpp:165] Memory required for data: 140801500\nI0821 06:46:14.409996 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:46:14.410007 32352 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:46:14.410013 32352 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:46:14.410025 32352 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:46:14.410327 32352 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:46:14.410341 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.410346 32352 net.cpp:165] Memory required for data: 148993500\nI0821 06:46:14.410356 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:46:14.410364 32352 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:46:14.410370 32352 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:46:14.410382 32352 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:46:14.410620 32352 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:46:14.410636 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.410641 32352 net.cpp:165] Memory required for data: 157185500\nI0821 06:46:14.410660 32352 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:46:14.410670 32352 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:46:14.410676 32352 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:46:14.410682 32352 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.410734 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:46:14.410876 32352 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:46:14.410889 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.410894 32352 net.cpp:165] Memory required for data: 165377500\nI0821 06:46:14.410903 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:46:14.410914 32352 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:46:14.410920 32352 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:46:14.410928 32352 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.410936 32352 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:46:14.410946 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.410951 32352 net.cpp:165] Memory required for data: 173569500\nI0821 06:46:14.410956 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:46:14.410967 32352 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:46:14.410972 32352 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:46:14.410984 32352 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:46:14.411283 32352 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:46:14.411296 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.411301 32352 net.cpp:165] Memory required for data: 181761500\nI0821 06:46:14.411310 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:46:14.411319 32352 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:46:14.411324 32352 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:46:14.411337 32352 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:46:14.411571 32352 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:46:14.411583 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.411588 32352 net.cpp:165] Memory required for data: 189953500\nI0821 06:46:14.411607 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:46:14.411617 32352 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:46:14.411623 32352 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:46:14.411629 32352 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.411690 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:46:14.411828 32352 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:46:14.411840 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.411845 32352 net.cpp:165] Memory required for data: 198145500\nI0821 06:46:14.411854 32352 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:46:14.411873 32352 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:46:14.411880 32352 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:46:14.411886 32352 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:46:14.411895 32352 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:46:14.411928 32352 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:46:14.411940 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.411945 32352 net.cpp:165] Memory required for data: 206337500\nI0821 06:46:14.411950 32352 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:46:14.411958 32352 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:46:14.411963 32352 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:46:14.411970 32352 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.411978 32352 net.cpp:150] Setting up L1_b2_relu\nI0821 06:46:14.411985 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.411990 32352 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:46:14.411994 32352 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.412001 32352 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.412006 32352 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:46:14.412016 32352 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:46:14.412026 32352 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:46:14.412066 32352 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.412077 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.412084 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.412088 32352 net.cpp:165] Memory required for data: 230913500\nI0821 06:46:14.412093 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:46:14.412107 32352 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:46:14.412113 32352 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:46:14.412122 32352 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:46:14.412428 32352 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:46:14.412442 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.412447 32352 net.cpp:165] Memory required for data: 239105500\nI0821 06:46:14.412456 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:46:14.412467 32352 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:46:14.412473 32352 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:46:14.412483 32352 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:46:14.412724 32352 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:46:14.412736 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.412741 32352 net.cpp:165] Memory required for data: 
247297500\nI0821 06:46:14.412752 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:46:14.412760 32352 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:46:14.412766 32352 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:46:14.412775 32352 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.412827 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:46:14.412966 32352 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:46:14.412978 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.412983 32352 net.cpp:165] Memory required for data: 255489500\nI0821 06:46:14.412992 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:46:14.413002 32352 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:46:14.413008 32352 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:46:14.413017 32352 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.413025 32352 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:46:14.413039 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.413044 32352 net.cpp:165] Memory required for data: 263681500\nI0821 06:46:14.413049 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:46:14.413064 32352 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:46:14.413069 32352 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:46:14.413080 32352 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:46:14.413388 32352 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:46:14.413403 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.413408 32352 net.cpp:165] Memory required for data: 271873500\nI0821 06:46:14.413416 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:46:14.413432 32352 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:46:14.413439 32352 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:46:14.413447 32352 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:46:14.413702 32352 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:46:14.413714 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.413719 32352 net.cpp:165] Memory required for data: 280065500\nI0821 06:46:14.413730 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:46:14.413739 32352 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:46:14.413744 32352 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:46:14.413755 32352 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.413807 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:46:14.413940 32352 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:46:14.413955 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.413961 32352 net.cpp:165] Memory required for data: 288257500\nI0821 06:46:14.413970 32352 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:46:14.413978 32352 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:46:14.413985 32352 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:46:14.413990 32352 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:46:14.413998 32352 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:46:14.414031 32352 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:46:14.414041 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.414046 32352 net.cpp:165] Memory required for data: 296449500\nI0821 06:46:14.414050 32352 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:46:14.414060 32352 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:46:14.414067 32352 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:46:14.414073 32352 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.414083 32352 net.cpp:150] Setting up L1_b3_relu\nI0821 06:46:14.414088 32352 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:46:14.414093 32352 net.cpp:165] Memory required for data: 304641500\nI0821 06:46:14.414098 32352 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.414104 32352 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.414109 32352 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:46:14.414119 32352 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:46:14.414129 32352 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:46:14.414170 32352 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.414185 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.414191 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.414196 32352 net.cpp:165] Memory required for data: 321025500\nI0821 06:46:14.414201 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:46:14.414212 32352 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:46:14.414218 32352 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:46:14.414234 32352 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:46:14.414548 32352 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:46:14.414562 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.414567 32352 net.cpp:165] Memory required for data: 329217500\nI0821 06:46:14.414577 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:46:14.414588 32352 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:46:14.414594 32352 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:46:14.414602 32352 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:46:14.414849 32352 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:46:14.414862 32352 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:46:14.414867 32352 net.cpp:165] Memory required for data: 337409500\nI0821 06:46:14.414878 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:46:14.414886 32352 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:46:14.414892 32352 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:46:14.414903 32352 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.414954 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:46:14.415096 32352 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:46:14.415112 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.415117 32352 net.cpp:165] Memory required for data: 345601500\nI0821 06:46:14.415125 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:46:14.415133 32352 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:46:14.415138 32352 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:46:14.415146 32352 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.415155 32352 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:46:14.415163 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.415166 32352 net.cpp:165] Memory required for data: 353793500\nI0821 06:46:14.415171 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:46:14.415184 32352 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:46:14.415190 32352 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:46:14.415201 32352 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:46:14.415508 32352 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:46:14.415521 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.415526 32352 net.cpp:165] Memory required for data: 361985500\nI0821 06:46:14.415534 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:46:14.415546 32352 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:46:14.415552 32352 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:46:14.415563 32352 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:46:14.415808 32352 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:46:14.415822 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.415828 32352 net.cpp:165] Memory required for data: 370177500\nI0821 06:46:14.415838 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:46:14.415846 32352 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:46:14.415853 32352 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:46:14.415863 32352 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.415916 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:46:14.416054 32352 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:46:14.416067 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416072 32352 net.cpp:165] Memory required for data: 378369500\nI0821 06:46:14.416081 32352 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:46:14.416090 32352 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:46:14.416095 32352 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:46:14.416102 32352 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:46:14.416112 32352 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:46:14.416151 32352 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:46:14.416164 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416169 32352 net.cpp:165] Memory required for data: 386561500\nI0821 06:46:14.416174 32352 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:46:14.416182 32352 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:46:14.416188 32352 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:46:14.416194 32352 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.416203 32352 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:46:14.416209 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416214 32352 net.cpp:165] Memory required for data: 394753500\nI0821 06:46:14.416219 32352 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.416230 32352 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.416236 32352 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:46:14.416244 32352 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:46:14.416252 32352 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:46:14.416296 32352 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.416308 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416314 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416319 32352 net.cpp:165] Memory required for data: 411137500\nI0821 06:46:14.416324 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:46:14.416334 32352 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:46:14.416340 32352 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:46:14.416352 32352 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:46:14.416676 32352 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:46:14.416690 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416695 32352 net.cpp:165] Memory required for data: 419329500\nI0821 06:46:14.416715 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:46:14.416728 32352 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:46:14.416733 32352 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:46:14.416741 32352 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:46:14.416981 32352 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:46:14.416995 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.416999 32352 net.cpp:165] Memory required for data: 427521500\nI0821 06:46:14.417011 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:46:14.417018 32352 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:46:14.417024 32352 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:46:14.417034 32352 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.417086 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:46:14.417227 32352 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:46:14.417238 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.417243 32352 net.cpp:165] Memory required for data: 435713500\nI0821 06:46:14.417253 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:46:14.417261 32352 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:46:14.417266 32352 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:46:14.417273 32352 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.417282 32352 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:46:14.417289 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.417294 32352 net.cpp:165] Memory required for data: 443905500\nI0821 06:46:14.417299 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:46:14.417311 32352 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:46:14.417317 32352 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:46:14.417335 32352 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:46:14.417647 32352 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:46:14.417667 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.417672 32352 net.cpp:165] Memory required for data: 452097500\nI0821 06:46:14.417681 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:46:14.417695 32352 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:46:14.417701 32352 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:46:14.417712 32352 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:46:14.417950 32352 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:46:14.417963 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.417968 32352 net.cpp:165] Memory required for data: 460289500\nI0821 06:46:14.417978 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:46:14.417986 32352 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:46:14.417991 32352 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:46:14.418002 32352 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.418053 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:46:14.418190 32352 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:46:14.418207 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418212 32352 net.cpp:165] Memory required for data: 468481500\nI0821 06:46:14.418221 32352 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:46:14.418231 32352 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:46:14.418236 32352 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:46:14.418242 32352 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:46:14.418251 32352 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:46:14.418282 32352 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:46:14.418292 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418296 32352 net.cpp:165] Memory required for data: 476673500\nI0821 06:46:14.418301 32352 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:46:14.418309 32352 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:46:14.418318 32352 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:46:14.418324 32352 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.418334 32352 net.cpp:150] Setting up L1_b5_relu\nI0821 06:46:14.418340 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418345 32352 net.cpp:165] Memory required for data: 484865500\nI0821 06:46:14.418349 32352 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.418356 32352 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.418361 32352 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:46:14.418371 32352 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:46:14.418380 32352 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:46:14.418421 32352 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.418433 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418439 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418444 32352 net.cpp:165] Memory required for data: 501249500\nI0821 06:46:14.418449 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:46:14.418463 32352 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:46:14.418469 32352 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:46:14.418478 32352 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:46:14.418794 32352 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:46:14.418809 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.418814 32352 net.cpp:165] Memory required for data: 509441500\nI0821 06:46:14.418830 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:46:14.418843 32352 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:46:14.418848 32352 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:46:14.418857 32352 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:46:14.419093 32352 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:46:14.419106 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.419111 32352 net.cpp:165] Memory required for data: 517633500\nI0821 06:46:14.419122 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:46:14.419131 32352 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:46:14.419137 32352 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:46:14.419147 32352 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.419199 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:46:14.419342 32352 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:46:14.419358 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.419363 32352 net.cpp:165] Memory required for data: 525825500\nI0821 06:46:14.419371 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:46:14.419379 32352 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:46:14.419385 32352 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:46:14.419392 32352 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.419401 32352 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:46:14.419409 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.419412 32352 net.cpp:165] Memory required for data: 534017500\nI0821 06:46:14.419417 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:46:14.419431 32352 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:46:14.419436 32352 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:46:14.419447 32352 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:46:14.419773 32352 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:46:14.419787 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.419792 32352 net.cpp:165] Memory required for data: 542209500\nI0821 06:46:14.419801 32352 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:46:14.419812 32352 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:46:14.419819 32352 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:46:14.419829 32352 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:46:14.420068 32352 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:46:14.420080 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420085 32352 net.cpp:165] Memory required for data: 550401500\nI0821 06:46:14.420096 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:46:14.420104 32352 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:46:14.420110 32352 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:46:14.420117 32352 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.420172 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:46:14.420312 32352 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:46:14.420325 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420331 32352 net.cpp:165] Memory required for data: 558593500\nI0821 06:46:14.420338 32352 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:46:14.420357 32352 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:46:14.420364 32352 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:46:14.420372 32352 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:46:14.420379 32352 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:46:14.420413 32352 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:46:14.420425 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420430 32352 net.cpp:165] Memory required for data: 566785500\nI0821 06:46:14.420435 32352 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:46:14.420452 32352 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:46:14.420459 32352 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:46:14.420469 32352 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.420478 32352 net.cpp:150] Setting up L1_b6_relu\nI0821 06:46:14.420485 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420490 32352 net.cpp:165] Memory required for data: 574977500\nI0821 06:46:14.420495 32352 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.420501 32352 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.420506 32352 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:46:14.420513 32352 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:46:14.420522 32352 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:46:14.420568 32352 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.420579 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420586 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420590 32352 net.cpp:165] Memory required for data: 591361500\nI0821 06:46:14.420595 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:46:14.420606 32352 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:46:14.420613 32352 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:46:14.420624 32352 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:46:14.420951 32352 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:46:14.420966 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.420971 32352 net.cpp:165] Memory required for data: 599553500\nI0821 06:46:14.420980 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:46:14.420989 32352 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:46:14.420994 32352 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:46:14.421005 32352 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:46:14.421244 32352 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:46:14.421257 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.421262 32352 net.cpp:165] Memory required for data: 607745500\nI0821 06:46:14.421272 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:46:14.421284 32352 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:46:14.421290 32352 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:46:14.421298 32352 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.421350 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:46:14.421491 32352 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:46:14.421504 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.421509 32352 net.cpp:165] Memory required for data: 615937500\nI0821 06:46:14.421517 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:46:14.421525 32352 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:46:14.421531 32352 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:46:14.421541 32352 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.421551 32352 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:46:14.421558 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.421562 32352 net.cpp:165] Memory required for data: 624129500\nI0821 06:46:14.421567 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:46:14.421582 32352 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:46:14.421588 32352 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:46:14.421597 32352 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:46:14.421914 32352 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:46:14.421929 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.421934 32352 
net.cpp:165] Memory required for data: 632321500\nI0821 06:46:14.421950 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:46:14.421963 32352 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:46:14.421969 32352 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:46:14.421978 32352 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:46:14.422219 32352 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:46:14.422231 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422236 32352 net.cpp:165] Memory required for data: 640513500\nI0821 06:46:14.422246 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:46:14.422261 32352 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:46:14.422267 32352 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:46:14.422276 32352 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.422327 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:46:14.422471 32352 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:46:14.422483 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422488 32352 net.cpp:165] Memory required for data: 648705500\nI0821 06:46:14.422497 32352 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:46:14.422505 32352 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:46:14.422511 32352 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:46:14.422518 32352 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:46:14.422529 32352 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:46:14.422560 32352 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:46:14.422572 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422577 32352 net.cpp:165] Memory required for data: 656897500\nI0821 06:46:14.422582 32352 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:46:14.422590 32352 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:46:14.422595 32352 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:46:14.422602 32352 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.422611 32352 net.cpp:150] Setting up L1_b7_relu\nI0821 06:46:14.422618 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422623 32352 net.cpp:165] Memory required for data: 665089500\nI0821 06:46:14.422627 32352 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.422637 32352 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.422643 32352 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:46:14.422657 32352 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:46:14.422667 32352 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:46:14.422714 32352 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.422726 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422732 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.422737 32352 net.cpp:165] Memory required for data: 681473500\nI0821 06:46:14.422742 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:46:14.422754 32352 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:46:14.422760 32352 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:46:14.422771 32352 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:46:14.423086 32352 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:46:14.423100 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.423105 32352 net.cpp:165] Memory required for data: 689665500\nI0821 06:46:14.423115 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:46:14.423123 32352 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:46:14.423130 32352 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:46:14.423137 32352 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:46:14.423394 32352 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:46:14.423408 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.423413 32352 net.cpp:165] Memory required for data: 697857500\nI0821 06:46:14.423422 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:46:14.423434 32352 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:46:14.423440 32352 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:46:14.423449 32352 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.423503 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:46:14.423657 32352 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:46:14.423671 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.423676 32352 net.cpp:165] Memory required for data: 706049500\nI0821 06:46:14.423686 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:46:14.423693 32352 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:46:14.423699 32352 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:46:14.423709 32352 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.423719 32352 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:46:14.423727 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.423732 32352 net.cpp:165] Memory required for data: 714241500\nI0821 06:46:14.423737 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:46:14.423749 32352 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:46:14.423755 32352 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:46:14.423764 32352 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:46:14.424079 32352 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:46:14.424093 32352 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424098 32352 net.cpp:165] Memory required for data: 722433500\nI0821 06:46:14.424106 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:46:14.424119 32352 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:46:14.424125 32352 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:46:14.424134 32352 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:46:14.424378 32352 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:46:14.424391 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424396 32352 net.cpp:165] Memory required for data: 730625500\nI0821 06:46:14.424407 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:46:14.424414 32352 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:46:14.424420 32352 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:46:14.424432 32352 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.424484 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:46:14.424625 32352 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:46:14.424638 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424643 32352 net.cpp:165] Memory required for data: 738817500\nI0821 06:46:14.424657 32352 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:46:14.424667 32352 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:46:14.424674 32352 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:46:14.424681 32352 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:46:14.424691 32352 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:46:14.424723 32352 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:46:14.424736 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424741 32352 net.cpp:165] Memory required for data: 747009500\nI0821 06:46:14.424746 32352 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:46:14.424753 32352 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:46:14.424758 32352 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:46:14.424765 32352 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.424774 32352 net.cpp:150] Setting up L1_b8_relu\nI0821 06:46:14.424782 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424793 32352 net.cpp:165] Memory required for data: 755201500\nI0821 06:46:14.424798 32352 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.424808 32352 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.424813 32352 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:46:14.424821 32352 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:46:14.424831 32352 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:46:14.424878 32352 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.424890 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424896 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.424901 32352 net.cpp:165] Memory required for data: 771585500\nI0821 06:46:14.424906 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:46:14.424916 32352 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:46:14.424922 32352 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:46:14.424934 32352 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:46:14.425257 32352 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:46:14.425271 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.425276 32352 net.cpp:165] Memory required for data: 779777500\nI0821 06:46:14.425285 32352 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:46:14.425302 32352 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:46:14.425308 32352 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:46:14.425318 32352 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:46:14.425565 32352 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:46:14.425577 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.425582 32352 net.cpp:165] Memory required for data: 787969500\nI0821 06:46:14.425593 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:46:14.425601 32352 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:46:14.425607 32352 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:46:14.425614 32352 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.425676 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:46:14.425823 32352 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:46:14.425837 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.425842 32352 net.cpp:165] Memory required for data: 796161500\nI0821 06:46:14.425850 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:46:14.425858 32352 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:46:14.425864 32352 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:46:14.425874 32352 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.425884 32352 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:46:14.425891 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.425895 32352 net.cpp:165] Memory required for data: 804353500\nI0821 06:46:14.425900 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:46:14.425911 32352 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:46:14.425917 32352 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:46:14.425927 32352 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:46:14.426246 32352 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:46:14.426259 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.426265 32352 net.cpp:165] Memory required for data: 812545500\nI0821 06:46:14.426273 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:46:14.426282 32352 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:46:14.426288 32352 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:46:14.426298 32352 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:46:14.426549 32352 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:46:14.426565 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.426570 32352 net.cpp:165] Memory required for data: 820737500\nI0821 06:46:14.426602 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:46:14.426617 32352 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:46:14.426625 32352 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:46:14.426631 32352 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.426693 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:46:14.426837 32352 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:46:14.426851 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.426856 32352 net.cpp:165] Memory required for data: 828929500\nI0821 06:46:14.426865 32352 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:46:14.426874 32352 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:46:14.426880 32352 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:46:14.426887 32352 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:46:14.426898 32352 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:46:14.426928 32352 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:46:14.426937 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.426941 32352 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:46:14.426946 32352 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:46:14.426954 32352 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:46:14.426960 32352 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:46:14.426970 32352 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.426980 32352 net.cpp:150] Setting up L1_b9_relu\nI0821 06:46:14.426986 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.426990 32352 net.cpp:165] Memory required for data: 845313500\nI0821 06:46:14.426995 32352 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.427002 32352 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.427007 32352 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:46:14.427018 32352 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:46:14.427028 32352 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:46:14.427073 32352 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.427085 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.427091 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.427096 32352 net.cpp:165] Memory required for data: 861697500\nI0821 06:46:14.427101 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:46:14.427112 32352 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:46:14.427119 32352 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:46:14.427130 32352 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:46:14.427448 32352 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:46:14.427462 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.427467 32352 net.cpp:165] Memory required for data: 
863745500\nI0821 06:46:14.427476 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:46:14.427485 32352 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:46:14.427491 32352 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:46:14.427502 32352 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:46:14.427749 32352 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:46:14.427762 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.427767 32352 net.cpp:165] Memory required for data: 865793500\nI0821 06:46:14.427778 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:46:14.427791 32352 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:46:14.427804 32352 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:46:14.427812 32352 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.427866 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:46:14.428010 32352 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:46:14.428023 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.428028 32352 net.cpp:165] Memory required for data: 867841500\nI0821 06:46:14.428037 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:46:14.428048 32352 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:46:14.428055 32352 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:46:14.428061 32352 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.428071 32352 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:46:14.428077 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.428081 32352 net.cpp:165] Memory required for data: 869889500\nI0821 06:46:14.428086 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:46:14.428100 32352 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:46:14.428107 32352 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:46:14.428117 32352 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:46:14.428433 32352 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:46:14.428447 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.428452 32352 net.cpp:165] Memory required for data: 871937500\nI0821 06:46:14.428459 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:46:14.428468 32352 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:46:14.428474 32352 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:46:14.428488 32352 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:46:14.428741 32352 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:46:14.428755 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.428761 32352 net.cpp:165] Memory required for data: 873985500\nI0821 06:46:14.428771 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:46:14.428782 32352 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:46:14.428788 32352 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:46:14.428797 32352 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.428848 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:46:14.428993 32352 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:46:14.429006 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.429011 32352 net.cpp:165] Memory required for data: 876033500\nI0821 06:46:14.429020 32352 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:46:14.429030 32352 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:46:14.429036 32352 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:46:14.429049 32352 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:46:14.429132 32352 net.cpp:150] Setting up L2_b1_pool\nI0821 06:46:14.429154 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.429160 32352 net.cpp:165] Memory required for data: 878081500\nI0821 06:46:14.429165 32352 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:46:14.429175 32352 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:46:14.429181 32352 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:46:14.429188 32352 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:46:14.429199 32352 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:46:14.429234 32352 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:46:14.429245 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.429250 32352 net.cpp:165] Memory required for data: 880129500\nI0821 06:46:14.429255 32352 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:46:14.429263 32352 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:46:14.429268 32352 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:46:14.429275 32352 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.429293 32352 net.cpp:150] Setting up L2_b1_relu\nI0821 06:46:14.429301 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.429306 32352 net.cpp:165] Memory required for data: 882177500\nI0821 06:46:14.429311 32352 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:46:14.429361 32352 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:46:14.429375 32352 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:46:14.431743 32352 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:46:14.431771 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.431777 32352 net.cpp:165] Memory required for data: 884225500\nI0821 06:46:14.431782 32352 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:46:14.431792 32352 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:46:14.431798 32352 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:46:14.431807 32352 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:46:14.431813 32352 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:46:14.431895 32352 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:46:14.431910 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.431915 32352 net.cpp:165] Memory required for data: 888321500\nI0821 06:46:14.431921 32352 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.431932 32352 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.431938 32352 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:46:14.431946 32352 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:46:14.431957 32352 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:46:14.432008 32352 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.432019 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.432026 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.432030 32352 net.cpp:165] Memory required for data: 896513500\nI0821 06:46:14.432035 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:46:14.432050 32352 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:46:14.432057 32352 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:46:14.432066 32352 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:46:14.433532 32352 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:46:14.433550 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.433555 32352 net.cpp:165] Memory required for data: 900609500\nI0821 06:46:14.433565 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:46:14.433575 32352 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:46:14.433581 32352 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:46:14.433593 32352 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:46:14.433851 32352 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:46:14.433866 32352 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:46:14.433871 32352 net.cpp:165] Memory required for data: 904705500\nI0821 06:46:14.433881 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:46:14.433892 32352 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:46:14.433899 32352 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:46:14.433907 32352 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.433962 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:46:14.434111 32352 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:46:14.434124 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.434129 32352 net.cpp:165] Memory required for data: 908801500\nI0821 06:46:14.434139 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:46:14.434152 32352 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:46:14.434159 32352 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:46:14.434166 32352 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.434185 32352 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:46:14.434193 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.434197 32352 net.cpp:165] Memory required for data: 912897500\nI0821 06:46:14.434202 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:46:14.434217 32352 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:46:14.434223 32352 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:46:14.434234 32352 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:46:14.434856 32352 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:46:14.434872 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.434877 32352 net.cpp:165] Memory required for data: 916993500\nI0821 06:46:14.434886 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:46:14.434897 32352 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:46:14.434903 
32352 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:46:14.434914 32352 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:46:14.435163 32352 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:46:14.435176 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435181 32352 net.cpp:165] Memory required for data: 921089500\nI0821 06:46:14.435191 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:46:14.435204 32352 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:46:14.435210 32352 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:46:14.435219 32352 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.435272 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:46:14.435418 32352 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:46:14.435431 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435436 32352 net.cpp:165] Memory required for data: 925185500\nI0821 06:46:14.435446 32352 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:46:14.435458 32352 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:46:14.435464 32352 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:46:14.435472 32352 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:46:14.435479 32352 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:46:14.435509 32352 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:46:14.435518 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435523 32352 net.cpp:165] Memory required for data: 929281500\nI0821 06:46:14.435528 32352 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:46:14.435536 32352 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:46:14.435541 32352 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:46:14.435549 32352 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.435557 32352 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:46:14.435564 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435569 32352 net.cpp:165] Memory required for data: 933377500\nI0821 06:46:14.435573 32352 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.435583 32352 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.435588 32352 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:46:14.435596 32352 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:46:14.435606 32352 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:46:14.435658 32352 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.435672 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435678 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.435683 32352 net.cpp:165] Memory required for data: 941569500\nI0821 06:46:14.435688 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:46:14.435706 32352 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:46:14.435714 32352 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:46:14.435725 32352 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:46:14.436189 32352 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:46:14.436203 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.436208 32352 net.cpp:165] Memory required for data: 945665500\nI0821 06:46:14.436218 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:46:14.436228 32352 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:46:14.436233 32352 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:46:14.436249 32352 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:46:14.436491 32352 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:46:14.436504 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.436509 32352 net.cpp:165] Memory required for data: 949761500\nI0821 06:46:14.436519 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:46:14.436532 32352 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:46:14.436537 32352 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:46:14.436544 32352 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.436599 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:46:14.436753 32352 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:46:14.436766 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.436771 32352 net.cpp:165] Memory required for data: 953857500\nI0821 06:46:14.436780 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:46:14.436791 32352 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:46:14.436797 32352 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:46:14.436805 32352 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.436815 32352 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:46:14.436820 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.436825 32352 net.cpp:165] Memory required for data: 957953500\nI0821 06:46:14.436830 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:46:14.436843 32352 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:46:14.436849 32352 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:46:14.436861 32352 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:46:14.437316 32352 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:46:14.437330 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.437335 32352 net.cpp:165] Memory required for data: 962049500\nI0821 06:46:14.437345 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:46:14.437353 32352 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:46:14.437360 32352 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:46:14.437367 32352 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:46:14.437615 32352 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:46:14.437628 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.437633 32352 net.cpp:165] Memory required for data: 966145500\nI0821 06:46:14.437644 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:46:14.437657 32352 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:46:14.437664 32352 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:46:14.437675 32352 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.437731 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:46:14.437885 32352 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:46:14.437898 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.437903 32352 net.cpp:165] Memory required for data: 970241500\nI0821 06:46:14.437912 32352 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:46:14.437922 32352 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:46:14.437927 32352 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:46:14.437934 32352 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:46:14.437952 32352 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:46:14.437980 32352 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:46:14.437994 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.437999 32352 net.cpp:165] Memory required for data: 974337500\nI0821 06:46:14.438004 32352 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:46:14.438024 32352 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:46:14.438030 32352 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:46:14.438038 32352 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.438047 32352 net.cpp:150] Setting up L2_b3_relu\nI0821 06:46:14.438055 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.438060 32352 net.cpp:165] Memory required for data: 978433500\nI0821 06:46:14.438066 32352 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.438072 32352 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.438077 32352 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:46:14.438087 32352 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:46:14.438097 32352 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:46:14.438143 32352 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.438154 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.438160 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.438165 32352 net.cpp:165] Memory required for data: 986625500\nI0821 06:46:14.438170 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:46:14.438186 32352 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:46:14.438194 32352 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:46:14.438202 32352 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:46:14.438668 32352 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:46:14.438683 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.438688 32352 net.cpp:165] Memory required for data: 990721500\nI0821 06:46:14.438697 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:46:14.438709 32352 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:46:14.438715 32352 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:46:14.438724 32352 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:46:14.438971 32352 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:46:14.438984 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.438989 32352 net.cpp:165] Memory required for data: 994817500\nI0821 06:46:14.438999 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:46:14.439008 32352 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:46:14.439014 32352 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:46:14.439025 32352 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.439080 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:46:14.439227 32352 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:46:14.439239 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.439244 32352 net.cpp:165] Memory required for data: 998913500\nI0821 06:46:14.439254 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:46:14.439261 32352 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:46:14.439267 32352 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:46:14.439275 32352 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.439286 32352 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:46:14.439294 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.439298 32352 net.cpp:165] Memory required for data: 1003009500\nI0821 06:46:14.439303 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:46:14.439321 32352 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:46:14.439333 32352 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:46:14.439342 32352 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:46:14.439811 32352 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:46:14.439826 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.439831 32352 net.cpp:165] Memory required for data: 1007105500\nI0821 06:46:14.439839 32352 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:46:14.439854 32352 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:46:14.439862 32352 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:46:14.439872 32352 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:46:14.440115 32352 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:46:14.440129 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440135 32352 net.cpp:165] Memory required for data: 1011201500\nI0821 06:46:14.440145 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:46:14.440152 32352 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:46:14.440158 32352 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:46:14.440166 32352 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.440223 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:46:14.440367 32352 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:46:14.440384 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440389 32352 net.cpp:165] Memory required for data: 1015297500\nI0821 06:46:14.440398 32352 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:46:14.440407 32352 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:46:14.440413 32352 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:46:14.440419 32352 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:46:14.440428 32352 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:46:14.440456 32352 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:46:14.440465 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440470 32352 net.cpp:165] Memory required for data: 1019393500\nI0821 06:46:14.440475 32352 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:46:14.440484 32352 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:46:14.440488 32352 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:46:14.440498 32352 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.440507 32352 net.cpp:150] Setting up L2_b4_relu\nI0821 06:46:14.440515 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440518 32352 net.cpp:165] Memory required for data: 1023489500\nI0821 06:46:14.440523 32352 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.440531 32352 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.440536 32352 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:46:14.440546 32352 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:46:14.440554 32352 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:46:14.440598 32352 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.440609 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440615 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.440620 32352 net.cpp:165] Memory required for data: 1031681500\nI0821 06:46:14.440625 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:46:14.440640 32352 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:46:14.440647 32352 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:46:14.440663 32352 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:46:14.441125 32352 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:46:14.441146 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.441151 32352 net.cpp:165] Memory required for data: 1035777500\nI0821 06:46:14.441160 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:46:14.441172 32352 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:46:14.441179 32352 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:46:14.441186 32352 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:46:14.441437 32352 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:46:14.441450 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.441455 32352 net.cpp:165] Memory required for data: 1039873500\nI0821 06:46:14.441467 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:46:14.441474 32352 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:46:14.441480 32352 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:46:14.441488 32352 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.441545 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:46:14.441701 32352 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:46:14.441717 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.441722 32352 net.cpp:165] Memory required for data: 1043969500\nI0821 06:46:14.441731 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:46:14.441740 32352 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:46:14.441745 32352 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:46:14.441752 32352 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.441761 32352 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:46:14.441768 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.441773 32352 net.cpp:165] Memory required for data: 1048065500\nI0821 06:46:14.441777 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:46:14.441792 32352 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:46:14.441797 32352 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:46:14.441808 32352 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:46:14.442270 32352 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:46:14.442283 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.442288 32352 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:46:14.442297 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:46:14.442308 32352 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:46:14.442315 32352 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:46:14.442325 32352 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:46:14.442569 32352 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:46:14.442582 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.442587 32352 net.cpp:165] Memory required for data: 1056257500\nI0821 06:46:14.442597 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:46:14.442606 32352 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:46:14.442612 32352 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:46:14.442620 32352 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.442683 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:46:14.442834 32352 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:46:14.442847 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.442852 32352 net.cpp:165] Memory required for data: 1060353500\nI0821 06:46:14.442862 32352 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:46:14.442873 32352 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:46:14.442879 32352 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:46:14.442886 32352 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:46:14.442894 32352 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:46:14.442920 32352 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:46:14.442929 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.442941 32352 net.cpp:165] Memory required for data: 1064449500\nI0821 06:46:14.442947 32352 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:46:14.442957 32352 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:46:14.442963 32352 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:46:14.442973 32352 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.442983 32352 net.cpp:150] Setting up L2_b5_relu\nI0821 06:46:14.442991 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.442994 32352 net.cpp:165] Memory required for data: 1068545500\nI0821 06:46:14.442999 32352 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.443006 32352 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.443011 32352 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:46:14.443018 32352 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:46:14.443028 32352 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:46:14.443075 32352 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.443087 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.443094 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.443099 32352 net.cpp:165] Memory required for data: 1076737500\nI0821 06:46:14.443104 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:46:14.443117 32352 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:46:14.443123 32352 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:46:14.443132 32352 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:46:14.443600 32352 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:46:14.443614 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.443619 32352 net.cpp:165] Memory required for data: 1080833500\nI0821 06:46:14.443629 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:46:14.443639 32352 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:46:14.443646 32352 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:46:14.443663 32352 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:46:14.443972 32352 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:46:14.443995 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.444005 32352 net.cpp:165] Memory required for data: 1084929500\nI0821 06:46:14.444018 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:46:14.444028 32352 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:46:14.444034 32352 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:46:14.444042 32352 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.444103 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:46:14.444250 32352 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:46:14.444272 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.444281 32352 net.cpp:165] Memory required for data: 1089025500\nI0821 06:46:14.444298 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:46:14.444308 32352 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:46:14.444320 32352 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:46:14.444339 32352 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.444355 32352 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:46:14.444367 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.444375 32352 net.cpp:165] Memory required for data: 1093121500\nI0821 06:46:14.444384 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:46:14.444401 32352 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:46:14.444406 32352 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:46:14.444418 32352 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:46:14.444900 32352 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:46:14.444922 32352 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.444928 32352 net.cpp:165] Memory required for data: 1097217500\nI0821 06:46:14.444937 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:46:14.444949 32352 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:46:14.444957 32352 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:46:14.444967 32352 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:46:14.445215 32352 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:46:14.445230 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445235 32352 net.cpp:165] Memory required for data: 1101313500\nI0821 06:46:14.445245 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:46:14.445253 32352 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:46:14.445260 32352 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:46:14.445267 32352 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.445327 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:46:14.445472 32352 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:46:14.445484 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445489 32352 net.cpp:165] Memory required for data: 1105409500\nI0821 06:46:14.445498 32352 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:46:14.445510 32352 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:46:14.445516 32352 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:46:14.445523 32352 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:46:14.445531 32352 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:46:14.445557 32352 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:46:14.445566 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445571 32352 net.cpp:165] Memory required for data: 1109505500\nI0821 06:46:14.445576 32352 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:46:14.445587 32352 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:46:14.445593 32352 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:46:14.445600 32352 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.445610 32352 net.cpp:150] Setting up L2_b6_relu\nI0821 06:46:14.445616 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445621 32352 net.cpp:165] Memory required for data: 1113601500\nI0821 06:46:14.445626 32352 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.445632 32352 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.445637 32352 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:46:14.445644 32352 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:46:14.445660 32352 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:46:14.445709 32352 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.445721 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445727 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.445732 32352 net.cpp:165] Memory required for data: 1121793500\nI0821 06:46:14.445737 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:46:14.445751 32352 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:46:14.445757 32352 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:46:14.445766 32352 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:46:14.446236 32352 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:46:14.446251 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.446256 32352 net.cpp:165] Memory required for data: 1125889500\nI0821 06:46:14.446264 32352 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:46:14.446277 32352 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:46:14.446290 32352 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:46:14.446303 32352 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:46:14.446557 32352 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:46:14.446570 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.446575 32352 net.cpp:165] Memory required for data: 1129985500\nI0821 06:46:14.446585 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:46:14.446594 32352 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:46:14.446601 32352 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:46:14.446609 32352 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.446673 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:46:14.446823 32352 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:46:14.446836 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.446841 32352 net.cpp:165] Memory required for data: 1134081500\nI0821 06:46:14.446851 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:46:14.446861 32352 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:46:14.446867 32352 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:46:14.446874 32352 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.446884 32352 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:46:14.446892 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.446895 32352 net.cpp:165] Memory required for data: 1138177500\nI0821 06:46:14.446900 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:46:14.446915 32352 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:46:14.446923 32352 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:46:14.446933 32352 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:46:14.447396 32352 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:46:14.447409 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.447414 32352 net.cpp:165] Memory required for data: 1142273500\nI0821 06:46:14.447423 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:46:14.447435 32352 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:46:14.447441 32352 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:46:14.447453 32352 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:46:14.447710 32352 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:46:14.447727 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.447732 32352 net.cpp:165] Memory required for data: 1146369500\nI0821 06:46:14.447743 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:46:14.447752 32352 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:46:14.447758 32352 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:46:14.447765 32352 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.447821 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:46:14.447978 32352 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:46:14.447990 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.447995 32352 net.cpp:165] Memory required for data: 1150465500\nI0821 06:46:14.448004 32352 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:46:14.448014 32352 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:46:14.448022 32352 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:46:14.448029 32352 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:46:14.448037 32352 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:46:14.448065 32352 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:46:14.448073 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.448078 32352 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:46:14.448083 32352 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:46:14.448094 32352 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:46:14.448101 32352 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:46:14.448107 32352 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.448123 32352 net.cpp:150] Setting up L2_b7_relu\nI0821 06:46:14.448132 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.448135 32352 net.cpp:165] Memory required for data: 1158657500\nI0821 06:46:14.448140 32352 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.448148 32352 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.448153 32352 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:46:14.448160 32352 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:46:14.448170 32352 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:46:14.448218 32352 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.448230 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.448237 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.448241 32352 net.cpp:165] Memory required for data: 1166849500\nI0821 06:46:14.448246 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:46:14.448261 32352 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:46:14.448267 32352 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:46:14.448276 32352 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:46:14.448756 32352 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:46:14.448771 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.448776 32352 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:46:14.448786 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:46:14.448797 32352 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:46:14.448803 32352 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:46:14.448814 32352 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:46:14.449065 32352 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:46:14.449079 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.449084 32352 net.cpp:165] Memory required for data: 1175041500\nI0821 06:46:14.449095 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:46:14.449103 32352 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:46:14.449110 32352 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:46:14.449116 32352 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.449174 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:46:14.449324 32352 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:46:14.449337 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.449342 32352 net.cpp:165] Memory required for data: 1179137500\nI0821 06:46:14.449352 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:46:14.449362 32352 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:46:14.449368 32352 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:46:14.449375 32352 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.449384 32352 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:46:14.449391 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.449396 32352 net.cpp:165] Memory required for data: 1183233500\nI0821 06:46:14.449400 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:46:14.449414 32352 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:46:14.449420 32352 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:46:14.449429 32352 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:46:14.449899 32352 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:46:14.449914 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.449919 32352 net.cpp:165] Memory required for data: 1187329500\nI0821 06:46:14.449928 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:46:14.449944 32352 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:46:14.449959 32352 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:46:14.449967 32352 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:46:14.450224 32352 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:46:14.450240 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.450245 32352 net.cpp:165] Memory required for data: 1191425500\nI0821 06:46:14.450256 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:46:14.450265 32352 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:46:14.450271 32352 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:46:14.450278 32352 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.450335 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:46:14.450485 32352 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:46:14.450498 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.450503 32352 net.cpp:165] Memory required for data: 1195521500\nI0821 06:46:14.450512 32352 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:46:14.450520 32352 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:46:14.450527 32352 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:46:14.450533 32352 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:46:14.450544 32352 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:46:14.450572 32352 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:46:14.450582 32352 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:46:14.450585 32352 net.cpp:165] Memory required for data: 1199617500\nI0821 06:46:14.450590 32352 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:46:14.450601 32352 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:46:14.450608 32352 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:46:14.450614 32352 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.450623 32352 net.cpp:150] Setting up L2_b8_relu\nI0821 06:46:14.450630 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.450634 32352 net.cpp:165] Memory required for data: 1203713500\nI0821 06:46:14.450639 32352 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.450645 32352 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.450659 32352 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:46:14.450666 32352 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:46:14.450690 32352 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:46:14.450740 32352 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.450753 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.450760 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.450764 32352 net.cpp:165] Memory required for data: 1211905500\nI0821 06:46:14.450769 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:46:14.450781 32352 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:46:14.450788 32352 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:46:14.450801 32352 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:46:14.451267 32352 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:46:14.451280 32352 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:46:14.451285 32352 net.cpp:165] Memory required for data: 1216001500\nI0821 06:46:14.451293 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:46:14.451305 32352 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:46:14.451313 32352 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:46:14.451320 32352 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:46:14.451572 32352 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:46:14.451588 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.451593 32352 net.cpp:165] Memory required for data: 1220097500\nI0821 06:46:14.451611 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:46:14.451620 32352 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:46:14.451627 32352 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:46:14.451634 32352 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.451697 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:46:14.451853 32352 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:46:14.451866 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.451871 32352 net.cpp:165] Memory required for data: 1224193500\nI0821 06:46:14.451880 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:46:14.451889 32352 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:46:14.451894 32352 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:46:14.451905 32352 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.451915 32352 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:46:14.451921 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.451926 32352 net.cpp:165] Memory required for data: 1228289500\nI0821 06:46:14.451930 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:46:14.451941 32352 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:46:14.451947 32352 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:46:14.451958 32352 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:46:14.452421 32352 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:46:14.452436 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.452441 32352 net.cpp:165] Memory required for data: 1232385500\nI0821 06:46:14.452450 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:46:14.452458 32352 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:46:14.452466 32352 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:46:14.452476 32352 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:46:14.452736 32352 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:46:14.452750 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.452755 32352 net.cpp:165] Memory required for data: 1236481500\nI0821 06:46:14.452800 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:46:14.452812 32352 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:46:14.452819 32352 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:46:14.452826 32352 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.452885 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:46:14.453037 32352 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:46:14.453052 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.453058 32352 net.cpp:165] Memory required for data: 1240577500\nI0821 06:46:14.453068 32352 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:46:14.453076 32352 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:46:14.453083 32352 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:46:14.453089 32352 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:46:14.453097 32352 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:46:14.453127 32352 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:46:14.453137 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.453142 32352 net.cpp:165] Memory required for data: 1244673500\nI0821 06:46:14.453147 32352 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:46:14.453155 32352 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:46:14.453161 32352 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:46:14.453171 32352 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.453179 32352 net.cpp:150] Setting up L2_b9_relu\nI0821 06:46:14.453187 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.453191 32352 net.cpp:165] Memory required for data: 1248769500\nI0821 06:46:14.453197 32352 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.453210 32352 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.453217 32352 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:46:14.453227 32352 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:46:14.453238 32352 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:46:14.453282 32352 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.453294 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.453301 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.453305 32352 net.cpp:165] Memory required for data: 1256961500\nI0821 06:46:14.453310 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:46:14.453325 32352 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:46:14.453331 32352 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:46:14.453341 32352 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:46:14.453820 32352 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:46:14.453835 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.453840 32352 net.cpp:165] Memory required for data: 1257985500\nI0821 06:46:14.453850 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:46:14.453861 32352 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:46:14.453867 32352 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:46:14.453876 32352 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:46:14.454138 32352 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:46:14.454151 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.454156 32352 net.cpp:165] Memory required for data: 1259009500\nI0821 06:46:14.454166 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:46:14.454179 32352 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:46:14.454185 32352 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:46:14.454192 32352 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.454251 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:46:14.454404 32352 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:46:14.454416 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.454421 32352 net.cpp:165] Memory required for data: 1260033500\nI0821 06:46:14.454432 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:46:14.454442 32352 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:46:14.454448 32352 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:46:14.454455 32352 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.454464 32352 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:46:14.454471 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.454476 32352 net.cpp:165] Memory required for data: 1261057500\nI0821 06:46:14.454483 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:46:14.454495 32352 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:46:14.454500 32352 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:46:14.454512 32352 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:46:14.454996 32352 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:46:14.455011 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.455016 32352 net.cpp:165] Memory required for data: 1262081500\nI0821 06:46:14.455025 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:46:14.455034 32352 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:46:14.455040 32352 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:46:14.455051 32352 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:46:14.455312 32352 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:46:14.455325 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.455330 32352 net.cpp:165] Memory required for data: 1263105500\nI0821 06:46:14.455348 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:46:14.455358 32352 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:46:14.455363 32352 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:46:14.455371 32352 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.455430 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:46:14.455585 32352 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:46:14.455600 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.455605 32352 net.cpp:165] Memory required for data: 1264129500\nI0821 06:46:14.455615 32352 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:46:14.455623 32352 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:46:14.455631 32352 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:46:14.455638 32352 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:46:14.455682 32352 net.cpp:150] Setting up L3_b1_pool\nI0821 06:46:14.455693 32352 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:46:14.455698 32352 net.cpp:165] Memory required for data: 1265153500\nI0821 06:46:14.455703 32352 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:46:14.455711 32352 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:46:14.455718 32352 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:46:14.455724 32352 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:46:14.455732 32352 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:46:14.455767 32352 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:46:14.455776 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.455781 32352 net.cpp:165] Memory required for data: 1266177500\nI0821 06:46:14.455786 32352 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:46:14.455795 32352 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:46:14.455799 32352 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:46:14.455806 32352 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.455816 32352 net.cpp:150] Setting up L3_b1_relu\nI0821 06:46:14.455822 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.455827 32352 net.cpp:165] Memory required for data: 1267201500\nI0821 06:46:14.455832 32352 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:46:14.455840 32352 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:46:14.455852 32352 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:46:14.457092 32352 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:46:14.457109 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.457114 32352 net.cpp:165] Memory required for data: 1268225500\nI0821 06:46:14.457120 32352 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:46:14.457134 32352 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:46:14.457139 32352 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:46:14.457147 32352 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:46:14.457154 32352 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:46:14.457197 32352 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:46:14.457209 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.457214 32352 net.cpp:165] Memory required for data: 1270273500\nI0821 06:46:14.457221 32352 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.457227 32352 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.457233 32352 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:46:14.457244 32352 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:46:14.457254 32352 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:46:14.457304 32352 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.457319 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.457325 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.457337 32352 net.cpp:165] Memory required for data: 1274369500\nI0821 06:46:14.457343 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:46:14.457358 32352 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:46:14.457365 32352 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:46:14.457375 32352 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:46:14.459386 32352 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:46:14.459404 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.459410 32352 net.cpp:165] Memory required for data: 1276417500\nI0821 06:46:14.459419 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:46:14.459432 32352 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:46:14.459439 32352 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:46:14.459447 32352 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:46:14.459720 32352 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:46:14.459734 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.459739 32352 net.cpp:165] Memory required for data: 1278465500\nI0821 06:46:14.459750 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:46:14.459763 32352 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:46:14.459769 32352 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:46:14.459779 32352 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.459836 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:46:14.460000 32352 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:46:14.460013 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.460018 32352 net.cpp:165] Memory required for data: 1280513500\nI0821 06:46:14.460027 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:46:14.460036 32352 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:46:14.460041 32352 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:46:14.460052 32352 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.460062 32352 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:46:14.460069 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.460073 32352 net.cpp:165] Memory required for data: 1282561500\nI0821 06:46:14.460078 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:46:14.460090 32352 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:46:14.460096 32352 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:46:14.460108 32352 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:46:14.461132 32352 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:46:14.461146 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.461151 32352 net.cpp:165] Memory required for data: 1284609500\nI0821 06:46:14.461160 32352 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:46:14.461172 32352 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:46:14.461179 32352 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:46:14.461187 32352 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:46:14.461455 32352 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:46:14.461468 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.461473 32352 net.cpp:165] Memory required for data: 1286657500\nI0821 06:46:14.461484 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:46:14.461493 32352 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:46:14.461499 32352 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:46:14.461508 32352 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.461566 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:46:14.461730 32352 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:46:14.461745 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.461750 32352 net.cpp:165] Memory required for data: 1288705500\nI0821 06:46:14.461758 32352 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:46:14.461768 32352 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:46:14.461782 32352 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:46:14.461791 32352 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:46:14.461802 32352 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:46:14.461836 32352 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:46:14.461851 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.461858 32352 net.cpp:165] Memory required for data: 1290753500\nI0821 06:46:14.461863 32352 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:46:14.461870 32352 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:46:14.461876 32352 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:46:14.461884 32352 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.461892 32352 net.cpp:150] Setting up L3_b2_relu\nI0821 06:46:14.461899 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.461904 32352 net.cpp:165] Memory required for data: 1292801500\nI0821 06:46:14.461908 32352 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.461920 32352 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.461925 32352 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:46:14.461932 32352 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:46:14.461942 32352 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:46:14.461988 32352 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.462004 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.462011 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.462016 32352 net.cpp:165] Memory required for data: 1296897500\nI0821 06:46:14.462021 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:46:14.462033 32352 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:46:14.462039 32352 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:46:14.462049 32352 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:46:14.463075 32352 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:46:14.463094 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.463099 32352 net.cpp:165] Memory required for data: 1298945500\nI0821 06:46:14.463109 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:46:14.463117 32352 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:46:14.463124 32352 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:46:14.463132 32352 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:46:14.463397 32352 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:46:14.463409 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.463414 32352 net.cpp:165] Memory required for data: 1300993500\nI0821 06:46:14.463425 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:46:14.463436 32352 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:46:14.463443 32352 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:46:14.463454 32352 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.463511 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:46:14.463676 32352 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:46:14.463690 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.463696 32352 net.cpp:165] Memory required for data: 1303041500\nI0821 06:46:14.463704 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:46:14.463712 32352 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:46:14.463718 32352 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:46:14.463728 32352 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.463738 32352 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:46:14.463745 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.463757 32352 net.cpp:165] Memory required for data: 1305089500\nI0821 06:46:14.463762 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:46:14.463776 32352 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:46:14.463783 32352 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:46:14.463791 32352 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:46:14.464810 32352 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:46:14.464825 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.464830 32352 net.cpp:165] Memory required for data: 1307137500\nI0821 06:46:14.464839 32352 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:46:14.464853 32352 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:46:14.464859 32352 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:46:14.464867 32352 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:46:14.465138 32352 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:46:14.465152 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465157 32352 net.cpp:165] Memory required for data: 1309185500\nI0821 06:46:14.465167 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:46:14.465176 32352 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:46:14.465183 32352 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:46:14.465189 32352 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.465250 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:46:14.465406 32352 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:46:14.465420 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465425 32352 net.cpp:165] Memory required for data: 1311233500\nI0821 06:46:14.465433 32352 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:46:14.465442 32352 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:46:14.465448 32352 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:46:14.465456 32352 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:46:14.465467 32352 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:46:14.465502 32352 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:46:14.465517 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465522 32352 net.cpp:165] Memory required for data: 1313281500\nI0821 06:46:14.465526 32352 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:46:14.465534 32352 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:46:14.465539 32352 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:46:14.465546 32352 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.465555 32352 net.cpp:150] Setting up L3_b3_relu\nI0821 06:46:14.465562 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465567 32352 net.cpp:165] Memory required for data: 1315329500\nI0821 06:46:14.465571 32352 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.465581 32352 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.465587 32352 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:46:14.465595 32352 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:46:14.465605 32352 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:46:14.465658 32352 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.465672 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465678 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.465683 32352 net.cpp:165] Memory required for data: 1319425500\nI0821 06:46:14.465688 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:46:14.465699 32352 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:46:14.465705 32352 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:46:14.465718 32352 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:46:14.466748 32352 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:46:14.466764 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.466769 32352 net.cpp:165] Memory required for data: 1321473500\nI0821 06:46:14.466778 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:46:14.466787 32352 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:46:14.466794 32352 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:46:14.466805 32352 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:46:14.467077 32352 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:46:14.467093 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.467099 32352 net.cpp:165] Memory required for data: 1323521500\nI0821 06:46:14.467109 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:46:14.467118 32352 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:46:14.467125 32352 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:46:14.467133 32352 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.467192 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:46:14.467355 32352 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:46:14.467367 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.467372 32352 net.cpp:165] Memory required for data: 1325569500\nI0821 06:46:14.467382 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:46:14.467391 32352 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:46:14.467396 32352 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:46:14.467406 32352 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.467417 32352 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:46:14.467423 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.467428 32352 net.cpp:165] Memory required for data: 1327617500\nI0821 06:46:14.467432 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:46:14.467447 32352 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:46:14.467453 32352 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:46:14.467461 32352 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:46:14.468488 32352 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:46:14.468503 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.468508 32352 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:46:14.468518 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:46:14.468528 32352 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:46:14.468535 32352 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:46:14.468546 32352 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:46:14.468822 32352 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:46:14.468837 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.468842 32352 net.cpp:165] Memory required for data: 1331713500\nI0821 06:46:14.468852 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:46:14.468861 32352 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:46:14.468868 32352 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:46:14.468874 32352 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.468936 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:46:14.469100 32352 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:46:14.469113 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.469118 32352 net.cpp:165] Memory required for data: 1333761500\nI0821 06:46:14.469127 32352 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:46:14.469136 32352 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:46:14.469142 32352 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:46:14.469149 32352 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:46:14.469161 32352 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:46:14.469193 32352 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:46:14.469213 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.469218 32352 net.cpp:165] Memory required for data: 1335809500\nI0821 06:46:14.469223 32352 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:46:14.469231 32352 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:46:14.469238 32352 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:46:14.469244 32352 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.469254 32352 net.cpp:150] Setting up L3_b4_relu\nI0821 06:46:14.469260 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.469265 32352 net.cpp:165] Memory required for data: 1337857500\nI0821 06:46:14.469269 32352 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.469280 32352 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.469285 32352 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:46:14.469293 32352 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:46:14.469302 32352 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:46:14.469352 32352 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.469364 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.469370 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.469375 32352 net.cpp:165] Memory required for data: 1341953500\nI0821 06:46:14.469380 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:46:14.469391 32352 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:46:14.469398 32352 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:46:14.469410 32352 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:46:14.470443 32352 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:46:14.470459 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.470464 32352 net.cpp:165] Memory required for data: 1344001500\nI0821 06:46:14.470474 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:46:14.470482 32352 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:46:14.470489 32352 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:46:14.470500 32352 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:46:14.471771 32352 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:46:14.471787 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.471793 32352 net.cpp:165] Memory required for data: 1346049500\nI0821 06:46:14.471806 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:46:14.471815 32352 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:46:14.471822 32352 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:46:14.471830 32352 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.471894 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:46:14.472054 32352 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:46:14.472066 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.472071 32352 net.cpp:165] Memory required for data: 1348097500\nI0821 06:46:14.472080 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:46:14.472090 32352 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:46:14.472095 32352 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:46:14.472105 32352 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.472116 32352 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:46:14.472122 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.472127 32352 net.cpp:165] Memory required for data: 1350145500\nI0821 06:46:14.472132 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:46:14.472146 32352 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:46:14.472153 32352 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:46:14.472162 32352 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:46:14.474195 32352 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:46:14.474211 32352 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:46:14.474217 32352 net.cpp:165] Memory required for data: 1352193500\nI0821 06:46:14.474226 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:46:14.474241 32352 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:46:14.474247 32352 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:46:14.474256 32352 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:46:14.474515 32352 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:46:14.474529 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.474534 32352 net.cpp:165] Memory required for data: 1354241500\nI0821 06:46:14.474545 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:46:14.474552 32352 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:46:14.474560 32352 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:46:14.474566 32352 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.474625 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:46:14.474788 32352 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:46:14.474803 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.474808 32352 net.cpp:165] Memory required for data: 1356289500\nI0821 06:46:14.474817 32352 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:46:14.474828 32352 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:46:14.474833 32352 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:46:14.474840 32352 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:46:14.474851 32352 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:46:14.474884 32352 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:46:14.474900 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.474905 32352 net.cpp:165] Memory required for data: 1358337500\nI0821 06:46:14.474910 32352 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:46:14.474917 32352 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:46:14.474923 32352 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:46:14.474931 32352 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.474941 32352 net.cpp:150] Setting up L3_b5_relu\nI0821 06:46:14.474947 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.474951 32352 net.cpp:165] Memory required for data: 1360385500\nI0821 06:46:14.474956 32352 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.474967 32352 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.474972 32352 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:46:14.474979 32352 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:46:14.474989 32352 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:46:14.475037 32352 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.475049 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.475055 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.475059 32352 net.cpp:165] Memory required for data: 1364481500\nI0821 06:46:14.475064 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:46:14.475076 32352 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:46:14.475082 32352 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:46:14.475095 32352 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:46:14.476115 32352 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:46:14.476130 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.476135 32352 net.cpp:165] Memory required for data: 1366529500\nI0821 06:46:14.476145 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:46:14.476155 
32352 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:46:14.476168 32352 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:46:14.476182 32352 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:46:14.476438 32352 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:46:14.476454 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.476460 32352 net.cpp:165] Memory required for data: 1368577500\nI0821 06:46:14.476471 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:46:14.476480 32352 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:46:14.476486 32352 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:46:14.476495 32352 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.476549 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:46:14.476711 32352 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:46:14.476725 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.476730 32352 net.cpp:165] Memory required for data: 1370625500\nI0821 06:46:14.476739 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:46:14.476748 32352 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:46:14.476754 32352 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:46:14.476764 32352 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.476774 32352 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:46:14.476781 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.476785 32352 net.cpp:165] Memory required for data: 1372673500\nI0821 06:46:14.476790 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:46:14.476804 32352 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:46:14.476810 32352 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:46:14.476819 32352 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:46:14.477828 32352 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:46:14.477843 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.477847 32352 net.cpp:165] Memory required for data: 1374721500\nI0821 06:46:14.477856 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:46:14.477870 32352 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:46:14.477877 32352 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:46:14.477888 32352 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:46:14.478148 32352 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:46:14.478162 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478166 32352 net.cpp:165] Memory required for data: 1376769500\nI0821 06:46:14.478178 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:46:14.478185 32352 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:46:14.478193 32352 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:46:14.478202 32352 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.478260 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:46:14.478412 32352 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:46:14.478425 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478430 32352 net.cpp:165] Memory required for data: 1378817500\nI0821 06:46:14.478438 32352 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:46:14.478447 32352 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:46:14.478453 32352 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:46:14.478461 32352 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:46:14.478471 32352 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:46:14.478507 32352 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:46:14.478519 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478524 32352 net.cpp:165] Memory required for data: 1380865500\nI0821 06:46:14.478529 32352 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:46:14.478536 32352 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:46:14.478543 32352 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:46:14.478557 32352 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.478569 32352 net.cpp:150] Setting up L3_b6_relu\nI0821 06:46:14.478575 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478579 32352 net.cpp:165] Memory required for data: 1382913500\nI0821 06:46:14.478585 32352 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.478595 32352 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.478600 32352 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:46:14.478608 32352 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:46:14.478618 32352 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:46:14.478673 32352 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.478687 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478693 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.478698 32352 net.cpp:165] Memory required for data: 1387009500\nI0821 06:46:14.478703 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:46:14.478714 32352 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:46:14.478720 32352 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:46:14.478732 32352 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:46:14.479753 32352 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:46:14.479768 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.479773 32352 net.cpp:165] Memory required for data: 1389057500\nI0821 06:46:14.479782 32352 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:46:14.479791 32352 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:46:14.479797 32352 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:46:14.479809 32352 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:46:14.480069 32352 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:46:14.480085 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.480090 32352 net.cpp:165] Memory required for data: 1391105500\nI0821 06:46:14.480100 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:46:14.480110 32352 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:46:14.480116 32352 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:46:14.480123 32352 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.480180 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:46:14.480341 32352 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:46:14.480355 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.480360 32352 net.cpp:165] Memory required for data: 1393153500\nI0821 06:46:14.480368 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:46:14.480403 32352 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:46:14.480412 32352 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:46:14.480420 32352 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.480430 32352 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:46:14.480437 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.480443 32352 net.cpp:165] Memory required for data: 1395201500\nI0821 06:46:14.480448 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:46:14.480459 32352 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:46:14.480465 32352 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:46:14.480476 32352 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:46:14.481505 32352 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:46:14.481520 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.481525 32352 net.cpp:165] Memory required for data: 1397249500\nI0821 06:46:14.481534 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:46:14.481544 32352 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:46:14.481557 32352 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:46:14.481570 32352 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:46:14.481844 32352 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:46:14.481860 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.481866 32352 net.cpp:165] Memory required for data: 1399297500\nI0821 06:46:14.481876 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:46:14.481885 32352 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:46:14.481891 32352 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:46:14.481899 32352 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.481957 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:46:14.482112 32352 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:46:14.482125 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.482131 32352 net.cpp:165] Memory required for data: 1401345500\nI0821 06:46:14.482138 32352 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:46:14.482151 32352 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:46:14.482157 32352 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:46:14.482164 32352 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:46:14.482172 32352 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:46:14.482209 32352 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:46:14.482220 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.482225 32352 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:46:14.482230 32352 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:46:14.482239 32352 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:46:14.482244 32352 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:46:14.482251 32352 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.482260 32352 net.cpp:150] Setting up L3_b7_relu\nI0821 06:46:14.482267 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.482271 32352 net.cpp:165] Memory required for data: 1405441500\nI0821 06:46:14.482277 32352 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.482283 32352 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.482290 32352 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:46:14.482300 32352 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:46:14.482309 32352 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:46:14.482354 32352 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.482365 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.482372 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.482376 32352 net.cpp:165] Memory required for data: 1409537500\nI0821 06:46:14.482381 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:46:14.482395 32352 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:46:14.482403 32352 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:46:14.482411 32352 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:46:14.483438 32352 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:46:14.483453 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.483458 32352 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:46:14.483466 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:46:14.483475 32352 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:46:14.483485 32352 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:46:14.483494 32352 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:46:14.483758 32352 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:46:14.483772 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.483777 32352 net.cpp:165] Memory required for data: 1413633500\nI0821 06:46:14.483796 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:46:14.483805 32352 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:46:14.483811 32352 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:46:14.483822 32352 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.483881 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:46:14.484037 32352 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:46:14.484050 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.484055 32352 net.cpp:165] Memory required for data: 1415681500\nI0821 06:46:14.484064 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:46:14.484076 32352 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:46:14.484081 32352 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:46:14.484089 32352 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.484098 32352 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:46:14.484105 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.484110 32352 net.cpp:165] Memory required for data: 1417729500\nI0821 06:46:14.484114 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:46:14.484128 32352 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:46:14.484134 32352 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:46:14.484145 32352 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:46:14.485162 32352 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:46:14.485177 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.485182 32352 net.cpp:165] Memory required for data: 1419777500\nI0821 06:46:14.485190 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:46:14.485199 32352 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:46:14.485205 32352 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:46:14.485216 32352 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:46:14.485512 32352 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:46:14.485534 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.485543 32352 net.cpp:165] Memory required for data: 1421825500\nI0821 06:46:14.485563 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:46:14.485579 32352 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:46:14.485589 32352 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:46:14.485602 32352 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.485678 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:46:14.485841 32352 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:46:14.485853 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.485858 32352 net.cpp:165] Memory required for data: 1423873500\nI0821 06:46:14.485868 32352 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:46:14.485880 32352 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:46:14.485888 32352 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:46:14.485894 32352 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:46:14.485903 32352 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:46:14.485939 32352 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:46:14.485949 32352 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:46:14.485954 32352 net.cpp:165] Memory required for data: 1425921500\nI0821 06:46:14.485960 32352 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:46:14.485967 32352 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:46:14.485973 32352 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:46:14.485980 32352 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.485990 32352 net.cpp:150] Setting up L3_b8_relu\nI0821 06:46:14.485996 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.486001 32352 net.cpp:165] Memory required for data: 1427969500\nI0821 06:46:14.486006 32352 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.486021 32352 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.486027 32352 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:46:14.486037 32352 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:46:14.486047 32352 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:46:14.486093 32352 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.486105 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.486112 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.486117 32352 net.cpp:165] Memory required for data: 1432065500\nI0821 06:46:14.486122 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:46:14.486138 32352 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:46:14.486145 32352 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:46:14.486155 32352 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:46:14.488173 32352 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:46:14.488191 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:46:14.488198 32352 net.cpp:165] Memory required for data: 1434113500\nI0821 06:46:14.488206 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:46:14.488219 32352 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:46:14.488226 32352 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:46:14.488234 32352 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:46:14.488507 32352 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:46:14.488521 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.488526 32352 net.cpp:165] Memory required for data: 1436161500\nI0821 06:46:14.488536 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:46:14.488545 32352 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:46:14.488551 32352 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:46:14.488559 32352 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.488620 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:46:14.488783 32352 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:46:14.488801 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.488806 32352 net.cpp:165] Memory required for data: 1438209500\nI0821 06:46:14.488816 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:46:14.488823 32352 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:46:14.488829 32352 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:46:14.488837 32352 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.488847 32352 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:46:14.488853 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.488857 32352 net.cpp:165] Memory required for data: 1440257500\nI0821 06:46:14.488862 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:46:14.488876 32352 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:46:14.488883 32352 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:46:14.488891 32352 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:46:14.489920 32352 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:46:14.489935 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.489940 32352 net.cpp:165] Memory required for data: 1442305500\nI0821 06:46:14.489949 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:46:14.489961 32352 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:46:14.489969 32352 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:46:14.489976 32352 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:46:14.490241 32352 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:46:14.490253 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.490258 32352 net.cpp:165] Memory required for data: 1444353500\nI0821 06:46:14.490278 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:46:14.490296 32352 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:46:14.490303 32352 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:46:14.490310 32352 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.490370 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:46:14.490528 32352 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:46:14.490541 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.490546 32352 net.cpp:165] Memory required for data: 1446401500\nI0821 06:46:14.490556 32352 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:46:14.490567 32352 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:46:14.490574 32352 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:46:14.490581 32352 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:46:14.490589 32352 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:46:14.490625 32352 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:46:14.490638 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.490643 32352 net.cpp:165] Memory required for data: 1448449500\nI0821 06:46:14.490648 32352 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:46:14.490662 32352 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:46:14.490669 32352 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:46:14.490679 32352 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.490689 32352 net.cpp:150] Setting up L3_b9_relu\nI0821 06:46:14.490695 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.490700 32352 net.cpp:165] Memory required for data: 1450497500\nI0821 06:46:14.490705 32352 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:46:14.490713 32352 net.cpp:100] Creating Layer post_pool\nI0821 06:46:14.490720 32352 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:46:14.490728 32352 net.cpp:408] post_pool -> post_pool\nI0821 06:46:14.490762 32352 net.cpp:150] Setting up post_pool\nI0821 06:46:14.490777 32352 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:46:14.490782 32352 net.cpp:165] Memory required for data: 1450529500\nI0821 06:46:14.490787 32352 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:46:14.490873 32352 net.cpp:100] Creating Layer post_FC\nI0821 06:46:14.490887 32352 net.cpp:434] post_FC <- post_pool\nI0821 06:46:14.490897 32352 net.cpp:408] post_FC -> post_FC_top\nI0821 06:46:14.491142 32352 net.cpp:150] Setting up post_FC\nI0821 06:46:14.491161 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.491168 32352 net.cpp:165] Memory required for data: 1450534500\nI0821 06:46:14.491176 32352 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:46:14.491185 32352 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:46:14.491191 32352 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:46:14.491199 32352 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:46:14.491209 32352 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:46:14.491261 32352 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:46:14.491272 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.491279 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.491283 32352 net.cpp:165] Memory required for data: 1450544500\nI0821 06:46:14.491289 32352 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:46:14.491333 32352 net.cpp:100] Creating Layer accuracy\nI0821 06:46:14.491346 32352 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:46:14.491358 32352 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:46:14.491366 32352 net.cpp:408] accuracy -> accuracy\nI0821 06:46:14.491411 32352 net.cpp:150] Setting up accuracy\nI0821 06:46:14.491425 32352 net.cpp:157] Top shape: (1)\nI0821 06:46:14.491430 32352 net.cpp:165] Memory required for data: 1450544504\nI0821 06:46:14.491436 32352 layer_factory.hpp:77] Creating layer loss\nI0821 06:46:14.491452 32352 net.cpp:100] Creating Layer loss\nI0821 06:46:14.491459 32352 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:46:14.491466 32352 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:46:14.491473 32352 net.cpp:408] loss -> loss\nI0821 06:46:14.491523 32352 layer_factory.hpp:77] Creating layer loss\nI0821 06:46:14.491695 32352 net.cpp:150] Setting up loss\nI0821 06:46:14.491714 32352 net.cpp:157] Top shape: (1)\nI0821 06:46:14.491720 32352 net.cpp:160]     with loss weight 1\nI0821 06:46:14.491799 32352 net.cpp:165] Memory required for data: 1450544508\nI0821 06:46:14.491809 32352 net.cpp:226] loss needs backward computation.\nI0821 06:46:14.491816 32352 net.cpp:228] accuracy does not need backward computation.\nI0821 06:46:14.491822 32352 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:46:14.491827 32352 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:46:14.491832 32352 net.cpp:226] post_pool needs backward computation.\nI0821 06:46:14.491837 32352 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:46:14.491842 32352 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.491847 32352 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.491852 32352 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.491858 32352 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.491863 32352 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.491868 32352 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.491873 32352 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.491878 32352 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.491883 32352 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:46:14.491888 32352 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:46:14.491892 32352 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.491899 32352 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.491904 32352 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.491909 32352 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.491914 32352 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.491919 32352 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:46:14.491924 32352 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.491928 32352 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.491933 32352 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:46:14.491938 32352 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:46:14.491943 32352 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:46:14.491950 32352 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.491955 32352 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.491960 32352 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.491964 32352 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.491969 32352 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.491973 32352 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.491978 32352 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.491984 32352 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:46:14.491989 32352 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:46:14.491994 32352 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.491999 32352 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.492004 32352 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.492010 32352 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.492015 32352 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.492028 32352 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.492034 32352 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.492039 32352 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.492044 32352 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:46:14.492050 32352 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:46:14.492054 32352 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:46:14.492060 32352 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.492065 32352 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.492070 32352 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.492076 32352 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.492081 32352 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.492086 32352 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.492091 32352 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.492096 32352 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:46:14.492101 32352 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:46:14.492106 32352 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.492112 32352 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.492117 32352 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.492122 32352 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.492127 32352 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.492132 32352 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.492137 32352 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.492142 32352 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.492147 32352 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:46:14.492153 32352 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:46:14.492158 32352 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:46:14.492163 32352 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.492168 32352 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.492173 32352 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.492178 32352 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.492183 32352 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.492188 
32352 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.492197 32352 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.492203 32352 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:46:14.492209 32352 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:46:14.492214 32352 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.492220 32352 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.492225 32352 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.492230 32352 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.492235 32352 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.492240 32352 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.492245 32352 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.492251 32352 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.492256 32352 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:46:14.492261 32352 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:46:14.492269 32352 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:46:14.492274 32352 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:46:14.492283 32352 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.492290 32352 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:46:14.492296 32352 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.492301 32352 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.492307 32352 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.492312 32352 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.492317 32352 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:46:14.492322 32352 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.492327 32352 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.492333 32352 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:46:14.492338 32352 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:46:14.492343 32352 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.492349 32352 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.492354 32352 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.492360 32352 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.492365 32352 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.492370 32352 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.492375 32352 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.492380 32352 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.492386 32352 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:46:14.492391 32352 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:46:14.492398 32352 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.492403 32352 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.492408 32352 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.492413 32352 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.492419 32352 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.492424 32352 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:46:14.492429 32352 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.492434 32352 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.492439 32352 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:46:14.492445 32352 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:46:14.492473 32352 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:46:14.492482 32352 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.492491 32352 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.492497 32352 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.492503 32352 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.492508 32352 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.492513 32352 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.492518 32352 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.492524 32352 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:46:14.492530 32352 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:46:14.492535 32352 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.492542 32352 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.492547 32352 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.492552 32352 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.492558 32352 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.492563 32352 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.492573 32352 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.492579 32352 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.492585 32352 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:46:14.492590 32352 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:46:14.492595 32352 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:46:14.492601 32352 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.492606 32352 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.492612 32352 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.492617 32352 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.492622 32352 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.492628 32352 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.492633 32352 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.492640 32352 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:46:14.492645 32352 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:46:14.492656 32352 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.492663 32352 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.492669 32352 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.492674 32352 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.492681 32352 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.492686 32352 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.492691 32352 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.492697 32352 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.492703 32352 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:46:14.492708 32352 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:46:14.492714 32352 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:46:14.492720 32352 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.492725 32352 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.492732 32352 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.492736 32352 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.492741 32352 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.492748 32352 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.492753 32352 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.492758 32352 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:46:14.492763 32352 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:46:14.492769 32352 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.492779 32352 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.492784 32352 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.492789 32352 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.492795 32352 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.492800 32352 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.492805 32352 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.492811 32352 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.492816 32352 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:46:14.492822 32352 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:46:14.492828 32352 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:46:14.492833 32352 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:46:14.492839 32352 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.492851 32352 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:46:14.492858 32352 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.492863 32352 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.492869 32352 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.492874 32352 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.492880 32352 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:46:14.492885 32352 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.492894 32352 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.492900 32352 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:46:14.492907 32352 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:46:14.492911 32352 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.492918 32352 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.492923 32352 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.492928 32352 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.492934 32352 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.492939 32352 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.492944 32352 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.492950 32352 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.492956 32352 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:46:14.492961 32352 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:46:14.492967 32352 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.492974 32352 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.492979 32352 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.492985 32352 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.492990 32352 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.492995 32352 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:46:14.493001 32352 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.493006 32352 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.493012 32352 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:46:14.493018 32352 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:46:14.493023 32352 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:46:14.493029 32352 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.493036 32352 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.493041 32352 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.493046 32352 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.493052 32352 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.493057 32352 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.493062 32352 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.493068 32352 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:46:14.493074 32352 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:46:14.493079 32352 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.493086 32352 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.493091 32352 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.493098 32352 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.493103 32352 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.493108 32352 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.493113 32352 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.493124 32352 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.493130 32352 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:46:14.493136 32352 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:46:14.493142 32352 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:46:14.493149 32352 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.493155 32352 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.493160 32352 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.493165 32352 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.493170 32352 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.493175 32352 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.493181 32352 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.493187 32352 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:46:14.493192 32352 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:46:14.493198 32352 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.493204 32352 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.493211 32352 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.493216 32352 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.493221 32352 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.493227 32352 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.493232 32352 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.493238 32352 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.493244 32352 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:46:14.493250 32352 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:46:14.493255 32352 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:46:14.493261 32352 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.493268 32352 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.493273 32352 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.493278 32352 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.493284 32352 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.493289 32352 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.493294 32352 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.493300 32352 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:46:14.493306 32352 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:46:14.493312 32352 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.493319 32352 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.493324 32352 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.493330 32352 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.493335 32352 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.493340 32352 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.493346 32352 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.493351 32352 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.493357 32352 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:46:14.493362 32352 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:46:14.493368 32352 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.493374 32352 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.493381 32352 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.493386 32352 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.493397 32352 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.493403 32352 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:46:14.493409 32352 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.493415 32352 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.493420 32352 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:46:14.493427 32352 net.cpp:226] pre_relu needs backward computation.\nI0821 06:46:14.493432 32352 net.cpp:226] pre_scale needs backward computation.\nI0821 06:46:14.493436 32352 net.cpp:226] pre_bn needs backward computation.\nI0821 06:46:14.493443 32352 net.cpp:226] pre_conv needs backward computation.\nI0821 06:46:14.493449 32352 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:46:14.493456 32352 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:46:14.493460 32352 net.cpp:270] This network produces output accuracy\nI0821 06:46:14.493468 32352 net.cpp:270] This network produces output loss\nI0821 06:46:14.493844 32352 net.cpp:283] Network initialization done.\nI0821 06:46:14.503473 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:14.503515 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:14.503576 32352 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:46:14.503965 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:46:14.504006 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:46:14.504019 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:46:14.504029 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:46:14.504039 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:46:14.504047 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:46:14.504056 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:46:14.504065 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:46:14.504076 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:46:14.504083 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:46:14.504093 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:46:14.504101 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:46:14.504110 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:46:14.504119 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:46:14.504128 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:46:14.504137 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:46:14.504146 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:46:14.504154 32352 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:46:14.504164 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:46:14.504184 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:46:14.504194 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:46:14.504202 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:46:14.504215 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:46:14.504223 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:46:14.504232 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:46:14.504240 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:46:14.504250 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:46:14.504258 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:46:14.504267 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:46:14.504276 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:46:14.504284 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:46:14.504293 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:46:14.504302 32352 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:46:14.504310 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:46:14.504319 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:46:14.504328 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:46:14.504336 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:46:14.504345 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:46:14.504354 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:46:14.504364 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:46:14.504374 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:46:14.504384 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:46:14.504391 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:46:14.504400 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:46:14.504410 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:46:14.504418 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:46:14.504427 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:46:14.504436 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:46:14.504444 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:46:14.504452 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:46:14.504472 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:46:14.504482 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:46:14.504492 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:46:14.504499 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:46:14.504508 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:46:14.504516 32352 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:46:14.506181 32352 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:46:14.507797 32352 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:46:14.507994 32352 net.cpp:100] Creating Layer dataLayer\nI0821 06:46:14.508010 32352 net.cpp:408] dataLayer -> data_top\nI0821 06:46:14.508028 32352 net.cpp:408] dataLayer -> label\nI0821 06:46:14.508040 32352 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:46:14.517771 32359 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:46:14.518025 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:14.525383 32352 net.cpp:150] Setting up dataLayer\nI0821 06:46:14.525410 32352 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:46:14.525419 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.525424 32352 net.cpp:165] Memory required for data: 1536500\nI0821 06:46:14.525431 32352 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:46:14.525442 32352 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:46:14.525451 32352 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:46:14.525461 32352 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:46:14.525475 32352 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:46:14.525549 32352 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:46:14.525563 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.525569 32352 net.cpp:157] Top shape: 125 (125)\nI0821 06:46:14.525576 32352 net.cpp:165] Memory required for data: 1537500\nI0821 06:46:14.525583 32352 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:46:14.525599 32352 net.cpp:100] Creating Layer pre_conv\nI0821 06:46:14.525606 32352 net.cpp:434] pre_conv <- data_top\nI0821 06:46:14.525620 32352 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:46:14.526157 32352 net.cpp:150] Setting up pre_conv\nI0821 06:46:14.526185 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.526191 32352 net.cpp:165] Memory required for data: 9729500\nI0821 06:46:14.526209 32352 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:46:14.526222 32352 net.cpp:100] Creating Layer pre_bn\nI0821 06:46:14.526231 32352 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:46:14.526240 32352 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:46:14.526613 32352 net.cpp:150] Setting up pre_bn\nI0821 06:46:14.526628 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.526634 32352 net.cpp:165] Memory required for data: 17921500\nI0821 06:46:14.526669 32352 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:46:14.526681 32352 net.cpp:100] Creating Layer pre_scale\nI0821 06:46:14.526687 32352 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:46:14.526701 32352 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:46:14.526772 32352 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:46:14.526973 32352 net.cpp:150] Setting up pre_scale\nI0821 06:46:14.526989 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.526995 32352 net.cpp:165] Memory required for data: 26113500\nI0821 06:46:14.527004 32352 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:46:14.527014 32352 net.cpp:100] Creating Layer pre_relu\nI0821 06:46:14.527019 32352 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:46:14.527026 32352 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:46:14.527035 32352 net.cpp:150] Setting up pre_relu\nI0821 06:46:14.527042 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.527048 32352 net.cpp:165] Memory required for data: 
34305500\nI0821 06:46:14.527055 32352 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:46:14.527091 32352 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:46:14.527097 32352 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:46:14.527108 32352 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:46:14.527119 32352 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:46:14.527178 32352 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:46:14.527190 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.527199 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.527202 32352 net.cpp:165] Memory required for data: 50689500\nI0821 06:46:14.527207 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:46:14.527220 32352 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:46:14.527225 32352 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:46:14.527236 32352 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:46:14.527631 32352 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:46:14.527648 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.527662 32352 net.cpp:165] Memory required for data: 58881500\nI0821 06:46:14.527676 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:46:14.527691 32352 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:46:14.527698 32352 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:46:14.527705 32352 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:46:14.528019 32352 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:46:14.528038 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.528044 32352 net.cpp:165] Memory required for data: 67073500\nI0821 06:46:14.528055 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:46:14.528064 32352 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:46:14.528072 32352 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:46:14.528080 32352 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.528151 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:46:14.528519 32352 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:46:14.528534 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.528542 32352 net.cpp:165] Memory required for data: 75265500\nI0821 06:46:14.528560 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:46:14.528574 32352 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:46:14.528584 32352 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:46:14.528592 32352 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.528604 32352 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:46:14.528610 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.528619 32352 net.cpp:165] Memory required for data: 83457500\nI0821 06:46:14.528623 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:46:14.528637 32352 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:46:14.528645 32352 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:46:14.528662 32352 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:46:14.529088 32352 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:46:14.529103 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.529109 32352 net.cpp:165] Memory required for data: 91649500\nI0821 06:46:14.529119 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:46:14.529127 32352 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:46:14.529134 32352 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:46:14.529142 32352 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:46:14.529451 32352 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:46:14.529465 32352 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.529470 32352 net.cpp:165] Memory required for data: 99841500\nI0821 06:46:14.529489 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:46:14.529500 32352 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:46:14.529508 32352 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:46:14.529520 32352 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.529587 32352 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:46:14.529779 32352 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:46:14.529793 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.529803 32352 net.cpp:165] Memory required for data: 108033500\nI0821 06:46:14.529811 32352 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:46:14.529824 32352 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:46:14.529829 32352 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:46:14.529839 32352 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:46:14.529847 32352 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:46:14.529891 32352 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:46:14.529901 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.529906 32352 net.cpp:165] Memory required for data: 116225500\nI0821 06:46:14.529911 32352 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:46:14.529918 32352 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:46:14.529924 32352 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:46:14.529934 32352 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.529947 32352 net.cpp:150] Setting up L1_b1_relu\nI0821 06:46:14.529954 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.529959 32352 net.cpp:165] Memory required for data: 124417500\nI0821 06:46:14.529963 32352 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.529974 32352 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.529981 32352 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:46:14.529989 32352 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:46:14.529999 32352 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:46:14.530052 32352 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:46:14.530066 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.530084 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.530091 32352 net.cpp:165] Memory required for data: 140801500\nI0821 06:46:14.530095 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:46:14.530108 32352 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:46:14.530117 32352 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:46:14.530129 32352 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:46:14.530529 32352 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:46:14.530544 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.530550 32352 net.cpp:165] Memory required for data: 148993500\nI0821 06:46:14.530560 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:46:14.530570 32352 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:46:14.530578 32352 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:46:14.530591 32352 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:46:14.530926 32352 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:46:14.530947 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.530956 32352 net.cpp:165] Memory required for data: 157185500\nI0821 06:46:14.530967 32352 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:46:14.530975 32352 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:46:14.530982 32352 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:46:14.530992 32352 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.531057 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:46:14.531275 32352 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:46:14.531289 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.531296 32352 net.cpp:165] Memory required for data: 165377500\nI0821 06:46:14.531307 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:46:14.531318 32352 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:46:14.531324 32352 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:46:14.531334 32352 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.531345 32352 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:46:14.531352 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.531358 32352 net.cpp:165] Memory required for data: 173569500\nI0821 06:46:14.531361 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:46:14.531378 32352 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:46:14.531383 32352 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:46:14.531405 32352 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:46:14.531939 32352 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:46:14.531958 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.531965 32352 net.cpp:165] Memory required for data: 181761500\nI0821 06:46:14.531973 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:46:14.531985 32352 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:46:14.531993 32352 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:46:14.532004 32352 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:46:14.532330 32352 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:46:14.532344 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532352 32352 net.cpp:165] Memory required for data: 189953500\nI0821 06:46:14.532374 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:46:14.532383 32352 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:46:14.532389 32352 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:46:14.532397 32352 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.532480 32352 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:46:14.532682 32352 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:46:14.532696 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532704 32352 net.cpp:165] Memory required for data: 198145500\nI0821 06:46:14.532722 32352 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:46:14.532732 32352 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:46:14.532742 32352 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:46:14.532749 32352 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:46:14.532760 32352 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:46:14.532800 32352 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:46:14.532810 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532814 32352 net.cpp:165] Memory required for data: 206337500\nI0821 06:46:14.532820 32352 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:46:14.532831 32352 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:46:14.532837 32352 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:46:14.532847 32352 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.532857 32352 net.cpp:150] Setting up L1_b2_relu\nI0821 06:46:14.532865 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532869 32352 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:46:14.532876 32352 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.532884 32352 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.532889 32352 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:46:14.532897 32352 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:46:14.532907 32352 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:46:14.532961 32352 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:46:14.532971 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532981 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.532987 32352 net.cpp:165] Memory required for data: 230913500\nI0821 06:46:14.532992 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:46:14.533006 32352 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:46:14.533015 32352 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:46:14.533025 32352 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:46:14.533416 32352 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:46:14.533432 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.533437 32352 net.cpp:165] Memory required for data: 239105500\nI0821 06:46:14.533448 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:46:14.533484 32352 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:46:14.533493 32352 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:46:14.533501 32352 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:46:14.533838 32352 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:46:14.533854 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.533859 32352 net.cpp:165] Memory required for data: 
247297500\nI0821 06:46:14.533870 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:46:14.533879 32352 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:46:14.533885 32352 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:46:14.533892 32352 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.533963 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:46:14.534147 32352 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:46:14.534162 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.534165 32352 net.cpp:165] Memory required for data: 255489500\nI0821 06:46:14.534175 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:46:14.534183 32352 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:46:14.534191 32352 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:46:14.534202 32352 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.534220 32352 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:46:14.534230 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.534235 32352 net.cpp:165] Memory required for data: 263681500\nI0821 06:46:14.534240 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:46:14.534255 32352 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:46:14.534265 32352 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:46:14.534273 32352 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:46:14.534863 32352 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:46:14.534881 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.534886 32352 net.cpp:165] Memory required for data: 271873500\nI0821 06:46:14.534895 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:46:14.534914 32352 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:46:14.534921 32352 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:46:14.534942 32352 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:46:14.535264 32352 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:46:14.535280 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.535285 32352 net.cpp:165] Memory required for data: 280065500\nI0821 06:46:14.535296 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:46:14.535305 32352 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:46:14.535310 32352 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:46:14.535320 32352 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.535389 32352 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:46:14.535596 32352 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:46:14.535611 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.535619 32352 net.cpp:165] Memory required for data: 288257500\nI0821 06:46:14.535629 32352 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:46:14.535639 32352 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:46:14.535645 32352 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:46:14.535663 32352 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:46:14.535677 32352 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:46:14.535718 32352 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:46:14.535729 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.535734 32352 net.cpp:165] Memory required for data: 296449500\nI0821 06:46:14.535739 32352 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:46:14.535751 32352 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:46:14.535759 32352 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:46:14.535768 32352 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.535778 32352 net.cpp:150] Setting up L1_b3_relu\nI0821 06:46:14.535784 32352 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:46:14.535789 32352 net.cpp:165] Memory required for data: 304641500\nI0821 06:46:14.535794 32352 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.535802 32352 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.535809 32352 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:46:14.535815 32352 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:46:14.535825 32352 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:46:14.535881 32352 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:46:14.535893 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.535903 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.535908 32352 net.cpp:165] Memory required for data: 321025500\nI0821 06:46:14.535913 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:46:14.535928 32352 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:46:14.535943 32352 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:46:14.535954 32352 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:46:14.536350 32352 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:46:14.536363 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.536370 32352 net.cpp:165] Memory required for data: 329217500\nI0821 06:46:14.536379 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:46:14.536391 32352 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:46:14.536397 32352 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:46:14.536406 32352 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:46:14.536691 32352 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:46:14.536708 32352 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:46:14.536713 32352 net.cpp:165] Memory required for data: 337409500\nI0821 06:46:14.536725 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:46:14.536733 32352 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:46:14.536738 32352 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:46:14.536747 32352 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.536804 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:46:14.536965 32352 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:46:14.536979 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.536984 32352 net.cpp:165] Memory required for data: 345601500\nI0821 06:46:14.536993 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:46:14.537003 32352 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:46:14.537009 32352 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:46:14.537020 32352 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.537029 32352 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:46:14.537036 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.537041 32352 net.cpp:165] Memory required for data: 353793500\nI0821 06:46:14.537045 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:46:14.537056 32352 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:46:14.537062 32352 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:46:14.537073 32352 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:46:14.537448 32352 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:46:14.537463 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.537468 32352 net.cpp:165] Memory required for data: 361985500\nI0821 06:46:14.537477 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:46:14.537487 32352 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:46:14.537492 32352 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:46:14.537503 32352 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:46:14.537797 32352 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:46:14.537813 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.537818 32352 net.cpp:165] Memory required for data: 370177500\nI0821 06:46:14.537830 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:46:14.537838 32352 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:46:14.537844 32352 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:46:14.537852 32352 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.537914 32352 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:46:14.538072 32352 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:46:14.538086 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538091 32352 net.cpp:165] Memory required for data: 378369500\nI0821 06:46:14.538100 32352 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:46:14.538108 32352 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:46:14.538115 32352 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:46:14.538121 32352 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:46:14.538139 32352 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:46:14.538175 32352 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:46:14.538184 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538189 32352 net.cpp:165] Memory required for data: 386561500\nI0821 06:46:14.538194 32352 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:46:14.538205 32352 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:46:14.538211 32352 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:46:14.538218 32352 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.538228 32352 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:46:14.538234 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538239 32352 net.cpp:165] Memory required for data: 394753500\nI0821 06:46:14.538244 32352 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.538250 32352 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.538255 32352 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:46:14.538262 32352 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:46:14.538272 32352 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:46:14.538321 32352 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:46:14.538331 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538337 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538341 32352 net.cpp:165] Memory required for data: 411137500\nI0821 06:46:14.538347 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:46:14.538362 32352 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:46:14.538368 32352 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:46:14.538378 32352 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:46:14.538733 32352 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:46:14.538748 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.538753 32352 net.cpp:165] Memory required for data: 419329500\nI0821 06:46:14.538775 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:46:14.538787 32352 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:46:14.538794 32352 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:46:14.538805 32352 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:46:14.539073 32352 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:46:14.539088 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.539093 32352 net.cpp:165] Memory required for data: 427521500\nI0821 06:46:14.539103 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:46:14.539110 32352 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:46:14.539116 32352 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:46:14.539124 32352 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.539183 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:46:14.539342 32352 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:46:14.539355 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.539361 32352 net.cpp:165] Memory required for data: 435713500\nI0821 06:46:14.539369 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:46:14.539376 32352 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:46:14.539382 32352 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:46:14.539392 32352 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.539402 32352 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:46:14.539410 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.539414 32352 net.cpp:165] Memory required for data: 443905500\nI0821 06:46:14.539419 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:46:14.539436 32352 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:46:14.539443 32352 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:46:14.539454 32352 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:46:14.539829 32352 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:46:14.539845 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.539851 32352 net.cpp:165] Memory required for data: 452097500\nI0821 06:46:14.539860 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:46:14.539870 32352 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:46:14.539875 32352 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:46:14.539886 32352 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:46:14.540163 32352 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:46:14.540179 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540184 32352 net.cpp:165] Memory required for data: 460289500\nI0821 06:46:14.540195 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:46:14.540204 32352 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:46:14.540210 32352 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:46:14.540216 32352 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.540274 32352 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:46:14.540434 32352 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:46:14.540447 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540452 32352 net.cpp:165] Memory required for data: 468481500\nI0821 06:46:14.540462 32352 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:46:14.540472 32352 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:46:14.540479 32352 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:46:14.540485 32352 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:46:14.540498 32352 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:46:14.540531 32352 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:46:14.540540 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540545 32352 net.cpp:165] Memory required for data: 476673500\nI0821 06:46:14.540550 32352 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:46:14.540560 32352 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:46:14.540566 32352 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:46:14.540573 32352 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.540582 32352 net.cpp:150] Setting up L1_b5_relu\nI0821 06:46:14.540590 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540593 32352 net.cpp:165] Memory required for data: 484865500\nI0821 06:46:14.540598 32352 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.540606 32352 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.540611 32352 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:46:14.540617 32352 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:46:14.540627 32352 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:46:14.540681 32352 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:46:14.540694 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540701 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.540706 32352 net.cpp:165] Memory required for data: 501249500\nI0821 06:46:14.540711 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:46:14.540725 32352 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:46:14.540731 32352 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:46:14.540740 32352 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:46:14.541090 32352 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:46:14.541105 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.541116 32352 net.cpp:165] Memory required for data: 509441500\nI0821 06:46:14.541126 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:46:14.541137 32352 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:46:14.541143 32352 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:46:14.541152 32352 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:46:14.541440 32352 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:46:14.541456 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.541461 32352 net.cpp:165] Memory required for data: 517633500\nI0821 06:46:14.541472 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:46:14.541481 32352 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:46:14.541486 32352 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:46:14.541494 32352 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.541550 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:46:14.541739 32352 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:46:14.541754 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.541759 32352 net.cpp:165] Memory required for data: 525825500\nI0821 06:46:14.541769 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:46:14.541779 32352 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:46:14.541786 32352 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:46:14.541796 32352 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.541806 32352 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:46:14.541813 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.541818 32352 net.cpp:165] Memory required for data: 534017500\nI0821 06:46:14.541823 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:46:14.541833 32352 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:46:14.541839 32352 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:46:14.541851 32352 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:46:14.542201 32352 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:46:14.542215 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.542220 32352 net.cpp:165] Memory required for data: 542209500\nI0821 06:46:14.542228 32352 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:46:14.542237 32352 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:46:14.542243 32352 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:46:14.542256 32352 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:46:14.542531 32352 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:46:14.542546 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.542551 32352 net.cpp:165] Memory required for data: 550401500\nI0821 06:46:14.542560 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:46:14.542572 32352 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:46:14.542578 32352 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:46:14.542585 32352 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.542642 32352 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:46:14.542814 32352 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:46:14.542826 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.542831 32352 net.cpp:165] Memory required for data: 558593500\nI0821 06:46:14.542840 32352 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:46:14.542860 32352 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:46:14.542866 32352 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:46:14.542873 32352 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:46:14.542881 32352 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:46:14.542919 32352 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:46:14.542928 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.542933 32352 net.cpp:165] Memory required for data: 566785500\nI0821 06:46:14.542945 32352 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:46:14.542953 32352 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:46:14.542959 32352 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:46:14.542966 32352 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.542975 32352 net.cpp:150] Setting up L1_b6_relu\nI0821 06:46:14.542982 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.542987 32352 net.cpp:165] Memory required for data: 574977500\nI0821 06:46:14.542991 32352 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.542999 32352 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.543004 32352 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:46:14.543014 32352 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:46:14.543023 32352 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:46:14.543071 32352 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:46:14.543081 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.543087 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.543092 32352 net.cpp:165] Memory required for data: 591361500\nI0821 06:46:14.543097 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:46:14.543110 32352 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:46:14.543117 32352 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:46:14.543125 32352 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:46:14.543476 32352 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:46:14.543490 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.543495 32352 net.cpp:165] Memory required for data: 599553500\nI0821 06:46:14.543504 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:46:14.543526 32352 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:46:14.543534 32352 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:46:14.543545 32352 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:46:14.543826 32352 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:46:14.543841 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.543846 32352 net.cpp:165] Memory required for data: 607745500\nI0821 06:46:14.543856 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:46:14.543865 32352 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:46:14.543871 32352 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:46:14.543879 32352 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.543939 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:46:14.544097 32352 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:46:14.544111 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.544116 32352 net.cpp:165] Memory required for data: 615937500\nI0821 06:46:14.544124 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:46:14.544136 32352 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:46:14.544142 32352 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:46:14.544148 32352 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.544157 32352 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:46:14.544164 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.544169 32352 net.cpp:165] Memory required for data: 624129500\nI0821 06:46:14.544173 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:46:14.544188 32352 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:46:14.544193 32352 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:46:14.544204 32352 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:46:14.544561 32352 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:46:14.544575 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.544590 32352 
net.cpp:165] Memory required for data: 632321500\nI0821 06:46:14.544600 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:46:14.544611 32352 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:46:14.544617 32352 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:46:14.544625 32352 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:46:14.544906 32352 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:46:14.544921 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.544926 32352 net.cpp:165] Memory required for data: 640513500\nI0821 06:46:14.544936 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:46:14.544945 32352 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:46:14.544950 32352 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:46:14.544958 32352 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.545019 32352 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:46:14.545191 32352 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:46:14.545204 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545209 32352 net.cpp:165] Memory required for data: 648705500\nI0821 06:46:14.545218 32352 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:46:14.545228 32352 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:46:14.545233 32352 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:46:14.545240 32352 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:46:14.545251 32352 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:46:14.545286 32352 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:46:14.545297 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545302 32352 net.cpp:165] Memory required for data: 656897500\nI0821 06:46:14.545307 32352 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:46:14.545318 32352 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:46:14.545325 32352 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:46:14.545331 32352 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.545341 32352 net.cpp:150] Setting up L1_b7_relu\nI0821 06:46:14.545348 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545352 32352 net.cpp:165] Memory required for data: 665089500\nI0821 06:46:14.545357 32352 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.545363 32352 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.545368 32352 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:46:14.545375 32352 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:46:14.545385 32352 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:46:14.545435 32352 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:46:14.545447 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545454 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545459 32352 net.cpp:165] Memory required for data: 681473500\nI0821 06:46:14.545464 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:46:14.545477 32352 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:46:14.545483 32352 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:46:14.545492 32352 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:46:14.545857 32352 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:46:14.545871 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.545877 32352 net.cpp:165] Memory required for data: 689665500\nI0821 06:46:14.545886 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:46:14.545897 32352 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:46:14.545903 32352 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:46:14.545922 32352 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:46:14.546201 32352 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:46:14.546216 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.546221 32352 net.cpp:165] Memory required for data: 697857500\nI0821 06:46:14.546231 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:46:14.546239 32352 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:46:14.546245 32352 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:46:14.546252 32352 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.546314 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:46:14.546475 32352 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:46:14.546489 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.546494 32352 net.cpp:165] Memory required for data: 706049500\nI0821 06:46:14.546501 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:46:14.546509 32352 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:46:14.546519 32352 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:46:14.546525 32352 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.546535 32352 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:46:14.546542 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.546546 32352 net.cpp:165] Memory required for data: 714241500\nI0821 06:46:14.546551 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:46:14.546564 32352 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:46:14.546571 32352 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:46:14.546581 32352 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:46:14.546950 32352 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:46:14.546965 32352 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.546970 32352 net.cpp:165] Memory required for data: 722433500\nI0821 06:46:14.546979 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:46:14.546991 32352 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:46:14.546998 32352 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:46:14.547005 32352 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:46:14.547286 32352 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:46:14.547299 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547304 32352 net.cpp:165] Memory required for data: 730625500\nI0821 06:46:14.547314 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:46:14.547322 32352 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:46:14.547328 32352 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:46:14.547336 32352 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.547397 32352 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:46:14.547583 32352 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:46:14.547597 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547602 32352 net.cpp:165] Memory required for data: 738817500\nI0821 06:46:14.547611 32352 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:46:14.547621 32352 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:46:14.547626 32352 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:46:14.547633 32352 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:46:14.547648 32352 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:46:14.547691 32352 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:46:14.547703 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547708 32352 net.cpp:165] Memory required for data: 747009500\nI0821 06:46:14.547713 32352 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:46:14.547726 32352 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:46:14.547734 32352 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:46:14.547740 32352 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.547757 32352 net.cpp:150] Setting up L1_b8_relu\nI0821 06:46:14.547765 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547770 32352 net.cpp:165] Memory required for data: 755201500\nI0821 06:46:14.547775 32352 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.547781 32352 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.547786 32352 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:46:14.547794 32352 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:46:14.547803 32352 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:46:14.547854 32352 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:46:14.547866 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547873 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.547878 32352 net.cpp:165] Memory required for data: 771585500\nI0821 06:46:14.547883 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:46:14.547896 32352 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:46:14.547904 32352 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:46:14.547912 32352 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:46:14.548276 32352 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:46:14.548291 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.548296 32352 net.cpp:165] Memory required for data: 779777500\nI0821 06:46:14.548305 32352 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:46:14.548316 32352 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:46:14.548343 32352 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:46:14.548353 32352 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:46:14.548633 32352 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:46:14.548647 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.548660 32352 net.cpp:165] Memory required for data: 787969500\nI0821 06:46:14.548671 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:46:14.548682 32352 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:46:14.548688 32352 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:46:14.548696 32352 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.548759 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:46:14.548923 32352 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:46:14.548936 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.548941 32352 net.cpp:165] Memory required for data: 796161500\nI0821 06:46:14.548949 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:46:14.548957 32352 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:46:14.548964 32352 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:46:14.548974 32352 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.548984 32352 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:46:14.548990 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.548995 32352 net.cpp:165] Memory required for data: 804353500\nI0821 06:46:14.549000 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:46:14.549013 32352 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:46:14.549019 32352 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:46:14.549027 32352 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:46:14.549387 32352 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:46:14.549401 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.549407 32352 net.cpp:165] Memory required for data: 812545500\nI0821 06:46:14.549415 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:46:14.549427 32352 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:46:14.549433 32352 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:46:14.549453 32352 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:46:14.549734 32352 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:46:14.549748 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.549753 32352 net.cpp:165] Memory required for data: 820737500\nI0821 06:46:14.549785 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:46:14.549798 32352 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:46:14.549803 32352 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:46:14.549811 32352 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.549872 32352 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:46:14.550032 32352 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:46:14.550045 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.550050 32352 net.cpp:165] Memory required for data: 828929500\nI0821 06:46:14.550060 32352 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:46:14.550068 32352 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:46:14.550074 32352 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:46:14.550081 32352 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:46:14.550088 32352 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:46:14.550125 32352 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:46:14.550135 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.550140 32352 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:46:14.550145 32352 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:46:14.550153 32352 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:46:14.550158 32352 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:46:14.550168 32352 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.550178 32352 net.cpp:150] Setting up L1_b9_relu\nI0821 06:46:14.550184 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.550189 32352 net.cpp:165] Memory required for data: 845313500\nI0821 06:46:14.550194 32352 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.550200 32352 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.550205 32352 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:46:14.550215 32352 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:46:14.550225 32352 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:46:14.550272 32352 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:46:14.550284 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.550292 32352 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:46:14.550295 32352 net.cpp:165] Memory required for data: 861697500\nI0821 06:46:14.550300 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:46:14.550314 32352 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:46:14.550320 32352 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:46:14.550329 32352 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:46:14.550703 32352 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:46:14.550716 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.550722 32352 net.cpp:165] Memory required for data: 
863745500\nI0821 06:46:14.550730 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:46:14.550742 32352 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:46:14.550748 32352 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:46:14.550756 32352 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:46:14.551026 32352 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:46:14.551039 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.551044 32352 net.cpp:165] Memory required for data: 865793500\nI0821 06:46:14.551055 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:46:14.551070 32352 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:46:14.551077 32352 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:46:14.551085 32352 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.551149 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:46:14.551311 32352 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:46:14.551326 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.551332 32352 net.cpp:165] Memory required for data: 867841500\nI0821 06:46:14.551342 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:46:14.551348 32352 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:46:14.551354 32352 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:46:14.551362 32352 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.551371 32352 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:46:14.551378 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.551383 32352 net.cpp:165] Memory required for data: 869889500\nI0821 06:46:14.551388 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:46:14.551400 32352 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:46:14.551406 32352 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:46:14.551417 32352 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:46:14.551785 32352 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:46:14.551800 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.551805 32352 net.cpp:165] Memory required for data: 871937500\nI0821 06:46:14.551813 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:46:14.551826 32352 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:46:14.551832 32352 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:46:14.551843 32352 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:46:14.552109 32352 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:46:14.552122 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.552127 32352 net.cpp:165] Memory required for data: 873985500\nI0821 06:46:14.552139 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:46:14.552146 32352 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:46:14.552152 32352 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:46:14.552160 32352 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.552222 32352 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:46:14.552378 32352 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:46:14.552392 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.552395 32352 net.cpp:165] Memory required for data: 876033500\nI0821 06:46:14.552404 32352 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:46:14.552417 32352 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:46:14.552423 32352 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:46:14.552430 32352 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:46:14.552467 32352 net.cpp:150] Setting up L2_b1_pool\nI0821 06:46:14.552479 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.552484 32352 net.cpp:165] Memory required for data: 878081500\nI0821 06:46:14.552489 32352 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:46:14.552496 32352 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:46:14.552502 32352 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:46:14.552510 32352 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:46:14.552516 32352 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:46:14.552551 32352 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:46:14.552561 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.552567 32352 net.cpp:165] Memory required for data: 880129500\nI0821 06:46:14.552572 32352 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:46:14.552582 32352 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:46:14.552595 32352 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:46:14.552603 32352 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.552611 32352 net.cpp:150] Setting up L2_b1_relu\nI0821 06:46:14.552619 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.552623 32352 net.cpp:165] Memory required for data: 882177500\nI0821 06:46:14.552628 32352 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:46:14.552637 32352 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:46:14.552644 32352 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:46:14.554877 32352 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:46:14.554896 32352 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:46:14.554901 32352 net.cpp:165] Memory required for data: 884225500\nI0821 06:46:14.554908 32352 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:46:14.554918 32352 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:46:14.554924 32352 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:46:14.554930 32352 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:46:14.554941 32352 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:46:14.554985 32352 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:46:14.554999 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.555006 32352 net.cpp:165] Memory required for data: 888321500\nI0821 06:46:14.555011 32352 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.555017 32352 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.555023 32352 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:46:14.555030 32352 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:46:14.555043 32352 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:46:14.555094 32352 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:46:14.555105 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.555111 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.555116 32352 net.cpp:165] Memory required for data: 896513500\nI0821 06:46:14.555121 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:46:14.555135 32352 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:46:14.555141 32352 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:46:14.555153 32352 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:46:14.555665 32352 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:46:14.555680 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.555685 32352 net.cpp:165] Memory required for data: 900609500\nI0821 06:46:14.555694 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:46:14.555704 32352 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:46:14.555711 32352 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:46:14.555721 32352 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:46:14.555994 32352 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:46:14.556006 32352 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:46:14.556011 32352 net.cpp:165] Memory required for data: 904705500\nI0821 06:46:14.556021 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:46:14.556033 32352 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:46:14.556040 32352 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:46:14.556047 32352 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.556105 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:46:14.556263 32352 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:46:14.556277 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.556282 32352 net.cpp:165] Memory required for data: 908801500\nI0821 06:46:14.556289 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:46:14.556303 32352 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:46:14.556318 32352 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:46:14.556325 32352 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.556335 32352 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:46:14.556342 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.556347 32352 net.cpp:165] Memory required for data: 912897500\nI0821 06:46:14.556360 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:46:14.556371 32352 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:46:14.556376 32352 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:46:14.556387 32352 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:46:14.556888 32352 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:46:14.556903 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.556908 32352 net.cpp:165] Memory required for data: 916993500\nI0821 06:46:14.556917 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:46:14.556926 32352 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:46:14.556932 
32352 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:46:14.556943 32352 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:46:14.557207 32352 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:46:14.557220 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557225 32352 net.cpp:165] Memory required for data: 921089500\nI0821 06:46:14.557235 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:46:14.557246 32352 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:46:14.557253 32352 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:46:14.557260 32352 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.557318 32352 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:46:14.557478 32352 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:46:14.557492 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557497 32352 net.cpp:165] Memory required for data: 925185500\nI0821 06:46:14.557505 32352 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:46:14.557518 32352 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:46:14.557523 32352 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:46:14.557530 32352 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:46:14.557538 32352 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:46:14.557569 32352 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:46:14.557579 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557584 32352 net.cpp:165] Memory required for data: 929281500\nI0821 06:46:14.557588 32352 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:46:14.557596 32352 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:46:14.557601 32352 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:46:14.557611 32352 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.557621 32352 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:46:14.557627 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557632 32352 net.cpp:165] Memory required for data: 933377500\nI0821 06:46:14.557636 32352 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.557643 32352 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.557654 32352 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:46:14.557663 32352 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:46:14.557673 32352 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:46:14.557724 32352 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:46:14.557736 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557744 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.557747 32352 net.cpp:165] Memory required for data: 941569500\nI0821 06:46:14.557760 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:46:14.557771 32352 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:46:14.557777 32352 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:46:14.557790 32352 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:46:14.558282 32352 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:46:14.558296 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.558301 32352 net.cpp:165] Memory required for data: 945665500\nI0821 06:46:14.558310 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:46:14.558320 32352 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:46:14.558326 32352 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:46:14.558341 32352 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:46:14.558609 32352 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:46:14.558621 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.558626 32352 net.cpp:165] Memory required for data: 949761500\nI0821 06:46:14.558636 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:46:14.558648 32352 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:46:14.558661 32352 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:46:14.558670 32352 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.558727 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:46:14.558886 32352 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:46:14.558898 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.558902 32352 net.cpp:165] Memory required for data: 953857500\nI0821 06:46:14.558912 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:46:14.558923 32352 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:46:14.558928 32352 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:46:14.558935 32352 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.558945 32352 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:46:14.558953 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.558956 32352 net.cpp:165] Memory required for data: 957953500\nI0821 06:46:14.558961 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:46:14.558976 32352 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:46:14.558982 32352 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:46:14.558993 32352 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:46:14.559481 32352 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:46:14.559495 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.559500 32352 net.cpp:165] Memory required for data: 962049500\nI0821 06:46:14.559509 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:46:14.559518 32352 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:46:14.559525 32352 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:46:14.559537 32352 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:46:14.559813 32352 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:46:14.559826 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.559831 32352 net.cpp:165] Memory required for data: 966145500\nI0821 06:46:14.559842 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:46:14.559854 32352 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:46:14.559860 32352 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:46:14.559869 32352 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.559922 32352 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:46:14.560083 32352 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:46:14.560096 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560101 32352 net.cpp:165] Memory required for data: 970241500\nI0821 06:46:14.560109 32352 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:46:14.560118 32352 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:46:14.560132 32352 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:46:14.560142 32352 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:46:14.560150 32352 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:46:14.560179 32352 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:46:14.560195 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560200 32352 net.cpp:165] Memory required for data: 974337500\nI0821 06:46:14.560205 32352 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:46:14.560225 32352 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:46:14.560232 32352 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:46:14.560240 32352 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.560250 32352 net.cpp:150] Setting up L2_b3_relu\nI0821 06:46:14.560256 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560261 32352 net.cpp:165] Memory required for data: 978433500\nI0821 06:46:14.560266 32352 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.560272 32352 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.560278 32352 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:46:14.560288 32352 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:46:14.560298 32352 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:46:14.560348 32352 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:46:14.560359 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560365 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560370 32352 net.cpp:165] Memory required for data: 986625500\nI0821 06:46:14.560375 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:46:14.560391 32352 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:46:14.560397 32352 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:46:14.560406 32352 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:46:14.560909 32352 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:46:14.560925 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.560930 32352 net.cpp:165] Memory required for data: 990721500\nI0821 06:46:14.560937 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:46:14.560951 32352 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:46:14.560957 32352 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:46:14.560966 32352 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:46:14.561238 32352 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:46:14.561250 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.561255 32352 net.cpp:165] Memory required for data: 994817500\nI0821 06:46:14.561265 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:46:14.561275 32352 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:46:14.561280 32352 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:46:14.561290 32352 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.561349 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:46:14.561512 32352 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:46:14.561527 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.561530 32352 net.cpp:165] Memory required for data: 998913500\nI0821 06:46:14.561539 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:46:14.561547 32352 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:46:14.561553 32352 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:46:14.561563 32352 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.561573 32352 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:46:14.561579 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.561584 32352 net.cpp:165] Memory required for data: 1003009500\nI0821 06:46:14.561596 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:46:14.561614 32352 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:46:14.561620 32352 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:46:14.561630 32352 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:46:14.562132 32352 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:46:14.562147 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562152 32352 net.cpp:165] Memory required for data: 1007105500\nI0821 06:46:14.562161 32352 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:46:14.562170 32352 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:46:14.562180 32352 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:46:14.562188 32352 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:46:14.562458 32352 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:46:14.562470 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562475 32352 net.cpp:165] Memory required for data: 1011201500\nI0821 06:46:14.562485 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:46:14.562494 32352 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:46:14.562500 32352 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:46:14.562507 32352 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.562567 32352 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:46:14.562734 32352 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:46:14.562750 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562757 32352 net.cpp:165] Memory required for data: 1015297500\nI0821 06:46:14.562764 32352 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:46:14.562773 32352 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:46:14.562779 32352 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:46:14.562786 32352 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:46:14.562793 32352 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:46:14.562824 32352 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:46:14.562834 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562839 32352 net.cpp:165] Memory required for data: 1019393500\nI0821 06:46:14.562844 32352 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:46:14.562851 32352 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:46:14.562857 32352 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:46:14.562866 32352 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.562876 32352 net.cpp:150] Setting up L2_b4_relu\nI0821 06:46:14.562883 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562887 32352 net.cpp:165] Memory required for data: 1023489500\nI0821 06:46:14.562892 32352 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.562898 32352 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.562904 32352 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:46:14.562913 32352 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:46:14.562923 32352 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:46:14.562970 32352 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:46:14.562983 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562989 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.562994 32352 net.cpp:165] Memory required for data: 1031681500\nI0821 06:46:14.562999 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:46:14.563012 32352 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:46:14.563019 32352 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:46:14.563029 32352 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:46:14.563537 32352 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:46:14.563551 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.563556 32352 net.cpp:165] Memory required for data: 1035777500\nI0821 06:46:14.563565 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:46:14.563576 32352 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:46:14.563583 32352 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:46:14.563591 32352 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:46:14.563869 32352 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:46:14.563884 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.563889 32352 net.cpp:165] Memory required for data: 1039873500\nI0821 06:46:14.563899 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:46:14.563908 32352 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:46:14.563915 32352 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:46:14.563925 32352 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.563983 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:46:14.564144 32352 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:46:14.564157 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.564162 32352 net.cpp:165] Memory required for data: 1043969500\nI0821 06:46:14.564172 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:46:14.564178 32352 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:46:14.564185 32352 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:46:14.564193 32352 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.564201 32352 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:46:14.564208 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.564213 32352 net.cpp:165] Memory required for data: 1048065500\nI0821 06:46:14.564218 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:46:14.564230 32352 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:46:14.564236 32352 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:46:14.564247 32352 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:46:14.564750 32352 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:46:14.564764 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.564769 32352 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:46:14.564779 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:46:14.564790 32352 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:46:14.564797 32352 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:46:14.564807 32352 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:46:14.565075 32352 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:46:14.565088 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565093 32352 net.cpp:165] Memory required for data: 1056257500\nI0821 06:46:14.565104 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:46:14.565112 32352 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:46:14.565119 32352 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:46:14.565125 32352 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.565186 32352 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:46:14.565346 32352 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:46:14.565361 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565366 32352 net.cpp:165] Memory required for data: 1060353500\nI0821 06:46:14.565376 32352 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:46:14.565384 32352 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:46:14.565390 32352 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:46:14.565397 32352 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:46:14.565404 32352 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:46:14.565436 32352 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:46:14.565452 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565457 32352 net.cpp:165] Memory required for data: 1064449500\nI0821 06:46:14.565464 32352 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:46:14.565470 32352 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:46:14.565476 32352 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:46:14.565486 32352 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.565496 32352 net.cpp:150] Setting up L2_b5_relu\nI0821 06:46:14.565503 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565507 32352 net.cpp:165] Memory required for data: 1068545500\nI0821 06:46:14.565512 32352 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.565520 32352 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.565524 32352 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:46:14.565534 32352 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:46:14.565543 32352 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:46:14.565593 32352 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:46:14.565604 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565611 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.565616 32352 net.cpp:165] Memory required for data: 1076737500\nI0821 06:46:14.565620 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:46:14.565635 32352 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:46:14.565642 32352 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:46:14.565656 32352 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:46:14.566164 32352 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:46:14.566179 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.566184 32352 net.cpp:165] Memory required for data: 1080833500\nI0821 06:46:14.566192 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:46:14.566203 32352 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:46:14.566210 32352 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:46:14.566218 32352 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:46:14.566483 32352 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:46:14.566495 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.566500 32352 net.cpp:165] Memory required for data: 1084929500\nI0821 06:46:14.566510 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:46:14.566519 32352 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:46:14.566525 32352 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:46:14.566532 32352 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.566592 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:46:14.566758 32352 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:46:14.566776 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.566781 32352 net.cpp:165] Memory required for data: 1089025500\nI0821 06:46:14.566789 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:46:14.566797 32352 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:46:14.566803 32352 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:46:14.566810 32352 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.566820 32352 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:46:14.566826 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.566831 32352 net.cpp:165] Memory required for data: 1093121500\nI0821 06:46:14.566835 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:46:14.566849 32352 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:46:14.566855 32352 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:46:14.566866 32352 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:46:14.567369 32352 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:46:14.567384 32352 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.567389 32352 net.cpp:165] Memory required for data: 1097217500\nI0821 06:46:14.567399 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:46:14.567409 32352 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:46:14.567416 32352 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:46:14.567427 32352 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:46:14.567703 32352 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:46:14.567716 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.567721 32352 net.cpp:165] Memory required for data: 1101313500\nI0821 06:46:14.567733 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:46:14.567740 32352 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:46:14.567746 32352 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:46:14.567754 32352 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.567813 32352 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:46:14.567971 32352 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:46:14.567984 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.567989 32352 net.cpp:165] Memory required for data: 1105409500\nI0821 06:46:14.567997 32352 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:46:14.568009 32352 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:46:14.568015 32352 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:46:14.568022 32352 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:46:14.568030 32352 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:46:14.568058 32352 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:46:14.568068 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.568073 32352 net.cpp:165] Memory required for data: 1109505500\nI0821 06:46:14.568078 32352 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:46:14.568089 32352 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:46:14.568094 32352 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:46:14.568101 32352 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.568110 32352 net.cpp:150] Setting up L2_b6_relu\nI0821 06:46:14.568117 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.568121 32352 net.cpp:165] Memory required for data: 1113601500\nI0821 06:46:14.568126 32352 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.568133 32352 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.568138 32352 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:46:14.568145 32352 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:46:14.568155 32352 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:46:14.568207 32352 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:46:14.568219 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.568225 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.568230 32352 net.cpp:165] Memory required for data: 1121793500\nI0821 06:46:14.568235 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:46:14.568248 32352 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:46:14.568254 32352 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:46:14.568264 32352 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:46:14.569777 32352 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:46:14.569795 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.569800 32352 net.cpp:165] Memory required for data: 1125889500\nI0821 06:46:14.569809 32352 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:46:14.569828 32352 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:46:14.569834 32352 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:46:14.569846 32352 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:46:14.570114 32352 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:46:14.570127 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.570132 32352 net.cpp:165] Memory required for data: 1129985500\nI0821 06:46:14.570143 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:46:14.570155 32352 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:46:14.570161 32352 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:46:14.570169 32352 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.570227 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:46:14.570387 32352 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:46:14.570400 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.570405 32352 net.cpp:165] Memory required for data: 1134081500\nI0821 06:46:14.570415 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:46:14.570425 32352 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:46:14.570431 32352 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:46:14.570438 32352 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.570451 32352 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:46:14.570458 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.570462 32352 net.cpp:165] Memory required for data: 1138177500\nI0821 06:46:14.570467 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:46:14.570478 32352 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:46:14.570484 32352 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:46:14.570497 32352 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:46:14.570987 32352 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:46:14.571002 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571007 32352 net.cpp:165] Memory required for data: 1142273500\nI0821 06:46:14.571017 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:46:14.571025 32352 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:46:14.571032 32352 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:46:14.571044 32352 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:46:14.571316 32352 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:46:14.571329 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571334 32352 net.cpp:165] Memory required for data: 1146369500\nI0821 06:46:14.571346 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:46:14.571357 32352 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:46:14.571363 32352 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:46:14.571370 32352 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.571427 32352 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:46:14.571584 32352 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:46:14.571597 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571602 32352 net.cpp:165] Memory required for data: 1150465500\nI0821 06:46:14.571610 32352 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:46:14.571622 32352 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:46:14.571630 32352 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:46:14.571636 32352 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:46:14.571645 32352 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:46:14.571681 32352 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:46:14.571693 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571697 32352 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:46:14.571702 32352 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:46:14.571710 32352 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:46:14.571723 32352 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:46:14.571733 32352 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.571744 32352 net.cpp:150] Setting up L2_b7_relu\nI0821 06:46:14.571751 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571756 32352 net.cpp:165] Memory required for data: 1158657500\nI0821 06:46:14.571760 32352 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.571768 32352 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.571772 32352 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:46:14.571780 32352 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:46:14.571789 32352 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:46:14.571843 32352 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:46:14.571856 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571862 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.571866 32352 net.cpp:165] Memory required for data: 1166849500\nI0821 06:46:14.571871 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:46:14.571882 32352 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:46:14.571888 32352 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:46:14.571900 32352 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:46:14.572386 32352 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:46:14.572399 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.572404 32352 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:46:14.572413 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:46:14.572422 32352 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:46:14.572429 32352 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:46:14.572439 32352 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:46:14.572717 32352 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:46:14.572731 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.572736 32352 net.cpp:165] Memory required for data: 1175041500\nI0821 06:46:14.572746 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:46:14.572758 32352 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:46:14.572764 32352 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:46:14.572772 32352 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.572832 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:46:14.572996 32352 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:46:14.573009 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.573014 32352 net.cpp:165] Memory required for data: 1179137500\nI0821 06:46:14.573024 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:46:14.573034 32352 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:46:14.573040 32352 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:46:14.573047 32352 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.573057 32352 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:46:14.573065 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.573068 32352 net.cpp:165] Memory required for data: 1183233500\nI0821 06:46:14.573073 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:46:14.573086 32352 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:46:14.573092 32352 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:46:14.573103 32352 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:46:14.573621 32352 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:46:14.573635 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.573642 32352 net.cpp:165] Memory required for data: 1187329500\nI0821 06:46:14.573657 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:46:14.573673 32352 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:46:14.573680 32352 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:46:14.573694 32352 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:46:14.573972 32352 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:46:14.573987 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.573992 32352 net.cpp:165] Memory required for data: 1191425500\nI0821 06:46:14.574002 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:46:14.574012 32352 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:46:14.574019 32352 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:46:14.574028 32352 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.574086 32352 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:46:14.574245 32352 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:46:14.574259 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.574264 32352 net.cpp:165] Memory required for data: 1195521500\nI0821 06:46:14.574272 32352 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:46:14.574285 32352 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:46:14.574290 32352 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:46:14.574297 32352 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:46:14.574306 32352 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:46:14.574333 32352 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:46:14.574347 32352 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:46:14.574352 32352 net.cpp:165] Memory required for data: 1199617500\nI0821 06:46:14.574357 32352 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:46:14.574364 32352 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:46:14.574370 32352 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:46:14.574378 32352 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.574386 32352 net.cpp:150] Setting up L2_b8_relu\nI0821 06:46:14.574393 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.574398 32352 net.cpp:165] Memory required for data: 1203713500\nI0821 06:46:14.574403 32352 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.574412 32352 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.574419 32352 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:46:14.574425 32352 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:46:14.574448 32352 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:46:14.574499 32352 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:46:14.574515 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.574522 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.574527 32352 net.cpp:165] Memory required for data: 1211905500\nI0821 06:46:14.574532 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:46:14.574542 32352 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:46:14.574549 32352 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:46:14.574558 32352 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:46:14.575073 32352 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:46:14.575090 32352 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:46:14.575095 32352 net.cpp:165] Memory required for data: 1216001500\nI0821 06:46:14.575104 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:46:14.575114 32352 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:46:14.575119 32352 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:46:14.575130 32352 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:46:14.575408 32352 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:46:14.575428 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.575433 32352 net.cpp:165] Memory required for data: 1220097500\nI0821 06:46:14.575444 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:46:14.575455 32352 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:46:14.575462 32352 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:46:14.575470 32352 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.575526 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:46:14.575696 32352 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:46:14.575711 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.575716 32352 net.cpp:165] Memory required for data: 1224193500\nI0821 06:46:14.575724 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:46:14.575732 32352 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:46:14.575738 32352 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:46:14.575748 32352 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.575758 32352 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:46:14.575765 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.575770 32352 net.cpp:165] Memory required for data: 1228289500\nI0821 06:46:14.575775 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:46:14.575788 32352 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:46:14.575795 32352 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:46:14.575803 32352 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:46:14.577316 32352 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:46:14.577334 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.577339 32352 net.cpp:165] Memory required for data: 1232385500\nI0821 06:46:14.577349 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:46:14.577358 32352 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:46:14.577365 32352 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:46:14.577378 32352 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:46:14.577647 32352 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:46:14.577666 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.577672 32352 net.cpp:165] Memory required for data: 1236481500\nI0821 06:46:14.577724 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:46:14.577739 32352 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:46:14.577745 32352 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:46:14.577754 32352 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.577816 32352 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:46:14.577971 32352 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:46:14.577987 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.577992 32352 net.cpp:165] Memory required for data: 1240577500\nI0821 06:46:14.578001 32352 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:46:14.578011 32352 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:46:14.578017 32352 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:46:14.578024 32352 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:46:14.578035 32352 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:46:14.578063 32352 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:46:14.578073 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.578078 32352 net.cpp:165] Memory required for data: 1244673500\nI0821 06:46:14.578083 32352 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:46:14.578090 32352 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:46:14.578095 32352 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:46:14.578105 32352 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.578115 32352 net.cpp:150] Setting up L2_b9_relu\nI0821 06:46:14.578122 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.578135 32352 net.cpp:165] Memory required for data: 1248769500\nI0821 06:46:14.578140 32352 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.578147 32352 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.578153 32352 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:46:14.578163 32352 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:46:14.578173 32352 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:46:14.578222 32352 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:46:14.578234 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.578241 32352 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:46:14.578245 32352 net.cpp:165] Memory required for data: 1256961500\nI0821 06:46:14.578250 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:46:14.578265 32352 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:46:14.578272 32352 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:46:14.578281 32352 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:46:14.578793 32352 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:46:14.578807 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.578812 32352 net.cpp:165] Memory required for data: 1257985500\nI0821 06:46:14.578821 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:46:14.578835 32352 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:46:14.578840 32352 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:46:14.578848 32352 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:46:14.579126 32352 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:46:14.579139 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.579144 32352 net.cpp:165] Memory required for data: 1259009500\nI0821 06:46:14.579155 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:46:14.579166 32352 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:46:14.579174 32352 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:46:14.579181 32352 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.579239 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:46:14.579406 32352 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:46:14.579419 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.579424 32352 net.cpp:165] Memory required for data: 1260033500\nI0821 06:46:14.579433 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:46:14.579444 32352 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:46:14.579452 32352 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:46:14.579458 32352 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:46:14.579471 32352 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:46:14.579478 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.579483 32352 net.cpp:165] Memory required for data: 1261057500\nI0821 06:46:14.579488 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:46:14.579499 32352 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:46:14.579504 32352 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:46:14.579515 32352 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:46:14.580041 32352 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:46:14.580056 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.580062 32352 net.cpp:165] Memory required for data: 1262081500\nI0821 06:46:14.580071 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:46:14.580080 32352 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:46:14.580086 32352 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:46:14.580097 32352 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:46:14.580376 32352 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:46:14.580396 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.580402 32352 net.cpp:165] Memory required for data: 1263105500\nI0821 06:46:14.580412 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:46:14.580421 32352 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:46:14.580428 32352 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:46:14.580435 32352 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:46:14.580498 32352 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:46:14.580670 32352 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:46:14.580687 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.580693 32352 net.cpp:165] Memory required for data: 1264129500\nI0821 06:46:14.580701 32352 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:46:14.580711 32352 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:46:14.580718 32352 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:46:14.580725 32352 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:46:14.580765 32352 net.cpp:150] Setting up L3_b1_pool\nI0821 06:46:14.580773 32352 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:46:14.580778 32352 net.cpp:165] Memory required for data: 1265153500\nI0821 06:46:14.580783 32352 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:46:14.580792 32352 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:46:14.580798 32352 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:46:14.580806 32352 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:46:14.580812 32352 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:46:14.580849 32352 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:46:14.580862 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.580866 32352 net.cpp:165] Memory required for data: 1266177500\nI0821 06:46:14.580871 32352 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:46:14.580878 32352 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:46:14.580884 32352 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:46:14.580891 32352 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:46:14.580900 32352 net.cpp:150] Setting up L3_b1_relu\nI0821 06:46:14.580907 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.580912 32352 net.cpp:165] Memory required for data: 1267201500\nI0821 06:46:14.580916 32352 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:46:14.580925 32352 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:46:14.580936 32352 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:46:14.582191 32352 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:46:14.582209 32352 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:46:14.582214 32352 net.cpp:165] Memory required for data: 1268225500\nI0821 06:46:14.582221 32352 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:46:14.582233 32352 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:46:14.582239 32352 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:46:14.582247 32352 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:46:14.582254 32352 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:46:14.582300 32352 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:46:14.582312 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.582317 32352 net.cpp:165] Memory required for data: 1270273500\nI0821 06:46:14.582322 32352 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.582330 32352 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.582336 32352 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:46:14.582346 32352 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:46:14.582356 32352 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:46:14.582408 32352 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:46:14.582423 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.582437 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.582442 32352 net.cpp:165] Memory required for data: 1274369500\nI0821 06:46:14.582448 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:46:14.582463 32352 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:46:14.582469 32352 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:46:14.582479 32352 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:46:14.583528 32352 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:46:14.583542 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.583547 32352 net.cpp:165] Memory required for data: 1276417500\nI0821 06:46:14.583557 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:46:14.583570 32352 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:46:14.583575 32352 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:46:14.583585 32352 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:46:14.583873 32352 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:46:14.583886 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.583892 32352 net.cpp:165] Memory required for data: 1278465500\nI0821 06:46:14.583902 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:46:14.583912 32352 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:46:14.583919 32352 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:46:14.583926 32352 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.583989 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:46:14.584153 32352 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:46:14.584167 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.584172 32352 net.cpp:165] Memory required for data: 1280513500\nI0821 06:46:14.584180 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:46:14.584188 32352 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:46:14.584194 32352 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:46:14.584204 32352 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:46:14.584215 32352 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:46:14.584223 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.584226 32352 net.cpp:165] Memory required for data: 1282561500\nI0821 06:46:14.584231 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:46:14.584244 32352 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:46:14.584251 32352 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:46:14.584259 32352 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:46:14.585310 32352 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:46:14.585325 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.585330 32352 net.cpp:165] Memory required for data: 1284609500\nI0821 06:46:14.585340 32352 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:46:14.585351 32352 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:46:14.585358 32352 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:46:14.585366 32352 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:46:14.585716 32352 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:46:14.585733 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.585738 32352 net.cpp:165] Memory required for data: 1286657500\nI0821 06:46:14.585749 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:46:14.585762 32352 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:46:14.585768 32352 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:46:14.585777 32352 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:46:14.585839 32352 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:46:14.586004 32352 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:46:14.586017 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.586024 32352 net.cpp:165] Memory required for data: 1288705500\nI0821 06:46:14.586040 32352 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:46:14.586051 32352 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:46:14.586058 32352 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:46:14.586066 32352 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:46:14.586076 32352 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:46:14.586110 32352 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:46:14.586120 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.586125 32352 net.cpp:165] Memory required for data: 1290753500\nI0821 06:46:14.586130 32352 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:46:14.586143 32352 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:46:14.586148 32352 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:46:14.586155 32352 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:46:14.586165 32352 net.cpp:150] Setting up L3_b2_relu\nI0821 06:46:14.586172 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.586176 32352 net.cpp:165] Memory required for data: 1292801500\nI0821 06:46:14.586181 32352 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.586189 32352 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.586194 32352 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:46:14.586200 32352 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:46:14.586210 32352 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:46:14.586259 32352 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:46:14.586272 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.586277 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.586282 32352 net.cpp:165] Memory required for data: 1296897500\nI0821 06:46:14.586287 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:46:14.586302 32352 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:46:14.586308 32352 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:46:14.586318 32352 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:46:14.587360 32352 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:46:14.587375 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.587380 32352 net.cpp:165] Memory required for data: 1298945500\nI0821 06:46:14.587389 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:46:14.587401 32352 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:46:14.587409 32352 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:46:14.587419 32352 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:46:14.587695 32352 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:46:14.587709 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.587714 32352 net.cpp:165] Memory required for data: 1300993500\nI0821 06:46:14.587724 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:46:14.587733 32352 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:46:14.587739 32352 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:46:14.587748 32352 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.587808 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:46:14.587968 32352 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:46:14.587980 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.587985 32352 net.cpp:165] Memory required for data: 1303041500\nI0821 06:46:14.587994 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:46:14.588002 32352 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:46:14.588008 32352 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:46:14.588018 32352 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:46:14.588028 32352 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:46:14.588042 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.588047 32352 net.cpp:165] Memory required for data: 1305089500\nI0821 06:46:14.588052 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:46:14.588068 32352 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:46:14.588074 32352 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:46:14.588083 32352 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:46:14.589135 32352 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:46:14.589150 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.589155 32352 net.cpp:165] Memory required for data: 1307137500\nI0821 06:46:14.589164 32352 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:46:14.589176 32352 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:46:14.589184 32352 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:46:14.589191 32352 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:46:14.589460 32352 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:46:14.589473 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.589478 32352 net.cpp:165] Memory required for data: 1309185500\nI0821 06:46:14.589488 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:46:14.589500 32352 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:46:14.589507 32352 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:46:14.589514 32352 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:46:14.589578 32352 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:46:14.589749 32352 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:46:14.589763 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.589769 32352 net.cpp:165] Memory required for data: 1311233500\nI0821 06:46:14.589778 32352 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:46:14.589790 32352 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:46:14.589797 32352 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:46:14.589804 32352 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:46:14.589814 32352 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:46:14.589848 32352 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:46:14.589860 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.589865 32352 net.cpp:165] Memory required for data: 1313281500\nI0821 06:46:14.589870 32352 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:46:14.589881 32352 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:46:14.589887 32352 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:46:14.589895 32352 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:46:14.589905 32352 net.cpp:150] Setting up L3_b3_relu\nI0821 06:46:14.589911 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.589915 32352 net.cpp:165] Memory required for data: 1315329500\nI0821 06:46:14.589920 32352 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.589927 32352 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.589933 32352 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:46:14.589941 32352 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:46:14.589951 32352 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:46:14.590000 32352 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:46:14.590011 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.590018 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.590023 32352 net.cpp:165] Memory required for data: 1319425500\nI0821 06:46:14.590029 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:46:14.590041 32352 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:46:14.590049 32352 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:46:14.590065 32352 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:46:14.591130 32352 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:46:14.591146 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.591151 32352 net.cpp:165] Memory required for data: 1321473500\nI0821 06:46:14.591161 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:46:14.591172 32352 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:46:14.591179 32352 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:46:14.591190 32352 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:46:14.591464 32352 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:46:14.591477 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.591482 32352 net.cpp:165] Memory required for data: 1323521500\nI0821 06:46:14.591493 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:46:14.591502 32352 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:46:14.591508 32352 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:46:14.591519 32352 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.591578 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:46:14.591750 32352 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:46:14.591764 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.591769 32352 net.cpp:165] Memory required for data: 1325569500\nI0821 06:46:14.591778 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:46:14.591786 32352 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:46:14.591792 32352 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:46:14.591804 32352 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:46:14.591814 32352 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:46:14.591820 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.591825 32352 net.cpp:165] Memory required for data: 1327617500\nI0821 06:46:14.591830 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:46:14.591843 32352 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:46:14.591850 32352 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:46:14.591858 32352 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:46:14.594024 32352 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:46:14.594043 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594048 32352 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:46:14.594058 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:46:14.594070 32352 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:46:14.594077 32352 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:46:14.594089 32352 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:46:14.594369 32352 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:46:14.594383 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594388 32352 net.cpp:165] Memory required for data: 1331713500\nI0821 06:46:14.594398 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:46:14.594408 32352 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:46:14.594413 32352 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:46:14.594424 32352 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:46:14.594485 32352 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:46:14.594655 32352 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:46:14.594669 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594674 32352 net.cpp:165] Memory required for data: 1333761500\nI0821 06:46:14.594683 32352 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:46:14.594692 32352 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:46:14.594699 32352 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:46:14.594705 32352 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:46:14.594717 32352 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:46:14.594764 32352 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:46:14.594774 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594779 32352 net.cpp:165] Memory required for data: 1335809500\nI0821 06:46:14.594784 32352 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:46:14.594792 32352 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:46:14.594799 32352 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:46:14.594805 32352 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:46:14.594815 32352 net.cpp:150] Setting up L3_b4_relu\nI0821 06:46:14.594821 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594826 32352 net.cpp:165] Memory required for data: 1337857500\nI0821 06:46:14.594831 32352 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.594841 32352 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.594847 32352 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:46:14.594854 32352 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:46:14.594864 32352 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:46:14.594915 32352 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:46:14.594928 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594934 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.594939 32352 net.cpp:165] Memory required for data: 1341953500\nI0821 06:46:14.594944 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:46:14.594956 32352 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:46:14.594964 32352 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:46:14.594975 32352 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:46:14.596014 32352 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:46:14.596029 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.596034 32352 net.cpp:165] Memory required for data: 1344001500\nI0821 06:46:14.596042 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:46:14.596051 32352 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:46:14.596058 32352 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:46:14.596071 32352 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:46:14.596348 32352 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:46:14.596364 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.596369 32352 net.cpp:165] Memory required for data: 1346049500\nI0821 06:46:14.596379 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:46:14.596388 32352 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:46:14.596395 32352 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:46:14.596402 32352 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.596463 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:46:14.596632 32352 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:46:14.596645 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.596655 32352 net.cpp:165] Memory required for data: 1348097500\nI0821 06:46:14.596665 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:46:14.596676 32352 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:46:14.596683 32352 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:46:14.596690 32352 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:46:14.596700 32352 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:46:14.596707 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.596712 32352 net.cpp:165] Memory required for data: 1350145500\nI0821 06:46:14.596717 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:46:14.596730 32352 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:46:14.596736 32352 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:46:14.596752 32352 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:46:14.597785 32352 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:46:14.597800 32352 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:46:14.597805 32352 net.cpp:165] Memory required for data: 1352193500\nI0821 06:46:14.597815 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:46:14.597826 32352 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:46:14.597833 32352 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:46:14.597843 32352 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:46:14.598114 32352 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:46:14.598127 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598132 32352 net.cpp:165] Memory required for data: 1354241500\nI0821 06:46:14.598142 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:46:14.598151 32352 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:46:14.598157 32352 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:46:14.598168 32352 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:46:14.598228 32352 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:46:14.598390 32352 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:46:14.598403 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598408 32352 net.cpp:165] Memory required for data: 1356289500\nI0821 06:46:14.598417 32352 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:46:14.598426 32352 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:46:14.598433 32352 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:46:14.598439 32352 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:46:14.598449 32352 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:46:14.598486 32352 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:46:14.598498 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598503 32352 net.cpp:165] Memory required for data: 1358337500\nI0821 06:46:14.598508 32352 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:46:14.598516 32352 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:46:14.598521 32352 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:46:14.598531 32352 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:46:14.598541 32352 net.cpp:150] Setting up L3_b5_relu\nI0821 06:46:14.598548 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598552 32352 net.cpp:165] Memory required for data: 1360385500\nI0821 06:46:14.598557 32352 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.598564 32352 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.598569 32352 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:46:14.598577 32352 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:46:14.598587 32352 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:46:14.598634 32352 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:46:14.598646 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598659 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.598664 32352 net.cpp:165] Memory required for data: 1364481500\nI0821 06:46:14.598668 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:46:14.598680 32352 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:46:14.598686 32352 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:46:14.598698 32352 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:46:14.599736 32352 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:46:14.599751 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.599756 32352 net.cpp:165] Memory required for data: 1366529500\nI0821 06:46:14.599766 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:46:14.599781 
32352 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:46:14.599788 32352 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:46:14.599799 32352 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:46:14.600077 32352 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:46:14.600093 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.600098 32352 net.cpp:165] Memory required for data: 1368577500\nI0821 06:46:14.600109 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:46:14.600118 32352 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:46:14.600124 32352 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:46:14.600132 32352 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.600216 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:46:14.600383 32352 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:46:14.600396 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.600401 32352 net.cpp:165] Memory required for data: 1370625500\nI0821 06:46:14.600410 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:46:14.600421 32352 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:46:14.600427 32352 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:46:14.600435 32352 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:46:14.600445 32352 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:46:14.600452 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.600456 32352 net.cpp:165] Memory required for data: 1372673500\nI0821 06:46:14.600461 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:46:14.600474 32352 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:46:14.600481 32352 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:46:14.600489 32352 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:46:14.601528 32352 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:46:14.601543 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.601548 32352 net.cpp:165] Memory required for data: 1374721500\nI0821 06:46:14.601557 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:46:14.601569 32352 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:46:14.601577 32352 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:46:14.601588 32352 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:46:14.601871 32352 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:46:14.601884 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.601889 32352 net.cpp:165] Memory required for data: 1376769500\nI0821 06:46:14.601899 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:46:14.601908 32352 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:46:14.601915 32352 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:46:14.601925 32352 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:46:14.601985 32352 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:46:14.602149 32352 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:46:14.602162 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.602167 32352 net.cpp:165] Memory required for data: 1378817500\nI0821 06:46:14.602176 32352 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:46:14.602185 32352 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:46:14.602191 32352 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:46:14.602201 32352 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:46:14.602210 32352 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:46:14.602248 32352 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:46:14.602260 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.602264 32352 net.cpp:165] Memory required for data: 1380865500\nI0821 06:46:14.602269 32352 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:46:14.602277 32352 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:46:14.602291 32352 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:46:14.602301 32352 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:46:14.602311 32352 net.cpp:150] Setting up L3_b6_relu\nI0821 06:46:14.602319 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.602324 32352 net.cpp:165] Memory required for data: 1382913500\nI0821 06:46:14.602329 32352 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.602335 32352 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.602340 32352 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:46:14.602347 32352 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:46:14.602357 32352 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:46:14.602411 32352 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:46:14.602423 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.602429 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.602434 32352 net.cpp:165] Memory required for data: 1387009500\nI0821 06:46:14.602439 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:46:14.602450 32352 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:46:14.602458 32352 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:46:14.602468 32352 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:46:14.603507 32352 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:46:14.603521 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.603526 32352 net.cpp:165] Memory required for data: 1389057500\nI0821 06:46:14.603535 32352 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:46:14.603545 32352 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:46:14.603551 32352 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:46:14.603562 32352 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:46:14.603848 32352 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:46:14.603863 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.603868 32352 net.cpp:165] Memory required for data: 1391105500\nI0821 06:46:14.603878 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:46:14.603886 32352 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:46:14.603893 32352 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:46:14.603900 32352 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.603962 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:46:14.604157 32352 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:46:14.604174 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.604179 32352 net.cpp:165] Memory required for data: 1393153500\nI0821 06:46:14.604188 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:46:14.604223 32352 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:46:14.604231 32352 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:46:14.604240 32352 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:46:14.604250 32352 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:46:14.604257 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.604262 32352 net.cpp:165] Memory required for data: 1395201500\nI0821 06:46:14.604267 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:46:14.604279 32352 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:46:14.604285 32352 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:46:14.604295 32352 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:46:14.605331 32352 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:46:14.605346 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.605351 32352 net.cpp:165] Memory required for data: 1397249500\nI0821 06:46:14.605360 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:46:14.605376 32352 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:46:14.605383 32352 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:46:14.605396 32352 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:46:14.605676 32352 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:46:14.605693 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.605698 32352 net.cpp:165] Memory required for data: 1399297500\nI0821 06:46:14.605710 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:46:14.605718 32352 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:46:14.605725 32352 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:46:14.605732 32352 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:46:14.605792 32352 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:46:14.605955 32352 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:46:14.605968 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.605973 32352 net.cpp:165] Memory required for data: 1401345500\nI0821 06:46:14.605983 32352 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:46:14.605993 32352 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:46:14.606000 32352 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:46:14.606007 32352 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:46:14.606015 32352 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:46:14.606052 32352 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:46:14.606065 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.606068 32352 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:46:14.606075 32352 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:46:14.606081 32352 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:46:14.606087 32352 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:46:14.606094 32352 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:46:14.606103 32352 net.cpp:150] Setting up L3_b7_relu\nI0821 06:46:14.606111 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.606114 32352 net.cpp:165] Memory required for data: 1405441500\nI0821 06:46:14.606119 32352 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.606127 32352 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.606132 32352 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:46:14.606142 32352 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:46:14.606153 32352 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:46:14.606200 32352 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:46:14.606212 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.606220 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.606223 32352 net.cpp:165] Memory required for data: 1409537500\nI0821 06:46:14.606228 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:46:14.606242 32352 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:46:14.606248 32352 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:46:14.606258 32352 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:46:14.608289 32352 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:46:14.608306 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.608311 32352 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:46:14.608321 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:46:14.608335 32352 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:46:14.608341 32352 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:46:14.608350 32352 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:46:14.608625 32352 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:46:14.608639 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.608657 32352 net.cpp:165] Memory required for data: 1413633500\nI0821 06:46:14.608669 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:46:14.608678 32352 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:46:14.608685 32352 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:46:14.608692 32352 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.608759 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:46:14.608927 32352 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:46:14.608940 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.608945 32352 net.cpp:165] Memory required for data: 1415681500\nI0821 06:46:14.608954 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:46:14.608963 32352 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:46:14.608969 32352 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:46:14.608976 32352 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:46:14.608986 32352 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:46:14.608992 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.608997 32352 net.cpp:165] Memory required for data: 1417729500\nI0821 06:46:14.609002 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:46:14.609017 32352 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:46:14.609024 32352 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:46:14.609035 32352 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:46:14.610075 32352 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:46:14.610090 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610095 32352 net.cpp:165] Memory required for data: 1419777500\nI0821 06:46:14.610105 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:46:14.610117 32352 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:46:14.610123 32352 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:46:14.610131 32352 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:46:14.610409 32352 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:46:14.610421 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610426 32352 net.cpp:165] Memory required for data: 1421825500\nI0821 06:46:14.610436 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:46:14.610447 32352 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:46:14.610455 32352 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:46:14.610461 32352 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:46:14.610523 32352 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:46:14.610693 32352 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:46:14.610707 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610711 32352 net.cpp:165] Memory required for data: 1423873500\nI0821 06:46:14.610720 32352 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:46:14.610733 32352 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:46:14.610739 32352 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:46:14.610746 32352 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:46:14.610754 32352 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:46:14.610791 32352 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:46:14.610803 32352 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:46:14.610807 32352 net.cpp:165] Memory required for data: 1425921500\nI0821 06:46:14.610813 32352 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:46:14.610821 32352 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:46:14.610826 32352 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:46:14.610838 32352 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:46:14.610848 32352 net.cpp:150] Setting up L3_b8_relu\nI0821 06:46:14.610855 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610860 32352 net.cpp:165] Memory required for data: 1427969500\nI0821 06:46:14.610872 32352 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.610879 32352 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.610885 32352 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:46:14.610893 32352 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:46:14.610903 32352 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:46:14.610954 32352 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:46:14.610965 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610972 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.610976 32352 net.cpp:165] Memory required for data: 1432065500\nI0821 06:46:14.610982 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:46:14.610996 32352 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:46:14.611003 32352 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:46:14.611012 32352 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:46:14.612048 32352 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:46:14.612063 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:46:14.612068 32352 net.cpp:165] Memory required for data: 1434113500\nI0821 06:46:14.612077 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:46:14.612089 32352 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:46:14.612097 32352 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:46:14.612104 32352 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:46:14.612386 32352 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:46:14.612399 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.612404 32352 net.cpp:165] Memory required for data: 1436161500\nI0821 06:46:14.612414 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:46:14.612423 32352 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:46:14.612429 32352 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:46:14.612437 32352 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.612501 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:46:14.612671 32352 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:46:14.612685 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.612690 32352 net.cpp:165] Memory required for data: 1438209500\nI0821 06:46:14.612699 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:46:14.612707 32352 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:46:14.612715 32352 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:46:14.612725 32352 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:46:14.612735 32352 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:46:14.612741 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.612746 32352 net.cpp:165] Memory required for data: 1440257500\nI0821 06:46:14.612751 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:46:14.612763 32352 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:46:14.612771 32352 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:46:14.612778 32352 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:46:14.613814 32352 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:46:14.613829 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.613833 32352 net.cpp:165] Memory required for data: 1442305500\nI0821 06:46:14.613842 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:46:14.613854 32352 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:46:14.613862 32352 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:46:14.613869 32352 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:46:14.614151 32352 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:46:14.614164 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.614176 32352 net.cpp:165] Memory required for data: 1444353500\nI0821 06:46:14.614187 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:46:14.614198 32352 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:46:14.614205 32352 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:46:14.614213 32352 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:46:14.614276 32352 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:46:14.614446 32352 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:46:14.614460 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.614465 32352 net.cpp:165] Memory required for data: 1446401500\nI0821 06:46:14.614475 32352 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:46:14.614486 32352 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:46:14.614493 32352 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:46:14.614500 32352 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:46:14.614511 32352 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:46:14.614544 32352 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:46:14.614555 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.614560 32352 net.cpp:165] Memory required for data: 1448449500\nI0821 06:46:14.614565 32352 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:46:14.614576 32352 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:46:14.614583 32352 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:46:14.614590 32352 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:46:14.614599 32352 net.cpp:150] Setting up L3_b9_relu\nI0821 06:46:14.614606 32352 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:46:14.614610 32352 net.cpp:165] Memory required for data: 1450497500\nI0821 06:46:14.614615 32352 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:46:14.614624 32352 net.cpp:100] Creating Layer post_pool\nI0821 06:46:14.614629 32352 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:46:14.614636 32352 net.cpp:408] post_pool -> post_pool\nI0821 06:46:14.614679 32352 net.cpp:150] Setting up post_pool\nI0821 06:46:14.614696 32352 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:46:14.614701 32352 net.cpp:165] Memory required for data: 1450529500\nI0821 06:46:14.614706 32352 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:46:14.614717 32352 net.cpp:100] Creating Layer post_FC\nI0821 06:46:14.614723 32352 net.cpp:434] post_FC <- post_pool\nI0821 06:46:14.614732 32352 net.cpp:408] post_FC -> post_FC_top\nI0821 06:46:14.614899 32352 net.cpp:150] Setting up post_FC\nI0821 06:46:14.614912 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.614917 32352 net.cpp:165] Memory required for data: 1450534500\nI0821 06:46:14.614926 32352 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:46:14.614934 32352 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:46:14.614940 32352 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:46:14.614948 32352 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:46:14.614960 32352 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:46:14.615008 32352 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:46:14.615020 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.615026 32352 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:46:14.615031 32352 net.cpp:165] Memory required for data: 1450544500\nI0821 06:46:14.615036 32352 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:46:14.615047 32352 net.cpp:100] Creating Layer accuracy\nI0821 06:46:14.615053 32352 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:46:14.615061 32352 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:46:14.615067 32352 net.cpp:408] accuracy -> accuracy\nI0821 06:46:14.615079 32352 net.cpp:150] Setting up accuracy\nI0821 06:46:14.615087 32352 net.cpp:157] Top shape: (1)\nI0821 06:46:14.615098 32352 net.cpp:165] Memory required for data: 1450544504\nI0821 06:46:14.615104 32352 layer_factory.hpp:77] Creating layer loss\nI0821 06:46:14.615113 32352 net.cpp:100] Creating Layer loss\nI0821 06:46:14.615118 32352 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:46:14.615124 32352 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:46:14.615131 32352 net.cpp:408] loss -> loss\nI0821 06:46:14.615144 32352 layer_factory.hpp:77] Creating layer loss\nI0821 06:46:14.615269 32352 net.cpp:150] Setting up loss\nI0821 06:46:14.615283 32352 net.cpp:157] Top shape: (1)\nI0821 06:46:14.615286 32352 net.cpp:160]     with loss weight 1\nI0821 06:46:14.615303 32352 net.cpp:165] Memory required for data: 1450544508\nI0821 06:46:14.615309 32352 net.cpp:226] loss needs backward computation.\nI0821 06:46:14.615315 32352 net.cpp:228] accuracy does not need backward computation.\nI0821 06:46:14.615321 32352 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:46:14.615326 32352 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:46:14.615331 32352 net.cpp:226] post_pool needs backward computation.\nI0821 06:46:14.615336 32352 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:46:14.615341 32352 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.615346 32352 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.615351 32352 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.615356 32352 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.615361 32352 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.615365 32352 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.615370 32352 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.615375 32352 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.615381 32352 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:46:14.615386 32352 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:46:14.615391 32352 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.615396 32352 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.615401 32352 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.615406 32352 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.615411 32352 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.615416 32352 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:46:14.615419 32352 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.615424 32352 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.615429 32352 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:46:14.615435 32352 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:46:14.615439 32352 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:46:14.615445 32352 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.615450 32352 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.615455 32352 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.615460 32352 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.615464 32352 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.615469 32352 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.615474 32352 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.615479 32352 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:46:14.615484 32352 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:46:14.615489 32352 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.615494 32352 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.615500 32352 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.615512 32352 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.615519 32352 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.615523 32352 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.615528 32352 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.615535 32352 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.615540 32352 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:46:14.615545 32352 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:46:14.615550 32352 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:46:14.615555 32352 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.615559 32352 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.615564 32352 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.615569 32352 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.615574 32352 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.615579 32352 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.615584 32352 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.615589 32352 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:46:14.615595 32352 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:46:14.615599 32352 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.615605 32352 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.615610 32352 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.615618 32352 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.615624 32352 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.615629 32352 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.615634 32352 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.615639 32352 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.615644 32352 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:46:14.615655 32352 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:46:14.615661 32352 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:46:14.615667 32352 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.615674 32352 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.615679 32352 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.615684 32352 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.615689 32352 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.615694 
32352 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.615698 32352 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.615705 32352 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:46:14.615710 32352 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:46:14.615715 32352 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.615720 32352 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.615725 32352 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.615731 32352 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.615736 32352 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.615741 32352 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.615746 32352 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.615751 32352 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.615756 32352 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:46:14.615762 32352 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:46:14.615774 32352 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:46:14.615780 32352 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:46:14.615785 32352 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.615792 32352 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:46:14.615797 32352 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.615803 32352 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.615808 32352 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.615813 32352 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.615818 32352 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:46:14.615823 32352 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.615828 32352 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.615833 32352 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:46:14.615839 32352 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:46:14.615844 32352 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.615849 32352 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.615854 32352 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.615860 32352 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.615865 32352 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.615870 32352 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.615875 32352 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.615880 32352 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.615886 32352 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:46:14.615891 32352 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:46:14.615900 32352 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.615906 32352 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.615911 32352 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.615917 32352 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.615922 32352 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.615927 32352 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:46:14.615932 32352 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.615937 32352 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.615943 32352 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:46:14.615948 32352 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:46:14.615953 32352 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:46:14.615959 32352 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.615964 32352 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.615969 32352 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.615975 32352 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.615980 32352 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.615985 32352 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.615990 32352 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.615995 32352 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:46:14.616001 32352 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:46:14.616006 32352 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.616013 32352 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.616017 32352 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.616022 32352 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.616029 32352 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.616039 32352 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.616044 32352 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.616050 32352 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.616056 32352 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:46:14.616061 32352 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:46:14.616066 32352 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:46:14.616072 32352 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.616077 32352 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.616082 32352 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.616088 32352 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.616093 32352 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.616098 32352 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.616103 32352 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.616108 32352 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:46:14.616114 32352 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:46:14.616119 32352 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.616125 32352 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.616130 32352 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.616135 32352 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.616142 32352 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.616147 32352 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.616152 32352 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.616156 32352 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.616163 32352 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:46:14.616168 32352 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:46:14.616174 32352 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:46:14.616180 32352 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.616185 32352 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.616190 32352 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.616196 32352 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.616201 32352 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.616206 32352 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.616211 32352 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.616217 32352 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:46:14.616224 32352 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:46:14.616228 32352 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.616235 32352 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.616240 32352 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.616245 32352 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.616250 32352 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.616255 32352 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.616261 32352 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.616266 32352 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.616271 32352 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:46:14.616281 32352 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:46:14.616288 32352 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:46:14.616294 32352 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:46:14.616304 32352 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.616312 32352 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:46:14.616318 32352 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.616323 32352 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.616329 32352 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.616334 32352 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.616340 32352 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:46:14.616345 32352 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.616351 32352 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.616356 32352 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:46:14.616363 32352 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:46:14.616369 32352 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:46:14.616374 32352 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:46:14.616379 32352 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:46:14.616385 32352 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:46:14.616390 32352 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:46:14.616395 32352 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:46:14.616401 32352 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:46:14.616406 32352 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:46:14.616412 32352 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:46:14.616417 32352 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:46:14.616423 32352 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:46:14.616430 32352 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:46:14.616435 32352 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:46:14.616439 32352 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:46:14.616446 32352 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:46:14.616451 32352 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:46:14.616456 32352 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:46:14.616461 32352 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:46:14.616467 32352 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:46:14.616473 32352 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:46:14.616478 32352 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:46:14.616485 32352 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:46:14.616490 32352 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:46:14.616497 32352 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:46:14.616502 32352 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:46:14.616508 32352 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:46:14.616513 32352 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:46:14.616518 32352 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:46:14.616524 32352 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:46:14.616529 32352 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:46:14.616535 32352 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:46:14.616541 32352 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:46:14.616546 32352 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:46:14.616552 32352 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:46:14.616557 32352 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:46:14.616564 32352 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:46:14.616574 32352 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:46:14.616580 32352 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:46:14.616586 32352 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:46:14.616592 32352 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:46:14.616597 32352 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:46:14.616603 32352 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:46:14.616610 32352 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:46:14.616616 32352 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:46:14.616621 32352 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:46:14.616626 32352 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:46:14.616631 32352 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:46:14.616637 32352 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:46:14.616643 32352 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:46:14.616653 32352 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:46:14.616660 32352 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:46:14.616667 32352 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:46:14.616672 32352 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:46:14.616678 32352 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:46:14.616684 32352 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:46:14.616689 32352 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:46:14.616695 32352 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:46:14.616700 32352 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:46:14.616706 32352 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:46:14.616713 32352 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:46:14.616717 32352 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:46:14.616724 32352 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:46:14.616730 32352 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:46:14.616735 32352 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:46:14.616741 32352 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:46:14.616747 32352 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:46:14.616752 32352 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:46:14.616758 32352 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:46:14.616765 32352 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:46:14.616770 32352 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:46:14.616775 32352 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:46:14.616781 32352 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:46:14.616787 32352 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:46:14.616793 32352 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:46:14.616798 32352 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:46:14.616803 32352 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:46:14.616809 32352 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:46:14.616816 32352 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:46:14.616822 32352 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:46:14.616827 32352 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:46:14.616832 32352 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:46:14.616839 32352 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:46:14.616844 32352 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:46:14.616858 32352 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:46:14.616864 32352 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:46:14.616870 32352 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:46:14.616875 32352 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:46:14.616881 32352 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:46:14.616888 32352 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:46:14.616892 32352 net.cpp:226] pre_relu needs backward computation.\nI0821 06:46:14.616899 32352 net.cpp:226] pre_scale needs backward computation.\nI0821 06:46:14.616904 32352 net.cpp:226] pre_bn needs backward computation.\nI0821 06:46:14.616909 32352 net.cpp:226] pre_conv needs backward computation.\nI0821 06:46:14.616915 32352 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:46:14.616922 32352 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:46:14.616926 32352 net.cpp:270] This network produces output accuracy\nI0821 06:46:14.616933 32352 net.cpp:270] This network produces output loss\nI0821 06:46:14.617262 32352 net.cpp:283] Network initialization done.\nI0821 06:46:14.618273 32352 solver.cpp:60] Solver scaffolding done.\nI0821 06:46:14.842901 32352 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:46:15.201809 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:15.201887 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:15.208679 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:15.432740 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:15.432857 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:15.467509 32352 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:46:15.467620 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:15.919872 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:15.919947 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:15.927592 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:16.177239 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:16.177386 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:16.235513 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:16.235651 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:16.754197 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:16.754251 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:16.762907 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:17.028494 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:17.028626 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:17.099203 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:17.099334 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:17.182557 32352 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:46:17.663406 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:17.663475 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:46:17.673218 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:17.962687 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:17.962889 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:18.054087 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:18.054270 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:18.706960 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:18.707015 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:18.717664 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:19.028668 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:19.028852 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:19.140538 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:19.140722 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:19.851786 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:19.851838 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:19.863693 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:20.206939 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:20.207144 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:20.339346 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:20.339548 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:46:21.122627 32352 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:46:21.122704 32352 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:46:21.135316 32352 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:46:21.180939 32379 blocking_queue.cpp:50] Waiting for data\nI0821 06:46:21.234411 32379 blocking_queue.cpp:50] Waiting for data\nI0821 06:46:21.568100 32352 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:46:21.568348 32352 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:46:21.720207 32352 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:46:21.720439 32352 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:46:21.893887 32352 parallel.cpp:425] Starting Optimization\nI0821 06:46:21.895634 32352 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:46:21.895658 32352 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:46:21.900686 32352 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:47:42.459180 32352 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:47:42.459517 32352 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:47:46.406862 32352 solver.cpp:228] Iteration 0, loss = 4.55336\nI0821 06:47:46.406919 32352 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 06:47:46.406936 32352 solver.cpp:244]     Train net output #1: loss = 4.55336 (* 1 = 4.55336 loss)\nI0821 06:47:46.525985 32352 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:50:05.119103 32352 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:51:25.795958 32352 solver.cpp:404]     Test net output #0: accuracy = 0.41112\nI0821 06:51:25.796236 32352 solver.cpp:404]     Test net output #1: loss = 1.63625 (* 1 = 1.63625 loss)\nI0821 06:51:27.103934 32352 
solver.cpp:228] Iteration 100, loss = 1.30121\nI0821 06:51:27.103986 32352 solver.cpp:244]     Train net output #0: accuracy = 0.488\nI0821 06:51:27.104003 32352 solver.cpp:244]     Train net output #1: loss = 1.30121 (* 1 = 1.30121 loss)\nI0821 06:51:27.208003 32352 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:53:45.762316 32352 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:55:06.431396 32352 solver.cpp:404]     Test net output #0: accuracy = 0.58284\nI0821 06:55:06.431639 32352 solver.cpp:404]     Test net output #1: loss = 1.22607 (* 1 = 1.22607 loss)\nI0821 06:55:07.738960 32352 solver.cpp:228] Iteration 200, loss = 0.986049\nI0821 06:55:07.739012 32352 solver.cpp:244]     Train net output #0: accuracy = 0.624\nI0821 06:55:07.739028 32352 solver.cpp:244]     Train net output #1: loss = 0.986049 (* 1 = 0.986049 loss)\nI0821 06:55:07.849216 32352 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 06:57:26.431437 32352 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 06:58:46.990943 32352 solver.cpp:404]     Test net output #0: accuracy = 0.67348\nI0821 06:58:46.991166 32352 solver.cpp:404]     Test net output #1: loss = 0.977046 (* 1 = 0.977046 loss)\nI0821 06:58:48.299221 32352 solver.cpp:228] Iteration 300, loss = 0.781848\nI0821 06:58:48.299273 32352 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 06:58:48.299288 32352 solver.cpp:244]     Train net output #1: loss = 0.781848 (* 1 = 0.781848 loss)\nI0821 06:58:48.409917 32352 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:01:06.983309 32352 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:02:27.473403 32352 solver.cpp:404]     Test net output #0: accuracy = 0.67292\nI0821 07:02:27.473670 32352 solver.cpp:404]     Test net output #1: loss = 0.959184 (* 1 = 0.959184 loss)\nI0821 07:02:28.782034 32352 solver.cpp:228] Iteration 400, loss = 0.610043\nI0821 07:02:28.782086 32352 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 
07:02:28.782104 32352 solver.cpp:244]     Train net output #1: loss = 0.610043 (* 1 = 0.610043 loss)\nI0821 07:02:28.888983 32352 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:04:47.499904 32352 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:06:08.292830 32352 solver.cpp:404]     Test net output #0: accuracy = 0.63896\nI0821 07:06:08.293054 32352 solver.cpp:404]     Test net output #1: loss = 1.27272 (* 1 = 1.27272 loss)\nI0821 07:06:09.600160 32352 solver.cpp:228] Iteration 500, loss = 0.601429\nI0821 07:06:09.600214 32352 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0821 07:06:09.600229 32352 solver.cpp:244]     Train net output #1: loss = 0.601429 (* 1 = 0.601429 loss)\nI0821 07:06:09.707932 32352 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:08:28.277667 32352 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:09:48.977967 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7462\nI0821 07:09:48.978185 32352 solver.cpp:404]     Test net output #1: loss = 0.809919 (* 1 = 0.809919 loss)\nI0821 07:09:50.285548 32352 solver.cpp:228] Iteration 600, loss = 0.476328\nI0821 07:09:50.285599 32352 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 07:09:50.285614 32352 solver.cpp:244]     Train net output #1: loss = 0.476328 (* 1 = 0.476328 loss)\nI0821 07:09:50.396095 32352 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:12:08.879894 32352 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:13:29.461550 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74228\nI0821 07:13:29.461827 32352 solver.cpp:404]     Test net output #1: loss = 0.864936 (* 1 = 0.864936 loss)\nI0821 07:13:30.769749 32352 solver.cpp:228] Iteration 700, loss = 0.391851\nI0821 07:13:30.769805 32352 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:13:30.769822 32352 solver.cpp:244]     Train net output #1: loss = 0.391851 (* 1 = 0.391851 loss)\nI0821 07:13:30.873648 32352 sgd_solver.cpp:166] 
Iteration 700, lr = 0.506\nI0821 07:15:49.342386 32352 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:17:09.763588 32352 solver.cpp:404]     Test net output #0: accuracy = 0.75524\nI0821 07:17:09.763805 32352 solver.cpp:404]     Test net output #1: loss = 0.825677 (* 1 = 0.825677 loss)\nI0821 07:17:11.071130 32352 solver.cpp:228] Iteration 800, loss = 0.303571\nI0821 07:17:11.071182 32352 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:17:11.071198 32352 solver.cpp:244]     Train net output #1: loss = 0.303571 (* 1 = 0.303571 loss)\nI0821 07:17:11.182240 32352 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:19:29.705396 32352 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:20:50.329265 32352 solver.cpp:404]     Test net output #0: accuracy = 0.77712\nI0821 07:20:50.329538 32352 solver.cpp:404]     Test net output #1: loss = 0.723441 (* 1 = 0.723441 loss)\nI0821 07:20:51.637846 32352 solver.cpp:228] Iteration 900, loss = 0.365313\nI0821 07:20:51.637907 32352 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:20:51.637925 32352 solver.cpp:244]     Train net output #1: loss = 0.365313 (* 1 = 0.365313 loss)\nI0821 07:20:51.740890 32352 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:23:10.042235 32352 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:24:30.926615 32352 solver.cpp:404]     Test net output #0: accuracy = 0.67788\nI0821 07:24:30.926887 32352 solver.cpp:404]     Test net output #1: loss = 1.20716 (* 1 = 1.20716 loss)\nI0821 07:24:32.235170 32352 solver.cpp:228] Iteration 1000, loss = 0.363236\nI0821 07:24:32.235227 32352 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:24:32.235245 32352 solver.cpp:244]     Train net output #1: loss = 0.363236 (* 1 = 0.363236 loss)\nI0821 07:24:32.343443 32352 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:26:50.721719 32352 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:28:11.594272 32352 
solver.cpp:404]     Test net output #0: accuracy = 0.7508\nI0821 07:28:11.594537 32352 solver.cpp:404]     Test net output #1: loss = 0.833577 (* 1 = 0.833577 loss)\nI0821 07:28:12.902460 32352 solver.cpp:228] Iteration 1100, loss = 0.344344\nI0821 07:28:12.902509 32352 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 07:28:12.902525 32352 solver.cpp:244]     Train net output #1: loss = 0.344344 (* 1 = 0.344344 loss)\nI0821 07:28:13.007614 32352 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:30:31.378373 32352 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:31:52.263468 32352 solver.cpp:404]     Test net output #0: accuracy = 0.71936\nI0821 07:31:52.263763 32352 solver.cpp:404]     Test net output #1: loss = 1.03203 (* 1 = 1.03203 loss)\nI0821 07:31:53.572003 32352 solver.cpp:228] Iteration 1200, loss = 0.230244\nI0821 07:31:53.572054 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 07:31:53.572070 32352 solver.cpp:244]     Train net output #1: loss = 0.230244 (* 1 = 0.230244 loss)\nI0821 07:31:53.673286 32352 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:34:11.973649 32352 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:35:32.834504 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7084\nI0821 07:35:32.834794 32352 solver.cpp:404]     Test net output #1: loss = 1.08225 (* 1 = 1.08225 loss)\nI0821 07:35:34.142709 32352 solver.cpp:228] Iteration 1300, loss = 0.233472\nI0821 07:35:34.142760 32352 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 07:35:34.142776 32352 solver.cpp:244]     Train net output #1: loss = 0.233472 (* 1 = 0.233472 loss)\nI0821 07:35:34.247277 32352 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:37:52.555433 32352 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:39:13.426837 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74852\nI0821 07:39:13.427134 32352 solver.cpp:404]     Test net output #1: loss = 
0.949977 (* 1 = 0.949977 loss)\nI0821 07:39:14.734771 32352 solver.cpp:228] Iteration 1400, loss = 0.199403\nI0821 07:39:14.734815 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 07:39:14.734832 32352 solver.cpp:244]     Train net output #1: loss = 0.199403 (* 1 = 0.199403 loss)\nI0821 07:39:14.837860 32352 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:41:32.990713 32352 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:42:53.896847 32352 solver.cpp:404]     Test net output #0: accuracy = 0.75576\nI0821 07:42:53.897147 32352 solver.cpp:404]     Test net output #1: loss = 0.881173 (* 1 = 0.881173 loss)\nI0821 07:42:55.204799 32352 solver.cpp:228] Iteration 1500, loss = 0.173821\nI0821 07:42:55.204859 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 07:42:55.204877 32352 solver.cpp:244]     Train net output #1: loss = 0.173821 (* 1 = 0.173821 loss)\nI0821 07:42:55.307126 32352 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:45:13.494696 32352 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:46:34.404115 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74508\nI0821 07:46:34.404399 32352 solver.cpp:404]     Test net output #1: loss = 0.9323 (* 1 = 0.9323 loss)\nI0821 07:46:35.711822 32352 solver.cpp:228] Iteration 1600, loss = 0.260076\nI0821 07:46:35.711874 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 07:46:35.711894 32352 solver.cpp:244]     Train net output #1: loss = 0.260076 (* 1 = 0.260076 loss)\nI0821 07:46:35.817728 32352 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:48:54.095157 32352 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:50:15.006204 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76408\nI0821 07:50:15.006479 32352 solver.cpp:404]     Test net output #1: loss = 0.847204 (* 1 = 0.847204 loss)\nI0821 07:50:16.314074 32352 solver.cpp:228] Iteration 1700, loss = 0.237076\nI0821 07:50:16.314124 32352 
solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 07:50:16.314141 32352 solver.cpp:244]     Train net output #1: loss = 0.237076 (* 1 = 0.237076 loss)\nI0821 07:50:16.420693 32352 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:52:34.695525 32352 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:53:55.594811 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79168\nI0821 07:53:55.595124 32352 solver.cpp:404]     Test net output #1: loss = 0.731166 (* 1 = 0.731166 loss)\nI0821 07:53:56.903030 32352 solver.cpp:228] Iteration 1800, loss = 0.142112\nI0821 07:53:56.903075 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 07:53:56.903091 32352 solver.cpp:244]     Train net output #1: loss = 0.142112 (* 1 = 0.142112 loss)\nI0821 07:53:57.008849 32352 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:56:15.192854 32352 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 07:57:36.085530 32352 solver.cpp:404]     Test net output #0: accuracy = 0.77248\nI0821 07:57:36.085855 32352 solver.cpp:404]     Test net output #1: loss = 0.8059 (* 1 = 0.8059 loss)\nI0821 07:57:37.391288 32352 solver.cpp:228] Iteration 1900, loss = 0.164146\nI0821 07:57:37.391335 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 07:57:37.391350 32352 solver.cpp:244]     Train net output #1: loss = 0.164146 (* 1 = 0.164146 loss)\nI0821 07:57:37.495488 32352 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 07:59:55.673740 32352 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 08:01:16.503162 32352 solver.cpp:404]     Test net output #0: accuracy = 0.73256\nI0821 08:01:16.503437 32352 solver.cpp:404]     Test net output #1: loss = 1.0106 (* 1 = 1.0106 loss)\nI0821 08:01:17.808372 32352 solver.cpp:228] Iteration 2000, loss = 0.18459\nI0821 08:01:17.808423 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:01:17.808440 32352 solver.cpp:244]     Train net output #1: loss = 0.18459 (* 
1 = 0.18459 loss)\nI0821 08:01:17.913723 32352 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:03:36.126302 32352 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:04:56.970970 32352 solver.cpp:404]     Test net output #0: accuracy = 0.73896\nI0821 08:04:56.971276 32352 solver.cpp:404]     Test net output #1: loss = 1.1846 (* 1 = 1.1846 loss)\nI0821 08:04:58.276011 32352 solver.cpp:228] Iteration 2100, loss = 0.220415\nI0821 08:04:58.276059 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 08:04:58.276077 32352 solver.cpp:244]     Train net output #1: loss = 0.220415 (* 1 = 0.220415 loss)\nI0821 08:04:58.388075 32352 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:07:16.671211 32352 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:08:37.476850 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7814\nI0821 08:08:37.477141 32352 solver.cpp:404]     Test net output #1: loss = 0.83991 (* 1 = 0.83991 loss)\nI0821 08:08:38.781895 32352 solver.cpp:228] Iteration 2200, loss = 0.155017\nI0821 08:08:38.781937 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:08:38.781954 32352 solver.cpp:244]     Train net output #1: loss = 0.155017 (* 1 = 0.155017 loss)\nI0821 08:08:38.890848 32352 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:10:57.284174 32352 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:12:18.104192 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79584\nI0821 08:12:18.104465 32352 solver.cpp:404]     Test net output #1: loss = 0.729062 (* 1 = 0.729062 loss)\nI0821 08:12:19.408887 32352 solver.cpp:228] Iteration 2300, loss = 0.165101\nI0821 08:12:19.408931 32352 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:12:19.408947 32352 solver.cpp:244]     Train net output #1: loss = 0.165102 (* 1 = 0.165102 loss)\nI0821 08:12:19.515266 32352 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:14:37.828194 32352 solver.cpp:337] 
Iteration 2400, Testing net (#0)\nI0821 08:15:58.649932 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7888\nI0821 08:15:58.650220 32352 solver.cpp:404]     Test net output #1: loss = 0.774134 (* 1 = 0.774134 loss)\nI0821 08:15:59.954689 32352 solver.cpp:228] Iteration 2400, loss = 0.240816\nI0821 08:15:59.954732 32352 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:15:59.954749 32352 solver.cpp:244]     Train net output #1: loss = 0.240816 (* 1 = 0.240816 loss)\nI0821 08:16:00.065457 32352 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:18:18.285928 32352 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:19:39.053697 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74276\nI0821 08:19:39.053961 32352 solver.cpp:404]     Test net output #1: loss = 1.04963 (* 1 = 1.04963 loss)\nI0821 08:19:40.358844 32352 solver.cpp:228] Iteration 2500, loss = 0.149995\nI0821 08:19:40.358893 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:19:40.358909 32352 solver.cpp:244]     Train net output #1: loss = 0.149995 (* 1 = 0.149995 loss)\nI0821 08:19:40.466574 32352 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:21:58.684276 32352 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:23:19.478057 32352 solver.cpp:404]     Test net output #0: accuracy = 0.68016\nI0821 08:23:19.478348 32352 solver.cpp:404]     Test net output #1: loss = 1.41097 (* 1 = 1.41097 loss)\nI0821 08:23:20.783090 32352 solver.cpp:228] Iteration 2600, loss = 0.273888\nI0821 08:23:20.783144 32352 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 08:23:20.783162 32352 solver.cpp:244]     Train net output #1: loss = 0.273888 (* 1 = 0.273888 loss)\nI0821 08:23:20.891760 32352 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:25:39.122862 32352 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:26:59.913573 32352 solver.cpp:404]     Test net output #0: accuracy = 0.80412\nI0821 08:26:59.913838 
32352 solver.cpp:404]     Test net output #1: loss = 0.653133 (* 1 = 0.653133 loss)\nI0821 08:27:01.218564 32352 solver.cpp:228] Iteration 2700, loss = 0.185146\nI0821 08:27:01.218621 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:27:01.218639 32352 solver.cpp:244]     Train net output #1: loss = 0.185146 (* 1 = 0.185146 loss)\nI0821 08:27:01.328194 32352 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:29:19.501701 32352 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:30:40.333290 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76312\nI0821 08:30:40.333523 32352 solver.cpp:404]     Test net output #1: loss = 0.85309 (* 1 = 0.85309 loss)\nI0821 08:30:41.639356 32352 solver.cpp:228] Iteration 2800, loss = 0.182727\nI0821 08:30:41.639413 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:30:41.639431 32352 solver.cpp:244]     Train net output #1: loss = 0.182727 (* 1 = 0.182727 loss)\nI0821 08:30:41.748297 32352 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:32:59.888206 32352 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:34:20.209962 32352 solver.cpp:404]     Test net output #0: accuracy = 0.73328\nI0821 08:34:20.210211 32352 solver.cpp:404]     Test net output #1: loss = 1.04355 (* 1 = 1.04355 loss)\nI0821 08:34:21.514750 32352 solver.cpp:228] Iteration 2900, loss = 0.121994\nI0821 08:34:21.514811 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:34:21.514833 32352 solver.cpp:244]     Train net output #1: loss = 0.121994 (* 1 = 0.121994 loss)\nI0821 08:34:21.621093 32352 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:36:39.921445 32352 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:38:00.261499 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74008\nI0821 08:38:00.261744 32352 solver.cpp:404]     Test net output #1: loss = 1.00931 (* 1 = 1.00931 loss)\nI0821 08:38:01.567961 32352 solver.cpp:228] Iteration 
3000, loss = 0.24875\nI0821 08:38:01.568017 32352 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 08:38:01.568033 32352 solver.cpp:244]     Train net output #1: loss = 0.24875 (* 1 = 0.24875 loss)\nI0821 08:38:01.677171 32352 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:40:19.825168 32352 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:41:40.275224 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78272\nI0821 08:41:40.275482 32352 solver.cpp:404]     Test net output #1: loss = 0.843761 (* 1 = 0.843761 loss)\nI0821 08:41:41.581833 32352 solver.cpp:228] Iteration 3100, loss = 0.190914\nI0821 08:41:41.581888 32352 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:41:41.581905 32352 solver.cpp:244]     Train net output #1: loss = 0.190914 (* 1 = 0.190914 loss)\nI0821 08:41:41.692836 32352 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:43:59.955154 32352 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:45:20.758934 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79128\nI0821 08:45:20.759220 32352 solver.cpp:404]     Test net output #1: loss = 0.807646 (* 1 = 0.807646 loss)\nI0821 08:45:22.065497 32352 solver.cpp:228] Iteration 3200, loss = 0.168543\nI0821 08:45:22.065551 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:45:22.065568 32352 solver.cpp:244]     Train net output #1: loss = 0.168543 (* 1 = 0.168543 loss)\nI0821 08:45:22.176055 32352 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:47:40.401551 32352 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:49:01.245677 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78848\nI0821 08:49:01.245987 32352 solver.cpp:404]     Test net output #1: loss = 0.720161 (* 1 = 0.720161 loss)\nI0821 08:49:02.550614 32352 solver.cpp:228] Iteration 3300, loss = 0.185721\nI0821 08:49:02.550674 32352 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:49:02.550693 32352 
solver.cpp:244]     Train net output #1: loss = 0.185721 (* 1 = 0.185721 loss)\nI0821 08:49:02.654803 32352 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:51:20.837082 32352 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:52:41.686208 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7092\nI0821 08:52:41.686509 32352 solver.cpp:404]     Test net output #1: loss = 1.44309 (* 1 = 1.44309 loss)\nI0821 08:52:42.991250 32352 solver.cpp:228] Iteration 3400, loss = 0.193936\nI0821 08:52:42.991304 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:52:42.991322 32352 solver.cpp:244]     Train net output #1: loss = 0.193936 (* 1 = 0.193936 loss)\nI0821 08:52:43.096384 32352 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:55:01.427309 32352 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:56:22.268343 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78816\nI0821 08:56:22.268656 32352 solver.cpp:404]     Test net output #1: loss = 0.756056 (* 1 = 0.756056 loss)\nI0821 08:56:23.573207 32352 solver.cpp:228] Iteration 3500, loss = 0.261578\nI0821 08:56:23.573258 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 08:56:23.573276 32352 solver.cpp:244]     Train net output #1: loss = 0.261578 (* 1 = 0.261578 loss)\nI0821 08:56:23.682482 32352 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 08:58:41.917356 32352 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 09:00:02.764493 32352 solver.cpp:404]     Test net output #0: accuracy = 0.80716\nI0821 09:00:02.764791 32352 solver.cpp:404]     Test net output #1: loss = 0.700004 (* 1 = 0.700004 loss)\nI0821 09:00:04.069932 32352 solver.cpp:228] Iteration 3600, loss = 0.243048\nI0821 09:00:04.069988 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 09:00:04.070004 32352 solver.cpp:244]     Train net output #1: loss = 0.243048 (* 1 = 0.243048 loss)\nI0821 09:00:04.181625 32352 sgd_solver.cpp:166] Iteration 
3600, lr = 2.188\nI0821 09:02:22.382213 32352 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:03:43.226614 32352 solver.cpp:404]     Test net output #0: accuracy = 0.77404\nI0821 09:03:43.226927 32352 solver.cpp:404]     Test net output #1: loss = 0.811211 (* 1 = 0.811211 loss)\nI0821 09:03:44.531756 32352 solver.cpp:228] Iteration 3700, loss = 0.18265\nI0821 09:03:44.531810 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 09:03:44.531831 32352 solver.cpp:244]     Train net output #1: loss = 0.18265 (* 1 = 0.18265 loss)\nI0821 09:03:44.640346 32352 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:06:02.869421 32352 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:07:23.709235 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79436\nI0821 09:07:23.709522 32352 solver.cpp:404]     Test net output #1: loss = 0.72389 (* 1 = 0.72389 loss)\nI0821 09:07:25.014406 32352 solver.cpp:228] Iteration 3800, loss = 0.157343\nI0821 09:07:25.014466 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:07:25.014483 32352 solver.cpp:244]     Train net output #1: loss = 0.157343 (* 1 = 0.157343 loss)\nI0821 09:07:25.120002 32352 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:09:43.280712 32352 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:11:04.116775 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7372\nI0821 09:11:04.117072 32352 solver.cpp:404]     Test net output #1: loss = 1.14375 (* 1 = 1.14375 loss)\nI0821 09:11:05.421710 32352 solver.cpp:228] Iteration 3900, loss = 0.141639\nI0821 09:11:05.421763 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:11:05.421780 32352 solver.cpp:244]     Train net output #1: loss = 0.141639 (* 1 = 0.141639 loss)\nI0821 09:11:05.529237 32352 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:13:23.768007 32352 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:14:44.609973 32352 solver.cpp:404]     
Test net output #0: accuracy = 0.76512\nI0821 09:14:44.610282 32352 solver.cpp:404]     Test net output #1: loss = 0.834174 (* 1 = 0.834174 loss)\nI0821 09:14:45.915082 32352 solver.cpp:228] Iteration 4000, loss = 0.32665\nI0821 09:14:45.915143 32352 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:14:45.915160 32352 solver.cpp:244]     Train net output #1: loss = 0.32665 (* 1 = 0.32665 loss)\nI0821 09:14:46.019729 32352 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:17:04.299597 32352 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:18:25.124107 32352 solver.cpp:404]     Test net output #0: accuracy = 0.68152\nI0821 09:18:25.124421 32352 solver.cpp:404]     Test net output #1: loss = 1.29523 (* 1 = 1.29523 loss)\nI0821 09:18:26.429203 32352 solver.cpp:228] Iteration 4100, loss = 0.352177\nI0821 09:18:26.429257 32352 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 09:18:26.429275 32352 solver.cpp:244]     Train net output #1: loss = 0.352177 (* 1 = 0.352177 loss)\nI0821 09:18:26.537240 32352 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:20:44.897006 32352 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:22:05.727273 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78912\nI0821 09:22:05.727563 32352 solver.cpp:404]     Test net output #1: loss = 0.754225 (* 1 = 0.754225 loss)\nI0821 09:22:07.032006 32352 solver.cpp:228] Iteration 4200, loss = 0.160579\nI0821 09:22:07.032058 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:22:07.032075 32352 solver.cpp:244]     Train net output #1: loss = 0.160579 (* 1 = 0.160579 loss)\nI0821 09:22:07.139506 32352 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:24:25.297511 32352 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:25:46.116797 32352 solver.cpp:404]     Test net output #0: accuracy = 0.71952\nI0821 09:25:46.117089 32352 solver.cpp:404]     Test net output #1: loss = 1.01821 (* 1 = 1.01821 
loss)\nI0821 09:25:47.421800 32352 solver.cpp:228] Iteration 4300, loss = 0.212382\nI0821 09:25:47.421857 32352 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:25:47.421875 32352 solver.cpp:244]     Train net output #1: loss = 0.212382 (* 1 = 0.212382 loss)\nI0821 09:25:47.531417 32352 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:28:05.674921 32352 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:29:26.510814 32352 solver.cpp:404]     Test net output #0: accuracy = 0.64432\nI0821 09:29:26.511123 32352 solver.cpp:404]     Test net output #1: loss = 1.84488 (* 1 = 1.84488 loss)\nI0821 09:29:27.816135 32352 solver.cpp:228] Iteration 4400, loss = 0.141872\nI0821 09:29:27.816177 32352 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 09:29:27.816193 32352 solver.cpp:244]     Train net output #1: loss = 0.141872 (* 1 = 0.141872 loss)\nI0821 09:29:27.929214 32352 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:31:45.893918 32352 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:33:06.729619 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78412\nI0821 09:33:06.729898 32352 solver.cpp:404]     Test net output #1: loss = 0.831221 (* 1 = 0.831221 loss)\nI0821 09:33:08.034283 32352 solver.cpp:228] Iteration 4500, loss = 0.215094\nI0821 09:33:08.034338 32352 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:33:08.034355 32352 solver.cpp:244]     Train net output #1: loss = 0.215094 (* 1 = 0.215094 loss)\nI0821 09:33:08.138309 32352 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:35:26.084957 32352 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:36:46.918718 32352 solver.cpp:404]     Test net output #0: accuracy = 0.59576\nI0821 09:36:46.919034 32352 solver.cpp:404]     Test net output #1: loss = 2.0797 (* 1 = 2.0797 loss)\nI0821 09:36:48.224019 32352 solver.cpp:228] Iteration 4600, loss = 0.26053\nI0821 09:36:48.224062 32352 solver.cpp:244]     Train net 
output #0: accuracy = 0.936\nI0821 09:36:48.224078 32352 solver.cpp:244]     Train net output #1: loss = 0.26053 (* 1 = 0.26053 loss)\nI0821 09:36:48.330235 32352 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:39:06.221491 32352 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:40:27.096860 32352 solver.cpp:404]     Test net output #0: accuracy = 0.74988\nI0821 09:40:27.097152 32352 solver.cpp:404]     Test net output #1: loss = 1.05453 (* 1 = 1.05453 loss)\nI0821 09:40:28.402284 32352 solver.cpp:228] Iteration 4700, loss = 0.148759\nI0821 09:40:28.402326 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 09:40:28.402343 32352 solver.cpp:244]     Train net output #1: loss = 0.148759 (* 1 = 0.148759 loss)\nI0821 09:40:28.503690 32352 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:42:46.358675 32352 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:44:07.228045 32352 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0821 09:44:07.228349 32352 solver.cpp:404]     Test net output #1: loss = 0.87991 (* 1 = 0.87991 loss)\nI0821 09:44:08.533107 32352 solver.cpp:228] Iteration 4800, loss = 0.204711\nI0821 09:44:08.533159 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 09:44:08.533176 32352 solver.cpp:244]     Train net output #1: loss = 0.204711 (* 1 = 0.204711 loss)\nI0821 09:44:08.634564 32352 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:46:26.581821 32352 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:47:47.443305 32352 solver.cpp:404]     Test net output #0: accuracy = 0.72284\nI0821 09:47:47.443614 32352 solver.cpp:404]     Test net output #1: loss = 1.07328 (* 1 = 1.07328 loss)\nI0821 09:47:48.748508 32352 solver.cpp:228] Iteration 4900, loss = 0.291741\nI0821 09:47:48.748549 32352 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 09:47:48.748565 32352 solver.cpp:244]     Train net output #1: loss = 0.291741 (* 1 = 0.291741 loss)\nI0821 
09:47:48.854640 32352 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 09:50:06.687824 32352 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:51:27.551592 32352 solver.cpp:404]     Test net output #0: accuracy = 0.819\nI0821 09:51:27.551883 32352 solver.cpp:404]     Test net output #1: loss = 0.61786 (* 1 = 0.61786 loss)\nI0821 09:51:28.856699 32352 solver.cpp:228] Iteration 5000, loss = 0.139746\nI0821 09:51:28.856739 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:51:28.856755 32352 solver.cpp:244]     Train net output #1: loss = 0.139746 (* 1 = 0.139746 loss)\nI0821 09:51:28.959282 32352 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:53:46.881839 32352 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:55:07.741600 32352 solver.cpp:404]     Test net output #0: accuracy = 0.72124\nI0821 09:55:07.741914 32352 solver.cpp:404]     Test net output #1: loss = 1.12131 (* 1 = 1.12131 loss)\nI0821 09:55:09.046479 32352 solver.cpp:228] Iteration 5100, loss = 0.249126\nI0821 09:55:09.046521 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 09:55:09.046536 32352 solver.cpp:244]     Train net output #1: loss = 0.249126 (* 1 = 0.249126 loss)\nI0821 09:55:09.151904 32352 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 09:57:27.069697 32352 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 09:58:47.969172 32352 solver.cpp:404]     Test net output #0: accuracy = 0.69192\nI0821 09:58:47.969485 32352 solver.cpp:404]     Test net output #1: loss = 1.39049 (* 1 = 1.39049 loss)\nI0821 09:58:49.274278 32352 solver.cpp:228] Iteration 5200, loss = 0.168101\nI0821 09:58:49.274322 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:58:49.274336 32352 solver.cpp:244]     Train net output #1: loss = 0.168101 (* 1 = 0.168101 loss)\nI0821 09:58:49.381830 32352 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 10:01:07.108328 32352 solver.cpp:337] Iteration 5300, Testing net 
(#0)\nI0821 10:02:27.993479 32352 solver.cpp:404]     Test net output #0: accuracy = 0.75392\nI0821 10:02:27.993821 32352 solver.cpp:404]     Test net output #1: loss = 0.817973 (* 1 = 0.817973 loss)\nI0821 10:02:29.298708 32352 solver.cpp:228] Iteration 5300, loss = 0.156415\nI0821 10:02:29.298765 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:02:29.298782 32352 solver.cpp:244]     Train net output #1: loss = 0.156415 (* 1 = 0.156415 loss)\nI0821 10:02:29.404136 32352 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:04:47.214308 32352 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:06:08.112882 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76564\nI0821 10:06:08.113168 32352 solver.cpp:404]     Test net output #1: loss = 0.882834 (* 1 = 0.882834 loss)\nI0821 10:06:09.417868 32352 solver.cpp:228] Iteration 5400, loss = 0.203302\nI0821 10:06:09.417922 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:06:09.417940 32352 solver.cpp:244]     Train net output #1: loss = 0.203303 (* 1 = 0.203303 loss)\nI0821 10:06:09.523246 32352 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:08:27.337678 32352 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:09:48.224695 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79988\nI0821 10:09:48.225010 32352 solver.cpp:404]     Test net output #1: loss = 0.692093 (* 1 = 0.692093 loss)\nI0821 10:09:49.529702 32352 solver.cpp:228] Iteration 5500, loss = 0.14102\nI0821 10:09:49.529757 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:09:49.529772 32352 solver.cpp:244]     Train net output #1: loss = 0.14102 (* 1 = 0.14102 loss)\nI0821 10:09:49.635782 32352 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:12:07.489276 32352 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:13:28.383821 32352 solver.cpp:404]     Test net output #0: accuracy = 0.81768\nI0821 10:13:28.384138 32352 solver.cpp:404]     
Test net output #1: loss = 0.674877 (* 1 = 0.674877 loss)\nI0821 10:13:29.689079 32352 solver.cpp:228] Iteration 5600, loss = 0.200282\nI0821 10:13:29.689128 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:13:29.689143 32352 solver.cpp:244]     Train net output #1: loss = 0.200282 (* 1 = 0.200282 loss)\nI0821 10:13:29.795064 32352 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:15:47.625279 32352 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:17:08.521224 32352 solver.cpp:404]     Test net output #0: accuracy = 0.73288\nI0821 10:17:08.521510 32352 solver.cpp:404]     Test net output #1: loss = 1.02787 (* 1 = 1.02787 loss)\nI0821 10:17:09.826735 32352 solver.cpp:228] Iteration 5700, loss = 0.207047\nI0821 10:17:09.826786 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:17:09.826800 32352 solver.cpp:244]     Train net output #1: loss = 0.207047 (* 1 = 0.207047 loss)\nI0821 10:17:09.931917 32352 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:19:27.758885 32352 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:20:48.651129 32352 solver.cpp:404]     Test net output #0: accuracy = 0.75228\nI0821 10:20:48.651409 32352 solver.cpp:404]     Test net output #1: loss = 1.07093 (* 1 = 1.07093 loss)\nI0821 10:20:49.956445 32352 solver.cpp:228] Iteration 5800, loss = 0.2479\nI0821 10:20:49.956488 32352 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:20:49.956504 32352 solver.cpp:244]     Train net output #1: loss = 0.2479 (* 1 = 0.2479 loss)\nI0821 10:20:50.057178 32352 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:23:07.819010 32352 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:24:28.710145 32352 solver.cpp:404]     Test net output #0: accuracy = 0.68724\nI0821 10:24:28.710430 32352 solver.cpp:404]     Test net output #1: loss = 1.30307 (* 1 = 1.30307 loss)\nI0821 10:24:30.015733 32352 solver.cpp:228] Iteration 5900, loss = 0.225887\nI0821 
10:24:30.015774 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:24:30.015789 32352 solver.cpp:244]     Train net output #1: loss = 0.225887 (* 1 = 0.225887 loss)\nI0821 10:24:30.118892 32352 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:26:47.914357 32352 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:28:08.816125 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76276\nI0821 10:28:08.816416 32352 solver.cpp:404]     Test net output #1: loss = 0.996231 (* 1 = 0.996231 loss)\nI0821 10:28:10.122694 32352 solver.cpp:228] Iteration 6000, loss = 0.117012\nI0821 10:28:10.122736 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:28:10.122752 32352 solver.cpp:244]     Train net output #1: loss = 0.117012 (* 1 = 0.117012 loss)\nI0821 10:28:10.223479 32352 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:30:28.059316 32352 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:31:48.937058 32352 solver.cpp:404]     Test net output #0: accuracy = 0.75692\nI0821 10:31:48.937428 32352 solver.cpp:404]     Test net output #1: loss = 0.96962 (* 1 = 0.96962 loss)\nI0821 10:31:50.242159 32352 solver.cpp:228] Iteration 6100, loss = 0.164981\nI0821 10:31:50.242202 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:31:50.242218 32352 solver.cpp:244]     Train net output #1: loss = 0.164981 (* 1 = 0.164981 loss)\nI0821 10:31:50.347062 32352 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:34:08.197222 32352 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:35:29.065119 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76264\nI0821 10:35:29.065407 32352 solver.cpp:404]     Test net output #1: loss = 0.980644 (* 1 = 0.980644 loss)\nI0821 10:35:30.371562 32352 solver.cpp:228] Iteration 6200, loss = 0.138693\nI0821 10:35:30.371604 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:35:30.371619 32352 solver.cpp:244]     Train net 
output #1: loss = 0.138693 (* 1 = 0.138693 loss)\nI0821 10:35:30.476884 32352 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:37:48.299616 32352 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:39:09.179035 32352 solver.cpp:404]     Test net output #0: accuracy = 0.82968\nI0821 10:39:09.179321 32352 solver.cpp:404]     Test net output #1: loss = 0.61203 (* 1 = 0.61203 loss)\nI0821 10:39:10.484328 32352 solver.cpp:228] Iteration 6300, loss = 0.135428\nI0821 10:39:10.484371 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:39:10.484387 32352 solver.cpp:244]     Train net output #1: loss = 0.135428 (* 1 = 0.135428 loss)\nI0821 10:39:10.587555 32352 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:41:28.449108 32352 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:42:49.300917 32352 solver.cpp:404]     Test net output #0: accuracy = 0.69868\nI0821 10:42:49.301221 32352 solver.cpp:404]     Test net output #1: loss = 1.44962 (* 1 = 1.44962 loss)\nI0821 10:42:50.607120 32352 solver.cpp:228] Iteration 6400, loss = 0.105634\nI0821 10:42:50.607178 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:42:50.607197 32352 solver.cpp:244]     Train net output #1: loss = 0.105634 (* 1 = 0.105634 loss)\nI0821 10:42:50.707840 32352 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:45:08.660675 32352 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:46:29.525487 32352 solver.cpp:404]     Test net output #0: accuracy = 0.82188\nI0821 10:46:29.525799 32352 solver.cpp:404]     Test net output #1: loss = 0.751451 (* 1 = 0.751451 loss)\nI0821 10:46:30.830989 32352 solver.cpp:228] Iteration 6500, loss = 0.112728\nI0821 10:46:30.831032 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:46:30.831048 32352 solver.cpp:244]     Train net output #1: loss = 0.112728 (* 1 = 0.112728 loss)\nI0821 10:46:30.937778 32352 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 
10:48:48.960494 32352 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:50:09.822990 32352 solver.cpp:404]     Test net output #0: accuracy = 0.7976\nI0821 10:50:09.823287 32352 solver.cpp:404]     Test net output #1: loss = 0.763296 (* 1 = 0.763296 loss)\nI0821 10:50:11.127894 32352 solver.cpp:228] Iteration 6600, loss = 0.159936\nI0821 10:50:11.127936 32352 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:50:11.127952 32352 solver.cpp:244]     Train net output #1: loss = 0.159936 (* 1 = 0.159936 loss)\nI0821 10:50:11.231295 32352 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:52:29.194577 32352 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:53:50.053740 32352 solver.cpp:404]     Test net output #0: accuracy = 0.85092\nI0821 10:53:50.054064 32352 solver.cpp:404]     Test net output #1: loss = 0.541837 (* 1 = 0.541837 loss)\nI0821 10:53:51.358861 32352 solver.cpp:228] Iteration 6700, loss = 0.110259\nI0821 10:53:51.358906 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:53:51.358922 32352 solver.cpp:244]     Train net output #1: loss = 0.110259 (* 1 = 0.110259 loss)\nI0821 10:53:51.467152 32352 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:56:09.257647 32352 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 10:57:30.115478 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79012\nI0821 10:57:30.115792 32352 solver.cpp:404]     Test net output #1: loss = 0.8637 (* 1 = 0.8637 loss)\nI0821 10:57:31.420516 32352 solver.cpp:228] Iteration 6800, loss = 0.135533\nI0821 10:57:31.420577 32352 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:57:31.420594 32352 solver.cpp:244]     Train net output #1: loss = 0.135533 (* 1 = 0.135533 loss)\nI0821 10:57:31.523982 32352 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 10:59:49.330552 32352 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 11:01:10.193586 32352 solver.cpp:404]     Test net output #0: 
accuracy = 0.784\nI0821 11:01:10.193872 32352 solver.cpp:404]     Test net output #1: loss = 0.874352 (* 1 = 0.874352 loss)\nI0821 11:01:11.500181 32352 solver.cpp:228] Iteration 6900, loss = 0.188994\nI0821 11:01:11.500243 32352 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:01:11.500262 32352 solver.cpp:244]     Train net output #1: loss = 0.188995 (* 1 = 0.188995 loss)\nI0821 11:01:11.604413 32352 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:03:29.930006 32352 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:04:50.767184 32352 solver.cpp:404]     Test net output #0: accuracy = 0.83224\nI0821 11:04:50.767498 32352 solver.cpp:404]     Test net output #1: loss = 0.653722 (* 1 = 0.653722 loss)\nI0821 11:04:52.073457 32352 solver.cpp:228] Iteration 7000, loss = 0.0934968\nI0821 11:04:52.073530 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:04:52.073557 32352 solver.cpp:244]     Train net output #1: loss = 0.0934969 (* 1 = 0.0934969 loss)\nI0821 11:04:52.183320 32352 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:07:10.558746 32352 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:08:31.384274 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78\nI0821 11:08:31.384590 32352 solver.cpp:404]     Test net output #1: loss = 0.974868 (* 1 = 0.974868 loss)\nI0821 11:08:32.689944 32352 solver.cpp:228] Iteration 7100, loss = 0.0939778\nI0821 11:08:32.689988 32352 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:08:32.690003 32352 solver.cpp:244]     Train net output #1: loss = 0.093978 (* 1 = 0.093978 loss)\nI0821 11:08:32.799782 32352 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:10:51.000612 32352 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:12:11.803617 32352 solver.cpp:404]     Test net output #0: accuracy = 0.84524\nI0821 11:12:11.803902 32352 solver.cpp:404]     Test net output #1: loss = 0.581995 (* 1 = 0.581995 loss)\nI0821 
11:12:13.109603 32352 solver.cpp:228] Iteration 7200, loss = 0.122259\nI0821 11:12:13.109645 32352 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:12:13.109660 32352 solver.cpp:244]     Train net output #1: loss = 0.122259 (* 1 = 0.122259 loss)\nI0821 11:12:13.218262 32352 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:14:31.577782 32352 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:15:52.398913 32352 solver.cpp:404]     Test net output #0: accuracy = 0.79632\nI0821 11:15:52.399222 32352 solver.cpp:404]     Test net output #1: loss = 0.837389 (* 1 = 0.837389 loss)\nI0821 11:15:53.705230 32352 solver.cpp:228] Iteration 7300, loss = 0.0692883\nI0821 11:15:53.705273 32352 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:15:53.705289 32352 solver.cpp:244]     Train net output #1: loss = 0.0692884 (* 1 = 0.0692884 loss)\nI0821 11:15:53.813581 32352 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:18:12.109040 32352 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:19:32.922853 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76028\nI0821 11:19:32.923171 32352 solver.cpp:404]     Test net output #1: loss = 1.11584 (* 1 = 1.11584 loss)\nI0821 11:19:34.228886 32352 solver.cpp:228] Iteration 7400, loss = 0.100834\nI0821 11:19:34.228947 32352 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:19:34.228965 32352 solver.cpp:244]     Train net output #1: loss = 0.100834 (* 1 = 0.100834 loss)\nI0821 11:19:34.331815 32352 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:21:52.611299 32352 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:23:13.412281 32352 solver.cpp:404]     Test net output #0: accuracy = 0.73012\nI0821 11:23:13.412590 32352 solver.cpp:404]     Test net output #1: loss = 1.24183 (* 1 = 1.24183 loss)\nI0821 11:23:14.718502 32352 solver.cpp:228] Iteration 7500, loss = 0.105659\nI0821 11:23:14.718547 32352 solver.cpp:244]     Train net output 
#0: accuracy = 0.968\nI0821 11:23:14.718564 32352 solver.cpp:244]     Train net output #1: loss = 0.105659 (* 1 = 0.105659 loss)\nI0821 11:23:14.826346 32352 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:25:33.003126 32352 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:26:53.818388 32352 solver.cpp:404]     Test net output #0: accuracy = 0.76016\nI0821 11:26:53.818702 32352 solver.cpp:404]     Test net output #1: loss = 0.998132 (* 1 = 0.998132 loss)\nI0821 11:26:55.125213 32352 solver.cpp:228] Iteration 7600, loss = 0.0759835\nI0821 11:26:55.125258 32352 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:26:55.125272 32352 solver.cpp:244]     Train net output #1: loss = 0.0759836 (* 1 = 0.0759836 loss)\nI0821 11:26:55.235152 32352 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:29:13.719127 32352 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:30:34.530436 32352 solver.cpp:404]     Test net output #0: accuracy = 0.84916\nI0821 11:30:34.530728 32352 solver.cpp:404]     Test net output #1: loss = 0.620007 (* 1 = 0.620007 loss)\nI0821 11:30:35.835590 32352 solver.cpp:228] Iteration 7700, loss = 0.098006\nI0821 11:30:35.835635 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:30:35.835651 32352 solver.cpp:244]     Train net output #1: loss = 0.0980062 (* 1 = 0.0980062 loss)\nI0821 11:30:35.947794 32352 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:32:54.262459 32352 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:34:15.076947 32352 solver.cpp:404]     Test net output #0: accuracy = 0.831481\nI0821 11:34:15.077215 32352 solver.cpp:404]     Test net output #1: loss = 0.749039 (* 1 = 0.749039 loss)\nI0821 11:34:16.381664 32352 solver.cpp:228] Iteration 7800, loss = 0.0347325\nI0821 11:34:16.381705 32352 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 11:34:16.381721 32352 solver.cpp:244]     Train net output #1: loss = 0.0347326 (* 1 = 0.0347326 
loss)\nI0821 11:34:16.489514 32352 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:36:34.806377 32352 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 11:37:55.627537 32352 solver.cpp:404]     Test net output #0: accuracy = 0.84444\nI0821 11:37:55.627832 32352 solver.cpp:404]     Test net output #1: loss = 0.612526 (* 1 = 0.612526 loss)\nI0821 11:37:56.932835 32352 solver.cpp:228] Iteration 7900, loss = 0.0718377\nI0821 11:37:56.932901 32352 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:37:56.932920 32352 solver.cpp:244]     Train net output #1: loss = 0.0718378 (* 1 = 0.0718378 loss)\nI0821 11:37:57.042789 32352 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:40:15.234283 32352 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:41:36.057112 32352 solver.cpp:404]     Test net output #0: accuracy = 0.78304\nI0821 11:41:36.057381 32352 solver.cpp:404]     Test net output #1: loss = 1.05558 (* 1 = 1.05558 loss)\nI0821 11:41:37.362556 32352 solver.cpp:228] Iteration 8000, loss = 0.0627784\nI0821 11:41:37.362617 32352 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:41:37.362635 32352 solver.cpp:244]     Train net output #1: loss = 0.0627785 (* 1 = 0.0627785 loss)\nI0821 11:41:37.471623 32352 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:43:55.742686 32352 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:45:16.564607 32352 solver.cpp:404]     Test net output #0: accuracy = 0.80628\nI0821 11:45:16.564923 32352 solver.cpp:404]     Test net output #1: loss = 0.904182 (* 1 = 0.904182 loss)\nI0821 11:45:17.869624 32352 solver.cpp:228] Iteration 8100, loss = 0.043601\nI0821 11:45:17.869668 32352 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 11:45:17.869683 32352 solver.cpp:244]     Train net output #1: loss = 0.0436012 (* 1 = 0.0436012 loss)\nI0821 11:45:17.984539 32352 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:47:36.425084 32352 solver.cpp:337] 
Iteration 8200, Testing net (#0)\nI0821 11:48:57.270911 32352 solver.cpp:404]     Test net output #0: accuracy = 0.857\nI0821 11:48:57.271222 32352 solver.cpp:404]     Test net output #1: loss = 0.588313 (* 1 = 0.588313 loss)\nI0821 11:48:58.575742 32352 solver.cpp:228] Iteration 8200, loss = 0.0718373\nI0821 11:48:58.575799 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:48:58.575815 32352 solver.cpp:244]     Train net output #1: loss = 0.0718374 (* 1 = 0.0718374 loss)\nI0821 11:48:58.681499 32352 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:51:16.921175 32352 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:52:37.764147 32352 solver.cpp:404]     Test net output #0: accuracy = 0.85136\nI0821 11:52:37.764427 32352 solver.cpp:404]     Test net output #1: loss = 0.630911 (* 1 = 0.630911 loss)\nI0821 11:52:39.069481 32352 solver.cpp:228] Iteration 8300, loss = 0.0623408\nI0821 11:52:39.069525 32352 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:52:39.069540 32352 solver.cpp:244]     Train net output #1: loss = 0.0623409 (* 1 = 0.0623409 loss)\nI0821 11:52:39.178640 32352 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:54:57.353713 32352 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:56:18.208140 32352 solver.cpp:404]     Test net output #0: accuracy = 0.84932\nI0821 11:56:18.208452 32352 solver.cpp:404]     Test net output #1: loss = 0.695669 (* 1 = 0.695669 loss)\nI0821 11:56:19.513594 32352 solver.cpp:228] Iteration 8400, loss = 0.0100934\nI0821 11:56:19.513653 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:56:19.513671 32352 solver.cpp:244]     Train net output #1: loss = 0.0100935 (* 1 = 0.0100935 loss)\nI0821 11:56:19.622586 32352 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 11:58:37.854207 32352 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 11:59:58.704408 32352 solver.cpp:404]     Test net output #0: accuracy = 0.8226\nI0821 
11:59:58.704725 32352 solver.cpp:404]     Test net output #1: loss = 0.789625 (* 1 = 0.789625 loss)\nI0821 12:00:00.009263 32352 solver.cpp:228] Iteration 8500, loss = 0.0120306\nI0821 12:00:00.009315 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:00:00.009331 32352 solver.cpp:244]     Train net output #1: loss = 0.0120307 (* 1 = 0.0120307 loss)\nI0821 12:00:00.113061 32352 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 12:02:18.344120 32352 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:03:39.195632 32352 solver.cpp:404]     Test net output #0: accuracy = 0.85552\nI0821 12:03:39.195926 32352 solver.cpp:404]     Test net output #1: loss = 0.693301 (* 1 = 0.693301 loss)\nI0821 12:03:40.501071 32352 solver.cpp:228] Iteration 8600, loss = 0.0184404\nI0821 12:03:40.501122 32352 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:03:40.501139 32352 solver.cpp:244]     Train net output #1: loss = 0.0184406 (* 1 = 0.0184406 loss)\nI0821 12:03:40.610129 32352 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:05:58.857045 32352 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:07:19.705940 32352 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0821 12:07:19.706234 32352 solver.cpp:404]     Test net output #1: loss = 0.537894 (* 1 = 0.537894 loss)\nI0821 12:07:21.011504 32352 solver.cpp:228] Iteration 8700, loss = 0.0196402\nI0821 12:07:21.011562 32352 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:07:21.011581 32352 solver.cpp:244]     Train net output #1: loss = 0.0196403 (* 1 = 0.0196403 loss)\nI0821 12:07:21.116881 32352 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:09:39.431972 32352 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:11:00.277906 32352 solver.cpp:404]     Test net output #0: accuracy = 0.87452\nI0821 12:11:00.278192 32352 solver.cpp:404]     Test net output #1: loss = 0.591599 (* 1 = 0.591599 loss)\nI0821 12:11:01.583183 32352 
solver.cpp:228] Iteration 8800, loss = 0.00609574\nI0821 12:11:01.583240 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:11:01.583257 32352 solver.cpp:244]     Train net output #1: loss = 0.00609586 (* 1 = 0.00609586 loss)\nI0821 12:11:01.689152 32352 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:13:19.947955 32352 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:14:40.764667 32352 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0821 12:14:40.764962 32352 solver.cpp:404]     Test net output #1: loss = 0.550594 (* 1 = 0.550594 loss)\nI0821 12:14:42.069391 32352 solver.cpp:228] Iteration 8900, loss = 0.00385077\nI0821 12:14:42.069444 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:14:42.069460 32352 solver.cpp:244]     Train net output #1: loss = 0.0038509 (* 1 = 0.0038509 loss)\nI0821 12:14:42.172554 32352 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:17:00.469458 32352 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:18:21.169720 32352 solver.cpp:404]     Test net output #0: accuracy = 0.90516\nI0821 12:18:21.170042 32352 solver.cpp:404]     Test net output #1: loss = 0.426187 (* 1 = 0.426187 loss)\nI0821 12:18:22.474812 32352 solver.cpp:228] Iteration 9000, loss = 0.000498877\nI0821 12:18:22.474872 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:18:22.474890 32352 solver.cpp:244]     Train net output #1: loss = 0.000499002 (* 1 = 0.000499002 loss)\nI0821 12:18:22.579021 32352 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:20:40.853150 32352 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:22:01.722564 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91516\nI0821 12:22:01.722803 32352 solver.cpp:404]     Test net output #1: loss = 0.374465 (* 1 = 0.374465 loss)\nI0821 12:22:03.027251 32352 solver.cpp:228] Iteration 9100, loss = 0.000313795\nI0821 12:22:03.027302 32352 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 12:22:03.027320 32352 solver.cpp:244]     Train net output #1: loss = 0.000313921 (* 1 = 0.000313921 loss)\nI0821 12:22:03.131834 32352 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:24:21.329349 32352 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 12:25:42.204474 32352 solver.cpp:404]     Test net output #0: accuracy = 0.915\nI0821 12:25:42.204783 32352 solver.cpp:404]     Test net output #1: loss = 0.362324 (* 1 = 0.362324 loss)\nI0821 12:25:43.509352 32352 solver.cpp:228] Iteration 9200, loss = 0.000287626\nI0821 12:25:43.509408 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:25:43.509425 32352 solver.cpp:244]     Train net output #1: loss = 0.000287751 (* 1 = 0.000287751 loss)\nI0821 12:25:43.621402 32352 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:28:01.976001 32352 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:29:22.852093 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91696\nI0821 12:29:22.852385 32352 solver.cpp:404]     Test net output #1: loss = 0.343281 (* 1 = 0.343281 loss)\nI0821 12:29:24.157176 32352 solver.cpp:228] Iteration 9300, loss = 0.000218361\nI0821 12:29:24.157227 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:29:24.157243 32352 solver.cpp:244]     Train net output #1: loss = 0.000218487 (* 1 = 0.000218487 loss)\nI0821 12:29:24.268020 32352 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:31:42.515424 32352 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:33:03.386283 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91664\nI0821 12:33:03.386590 32352 solver.cpp:404]     Test net output #1: loss = 0.345902 (* 1 = 0.345902 loss)\nI0821 12:33:04.691787 32352 solver.cpp:228] Iteration 9400, loss = 0.000251582\nI0821 12:33:04.691833 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:33:04.691849 32352 solver.cpp:244]     Train net output #1: loss = 0.000251708 (* 1 = 0.000251708 loss)\nI0821 
12:33:04.796458 32352 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 12:35:23.194474 32352 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:36:44.068176 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91808\nI0821 12:36:44.068495 32352 solver.cpp:404]     Test net output #1: loss = 0.330447 (* 1 = 0.330447 loss)\nI0821 12:36:45.373440 32352 solver.cpp:228] Iteration 9500, loss = 0.000298943\nI0821 12:36:45.373491 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:36:45.373507 32352 solver.cpp:244]     Train net output #1: loss = 0.000299068 (* 1 = 0.000299068 loss)\nI0821 12:36:45.482295 32352 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:39:03.886112 32352 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:40:24.743829 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91848\nI0821 12:40:24.744143 32352 solver.cpp:404]     Test net output #1: loss = 0.338258 (* 1 = 0.338258 loss)\nI0821 12:40:26.049540 32352 solver.cpp:228] Iteration 9600, loss = 0.000300061\nI0821 12:40:26.049587 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:40:26.049604 32352 solver.cpp:244]     Train net output #1: loss = 0.000300186 (* 1 = 0.000300186 loss)\nI0821 12:40:26.152957 32352 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:42:44.427690 32352 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:44:05.304638 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91844\nI0821 12:44:05.304940 32352 solver.cpp:404]     Test net output #1: loss = 0.325593 (* 1 = 0.325593 loss)\nI0821 12:44:06.609704 32352 solver.cpp:228] Iteration 9700, loss = 0.000315582\nI0821 12:44:06.609760 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:44:06.609777 32352 solver.cpp:244]     Train net output #1: loss = 0.000315708 (* 1 = 0.000315708 loss)\nI0821 12:44:06.714843 32352 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:46:25.052798 32352 solver.cpp:337] 
Iteration 9800, Testing net (#0)\nI0821 12:47:45.769244 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91876\nI0821 12:47:45.769500 32352 solver.cpp:404]     Test net output #1: loss = 0.333531 (* 1 = 0.333531 loss)\nI0821 12:47:47.074389 32352 solver.cpp:228] Iteration 9800, loss = 0.000312404\nI0821 12:47:47.074446 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:47:47.074463 32352 solver.cpp:244]     Train net output #1: loss = 0.00031253 (* 1 = 0.00031253 loss)\nI0821 12:47:47.177017 32352 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:50:05.456915 32352 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:51:25.897159 32352 solver.cpp:404]     Test net output #0: accuracy = 0.9198\nI0821 12:51:25.897395 32352 solver.cpp:404]     Test net output #1: loss = 0.322088 (* 1 = 0.322088 loss)\nI0821 12:51:27.202656 32352 solver.cpp:228] Iteration 9900, loss = 0.0002851\nI0821 12:51:27.202705 32352 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:51:27.202721 32352 solver.cpp:244]     Train net output #1: loss = 0.000285225 (* 1 = 0.000285225 loss)\nI0821 12:51:27.316416 32352 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:53:45.602583 32352 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kMom8Fig11_iter_10000.caffemodel\nI0821 12:53:45.820848 32352 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kMom8Fig11_iter_10000.solverstate\nI0821 12:53:46.261020 32352 solver.cpp:317] Iteration 10000, loss = 0.000370117\nI0821 12:53:46.261072 32352 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:55:06.752104 32352 solver.cpp:404]     Test net output #0: accuracy = 0.91856\nI0821 12:55:06.752378 32352 solver.cpp:404]     Test net output #1: loss = 0.331538 (* 1 = 0.331538 loss)\nI0821 12:55:06.752390 32352 solver.cpp:322] Optimization Done.\nI0821 12:55:12.087224 32352 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kMom95Fig11",
    "content": "I0821 06:45:14.235880 31846 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:45:14.238263 31846 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:45:14.239676 31846 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:45:14.240888 31846 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:45:14.242100 31846 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:45:14.243327 31846 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:45:14.244565 31846 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:45:14.245802 31846 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:45:14.247035 31846 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:45:14.664140 31846 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.95\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kMom95Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:45:14.668004 31846 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:45:14.686864 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:14.686944 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:14.688083 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:45:14.688143 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:45:14.688164 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:45:14.688184 31846 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:45:14.688204 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:45:14.688220 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:45:14.688239 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:45:14.688257 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:45:14.688278 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:45:14.688297 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:45:14.688316 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:45:14.688333 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:45:14.688352 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:45:14.688371 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:45:14.688392 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:45:14.688410 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:45:14.688439 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:45:14.688459 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:45:14.688479 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:45:14.688498 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:45:14.688530 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:45:14.688550 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:45:14.688575 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:45:14.688596 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:45:14.688614 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:45:14.688632 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:45:14.688650 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:45:14.688668 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:45:14.688685 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:45:14.688704 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:45:14.688724 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:45:14.688741 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:45:14.688760 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:45:14.688776 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:45:14.688796 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:45:14.688814 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:45:14.688834 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:45:14.688853 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:45:14.688871 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:45:14.688890 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:45:14.688915 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:45:14.688932 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:45:14.688951 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:45:14.688968 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:45:14.688988 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:45:14.689007 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:45:14.689025 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:45:14.689041 31846 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:45:14.689062 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:45:14.689079 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:45:14.689096 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:45:14.689124 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:45:14.689146 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:45:14.689164 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:45:14.689184 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:45:14.689199 31846 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:45:14.690948 31846 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:45:14.693069 31846 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:45:14.694305 31846 net.cpp:100] Creating Layer dataLayer\nI0821 06:45:14.694387 31846 net.cpp:408] dataLayer -> data_top\nI0821 06:45:14.694598 31846 net.cpp:408] dataLayer -> label\nI0821 06:45:14.694727 31846 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:45:14.803741 31851 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:45:14.979378 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:14.986333 31846 net.cpp:150] Setting up dataLayer\nI0821 06:45:14.986397 31846 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:45:14.986409 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:14.986414 31846 net.cpp:165] Memory required for data: 1536500\nI0821 06:45:14.986438 31846 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:45:14.986454 31846 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:45:14.986464 31846 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:45:14.986488 31846 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:45:14.986507 31846 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:45:14.986577 31846 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:45:14.986593 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:14.986599 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:14.986604 31846 net.cpp:165] Memory required for data: 1537500\nI0821 06:45:14.986610 31846 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:45:14.986673 31846 
net.cpp:100] Creating Layer pre_conv\nI0821 06:45:14.986685 31846 net.cpp:434] pre_conv <- data_top\nI0821 06:45:14.986696 31846 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:45:14.989073 31852 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:14.997246 31846 net.cpp:150] Setting up pre_conv\nI0821 06:45:14.997274 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:14.997282 31846 net.cpp:165] Memory required for data: 9729500\nI0821 06:45:14.997355 31846 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:45:15.004544 31846 net.cpp:100] Creating Layer pre_bn\nI0821 06:45:15.004565 31846 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:45:15.004581 31846 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:45:15.004904 31846 net.cpp:150] Setting up pre_bn\nI0821 06:45:15.004920 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.004926 31846 net.cpp:165] Memory required for data: 17921500\nI0821 06:45:15.004943 31846 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:15.005002 31846 net.cpp:100] Creating Layer pre_scale\nI0821 06:45:15.005013 31846 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:45:15.005025 31846 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:45:15.005208 31846 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:15.008476 31846 net.cpp:150] Setting up pre_scale\nI0821 06:45:15.008502 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.008508 31846 net.cpp:165] Memory required for data: 26113500\nI0821 06:45:15.008520 31846 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:45:15.008575 31846 net.cpp:100] Creating Layer pre_relu\nI0821 06:45:15.008585 31846 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:45:15.008594 31846 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:45:15.008605 31846 net.cpp:150] Setting up pre_relu\nI0821 06:45:15.008613 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.008618 31846 net.cpp:165] Memory required for data: 
34305500\nI0821 06:45:15.008623 31846 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:45:15.008635 31846 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:45:15.008641 31846 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:45:15.008648 31846 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:45:15.008657 31846 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:45:15.008709 31846 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:45:15.008723 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.008729 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.008733 31846 net.cpp:165] Memory required for data: 50689500\nI0821 06:45:15.008739 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:45:15.008751 31846 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:45:15.008757 31846 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:45:15.008769 31846 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:45:15.009091 31846 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:45:15.009106 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.009111 31846 net.cpp:165] Memory required for data: 58881500\nI0821 06:45:15.009124 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:45:15.009136 31846 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:45:15.009142 31846 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:45:15.009160 31846 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:45:15.009389 31846 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:45:15.009403 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.009408 31846 net.cpp:165] Memory required for data: 67073500\nI0821 06:45:15.009419 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:15.009438 31846 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:45:15.009445 31846 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:45:15.009454 31846 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.009505 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:15.009640 31846 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:45:15.009654 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.009658 31846 net.cpp:165] Memory required for data: 75265500\nI0821 06:45:15.009667 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:45:15.009687 31846 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:45:15.009693 31846 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:45:15.009701 31846 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.009711 31846 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:45:15.009717 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.009722 31846 net.cpp:165] Memory required for data: 83457500\nI0821 06:45:15.009727 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:45:15.009742 31846 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:45:15.009748 31846 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:45:15.009759 31846 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:45:15.010056 31846 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:45:15.010068 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010073 31846 net.cpp:165] Memory required for data: 91649500\nI0821 06:45:15.010082 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:45:15.010092 31846 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:45:15.010097 31846 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:45:15.010108 31846 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:45:15.010336 31846 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:45:15.010349 31846 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010354 31846 net.cpp:165] Memory required for data: 99841500\nI0821 06:45:15.010371 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:15.010381 31846 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:45:15.010387 31846 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:45:15.010395 31846 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.010457 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:15.010594 31846 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:45:15.010608 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010613 31846 net.cpp:165] Memory required for data: 108033500\nI0821 06:45:15.010622 31846 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:45:15.010677 31846 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:45:15.010690 31846 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:45:15.010697 31846 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:45:15.010709 31846 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:45:15.010782 31846 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:45:15.010795 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010800 31846 net.cpp:165] Memory required for data: 116225500\nI0821 06:45:15.010807 31846 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:45:15.010818 31846 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:45:15.010825 31846 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:45:15.010833 31846 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.010841 31846 net.cpp:150] Setting up L1_b1_relu\nI0821 06:45:15.010848 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010854 31846 net.cpp:165] Memory required for data: 124417500\nI0821 06:45:15.010859 31846 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.010867 31846 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.010874 31846 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:45:15.010880 31846 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:15.010888 31846 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:15.010933 31846 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.010944 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010951 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.010963 31846 net.cpp:165] Memory required for data: 140801500\nI0821 06:45:15.010969 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:45:15.010983 31846 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:45:15.010989 31846 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:15.010998 31846 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:45:15.011303 31846 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:45:15.011317 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.011322 31846 net.cpp:165] Memory required for data: 148993500\nI0821 06:45:15.011330 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:45:15.011344 31846 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:45:15.011350 31846 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:45:15.011358 31846 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:45:15.011603 31846 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:45:15.011617 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.011623 31846 net.cpp:165] Memory required for data: 157185500\nI0821 06:45:15.011633 31846 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:45:15.011642 31846 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:45:15.011648 31846 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:45:15.011656 31846 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.011735 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:45:15.011880 31846 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:45:15.011893 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.011898 31846 net.cpp:165] Memory required for data: 165377500\nI0821 06:45:15.011907 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:45:15.011919 31846 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:45:15.011926 31846 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:45:15.011934 31846 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.011942 31846 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:45:15.011950 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.011955 31846 net.cpp:165] Memory required for data: 173569500\nI0821 06:45:15.011958 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:45:15.011972 31846 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:45:15.011978 31846 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:45:15.011992 31846 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:45:15.012295 31846 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:45:15.012307 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.012312 31846 net.cpp:165] Memory required for data: 181761500\nI0821 06:45:15.012321 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:45:15.012333 31846 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:45:15.012339 31846 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:45:15.012347 31846 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:45:15.012594 31846 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:45:15.012609 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.012614 31846 net.cpp:165] Memory required for data: 189953500\nI0821 06:45:15.012629 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:15.012641 31846 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:45:15.012647 31846 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:45:15.012655 31846 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.012708 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:15.012845 31846 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:45:15.012858 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.012863 31846 net.cpp:165] Memory required for data: 198145500\nI0821 06:45:15.012872 31846 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:45:15.012889 31846 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:45:15.012895 31846 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:45:15.012902 31846 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:15.012912 31846 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:45:15.012943 31846 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:45:15.012953 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.012958 31846 net.cpp:165] Memory required for data: 206337500\nI0821 06:45:15.012962 31846 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:45:15.012970 31846 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:45:15.012975 31846 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:45:15.012982 31846 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.012991 31846 net.cpp:150] Setting up L1_b2_relu\nI0821 06:45:15.012998 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.013003 31846 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:45:15.013007 31846 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.013018 31846 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.013023 31846 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:45:15.013031 31846 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:15.013039 31846 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:15.013082 31846 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.013093 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.013100 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.013105 31846 net.cpp:165] Memory required for data: 230913500\nI0821 06:45:15.013110 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:45:15.013121 31846 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:45:15.013128 31846 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:15.013139 31846 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:45:15.013450 31846 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:45:15.013464 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.013469 31846 net.cpp:165] Memory required for data: 239105500\nI0821 06:45:15.013478 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:45:15.013487 31846 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:45:15.013494 31846 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:45:15.013501 31846 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:45:15.013737 31846 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:45:15.013751 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.013756 31846 net.cpp:165] Memory required for data: 
247297500\nI0821 06:45:15.013767 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:15.013778 31846 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:45:15.013784 31846 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:45:15.013792 31846 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.013844 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:15.013983 31846 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:45:15.013995 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.014000 31846 net.cpp:165] Memory required for data: 255489500\nI0821 06:45:15.014009 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:45:15.014017 31846 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:45:15.014024 31846 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:45:15.014034 31846 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.014044 31846 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:45:15.014057 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.014062 31846 net.cpp:165] Memory required for data: 263681500\nI0821 06:45:15.014067 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:45:15.014081 31846 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:45:15.014087 31846 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:45:15.014096 31846 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:45:15.014401 31846 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:45:15.014415 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.014420 31846 net.cpp:165] Memory required for data: 271873500\nI0821 06:45:15.014436 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:45:15.014451 31846 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:45:15.014457 31846 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:45:15.014466 31846 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:45:15.014698 31846 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:45:15.014711 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.014716 31846 net.cpp:165] Memory required for data: 280065500\nI0821 06:45:15.014726 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:15.014739 31846 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:45:15.014744 31846 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:45:15.014752 31846 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.014806 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:15.014943 31846 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:45:15.014956 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.014961 31846 net.cpp:165] Memory required for data: 288257500\nI0821 06:45:15.014971 31846 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:45:15.014979 31846 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:45:15.014986 31846 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:45:15.014992 31846 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:15.015002 31846 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:45:15.015035 31846 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:45:15.015044 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.015048 31846 net.cpp:165] Memory required for data: 296449500\nI0821 06:45:15.015054 31846 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:45:15.015063 31846 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:45:15.015067 31846 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:45:15.015077 31846 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.015086 31846 net.cpp:150] Setting up L1_b3_relu\nI0821 06:45:15.015094 31846 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:45:15.015097 31846 net.cpp:165] Memory required for data: 304641500\nI0821 06:45:15.015102 31846 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.015110 31846 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.015115 31846 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:45:15.015122 31846 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:15.015131 31846 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:15.015175 31846 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.015187 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.015193 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.015198 31846 net.cpp:165] Memory required for data: 321025500\nI0821 06:45:15.015203 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:45:15.015214 31846 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:45:15.015220 31846 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:15.015239 31846 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:45:15.015552 31846 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:45:15.015566 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.015571 31846 net.cpp:165] Memory required for data: 329217500\nI0821 06:45:15.015580 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:45:15.015589 31846 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:45:15.015595 31846 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:45:15.015606 31846 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:45:15.015846 31846 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:45:15.015858 31846 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:45:15.015863 31846 net.cpp:165] Memory required for data: 337409500\nI0821 06:45:15.015873 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:15.015884 31846 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:45:15.015890 31846 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:45:15.015898 31846 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.015951 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:15.016093 31846 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:45:15.016105 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.016110 31846 net.cpp:165] Memory required for data: 345601500\nI0821 06:45:15.016119 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:45:15.016127 31846 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:45:15.016134 31846 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:45:15.016144 31846 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.016152 31846 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:45:15.016160 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.016165 31846 net.cpp:165] Memory required for data: 353793500\nI0821 06:45:15.016170 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:45:15.016183 31846 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:45:15.016189 31846 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:45:15.016202 31846 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:45:15.016518 31846 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:45:15.016533 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.016538 31846 net.cpp:165] Memory required for data: 361985500\nI0821 06:45:15.016547 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:45:15.016556 31846 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:45:15.016561 31846 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:45:15.016569 31846 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:45:15.016809 31846 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:45:15.016824 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.016829 31846 net.cpp:165] Memory required for data: 370177500\nI0821 06:45:15.016839 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:15.016850 31846 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:45:15.016856 31846 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:45:15.016866 31846 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.016918 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:15.017055 31846 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:45:15.017068 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017073 31846 net.cpp:165] Memory required for data: 378369500\nI0821 06:45:15.017082 31846 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:45:15.017093 31846 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:45:15.017099 31846 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:45:15.017107 31846 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:15.017114 31846 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:45:15.017156 31846 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:45:15.017166 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017171 31846 net.cpp:165] Memory required for data: 386561500\nI0821 06:45:15.017176 31846 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:45:15.017184 31846 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:45:15.017190 31846 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:45:15.017199 31846 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.017210 31846 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:45:15.017216 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017221 31846 net.cpp:165] Memory required for data: 394753500\nI0821 06:45:15.017225 31846 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.017232 31846 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.017238 31846 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:45:15.017246 31846 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:15.017254 31846 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:15.017298 31846 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.017310 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017316 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017321 31846 net.cpp:165] Memory required for data: 411137500\nI0821 06:45:15.017326 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:45:15.017338 31846 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:45:15.017343 31846 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:15.017354 31846 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:45:15.017666 31846 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:45:15.017681 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017686 31846 net.cpp:165] Memory required for data: 419329500\nI0821 06:45:15.017710 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:45:15.017724 31846 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:45:15.017729 31846 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:45:15.017737 31846 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:45:15.017978 31846 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:45:15.017992 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.017997 31846 net.cpp:165] Memory required for data: 427521500\nI0821 06:45:15.018007 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:15.018018 31846 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:45:15.018024 31846 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:45:15.018033 31846 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.018084 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:15.018223 31846 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:45:15.018235 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.018240 31846 net.cpp:165] Memory required for data: 435713500\nI0821 06:45:15.018249 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:45:15.018260 31846 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:45:15.018266 31846 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:45:15.018273 31846 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.018282 31846 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:45:15.018290 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.018295 31846 net.cpp:165] Memory required for data: 443905500\nI0821 06:45:15.018298 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:45:15.018312 31846 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:45:15.018318 31846 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:45:15.018337 31846 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:45:15.018656 31846 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:45:15.018671 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.018676 31846 net.cpp:165] Memory required for data: 452097500\nI0821 06:45:15.018684 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:45:15.018693 31846 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:45:15.018699 31846 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:45:15.018708 31846 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:45:15.018945 31846 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:45:15.018959 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.018963 31846 net.cpp:165] Memory required for data: 460289500\nI0821 06:45:15.018973 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:15.018985 31846 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:45:15.018991 31846 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:45:15.018999 31846 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.019053 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:15.019196 31846 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:45:15.019208 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019213 31846 net.cpp:165] Memory required for data: 468481500\nI0821 06:45:15.019222 31846 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:45:15.019253 31846 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:45:15.019259 31846 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:45:15.019268 31846 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:15.019278 31846 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:45:15.019317 31846 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:45:15.019328 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019333 31846 net.cpp:165] Memory required for data: 476673500\nI0821 06:45:15.019340 31846 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:45:15.019346 31846 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:45:15.019352 31846 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:45:15.019361 31846 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.019371 31846 net.cpp:150] Setting up L1_b5_relu\nI0821 06:45:15.019378 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019382 31846 net.cpp:165] Memory required for data: 484865500\nI0821 06:45:15.019387 31846 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.019394 31846 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.019400 31846 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:45:15.019407 31846 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:15.019417 31846 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:15.019469 31846 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.019481 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019489 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019492 31846 net.cpp:165] Memory required for data: 501249500\nI0821 06:45:15.019497 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:45:15.019510 31846 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:45:15.019515 31846 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:15.019543 31846 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:45:15.019857 31846 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:45:15.019871 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.019876 31846 net.cpp:165] Memory required for data: 509441500\nI0821 06:45:15.019892 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:45:15.019902 31846 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:45:15.019908 31846 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:45:15.019919 31846 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:45:15.020159 31846 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:45:15.020171 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.020176 31846 net.cpp:165] Memory required for data: 517633500\nI0821 06:45:15.020187 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:15.020198 31846 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:45:15.020205 31846 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:45:15.020212 31846 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.020264 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:15.020403 31846 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:45:15.020416 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.020428 31846 net.cpp:165] Memory required for data: 525825500\nI0821 06:45:15.020438 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:45:15.020447 31846 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:45:15.020453 31846 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:45:15.020463 31846 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.020474 31846 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:45:15.020481 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.020485 31846 net.cpp:165] Memory required for data: 534017500\nI0821 06:45:15.020490 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:45:15.020504 31846 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:45:15.020510 31846 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:45:15.020520 31846 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:45:15.020838 31846 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:45:15.020851 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.020856 31846 net.cpp:165] Memory required for data: 542209500\nI0821 06:45:15.020865 31846 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:45:15.020874 31846 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:45:15.020880 31846 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:45:15.020889 31846 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:45:15.021131 31846 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:45:15.021145 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021150 31846 net.cpp:165] Memory required for data: 550401500\nI0821 06:45:15.021160 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:15.021172 31846 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:45:15.021178 31846 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:45:15.021185 31846 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.021240 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:15.021378 31846 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:45:15.021391 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021396 31846 net.cpp:165] Memory required for data: 558593500\nI0821 06:45:15.021404 31846 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:45:15.021427 31846 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:45:15.021435 31846 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:45:15.021442 31846 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:15.021453 31846 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:45:15.021486 31846 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:45:15.021497 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021502 31846 net.cpp:165] Memory required for data: 566785500\nI0821 06:45:15.021512 31846 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:45:15.021530 31846 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:45:15.021538 31846 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:45:15.021544 31846 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.021553 31846 net.cpp:150] Setting up L1_b6_relu\nI0821 06:45:15.021560 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021565 31846 net.cpp:165] Memory required for data: 574977500\nI0821 06:45:15.021570 31846 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.021577 31846 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.021582 31846 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:45:15.021589 31846 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:15.021598 31846 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:15.021644 31846 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.021656 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021662 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.021667 31846 net.cpp:165] Memory required for data: 591361500\nI0821 06:45:15.021672 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:45:15.021687 31846 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:45:15.021693 31846 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:15.021703 31846 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:45:15.022011 31846 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:45:15.022024 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.022029 31846 net.cpp:165] Memory required for data: 599553500\nI0821 06:45:15.022038 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:45:15.022050 31846 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:45:15.022056 31846 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:45:15.022064 31846 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:45:15.022310 31846 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:45:15.022322 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.022327 31846 net.cpp:165] Memory required for data: 607745500\nI0821 06:45:15.022337 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:15.022346 31846 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:45:15.022351 31846 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:45:15.022359 31846 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.022411 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:15.022558 31846 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:45:15.022572 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.022578 31846 net.cpp:165] Memory required for data: 615937500\nI0821 06:45:15.022586 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:45:15.022595 31846 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:45:15.022601 31846 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:45:15.022613 31846 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.022622 31846 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:45:15.022629 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.022634 31846 net.cpp:165] Memory required for data: 624129500\nI0821 06:45:15.022639 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:45:15.022650 31846 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:45:15.022655 31846 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:45:15.022666 31846 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:45:15.022977 31846 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:45:15.022991 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.022996 31846 
net.cpp:165] Memory required for data: 632321500\nI0821 06:45:15.023012 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:45:15.023022 31846 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:45:15.023028 31846 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:45:15.023039 31846 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:45:15.023280 31846 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:45:15.023295 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023301 31846 net.cpp:165] Memory required for data: 640513500\nI0821 06:45:15.023311 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:15.023320 31846 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:45:15.023326 31846 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:45:15.023334 31846 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.023386 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:15.023536 31846 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:45:15.023550 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023555 31846 net.cpp:165] Memory required for data: 648705500\nI0821 06:45:15.023564 31846 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:45:15.023576 31846 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:45:15.023583 31846 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:45:15.023591 31846 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:15.023597 31846 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:45:15.023632 31846 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:45:15.023640 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023645 31846 net.cpp:165] Memory required for data: 656897500\nI0821 06:45:15.023650 31846 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:45:15.023658 31846 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:45:15.023664 31846 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:45:15.023674 31846 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.023684 31846 net.cpp:150] Setting up L1_b7_relu\nI0821 06:45:15.023691 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023695 31846 net.cpp:165] Memory required for data: 665089500\nI0821 06:45:15.023700 31846 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.023707 31846 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.023712 31846 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:45:15.023720 31846 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:15.023730 31846 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:15.023773 31846 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.023785 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023792 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.023797 31846 net.cpp:165] Memory required for data: 681473500\nI0821 06:45:15.023802 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:45:15.023815 31846 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:45:15.023823 31846 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:15.023831 31846 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:45:15.024142 31846 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:45:15.024157 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.024161 31846 net.cpp:165] Memory required for data: 689665500\nI0821 06:45:15.024170 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:45:15.024183 31846 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:45:15.024189 31846 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:45:15.024199 31846 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:45:15.024454 31846 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:45:15.024471 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.024477 31846 net.cpp:165] Memory required for data: 697857500\nI0821 06:45:15.024487 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:15.024497 31846 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:45:15.024502 31846 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:45:15.024510 31846 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.024562 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:15.024705 31846 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:45:15.024719 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.024724 31846 net.cpp:165] Memory required for data: 706049500\nI0821 06:45:15.024734 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:45:15.024744 31846 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:45:15.024751 31846 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:45:15.024758 31846 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.024770 31846 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:45:15.024777 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.024782 31846 net.cpp:165] Memory required for data: 714241500\nI0821 06:45:15.024787 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:45:15.024798 31846 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:45:15.024803 31846 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:45:15.024816 31846 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:45:15.025126 31846 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:45:15.025141 31846 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025146 31846 net.cpp:165] Memory required for data: 722433500\nI0821 06:45:15.025154 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:45:15.025162 31846 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:45:15.025169 31846 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:45:15.025180 31846 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:45:15.025430 31846 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:45:15.025444 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025449 31846 net.cpp:165] Memory required for data: 730625500\nI0821 06:45:15.025460 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:15.025472 31846 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:45:15.025478 31846 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:45:15.025486 31846 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.025539 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:15.025679 31846 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:45:15.025691 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025696 31846 net.cpp:165] Memory required for data: 738817500\nI0821 06:45:15.025705 31846 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:45:15.025717 31846 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:45:15.025724 31846 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:45:15.025732 31846 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:15.025738 31846 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:45:15.025773 31846 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:45:15.025781 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025786 31846 net.cpp:165] Memory required for data: 747009500\nI0821 06:45:15.025792 31846 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:45:15.025799 31846 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:45:15.025805 31846 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:45:15.025815 31846 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.025825 31846 net.cpp:150] Setting up L1_b8_relu\nI0821 06:45:15.025831 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025842 31846 net.cpp:165] Memory required for data: 755201500\nI0821 06:45:15.025848 31846 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.025856 31846 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.025861 31846 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:45:15.025868 31846 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:15.025877 31846 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:15.025923 31846 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.025935 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025943 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.025946 31846 net.cpp:165] Memory required for data: 771585500\nI0821 06:45:15.025951 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:45:15.025962 31846 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:45:15.025969 31846 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:15.025980 31846 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:45:15.026302 31846 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:45:15.026321 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.026326 31846 net.cpp:165] Memory required for data: 779777500\nI0821 06:45:15.026335 31846 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:45:15.026345 31846 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:45:15.026350 31846 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:45:15.026362 31846 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:45:15.026613 31846 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:45:15.026628 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.026633 31846 net.cpp:165] Memory required for data: 787969500\nI0821 06:45:15.026643 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:15.026652 31846 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:45:15.026659 31846 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:45:15.026669 31846 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.026721 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:15.026863 31846 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:45:15.026880 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.026885 31846 net.cpp:165] Memory required for data: 796161500\nI0821 06:45:15.026895 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:45:15.026901 31846 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:45:15.026907 31846 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:45:15.026914 31846 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.026924 31846 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:45:15.026931 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.026935 31846 net.cpp:165] Memory required for data: 804353500\nI0821 06:45:15.026940 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:45:15.026954 31846 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:45:15.026960 31846 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:45:15.026970 31846 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:45:15.027287 31846 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:45:15.027302 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.027307 31846 net.cpp:165] Memory required for data: 812545500\nI0821 06:45:15.027314 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:45:15.027326 31846 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:45:15.027333 31846 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:45:15.027343 31846 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:45:15.027598 31846 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:45:15.027612 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.027617 31846 net.cpp:165] Memory required for data: 820737500\nI0821 06:45:15.027649 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:15.027659 31846 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:45:15.027665 31846 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:45:15.027675 31846 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.027727 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:15.027873 31846 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:45:15.027886 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.027891 31846 net.cpp:165] Memory required for data: 828929500\nI0821 06:45:15.027900 31846 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:45:15.027912 31846 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:45:15.027918 31846 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:45:15.027925 31846 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:15.027932 31846 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:45:15.027964 31846 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:45:15.027973 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.027978 31846 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:45:15.027983 31846 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:45:15.027994 31846 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:45:15.028000 31846 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:45:15.028007 31846 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.028017 31846 net.cpp:150] Setting up L1_b9_relu\nI0821 06:45:15.028023 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.028028 31846 net.cpp:165] Memory required for data: 845313500\nI0821 06:45:15.028033 31846 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.028044 31846 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.028050 31846 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:45:15.028057 31846 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:15.028066 31846 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:15.028111 31846 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.028122 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.028129 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.028133 31846 net.cpp:165] Memory required for data: 861697500\nI0821 06:45:15.028139 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:45:15.028153 31846 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:45:15.028159 31846 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:15.028168 31846 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:45:15.028492 31846 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:45:15.028506 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.028512 31846 net.cpp:165] Memory required for data: 
863745500\nI0821 06:45:15.028520 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:45:15.028532 31846 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:45:15.028538 31846 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:45:15.028549 31846 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:45:15.028785 31846 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:45:15.028798 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.028803 31846 net.cpp:165] Memory required for data: 865793500\nI0821 06:45:15.028813 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:15.028822 31846 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:45:15.028836 31846 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:45:15.028844 31846 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.028898 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:15.029036 31846 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:45:15.029049 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.029054 31846 net.cpp:165] Memory required for data: 867841500\nI0821 06:45:15.029063 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:45:15.029072 31846 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:45:15.029078 31846 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:45:15.029088 31846 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.029098 31846 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:45:15.029104 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.029109 31846 net.cpp:165] Memory required for data: 869889500\nI0821 06:45:15.029114 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:45:15.029127 31846 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:45:15.029134 31846 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:45:15.029142 31846 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:45:15.029465 31846 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:45:15.029479 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.029484 31846 net.cpp:165] Memory required for data: 871937500\nI0821 06:45:15.029494 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:45:15.029507 31846 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:45:15.029513 31846 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:45:15.029522 31846 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:45:15.029765 31846 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:45:15.029781 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.029786 31846 net.cpp:165] Memory required for data: 873985500\nI0821 06:45:15.029796 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:15.029805 31846 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:45:15.029811 31846 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:45:15.029819 31846 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.029872 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:15.030017 31846 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:45:15.030030 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.030035 31846 net.cpp:165] Memory required for data: 876033500\nI0821 06:45:15.030045 31846 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:45:15.030055 31846 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:45:15.030061 31846 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:15.030072 31846 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:45:15.030160 31846 net.cpp:150] Setting up L2_b1_pool\nI0821 06:45:15.030175 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.030180 31846 net.cpp:165] Memory required for data: 878081500\nI0821 06:45:15.030185 31846 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:45:15.030200 31846 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:45:15.030205 31846 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:45:15.030212 31846 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:45:15.030220 31846 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:45:15.030252 31846 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:45:15.030261 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.030266 31846 net.cpp:165] Memory required for data: 880129500\nI0821 06:45:15.030272 31846 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:45:15.030278 31846 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:45:15.030284 31846 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:45:15.030294 31846 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.030311 31846 net.cpp:150] Setting up L2_b1_relu\nI0821 06:45:15.030319 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.030324 31846 net.cpp:165] Memory required for data: 882177500\nI0821 06:45:15.030328 31846 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:45:15.030382 31846 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:45:15.030396 31846 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:45:15.032778 31846 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:45:15.032797 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.032804 31846 net.cpp:165] Memory required for data: 884225500\nI0821 06:45:15.032809 31846 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:45:15.032819 31846 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:45:15.032825 31846 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:45:15.032836 31846 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:45:15.032845 31846 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:45:15.032927 31846 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:45:15.032943 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.032948 31846 net.cpp:165] Memory required for data: 888321500\nI0821 06:45:15.032953 31846 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.032963 31846 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.032968 31846 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:45:15.032979 31846 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:15.032989 31846 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:15.033037 31846 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.033052 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.033059 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.033064 31846 net.cpp:165] Memory required for data: 896513500\nI0821 06:45:15.033069 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:45:15.033082 31846 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:45:15.033087 31846 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:15.033097 31846 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:45:15.034553 31846 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:45:15.034570 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.034575 31846 net.cpp:165] Memory required for data: 900609500\nI0821 06:45:15.034585 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:45:15.034598 31846 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:45:15.034605 31846 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:45:15.034616 31846 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:45:15.034864 31846 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:45:15.034878 31846 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:45:15.034883 31846 net.cpp:165] Memory required for data: 904705500\nI0821 06:45:15.034894 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:15.034904 31846 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:45:15.034909 31846 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:45:15.034917 31846 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.034976 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:15.035120 31846 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:45:15.035133 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.035138 31846 net.cpp:165] Memory required for data: 908801500\nI0821 06:45:15.035147 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:45:15.035156 31846 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:45:15.035164 31846 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:45:15.035172 31846 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.035190 31846 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:45:15.035198 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.035203 31846 net.cpp:165] Memory required for data: 912897500\nI0821 06:45:15.035208 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:45:15.035223 31846 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:45:15.035229 31846 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:45:15.035238 31846 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:45:15.035704 31846 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:45:15.035719 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.035724 31846 net.cpp:165] Memory required for data: 916993500\nI0821 06:45:15.035733 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:45:15.035745 31846 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:45:15.035753 
31846 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:45:15.035760 31846 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:45:15.036001 31846 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:45:15.036017 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036022 31846 net.cpp:165] Memory required for data: 921089500\nI0821 06:45:15.036033 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:15.036043 31846 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:45:15.036049 31846 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:45:15.036056 31846 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.036109 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:15.036252 31846 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:45:15.036265 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036270 31846 net.cpp:165] Memory required for data: 925185500\nI0821 06:45:15.036279 31846 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:45:15.036288 31846 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:45:15.036294 31846 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:45:15.036301 31846 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:15.036312 31846 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:45:15.036339 31846 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:45:15.036347 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036352 31846 net.cpp:165] Memory required for data: 929281500\nI0821 06:45:15.036357 31846 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:45:15.036368 31846 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:45:15.036375 31846 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:45:15.036381 31846 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.036391 31846 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:45:15.036397 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036402 31846 net.cpp:165] Memory required for data: 933377500\nI0821 06:45:15.036407 31846 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.036414 31846 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.036419 31846 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:45:15.036433 31846 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:15.036443 31846 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:15.036492 31846 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.036504 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036511 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.036515 31846 net.cpp:165] Memory required for data: 941569500\nI0821 06:45:15.036521 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:45:15.036542 31846 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:45:15.036550 31846 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:15.036558 31846 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:45:15.037021 31846 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:45:15.037035 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.037040 31846 net.cpp:165] Memory required for data: 945665500\nI0821 06:45:15.037050 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:45:15.037062 31846 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:45:15.037068 31846 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:45:15.037077 31846 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:45:15.037324 31846 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:45:15.037340 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.037345 31846 net.cpp:165] Memory required for data: 949761500\nI0821 06:45:15.037355 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:15.037364 31846 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:45:15.037370 31846 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:45:15.037379 31846 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.037441 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:15.037588 31846 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:45:15.037601 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.037606 31846 net.cpp:165] Memory required for data: 953857500\nI0821 06:45:15.037616 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:45:15.037623 31846 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:45:15.037629 31846 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:45:15.037639 31846 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.037649 31846 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:45:15.037657 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.037662 31846 net.cpp:165] Memory required for data: 957953500\nI0821 06:45:15.037667 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:45:15.037680 31846 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:45:15.037686 31846 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:45:15.037695 31846 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:45:15.038149 31846 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:45:15.038164 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.038168 31846 net.cpp:165] Memory required for data: 962049500\nI0821 06:45:15.038177 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:45:15.038189 31846 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:45:15.038195 31846 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:45:15.038204 31846 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:45:15.038463 31846 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:45:15.038477 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.038482 31846 net.cpp:165] Memory required for data: 966145500\nI0821 06:45:15.038492 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:15.038504 31846 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:45:15.038511 31846 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:45:15.038519 31846 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.038573 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:15.038720 31846 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:45:15.038733 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.038738 31846 net.cpp:165] Memory required for data: 970241500\nI0821 06:45:15.038748 31846 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:45:15.038758 31846 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:45:15.038765 31846 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:45:15.038772 31846 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:15.038794 31846 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:45:15.038822 31846 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:45:15.038831 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.038836 31846 net.cpp:165] Memory required for data: 974337500\nI0821 06:45:15.038841 31846 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:45:15.038862 31846 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:45:15.038868 31846 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:45:15.038877 31846 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.038885 31846 net.cpp:150] Setting up L2_b3_relu\nI0821 06:45:15.038892 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.038897 31846 net.cpp:165] Memory required for data: 978433500\nI0821 06:45:15.038902 31846 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.038913 31846 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.038918 31846 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:45:15.038925 31846 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:15.038934 31846 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:15.038982 31846 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.038995 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.039001 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.039005 31846 net.cpp:165] Memory required for data: 986625500\nI0821 06:45:15.039011 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:45:15.039022 31846 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:45:15.039028 31846 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:15.039041 31846 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:45:15.039501 31846 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:45:15.039515 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.039520 31846 net.cpp:165] Memory required for data: 990721500\nI0821 06:45:15.039530 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:45:15.039539 31846 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:45:15.039546 31846 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:45:15.039556 31846 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:45:15.039801 31846 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:45:15.039814 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.039819 31846 net.cpp:165] Memory required for data: 994817500\nI0821 06:45:15.039829 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:15.039841 31846 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:45:15.039849 31846 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:45:15.039855 31846 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.039909 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:15.040052 31846 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:45:15.040066 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.040071 31846 net.cpp:165] Memory required for data: 998913500\nI0821 06:45:15.040079 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:45:15.040091 31846 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:45:15.040096 31846 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:45:15.040103 31846 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.040113 31846 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:45:15.040119 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.040124 31846 net.cpp:165] Memory required for data: 1003009500\nI0821 06:45:15.040129 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:45:15.040150 31846 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:45:15.040158 31846 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:45:15.040168 31846 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:45:15.040629 31846 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:45:15.040644 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.040649 31846 net.cpp:165] Memory required for data: 1007105500\nI0821 06:45:15.040658 31846 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:45:15.040668 31846 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:45:15.040673 31846 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:45:15.040681 31846 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:45:15.040930 31846 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:45:15.040942 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.040947 31846 net.cpp:165] Memory required for data: 1011201500\nI0821 06:45:15.040957 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:15.040966 31846 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:45:15.040971 31846 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:45:15.040982 31846 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.041036 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:15.041180 31846 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:45:15.041193 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041198 31846 net.cpp:165] Memory required for data: 1015297500\nI0821 06:45:15.041206 31846 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:45:15.041215 31846 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:45:15.041221 31846 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:45:15.041229 31846 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:15.041239 31846 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:45:15.041265 31846 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:45:15.041277 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041282 31846 net.cpp:165] Memory required for data: 1019393500\nI0821 06:45:15.041287 31846 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:45:15.041296 31846 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:45:15.041301 31846 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:45:15.041307 31846 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.041316 31846 net.cpp:150] Setting up L2_b4_relu\nI0821 06:45:15.041323 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041328 31846 net.cpp:165] Memory required for data: 1023489500\nI0821 06:45:15.041333 31846 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.041343 31846 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.041348 31846 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:45:15.041357 31846 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:15.041365 31846 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:15.041411 31846 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.041429 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041436 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041440 31846 net.cpp:165] Memory required for data: 1031681500\nI0821 06:45:15.041446 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:45:15.041456 31846 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:45:15.041463 31846 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:15.041474 31846 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:45:15.041939 31846 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:45:15.041960 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.041965 31846 net.cpp:165] Memory required for data: 1035777500\nI0821 06:45:15.041975 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:45:15.041985 31846 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:45:15.041990 31846 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:45:15.042001 31846 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:45:15.042253 31846 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:45:15.042265 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.042270 31846 net.cpp:165] Memory required for data: 1039873500\nI0821 06:45:15.042280 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:15.042292 31846 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:45:15.042299 31846 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:45:15.042306 31846 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.042361 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:15.042518 31846 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:45:15.042532 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.042537 31846 net.cpp:165] Memory required for data: 1043969500\nI0821 06:45:15.042546 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:45:15.042556 31846 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:45:15.042562 31846 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:45:15.042572 31846 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.042582 31846 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:45:15.042588 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.042593 31846 net.cpp:165] Memory required for data: 1048065500\nI0821 06:45:15.042598 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:45:15.042613 31846 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:45:15.042618 31846 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:45:15.042629 31846 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:45:15.043083 31846 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:45:15.043097 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.043102 31846 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:45:15.043112 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:45:15.043120 31846 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:45:15.043126 31846 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:45:15.043135 31846 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:45:15.043381 31846 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:45:15.043393 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.043398 31846 net.cpp:165] Memory required for data: 1056257500\nI0821 06:45:15.043408 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:15.043417 31846 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:45:15.043577 31846 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:45:15.043596 31846 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.043656 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:15.043803 31846 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:45:15.043817 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.043822 31846 net.cpp:165] Memory required for data: 1060353500\nI0821 06:45:15.043830 31846 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:45:15.043839 31846 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:45:15.043845 31846 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:45:15.043853 31846 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:15.043864 31846 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:45:15.043890 31846 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:45:15.043900 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.043911 31846 net.cpp:165] Memory required for data: 1064449500\nI0821 06:45:15.043917 31846 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:45:15.043927 31846 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:45:15.043934 31846 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:45:15.043941 31846 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.043951 31846 net.cpp:150] Setting up L2_b5_relu\nI0821 06:45:15.043957 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.043962 31846 net.cpp:165] Memory required for data: 1068545500\nI0821 06:45:15.043967 31846 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.043979 31846 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.043985 31846 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:45:15.043992 31846 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:15.044003 31846 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:15.044046 31846 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.044061 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.044068 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.044073 31846 net.cpp:165] Memory required for data: 1076737500\nI0821 06:45:15.044078 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:45:15.044090 31846 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:45:15.044095 31846 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:15.044104 31846 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:45:15.044577 31846 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:45:15.044594 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.044598 31846 net.cpp:165] Memory required for data: 1080833500\nI0821 06:45:15.044607 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:45:15.044620 31846 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:45:15.044626 31846 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:45:15.044634 31846 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:45:15.044888 31846 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:45:15.044901 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.044906 31846 net.cpp:165] Memory required for data: 1084929500\nI0821 06:45:15.044916 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:15.044925 31846 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:45:15.044931 31846 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:45:15.044942 31846 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.044997 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:15.045142 31846 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:45:15.045156 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.045159 31846 net.cpp:165] Memory required for data: 1089025500\nI0821 06:45:15.045168 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:45:15.045177 31846 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:45:15.045183 31846 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:45:15.045193 31846 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.045203 31846 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:45:15.045210 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.045214 31846 net.cpp:165] Memory required for data: 1093121500\nI0821 06:45:15.045219 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:45:15.045233 31846 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:45:15.045239 31846 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:45:15.045248 31846 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:45:15.045713 31846 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:45:15.045734 31846 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.045740 31846 net.cpp:165] Memory required for data: 1097217500\nI0821 06:45:15.045749 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:45:15.045761 31846 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:45:15.045768 31846 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:45:15.045776 31846 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:45:15.046025 31846 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:45:15.046038 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046043 31846 net.cpp:165] Memory required for data: 1101313500\nI0821 06:45:15.046053 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:15.046062 31846 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:45:15.046068 31846 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:45:15.046078 31846 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.046133 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:15.046278 31846 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:45:15.046291 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046296 31846 net.cpp:165] Memory required for data: 1105409500\nI0821 06:45:15.046305 31846 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:45:15.046314 31846 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:45:15.046320 31846 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:45:15.046327 31846 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:15.046339 31846 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:45:15.046365 31846 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:45:15.046373 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046378 31846 net.cpp:165] Memory required for data: 1109505500\nI0821 06:45:15.046383 31846 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:45:15.046394 31846 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:45:15.046399 31846 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:45:15.046406 31846 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.046416 31846 net.cpp:150] Setting up L2_b6_relu\nI0821 06:45:15.046428 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046434 31846 net.cpp:165] Memory required for data: 1113601500\nI0821 06:45:15.046439 31846 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.046450 31846 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.046456 31846 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:45:15.046463 31846 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:15.046473 31846 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:15.046517 31846 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.046531 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046538 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.046542 31846 net.cpp:165] Memory required for data: 1121793500\nI0821 06:45:15.046547 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:45:15.046558 31846 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:45:15.046564 31846 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:15.046573 31846 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:45:15.047039 31846 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:45:15.047052 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.047057 31846 net.cpp:165] Memory required for data: 1125889500\nI0821 06:45:15.047066 31846 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:45:15.047078 31846 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:45:15.047091 31846 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:45:15.047101 31846 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:45:15.047349 31846 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:45:15.047363 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.047368 31846 net.cpp:165] Memory required for data: 1129985500\nI0821 06:45:15.047377 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:15.047386 31846 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:45:15.047392 31846 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:45:15.047402 31846 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.047466 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:15.047616 31846 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:45:15.047628 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.047633 31846 net.cpp:165] Memory required for data: 1134081500\nI0821 06:45:15.047642 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:45:15.047650 31846 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:45:15.047657 31846 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:45:15.047668 31846 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.047678 31846 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:45:15.047685 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.047690 31846 net.cpp:165] Memory required for data: 1138177500\nI0821 06:45:15.047695 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:45:15.047708 31846 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:45:15.047714 31846 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:45:15.047724 31846 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:45:15.048187 31846 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:45:15.048200 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.048205 31846 net.cpp:165] Memory required for data: 1142273500\nI0821 06:45:15.048213 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:45:15.048225 31846 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:45:15.048231 31846 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:45:15.048240 31846 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:45:15.048492 31846 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:45:15.048506 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.048511 31846 net.cpp:165] Memory required for data: 1146369500\nI0821 06:45:15.048522 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:15.048530 31846 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:45:15.048537 31846 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:45:15.048548 31846 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.048604 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:15.048753 31846 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:45:15.048765 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.048770 31846 net.cpp:165] Memory required for data: 1150465500\nI0821 06:45:15.048779 31846 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:45:15.048789 31846 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:45:15.048794 31846 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:45:15.048801 31846 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:15.048812 31846 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:45:15.048840 31846 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:45:15.048848 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.048853 31846 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:45:15.048858 31846 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:45:15.048866 31846 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:45:15.048876 31846 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:45:15.048890 31846 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.048900 31846 net.cpp:150] Setting up L2_b7_relu\nI0821 06:45:15.048907 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.048912 31846 net.cpp:165] Memory required for data: 1158657500\nI0821 06:45:15.048916 31846 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.048923 31846 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.048928 31846 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:45:15.048938 31846 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:15.048949 31846 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:15.048992 31846 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.049005 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.049010 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.049015 31846 net.cpp:165] Memory required for data: 1166849500\nI0821 06:45:15.049021 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:45:15.049036 31846 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:45:15.049041 31846 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:15.049051 31846 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:45:15.049526 31846 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:45:15.049541 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.049546 31846 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:45:15.049556 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:45:15.049567 31846 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:45:15.049574 31846 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:45:15.049582 31846 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:45:15.049830 31846 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:45:15.049844 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.049849 31846 net.cpp:165] Memory required for data: 1175041500\nI0821 06:45:15.049860 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:15.049868 31846 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:45:15.049875 31846 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:45:15.049885 31846 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.049939 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:15.050089 31846 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:45:15.050102 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.050107 31846 net.cpp:165] Memory required for data: 1179137500\nI0821 06:45:15.050117 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:45:15.050124 31846 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:45:15.050130 31846 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:45:15.050140 31846 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.050150 31846 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:45:15.050158 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.050163 31846 net.cpp:165] Memory required for data: 1183233500\nI0821 06:45:15.050166 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:45:15.050180 31846 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:45:15.050187 31846 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:45:15.050195 31846 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:45:15.050665 31846 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:45:15.050680 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.050685 31846 net.cpp:165] Memory required for data: 1187329500\nI0821 06:45:15.050694 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:45:15.050704 31846 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:45:15.050720 31846 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:45:15.050729 31846 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:45:15.050981 31846 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:45:15.050994 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.050999 31846 net.cpp:165] Memory required for data: 1191425500\nI0821 06:45:15.051010 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:15.051018 31846 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:45:15.051025 31846 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:45:15.051033 31846 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.051090 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:15.051234 31846 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:45:15.051250 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.051255 31846 net.cpp:165] Memory required for data: 1195521500\nI0821 06:45:15.051265 31846 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:45:15.051273 31846 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:45:15.051280 31846 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:45:15.051286 31846 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:15.051293 31846 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:45:15.051323 31846 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:45:15.051332 31846 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:15.051337 31846 net.cpp:165] Memory required for data: 1199617500\nI0821 06:45:15.051342 31846 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:45:15.051350 31846 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:45:15.051355 31846 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:45:15.051365 31846 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.051374 31846 net.cpp:150] Setting up L2_b8_relu\nI0821 06:45:15.051381 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.051386 31846 net.cpp:165] Memory required for data: 1203713500\nI0821 06:45:15.051390 31846 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.051398 31846 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.051403 31846 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:45:15.051414 31846 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:15.051442 31846 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:15.051493 31846 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.051506 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.051512 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.051517 31846 net.cpp:165] Memory required for data: 1211905500\nI0821 06:45:15.051523 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:45:15.051538 31846 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:45:15.051544 31846 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:15.051558 31846 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:45:15.052026 31846 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:45:15.052039 31846 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:15.052044 31846 net.cpp:165] Memory required for data: 1216001500\nI0821 06:45:15.052053 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:45:15.052065 31846 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:45:15.052072 31846 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:45:15.052083 31846 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:45:15.052328 31846 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:45:15.052341 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.052346 31846 net.cpp:165] Memory required for data: 1220097500\nI0821 06:45:15.052364 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:15.052373 31846 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:45:15.052381 31846 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:45:15.052387 31846 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.052453 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:15.052605 31846 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:45:15.052621 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.052626 31846 net.cpp:165] Memory required for data: 1224193500\nI0821 06:45:15.052635 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:45:15.052644 31846 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:45:15.052650 31846 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:45:15.052657 31846 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.052666 31846 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:45:15.052673 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.052678 31846 net.cpp:165] Memory required for data: 1228289500\nI0821 06:45:15.052683 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:45:15.052696 31846 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:45:15.052702 31846 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:45:15.052713 31846 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:45:15.053179 31846 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:45:15.053192 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.053197 31846 net.cpp:165] Memory required for data: 1232385500\nI0821 06:45:15.053206 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:45:15.053218 31846 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:45:15.053225 31846 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:45:15.053236 31846 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:45:15.053494 31846 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:45:15.053508 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.053514 31846 net.cpp:165] Memory required for data: 1236481500\nI0821 06:45:15.053555 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:15.053570 31846 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:45:15.053577 31846 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:45:15.053586 31846 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.053644 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:15.053791 31846 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:45:15.053803 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.053808 31846 net.cpp:165] Memory required for data: 1240577500\nI0821 06:45:15.053817 31846 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:45:15.053829 31846 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:45:15.053835 31846 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:45:15.053843 31846 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:15.053850 31846 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:45:15.053877 31846 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:45:15.053890 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.053895 31846 net.cpp:165] Memory required for data: 1244673500\nI0821 06:45:15.053900 31846 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:45:15.053908 31846 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:45:15.053915 31846 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:45:15.053921 31846 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.053930 31846 net.cpp:150] Setting up L2_b9_relu\nI0821 06:45:15.053937 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.053941 31846 net.cpp:165] Memory required for data: 1248769500\nI0821 06:45:15.053946 31846 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.053966 31846 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.053972 31846 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:45:15.053979 31846 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:15.053992 31846 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:15.054040 31846 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.054052 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.054059 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.054064 31846 net.cpp:165] Memory required for data: 1256961500\nI0821 06:45:15.054069 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:45:15.054080 31846 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:45:15.054087 31846 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:15.054098 31846 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:45:15.054581 31846 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:45:15.054596 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.054601 31846 net.cpp:165] Memory required for data: 1257985500\nI0821 06:45:15.054610 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:45:15.054620 31846 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:45:15.054625 31846 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:45:15.054636 31846 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:45:15.054901 31846 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:45:15.054918 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.054922 31846 net.cpp:165] Memory required for data: 1259009500\nI0821 06:45:15.054934 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:15.054942 31846 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:45:15.054949 31846 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:45:15.054956 31846 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.055011 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:15.055164 31846 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:45:15.055177 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.055182 31846 net.cpp:165] Memory required for data: 1260033500\nI0821 06:45:15.055191 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:45:15.055202 31846 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:45:15.055209 31846 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:45:15.055217 31846 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.055225 31846 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:45:15.055233 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.055238 31846 net.cpp:165] Memory required for data: 1261057500\nI0821 06:45:15.055243 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:45:15.055255 31846 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:45:15.055261 31846 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:45:15.055270 31846 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:45:15.055747 31846 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:45:15.055761 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.055766 31846 net.cpp:165] Memory required for data: 1262081500\nI0821 06:45:15.055775 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:45:15.055788 31846 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:45:15.055794 31846 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:45:15.055804 31846 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:45:15.056064 31846 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:45:15.056077 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.056082 31846 net.cpp:165] Memory required for data: 1263105500\nI0821 06:45:15.056100 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:15.056109 31846 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:45:15.056115 31846 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:45:15.056126 31846 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.056182 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:15.056339 31846 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:45:15.056351 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.056356 31846 net.cpp:165] Memory required for data: 1264129500\nI0821 06:45:15.056365 31846 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:45:15.056375 31846 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:45:15.056380 31846 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:15.056392 31846 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:45:15.056437 31846 net.cpp:150] Setting up L3_b1_pool\nI0821 06:45:15.056448 31846 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:45:15.056453 31846 net.cpp:165] Memory required for data: 1265153500\nI0821 06:45:15.056459 31846 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:45:15.056468 31846 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:45:15.056473 31846 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:45:15.056480 31846 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:45:15.056490 31846 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:45:15.056524 31846 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:45:15.056532 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.056537 31846 net.cpp:165] Memory required for data: 1266177500\nI0821 06:45:15.056542 31846 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:45:15.056550 31846 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:45:15.056555 31846 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:45:15.056562 31846 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.056571 31846 net.cpp:150] Setting up L3_b1_relu\nI0821 06:45:15.056578 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.056583 31846 net.cpp:165] Memory required for data: 1267201500\nI0821 06:45:15.056588 31846 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:45:15.056601 31846 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:45:15.056608 31846 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:45:15.057850 31846 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:45:15.057868 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.057873 31846 net.cpp:165] Memory required for data: 1268225500\nI0821 06:45:15.057879 31846 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:45:15.057888 31846 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:45:15.057895 31846 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:45:15.057902 31846 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:45:15.057912 31846 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:45:15.057952 31846 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:45:15.057968 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.057973 31846 net.cpp:165] Memory required for data: 1270273500\nI0821 06:45:15.057978 31846 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.057986 31846 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.057991 31846 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:45:15.057999 31846 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:15.058008 31846 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:15.058060 31846 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.058073 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.058079 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.058094 31846 net.cpp:165] Memory required for data: 1274369500\nI0821 06:45:15.058100 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:45:15.058115 31846 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:45:15.058122 31846 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:15.058131 31846 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:45:15.060118 31846 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:45:15.060137 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.060142 31846 net.cpp:165] Memory required for data: 1276417500\nI0821 06:45:15.060151 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:45:15.060161 31846 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:45:15.060168 31846 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:45:15.060179 31846 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:45:15.060449 31846 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:45:15.060462 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.060467 31846 net.cpp:165] Memory required for data: 1278465500\nI0821 06:45:15.060478 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:15.060487 31846 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:45:15.060494 31846 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:45:15.060501 31846 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.060561 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:15.060716 31846 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:45:15.060732 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.060737 31846 net.cpp:165] Memory required for data: 1280513500\nI0821 06:45:15.060746 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:45:15.060755 31846 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:45:15.060760 31846 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:45:15.060767 31846 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.060777 31846 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:45:15.060784 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.060789 31846 net.cpp:165] Memory required for data: 1282561500\nI0821 06:45:15.060793 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:45:15.060808 31846 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:45:15.060816 31846 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:45:15.060823 31846 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:45:15.061873 31846 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:45:15.061888 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.061894 31846 net.cpp:165] Memory required for data: 1284609500\nI0821 06:45:15.061902 31846 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:45:15.061915 31846 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:45:15.061923 31846 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:45:15.061933 31846 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:45:15.062194 31846 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:45:15.062208 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062213 31846 net.cpp:165] Memory required for data: 1286657500\nI0821 06:45:15.062224 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:15.062233 31846 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:45:15.062239 31846 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:45:15.062249 31846 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.062307 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:15.062469 31846 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:45:15.062482 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062487 31846 net.cpp:165] Memory required for data: 1288705500\nI0821 06:45:15.062497 31846 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:45:15.062510 31846 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:45:15.062525 31846 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:45:15.062532 31846 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:15.062541 31846 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:45:15.062578 31846 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:45:15.062590 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062595 31846 net.cpp:165] Memory required for data: 1290753500\nI0821 06:45:15.062600 31846 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:45:15.062608 31846 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:45:15.062614 31846 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:45:15.062624 31846 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.062634 31846 net.cpp:150] Setting up L3_b2_relu\nI0821 06:45:15.062641 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062645 31846 net.cpp:165] Memory required for data: 1292801500\nI0821 06:45:15.062650 31846 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.062657 31846 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.062664 31846 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:45:15.062670 31846 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:15.062680 31846 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:15.062731 31846 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.062742 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062749 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.062754 31846 net.cpp:165] Memory required for data: 1296897500\nI0821 06:45:15.062759 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:45:15.062772 31846 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:45:15.062777 31846 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:15.062789 31846 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:45:15.063805 31846 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:45:15.063820 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.063825 31846 net.cpp:165] Memory required for data: 1298945500\nI0821 06:45:15.063834 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:45:15.063846 31846 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:45:15.063853 31846 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:45:15.063861 31846 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:45:15.064128 31846 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:45:15.064141 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.064147 31846 net.cpp:165] Memory required for data: 1300993500\nI0821 06:45:15.064157 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:15.064167 31846 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:45:15.064172 31846 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:45:15.064179 31846 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.064239 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:15.064391 31846 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:45:15.064407 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.064412 31846 net.cpp:165] Memory required for data: 1303041500\nI0821 06:45:15.064426 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:45:15.064436 31846 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:45:15.064442 31846 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:45:15.064450 31846 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.064460 31846 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:45:15.064466 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.064478 31846 net.cpp:165] Memory required for data: 1305089500\nI0821 06:45:15.064484 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:45:15.064498 31846 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:45:15.064505 31846 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:45:15.064513 31846 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:45:15.065533 31846 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:45:15.065548 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.065553 31846 net.cpp:165] Memory required for data: 1307137500\nI0821 06:45:15.065562 31846 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:45:15.065575 31846 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:45:15.065582 31846 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:45:15.065592 31846 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:45:15.065853 31846 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:45:15.065866 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.065871 31846 net.cpp:165] Memory required for data: 1309185500\nI0821 06:45:15.065881 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:15.065892 31846 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:45:15.065899 31846 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:45:15.065907 31846 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.065964 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:15.066121 31846 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:45:15.066134 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.066139 31846 net.cpp:165] Memory required for data: 1311233500\nI0821 06:45:15.066148 31846 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:45:15.066162 31846 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:45:15.066169 31846 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:45:15.066176 31846 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:15.066184 31846 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:45:15.066220 31846 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:45:15.066231 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.066236 31846 net.cpp:165] Memory required for data: 1313281500\nI0821 06:45:15.066241 31846 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:45:15.066249 31846 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:45:15.066256 31846 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:45:15.066265 31846 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.066275 31846 net.cpp:150] Setting up L3_b3_relu\nI0821 06:45:15.066282 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.066287 31846 net.cpp:165] Memory required for data: 1315329500\nI0821 06:45:15.066292 31846 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.066298 31846 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.066304 31846 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:45:15.066311 31846 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:15.066320 31846 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:15.066370 31846 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.066380 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.066387 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.066392 31846 net.cpp:165] Memory required for data: 1319425500\nI0821 06:45:15.066397 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:45:15.066408 31846 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:45:15.066414 31846 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:15.066432 31846 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:45:15.067486 31846 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:45:15.067502 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.067507 31846 net.cpp:165] Memory required for data: 1321473500\nI0821 06:45:15.067517 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:45:15.067528 31846 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:45:15.067535 31846 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:45:15.067544 31846 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:45:15.067811 31846 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:45:15.067824 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.067829 31846 net.cpp:165] Memory required for data: 1323521500\nI0821 06:45:15.067839 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:15.067848 31846 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:45:15.067854 31846 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:45:15.067862 31846 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.067921 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:15.068081 31846 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:45:15.068095 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.068100 31846 net.cpp:165] Memory required for data: 1325569500\nI0821 06:45:15.068109 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:45:15.068119 31846 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:45:15.068125 31846 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:45:15.068131 31846 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.068141 31846 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:45:15.068148 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.068152 31846 net.cpp:165] Memory required for data: 1327617500\nI0821 06:45:15.068157 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:45:15.068171 31846 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:45:15.068177 31846 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:45:15.068188 31846 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:45:15.069226 31846 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:45:15.069242 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.069247 31846 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:45:15.069255 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:45:15.069268 31846 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:45:15.069274 31846 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:45:15.069283 31846 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:45:15.069557 31846 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:45:15.069571 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.069576 31846 net.cpp:165] Memory required for data: 1331713500\nI0821 06:45:15.069586 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:15.069598 31846 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:45:15.069605 31846 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:45:15.069612 31846 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.069674 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:15.069835 31846 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:45:15.069849 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.069854 31846 net.cpp:165] Memory required for data: 1333761500\nI0821 06:45:15.069862 31846 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:45:15.069875 31846 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:45:15.069881 31846 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:45:15.069888 31846 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:15.069898 31846 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:45:15.069932 31846 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:45:15.069948 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.069953 31846 net.cpp:165] Memory required for data: 1335809500\nI0821 06:45:15.069958 31846 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:45:15.069967 31846 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:45:15.069972 31846 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:45:15.069983 31846 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.069993 31846 net.cpp:150] Setting up L3_b4_relu\nI0821 06:45:15.069999 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.070003 31846 net.cpp:165] Memory required for data: 1337857500\nI0821 06:45:15.070008 31846 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.070015 31846 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.070020 31846 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:45:15.070029 31846 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:15.070037 31846 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:15.070263 31846 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.070278 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.070286 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.070291 31846 net.cpp:165] Memory required for data: 1341953500\nI0821 06:45:15.070296 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:45:15.070310 31846 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:45:15.070317 31846 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:15.070327 31846 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:45:15.071353 31846 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:45:15.071368 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.071374 31846 net.cpp:165] Memory required for data: 1344001500\nI0821 06:45:15.071383 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:45:15.071398 31846 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:45:15.071405 31846 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:45:15.071413 31846 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:45:15.072692 31846 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:45:15.072710 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.072715 31846 net.cpp:165] Memory required for data: 1346049500\nI0821 06:45:15.072727 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:15.072741 31846 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:45:15.072747 31846 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:45:15.072755 31846 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.072819 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:15.072979 31846 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:45:15.072993 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.072999 31846 net.cpp:165] Memory required for data: 1348097500\nI0821 06:45:15.073007 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:45:15.073019 31846 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:45:15.073024 31846 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:45:15.073032 31846 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.073041 31846 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:45:15.073048 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.073053 31846 net.cpp:165] Memory required for data: 1350145500\nI0821 06:45:15.073058 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:45:15.073072 31846 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:45:15.073078 31846 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:45:15.073091 31846 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:45:15.075104 31846 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:45:15.075122 31846 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:15.075127 31846 net.cpp:165] Memory required for data: 1352193500\nI0821 06:45:15.075137 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:45:15.075150 31846 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:45:15.075157 31846 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:45:15.075165 31846 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:45:15.075435 31846 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:45:15.075449 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075454 31846 net.cpp:165] Memory required for data: 1354241500\nI0821 06:45:15.075464 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:15.075476 31846 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:45:15.075484 31846 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:45:15.075491 31846 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.075552 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:15.075708 31846 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:45:15.075722 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075727 31846 net.cpp:165] Memory required for data: 1356289500\nI0821 06:45:15.075736 31846 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:45:15.075748 31846 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:45:15.075755 31846 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:45:15.075762 31846 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:15.075770 31846 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:45:15.075805 31846 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:45:15.075815 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075819 31846 net.cpp:165] Memory required for data: 1358337500\nI0821 06:45:15.075824 31846 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:45:15.075832 31846 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:45:15.075839 31846 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:45:15.075848 31846 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.075858 31846 net.cpp:150] Setting up L3_b5_relu\nI0821 06:45:15.075865 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075870 31846 net.cpp:165] Memory required for data: 1360385500\nI0821 06:45:15.075875 31846 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.075881 31846 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.075887 31846 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:45:15.075894 31846 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:15.075903 31846 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:15.075951 31846 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.075963 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075970 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.075974 31846 net.cpp:165] Memory required for data: 1364481500\nI0821 06:45:15.075979 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:45:15.075994 31846 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:45:15.076000 31846 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:15.076009 31846 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:45:15.077020 31846 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:45:15.077035 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.077040 31846 net.cpp:165] Memory required for data: 1366529500\nI0821 06:45:15.077050 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:45:15.077064 
31846 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:45:15.077077 31846 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:45:15.077087 31846 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:45:15.077349 31846 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:45:15.077363 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.077368 31846 net.cpp:165] Memory required for data: 1368577500\nI0821 06:45:15.077378 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:15.077388 31846 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:45:15.077394 31846 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:45:15.077401 31846 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.077467 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:15.077620 31846 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:45:15.077633 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.077638 31846 net.cpp:165] Memory required for data: 1370625500\nI0821 06:45:15.077647 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:45:15.077656 31846 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:45:15.077662 31846 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:45:15.077669 31846 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.077683 31846 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:45:15.077690 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.077695 31846 net.cpp:165] Memory required for data: 1372673500\nI0821 06:45:15.077700 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:45:15.077711 31846 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:45:15.077720 31846 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:45:15.077729 31846 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:45:15.078747 31846 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:45:15.078761 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.078766 31846 net.cpp:165] Memory required for data: 1374721500\nI0821 06:45:15.078775 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:45:15.078788 31846 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:45:15.078794 31846 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:45:15.078802 31846 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:45:15.079064 31846 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:45:15.079077 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079082 31846 net.cpp:165] Memory required for data: 1376769500\nI0821 06:45:15.079092 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:15.079105 31846 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:45:15.079113 31846 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:45:15.079120 31846 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.079180 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:15.079337 31846 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:45:15.079350 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079355 31846 net.cpp:165] Memory required for data: 1378817500\nI0821 06:45:15.079365 31846 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:45:15.079375 31846 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:45:15.079382 31846 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:45:15.079390 31846 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:15.079399 31846 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:45:15.079438 31846 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:45:15.079452 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079457 31846 net.cpp:165] Memory required for data: 1380865500\nI0821 06:45:15.079463 31846 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:45:15.079473 31846 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:45:15.079479 31846 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:45:15.079493 31846 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.079504 31846 net.cpp:150] Setting up L3_b6_relu\nI0821 06:45:15.079511 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079516 31846 net.cpp:165] Memory required for data: 1382913500\nI0821 06:45:15.079521 31846 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.079529 31846 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.079533 31846 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:45:15.079541 31846 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:15.079550 31846 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:15.079601 31846 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.079612 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079618 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.079623 31846 net.cpp:165] Memory required for data: 1387009500\nI0821 06:45:15.079628 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:45:15.079643 31846 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:45:15.079649 31846 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:15.079658 31846 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:45:15.080672 31846 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:45:15.080687 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.080693 31846 net.cpp:165] Memory required for data: 1389057500\nI0821 06:45:15.080703 31846 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:45:15.080714 31846 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:45:15.080720 31846 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:45:15.080729 31846 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:45:15.080992 31846 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:45:15.081007 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.081012 31846 net.cpp:165] Memory required for data: 1391105500\nI0821 06:45:15.081022 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:15.081030 31846 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:45:15.081037 31846 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:45:15.081044 31846 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.081104 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:15.081260 31846 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:45:15.081274 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.081279 31846 net.cpp:165] Memory required for data: 1393153500\nI0821 06:45:15.081288 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:45:15.081323 31846 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:45:15.081332 31846 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:45:15.081341 31846 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.081351 31846 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:45:15.081357 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.081362 31846 net.cpp:165] Memory required for data: 1395201500\nI0821 06:45:15.081367 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:45:15.081382 31846 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:45:15.081388 31846 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:45:15.081396 31846 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:45:15.082429 31846 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:45:15.082444 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.082449 31846 net.cpp:165] Memory required for data: 1397249500\nI0821 06:45:15.082458 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:45:15.082470 31846 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:45:15.082484 31846 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:45:15.082494 31846 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:45:15.082758 31846 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:45:15.082772 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.082777 31846 net.cpp:165] Memory required for data: 1399297500\nI0821 06:45:15.082787 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:15.082797 31846 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:45:15.082803 31846 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:45:15.082810 31846 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.082870 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:15.083024 31846 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:45:15.083037 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.083042 31846 net.cpp:165] Memory required for data: 1401345500\nI0821 06:45:15.083051 31846 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:45:15.083060 31846 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:45:15.083066 31846 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:45:15.083073 31846 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:15.083084 31846 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:45:15.083117 31846 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:45:15.083129 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.083134 31846 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:45:15.083139 31846 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:45:15.083148 31846 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:45:15.083153 31846 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:45:15.083160 31846 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.083169 31846 net.cpp:150] Setting up L3_b7_relu\nI0821 06:45:15.083176 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.083180 31846 net.cpp:165] Memory required for data: 1405441500\nI0821 06:45:15.083185 31846 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.083196 31846 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.083202 31846 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:45:15.083209 31846 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:15.083218 31846 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:15.083263 31846 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.083278 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.083286 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.083290 31846 net.cpp:165] Memory required for data: 1409537500\nI0821 06:45:15.083295 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:45:15.083307 31846 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:45:15.083313 31846 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:15.083323 31846 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:45:15.084342 31846 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:45:15.084360 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.084365 31846 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:45:15.084374 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:45:15.084383 31846 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:45:15.084390 31846 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:45:15.084401 31846 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:45:15.084676 31846 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:45:15.084692 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.084697 31846 net.cpp:165] Memory required for data: 1413633500\nI0821 06:45:15.084713 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:15.084725 31846 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:45:15.084733 31846 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:45:15.084739 31846 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.084797 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:15.084957 31846 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:45:15.084970 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.084975 31846 net.cpp:165] Memory required for data: 1415681500\nI0821 06:45:15.084985 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:45:15.084992 31846 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:45:15.084998 31846 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:45:15.085008 31846 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.085018 31846 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:45:15.085026 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.085029 31846 net.cpp:165] Memory required for data: 1417729500\nI0821 06:45:15.085034 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:45:15.085048 31846 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:45:15.085054 31846 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:45:15.085063 31846 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:45:15.086076 31846 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:45:15.086091 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086096 31846 net.cpp:165] Memory required for data: 1419777500\nI0821 06:45:15.086104 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:45:15.086117 31846 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:45:15.086124 31846 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:45:15.086133 31846 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:45:15.086391 31846 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:45:15.086405 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086410 31846 net.cpp:165] Memory required for data: 1421825500\nI0821 06:45:15.086419 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:15.086434 31846 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:45:15.086441 31846 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:45:15.086448 31846 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.086519 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:15.086678 31846 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:45:15.086691 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086696 31846 net.cpp:165] Memory required for data: 1423873500\nI0821 06:45:15.086706 31846 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:45:15.086715 31846 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:45:15.086722 31846 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:45:15.086729 31846 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:15.086740 31846 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:45:15.086772 31846 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:45:15.086786 31846 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:15.086789 31846 net.cpp:165] Memory required for data: 1425921500\nI0821 06:45:15.086796 31846 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:45:15.086802 31846 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:45:15.086808 31846 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:45:15.086815 31846 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.086824 31846 net.cpp:150] Setting up L3_b8_relu\nI0821 06:45:15.086832 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086836 31846 net.cpp:165] Memory required for data: 1427969500\nI0821 06:45:15.086841 31846 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.086858 31846 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.086864 31846 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:45:15.086871 31846 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:15.086881 31846 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:15.086930 31846 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.086941 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086948 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.086953 31846 net.cpp:165] Memory required for data: 1432065500\nI0821 06:45:15.086958 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:45:15.086969 31846 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:45:15.086977 31846 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:15.086987 31846 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:45:15.089005 31846 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:45:15.089023 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:45:15.089030 31846 net.cpp:165] Memory required for data: 1434113500\nI0821 06:45:15.089038 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:45:15.089051 31846 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:45:15.089059 31846 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:45:15.089071 31846 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:45:15.089332 31846 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:45:15.089346 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.089351 31846 net.cpp:165] Memory required for data: 1436161500\nI0821 06:45:15.089361 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:15.089370 31846 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:45:15.089377 31846 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:45:15.089390 31846 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.089457 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:15.089615 31846 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:45:15.089628 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.089633 31846 net.cpp:165] Memory required for data: 1438209500\nI0821 06:45:15.089643 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:45:15.089651 31846 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:45:15.089658 31846 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:45:15.089668 31846 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.089679 31846 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:45:15.089685 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.089689 31846 net.cpp:165] Memory required for data: 1440257500\nI0821 06:45:15.089694 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:45:15.089709 31846 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:45:15.089715 31846 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:45:15.089726 31846 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:45:15.090741 31846 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:45:15.090756 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.090761 31846 net.cpp:165] Memory required for data: 1442305500\nI0821 06:45:15.090770 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:45:15.090780 31846 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:45:15.090787 31846 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:45:15.090798 31846 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:45:15.091065 31846 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:45:15.091079 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.091084 31846 net.cpp:165] Memory required for data: 1444353500\nI0821 06:45:15.091104 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:15.091115 31846 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:45:15.091122 31846 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:45:15.091130 31846 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.091188 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:15.091343 31846 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:45:15.091356 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.091361 31846 net.cpp:165] Memory required for data: 1446401500\nI0821 06:45:15.091370 31846 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:45:15.091379 31846 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:45:15.091385 31846 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:45:15.091392 31846 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:15.091404 31846 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:45:15.091444 31846 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:45:15.091457 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.091462 31846 net.cpp:165] Memory required for data: 1448449500\nI0821 06:45:15.091467 31846 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:45:15.091481 31846 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:45:15.091487 31846 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:45:15.091495 31846 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.091505 31846 net.cpp:150] Setting up L3_b9_relu\nI0821 06:45:15.091511 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.091516 31846 net.cpp:165] Memory required for data: 1450497500\nI0821 06:45:15.091521 31846 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:45:15.091528 31846 net.cpp:100] Creating Layer post_pool\nI0821 06:45:15.091534 31846 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:45:15.091544 31846 net.cpp:408] post_pool -> post_pool\nI0821 06:45:15.091579 31846 net.cpp:150] Setting up post_pool\nI0821 06:45:15.091589 31846 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:45:15.091594 31846 net.cpp:165] Memory required for data: 1450529500\nI0821 06:45:15.091599 31846 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:45:15.091696 31846 net.cpp:100] Creating Layer post_FC\nI0821 06:45:15.091709 31846 net.cpp:434] post_FC <- post_pool\nI0821 06:45:15.091719 31846 net.cpp:408] post_FC -> post_FC_top\nI0821 06:45:15.091979 31846 net.cpp:150] Setting up post_FC\nI0821 06:45:15.091995 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.092000 31846 net.cpp:165] Memory required for data: 1450534500\nI0821 06:45:15.092010 31846 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:45:15.092018 31846 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:45:15.092025 31846 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:45:15.092036 31846 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:45:15.092046 31846 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:45:15.092097 31846 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:45:15.092110 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.092118 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.092123 31846 net.cpp:165] Memory required for data: 1450544500\nI0821 06:45:15.092128 31846 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:45:15.092172 31846 net.cpp:100] Creating Layer accuracy\nI0821 06:45:15.092185 31846 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:45:15.092191 31846 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:45:15.092200 31846 net.cpp:408] accuracy -> accuracy\nI0821 06:45:15.092242 31846 net.cpp:150] Setting up accuracy\nI0821 06:45:15.092255 31846 net.cpp:157] Top shape: (1)\nI0821 06:45:15.092260 31846 net.cpp:165] Memory required for data: 1450544504\nI0821 06:45:15.092265 31846 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:15.092281 31846 net.cpp:100] Creating Layer loss\nI0821 06:45:15.092288 31846 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:45:15.092295 31846 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:45:15.092306 31846 net.cpp:408] loss -> loss\nI0821 06:45:15.096879 31846 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:15.097074 31846 net.cpp:150] Setting up loss\nI0821 06:45:15.097092 31846 net.cpp:157] Top shape: (1)\nI0821 06:45:15.097098 31846 net.cpp:160]     with loss weight 1\nI0821 06:45:15.097180 31846 net.cpp:165] Memory required for data: 1450544508\nI0821 06:45:15.097190 31846 net.cpp:226] loss needs backward computation.\nI0821 06:45:15.097198 31846 net.cpp:228] accuracy does not need backward computation.\nI0821 06:45:15.097204 31846 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:45:15.097209 31846 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:45:15.097214 31846 net.cpp:226] post_pool needs backward computation.\nI0821 06:45:15.097219 31846 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:45:15.097224 31846 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.097230 31846 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.097235 31846 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.097240 31846 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.097245 31846 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.097250 31846 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.097254 31846 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.097259 31846 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.097265 31846 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:45:15.097270 31846 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:45:15.097275 31846 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.097280 31846 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.097285 31846 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.097290 31846 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.097295 31846 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.097301 31846 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:45:15.097306 31846 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.097311 31846 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.097316 31846 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:45:15.097321 31846 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:45:15.097326 31846 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:45:15.097332 31846 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.097337 31846 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.097343 31846 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.097348 31846 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.097352 31846 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.097357 31846 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.097362 31846 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.097371 31846 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:45:15.097378 31846 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:45:15.097383 31846 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.097388 31846 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.097393 31846 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.097398 31846 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.097404 31846 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.097416 31846 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.097430 31846 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.097437 31846 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.097443 31846 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:45:15.097448 31846 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:45:15.097453 31846 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:45:15.097460 31846 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.097465 31846 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.097470 31846 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.097476 31846 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.097481 31846 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.097486 31846 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.097491 31846 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.097496 31846 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:45:15.097501 31846 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:45:15.097506 31846 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.097512 31846 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.097518 31846 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.097523 31846 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.097528 31846 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.097533 31846 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.097539 31846 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.097545 31846 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.097550 31846 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:45:15.097555 31846 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:45:15.097561 31846 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:45:15.097566 31846 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.097571 31846 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.097578 31846 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.097582 31846 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.097587 31846 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.097592 
31846 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.097597 31846 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.097602 31846 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:45:15.097609 31846 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:45:15.097615 31846 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.097620 31846 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.097625 31846 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.097630 31846 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.097635 31846 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.097641 31846 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.097646 31846 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.097651 31846 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.097656 31846 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:45:15.097662 31846 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:45:15.097672 31846 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:45:15.097677 31846 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:45:15.097688 31846 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.097695 31846 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:45:15.097700 31846 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.097707 31846 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.097712 31846 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.097718 31846 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.097723 31846 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:45:15.097728 31846 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.097733 31846 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.097738 31846 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:45:15.097743 31846 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:45:15.097748 31846 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.097754 31846 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.097760 31846 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.097765 31846 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.097770 31846 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.097776 31846 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.097781 31846 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.097787 31846 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.097792 31846 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:45:15.097798 31846 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:45:15.097805 31846 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.097810 31846 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.097815 31846 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.097821 31846 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.097826 31846 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.097831 31846 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:45:15.097836 31846 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.097842 31846 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.097847 31846 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:45:15.097852 31846 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:45:15.097858 31846 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:45:15.097864 31846 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.097869 31846 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.097874 31846 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.097880 31846 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.097885 31846 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.097890 31846 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.097895 31846 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.097901 31846 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:45:15.097906 31846 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:45:15.097913 31846 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.097918 31846 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.097923 31846 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.097929 31846 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.097934 31846 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.097939 31846 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.097950 31846 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.097956 31846 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.097962 31846 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:45:15.097967 31846 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:45:15.097972 31846 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:45:15.097978 31846 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.097985 31846 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.097990 31846 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.097995 31846 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.098001 31846 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.098006 31846 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.098011 31846 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.098016 31846 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:45:15.098021 31846 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:45:15.098026 31846 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.098032 31846 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.098038 31846 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.098043 31846 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.098052 31846 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.098057 31846 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.098062 31846 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.098068 31846 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.098073 31846 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:45:15.098079 31846 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:45:15.098085 31846 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:45:15.098091 31846 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.098096 31846 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.098103 31846 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.098107 31846 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.098114 31846 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.098119 31846 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.098124 31846 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.098130 31846 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:45:15.098135 31846 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:45:15.098141 31846 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.098147 31846 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.098152 31846 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.098158 31846 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.098165 31846 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.098170 31846 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.098175 31846 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.098181 31846 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.098186 31846 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:45:15.098191 31846 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:45:15.098198 31846 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:45:15.098203 31846 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:45:15.098209 31846 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.098220 31846 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:45:15.098227 31846 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.098232 31846 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.098238 31846 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.098243 31846 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.098249 31846 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:45:15.098254 31846 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.098260 31846 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.098265 31846 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:45:15.098271 31846 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:45:15.098276 31846 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.098282 31846 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.098289 31846 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.098294 31846 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.098299 31846 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.098304 31846 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.098310 31846 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.098315 31846 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.098321 31846 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:45:15.098327 31846 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:45:15.098332 31846 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.098340 31846 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.098345 31846 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.098351 31846 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.098356 31846 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.098362 31846 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:45:15.098367 31846 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.098373 31846 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.098379 31846 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:45:15.098386 31846 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:45:15.098392 31846 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:45:15.098397 31846 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.098402 31846 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.098408 31846 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.098414 31846 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.098419 31846 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.098433 31846 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.098438 31846 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.098444 31846 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:45:15.098450 31846 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:45:15.098455 31846 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.098462 31846 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.098467 31846 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.098474 31846 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.098479 31846 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.098484 31846 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.098490 31846 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.098501 31846 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.098508 31846 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:45:15.098513 31846 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:45:15.098520 31846 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:45:15.098526 31846 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.098532 31846 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.098537 31846 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.098543 31846 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.098549 31846 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.098554 31846 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.098561 31846 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.098565 31846 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:45:15.098572 31846 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:45:15.098577 31846 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.098583 31846 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.098589 31846 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.098595 31846 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.098601 31846 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.098606 31846 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.098613 31846 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.098618 31846 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.098623 31846 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:45:15.098629 31846 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:45:15.098635 31846 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:45:15.098641 31846 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.098646 31846 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.098652 31846 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.098659 31846 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.098664 31846 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.098670 31846 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.098675 31846 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.098680 31846 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:45:15.098686 31846 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:45:15.098692 31846 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.098698 31846 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.098703 31846 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.098709 31846 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.098716 31846 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.098721 31846 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.098726 31846 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.098732 31846 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.098737 31846 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:45:15.098743 31846 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:45:15.098749 31846 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.098758 31846 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.098764 31846 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.098770 31846 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.098780 31846 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.098786 31846 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:45:15.098793 31846 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.098798 31846 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.098804 31846 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:45:15.098810 31846 net.cpp:226] pre_relu needs backward computation.\nI0821 06:45:15.098815 31846 net.cpp:226] pre_scale needs backward computation.\nI0821 06:45:15.098820 31846 net.cpp:226] pre_bn needs backward computation.\nI0821 06:45:15.098826 31846 net.cpp:226] pre_conv needs backward computation.\nI0821 06:45:15.098832 31846 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:45:15.098839 31846 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:45:15.098843 31846 net.cpp:270] This network produces output accuracy\nI0821 06:45:15.098850 31846 net.cpp:270] This network produces output loss\nI0821 06:45:15.099218 31846 net.cpp:283] Network initialization done.\nI0821 06:45:15.108480 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:15.108520 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:15.108579 31846 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:45:15.108958 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:45:15.108976 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:45:15.108986 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:45:15.108996 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:45:15.109005 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:45:15.109014 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:45:15.109024 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:45:15.109031 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:45:15.109041 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:45:15.109050 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:45:15.109058 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:45:15.109066 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:45:15.109076 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:45:15.109084 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:45:15.109093 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:45:15.109102 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:45:15.109110 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:45:15.109119 31846 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:45:15.109128 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:45:15.109146 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:45:15.109156 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:45:15.109165 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:45:15.109177 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:45:15.109186 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:45:15.109196 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:45:15.109205 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:45:15.109212 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:45:15.109221 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:45:15.109230 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:45:15.109238 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:45:15.109247 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:45:15.109256 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:45:15.109266 31846 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:45:15.109273 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:45:15.109282 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:45:15.109290 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:45:15.109299 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:45:15.109308 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:45:15.109316 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:45:15.109324 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:45:15.109336 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:45:15.109345 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:45:15.109354 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:45:15.109361 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:45:15.109370 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:45:15.109380 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:45:15.109388 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:45:15.109395 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:45:15.109405 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:45:15.109412 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:45:15.109438 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:45:15.109448 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:45:15.109457 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:45:15.109465 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:45:15.109475 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:45:15.109483 31846 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:45:15.111130 31846 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:45:15.112793 31846 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:45:15.113042 31846 net.cpp:100] Creating Layer dataLayer\nI0821 06:45:15.113060 31846 net.cpp:408] dataLayer -> data_top\nI0821 06:45:15.113080 31846 net.cpp:408] dataLayer -> label\nI0821 06:45:15.113093 31846 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:45:15.180018 31853 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:45:15.180311 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:15.188204 31846 net.cpp:150] Setting up dataLayer\nI0821 06:45:15.188225 31846 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:45:15.188232 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:15.188238 31846 net.cpp:165] Memory required for data: 1536500\nI0821 06:45:15.188244 31846 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:45:15.188254 31846 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:45:15.188261 31846 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:45:15.188289 31846 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:45:15.188305 31846 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:45:15.188395 31846 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:45:15.188408 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:15.188417 31846 net.cpp:157] Top shape: 125 (125)\nI0821 06:45:15.188429 31846 net.cpp:165] Memory required for data: 1537500\nI0821 06:45:15.188436 31846 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:45:15.188455 31846 net.cpp:100] Creating Layer pre_conv\nI0821 06:45:15.188462 31846 net.cpp:434] pre_conv <- data_top\nI0821 06:45:15.188475 31846 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:45:15.188987 31846 net.cpp:150] Setting up pre_conv\nI0821 06:45:15.189013 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189019 31846 net.cpp:165] Memory required for data: 9729500\nI0821 06:45:15.189034 31846 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:45:15.189044 31846 net.cpp:100] Creating Layer pre_bn\nI0821 06:45:15.189050 31846 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:45:15.189064 31846 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:45:15.189389 31846 net.cpp:150] Setting up pre_bn\nI0821 06:45:15.189404 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189412 31846 net.cpp:165] Memory required for data: 17921500\nI0821 06:45:15.189435 31846 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:15.189448 31846 net.cpp:100] Creating Layer pre_scale\nI0821 06:45:15.189455 31846 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:45:15.189462 31846 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:45:15.189528 31846 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:45:15.189728 31846 net.cpp:150] Setting up pre_scale\nI0821 06:45:15.189745 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189752 31846 net.cpp:165] Memory required for data: 26113500\nI0821 06:45:15.189761 31846 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:45:15.189774 31846 net.cpp:100] Creating Layer pre_relu\nI0821 06:45:15.189779 31846 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:45:15.189790 31846 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:45:15.189800 31846 net.cpp:150] Setting up pre_relu\nI0821 06:45:15.189807 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189812 31846 net.cpp:165] Memory required for data: 
34305500\nI0821 06:45:15.189816 31846 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:45:15.189826 31846 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:45:15.189832 31846 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:45:15.189839 31846 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:45:15.189848 31846 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:45:15.189903 31846 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:45:15.189915 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189923 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.189926 31846 net.cpp:165] Memory required for data: 50689500\nI0821 06:45:15.189931 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:45:15.189949 31846 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:45:15.189955 31846 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:45:15.189965 31846 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:45:15.190362 31846 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:45:15.190378 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.190383 31846 net.cpp:165] Memory required for data: 58881500\nI0821 06:45:15.190395 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:45:15.190412 31846 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:45:15.190419 31846 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:45:15.190443 31846 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:45:15.190762 31846 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:45:15.190775 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.190780 31846 net.cpp:165] Memory required for data: 67073500\nI0821 06:45:15.190791 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:15.190804 31846 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:45:15.190810 31846 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:45:15.190819 31846 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.190886 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:45:15.191323 31846 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:45:15.191337 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.191344 31846 net.cpp:165] Memory required for data: 75265500\nI0821 06:45:15.191364 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:45:15.191371 31846 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:45:15.191380 31846 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:45:15.191391 31846 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.191401 31846 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:45:15.191408 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.191414 31846 net.cpp:165] Memory required for data: 83457500\nI0821 06:45:15.191419 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:45:15.191438 31846 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:45:15.191448 31846 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:45:15.191457 31846 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:45:15.191882 31846 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:45:15.191898 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.191905 31846 net.cpp:165] Memory required for data: 91649500\nI0821 06:45:15.191915 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:45:15.191931 31846 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:45:15.191936 31846 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:45:15.191944 31846 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:45:15.192261 31846 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:45:15.192278 31846 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192284 31846 net.cpp:165] Memory required for data: 99841500\nI0821 06:45:15.192302 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:15.192312 31846 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:45:15.192317 31846 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:45:15.192325 31846 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.192394 31846 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:45:15.192580 31846 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:45:15.192595 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192601 31846 net.cpp:165] Memory required for data: 108033500\nI0821 06:45:15.192610 31846 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:45:15.192623 31846 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:45:15.192629 31846 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:45:15.192636 31846 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:45:15.192646 31846 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:45:15.192689 31846 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:45:15.192699 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192704 31846 net.cpp:165] Memory required for data: 116225500\nI0821 06:45:15.192709 31846 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:45:15.192721 31846 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:45:15.192728 31846 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:45:15.192734 31846 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.192744 31846 net.cpp:150] Setting up L1_b1_relu\nI0821 06:45:15.192750 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192755 31846 net.cpp:165] Memory required for data: 124417500\nI0821 06:45:15.192759 31846 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.192769 31846 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.192773 31846 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:45:15.192785 31846 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:15.192793 31846 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:15.192848 31846 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:45:15.192860 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192876 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.192881 31846 net.cpp:165] Memory required for data: 140801500\nI0821 06:45:15.192888 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:45:15.192901 31846 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:45:15.192908 31846 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:45:15.192919 31846 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:45:15.193330 31846 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:45:15.193346 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.193351 31846 net.cpp:165] Memory required for data: 148993500\nI0821 06:45:15.193361 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:45:15.193373 31846 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:45:15.193379 31846 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:45:15.193388 31846 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:45:15.193712 31846 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:45:15.193728 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.193733 31846 net.cpp:165] Memory required for data: 157185500\nI0821 06:45:15.193744 31846 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:45:15.193755 31846 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:45:15.193761 31846 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:45:15.193769 31846 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.193840 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:45:15.194038 31846 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:45:15.194053 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.194061 31846 net.cpp:165] Memory required for data: 165377500\nI0821 06:45:15.194070 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:45:15.194082 31846 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:45:15.194087 31846 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:45:15.194097 31846 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.194108 31846 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:45:15.194115 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.194119 31846 net.cpp:165] Memory required for data: 173569500\nI0821 06:45:15.194124 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:45:15.194141 31846 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:45:15.194147 31846 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:45:15.194160 31846 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:45:15.194767 31846 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:45:15.194782 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.194787 31846 net.cpp:165] Memory required for data: 181761500\nI0821 06:45:15.194797 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:45:15.194809 31846 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:45:15.194819 31846 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:45:15.194828 31846 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:45:15.195142 31846 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:45:15.195159 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195166 31846 net.cpp:165] Memory required for data: 189953500\nI0821 06:45:15.195183 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:15.195192 31846 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:45:15.195199 31846 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:45:15.195209 31846 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.195277 31846 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:45:15.195462 31846 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:45:15.195482 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195488 31846 net.cpp:165] Memory required for data: 198145500\nI0821 06:45:15.195505 31846 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:45:15.195518 31846 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:45:15.195524 31846 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:45:15.195531 31846 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:45:15.195539 31846 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:45:15.195581 31846 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:45:15.195591 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195595 31846 net.cpp:165] Memory required for data: 206337500\nI0821 06:45:15.195600 31846 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:45:15.195611 31846 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:45:15.195617 31846 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:45:15.195624 31846 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.195634 31846 net.cpp:150] Setting up L1_b2_relu\nI0821 06:45:15.195643 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195648 31846 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:45:15.195653 31846 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.195667 31846 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.195672 31846 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:45:15.195683 31846 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:15.195693 31846 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:15.195744 31846 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:45:15.195757 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195765 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.195770 31846 net.cpp:165] Memory required for data: 230913500\nI0821 06:45:15.195776 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:45:15.195788 31846 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:45:15.195794 31846 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:45:15.195803 31846 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:45:15.196213 31846 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:45:15.196230 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.196235 31846 net.cpp:165] Memory required for data: 239105500\nI0821 06:45:15.196244 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:45:15.196256 31846 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:45:15.196264 31846 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:45:15.196270 31846 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:45:15.196625 31846 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:45:15.196641 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.196646 31846 net.cpp:165] Memory required for data: 
247297500\nI0821 06:45:15.196657 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:15.196666 31846 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:45:15.196672 31846 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:45:15.196691 31846 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.196763 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:45:15.196940 31846 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:45:15.196954 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.196959 31846 net.cpp:165] Memory required for data: 255489500\nI0821 06:45:15.196969 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:45:15.196980 31846 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:45:15.196986 31846 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:45:15.196993 31846 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.197013 31846 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:45:15.197022 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.197026 31846 net.cpp:165] Memory required for data: 263681500\nI0821 06:45:15.197031 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:45:15.197048 31846 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:45:15.197055 31846 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:45:15.197069 31846 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:45:15.197656 31846 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:45:15.197670 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.197676 31846 net.cpp:165] Memory required for data: 271873500\nI0821 06:45:15.197685 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:45:15.197706 31846 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:45:15.197715 31846 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:45:15.197722 31846 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:45:15.198047 31846 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:45:15.198062 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.198067 31846 net.cpp:165] Memory required for data: 280065500\nI0821 06:45:15.198077 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:15.198086 31846 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:45:15.198092 31846 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:45:15.198103 31846 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.198175 31846 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:45:15.198348 31846 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:45:15.198367 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.198372 31846 net.cpp:165] Memory required for data: 288257500\nI0821 06:45:15.198381 31846 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:45:15.198393 31846 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:45:15.198400 31846 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:45:15.198406 31846 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:45:15.198413 31846 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:45:15.198460 31846 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:45:15.198472 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.198477 31846 net.cpp:165] Memory required for data: 296449500\nI0821 06:45:15.198484 31846 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:45:15.198495 31846 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:45:15.198501 31846 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:45:15.198508 31846 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.198520 31846 net.cpp:150] Setting up L1_b3_relu\nI0821 06:45:15.198527 31846 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:45:15.198532 31846 net.cpp:165] Memory required for data: 304641500\nI0821 06:45:15.198536 31846 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.198546 31846 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.198555 31846 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:45:15.198563 31846 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:15.198572 31846 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:15.198626 31846 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:45:15.198640 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.198647 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.198654 31846 net.cpp:165] Memory required for data: 321025500\nI0821 06:45:15.198660 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:45:15.198671 31846 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:45:15.198684 31846 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:45:15.198693 31846 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:45:15.199056 31846 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:45:15.199071 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.199076 31846 net.cpp:165] Memory required for data: 329217500\nI0821 06:45:15.199085 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:45:15.199100 31846 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:45:15.199105 31846 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:45:15.199116 31846 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:45:15.199385 31846 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:45:15.199398 31846 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:45:15.199404 31846 net.cpp:165] Memory required for data: 337409500\nI0821 06:45:15.199414 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:15.199427 31846 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:45:15.199434 31846 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:45:15.199445 31846 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.199502 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:45:15.199659 31846 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:45:15.199677 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.199683 31846 net.cpp:165] Memory required for data: 345601500\nI0821 06:45:15.199692 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:45:15.199700 31846 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:45:15.199705 31846 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:45:15.199712 31846 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.199721 31846 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:45:15.199728 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.199733 31846 net.cpp:165] Memory required for data: 353793500\nI0821 06:45:15.199738 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:45:15.199751 31846 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:45:15.199757 31846 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:45:15.199769 31846 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:45:15.200115 31846 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:45:15.200129 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.200134 31846 net.cpp:165] Memory required for data: 361985500\nI0821 06:45:15.200143 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:45:15.200155 31846 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:45:15.200161 31846 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:45:15.200172 31846 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:45:15.200484 31846 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:45:15.200498 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.200503 31846 net.cpp:165] Memory required for data: 370177500\nI0821 06:45:15.200515 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:15.200523 31846 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:45:15.200529 31846 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:45:15.200539 31846 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.200598 31846 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:45:15.200765 31846 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:45:15.200783 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.200788 31846 net.cpp:165] Memory required for data: 378369500\nI0821 06:45:15.200798 31846 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:45:15.200806 31846 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:45:15.200812 31846 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:45:15.200819 31846 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:45:15.200834 31846 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:45:15.200872 31846 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:45:15.200882 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.200887 31846 net.cpp:165] Memory required for data: 386561500\nI0821 06:45:15.200892 31846 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:45:15.200902 31846 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:45:15.200908 31846 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:45:15.200915 31846 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.200924 31846 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:45:15.200930 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.200935 31846 net.cpp:165] Memory required for data: 394753500\nI0821 06:45:15.200940 31846 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.200949 31846 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.200955 31846 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:45:15.200963 31846 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:15.200973 31846 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:15.201016 31846 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:45:15.201031 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.201038 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.201043 31846 net.cpp:165] Memory required for data: 411137500\nI0821 06:45:15.201048 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:45:15.201058 31846 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:45:15.201064 31846 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:45:15.201073 31846 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:45:15.201434 31846 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:45:15.201449 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.201455 31846 net.cpp:165] Memory required for data: 419329500\nI0821 06:45:15.201478 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:45:15.201489 31846 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:45:15.201496 31846 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:45:15.201504 31846 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:45:15.201776 31846 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:45:15.201789 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.201794 31846 net.cpp:165] Memory required for data: 427521500\nI0821 06:45:15.201805 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:15.201813 31846 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:45:15.201819 31846 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:45:15.201829 31846 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.201886 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:45:15.202044 31846 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:45:15.202059 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.202064 31846 net.cpp:165] Memory required for data: 435713500\nI0821 06:45:15.202074 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:45:15.202081 31846 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:45:15.202087 31846 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:45:15.202095 31846 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.202103 31846 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:45:15.202111 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.202114 31846 net.cpp:165] Memory required for data: 443905500\nI0821 06:45:15.202119 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:45:15.202141 31846 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:45:15.202147 31846 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:45:15.202158 31846 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:45:15.202515 31846 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:45:15.202530 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.202535 31846 net.cpp:165] Memory required for data: 452097500\nI0821 06:45:15.202544 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:45:15.202570 31846 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:45:15.202579 31846 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:45:15.202590 31846 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:45:15.202867 31846 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:45:15.202880 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.202885 31846 net.cpp:165] Memory required for data: 460289500\nI0821 06:45:15.202895 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:15.202903 31846 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:45:15.202909 31846 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:45:15.202917 31846 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.202978 31846 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:45:15.203135 31846 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:45:15.203147 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203152 31846 net.cpp:165] Memory required for data: 468481500\nI0821 06:45:15.203161 31846 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:45:15.203172 31846 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:45:15.203179 31846 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:45:15.203186 31846 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:45:15.203193 31846 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:45:15.203229 31846 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:45:15.203241 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203246 31846 net.cpp:165] Memory required for data: 476673500\nI0821 06:45:15.203251 31846 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:45:15.203259 31846 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:45:15.203264 31846 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:45:15.203274 31846 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.203284 31846 net.cpp:150] Setting up L1_b5_relu\nI0821 06:45:15.203291 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203295 31846 net.cpp:165] Memory required for data: 484865500\nI0821 06:45:15.203300 31846 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.203307 31846 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.203312 31846 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:45:15.203322 31846 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:15.203332 31846 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:15.203379 31846 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:45:15.203390 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203397 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203402 31846 net.cpp:165] Memory required for data: 501249500\nI0821 06:45:15.203407 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:45:15.203426 31846 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:45:15.203433 31846 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:45:15.203444 31846 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:45:15.203804 31846 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:45:15.203819 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.203830 31846 net.cpp:165] Memory required for data: 509441500\nI0821 06:45:15.203840 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:45:15.203851 31846 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:45:15.203857 31846 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:45:15.203867 31846 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:45:15.204138 31846 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:45:15.204152 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.204157 31846 net.cpp:165] Memory required for data: 517633500\nI0821 06:45:15.204166 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:15.204174 31846 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:45:15.204180 31846 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:45:15.204191 31846 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.204251 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:45:15.204435 31846 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:45:15.204453 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.204458 31846 net.cpp:165] Memory required for data: 525825500\nI0821 06:45:15.204468 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:45:15.204475 31846 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:45:15.204481 31846 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:45:15.204488 31846 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.204499 31846 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:45:15.204505 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.204509 31846 net.cpp:165] Memory required for data: 534017500\nI0821 06:45:15.204514 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:45:15.204527 31846 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:45:15.204533 31846 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:45:15.204545 31846 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:45:15.204900 31846 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:45:15.204913 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.204918 31846 net.cpp:165] Memory required for data: 542209500\nI0821 06:45:15.204927 31846 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:45:15.204938 31846 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:45:15.204944 31846 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:45:15.204955 31846 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:45:15.205229 31846 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:45:15.205241 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205246 31846 net.cpp:165] Memory required for data: 550401500\nI0821 06:45:15.205256 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:15.205265 31846 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:45:15.205271 31846 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:45:15.205278 31846 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.205338 31846 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:45:15.205500 31846 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:45:15.205514 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205519 31846 net.cpp:165] Memory required for data: 558593500\nI0821 06:45:15.205528 31846 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:45:15.205545 31846 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:45:15.205551 31846 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:45:15.205559 31846 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:45:15.205569 31846 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:45:15.205605 31846 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:45:15.205618 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205623 31846 net.cpp:165] Memory required for data: 566785500\nI0821 06:45:15.205636 31846 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:45:15.205643 31846 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:45:15.205649 31846 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:45:15.205657 31846 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.205665 31846 net.cpp:150] Setting up L1_b6_relu\nI0821 06:45:15.205672 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205677 31846 net.cpp:165] Memory required for data: 574977500\nI0821 06:45:15.205682 31846 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.205691 31846 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.205698 31846 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:45:15.205704 31846 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:15.205713 31846 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:15.205763 31846 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:45:15.205775 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205782 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.205787 31846 net.cpp:165] Memory required for data: 591361500\nI0821 06:45:15.205792 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:45:15.205802 31846 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:45:15.205808 31846 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:45:15.205821 31846 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:45:15.206181 31846 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:45:15.206194 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.206199 31846 net.cpp:165] Memory required for data: 599553500\nI0821 06:45:15.206207 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:45:15.206217 31846 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:45:15.206223 31846 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:45:15.206230 31846 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:45:15.206672 31846 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:45:15.206691 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.206696 31846 net.cpp:165] Memory required for data: 607745500\nI0821 06:45:15.206707 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:15.206718 31846 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:45:15.206725 31846 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:45:15.206732 31846 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.206795 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:45:15.206959 31846 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:45:15.206972 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.206977 31846 net.cpp:165] Memory required for data: 615937500\nI0821 06:45:15.206986 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:45:15.206995 31846 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:45:15.207000 31846 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:45:15.207010 31846 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.207020 31846 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:45:15.207027 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.207032 31846 net.cpp:165] Memory required for data: 624129500\nI0821 06:45:15.207037 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:45:15.207051 31846 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:45:15.207056 31846 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:45:15.207065 31846 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:45:15.207427 31846 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:45:15.207442 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.207454 31846 
net.cpp:165] Memory required for data: 632321500\nI0821 06:45:15.207464 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:45:15.207476 31846 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:45:15.207482 31846 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:45:15.207491 31846 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:45:15.207767 31846 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:45:15.207782 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.207785 31846 net.cpp:165] Memory required for data: 640513500\nI0821 06:45:15.207797 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:15.207804 31846 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:45:15.207810 31846 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:45:15.207821 31846 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.207880 31846 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:45:15.208041 31846 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:45:15.208055 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208060 31846 net.cpp:165] Memory required for data: 648705500\nI0821 06:45:15.208068 31846 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:45:15.208076 31846 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:45:15.208082 31846 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:45:15.208089 31846 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:45:15.208101 31846 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:45:15.208134 31846 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:45:15.208147 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208151 31846 net.cpp:165] Memory required for data: 656897500\nI0821 06:45:15.208158 31846 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:45:15.208164 31846 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:45:15.208170 31846 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:45:15.208178 31846 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.208186 31846 net.cpp:150] Setting up L1_b7_relu\nI0821 06:45:15.208192 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208197 31846 net.cpp:165] Memory required for data: 665089500\nI0821 06:45:15.208201 31846 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.208211 31846 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.208217 31846 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:45:15.208225 31846 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:15.208233 31846 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:15.208279 31846 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:45:15.208294 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208302 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208305 31846 net.cpp:165] Memory required for data: 681473500\nI0821 06:45:15.208310 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:45:15.208322 31846 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:45:15.208328 31846 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:45:15.208335 31846 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:45:15.208700 31846 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:45:15.208715 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.208720 31846 net.cpp:165] Memory required for data: 689665500\nI0821 06:45:15.208729 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:45:15.208740 31846 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:45:15.208747 31846 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:45:15.208761 31846 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:45:15.209041 31846 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:45:15.209054 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.209059 31846 net.cpp:165] Memory required for data: 697857500\nI0821 06:45:15.209069 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:15.209080 31846 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:45:15.209087 31846 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:45:15.209095 31846 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.209151 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:45:15.209314 31846 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:45:15.209327 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.209332 31846 net.cpp:165] Memory required for data: 706049500\nI0821 06:45:15.209341 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:45:15.209348 31846 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:45:15.209354 31846 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:45:15.209364 31846 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.209374 31846 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:45:15.209381 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.209386 31846 net.cpp:165] Memory required for data: 714241500\nI0821 06:45:15.209390 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:45:15.209403 31846 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:45:15.209409 31846 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:45:15.209419 31846 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:45:15.209784 31846 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:45:15.209800 31846 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.209805 31846 net.cpp:165] Memory required for data: 722433500\nI0821 06:45:15.209813 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:45:15.209825 31846 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:45:15.209831 31846 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:45:15.209839 31846 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:45:15.210119 31846 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:45:15.210131 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210137 31846 net.cpp:165] Memory required for data: 730625500\nI0821 06:45:15.210146 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:15.210155 31846 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:45:15.210160 31846 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:45:15.210171 31846 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.210229 31846 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:45:15.210389 31846 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:45:15.210405 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210412 31846 net.cpp:165] Memory required for data: 738817500\nI0821 06:45:15.210419 31846 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:45:15.210435 31846 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:45:15.210441 31846 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:45:15.210448 31846 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:45:15.210456 31846 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:45:15.210494 31846 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:45:15.210506 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210511 31846 net.cpp:165] Memory required for data: 747009500\nI0821 06:45:15.210516 31846 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:45:15.210526 31846 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:45:15.210532 31846 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:45:15.210539 31846 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.210556 31846 net.cpp:150] Setting up L1_b8_relu\nI0821 06:45:15.210563 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210567 31846 net.cpp:165] Memory required for data: 755201500\nI0821 06:45:15.210572 31846 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.210582 31846 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.210587 31846 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:45:15.210595 31846 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:15.210604 31846 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:15.210651 31846 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:45:15.210665 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210672 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.210677 31846 net.cpp:165] Memory required for data: 771585500\nI0821 06:45:15.210682 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:45:15.210692 31846 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:45:15.210698 31846 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:45:15.210707 31846 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:45:15.211076 31846 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:45:15.211091 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.211096 31846 net.cpp:165] Memory required for data: 779777500\nI0821 06:45:15.211104 31846 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:45:15.211117 31846 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:45:15.211122 31846 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:45:15.211130 31846 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:45:15.211407 31846 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:45:15.211428 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.211434 31846 net.cpp:165] Memory required for data: 787969500\nI0821 06:45:15.211446 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:15.211454 31846 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:45:15.211460 31846 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:45:15.211468 31846 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.211527 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:45:15.211690 31846 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:45:15.211725 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.211731 31846 net.cpp:165] Memory required for data: 796161500\nI0821 06:45:15.211740 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:45:15.211752 31846 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:45:15.211758 31846 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:45:15.211766 31846 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.211776 31846 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:45:15.211787 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.211792 31846 net.cpp:165] Memory required for data: 804353500\nI0821 06:45:15.211796 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:45:15.211807 31846 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:45:15.211813 31846 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:45:15.211824 31846 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:45:15.212180 31846 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:45:15.212195 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.212200 31846 net.cpp:165] Memory required for data: 812545500\nI0821 06:45:15.212208 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:45:15.212218 31846 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:45:15.212224 31846 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:45:15.212244 31846 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:45:15.212534 31846 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:45:15.212548 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.212553 31846 net.cpp:165] Memory required for data: 820737500\nI0821 06:45:15.212584 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:15.212596 31846 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:45:15.212602 31846 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:45:15.212610 31846 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.212669 31846 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:45:15.212828 31846 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:45:15.212841 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.212846 31846 net.cpp:165] Memory required for data: 828929500\nI0821 06:45:15.212855 31846 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:45:15.212864 31846 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:45:15.212870 31846 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:45:15.212877 31846 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:45:15.212888 31846 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:45:15.212923 31846 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:45:15.212935 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.212940 31846 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:45:15.212945 31846 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:45:15.212954 31846 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:45:15.212960 31846 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:45:15.212966 31846 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.212975 31846 net.cpp:150] Setting up L1_b9_relu\nI0821 06:45:15.212981 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.212986 31846 net.cpp:165] Memory required for data: 845313500\nI0821 06:45:15.212991 31846 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.213001 31846 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.213006 31846 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:45:15.213013 31846 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:15.213023 31846 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:15.213078 31846 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:45:15.213089 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.213095 31846 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:45:15.213100 31846 net.cpp:165] Memory required for data: 861697500\nI0821 06:45:15.213105 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:45:15.213115 31846 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:45:15.213121 31846 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:45:15.213134 31846 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:45:15.213501 31846 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:45:15.213515 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.213521 31846 net.cpp:165] Memory required for data: 
863745500\nI0821 06:45:15.213529 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:45:15.213538 31846 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:45:15.213544 31846 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:45:15.213555 31846 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:45:15.213824 31846 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:45:15.213837 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.213841 31846 net.cpp:165] Memory required for data: 865793500\nI0821 06:45:15.213852 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:15.213870 31846 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:45:15.213876 31846 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:45:15.213884 31846 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.213945 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:45:15.214110 31846 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:45:15.214123 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.214128 31846 net.cpp:165] Memory required for data: 867841500\nI0821 06:45:15.214138 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:45:15.214148 31846 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:45:15.214154 31846 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:45:15.214161 31846 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.214170 31846 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:45:15.214177 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.214181 31846 net.cpp:165] Memory required for data: 869889500\nI0821 06:45:15.214186 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:45:15.214200 31846 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:45:15.214205 31846 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:45:15.214216 31846 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:45:15.214576 31846 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:45:15.214589 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.214594 31846 net.cpp:165] Memory required for data: 871937500\nI0821 06:45:15.214603 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:45:15.214612 31846 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:45:15.214619 31846 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:45:15.214627 31846 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:45:15.214895 31846 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:45:15.214907 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.214912 31846 net.cpp:165] Memory required for data: 873985500\nI0821 06:45:15.214922 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:15.214931 31846 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:45:15.214936 31846 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:45:15.214947 31846 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.215008 31846 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:45:15.215165 31846 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:45:15.215178 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.215183 31846 net.cpp:165] Memory required for data: 876033500\nI0821 06:45:15.215193 31846 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:45:15.215201 31846 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:45:15.215209 31846 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:45:15.215219 31846 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:45:15.215250 31846 net.cpp:150] Setting up L2_b1_pool\nI0821 06:45:15.215261 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.215266 31846 net.cpp:165] Memory required for data: 878081500\nI0821 06:45:15.215271 31846 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:45:15.215279 31846 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:45:15.215286 31846 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:45:15.215291 31846 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:45:15.215299 31846 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:45:15.215334 31846 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:45:15.215346 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.215350 31846 net.cpp:165] Memory required for data: 880129500\nI0821 06:45:15.215356 31846 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:45:15.215363 31846 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:45:15.215375 31846 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:45:15.215384 31846 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.215392 31846 net.cpp:150] Setting up L2_b1_relu\nI0821 06:45:15.215399 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.215404 31846 net.cpp:165] Memory required for data: 882177500\nI0821 06:45:15.215409 31846 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:45:15.215420 31846 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:45:15.215435 31846 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:45:15.217665 31846 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:45:15.217687 31846 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:45:15.217692 31846 net.cpp:165] Memory required for data: 884225500\nI0821 06:45:15.217699 31846 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:45:15.217708 31846 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:45:15.217715 31846 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:45:15.217722 31846 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:45:15.217730 31846 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:45:15.217777 31846 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:45:15.217788 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.217793 31846 net.cpp:165] Memory required for data: 888321500\nI0821 06:45:15.217798 31846 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.217808 31846 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.217814 31846 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:45:15.217821 31846 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:15.217831 31846 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:15.217885 31846 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:45:15.217895 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.217901 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.217906 31846 net.cpp:165] Memory required for data: 896513500\nI0821 06:45:15.217911 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:45:15.217926 31846 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:45:15.217931 31846 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:45:15.217941 31846 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:45:15.218456 31846 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:45:15.218472 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.218477 31846 net.cpp:165] Memory required for data: 900609500\nI0821 06:45:15.218485 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:45:15.218498 31846 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:45:15.218504 31846 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:45:15.218515 31846 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:45:15.218786 31846 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:45:15.218798 31846 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:45:15.218803 31846 net.cpp:165] Memory required for data: 904705500\nI0821 06:45:15.218814 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:15.218822 31846 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:45:15.218828 31846 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:45:15.218837 31846 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.218897 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:45:15.219054 31846 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:45:15.219066 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.219071 31846 net.cpp:165] Memory required for data: 908801500\nI0821 06:45:15.219080 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:45:15.219090 31846 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:45:15.219105 31846 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:45:15.219113 31846 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.219123 31846 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:45:15.219130 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.219135 31846 net.cpp:165] Memory required for data: 912897500\nI0821 06:45:15.219141 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:45:15.219154 31846 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:45:15.219161 31846 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:45:15.219168 31846 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:45:15.219715 31846 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:45:15.219732 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.219738 31846 net.cpp:165] Memory required for data: 916993500\nI0821 06:45:15.219745 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:45:15.219760 31846 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:45:15.219768 
31846 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:45:15.219775 31846 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:45:15.220041 31846 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:45:15.220057 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220062 31846 net.cpp:165] Memory required for data: 921089500\nI0821 06:45:15.220072 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:15.220082 31846 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:45:15.220088 31846 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:45:15.220095 31846 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.220154 31846 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:45:15.220316 31846 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:45:15.220329 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220335 31846 net.cpp:165] Memory required for data: 925185500\nI0821 06:45:15.220343 31846 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:45:15.220352 31846 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:45:15.220358 31846 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:45:15.220366 31846 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:45:15.220376 31846 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:45:15.220404 31846 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:45:15.220413 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220418 31846 net.cpp:165] Memory required for data: 929281500\nI0821 06:45:15.220430 31846 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:45:15.220443 31846 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:45:15.220448 31846 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:45:15.220455 31846 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.220465 31846 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:45:15.220473 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220476 31846 net.cpp:165] Memory required for data: 933377500\nI0821 06:45:15.220481 31846 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.220489 31846 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.220494 31846 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:45:15.220501 31846 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:15.220510 31846 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:15.220561 31846 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:45:15.220573 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220580 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.220584 31846 net.cpp:165] Memory required for data: 941569500\nI0821 06:45:15.220597 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:45:15.220612 31846 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:45:15.220618 31846 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:45:15.220628 31846 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:45:15.221123 31846 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:45:15.221138 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.221143 31846 net.cpp:165] Memory required for data: 945665500\nI0821 06:45:15.221151 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:45:15.221163 31846 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:45:15.221170 31846 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:45:15.221181 31846 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:45:15.221462 31846 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:45:15.221477 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.221482 31846 net.cpp:165] Memory required for data: 949761500\nI0821 06:45:15.221493 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:15.221500 31846 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:45:15.221508 31846 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:45:15.221514 31846 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.221575 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:45:15.221732 31846 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:45:15.221745 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.221750 31846 net.cpp:165] Memory required for data: 953857500\nI0821 06:45:15.221758 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:45:15.221767 31846 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:45:15.221776 31846 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:45:15.221783 31846 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.221792 31846 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:45:15.221799 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.221804 31846 net.cpp:165] Memory required for data: 957953500\nI0821 06:45:15.221809 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:45:15.221822 31846 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:45:15.221828 31846 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:45:15.221837 31846 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:45:15.222327 31846 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:45:15.222342 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.222347 31846 net.cpp:165] Memory required for data: 962049500\nI0821 06:45:15.222355 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:45:15.222368 31846 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:45:15.222374 31846 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:45:15.222383 31846 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:45:15.222662 31846 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:45:15.222679 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.222684 31846 net.cpp:165] Memory required for data: 966145500\nI0821 06:45:15.222695 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:15.222703 31846 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:45:15.222709 31846 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:45:15.222718 31846 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.222776 31846 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:45:15.222937 31846 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:45:15.222950 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.222955 31846 net.cpp:165] Memory required for data: 970241500\nI0821 06:45:15.222965 31846 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:45:15.222972 31846 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:45:15.222985 31846 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:45:15.222993 31846 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:45:15.223006 31846 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:45:15.223036 31846 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:45:15.223044 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.223049 31846 net.cpp:165] Memory required for data: 974337500\nI0821 06:45:15.223054 31846 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:45:15.223078 31846 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:45:15.223084 31846 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:45:15.223091 31846 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.223100 31846 net.cpp:150] Setting up L2_b3_relu\nI0821 06:45:15.223107 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.223112 31846 net.cpp:165] Memory required for data: 978433500\nI0821 06:45:15.223117 31846 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.223124 31846 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.223130 31846 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:45:15.223137 31846 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:15.223146 31846 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:15.223199 31846 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:45:15.223211 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.223217 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.223222 31846 net.cpp:165] Memory required for data: 986625500\nI0821 06:45:15.223227 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:45:15.223238 31846 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:45:15.223244 31846 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:45:15.223256 31846 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:45:15.223755 31846 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:45:15.223769 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.223774 31846 net.cpp:165] Memory required for data: 990721500\nI0821 06:45:15.223783 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:45:15.223793 31846 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:45:15.223798 31846 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:45:15.223810 31846 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:45:15.224086 31846 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:45:15.224098 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.224103 31846 net.cpp:165] Memory required for data: 994817500\nI0821 06:45:15.224113 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:15.224125 31846 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:45:15.224131 31846 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:45:15.224139 31846 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.224197 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:45:15.224357 31846 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:45:15.224371 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.224376 31846 net.cpp:165] Memory required for data: 998913500\nI0821 06:45:15.224385 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:45:15.224396 31846 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:45:15.224402 31846 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:45:15.224409 31846 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.224418 31846 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:45:15.224431 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.224437 31846 net.cpp:165] Memory required for data: 1003009500\nI0821 06:45:15.224452 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:45:15.224467 31846 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:45:15.224472 31846 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:45:15.224483 31846 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:45:15.224978 31846 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:45:15.224993 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.224998 31846 net.cpp:165] Memory required for data: 1007105500\nI0821 06:45:15.225006 31846 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:45:15.225015 31846 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:45:15.225023 31846 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:45:15.225033 31846 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:45:15.225306 31846 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:45:15.225318 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225323 31846 net.cpp:165] Memory required for data: 1011201500\nI0821 06:45:15.225334 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:15.225345 31846 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:45:15.225352 31846 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:45:15.225359 31846 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.225416 31846 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:45:15.225586 31846 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:45:15.225600 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225605 31846 net.cpp:165] Memory required for data: 1015297500\nI0821 06:45:15.225615 31846 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:45:15.225625 31846 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:45:15.225632 31846 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:45:15.225639 31846 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:45:15.225646 31846 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:45:15.225678 31846 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:45:15.225687 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225692 31846 net.cpp:165] Memory required for data: 1019393500\nI0821 06:45:15.225697 31846 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:45:15.225705 31846 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:45:15.225711 31846 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:45:15.225718 31846 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.225726 31846 net.cpp:150] Setting up L2_b4_relu\nI0821 06:45:15.225733 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225739 31846 net.cpp:165] Memory required for data: 1023489500\nI0821 06:45:15.225744 31846 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.225752 31846 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.225759 31846 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:45:15.225765 31846 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:15.225775 31846 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:15.225826 31846 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:45:15.225836 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225843 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.225848 31846 net.cpp:165] Memory required for data: 1031681500\nI0821 06:45:15.225853 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:45:15.225863 31846 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:45:15.225870 31846 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:45:15.225881 31846 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:45:15.226382 31846 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:45:15.226397 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.226402 31846 net.cpp:165] Memory required for data: 1035777500\nI0821 06:45:15.226409 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:45:15.226418 31846 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:45:15.226431 31846 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:45:15.226444 31846 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:45:15.226727 31846 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:45:15.226739 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.226744 31846 net.cpp:165] Memory required for data: 1039873500\nI0821 06:45:15.226755 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:15.226769 31846 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:45:15.226776 31846 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:45:15.226783 31846 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.226842 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:45:15.227005 31846 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:45:15.227020 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.227023 31846 net.cpp:165] Memory required for data: 1043969500\nI0821 06:45:15.227032 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:45:15.227043 31846 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:45:15.227051 31846 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:45:15.227057 31846 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.227067 31846 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:45:15.227073 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.227078 31846 net.cpp:165] Memory required for data: 1048065500\nI0821 06:45:15.227083 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:45:15.227097 31846 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:45:15.227102 31846 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:45:15.227114 31846 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:45:15.227612 31846 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:45:15.227627 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.227632 31846 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:45:15.227641 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:45:15.227650 31846 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:45:15.227656 31846 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:45:15.227664 31846 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:45:15.227936 31846 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:45:15.227949 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.227954 31846 net.cpp:165] Memory required for data: 1056257500\nI0821 06:45:15.227964 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:15.227973 31846 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:45:15.227979 31846 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:45:15.227989 31846 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.228047 31846 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:45:15.228204 31846 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:45:15.228217 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.228222 31846 net.cpp:165] Memory required for data: 1060353500\nI0821 06:45:15.228231 31846 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:45:15.228240 31846 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:45:15.228246 31846 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:45:15.228253 31846 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:45:15.228263 31846 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:45:15.228292 31846 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:45:15.228312 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.228317 31846 net.cpp:165] Memory required for data: 1064449500\nI0821 06:45:15.228322 31846 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:45:15.228330 31846 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:45:15.228335 31846 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:45:15.228343 31846 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.228351 31846 net.cpp:150] Setting up L2_b5_relu\nI0821 06:45:15.228358 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.228363 31846 net.cpp:165] Memory required for data: 1068545500\nI0821 06:45:15.228368 31846 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.228377 31846 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.228384 31846 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:45:15.228390 31846 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:15.228400 31846 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:15.228461 31846 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:45:15.228473 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.228479 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.228484 31846 net.cpp:165] Memory required for data: 1076737500\nI0821 06:45:15.228490 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:45:15.228502 31846 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:45:15.228508 31846 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:45:15.228519 31846 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:45:15.229019 31846 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:45:15.229033 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.229038 31846 net.cpp:165] Memory required for data: 1080833500\nI0821 06:45:15.229048 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:45:15.229056 31846 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:45:15.229063 31846 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:45:15.229075 31846 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:45:15.229342 31846 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:45:15.229357 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.229362 31846 net.cpp:165] Memory required for data: 1084929500\nI0821 06:45:15.229372 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:15.229383 31846 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:45:15.229389 31846 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:45:15.229398 31846 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.229461 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:45:15.229631 31846 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:45:15.229645 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.229650 31846 net.cpp:165] Memory required for data: 1089025500\nI0821 06:45:15.229660 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:45:15.229667 31846 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:45:15.229673 31846 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:45:15.229683 31846 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.229693 31846 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:45:15.229701 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.229706 31846 net.cpp:165] Memory required for data: 1093121500\nI0821 06:45:15.229709 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:45:15.229723 31846 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:45:15.229730 31846 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:45:15.229742 31846 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:45:15.230238 31846 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:45:15.230253 31846 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.230258 31846 net.cpp:165] Memory required for data: 1097217500\nI0821 06:45:15.230267 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:45:15.230275 31846 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:45:15.230281 31846 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:45:15.230289 31846 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:45:15.230571 31846 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:45:15.230585 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.230590 31846 net.cpp:165] Memory required for data: 1101313500\nI0821 06:45:15.230600 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:15.230609 31846 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:45:15.230615 31846 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:45:15.230626 31846 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.230684 31846 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:45:15.230842 31846 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:45:15.230855 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.230860 31846 net.cpp:165] Memory required for data: 1105409500\nI0821 06:45:15.230870 31846 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:45:15.230877 31846 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:45:15.230885 31846 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:45:15.230890 31846 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:45:15.230903 31846 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:45:15.230932 31846 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:45:15.230942 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.230947 31846 net.cpp:165] Memory required for data: 1109505500\nI0821 06:45:15.230952 31846 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:45:15.230962 31846 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:45:15.230968 31846 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:45:15.230975 31846 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.230984 31846 net.cpp:150] Setting up L2_b6_relu\nI0821 06:45:15.230991 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.230995 31846 net.cpp:165] Memory required for data: 1113601500\nI0821 06:45:15.231000 31846 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.231010 31846 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.231016 31846 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:45:15.231024 31846 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:15.231034 31846 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:15.231081 31846 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:45:15.231096 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.231103 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.231107 31846 net.cpp:165] Memory required for data: 1121793500\nI0821 06:45:15.231112 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:45:15.231123 31846 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:45:15.231129 31846 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:45:15.231138 31846 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:45:15.232830 31846 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:45:15.232847 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.232853 31846 net.cpp:165] Memory required for data: 1125889500\nI0821 06:45:15.232862 31846 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:45:15.232883 31846 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:45:15.232890 31846 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:45:15.232903 31846 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:45:15.233175 31846 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:45:15.233189 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.233194 31846 net.cpp:165] Memory required for data: 1129985500\nI0821 06:45:15.233204 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:15.233213 31846 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:45:15.233219 31846 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:45:15.233227 31846 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.233289 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:45:15.233453 31846 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:45:15.233467 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.233472 31846 net.cpp:165] Memory required for data: 1134081500\nI0821 06:45:15.233481 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:45:15.233492 31846 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:45:15.233500 31846 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:45:15.233506 31846 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.233516 31846 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:45:15.233523 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.233528 31846 net.cpp:165] Memory required for data: 1138177500\nI0821 06:45:15.233533 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:45:15.233546 31846 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:45:15.233552 31846 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:45:15.233564 31846 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:45:15.234050 31846 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:45:15.234063 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234068 31846 net.cpp:165] Memory required for data: 1142273500\nI0821 06:45:15.234076 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:45:15.234088 31846 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:45:15.234096 31846 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:45:15.234107 31846 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:45:15.234377 31846 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:45:15.234393 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234398 31846 net.cpp:165] Memory required for data: 1146369500\nI0821 06:45:15.234410 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:15.234417 31846 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:45:15.234429 31846 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:45:15.234438 31846 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.234498 31846 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:45:15.234669 31846 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:45:15.234681 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234688 31846 net.cpp:165] Memory required for data: 1150465500\nI0821 06:45:15.234696 31846 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:45:15.234705 31846 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:45:15.234711 31846 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:45:15.234721 31846 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:45:15.234730 31846 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:45:15.234757 31846 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:45:15.234766 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234771 31846 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:45:15.234776 31846 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:45:15.234787 31846 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:45:15.234800 31846 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:45:15.234808 31846 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.234817 31846 net.cpp:150] Setting up L2_b7_relu\nI0821 06:45:15.234824 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234829 31846 net.cpp:165] Memory required for data: 1158657500\nI0821 06:45:15.234833 31846 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.234840 31846 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.234845 31846 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:45:15.234853 31846 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:15.234863 31846 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:15.234915 31846 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:45:15.234925 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234932 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.234937 31846 net.cpp:165] Memory required for data: 1166849500\nI0821 06:45:15.234942 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:45:15.234957 31846 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:45:15.234963 31846 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:45:15.234973 31846 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:45:15.235472 31846 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:45:15.235486 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.235492 31846 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:45:15.235501 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:45:15.235512 31846 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:45:15.235519 31846 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:45:15.235530 31846 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:45:15.235801 31846 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:45:15.235815 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.235819 31846 net.cpp:165] Memory required for data: 1175041500\nI0821 06:45:15.235829 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:15.235838 31846 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:45:15.235844 31846 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:45:15.235852 31846 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.235913 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:45:15.236068 31846 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:45:15.236080 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.236085 31846 net.cpp:165] Memory required for data: 1179137500\nI0821 06:45:15.236094 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:45:15.236105 31846 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:45:15.236111 31846 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:45:15.236119 31846 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.236129 31846 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:45:15.236135 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.236140 31846 net.cpp:165] Memory required for data: 1183233500\nI0821 06:45:15.236145 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:45:15.236157 31846 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:45:15.236163 31846 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:45:15.236172 31846 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:45:15.236680 31846 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:45:15.236696 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.236701 31846 net.cpp:165] Memory required for data: 1187329500\nI0821 06:45:15.236708 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:45:15.236727 31846 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:45:15.236734 31846 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:45:15.236742 31846 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:45:15.237021 31846 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:45:15.237037 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.237042 31846 net.cpp:165] Memory required for data: 1191425500\nI0821 06:45:15.237052 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:15.237061 31846 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:45:15.237067 31846 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:45:15.237076 31846 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.237133 31846 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:45:15.237295 31846 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:45:15.237308 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.237313 31846 net.cpp:165] Memory required for data: 1195521500\nI0821 06:45:15.237321 31846 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:45:15.237330 31846 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:45:15.237337 31846 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:45:15.237344 31846 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:45:15.237355 31846 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:45:15.237385 31846 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:45:15.237393 31846 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:15.237398 31846 net.cpp:165] Memory required for data: 1199617500\nI0821 06:45:15.237403 31846 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:45:15.237414 31846 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:45:15.237421 31846 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:45:15.237434 31846 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.237444 31846 net.cpp:150] Setting up L2_b8_relu\nI0821 06:45:15.237452 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.237457 31846 net.cpp:165] Memory required for data: 1203713500\nI0821 06:45:15.237462 31846 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.237468 31846 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.237473 31846 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:45:15.237480 31846 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:15.237504 31846 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:15.237558 31846 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:45:15.237571 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.237578 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.237582 31846 net.cpp:165] Memory required for data: 1211905500\nI0821 06:45:15.237587 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:45:15.237607 31846 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:45:15.237612 31846 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:45:15.237624 31846 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:45:15.238118 31846 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:45:15.238132 31846 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:45:15.238137 31846 net.cpp:165] Memory required for data: 1216001500\nI0821 06:45:15.238145 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:45:15.238157 31846 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:45:15.238164 31846 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:45:15.238173 31846 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:45:15.238471 31846 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:45:15.238493 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.238499 31846 net.cpp:165] Memory required for data: 1220097500\nI0821 06:45:15.238509 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:15.238518 31846 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:45:15.238524 31846 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:45:15.238533 31846 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.238593 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:45:15.238759 31846 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:45:15.238771 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.238776 31846 net.cpp:165] Memory required for data: 1224193500\nI0821 06:45:15.238785 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:45:15.238793 31846 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:45:15.238800 31846 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:45:15.238809 31846 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.238819 31846 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:45:15.238826 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.238831 31846 net.cpp:165] Memory required for data: 1228289500\nI0821 06:45:15.238836 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:45:15.238847 31846 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:45:15.238852 31846 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:45:15.238863 31846 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:45:15.240348 31846 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:45:15.240366 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.240372 31846 net.cpp:165] Memory required for data: 1232385500\nI0821 06:45:15.240381 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:45:15.240393 31846 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:45:15.240401 31846 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:45:15.240411 31846 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:45:15.240691 31846 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:45:15.240705 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.240710 31846 net.cpp:165] Memory required for data: 1236481500\nI0821 06:45:15.240757 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:15.240772 31846 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:45:15.240779 31846 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:45:15.240787 31846 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.240849 31846 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:45:15.241003 31846 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:45:15.241015 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.241020 31846 net.cpp:165] Memory required for data: 1240577500\nI0821 06:45:15.241029 31846 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:45:15.241041 31846 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:45:15.241048 31846 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:45:15.241055 31846 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:45:15.241063 31846 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:45:15.241093 31846 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:45:15.241103 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.241107 31846 net.cpp:165] Memory required for data: 1244673500\nI0821 06:45:15.241112 31846 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:45:15.241120 31846 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:45:15.241127 31846 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:45:15.241137 31846 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.241145 31846 net.cpp:150] Setting up L2_b9_relu\nI0821 06:45:15.241153 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.241165 31846 net.cpp:165] Memory required for data: 1248769500\nI0821 06:45:15.241170 31846 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.241178 31846 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.241184 31846 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:45:15.241194 31846 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:15.241204 31846 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:15.241255 31846 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:45:15.241266 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.241272 31846 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:45:15.241277 31846 net.cpp:165] Memory required for data: 1256961500\nI0821 06:45:15.241282 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:45:15.241294 31846 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:45:15.241300 31846 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:45:15.241312 31846 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:45:15.241817 31846 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:45:15.241833 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.241838 31846 net.cpp:165] Memory required for data: 1257985500\nI0821 06:45:15.241847 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:45:15.241856 31846 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:45:15.241863 31846 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:45:15.241873 31846 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:45:15.242147 31846 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:45:15.242167 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.242172 31846 net.cpp:165] Memory required for data: 1259009500\nI0821 06:45:15.242183 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:15.242192 31846 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:45:15.242198 31846 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:45:15.242205 31846 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.242262 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:45:15.242434 31846 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:45:15.242449 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.242453 31846 net.cpp:165] Memory required for data: 1260033500\nI0821 06:45:15.242463 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:45:15.242475 31846 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:45:15.242480 31846 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:45:15.242487 31846 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:45:15.242497 31846 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:45:15.242504 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.242508 31846 net.cpp:165] Memory required for data: 1261057500\nI0821 06:45:15.242513 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:45:15.242527 31846 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:45:15.242533 31846 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:45:15.242542 31846 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:45:15.243031 31846 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:45:15.243046 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.243050 31846 net.cpp:165] Memory required for data: 1262081500\nI0821 06:45:15.243059 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:45:15.243072 31846 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:45:15.243077 31846 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:45:15.243088 31846 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:45:15.243362 31846 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:45:15.243382 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.243387 31846 net.cpp:165] Memory required for data: 1263105500\nI0821 06:45:15.243398 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:15.243407 31846 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:45:15.243413 31846 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:45:15.243429 31846 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:45:15.243489 31846 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:45:15.243662 31846 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:45:15.243675 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.243680 31846 net.cpp:165] Memory required for data: 1264129500\nI0821 06:45:15.243690 31846 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:45:15.243700 31846 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:45:15.243705 31846 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:45:15.243716 31846 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:45:15.243755 31846 net.cpp:150] Setting up L3_b1_pool\nI0821 06:45:15.243764 31846 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:45:15.243769 31846 net.cpp:165] Memory required for data: 1265153500\nI0821 06:45:15.243775 31846 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:45:15.243783 31846 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:45:15.243789 31846 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:45:15.243796 31846 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:45:15.243808 31846 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:45:15.243842 31846 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:45:15.243851 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.243855 31846 net.cpp:165] Memory required for data: 1266177500\nI0821 06:45:15.243861 31846 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:45:15.243868 31846 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:45:15.243875 31846 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:45:15.243880 31846 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:45:15.243890 31846 net.cpp:150] Setting up L3_b1_relu\nI0821 06:45:15.243896 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.243901 31846 net.cpp:165] Memory required for data: 1267201500\nI0821 06:45:15.243906 31846 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:45:15.243918 31846 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:45:15.243927 31846 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:45:15.245167 31846 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:45:15.245185 31846 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:45:15.245192 31846 net.cpp:165] Memory required for data: 1268225500\nI0821 06:45:15.245196 31846 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:45:15.245206 31846 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:45:15.245213 31846 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:45:15.245219 31846 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:45:15.245230 31846 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:45:15.245273 31846 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:45:15.245288 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.245293 31846 net.cpp:165] Memory required for data: 1270273500\nI0821 06:45:15.245298 31846 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.245306 31846 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.245312 31846 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:45:15.245319 31846 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:15.245332 31846 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:15.245385 31846 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:45:15.245396 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.245411 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.245416 31846 net.cpp:165] Memory required for data: 1274369500\nI0821 06:45:15.245426 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:45:15.245442 31846 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:45:15.245450 31846 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:45:15.245460 31846 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:45:15.246511 31846 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:45:15.246526 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.246531 31846 net.cpp:165] Memory required for data: 1276417500\nI0821 06:45:15.246541 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:45:15.246553 31846 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:45:15.246559 31846 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:45:15.246567 31846 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:45:15.246842 31846 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:45:15.246855 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.246860 31846 net.cpp:165] Memory required for data: 1278465500\nI0821 06:45:15.246871 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:15.246883 31846 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:45:15.246891 31846 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:45:15.246897 31846 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.246959 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:45:15.247125 31846 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:45:15.247139 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.247144 31846 net.cpp:165] Memory required for data: 1280513500\nI0821 06:45:15.247153 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:45:15.247164 31846 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:45:15.247170 31846 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:45:15.247177 31846 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:45:15.247187 31846 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:45:15.247195 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.247198 31846 net.cpp:165] Memory required for data: 1282561500\nI0821 06:45:15.247203 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:45:15.247217 31846 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:45:15.247223 31846 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:45:15.247234 31846 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:45:15.248273 31846 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:45:15.248288 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.248293 31846 net.cpp:165] Memory required for data: 1284609500\nI0821 06:45:15.248302 31846 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:45:15.248312 31846 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:45:15.248318 31846 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:45:15.248329 31846 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:45:15.248611 31846 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:45:15.248627 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.248632 31846 net.cpp:165] Memory required for data: 1286657500\nI0821 06:45:15.248643 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:15.248652 31846 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:45:15.248658 31846 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:45:15.248667 31846 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:45:15.248726 31846 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:45:15.248886 31846 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:45:15.248899 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.248904 31846 net.cpp:165] Memory required for data: 1288705500\nI0821 06:45:15.248920 31846 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:45:15.248934 31846 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:45:15.248940 31846 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:45:15.248947 31846 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:45:15.248955 31846 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:45:15.248991 31846 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:45:15.249002 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.249007 31846 net.cpp:165] Memory required for data: 1290753500\nI0821 06:45:15.249012 31846 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:45:15.249020 31846 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:45:15.249027 31846 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:45:15.249033 31846 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:45:15.249042 31846 net.cpp:150] Setting up L3_b2_relu\nI0821 06:45:15.249049 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.249053 31846 net.cpp:165] Memory required for data: 1292801500\nI0821 06:45:15.249058 31846 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.249065 31846 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.249070 31846 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:45:15.249081 31846 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:15.249091 31846 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:15.249137 31846 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:45:15.249150 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.249155 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.249161 31846 net.cpp:165] Memory required for data: 1296897500\nI0821 06:45:15.249166 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:45:15.249179 31846 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:45:15.249186 31846 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:45:15.249195 31846 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:45:15.250243 31846 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:45:15.250258 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.250263 31846 net.cpp:165] Memory required for data: 1298945500\nI0821 06:45:15.250272 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:45:15.250283 31846 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:45:15.250290 31846 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:45:15.250298 31846 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:45:15.250581 31846 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:45:15.250596 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.250600 31846 net.cpp:165] Memory required for data: 1300993500\nI0821 06:45:15.250610 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:15.250622 31846 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:45:15.250629 31846 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:45:15.250636 31846 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.250700 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:45:15.250862 31846 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:45:15.250875 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.250880 31846 net.cpp:165] Memory required for data: 1303041500\nI0821 06:45:15.250890 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:45:15.250901 31846 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:45:15.250908 31846 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:45:15.250916 31846 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:45:15.250924 31846 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:45:15.250941 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.250946 31846 net.cpp:165] Memory required for data: 1305089500\nI0821 06:45:15.250952 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:45:15.250962 31846 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:45:15.250968 31846 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:45:15.250980 31846 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:45:15.252020 31846 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:45:15.252035 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252040 31846 net.cpp:165] Memory required for data: 1307137500\nI0821 06:45:15.252049 31846 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:45:15.252058 31846 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:45:15.252064 31846 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:45:15.252076 31846 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:45:15.252355 31846 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:45:15.252368 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252373 31846 net.cpp:165] Memory required for data: 1309185500\nI0821 06:45:15.252384 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:15.252393 31846 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:45:15.252398 31846 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:45:15.252406 31846 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:45:15.252475 31846 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:45:15.252634 31846 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:45:15.252650 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252655 31846 net.cpp:165] Memory required for data: 1311233500\nI0821 06:45:15.252663 31846 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:45:15.252673 31846 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:45:15.252679 31846 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:45:15.252686 31846 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:45:15.252694 31846 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:45:15.252730 31846 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:45:15.252743 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252748 31846 net.cpp:165] Memory required for data: 1313281500\nI0821 06:45:15.252753 31846 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:45:15.252760 31846 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:45:15.252766 31846 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:45:15.252774 31846 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:45:15.252782 31846 net.cpp:150] Setting up L3_b3_relu\nI0821 06:45:15.252789 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252794 31846 net.cpp:165] Memory required for data: 1315329500\nI0821 06:45:15.252797 31846 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.252804 31846 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.252810 31846 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:45:15.252820 31846 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:15.252830 31846 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:15.252876 31846 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:45:15.252890 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252897 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.252902 31846 net.cpp:165] Memory required for data: 1319425500\nI0821 06:45:15.252907 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:45:15.252918 31846 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:45:15.252925 31846 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:45:15.252941 31846 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:45:15.253989 31846 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:45:15.254005 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.254010 31846 net.cpp:165] Memory required for data: 1321473500\nI0821 06:45:15.254019 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:45:15.254031 31846 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:45:15.254039 31846 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:45:15.254046 31846 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:45:15.254319 31846 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:45:15.254333 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.254338 31846 net.cpp:165] Memory required for data: 1323521500\nI0821 06:45:15.254348 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:15.254359 31846 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:45:15.254365 31846 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:45:15.254374 31846 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.254441 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:45:15.254607 31846 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:45:15.254621 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.254626 31846 net.cpp:165] Memory required for data: 1325569500\nI0821 06:45:15.254634 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:45:15.254647 31846 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:45:15.254652 31846 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:45:15.254662 31846 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:45:15.254673 31846 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:45:15.254678 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.254683 31846 net.cpp:165] Memory required for data: 1327617500\nI0821 06:45:15.254688 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:45:15.254699 31846 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:45:15.254705 31846 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:45:15.254716 31846 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:45:15.256736 31846 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:45:15.256754 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.256759 31846 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:45:15.256768 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:45:15.256781 31846 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:45:15.256788 31846 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:45:15.256798 31846 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:45:15.257071 31846 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:45:15.257084 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257089 31846 net.cpp:165] Memory required for data: 1331713500\nI0821 06:45:15.257099 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:15.257112 31846 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:45:15.257118 31846 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:45:15.257125 31846 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:45:15.257187 31846 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:45:15.257354 31846 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:45:15.257367 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257374 31846 net.cpp:165] Memory required for data: 1333761500\nI0821 06:45:15.257381 31846 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:45:15.257395 31846 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:45:15.257400 31846 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:45:15.257408 31846 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:45:15.257418 31846 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:45:15.257468 31846 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:45:15.257478 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257483 31846 net.cpp:165] Memory required for data: 1335809500\nI0821 06:45:15.257488 31846 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:45:15.257500 31846 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:45:15.257506 31846 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:45:15.257513 31846 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:45:15.257524 31846 net.cpp:150] Setting up L3_b4_relu\nI0821 06:45:15.257530 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257534 31846 net.cpp:165] Memory required for data: 1337857500\nI0821 06:45:15.257539 31846 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.257546 31846 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.257552 31846 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:45:15.257560 31846 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:15.257568 31846 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:15.257621 31846 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:45:15.257632 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257639 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.257643 31846 net.cpp:165] Memory required for data: 1341953500\nI0821 06:45:15.257648 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:45:15.257663 31846 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:45:15.257669 31846 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:45:15.257678 31846 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:45:15.258707 31846 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:45:15.258721 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.258726 31846 net.cpp:165] Memory required for data: 1344001500\nI0821 06:45:15.258734 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:45:15.258747 31846 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:45:15.258754 31846 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:45:15.258766 31846 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:45:15.259037 31846 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:45:15.259049 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.259055 31846 net.cpp:165] Memory required for data: 1346049500\nI0821 06:45:15.259065 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:15.259074 31846 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:45:15.259080 31846 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:45:15.259088 31846 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.259148 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:45:15.259307 31846 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:45:15.259320 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.259325 31846 net.cpp:165] Memory required for data: 1348097500\nI0821 06:45:15.259335 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:45:15.259342 31846 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:45:15.259348 31846 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:45:15.259359 31846 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:45:15.259371 31846 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:45:15.259377 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.259382 31846 net.cpp:165] Memory required for data: 1350145500\nI0821 06:45:15.259387 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:45:15.259400 31846 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:45:15.259407 31846 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:45:15.259428 31846 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:45:15.260463 31846 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:45:15.260479 31846 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:15.260484 31846 net.cpp:165] Memory required for data: 1352193500\nI0821 06:45:15.260493 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:45:15.260505 31846 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:45:15.260511 31846 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:45:15.260520 31846 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:45:15.260788 31846 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:45:15.260802 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.260807 31846 net.cpp:165] Memory required for data: 1354241500\nI0821 06:45:15.260818 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:15.260830 31846 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:45:15.260838 31846 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:45:15.260844 31846 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:45:15.260905 31846 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:45:15.261063 31846 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:45:15.261076 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.261081 31846 net.cpp:165] Memory required for data: 1356289500\nI0821 06:45:15.261090 31846 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:45:15.261102 31846 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:45:15.261109 31846 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:45:15.261116 31846 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:45:15.261126 31846 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:45:15.261159 31846 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:45:15.261168 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.261173 31846 net.cpp:165] Memory required for data: 1358337500\nI0821 06:45:15.261178 31846 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:45:15.261189 31846 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:45:15.261195 31846 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:45:15.261203 31846 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:45:15.261211 31846 net.cpp:150] Setting up L3_b5_relu\nI0821 06:45:15.261219 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.261222 31846 net.cpp:165] Memory required for data: 1360385500\nI0821 06:45:15.261227 31846 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.261234 31846 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.261240 31846 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:45:15.261246 31846 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:15.261256 31846 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:15.261304 31846 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:45:15.261315 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.261322 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.261327 31846 net.cpp:165] Memory required for data: 1364481500\nI0821 06:45:15.261332 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:45:15.261345 31846 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:45:15.261353 31846 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:45:15.261361 31846 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:45:15.262415 31846 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:45:15.262435 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.262441 31846 net.cpp:165] Memory required for data: 1366529500\nI0821 06:45:15.262450 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:45:15.262470 
31846 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:45:15.262476 31846 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:45:15.262487 31846 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:45:15.262766 31846 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:45:15.262779 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.262784 31846 net.cpp:165] Memory required for data: 1368577500\nI0821 06:45:15.262794 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:15.262804 31846 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:45:15.262809 31846 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:45:15.262820 31846 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.262881 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:45:15.263046 31846 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:45:15.263058 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.263063 31846 net.cpp:165] Memory required for data: 1370625500\nI0821 06:45:15.263072 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:45:15.263080 31846 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:45:15.263087 31846 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:45:15.263098 31846 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:45:15.263108 31846 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:45:15.263114 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.263118 31846 net.cpp:165] Memory required for data: 1372673500\nI0821 06:45:15.263123 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:45:15.263137 31846 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:45:15.263144 31846 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:45:15.263152 31846 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:45:15.264183 31846 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:45:15.264197 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.264202 31846 net.cpp:165] Memory required for data: 1374721500\nI0821 06:45:15.264211 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:45:15.264223 31846 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:45:15.264230 31846 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:45:15.264238 31846 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:45:15.264521 31846 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:45:15.264535 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.264540 31846 net.cpp:165] Memory required for data: 1376769500\nI0821 06:45:15.264550 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:15.264562 31846 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:45:15.264569 31846 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:45:15.264580 31846 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:45:15.264639 31846 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:45:15.264803 31846 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:45:15.264817 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.264822 31846 net.cpp:165] Memory required for data: 1378817500\nI0821 06:45:15.264830 31846 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:45:15.264839 31846 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:45:15.264847 31846 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:45:15.264853 31846 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:45:15.264863 31846 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:45:15.264899 31846 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:45:15.264907 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.264912 31846 net.cpp:165] Memory required for data: 1380865500\nI0821 06:45:15.264917 31846 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:45:15.264928 31846 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:45:15.264941 31846 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:45:15.264948 31846 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:45:15.264958 31846 net.cpp:150] Setting up L3_b6_relu\nI0821 06:45:15.264966 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.264969 31846 net.cpp:165] Memory required for data: 1382913500\nI0821 06:45:15.264974 31846 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.264981 31846 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.264987 31846 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:45:15.264994 31846 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:15.265003 31846 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:15.265056 31846 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:45:15.265069 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.265075 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.265079 31846 net.cpp:165] Memory required for data: 1387009500\nI0821 06:45:15.265084 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:45:15.265099 31846 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:45:15.265105 31846 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:45:15.265115 31846 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:45:15.266142 31846 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:45:15.266158 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.266163 31846 net.cpp:165] Memory required for data: 1389057500\nI0821 06:45:15.266172 31846 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:45:15.266186 31846 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:45:15.266193 31846 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:45:15.266204 31846 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:45:15.266484 31846 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:45:15.266499 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.266504 31846 net.cpp:165] Memory required for data: 1391105500\nI0821 06:45:15.266513 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:15.266522 31846 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:45:15.266528 31846 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:45:15.266540 31846 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.266600 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:45:15.266762 31846 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:45:15.266774 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.266779 31846 net.cpp:165] Memory required for data: 1393153500\nI0821 06:45:15.266788 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:45:15.266824 31846 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:45:15.266832 31846 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:45:15.266840 31846 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:45:15.266850 31846 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:45:15.266857 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.266861 31846 net.cpp:165] Memory required for data: 1395201500\nI0821 06:45:15.266867 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:45:15.266881 31846 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:45:15.266887 31846 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:45:15.266896 31846 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:45:15.267963 31846 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:45:15.267979 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.267984 31846 net.cpp:165] Memory required for data: 1397249500\nI0821 06:45:15.267993 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:45:15.268013 31846 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:45:15.268021 31846 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:45:15.268033 31846 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:45:15.268304 31846 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:45:15.268317 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268323 31846 net.cpp:165] Memory required for data: 1399297500\nI0821 06:45:15.268333 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:15.268342 31846 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:45:15.268348 31846 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:45:15.268358 31846 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:45:15.268419 31846 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:45:15.268595 31846 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:45:15.268610 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268615 31846 net.cpp:165] Memory required for data: 1401345500\nI0821 06:45:15.268623 31846 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:45:15.268632 31846 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:45:15.268640 31846 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:45:15.268646 31846 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:45:15.268656 31846 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:45:15.268694 31846 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:45:15.268707 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268712 31846 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:45:15.268717 31846 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:45:15.268724 31846 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:45:15.268730 31846 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:45:15.268738 31846 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:45:15.268749 31846 net.cpp:150] Setting up L3_b7_relu\nI0821 06:45:15.268756 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268761 31846 net.cpp:165] Memory required for data: 1405441500\nI0821 06:45:15.268766 31846 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.268772 31846 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.268779 31846 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:45:15.268785 31846 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:15.268795 31846 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:15.268846 31846 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:45:15.268857 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268864 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.268868 31846 net.cpp:165] Memory required for data: 1409537500\nI0821 06:45:15.268873 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:45:15.268884 31846 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:45:15.268890 31846 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:45:15.268903 31846 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:45:15.271091 31846 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:45:15.271109 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.271116 31846 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:45:15.271124 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:45:15.271134 31846 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:45:15.271144 31846 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:45:15.271153 31846 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:45:15.271435 31846 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:45:15.271448 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.271461 31846 net.cpp:165] Memory required for data: 1413633500\nI0821 06:45:15.271473 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:15.271482 31846 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:45:15.271489 31846 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:45:15.271500 31846 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.271562 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:45:15.271726 31846 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:45:15.271739 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.271744 31846 net.cpp:165] Memory required for data: 1415681500\nI0821 06:45:15.271754 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:45:15.271764 31846 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:45:15.271771 31846 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:45:15.271778 31846 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:45:15.271788 31846 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:45:15.271795 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.271800 31846 net.cpp:165] Memory required for data: 1417729500\nI0821 06:45:15.271805 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:45:15.271819 31846 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:45:15.271826 31846 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:45:15.271836 31846 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:45:15.272871 31846 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:45:15.272886 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.272891 31846 net.cpp:165] Memory required for data: 1419777500\nI0821 06:45:15.272899 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:45:15.272909 31846 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:45:15.272915 31846 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:45:15.272927 31846 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:45:15.273207 31846 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:45:15.273222 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.273228 31846 net.cpp:165] Memory required for data: 1421825500\nI0821 06:45:15.273238 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:15.273247 31846 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:45:15.273253 31846 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:45:15.273262 31846 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:45:15.273319 31846 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:45:15.273484 31846 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:45:15.273499 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.273504 31846 net.cpp:165] Memory required for data: 1423873500\nI0821 06:45:15.273514 31846 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:45:15.273525 31846 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:45:15.273531 31846 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:45:15.273540 31846 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:45:15.273546 31846 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:45:15.273583 31846 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:45:15.273593 31846 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:45:15.273597 31846 net.cpp:165] Memory required for data: 1425921500\nI0821 06:45:15.273602 31846 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:45:15.273610 31846 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:45:15.273617 31846 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:45:15.273623 31846 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:45:15.273633 31846 net.cpp:150] Setting up L3_b8_relu\nI0821 06:45:15.273639 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.273643 31846 net.cpp:165] Memory required for data: 1427969500\nI0821 06:45:15.273655 31846 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.273663 31846 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.273669 31846 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:45:15.273679 31846 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:15.273689 31846 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:15.273736 31846 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:45:15.273747 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.273754 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.273759 31846 net.cpp:165] Memory required for data: 1432065500\nI0821 06:45:15.273764 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:45:15.273780 31846 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:45:15.273787 31846 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:45:15.273797 31846 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:45:15.274824 31846 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:45:15.274839 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:45:15.274844 31846 net.cpp:165] Memory required for data: 1434113500\nI0821 06:45:15.274853 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:45:15.274864 31846 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:45:15.274871 31846 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:45:15.274880 31846 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:45:15.275153 31846 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:45:15.275166 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.275171 31846 net.cpp:165] Memory required for data: 1436161500\nI0821 06:45:15.275182 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:15.275193 31846 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:45:15.275199 31846 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:45:15.275207 31846 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.275275 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:45:15.275444 31846 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:45:15.275459 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.275463 31846 net.cpp:165] Memory required for data: 1438209500\nI0821 06:45:15.275472 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:45:15.275483 31846 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:45:15.275490 31846 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:45:15.275497 31846 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:45:15.275507 31846 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:45:15.275514 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.275518 31846 net.cpp:165] Memory required for data: 1440257500\nI0821 06:45:15.275523 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:45:15.275537 31846 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:45:15.275543 31846 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:45:15.275554 31846 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:45:15.276587 31846 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:45:15.276602 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.276607 31846 net.cpp:165] Memory required for data: 1442305500\nI0821 06:45:15.276615 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:45:15.276624 31846 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:45:15.276630 31846 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:45:15.276641 31846 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:45:15.276913 31846 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:45:15.276929 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.276942 31846 net.cpp:165] Memory required for data: 1444353500\nI0821 06:45:15.276952 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:15.276960 31846 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:45:15.276968 31846 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:45:15.276974 31846 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:45:15.277034 31846 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:45:15.277192 31846 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:45:15.277205 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.277210 31846 net.cpp:165] Memory required for data: 1446401500\nI0821 06:45:15.277220 31846 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:45:15.277232 31846 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:45:15.277240 31846 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:45:15.277246 31846 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:45:15.277254 31846 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:45:15.277292 31846 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:45:15.277302 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.277307 31846 net.cpp:165] Memory required for data: 1448449500\nI0821 06:45:15.277312 31846 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:45:15.277321 31846 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:45:15.277326 31846 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:45:15.277333 31846 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:45:15.277343 31846 net.cpp:150] Setting up L3_b9_relu\nI0821 06:45:15.277349 31846 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:45:15.277354 31846 net.cpp:165] Memory required for data: 1450497500\nI0821 06:45:15.277359 31846 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:45:15.277366 31846 net.cpp:100] Creating Layer post_pool\nI0821 06:45:15.277372 31846 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:45:15.277385 31846 net.cpp:408] post_pool -> post_pool\nI0821 06:45:15.277428 31846 net.cpp:150] Setting up post_pool\nI0821 06:45:15.277441 31846 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:45:15.277446 31846 net.cpp:165] Memory required for data: 1450529500\nI0821 06:45:15.277452 31846 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:45:15.277463 31846 net.cpp:100] Creating Layer post_FC\nI0821 06:45:15.277469 31846 net.cpp:434] post_FC <- post_pool\nI0821 06:45:15.277482 31846 net.cpp:408] post_FC -> post_FC_top\nI0821 06:45:15.277644 31846 net.cpp:150] Setting up post_FC\nI0821 06:45:15.277658 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.277663 31846 net.cpp:165] Memory required for data: 1450534500\nI0821 06:45:15.277673 31846 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:45:15.277683 31846 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:45:15.277689 31846 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:45:15.277698 31846 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:45:15.277706 31846 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:45:15.277760 31846 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:45:15.277771 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.277777 31846 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:45:15.277782 31846 net.cpp:165] Memory required for data: 1450544500\nI0821 06:45:15.277787 31846 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:45:15.277796 31846 net.cpp:100] Creating Layer accuracy\nI0821 06:45:15.277801 31846 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:45:15.277808 31846 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:45:15.277815 31846 net.cpp:408] accuracy -> accuracy\nI0821 06:45:15.277827 31846 net.cpp:150] Setting up accuracy\nI0821 06:45:15.277834 31846 net.cpp:157] Top shape: (1)\nI0821 06:45:15.277846 31846 net.cpp:165] Memory required for data: 1450544504\nI0821 06:45:15.277851 31846 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:15.277864 31846 net.cpp:100] Creating Layer loss\nI0821 06:45:15.277870 31846 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:45:15.277878 31846 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:45:15.277884 31846 net.cpp:408] loss -> loss\nI0821 06:45:15.277896 31846 layer_factory.hpp:77] Creating layer loss\nI0821 06:45:15.278020 31846 net.cpp:150] Setting up loss\nI0821 06:45:15.278033 31846 net.cpp:157] Top shape: (1)\nI0821 06:45:15.278038 31846 net.cpp:160]     with loss weight 1\nI0821 06:45:15.278054 31846 net.cpp:165] Memory required for data: 1450544508\nI0821 06:45:15.278061 31846 net.cpp:226] loss needs backward computation.\nI0821 06:45:15.278067 31846 net.cpp:228] accuracy does not need backward computation.\nI0821 06:45:15.278074 31846 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:45:15.278079 31846 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:45:15.278084 31846 net.cpp:226] post_pool needs backward computation.\nI0821 06:45:15.278089 31846 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:45:15.278093 31846 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.278098 31846 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.278103 31846 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.278108 31846 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.278113 31846 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.278118 31846 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.278123 31846 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.278128 31846 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.278133 31846 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:45:15.278138 31846 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:45:15.278143 31846 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.278151 31846 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.278157 31846 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.278162 31846 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.278167 31846 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.278172 31846 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:45:15.278177 31846 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.278182 31846 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.278187 31846 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:45:15.278192 31846 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:45:15.278198 31846 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:45:15.278203 31846 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.278208 31846 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.278213 31846 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.278218 31846 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.278223 31846 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.278228 31846 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.278232 31846 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.278237 31846 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:45:15.278242 31846 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:45:15.278247 31846 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.278254 31846 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.278259 31846 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.278270 31846 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.278275 31846 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.278280 31846 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.278285 31846 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.278290 31846 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.278295 31846 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:45:15.278301 31846 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:45:15.278306 31846 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:45:15.278311 31846 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.278316 31846 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.278321 31846 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.278326 31846 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.278331 31846 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.278337 31846 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.278342 31846 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.278347 31846 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:45:15.278352 31846 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:45:15.278357 31846 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.278363 31846 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.278368 31846 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.278373 31846 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.278378 31846 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.278383 31846 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.278388 31846 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.278393 31846 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.278399 31846 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:45:15.278404 31846 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:45:15.278409 31846 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:45:15.278414 31846 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.278429 31846 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.278435 31846 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.278441 31846 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.278446 31846 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.278451 
31846 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.278456 31846 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.278461 31846 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:45:15.278467 31846 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:45:15.278472 31846 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.278478 31846 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.278483 31846 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.278488 31846 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.278493 31846 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.278498 31846 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.278503 31846 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.278509 31846 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.278514 31846 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:45:15.278519 31846 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:45:15.278532 31846 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:45:15.278537 31846 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:45:15.278542 31846 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.278548 31846 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:45:15.278554 31846 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.278559 31846 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.278564 31846 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.278569 31846 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.278574 31846 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:45:15.278580 31846 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.278585 31846 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.278590 31846 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:45:15.278596 31846 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:45:15.278601 31846 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.278607 31846 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.278612 31846 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.278617 31846 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.278623 31846 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.278628 31846 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.278633 31846 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.278638 31846 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.278645 31846 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:45:15.278650 31846 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:45:15.278656 31846 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.278661 31846 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.278666 31846 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.278671 31846 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.278677 31846 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.278682 31846 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:45:15.278687 31846 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.278692 31846 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.278698 31846 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:45:15.278703 31846 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:45:15.278708 31846 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:45:15.278714 31846 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.278719 31846 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.278724 31846 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.278730 31846 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.278735 31846 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.278740 31846 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.278745 31846 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.278751 31846 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:45:15.278756 31846 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:45:15.278761 31846 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.278767 31846 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.278772 31846 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.278779 31846 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.278784 31846 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.278794 31846 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.278800 31846 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.278810 31846 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.278815 31846 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:45:15.278821 31846 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:45:15.278826 31846 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:45:15.278832 31846 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.278837 31846 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.278843 31846 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.278848 31846 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.278853 31846 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.278859 31846 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.278864 31846 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.278869 31846 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:45:15.278875 31846 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:45:15.278880 31846 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.278887 31846 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.278892 31846 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.278898 31846 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.278903 31846 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.278908 31846 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.278913 31846 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.278918 31846 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.278924 31846 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:45:15.278929 31846 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:45:15.278935 31846 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:45:15.278941 31846 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.278946 31846 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.278952 31846 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.278957 31846 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.278962 31846 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.278969 31846 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.278973 31846 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.278978 31846 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:45:15.278985 31846 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:45:15.278990 31846 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.278995 31846 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.279001 31846 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.279006 31846 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.279012 31846 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.279017 31846 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.279022 31846 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.279028 31846 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.279033 31846 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:45:15.279039 31846 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:45:15.279047 31846 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:45:15.279052 31846 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:45:15.279062 31846 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.279067 31846 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:45:15.279073 31846 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.279079 31846 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.279084 31846 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.279090 31846 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.279095 31846 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:45:15.279100 31846 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.279105 31846 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.279111 31846 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:45:15.279117 31846 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:45:15.279122 31846 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:45:15.279129 31846 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:45:15.279134 31846 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:45:15.279139 31846 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:45:15.279145 31846 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:45:15.279150 31846 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:45:15.279155 31846 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:45:15.279161 31846 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:45:15.279166 31846 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:45:15.279172 31846 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:45:15.279177 31846 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:45:15.279183 31846 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:45:15.279189 31846 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:45:15.279194 31846 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:45:15.279201 31846 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:45:15.279206 31846 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:45:15.279211 31846 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:45:15.279217 31846 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:45:15.279222 31846 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:45:15.279228 31846 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:45:15.279233 31846 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:45:15.279240 31846 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:45:15.279245 31846 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:45:15.279251 31846 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:45:15.279256 31846 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:45:15.279263 31846 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:45:15.279268 31846 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:45:15.279273 31846 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:45:15.279279 31846 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:45:15.279285 31846 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:45:15.279291 31846 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:45:15.279297 31846 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:45:15.279302 31846 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:45:15.279309 31846 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:45:15.279314 31846 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:45:15.279320 31846 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:45:15.279330 31846 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:45:15.279336 31846 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:45:15.279342 31846 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:45:15.279348 31846 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:45:15.279353 31846 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:45:15.279361 31846 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:45:15.279366 31846 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:45:15.279371 31846 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:45:15.279377 31846 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:45:15.279382 31846 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:45:15.279388 31846 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:45:15.279393 31846 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:45:15.279399 31846 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:45:15.279405 31846 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:45:15.279412 31846 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:45:15.279417 31846 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:45:15.279428 31846 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:45:15.279434 31846 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:45:15.279440 31846 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:45:15.279446 31846 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:45:15.279453 31846 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:45:15.279458 31846 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:45:15.279464 31846 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:45:15.279469 31846 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:45:15.279475 31846 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:45:15.279481 31846 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:45:15.279487 31846 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:45:15.279496 31846 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:45:15.279502 31846 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:45:15.279508 31846 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:45:15.279515 31846 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:45:15.279520 31846 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:45:15.279525 31846 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:45:15.279531 31846 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:45:15.279537 31846 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:45:15.279543 31846 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:45:15.279549 31846 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:45:15.279556 31846 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:45:15.279561 31846 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:45:15.279567 31846 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:45:15.279572 31846 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:45:15.279577 31846 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:45:15.279583 31846 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:45:15.279589 31846 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:45:15.279595 31846 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:45:15.279604 31846 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:45:15.279610 31846 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:45:15.279623 31846 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:45:15.279628 31846 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:45:15.279634 31846 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:45:15.279640 31846 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:45:15.279645 31846 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:45:15.279651 31846 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:45:15.279657 31846 net.cpp:226] pre_relu needs backward computation.\nI0821 06:45:15.279662 31846 net.cpp:226] pre_scale needs backward computation.\nI0821 06:45:15.279669 31846 net.cpp:226] pre_bn needs backward computation.\nI0821 06:45:15.279673 31846 net.cpp:226] pre_conv needs backward computation.\nI0821 06:45:15.279680 31846 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:45:15.279687 31846 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:45:15.279691 31846 net.cpp:270] This network produces output accuracy\nI0821 06:45:15.279698 31846 net.cpp:270] This network produces output loss\nI0821 06:45:15.280025 31846 net.cpp:283] Network initialization done.\nI0821 06:45:15.281047 31846 solver.cpp:60] Solver scaffolding done.\nI0821 06:45:15.504343 31846 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:45:15.860393 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:15.860463 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:15.867410 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:16.089193 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:16.089303 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:16.123729 31846 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:45:16.123837 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:16.573662 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:16.573724 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:16.581784 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:16.828819 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:16.828927 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:16.880883 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:16.880986 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:17.389840 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:17.389919 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:17.398659 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:17.661608 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:17.661780 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:17.733047 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:17.733206 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:17.817011 31846 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:45:18.289892 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:18.289947 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:45:18.299506 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:18.593338 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:18.593499 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:18.685262 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:18.685415 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:19.324477 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:19.324539 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:19.334939 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:19.649570 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:19.649794 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:19.762629 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:19.762840 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:20.466066 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:20.466131 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:20.477681 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:20.821832 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:20.822077 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:20.954851 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:20.955088 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:45:21.719588 31846 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:45:21.719651 31846 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:45:21.731839 31846 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:45:21.776459 31867 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:21.822998 31857 blocking_queue.cpp:50] Waiting for data\nI0821 06:45:22.159255 31846 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:45:22.159536 31846 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:45:22.311389 31846 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:45:22.311650 31846 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:45:22.482797 31846 parallel.cpp:425] Starting Optimization\nI0821 06:45:22.484050 31846 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:45:22.484067 31846 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:45:22.488963 31846 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:46:43.950877 31846 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:46:43.951172 31846 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:46:47.857064 31846 solver.cpp:228] Iteration 0, loss = 3.89072\nI0821 06:46:47.857105 31846 solver.cpp:244]     Train net output #0: accuracy = 0.064\nI0821 06:46:47.857125 31846 solver.cpp:244]     Train net output #1: loss = 3.89072 (* 1 = 3.89072 loss)\nI0821 06:46:47.936405 31846 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:49:05.301070 31846 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:50:26.192586 31846 solver.cpp:404]     Test net output #0: accuracy = 0.16712\nI0821 06:50:26.192831 31846 solver.cpp:404]     Test net output #1: loss = 2.22125 (* 1 = 2.22125 loss)\nI0821 06:50:27.509598 31846 
solver.cpp:228] Iteration 100, loss = 2.20301\nI0821 06:50:27.509642 31846 solver.cpp:244]     Train net output #0: accuracy = 0.176\nI0821 06:50:27.509660 31846 solver.cpp:244]     Train net output #1: loss = 2.20301 (* 1 = 2.20301 loss)\nI0821 06:50:27.596174 31846 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:52:44.780040 31846 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:54:05.657578 31846 solver.cpp:404]     Test net output #0: accuracy = 0.23988\nI0821 06:54:05.657837 31846 solver.cpp:404]     Test net output #1: loss = 2.55596 (* 1 = 2.55596 loss)\nI0821 06:54:06.976218 31846 solver.cpp:228] Iteration 200, loss = 1.55322\nI0821 06:54:06.976264 31846 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI0821 06:54:06.976280 31846 solver.cpp:244]     Train net output #1: loss = 1.55322 (* 1 = 1.55322 loss)\nI0821 06:54:07.069826 31846 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 06:56:24.277611 31846 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 06:57:45.140787 31846 solver.cpp:404]     Test net output #0: accuracy = 0.48596\nI0821 06:57:45.141042 31846 solver.cpp:404]     Test net output #1: loss = 1.52835 (* 1 = 1.52835 loss)\nI0821 06:57:46.458600 31846 solver.cpp:228] Iteration 300, loss = 1.12812\nI0821 06:57:46.458644 31846 solver.cpp:244]     Train net output #0: accuracy = 0.624\nI0821 06:57:46.458662 31846 solver.cpp:244]     Train net output #1: loss = 1.12812 (* 1 = 1.12812 loss)\nI0821 06:57:46.545912 31846 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:00:03.847082 31846 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:01:24.705154 31846 solver.cpp:404]     Test net output #0: accuracy = 0.56904\nI0821 07:01:24.705420 31846 solver.cpp:404]     Test net output #1: loss = 1.21534 (* 1 = 1.21534 loss)\nI0821 07:01:26.022302 31846 solver.cpp:228] Iteration 400, loss = 0.798966\nI0821 07:01:26.022344 31846 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0821 07:01:26.022361 31846 
solver.cpp:244]     Train net output #1: loss = 0.798966 (* 1 = 0.798966 loss)\nI0821 07:01:26.118151 31846 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:03:43.840446 31846 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:05:04.699944 31846 solver.cpp:404]     Test net output #0: accuracy = 0.58044\nI0821 07:05:04.700177 31846 solver.cpp:404]     Test net output #1: loss = 1.2226 (* 1 = 1.2226 loss)\nI0821 07:05:06.017369 31846 solver.cpp:228] Iteration 500, loss = 0.638642\nI0821 07:05:06.017411 31846 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 07:05:06.017427 31846 solver.cpp:244]     Train net output #1: loss = 0.638642 (* 1 = 0.638642 loss)\nI0821 07:05:06.109611 31846 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:07:23.282203 31846 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:08:44.135560 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6866\nI0821 07:08:44.135821 31846 solver.cpp:404]     Test net output #1: loss = 0.918874 (* 1 = 0.918874 loss)\nI0821 07:08:45.453765 31846 solver.cpp:228] Iteration 600, loss = 0.648359\nI0821 07:08:45.453806 31846 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 07:08:45.453821 31846 solver.cpp:244]     Train net output #1: loss = 0.648359 (* 1 = 0.648359 loss)\nI0821 07:08:45.548840 31846 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:11:02.847030 31846 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:12:23.704591 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6056\nI0821 07:12:23.704862 31846 solver.cpp:404]     Test net output #1: loss = 1.29639 (* 1 = 1.29639 loss)\nI0821 07:12:25.022598 31846 solver.cpp:228] Iteration 700, loss = 0.50602\nI0821 07:12:25.022641 31846 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 07:12:25.022657 31846 solver.cpp:244]     Train net output #1: loss = 0.50602 (* 1 = 0.50602 loss)\nI0821 07:12:25.110158 31846 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0821 07:14:42.451407 31846 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:16:03.305162 31846 solver.cpp:404]     Test net output #0: accuracy = 0.71376\nI0821 07:16:03.305403 31846 solver.cpp:404]     Test net output #1: loss = 0.876386 (* 1 = 0.876386 loss)\nI0821 07:16:04.623641 31846 solver.cpp:228] Iteration 800, loss = 0.401645\nI0821 07:16:04.623682 31846 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 07:16:04.623698 31846 solver.cpp:244]     Train net output #1: loss = 0.401645 (* 1 = 0.401645 loss)\nI0821 07:16:04.713032 31846 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:18:22.325762 31846 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:19:43.183789 31846 solver.cpp:404]     Test net output #0: accuracy = 0.70536\nI0821 07:19:43.184032 31846 solver.cpp:404]     Test net output #1: loss = 0.966048 (* 1 = 0.966048 loss)\nI0821 07:19:44.504329 31846 solver.cpp:228] Iteration 900, loss = 0.448367\nI0821 07:19:44.504365 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 07:19:44.504379 31846 solver.cpp:244]     Train net output #1: loss = 0.448367 (* 1 = 0.448367 loss)\nI0821 07:19:44.592324 31846 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:22:02.184285 31846 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:23:23.547449 31846 solver.cpp:404]     Test net output #0: accuracy = 0.72372\nI0821 07:23:23.547690 31846 solver.cpp:404]     Test net output #1: loss = 0.842533 (* 1 = 0.842533 loss)\nI0821 07:23:24.868896 31846 solver.cpp:228] Iteration 1000, loss = 0.47658\nI0821 07:23:24.868957 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 07:23:24.868974 31846 solver.cpp:244]     Train net output #1: loss = 0.47658 (* 1 = 0.47658 loss)\nI0821 07:23:24.955381 31846 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:25:42.647367 31846 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:27:03.883240 31846 solver.cpp:404]     Test net 
output #0: accuracy = 0.72644\nI0821 07:27:03.883451 31846 solver.cpp:404]     Test net output #1: loss = 0.829328 (* 1 = 0.829328 loss)\nI0821 07:27:05.204228 31846 solver.cpp:228] Iteration 1100, loss = 0.432955\nI0821 07:27:05.204288 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 07:27:05.204305 31846 solver.cpp:244]     Train net output #1: loss = 0.432955 (* 1 = 0.432955 loss)\nI0821 07:27:05.295419 31846 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:29:22.638697 31846 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:30:43.906461 31846 solver.cpp:404]     Test net output #0: accuracy = 0.7056\nI0821 07:30:43.906682 31846 solver.cpp:404]     Test net output #1: loss = 1.02848 (* 1 = 1.02848 loss)\nI0821 07:30:45.227543 31846 solver.cpp:228] Iteration 1200, loss = 0.399092\nI0821 07:30:45.227609 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 07:30:45.227627 31846 solver.cpp:244]     Train net output #1: loss = 0.399092 (* 1 = 0.399092 loss)\nI0821 07:30:45.319767 31846 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:33:02.210952 31846 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:34:23.039590 31846 solver.cpp:404]     Test net output #0: accuracy = 0.69416\nI0821 07:34:23.039831 31846 solver.cpp:404]     Test net output #1: loss = 0.928092 (* 1 = 0.928092 loss)\nI0821 07:34:24.356781 31846 solver.cpp:228] Iteration 1300, loss = 0.397297\nI0821 07:34:24.356824 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 07:34:24.356840 31846 solver.cpp:244]     Train net output #1: loss = 0.397297 (* 1 = 0.397297 loss)\nI0821 07:34:24.439002 31846 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:36:41.023772 31846 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:38:01.843683 31846 solver.cpp:404]     Test net output #0: accuracy = 0.66556\nI0821 07:38:01.843945 31846 solver.cpp:404]     Test net output #1: loss = 1.19658 (* 1 = 1.19658 
loss)\nI0821 07:38:03.160878 31846 solver.cpp:228] Iteration 1400, loss = 0.368562\nI0821 07:38:03.160923 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 07:38:03.160938 31846 solver.cpp:244]     Train net output #1: loss = 0.368562 (* 1 = 0.368562 loss)\nI0821 07:38:03.250546 31846 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:40:20.113116 31846 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:41:40.946285 31846 solver.cpp:404]     Test net output #0: accuracy = 0.61032\nI0821 07:41:40.946537 31846 solver.cpp:404]     Test net output #1: loss = 1.69349 (* 1 = 1.69349 loss)\nI0821 07:41:42.264117 31846 solver.cpp:228] Iteration 1500, loss = 0.295828\nI0821 07:41:42.264163 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 07:41:42.264185 31846 solver.cpp:244]     Train net output #1: loss = 0.295828 (* 1 = 0.295828 loss)\nI0821 07:41:42.353987 31846 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:43:59.237351 31846 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:45:20.063601 31846 solver.cpp:404]     Test net output #0: accuracy = 0.74396\nI0821 07:45:20.063863 31846 solver.cpp:404]     Test net output #1: loss = 0.850949 (* 1 = 0.850949 loss)\nI0821 07:45:21.381945 31846 solver.cpp:228] Iteration 1600, loss = 0.414097\nI0821 07:45:21.381990 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:45:21.382006 31846 solver.cpp:244]     Train net output #1: loss = 0.414097 (* 1 = 0.414097 loss)\nI0821 07:45:21.473188 31846 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:47:38.385877 31846 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:48:59.214404 31846 solver.cpp:404]     Test net output #0: accuracy = 0.7306\nI0821 07:48:59.214651 31846 solver.cpp:404]     Test net output #1: loss = 0.958758 (* 1 = 0.958758 loss)\nI0821 07:49:00.532881 31846 solver.cpp:228] Iteration 1700, loss = 0.308163\nI0821 07:49:00.532915 31846 solver.cpp:244]     Train net 
output #0: accuracy = 0.856\nI0821 07:49:00.532929 31846 solver.cpp:244]     Train net output #1: loss = 0.308163 (* 1 = 0.308163 loss)\nI0821 07:49:00.612782 31846 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:51:17.505746 31846 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:52:38.330996 31846 solver.cpp:404]     Test net output #0: accuracy = 0.68144\nI0821 07:52:38.331261 31846 solver.cpp:404]     Test net output #1: loss = 1.14081 (* 1 = 1.14081 loss)\nI0821 07:52:39.647719 31846 solver.cpp:228] Iteration 1800, loss = 0.317244\nI0821 07:52:39.647759 31846 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 07:52:39.647775 31846 solver.cpp:244]     Train net output #1: loss = 0.317244 (* 1 = 0.317244 loss)\nI0821 07:52:39.739090 31846 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:54:56.304345 31846 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 07:56:17.178333 31846 solver.cpp:404]     Test net output #0: accuracy = 0.68492\nI0821 07:56:17.178598 31846 solver.cpp:404]     Test net output #1: loss = 1.03685 (* 1 = 1.03685 loss)\nI0821 07:56:18.495584 31846 solver.cpp:228] Iteration 1900, loss = 0.34808\nI0821 07:56:18.495625 31846 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 07:56:18.495640 31846 solver.cpp:244]     Train net output #1: loss = 0.34808 (* 1 = 0.34808 loss)\nI0821 07:56:18.579191 31846 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 07:58:35.235699 31846 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 07:59:56.107558 31846 solver.cpp:404]     Test net output #0: accuracy = 0.64112\nI0821 07:59:56.107820 31846 solver.cpp:404]     Test net output #1: loss = 1.48448 (* 1 = 1.48448 loss)\nI0821 07:59:57.424918 31846 solver.cpp:228] Iteration 2000, loss = 0.272798\nI0821 07:59:57.424959 31846 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:59:57.424975 31846 solver.cpp:244]     Train net output #1: loss = 0.272798 (* 1 = 0.272798 loss)\nI0821 
07:59:57.506745 31846 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:02:14.392247 31846 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:03:35.267163 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6662\nI0821 08:03:35.267412 31846 solver.cpp:404]     Test net output #1: loss = 1.04662 (* 1 = 1.04662 loss)\nI0821 08:03:36.584880 31846 solver.cpp:228] Iteration 2100, loss = 0.460207\nI0821 08:03:36.584923 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 08:03:36.584938 31846 solver.cpp:244]     Train net output #1: loss = 0.460207 (* 1 = 0.460207 loss)\nI0821 08:03:36.672880 31846 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:05:53.293597 31846 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:07:14.159221 31846 solver.cpp:404]     Test net output #0: accuracy = 0.54728\nI0821 08:07:14.159488 31846 solver.cpp:404]     Test net output #1: loss = 1.7254 (* 1 = 1.7254 loss)\nI0821 08:07:15.476838 31846 solver.cpp:228] Iteration 2200, loss = 0.391699\nI0821 08:07:15.476876 31846 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 08:07:15.476893 31846 solver.cpp:244]     Train net output #1: loss = 0.391699 (* 1 = 0.391699 loss)\nI0821 08:07:15.564893 31846 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:09:32.454730 31846 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:10:53.329512 31846 solver.cpp:404]     Test net output #0: accuracy = 0.7274\nI0821 08:10:53.329771 31846 solver.cpp:404]     Test net output #1: loss = 0.934057 (* 1 = 0.934057 loss)\nI0821 08:10:54.647708 31846 solver.cpp:228] Iteration 2300, loss = 0.424036\nI0821 08:10:54.647750 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 08:10:54.647766 31846 solver.cpp:244]     Train net output #1: loss = 0.424036 (* 1 = 0.424036 loss)\nI0821 08:10:54.729121 31846 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:13:11.323284 31846 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0821 08:14:32.199213 31846 solver.cpp:404]     Test net output #0: accuracy = 0.72252\nI0821 08:14:32.199486 31846 solver.cpp:404]     Test net output #1: loss = 0.948446 (* 1 = 0.948446 loss)\nI0821 08:14:33.516865 31846 solver.cpp:228] Iteration 2400, loss = 0.290039\nI0821 08:14:33.516906 31846 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 08:14:33.516922 31846 solver.cpp:244]     Train net output #1: loss = 0.290039 (* 1 = 0.290039 loss)\nI0821 08:14:33.605012 31846 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:16:50.556393 31846 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:18:11.433452 31846 solver.cpp:404]     Test net output #0: accuracy = 0.66544\nI0821 08:18:11.433717 31846 solver.cpp:404]     Test net output #1: loss = 1.12175 (* 1 = 1.12175 loss)\nI0821 08:18:12.751049 31846 solver.cpp:228] Iteration 2500, loss = 0.330547\nI0821 08:18:12.751085 31846 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:18:12.751098 31846 solver.cpp:244]     Train net output #1: loss = 0.330546 (* 1 = 0.330546 loss)\nI0821 08:18:12.836370 31846 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:20:29.573637 31846 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:21:50.446487 31846 solver.cpp:404]     Test net output #0: accuracy = 0.62176\nI0821 08:21:50.446743 31846 solver.cpp:404]     Test net output #1: loss = 1.30841 (* 1 = 1.30841 loss)\nI0821 08:21:51.763568 31846 solver.cpp:228] Iteration 2600, loss = 0.319444\nI0821 08:21:51.763609 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 08:21:51.763625 31846 solver.cpp:244]     Train net output #1: loss = 0.319444 (* 1 = 0.319444 loss)\nI0821 08:21:51.853093 31846 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:24:08.763347 31846 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:25:29.634052 31846 solver.cpp:404]     Test net output #0: accuracy = 0.48744\nI0821 08:25:29.634371 31846 solver.cpp:404]     
Test net output #1: loss = 2.28169 (* 1 = 2.28169 loss)\nI0821 08:25:30.951335 31846 solver.cpp:228] Iteration 2700, loss = 0.365727\nI0821 08:25:30.951377 31846 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 08:25:30.951393 31846 solver.cpp:244]     Train net output #1: loss = 0.365727 (* 1 = 0.365727 loss)\nI0821 08:25:31.035461 31846 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:27:47.969655 31846 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:29:08.833073 31846 solver.cpp:404]     Test net output #0: accuracy = 0.57748\nI0821 08:29:08.833317 31846 solver.cpp:404]     Test net output #1: loss = 1.58345 (* 1 = 1.58345 loss)\nI0821 08:29:10.150661 31846 solver.cpp:228] Iteration 2800, loss = 0.397739\nI0821 08:29:10.150701 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 08:29:10.150715 31846 solver.cpp:244]     Train net output #1: loss = 0.397739 (* 1 = 0.397739 loss)\nI0821 08:29:10.237238 31846 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:31:26.867733 31846 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:32:48.184062 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6242\nI0821 08:32:48.184303 31846 solver.cpp:404]     Test net output #1: loss = 1.33383 (* 1 = 1.33383 loss)\nI0821 08:32:49.505494 31846 solver.cpp:228] Iteration 2900, loss = 0.392959\nI0821 08:32:49.505555 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 08:32:49.505573 31846 solver.cpp:244]     Train net output #1: loss = 0.392959 (* 1 = 0.392959 loss)\nI0821 08:32:49.592105 31846 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:35:06.501153 31846 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:36:27.359221 31846 solver.cpp:404]     Test net output #0: accuracy = 0.74472\nI0821 08:36:27.359475 31846 solver.cpp:404]     Test net output #1: loss = 0.805322 (* 1 = 0.805322 loss)\nI0821 08:36:28.676230 31846 solver.cpp:228] Iteration 3000, loss = 0.382819\nI0821 
08:36:28.676271 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 08:36:28.676286 31846 solver.cpp:244]     Train net output #1: loss = 0.382819 (* 1 = 0.382819 loss)\nI0821 08:36:28.759639 31846 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:38:45.694870 31846 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:40:06.553988 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6312\nI0821 08:40:06.554239 31846 solver.cpp:404]     Test net output #1: loss = 1.20707 (* 1 = 1.20707 loss)\nI0821 08:40:07.871075 31846 solver.cpp:228] Iteration 3100, loss = 0.407366\nI0821 08:40:07.871115 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 08:40:07.871130 31846 solver.cpp:244]     Train net output #1: loss = 0.407366 (* 1 = 0.407366 loss)\nI0821 08:40:07.958590 31846 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:42:24.929494 31846 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:43:45.795308 31846 solver.cpp:404]     Test net output #0: accuracy = 0.74368\nI0821 08:43:45.795567 31846 solver.cpp:404]     Test net output #1: loss = 0.850726 (* 1 = 0.850726 loss)\nI0821 08:43:47.113811 31846 solver.cpp:228] Iteration 3200, loss = 0.391659\nI0821 08:43:47.113853 31846 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 08:43:47.113868 31846 solver.cpp:244]     Train net output #1: loss = 0.391659 (* 1 = 0.391659 loss)\nI0821 08:43:47.200436 31846 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:46:04.111999 31846 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:47:24.978884 31846 solver.cpp:404]     Test net output #0: accuracy = 0.44764\nI0821 08:47:24.979125 31846 solver.cpp:404]     Test net output #1: loss = 2.58688 (* 1 = 2.58688 loss)\nI0821 08:47:26.296541 31846 solver.cpp:228] Iteration 3300, loss = 0.402682\nI0821 08:47:26.296581 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 08:47:26.296597 31846 solver.cpp:244]     Train net 
output #1: loss = 0.402682 (* 1 = 0.402682 loss)\nI0821 08:47:26.386032 31846 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:49:43.347781 31846 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:51:04.224000 31846 solver.cpp:404]     Test net output #0: accuracy = 0.70556\nI0821 08:51:04.224272 31846 solver.cpp:404]     Test net output #1: loss = 0.904028 (* 1 = 0.904028 loss)\nI0821 08:51:05.542151 31846 solver.cpp:228] Iteration 3400, loss = 0.452423\nI0821 08:51:05.542194 31846 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 08:51:05.542218 31846 solver.cpp:244]     Train net output #1: loss = 0.452423 (* 1 = 0.452423 loss)\nI0821 08:51:05.633915 31846 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:53:22.608927 31846 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:54:43.493829 31846 solver.cpp:404]     Test net output #0: accuracy = 0.72196\nI0821 08:54:43.494103 31846 solver.cpp:404]     Test net output #1: loss = 0.885835 (* 1 = 0.885835 loss)\nI0821 08:54:44.812152 31846 solver.cpp:228] Iteration 3500, loss = 0.356756\nI0821 08:54:44.812201 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 08:54:44.812227 31846 solver.cpp:244]     Train net output #1: loss = 0.356755 (* 1 = 0.356755 loss)\nI0821 08:54:44.902640 31846 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 08:57:01.855746 31846 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 08:58:22.752614 31846 solver.cpp:404]     Test net output #0: accuracy = 0.75116\nI0821 08:58:22.752882 31846 solver.cpp:404]     Test net output #1: loss = 0.806504 (* 1 = 0.806504 loss)\nI0821 08:58:24.069821 31846 solver.cpp:228] Iteration 3600, loss = 0.532284\nI0821 08:58:24.069869 31846 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 08:58:24.069893 31846 solver.cpp:244]     Train net output #1: loss = 0.532284 (* 1 = 0.532284 loss)\nI0821 08:58:24.159155 31846 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0821 
09:00:40.814347 31846 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:02:01.734189 31846 solver.cpp:404]     Test net output #0: accuracy = 0.65576\nI0821 09:02:01.734709 31846 solver.cpp:404]     Test net output #1: loss = 1.09117 (* 1 = 1.09117 loss)\nI0821 09:02:03.054244 31846 solver.cpp:228] Iteration 3700, loss = 0.380046\nI0821 09:02:03.054419 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 09:02:03.054502 31846 solver.cpp:244]     Train net output #1: loss = 0.380046 (* 1 = 0.380046 loss)\nI0821 09:02:03.135434 31846 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:04:19.739168 31846 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:05:40.616024 31846 solver.cpp:404]     Test net output #0: accuracy = 0.7014\nI0821 09:05:40.616286 31846 solver.cpp:404]     Test net output #1: loss = 0.970662 (* 1 = 0.970662 loss)\nI0821 09:05:41.934419 31846 solver.cpp:228] Iteration 3800, loss = 0.434036\nI0821 09:05:41.934471 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 09:05:41.934494 31846 solver.cpp:244]     Train net output #1: loss = 0.434036 (* 1 = 0.434036 loss)\nI0821 09:05:42.023303 31846 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:07:58.957979 31846 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:09:19.842135 31846 solver.cpp:404]     Test net output #0: accuracy = 0.66544\nI0821 09:09:19.842397 31846 solver.cpp:404]     Test net output #1: loss = 1.17141 (* 1 = 1.17141 loss)\nI0821 09:09:21.158759 31846 solver.cpp:228] Iteration 3900, loss = 0.392411\nI0821 09:09:21.158805 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:09:21.158821 31846 solver.cpp:244]     Train net output #1: loss = 0.39241 (* 1 = 0.39241 loss)\nI0821 09:09:21.248610 31846 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:11:38.112664 31846 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:12:58.994176 31846 solver.cpp:404]     Test net output #0: 
accuracy = 0.54376\nI0821 09:12:58.994451 31846 solver.cpp:404]     Test net output #1: loss = 2.39786 (* 1 = 2.39786 loss)\nI0821 09:13:00.311923 31846 solver.cpp:228] Iteration 4000, loss = 0.370117\nI0821 09:13:00.311967 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 09:13:00.311983 31846 solver.cpp:244]     Train net output #1: loss = 0.370117 (* 1 = 0.370117 loss)\nI0821 09:13:00.401466 31846 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:15:17.310834 31846 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:16:38.182787 31846 solver.cpp:404]     Test net output #0: accuracy = 0.52348\nI0821 09:16:38.183048 31846 solver.cpp:404]     Test net output #1: loss = 1.7356 (* 1 = 1.7356 loss)\nI0821 09:16:39.500407 31846 solver.cpp:228] Iteration 4100, loss = 0.456134\nI0821 09:16:39.500458 31846 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 09:16:39.500474 31846 solver.cpp:244]     Train net output #1: loss = 0.456134 (* 1 = 0.456134 loss)\nI0821 09:16:39.585969 31846 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:18:56.467535 31846 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:20:17.335706 31846 solver.cpp:404]     Test net output #0: accuracy = 0.60084\nI0821 09:20:17.335947 31846 solver.cpp:404]     Test net output #1: loss = 1.31265 (* 1 = 1.31265 loss)\nI0821 09:20:18.653514 31846 solver.cpp:228] Iteration 4200, loss = 0.432559\nI0821 09:20:18.653560 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:20:18.653576 31846 solver.cpp:244]     Train net output #1: loss = 0.432559 (* 1 = 0.432559 loss)\nI0821 09:20:18.739831 31846 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:22:35.685456 31846 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:23:56.564141 31846 solver.cpp:404]     Test net output #0: accuracy = 0.7268\nI0821 09:23:56.564342 31846 solver.cpp:404]     Test net output #1: loss = 0.812906 (* 1 = 0.812906 loss)\nI0821 
09:23:57.882491 31846 solver.cpp:228] Iteration 4300, loss = 0.395402\nI0821 09:23:57.882535 31846 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 09:23:57.882551 31846 solver.cpp:244]     Train net output #1: loss = 0.395402 (* 1 = 0.395402 loss)\nI0821 09:23:57.972493 31846 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:26:14.923099 31846 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:27:35.788002 31846 solver.cpp:404]     Test net output #0: accuracy = 0.51884\nI0821 09:27:35.788240 31846 solver.cpp:404]     Test net output #1: loss = 1.73334 (* 1 = 1.73334 loss)\nI0821 09:27:37.105832 31846 solver.cpp:228] Iteration 4400, loss = 0.429376\nI0821 09:27:37.105877 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 09:27:37.105893 31846 solver.cpp:244]     Train net output #1: loss = 0.429376 (* 1 = 0.429376 loss)\nI0821 09:27:37.195166 31846 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:29:54.167301 31846 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:31:15.034260 31846 solver.cpp:404]     Test net output #0: accuracy = 0.69116\nI0821 09:31:15.034518 31846 solver.cpp:404]     Test net output #1: loss = 0.960383 (* 1 = 0.960383 loss)\nI0821 09:31:16.352246 31846 solver.cpp:228] Iteration 4500, loss = 0.493247\nI0821 09:31:16.352291 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 09:31:16.352308 31846 solver.cpp:244]     Train net output #1: loss = 0.493247 (* 1 = 0.493247 loss)\nI0821 09:31:16.441098 31846 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:33:33.424499 31846 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:34:54.289288 31846 solver.cpp:404]     Test net output #0: accuracy = 0.433\nI0821 09:34:54.289523 31846 solver.cpp:404]     Test net output #1: loss = 2.02192 (* 1 = 2.02192 loss)\nI0821 09:34:55.607627 31846 solver.cpp:228] Iteration 4600, loss = 0.433324\nI0821 09:34:55.607671 31846 solver.cpp:244]     Train net output #0: 
accuracy = 0.864\nI0821 09:34:55.607688 31846 solver.cpp:244]     Train net output #1: loss = 0.433324 (* 1 = 0.433324 loss)\nI0821 09:34:55.695993 31846 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:37:12.641158 31846 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:38:33.490020 31846 solver.cpp:404]     Test net output #0: accuracy = 0.68264\nI0821 09:38:33.490265 31846 solver.cpp:404]     Test net output #1: loss = 1.04141 (* 1 = 1.04141 loss)\nI0821 09:38:34.808109 31846 solver.cpp:228] Iteration 4700, loss = 0.334354\nI0821 09:38:34.808152 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:38:34.808168 31846 solver.cpp:244]     Train net output #1: loss = 0.334353 (* 1 = 0.334353 loss)\nI0821 09:38:34.890055 31846 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:40:51.887748 31846 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:42:12.726363 31846 solver.cpp:404]     Test net output #0: accuracy = 0.71696\nI0821 09:42:12.726610 31846 solver.cpp:404]     Test net output #1: loss = 0.823201 (* 1 = 0.823201 loss)\nI0821 09:42:14.044951 31846 solver.cpp:228] Iteration 4800, loss = 0.416533\nI0821 09:42:14.044996 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 09:42:14.045011 31846 solver.cpp:244]     Train net output #1: loss = 0.416533 (* 1 = 0.416533 loss)\nI0821 09:42:14.123618 31846 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:44:31.132843 31846 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:45:51.979571 31846 solver.cpp:404]     Test net output #0: accuracy = 0.48476\nI0821 09:45:51.979820 31846 solver.cpp:404]     Test net output #1: loss = 1.41684 (* 1 = 1.41684 loss)\nI0821 09:45:53.298003 31846 solver.cpp:228] Iteration 4900, loss = 0.36892\nI0821 09:45:53.298048 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:45:53.298064 31846 solver.cpp:244]     Train net output #1: loss = 0.36892 (* 1 = 0.36892 loss)\nI0821 
09:45:53.380694 31846 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 09:48:10.278184 31846 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:49:31.119503 31846 solver.cpp:404]     Test net output #0: accuracy = 0.51696\nI0821 09:49:31.119760 31846 solver.cpp:404]     Test net output #1: loss = 2.09209 (* 1 = 2.09209 loss)\nI0821 09:49:32.436470 31846 solver.cpp:228] Iteration 5000, loss = 0.533744\nI0821 09:49:32.436514 31846 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 09:49:32.436530 31846 solver.cpp:244]     Train net output #1: loss = 0.533744 (* 1 = 0.533744 loss)\nI0821 09:49:32.523150 31846 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:51:49.382699 31846 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:53:10.222942 31846 solver.cpp:404]     Test net output #0: accuracy = 0.53384\nI0821 09:53:10.223193 31846 solver.cpp:404]     Test net output #1: loss = 1.56906 (* 1 = 1.56906 loss)\nI0821 09:53:11.541100 31846 solver.cpp:228] Iteration 5100, loss = 0.634229\nI0821 09:53:11.541142 31846 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 09:53:11.541159 31846 solver.cpp:244]     Train net output #1: loss = 0.634229 (* 1 = 0.634229 loss)\nI0821 09:53:11.637625 31846 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 09:55:28.547366 31846 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 09:56:49.400521 31846 solver.cpp:404]     Test net output #0: accuracy = 0.65044\nI0821 09:56:49.400780 31846 solver.cpp:404]     Test net output #1: loss = 1.12922 (* 1 = 1.12922 loss)\nI0821 09:56:50.719005 31846 solver.cpp:228] Iteration 5200, loss = 0.474434\nI0821 09:56:50.719049 31846 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 09:56:50.719065 31846 solver.cpp:244]     Train net output #1: loss = 0.474434 (* 1 = 0.474434 loss)\nI0821 09:56:50.801945 31846 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 09:59:07.703227 31846 solver.cpp:337] Iteration 5300, Testing net 
(#0)\nI0821 10:00:28.565937 31846 solver.cpp:404]     Test net output #0: accuracy = 0.59372\nI0821 10:00:28.566191 31846 solver.cpp:404]     Test net output #1: loss = 1.31417 (* 1 = 1.31417 loss)\nI0821 10:00:29.884097 31846 solver.cpp:228] Iteration 5300, loss = 0.499258\nI0821 10:00:29.884142 31846 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 10:00:29.884160 31846 solver.cpp:244]     Train net output #1: loss = 0.499258 (* 1 = 0.499258 loss)\nI0821 10:00:29.975018 31846 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:02:46.873836 31846 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:04:07.732909 31846 solver.cpp:404]     Test net output #0: accuracy = 0.60852\nI0821 10:04:07.733170 31846 solver.cpp:404]     Test net output #1: loss = 1.52864 (* 1 = 1.52864 loss)\nI0821 10:04:09.052008 31846 solver.cpp:228] Iteration 5400, loss = 0.410302\nI0821 10:04:09.052054 31846 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 10:04:09.052072 31846 solver.cpp:244]     Train net output #1: loss = 0.410302 (* 1 = 0.410302 loss)\nI0821 10:04:09.143759 31846 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:06:25.983621 31846 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:07:46.839576 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6724\nI0821 10:07:46.839846 31846 solver.cpp:404]     Test net output #1: loss = 0.970257 (* 1 = 0.970257 loss)\nI0821 10:07:48.156759 31846 solver.cpp:228] Iteration 5500, loss = 0.51253\nI0821 10:07:48.156802 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 10:07:48.156818 31846 solver.cpp:244]     Train net output #1: loss = 0.51253 (* 1 = 0.51253 loss)\nI0821 10:07:48.243788 31846 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:10:05.238616 31846 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:11:26.113255 31846 solver.cpp:404]     Test net output #0: accuracy = 0.71436\nI0821 10:11:26.113523 31846 solver.cpp:404]     
Test net output #1: loss = 0.928663 (* 1 = 0.928663 loss)\nI0821 10:11:27.431620 31846 solver.cpp:228] Iteration 5600, loss = 0.483461\nI0821 10:11:27.431665 31846 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 10:11:27.431681 31846 solver.cpp:244]     Train net output #1: loss = 0.483461 (* 1 = 0.483461 loss)\nI0821 10:11:27.515086 31846 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:13:44.481541 31846 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:15:05.352088 31846 solver.cpp:404]     Test net output #0: accuracy = 0.50188\nI0821 10:15:05.352355 31846 solver.cpp:404]     Test net output #1: loss = 1.958 (* 1 = 1.958 loss)\nI0821 10:15:06.669054 31846 solver.cpp:228] Iteration 5700, loss = 0.485322\nI0821 10:15:06.669098 31846 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 10:15:06.669114 31846 solver.cpp:244]     Train net output #1: loss = 0.485322 (* 1 = 0.485322 loss)\nI0821 10:15:06.754001 31846 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:17:23.743050 31846 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:18:44.599721 31846 solver.cpp:404]     Test net output #0: accuracy = 0.57504\nI0821 10:18:44.599982 31846 solver.cpp:404]     Test net output #1: loss = 1.5565 (* 1 = 1.5565 loss)\nI0821 10:18:45.917233 31846 solver.cpp:228] Iteration 5800, loss = 0.429876\nI0821 10:18:45.917268 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 10:18:45.917282 31846 solver.cpp:244]     Train net output #1: loss = 0.429876 (* 1 = 0.429876 loss)\nI0821 10:18:46.008410 31846 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:21:02.860508 31846 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:22:23.711457 31846 solver.cpp:404]     Test net output #0: accuracy = 0.67276\nI0821 10:22:23.711738 31846 solver.cpp:404]     Test net output #1: loss = 1.07743 (* 1 = 1.07743 loss)\nI0821 10:22:25.029001 31846 solver.cpp:228] Iteration 5900, loss = 0.356073\nI0821 
10:22:25.029044 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 10:22:25.029060 31846 solver.cpp:244]     Train net output #1: loss = 0.356073 (* 1 = 0.356073 loss)\nI0821 10:22:25.117892 31846 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:24:42.056232 31846 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:26:02.908370 31846 solver.cpp:404]     Test net output #0: accuracy = 0.51676\nI0821 10:26:02.908619 31846 solver.cpp:404]     Test net output #1: loss = 1.88392 (* 1 = 1.88392 loss)\nI0821 10:26:04.226384 31846 solver.cpp:228] Iteration 6000, loss = 0.470325\nI0821 10:26:04.226428 31846 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 10:26:04.226445 31846 solver.cpp:244]     Train net output #1: loss = 0.470325 (* 1 = 0.470325 loss)\nI0821 10:26:04.309801 31846 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:28:21.301717 31846 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:29:42.164775 31846 solver.cpp:404]     Test net output #0: accuracy = 0.48788\nI0821 10:29:42.165041 31846 solver.cpp:404]     Test net output #1: loss = 2.0083 (* 1 = 2.0083 loss)\nI0821 10:29:43.481724 31846 solver.cpp:228] Iteration 6100, loss = 0.560524\nI0821 10:29:43.481770 31846 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 10:29:43.481786 31846 solver.cpp:244]     Train net output #1: loss = 0.560524 (* 1 = 0.560524 loss)\nI0821 10:29:43.565138 31846 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:32:00.569545 31846 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:33:21.432065 31846 solver.cpp:404]     Test net output #0: accuracy = 0.75028\nI0821 10:33:21.432329 31846 solver.cpp:404]     Test net output #1: loss = 0.729891 (* 1 = 0.729891 loss)\nI0821 10:33:22.748893 31846 solver.cpp:228] Iteration 6200, loss = 0.357736\nI0821 10:33:22.748936 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 10:33:22.748951 31846 solver.cpp:244]     Train net 
output #1: loss = 0.357736 (* 1 = 0.357736 loss)\nI0821 10:33:22.834919 31846 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:35:39.845271 31846 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:37:00.698218 31846 solver.cpp:404]     Test net output #0: accuracy = 0.63288\nI0821 10:37:00.698472 31846 solver.cpp:404]     Test net output #1: loss = 1.12928 (* 1 = 1.12928 loss)\nI0821 10:37:02.015576 31846 solver.cpp:228] Iteration 6300, loss = 0.48823\nI0821 10:37:02.015616 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 10:37:02.015631 31846 solver.cpp:244]     Train net output #1: loss = 0.48823 (* 1 = 0.48823 loss)\nI0821 10:37:02.097331 31846 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:39:19.492049 31846 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:40:40.349782 31846 solver.cpp:404]     Test net output #0: accuracy = 0.63856\nI0821 10:40:40.350052 31846 solver.cpp:404]     Test net output #1: loss = 1.18037 (* 1 = 1.18037 loss)\nI0821 10:40:41.666685 31846 solver.cpp:228] Iteration 6400, loss = 0.318473\nI0821 10:40:41.666726 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 10:40:41.666743 31846 solver.cpp:244]     Train net output #1: loss = 0.318473 (* 1 = 0.318473 loss)\nI0821 10:40:41.757611 31846 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:42:59.096992 31846 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:44:19.953905 31846 solver.cpp:404]     Test net output #0: accuracy = 0.63576\nI0821 10:44:19.954159 31846 solver.cpp:404]     Test net output #1: loss = 1.33339 (* 1 = 1.33339 loss)\nI0821 10:44:21.270819 31846 solver.cpp:228] Iteration 6500, loss = 0.3207\nI0821 10:44:21.270862 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 10:44:21.270879 31846 solver.cpp:244]     Train net output #1: loss = 0.3207 (* 1 = 0.3207 loss)\nI0821 10:44:21.358140 31846 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 10:46:38.749052 31846 
solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:47:59.601143 31846 solver.cpp:404]     Test net output #0: accuracy = 0.67196\nI0821 10:47:59.601405 31846 solver.cpp:404]     Test net output #1: loss = 1.12644 (* 1 = 1.12644 loss)\nI0821 10:48:00.918992 31846 solver.cpp:228] Iteration 6600, loss = 0.393263\nI0821 10:48:00.919036 31846 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 10:48:00.919052 31846 solver.cpp:244]     Train net output #1: loss = 0.393263 (* 1 = 0.393263 loss)\nI0821 10:48:01.009848 31846 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:50:18.342353 31846 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:51:39.197244 31846 solver.cpp:404]     Test net output #0: accuracy = 0.38076\nI0821 10:51:39.197525 31846 solver.cpp:404]     Test net output #1: loss = 3.73136 (* 1 = 3.73136 loss)\nI0821 10:51:40.515069 31846 solver.cpp:228] Iteration 6700, loss = 0.442672\nI0821 10:51:40.515111 31846 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 10:51:40.515127 31846 solver.cpp:244]     Train net output #1: loss = 0.442672 (* 1 = 0.442672 loss)\nI0821 10:51:40.606645 31846 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:53:57.966657 31846 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 10:55:18.823829 31846 solver.cpp:404]     Test net output #0: accuracy = 0.62052\nI0821 10:55:18.824070 31846 solver.cpp:404]     Test net output #1: loss = 1.42122 (* 1 = 1.42122 loss)\nI0821 10:55:20.141516 31846 solver.cpp:228] Iteration 6800, loss = 0.419624\nI0821 10:55:20.141558 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 10:55:20.141575 31846 solver.cpp:244]     Train net output #1: loss = 0.419624 (* 1 = 0.419624 loss)\nI0821 10:55:20.229658 31846 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 10:57:37.616963 31846 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 10:58:58.478147 31846 solver.cpp:404]     Test net output #0: accuracy = 0.70904\nI0821 
10:58:58.478405 31846 solver.cpp:404]     Test net output #1: loss = 0.936406 (* 1 = 0.936406 loss)\nI0821 10:58:59.795053 31846 solver.cpp:228] Iteration 6900, loss = 0.377075\nI0821 10:58:59.795099 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 10:58:59.795114 31846 solver.cpp:244]     Train net output #1: loss = 0.377075 (* 1 = 0.377075 loss)\nI0821 10:58:59.878764 31846 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:01:17.275313 31846 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:02:38.142530 31846 solver.cpp:404]     Test net output #0: accuracy = 0.70544\nI0821 11:02:38.142805 31846 solver.cpp:404]     Test net output #1: loss = 0.940543 (* 1 = 0.940543 loss)\nI0821 11:02:39.459890 31846 solver.cpp:228] Iteration 7000, loss = 0.359011\nI0821 11:02:39.459936 31846 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 11:02:39.459952 31846 solver.cpp:244]     Train net output #1: loss = 0.359011 (* 1 = 0.359011 loss)\nI0821 11:02:39.551146 31846 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:04:56.787214 31846 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:06:17.654209 31846 solver.cpp:404]     Test net output #0: accuracy = 0.59192\nI0821 11:06:17.654469 31846 solver.cpp:404]     Test net output #1: loss = 1.34037 (* 1 = 1.34037 loss)\nI0821 11:06:18.971352 31846 solver.cpp:228] Iteration 7100, loss = 0.407871\nI0821 11:06:18.971396 31846 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 11:06:18.971412 31846 solver.cpp:244]     Train net output #1: loss = 0.407871 (* 1 = 0.407871 loss)\nI0821 11:06:19.053558 31846 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:08:36.345271 31846 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:09:57.199038 31846 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI0821 11:09:57.199307 31846 solver.cpp:404]     Test net output #1: loss = 0.759356 (* 1 = 0.759356 loss)\nI0821 11:09:58.515980 31846 
solver.cpp:228] Iteration 7200, loss = 0.32346\nI0821 11:09:58.516021 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:09:58.516037 31846 solver.cpp:244]     Train net output #1: loss = 0.32346 (* 1 = 0.32346 loss)\nI0821 11:09:58.603180 31846 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:12:15.850888 31846 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:13:36.725040 31846 solver.cpp:404]     Test net output #0: accuracy = 0.69556\nI0821 11:13:36.725313 31846 solver.cpp:404]     Test net output #1: loss = 0.956836 (* 1 = 0.956836 loss)\nI0821 11:13:38.044431 31846 solver.cpp:228] Iteration 7300, loss = 0.324204\nI0821 11:13:38.044482 31846 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:13:38.044498 31846 solver.cpp:244]     Train net output #1: loss = 0.324204 (* 1 = 0.324204 loss)\nI0821 11:13:38.143901 31846 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:15:55.511482 31846 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:17:16.384385 31846 solver.cpp:404]     Test net output #0: accuracy = 0.58468\nI0821 11:17:16.384655 31846 solver.cpp:404]     Test net output #1: loss = 1.70122 (* 1 = 1.70122 loss)\nI0821 11:17:17.703202 31846 solver.cpp:228] Iteration 7400, loss = 0.331235\nI0821 11:17:17.703248 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:17:17.703263 31846 solver.cpp:244]     Train net output #1: loss = 0.331235 (* 1 = 0.331235 loss)\nI0821 11:17:17.785248 31846 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:19:35.107151 31846 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:20:55.983171 31846 solver.cpp:404]     Test net output #0: accuracy = 0.729\nI0821 11:20:55.983458 31846 solver.cpp:404]     Test net output #1: loss = 0.785826 (* 1 = 0.785826 loss)\nI0821 11:20:57.300626 31846 solver.cpp:228] Iteration 7500, loss = 0.295059\nI0821 11:20:57.300670 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 
11:20:57.300686 31846 solver.cpp:244]     Train net output #1: loss = 0.295059 (* 1 = 0.295059 loss)\nI0821 11:20:57.381614 31846 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:23:14.724442 31846 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:24:35.593991 31846 solver.cpp:404]     Test net output #0: accuracy = 0.73432\nI0821 11:24:35.594265 31846 solver.cpp:404]     Test net output #1: loss = 0.867934 (* 1 = 0.867934 loss)\nI0821 11:24:36.912122 31846 solver.cpp:228] Iteration 7600, loss = 0.351157\nI0821 11:24:36.912166 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:24:36.912184 31846 solver.cpp:244]     Train net output #1: loss = 0.351157 (* 1 = 0.351157 loss)\nI0821 11:24:36.992133 31846 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:26:54.299473 31846 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:28:15.164319 31846 solver.cpp:404]     Test net output #0: accuracy = 0.73432\nI0821 11:28:15.164575 31846 solver.cpp:404]     Test net output #1: loss = 0.807588 (* 1 = 0.807588 loss)\nI0821 11:28:16.481426 31846 solver.cpp:228] Iteration 7700, loss = 0.409717\nI0821 11:28:16.481477 31846 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 11:28:16.481494 31846 solver.cpp:244]     Train net output #1: loss = 0.409717 (* 1 = 0.409717 loss)\nI0821 11:28:16.562290 31846 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:30:33.848625 31846 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:31:54.715418 31846 solver.cpp:404]     Test net output #0: accuracy = 0.6244\nI0821 11:31:54.715662 31846 solver.cpp:404]     Test net output #1: loss = 1.48542 (* 1 = 1.48542 loss)\nI0821 11:31:56.032486 31846 solver.cpp:228] Iteration 7800, loss = 0.294159\nI0821 11:31:56.032531 31846 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 11:31:56.032546 31846 solver.cpp:244]     Train net output #1: loss = 0.294159 (* 1 = 0.294159 loss)\nI0821 11:31:56.114969 31846 
sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:34:13.417874 31846 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 11:35:34.298674 31846 solver.cpp:404]     Test net output #0: accuracy = 0.75204\nI0821 11:35:34.298869 31846 solver.cpp:404]     Test net output #1: loss = 0.826894 (* 1 = 0.826894 loss)\nI0821 11:35:35.616519 31846 solver.cpp:228] Iteration 7900, loss = 0.29312\nI0821 11:35:35.616565 31846 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 11:35:35.616580 31846 solver.cpp:244]     Train net output #1: loss = 0.29312 (* 1 = 0.29312 loss)\nI0821 11:35:35.697762 31846 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:37:53.048581 31846 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:39:13.916538 31846 solver.cpp:404]     Test net output #0: accuracy = 0.65216\nI0821 11:39:13.916752 31846 solver.cpp:404]     Test net output #1: loss = 1.61345 (* 1 = 1.61345 loss)\nI0821 11:39:15.235399 31846 solver.cpp:228] Iteration 8000, loss = 0.386227\nI0821 11:39:15.235450 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:39:15.235466 31846 solver.cpp:244]     Train net output #1: loss = 0.386227 (* 1 = 0.386227 loss)\nI0821 11:39:15.315553 31846 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:41:32.679440 31846 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:42:54.133802 31846 solver.cpp:404]     Test net output #0: accuracy = 0.74192\nI0821 11:42:54.134109 31846 solver.cpp:404]     Test net output #1: loss = 0.91142 (* 1 = 0.91142 loss)\nI0821 11:42:55.453302 31846 solver.cpp:228] Iteration 8100, loss = 0.399053\nI0821 11:42:55.453351 31846 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 11:42:55.453367 31846 solver.cpp:244]     Train net output #1: loss = 0.399053 (* 1 = 0.399053 loss)\nI0821 11:42:55.544504 31846 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:45:12.899020 31846 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 
11:46:33.740556 31846 solver.cpp:404]     Test net output #0: accuracy = 0.62144\nI0821 11:46:33.740766 31846 solver.cpp:404]     Test net output #1: loss = 1.52705 (* 1 = 1.52705 loss)\nI0821 11:46:35.057516 31846 solver.cpp:228] Iteration 8200, loss = 0.390988\nI0821 11:46:35.057566 31846 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 11:46:35.057582 31846 solver.cpp:244]     Train net output #1: loss = 0.390988 (* 1 = 0.390988 loss)\nI0821 11:46:35.145447 31846 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:48:52.513684 31846 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:50:13.353335 31846 solver.cpp:404]     Test net output #0: accuracy = 0.77528\nI0821 11:50:13.353544 31846 solver.cpp:404]     Test net output #1: loss = 0.6633 (* 1 = 0.6633 loss)\nI0821 11:50:14.670996 31846 solver.cpp:228] Iteration 8300, loss = 0.307522\nI0821 11:50:14.671042 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 11:50:14.671058 31846 solver.cpp:244]     Train net output #1: loss = 0.307522 (* 1 = 0.307522 loss)\nI0821 11:50:14.751746 31846 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:52:32.130095 31846 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:53:52.961551 31846 solver.cpp:404]     Test net output #0: accuracy = 0.77328\nI0821 11:53:52.961761 31846 solver.cpp:404]     Test net output #1: loss = 0.820061 (* 1 = 0.820061 loss)\nI0821 11:53:54.278348 31846 solver.cpp:228] Iteration 8400, loss = 0.282041\nI0821 11:53:54.278394 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 11:53:54.278410 31846 solver.cpp:244]     Train net output #1: loss = 0.282041 (* 1 = 0.282041 loss)\nI0821 11:53:54.368340 31846 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 11:56:11.732465 31846 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 11:57:32.569845 31846 solver.cpp:404]     Test net output #0: accuracy = 0.83472\nI0821 11:57:32.570087 31846 solver.cpp:404]     Test net 
output #1: loss = 0.531833 (* 1 = 0.531833 loss)\nI0821 11:57:33.888016 31846 solver.cpp:228] Iteration 8500, loss = 0.164278\nI0821 11:57:33.888061 31846 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:57:33.888075 31846 solver.cpp:244]     Train net output #1: loss = 0.164278 (* 1 = 0.164278 loss)\nI0821 11:57:33.978374 31846 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 11:59:51.418550 31846 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:01:12.248934 31846 solver.cpp:404]     Test net output #0: accuracy = 0.73868\nI0821 12:01:12.249145 31846 solver.cpp:404]     Test net output #1: loss = 1.03791 (* 1 = 1.03791 loss)\nI0821 12:01:13.566741 31846 solver.cpp:228] Iteration 8600, loss = 0.321147\nI0821 12:01:13.566787 31846 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 12:01:13.566802 31846 solver.cpp:244]     Train net output #1: loss = 0.321147 (* 1 = 0.321147 loss)\nI0821 12:01:13.648983 31846 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:03:31.021302 31846 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:04:51.845854 31846 solver.cpp:404]     Test net output #0: accuracy = 0.82216\nI0821 12:04:51.846069 31846 solver.cpp:404]     Test net output #1: loss = 0.554655 (* 1 = 0.554655 loss)\nI0821 12:04:53.160370 31846 solver.cpp:228] Iteration 8700, loss = 0.196918\nI0821 12:04:53.160413 31846 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 12:04:53.160430 31846 solver.cpp:244]     Train net output #1: loss = 0.196918 (* 1 = 0.196918 loss)\nI0821 12:04:53.242774 31846 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:07:10.604140 31846 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:08:31.439152 31846 solver.cpp:404]     Test net output #0: accuracy = 0.80908\nI0821 12:08:31.439366 31846 solver.cpp:404]     Test net output #1: loss = 0.643717 (* 1 = 0.643717 loss)\nI0821 12:08:32.757192 31846 solver.cpp:228] Iteration 8800, loss = 0.19928\nI0821 
12:08:32.757236 31846 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 12:08:32.757251 31846 solver.cpp:244]     Train net output #1: loss = 0.19928 (* 1 = 0.19928 loss)\nI0821 12:08:32.839512 31846 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:10:49.703748 31846 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:12:10.532815 31846 solver.cpp:404]     Test net output #0: accuracy = 0.77236\nI0821 12:12:10.533063 31846 solver.cpp:404]     Test net output #1: loss = 0.818784 (* 1 = 0.818784 loss)\nI0821 12:12:11.850787 31846 solver.cpp:228] Iteration 8900, loss = 0.309974\nI0821 12:12:11.850831 31846 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 12:12:11.850847 31846 solver.cpp:244]     Train net output #1: loss = 0.309974 (* 1 = 0.309974 loss)\nI0821 12:12:11.931833 31846 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:14:28.784113 31846 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:15:49.616194 31846 solver.cpp:404]     Test net output #0: accuracy = 0.8408\nI0821 12:15:49.616457 31846 solver.cpp:404]     Test net output #1: loss = 0.520315 (* 1 = 0.520315 loss)\nI0821 12:15:50.933912 31846 solver.cpp:228] Iteration 9000, loss = 0.289625\nI0821 12:15:50.933955 31846 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 12:15:50.933970 31846 solver.cpp:244]     Train net output #1: loss = 0.289625 (* 1 = 0.289625 loss)\nI0821 12:15:51.017699 31846 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:18:07.873978 31846 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:19:28.712113 31846 solver.cpp:404]     Test net output #0: accuracy = 0.81228\nI0821 12:19:28.712378 31846 solver.cpp:404]     Test net output #1: loss = 0.621964 (* 1 = 0.621964 loss)\nI0821 12:19:30.030490 31846 solver.cpp:228] Iteration 9100, loss = 0.134752\nI0821 12:19:30.030537 31846 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 12:19:30.030553 31846 solver.cpp:244]     Train net 
output #1: loss = 0.134752 (* 1 = 0.134752 loss)\nI0821 12:19:30.120553 31846 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:21:46.919155 31846 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 12:23:07.761363 31846 solver.cpp:404]     Test net output #0: accuracy = 0.77516\nI0821 12:23:07.761620 31846 solver.cpp:404]     Test net output #1: loss = 0.956356 (* 1 = 0.956356 loss)\nI0821 12:23:09.079711 31846 solver.cpp:228] Iteration 9200, loss = 0.188586\nI0821 12:23:09.079753 31846 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 12:23:09.079769 31846 solver.cpp:244]     Train net output #1: loss = 0.188586 (* 1 = 0.188586 loss)\nI0821 12:23:09.157996 31846 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:25:26.013559 31846 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:26:46.856097 31846 solver.cpp:404]     Test net output #0: accuracy = 0.8414\nI0821 12:26:46.856355 31846 solver.cpp:404]     Test net output #1: loss = 0.532197 (* 1 = 0.532197 loss)\nI0821 12:26:48.174877 31846 solver.cpp:228] Iteration 9300, loss = 0.138431\nI0821 12:26:48.174921 31846 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:26:48.174935 31846 solver.cpp:244]     Train net output #1: loss = 0.138431 (* 1 = 0.138431 loss)\nI0821 12:26:48.266492 31846 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:29:05.123988 31846 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:30:25.969554 31846 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0821 12:30:25.969815 31846 solver.cpp:404]     Test net output #1: loss = 0.453286 (* 1 = 0.453286 loss)\nI0821 12:30:27.286911 31846 solver.cpp:228] Iteration 9400, loss = 0.193742\nI0821 12:30:27.286954 31846 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 12:30:27.286970 31846 solver.cpp:244]     Train net output #1: loss = 0.193742 (* 1 = 0.193742 loss)\nI0821 12:30:27.375075 31846 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 
12:32:44.165848 31846 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:34:05.016232 31846 solver.cpp:404]     Test net output #0: accuracy = 0.85848\nI0821 12:34:05.016495 31846 solver.cpp:404]     Test net output #1: loss = 0.475788 (* 1 = 0.475788 loss)\nI0821 12:34:06.333194 31846 solver.cpp:228] Iteration 9500, loss = 0.203437\nI0821 12:34:06.333238 31846 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 12:34:06.333254 31846 solver.cpp:244]     Train net output #1: loss = 0.203437 (* 1 = 0.203437 loss)\nI0821 12:34:06.426234 31846 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:36:23.314926 31846 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:37:44.170657 31846 solver.cpp:404]     Test net output #0: accuracy = 0.8228\nI0821 12:37:44.170905 31846 solver.cpp:404]     Test net output #1: loss = 0.685356 (* 1 = 0.685356 loss)\nI0821 12:37:45.488083 31846 solver.cpp:228] Iteration 9600, loss = 0.085502\nI0821 12:37:45.488129 31846 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:37:45.488145 31846 solver.cpp:244]     Train net output #1: loss = 0.0855019 (* 1 = 0.0855019 loss)\nI0821 12:37:45.576412 31846 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:40:02.541823 31846 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:41:23.400512 31846 solver.cpp:404]     Test net output #0: accuracy = 0.85912\nI0821 12:41:23.400765 31846 solver.cpp:404]     Test net output #1: loss = 0.582944 (* 1 = 0.582944 loss)\nI0821 12:41:24.717892 31846 solver.cpp:228] Iteration 9700, loss = 0.0533319\nI0821 12:41:24.717936 31846 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:41:24.717952 31846 solver.cpp:244]     Train net output #1: loss = 0.0533318 (* 1 = 0.0533318 loss)\nI0821 12:41:24.811650 31846 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:43:41.683724 31846 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 12:45:02.541559 31846 solver.cpp:404]     Test net 
output #0: accuracy = 0.88488\nI0821 12:45:02.541826 31846 solver.cpp:404]     Test net output #1: loss = 0.435675 (* 1 = 0.435675 loss)\nI0821 12:45:03.859344 31846 solver.cpp:228] Iteration 9800, loss = 0.0502184\nI0821 12:45:03.859387 31846 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:45:03.859403 31846 solver.cpp:244]     Train net output #1: loss = 0.0502183 (* 1 = 0.0502183 loss)\nI0821 12:45:03.941244 31846 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:47:20.816910 31846 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:48:41.682070 31846 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0821 12:48:41.682308 31846 solver.cpp:404]     Test net output #1: loss = 0.47024 (* 1 = 0.47024 loss)\nI0821 12:48:42.999045 31846 solver.cpp:228] Iteration 9900, loss = 0.0560819\nI0821 12:48:42.999089 31846 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:48:42.999104 31846 solver.cpp:244]     Train net output #1: loss = 0.0560818 (* 1 = 0.0560818 loss)\nI0821 12:48:43.091538 31846 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:50:59.950704 31846 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kMom95Fig11_iter_10000.caffemodel\nI0821 12:51:00.170464 31846 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kMom95Fig11_iter_10000.solverstate\nI0821 12:51:00.610749 31846 solver.cpp:317] Iteration 10000, loss = 0.0054841\nI0821 12:51:00.610795 31846 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:52:21.471532 31846 solver.cpp:404]     Test net output #0: accuracy = 0.90744\nI0821 12:52:21.471815 31846 solver.cpp:404]     Test net output #1: loss = 0.35665 (* 1 = 0.35665 loss)\nI0821 12:52:21.471828 31846 solver.cpp:322] Optimization Done.\nI0821 12:52:26.802824 31846 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kNestFig9",
    "content": "I0817 16:29:31.332793 17472 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:29:31.335319 17472 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:29:31.336544 17472 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:29:31.337769 17472 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:29:31.338990 17472 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:29:31.340473 17472 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:29:31.341703 17472 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:29:31.342931 17472 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:29:31.344167 17472 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:29:31.765518 17472 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kNestFig9\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\ntype: \"Nesterov\"\nmax_lr: 3\nI0817 16:29:31.769240 17472 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:29:31.787233 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:31.787308 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:31.788398 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:29:31.788453 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:29:31.788481 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:29:31.788501 17472 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:29:31.788522 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:29:31.788539 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:29:31.788558 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:29:31.788575 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:29:31.788596 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:29:31.788614 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:29:31.788632 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:29:31.788648 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:29:31.788668 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:29:31.788686 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:29:31.788707 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:29:31.788724 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:29:31.788743 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:29:31.788760 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:29:31.788779 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:29:31.788796 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:29:31.788830 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:29:31.788848 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:29:31.788872 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:29:31.788890 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:29:31.788908 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:29:31.788923 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:29:31.788940 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:29:31.788955 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:29:31.788972 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:29:31.788991 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:29:31.789011 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:29:31.789028 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:29:31.789047 17472 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:29:31.789063 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:29:31.789083 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:29:31.789100 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:29:31.789139 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:29:31.789157 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:29:31.789175 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:29:31.789194 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:29:31.789217 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:29:31.789234 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:29:31.789252 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:29:31.789269 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:29:31.789289 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:29:31.789306 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:29:31.789324 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:29:31.789340 17472 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:29:31.789358 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:29:31.789376 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:29:31.789393 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:29:31.789422 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:29:31.789441 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:29:31.789459 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:29:31.789479 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:29:31.789494 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:29:31.791250 17472 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n  
  bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0817 16:29:31.793385 17472 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:29:31.794610 17472 net.cpp:100] Creating Layer dataLayer\nI0817 16:29:31.794697 17472 net.cpp:408] dataLayer -> data_top\nI0817 16:29:31.794879 17472 net.cpp:408] dataLayer -> label\nI0817 16:29:31.794998 17472 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:29:31.804155 17477 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:29:31.826515 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:31.834045 17472 net.cpp:150] Setting up dataLayer\nI0817 16:29:31.834116 17472 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:29:31.834132 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.834139 17472 net.cpp:165] Memory required for data: 1536500\nI0817 16:29:31.834156 17472 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:29:31.834170 17472 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:29:31.834178 17472 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:29:31.834197 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:29:31.834213 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:29:31.834357 17472 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:29:31.834372 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.834379 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.834384 17472 net.cpp:165] Memory required for data: 1537500\nI0817 16:29:31.834389 17472 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:29:31.834457 17472 net.cpp:100] Creating Layer pre_conv\nI0817 16:29:31.834470 17472 net.cpp:434] pre_conv <- data_top\nI0817 16:29:31.834482 17472 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:29:31.836349 17472 net.cpp:150] Setting up pre_conv\nI0817 16:29:31.836369 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.836374 17472 net.cpp:165] Memory required for data: 9729500\nI0817 16:29:31.836434 17472 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:29:31.836500 17472 net.cpp:100] Creating Layer pre_bn\nI0817 16:29:31.836511 17472 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:29:31.836521 17472 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:29:31.837172 17472 net.cpp:150] Setting up pre_bn\nI0817 16:29:31.837190 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.837196 17472 net.cpp:165] Memory required for data: 17921500\nI0817 16:29:31.837214 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:31.837235 17478 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:31.837266 17472 net.cpp:100] Creating Layer pre_scale\nI0817 16:29:31.837277 17472 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:29:31.837286 17472 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:29:31.837460 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:31.837715 17472 net.cpp:150] Setting up pre_scale\nI0817 16:29:31.837730 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.837736 17472 net.cpp:165] Memory required for data: 26113500\nI0817 16:29:31.837754 17472 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:29:31.837796 17472 net.cpp:100] Creating Layer pre_relu\nI0817 16:29:31.837805 17472 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:29:31.837817 17472 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:29:31.837828 17472 net.cpp:150] Setting up pre_relu\nI0817 16:29:31.837836 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.837841 17472 
net.cpp:165] Memory required for data: 34305500\nI0817 16:29:31.837846 17472 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:29:31.837852 17472 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:29:31.837857 17472 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:29:31.837867 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:29:31.837878 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:29:31.837923 17472 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:29:31.837934 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.837940 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.837945 17472 net.cpp:165] Memory required for data: 50689500\nI0817 16:29:31.837950 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:29:31.837965 17472 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:29:31.837971 17472 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:29:31.837980 17472 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:29:31.838312 17472 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:29:31.838327 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.838333 17472 net.cpp:165] Memory required for data: 58881500\nI0817 16:29:31.838346 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:29:31.838361 17472 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:29:31.838367 17472 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:29:31.838376 17472 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:29:31.838605 17472 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:29:31.838618 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.838624 17472 net.cpp:165] Memory required for data: 67073500\nI0817 16:29:31.838634 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 
16:29:31.838645 17472 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:29:31.838651 17472 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:29:31.838660 17472 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.838712 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:31.838846 17472 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:29:31.838860 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.838865 17472 net.cpp:165] Memory required for data: 75265500\nI0817 16:29:31.838873 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:29:31.838889 17472 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:29:31.838896 17472 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:29:31.838906 17472 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.838915 17472 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:29:31.838922 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.838927 17472 net.cpp:165] Memory required for data: 83457500\nI0817 16:29:31.838932 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:29:31.838946 17472 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:29:31.838953 17472 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:29:31.838960 17472 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:29:31.839267 17472 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:29:31.839280 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.839285 17472 net.cpp:165] Memory required for data: 91649500\nI0817 16:29:31.839294 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:29:31.839308 17472 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:29:31.839313 17472 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:29:31.839321 17472 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:29:31.839546 17472 net.cpp:150] Setting up 
L1_b1_cbr2_bn\nI0817 16:29:31.839560 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.839565 17472 net.cpp:165] Memory required for data: 99841500\nI0817 16:29:31.839578 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:31.839591 17472 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:29:31.839597 17472 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:29:31.839606 17472 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:29:31.839663 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:31.839805 17472 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:29:31.839818 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.839823 17472 net.cpp:165] Memory required for data: 108033500\nI0817 16:29:31.839831 17472 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:29:31.839882 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:29:31.839893 17472 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:29:31.839901 17472 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:29:31.839912 17472 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:29:31.839985 17472 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:29:31.839999 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840004 17472 net.cpp:165] Memory required for data: 116225500\nI0817 16:29:31.840010 17472 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:29:31.840018 17472 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:29:31.840025 17472 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:29:31.840035 17472 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:29:31.840045 17472 net.cpp:150] Setting up L1_b1_relu\nI0817 16:29:31.840052 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840057 17472 net.cpp:165] Memory required for data: 124417500\nI0817 16:29:31.840062 
17472 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.840071 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.840076 17472 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:29:31.840083 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:31.840093 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:31.840142 17472 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.840155 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840162 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840173 17472 net.cpp:165] Memory required for data: 140801500\nI0817 16:29:31.840178 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:29:31.840190 17472 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:29:31.840196 17472 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:31.840209 17472 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:29:31.840507 17472 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:29:31.840520 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840525 17472 net.cpp:165] Memory required for data: 148993500\nI0817 16:29:31.840544 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:29:31.840553 17472 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:29:31.840559 17472 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:29:31.840572 17472 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:29:31.840811 17472 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:29:31.840827 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.840832 17472 net.cpp:165] Memory required for data: 157185500\nI0817 16:29:31.840843 17472 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:31.840852 17472 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:29:31.840857 17472 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:29:31.840865 17472 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.840915 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:31.841053 17472 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:29:31.841065 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.841070 17472 net.cpp:165] Memory required for data: 165377500\nI0817 16:29:31.841079 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:29:31.841094 17472 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:29:31.841099 17472 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:29:31.841116 17472 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.841127 17472 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:29:31.841135 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.841140 17472 net.cpp:165] Memory required for data: 173569500\nI0817 16:29:31.841143 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:29:31.841154 17472 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:29:31.841161 17472 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:29:31.841171 17472 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:29:31.841467 17472 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:29:31.841480 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.841485 17472 net.cpp:165] Memory required for data: 181761500\nI0817 16:29:31.841495 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:29:31.841503 17472 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:29:31.841509 17472 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:29:31.841519 17472 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:29:31.841751 17472 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:29:31.841764 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.841769 17472 net.cpp:165] Memory required for data: 189953500\nI0817 16:29:31.841787 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:31.841797 17472 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:29:31.841804 17472 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:29:31.841814 17472 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:29:31.841866 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:31.842000 17472 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:29:31.842012 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.842016 17472 net.cpp:165] Memory required for data: 198145500\nI0817 16:29:31.842025 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:29:31.842044 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:29:31.842051 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:29:31.842057 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:31.842066 17472 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:29:31.842098 17472 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:29:31.842118 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.842123 17472 net.cpp:165] Memory required for data: 206337500\nI0817 16:29:31.842128 17472 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:29:31.842136 17472 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:29:31.842142 17472 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:29:31.842149 17472 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:29:31.842157 17472 net.cpp:150] Setting up L1_b2_relu\nI0817 16:29:31.842164 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:29:31.842170 17472 net.cpp:165] Memory required for data: 214529500\nI0817 16:29:31.842173 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.842180 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.842185 17472 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:29:31.842196 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:31.842206 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:31.842247 17472 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.842258 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.842264 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.842269 17472 net.cpp:165] Memory required for data: 230913500\nI0817 16:29:31.842274 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:29:31.842288 17472 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:29:31.842294 17472 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:31.842303 17472 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:29:31.842605 17472 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:29:31.842619 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.842623 17472 net.cpp:165] Memory required for data: 239105500\nI0817 16:29:31.842633 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:29:31.842646 17472 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:29:31.842653 17472 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:29:31.842665 17472 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:29:31.842895 17472 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:29:31.842907 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:29:31.842912 17472 net.cpp:165] Memory required for data: 247297500\nI0817 16:29:31.842923 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:31.842931 17472 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:29:31.842937 17472 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:29:31.842947 17472 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.842998 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:31.843140 17472 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:29:31.843158 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.843163 17472 net.cpp:165] Memory required for data: 255489500\nI0817 16:29:31.843171 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:29:31.843179 17472 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:29:31.843185 17472 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:29:31.843192 17472 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.843201 17472 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:29:31.843215 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.843220 17472 net.cpp:165] Memory required for data: 263681500\nI0817 16:29:31.843225 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:29:31.843240 17472 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:29:31.843245 17472 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:29:31.843256 17472 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:29:31.843555 17472 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:29:31.843569 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.843575 17472 net.cpp:165] Memory required for data: 271873500\nI0817 16:29:31.843583 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:29:31.843600 17472 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:29:31.843606 17472 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:29:31.843614 17472 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:29:31.843840 17472 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:29:31.843852 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.843858 17472 net.cpp:165] Memory required for data: 280065500\nI0817 16:29:31.843868 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:31.843876 17472 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:29:31.843883 17472 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:29:31.843893 17472 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:29:31.843943 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:31.844079 17472 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:29:31.844094 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844099 17472 net.cpp:165] Memory required for data: 288257500\nI0817 16:29:31.844120 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:29:31.844130 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:29:31.844136 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:29:31.844142 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:31.844151 17472 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:29:31.844183 17472 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:29:31.844203 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844208 17472 net.cpp:165] Memory required for data: 296449500\nI0817 16:29:31.844213 17472 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:29:31.844223 17472 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:29:31.844230 17472 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:29:31.844238 17472 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:29:31.844246 17472 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:29:31.844254 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844257 17472 net.cpp:165] Memory required for data: 304641500\nI0817 16:29:31.844262 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.844272 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.844277 17472 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:29:31.844285 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:31.844295 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:31.844336 17472 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.844349 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844357 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844360 17472 net.cpp:165] Memory required for data: 321025500\nI0817 16:29:31.844365 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:29:31.844377 17472 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:29:31.844383 17472 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:31.844398 17472 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:29:31.844703 17472 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:29:31.844717 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.844722 17472 net.cpp:165] Memory required for data: 329217500\nI0817 16:29:31.844732 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:29:31.844743 17472 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:29:31.844749 17472 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:29:31.844758 17472 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:29:31.844987 17472 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:29:31.845000 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.845005 17472 net.cpp:165] Memory required for data: 337409500\nI0817 16:29:31.845016 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:31.845024 17472 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:29:31.845031 17472 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:29:31.845041 17472 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.845091 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:31.845238 17472 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:29:31.845252 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.845257 17472 net.cpp:165] Memory required for data: 345601500\nI0817 16:29:31.845265 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:29:31.845274 17472 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:29:31.845279 17472 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:29:31.845286 17472 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.845295 17472 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:29:31.845302 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.845306 17472 net.cpp:165] Memory required for data: 353793500\nI0817 16:29:31.845311 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:29:31.845325 17472 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:29:31.845331 17472 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:29:31.845342 17472 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:29:31.845654 17472 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:29:31.845667 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.845672 17472 net.cpp:165] Memory required for data: 361985500\nI0817 16:29:31.845680 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:29:31.845692 17472 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:29:31.845698 17472 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:29:31.845710 17472 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:29:31.845947 17472 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:29:31.845959 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.845964 17472 net.cpp:165] Memory required for data: 370177500\nI0817 16:29:31.845974 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:31.845986 17472 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:29:31.845993 17472 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:29:31.845999 17472 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:29:31.846051 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:31.846194 17472 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:29:31.846207 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846212 17472 net.cpp:165] Memory required for data: 378369500\nI0817 16:29:31.846221 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:29:31.846230 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:29:31.846236 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:29:31.846242 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:31.846253 17472 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:29:31.846290 17472 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:29:31.846303 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846308 17472 net.cpp:165] Memory required for data: 386561500\nI0817 16:29:31.846313 17472 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:29:31.846320 17472 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:29:31.846326 17472 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:29:31.846333 17472 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:29:31.846343 17472 net.cpp:150] Setting up L1_b4_relu\nI0817 16:29:31.846349 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846354 17472 net.cpp:165] Memory required for data: 394753500\nI0817 16:29:31.846359 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.846369 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.846375 17472 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:29:31.846382 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:31.846392 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:31.846436 17472 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.846447 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846453 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846458 17472 net.cpp:165] Memory required for data: 411137500\nI0817 16:29:31.846463 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:29:31.846474 17472 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:29:31.846480 17472 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:31.846491 17472 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:29:31.846799 17472 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:29:31.846813 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.846818 17472 net.cpp:165] Memory required for data: 419329500\nI0817 16:29:31.846837 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:29:31.846850 17472 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:29:31.846856 17472 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:29:31.846869 17472 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:29:31.847111 17472 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:29:31.847126 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.847131 17472 net.cpp:165] Memory required for data: 427521500\nI0817 16:29:31.847141 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:31.847149 17472 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:29:31.847157 17472 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:29:31.847167 17472 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.847216 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:31.847357 17472 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:29:31.847369 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.847374 17472 net.cpp:165] Memory required for data: 435713500\nI0817 16:29:31.847383 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:29:31.847391 17472 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:29:31.847398 17472 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:29:31.847407 17472 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.847417 17472 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:29:31.847424 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.847429 17472 net.cpp:165] Memory required for data: 443905500\nI0817 16:29:31.847434 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:29:31.847447 17472 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:29:31.847453 17472 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:29:31.847467 17472 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:29:31.847784 17472 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:29:31.847797 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.847802 17472 net.cpp:165] Memory required for data: 452097500\nI0817 16:29:31.847811 17472 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:29:31.847820 17472 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:29:31.847829 17472 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:29:31.847838 17472 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:29:31.848067 17472 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:29:31.848079 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848084 17472 net.cpp:165] Memory required for data: 460289500\nI0817 16:29:31.848094 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:31.848103 17472 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:29:31.848114 17472 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:29:31.848126 17472 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:29:31.848178 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:31.848314 17472 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:29:31.848330 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848335 17472 net.cpp:165] Memory required for data: 468481500\nI0817 16:29:31.848345 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:29:31.848353 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:29:31.848359 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:29:31.848366 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:31.848373 17472 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:29:31.848405 17472 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:29:31.848415 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848419 17472 net.cpp:165] Memory required for data: 476673500\nI0817 16:29:31.848424 17472 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:29:31.848435 17472 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:29:31.848441 17472 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:29:31.848448 17472 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:29:31.848457 17472 net.cpp:150] Setting up L1_b5_relu\nI0817 16:29:31.848464 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848469 17472 net.cpp:165] Memory required for data: 484865500\nI0817 16:29:31.848474 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.848482 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.848489 17472 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:29:31.848495 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:31.848505 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:31.848546 17472 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.848559 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848567 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848572 17472 net.cpp:165] Memory required for data: 501249500\nI0817 16:29:31.848577 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:29:31.848587 17472 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:29:31.848592 17472 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:31.848601 17472 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:29:31.848904 17472 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:29:31.848917 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.848922 17472 net.cpp:165] Memory required for data: 509441500\nI0817 16:29:31.848938 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:29:31.848951 17472 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:29:31.848958 17472 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:29:31.848965 17472 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:29:31.849203 17472 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:29:31.849216 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.849221 17472 net.cpp:165] Memory required for data: 517633500\nI0817 16:29:31.849231 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:31.849241 17472 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:29:31.849246 17472 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:29:31.849256 17472 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.849308 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:31.849449 17472 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:29:31.849462 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.849467 17472 net.cpp:165] Memory required for data: 525825500\nI0817 16:29:31.849475 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:29:31.849483 17472 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:29:31.849489 17472 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:29:31.849496 17472 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.849505 17472 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:29:31.849512 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.849517 17472 net.cpp:165] Memory required for data: 534017500\nI0817 16:29:31.849521 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:29:31.849535 17472 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:29:31.849541 17472 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:29:31.849551 17472 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:29:31.849869 17472 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:29:31.849881 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.849886 17472 
net.cpp:165] Memory required for data: 542209500\nI0817 16:29:31.849896 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:29:31.849907 17472 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:29:31.849915 17472 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:29:31.849925 17472 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:29:31.850165 17472 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:29:31.850178 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850183 17472 net.cpp:165] Memory required for data: 550401500\nI0817 16:29:31.850193 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:31.850203 17472 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:29:31.850208 17472 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:29:31.850219 17472 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:29:31.850270 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:31.850407 17472 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:29:31.850420 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850425 17472 net.cpp:165] Memory required for data: 558593500\nI0817 16:29:31.850435 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:29:31.850453 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:29:31.850459 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:29:31.850466 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:31.850474 17472 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:29:31.850508 17472 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:29:31.850519 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850524 17472 net.cpp:165] Memory required for data: 566785500\nI0817 16:29:31.850529 17472 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:29:31.850545 17472 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:29:31.850551 17472 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:29:31.850561 17472 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:29:31.850570 17472 net.cpp:150] Setting up L1_b6_relu\nI0817 16:29:31.850577 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850582 17472 net.cpp:165] Memory required for data: 574977500\nI0817 16:29:31.850586 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.850594 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.850599 17472 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:29:31.850606 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:31.850615 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:31.850661 17472 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.850672 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850678 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.850682 17472 net.cpp:165] Memory required for data: 591361500\nI0817 16:29:31.850687 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:29:31.850698 17472 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:29:31.850704 17472 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:31.850715 17472 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:29:31.851023 17472 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:29:31.851037 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.851042 17472 net.cpp:165] Memory required for data: 599553500\nI0817 16:29:31.851050 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:29:31.851059 17472 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:29:31.851065 17472 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:29:31.851078 17472 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:29:31.851325 17472 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:29:31.851339 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.851344 17472 net.cpp:165] Memory required for data: 607745500\nI0817 16:29:31.851354 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:31.851366 17472 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:29:31.851372 17472 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:29:31.851380 17472 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.851431 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:31.851567 17472 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:29:31.851579 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.851584 17472 net.cpp:165] Memory required for data: 615937500\nI0817 16:29:31.851593 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:29:31.851603 17472 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:29:31.851608 17472 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:29:31.851617 17472 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.851627 17472 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:29:31.851634 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.851639 17472 net.cpp:165] Memory required for data: 624129500\nI0817 16:29:31.851644 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:29:31.851658 17472 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:29:31.851665 17472 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:29:31.851675 17472 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:29:31.851980 17472 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:29:31.851994 17472 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.851999 17472 net.cpp:165] Memory required for data: 632321500\nI0817 16:29:31.852015 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:29:31.852025 17472 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:29:31.852030 17472 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:29:31.852038 17472 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:29:31.852280 17472 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:29:31.852293 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852298 17472 net.cpp:165] Memory required for data: 640513500\nI0817 16:29:31.852308 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:31.852320 17472 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:29:31.852326 17472 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:29:31.852334 17472 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:29:31.852390 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:31.852530 17472 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:29:31.852543 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852547 17472 net.cpp:165] Memory required for data: 648705500\nI0817 16:29:31.852556 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:29:31.852566 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:29:31.852571 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:29:31.852578 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:31.852591 17472 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:29:31.852625 17472 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:29:31.852638 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852641 17472 net.cpp:165] Memory required for data: 656897500\nI0817 16:29:31.852648 17472 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:29:31.852655 17472 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:29:31.852661 17472 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:29:31.852669 17472 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:29:31.852679 17472 net.cpp:150] Setting up L1_b7_relu\nI0817 16:29:31.852687 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852691 17472 net.cpp:165] Memory required for data: 665089500\nI0817 16:29:31.852696 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.852704 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.852708 17472 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:29:31.852715 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:31.852725 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:31.852769 17472 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.852780 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852787 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.852792 17472 net.cpp:165] Memory required for data: 681473500\nI0817 16:29:31.852797 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:29:31.852808 17472 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:29:31.852813 17472 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:31.852825 17472 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:29:31.853142 17472 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:29:31.853155 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.853159 17472 net.cpp:165] Memory required for data: 689665500\nI0817 16:29:31.853168 17472 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:29:31.853178 17472 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:29:31.853184 17472 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:29:31.853196 17472 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:29:31.853440 17472 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:29:31.853453 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.853458 17472 net.cpp:165] Memory required for data: 697857500\nI0817 16:29:31.853468 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:31.853480 17472 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:29:31.853487 17472 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:29:31.853494 17472 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.853545 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:31.853685 17472 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:29:31.853698 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.853703 17472 net.cpp:165] Memory required for data: 706049500\nI0817 16:29:31.853711 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:29:31.853719 17472 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:29:31.853725 17472 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:29:31.853735 17472 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.853744 17472 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:29:31.853751 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.853756 17472 net.cpp:165] Memory required for data: 714241500\nI0817 16:29:31.853761 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:29:31.853775 17472 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:29:31.853780 17472 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:29:31.853788 17472 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:29:31.854101 17472 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:29:31.854121 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854126 17472 net.cpp:165] Memory required for data: 722433500\nI0817 16:29:31.854135 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:29:31.854147 17472 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:29:31.854153 17472 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:29:31.854161 17472 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:29:31.854406 17472 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:29:31.854419 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854424 17472 net.cpp:165] Memory required for data: 730625500\nI0817 16:29:31.854434 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:31.854447 17472 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:29:31.854454 17472 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:29:31.854461 17472 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:29:31.854514 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:31.854653 17472 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:29:31.854665 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854671 17472 net.cpp:165] Memory required for data: 738817500\nI0817 16:29:31.854679 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:29:31.854688 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:29:31.854694 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:29:31.854701 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:31.854712 17472 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:29:31.854743 17472 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:29:31.854755 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854759 17472 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:29:31.854764 17472 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:29:31.854773 17472 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:29:31.854779 17472 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:29:31.854785 17472 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:29:31.854794 17472 net.cpp:150] Setting up L1_b8_relu\nI0817 16:29:31.854807 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854812 17472 net.cpp:165] Memory required for data: 755201500\nI0817 16:29:31.854817 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.854827 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.854832 17472 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:29:31.854840 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:31.854849 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:31.854895 17472 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.854907 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854913 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.854918 17472 net.cpp:165] Memory required for data: 771585500\nI0817 16:29:31.854923 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:29:31.854934 17472 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:29:31.854940 17472 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:31.854951 17472 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:29:31.855283 17472 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:29:31.855298 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.855303 17472 net.cpp:165] Memory required for data: 
779777500\nI0817 16:29:31.855311 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:29:31.855324 17472 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:29:31.855329 17472 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:29:31.855339 17472 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:29:31.855582 17472 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:29:31.855594 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.855599 17472 net.cpp:165] Memory required for data: 787969500\nI0817 16:29:31.855609 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:31.855618 17472 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:29:31.855624 17472 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:29:31.855631 17472 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.855686 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:31.855830 17472 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:29:31.855844 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.855849 17472 net.cpp:165] Memory required for data: 796161500\nI0817 16:29:31.855856 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:29:31.855865 17472 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:29:31.855871 17472 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:29:31.855881 17472 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.855891 17472 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:29:31.855898 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.855902 17472 net.cpp:165] Memory required for data: 804353500\nI0817 16:29:31.855907 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:29:31.855921 17472 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:29:31.855927 17472 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:29:31.855936 17472 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:29:31.856259 17472 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:29:31.856273 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.856278 17472 net.cpp:165] Memory required for data: 812545500\nI0817 16:29:31.856287 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:29:31.856297 17472 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:29:31.856302 17472 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:29:31.856313 17472 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:29:31.856560 17472 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:29:31.856575 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.856580 17472 net.cpp:165] Memory required for data: 820737500\nI0817 16:29:31.856611 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:31.856621 17472 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:29:31.856627 17472 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:29:31.856636 17472 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:29:31.856689 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:31.856833 17472 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:29:31.856845 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.856850 17472 net.cpp:165] Memory required for data: 828929500\nI0817 16:29:31.856859 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:29:31.856868 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:29:31.856874 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:29:31.856881 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:31.856892 17472 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:29:31.856923 17472 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:29:31.856932 17472 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:29:31.856936 17472 net.cpp:165] Memory required for data: 837121500\nI0817 16:29:31.856941 17472 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:29:31.856950 17472 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:29:31.856954 17472 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:29:31.856966 17472 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:29:31.856974 17472 net.cpp:150] Setting up L1_b9_relu\nI0817 16:29:31.856981 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.856986 17472 net.cpp:165] Memory required for data: 845313500\nI0817 16:29:31.856990 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.856997 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.857002 17472 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:29:31.857014 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:31.857024 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:31.857069 17472 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.857080 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.857086 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.857091 17472 net.cpp:165] Memory required for data: 861697500\nI0817 16:29:31.857096 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:29:31.857112 17472 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:29:31.857120 17472 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:31.857131 17472 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:29:31.857455 17472 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:29:31.857468 17472 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:29:31.857473 17472 net.cpp:165] Memory required for data: 863745500\nI0817 16:29:31.857482 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:29:31.857491 17472 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:29:31.857497 17472 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:29:31.857508 17472 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:29:31.857743 17472 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:29:31.857754 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.857759 17472 net.cpp:165] Memory required for data: 865793500\nI0817 16:29:31.857769 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:31.857785 17472 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:29:31.857798 17472 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:29:31.857806 17472 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.857861 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:31.858001 17472 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:29:31.858013 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.858018 17472 net.cpp:165] Memory required for data: 867841500\nI0817 16:29:31.858027 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:29:31.858038 17472 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:29:31.858044 17472 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:29:31.858052 17472 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.858062 17472 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:29:31.858068 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.858072 17472 net.cpp:165] Memory required for data: 869889500\nI0817 16:29:31.858078 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:29:31.858091 17472 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:29:31.858098 17472 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:29:31.858114 17472 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:29:31.858433 17472 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:29:31.858445 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.858450 17472 net.cpp:165] Memory required for data: 871937500\nI0817 16:29:31.858459 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:29:31.858469 17472 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:29:31.858475 17472 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:29:31.858486 17472 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:29:31.858729 17472 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:29:31.858741 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.858747 17472 net.cpp:165] Memory required for data: 873985500\nI0817 16:29:31.858757 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:31.858768 17472 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:29:31.858775 17472 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:29:31.858783 17472 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:29:31.858836 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:31.858979 17472 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:29:31.858991 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.858996 17472 net.cpp:165] Memory required for data: 876033500\nI0817 16:29:31.859005 17472 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:29:31.859020 17472 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:29:31.859026 17472 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:31.859035 17472 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:29:31.859129 17472 net.cpp:150] Setting up L2_b1_pool\nI0817 16:29:31.859144 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.859149 17472 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:29:31.859155 17472 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:29:31.859164 17472 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:29:31.859171 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:29:31.859177 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:29:31.859189 17472 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:29:31.859221 17472 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:29:31.859231 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.859236 17472 net.cpp:165] Memory required for data: 880129500\nI0817 16:29:31.859241 17472 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:29:31.859248 17472 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:29:31.859254 17472 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:29:31.859268 17472 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:29:31.859278 17472 net.cpp:150] Setting up L2_b1_relu\nI0817 16:29:31.859285 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.859290 17472 net.cpp:165] Memory required for data: 882177500\nI0817 16:29:31.859294 17472 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:29:31.859345 17472 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:29:31.859359 17472 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:29:31.861726 17472 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:29:31.861744 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.861749 17472 net.cpp:165] Memory required for data: 884225500\nI0817 16:29:31.861755 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:29:31.861766 17472 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:29:31.861773 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:29:31.861779 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:29:31.861790 17472 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:29:31.861866 17472 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:29:31.861881 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.861891 17472 net.cpp:165] Memory required for data: 888321500\nI0817 16:29:31.861896 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.861904 17472 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.861910 17472 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:29:31.861918 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:31.861928 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:31.861979 17472 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.861989 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.861996 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.862001 17472 net.cpp:165] Memory required for data: 896513500\nI0817 16:29:31.862006 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:29:31.862022 17472 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:29:31.862030 17472 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:31.862038 17472 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:29:31.863502 17472 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:29:31.863520 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.863525 17472 net.cpp:165] Memory required for data: 900609500\nI0817 16:29:31.863535 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:29:31.863548 17472 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:29:31.863554 17472 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:29:31.863564 17472 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:29:31.863806 17472 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:29:31.863819 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.863824 17472 net.cpp:165] Memory required for data: 904705500\nI0817 16:29:31.863836 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:31.863847 17472 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:29:31.863854 17472 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:29:31.863862 17472 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.863916 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:31.864063 17472 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:29:31.864076 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.864081 17472 net.cpp:165] Memory required for data: 908801500\nI0817 16:29:31.864090 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:29:31.864101 17472 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:29:31.864114 17472 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:29:31.864122 17472 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.864142 17472 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:29:31.864151 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.864156 17472 net.cpp:165] Memory required for data: 912897500\nI0817 16:29:31.864161 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:29:31.864172 17472 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:29:31.864177 17472 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:29:31.864189 17472 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:29:31.864670 17472 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:29:31.864683 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.864688 17472 net.cpp:165] Memory required for data: 916993500\nI0817 16:29:31.864697 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:29:31.864707 17472 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:29:31.864713 17472 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:29:31.864724 17472 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:29:31.864967 17472 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:29:31.864980 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.864985 17472 net.cpp:165] Memory required for data: 921089500\nI0817 16:29:31.864996 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:31.865008 17472 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:29:31.865015 17472 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:29:31.865022 17472 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:29:31.865077 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:31.865226 17472 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:29:31.865241 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.865245 17472 net.cpp:165] Memory required for data: 925185500\nI0817 16:29:31.865254 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:29:31.865267 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:29:31.865273 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:29:31.865280 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:31.865288 17472 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:29:31.865317 17472 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:29:31.865327 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.865332 17472 net.cpp:165] Memory required for data: 929281500\nI0817 16:29:31.865337 17472 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:29:31.865345 17472 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:29:31.865350 17472 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:29:31.865360 17472 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:29:31.865370 17472 net.cpp:150] Setting up L2_b2_relu\nI0817 16:29:31.865377 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.865382 17472 net.cpp:165] Memory required for data: 933377500\nI0817 16:29:31.865386 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.865394 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.865399 17472 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:29:31.865406 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:31.865417 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:31.865464 17472 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.865476 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.865483 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.865487 17472 net.cpp:165] Memory required for data: 941569500\nI0817 16:29:31.865492 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:29:31.865510 17472 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:29:31.865516 17472 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:31.865528 17472 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:29:31.865986 17472 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:29:31.865999 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.866004 17472 net.cpp:165] Memory required for data: 945665500\nI0817 16:29:31.866014 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:29:31.866024 17472 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:29:31.866029 17472 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:29:31.866040 17472 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:29:31.866291 17472 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:29:31.866305 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.866310 17472 net.cpp:165] Memory required for data: 949761500\nI0817 16:29:31.866322 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:31.866333 17472 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:29:31.866339 17472 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:29:31.866348 17472 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.866403 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:31.866546 17472 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:29:31.866559 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.866564 17472 net.cpp:165] Memory required for data: 953857500\nI0817 16:29:31.866572 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:29:31.866583 17472 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:29:31.866590 17472 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:29:31.866597 17472 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.866606 17472 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:29:31.866613 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.866618 17472 net.cpp:165] Memory required for data: 957953500\nI0817 16:29:31.866623 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:29:31.866637 17472 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:29:31.866643 17472 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:29:31.866654 17472 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:29:31.867113 17472 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:29:31.867127 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867132 17472 net.cpp:165] Memory required for data: 962049500\nI0817 16:29:31.867141 17472 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:29:31.867151 17472 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:29:31.867156 17472 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:29:31.867167 17472 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:29:31.867414 17472 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:29:31.867427 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867432 17472 net.cpp:165] Memory required for data: 966145500\nI0817 16:29:31.867442 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:31.867455 17472 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:29:31.867460 17472 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:29:31.867468 17472 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:29:31.867522 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:31.867668 17472 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:29:31.867681 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867686 17472 net.cpp:165] Memory required for data: 970241500\nI0817 16:29:31.867694 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:29:31.867708 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:29:31.867715 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:29:31.867729 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:31.867738 17472 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:29:31.867764 17472 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:29:31.867779 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867784 17472 net.cpp:165] Memory required for data: 974337500\nI0817 16:29:31.867789 17472 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:29:31.867808 17472 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:29:31.867815 17472 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:29:31.867823 17472 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:29:31.867832 17472 net.cpp:150] Setting up L2_b3_relu\nI0817 16:29:31.867839 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867844 17472 net.cpp:165] Memory required for data: 978433500\nI0817 16:29:31.867849 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.867856 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.867861 17472 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:29:31.867871 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:31.867882 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:31.867928 17472 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.867941 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867949 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.867954 17472 net.cpp:165] Memory required for data: 986625500\nI0817 16:29:31.867959 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:29:31.867969 17472 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:29:31.867975 17472 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:31.867985 17472 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:29:31.868445 17472 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:29:31.868459 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.868464 17472 net.cpp:165] Memory required for data: 990721500\nI0817 16:29:31.868474 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:29:31.868485 17472 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:29:31.868491 17472 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:29:31.868500 17472 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:29:31.868742 17472 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:29:31.868755 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.868760 17472 net.cpp:165] Memory required for data: 994817500\nI0817 16:29:31.868770 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:31.868779 17472 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:29:31.868785 17472 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:29:31.868795 17472 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.868850 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:31.868996 17472 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:29:31.869009 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.869014 17472 net.cpp:165] Memory required for data: 998913500\nI0817 16:29:31.869022 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:29:31.869031 17472 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:29:31.869037 17472 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:29:31.869047 17472 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.869057 17472 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:29:31.869065 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.869068 17472 net.cpp:165] Memory required for data: 1003009500\nI0817 16:29:31.869073 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:29:31.869094 17472 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:29:31.869101 17472 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:29:31.869114 17472 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:29:31.869576 17472 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:29:31.869590 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.869596 17472 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:29:31.869604 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:29:31.869616 17472 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:29:31.869622 17472 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:29:31.869630 17472 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:29:31.869869 17472 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:29:31.869882 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.869887 17472 net.cpp:165] Memory required for data: 1011201500\nI0817 16:29:31.869897 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:31.869905 17472 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:29:31.869911 17472 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:29:31.869920 17472 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:29:31.869974 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:31.870122 17472 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:29:31.870139 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870144 17472 net.cpp:165] Memory required for data: 1015297500\nI0817 16:29:31.870153 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:29:31.870162 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:29:31.870168 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:29:31.870175 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:31.870183 17472 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:29:31.870213 17472 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:29:31.870223 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870226 17472 net.cpp:165] Memory required for data: 1019393500\nI0817 16:29:31.870231 17472 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:29:31.870239 17472 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:29:31.870245 17472 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:29:31.870254 17472 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:29:31.870263 17472 net.cpp:150] Setting up L2_b4_relu\nI0817 16:29:31.870270 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870275 17472 net.cpp:165] Memory required for data: 1023489500\nI0817 16:29:31.870280 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.870286 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.870291 17472 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:29:31.870301 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:31.870312 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:31.870355 17472 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.870367 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870373 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870378 17472 net.cpp:165] Memory required for data: 1031681500\nI0817 16:29:31.870383 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:29:31.870396 17472 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:29:31.870402 17472 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:31.870411 17472 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:29:31.870867 17472 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:29:31.870887 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.870893 17472 net.cpp:165] Memory required for data: 1035777500\nI0817 16:29:31.870901 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:29:31.870913 17472 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:29:31.870919 17472 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:29:31.870928 17472 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:29:31.871186 17472 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:29:31.871201 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.871206 17472 net.cpp:165] Memory required for data: 1039873500\nI0817 16:29:31.871215 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:31.871224 17472 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:29:31.871230 17472 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:29:31.871242 17472 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.871296 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:31.871443 17472 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:29:31.871455 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.871460 17472 net.cpp:165] Memory required for data: 1043969500\nI0817 16:29:31.871469 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:29:31.871477 17472 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:29:31.871484 17472 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:29:31.871490 17472 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.871502 17472 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:29:31.871510 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.871515 17472 net.cpp:165] Memory required for data: 1048065500\nI0817 16:29:31.871520 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:29:31.871529 17472 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:29:31.871538 17472 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:29:31.871546 17472 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:29:31.872005 17472 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:29:31.872020 17472 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872025 17472 net.cpp:165] Memory required for data: 1052161500\nI0817 16:29:31.872033 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:29:31.872045 17472 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:29:31.872051 17472 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:29:31.872062 17472 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:29:31.872309 17472 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:29:31.872323 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872328 17472 net.cpp:165] Memory required for data: 1056257500\nI0817 16:29:31.872337 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:31.872346 17472 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:29:31.872352 17472 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:29:31.872359 17472 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:29:31.872416 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:31.872561 17472 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:29:31.872577 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872582 17472 net.cpp:165] Memory required for data: 1060353500\nI0817 16:29:31.872591 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:29:31.872601 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:29:31.872606 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:29:31.872613 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:31.872620 17472 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:29:31.872650 17472 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:29:31.872659 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872670 17472 net.cpp:165] Memory required for data: 1064449500\nI0817 16:29:31.872675 17472 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:29:31.872684 17472 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:29:31.872689 17472 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:29:31.872702 17472 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:29:31.872712 17472 net.cpp:150] Setting up L2_b5_relu\nI0817 16:29:31.872720 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872725 17472 net.cpp:165] Memory required for data: 1068545500\nI0817 16:29:31.872728 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.872735 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.872741 17472 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:29:31.872751 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:31.872761 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:31.872805 17472 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.872817 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872822 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.872826 17472 net.cpp:165] Memory required for data: 1076737500\nI0817 16:29:31.872833 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:29:31.872846 17472 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:29:31.872853 17472 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:31.872861 17472 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:29:31.873337 17472 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:29:31.873353 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.873358 17472 net.cpp:165] Memory required for data: 1080833500\nI0817 16:29:31.873366 17472 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:29:31.873378 17472 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:29:31.873384 17472 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:29:31.873392 17472 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:29:31.873633 17472 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:29:31.873646 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.873651 17472 net.cpp:165] Memory required for data: 1084929500\nI0817 16:29:31.873661 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:31.873670 17472 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:29:31.873677 17472 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:29:31.873683 17472 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.873739 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:31.873881 17472 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:29:31.873896 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.873901 17472 net.cpp:165] Memory required for data: 1089025500\nI0817 16:29:31.873909 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:29:31.873919 17472 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:29:31.873924 17472 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:29:31.873931 17472 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.873940 17472 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:29:31.873947 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.873951 17472 net.cpp:165] Memory required for data: 1093121500\nI0817 16:29:31.873956 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:29:31.873970 17472 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:29:31.873976 17472 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:29:31.873987 17472 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:29:31.874451 17472 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:29:31.874471 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.874477 17472 net.cpp:165] Memory required for data: 1097217500\nI0817 16:29:31.874485 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:29:31.874497 17472 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:29:31.874505 17472 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:29:31.874516 17472 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:29:31.874759 17472 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:29:31.874771 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.874776 17472 net.cpp:165] Memory required for data: 1101313500\nI0817 16:29:31.874786 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:31.874795 17472 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:29:31.874801 17472 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:29:31.874809 17472 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:29:31.874866 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:31.875010 17472 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:29:31.875023 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875027 17472 net.cpp:165] Memory required for data: 1105409500\nI0817 16:29:31.875036 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:29:31.875048 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:29:31.875056 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:29:31.875062 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:31.875069 17472 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:29:31.875095 17472 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:29:31.875104 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875115 17472 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:29:31.875121 17472 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:29:31.875133 17472 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:29:31.875138 17472 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:29:31.875149 17472 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:29:31.875157 17472 net.cpp:150] Setting up L2_b6_relu\nI0817 16:29:31.875165 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875169 17472 net.cpp:165] Memory required for data: 1113601500\nI0817 16:29:31.875174 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.875181 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.875186 17472 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:29:31.875193 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:31.875203 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:31.875250 17472 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.875262 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875268 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875273 17472 net.cpp:165] Memory required for data: 1121793500\nI0817 16:29:31.875277 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:29:31.875291 17472 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:29:31.875298 17472 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:31.875308 17472 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:29:31.875767 17472 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:29:31.875782 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.875787 17472 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:29:31.875794 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:29:31.875807 17472 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:29:31.875819 17472 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:29:31.875833 17472 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:29:31.876076 17472 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:29:31.876090 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.876094 17472 net.cpp:165] Memory required for data: 1129985500\nI0817 16:29:31.876106 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:31.876121 17472 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:29:31.876127 17472 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:29:31.876134 17472 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.876204 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:31.876348 17472 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:29:31.876363 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.876368 17472 net.cpp:165] Memory required for data: 1134081500\nI0817 16:29:31.876377 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:29:31.876385 17472 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:29:31.876391 17472 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:29:31.876399 17472 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.876408 17472 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:29:31.876415 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.876420 17472 net.cpp:165] Memory required for data: 1138177500\nI0817 16:29:31.876425 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:29:31.876440 17472 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:29:31.876446 17472 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:29:31.876457 17472 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:29:31.876920 17472 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:29:31.876935 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.876940 17472 net.cpp:165] Memory required for data: 1142273500\nI0817 16:29:31.876948 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:29:31.876960 17472 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:29:31.876966 17472 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:29:31.876977 17472 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:29:31.877235 17472 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:29:31.877249 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.877254 17472 net.cpp:165] Memory required for data: 1146369500\nI0817 16:29:31.877264 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:31.877274 17472 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:29:31.877279 17472 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:29:31.877286 17472 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:29:31.877344 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:31.877490 17472 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:29:31.877501 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.877506 17472 net.cpp:165] Memory required for data: 1150465500\nI0817 16:29:31.877514 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:29:31.877526 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:29:31.877533 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:29:31.877539 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:31.877547 17472 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:29:31.877573 17472 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:29:31.877583 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:31.877588 17472 net.cpp:165] Memory required for data: 1154561500\nI0817 16:29:31.877593 17472 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:29:31.877604 17472 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:29:31.877609 17472 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:29:31.877622 17472 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:29:31.877632 17472 net.cpp:150] Setting up L2_b7_relu\nI0817 16:29:31.877640 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.877645 17472 net.cpp:165] Memory required for data: 1158657500\nI0817 16:29:31.877650 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.877656 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.877661 17472 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:29:31.877668 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:31.877678 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:31.877727 17472 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.877739 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.877745 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.877750 17472 net.cpp:165] Memory required for data: 1166849500\nI0817 16:29:31.877755 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:29:31.877771 17472 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:29:31.877777 17472 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:31.877786 17472 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:29:31.878288 17472 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:29:31.878306 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:31.878311 17472 net.cpp:165] Memory required for data: 1170945500\nI0817 16:29:31.878320 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:29:31.878332 17472 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:29:31.878340 17472 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:29:31.878350 17472 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:29:31.878595 17472 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:29:31.878608 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.878613 17472 net.cpp:165] Memory required for data: 1175041500\nI0817 16:29:31.878623 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:31.878633 17472 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:29:31.878638 17472 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:29:31.878645 17472 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.878703 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:31.878854 17472 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:29:31.878865 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.878870 17472 net.cpp:165] Memory required for data: 1179137500\nI0817 16:29:31.878880 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:29:31.878890 17472 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:29:31.878896 17472 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:29:31.878903 17472 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.878913 17472 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:29:31.878921 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.878924 17472 net.cpp:165] Memory required for data: 1183233500\nI0817 16:29:31.878929 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:29:31.878943 17472 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:29:31.878949 17472 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:29:31.878960 17472 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:29:31.879432 17472 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:29:31.879446 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.879451 17472 net.cpp:165] Memory required for data: 1187329500\nI0817 16:29:31.879461 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:29:31.879472 17472 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:29:31.879487 17472 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:29:31.879498 17472 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:29:31.879751 17472 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:29:31.879767 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.879772 17472 net.cpp:165] Memory required for data: 1191425500\nI0817 16:29:31.879782 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:31.879791 17472 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:29:31.879797 17472 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:29:31.879804 17472 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:29:31.879859 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:31.880007 17472 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:29:31.880020 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880025 17472 net.cpp:165] Memory required for data: 1195521500\nI0817 16:29:31.880033 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:29:31.880043 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:29:31.880048 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:29:31.880059 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:31.880066 17472 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:29:31.880093 17472 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:29:31.880102 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880112 17472 net.cpp:165] Memory required for data: 1199617500\nI0817 16:29:31.880118 17472 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:29:31.880129 17472 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:29:31.880136 17472 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:29:31.880142 17472 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:29:31.880152 17472 net.cpp:150] Setting up L2_b8_relu\nI0817 16:29:31.880159 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880163 17472 net.cpp:165] Memory required for data: 1203713500\nI0817 16:29:31.880168 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:31.880175 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:31.880182 17472 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:29:31.880188 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:31.880210 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:31.880260 17472 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:31.880273 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880280 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880285 17472 net.cpp:165] Memory required for data: 1211905500\nI0817 16:29:31.880290 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:29:31.880303 17472 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:29:31.880309 17472 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:31.880318 17472 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:29:31.880785 17472 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:29:31.880800 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.880805 17472 net.cpp:165] Memory required for data: 1216001500\nI0817 16:29:31.880813 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:29:31.880825 17472 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:29:31.880832 17472 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:29:31.880841 17472 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:29:31.881088 17472 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:29:31.881103 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.881120 17472 net.cpp:165] Memory required for data: 1220097500\nI0817 16:29:31.881131 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:31.881140 17472 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:29:31.881147 17472 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:29:31.881155 17472 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.881213 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:31.881371 17472 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:29:31.881383 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.881388 17472 net.cpp:165] Memory required for data: 1224193500\nI0817 16:29:31.881397 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:29:31.881405 17472 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:29:31.881412 17472 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:29:31.881422 17472 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.881431 17472 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:29:31.881438 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.881443 17472 net.cpp:165] Memory required for data: 1228289500\nI0817 16:29:31.881448 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:29:31.881461 17472 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:29:31.881467 17472 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:29:31.881476 17472 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:29:31.881933 17472 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:29:31.881947 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.881953 17472 net.cpp:165] Memory required for data: 1232385500\nI0817 16:29:31.881960 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:29:31.881973 17472 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:29:31.881978 17472 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:29:31.881988 17472 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:29:31.882246 17472 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:29:31.882258 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882263 17472 net.cpp:165] Memory required for data: 1236481500\nI0817 16:29:31.882308 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:31.882320 17472 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:29:31.882326 17472 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:29:31.882334 17472 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:29:31.882393 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:31.882544 17472 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:29:31.882556 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882561 17472 net.cpp:165] Memory required for data: 1240577500\nI0817 16:29:31.882570 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:29:31.882580 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:29:31.882586 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:29:31.882593 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:31.882603 17472 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:29:31.882632 17472 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:29:31.882640 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882645 17472 net.cpp:165] Memory required for data: 1244673500\nI0817 16:29:31.882650 17472 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:29:31.882661 17472 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:29:31.882668 17472 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:29:31.882674 17472 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:29:31.882683 17472 net.cpp:150] Setting up L2_b9_relu\nI0817 16:29:31.882690 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882695 17472 net.cpp:165] Memory required for data: 1248769500\nI0817 16:29:31.882709 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:31.882716 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:31.882722 17472 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:29:31.882737 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:31.882748 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:31.882794 17472 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:31.882809 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882817 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.882820 17472 net.cpp:165] Memory required for data: 1256961500\nI0817 16:29:31.882825 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:29:31.882836 17472 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:29:31.882843 17472 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:31.882851 17472 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:29:31.883333 17472 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:29:31.883348 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.883353 17472 net.cpp:165] Memory required for data: 1257985500\nI0817 16:29:31.883361 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:29:31.883374 17472 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:29:31.883380 17472 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:29:31.883388 17472 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:29:31.883648 17472 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:29:31.883661 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.883666 17472 net.cpp:165] Memory required for data: 1259009500\nI0817 16:29:31.883677 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:31.883688 17472 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:29:31.883694 17472 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:29:31.883702 17472 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.883759 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:31.883911 17472 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:29:31.883924 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.883929 17472 net.cpp:165] Memory required for data: 1260033500\nI0817 16:29:31.883939 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:29:31.883949 17472 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:29:31.883955 17472 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:29:31.883965 17472 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.883975 17472 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:29:31.883982 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.883987 17472 net.cpp:165] Memory required for data: 1261057500\nI0817 16:29:31.883991 17472 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:29:31.884002 17472 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:29:31.884008 17472 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:29:31.884019 17472 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:29:31.884491 17472 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:29:31.884505 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.884510 17472 net.cpp:165] Memory required for data: 1262081500\nI0817 16:29:31.884519 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:29:31.884531 17472 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:29:31.884538 17472 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:29:31.884546 17472 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:29:31.884807 17472 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:29:31.884820 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.884831 17472 net.cpp:165] Memory required for data: 1263105500\nI0817 16:29:31.884841 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:31.884850 17472 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:29:31.884856 17472 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:29:31.884863 17472 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:29:31.884922 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:31.885076 17472 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:29:31.885092 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.885097 17472 net.cpp:165] Memory required for data: 1264129500\nI0817 16:29:31.885107 17472 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:29:31.885121 17472 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:29:31.885128 17472 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:31.885136 17472 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:29:31.885174 17472 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:29:31.885185 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.885188 17472 net.cpp:165] Memory required for data: 1265153500\nI0817 16:29:31.885202 17472 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:29:31.885211 17472 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:29:31.885217 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:29:31.885223 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:29:31.885231 17472 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:29:31.885265 17472 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:29:31.885277 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.885282 17472 net.cpp:165] Memory required for data: 1266177500\nI0817 16:29:31.885295 17472 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:29:31.885303 17472 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:29:31.885308 17472 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:29:31.885315 17472 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:29:31.885324 17472 net.cpp:150] Setting up L3_b1_relu\nI0817 16:29:31.885331 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.885335 17472 net.cpp:165] Memory required for data: 1267201500\nI0817 16:29:31.885340 17472 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:29:31.885349 17472 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:29:31.885360 17472 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:29:31.886605 17472 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:29:31.886623 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:31.886629 17472 net.cpp:165] Memory required for data: 1268225500\nI0817 16:29:31.886634 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:29:31.886646 17472 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:29:31.886653 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:29:31.886660 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:29:31.886668 17472 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:29:31.886710 17472 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:29:31.886723 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.886728 17472 net.cpp:165] Memory required for data: 1270273500\nI0817 16:29:31.886732 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:31.886740 17472 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:31.886746 17472 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:29:31.886756 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:31.886766 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:31.886813 17472 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:31.886831 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.886837 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.886848 17472 net.cpp:165] Memory required for data: 1274369500\nI0817 16:29:31.886854 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:29:31.886868 17472 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:29:31.886875 17472 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:31.886885 17472 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:29:31.888871 17472 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:29:31.888891 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.888897 17472 net.cpp:165] Memory required for data: 1276417500\nI0817 16:29:31.888906 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:29:31.888916 17472 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:29:31.888923 17472 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:29:31.888934 17472 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:29:31.889202 17472 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:29:31.889216 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.889221 17472 net.cpp:165] Memory required for data: 1278465500\nI0817 16:29:31.889231 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:31.889243 17472 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:29:31.889250 17472 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:29:31.889258 17472 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.889315 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:31.889473 17472 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:29:31.889487 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.889492 17472 net.cpp:165] Memory required for data: 1280513500\nI0817 16:29:31.889500 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:29:31.889509 17472 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:29:31.889515 17472 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:29:31.889525 17472 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.889535 17472 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:29:31.889544 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.889547 17472 net.cpp:165] Memory required for data: 1282561500\nI0817 16:29:31.889552 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:29:31.889566 17472 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:29:31.889574 17472 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:29:31.889582 17472 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:29:31.890609 17472 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:29:31.890624 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.890628 17472 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:29:31.890637 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:29:31.890650 17472 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:29:31.890656 17472 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:29:31.890664 17472 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:29:31.890926 17472 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:29:31.890939 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.890944 17472 net.cpp:165] Memory required for data: 1286657500\nI0817 16:29:31.890954 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:31.890964 17472 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:29:31.890970 17472 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:29:31.890977 17472 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:29:31.891036 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:31.891198 17472 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:29:31.891211 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.891216 17472 net.cpp:165] Memory required for data: 1288705500\nI0817 16:29:31.891225 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:29:31.891242 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:29:31.891249 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:29:31.891257 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:31.891268 17472 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:29:31.891302 17472 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:29:31.891317 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.891322 17472 net.cpp:165] Memory required for data: 1290753500\nI0817 16:29:31.891327 17472 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:29:31.891335 17472 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:29:31.891341 17472 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:29:31.891348 17472 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:29:31.891357 17472 net.cpp:150] Setting up L3_b2_relu\nI0817 16:29:31.891364 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.891369 17472 net.cpp:165] Memory required for data: 1292801500\nI0817 16:29:31.891373 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:31.891383 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:31.891389 17472 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:29:31.891397 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:31.891407 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:31.891456 17472 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:31.891469 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.891475 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.891479 17472 net.cpp:165] Memory required for data: 1296897500\nI0817 16:29:31.891484 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:29:31.891496 17472 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:29:31.891502 17472 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:31.891515 17472 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:29:31.892570 17472 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:29:31.892585 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.892590 17472 net.cpp:165] Memory required for data: 1298945500\nI0817 16:29:31.892599 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:29:31.892609 17472 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:29:31.892616 17472 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:29:31.892627 17472 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:29:31.892894 17472 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:29:31.892911 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.892916 17472 net.cpp:165] Memory required for data: 1300993500\nI0817 16:29:31.892927 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:31.892936 17472 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:29:31.892942 17472 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:29:31.892951 17472 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.893009 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:31.893173 17472 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:29:31.893187 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.893193 17472 net.cpp:165] Memory required for data: 1303041500\nI0817 16:29:31.893201 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:29:31.893209 17472 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:29:31.893215 17472 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:29:31.893225 17472 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.893236 17472 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:29:31.893244 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.893254 17472 net.cpp:165] Memory required for data: 1305089500\nI0817 16:29:31.893260 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:29:31.893275 17472 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:29:31.893280 17472 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:29:31.893290 17472 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:29:31.894304 17472 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:29:31.894317 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.894322 17472 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:29:31.894331 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:29:31.894343 17472 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:29:31.894351 17472 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:29:31.894362 17472 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:29:31.894624 17472 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:29:31.894636 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.894641 17472 net.cpp:165] Memory required for data: 1309185500\nI0817 16:29:31.894651 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:31.894660 17472 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:29:31.894666 17472 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:29:31.894675 17472 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:29:31.894734 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:31.894892 17472 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:29:31.894906 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.894911 17472 net.cpp:165] Memory required for data: 1311233500\nI0817 16:29:31.894919 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:29:31.894928 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:29:31.894934 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:29:31.894942 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:31.894953 17472 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:29:31.894986 17472 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:29:31.895001 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.895006 17472 net.cpp:165] Memory required for data: 1313281500\nI0817 16:29:31.895011 17472 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:29:31.895020 17472 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:29:31.895025 17472 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:29:31.895032 17472 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:29:31.895041 17472 net.cpp:150] Setting up L3_b3_relu\nI0817 16:29:31.895048 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.895052 17472 net.cpp:165] Memory required for data: 1315329500\nI0817 16:29:31.895057 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:31.895067 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:31.895072 17472 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:29:31.895081 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:31.895089 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:31.895145 17472 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:31.895157 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.895164 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.895169 17472 net.cpp:165] Memory required for data: 1319425500\nI0817 16:29:31.895174 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:29:31.895185 17472 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:29:31.895191 17472 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:31.895203 17472 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:29:31.896230 17472 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:29:31.896245 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.896250 17472 net.cpp:165] Memory required for data: 1321473500\nI0817 16:29:31.896258 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:29:31.896268 17472 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:29:31.896275 17472 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:29:31.896286 17472 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:29:31.896559 17472 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:29:31.896574 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.896579 17472 net.cpp:165] Memory required for data: 1323521500\nI0817 16:29:31.896589 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:31.896597 17472 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:29:31.896603 17472 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:29:31.896611 17472 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.896669 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:31.896826 17472 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:29:31.896838 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.896843 17472 net.cpp:165] Memory required for data: 1325569500\nI0817 16:29:31.896852 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:29:31.896863 17472 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:29:31.896869 17472 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:29:31.896877 17472 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.896886 17472 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:29:31.896893 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.896898 17472 net.cpp:165] Memory required for data: 1327617500\nI0817 16:29:31.896903 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:29:31.896916 17472 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:29:31.896924 17472 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:29:31.896931 17472 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:29:31.897964 17472 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:29:31.897979 17472 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:29:31.897984 17472 net.cpp:165] Memory required for data: 1329665500\nI0817 16:29:31.897992 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:29:31.898005 17472 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:29:31.898011 17472 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:29:31.898022 17472 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:29:31.898298 17472 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:29:31.898310 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898315 17472 net.cpp:165] Memory required for data: 1331713500\nI0817 16:29:31.898326 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:31.898335 17472 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:29:31.898341 17472 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:29:31.898351 17472 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:29:31.898411 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:31.898574 17472 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:29:31.898587 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898591 17472 net.cpp:165] Memory required for data: 1333761500\nI0817 16:29:31.898600 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:29:31.898610 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:29:31.898617 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:29:31.898623 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:31.898633 17472 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:29:31.898671 17472 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:29:31.898687 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898692 17472 net.cpp:165] Memory required for data: 1335809500\nI0817 16:29:31.898697 17472 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:29:31.898705 17472 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:29:31.898711 17472 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:29:31.898720 17472 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:29:31.898730 17472 net.cpp:150] Setting up L3_b4_relu\nI0817 16:29:31.898738 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898742 17472 net.cpp:165] Memory required for data: 1337857500\nI0817 16:29:31.898747 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:31.898754 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:31.898759 17472 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:29:31.898767 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:31.898777 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:31.898828 17472 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:31.898838 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898845 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.898849 17472 net.cpp:165] Memory required for data: 1341953500\nI0817 16:29:31.898854 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:29:31.898865 17472 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:29:31.898871 17472 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:31.898883 17472 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:29:31.899914 17472 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:29:31.899927 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.899932 17472 net.cpp:165] Memory required for data: 1344001500\nI0817 16:29:31.899941 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:29:31.899951 17472 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:29:31.899957 17472 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:29:31.899968 17472 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:29:31.901232 17472 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:29:31.901249 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.901254 17472 net.cpp:165] Memory required for data: 1346049500\nI0817 16:29:31.901268 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:31.901276 17472 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:29:31.901283 17472 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:29:31.901294 17472 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.901356 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:31.901516 17472 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:29:31.901528 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.901533 17472 net.cpp:165] Memory required for data: 1348097500\nI0817 16:29:31.901542 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:29:31.901551 17472 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:29:31.901556 17472 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:29:31.901566 17472 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.901577 17472 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:29:31.901584 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.901588 17472 net.cpp:165] Memory required for data: 1350145500\nI0817 16:29:31.901593 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:29:31.901607 17472 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:29:31.901614 17472 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:29:31.901623 17472 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:29:31.903641 17472 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:29:31.903658 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.903664 17472 net.cpp:165] Memory required for data: 1352193500\nI0817 16:29:31.903673 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:29:31.903687 17472 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:29:31.903693 17472 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:29:31.903704 17472 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:29:31.903966 17472 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:29:31.903980 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.903985 17472 net.cpp:165] Memory required for data: 1354241500\nI0817 16:29:31.903995 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:31.904005 17472 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:29:31.904011 17472 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:29:31.904021 17472 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:29:31.904080 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:31.904242 17472 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:29:31.904256 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.904261 17472 net.cpp:165] Memory required for data: 1356289500\nI0817 16:29:31.904270 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:29:31.904280 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:29:31.904286 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:29:31.904294 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:31.904304 17472 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:29:31.904340 17472 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:29:31.904352 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.904357 17472 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:29:31.904362 17472 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:29:31.904371 17472 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:29:31.904376 17472 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:29:31.904383 17472 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:29:31.904395 17472 net.cpp:150] Setting up L3_b5_relu\nI0817 16:29:31.904403 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.904407 17472 net.cpp:165] Memory required for data: 1360385500\nI0817 16:29:31.904412 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:31.904419 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:31.904424 17472 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:29:31.904433 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:31.904443 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:31.904490 17472 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:31.904501 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.904508 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.904512 17472 net.cpp:165] Memory required for data: 1364481500\nI0817 16:29:31.904517 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:29:31.904530 17472 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:29:31.904536 17472 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:31.904547 17472 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:29:31.905565 17472 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:29:31.905580 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.905586 17472 net.cpp:165] Memory required for data: 1366529500\nI0817 16:29:31.905594 
17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:29:31.905603 17472 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:29:31.905617 17472 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:29:31.905630 17472 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:29:31.905892 17472 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:29:31.905907 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.905912 17472 net.cpp:165] Memory required for data: 1368577500\nI0817 16:29:31.905922 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:31.905931 17472 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:29:31.905938 17472 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:29:31.905946 17472 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.906004 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:31.906167 17472 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:29:31.906180 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.906185 17472 net.cpp:165] Memory required for data: 1370625500\nI0817 16:29:31.906194 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:29:31.906205 17472 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:29:31.906213 17472 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:29:31.906219 17472 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.906229 17472 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:29:31.906236 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.906241 17472 net.cpp:165] Memory required for data: 1372673500\nI0817 16:29:31.906246 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:29:31.906260 17472 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:29:31.906266 17472 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:29:31.906275 17472 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:29:31.907289 17472 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:29:31.907302 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.907307 17472 net.cpp:165] Memory required for data: 1374721500\nI0817 16:29:31.907316 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:29:31.907330 17472 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:29:31.907336 17472 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:29:31.907346 17472 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:29:31.907605 17472 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:29:31.907618 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.907624 17472 net.cpp:165] Memory required for data: 1376769500\nI0817 16:29:31.907634 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:31.907642 17472 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:29:31.907649 17472 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:29:31.907660 17472 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:29:31.907717 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:31.907873 17472 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:29:31.907886 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.907891 17472 net.cpp:165] Memory required for data: 1378817500\nI0817 16:29:31.907899 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:29:31.907908 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:29:31.907915 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:29:31.907922 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:31.907932 17472 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:29:31.907969 17472 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:29:31.907981 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.907985 
17472 net.cpp:165] Memory required for data: 1380865500\nI0817 16:29:31.907990 17472 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:29:31.907999 17472 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:29:31.908004 17472 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:29:31.908020 17472 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:29:31.908031 17472 net.cpp:150] Setting up L3_b6_relu\nI0817 16:29:31.908038 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.908042 17472 net.cpp:165] Memory required for data: 1382913500\nI0817 16:29:31.908047 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:31.908054 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:31.908059 17472 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:29:31.908067 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:31.908077 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:31.908130 17472 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:31.908143 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.908150 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.908154 17472 net.cpp:165] Memory required for data: 1387009500\nI0817 16:29:31.908159 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:29:31.908170 17472 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:29:31.908177 17472 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:31.908190 17472 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:29:31.909210 17472 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:29:31.909225 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.909230 17472 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:29:31.909240 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:29:31.909248 17472 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:29:31.909255 17472 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:29:31.909265 17472 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:29:31.909530 17472 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:29:31.909543 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.909548 17472 net.cpp:165] Memory required for data: 1391105500\nI0817 16:29:31.909559 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:31.909567 17472 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:29:31.909574 17472 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:29:31.909581 17472 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.909641 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:31.909795 17472 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:29:31.909807 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.909812 17472 net.cpp:165] Memory required for data: 1393153500\nI0817 16:29:31.909821 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:29:31.909857 17472 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:29:31.909864 17472 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:29:31.909873 17472 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.909883 17472 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:29:31.909890 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.909895 17472 net.cpp:165] Memory required for data: 1395201500\nI0817 16:29:31.909900 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:29:31.909912 17472 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:29:31.909917 17472 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:29:31.909932 
17472 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:29:31.910951 17472 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:29:31.910965 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.910970 17472 net.cpp:165] Memory required for data: 1397249500\nI0817 16:29:31.910979 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:29:31.910989 17472 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:29:31.911002 17472 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:29:31.911013 17472 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:29:31.911284 17472 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:29:31.911300 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.911305 17472 net.cpp:165] Memory required for data: 1399297500\nI0817 16:29:31.911315 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:31.911324 17472 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:29:31.911330 17472 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:29:31.911339 17472 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:29:31.911394 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:31.911551 17472 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:29:31.911564 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.911569 17472 net.cpp:165] Memory required for data: 1401345500\nI0817 16:29:31.911578 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:29:31.911589 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:29:31.911597 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:29:31.911603 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:31.911612 17472 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:29:31.911649 17472 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:29:31.911661 17472 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:29:31.911665 17472 net.cpp:165] Memory required for data: 1403393500\nI0817 16:29:31.911670 17472 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:29:31.911679 17472 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:29:31.911684 17472 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:29:31.911691 17472 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:29:31.911700 17472 net.cpp:150] Setting up L3_b7_relu\nI0817 16:29:31.911707 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.911712 17472 net.cpp:165] Memory required for data: 1405441500\nI0817 16:29:31.911717 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:31.911725 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:31.911730 17472 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:29:31.911741 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:31.911751 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:31.911795 17472 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:31.911806 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.911813 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.911818 17472 net.cpp:165] Memory required for data: 1409537500\nI0817 16:29:31.911823 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:29:31.911837 17472 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:29:31.911844 17472 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:31.911854 17472 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:29:31.912868 17472 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:29:31.912883 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:29:31.912889 17472 net.cpp:165] Memory required for data: 1411585500\nI0817 16:29:31.912896 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:29:31.912910 17472 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:29:31.912917 17472 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:29:31.912925 17472 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:29:31.913193 17472 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:29:31.913206 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.913211 17472 net.cpp:165] Memory required for data: 1413633500\nI0817 16:29:31.913228 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:31.913240 17472 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:29:31.913246 17472 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:29:31.913254 17472 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.913316 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:31.913470 17472 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:29:31.913482 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.913487 17472 net.cpp:165] Memory required for data: 1415681500\nI0817 16:29:31.913496 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:29:31.913508 17472 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:29:31.913514 17472 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:29:31.913522 17472 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.913532 17472 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:29:31.913538 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.913543 17472 net.cpp:165] Memory required for data: 1417729500\nI0817 16:29:31.913548 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:29:31.913563 17472 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:29:31.913568 17472 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:29:31.913579 17472 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:29:31.914597 17472 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:29:31.914610 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.914615 17472 net.cpp:165] Memory required for data: 1419777500\nI0817 16:29:31.914624 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:29:31.914634 17472 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:29:31.914640 17472 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:29:31.914654 17472 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:29:31.914916 17472 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:29:31.914932 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.914937 17472 net.cpp:165] Memory required for data: 1421825500\nI0817 16:29:31.914947 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:31.914955 17472 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:29:31.914961 17472 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:29:31.914968 17472 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:29:31.915029 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:31.915191 17472 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:29:31.915205 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.915210 17472 net.cpp:165] Memory required for data: 1423873500\nI0817 16:29:31.915218 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:29:31.915230 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:29:31.915236 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:29:31.915243 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:31.915251 17472 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:29:31.915287 17472 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:29:31.915299 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.915303 17472 net.cpp:165] Memory required for data: 1425921500\nI0817 16:29:31.915309 17472 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:29:31.915318 17472 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:29:31.915323 17472 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:29:31.915330 17472 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:29:31.915339 17472 net.cpp:150] Setting up L3_b8_relu\nI0817 16:29:31.915346 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.915350 17472 net.cpp:165] Memory required for data: 1427969500\nI0817 16:29:31.915355 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:31.915369 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:31.915375 17472 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:29:31.915385 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:31.915395 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:31.915441 17472 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:31.915453 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.915459 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.915464 17472 net.cpp:165] Memory required for data: 1432065500\nI0817 16:29:31.915469 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:29:31.915483 17472 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:29:31.915490 17472 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:31.915499 17472 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:29:31.917495 17472 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:29:31.917512 
17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.917518 17472 net.cpp:165] Memory required for data: 1434113500\nI0817 16:29:31.917527 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:29:31.917541 17472 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:29:31.917547 17472 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:29:31.917557 17472 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:29:31.917827 17472 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:29:31.917840 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.917845 17472 net.cpp:165] Memory required for data: 1436161500\nI0817 16:29:31.917855 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:31.917865 17472 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:29:31.917870 17472 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:29:31.917878 17472 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.917939 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:31.918099 17472 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:29:31.918118 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.918124 17472 net.cpp:165] Memory required for data: 1438209500\nI0817 16:29:31.918133 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:29:31.918141 17472 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:29:31.918148 17472 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:29:31.918155 17472 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.918165 17472 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:29:31.918172 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.918177 17472 net.cpp:165] Memory required for data: 1440257500\nI0817 16:29:31.918181 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:29:31.918196 17472 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:29:31.918202 17472 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:29:31.918213 17472 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:29:31.919240 17472 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:29:31.919255 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.919260 17472 net.cpp:165] Memory required for data: 1442305500\nI0817 16:29:31.919270 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:29:31.919282 17472 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:29:31.919288 17472 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:29:31.919296 17472 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:29:31.919559 17472 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:29:31.919571 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.919576 17472 net.cpp:165] Memory required for data: 1444353500\nI0817 16:29:31.919595 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:31.919606 17472 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:29:31.919613 17472 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:29:31.919620 17472 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:29:31.919682 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:31.919836 17472 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:29:31.919847 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.919852 17472 net.cpp:165] Memory required for data: 1446401500\nI0817 16:29:31.919862 17472 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:29:31.919875 17472 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:29:31.919883 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:29:31.919889 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:31.919900 17472 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:29:31.919934 17472 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:29:31.919945 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.919950 17472 net.cpp:165] Memory required for data: 1448449500\nI0817 16:29:31.919955 17472 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:29:31.919967 17472 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:29:31.919975 17472 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:29:31.919981 17472 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:29:31.919991 17472 net.cpp:150] Setting up L3_b9_relu\nI0817 16:29:31.919998 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:31.920002 17472 net.cpp:165] Memory required for data: 1450497500\nI0817 16:29:31.920007 17472 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:29:31.920016 17472 net.cpp:100] Creating Layer post_pool\nI0817 16:29:31.920020 17472 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:29:31.920028 17472 net.cpp:408] post_pool -> post_pool\nI0817 16:29:31.920063 17472 net.cpp:150] Setting up post_pool\nI0817 16:29:31.920076 17472 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:29:31.920081 17472 net.cpp:165] Memory required for data: 1450529500\nI0817 16:29:31.920087 17472 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:29:31.920177 17472 net.cpp:100] Creating Layer post_FC\nI0817 16:29:31.920192 17472 net.cpp:434] post_FC <- post_pool\nI0817 16:29:31.920200 17472 net.cpp:408] post_FC -> post_FC_top\nI0817 16:29:31.920447 17472 net.cpp:150] Setting up post_FC\nI0817 16:29:31.920462 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:31.920467 17472 net.cpp:165] Memory required for data: 1450534500\nI0817 16:29:31.920477 17472 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:29:31.920485 17472 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:29:31.920491 17472 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:29:31.920500 17472 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:29:31.920513 17472 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:29:31.920560 17472 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:29:31.920572 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:31.920578 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:31.920583 17472 net.cpp:165] Memory required for data: 1450544500\nI0817 16:29:31.920588 17472 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:29:31.920634 17472 net.cpp:100] Creating Layer accuracy\nI0817 16:29:31.920645 17472 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:29:31.920653 17472 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:29:31.920661 17472 net.cpp:408] accuracy -> accuracy\nI0817 16:29:31.920704 17472 net.cpp:150] Setting up accuracy\nI0817 16:29:31.920717 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:31.920722 17472 net.cpp:165] Memory required for data: 1450544504\nI0817 16:29:31.920734 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:31.920743 17472 net.cpp:100] Creating Layer loss\nI0817 16:29:31.920749 17472 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:29:31.920756 17472 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:29:31.920764 17472 net.cpp:408] loss -> loss\nI0817 16:29:31.920810 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:31.920969 17472 net.cpp:150] Setting up loss\nI0817 16:29:31.920986 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:31.920991 17472 net.cpp:160]     with loss weight 1\nI0817 16:29:31.921066 17472 net.cpp:165] Memory required for data: 1450544508\nI0817 16:29:31.921075 17472 net.cpp:226] loss needs backward computation.\nI0817 16:29:31.921082 17472 net.cpp:228] accuracy does not need backward computation.\nI0817 16:29:31.921087 17472 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:29:31.921092 17472 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:29:31.921097 17472 net.cpp:226] post_pool needs backward computation.\nI0817 16:29:31.921103 17472 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:29:31.921113 17472 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:29:31.921119 17472 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:29:31.921124 17472 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:29:31.921129 17472 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:29:31.921134 17472 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:29:31.921139 17472 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:29:31.921144 17472 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:29:31.921149 17472 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:29:31.921154 17472 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:29:31.921159 17472 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:29:31.921164 17472 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:29:31.921169 17472 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:29:31.921175 17472 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:29:31.921180 17472 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:29:31.921185 17472 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:29:31.921190 17472 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:29:31.921193 17472 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:29:31.921198 17472 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:29:31.921203 17472 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:29:31.921208 17472 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:29:31.921213 17472 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:29:31.921218 17472 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:29:31.921223 17472 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:29:31.921228 17472 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:29:31.921233 17472 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:29:31.921238 17472 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:29:31.921242 17472 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:29:31.921247 17472 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:29:31.921252 17472 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:29:31.921257 17472 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:29:31.921262 17472 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:29:31.921267 17472 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:29:31.921272 17472 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:29:31.921277 17472 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:29:31.921289 17472 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:29:31.921295 17472 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:29:31.921300 17472 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:29:31.921305 17472 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:29:31.921310 17472 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:29:31.921315 17472 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:29:31.921320 17472 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:29:31.921325 17472 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:29:31.921330 17472 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:29:31.921335 
17472 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:29:31.921340 17472 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:29:31.921345 17472 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:29:31.921350 17472 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:29:31.921355 17472 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:29:31.921360 17472 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:29:31.921365 17472 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:29:31.921370 17472 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:29:31.921376 17472 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:29:31.921381 17472 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:29:31.921386 17472 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:29:31.921391 17472 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:29:31.921396 17472 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:29:31.921401 17472 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:29:31.921406 17472 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:29:31.921416 17472 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:29:31.921423 17472 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:29:31.921428 17472 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:29:31.921434 17472 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:29:31.921439 17472 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:29:31.921444 17472 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:29:31.921449 17472 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:29:31.921454 17472 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:29:31.921458 17472 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:29:31.921463 17472 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:29:31.921469 17472 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:29:31.921474 17472 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:29:31.921479 17472 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:29:31.921484 17472 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:29:31.921490 17472 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:29:31.921495 17472 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:29:31.921500 17472 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:29:31.921505 17472 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:29:31.921509 17472 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:29:31.921515 17472 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:29:31.921520 17472 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:29:31.921525 17472 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:29:31.921531 17472 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:29:31.921541 17472 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:29:31.921547 17472 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:29:31.921553 17472 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:29:31.921560 17472 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:29:31.921564 17472 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:29:31.921569 17472 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:29:31.921574 17472 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:29:31.921579 17472 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:29:31.921584 17472 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:29:31.921591 17472 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:29:31.921596 17472 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:29:31.921600 17472 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:29:31.921605 17472 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:29:31.921612 17472 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:29:31.921617 17472 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:29:31.921622 17472 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:29:31.921627 17472 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:29:31.921631 17472 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:29:31.921636 17472 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:29:31.921641 17472 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:29:31.921648 17472 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:29:31.921653 17472 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:29:31.921658 17472 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:29:31.921663 17472 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:29:31.921669 17472 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:29:31.921674 17472 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:29:31.921679 17472 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:29:31.921687 17472 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:29:31.921692 17472 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:29:31.921697 17472 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:29:31.921703 17472 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:29:31.921708 17472 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:29:31.921713 17472 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:29:31.921720 17472 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:29:31.921725 17472 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:29:31.921730 17472 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:29:31.921736 17472 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:29:31.921741 17472 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:29:31.921746 17472 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:29:31.921751 17472 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:29:31.921756 17472 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:29:31.921761 17472 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:29:31.921766 17472 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:29:31.921772 17472 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:29:31.921777 17472 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:29:31.921782 17472 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:29:31.921788 17472 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:29:31.921792 17472 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:29:31.921802 17472 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:29:31.921808 17472 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:29:31.921814 17472 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:29:31.921819 17472 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:29:31.921825 17472 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:29:31.921830 17472 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:29:31.921835 17472 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:29:31.921841 17472 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:29:31.921846 17472 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:29:31.921851 17472 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:29:31.921856 17472 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:29:31.921862 17472 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:29:31.921867 17472 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:29:31.921874 17472 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:29:31.921878 17472 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:29:31.921885 17472 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:29:31.921890 17472 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:29:31.921895 17472 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:29:31.921900 17472 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:29:31.921905 17472 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:29:31.921911 17472 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:29:31.921916 17472 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:29:31.921921 17472 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:29:31.921927 17472 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:29:31.921932 17472 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:29:31.921938 17472 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:29:31.921943 17472 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:29:31.921949 17472 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:29:31.921955 17472 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:29:31.921960 17472 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:29:31.921965 17472 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:29:31.921972 17472 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:29:31.921977 17472 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:29:31.921983 17472 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:29:31.921988 17472 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:29:31.921993 17472 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:29:31.921999 17472 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:29:31.922004 17472 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:29:31.922009 17472 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:29:31.922015 17472 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:29:31.922020 17472 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:29:31.922025 17472 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:29:31.922031 17472 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:29:31.922036 17472 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:29:31.922044 17472 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:29:31.922047 17472 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:29:31.922053 17472 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:29:31.922067 17472 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:29:31.922073 17472 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:29:31.922080 17472 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:29:31.922085 17472 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:29:31.922091 17472 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:29:31.922096 17472 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:29:31.922101 17472 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:29:31.922111 17472 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:29:31.922119 17472 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:29:31.922124 17472 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:29:31.922130 17472 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:29:31.922137 17472 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:29:31.922142 17472 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:29:31.922147 17472 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:29:31.922152 17472 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:29:31.922158 17472 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:29:31.922163 17472 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:29:31.922169 17472 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:29:31.922174 17472 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:29:31.922180 17472 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:29:31.922185 17472 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:29:31.922191 17472 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:29:31.922197 17472 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:29:31.922202 17472 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:29:31.922209 17472 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:29:31.922214 17472 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:29:31.922219 17472 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:29:31.922224 17472 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:29:31.922230 17472 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:29:31.922235 17472 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:29:31.922240 17472 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:29:31.922246 17472 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:29:31.922252 17472 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:29:31.922257 17472 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:29:31.922263 17472 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:29:31.922268 17472 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:29:31.922273 17472 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:29:31.922279 17472 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:29:31.922286 17472 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:29:31.922291 17472 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:29:31.922297 17472 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:29:31.922302 17472 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:29:31.922307 17472 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:29:31.922313 17472 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:29:31.922318 17472 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:29:31.922323 17472 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:29:31.922329 17472 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:29:31.922340 17472 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:29:31.922346 17472 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:29:31.922353 17472 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:29:31.922358 17472 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:29:31.922364 17472 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:29:31.922369 17472 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:29:31.922375 17472 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:29:31.922381 17472 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:29:31.922386 17472 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:29:31.922391 17472 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:29:31.922397 17472 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:29:31.922404 17472 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:29:31.922408 17472 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:29:31.922415 17472 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:29:31.922420 17472 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:29:31.922425 17472 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:29:31.922431 17472 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:29:31.922437 17472 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:29:31.922442 17472 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:29:31.922447 17472 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:29:31.922453 17472 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:29:31.922458 17472 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:29:31.922464 17472 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:29:31.922469 17472 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:29:31.922475 17472 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:29:31.922482 17472 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:29:31.922487 17472 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:29:31.922492 17472 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:29:31.922497 17472 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:29:31.922502 17472 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:29:31.922508 17472 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:29:31.922514 17472 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:29:31.922519 17472 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:29:31.922524 17472 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:29:31.922531 17472 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:29:31.922536 17472 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:29:31.922543 17472 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:29:31.922547 17472 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:29:31.922554 17472 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:29:31.922559 17472 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:29:31.922564 17472 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:29:31.922569 17472 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:29:31.922575 17472 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:29:31.922580 17472 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:29:31.922587 17472 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:29:31.922592 17472 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:29:31.922598 17472 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:29:31.922608 17472 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:29:31.922614 17472 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:29:31.922621 17472 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:29:31.922626 17472 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:29:31.922631 17472 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:29:31.922637 17472 net.cpp:226] pre_relu needs backward computation.\nI0817 16:29:31.922642 17472 net.cpp:226] pre_scale needs backward computation.\nI0817 16:29:31.922647 17472 net.cpp:226] pre_bn needs backward computation.\nI0817 16:29:31.922652 17472 net.cpp:226] pre_conv needs backward computation.\nI0817 16:29:31.922659 17472 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:29:31.922665 17472 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:29:31.922669 17472 net.cpp:270] This network produces output accuracy\nI0817 16:29:31.922677 17472 net.cpp:270] This network produces output loss\nI0817 16:29:31.923043 17472 net.cpp:283] Network initialization done.\nI0817 16:29:31.932657 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:31.932698 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:31.932760 17472 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:29:31.933158 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:29:31.933177 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:29:31.933187 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:29:31.933197 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:29:31.933207 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:29:31.933215 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:29:31.933223 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:29:31.933233 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:29:31.933241 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:29:31.933250 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:29:31.933259 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:29:31.933267 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:29:31.933277 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:29:31.933285 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:29:31.933295 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:29:31.933302 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:29:31.933311 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:29:31.933320 17472 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:29:31.933328 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:29:31.933347 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:29:31.933357 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:29:31.933365 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:29:31.933377 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:29:31.933387 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:29:31.933395 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:29:31.933403 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:29:31.933413 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:29:31.933420 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:29:31.933429 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:29:31.933437 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:29:31.933446 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:29:31.933455 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:29:31.933465 17472 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:29:31.933472 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:29:31.933480 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:29:31.933488 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:29:31.933497 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:29:31.933506 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:29:31.933514 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:29:31.933522 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:29:31.933534 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:29:31.933543 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:29:31.933552 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:29:31.933559 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:29:31.933568 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:29:31.933576 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:29:31.933585 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:29:31.933593 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:29:31.933601 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:29:31.933619 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:29:31.933627 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:29:31.933635 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:29:31.933645 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:29:31.933653 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:29:31.933662 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:29:31.933670 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:29:31.935322 17472 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:29:31.936952 17472 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:29:31.937726 17472 net.cpp:100] Creating Layer dataLayer\nI0817 16:29:31.937749 17472 net.cpp:408] dataLayer -> data_top\nI0817 16:29:31.937765 17472 net.cpp:408] dataLayer -> label\nI0817 16:29:31.937778 17472 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:29:31.944628 17479 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:29:31.944944 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:31.952291 17472 net.cpp:150] Setting up dataLayer\nI0817 16:29:31.952317 17472 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:29:31.952327 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.952332 17472 net.cpp:165] Memory required for data: 1536500\nI0817 16:29:31.952338 17472 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:29:31.952352 17472 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:29:31.952358 17472 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:29:31.952365 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:29:31.952380 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:29:31.952507 17472 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:29:31.952520 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.952527 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:31.952534 17472 net.cpp:165] Memory required for data: 1537500\nI0817 16:29:31.952541 17472 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:29:31.952558 17472 net.cpp:100] Creating Layer pre_conv\nI0817 16:29:31.952565 17472 net.cpp:434] pre_conv <- data_top\nI0817 16:29:31.952580 17472 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:29:31.953058 17472 net.cpp:150] Setting up pre_conv\nI0817 16:29:31.953083 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.953088 17472 net.cpp:165] Memory required for data: 9729500\nI0817 16:29:31.953104 17472 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:29:31.953128 17472 net.cpp:100] Creating Layer pre_bn\nI0817 16:29:31.953135 17472 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:29:31.953181 17472 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:29:31.953526 17472 net.cpp:150] Setting up pre_bn\nI0817 16:29:31.953539 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.953544 17472 net.cpp:165] Memory required for data: 17921500\nI0817 16:29:31.953564 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:31.953574 17472 net.cpp:100] Creating Layer pre_scale\nI0817 16:29:31.953580 17472 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:29:31.953591 17472 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:29:31.953680 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:31.953879 17472 net.cpp:150] Setting up pre_scale\nI0817 16:29:31.953893 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.953898 17472 net.cpp:165] Memory required for data: 26113500\nI0817 16:29:31.953912 17472 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:29:31.953919 17472 net.cpp:100] Creating Layer pre_relu\nI0817 16:29:31.953925 17472 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:29:31.953933 17472 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:29:31.953949 17472 net.cpp:150] Setting up pre_relu\nI0817 16:29:31.953956 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.953964 17472 net.cpp:165] Memory required for data: 
34305500\nI0817 16:29:31.953969 17472 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:29:31.953976 17472 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:29:31.953982 17472 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:29:31.954005 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:29:31.954017 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:29:31.954071 17472 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:29:31.954080 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.954087 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.954092 17472 net.cpp:165] Memory required for data: 50689500\nI0817 16:29:31.954097 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:29:31.954113 17472 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:29:31.954119 17472 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:29:31.954134 17472 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:29:31.954514 17472 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:29:31.954530 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.954533 17472 net.cpp:165] Memory required for data: 58881500\nI0817 16:29:31.954546 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:29:31.954569 17472 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:29:31.954578 17472 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:29:31.954587 17472 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:29:31.954896 17472 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:29:31.954916 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.954923 17472 net.cpp:165] Memory required for data: 67073500\nI0817 16:29:31.954934 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:31.954942 17472 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:29:31.954948 17472 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:29:31.954957 17472 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.955067 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:31.955435 17472 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:29:31.955449 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.955461 17472 net.cpp:165] Memory required for data: 75265500\nI0817 16:29:31.955471 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:29:31.955483 17472 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:29:31.955492 17472 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:29:31.955500 17472 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.955510 17472 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:29:31.955518 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.955523 17472 net.cpp:165] Memory required for data: 83457500\nI0817 16:29:31.955533 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:29:31.955548 17472 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:29:31.955552 17472 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:29:31.955564 17472 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:29:31.955960 17472 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:29:31.955976 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.955981 17472 net.cpp:165] Memory required for data: 91649500\nI0817 16:29:31.955991 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:29:31.955999 17472 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:29:31.956004 17472 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:29:31.956012 17472 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:29:31.956326 17472 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:29:31.956341 17472 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956346 17472 net.cpp:165] Memory required for data: 99841500\nI0817 16:29:31.956364 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:31.956373 17472 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:29:31.956382 17472 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:29:31.956394 17472 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:29:31.956460 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:31.956643 17472 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:29:31.956656 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956661 17472 net.cpp:165] Memory required for data: 108033500\nI0817 16:29:31.956671 17472 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:29:31.956687 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:29:31.956694 17472 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:29:31.956701 17472 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:29:31.956712 17472 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:29:31.956755 17472 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:29:31.956765 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956770 17472 net.cpp:165] Memory required for data: 116225500\nI0817 16:29:31.956775 17472 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:29:31.956784 17472 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:29:31.956789 17472 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:29:31.956799 17472 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:29:31.956809 17472 net.cpp:150] Setting up L1_b1_relu\nI0817 16:29:31.956818 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956822 17472 net.cpp:165] Memory required for data: 124417500\nI0817 16:29:31.956827 17472 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.956836 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.956841 17472 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:29:31.956852 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:31.956861 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:31.956917 17472 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:31.956938 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956948 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.956954 17472 net.cpp:165] Memory required for data: 140801500\nI0817 16:29:31.956959 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:29:31.956974 17472 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:29:31.956982 17472 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:31.956991 17472 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:29:31.957398 17472 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:29:31.957413 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.957418 17472 net.cpp:165] Memory required for data: 148993500\nI0817 16:29:31.957429 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:29:31.957439 17472 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:29:31.957445 17472 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:29:31.957473 17472 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:29:31.957801 17472 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:29:31.957821 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.957826 17472 net.cpp:165] Memory required for data: 157185500\nI0817 16:29:31.957840 17472 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:29:31.957850 17472 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:29:31.957856 17472 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:29:31.957865 17472 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.957931 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:31.958142 17472 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:29:31.958155 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.958160 17472 net.cpp:165] Memory required for data: 165377500\nI0817 16:29:31.958173 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:29:31.958184 17472 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:29:31.958190 17472 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:29:31.958197 17472 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.958210 17472 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:29:31.958217 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.958222 17472 net.cpp:165] Memory required for data: 173569500\nI0817 16:29:31.958227 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:29:31.958241 17472 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:29:31.958247 17472 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:29:31.958259 17472 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:29:31.958830 17472 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:29:31.958845 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.958850 17472 net.cpp:165] Memory required for data: 181761500\nI0817 16:29:31.958863 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:29:31.958873 17472 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:29:31.958878 17472 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:29:31.958890 17472 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:29:31.959209 17472 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:29:31.959226 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959231 17472 net.cpp:165] Memory required for data: 189953500\nI0817 16:29:31.959254 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:31.959264 17472 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:29:31.959270 17472 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:29:31.959277 17472 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:29:31.959352 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:31.959571 17472 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:29:31.959584 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959592 17472 net.cpp:165] Memory required for data: 198145500\nI0817 16:29:31.959609 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:29:31.959619 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:29:31.959628 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:29:31.959635 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:31.959647 17472 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:29:31.959686 17472 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:29:31.959697 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959700 17472 net.cpp:165] Memory required for data: 206337500\nI0817 16:29:31.959707 17472 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:29:31.959720 17472 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:29:31.959729 17472 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:29:31.959736 17472 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:29:31.959746 17472 net.cpp:150] Setting up L1_b2_relu\nI0817 16:29:31.959753 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959758 17472 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:29:31.959765 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.959772 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.959777 17472 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:29:31.959784 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:31.959796 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:31.959849 17472 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:31.959861 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959868 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.959873 17472 net.cpp:165] Memory required for data: 230913500\nI0817 16:29:31.959878 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:29:31.959895 17472 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:29:31.959911 17472 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:31.959921 17472 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:29:31.960324 17472 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:29:31.960338 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.960343 17472 net.cpp:165] Memory required for data: 239105500\nI0817 16:29:31.960352 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:29:31.960369 17472 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:29:31.960378 17472 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:29:31.960392 17472 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:29:31.960696 17472 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:29:31.960711 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.960718 17472 net.cpp:165] Memory required for data: 
247297500\nI0817 16:29:31.960729 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:31.960738 17472 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:29:31.960744 17472 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:29:31.960757 17472 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.960824 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:31.961004 17472 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:29:31.961019 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.961025 17472 net.cpp:165] Memory required for data: 255489500\nI0817 16:29:31.961035 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:29:31.961042 17472 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:29:31.961048 17472 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:29:31.961061 17472 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.961079 17472 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:29:31.961087 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.961091 17472 net.cpp:165] Memory required for data: 263681500\nI0817 16:29:31.961097 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:29:31.961120 17472 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:29:31.961132 17472 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:29:31.961141 17472 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:29:31.961771 17472 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:29:31.961787 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.961793 17472 net.cpp:165] Memory required for data: 271873500\nI0817 16:29:31.961802 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:29:31.961819 17472 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:29:31.961827 17472 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:29:31.961834 17472 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:29:31.962174 17472 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:29:31.962189 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.962194 17472 net.cpp:165] Memory required for data: 280065500\nI0817 16:29:31.962208 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:31.962216 17472 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:29:31.962222 17472 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:29:31.962230 17472 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:29:31.962301 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:31.962483 17472 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:29:31.962496 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.962502 17472 net.cpp:165] Memory required for data: 288257500\nI0817 16:29:31.962510 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:29:31.962519 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:29:31.962528 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:29:31.962535 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:31.962546 17472 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:29:31.962584 17472 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:29:31.962596 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.962602 17472 net.cpp:165] Memory required for data: 296449500\nI0817 16:29:31.962607 17472 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:29:31.962618 17472 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:29:31.962625 17472 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:29:31.962632 17472 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:29:31.962642 17472 net.cpp:150] Setting up L1_b3_relu\nI0817 16:29:31.962649 17472 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:29:31.962653 17472 net.cpp:165] Memory required for data: 304641500\nI0817 16:29:31.962658 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.962668 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.962673 17472 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:29:31.962682 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:31.962690 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:31.962749 17472 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:31.962759 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.962766 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.962770 17472 net.cpp:165] Memory required for data: 321025500\nI0817 16:29:31.962775 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:29:31.962790 17472 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:29:31.962806 17472 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:31.962816 17472 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:29:31.963192 17472 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:29:31.963207 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.963212 17472 net.cpp:165] Memory required for data: 329217500\nI0817 16:29:31.963222 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:29:31.963234 17472 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:29:31.963241 17472 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:29:31.963250 17472 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:29:31.963523 17472 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:29:31.963539 17472 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:29:31.963544 17472 net.cpp:165] Memory required for data: 337409500\nI0817 16:29:31.963555 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:31.963563 17472 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:29:31.963569 17472 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:29:31.963577 17472 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.963639 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:31.963800 17472 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:29:31.963814 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.963819 17472 net.cpp:165] Memory required for data: 345601500\nI0817 16:29:31.963827 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:29:31.963835 17472 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:29:31.963841 17472 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:29:31.963851 17472 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.963861 17472 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:29:31.963868 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.963873 17472 net.cpp:165] Memory required for data: 353793500\nI0817 16:29:31.963877 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:29:31.963887 17472 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:29:31.963893 17472 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:29:31.963904 17472 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:29:31.964267 17472 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:29:31.964282 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.964287 17472 net.cpp:165] Memory required for data: 361985500\nI0817 16:29:31.964298 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:29:31.964306 17472 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:29:31.964313 17472 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:29:31.964323 17472 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:29:31.964596 17472 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:29:31.964609 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.964614 17472 net.cpp:165] Memory required for data: 370177500\nI0817 16:29:31.964624 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:31.964633 17472 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:29:31.964638 17472 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:29:31.964646 17472 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:29:31.964705 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:31.964862 17472 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:29:31.964875 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.964880 17472 net.cpp:165] Memory required for data: 378369500\nI0817 16:29:31.964890 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:29:31.964898 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:29:31.964905 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:29:31.964910 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:31.964928 17472 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:29:31.964962 17472 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:29:31.964972 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.964977 17472 net.cpp:165] Memory required for data: 386561500\nI0817 16:29:31.964982 17472 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:29:31.964993 17472 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:29:31.964998 17472 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:29:31.965005 17472 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:29:31.965015 17472 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:29:31.965023 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.965026 17472 net.cpp:165] Memory required for data: 394753500\nI0817 16:29:31.965030 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.965037 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.965042 17472 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:29:31.965049 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:31.965059 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:31.965113 17472 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:31.965131 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.965144 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.965149 17472 net.cpp:165] Memory required for data: 411137500\nI0817 16:29:31.965155 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:29:31.965171 17472 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:29:31.965178 17472 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:31.965188 17472 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:29:31.965540 17472 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:29:31.965554 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.965559 17472 net.cpp:165] Memory required for data: 419329500\nI0817 16:29:31.965581 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:29:31.965595 17472 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:29:31.965600 17472 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:29:31.965611 17472 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:29:31.965883 17472 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:29:31.965898 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.965901 17472 net.cpp:165] Memory required for data: 427521500\nI0817 16:29:31.965912 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:31.965920 17472 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:29:31.965926 17472 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:29:31.965934 17472 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.966017 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:31.966187 17472 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:29:31.966200 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.966205 17472 net.cpp:165] Memory required for data: 435713500\nI0817 16:29:31.966214 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:29:31.966223 17472 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:29:31.966228 17472 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:29:31.966238 17472 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.966248 17472 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:29:31.966256 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.966260 17472 net.cpp:165] Memory required for data: 443905500\nI0817 16:29:31.966265 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:29:31.966282 17472 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:29:31.966289 17472 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:29:31.966300 17472 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:29:31.966650 17472 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:29:31.966665 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.966670 17472 net.cpp:165] Memory required for data: 452097500\nI0817 16:29:31.966677 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:29:31.966686 17472 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:29:31.966692 17472 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:29:31.966704 17472 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:29:31.966982 17472 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:29:31.966998 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967003 17472 net.cpp:165] Memory required for data: 460289500\nI0817 16:29:31.967015 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:31.967022 17472 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:29:31.967028 17472 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:29:31.967036 17472 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:29:31.967093 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:31.967259 17472 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:29:31.967272 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967278 17472 net.cpp:165] Memory required for data: 468481500\nI0817 16:29:31.967286 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:29:31.967298 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:29:31.967305 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:29:31.967311 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:31.967322 17472 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:29:31.967356 17472 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:29:31.967366 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967370 17472 net.cpp:165] Memory required for data: 476673500\nI0817 16:29:31.967375 17472 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:29:31.967387 17472 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:29:31.967393 17472 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:29:31.967401 17472 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:29:31.967409 17472 net.cpp:150] Setting up L1_b5_relu\nI0817 16:29:31.967417 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967422 17472 net.cpp:165] Memory required for data: 484865500\nI0817 16:29:31.967425 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.967432 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.967437 17472 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:29:31.967444 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:31.967453 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:31.967504 17472 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:31.967515 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967521 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967526 17472 net.cpp:165] Memory required for data: 501249500\nI0817 16:29:31.967530 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:29:31.967545 17472 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:29:31.967550 17472 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:31.967559 17472 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:29:31.967906 17472 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:29:31.967921 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.967931 17472 net.cpp:165] Memory required for data: 509441500\nI0817 16:29:31.967941 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:29:31.967952 17472 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:29:31.967959 17472 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:29:31.967967 17472 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:29:31.968252 17472 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:29:31.968271 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.968276 17472 net.cpp:165] Memory required for data: 517633500\nI0817 16:29:31.968286 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:31.968296 17472 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:29:31.968302 17472 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:29:31.968308 17472 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.968365 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:31.968524 17472 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:29:31.968538 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.968542 17472 net.cpp:165] Memory required for data: 525825500\nI0817 16:29:31.968551 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:29:31.968562 17472 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:29:31.968569 17472 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:29:31.968580 17472 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.968590 17472 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:29:31.968596 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.968600 17472 net.cpp:165] Memory required for data: 534017500\nI0817 16:29:31.968605 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:29:31.968616 17472 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:29:31.968621 17472 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:29:31.968632 17472 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:29:31.968977 17472 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:29:31.968991 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.968996 17472 net.cpp:165] Memory required for data: 542209500\nI0817 16:29:31.969004 17472 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:29:31.969013 17472 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:29:31.969019 17472 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:29:31.969030 17472 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:29:31.969312 17472 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:29:31.969326 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969331 17472 net.cpp:165] Memory required for data: 550401500\nI0817 16:29:31.969341 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:31.969352 17472 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:29:31.969359 17472 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:29:31.969367 17472 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:29:31.969424 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:31.969594 17472 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:29:31.969609 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969614 17472 net.cpp:165] Memory required for data: 558593500\nI0817 16:29:31.969624 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:29:31.969645 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:29:31.969650 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:29:31.969657 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:31.969666 17472 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:29:31.969704 17472 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:29:31.969713 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969718 17472 net.cpp:165] Memory required for data: 566785500\nI0817 16:29:31.969730 17472 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:29:31.969738 17472 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:29:31.969744 17472 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:29:31.969750 17472 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:29:31.969760 17472 net.cpp:150] Setting up L1_b6_relu\nI0817 16:29:31.969768 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969771 17472 net.cpp:165] Memory required for data: 574977500\nI0817 16:29:31.969775 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.969782 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.969787 17472 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:29:31.969799 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:31.969808 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:31.969856 17472 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:31.969867 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969873 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.969878 17472 net.cpp:165] Memory required for data: 591361500\nI0817 16:29:31.969883 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:29:31.969897 17472 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:29:31.969903 17472 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:31.969913 17472 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:29:31.970295 17472 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:29:31.970310 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.970315 17472 net.cpp:165] Memory required for data: 599553500\nI0817 16:29:31.970324 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:29:31.970336 17472 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:29:31.970343 17472 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:29:31.970355 17472 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:29:31.970626 17472 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:29:31.970639 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.970644 17472 net.cpp:165] Memory required for data: 607745500\nI0817 16:29:31.970654 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:31.970664 17472 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:29:31.970669 17472 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:29:31.970677 17472 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.970738 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:31.970896 17472 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:29:31.970908 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.970913 17472 net.cpp:165] Memory required for data: 615937500\nI0817 16:29:31.970921 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:29:31.970932 17472 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:29:31.970939 17472 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:29:31.970947 17472 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.970955 17472 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:29:31.970963 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.970968 17472 net.cpp:165] Memory required for data: 624129500\nI0817 16:29:31.970971 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:29:31.970985 17472 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:29:31.970991 17472 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:29:31.971002 17472 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:29:31.971365 17472 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:29:31.971379 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.971391 17472 
net.cpp:165] Memory required for data: 632321500\nI0817 16:29:31.971400 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:29:31.971418 17472 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:29:31.971426 17472 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:29:31.971436 17472 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:29:31.971711 17472 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:29:31.971724 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.971729 17472 net.cpp:165] Memory required for data: 640513500\nI0817 16:29:31.971740 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:31.971748 17472 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:29:31.971755 17472 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:29:31.971761 17472 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:29:31.971822 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:31.971982 17472 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:29:31.971995 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.971999 17472 net.cpp:165] Memory required for data: 648705500\nI0817 16:29:31.972008 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:29:31.972018 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:29:31.972023 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:29:31.972033 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:31.972041 17472 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:29:31.972074 17472 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:29:31.972085 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.972090 17472 net.cpp:165] Memory required for data: 656897500\nI0817 16:29:31.972095 17472 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:29:31.972111 17472 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:29:31.972118 17472 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:29:31.972128 17472 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:29:31.972139 17472 net.cpp:150] Setting up L1_b7_relu\nI0817 16:29:31.972146 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.972151 17472 net.cpp:165] Memory required for data: 665089500\nI0817 16:29:31.972156 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.972162 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.972167 17472 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:29:31.972174 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:31.972183 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:31.972234 17472 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:31.972246 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.972252 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.972257 17472 net.cpp:165] Memory required for data: 681473500\nI0817 16:29:31.972262 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:29:31.972275 17472 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:29:31.972281 17472 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:31.972290 17472 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:29:31.972651 17472 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:29:31.972664 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.972669 17472 net.cpp:165] Memory required for data: 689665500\nI0817 16:29:31.972678 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:29:31.972690 17472 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:29:31.972697 17472 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:29:31.972714 17472 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:29:31.972991 17472 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:29:31.973004 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.973009 17472 net.cpp:165] Memory required for data: 697857500\nI0817 16:29:31.973019 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:31.973027 17472 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:29:31.973033 17472 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:29:31.973042 17472 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.973101 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:31.973289 17472 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:29:31.973302 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.973307 17472 net.cpp:165] Memory required for data: 706049500\nI0817 16:29:31.973316 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:29:31.973327 17472 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:29:31.973333 17472 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:29:31.973341 17472 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.973351 17472 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:29:31.973357 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.973362 17472 net.cpp:165] Memory required for data: 714241500\nI0817 16:29:31.973366 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:29:31.973381 17472 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:29:31.973387 17472 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:29:31.973397 17472 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:29:31.973754 17472 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:29:31.973768 17472 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.973773 17472 net.cpp:165] Memory required for data: 722433500\nI0817 16:29:31.973783 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:29:31.973794 17472 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:29:31.973800 17472 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:29:31.973809 17472 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:29:31.974086 17472 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:29:31.974098 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974103 17472 net.cpp:165] Memory required for data: 730625500\nI0817 16:29:31.974122 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:31.974130 17472 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:29:31.974136 17472 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:29:31.974144 17472 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:29:31.974205 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:31.974366 17472 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:29:31.974380 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974385 17472 net.cpp:165] Memory required for data: 738817500\nI0817 16:29:31.974393 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:29:31.974402 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:29:31.974408 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:29:31.974414 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:31.974426 17472 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:29:31.974459 17472 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:29:31.974468 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974473 17472 net.cpp:165] Memory required for data: 747009500\nI0817 16:29:31.974478 17472 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:29:31.974488 17472 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:29:31.974494 17472 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:29:31.974501 17472 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:29:31.974517 17472 net.cpp:150] Setting up L1_b8_relu\nI0817 16:29:31.974525 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974529 17472 net.cpp:165] Memory required for data: 755201500\nI0817 16:29:31.974534 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.974541 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.974546 17472 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:29:31.974553 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:31.974563 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:31.974613 17472 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:31.974625 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974632 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.974637 17472 net.cpp:165] Memory required for data: 771585500\nI0817 16:29:31.974642 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:29:31.974654 17472 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:29:31.974661 17472 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:31.974670 17472 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:29:31.975033 17472 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:29:31.975049 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.975054 17472 net.cpp:165] Memory required for data: 779777500\nI0817 16:29:31.975062 17472 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:29:31.975073 17472 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:29:31.975080 17472 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:29:31.975088 17472 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:29:31.975374 17472 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:29:31.975388 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.975392 17472 net.cpp:165] Memory required for data: 787969500\nI0817 16:29:31.975404 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:31.975414 17472 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:29:31.975421 17472 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:29:31.975428 17472 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.975489 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:31.975652 17472 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:29:31.975664 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.975669 17472 net.cpp:165] Memory required for data: 796161500\nI0817 16:29:31.975678 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:29:31.975687 17472 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:29:31.975693 17472 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:29:31.975704 17472 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:31.975714 17472 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:29:31.975721 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.975726 17472 net.cpp:165] Memory required for data: 804353500\nI0817 16:29:31.975731 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:29:31.975744 17472 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:29:31.975750 17472 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:29:31.975759 17472 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:29:31.976119 17472 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:29:31.976132 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.976137 17472 net.cpp:165] Memory required for data: 812545500\nI0817 16:29:31.976146 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:29:31.976157 17472 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:29:31.976164 17472 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:29:31.976179 17472 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:29:31.976452 17472 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:29:31.976465 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.976470 17472 net.cpp:165] Memory required for data: 820737500\nI0817 16:29:31.976506 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:31.976521 17472 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:29:31.976527 17472 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:29:31.976534 17472 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:29:31.976596 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:31.976755 17472 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:29:31.976768 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.976773 17472 net.cpp:165] Memory required for data: 828929500\nI0817 16:29:31.976781 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:29:31.976790 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:29:31.976796 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:29:31.976804 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:31.976810 17472 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:29:31.976847 17472 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:29:31.976856 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.976861 17472 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:29:31.976866 17472 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:29:31.976873 17472 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:29:31.976879 17472 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:29:31.976889 17472 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:29:31.976898 17472 net.cpp:150] Setting up L1_b9_relu\nI0817 16:29:31.976905 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.976910 17472 net.cpp:165] Memory required for data: 845313500\nI0817 16:29:31.976914 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.976922 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.976927 17472 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:29:31.976936 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:31.976946 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:31.976994 17472 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:31.977005 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.977011 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:31.977016 17472 net.cpp:165] Memory required for data: 861697500\nI0817 16:29:31.977021 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:29:31.977035 17472 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:29:31.977041 17472 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:31.977049 17472 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:29:31.977419 17472 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:29:31.977433 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.977438 17472 net.cpp:165] Memory required for data: 
863745500\nI0817 16:29:31.977447 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:29:31.977458 17472 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:29:31.977465 17472 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:29:31.977473 17472 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:29:31.977737 17472 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:29:31.977751 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.977756 17472 net.cpp:165] Memory required for data: 865793500\nI0817 16:29:31.977772 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:31.977782 17472 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:29:31.977787 17472 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:29:31.977794 17472 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.977855 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:31.978041 17472 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:29:31.978065 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.978070 17472 net.cpp:165] Memory required for data: 867841500\nI0817 16:29:31.978080 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:29:31.978087 17472 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:29:31.978093 17472 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:29:31.978101 17472 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:31.978116 17472 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:29:31.978126 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.978129 17472 net.cpp:165] Memory required for data: 869889500\nI0817 16:29:31.978134 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:29:31.978148 17472 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:29:31.978154 17472 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:29:31.978165 17472 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:29:31.978528 17472 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:29:31.978543 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.978548 17472 net.cpp:165] Memory required for data: 871937500\nI0817 16:29:31.978555 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:29:31.978567 17472 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:29:31.978574 17472 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:29:31.978585 17472 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:29:31.978849 17472 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:29:31.978863 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.978868 17472 net.cpp:165] Memory required for data: 873985500\nI0817 16:29:31.978878 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:31.978886 17472 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:29:31.978893 17472 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:29:31.978899 17472 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:29:31.978961 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:31.979127 17472 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:29:31.979141 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.979146 17472 net.cpp:165] Memory required for data: 876033500\nI0817 16:29:31.979156 17472 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:29:31.979168 17472 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:29:31.979176 17472 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:31.979183 17472 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:29:31.979218 17472 net.cpp:150] Setting up L2_b1_pool\nI0817 16:29:31.979226 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.979231 17472 net.cpp:165] Memory required for data: 878081500\nI0817 16:29:31.979236 17472 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:29:31.979244 17472 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:29:31.979250 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:29:31.979256 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:29:31.979266 17472 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:29:31.979300 17472 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:29:31.979311 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.979316 17472 net.cpp:165] Memory required for data: 880129500\nI0817 16:29:31.979321 17472 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:29:31.979331 17472 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:29:31.979344 17472 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:29:31.979351 17472 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:29:31.979362 17472 net.cpp:150] Setting up L2_b1_relu\nI0817 16:29:31.979368 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.979373 17472 net.cpp:165] Memory required for data: 882177500\nI0817 16:29:31.979377 17472 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:29:31.979387 17472 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:29:31.979393 17472 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:29:31.981652 17472 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:29:31.981669 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:31.981674 17472 net.cpp:165] Memory required for data: 884225500\nI0817 16:29:31.981680 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:29:31.981690 17472 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:29:31.981696 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:29:31.981704 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:29:31.981715 17472 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:29:31.981758 17472 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:29:31.981773 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.981778 17472 net.cpp:165] Memory required for data: 888321500\nI0817 16:29:31.981784 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.981791 17472 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.981797 17472 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:29:31.981806 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:31.981818 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:31.981869 17472 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:31.981880 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.981887 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.981891 17472 net.cpp:165] Memory required for data: 896513500\nI0817 16:29:31.981897 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:29:31.981911 17472 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:29:31.981917 17472 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:31.981930 17472 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:29:31.982442 17472 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:29:31.982456 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.982461 17472 net.cpp:165] Memory required for data: 900609500\nI0817 16:29:31.982470 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:29:31.982481 17472 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:29:31.982486 17472 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:29:31.982498 17472 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:29:31.982769 17472 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:29:31.982781 17472 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:29:31.982786 17472 net.cpp:165] Memory required for data: 904705500\nI0817 16:29:31.982797 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:31.982808 17472 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:29:31.982815 17472 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:29:31.982822 17472 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.982882 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:31.983039 17472 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:29:31.983052 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.983057 17472 net.cpp:165] Memory required for data: 908801500\nI0817 16:29:31.983067 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:29:31.983078 17472 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:29:31.983091 17472 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:29:31.983099 17472 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:31.983115 17472 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:29:31.983126 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.983130 17472 net.cpp:165] Memory required for data: 912897500\nI0817 16:29:31.983135 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:29:31.983146 17472 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:29:31.983152 17472 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:29:31.983163 17472 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:29:31.983657 17472 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:29:31.983671 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.983676 17472 net.cpp:165] Memory required for data: 916993500\nI0817 16:29:31.983685 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:29:31.983695 17472 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:29:31.983701 
17472 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:29:31.983716 17472 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:29:31.983978 17472 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:29:31.983991 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.983996 17472 net.cpp:165] Memory required for data: 921089500\nI0817 16:29:31.984007 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:31.984019 17472 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:29:31.984026 17472 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:29:31.984033 17472 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:29:31.984091 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:31.984256 17472 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:29:31.984269 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.984274 17472 net.cpp:165] Memory required for data: 925185500\nI0817 16:29:31.984283 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:29:31.984295 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:29:31.984302 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:29:31.984309 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:31.984318 17472 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:29:31.984349 17472 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:29:31.984357 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.984362 17472 net.cpp:165] Memory required for data: 929281500\nI0817 16:29:31.984367 17472 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:29:31.984375 17472 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:29:31.984381 17472 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:29:31.984390 17472 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:29:31.984400 17472 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:29:31.984407 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.984412 17472 net.cpp:165] Memory required for data: 933377500\nI0817 16:29:31.984416 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.984423 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.984429 17472 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:29:31.984436 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:31.984447 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:31.984495 17472 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:31.984508 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.984513 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.984519 17472 net.cpp:165] Memory required for data: 941569500\nI0817 16:29:31.984530 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:29:31.984542 17472 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:29:31.984549 17472 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:31.984560 17472 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:29:31.985052 17472 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:29:31.985066 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.985071 17472 net.cpp:165] Memory required for data: 945665500\nI0817 16:29:31.985080 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:29:31.985090 17472 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:29:31.985095 17472 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:29:31.985112 17472 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:29:31.985389 17472 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:29:31.985404 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.985409 17472 net.cpp:165] Memory required for data: 949761500\nI0817 16:29:31.985419 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:31.985430 17472 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:29:31.985436 17472 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:29:31.985443 17472 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.985501 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:31.985661 17472 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:29:31.985673 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.985678 17472 net.cpp:165] Memory required for data: 953857500\nI0817 16:29:31.985687 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:29:31.985698 17472 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:29:31.985705 17472 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:29:31.985713 17472 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:31.985723 17472 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:29:31.985729 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.985733 17472 net.cpp:165] Memory required for data: 957953500\nI0817 16:29:31.985738 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:29:31.985754 17472 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:29:31.985760 17472 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:29:31.985771 17472 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:29:31.986271 17472 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:29:31.986286 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.986291 17472 net.cpp:165] Memory required for data: 962049500\nI0817 16:29:31.986300 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:29:31.986310 17472 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:29:31.986315 17472 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:29:31.986326 17472 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:29:31.986598 17472 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:29:31.986610 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.986615 17472 net.cpp:165] Memory required for data: 966145500\nI0817 16:29:31.986625 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:31.986637 17472 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:29:31.986644 17472 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:29:31.986651 17472 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:29:31.986711 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:31.986866 17472 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:29:31.986879 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.986884 17472 net.cpp:165] Memory required for data: 970241500\nI0817 16:29:31.986893 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:29:31.986902 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:29:31.986915 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:29:31.986927 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:31.986934 17472 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:29:31.986963 17472 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:29:31.986981 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.986986 17472 net.cpp:165] Memory required for data: 974337500\nI0817 16:29:31.986991 17472 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:29:31.987011 17472 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:29:31.987018 17472 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:29:31.987025 17472 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:29:31.987035 17472 net.cpp:150] Setting up L2_b3_relu\nI0817 16:29:31.987042 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.987046 17472 net.cpp:165] Memory required for data: 978433500\nI0817 16:29:31.987051 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.987058 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.987064 17472 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:29:31.987074 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:31.987085 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:31.987140 17472 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:31.987152 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.987159 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.987164 17472 net.cpp:165] Memory required for data: 986625500\nI0817 16:29:31.987169 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:29:31.987184 17472 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:29:31.987190 17472 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:31.987200 17472 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:29:31.987695 17472 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:29:31.987709 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.987715 17472 net.cpp:165] Memory required for data: 990721500\nI0817 16:29:31.987723 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:29:31.987736 17472 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:29:31.987743 17472 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:29:31.987751 17472 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:29:31.988024 17472 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:29:31.988037 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.988041 17472 net.cpp:165] Memory required for data: 994817500\nI0817 16:29:31.988052 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:31.988061 17472 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:29:31.988067 17472 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:29:31.988078 17472 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.988143 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:31.988309 17472 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:29:31.988322 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.988327 17472 net.cpp:165] Memory required for data: 998913500\nI0817 16:29:31.988337 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:29:31.988344 17472 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:29:31.988350 17472 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:29:31.988360 17472 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:31.988371 17472 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:29:31.988379 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.988382 17472 net.cpp:165] Memory required for data: 1003009500\nI0817 16:29:31.988395 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:29:31.988410 17472 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:29:31.988416 17472 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:29:31.988425 17472 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:29:31.988919 17472 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:29:31.988934 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.988939 17472 net.cpp:165] Memory required for data: 1007105500\nI0817 16:29:31.988947 17472 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:29:31.988960 17472 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:29:31.988966 17472 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:29:31.988975 17472 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:29:31.989250 17472 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:29:31.989264 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989269 17472 net.cpp:165] Memory required for data: 1011201500\nI0817 16:29:31.989279 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:31.989287 17472 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:29:31.989293 17472 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:29:31.989301 17472 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:29:31.989362 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:31.989524 17472 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:29:31.989540 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989545 17472 net.cpp:165] Memory required for data: 1015297500\nI0817 16:29:31.989554 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:29:31.989563 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:29:31.989569 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:29:31.989576 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:31.989583 17472 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:29:31.989615 17472 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:29:31.989624 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989629 17472 net.cpp:165] Memory required for data: 1019393500\nI0817 16:29:31.989634 17472 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:29:31.989641 17472 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:29:31.989647 17472 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:29:31.989658 17472 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:29:31.989667 17472 net.cpp:150] Setting up L2_b4_relu\nI0817 16:29:31.989675 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989679 17472 net.cpp:165] Memory required for data: 1023489500\nI0817 16:29:31.989683 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.989691 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.989696 17472 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:29:31.989706 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:31.989717 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:31.989764 17472 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:31.989775 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989781 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.989786 17472 net.cpp:165] Memory required for data: 1031681500\nI0817 16:29:31.989791 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:29:31.989806 17472 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:29:31.989812 17472 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:31.989821 17472 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:29:31.990337 17472 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:29:31.990351 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.990356 17472 net.cpp:165] Memory required for data: 1035777500\nI0817 16:29:31.990365 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:29:31.990377 17472 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:29:31.990384 17472 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:29:31.990392 17472 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:29:31.990658 17472 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:29:31.990670 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.990675 17472 net.cpp:165] Memory required for data: 1039873500\nI0817 16:29:31.990685 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:31.990694 17472 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:29:31.990700 17472 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:29:31.990710 17472 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.990773 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:31.990936 17472 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:29:31.990948 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.990953 17472 net.cpp:165] Memory required for data: 1043969500\nI0817 16:29:31.990962 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:29:31.990970 17472 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:29:31.990977 17472 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:29:31.990983 17472 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:31.990993 17472 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:29:31.991000 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.991004 17472 net.cpp:165] Memory required for data: 1048065500\nI0817 16:29:31.991009 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:29:31.991022 17472 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:29:31.991029 17472 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:29:31.991039 17472 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:29:31.991539 17472 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:29:31.991554 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.991559 17472 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:29:31.991567 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:29:31.991580 17472 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:29:31.991586 17472 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:29:31.991597 17472 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:29:31.991894 17472 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:29:31.991909 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.991914 17472 net.cpp:165] Memory required for data: 1056257500\nI0817 16:29:31.991925 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:31.991933 17472 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:29:31.991940 17472 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:29:31.991947 17472 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:29:31.992008 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:31.992204 17472 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:29:31.992221 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.992226 17472 net.cpp:165] Memory required for data: 1060353500\nI0817 16:29:31.992235 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:29:31.992244 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:29:31.992251 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:29:31.992259 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:31.992265 17472 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:29:31.992296 17472 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:29:31.992312 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.992317 17472 net.cpp:165] Memory required for data: 1064449500\nI0817 16:29:31.992322 17472 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:29:31.992331 17472 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:29:31.992336 17472 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:29:31.992347 17472 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:29:31.992357 17472 net.cpp:150] Setting up L2_b5_relu\nI0817 16:29:31.992362 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.992367 17472 net.cpp:165] Memory required for data: 1068545500\nI0817 16:29:31.992372 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.992379 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.992384 17472 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:29:31.992394 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:31.992404 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:31.992453 17472 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:31.992465 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.992470 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.992475 17472 net.cpp:165] Memory required for data: 1076737500\nI0817 16:29:31.992480 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:29:31.992497 17472 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:29:31.992504 17472 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:31.992513 17472 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:29:31.993011 17472 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:29:31.993026 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.993031 17472 net.cpp:165] Memory required for data: 1080833500\nI0817 16:29:31.993039 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:29:31.993052 17472 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:29:31.993058 17472 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:29:31.993067 17472 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:29:31.993336 17472 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:29:31.993350 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.993355 17472 net.cpp:165] Memory required for data: 1084929500\nI0817 16:29:31.993366 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:31.993374 17472 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:29:31.993381 17472 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:29:31.993387 17472 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.993448 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:31.993605 17472 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:29:31.993620 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.993625 17472 net.cpp:165] Memory required for data: 1089025500\nI0817 16:29:31.993634 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:29:31.993643 17472 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:29:31.993649 17472 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:29:31.993655 17472 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:31.993665 17472 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:29:31.993672 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.993676 17472 net.cpp:165] Memory required for data: 1093121500\nI0817 16:29:31.993681 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:29:31.993695 17472 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:29:31.993701 17472 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:29:31.993712 17472 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:29:31.994220 17472 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:29:31.994235 17472 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.994240 17472 net.cpp:165] Memory required for data: 1097217500\nI0817 16:29:31.994249 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:29:31.994261 17472 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:29:31.994268 17472 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:29:31.994279 17472 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:29:31.994549 17472 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:29:31.994562 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.994567 17472 net.cpp:165] Memory required for data: 1101313500\nI0817 16:29:31.994577 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:31.994586 17472 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:29:31.994592 17472 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:29:31.994601 17472 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:29:31.994659 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:31.994815 17472 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:29:31.994828 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.994832 17472 net.cpp:165] Memory required for data: 1105409500\nI0817 16:29:31.994841 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:29:31.994853 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:29:31.994860 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:29:31.994868 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:31.994874 17472 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:29:31.994902 17472 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:29:31.994911 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.994916 17472 net.cpp:165] Memory required for data: 1109505500\nI0817 16:29:31.994921 17472 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:29:31.994935 17472 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:29:31.994940 17472 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:29:31.994947 17472 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:29:31.994956 17472 net.cpp:150] Setting up L2_b6_relu\nI0817 16:29:31.994963 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.994967 17472 net.cpp:165] Memory required for data: 1113601500\nI0817 16:29:31.994972 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.994979 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.994985 17472 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:29:31.994992 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:31.995002 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:31.995052 17472 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:31.995064 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.995070 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.995075 17472 net.cpp:165] Memory required for data: 1121793500\nI0817 16:29:31.995080 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:29:31.995095 17472 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:29:31.995100 17472 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:31.995115 17472 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:29:31.996620 17472 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:29:31.996637 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.996644 17472 net.cpp:165] Memory required for data: 1125889500\nI0817 16:29:31.996652 17472 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:29:31.996672 17472 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:29:31.996680 17472 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:29:31.996688 17472 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:29:31.996965 17472 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:29:31.996979 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.996984 17472 net.cpp:165] Memory required for data: 1129985500\nI0817 16:29:31.996994 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:31.997006 17472 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:29:31.997014 17472 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:29:31.997020 17472 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.997081 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:31.997251 17472 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:29:31.997264 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.997269 17472 net.cpp:165] Memory required for data: 1134081500\nI0817 16:29:31.997279 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:29:31.997290 17472 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:29:31.997297 17472 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:29:31.997304 17472 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:31.997318 17472 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:29:31.997324 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.997328 17472 net.cpp:165] Memory required for data: 1138177500\nI0817 16:29:31.997334 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:29:31.997344 17472 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:29:31.997350 17472 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:29:31.997361 17472 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:29:31.997846 17472 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:29:31.997860 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.997865 17472 net.cpp:165] Memory required for data: 1142273500\nI0817 16:29:31.997874 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:29:31.997884 17472 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:29:31.997889 17472 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:29:31.997900 17472 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:29:31.998179 17472 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:29:31.998193 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998198 17472 net.cpp:165] Memory required for data: 1146369500\nI0817 16:29:31.998208 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:31.998220 17472 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:29:31.998226 17472 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:29:31.998234 17472 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:29:31.998292 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:31.998453 17472 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:29:31.998466 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998471 17472 net.cpp:165] Memory required for data: 1150465500\nI0817 16:29:31.998481 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:29:31.998492 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:29:31.998498 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:29:31.998505 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:31.998513 17472 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:29:31.998544 17472 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:29:31.998554 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998558 17472 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:29:31.998564 17472 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:29:31.998571 17472 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:29:31.998584 17472 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:29:31.998594 17472 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:29:31.998605 17472 net.cpp:150] Setting up L2_b7_relu\nI0817 16:29:31.998611 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998615 17472 net.cpp:165] Memory required for data: 1158657500\nI0817 16:29:31.998620 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.998627 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.998632 17472 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:29:31.998639 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:31.998648 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:31.998702 17472 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:31.998713 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998720 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.998724 17472 net.cpp:165] Memory required for data: 1166849500\nI0817 16:29:31.998729 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:29:31.998740 17472 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:29:31.998747 17472 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:31.998759 17472 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:29:31.999263 17472 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:29:31.999276 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.999281 17472 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:29:31.999290 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:29:31.999300 17472 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:29:31.999306 17472 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:29:31.999317 17472 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:29:31.999589 17472 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:29:31.999603 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.999608 17472 net.cpp:165] Memory required for data: 1175041500\nI0817 16:29:31.999617 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:31.999629 17472 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:29:31.999635 17472 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:29:31.999644 17472 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.999703 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:31.999861 17472 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:29:31.999874 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.999879 17472 net.cpp:165] Memory required for data: 1179137500\nI0817 16:29:31.999888 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:29:31.999899 17472 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:29:31.999907 17472 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:29:31.999919 17472 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:31.999929 17472 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:29:31.999936 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:31.999940 17472 net.cpp:165] Memory required for data: 1183233500\nI0817 16:29:31.999945 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:29:31.999959 17472 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:29:31.999965 17472 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:29:31.999976 17472 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:29:32.000474 17472 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:29:32.000489 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.000494 17472 net.cpp:165] Memory required for data: 1187329500\nI0817 16:29:32.000504 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:29:32.000519 17472 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:29:32.000526 17472 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:29:32.000537 17472 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:29:32.000813 17472 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:29:32.000825 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.000830 17472 net.cpp:165] Memory required for data: 1191425500\nI0817 16:29:32.000841 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:32.000852 17472 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:29:32.000859 17472 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:29:32.000867 17472 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:29:32.000926 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:32.001087 17472 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:29:32.001101 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.001106 17472 net.cpp:165] Memory required for data: 1195521500\nI0817 16:29:32.001121 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:29:32.001134 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:29:32.001140 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:29:32.001147 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:32.001155 17472 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:29:32.001184 17472 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:29:32.001199 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:32.001204 17472 net.cpp:165] Memory required for data: 1199617500\nI0817 16:29:32.001209 17472 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:29:32.001216 17472 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:29:32.001222 17472 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:29:32.001230 17472 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:29:32.001238 17472 net.cpp:150] Setting up L2_b8_relu\nI0817 16:29:32.001245 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.001250 17472 net.cpp:165] Memory required for data: 1203713500\nI0817 16:29:32.001255 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:32.001265 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:32.001271 17472 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:29:32.001277 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:32.001301 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:32.001354 17472 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:32.001369 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.001376 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.001381 17472 net.cpp:165] Memory required for data: 1211905500\nI0817 16:29:32.001386 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:29:32.001397 17472 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:29:32.001404 17472 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:32.001412 17472 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:29:32.001909 17472 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:29:32.001926 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:32.001931 17472 net.cpp:165] Memory required for data: 1216001500\nI0817 16:29:32.001940 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:29:32.001950 17472 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:29:32.001956 17472 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:29:32.001967 17472 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:29:32.002266 17472 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:29:32.002290 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.002300 17472 net.cpp:165] Memory required for data: 1220097500\nI0817 16:29:32.002321 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:32.002337 17472 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:29:32.002349 17472 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:29:32.002370 17472 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:32.002452 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:32.002635 17472 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:29:32.002650 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.002655 17472 net.cpp:165] Memory required for data: 1224193500\nI0817 16:29:32.002665 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:29:32.002672 17472 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:29:32.002679 17472 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:29:32.002689 17472 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:32.002701 17472 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:29:32.002707 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.002712 17472 net.cpp:165] Memory required for data: 1228289500\nI0817 16:29:32.002717 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:29:32.002730 17472 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:29:32.002737 17472 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:29:32.002745 17472 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:29:32.004247 17472 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:29:32.004266 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.004271 17472 net.cpp:165] Memory required for data: 1232385500\nI0817 16:29:32.004281 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:29:32.004290 17472 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:29:32.004297 17472 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:29:32.004309 17472 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:29:32.004582 17472 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:29:32.004595 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.004601 17472 net.cpp:165] Memory required for data: 1236481500\nI0817 16:29:32.004650 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:32.004663 17472 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:29:32.004669 17472 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:29:32.004678 17472 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:29:32.004740 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:32.004896 17472 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:29:32.004911 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.004916 17472 net.cpp:165] Memory required for data: 1240577500\nI0817 16:29:32.004925 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:29:32.004935 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:29:32.004942 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:29:32.004950 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:32.004959 17472 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:29:32.004987 17472 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:29:32.004997 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.005002 17472 net.cpp:165] Memory required for data: 1244673500\nI0817 16:29:32.005007 17472 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:29:32.005014 17472 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:29:32.005020 17472 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:29:32.005030 17472 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:29:32.005040 17472 net.cpp:150] Setting up L2_b9_relu\nI0817 16:29:32.005048 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.005059 17472 net.cpp:165] Memory required for data: 1248769500\nI0817 16:29:32.005065 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:32.005072 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:32.005077 17472 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:29:32.005087 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:32.005098 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:32.005152 17472 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:32.005163 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.005170 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:32.005174 17472 net.cpp:165] Memory required for data: 1256961500\nI0817 16:29:32.005179 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:29:32.005201 17472 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:29:32.005208 17472 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:32.005218 17472 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:29:32.005714 17472 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:29:32.005728 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.005733 17472 net.cpp:165] Memory required for data: 1257985500\nI0817 16:29:32.005743 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:29:32.005754 17472 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:29:32.005761 17472 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:29:32.005769 17472 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:29:32.006045 17472 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:29:32.006058 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.006063 17472 net.cpp:165] Memory required for data: 1259009500\nI0817 16:29:32.006074 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:32.006086 17472 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:29:32.006093 17472 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:29:32.006100 17472 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:32.006400 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:32.006568 17472 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:29:32.006582 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.006587 17472 net.cpp:165] Memory required for data: 1260033500\nI0817 16:29:32.006597 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:29:32.006608 17472 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:29:32.006614 17472 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:29:32.006621 17472 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:32.006634 17472 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:29:32.006641 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.006646 17472 net.cpp:165] Memory required for data: 1261057500\nI0817 16:29:32.006651 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:29:32.006662 17472 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:29:32.006669 17472 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:29:32.006680 17472 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:29:32.007179 17472 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:29:32.007194 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.007199 17472 net.cpp:165] Memory required for data: 1262081500\nI0817 16:29:32.007207 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:29:32.007217 17472 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:29:32.007223 17472 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:29:32.007235 17472 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:29:32.007513 17472 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:29:32.007534 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.007539 17472 net.cpp:165] Memory required for data: 1263105500\nI0817 16:29:32.007550 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:32.007558 17472 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:29:32.007565 17472 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:29:32.007571 17472 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:29:32.007632 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:32.007796 17472 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:29:32.007812 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.007817 17472 net.cpp:165] Memory required for data: 1264129500\nI0817 16:29:32.007825 17472 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:29:32.007835 17472 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:29:32.007841 17472 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:32.007850 17472 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:29:32.007887 17472 net.cpp:150] Setting up L3_b1_pool\nI0817 16:29:32.007897 17472 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:29:32.007901 17472 net.cpp:165] Memory required for data: 1265153500\nI0817 16:29:32.007906 17472 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:29:32.007915 17472 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:29:32.007921 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:29:32.007928 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:29:32.007936 17472 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:29:32.007977 17472 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:29:32.007988 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.007993 17472 net.cpp:165] Memory required for data: 1266177500\nI0817 16:29:32.007998 17472 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:29:32.008007 17472 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:29:32.008011 17472 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:29:32.008018 17472 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:29:32.008028 17472 net.cpp:150] Setting up L3_b1_relu\nI0817 16:29:32.008034 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.008039 17472 net.cpp:165] Memory required for data: 1267201500\nI0817 16:29:32.008044 17472 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:29:32.008054 17472 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:29:32.008064 17472 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:29:32.009315 17472 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:29:32.009332 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:32.009337 17472 net.cpp:165] Memory required for data: 1268225500\nI0817 16:29:32.009343 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:29:32.009356 17472 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:29:32.009362 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:29:32.009369 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:29:32.009377 17472 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:29:32.009423 17472 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:29:32.009434 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.009439 17472 net.cpp:165] Memory required for data: 1270273500\nI0817 16:29:32.009444 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:32.009452 17472 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:32.009459 17472 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:29:32.009469 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:32.009479 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:32.009532 17472 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:32.009547 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.009562 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.009567 17472 net.cpp:165] Memory required for data: 1274369500\nI0817 16:29:32.009572 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:29:32.009587 17472 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:29:32.009593 17472 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:32.009603 17472 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:29:32.010656 17472 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:29:32.010671 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.010676 17472 net.cpp:165] Memory required for data: 1276417500\nI0817 16:29:32.010685 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:29:32.010697 17472 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:29:32.010704 17472 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:29:32.010712 17472 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:29:32.010987 17472 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:29:32.011000 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.011005 17472 net.cpp:165] Memory required for data: 1278465500\nI0817 16:29:32.011016 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:32.011025 17472 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:29:32.011031 17472 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:29:32.011039 17472 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:32.011102 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:32.011274 17472 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:29:32.011288 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.011293 17472 net.cpp:165] Memory required for data: 1280513500\nI0817 16:29:32.011302 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:29:32.011312 17472 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:29:32.011317 17472 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:29:32.011327 17472 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:32.011338 17472 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:29:32.011345 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.011349 17472 net.cpp:165] Memory required for data: 1282561500\nI0817 16:29:32.011354 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:29:32.011368 17472 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:29:32.011374 17472 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:29:32.011384 17472 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:29:32.012428 17472 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:29:32.012444 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.012449 17472 net.cpp:165] Memory required for data: 1284609500\nI0817 16:29:32.012457 17472 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:29:32.012470 17472 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:29:32.012476 17472 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:29:32.012485 17472 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:29:32.012759 17472 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:29:32.012773 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.012778 17472 net.cpp:165] Memory required for data: 1286657500\nI0817 16:29:32.012787 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:32.012799 17472 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:29:32.012805 17472 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:29:32.012814 17472 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:29:32.012876 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:32.013036 17472 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:29:32.013048 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.013053 17472 net.cpp:165] Memory required for data: 1288705500\nI0817 16:29:32.013069 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:29:32.013082 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:29:32.013088 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:29:32.013095 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:32.013105 17472 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:29:32.013147 17472 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:29:32.013156 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.013161 17472 net.cpp:165] Memory required for data: 1290753500\nI0817 16:29:32.013166 17472 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:29:32.013178 17472 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:29:32.013185 17472 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:29:32.013191 17472 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:29:32.013201 17472 net.cpp:150] Setting up L3_b2_relu\nI0817 16:29:32.013208 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.013212 17472 net.cpp:165] Memory required for data: 1292801500\nI0817 16:29:32.013217 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:32.013224 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:32.013229 17472 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:29:32.013237 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:32.013247 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:32.013296 17472 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:32.013309 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.013315 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.013319 17472 net.cpp:165] Memory required for data: 1296897500\nI0817 16:29:32.013324 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:29:32.013339 17472 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:29:32.013345 17472 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:32.013355 17472 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:29:32.014395 17472 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:29:32.014410 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.014415 17472 net.cpp:165] Memory required for data: 1298945500\nI0817 16:29:32.014423 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:29:32.014436 17472 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:29:32.014442 17472 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:29:32.014453 17472 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:29:32.014719 17472 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:29:32.014732 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.014737 17472 net.cpp:165] Memory required for data: 1300993500\nI0817 16:29:32.014747 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:32.014756 17472 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:29:32.014762 17472 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:29:32.014770 17472 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:32.014830 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:32.014989 17472 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:29:32.015002 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.015007 17472 net.cpp:165] Memory required for data: 1303041500\nI0817 16:29:32.015017 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:29:32.015024 17472 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:29:32.015030 17472 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:29:32.015040 17472 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:32.015051 17472 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:29:32.015064 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.015069 17472 net.cpp:165] Memory required for data: 1305089500\nI0817 16:29:32.015074 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:29:32.015089 17472 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:29:32.015096 17472 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:29:32.015105 17472 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:29:32.016150 17472 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:29:32.016165 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.016170 17472 net.cpp:165] Memory required for data: 1307137500\nI0817 16:29:32.016180 17472 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:29:32.016191 17472 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:29:32.016198 17472 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:29:32.016206 17472 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:29:32.016479 17472 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:29:32.016492 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.016497 17472 net.cpp:165] Memory required for data: 1309185500\nI0817 16:29:32.016507 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:32.016520 17472 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:29:32.016525 17472 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:29:32.016533 17472 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:29:32.016597 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:32.016757 17472 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:29:32.016770 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.016774 17472 net.cpp:165] Memory required for data: 1311233500\nI0817 16:29:32.016784 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:29:32.016796 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:29:32.016803 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:29:32.016809 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:32.016819 17472 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:29:32.016854 17472 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:29:32.016865 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.016870 17472 net.cpp:165] Memory required for data: 1313281500\nI0817 16:29:32.016875 17472 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:29:32.016885 17472 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:29:32.016891 17472 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:29:32.016898 17472 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:29:32.016908 17472 net.cpp:150] Setting up L3_b3_relu\nI0817 16:29:32.016916 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.016919 17472 net.cpp:165] Memory required for data: 1315329500\nI0817 16:29:32.016924 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:32.016932 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:32.016937 17472 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:29:32.016943 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:32.016953 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:32.017004 17472 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:32.017015 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.017021 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.017025 17472 net.cpp:165] Memory required for data: 1319425500\nI0817 16:29:32.017031 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:29:32.017045 17472 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:29:32.017051 17472 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:32.017067 17472 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:29:32.018116 17472 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:29:32.018131 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.018136 17472 net.cpp:165] Memory required for data: 1321473500\nI0817 16:29:32.018144 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:29:32.018157 17472 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:29:32.018164 17472 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:29:32.018175 17472 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:29:32.018443 17472 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:29:32.018456 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.018461 17472 net.cpp:165] Memory required for data: 1323521500\nI0817 16:29:32.018471 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:32.018481 17472 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:29:32.018486 17472 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:29:32.018497 17472 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:32.018556 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:32.018723 17472 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:29:32.018736 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.018740 17472 net.cpp:165] Memory required for data: 1325569500\nI0817 16:29:32.018749 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:29:32.018759 17472 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:29:32.018764 17472 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:29:32.018774 17472 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:32.018785 17472 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:29:32.018792 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.018796 17472 net.cpp:165] Memory required for data: 1327617500\nI0817 16:29:32.018801 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:29:32.018815 17472 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:29:32.018821 17472 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:29:32.018831 17472 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:29:32.020853 17472 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:29:32.020871 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.020876 17472 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:29:32.020885 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:29:32.020900 17472 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:29:32.020906 17472 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:29:32.020917 17472 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:29:32.021198 17472 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:29:32.021212 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021217 17472 net.cpp:165] Memory required for data: 1331713500\nI0817 16:29:32.021229 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:32.021237 17472 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:29:32.021245 17472 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:29:32.021255 17472 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:29:32.021315 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:32.021482 17472 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:29:32.021495 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021500 17472 net.cpp:165] Memory required for data: 1333761500\nI0817 16:29:32.021509 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:29:32.021519 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:29:32.021525 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:29:32.021533 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:32.021544 17472 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:29:32.021589 17472 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:29:32.021600 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021603 17472 net.cpp:165] Memory required for data: 1335809500\nI0817 16:29:32.021608 17472 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:29:32.021616 17472 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:29:32.021622 17472 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:29:32.021630 17472 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:29:32.021639 17472 net.cpp:150] Setting up L3_b4_relu\nI0817 16:29:32.021646 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021651 17472 net.cpp:165] Memory required for data: 1337857500\nI0817 16:29:32.021656 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:32.021664 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:32.021670 17472 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:29:32.021678 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:32.021688 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:32.021739 17472 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:32.021750 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021757 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.021761 17472 net.cpp:165] Memory required for data: 1341953500\nI0817 16:29:32.021766 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:29:32.021778 17472 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:29:32.021785 17472 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:32.021796 17472 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:29:32.022831 17472 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:29:32.022846 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.022851 17472 net.cpp:165] Memory required for data: 1344001500\nI0817 16:29:32.022861 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:29:32.022871 17472 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:29:32.022877 17472 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:29:32.022889 17472 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:29:32.023171 17472 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:29:32.023187 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.023192 17472 net.cpp:165] Memory required for data: 1346049500\nI0817 16:29:32.023203 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:32.023212 17472 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:29:32.023218 17472 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:29:32.023226 17472 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:32.023285 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:32.023447 17472 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:29:32.023458 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.023463 17472 net.cpp:165] Memory required for data: 1348097500\nI0817 16:29:32.023473 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:29:32.023484 17472 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:29:32.023491 17472 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:29:32.023499 17472 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:32.023509 17472 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:29:32.023515 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.023519 17472 net.cpp:165] Memory required for data: 1350145500\nI0817 16:29:32.023525 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:29:32.023538 17472 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:29:32.023545 17472 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:29:32.023561 17472 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:29:32.024590 17472 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:29:32.024605 17472 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:29:32.024610 17472 net.cpp:165] Memory required for data: 1352193500\nI0817 16:29:32.024617 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:29:32.024629 17472 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:29:32.024636 17472 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:29:32.024647 17472 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:29:32.024912 17472 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:29:32.024925 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.024930 17472 net.cpp:165] Memory required for data: 1354241500\nI0817 16:29:32.024940 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:32.024950 17472 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:29:32.024955 17472 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:29:32.024967 17472 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:29:32.025027 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:32.025195 17472 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:29:32.025208 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.025213 17472 net.cpp:165] Memory required for data: 1356289500\nI0817 16:29:32.025223 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:29:32.025233 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:29:32.025238 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:29:32.025245 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:32.025256 17472 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:29:32.025293 17472 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:29:32.025305 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.025310 17472 net.cpp:165] Memory required for data: 1358337500\nI0817 16:29:32.025315 17472 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:29:32.025323 17472 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:29:32.025329 17472 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:29:32.025338 17472 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:29:32.025348 17472 net.cpp:150] Setting up L3_b5_relu\nI0817 16:29:32.025355 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.025360 17472 net.cpp:165] Memory required for data: 1360385500\nI0817 16:29:32.025364 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:32.025372 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:32.025377 17472 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:29:32.025385 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:32.025394 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:32.025444 17472 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:32.025454 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.025460 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.025465 17472 net.cpp:165] Memory required for data: 1364481500\nI0817 16:29:32.025470 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:29:32.025481 17472 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:29:32.025488 17472 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:32.025499 17472 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:29:32.026527 17472 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:29:32.026542 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.026547 17472 net.cpp:165] Memory required for data: 1366529500\nI0817 16:29:32.026563 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:29:32.026572 
17472 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:29:32.026579 17472 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:29:32.026590 17472 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:29:32.026866 17472 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:29:32.026880 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.026885 17472 net.cpp:165] Memory required for data: 1368577500\nI0817 16:29:32.026895 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:32.026904 17472 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:29:32.026911 17472 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:29:32.026918 17472 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:32.026979 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:32.027145 17472 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:29:32.027159 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.027164 17472 net.cpp:165] Memory required for data: 1370625500\nI0817 16:29:32.027173 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:29:32.027184 17472 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:29:32.027191 17472 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:29:32.027199 17472 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:32.027209 17472 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:29:32.027215 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.027220 17472 net.cpp:165] Memory required for data: 1372673500\nI0817 16:29:32.027225 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:29:32.027238 17472 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:29:32.027245 17472 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:29:32.027253 17472 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:29:32.028278 17472 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:29:32.028293 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.028298 17472 net.cpp:165] Memory required for data: 1374721500\nI0817 16:29:32.028306 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:29:32.028321 17472 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:29:32.028328 17472 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:29:32.028339 17472 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:29:32.028609 17472 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:29:32.028622 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.028627 17472 net.cpp:165] Memory required for data: 1376769500\nI0817 16:29:32.028637 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:32.028646 17472 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:29:32.028652 17472 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:29:32.028663 17472 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:29:32.028723 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:32.028885 17472 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:29:32.028898 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.028903 17472 net.cpp:165] Memory required for data: 1378817500\nI0817 16:29:32.028913 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:29:32.028921 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:29:32.028928 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:29:32.028940 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:32.028950 17472 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:29:32.029003 17472 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:29:32.029024 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.029034 17472 net.cpp:165] Memory required for data: 1380865500\nI0817 16:29:32.029042 17472 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:29:32.029054 17472 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:29:32.029072 17472 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:29:32.029091 17472 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:29:32.029119 17472 net.cpp:150] Setting up L3_b6_relu\nI0817 16:29:32.029136 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.029145 17472 net.cpp:165] Memory required for data: 1382913500\nI0817 16:29:32.029153 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:32.029165 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:32.029175 17472 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:29:32.029187 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:32.029198 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:32.029255 17472 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:32.029268 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.029275 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.029279 17472 net.cpp:165] Memory required for data: 1387009500\nI0817 16:29:32.029284 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:29:32.029297 17472 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:29:32.029304 17472 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:32.029315 17472 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:29:32.030351 17472 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:29:32.030366 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.030371 17472 net.cpp:165] Memory required for data: 1389057500\nI0817 16:29:32.030380 17472 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:29:32.030390 17472 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:29:32.030396 17472 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:29:32.030407 17472 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:29:32.030684 17472 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:29:32.030697 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.030702 17472 net.cpp:165] Memory required for data: 1391105500\nI0817 16:29:32.030714 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:32.030721 17472 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:29:32.030728 17472 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:29:32.030735 17472 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:32.030797 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:32.030958 17472 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:29:32.030973 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.030978 17472 net.cpp:165] Memory required for data: 1393153500\nI0817 16:29:32.030988 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:29:32.031023 17472 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:29:32.031031 17472 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:29:32.031040 17472 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:32.031050 17472 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:29:32.031057 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.031062 17472 net.cpp:165] Memory required for data: 1395201500\nI0817 16:29:32.031067 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:29:32.031081 17472 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:29:32.031088 17472 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:29:32.031096 17472 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:29:32.032167 17472 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:29:32.032182 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.032188 17472 net.cpp:165] Memory required for data: 1397249500\nI0817 16:29:32.032197 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:29:32.032213 17472 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:29:32.032222 17472 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:29:32.032232 17472 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:29:32.032502 17472 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:29:32.032518 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.032523 17472 net.cpp:165] Memory required for data: 1399297500\nI0817 16:29:32.032533 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:32.032542 17472 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:29:32.032548 17472 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:29:32.032557 17472 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:29:32.032618 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:32.032780 17472 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:29:32.032793 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.032799 17472 net.cpp:165] Memory required for data: 1401345500\nI0817 16:29:32.032807 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:29:32.032819 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:29:32.032826 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:29:32.032833 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:32.032840 17472 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:29:32.032877 17472 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:29:32.032888 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.032893 17472 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:29:32.032898 17472 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:29:32.032907 17472 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:29:32.032912 17472 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:29:32.032919 17472 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:29:32.032929 17472 net.cpp:150] Setting up L3_b7_relu\nI0817 16:29:32.032935 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.032940 17472 net.cpp:165] Memory required for data: 1405441500\nI0817 16:29:32.032944 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:32.032951 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:32.032956 17472 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:29:32.032968 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:32.032977 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:32.033025 17472 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:32.033036 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.033042 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.033047 17472 net.cpp:165] Memory required for data: 1409537500\nI0817 16:29:32.033052 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:29:32.033066 17472 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:29:32.033073 17472 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:32.033083 17472 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:29:32.035125 17472 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:29:32.035143 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.035148 17472 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:29:32.035158 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:29:32.035171 17472 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:29:32.035178 17472 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:29:32.035187 17472 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:29:32.035465 17472 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:29:32.035486 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.035492 17472 net.cpp:165] Memory required for data: 1413633500\nI0817 16:29:32.035503 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:32.035512 17472 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:29:32.035519 17472 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:29:32.035526 17472 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:32.035591 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:32.035758 17472 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:29:32.035771 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.035776 17472 net.cpp:165] Memory required for data: 1415681500\nI0817 16:29:32.035785 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:29:32.035794 17472 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:29:32.035800 17472 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:29:32.035807 17472 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:32.035817 17472 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:29:32.035825 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.035828 17472 net.cpp:165] Memory required for data: 1417729500\nI0817 16:29:32.035833 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:29:32.035847 17472 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:29:32.035854 17472 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:29:32.035866 17472 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:29:32.036905 17472 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:29:32.036919 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.036924 17472 net.cpp:165] Memory required for data: 1419777500\nI0817 16:29:32.036933 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:29:32.036945 17472 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:29:32.036952 17472 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:29:32.036960 17472 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:29:32.037245 17472 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:29:32.037257 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.037263 17472 net.cpp:165] Memory required for data: 1421825500\nI0817 16:29:32.037273 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:32.037286 17472 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:29:32.037292 17472 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:29:32.037299 17472 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:29:32.037362 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:32.037526 17472 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:29:32.037539 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.037544 17472 net.cpp:165] Memory required for data: 1423873500\nI0817 16:29:32.037552 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:29:32.037565 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:29:32.037572 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:29:32.037580 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:32.037590 17472 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:29:32.037624 17472 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:29:32.037636 17472 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:29:32.037639 17472 net.cpp:165] Memory required for data: 1425921500\nI0817 16:29:32.037644 17472 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:29:32.037653 17472 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:29:32.037659 17472 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:29:32.037670 17472 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:29:32.037681 17472 net.cpp:150] Setting up L3_b8_relu\nI0817 16:29:32.037688 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.037693 17472 net.cpp:165] Memory required for data: 1427969500\nI0817 16:29:32.037704 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:32.037713 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:32.037717 17472 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:29:32.037725 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:32.037735 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:32.037786 17472 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:32.037797 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.037803 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.037808 17472 net.cpp:165] Memory required for data: 1432065500\nI0817 16:29:32.037813 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:29:32.037827 17472 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:29:32.037834 17472 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:32.037843 17472 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:29:32.038898 17472 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:29:32.038914 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:29:32.038919 17472 net.cpp:165] Memory required for data: 1434113500\nI0817 16:29:32.038928 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:29:32.038941 17472 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:29:32.038949 17472 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:29:32.038957 17472 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:29:32.039239 17472 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:29:32.039253 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.039258 17472 net.cpp:165] Memory required for data: 1436161500\nI0817 16:29:32.039268 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:32.039278 17472 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:29:32.039284 17472 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:29:32.039293 17472 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:32.039361 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:32.039523 17472 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:29:32.039536 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.039541 17472 net.cpp:165] Memory required for data: 1438209500\nI0817 16:29:32.039551 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:29:32.039558 17472 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:29:32.039566 17472 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:29:32.039575 17472 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:32.039585 17472 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:29:32.039592 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.039597 17472 net.cpp:165] Memory required for data: 1440257500\nI0817 16:29:32.039602 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:29:32.039616 17472 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:29:32.039623 17472 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:29:32.039630 17472 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:29:32.040665 17472 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:29:32.040680 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.040685 17472 net.cpp:165] Memory required for data: 1442305500\nI0817 16:29:32.040694 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:29:32.040707 17472 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:29:32.040714 17472 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:29:32.040722 17472 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:29:32.041002 17472 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:29:32.041014 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.041025 17472 net.cpp:165] Memory required for data: 1444353500\nI0817 16:29:32.041036 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:32.041049 17472 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:29:32.041055 17472 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:29:32.041064 17472 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:29:32.041132 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:32.041299 17472 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:29:32.041312 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.041317 17472 net.cpp:165] Memory required for data: 1446401500\nI0817 16:29:32.041326 17472 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:29:32.041338 17472 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:29:32.041345 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:29:32.041352 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:32.041363 17472 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:29:32.041398 17472 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:29:32.041409 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.041414 17472 net.cpp:165] Memory required for data: 1448449500\nI0817 16:29:32.041419 17472 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:29:32.041432 17472 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:29:32.041438 17472 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:29:32.041445 17472 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:29:32.041455 17472 net.cpp:150] Setting up L3_b9_relu\nI0817 16:29:32.041462 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:32.041466 17472 net.cpp:165] Memory required for data: 1450497500\nI0817 16:29:32.041471 17472 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:29:32.041479 17472 net.cpp:100] Creating Layer post_pool\nI0817 16:29:32.041486 17472 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:29:32.041493 17472 net.cpp:408] post_pool -> post_pool\nI0817 16:29:32.041532 17472 net.cpp:150] Setting up post_pool\nI0817 16:29:32.041543 17472 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:29:32.041548 17472 net.cpp:165] Memory required for data: 1450529500\nI0817 16:29:32.041553 17472 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:29:32.041564 17472 net.cpp:100] Creating Layer post_FC\nI0817 16:29:32.041569 17472 net.cpp:434] post_FC <- post_pool\nI0817 16:29:32.041579 17472 net.cpp:408] post_FC -> post_FC_top\nI0817 16:29:32.041744 17472 net.cpp:150] Setting up post_FC\nI0817 16:29:32.041759 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:32.041764 17472 net.cpp:165] Memory required for data: 1450534500\nI0817 16:29:32.041772 17472 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:29:32.041780 17472 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:29:32.041786 17472 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:29:32.041795 17472 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:29:32.041806 17472 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:29:32.041856 17472 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:29:32.041867 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:32.041872 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:32.041877 17472 net.cpp:165] Memory required for data: 1450544500\nI0817 16:29:32.041882 17472 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:29:32.041893 17472 net.cpp:100] Creating Layer accuracy\nI0817 16:29:32.041899 17472 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:29:32.041906 17472 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:29:32.041914 17472 net.cpp:408] accuracy -> accuracy\nI0817 16:29:32.041927 17472 net.cpp:150] Setting up accuracy\nI0817 16:29:32.041934 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:32.041945 17472 net.cpp:165] Memory required for data: 1450544504\nI0817 16:29:32.041950 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:32.041959 17472 net.cpp:100] Creating Layer loss\nI0817 16:29:32.041965 17472 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:29:32.041970 17472 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:29:32.041978 17472 net.cpp:408] loss -> loss\nI0817 16:29:32.041990 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:32.042122 17472 net.cpp:150] Setting up loss\nI0817 16:29:32.042135 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:32.042140 17472 net.cpp:160]     with loss weight 1\nI0817 16:29:32.042156 17472 net.cpp:165] Memory required for data: 1450544508\nI0817 16:29:32.042163 17472 net.cpp:226] loss needs backward computation.\nI0817 16:29:32.042170 17472 net.cpp:228] accuracy does not need backward computation.\nI0817 16:29:32.042176 17472 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:29:32.042181 17472 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:29:32.042186 17472 net.cpp:226] post_pool needs backward computation.\nI0817 16:29:32.042191 17472 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:29:32.042196 17472 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:29:32.042201 17472 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:29:32.042206 17472 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:29:32.042210 17472 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:29:32.042215 17472 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:29:32.042220 17472 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:29:32.042224 17472 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:29:32.042229 17472 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:29:32.042235 17472 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:29:32.042240 17472 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:29:32.042245 17472 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:29:32.042250 17472 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:29:32.042255 17472 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:29:32.042260 17472 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:29:32.042265 17472 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:29:32.042270 17472 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:29:32.042275 17472 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:29:32.042280 17472 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:29:32.042285 17472 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:29:32.042290 17472 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:29:32.042295 17472 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:29:32.042301 17472 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:29:32.042305 17472 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:29:32.042310 17472 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:29:32.042315 17472 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:29:32.042320 17472 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:29:32.042325 17472 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:29:32.042330 17472 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:29:32.042335 17472 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:29:32.042340 17472 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:29:32.042346 17472 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:29:32.042351 17472 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:29:32.042356 17472 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:29:32.042368 17472 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:29:32.042373 17472 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:29:32.042378 17472 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:29:32.042383 17472 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:29:32.042388 17472 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:29:32.042394 17472 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:29:32.042400 17472 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:29:32.042405 17472 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:29:32.042410 17472 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:29:32.042415 17472 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:29:32.042420 17472 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:29:32.042426 17472 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:29:32.042431 17472 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:29:32.042435 17472 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:29:32.042441 17472 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:29:32.042446 17472 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:29:32.042451 17472 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:29:32.042456 17472 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:29:32.042465 17472 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:29:32.042471 17472 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:29:32.042476 17472 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:29:32.042481 17472 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:29:32.042486 17472 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:29:32.042491 17472 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:29:32.042496 17472 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:29:32.042501 17472 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:29:32.042507 17472 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:29:32.042512 17472 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:29:32.042517 17472 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:29:32.042522 17472 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:29:32.042527 17472 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:29:32.042532 17472 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:29:32.042537 17472 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:29:32.042541 
17472 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:29:32.042547 17472 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:29:32.042552 17472 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:29:32.042557 17472 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:29:32.042562 17472 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:29:32.042568 17472 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:29:32.042573 17472 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:29:32.042578 17472 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:29:32.042583 17472 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:29:32.042588 17472 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:29:32.042593 17472 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:29:32.042598 17472 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:29:32.042604 17472 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:29:32.042609 17472 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:29:32.042620 17472 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:29:32.042626 17472 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:29:32.042631 17472 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:29:32.042637 17472 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:29:32.042642 17472 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:29:32.042647 17472 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:29:32.042654 17472 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:29:32.042659 17472 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:29:32.042664 17472 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:29:32.042668 17472 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:29:32.042673 17472 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:29:32.042678 17472 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:29:32.042683 17472 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:29:32.042688 17472 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:29:32.042695 17472 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:29:32.042699 17472 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:29:32.042706 17472 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:29:32.042711 17472 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:29:32.042716 17472 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:29:32.042721 17472 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:29:32.042726 17472 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:29:32.042732 17472 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:29:32.042739 17472 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:29:32.042745 17472 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:29:32.042752 17472 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:29:32.042757 17472 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:29:32.042762 17472 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:29:32.042768 17472 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:29:32.042773 17472 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:29:32.042778 17472 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:29:32.042783 17472 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:29:32.042788 17472 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:29:32.042793 17472 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:29:32.042799 17472 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:29:32.042804 17472 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:29:32.042809 17472 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:29:32.042815 17472 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:29:32.042820 17472 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:29:32.042825 17472 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:29:32.042830 17472 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:29:32.042835 17472 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:29:32.042840 17472 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:29:32.042846 17472 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:29:32.042851 17472 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:29:32.042857 17472 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:29:32.042862 17472 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:29:32.042868 17472 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:29:32.042873 17472 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:29:32.042883 17472 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:29:32.042889 17472 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:29:32.042894 17472 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:29:32.042901 17472 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:29:32.042906 17472 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:29:32.042912 17472 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:29:32.042917 17472 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:29:32.042922 17472 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:29:32.042928 17472 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:29:32.042933 17472 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:29:32.042938 17472 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:29:32.042943 17472 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:29:32.042949 17472 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:29:32.042954 17472 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:29:32.042960 17472 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:29:32.042965 17472 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:29:32.042971 17472 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:29:32.042976 17472 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:29:32.042981 17472 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:29:32.042987 17472 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:29:32.042992 17472 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:29:32.042999 17472 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:29:32.043004 17472 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:29:32.043009 17472 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:29:32.043015 17472 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:29:32.043020 17472 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:29:32.043026 17472 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:29:32.043031 17472 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:29:32.043037 17472 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:29:32.043042 17472 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:29:32.043048 17472 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:29:32.043053 17472 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:29:32.043058 17472 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:29:32.043064 17472 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:29:32.043069 17472 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:29:32.043076 17472 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:29:32.043081 17472 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:29:32.043087 17472 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:29:32.043092 17472 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:29:32.043097 17472 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:29:32.043102 17472 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:29:32.043112 17472 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:29:32.043119 17472 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:29:32.043125 17472 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:29:32.043135 17472 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:29:32.043143 17472 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:29:32.043148 17472 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:29:32.043159 17472 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:29:32.043165 17472 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:29:32.043170 17472 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:29:32.043176 17472 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:29:32.043182 17472 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:29:32.043187 17472 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:29:32.043193 17472 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:29:32.043198 17472 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:29:32.043203 17472 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:29:32.043210 17472 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:29:32.043215 17472 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:29:32.043220 17472 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:29:32.043226 17472 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:29:32.043231 17472 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:29:32.043237 17472 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:29:32.043242 17472 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:29:32.043248 17472 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:29:32.043253 17472 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:29:32.043259 17472 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:29:32.043264 17472 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:29:32.043270 17472 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:29:32.043275 17472 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:29:32.043282 17472 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:29:32.043287 17472 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:29:32.043292 17472 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:29:32.043298 17472 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:29:32.043303 17472 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:29:32.043309 17472 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:29:32.043315 17472 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:29:32.043320 17472 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:29:32.043326 17472 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:29:32.043332 17472 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:29:32.043339 17472 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:29:32.043344 17472 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:29:32.043349 17472 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:29:32.043354 17472 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:29:32.043360 17472 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:29:32.043365 17472 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:29:32.043371 17472 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:29:32.043377 17472 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:29:32.043382 17472 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:29:32.043388 17472 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:29:32.043395 17472 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:29:32.043401 17472 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:29:32.043406 17472 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:29:32.043411 17472 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:29:32.043416 17472 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:29:32.043427 17472 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:29:32.043433 17472 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:29:32.043439 17472 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:29:32.043444 17472 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:29:32.043450 17472 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:29:32.043457 17472 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:29:32.043462 17472 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:29:32.043468 17472 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:29:32.043473 17472 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:29:32.043478 17472 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:29:32.043484 17472 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:29:32.043489 17472 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:29:32.043495 17472 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:29:32.043501 17472 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:29:32.043506 17472 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:29:32.043512 17472 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:29:32.043519 17472 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:29:32.043524 17472 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:29:32.043529 17472 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:29:32.043534 17472 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:29:32.043540 17472 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:29:32.043545 17472 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:29:32.043551 17472 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:29:32.043557 17472 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:29:32.043562 17472 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:29:32.043568 17472 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:29:32.043575 17472 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:29:32.043581 17472 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:29:32.043586 17472 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:29:32.043591 17472 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:29:32.043596 17472 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:29:32.043602 17472 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:29:32.043608 17472 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:29:32.043613 17472 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:29:32.043619 17472 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:29:32.043625 17472 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:29:32.043632 17472 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:29:32.043637 17472 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:29:32.043642 17472 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:29:32.043648 17472 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:29:32.043653 17472 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:29:32.043658 17472 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:29:32.043664 17472 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:29:32.043670 17472 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:29:32.043675 17472 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:29:32.043681 17472 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:29:32.043687 17472 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:29:32.043699 17472 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:29:32.043706 17472 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:29:32.043711 17472 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:29:32.043717 17472 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:29:32.043722 17472 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:29:32.043728 17472 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:29:32.043735 17472 net.cpp:226] pre_relu needs backward computation.\nI0817 16:29:32.043740 17472 net.cpp:226] pre_scale needs backward computation.\nI0817 16:29:32.043745 17472 net.cpp:226] pre_bn needs backward computation.\nI0817 16:29:32.043750 17472 net.cpp:226] pre_conv needs backward computation.\nI0817 16:29:32.043756 17472 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:29:32.043762 17472 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:29:32.043766 17472 net.cpp:270] This network produces output accuracy\nI0817 16:29:32.043773 17472 net.cpp:270] This network produces output loss\nI0817 16:29:32.044106 17472 net.cpp:283] Network initialization done.\nI0817 16:29:32.045133 17472 solver.cpp:60] Solver scaffolding done.\nI0817 16:29:32.269156 17472 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:29:32.628806 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:32.628883 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:32.635746 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:32.858618 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:32.858732 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:32.893359 17472 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:29:32.893471 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:33.341883 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:33.341938 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:33.349310 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:33.593457 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:33.593569 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:33.645390 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:33.645494 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:34.155808 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:34.155885 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:34.164413 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:34.430758 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:34.430922 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:34.501878 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:34.502034 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:34.585889 17472 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:29:35.072227 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:35.072283 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:29:35.082240 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:35.369915 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:35.370069 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:35.462163 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:35.462317 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:36.099645 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:36.099699 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:36.110088 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:36.429579 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:36.429759 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:36.542595 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:36.542775 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:37.249464 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:37.249516 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:37.261081 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:37.601984 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:37.602198 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:37.734689 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:37.734896 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:29:38.511642 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:38.511694 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:38.523955 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:38.574899 17498 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:38.628140 17498 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:38.965925 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:38.966174 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:39.118549 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:39.118778 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:39.290251 17472 parallel.cpp:425] Starting Optimization\nI0817 16:29:39.292815 17472 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:29:39.292834 17472 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:29:39.297310 17472 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:31:01.306813 17472 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:31:01.307157 17472 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:31:05.426169 17472 solver.cpp:228] Iteration 0, loss = 3.52262\nI0817 16:31:05.426224 17472 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI0817 16:31:05.426244 17472 solver.cpp:244]     Train net output #1: loss = 3.52262 (* 1 = 3.52262 loss)\nI0817 16:31:05.426425 17472 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:33:22.907527 17472 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:34:45.018479 17472 solver.cpp:404]     Test net output #0: accuracy = 0.15904\nI0817 16:34:45.018756 17472 solver.cpp:404]     Test net output #1: loss = 2.31152 (* 1 = 2.31152 loss)\nI0817 16:34:46.337889 17472 
solver.cpp:228] Iteration 100, loss = 2.09658\nI0817 16:34:46.337944 17472 solver.cpp:244]     Train net output #0: accuracy = 0.224\nI0817 16:34:46.337962 17472 solver.cpp:244]     Train net output #1: loss = 2.09658 (* 1 = 2.09658 loss)\nI0817 16:34:46.427425 17472 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:37:03.709414 17472 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:38:25.762405 17472 solver.cpp:404]     Test net output #0: accuracy = 0.38668\nI0817 16:38:25.762656 17472 solver.cpp:404]     Test net output #1: loss = 1.75075 (* 1 = 1.75075 loss)\nI0817 16:38:27.082765 17472 solver.cpp:228] Iteration 200, loss = 1.37343\nI0817 16:38:27.082826 17472 solver.cpp:244]     Train net output #0: accuracy = 0.464\nI0817 16:38:27.082844 17472 solver.cpp:244]     Train net output #1: loss = 1.37343 (* 1 = 1.37343 loss)\nI0817 16:38:27.173719 17472 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:40:44.720300 17472 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:42:06.779134 17472 solver.cpp:404]     Test net output #0: accuracy = 0.514\nI0817 16:42:06.779392 17472 solver.cpp:404]     Test net output #1: loss = 1.43084 (* 1 = 1.43084 loss)\nI0817 16:42:08.100057 17472 solver.cpp:228] Iteration 300, loss = 1.04488\nI0817 16:42:08.100100 17472 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0817 16:42:08.100118 17472 solver.cpp:244]     Train net output #1: loss = 1.04488 (* 1 = 1.04488 loss)\nI0817 16:42:08.183135 17472 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:44:25.493755 17472 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:45:47.521796 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67828\nI0817 16:45:47.522027 17472 solver.cpp:404]     Test net output #1: loss = 0.945623 (* 1 = 0.945623 loss)\nI0817 16:45:48.843420 17472 solver.cpp:228] Iteration 400, loss = 0.630501\nI0817 16:45:48.843463 17472 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:45:48.843479 
17472 solver.cpp:244]     Train net output #1: loss = 0.630501 (* 1 = 0.630501 loss)\nI0817 16:45:48.925921 17472 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:48:06.156519 17472 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:49:28.243558 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7292\nI0817 16:49:28.243824 17472 solver.cpp:404]     Test net output #1: loss = 0.811036 (* 1 = 0.811036 loss)\nI0817 16:49:29.564554 17472 solver.cpp:228] Iteration 500, loss = 0.568538\nI0817 16:49:29.564610 17472 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:49:29.564627 17472 solver.cpp:244]     Train net output #1: loss = 0.568538 (* 1 = 0.568538 loss)\nI0817 16:49:29.650681 17472 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:51:46.972028 17472 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:53:09.039630 17472 solver.cpp:404]     Test net output #0: accuracy = 0.72764\nI0817 16:53:09.039898 17472 solver.cpp:404]     Test net output #1: loss = 0.834725 (* 1 = 0.834725 loss)\nI0817 16:53:10.360703 17472 solver.cpp:228] Iteration 600, loss = 0.439158\nI0817 16:53:10.360744 17472 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:53:10.360759 17472 solver.cpp:244]     Train net output #1: loss = 0.439158 (* 1 = 0.439158 loss)\nI0817 16:53:10.444954 17472 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:55:27.952365 17472 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:56:50.032537 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7414\nI0817 16:56:50.032806 17472 solver.cpp:404]     Test net output #1: loss = 0.83825 (* 1 = 0.83825 loss)\nI0817 16:56:51.353000 17472 solver.cpp:228] Iteration 700, loss = 0.36264\nI0817 16:56:51.353040 17472 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 16:56:51.353056 17472 solver.cpp:244]     Train net output #1: loss = 0.36264 (* 1 = 0.36264 loss)\nI0817 16:56:51.441853 17472 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0817 16:59:08.726045 17472 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 17:00:30.796978 17472 solver.cpp:404]     Test net output #0: accuracy = 0.73244\nI0817 17:00:30.797288 17472 solver.cpp:404]     Test net output #1: loss = 0.898145 (* 1 = 0.898145 loss)\nI0817 17:00:32.117414 17472 solver.cpp:228] Iteration 800, loss = 0.395186\nI0817 17:00:32.117455 17472 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 17:00:32.117470 17472 solver.cpp:244]     Train net output #1: loss = 0.395186 (* 1 = 0.395186 loss)\nI0817 17:00:32.205422 17472 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 17:02:49.529623 17472 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 17:04:11.633112 17472 solver.cpp:404]     Test net output #0: accuracy = 0.72824\nI0817 17:04:11.633425 17472 solver.cpp:404]     Test net output #1: loss = 0.933042 (* 1 = 0.933042 loss)\nI0817 17:04:12.953960 17472 solver.cpp:228] Iteration 900, loss = 0.251157\nI0817 17:04:12.954005 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:04:12.954027 17472 solver.cpp:244]     Train net output #1: loss = 0.251157 (* 1 = 0.251157 loss)\nI0817 17:04:13.045538 17472 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 17:06:30.410589 17472 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 17:07:52.539317 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7362\nI0817 17:07:52.539602 17472 solver.cpp:404]     Test net output #1: loss = 0.904127 (* 1 = 0.904127 loss)\nI0817 17:07:53.860015 17472 solver.cpp:228] Iteration 1000, loss = 0.284688\nI0817 17:07:53.860076 17472 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:07:53.860101 17472 solver.cpp:244]     Train net output #1: loss = 0.284688 (* 1 = 0.284688 loss)\nI0817 17:07:53.945696 17472 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 17:10:11.284512 17472 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 17:11:32.542101 17472 solver.cpp:404]     Test net 
output #0: accuracy = 0.70344\nI0817 17:11:32.542356 17472 solver.cpp:404]     Test net output #1: loss = 1.08535 (* 1 = 1.08535 loss)\nI0817 17:11:33.862112 17472 solver.cpp:228] Iteration 1100, loss = 0.287582\nI0817 17:11:33.862157 17472 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 17:11:33.862172 17472 solver.cpp:244]     Train net output #1: loss = 0.287582 (* 1 = 0.287582 loss)\nI0817 17:11:33.949863 17472 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 17:13:51.080204 17472 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 17:15:12.328760 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63552\nI0817 17:15:12.328999 17472 solver.cpp:404]     Test net output #1: loss = 1.54386 (* 1 = 1.54386 loss)\nI0817 17:15:13.648404 17472 solver.cpp:228] Iteration 1200, loss = 0.260211\nI0817 17:15:13.648448 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:15:13.648464 17472 solver.cpp:244]     Train net output #1: loss = 0.260211 (* 1 = 0.260211 loss)\nI0817 17:15:13.731904 17472 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 17:17:30.827139 17472 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:18:52.046476 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69996\nI0817 17:18:52.046736 17472 solver.cpp:404]     Test net output #1: loss = 1.23723 (* 1 = 1.23723 loss)\nI0817 17:18:53.366278 17472 solver.cpp:228] Iteration 1300, loss = 0.293489\nI0817 17:18:53.366319 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:18:53.366335 17472 solver.cpp:244]     Train net output #1: loss = 0.293488 (* 1 = 0.293488 loss)\nI0817 17:18:53.452980 17472 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 17:21:10.559018 17472 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:22:31.763792 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7838\nI0817 17:22:31.764053 17472 solver.cpp:404]     Test net output #1: loss = 0.78227 (* 1 = 0.78227 loss)\nI0817 
17:22:33.083376 17472 solver.cpp:228] Iteration 1400, loss = 0.199226\nI0817 17:22:33.083418 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:22:33.083434 17472 solver.cpp:244]     Train net output #1: loss = 0.199226 (* 1 = 0.199226 loss)\nI0817 17:22:33.171222 17472 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:24:50.295665 17472 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:26:11.497259 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77024\nI0817 17:26:11.497524 17472 solver.cpp:404]     Test net output #1: loss = 0.855528 (* 1 = 0.855528 loss)\nI0817 17:26:12.816104 17472 solver.cpp:228] Iteration 1500, loss = 0.233258\nI0817 17:26:12.816145 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:26:12.816161 17472 solver.cpp:244]     Train net output #1: loss = 0.233258 (* 1 = 0.233258 loss)\nI0817 17:26:12.904335 17472 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:28:30.006069 17472 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:29:51.205304 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64088\nI0817 17:29:51.205538 17472 solver.cpp:404]     Test net output #1: loss = 1.56488 (* 1 = 1.56488 loss)\nI0817 17:29:52.524454 17472 solver.cpp:228] Iteration 1600, loss = 0.182728\nI0817 17:29:52.524495 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:29:52.524511 17472 solver.cpp:244]     Train net output #1: loss = 0.182728 (* 1 = 0.182728 loss)\nI0817 17:29:52.615370 17472 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:32:09.758661 17472 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:33:30.969525 17472 solver.cpp:404]     Test net output #0: accuracy = 0.75568\nI0817 17:33:30.969770 17472 solver.cpp:404]     Test net output #1: loss = 0.954023 (* 1 = 0.954023 loss)\nI0817 17:33:32.288637 17472 solver.cpp:228] Iteration 1700, loss = 0.236139\nI0817 17:33:32.288681 17472 solver.cpp:244]     Train net output #0: 
accuracy = 0.912\nI0817 17:33:32.288696 17472 solver.cpp:244]     Train net output #1: loss = 0.236139 (* 1 = 0.236139 loss)\nI0817 17:33:32.378407 17472 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:35:49.467105 17472 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:37:10.674454 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70812\nI0817 17:37:10.674726 17472 solver.cpp:404]     Test net output #1: loss = 1.12194 (* 1 = 1.12194 loss)\nI0817 17:37:11.993854 17472 solver.cpp:228] Iteration 1800, loss = 0.260316\nI0817 17:37:11.993896 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:37:11.993911 17472 solver.cpp:244]     Train net output #1: loss = 0.260315 (* 1 = 0.260315 loss)\nI0817 17:37:12.084673 17472 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:39:29.154342 17472 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:40:50.353538 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68968\nI0817 17:40:50.353790 17472 solver.cpp:404]     Test net output #1: loss = 1.38804 (* 1 = 1.38804 loss)\nI0817 17:40:51.672989 17472 solver.cpp:228] Iteration 1900, loss = 0.171435\nI0817 17:40:51.673029 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:40:51.673045 17472 solver.cpp:244]     Train net output #1: loss = 0.171435 (* 1 = 0.171435 loss)\nI0817 17:40:51.762853 17472 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:43:09.063830 17472 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:44:30.248416 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77808\nI0817 17:44:30.248661 17472 solver.cpp:404]     Test net output #1: loss = 0.914692 (* 1 = 0.914692 loss)\nI0817 17:44:31.568270 17472 solver.cpp:228] Iteration 2000, loss = 0.196282\nI0817 17:44:31.568313 17472 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:44:31.568330 17472 solver.cpp:244]     Train net output #1: loss = 0.196282 (* 1 = 0.196282 loss)\nI0817 
17:44:31.660712 17472 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:46:49.002372 17472 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:48:10.188671 17472 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0817 17:48:10.188915 17472 solver.cpp:404]     Test net output #1: loss = 0.947288 (* 1 = 0.947288 loss)\nI0817 17:48:11.507791 17472 solver.cpp:228] Iteration 2100, loss = 0.187502\nI0817 17:48:11.507835 17472 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:48:11.507851 17472 solver.cpp:244]     Train net output #1: loss = 0.187502 (* 1 = 0.187502 loss)\nI0817 17:48:11.601183 17472 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:50:28.864861 17472 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:51:50.068671 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7064\nI0817 17:51:50.068933 17472 solver.cpp:404]     Test net output #1: loss = 1.23628 (* 1 = 1.23628 loss)\nI0817 17:51:51.387825 17472 solver.cpp:228] Iteration 2200, loss = 0.221047\nI0817 17:51:51.387871 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:51:51.387886 17472 solver.cpp:244]     Train net output #1: loss = 0.221046 (* 1 = 0.221046 loss)\nI0817 17:51:51.472378 17472 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:54:08.551493 17472 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:55:29.735028 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70088\nI0817 17:55:29.735261 17472 solver.cpp:404]     Test net output #1: loss = 1.38355 (* 1 = 1.38355 loss)\nI0817 17:55:31.053978 17472 solver.cpp:228] Iteration 2300, loss = 0.304214\nI0817 17:55:31.054020 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:55:31.054036 17472 solver.cpp:244]     Train net output #1: loss = 0.304214 (* 1 = 0.304214 loss)\nI0817 17:55:31.142413 17472 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:57:48.280210 17472 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0817 17:59:09.465876 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66712\nI0817 17:59:09.466122 17472 solver.cpp:404]     Test net output #1: loss = 1.69084 (* 1 = 1.69084 loss)\nI0817 17:59:10.784251 17472 solver.cpp:228] Iteration 2400, loss = 0.221431\nI0817 17:59:10.784296 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:59:10.784312 17472 solver.cpp:244]     Train net output #1: loss = 0.221431 (* 1 = 0.221431 loss)\nI0817 17:59:10.874166 17472 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 18:01:27.988154 17472 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 18:02:49.180984 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66684\nI0817 18:02:49.181251 17472 solver.cpp:404]     Test net output #1: loss = 1.48594 (* 1 = 1.48594 loss)\nI0817 18:02:50.500309 17472 solver.cpp:228] Iteration 2500, loss = 0.203832\nI0817 18:02:50.500353 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:02:50.500370 17472 solver.cpp:244]     Train net output #1: loss = 0.203832 (* 1 = 0.203832 loss)\nI0817 18:02:50.596922 17472 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 18:05:07.768188 17472 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 18:06:28.975476 17472 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI0817 18:06:28.975733 17472 solver.cpp:404]     Test net output #1: loss = 0.821492 (* 1 = 0.821492 loss)\nI0817 18:06:30.294701 17472 solver.cpp:228] Iteration 2600, loss = 0.157024\nI0817 18:06:30.294745 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:06:30.294759 17472 solver.cpp:244]     Train net output #1: loss = 0.157024 (* 1 = 0.157024 loss)\nI0817 18:06:30.385445 17472 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 18:08:47.605106 17472 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 18:10:08.808506 17472 solver.cpp:404]     Test net output #0: accuracy = 0.73568\nI0817 18:10:08.808773 17472 solver.cpp:404]     
Test net output #1: loss = 1.04878 (* 1 = 1.04878 loss)\nI0817 18:10:10.128656 17472 solver.cpp:228] Iteration 2700, loss = 0.272502\nI0817 18:10:10.128700 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:10:10.128713 17472 solver.cpp:244]     Train net output #1: loss = 0.272502 (* 1 = 0.272502 loss)\nI0817 18:10:10.216059 17472 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 18:12:27.345118 17472 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 18:13:48.555965 17472 solver.cpp:404]     Test net output #0: accuracy = 0.75168\nI0817 18:13:48.556216 17472 solver.cpp:404]     Test net output #1: loss = 0.813167 (* 1 = 0.813167 loss)\nI0817 18:13:49.875233 17472 solver.cpp:228] Iteration 2800, loss = 0.273075\nI0817 18:13:49.875277 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:13:49.875293 17472 solver.cpp:244]     Train net output #1: loss = 0.273075 (* 1 = 0.273075 loss)\nI0817 18:13:49.962685 17472 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 18:16:07.177394 17472 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:17:28.388458 17472 solver.cpp:404]     Test net output #0: accuracy = 0.768\nI0817 18:17:28.388723 17472 solver.cpp:404]     Test net output #1: loss = 0.778037 (* 1 = 0.778037 loss)\nI0817 18:17:29.707664 17472 solver.cpp:228] Iteration 2900, loss = 0.275019\nI0817 18:17:29.707705 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:17:29.707721 17472 solver.cpp:244]     Train net output #1: loss = 0.275019 (* 1 = 0.275019 loss)\nI0817 18:17:29.801673 17472 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 18:19:47.101816 17472 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:21:08.315075 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77752\nI0817 18:21:08.315335 17472 solver.cpp:404]     Test net output #1: loss = 0.727123 (* 1 = 0.727123 loss)\nI0817 18:21:09.634039 17472 solver.cpp:228] Iteration 3000, loss = 0.21313\nI0817 
18:21:09.634081 17472 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:21:09.634095 17472 solver.cpp:244]     Train net output #1: loss = 0.21313 (* 1 = 0.21313 loss)\nI0817 18:21:09.725033 17472 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:23:26.952148 17472 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:24:48.173485 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7538\nI0817 18:24:48.173712 17472 solver.cpp:404]     Test net output #1: loss = 0.835422 (* 1 = 0.835422 loss)\nI0817 18:24:49.493134 17472 solver.cpp:228] Iteration 3100, loss = 0.291958\nI0817 18:24:49.493175 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:24:49.493196 17472 solver.cpp:244]     Train net output #1: loss = 0.291959 (* 1 = 0.291959 loss)\nI0817 18:24:49.586470 17472 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:27:06.783357 17472 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:28:28.011664 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77552\nI0817 18:28:28.011926 17472 solver.cpp:404]     Test net output #1: loss = 0.74192 (* 1 = 0.74192 loss)\nI0817 18:28:29.331653 17472 solver.cpp:228] Iteration 3200, loss = 0.287954\nI0817 18:28:29.331696 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:28:29.331712 17472 solver.cpp:244]     Train net output #1: loss = 0.287954 (* 1 = 0.287954 loss)\nI0817 18:28:29.418069 17472 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:30:46.542922 17472 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:32:07.759294 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64544\nI0817 18:32:07.759560 17472 solver.cpp:404]     Test net output #1: loss = 1.19015 (* 1 = 1.19015 loss)\nI0817 18:32:09.080052 17472 solver.cpp:228] Iteration 3300, loss = 0.286922\nI0817 18:32:09.080096 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:32:09.080111 17472 solver.cpp:244]     Train net output 
#1: loss = 0.286922 (* 1 = 0.286922 loss)\nI0817 18:32:09.165772 17472 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:34:26.275480 17472 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:35:47.455380 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79964\nI0817 18:35:47.455617 17472 solver.cpp:404]     Test net output #1: loss = 0.658774 (* 1 = 0.658774 loss)\nI0817 18:35:48.775622 17472 solver.cpp:228] Iteration 3400, loss = 0.255433\nI0817 18:35:48.775666 17472 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:35:48.775681 17472 solver.cpp:244]     Train net output #1: loss = 0.255433 (* 1 = 0.255433 loss)\nI0817 18:35:48.864429 17472 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:38:06.010509 17472 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:39:27.188248 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67152\nI0817 18:39:27.188513 17472 solver.cpp:404]     Test net output #1: loss = 1.31515 (* 1 = 1.31515 loss)\nI0817 18:39:28.508779 17472 solver.cpp:228] Iteration 3500, loss = 0.201505\nI0817 18:39:28.508818 17472 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:39:28.508834 17472 solver.cpp:244]     Train net output #1: loss = 0.201505 (* 1 = 0.201505 loss)\nI0817 18:39:28.599892 17472 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:41:45.755070 17472 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:43:06.933989 17472 solver.cpp:404]     Test net output #0: accuracy = 0.76224\nI0817 18:43:06.934247 17472 solver.cpp:404]     Test net output #1: loss = 0.775611 (* 1 = 0.775611 loss)\nI0817 18:43:08.254164 17472 solver.cpp:228] Iteration 3600, loss = 0.282895\nI0817 18:43:08.254209 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:43:08.254225 17472 solver.cpp:244]     Train net output #1: loss = 0.282895 (* 1 = 0.282895 loss)\nI0817 18:43:08.346457 17472 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 
18:45:25.648444 17472 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:46:46.831491 17472 solver.cpp:404]     Test net output #0: accuracy = 0.73536\nI0817 18:46:46.831754 17472 solver.cpp:404]     Test net output #1: loss = 1.04671 (* 1 = 1.04671 loss)\nI0817 18:46:48.151101 17472 solver.cpp:228] Iteration 3700, loss = 0.279975\nI0817 18:46:48.151145 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:46:48.151160 17472 solver.cpp:244]     Train net output #1: loss = 0.279974 (* 1 = 0.279974 loss)\nI0817 18:46:48.243367 17472 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:49:05.342147 17472 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:50:26.527638 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79692\nI0817 18:50:26.527875 17472 solver.cpp:404]     Test net output #1: loss = 0.654551 (* 1 = 0.654551 loss)\nI0817 18:50:27.847570 17472 solver.cpp:228] Iteration 3800, loss = 0.3036\nI0817 18:50:27.847612 17472 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:50:27.847628 17472 solver.cpp:244]     Train net output #1: loss = 0.303599 (* 1 = 0.303599 loss)\nI0817 18:50:27.933742 17472 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:52:45.154608 17472 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:54:06.338310 17472 solver.cpp:404]     Test net output #0: accuracy = 0.75644\nI0817 18:54:06.338570 17472 solver.cpp:404]     Test net output #1: loss = 0.873225 (* 1 = 0.873225 loss)\nI0817 18:54:07.659132 17472 solver.cpp:228] Iteration 3900, loss = 0.227769\nI0817 18:54:07.659175 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:54:07.659195 17472 solver.cpp:244]     Train net output #1: loss = 0.227769 (* 1 = 0.227769 loss)\nI0817 18:54:07.752933 17472 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:56:24.926926 17472 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:57:46.117251 17472 solver.cpp:404]     Test net output #0: 
accuracy = 0.71288\nI0817 18:57:46.117511 17472 solver.cpp:404]     Test net output #1: loss = 1.2354 (* 1 = 1.2354 loss)\nI0817 18:57:47.437101 17472 solver.cpp:228] Iteration 4000, loss = 0.340257\nI0817 18:57:47.437144 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:57:47.437160 17472 solver.cpp:244]     Train net output #1: loss = 0.340257 (* 1 = 0.340257 loss)\nI0817 18:57:47.533789 17472 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 19:00:04.671006 17472 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 19:01:25.864866 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79268\nI0817 19:01:25.865129 17472 solver.cpp:404]     Test net output #1: loss = 0.791883 (* 1 = 0.791883 loss)\nI0817 19:01:27.184422 17472 solver.cpp:228] Iteration 4100, loss = 0.26626\nI0817 19:01:27.184463 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:01:27.184479 17472 solver.cpp:244]     Train net output #1: loss = 0.26626 (* 1 = 0.26626 loss)\nI0817 19:01:27.274413 17472 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 19:03:44.446094 17472 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 19:05:05.616119 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77332\nI0817 19:05:05.616371 17472 solver.cpp:404]     Test net output #1: loss = 0.703694 (* 1 = 0.703694 loss)\nI0817 19:05:06.935479 17472 solver.cpp:228] Iteration 4200, loss = 0.436462\nI0817 19:05:06.935519 17472 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:05:06.935534 17472 solver.cpp:244]     Train net output #1: loss = 0.436462 (* 1 = 0.436462 loss)\nI0817 19:05:07.024881 17472 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 19:07:24.269148 17472 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 19:08:45.480144 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67624\nI0817 19:08:45.480422 17472 solver.cpp:404]     Test net output #1: loss = 1.05471 (* 1 = 1.05471 loss)\nI0817 
19:08:46.799849 17472 solver.cpp:228] Iteration 4300, loss = 0.258958\nI0817 19:08:46.799891 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:08:46.799906 17472 solver.cpp:244]     Train net output #1: loss = 0.258958 (* 1 = 0.258958 loss)\nI0817 19:08:46.890126 17472 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 19:11:04.167297 17472 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 19:12:25.397085 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77628\nI0817 19:12:25.397378 17472 solver.cpp:404]     Test net output #1: loss = 0.743955 (* 1 = 0.743955 loss)\nI0817 19:12:26.716827 17472 solver.cpp:228] Iteration 4400, loss = 0.199875\nI0817 19:12:26.716868 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:12:26.716883 17472 solver.cpp:244]     Train net output #1: loss = 0.199875 (* 1 = 0.199875 loss)\nI0817 19:12:26.799844 17472 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 19:14:44.030031 17472 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:16:05.261580 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7072\nI0817 19:16:05.261843 17472 solver.cpp:404]     Test net output #1: loss = 1.15618 (* 1 = 1.15618 loss)\nI0817 19:16:06.581786 17472 solver.cpp:228] Iteration 4500, loss = 0.286904\nI0817 19:16:06.581825 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:16:06.581840 17472 solver.cpp:244]     Train net output #1: loss = 0.286904 (* 1 = 0.286904 loss)\nI0817 19:16:06.669000 17472 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 19:18:23.965471 17472 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:19:45.180472 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64112\nI0817 19:19:45.180711 17472 solver.cpp:404]     Test net output #1: loss = 1.29835 (* 1 = 1.29835 loss)\nI0817 19:19:46.500072 17472 solver.cpp:228] Iteration 4600, loss = 0.230678\nI0817 19:19:46.500110 17472 solver.cpp:244]     Train net output #0: 
accuracy = 0.936\nI0817 19:19:46.500125 17472 solver.cpp:244]     Train net output #1: loss = 0.230678 (* 1 = 0.230678 loss)\nI0817 19:19:46.586148 17472 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 19:22:03.804705 17472 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:23:24.995576 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65592\nI0817 19:23:24.995841 17472 solver.cpp:404]     Test net output #1: loss = 1.3476 (* 1 = 1.3476 loss)\nI0817 19:23:26.315069 17472 solver.cpp:228] Iteration 4700, loss = 0.323311\nI0817 19:23:26.315109 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:23:26.315125 17472 solver.cpp:244]     Train net output #1: loss = 0.323311 (* 1 = 0.323311 loss)\nI0817 19:23:26.411137 17472 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:25:43.506676 17472 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:27:04.687412 17472 solver.cpp:404]     Test net output #0: accuracy = 0.74712\nI0817 19:27:04.687685 17472 solver.cpp:404]     Test net output #1: loss = 0.94653 (* 1 = 0.94653 loss)\nI0817 19:27:06.007664 17472 solver.cpp:228] Iteration 4800, loss = 0.260494\nI0817 19:27:06.007705 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:27:06.007721 17472 solver.cpp:244]     Train net output #1: loss = 0.260494 (* 1 = 0.260494 loss)\nI0817 19:27:06.094810 17472 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:29:23.223315 17472 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:30:44.393995 17472 solver.cpp:404]     Test net output #0: accuracy = 0.80516\nI0817 19:30:44.394259 17472 solver.cpp:404]     Test net output #1: loss = 0.603791 (* 1 = 0.603791 loss)\nI0817 19:30:45.713585 17472 solver.cpp:228] Iteration 4900, loss = 0.284717\nI0817 19:30:45.713625 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:30:45.713641 17472 solver.cpp:244]     Train net output #1: loss = 0.284717 (* 1 = 0.284717 loss)\nI0817 
19:30:45.805222 17472 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:33:02.960456 17472 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:34:24.130342 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79136\nI0817 19:34:24.130605 17472 solver.cpp:404]     Test net output #1: loss = 0.638466 (* 1 = 0.638466 loss)\nI0817 19:34:25.450212 17472 solver.cpp:228] Iteration 5000, loss = 0.242641\nI0817 19:34:25.450254 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:34:25.450270 17472 solver.cpp:244]     Train net output #1: loss = 0.242641 (* 1 = 0.242641 loss)\nI0817 19:34:25.542568 17472 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:36:42.770705 17472 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:38:03.944566 17472 solver.cpp:404]     Test net output #0: accuracy = 0.80672\nI0817 19:38:03.944823 17472 solver.cpp:404]     Test net output #1: loss = 0.609316 (* 1 = 0.609316 loss)\nI0817 19:38:05.264638 17472 solver.cpp:228] Iteration 5100, loss = 0.277629\nI0817 19:38:05.264681 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:38:05.264696 17472 solver.cpp:244]     Train net output #1: loss = 0.277628 (* 1 = 0.277628 loss)\nI0817 19:38:05.351501 17472 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:40:22.472298 17472 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:41:43.646811 17472 solver.cpp:404]     Test net output #0: accuracy = 0.74716\nI0817 19:41:43.647074 17472 solver.cpp:404]     Test net output #1: loss = 0.824622 (* 1 = 0.824622 loss)\nI0817 19:41:44.965570 17472 solver.cpp:228] Iteration 5200, loss = 0.357672\nI0817 19:41:44.965612 17472 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:41:44.965628 17472 solver.cpp:244]     Train net output #1: loss = 0.357672 (* 1 = 0.357672 loss)\nI0817 19:41:45.057240 17472 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:44:02.177405 17472 solver.cpp:337] Iteration 5300, Testing 
net (#0)\nI0817 19:45:23.350469 17472 solver.cpp:404]     Test net output #0: accuracy = 0.72168\nI0817 19:45:23.350733 17472 solver.cpp:404]     Test net output #1: loss = 0.890224 (* 1 = 0.890224 loss)\nI0817 19:45:24.671062 17472 solver.cpp:228] Iteration 5300, loss = 0.329245\nI0817 19:45:24.671097 17472 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:45:24.671111 17472 solver.cpp:244]     Train net output #1: loss = 0.329245 (* 1 = 0.329245 loss)\nI0817 19:45:24.761155 17472 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:47:41.905498 17472 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:49:03.058383 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79744\nI0817 19:49:03.058645 17472 solver.cpp:404]     Test net output #1: loss = 0.643975 (* 1 = 0.643975 loss)\nI0817 19:49:04.378264 17472 solver.cpp:228] Iteration 5400, loss = 0.268617\nI0817 19:49:04.378309 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:49:04.378324 17472 solver.cpp:244]     Train net output #1: loss = 0.268617 (* 1 = 0.268617 loss)\nI0817 19:49:04.464669 17472 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:51:21.584672 17472 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:52:42.744580 17472 solver.cpp:404]     Test net output #0: accuracy = 0.75504\nI0817 19:52:42.744849 17472 solver.cpp:404]     Test net output #1: loss = 0.89912 (* 1 = 0.89912 loss)\nI0817 19:52:44.063242 17472 solver.cpp:228] Iteration 5500, loss = 0.259585\nI0817 19:52:44.063287 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:52:44.063305 17472 solver.cpp:244]     Train net output #1: loss = 0.259585 (* 1 = 0.259585 loss)\nI0817 19:52:44.154461 17472 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:55:01.342361 17472 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:56:22.495422 17472 solver.cpp:404]     Test net output #0: accuracy = 0.676\nI0817 19:56:22.495687 17472 solver.cpp:404]  
   Test net output #1: loss = 1.17121 (* 1 = 1.17121 loss)\nI0817 19:56:23.814643 17472 solver.cpp:228] Iteration 5600, loss = 0.271635\nI0817 19:56:23.814687 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:56:23.814704 17472 solver.cpp:244]     Train net output #1: loss = 0.271635 (* 1 = 0.271635 loss)\nI0817 19:56:23.903795 17472 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:58:41.035485 17472 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 20:00:02.194046 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81584\nI0817 20:00:02.194283 17472 solver.cpp:404]     Test net output #1: loss = 0.568684 (* 1 = 0.568684 loss)\nI0817 20:00:03.514204 17472 solver.cpp:228] Iteration 5700, loss = 0.187671\nI0817 20:00:03.514245 17472 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:00:03.514261 17472 solver.cpp:244]     Train net output #1: loss = 0.187671 (* 1 = 0.187671 loss)\nI0817 20:00:03.604820 17472 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 20:02:20.846982 17472 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 20:03:42.008744 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78668\nI0817 20:03:42.009011 17472 solver.cpp:404]     Test net output #1: loss = 0.687663 (* 1 = 0.687663 loss)\nI0817 20:03:43.329257 17472 solver.cpp:228] Iteration 5800, loss = 0.196271\nI0817 20:03:43.329298 17472 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:03:43.329313 17472 solver.cpp:244]     Train net output #1: loss = 0.196271 (* 1 = 0.196271 loss)\nI0817 20:03:43.416616 17472 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 20:06:00.531528 17472 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 20:07:21.834743 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81204\nI0817 20:07:21.835011 17472 solver.cpp:404]     Test net output #1: loss = 0.602343 (* 1 = 0.602343 loss)\nI0817 20:07:23.155820 17472 solver.cpp:228] Iteration 5900, loss = 
0.266009\nI0817 20:07:23.155863 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 20:07:23.155879 17472 solver.cpp:244]     Train net output #1: loss = 0.266009 (* 1 = 0.266009 loss)\nI0817 20:07:23.248062 17472 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 20:09:40.475759 17472 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 20:11:01.743604 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79184\nI0817 20:11:01.743867 17472 solver.cpp:404]     Test net output #1: loss = 0.665226 (* 1 = 0.665226 loss)\nI0817 20:11:03.063733 17472 solver.cpp:228] Iteration 6000, loss = 0.27099\nI0817 20:11:03.063772 17472 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 20:11:03.063789 17472 solver.cpp:244]     Train net output #1: loss = 0.270989 (* 1 = 0.270989 loss)\nI0817 20:11:03.147325 17472 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 20:13:20.336652 17472 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 20:14:41.545104 17472 solver.cpp:404]     Test net output #0: accuracy = 0.73288\nI0817 20:14:41.545367 17472 solver.cpp:404]     Test net output #1: loss = 0.917599 (* 1 = 0.917599 loss)\nI0817 20:14:42.864917 17472 solver.cpp:228] Iteration 6100, loss = 0.229929\nI0817 20:14:42.864959 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:14:42.864974 17472 solver.cpp:244]     Train net output #1: loss = 0.229929 (* 1 = 0.229929 loss)\nI0817 20:14:42.955574 17472 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 20:17:00.153692 17472 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:18:21.396252 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66024\nI0817 20:18:21.396519 17472 solver.cpp:404]     Test net output #1: loss = 1.21146 (* 1 = 1.21146 loss)\nI0817 20:18:22.715934 17472 solver.cpp:228] Iteration 6200, loss = 0.286323\nI0817 20:18:22.715975 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:18:22.715991 17472 solver.cpp:244]  
   Train net output #1: loss = 0.286323 (* 1 = 0.286323 loss)\nI0817 20:18:22.804795 17472 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 20:20:40.181746 17472 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:22:01.430024 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7202\nI0817 20:22:01.430296 17472 solver.cpp:404]     Test net output #1: loss = 1.07069 (* 1 = 1.07069 loss)\nI0817 20:22:02.751047 17472 solver.cpp:228] Iteration 6300, loss = 0.204151\nI0817 20:22:02.751091 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:22:02.751106 17472 solver.cpp:244]     Train net output #1: loss = 0.204151 (* 1 = 0.204151 loss)\nI0817 20:22:02.838759 17472 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:24:20.047650 17472 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:25:41.249243 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77696\nI0817 20:25:41.249478 17472 solver.cpp:404]     Test net output #1: loss = 0.824719 (* 1 = 0.824719 loss)\nI0817 20:25:42.569839 17472 solver.cpp:228] Iteration 6400, loss = 0.172103\nI0817 20:25:42.569881 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:25:42.569897 17472 solver.cpp:244]     Train net output #1: loss = 0.172103 (* 1 = 0.172103 loss)\nI0817 20:25:42.653077 17472 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:27:59.920790 17472 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:29:21.159606 17472 solver.cpp:404]     Test net output #0: accuracy = 0.77904\nI0817 20:29:21.159870 17472 solver.cpp:404]     Test net output #1: loss = 0.758299 (* 1 = 0.758299 loss)\nI0817 20:29:22.479442 17472 solver.cpp:228] Iteration 6500, loss = 0.156647\nI0817 20:29:22.479485 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:29:22.479501 17472 solver.cpp:244]     Train net output #1: loss = 0.156647 (* 1 = 0.156647 loss)\nI0817 20:29:22.562922 17472 sgd_solver.cpp:166] Iteration 6500, lr = 
2.13\nI0817 20:31:39.823243 17472 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:33:01.016796 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81096\nI0817 20:33:01.017057 17472 solver.cpp:404]     Test net output #1: loss = 0.60862 (* 1 = 0.60862 loss)\nI0817 20:33:02.336730 17472 solver.cpp:228] Iteration 6600, loss = 0.151941\nI0817 20:33:02.336772 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:33:02.336789 17472 solver.cpp:244]     Train net output #1: loss = 0.151941 (* 1 = 0.151941 loss)\nI0817 20:33:02.424425 17472 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:35:19.655737 17472 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:36:40.818922 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68848\nI0817 20:36:40.819193 17472 solver.cpp:404]     Test net output #1: loss = 1.14436 (* 1 = 1.14436 loss)\nI0817 20:36:42.139262 17472 solver.cpp:228] Iteration 6700, loss = 0.148044\nI0817 20:36:42.139307 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:36:42.139323 17472 solver.cpp:244]     Train net output #1: loss = 0.148044 (* 1 = 0.148044 loss)\nI0817 20:36:42.223650 17472 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:38:59.499701 17472 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:40:20.644634 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8154\nI0817 20:40:20.644912 17472 solver.cpp:404]     Test net output #1: loss = 0.68031 (* 1 = 0.68031 loss)\nI0817 20:40:21.966351 17472 solver.cpp:228] Iteration 6800, loss = 0.282254\nI0817 20:40:21.966397 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:40:21.966420 17472 solver.cpp:244]     Train net output #1: loss = 0.282254 (* 1 = 0.282254 loss)\nI0817 20:40:22.053670 17472 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:42:39.219112 17472 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:44:00.346858 17472 solver.cpp:404]     Test net 
output #0: accuracy = 0.67072\nI0817 20:44:00.347100 17472 solver.cpp:404]     Test net output #1: loss = 1.34316 (* 1 = 1.34316 loss)\nI0817 20:44:01.666296 17472 solver.cpp:228] Iteration 6900, loss = 0.15394\nI0817 20:44:01.666337 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:44:01.666352 17472 solver.cpp:244]     Train net output #1: loss = 0.15394 (* 1 = 0.15394 loss)\nI0817 20:44:01.757035 17472 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:46:18.971976 17472 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:47:40.114262 17472 solver.cpp:404]     Test net output #0: accuracy = 0.83292\nI0817 20:47:40.114531 17472 solver.cpp:404]     Test net output #1: loss = 0.547803 (* 1 = 0.547803 loss)\nI0817 20:47:41.434056 17472 solver.cpp:228] Iteration 7000, loss = 0.232845\nI0817 20:47:41.434096 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:47:41.434111 17472 solver.cpp:244]     Train net output #1: loss = 0.232845 (* 1 = 0.232845 loss)\nI0817 20:47:41.524874 17472 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:49:58.745687 17472 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:51:19.854256 17472 solver.cpp:404]     Test net output #0: accuracy = 0.72116\nI0817 20:51:19.854518 17472 solver.cpp:404]     Test net output #1: loss = 1.01007 (* 1 = 1.01007 loss)\nI0817 20:51:21.174002 17472 solver.cpp:228] Iteration 7100, loss = 0.128844\nI0817 20:51:21.174043 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:51:21.174059 17472 solver.cpp:244]     Train net output #1: loss = 0.128844 (* 1 = 0.128844 loss)\nI0817 20:51:21.263401 17472 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:53:38.417827 17472 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:54:59.513015 17472 solver.cpp:404]     Test net output #0: accuracy = 0.73692\nI0817 20:54:59.513288 17472 solver.cpp:404]     Test net output #1: loss = 1.04626 (* 1 = 1.04626 loss)\nI0817 
20:55:00.835058 17472 solver.cpp:228] Iteration 7200, loss = 0.161884\nI0817 20:55:00.835109 17472 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:55:00.835134 17472 solver.cpp:244]     Train net output #1: loss = 0.161884 (* 1 = 0.161884 loss)\nI0817 20:55:00.919764 17472 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:57:18.083940 17472 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:58:39.193267 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78872\nI0817 20:58:39.193539 17472 solver.cpp:404]     Test net output #1: loss = 0.740647 (* 1 = 0.740647 loss)\nI0817 20:58:40.514189 17472 solver.cpp:228] Iteration 7300, loss = 0.11896\nI0817 20:58:40.514233 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:58:40.514257 17472 solver.cpp:244]     Train net output #1: loss = 0.11896 (* 1 = 0.11896 loss)\nI0817 20:58:40.600782 17472 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 21:00:57.742005 17472 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 21:02:19.769850 17472 solver.cpp:404]     Test net output #0: accuracy = 0.801\nI0817 21:02:19.770102 17472 solver.cpp:404]     Test net output #1: loss = 0.820759 (* 1 = 0.820759 loss)\nI0817 21:02:21.093484 17472 solver.cpp:228] Iteration 7400, loss = 0.116933\nI0817 21:02:21.093540 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:02:21.093557 17472 solver.cpp:244]     Train net output #1: loss = 0.116933 (* 1 = 0.116933 loss)\nI0817 21:02:21.179324 17472 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 21:04:38.491566 17472 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 21:06:00.466682 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8218\nI0817 21:06:00.466910 17472 solver.cpp:404]     Test net output #1: loss = 0.627915 (* 1 = 0.627915 loss)\nI0817 21:06:01.789770 17472 solver.cpp:228] Iteration 7500, loss = 0.144626\nI0817 21:06:01.789832 17472 solver.cpp:244]     Train net output #0: 
accuracy = 0.928\nI0817 21:06:01.789849 17472 solver.cpp:244]     Train net output #1: loss = 0.144626 (* 1 = 0.144626 loss)\nI0817 21:06:01.870620 17472 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 21:08:19.182299 17472 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 21:09:41.152005 17472 solver.cpp:404]     Test net output #0: accuracy = 0.79364\nI0817 21:09:41.152238 17472 solver.cpp:404]     Test net output #1: loss = 0.807514 (* 1 = 0.807514 loss)\nI0817 21:09:42.475414 17472 solver.cpp:228] Iteration 7600, loss = 0.0704024\nI0817 21:09:42.475476 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:09:42.475493 17472 solver.cpp:244]     Train net output #1: loss = 0.0704024 (* 1 = 0.0704024 loss)\nI0817 21:09:42.559049 17472 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 21:11:59.927841 17472 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 21:13:21.874943 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI0817 21:13:21.875221 17472 solver.cpp:404]     Test net output #1: loss = 0.615246 (* 1 = 0.615246 loss)\nI0817 21:13:23.198412 17472 solver.cpp:228] Iteration 7700, loss = 0.104917\nI0817 21:13:23.198465 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:13:23.198483 17472 solver.cpp:244]     Train net output #1: loss = 0.104917 (* 1 = 0.104917 loss)\nI0817 21:13:23.286062 17472 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 21:15:40.588680 17472 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:17:02.524790 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81692\nI0817 21:17:02.525008 17472 solver.cpp:404]     Test net output #1: loss = 0.729165 (* 1 = 0.729165 loss)\nI0817 21:17:03.847947 17472 solver.cpp:228] Iteration 7800, loss = 0.0632585\nI0817 21:17:03.848007 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:17:03.848024 17472 solver.cpp:244]     Train net output #1: loss = 0.0632585 (* 1 = 0.0632585 loss)\nI0817 
21:17:03.931318 17472 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 21:19:21.236294 17472 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:20:43.216601 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8324\nI0817 21:20:43.216825 17472 solver.cpp:404]     Test net output #1: loss = 0.576145 (* 1 = 0.576145 loss)\nI0817 21:20:44.540433 17472 solver.cpp:228] Iteration 7900, loss = 0.154889\nI0817 21:20:44.540487 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:20:44.540503 17472 solver.cpp:244]     Train net output #1: loss = 0.154889 (* 1 = 0.154889 loss)\nI0817 21:20:44.625836 17472 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 21:23:01.972810 17472 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:24:23.906121 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78924\nI0817 21:24:23.906363 17472 solver.cpp:404]     Test net output #1: loss = 0.807435 (* 1 = 0.807435 loss)\nI0817 21:24:25.230016 17472 solver.cpp:228] Iteration 8000, loss = 0.0891061\nI0817 21:24:25.230075 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:24:25.230093 17472 solver.cpp:244]     Train net output #1: loss = 0.0891062 (* 1 = 0.0891062 loss)\nI0817 21:24:25.308894 17472 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:26:42.653568 17472 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:28:04.636056 17472 solver.cpp:404]     Test net output #0: accuracy = 0.85104\nI0817 21:28:04.636301 17472 solver.cpp:404]     Test net output #1: loss = 0.561366 (* 1 = 0.561366 loss)\nI0817 21:28:05.959292 17472 solver.cpp:228] Iteration 8100, loss = 0.123673\nI0817 21:28:05.959357 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:28:05.959374 17472 solver.cpp:244]     Train net output #1: loss = 0.123673 (* 1 = 0.123673 loss)\nI0817 21:28:06.039353 17472 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:30:23.408663 17472 solver.cpp:337] Iteration 8200, 
Testing net (#0)\nI0817 21:31:45.383222 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81748\nI0817 21:31:45.383450 17472 solver.cpp:404]     Test net output #1: loss = 0.710742 (* 1 = 0.710742 loss)\nI0817 21:31:46.706555 17472 solver.cpp:228] Iteration 8200, loss = 0.131792\nI0817 21:31:46.706615 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:31:46.706632 17472 solver.cpp:244]     Train net output #1: loss = 0.131792 (* 1 = 0.131792 loss)\nI0817 21:31:46.787967 17472 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:34:04.203769 17472 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:35:26.158258 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78464\nI0817 21:35:26.158504 17472 solver.cpp:404]     Test net output #1: loss = 0.835032 (* 1 = 0.835032 loss)\nI0817 21:35:27.480679 17472 solver.cpp:228] Iteration 8300, loss = 0.0921328\nI0817 21:35:27.480739 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:35:27.480757 17472 solver.cpp:244]     Train net output #1: loss = 0.0921329 (* 1 = 0.0921329 loss)\nI0817 21:35:27.564342 17472 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:37:45.010543 17472 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:39:06.983700 17472 solver.cpp:404]     Test net output #0: accuracy = 0.81452\nI0817 21:39:06.983986 17472 solver.cpp:404]     Test net output #1: loss = 0.781726 (* 1 = 0.781726 loss)\nI0817 21:39:08.307126 17472 solver.cpp:228] Iteration 8400, loss = 0.113906\nI0817 21:39:08.307188 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:39:08.307205 17472 solver.cpp:244]     Train net output #1: loss = 0.113906 (* 1 = 0.113906 loss)\nI0817 21:39:08.394326 17472 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:41:25.824169 17472 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:42:47.838971 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78268\nI0817 21:42:47.839272 17472 
solver.cpp:404]     Test net output #1: loss = 1.06744 (* 1 = 1.06744 loss)\nI0817 21:42:49.163094 17472 solver.cpp:228] Iteration 8500, loss = 0.0747968\nI0817 21:42:49.163153 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:42:49.163172 17472 solver.cpp:244]     Train net output #1: loss = 0.0747969 (* 1 = 0.0747969 loss)\nI0817 21:42:49.244197 17472 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:45:06.761941 17472 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:46:28.783031 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8358\nI0817 21:46:28.783329 17472 solver.cpp:404]     Test net output #1: loss = 0.752455 (* 1 = 0.752455 loss)\nI0817 21:46:30.107270 17472 solver.cpp:228] Iteration 8600, loss = 0.0595524\nI0817 21:46:30.107334 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:46:30.107353 17472 solver.cpp:244]     Train net output #1: loss = 0.0595525 (* 1 = 0.0595525 loss)\nI0817 21:46:30.187343 17472 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:48:47.634867 17472 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:50:09.664135 17472 solver.cpp:404]     Test net output #0: accuracy = 0.78132\nI0817 21:50:09.664448 17472 solver.cpp:404]     Test net output #1: loss = 1.05673 (* 1 = 1.05673 loss)\nI0817 21:50:10.986390 17472 solver.cpp:228] Iteration 8700, loss = 0.0428493\nI0817 21:50:10.986452 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:50:10.986469 17472 solver.cpp:244]     Train net output #1: loss = 0.0428494 (* 1 = 0.0428494 loss)\nI0817 21:50:11.069782 17472 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:52:28.436355 17472 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:53:50.441542 17472 solver.cpp:404]     Test net output #0: accuracy = 0.85912\nI0817 21:53:50.441857 17472 solver.cpp:404]     Test net output #1: loss = 0.589864 (* 1 = 0.589864 loss)\nI0817 21:53:51.763919 17472 solver.cpp:228] Iteration 
8800, loss = 0.0840886\nI0817 21:53:51.763981 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:53:51.763998 17472 solver.cpp:244]     Train net output #1: loss = 0.0840887 (* 1 = 0.0840887 loss)\nI0817 21:53:51.845728 17472 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:56:09.291820 17472 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:57:31.329309 17472 solver.cpp:404]     Test net output #0: accuracy = 0.84992\nI0817 21:57:31.329623 17472 solver.cpp:404]     Test net output #1: loss = 0.58365 (* 1 = 0.58365 loss)\nI0817 21:57:32.653630 17472 solver.cpp:228] Iteration 8900, loss = 0.025335\nI0817 21:57:32.653687 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:57:32.653710 17472 solver.cpp:244]     Train net output #1: loss = 0.0253352 (* 1 = 0.0253352 loss)\nI0817 21:57:32.737185 17472 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:59:50.091904 17472 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 22:01:12.117533 17472 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0817 22:01:12.117821 17472 solver.cpp:404]     Test net output #1: loss = 0.509232 (* 1 = 0.509232 loss)\nI0817 22:01:13.440970 17472 solver.cpp:228] Iteration 9000, loss = 0.040893\nI0817 22:01:13.441035 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:01:13.441061 17472 solver.cpp:244]     Train net output #1: loss = 0.0408931 (* 1 = 0.0408931 loss)\nI0817 22:01:13.519691 17472 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 22:03:30.849002 17472 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 22:04:52.948101 17472 solver.cpp:404]     Test net output #0: accuracy = 0.88088\nI0817 22:04:52.948395 17472 solver.cpp:404]     Test net output #1: loss = 0.455904 (* 1 = 0.455904 loss)\nI0817 22:04:54.272217 17472 solver.cpp:228] Iteration 9100, loss = 0.0243631\nI0817 22:04:54.272280 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:04:54.272305 
17472 solver.cpp:244]     Train net output #1: loss = 0.0243632 (* 1 = 0.0243632 loss)\nI0817 22:04:54.364912 17472 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 22:07:11.845578 17472 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 22:08:33.999502 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0817 22:08:33.999814 17472 solver.cpp:404]     Test net output #1: loss = 0.51273 (* 1 = 0.51273 loss)\nI0817 22:08:35.322597 17472 solver.cpp:228] Iteration 9200, loss = 0.0245087\nI0817 22:08:35.322643 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:08:35.322667 17472 solver.cpp:244]     Train net output #1: loss = 0.0245088 (* 1 = 0.0245088 loss)\nI0817 22:08:35.409308 17472 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 22:10:52.747058 17472 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 22:12:14.882616 17472 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0817 22:12:14.882925 17472 solver.cpp:404]     Test net output #1: loss = 0.507929 (* 1 = 0.507929 loss)\nI0817 22:12:16.205416 17472 solver.cpp:228] Iteration 9300, loss = 0.0142727\nI0817 22:12:16.205468 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:12:16.205499 17472 solver.cpp:244]     Train net output #1: loss = 0.0142729 (* 1 = 0.0142729 loss)\nI0817 22:12:16.302146 17472 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 22:14:33.834899 17472 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:15:55.848354 17472 solver.cpp:404]     Test net output #0: accuracy = 0.88896\nI0817 22:15:55.848685 17472 solver.cpp:404]     Test net output #1: loss = 0.464973 (* 1 = 0.464973 loss)\nI0817 22:15:57.171253 17472 solver.cpp:228] Iteration 9400, loss = 0.00133294\nI0817 22:15:57.171310 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:15:57.171335 17472 solver.cpp:244]     Train net output #1: loss = 0.00133307 (* 1 = 0.00133307 loss)\nI0817 22:15:57.256218 17472 sgd_solver.cpp:166] 
Iteration 9400, lr = 0.448\nI0817 22:18:14.549907 17472 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:19:36.483824 17472 solver.cpp:404]     Test net output #0: accuracy = 0.8952\nI0817 22:19:36.484091 17472 solver.cpp:404]     Test net output #1: loss = 0.465842 (* 1 = 0.465842 loss)\nI0817 22:19:37.806691 17472 solver.cpp:228] Iteration 9500, loss = 0.0284376\nI0817 22:19:37.806746 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:19:37.806771 17472 solver.cpp:244]     Train net output #1: loss = 0.0284377 (* 1 = 0.0284377 loss)\nI0817 22:19:37.887985 17472 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 22:21:55.191289 17472 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:23:17.084398 17472 solver.cpp:404]     Test net output #0: accuracy = 0.90244\nI0817 22:23:17.084693 17472 solver.cpp:404]     Test net output #1: loss = 0.42193 (* 1 = 0.42193 loss)\nI0817 22:23:18.406872 17472 solver.cpp:228] Iteration 9600, loss = 0.00518377\nI0817 22:23:18.406924 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:23:18.406949 17472 solver.cpp:244]     Train net output #1: loss = 0.00518391 (* 1 = 0.00518391 loss)\nI0817 22:23:18.488801 17472 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:25:35.858433 17472 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:26:57.668143 17472 solver.cpp:404]     Test net output #0: accuracy = 0.9188\nI0817 22:26:57.668442 17472 solver.cpp:404]     Test net output #1: loss = 0.348657 (* 1 = 0.348657 loss)\nI0817 22:26:58.992259 17472 solver.cpp:228] Iteration 9700, loss = 0.000542351\nI0817 22:26:58.992322 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:26:58.992347 17472 solver.cpp:244]     Train net output #1: loss = 0.000542491 (* 1 = 0.000542491 loss)\nI0817 22:26:59.075060 17472 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:29:16.483438 17472 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:30:38.254602 
17472 solver.cpp:404]     Test net output #0: accuracy = 0.92248\nI0817 22:30:38.254861 17472 solver.cpp:404]     Test net output #1: loss = 0.326537 (* 1 = 0.326537 loss)\nI0817 22:30:39.579280 17472 solver.cpp:228] Iteration 9800, loss = 0.00037503\nI0817 22:30:39.579344 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:30:39.579370 17472 solver.cpp:244]     Train net output #1: loss = 0.00037517 (* 1 = 0.00037517 loss)\nI0817 22:30:39.661432 17472 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:32:57.135112 17472 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:34:18.961707 17472 solver.cpp:404]     Test net output #0: accuracy = 0.9248\nI0817 22:34:18.961987 17472 solver.cpp:404]     Test net output #1: loss = 0.315607 (* 1 = 0.315607 loss)\nI0817 22:34:20.285503 17472 solver.cpp:228] Iteration 9900, loss = 0.000285302\nI0817 22:34:20.285567 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:34:20.285593 17472 solver.cpp:244]     Train net output #1: loss = 0.000285442 (* 1 = 0.000285442 loss)\nI0817 22:34:20.374223 17472 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:36:37.830253 17472 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kNestFig9_iter_10000.caffemodel\nI0817 22:36:38.053578 17472 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kNestFig9_iter_10000.solverstate\nI0817 22:36:38.499812 17472 solver.cpp:317] Iteration 10000, loss = 0.000720312\nI0817 22:36:38.499863 17472 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:38:00.278651 17472 solver.cpp:404]     Test net output #0: accuracy = 0.92472\nI0817 22:38:00.278952 17472 solver.cpp:404]     Test net output #1: loss = 0.316682 (* 1 = 0.316682 loss)\nI0817 22:38:00.278970 17472 solver.cpp:322] Optimization Done.\nI0817 22:38:05.590410 17472 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kRes110Tab1",
    "content": "I0821 08:59:38.323160 32364 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:59:38.325628 32364 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:59:38.326850 32364 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:59:38.328069 32364 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:59:38.329283 32364 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:59:38.330520 32364 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:59:38.331748 32364 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:59:38.333185 32364 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:59:38.334422 32364 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:59:38.752012 32364 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kRes110Tab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 08:59:38.756321 32364 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:59:38.783159 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:38.783267 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:38.784900 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:59:38.784960 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:59:38.784981 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:59:38.785001 32364 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:59:38.785027 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:59:38.785048 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:59:38.785065 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:59:38.785084 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:59:38.785104 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:59:38.785122 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:59:38.785141 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:59:38.785166 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:59:38.785187 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:59:38.785205 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:59:38.785226 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:59:38.785243 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:59:38.785261 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:59:38.785280 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
08:59:38.785300 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:59:38.785318 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:59:38.785351 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr1_bn\nI0821 08:59:38.785369 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr2_bn\nI0821 08:59:38.785385 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr1_bn\nI0821 08:59:38.785404 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr2_bn\nI0821 08:59:38.785423 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr1_bn\nI0821 08:59:38.785440 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr2_bn\nI0821 08:59:38.785459 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr1_bn\nI0821 08:59:38.785473 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr2_bn\nI0821 08:59:38.785490 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr1_bn\nI0821 08:59:38.785509 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr2_bn\nI0821 08:59:38.785529 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr1_bn\nI0821 08:59:38.785548 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr2_bn\nI0821 08:59:38.785567 32364 net.cpp:322] The NetState phase (0) differed from the phase 
(1) specified by a rule in layer L1_b16_cbr1_bn\nI0821 08:59:38.785583 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b16_cbr2_bn\nI0821 08:59:38.785601 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr1_bn\nI0821 08:59:38.785620 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr2_bn\nI0821 08:59:38.785640 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr1_bn\nI0821 08:59:38.785657 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr2_bn\nI0821 08:59:38.785676 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:59:38.785692 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:59:38.785718 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:59:38.785735 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:59:38.785753 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:59:38.785770 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:59:38.785790 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:59:38.785809 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:59:38.785826 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:59:38.785842 32364 net.cpp:322] 
The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:59:38.785861 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:59:38.785877 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:59:38.785894 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:59:38.785923 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:59:38.785943 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:59:38.785961 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:59:38.785981 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:59:38.785997 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:59:38.786017 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr1_bn\nI0821 08:59:38.786034 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr2_bn\nI0821 08:59:38.786053 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr1_bn\nI0821 08:59:38.786072 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr2_bn\nI0821 08:59:38.786089 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b12_cbr1_bn\nI0821 08:59:38.786108 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L2_b12_cbr2_bn\nI0821 08:59:38.786125 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr1_bn\nI0821 08:59:38.786144 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr2_bn\nI0821 08:59:38.786170 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr1_bn\nI0821 08:59:38.786188 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr2_bn\nI0821 08:59:38.786206 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr1_bn\nI0821 08:59:38.786223 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr2_bn\nI0821 08:59:38.786242 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr1_bn\nI0821 08:59:38.786260 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr2_bn\nI0821 08:59:38.786280 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr1_bn\nI0821 08:59:38.786296 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr2_bn\nI0821 08:59:38.786314 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr1_bn\nI0821 08:59:38.786332 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr2_bn\nI0821 08:59:38.786351 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:59:38.786370 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:59:38.786393 32364 net.cpp:322] The NetState phase (0) 
differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:59:38.786412 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:59:38.786430 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:59:38.786448 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:59:38.786464 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:59:38.786489 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:59:38.786511 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:59:38.786530 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:59:38.786548 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:59:38.786566 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:59:38.786581 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:59:38.786599 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:59:38.786619 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:59:38.786636 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:59:38.786655 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:59:38.786670 
32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:59:38.786689 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr1_bn\nI0821 08:59:38.786706 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr2_bn\nI0821 08:59:38.786722 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr1_bn\nI0821 08:59:38.786741 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr2_bn\nI0821 08:59:38.786759 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr1_bn\nI0821 08:59:38.786778 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr2_bn\nI0821 08:59:38.786798 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr1_bn\nI0821 08:59:38.786813 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr2_bn\nI0821 08:59:38.786833 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr1_bn\nI0821 08:59:38.786850 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr2_bn\nI0821 08:59:38.786870 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr1_bn\nI0821 08:59:38.786887 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr2_bn\nI0821 08:59:38.786906 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b16_cbr1_bn\nI0821 08:59:38.786924 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by 
a rule in layer L3_b16_cbr2_bn\nI0821 08:59:38.786943 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr1_bn\nI0821 08:59:38.786962 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr2_bn\nI0821 08:59:38.786988 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr1_bn\nI0821 08:59:38.787005 32364 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr2_bn\nI0821 08:59:38.790319 32364 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: 
\"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: 
\"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n  
  bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: 
\"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: 
\"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  
}\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\nI0821 08:59:38.794114 32364 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:59:38.796608 32364 net.cpp:100] Creating Layer dataLayer\nI0821 08:59:38.796690 32364 net.cpp:408] dataLayer -> data_top\nI0821 08:59:38.796897 32364 net.cpp:408] dataLayer -> label\nI0821 08:59:38.797026 32364 
data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:59:38.810976 32370 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 08:59:38.858021 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:38.864029 32364 net.cpp:150] Setting up dataLayer\nI0821 08:59:38.864112 32364 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0821 08:59:38.864133 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:38.864145 32364 net.cpp:165] Memory required for data: 1229200\nI0821 08:59:38.864176 32364 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:59:38.864200 32364 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:59:38.864214 32364 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:59:38.864244 32364 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:59:38.864271 32364 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:59:38.864377 32364 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:59:38.864397 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:38.864409 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:38.864418 32364 net.cpp:165] Memory required for data: 1230000\nI0821 08:59:38.864428 32364 layer_factory.hpp:77] Creating layer pre_conv\nI0821 08:59:38.864511 32364 net.cpp:100] Creating Layer pre_conv\nI0821 08:59:38.864526 32364 net.cpp:434] pre_conv <- data_top\nI0821 08:59:38.864547 32364 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:59:38.866408 32364 net.cpp:150] Setting up pre_conv\nI0821 08:59:38.866433 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.866444 32364 net.cpp:165] Memory required for data: 7783600\nI0821 08:59:38.866526 32364 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:59:38.866639 32364 net.cpp:100] Creating Layer pre_bn\nI0821 08:59:38.866655 32364 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:59:38.866672 32364 net.cpp:408] pre_bn -> 
pre_bn_top\nI0821 08:59:38.866693 32371 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:38.867028 32364 net.cpp:150] Setting up pre_bn\nI0821 08:59:38.867050 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.867060 32364 net.cpp:165] Memory required for data: 14337200\nI0821 08:59:38.867089 32364 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:38.867166 32364 net.cpp:100] Creating Layer pre_scale\nI0821 08:59:38.867182 32364 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:59:38.867198 32364 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:59:38.867432 32364 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:38.870558 32364 net.cpp:150] Setting up pre_scale\nI0821 08:59:38.870584 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.870594 32364 net.cpp:165] Memory required for data: 20890800\nI0821 08:59:38.870615 32364 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:59:38.870682 32364 net.cpp:100] Creating Layer pre_relu\nI0821 08:59:38.870702 32364 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:59:38.870718 32364 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:59:38.870738 32364 net.cpp:150] Setting up pre_relu\nI0821 08:59:38.870754 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.870764 32364 net.cpp:165] Memory required for data: 27444400\nI0821 08:59:38.870774 32364 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:59:38.870787 32364 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:59:38.870797 32364 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:59:38.870815 32364 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:59:38.870836 32364 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:59:38.870910 32364 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:59:38.870929 32364 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:38.870942 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.870951 32364 net.cpp:165] Memory required for data: 40551600\nI0821 08:59:38.870962 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:59:38.870990 32364 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:59:38.871003 32364 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:59:38.871021 32364 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:59:38.871405 32364 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:59:38.871426 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.871436 32364 net.cpp:165] Memory required for data: 47105200\nI0821 08:59:38.871459 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:59:38.871485 32364 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:59:38.871497 32364 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:59:38.871515 32364 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:59:38.871772 32364 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:59:38.871790 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.871800 32364 net.cpp:165] Memory required for data: 53658800\nI0821 08:59:38.871821 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:38.871842 32364 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:59:38.871853 32364 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:59:38.871870 32364 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.871959 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:38.872133 32364 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:59:38.872158 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.872169 32364 net.cpp:165] Memory required for data: 60212400\nI0821 08:59:38.872187 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:59:38.872202 32364 net.cpp:100] 
Creating Layer L1_b1_cbr1_relu\nI0821 08:59:38.872213 32364 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:59:38.872232 32364 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.872252 32364 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:59:38.872267 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.872277 32364 net.cpp:165] Memory required for data: 66766000\nI0821 08:59:38.872287 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:59:38.872314 32364 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:59:38.872326 32364 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:59:38.872344 32364 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:59:38.872699 32364 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:59:38.872725 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.872736 32364 net.cpp:165] Memory required for data: 73319600\nI0821 08:59:38.872755 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:59:38.872776 32364 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:59:38.872787 32364 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:59:38.872803 32364 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:59:38.873073 32364 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:59:38.873092 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.873101 32364 net.cpp:165] Memory required for data: 79873200\nI0821 08:59:38.873127 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:38.873157 32364 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:59:38.873172 32364 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:59:38.873191 32364 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:59:38.873284 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:38.873457 32364 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:59:38.873476 32364 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.873486 32364 net.cpp:165] Memory required for data: 86426800\nI0821 08:59:38.873504 32364 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:59:38.873576 32364 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:59:38.873592 32364 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:59:38.873610 32364 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:59:38.873627 32364 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:59:38.873726 32364 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:59:38.873746 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.873756 32364 net.cpp:165] Memory required for data: 92980400\nI0821 08:59:38.873766 32364 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:59:38.873781 32364 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:59:38.873792 32364 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:59:38.873811 32364 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:59:38.873831 32364 net.cpp:150] Setting up L1_b1_relu\nI0821 08:59:38.873845 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.873854 32364 net.cpp:165] Memory required for data: 99534000\nI0821 08:59:38.873865 32364 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:38.873883 32364 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:38.873893 32364 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:59:38.873908 32364 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:38.873927 32364 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:38.874003 32364 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:38.874022 32364 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.874034 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.874043 32364 net.cpp:165] Memory required for data: 112641200\nI0821 08:59:38.874053 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:59:38.874073 32364 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:59:38.874085 32364 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:38.874109 32364 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:59:38.874467 32364 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:59:38.874487 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.874497 32364 net.cpp:165] Memory required for data: 119194800\nI0821 08:59:38.874516 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:59:38.874532 32364 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:59:38.874544 32364 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:59:38.874574 32364 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:59:38.874860 32364 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:59:38.874883 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.874893 32364 net.cpp:165] Memory required for data: 125748400\nI0821 08:59:38.874917 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:38.874933 32364 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:59:38.874944 32364 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:59:38.874959 32364 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:38.875048 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:38.875231 32364 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:59:38.875250 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.875260 32364 net.cpp:165] Memory required for data: 132302000\nI0821 08:59:38.875278 32364 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_relu\nI0821 08:59:38.875298 32364 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:59:38.875310 32364 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:59:38.875329 32364 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:38.875349 32364 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:59:38.875362 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.875372 32364 net.cpp:165] Memory required for data: 138855600\nI0821 08:59:38.875382 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:59:38.875402 32364 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:59:38.875414 32364 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:59:38.875437 32364 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:59:38.875783 32364 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:59:38.875802 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.875813 32364 net.cpp:165] Memory required for data: 145409200\nI0821 08:59:38.875830 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:59:38.875847 32364 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:59:38.875857 32364 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:59:38.875881 32364 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:59:38.876163 32364 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:59:38.876181 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.876191 32364 net.cpp:165] Memory required for data: 151962800\nI0821 08:59:38.876225 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:38.876243 32364 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:59:38.876256 32364 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:59:38.876276 32364 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:59:38.876368 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:38.876540 32364 net.cpp:150] 
Setting up L1_b2_cbr2_scale\nI0821 08:59:38.876559 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.876569 32364 net.cpp:165] Memory required for data: 158516400\nI0821 08:59:38.876586 32364 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:59:38.876608 32364 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:59:38.876619 32364 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:59:38.876633 32364 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:38.876649 32364 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:59:38.876704 32364 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:59:38.876723 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.876732 32364 net.cpp:165] Memory required for data: 165070000\nI0821 08:59:38.876742 32364 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:59:38.876756 32364 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:59:38.876768 32364 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:59:38.876782 32364 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:59:38.876808 32364 net.cpp:150] Setting up L1_b2_relu\nI0821 08:59:38.876824 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.876833 32364 net.cpp:165] Memory required for data: 171623600\nI0821 08:59:38.876843 32364 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:38.876857 32364 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:38.876868 32364 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:59:38.876888 32364 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:38.876909 32364 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:38.876984 32364 net.cpp:150] Setting up 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:38.877002 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.877017 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.877025 32364 net.cpp:165] Memory required for data: 184730800\nI0821 08:59:38.877037 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:59:38.877061 32364 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:59:38.877074 32364 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:38.877091 32364 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:59:38.877452 32364 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:59:38.877472 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.877482 32364 net.cpp:165] Memory required for data: 191284400\nI0821 08:59:38.877501 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:59:38.877517 32364 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:59:38.877528 32364 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:59:38.877550 32364 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:59:38.877817 32364 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:59:38.877836 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.877846 32364 net.cpp:165] Memory required for data: 197838000\nI0821 08:59:38.877866 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:38.877883 32364 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:59:38.877893 32364 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:59:38.877913 32364 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:38.878002 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:38.878190 32364 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:59:38.878213 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.878223 32364 net.cpp:165] Memory required for data: 
204391600\nI0821 08:59:38.878242 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:59:38.878257 32364 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:59:38.878268 32364 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:59:38.878283 32364 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:38.878301 32364 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:59:38.878315 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.878324 32364 net.cpp:165] Memory required for data: 210945200\nI0821 08:59:38.878334 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:59:38.878358 32364 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:59:38.878371 32364 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:59:38.878392 32364 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:59:38.878741 32364 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:59:38.878762 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.878770 32364 net.cpp:165] Memory required for data: 217498800\nI0821 08:59:38.878788 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:59:38.878818 32364 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:59:38.878830 32364 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:59:38.878856 32364 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:59:38.879163 32364 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:59:38.879184 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.879194 32364 net.cpp:165] Memory required for data: 224052400\nI0821 08:59:38.879215 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:38.879231 32364 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:59:38.879242 32364 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:59:38.879262 32364 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:59:38.879349 32364 
layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:38.879519 32364 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:59:38.879542 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.879552 32364 net.cpp:165] Memory required for data: 230606000\nI0821 08:59:38.879570 32364 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:59:38.879587 32364 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:59:38.879597 32364 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:59:38.879611 32364 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:38.879626 32364 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:59:38.879683 32364 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:59:38.879701 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.879710 32364 net.cpp:165] Memory required for data: 237159600\nI0821 08:59:38.879720 32364 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:59:38.879739 32364 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:59:38.879751 32364 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:59:38.879765 32364 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:59:38.879782 32364 net.cpp:150] Setting up L1_b3_relu\nI0821 08:59:38.879796 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.879806 32364 net.cpp:165] Memory required for data: 243713200\nI0821 08:59:38.879815 32364 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:38.879833 32364 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:38.879845 32364 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:59:38.879860 32364 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:38.879881 32364 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:38.879956 32364 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:38.879978 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.879992 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.880002 32364 net.cpp:165] Memory required for data: 256820400\nI0821 08:59:38.880013 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:59:38.880033 32364 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:59:38.880045 32364 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:38.880064 32364 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:59:38.880431 32364 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:59:38.880451 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.880460 32364 net.cpp:165] Memory required for data: 263374000\nI0821 08:59:38.880478 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:59:38.880501 32364 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:59:38.880512 32364 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:59:38.880529 32364 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:59:38.880803 32364 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:59:38.880822 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.880832 32364 net.cpp:165] Memory required for data: 269927600\nI0821 08:59:38.880862 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:38.880879 32364 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:59:38.880890 32364 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:59:38.880910 32364 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:38.881006 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:38.881196 32364 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:59:38.881216 32364 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0821 08:59:38.881225 32364 net.cpp:165] Memory required for data: 276481200\nI0821 08:59:38.881244 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:59:38.881260 32364 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:59:38.881271 32364 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:59:38.881284 32364 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:38.881304 32364 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:59:38.881319 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.881327 32364 net.cpp:165] Memory required for data: 283034800\nI0821 08:59:38.881336 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:59:38.881361 32364 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:59:38.881374 32364 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:59:38.881397 32364 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:59:38.881752 32364 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:59:38.881772 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.881781 32364 net.cpp:165] Memory required for data: 289588400\nI0821 08:59:38.881798 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:59:38.881819 32364 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:59:38.881831 32364 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:59:38.881852 32364 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:59:38.882123 32364 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:59:38.882143 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882159 32364 net.cpp:165] Memory required for data: 296142000\nI0821 08:59:38.882181 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:38.882203 32364 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:59:38.882215 32364 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:59:38.882231 32364 net.cpp:395] 
L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:59:38.882316 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:38.882488 32364 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:59:38.882506 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882516 32364 net.cpp:165] Memory required for data: 302695600\nI0821 08:59:38.882534 32364 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:59:38.882550 32364 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:59:38.882561 32364 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:59:38.882575 32364 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:38.882596 32364 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:59:38.882647 32364 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:59:38.882673 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882683 32364 net.cpp:165] Memory required for data: 309249200\nI0821 08:59:38.882692 32364 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:59:38.882707 32364 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:59:38.882719 32364 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:59:38.882732 32364 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:59:38.882750 32364 net.cpp:150] Setting up L1_b4_relu\nI0821 08:59:38.882766 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882774 32364 net.cpp:165] Memory required for data: 315802800\nI0821 08:59:38.882784 32364 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:38.882810 32364 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:38.882822 32364 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:59:38.882838 32364 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 
08:59:38.882858 32364 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:38.882936 32364 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:38.882958 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882972 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.882982 32364 net.cpp:165] Memory required for data: 328910000\nI0821 08:59:38.882992 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:59:38.883011 32364 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:59:38.883023 32364 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:38.883045 32364 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:59:38.883410 32364 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:59:38.883430 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.883440 32364 net.cpp:165] Memory required for data: 335463600\nI0821 08:59:38.883474 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:59:38.883496 32364 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:59:38.883508 32364 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:59:38.883529 32364 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:59:38.883811 32364 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:59:38.883831 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.883841 32364 net.cpp:165] Memory required for data: 342017200\nI0821 08:59:38.883862 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:38.883883 32364 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:59:38.883895 32364 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:59:38.883911 32364 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:38.883996 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:38.884176 32364 net.cpp:150] Setting 
up L1_b5_cbr1_scale\nI0821 08:59:38.884194 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.884204 32364 net.cpp:165] Memory required for data: 348570800\nI0821 08:59:38.884222 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:59:38.884238 32364 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:59:38.884248 32364 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:59:38.884268 32364 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:38.884287 32364 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:59:38.884301 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.884311 32364 net.cpp:165] Memory required for data: 355124400\nI0821 08:59:38.884321 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:59:38.884346 32364 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:59:38.884359 32364 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:59:38.884377 32364 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:59:38.884734 32364 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:59:38.884754 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.884763 32364 net.cpp:165] Memory required for data: 361678000\nI0821 08:59:38.884781 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:59:38.884802 32364 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:59:38.884814 32364 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:59:38.884830 32364 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:59:38.885104 32364 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:59:38.885123 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885133 32364 net.cpp:165] Memory required for data: 368231600\nI0821 08:59:38.885169 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:38.885187 32364 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:59:38.885200 32364 net.cpp:434] 
L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:59:38.885220 32364 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:59:38.885313 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:38.885488 32364 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:59:38.885512 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885524 32364 net.cpp:165] Memory required for data: 374785200\nI0821 08:59:38.885540 32364 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:59:38.885557 32364 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:59:38.885570 32364 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:59:38.885582 32364 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:38.885598 32364 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:59:38.885653 32364 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:59:38.885670 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885680 32364 net.cpp:165] Memory required for data: 381338800\nI0821 08:59:38.885689 32364 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:59:38.885709 32364 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:59:38.885720 32364 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:59:38.885735 32364 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:59:38.885753 32364 net.cpp:150] Setting up L1_b5_relu\nI0821 08:59:38.885768 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885776 32364 net.cpp:165] Memory required for data: 387892400\nI0821 08:59:38.885787 32364 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:38.885805 32364 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:38.885818 32364 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:59:38.885833 32364 net.cpp:408] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:38.885851 32364 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:38.885926 32364 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:38.885949 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885962 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.885972 32364 net.cpp:165] Memory required for data: 400999600\nI0821 08:59:38.885983 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:59:38.886003 32364 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:59:38.886015 32364 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:38.886034 32364 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:59:38.886400 32364 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:59:38.886420 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.886430 32364 net.cpp:165] Memory required for data: 407553200\nI0821 08:59:38.886448 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:59:38.886469 32364 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:59:38.886482 32364 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:59:38.886498 32364 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:59:38.886768 32364 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:59:38.886787 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.886797 32364 net.cpp:165] Memory required for data: 414106800\nI0821 08:59:38.886819 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:38.886835 32364 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:59:38.886847 32364 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:59:38.886867 32364 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:38.886970 32364 
layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:38.887161 32364 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:59:38.887181 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.887190 32364 net.cpp:165] Memory required for data: 420660400\nI0821 08:59:38.887209 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:59:38.887225 32364 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:59:38.887236 32364 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:59:38.887250 32364 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:38.887270 32364 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:59:38.887284 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.887295 32364 net.cpp:165] Memory required for data: 427214000\nI0821 08:59:38.887303 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:59:38.887328 32364 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:59:38.887342 32364 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:59:38.887364 32364 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:59:38.887720 32364 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:59:38.887740 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.887749 32364 net.cpp:165] Memory required for data: 433767600\nI0821 08:59:38.887768 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:59:38.887789 32364 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:59:38.887801 32364 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:59:38.887822 32364 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:59:38.888092 32364 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:59:38.888110 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888120 32364 net.cpp:165] Memory required for data: 440321200\nI0821 08:59:38.888142 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:38.888165 
32364 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:59:38.888178 32364 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:59:38.888198 32364 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:59:38.888289 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:38.888464 32364 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:59:38.888486 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888497 32364 net.cpp:165] Memory required for data: 446874800\nI0821 08:59:38.888515 32364 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:59:38.888542 32364 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:59:38.888556 32364 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:59:38.888569 32364 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:38.888586 32364 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:59:38.888643 32364 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:59:38.888661 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888671 32364 net.cpp:165] Memory required for data: 453428400\nI0821 08:59:38.888682 32364 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:59:38.888697 32364 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:59:38.888708 32364 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:59:38.888726 32364 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:59:38.888746 32364 net.cpp:150] Setting up L1_b6_relu\nI0821 08:59:38.888761 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888769 32364 net.cpp:165] Memory required for data: 459982000\nI0821 08:59:38.888779 32364 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:38.888793 32364 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:38.888804 32364 net.cpp:434] 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:59:38.888819 32364 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:38.888850 32364 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:38.888933 32364 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:38.888953 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888967 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.888977 32364 net.cpp:165] Memory required for data: 473089200\nI0821 08:59:38.888988 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:59:38.889008 32364 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:59:38.889019 32364 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:38.889042 32364 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:59:38.889413 32364 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:59:38.889434 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.889443 32364 net.cpp:165] Memory required for data: 479642800\nI0821 08:59:38.889461 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:59:38.889478 32364 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:59:38.889490 32364 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:59:38.889511 32364 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:59:38.889783 32364 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:59:38.889802 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.889812 32364 net.cpp:165] Memory required for data: 486196400\nI0821 08:59:38.889833 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:38.889854 32364 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:59:38.889866 32364 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:59:38.889883 
32364 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:38.889972 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:38.890153 32364 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:59:38.890172 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.890182 32364 net.cpp:165] Memory required for data: 492750000\nI0821 08:59:38.890199 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:59:38.890219 32364 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:59:38.890230 32364 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:59:38.890245 32364 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:38.890264 32364 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:59:38.890280 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.890288 32364 net.cpp:165] Memory required for data: 499303600\nI0821 08:59:38.890298 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:59:38.890326 32364 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:59:38.890339 32364 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:59:38.890362 32364 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:59:38.890713 32364 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:59:38.890733 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.890743 32364 net.cpp:165] Memory required for data: 505857200\nI0821 08:59:38.890760 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:59:38.890777 32364 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:59:38.890789 32364 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:59:38.890805 32364 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:59:38.891077 32364 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:59:38.891095 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891104 32364 net.cpp:165] Memory required for data: 512410800\nI0821 
08:59:38.891125 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:38.891151 32364 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:59:38.891165 32364 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:59:38.891181 32364 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:59:38.891291 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:38.891464 32364 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:59:38.891484 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891492 32364 net.cpp:165] Memory required for data: 518964400\nI0821 08:59:38.891510 32364 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:59:38.891527 32364 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:59:38.891540 32364 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:59:38.891552 32364 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:38.891573 32364 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:59:38.891630 32364 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:59:38.891649 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891659 32364 net.cpp:165] Memory required for data: 525518000\nI0821 08:59:38.891669 32364 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:59:38.891682 32364 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:59:38.891695 32364 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:59:38.891713 32364 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:59:38.891733 32364 net.cpp:150] Setting up L1_b7_relu\nI0821 08:59:38.891747 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891757 32364 net.cpp:165] Memory required for data: 532071600\nI0821 08:59:38.891767 32364 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:38.891780 32364 net.cpp:100] Creating Layer 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:38.891790 32364 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:59:38.891805 32364 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:38.891825 32364 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:38.891904 32364 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:38.891923 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891937 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.891947 32364 net.cpp:165] Memory required for data: 545178800\nI0821 08:59:38.891957 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:59:38.891976 32364 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:59:38.891988 32364 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:38.892011 32364 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:59:38.892392 32364 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:59:38.892413 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.892422 32364 net.cpp:165] Memory required for data: 551732400\nI0821 08:59:38.892441 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:59:38.892457 32364 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:59:38.892468 32364 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:59:38.892493 32364 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:59:38.892769 32364 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:59:38.892788 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.892798 32364 net.cpp:165] Memory required for data: 558286000\nI0821 08:59:38.892819 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:38.892839 32364 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 
08:59:38.892851 32364 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:59:38.892868 32364 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:38.892954 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:38.893136 32364 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:59:38.893162 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.893172 32364 net.cpp:165] Memory required for data: 564839600\nI0821 08:59:38.893190 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:59:38.893219 32364 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:59:38.893232 32364 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:59:38.893252 32364 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:38.893272 32364 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:59:38.893287 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.893296 32364 net.cpp:165] Memory required for data: 571393200\nI0821 08:59:38.893306 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:59:38.893332 32364 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:59:38.893345 32364 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:59:38.893362 32364 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:59:38.893721 32364 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:59:38.893740 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.893749 32364 net.cpp:165] Memory required for data: 577946800\nI0821 08:59:38.893767 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:59:38.893788 32364 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:59:38.893800 32364 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:59:38.893818 32364 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:59:38.894094 32364 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:59:38.894112 32364 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:38.894122 32364 net.cpp:165] Memory required for data: 584500400\nI0821 08:59:38.894145 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:38.894173 32364 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:59:38.894186 32364 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:59:38.894203 32364 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:59:38.894295 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:38.894472 32364 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:59:38.894490 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.894500 32364 net.cpp:165] Memory required for data: 591054000\nI0821 08:59:38.894520 32364 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:59:38.894536 32364 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:59:38.894546 32364 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:59:38.894560 32364 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:38.894580 32364 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:59:38.894634 32364 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:59:38.894657 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.894668 32364 net.cpp:165] Memory required for data: 597607600\nI0821 08:59:38.894678 32364 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:59:38.894692 32364 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:59:38.894704 32364 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:59:38.894718 32364 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:59:38.894735 32364 net.cpp:150] Setting up L1_b8_relu\nI0821 08:59:38.894752 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.894760 32364 net.cpp:165] Memory required for data: 604161200\nI0821 08:59:38.894769 32364 layer_factory.hpp:77] Creating layer 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:38.894788 32364 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:38.894799 32364 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:59:38.894814 32364 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:38.894834 32364 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:38.894915 32364 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:38.894934 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.894956 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.894968 32364 net.cpp:165] Memory required for data: 617268400\nI0821 08:59:38.894978 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:59:38.894999 32364 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:59:38.895009 32364 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:38.895031 32364 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:59:38.895397 32364 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:59:38.895417 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.895427 32364 net.cpp:165] Memory required for data: 623822000\nI0821 08:59:38.895445 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:59:38.895473 32364 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:59:38.895488 32364 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:59:38.895505 32364 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:59:38.895781 32364 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:59:38.895799 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.895809 32364 net.cpp:165] Memory required for data: 630375600\nI0821 08:59:38.895829 32364 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_scale\nI0821 08:59:38.895846 32364 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:59:38.895858 32364 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:59:38.895872 32364 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:38.895967 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:38.896152 32364 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:59:38.896170 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.896180 32364 net.cpp:165] Memory required for data: 636929200\nI0821 08:59:38.896198 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:59:38.896214 32364 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:59:38.896226 32364 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:59:38.896246 32364 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:38.896266 32364 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:59:38.896281 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.896291 32364 net.cpp:165] Memory required for data: 643482800\nI0821 08:59:38.896299 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:59:38.896323 32364 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:59:38.896337 32364 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:59:38.896353 32364 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:59:38.896715 32364 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:59:38.896735 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.896744 32364 net.cpp:165] Memory required for data: 650036400\nI0821 08:59:38.896762 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:59:38.896780 32364 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:59:38.896791 32364 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:59:38.896812 32364 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:59:38.897086 32364 
net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:59:38.897109 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897119 32364 net.cpp:165] Memory required for data: 656590000\nI0821 08:59:38.897181 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:38.897200 32364 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:59:38.897212 32364 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:59:38.897227 32364 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:59:38.897313 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:38.897487 32364 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:59:38.897506 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897516 32364 net.cpp:165] Memory required for data: 663143600\nI0821 08:59:38.897543 32364 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:59:38.897560 32364 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:59:38.897572 32364 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:59:38.897590 32364 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:38.897608 32364 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:59:38.897661 32364 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:59:38.897680 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897688 32364 net.cpp:165] Memory required for data: 669697200\nI0821 08:59:38.897699 32364 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:59:38.897713 32364 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:59:38.897723 32364 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:59:38.897742 32364 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:59:38.897763 32364 net.cpp:150] Setting up L1_b9_relu\nI0821 08:59:38.897778 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897788 32364 net.cpp:165] Memory required for 
data: 676250800\nI0821 08:59:38.897799 32364 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:38.897811 32364 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:38.897822 32364 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:59:38.897845 32364 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:38.897864 32364 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:38.897945 32364 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:38.897966 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897980 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.897990 32364 net.cpp:165] Memory required for data: 689358000\nI0821 08:59:38.898001 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0821 08:59:38.898021 32364 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0821 08:59:38.898035 32364 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:38.898057 32364 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0821 08:59:38.898422 32364 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0821 08:59:38.898440 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.898450 32364 net.cpp:165] Memory required for data: 695911600\nI0821 08:59:38.898468 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0821 08:59:38.898484 32364 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0821 08:59:38.898496 32364 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0821 08:59:38.898516 32364 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0821 08:59:38.898792 32364 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0821 08:59:38.898811 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.898821 32364 net.cpp:165] Memory required for 
data: 702465200\nI0821 08:59:38.898841 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:38.898864 32364 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0821 08:59:38.898876 32364 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0821 08:59:38.898892 32364 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:38.898982 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:38.899163 32364 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0821 08:59:38.899181 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.899191 32364 net.cpp:165] Memory required for data: 709018800\nI0821 08:59:38.899210 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0821 08:59:38.899233 32364 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0821 08:59:38.899246 32364 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0821 08:59:38.899261 32364 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:38.899289 32364 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0821 08:59:38.899304 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.899313 32364 net.cpp:165] Memory required for data: 715572400\nI0821 08:59:38.899323 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0821 08:59:38.899349 32364 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0821 08:59:38.899361 32364 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0821 08:59:38.899384 32364 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0821 08:59:38.899731 32364 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0821 08:59:38.899751 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.899760 32364 net.cpp:165] Memory required for data: 722126000\nI0821 08:59:38.899778 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0821 08:59:38.899796 32364 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0821 08:59:38.899806 32364 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0821 
08:59:38.899822 32364 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0821 08:59:38.900110 32364 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0821 08:59:38.900130 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900140 32364 net.cpp:165] Memory required for data: 728679600\nI0821 08:59:38.900167 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:38.900189 32364 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0821 08:59:38.900202 32364 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0821 08:59:38.900218 32364 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0821 08:59:38.900317 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:38.900496 32364 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0821 08:59:38.900514 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900523 32364 net.cpp:165] Memory required for data: 735233200\nI0821 08:59:38.900542 32364 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0821 08:59:38.900559 32364 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0821 08:59:38.900570 32364 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0821 08:59:38.900583 32364 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:38.900606 32364 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0821 08:59:38.900665 32364 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0821 08:59:38.900683 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900693 32364 net.cpp:165] Memory required for data: 741786800\nI0821 08:59:38.900702 32364 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0821 08:59:38.900717 32364 net.cpp:100] Creating Layer L1_b10_relu\nI0821 08:59:38.900729 32364 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0821 08:59:38.900748 32364 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top (in-place)\nI0821 08:59:38.900766 32364 net.cpp:150] Setting up L1_b10_relu\nI0821 
08:59:38.900780 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900789 32364 net.cpp:165] Memory required for data: 748340400\nI0821 08:59:38.900799 32364 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:38.900812 32364 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:38.900822 32364 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0821 08:59:38.900836 32364 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:38.900856 32364 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:38.900935 32364 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:38.900954 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900967 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.900976 32364 net.cpp:165] Memory required for data: 761447600\nI0821 08:59:38.900986 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0821 08:59:38.901015 32364 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0821 08:59:38.901028 32364 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:38.901052 32364 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0821 08:59:38.901413 32364 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0821 08:59:38.901433 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.901443 32364 net.cpp:165] Memory required for data: 768001200\nI0821 08:59:38.901463 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0821 08:59:38.901479 32364 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0821 08:59:38.901491 32364 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0821 08:59:38.901515 32364 net.cpp:408] L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0821 08:59:38.901798 32364 net.cpp:150] Setting up 
L1_b11_cbr1_bn\nI0821 08:59:38.901816 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.901825 32364 net.cpp:165] Memory required for data: 774554800\nI0821 08:59:38.901849 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:38.901868 32364 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0821 08:59:38.901881 32364 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0821 08:59:38.901896 32364 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:38.901985 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:38.902169 32364 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0821 08:59:38.902189 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.902197 32364 net.cpp:165] Memory required for data: 781108400\nI0821 08:59:38.902216 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0821 08:59:38.902231 32364 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0821 08:59:38.902242 32364 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0821 08:59:38.902261 32364 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:38.902281 32364 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0821 08:59:38.902295 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.902305 32364 net.cpp:165] Memory required for data: 787662000\nI0821 08:59:38.902314 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0821 08:59:38.902339 32364 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0821 08:59:38.902351 32364 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0821 08:59:38.902369 32364 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0821 08:59:38.902736 32364 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0821 08:59:38.902755 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.902765 32364 net.cpp:165] Memory required for data: 794215600\nI0821 08:59:38.902783 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0821 
08:59:38.902807 32364 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0821 08:59:38.902820 32364 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0821 08:59:38.902837 32364 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0821 08:59:38.903116 32364 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0821 08:59:38.903136 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903151 32364 net.cpp:165] Memory required for data: 800769200\nI0821 08:59:38.903173 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:38.903194 32364 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0821 08:59:38.903208 32364 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0821 08:59:38.903223 32364 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0821 08:59:38.903308 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:38.903488 32364 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0821 08:59:38.903507 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903517 32364 net.cpp:165] Memory required for data: 807322800\nI0821 08:59:38.903534 32364 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0821 08:59:38.903551 32364 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0821 08:59:38.903563 32364 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0821 08:59:38.903583 32364 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:38.903604 32364 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0821 08:59:38.903659 32364 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0821 08:59:38.903683 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903693 32364 net.cpp:165] Memory required for data: 813876400\nI0821 08:59:38.903703 32364 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0821 08:59:38.903718 32364 net.cpp:100] Creating Layer L1_b11_relu\nI0821 08:59:38.903729 32364 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0821 
08:59:38.903743 32364 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top (in-place)\nI0821 08:59:38.903762 32364 net.cpp:150] Setting up L1_b11_relu\nI0821 08:59:38.903776 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903786 32364 net.cpp:165] Memory required for data: 820430000\nI0821 08:59:38.903796 32364 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:38.903813 32364 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:38.903825 32364 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0821 08:59:38.903841 32364 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:38.903861 32364 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:38.903940 32364 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:38.903961 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903975 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.903985 32364 net.cpp:165] Memory required for data: 833537200\nI0821 08:59:38.903995 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0821 08:59:38.904016 32364 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0821 08:59:38.904029 32364 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:38.904052 32364 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0821 08:59:38.904422 32364 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0821 08:59:38.904441 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.904451 32364 net.cpp:165] Memory required for data: 840090800\nI0821 08:59:38.904469 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0821 08:59:38.904486 32364 net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0821 08:59:38.904497 32364 net.cpp:434] L1_b12_cbr1_bn <- 
L1_b12_cbr1_conv_top\nI0821 08:59:38.904513 32364 net.cpp:408] L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0821 08:59:38.904794 32364 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0821 08:59:38.904814 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.904822 32364 net.cpp:165] Memory required for data: 846644400\nI0821 08:59:38.904844 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:38.904865 32364 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0821 08:59:38.904877 32364 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0821 08:59:38.904894 32364 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:38.904989 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:38.905175 32364 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0821 08:59:38.905194 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.905205 32364 net.cpp:165] Memory required for data: 853198000\nI0821 08:59:38.905222 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0821 08:59:38.905237 32364 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0821 08:59:38.905249 32364 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0821 08:59:38.905268 32364 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:38.905288 32364 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0821 08:59:38.905303 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.905321 32364 net.cpp:165] Memory required for data: 859751600\nI0821 08:59:38.905333 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0821 08:59:38.905360 32364 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0821 08:59:38.905375 32364 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0821 08:59:38.905391 32364 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0821 08:59:38.905757 32364 net.cpp:150] Setting up L1_b12_cbr2_conv\nI0821 08:59:38.905777 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:38.905786 32364 net.cpp:165] Memory required for data: 866305200\nI0821 08:59:38.905804 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0821 08:59:38.905825 32364 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0821 08:59:38.905838 32364 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0821 08:59:38.905853 32364 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0821 08:59:38.906131 32364 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0821 08:59:38.906157 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906167 32364 net.cpp:165] Memory required for data: 872858800\nI0821 08:59:38.906188 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:38.906204 32364 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0821 08:59:38.906216 32364 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0821 08:59:38.906236 32364 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0821 08:59:38.906322 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:38.906502 32364 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0821 08:59:38.906519 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906529 32364 net.cpp:165] Memory required for data: 879412400\nI0821 08:59:38.906548 32364 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0821 08:59:38.906564 32364 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0821 08:59:38.906575 32364 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0821 08:59:38.906589 32364 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:38.906610 32364 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0821 08:59:38.906663 32364 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0821 08:59:38.906687 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906697 32364 net.cpp:165] Memory required for data: 885966000\nI0821 08:59:38.906707 32364 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0821 
08:59:38.906720 32364 net.cpp:100] Creating Layer L1_b12_relu\nI0821 08:59:38.906733 32364 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0821 08:59:38.906746 32364 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0821 08:59:38.906764 32364 net.cpp:150] Setting up L1_b12_relu\nI0821 08:59:38.906780 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906788 32364 net.cpp:165] Memory required for data: 892519600\nI0821 08:59:38.906798 32364 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:38.906817 32364 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:38.906828 32364 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0821 08:59:38.906843 32364 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:38.906863 32364 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:38.906940 32364 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:38.906963 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906978 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.906988 32364 net.cpp:165] Memory required for data: 905626800\nI0821 08:59:38.906999 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0821 08:59:38.907019 32364 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0821 08:59:38.907032 32364 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:38.907058 32364 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0821 08:59:38.907440 32364 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0821 08:59:38.907460 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.907469 32364 net.cpp:165] Memory required for data: 912180400\nI0821 08:59:38.907487 32364 layer_factory.hpp:77] Creating 
layer L1_b13_cbr1_bn\nI0821 08:59:38.907531 32364 net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0821 08:59:38.907546 32364 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0821 08:59:38.907563 32364 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0821 08:59:38.907851 32364 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0821 08:59:38.907871 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.907879 32364 net.cpp:165] Memory required for data: 918734000\nI0821 08:59:38.907902 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:38.907920 32364 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0821 08:59:38.907932 32364 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0821 08:59:38.907951 32364 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:38.908042 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:38.908226 32364 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0821 08:59:38.908244 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.908254 32364 net.cpp:165] Memory required for data: 925287600\nI0821 08:59:38.908272 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0821 08:59:38.908291 32364 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0821 08:59:38.908303 32364 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0821 08:59:38.908318 32364 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:38.908336 32364 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0821 08:59:38.908350 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.908360 32364 net.cpp:165] Memory required for data: 931841200\nI0821 08:59:38.908370 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0821 08:59:38.908394 32364 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0821 08:59:38.908407 32364 net.cpp:434] L1_b13_cbr2_conv <- L1_b13_cbr1_bn_top\nI0821 08:59:38.908432 32364 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0821 
08:59:38.908794 32364 net.cpp:150] Setting up L1_b13_cbr2_conv\nI0821 08:59:38.908814 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.908823 32364 net.cpp:165] Memory required for data: 938394800\nI0821 08:59:38.908841 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0821 08:59:38.908862 32364 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0821 08:59:38.908874 32364 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0821 08:59:38.908895 32364 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0821 08:59:38.909180 32364 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0821 08:59:38.909199 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.909209 32364 net.cpp:165] Memory required for data: 944948400\nI0821 08:59:38.909230 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:38.909247 32364 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0821 08:59:38.909258 32364 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0821 08:59:38.909274 32364 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0821 08:59:38.909368 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:38.909548 32364 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0821 08:59:38.909566 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.909576 32364 net.cpp:165] Memory required for data: 951502000\nI0821 08:59:38.909595 32364 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0821 08:59:38.909617 32364 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0821 08:59:38.909629 32364 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0821 08:59:38.909642 32364 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:38.909668 32364 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0821 08:59:38.909726 32364 net.cpp:150] Setting up L1_b13_sum_eltwise\nI0821 08:59:38.909744 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:38.909754 32364 net.cpp:165] Memory required for data: 958055600\nI0821 08:59:38.909765 32364 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0821 08:59:38.909778 32364 net.cpp:100] Creating Layer L1_b13_relu\nI0821 08:59:38.909790 32364 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0821 08:59:38.909809 32364 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0821 08:59:38.909828 32364 net.cpp:150] Setting up L1_b13_relu\nI0821 08:59:38.909843 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.909852 32364 net.cpp:165] Memory required for data: 964609200\nI0821 08:59:38.909862 32364 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:38.909876 32364 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:38.909888 32364 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0821 08:59:38.909907 32364 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:38.909927 32364 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:38.910006 32364 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:38.910025 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.910039 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.910048 32364 net.cpp:165] Memory required for data: 977716400\nI0821 08:59:38.910058 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0821 08:59:38.910084 32364 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0821 08:59:38.910097 32364 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:38.910116 32364 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0821 08:59:38.910485 32364 net.cpp:150] Setting up L1_b14_cbr1_conv\nI0821 08:59:38.910504 32364 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0821 08:59:38.910514 32364 net.cpp:165] Memory required for data: 984270000\nI0821 08:59:38.910532 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0821 08:59:38.910555 32364 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0821 08:59:38.910568 32364 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0821 08:59:38.910589 32364 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0821 08:59:38.910859 32364 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0821 08:59:38.910878 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.910888 32364 net.cpp:165] Memory required for data: 990823600\nI0821 08:59:38.910909 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:38.910926 32364 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0821 08:59:38.910938 32364 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0821 08:59:38.910954 32364 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:38.911047 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:38.911227 32364 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0821 08:59:38.911247 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.911255 32364 net.cpp:165] Memory required for data: 997377200\nI0821 08:59:38.911274 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0821 08:59:38.911296 32364 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0821 08:59:38.911309 32364 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0821 08:59:38.911324 32364 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:38.911345 32364 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0821 08:59:38.911358 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.911368 32364 net.cpp:165] Memory required for data: 1003930800\nI0821 08:59:38.911377 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_conv\nI0821 08:59:38.911411 32364 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0821 
08:59:38.911424 32364 net.cpp:434] L1_b14_cbr2_conv <- L1_b14_cbr1_bn_top\nI0821 08:59:38.911445 32364 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0821 08:59:38.911825 32364 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0821 08:59:38.912001 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.912014 32364 net.cpp:165] Memory required for data: 1010484400\nI0821 08:59:38.912032 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0821 08:59:38.912055 32364 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0821 08:59:38.912067 32364 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0821 08:59:38.912086 32364 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0821 08:59:38.912374 32364 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0821 08:59:38.912394 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.912403 32364 net.cpp:165] Memory required for data: 1017038000\nI0821 08:59:38.912425 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:38.912442 32364 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0821 08:59:38.912453 32364 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0821 08:59:38.912468 32364 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0821 08:59:38.912560 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:38.912740 32364 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0821 08:59:38.912760 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.912768 32364 net.cpp:165] Memory required for data: 1023591600\nI0821 08:59:38.912786 32364 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0821 08:59:38.912803 32364 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0821 08:59:38.912814 32364 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0821 08:59:38.912828 32364 net.cpp:434] L1_b14_sum_eltwise <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:38.912850 32364 net.cpp:408] L1_b14_sum_eltwise -> 
L1_b14_sum_eltwise_top\nI0821 08:59:38.912904 32364 net.cpp:150] Setting up L1_b14_sum_eltwise\nI0821 08:59:38.912921 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.912930 32364 net.cpp:165] Memory required for data: 1030145200\nI0821 08:59:38.912940 32364 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0821 08:59:38.912959 32364 net.cpp:100] Creating Layer L1_b14_relu\nI0821 08:59:38.912971 32364 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0821 08:59:38.912986 32364 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0821 08:59:38.913004 32364 net.cpp:150] Setting up L1_b14_relu\nI0821 08:59:38.913020 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.913029 32364 net.cpp:165] Memory required for data: 1036698800\nI0821 08:59:38.913038 32364 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:38.913051 32364 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:38.913063 32364 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0821 08:59:38.913079 32364 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:38.913096 32364 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:38.913185 32364 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:38.913205 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.913218 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.913228 32364 net.cpp:165] Memory required for data: 1049806000\nI0821 08:59:38.913238 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0821 08:59:38.913261 32364 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0821 08:59:38.913275 32364 net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:38.913292 32364 
net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0821 08:59:38.913676 32364 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0821 08:59:38.913702 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.913712 32364 net.cpp:165] Memory required for data: 1056359600\nI0821 08:59:38.913730 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0821 08:59:38.913753 32364 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0821 08:59:38.913765 32364 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0821 08:59:38.913786 32364 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0821 08:59:38.914069 32364 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0821 08:59:38.914088 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.914098 32364 net.cpp:165] Memory required for data: 1062913200\nI0821 08:59:38.914119 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:38.914136 32364 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0821 08:59:38.914155 32364 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0821 08:59:38.914171 32364 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:38.914261 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:38.914440 32364 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0821 08:59:38.914459 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.914469 32364 net.cpp:165] Memory required for data: 1069466800\nI0821 08:59:38.914487 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0821 08:59:38.914502 32364 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0821 08:59:38.914515 32364 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0821 08:59:38.914533 32364 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:38.914554 32364 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0821 08:59:38.914568 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.914577 32364 net.cpp:165] Memory required for 
data: 1076020400\nI0821 08:59:38.914587 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_conv\nI0821 08:59:38.914613 32364 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0821 08:59:38.914626 32364 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0821 08:59:38.914649 32364 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0821 08:59:38.915015 32364 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0821 08:59:38.915035 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.915045 32364 net.cpp:165] Memory required for data: 1082574000\nI0821 08:59:38.915062 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0821 08:59:38.915083 32364 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0821 08:59:38.915096 32364 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0821 08:59:38.915112 32364 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0821 08:59:38.915402 32364 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0821 08:59:38.915421 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.915431 32364 net.cpp:165] Memory required for data: 1089127600\nI0821 08:59:38.915452 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:38.915468 32364 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0821 08:59:38.915479 32364 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0821 08:59:38.915494 32364 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0821 08:59:38.915586 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:38.915768 32364 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0821 08:59:38.915787 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.915797 32364 net.cpp:165] Memory required for data: 1095681200\nI0821 08:59:38.915815 32364 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0821 08:59:38.915833 32364 net.cpp:100] Creating Layer L1_b15_sum_eltwise\nI0821 08:59:38.915844 32364 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0821 
08:59:38.915858 32364 net.cpp:434] L1_b15_sum_eltwise <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:38.915880 32364 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0821 08:59:38.915935 32364 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0821 08:59:38.915961 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.915971 32364 net.cpp:165] Memory required for data: 1102234800\nI0821 08:59:38.915982 32364 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0821 08:59:38.916000 32364 net.cpp:100] Creating Layer L1_b15_relu\nI0821 08:59:38.916013 32364 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0821 08:59:38.916026 32364 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0821 08:59:38.916046 32364 net.cpp:150] Setting up L1_b15_relu\nI0821 08:59:38.916060 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.916070 32364 net.cpp:165] Memory required for data: 1108788400\nI0821 08:59:38.916080 32364 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:38.916095 32364 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:38.916105 32364 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0821 08:59:38.916121 32364 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:38.916141 32364 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:38.916234 32364 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:38.916252 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.916266 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.916276 32364 net.cpp:165] Memory required for data: 1121895600\nI0821 08:59:38.916286 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0821 08:59:38.916313 32364 net.cpp:100] 
Creating Layer L1_b16_cbr1_conv\nI0821 08:59:38.916327 32364 net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:38.916347 32364 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0821 08:59:38.916713 32364 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0821 08:59:38.916733 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.916743 32364 net.cpp:165] Memory required for data: 1128449200\nI0821 08:59:38.916759 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0821 08:59:38.916780 32364 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0821 08:59:38.916793 32364 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0821 08:59:38.916810 32364 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0821 08:59:38.917094 32364 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0821 08:59:38.917114 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.917122 32364 net.cpp:165] Memory required for data: 1135002800\nI0821 08:59:38.917145 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:38.917168 32364 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0821 08:59:38.917181 32364 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0821 08:59:38.917196 32364 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:38.917286 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:38.917469 32364 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0821 08:59:38.917487 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.917496 32364 net.cpp:165] Memory required for data: 1141556400\nI0821 08:59:38.917515 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0821 08:59:38.917531 32364 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0821 08:59:38.917541 32364 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0821 08:59:38.917562 32364 net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:38.917582 32364 net.cpp:150] 
Setting up L1_b16_cbr1_relu\nI0821 08:59:38.917596 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.917606 32364 net.cpp:165] Memory required for data: 1148110000\nI0821 08:59:38.917616 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0821 08:59:38.917640 32364 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0821 08:59:38.917652 32364 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0821 08:59:38.917681 32364 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0821 08:59:38.918062 32364 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0821 08:59:38.918082 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.918092 32364 net.cpp:165] Memory required for data: 1154663600\nI0821 08:59:38.918110 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0821 08:59:38.918133 32364 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0821 08:59:38.918145 32364 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0821 08:59:38.918170 32364 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0821 08:59:38.918448 32364 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0821 08:59:38.918471 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.918483 32364 net.cpp:165] Memory required for data: 1161217200\nI0821 08:59:38.918503 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:38.918520 32364 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0821 08:59:38.918532 32364 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0821 08:59:38.918547 32364 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0821 08:59:38.918634 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:38.918814 32364 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0821 08:59:38.918833 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.918843 32364 net.cpp:165] Memory required for data: 1167770800\nI0821 08:59:38.918861 32364 layer_factory.hpp:77] Creating layer 
L1_b16_sum_eltwise\nI0821 08:59:38.918882 32364 net.cpp:100] Creating Layer L1_b16_sum_eltwise\nI0821 08:59:38.918895 32364 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0821 08:59:38.918908 32364 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:38.918929 32364 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0821 08:59:38.918983 32364 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0821 08:59:38.919001 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.919011 32364 net.cpp:165] Memory required for data: 1174324400\nI0821 08:59:38.919021 32364 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0821 08:59:38.919042 32364 net.cpp:100] Creating Layer L1_b16_relu\nI0821 08:59:38.919055 32364 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0821 08:59:38.919070 32364 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0821 08:59:38.919088 32364 net.cpp:150] Setting up L1_b16_relu\nI0821 08:59:38.919103 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.919112 32364 net.cpp:165] Memory required for data: 1180878000\nI0821 08:59:38.919122 32364 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:38.919137 32364 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:38.919155 32364 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0821 08:59:38.919173 32364 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:38.919193 32364 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:38.919275 32364 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:38.919296 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.919309 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.919319 
32364 net.cpp:165] Memory required for data: 1193985200\nI0821 08:59:38.919329 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0821 08:59:38.919354 32364 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0821 08:59:38.919369 32364 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:38.919386 32364 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0821 08:59:38.919756 32364 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0821 08:59:38.919775 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.919785 32364 net.cpp:165] Memory required for data: 1200538800\nI0821 08:59:38.919813 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0821 08:59:38.919836 32364 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0821 08:59:38.919849 32364 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0821 08:59:38.919867 32364 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0821 08:59:38.920176 32364 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0821 08:59:38.920195 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.920205 32364 net.cpp:165] Memory required for data: 1207092400\nI0821 08:59:38.920228 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:38.920243 32364 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0821 08:59:38.920255 32364 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0821 08:59:38.920270 32364 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:38.920363 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:38.920545 32364 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0821 08:59:38.920563 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.920572 32364 net.cpp:165] Memory required for data: 1213646000\nI0821 08:59:38.920590 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0821 08:59:38.920605 32364 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0821 08:59:38.920617 32364 
net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0821 08:59:38.920637 32364 net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:38.920657 32364 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0821 08:59:38.920672 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.920681 32364 net.cpp:165] Memory required for data: 1220199600\nI0821 08:59:38.920692 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0821 08:59:38.920712 32364 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0821 08:59:38.920725 32364 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0821 08:59:38.920747 32364 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0821 08:59:38.921120 32364 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0821 08:59:38.921139 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.921155 32364 net.cpp:165] Memory required for data: 1226753200\nI0821 08:59:38.921175 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0821 08:59:38.921191 32364 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0821 08:59:38.921202 32364 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0821 08:59:38.921223 32364 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0821 08:59:38.921511 32364 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0821 08:59:38.921535 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.921545 32364 net.cpp:165] Memory required for data: 1233306800\nI0821 08:59:38.921566 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:38.921581 32364 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0821 08:59:38.921592 32364 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0821 08:59:38.921608 32364 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0821 08:59:38.921697 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:38.921880 32364 net.cpp:150] Setting up L1_b17_cbr2_scale\nI0821 08:59:38.921898 32364 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0821 08:59:38.921908 32364 net.cpp:165] Memory required for data: 1239860400\nI0821 08:59:38.921926 32364 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0821 08:59:38.921949 32364 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0821 08:59:38.921962 32364 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0821 08:59:38.921975 32364 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:38.921999 32364 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0821 08:59:38.922055 32364 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0821 08:59:38.922073 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.922083 32364 net.cpp:165] Memory required for data: 1246414000\nI0821 08:59:38.922101 32364 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0821 08:59:38.922116 32364 net.cpp:100] Creating Layer L1_b17_relu\nI0821 08:59:38.922127 32364 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0821 08:59:38.922153 32364 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0821 08:59:38.922175 32364 net.cpp:150] Setting up L1_b17_relu\nI0821 08:59:38.922191 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.922200 32364 net.cpp:165] Memory required for data: 1252967600\nI0821 08:59:38.922210 32364 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:38.922224 32364 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:38.922235 32364 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0821 08:59:38.922251 32364 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:38.922271 32364 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:38.922353 32364 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 
08:59:38.922372 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.922385 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.922394 32364 net.cpp:165] Memory required for data: 1266074800\nI0821 08:59:38.922405 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0821 08:59:38.922430 32364 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0821 08:59:38.922443 32364 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:38.922462 32364 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0821 08:59:38.922842 32364 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0821 08:59:38.922863 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.922871 32364 net.cpp:165] Memory required for data: 1272628400\nI0821 08:59:38.922889 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0821 08:59:38.922910 32364 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0821 08:59:38.922924 32364 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0821 08:59:38.922940 32364 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0821 08:59:38.923264 32364 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0821 08:59:38.923285 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.923295 32364 net.cpp:165] Memory required for data: 1279182000\nI0821 08:59:38.923316 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:38.923337 32364 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0821 08:59:38.923351 32364 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0821 08:59:38.923370 32364 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:38.923467 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:38.923652 32364 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0821 08:59:38.923671 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.923681 32364 net.cpp:165] Memory required for data: 1285735600\nI0821 08:59:38.923699 
32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0821 08:59:38.923718 32364 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0821 08:59:38.923732 32364 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0821 08:59:38.923746 32364 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:38.923765 32364 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0821 08:59:38.923780 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.923790 32364 net.cpp:165] Memory required for data: 1292289200\nI0821 08:59:38.923800 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0821 08:59:38.923825 32364 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0821 08:59:38.923837 32364 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0821 08:59:38.923859 32364 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0821 08:59:38.924258 32364 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0821 08:59:38.924284 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.924294 32364 net.cpp:165] Memory required for data: 1298842800\nI0821 08:59:38.924312 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0821 08:59:38.924330 32364 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0821 08:59:38.924341 32364 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0821 08:59:38.924366 32364 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0821 08:59:38.924654 32364 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0821 08:59:38.924672 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.924681 32364 net.cpp:165] Memory required for data: 1305396400\nI0821 08:59:38.924751 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:38.924777 32364 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0821 08:59:38.924790 32364 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0821 08:59:38.924806 32364 net.cpp:395] L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0821 08:59:38.924903 32364 layer_factory.hpp:77] 
Creating layer L1_b18_cbr2_scale\nI0821 08:59:38.925084 32364 net.cpp:150] Setting up L1_b18_cbr2_scale\nI0821 08:59:38.925104 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.925113 32364 net.cpp:165] Memory required for data: 1311950000\nI0821 08:59:38.925132 32364 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0821 08:59:38.925160 32364 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0821 08:59:38.925174 32364 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0821 08:59:38.925189 32364 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:38.925204 32364 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0821 08:59:38.925261 32364 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0821 08:59:38.925279 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.925288 32364 net.cpp:165] Memory required for data: 1318503600\nI0821 08:59:38.925298 32364 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0821 08:59:38.925313 32364 net.cpp:100] Creating Layer L1_b18_relu\nI0821 08:59:38.925324 32364 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0821 08:59:38.925343 32364 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0821 08:59:38.925362 32364 net.cpp:150] Setting up L1_b18_relu\nI0821 08:59:38.925377 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.925386 32364 net.cpp:165] Memory required for data: 1325057200\nI0821 08:59:38.925396 32364 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:38.925410 32364 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:38.925420 32364 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0821 08:59:38.925439 32364 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:38.925462 32364 net.cpp:408] 
L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:38.925540 32364 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:38.925561 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.925575 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:38.925583 32364 net.cpp:165] Memory required for data: 1338164400\nI0821 08:59:38.925593 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:59:38.925618 32364 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:59:38.925631 32364 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:38.925650 32364 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:59:38.926033 32364 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:59:38.926054 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.926062 32364 net.cpp:165] Memory required for data: 1339802800\nI0821 08:59:38.926080 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:59:38.926102 32364 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:59:38.926122 32364 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:59:38.926141 32364 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:59:38.926445 32364 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:59:38.926463 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.926472 32364 net.cpp:165] Memory required for data: 1341441200\nI0821 08:59:38.926496 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:38.926512 32364 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:59:38.926522 32364 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:59:38.926538 32364 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.926632 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:38.926816 32364 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 
08:59:38.926838 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.926847 32364 net.cpp:165] Memory required for data: 1343079600\nI0821 08:59:38.926867 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:59:38.926882 32364 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:59:38.926892 32364 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:59:38.926908 32364 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.926928 32364 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:59:38.926941 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.926951 32364 net.cpp:165] Memory required for data: 1344718000\nI0821 08:59:38.926960 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:59:38.926986 32364 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:59:38.926998 32364 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:59:38.927021 32364 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:59:38.927407 32364 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:59:38.927426 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.927436 32364 net.cpp:165] Memory required for data: 1346356400\nI0821 08:59:38.927454 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:59:38.927475 32364 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:59:38.927487 32364 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:59:38.927510 32364 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:59:38.927791 32364 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:59:38.927810 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.927819 32364 net.cpp:165] Memory required for data: 1347994800\nI0821 08:59:38.927841 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:38.927858 32364 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:59:38.927870 32364 net.cpp:434] L2_b1_cbr2_scale <- 
L2_b1_cbr2_bn_top\nI0821 08:59:38.927884 32364 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:59:38.927983 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:38.928172 32364 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:59:38.928192 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.928202 32364 net.cpp:165] Memory required for data: 1349633200\nI0821 08:59:38.928220 32364 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:59:38.928246 32364 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:59:38.928261 32364 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:38.928278 32364 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:59:38.928393 32364 net.cpp:150] Setting up L2_b1_pool\nI0821 08:59:38.928413 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.928423 32364 net.cpp:165] Memory required for data: 1351271600\nI0821 08:59:38.928433 32364 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:59:38.928449 32364 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:59:38.928460 32364 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:59:38.928473 32364 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:59:38.928501 32364 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:59:38.928560 32364 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:59:38.928580 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.928589 32364 net.cpp:165] Memory required for data: 1352910000\nI0821 08:59:38.928601 32364 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:59:38.928619 32364 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:59:38.928632 32364 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:59:38.928647 32364 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:59:38.928666 32364 net.cpp:150] Setting up L2_b1_relu\nI0821 08:59:38.928681 32364 net.cpp:157] Top shape: 
100 16 16 16 (409600)\nI0821 08:59:38.928690 32364 net.cpp:165] Memory required for data: 1354548400\nI0821 08:59:38.928700 32364 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:59:38.928764 32364 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:59:38.928783 32364 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:59:38.930799 32364 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:59:38.930826 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:38.930837 32364 net.cpp:165] Memory required for data: 1356186800\nI0821 08:59:38.930848 32364 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:59:38.930865 32364 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:59:38.930876 32364 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:59:38.930889 32364 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:59:38.930912 32364 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:59:38.931011 32364 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:59:38.931031 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.931041 32364 net.cpp:165] Memory required for data: 1359463600\nI0821 08:59:38.931052 32364 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:38.931072 32364 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:38.931084 32364 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:59:38.931099 32364 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:38.931120 32364 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:38.931218 32364 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:38.931239 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.931252 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.931262 32364 net.cpp:165] Memory required for data: 1366017200\nI0821 
08:59:38.931272 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:59:38.931298 32364 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:59:38.931311 32364 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:38.931334 32364 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:59:38.932869 32364 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:59:38.932893 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.932904 32364 net.cpp:165] Memory required for data: 1369294000\nI0821 08:59:38.932924 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:59:38.932945 32364 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:59:38.932958 32364 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:59:38.932976 32364 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:59:38.933277 32364 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:59:38.933297 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.933306 32364 net.cpp:165] Memory required for data: 1372570800\nI0821 08:59:38.933329 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:38.933346 32364 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:59:38.933357 32364 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:59:38.933377 32364 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:38.933477 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:38.933679 32364 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:59:38.933698 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.933707 32364 net.cpp:165] Memory required for data: 1375847600\nI0821 08:59:38.933727 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:59:38.933742 32364 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:59:38.933753 32364 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:59:38.933768 32364 net.cpp:395] 
L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:38.933787 32364 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:59:38.933801 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.933810 32364 net.cpp:165] Memory required for data: 1379124400\nI0821 08:59:38.933820 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:59:38.933845 32364 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:59:38.933859 32364 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:59:38.933881 32364 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:59:38.934403 32364 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:59:38.934424 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.934434 32364 net.cpp:165] Memory required for data: 1382401200\nI0821 08:59:38.934453 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:59:38.934473 32364 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:59:38.934485 32364 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:59:38.934505 32364 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:59:38.934788 32364 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:59:38.934808 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.934818 32364 net.cpp:165] Memory required for data: 1385678000\nI0821 08:59:38.934839 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:38.934854 32364 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:59:38.934865 32364 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:59:38.934881 32364 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:59:38.934978 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:38.935168 32364 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:59:38.935191 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.935201 32364 net.cpp:165] Memory required for data: 1388954800\nI0821 08:59:38.935220 
32364 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:59:38.935236 32364 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:59:38.935247 32364 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:59:38.935261 32364 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:38.935277 32364 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:59:38.935328 32364 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:59:38.935346 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.935358 32364 net.cpp:165] Memory required for data: 1392231600\nI0821 08:59:38.935366 32364 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:59:38.935380 32364 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:59:38.935392 32364 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:59:38.935411 32364 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:59:38.935431 32364 net.cpp:150] Setting up L2_b2_relu\nI0821 08:59:38.935444 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.935453 32364 net.cpp:165] Memory required for data: 1395508400\nI0821 08:59:38.935462 32364 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:38.935477 32364 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:38.935488 32364 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:59:38.935508 32364 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:38.935535 32364 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:38.935619 32364 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:38.935642 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.935654 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.935663 
32364 net.cpp:165] Memory required for data: 1402062000\nI0821 08:59:38.935673 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:59:38.935699 32364 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:59:38.935712 32364 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:38.935732 32364 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:59:38.936260 32364 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:59:38.936280 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.936290 32364 net.cpp:165] Memory required for data: 1405338800\nI0821 08:59:38.936307 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:59:38.936329 32364 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:59:38.936342 32364 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:59:38.936358 32364 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:59:38.936640 32364 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:59:38.936659 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.936668 32364 net.cpp:165] Memory required for data: 1408615600\nI0821 08:59:38.936689 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:38.936707 32364 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:59:38.936718 32364 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:59:38.936733 32364 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:38.936827 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:38.937012 32364 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:59:38.937034 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.937044 32364 net.cpp:165] Memory required for data: 1411892400\nI0821 08:59:38.937063 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:59:38.937078 32364 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:59:38.937089 32364 net.cpp:434] L2_b3_cbr1_relu <- 
L2_b3_cbr1_bn_top\nI0821 08:59:38.937103 32364 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:38.937122 32364 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:59:38.937136 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.937151 32364 net.cpp:165] Memory required for data: 1415169200\nI0821 08:59:38.937163 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:59:38.937189 32364 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:59:38.937202 32364 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:59:38.937224 32364 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:59:38.937744 32364 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:59:38.937765 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.937774 32364 net.cpp:165] Memory required for data: 1418446000\nI0821 08:59:38.937791 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:59:38.937813 32364 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:59:38.937825 32364 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:59:38.937847 32364 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:59:38.938141 32364 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:59:38.938168 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.938177 32364 net.cpp:165] Memory required for data: 1421722800\nI0821 08:59:38.938199 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:38.938215 32364 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:59:38.938226 32364 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:59:38.938242 32364 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:59:38.938355 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:38.938544 32364 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:59:38.938561 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.938571 32364 net.cpp:165] 
Memory required for data: 1424999600\nI0821 08:59:38.938591 32364 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:59:38.938611 32364 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:59:38.938623 32364 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:59:38.938637 32364 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:38.938653 32364 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:59:38.938701 32364 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:59:38.938719 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.938729 32364 net.cpp:165] Memory required for data: 1428276400\nI0821 08:59:38.938738 32364 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:59:38.938756 32364 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:59:38.938769 32364 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:59:38.938786 32364 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:59:38.938802 32364 net.cpp:150] Setting up L2_b3_relu\nI0821 08:59:38.938817 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.938827 32364 net.cpp:165] Memory required for data: 1431553200\nI0821 08:59:38.938836 32364 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:38.938849 32364 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:38.938861 32364 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:59:38.938876 32364 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:38.938895 32364 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:38.938984 32364 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:38.939002 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.939018 32364 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.939025 32364 net.cpp:165] Memory required for data: 1438106800\nI0821 08:59:38.939036 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:59:38.939060 32364 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:59:38.939074 32364 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:38.939092 32364 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:59:38.939623 32364 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:59:38.939643 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.939652 32364 net.cpp:165] Memory required for data: 1441383600\nI0821 08:59:38.939671 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:59:38.939692 32364 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:59:38.939704 32364 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:59:38.939725 32364 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:59:38.940014 32364 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:59:38.940033 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.940042 32364 net.cpp:165] Memory required for data: 1444660400\nI0821 08:59:38.940064 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:38.940081 32364 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:59:38.940093 32364 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:59:38.940109 32364 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:38.940212 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:38.940397 32364 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:59:38.940423 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.940434 32364 net.cpp:165] Memory required for data: 1447937200\nI0821 08:59:38.940454 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:59:38.940479 32364 net.cpp:100] Creating Layer 
L2_b4_cbr1_relu\nI0821 08:59:38.940491 32364 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:59:38.940507 32364 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:38.940527 32364 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:59:38.940541 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.940551 32364 net.cpp:165] Memory required for data: 1451214000\nI0821 08:59:38.940560 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:59:38.940587 32364 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:59:38.940600 32364 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:59:38.940623 32364 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:59:38.941153 32364 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:59:38.941171 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.941181 32364 net.cpp:165] Memory required for data: 1454490800\nI0821 08:59:38.941198 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:59:38.941221 32364 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:59:38.941233 32364 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:59:38.941257 32364 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:59:38.941552 32364 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:59:38.941572 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.941581 32364 net.cpp:165] Memory required for data: 1457767600\nI0821 08:59:38.941603 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:38.941620 32364 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:59:38.941632 32364 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:59:38.941648 32364 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:59:38.941743 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:38.941931 32364 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:59:38.941951 32364 net.cpp:157] 
Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.941959 32364 net.cpp:165] Memory required for data: 1461044400\nI0821 08:59:38.941977 32364 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:59:38.942001 32364 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:59:38.942013 32364 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:59:38.942028 32364 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:38.942044 32364 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:59:38.942090 32364 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:59:38.942107 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.942117 32364 net.cpp:165] Memory required for data: 1464321200\nI0821 08:59:38.942127 32364 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:59:38.942152 32364 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:59:38.942167 32364 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:59:38.942181 32364 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:59:38.942200 32364 net.cpp:150] Setting up L2_b4_relu\nI0821 08:59:38.942215 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.942224 32364 net.cpp:165] Memory required for data: 1467598000\nI0821 08:59:38.942234 32364 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:38.942247 32364 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:38.942258 32364 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:59:38.942272 32364 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:38.942292 32364 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:38.942376 32364 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:38.942395 32364 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.942409 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.942427 32364 net.cpp:165] Memory required for data: 1474151600\nI0821 08:59:38.942437 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:59:38.942462 32364 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:59:38.942476 32364 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:38.942495 32364 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:59:38.943017 32364 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:59:38.943035 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.943045 32364 net.cpp:165] Memory required for data: 1477428400\nI0821 08:59:38.943063 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:59:38.943085 32364 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:59:38.943097 32364 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:59:38.943117 32364 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:59:38.943418 32364 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:59:38.943436 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.943445 32364 net.cpp:165] Memory required for data: 1480705200\nI0821 08:59:38.943467 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:38.943483 32364 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:59:38.943495 32364 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:59:38.943511 32364 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:38.943604 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:38.943790 32364 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:59:38.943809 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.943819 32364 net.cpp:165] Memory required for data: 1483982000\nI0821 08:59:38.943837 32364 layer_factory.hpp:77] Creating layer 
L2_b5_cbr1_relu\nI0821 08:59:38.943856 32364 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:59:38.943868 32364 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:59:38.943882 32364 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:38.943902 32364 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:59:38.943917 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.943925 32364 net.cpp:165] Memory required for data: 1487258800\nI0821 08:59:38.943935 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:59:38.943960 32364 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:59:38.943974 32364 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:59:38.943991 32364 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:59:38.944524 32364 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:59:38.944543 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.944553 32364 net.cpp:165] Memory required for data: 1490535600\nI0821 08:59:38.944571 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:59:38.944592 32364 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:59:38.944604 32364 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:59:38.944622 32364 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:59:38.944914 32364 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:59:38.944937 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.944947 32364 net.cpp:165] Memory required for data: 1493812400\nI0821 08:59:38.944969 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:38.944986 32364 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:59:38.944998 32364 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:59:38.945013 32364 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:59:38.945106 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:38.945304 32364 net.cpp:150] 
Setting up L2_b5_cbr2_scale\nI0821 08:59:38.945323 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.945333 32364 net.cpp:165] Memory required for data: 1497089200\nI0821 08:59:38.945351 32364 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:59:38.945377 32364 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:59:38.945389 32364 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:59:38.945402 32364 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:38.945425 32364 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:59:38.945473 32364 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:59:38.945492 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.945500 32364 net.cpp:165] Memory required for data: 1500366000\nI0821 08:59:38.945511 32364 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:59:38.945530 32364 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:59:38.945541 32364 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:59:38.945556 32364 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:59:38.945575 32364 net.cpp:150] Setting up L2_b5_relu\nI0821 08:59:38.945590 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.945600 32364 net.cpp:165] Memory required for data: 1503642800\nI0821 08:59:38.945610 32364 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:38.945622 32364 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:38.945632 32364 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:59:38.945648 32364 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:38.945667 32364 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:38.945753 32364 net.cpp:150] Setting up 
L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:38.945773 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.945786 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.945796 32364 net.cpp:165] Memory required for data: 1510196400\nI0821 08:59:38.945806 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:59:38.945832 32364 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:59:38.945845 32364 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:38.945864 32364 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:59:38.946393 32364 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:59:38.946413 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.946422 32364 net.cpp:165] Memory required for data: 1513473200\nI0821 08:59:38.946440 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:59:38.946461 32364 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:59:38.946473 32364 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:59:38.946494 32364 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:59:38.946789 32364 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:59:38.946808 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.946817 32364 net.cpp:165] Memory required for data: 1516750000\nI0821 08:59:38.946840 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:38.946856 32364 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:59:38.946867 32364 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:59:38.946882 32364 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:38.946979 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:38.948158 32364 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:59:38.948179 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.948189 32364 net.cpp:165] Memory required for data: 
1520026800\nI0821 08:59:38.948209 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:59:38.948225 32364 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:59:38.948236 32364 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:59:38.948256 32364 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:38.948277 32364 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:59:38.948300 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.948310 32364 net.cpp:165] Memory required for data: 1523303600\nI0821 08:59:38.948320 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:59:38.948343 32364 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:59:38.948354 32364 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:59:38.948372 32364 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:59:38.948890 32364 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:59:38.948910 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.948920 32364 net.cpp:165] Memory required for data: 1526580400\nI0821 08:59:38.948937 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:59:38.948958 32364 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:59:38.948971 32364 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:59:38.948987 32364 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:59:38.949285 32364 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:59:38.949303 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.949313 32364 net.cpp:165] Memory required for data: 1529857200\nI0821 08:59:38.949335 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:38.949352 32364 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:59:38.949363 32364 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:59:38.949383 32364 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:59:38.949476 32364 
layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:38.949663 32364 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:59:38.949681 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.949692 32364 net.cpp:165] Memory required for data: 1533134000\nI0821 08:59:38.949709 32364 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:59:38.949725 32364 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:59:38.949736 32364 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:59:38.949749 32364 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:38.949769 32364 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:59:38.949817 32364 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:59:38.949836 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.949844 32364 net.cpp:165] Memory required for data: 1536410800\nI0821 08:59:38.949856 32364 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:59:38.949869 32364 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:59:38.949887 32364 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:59:38.949903 32364 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:59:38.949923 32364 net.cpp:150] Setting up L2_b6_relu\nI0821 08:59:38.949937 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.949946 32364 net.cpp:165] Memory required for data: 1539687600\nI0821 08:59:38.949956 32364 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:38.949970 32364 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:38.949981 32364 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:59:38.950006 32364 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:38.950027 32364 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:38.950106 32364 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:38.950125 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.950139 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.950153 32364 net.cpp:165] Memory required for data: 1546241200\nI0821 08:59:38.950165 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:59:38.950189 32364 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:59:38.950202 32364 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:38.950230 32364 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:59:38.950934 32364 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:59:38.950954 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.950964 32364 net.cpp:165] Memory required for data: 1549518000\nI0821 08:59:38.950983 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:59:38.951004 32364 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:59:38.951017 32364 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:59:38.951035 32364 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:59:38.951336 32364 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:59:38.951355 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.951365 32364 net.cpp:165] Memory required for data: 1552794800\nI0821 08:59:38.951386 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:38.951403 32364 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:59:38.951414 32364 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:59:38.951434 32364 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:38.951524 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:38.951714 32364 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:59:38.951731 32364 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0821 08:59:38.951741 32364 net.cpp:165] Memory required for data: 1556071600\nI0821 08:59:38.951759 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:59:38.951774 32364 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:59:38.951786 32364 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:59:38.951805 32364 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:38.951825 32364 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:59:38.951839 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.951849 32364 net.cpp:165] Memory required for data: 1559348400\nI0821 08:59:38.951859 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:59:38.951884 32364 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:59:38.951896 32364 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:59:38.951915 32364 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:59:38.952446 32364 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:59:38.952466 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.952476 32364 net.cpp:165] Memory required for data: 1562625200\nI0821 08:59:38.952493 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:59:38.952512 32364 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:59:38.952528 32364 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:59:38.952545 32364 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:59:38.952832 32364 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:59:38.952852 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.952862 32364 net.cpp:165] Memory required for data: 1565902000\nI0821 08:59:38.952883 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:38.952935 32364 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:59:38.952950 32364 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:59:38.952966 32364 net.cpp:395] 
L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:59:38.953060 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:38.953251 32364 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:59:38.953270 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.953280 32364 net.cpp:165] Memory required for data: 1569178800\nI0821 08:59:38.953300 32364 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:59:38.953322 32364 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:59:38.953335 32364 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:59:38.953347 32364 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:38.953364 32364 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:59:38.953420 32364 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:59:38.953439 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.953449 32364 net.cpp:165] Memory required for data: 1572455600\nI0821 08:59:38.953459 32364 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:59:38.953474 32364 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:59:38.953485 32364 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:59:38.953505 32364 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:59:38.953526 32364 net.cpp:150] Setting up L2_b7_relu\nI0821 08:59:38.953539 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.953548 32364 net.cpp:165] Memory required for data: 1575732400\nI0821 08:59:38.953559 32364 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:38.953573 32364 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:38.953584 32364 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:59:38.953604 32364 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 
08:59:38.953624 32364 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:38.953711 32364 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:38.953729 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.953742 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.953752 32364 net.cpp:165] Memory required for data: 1582286000\nI0821 08:59:38.953761 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:59:38.953783 32364 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:59:38.953794 32364 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:38.953817 32364 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:59:38.954349 32364 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:59:38.954370 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.954380 32364 net.cpp:165] Memory required for data: 1585562800\nI0821 08:59:38.954397 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:59:38.954419 32364 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:59:38.954432 32364 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:59:38.954449 32364 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:59:38.954738 32364 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:59:38.954757 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.954767 32364 net.cpp:165] Memory required for data: 1588839600\nI0821 08:59:38.954788 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:38.954814 32364 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:59:38.954828 32364 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:59:38.954844 32364 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:38.954941 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:38.955129 32364 net.cpp:150] Setting up 
L2_b8_cbr1_scale\nI0821 08:59:38.955154 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.955165 32364 net.cpp:165] Memory required for data: 1592116400\nI0821 08:59:38.955183 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:59:38.955204 32364 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:59:38.955215 32364 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:59:38.955235 32364 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:38.955255 32364 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:59:38.955269 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.955279 32364 net.cpp:165] Memory required for data: 1595393200\nI0821 08:59:38.955288 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:59:38.955308 32364 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:59:38.955329 32364 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:59:38.955351 32364 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:59:38.955870 32364 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:59:38.955889 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.955899 32364 net.cpp:165] Memory required for data: 1598670000\nI0821 08:59:38.955917 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:59:38.955935 32364 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:59:38.955947 32364 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:59:38.955971 32364 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:59:38.956267 32364 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:59:38.956287 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.956296 32364 net.cpp:165] Memory required for data: 1601946800\nI0821 08:59:38.956318 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:38.956341 32364 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:59:38.956353 32364 net.cpp:434] L2_b8_cbr2_scale 
<- L2_b8_cbr2_bn_top\nI0821 08:59:38.956369 32364 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:59:38.956457 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:38.956645 32364 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:59:38.956665 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.956674 32364 net.cpp:165] Memory required for data: 1605223600\nI0821 08:59:38.956692 32364 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:59:38.956713 32364 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:59:38.956725 32364 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:59:38.956739 32364 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:38.956756 32364 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:59:38.956806 32364 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:59:38.956823 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.956833 32364 net.cpp:165] Memory required for data: 1608500400\nI0821 08:59:38.956843 32364 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:59:38.956857 32364 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:59:38.956869 32364 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:59:38.956892 32364 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:59:38.956912 32364 net.cpp:150] Setting up L2_b8_relu\nI0821 08:59:38.956928 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.956936 32364 net.cpp:165] Memory required for data: 1611777200\nI0821 08:59:38.956946 32364 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:38.956960 32364 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:38.956972 32364 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:59:38.956987 32364 net.cpp:408] 
L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:38.957006 32364 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:38.957092 32364 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:38.957109 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.957123 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.957132 32364 net.cpp:165] Memory required for data: 1618330800\nI0821 08:59:38.957142 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:59:38.957170 32364 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:59:38.957182 32364 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:38.957206 32364 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:59:38.957722 32364 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:59:38.957742 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.957752 32364 net.cpp:165] Memory required for data: 1621607600\nI0821 08:59:38.957780 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:59:38.957798 32364 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:59:38.957809 32364 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:59:38.957831 32364 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:59:38.958128 32364 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:59:38.958153 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.958164 32364 net.cpp:165] Memory required for data: 1624884400\nI0821 08:59:38.958185 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:38.958207 32364 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:59:38.958220 32364 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:59:38.958236 32364 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:38.958333 32364 
layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:38.958520 32364 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:59:38.958539 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.958549 32364 net.cpp:165] Memory required for data: 1628161200\nI0821 08:59:38.958566 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:59:38.958586 32364 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:59:38.958598 32364 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:59:38.958613 32364 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:38.958632 32364 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:59:38.958647 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.958657 32364 net.cpp:165] Memory required for data: 1631438000\nI0821 08:59:38.958667 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:59:38.958690 32364 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:59:38.958704 32364 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:59:38.958726 32364 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:59:38.959246 32364 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:59:38.959266 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.959276 32364 net.cpp:165] Memory required for data: 1634714800\nI0821 08:59:38.959295 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:59:38.959311 32364 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:59:38.959322 32364 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:59:38.959347 32364 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:59:38.959641 32364 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:59:38.959659 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.959669 32364 net.cpp:165] Memory required for data: 1637991600\nI0821 08:59:38.959692 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:38.959712 
32364 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:59:38.959725 32364 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:59:38.959741 32364 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:59:38.959830 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:38.960021 32364 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:59:38.960041 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.960050 32364 net.cpp:165] Memory required for data: 1641268400\nI0821 08:59:38.960068 32364 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:59:38.960090 32364 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:59:38.960103 32364 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:59:38.960116 32364 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:38.960132 32364 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:59:38.960191 32364 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:59:38.960209 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.960219 32364 net.cpp:165] Memory required for data: 1644545200\nI0821 08:59:38.960230 32364 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:59:38.960253 32364 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:59:38.960265 32364 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:59:38.960280 32364 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:59:38.960304 32364 net.cpp:150] Setting up L2_b9_relu\nI0821 08:59:38.960320 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.960330 32364 net.cpp:165] Memory required for data: 1647822000\nI0821 08:59:38.960340 32364 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:38.960353 32364 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:38.960366 32364 net.cpp:434] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:59:38.960381 32364 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:38.960400 32364 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:38.960486 32364 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:38.960505 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.960518 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.960528 32364 net.cpp:165] Memory required for data: 1654375600\nI0821 08:59:38.960538 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0821 08:59:38.960559 32364 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0821 08:59:38.960572 32364 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:38.960595 32364 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0821 08:59:38.961115 32364 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0821 08:59:38.961134 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.961144 32364 net.cpp:165] Memory required for data: 1657652400\nI0821 08:59:38.961169 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0821 08:59:38.961187 32364 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0821 08:59:38.961199 32364 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0821 08:59:38.961220 32364 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0821 08:59:38.961508 32364 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0821 08:59:38.961527 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.961537 32364 net.cpp:165] Memory required for data: 1660929200\nI0821 08:59:38.961557 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:38.961578 32364 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0821 08:59:38.961591 32364 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0821 
08:59:38.961607 32364 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:38.961697 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:38.961889 32364 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0821 08:59:38.961907 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.961917 32364 net.cpp:165] Memory required for data: 1664206000\nI0821 08:59:38.961935 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0821 08:59:38.961954 32364 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0821 08:59:38.961966 32364 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0821 08:59:38.961980 32364 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:38.962000 32364 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0821 08:59:38.962013 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.962023 32364 net.cpp:165] Memory required for data: 1667482800\nI0821 08:59:38.962033 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0821 08:59:38.962059 32364 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0821 08:59:38.962071 32364 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0821 08:59:38.962095 32364 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0821 08:59:38.962617 32364 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0821 08:59:38.962636 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.962654 32364 net.cpp:165] Memory required for data: 1670759600\nI0821 08:59:38.962673 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0821 08:59:38.962690 32364 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0821 08:59:38.962703 32364 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0821 08:59:38.962719 32364 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0821 08:59:38.963021 32364 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0821 08:59:38.963039 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963049 32364 net.cpp:165] Memory 
required for data: 1674036400\nI0821 08:59:38.963070 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:38.963091 32364 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0821 08:59:38.963104 32364 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0821 08:59:38.963119 32364 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0821 08:59:38.963214 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:38.963404 32364 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0821 08:59:38.963423 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963433 32364 net.cpp:165] Memory required for data: 1677313200\nI0821 08:59:38.963450 32364 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0821 08:59:38.963467 32364 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0821 08:59:38.963479 32364 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0821 08:59:38.963492 32364 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:38.963513 32364 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0821 08:59:38.963562 32364 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0821 08:59:38.963583 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963594 32364 net.cpp:165] Memory required for data: 1680590000\nI0821 08:59:38.963604 32364 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0821 08:59:38.963618 32364 net.cpp:100] Creating Layer L2_b10_relu\nI0821 08:59:38.963630 32364 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0821 08:59:38.963644 32364 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0821 08:59:38.963662 32364 net.cpp:150] Setting up L2_b10_relu\nI0821 08:59:38.963677 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963686 32364 net.cpp:165] Memory required for data: 1683866800\nI0821 08:59:38.963696 32364 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 
08:59:38.963714 32364 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:38.963726 32364 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0821 08:59:38.963742 32364 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:38.963762 32364 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:38.963843 32364 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:38.963862 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963876 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.963884 32364 net.cpp:165] Memory required for data: 1690420400\nI0821 08:59:38.963894 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0821 08:59:38.963915 32364 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0821 08:59:38.963927 32364 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:38.963949 32364 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0821 08:59:38.964469 32364 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0821 08:59:38.964488 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.964498 32364 net.cpp:165] Memory required for data: 1693697200\nI0821 08:59:38.964516 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0821 08:59:38.964534 32364 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0821 08:59:38.964546 32364 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0821 08:59:38.964578 32364 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0821 08:59:38.964879 32364 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0821 08:59:38.964900 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.964908 32364 net.cpp:165] Memory required for data: 1696974000\nI0821 08:59:38.964931 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 
08:59:38.964951 32364 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0821 08:59:38.964963 32364 net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0821 08:59:38.964977 32364 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:38.965076 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:38.965270 32364 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0821 08:59:38.965291 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.965299 32364 net.cpp:165] Memory required for data: 1700250800\nI0821 08:59:38.965318 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0821 08:59:38.965333 32364 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0821 08:59:38.965345 32364 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0821 08:59:38.965366 32364 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:38.965387 32364 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0821 08:59:38.965402 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.965411 32364 net.cpp:165] Memory required for data: 1703527600\nI0821 08:59:38.965421 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0821 08:59:38.965445 32364 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0821 08:59:38.965459 32364 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0821 08:59:38.965481 32364 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0821 08:59:38.965992 32364 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0821 08:59:38.966012 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.966022 32364 net.cpp:165] Memory required for data: 1706804400\nI0821 08:59:38.966039 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0821 08:59:38.966056 32364 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0821 08:59:38.966068 32364 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0821 08:59:38.966084 32364 net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0821 08:59:38.966383 32364 
net.cpp:150] Setting up L2_b11_cbr2_bn\nI0821 08:59:38.966403 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.966411 32364 net.cpp:165] Memory required for data: 1710081200\nI0821 08:59:38.966433 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:38.966449 32364 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0821 08:59:38.966460 32364 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0821 08:59:38.966481 32364 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0821 08:59:38.966574 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:38.966761 32364 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0821 08:59:38.966780 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.966789 32364 net.cpp:165] Memory required for data: 1713358000\nI0821 08:59:38.966807 32364 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0821 08:59:38.966823 32364 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0821 08:59:38.966835 32364 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0821 08:59:38.966848 32364 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:38.966871 32364 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0821 08:59:38.966919 32364 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0821 08:59:38.966941 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.966953 32364 net.cpp:165] Memory required for data: 1716634800\nI0821 08:59:38.966964 32364 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0821 08:59:38.966979 32364 net.cpp:100] Creating Layer L2_b11_relu\nI0821 08:59:38.966989 32364 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0821 08:59:38.967005 32364 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0821 08:59:38.967032 32364 net.cpp:150] Setting up L2_b11_relu\nI0821 08:59:38.967048 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.967057 32364 
net.cpp:165] Memory required for data: 1719911600\nI0821 08:59:38.967067 32364 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:38.967085 32364 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:38.967097 32364 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0821 08:59:38.967113 32364 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:38.967133 32364 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:38.967222 32364 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:38.967247 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.967262 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.967272 32364 net.cpp:165] Memory required for data: 1726465200\nI0821 08:59:38.967283 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0821 08:59:38.967301 32364 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0821 08:59:38.967314 32364 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:38.967332 32364 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0821 08:59:38.967854 32364 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0821 08:59:38.967880 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.967890 32364 net.cpp:165] Memory required for data: 1729742000\nI0821 08:59:38.967908 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0821 08:59:38.967926 32364 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0821 08:59:38.967938 32364 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0821 08:59:38.967958 32364 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0821 08:59:38.968257 32364 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0821 08:59:38.968277 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:38.968286 32364 net.cpp:165] Memory required for data: 1733018800\nI0821 08:59:38.968308 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:38.968329 32364 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0821 08:59:38.968343 32364 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0821 08:59:38.968358 32364 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:38.968444 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:38.968638 32364 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0821 08:59:38.968657 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.968667 32364 net.cpp:165] Memory required for data: 1736295600\nI0821 08:59:38.968684 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0821 08:59:38.968700 32364 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0821 08:59:38.968713 32364 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0821 08:59:38.968730 32364 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:38.968750 32364 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0821 08:59:38.968765 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.968775 32364 net.cpp:165] Memory required for data: 1739572400\nI0821 08:59:38.968785 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0821 08:59:38.968808 32364 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0821 08:59:38.968822 32364 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0821 08:59:38.968838 32364 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0821 08:59:38.969363 32364 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0821 08:59:38.969383 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.969393 32364 net.cpp:165] Memory required for data: 1742849200\nI0821 08:59:38.969411 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0821 08:59:38.969444 32364 net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0821 08:59:38.969457 32364 
net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0821 08:59:38.969475 32364 net.cpp:408] L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0821 08:59:38.969779 32364 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0821 08:59:38.969797 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.969807 32364 net.cpp:165] Memory required for data: 1746126000\nI0821 08:59:38.969830 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:38.969846 32364 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0821 08:59:38.969857 32364 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0821 08:59:38.969877 32364 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0821 08:59:38.969971 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:38.970165 32364 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0821 08:59:38.970185 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.970193 32364 net.cpp:165] Memory required for data: 1749402800\nI0821 08:59:38.970212 32364 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0821 08:59:38.970229 32364 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0821 08:59:38.970240 32364 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0821 08:59:38.970254 32364 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:38.970275 32364 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0821 08:59:38.970322 32364 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0821 08:59:38.970340 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.970350 32364 net.cpp:165] Memory required for data: 1752679600\nI0821 08:59:38.970360 32364 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0821 08:59:38.970386 32364 net.cpp:100] Creating Layer L2_b12_relu\nI0821 08:59:38.970399 32364 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0821 08:59:38.970414 32364 net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0821 
08:59:38.970432 32364 net.cpp:150] Setting up L2_b12_relu\nI0821 08:59:38.970448 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.970456 32364 net.cpp:165] Memory required for data: 1755956400\nI0821 08:59:38.970466 32364 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:38.970490 32364 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:38.970504 32364 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0821 08:59:38.970518 32364 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:38.970538 32364 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:38.970615 32364 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:38.970639 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.970654 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.970662 32364 net.cpp:165] Memory required for data: 1762510000\nI0821 08:59:38.970672 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0821 08:59:38.970692 32364 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0821 08:59:38.970705 32364 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:38.970723 32364 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0821 08:59:38.971261 32364 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0821 08:59:38.971282 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.971290 32364 net.cpp:165] Memory required for data: 1765786800\nI0821 08:59:38.971307 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0821 08:59:38.971329 32364 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0821 08:59:38.971341 32364 net.cpp:434] L2_b13_cbr1_bn <- L2_b13_cbr1_conv_top\nI0821 08:59:38.971357 32364 net.cpp:408] L2_b13_cbr1_bn -> 
L2_b13_cbr1_bn_top\nI0821 08:59:38.971666 32364 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0821 08:59:38.971693 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.971704 32364 net.cpp:165] Memory required for data: 1769063600\nI0821 08:59:38.971724 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:38.971740 32364 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0821 08:59:38.971752 32364 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0821 08:59:38.971771 32364 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:38.971864 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:38.972057 32364 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0821 08:59:38.972077 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.972086 32364 net.cpp:165] Memory required for data: 1772340400\nI0821 08:59:38.972105 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0821 08:59:38.972121 32364 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0821 08:59:38.972131 32364 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0821 08:59:38.972157 32364 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:38.972178 32364 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0821 08:59:38.972193 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.972203 32364 net.cpp:165] Memory required for data: 1775617200\nI0821 08:59:38.972213 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0821 08:59:38.972235 32364 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0821 08:59:38.972249 32364 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0821 08:59:38.972266 32364 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0821 08:59:38.972785 32364 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0821 08:59:38.972805 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.972815 32364 net.cpp:165] Memory required for data: 1778894000\nI0821 
08:59:38.972832 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0821 08:59:38.972854 32364 net.cpp:100] Creating Layer L2_b13_cbr2_bn\nI0821 08:59:38.972867 32364 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0821 08:59:38.972882 32364 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0821 08:59:38.973186 32364 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0821 08:59:38.973204 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.973214 32364 net.cpp:165] Memory required for data: 1782170800\nI0821 08:59:38.973235 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:38.973253 32364 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0821 08:59:38.973263 32364 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0821 08:59:38.973289 32364 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0821 08:59:38.973378 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:38.973565 32364 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0821 08:59:38.973584 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.973593 32364 net.cpp:165] Memory required for data: 1785447600\nI0821 08:59:38.973611 32364 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0821 08:59:38.973628 32364 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0821 08:59:38.973640 32364 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0821 08:59:38.973654 32364 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:38.973673 32364 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0821 08:59:38.973721 32364 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0821 08:59:38.973738 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.973748 32364 net.cpp:165] Memory required for data: 1788724400\nI0821 08:59:38.973758 32364 layer_factory.hpp:77] Creating layer L2_b13_relu\nI0821 08:59:38.973776 32364 net.cpp:100] Creating Layer L2_b13_relu\nI0821 
08:59:38.973789 32364 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0821 08:59:38.973804 32364 net.cpp:395] L2_b13_relu -> L2_b13_sum_eltwise_top (in-place)\nI0821 08:59:38.973822 32364 net.cpp:150] Setting up L2_b13_relu\nI0821 08:59:38.973837 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.973855 32364 net.cpp:165] Memory required for data: 1792001200\nI0821 08:59:38.973866 32364 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:38.973881 32364 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:38.973892 32364 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0821 08:59:38.973913 32364 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:38.973935 32364 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:38.974020 32364 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:38.974040 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.974054 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.974063 32364 net.cpp:165] Memory required for data: 1798554800\nI0821 08:59:38.974073 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0821 08:59:38.974099 32364 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0821 08:59:38.974113 32364 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:38.974131 32364 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0821 08:59:38.974658 32364 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0821 08:59:38.974678 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.974687 32364 net.cpp:165] Memory required for data: 1801831600\nI0821 08:59:38.974706 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0821 08:59:38.974727 32364 net.cpp:100] Creating 
Layer L2_b14_cbr1_bn\nI0821 08:59:38.974740 32364 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0821 08:59:38.974757 32364 net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0821 08:59:38.975045 32364 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0821 08:59:38.975064 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.975075 32364 net.cpp:165] Memory required for data: 1805108400\nI0821 08:59:38.975095 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:38.975112 32364 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0821 08:59:38.975123 32364 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0821 08:59:38.975143 32364 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:38.975241 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:38.975431 32364 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0821 08:59:38.975450 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.975459 32364 net.cpp:165] Memory required for data: 1808385200\nI0821 08:59:38.975477 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0821 08:59:38.975492 32364 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0821 08:59:38.975504 32364 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0821 08:59:38.975528 32364 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:38.975550 32364 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0821 08:59:38.975564 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.975574 32364 net.cpp:165] Memory required for data: 1811662000\nI0821 08:59:38.975584 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0821 08:59:38.975608 32364 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0821 08:59:38.975622 32364 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0821 08:59:38.975641 32364 net.cpp:408] L2_b14_cbr2_conv -> L2_b14_cbr2_conv_top\nI0821 08:59:38.976164 32364 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0821 
08:59:38.976183 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.976193 32364 net.cpp:165] Memory required for data: 1814938800\nI0821 08:59:38.976212 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0821 08:59:38.976233 32364 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0821 08:59:38.976246 32364 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0821 08:59:38.976263 32364 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0821 08:59:38.976567 32364 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0821 08:59:38.976586 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.976595 32364 net.cpp:165] Memory required for data: 1818215600\nI0821 08:59:38.976617 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:38.976634 32364 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0821 08:59:38.976645 32364 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0821 08:59:38.976660 32364 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0821 08:59:38.976754 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:38.976943 32364 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0821 08:59:38.976965 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.976975 32364 net.cpp:165] Memory required for data: 1821492400\nI0821 08:59:38.976994 32364 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0821 08:59:38.977010 32364 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0821 08:59:38.977022 32364 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0821 08:59:38.977035 32364 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:38.977052 32364 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0821 08:59:38.977103 32364 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0821 08:59:38.977121 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.977131 32364 net.cpp:165] Memory required for data: 1824769200\nI0821 
08:59:38.977141 32364 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0821 08:59:38.977162 32364 net.cpp:100] Creating Layer L2_b14_relu\nI0821 08:59:38.977174 32364 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0821 08:59:38.977193 32364 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0821 08:59:38.977213 32364 net.cpp:150] Setting up L2_b14_relu\nI0821 08:59:38.977228 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.977237 32364 net.cpp:165] Memory required for data: 1828046000\nI0821 08:59:38.977247 32364 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:38.977262 32364 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:38.977272 32364 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0821 08:59:38.977294 32364 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:38.977316 32364 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:38.977398 32364 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:38.977421 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.977433 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.977442 32364 net.cpp:165] Memory required for data: 1834599600\nI0821 08:59:38.977452 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0821 08:59:38.977476 32364 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0821 08:59:38.977490 32364 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:38.977509 32364 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0821 08:59:38.978034 32364 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0821 08:59:38.978055 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.978063 32364 net.cpp:165] Memory required for 
data: 1837876400\nI0821 08:59:38.978081 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0821 08:59:38.978106 32364 net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0821 08:59:38.978121 32364 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0821 08:59:38.978137 32364 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0821 08:59:38.978430 32364 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0821 08:59:38.978449 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.978459 32364 net.cpp:165] Memory required for data: 1841153200\nI0821 08:59:38.978488 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:38.978505 32364 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0821 08:59:38.978518 32364 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0821 08:59:38.978539 32364 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:38.978638 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:38.978826 32364 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0821 08:59:38.978844 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.978853 32364 net.cpp:165] Memory required for data: 1844430000\nI0821 08:59:38.978871 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0821 08:59:38.978888 32364 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0821 08:59:38.978899 32364 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0821 08:59:38.978912 32364 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:38.978932 32364 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0821 08:59:38.978947 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.978956 32364 net.cpp:165] Memory required for data: 1847706800\nI0821 08:59:38.978965 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0821 08:59:38.978991 32364 net.cpp:100] Creating Layer L2_b15_cbr2_conv\nI0821 08:59:38.979003 32364 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0821 
08:59:38.979025 32364 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0821 08:59:38.979559 32364 net.cpp:150] Setting up L2_b15_cbr2_conv\nI0821 08:59:38.979579 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.979589 32364 net.cpp:165] Memory required for data: 1850983600\nI0821 08:59:38.979607 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0821 08:59:38.979630 32364 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0821 08:59:38.979643 32364 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0821 08:59:38.979666 32364 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0821 08:59:38.979954 32364 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0821 08:59:38.979974 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.979982 32364 net.cpp:165] Memory required for data: 1854260400\nI0821 08:59:38.980005 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:38.980020 32364 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0821 08:59:38.980032 32364 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0821 08:59:38.980047 32364 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0821 08:59:38.980142 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:38.980340 32364 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0821 08:59:38.980362 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.980372 32364 net.cpp:165] Memory required for data: 1857537200\nI0821 08:59:38.980391 32364 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0821 08:59:38.980406 32364 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0821 08:59:38.980418 32364 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0821 08:59:38.980432 32364 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:38.980449 32364 net.cpp:408] L2_b15_sum_eltwise -> L2_b15_sum_eltwise_top\nI0821 08:59:38.980502 32364 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0821 
08:59:38.980520 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.980531 32364 net.cpp:165] Memory required for data: 1860814000\nI0821 08:59:38.980541 32364 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0821 08:59:38.980556 32364 net.cpp:100] Creating Layer L2_b15_relu\nI0821 08:59:38.980567 32364 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0821 08:59:38.980587 32364 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0821 08:59:38.980607 32364 net.cpp:150] Setting up L2_b15_relu\nI0821 08:59:38.980621 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.980630 32364 net.cpp:165] Memory required for data: 1864090800\nI0821 08:59:38.980640 32364 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:38.980664 32364 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:38.980675 32364 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0821 08:59:38.980695 32364 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:38.980717 32364 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:38.980800 32364 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:38.980818 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.980832 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.980841 32364 net.cpp:165] Memory required for data: 1870644400\nI0821 08:59:38.980852 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0821 08:59:38.980878 32364 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0821 08:59:38.980892 32364 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:38.980911 32364 net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0821 08:59:38.982655 32364 net.cpp:150] Setting up 
L2_b16_cbr1_conv\nI0821 08:59:38.982676 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.982686 32364 net.cpp:165] Memory required for data: 1873921200\nI0821 08:59:38.982704 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0821 08:59:38.982725 32364 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0821 08:59:38.982738 32364 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0821 08:59:38.982755 32364 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0821 08:59:38.983058 32364 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0821 08:59:38.983078 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.983088 32364 net.cpp:165] Memory required for data: 1877198000\nI0821 08:59:38.983108 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:38.983132 32364 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0821 08:59:38.983152 32364 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0821 08:59:38.983170 32364 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:38.983270 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:38.983484 32364 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0821 08:59:38.983502 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.983511 32364 net.cpp:165] Memory required for data: 1880474800\nI0821 08:59:38.983530 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0821 08:59:38.983551 32364 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0821 08:59:38.983562 32364 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0821 08:59:38.983582 32364 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:38.983603 32364 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0821 08:59:38.983616 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.983625 32364 net.cpp:165] Memory required for data: 1883751600\nI0821 08:59:38.983635 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0821 
08:59:38.983655 32364 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0821 08:59:38.983667 32364 net.cpp:434] L2_b16_cbr2_conv <- L2_b16_cbr1_bn_top\nI0821 08:59:38.983690 32364 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0821 08:59:38.984220 32364 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0821 08:59:38.984239 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.984249 32364 net.cpp:165] Memory required for data: 1887028400\nI0821 08:59:38.984267 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0821 08:59:38.984284 32364 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0821 08:59:38.984297 32364 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0821 08:59:38.984318 32364 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0821 08:59:38.984622 32364 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0821 08:59:38.984640 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.984658 32364 net.cpp:165] Memory required for data: 1890305200\nI0821 08:59:38.984681 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:38.984702 32364 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0821 08:59:38.984715 32364 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0821 08:59:38.984731 32364 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0821 08:59:38.984828 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:38.985023 32364 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0821 08:59:38.985043 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.985052 32364 net.cpp:165] Memory required for data: 1893582000\nI0821 08:59:38.985070 32364 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0821 08:59:38.985092 32364 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0821 08:59:38.985105 32364 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0821 08:59:38.985118 32364 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 
08:59:38.985134 32364 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0821 08:59:38.985193 32364 net.cpp:150] Setting up L2_b16_sum_eltwise\nI0821 08:59:38.985213 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.985222 32364 net.cpp:165] Memory required for data: 1896858800\nI0821 08:59:38.985232 32364 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0821 08:59:38.985247 32364 net.cpp:100] Creating Layer L2_b16_relu\nI0821 08:59:38.985260 32364 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0821 08:59:38.985282 32364 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0821 08:59:38.985303 32364 net.cpp:150] Setting up L2_b16_relu\nI0821 08:59:38.985317 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.985327 32364 net.cpp:165] Memory required for data: 1900135600\nI0821 08:59:38.985335 32364 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:38.985350 32364 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:38.985360 32364 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0821 08:59:38.985376 32364 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:38.985396 32364 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:38.985486 32364 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:38.985505 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.985518 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.985528 32364 net.cpp:165] Memory required for data: 1906689200\nI0821 08:59:38.985538 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0821 08:59:38.985559 32364 net.cpp:100] Creating Layer L2_b17_cbr1_conv\nI0821 08:59:38.985572 32364 net.cpp:434] L2_b17_cbr1_conv <- 
L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:38.985595 32364 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0821 08:59:38.986114 32364 net.cpp:150] Setting up L2_b17_cbr1_conv\nI0821 08:59:38.986133 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.986142 32364 net.cpp:165] Memory required for data: 1909966000\nI0821 08:59:38.986168 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0821 08:59:38.986186 32364 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0821 08:59:38.986198 32364 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0821 08:59:38.986219 32364 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0821 08:59:38.986508 32364 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0821 08:59:38.986527 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.986536 32364 net.cpp:165] Memory required for data: 1913242800\nI0821 08:59:38.986557 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:38.986578 32364 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0821 08:59:38.986598 32364 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0821 08:59:38.986616 32364 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:38.986716 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:38.986905 32364 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0821 08:59:38.986924 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.986933 32364 net.cpp:165] Memory required for data: 1916519600\nI0821 08:59:38.986951 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0821 08:59:38.986971 32364 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0821 08:59:38.986984 32364 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0821 08:59:38.986999 32364 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:38.987020 32364 net.cpp:150] Setting up L2_b17_cbr1_relu\nI0821 08:59:38.987033 32364 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0821 08:59:38.987043 32364 net.cpp:165] Memory required for data: 1919796400\nI0821 08:59:38.987053 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_conv\nI0821 08:59:38.987078 32364 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0821 08:59:38.987092 32364 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0821 08:59:38.987112 32364 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0821 08:59:38.987630 32364 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0821 08:59:38.987649 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.987659 32364 net.cpp:165] Memory required for data: 1923073200\nI0821 08:59:38.987678 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0821 08:59:38.987694 32364 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0821 08:59:38.987706 32364 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0821 08:59:38.987730 32364 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0821 08:59:38.988031 32364 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0821 08:59:38.988051 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988060 32364 net.cpp:165] Memory required for data: 1926350000\nI0821 08:59:38.988083 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:38.988104 32364 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0821 08:59:38.988117 32364 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0821 08:59:38.988133 32364 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0821 08:59:38.988229 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:38.988420 32364 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0821 08:59:38.988438 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988447 32364 net.cpp:165] Memory required for data: 1929626800\nI0821 08:59:38.988466 32364 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0821 08:59:38.988487 32364 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0821 08:59:38.988499 
32364 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0821 08:59:38.988513 32364 net.cpp:434] L2_b17_sum_eltwise <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:38.988530 32364 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0821 08:59:38.988584 32364 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0821 08:59:38.988601 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988611 32364 net.cpp:165] Memory required for data: 1932903600\nI0821 08:59:38.988621 32364 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0821 08:59:38.988636 32364 net.cpp:100] Creating Layer L2_b17_relu\nI0821 08:59:38.988648 32364 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0821 08:59:38.988662 32364 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0821 08:59:38.988680 32364 net.cpp:150] Setting up L2_b17_relu\nI0821 08:59:38.988695 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988704 32364 net.cpp:165] Memory required for data: 1936180400\nI0821 08:59:38.988714 32364 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:38.988732 32364 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:38.988754 32364 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0821 08:59:38.988770 32364 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:38.988790 32364 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:38.988878 32364 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:38.988896 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988909 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.988919 32364 net.cpp:165] Memory required for data: 1942734000\nI0821 08:59:38.988929 32364 layer_factory.hpp:77] Creating layer 
L2_b18_cbr1_conv\nI0821 08:59:38.988948 32364 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0821 08:59:38.988961 32364 net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:38.988983 32364 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0821 08:59:38.989542 32364 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0821 08:59:38.989569 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.989580 32364 net.cpp:165] Memory required for data: 1946010800\nI0821 08:59:38.989598 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0821 08:59:38.989615 32364 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0821 08:59:38.989627 32364 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0821 08:59:38.989648 32364 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0821 08:59:38.989961 32364 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0821 08:59:38.989980 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.989990 32364 net.cpp:165] Memory required for data: 1949287600\nI0821 08:59:38.990011 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:38.990028 32364 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0821 08:59:38.990041 32364 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0821 08:59:38.990063 32364 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:38.990157 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:38.990352 32364 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0821 08:59:38.990370 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.990381 32364 net.cpp:165] Memory required for data: 1952564400\nI0821 08:59:38.990398 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0821 08:59:38.990413 32364 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0821 08:59:38.990424 32364 net.cpp:434] L2_b18_cbr1_relu <- L2_b18_cbr1_bn_top\nI0821 08:59:38.990443 32364 net.cpp:395] L2_b18_cbr1_relu -> 
L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:38.990464 32364 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0821 08:59:38.990478 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.990489 32364 net.cpp:165] Memory required for data: 1955841200\nI0821 08:59:38.990499 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0821 08:59:38.990523 32364 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0821 08:59:38.990537 32364 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0821 08:59:38.990555 32364 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0821 08:59:38.991078 32364 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0821 08:59:38.991097 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.991107 32364 net.cpp:165] Memory required for data: 1959118000\nI0821 08:59:38.991125 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0821 08:59:38.991153 32364 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0821 08:59:38.991166 32364 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0821 08:59:38.991184 32364 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0821 08:59:38.991478 32364 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0821 08:59:38.991498 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.991506 32364 net.cpp:165] Memory required for data: 1962394800\nI0821 08:59:38.991526 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:38.991552 32364 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0821 08:59:38.991565 32364 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0821 08:59:38.991580 32364 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0821 08:59:38.991686 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:38.991879 32364 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0821 08:59:38.991901 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.991910 32364 net.cpp:165] Memory required for data: 1965671600\nI0821 08:59:38.991930 
32364 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0821 08:59:38.991946 32364 net.cpp:100] Creating Layer L2_b18_sum_eltwise\nI0821 08:59:38.991958 32364 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0821 08:59:38.991971 32364 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:38.991992 32364 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0821 08:59:38.992040 32364 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0821 08:59:38.992058 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.992069 32364 net.cpp:165] Memory required for data: 1968948400\nI0821 08:59:38.992079 32364 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0821 08:59:38.992092 32364 net.cpp:100] Creating Layer L2_b18_relu\nI0821 08:59:38.992105 32364 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0821 08:59:38.992123 32364 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0821 08:59:38.992142 32364 net.cpp:150] Setting up L2_b18_relu\nI0821 08:59:38.992167 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.992177 32364 net.cpp:165] Memory required for data: 1972225200\nI0821 08:59:38.992187 32364 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:38.992200 32364 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:38.992213 32364 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0821 08:59:38.992231 32364 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:38.992254 32364 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:38.992336 32364 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:38.992355 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:38.992368 32364 net.cpp:157] Top shape: 100 32 16 
16 (819200)\nI0821 08:59:38.992378 32364 net.cpp:165] Memory required for data: 1978778800\nI0821 08:59:38.992389 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:59:38.992419 32364 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:59:38.992434 32364 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:38.992451 32364 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:59:38.992980 32364 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:59:38.993000 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.993008 32364 net.cpp:165] Memory required for data: 1979598000\nI0821 08:59:38.993119 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:59:38.993139 32364 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:59:38.993160 32364 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:59:38.993178 32364 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:59:38.993502 32364 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:59:38.993521 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.993531 32364 net.cpp:165] Memory required for data: 1980417200\nI0821 08:59:38.993553 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:38.993574 32364 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:59:38.993587 32364 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:59:38.993603 32364 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.993712 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:38.993913 32364 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:59:38.993933 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.993942 32364 net.cpp:165] Memory required for data: 1981236400\nI0821 08:59:38.993962 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:59:38.993980 32364 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:59:38.993993 32364 
net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:59:38.994011 32364 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:38.994032 32364 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:59:38.994047 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.994056 32364 net.cpp:165] Memory required for data: 1982055600\nI0821 08:59:38.994067 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:59:38.994087 32364 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:59:38.994101 32364 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:59:38.994122 32364 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:59:38.994658 32364 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:59:38.994678 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.994688 32364 net.cpp:165] Memory required for data: 1982874800\nI0821 08:59:38.994705 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:59:38.994726 32364 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:59:38.994740 32364 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:59:38.994756 32364 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:59:38.995066 32364 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:59:38.995085 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.995095 32364 net.cpp:165] Memory required for data: 1983694000\nI0821 08:59:38.995115 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:38.995132 32364 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:59:38.995143 32364 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:59:38.995167 32364 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:59:38.995262 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:38.995458 32364 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:59:38.995481 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.995491 
32364 net.cpp:165] Memory required for data: 1984513200\nI0821 08:59:38.995509 32364 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:59:38.995527 32364 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:59:38.995538 32364 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:38.995555 32364 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:59:38.995618 32364 net.cpp:150] Setting up L3_b1_pool\nI0821 08:59:38.995638 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.995647 32364 net.cpp:165] Memory required for data: 1985332400\nI0821 08:59:38.995658 32364 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:59:38.995673 32364 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:59:38.995685 32364 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:59:38.995698 32364 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:59:38.995714 32364 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:59:38.995776 32364 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:59:38.995795 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.995805 32364 net.cpp:165] Memory required for data: 1986151600\nI0821 08:59:38.995815 32364 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:59:38.995828 32364 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:59:38.995841 32364 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:59:38.995854 32364 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:59:38.995873 32364 net.cpp:150] Setting up L3_b1_relu\nI0821 08:59:38.995888 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.995906 32364 net.cpp:165] Memory required for data: 1986970800\nI0821 08:59:38.995918 32364 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:59:38.995934 32364 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:59:38.995956 32364 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:59:38.997201 32364 net.cpp:150] 
Setting up L3_b1_zeros\nI0821 08:59:38.997225 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:38.997234 32364 net.cpp:165] Memory required for data: 1987790000\nI0821 08:59:38.997246 32364 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:59:38.997267 32364 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:59:38.997279 32364 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:59:38.997292 32364 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:59:38.997308 32364 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:59:38.997375 32364 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:59:38.997395 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:38.997404 32364 net.cpp:165] Memory required for data: 1989428400\nI0821 08:59:38.997416 32364 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:38.997429 32364 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:38.997442 32364 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:59:38.997462 32364 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:38.997483 32364 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:38.997573 32364 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:38.997596 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:38.997608 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:38.997618 32364 net.cpp:165] Memory required for data: 1992705200\nI0821 08:59:38.997628 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:59:38.997653 32364 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:59:38.997665 32364 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:38.997684 32364 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:59:38.999734 32364 net.cpp:150] Setting up 
L3_b2_cbr1_conv\nI0821 08:59:38.999760 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:38.999771 32364 net.cpp:165] Memory required for data: 1994343600\nI0821 08:59:38.999790 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:59:38.999807 32364 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:59:38.999819 32364 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:59:38.999835 32364 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:59:39.000157 32364 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:59:39.000177 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.000187 32364 net.cpp:165] Memory required for data: 1995982000\nI0821 08:59:39.000210 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:39.000231 32364 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:59:39.000243 32364 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:59:39.000263 32364 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.000356 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:39.000560 32364 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:59:39.000579 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.000588 32364 net.cpp:165] Memory required for data: 1997620400\nI0821 08:59:39.000607 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:59:39.000622 32364 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:59:39.000634 32364 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:59:39.000653 32364 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.000674 32364 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:59:39.000689 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.000699 32364 net.cpp:165] Memory required for data: 1999258800\nI0821 08:59:39.000717 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:59:39.000744 32364 net.cpp:100] 
Creating Layer L3_b2_cbr2_conv\nI0821 08:59:39.000758 32364 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:59:39.000777 32364 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:59:39.001844 32364 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:59:39.001864 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.001874 32364 net.cpp:165] Memory required for data: 2000897200\nI0821 08:59:39.001893 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:59:39.001915 32364 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:59:39.001929 32364 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:59:39.001945 32364 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:59:39.002265 32364 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:59:39.002285 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.002295 32364 net.cpp:165] Memory required for data: 2002535600\nI0821 08:59:39.002315 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:39.002332 32364 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:59:39.002344 32364 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:59:39.002360 32364 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:59:39.002459 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:39.002657 32364 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:59:39.002676 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.002686 32364 net.cpp:165] Memory required for data: 2004174000\nI0821 08:59:39.002704 32364 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:59:39.002722 32364 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:59:39.002733 32364 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:59:39.002748 32364 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:39.002768 32364 net.cpp:408] L3_b2_sum_eltwise -> 
L3_b2_sum_eltwise_top\nI0821 08:59:39.002825 32364 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:59:39.002848 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.002858 32364 net.cpp:165] Memory required for data: 2005812400\nI0821 08:59:39.002869 32364 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:59:39.002883 32364 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:59:39.002897 32364 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:59:39.002910 32364 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:59:39.002929 32364 net.cpp:150] Setting up L3_b2_relu\nI0821 08:59:39.002944 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.002954 32364 net.cpp:165] Memory required for data: 2007450800\nI0821 08:59:39.002962 32364 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.002981 32364 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.002993 32364 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:59:39.003008 32364 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:39.003029 32364 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:39.003111 32364 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.003130 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.003144 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.003160 32364 net.cpp:165] Memory required for data: 2010727600\nI0821 08:59:39.003170 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:59:39.003191 32364 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:59:39.003204 32364 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:39.003229 32364 net.cpp:408] L3_b3_cbr1_conv -> 
L3_b3_cbr1_conv_top\nI0821 08:59:39.004326 32364 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:59:39.004348 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.004357 32364 net.cpp:165] Memory required for data: 2012366000\nI0821 08:59:39.004376 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:59:39.004393 32364 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:59:39.004405 32364 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:59:39.004427 32364 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:59:39.004734 32364 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:59:39.004758 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.004770 32364 net.cpp:165] Memory required for data: 2014004400\nI0821 08:59:39.004791 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:39.004807 32364 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:59:39.004820 32364 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:59:39.004835 32364 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.004928 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:39.005126 32364 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:59:39.005151 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.005162 32364 net.cpp:165] Memory required for data: 2015642800\nI0821 08:59:39.005180 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:59:39.005197 32364 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:59:39.005208 32364 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:59:39.005228 32364 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.005249 32364 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:59:39.005262 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.005272 32364 net.cpp:165] Memory required for data: 2017281200\nI0821 08:59:39.005282 32364 layer_factory.hpp:77] 
Creating layer L3_b3_cbr2_conv\nI0821 08:59:39.005307 32364 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:59:39.005321 32364 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:59:39.005338 32364 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:59:39.006476 32364 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:59:39.006497 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.006507 32364 net.cpp:165] Memory required for data: 2018919600\nI0821 08:59:39.006526 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:59:39.006548 32364 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:59:39.006561 32364 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:59:39.006587 32364 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:59:39.006886 32364 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:59:39.006904 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.006914 32364 net.cpp:165] Memory required for data: 2020558000\nI0821 08:59:39.006935 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:39.006953 32364 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:59:39.006964 32364 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:59:39.006980 32364 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:59:39.007082 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:39.007287 32364 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:59:39.007305 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.007314 32364 net.cpp:165] Memory required for data: 2022196400\nI0821 08:59:39.007333 32364 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:59:39.007349 32364 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:59:39.007361 32364 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:59:39.007375 32364 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 
08:59:39.007396 32364 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:59:39.007455 32364 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:59:39.007478 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.007498 32364 net.cpp:165] Memory required for data: 2023834800\nI0821 08:59:39.007509 32364 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:59:39.007524 32364 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:59:39.007536 32364 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:59:39.007551 32364 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:59:39.007570 32364 net.cpp:150] Setting up L3_b3_relu\nI0821 08:59:39.007586 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.007596 32364 net.cpp:165] Memory required for data: 2025473200\nI0821 08:59:39.007606 32364 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.007624 32364 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.007637 32364 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:59:39.007652 32364 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:39.007671 32364 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:39.007760 32364 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.007777 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.007791 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.007800 32364 net.cpp:165] Memory required for data: 2028750000\nI0821 08:59:39.007810 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:59:39.007829 32364 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:59:39.007843 32364 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 
08:59:39.007866 32364 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:59:39.008942 32364 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:59:39.008963 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.008973 32364 net.cpp:165] Memory required for data: 2030388400\nI0821 08:59:39.008996 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:59:39.009014 32364 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:59:39.009027 32364 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:59:39.009047 32364 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:59:39.009358 32364 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:59:39.009380 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.009392 32364 net.cpp:165] Memory required for data: 2032026800\nI0821 08:59:39.009413 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:39.009428 32364 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:59:39.009440 32364 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:59:39.009455 32364 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.009549 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:39.009747 32364 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:59:39.009766 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.009775 32364 net.cpp:165] Memory required for data: 2033665200\nI0821 08:59:39.009795 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:59:39.009814 32364 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:59:39.009826 32364 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:59:39.009842 32364 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.009861 32364 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:59:39.009876 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.009886 32364 net.cpp:165] Memory required for data: 
2035303600\nI0821 08:59:39.009896 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:59:39.009922 32364 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:59:39.009934 32364 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:59:39.009953 32364 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:59:39.011039 32364 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:59:39.011065 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.011076 32364 net.cpp:165] Memory required for data: 2036942000\nI0821 08:59:39.011095 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:59:39.011116 32364 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:59:39.011129 32364 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:59:39.011157 32364 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:59:39.011478 32364 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:59:39.011497 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.011507 32364 net.cpp:165] Memory required for data: 2038580400\nI0821 08:59:39.011528 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:39.011544 32364 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:59:39.011556 32364 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:59:39.011577 32364 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:59:39.011672 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:39.011870 32364 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:59:39.011889 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.011898 32364 net.cpp:165] Memory required for data: 2040218800\nI0821 08:59:39.011916 32364 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:59:39.011934 32364 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:59:39.011946 32364 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:59:39.011958 32364 net.cpp:434] 
L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:39.011981 32364 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:59:39.012042 32364 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:59:39.012061 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.012071 32364 net.cpp:165] Memory required for data: 2041857200\nI0821 08:59:39.012081 32364 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:59:39.012096 32364 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:59:39.012107 32364 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:59:39.012126 32364 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:59:39.012153 32364 net.cpp:150] Setting up L3_b4_relu\nI0821 08:59:39.012168 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.012178 32364 net.cpp:165] Memory required for data: 2043495600\nI0821 08:59:39.012189 32364 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.012203 32364 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.012215 32364 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:59:39.012230 32364 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:39.012251 32364 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:39.012341 32364 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.012358 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.012372 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.012382 32364 net.cpp:165] Memory required for data: 2046772400\nI0821 08:59:39.012392 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:59:39.012411 32364 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:59:39.012424 32364 net.cpp:434] 
L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:39.012447 32364 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:59:39.013525 32364 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:59:39.013545 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.013555 32364 net.cpp:165] Memory required for data: 2048410800\nI0821 08:59:39.013572 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:59:39.013589 32364 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:59:39.013610 32364 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:59:39.013633 32364 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:59:39.013957 32364 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:59:39.013979 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.013990 32364 net.cpp:165] Memory required for data: 2050049200\nI0821 08:59:39.014014 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:39.014029 32364 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:59:39.014042 32364 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:59:39.014058 32364 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.014155 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:39.014355 32364 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:59:39.014374 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.014384 32364 net.cpp:165] Memory required for data: 2051687600\nI0821 08:59:39.014402 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:59:39.014425 32364 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:59:39.014437 32364 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:59:39.014453 32364 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.014472 32364 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:59:39.014487 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:39.014497 32364 net.cpp:165] Memory required for data: 2053326000\nI0821 08:59:39.014508 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:59:39.014531 32364 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:59:39.014545 32364 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:59:39.014562 32364 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:59:39.016628 32364 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:59:39.016652 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.016662 32364 net.cpp:165] Memory required for data: 2054964400\nI0821 08:59:39.016681 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:59:39.016700 32364 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:59:39.016711 32364 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:59:39.016733 32364 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:59:39.017042 32364 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:59:39.017066 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017076 32364 net.cpp:165] Memory required for data: 2056602800\nI0821 08:59:39.017098 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:39.017115 32364 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:59:39.017127 32364 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:59:39.017143 32364 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:59:39.017244 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:39.017441 32364 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:59:39.017462 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017472 32364 net.cpp:165] Memory required for data: 2058241200\nI0821 08:59:39.017490 32364 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:59:39.017513 32364 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:59:39.017525 32364 net.cpp:434] L3_b5_sum_eltwise <- 
L3_b5_cbr2_bn_top\nI0821 08:59:39.017539 32364 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:39.017555 32364 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:59:39.017616 32364 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:59:39.017633 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017643 32364 net.cpp:165] Memory required for data: 2059879600\nI0821 08:59:39.017654 32364 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:59:39.017668 32364 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:59:39.017681 32364 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:59:39.017695 32364 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:59:39.017724 32364 net.cpp:150] Setting up L3_b5_relu\nI0821 08:59:39.017740 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017750 32364 net.cpp:165] Memory required for data: 2061518000\nI0821 08:59:39.017760 32364 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.017773 32364 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.017784 32364 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:59:39.017804 32364 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:39.017825 32364 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:39.017911 32364 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.017931 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017946 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.017954 32364 net.cpp:165] Memory required for data: 2064794800\nI0821 08:59:39.017966 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:59:39.017992 32364 net.cpp:100] Creating Layer 
L3_b6_cbr1_conv\nI0821 08:59:39.018007 32364 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:39.018026 32364 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:59:39.019106 32364 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:59:39.019127 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.019137 32364 net.cpp:165] Memory required for data: 2066433200\nI0821 08:59:39.019162 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:59:39.019186 32364 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:59:39.019198 32364 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:59:39.019215 32364 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:59:39.019521 32364 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:59:39.019539 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.019549 32364 net.cpp:165] Memory required for data: 2068071600\nI0821 08:59:39.019570 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:39.019593 32364 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:59:39.019605 32364 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:59:39.019621 32364 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.019718 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:39.019917 32364 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:59:39.019937 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.019945 32364 net.cpp:165] Memory required for data: 2069710000\nI0821 08:59:39.019964 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:59:39.019984 32364 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:59:39.019996 32364 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:59:39.020011 32364 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.020031 32364 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:59:39.020046 
32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.020056 32364 net.cpp:165] Memory required for data: 2071348400\nI0821 08:59:39.020066 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:59:39.020090 32364 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:59:39.020104 32364 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:59:39.020125 32364 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:59:39.021198 32364 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:59:39.021217 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.021227 32364 net.cpp:165] Memory required for data: 2072986800\nI0821 08:59:39.021245 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:59:39.021263 32364 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:59:39.021275 32364 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:59:39.021304 32364 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:59:39.021625 32364 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:59:39.021647 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.021658 32364 net.cpp:165] Memory required for data: 2074625200\nI0821 08:59:39.021680 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:39.021697 32364 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:59:39.021709 32364 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:59:39.021725 32364 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:59:39.021817 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:39.022014 32364 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:59:39.022033 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.022042 32364 net.cpp:165] Memory required for data: 2076263600\nI0821 08:59:39.022060 32364 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:59:39.022081 32364 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 
08:59:39.022094 32364 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:59:39.022106 32364 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:39.022122 32364 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:59:39.022191 32364 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:59:39.022209 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.022219 32364 net.cpp:165] Memory required for data: 2077902000\nI0821 08:59:39.022229 32364 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:59:39.022244 32364 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:59:39.022256 32364 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:59:39.022270 32364 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:59:39.022290 32364 net.cpp:150] Setting up L3_b6_relu\nI0821 08:59:39.022305 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.022313 32364 net.cpp:165] Memory required for data: 2079540400\nI0821 08:59:39.022323 32364 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.022338 32364 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.022349 32364 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:59:39.022368 32364 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:39.022389 32364 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:39.022472 32364 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.022493 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.022506 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.022516 32364 net.cpp:165] Memory required for data: 2082817200\nI0821 08:59:39.022526 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 
08:59:39.022552 32364 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:59:39.022565 32364 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:39.022583 32364 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:59:39.023659 32364 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:59:39.023679 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.023689 32364 net.cpp:165] Memory required for data: 2084455600\nI0821 08:59:39.023707 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:59:39.023728 32364 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:59:39.023741 32364 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:59:39.023758 32364 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:59:39.024068 32364 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:59:39.024087 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.024096 32364 net.cpp:165] Memory required for data: 2086094000\nI0821 08:59:39.024127 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:39.024155 32364 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:59:39.024169 32364 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:59:39.024186 32364 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.024288 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:39.024488 32364 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:59:39.024507 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.024516 32364 net.cpp:165] Memory required for data: 2087732400\nI0821 08:59:39.024534 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:59:39.024554 32364 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:59:39.024566 32364 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:59:39.024580 32364 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.024600 32364 net.cpp:150] 
Setting up L3_b7_cbr1_relu\nI0821 08:59:39.024619 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.024629 32364 net.cpp:165] Memory required for data: 2089370800\nI0821 08:59:39.024639 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:59:39.024659 32364 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:59:39.024672 32364 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:59:39.024694 32364 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:59:39.025773 32364 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:59:39.025792 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.025802 32364 net.cpp:165] Memory required for data: 2091009200\nI0821 08:59:39.025820 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:59:39.025837 32364 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:59:39.025849 32364 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:59:39.025869 32364 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:59:39.026196 32364 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:59:39.026216 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.026226 32364 net.cpp:165] Memory required for data: 2092647600\nI0821 08:59:39.026247 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:39.026263 32364 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:59:39.026275 32364 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:59:39.026291 32364 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:59:39.026389 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:39.026588 32364 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:59:39.026610 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.026620 32364 net.cpp:165] Memory required for data: 2094286000\nI0821 08:59:39.026638 32364 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:59:39.026656 32364 net.cpp:100] 
Creating Layer L3_b7_sum_eltwise\nI0821 08:59:39.026669 32364 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:59:39.026681 32364 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:39.026697 32364 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:59:39.026757 32364 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:59:39.026777 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.026787 32364 net.cpp:165] Memory required for data: 2095924400\nI0821 08:59:39.026795 32364 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:59:39.026810 32364 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:59:39.026823 32364 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:59:39.026837 32364 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:59:39.026856 32364 net.cpp:150] Setting up L3_b7_relu\nI0821 08:59:39.026871 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.026880 32364 net.cpp:165] Memory required for data: 2097562800\nI0821 08:59:39.026890 32364 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.026913 32364 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.026926 32364 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:59:39.026947 32364 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:39.026970 32364 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:39.027055 32364 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.027078 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.027093 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.027102 32364 net.cpp:165] Memory required for data: 2100839600\nI0821 08:59:39.027113 32364 layer_factory.hpp:77] 
Creating layer L3_b8_cbr1_conv\nI0821 08:59:39.027133 32364 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:59:39.027153 32364 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:39.027174 32364 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:59:39.028246 32364 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:59:39.028266 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.028276 32364 net.cpp:165] Memory required for data: 2102478000\nI0821 08:59:39.028295 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:59:39.028317 32364 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:59:39.028329 32364 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:59:39.028347 32364 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:59:39.028658 32364 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:59:39.028678 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.028687 32364 net.cpp:165] Memory required for data: 2104116400\nI0821 08:59:39.028709 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:39.028733 32364 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:59:39.028745 32364 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:59:39.028762 32364 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.028857 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:39.029058 32364 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:59:39.029078 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.029086 32364 net.cpp:165] Memory required for data: 2105754800\nI0821 08:59:39.029105 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:59:39.029125 32364 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:59:39.029137 32364 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:59:39.029165 32364 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 
08:59:39.029186 32364 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:59:39.029201 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.029211 32364 net.cpp:165] Memory required for data: 2107393200\nI0821 08:59:39.029222 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:59:39.029242 32364 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:59:39.029255 32364 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:59:39.029278 32364 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:59:39.030350 32364 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:59:39.030370 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.030380 32364 net.cpp:165] Memory required for data: 2109031600\nI0821 08:59:39.030397 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:59:39.030423 32364 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:59:39.030437 32364 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:59:39.030454 32364 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:59:39.030763 32364 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:59:39.030783 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.030793 32364 net.cpp:165] Memory required for data: 2110670000\nI0821 08:59:39.030814 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:39.030840 32364 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:59:39.030854 32364 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:59:39.030870 32364 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:59:39.030977 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:39.031188 32364 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:59:39.031211 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.031221 32364 net.cpp:165] Memory required for data: 2112308400\nI0821 08:59:39.031240 32364 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 
08:59:39.031257 32364 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:59:39.031270 32364 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:59:39.031282 32364 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:39.031298 32364 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:59:39.031360 32364 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:59:39.031379 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.031389 32364 net.cpp:165] Memory required for data: 2113946800\nI0821 08:59:39.031399 32364 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:59:39.031414 32364 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:59:39.031425 32364 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:59:39.031440 32364 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:59:39.031461 32364 net.cpp:150] Setting up L3_b8_relu\nI0821 08:59:39.031473 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.031482 32364 net.cpp:165] Memory required for data: 2115585200\nI0821 08:59:39.031492 32364 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.031510 32364 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.031522 32364 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:59:39.031538 32364 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:39.031558 32364 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:39.031642 32364 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.031668 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.031682 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.031692 32364 net.cpp:165] Memory required for data: 2118862000\nI0821 
08:59:39.031702 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:59:39.031721 32364 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:59:39.031734 32364 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:39.031752 32364 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:59:39.033888 32364 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:59:39.033911 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.033922 32364 net.cpp:165] Memory required for data: 2120500400\nI0821 08:59:39.033941 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:59:39.033962 32364 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:59:39.033977 32364 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:59:39.033998 32364 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:59:39.034319 32364 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:59:39.034338 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.034348 32364 net.cpp:165] Memory required for data: 2122138800\nI0821 08:59:39.034368 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:39.034386 32364 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:59:39.034397 32364 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:59:39.034412 32364 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.034523 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:39.034740 32364 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:59:39.034759 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.034770 32364 net.cpp:165] Memory required for data: 2123777200\nI0821 08:59:39.034788 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:59:39.034803 32364 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:59:39.034816 32364 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:59:39.034835 32364 net.cpp:395] 
L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.034857 32364 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:59:39.034870 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.034880 32364 net.cpp:165] Memory required for data: 2125415600\nI0821 08:59:39.034891 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:59:39.034916 32364 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:59:39.034929 32364 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:59:39.034947 32364 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:59:39.036031 32364 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:59:39.036052 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.036062 32364 net.cpp:165] Memory required for data: 2127054000\nI0821 08:59:39.036079 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:59:39.036101 32364 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:59:39.036114 32364 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:59:39.036131 32364 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:59:39.036447 32364 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:59:39.036466 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.036475 32364 net.cpp:165] Memory required for data: 2128692400\nI0821 08:59:39.036499 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:39.036520 32364 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:59:39.036532 32364 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:59:39.036547 32364 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:59:39.036644 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:39.036844 32364 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:59:39.036862 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.036871 32364 net.cpp:165] Memory required for data: 2130330800\nI0821 08:59:39.036890 32364 
layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:59:39.036911 32364 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:59:39.036924 32364 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:59:39.036937 32364 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:39.036958 32364 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:59:39.037017 32364 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:59:39.037035 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.037045 32364 net.cpp:165] Memory required for data: 2131969200\nI0821 08:59:39.037055 32364 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:59:39.037075 32364 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:59:39.037089 32364 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:59:39.037103 32364 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:59:39.037123 32364 net.cpp:150] Setting up L3_b9_relu\nI0821 08:59:39.037137 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.037154 32364 net.cpp:165] Memory required for data: 2133607600\nI0821 08:59:39.037166 32364 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.037180 32364 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.037191 32364 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0821 08:59:39.037207 32364 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:39.037228 32364 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:39.037328 32364 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.037348 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.037361 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.037371 32364 
net.cpp:165] Memory required for data: 2136884400\nI0821 08:59:39.037381 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_conv\nI0821 08:59:39.037412 32364 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0821 08:59:39.037426 32364 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:39.037446 32364 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0821 08:59:39.038507 32364 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0821 08:59:39.038527 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.038537 32364 net.cpp:165] Memory required for data: 2138522800\nI0821 08:59:39.038555 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0821 08:59:39.038580 32364 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0821 08:59:39.038594 32364 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0821 08:59:39.038615 32364 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0821 08:59:39.039922 32364 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0821 08:59:39.039944 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.039954 32364 net.cpp:165] Memory required for data: 2140161200\nI0821 08:59:39.039975 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:39.039997 32364 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0821 08:59:39.040009 32364 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0821 08:59:39.040026 32364 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.040129 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:39.040334 32364 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0821 08:59:39.040354 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.040364 32364 net.cpp:165] Memory required for data: 2141799600\nI0821 08:59:39.040382 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0821 08:59:39.040402 32364 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0821 08:59:39.040416 32364 net.cpp:434] 
L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0821 08:59:39.040436 32364 net.cpp:395] L3_b10_cbr1_relu -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.040455 32364 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0821 08:59:39.040472 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.040480 32364 net.cpp:165] Memory required for data: 2143438000\nI0821 08:59:39.040490 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0821 08:59:39.040511 32364 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0821 08:59:39.040524 32364 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0821 08:59:39.040546 32364 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0821 08:59:39.041620 32364 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0821 08:59:39.041640 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.041648 32364 net.cpp:165] Memory required for data: 2145076400\nI0821 08:59:39.041667 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0821 08:59:39.041690 32364 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0821 08:59:39.041702 32364 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0821 08:59:39.041720 32364 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0821 08:59:39.042026 32364 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0821 08:59:39.042045 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.042054 32364 net.cpp:165] Memory required for data: 2146714800\nI0821 08:59:39.042075 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:39.042093 32364 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0821 08:59:39.042104 32364 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0821 08:59:39.042119 32364 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0821 08:59:39.042230 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:39.042439 32364 net.cpp:150] Setting up L3_b10_cbr2_scale\nI0821 08:59:39.042462 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:39.042474 32364 net.cpp:165] Memory required for data: 2148353200\nI0821 08:59:39.042492 32364 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0821 08:59:39.042510 32364 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0821 08:59:39.042522 32364 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0821 08:59:39.042536 32364 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:39.042553 32364 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0821 08:59:39.042615 32364 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0821 08:59:39.042634 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.042642 32364 net.cpp:165] Memory required for data: 2149991600\nI0821 08:59:39.042654 32364 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0821 08:59:39.042668 32364 net.cpp:100] Creating Layer L3_b10_relu\nI0821 08:59:39.042680 32364 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0821 08:59:39.042693 32364 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0821 08:59:39.042712 32364 net.cpp:150] Setting up L3_b10_relu\nI0821 08:59:39.042727 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.042737 32364 net.cpp:165] Memory required for data: 2151630000\nI0821 08:59:39.042747 32364 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.042764 32364 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.042776 32364 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0821 08:59:39.042791 32364 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:39.042812 32364 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:39.042898 32364 net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.042925 32364 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0821 08:59:39.042942 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.042951 32364 net.cpp:165] Memory required for data: 2154906800\nI0821 08:59:39.042961 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0821 08:59:39.042982 32364 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0821 08:59:39.042995 32364 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:39.043015 32364 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0821 08:59:39.044081 32364 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0821 08:59:39.044101 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.044111 32364 net.cpp:165] Memory required for data: 2156545200\nI0821 08:59:39.044129 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0821 08:59:39.044157 32364 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0821 08:59:39.044170 32364 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0821 08:59:39.044188 32364 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0821 08:59:39.044498 32364 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0821 08:59:39.044517 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.044528 32364 net.cpp:165] Memory required for data: 2158183600\nI0821 08:59:39.044549 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:39.044570 32364 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0821 08:59:39.044582 32364 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0821 08:59:39.044598 32364 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.044694 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:39.044891 32364 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0821 08:59:39.044911 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.044920 32364 net.cpp:165] Memory required for data: 2159822000\nI0821 08:59:39.044939 32364 layer_factory.hpp:77] Creating layer 
L3_b11_cbr1_relu\nI0821 08:59:39.044963 32364 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0821 08:59:39.044975 32364 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0821 08:59:39.044997 32364 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.045019 32364 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0821 08:59:39.045034 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.045043 32364 net.cpp:165] Memory required for data: 2161460400\nI0821 08:59:39.045053 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0821 08:59:39.045074 32364 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0821 08:59:39.045086 32364 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0821 08:59:39.045107 32364 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0821 08:59:39.046172 32364 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0821 08:59:39.046193 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.046203 32364 net.cpp:165] Memory required for data: 2163098800\nI0821 08:59:39.046221 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0821 08:59:39.046245 32364 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0821 08:59:39.046259 32364 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0821 08:59:39.046277 32364 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0821 08:59:39.046587 32364 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0821 08:59:39.046607 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.046617 32364 net.cpp:165] Memory required for data: 2164737200\nI0821 08:59:39.046638 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:39.046654 32364 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0821 08:59:39.046666 32364 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0821 08:59:39.046681 32364 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top (in-place)\nI0821 08:59:39.046780 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 
08:59:39.046977 32364 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0821 08:59:39.046999 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.047009 32364 net.cpp:165] Memory required for data: 2166375600\nI0821 08:59:39.047026 32364 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0821 08:59:39.047044 32364 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0821 08:59:39.047055 32364 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0821 08:59:39.047068 32364 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:39.047089 32364 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0821 08:59:39.047152 32364 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0821 08:59:39.047171 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.047181 32364 net.cpp:165] Memory required for data: 2168014000\nI0821 08:59:39.047191 32364 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0821 08:59:39.047210 32364 net.cpp:100] Creating Layer L3_b11_relu\nI0821 08:59:39.047224 32364 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0821 08:59:39.047236 32364 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0821 08:59:39.047256 32364 net.cpp:150] Setting up L3_b11_relu\nI0821 08:59:39.047271 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.047279 32364 net.cpp:165] Memory required for data: 2169652400\nI0821 08:59:39.047289 32364 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:39.047307 32364 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:39.047318 32364 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0821 08:59:39.047334 32364 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:39.047354 32364 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> 
L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:39.047439 32364 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:39.047464 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.047489 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.047500 32364 net.cpp:165] Memory required for data: 2172929200\nI0821 08:59:39.047510 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0821 08:59:39.047531 32364 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0821 08:59:39.047544 32364 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:39.047562 32364 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0821 08:59:39.048647 32364 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0821 08:59:39.048667 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.048677 32364 net.cpp:165] Memory required for data: 2174567600\nI0821 08:59:39.048694 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0821 08:59:39.048718 32364 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0821 08:59:39.048732 32364 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0821 08:59:39.048749 32364 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0821 08:59:39.049055 32364 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0821 08:59:39.049074 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.049084 32364 net.cpp:165] Memory required for data: 2176206000\nI0821 08:59:39.049105 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:39.049126 32364 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0821 08:59:39.049139 32364 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0821 08:59:39.049167 32364 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.049268 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:39.049470 32364 net.cpp:150] Setting up L3_b12_cbr1_scale\nI0821 08:59:39.049489 32364 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.049499 32364 net.cpp:165] Memory required for data: 2177844400\nI0821 08:59:39.049517 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0821 08:59:39.049532 32364 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0821 08:59:39.049545 32364 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0821 08:59:39.049563 32364 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.049584 32364 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0821 08:59:39.049599 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.049608 32364 net.cpp:165] Memory required for data: 2179482800\nI0821 08:59:39.049619 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0821 08:59:39.049644 32364 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0821 08:59:39.049657 32364 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0821 08:59:39.049675 32364 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0821 08:59:39.051887 32364 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0821 08:59:39.051909 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.051920 32364 net.cpp:165] Memory required for data: 2181121200\nI0821 08:59:39.051939 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0821 08:59:39.051961 32364 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0821 08:59:39.051975 32364 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0821 08:59:39.051991 32364 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0821 08:59:39.052312 32364 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0821 08:59:39.052332 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.052341 32364 net.cpp:165] Memory required for data: 2182759600\nI0821 08:59:39.052364 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:39.052384 32364 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0821 08:59:39.052398 32364 net.cpp:434] L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0821 
08:59:39.052418 32364 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0821 08:59:39.052511 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:39.052716 32364 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0821 08:59:39.052734 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.052753 32364 net.cpp:165] Memory required for data: 2184398000\nI0821 08:59:39.052773 32364 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0821 08:59:39.052790 32364 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0821 08:59:39.052803 32364 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0821 08:59:39.052816 32364 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:39.052837 32364 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0821 08:59:39.052897 32364 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0821 08:59:39.052916 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.052924 32364 net.cpp:165] Memory required for data: 2186036400\nI0821 08:59:39.052934 32364 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0821 08:59:39.052953 32364 net.cpp:100] Creating Layer L3_b12_relu\nI0821 08:59:39.052966 32364 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0821 08:59:39.052981 32364 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0821 08:59:39.053000 32364 net.cpp:150] Setting up L3_b12_relu\nI0821 08:59:39.053014 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.053025 32364 net.cpp:165] Memory required for data: 2187674800\nI0821 08:59:39.053035 32364 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.053048 32364 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.053061 32364 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0821 08:59:39.053076 32364 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> 
L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:39.053097 32364 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:39.053194 32364 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.053215 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.053227 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.053237 32364 net.cpp:165] Memory required for data: 2190951600\nI0821 08:59:39.053246 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0821 08:59:39.053272 32364 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0821 08:59:39.053285 32364 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:39.053305 32364 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0821 08:59:39.054374 32364 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0821 08:59:39.054395 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.054404 32364 net.cpp:165] Memory required for data: 2192590000\nI0821 08:59:39.054422 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0821 08:59:39.054446 32364 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0821 08:59:39.054460 32364 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0821 08:59:39.054483 32364 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0821 08:59:39.054810 32364 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0821 08:59:39.054828 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.054837 32364 net.cpp:165] Memory required for data: 2194228400\nI0821 08:59:39.054859 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:39.054877 32364 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0821 08:59:39.054888 32364 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0821 08:59:39.054908 32364 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.055007 32364 layer_factory.hpp:77] Creating 
layer L3_b13_cbr1_scale\nI0821 08:59:39.055215 32364 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0821 08:59:39.055234 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.055243 32364 net.cpp:165] Memory required for data: 2195866800\nI0821 08:59:39.055263 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0821 08:59:39.055279 32364 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0821 08:59:39.055290 32364 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0821 08:59:39.055320 32364 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.055341 32364 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0821 08:59:39.055356 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.055366 32364 net.cpp:165] Memory required for data: 2197505200\nI0821 08:59:39.055377 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0821 08:59:39.055402 32364 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0821 08:59:39.055414 32364 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0821 08:59:39.055433 32364 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0821 08:59:39.056496 32364 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0821 08:59:39.056517 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.056527 32364 net.cpp:165] Memory required for data: 2199143600\nI0821 08:59:39.056545 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0821 08:59:39.056566 32364 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0821 08:59:39.056579 32364 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0821 08:59:39.056596 32364 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0821 08:59:39.056910 32364 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0821 08:59:39.056929 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.056938 32364 net.cpp:165] Memory required for data: 2200782000\nI0821 08:59:39.056962 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:39.056983 32364 
net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0821 08:59:39.056998 32364 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0821 08:59:39.057018 32364 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0821 08:59:39.057109 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:39.057317 32364 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0821 08:59:39.057335 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.057344 32364 net.cpp:165] Memory required for data: 2202420400\nI0821 08:59:39.057363 32364 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0821 08:59:39.057379 32364 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0821 08:59:39.057392 32364 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0821 08:59:39.057405 32364 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:39.057427 32364 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0821 08:59:39.057484 32364 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0821 08:59:39.057502 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.057513 32364 net.cpp:165] Memory required for data: 2204058800\nI0821 08:59:39.057523 32364 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0821 08:59:39.057541 32364 net.cpp:100] Creating Layer L3_b13_relu\nI0821 08:59:39.057555 32364 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0821 08:59:39.057569 32364 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0821 08:59:39.057588 32364 net.cpp:150] Setting up L3_b13_relu\nI0821 08:59:39.057603 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.057612 32364 net.cpp:165] Memory required for data: 2205697200\nI0821 08:59:39.057622 32364 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.057636 32364 net.cpp:100] Creating Layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.057646 32364 net.cpp:434] 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0821 08:59:39.057662 32364 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:39.057682 32364 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:39.057771 32364 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.057793 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.057806 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.057816 32364 net.cpp:165] Memory required for data: 2208974000\nI0821 08:59:39.057835 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0821 08:59:39.057860 32364 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0821 08:59:39.057874 32364 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:39.057893 32364 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0821 08:59:39.058961 32364 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0821 08:59:39.058982 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.058991 32364 net.cpp:165] Memory required for data: 2210612400\nI0821 08:59:39.059010 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0821 08:59:39.059031 32364 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0821 08:59:39.059043 32364 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0821 08:59:39.059065 32364 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0821 08:59:39.059376 32364 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0821 08:59:39.059396 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.059404 32364 net.cpp:165] Memory required for data: 2212250800\nI0821 08:59:39.059427 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:39.059442 32364 net.cpp:100] Creating Layer L3_b14_cbr1_scale\nI0821 08:59:39.059454 32364 net.cpp:434] L3_b14_cbr1_scale <- 
L3_b14_cbr1_bn_top\nI0821 08:59:39.059475 32364 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.059571 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:39.059772 32364 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0821 08:59:39.059792 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.059801 32364 net.cpp:165] Memory required for data: 2213889200\nI0821 08:59:39.059819 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0821 08:59:39.059835 32364 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0821 08:59:39.059847 32364 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0821 08:59:39.059866 32364 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.059887 32364 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0821 08:59:39.059902 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.059911 32364 net.cpp:165] Memory required for data: 2215527600\nI0821 08:59:39.059922 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0821 08:59:39.059947 32364 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0821 08:59:39.059962 32364 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0821 08:59:39.059983 32364 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0821 08:59:39.061058 32364 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0821 08:59:39.061079 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.061089 32364 net.cpp:165] Memory required for data: 2217166000\nI0821 08:59:39.061106 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0821 08:59:39.061123 32364 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0821 08:59:39.061136 32364 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0821 08:59:39.061167 32364 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0821 08:59:39.061471 32364 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0821 08:59:39.061494 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.061506 32364 
net.cpp:165] Memory required for data: 2218804400\nI0821 08:59:39.061527 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:39.061543 32364 net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0821 08:59:39.061555 32364 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0821 08:59:39.061570 32364 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0821 08:59:39.061663 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:39.061859 32364 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0821 08:59:39.061878 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.061888 32364 net.cpp:165] Memory required for data: 2220442800\nI0821 08:59:39.061906 32364 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0821 08:59:39.061923 32364 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0821 08:59:39.061949 32364 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0821 08:59:39.061964 32364 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:39.061980 32364 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0821 08:59:39.062043 32364 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0821 08:59:39.062063 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.062073 32364 net.cpp:165] Memory required for data: 2222081200\nI0821 08:59:39.062084 32364 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0821 08:59:39.062098 32364 net.cpp:100] Creating Layer L3_b14_relu\nI0821 08:59:39.062109 32364 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0821 08:59:39.062124 32364 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0821 08:59:39.062144 32364 net.cpp:150] Setting up L3_b14_relu\nI0821 08:59:39.062166 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.062177 32364 net.cpp:165] Memory required for data: 2223719600\nI0821 08:59:39.062186 32364 layer_factory.hpp:77] Creating layer 
L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.062288 32364 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.062304 32364 net.cpp:434] L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0821 08:59:39.062320 32364 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:39.062340 32364 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:39.062420 32364 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.062438 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.062453 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.062460 32364 net.cpp:165] Memory required for data: 2226996400\nI0821 08:59:39.062472 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0821 08:59:39.062497 32364 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0821 08:59:39.062511 32364 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:39.062536 32364 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0821 08:59:39.063611 32364 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0821 08:59:39.063630 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.063639 32364 net.cpp:165] Memory required for data: 2228634800\nI0821 08:59:39.063657 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0821 08:59:39.063675 32364 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0821 08:59:39.063688 32364 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0821 08:59:39.063710 32364 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0821 08:59:39.064021 32364 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0821 08:59:39.064040 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.064050 32364 net.cpp:165] Memory required for data: 2230273200\nI0821 08:59:39.064071 32364 layer_factory.hpp:77] 
Creating layer L3_b15_cbr1_scale\nI0821 08:59:39.064088 32364 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0821 08:59:39.064100 32364 net.cpp:434] L3_b15_cbr1_scale <- L3_b15_cbr1_bn_top\nI0821 08:59:39.064121 32364 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.064221 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:39.064421 32364 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0821 08:59:39.064440 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.064450 32364 net.cpp:165] Memory required for data: 2231911600\nI0821 08:59:39.064468 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0821 08:59:39.064488 32364 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0821 08:59:39.064502 32364 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0821 08:59:39.064517 32364 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.064537 32364 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0821 08:59:39.064549 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.064568 32364 net.cpp:165] Memory required for data: 2233550000\nI0821 08:59:39.064579 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0821 08:59:39.064604 32364 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0821 08:59:39.064617 32364 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0821 08:59:39.064640 32364 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0821 08:59:39.065713 32364 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0821 08:59:39.065734 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.065744 32364 net.cpp:165] Memory required for data: 2235188400\nI0821 08:59:39.065762 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0821 08:59:39.065779 32364 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0821 08:59:39.065793 32364 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0821 08:59:39.065814 32364 net.cpp:408] L3_b15_cbr2_bn -> 
L3_b15_cbr2_bn_top\nI0821 08:59:39.066126 32364 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0821 08:59:39.066155 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.066167 32364 net.cpp:165] Memory required for data: 2236826800\nI0821 08:59:39.066190 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:39.066206 32364 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0821 08:59:39.066220 32364 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0821 08:59:39.066236 32364 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0821 08:59:39.066339 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:39.066540 32364 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0821 08:59:39.066558 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.066568 32364 net.cpp:165] Memory required for data: 2238465200\nI0821 08:59:39.066586 32364 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0821 08:59:39.066607 32364 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0821 08:59:39.066620 32364 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0821 08:59:39.066634 32364 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:39.066650 32364 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0821 08:59:39.066714 32364 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0821 08:59:39.066731 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.066741 32364 net.cpp:165] Memory required for data: 2240103600\nI0821 08:59:39.066751 32364 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0821 08:59:39.066766 32364 net.cpp:100] Creating Layer L3_b15_relu\nI0821 08:59:39.066778 32364 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0821 08:59:39.066792 32364 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0821 08:59:39.066812 32364 net.cpp:150] Setting up L3_b15_relu\nI0821 08:59:39.066826 32364 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:39.066835 32364 net.cpp:165] Memory required for data: 2241742000\nI0821 08:59:39.066844 32364 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.066859 32364 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.066869 32364 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0821 08:59:39.066889 32364 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:39.066910 32364 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:39.066994 32364 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.067014 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.067028 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.067039 32364 net.cpp:165] Memory required for data: 2245018800\nI0821 08:59:39.067049 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0821 08:59:39.067073 32364 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0821 08:59:39.067087 32364 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:39.067116 32364 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0821 08:59:39.069211 32364 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0821 08:59:39.069233 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.069243 32364 net.cpp:165] Memory required for data: 2246657200\nI0821 08:59:39.069262 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0821 08:59:39.069283 32364 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0821 08:59:39.069298 32364 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0821 08:59:39.069314 32364 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0821 08:59:39.069633 32364 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0821 08:59:39.069653 32364 net.cpp:157] Top shape: 
100 64 8 8 (409600)\nI0821 08:59:39.069663 32364 net.cpp:165] Memory required for data: 2248295600\nI0821 08:59:39.069684 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:39.069701 32364 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0821 08:59:39.069712 32364 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0821 08:59:39.069728 32364 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.069826 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:39.070029 32364 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0821 08:59:39.070051 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.070061 32364 net.cpp:165] Memory required for data: 2249934000\nI0821 08:59:39.070080 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0821 08:59:39.070096 32364 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0821 08:59:39.070107 32364 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0821 08:59:39.070122 32364 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.070143 32364 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0821 08:59:39.070166 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.070176 32364 net.cpp:165] Memory required for data: 2251572400\nI0821 08:59:39.070186 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0821 08:59:39.070212 32364 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0821 08:59:39.070226 32364 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0821 08:59:39.070247 32364 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0821 08:59:39.071317 32364 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0821 08:59:39.071337 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.071347 32364 net.cpp:165] Memory required for data: 2253210800\nI0821 08:59:39.071365 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0821 08:59:39.071388 32364 net.cpp:100] Creating Layer L3_b16_cbr2_bn\nI0821 
08:59:39.071399 32364 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0821 08:59:39.071418 32364 net.cpp:408] L3_b16_cbr2_bn -> L3_b16_cbr2_bn_top\nI0821 08:59:39.071732 32364 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0821 08:59:39.071751 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.071761 32364 net.cpp:165] Memory required for data: 2254849200\nI0821 08:59:39.071782 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:39.071802 32364 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0821 08:59:39.071815 32364 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0821 08:59:39.071832 32364 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0821 08:59:39.071930 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:39.072134 32364 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0821 08:59:39.072157 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.072168 32364 net.cpp:165] Memory required for data: 2256487600\nI0821 08:59:39.072186 32364 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0821 08:59:39.072207 32364 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0821 08:59:39.072221 32364 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0821 08:59:39.072235 32364 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:39.072263 32364 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0821 08:59:39.072326 32364 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0821 08:59:39.072346 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.072356 32364 net.cpp:165] Memory required for data: 2258126000\nI0821 08:59:39.072366 32364 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0821 08:59:39.072379 32364 net.cpp:100] Creating Layer L3_b16_relu\nI0821 08:59:39.072392 32364 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0821 08:59:39.072412 32364 net.cpp:395] L3_b16_relu -> L3_b16_sum_eltwise_top 
(in-place)\nI0821 08:59:39.072432 32364 net.cpp:150] Setting up L3_b16_relu\nI0821 08:59:39.072446 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.072456 32364 net.cpp:165] Memory required for data: 2259764400\nI0821 08:59:39.072465 32364 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.072480 32364 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.072492 32364 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0821 08:59:39.072507 32364 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:39.072527 32364 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:39.072619 32364 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.072640 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.072654 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.072664 32364 net.cpp:165] Memory required for data: 2263041200\nI0821 08:59:39.072674 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0821 08:59:39.072695 32364 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0821 08:59:39.072708 32364 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:39.072731 32364 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0821 08:59:39.073806 32364 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0821 08:59:39.073825 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.073835 32364 net.cpp:165] Memory required for data: 2264679600\nI0821 08:59:39.073853 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0821 08:59:39.073874 32364 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0821 08:59:39.073887 32364 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0821 08:59:39.073905 32364 net.cpp:408] L3_b17_cbr1_bn -> 
L3_b17_cbr1_bn_top\nI0821 08:59:39.074224 32364 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0821 08:59:39.074244 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.074254 32364 net.cpp:165] Memory required for data: 2266318000\nI0821 08:59:39.074275 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:39.074291 32364 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0821 08:59:39.074304 32364 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0821 08:59:39.074319 32364 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.074420 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:39.074620 32364 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0821 08:59:39.074637 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.074647 32364 net.cpp:165] Memory required for data: 2267956400\nI0821 08:59:39.074666 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0821 08:59:39.074682 32364 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0821 08:59:39.074694 32364 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0821 08:59:39.074709 32364 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.074729 32364 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0821 08:59:39.074743 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.074751 32364 net.cpp:165] Memory required for data: 2269594800\nI0821 08:59:39.074762 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0821 08:59:39.074800 32364 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0821 08:59:39.074815 32364 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0821 08:59:39.074837 32364 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0821 08:59:39.075932 32364 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0821 08:59:39.075953 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.075961 32364 net.cpp:165] Memory required for data: 2271233200\nI0821 08:59:39.075979 
32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0821 08:59:39.076004 32364 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0821 08:59:39.076016 32364 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0821 08:59:39.076033 32364 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0821 08:59:39.076350 32364 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0821 08:59:39.076370 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.076380 32364 net.cpp:165] Memory required for data: 2272871600\nI0821 08:59:39.076401 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:39.076421 32364 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0821 08:59:39.076434 32364 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0821 08:59:39.076450 32364 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0821 08:59:39.076548 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:39.076748 32364 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0821 08:59:39.076767 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.076776 32364 net.cpp:165] Memory required for data: 2274510000\nI0821 08:59:39.076795 32364 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0821 08:59:39.076817 32364 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0821 08:59:39.076831 32364 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0821 08:59:39.076845 32364 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:39.076865 32364 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0821 08:59:39.076923 32364 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0821 08:59:39.076941 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.076951 32364 net.cpp:165] Memory required for data: 2276148400\nI0821 08:59:39.076961 32364 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0821 08:59:39.076980 32364 net.cpp:100] Creating Layer L3_b17_relu\nI0821 08:59:39.076994 32364 
net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0821 08:59:39.077008 32364 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top (in-place)\nI0821 08:59:39.077028 32364 net.cpp:150] Setting up L3_b17_relu\nI0821 08:59:39.077044 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.077052 32364 net.cpp:165] Memory required for data: 2277786800\nI0821 08:59:39.077064 32364 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.077076 32364 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.077088 32364 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0821 08:59:39.077103 32364 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:39.077123 32364 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:39.077220 32364 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.077241 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.077255 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.077265 32364 net.cpp:165] Memory required for data: 2281063600\nI0821 08:59:39.077275 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0821 08:59:39.077299 32364 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0821 08:59:39.077313 32364 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:39.077332 32364 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0821 08:59:39.078416 32364 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0821 08:59:39.078447 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.078457 32364 net.cpp:165] Memory required for data: 2282702000\nI0821 08:59:39.078475 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0821 08:59:39.078496 32364 net.cpp:100] Creating Layer L3_b18_cbr1_bn\nI0821 
08:59:39.078510 32364 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0821 08:59:39.078527 32364 net.cpp:408] L3_b18_cbr1_bn -> L3_b18_cbr1_bn_top\nI0821 08:59:39.078845 32364 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0821 08:59:39.078863 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.078873 32364 net.cpp:165] Memory required for data: 2284340400\nI0821 08:59:39.078896 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:39.078912 32364 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0821 08:59:39.078924 32364 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0821 08:59:39.078939 32364 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.079041 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:39.079248 32364 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0821 08:59:39.079268 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.079278 32364 net.cpp:165] Memory required for data: 2285978800\nI0821 08:59:39.079296 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0821 08:59:39.079311 32364 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0821 08:59:39.079324 32364 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0821 08:59:39.079344 32364 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.079365 32364 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0821 08:59:39.079378 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.079388 32364 net.cpp:165] Memory required for data: 2287617200\nI0821 08:59:39.079398 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0821 08:59:39.079422 32364 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0821 08:59:39.079437 32364 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0821 08:59:39.079453 32364 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0821 08:59:39.080533 32364 net.cpp:150] Setting up L3_b18_cbr2_conv\nI0821 08:59:39.080552 32364 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.080561 32364 net.cpp:165] Memory required for data: 2289255600\nI0821 08:59:39.080579 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0821 08:59:39.080600 32364 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0821 08:59:39.080613 32364 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0821 08:59:39.080631 32364 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0821 08:59:39.080938 32364 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0821 08:59:39.080958 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.080967 32364 net.cpp:165] Memory required for data: 2290894000\nI0821 08:59:39.080989 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:39.081009 32364 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0821 08:59:39.081022 32364 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0821 08:59:39.081038 32364 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0821 08:59:39.081140 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:39.081344 32364 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0821 08:59:39.081363 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.081372 32364 net.cpp:165] Memory required for data: 2292532400\nI0821 08:59:39.081390 32364 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0821 08:59:39.081413 32364 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0821 08:59:39.081426 32364 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0821 08:59:39.081440 32364 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:39.081461 32364 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0821 08:59:39.081519 32364 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0821 08:59:39.081537 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.081555 32364 net.cpp:165] Memory required for data: 2294170800\nI0821 08:59:39.081567 32364 layer_factory.hpp:77] 
Creating layer L3_b18_relu\nI0821 08:59:39.081585 32364 net.cpp:100] Creating Layer L3_b18_relu\nI0821 08:59:39.081598 32364 net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0821 08:59:39.081614 32364 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0821 08:59:39.081631 32364 net.cpp:150] Setting up L3_b18_relu\nI0821 08:59:39.081646 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.081655 32364 net.cpp:165] Memory required for data: 2295809200\nI0821 08:59:39.081665 32364 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:59:39.081681 32364 net.cpp:100] Creating Layer post_pool\nI0821 08:59:39.081692 32364 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0821 08:59:39.081709 32364 net.cpp:408] post_pool -> post_pool\nI0821 08:59:39.081773 32364 net.cpp:150] Setting up post_pool\nI0821 08:59:39.081792 32364 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0821 08:59:39.081802 32364 net.cpp:165] Memory required for data: 2295834800\nI0821 08:59:39.081812 32364 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:59:39.081928 32364 net.cpp:100] Creating Layer post_FC\nI0821 08:59:39.081945 32364 net.cpp:434] post_FC <- post_pool\nI0821 08:59:39.081964 32364 net.cpp:408] post_FC -> post_FC_top\nI0821 08:59:39.082283 32364 net.cpp:150] Setting up post_FC\nI0821 08:59:39.082303 32364 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:39.082312 32364 net.cpp:165] Memory required for data: 2295838800\nI0821 08:59:39.082331 32364 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:59:39.082346 32364 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:59:39.082358 32364 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:59:39.082373 32364 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:59:39.082398 32364 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:59:39.082484 32364 net.cpp:150] Setting up 
post_FC_top_post_FC_0_split\nI0821 08:59:39.082505 32364 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:39.082518 32364 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:39.082528 32364 net.cpp:165] Memory required for data: 2295846800\nI0821 08:59:39.082538 32364 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:59:39.082602 32364 net.cpp:100] Creating Layer accuracy\nI0821 08:59:39.082619 32364 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:59:39.082633 32364 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:59:39.082649 32364 net.cpp:408] accuracy -> accuracy\nI0821 08:59:39.082720 32364 net.cpp:150] Setting up accuracy\nI0821 08:59:39.082738 32364 net.cpp:157] Top shape: (1)\nI0821 08:59:39.082748 32364 net.cpp:165] Memory required for data: 2295846804\nI0821 08:59:39.082759 32364 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:39.082774 32364 net.cpp:100] Creating Layer loss\nI0821 08:59:39.082787 32364 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:59:39.082799 32364 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:59:39.082815 32364 net.cpp:408] loss -> loss\nI0821 08:59:39.084439 32364 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:39.085760 32364 net.cpp:150] Setting up loss\nI0821 08:59:39.085782 32364 net.cpp:157] Top shape: (1)\nI0821 08:59:39.085790 32364 net.cpp:160]     with loss weight 1\nI0821 08:59:39.085897 32364 net.cpp:165] Memory required for data: 2295846808\nI0821 08:59:39.085912 32364 net.cpp:226] loss needs backward computation.\nI0821 08:59:39.085924 32364 net.cpp:228] accuracy does not need backward computation.\nI0821 08:59:39.085935 32364 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:59:39.085945 32364 net.cpp:226] post_FC needs backward computation.\nI0821 08:59:39.085954 32364 net.cpp:226] post_pool needs backward computation.\nI0821 08:59:39.085964 32364 net.cpp:226] L3_b18_relu needs backward computation.\nI0821 
08:59:39.085973 32364 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.085992 32364 net.cpp:226] L3_b18_cbr2_scale needs backward computation.\nI0821 08:59:39.086002 32364 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.086012 32364 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.086022 32364 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.086032 32364 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.086042 32364 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.086052 32364 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.086062 32364 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0821 08:59:39.086073 32364 net.cpp:226] L3_b17_relu needs backward computation.\nI0821 08:59:39.086083 32364 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.086093 32364 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.086103 32364 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.086113 32364 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.086123 32364 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.086133 32364 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.086143 32364 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.086161 32364 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.086172 32364 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0821 08:59:39.086184 32364 net.cpp:226] L3_b16_relu needs backward computation.\nI0821 08:59:39.086192 32364 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.086203 32364 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.086212 32364 net.cpp:226] 
L3_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.086222 32364 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.086233 32364 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.086243 32364 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.086252 32364 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.086262 32364 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.086273 32364 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0821 08:59:39.086283 32364 net.cpp:226] L3_b15_relu needs backward computation.\nI0821 08:59:39.086293 32364 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.086303 32364 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.086313 32364 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.086323 32364 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.086333 32364 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.086343 32364 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.086352 32364 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.086362 32364 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.086372 32364 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0821 08:59:39.086382 32364 net.cpp:226] L3_b14_relu needs backward computation.\nI0821 08:59:39.086392 32364 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0821 08:59:39.086402 32364 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.086412 32364 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.086422 32364 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0821 08:59:39.086433 32364 net.cpp:226] L3_b14_cbr1_relu needs backward 
computation.\nI0821 08:59:39.086443 32364 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.086453 32364 net.cpp:226] L3_b14_cbr1_bn needs backward computation.\nI0821 08:59:39.086472 32364 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.086483 32364 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0821 08:59:39.086493 32364 net.cpp:226] L3_b13_relu needs backward computation.\nI0821 08:59:39.086503 32364 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.086522 32364 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.086535 32364 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.086546 32364 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.086556 32364 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.086565 32364 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.086575 32364 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.086585 32364 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.086596 32364 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0821 08:59:39.086606 32364 net.cpp:226] L3_b12_relu needs backward computation.\nI0821 08:59:39.086616 32364 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.086628 32364 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0821 08:59:39.086638 32364 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0821 08:59:39.086648 32364 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.086658 32364 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0821 08:59:39.086668 32364 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.086678 32364 net.cpp:226] L3_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.086688 32364 
net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.086699 32364 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward computation.\nI0821 08:59:39.086709 32364 net.cpp:226] L3_b11_relu needs backward computation.\nI0821 08:59:39.086719 32364 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.086730 32364 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.086740 32364 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.086751 32364 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.086761 32364 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.086771 32364 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.086781 32364 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.086791 32364 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.086802 32364 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0821 08:59:39.086812 32364 net.cpp:226] L3_b10_relu needs backward computation.\nI0821 08:59:39.086822 32364 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.086833 32364 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.086843 32364 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.086853 32364 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.086863 32364 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.086874 32364 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.086884 32364 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.086894 32364 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.086905 32364 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0821 08:59:39.086915 32364 net.cpp:226] 
L3_b9_relu needs backward computation.\nI0821 08:59:39.086925 32364 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.086935 32364 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.086944 32364 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.086956 32364 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.086974 32364 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.086985 32364 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.086995 32364 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.087005 32364 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.087016 32364 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:59:39.087028 32364 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:59:39.087038 32364 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.087049 32364 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.087057 32364 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.087067 32364 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.087083 32364 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.087093 32364 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.087103 32364 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:59:39.087113 32364 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:59:39.087126 32364 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:59:39.087136 32364 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:59:39.087152 32364 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0821 08:59:39.087164 32364 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.087174 
32364 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.087184 32364 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:59:39.087195 32364 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.087204 32364 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.087214 32364 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.087225 32364 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.087235 32364 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:59:39.087247 32364 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:59:39.087257 32364 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.087268 32364 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.087278 32364 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.087290 32364 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.087299 32364 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.087309 32364 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.087318 32364 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.087329 32364 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.087339 32364 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:59:39.087349 32364 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:59:39.087359 32364 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.087370 32364 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.087380 32364 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.087390 32364 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.087401 32364 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 
08:59:39.087411 32364 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.087421 32364 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:59:39.087431 32364 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.087442 32364 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:59:39.087453 32364 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:59:39.087465 32364 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.087482 32364 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.087493 32364 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.087505 32364 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.087517 32364 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.087527 32364 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.087537 32364 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.087546 32364 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.087558 32364 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:59:39.087568 32364 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:59:39.087577 32364 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.087589 32364 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.087599 32364 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.087610 32364 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.087620 32364 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.087630 32364 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.087638 32364 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.087649 32364 net.cpp:226] L3_b3_cbr1_conv needs backward 
computation.\nI0821 08:59:39.087661 32364 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:59:39.087671 32364 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:59:39.087682 32364 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.087693 32364 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.087703 32364 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.087714 32364 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.087725 32364 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.087735 32364 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.087744 32364 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.087754 32364 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.087766 32364 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:59:39.087776 32364 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:59:39.087795 32364 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:59:39.087805 32364 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:59:39.087815 32364 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.087827 32364 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:59:39.087839 32364 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.087851 32364 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.087860 32364 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.087872 32364 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:59:39.087882 32364 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 08:59:39.087891 32364 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.087903 32364 net.cpp:226] L3_b1_cbr1_conv 
needs backward computation.\nI0821 08:59:39.087914 32364 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0821 08:59:39.087926 32364 net.cpp:226] L2_b18_relu needs backward computation.\nI0821 08:59:39.087936 32364 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.087949 32364 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0821 08:59:39.087959 32364 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.087970 32364 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.087980 32364 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.088001 32364 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.088011 32364 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.088023 32364 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.088035 32364 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0821 08:59:39.088047 32364 net.cpp:226] L2_b17_relu needs backward computation.\nI0821 08:59:39.088057 32364 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.088068 32364 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.088079 32364 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.088089 32364 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.088099 32364 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.088110 32364 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.088119 32364 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.088130 32364 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.088142 32364 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0821 08:59:39.088161 32364 net.cpp:226] L2_b16_relu needs backward 
computation.\nI0821 08:59:39.088172 32364 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.088183 32364 net.cpp:226] L2_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.088193 32364 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.088205 32364 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.088217 32364 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.088227 32364 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.088237 32364 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.088246 32364 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.088258 32364 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0821 08:59:39.088268 32364 net.cpp:226] L2_b15_relu needs backward computation.\nI0821 08:59:39.088279 32364 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.088290 32364 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.088300 32364 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.088311 32364 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.088322 32364 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.088333 32364 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.088343 32364 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.088353 32364 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.088366 32364 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0821 08:59:39.088376 32364 net.cpp:226] L2_b14_relu needs backward computation.\nI0821 08:59:39.088387 32364 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0821 08:59:39.088398 32364 net.cpp:226] L2_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.088408 
32364 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.088419 32364 net.cpp:226] L2_b14_cbr2_conv needs backward computation.\nI0821 08:59:39.088430 32364 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0821 08:59:39.088440 32364 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.088451 32364 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0821 08:59:39.088462 32364 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.088474 32364 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0821 08:59:39.088485 32364 net.cpp:226] L2_b13_relu needs backward computation.\nI0821 08:59:39.088495 32364 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.088517 32364 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.088529 32364 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.088541 32364 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.088551 32364 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.088562 32364 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.088572 32364 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.088583 32364 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.088594 32364 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0821 08:59:39.088606 32364 net.cpp:226] L2_b12_relu needs backward computation.\nI0821 08:59:39.088618 32364 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.088629 32364 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0821 08:59:39.088640 32364 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0821 08:59:39.088651 32364 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.088662 32364 net.cpp:226] L2_b12_cbr1_relu needs 
backward computation.\nI0821 08:59:39.088672 32364 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.088683 32364 net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.088696 32364 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.088706 32364 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0821 08:59:39.088717 32364 net.cpp:226] L2_b11_relu needs backward computation.\nI0821 08:59:39.088728 32364 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.088739 32364 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.088750 32364 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.088762 32364 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.088773 32364 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.088783 32364 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.088793 32364 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.088804 32364 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.088815 32364 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0821 08:59:39.088826 32364 net.cpp:226] L2_b10_relu needs backward computation.\nI0821 08:59:39.088836 32364 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.088850 32364 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.088860 32364 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.088871 32364 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.088883 32364 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.088893 32364 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.088904 32364 net.cpp:226] L2_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.088917 
32364 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.088927 32364 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:59:39.088937 32364 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:59:39.088948 32364 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.088960 32364 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.088970 32364 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.088981 32364 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.088992 32364 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.089002 32364 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.089012 32364 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.089023 32364 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.089046 32364 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:59:39.089057 32364 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:59:39.089068 32364 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.089082 32364 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.089092 32364 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.089104 32364 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.089114 32364 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.089124 32364 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.089134 32364 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:59:39.089151 32364 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:59:39.089164 32364 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:59:39.089185 32364 net.cpp:226] L2_b7_relu needs 
backward computation.\nI0821 08:59:39.089197 32364 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:59:39.089210 32364 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.089222 32364 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.089233 32364 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:59:39.089244 32364 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.089254 32364 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.089265 32364 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.089277 32364 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.089289 32364 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:59:39.089300 32364 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:59:39.089311 32364 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.089323 32364 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.089334 32364 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.089345 32364 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.089356 32364 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.089366 32364 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.089376 32364 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.089388 32364 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.089399 32364 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:59:39.089411 32364 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:59:39.089421 32364 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.089434 32364 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.089445 32364 net.cpp:226] 
L2_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.089457 32364 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.089468 32364 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:59:39.089478 32364 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.089488 32364 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:59:39.089499 32364 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.089511 32364 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:59:39.089521 32364 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:59:39.089532 32364 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.089545 32364 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.089556 32364 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.089567 32364 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.089578 32364 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.089597 32364 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.089608 32364 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.089619 32364 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.089630 32364 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:59:39.089643 32364 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:59:39.089653 32364 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.089665 32364 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.089675 32364 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.089694 32364 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.089705 32364 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.089715 
32364 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.089725 32364 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.089736 32364 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:59:39.089748 32364 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:59:39.089759 32364 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:59:39.089769 32364 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.089782 32364 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.089793 32364 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.089804 32364 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.089815 32364 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.089828 32364 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.089838 32364 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.089848 32364 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.089860 32364 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:59:39.089872 32364 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:59:39.089885 32364 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:59:39.089895 32364 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:59:39.089906 32364 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.089920 32364 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:59:39.089931 32364 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.089941 32364 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.089953 32364 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.089964 32364 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 
08:59:39.089975 32364 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:59:39.089987 32364 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.089998 32364 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:59:39.090010 32364 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0821 08:59:39.090021 32364 net.cpp:226] L1_b18_relu needs backward computation.\nI0821 08:59:39.090032 32364 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.090044 32364 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0821 08:59:39.090055 32364 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.090066 32364 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.090077 32364 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.090088 32364 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.090100 32364 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.090111 32364 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.090121 32364 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0821 08:59:39.090142 32364 net.cpp:226] L1_b17_relu needs backward computation.\nI0821 08:59:39.090159 32364 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.090173 32364 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.090184 32364 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.090196 32364 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.090209 32364 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.090219 32364 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.090230 32364 net.cpp:226] L1_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.090241 32364 net.cpp:226] 
L1_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.090253 32364 net.cpp:226] L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0821 08:59:39.090265 32364 net.cpp:226] L1_b16_relu needs backward computation.\nI0821 08:59:39.090276 32364 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.090288 32364 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.090298 32364 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.090311 32364 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.090322 32364 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.090332 32364 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.090342 32364 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.090354 32364 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.090366 32364 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0821 08:59:39.090378 32364 net.cpp:226] L1_b15_relu needs backward computation.\nI0821 08:59:39.090389 32364 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.090400 32364 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.090411 32364 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.090422 32364 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.090433 32364 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.090445 32364 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.090456 32364 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.090466 32364 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.090477 32364 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward computation.\nI0821 08:59:39.090489 32364 net.cpp:226] L1_b14_relu needs 
backward computation.\nI0821 08:59:39.090500 32364 net.cpp:226] L1_b14_sum_eltwise needs backward computation.\nI0821 08:59:39.090512 32364 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.090523 32364 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.090533 32364 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0821 08:59:39.090543 32364 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0821 08:59:39.090554 32364 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.090564 32364 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0821 08:59:39.090580 32364 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.090593 32364 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0821 08:59:39.090605 32364 net.cpp:226] L1_b13_relu needs backward computation.\nI0821 08:59:39.090616 32364 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.090628 32364 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.090639 32364 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.090651 32364 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.090662 32364 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.090672 32364 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.090692 32364 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.090703 32364 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.090714 32364 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0821 08:59:39.090725 32364 net.cpp:226] L1_b12_relu needs backward computation.\nI0821 08:59:39.090736 32364 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.090749 32364 net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0821 
08:59:39.090760 32364 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0821 08:59:39.090771 32364 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.090782 32364 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0821 08:59:39.090792 32364 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.090803 32364 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.090814 32364 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.090826 32364 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0821 08:59:39.090837 32364 net.cpp:226] L1_b11_relu needs backward computation.\nI0821 08:59:39.090848 32364 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.090860 32364 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.090872 32364 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.090883 32364 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.090894 32364 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.090905 32364 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.090916 32364 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.090929 32364 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.090939 32364 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0821 08:59:39.090950 32364 net.cpp:226] L1_b10_relu needs backward computation.\nI0821 08:59:39.090962 32364 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.090975 32364 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.090986 32364 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.090996 32364 net.cpp:226] L1_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.091008 32364 net.cpp:226] 
L1_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.091019 32364 net.cpp:226] L1_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.091029 32364 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.091042 32364 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.091054 32364 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:59:39.091065 32364 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:59:39.091075 32364 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.091089 32364 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.091099 32364 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.091109 32364 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.091121 32364 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.091132 32364 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.091141 32364 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.091161 32364 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.091173 32364 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:59:39.091184 32364 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:59:39.091194 32364 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.091207 32364 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.091226 32364 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.091238 32364 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.091250 32364 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.091261 32364 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.091271 32364 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 
08:59:39.091284 32364 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:59:39.091296 32364 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:59:39.091307 32364 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:59:39.091318 32364 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:59:39.091331 32364 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.091343 32364 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.091354 32364 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:59:39.091367 32364 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.091377 32364 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.091387 32364 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.091398 32364 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.091410 32364 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:59:39.091423 32364 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:59:39.091434 32364 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.091446 32364 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.091457 32364 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.091469 32364 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.091480 32364 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.091491 32364 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.091501 32364 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.091513 32364 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.091526 32364 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:59:39.091537 32364 net.cpp:226] 
L1_b5_relu needs backward computation.\nI0821 08:59:39.091547 32364 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.091560 32364 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.091572 32364 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.091584 32364 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.091596 32364 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:59:39.091606 32364 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.091619 32364 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:59:39.091629 32364 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.091641 32364 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:59:39.091653 32364 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:59:39.091665 32364 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.091677 32364 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.091689 32364 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.091701 32364 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.091712 32364 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.091724 32364 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.091735 32364 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.091747 32364 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.091758 32364 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:59:39.091779 32364 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:59:39.091791 32364 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.091804 32364 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.091816 
32364 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.091828 32364 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.091840 32364 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.091850 32364 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.091861 32364 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.091872 32364 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:59:39.091886 32364 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:59:39.091897 32364 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:59:39.091908 32364 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.091922 32364 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.091933 32364 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.091945 32364 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.091956 32364 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.091969 32364 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.091979 32364 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.091990 32364 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.092007 32364 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:59:39.092020 32364 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:59:39.092031 32364 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.092043 32364 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.092054 32364 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.092067 32364 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.092078 32364 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 
08:59:39.092090 32364 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:59:39.092102 32364 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.092113 32364 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:59:39.092124 32364 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:59:39.092135 32364 net.cpp:226] pre_relu needs backward computation.\nI0821 08:59:39.092152 32364 net.cpp:226] pre_scale needs backward computation.\nI0821 08:59:39.092164 32364 net.cpp:226] pre_bn needs backward computation.\nI0821 08:59:39.092175 32364 net.cpp:226] pre_conv needs backward computation.\nI0821 08:59:39.092186 32364 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:59:39.092198 32364 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:59:39.092207 32364 net.cpp:270] This network produces output accuracy\nI0821 08:59:39.092221 32364 net.cpp:270] This network produces output loss\nI0821 08:59:39.092983 32364 net.cpp:283] Network initialization done.\nI0821 08:59:39.110307 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:39.110388 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:39.110460 32364 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:59:39.111208 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:59:39.111233 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:59:39.111253 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:59:39.111284 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr2_bn\nI0821 08:59:39.111305 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:59:39.111325 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:59:39.111344 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:59:39.111363 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:59:39.111384 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:59:39.111403 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:59:39.111423 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:59:39.111441 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:59:39.111461 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:59:39.111480 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:59:39.111500 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:59:39.111521 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:59:39.111541 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:59:39.111559 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 08:59:39.111580 32364 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:59:39.111599 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:59:39.111619 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr1_bn\nI0821 08:59:39.111639 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr2_bn\nI0821 08:59:39.111656 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr1_bn\nI0821 08:59:39.111675 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr2_bn\nI0821 08:59:39.111694 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr1_bn\nI0821 08:59:39.111712 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr2_bn\nI0821 08:59:39.111733 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr1_bn\nI0821 08:59:39.111752 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr2_bn\nI0821 08:59:39.111769 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr1_bn\nI0821 08:59:39.111788 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr2_bn\nI0821 08:59:39.111806 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr1_bn\nI0821 08:59:39.111824 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr2_bn\nI0821 08:59:39.111843 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b16_cbr1_bn\nI0821 
08:59:39.111860 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b16_cbr2_bn\nI0821 08:59:39.111888 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr1_bn\nI0821 08:59:39.112089 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr2_bn\nI0821 08:59:39.112112 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr1_bn\nI0821 08:59:39.112133 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr2_bn\nI0821 08:59:39.112159 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:59:39.112179 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:59:39.112205 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:59:39.112223 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:59:39.112241 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:59:39.112259 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:59:39.112277 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:59:39.112295 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:59:39.112314 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:59:39.112331 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) 
specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:59:39.112351 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:59:39.112367 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:59:39.112385 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:59:39.112403 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:59:39.112423 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:59:39.112442 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:59:39.112460 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:59:39.112478 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:59:39.112496 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr1_bn\nI0821 08:59:39.112514 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr2_bn\nI0821 08:59:39.112534 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr1_bn\nI0821 08:59:39.112552 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr2_bn\nI0821 08:59:39.112571 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b12_cbr1_bn\nI0821 08:59:39.112588 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b12_cbr2_bn\nI0821 08:59:39.112608 32364 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr1_bn\nI0821 08:59:39.112627 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr2_bn\nI0821 08:59:39.112655 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr1_bn\nI0821 08:59:39.112673 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr2_bn\nI0821 08:59:39.112690 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr1_bn\nI0821 08:59:39.112709 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr2_bn\nI0821 08:59:39.112730 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr1_bn\nI0821 08:59:39.112747 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr2_bn\nI0821 08:59:39.112766 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr1_bn\nI0821 08:59:39.112783 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr2_bn\nI0821 08:59:39.112802 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr1_bn\nI0821 08:59:39.112819 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr2_bn\nI0821 08:59:39.112838 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:59:39.112856 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:59:39.112879 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b2_cbr1_bn\nI0821 08:59:39.112898 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:59:39.112917 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:59:39.112936 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:59:39.112952 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:59:39.112968 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:59:39.112988 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:59:39.113005 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:59:39.113024 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:59:39.113041 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:59:39.113059 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:59:39.113076 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:59:39.113095 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:59:39.113113 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:59:39.113132 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:59:39.113157 32364 net.cpp:322] The NetState phase (1) differed from 
the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:59:39.113178 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr1_bn\nI0821 08:59:39.113194 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr2_bn\nI0821 08:59:39.113221 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr1_bn\nI0821 08:59:39.113240 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr2_bn\nI0821 08:59:39.113260 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr1_bn\nI0821 08:59:39.113278 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr2_bn\nI0821 08:59:39.113297 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr1_bn\nI0821 08:59:39.113313 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr2_bn\nI0821 08:59:39.113332 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr1_bn\nI0821 08:59:39.113350 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr2_bn\nI0821 08:59:39.113369 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr1_bn\nI0821 08:59:39.113386 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr2_bn\nI0821 08:59:39.113404 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b16_cbr1_bn\nI0821 08:59:39.113422 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b16_cbr2_bn\nI0821 08:59:39.113440 
32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr1_bn\nI0821 08:59:39.113458 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr2_bn\nI0821 08:59:39.113477 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr1_bn\nI0821 08:59:39.113497 32364 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr2_bn\nI0821 08:59:39.116662 32364 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: 
\"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: 
\"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: 
\"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: 
\"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n   
 use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight\nI0821 08:59:39.120055 32364 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:59:39.120311 32364 net.cpp:100] Creating Layer dataLayer\nI0821 08:59:39.120339 32364 net.cpp:408] dataLayer -> data_top\nI0821 08:59:39.120367 32364 net.cpp:408] dataLayer -> label\nI0821 08:59:39.120390 32364 data_transformer.cpp:25] 
Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:59:39.134192 32372 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:59:39.134505 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:39.141281 32364 net.cpp:150] Setting up dataLayer\nI0821 08:59:39.141306 32364 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0821 08:59:39.141378 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:39.141392 32364 net.cpp:165] Memory required for data: 1229200\nI0821 08:59:39.141405 32364 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:59:39.141422 32364 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:59:39.141435 32364 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:59:39.141453 32364 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:59:39.141474 32364 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:59:39.141633 32364 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:59:39.141651 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:39.141664 32364 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:39.141674 32364 net.cpp:165] Memory required for data: 1230000\nI0821 08:59:39.141683 32364 layer_factory.hpp:77] Creating layer pre_conv\nI0821 08:59:39.141715 32364 net.cpp:100] Creating Layer pre_conv\nI0821 08:59:39.141727 32364 net.cpp:434] pre_conv <- data_top\nI0821 08:59:39.141752 32364 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:59:39.142313 32364 net.cpp:150] Setting up pre_conv\nI0821 08:59:39.142334 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.142346 32364 net.cpp:165] Memory required for data: 7783600\nI0821 08:59:39.142374 32364 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:59:39.142400 32364 net.cpp:100] Creating Layer pre_bn\nI0821 08:59:39.142411 32364 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:59:39.142431 32364 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:59:39.142822 32364 
net.cpp:150] Setting up pre_bn\nI0821 08:59:39.142841 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.142850 32364 net.cpp:165] Memory required for data: 14337200\nI0821 08:59:39.142880 32364 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:39.142907 32364 net.cpp:100] Creating Layer pre_scale\nI0821 08:59:39.142920 32364 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:59:39.142938 32364 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:59:39.143046 32364 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:39.143278 32364 net.cpp:150] Setting up pre_scale\nI0821 08:59:39.143299 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.143311 32364 net.cpp:165] Memory required for data: 20890800\nI0821 08:59:39.143328 32364 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:59:39.143347 32364 net.cpp:100] Creating Layer pre_relu\nI0821 08:59:39.143358 32364 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:59:39.143383 32364 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:59:39.143407 32364 net.cpp:150] Setting up pre_relu\nI0821 08:59:39.143422 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.143435 32364 net.cpp:165] Memory required for data: 27444400\nI0821 08:59:39.143445 32364 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:59:39.143465 32364 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:59:39.143476 32364 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:59:39.143489 32364 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:59:39.143512 32364 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:59:39.143654 32364 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:59:39.143674 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.143687 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.143699 32364 
net.cpp:165] Memory required for data: 40551600\nI0821 08:59:39.143710 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:59:39.143733 32364 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:59:39.143744 32364 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:59:39.143767 32364 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:59:39.144238 32364 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:59:39.144269 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.144280 32364 net.cpp:165] Memory required for data: 47105200\nI0821 08:59:39.144306 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:59:39.144331 32364 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:59:39.144346 32364 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:59:39.144364 32364 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:59:39.144723 32364 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:59:39.144745 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.144754 32364 net.cpp:165] Memory required for data: 53658800\nI0821 08:59:39.144778 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:39.144799 32364 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:59:39.144810 32364 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:59:39.144825 32364 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.144938 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:39.145206 32364 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:59:39.145227 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.145237 32364 net.cpp:165] Memory required for data: 60212400\nI0821 08:59:39.145259 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:59:39.145280 32364 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:59:39.145292 32364 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 
08:59:39.145306 32364 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.145328 32364 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:59:39.145342 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.145355 32364 net.cpp:165] Memory required for data: 66766000\nI0821 08:59:39.145370 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:59:39.145393 32364 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:59:39.145409 32364 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:59:39.145432 32364 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:59:39.145885 32364 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:59:39.145905 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.145915 32364 net.cpp:165] Memory required for data: 73319600\nI0821 08:59:39.145932 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:59:39.145949 32364 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:59:39.145961 32364 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:59:39.145979 32364 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:59:39.146371 32364 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:59:39.146390 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.146399 32364 net.cpp:165] Memory required for data: 79873200\nI0821 08:59:39.146432 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:39.146450 32364 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:59:39.146461 32364 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:59:39.146477 32364 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:59:39.146570 32364 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:39.146771 32364 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:59:39.146791 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.146805 32364 net.cpp:165] Memory required for data: 
86426800\nI0821 08:59:39.146823 32364 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:59:39.146843 32364 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:59:39.146855 32364 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:59:39.146868 32364 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:59:39.146889 32364 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:59:39.146947 32364 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:59:39.146965 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.146986 32364 net.cpp:165] Memory required for data: 92980400\nI0821 08:59:39.146996 32364 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:59:39.147014 32364 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:59:39.147027 32364 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:59:39.147042 32364 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:59:39.147060 32364 net.cpp:150] Setting up L1_b1_relu\nI0821 08:59:39.147075 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.147083 32364 net.cpp:165] Memory required for data: 99534000\nI0821 08:59:39.147094 32364 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:39.147110 32364 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:39.147121 32364 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:59:39.147136 32364 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:39.147162 32364 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:39.147254 32364 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:39.147274 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.147287 32364 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:39.147296 32364 net.cpp:165] Memory required for data: 112641200\nI0821 08:59:39.147307 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:59:39.147331 32364 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:59:39.147344 32364 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:39.147362 32364 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:59:39.147774 32364 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:59:39.147795 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.147805 32364 net.cpp:165] Memory required for data: 119194800\nI0821 08:59:39.147822 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:59:39.147846 32364 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:59:39.147863 32364 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:59:39.147884 32364 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:59:39.148430 32364 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:59:39.148453 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.148464 32364 net.cpp:165] Memory required for data: 125748400\nI0821 08:59:39.148486 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:39.148502 32364 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:59:39.148514 32364 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:59:39.148531 32364 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.148640 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:39.148864 32364 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:59:39.148886 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.148901 32364 net.cpp:165] Memory required for data: 132302000\nI0821 08:59:39.148921 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:59:39.148934 32364 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:59:39.148946 32364 
net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:59:39.148968 32364 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.148988 32364 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:59:39.149006 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.149016 32364 net.cpp:165] Memory required for data: 138855600\nI0821 08:59:39.149027 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:59:39.149060 32364 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:59:39.149075 32364 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:59:39.149094 32364 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:59:39.149577 32364 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:59:39.149606 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.149617 32364 net.cpp:165] Memory required for data: 145409200\nI0821 08:59:39.149637 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:59:39.149659 32364 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:59:39.149674 32364 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:59:39.149691 32364 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:59:39.150053 32364 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:59:39.150079 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.150089 32364 net.cpp:165] Memory required for data: 151962800\nI0821 08:59:39.150121 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:39.150138 32364 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:59:39.150157 32364 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:59:39.150178 32364 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:59:39.150300 32364 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:39.150534 32364 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:59:39.150555 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:39.150568 32364 net.cpp:165] Memory required for data: 158516400\nI0821 08:59:39.150589 32364 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:59:39.150606 32364 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:59:39.150617 32364 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:59:39.150630 32364 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:39.150650 32364 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:59:39.150722 32364 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:59:39.150740 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.150750 32364 net.cpp:165] Memory required for data: 165070000\nI0821 08:59:39.150758 32364 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:59:39.150965 32364 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:59:39.150979 32364 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:59:39.150995 32364 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:59:39.151015 32364 net.cpp:150] Setting up L1_b2_relu\nI0821 08:59:39.151029 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.151038 32364 net.cpp:165] Memory required for data: 171623600\nI0821 08:59:39.151047 32364 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:39.151062 32364 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:39.151074 32364 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:59:39.151095 32364 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:39.151120 32364 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:39.151226 32364 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:39.151245 32364 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:39.151257 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.151269 32364 net.cpp:165] Memory required for data: 184730800\nI0821 08:59:39.151280 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:59:39.151304 32364 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:59:39.151320 32364 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:39.151340 32364 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:59:39.151809 32364 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:59:39.151831 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.151840 32364 net.cpp:165] Memory required for data: 191284400\nI0821 08:59:39.151861 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:59:39.151878 32364 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:59:39.151912 32364 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:59:39.151932 32364 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:59:39.152310 32364 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:59:39.152333 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.152341 32364 net.cpp:165] Memory required for data: 197838000\nI0821 08:59:39.152365 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:39.152382 32364 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:59:39.152392 32364 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:59:39.152412 32364 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.152521 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:39.152758 32364 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:59:39.152781 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.152796 32364 net.cpp:165] Memory required for data: 204391600\nI0821 08:59:39.152813 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:59:39.152828 
32364 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:59:39.152839 32364 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:59:39.152855 32364 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.152876 32364 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:59:39.152889 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.152902 32364 net.cpp:165] Memory required for data: 210945200\nI0821 08:59:39.152912 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:59:39.152942 32364 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:59:39.152956 32364 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:59:39.152984 32364 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:59:39.153458 32364 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:59:39.153477 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.153486 32364 net.cpp:165] Memory required for data: 217498800\nI0821 08:59:39.153502 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:59:39.153528 32364 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:59:39.153540 32364 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:59:39.153560 32364 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:59:39.153899 32364 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:59:39.153919 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.153928 32364 net.cpp:165] Memory required for data: 224052400\nI0821 08:59:39.153949 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:39.153965 32364 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:59:39.153976 32364 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:59:39.153991 32364 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:59:39.154098 32364 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:39.154304 32364 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 
08:59:39.154322 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.154331 32364 net.cpp:165] Memory required for data: 230606000\nI0821 08:59:39.154350 32364 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:59:39.154369 32364 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:59:39.154381 32364 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:59:39.154393 32364 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:39.154408 32364 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:59:39.154471 32364 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:59:39.154489 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.154500 32364 net.cpp:165] Memory required for data: 237159600\nI0821 08:59:39.154510 32364 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:59:39.154525 32364 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:59:39.154536 32364 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:59:39.154561 32364 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:59:39.154582 32364 net.cpp:150] Setting up L1_b3_relu\nI0821 08:59:39.154597 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.154605 32364 net.cpp:165] Memory required for data: 243713200\nI0821 08:59:39.154614 32364 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:39.154628 32364 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:39.154639 32364 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:59:39.154659 32364 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:39.154678 32364 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:39.154765 32364 net.cpp:150] Setting up 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:39.154786 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.154799 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.154808 32364 net.cpp:165] Memory required for data: 256820400\nI0821 08:59:39.154819 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:59:39.154844 32364 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:59:39.154855 32364 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:39.154875 32364 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:59:39.155289 32364 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:59:39.155309 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.155318 32364 net.cpp:165] Memory required for data: 263374000\nI0821 08:59:39.155335 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:59:39.155357 32364 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:59:39.155369 32364 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:59:39.155390 32364 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:59:39.155711 32364 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:59:39.155730 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.155740 32364 net.cpp:165] Memory required for data: 269927600\nI0821 08:59:39.155760 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:39.155776 32364 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:59:39.155786 32364 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:59:39.155800 32364 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.155905 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:39.156108 32364 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:59:39.156126 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.156136 32364 net.cpp:165] Memory required for data: 
276481200\nI0821 08:59:39.156162 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:59:39.156183 32364 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:59:39.156193 32364 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:59:39.156208 32364 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.156226 32364 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:59:39.156239 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.156250 32364 net.cpp:165] Memory required for data: 283034800\nI0821 08:59:39.156258 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:59:39.156282 32364 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:59:39.156294 32364 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:59:39.156316 32364 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:59:39.157083 32364 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:59:39.157102 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.157111 32364 net.cpp:165] Memory required for data: 289588400\nI0821 08:59:39.157130 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:59:39.157158 32364 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:59:39.157181 32364 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:59:39.157198 32364 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:59:39.157523 32364 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:59:39.157543 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.157553 32364 net.cpp:165] Memory required for data: 296142000\nI0821 08:59:39.157574 32364 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:39.157593 32364 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:59:39.157604 32364 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:59:39.157626 32364 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:59:39.157719 32364 
layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:39.157918 32364 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:59:39.157940 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.157950 32364 net.cpp:165] Memory required for data: 302695600\nI0821 08:59:39.157969 32364 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:59:39.157986 32364 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:59:39.157997 32364 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:59:39.158011 32364 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:39.158027 32364 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:59:39.158088 32364 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:59:39.158107 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.158116 32364 net.cpp:165] Memory required for data: 309249200\nI0821 08:59:39.158126 32364 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:59:39.158145 32364 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:59:39.158164 32364 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:59:39.158180 32364 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:59:39.158200 32364 net.cpp:150] Setting up L1_b4_relu\nI0821 08:59:39.158215 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.158223 32364 net.cpp:165] Memory required for data: 315802800\nI0821 08:59:39.158233 32364 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:39.158246 32364 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:39.158258 32364 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:59:39.158277 32364 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:39.158298 32364 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> 
L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:39.158385 32364 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:39.158407 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.158421 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.158430 32364 net.cpp:165] Memory required for data: 328910000\nI0821 08:59:39.158440 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:59:39.158460 32364 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:59:39.158473 32364 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:39.158490 32364 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:59:39.158905 32364 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:59:39.158923 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.158933 32364 net.cpp:165] Memory required for data: 335463600\nI0821 08:59:39.158969 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:59:39.158991 32364 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:59:39.159003 32364 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:59:39.159019 32364 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:59:39.159338 32364 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:59:39.159358 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.159366 32364 net.cpp:165] Memory required for data: 342017200\nI0821 08:59:39.159396 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:39.159412 32364 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:59:39.159422 32364 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:59:39.159464 32364 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.159567 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:39.159767 32364 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:59:39.159786 32364 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0821 08:59:39.159795 32364 net.cpp:165] Memory required for data: 348570800\nI0821 08:59:39.159812 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:59:39.159837 32364 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:59:39.159847 32364 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:59:39.159862 32364 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.159880 32364 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:59:39.159894 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.159904 32364 net.cpp:165] Memory required for data: 355124400\nI0821 08:59:39.159912 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:59:39.159936 32364 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:59:39.159948 32364 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:59:39.159970 32364 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:59:39.160387 32364 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:59:39.160406 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.160415 32364 net.cpp:165] Memory required for data: 361678000\nI0821 08:59:39.160432 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:59:39.160454 32364 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:59:39.160466 32364 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:59:39.160485 32364 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:59:39.160810 32364 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:59:39.160830 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.160837 32364 net.cpp:165] Memory required for data: 368231600\nI0821 08:59:39.160857 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:39.160874 32364 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:59:39.160886 32364 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:59:39.160899 32364 net.cpp:395] 
L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:59:39.161003 32364 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:39.161216 32364 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:59:39.161236 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.161245 32364 net.cpp:165] Memory required for data: 374785200\nI0821 08:59:39.161262 32364 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:59:39.161279 32364 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:59:39.161296 32364 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:59:39.161309 32364 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:39.161324 32364 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:59:39.161387 32364 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:59:39.161407 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.161417 32364 net.cpp:165] Memory required for data: 381338800\nI0821 08:59:39.161427 32364 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:59:39.161440 32364 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:59:39.161450 32364 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:59:39.161469 32364 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:59:39.161489 32364 net.cpp:150] Setting up L1_b5_relu\nI0821 08:59:39.161502 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.161511 32364 net.cpp:165] Memory required for data: 387892400\nI0821 08:59:39.161520 32364 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:39.161545 32364 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:39.161556 32364 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:59:39.161571 32364 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 
08:59:39.161589 32364 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:39.161680 32364 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:39.161702 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.161716 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.161725 32364 net.cpp:165] Memory required for data: 400999600\nI0821 08:59:39.161736 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:59:39.161759 32364 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:59:39.161772 32364 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:39.161790 32364 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:59:39.162209 32364 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:59:39.162230 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.162240 32364 net.cpp:165] Memory required for data: 407553200\nI0821 08:59:39.162257 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:59:39.162282 32364 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:59:39.162294 32364 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:59:39.162314 32364 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:59:39.162631 32364 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:59:39.162650 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.162659 32364 net.cpp:165] Memory required for data: 414106800\nI0821 08:59:39.162680 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:39.162696 32364 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:59:39.162706 32364 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:59:39.162720 32364 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.162827 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:39.163030 32364 net.cpp:150] Setting 
up L1_b6_cbr1_scale\nI0821 08:59:39.163049 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.163059 32364 net.cpp:165] Memory required for data: 420660400\nI0821 08:59:39.163076 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:59:39.163096 32364 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:59:39.163108 32364 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:59:39.163122 32364 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.163141 32364 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:59:39.163163 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.163173 32364 net.cpp:165] Memory required for data: 427214000\nI0821 08:59:39.163182 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:59:39.163208 32364 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:59:39.163219 32364 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:59:39.163240 32364 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:59:39.163648 32364 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:59:39.163667 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.163676 32364 net.cpp:165] Memory required for data: 433767600\nI0821 08:59:39.163693 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:59:39.163714 32364 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:59:39.163727 32364 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:59:39.163741 32364 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:59:39.164075 32364 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:59:39.164095 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.164103 32364 net.cpp:165] Memory required for data: 440321200\nI0821 08:59:39.164134 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:39.164156 32364 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:59:39.164170 32364 net.cpp:434] 
L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:59:39.164185 32364 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:59:39.164294 32364 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:39.164503 32364 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:59:39.164523 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.164532 32364 net.cpp:165] Memory required for data: 446874800\nI0821 08:59:39.164551 32364 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:59:39.164578 32364 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:59:39.164592 32364 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:59:39.164605 32364 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:39.164626 32364 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:59:39.164685 32364 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:59:39.164703 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.164712 32364 net.cpp:165] Memory required for data: 453428400\nI0821 08:59:39.164723 32364 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:59:39.164738 32364 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:59:39.164749 32364 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:59:39.164762 32364 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:59:39.164780 32364 net.cpp:150] Setting up L1_b6_relu\nI0821 08:59:39.164795 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.164804 32364 net.cpp:165] Memory required for data: 459982000\nI0821 08:59:39.164814 32364 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:39.164834 32364 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:39.164844 32364 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:59:39.164860 32364 net.cpp:408] 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:39.164880 32364 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:39.164968 32364 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:39.164991 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.165005 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.165015 32364 net.cpp:165] Memory required for data: 473089200\nI0821 08:59:39.165025 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:59:39.165045 32364 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:59:39.165057 32364 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:39.165074 32364 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:59:39.165477 32364 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:59:39.165495 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.165504 32364 net.cpp:165] Memory required for data: 479642800\nI0821 08:59:39.165522 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:59:39.165544 32364 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:59:39.165556 32364 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:59:39.165573 32364 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:59:39.165890 32364 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:59:39.165910 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.165918 32364 net.cpp:165] Memory required for data: 486196400\nI0821 08:59:39.165940 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:39.165956 32364 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:59:39.165967 32364 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:59:39.165987 32364 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.166097 32364 
layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:39.166301 32364 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:59:39.166321 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.166329 32364 net.cpp:165] Memory required for data: 492750000\nI0821 08:59:39.166347 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:59:39.166363 32364 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:59:39.166374 32364 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:59:39.166393 32364 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.166414 32364 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:59:39.166427 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.166436 32364 net.cpp:165] Memory required for data: 499303600\nI0821 08:59:39.166446 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:59:39.166476 32364 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:59:39.166488 32364 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:59:39.166507 32364 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:59:39.166914 32364 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:59:39.166934 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.166944 32364 net.cpp:165] Memory required for data: 505857200\nI0821 08:59:39.166962 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:59:39.166978 32364 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:59:39.166995 32364 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:59:39.167011 32364 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:59:39.167331 32364 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:59:39.167351 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.167361 32364 net.cpp:165] Memory required for data: 512410800\nI0821 08:59:39.167382 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:39.167399 
32364 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:59:39.167410 32364 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:59:39.167430 32364 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:59:39.167524 32364 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:39.167722 32364 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:59:39.167745 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.167755 32364 net.cpp:165] Memory required for data: 518964400\nI0821 08:59:39.167774 32364 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:59:39.167790 32364 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:59:39.167803 32364 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:59:39.167815 32364 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:39.167831 32364 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:59:39.167893 32364 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:59:39.167913 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.167923 32364 net.cpp:165] Memory required for data: 525518000\nI0821 08:59:39.167933 32364 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:59:39.167951 32364 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:59:39.167964 32364 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:59:39.167979 32364 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:59:39.167997 32364 net.cpp:150] Setting up L1_b7_relu\nI0821 08:59:39.168011 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.168020 32364 net.cpp:165] Memory required for data: 532071600\nI0821 08:59:39.168030 32364 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:39.168048 32364 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:39.168061 32364 net.cpp:434] 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:59:39.168076 32364 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:39.168105 32364 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:39.168203 32364 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:39.168228 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.168243 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.168252 32364 net.cpp:165] Memory required for data: 545178800\nI0821 08:59:39.168262 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:59:39.168282 32364 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:59:39.168295 32364 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:39.168313 32364 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:59:39.168732 32364 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:59:39.168752 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.168761 32364 net.cpp:165] Memory required for data: 551732400\nI0821 08:59:39.168779 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:59:39.168800 32364 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:59:39.168812 32364 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:59:39.168829 32364 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:59:39.169158 32364 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:59:39.169178 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.169188 32364 net.cpp:165] Memory required for data: 558286000\nI0821 08:59:39.169209 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:39.169225 32364 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:59:39.169237 32364 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:59:39.169258 
32364 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.169353 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:39.169554 32364 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:59:39.169572 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.169582 32364 net.cpp:165] Memory required for data: 564839600\nI0821 08:59:39.169600 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:59:39.169617 32364 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:59:39.169628 32364 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:59:39.169642 32364 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.169661 32364 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:59:39.169675 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.169684 32364 net.cpp:165] Memory required for data: 571393200\nI0821 08:59:39.169694 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:59:39.169719 32364 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:59:39.169731 32364 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:59:39.169754 32364 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:59:39.170176 32364 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:59:39.170197 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.170207 32364 net.cpp:165] Memory required for data: 577946800\nI0821 08:59:39.170224 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:59:39.170245 32364 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:59:39.170258 32364 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:59:39.170279 32364 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:59:39.170593 32364 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:59:39.170611 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.170621 32364 net.cpp:165] Memory required for data: 584500400\nI0821 
08:59:39.170642 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:39.170660 32364 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:59:39.170670 32364 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:59:39.170691 32364 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:59:39.170802 32364 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:39.171007 32364 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:59:39.171026 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.171034 32364 net.cpp:165] Memory required for data: 591054000\nI0821 08:59:39.171052 32364 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:59:39.171074 32364 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:59:39.171087 32364 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:59:39.171099 32364 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:39.171116 32364 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:59:39.171185 32364 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:59:39.171203 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.171213 32364 net.cpp:165] Memory required for data: 597607600\nI0821 08:59:39.171224 32364 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:59:39.171239 32364 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:59:39.171250 32364 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:59:39.171315 32364 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:59:39.171336 32364 net.cpp:150] Setting up L1_b8_relu\nI0821 08:59:39.171351 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.171360 32364 net.cpp:165] Memory required for data: 604161200\nI0821 08:59:39.171370 32364 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:39.171386 32364 net.cpp:100] Creating Layer 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:39.171396 32364 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:59:39.171416 32364 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:39.171437 32364 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:39.171524 32364 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:39.171545 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.171556 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.171566 32364 net.cpp:165] Memory required for data: 617268400\nI0821 08:59:39.171576 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:59:39.171602 32364 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:59:39.171614 32364 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:39.171634 32364 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:59:39.172051 32364 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:59:39.172075 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.172086 32364 net.cpp:165] Memory required for data: 623822000\nI0821 08:59:39.172103 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:59:39.172121 32364 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:59:39.172132 32364 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:59:39.172159 32364 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:59:39.172495 32364 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:59:39.172514 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.172523 32364 net.cpp:165] Memory required for data: 630375600\nI0821 08:59:39.172544 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:39.172564 32364 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 
08:59:39.172576 32364 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:59:39.172592 32364 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.172688 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:39.172886 32364 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:59:39.172904 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.172914 32364 net.cpp:165] Memory required for data: 636929200\nI0821 08:59:39.172941 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:59:39.172963 32364 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:59:39.172976 32364 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:59:39.172991 32364 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.173010 32364 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:59:39.173025 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.173034 32364 net.cpp:165] Memory required for data: 643482800\nI0821 08:59:39.173043 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:59:39.173069 32364 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:59:39.173081 32364 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:59:39.173108 32364 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:59:39.173524 32364 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:59:39.173544 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.173554 32364 net.cpp:165] Memory required for data: 650036400\nI0821 08:59:39.173573 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:59:39.173589 32364 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:59:39.173601 32364 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:59:39.173622 32364 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:59:39.173939 32364 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:59:39.173959 32364 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:39.173969 32364 net.cpp:165] Memory required for data: 656590000\nI0821 08:59:39.174023 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:39.174042 32364 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:59:39.174055 32364 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:59:39.174074 32364 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:59:39.174175 32364 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:39.174376 32364 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:59:39.174394 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.174403 32364 net.cpp:165] Memory required for data: 663143600\nI0821 08:59:39.174422 32364 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:59:39.174438 32364 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:59:39.174450 32364 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:59:39.174463 32364 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:39.174484 32364 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:59:39.174542 32364 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:59:39.174561 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.174571 32364 net.cpp:165] Memory required for data: 669697200\nI0821 08:59:39.174581 32364 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:59:39.174599 32364 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:59:39.174612 32364 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:59:39.174626 32364 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:59:39.174644 32364 net.cpp:150] Setting up L1_b9_relu\nI0821 08:59:39.174659 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.174669 32364 net.cpp:165] Memory required for data: 676250800\nI0821 08:59:39.174677 32364 layer_factory.hpp:77] Creating layer 
L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:39.174696 32364 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:39.174708 32364 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:59:39.174723 32364 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:39.174742 32364 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:39.174827 32364 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:39.174850 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.174873 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.174885 32364 net.cpp:165] Memory required for data: 689358000\nI0821 08:59:39.174895 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0821 08:59:39.174916 32364 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0821 08:59:39.174927 32364 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:39.174947 32364 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0821 08:59:39.175354 32364 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0821 08:59:39.175374 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.175384 32364 net.cpp:165] Memory required for data: 695911600\nI0821 08:59:39.175402 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0821 08:59:39.175424 32364 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0821 08:59:39.175436 32364 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0821 08:59:39.175451 32364 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0821 08:59:39.175767 32364 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0821 08:59:39.175786 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.175796 32364 net.cpp:165] Memory required for data: 702465200\nI0821 08:59:39.175817 32364 layer_factory.hpp:77] Creating layer 
L1_b10_cbr1_scale\nI0821 08:59:39.175833 32364 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0821 08:59:39.175844 32364 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0821 08:59:39.175865 32364 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.175964 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:39.176175 32364 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0821 08:59:39.176195 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.176204 32364 net.cpp:165] Memory required for data: 709018800\nI0821 08:59:39.176223 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0821 08:59:39.176239 32364 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0821 08:59:39.176250 32364 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0821 08:59:39.176268 32364 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.176288 32364 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0821 08:59:39.176303 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.176312 32364 net.cpp:165] Memory required for data: 715572400\nI0821 08:59:39.176322 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0821 08:59:39.176347 32364 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0821 08:59:39.176359 32364 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0821 08:59:39.176376 32364 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0821 08:59:39.176789 32364 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0821 08:59:39.176808 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.176817 32364 net.cpp:165] Memory required for data: 722126000\nI0821 08:59:39.176836 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0821 08:59:39.176853 32364 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0821 08:59:39.176868 32364 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0821 08:59:39.176887 32364 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0821 
08:59:39.177211 32364 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0821 08:59:39.177230 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.177240 32364 net.cpp:165] Memory required for data: 728679600\nI0821 08:59:39.177261 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:39.177278 32364 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0821 08:59:39.177289 32364 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0821 08:59:39.177309 32364 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0821 08:59:39.177403 32364 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:39.177608 32364 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0821 08:59:39.177630 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.177649 32364 net.cpp:165] Memory required for data: 735233200\nI0821 08:59:39.177669 32364 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0821 08:59:39.177685 32364 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0821 08:59:39.177697 32364 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0821 08:59:39.177711 32364 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:39.177726 32364 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0821 08:59:39.177788 32364 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0821 08:59:39.177809 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.177819 32364 net.cpp:165] Memory required for data: 741786800\nI0821 08:59:39.177829 32364 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0821 08:59:39.177848 32364 net.cpp:100] Creating Layer L1_b10_relu\nI0821 08:59:39.177860 32364 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0821 08:59:39.177873 32364 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top (in-place)\nI0821 08:59:39.177892 32364 net.cpp:150] Setting up L1_b10_relu\nI0821 08:59:39.177906 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:39.177916 32364 net.cpp:165] Memory required for data: 748340400\nI0821 08:59:39.177925 32364 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:39.177943 32364 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:39.177954 32364 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0821 08:59:39.177968 32364 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:39.177989 32364 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:39.178074 32364 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:39.178100 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.178115 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.178125 32364 net.cpp:165] Memory required for data: 761447600\nI0821 08:59:39.178135 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0821 08:59:39.178161 32364 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0821 08:59:39.178176 32364 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:39.178194 32364 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0821 08:59:39.178608 32364 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0821 08:59:39.178628 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.178637 32364 net.cpp:165] Memory required for data: 768001200\nI0821 08:59:39.178654 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0821 08:59:39.178675 32364 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0821 08:59:39.178689 32364 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0821 08:59:39.178705 32364 net.cpp:408] L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0821 08:59:39.179021 32364 net.cpp:150] Setting up L1_b11_cbr1_bn\nI0821 08:59:39.179041 32364 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0821 08:59:39.179050 32364 net.cpp:165] Memory required for data: 774554800\nI0821 08:59:39.179070 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:39.179087 32364 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0821 08:59:39.179098 32364 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0821 08:59:39.179118 32364 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.179219 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:39.179420 32364 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0821 08:59:39.179440 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.179450 32364 net.cpp:165] Memory required for data: 781108400\nI0821 08:59:39.179467 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0821 08:59:39.179482 32364 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0821 08:59:39.179494 32364 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0821 08:59:39.179517 32364 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.179536 32364 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0821 08:59:39.179553 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.179561 32364 net.cpp:165] Memory required for data: 787662000\nI0821 08:59:39.179570 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0821 08:59:39.179594 32364 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0821 08:59:39.179607 32364 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0821 08:59:39.179628 32364 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0821 08:59:39.180033 32364 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0821 08:59:39.180053 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.180063 32364 net.cpp:165] Memory required for data: 794215600\nI0821 08:59:39.180080 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0821 08:59:39.180102 32364 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0821 
08:59:39.180114 32364 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0821 08:59:39.180135 32364 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0821 08:59:39.180456 32364 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0821 08:59:39.180475 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.180485 32364 net.cpp:165] Memory required for data: 800769200\nI0821 08:59:39.180506 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:39.180522 32364 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0821 08:59:39.180534 32364 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0821 08:59:39.180554 32364 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0821 08:59:39.180649 32364 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:39.180845 32364 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0821 08:59:39.180868 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.180877 32364 net.cpp:165] Memory required for data: 807322800\nI0821 08:59:39.180896 32364 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0821 08:59:39.180912 32364 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0821 08:59:39.180924 32364 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0821 08:59:39.180938 32364 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:39.180953 32364 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0821 08:59:39.181017 32364 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0821 08:59:39.181036 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.181046 32364 net.cpp:165] Memory required for data: 813876400\nI0821 08:59:39.181056 32364 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0821 08:59:39.181071 32364 net.cpp:100] Creating Layer L1_b11_relu\nI0821 08:59:39.181087 32364 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0821 08:59:39.181102 32364 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top 
(in-place)\nI0821 08:59:39.181120 32364 net.cpp:150] Setting up L1_b11_relu\nI0821 08:59:39.181134 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.181143 32364 net.cpp:165] Memory required for data: 820430000\nI0821 08:59:39.181161 32364 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:39.181175 32364 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:39.181185 32364 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0821 08:59:39.181207 32364 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:39.181228 32364 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:39.181316 32364 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:39.181335 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.181349 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.181358 32364 net.cpp:165] Memory required for data: 833537200\nI0821 08:59:39.181376 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0821 08:59:39.181402 32364 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0821 08:59:39.181416 32364 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:39.181437 32364 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0821 08:59:39.181844 32364 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0821 08:59:39.181864 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.181874 32364 net.cpp:165] Memory required for data: 840090800\nI0821 08:59:39.181890 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0821 08:59:39.181913 32364 net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0821 08:59:39.181926 32364 net.cpp:434] L1_b12_cbr1_bn <- L1_b12_cbr1_conv_top\nI0821 08:59:39.181942 32364 net.cpp:408] 
L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0821 08:59:39.182286 32364 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0821 08:59:39.182305 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.182315 32364 net.cpp:165] Memory required for data: 846644400\nI0821 08:59:39.182337 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:39.182353 32364 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0821 08:59:39.182365 32364 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0821 08:59:39.182387 32364 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.182482 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:39.182711 32364 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0821 08:59:39.182742 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.182752 32364 net.cpp:165] Memory required for data: 853198000\nI0821 08:59:39.182768 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0821 08:59:39.182781 32364 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0821 08:59:39.182792 32364 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0821 08:59:39.182806 32364 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.182819 32364 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0821 08:59:39.182832 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.182838 32364 net.cpp:165] Memory required for data: 859751600\nI0821 08:59:39.182848 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0821 08:59:39.182870 32364 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0821 08:59:39.182880 32364 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0821 08:59:39.182898 32364 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0821 08:59:39.183394 32364 net.cpp:150] Setting up L1_b12_cbr2_conv\nI0821 08:59:39.183415 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.183425 32364 net.cpp:165] Memory required for data: 
866305200\nI0821 08:59:39.183442 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0821 08:59:39.183465 32364 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0821 08:59:39.183478 32364 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0821 08:59:39.183501 32364 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0821 08:59:39.183820 32364 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0821 08:59:39.183840 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.183848 32364 net.cpp:165] Memory required for data: 872858800\nI0821 08:59:39.183869 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:39.183887 32364 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0821 08:59:39.183897 32364 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0821 08:59:39.183912 32364 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0821 08:59:39.184013 32364 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:39.184224 32364 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0821 08:59:39.184243 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.184253 32364 net.cpp:165] Memory required for data: 879412400\nI0821 08:59:39.184273 32364 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0821 08:59:39.184306 32364 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0821 08:59:39.184320 32364 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0821 08:59:39.184334 32364 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:39.184350 32364 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0821 08:59:39.184417 32364 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0821 08:59:39.184437 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.184445 32364 net.cpp:165] Memory required for data: 885966000\nI0821 08:59:39.184455 32364 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0821 08:59:39.184469 32364 net.cpp:100] Creating Layer 
L1_b12_relu\nI0821 08:59:39.184480 32364 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0821 08:59:39.184499 32364 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0821 08:59:39.184520 32364 net.cpp:150] Setting up L1_b12_relu\nI0821 08:59:39.184533 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.184542 32364 net.cpp:165] Memory required for data: 892519600\nI0821 08:59:39.184552 32364 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:39.184566 32364 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:39.184576 32364 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0821 08:59:39.184597 32364 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:39.184617 32364 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:39.184706 32364 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:39.184725 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.184737 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.184746 32364 net.cpp:165] Memory required for data: 905626800\nI0821 08:59:39.184756 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0821 08:59:39.184782 32364 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0821 08:59:39.184795 32364 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:39.184813 32364 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0821 08:59:39.185241 32364 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0821 08:59:39.185261 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.185271 32364 net.cpp:165] Memory required for data: 912180400\nI0821 08:59:39.185288 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_bn\nI0821 08:59:39.185328 32364 
net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0821 08:59:39.185341 32364 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0821 08:59:39.185359 32364 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0821 08:59:39.185696 32364 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0821 08:59:39.185716 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.185725 32364 net.cpp:165] Memory required for data: 918734000\nI0821 08:59:39.185747 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:39.185765 32364 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0821 08:59:39.185776 32364 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0821 08:59:39.185791 32364 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.185890 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:39.186094 32364 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0821 08:59:39.186115 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.186123 32364 net.cpp:165] Memory required for data: 925287600\nI0821 08:59:39.186141 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0821 08:59:39.186166 32364 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0821 08:59:39.186177 32364 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0821 08:59:39.186197 32364 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.186218 32364 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0821 08:59:39.186240 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.186251 32364 net.cpp:165] Memory required for data: 931841200\nI0821 08:59:39.186261 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0821 08:59:39.186286 32364 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0821 08:59:39.186300 32364 net.cpp:434] L1_b13_cbr2_conv <- L1_b13_cbr1_bn_top\nI0821 08:59:39.186317 32364 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0821 08:59:39.186782 32364 net.cpp:150] Setting up 
L1_b13_cbr2_conv\nI0821 08:59:39.186803 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.186813 32364 net.cpp:165] Memory required for data: 938394800\nI0821 08:59:39.186830 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0821 08:59:39.186849 32364 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0821 08:59:39.186861 32364 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0821 08:59:39.186882 32364 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0821 08:59:39.188225 32364 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0821 08:59:39.188247 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.188257 32364 net.cpp:165] Memory required for data: 944948400\nI0821 08:59:39.188279 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:39.188302 32364 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0821 08:59:39.188313 32364 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0821 08:59:39.188329 32364 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0821 08:59:39.188428 32364 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:39.188630 32364 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0821 08:59:39.188649 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.188658 32364 net.cpp:165] Memory required for data: 951502000\nI0821 08:59:39.188678 32364 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0821 08:59:39.188694 32364 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0821 08:59:39.188705 32364 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0821 08:59:39.188719 32364 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:39.188740 32364 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0821 08:59:39.188798 32364 net.cpp:150] Setting up L1_b13_sum_eltwise\nI0821 08:59:39.188822 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.188832 32364 net.cpp:165] Memory required for 
data: 958055600\nI0821 08:59:39.188840 32364 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0821 08:59:39.188855 32364 net.cpp:100] Creating Layer L1_b13_relu\nI0821 08:59:39.188868 32364 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0821 08:59:39.188881 32364 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0821 08:59:39.188899 32364 net.cpp:150] Setting up L1_b13_relu\nI0821 08:59:39.188915 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.188925 32364 net.cpp:165] Memory required for data: 964609200\nI0821 08:59:39.188933 32364 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:39.188952 32364 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:39.188964 32364 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0821 08:59:39.188979 32364 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:39.188998 32364 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:39.189085 32364 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:39.189105 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.189117 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.189126 32364 net.cpp:165] Memory required for data: 977716400\nI0821 08:59:39.189137 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0821 08:59:39.189164 32364 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0821 08:59:39.189188 32364 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:39.189213 32364 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0821 08:59:39.189626 32364 net.cpp:150] Setting up L1_b14_cbr1_conv\nI0821 08:59:39.189646 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.189656 32364 net.cpp:165] 
Memory required for data: 984270000\nI0821 08:59:39.189673 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0821 08:59:39.189690 32364 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0821 08:59:39.189702 32364 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0821 08:59:39.189718 32364 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0821 08:59:39.190042 32364 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0821 08:59:39.190062 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.190071 32364 net.cpp:165] Memory required for data: 990823600\nI0821 08:59:39.190093 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:39.190114 32364 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0821 08:59:39.190125 32364 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0821 08:59:39.190141 32364 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.190246 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:39.190444 32364 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0821 08:59:39.190464 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.190472 32364 net.cpp:165] Memory required for data: 997377200\nI0821 08:59:39.190490 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0821 08:59:39.190505 32364 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0821 08:59:39.190516 32364 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0821 08:59:39.190536 32364 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.190556 32364 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0821 08:59:39.190569 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.190578 32364 net.cpp:165] Memory required for data: 1003930800\nI0821 08:59:39.190588 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_conv\nI0821 08:59:39.190613 32364 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0821 08:59:39.190625 32364 net.cpp:434] L1_b14_cbr2_conv <- 
L1_b14_cbr1_bn_top\nI0821 08:59:39.190644 32364 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0821 08:59:39.191052 32364 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0821 08:59:39.191072 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.191082 32364 net.cpp:165] Memory required for data: 1010484400\nI0821 08:59:39.191099 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0821 08:59:39.191121 32364 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0821 08:59:39.191134 32364 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0821 08:59:39.191157 32364 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0821 08:59:39.191473 32364 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0821 08:59:39.191493 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.191501 32364 net.cpp:165] Memory required for data: 1017038000\nI0821 08:59:39.191524 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:39.191540 32364 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0821 08:59:39.191550 32364 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0821 08:59:39.191571 32364 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0821 08:59:39.191665 32364 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:39.191862 32364 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0821 08:59:39.191881 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.191891 32364 net.cpp:165] Memory required for data: 1023591600\nI0821 08:59:39.191910 32364 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0821 08:59:39.191928 32364 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0821 08:59:39.191939 32364 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0821 08:59:39.191961 32364 net.cpp:434] L1_b14_sum_eltwise <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:39.191982 32364 net.cpp:408] L1_b14_sum_eltwise -> L1_b14_sum_eltwise_top\nI0821 08:59:39.192042 32364 net.cpp:150] Setting up 
L1_b14_sum_eltwise\nI0821 08:59:39.192066 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.192077 32364 net.cpp:165] Memory required for data: 1030145200\nI0821 08:59:39.192088 32364 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0821 08:59:39.192102 32364 net.cpp:100] Creating Layer L1_b14_relu\nI0821 08:59:39.192113 32364 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0821 08:59:39.192128 32364 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0821 08:59:39.192154 32364 net.cpp:150] Setting up L1_b14_relu\nI0821 08:59:39.192170 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.192180 32364 net.cpp:165] Memory required for data: 1036698800\nI0821 08:59:39.192189 32364 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:39.192207 32364 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:39.192220 32364 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0821 08:59:39.192235 32364 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:39.192255 32364 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:39.192342 32364 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:39.192368 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.192383 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.192392 32364 net.cpp:165] Memory required for data: 1049806000\nI0821 08:59:39.192402 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0821 08:59:39.192422 32364 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0821 08:59:39.192435 32364 net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:39.192453 32364 net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0821 08:59:39.192868 
32364 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0821 08:59:39.192888 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.192896 32364 net.cpp:165] Memory required for data: 1056359600\nI0821 08:59:39.192914 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0821 08:59:39.192936 32364 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0821 08:59:39.192950 32364 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0821 08:59:39.192965 32364 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0821 08:59:39.193290 32364 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0821 08:59:39.193310 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.193320 32364 net.cpp:165] Memory required for data: 1062913200\nI0821 08:59:39.193341 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:39.193361 32364 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0821 08:59:39.193373 32364 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0821 08:59:39.193390 32364 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.193485 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:39.193681 32364 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0821 08:59:39.193699 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.193708 32364 net.cpp:165] Memory required for data: 1069466800\nI0821 08:59:39.193727 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0821 08:59:39.193743 32364 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0821 08:59:39.193754 32364 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0821 08:59:39.193773 32364 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.193794 32364 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0821 08:59:39.193809 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.193819 32364 net.cpp:165] Memory required for data: 1076020400\nI0821 08:59:39.193837 32364 layer_factory.hpp:77] Creating 
layer L1_b15_cbr2_conv\nI0821 08:59:39.193862 32364 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0821 08:59:39.193877 32364 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0821 08:59:39.193895 32364 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0821 08:59:39.194316 32364 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0821 08:59:39.194336 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.194346 32364 net.cpp:165] Memory required for data: 1082574000\nI0821 08:59:39.194365 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0821 08:59:39.194386 32364 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0821 08:59:39.194398 32364 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0821 08:59:39.194416 32364 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0821 08:59:39.194738 32364 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0821 08:59:39.194757 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.194766 32364 net.cpp:165] Memory required for data: 1089127600\nI0821 08:59:39.194788 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:39.194805 32364 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0821 08:59:39.194816 32364 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0821 08:59:39.194836 32364 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0821 08:59:39.194934 32364 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:39.196120 32364 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0821 08:59:39.196142 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.196159 32364 net.cpp:165] Memory required for data: 1095681200\nI0821 08:59:39.196178 32364 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0821 08:59:39.196203 32364 net.cpp:100] Creating Layer L1_b15_sum_eltwise\nI0821 08:59:39.196218 32364 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0821 08:59:39.196231 32364 net.cpp:434] L1_b15_sum_eltwise <- 
L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:39.196247 32364 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0821 08:59:39.196310 32364 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0821 08:59:39.196328 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.196337 32364 net.cpp:165] Memory required for data: 1102234800\nI0821 08:59:39.196348 32364 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0821 08:59:39.196362 32364 net.cpp:100] Creating Layer L1_b15_relu\nI0821 08:59:39.196374 32364 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0821 08:59:39.196393 32364 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0821 08:59:39.196413 32364 net.cpp:150] Setting up L1_b15_relu\nI0821 08:59:39.196426 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.196436 32364 net.cpp:165] Memory required for data: 1108788400\nI0821 08:59:39.196445 32364 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:39.196460 32364 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:39.196470 32364 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0821 08:59:39.196485 32364 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:39.196506 32364 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:39.196596 32364 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:39.196615 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.196629 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.196638 32364 net.cpp:165] Memory required for data: 1121895600\nI0821 08:59:39.196648 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0821 08:59:39.196669 32364 net.cpp:100] Creating Layer L1_b16_cbr1_conv\nI0821 08:59:39.196682 32364 
net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:39.196714 32364 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0821 08:59:39.197151 32364 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0821 08:59:39.197171 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.197181 32364 net.cpp:165] Memory required for data: 1128449200\nI0821 08:59:39.197199 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0821 08:59:39.197216 32364 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0821 08:59:39.197227 32364 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0821 08:59:39.197252 32364 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0821 08:59:39.197566 32364 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0821 08:59:39.197584 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.197593 32364 net.cpp:165] Memory required for data: 1135002800\nI0821 08:59:39.197615 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:39.197638 32364 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0821 08:59:39.197650 32364 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0821 08:59:39.197667 32364 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.197759 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:39.197957 32364 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0821 08:59:39.197974 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.197984 32364 net.cpp:165] Memory required for data: 1141556400\nI0821 08:59:39.198002 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0821 08:59:39.198022 32364 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0821 08:59:39.198035 32364 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0821 08:59:39.198050 32364 net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.198068 32364 net.cpp:150] Setting up L1_b16_cbr1_relu\nI0821 08:59:39.198082 32364 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.198092 32364 net.cpp:165] Memory required for data: 1148110000\nI0821 08:59:39.198101 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0821 08:59:39.198127 32364 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0821 08:59:39.198140 32364 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0821 08:59:39.198170 32364 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0821 08:59:39.198575 32364 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0821 08:59:39.198596 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.198604 32364 net.cpp:165] Memory required for data: 1154663600\nI0821 08:59:39.198622 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0821 08:59:39.198639 32364 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0821 08:59:39.198650 32364 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0821 08:59:39.198675 32364 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0821 08:59:39.198988 32364 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0821 08:59:39.199007 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199018 32364 net.cpp:165] Memory required for data: 1161217200\nI0821 08:59:39.199038 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:39.199059 32364 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0821 08:59:39.199074 32364 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0821 08:59:39.199089 32364 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0821 08:59:39.199187 32364 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:39.199381 32364 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0821 08:59:39.199400 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199409 32364 net.cpp:165] Memory required for data: 1167770800\nI0821 08:59:39.199427 32364 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise\nI0821 08:59:39.199443 32364 net.cpp:100] Creating Layer 
L1_b16_sum_eltwise\nI0821 08:59:39.199455 32364 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0821 08:59:39.199473 32364 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:39.199491 32364 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0821 08:59:39.199561 32364 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0821 08:59:39.199579 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199589 32364 net.cpp:165] Memory required for data: 1174324400\nI0821 08:59:39.199600 32364 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0821 08:59:39.199615 32364 net.cpp:100] Creating Layer L1_b16_relu\nI0821 08:59:39.199626 32364 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0821 08:59:39.199646 32364 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0821 08:59:39.199666 32364 net.cpp:150] Setting up L1_b16_relu\nI0821 08:59:39.199681 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199690 32364 net.cpp:165] Memory required for data: 1180878000\nI0821 08:59:39.199699 32364 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:39.199713 32364 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:39.199724 32364 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0821 08:59:39.199739 32364 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:39.199759 32364 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:39.199848 32364 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:39.199867 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199882 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.199890 32364 net.cpp:165] Memory required for data: 1193985200\nI0821 08:59:39.199900 
32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0821 08:59:39.199921 32364 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0821 08:59:39.199934 32364 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:39.199956 32364 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0821 08:59:39.200362 32364 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0821 08:59:39.200382 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.200392 32364 net.cpp:165] Memory required for data: 1200538800\nI0821 08:59:39.200410 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0821 08:59:39.200428 32364 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0821 08:59:39.200439 32364 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0821 08:59:39.200460 32364 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0821 08:59:39.200788 32364 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0821 08:59:39.200808 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.200816 32364 net.cpp:165] Memory required for data: 1207092400\nI0821 08:59:39.200837 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:39.200860 32364 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0821 08:59:39.200871 32364 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0821 08:59:39.200887 32364 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.200984 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:39.201189 32364 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0821 08:59:39.201208 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.201218 32364 net.cpp:165] Memory required for data: 1213646000\nI0821 08:59:39.201236 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0821 08:59:39.201256 32364 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0821 08:59:39.201269 32364 net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0821 08:59:39.201283 32364 
net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.201303 32364 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0821 08:59:39.201316 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.201325 32364 net.cpp:165] Memory required for data: 1220199600\nI0821 08:59:39.201335 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0821 08:59:39.201369 32364 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0821 08:59:39.201381 32364 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0821 08:59:39.201403 32364 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0821 08:59:39.201825 32364 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0821 08:59:39.201845 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.201854 32364 net.cpp:165] Memory required for data: 1226753200\nI0821 08:59:39.201872 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0821 08:59:39.201889 32364 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0821 08:59:39.201908 32364 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0821 08:59:39.201925 32364 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0821 08:59:39.202255 32364 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0821 08:59:39.202275 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.202284 32364 net.cpp:165] Memory required for data: 1233306800\nI0821 08:59:39.202306 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:39.202327 32364 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0821 08:59:39.202339 32364 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0821 08:59:39.202356 32364 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0821 08:59:39.202451 32364 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:39.202647 32364 net.cpp:150] Setting up L1_b17_cbr2_scale\nI0821 08:59:39.202666 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.202675 32364 net.cpp:165] Memory required for 
data: 1239860400\nI0821 08:59:39.202693 32364 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0821 08:59:39.202710 32364 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0821 08:59:39.202721 32364 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0821 08:59:39.202735 32364 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:39.202755 32364 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0821 08:59:39.202821 32364 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0821 08:59:39.202839 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.202848 32364 net.cpp:165] Memory required for data: 1246414000\nI0821 08:59:39.202858 32364 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0821 08:59:39.202872 32364 net.cpp:100] Creating Layer L1_b17_relu\nI0821 08:59:39.202884 32364 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0821 08:59:39.202903 32364 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0821 08:59:39.202924 32364 net.cpp:150] Setting up L1_b17_relu\nI0821 08:59:39.202939 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.202947 32364 net.cpp:165] Memory required for data: 1252967600\nI0821 08:59:39.202957 32364 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:39.202971 32364 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:39.202982 32364 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0821 08:59:39.202996 32364 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:39.203017 32364 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:39.203099 32364 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:39.203117 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:39.203131 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.203140 32364 net.cpp:165] Memory required for data: 1266074800\nI0821 08:59:39.203156 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0821 08:59:39.203177 32364 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0821 08:59:39.203191 32364 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:39.203217 32364 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0821 08:59:39.203641 32364 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0821 08:59:39.203667 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.203677 32364 net.cpp:165] Memory required for data: 1272628400\nI0821 08:59:39.203696 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0821 08:59:39.203713 32364 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0821 08:59:39.203725 32364 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0821 08:59:39.203748 32364 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0821 08:59:39.204071 32364 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0821 08:59:39.204092 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.204102 32364 net.cpp:165] Memory required for data: 1279182000\nI0821 08:59:39.204123 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:39.204144 32364 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0821 08:59:39.204164 32364 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0821 08:59:39.204181 32364 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.204277 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:39.204473 32364 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0821 08:59:39.204493 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.204502 32364 net.cpp:165] Memory required for data: 1285735600\nI0821 08:59:39.204520 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0821 
08:59:39.204536 32364 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0821 08:59:39.204548 32364 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0821 08:59:39.204567 32364 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.204588 32364 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0821 08:59:39.204604 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.204613 32364 net.cpp:165] Memory required for data: 1292289200\nI0821 08:59:39.204623 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0821 08:59:39.204648 32364 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0821 08:59:39.204660 32364 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0821 08:59:39.204679 32364 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0821 08:59:39.205087 32364 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0821 08:59:39.205107 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.205116 32364 net.cpp:165] Memory required for data: 1298842800\nI0821 08:59:39.205134 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0821 08:59:39.205168 32364 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0821 08:59:39.205183 32364 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0821 08:59:39.205199 32364 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0821 08:59:39.205515 32364 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0821 08:59:39.205534 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.205544 32364 net.cpp:165] Memory required for data: 1305396400\nI0821 08:59:39.205613 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:39.205639 32364 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0821 08:59:39.205653 32364 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0821 08:59:39.205669 32364 net.cpp:395] L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0821 08:59:39.205767 32364 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:39.206006 32364 
net.cpp:150] Setting up L1_b18_cbr2_scale\nI0821 08:59:39.206028 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.206038 32364 net.cpp:165] Memory required for data: 1311950000\nI0821 08:59:39.206058 32364 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0821 08:59:39.206079 32364 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0821 08:59:39.206091 32364 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0821 08:59:39.206105 32364 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:39.206125 32364 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0821 08:59:39.206192 32364 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0821 08:59:39.206218 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.206226 32364 net.cpp:165] Memory required for data: 1318503600\nI0821 08:59:39.206235 32364 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0821 08:59:39.206252 32364 net.cpp:100] Creating Layer L1_b18_relu\nI0821 08:59:39.206264 32364 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0821 08:59:39.206276 32364 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0821 08:59:39.206302 32364 net.cpp:150] Setting up L1_b18_relu\nI0821 08:59:39.206313 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.206321 32364 net.cpp:165] Memory required for data: 1325057200\nI0821 08:59:39.206328 32364 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:39.206346 32364 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:39.206358 32364 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0821 08:59:39.206373 32364 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:39.206393 32364 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 
08:59:39.206485 32364 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:39.206506 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.206518 32364 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:39.206527 32364 net.cpp:165] Memory required for data: 1338164400\nI0821 08:59:39.206537 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:59:39.206562 32364 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:59:39.206575 32364 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:39.206595 32364 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:59:39.207010 32364 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:59:39.207031 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.207041 32364 net.cpp:165] Memory required for data: 1339802800\nI0821 08:59:39.207057 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:59:39.207079 32364 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:59:39.207093 32364 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:59:39.207113 32364 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:59:39.207432 32364 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:59:39.207450 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.207459 32364 net.cpp:165] Memory required for data: 1341441200\nI0821 08:59:39.207481 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:39.207497 32364 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:59:39.207509 32364 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:59:39.207525 32364 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.207621 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:39.207816 32364 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:59:39.207835 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.207845 32364 
net.cpp:165] Memory required for data: 1343079600\nI0821 08:59:39.207864 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:59:39.207878 32364 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:59:39.207890 32364 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:59:39.207909 32364 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.207929 32364 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:59:39.207944 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.207953 32364 net.cpp:165] Memory required for data: 1344718000\nI0821 08:59:39.207963 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:59:39.207988 32364 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:59:39.208001 32364 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:59:39.208019 32364 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:59:39.208465 32364 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:59:39.208487 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.208497 32364 net.cpp:165] Memory required for data: 1346356400\nI0821 08:59:39.208513 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:59:39.208535 32364 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:59:39.208549 32364 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:59:39.208565 32364 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:59:39.208874 32364 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:59:39.208897 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.208909 32364 net.cpp:165] Memory required for data: 1347994800\nI0821 08:59:39.208930 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:39.208950 32364 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:59:39.208961 32364 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:59:39.208977 32364 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 
08:59:39.209074 32364 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:39.209285 32364 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:59:39.209305 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.209313 32364 net.cpp:165] Memory required for data: 1349633200\nI0821 08:59:39.209332 32364 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:59:39.209350 32364 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:59:39.209362 32364 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:39.209385 32364 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:59:39.209439 32364 net.cpp:150] Setting up L2_b1_pool\nI0821 08:59:39.209460 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.209470 32364 net.cpp:165] Memory required for data: 1351271600\nI0821 08:59:39.209481 32364 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:59:39.209501 32364 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:59:39.209512 32364 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:59:39.209527 32364 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:59:39.209542 32364 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:59:39.209601 32364 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:59:39.209619 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.209630 32364 net.cpp:165] Memory required for data: 1352910000\nI0821 08:59:39.209638 32364 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:59:39.209653 32364 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:59:39.209666 32364 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:59:39.209683 32364 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:59:39.209703 32364 net.cpp:150] Setting up L2_b1_relu\nI0821 08:59:39.209718 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.209728 32364 net.cpp:165] Memory required for data: 1354548400\nI0821 
08:59:39.209738 32364 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:59:39.209754 32364 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:59:39.209769 32364 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:59:39.211716 32364 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:59:39.211738 32364 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:39.211748 32364 net.cpp:165] Memory required for data: 1356186800\nI0821 08:59:39.211760 32364 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:59:39.211779 32364 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:59:39.211792 32364 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:59:39.211807 32364 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:59:39.211822 32364 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:59:39.211894 32364 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:59:39.211912 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.211922 32364 net.cpp:165] Memory required for data: 1359463600\nI0821 08:59:39.211941 32364 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:39.211956 32364 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:39.211967 32364 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:59:39.211987 32364 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:39.212008 32364 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:39.212102 32364 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:39.212126 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.212141 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.212157 32364 net.cpp:165] Memory required for data: 1366017200\nI0821 08:59:39.212168 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:59:39.212189 32364 net.cpp:100] 
Creating Layer L2_b2_cbr1_conv\nI0821 08:59:39.212203 32364 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:39.212220 32364 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:59:39.212774 32364 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:59:39.212793 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.212803 32364 net.cpp:165] Memory required for data: 1369294000\nI0821 08:59:39.212821 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:59:39.212842 32364 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:59:39.212855 32364 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:59:39.212872 32364 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:59:39.213196 32364 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:59:39.213217 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.213227 32364 net.cpp:165] Memory required for data: 1372570800\nI0821 08:59:39.213249 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:39.213265 32364 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:59:39.213277 32364 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:59:39.213297 32364 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.213392 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:39.213589 32364 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:59:39.213608 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.213618 32364 net.cpp:165] Memory required for data: 1375847600\nI0821 08:59:39.213635 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:59:39.213650 32364 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:59:39.213662 32364 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:59:39.213680 32364 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.213701 32364 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 
08:59:39.213716 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.213724 32364 net.cpp:165] Memory required for data: 1379124400\nI0821 08:59:39.213734 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:59:39.213759 32364 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:59:39.213773 32364 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:59:39.213790 32364 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:59:39.214351 32364 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:59:39.214371 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.214381 32364 net.cpp:165] Memory required for data: 1382401200\nI0821 08:59:39.214399 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:59:39.214421 32364 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:59:39.214433 32364 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:59:39.214449 32364 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:59:39.214756 32364 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:59:39.214776 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.214787 32364 net.cpp:165] Memory required for data: 1385678000\nI0821 08:59:39.214817 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:39.214833 32364 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:59:39.214845 32364 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:59:39.214866 32364 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:59:39.214968 32364 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:39.215173 32364 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:59:39.215193 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.215203 32364 net.cpp:165] Memory required for data: 1388954800\nI0821 08:59:39.215220 32364 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:59:39.215237 32364 net.cpp:100] Creating Layer 
L2_b2_sum_eltwise\nI0821 08:59:39.215248 32364 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:59:39.215262 32364 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:39.215283 32364 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:59:39.215332 32364 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:59:39.215349 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.215359 32364 net.cpp:165] Memory required for data: 1392231600\nI0821 08:59:39.215369 32364 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:59:39.215384 32364 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:59:39.215400 32364 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:59:39.215415 32364 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:59:39.215435 32364 net.cpp:150] Setting up L2_b2_relu\nI0821 08:59:39.215450 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.215459 32364 net.cpp:165] Memory required for data: 1395508400\nI0821 08:59:39.215468 32364 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:39.215482 32364 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:39.215494 32364 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:59:39.215513 32364 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:39.215533 32364 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:39.215616 32364 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:39.215634 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.215647 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.215656 32364 net.cpp:165] Memory required for data: 1402062000\nI0821 08:59:39.215667 32364 layer_factory.hpp:77] Creating layer 
L2_b3_cbr1_conv\nI0821 08:59:39.215690 32364 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:59:39.215703 32364 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:39.215723 32364 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:59:39.216282 32364 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:59:39.216302 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.216312 32364 net.cpp:165] Memory required for data: 1405338800\nI0821 08:59:39.216331 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:59:39.216352 32364 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:59:39.216365 32364 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:59:39.216382 32364 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:59:39.216688 32364 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:59:39.216707 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.216717 32364 net.cpp:165] Memory required for data: 1408615600\nI0821 08:59:39.216737 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:39.216753 32364 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:59:39.216765 32364 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:59:39.216784 32364 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.216894 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:39.217108 32364 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:59:39.217126 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.217136 32364 net.cpp:165] Memory required for data: 1411892400\nI0821 08:59:39.217161 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:59:39.217178 32364 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:59:39.217190 32364 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:59:39.217211 32364 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 
08:59:39.217231 32364 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:59:39.217247 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.217255 32364 net.cpp:165] Memory required for data: 1415169200\nI0821 08:59:39.217265 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:59:39.217289 32364 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:59:39.217303 32364 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:59:39.217321 32364 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:59:39.217857 32364 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:59:39.217877 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.217886 32364 net.cpp:165] Memory required for data: 1418446000\nI0821 08:59:39.217903 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:59:39.217926 32364 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:59:39.217939 32364 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:59:39.217957 32364 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:59:39.218268 32364 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:59:39.218288 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.218297 32364 net.cpp:165] Memory required for data: 1421722800\nI0821 08:59:39.218319 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:39.218336 32364 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:59:39.218348 32364 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:59:39.218364 32364 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:59:39.218461 32364 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:39.218657 32364 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:59:39.218679 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.218689 32364 net.cpp:165] Memory required for data: 1424999600\nI0821 08:59:39.218708 32364 layer_factory.hpp:77] Creating layer 
L2_b3_sum_eltwise\nI0821 08:59:39.218725 32364 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:59:39.218736 32364 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:59:39.218750 32364 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:39.218766 32364 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:59:39.218818 32364 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:59:39.218837 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.218847 32364 net.cpp:165] Memory required for data: 1428276400\nI0821 08:59:39.218858 32364 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:59:39.218873 32364 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:59:39.218883 32364 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:59:39.218907 32364 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:59:39.218927 32364 net.cpp:150] Setting up L2_b3_relu\nI0821 08:59:39.218941 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.218951 32364 net.cpp:165] Memory required for data: 1431553200\nI0821 08:59:39.218961 32364 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:39.218976 32364 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:39.218986 32364 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:59:39.219007 32364 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:39.219036 32364 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:39.219125 32364 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:39.219144 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.219167 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.219177 32364 net.cpp:165] Memory required for 
data: 1438106800\nI0821 08:59:39.219188 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:59:39.219213 32364 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:59:39.219228 32364 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:39.219246 32364 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:59:39.219797 32364 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:59:39.219817 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.219826 32364 net.cpp:165] Memory required for data: 1441383600\nI0821 08:59:39.219844 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:59:39.219866 32364 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:59:39.219879 32364 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:59:39.219895 32364 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:59:39.220214 32364 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:59:39.220234 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.220243 32364 net.cpp:165] Memory required for data: 1444660400\nI0821 08:59:39.220265 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:39.220281 32364 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:59:39.220293 32364 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:59:39.220314 32364 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.220409 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:39.220605 32364 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:59:39.220624 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.220633 32364 net.cpp:165] Memory required for data: 1447937200\nI0821 08:59:39.220652 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:59:39.220666 32364 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:59:39.220679 32364 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:59:39.220693 
32364 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.220713 32364 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:59:39.220726 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.220736 32364 net.cpp:165] Memory required for data: 1451214000\nI0821 08:59:39.220746 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:59:39.220772 32364 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:59:39.220784 32364 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:59:39.220808 32364 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:59:39.221362 32364 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:59:39.221382 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.221391 32364 net.cpp:165] Memory required for data: 1454490800\nI0821 08:59:39.221410 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:59:39.221431 32364 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:59:39.221443 32364 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:59:39.221464 32364 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:59:39.221767 32364 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:59:39.221787 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.221796 32364 net.cpp:165] Memory required for data: 1457767600\nI0821 08:59:39.221818 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:39.221834 32364 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:59:39.221845 32364 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:59:39.221861 32364 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:59:39.221978 32364 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:39.222187 32364 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:59:39.222211 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.222221 32364 net.cpp:165] Memory required for data: 1461044400\nI0821 
08:59:39.222239 32364 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:59:39.222256 32364 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:59:39.222267 32364 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:59:39.222281 32364 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:39.222297 32364 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:59:39.222349 32364 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:59:39.222368 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.222378 32364 net.cpp:165] Memory required for data: 1464321200\nI0821 08:59:39.222388 32364 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:59:39.222403 32364 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:59:39.222414 32364 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:59:39.222434 32364 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:59:39.222453 32364 net.cpp:150] Setting up L2_b4_relu\nI0821 08:59:39.222468 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.222477 32364 net.cpp:165] Memory required for data: 1467598000\nI0821 08:59:39.222487 32364 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:39.222502 32364 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:39.222513 32364 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:59:39.222532 32364 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:39.222554 32364 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:39.222640 32364 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:39.222661 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.222674 32364 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0821 08:59:39.222684 32364 net.cpp:165] Memory required for data: 1474151600\nI0821 08:59:39.222694 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:59:39.222718 32364 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:59:39.222733 32364 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:39.222751 32364 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:59:39.223317 32364 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:59:39.223337 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.223347 32364 net.cpp:165] Memory required for data: 1477428400\nI0821 08:59:39.223364 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:59:39.223387 32364 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:59:39.223398 32364 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:59:39.223415 32364 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:59:39.223717 32364 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:59:39.223736 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.223745 32364 net.cpp:165] Memory required for data: 1480705200\nI0821 08:59:39.223767 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:39.223783 32364 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:59:39.223794 32364 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:59:39.223810 32364 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.223912 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:39.224105 32364 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:59:39.224129 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.224139 32364 net.cpp:165] Memory required for data: 1483982000\nI0821 08:59:39.224177 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:59:39.224194 32364 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:59:39.224205 32364 
net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:59:39.224220 32364 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.224241 32364 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:59:39.224256 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.224264 32364 net.cpp:165] Memory required for data: 1487258800\nI0821 08:59:39.224274 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:59:39.224299 32364 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:59:39.224313 32364 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:59:39.224334 32364 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:59:39.224881 32364 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:59:39.224901 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.224910 32364 net.cpp:165] Memory required for data: 1490535600\nI0821 08:59:39.224928 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:59:39.224951 32364 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:59:39.224963 32364 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:59:39.224984 32364 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:59:39.225299 32364 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:59:39.225318 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.225328 32364 net.cpp:165] Memory required for data: 1493812400\nI0821 08:59:39.225350 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:39.225366 32364 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:59:39.225378 32364 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:59:39.225394 32364 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:59:39.225492 32364 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:39.225684 32364 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:59:39.225703 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:39.225713 32364 net.cpp:165] Memory required for data: 1497089200\nI0821 08:59:39.225731 32364 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:59:39.225752 32364 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:59:39.225765 32364 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:59:39.225778 32364 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:39.225795 32364 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:59:39.225843 32364 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:59:39.225860 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.225870 32364 net.cpp:165] Memory required for data: 1500366000\nI0821 08:59:39.225880 32364 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:59:39.225898 32364 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:59:39.225913 32364 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:59:39.225926 32364 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:59:39.225945 32364 net.cpp:150] Setting up L2_b5_relu\nI0821 08:59:39.225960 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.225970 32364 net.cpp:165] Memory required for data: 1503642800\nI0821 08:59:39.225980 32364 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:39.225993 32364 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:39.226006 32364 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:59:39.226020 32364 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:39.226040 32364 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:39.226131 32364 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:39.226155 32364 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0821 08:59:39.226186 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.226197 32364 net.cpp:165] Memory required for data: 1510196400\nI0821 08:59:39.226207 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:59:39.226233 32364 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:59:39.226246 32364 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:39.226264 32364 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:59:39.226814 32364 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:59:39.226835 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.226843 32364 net.cpp:165] Memory required for data: 1513473200\nI0821 08:59:39.226862 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:59:39.226883 32364 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:59:39.226897 32364 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:59:39.226917 32364 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:59:39.227228 32364 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:59:39.227248 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.227257 32364 net.cpp:165] Memory required for data: 1516750000\nI0821 08:59:39.227278 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:39.227295 32364 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:59:39.227306 32364 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:59:39.227321 32364 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.227421 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:39.227617 32364 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:59:39.227639 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.227648 32364 net.cpp:165] Memory required for data: 1520026800\nI0821 08:59:39.227668 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:59:39.227682 32364 
net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:59:39.227694 32364 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:59:39.227708 32364 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.227727 32364 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:59:39.227741 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.227751 32364 net.cpp:165] Memory required for data: 1523303600\nI0821 08:59:39.227759 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:59:39.227783 32364 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:59:39.227797 32364 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:59:39.227819 32364 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:59:39.228373 32364 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:59:39.228392 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.228401 32364 net.cpp:165] Memory required for data: 1526580400\nI0821 08:59:39.228420 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:59:39.228440 32364 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:59:39.228453 32364 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:59:39.228476 32364 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:59:39.228782 32364 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:59:39.228801 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.228811 32364 net.cpp:165] Memory required for data: 1529857200\nI0821 08:59:39.228832 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:39.228850 32364 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:59:39.228862 32364 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:59:39.228878 32364 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:59:39.228976 32364 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:39.229225 32364 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 
08:59:39.229245 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.229255 32364 net.cpp:165] Memory required for data: 1533134000\nI0821 08:59:39.229284 32364 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:59:39.229305 32364 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:59:39.229318 32364 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:59:39.229332 32364 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:39.229349 32364 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:59:39.229398 32364 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:59:39.229418 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.229426 32364 net.cpp:165] Memory required for data: 1536410800\nI0821 08:59:39.229436 32364 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:59:39.229455 32364 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:59:39.229467 32364 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:59:39.229482 32364 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:59:39.229501 32364 net.cpp:150] Setting up L2_b6_relu\nI0821 08:59:39.229516 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.229524 32364 net.cpp:165] Memory required for data: 1539687600\nI0821 08:59:39.229534 32364 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:39.229548 32364 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:39.229559 32364 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:59:39.229574 32364 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:39.229594 32364 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:39.229686 32364 net.cpp:150] Setting up 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:39.229704 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.229717 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.229727 32364 net.cpp:165] Memory required for data: 1546241200\nI0821 08:59:39.229737 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:59:39.229761 32364 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:59:39.229775 32364 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:39.229794 32364 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:59:39.230350 32364 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:59:39.230370 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.230381 32364 net.cpp:165] Memory required for data: 1549518000\nI0821 08:59:39.230397 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:59:39.230419 32364 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:59:39.230432 32364 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:59:39.230453 32364 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:59:39.230756 32364 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:59:39.230774 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.230784 32364 net.cpp:165] Memory required for data: 1552794800\nI0821 08:59:39.230805 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:39.230823 32364 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:59:39.230834 32364 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:59:39.230849 32364 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.230948 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:39.231143 32364 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:59:39.231168 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.231178 32364 net.cpp:165] Memory required for data: 
1556071600\nI0821 08:59:39.231196 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:59:39.231215 32364 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:59:39.231227 32364 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:59:39.231241 32364 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.231261 32364 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:59:39.231283 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.231293 32364 net.cpp:165] Memory required for data: 1559348400\nI0821 08:59:39.231304 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:59:39.231329 32364 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:59:39.231343 32364 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:59:39.231360 32364 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:59:39.231894 32364 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:59:39.231912 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.231922 32364 net.cpp:165] Memory required for data: 1562625200\nI0821 08:59:39.231940 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:59:39.231962 32364 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:59:39.231976 32364 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:59:39.231992 32364 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:59:39.232301 32364 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:59:39.232324 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.232336 32364 net.cpp:165] Memory required for data: 1565902000\nI0821 08:59:39.232357 32364 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:39.232409 32364 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:59:39.232431 32364 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:59:39.232448 32364 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:59:39.232543 32364 
layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:39.232736 32364 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:59:39.232755 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.232764 32364 net.cpp:165] Memory required for data: 1569178800\nI0821 08:59:39.232780 32364 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:59:39.232796 32364 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:59:39.232805 32364 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:59:39.232818 32364 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:39.232836 32364 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:59:39.232882 32364 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:59:39.232897 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.232905 32364 net.cpp:165] Memory required for data: 1572455600\nI0821 08:59:39.232915 32364 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:59:39.232928 32364 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:59:39.232936 32364 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:59:39.232947 32364 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:59:39.232964 32364 net.cpp:150] Setting up L2_b7_relu\nI0821 08:59:39.232977 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.232987 32364 net.cpp:165] Memory required for data: 1575732400\nI0821 08:59:39.232996 32364 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:39.233017 32364 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:39.233031 32364 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:59:39.233047 32364 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:39.233067 32364 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:39.233167 32364 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:39.233191 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.233204 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.233214 32364 net.cpp:165] Memory required for data: 1582286000\nI0821 08:59:39.233224 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:59:39.233243 32364 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:59:39.233269 32364 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:39.233290 32364 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:59:39.233832 32364 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:59:39.233856 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.233866 32364 net.cpp:165] Memory required for data: 1585562800\nI0821 08:59:39.233885 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:59:39.233901 32364 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:59:39.233913 32364 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:59:39.233934 32364 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:59:39.234253 32364 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:59:39.234274 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.234283 32364 net.cpp:165] Memory required for data: 1588839600\nI0821 08:59:39.234304 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:39.234326 32364 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:59:39.234339 32364 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:59:39.234355 32364 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.234447 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:39.234643 32364 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:59:39.234661 32364 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0821 08:59:39.234671 32364 net.cpp:165] Memory required for data: 1592116400\nI0821 08:59:39.234689 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:59:39.234704 32364 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:59:39.234715 32364 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:59:39.234735 32364 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.234755 32364 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:59:39.234768 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.234777 32364 net.cpp:165] Memory required for data: 1595393200\nI0821 08:59:39.234787 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:59:39.234813 32364 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:59:39.234827 32364 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:59:39.234844 32364 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:59:39.235396 32364 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:59:39.235416 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.235425 32364 net.cpp:165] Memory required for data: 1598670000\nI0821 08:59:39.235443 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:59:39.235466 32364 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:59:39.235478 32364 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:59:39.235494 32364 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:59:39.235803 32364 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:59:39.235823 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.235832 32364 net.cpp:165] Memory required for data: 1601946800\nI0821 08:59:39.235854 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:39.235872 32364 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:59:39.235882 32364 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:59:39.235903 32364 net.cpp:395] 
L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:59:39.235998 32364 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:39.236204 32364 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:59:39.236224 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.236233 32364 net.cpp:165] Memory required for data: 1605223600\nI0821 08:59:39.236251 32364 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:59:39.236268 32364 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:59:39.236279 32364 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:59:39.236292 32364 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:39.236321 32364 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:59:39.236372 32364 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:59:39.236390 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.236399 32364 net.cpp:165] Memory required for data: 1608500400\nI0821 08:59:39.236409 32364 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:59:39.236428 32364 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:59:39.236441 32364 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:59:39.236456 32364 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:59:39.236475 32364 net.cpp:150] Setting up L2_b8_relu\nI0821 08:59:39.236490 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.236500 32364 net.cpp:165] Memory required for data: 1611777200\nI0821 08:59:39.236510 32364 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:39.236527 32364 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:39.236539 32364 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:59:39.236555 32364 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 
08:59:39.236575 32364 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:39.236661 32364 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:39.236686 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.236701 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.236711 32364 net.cpp:165] Memory required for data: 1618330800\nI0821 08:59:39.236721 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:59:39.236740 32364 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:59:39.236754 32364 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:39.236773 32364 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:59:39.237344 32364 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:59:39.237363 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.237373 32364 net.cpp:165] Memory required for data: 1621607600\nI0821 08:59:39.237391 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:59:39.237416 32364 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:59:39.237428 32364 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:59:39.237445 32364 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:59:39.237752 32364 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:59:39.237771 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.237782 32364 net.cpp:165] Memory required for data: 1624884400\nI0821 08:59:39.237803 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:39.237819 32364 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:59:39.237831 32364 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:59:39.237851 32364 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.237948 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:39.238165 32364 net.cpp:150] Setting up 
L2_b9_cbr1_scale\nI0821 08:59:39.238184 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.238194 32364 net.cpp:165] Memory required for data: 1628161200\nI0821 08:59:39.238212 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:59:39.238227 32364 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:59:39.238240 32364 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:59:39.238258 32364 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.238279 32364 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:59:39.238293 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.238303 32364 net.cpp:165] Memory required for data: 1631438000\nI0821 08:59:39.238313 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:59:39.238337 32364 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:59:39.238360 32364 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:59:39.238380 32364 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:59:39.238914 32364 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:59:39.238934 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.238943 32364 net.cpp:165] Memory required for data: 1634714800\nI0821 08:59:39.238961 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:59:39.238982 32364 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:59:39.238996 32364 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:59:39.239012 32364 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:59:39.239325 32364 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:59:39.239344 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.239353 32364 net.cpp:165] Memory required for data: 1637991600\nI0821 08:59:39.239374 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:39.239390 32364 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:59:39.239403 32364 net.cpp:434] L2_b9_cbr2_scale 
<- L2_b9_cbr2_bn_top\nI0821 08:59:39.239423 32364 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:59:39.239517 32364 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:39.239715 32364 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:59:39.239733 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.239742 32364 net.cpp:165] Memory required for data: 1641268400\nI0821 08:59:39.239760 32364 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:59:39.239778 32364 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:59:39.239789 32364 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:59:39.239802 32364 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:39.239823 32364 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:59:39.239871 32364 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:59:39.239888 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.239898 32364 net.cpp:165] Memory required for data: 1644545200\nI0821 08:59:39.239909 32364 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:59:39.239928 32364 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:59:39.239940 32364 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:59:39.239955 32364 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:59:39.239974 32364 net.cpp:150] Setting up L2_b9_relu\nI0821 08:59:39.239989 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.240000 32364 net.cpp:165] Memory required for data: 1647822000\nI0821 08:59:39.240010 32364 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:39.240023 32364 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:39.240036 32364 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:59:39.240054 32364 net.cpp:408] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:39.240075 32364 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:39.240164 32364 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:39.240187 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.240201 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.240211 32364 net.cpp:165] Memory required for data: 1654375600\nI0821 08:59:39.240221 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0821 08:59:39.240241 32364 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0821 08:59:39.240253 32364 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:39.240272 32364 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0821 08:59:39.240824 32364 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0821 08:59:39.240844 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.240861 32364 net.cpp:165] Memory required for data: 1657652400\nI0821 08:59:39.240880 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0821 08:59:39.240903 32364 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0821 08:59:39.240916 32364 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0821 08:59:39.240932 32364 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0821 08:59:39.241259 32364 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0821 08:59:39.241279 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.241288 32364 net.cpp:165] Memory required for data: 1660929200\nI0821 08:59:39.241309 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:39.241327 32364 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0821 08:59:39.241338 32364 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0821 08:59:39.241358 32364 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.241451 
32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:39.241648 32364 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0821 08:59:39.241667 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.241677 32364 net.cpp:165] Memory required for data: 1664206000\nI0821 08:59:39.241694 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0821 08:59:39.241709 32364 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0821 08:59:39.241721 32364 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0821 08:59:39.241740 32364 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.241760 32364 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0821 08:59:39.241775 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.241786 32364 net.cpp:165] Memory required for data: 1667482800\nI0821 08:59:39.241794 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0821 08:59:39.241818 32364 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0821 08:59:39.241832 32364 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0821 08:59:39.241849 32364 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0821 08:59:39.242411 32364 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0821 08:59:39.242432 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.242441 32364 net.cpp:165] Memory required for data: 1670759600\nI0821 08:59:39.242460 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0821 08:59:39.242481 32364 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0821 08:59:39.242493 32364 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0821 08:59:39.242509 32364 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0821 08:59:39.242815 32364 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0821 08:59:39.242835 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.242844 32364 net.cpp:165] Memory required for data: 1674036400\nI0821 08:59:39.242864 32364 layer_factory.hpp:77] Creating layer 
L2_b10_cbr2_scale\nI0821 08:59:39.242880 32364 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0821 08:59:39.242892 32364 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0821 08:59:39.242908 32364 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0821 08:59:39.243010 32364 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:39.243213 32364 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0821 08:59:39.243237 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.243247 32364 net.cpp:165] Memory required for data: 1677313200\nI0821 08:59:39.243264 32364 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0821 08:59:39.243281 32364 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0821 08:59:39.243293 32364 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0821 08:59:39.243306 32364 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:39.243322 32364 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0821 08:59:39.243376 32364 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0821 08:59:39.243392 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.243402 32364 net.cpp:165] Memory required for data: 1680590000\nI0821 08:59:39.243420 32364 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0821 08:59:39.243436 32364 net.cpp:100] Creating Layer L2_b10_relu\nI0821 08:59:39.243448 32364 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0821 08:59:39.243468 32364 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0821 08:59:39.243487 32364 net.cpp:150] Setting up L2_b10_relu\nI0821 08:59:39.243501 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.243510 32364 net.cpp:165] Memory required for data: 1683866800\nI0821 08:59:39.243521 32364 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:39.243535 32364 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 
08:59:39.243546 32364 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0821 08:59:39.243566 32364 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:39.243587 32364 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:39.243674 32364 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:39.243692 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.243705 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.243715 32364 net.cpp:165] Memory required for data: 1690420400\nI0821 08:59:39.243724 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0821 08:59:39.243748 32364 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0821 08:59:39.243762 32364 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:39.243782 32364 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0821 08:59:39.244344 32364 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0821 08:59:39.244364 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.244374 32364 net.cpp:165] Memory required for data: 1693697200\nI0821 08:59:39.244392 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0821 08:59:39.244413 32364 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0821 08:59:39.244426 32364 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0821 08:59:39.244442 32364 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0821 08:59:39.244750 32364 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0821 08:59:39.244770 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.244778 32364 net.cpp:165] Memory required for data: 1696974000\nI0821 08:59:39.244799 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:39.244815 32364 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0821 08:59:39.244827 32364 
net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0821 08:59:39.244848 32364 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.244945 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:39.245151 32364 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0821 08:59:39.245170 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.245180 32364 net.cpp:165] Memory required for data: 1700250800\nI0821 08:59:39.245198 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0821 08:59:39.245214 32364 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0821 08:59:39.245226 32364 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0821 08:59:39.245240 32364 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.245267 32364 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0821 08:59:39.245282 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.245291 32364 net.cpp:165] Memory required for data: 1703527600\nI0821 08:59:39.245301 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0821 08:59:39.245321 32364 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0821 08:59:39.245333 32364 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0821 08:59:39.245357 32364 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0821 08:59:39.245913 32364 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0821 08:59:39.245939 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.245949 32364 net.cpp:165] Memory required for data: 1706804400\nI0821 08:59:39.245966 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0821 08:59:39.245987 32364 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0821 08:59:39.246001 32364 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0821 08:59:39.246021 32364 net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0821 08:59:39.246328 32364 net.cpp:150] Setting up L2_b11_cbr2_bn\nI0821 08:59:39.246347 32364 net.cpp:157] Top shape: 100 32 16 
16 (819200)\nI0821 08:59:39.246356 32364 net.cpp:165] Memory required for data: 1710081200\nI0821 08:59:39.246378 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:39.246395 32364 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0821 08:59:39.246407 32364 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0821 08:59:39.246423 32364 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0821 08:59:39.246522 32364 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:39.246716 32364 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0821 08:59:39.246738 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.246748 32364 net.cpp:165] Memory required for data: 1713358000\nI0821 08:59:39.246767 32364 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0821 08:59:39.246784 32364 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0821 08:59:39.246795 32364 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0821 08:59:39.246809 32364 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:39.246825 32364 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0821 08:59:39.246877 32364 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0821 08:59:39.246896 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.246906 32364 net.cpp:165] Memory required for data: 1716634800\nI0821 08:59:39.246917 32364 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0821 08:59:39.246930 32364 net.cpp:100] Creating Layer L2_b11_relu\nI0821 08:59:39.246942 32364 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0821 08:59:39.246973 32364 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0821 08:59:39.246991 32364 net.cpp:150] Setting up L2_b11_relu\nI0821 08:59:39.247006 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.247015 32364 net.cpp:165] Memory required for data: 1719911600\nI0821 08:59:39.247025 32364 layer_factory.hpp:77] Creating 
layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:39.247040 32364 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:39.247051 32364 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0821 08:59:39.247071 32364 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:39.247092 32364 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:39.247180 32364 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:39.247198 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.247211 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.247221 32364 net.cpp:165] Memory required for data: 1726465200\nI0821 08:59:39.247231 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0821 08:59:39.247256 32364 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0821 08:59:39.247269 32364 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:39.247288 32364 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0821 08:59:39.248847 32364 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0821 08:59:39.248868 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.248878 32364 net.cpp:165] Memory required for data: 1729742000\nI0821 08:59:39.248896 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0821 08:59:39.248931 32364 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0821 08:59:39.248945 32364 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0821 08:59:39.248962 32364 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0821 08:59:39.249296 32364 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0821 08:59:39.249320 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.249330 32364 net.cpp:165] Memory required for data: 1733018800\nI0821 08:59:39.249351 32364 
layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:39.249367 32364 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0821 08:59:39.249379 32364 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0821 08:59:39.249395 32364 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.249490 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:39.249693 32364 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0821 08:59:39.249711 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.249722 32364 net.cpp:165] Memory required for data: 1736295600\nI0821 08:59:39.249739 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0821 08:59:39.249754 32364 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0821 08:59:39.249766 32364 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0821 08:59:39.249785 32364 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.249806 32364 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0821 08:59:39.249820 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.249830 32364 net.cpp:165] Memory required for data: 1739572400\nI0821 08:59:39.249840 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0821 08:59:39.249861 32364 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0821 08:59:39.249874 32364 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0821 08:59:39.249897 32364 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0821 08:59:39.250443 32364 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0821 08:59:39.250463 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.250473 32364 net.cpp:165] Memory required for data: 1742849200\nI0821 08:59:39.250489 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0821 08:59:39.250506 32364 net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0821 08:59:39.250519 32364 net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0821 08:59:39.250543 32364 net.cpp:408] 
L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0821 08:59:39.251037 32364 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0821 08:59:39.251058 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.251068 32364 net.cpp:165] Memory required for data: 1746126000\nI0821 08:59:39.251091 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:39.251111 32364 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0821 08:59:39.251124 32364 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0821 08:59:39.251140 32364 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0821 08:59:39.251255 32364 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:39.251479 32364 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0821 08:59:39.251492 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.251497 32364 net.cpp:165] Memory required for data: 1749402800\nI0821 08:59:39.251507 32364 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0821 08:59:39.251521 32364 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0821 08:59:39.251529 32364 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0821 08:59:39.251536 32364 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:39.251546 32364 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0821 08:59:39.251576 32364 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0821 08:59:39.251585 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.251590 32364 net.cpp:165] Memory required for data: 1752679600\nI0821 08:59:39.251595 32364 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0821 08:59:39.251602 32364 net.cpp:100] Creating Layer L2_b12_relu\nI0821 08:59:39.251616 32364 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0821 08:59:39.251626 32364 net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0821 08:59:39.251636 32364 net.cpp:150] Setting up L2_b12_relu\nI0821 08:59:39.251643 32364 net.cpp:157] Top 
shape: 100 32 16 16 (819200)\nI0821 08:59:39.251648 32364 net.cpp:165] Memory required for data: 1755956400\nI0821 08:59:39.251653 32364 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:39.251660 32364 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:39.251668 32364 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0821 08:59:39.251682 32364 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:39.252073 32364 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:39.252182 32364 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:39.252202 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.252215 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.252224 32364 net.cpp:165] Memory required for data: 1762510000\nI0821 08:59:39.252234 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0821 08:59:39.252254 32364 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0821 08:59:39.252267 32364 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:39.252291 32364 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0821 08:59:39.252841 32364 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0821 08:59:39.252862 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.252871 32364 net.cpp:165] Memory required for data: 1765786800\nI0821 08:59:39.252889 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0821 08:59:39.252910 32364 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0821 08:59:39.252923 32364 net.cpp:434] L2_b13_cbr1_bn <- L2_b13_cbr1_conv_top\nI0821 08:59:39.252941 32364 net.cpp:408] L2_b13_cbr1_bn -> L2_b13_cbr1_bn_top\nI0821 08:59:39.253267 32364 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0821 08:59:39.253286 32364 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.253295 32364 net.cpp:165] Memory required for data: 1769063600\nI0821 08:59:39.253317 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:39.253338 32364 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0821 08:59:39.253351 32364 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0821 08:59:39.253367 32364 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.253461 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:39.253659 32364 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0821 08:59:39.253677 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.253687 32364 net.cpp:165] Memory required for data: 1772340400\nI0821 08:59:39.253705 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0821 08:59:39.253726 32364 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0821 08:59:39.253737 32364 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0821 08:59:39.253756 32364 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.253777 32364 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0821 08:59:39.253792 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.253801 32364 net.cpp:165] Memory required for data: 1775617200\nI0821 08:59:39.253810 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0821 08:59:39.253831 32364 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0821 08:59:39.253844 32364 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0821 08:59:39.253867 32364 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0821 08:59:39.254416 32364 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0821 08:59:39.254436 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.254446 32364 net.cpp:165] Memory required for data: 1778894000\nI0821 08:59:39.254472 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0821 08:59:39.254489 32364 net.cpp:100] Creating 
Layer L2_b13_cbr2_bn\nI0821 08:59:39.254501 32364 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0821 08:59:39.254523 32364 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0821 08:59:39.254847 32364 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0821 08:59:39.254866 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.254875 32364 net.cpp:165] Memory required for data: 1782170800\nI0821 08:59:39.254897 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:39.254918 32364 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0821 08:59:39.254931 32364 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0821 08:59:39.254947 32364 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0821 08:59:39.255044 32364 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:39.255254 32364 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0821 08:59:39.255273 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.255282 32364 net.cpp:165] Memory required for data: 1785447600\nI0821 08:59:39.255300 32364 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0821 08:59:39.255322 32364 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0821 08:59:39.255334 32364 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0821 08:59:39.255347 32364 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:39.255364 32364 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0821 08:59:39.255419 32364 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0821 08:59:39.255437 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.255447 32364 net.cpp:165] Memory required for data: 1788724400\nI0821 08:59:39.255458 32364 layer_factory.hpp:77] Creating layer L2_b13_relu\nI0821 08:59:39.255472 32364 net.cpp:100] Creating Layer L2_b13_relu\nI0821 08:59:39.255484 32364 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0821 08:59:39.255504 32364 net.cpp:395] L2_b13_relu -> 
L2_b13_sum_eltwise_top (in-place)\nI0821 08:59:39.255524 32364 net.cpp:150] Setting up L2_b13_relu\nI0821 08:59:39.255539 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.255548 32364 net.cpp:165] Memory required for data: 1792001200\nI0821 08:59:39.255558 32364 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:39.255573 32364 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:39.255584 32364 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0821 08:59:39.255597 32364 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:39.255619 32364 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:39.255704 32364 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:39.255722 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.255735 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.255744 32364 net.cpp:165] Memory required for data: 1798554800\nI0821 08:59:39.255754 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0821 08:59:39.255774 32364 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0821 08:59:39.255786 32364 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:39.255808 32364 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0821 08:59:39.256361 32364 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0821 08:59:39.256382 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.256392 32364 net.cpp:165] Memory required for data: 1801831600\nI0821 08:59:39.256409 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0821 08:59:39.256427 32364 net.cpp:100] Creating Layer L2_b14_cbr1_bn\nI0821 08:59:39.256438 32364 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0821 08:59:39.256459 32364 
net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0821 08:59:39.256791 32364 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0821 08:59:39.256811 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.256821 32364 net.cpp:165] Memory required for data: 1805108400\nI0821 08:59:39.256842 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:39.256863 32364 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0821 08:59:39.256876 32364 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0821 08:59:39.256892 32364 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.256989 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:39.257195 32364 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0821 08:59:39.257215 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.257225 32364 net.cpp:165] Memory required for data: 1808385200\nI0821 08:59:39.257242 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0821 08:59:39.257262 32364 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0821 08:59:39.257275 32364 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0821 08:59:39.257289 32364 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.257309 32364 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0821 08:59:39.257323 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.257333 32364 net.cpp:165] Memory required for data: 1811662000\nI0821 08:59:39.257342 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0821 08:59:39.257367 32364 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0821 08:59:39.257380 32364 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0821 08:59:39.257403 32364 net.cpp:408] L2_b14_cbr2_conv -> L2_b14_cbr2_conv_top\nI0821 08:59:39.257939 32364 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0821 08:59:39.257958 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.257968 32364 net.cpp:165] Memory required for 
data: 1814938800\nI0821 08:59:39.257987 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0821 08:59:39.258003 32364 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0821 08:59:39.258016 32364 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0821 08:59:39.258044 32364 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0821 08:59:39.258360 32364 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0821 08:59:39.258380 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.258389 32364 net.cpp:165] Memory required for data: 1818215600\nI0821 08:59:39.258411 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:39.258432 32364 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0821 08:59:39.258445 32364 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0821 08:59:39.258461 32364 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0821 08:59:39.258554 32364 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:39.258756 32364 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0821 08:59:39.258775 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.258785 32364 net.cpp:165] Memory required for data: 1821492400\nI0821 08:59:39.258803 32364 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0821 08:59:39.258824 32364 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0821 08:59:39.258837 32364 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0821 08:59:39.258852 32364 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:39.258867 32364 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0821 08:59:39.258920 32364 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0821 08:59:39.258939 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.258949 32364 net.cpp:165] Memory required for data: 1824769200\nI0821 08:59:39.258965 32364 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0821 08:59:39.258978 32364 net.cpp:100] Creating Layer 
L2_b14_relu\nI0821 08:59:39.258991 32364 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0821 08:59:39.259006 32364 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0821 08:59:39.259037 32364 net.cpp:150] Setting up L2_b14_relu\nI0821 08:59:39.259053 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.259063 32364 net.cpp:165] Memory required for data: 1828046000\nI0821 08:59:39.259073 32364 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:39.259088 32364 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:39.259097 32364 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0821 08:59:39.259114 32364 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:39.259135 32364 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:39.259234 32364 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:39.259253 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.259268 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.259276 32364 net.cpp:165] Memory required for data: 1834599600\nI0821 08:59:39.259287 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0821 08:59:39.259307 32364 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0821 08:59:39.259320 32364 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:39.259342 32364 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0821 08:59:39.259886 32364 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0821 08:59:39.259904 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.259913 32364 net.cpp:165] Memory required for data: 1837876400\nI0821 08:59:39.259930 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0821 08:59:39.259948 32364 
net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0821 08:59:39.259960 32364 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0821 08:59:39.259981 32364 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0821 08:59:39.260306 32364 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0821 08:59:39.260325 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.260334 32364 net.cpp:165] Memory required for data: 1841153200\nI0821 08:59:39.260356 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:39.260377 32364 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0821 08:59:39.260390 32364 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0821 08:59:39.260407 32364 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.260501 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:39.260700 32364 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0821 08:59:39.260718 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.260728 32364 net.cpp:165] Memory required for data: 1844430000\nI0821 08:59:39.260746 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0821 08:59:39.260766 32364 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0821 08:59:39.260778 32364 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0821 08:59:39.260793 32364 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.260812 32364 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0821 08:59:39.260828 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.260836 32364 net.cpp:165] Memory required for data: 1847706800\nI0821 08:59:39.260848 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0821 08:59:39.260871 32364 net.cpp:100] Creating Layer L2_b15_cbr2_conv\nI0821 08:59:39.260885 32364 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0821 08:59:39.260907 32364 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0821 08:59:39.261458 32364 net.cpp:150] Setting up 
L2_b15_cbr2_conv\nI0821 08:59:39.261478 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.261487 32364 net.cpp:165] Memory required for data: 1850983600\nI0821 08:59:39.261505 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0821 08:59:39.261523 32364 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0821 08:59:39.261543 32364 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0821 08:59:39.261561 32364 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0821 08:59:39.261885 32364 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0821 08:59:39.261904 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.261914 32364 net.cpp:165] Memory required for data: 1854260400\nI0821 08:59:39.261935 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:39.261952 32364 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0821 08:59:39.261965 32364 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0821 08:59:39.261984 32364 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0821 08:59:39.262079 32364 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:39.262287 32364 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0821 08:59:39.262306 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.262315 32364 net.cpp:165] Memory required for data: 1857537200\nI0821 08:59:39.262333 32364 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0821 08:59:39.262349 32364 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0821 08:59:39.262362 32364 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0821 08:59:39.262375 32364 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:39.262395 32364 net.cpp:408] L2_b15_sum_eltwise -> L2_b15_sum_eltwise_top\nI0821 08:59:39.262446 32364 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0821 08:59:39.262468 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.262480 32364 net.cpp:165] Memory required for 
data: 1860814000\nI0821 08:59:39.262490 32364 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0821 08:59:39.262503 32364 net.cpp:100] Creating Layer L2_b15_relu\nI0821 08:59:39.262516 32364 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0821 08:59:39.262529 32364 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0821 08:59:39.262547 32364 net.cpp:150] Setting up L2_b15_relu\nI0821 08:59:39.262562 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.262572 32364 net.cpp:165] Memory required for data: 1864090800\nI0821 08:59:39.262580 32364 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:39.262601 32364 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:39.262614 32364 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0821 08:59:39.262629 32364 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:39.262650 32364 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:39.262734 32364 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:39.262753 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.262765 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.262774 32364 net.cpp:165] Memory required for data: 1870644400\nI0821 08:59:39.262784 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0821 08:59:39.262804 32364 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0821 08:59:39.262815 32364 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:39.262838 32364 net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0821 08:59:39.263406 32364 net.cpp:150] Setting up L2_b16_cbr1_conv\nI0821 08:59:39.263427 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.263437 32364 net.cpp:165] 
Memory required for data: 1873921200\nI0821 08:59:39.263454 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0821 08:59:39.263471 32364 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0821 08:59:39.263484 32364 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0821 08:59:39.263507 32364 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0821 08:59:39.263828 32364 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0821 08:59:39.263846 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.263864 32364 net.cpp:165] Memory required for data: 1877198000\nI0821 08:59:39.263886 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:39.263907 32364 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0821 08:59:39.263921 32364 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0821 08:59:39.263936 32364 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.264039 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:39.264245 32364 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0821 08:59:39.264263 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.264273 32364 net.cpp:165] Memory required for data: 1880474800\nI0821 08:59:39.264292 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0821 08:59:39.264307 32364 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0821 08:59:39.264318 32364 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0821 08:59:39.264338 32364 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.264359 32364 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0821 08:59:39.264374 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.264384 32364 net.cpp:165] Memory required for data: 1883751600\nI0821 08:59:39.264394 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0821 08:59:39.264418 32364 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0821 08:59:39.264432 32364 net.cpp:434] L2_b16_cbr2_conv <- 
L2_b16_cbr1_bn_top\nI0821 08:59:39.264457 32364 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0821 08:59:39.264997 32364 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0821 08:59:39.265018 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.265028 32364 net.cpp:165] Memory required for data: 1887028400\nI0821 08:59:39.265044 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0821 08:59:39.265063 32364 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0821 08:59:39.265074 32364 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0821 08:59:39.265090 32364 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0821 08:59:39.265413 32364 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0821 08:59:39.265431 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.265440 32364 net.cpp:165] Memory required for data: 1890305200\nI0821 08:59:39.265460 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:39.265476 32364 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0821 08:59:39.265487 32364 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0821 08:59:39.265507 32364 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0821 08:59:39.265604 32364 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:39.265803 32364 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0821 08:59:39.265822 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.265831 32364 net.cpp:165] Memory required for data: 1893582000\nI0821 08:59:39.265849 32364 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0821 08:59:39.265866 32364 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0821 08:59:39.265878 32364 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0821 08:59:39.265892 32364 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:39.265916 32364 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0821 08:59:39.265967 32364 net.cpp:150] Setting up 
L2_b16_sum_eltwise\nI0821 08:59:39.265993 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.266005 32364 net.cpp:165] Memory required for data: 1896858800\nI0821 08:59:39.266016 32364 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0821 08:59:39.266031 32364 net.cpp:100] Creating Layer L2_b16_relu\nI0821 08:59:39.266041 32364 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0821 08:59:39.266057 32364 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0821 08:59:39.266077 32364 net.cpp:150] Setting up L2_b16_relu\nI0821 08:59:39.266090 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.266108 32364 net.cpp:165] Memory required for data: 1900135600\nI0821 08:59:39.266119 32364 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:39.266136 32364 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:39.266155 32364 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0821 08:59:39.266173 32364 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:39.266194 32364 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:39.266283 32364 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:39.266311 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.266326 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.266336 32364 net.cpp:165] Memory required for data: 1906689200\nI0821 08:59:39.266346 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0821 08:59:39.266366 32364 net.cpp:100] Creating Layer L2_b17_cbr1_conv\nI0821 08:59:39.266379 32364 net.cpp:434] L2_b17_cbr1_conv <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:39.266398 32364 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0821 08:59:39.266944 32364 
net.cpp:150] Setting up L2_b17_cbr1_conv\nI0821 08:59:39.266968 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.266978 32364 net.cpp:165] Memory required for data: 1909966000\nI0821 08:59:39.266995 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0821 08:59:39.267012 32364 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0821 08:59:39.267024 32364 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0821 08:59:39.267046 32364 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0821 08:59:39.267365 32364 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0821 08:59:39.267385 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.267395 32364 net.cpp:165] Memory required for data: 1913242800\nI0821 08:59:39.267416 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:39.267436 32364 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0821 08:59:39.267449 32364 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0821 08:59:39.267465 32364 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.267556 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:39.267761 32364 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0821 08:59:39.267781 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.267789 32364 net.cpp:165] Memory required for data: 1916519600\nI0821 08:59:39.267808 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0821 08:59:39.267823 32364 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0821 08:59:39.267834 32364 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0821 08:59:39.267853 32364 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.267874 32364 net.cpp:150] Setting up L2_b17_cbr1_relu\nI0821 08:59:39.267889 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.267899 32364 net.cpp:165] Memory required for data: 1919796400\nI0821 08:59:39.267907 32364 layer_factory.hpp:77] Creating layer 
L2_b17_cbr2_conv\nI0821 08:59:39.267935 32364 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0821 08:59:39.267949 32364 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0821 08:59:39.267969 32364 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0821 08:59:39.268522 32364 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0821 08:59:39.268543 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.268553 32364 net.cpp:165] Memory required for data: 1923073200\nI0821 08:59:39.268570 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0821 08:59:39.268591 32364 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0821 08:59:39.268604 32364 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0821 08:59:39.268621 32364 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0821 08:59:39.268970 32364 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0821 08:59:39.268990 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.268999 32364 net.cpp:165] Memory required for data: 1926350000\nI0821 08:59:39.269021 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:39.269037 32364 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0821 08:59:39.269049 32364 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0821 08:59:39.269069 32364 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0821 08:59:39.269176 32364 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:39.269378 32364 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0821 08:59:39.269397 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.269407 32364 net.cpp:165] Memory required for data: 1929626800\nI0821 08:59:39.269424 32364 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0821 08:59:39.269441 32364 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0821 08:59:39.269454 32364 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0821 08:59:39.269466 32364 net.cpp:434] L2_b17_sum_eltwise <- 
L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:39.269487 32364 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0821 08:59:39.269537 32364 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0821 08:59:39.269554 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.269564 32364 net.cpp:165] Memory required for data: 1932903600\nI0821 08:59:39.269574 32364 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0821 08:59:39.269593 32364 net.cpp:100] Creating Layer L2_b17_relu\nI0821 08:59:39.269608 32364 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0821 08:59:39.269621 32364 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0821 08:59:39.269641 32364 net.cpp:150] Setting up L2_b17_relu\nI0821 08:59:39.269656 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.269665 32364 net.cpp:165] Memory required for data: 1936180400\nI0821 08:59:39.269675 32364 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:39.269695 32364 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:39.269706 32364 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0821 08:59:39.269721 32364 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:39.269742 32364 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:39.269824 32364 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:39.269847 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.269861 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.269871 32364 net.cpp:165] Memory required for data: 1942734000\nI0821 08:59:39.269881 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_conv\nI0821 08:59:39.269901 32364 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0821 08:59:39.269914 32364 
net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:39.269932 32364 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0821 08:59:39.270515 32364 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0821 08:59:39.270539 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.270550 32364 net.cpp:165] Memory required for data: 1946010800\nI0821 08:59:39.270568 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0821 08:59:39.270589 32364 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0821 08:59:39.270602 32364 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0821 08:59:39.270619 32364 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0821 08:59:39.270926 32364 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0821 08:59:39.270946 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.270956 32364 net.cpp:165] Memory required for data: 1949287600\nI0821 08:59:39.270977 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:39.271006 32364 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0821 08:59:39.271014 32364 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0821 08:59:39.271023 32364 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.271091 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:39.271260 32364 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0821 08:59:39.271281 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.271291 32364 net.cpp:165] Memory required for data: 1952564400\nI0821 08:59:39.271307 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0821 08:59:39.271322 32364 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0821 08:59:39.271334 32364 net.cpp:434] L2_b18_cbr1_relu <- L2_b18_cbr1_bn_top\nI0821 08:59:39.271348 32364 net.cpp:395] L2_b18_cbr1_relu -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.271368 32364 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0821 08:59:39.271384 32364 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.271392 32364 net.cpp:165] Memory required for data: 1955841200\nI0821 08:59:39.271401 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0821 08:59:39.271426 32364 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0821 08:59:39.271438 32364 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0821 08:59:39.271461 32364 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0821 08:59:39.271993 32364 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0821 08:59:39.272012 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.272022 32364 net.cpp:165] Memory required for data: 1959118000\nI0821 08:59:39.272040 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0821 08:59:39.272061 32364 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0821 08:59:39.272074 32364 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0821 08:59:39.272095 32364 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0821 08:59:39.272411 32364 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0821 08:59:39.272430 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.272440 32364 net.cpp:165] Memory required for data: 1962394800\nI0821 08:59:39.272461 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:39.272477 32364 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0821 08:59:39.272490 32364 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0821 08:59:39.272505 32364 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0821 08:59:39.272603 32364 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:39.272799 32364 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0821 08:59:39.272817 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.272827 32364 net.cpp:165] Memory required for data: 1965671600\nI0821 08:59:39.272845 32364 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0821 08:59:39.272866 32364 net.cpp:100] Creating Layer 
L2_b18_sum_eltwise\nI0821 08:59:39.272878 32364 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0821 08:59:39.272892 32364 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:39.272909 32364 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0821 08:59:39.272967 32364 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0821 08:59:39.272989 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.272999 32364 net.cpp:165] Memory required for data: 1968948400\nI0821 08:59:39.273008 32364 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0821 08:59:39.273028 32364 net.cpp:100] Creating Layer L2_b18_relu\nI0821 08:59:39.273041 32364 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0821 08:59:39.273056 32364 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0821 08:59:39.273074 32364 net.cpp:150] Setting up L2_b18_relu\nI0821 08:59:39.273089 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.273098 32364 net.cpp:165] Memory required for data: 1972225200\nI0821 08:59:39.273108 32364 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:39.273130 32364 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:39.273144 32364 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0821 08:59:39.273167 32364 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:39.273190 32364 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:39.273283 32364 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:39.273303 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.273315 32364 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:39.273324 32364 net.cpp:165] Memory required for data: 1978778800\nI0821 08:59:39.273334 
32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:59:39.273358 32364 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:59:39.273372 32364 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:39.273391 32364 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:59:39.273953 32364 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:59:39.273972 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.273982 32364 net.cpp:165] Memory required for data: 1979598000\nI0821 08:59:39.274085 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:59:39.274104 32364 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:59:39.274117 32364 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:59:39.274134 32364 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:59:39.274533 32364 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:59:39.274554 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.274562 32364 net.cpp:165] Memory required for data: 1980417200\nI0821 08:59:39.274585 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:39.274603 32364 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:59:39.274616 32364 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:59:39.274636 32364 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.274726 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:39.274935 32364 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:59:39.274953 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.274963 32364 net.cpp:165] Memory required for data: 1981236400\nI0821 08:59:39.274981 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:59:39.275002 32364 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:59:39.275013 32364 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:59:39.275027 32364 net.cpp:395] L3_b1_cbr1_relu -> 
L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:39.275048 32364 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:59:39.275061 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.275070 32364 net.cpp:165] Memory required for data: 1982055600\nI0821 08:59:39.275080 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:59:39.275105 32364 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:59:39.275120 32364 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:59:39.275141 32364 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:59:39.276698 32364 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:59:39.276721 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.276731 32364 net.cpp:165] Memory required for data: 1982874800\nI0821 08:59:39.276748 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:59:39.276770 32364 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:59:39.276782 32364 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:59:39.276804 32364 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:59:39.277137 32364 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:59:39.277163 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.277174 32364 net.cpp:165] Memory required for data: 1983694000\nI0821 08:59:39.277206 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:39.277222 32364 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:59:39.277235 32364 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:59:39.277252 32364 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:59:39.277354 32364 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:39.277565 32364 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:59:39.277585 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.277595 32364 net.cpp:165] Memory required for data: 1984513200\nI0821 08:59:39.277613 32364 layer_factory.hpp:77] 
Creating layer L3_b1_pool\nI0821 08:59:39.277629 32364 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:59:39.277642 32364 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:39.277663 32364 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:59:39.277725 32364 net.cpp:150] Setting up L3_b1_pool\nI0821 08:59:39.277748 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.277758 32364 net.cpp:165] Memory required for data: 1985332400\nI0821 08:59:39.277768 32364 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:59:39.277784 32364 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:59:39.277796 32364 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:59:39.277809 32364 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:59:39.277829 32364 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:59:39.277890 32364 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:59:39.277909 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.277918 32364 net.cpp:165] Memory required for data: 1986151600\nI0821 08:59:39.277928 32364 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:59:39.277941 32364 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:59:39.277953 32364 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:59:39.277967 32364 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:59:39.277983 32364 net.cpp:150] Setting up L3_b1_relu\nI0821 08:59:39.277993 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:39.277998 32364 net.cpp:165] Memory required for data: 1986970800\nI0821 08:59:39.278003 32364 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:59:39.278017 32364 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:59:39.278024 32364 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:59:39.279238 32364 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:59:39.279264 32364 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 
08:59:39.279275 32364 net.cpp:165] Memory required for data: 1987790000\nI0821 08:59:39.279285 32364 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:59:39.279300 32364 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:59:39.279312 32364 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:59:39.279325 32364 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:59:39.279340 32364 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:59:39.279409 32364 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:59:39.279429 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.279439 32364 net.cpp:165] Memory required for data: 1989428400\nI0821 08:59:39.279450 32364 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:39.279469 32364 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:39.279481 32364 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:59:39.279497 32364 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:39.279517 32364 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:39.279618 32364 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:39.279636 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.279649 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.279667 32364 net.cpp:165] Memory required for data: 1992705200\nI0821 08:59:39.279678 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:59:39.279703 32364 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:59:39.279717 32364 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:39.279737 32364 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:59:39.280833 32364 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:59:39.280854 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:39.280864 32364 net.cpp:165] Memory required for data: 1994343600\nI0821 08:59:39.280881 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:59:39.280905 32364 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:59:39.280918 32364 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:59:39.280939 32364 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:59:39.281263 32364 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:59:39.281283 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.281293 32364 net.cpp:165] Memory required for data: 1995982000\nI0821 08:59:39.281316 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:39.281332 32364 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:59:39.281345 32364 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:59:39.281365 32364 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.281461 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:39.281666 32364 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:59:39.281684 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.281693 32364 net.cpp:165] Memory required for data: 1997620400\nI0821 08:59:39.281711 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:59:39.281726 32364 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:59:39.281738 32364 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:59:39.281757 32364 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:39.281779 32364 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:59:39.281793 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.281802 32364 net.cpp:165] Memory required for data: 1999258800\nI0821 08:59:39.281813 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:59:39.281838 32364 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:59:39.281852 32364 net.cpp:434] L3_b2_cbr2_conv <- 
L3_b2_cbr1_bn_top\nI0821 08:59:39.281873 32364 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:59:39.283953 32364 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:59:39.283977 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.283987 32364 net.cpp:165] Memory required for data: 2000897200\nI0821 08:59:39.284005 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:59:39.284030 32364 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:59:39.284044 32364 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:59:39.284066 32364 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:59:39.284390 32364 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:59:39.284410 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.284420 32364 net.cpp:165] Memory required for data: 2002535600\nI0821 08:59:39.284441 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:39.284457 32364 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:59:39.284468 32364 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:59:39.284489 32364 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:59:39.284588 32364 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:39.284795 32364 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:59:39.284812 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.284822 32364 net.cpp:165] Memory required for data: 2004174000\nI0821 08:59:39.284842 32364 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:59:39.284858 32364 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:59:39.284878 32364 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:59:39.284896 32364 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:39.284915 32364 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:59:39.284987 32364 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:59:39.285001 
32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.285006 32364 net.cpp:165] Memory required for data: 2005812400\nI0821 08:59:39.285012 32364 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:59:39.285020 32364 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:59:39.285027 32364 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:59:39.285037 32364 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:59:39.285048 32364 net.cpp:150] Setting up L3_b2_relu\nI0821 08:59:39.285054 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.285059 32364 net.cpp:165] Memory required for data: 2007450800\nI0821 08:59:39.285064 32364 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.285073 32364 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.285079 32364 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:59:39.285085 32364 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:39.285095 32364 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:39.285156 32364 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:39.285173 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.285185 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.285194 32364 net.cpp:165] Memory required for data: 2010727600\nI0821 08:59:39.285204 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:59:39.285224 32364 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:59:39.285238 32364 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:39.285260 32364 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:59:39.286345 32364 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:59:39.286365 32364 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.286375 32364 net.cpp:165] Memory required for data: 2012366000\nI0821 08:59:39.286392 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:59:39.286409 32364 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:59:39.286422 32364 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:59:39.286442 32364 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:59:39.286768 32364 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:59:39.286787 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.286798 32364 net.cpp:165] Memory required for data: 2014004400\nI0821 08:59:39.286819 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:39.286835 32364 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:59:39.286847 32364 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:59:39.286862 32364 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.286973 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:39.287184 32364 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:59:39.287207 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.287217 32364 net.cpp:165] Memory required for data: 2015642800\nI0821 08:59:39.287235 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:59:39.287251 32364 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:59:39.287263 32364 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:59:39.287277 32364 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:39.287297 32364 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:59:39.287312 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.287322 32364 net.cpp:165] Memory required for data: 2017281200\nI0821 08:59:39.287339 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:59:39.287364 32364 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:59:39.287379 32364 
net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:59:39.287397 32364 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:59:39.288478 32364 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:59:39.288498 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.288508 32364 net.cpp:165] Memory required for data: 2018919600\nI0821 08:59:39.288525 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:59:39.288547 32364 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:59:39.288560 32364 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:59:39.288583 32364 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:59:39.288900 32364 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:59:39.288920 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.288929 32364 net.cpp:165] Memory required for data: 2020558000\nI0821 08:59:39.288950 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:39.288967 32364 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:59:39.288980 32364 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:59:39.289001 32364 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:59:39.289098 32364 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:39.289309 32364 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:59:39.289327 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.289336 32364 net.cpp:165] Memory required for data: 2022196400\nI0821 08:59:39.289356 32364 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:59:39.289382 32364 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:59:39.289396 32364 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:59:39.289410 32364 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:39.289427 32364 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:59:39.289491 32364 net.cpp:150] Setting up 
L3_b3_sum_eltwise\nI0821 08:59:39.289510 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.289520 32364 net.cpp:165] Memory required for data: 2023834800\nI0821 08:59:39.289530 32364 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:59:39.289546 32364 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:59:39.289557 32364 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:59:39.289575 32364 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:59:39.289597 32364 net.cpp:150] Setting up L3_b3_relu\nI0821 08:59:39.289611 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.289620 32364 net.cpp:165] Memory required for data: 2025473200\nI0821 08:59:39.289630 32364 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.289645 32364 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.289657 32364 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:59:39.289672 32364 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:39.289691 32364 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:39.289783 32364 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:39.289800 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.289813 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.289824 32364 net.cpp:165] Memory required for data: 2028750000\nI0821 08:59:39.289834 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:59:39.289854 32364 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:59:39.289866 32364 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:39.289890 32364 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:59:39.291007 32364 net.cpp:150] Setting up 
L3_b4_cbr1_conv\nI0821 08:59:39.291028 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.291036 32364 net.cpp:165] Memory required for data: 2030388400\nI0821 08:59:39.291055 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:59:39.291077 32364 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:59:39.291090 32364 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:59:39.291107 32364 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:59:39.291437 32364 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:59:39.291456 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.291465 32364 net.cpp:165] Memory required for data: 2032026800\nI0821 08:59:39.291487 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:39.291503 32364 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:59:39.291515 32364 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:59:39.291530 32364 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.291633 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:39.291839 32364 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:59:39.291862 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.291872 32364 net.cpp:165] Memory required for data: 2033665200\nI0821 08:59:39.291890 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:59:39.291905 32364 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:59:39.291918 32364 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:59:39.291932 32364 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:39.291955 32364 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:59:39.291970 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.291978 32364 net.cpp:165] Memory required for data: 2035303600\nI0821 08:59:39.291988 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:59:39.292012 32364 net.cpp:100] 
Creating Layer L3_b4_cbr2_conv\nI0821 08:59:39.292026 32364 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:59:39.292045 32364 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:59:39.293160 32364 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:59:39.293181 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.293191 32364 net.cpp:165] Memory required for data: 2036942000\nI0821 08:59:39.293210 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:59:39.293226 32364 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:59:39.293242 32364 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:59:39.293261 32364 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:59:39.293582 32364 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:59:39.293602 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.293612 32364 net.cpp:165] Memory required for data: 2038580400\nI0821 08:59:39.293632 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:39.293649 32364 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:59:39.293661 32364 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:59:39.293681 32364 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:59:39.293779 32364 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:39.293985 32364 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:59:39.294003 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.294013 32364 net.cpp:165] Memory required for data: 2040218800\nI0821 08:59:39.294031 32364 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:59:39.294052 32364 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:59:39.294064 32364 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:59:39.294078 32364 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:39.294095 32364 net.cpp:408] L3_b4_sum_eltwise -> 
L3_b4_sum_eltwise_top\nI0821 08:59:39.294165 32364 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:59:39.294193 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.294203 32364 net.cpp:165] Memory required for data: 2041857200\nI0821 08:59:39.294212 32364 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:59:39.294226 32364 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:59:39.294239 32364 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:59:39.294258 32364 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:59:39.294277 32364 net.cpp:150] Setting up L3_b4_relu\nI0821 08:59:39.294292 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.294302 32364 net.cpp:165] Memory required for data: 2043495600\nI0821 08:59:39.294312 32364 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.294325 32364 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.294337 32364 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:59:39.294353 32364 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:39.294373 32364 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:39.294461 32364 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:39.294478 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.294492 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.294502 32364 net.cpp:165] Memory required for data: 2046772400\nI0821 08:59:39.294512 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:59:39.294530 32364 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:59:39.294543 32364 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:39.294569 32364 net.cpp:408] L3_b5_cbr1_conv -> 
L3_b5_cbr1_conv_top\nI0821 08:59:39.295671 32364 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:59:39.295691 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.295701 32364 net.cpp:165] Memory required for data: 2048410800\nI0821 08:59:39.295719 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:59:39.295742 32364 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:59:39.295754 32364 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:59:39.295770 32364 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:59:39.296097 32364 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:59:39.296115 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.296125 32364 net.cpp:165] Memory required for data: 2050049200\nI0821 08:59:39.296154 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:39.296170 32364 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:59:39.296183 32364 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:59:39.296200 32364 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.296300 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:39.296504 32364 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:59:39.296525 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.296535 32364 net.cpp:165] Memory required for data: 2051687600\nI0821 08:59:39.296553 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:59:39.296569 32364 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:59:39.296581 32364 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:59:39.296596 32364 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:39.296614 32364 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:59:39.296628 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.296638 32364 net.cpp:165] Memory required for data: 2053326000\nI0821 08:59:39.296648 32364 layer_factory.hpp:77] 
Creating layer L3_b5_cbr2_conv\nI0821 08:59:39.296672 32364 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:59:39.296686 32364 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:59:39.296708 32364 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:59:39.297819 32364 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:59:39.297840 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.297850 32364 net.cpp:165] Memory required for data: 2054964400\nI0821 08:59:39.297868 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:59:39.297891 32364 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:59:39.297904 32364 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:59:39.297920 32364 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:59:39.298247 32364 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:59:39.298266 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.298276 32364 net.cpp:165] Memory required for data: 2056602800\nI0821 08:59:39.298296 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:39.298318 32364 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:59:39.298331 32364 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:59:39.298346 32364 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:59:39.298444 32364 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:39.298652 32364 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:59:39.298671 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.298681 32364 net.cpp:165] Memory required for data: 2058241200\nI0821 08:59:39.298698 32364 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:59:39.298722 32364 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:59:39.298737 32364 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:59:39.298750 32364 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 
08:59:39.298766 32364 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:59:39.298830 32364 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:59:39.298848 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.298856 32364 net.cpp:165] Memory required for data: 2059879600\nI0821 08:59:39.298867 32364 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:59:39.298882 32364 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:59:39.298894 32364 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:59:39.298913 32364 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:59:39.298934 32364 net.cpp:150] Setting up L3_b5_relu\nI0821 08:59:39.298949 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.298959 32364 net.cpp:165] Memory required for data: 2061518000\nI0821 08:59:39.298967 32364 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.298982 32364 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.298993 32364 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:59:39.299008 32364 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:39.299029 32364 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:39.299119 32364 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:39.299139 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.299160 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.299170 32364 net.cpp:165] Memory required for data: 2064794800\nI0821 08:59:39.299180 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:59:39.299204 32364 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:59:39.299218 32364 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 
08:59:39.299237 32364 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:59:39.301321 32364 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:59:39.301342 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.301353 32364 net.cpp:165] Memory required for data: 2066433200\nI0821 08:59:39.301371 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:59:39.301394 32364 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:59:39.301415 32364 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:59:39.301434 32364 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:59:39.301766 32364 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:59:39.301785 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.301795 32364 net.cpp:165] Memory required for data: 2068071600\nI0821 08:59:39.301816 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:39.301838 32364 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:59:39.301852 32364 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:59:39.301872 32364 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.301981 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:39.302173 32364 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:59:39.302192 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.302201 32364 net.cpp:165] Memory required for data: 2069710000\nI0821 08:59:39.302219 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:59:39.302233 32364 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:59:39.302245 32364 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:59:39.302264 32364 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:39.302285 32364 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:59:39.302300 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.302309 32364 net.cpp:165] Memory required for data: 
2071348400\nI0821 08:59:39.302320 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:59:39.302340 32364 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:59:39.302352 32364 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:59:39.302376 32364 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:59:39.303453 32364 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:59:39.303473 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.303483 32364 net.cpp:165] Memory required for data: 2072986800\nI0821 08:59:39.303499 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:59:39.303521 32364 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:59:39.303534 32364 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:59:39.303551 32364 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:59:39.303874 32364 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:59:39.303892 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.303901 32364 net.cpp:165] Memory required for data: 2074625200\nI0821 08:59:39.303922 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:39.303938 32364 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:59:39.303951 32364 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:59:39.303964 32364 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:59:39.304045 32364 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:39.304224 32364 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:59:39.304241 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.304250 32364 net.cpp:165] Memory required for data: 2076263600\nI0821 08:59:39.304268 32364 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:59:39.304286 32364 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:59:39.304298 32364 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:59:39.304311 32364 net.cpp:434] 
L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:39.304332 32364 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:59:39.304394 32364 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:59:39.304417 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.304428 32364 net.cpp:165] Memory required for data: 2077902000\nI0821 08:59:39.304438 32364 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:59:39.304452 32364 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:59:39.304463 32364 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:59:39.304486 32364 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:59:39.304507 32364 net.cpp:150] Setting up L3_b6_relu\nI0821 08:59:39.304522 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.304533 32364 net.cpp:165] Memory required for data: 2079540400\nI0821 08:59:39.304541 32364 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.304559 32364 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.304572 32364 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:59:39.304587 32364 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:39.304610 32364 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:39.304692 32364 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:39.304716 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.304733 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.304741 32364 net.cpp:165] Memory required for data: 2082817200\nI0821 08:59:39.304752 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:59:39.304771 32364 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:59:39.304785 32364 net.cpp:434] 
L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:39.304803 32364 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:59:39.305899 32364 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:59:39.305923 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.305933 32364 net.cpp:165] Memory required for data: 2084455600\nI0821 08:59:39.305951 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:59:39.305969 32364 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:59:39.305980 32364 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:59:39.305997 32364 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:59:39.306329 32364 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:59:39.306349 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.306357 32364 net.cpp:165] Memory required for data: 2086094000\nI0821 08:59:39.306378 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:39.306399 32364 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:59:39.306411 32364 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:59:39.306435 32364 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.306542 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:39.306748 32364 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:59:39.306767 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.306777 32364 net.cpp:165] Memory required for data: 2087732400\nI0821 08:59:39.306795 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:59:39.306810 32364 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:59:39.306823 32364 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:59:39.306841 32364 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:39.306862 32364 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:59:39.306876 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:39.306886 32364 net.cpp:165] Memory required for data: 2089370800\nI0821 08:59:39.306896 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:59:39.306921 32364 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:59:39.306934 32364 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:59:39.306954 32364 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:59:39.308038 32364 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:59:39.308058 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.308068 32364 net.cpp:165] Memory required for data: 2091009200\nI0821 08:59:39.308090 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:59:39.308115 32364 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:59:39.308135 32364 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:59:39.308161 32364 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:59:39.308493 32364 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:59:39.308512 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.308522 32364 net.cpp:165] Memory required for data: 2092647600\nI0821 08:59:39.308544 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:39.308562 32364 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:59:39.308573 32364 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:59:39.308588 32364 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:59:39.308689 32364 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:39.308897 32364 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:59:39.308915 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.308924 32364 net.cpp:165] Memory required for data: 2094286000\nI0821 08:59:39.308943 32364 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:59:39.308959 32364 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:59:39.308971 32364 net.cpp:434] L3_b7_sum_eltwise <- 
L3_b7_cbr2_bn_top\nI0821 08:59:39.308985 32364 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:39.309006 32364 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:59:39.309064 32364 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:59:39.309088 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.309098 32364 net.cpp:165] Memory required for data: 2095924400\nI0821 08:59:39.309108 32364 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:59:39.309123 32364 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:59:39.309135 32364 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:59:39.309156 32364 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:59:39.309177 32364 net.cpp:150] Setting up L3_b7_relu\nI0821 08:59:39.309191 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.309201 32364 net.cpp:165] Memory required for data: 2097562800\nI0821 08:59:39.309211 32364 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.309229 32364 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.309242 32364 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:59:39.309257 32364 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:39.309276 32364 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:39.309367 32364 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:39.309386 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.309399 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.309409 32364 net.cpp:165] Memory required for data: 2100839600\nI0821 08:59:39.309419 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:59:39.309440 32364 net.cpp:100] Creating Layer 
L3_b8_cbr1_conv\nI0821 08:59:39.309453 32364 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:39.309478 32364 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:59:39.310575 32364 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:59:39.310596 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.310606 32364 net.cpp:165] Memory required for data: 2102478000\nI0821 08:59:39.310624 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:59:39.310642 32364 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:59:39.310654 32364 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:59:39.310675 32364 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:59:39.310999 32364 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:59:39.311023 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.311033 32364 net.cpp:165] Memory required for data: 2104116400\nI0821 08:59:39.311064 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:39.311081 32364 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:59:39.311094 32364 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:59:39.311110 32364 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.311218 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:39.311425 32364 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:59:39.311444 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.311455 32364 net.cpp:165] Memory required for data: 2105754800\nI0821 08:59:39.311473 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:59:39.311488 32364 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:59:39.311501 32364 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:59:39.311520 32364 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:39.311542 32364 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:59:39.311555 
32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.311565 32364 net.cpp:165] Memory required for data: 2107393200\nI0821 08:59:39.311574 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:59:39.311599 32364 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:59:39.311614 32364 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:59:39.311631 32364 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:59:39.312885 32364 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:59:39.312906 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.312916 32364 net.cpp:165] Memory required for data: 2109031600\nI0821 08:59:39.312932 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:59:39.312955 32364 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:59:39.312968 32364 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:59:39.312990 32364 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:59:39.313318 32364 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:59:39.313338 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.313347 32364 net.cpp:165] Memory required for data: 2110670000\nI0821 08:59:39.313369 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:39.313385 32364 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:59:39.313396 32364 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:59:39.313413 32364 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:59:39.313513 32364 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:39.313724 32364 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:59:39.313742 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.313752 32364 net.cpp:165] Memory required for data: 2112308400\nI0821 08:59:39.313771 32364 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:59:39.313787 32364 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 
08:59:39.313799 32364 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:59:39.313813 32364 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:39.313834 32364 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:59:39.313895 32364 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:59:39.313918 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.313930 32364 net.cpp:165] Memory required for data: 2113946800\nI0821 08:59:39.313940 32364 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:59:39.313954 32364 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:59:39.313973 32364 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:59:39.313987 32364 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:59:39.313998 32364 net.cpp:150] Setting up L3_b8_relu\nI0821 08:59:39.314007 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.314010 32364 net.cpp:165] Memory required for data: 2115585200\nI0821 08:59:39.314015 32364 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.314033 32364 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.314040 32364 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:59:39.314047 32364 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:39.314059 32364 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:39.314116 32364 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:39.314133 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.314152 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.314162 32364 net.cpp:165] Memory required for data: 2118862000\nI0821 08:59:39.314172 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 
08:59:39.314193 32364 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:59:39.314205 32364 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:39.314229 32364 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:59:39.315310 32364 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:59:39.315331 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.315340 32364 net.cpp:165] Memory required for data: 2120500400\nI0821 08:59:39.315357 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:59:39.315374 32364 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:59:39.315387 32364 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:59:39.315409 32364 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:59:39.315731 32364 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:59:39.315753 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.315763 32364 net.cpp:165] Memory required for data: 2122138800\nI0821 08:59:39.315785 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:39.315803 32364 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:59:39.315814 32364 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:59:39.315831 32364 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.315925 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:39.316131 32364 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:59:39.316155 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.316165 32364 net.cpp:165] Memory required for data: 2123777200\nI0821 08:59:39.316184 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:59:39.316206 32364 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:59:39.316220 32364 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:59:39.316236 32364 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:39.316254 32364 net.cpp:150] 
Setting up L3_b9_cbr1_relu\nI0821 08:59:39.316269 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.316279 32364 net.cpp:165] Memory required for data: 2125415600\nI0821 08:59:39.316289 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:59:39.316314 32364 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:59:39.316329 32364 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:59:39.316346 32364 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:59:39.318423 32364 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:59:39.318449 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.318459 32364 net.cpp:165] Memory required for data: 2127054000\nI0821 08:59:39.318478 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:59:39.318496 32364 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:59:39.318508 32364 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:59:39.318531 32364 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:59:39.318857 32364 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:59:39.318881 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.318891 32364 net.cpp:165] Memory required for data: 2128692400\nI0821 08:59:39.318913 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:39.318940 32364 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:59:39.318954 32364 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:59:39.318970 32364 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:59:39.319082 32364 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:39.319294 32364 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:59:39.319314 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.319324 32364 net.cpp:165] Memory required for data: 2130330800\nI0821 08:59:39.319341 32364 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:59:39.319361 32364 net.cpp:100] 
Creating Layer L3_b9_sum_eltwise\nI0821 08:59:39.319375 32364 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:59:39.319388 32364 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:39.319404 32364 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:59:39.319468 32364 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:59:39.319488 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.319495 32364 net.cpp:165] Memory required for data: 2131969200\nI0821 08:59:39.319505 32364 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:59:39.319520 32364 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:59:39.319532 32364 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:59:39.319546 32364 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:59:39.319566 32364 net.cpp:150] Setting up L3_b9_relu\nI0821 08:59:39.319581 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.319589 32364 net.cpp:165] Memory required for data: 2133607600\nI0821 08:59:39.319598 32364 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.319612 32364 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.319624 32364 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0821 08:59:39.319643 32364 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:39.319665 32364 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:39.319753 32364 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:39.319772 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.319785 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.319795 32364 net.cpp:165] Memory required for data: 2136884400\nI0821 08:59:39.319805 32364 layer_factory.hpp:77] 
Creating layer L3_b10_cbr1_conv\nI0821 08:59:39.319829 32364 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0821 08:59:39.319844 32364 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:39.319864 32364 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0821 08:59:39.320966 32364 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0821 08:59:39.320987 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.320997 32364 net.cpp:165] Memory required for data: 2138522800\nI0821 08:59:39.321014 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0821 08:59:39.321032 32364 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0821 08:59:39.321048 32364 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0821 08:59:39.321065 32364 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0821 08:59:39.321393 32364 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0821 08:59:39.321411 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.321421 32364 net.cpp:165] Memory required for data: 2140161200\nI0821 08:59:39.321442 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:39.321458 32364 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0821 08:59:39.321470 32364 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0821 08:59:39.321491 32364 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.321597 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:39.321815 32364 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0821 08:59:39.321835 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.321844 32364 net.cpp:165] Memory required for data: 2141799600\nI0821 08:59:39.321863 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0821 08:59:39.321883 32364 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0821 08:59:39.321897 32364 net.cpp:434] L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0821 08:59:39.321912 32364 net.cpp:395] L3_b10_cbr1_relu -> 
L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:39.321930 32364 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0821 08:59:39.321945 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.321954 32364 net.cpp:165] Memory required for data: 2143438000\nI0821 08:59:39.321974 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0821 08:59:39.321995 32364 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0821 08:59:39.322003 32364 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0821 08:59:39.322016 32364 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0821 08:59:39.323067 32364 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0821 08:59:39.323086 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.323096 32364 net.cpp:165] Memory required for data: 2145076400\nI0821 08:59:39.323112 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0821 08:59:39.323129 32364 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0821 08:59:39.323141 32364 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0821 08:59:39.323170 32364 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0821 08:59:39.323503 32364 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0821 08:59:39.323525 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.323535 32364 net.cpp:165] Memory required for data: 2146714800\nI0821 08:59:39.323557 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:39.323573 32364 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0821 08:59:39.323585 32364 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0821 08:59:39.323601 32364 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0821 08:59:39.323695 32364 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:39.323901 32364 net.cpp:150] Setting up L3_b10_cbr2_scale\nI0821 08:59:39.323920 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.323930 32364 net.cpp:165] Memory required for data: 2148353200\nI0821 08:59:39.323948 32364 
layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0821 08:59:39.323971 32364 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0821 08:59:39.323983 32364 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0821 08:59:39.323997 32364 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:39.324013 32364 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0821 08:59:39.324077 32364 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0821 08:59:39.324095 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.324105 32364 net.cpp:165] Memory required for data: 2149991600\nI0821 08:59:39.324115 32364 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0821 08:59:39.324131 32364 net.cpp:100] Creating Layer L3_b10_relu\nI0821 08:59:39.324142 32364 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0821 08:59:39.324163 32364 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0821 08:59:39.324183 32364 net.cpp:150] Setting up L3_b10_relu\nI0821 08:59:39.324198 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.324206 32364 net.cpp:165] Memory required for data: 2151630000\nI0821 08:59:39.324216 32364 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.324230 32364 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.324240 32364 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0821 08:59:39.324261 32364 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:39.324291 32364 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:39.324383 32364 net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:39.324404 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.324417 32364 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:39.324427 32364 net.cpp:165] Memory required for data: 2154906800\nI0821 08:59:39.324437 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0821 08:59:39.324461 32364 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0821 08:59:39.324476 32364 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:39.324493 32364 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0821 08:59:39.325592 32364 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0821 08:59:39.325613 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.325621 32364 net.cpp:165] Memory required for data: 2156545200\nI0821 08:59:39.325639 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0821 08:59:39.325661 32364 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0821 08:59:39.325673 32364 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0821 08:59:39.325690 32364 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0821 08:59:39.326011 32364 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0821 08:59:39.326030 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.326040 32364 net.cpp:165] Memory required for data: 2158183600\nI0821 08:59:39.326061 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:39.326082 32364 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0821 08:59:39.326095 32364 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0821 08:59:39.326112 32364 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.326216 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:39.326421 32364 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0821 08:59:39.326439 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.326448 32364 net.cpp:165] Memory required for data: 2159822000\nI0821 08:59:39.326467 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr1_relu\nI0821 08:59:39.326488 32364 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0821 
08:59:39.326499 32364 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0821 08:59:39.326514 32364 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:39.326534 32364 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0821 08:59:39.326547 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.326557 32364 net.cpp:165] Memory required for data: 2161460400\nI0821 08:59:39.326567 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0821 08:59:39.326592 32364 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0821 08:59:39.326606 32364 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0821 08:59:39.326628 32364 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0821 08:59:39.327719 32364 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0821 08:59:39.327740 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.327749 32364 net.cpp:165] Memory required for data: 2163098800\nI0821 08:59:39.327769 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0821 08:59:39.327785 32364 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0821 08:59:39.327796 32364 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0821 08:59:39.327818 32364 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0821 08:59:39.328142 32364 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0821 08:59:39.328172 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.328182 32364 net.cpp:165] Memory required for data: 2164737200\nI0821 08:59:39.328203 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:39.328219 32364 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0821 08:59:39.328232 32364 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0821 08:59:39.328248 32364 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top (in-place)\nI0821 08:59:39.328361 32364 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:39.328573 32364 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0821 08:59:39.328593 32364 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0821 08:59:39.328603 32364 net.cpp:165] Memory required for data: 2166375600\nI0821 08:59:39.328620 32364 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0821 08:59:39.328642 32364 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0821 08:59:39.328655 32364 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0821 08:59:39.328670 32364 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:39.328686 32364 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0821 08:59:39.328749 32364 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0821 08:59:39.328766 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.328776 32364 net.cpp:165] Memory required for data: 2168014000\nI0821 08:59:39.328786 32364 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0821 08:59:39.328800 32364 net.cpp:100] Creating Layer L3_b11_relu\nI0821 08:59:39.328814 32364 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0821 08:59:39.328826 32364 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0821 08:59:39.328845 32364 net.cpp:150] Setting up L3_b11_relu\nI0821 08:59:39.328860 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.328868 32364 net.cpp:165] Memory required for data: 2169652400\nI0821 08:59:39.328879 32364 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:39.328893 32364 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:39.328904 32364 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0821 08:59:39.328927 32364 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:39.328949 32364 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:39.329036 32364 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 
08:59:39.329058 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.329071 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.329080 32364 net.cpp:165] Memory required for data: 2172929200\nI0821 08:59:39.329090 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0821 08:59:39.329115 32364 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0821 08:59:39.329129 32364 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:39.329155 32364 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0821 08:59:39.330247 32364 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0821 08:59:39.330267 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.330276 32364 net.cpp:165] Memory required for data: 2174567600\nI0821 08:59:39.330294 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0821 08:59:39.330317 32364 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0821 08:59:39.330329 32364 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0821 08:59:39.330346 32364 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0821 08:59:39.330673 32364 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0821 08:59:39.330693 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.330701 32364 net.cpp:165] Memory required for data: 2176206000\nI0821 08:59:39.330723 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:39.330745 32364 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0821 08:59:39.330760 32364 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0821 08:59:39.330775 32364 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.330874 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:39.331099 32364 net.cpp:150] Setting up L3_b12_cbr1_scale\nI0821 08:59:39.331120 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.331137 32364 net.cpp:165] Memory required for data: 2177844400\nI0821 08:59:39.331163 32364 
layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0821 08:59:39.331185 32364 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0821 08:59:39.331198 32364 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0821 08:59:39.331213 32364 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:39.331233 32364 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0821 08:59:39.331252 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.331262 32364 net.cpp:165] Memory required for data: 2179482800\nI0821 08:59:39.331274 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0821 08:59:39.331292 32364 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0821 08:59:39.331305 32364 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0821 08:59:39.331328 32364 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0821 08:59:39.332427 32364 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0821 08:59:39.332446 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.332455 32364 net.cpp:165] Memory required for data: 2181121200\nI0821 08:59:39.332473 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0821 08:59:39.332491 32364 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0821 08:59:39.332504 32364 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0821 08:59:39.332530 32364 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0821 08:59:39.332908 32364 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0821 08:59:39.332932 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.332943 32364 net.cpp:165] Memory required for data: 2182759600\nI0821 08:59:39.332970 32364 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:39.332985 32364 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0821 08:59:39.332993 32364 net.cpp:434] L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0821 08:59:39.333000 32364 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0821 08:59:39.333071 32364 layer_factory.hpp:77] Creating layer 
L3_b12_cbr2_scale\nI0821 08:59:39.333252 32364 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0821 08:59:39.333273 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.333283 32364 net.cpp:165] Memory required for data: 2184398000\nI0821 08:59:39.333302 32364 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0821 08:59:39.333318 32364 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0821 08:59:39.333330 32364 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0821 08:59:39.333343 32364 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:39.333360 32364 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0821 08:59:39.333425 32364 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0821 08:59:39.333443 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.333453 32364 net.cpp:165] Memory required for data: 2186036400\nI0821 08:59:39.333463 32364 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0821 08:59:39.333477 32364 net.cpp:100] Creating Layer L3_b12_relu\nI0821 08:59:39.333489 32364 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0821 08:59:39.333503 32364 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0821 08:59:39.333523 32364 net.cpp:150] Setting up L3_b12_relu\nI0821 08:59:39.333537 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.333547 32364 net.cpp:165] Memory required for data: 2187674800\nI0821 08:59:39.333557 32364 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.333572 32364 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.333583 32364 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0821 08:59:39.333603 32364 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:39.333624 32364 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> 
L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:39.333714 32364 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:39.333750 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.333765 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.333775 32364 net.cpp:165] Memory required for data: 2190951600\nI0821 08:59:39.333784 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0821 08:59:39.333806 32364 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0821 08:59:39.333818 32364 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:39.333837 32364 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0821 08:59:39.335922 32364 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0821 08:59:39.335943 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.335954 32364 net.cpp:165] Memory required for data: 2192590000\nI0821 08:59:39.335973 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0821 08:59:39.335994 32364 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0821 08:59:39.336006 32364 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0821 08:59:39.336024 32364 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0821 08:59:39.336361 32364 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0821 08:59:39.336380 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.336390 32364 net.cpp:165] Memory required for data: 2194228400\nI0821 08:59:39.336412 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:39.336428 32364 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0821 08:59:39.336441 32364 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0821 08:59:39.336457 32364 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.336556 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:39.336773 32364 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0821 08:59:39.336791 32364 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.336802 32364 net.cpp:165] Memory required for data: 2195866800\nI0821 08:59:39.336820 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0821 08:59:39.336835 32364 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0821 08:59:39.336846 32364 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0821 08:59:39.336866 32364 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:39.336887 32364 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0821 08:59:39.336902 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.336911 32364 net.cpp:165] Memory required for data: 2197505200\nI0821 08:59:39.336921 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0821 08:59:39.336946 32364 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0821 08:59:39.336959 32364 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0821 08:59:39.336978 32364 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0821 08:59:39.338089 32364 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0821 08:59:39.338110 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.338119 32364 net.cpp:165] Memory required for data: 2199143600\nI0821 08:59:39.338137 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0821 08:59:39.338166 32364 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0821 08:59:39.338181 32364 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0821 08:59:39.338198 32364 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0821 08:59:39.338524 32364 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0821 08:59:39.338543 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.338553 32364 net.cpp:165] Memory required for data: 2200782000\nI0821 08:59:39.338574 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:39.338596 32364 net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0821 08:59:39.338609 32364 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0821 
08:59:39.338625 32364 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0821 08:59:39.338726 32364 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:39.338942 32364 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0821 08:59:39.338968 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.338977 32364 net.cpp:165] Memory required for data: 2202420400\nI0821 08:59:39.338996 32364 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0821 08:59:39.339017 32364 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0821 08:59:39.339030 32364 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0821 08:59:39.339045 32364 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:39.339064 32364 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0821 08:59:39.339125 32364 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0821 08:59:39.339143 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.339159 32364 net.cpp:165] Memory required for data: 2204058800\nI0821 08:59:39.339170 32364 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0821 08:59:39.339191 32364 net.cpp:100] Creating Layer L3_b13_relu\nI0821 08:59:39.339205 32364 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0821 08:59:39.339220 32364 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0821 08:59:39.339238 32364 net.cpp:150] Setting up L3_b13_relu\nI0821 08:59:39.339253 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.339262 32364 net.cpp:165] Memory required for data: 2205697200\nI0821 08:59:39.339272 32364 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.339287 32364 net.cpp:100] Creating Layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.339298 32364 net.cpp:434] L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0821 08:59:39.339313 32364 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:39.339334 32364 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:39.339429 32364 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:39.339447 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.339460 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.339469 32364 net.cpp:165] Memory required for data: 2208974000\nI0821 08:59:39.339479 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0821 08:59:39.339504 32364 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0821 08:59:39.339519 32364 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:39.339539 32364 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0821 08:59:39.340626 32364 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0821 08:59:39.340646 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.340656 32364 net.cpp:165] Memory required for data: 2210612400\nI0821 08:59:39.340672 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0821 08:59:39.340698 32364 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0821 08:59:39.340710 32364 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0821 08:59:39.340734 32364 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0821 08:59:39.341048 32364 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0821 08:59:39.341066 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.341076 32364 net.cpp:165] Memory required for data: 2212250800\nI0821 08:59:39.341099 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:39.341114 32364 net.cpp:100] Creating Layer L3_b14_cbr1_scale\nI0821 08:59:39.341126 32364 net.cpp:434] L3_b14_cbr1_scale <- L3_b14_cbr1_bn_top\nI0821 08:59:39.341141 32364 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.341250 32364 layer_factory.hpp:77] Creating 
layer L3_b14_cbr1_scale\nI0821 08:59:39.341459 32364 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0821 08:59:39.341477 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.341487 32364 net.cpp:165] Memory required for data: 2213889200\nI0821 08:59:39.341506 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0821 08:59:39.341521 32364 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0821 08:59:39.341542 32364 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0821 08:59:39.341562 32364 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:39.341584 32364 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0821 08:59:39.341598 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.341609 32364 net.cpp:165] Memory required for data: 2215527600\nI0821 08:59:39.341619 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0821 08:59:39.341644 32364 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0821 08:59:39.341656 32364 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0821 08:59:39.341675 32364 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0821 08:59:39.342759 32364 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0821 08:59:39.342779 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.342789 32364 net.cpp:165] Memory required for data: 2217166000\nI0821 08:59:39.342808 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0821 08:59:39.342828 32364 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0821 08:59:39.342840 32364 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0821 08:59:39.342859 32364 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0821 08:59:39.343205 32364 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0821 08:59:39.343225 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.343235 32364 net.cpp:165] Memory required for data: 2218804400\nI0821 08:59:39.343255 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:39.343276 32364 
net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0821 08:59:39.343288 32364 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0821 08:59:39.343304 32364 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0821 08:59:39.343403 32364 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:39.343613 32364 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0821 08:59:39.343632 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.343641 32364 net.cpp:165] Memory required for data: 2220442800\nI0821 08:59:39.343660 32364 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0821 08:59:39.343682 32364 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0821 08:59:39.343694 32364 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0821 08:59:39.343708 32364 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:39.343729 32364 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0821 08:59:39.343789 32364 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0821 08:59:39.343808 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.343817 32364 net.cpp:165] Memory required for data: 2222081200\nI0821 08:59:39.343827 32364 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0821 08:59:39.343847 32364 net.cpp:100] Creating Layer L3_b14_relu\nI0821 08:59:39.343859 32364 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0821 08:59:39.343873 32364 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0821 08:59:39.343893 32364 net.cpp:150] Setting up L3_b14_relu\nI0821 08:59:39.343907 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.343916 32364 net.cpp:165] Memory required for data: 2223719600\nI0821 08:59:39.343926 32364 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.344018 32364 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.344034 32364 net.cpp:434] 
L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0821 08:59:39.344051 32364 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:39.344071 32364 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:39.344161 32364 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:39.344179 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.344192 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.344209 32364 net.cpp:165] Memory required for data: 2226996400\nI0821 08:59:39.344223 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0821 08:59:39.344247 32364 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0821 08:59:39.344264 32364 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:39.344283 32364 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0821 08:59:39.345369 32364 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0821 08:59:39.345391 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.345401 32364 net.cpp:165] Memory required for data: 2228634800\nI0821 08:59:39.345418 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0821 08:59:39.345440 32364 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0821 08:59:39.345453 32364 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0821 08:59:39.345474 32364 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0821 08:59:39.345796 32364 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0821 08:59:39.345815 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.345825 32364 net.cpp:165] Memory required for data: 2230273200\nI0821 08:59:39.345847 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:39.345863 32364 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0821 08:59:39.345875 32364 net.cpp:434] L3_b15_cbr1_scale <- 
L3_b15_cbr1_bn_top\nI0821 08:59:39.345898 32364 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.346007 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:39.346223 32364 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0821 08:59:39.346241 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.346251 32364 net.cpp:165] Memory required for data: 2231911600\nI0821 08:59:39.346269 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0821 08:59:39.346284 32364 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0821 08:59:39.346297 32364 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0821 08:59:39.346315 32364 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:39.346336 32364 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0821 08:59:39.346352 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.346361 32364 net.cpp:165] Memory required for data: 2233550000\nI0821 08:59:39.346371 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0821 08:59:39.346395 32364 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0821 08:59:39.346410 32364 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0821 08:59:39.346428 32364 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0821 08:59:39.347522 32364 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0821 08:59:39.347543 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.347553 32364 net.cpp:165] Memory required for data: 2235188400\nI0821 08:59:39.347569 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0821 08:59:39.347591 32364 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0821 08:59:39.347604 32364 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0821 08:59:39.347621 32364 net.cpp:408] L3_b15_cbr2_bn -> L3_b15_cbr2_bn_top\nI0821 08:59:39.347945 32364 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0821 08:59:39.347965 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.347973 32364 
net.cpp:165] Memory required for data: 2236826800\nI0821 08:59:39.347995 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:39.348014 32364 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0821 08:59:39.348027 32364 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0821 08:59:39.348047 32364 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0821 08:59:39.348145 32364 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:39.348357 32364 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0821 08:59:39.348376 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.348386 32364 net.cpp:165] Memory required for data: 2238465200\nI0821 08:59:39.348404 32364 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0821 08:59:39.348429 32364 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0821 08:59:39.348441 32364 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0821 08:59:39.348455 32364 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:39.348481 32364 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0821 08:59:39.348543 32364 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0821 08:59:39.348564 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.348574 32364 net.cpp:165] Memory required for data: 2240103600\nI0821 08:59:39.348584 32364 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0821 08:59:39.348603 32364 net.cpp:100] Creating Layer L3_b15_relu\nI0821 08:59:39.348616 32364 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0821 08:59:39.348630 32364 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0821 08:59:39.348650 32364 net.cpp:150] Setting up L3_b15_relu\nI0821 08:59:39.348664 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.348675 32364 net.cpp:165] Memory required for data: 2241742000\nI0821 08:59:39.348685 32364 layer_factory.hpp:77] Creating layer 
L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.348697 32364 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.348708 32364 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0821 08:59:39.348726 32364 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:39.348745 32364 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:39.348839 32364 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:39.348856 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.348870 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.348878 32364 net.cpp:165] Memory required for data: 2245018800\nI0821 08:59:39.348888 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0821 08:59:39.348912 32364 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0821 08:59:39.348927 32364 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:39.348945 32364 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0821 08:59:39.350025 32364 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0821 08:59:39.350045 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.350054 32364 net.cpp:165] Memory required for data: 2246657200\nI0821 08:59:39.350072 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0821 08:59:39.350096 32364 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0821 08:59:39.350109 32364 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0821 08:59:39.350131 32364 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0821 08:59:39.350455 32364 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0821 08:59:39.350474 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.350483 32364 net.cpp:165] Memory required for data: 2248295600\nI0821 08:59:39.350504 32364 layer_factory.hpp:77] 
Creating layer L3_b16_cbr1_scale\nI0821 08:59:39.350520 32364 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0821 08:59:39.350531 32364 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0821 08:59:39.350551 32364 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.350651 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:39.351037 32364 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0821 08:59:39.351058 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.351066 32364 net.cpp:165] Memory required for data: 2249934000\nI0821 08:59:39.351085 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0821 08:59:39.351101 32364 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0821 08:59:39.351114 32364 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0821 08:59:39.351133 32364 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:39.351171 32364 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0821 08:59:39.351187 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.351197 32364 net.cpp:165] Memory required for data: 2251572400\nI0821 08:59:39.351207 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0821 08:59:39.351233 32364 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0821 08:59:39.351248 32364 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0821 08:59:39.351269 32364 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0821 08:59:39.353365 32364 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0821 08:59:39.353387 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.353397 32364 net.cpp:165] Memory required for data: 2253210800\nI0821 08:59:39.353416 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0821 08:59:39.353438 32364 net.cpp:100] Creating Layer L3_b16_cbr2_bn\nI0821 08:59:39.353451 32364 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0821 08:59:39.353473 32364 net.cpp:408] L3_b16_cbr2_bn -> 
L3_b16_cbr2_bn_top\nI0821 08:59:39.353798 32364 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0821 08:59:39.353817 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.353827 32364 net.cpp:165] Memory required for data: 2254849200\nI0821 08:59:39.353849 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:39.353865 32364 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0821 08:59:39.353878 32364 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0821 08:59:39.353898 32364 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0821 08:59:39.354004 32364 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:39.354226 32364 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0821 08:59:39.354244 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.354254 32364 net.cpp:165] Memory required for data: 2256487600\nI0821 08:59:39.354274 32364 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0821 08:59:39.354290 32364 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0821 08:59:39.354302 32364 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0821 08:59:39.354315 32364 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:39.354336 32364 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0821 08:59:39.354401 32364 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0821 08:59:39.354419 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.354429 32364 net.cpp:165] Memory required for data: 2258126000\nI0821 08:59:39.354439 32364 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0821 08:59:39.354454 32364 net.cpp:100] Creating Layer L3_b16_relu\nI0821 08:59:39.354466 32364 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0821 08:59:39.354485 32364 net.cpp:395] L3_b16_relu -> L3_b16_sum_eltwise_top (in-place)\nI0821 08:59:39.354506 32364 net.cpp:150] Setting up L3_b16_relu\nI0821 08:59:39.354518 32364 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:39.354528 32364 net.cpp:165] Memory required for data: 2259764400\nI0821 08:59:39.354538 32364 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.354552 32364 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.354563 32364 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0821 08:59:39.354579 32364 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:39.354599 32364 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:39.354692 32364 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:39.354712 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.354725 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.354735 32364 net.cpp:165] Memory required for data: 2263041200\nI0821 08:59:39.354745 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0821 08:59:39.354766 32364 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0821 08:59:39.354789 32364 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:39.354815 32364 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0821 08:59:39.355908 32364 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0821 08:59:39.355928 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.355938 32364 net.cpp:165] Memory required for data: 2264679600\nI0821 08:59:39.355957 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0821 08:59:39.355973 32364 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0821 08:59:39.355985 32364 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0821 08:59:39.356007 32364 net.cpp:408] L3_b17_cbr1_bn -> L3_b17_cbr1_bn_top\nI0821 08:59:39.356344 32364 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0821 08:59:39.356364 32364 net.cpp:157] Top shape: 
100 64 8 8 (409600)\nI0821 08:59:39.356374 32364 net.cpp:165] Memory required for data: 2266318000\nI0821 08:59:39.356395 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:39.356411 32364 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0821 08:59:39.356423 32364 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0821 08:59:39.356438 32364 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.356539 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:39.356748 32364 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0821 08:59:39.356773 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.356783 32364 net.cpp:165] Memory required for data: 2267956400\nI0821 08:59:39.356802 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0821 08:59:39.356817 32364 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0821 08:59:39.356830 32364 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0821 08:59:39.356845 32364 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:39.356864 32364 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0821 08:59:39.356878 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.356889 32364 net.cpp:165] Memory required for data: 2269594800\nI0821 08:59:39.356899 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0821 08:59:39.356922 32364 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0821 08:59:39.356936 32364 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0821 08:59:39.356954 32364 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0821 08:59:39.358031 32364 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0821 08:59:39.358049 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.358058 32364 net.cpp:165] Memory required for data: 2271233200\nI0821 08:59:39.358077 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0821 08:59:39.358098 32364 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0821 
08:59:39.358111 32364 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0821 08:59:39.358134 32364 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0821 08:59:39.358459 32364 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0821 08:59:39.358479 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.358489 32364 net.cpp:165] Memory required for data: 2272871600\nI0821 08:59:39.358508 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:39.358525 32364 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0821 08:59:39.358537 32364 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0821 08:59:39.358561 32364 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0821 08:59:39.358659 32364 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:39.358870 32364 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0821 08:59:39.358889 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.358898 32364 net.cpp:165] Memory required for data: 2274510000\nI0821 08:59:39.358916 32364 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0821 08:59:39.358938 32364 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0821 08:59:39.358960 32364 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0821 08:59:39.358983 32364 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:39.359000 32364 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0821 08:59:39.359068 32364 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0821 08:59:39.359087 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.359097 32364 net.cpp:165] Memory required for data: 2276148400\nI0821 08:59:39.359107 32364 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0821 08:59:39.359122 32364 net.cpp:100] Creating Layer L3_b17_relu\nI0821 08:59:39.359134 32364 net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0821 08:59:39.359161 32364 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top 
(in-place)\nI0821 08:59:39.359184 32364 net.cpp:150] Setting up L3_b17_relu\nI0821 08:59:39.359197 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.359207 32364 net.cpp:165] Memory required for data: 2277786800\nI0821 08:59:39.359216 32364 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.359231 32364 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.359243 32364 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0821 08:59:39.359258 32364 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:39.359278 32364 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:39.359367 32364 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:39.359386 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.359400 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.359408 32364 net.cpp:165] Memory required for data: 2281063600\nI0821 08:59:39.359418 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0821 08:59:39.359438 32364 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0821 08:59:39.359452 32364 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:39.359475 32364 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0821 08:59:39.360574 32364 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0821 08:59:39.360594 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.360605 32364 net.cpp:165] Memory required for data: 2282702000\nI0821 08:59:39.360622 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0821 08:59:39.360643 32364 net.cpp:100] Creating Layer L3_b18_cbr1_bn\nI0821 08:59:39.360656 32364 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0821 08:59:39.360674 32364 net.cpp:408] L3_b18_cbr1_bn -> 
L3_b18_cbr1_bn_top\nI0821 08:59:39.361002 32364 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0821 08:59:39.361021 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.361030 32364 net.cpp:165] Memory required for data: 2284340400\nI0821 08:59:39.361052 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:39.361068 32364 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0821 08:59:39.361080 32364 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0821 08:59:39.361096 32364 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.361202 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:39.361407 32364 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0821 08:59:39.361433 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.361444 32364 net.cpp:165] Memory required for data: 2285978800\nI0821 08:59:39.361464 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0821 08:59:39.361479 32364 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0821 08:59:39.361490 32364 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0821 08:59:39.361505 32364 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:39.361523 32364 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0821 08:59:39.361538 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.361547 32364 net.cpp:165] Memory required for data: 2287617200\nI0821 08:59:39.361567 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0821 08:59:39.361591 32364 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0821 08:59:39.361606 32364 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0821 08:59:39.361625 32364 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0821 08:59:39.362725 32364 net.cpp:150] Setting up L3_b18_cbr2_conv\nI0821 08:59:39.362746 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.362756 32364 net.cpp:165] Memory required for data: 2289255600\nI0821 08:59:39.362773 
32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0821 08:59:39.362794 32364 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0821 08:59:39.362808 32364 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0821 08:59:39.362829 32364 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0821 08:59:39.363160 32364 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0821 08:59:39.363180 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.363189 32364 net.cpp:165] Memory required for data: 2290894000\nI0821 08:59:39.363210 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:39.363227 32364 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0821 08:59:39.363240 32364 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0821 08:59:39.363260 32364 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0821 08:59:39.363358 32364 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:39.363564 32364 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0821 08:59:39.363582 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.363591 32364 net.cpp:165] Memory required for data: 2292532400\nI0821 08:59:39.363610 32364 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0821 08:59:39.363631 32364 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0821 08:59:39.363643 32364 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0821 08:59:39.363657 32364 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:39.363673 32364 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0821 08:59:39.363735 32364 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0821 08:59:39.363754 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.363764 32364 net.cpp:165] Memory required for data: 2294170800\nI0821 08:59:39.363773 32364 layer_factory.hpp:77] Creating layer L3_b18_relu\nI0821 08:59:39.363788 32364 net.cpp:100] Creating Layer L3_b18_relu\nI0821 08:59:39.363801 32364 
net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0821 08:59:39.363819 32364 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0821 08:59:39.363839 32364 net.cpp:150] Setting up L3_b18_relu\nI0821 08:59:39.363854 32364 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:39.363864 32364 net.cpp:165] Memory required for data: 2295809200\nI0821 08:59:39.363874 32364 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:59:39.363890 32364 net.cpp:100] Creating Layer post_pool\nI0821 08:59:39.363903 32364 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0821 08:59:39.363919 32364 net.cpp:408] post_pool -> post_pool\nI0821 08:59:39.363981 32364 net.cpp:150] Setting up post_pool\nI0821 08:59:39.364007 32364 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0821 08:59:39.364019 32364 net.cpp:165] Memory required for data: 2295834800\nI0821 08:59:39.364029 32364 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:59:39.364049 32364 net.cpp:100] Creating Layer post_FC\nI0821 08:59:39.364063 32364 net.cpp:434] post_FC <- post_pool\nI0821 08:59:39.364082 32364 net.cpp:408] post_FC -> post_FC_top\nI0821 08:59:39.364310 32364 net.cpp:150] Setting up post_FC\nI0821 08:59:39.364333 32364 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:39.364343 32364 net.cpp:165] Memory required for data: 2295838800\nI0821 08:59:39.364362 32364 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:59:39.364377 32364 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:59:39.364387 32364 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:59:39.364413 32364 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:59:39.364434 32364 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:59:39.364532 32364 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:59:39.364555 32364 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:39.364568 32364 net.cpp:157] Top 
shape: 100 10 (1000)\nI0821 08:59:39.364578 32364 net.cpp:165] Memory required for data: 2295846800\nI0821 08:59:39.364588 32364 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:59:39.364603 32364 net.cpp:100] Creating Layer accuracy\nI0821 08:59:39.364616 32364 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:59:39.364629 32364 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:59:39.364650 32364 net.cpp:408] accuracy -> accuracy\nI0821 08:59:39.364675 32364 net.cpp:150] Setting up accuracy\nI0821 08:59:39.364689 32364 net.cpp:157] Top shape: (1)\nI0821 08:59:39.364698 32364 net.cpp:165] Memory required for data: 2295846804\nI0821 08:59:39.364708 32364 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:39.364722 32364 net.cpp:100] Creating Layer loss\nI0821 08:59:39.364734 32364 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:59:39.364748 32364 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:59:39.364763 32364 net.cpp:408] loss -> loss\nI0821 08:59:39.364784 32364 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:39.364948 32364 net.cpp:150] Setting up loss\nI0821 08:59:39.364969 32364 net.cpp:157] Top shape: (1)\nI0821 08:59:39.364979 32364 net.cpp:160]     with loss weight 1\nI0821 08:59:39.365002 32364 net.cpp:165] Memory required for data: 2295846808\nI0821 08:59:39.365015 32364 net.cpp:226] loss needs backward computation.\nI0821 08:59:39.365025 32364 net.cpp:228] accuracy does not need backward computation.\nI0821 08:59:39.365036 32364 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:59:39.365046 32364 net.cpp:226] post_FC needs backward computation.\nI0821 08:59:39.365056 32364 net.cpp:226] post_pool needs backward computation.\nI0821 08:59:39.365067 32364 net.cpp:226] L3_b18_relu needs backward computation.\nI0821 08:59:39.365077 32364 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.365087 32364 net.cpp:226] L3_b18_cbr2_scale needs 
backward computation.\nI0821 08:59:39.365097 32364 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.365106 32364 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.365116 32364 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.365125 32364 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.365134 32364 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.365144 32364 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.365162 32364 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0821 08:59:39.365172 32364 net.cpp:226] L3_b17_relu needs backward computation.\nI0821 08:59:39.365182 32364 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.365193 32364 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.365203 32364 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.365213 32364 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.365222 32364 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.365232 32364 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.365242 32364 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.365252 32364 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.365262 32364 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0821 08:59:39.365273 32364 net.cpp:226] L3_b16_relu needs backward computation.\nI0821 08:59:39.365283 32364 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.365294 32364 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.365311 32364 net.cpp:226] L3_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.365322 32364 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.365331 
32364 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.365341 32364 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.365351 32364 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.365361 32364 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.365371 32364 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0821 08:59:39.365381 32364 net.cpp:226] L3_b15_relu needs backward computation.\nI0821 08:59:39.365391 32364 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.365401 32364 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.365411 32364 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.365422 32364 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.365432 32364 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.365440 32364 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.365449 32364 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.365459 32364 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.365469 32364 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0821 08:59:39.365480 32364 net.cpp:226] L3_b14_relu needs backward computation.\nI0821 08:59:39.365490 32364 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0821 08:59:39.365500 32364 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.365510 32364 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.365521 32364 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0821 08:59:39.365531 32364 net.cpp:226] L3_b14_cbr1_relu needs backward computation.\nI0821 08:59:39.365540 32364 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.365550 32364 net.cpp:226] L3_b14_cbr1_bn needs 
backward computation.\nI0821 08:59:39.365562 32364 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.365572 32364 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0821 08:59:39.365582 32364 net.cpp:226] L3_b13_relu needs backward computation.\nI0821 08:59:39.365592 32364 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.365602 32364 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.365613 32364 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.365623 32364 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.365633 32364 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.365643 32364 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.365653 32364 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.365662 32364 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.365672 32364 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0821 08:59:39.365684 32364 net.cpp:226] L3_b12_relu needs backward computation.\nI0821 08:59:39.365692 32364 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.365703 32364 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0821 08:59:39.365713 32364 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0821 08:59:39.365723 32364 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.365733 32364 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0821 08:59:39.365742 32364 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.365752 32364 net.cpp:226] L3_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.365762 32364 net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.365782 32364 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward 
computation.\nI0821 08:59:39.365794 32364 net.cpp:226] L3_b11_relu needs backward computation.\nI0821 08:59:39.365804 32364 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.365819 32364 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.365831 32364 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.365841 32364 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.365851 32364 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.365861 32364 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.365871 32364 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.365881 32364 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.365892 32364 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0821 08:59:39.365902 32364 net.cpp:226] L3_b10_relu needs backward computation.\nI0821 08:59:39.365912 32364 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.365924 32364 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.365934 32364 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.365945 32364 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.365957 32364 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.365967 32364 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.365977 32364 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.365988 32364 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.365998 32364 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0821 08:59:39.366008 32364 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:59:39.366019 32364 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.366030 32364 
net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.366039 32364 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.366050 32364 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.366060 32364 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.366070 32364 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.366078 32364 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.366089 32364 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.366101 32364 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:59:39.366111 32364 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:59:39.366119 32364 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.366130 32364 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.366140 32364 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.366158 32364 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.366169 32364 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.366178 32364 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.366189 32364 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:59:39.366199 32364 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:59:39.366209 32364 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:59:39.366220 32364 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:59:39.366230 32364 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0821 08:59:39.366241 32364 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.366251 32364 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.366262 32364 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 
08:59:39.366272 32364 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.366288 32364 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.366299 32364 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.366310 32364 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.366322 32364 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:59:39.366333 32364 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:59:39.366343 32364 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.366353 32364 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.366369 32364 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.366380 32364 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.366390 32364 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.366400 32364 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.366410 32364 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.366420 32364 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.366431 32364 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:59:39.366441 32364 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:59:39.366452 32364 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.366463 32364 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.366473 32364 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.366483 32364 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.366494 32364 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:59:39.366504 32364 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.366513 32364 net.cpp:226] L3_b5_cbr1_bn needs backward 
computation.\nI0821 08:59:39.366523 32364 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.366534 32364 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:59:39.366545 32364 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:59:39.366555 32364 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.366566 32364 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.366577 32364 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.366588 32364 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.366598 32364 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.366608 32364 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.366618 32364 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.366628 32364 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.366639 32364 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:59:39.366650 32364 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:59:39.366662 32364 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.366672 32364 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.366683 32364 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.366694 32364 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.366705 32364 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.366714 32364 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.366724 32364 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.366735 32364 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:59:39.366746 32364 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:59:39.366756 32364 
net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:59:39.366767 32364 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.366780 32364 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.366798 32364 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.366809 32364 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.366821 32364 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.366830 32364 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.366840 32364 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.366852 32364 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.366861 32364 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:59:39.366873 32364 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:59:39.366884 32364 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:59:39.366894 32364 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:59:39.366905 32364 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.366916 32364 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:59:39.366928 32364 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.366938 32364 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.366950 32364 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.366961 32364 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:59:39.366971 32364 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 08:59:39.366982 32364 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.366993 32364 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:59:39.367003 32364 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0821 
08:59:39.367015 32364 net.cpp:226] L2_b18_relu needs backward computation.\nI0821 08:59:39.367025 32364 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.367035 32364 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0821 08:59:39.367045 32364 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.367056 32364 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.367067 32364 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.367077 32364 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.367087 32364 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.367103 32364 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.367115 32364 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0821 08:59:39.367125 32364 net.cpp:226] L2_b17_relu needs backward computation.\nI0821 08:59:39.367136 32364 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.367156 32364 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.367167 32364 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.367179 32364 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.367190 32364 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.367200 32364 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.367210 32364 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.367223 32364 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.367233 32364 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0821 08:59:39.367244 32364 net.cpp:226] L2_b16_relu needs backward computation.\nI0821 08:59:39.367254 32364 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.367265 32364 net.cpp:226] 
L2_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.367276 32364 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.367286 32364 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.367297 32364 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.367308 32364 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.367327 32364 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.367339 32364 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.367352 32364 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0821 08:59:39.367363 32364 net.cpp:226] L2_b15_relu needs backward computation.\nI0821 08:59:39.367373 32364 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.367384 32364 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.367394 32364 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.367405 32364 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.367416 32364 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.367425 32364 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.367436 32364 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.367447 32364 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.367458 32364 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0821 08:59:39.367470 32364 net.cpp:226] L2_b14_relu needs backward computation.\nI0821 08:59:39.367480 32364 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0821 08:59:39.367492 32364 net.cpp:226] L2_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.367503 32364 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.367513 32364 net.cpp:226] L2_b14_cbr2_conv needs backward 
computation.\nI0821 08:59:39.367525 32364 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0821 08:59:39.367537 32364 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.367547 32364 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0821 08:59:39.367558 32364 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.367568 32364 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0821 08:59:39.367579 32364 net.cpp:226] L2_b13_relu needs backward computation.\nI0821 08:59:39.367589 32364 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.367601 32364 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.367612 32364 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.367624 32364 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.367633 32364 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.367645 32364 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.367655 32364 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.367666 32364 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.367677 32364 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0821 08:59:39.367688 32364 net.cpp:226] L2_b12_relu needs backward computation.\nI0821 08:59:39.367700 32364 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.367712 32364 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0821 08:59:39.367722 32364 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0821 08:59:39.367734 32364 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.367746 32364 net.cpp:226] L2_b12_cbr1_relu needs backward computation.\nI0821 08:59:39.367756 32364 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.367768 32364 
net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.367779 32364 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.367789 32364 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0821 08:59:39.367800 32364 net.cpp:226] L2_b11_relu needs backward computation.\nI0821 08:59:39.367811 32364 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.367823 32364 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.367844 32364 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.367856 32364 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.367868 32364 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.367879 32364 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.367890 32364 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.367900 32364 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.367911 32364 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0821 08:59:39.367923 32364 net.cpp:226] L2_b10_relu needs backward computation.\nI0821 08:59:39.367933 32364 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.367945 32364 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.367955 32364 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.367972 32364 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.367982 32364 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.367991 32364 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.367997 32364 net.cpp:226] L2_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.368003 32364 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.368010 32364 net.cpp:226] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:59:39.368016 32364 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:59:39.368021 32364 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.368027 32364 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.368032 32364 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.368038 32364 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.368043 32364 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.368049 32364 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.368054 32364 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.368060 32364 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.368067 32364 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:59:39.368073 32364 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:59:39.368079 32364 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.368085 32364 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.368091 32364 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.368096 32364 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.368103 32364 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.368108 32364 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.368113 32364 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:59:39.368119 32364 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:59:39.368126 32364 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:59:39.368131 32364 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:59:39.368137 32364 net.cpp:226] L2_b7_sum_eltwise needs backward 
computation.\nI0821 08:59:39.368144 32364 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.368155 32364 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.368160 32364 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:59:39.368166 32364 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.368173 32364 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.368178 32364 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.368185 32364 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.368191 32364 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:59:39.368204 32364 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:59:39.368211 32364 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.368217 32364 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.368222 32364 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.368228 32364 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.368234 32364 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.368239 32364 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.368245 32364 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.368250 32364 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.368257 32364 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:59:39.368265 32364 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:59:39.368271 32364 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.368278 32364 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.368283 32364 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.368289 32364 net.cpp:226] 
L2_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.368295 32364 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:59:39.368301 32364 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.368306 32364 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:59:39.368312 32364 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.368319 32364 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:59:39.368324 32364 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:59:39.368330 32364 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.368336 32364 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.368342 32364 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.368347 32364 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.368353 32364 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.368358 32364 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.368365 32364 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.368369 32364 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.368376 32364 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:59:39.368381 32364 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:59:39.368386 32364 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.368393 32364 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.368398 32364 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.368404 32364 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.368410 32364 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.368415 32364 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.368422 
32364 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.368427 32364 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:59:39.368432 32364 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:59:39.368438 32364 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:59:39.368444 32364 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.368450 32364 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.368456 32364 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.368461 32364 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.368468 32364 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.368472 32364 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.368482 32364 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.368489 32364 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.368495 32364 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:59:39.368501 32364 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:59:39.368508 32364 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:59:39.368513 32364 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:59:39.368518 32364 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.368525 32364 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:59:39.368531 32364 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.368537 32364 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.368543 32364 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.368549 32364 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:59:39.368554 32364 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 
08:59:39.368559 32364 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.368566 32364 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:59:39.368571 32364 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0821 08:59:39.368577 32364 net.cpp:226] L1_b18_relu needs backward computation.\nI0821 08:59:39.368583 32364 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0821 08:59:39.368589 32364 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0821 08:59:39.368594 32364 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0821 08:59:39.368600 32364 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0821 08:59:39.368605 32364 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0821 08:59:39.368612 32364 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0821 08:59:39.368616 32364 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0821 08:59:39.368623 32364 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0821 08:59:39.368628 32364 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0821 08:59:39.368633 32364 net.cpp:226] L1_b17_relu needs backward computation.\nI0821 08:59:39.368638 32364 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0821 08:59:39.368645 32364 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0821 08:59:39.368650 32364 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0821 08:59:39.368656 32364 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0821 08:59:39.368661 32364 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0821 08:59:39.368667 32364 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0821 08:59:39.368672 32364 net.cpp:226] L1_b17_cbr1_bn needs backward computation.\nI0821 08:59:39.368679 32364 net.cpp:226] L1_b17_cbr1_conv needs backward computation.\nI0821 08:59:39.368685 32364 net.cpp:226] 
L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0821 08:59:39.368690 32364 net.cpp:226] L1_b16_relu needs backward computation.\nI0821 08:59:39.368695 32364 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0821 08:59:39.368701 32364 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0821 08:59:39.368707 32364 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0821 08:59:39.368713 32364 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0821 08:59:39.368718 32364 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0821 08:59:39.368723 32364 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0821 08:59:39.368729 32364 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0821 08:59:39.368734 32364 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0821 08:59:39.368741 32364 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0821 08:59:39.368746 32364 net.cpp:226] L1_b15_relu needs backward computation.\nI0821 08:59:39.368757 32364 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0821 08:59:39.368763 32364 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0821 08:59:39.368769 32364 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0821 08:59:39.368774 32364 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0821 08:59:39.368780 32364 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0821 08:59:39.368785 32364 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0821 08:59:39.368790 32364 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0821 08:59:39.368796 32364 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0821 08:59:39.368803 32364 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward computation.\nI0821 08:59:39.368808 32364 net.cpp:226] L1_b14_relu needs backward computation.\nI0821 08:59:39.368813 32364 net.cpp:226] L1_b14_sum_eltwise 
needs backward computation.\nI0821 08:59:39.368819 32364 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0821 08:59:39.368825 32364 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0821 08:59:39.368831 32364 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0821 08:59:39.368836 32364 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0821 08:59:39.368842 32364 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0821 08:59:39.368847 32364 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0821 08:59:39.368854 32364 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0821 08:59:39.368858 32364 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0821 08:59:39.368865 32364 net.cpp:226] L1_b13_relu needs backward computation.\nI0821 08:59:39.368870 32364 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0821 08:59:39.368876 32364 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0821 08:59:39.368882 32364 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0821 08:59:39.368887 32364 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0821 08:59:39.368893 32364 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0821 08:59:39.368898 32364 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0821 08:59:39.368903 32364 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0821 08:59:39.368909 32364 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0821 08:59:39.368916 32364 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0821 08:59:39.368921 32364 net.cpp:226] L1_b12_relu needs backward computation.\nI0821 08:59:39.368927 32364 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0821 08:59:39.368932 32364 net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0821 08:59:39.368938 32364 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0821 
08:59:39.368944 32364 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0821 08:59:39.368952 32364 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0821 08:59:39.368958 32364 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0821 08:59:39.368964 32364 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0821 08:59:39.368970 32364 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0821 08:59:39.368976 32364 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0821 08:59:39.368983 32364 net.cpp:226] L1_b11_relu needs backward computation.\nI0821 08:59:39.368988 32364 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0821 08:59:39.368994 32364 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0821 08:59:39.369000 32364 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0821 08:59:39.369006 32364 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0821 08:59:39.369011 32364 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0821 08:59:39.369017 32364 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0821 08:59:39.369024 32364 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0821 08:59:39.369033 32364 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0821 08:59:39.369040 32364 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0821 08:59:39.369045 32364 net.cpp:226] L1_b10_relu needs backward computation.\nI0821 08:59:39.369051 32364 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0821 08:59:39.369058 32364 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0821 08:59:39.369063 32364 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0821 08:59:39.369069 32364 net.cpp:226] L1_b10_cbr2_conv needs backward computation.\nI0821 08:59:39.369076 32364 net.cpp:226] L1_b10_cbr1_relu needs backward computation.\nI0821 08:59:39.369081 32364 net.cpp:226] 
L1_b10_cbr1_scale needs backward computation.\nI0821 08:59:39.369087 32364 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0821 08:59:39.369093 32364 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0821 08:59:39.369098 32364 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:59:39.369104 32364 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:59:39.369110 32364 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:59:39.369117 32364 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:59:39.369122 32364 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:59:39.369128 32364 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:59:39.369134 32364 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:59:39.369139 32364 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:59:39.369150 32364 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:59:39.369158 32364 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:59:39.369164 32364 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:59:39.369170 32364 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:59:39.369175 32364 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:59:39.369182 32364 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:59:39.369189 32364 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:59:39.369194 32364 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:59:39.369200 32364 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:59:39.369205 32364 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 08:59:39.369211 32364 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:59:39.369217 32364 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 
08:59:39.369223 32364 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:59:39.369228 32364 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:59:39.369235 32364 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:59:39.369241 32364 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:59:39.369247 32364 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:59:39.369253 32364 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:59:39.369259 32364 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:59:39.369266 32364 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:59:39.369271 32364 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:59:39.369277 32364 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:59:39.369283 32364 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:59:39.369289 32364 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:59:39.369294 32364 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:59:39.369302 32364 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:59:39.369307 32364 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:59:39.369313 32364 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:59:39.369324 32364 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:59:39.369330 32364 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:59:39.369336 32364 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:59:39.369341 32364 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:59:39.369349 32364 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:59:39.369354 32364 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:59:39.369359 32364 net.cpp:226] 
L1_b5_sum_eltwise needs backward computation.\nI0821 08:59:39.369366 32364 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:59:39.369372 32364 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:59:39.369379 32364 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:59:39.369384 32364 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:59:39.369390 32364 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:59:39.369395 32364 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:59:39.369401 32364 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:59:39.369407 32364 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:59:39.369413 32364 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:59:39.369419 32364 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:59:39.369426 32364 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:59:39.369431 32364 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:59:39.369437 32364 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:59:39.369442 32364 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:59:39.369448 32364 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:59:39.369453 32364 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:59:39.369460 32364 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:59:39.369467 32364 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:59:39.369472 32364 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:59:39.369477 32364 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0821 08:59:39.369484 32364 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:59:39.369490 32364 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:59:39.369496 
32364 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:59:39.369503 32364 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:59:39.369508 32364 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:59:39.369513 32364 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:59:39.369519 32364 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:59:39.369525 32364 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:59:39.369531 32364 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:59:39.369536 32364 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:59:39.369544 32364 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:59:39.369549 32364 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:59:39.369555 32364 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:59:39.369561 32364 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:59:39.369566 32364 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:59:39.369572 32364 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:59:39.369578 32364 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:59:39.369585 32364 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:59:39.369590 32364 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:59:39.369596 32364 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:59:39.369607 32364 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:59:39.369613 32364 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:59:39.369619 32364 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0821 08:59:39.369626 32364 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:59:39.369632 32364 net.cpp:226] L1_b1_cbr1_scale needs backward 
computation.\nI0821 08:59:39.369637 32364 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:59:39.369642 32364 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:59:39.369648 32364 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:59:39.369653 32364 net.cpp:226] pre_relu needs backward computation.\nI0821 08:59:39.369658 32364 net.cpp:226] pre_scale needs backward computation.\nI0821 08:59:39.369663 32364 net.cpp:226] pre_bn needs backward computation.\nI0821 08:59:39.369669 32364 net.cpp:226] pre_conv needs backward computation.\nI0821 08:59:39.369676 32364 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:59:39.369685 32364 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:59:39.369690 32364 net.cpp:270] This network produces output accuracy\nI0821 08:59:39.369698 32364 net.cpp:270] This network produces output loss\nI0821 08:59:39.370396 32364 net.cpp:283] Network initialization done.\nI0821 08:59:39.372547 32364 solver.cpp:60] Solver scaffolding done.\nI0821 08:59:39.621337 32364 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:59:40.072688 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:40.072791 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:40.082258 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:40.336798 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:40.336923 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:40.408972 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:40.409088 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:41.029752 32364 upgrade_proto.cpp:77] Attempting to upgrade 
batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:41.029824 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:41.040565 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:41.340577 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:41.340701 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:41.450577 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:41.450688 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:42.184469 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:42.184561 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:42.196436 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:42.546586 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:42.546772 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:42.698551 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:42.698698 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:42.876744 32364 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:59:43.556486 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:43.556612 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:43.568912 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:43.958142 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:43.958366 32364 net.cpp:143] Created 
top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:44.151401 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:44.151623 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:45.152489 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:45.152580 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:45.165514 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:45.598814 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:45.599057 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:45.837128 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:45.837373 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:46.963243 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:46.963330 32364 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:46.977489 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:47.461954 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:47.462229 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:47.741564 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:47.741827 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:48.989050 32364 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:48.989138 32364 upgrade_proto.cpp:80] Successfully 
upgraded batch norm layers using deprecated params.\nI0821 08:59:49.003757 32364 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:49.066388 32391 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:49.135507 32388 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:49.591724 32364 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:49.591987 32364 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:49.912367 32364 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:49.912621 32364 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:50.272851 32364 parallel.cpp:425] Starting Optimization\nI0821 08:59:50.274785 32364 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:59:50.274799 32364 solver.cpp:280] Learning Rate Policy: triangular\nI0821 08:59:50.282449 32364 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 09:01:59.067268 32364 solver.cpp:404]     Test net output #0: accuracy = 0.1\nI0821 09:01:59.067598 32364 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 09:02:06.431232 32364 solver.cpp:228] Iteration 0, loss = 5.86725\nI0821 09:02:06.431291 32364 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI0821 09:02:06.431311 32364 solver.cpp:244]     Train net output #1: loss = 5.86725 (* 1 = 5.86725 loss)\nI0821 09:02:06.431519 32364 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 09:05:44.884516 32364 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 09:07:54.579848 32364 solver.cpp:404]     Test net output #0: accuracy = 0.116\nI0821 09:07:54.580252 32364 solver.cpp:404]     Test net output #1: loss = 2.26961 (* 1 = 2.26961 loss)\nI0821 09:07:56.683547 32364 solver.cpp:228] Iteration 100, loss = 2.25762\nI0821 09:07:56.683593 32364 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 09:07:56.683610 32364 solver.cpp:244]     Train net output #1: loss = 2.25762 (* 
1 = 2.25762 loss)\nI0821 09:07:56.803390 32364 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 09:11:35.007571 32364 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 09:13:44.848976 32364 solver.cpp:404]     Test net output #0: accuracy = 0.1894\nI0821 09:13:44.849351 32364 solver.cpp:404]     Test net output #1: loss = 2.24083 (* 1 = 2.24083 loss)\nI0821 09:13:46.952831 32364 solver.cpp:228] Iteration 200, loss = 1.91815\nI0821 09:13:46.952877 32364 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI0821 09:13:46.952893 32364 solver.cpp:244]     Train net output #1: loss = 1.91815 (* 1 = 1.91815 loss)\nI0821 09:13:47.074394 32364 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 09:17:25.462072 32364 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 09:19:35.293637 32364 solver.cpp:404]     Test net output #0: accuracy = 0.3558\nI0821 09:19:35.294004 32364 solver.cpp:404]     Test net output #1: loss = 1.76167 (* 1 = 1.76167 loss)\nI0821 09:19:37.397593 32364 solver.cpp:228] Iteration 300, loss = 1.60256\nI0821 09:19:37.397637 32364 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI0821 09:19:37.397655 32364 solver.cpp:244]     Train net output #1: loss = 1.60256 (* 1 = 1.60256 loss)\nI0821 09:19:37.521528 32364 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 09:23:15.958148 32364 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 09:25:25.803732 32364 solver.cpp:404]     Test net output #0: accuracy = 0.4459\nI0821 09:25:25.804127 32364 solver.cpp:404]     Test net output #1: loss = 1.50231 (* 1 = 1.50231 loss)\nI0821 09:25:27.907744 32364 solver.cpp:228] Iteration 400, loss = 1.3597\nI0821 09:25:27.907793 32364 solver.cpp:244]     Train net output #0: accuracy = 0.5\nI0821 09:25:27.907809 32364 solver.cpp:244]     Train net output #1: loss = 1.3597 (* 1 = 1.3597 loss)\nI0821 09:25:28.017606 32364 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 09:29:06.155242 32364 solver.cpp:337] Iteration 500, Testing net 
(#0)\nI0821 09:31:16.013732 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5112\nI0821 09:31:16.014114 32364 solver.cpp:404]     Test net output #1: loss = 1.45944 (* 1 = 1.45944 loss)\nI0821 09:31:18.118621 32364 solver.cpp:228] Iteration 500, loss = 1.05255\nI0821 09:31:18.118666 32364 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0821 09:31:18.118685 32364 solver.cpp:244]     Train net output #1: loss = 1.05255 (* 1 = 1.05255 loss)\nI0821 09:31:18.227782 32364 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 09:34:56.313851 32364 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 09:37:06.136396 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5554\nI0821 09:37:06.136764 32364 solver.cpp:404]     Test net output #1: loss = 1.35221 (* 1 = 1.35221 loss)\nI0821 09:37:08.241080 32364 solver.cpp:228] Iteration 600, loss = 0.952325\nI0821 09:37:08.241132 32364 solver.cpp:244]     Train net output #0: accuracy = 0.66\nI0821 09:37:08.241154 32364 solver.cpp:244]     Train net output #1: loss = 0.952325 (* 1 = 0.952325 loss)\nI0821 09:37:08.357512 32364 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 09:40:46.641830 32364 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 09:42:56.388847 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6444\nI0821 09:42:56.389209 32364 solver.cpp:404]     Test net output #1: loss = 1.04179 (* 1 = 1.04179 loss)\nI0821 09:42:58.494010 32364 solver.cpp:228] Iteration 700, loss = 0.742243\nI0821 09:42:58.494065 32364 solver.cpp:244]     Train net output #0: accuracy = 0.74\nI0821 09:42:58.494087 32364 solver.cpp:244]     Train net output #1: loss = 0.742243 (* 1 = 0.742243 loss)\nI0821 09:42:58.605196 32364 sgd_solver.cpp:166] Iteration 700, lr = 0.506\nI0821 09:46:37.035816 32364 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 09:48:46.782138 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6707\nI0821 09:48:46.782549 32364 solver.cpp:404]     Test net output #1: 
loss = 0.940572 (* 1 = 0.940572 loss)\nI0821 09:48:48.886384 32364 solver.cpp:228] Iteration 800, loss = 0.630473\nI0821 09:48:48.886433 32364 solver.cpp:244]     Train net output #0: accuracy = 0.78\nI0821 09:48:48.886457 32364 solver.cpp:244]     Train net output #1: loss = 0.630473 (* 1 = 0.630473 loss)\nI0821 09:48:48.994606 32364 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 09:52:27.178952 32364 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:54:36.911095 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6847\nI0821 09:54:36.911478 32364 solver.cpp:404]     Test net output #1: loss = 0.941762 (* 1 = 0.941762 loss)\nI0821 09:54:39.016796 32364 solver.cpp:228] Iteration 900, loss = 0.806494\nI0821 09:54:39.016846 32364 solver.cpp:244]     Train net output #0: accuracy = 0.75\nI0821 09:54:39.016870 32364 solver.cpp:244]     Train net output #1: loss = 0.806494 (* 1 = 0.806494 loss)\nI0821 09:54:39.130874 32364 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 09:58:17.422206 32364 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 10:00:27.129616 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6878\nI0821 10:00:27.129995 32364 solver.cpp:404]     Test net output #1: loss = 0.905272 (* 1 = 0.905272 loss)\nI0821 10:00:29.234036 32364 solver.cpp:228] Iteration 1000, loss = 0.422571\nI0821 10:00:29.234102 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:00:29.234122 32364 solver.cpp:244]     Train net output #1: loss = 0.422571 (* 1 = 0.422571 loss)\nI0821 10:00:29.344503 32364 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 10:04:07.561866 32364 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 10:06:17.092983 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6817\nI0821 10:06:17.093407 32364 solver.cpp:404]     Test net output #1: loss = 0.946443 (* 1 = 0.946443 loss)\nI0821 10:06:19.197607 32364 solver.cpp:228] Iteration 1100, loss = 0.558489\nI0821 10:06:19.197654 32364 
solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 10:06:19.197670 32364 solver.cpp:244]     Train net output #1: loss = 0.558489 (* 1 = 0.558489 loss)\nI0821 10:06:19.318190 32364 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 10:09:57.713826 32364 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 10:12:07.421401 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6377\nI0821 10:12:07.421751 32364 solver.cpp:404]     Test net output #1: loss = 1.21132 (* 1 = 1.21132 loss)\nI0821 10:12:09.526163 32364 solver.cpp:228] Iteration 1200, loss = 0.615226\nI0821 10:12:09.526231 32364 solver.cpp:244]     Train net output #0: accuracy = 0.78\nI0821 10:12:09.526255 32364 solver.cpp:244]     Train net output #1: loss = 0.615226 (* 1 = 0.615226 loss)\nI0821 10:12:09.644467 32364 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 10:15:47.841966 32364 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 10:17:57.582499 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6532\nI0821 10:17:57.582875 32364 solver.cpp:404]     Test net output #1: loss = 1.03326 (* 1 = 1.03326 loss)\nI0821 10:17:59.687384 32364 solver.cpp:228] Iteration 1300, loss = 0.551921\nI0821 10:17:59.687436 32364 solver.cpp:244]     Train net output #0: accuracy = 0.79\nI0821 10:17:59.687459 32364 solver.cpp:244]     Train net output #1: loss = 0.551921 (* 1 = 0.551921 loss)\nI0821 10:17:59.797194 32364 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 10:21:38.215176 32364 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 10:23:48.031339 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6666\nI0821 10:23:48.031707 32364 solver.cpp:404]     Test net output #1: loss = 1.03829 (* 1 = 1.03829 loss)\nI0821 10:23:50.136472 32364 solver.cpp:228] Iteration 1400, loss = 0.690415\nI0821 10:23:50.136520 32364 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 10:23:50.136545 32364 solver.cpp:244]     Train net output #1: loss = 0.690415 (* 1 
= 0.690415 loss)\nI0821 10:23:50.252790 32364 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 10:27:28.551115 32364 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 10:29:38.351116 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7586\nI0821 10:29:38.351521 32364 solver.cpp:404]     Test net output #1: loss = 0.7318 (* 1 = 0.7318 loss)\nI0821 10:29:40.456895 32364 solver.cpp:228] Iteration 1500, loss = 0.370264\nI0821 10:29:40.456945 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 10:29:40.456970 32364 solver.cpp:244]     Train net output #1: loss = 0.370264 (* 1 = 0.370264 loss)\nI0821 10:29:40.572803 32364 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 10:33:18.965304 32364 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 10:35:28.795567 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7444\nI0821 10:35:28.795936 32364 solver.cpp:404]     Test net output #1: loss = 0.765024 (* 1 = 0.765024 loss)\nI0821 10:35:30.901228 32364 solver.cpp:228] Iteration 1600, loss = 0.507266\nI0821 10:35:30.901283 32364 solver.cpp:244]     Train net output #0: accuracy = 0.83\nI0821 10:35:30.901307 32364 solver.cpp:244]     Train net output #1: loss = 0.507266 (* 1 = 0.507266 loss)\nI0821 10:35:31.010280 32364 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 10:39:09.324821 32364 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 10:41:19.071190 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6565\nI0821 10:41:19.071561 32364 solver.cpp:404]     Test net output #1: loss = 1.11544 (* 1 = 1.11544 loss)\nI0821 10:41:21.176540 32364 solver.cpp:228] Iteration 1700, loss = 0.390454\nI0821 10:41:21.176587 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:41:21.176604 32364 solver.cpp:244]     Train net output #1: loss = 0.390454 (* 1 = 0.390454 loss)\nI0821 10:41:21.291652 32364 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 10:44:59.667177 32364 solver.cpp:337] Iteration 
1800, Testing net (#0)\nI0821 10:47:09.342628 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7528\nI0821 10:47:09.343013 32364 solver.cpp:404]     Test net output #1: loss = 0.74813 (* 1 = 0.74813 loss)\nI0821 10:47:11.446957 32364 solver.cpp:228] Iteration 1800, loss = 0.371331\nI0821 10:47:11.447021 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 10:47:11.447039 32364 solver.cpp:244]     Train net output #1: loss = 0.371331 (* 1 = 0.371331 loss)\nI0821 10:47:11.556265 32364 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 10:50:49.774793 32364 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 10:52:59.467139 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7128\nI0821 10:52:59.467492 32364 solver.cpp:404]     Test net output #1: loss = 0.91126 (* 1 = 0.91126 loss)\nI0821 10:53:01.571758 32364 solver.cpp:228] Iteration 1900, loss = 0.378542\nI0821 10:53:01.571805 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:53:01.571820 32364 solver.cpp:244]     Train net output #1: loss = 0.378542 (* 1 = 0.378542 loss)\nI0821 10:53:01.687597 32364 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 10:56:39.906142 32364 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 10:58:49.626991 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7689\nI0821 10:58:49.627367 32364 solver.cpp:404]     Test net output #1: loss = 0.732813 (* 1 = 0.732813 loss)\nI0821 10:58:51.732367 32364 solver.cpp:228] Iteration 2000, loss = 0.36877\nI0821 10:58:51.732412 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:58:51.732429 32364 solver.cpp:244]     Train net output #1: loss = 0.36877 (* 1 = 0.36877 loss)\nI0821 10:58:51.851233 32364 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 11:02:30.141019 32364 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 11:04:39.876549 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7532\nI0821 11:04:39.876935 32364 
solver.cpp:404]     Test net output #1: loss = 0.757873 (* 1 = 0.757873 loss)\nI0821 11:04:41.981724 32364 solver.cpp:228] Iteration 2100, loss = 0.455652\nI0821 11:04:41.981768 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 11:04:41.981784 32364 solver.cpp:244]     Train net output #1: loss = 0.455652 (* 1 = 0.455652 loss)\nI0821 11:04:42.092821 32364 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 11:08:20.332293 32364 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 11:10:30.066390 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6899\nI0821 11:10:30.066761 32364 solver.cpp:404]     Test net output #1: loss = 1.02555 (* 1 = 1.02555 loss)\nI0821 11:10:32.172106 32364 solver.cpp:228] Iteration 2200, loss = 0.452326\nI0821 11:10:32.172152 32364 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 11:10:32.172168 32364 solver.cpp:244]     Train net output #1: loss = 0.452326 (* 1 = 0.452326 loss)\nI0821 11:10:32.284747 32364 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 11:14:10.699709 32364 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 11:16:20.448900 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7024\nI0821 11:16:20.449291 32364 solver.cpp:404]     Test net output #1: loss = 1.01424 (* 1 = 1.01424 loss)\nI0821 11:16:22.554991 32364 solver.cpp:228] Iteration 2300, loss = 0.307186\nI0821 11:16:22.555038 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:16:22.555054 32364 solver.cpp:244]     Train net output #1: loss = 0.307186 (* 1 = 0.307186 loss)\nI0821 11:16:22.667840 32364 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 11:20:00.947672 32364 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 11:22:10.670255 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7544\nI0821 11:22:10.670622 32364 solver.cpp:404]     Test net output #1: loss = 0.784139 (* 1 = 0.784139 loss)\nI0821 11:22:12.774909 32364 solver.cpp:228] Iteration 2400, loss = 
0.432098\nI0821 11:22:12.774956 32364 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 11:22:12.774972 32364 solver.cpp:244]     Train net output #1: loss = 0.432098 (* 1 = 0.432098 loss)\nI0821 11:22:12.888762 32364 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 11:25:51.377699 32364 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 11:28:01.086333 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7149\nI0821 11:28:01.086695 32364 solver.cpp:404]     Test net output #1: loss = 0.990927 (* 1 = 0.990927 loss)\nI0821 11:28:03.191565 32364 solver.cpp:228] Iteration 2500, loss = 0.438658\nI0821 11:28:03.191610 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 11:28:03.191627 32364 solver.cpp:244]     Train net output #1: loss = 0.438658 (* 1 = 0.438658 loss)\nI0821 11:28:03.307221 32364 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 11:31:41.450326 32364 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 11:33:51.123145 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7583\nI0821 11:33:51.123498 32364 solver.cpp:404]     Test net output #1: loss = 0.761327 (* 1 = 0.761327 loss)\nI0821 11:33:53.227661 32364 solver.cpp:228] Iteration 2600, loss = 0.502126\nI0821 11:33:53.227708 32364 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 11:33:53.227725 32364 solver.cpp:244]     Train net output #1: loss = 0.502126 (* 1 = 0.502126 loss)\nI0821 11:33:53.346099 32364 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 11:37:31.692673 32364 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 11:39:41.407500 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7787\nI0821 11:39:41.407862 32364 solver.cpp:404]     Test net output #1: loss = 0.69122 (* 1 = 0.69122 loss)\nI0821 11:39:43.512470 32364 solver.cpp:228] Iteration 2700, loss = 0.363243\nI0821 11:39:43.512516 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 11:39:43.512532 32364 solver.cpp:244]     
Train net output #1: loss = 0.363243 (* 1 = 0.363243 loss)\nI0821 11:39:43.625334 32364 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 11:43:21.901538 32364 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 11:45:31.563217 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6857\nI0821 11:45:31.563609 32364 solver.cpp:404]     Test net output #1: loss = 1.09863 (* 1 = 1.09863 loss)\nI0821 11:45:33.667620 32364 solver.cpp:228] Iteration 2800, loss = 0.384745\nI0821 11:45:33.667666 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 11:45:33.667683 32364 solver.cpp:244]     Train net output #1: loss = 0.384744 (* 1 = 0.384744 loss)\nI0821 11:45:33.785343 32364 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 11:49:12.211745 32364 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 11:51:21.882490 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7368\nI0821 11:51:21.882864 32364 solver.cpp:404]     Test net output #1: loss = 0.835426 (* 1 = 0.835426 loss)\nI0821 11:51:23.986884 32364 solver.cpp:228] Iteration 2900, loss = 0.415224\nI0821 11:51:23.986930 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 11:51:23.986946 32364 solver.cpp:244]     Train net output #1: loss = 0.415223 (* 1 = 0.415223 loss)\nI0821 11:51:24.096556 32364 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 11:55:02.516829 32364 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 11:57:12.260313 32364 solver.cpp:404]     Test net output #0: accuracy = 0.678\nI0821 11:57:12.260707 32364 solver.cpp:404]     Test net output #1: loss = 1.2986 (* 1 = 1.2986 loss)\nI0821 11:57:14.365578 32364 solver.cpp:228] Iteration 3000, loss = 0.239921\nI0821 11:57:14.365627 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 11:57:14.365643 32364 solver.cpp:244]     Train net output #1: loss = 0.239921 (* 1 = 0.239921 loss)\nI0821 11:57:14.474494 32364 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 
12:00:52.839685 32364 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 12:03:02.554478 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6904\nI0821 12:03:02.554833 32364 solver.cpp:404]     Test net output #1: loss = 1.0658 (* 1 = 1.0658 loss)\nI0821 12:03:04.663128 32364 solver.cpp:228] Iteration 3100, loss = 0.424216\nI0821 12:03:04.663178 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 12:03:04.663194 32364 solver.cpp:244]     Train net output #1: loss = 0.424216 (* 1 = 0.424216 loss)\nI0821 12:03:04.775440 32364 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 12:06:43.038486 32364 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 12:08:52.726052 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6295\nI0821 12:08:52.726424 32364 solver.cpp:404]     Test net output #1: loss = 1.41529 (* 1 = 1.41529 loss)\nI0821 12:08:54.830137 32364 solver.cpp:228] Iteration 3200, loss = 0.322727\nI0821 12:08:54.830185 32364 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 12:08:54.830201 32364 solver.cpp:244]     Train net output #1: loss = 0.322726 (* 1 = 0.322726 loss)\nI0821 12:08:54.947613 32364 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 12:12:33.281240 32364 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 12:14:42.971791 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7036\nI0821 12:14:42.972157 32364 solver.cpp:404]     Test net output #1: loss = 0.899807 (* 1 = 0.899807 loss)\nI0821 12:14:45.076184 32364 solver.cpp:228] Iteration 3300, loss = 0.268033\nI0821 12:14:45.076231 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 12:14:45.076246 32364 solver.cpp:244]     Train net output #1: loss = 0.268033 (* 1 = 0.268033 loss)\nI0821 12:14:45.193392 32364 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 12:18:23.548885 32364 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 12:20:33.268224 32364 solver.cpp:404]     Test net output #0: accuracy 
= 0.6365\nI0821 12:20:33.268579 32364 solver.cpp:404]     Test net output #1: loss = 1.49927 (* 1 = 1.49927 loss)\nI0821 12:20:35.373311 32364 solver.cpp:228] Iteration 3400, loss = 0.370503\nI0821 12:20:35.373358 32364 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 12:20:35.373373 32364 solver.cpp:244]     Train net output #1: loss = 0.370503 (* 1 = 0.370503 loss)\nI0821 12:20:35.485091 32364 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 12:24:13.980319 32364 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 12:26:23.665539 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6394\nI0821 12:26:23.665900 32364 solver.cpp:404]     Test net output #1: loss = 1.09344 (* 1 = 1.09344 loss)\nI0821 12:26:25.770217 32364 solver.cpp:228] Iteration 3500, loss = 0.279204\nI0821 12:26:25.770262 32364 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 12:26:25.770277 32364 solver.cpp:244]     Train net output #1: loss = 0.279204 (* 1 = 0.279204 loss)\nI0821 12:26:25.879238 32364 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 12:30:04.273152 32364 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 12:32:13.942139 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6938\nI0821 12:32:13.942520 32364 solver.cpp:404]     Test net output #1: loss = 0.896442 (* 1 = 0.896442 loss)\nI0821 12:32:16.046804 32364 solver.cpp:228] Iteration 3600, loss = 0.37404\nI0821 12:32:16.046847 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 12:32:16.046862 32364 solver.cpp:244]     Train net output #1: loss = 0.37404 (* 1 = 0.37404 loss)\nI0821 12:32:16.163106 32364 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0821 12:35:54.588371 32364 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 12:38:04.280359 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7701\nI0821 12:38:04.280725 32364 solver.cpp:404]     Test net output #1: loss = 0.680865 (* 1 = 0.680865 loss)\nI0821 12:38:06.385102 32364 
solver.cpp:228] Iteration 3700, loss = 0.379198\nI0821 12:38:06.385164 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 12:38:06.385181 32364 solver.cpp:244]     Train net output #1: loss = 0.379198 (* 1 = 0.379198 loss)\nI0821 12:38:06.507556 32364 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 12:41:44.822235 32364 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 12:43:54.526876 32364 solver.cpp:404]     Test net output #0: accuracy = 0.688\nI0821 12:43:54.527261 32364 solver.cpp:404]     Test net output #1: loss = 0.981274 (* 1 = 0.981274 loss)\nI0821 12:43:56.631703 32364 solver.cpp:228] Iteration 3800, loss = 0.247007\nI0821 12:43:56.631748 32364 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 12:43:56.631763 32364 solver.cpp:244]     Train net output #1: loss = 0.247007 (* 1 = 0.247007 loss)\nI0821 12:43:56.746065 32364 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 12:47:35.077302 32364 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 12:49:44.801605 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7064\nI0821 12:49:44.802000 32364 solver.cpp:404]     Test net output #1: loss = 0.978993 (* 1 = 0.978993 loss)\nI0821 12:49:46.906636 32364 solver.cpp:228] Iteration 3900, loss = 0.458951\nI0821 12:49:46.906682 32364 solver.cpp:244]     Train net output #0: accuracy = 0.82\nI0821 12:49:46.906697 32364 solver.cpp:244]     Train net output #1: loss = 0.458951 (* 1 = 0.458951 loss)\nI0821 12:49:47.026074 32364 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 12:53:25.627907 32364 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 12:55:35.290848 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6918\nI0821 12:55:35.291220 32364 solver.cpp:404]     Test net output #1: loss = 0.982338 (* 1 = 0.982338 loss)\nI0821 12:55:37.395571 32364 solver.cpp:228] Iteration 4000, loss = 0.406561\nI0821 12:55:37.395634 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 
12:55:37.395651 32364 solver.cpp:244]     Train net output #1: loss = 0.406561 (* 1 = 0.406561 loss)\nI0821 12:55:37.510718 32364 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 12:59:15.763133 32364 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 13:01:25.324223 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6464\nI0821 13:01:25.324573 32364 solver.cpp:404]     Test net output #1: loss = 1.21425 (* 1 = 1.21425 loss)\nI0821 13:01:27.428493 32364 solver.cpp:228] Iteration 4100, loss = 0.364808\nI0821 13:01:27.428541 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 13:01:27.428556 32364 solver.cpp:244]     Train net output #1: loss = 0.364807 (* 1 = 0.364807 loss)\nI0821 13:01:27.541791 32364 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 13:05:05.951411 32364 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 13:07:15.633694 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6659\nI0821 13:07:15.634027 32364 solver.cpp:404]     Test net output #1: loss = 1.04699 (* 1 = 1.04699 loss)\nI0821 13:07:17.738610 32364 solver.cpp:228] Iteration 4200, loss = 0.374929\nI0821 13:07:17.738656 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 13:07:17.738672 32364 solver.cpp:244]     Train net output #1: loss = 0.374929 (* 1 = 0.374929 loss)\nI0821 13:07:17.854054 32364 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 13:10:56.237578 32364 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 13:13:05.918306 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7099\nI0821 13:13:05.918687 32364 solver.cpp:404]     Test net output #1: loss = 1.00242 (* 1 = 1.00242 loss)\nI0821 13:13:08.018410 32364 solver.cpp:228] Iteration 4300, loss = 0.306544\nI0821 13:13:08.018455 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 13:13:08.018471 32364 solver.cpp:244]     Train net output #1: loss = 0.306544 (* 1 = 0.306544 loss)\nI0821 13:13:08.121392 32364 sgd_solver.cpp:166] 
Iteration 4300, lr = 2.594\nI0821 13:16:45.645184 32364 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 13:18:55.248859 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6068\nI0821 13:18:55.249155 32364 solver.cpp:404]     Test net output #1: loss = 1.34918 (* 1 = 1.34918 loss)\nI0821 13:18:57.348342 32364 solver.cpp:228] Iteration 4400, loss = 0.422078\nI0821 13:18:57.348407 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 13:18:57.348424 32364 solver.cpp:244]     Train net output #1: loss = 0.422078 (* 1 = 0.422078 loss)\nI0821 13:18:57.456145 32364 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 13:22:35.026247 32364 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 13:24:44.678508 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7042\nI0821 13:24:44.678812 32364 solver.cpp:404]     Test net output #1: loss = 0.977862 (* 1 = 0.977862 loss)\nI0821 13:24:46.778558 32364 solver.cpp:228] Iteration 4500, loss = 0.295045\nI0821 13:24:46.778602 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 13:24:46.778619 32364 solver.cpp:244]     Train net output #1: loss = 0.295045 (* 1 = 0.295045 loss)\nI0821 13:24:46.891973 32364 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 13:28:24.434362 32364 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 13:30:33.807651 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7267\nI0821 13:30:33.808012 32364 solver.cpp:404]     Test net output #1: loss = 0.789931 (* 1 = 0.789931 loss)\nI0821 13:30:35.907735 32364 solver.cpp:228] Iteration 4600, loss = 0.455924\nI0821 13:30:35.907797 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 13:30:35.907815 32364 solver.cpp:244]     Train net output #1: loss = 0.455924 (* 1 = 0.455924 loss)\nI0821 13:30:36.011929 32364 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 13:34:13.648502 32364 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 13:36:22.800690 32364 
solver.cpp:404]     Test net output #0: accuracy = 0.6663\nI0821 13:36:22.801012 32364 solver.cpp:404]     Test net output #1: loss = 1.07876 (* 1 = 1.07876 loss)\nI0821 13:36:24.899806 32364 solver.cpp:228] Iteration 4700, loss = 0.396221\nI0821 13:36:24.899852 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 13:36:24.899868 32364 solver.cpp:244]     Train net output #1: loss = 0.396221 (* 1 = 0.396221 loss)\nI0821 13:36:25.005983 32364 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 13:40:02.568498 32364 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 13:42:11.958494 32364 solver.cpp:404]     Test net output #0: accuracy = 0.747\nI0821 13:42:11.958837 32364 solver.cpp:404]     Test net output #1: loss = 0.816835 (* 1 = 0.816835 loss)\nI0821 13:42:14.061561 32364 solver.cpp:228] Iteration 4800, loss = 0.389938\nI0821 13:42:14.061625 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 13:42:14.061643 32364 solver.cpp:244]     Train net output #1: loss = 0.389938 (* 1 = 0.389938 loss)\nI0821 13:42:14.174559 32364 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 13:45:52.272236 32364 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 13:48:01.847928 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7324\nI0821 13:48:01.848248 32364 solver.cpp:404]     Test net output #1: loss = 0.86326 (* 1 = 0.86326 loss)\nI0821 13:48:03.951776 32364 solver.cpp:228] Iteration 4900, loss = 0.344946\nI0821 13:48:03.951839 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 13:48:03.951858 32364 solver.cpp:244]     Train net output #1: loss = 0.344946 (* 1 = 0.344946 loss)\nI0821 13:48:04.057132 32364 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 13:51:42.039077 32364 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 13:53:51.592164 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7398\nI0821 13:53:51.592545 32364 solver.cpp:404]     Test net output #1: loss = 0.887596 (* 1 
= 0.887596 loss)\nI0821 13:53:53.696966 32364 solver.cpp:228] Iteration 5000, loss = 0.313643\nI0821 13:53:53.697033 32364 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 13:53:53.697052 32364 solver.cpp:244]     Train net output #1: loss = 0.313643 (* 1 = 0.313643 loss)\nI0821 13:53:53.810734 32364 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 13:57:31.931813 32364 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 13:59:41.581879 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5937\nI0821 13:59:41.582180 32364 solver.cpp:404]     Test net output #1: loss = 1.49318 (* 1 = 1.49318 loss)\nI0821 13:59:43.686858 32364 solver.cpp:228] Iteration 5100, loss = 0.346743\nI0821 13:59:43.686923 32364 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 13:59:43.686941 32364 solver.cpp:244]     Train net output #1: loss = 0.346743 (* 1 = 0.346743 loss)\nI0821 13:59:43.798280 32364 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 14:03:21.883316 32364 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 14:05:31.502533 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7886\nI0821 14:05:31.502830 32364 solver.cpp:404]     Test net output #1: loss = 0.669441 (* 1 = 0.669441 loss)\nI0821 14:05:33.606315 32364 solver.cpp:228] Iteration 5200, loss = 0.275637\nI0821 14:05:33.606379 32364 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 14:05:33.606397 32364 solver.cpp:244]     Train net output #1: loss = 0.275637 (* 1 = 0.275637 loss)\nI0821 14:05:33.720316 32364 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 14:09:11.929401 32364 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 14:11:21.561522 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6168\nI0821 14:11:21.561887 32364 solver.cpp:404]     Test net output #1: loss = 1.61994 (* 1 = 1.61994 loss)\nI0821 14:11:23.665686 32364 solver.cpp:228] Iteration 5300, loss = 0.327495\nI0821 14:11:23.665750 32364 solver.cpp:244]     Train 
net output #0: accuracy = 0.82\nI0821 14:11:23.665769 32364 solver.cpp:244]     Train net output #1: loss = 0.327495 (* 1 = 0.327495 loss)\nI0821 14:11:23.774907 32364 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 14:15:01.980720 32364 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 14:17:10.879688 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7305\nI0821 14:17:10.879959 32364 solver.cpp:404]     Test net output #1: loss = 0.843697 (* 1 = 0.843697 loss)\nI0821 14:17:12.984351 32364 solver.cpp:228] Iteration 5400, loss = 0.499512\nI0821 14:17:12.984416 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 14:17:12.984436 32364 solver.cpp:244]     Train net output #1: loss = 0.499512 (* 1 = 0.499512 loss)\nI0821 14:17:13.094776 32364 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 14:20:51.202535 32364 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 14:23:00.246829 32364 solver.cpp:404]     Test net output #0: accuracy = 0.679\nI0821 14:23:00.247203 32364 solver.cpp:404]     Test net output #1: loss = 1.19307 (* 1 = 1.19307 loss)\nI0821 14:23:02.351781 32364 solver.cpp:228] Iteration 5500, loss = 0.31972\nI0821 14:23:02.351845 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 14:23:02.351863 32364 solver.cpp:244]     Train net output #1: loss = 0.31972 (* 1 = 0.31972 loss)\nI0821 14:23:02.463342 32364 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 14:26:40.723398 32364 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 14:28:50.341867 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5569\nI0821 14:28:50.342200 32364 solver.cpp:404]     Test net output #1: loss = 1.43379 (* 1 = 1.43379 loss)\nI0821 14:28:52.444875 32364 solver.cpp:228] Iteration 5600, loss = 0.364475\nI0821 14:28:52.444938 32364 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 14:28:52.444957 32364 solver.cpp:244]     Train net output #1: loss = 0.364474 (* 1 = 0.364474 loss)\nI0821 
14:28:52.552417 32364 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 14:32:30.738783 32364 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 14:34:40.419380 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7872\nI0821 14:34:40.419729 32364 solver.cpp:404]     Test net output #1: loss = 0.675014 (* 1 = 0.675014 loss)\nI0821 14:34:42.524477 32364 solver.cpp:228] Iteration 5700, loss = 0.28363\nI0821 14:34:42.524535 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 14:34:42.524552 32364 solver.cpp:244]     Train net output #1: loss = 0.28363 (* 1 = 0.28363 loss)\nI0821 14:34:42.638527 32364 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 14:38:20.715512 32364 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 14:40:30.388360 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6588\nI0821 14:40:30.388684 32364 solver.cpp:404]     Test net output #1: loss = 1.1741 (* 1 = 1.1741 loss)\nI0821 14:40:32.492055 32364 solver.cpp:228] Iteration 5800, loss = 0.357571\nI0821 14:40:32.492120 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 14:40:32.492137 32364 solver.cpp:244]     Train net output #1: loss = 0.357571 (* 1 = 0.357571 loss)\nI0821 14:40:32.611500 32364 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 14:44:10.927536 32364 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 14:46:20.625741 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7127\nI0821 14:46:20.626067 32364 solver.cpp:404]     Test net output #1: loss = 1.10921 (* 1 = 1.10921 loss)\nI0821 14:46:22.730907 32364 solver.cpp:228] Iteration 5900, loss = 0.369217\nI0821 14:46:22.730973 32364 solver.cpp:244]     Train net output #0: accuracy = 0.85\nI0821 14:46:22.730989 32364 solver.cpp:244]     Train net output #1: loss = 0.369217 (* 1 = 0.369217 loss)\nI0821 14:46:22.849395 32364 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 14:50:01.043473 32364 solver.cpp:337] Iteration 6000, Testing net 
(#0)\nI0821 14:52:10.603747 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7704\nI0821 14:52:10.604086 32364 solver.cpp:404]     Test net output #1: loss = 0.695136 (* 1 = 0.695136 loss)\nI0821 14:52:12.707096 32364 solver.cpp:228] Iteration 6000, loss = 0.34571\nI0821 14:52:12.707157 32364 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 14:52:12.707175 32364 solver.cpp:244]     Train net output #1: loss = 0.34571 (* 1 = 0.34571 loss)\nI0821 14:52:12.815351 32364 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 14:55:50.964404 32364 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 14:58:00.670943 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5254\nI0821 14:58:00.671272 32364 solver.cpp:404]     Test net output #1: loss = 1.88023 (* 1 = 1.88023 loss)\nI0821 14:58:02.774904 32364 solver.cpp:228] Iteration 6100, loss = 0.344612\nI0821 14:58:02.774967 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 14:58:02.774986 32364 solver.cpp:244]     Train net output #1: loss = 0.344612 (* 1 = 0.344612 loss)\nI0821 14:58:02.881795 32364 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 15:01:40.546175 32364 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 15:03:48.542512 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7574\nI0821 15:03:48.542830 32364 solver.cpp:404]     Test net output #1: loss = 0.738529 (* 1 = 0.738529 loss)\nI0821 15:03:50.641060 32364 solver.cpp:228] Iteration 6200, loss = 0.324168\nI0821 15:03:50.641115 32364 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 15:03:50.641132 32364 solver.cpp:244]     Train net output #1: loss = 0.324168 (* 1 = 0.324168 loss)\nI0821 15:03:50.754707 32364 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 15:07:28.544128 32364 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 15:09:36.516458 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7573\nI0821 15:09:36.516793 32364 solver.cpp:404]     Test 
net output #1: loss = 0.764747 (* 1 = 0.764747 loss)\nI0821 15:09:38.614686 32364 solver.cpp:228] Iteration 6300, loss = 0.16533\nI0821 15:09:38.614732 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 15:09:38.614748 32364 solver.cpp:244]     Train net output #1: loss = 0.16533 (* 1 = 0.16533 loss)\nI0821 15:09:38.730689 32364 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 15:13:16.438874 32364 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 15:15:24.368146 32364 solver.cpp:404]     Test net output #0: accuracy = 0.6919\nI0821 15:15:24.368492 32364 solver.cpp:404]     Test net output #1: loss = 1.27054 (* 1 = 1.27054 loss)\nI0821 15:15:26.468029 32364 solver.cpp:228] Iteration 6400, loss = 0.413852\nI0821 15:15:26.468073 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 15:15:26.468089 32364 solver.cpp:244]     Train net output #1: loss = 0.413851 (* 1 = 0.413851 loss)\nI0821 15:15:26.584113 32364 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 15:19:04.344707 32364 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 15:21:12.251708 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7392\nI0821 15:21:12.252048 32364 solver.cpp:404]     Test net output #1: loss = 0.890261 (* 1 = 0.890261 loss)\nI0821 15:21:14.352218 32364 solver.cpp:228] Iteration 6500, loss = 0.359244\nI0821 15:21:14.352268 32364 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 15:21:14.352294 32364 solver.cpp:244]     Train net output #1: loss = 0.359243 (* 1 = 0.359243 loss)\nI0821 15:21:14.462404 32364 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 15:24:52.199108 32364 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 15:27:00.136716 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7009\nI0821 15:27:00.137059 32364 solver.cpp:404]     Test net output #1: loss = 0.975446 (* 1 = 0.975446 loss)\nI0821 15:27:02.236484 32364 solver.cpp:228] Iteration 6600, loss = 0.167177\nI0821 
15:27:02.236529 32364 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 15:27:02.236553 32364 solver.cpp:244]     Train net output #1: loss = 0.167176 (* 1 = 0.167176 loss)\nI0821 15:27:02.347498 32364 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 15:30:39.956553 32364 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 15:32:47.881546 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8062\nI0821 15:32:47.881886 32364 solver.cpp:404]     Test net output #1: loss = 0.603427 (* 1 = 0.603427 loss)\nI0821 15:32:49.980510 32364 solver.cpp:228] Iteration 6700, loss = 0.292341\nI0821 15:32:49.980559 32364 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 15:32:49.980583 32364 solver.cpp:244]     Train net output #1: loss = 0.292341 (* 1 = 0.292341 loss)\nI0821 15:32:50.100205 32364 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 15:36:27.640868 32364 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 15:38:35.403446 32364 solver.cpp:404]     Test net output #0: accuracy = 0.5351\nI0821 15:38:35.403784 32364 solver.cpp:404]     Test net output #1: loss = 1.87967 (* 1 = 1.87967 loss)\nI0821 15:38:37.502490 32364 solver.cpp:228] Iteration 6800, loss = 0.224166\nI0821 15:38:37.502540 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 15:38:37.502564 32364 solver.cpp:244]     Train net output #1: loss = 0.224165 (* 1 = 0.224165 loss)\nI0821 15:38:37.623886 32364 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 15:42:15.219214 32364 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 15:44:22.993930 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7125\nI0821 15:44:22.994269 32364 solver.cpp:404]     Test net output #1: loss = 0.912124 (* 1 = 0.912124 loss)\nI0821 15:44:25.094113 32364 solver.cpp:228] Iteration 6900, loss = 0.373321\nI0821 15:44:25.094162 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 15:44:25.094192 32364 solver.cpp:244]     Train net output 
#1: loss = 0.37332 (* 1 = 0.37332 loss)\nI0821 15:44:25.210964 32364 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 15:48:02.725816 32364 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 15:50:10.604959 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7253\nI0821 15:50:10.605298 32364 solver.cpp:404]     Test net output #1: loss = 1.04419 (* 1 = 1.04419 loss)\nI0821 15:50:12.704535 32364 solver.cpp:228] Iteration 7000, loss = 0.332963\nI0821 15:50:12.704586 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 15:50:12.704608 32364 solver.cpp:244]     Train net output #1: loss = 0.332963 (* 1 = 0.332963 loss)\nI0821 15:50:12.823413 32364 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 15:53:50.389461 32364 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 15:55:58.248333 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7942\nI0821 15:55:58.248677 32364 solver.cpp:404]     Test net output #1: loss = 0.619181 (* 1 = 0.619181 loss)\nI0821 15:56:00.346904 32364 solver.cpp:228] Iteration 7100, loss = 0.35864\nI0821 15:56:00.346946 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 15:56:00.346967 32364 solver.cpp:244]     Train net output #1: loss = 0.358639 (* 1 = 0.358639 loss)\nI0821 15:56:00.459249 32364 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 15:59:38.010098 32364 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 16:01:45.759233 32364 solver.cpp:404]     Test net output #0: accuracy = 0.799\nI0821 16:01:45.759574 32364 solver.cpp:404]     Test net output #1: loss = 0.646951 (* 1 = 0.646951 loss)\nI0821 16:01:47.858253 32364 solver.cpp:228] Iteration 7200, loss = 0.249248\nI0821 16:01:47.858301 32364 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 16:01:47.858325 32364 solver.cpp:244]     Train net output #1: loss = 0.249248 (* 1 = 0.249248 loss)\nI0821 16:01:47.969379 32364 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 16:05:25.558676 32364 
solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 16:07:33.314568 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7627\nI0821 16:07:33.314909 32364 solver.cpp:404]     Test net output #1: loss = 0.757044 (* 1 = 0.757044 loss)\nI0821 16:07:35.412796 32364 solver.cpp:228] Iteration 7300, loss = 0.193384\nI0821 16:07:35.412845 32364 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:07:35.412869 32364 solver.cpp:244]     Train net output #1: loss = 0.193383 (* 1 = 0.193383 loss)\nI0821 16:07:35.536741 32364 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 16:11:13.122350 32364 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 16:13:20.992981 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7636\nI0821 16:13:20.993301 32364 solver.cpp:404]     Test net output #1: loss = 0.876175 (* 1 = 0.876175 loss)\nI0821 16:13:23.091755 32364 solver.cpp:228] Iteration 7400, loss = 0.343472\nI0821 16:13:23.091804 32364 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 16:13:23.091828 32364 solver.cpp:244]     Train net output #1: loss = 0.343472 (* 1 = 0.343472 loss)\nI0821 16:13:23.218926 32364 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 16:17:00.818943 32364 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 16:19:08.685991 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7461\nI0821 16:19:08.686339 32364 solver.cpp:404]     Test net output #1: loss = 0.920882 (* 1 = 0.920882 loss)\nI0821 16:19:10.785192 32364 solver.cpp:228] Iteration 7500, loss = 0.0984758\nI0821 16:19:10.785240 32364 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 16:19:10.785265 32364 solver.cpp:244]     Train net output #1: loss = 0.0984755 (* 1 = 0.0984755 loss)\nI0821 16:19:10.899565 32364 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 16:22:48.355301 32364 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 16:24:56.225797 32364 solver.cpp:404]     Test net output #0: accuracy = 
0.7519\nI0821 16:24:56.226152 32364 solver.cpp:404]     Test net output #1: loss = 0.791302 (* 1 = 0.791302 loss)\nI0821 16:24:58.325469 32364 solver.cpp:228] Iteration 7600, loss = 0.250759\nI0821 16:24:58.325518 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 16:24:58.325543 32364 solver.cpp:244]     Train net output #1: loss = 0.250759 (* 1 = 0.250759 loss)\nI0821 16:24:58.447842 32364 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 16:28:36.183367 32364 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 16:30:44.112066 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8232\nI0821 16:30:44.112408 32364 solver.cpp:404]     Test net output #1: loss = 0.55855 (* 1 = 0.55855 loss)\nI0821 16:30:46.211136 32364 solver.cpp:228] Iteration 7700, loss = 0.115761\nI0821 16:30:46.211189 32364 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 16:30:46.211215 32364 solver.cpp:244]     Train net output #1: loss = 0.115761 (* 1 = 0.115761 loss)\nI0821 16:30:46.329617 32364 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 16:34:23.917289 32364 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 16:36:31.961655 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7823\nI0821 16:36:31.962011 32364 solver.cpp:404]     Test net output #1: loss = 0.741586 (* 1 = 0.741586 loss)\nI0821 16:36:34.061758 32364 solver.cpp:228] Iteration 7800, loss = 0.0999317\nI0821 16:36:34.061806 32364 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 16:36:34.061832 32364 solver.cpp:244]     Train net output #1: loss = 0.0999314 (* 1 = 0.0999314 loss)\nI0821 16:36:34.184564 32364 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 16:40:11.940217 32364 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 16:42:20.020683 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7711\nI0821 16:42:20.021054 32364 solver.cpp:404]     Test net output #1: loss = 0.774581 (* 1 = 0.774581 loss)\nI0821 16:42:22.128497 
32364 solver.cpp:228] Iteration 7900, loss = 0.316933\nI0821 16:42:22.128547 32364 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:42:22.128564 32364 solver.cpp:244]     Train net output #1: loss = 0.316932 (* 1 = 0.316932 loss)\nI0821 16:42:22.234983 32364 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 16:45:59.795389 32364 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 16:48:07.668581 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8334\nI0821 16:48:07.668933 32364 solver.cpp:404]     Test net output #1: loss = 0.545826 (* 1 = 0.545826 loss)\nI0821 16:48:09.767359 32364 solver.cpp:228] Iteration 8000, loss = 0.160634\nI0821 16:48:09.767405 32364 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 16:48:09.767421 32364 solver.cpp:244]     Train net output #1: loss = 0.160633 (* 1 = 0.160633 loss)\nI0821 16:48:09.888875 32364 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 16:51:47.549789 32364 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 16:53:55.593062 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8381\nI0821 16:53:55.593410 32364 solver.cpp:404]     Test net output #1: loss = 0.513869 (* 1 = 0.513869 loss)\nI0821 16:53:57.692397 32364 solver.cpp:228] Iteration 8100, loss = 0.160379\nI0821 16:53:57.692441 32364 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 16:53:57.692458 32364 solver.cpp:244]     Train net output #1: loss = 0.160378 (* 1 = 0.160378 loss)\nI0821 16:53:57.808460 32364 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 16:57:35.261276 32364 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 16:59:43.147845 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7738\nI0821 16:59:43.148200 32364 solver.cpp:404]     Test net output #1: loss = 0.771397 (* 1 = 0.771397 loss)\nI0821 16:59:45.246448 32364 solver.cpp:228] Iteration 8200, loss = 0.204953\nI0821 16:59:45.246493 32364 solver.cpp:244]     Train net output #0: accuracy = 
0.92\nI0821 16:59:45.246510 32364 solver.cpp:244]     Train net output #1: loss = 0.204952 (* 1 = 0.204952 loss)\nI0821 16:59:45.360345 32364 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 17:03:22.724211 32364 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 17:05:30.583679 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0821 17:05:30.584022 32364 solver.cpp:404]     Test net output #1: loss = 0.827659 (* 1 = 0.827659 loss)\nI0821 17:05:32.682376 32364 solver.cpp:228] Iteration 8300, loss = 0.140567\nI0821 17:05:32.682425 32364 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 17:05:32.682441 32364 solver.cpp:244]     Train net output #1: loss = 0.140566 (* 1 = 0.140566 loss)\nI0821 17:05:32.800390 32364 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 17:09:10.302675 32364 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 17:11:18.146317 32364 solver.cpp:404]     Test net output #0: accuracy = 0.7955\nI0821 17:11:18.146654 32364 solver.cpp:404]     Test net output #1: loss = 0.715829 (* 1 = 0.715829 loss)\nI0821 17:11:20.244868 32364 solver.cpp:228] Iteration 8400, loss = 0.32865\nI0821 17:11:20.244915 32364 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 17:11:20.244931 32364 solver.cpp:244]     Train net output #1: loss = 0.32865 (* 1 = 0.32865 loss)\nI0821 17:11:20.360762 32364 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 17:14:57.981617 32364 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 17:17:05.811651 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8225\nI0821 17:17:05.811991 32364 solver.cpp:404]     Test net output #1: loss = 0.627259 (* 1 = 0.627259 loss)\nI0821 17:17:07.910230 32364 solver.cpp:228] Iteration 8500, loss = 0.116385\nI0821 17:17:07.910277 32364 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 17:17:07.910295 32364 solver.cpp:244]     Train net output #1: loss = 0.116385 (* 1 = 0.116385 loss)\nI0821 17:17:08.028539 32364 
sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 17:20:45.400898 32364 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 17:22:53.232920 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8192\nI0821 17:22:53.233265 32364 solver.cpp:404]     Test net output #1: loss = 0.659114 (* 1 = 0.659114 loss)\nI0821 17:22:55.332736 32364 solver.cpp:228] Iteration 8600, loss = 0.111726\nI0821 17:22:55.332782 32364 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 17:22:55.332800 32364 solver.cpp:244]     Train net output #1: loss = 0.111726 (* 1 = 0.111726 loss)\nI0821 17:22:55.448460 32364 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 17:26:33.134929 32364 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 17:28:41.021303 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8451\nI0821 17:28:41.021644 32364 solver.cpp:404]     Test net output #1: loss = 0.511855 (* 1 = 0.511855 loss)\nI0821 17:28:43.120831 32364 solver.cpp:228] Iteration 8700, loss = 0.200795\nI0821 17:28:43.120880 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:28:43.120896 32364 solver.cpp:244]     Train net output #1: loss = 0.200795 (* 1 = 0.200795 loss)\nI0821 17:28:43.239706 32364 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 17:32:20.817322 32364 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 17:34:28.709935 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8731\nI0821 17:34:28.710270 32364 solver.cpp:404]     Test net output #1: loss = 0.436212 (* 1 = 0.436212 loss)\nI0821 17:34:30.809913 32364 solver.cpp:228] Iteration 8800, loss = 0.172835\nI0821 17:34:30.809959 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:34:30.809976 32364 solver.cpp:244]     Train net output #1: loss = 0.172835 (* 1 = 0.172835 loss)\nI0821 17:34:30.932684 32364 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 17:38:08.638423 32364 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 
17:40:16.510243 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8309\nI0821 17:40:16.510579 32364 solver.cpp:404]     Test net output #1: loss = 0.630922 (* 1 = 0.630922 loss)\nI0821 17:40:18.609298 32364 solver.cpp:228] Iteration 8900, loss = 0.151512\nI0821 17:40:18.609346 32364 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:40:18.609364 32364 solver.cpp:244]     Train net output #1: loss = 0.151512 (* 1 = 0.151512 loss)\nI0821 17:40:18.730409 32364 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 17:43:56.216503 32364 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 17:46:04.095137 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8251\nI0821 17:46:04.095489 32364 solver.cpp:404]     Test net output #1: loss = 0.669082 (* 1 = 0.669082 loss)\nI0821 17:46:06.194427 32364 solver.cpp:228] Iteration 9000, loss = 0.15746\nI0821 17:46:06.194474 32364 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 17:46:06.194490 32364 solver.cpp:244]     Train net output #1: loss = 0.15746 (* 1 = 0.15746 loss)\nI0821 17:46:06.306072 32364 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 17:49:43.883810 32364 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 17:51:51.617429 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8714\nI0821 17:51:51.617765 32364 solver.cpp:404]     Test net output #1: loss = 0.466156 (* 1 = 0.466156 loss)\nI0821 17:51:53.716373 32364 solver.cpp:228] Iteration 9100, loss = 0.0720707\nI0821 17:51:53.716423 32364 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 17:51:53.716439 32364 solver.cpp:244]     Train net output #1: loss = 0.0720705 (* 1 = 0.0720705 loss)\nI0821 17:51:53.832010 32364 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 17:55:31.384663 32364 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 17:57:39.177177 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8576\nI0821 17:57:39.177517 32364 solver.cpp:404]     Test net 
output #1: loss = 0.54994 (* 1 = 0.54994 loss)\nI0821 17:57:41.275698 32364 solver.cpp:228] Iteration 9200, loss = 0.0518215\nI0821 17:57:41.275745 32364 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 17:57:41.275760 32364 solver.cpp:244]     Train net output #1: loss = 0.0518213 (* 1 = 0.0518213 loss)\nI0821 17:57:41.389550 32364 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 18:01:19.216310 32364 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 18:03:26.916117 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0821 18:03:26.916442 32364 solver.cpp:404]     Test net output #1: loss = 0.744137 (* 1 = 0.744137 loss)\nI0821 18:03:29.015400 32364 solver.cpp:228] Iteration 9300, loss = 0.0274755\nI0821 18:03:29.015447 32364 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 18:03:29.015465 32364 solver.cpp:244]     Train net output #1: loss = 0.0274753 (* 1 = 0.0274753 loss)\nI0821 18:03:29.129011 32364 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 18:07:06.789052 32364 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 18:09:14.509632 32364 solver.cpp:404]     Test net output #0: accuracy = 0.893\nI0821 18:09:14.509975 32364 solver.cpp:404]     Test net output #1: loss = 0.385811 (* 1 = 0.385811 loss)\nI0821 18:09:16.608423 32364 solver.cpp:228] Iteration 9400, loss = 0.0392768\nI0821 18:09:16.608469 32364 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0821 18:09:16.608485 32364 solver.cpp:244]     Train net output #1: loss = 0.0392766 (* 1 = 0.0392766 loss)\nI0821 18:09:16.731365 32364 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 18:12:54.262681 32364 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 18:15:01.989091 32364 solver.cpp:404]     Test net output #0: accuracy = 0.8773\nI0821 18:15:01.989439 32364 solver.cpp:404]     Test net output #1: loss = 0.54144 (* 1 = 0.54144 loss)\nI0821 18:15:04.088235 32364 solver.cpp:228] Iteration 9500, loss = 0.0136921\nI0821 
18:15:04.088284 32364 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:15:04.088300 32364 solver.cpp:244]     Train net output #1: loss = 0.0136919 (* 1 = 0.0136919 loss)\nI0821 18:15:04.207804 32364 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 18:18:41.986145 32364 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 18:20:49.732070 32364 solver.cpp:404]     Test net output #0: accuracy = 0.9073\nI0821 18:20:49.732399 32364 solver.cpp:404]     Test net output #1: loss = 0.36002 (* 1 = 0.36002 loss)\nI0821 18:20:51.830847 32364 solver.cpp:228] Iteration 9600, loss = 0.00613778\nI0821 18:20:51.830893 32364 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:20:51.830910 32364 solver.cpp:244]     Train net output #1: loss = 0.00613758 (* 1 = 0.00613758 loss)\nI0821 18:20:51.949131 32364 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 18:24:29.481161 32364 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 18:26:37.339061 32364 solver.cpp:404]     Test net output #0: accuracy = 0.9055\nI0821 18:26:37.339395 32364 solver.cpp:404]     Test net output #1: loss = 0.38719 (* 1 = 0.38719 loss)\nI0821 18:26:39.437677 32364 solver.cpp:228] Iteration 9700, loss = 0.00673644\nI0821 18:26:39.437723 32364 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:26:39.437741 32364 solver.cpp:244]     Train net output #1: loss = 0.00673624 (* 1 = 0.00673624 loss)\nI0821 18:26:39.549401 32364 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 18:30:17.167723 32364 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 18:32:25.101254 32364 solver.cpp:404]     Test net output #0: accuracy = 0.9185\nI0821 18:32:25.101593 32364 solver.cpp:404]     Test net output #1: loss = 0.351868 (* 1 = 0.351868 loss)\nI0821 18:32:27.199882 32364 solver.cpp:228] Iteration 9800, loss = 0.000775465\nI0821 18:32:27.199928 32364 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:32:27.199950 32364 solver.cpp:244]     Train net 
output #1: loss = 0.000775266 (* 1 = 0.000775266 loss)\nI0821 18:32:27.323686 32364 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 18:36:05.118196 32364 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 18:38:12.945931 32364 solver.cpp:404]     Test net output #0: accuracy = 0.9217\nI0821 18:38:12.946276 32364 solver.cpp:404]     Test net output #1: loss = 0.334414 (* 1 = 0.334414 loss)\nI0821 18:38:15.043411 32364 solver.cpp:228] Iteration 9900, loss = 0.000543994\nI0821 18:38:15.043457 32364 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:38:15.043473 32364 solver.cpp:244]     Train net output #1: loss = 0.000543796 (* 1 = 0.000543796 loss)\nI0821 18:38:15.167251 32364 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 18:41:52.708469 32364 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kRes110Tab1_iter_10000.caffemodel\nI0821 18:41:53.326715 32364 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kRes110Tab1_iter_10000.solverstate\nI0821 18:41:54.031558 32364 solver.cpp:317] Iteration 10000, loss = 0.000936799\nI0821 18:41:54.031606 32364 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 18:44:01.857681 32364 solver.cpp:404]     Test net output #0: accuracy = 0.9237\nI0821 18:44:01.858023 32364 solver.cpp:404]     Test net output #1: loss = 0.333729 (* 1 = 0.333729 loss)\nI0821 18:44:01.858042 32364 solver.cpp:322] Optimization Done.\nI0821 18:44:12.275151 32364 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kRes20Tab1",
    "content": "I0817 16:27:02.973675 17345 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:27:02.976038 17345 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:27:02.978123 17345 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:27:02.979356 17345 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:27:02.980576 17345 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:27:02.981812 17345 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:27:02.983047 17345 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:27:02.984275 17345 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:27:02.985508 17345 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:27:03.401885 17345 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kRes20Tab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:27:03.406042 17345 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:27:03.418248 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:03.418304 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:03.419014 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:27:03.419068 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:27:03.419090 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:27:03.419108 17345 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:27:03.419128 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:27:03.419154 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:27:03.419173 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:27:03.419191 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:27:03.419210 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:27:03.419229 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:27:03.419255 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:27:03.419275 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:27:03.419294 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:27:03.419312 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:27:03.419332 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:27:03.419348 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:27:03.419373 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:27:03.419390 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 
16:27:03.419410 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:27:03.419430 17345 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:27:03.420193 17345 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  
top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  
top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_concat0\"\n  type: 
\"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0817 16:27:03.421129 17345 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:27:03.422317 17345 net.cpp:100] Creating Layer dataLayer\nI0817 16:27:03.422389 17345 net.cpp:408] dataLayer -> data_top\nI0817 16:27:03.422577 17345 net.cpp:408] dataLayer -> label\nI0817 16:27:03.422694 17345 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:27:03.431902 17350 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:27:03.453838 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:03.460916 17345 net.cpp:150] Setting up dataLayer\nI0817 16:27:03.460978 17345 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:27:03.460990 17345 net.cpp:157] Top shape: 125 
(125)\nI0817 16:27:03.460996 17345 net.cpp:165] Memory required for data: 1536500\nI0817 16:27:03.461011 17345 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:27:03.461027 17345 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:27:03.461035 17345 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:27:03.461055 17345 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:27:03.461071 17345 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:27:03.461155 17345 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:27:03.461169 17345 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:03.461176 17345 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:03.461181 17345 net.cpp:165] Memory required for data: 1537500\nI0817 16:27:03.461186 17345 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:27:03.461247 17345 net.cpp:100] Creating Layer pre_conv\nI0817 16:27:03.461261 17345 net.cpp:434] pre_conv <- data_top\nI0817 16:27:03.461273 17345 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:27:03.463297 17345 net.cpp:150] Setting up pre_conv\nI0817 16:27:03.463318 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.463325 17345 net.cpp:165] Memory required for data: 9729500\nI0817 16:27:03.463376 17345 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:27:03.463439 17345 net.cpp:100] Creating Layer pre_bn\nI0817 16:27:03.463451 17345 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:27:03.463465 17345 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:27:03.463575 17351 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:03.463800 17345 net.cpp:150] Setting up pre_bn\nI0817 16:27:03.463820 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.463826 17345 net.cpp:165] Memory required for data: 17921500\nI0817 16:27:03.463843 17345 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:03.463889 17345 net.cpp:100] Creating Layer pre_scale\nI0817 16:27:03.463898 
17345 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:27:03.463907 17345 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:27:03.464068 17345 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:03.464318 17345 net.cpp:150] Setting up pre_scale\nI0817 16:27:03.464332 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.464337 17345 net.cpp:165] Memory required for data: 26113500\nI0817 16:27:03.464349 17345 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:27:03.464390 17345 net.cpp:100] Creating Layer pre_relu\nI0817 16:27:03.464401 17345 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:27:03.464412 17345 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:27:03.464423 17345 net.cpp:150] Setting up pre_relu\nI0817 16:27:03.464460 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.464469 17345 net.cpp:165] Memory required for data: 34305500\nI0817 16:27:03.464484 17345 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:27:03.464499 17345 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:27:03.464509 17345 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:27:03.464534 17345 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:27:03.464553 17345 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:27:03.464619 17345 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:27:03.464637 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.464644 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.464649 17345 net.cpp:165] Memory required for data: 50689500\nI0817 16:27:03.464654 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:27:03.464670 17345 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:27:03.464676 17345 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:27:03.464685 17345 net.cpp:408] L1_b1_cbr1_conv -> 
L1_b1_cbr1_conv_top\nI0817 16:27:03.464992 17345 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:27:03.465006 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.465011 17345 net.cpp:165] Memory required for data: 58881500\nI0817 16:27:03.465029 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:27:03.465044 17345 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:27:03.465049 17345 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:27:03.465059 17345 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:27:03.465286 17345 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:27:03.465298 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.465303 17345 net.cpp:165] Memory required for data: 67073500\nI0817 16:27:03.465314 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:03.465323 17345 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:27:03.465328 17345 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:27:03.465339 17345 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.465389 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:03.465530 17345 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:27:03.465546 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.465553 17345 net.cpp:165] Memory required for data: 75265500\nI0817 16:27:03.465561 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:27:03.465570 17345 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:27:03.465575 17345 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:27:03.465582 17345 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.465591 17345 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:27:03.465598 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.465603 17345 net.cpp:165] Memory required for data: 83457500\nI0817 16:27:03.465607 17345 layer_factory.hpp:77] 
Creating layer L1_b1_cbr2_conv\nI0817 16:27:03.465622 17345 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:27:03.465627 17345 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:27:03.465641 17345 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:27:03.465941 17345 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:27:03.465955 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.465960 17345 net.cpp:165] Memory required for data: 91649500\nI0817 16:27:03.465968 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:27:03.465981 17345 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:27:03.465986 17345 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:27:03.465997 17345 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:27:03.466223 17345 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:27:03.466235 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466241 17345 net.cpp:165] Memory required for data: 99841500\nI0817 16:27:03.466255 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:03.466266 17345 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:27:03.466272 17345 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:27:03.466280 17345 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.466342 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:03.466485 17345 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:27:03.466500 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466505 17345 net.cpp:165] Memory required for data: 108033500\nI0817 16:27:03.466513 17345 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:27:03.466563 17345 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:27:03.466575 17345 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:27:03.466583 17345 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 
16:27:03.466594 17345 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:27:03.466665 17345 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:27:03.466686 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466691 17345 net.cpp:165] Memory required for data: 116225500\nI0817 16:27:03.466696 17345 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:27:03.466706 17345 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:27:03.466711 17345 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:27:03.466717 17345 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.466727 17345 net.cpp:150] Setting up L1_b1_relu\nI0817 16:27:03.466734 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466738 17345 net.cpp:165] Memory required for data: 124417500\nI0817 16:27:03.466743 17345 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.466755 17345 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.466760 17345 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:27:03.466768 17345 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:03.466778 17345 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:03.466820 17345 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.466832 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466840 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.466845 17345 net.cpp:165] Memory required for data: 140801500\nI0817 16:27:03.466850 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:27:03.466861 17345 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:27:03.466866 17345 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 
16:27:03.466878 17345 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:27:03.467182 17345 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:27:03.467196 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.467201 17345 net.cpp:165] Memory required for data: 148993500\nI0817 16:27:03.467211 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:27:03.467221 17345 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:27:03.467226 17345 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:27:03.467237 17345 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:27:03.467475 17345 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:27:03.467494 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.467500 17345 net.cpp:165] Memory required for data: 157185500\nI0817 16:27:03.467510 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:03.467522 17345 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:27:03.467528 17345 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:27:03.467536 17345 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.467587 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:03.467722 17345 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:27:03.467736 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.467741 17345 net.cpp:165] Memory required for data: 165377500\nI0817 16:27:03.467756 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:27:03.467767 17345 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:27:03.467773 17345 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:27:03.467782 17345 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.467790 17345 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:27:03.467797 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.467802 17345 net.cpp:165] Memory required for data: 
173569500\nI0817 16:27:03.467806 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:27:03.467819 17345 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:27:03.467825 17345 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:27:03.467836 17345 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:27:03.468134 17345 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:27:03.468148 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468153 17345 net.cpp:165] Memory required for data: 181761500\nI0817 16:27:03.468163 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:27:03.468173 17345 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:27:03.468178 17345 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:27:03.468186 17345 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:27:03.468423 17345 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:27:03.468436 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468441 17345 net.cpp:165] Memory required for data: 189953500\nI0817 16:27:03.468461 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:03.468472 17345 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:27:03.468483 17345 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:27:03.468492 17345 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.468547 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:03.468684 17345 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:27:03.468698 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468703 17345 net.cpp:165] Memory required for data: 198145500\nI0817 16:27:03.468713 17345 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:27:03.468721 17345 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:27:03.468727 17345 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:27:03.468734 17345 net.cpp:434] 
L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:03.468744 17345 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:27:03.468773 17345 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:27:03.468782 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468787 17345 net.cpp:165] Memory required for data: 206337500\nI0817 16:27:03.468792 17345 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:27:03.468802 17345 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:27:03.468807 17345 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:27:03.468816 17345 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.468823 17345 net.cpp:150] Setting up L1_b2_relu\nI0817 16:27:03.468830 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468834 17345 net.cpp:165] Memory required for data: 214529500\nI0817 16:27:03.468839 17345 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.468847 17345 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.468852 17345 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:27:03.468858 17345 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:03.468868 17345 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:03.468909 17345 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.468921 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468935 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.468940 17345 net.cpp:165] Memory required for data: 230913500\nI0817 16:27:03.468945 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:27:03.468958 17345 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:27:03.468964 17345 net.cpp:434] 
L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:03.468973 17345 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:27:03.469274 17345 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:27:03.469287 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.469293 17345 net.cpp:165] Memory required for data: 239105500\nI0817 16:27:03.469301 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:27:03.469313 17345 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:27:03.469319 17345 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:27:03.469329 17345 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:27:03.469568 17345 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:27:03.469581 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.469586 17345 net.cpp:165] Memory required for data: 247297500\nI0817 16:27:03.469596 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:03.469605 17345 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:27:03.469611 17345 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:27:03.469619 17345 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.469676 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:03.469812 17345 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:27:03.469825 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.469830 17345 net.cpp:165] Memory required for data: 255489500\nI0817 16:27:03.469840 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:27:03.469847 17345 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:27:03.469852 17345 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:27:03.469863 17345 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.469872 17345 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:27:03.469879 17345 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:27:03.469884 17345 net.cpp:165] Memory required for data: 263681500\nI0817 16:27:03.469888 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:27:03.469902 17345 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:27:03.469908 17345 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:27:03.469919 17345 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:27:03.470222 17345 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:27:03.470237 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.470242 17345 net.cpp:165] Memory required for data: 271873500\nI0817 16:27:03.470250 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:27:03.470264 17345 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:27:03.470270 17345 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:27:03.470281 17345 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:27:03.470516 17345 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:27:03.470530 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.470535 17345 net.cpp:165] Memory required for data: 280065500\nI0817 16:27:03.470546 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:03.470554 17345 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:27:03.470561 17345 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:27:03.470567 17345 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.470621 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:03.470754 17345 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:27:03.470767 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.470772 17345 net.cpp:165] Memory required for data: 288257500\nI0817 16:27:03.470788 17345 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:27:03.470800 17345 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:27:03.470806 17345 net.cpp:434] 
L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:27:03.470813 17345 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:03.470820 17345 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:27:03.470852 17345 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:27:03.470865 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.470870 17345 net.cpp:165] Memory required for data: 296449500\nI0817 16:27:03.470875 17345 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:27:03.470882 17345 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:27:03.470887 17345 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:27:03.470897 17345 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:27:03.470906 17345 net.cpp:150] Setting up L1_b3_relu\nI0817 16:27:03.470913 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.470918 17345 net.cpp:165] Memory required for data: 304641500\nI0817 16:27:03.470922 17345 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.470929 17345 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.470934 17345 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:27:03.470947 17345 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:03.470957 17345 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:03.470998 17345 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.471009 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.471015 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.471020 17345 net.cpp:165] Memory required for data: 321025500\nI0817 16:27:03.471025 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:27:03.471040 17345 
net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:27:03.471046 17345 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:03.471055 17345 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:27:03.471362 17345 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:27:03.471376 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.471381 17345 net.cpp:165] Memory required for data: 323073500\nI0817 16:27:03.471390 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:27:03.471402 17345 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:27:03.471408 17345 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:27:03.471416 17345 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:27:03.471652 17345 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:27:03.471668 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.471673 17345 net.cpp:165] Memory required for data: 325121500\nI0817 16:27:03.471683 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:03.471691 17345 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:27:03.471698 17345 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:27:03.471704 17345 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.471760 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:03.471900 17345 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:27:03.471916 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.471921 17345 net.cpp:165] Memory required for data: 327169500\nI0817 16:27:03.471930 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:27:03.471938 17345 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:27:03.471943 17345 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:27:03.471951 17345 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.471959 17345 net.cpp:150] Setting up 
L2_b1_cbr1_relu\nI0817 16:27:03.471973 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.471978 17345 net.cpp:165] Memory required for data: 329217500\nI0817 16:27:03.471983 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:27:03.471997 17345 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:27:03.472002 17345 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:27:03.472013 17345 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:27:03.472321 17345 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:27:03.472335 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.472340 17345 net.cpp:165] Memory required for data: 331265500\nI0817 16:27:03.472348 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:27:03.472360 17345 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:27:03.472367 17345 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:27:03.472376 17345 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:27:03.472622 17345 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:27:03.472637 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.472642 17345 net.cpp:165] Memory required for data: 333313500\nI0817 16:27:03.472653 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:03.472661 17345 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:27:03.472667 17345 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:27:03.472677 17345 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.472731 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:03.472893 17345 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:27:03.472908 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.472913 17345 net.cpp:165] Memory required for data: 335361500\nI0817 16:27:03.472921 17345 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:27:03.472931 17345 net.cpp:100] Creating 
Layer L2_b1_pool\nI0817 16:27:03.472937 17345 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:03.472949 17345 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:27:03.473031 17345 net.cpp:150] Setting up L2_b1_pool\nI0817 16:27:03.473047 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.473052 17345 net.cpp:165] Memory required for data: 337409500\nI0817 16:27:03.473057 17345 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:27:03.473073 17345 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:27:03.473078 17345 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:27:03.473085 17345 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:27:03.473093 17345 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:27:03.473129 17345 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:27:03.473139 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.473142 17345 net.cpp:165] Memory required for data: 339457500\nI0817 16:27:03.473147 17345 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:27:03.473155 17345 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:27:03.473160 17345 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:27:03.473167 17345 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.473176 17345 net.cpp:150] Setting up L2_b1_relu\nI0817 16:27:03.473183 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.473187 17345 net.cpp:165] Memory required for data: 341505500\nI0817 16:27:03.473192 17345 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:27:03.473237 17345 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:27:03.473253 17345 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:27:03.475541 17345 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:27:03.475560 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.475566 17345 net.cpp:165] Memory required for data: 343553500\nI0817 
16:27:03.475571 17345 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:27:03.475585 17345 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:27:03.475600 17345 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:27:03.475607 17345 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:27:03.475615 17345 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:27:03.475693 17345 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:27:03.475708 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.475714 17345 net.cpp:165] Memory required for data: 347649500\nI0817 16:27:03.475719 17345 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.475728 17345 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.475733 17345 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:27:03.475744 17345 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:03.475755 17345 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:03.475800 17345 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.475814 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.475821 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.475826 17345 net.cpp:165] Memory required for data: 355841500\nI0817 16:27:03.475831 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:27:03.475845 17345 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:27:03.475852 17345 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:03.475860 17345 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:27:03.477290 17345 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:27:03.477310 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.477315 17345 net.cpp:165] Memory required for data: 
359937500\nI0817 16:27:03.477339 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:27:03.477349 17345 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:27:03.477355 17345 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:27:03.477367 17345 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:27:03.477608 17345 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:27:03.477622 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.477627 17345 net.cpp:165] Memory required for data: 364033500\nI0817 16:27:03.477638 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:03.477648 17345 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:27:03.477653 17345 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:27:03.477664 17345 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.477715 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:03.477855 17345 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:27:03.477869 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.477874 17345 net.cpp:165] Memory required for data: 368129500\nI0817 16:27:03.477882 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:27:03.477890 17345 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:27:03.477896 17345 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:27:03.477906 17345 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.477916 17345 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:27:03.477923 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.477927 17345 net.cpp:165] Memory required for data: 372225500\nI0817 16:27:03.477932 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:27:03.477946 17345 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:27:03.477953 17345 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:27:03.477962 17345 net.cpp:408] 
L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:27:03.478406 17345 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:27:03.478420 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.478425 17345 net.cpp:165] Memory required for data: 376321500\nI0817 16:27:03.478435 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:27:03.478454 17345 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:27:03.478461 17345 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:27:03.478469 17345 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:27:03.478709 17345 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:27:03.478724 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.478729 17345 net.cpp:165] Memory required for data: 380417500\nI0817 16:27:03.478739 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:03.478747 17345 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:27:03.478754 17345 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:27:03.478765 17345 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.478816 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:03.478957 17345 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:27:03.478971 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.478976 17345 net.cpp:165] Memory required for data: 384513500\nI0817 16:27:03.478984 17345 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:27:03.478993 17345 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:27:03.478998 17345 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:27:03.479007 17345 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:03.479017 17345 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:27:03.479043 17345 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:27:03.479051 17345 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:03.479056 17345 net.cpp:165] Memory required for data: 388609500\nI0817 16:27:03.479061 17345 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:27:03.479068 17345 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:27:03.479077 17345 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:27:03.479084 17345 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.479094 17345 net.cpp:150] Setting up L2_b2_relu\nI0817 16:27:03.479100 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.479105 17345 net.cpp:165] Memory required for data: 392705500\nI0817 16:27:03.479110 17345 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.479116 17345 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.479121 17345 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:27:03.479131 17345 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:03.479142 17345 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:03.479182 17345 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.479194 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.479202 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.479205 17345 net.cpp:165] Memory required for data: 400897500\nI0817 16:27:03.479212 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:27:03.479226 17345 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:27:03.479233 17345 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:03.479241 17345 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:27:03.479701 17345 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:27:03.479715 17345 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:03.479720 17345 net.cpp:165] Memory required for data: 404993500\nI0817 16:27:03.479729 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:27:03.479742 17345 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:27:03.479748 17345 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:27:03.479755 17345 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:27:03.479988 17345 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:27:03.480008 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.480015 17345 net.cpp:165] Memory required for data: 409089500\nI0817 16:27:03.480024 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:03.480033 17345 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:27:03.480039 17345 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:27:03.480049 17345 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.480108 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:03.480250 17345 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:27:03.480263 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.480268 17345 net.cpp:165] Memory required for data: 413185500\nI0817 16:27:03.480278 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:27:03.480285 17345 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:27:03.480290 17345 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:27:03.480301 17345 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.480310 17345 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:27:03.480317 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.480321 17345 net.cpp:165] Memory required for data: 417281500\nI0817 16:27:03.480326 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:27:03.480345 17345 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:27:03.480352 17345 net.cpp:434] 
L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:27:03.480363 17345 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:27:03.480821 17345 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:27:03.480836 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.480841 17345 net.cpp:165] Memory required for data: 421377500\nI0817 16:27:03.480850 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:27:03.480860 17345 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:27:03.480866 17345 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:27:03.480880 17345 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:27:03.481112 17345 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:27:03.481125 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481132 17345 net.cpp:165] Memory required for data: 425473500\nI0817 16:27:03.481142 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:03.481153 17345 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:27:03.481159 17345 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:27:03.481166 17345 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.481220 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:03.481359 17345 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:27:03.481371 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481376 17345 net.cpp:165] Memory required for data: 429569500\nI0817 16:27:03.481385 17345 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:27:03.481398 17345 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:27:03.481405 17345 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:27:03.481411 17345 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:03.481420 17345 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:27:03.481448 17345 net.cpp:150] Setting up 
L2_b3_sum_eltwise\nI0817 16:27:03.481458 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481462 17345 net.cpp:165] Memory required for data: 433665500\nI0817 16:27:03.481468 17345 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:27:03.481475 17345 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:27:03.481487 17345 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:27:03.481498 17345 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:27:03.481508 17345 net.cpp:150] Setting up L2_b3_relu\nI0817 16:27:03.481515 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481528 17345 net.cpp:165] Memory required for data: 437761500\nI0817 16:27:03.481534 17345 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.481541 17345 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.481546 17345 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:27:03.481554 17345 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:03.481564 17345 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:03.481608 17345 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.481621 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481627 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.481631 17345 net.cpp:165] Memory required for data: 445953500\nI0817 16:27:03.481637 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:27:03.481648 17345 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:27:03.481653 17345 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:03.481665 17345 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:27:03.482110 17345 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:27:03.482125 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.482130 17345 net.cpp:165] Memory required for data: 446977500\nI0817 16:27:03.482138 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:27:03.482147 17345 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:27:03.482152 17345 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:27:03.482164 17345 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:27:03.482404 17345 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:27:03.482420 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.482425 17345 net.cpp:165] Memory required for data: 448001500\nI0817 16:27:03.482436 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:03.482445 17345 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:27:03.482450 17345 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:27:03.482458 17345 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.482517 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:03.482663 17345 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:27:03.482677 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.482682 17345 net.cpp:165] Memory required for data: 449025500\nI0817 16:27:03.482692 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:27:03.482702 17345 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:27:03.482708 17345 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:27:03.482715 17345 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.482725 17345 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:27:03.482733 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.482736 17345 net.cpp:165] Memory required for data: 450049500\nI0817 16:27:03.482741 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:27:03.482755 17345 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:27:03.482761 17345 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:27:03.482770 17345 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:27:03.483220 17345 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:27:03.483234 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.483239 17345 net.cpp:165] Memory required for data: 451073500\nI0817 16:27:03.483247 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:27:03.483259 17345 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:27:03.483265 17345 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:27:03.483276 17345 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:27:03.483528 17345 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:27:03.483541 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.483553 17345 net.cpp:165] Memory required for data: 452097500\nI0817 16:27:03.483564 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:03.483572 17345 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:27:03.483578 17345 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:27:03.483589 17345 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.483642 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:03.483790 17345 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:27:03.483804 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.483809 17345 net.cpp:165] Memory required for data: 453121500\nI0817 16:27:03.483819 17345 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:27:03.483827 17345 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:27:03.483834 17345 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:03.483844 17345 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:27:03.483880 17345 net.cpp:150] Setting up L3_b1_pool\nI0817 16:27:03.483891 17345 net.cpp:157] Top shape: 125 
32 8 8 (256000)\nI0817 16:27:03.483896 17345 net.cpp:165] Memory required for data: 454145500\nI0817 16:27:03.483901 17345 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:27:03.483909 17345 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:27:03.483914 17345 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:27:03.483922 17345 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:27:03.483932 17345 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:27:03.483961 17345 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:27:03.483970 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.483975 17345 net.cpp:165] Memory required for data: 455169500\nI0817 16:27:03.483980 17345 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:27:03.483989 17345 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:27:03.483994 17345 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:27:03.484000 17345 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.484009 17345 net.cpp:150] Setting up L3_b1_relu\nI0817 16:27:03.484016 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.484020 17345 net.cpp:165] Memory required for data: 456193500\nI0817 16:27:03.484025 17345 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:27:03.484036 17345 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:27:03.484045 17345 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:27:03.485251 17345 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:27:03.485270 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.485275 17345 net.cpp:165] Memory required for data: 457217500\nI0817 16:27:03.485280 17345 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:27:03.485291 17345 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:27:03.485296 17345 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:27:03.485303 17345 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:27:03.485314 
17345 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:27:03.485353 17345 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:27:03.485368 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.485373 17345 net.cpp:165] Memory required for data: 459265500\nI0817 16:27:03.485378 17345 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:03.485385 17345 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:03.485391 17345 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:27:03.485399 17345 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:03.485410 17345 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:03.485456 17345 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:03.485468 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.485491 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.485496 17345 net.cpp:165] Memory required for data: 463361500\nI0817 16:27:03.485501 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:27:03.485517 17345 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:27:03.485523 17345 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:03.485532 17345 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:27:03.487490 17345 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:27:03.487507 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.487514 17345 net.cpp:165] Memory required for data: 465409500\nI0817 16:27:03.487522 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:27:03.487535 17345 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:27:03.487542 17345 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:27:03.487551 17345 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:27:03.487807 17345 
net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:27:03.487820 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.487825 17345 net.cpp:165] Memory required for data: 467457500\nI0817 16:27:03.487836 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:03.487844 17345 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:27:03.487850 17345 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:27:03.487861 17345 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.487918 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:03.488068 17345 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:27:03.488081 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.488086 17345 net.cpp:165] Memory required for data: 469505500\nI0817 16:27:03.488095 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:27:03.488106 17345 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:27:03.488112 17345 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:27:03.488121 17345 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.488129 17345 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:27:03.488137 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.488142 17345 net.cpp:165] Memory required for data: 471553500\nI0817 16:27:03.488145 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:27:03.488160 17345 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:27:03.488167 17345 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:27:03.488178 17345 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:27:03.489181 17345 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:27:03.489195 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.489200 17345 net.cpp:165] Memory required for data: 473601500\nI0817 16:27:03.489209 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 
16:27:03.489219 17345 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:27:03.489225 17345 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:27:03.489236 17345 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:27:03.489490 17345 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:27:03.489506 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.489512 17345 net.cpp:165] Memory required for data: 475649500\nI0817 16:27:03.489522 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:03.489531 17345 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:27:03.489537 17345 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:27:03.489545 17345 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.489600 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:03.489748 17345 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:27:03.489761 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.489766 17345 net.cpp:165] Memory required for data: 477697500\nI0817 16:27:03.489775 17345 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:27:03.489794 17345 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:27:03.489801 17345 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:27:03.489809 17345 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:03.489816 17345 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:27:03.489851 17345 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:27:03.489863 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.489868 17345 net.cpp:165] Memory required for data: 479745500\nI0817 16:27:03.489873 17345 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:27:03.489881 17345 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:27:03.489886 17345 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:27:03.489893 17345 net.cpp:395] L3_b2_relu -> 
L3_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.489902 17345 net.cpp:150] Setting up L3_b2_relu\nI0817 16:27:03.489909 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.489913 17345 net.cpp:165] Memory required for data: 481793500\nI0817 16:27:03.489918 17345 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:03.489925 17345 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:03.489930 17345 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:27:03.489941 17345 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:03.489953 17345 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:03.489995 17345 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:03.490007 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.490013 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.490018 17345 net.cpp:165] Memory required for data: 485889500\nI0817 16:27:03.490023 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:27:03.490036 17345 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:27:03.490043 17345 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:03.490052 17345 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:27:03.491063 17345 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:27:03.491078 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.491084 17345 net.cpp:165] Memory required for data: 487937500\nI0817 16:27:03.491093 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:27:03.491106 17345 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:27:03.491111 17345 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:27:03.491119 17345 net.cpp:408] L3_b3_cbr1_bn -> 
L3_b3_cbr1_bn_top\nI0817 16:27:03.491372 17345 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:27:03.491385 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.491390 17345 net.cpp:165] Memory required for data: 489985500\nI0817 16:27:03.491400 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:03.491412 17345 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:27:03.491420 17345 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:27:03.491426 17345 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.491490 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:03.491641 17345 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:27:03.491653 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.491658 17345 net.cpp:165] Memory required for data: 492033500\nI0817 16:27:03.491667 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:27:03.491678 17345 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:27:03.491684 17345 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:27:03.491691 17345 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.491701 17345 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:27:03.491708 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.491719 17345 net.cpp:165] Memory required for data: 494081500\nI0817 16:27:03.491724 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:27:03.491739 17345 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:27:03.491744 17345 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:27:03.491755 17345 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:27:03.492760 17345 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:27:03.492775 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.492780 17345 net.cpp:165] Memory required for data: 496129500\nI0817 16:27:03.492789 17345 layer_factory.hpp:77] 
Creating layer L3_b3_cbr2_bn\nI0817 16:27:03.492799 17345 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:27:03.492805 17345 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:27:03.492820 17345 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:27:03.493073 17345 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:27:03.493086 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.493091 17345 net.cpp:165] Memory required for data: 498177500\nI0817 16:27:03.493122 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:03.493132 17345 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:27:03.493139 17345 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:27:03.493146 17345 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.493203 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:03.493357 17345 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:27:03.493369 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.493376 17345 net.cpp:165] Memory required for data: 500225500\nI0817 16:27:03.493384 17345 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:27:03.493393 17345 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:27:03.493399 17345 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:27:03.493407 17345 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:03.493413 17345 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:27:03.493445 17345 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:27:03.493454 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.493459 17345 net.cpp:165] Memory required for data: 502273500\nI0817 16:27:03.493464 17345 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:27:03.493475 17345 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:27:03.493487 17345 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 
16:27:03.493499 17345 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:27:03.493508 17345 net.cpp:150] Setting up L3_b3_relu\nI0817 16:27:03.493515 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.493520 17345 net.cpp:165] Memory required for data: 504321500\nI0817 16:27:03.493525 17345 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:27:03.493533 17345 net.cpp:100] Creating Layer post_pool\nI0817 16:27:03.493538 17345 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0817 16:27:03.493546 17345 net.cpp:408] post_pool -> post_pool\nI0817 16:27:03.493582 17345 net.cpp:150] Setting up post_pool\nI0817 16:27:03.493595 17345 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:27:03.493599 17345 net.cpp:165] Memory required for data: 504353500\nI0817 16:27:03.493605 17345 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:27:03.493681 17345 net.cpp:100] Creating Layer post_FC\nI0817 16:27:03.493693 17345 net.cpp:434] post_FC <- post_pool\nI0817 16:27:03.493703 17345 net.cpp:408] post_FC -> post_FC_top\nI0817 16:27:03.493929 17345 net.cpp:150] Setting up post_FC\nI0817 16:27:03.493945 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.493950 17345 net.cpp:165] Memory required for data: 504358500\nI0817 16:27:03.493959 17345 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:27:03.493968 17345 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:27:03.493981 17345 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:27:03.493993 17345 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:27:03.494004 17345 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:27:03.494047 17345 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:27:03.494062 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.494069 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.494074 17345 net.cpp:165] 
Memory required for data: 504368500\nI0817 16:27:03.494079 17345 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:27:03.494120 17345 net.cpp:100] Creating Layer accuracy\nI0817 16:27:03.494132 17345 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:27:03.494139 17345 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:27:03.494148 17345 net.cpp:408] accuracy -> accuracy\nI0817 16:27:03.494189 17345 net.cpp:150] Setting up accuracy\nI0817 16:27:03.494201 17345 net.cpp:157] Top shape: (1)\nI0817 16:27:03.494207 17345 net.cpp:165] Memory required for data: 504368504\nI0817 16:27:03.494212 17345 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:03.494220 17345 net.cpp:100] Creating Layer loss\nI0817 16:27:03.494226 17345 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:27:03.494233 17345 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:27:03.494244 17345 net.cpp:408] loss -> loss\nI0817 16:27:03.494289 17345 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:03.494441 17345 net.cpp:150] Setting up loss\nI0817 16:27:03.494455 17345 net.cpp:157] Top shape: (1)\nI0817 16:27:03.494462 17345 net.cpp:160]     with loss weight 1\nI0817 16:27:03.494539 17345 net.cpp:165] Memory required for data: 504368508\nI0817 16:27:03.494549 17345 net.cpp:226] loss needs backward computation.\nI0817 16:27:03.494555 17345 net.cpp:228] accuracy does not need backward computation.\nI0817 16:27:03.494561 17345 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:27:03.494566 17345 net.cpp:226] post_FC needs backward computation.\nI0817 16:27:03.494571 17345 net.cpp:226] post_pool needs backward computation.\nI0817 16:27:03.494576 17345 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:27:03.494581 17345 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.494586 17345 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.494591 17345 net.cpp:226] 
L3_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.494596 17345 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.494601 17345 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:27:03.494606 17345 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.494611 17345 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.494616 17345 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:27:03.494621 17345 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:27:03.494626 17345 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:27:03.494630 17345 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.494635 17345 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.494640 17345 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.494645 17345 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.494650 17345 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:27:03.494655 17345 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.494660 17345 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.494665 17345 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.494670 17345 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:27:03.494675 17345 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:27:03.494681 17345 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:27:03.494693 17345 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:27:03.494699 17345 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.494704 17345 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:27:03.494709 17345 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.494714 17345 
net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.494719 17345 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.494724 17345 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.494729 17345 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.494735 17345 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.494740 17345 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:27:03.494745 17345 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:27:03.494753 17345 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:27:03.494758 17345 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.494765 17345 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.494770 17345 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.494774 17345 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.494779 17345 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:27:03.494784 17345 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.494789 17345 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.494793 17345 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:27:03.494799 17345 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:27:03.494804 17345 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:27:03.494809 17345 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.494815 17345 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.494820 17345 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.494825 17345 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.494830 17345 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 
16:27:03.494835 17345 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.494840 17345 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.494845 17345 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.494850 17345 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:27:03.494855 17345 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:27:03.494861 17345 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:27:03.494866 17345 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:27:03.494871 17345 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.494876 17345 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:27:03.494881 17345 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.494886 17345 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.494892 17345 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.494897 17345 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.494902 17345 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.494906 17345 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.494912 17345 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:27:03.494917 17345 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:27:03.494922 17345 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:27:03.494927 17345 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.494932 17345 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.494942 17345 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.494948 17345 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.494953 17345 net.cpp:226] L1_b3_cbr1_relu needs backward 
computation.\nI0817 16:27:03.494958 17345 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.494963 17345 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.494968 17345 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:27:03.494974 17345 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:27:03.494979 17345 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:27:03.494984 17345 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.494989 17345 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.494994 17345 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.495000 17345 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.495005 17345 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:27:03.495012 17345 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.495018 17345 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.495023 17345 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.495028 17345 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:27:03.495034 17345 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:27:03.495039 17345 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.495044 17345 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.495050 17345 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.495055 17345 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.495060 17345 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.495065 17345 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.495070 17345 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.495075 17345 net.cpp:226] 
L1_b1_cbr1_conv needs backward computation.\nI0817 16:27:03.495080 17345 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:27:03.495085 17345 net.cpp:226] pre_relu needs backward computation.\nI0817 16:27:03.495090 17345 net.cpp:226] pre_scale needs backward computation.\nI0817 16:27:03.495095 17345 net.cpp:226] pre_bn needs backward computation.\nI0817 16:27:03.495100 17345 net.cpp:226] pre_conv needs backward computation.\nI0817 16:27:03.495106 17345 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:27:03.495112 17345 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:27:03.495116 17345 net.cpp:270] This network produces output accuracy\nI0817 16:27:03.495123 17345 net.cpp:270] This network produces output loss\nI0817 16:27:03.495267 17345 net.cpp:283] Network initialization done.\nI0817 16:27:03.499585 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:03.499609 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:03.499657 17345 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:27:03.499814 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:27:03.499832 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:27:03.499842 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:27:03.499852 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:27:03.499861 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:27:03.499879 17345 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:27:03.499888 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:27:03.499897 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:27:03.499907 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:27:03.499915 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:27:03.499927 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:27:03.499936 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:27:03.499945 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:27:03.499954 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:27:03.499963 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:27:03.499971 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:27:03.499982 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:27:03.499991 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:27:03.500000 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:27:03.500008 17345 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b3_cbr2_bn\nI0817 16:27:03.500675 17345 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n  
  moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  
bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: 
\"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler 
{\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: 
\"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0817 16:27:03.501247 17345 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:27:03.501467 17345 net.cpp:100] Creating Layer dataLayer\nI0817 16:27:03.501492 17345 net.cpp:408] dataLayer -> data_top\nI0817 16:27:03.501508 17345 net.cpp:408] dataLayer -> label\nI0817 16:27:03.501520 17345 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:27:03.511634 17352 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:27:03.511883 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:03.519763 17345 net.cpp:150] Setting up dataLayer\nI0817 16:27:03.519784 17345 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:27:03.519795 17345 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:03.519801 17345 net.cpp:165] Memory required for data: 1536500\nI0817 16:27:03.519807 17345 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:27:03.519820 17345 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:27:03.519826 17345 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:27:03.519834 17345 net.cpp:408] label_dataLayer_1_split -> 
label_dataLayer_1_split_0\nI0817 16:27:03.519846 17345 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:27:03.519932 17345 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:27:03.519948 17345 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:03.519956 17345 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:03.519961 17345 net.cpp:165] Memory required for data: 1537500\nI0817 16:27:03.519968 17345 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:27:03.519984 17345 net.cpp:100] Creating Layer pre_conv\nI0817 16:27:03.519992 17345 net.cpp:434] pre_conv <- data_top\nI0817 16:27:03.520005 17345 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:27:03.520494 17345 net.cpp:150] Setting up pre_conv\nI0817 16:27:03.520510 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.520524 17345 net.cpp:165] Memory required for data: 9729500\nI0817 16:27:03.520539 17345 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:27:03.520557 17345 net.cpp:100] Creating Layer pre_bn\nI0817 16:27:03.520565 17345 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:27:03.520576 17345 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:27:03.520866 17345 net.cpp:150] Setting up pre_bn\nI0817 16:27:03.520882 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.520887 17345 net.cpp:165] Memory required for data: 17921500\nI0817 16:27:03.520903 17345 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:03.520913 17345 net.cpp:100] Creating Layer pre_scale\nI0817 16:27:03.520920 17345 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:27:03.520931 17345 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:27:03.520997 17345 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:03.521209 17345 net.cpp:150] Setting up pre_scale\nI0817 16:27:03.521229 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.521236 17345 net.cpp:165] Memory required for data: 26113500\nI0817 16:27:03.521247 17345 layer_factory.hpp:77] 
Creating layer pre_relu\nI0817 16:27:03.521255 17345 net.cpp:100] Creating Layer pre_relu\nI0817 16:27:03.521261 17345 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:27:03.521268 17345 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:27:03.521278 17345 net.cpp:150] Setting up pre_relu\nI0817 16:27:03.521286 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.521289 17345 net.cpp:165] Memory required for data: 34305500\nI0817 16:27:03.521294 17345 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:27:03.521304 17345 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:27:03.521309 17345 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:27:03.521317 17345 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:27:03.521329 17345 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:27:03.521427 17345 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:27:03.521456 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.521466 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.521471 17345 net.cpp:165] Memory required for data: 50689500\nI0817 16:27:03.521476 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:27:03.521497 17345 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:27:03.521508 17345 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:27:03.521523 17345 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:27:03.521965 17345 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:27:03.521981 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.521986 17345 net.cpp:165] Memory required for data: 58881500\nI0817 16:27:03.521998 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:27:03.522012 17345 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:27:03.522018 17345 net.cpp:434] L1_b1_cbr1_bn <- 
L1_b1_cbr1_conv_top\nI0817 16:27:03.522032 17345 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:27:03.522385 17345 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:27:03.522403 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.522408 17345 net.cpp:165] Memory required for data: 67073500\nI0817 16:27:03.522418 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:03.522430 17345 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:27:03.522436 17345 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:27:03.522444 17345 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.522552 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:03.522902 17345 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:27:03.522919 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.522925 17345 net.cpp:165] Memory required for data: 75265500\nI0817 16:27:03.522934 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:27:03.522950 17345 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:27:03.522956 17345 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:27:03.522967 17345 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.522980 17345 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:27:03.522989 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.522992 17345 net.cpp:165] Memory required for data: 83457500\nI0817 16:27:03.522997 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:27:03.523018 17345 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:27:03.523025 17345 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:27:03.523035 17345 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:27:03.523439 17345 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:27:03.523454 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.523463 17345 net.cpp:165] 
Memory required for data: 91649500\nI0817 16:27:03.523473 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:27:03.523491 17345 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:27:03.523499 17345 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:27:03.523506 17345 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:27:03.523810 17345 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:27:03.523825 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.523830 17345 net.cpp:165] Memory required for data: 99841500\nI0817 16:27:03.523849 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:03.523862 17345 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:27:03.523869 17345 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:27:03.523876 17345 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.523944 17345 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:03.524118 17345 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:27:03.524159 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524164 17345 net.cpp:165] Memory required for data: 108033500\nI0817 16:27:03.524173 17345 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:27:03.524183 17345 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:27:03.524188 17345 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:27:03.524195 17345 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:27:03.524209 17345 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:27:03.524250 17345 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:27:03.524265 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524269 17345 net.cpp:165] Memory required for data: 116225500\nI0817 16:27:03.524277 17345 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:27:03.524286 17345 net.cpp:100] Creating Layer L1_b1_relu\nI0817 
16:27:03.524291 17345 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:27:03.524299 17345 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.524308 17345 net.cpp:150] Setting up L1_b1_relu\nI0817 16:27:03.524315 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524319 17345 net.cpp:165] Memory required for data: 124417500\nI0817 16:27:03.524324 17345 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.524340 17345 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.524350 17345 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:27:03.524358 17345 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:03.524368 17345 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:03.524425 17345 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:03.524440 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524456 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524461 17345 net.cpp:165] Memory required for data: 140801500\nI0817 16:27:03.524466 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:27:03.524482 17345 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:27:03.524489 17345 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:03.524507 17345 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:27:03.524917 17345 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:27:03.524935 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.524940 17345 net.cpp:165] Memory required for data: 148993500\nI0817 16:27:03.524952 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:27:03.524962 17345 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 
16:27:03.524967 17345 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:27:03.524976 17345 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:27:03.525283 17345 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:27:03.525298 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.525303 17345 net.cpp:165] Memory required for data: 157185500\nI0817 16:27:03.525315 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:03.525329 17345 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:27:03.525336 17345 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:27:03.525344 17345 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.525408 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:03.525604 17345 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:27:03.525620 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.525626 17345 net.cpp:165] Memory required for data: 165377500\nI0817 16:27:03.525635 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:27:03.525643 17345 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:27:03.525650 17345 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:27:03.525663 17345 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.525674 17345 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:27:03.525681 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.525686 17345 net.cpp:165] Memory required for data: 173569500\nI0817 16:27:03.525691 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:27:03.525704 17345 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:27:03.525710 17345 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:27:03.525732 17345 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:27:03.526317 17345 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:27:03.526331 17345 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:27:03.526336 17345 net.cpp:165] Memory required for data: 181761500\nI0817 16:27:03.526348 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:27:03.526358 17345 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:27:03.526365 17345 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:27:03.526372 17345 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:27:03.526684 17345 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:27:03.526698 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.526705 17345 net.cpp:165] Memory required for data: 189953500\nI0817 16:27:03.526732 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:03.526746 17345 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:27:03.526752 17345 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:27:03.526760 17345 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.526829 17345 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:03.527032 17345 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:27:03.527047 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527052 17345 net.cpp:165] Memory required for data: 198145500\nI0817 16:27:03.527065 17345 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:27:03.527086 17345 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:27:03.527092 17345 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:27:03.527103 17345 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:03.527112 17345 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:27:03.527150 17345 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:27:03.527163 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527168 17345 net.cpp:165] Memory required for data: 206337500\nI0817 16:27:03.527173 17345 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 
16:27:03.527181 17345 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:27:03.527186 17345 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:27:03.527201 17345 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.527216 17345 net.cpp:150] Setting up L1_b2_relu\nI0817 16:27:03.527225 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527228 17345 net.cpp:165] Memory required for data: 214529500\nI0817 16:27:03.527235 17345 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.527241 17345 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.527249 17345 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:27:03.527257 17345 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:03.527266 17345 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:03.527320 17345 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:03.527331 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527338 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527343 17345 net.cpp:165] Memory required for data: 230913500\nI0817 16:27:03.527349 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:27:03.527364 17345 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:27:03.527370 17345 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:03.527381 17345 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:27:03.527786 17345 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:27:03.527802 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.527807 17345 net.cpp:165] Memory required for data: 239105500\nI0817 16:27:03.527817 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 
16:27:03.527829 17345 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:27:03.527837 17345 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:27:03.527848 17345 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:27:03.528154 17345 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:27:03.528172 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.528178 17345 net.cpp:165] Memory required for data: 247297500\nI0817 16:27:03.528189 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:03.528198 17345 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:27:03.528203 17345 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:27:03.528214 17345 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.528283 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:03.528460 17345 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:27:03.528473 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.528483 17345 net.cpp:165] Memory required for data: 255489500\nI0817 16:27:03.528493 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:27:03.528506 17345 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:27:03.528514 17345 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:27:03.528523 17345 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.528537 17345 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:27:03.528553 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.528560 17345 net.cpp:165] Memory required for data: 263681500\nI0817 16:27:03.528564 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:27:03.528576 17345 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:27:03.528584 17345 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:27:03.528597 17345 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:27:03.529011 17345 net.cpp:150] Setting up 
L1_b3_cbr2_conv\nI0817 16:27:03.529027 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.529033 17345 net.cpp:165] Memory required for data: 271873500\nI0817 16:27:03.529042 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:27:03.529058 17345 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:27:03.529065 17345 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:27:03.529109 17345 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:27:03.529480 17345 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:27:03.529502 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.529511 17345 net.cpp:165] Memory required for data: 280065500\nI0817 16:27:03.529523 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:03.529532 17345 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:27:03.529538 17345 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:27:03.529547 17345 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.529615 17345 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:03.529803 17345 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:27:03.529816 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.529821 17345 net.cpp:165] Memory required for data: 288257500\nI0817 16:27:03.529831 17345 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:27:03.529845 17345 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:27:03.529855 17345 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:27:03.529862 17345 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:03.529870 17345 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:27:03.529911 17345 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:27:03.529923 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.529929 17345 net.cpp:165] Memory required for data: 296449500\nI0817 
16:27:03.529934 17345 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:27:03.529942 17345 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:27:03.529947 17345 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:27:03.529960 17345 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:27:03.529974 17345 net.cpp:150] Setting up L1_b3_relu\nI0817 16:27:03.529981 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.529985 17345 net.cpp:165] Memory required for data: 304641500\nI0817 16:27:03.529990 17345 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.529997 17345 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.530004 17345 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:27:03.530010 17345 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:03.530019 17345 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:03.530076 17345 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:03.530087 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.530093 17345 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:03.530100 17345 net.cpp:165] Memory required for data: 321025500\nI0817 16:27:03.530107 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:27:03.530120 17345 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:27:03.530128 17345 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:03.530143 17345 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:27:03.530532 17345 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:27:03.530547 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.530556 17345 net.cpp:165] Memory required for data: 323073500\nI0817 
16:27:03.530567 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:27:03.530580 17345 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:27:03.530588 17345 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:27:03.530598 17345 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:27:03.530866 17345 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:27:03.530884 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.530889 17345 net.cpp:165] Memory required for data: 325121500\nI0817 16:27:03.530899 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:03.530907 17345 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:27:03.530912 17345 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:27:03.530921 17345 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.530977 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:03.531148 17345 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:27:03.531163 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.531168 17345 net.cpp:165] Memory required for data: 327169500\nI0817 16:27:03.531177 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:27:03.531188 17345 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:27:03.531193 17345 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:27:03.531203 17345 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.531214 17345 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:27:03.531221 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.531225 17345 net.cpp:165] Memory required for data: 329217500\nI0817 16:27:03.531231 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:27:03.531241 17345 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:27:03.531247 17345 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:27:03.531258 17345 net.cpp:408] L2_b1_cbr2_conv -> 
L2_b1_cbr2_conv_top\nI0817 16:27:03.531599 17345 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:27:03.531613 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.531620 17345 net.cpp:165] Memory required for data: 331265500\nI0817 16:27:03.531628 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:27:03.531637 17345 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:27:03.531643 17345 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:27:03.531654 17345 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:27:03.531926 17345 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:27:03.531940 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.531945 17345 net.cpp:165] Memory required for data: 333313500\nI0817 16:27:03.531957 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:03.531967 17345 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:27:03.531972 17345 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:27:03.531980 17345 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.532037 17345 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:03.532191 17345 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:27:03.532203 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.532208 17345 net.cpp:165] Memory required for data: 335361500\nI0817 16:27:03.532217 17345 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:27:03.532227 17345 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:27:03.532232 17345 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:03.532243 17345 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:27:03.532272 17345 net.cpp:150] Setting up L2_b1_pool\nI0817 16:27:03.532289 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.532294 17345 net.cpp:165] Memory required for data: 337409500\nI0817 16:27:03.532299 17345 layer_factory.hpp:77] Creating layer 
L2_b1_sum_eltwise\nI0817 16:27:03.532310 17345 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:27:03.532316 17345 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:27:03.532323 17345 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:27:03.532330 17345 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:27:03.532363 17345 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:27:03.532372 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.532377 17345 net.cpp:165] Memory required for data: 339457500\nI0817 16:27:03.532382 17345 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:27:03.532389 17345 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:27:03.532394 17345 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:27:03.532405 17345 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.532414 17345 net.cpp:150] Setting up L2_b1_relu\nI0817 16:27:03.532421 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.532425 17345 net.cpp:165] Memory required for data: 341505500\nI0817 16:27:03.532430 17345 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:27:03.532438 17345 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:27:03.532446 17345 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:27:03.534664 17345 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:27:03.534684 17345 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:03.534689 17345 net.cpp:165] Memory required for data: 343553500\nI0817 16:27:03.534695 17345 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:27:03.534708 17345 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:27:03.534714 17345 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:27:03.534721 17345 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:27:03.534729 17345 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:27:03.534773 17345 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:27:03.534786 17345 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.534790 17345 net.cpp:165] Memory required for data: 347649500\nI0817 16:27:03.534795 17345 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.534803 17345 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.534808 17345 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:27:03.534819 17345 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:03.534829 17345 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:03.534876 17345 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:03.534891 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.534898 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.534904 17345 net.cpp:165] Memory required for data: 355841500\nI0817 16:27:03.534909 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:27:03.534919 17345 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:27:03.534925 17345 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:03.534934 17345 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:27:03.535413 17345 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:27:03.535430 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.535436 17345 net.cpp:165] Memory required for data: 359937500\nI0817 16:27:03.535459 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:27:03.535468 17345 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:27:03.535475 17345 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:27:03.535516 17345 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:27:03.535784 17345 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:27:03.535797 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 
16:27:03.535809 17345 net.cpp:165] Memory required for data: 364033500\nI0817 16:27:03.535821 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:03.535830 17345 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:27:03.535836 17345 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:27:03.535847 17345 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.535900 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:03.536056 17345 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:27:03.536067 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.536072 17345 net.cpp:165] Memory required for data: 368129500\nI0817 16:27:03.536082 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:27:03.536089 17345 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:27:03.536095 17345 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:27:03.536105 17345 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.536115 17345 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:27:03.536123 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.536128 17345 net.cpp:165] Memory required for data: 372225500\nI0817 16:27:03.536131 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:27:03.536145 17345 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:27:03.536151 17345 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:27:03.536160 17345 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:27:03.536645 17345 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:27:03.536661 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.536666 17345 net.cpp:165] Memory required for data: 376321500\nI0817 16:27:03.536675 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:27:03.536687 17345 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:27:03.536695 17345 net.cpp:434] L2_b2_cbr2_bn 
<- L2_b2_cbr2_conv_top\nI0817 16:27:03.536702 17345 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:27:03.536953 17345 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:27:03.536967 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.536972 17345 net.cpp:165] Memory required for data: 380417500\nI0817 16:27:03.536981 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:03.536990 17345 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:27:03.536996 17345 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:27:03.537003 17345 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.537062 17345 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:03.537237 17345 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:27:03.537255 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.537261 17345 net.cpp:165] Memory required for data: 384513500\nI0817 16:27:03.537268 17345 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:27:03.537277 17345 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:27:03.537283 17345 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:27:03.537291 17345 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:03.537302 17345 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:27:03.537331 17345 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:27:03.537343 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.537348 17345 net.cpp:165] Memory required for data: 388609500\nI0817 16:27:03.537353 17345 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:27:03.537360 17345 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:27:03.537365 17345 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:27:03.537376 17345 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.537385 17345 net.cpp:150] Setting up L2_b2_relu\nI0817 
16:27:03.537392 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.537396 17345 net.cpp:165] Memory required for data: 392705500\nI0817 16:27:03.537408 17345 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.537415 17345 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.537421 17345 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:27:03.537431 17345 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:03.537441 17345 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:03.537492 17345 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:03.537503 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.537509 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.537514 17345 net.cpp:165] Memory required for data: 400897500\nI0817 16:27:03.537519 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:27:03.537533 17345 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:27:03.537539 17345 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:03.537549 17345 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:27:03.538027 17345 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:27:03.538041 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.538046 17345 net.cpp:165] Memory required for data: 404993500\nI0817 16:27:03.538054 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:27:03.538066 17345 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:27:03.538072 17345 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:27:03.538080 17345 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:27:03.538338 17345 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 
16:27:03.538352 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.538357 17345 net.cpp:165] Memory required for data: 409089500\nI0817 16:27:03.538367 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:03.538375 17345 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:27:03.538381 17345 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:27:03.538393 17345 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.538449 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:03.538607 17345 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:27:03.538622 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.538627 17345 net.cpp:165] Memory required for data: 413185500\nI0817 16:27:03.538636 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:27:03.538645 17345 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:27:03.538650 17345 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:27:03.538661 17345 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.538671 17345 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:27:03.538676 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.538681 17345 net.cpp:165] Memory required for data: 417281500\nI0817 16:27:03.538686 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:27:03.538704 17345 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:27:03.538710 17345 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:27:03.538723 17345 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:27:03.539191 17345 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:27:03.539206 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.539211 17345 net.cpp:165] Memory required for data: 421377500\nI0817 16:27:03.539221 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:27:03.539228 17345 net.cpp:100] Creating 
Layer L2_b3_cbr2_bn\nI0817 16:27:03.539234 17345 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:27:03.539273 17345 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:27:03.539541 17345 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:27:03.539556 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.539569 17345 net.cpp:165] Memory required for data: 425473500\nI0817 16:27:03.539580 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:03.539592 17345 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:27:03.539598 17345 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:27:03.539607 17345 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.539664 17345 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:03.539815 17345 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:27:03.539829 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.539834 17345 net.cpp:165] Memory required for data: 429569500\nI0817 16:27:03.539844 17345 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:27:03.539855 17345 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:27:03.539860 17345 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:27:03.539867 17345 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:03.539875 17345 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:27:03.539906 17345 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:27:03.539916 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.539921 17345 net.cpp:165] Memory required for data: 433665500\nI0817 16:27:03.539924 17345 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:27:03.539932 17345 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:27:03.539937 17345 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:27:03.539944 17345 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top 
(in-place)\nI0817 16:27:03.539953 17345 net.cpp:150] Setting up L2_b3_relu\nI0817 16:27:03.539960 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.539965 17345 net.cpp:165] Memory required for data: 437761500\nI0817 16:27:03.539969 17345 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.539979 17345 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.539984 17345 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:27:03.539993 17345 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:03.540001 17345 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:03.540050 17345 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:03.540062 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.540069 17345 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:03.540073 17345 net.cpp:165] Memory required for data: 445953500\nI0817 16:27:03.540078 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:27:03.540089 17345 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:27:03.540096 17345 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:03.540107 17345 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:27:03.540598 17345 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:27:03.540613 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.540618 17345 net.cpp:165] Memory required for data: 446977500\nI0817 16:27:03.540627 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:27:03.540637 17345 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:27:03.540642 17345 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:27:03.540657 17345 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 
16:27:03.540920 17345 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:27:03.540936 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.540941 17345 net.cpp:165] Memory required for data: 448001500\nI0817 16:27:03.540952 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:03.540961 17345 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:27:03.540966 17345 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:27:03.540982 17345 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.541039 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:03.541204 17345 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:27:03.541218 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.541223 17345 net.cpp:165] Memory required for data: 449025500\nI0817 16:27:03.541231 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:27:03.541242 17345 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:27:03.541249 17345 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:27:03.541255 17345 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:03.541265 17345 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:27:03.541272 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.541276 17345 net.cpp:165] Memory required for data: 450049500\nI0817 16:27:03.541281 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:27:03.541296 17345 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:27:03.541301 17345 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:27:03.541309 17345 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:27:03.541797 17345 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:27:03.541812 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.541817 17345 net.cpp:165] Memory required for data: 451073500\nI0817 16:27:03.541826 17345 layer_factory.hpp:77] Creating layer 
L3_b1_cbr2_bn\nI0817 16:27:03.541837 17345 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:27:03.541844 17345 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:27:03.541854 17345 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:27:03.542119 17345 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:27:03.542132 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.542137 17345 net.cpp:165] Memory required for data: 452097500\nI0817 16:27:03.542147 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:03.542155 17345 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:27:03.542161 17345 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:27:03.542171 17345 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:27:03.542229 17345 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:03.542390 17345 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:27:03.542404 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.542409 17345 net.cpp:165] Memory required for data: 453121500\nI0817 16:27:03.542418 17345 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:27:03.542426 17345 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:27:03.542433 17345 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:03.542444 17345 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:27:03.542487 17345 net.cpp:150] Setting up L3_b1_pool\nI0817 16:27:03.542500 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.542505 17345 net.cpp:165] Memory required for data: 454145500\nI0817 16:27:03.542510 17345 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:27:03.542520 17345 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:27:03.542524 17345 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:27:03.542531 17345 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:27:03.542542 17345 net.cpp:408] L3_b1_sum_eltwise -> 
L3_b1_sum_eltwise_top\nI0817 16:27:03.542575 17345 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:27:03.542587 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.542593 17345 net.cpp:165] Memory required for data: 455169500\nI0817 16:27:03.542598 17345 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:27:03.542604 17345 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:27:03.542610 17345 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:27:03.542618 17345 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:27:03.542626 17345 net.cpp:150] Setting up L3_b1_relu\nI0817 16:27:03.542641 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.542646 17345 net.cpp:165] Memory required for data: 456193500\nI0817 16:27:03.542651 17345 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:27:03.542662 17345 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:27:03.542670 17345 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:27:03.543912 17345 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:27:03.543931 17345 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:03.543936 17345 net.cpp:165] Memory required for data: 457217500\nI0817 16:27:03.543942 17345 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:27:03.543951 17345 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:27:03.543957 17345 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:27:03.543964 17345 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:27:03.543975 17345 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:27:03.544018 17345 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:27:03.544034 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.544039 17345 net.cpp:165] Memory required for data: 459265500\nI0817 16:27:03.544044 17345 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:03.544051 17345 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 
16:27:03.544057 17345 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:27:03.544064 17345 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:03.544077 17345 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:03.544126 17345 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:03.544138 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.544145 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.544149 17345 net.cpp:165] Memory required for data: 463361500\nI0817 16:27:03.544154 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:27:03.544168 17345 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:27:03.544175 17345 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:03.544184 17345 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:27:03.545223 17345 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:27:03.545238 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.545244 17345 net.cpp:165] Memory required for data: 465409500\nI0817 16:27:03.545253 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:27:03.545264 17345 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:27:03.545271 17345 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:27:03.545280 17345 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:27:03.545560 17345 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:27:03.545578 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.545583 17345 net.cpp:165] Memory required for data: 467457500\nI0817 16:27:03.545593 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:03.545605 17345 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:27:03.545613 17345 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:27:03.545619 17345 net.cpp:395] 
L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.545680 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:03.545838 17345 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:27:03.545853 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.545858 17345 net.cpp:165] Memory required for data: 469505500\nI0817 16:27:03.545867 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:27:03.545876 17345 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:27:03.545881 17345 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:27:03.545889 17345 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:03.545898 17345 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:27:03.545913 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.545919 17345 net.cpp:165] Memory required for data: 471553500\nI0817 16:27:03.545923 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:27:03.545938 17345 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:27:03.545944 17345 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:27:03.545953 17345 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:27:03.547003 17345 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:27:03.547019 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547024 17345 net.cpp:165] Memory required for data: 473601500\nI0817 16:27:03.547032 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:27:03.547044 17345 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:27:03.547050 17345 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:27:03.547061 17345 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:27:03.547327 17345 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:27:03.547339 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547344 17345 net.cpp:165] Memory required for data: 475649500\nI0817 16:27:03.547354 17345 
layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:03.547363 17345 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:27:03.547369 17345 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:27:03.547379 17345 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:27:03.547438 17345 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:03.547608 17345 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:27:03.547622 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547627 17345 net.cpp:165] Memory required for data: 477697500\nI0817 16:27:03.547636 17345 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:27:03.547648 17345 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:27:03.547655 17345 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:27:03.547662 17345 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:03.547669 17345 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:27:03.547706 17345 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:27:03.547719 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547724 17345 net.cpp:165] Memory required for data: 479745500\nI0817 16:27:03.547729 17345 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:27:03.547735 17345 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:27:03.547741 17345 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:27:03.547751 17345 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:27:03.547760 17345 net.cpp:150] Setting up L3_b2_relu\nI0817 16:27:03.547767 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547772 17345 net.cpp:165] Memory required for data: 481793500\nI0817 16:27:03.547776 17345 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:03.547783 17345 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 
16:27:03.547788 17345 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:27:03.547796 17345 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:03.547806 17345 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:03.547854 17345 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:03.547866 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547873 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.547878 17345 net.cpp:165] Memory required for data: 485889500\nI0817 16:27:03.547881 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:27:03.547893 17345 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:27:03.547899 17345 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:03.547917 17345 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:27:03.548954 17345 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:27:03.548969 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.548974 17345 net.cpp:165] Memory required for data: 487937500\nI0817 16:27:03.548984 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:27:03.548995 17345 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:27:03.549001 17345 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:27:03.549010 17345 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:27:03.549280 17345 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:27:03.549294 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.549299 17345 net.cpp:165] Memory required for data: 489985500\nI0817 16:27:03.549309 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:03.549317 17345 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:27:03.549324 17345 net.cpp:434] L3_b3_cbr1_scale <- 
L3_b3_cbr1_bn_top\nI0817 16:27:03.549331 17345 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.549391 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:03.549554 17345 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:27:03.549572 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.549577 17345 net.cpp:165] Memory required for data: 492033500\nI0817 16:27:03.549587 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:27:03.549594 17345 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:27:03.549600 17345 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:27:03.549607 17345 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:03.549618 17345 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:27:03.549624 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.549628 17345 net.cpp:165] Memory required for data: 494081500\nI0817 16:27:03.549633 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:27:03.549648 17345 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:27:03.549652 17345 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:27:03.549661 17345 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:27:03.551679 17345 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:27:03.551697 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.551702 17345 net.cpp:165] Memory required for data: 496129500\nI0817 16:27:03.551712 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:27:03.551722 17345 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:27:03.551728 17345 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:27:03.551739 17345 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:27:03.552012 17345 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:27:03.552026 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.552031 17345 net.cpp:165] Memory required 
for data: 498177500\nI0817 16:27:03.552064 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:03.552074 17345 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:27:03.552080 17345 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:27:03.552091 17345 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:27:03.552150 17345 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:03.552309 17345 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:27:03.552322 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.552327 17345 net.cpp:165] Memory required for data: 500225500\nI0817 16:27:03.552336 17345 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:27:03.552346 17345 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:27:03.552352 17345 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:27:03.552359 17345 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:03.552367 17345 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:27:03.552410 17345 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:27:03.552420 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.552425 17345 net.cpp:165] Memory required for data: 502273500\nI0817 16:27:03.552430 17345 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:27:03.552440 17345 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:27:03.552446 17345 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:27:03.552456 17345 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:27:03.552465 17345 net.cpp:150] Setting up L3_b3_relu\nI0817 16:27:03.552472 17345 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:03.552482 17345 net.cpp:165] Memory required for data: 504321500\nI0817 16:27:03.552489 17345 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:27:03.552497 17345 net.cpp:100] Creating Layer post_pool\nI0817 16:27:03.552502 
17345 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0817 16:27:03.552513 17345 net.cpp:408] post_pool -> post_pool\nI0817 16:27:03.552548 17345 net.cpp:150] Setting up post_pool\nI0817 16:27:03.552564 17345 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:27:03.552569 17345 net.cpp:165] Memory required for data: 504353500\nI0817 16:27:03.552574 17345 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:27:03.552584 17345 net.cpp:100] Creating Layer post_FC\nI0817 16:27:03.552590 17345 net.cpp:434] post_FC <- post_pool\nI0817 16:27:03.552598 17345 net.cpp:408] post_FC -> post_FC_top\nI0817 16:27:03.552758 17345 net.cpp:150] Setting up post_FC\nI0817 16:27:03.552772 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.552776 17345 net.cpp:165] Memory required for data: 504358500\nI0817 16:27:03.552785 17345 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:27:03.552793 17345 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:27:03.552799 17345 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:27:03.552811 17345 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:27:03.552821 17345 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:27:03.552868 17345 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:27:03.552883 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.552891 17345 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:03.552894 17345 net.cpp:165] Memory required for data: 504368500\nI0817 16:27:03.552899 17345 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:27:03.552907 17345 net.cpp:100] Creating Layer accuracy\nI0817 16:27:03.552913 17345 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:27:03.552920 17345 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:27:03.552928 17345 net.cpp:408] accuracy -> accuracy\nI0817 16:27:03.552940 17345 net.cpp:150] Setting up 
accuracy\nI0817 16:27:03.552947 17345 net.cpp:157] Top shape: (1)\nI0817 16:27:03.552952 17345 net.cpp:165] Memory required for data: 504368504\nI0817 16:27:03.552956 17345 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:03.552964 17345 net.cpp:100] Creating Layer loss\nI0817 16:27:03.552969 17345 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:27:03.552975 17345 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:27:03.552985 17345 net.cpp:408] loss -> loss\nI0817 16:27:03.552997 17345 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:03.553115 17345 net.cpp:150] Setting up loss\nI0817 16:27:03.553128 17345 net.cpp:157] Top shape: (1)\nI0817 16:27:03.553133 17345 net.cpp:160]     with loss weight 1\nI0817 16:27:03.553146 17345 net.cpp:165] Memory required for data: 504368508\nI0817 16:27:03.553153 17345 net.cpp:226] loss needs backward computation.\nI0817 16:27:03.553158 17345 net.cpp:228] accuracy does not need backward computation.\nI0817 16:27:03.553164 17345 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:27:03.553169 17345 net.cpp:226] post_FC needs backward computation.\nI0817 16:27:03.553174 17345 net.cpp:226] post_pool needs backward computation.\nI0817 16:27:03.553179 17345 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:27:03.553191 17345 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.553197 17345 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.553202 17345 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.553207 17345 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.553211 17345 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:27:03.553216 17345 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.553221 17345 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.553226 17345 net.cpp:226] L3_b3_cbr1_conv needs backward 
computation.\nI0817 16:27:03.553231 17345 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:27:03.553236 17345 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:27:03.553241 17345 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.553246 17345 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.553251 17345 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.553256 17345 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.553261 17345 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:27:03.553267 17345 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.553272 17345 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.553277 17345 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.553282 17345 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:27:03.553287 17345 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:27:03.553292 17345 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:27:03.553297 17345 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:27:03.553302 17345 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.553308 17345 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:27:03.553313 17345 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.553316 17345 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.553323 17345 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.553330 17345 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.553335 17345 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.553340 17345 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.553346 17345 net.cpp:226] L3_b1_cbr1_conv 
needs backward computation.\nI0817 16:27:03.553351 17345 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:27:03.553356 17345 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:27:03.553361 17345 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.553367 17345 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.553371 17345 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.553377 17345 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.553382 17345 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:27:03.553386 17345 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.553391 17345 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.553396 17345 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:27:03.553401 17345 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:27:03.553406 17345 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:27:03.553411 17345 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.553417 17345 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.553422 17345 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.553427 17345 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.553437 17345 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:27:03.553443 17345 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.553448 17345 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.553453 17345 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.553458 17345 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:27:03.553463 17345 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 
16:27:03.553470 17345 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:27:03.553474 17345 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:27:03.553488 17345 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.553493 17345 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:27:03.553499 17345 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.553504 17345 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.553509 17345 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.553515 17345 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.553520 17345 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.553525 17345 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.553530 17345 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:27:03.553535 17345 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:27:03.553541 17345 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:27:03.553546 17345 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0817 16:27:03.553551 17345 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:27:03.553556 17345 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:27:03.553562 17345 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:27:03.553567 17345 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:27:03.553572 17345 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:27:03.553577 17345 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:27:03.553582 17345 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:27:03.553587 17345 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:27:03.553593 17345 net.cpp:226] L1_b2_relu needs backward 
computation.\nI0817 16:27:03.553598 17345 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:27:03.553603 17345 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:27:03.553612 17345 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:27:03.553618 17345 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:27:03.553623 17345 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:27:03.553628 17345 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:27:03.553633 17345 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:27:03.553639 17345 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:27:03.553644 17345 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:27:03.553649 17345 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:27:03.553655 17345 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:27:03.553660 17345 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:27:03.553666 17345 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:27:03.553671 17345 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0817 16:27:03.553676 17345 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:27:03.553683 17345 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:27:03.553688 17345 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:27:03.553692 17345 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:27:03.553705 17345 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:27:03.553711 17345 net.cpp:226] pre_relu needs backward computation.\nI0817 16:27:03.553716 17345 net.cpp:226] pre_scale needs backward computation.\nI0817 16:27:03.553721 17345 net.cpp:226] pre_bn needs backward computation.\nI0817 16:27:03.553727 17345 net.cpp:226] pre_conv needs backward 
computation.\nI0817 16:27:03.553733 17345 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:27:03.553740 17345 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:27:03.553745 17345 net.cpp:270] This network produces output accuracy\nI0817 16:27:03.553750 17345 net.cpp:270] This network produces output loss\nI0817 16:27:03.553864 17345 net.cpp:283] Network initialization done.\nI0817 16:27:03.554179 17345 solver.cpp:60] Solver scaffolding done.\nI0817 16:27:03.765177 17345 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:27:04.059396 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:04.059458 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:04.065623 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:04.260330 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:04.260437 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:04.271961 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:04.272066 17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:04.611120 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:04.611162 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:04.617539 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:04.825798 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:04.825897 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:04.842347 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:04.842444 
17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:05.195267 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:05.195307 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:05.202306 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:05.423298 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:05.423419 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:05.445930 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:05.446045 17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:05.466024 17345 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:27:05.813159 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:05.813221 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:05.821296 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:06.042021 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:06.042192 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:06.071064 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:06.071230 17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:06.488272 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:06.488355 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:06.497457 17345 
data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:06.737263 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:06.737457 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:06.773067 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:06.773252 17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:07.212282 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:07.212323 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:07.222168 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:07.465695 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:07.465881 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:07.507828 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:07.508009 17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:07.964396 17345 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:07.964435 17345 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:07.974912 17345 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:08.026531 17371 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:08.081861 17371 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:08.309856 17345 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:08.310117 17345 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:08.359241 17345 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:08.359467 
17345 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:08.404621 17345 parallel.cpp:425] Starting Optimization\nI0817 16:27:08.405860 17345 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:27:08.405879 17345 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:27:08.408310 17345 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:27:35.043149 17345 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:27:35.043395 17345 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:27:36.484261 17345 solver.cpp:228] Iteration 0, loss = 2.62119\nI0817 16:27:36.484303 17345 solver.cpp:244]     Train net output #0: accuracy = 0.104\nI0817 16:27:36.484329 17345 solver.cpp:244]     Train net output #1: loss = 2.62119 (* 1 = 2.62119 loss)\nI0817 16:27:36.575520 17345 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:28:23.591013 17345 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:28:50.074995 17345 solver.cpp:404]     Test net output #0: accuracy = 0.35296\nI0817 16:28:50.075050 17345 solver.cpp:404]     Test net output #1: loss = 2.32112 (* 1 = 2.32112 loss)\nI0817 16:28:50.497474 17345 solver.cpp:228] Iteration 100, loss = 1.04593\nI0817 16:28:50.497514 17345 solver.cpp:244]     Train net output #0: accuracy = 0.632\nI0817 16:28:50.497539 17345 solver.cpp:244]     Train net output #1: loss = 1.04593 (* 1 = 1.04593 loss)\nI0817 16:28:50.575708 17345 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:29:37.499243 17345 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:30:04.166803 17345 solver.cpp:404]     Test net output #0: accuracy = 0.61772\nI0817 16:30:04.166862 17345 solver.cpp:404]     Test net output #1: loss = 1.17459 (* 1 = 1.17459 loss)\nI0817 16:30:04.593287 17345 solver.cpp:228] Iteration 200, loss = 0.700095\nI0817 16:30:04.593333 17345 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 16:30:04.593358 17345 
solver.cpp:244]     Train net output #1: loss = 0.700095 (* 1 = 0.700095 loss)\nI0817 16:30:04.672219 17345 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:30:51.620731 17345 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:31:18.287822 17345 solver.cpp:404]     Test net output #0: accuracy = 0.6794\nI0817 16:31:18.287874 17345 solver.cpp:404]     Test net output #1: loss = 0.965092 (* 1 = 0.965092 loss)\nI0817 16:31:18.714032 17345 solver.cpp:228] Iteration 300, loss = 0.618393\nI0817 16:31:18.714082 17345 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:31:18.714107 17345 solver.cpp:244]     Train net output #1: loss = 0.618393 (* 1 = 0.618393 loss)\nI0817 16:31:18.783635 17345 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:32:05.709496 17345 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:32:32.372766 17345 solver.cpp:404]     Test net output #0: accuracy = 0.74732\nI0817 16:32:32.372822 17345 solver.cpp:404]     Test net output #1: loss = 0.757991 (* 1 = 0.757991 loss)\nI0817 16:32:32.799055 17345 solver.cpp:228] Iteration 400, loss = 0.57813\nI0817 16:32:32.799100 17345 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 16:32:32.799124 17345 solver.cpp:244]     Train net output #1: loss = 0.57813 (* 1 = 0.57813 loss)\nI0817 16:32:32.877929 17345 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:33:19.839668 17345 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:33:46.499034 17345 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI0817 16:33:46.499086 17345 solver.cpp:404]     Test net output #1: loss = 0.87127 (* 1 = 0.87127 loss)\nI0817 16:33:46.925354 17345 solver.cpp:228] Iteration 500, loss = 0.510887\nI0817 16:33:46.925400 17345 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 16:33:46.925424 17345 solver.cpp:244]     Train net output #1: loss = 0.510887 (* 1 = 0.510887 loss)\nI0817 16:33:46.997162 17345 sgd_solver.cpp:166] Iteration 500, lr = 
0.39\nI0817 16:34:33.956357 17345 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:35:00.623590 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7596\nI0817 16:35:00.623646 17345 solver.cpp:404]     Test net output #1: loss = 0.708917 (* 1 = 0.708917 loss)\nI0817 16:35:01.049937 17345 solver.cpp:228] Iteration 600, loss = 0.467104\nI0817 16:35:01.049983 17345 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 16:35:01.050009 17345 solver.cpp:244]     Train net output #1: loss = 0.467104 (* 1 = 0.467104 loss)\nI0817 16:35:01.117740 17345 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:35:48.063331 17345 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:36:14.733832 17345 solver.cpp:404]     Test net output #0: accuracy = 0.71072\nI0817 16:36:14.733887 17345 solver.cpp:404]     Test net output #1: loss = 1.00071 (* 1 = 1.00071 loss)\nI0817 16:36:15.159217 17345 solver.cpp:228] Iteration 700, loss = 0.413413\nI0817 16:36:15.159265 17345 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:36:15.159291 17345 solver.cpp:244]     Train net output #1: loss = 0.413413 (* 1 = 0.413413 loss)\nI0817 16:36:15.235831 17345 sgd_solver.cpp:166] Iteration 700, lr = 0.506\nI0817 16:37:02.150718 17345 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:37:28.820292 17345 solver.cpp:404]     Test net output #0: accuracy = 0.74312\nI0817 16:37:28.820343 17345 solver.cpp:404]     Test net output #1: loss = 0.75587 (* 1 = 0.75587 loss)\nI0817 16:37:29.246007 17345 solver.cpp:228] Iteration 800, loss = 0.306705\nI0817 16:37:29.246052 17345 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:37:29.246076 17345 solver.cpp:244]     Train net output #1: loss = 0.306705 (* 1 = 0.306705 loss)\nI0817 16:37:29.320210 17345 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:38:16.243719 17345 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:38:42.915258 17345 solver.cpp:404]     Test net output #0: 
accuracy = 0.78856\nI0817 16:38:42.915295 17345 solver.cpp:404]     Test net output #1: loss = 0.66217 (* 1 = 0.66217 loss)\nI0817 16:38:43.341313 17345 solver.cpp:228] Iteration 900, loss = 0.308061\nI0817 16:38:43.341357 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:38:43.341372 17345 solver.cpp:244]     Train net output #1: loss = 0.308061 (* 1 = 0.308061 loss)\nI0817 16:38:43.411756 17345 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:39:30.318084 17345 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:39:56.987720 17345 solver.cpp:404]     Test net output #0: accuracy = 0.78776\nI0817 16:39:56.987757 17345 solver.cpp:404]     Test net output #1: loss = 0.661987 (* 1 = 0.661987 loss)\nI0817 16:39:57.413590 17345 solver.cpp:228] Iteration 1000, loss = 0.244667\nI0817 16:39:57.413635 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:39:57.413650 17345 solver.cpp:244]     Train net output #1: loss = 0.244667 (* 1 = 0.244667 loss)\nI0817 16:39:57.482190 17345 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:40:44.359033 17345 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:41:11.028462 17345 solver.cpp:404]     Test net output #0: accuracy = 0.66516\nI0817 16:41:11.028501 17345 solver.cpp:404]     Test net output #1: loss = 1.29038 (* 1 = 1.29038 loss)\nI0817 16:41:11.453092 17345 solver.cpp:228] Iteration 1100, loss = 0.32434\nI0817 16:41:11.453135 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:41:11.453150 17345 solver.cpp:244]     Train net output #1: loss = 0.32434 (* 1 = 0.32434 loss)\nI0817 16:41:11.524878 17345 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:41:58.415292 17345 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:42:25.085024 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73084\nI0817 16:42:25.085073 17345 solver.cpp:404]     Test net output #1: loss = 0.988709 (* 1 = 0.988709 loss)\nI0817 
16:42:25.510774 17345 solver.cpp:228] Iteration 1200, loss = 0.34402\nI0817 16:42:25.510818 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:42:25.510834 17345 solver.cpp:244]     Train net output #1: loss = 0.34402 (* 1 = 0.34402 loss)\nI0817 16:42:25.582535 17345 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:43:12.514816 17345 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:43:39.182946 17345 solver.cpp:404]     Test net output #0: accuracy = 0.67356\nI0817 16:43:39.182986 17345 solver.cpp:404]     Test net output #1: loss = 1.20147 (* 1 = 1.20147 loss)\nI0817 16:43:39.608361 17345 solver.cpp:228] Iteration 1300, loss = 0.335209\nI0817 16:43:39.608405 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:43:39.608422 17345 solver.cpp:244]     Train net output #1: loss = 0.335209 (* 1 = 0.335209 loss)\nI0817 16:43:39.676165 17345 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:44:26.553925 17345 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:44:53.220235 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7562\nI0817 16:44:53.220273 17345 solver.cpp:404]     Test net output #1: loss = 0.8605 (* 1 = 0.8605 loss)\nI0817 16:44:53.646046 17345 solver.cpp:228] Iteration 1400, loss = 0.329017\nI0817 16:44:53.646090 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:44:53.646106 17345 solver.cpp:244]     Train net output #1: loss = 0.329017 (* 1 = 0.329017 loss)\nI0817 16:44:53.714320 17345 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 16:45:40.633689 17345 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 16:46:07.298945 17345 solver.cpp:404]     Test net output #0: accuracy = 0.81012\nI0817 16:46:07.298982 17345 solver.cpp:404]     Test net output #1: loss = 0.645555 (* 1 = 0.645555 loss)\nI0817 16:46:07.724617 17345 solver.cpp:228] Iteration 1500, loss = 0.238714\nI0817 16:46:07.724663 17345 solver.cpp:244]     Train net output #0: 
accuracy = 0.944\nI0817 16:46:07.724678 17345 solver.cpp:244]     Train net output #1: loss = 0.238714 (* 1 = 0.238714 loss)\nI0817 16:46:07.790338 17345 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 16:46:54.628921 17345 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 16:47:21.298512 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76\nI0817 16:47:21.298560 17345 solver.cpp:404]     Test net output #1: loss = 0.802024 (* 1 = 0.802024 loss)\nI0817 16:47:21.723258 17345 solver.cpp:228] Iteration 1600, loss = 0.224606\nI0817 16:47:21.723301 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:47:21.723317 17345 solver.cpp:244]     Train net output #1: loss = 0.224606 (* 1 = 0.224606 loss)\nI0817 16:47:21.796522 17345 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 16:48:08.616952 17345 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 16:48:35.283996 17345 solver.cpp:404]     Test net output #0: accuracy = 0.77648\nI0817 16:48:35.284034 17345 solver.cpp:404]     Test net output #1: loss = 0.757646 (* 1 = 0.757646 loss)\nI0817 16:48:35.709432 17345 solver.cpp:228] Iteration 1700, loss = 0.288376\nI0817 16:48:35.709476 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 16:48:35.709492 17345 solver.cpp:244]     Train net output #1: loss = 0.288376 (* 1 = 0.288376 loss)\nI0817 16:48:35.786478 17345 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 16:49:22.671950 17345 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 16:49:49.341614 17345 solver.cpp:404]     Test net output #0: accuracy = 0.65052\nI0817 16:49:49.341650 17345 solver.cpp:404]     Test net output #1: loss = 1.51776 (* 1 = 1.51776 loss)\nI0817 16:49:49.766419 17345 solver.cpp:228] Iteration 1800, loss = 0.23846\nI0817 16:49:49.766463 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 16:49:49.766480 17345 solver.cpp:244]     Train net output #1: loss = 0.23846 (* 1 = 0.23846 loss)\nI0817 
16:49:49.841083 17345 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 16:50:36.687170 17345 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 16:51:03.358291 17345 solver.cpp:404]     Test net output #0: accuracy = 0.79004\nI0817 16:51:03.358330 17345 solver.cpp:404]     Test net output #1: loss = 0.693062 (* 1 = 0.693062 loss)\nI0817 16:51:03.783504 17345 solver.cpp:228] Iteration 1900, loss = 0.235394\nI0817 16:51:03.783550 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:51:03.783567 17345 solver.cpp:244]     Train net output #1: loss = 0.235394 (* 1 = 0.235394 loss)\nI0817 16:51:03.848374 17345 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 16:51:50.679261 17345 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 16:52:17.350214 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7664\nI0817 16:52:17.350263 17345 solver.cpp:404]     Test net output #1: loss = 0.773116 (* 1 = 0.773116 loss)\nI0817 16:52:17.775101 17345 solver.cpp:228] Iteration 2000, loss = 0.240901\nI0817 16:52:17.775143 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 16:52:17.775161 17345 solver.cpp:244]     Train net output #1: loss = 0.240901 (* 1 = 0.240901 loss)\nI0817 16:52:17.847643 17345 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 16:53:04.647660 17345 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 16:53:31.317525 17345 solver.cpp:404]     Test net output #0: accuracy = 0.69184\nI0817 16:53:31.317570 17345 solver.cpp:404]     Test net output #1: loss = 1.14189 (* 1 = 1.14189 loss)\nI0817 16:53:31.742528 17345 solver.cpp:228] Iteration 2100, loss = 0.26005\nI0817 16:53:31.742575 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:53:31.742593 17345 solver.cpp:244]     Train net output #1: loss = 0.26005 (* 1 = 0.26005 loss)\nI0817 16:53:31.806730 17345 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 16:54:18.639001 17345 solver.cpp:337] Iteration 2200, Testing net 
(#0)\nI0817 16:54:45.304136 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73612\nI0817 16:54:45.304174 17345 solver.cpp:404]     Test net output #1: loss = 0.862564 (* 1 = 0.862564 loss)\nI0817 16:54:45.729146 17345 solver.cpp:228] Iteration 2200, loss = 0.218404\nI0817 16:54:45.729188 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 16:54:45.729204 17345 solver.cpp:244]     Train net output #1: loss = 0.218404 (* 1 = 0.218404 loss)\nI0817 16:54:45.799757 17345 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 16:55:32.637188 17345 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 16:55:59.302963 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76948\nI0817 16:55:59.303001 17345 solver.cpp:404]     Test net output #1: loss = 0.773262 (* 1 = 0.773262 loss)\nI0817 16:55:59.727759 17345 solver.cpp:228] Iteration 2300, loss = 0.222406\nI0817 16:55:59.727804 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:55:59.727820 17345 solver.cpp:244]     Train net output #1: loss = 0.222406 (* 1 = 0.222406 loss)\nI0817 16:55:59.803500 17345 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 16:56:46.566401 17345 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 16:57:13.230448 17345 solver.cpp:404]     Test net output #0: accuracy = 0.71452\nI0817 16:57:13.230485 17345 solver.cpp:404]     Test net output #1: loss = 1.04585 (* 1 = 1.04585 loss)\nI0817 16:57:13.655427 17345 solver.cpp:228] Iteration 2400, loss = 0.268759\nI0817 16:57:13.655469 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:57:13.655485 17345 solver.cpp:244]     Train net output #1: loss = 0.268759 (* 1 = 0.268759 loss)\nI0817 16:57:13.730592 17345 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 16:58:00.458159 17345 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 16:58:27.123744 17345 solver.cpp:404]     Test net output #0: accuracy = 0.58244\nI0817 16:58:27.123781 17345 solver.cpp:404]  
   Test net output #1: loss = 2.23963 (* 1 = 2.23963 loss)\nI0817 16:58:27.548867 17345 solver.cpp:228] Iteration 2500, loss = 0.295989\nI0817 16:58:27.548909 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:58:27.548924 17345 solver.cpp:244]     Train net output #1: loss = 0.295989 (* 1 = 0.295989 loss)\nI0817 16:58:27.626070 17345 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 16:59:14.470376 17345 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 16:59:41.136353 17345 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0817 16:59:41.136391 17345 solver.cpp:404]     Test net output #1: loss = 0.730106 (* 1 = 0.730106 loss)\nI0817 16:59:41.561517 17345 solver.cpp:228] Iteration 2600, loss = 0.194419\nI0817 16:59:41.561563 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:59:41.561579 17345 solver.cpp:244]     Train net output #1: loss = 0.194419 (* 1 = 0.194419 loss)\nI0817 16:59:41.633260 17345 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:00:28.459072 17345 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:00:55.124943 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76412\nI0817 17:00:55.124980 17345 solver.cpp:404]     Test net output #1: loss = 0.798946 (* 1 = 0.798946 loss)\nI0817 17:00:55.549532 17345 solver.cpp:228] Iteration 2700, loss = 0.207055\nI0817 17:00:55.549579 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:00:55.549595 17345 solver.cpp:244]     Train net output #1: loss = 0.207055 (* 1 = 0.207055 loss)\nI0817 17:00:55.623129 17345 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:01:42.458364 17345 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:02:09.122412 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73616\nI0817 17:02:09.122460 17345 solver.cpp:404]     Test net output #1: loss = 0.863977 (* 1 = 0.863977 loss)\nI0817 17:02:09.547775 17345 solver.cpp:228] Iteration 2800, loss = 
0.211376\nI0817 17:02:09.547816 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:02:09.547832 17345 solver.cpp:244]     Train net output #1: loss = 0.211376 (* 1 = 0.211376 loss)\nI0817 17:02:09.614953 17345 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:02:56.358008 17345 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:03:23.022454 17345 solver.cpp:404]     Test net output #0: accuracy = 0.71116\nI0817 17:03:23.022492 17345 solver.cpp:404]     Test net output #1: loss = 0.957644 (* 1 = 0.957644 loss)\nI0817 17:03:23.447757 17345 solver.cpp:228] Iteration 2900, loss = 0.3614\nI0817 17:03:23.447800 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:03:23.447815 17345 solver.cpp:244]     Train net output #1: loss = 0.3614 (* 1 = 0.3614 loss)\nI0817 17:03:23.513497 17345 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:04:10.340822 17345 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:04:37.003409 17345 solver.cpp:404]     Test net output #0: accuracy = 0.6724\nI0817 17:04:37.003448 17345 solver.cpp:404]     Test net output #1: loss = 1.3834 (* 1 = 1.3834 loss)\nI0817 17:04:37.428279 17345 solver.cpp:228] Iteration 3000, loss = 0.245417\nI0817 17:04:37.428321 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:04:37.428338 17345 solver.cpp:244]     Train net output #1: loss = 0.245417 (* 1 = 0.245417 loss)\nI0817 17:04:37.502168 17345 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 17:05:24.255139 17345 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 17:05:50.922308 17345 solver.cpp:404]     Test net output #0: accuracy = 0.75388\nI0817 17:05:50.922348 17345 solver.cpp:404]     Test net output #1: loss = 0.764581 (* 1 = 0.764581 loss)\nI0817 17:05:51.347604 17345 solver.cpp:228] Iteration 3100, loss = 0.269998\nI0817 17:05:51.347645 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:05:51.347661 17345 solver.cpp:244]     
Train net output #1: loss = 0.269998 (* 1 = 0.269998 loss)\nI0817 17:05:51.416803 17345 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 17:06:38.232697 17345 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 17:07:04.903548 17345 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0817 17:07:04.903585 17345 solver.cpp:404]     Test net output #1: loss = 0.814005 (* 1 = 0.814005 loss)\nI0817 17:07:05.328418 17345 solver.cpp:228] Iteration 3200, loss = 0.192725\nI0817 17:07:05.328459 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:07:05.328475 17345 solver.cpp:244]     Train net output #1: loss = 0.192725 (* 1 = 0.192725 loss)\nI0817 17:07:05.407562 17345 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 17:07:52.290789 17345 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 17:08:18.958199 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73876\nI0817 17:08:18.958238 17345 solver.cpp:404]     Test net output #1: loss = 0.872181 (* 1 = 0.872181 loss)\nI0817 17:08:19.383025 17345 solver.cpp:228] Iteration 3300, loss = 0.245044\nI0817 17:08:19.383066 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:08:19.383082 17345 solver.cpp:244]     Train net output #1: loss = 0.245044 (* 1 = 0.245044 loss)\nI0817 17:08:19.453227 17345 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 17:09:06.265064 17345 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 17:09:32.931005 17345 solver.cpp:404]     Test net output #0: accuracy = 0.75748\nI0817 17:09:32.931049 17345 solver.cpp:404]     Test net output #1: loss = 0.770408 (* 1 = 0.770408 loss)\nI0817 17:09:33.357117 17345 solver.cpp:228] Iteration 3400, loss = 0.264271\nI0817 17:09:33.357162 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:09:33.357185 17345 solver.cpp:244]     Train net output #1: loss = 0.264271 (* 1 = 0.264271 loss)\nI0817 17:09:33.427520 17345 sgd_solver.cpp:166] Iteration 3400, lr = 
2.072\nI0817 17:10:20.374132 17345 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 17:10:47.045127 17345 solver.cpp:404]     Test net output #0: accuracy = 0.6698\nI0817 17:10:47.045172 17345 solver.cpp:404]     Test net output #1: loss = 1.20767 (* 1 = 1.20767 loss)\nI0817 17:10:47.470168 17345 solver.cpp:228] Iteration 3500, loss = 0.31675\nI0817 17:10:47.470213 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:10:47.470237 17345 solver.cpp:244]     Train net output #1: loss = 0.31675 (* 1 = 0.31675 loss)\nI0817 17:10:47.540181 17345 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 17:11:34.373692 17345 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 17:12:01.046187 17345 solver.cpp:404]     Test net output #0: accuracy = 0.72904\nI0817 17:12:01.046236 17345 solver.cpp:404]     Test net output #1: loss = 0.926204 (* 1 = 0.926204 loss)\nI0817 17:12:01.473980 17345 solver.cpp:228] Iteration 3600, loss = 0.300603\nI0817 17:12:01.474025 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:12:01.474050 17345 solver.cpp:244]     Train net output #1: loss = 0.300603 (* 1 = 0.300603 loss)\nI0817 17:12:01.545209 17345 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 17:12:48.407757 17345 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 17:13:15.081161 17345 solver.cpp:404]     Test net output #0: accuracy = 0.79504\nI0817 17:13:15.081197 17345 solver.cpp:404]     Test net output #1: loss = 0.631562 (* 1 = 0.631562 loss)\nI0817 17:13:15.506196 17345 solver.cpp:228] Iteration 3700, loss = 0.288276\nI0817 17:13:15.506238 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:13:15.506254 17345 solver.cpp:244]     Train net output #1: loss = 0.288276 (* 1 = 0.288276 loss)\nI0817 17:13:15.580426 17345 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 17:14:02.551295 17345 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 17:14:29.211254 17345 solver.cpp:404]     Test net 
output #0: accuracy = 0.69908\nI0817 17:14:29.211292 17345 solver.cpp:404]     Test net output #1: loss = 1.09383 (* 1 = 1.09383 loss)\nI0817 17:14:29.636124 17345 solver.cpp:228] Iteration 3800, loss = 0.357071\nI0817 17:14:29.636164 17345 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 17:14:29.636180 17345 solver.cpp:244]     Train net output #1: loss = 0.35707 (* 1 = 0.35707 loss)\nI0817 17:14:29.704401 17345 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 17:15:16.562258 17345 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 17:15:43.222509 17345 solver.cpp:404]     Test net output #0: accuracy = 0.70072\nI0817 17:15:43.222549 17345 solver.cpp:404]     Test net output #1: loss = 1.09551 (* 1 = 1.09551 loss)\nI0817 17:15:43.647384 17345 solver.cpp:228] Iteration 3900, loss = 0.220746\nI0817 17:15:43.647426 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:15:43.647442 17345 solver.cpp:244]     Train net output #1: loss = 0.220746 (* 1 = 0.220746 loss)\nI0817 17:15:43.718648 17345 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 17:16:30.586100 17345 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 17:16:57.243121 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7096\nI0817 17:16:57.243158 17345 solver.cpp:404]     Test net output #1: loss = 1.03051 (* 1 = 1.03051 loss)\nI0817 17:16:57.667945 17345 solver.cpp:228] Iteration 4000, loss = 0.344436\nI0817 17:16:57.667985 17345 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 17:16:57.668000 17345 solver.cpp:244]     Train net output #1: loss = 0.344436 (* 1 = 0.344436 loss)\nI0817 17:16:57.744027 17345 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 17:17:44.576448 17345 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 17:18:11.233867 17345 solver.cpp:404]     Test net output #0: accuracy = 0.67892\nI0817 17:18:11.233904 17345 solver.cpp:404]     Test net output #1: loss = 1.14601 (* 1 = 1.14601 loss)\nI0817 
17:18:11.658594 17345 solver.cpp:228] Iteration 4100, loss = 0.276228\nI0817 17:18:11.658637 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:18:11.658653 17345 solver.cpp:244]     Train net output #1: loss = 0.276227 (* 1 = 0.276227 loss)\nI0817 17:18:11.728253 17345 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 17:18:58.524937 17345 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 17:19:25.183148 17345 solver.cpp:404]     Test net output #0: accuracy = 0.74536\nI0817 17:19:25.183187 17345 solver.cpp:404]     Test net output #1: loss = 0.833217 (* 1 = 0.833217 loss)\nI0817 17:19:25.608098 17345 solver.cpp:228] Iteration 4200, loss = 0.351429\nI0817 17:19:25.608139 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:19:25.608155 17345 solver.cpp:244]     Train net output #1: loss = 0.351429 (* 1 = 0.351429 loss)\nI0817 17:19:25.683079 17345 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 17:20:12.477708 17345 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 17:20:39.132987 17345 solver.cpp:404]     Test net output #0: accuracy = 0.67036\nI0817 17:20:39.133024 17345 solver.cpp:404]     Test net output #1: loss = 1.17787 (* 1 = 1.17787 loss)\nI0817 17:20:39.557701 17345 solver.cpp:228] Iteration 4300, loss = 0.417006\nI0817 17:20:39.557742 17345 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:20:39.557757 17345 solver.cpp:244]     Train net output #1: loss = 0.417006 (* 1 = 0.417006 loss)\nI0817 17:20:39.635057 17345 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 17:21:26.546408 17345 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 17:21:53.214265 17345 solver.cpp:404]     Test net output #0: accuracy = 0.6538\nI0817 17:21:53.214304 17345 solver.cpp:404]     Test net output #1: loss = 1.38567 (* 1 = 1.38567 loss)\nI0817 17:21:53.639106 17345 solver.cpp:228] Iteration 4400, loss = 0.307412\nI0817 17:21:53.639148 17345 solver.cpp:244]     Train net output #0: 
accuracy = 0.904\nI0817 17:21:53.639164 17345 solver.cpp:244]     Train net output #1: loss = 0.307411 (* 1 = 0.307411 loss)\nI0817 17:21:53.706360 17345 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 17:22:40.550282 17345 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 17:23:07.222640 17345 solver.cpp:404]     Test net output #0: accuracy = 0.71296\nI0817 17:23:07.222676 17345 solver.cpp:404]     Test net output #1: loss = 0.937685 (* 1 = 0.937685 loss)\nI0817 17:23:07.647792 17345 solver.cpp:228] Iteration 4500, loss = 0.28151\nI0817 17:23:07.647835 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:23:07.647850 17345 solver.cpp:244]     Train net output #1: loss = 0.28151 (* 1 = 0.28151 loss)\nI0817 17:23:07.722653 17345 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 17:23:54.510097 17345 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 17:24:21.179663 17345 solver.cpp:404]     Test net output #0: accuracy = 0.65332\nI0817 17:24:21.179700 17345 solver.cpp:404]     Test net output #1: loss = 1.27424 (* 1 = 1.27424 loss)\nI0817 17:24:21.604717 17345 solver.cpp:228] Iteration 4600, loss = 0.332897\nI0817 17:24:21.604759 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:24:21.604774 17345 solver.cpp:244]     Train net output #1: loss = 0.332896 (* 1 = 0.332896 loss)\nI0817 17:24:21.674780 17345 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 17:25:08.482575 17345 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 17:25:35.150862 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7052\nI0817 17:25:35.150902 17345 solver.cpp:404]     Test net output #1: loss = 0.991549 (* 1 = 0.991549 loss)\nI0817 17:25:35.575584 17345 solver.cpp:228] Iteration 4700, loss = 0.350307\nI0817 17:25:35.575625 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:25:35.575641 17345 solver.cpp:244]     Train net output #1: loss = 0.350307 (* 1 = 0.350307 loss)\nI0817 
17:25:35.648084 17345 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 17:26:22.488489 17345 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 17:26:49.157757 17345 solver.cpp:404]     Test net output #0: accuracy = 0.62848\nI0817 17:26:49.157800 17345 solver.cpp:404]     Test net output #1: loss = 1.37288 (* 1 = 1.37288 loss)\nI0817 17:26:49.583626 17345 solver.cpp:228] Iteration 4800, loss = 0.2794\nI0817 17:26:49.583673 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:26:49.583696 17345 solver.cpp:244]     Train net output #1: loss = 0.2794 (* 1 = 0.2794 loss)\nI0817 17:26:49.662940 17345 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 17:27:36.569254 17345 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 17:28:03.236014 17345 solver.cpp:404]     Test net output #0: accuracy = 0.70168\nI0817 17:28:03.236058 17345 solver.cpp:404]     Test net output #1: loss = 1.04957 (* 1 = 1.04957 loss)\nI0817 17:28:03.661885 17345 solver.cpp:228] Iteration 4900, loss = 0.247757\nI0817 17:28:03.661929 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:28:03.661953 17345 solver.cpp:244]     Train net output #1: loss = 0.247757 (* 1 = 0.247757 loss)\nI0817 17:28:03.740631 17345 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 17:28:50.643563 17345 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 17:29:17.312332 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7354\nI0817 17:29:17.312368 17345 solver.cpp:404]     Test net output #1: loss = 0.874691 (* 1 = 0.874691 loss)\nI0817 17:29:17.737259 17345 solver.cpp:228] Iteration 5000, loss = 0.280819\nI0817 17:29:17.737300 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:29:17.737316 17345 solver.cpp:244]     Train net output #1: loss = 0.280818 (* 1 = 0.280818 loss)\nI0817 17:29:17.812113 17345 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 17:30:04.565819 17345 solver.cpp:337] Iteration 5100, Testing net 
(#0)\nI0817 17:30:31.236363 17345 solver.cpp:404]     Test net output #0: accuracy = 0.61468\nI0817 17:30:31.236400 17345 solver.cpp:404]     Test net output #1: loss = 1.39336 (* 1 = 1.39336 loss)\nI0817 17:30:31.661340 17345 solver.cpp:228] Iteration 5100, loss = 0.300906\nI0817 17:30:31.661381 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:30:31.661397 17345 solver.cpp:244]     Train net output #1: loss = 0.300906 (* 1 = 0.300906 loss)\nI0817 17:30:31.734030 17345 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 17:31:18.536731 17345 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 17:31:45.194641 17345 solver.cpp:404]     Test net output #0: accuracy = 0.69008\nI0817 17:31:45.194680 17345 solver.cpp:404]     Test net output #1: loss = 1.29841 (* 1 = 1.29841 loss)\nI0817 17:31:45.619426 17345 solver.cpp:228] Iteration 5200, loss = 0.355377\nI0817 17:31:45.619467 17345 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:31:45.619482 17345 solver.cpp:244]     Train net output #1: loss = 0.355377 (* 1 = 0.355377 loss)\nI0817 17:31:45.688539 17345 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 17:32:32.402302 17345 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 17:32:59.060499 17345 solver.cpp:404]     Test net output #0: accuracy = 0.72008\nI0817 17:32:59.060537 17345 solver.cpp:404]     Test net output #1: loss = 0.871383 (* 1 = 0.871383 loss)\nI0817 17:32:59.485077 17345 solver.cpp:228] Iteration 5300, loss = 0.227915\nI0817 17:32:59.485116 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:32:59.485131 17345 solver.cpp:244]     Train net output #1: loss = 0.227914 (* 1 = 0.227914 loss)\nI0817 17:32:59.559700 17345 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 17:33:46.448662 17345 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 17:34:13.107635 17345 solver.cpp:404]     Test net output #0: accuracy = 0.40336\nI0817 17:34:13.107671 17345 solver.cpp:404]     
Test net output #1: loss = 2.95544 (* 1 = 2.95544 loss)\nI0817 17:34:13.532743 17345 solver.cpp:228] Iteration 5400, loss = 0.288796\nI0817 17:34:13.532783 17345 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:34:13.532799 17345 solver.cpp:244]     Train net output #1: loss = 0.288796 (* 1 = 0.288796 loss)\nI0817 17:34:13.608633 17345 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 17:35:00.511543 17345 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 17:35:27.170752 17345 solver.cpp:404]     Test net output #0: accuracy = 0.70348\nI0817 17:35:27.170790 17345 solver.cpp:404]     Test net output #1: loss = 1.13985 (* 1 = 1.13985 loss)\nI0817 17:35:27.595295 17345 solver.cpp:228] Iteration 5500, loss = 0.297587\nI0817 17:35:27.595329 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:35:27.595343 17345 solver.cpp:244]     Train net output #1: loss = 0.297587 (* 1 = 0.297587 loss)\nI0817 17:35:27.669802 17345 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 17:36:14.606290 17345 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 17:36:41.244231 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76444\nI0817 17:36:41.244271 17345 solver.cpp:404]     Test net output #1: loss = 0.761409 (* 1 = 0.761409 loss)\nI0817 17:36:41.668992 17345 solver.cpp:228] Iteration 5600, loss = 0.330463\nI0817 17:36:41.669023 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:36:41.669037 17345 solver.cpp:244]     Train net output #1: loss = 0.330463 (* 1 = 0.330463 loss)\nI0817 17:36:41.740964 17345 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 17:37:28.717006 17345 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 17:37:55.706909 17345 solver.cpp:404]     Test net output #0: accuracy = 0.6872\nI0817 17:37:55.706979 17345 solver.cpp:404]     Test net output #1: loss = 1.08756 (* 1 = 1.08756 loss)\nI0817 17:37:56.133925 17345 solver.cpp:228] Iteration 5700, loss = 0.298093\nI0817 
17:37:56.133985 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:37:56.134002 17345 solver.cpp:244]     Train net output #1: loss = 0.298093 (* 1 = 0.298093 loss)\nI0817 17:37:56.199076 17345 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 17:38:43.208014 17345 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 17:39:10.192260 17345 solver.cpp:404]     Test net output #0: accuracy = 0.70644\nI0817 17:39:10.192327 17345 solver.cpp:404]     Test net output #1: loss = 0.963641 (* 1 = 0.963641 loss)\nI0817 17:39:10.618055 17345 solver.cpp:228] Iteration 5800, loss = 0.293707\nI0817 17:39:10.618114 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:39:10.618132 17345 solver.cpp:244]     Train net output #1: loss = 0.293707 (* 1 = 0.293707 loss)\nI0817 17:39:10.697327 17345 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 17:39:57.660991 17345 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 17:40:24.640750 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7422\nI0817 17:40:24.640818 17345 solver.cpp:404]     Test net output #1: loss = 0.821883 (* 1 = 0.821883 loss)\nI0817 17:40:25.066768 17345 solver.cpp:228] Iteration 5900, loss = 0.246833\nI0817 17:40:25.066828 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:40:25.066845 17345 solver.cpp:244]     Train net output #1: loss = 0.246833 (* 1 = 0.246833 loss)\nI0817 17:40:25.134065 17345 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 17:41:12.128808 17345 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 17:41:39.079793 17345 solver.cpp:404]     Test net output #0: accuracy = 0.74224\nI0817 17:41:39.079845 17345 solver.cpp:404]     Test net output #1: loss = 0.856348 (* 1 = 0.856348 loss)\nI0817 17:41:39.506114 17345 solver.cpp:228] Iteration 6000, loss = 0.266266\nI0817 17:41:39.506175 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:41:39.506192 17345 solver.cpp:244]     Train net 
output #1: loss = 0.266266 (* 1 = 0.266266 loss)\nI0817 17:41:39.571450 17345 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 17:42:26.520567 17345 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 17:42:53.370333 17345 solver.cpp:404]     Test net output #0: accuracy = 0.78076\nI0817 17:42:53.370396 17345 solver.cpp:404]     Test net output #1: loss = 0.703634 (* 1 = 0.703634 loss)\nI0817 17:42:53.796258 17345 solver.cpp:228] Iteration 6100, loss = 0.241659\nI0817 17:42:53.796308 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:42:53.796334 17345 solver.cpp:244]     Train net output #1: loss = 0.241659 (* 1 = 0.241659 loss)\nI0817 17:42:53.871820 17345 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 17:43:40.866331 17345 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 17:44:07.837359 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73372\nI0817 17:44:07.837420 17345 solver.cpp:404]     Test net output #1: loss = 0.888293 (* 1 = 0.888293 loss)\nI0817 17:44:08.263310 17345 solver.cpp:228] Iteration 6200, loss = 0.283858\nI0817 17:44:08.263365 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:44:08.263391 17345 solver.cpp:244]     Train net output #1: loss = 0.283857 (* 1 = 0.283857 loss)\nI0817 17:44:08.332155 17345 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 17:44:55.507047 17345 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 17:45:22.486541 17345 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0817 17:45:22.486604 17345 solver.cpp:404]     Test net output #1: loss = 0.593141 (* 1 = 0.593141 loss)\nI0817 17:45:22.913852 17345 solver.cpp:228] Iteration 6300, loss = 0.271159\nI0817 17:45:22.913908 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:45:22.913931 17345 solver.cpp:244]     Train net output #1: loss = 0.271159 (* 1 = 0.271159 loss)\nI0817 17:45:22.983736 17345 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 
17:46:10.145057 17345 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 17:46:37.124207 17345 solver.cpp:404]     Test net output #0: accuracy = 0.65376\nI0817 17:46:37.124284 17345 solver.cpp:404]     Test net output #1: loss = 1.194 (* 1 = 1.194 loss)\nI0817 17:46:37.551553 17345 solver.cpp:228] Iteration 6400, loss = 0.283275\nI0817 17:46:37.551605 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:46:37.551635 17345 solver.cpp:244]     Train net output #1: loss = 0.283275 (* 1 = 0.283275 loss)\nI0817 17:46:37.620049 17345 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 17:47:24.809180 17345 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 17:47:51.799077 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7116\nI0817 17:47:51.799151 17345 solver.cpp:404]     Test net output #1: loss = 0.893725 (* 1 = 0.893725 loss)\nI0817 17:47:52.226146 17345 solver.cpp:228] Iteration 6500, loss = 0.228049\nI0817 17:47:52.226197 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:47:52.226222 17345 solver.cpp:244]     Train net output #1: loss = 0.228049 (* 1 = 0.228049 loss)\nI0817 17:47:52.299177 17345 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 17:48:39.317785 17345 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 17:49:06.306630 17345 solver.cpp:404]     Test net output #0: accuracy = 0.75704\nI0817 17:49:06.306705 17345 solver.cpp:404]     Test net output #1: loss = 0.784198 (* 1 = 0.784198 loss)\nI0817 17:49:06.733911 17345 solver.cpp:228] Iteration 6600, loss = 0.21526\nI0817 17:49:06.733960 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:49:06.733985 17345 solver.cpp:244]     Train net output #1: loss = 0.21526 (* 1 = 0.21526 loss)\nI0817 17:49:06.802996 17345 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 17:49:53.962987 17345 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 17:50:20.892362 17345 solver.cpp:404]     Test net output #0: 
accuracy = 0.78144\nI0817 17:50:20.892412 17345 solver.cpp:404]     Test net output #1: loss = 0.707215 (* 1 = 0.707215 loss)\nI0817 17:50:21.320407 17345 solver.cpp:228] Iteration 6700, loss = 0.327201\nI0817 17:50:21.320451 17345 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:50:21.320467 17345 solver.cpp:244]     Train net output #1: loss = 0.327201 (* 1 = 0.327201 loss)\nI0817 17:50:21.394889 17345 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 17:51:08.339740 17345 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 17:51:35.283396 17345 solver.cpp:404]     Test net output #0: accuracy = 0.81168\nI0817 17:51:35.283466 17345 solver.cpp:404]     Test net output #1: loss = 0.598334 (* 1 = 0.598334 loss)\nI0817 17:51:35.711205 17345 solver.cpp:228] Iteration 6800, loss = 0.256919\nI0817 17:51:35.711257 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:51:35.711274 17345 solver.cpp:244]     Train net output #1: loss = 0.256918 (* 1 = 0.256918 loss)\nI0817 17:51:35.785853 17345 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 17:52:22.744496 17345 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 17:52:49.721441 17345 solver.cpp:404]     Test net output #0: accuracy = 0.78136\nI0817 17:52:49.721498 17345 solver.cpp:404]     Test net output #1: loss = 0.67791 (* 1 = 0.67791 loss)\nI0817 17:52:50.148728 17345 solver.cpp:228] Iteration 6900, loss = 0.206665\nI0817 17:52:50.148780 17345 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:52:50.148797 17345 solver.cpp:244]     Train net output #1: loss = 0.206665 (* 1 = 0.206665 loss)\nI0817 17:52:50.224314 17345 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 17:53:37.197746 17345 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 17:54:04.169708 17345 solver.cpp:404]     Test net output #0: accuracy = 0.72352\nI0817 17:54:04.169764 17345 solver.cpp:404]     Test net output #1: loss = 0.98046 (* 1 = 0.98046 loss)\nI0817 
17:54:04.595762 17345 solver.cpp:228] Iteration 7000, loss = 0.221378\nI0817 17:54:04.595813 17345 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:54:04.595829 17345 solver.cpp:244]     Train net output #1: loss = 0.221378 (* 1 = 0.221378 loss)\nI0817 17:54:04.661151 17345 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 17:54:51.616087 17345 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 17:55:18.587208 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76992\nI0817 17:55:18.587265 17345 solver.cpp:404]     Test net output #1: loss = 0.736924 (* 1 = 0.736924 loss)\nI0817 17:55:19.013306 17345 solver.cpp:228] Iteration 7100, loss = 0.211728\nI0817 17:55:19.013355 17345 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:55:19.013372 17345 solver.cpp:244]     Train net output #1: loss = 0.211727 (* 1 = 0.211727 loss)\nI0817 17:55:19.077625 17345 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 17:56:06.233656 17345 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 17:56:33.188940 17345 solver.cpp:404]     Test net output #0: accuracy = 0.79268\nI0817 17:56:33.188995 17345 solver.cpp:404]     Test net output #1: loss = 0.657472 (* 1 = 0.657472 loss)\nI0817 17:56:33.616024 17345 solver.cpp:228] Iteration 7200, loss = 0.217366\nI0817 17:56:33.616075 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:56:33.616091 17345 solver.cpp:244]     Train net output #1: loss = 0.217366 (* 1 = 0.217366 loss)\nI0817 17:56:33.688200 17345 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 17:57:20.655478 17345 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 17:57:47.614573 17345 solver.cpp:404]     Test net output #0: accuracy = 0.73128\nI0817 17:57:47.614631 17345 solver.cpp:404]     Test net output #1: loss = 1.09476 (* 1 = 1.09476 loss)\nI0817 17:57:48.040393 17345 solver.cpp:228] Iteration 7300, loss = 0.154825\nI0817 17:57:48.040443 17345 solver.cpp:244]     Train net output #0: 
accuracy = 0.952\nI0817 17:57:48.040459 17345 solver.cpp:244]     Train net output #1: loss = 0.154825 (* 1 = 0.154825 loss)\nI0817 17:57:48.111688 17345 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 17:58:35.235023 17345 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 17:59:02.201505 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7376\nI0817 17:59:02.201568 17345 solver.cpp:404]     Test net output #1: loss = 0.872981 (* 1 = 0.872981 loss)\nI0817 17:59:02.628610 17345 solver.cpp:228] Iteration 7400, loss = 0.250983\nI0817 17:59:02.628671 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:59:02.628695 17345 solver.cpp:244]     Train net output #1: loss = 0.250982 (* 1 = 0.250982 loss)\nI0817 17:59:02.698362 17345 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 17:59:49.833869 17345 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 18:00:16.797053 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76316\nI0817 18:00:16.797116 17345 solver.cpp:404]     Test net output #1: loss = 0.922604 (* 1 = 0.922604 loss)\nI0817 18:00:17.223901 17345 solver.cpp:228] Iteration 7500, loss = 0.184306\nI0817 18:00:17.223963 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:00:17.223989 17345 solver.cpp:244]     Train net output #1: loss = 0.184306 (* 1 = 0.184306 loss)\nI0817 18:00:17.289847 17345 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 18:01:04.436723 17345 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 18:01:31.403434 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7368\nI0817 18:01:31.403496 17345 solver.cpp:404]     Test net output #1: loss = 0.982361 (* 1 = 0.982361 loss)\nI0817 18:01:31.830327 17345 solver.cpp:228] Iteration 7600, loss = 0.201838\nI0817 18:01:31.830389 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:01:31.830413 17345 solver.cpp:244]     Train net output #1: loss = 0.201837 (* 1 = 0.201837 loss)\nI0817 
18:01:31.906285 17345 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 18:02:18.961661 17345 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 18:02:45.758487 17345 solver.cpp:404]     Test net output #0: accuracy = 0.76724\nI0817 18:02:45.758541 17345 solver.cpp:404]     Test net output #1: loss = 0.755447 (* 1 = 0.755447 loss)\nI0817 18:02:46.184211 17345 solver.cpp:228] Iteration 7700, loss = 0.160733\nI0817 18:02:46.184247 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:02:46.184262 17345 solver.cpp:244]     Train net output #1: loss = 0.160733 (* 1 = 0.160733 loss)\nI0817 18:02:46.251790 17345 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 18:03:33.184938 17345 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 18:03:59.839057 17345 solver.cpp:404]     Test net output #0: accuracy = 0.79584\nI0817 18:03:59.839112 17345 solver.cpp:404]     Test net output #1: loss = 0.677161 (* 1 = 0.677161 loss)\nI0817 18:04:00.264930 17345 solver.cpp:228] Iteration 7800, loss = 0.112591\nI0817 18:04:00.264968 17345 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:04:00.264983 17345 solver.cpp:244]     Train net output #1: loss = 0.112591 (* 1 = 0.112591 loss)\nI0817 18:04:00.331668 17345 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 18:04:47.281842 17345 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 18:05:13.930776 17345 solver.cpp:404]     Test net output #0: accuracy = 0.78992\nI0817 18:05:13.930829 17345 solver.cpp:404]     Test net output #1: loss = 0.728862 (* 1 = 0.728862 loss)\nI0817 18:05:14.356533 17345 solver.cpp:228] Iteration 7900, loss = 0.191173\nI0817 18:05:14.356570 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:05:14.356586 17345 solver.cpp:244]     Train net output #1: loss = 0.191173 (* 1 = 0.191173 loss)\nI0817 18:05:14.433562 17345 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 18:06:01.359014 17345 solver.cpp:337] Iteration 8000, 
Testing net (#0)\nI0817 18:06:28.014108 17345 solver.cpp:404]     Test net output #0: accuracy = 0.82248\nI0817 18:06:28.014168 17345 solver.cpp:404]     Test net output #1: loss = 0.625015 (* 1 = 0.625015 loss)\nI0817 18:06:28.440253 17345 solver.cpp:228] Iteration 8000, loss = 0.21421\nI0817 18:06:28.440294 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:06:28.440317 17345 solver.cpp:244]     Train net output #1: loss = 0.21421 (* 1 = 0.21421 loss)\nI0817 18:06:28.515385 17345 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 18:07:15.456888 17345 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 18:07:42.110960 17345 solver.cpp:404]     Test net output #0: accuracy = 0.79848\nI0817 18:07:42.111019 17345 solver.cpp:404]     Test net output #1: loss = 0.714333 (* 1 = 0.714333 loss)\nI0817 18:07:42.536880 17345 solver.cpp:228] Iteration 8100, loss = 0.1366\nI0817 18:07:42.536921 17345 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:07:42.536944 17345 solver.cpp:244]     Train net output #1: loss = 0.1366 (* 1 = 0.1366 loss)\nI0817 18:07:42.609895 17345 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 18:08:29.529307 17345 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 18:08:56.180064 17345 solver.cpp:404]     Test net output #0: accuracy = 0.70916\nI0817 18:08:56.180124 17345 solver.cpp:404]     Test net output #1: loss = 1.26449 (* 1 = 1.26449 loss)\nI0817 18:08:56.606071 17345 solver.cpp:228] Iteration 8200, loss = 0.178993\nI0817 18:08:56.606112 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:08:56.606137 17345 solver.cpp:244]     Train net output #1: loss = 0.178993 (* 1 = 0.178993 loss)\nI0817 18:08:56.678838 17345 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 18:09:43.564194 17345 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 18:10:10.229756 17345 solver.cpp:404]     Test net output #0: accuracy = 0.75896\nI0817 18:10:10.229813 17345 solver.cpp:404] 
    Test net output #1: loss = 0.857439 (* 1 = 0.857439 loss)\nI0817 18:10:10.655827 17345 solver.cpp:228] Iteration 8300, loss = 0.187849\nI0817 18:10:10.655866 17345 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:10:10.655890 17345 solver.cpp:244]     Train net output #1: loss = 0.187849 (* 1 = 0.187849 loss)\nI0817 18:10:10.729979 17345 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 18:10:57.638260 17345 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 18:11:24.296931 17345 solver.cpp:404]     Test net output #0: accuracy = 0.81884\nI0817 18:11:24.296991 17345 solver.cpp:404]     Test net output #1: loss = 0.62798 (* 1 = 0.62798 loss)\nI0817 18:11:24.722921 17345 solver.cpp:228] Iteration 8400, loss = 0.128213\nI0817 18:11:24.722962 17345 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:11:24.722986 17345 solver.cpp:244]     Train net output #1: loss = 0.128213 (* 1 = 0.128213 loss)\nI0817 18:11:24.794785 17345 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 18:12:11.719444 17345 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 18:12:38.378444 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7208\nI0817 18:12:38.378506 17345 solver.cpp:404]     Test net output #1: loss = 1.1162 (* 1 = 1.1162 loss)\nI0817 18:12:38.804285 17345 solver.cpp:228] Iteration 8500, loss = 0.256134\nI0817 18:12:38.804327 17345 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:12:38.804352 17345 solver.cpp:244]     Train net output #1: loss = 0.256134 (* 1 = 0.256134 loss)\nI0817 18:12:38.878051 17345 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 18:13:25.765847 17345 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 18:13:52.431018 17345 solver.cpp:404]     Test net output #0: accuracy = 0.80184\nI0817 18:13:52.431079 17345 solver.cpp:404]     Test net output #1: loss = 0.711676 (* 1 = 0.711676 loss)\nI0817 18:13:52.856783 17345 solver.cpp:228] Iteration 8600, loss = 
0.167611\nI0817 18:13:52.856823 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:13:52.856847 17345 solver.cpp:244]     Train net output #1: loss = 0.167611 (* 1 = 0.167611 loss)\nI0817 18:13:52.927711 17345 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 18:14:39.850811 17345 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 18:15:06.511198 17345 solver.cpp:404]     Test net output #0: accuracy = 0.81692\nI0817 18:15:06.511261 17345 solver.cpp:404]     Test net output #1: loss = 0.668646 (* 1 = 0.668646 loss)\nI0817 18:15:06.936156 17345 solver.cpp:228] Iteration 8700, loss = 0.139569\nI0817 18:15:06.936197 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:15:06.936211 17345 solver.cpp:244]     Train net output #1: loss = 0.139569 (* 1 = 0.139569 loss)\nI0817 18:15:07.008653 17345 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 18:15:53.810067 17345 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 18:16:20.470495 17345 solver.cpp:404]     Test net output #0: accuracy = 0.7928\nI0817 18:16:20.470556 17345 solver.cpp:404]     Test net output #1: loss = 0.824184 (* 1 = 0.824184 loss)\nI0817 18:16:20.895064 17345 solver.cpp:228] Iteration 8800, loss = 0.258152\nI0817 18:16:20.895103 17345 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:16:20.895119 17345 solver.cpp:244]     Train net output #1: loss = 0.258152 (* 1 = 0.258152 loss)\nI0817 18:16:20.963207 17345 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 18:17:07.782793 17345 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 18:17:34.445557 17345 solver.cpp:404]     Test net output #0: accuracy = 0.80652\nI0817 18:17:34.445612 17345 solver.cpp:404]     Test net output #1: loss = 0.82255 (* 1 = 0.82255 loss)\nI0817 18:17:34.870441 17345 solver.cpp:228] Iteration 8900, loss = 0.0808478\nI0817 18:17:34.870476 17345 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:17:34.870491 17345 
solver.cpp:244]     Train net output #1: loss = 0.0808478 (* 1 = 0.0808478 loss)\nI0817 18:17:34.945847 17345 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 18:18:21.909086 17345 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 18:18:48.571072 17345 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0817 18:18:48.571127 17345 solver.cpp:404]     Test net output #1: loss = 0.862962 (* 1 = 0.862962 loss)\nI0817 18:18:48.995668 17345 solver.cpp:228] Iteration 9000, loss = 0.100039\nI0817 18:18:48.995703 17345 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:18:48.995718 17345 solver.cpp:244]     Train net output #1: loss = 0.100039 (* 1 = 0.100039 loss)\nI0817 18:18:49.065500 17345 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 18:19:35.998174 17345 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 18:20:02.661252 17345 solver.cpp:404]     Test net output #0: accuracy = 0.82744\nI0817 18:20:02.661298 17345 solver.cpp:404]     Test net output #1: loss = 0.690191 (* 1 = 0.690191 loss)\nI0817 18:20:03.085979 17345 solver.cpp:228] Iteration 9100, loss = 0.0941399\nI0817 18:20:03.086022 17345 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:20:03.086040 17345 solver.cpp:244]     Train net output #1: loss = 0.0941399 (* 1 = 0.0941399 loss)\nI0817 18:20:03.153172 17345 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 18:20:50.084560 17345 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 18:21:16.747002 17345 solver.cpp:404]     Test net output #0: accuracy = 0.85192\nI0817 18:21:16.747057 17345 solver.cpp:404]     Test net output #1: loss = 0.574227 (* 1 = 0.574227 loss)\nI0817 18:21:17.172318 17345 solver.cpp:228] Iteration 9200, loss = 0.0798575\nI0817 18:21:17.172360 17345 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:21:17.172376 17345 solver.cpp:244]     Train net output #1: loss = 0.0798574 (* 1 = 0.0798574 loss)\nI0817 18:21:17.238692 17345 sgd_solver.cpp:166] 
Iteration 9200, lr = 0.564\nI0817 18:22:04.079748 17345 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 18:22:30.747697 17345 solver.cpp:404]     Test net output #0: accuracy = 0.86732\nI0817 18:22:30.747752 17345 solver.cpp:404]     Test net output #1: loss = 0.551647 (* 1 = 0.551647 loss)\nI0817 18:22:31.172508 17345 solver.cpp:228] Iteration 9300, loss = 0.101956\nI0817 18:22:31.172545 17345 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:22:31.172562 17345 solver.cpp:244]     Train net output #1: loss = 0.101956 (* 1 = 0.101956 loss)\nI0817 18:22:31.242820 17345 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 18:23:18.141577 17345 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 18:23:44.809697 17345 solver.cpp:404]     Test net output #0: accuracy = 0.84092\nI0817 18:23:44.809752 17345 solver.cpp:404]     Test net output #1: loss = 0.698852 (* 1 = 0.698852 loss)\nI0817 18:23:45.234586 17345 solver.cpp:228] Iteration 9400, loss = 0.0337179\nI0817 18:23:45.234617 17345 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 18:23:45.234632 17345 solver.cpp:244]     Train net output #1: loss = 0.0337178 (* 1 = 0.0337178 loss)\nI0817 18:23:45.304291 17345 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 18:24:32.217154 17345 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 18:24:58.884414 17345 solver.cpp:404]     Test net output #0: accuracy = 0.8486\nI0817 18:24:58.884467 17345 solver.cpp:404]     Test net output #1: loss = 0.676389 (* 1 = 0.676389 loss)\nI0817 18:24:59.309797 17345 solver.cpp:228] Iteration 9500, loss = 0.0914484\nI0817 18:24:59.309839 17345 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:24:59.309854 17345 solver.cpp:244]     Train net output #1: loss = 0.0914483 (* 1 = 0.0914483 loss)\nI0817 18:24:59.378975 17345 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 18:25:46.299860 17345 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 18:26:12.964479 
17345 solver.cpp:404]     Test net output #0: accuracy = 0.85956\nI0817 18:26:12.964534 17345 solver.cpp:404]     Test net output #1: loss = 0.681553 (* 1 = 0.681553 loss)\nI0817 18:26:13.390102 17345 solver.cpp:228] Iteration 9600, loss = 0.0258621\nI0817 18:26:13.390146 17345 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 18:26:13.390161 17345 solver.cpp:244]     Train net output #1: loss = 0.025862 (* 1 = 0.025862 loss)\nI0817 18:26:13.457438 17345 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 18:27:00.364756 17345 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 18:27:27.020292 17345 solver.cpp:404]     Test net output #0: accuracy = 0.88904\nI0817 18:27:27.020347 17345 solver.cpp:404]     Test net output #1: loss = 0.483573 (* 1 = 0.483573 loss)\nI0817 18:27:27.445689 17345 solver.cpp:228] Iteration 9700, loss = 0.0199523\nI0817 18:27:27.445732 17345 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 18:27:27.445749 17345 solver.cpp:244]     Train net output #1: loss = 0.0199522 (* 1 = 0.0199522 loss)\nI0817 18:27:27.513144 17345 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 18:28:14.393153 17345 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 18:28:41.051162 17345 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0817 18:28:41.051214 17345 solver.cpp:404]     Test net output #1: loss = 0.4837 (* 1 = 0.4837 loss)\nI0817 18:28:41.476709 17345 solver.cpp:228] Iteration 9800, loss = 0.00418442\nI0817 18:28:41.476753 17345 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 18:28:41.476768 17345 solver.cpp:244]     Train net output #1: loss = 0.00418434 (* 1 = 0.00418434 loss)\nI0817 18:28:41.546854 17345 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 18:29:28.460436 17345 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 18:29:55.130314 17345 solver.cpp:404]     Test net output #0: accuracy = 0.902641\nI0817 18:29:55.130367 17345 solver.cpp:404]     Test net output #1: 
loss = 0.398095 (* 1 = 0.398095 loss)\nI0817 18:29:55.556247 17345 solver.cpp:228] Iteration 9900, loss = 0.00221887\nI0817 18:29:55.556293 17345 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 18:29:55.556308 17345 solver.cpp:244]     Train net output #1: loss = 0.00221879 (* 1 = 0.00221879 loss)\nI0817 18:29:55.630501 17345 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 18:30:42.570489 17345 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kRes20Tab1_iter_10000.caffemodel\nI0817 18:30:42.643676 17345 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kRes20Tab1_iter_10000.solverstate\nI0817 18:30:42.789958 17345 solver.cpp:317] Iteration 10000, loss = 0.00184778\nI0817 18:30:42.789990 17345 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 18:31:09.459599 17345 solver.cpp:404]     Test net output #0: accuracy = 0.9042\nI0817 18:31:09.459653 17345 solver.cpp:404]     Test net output #1: loss = 0.400408 (* 1 = 0.400408 loss)\nI0817 18:31:09.459664 17345 solver.cpp:322] Optimization Done.\nI0817 18:31:11.429644 17345 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kTr10kTab1",
    "content": "I0817 16:08:12.263655 17615 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:08:12.265838 17615 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:08:12.267029 17615 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:08:12.268218 17615 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:08:12.269610 17615 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:08:12.270814 17615 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:08:12.272017 17615 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:08:12.273216 17615 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:08:12.274416 17615 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:08:12.691339 17615 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kTr10kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:08:12.694919 17615 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:08:12.709002 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:12.709066 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:12.710109 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:08:12.710160 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:08:12.710171 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:08:12.710180 17615 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:08:12.710193 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:08:12.710202 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:08:12.710211 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:08:12.710220 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:08:12.710230 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:08:12.710239 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:08:12.710249 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:08:12.710256 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:08:12.710266 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:08:12.710275 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:08:12.710285 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:08:12.710294 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:08:12.710302 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:08:12.710311 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:08:12.710320 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:08:12.710330 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:08:12.710351 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:08:12.710361 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:08:12.710373 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:08:12.710383 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:08:12.710392 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:08:12.710400 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:08:12.710409 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:08:12.710417 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:08:12.710427 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:08:12.710434 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:08:12.710444 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:08:12.710453 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:08:12.710463 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:08:12.710470 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:08:12.710479 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:08:12.710489 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:08:12.710499 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:08:12.710506 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:08:12.710515 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:08:12.710525 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:08:12.710536 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:08:12.710544 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:08:12.710552 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:08:12.710561 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:08:12.710571 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:08:12.710579 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:08:12.710588 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:08:12.710597 17615 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:08:12.710605 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:08:12.710620 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:08:12.710630 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:08:12.710646 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:08:12.710656 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:08:12.710664 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:08:12.710675 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:08:12.710681 17615 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:08:12.712389 17615 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train10k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:08:12.714296 17615 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:08:12.715435 17615 net.cpp:100] Creating Layer dataLayer\nI0817 16:08:12.715502 17615 net.cpp:408] dataLayer -> data_top\nI0817 16:08:12.715685 17615 net.cpp:408] dataLayer -> label\nI0817 16:08:12.715777 17615 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:08:12.746305 17620 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train10k_lmdb\nI0817 16:08:12.746752 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:12.754341 17615 net.cpp:150] Setting up dataLayer\nI0817 16:08:12.754403 17615 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:08:12.754416 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.754422 17615 net.cpp:165] Memory required for data: 1536500\nI0817 16:08:12.754436 17615 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:08:12.754449 17615 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:08:12.754457 17615 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:08:12.754474 17615 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:08:12.754489 17615 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:08:12.754560 17615 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:08:12.754575 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.754581 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.754586 17615 net.cpp:165] Memory required for data: 1537500\nI0817 16:08:12.754591 17615 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:08:12.754658 17615 
net.cpp:100] Creating Layer pre_conv\nI0817 16:08:12.754672 17615 net.cpp:434] pre_conv <- data_top\nI0817 16:08:12.754684 17615 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:08:12.756400 17615 net.cpp:150] Setting up pre_conv\nI0817 16:08:12.756422 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.756428 17615 net.cpp:165] Memory required for data: 9729500\nI0817 16:08:12.756490 17615 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:08:12.756557 17615 net.cpp:100] Creating Layer pre_bn\nI0817 16:08:12.756569 17615 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:08:12.756578 17615 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:08:12.757114 17621 blocking_queue.cpp:50] Waiting for data\nI0817 16:08:12.757135 17615 net.cpp:150] Setting up pre_bn\nI0817 16:08:12.757155 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.757161 17615 net.cpp:165] Memory required for data: 17921500\nI0817 16:08:12.757179 17615 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:08:12.757232 17615 net.cpp:100] Creating Layer pre_scale\nI0817 16:08:12.757242 17615 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:08:12.757252 17615 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:08:12.757423 17615 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:08:12.757683 17615 net.cpp:150] Setting up pre_scale\nI0817 16:08:12.757699 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.757704 17615 net.cpp:165] Memory required for data: 26113500\nI0817 16:08:12.757715 17615 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:08:12.757760 17615 net.cpp:100] Creating Layer pre_relu\nI0817 16:08:12.757771 17615 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:08:12.757781 17615 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:08:12.757793 17615 net.cpp:150] Setting up pre_relu\nI0817 16:08:12.757800 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.757805 17615 net.cpp:165] Memory required for data: 
34305500\nI0817 16:08:12.757810 17615 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:08:12.757818 17615 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:08:12.757823 17615 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:08:12.757833 17615 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:08:12.757843 17615 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:08:12.757889 17615 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:08:12.757901 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.757908 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.757913 17615 net.cpp:165] Memory required for data: 50689500\nI0817 16:08:12.757918 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:08:12.757933 17615 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:08:12.757939 17615 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:08:12.757948 17615 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:08:12.758255 17615 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:08:12.758270 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.758275 17615 net.cpp:165] Memory required for data: 58881500\nI0817 16:08:12.758291 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:08:12.758306 17615 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:08:12.758311 17615 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:08:12.758319 17615 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:08:12.758546 17615 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:08:12.758560 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.758565 17615 net.cpp:165] Memory required for data: 67073500\nI0817 16:08:12.758576 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:08:12.758586 17615 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:08:12.758591 17615 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:08:12.758602 17615 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.758659 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:08:12.758800 17615 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:08:12.758815 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.758819 17615 net.cpp:165] Memory required for data: 75265500\nI0817 16:08:12.758828 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:08:12.758844 17615 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:08:12.758852 17615 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:08:12.758858 17615 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.758867 17615 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:08:12.758874 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.758879 17615 net.cpp:165] Memory required for data: 83457500\nI0817 16:08:12.758884 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:08:12.758899 17615 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:08:12.758905 17615 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:08:12.758916 17615 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:08:12.759220 17615 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:08:12.759234 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.759239 17615 net.cpp:165] Memory required for data: 91649500\nI0817 16:08:12.759248 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:08:12.759268 17615 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:08:12.759274 17615 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:08:12.759285 17615 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:08:12.759516 17615 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:08:12.759528 17615 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.759533 17615 net.cpp:165] Memory required for data: 99841500\nI0817 16:08:12.759547 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:08:12.759559 17615 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:08:12.759565 17615 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:08:12.759573 17615 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.759636 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:08:12.759773 17615 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:08:12.759788 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.759793 17615 net.cpp:165] Memory required for data: 108033500\nI0817 16:08:12.759801 17615 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:08:12.759852 17615 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:08:12.759863 17615 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:08:12.759871 17615 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:08:12.759883 17615 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:08:12.759953 17615 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:08:12.759970 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.759976 17615 net.cpp:165] Memory required for data: 116225500\nI0817 16:08:12.759982 17615 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:08:12.759990 17615 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:08:12.759996 17615 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:08:12.760004 17615 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.760013 17615 net.cpp:150] Setting up L1_b1_relu\nI0817 16:08:12.760020 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.760025 17615 net.cpp:165] Memory required for data: 124417500\nI0817 16:08:12.760030 17615 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.760051 17615 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.760057 17615 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:08:12.760064 17615 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:08:12.760073 17615 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:08:12.760118 17615 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.760129 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.760136 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.760149 17615 net.cpp:165] Memory required for data: 140801500\nI0817 16:08:12.760154 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:08:12.760165 17615 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:08:12.760171 17615 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:08:12.760184 17615 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:08:12.760514 17615 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:08:12.760529 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.760534 17615 net.cpp:165] Memory required for data: 148993500\nI0817 16:08:12.760543 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:08:12.760552 17615 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:08:12.760558 17615 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:08:12.760571 17615 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:08:12.760849 17615 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:08:12.760869 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.760874 17615 net.cpp:165] Memory required for data: 157185500\nI0817 16:08:12.760885 17615 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:08:12.760895 17615 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:08:12.760900 17615 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:08:12.760907 17615 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.760958 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:08:12.761101 17615 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:08:12.761113 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.761118 17615 net.cpp:165] Memory required for data: 165377500\nI0817 16:08:12.761128 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:08:12.761139 17615 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:08:12.761145 17615 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:08:12.761152 17615 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.761162 17615 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:08:12.761168 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.761173 17615 net.cpp:165] Memory required for data: 173569500\nI0817 16:08:12.761178 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:08:12.761193 17615 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:08:12.761198 17615 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:08:12.761209 17615 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:08:12.761509 17615 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:08:12.761523 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.761528 17615 net.cpp:165] Memory required for data: 181761500\nI0817 16:08:12.761538 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:08:12.761546 17615 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:08:12.761553 17615 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:08:12.761562 17615 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:08:12.761804 17615 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:08:12.761818 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.761823 17615 net.cpp:165] Memory required for data: 189953500\nI0817 16:08:12.761842 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:08:12.761852 17615 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:08:12.761857 17615 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:08:12.761864 17615 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.761919 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:08:12.762058 17615 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:08:12.762071 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762076 17615 net.cpp:165] Memory required for data: 198145500\nI0817 16:08:12.762085 17615 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:08:12.762101 17615 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:08:12.762118 17615 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:08:12.762125 17615 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:08:12.762133 17615 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:08:12.762167 17615 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:08:12.762178 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762183 17615 net.cpp:165] Memory required for data: 206337500\nI0817 16:08:12.762189 17615 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:08:12.762197 17615 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:08:12.762202 17615 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:08:12.762209 17615 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.762218 17615 net.cpp:150] Setting up L1_b2_relu\nI0817 16:08:12.762224 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762229 17615 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:08:12.762233 17615 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.762241 17615 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.762246 17615 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:08:12.762253 17615 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:08:12.762262 17615 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:08:12.762306 17615 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.762318 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762325 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762329 17615 net.cpp:165] Memory required for data: 230913500\nI0817 16:08:12.762334 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:08:12.762348 17615 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:08:12.762354 17615 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:08:12.762364 17615 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:08:12.762675 17615 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:08:12.762688 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762694 17615 net.cpp:165] Memory required for data: 239105500\nI0817 16:08:12.762702 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:08:12.762714 17615 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:08:12.762720 17615 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:08:12.762732 17615 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:08:12.762961 17615 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:08:12.762974 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.762979 17615 net.cpp:165] Memory required for data: 
247297500\nI0817 16:08:12.762990 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:08:12.762998 17615 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:08:12.763005 17615 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:08:12.763012 17615 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.763069 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:08:12.763207 17615 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:08:12.763221 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.763226 17615 net.cpp:165] Memory required for data: 255489500\nI0817 16:08:12.763234 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:08:12.763245 17615 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:08:12.763250 17615 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:08:12.763258 17615 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.763267 17615 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:08:12.763281 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.763286 17615 net.cpp:165] Memory required for data: 263681500\nI0817 16:08:12.763291 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:08:12.763305 17615 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:08:12.763311 17615 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:08:12.763322 17615 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:08:12.763635 17615 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:08:12.763649 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.763655 17615 net.cpp:165] Memory required for data: 271873500\nI0817 16:08:12.763664 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:08:12.763679 17615 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:08:12.763685 17615 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:08:12.763695 17615 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:08:12.763934 17615 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:08:12.763948 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.763953 17615 net.cpp:165] Memory required for data: 280065500\nI0817 16:08:12.763963 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:08:12.763972 17615 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:08:12.763978 17615 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:08:12.763985 17615 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.764039 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:08:12.764180 17615 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:08:12.764194 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.764199 17615 net.cpp:165] Memory required for data: 288257500\nI0817 16:08:12.764209 17615 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:08:12.764219 17615 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:08:12.764226 17615 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:08:12.764232 17615 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:08:12.764240 17615 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:08:12.764273 17615 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:08:12.764283 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.764288 17615 net.cpp:165] Memory required for data: 296449500\nI0817 16:08:12.764293 17615 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:08:12.764302 17615 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:08:12.764307 17615 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:08:12.764317 17615 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.764325 17615 net.cpp:150] Setting up L1_b3_relu\nI0817 16:08:12.764333 17615 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:08:12.764336 17615 net.cpp:165] Memory required for data: 304641500\nI0817 16:08:12.764341 17615 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.764349 17615 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.764354 17615 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:08:12.764364 17615 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:08:12.764374 17615 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:08:12.764415 17615 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.764425 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.764432 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.764437 17615 net.cpp:165] Memory required for data: 321025500\nI0817 16:08:12.764442 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:08:12.764457 17615 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:08:12.764464 17615 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:08:12.764479 17615 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:08:12.764797 17615 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:08:12.764812 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.764817 17615 net.cpp:165] Memory required for data: 329217500\nI0817 16:08:12.764827 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:08:12.764838 17615 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:08:12.764844 17615 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:08:12.764855 17615 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:08:12.765087 17615 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:08:12.765101 17615 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:08:12.765106 17615 net.cpp:165] Memory required for data: 337409500\nI0817 16:08:12.765116 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:08:12.765125 17615 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:08:12.765130 17615 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:08:12.765141 17615 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.765194 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:08:12.765331 17615 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:08:12.765350 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.765357 17615 net.cpp:165] Memory required for data: 345601500\nI0817 16:08:12.765365 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:08:12.765373 17615 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:08:12.765378 17615 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:08:12.765385 17615 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.765395 17615 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:08:12.765401 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.765406 17615 net.cpp:165] Memory required for data: 353793500\nI0817 16:08:12.765411 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:08:12.765425 17615 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:08:12.765431 17615 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:08:12.765441 17615 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:08:12.765760 17615 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:08:12.765775 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.765780 17615 net.cpp:165] Memory required for data: 361985500\nI0817 16:08:12.765789 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:08:12.765800 17615 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:08:12.765806 17615 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:08:12.765817 17615 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:08:12.766053 17615 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:08:12.766067 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766072 17615 net.cpp:165] Memory required for data: 370177500\nI0817 16:08:12.766082 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:08:12.766090 17615 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:08:12.766096 17615 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:08:12.766106 17615 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.766160 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:08:12.766299 17615 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:08:12.766314 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766319 17615 net.cpp:165] Memory required for data: 378369500\nI0817 16:08:12.766329 17615 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:08:12.766336 17615 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:08:12.766342 17615 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:08:12.766350 17615 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:08:12.766357 17615 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:08:12.766398 17615 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:08:12.766408 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766413 17615 net.cpp:165] Memory required for data: 386561500\nI0817 16:08:12.766418 17615 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:08:12.766429 17615 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:08:12.766435 17615 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:08:12.766443 17615 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.766453 17615 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:08:12.766458 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766463 17615 net.cpp:165] Memory required for data: 394753500\nI0817 16:08:12.766469 17615 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.766479 17615 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.766484 17615 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:08:12.766491 17615 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:08:12.766500 17615 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:08:12.766542 17615 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.766556 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766564 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766569 17615 net.cpp:165] Memory required for data: 411137500\nI0817 16:08:12.766574 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:08:12.766585 17615 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:08:12.766592 17615 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:08:12.766600 17615 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:08:12.766943 17615 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:08:12.766958 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.766963 17615 net.cpp:165] Memory required for data: 419329500\nI0817 16:08:12.766989 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:08:12.767001 17615 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:08:12.767007 17615 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:08:12.767016 17615 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:08:12.767253 17615 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:08:12.767267 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.767272 17615 net.cpp:165] Memory required for data: 427521500\nI0817 16:08:12.767282 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:08:12.767292 17615 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:08:12.767297 17615 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:08:12.767310 17615 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.767362 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:08:12.767499 17615 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:08:12.767515 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.767521 17615 net.cpp:165] Memory required for data: 435713500\nI0817 16:08:12.767530 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:08:12.767539 17615 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:08:12.767544 17615 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:08:12.767550 17615 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.767560 17615 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:08:12.767566 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.767571 17615 net.cpp:165] Memory required for data: 443905500\nI0817 16:08:12.767576 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:08:12.767590 17615 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:08:12.767596 17615 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:08:12.767630 17615 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:08:12.767941 17615 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:08:12.767956 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.767961 17615 net.cpp:165] Memory required for data: 452097500\nI0817 16:08:12.767971 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:08:12.767982 17615 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:08:12.767988 17615 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:08:12.767999 17615 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:08:12.768232 17615 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:08:12.768245 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768250 17615 net.cpp:165] Memory required for data: 460289500\nI0817 16:08:12.768261 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:08:12.768270 17615 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:08:12.768275 17615 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:08:12.768283 17615 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.768339 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:08:12.768476 17615 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:08:12.768489 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768494 17615 net.cpp:165] Memory required for data: 468481500\nI0817 16:08:12.768504 17615 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:08:12.768517 17615 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:08:12.768522 17615 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:08:12.768529 17615 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:08:12.768537 17615 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:08:12.768569 17615 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:08:12.768579 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768584 17615 net.cpp:165] Memory required for data: 476673500\nI0817 16:08:12.768589 17615 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:08:12.768596 17615 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:08:12.768601 17615 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:08:12.768617 17615 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.768628 17615 net.cpp:150] Setting up L1_b5_relu\nI0817 16:08:12.768635 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768640 17615 net.cpp:165] Memory required for data: 484865500\nI0817 16:08:12.768646 17615 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.768652 17615 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.768657 17615 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:08:12.768667 17615 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:08:12.768676 17615 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:08:12.768719 17615 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.768731 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768738 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.768743 17615 net.cpp:165] Memory required for data: 501249500\nI0817 16:08:12.768748 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:08:12.768762 17615 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:08:12.768769 17615 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:08:12.768777 17615 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:08:12.769089 17615 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:08:12.769104 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.769109 17615 net.cpp:165] Memory required for data: 509441500\nI0817 16:08:12.769124 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:08:12.769136 17615 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:08:12.769142 17615 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:08:12.769153 17615 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:08:12.769387 17615 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:08:12.769400 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.769405 17615 net.cpp:165] Memory required for data: 517633500\nI0817 16:08:12.769417 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:08:12.769424 17615 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:08:12.769430 17615 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:08:12.769441 17615 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.769493 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:08:12.769639 17615 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:08:12.769655 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.769661 17615 net.cpp:165] Memory required for data: 525825500\nI0817 16:08:12.769670 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:08:12.769678 17615 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:08:12.769685 17615 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:08:12.769691 17615 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.769701 17615 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:08:12.769707 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.769712 17615 net.cpp:165] Memory required for data: 534017500\nI0817 16:08:12.769716 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:08:12.769732 17615 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:08:12.769738 17615 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:08:12.769749 17615 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:08:12.770061 17615 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:08:12.770076 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770081 17615 net.cpp:165] Memory required for data: 542209500\nI0817 16:08:12.770089 17615 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:08:12.770100 17615 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:08:12.770107 17615 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:08:12.770118 17615 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:08:12.770350 17615 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:08:12.770364 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770368 17615 net.cpp:165] Memory required for data: 550401500\nI0817 16:08:12.770380 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:08:12.770387 17615 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:08:12.770393 17615 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:08:12.770401 17615 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.770454 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:08:12.770594 17615 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:08:12.770607 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770618 17615 net.cpp:165] Memory required for data: 558593500\nI0817 16:08:12.770628 17615 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:08:12.770645 17615 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:08:12.770653 17615 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:08:12.770659 17615 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:08:12.770669 17615 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:08:12.770702 17615 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:08:12.770717 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770722 17615 net.cpp:165] Memory required for data: 566785500\nI0817 16:08:12.770728 17615 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:08:12.770745 17615 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:08:12.770750 17615 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:08:12.770757 17615 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.770766 17615 net.cpp:150] Setting up L1_b6_relu\nI0817 16:08:12.770773 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770778 17615 net.cpp:165] Memory required for data: 574977500\nI0817 16:08:12.770783 17615 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.770793 17615 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.770798 17615 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:08:12.770805 17615 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:08:12.770815 17615 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:08:12.770860 17615 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.770872 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770879 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.770884 17615 net.cpp:165] Memory required for data: 591361500\nI0817 16:08:12.770889 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:08:12.770901 17615 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:08:12.770907 17615 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:08:12.770918 17615 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:08:12.771232 17615 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:08:12.771246 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.771251 17615 net.cpp:165] Memory required for data: 599553500\nI0817 16:08:12.771260 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:08:12.771270 17615 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:08:12.771275 17615 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:08:12.771283 17615 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:08:12.771524 17615 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:08:12.771538 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.771543 17615 net.cpp:165] Memory required for data: 607745500\nI0817 16:08:12.771553 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:08:12.771565 17615 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:08:12.771571 17615 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:08:12.771579 17615 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.771639 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:08:12.771780 17615 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:08:12.771793 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.771800 17615 net.cpp:165] Memory required for data: 615937500\nI0817 16:08:12.771808 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:08:12.771816 17615 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:08:12.771821 17615 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:08:12.771833 17615 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.771843 17615 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:08:12.771852 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.771855 17615 net.cpp:165] Memory required for data: 624129500\nI0817 16:08:12.771860 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:08:12.771874 17615 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:08:12.771880 17615 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:08:12.771888 17615 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:08:12.772194 17615 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:08:12.772207 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.772212 17615 
net.cpp:165] Memory required for data: 632321500\nI0817 16:08:12.772228 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:08:12.772240 17615 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:08:12.772248 17615 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:08:12.772255 17615 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:08:12.772496 17615 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:08:12.772509 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.772514 17615 net.cpp:165] Memory required for data: 640513500\nI0817 16:08:12.772526 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:08:12.772534 17615 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:08:12.772539 17615 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:08:12.772550 17615 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.772605 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:08:12.772754 17615 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:08:12.772768 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.772773 17615 net.cpp:165] Memory required for data: 648705500\nI0817 16:08:12.772783 17615 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:08:12.772791 17615 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:08:12.772797 17615 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:08:12.772804 17615 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:08:12.772815 17615 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:08:12.772846 17615 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:08:12.772858 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.772864 17615 net.cpp:165] Memory required for data: 656897500\nI0817 16:08:12.772869 17615 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:08:12.772877 17615 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:08:12.772882 17615 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:08:12.772891 17615 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.772898 17615 net.cpp:150] Setting up L1_b7_relu\nI0817 16:08:12.772905 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.772910 17615 net.cpp:165] Memory required for data: 665089500\nI0817 16:08:12.772914 17615 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.772924 17615 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.772929 17615 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:08:12.772938 17615 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:08:12.772946 17615 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:08:12.772991 17615 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.773003 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.773010 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.773015 17615 net.cpp:165] Memory required for data: 681473500\nI0817 16:08:12.773020 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:08:12.773031 17615 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:08:12.773037 17615 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:08:12.773049 17615 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:08:12.773365 17615 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:08:12.773380 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.773385 17615 net.cpp:165] Memory required for data: 689665500\nI0817 16:08:12.773393 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:08:12.773403 17615 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:08:12.773409 17615 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:08:12.773416 17615 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:08:12.773675 17615 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:08:12.773689 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.773694 17615 net.cpp:165] Memory required for data: 697857500\nI0817 16:08:12.773705 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:08:12.773716 17615 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:08:12.773722 17615 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:08:12.773730 17615 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.773787 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:08:12.773929 17615 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:08:12.773943 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.773948 17615 net.cpp:165] Memory required for data: 706049500\nI0817 16:08:12.773957 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:08:12.773965 17615 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:08:12.773972 17615 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:08:12.773982 17615 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.773991 17615 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:08:12.773998 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.774003 17615 net.cpp:165] Memory required for data: 714241500\nI0817 16:08:12.774008 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:08:12.774024 17615 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:08:12.774029 17615 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:08:12.774039 17615 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:08:12.774353 17615 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:08:12.774366 17615 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.774371 17615 net.cpp:165] Memory required for data: 722433500\nI0817 16:08:12.774380 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:08:12.774392 17615 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:08:12.774399 17615 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:08:12.774406 17615 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:08:12.774655 17615 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:08:12.774668 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.774673 17615 net.cpp:165] Memory required for data: 730625500\nI0817 16:08:12.774684 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:08:12.774693 17615 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:08:12.774699 17615 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:08:12.774710 17615 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.774762 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:08:12.774900 17615 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:08:12.774916 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.774921 17615 net.cpp:165] Memory required for data: 738817500\nI0817 16:08:12.774930 17615 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:08:12.774940 17615 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:08:12.774946 17615 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:08:12.774953 17615 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:08:12.774960 17615 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:08:12.774993 17615 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:08:12.775003 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775007 17615 net.cpp:165] Memory required for data: 747009500\nI0817 16:08:12.775012 17615 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:08:12.775023 17615 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:08:12.775029 17615 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:08:12.775037 17615 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.775045 17615 net.cpp:150] Setting up L1_b8_relu\nI0817 16:08:12.775053 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775063 17615 net.cpp:165] Memory required for data: 755201500\nI0817 16:08:12.775068 17615 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.775079 17615 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.775084 17615 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:08:12.775091 17615 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:08:12.775101 17615 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:08:12.775143 17615 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.775158 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775166 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775171 17615 net.cpp:165] Memory required for data: 771585500\nI0817 16:08:12.775177 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:08:12.775187 17615 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:08:12.775193 17615 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:08:12.775202 17615 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:08:12.775526 17615 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:08:12.775540 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775545 17615 net.cpp:165] Memory required for data: 779777500\nI0817 16:08:12.775554 17615 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:08:12.775566 17615 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:08:12.775573 17615 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:08:12.775581 17615 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:08:12.775833 17615 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:08:12.775851 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.775856 17615 net.cpp:165] Memory required for data: 787969500\nI0817 16:08:12.775866 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:08:12.775874 17615 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:08:12.775880 17615 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:08:12.775888 17615 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.775940 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:08:12.776087 17615 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:08:12.776100 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.776105 17615 net.cpp:165] Memory required for data: 796161500\nI0817 16:08:12.776114 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:08:12.776125 17615 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:08:12.776131 17615 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:08:12.776139 17615 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.776151 17615 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:08:12.776159 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.776163 17615 net.cpp:165] Memory required for data: 804353500\nI0817 16:08:12.776168 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:08:12.776178 17615 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:08:12.776185 17615 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:08:12.776196 17615 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:08:12.776510 17615 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:08:12.776525 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.776530 17615 net.cpp:165] Memory required for data: 812545500\nI0817 16:08:12.776538 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:08:12.776547 17615 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:08:12.776553 17615 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:08:12.776564 17615 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:08:12.776819 17615 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:08:12.776834 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.776839 17615 net.cpp:165] Memory required for data: 820737500\nI0817 16:08:12.776867 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:08:12.776880 17615 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:08:12.776886 17615 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:08:12.776895 17615 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.776947 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:08:12.777087 17615 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:08:12.777101 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.777106 17615 net.cpp:165] Memory required for data: 828929500\nI0817 16:08:12.777114 17615 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:08:12.777123 17615 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:08:12.777129 17615 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:08:12.777137 17615 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:08:12.777148 17615 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:08:12.777179 17615 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:08:12.777190 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.777195 17615 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:08:12.777201 17615 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:08:12.777209 17615 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:08:12.777215 17615 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:08:12.777221 17615 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.777230 17615 net.cpp:150] Setting up L1_b9_relu\nI0817 16:08:12.777237 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.777241 17615 net.cpp:165] Memory required for data: 845313500\nI0817 16:08:12.777246 17615 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.777256 17615 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.777261 17615 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:08:12.777269 17615 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:08:12.777278 17615 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:08:12.777328 17615 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.777339 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.777346 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.777350 17615 net.cpp:165] Memory required for data: 861697500\nI0817 16:08:12.777355 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:08:12.777366 17615 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:08:12.777374 17615 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:08:12.777384 17615 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:08:12.777710 17615 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:08:12.777724 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.777729 17615 net.cpp:165] Memory required for data: 
863745500\nI0817 16:08:12.777739 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:08:12.777747 17615 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:08:12.777753 17615 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:08:12.777765 17615 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:08:12.778026 17615 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:08:12.778040 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.778046 17615 net.cpp:165] Memory required for data: 865793500\nI0817 16:08:12.778057 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:08:12.778069 17615 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:08:12.778081 17615 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:08:12.778090 17615 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.778144 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:08:12.778285 17615 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:08:12.778297 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.778303 17615 net.cpp:165] Memory required for data: 867841500\nI0817 16:08:12.778312 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:08:12.778322 17615 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:08:12.778329 17615 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:08:12.778337 17615 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.778347 17615 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:08:12.778352 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.778357 17615 net.cpp:165] Memory required for data: 869889500\nI0817 16:08:12.778362 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:08:12.778378 17615 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:08:12.778384 17615 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:08:12.778396 17615 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:08:12.778718 17615 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:08:12.778733 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.778738 17615 net.cpp:165] Memory required for data: 871937500\nI0817 16:08:12.778748 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:08:12.778756 17615 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:08:12.778762 17615 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:08:12.778770 17615 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:08:12.779012 17615 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:08:12.779026 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.779031 17615 net.cpp:165] Memory required for data: 873985500\nI0817 16:08:12.779042 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:08:12.779049 17615 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:08:12.779055 17615 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:08:12.779067 17615 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.779121 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:08:12.779268 17615 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:08:12.779280 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.779286 17615 net.cpp:165] Memory required for data: 876033500\nI0817 16:08:12.779295 17615 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:08:12.779305 17615 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:08:12.779311 17615 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:08:12.779325 17615 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:08:12.779407 17615 net.cpp:150] Setting up L2_b1_pool\nI0817 16:08:12.779428 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.779433 17615 net.cpp:165] Memory required for data: 878081500\nI0817 16:08:12.779438 17615 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:08:12.779448 17615 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:08:12.779454 17615 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:08:12.779461 17615 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:08:12.779469 17615 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:08:12.779505 17615 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:08:12.779517 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.779522 17615 net.cpp:165] Memory required for data: 880129500\nI0817 16:08:12.779527 17615 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:08:12.779536 17615 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:08:12.779541 17615 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:08:12.779548 17615 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.779566 17615 net.cpp:150] Setting up L2_b1_relu\nI0817 16:08:12.779572 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.779577 17615 net.cpp:165] Memory required for data: 882177500\nI0817 16:08:12.779582 17615 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:08:12.779639 17615 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:08:12.779654 17615 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:08:12.781968 17615 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:08:12.781991 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.781996 17615 net.cpp:165] Memory required for data: 884225500\nI0817 16:08:12.782002 17615 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:08:12.782013 17615 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:08:12.782019 17615 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:08:12.782027 17615 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:08:12.782035 17615 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:08:12.782115 17615 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:08:12.782131 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.782136 17615 net.cpp:165] Memory required for data: 888321500\nI0817 16:08:12.782142 17615 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.782150 17615 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.782162 17615 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:08:12.782171 17615 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:08:12.782181 17615 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:08:12.782232 17615 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.782243 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.782249 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.782254 17615 net.cpp:165] Memory required for data: 896513500\nI0817 16:08:12.782260 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:08:12.782274 17615 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:08:12.782281 17615 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:08:12.782290 17615 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:08:12.783732 17615 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:08:12.783751 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.783756 17615 net.cpp:165] Memory required for data: 900609500\nI0817 16:08:12.783766 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:08:12.783776 17615 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:08:12.783782 17615 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:08:12.783793 17615 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:08:12.784040 17615 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:08:12.784054 17615 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:08:12.784060 17615 net.cpp:165] Memory required for data: 904705500\nI0817 16:08:12.784070 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:08:12.784081 17615 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:08:12.784088 17615 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:08:12.784096 17615 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.784150 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:08:12.784298 17615 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:08:12.784312 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.784317 17615 net.cpp:165] Memory required for data: 908801500\nI0817 16:08:12.784325 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:08:12.784337 17615 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:08:12.784343 17615 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:08:12.784351 17615 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.784368 17615 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:08:12.784376 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.784381 17615 net.cpp:165] Memory required for data: 912897500\nI0817 16:08:12.784386 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:08:12.784404 17615 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:08:12.784409 17615 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:08:12.784420 17615 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:08:12.784883 17615 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:08:12.784898 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.784904 17615 net.cpp:165] Memory required for data: 916993500\nI0817 16:08:12.784912 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:08:12.784922 17615 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:08:12.784929 
17615 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:08:12.784936 17615 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:08:12.785182 17615 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:08:12.785197 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785202 17615 net.cpp:165] Memory required for data: 921089500\nI0817 16:08:12.785212 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:08:12.785221 17615 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:08:12.785228 17615 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:08:12.785238 17615 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.785292 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:08:12.785437 17615 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:08:12.785450 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785455 17615 net.cpp:165] Memory required for data: 925185500\nI0817 16:08:12.785465 17615 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:08:12.785473 17615 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:08:12.785480 17615 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:08:12.785487 17615 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:08:12.785497 17615 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:08:12.785524 17615 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:08:12.785537 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785542 17615 net.cpp:165] Memory required for data: 929281500\nI0817 16:08:12.785547 17615 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:08:12.785555 17615 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:08:12.785562 17615 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:08:12.785568 17615 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.785578 17615 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:08:12.785584 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785588 17615 net.cpp:165] Memory required for data: 933377500\nI0817 16:08:12.785593 17615 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.785604 17615 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.785609 17615 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:08:12.785624 17615 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:08:12.785634 17615 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:08:12.785681 17615 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.785693 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785701 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.785704 17615 net.cpp:165] Memory required for data: 941569500\nI0817 16:08:12.785711 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:08:12.785728 17615 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:08:12.785735 17615 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:08:12.785748 17615 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:08:12.786211 17615 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:08:12.786224 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.786231 17615 net.cpp:165] Memory required for data: 945665500\nI0817 16:08:12.786238 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:08:12.786247 17615 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:08:12.786254 17615 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:08:12.786265 17615 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:08:12.786512 17615 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:08:12.786525 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.786530 17615 net.cpp:165] Memory required for data: 949761500\nI0817 16:08:12.786540 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:08:12.786552 17615 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:08:12.786558 17615 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:08:12.786566 17615 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.786626 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:08:12.786782 17615 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:08:12.786795 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.786800 17615 net.cpp:165] Memory required for data: 953857500\nI0817 16:08:12.786809 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:08:12.786818 17615 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:08:12.786823 17615 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:08:12.786834 17615 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.786844 17615 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:08:12.786850 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.786855 17615 net.cpp:165] Memory required for data: 957953500\nI0817 16:08:12.786860 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:08:12.786873 17615 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:08:12.786880 17615 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:08:12.786890 17615 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:08:12.787346 17615 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:08:12.787361 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.787366 17615 net.cpp:165] Memory required for data: 962049500\nI0817 16:08:12.787375 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:08:12.787384 17615 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:08:12.787390 17615 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:08:12.787398 17615 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:08:12.787653 17615 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:08:12.787667 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.787672 17615 net.cpp:165] Memory required for data: 966145500\nI0817 16:08:12.787683 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:08:12.787691 17615 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:08:12.787698 17615 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:08:12.787708 17615 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.787761 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:08:12.787912 17615 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:08:12.787925 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.787930 17615 net.cpp:165] Memory required for data: 970241500\nI0817 16:08:12.787940 17615 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:08:12.787948 17615 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:08:12.787955 17615 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:08:12.787961 17615 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:08:12.787978 17615 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:08:12.788007 17615 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:08:12.788018 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.788023 17615 net.cpp:165] Memory required for data: 974337500\nI0817 16:08:12.788029 17615 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:08:12.788050 17615 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:08:12.788058 17615 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:08:12.788064 17615 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.788074 17615 net.cpp:150] Setting up L2_b3_relu\nI0817 16:08:12.788081 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.788085 17615 net.cpp:165] Memory required for data: 978433500\nI0817 16:08:12.788091 17615 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.788099 17615 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.788103 17615 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:08:12.788113 17615 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:08:12.788123 17615 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:08:12.788168 17615 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.788180 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.788187 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.788192 17615 net.cpp:165] Memory required for data: 986625500\nI0817 16:08:12.788197 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:08:12.788210 17615 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:08:12.788218 17615 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:08:12.788229 17615 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:08:12.788696 17615 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:08:12.788710 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.788715 17615 net.cpp:165] Memory required for data: 990721500\nI0817 16:08:12.788724 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:08:12.788736 17615 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:08:12.788743 17615 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:08:12.788750 17615 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:08:12.788992 17615 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:08:12.789005 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.789011 17615 net.cpp:165] Memory required for data: 994817500\nI0817 16:08:12.789021 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:08:12.789029 17615 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:08:12.789036 17615 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:08:12.789043 17615 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.789099 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:08:12.789243 17615 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:08:12.789259 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.789264 17615 net.cpp:165] Memory required for data: 998913500\nI0817 16:08:12.789273 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:08:12.789281 17615 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:08:12.789288 17615 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:08:12.789294 17615 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.789304 17615 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:08:12.789310 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.789315 17615 net.cpp:165] Memory required for data: 1003009500\nI0817 16:08:12.789320 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:08:12.789340 17615 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:08:12.789347 17615 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:08:12.789358 17615 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:08:12.789825 17615 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:08:12.789840 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.789845 17615 net.cpp:165] Memory required for data: 1007105500\nI0817 16:08:12.789855 17615 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:08:12.789867 17615 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:08:12.789875 17615 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:08:12.789885 17615 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:08:12.790127 17615 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:08:12.790140 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790145 17615 net.cpp:165] Memory required for data: 1011201500\nI0817 16:08:12.790155 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:08:12.790164 17615 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:08:12.790170 17615 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:08:12.790177 17615 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.790235 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:08:12.790380 17615 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:08:12.790393 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790398 17615 net.cpp:165] Memory required for data: 1015297500\nI0817 16:08:12.790407 17615 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:08:12.790421 17615 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:08:12.790427 17615 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:08:12.790434 17615 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:08:12.790442 17615 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:08:12.790472 17615 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:08:12.790482 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790487 17615 net.cpp:165] Memory required for data: 1019393500\nI0817 16:08:12.790491 17615 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:08:12.790499 17615 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:08:12.790504 17615 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:08:12.790514 17615 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.790524 17615 net.cpp:150] Setting up L2_b4_relu\nI0817 16:08:12.790531 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790535 17615 net.cpp:165] Memory required for data: 1023489500\nI0817 16:08:12.790540 17615 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.790547 17615 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.790552 17615 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:08:12.790560 17615 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:08:12.790570 17615 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:08:12.790621 17615 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.790633 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790640 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.790645 17615 net.cpp:165] Memory required for data: 1031681500\nI0817 16:08:12.790650 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:08:12.790670 17615 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:08:12.790678 17615 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:08:12.790686 17615 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:08:12.791152 17615 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:08:12.791173 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.791178 17615 net.cpp:165] Memory required for data: 1035777500\nI0817 16:08:12.791188 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:08:12.791196 17615 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:08:12.791203 17615 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:08:12.791214 17615 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:08:12.791462 17615 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:08:12.791476 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.791481 17615 net.cpp:165] Memory required for data: 1039873500\nI0817 16:08:12.791491 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:08:12.791501 17615 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:08:12.791507 17615 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:08:12.791514 17615 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.791570 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:08:12.791725 17615 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:08:12.791741 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.791748 17615 net.cpp:165] Memory required for data: 1043969500\nI0817 16:08:12.791756 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:08:12.791764 17615 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:08:12.791770 17615 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:08:12.791777 17615 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.791786 17615 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:08:12.791793 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.791798 17615 net.cpp:165] Memory required for data: 1048065500\nI0817 16:08:12.791802 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:08:12.791816 17615 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:08:12.791822 17615 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:08:12.791833 17615 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:08:12.792290 17615 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:08:12.792304 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.792309 17615 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:08:12.792318 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:08:12.792330 17615 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:08:12.792337 17615 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:08:12.792347 17615 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:08:12.792593 17615 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:08:12.792606 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.792618 17615 net.cpp:165] Memory required for data: 1056257500\nI0817 16:08:12.792629 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:08:12.792637 17615 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:08:12.792644 17615 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:08:12.792650 17615 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.792708 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:08:12.792856 17615 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:08:12.792870 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.792874 17615 net.cpp:165] Memory required for data: 1060353500\nI0817 16:08:12.792883 17615 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:08:12.792896 17615 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:08:12.792902 17615 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:08:12.792909 17615 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:08:12.792917 17615 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:08:12.792943 17615 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:08:12.792953 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.792963 17615 net.cpp:165] Memory required for data: 1064449500\nI0817 16:08:12.792969 17615 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:08:12.792979 17615 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:08:12.792985 17615 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:08:12.792992 17615 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.793002 17615 net.cpp:150] Setting up L2_b5_relu\nI0817 16:08:12.793009 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.793014 17615 net.cpp:165] Memory required for data: 1068545500\nI0817 16:08:12.793018 17615 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.793026 17615 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.793031 17615 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:08:12.793038 17615 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:08:12.793047 17615 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:08:12.793094 17615 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.793107 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.793113 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.793118 17615 net.cpp:165] Memory required for data: 1076737500\nI0817 16:08:12.793123 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:08:12.793138 17615 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:08:12.793143 17615 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:08:12.793153 17615 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:08:12.793622 17615 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:08:12.793635 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.793640 17615 net.cpp:165] Memory required for data: 1080833500\nI0817 16:08:12.793649 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:08:12.793661 17615 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:08:12.793668 17615 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:08:12.793679 17615 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:08:12.793927 17615 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:08:12.793941 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.793946 17615 net.cpp:165] Memory required for data: 1084929500\nI0817 16:08:12.793956 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:08:12.793963 17615 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:08:12.793970 17615 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:08:12.793977 17615 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.794034 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:08:12.794178 17615 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:08:12.794191 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.794196 17615 net.cpp:165] Memory required for data: 1089025500\nI0817 16:08:12.794205 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:08:12.794216 17615 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:08:12.794222 17615 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:08:12.794229 17615 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.794239 17615 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:08:12.794246 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.794250 17615 net.cpp:165] Memory required for data: 1093121500\nI0817 16:08:12.794255 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:08:12.794270 17615 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:08:12.794275 17615 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:08:12.794286 17615 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:08:12.794785 17615 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:08:12.794808 17615 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.794814 17615 net.cpp:165] Memory required for data: 1097217500\nI0817 16:08:12.794823 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:08:12.794839 17615 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:08:12.794845 17615 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:08:12.794857 17615 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:08:12.795100 17615 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:08:12.795116 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795121 17615 net.cpp:165] Memory required for data: 1101313500\nI0817 16:08:12.795132 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:08:12.795140 17615 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:08:12.795147 17615 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:08:12.795155 17615 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.795212 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:08:12.795356 17615 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:08:12.795369 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795374 17615 net.cpp:165] Memory required for data: 1105409500\nI0817 16:08:12.795383 17615 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:08:12.795393 17615 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:08:12.795403 17615 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:08:12.795409 17615 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:08:12.795416 17615 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:08:12.795444 17615 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:08:12.795452 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795457 17615 net.cpp:165] Memory required for data: 1109505500\nI0817 16:08:12.795462 17615 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:08:12.795473 17615 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:08:12.795480 17615 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:08:12.795486 17615 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.795495 17615 net.cpp:150] Setting up L2_b6_relu\nI0817 16:08:12.795502 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795506 17615 net.cpp:165] Memory required for data: 1113601500\nI0817 16:08:12.795511 17615 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.795519 17615 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.795524 17615 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:08:12.795532 17615 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:08:12.795542 17615 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:08:12.795588 17615 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.795598 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795604 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.795609 17615 net.cpp:165] Memory required for data: 1121793500\nI0817 16:08:12.795622 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:08:12.795636 17615 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:08:12.795644 17615 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:08:12.795652 17615 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:08:12.796118 17615 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:08:12.796131 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.796138 17615 net.cpp:165] Memory required for data: 1125889500\nI0817 16:08:12.796145 17615 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:08:12.796157 17615 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:08:12.796170 17615 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:08:12.796182 17615 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:08:12.796437 17615 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:08:12.796450 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.796455 17615 net.cpp:165] Memory required for data: 1129985500\nI0817 16:08:12.796465 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:08:12.796474 17615 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:08:12.796480 17615 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:08:12.796489 17615 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.796545 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:08:12.796701 17615 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:08:12.796715 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.796720 17615 net.cpp:165] Memory required for data: 1134081500\nI0817 16:08:12.796730 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:08:12.796741 17615 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:08:12.796747 17615 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:08:12.796754 17615 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.796764 17615 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:08:12.796772 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.796777 17615 net.cpp:165] Memory required for data: 1138177500\nI0817 16:08:12.796780 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:08:12.796794 17615 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:08:12.796800 17615 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:08:12.796809 17615 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:08:12.797273 17615 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:08:12.797287 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.797292 17615 net.cpp:165] Memory required for data: 1142273500\nI0817 16:08:12.797300 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:08:12.797312 17615 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:08:12.797319 17615 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:08:12.797327 17615 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:08:12.797575 17615 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:08:12.797593 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.797598 17615 net.cpp:165] Memory required for data: 1146369500\nI0817 16:08:12.797610 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:08:12.797624 17615 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:08:12.797631 17615 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:08:12.797638 17615 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.797693 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:08:12.797844 17615 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:08:12.797857 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.797863 17615 net.cpp:165] Memory required for data: 1150465500\nI0817 16:08:12.797871 17615 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:08:12.797880 17615 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:08:12.797886 17615 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:08:12.797894 17615 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:08:12.797906 17615 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:08:12.797933 17615 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:08:12.797943 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.797948 17615 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:08:12.797953 17615 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:08:12.797965 17615 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:08:12.797971 17615 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:08:12.797984 17615 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.797994 17615 net.cpp:150] Setting up L2_b7_relu\nI0817 16:08:12.798002 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.798007 17615 net.cpp:165] Memory required for data: 1158657500\nI0817 16:08:12.798010 17615 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.798018 17615 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.798023 17615 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:08:12.798030 17615 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:08:12.798040 17615 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:08:12.798087 17615 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.798099 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.798106 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.798111 17615 net.cpp:165] Memory required for data: 1166849500\nI0817 16:08:12.798116 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:08:12.798130 17615 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:08:12.798137 17615 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:08:12.798146 17615 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:08:12.798619 17615 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:08:12.798635 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.798640 17615 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:08:12.798648 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:08:12.798660 17615 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:08:12.798667 17615 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:08:12.798677 17615 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:08:12.798931 17615 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:08:12.798945 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.798950 17615 net.cpp:165] Memory required for data: 1175041500\nI0817 16:08:12.798960 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:08:12.798969 17615 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:08:12.798975 17615 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:08:12.798984 17615 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.799041 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:08:12.799193 17615 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:08:12.799206 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.799211 17615 net.cpp:165] Memory required for data: 1179137500\nI0817 16:08:12.799221 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:08:12.799228 17615 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:08:12.799234 17615 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:08:12.799244 17615 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.799254 17615 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:08:12.799262 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.799265 17615 net.cpp:165] Memory required for data: 1183233500\nI0817 16:08:12.799270 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:08:12.799284 17615 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:08:12.799290 17615 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:08:12.799299 17615 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:08:12.799770 17615 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:08:12.799785 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.799790 17615 net.cpp:165] Memory required for data: 1187329500\nI0817 16:08:12.799799 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:08:12.799811 17615 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:08:12.799825 17615 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:08:12.799834 17615 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:08:12.800091 17615 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:08:12.800107 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.800112 17615 net.cpp:165] Memory required for data: 1191425500\nI0817 16:08:12.800122 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:08:12.800130 17615 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:08:12.800137 17615 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:08:12.800144 17615 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.800199 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:08:12.800348 17615 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:08:12.800361 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.800366 17615 net.cpp:165] Memory required for data: 1195521500\nI0817 16:08:12.800375 17615 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:08:12.800384 17615 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:08:12.800390 17615 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:08:12.800397 17615 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:08:12.800408 17615 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:08:12.800436 17615 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:08:12.800444 17615 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:08:12.800448 17615 net.cpp:165] Memory required for data: 1199617500\nI0817 16:08:12.800454 17615 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:08:12.800462 17615 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:08:12.800467 17615 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:08:12.800477 17615 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.800487 17615 net.cpp:150] Setting up L2_b8_relu\nI0817 16:08:12.800493 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.800498 17615 net.cpp:165] Memory required for data: 1203713500\nI0817 16:08:12.800503 17615 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.800509 17615 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.800516 17615 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:08:12.800523 17615 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:08:12.800545 17615 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:08:12.800595 17615 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.800608 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.800621 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.800626 17615 net.cpp:165] Memory required for data: 1211905500\nI0817 16:08:12.800631 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:08:12.800643 17615 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:08:12.800650 17615 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:08:12.800663 17615 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:08:12.801131 17615 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:08:12.801144 17615 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:08:12.801151 17615 net.cpp:165] Memory required for data: 1216001500\nI0817 16:08:12.801159 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:08:12.801172 17615 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:08:12.801177 17615 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:08:12.801185 17615 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:08:12.801434 17615 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:08:12.801446 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.801451 17615 net.cpp:165] Memory required for data: 1220097500\nI0817 16:08:12.801471 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:08:12.801483 17615 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:08:12.801491 17615 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:08:12.801498 17615 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.801553 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:08:12.801710 17615 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:08:12.801724 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.801729 17615 net.cpp:165] Memory required for data: 1224193500\nI0817 16:08:12.801738 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:08:12.801749 17615 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:08:12.801756 17615 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:08:12.801766 17615 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.801776 17615 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:08:12.801784 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.801789 17615 net.cpp:165] Memory required for data: 1228289500\nI0817 16:08:12.801792 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:08:12.801803 17615 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:08:12.801810 17615 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:08:12.801820 17615 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:08:12.802280 17615 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:08:12.802294 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.802300 17615 net.cpp:165] Memory required for data: 1232385500\nI0817 16:08:12.802309 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:08:12.802317 17615 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:08:12.802325 17615 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:08:12.802335 17615 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:08:12.802585 17615 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:08:12.802598 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.802603 17615 net.cpp:165] Memory required for data: 1236481500\nI0817 16:08:12.802651 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:08:12.802670 17615 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:08:12.802676 17615 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:08:12.802685 17615 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.802745 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:08:12.802892 17615 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:08:12.802912 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.802918 17615 net.cpp:165] Memory required for data: 1240577500\nI0817 16:08:12.802927 17615 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:08:12.802937 17615 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:08:12.802943 17615 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:08:12.802950 17615 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:08:12.802958 17615 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:08:12.802987 17615 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:08:12.802997 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.803001 17615 net.cpp:165] Memory required for data: 1244673500\nI0817 16:08:12.803007 17615 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:08:12.803014 17615 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:08:12.803020 17615 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:08:12.803030 17615 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.803040 17615 net.cpp:150] Setting up L2_b9_relu\nI0817 16:08:12.803047 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.803051 17615 net.cpp:165] Memory required for data: 1248769500\nI0817 16:08:12.803056 17615 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.803071 17615 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.803076 17615 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:08:12.803086 17615 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:08:12.803097 17615 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:08:12.803141 17615 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.803153 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.803159 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.803164 17615 net.cpp:165] Memory required for data: 1256961500\nI0817 16:08:12.803169 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:08:12.803184 17615 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:08:12.803190 17615 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:08:12.803200 17615 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:08:12.803683 17615 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:08:12.803699 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.803704 17615 net.cpp:165] Memory required for data: 1257985500\nI0817 16:08:12.803711 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:08:12.803724 17615 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:08:12.803730 17615 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:08:12.803738 17615 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:08:12.804025 17615 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:08:12.804046 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.804051 17615 net.cpp:165] Memory required for data: 1259009500\nI0817 16:08:12.804062 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:08:12.804075 17615 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:08:12.804080 17615 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:08:12.804088 17615 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.804147 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:08:12.804301 17615 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:08:12.804313 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.804318 17615 net.cpp:165] Memory required for data: 1260033500\nI0817 16:08:12.804327 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:08:12.804338 17615 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:08:12.804344 17615 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:08:12.804352 17615 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.804361 17615 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:08:12.804368 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.804373 17615 net.cpp:165] Memory required for data: 1261057500\nI0817 16:08:12.804376 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:08:12.804391 17615 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:08:12.804397 17615 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:08:12.804407 17615 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:08:12.804888 17615 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:08:12.804901 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.804906 17615 net.cpp:165] Memory required for data: 1262081500\nI0817 16:08:12.804915 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:08:12.804924 17615 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:08:12.804930 17615 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:08:12.804942 17615 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:08:12.805198 17615 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:08:12.805214 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.805219 17615 net.cpp:165] Memory required for data: 1263105500\nI0817 16:08:12.805236 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:08:12.805245 17615 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:08:12.805251 17615 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:08:12.805259 17615 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.805315 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:08:12.805469 17615 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:08:12.805482 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.805487 17615 net.cpp:165] Memory required for data: 1264129500\nI0817 16:08:12.805497 17615 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:08:12.805510 17615 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:08:12.805516 17615 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:08:12.805524 17615 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:08:12.805562 17615 net.cpp:150] Setting up L3_b1_pool\nI0817 16:08:12.805570 17615 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:08:12.805575 17615 net.cpp:165] Memory required for data: 1265153500\nI0817 16:08:12.805580 17615 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:08:12.805589 17615 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:08:12.805595 17615 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:08:12.805603 17615 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:08:12.805609 17615 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:08:12.805652 17615 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:08:12.805662 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.805667 17615 net.cpp:165] Memory required for data: 1266177500\nI0817 16:08:12.805673 17615 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:08:12.805680 17615 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:08:12.805686 17615 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:08:12.805694 17615 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.805702 17615 net.cpp:150] Setting up L3_b1_relu\nI0817 16:08:12.805709 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.805714 17615 net.cpp:165] Memory required for data: 1267201500\nI0817 16:08:12.805719 17615 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:08:12.805728 17615 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:08:12.805738 17615 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:08:12.806959 17615 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:08:12.806977 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.806982 17615 net.cpp:165] Memory required for data: 1268225500\nI0817 16:08:12.806988 17615 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:08:12.806998 17615 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:08:12.807008 17615 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:08:12.807015 17615 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:08:12.807024 17615 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:08:12.807062 17615 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:08:12.807080 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.807085 17615 net.cpp:165] Memory required for data: 1270273500\nI0817 16:08:12.807090 17615 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.807097 17615 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.807103 17615 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:08:12.807114 17615 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:08:12.807124 17615 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:08:12.807171 17615 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.807185 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.807193 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.807204 17615 net.cpp:165] Memory required for data: 1274369500\nI0817 16:08:12.807209 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:08:12.807221 17615 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:08:12.807227 17615 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:08:12.807240 17615 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:08:12.809219 17615 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:08:12.809237 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.809243 17615 net.cpp:165] Memory required for data: 1276417500\nI0817 16:08:12.809252 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:08:12.809265 17615 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:08:12.809273 17615 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:08:12.809281 17615 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:08:12.809545 17615 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:08:12.809557 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.809563 17615 net.cpp:165] Memory required for data: 1278465500\nI0817 16:08:12.809574 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:08:12.809586 17615 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:08:12.809592 17615 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:08:12.809600 17615 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.809666 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:08:12.809823 17615 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:08:12.809837 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.809842 17615 net.cpp:165] Memory required for data: 1280513500\nI0817 16:08:12.809851 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:08:12.809865 17615 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:08:12.809872 17615 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:08:12.809882 17615 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.809892 17615 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:08:12.809900 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.809904 17615 net.cpp:165] Memory required for data: 1282561500\nI0817 16:08:12.809909 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:08:12.809921 17615 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:08:12.809926 17615 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:08:12.809937 17615 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:08:12.810961 17615 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:08:12.810976 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.810981 17615 net.cpp:165] Memory required for data: 1284609500\nI0817 16:08:12.810989 17615 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:08:12.811002 17615 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:08:12.811008 17615 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:08:12.811017 17615 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:08:12.811282 17615 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:08:12.811295 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811300 17615 net.cpp:165] Memory required for data: 1286657500\nI0817 16:08:12.811311 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:08:12.811319 17615 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:08:12.811326 17615 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:08:12.811333 17615 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.811393 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:08:12.811545 17615 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:08:12.811561 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811566 17615 net.cpp:165] Memory required for data: 1288705500\nI0817 16:08:12.811576 17615 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:08:12.811585 17615 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:08:12.811599 17615 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:08:12.811606 17615 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:08:12.811620 17615 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:08:12.811659 17615 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:08:12.811669 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811674 17615 net.cpp:165] Memory required for data: 1290753500\nI0817 16:08:12.811679 17615 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:08:12.811686 17615 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:08:12.811692 17615 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:08:12.811700 17615 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.811709 17615 net.cpp:150] Setting up L3_b2_relu\nI0817 16:08:12.811717 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811722 17615 net.cpp:165] Memory required for data: 1292801500\nI0817 16:08:12.811727 17615 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.811736 17615 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.811741 17615 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:08:12.811749 17615 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:08:12.811758 17615 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:08:12.811803 17615 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.811820 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811826 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.811831 17615 net.cpp:165] Memory required for data: 1296897500\nI0817 16:08:12.811836 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:08:12.811848 17615 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:08:12.811856 17615 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:08:12.811864 17615 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:08:12.812891 17615 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:08:12.812904 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.812911 17615 net.cpp:165] Memory required for data: 1298945500\nI0817 16:08:12.812919 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:08:12.812932 17615 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:08:12.812937 17615 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:08:12.812947 17615 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:08:12.813207 17615 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:08:12.813221 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.813226 17615 net.cpp:165] Memory required for data: 1300993500\nI0817 16:08:12.813236 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:08:12.813248 17615 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:08:12.813254 17615 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:08:12.813266 17615 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.813323 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:08:12.813479 17615 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:08:12.813493 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.813498 17615 net.cpp:165] Memory required for data: 1303041500\nI0817 16:08:12.813508 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:08:12.813515 17615 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:08:12.813521 17615 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:08:12.813531 17615 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.813541 17615 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:08:12.813549 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.813560 17615 net.cpp:165] Memory required for data: 1305089500\nI0817 16:08:12.813565 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:08:12.813577 17615 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:08:12.813582 17615 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:08:12.813594 17615 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:08:12.814610 17615 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:08:12.814630 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.814635 17615 net.cpp:165] Memory required for data: 1307137500\nI0817 16:08:12.814644 17615 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:08:12.814656 17615 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:08:12.814663 17615 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:08:12.814672 17615 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:08:12.814936 17615 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:08:12.814950 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.814955 17615 net.cpp:165] Memory required for data: 1309185500\nI0817 16:08:12.814965 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:08:12.814975 17615 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:08:12.814980 17615 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:08:12.814988 17615 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.815050 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:08:12.815209 17615 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:08:12.815223 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.815228 17615 net.cpp:165] Memory required for data: 1311233500\nI0817 16:08:12.815238 17615 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:08:12.815246 17615 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:08:12.815253 17615 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:08:12.815260 17615 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:08:12.815273 17615 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:08:12.815305 17615 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:08:12.815315 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.815320 17615 net.cpp:165] Memory required for data: 1313281500\nI0817 16:08:12.815325 17615 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:08:12.815336 17615 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:08:12.815342 17615 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:08:12.815349 17615 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.815358 17615 net.cpp:150] Setting up L3_b3_relu\nI0817 16:08:12.815366 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.815369 17615 net.cpp:165] Memory required for data: 1315329500\nI0817 16:08:12.815374 17615 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.815384 17615 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.815390 17615 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:08:12.815397 17615 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:08:12.815407 17615 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:08:12.815451 17615 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.815466 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.815474 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.815479 17615 net.cpp:165] Memory required for data: 1319425500\nI0817 16:08:12.815484 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:08:12.815495 17615 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:08:12.815501 17615 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:08:12.815510 17615 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:08:12.816542 17615 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:08:12.816557 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.816563 17615 net.cpp:165] Memory required for data: 1321473500\nI0817 16:08:12.816571 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:08:12.816583 17615 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:08:12.816591 17615 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:08:12.816598 17615 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:08:12.816874 17615 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:08:12.816889 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.816893 17615 net.cpp:165] Memory required for data: 1323521500\nI0817 16:08:12.816903 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:08:12.816915 17615 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:08:12.816922 17615 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:08:12.816932 17615 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.816989 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:08:12.817152 17615 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:08:12.817164 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.817169 17615 net.cpp:165] Memory required for data: 1325569500\nI0817 16:08:12.817179 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:08:12.817188 17615 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:08:12.817193 17615 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:08:12.817203 17615 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.817214 17615 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:08:12.817220 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.817224 17615 net.cpp:165] Memory required for data: 1327617500\nI0817 16:08:12.817229 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:08:12.817243 17615 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:08:12.817250 17615 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:08:12.817258 17615 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:08:12.818289 17615 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:08:12.818303 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.818308 17615 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:08:12.818317 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:08:12.818331 17615 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:08:12.818338 17615 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:08:12.818346 17615 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:08:12.818624 17615 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:08:12.818639 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.818644 17615 net.cpp:165] Memory required for data: 1331713500\nI0817 16:08:12.818653 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:08:12.818661 17615 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:08:12.818668 17615 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:08:12.818675 17615 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.818737 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:08:12.818902 17615 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:08:12.818915 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.818922 17615 net.cpp:165] Memory required for data: 1333761500\nI0817 16:08:12.818930 17615 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:08:12.818939 17615 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:08:12.818945 17615 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:08:12.818953 17615 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:08:12.818963 17615 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:08:12.818996 17615 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:08:12.819016 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.819021 17615 net.cpp:165] Memory required for data: 1335809500\nI0817 16:08:12.819026 17615 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:08:12.819034 17615 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:08:12.819041 17615 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:08:12.819047 17615 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.819056 17615 net.cpp:150] Setting up L3_b4_relu\nI0817 16:08:12.819063 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.819068 17615 net.cpp:165] Memory required for data: 1337857500\nI0817 16:08:12.819073 17615 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.819083 17615 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.819089 17615 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:08:12.819097 17615 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:08:12.819106 17615 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:08:12.819155 17615 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.819167 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.819175 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.819180 17615 net.cpp:165] Memory required for data: 1341953500\nI0817 16:08:12.819185 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:08:12.819195 17615 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:08:12.819202 17615 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:08:12.819216 17615 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:08:12.820250 17615 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:08:12.820266 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.820271 17615 net.cpp:165] Memory required for data: 1344001500\nI0817 16:08:12.820279 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:08:12.820288 17615 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:08:12.820294 17615 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:08:12.820307 17615 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:08:12.821571 17615 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:08:12.821588 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.821593 17615 net.cpp:165] Memory required for data: 1346049500\nI0817 16:08:12.821605 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:08:12.821620 17615 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:08:12.821627 17615 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:08:12.821635 17615 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.821699 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:08:12.821861 17615 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:08:12.821873 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.821879 17615 net.cpp:165] Memory required for data: 1348097500\nI0817 16:08:12.821888 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:08:12.821897 17615 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:08:12.821902 17615 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:08:12.821913 17615 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.821923 17615 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:08:12.821930 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.821935 17615 net.cpp:165] Memory required for data: 1350145500\nI0817 16:08:12.821940 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:08:12.821954 17615 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:08:12.821960 17615 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:08:12.821969 17615 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:08:12.823976 17615 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:08:12.823993 17615 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:08:12.823999 17615 net.cpp:165] Memory required for data: 1352193500\nI0817 16:08:12.824008 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:08:12.824021 17615 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:08:12.824028 17615 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:08:12.824038 17615 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:08:12.824300 17615 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:08:12.824313 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824318 17615 net.cpp:165] Memory required for data: 1354241500\nI0817 16:08:12.824328 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:08:12.824338 17615 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:08:12.824344 17615 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:08:12.824352 17615 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.824411 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:08:12.824565 17615 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:08:12.824579 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824584 17615 net.cpp:165] Memory required for data: 1356289500\nI0817 16:08:12.824594 17615 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:08:12.824602 17615 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:08:12.824609 17615 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:08:12.824622 17615 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:08:12.824633 17615 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:08:12.824667 17615 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:08:12.824682 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824687 17615 net.cpp:165] Memory required for data: 1358337500\nI0817 16:08:12.824692 17615 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:08:12.824700 17615 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:08:12.824707 17615 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:08:12.824713 17615 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.824723 17615 net.cpp:150] Setting up L3_b5_relu\nI0817 16:08:12.824729 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824734 17615 net.cpp:165] Memory required for data: 1360385500\nI0817 16:08:12.824739 17615 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.824749 17615 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.824755 17615 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:08:12.824762 17615 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:08:12.824772 17615 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:08:12.824816 17615 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.824831 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824838 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.824843 17615 net.cpp:165] Memory required for data: 1364481500\nI0817 16:08:12.824848 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:08:12.824861 17615 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:08:12.824867 17615 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:08:12.824875 17615 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:08:12.825896 17615 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:08:12.825917 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.825922 17615 net.cpp:165] Memory required for data: 1366529500\nI0817 16:08:12.825932 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:08:12.825942 
17615 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:08:12.825954 17615 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:08:12.825968 17615 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:08:12.826225 17615 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:08:12.826238 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.826244 17615 net.cpp:165] Memory required for data: 1368577500\nI0817 16:08:12.826254 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:08:12.826267 17615 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:08:12.826273 17615 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:08:12.826282 17615 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.826336 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:08:12.826489 17615 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:08:12.826503 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.826508 17615 net.cpp:165] Memory required for data: 1370625500\nI0817 16:08:12.826516 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:08:12.826525 17615 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:08:12.826531 17615 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:08:12.826541 17615 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.826551 17615 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:08:12.826558 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.826563 17615 net.cpp:165] Memory required for data: 1372673500\nI0817 16:08:12.826568 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:08:12.826581 17615 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:08:12.826588 17615 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:08:12.826596 17615 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:08:12.827601 17615 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:08:12.827620 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.827626 17615 net.cpp:165] Memory required for data: 1374721500\nI0817 16:08:12.827636 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:08:12.827647 17615 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:08:12.827654 17615 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:08:12.827663 17615 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:08:12.827921 17615 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:08:12.827934 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.827939 17615 net.cpp:165] Memory required for data: 1376769500\nI0817 16:08:12.827950 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:08:12.827958 17615 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:08:12.827965 17615 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:08:12.827972 17615 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.828033 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:08:12.828191 17615 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:08:12.828203 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.828208 17615 net.cpp:165] Memory required for data: 1378817500\nI0817 16:08:12.828217 17615 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:08:12.828227 17615 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:08:12.828233 17615 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:08:12.828240 17615 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:08:12.828250 17615 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:08:12.828284 17615 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:08:12.828295 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.828301 17615 net.cpp:165] Memory required for data: 1380865500\nI0817 16:08:12.828306 17615 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:08:12.828313 17615 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:08:12.828320 17615 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:08:12.828333 17615 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.828344 17615 net.cpp:150] Setting up L3_b6_relu\nI0817 16:08:12.828351 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.828356 17615 net.cpp:165] Memory required for data: 1382913500\nI0817 16:08:12.828361 17615 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.828371 17615 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.828377 17615 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:08:12.828383 17615 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:08:12.828393 17615 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:08:12.828440 17615 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.828452 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.828459 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.828464 17615 net.cpp:165] Memory required for data: 1387009500\nI0817 16:08:12.828469 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:08:12.828480 17615 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:08:12.828487 17615 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:08:12.828500 17615 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:08:12.829515 17615 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:08:12.829530 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.829535 17615 net.cpp:165] Memory required for data: 1389057500\nI0817 16:08:12.829545 17615 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:08:12.829555 17615 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:08:12.829560 17615 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:08:12.829571 17615 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:08:12.829838 17615 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:08:12.829855 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.829860 17615 net.cpp:165] Memory required for data: 1391105500\nI0817 16:08:12.829871 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:08:12.829880 17615 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:08:12.829886 17615 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:08:12.829895 17615 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.829952 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:08:12.830111 17615 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:08:12.830124 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.830129 17615 net.cpp:165] Memory required for data: 1393153500\nI0817 16:08:12.830138 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:08:12.830171 17615 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:08:12.830180 17615 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:08:12.830188 17615 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.830198 17615 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:08:12.830205 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.830209 17615 net.cpp:165] Memory required for data: 1395201500\nI0817 16:08:12.830215 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:08:12.830226 17615 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:08:12.830232 17615 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:08:12.830245 17615 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:08:12.831267 17615 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:08:12.831282 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.831288 17615 net.cpp:165] Memory required for data: 1397249500\nI0817 16:08:12.831296 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:08:12.831306 17615 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:08:12.831320 17615 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:08:12.831328 17615 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:08:12.831593 17615 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:08:12.831607 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.831617 17615 net.cpp:165] Memory required for data: 1399297500\nI0817 16:08:12.831629 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:08:12.831640 17615 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:08:12.831647 17615 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:08:12.831657 17615 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.831715 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:08:12.831871 17615 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:08:12.831884 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.831889 17615 net.cpp:165] Memory required for data: 1401345500\nI0817 16:08:12.831898 17615 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:08:12.831907 17615 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:08:12.831913 17615 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:08:12.831920 17615 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:08:12.831933 17615 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:08:12.831965 17615 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:08:12.831974 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.831979 17615 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:08:12.831984 17615 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:08:12.831995 17615 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:08:12.832000 17615 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:08:12.832007 17615 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.832017 17615 net.cpp:150] Setting up L3_b7_relu\nI0817 16:08:12.832023 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.832028 17615 net.cpp:165] Memory required for data: 1405441500\nI0817 16:08:12.832033 17615 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.832041 17615 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.832046 17615 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:08:12.832053 17615 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:08:12.832062 17615 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:08:12.832110 17615 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.832123 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.832129 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.832134 17615 net.cpp:165] Memory required for data: 1409537500\nI0817 16:08:12.832139 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:08:12.832152 17615 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:08:12.832159 17615 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:08:12.832168 17615 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:08:12.833187 17615 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:08:12.833202 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.833207 17615 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:08:12.833216 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:08:12.833228 17615 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:08:12.833235 17615 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:08:12.833245 17615 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:08:12.833508 17615 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:08:12.833521 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.833526 17615 net.cpp:165] Memory required for data: 1413633500\nI0817 16:08:12.833544 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:08:12.833554 17615 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:08:12.833559 17615 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:08:12.833570 17615 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.833639 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:08:12.833801 17615 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:08:12.833814 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.833819 17615 net.cpp:165] Memory required for data: 1415681500\nI0817 16:08:12.833828 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:08:12.833837 17615 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:08:12.833843 17615 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:08:12.833853 17615 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.833863 17615 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:08:12.833870 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.833875 17615 net.cpp:165] Memory required for data: 1417729500\nI0817 16:08:12.833880 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:08:12.833894 17615 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:08:12.833900 17615 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:08:12.833911 17615 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:08:12.834926 17615 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:08:12.834941 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.834946 17615 net.cpp:165] Memory required for data: 1419777500\nI0817 16:08:12.834955 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:08:12.834964 17615 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:08:12.834971 17615 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:08:12.834982 17615 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:08:12.835248 17615 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:08:12.835264 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.835269 17615 net.cpp:165] Memory required for data: 1421825500\nI0817 16:08:12.835280 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:08:12.835289 17615 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:08:12.835295 17615 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:08:12.835304 17615 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.835361 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:08:12.835516 17615 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:08:12.835530 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.835535 17615 net.cpp:165] Memory required for data: 1423873500\nI0817 16:08:12.835543 17615 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:08:12.835554 17615 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:08:12.835561 17615 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:08:12.835568 17615 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:08:12.835577 17615 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:08:12.835618 17615 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:08:12.835630 17615 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:08:12.835635 17615 net.cpp:165] Memory required for data: 1425921500\nI0817 16:08:12.835641 17615 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:08:12.835649 17615 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:08:12.835659 17615 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:08:12.835671 17615 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.835686 17615 net.cpp:150] Setting up L3_b8_relu\nI0817 16:08:12.835698 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.835706 17615 net.cpp:165] Memory required for data: 1427969500\nI0817 16:08:12.835716 17615 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.835748 17615 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.835759 17615 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:08:12.835779 17615 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:08:12.835798 17615 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:08:12.835865 17615 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.835881 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.835888 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.835893 17615 net.cpp:165] Memory required for data: 1432065500\nI0817 16:08:12.835898 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:08:12.835914 17615 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:08:12.835922 17615 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:08:12.835930 17615 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:08:12.837934 17615 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:08:12.837952 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:08:12.837957 17615 net.cpp:165] Memory required for data: 1434113500\nI0817 16:08:12.837967 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:08:12.837976 17615 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:08:12.837983 17615 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:08:12.837994 17615 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:08:12.838265 17615 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:08:12.838279 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.838284 17615 net.cpp:165] Memory required for data: 1436161500\nI0817 16:08:12.838294 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:08:12.838304 17615 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:08:12.838310 17615 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:08:12.838318 17615 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.838378 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:08:12.838536 17615 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:08:12.838551 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.838557 17615 net.cpp:165] Memory required for data: 1438209500\nI0817 16:08:12.838567 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:08:12.838574 17615 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:08:12.838580 17615 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:08:12.838588 17615 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.838598 17615 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:08:12.838604 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.838609 17615 net.cpp:165] Memory required for data: 1440257500\nI0817 16:08:12.838620 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:08:12.838636 17615 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:08:12.838642 17615 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:08:12.838651 17615 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:08:12.839676 17615 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:08:12.839691 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.839696 17615 net.cpp:165] Memory required for data: 1442305500\nI0817 16:08:12.839705 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:08:12.839717 17615 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:08:12.839725 17615 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:08:12.839735 17615 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:08:12.839998 17615 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:08:12.840011 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.840018 17615 net.cpp:165] Memory required for data: 1444353500\nI0817 16:08:12.840035 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:08:12.840044 17615 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:08:12.840050 17615 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:08:12.840061 17615 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.840119 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:08:12.840279 17615 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:08:12.840292 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.840297 17615 net.cpp:165] Memory required for data: 1446401500\nI0817 16:08:12.840306 17615 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:08:12.840318 17615 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:08:12.840325 17615 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:08:12.840332 17615 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:08:12.840340 17615 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:08:12.840376 17615 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:08:12.840389 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.840394 17615 net.cpp:165] Memory required for data: 1448449500\nI0817 16:08:12.840399 17615 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:08:12.840406 17615 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:08:12.840412 17615 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:08:12.840422 17615 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.840432 17615 net.cpp:150] Setting up L3_b9_relu\nI0817 16:08:12.840440 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.840443 17615 net.cpp:165] Memory required for data: 1450497500\nI0817 16:08:12.840448 17615 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:08:12.840456 17615 net.cpp:100] Creating Layer post_pool\nI0817 16:08:12.840462 17615 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:08:12.840469 17615 net.cpp:408] post_pool -> post_pool\nI0817 16:08:12.840503 17615 net.cpp:150] Setting up post_pool\nI0817 16:08:12.840515 17615 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:08:12.840520 17615 net.cpp:165] Memory required for data: 1450529500\nI0817 16:08:12.840525 17615 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:08:12.840618 17615 net.cpp:100] Creating Layer post_FC\nI0817 16:08:12.840633 17615 net.cpp:434] post_FC <- post_pool\nI0817 16:08:12.840643 17615 net.cpp:408] post_FC -> post_FC_top\nI0817 16:08:12.840891 17615 net.cpp:150] Setting up post_FC\nI0817 16:08:12.840909 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.840915 17615 net.cpp:165] Memory required for data: 1450534500\nI0817 16:08:12.840925 17615 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:08:12.840934 17615 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:08:12.840939 17615 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:08:12.840947 17615 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:08:12.840957 17615 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:08:12.841007 17615 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:08:12.841019 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.841027 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.841030 17615 net.cpp:165] Memory required for data: 1450544500\nI0817 16:08:12.841037 17615 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:08:12.841078 17615 net.cpp:100] Creating Layer accuracy\nI0817 16:08:12.841089 17615 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:08:12.841097 17615 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:08:12.841109 17615 net.cpp:408] accuracy -> accuracy\nI0817 16:08:12.841151 17615 net.cpp:150] Setting up accuracy\nI0817 16:08:12.841164 17615 net.cpp:157] Top shape: (1)\nI0817 16:08:12.841169 17615 net.cpp:165] Memory required for data: 1450544504\nI0817 16:08:12.841176 17615 layer_factory.hpp:77] Creating layer loss\nI0817 16:08:12.841190 17615 net.cpp:100] Creating Layer loss\nI0817 16:08:12.841197 17615 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:08:12.841204 17615 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:08:12.841212 17615 net.cpp:408] loss -> loss\nI0817 16:08:12.841259 17615 layer_factory.hpp:77] Creating layer loss\nI0817 16:08:12.841416 17615 net.cpp:150] Setting up loss\nI0817 16:08:12.841434 17615 net.cpp:157] Top shape: (1)\nI0817 16:08:12.841440 17615 net.cpp:160]     with loss weight 1\nI0817 16:08:12.841517 17615 net.cpp:165] Memory required for data: 1450544508\nI0817 16:08:12.841526 17615 net.cpp:226] loss needs backward computation.\nI0817 16:08:12.841532 17615 net.cpp:228] accuracy does not need backward computation.\nI0817 16:08:12.841539 17615 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:08:12.841544 17615 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:08:12.841549 17615 net.cpp:226] post_pool needs backward computation.\nI0817 16:08:12.841554 17615 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:08:12.841559 17615 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.841565 17615 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.841569 17615 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.841574 17615 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.841579 17615 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.841584 17615 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.841589 17615 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.841594 17615 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.841599 17615 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:08:12.841605 17615 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:08:12.841610 17615 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.841622 17615 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.841627 17615 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.841634 17615 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.841639 17615 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.841644 17615 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:08:12.841648 17615 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.841653 17615 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.841660 17615 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:08:12.841665 17615 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:08:12.841670 17615 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:08:12.841675 17615 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.841680 17615 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.841686 17615 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.841691 17615 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.841694 17615 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.841699 17615 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.841704 17615 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.841709 17615 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:08:12.841716 17615 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:08:12.841720 17615 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.841725 17615 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.841730 17615 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.841737 17615 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.841748 17615 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.841754 17615 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.841759 17615 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.841764 17615 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.841769 17615 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:08:12.841775 17615 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:08:12.841779 17615 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:08:12.841785 17615 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.841790 17615 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.841795 17615 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.841801 17615 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.841806 17615 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.841811 17615 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.841816 17615 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.841821 17615 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:08:12.841827 17615 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:08:12.841832 17615 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.841837 17615 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.841842 17615 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.841847 17615 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.841853 17615 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.841857 17615 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.841862 17615 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.841868 17615 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.841873 17615 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:08:12.841878 17615 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:08:12.841883 17615 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:08:12.841889 17615 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.841894 17615 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.841899 17615 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.841904 17615 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.841909 17615 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.841914 
17615 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.841920 17615 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.841925 17615 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:08:12.841930 17615 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:08:12.841935 17615 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.841941 17615 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.841946 17615 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.841955 17615 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.841961 17615 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.841966 17615 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.841971 17615 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.841976 17615 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.841982 17615 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:08:12.841989 17615 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:08:12.841995 17615 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:08:12.842005 17615 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:08:12.842010 17615 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.842016 17615 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:08:12.842021 17615 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.842026 17615 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.842032 17615 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.842037 17615 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.842042 17615 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:08:12.842047 17615 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.842053 17615 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.842059 17615 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:08:12.842064 17615 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:08:12.842069 17615 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.842075 17615 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.842080 17615 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.842087 17615 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.842092 17615 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.842097 17615 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.842102 17615 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.842108 17615 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.842113 17615 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:08:12.842118 17615 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:08:12.842123 17615 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.842129 17615 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.842134 17615 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.842140 17615 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.842145 17615 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.842150 17615 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:08:12.842155 17615 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.842161 17615 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.842166 17615 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:08:12.842172 17615 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:08:12.842177 17615 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:08:12.842183 17615 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.842188 17615 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.842195 17615 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.842200 17615 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.842205 17615 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.842209 17615 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.842214 17615 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.842221 17615 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:08:12.842226 17615 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:08:12.842234 17615 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.842241 17615 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.842245 17615 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.842252 17615 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.842257 17615 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.842262 17615 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.842272 17615 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.842278 17615 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.842283 17615 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:08:12.842288 17615 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:08:12.842294 17615 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:08:12.842300 17615 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.842305 17615 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.842310 17615 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.842316 17615 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.842321 17615 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.842326 17615 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.842331 17615 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.842337 17615 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:08:12.842344 17615 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:08:12.842348 17615 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.842355 17615 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.842360 17615 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.842365 17615 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.842370 17615 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.842375 17615 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.842381 17615 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.842386 17615 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.842392 17615 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:08:12.842397 17615 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:08:12.842403 17615 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:08:12.842409 17615 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.842414 17615 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.842420 17615 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.842427 17615 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.842432 17615 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.842437 17615 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.842442 17615 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.842448 17615 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:08:12.842453 17615 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:08:12.842458 17615 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.842464 17615 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.842470 17615 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.842475 17615 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.842481 17615 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.842486 17615 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.842491 17615 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.842497 17615 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.842504 17615 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:08:12.842509 17615 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:08:12.842515 17615 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:08:12.842519 17615 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:08:12.842525 17615 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.842536 17615 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:08:12.842542 17615 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.842548 17615 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.842555 17615 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.842559 17615 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.842566 17615 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:08:12.842571 17615 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.842576 17615 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.842581 17615 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:08:12.842586 17615 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:08:12.842592 17615 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.842598 17615 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.842603 17615 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.842617 17615 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.842624 17615 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.842629 17615 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.842635 17615 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.842641 17615 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.842648 17615 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:08:12.842653 17615 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:08:12.842658 17615 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.842664 17615 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.842670 17615 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.842676 17615 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.842682 17615 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.842687 17615 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:08:12.842692 17615 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.842699 17615 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.842705 17615 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:08:12.842710 17615 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:08:12.842716 17615 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:08:12.842722 17615 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.842727 17615 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.842733 17615 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.842738 17615 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.842744 17615 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.842749 17615 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.842756 17615 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.842761 17615 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:08:12.842767 17615 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:08:12.842772 17615 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.842778 17615 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.842784 17615 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.842790 17615 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.842795 17615 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.842802 17615 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.842806 17615 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.842818 17615 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.842824 17615 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:08:12.842830 17615 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:08:12.842835 17615 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:08:12.842842 17615 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.842849 17615 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.842854 17615 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.842859 17615 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.842864 17615 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.842870 17615 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.842876 17615 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.842882 17615 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:08:12.842888 17615 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:08:12.842893 17615 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.842900 17615 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.842905 17615 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.842911 17615 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.842916 17615 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.842921 17615 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.842927 17615 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.842933 17615 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.842938 17615 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:08:12.842944 17615 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:08:12.842950 17615 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:08:12.842957 17615 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.842962 17615 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.842967 17615 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.842973 17615 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.842978 17615 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.842984 17615 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.842989 17615 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.842996 17615 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:08:12.843003 17615 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:08:12.843008 17615 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.843014 17615 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.843019 17615 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.843024 17615 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.843030 17615 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.843036 17615 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.843041 17615 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.843047 17615 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.843053 17615 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:08:12.843058 17615 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:08:12.843065 17615 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.843070 17615 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.843076 17615 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.843082 17615 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.843093 17615 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.843099 17615 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:08:12.843106 17615 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.843111 17615 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.843116 17615 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:08:12.843122 17615 net.cpp:226] pre_relu needs backward computation.\nI0817 16:08:12.843127 17615 net.cpp:226] pre_scale needs backward computation.\nI0817 16:08:12.843132 17615 net.cpp:226] pre_bn needs backward computation.\nI0817 16:08:12.843138 17615 net.cpp:226] pre_conv needs backward computation.\nI0817 16:08:12.843144 17615 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:08:12.843152 17615 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:08:12.843155 17615 net.cpp:270] This network produces output accuracy\nI0817 16:08:12.843163 17615 net.cpp:270] This network produces output loss\nI0817 16:08:12.843525 17615 net.cpp:283] Network initialization done.\nI0817 16:08:12.853029 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:12.853068 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:12.853127 17615 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:08:12.853507 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:08:12.853523 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:08:12.853534 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:08:12.853566 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:08:12.853579 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:08:12.853588 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:08:12.853597 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:08:12.853606 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:08:12.853623 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:08:12.853636 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:08:12.853644 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:08:12.853653 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:08:12.853662 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:08:12.853672 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:08:12.853680 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:08:12.853689 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:08:12.853698 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:08:12.853708 17615 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:08:12.853716 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:08:12.853734 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:08:12.853744 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:08:12.853754 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:08:12.853765 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:08:12.853775 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:08:12.853783 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:08:12.853791 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:08:12.853801 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:08:12.853809 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:08:12.853817 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:08:12.853826 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:08:12.853837 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:08:12.853844 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:08:12.853853 17615 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:08:12.853862 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:08:12.853870 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:08:12.853879 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:08:12.853888 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:08:12.853898 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:08:12.853906 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:08:12.853914 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:08:12.853926 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:08:12.853935 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:08:12.853943 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:08:12.853952 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:08:12.853961 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:08:12.853970 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:08:12.853978 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:08:12.853987 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:08:12.853996 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:08:12.854004 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:08:12.854022 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:08:12.854030 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:08:12.854040 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:08:12.854048 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:08:12.854058 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:08:12.854065 17615 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:08:12.855712 17615 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:08:12.857307 17615 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:08:12.857542 17615 net.cpp:100] Creating Layer dataLayer\nI0817 16:08:12.857558 17615 net.cpp:408] dataLayer -> data_top\nI0817 16:08:12.857578 17615 net.cpp:408] dataLayer -> label\nI0817 16:08:12.857591 17615 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:08:12.869004 17622 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:08:12.869242 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:12.876555 17615 net.cpp:150] Setting up dataLayer\nI0817 16:08:12.876577 17615 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:08:12.876610 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.876626 17615 net.cpp:165] Memory required for data: 1536500\nI0817 16:08:12.876633 17615 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:08:12.876644 17615 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:08:12.876649 17615 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:08:12.876657 17615 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:08:12.876670 17615 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:08:12.876793 17615 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:08:12.876807 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.876814 17615 net.cpp:157] Top shape: 125 (125)\nI0817 16:08:12.876819 17615 net.cpp:165] Memory required for data: 1537500\nI0817 16:08:12.876824 17615 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:08:12.876843 17615 net.cpp:100] Creating Layer pre_conv\nI0817 16:08:12.876850 17615 net.cpp:434] pre_conv <- data_top\nI0817 16:08:12.876863 17615 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:08:12.877240 17615 net.cpp:150] Setting up pre_conv\nI0817 16:08:12.877265 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.877271 17615 net.cpp:165] Memory required for data: 9729500\nI0817 16:08:12.877291 17615 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:08:12.877305 17615 net.cpp:100] Creating Layer pre_bn\nI0817 16:08:12.877311 17615 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:08:12.877323 17615 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:08:12.877666 17615 net.cpp:150] Setting up pre_bn\nI0817 16:08:12.877681 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.877687 17615 net.cpp:165] Memory required for data: 17921500\nI0817 16:08:12.877706 17615 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:08:12.877717 17615 net.cpp:100] Creating Layer pre_scale\nI0817 16:08:12.877722 17615 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:08:12.877732 17615 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:08:12.877795 17615 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:08:12.877996 17615 net.cpp:150] Setting up pre_scale\nI0817 16:08:12.878015 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.878021 17615 net.cpp:165] Memory required for data: 26113500\nI0817 16:08:12.878031 17615 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:08:12.878039 17615 net.cpp:100] Creating Layer pre_relu\nI0817 16:08:12.878047 17615 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:08:12.878057 17615 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:08:12.878067 17615 net.cpp:150] Setting up pre_relu\nI0817 16:08:12.878073 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.878077 17615 net.cpp:165] Memory required for data: 
34305500\nI0817 16:08:12.878082 17615 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:08:12.878093 17615 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:08:12.878098 17615 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:08:12.878105 17615 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:08:12.878118 17615 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:08:12.878197 17615 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:08:12.878211 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.878221 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.878226 17615 net.cpp:165] Memory required for data: 50689500\nI0817 16:08:12.878232 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:08:12.878243 17615 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:08:12.878249 17615 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:08:12.878264 17615 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:08:12.878692 17615 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:08:12.878710 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.878716 17615 net.cpp:165] Memory required for data: 58881500\nI0817 16:08:12.878728 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:08:12.878739 17615 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:08:12.878746 17615 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:08:12.878756 17615 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:08:12.879070 17615 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:08:12.879086 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.879091 17615 net.cpp:165] Memory required for data: 67073500\nI0817 16:08:12.879103 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:08:12.879119 17615 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:08:12.879129 17615 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:08:12.879140 17615 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.879287 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:08:12.879652 17615 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:08:12.879665 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.879672 17615 net.cpp:165] Memory required for data: 75265500\nI0817 16:08:12.879688 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:08:12.879699 17615 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:08:12.879706 17615 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:08:12.879717 17615 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.879727 17615 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:08:12.879737 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.879742 17615 net.cpp:165] Memory required for data: 83457500\nI0817 16:08:12.879747 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:08:12.879761 17615 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:08:12.879770 17615 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:08:12.879786 17615 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:08:12.880179 17615 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:08:12.880194 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.880199 17615 net.cpp:165] Memory required for data: 91649500\nI0817 16:08:12.880208 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:08:12.880218 17615 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:08:12.880226 17615 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:08:12.880235 17615 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:08:12.880563 17615 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:08:12.880578 17615 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.880586 17615 net.cpp:165] Memory required for data: 99841500\nI0817 16:08:12.880605 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:08:12.880622 17615 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:08:12.880630 17615 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:08:12.880640 17615 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.880703 17615 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:08:12.880889 17615 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:08:12.880905 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.880911 17615 net.cpp:165] Memory required for data: 108033500\nI0817 16:08:12.880920 17615 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:08:12.880933 17615 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:08:12.880939 17615 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:08:12.880946 17615 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:08:12.880957 17615 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:08:12.881000 17615 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:08:12.881009 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.881014 17615 net.cpp:165] Memory required for data: 116225500\nI0817 16:08:12.881021 17615 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:08:12.881031 17615 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:08:12.881037 17615 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:08:12.881047 17615 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.881058 17615 net.cpp:150] Setting up L1_b1_relu\nI0817 16:08:12.881067 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.881072 17615 net.cpp:165] Memory required for data: 124417500\nI0817 16:08:12.881078 17615 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.881086 17615 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.881091 17615 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:08:12.881103 17615 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:08:12.881111 17615 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:08:12.881172 17615 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:08:12.881194 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.881204 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.881209 17615 net.cpp:165] Memory required for data: 140801500\nI0817 16:08:12.881216 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:08:12.881227 17615 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:08:12.881234 17615 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:08:12.881248 17615 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:08:12.881659 17615 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:08:12.881675 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.881680 17615 net.cpp:165] Memory required for data: 148993500\nI0817 16:08:12.881692 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:08:12.881701 17615 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:08:12.881707 17615 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:08:12.881721 17615 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:08:12.882037 17615 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:08:12.882055 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.882068 17615 net.cpp:165] Memory required for data: 157185500\nI0817 16:08:12.882081 17615 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:08:12.882094 17615 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:08:12.882102 17615 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:08:12.882109 17615 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.882169 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:08:12.882395 17615 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:08:12.882411 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.882416 17615 net.cpp:165] Memory required for data: 165377500\nI0817 16:08:12.882426 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:08:12.882436 17615 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:08:12.882442 17615 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:08:12.882450 17615 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.882460 17615 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:08:12.882470 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.882475 17615 net.cpp:165] Memory required for data: 173569500\nI0817 16:08:12.882480 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:08:12.882495 17615 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:08:12.882504 17615 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:08:12.882517 17615 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:08:12.883059 17615 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:08:12.883074 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.883080 17615 net.cpp:165] Memory required for data: 181761500\nI0817 16:08:12.883090 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:08:12.883100 17615 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:08:12.883105 17615 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:08:12.883114 17615 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:08:12.883435 17615 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:08:12.883451 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.883456 17615 net.cpp:165] Memory required for data: 189953500\nI0817 16:08:12.883476 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:08:12.883488 17615 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:08:12.883494 17615 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:08:12.883505 17615 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.883579 17615 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:08:12.883780 17615 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:08:12.883795 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.883800 17615 net.cpp:165] Memory required for data: 198145500\nI0817 16:08:12.883821 17615 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:08:12.883833 17615 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:08:12.883841 17615 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:08:12.883849 17615 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:08:12.883862 17615 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:08:12.883901 17615 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:08:12.883913 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.883918 17615 net.cpp:165] Memory required for data: 206337500\nI0817 16:08:12.883924 17615 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:08:12.883934 17615 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:08:12.883940 17615 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:08:12.883947 17615 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.883960 17615 net.cpp:150] Setting up L1_b2_relu\nI0817 16:08:12.883967 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.883972 17615 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:08:12.883977 17615 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.883985 17615 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.883992 17615 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:08:12.884001 17615 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:08:12.884009 17615 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:08:12.884064 17615 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:08:12.884074 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.884081 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.884089 17615 net.cpp:165] Memory required for data: 230913500\nI0817 16:08:12.884095 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:08:12.884109 17615 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:08:12.884115 17615 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:08:12.884127 17615 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:08:12.884521 17615 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:08:12.884538 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.884544 17615 net.cpp:165] Memory required for data: 239105500\nI0817 16:08:12.884554 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:08:12.884567 17615 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:08:12.884573 17615 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:08:12.884582 17615 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:08:12.884910 17615 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:08:12.884927 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.884932 17615 net.cpp:165] Memory required for data: 
247297500\nI0817 16:08:12.884943 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:08:12.884954 17615 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:08:12.884961 17615 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:08:12.884969 17615 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.885035 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:08:12.885227 17615 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:08:12.885241 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.885246 17615 net.cpp:165] Memory required for data: 255489500\nI0817 16:08:12.885257 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:08:12.885264 17615 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:08:12.885270 17615 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:08:12.885284 17615 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.885303 17615 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:08:12.885313 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.885318 17615 net.cpp:165] Memory required for data: 263681500\nI0817 16:08:12.885323 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:08:12.885337 17615 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:08:12.885345 17615 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:08:12.885355 17615 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:08:12.885992 17615 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:08:12.886008 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.886013 17615 net.cpp:165] Memory required for data: 271873500\nI0817 16:08:12.886023 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:08:12.886041 17615 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:08:12.886049 17615 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:08:12.886057 17615 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:08:12.886373 17615 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:08:12.886399 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.886406 17615 net.cpp:165] Memory required for data: 280065500\nI0817 16:08:12.886418 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:08:12.886426 17615 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:08:12.886435 17615 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:08:12.886445 17615 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.886512 17615 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:08:12.886699 17615 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:08:12.886716 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.886723 17615 net.cpp:165] Memory required for data: 288257500\nI0817 16:08:12.886731 17615 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:08:12.886745 17615 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:08:12.886755 17615 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:08:12.886762 17615 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:08:12.886773 17615 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:08:12.886809 17615 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:08:12.886819 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.886827 17615 net.cpp:165] Memory required for data: 296449500\nI0817 16:08:12.886832 17615 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:08:12.886843 17615 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:08:12.886849 17615 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:08:12.886859 17615 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.886870 17615 net.cpp:150] Setting up L1_b3_relu\nI0817 16:08:12.886878 17615 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:08:12.886881 17615 net.cpp:165] Memory required for data: 304641500\nI0817 16:08:12.886886 17615 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.886893 17615 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.886901 17615 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:08:12.886909 17615 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:08:12.886919 17615 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:08:12.886976 17615 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:08:12.886987 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.886996 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.887002 17615 net.cpp:165] Memory required for data: 321025500\nI0817 16:08:12.887007 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:08:12.887022 17615 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:08:12.887037 17615 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:08:12.887048 17615 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:08:12.887428 17615 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:08:12.887442 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.887449 17615 net.cpp:165] Memory required for data: 329217500\nI0817 16:08:12.887457 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:08:12.887470 17615 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:08:12.887475 17615 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:08:12.887483 17615 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:08:12.887799 17615 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:08:12.887817 17615 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:08:12.887823 17615 net.cpp:165] Memory required for data: 337409500\nI0817 16:08:12.887835 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:08:12.887845 17615 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:08:12.887850 17615 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:08:12.887858 17615 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.887917 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:08:12.888075 17615 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:08:12.888088 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.888093 17615 net.cpp:165] Memory required for data: 345601500\nI0817 16:08:12.888103 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:08:12.888113 17615 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:08:12.888119 17615 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:08:12.888128 17615 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.888139 17615 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:08:12.888146 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.888151 17615 net.cpp:165] Memory required for data: 353793500\nI0817 16:08:12.888155 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:08:12.888166 17615 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:08:12.888171 17615 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:08:12.888182 17615 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:08:12.888533 17615 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:08:12.888546 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.888551 17615 net.cpp:165] Memory required for data: 361985500\nI0817 16:08:12.888561 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:08:12.888569 17615 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:08:12.888576 17615 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:08:12.888586 17615 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:08:12.888864 17615 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:08:12.888880 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.888886 17615 net.cpp:165] Memory required for data: 370177500\nI0817 16:08:12.888896 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:08:12.888906 17615 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:08:12.888911 17615 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:08:12.888918 17615 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.888978 17615 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:08:12.889137 17615 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:08:12.889150 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889155 17615 net.cpp:165] Memory required for data: 378369500\nI0817 16:08:12.889164 17615 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:08:12.889173 17615 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:08:12.889178 17615 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:08:12.889185 17615 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:08:12.889202 17615 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:08:12.889237 17615 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:08:12.889247 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889252 17615 net.cpp:165] Memory required for data: 386561500\nI0817 16:08:12.889257 17615 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:08:12.889267 17615 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:08:12.889273 17615 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:08:12.889281 17615 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.889289 17615 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:08:12.889297 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889302 17615 net.cpp:165] Memory required for data: 394753500\nI0817 16:08:12.889307 17615 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.889313 17615 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.889318 17615 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:08:12.889325 17615 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:08:12.889334 17615 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:08:12.889382 17615 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:08:12.889394 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889400 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889405 17615 net.cpp:165] Memory required for data: 411137500\nI0817 16:08:12.889410 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:08:12.889425 17615 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:08:12.889431 17615 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:08:12.889441 17615 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:08:12.889809 17615 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:08:12.889824 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.889830 17615 net.cpp:165] Memory required for data: 419329500\nI0817 16:08:12.889853 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:08:12.889865 17615 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:08:12.889871 17615 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:08:12.889883 17615 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:08:12.890153 17615 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:08:12.890171 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.890175 17615 net.cpp:165] Memory required for data: 427521500\nI0817 16:08:12.890187 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:08:12.890194 17615 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:08:12.890200 17615 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:08:12.890208 17615 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.890265 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:08:12.890424 17615 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:08:12.890436 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.890441 17615 net.cpp:165] Memory required for data: 435713500\nI0817 16:08:12.890450 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:08:12.890461 17615 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:08:12.890467 17615 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:08:12.890477 17615 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.890487 17615 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:08:12.890494 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.890498 17615 net.cpp:165] Memory required for data: 443905500\nI0817 16:08:12.890503 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:08:12.890522 17615 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:08:12.890527 17615 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:08:12.890538 17615 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:08:12.890895 17615 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:08:12.890910 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.890916 17615 net.cpp:165] Memory required for data: 452097500\nI0817 16:08:12.890925 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:08:12.890933 17615 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:08:12.890939 17615 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:08:12.890951 17615 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:08:12.891224 17615 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:08:12.891237 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891243 17615 net.cpp:165] Memory required for data: 460289500\nI0817 16:08:12.891253 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:08:12.891264 17615 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:08:12.891271 17615 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:08:12.891279 17615 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.891335 17615 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:08:12.891520 17615 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:08:12.891535 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891541 17615 net.cpp:165] Memory required for data: 468481500\nI0817 16:08:12.891549 17615 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:08:12.891561 17615 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:08:12.891567 17615 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:08:12.891574 17615 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:08:12.891582 17615 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:08:12.891628 17615 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:08:12.891640 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891645 17615 net.cpp:165] Memory required for data: 476673500\nI0817 16:08:12.891651 17615 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:08:12.891659 17615 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:08:12.891664 17615 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:08:12.891674 17615 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.891685 17615 net.cpp:150] Setting up L1_b5_relu\nI0817 16:08:12.891691 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891697 17615 net.cpp:165] Memory required for data: 484865500\nI0817 16:08:12.891701 17615 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.891708 17615 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.891713 17615 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:08:12.891721 17615 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:08:12.891731 17615 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:08:12.891780 17615 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:08:12.891793 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891798 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.891803 17615 net.cpp:165] Memory required for data: 501249500\nI0817 16:08:12.891808 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:08:12.891822 17615 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:08:12.891829 17615 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:08:12.891837 17615 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:08:12.892182 17615 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:08:12.892197 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.892208 17615 net.cpp:165] Memory required for data: 509441500\nI0817 16:08:12.892217 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:08:12.892227 17615 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:08:12.892232 17615 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:08:12.892244 17615 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:08:12.892539 17615 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:08:12.892555 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.892561 17615 net.cpp:165] Memory required for data: 517633500\nI0817 16:08:12.892571 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:08:12.892580 17615 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:08:12.892586 17615 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:08:12.892593 17615 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.892657 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:08:12.892817 17615 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:08:12.892832 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.892837 17615 net.cpp:165] Memory required for data: 525825500\nI0817 16:08:12.892845 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:08:12.892856 17615 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:08:12.892863 17615 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:08:12.892870 17615 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.892879 17615 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:08:12.892889 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.892894 17615 net.cpp:165] Memory required for data: 534017500\nI0817 16:08:12.892899 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:08:12.892910 17615 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:08:12.892915 17615 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:08:12.892927 17615 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:08:12.893271 17615 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:08:12.893286 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.893291 17615 net.cpp:165] Memory required for data: 542209500\nI0817 16:08:12.893301 17615 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:08:12.893308 17615 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:08:12.893314 17615 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:08:12.893342 17615 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:08:12.893626 17615 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:08:12.893640 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.893646 17615 net.cpp:165] Memory required for data: 550401500\nI0817 16:08:12.893656 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:08:12.893667 17615 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:08:12.893674 17615 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:08:12.893682 17615 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.893738 17615 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:08:12.893898 17615 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:08:12.893913 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.893918 17615 net.cpp:165] Memory required for data: 558593500\nI0817 16:08:12.893926 17615 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:08:12.893947 17615 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:08:12.893954 17615 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:08:12.893961 17615 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:08:12.893970 17615 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:08:12.894008 17615 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:08:12.894018 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894023 17615 net.cpp:165] Memory required for data: 566785500\nI0817 16:08:12.894035 17615 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:08:12.894043 17615 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:08:12.894049 17615 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:08:12.894057 17615 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.894065 17615 net.cpp:150] Setting up L1_b6_relu\nI0817 16:08:12.894073 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894078 17615 net.cpp:165] Memory required for data: 574977500\nI0817 16:08:12.894081 17615 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.894088 17615 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.894093 17615 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:08:12.894104 17615 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:08:12.894114 17615 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:08:12.894160 17615 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:08:12.894172 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894179 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894184 17615 net.cpp:165] Memory required for data: 591361500\nI0817 16:08:12.894189 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:08:12.894203 17615 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:08:12.894209 17615 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:08:12.894218 17615 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:08:12.894598 17615 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:08:12.894618 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894624 17615 net.cpp:165] Memory required for data: 599553500\nI0817 16:08:12.894634 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:08:12.894645 17615 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:08:12.894652 17615 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:08:12.894664 17615 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:08:12.894935 17615 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:08:12.894949 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.894954 17615 net.cpp:165] Memory required for data: 607745500\nI0817 16:08:12.894964 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:08:12.894973 17615 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:08:12.894979 17615 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:08:12.894985 17615 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.895045 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:08:12.895229 17615 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:08:12.895244 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.895249 17615 net.cpp:165] Memory required for data: 615937500\nI0817 16:08:12.895258 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:08:12.895269 17615 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:08:12.895275 17615 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:08:12.895282 17615 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.895292 17615 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:08:12.895299 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.895304 17615 net.cpp:165] Memory required for data: 624129500\nI0817 16:08:12.895308 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:08:12.895323 17615 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:08:12.895328 17615 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:08:12.895339 17615 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:08:12.895704 17615 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:08:12.895718 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.895730 17615 
net.cpp:165] Memory required for data: 632321500\nI0817 16:08:12.895740 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:08:12.895751 17615 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:08:12.895757 17615 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:08:12.895766 17615 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:08:12.896040 17615 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:08:12.896054 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896059 17615 net.cpp:165] Memory required for data: 640513500\nI0817 16:08:12.896070 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:08:12.896077 17615 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:08:12.896083 17615 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:08:12.896091 17615 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.896152 17615 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:08:12.896312 17615 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:08:12.896327 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896332 17615 net.cpp:165] Memory required for data: 648705500\nI0817 16:08:12.896340 17615 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:08:12.896348 17615 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:08:12.896355 17615 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:08:12.896363 17615 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:08:12.896373 17615 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:08:12.896406 17615 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:08:12.896419 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896423 17615 net.cpp:165] Memory required for data: 656897500\nI0817 16:08:12.896428 17615 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:08:12.896438 17615 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:08:12.896445 17615 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:08:12.896452 17615 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.896461 17615 net.cpp:150] Setting up L1_b7_relu\nI0817 16:08:12.896468 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896473 17615 net.cpp:165] Memory required for data: 665089500\nI0817 16:08:12.896478 17615 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.896484 17615 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.896491 17615 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:08:12.896497 17615 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:08:12.896507 17615 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:08:12.896562 17615 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:08:12.896574 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896580 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.896585 17615 net.cpp:165] Memory required for data: 681473500\nI0817 16:08:12.896590 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:08:12.896605 17615 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:08:12.896617 17615 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:08:12.896628 17615 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:08:12.896981 17615 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:08:12.896996 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.897001 17615 net.cpp:165] Memory required for data: 689665500\nI0817 16:08:12.897009 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:08:12.897022 17615 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:08:12.897027 17615 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:08:12.897044 17615 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:08:12.897323 17615 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:08:12.897336 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.897341 17615 net.cpp:165] Memory required for data: 697857500\nI0817 16:08:12.897351 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:08:12.897361 17615 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:08:12.897367 17615 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:08:12.897374 17615 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.897438 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:08:12.897596 17615 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:08:12.897609 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.897620 17615 net.cpp:165] Memory required for data: 706049500\nI0817 16:08:12.897630 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:08:12.897639 17615 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:08:12.897644 17615 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:08:12.897655 17615 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.897665 17615 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:08:12.897672 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.897676 17615 net.cpp:165] Memory required for data: 714241500\nI0817 16:08:12.897681 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:08:12.897696 17615 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:08:12.897701 17615 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:08:12.897712 17615 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:08:12.898069 17615 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:08:12.898083 17615 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898088 17615 net.cpp:165] Memory required for data: 722433500\nI0817 16:08:12.898097 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:08:12.898108 17615 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:08:12.898115 17615 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:08:12.898123 17615 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:08:12.898397 17615 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:08:12.898411 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898416 17615 net.cpp:165] Memory required for data: 730625500\nI0817 16:08:12.898427 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:08:12.898434 17615 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:08:12.898440 17615 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:08:12.898448 17615 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.898509 17615 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:08:12.898675 17615 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:08:12.898689 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898694 17615 net.cpp:165] Memory required for data: 738817500\nI0817 16:08:12.898703 17615 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:08:12.898711 17615 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:08:12.898718 17615 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:08:12.898725 17615 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:08:12.898736 17615 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:08:12.898769 17615 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:08:12.898779 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898784 17615 net.cpp:165] Memory required for data: 747009500\nI0817 16:08:12.898789 17615 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:08:12.898799 17615 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:08:12.898805 17615 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:08:12.898813 17615 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.898830 17615 net.cpp:150] Setting up L1_b8_relu\nI0817 16:08:12.898838 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898844 17615 net.cpp:165] Memory required for data: 755201500\nI0817 16:08:12.898847 17615 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.898855 17615 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.898860 17615 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:08:12.898867 17615 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:08:12.898876 17615 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:08:12.898926 17615 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:08:12.898938 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898946 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.898950 17615 net.cpp:165] Memory required for data: 771585500\nI0817 16:08:12.898955 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:08:12.898968 17615 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:08:12.898975 17615 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:08:12.898984 17615 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:08:12.899348 17615 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:08:12.899363 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.899368 17615 net.cpp:165] Memory required for data: 779777500\nI0817 16:08:12.899376 17615 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:08:12.899389 17615 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:08:12.899396 17615 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:08:12.899405 17615 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:08:12.899682 17615 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:08:12.899696 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.899701 17615 net.cpp:165] Memory required for data: 787969500\nI0817 16:08:12.899713 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:08:12.899724 17615 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:08:12.899729 17615 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:08:12.899739 17615 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.899796 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:08:12.899957 17615 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:08:12.899971 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.899976 17615 net.cpp:165] Memory required for data: 796161500\nI0817 16:08:12.899984 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:08:12.899992 17615 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:08:12.899998 17615 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:08:12.900009 17615 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.900020 17615 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:08:12.900027 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.900032 17615 net.cpp:165] Memory required for data: 804353500\nI0817 16:08:12.900037 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:08:12.900049 17615 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:08:12.900055 17615 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:08:12.900064 17615 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:08:12.900418 17615 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:08:12.900431 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.900436 17615 net.cpp:165] Memory required for data: 812545500\nI0817 16:08:12.900445 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:08:12.900457 17615 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:08:12.900463 17615 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:08:12.900478 17615 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:08:12.900763 17615 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:08:12.900776 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.900781 17615 net.cpp:165] Memory required for data: 820737500\nI0817 16:08:12.900815 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:08:12.900826 17615 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:08:12.900832 17615 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:08:12.900840 17615 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.900900 17615 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:08:12.901062 17615 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:08:12.901077 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.901082 17615 net.cpp:165] Memory required for data: 828929500\nI0817 16:08:12.901090 17615 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:08:12.901099 17615 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:08:12.901105 17615 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:08:12.901113 17615 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:08:12.901119 17615 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:08:12.901156 17615 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:08:12.901166 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.901171 17615 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:08:12.901176 17615 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:08:12.901183 17615 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:08:12.901190 17615 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:08:12.901199 17615 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.901208 17615 net.cpp:150] Setting up L1_b9_relu\nI0817 16:08:12.901216 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.901221 17615 net.cpp:165] Memory required for data: 845313500\nI0817 16:08:12.901224 17615 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.901232 17615 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.901237 17615 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:08:12.901244 17615 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:08:12.901253 17615 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:08:12.901304 17615 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:08:12.901316 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.901324 17615 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:08:12.901329 17615 net.cpp:165] Memory required for data: 861697500\nI0817 16:08:12.901334 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:08:12.901346 17615 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:08:12.901353 17615 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:08:12.901362 17615 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:08:12.901732 17615 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:08:12.901747 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.901752 17615 net.cpp:165] Memory required for data: 
863745500\nI0817 16:08:12.901762 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:08:12.901770 17615 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:08:12.901777 17615 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:08:12.901787 17615 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:08:12.902053 17615 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:08:12.902066 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.902071 17615 net.cpp:165] Memory required for data: 865793500\nI0817 16:08:12.902081 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:08:12.902096 17615 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:08:12.902103 17615 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:08:12.902110 17615 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.902175 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:08:12.902333 17615 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:08:12.902349 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.902354 17615 net.cpp:165] Memory required for data: 867841500\nI0817 16:08:12.902364 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:08:12.902371 17615 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:08:12.902377 17615 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:08:12.902384 17615 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.902393 17615 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:08:12.902400 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.902405 17615 net.cpp:165] Memory required for data: 869889500\nI0817 16:08:12.902410 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:08:12.902423 17615 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:08:12.902429 17615 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:08:12.902441 17615 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:08:12.902803 17615 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:08:12.902818 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.902823 17615 net.cpp:165] Memory required for data: 871937500\nI0817 16:08:12.902832 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:08:12.902844 17615 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:08:12.902850 17615 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:08:12.902861 17615 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:08:12.903127 17615 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:08:12.903141 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.903146 17615 net.cpp:165] Memory required for data: 873985500\nI0817 16:08:12.903156 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:08:12.903164 17615 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:08:12.903170 17615 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:08:12.903178 17615 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.903239 17615 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:08:12.903394 17615 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:08:12.903408 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.903412 17615 net.cpp:165] Memory required for data: 876033500\nI0817 16:08:12.903421 17615 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:08:12.903434 17615 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:08:12.903440 17615 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:08:12.903448 17615 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:08:12.903477 17615 net.cpp:150] Setting up L2_b1_pool\nI0817 16:08:12.903487 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.903492 17615 net.cpp:165] Memory required for data: 878081500\nI0817 16:08:12.903497 17615 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:08:12.903508 17615 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:08:12.903514 17615 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:08:12.903522 17615 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:08:12.903528 17615 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:08:12.903561 17615 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:08:12.903573 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.903578 17615 net.cpp:165] Memory required for data: 880129500\nI0817 16:08:12.903584 17615 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:08:12.903594 17615 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:08:12.903609 17615 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:08:12.903623 17615 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.903633 17615 net.cpp:150] Setting up L2_b1_relu\nI0817 16:08:12.903640 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.903645 17615 net.cpp:165] Memory required for data: 882177500\nI0817 16:08:12.903650 17615 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:08:12.903659 17615 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:08:12.903666 17615 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:08:12.905923 17615 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:08:12.905944 17615 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:08:12.905951 17615 net.cpp:165] Memory required for data: 884225500\nI0817 16:08:12.905956 17615 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:08:12.905966 17615 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:08:12.905972 17615 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:08:12.905979 17615 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:08:12.905989 17615 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:08:12.906033 17615 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:08:12.906045 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.906050 17615 net.cpp:165] Memory required for data: 888321500\nI0817 16:08:12.906056 17615 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.906066 17615 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.906072 17615 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:08:12.906080 17615 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:08:12.906090 17615 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:08:12.906142 17615 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:08:12.906154 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.906162 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.906167 17615 net.cpp:165] Memory required for data: 896513500\nI0817 16:08:12.906172 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:08:12.906185 17615 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:08:12.906193 17615 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:08:12.906204 17615 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:08:12.906710 17615 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:08:12.906725 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.906730 17615 net.cpp:165] Memory required for data: 900609500\nI0817 16:08:12.906740 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:08:12.906749 17615 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:08:12.906755 17615 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:08:12.906766 17615 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:08:12.907035 17615 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:08:12.907048 17615 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:08:12.907053 17615 net.cpp:165] Memory required for data: 904705500\nI0817 16:08:12.907064 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:08:12.907075 17615 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:08:12.907081 17615 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:08:12.907089 17615 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.907147 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:08:12.907305 17615 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:08:12.907317 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.907322 17615 net.cpp:165] Memory required for data: 908801500\nI0817 16:08:12.907331 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:08:12.907342 17615 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:08:12.907356 17615 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:08:12.907364 17615 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.907374 17615 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:08:12.907382 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.907387 17615 net.cpp:165] Memory required for data: 912897500\nI0817 16:08:12.907390 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:08:12.907407 17615 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:08:12.907413 17615 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:08:12.907426 17615 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:08:12.907924 17615 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:08:12.907939 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.907944 17615 net.cpp:165] Memory required for data: 916993500\nI0817 16:08:12.907953 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:08:12.907963 17615 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:08:12.907968 
17615 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:08:12.907979 17615 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:08:12.908241 17615 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:08:12.908255 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908260 17615 net.cpp:165] Memory required for data: 921089500\nI0817 16:08:12.908270 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:08:12.908282 17615 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:08:12.908288 17615 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:08:12.908296 17615 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.908354 17615 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:08:12.908509 17615 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:08:12.908524 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908529 17615 net.cpp:165] Memory required for data: 925185500\nI0817 16:08:12.908537 17615 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:08:12.908550 17615 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:08:12.908556 17615 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:08:12.908563 17615 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:08:12.908571 17615 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:08:12.908599 17615 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:08:12.908617 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908623 17615 net.cpp:165] Memory required for data: 929281500\nI0817 16:08:12.908629 17615 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:08:12.908637 17615 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:08:12.908643 17615 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:08:12.908649 17615 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.908659 17615 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:08:12.908666 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908670 17615 net.cpp:165] Memory required for data: 933377500\nI0817 16:08:12.908675 17615 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.908685 17615 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.908690 17615 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:08:12.908699 17615 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:08:12.908709 17615 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:08:12.908758 17615 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:08:12.908771 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908777 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.908782 17615 net.cpp:165] Memory required for data: 941569500\nI0817 16:08:12.908793 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:08:12.908804 17615 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:08:12.908812 17615 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:08:12.908823 17615 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:08:12.909313 17615 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:08:12.909327 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.909332 17615 net.cpp:165] Memory required for data: 945665500\nI0817 16:08:12.909342 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:08:12.909350 17615 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:08:12.909356 17615 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:08:12.909368 17615 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:08:12.909646 17615 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:08:12.909660 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.909665 17615 net.cpp:165] Memory required for data: 949761500\nI0817 16:08:12.909675 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:08:12.909687 17615 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:08:12.909693 17615 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:08:12.909701 17615 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.909759 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:08:12.909916 17615 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:08:12.909929 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.909934 17615 net.cpp:165] Memory required for data: 953857500\nI0817 16:08:12.909943 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:08:12.909955 17615 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:08:12.909960 17615 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:08:12.909967 17615 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.909977 17615 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:08:12.909984 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.909988 17615 net.cpp:165] Memory required for data: 957953500\nI0817 16:08:12.909993 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:08:12.910008 17615 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:08:12.910014 17615 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:08:12.910025 17615 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:08:12.910509 17615 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:08:12.910524 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.910529 17615 net.cpp:165] Memory required for data: 962049500\nI0817 16:08:12.910538 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:08:12.910547 17615 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:08:12.910553 17615 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:08:12.910562 17615 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:08:12.910838 17615 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:08:12.910852 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.910857 17615 net.cpp:165] Memory required for data: 966145500\nI0817 16:08:12.910868 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:08:12.910876 17615 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:08:12.910883 17615 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:08:12.910893 17615 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.910951 17615 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:08:12.911110 17615 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:08:12.911124 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911129 17615 net.cpp:165] Memory required for data: 970241500\nI0817 16:08:12.911139 17615 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:08:12.911146 17615 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:08:12.911160 17615 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:08:12.911167 17615 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:08:12.911178 17615 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:08:12.911206 17615 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:08:12.911221 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911226 17615 net.cpp:165] Memory required for data: 974337500\nI0817 16:08:12.911232 17615 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:08:12.911252 17615 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:08:12.911258 17615 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:08:12.911265 17615 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.911275 17615 net.cpp:150] Setting up L2_b3_relu\nI0817 16:08:12.911283 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911288 17615 net.cpp:165] Memory required for data: 978433500\nI0817 16:08:12.911293 17615 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.911299 17615 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.911304 17615 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:08:12.911314 17615 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:08:12.911324 17615 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:08:12.911375 17615 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:08:12.911386 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911392 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911397 17615 net.cpp:165] Memory required for data: 986625500\nI0817 16:08:12.911402 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:08:12.911417 17615 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:08:12.911422 17615 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:08:12.911434 17615 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:08:12.911936 17615 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:08:12.911952 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.911957 17615 net.cpp:165] Memory required for data: 990721500\nI0817 16:08:12.911965 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:08:12.911978 17615 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:08:12.911984 17615 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:08:12.911993 17615 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:08:12.912258 17615 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:08:12.912272 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.912277 17615 net.cpp:165] Memory required for data: 994817500\nI0817 16:08:12.912287 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:08:12.912297 17615 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:08:12.912302 17615 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:08:12.912313 17615 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.912371 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:08:12.912530 17615 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:08:12.912544 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.912549 17615 net.cpp:165] Memory required for data: 998913500\nI0817 16:08:12.912559 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:08:12.912565 17615 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:08:12.912572 17615 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:08:12.912580 17615 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.912588 17615 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:08:12.912595 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.912600 17615 net.cpp:165] Memory required for data: 1003009500\nI0817 16:08:12.912619 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:08:12.912634 17615 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:08:12.912642 17615 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:08:12.912652 17615 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:08:12.913148 17615 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:08:12.913162 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.913168 17615 net.cpp:165] Memory required for data: 1007105500\nI0817 16:08:12.913177 17615 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:08:12.913188 17615 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:08:12.913195 17615 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:08:12.913205 17615 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:08:12.913471 17615 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:08:12.913485 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.913489 17615 net.cpp:165] Memory required for data: 1011201500\nI0817 16:08:12.913499 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:08:12.913507 17615 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:08:12.913513 17615 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:08:12.913522 17615 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.913581 17615 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:08:12.913749 17615 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:08:12.913766 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.913771 17615 net.cpp:165] Memory required for data: 1015297500\nI0817 16:08:12.913780 17615 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:08:12.913789 17615 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:08:12.913795 17615 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:08:12.913802 17615 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:08:12.913810 17615 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:08:12.913841 17615 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:08:12.913851 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.913856 17615 net.cpp:165] Memory required for data: 1019393500\nI0817 16:08:12.913861 17615 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:08:12.913867 17615 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:08:12.913873 17615 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:08:12.913883 17615 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.913892 17615 net.cpp:150] Setting up L2_b4_relu\nI0817 16:08:12.913899 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.913904 17615 net.cpp:165] Memory required for data: 1023489500\nI0817 16:08:12.913908 17615 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.913915 17615 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.913921 17615 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:08:12.913931 17615 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:08:12.913941 17615 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:08:12.913988 17615 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:08:12.914000 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.914007 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.914012 17615 net.cpp:165] Memory required for data: 1031681500\nI0817 16:08:12.914017 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:08:12.914031 17615 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:08:12.914037 17615 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:08:12.914047 17615 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:08:12.914551 17615 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:08:12.914564 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.914571 17615 net.cpp:165] Memory required for data: 1035777500\nI0817 16:08:12.914579 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:08:12.914590 17615 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:08:12.914597 17615 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:08:12.914605 17615 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:08:12.914880 17615 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:08:12.914893 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.914898 17615 net.cpp:165] Memory required for data: 1039873500\nI0817 16:08:12.914909 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:08:12.914917 17615 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:08:12.914923 17615 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:08:12.914932 17615 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.914991 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:08:12.915148 17615 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:08:12.915164 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.915169 17615 net.cpp:165] Memory required for data: 1043969500\nI0817 16:08:12.915179 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:08:12.915186 17615 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:08:12.915192 17615 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:08:12.915199 17615 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.915210 17615 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:08:12.915215 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.915220 17615 net.cpp:165] Memory required for data: 1048065500\nI0817 16:08:12.915225 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:08:12.915238 17615 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:08:12.915244 17615 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:08:12.915256 17615 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:08:12.915753 17615 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:08:12.915768 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.915773 17615 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:08:12.915782 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:08:12.915794 17615 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:08:12.915801 17615 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:08:12.915812 17615 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:08:12.916081 17615 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:08:12.916095 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916100 17615 net.cpp:165] Memory required for data: 1056257500\nI0817 16:08:12.916110 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:08:12.916118 17615 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:08:12.916124 17615 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:08:12.916132 17615 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.916193 17615 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:08:12.916348 17615 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:08:12.916363 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916368 17615 net.cpp:165] Memory required for data: 1060353500\nI0817 16:08:12.916376 17615 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:08:12.916388 17615 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:08:12.916394 17615 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:08:12.916401 17615 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:08:12.916409 17615 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:08:12.916436 17615 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:08:12.916452 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916457 17615 net.cpp:165] Memory required for data: 1064449500\nI0817 16:08:12.916463 17615 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:08:12.916473 17615 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:08:12.916481 17615 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:08:12.916487 17615 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.916496 17615 net.cpp:150] Setting up L2_b5_relu\nI0817 16:08:12.916503 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916508 17615 net.cpp:165] Memory required for data: 1068545500\nI0817 16:08:12.916513 17615 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.916520 17615 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.916525 17615 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:08:12.916532 17615 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:08:12.916543 17615 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:08:12.916594 17615 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:08:12.916606 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916618 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.916623 17615 net.cpp:165] Memory required for data: 1076737500\nI0817 16:08:12.916630 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:08:12.916643 17615 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:08:12.916651 17615 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:08:12.916659 17615 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:08:12.917155 17615 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:08:12.917168 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.917174 17615 net.cpp:165] Memory required for data: 1080833500\nI0817 16:08:12.917182 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:08:12.917194 17615 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:08:12.917201 17615 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:08:12.917212 17615 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:08:12.917474 17615 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:08:12.917486 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.917491 17615 net.cpp:165] Memory required for data: 1084929500\nI0817 16:08:12.917501 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:08:12.917510 17615 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:08:12.917516 17615 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:08:12.917524 17615 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.917583 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:08:12.917748 17615 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:08:12.917765 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.917771 17615 net.cpp:165] Memory required for data: 1089025500\nI0817 16:08:12.917780 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:08:12.917788 17615 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:08:12.917794 17615 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:08:12.917801 17615 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.917810 17615 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:08:12.917817 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.917822 17615 net.cpp:165] Memory required for data: 1093121500\nI0817 16:08:12.917827 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:08:12.917840 17615 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:08:12.917846 17615 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:08:12.917857 17615 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:08:12.918359 17615 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:08:12.918375 17615 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.918380 17615 net.cpp:165] Memory required for data: 1097217500\nI0817 16:08:12.918388 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:08:12.918401 17615 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:08:12.918406 17615 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:08:12.918417 17615 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:08:12.918692 17615 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:08:12.918706 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.918712 17615 net.cpp:165] Memory required for data: 1101313500\nI0817 16:08:12.918722 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:08:12.918731 17615 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:08:12.918737 17615 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:08:12.918745 17615 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.918804 17615 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:08:12.918959 17615 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:08:12.918972 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.918978 17615 net.cpp:165] Memory required for data: 1105409500\nI0817 16:08:12.918987 17615 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:08:12.918998 17615 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:08:12.919004 17615 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:08:12.919013 17615 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:08:12.919019 17615 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:08:12.919047 17615 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:08:12.919056 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.919061 17615 net.cpp:165] Memory required for data: 1109505500\nI0817 16:08:12.919066 17615 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:08:12.919077 17615 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:08:12.919083 17615 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:08:12.919091 17615 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.919100 17615 net.cpp:150] Setting up L2_b6_relu\nI0817 16:08:12.919107 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.919111 17615 net.cpp:165] Memory required for data: 1113601500\nI0817 16:08:12.919116 17615 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.919123 17615 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.919128 17615 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:08:12.919136 17615 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:08:12.919145 17615 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:08:12.919196 17615 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:08:12.919209 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.919215 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.919220 17615 net.cpp:165] Memory required for data: 1121793500\nI0817 16:08:12.919225 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:08:12.919239 17615 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:08:12.919245 17615 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:08:12.919255 17615 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:08:12.920735 17615 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:08:12.920753 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.920758 17615 net.cpp:165] Memory required for data: 1125889500\nI0817 16:08:12.920768 17615 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:08:12.920785 17615 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:08:12.920792 17615 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:08:12.920804 17615 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:08:12.921075 17615 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:08:12.921089 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.921094 17615 net.cpp:165] Memory required for data: 1129985500\nI0817 16:08:12.921105 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:08:12.921116 17615 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:08:12.921123 17615 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:08:12.921131 17615 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.921192 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:08:12.921353 17615 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:08:12.921366 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.921372 17615 net.cpp:165] Memory required for data: 1134081500\nI0817 16:08:12.921381 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:08:12.921392 17615 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:08:12.921398 17615 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:08:12.921406 17615 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.921416 17615 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:08:12.921423 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.921428 17615 net.cpp:165] Memory required for data: 1138177500\nI0817 16:08:12.921432 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:08:12.921447 17615 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:08:12.921453 17615 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:08:12.921464 17615 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:08:12.921953 17615 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:08:12.921968 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.921973 17615 net.cpp:165] Memory required for data: 1142273500\nI0817 16:08:12.921983 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:08:12.921993 17615 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:08:12.921998 17615 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:08:12.922009 17615 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:08:12.922284 17615 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:08:12.922297 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922303 17615 net.cpp:165] Memory required for data: 1146369500\nI0817 16:08:12.922313 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:08:12.922325 17615 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:08:12.922332 17615 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:08:12.922339 17615 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.922396 17615 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:08:12.922554 17615 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:08:12.922566 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922571 17615 net.cpp:165] Memory required for data: 1150465500\nI0817 16:08:12.922580 17615 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:08:12.922593 17615 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:08:12.922600 17615 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:08:12.922607 17615 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:08:12.922621 17615 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:08:12.922654 17615 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:08:12.922667 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922672 17615 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:08:12.922677 17615 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:08:12.922684 17615 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:08:12.922696 17615 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:08:12.922704 17615 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.922713 17615 net.cpp:150] Setting up L2_b7_relu\nI0817 16:08:12.922720 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922725 17615 net.cpp:165] Memory required for data: 1158657500\nI0817 16:08:12.922730 17615 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.922740 17615 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.922745 17615 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:08:12.922754 17615 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:08:12.922762 17615 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:08:12.922816 17615 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:08:12.922828 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922835 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.922840 17615 net.cpp:165] Memory required for data: 1166849500\nI0817 16:08:12.922845 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:08:12.922855 17615 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:08:12.922863 17615 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:08:12.922873 17615 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:08:12.923364 17615 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:08:12.923378 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.923384 17615 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:08:12.923393 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:08:12.923403 17615 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:08:12.923408 17615 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:08:12.923419 17615 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:08:12.923698 17615 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:08:12.923712 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.923717 17615 net.cpp:165] Memory required for data: 1175041500\nI0817 16:08:12.923727 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:08:12.923739 17615 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:08:12.923746 17615 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:08:12.923753 17615 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.923812 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:08:12.923974 17615 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:08:12.923986 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.923991 17615 net.cpp:165] Memory required for data: 1179137500\nI0817 16:08:12.924000 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:08:12.924011 17615 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:08:12.924017 17615 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:08:12.924024 17615 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.924034 17615 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:08:12.924041 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.924046 17615 net.cpp:165] Memory required for data: 1183233500\nI0817 16:08:12.924051 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:08:12.924064 17615 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:08:12.924070 17615 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:08:12.924082 17615 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:08:12.924567 17615 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:08:12.924582 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.924587 17615 net.cpp:165] Memory required for data: 1187329500\nI0817 16:08:12.924595 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:08:12.924616 17615 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:08:12.924624 17615 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:08:12.924633 17615 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:08:12.924908 17615 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:08:12.924922 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.924927 17615 net.cpp:165] Memory required for data: 1191425500\nI0817 16:08:12.924937 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:08:12.924947 17615 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:08:12.924952 17615 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:08:12.924962 17615 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.925022 17615 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:08:12.925182 17615 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:08:12.925196 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.925201 17615 net.cpp:165] Memory required for data: 1195521500\nI0817 16:08:12.925210 17615 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:08:12.925220 17615 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:08:12.925225 17615 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:08:12.925232 17615 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:08:12.925243 17615 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:08:12.925271 17615 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:08:12.925283 17615 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:08:12.925289 17615 net.cpp:165] Memory required for data: 1199617500\nI0817 16:08:12.925293 17615 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:08:12.925302 17615 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:08:12.925307 17615 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:08:12.925313 17615 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.925323 17615 net.cpp:150] Setting up L2_b8_relu\nI0817 16:08:12.925330 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.925334 17615 net.cpp:165] Memory required for data: 1203713500\nI0817 16:08:12.925339 17615 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.925350 17615 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.925356 17615 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:08:12.925364 17615 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:08:12.925386 17615 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:08:12.925437 17615 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:08:12.925452 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.925460 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.925465 17615 net.cpp:165] Memory required for data: 1211905500\nI0817 16:08:12.925470 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:08:12.925484 17615 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:08:12.925492 17615 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:08:12.925500 17615 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:08:12.926003 17615 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:08:12.926018 17615 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:08:12.926023 17615 net.cpp:165] Memory required for data: 1216001500\nI0817 16:08:12.926033 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:08:12.926048 17615 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:08:12.926054 17615 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:08:12.926062 17615 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:08:12.926342 17615 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:08:12.926362 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.926367 17615 net.cpp:165] Memory required for data: 1220097500\nI0817 16:08:12.926378 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:08:12.926386 17615 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:08:12.926393 17615 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:08:12.926403 17615 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.926462 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:08:12.926631 17615 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:08:12.926645 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.926651 17615 net.cpp:165] Memory required for data: 1224193500\nI0817 16:08:12.926661 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:08:12.926668 17615 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:08:12.926674 17615 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:08:12.926684 17615 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.926694 17615 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:08:12.926702 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.926707 17615 net.cpp:165] Memory required for data: 1228289500\nI0817 16:08:12.926712 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:08:12.926725 17615 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:08:12.926731 17615 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:08:12.926740 17615 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:08:12.928200 17615 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:08:12.928216 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.928221 17615 net.cpp:165] Memory required for data: 1232385500\nI0817 16:08:12.928231 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:08:12.928241 17615 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:08:12.928248 17615 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:08:12.928262 17615 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:08:12.928529 17615 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:08:12.928544 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.928548 17615 net.cpp:165] Memory required for data: 1236481500\nI0817 16:08:12.928596 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:08:12.928606 17615 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:08:12.928619 17615 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:08:12.928628 17615 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.928691 17615 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:08:12.928845 17615 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:08:12.928860 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.928866 17615 net.cpp:165] Memory required for data: 1240577500\nI0817 16:08:12.928875 17615 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:08:12.928885 17615 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:08:12.928891 17615 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:08:12.928899 17615 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:08:12.928906 17615 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:08:12.928936 17615 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:08:12.928946 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.928951 17615 net.cpp:165] Memory required for data: 1244673500\nI0817 16:08:12.928956 17615 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:08:12.928963 17615 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:08:12.928969 17615 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:08:12.928979 17615 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.928989 17615 net.cpp:150] Setting up L2_b9_relu\nI0817 16:08:12.928997 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.929008 17615 net.cpp:165] Memory required for data: 1248769500\nI0817 16:08:12.929013 17615 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.929020 17615 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.929026 17615 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:08:12.929036 17615 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:08:12.929047 17615 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:08:12.929095 17615 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:08:12.929106 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.929113 17615 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:08:12.929118 17615 net.cpp:165] Memory required for data: 1256961500\nI0817 16:08:12.929123 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:08:12.929137 17615 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:08:12.929144 17615 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:08:12.929153 17615 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:08:12.929656 17615 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:08:12.929672 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.929677 17615 net.cpp:165] Memory required for data: 1257985500\nI0817 16:08:12.929687 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:08:12.929698 17615 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:08:12.929704 17615 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:08:12.929713 17615 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:08:12.929987 17615 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:08:12.930001 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.930006 17615 net.cpp:165] Memory required for data: 1259009500\nI0817 16:08:12.930016 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:08:12.930028 17615 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:08:12.930034 17615 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:08:12.930042 17615 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.930101 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:08:12.930264 17615 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:08:12.930277 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.930282 17615 net.cpp:165] Memory required for data: 1260033500\nI0817 16:08:12.930292 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:08:12.930302 17615 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:08:12.930310 17615 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:08:12.930316 17615 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:08:12.930326 17615 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:08:12.930333 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.930337 17615 net.cpp:165] Memory required for data: 1261057500\nI0817 16:08:12.930342 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:08:12.930356 17615 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:08:12.930362 17615 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:08:12.930373 17615 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:08:12.930869 17615 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:08:12.930883 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.930889 17615 net.cpp:165] Memory required for data: 1262081500\nI0817 16:08:12.930898 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:08:12.930907 17615 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:08:12.930913 17615 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:08:12.930924 17615 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:08:12.931197 17615 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:08:12.931217 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.931223 17615 net.cpp:165] Memory required for data: 1263105500\nI0817 16:08:12.931233 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:08:12.931243 17615 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:08:12.931249 17615 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:08:12.931257 17615 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:08:12.931315 17615 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:08:12.931480 17615 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:08:12.931494 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.931499 17615 net.cpp:165] Memory required for data: 1264129500\nI0817 16:08:12.931509 17615 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:08:12.931520 17615 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:08:12.931527 17615 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:08:12.931536 17615 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:08:12.931573 17615 net.cpp:150] Setting up L3_b1_pool\nI0817 16:08:12.931583 17615 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:08:12.931588 17615 net.cpp:165] Memory required for data: 1265153500\nI0817 16:08:12.931593 17615 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:08:12.931602 17615 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:08:12.931607 17615 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:08:12.931620 17615 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:08:12.931628 17615 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:08:12.931668 17615 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:08:12.931677 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.931682 17615 net.cpp:165] Memory required for data: 1266177500\nI0817 16:08:12.931687 17615 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:08:12.931694 17615 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:08:12.931700 17615 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:08:12.931707 17615 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:08:12.931717 17615 net.cpp:150] Setting up L3_b1_relu\nI0817 16:08:12.931723 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.931728 17615 net.cpp:165] Memory required for data: 1267201500\nI0817 16:08:12.931733 17615 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:08:12.931742 17615 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:08:12.931752 17615 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:08:12.932965 17615 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:08:12.932982 17615 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:08:12.932988 17615 net.cpp:165] Memory required for data: 1268225500\nI0817 16:08:12.932994 17615 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:08:12.933007 17615 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:08:12.933013 17615 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:08:12.933022 17615 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:08:12.933028 17615 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:08:12.933074 17615 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:08:12.933087 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.933092 17615 net.cpp:165] Memory required for data: 1270273500\nI0817 16:08:12.933097 17615 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.933105 17615 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.933111 17615 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:08:12.933121 17615 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:08:12.933131 17615 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:08:12.933185 17615 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:08:12.933200 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.933215 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.933220 17615 net.cpp:165] Memory required for data: 1274369500\nI0817 16:08:12.933225 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:08:12.933238 17615 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:08:12.933243 17615 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:08:12.933255 17615 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:08:12.934307 17615 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:08:12.934324 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.934329 17615 net.cpp:165] Memory required for data: 1276417500\nI0817 16:08:12.934337 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:08:12.934350 17615 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:08:12.934356 17615 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:08:12.934365 17615 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:08:12.934649 17615 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:08:12.934664 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.934669 17615 net.cpp:165] Memory required for data: 1278465500\nI0817 16:08:12.934679 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:08:12.934689 17615 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:08:12.934695 17615 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:08:12.934702 17615 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.934764 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:08:12.934927 17615 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:08:12.934942 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.934947 17615 net.cpp:165] Memory required for data: 1280513500\nI0817 16:08:12.934955 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:08:12.934963 17615 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:08:12.934970 17615 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:08:12.934978 17615 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:08:12.934986 17615 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:08:12.934993 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.934998 17615 net.cpp:165] Memory required for data: 1282561500\nI0817 16:08:12.935003 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:08:12.935016 17615 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:08:12.935024 17615 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:08:12.935034 17615 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:08:12.936142 17615 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:08:12.936159 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.936164 17615 net.cpp:165] Memory required for data: 1284609500\nI0817 16:08:12.936174 17615 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:08:12.936187 17615 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:08:12.936193 17615 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:08:12.936203 17615 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:08:12.936475 17615 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:08:12.936488 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.936493 17615 net.cpp:165] Memory required for data: 1286657500\nI0817 16:08:12.936504 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:08:12.936517 17615 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:08:12.936522 17615 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:08:12.936530 17615 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:08:12.936591 17615 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:08:12.936761 17615 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:08:12.936775 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.936781 17615 net.cpp:165] Memory required for data: 1288705500\nI0817 16:08:12.936797 17615 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:08:12.936812 17615 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:08:12.936820 17615 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:08:12.936827 17615 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:08:12.936837 17615 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:08:12.936872 17615 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:08:12.936884 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.936889 17615 net.cpp:165] Memory required for data: 1290753500\nI0817 16:08:12.936894 17615 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:08:12.936905 17615 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:08:12.936911 17615 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:08:12.936918 17615 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:08:12.936928 17615 net.cpp:150] Setting up L3_b2_relu\nI0817 16:08:12.936935 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.936940 17615 net.cpp:165] Memory required for data: 1292801500\nI0817 16:08:12.936945 17615 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.936952 17615 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.936957 17615 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:08:12.936965 17615 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:08:12.936975 17615 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:08:12.937023 17615 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:08:12.937036 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.937043 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.937047 17615 net.cpp:165] Memory required for data: 1296897500\nI0817 16:08:12.937052 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:08:12.937067 17615 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:08:12.937073 17615 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:08:12.937083 17615 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:08:12.938127 17615 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:08:12.938143 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.938148 17615 net.cpp:165] Memory required for data: 1298945500\nI0817 16:08:12.938156 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:08:12.938169 17615 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:08:12.938175 17615 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:08:12.938184 17615 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:08:12.938452 17615 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:08:12.938467 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.938472 17615 net.cpp:165] Memory required for data: 1300993500\nI0817 16:08:12.938482 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:08:12.938491 17615 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:08:12.938498 17615 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:08:12.938505 17615 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.938565 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:08:12.938735 17615 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:08:12.938747 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.938753 17615 net.cpp:165] Memory required for data: 1303041500\nI0817 16:08:12.938762 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:08:12.938771 17615 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:08:12.938776 17615 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:08:12.938787 17615 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:08:12.938797 17615 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:08:12.938812 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.938817 17615 net.cpp:165] Memory required for data: 1305089500\nI0817 16:08:12.938822 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:08:12.938836 17615 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:08:12.938843 17615 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:08:12.938853 17615 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:08:12.939894 17615 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:08:12.939909 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.939914 17615 net.cpp:165] Memory required for data: 1307137500\nI0817 16:08:12.939924 17615 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:08:12.939935 17615 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:08:12.939942 17615 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:08:12.939951 17615 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:08:12.940224 17615 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:08:12.940238 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940243 17615 net.cpp:165] Memory required for data: 1309185500\nI0817 16:08:12.940253 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:08:12.940265 17615 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:08:12.940271 17615 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:08:12.940279 17615 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:08:12.940343 17615 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:08:12.940502 17615 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:08:12.940515 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940521 17615 net.cpp:165] Memory required for data: 1311233500\nI0817 16:08:12.940531 17615 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:08:12.940542 17615 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:08:12.940548 17615 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:08:12.940557 17615 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:08:12.940567 17615 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:08:12.940600 17615 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:08:12.940618 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940623 17615 net.cpp:165] Memory required for data: 1313281500\nI0817 16:08:12.940629 17615 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:08:12.940640 17615 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:08:12.940646 17615 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:08:12.940654 17615 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:08:12.940663 17615 net.cpp:150] Setting up L3_b3_relu\nI0817 16:08:12.940670 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940675 17615 net.cpp:165] Memory required for data: 1315329500\nI0817 16:08:12.940680 17615 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.940687 17615 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.940692 17615 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:08:12.940701 17615 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:08:12.940709 17615 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:08:12.940760 17615 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:08:12.940773 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940778 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.940783 17615 net.cpp:165] Memory required for data: 1319425500\nI0817 16:08:12.940788 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:08:12.940803 17615 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:08:12.940809 17615 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:08:12.940825 17615 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:08:12.941867 17615 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:08:12.941884 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.941889 17615 net.cpp:165] Memory required for data: 1321473500\nI0817 16:08:12.941897 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:08:12.941910 17615 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:08:12.941916 17615 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:08:12.941926 17615 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:08:12.942196 17615 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:08:12.942209 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.942214 17615 net.cpp:165] Memory required for data: 1323521500\nI0817 16:08:12.942224 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:08:12.942234 17615 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:08:12.942239 17615 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:08:12.942247 17615 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.942308 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:08:12.942471 17615 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:08:12.942484 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.942490 17615 net.cpp:165] Memory required for data: 1325569500\nI0817 16:08:12.942499 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:08:12.942507 17615 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:08:12.942513 17615 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:08:12.942523 17615 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:08:12.942533 17615 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:08:12.942540 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.942545 17615 net.cpp:165] Memory required for data: 1327617500\nI0817 16:08:12.942550 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:08:12.942564 17615 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:08:12.942569 17615 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:08:12.942579 17615 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:08:12.944586 17615 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:08:12.944602 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.944608 17615 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:08:12.944624 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:08:12.944638 17615 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:08:12.944644 17615 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:08:12.944653 17615 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:08:12.944929 17615 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:08:12.944942 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.944948 17615 net.cpp:165] Memory required for data: 1331713500\nI0817 16:08:12.944958 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:08:12.944968 17615 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:08:12.944974 17615 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:08:12.944983 17615 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:08:12.945047 17615 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:08:12.945212 17615 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:08:12.945225 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.945231 17615 net.cpp:165] Memory required for data: 1333761500\nI0817 16:08:12.945240 17615 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:08:12.945250 17615 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:08:12.945255 17615 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:08:12.945262 17615 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:08:12.945273 17615 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:08:12.945317 17615 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:08:12.945329 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.945334 17615 net.cpp:165] Memory required for data: 1335809500\nI0817 16:08:12.945340 17615 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:08:12.945348 17615 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:08:12.945353 17615 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:08:12.945360 17615 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:08:12.945370 17615 net.cpp:150] Setting up L3_b4_relu\nI0817 16:08:12.945377 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.945381 17615 net.cpp:165] Memory required for data: 1337857500\nI0817 16:08:12.945386 17615 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.945396 17615 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.945402 17615 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:08:12.945410 17615 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:08:12.945420 17615 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:08:12.945469 17615 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:08:12.945482 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.945487 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.945492 17615 net.cpp:165] Memory required for data: 1341953500\nI0817 16:08:12.945498 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:08:12.945509 17615 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:08:12.945515 17615 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:08:12.945528 17615 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:08:12.946566 17615 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:08:12.946581 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.946586 17615 net.cpp:165] Memory required for data: 1344001500\nI0817 16:08:12.946595 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:08:12.946605 17615 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:08:12.946617 17615 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:08:12.946631 17615 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:08:12.946907 17615 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:08:12.946923 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.946929 17615 net.cpp:165] Memory required for data: 1346049500\nI0817 16:08:12.946939 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:08:12.946949 17615 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:08:12.946955 17615 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:08:12.946964 17615 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.947021 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:08:12.947186 17615 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:08:12.947201 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.947206 17615 net.cpp:165] Memory required for data: 1348097500\nI0817 16:08:12.947214 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:08:12.947222 17615 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:08:12.947228 17615 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:08:12.947239 17615 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:08:12.947249 17615 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:08:12.947257 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.947262 17615 net.cpp:165] Memory required for data: 1350145500\nI0817 16:08:12.947265 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:08:12.947279 17615 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:08:12.947286 17615 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:08:12.947301 17615 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:08:12.948331 17615 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:08:12.948346 17615 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:08:12.948351 17615 net.cpp:165] Memory required for data: 1352193500\nI0817 16:08:12.948360 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:08:12.948372 17615 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:08:12.948379 17615 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:08:12.948390 17615 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:08:12.948664 17615 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:08:12.948678 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.948683 17615 net.cpp:165] Memory required for data: 1354241500\nI0817 16:08:12.948694 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:08:12.948703 17615 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:08:12.948709 17615 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:08:12.948720 17615 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:08:12.948779 17615 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:08:12.948942 17615 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:08:12.948956 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.948961 17615 net.cpp:165] Memory required for data: 1356289500\nI0817 16:08:12.948969 17615 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:08:12.948978 17615 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:08:12.948985 17615 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:08:12.948992 17615 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:08:12.949003 17615 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:08:12.949039 17615 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:08:12.949049 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.949054 17615 net.cpp:165] Memory required for data: 1358337500\nI0817 16:08:12.949059 17615 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:08:12.949066 17615 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:08:12.949072 17615 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:08:12.949080 17615 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:08:12.949090 17615 net.cpp:150] Setting up L3_b5_relu\nI0817 16:08:12.949096 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.949100 17615 net.cpp:165] Memory required for data: 1360385500\nI0817 16:08:12.949105 17615 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.949116 17615 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.949121 17615 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:08:12.949129 17615 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:08:12.949139 17615 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:08:12.949187 17615 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:08:12.949198 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.949205 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.949210 17615 net.cpp:165] Memory required for data: 1364481500\nI0817 16:08:12.949215 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:08:12.949226 17615 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:08:12.949234 17615 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:08:12.949245 17615 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:08:12.950275 17615 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:08:12.950290 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.950295 17615 net.cpp:165] Memory required for data: 1366529500\nI0817 16:08:12.950304 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:08:12.950320 
17615 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:08:12.950327 17615 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:08:12.950338 17615 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:08:12.950618 17615 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:08:12.950634 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.950640 17615 net.cpp:165] Memory required for data: 1368577500\nI0817 16:08:12.950650 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:08:12.950659 17615 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:08:12.950666 17615 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:08:12.950673 17615 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.950733 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:08:12.950896 17615 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:08:12.950909 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.950914 17615 net.cpp:165] Memory required for data: 1370625500\nI0817 16:08:12.950923 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:08:12.950934 17615 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:08:12.950940 17615 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:08:12.950947 17615 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:08:12.950958 17615 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:08:12.950964 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.950969 17615 net.cpp:165] Memory required for data: 1372673500\nI0817 16:08:12.950974 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:08:12.950989 17615 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:08:12.950994 17615 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:08:12.951004 17615 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:08:12.952033 17615 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:08:12.952049 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952054 17615 net.cpp:165] Memory required for data: 1374721500\nI0817 16:08:12.952062 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:08:12.952076 17615 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:08:12.952083 17615 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:08:12.952095 17615 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:08:12.952363 17615 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:08:12.952376 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952381 17615 net.cpp:165] Memory required for data: 1376769500\nI0817 16:08:12.952391 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:08:12.952400 17615 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:08:12.952406 17615 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:08:12.952419 17615 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:08:12.952478 17615 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:08:12.952648 17615 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:08:12.952661 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952666 17615 net.cpp:165] Memory required for data: 1378817500\nI0817 16:08:12.952677 17615 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:08:12.952685 17615 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:08:12.952692 17615 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:08:12.952698 17615 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:08:12.952709 17615 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:08:12.952749 17615 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:08:12.952760 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952765 17615 net.cpp:165] Memory required for data: 1380865500\nI0817 16:08:12.952770 17615 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:08:12.952778 17615 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:08:12.952791 17615 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:08:12.952801 17615 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:08:12.952811 17615 net.cpp:150] Setting up L3_b6_relu\nI0817 16:08:12.952818 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952823 17615 net.cpp:165] Memory required for data: 1382913500\nI0817 16:08:12.952828 17615 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.952836 17615 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.952841 17615 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:08:12.952848 17615 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:08:12.952858 17615 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:08:12.952911 17615 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:08:12.952924 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952930 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.952935 17615 net.cpp:165] Memory required for data: 1387009500\nI0817 16:08:12.952940 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:08:12.952951 17615 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:08:12.952957 17615 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:08:12.952970 17615 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:08:12.954030 17615 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:08:12.954046 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.954051 17615 net.cpp:165] Memory required for data: 1389057500\nI0817 16:08:12.954061 17615 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:08:12.954071 17615 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:08:12.954077 17615 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:08:12.954088 17615 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:08:12.954361 17615 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:08:12.954378 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.954383 17615 net.cpp:165] Memory required for data: 1391105500\nI0817 16:08:12.954394 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:08:12.954402 17615 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:08:12.954409 17615 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:08:12.954416 17615 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.954478 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:08:12.954649 17615 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:08:12.954663 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.954668 17615 net.cpp:165] Memory required for data: 1393153500\nI0817 16:08:12.954679 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:08:12.954712 17615 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:08:12.954721 17615 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:08:12.954730 17615 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:08:12.954741 17615 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:08:12.954747 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.954752 17615 net.cpp:165] Memory required for data: 1395201500\nI0817 16:08:12.954757 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:08:12.954769 17615 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:08:12.954776 17615 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:08:12.954787 17615 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:08:12.955828 17615 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:08:12.955843 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.955849 17615 net.cpp:165] Memory required for data: 1397249500\nI0817 16:08:12.955858 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:08:12.955874 17615 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:08:12.955881 17615 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:08:12.955893 17615 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:08:12.956169 17615 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:08:12.956185 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956192 17615 net.cpp:165] Memory required for data: 1399297500\nI0817 16:08:12.956202 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:08:12.956212 17615 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:08:12.956218 17615 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:08:12.956225 17615 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:08:12.956285 17615 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:08:12.956447 17615 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:08:12.956460 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956465 17615 net.cpp:165] Memory required for data: 1401345500\nI0817 16:08:12.956475 17615 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:08:12.956486 17615 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:08:12.956493 17615 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:08:12.956501 17615 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:08:12.956508 17615 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:08:12.956544 17615 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:08:12.956557 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956562 17615 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:08:12.956567 17615 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:08:12.956574 17615 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:08:12.956580 17615 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:08:12.956588 17615 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:08:12.956598 17615 net.cpp:150] Setting up L3_b7_relu\nI0817 16:08:12.956604 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956609 17615 net.cpp:165] Memory required for data: 1405441500\nI0817 16:08:12.956620 17615 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.956629 17615 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.956634 17615 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:08:12.956645 17615 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:08:12.956655 17615 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:08:12.956703 17615 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:08:12.956715 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956722 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.956727 17615 net.cpp:165] Memory required for data: 1409537500\nI0817 16:08:12.956732 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:08:12.956745 17615 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:08:12.956753 17615 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:08:12.956761 17615 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:08:12.958760 17615 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:08:12.958777 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.958783 17615 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:08:12.958792 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:08:12.958806 17615 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:08:12.958812 17615 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:08:12.958822 17615 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:08:12.959100 17615 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:08:12.959115 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.959127 17615 net.cpp:165] Memory required for data: 1413633500\nI0817 16:08:12.959138 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:08:12.959147 17615 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:08:12.959153 17615 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:08:12.959161 17615 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.959224 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:08:12.959388 17615 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:08:12.959404 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.959410 17615 net.cpp:165] Memory required for data: 1415681500\nI0817 16:08:12.959419 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:08:12.959427 17615 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:08:12.959434 17615 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:08:12.959441 17615 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:08:12.959450 17615 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:08:12.959457 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.959462 17615 net.cpp:165] Memory required for data: 1417729500\nI0817 16:08:12.959467 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:08:12.959481 17615 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:08:12.959487 17615 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:08:12.959496 17615 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:08:12.960530 17615 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:08:12.960544 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.960549 17615 net.cpp:165] Memory required for data: 1419777500\nI0817 16:08:12.960559 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:08:12.960572 17615 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:08:12.960578 17615 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:08:12.960587 17615 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:08:12.960891 17615 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:08:12.960906 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.960911 17615 net.cpp:165] Memory required for data: 1421825500\nI0817 16:08:12.960922 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:08:12.960932 17615 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:08:12.960937 17615 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:08:12.960948 17615 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:08:12.961009 17615 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:08:12.961174 17615 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:08:12.961189 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.961194 17615 net.cpp:165] Memory required for data: 1423873500\nI0817 16:08:12.961202 17615 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:08:12.961215 17615 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:08:12.961221 17615 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:08:12.961228 17615 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:08:12.961236 17615 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:08:12.961273 17615 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:08:12.961285 17615 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:08:12.961290 17615 net.cpp:165] Memory required for data: 1425921500\nI0817 16:08:12.961295 17615 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:08:12.961303 17615 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:08:12.961309 17615 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:08:12.961319 17615 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:08:12.961329 17615 net.cpp:150] Setting up L3_b8_relu\nI0817 16:08:12.961336 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.961340 17615 net.cpp:165] Memory required for data: 1427969500\nI0817 16:08:12.961351 17615 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.961359 17615 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.961365 17615 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:08:12.961372 17615 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:08:12.961382 17615 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:08:12.961434 17615 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:08:12.961447 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.961453 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.961458 17615 net.cpp:165] Memory required for data: 1432065500\nI0817 16:08:12.961463 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:08:12.961475 17615 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:08:12.961482 17615 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:08:12.961493 17615 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:08:12.962525 17615 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:08:12.962540 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:08:12.962546 17615 net.cpp:165] Memory required for data: 1434113500\nI0817 16:08:12.962555 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:08:12.962568 17615 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:08:12.962574 17615 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:08:12.962582 17615 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:08:12.962868 17615 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:08:12.962882 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.962888 17615 net.cpp:165] Memory required for data: 1436161500\nI0817 16:08:12.962898 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:08:12.962908 17615 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:08:12.962913 17615 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:08:12.962921 17615 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.962985 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:08:12.963150 17615 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:08:12.963162 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.963167 17615 net.cpp:165] Memory required for data: 1438209500\nI0817 16:08:12.963177 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:08:12.963186 17615 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:08:12.963191 17615 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:08:12.963198 17615 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:08:12.963208 17615 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:08:12.963215 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.963220 17615 net.cpp:165] Memory required for data: 1440257500\nI0817 16:08:12.963224 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:08:12.963238 17615 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:08:12.963245 17615 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:08:12.963255 17615 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:08:12.964295 17615 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:08:12.964310 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.964315 17615 net.cpp:165] Memory required for data: 1442305500\nI0817 16:08:12.964324 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:08:12.964337 17615 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:08:12.964344 17615 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:08:12.964354 17615 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:08:12.964628 17615 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:08:12.964643 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.964654 17615 net.cpp:165] Memory required for data: 1444353500\nI0817 16:08:12.964665 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:08:12.964678 17615 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:08:12.964684 17615 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:08:12.964691 17615 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:08:12.964754 17615 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:08:12.964921 17615 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:08:12.964936 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.964941 17615 net.cpp:165] Memory required for data: 1446401500\nI0817 16:08:12.964949 17615 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:08:12.964962 17615 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:08:12.964968 17615 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:08:12.964977 17615 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:08:12.964984 17615 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:08:12.965020 17615 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:08:12.965032 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.965037 17615 net.cpp:165] Memory required for data: 1448449500\nI0817 16:08:12.965042 17615 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:08:12.965050 17615 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:08:12.965056 17615 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:08:12.965066 17615 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:08:12.965076 17615 net.cpp:150] Setting up L3_b9_relu\nI0817 16:08:12.965083 17615 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:08:12.965088 17615 net.cpp:165] Memory required for data: 1450497500\nI0817 16:08:12.965092 17615 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:08:12.965101 17615 net.cpp:100] Creating Layer post_pool\nI0817 16:08:12.965106 17615 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:08:12.965114 17615 net.cpp:408] post_pool -> post_pool\nI0817 16:08:12.965149 17615 net.cpp:150] Setting up post_pool\nI0817 16:08:12.965164 17615 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:08:12.965169 17615 net.cpp:165] Memory required for data: 1450529500\nI0817 16:08:12.965175 17615 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:08:12.965186 17615 net.cpp:100] Creating Layer post_FC\nI0817 16:08:12.965193 17615 net.cpp:434] post_FC <- post_pool\nI0817 16:08:12.965200 17615 net.cpp:408] post_FC -> post_FC_top\nI0817 16:08:12.965363 17615 net.cpp:150] Setting up post_FC\nI0817 16:08:12.965378 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.965384 17615 net.cpp:165] Memory required for data: 1450534500\nI0817 16:08:12.965392 17615 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:08:12.965400 17615 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:08:12.965406 17615 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:08:12.965415 17615 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:08:12.965423 17615 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:08:12.965473 17615 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:08:12.965486 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.965492 17615 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:08:12.965497 17615 net.cpp:165] Memory required for data: 1450544500\nI0817 16:08:12.965502 17615 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:08:12.965514 17615 net.cpp:100] Creating Layer accuracy\nI0817 16:08:12.965520 17615 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:08:12.965528 17615 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:08:12.965534 17615 net.cpp:408] accuracy -> accuracy\nI0817 16:08:12.965546 17615 net.cpp:150] Setting up accuracy\nI0817 16:08:12.965553 17615 net.cpp:157] Top shape: (1)\nI0817 16:08:12.965564 17615 net.cpp:165] Memory required for data: 1450544504\nI0817 16:08:12.965570 17615 layer_factory.hpp:77] Creating layer loss\nI0817 16:08:12.965577 17615 net.cpp:100] Creating Layer loss\nI0817 16:08:12.965584 17615 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:08:12.965590 17615 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:08:12.965597 17615 net.cpp:408] loss -> loss\nI0817 16:08:12.965610 17615 layer_factory.hpp:77] Creating layer loss\nI0817 16:08:12.965742 17615 net.cpp:150] Setting up loss\nI0817 16:08:12.965759 17615 net.cpp:157] Top shape: (1)\nI0817 16:08:12.965764 17615 net.cpp:160]     with loss weight 1\nI0817 16:08:12.965780 17615 net.cpp:165] Memory required for data: 1450544508\nI0817 16:08:12.965786 17615 net.cpp:226] loss needs backward computation.\nI0817 16:08:12.965792 17615 net.cpp:228] accuracy does not need backward computation.\nI0817 16:08:12.965798 17615 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:08:12.965804 17615 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:08:12.965809 17615 net.cpp:226] post_pool needs backward computation.\nI0817 16:08:12.965813 17615 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:08:12.965818 17615 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.965824 17615 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.965828 17615 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.965833 17615 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.965839 17615 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.965843 17615 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.965848 17615 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.965853 17615 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.965858 17615 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:08:12.965863 17615 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:08:12.965868 17615 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.965874 17615 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.965879 17615 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.965884 17615 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.965889 17615 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.965894 17615 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:08:12.965899 17615 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.965904 17615 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.965909 17615 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:08:12.965914 17615 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:08:12.965919 17615 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:08:12.965925 17615 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.965930 17615 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.965935 17615 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.965940 17615 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.965945 17615 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.965950 17615 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.965955 17615 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.965960 17615 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:08:12.965965 17615 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:08:12.965970 17615 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.965976 17615 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.965981 17615 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.965993 17615 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.965999 17615 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.966004 17615 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.966009 17615 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.966014 17615 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.966019 17615 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:08:12.966024 17615 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:08:12.966029 17615 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:08:12.966035 17615 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.966040 17615 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.966047 17615 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.966051 17615 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.966056 17615 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.966061 17615 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.966066 17615 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.966073 17615 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:08:12.966078 17615 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:08:12.966083 17615 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.966089 17615 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.966094 17615 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.966099 17615 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.966104 17615 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.966109 17615 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.966114 17615 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.966120 17615 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.966125 17615 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:08:12.966132 17615 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:08:12.966138 17615 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:08:12.966145 17615 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.966150 17615 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.966154 17615 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.966159 17615 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.966164 17615 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.966169 
17615 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.966174 17615 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.966181 17615 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:08:12.966186 17615 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:08:12.966190 17615 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.966197 17615 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.966202 17615 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.966207 17615 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.966212 17615 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.966217 17615 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.966223 17615 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.966228 17615 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.966233 17615 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:08:12.966239 17615 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:08:12.966250 17615 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:08:12.966256 17615 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:08:12.966261 17615 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.966267 17615 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:08:12.966274 17615 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.966279 17615 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.966284 17615 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.966289 17615 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.966295 17615 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:08:12.966300 17615 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.966305 17615 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.966310 17615 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:08:12.966315 17615 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:08:12.966320 17615 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.966326 17615 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.966331 17615 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.966337 17615 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.966342 17615 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.966347 17615 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.966353 17615 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.966358 17615 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.966364 17615 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:08:12.966369 17615 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:08:12.966374 17615 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.966380 17615 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.966385 17615 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.966392 17615 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.966397 17615 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.966401 17615 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:08:12.966406 17615 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.966413 17615 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.966421 17615 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:08:12.966428 17615 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:08:12.966433 17615 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:08:12.966439 17615 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.966444 17615 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.966449 17615 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.966454 17615 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.966460 17615 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.966465 17615 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.966470 17615 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.966475 17615 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:08:12.966481 17615 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:08:12.966486 17615 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.966492 17615 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.966497 17615 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.966502 17615 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.966508 17615 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.966518 17615 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.966523 17615 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.966529 17615 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.966536 17615 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:08:12.966540 17615 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:08:12.966545 17615 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:08:12.966552 17615 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.966557 17615 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.966562 17615 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.966567 17615 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.966573 17615 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.966578 17615 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.966584 17615 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.966589 17615 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:08:12.966595 17615 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:08:12.966600 17615 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.966606 17615 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.966616 17615 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.966624 17615 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.966629 17615 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.966635 17615 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.966640 17615 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.966646 17615 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.966652 17615 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:08:12.966657 17615 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:08:12.966663 17615 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:08:12.966670 17615 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.966675 17615 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.966681 17615 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.966686 17615 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.966691 17615 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.966696 17615 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.966723 17615 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.966732 17615 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:08:12.966737 17615 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:08:12.966743 17615 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.966749 17615 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.966754 17615 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.966760 17615 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.966765 17615 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.966771 17615 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.966776 17615 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.966783 17615 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.966789 17615 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:08:12.966794 17615 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:08:12.966800 17615 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:08:12.966805 17615 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:08:12.966816 17615 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.966827 17615 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:08:12.966833 17615 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.966840 17615 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.966845 17615 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.966850 17615 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.966856 17615 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:08:12.966861 17615 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.966867 17615 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.966873 17615 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:08:12.966878 17615 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:08:12.966884 17615 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:08:12.966891 17615 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:08:12.966897 17615 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:08:12.966902 17615 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:08:12.966907 17615 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:08:12.966912 17615 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:08:12.966918 17615 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:08:12.966923 17615 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:08:12.966929 17615 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:08:12.966935 17615 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:08:12.966940 17615 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:08:12.966946 17615 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:08:12.966951 17615 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:08:12.966958 17615 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:08:12.966964 17615 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:08:12.966969 17615 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:08:12.966974 17615 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:08:12.966980 17615 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:08:12.966985 17615 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:08:12.966991 17615 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:08:12.966997 17615 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:08:12.967003 17615 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:08:12.967010 17615 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:08:12.967015 17615 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:08:12.967020 17615 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:08:12.967026 17615 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:08:12.967031 17615 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:08:12.967037 17615 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:08:12.967043 17615 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:08:12.967049 17615 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:08:12.967054 17615 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:08:12.967061 17615 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:08:12.967067 17615 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:08:12.967072 17615 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:08:12.967077 17615 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:08:12.967083 17615 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:08:12.967093 17615 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:08:12.967099 17615 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:08:12.967105 17615 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:08:12.967111 17615 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:08:12.967116 17615 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:08:12.967123 17615 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:08:12.967128 17615 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:08:12.967133 17615 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:08:12.967139 17615 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:08:12.967145 17615 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:08:12.967150 17615 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:08:12.967156 17615 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:08:12.967162 17615 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:08:12.967167 17615 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:08:12.967173 17615 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:08:12.967180 17615 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:08:12.967185 17615 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:08:12.967191 17615 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:08:12.967196 17615 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:08:12.967202 17615 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:08:12.967207 17615 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:08:12.967213 17615 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:08:12.967219 17615 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:08:12.967226 17615 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:08:12.967231 17615 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:08:12.967237 17615 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:08:12.967242 17615 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:08:12.967248 17615 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:08:12.967254 17615 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:08:12.967259 17615 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:08:12.967265 17615 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:08:12.967272 17615 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:08:12.967277 17615 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:08:12.967283 17615 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:08:12.967288 17615 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:08:12.967294 17615 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:08:12.967300 17615 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:08:12.967306 17615 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:08:12.967312 17615 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:08:12.967319 17615 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:08:12.967324 17615 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:08:12.967329 17615 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:08:12.967335 17615 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:08:12.967341 17615 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:08:12.967346 17615 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:08:12.967353 17615 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:08:12.967360 17615 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:08:12.967371 17615 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:08:12.967377 17615 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:08:12.967383 17615 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:08:12.967388 17615 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:08:12.967394 17615 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:08:12.967401 17615 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:08:12.967406 17615 net.cpp:226] pre_relu needs backward computation.\nI0817 16:08:12.967411 17615 net.cpp:226] pre_scale needs backward computation.\nI0817 16:08:12.967417 17615 net.cpp:226] pre_bn needs backward computation.\nI0817 16:08:12.967422 17615 net.cpp:226] pre_conv needs backward computation.\nI0817 16:08:12.967428 17615 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:08:12.967435 17615 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:08:12.967439 17615 net.cpp:270] This network produces output accuracy\nI0817 16:08:12.967447 17615 net.cpp:270] This network produces output loss\nI0817 16:08:12.967780 17615 net.cpp:283] Network initialization done.\nI0817 16:08:12.968781 17615 solver.cpp:60] Solver scaffolding done.\nI0817 16:08:13.188704 17615 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:08:13.548171 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:13.548240 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:13.555608 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:13.780853 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:13.780938 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:13.815307 17615 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:08:13.815388 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:14.262301 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:14.262356 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:14.270294 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:14.517992 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:14.518097 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:14.569527 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:14.569636 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:15.081616 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:15.081696 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:15.090113 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:15.353310 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:15.353477 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:15.424180 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:15.424340 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:15.508091 17615 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:08:15.994940 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:15.995025 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:08:16.004827 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:16.298890 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:16.299087 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:16.390471 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:16.390652 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:17.037575 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:17.037634 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:17.048230 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:17.360769 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:17.360947 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:17.473311 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:17.473489 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:18.183872 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:18.183925 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:18.195304 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:18.537247 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:18.537456 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:18.670210 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:18.670414 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:08:19.448956 17615 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:08:19.449019 17615 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:08:19.465065 17615 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:08:19.500506 17642 blocking_queue.cpp:50] Waiting for data\nI0817 16:08:19.547228 17639 blocking_queue.cpp:50] Waiting for data\nI0817 16:08:19.888702 17615 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:08:19.888937 17615 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:08:20.040515 17615 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:08:20.040752 17615 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:08:20.212697 17615 parallel.cpp:425] Starting Optimization\nI0817 16:08:20.215183 17615 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:08:20.215198 17615 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:08:20.220530 17615 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:09:40.272672 17615 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:09:40.272953 17615 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:09:44.119906 17615 solver.cpp:228] Iteration 0, loss = 4.74131\nI0817 16:09:44.119948 17615 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0817 16:09:44.119976 17615 solver.cpp:244]     Train net output #1: loss = 4.74131 (* 1 = 4.74131 loss)\nI0817 16:09:44.172559 17615 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:11:59.733654 17615 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:13:20.206792 17615 solver.cpp:404]     Test net output #0: accuracy = 0.20076\nI0817 16:13:20.207026 17615 solver.cpp:404]     Test net output #1: loss = 2.18803 (* 1 = 2.18803 loss)\nI0817 16:13:21.513965 17615 
solver.cpp:228] Iteration 100, loss = 2.01377\nI0817 16:13:21.514009 17615 solver.cpp:244]     Train net output #0: accuracy = 0.208\nI0817 16:13:21.514034 17615 solver.cpp:244]     Train net output #1: loss = 2.01377 (* 1 = 2.01377 loss)\nI0817 16:13:21.594738 17615 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:15:36.955823 17615 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:16:57.349313 17615 solver.cpp:404]     Test net output #0: accuracy = 0.26332\nI0817 16:16:57.349570 17615 solver.cpp:404]     Test net output #1: loss = 1.86965 (* 1 = 1.86965 loss)\nI0817 16:16:58.657461 17615 solver.cpp:228] Iteration 200, loss = 1.73986\nI0817 16:16:58.657502 17615 solver.cpp:244]     Train net output #0: accuracy = 0.328\nI0817 16:16:58.657517 17615 solver.cpp:244]     Train net output #1: loss = 1.73986 (* 1 = 1.73986 loss)\nI0817 16:16:58.739907 17615 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:19:14.698546 17615 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:20:35.107658 17615 solver.cpp:404]     Test net output #0: accuracy = 0.33792\nI0817 16:20:35.107911 17615 solver.cpp:404]     Test net output #1: loss = 1.93653 (* 1 = 1.93653 loss)\nI0817 16:20:36.415483 17615 solver.cpp:228] Iteration 300, loss = 1.49025\nI0817 16:20:36.415516 17615 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0817 16:20:36.415531 17615 solver.cpp:244]     Train net output #1: loss = 1.49025 (* 1 = 1.49025 loss)\nI0817 16:20:36.492929 17615 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:22:52.420805 17615 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:24:12.827657 17615 solver.cpp:404]     Test net output #0: accuracy = 0.37732\nI0817 16:24:12.827868 17615 solver.cpp:404]     Test net output #1: loss = 1.8002 (* 1 = 1.8002 loss)\nI0817 16:24:14.136109 17615 solver.cpp:228] Iteration 400, loss = 1.16765\nI0817 16:24:14.136152 17615 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0817 16:24:14.136168 17615 
solver.cpp:244]     Train net output #1: loss = 1.16765 (* 1 = 1.16765 loss)\nI0817 16:24:14.215713 17615 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:26:30.243379 17615 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:27:50.642556 17615 solver.cpp:404]     Test net output #0: accuracy = 0.50536\nI0817 16:27:50.642810 17615 solver.cpp:404]     Test net output #1: loss = 1.52599 (* 1 = 1.52599 loss)\nI0817 16:27:51.950981 17615 solver.cpp:228] Iteration 500, loss = 0.809649\nI0817 16:27:51.951015 17615 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 16:27:51.951031 17615 solver.cpp:244]     Train net output #1: loss = 0.809649 (* 1 = 0.809649 loss)\nI0817 16:27:52.025272 17615 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:30:07.961262 17615 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:31:28.362891 17615 solver.cpp:404]     Test net output #0: accuracy = 0.54176\nI0817 16:31:28.363121 17615 solver.cpp:404]     Test net output #1: loss = 1.52764 (* 1 = 1.52764 loss)\nI0817 16:31:29.672646 17615 solver.cpp:228] Iteration 600, loss = 0.611177\nI0817 16:31:29.672679 17615 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 16:31:29.672694 17615 solver.cpp:244]     Train net output #1: loss = 0.611177 (* 1 = 0.611177 loss)\nI0817 16:31:29.745836 17615 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:33:45.692795 17615 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:35:06.077314 17615 solver.cpp:404]     Test net output #0: accuracy = 0.5332\nI0817 16:35:06.077571 17615 solver.cpp:404]     Test net output #1: loss = 2.46894 (* 1 = 2.46894 loss)\nI0817 16:35:07.386178 17615 solver.cpp:228] Iteration 700, loss = 0.328851\nI0817 16:35:07.386220 17615 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 16:35:07.386237 17615 solver.cpp:244]     Train net output #1: loss = 0.328851 (* 1 = 0.328851 loss)\nI0817 16:35:07.461912 17615 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0817 16:37:23.370921 17615 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:38:43.756765 17615 solver.cpp:404]     Test net output #0: accuracy = 0.60604\nI0817 16:38:43.757027 17615 solver.cpp:404]     Test net output #1: loss = 1.68036 (* 1 = 1.68036 loss)\nI0817 16:38:45.065322 17615 solver.cpp:228] Iteration 800, loss = 0.338984\nI0817 16:38:45.065363 17615 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 16:38:45.065381 17615 solver.cpp:244]     Train net output #1: loss = 0.338984 (* 1 = 0.338984 loss)\nI0817 16:38:45.138339 17615 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:41:01.092741 17615 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:42:21.482334 17615 solver.cpp:404]     Test net output #0: accuracy = 0.58652\nI0817 16:42:21.482583 17615 solver.cpp:404]     Test net output #1: loss = 2.0523 (* 1 = 2.0523 loss)\nI0817 16:42:22.790784 17615 solver.cpp:228] Iteration 900, loss = 0.172845\nI0817 16:42:22.790818 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:42:22.790833 17615 solver.cpp:244]     Train net output #1: loss = 0.172845 (* 1 = 0.172845 loss)\nI0817 16:42:22.865447 17615 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:44:38.782228 17615 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:45:59.139104 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6016\nI0817 16:45:59.139349 17615 solver.cpp:404]     Test net output #1: loss = 2.25904 (* 1 = 2.25904 loss)\nI0817 16:46:00.446995 17615 solver.cpp:228] Iteration 1000, loss = 0.105077\nI0817 16:46:00.447037 17615 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:46:00.447052 17615 solver.cpp:244]     Train net output #1: loss = 0.105077 (* 1 = 0.105077 loss)\nI0817 16:46:00.533401 17615 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:48:16.477349 17615 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:49:36.835007 17615 solver.cpp:404]     Test net output #0: 
accuracy = 0.55276\nI0817 16:49:36.835266 17615 solver.cpp:404]     Test net output #1: loss = 2.39214 (* 1 = 2.39214 loss)\nI0817 16:49:38.143055 17615 solver.cpp:228] Iteration 1100, loss = 0.117812\nI0817 16:49:38.143098 17615 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:49:38.143115 17615 solver.cpp:244]     Train net output #1: loss = 0.117812 (* 1 = 0.117812 loss)\nI0817 16:49:38.216856 17615 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:51:54.207526 17615 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:53:14.562108 17615 solver.cpp:404]     Test net output #0: accuracy = 0.58396\nI0817 16:53:14.562367 17615 solver.cpp:404]     Test net output #1: loss = 2.66815 (* 1 = 2.66815 loss)\nI0817 16:53:15.870055 17615 solver.cpp:228] Iteration 1200, loss = 0.181989\nI0817 16:53:15.870098 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:53:15.870115 17615 solver.cpp:244]     Train net output #1: loss = 0.181989 (* 1 = 0.181989 loss)\nI0817 16:53:15.946527 17615 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:55:31.841651 17615 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:56:52.204933 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64976\nI0817 16:56:52.205195 17615 solver.cpp:404]     Test net output #1: loss = 1.99457 (* 1 = 1.99457 loss)\nI0817 16:56:53.513229 17615 solver.cpp:228] Iteration 1300, loss = 0.0639486\nI0817 16:56:53.513274 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 16:56:53.513289 17615 solver.cpp:244]     Train net output #1: loss = 0.0639486 (* 1 = 0.0639486 loss)\nI0817 16:56:53.591605 17615 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:59:09.627017 17615 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:00:29.976239 17615 solver.cpp:404]     Test net output #0: accuracy = 0.60496\nI0817 17:00:29.976480 17615 solver.cpp:404]     Test net output #1: loss = 2.15864 (* 1 = 2.15864 loss)\nI0817 
17:00:31.284409 17615 solver.cpp:228] Iteration 1400, loss = 0.152782\nI0817 17:00:31.284452 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:00:31.284467 17615 solver.cpp:244]     Train net output #1: loss = 0.152782 (* 1 = 0.152782 loss)\nI0817 17:00:31.368443 17615 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:02:47.379058 17615 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:04:07.748821 17615 solver.cpp:404]     Test net output #0: accuracy = 0.67548\nI0817 17:04:07.749064 17615 solver.cpp:404]     Test net output #1: loss = 1.78294 (* 1 = 1.78294 loss)\nI0817 17:04:09.057121 17615 solver.cpp:228] Iteration 1500, loss = 0.206473\nI0817 17:04:09.057166 17615 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:04:09.057183 17615 solver.cpp:244]     Train net output #1: loss = 0.206473 (* 1 = 0.206473 loss)\nI0817 17:04:09.136199 17615 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:06:25.100908 17615 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:07:45.472333 17615 solver.cpp:404]     Test net output #0: accuracy = 0.59712\nI0817 17:07:45.472568 17615 solver.cpp:404]     Test net output #1: loss = 2.73646 (* 1 = 2.73646 loss)\nI0817 17:07:46.781139 17615 solver.cpp:228] Iteration 1600, loss = 0.137661\nI0817 17:07:46.781184 17615 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:07:46.781201 17615 solver.cpp:244]     Train net output #1: loss = 0.137661 (* 1 = 0.137661 loss)\nI0817 17:07:46.863911 17615 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:10:02.833753 17615 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:11:23.208273 17615 solver.cpp:404]     Test net output #0: accuracy = 0.57504\nI0817 17:11:23.208534 17615 solver.cpp:404]     Test net output #1: loss = 2.44807 (* 1 = 2.44807 loss)\nI0817 17:11:24.517292 17615 solver.cpp:228] Iteration 1700, loss = 0.144419\nI0817 17:11:24.517335 17615 solver.cpp:244]     Train net output #0: 
accuracy = 0.952\nI0817 17:11:24.517351 17615 solver.cpp:244]     Train net output #1: loss = 0.144419 (* 1 = 0.144419 loss)\nI0817 17:11:24.595626 17615 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:13:40.545836 17615 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:15:00.918237 17615 solver.cpp:404]     Test net output #0: accuracy = 0.52792\nI0817 17:15:00.918503 17615 solver.cpp:404]     Test net output #1: loss = 3.28607 (* 1 = 3.28607 loss)\nI0817 17:15:02.227274 17615 solver.cpp:228] Iteration 1800, loss = 0.048433\nI0817 17:15:02.227319 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:15:02.227335 17615 solver.cpp:244]     Train net output #1: loss = 0.048433 (* 1 = 0.048433 loss)\nI0817 17:15:02.302152 17615 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:17:18.196144 17615 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:18:38.568909 17615 solver.cpp:404]     Test net output #0: accuracy = 0.5544\nI0817 17:18:38.569177 17615 solver.cpp:404]     Test net output #1: loss = 2.62635 (* 1 = 2.62635 loss)\nI0817 17:18:39.877966 17615 solver.cpp:228] Iteration 1900, loss = 0.156173\nI0817 17:18:39.878010 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:18:39.878026 17615 solver.cpp:244]     Train net output #1: loss = 0.156173 (* 1 = 0.156173 loss)\nI0817 17:18:39.958178 17615 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:20:55.909878 17615 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:22:16.272009 17615 solver.cpp:404]     Test net output #0: accuracy = 0.66564\nI0817 17:22:16.272266 17615 solver.cpp:404]     Test net output #1: loss = 1.74247 (* 1 = 1.74247 loss)\nI0817 17:22:17.581118 17615 solver.cpp:228] Iteration 2000, loss = 0.10611\nI0817 17:22:17.581161 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:22:17.581177 17615 solver.cpp:244]     Train net output #1: loss = 0.10611 (* 1 = 0.10611 loss)\nI0817 
17:22:17.660545 17615 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:24:33.501189 17615 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:25:53.872380 17615 solver.cpp:404]     Test net output #0: accuracy = 0.62828\nI0817 17:25:53.872614 17615 solver.cpp:404]     Test net output #1: loss = 2.12522 (* 1 = 2.12522 loss)\nI0817 17:25:55.180734 17615 solver.cpp:228] Iteration 2100, loss = 0.0432028\nI0817 17:25:55.180779 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:25:55.180795 17615 solver.cpp:244]     Train net output #1: loss = 0.0432028 (* 1 = 0.0432028 loss)\nI0817 17:25:55.262481 17615 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:28:11.214169 17615 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:29:31.587272 17615 solver.cpp:404]     Test net output #0: accuracy = 0.61028\nI0817 17:29:31.587507 17615 solver.cpp:404]     Test net output #1: loss = 2.22566 (* 1 = 2.22566 loss)\nI0817 17:29:32.897557 17615 solver.cpp:228] Iteration 2200, loss = 0.0615764\nI0817 17:29:32.897600 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:29:32.897616 17615 solver.cpp:244]     Train net output #1: loss = 0.0615763 (* 1 = 0.0615763 loss)\nI0817 17:29:32.974450 17615 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:31:48.870635 17615 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:33:09.238363 17615 solver.cpp:404]     Test net output #0: accuracy = 0.65804\nI0817 17:33:09.238623 17615 solver.cpp:404]     Test net output #1: loss = 1.6841 (* 1 = 1.6841 loss)\nI0817 17:33:10.548108 17615 solver.cpp:228] Iteration 2300, loss = 0.111342\nI0817 17:33:10.548153 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:33:10.548171 17615 solver.cpp:244]     Train net output #1: loss = 0.111342 (* 1 = 0.111342 loss)\nI0817 17:33:10.623101 17615 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:35:26.546176 17615 solver.cpp:337] Iteration 2400, Testing 
net (#0)\nI0817 17:36:46.886979 17615 solver.cpp:404]     Test net output #0: accuracy = 0.58328\nI0817 17:36:46.902964 17615 solver.cpp:404]     Test net output #1: loss = 2.52603 (* 1 = 2.52603 loss)\nI0817 17:36:48.211477 17615 solver.cpp:228] Iteration 2400, loss = 0.177902\nI0817 17:36:48.211521 17615 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:36:48.211537 17615 solver.cpp:244]     Train net output #1: loss = 0.177902 (* 1 = 0.177902 loss)\nI0817 17:36:48.292199 17615 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:39:04.352656 17615 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:40:24.685384 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6526\nI0817 17:40:24.685632 17615 solver.cpp:404]     Test net output #1: loss = 2.01 (* 1 = 2.01 loss)\nI0817 17:40:25.993109 17615 solver.cpp:228] Iteration 2500, loss = 0.0431915\nI0817 17:40:25.993155 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:40:25.993170 17615 solver.cpp:244]     Train net output #1: loss = 0.0431915 (* 1 = 0.0431915 loss)\nI0817 17:40:26.074090 17615 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:42:41.977854 17615 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:44:02.354249 17615 solver.cpp:404]     Test net output #0: accuracy = 0.56332\nI0817 17:44:02.354483 17615 solver.cpp:404]     Test net output #1: loss = 2.58641 (* 1 = 2.58641 loss)\nI0817 17:44:03.663522 17615 solver.cpp:228] Iteration 2600, loss = 0.115852\nI0817 17:44:03.663565 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:44:03.663581 17615 solver.cpp:244]     Train net output #1: loss = 0.115852 (* 1 = 0.115852 loss)\nI0817 17:44:03.742508 17615 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:46:19.756841 17615 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:47:40.098956 17615 solver.cpp:404]     Test net output #0: accuracy = 0.63776\nI0817 17:47:40.099215 17615 solver.cpp:404]     
Test net output #1: loss = 2.02126 (* 1 = 2.02126 loss)\nI0817 17:47:41.407582 17615 solver.cpp:228] Iteration 2700, loss = 0.0862093\nI0817 17:47:41.407625 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:47:41.407641 17615 solver.cpp:244]     Train net output #1: loss = 0.0862092 (* 1 = 0.0862092 loss)\nI0817 17:47:41.486990 17615 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:49:57.535604 17615 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:51:17.864347 17615 solver.cpp:404]     Test net output #0: accuracy = 0.448\nI0817 17:51:17.864609 17615 solver.cpp:404]     Test net output #1: loss = 4.60586 (* 1 = 4.60586 loss)\nI0817 17:51:19.173034 17615 solver.cpp:228] Iteration 2800, loss = 0.152044\nI0817 17:51:19.173079 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:51:19.173094 17615 solver.cpp:244]     Train net output #1: loss = 0.152044 (* 1 = 0.152044 loss)\nI0817 17:51:19.250226 17615 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:53:35.229429 17615 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:54:55.581821 17615 solver.cpp:404]     Test net output #0: accuracy = 0.66364\nI0817 17:54:55.582079 17615 solver.cpp:404]     Test net output #1: loss = 1.63914 (* 1 = 1.63914 loss)\nI0817 17:54:56.890768 17615 solver.cpp:228] Iteration 2900, loss = 0.0851549\nI0817 17:54:56.890811 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:54:56.890827 17615 solver.cpp:244]     Train net output #1: loss = 0.0851548 (* 1 = 0.0851548 loss)\nI0817 17:54:56.968570 17615 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:57:12.880928 17615 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:58:33.217566 17615 solver.cpp:404]     Test net output #0: accuracy = 0.61436\nI0817 17:58:33.217821 17615 solver.cpp:404]     Test net output #1: loss = 1.85536 (* 1 = 1.85536 loss)\nI0817 17:58:34.526315 17615 solver.cpp:228] Iteration 3000, loss = 0.166007\nI0817 
17:58:34.526347 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:58:34.526362 17615 solver.cpp:244]     Train net output #1: loss = 0.166007 (* 1 = 0.166007 loss)\nI0817 17:58:34.604496 17615 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:00:50.525640 17615 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:02:10.867252 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64632\nI0817 18:02:10.867508 17615 solver.cpp:404]     Test net output #1: loss = 2.08312 (* 1 = 2.08312 loss)\nI0817 18:02:12.175508 17615 solver.cpp:228] Iteration 3100, loss = 0.115221\nI0817 18:02:12.175551 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:02:12.175568 17615 solver.cpp:244]     Train net output #1: loss = 0.115221 (* 1 = 0.115221 loss)\nI0817 18:02:12.253342 17615 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:04:27.996750 17615 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:05:48.346103 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64824\nI0817 18:05:48.346375 17615 solver.cpp:404]     Test net output #1: loss = 1.8536 (* 1 = 1.8536 loss)\nI0817 18:05:49.654430 17615 solver.cpp:228] Iteration 3200, loss = 0.0862698\nI0817 18:05:49.654464 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:05:49.654479 17615 solver.cpp:244]     Train net output #1: loss = 0.0862697 (* 1 = 0.0862697 loss)\nI0817 18:05:49.734897 17615 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:08:05.625227 17615 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:09:25.965168 17615 solver.cpp:404]     Test net output #0: accuracy = 0.59716\nI0817 18:09:25.965418 17615 solver.cpp:404]     Test net output #1: loss = 2.91937 (* 1 = 2.91937 loss)\nI0817 18:09:27.273339 17615 solver.cpp:228] Iteration 3300, loss = 0.111328\nI0817 18:09:27.273375 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:09:27.273391 17615 solver.cpp:244]     Train net 
output #1: loss = 0.111328 (* 1 = 0.111328 loss)\nI0817 18:09:27.353159 17615 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:11:43.335208 17615 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:13:03.678419 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64412\nI0817 18:13:03.678661 17615 solver.cpp:404]     Test net output #1: loss = 1.92309 (* 1 = 1.92309 loss)\nI0817 18:13:04.987517 17615 solver.cpp:228] Iteration 3400, loss = 0.0463051\nI0817 18:13:04.987557 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:13:04.987572 17615 solver.cpp:244]     Train net output #1: loss = 0.0463051 (* 1 = 0.0463051 loss)\nI0817 18:13:05.063295 17615 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:15:20.996312 17615 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:16:41.353816 17615 solver.cpp:404]     Test net output #0: accuracy = 0.54244\nI0817 18:16:41.354041 17615 solver.cpp:404]     Test net output #1: loss = 2.48087 (* 1 = 2.48087 loss)\nI0817 18:16:42.662492 17615 solver.cpp:228] Iteration 3500, loss = 0.145527\nI0817 18:16:42.662533 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:16:42.662547 17615 solver.cpp:244]     Train net output #1: loss = 0.145527 (* 1 = 0.145527 loss)\nI0817 18:16:42.741055 17615 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:18:58.591080 17615 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:20:18.977708 17615 solver.cpp:404]     Test net output #0: accuracy = 0.58904\nI0817 18:20:18.977923 17615 solver.cpp:404]     Test net output #1: loss = 2.42103 (* 1 = 2.42103 loss)\nI0817 18:20:20.286448 17615 solver.cpp:228] Iteration 3600, loss = 0.0780311\nI0817 18:20:20.286489 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:20:20.286505 17615 solver.cpp:244]     Train net output #1: loss = 0.0780311 (* 1 = 0.0780311 loss)\nI0817 18:20:20.364547 17615 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 
18:22:36.253450 17615 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:23:56.643226 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6768\nI0817 18:23:56.643458 17615 solver.cpp:404]     Test net output #1: loss = 1.65819 (* 1 = 1.65819 loss)\nI0817 18:23:57.952129 17615 solver.cpp:228] Iteration 3700, loss = 0.047642\nI0817 18:23:57.952163 17615 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 18:23:57.952179 17615 solver.cpp:244]     Train net output #1: loss = 0.047642 (* 1 = 0.047642 loss)\nI0817 18:23:58.029490 17615 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:26:13.973176 17615 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:27:34.364852 17615 solver.cpp:404]     Test net output #0: accuracy = 0.69684\nI0817 18:27:34.365104 17615 solver.cpp:404]     Test net output #1: loss = 1.42107 (* 1 = 1.42107 loss)\nI0817 18:27:35.672945 17615 solver.cpp:228] Iteration 3800, loss = 0.210252\nI0817 18:27:35.672991 17615 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:27:35.673007 17615 solver.cpp:244]     Train net output #1: loss = 0.210252 (* 1 = 0.210252 loss)\nI0817 18:27:35.754542 17615 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:29:51.768415 17615 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:31:12.154844 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6402\nI0817 18:31:12.155084 17615 solver.cpp:404]     Test net output #1: loss = 1.83924 (* 1 = 1.83924 loss)\nI0817 18:31:13.462852 17615 solver.cpp:228] Iteration 3900, loss = 0.15222\nI0817 18:31:13.462888 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:31:13.462908 17615 solver.cpp:244]     Train net output #1: loss = 0.15222 (* 1 = 0.15222 loss)\nI0817 18:31:13.541597 17615 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:33:29.462000 17615 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:34:49.848176 17615 solver.cpp:404]     Test net output #0: accuracy 
= 0.6462\nI0817 18:34:49.848409 17615 solver.cpp:404]     Test net output #1: loss = 1.76431 (* 1 = 1.76431 loss)\nI0817 18:34:51.157480 17615 solver.cpp:228] Iteration 4000, loss = 0.142035\nI0817 18:34:51.157522 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:34:51.157538 17615 solver.cpp:244]     Train net output #1: loss = 0.142035 (* 1 = 0.142035 loss)\nI0817 18:34:51.232761 17615 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:37:07.158668 17615 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:38:27.551790 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64828\nI0817 18:38:27.552006 17615 solver.cpp:404]     Test net output #1: loss = 1.74402 (* 1 = 1.74402 loss)\nI0817 18:38:28.860245 17615 solver.cpp:228] Iteration 4100, loss = 0.11557\nI0817 18:38:28.860290 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:38:28.860306 17615 solver.cpp:244]     Train net output #1: loss = 0.11557 (* 1 = 0.11557 loss)\nI0817 18:38:28.939822 17615 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:40:44.984269 17615 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:42:05.357259 17615 solver.cpp:404]     Test net output #0: accuracy = 0.67384\nI0817 18:42:05.357496 17615 solver.cpp:404]     Test net output #1: loss = 1.60852 (* 1 = 1.60852 loss)\nI0817 18:42:06.665742 17615 solver.cpp:228] Iteration 4200, loss = 0.211797\nI0817 18:42:06.665784 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:42:06.665799 17615 solver.cpp:244]     Train net output #1: loss = 0.211797 (* 1 = 0.211797 loss)\nI0817 18:42:06.743484 17615 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:44:22.616461 17615 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:45:42.997763 17615 solver.cpp:404]     Test net output #0: accuracy = 0.69108\nI0817 18:45:42.997974 17615 solver.cpp:404]     Test net output #1: loss = 1.34776 (* 1 = 1.34776 loss)\nI0817 18:45:44.306521 17615 
solver.cpp:228] Iteration 4300, loss = 0.0848977\nI0817 18:45:44.306555 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:45:44.306571 17615 solver.cpp:244]     Train net output #1: loss = 0.0848977 (* 1 = 0.0848977 loss)\nI0817 18:45:44.381482 17615 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:48:00.395114 17615 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:49:20.787830 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6494\nI0817 18:49:20.788081 17615 solver.cpp:404]     Test net output #1: loss = 2.04072 (* 1 = 2.04072 loss)\nI0817 18:49:22.096635 17615 solver.cpp:228] Iteration 4400, loss = 0.104312\nI0817 18:49:22.096679 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:49:22.096695 17615 solver.cpp:244]     Train net output #1: loss = 0.104312 (* 1 = 0.104312 loss)\nI0817 18:49:22.174350 17615 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:51:38.178091 17615 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:52:58.586784 17615 solver.cpp:404]     Test net output #0: accuracy = 0.60836\nI0817 18:52:58.587026 17615 solver.cpp:404]     Test net output #1: loss = 2.33966 (* 1 = 2.33966 loss)\nI0817 18:52:59.896677 17615 solver.cpp:228] Iteration 4500, loss = 0.0647086\nI0817 18:52:59.896721 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:52:59.896739 17615 solver.cpp:244]     Train net output #1: loss = 0.0647087 (* 1 = 0.0647087 loss)\nI0817 18:52:59.975910 17615 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 18:55:16.018676 17615 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:56:36.399186 17615 solver.cpp:404]     Test net output #0: accuracy = 0.57592\nI0817 18:56:36.399410 17615 solver.cpp:404]     Test net output #1: loss = 2.11843 (* 1 = 2.11843 loss)\nI0817 18:56:37.707888 17615 solver.cpp:228] Iteration 4600, loss = 0.178527\nI0817 18:56:37.707928 17615 solver.cpp:244]     Train net output #0: accuracy = 
0.936\nI0817 18:56:37.707943 17615 solver.cpp:244]     Train net output #1: loss = 0.178527 (* 1 = 0.178527 loss)\nI0817 18:56:37.785302 17615 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 18:58:53.827046 17615 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:00:14.212653 17615 solver.cpp:404]     Test net output #0: accuracy = 0.65776\nI0817 19:00:14.212893 17615 solver.cpp:404]     Test net output #1: loss = 1.79211 (* 1 = 1.79211 loss)\nI0817 19:00:15.522593 17615 solver.cpp:228] Iteration 4700, loss = 0.13018\nI0817 19:00:15.522639 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:00:15.522655 17615 solver.cpp:244]     Train net output #1: loss = 0.13018 (* 1 = 0.13018 loss)\nI0817 19:00:15.598227 17615 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:02:31.409327 17615 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:03:51.783849 17615 solver.cpp:404]     Test net output #0: accuracy = 0.636\nI0817 19:03:51.784107 17615 solver.cpp:404]     Test net output #1: loss = 1.86712 (* 1 = 1.86712 loss)\nI0817 19:03:53.093282 17615 solver.cpp:228] Iteration 4800, loss = 0.16701\nI0817 19:03:53.093327 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:03:53.093343 17615 solver.cpp:244]     Train net output #1: loss = 0.16701 (* 1 = 0.16701 loss)\nI0817 19:03:53.168288 17615 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:06:09.199749 17615 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:07:29.560029 17615 solver.cpp:404]     Test net output #0: accuracy = 0.66164\nI0817 19:07:29.560253 17615 solver.cpp:404]     Test net output #1: loss = 1.77646 (* 1 = 1.77646 loss)\nI0817 19:07:30.869237 17615 solver.cpp:228] Iteration 4900, loss = 0.0535519\nI0817 19:07:30.869282 17615 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:07:30.869298 17615 solver.cpp:244]     Train net output #1: loss = 0.053552 (* 1 = 0.053552 loss)\nI0817 19:07:30.947552 17615 
sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:09:46.886256 17615 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:11:07.238044 17615 solver.cpp:404]     Test net output #0: accuracy = 0.64668\nI0817 19:11:07.238293 17615 solver.cpp:404]     Test net output #1: loss = 1.90348 (* 1 = 1.90348 loss)\nI0817 19:11:08.546818 17615 solver.cpp:228] Iteration 5000, loss = 0.108182\nI0817 19:11:08.546864 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:11:08.546880 17615 solver.cpp:244]     Train net output #1: loss = 0.108182 (* 1 = 0.108182 loss)\nI0817 19:11:08.623438 17615 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:13:24.583772 17615 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:14:44.963141 17615 solver.cpp:404]     Test net output #0: accuracy = 0.63312\nI0817 19:14:44.963337 17615 solver.cpp:404]     Test net output #1: loss = 1.48766 (* 1 = 1.48766 loss)\nI0817 19:14:46.272538 17615 solver.cpp:228] Iteration 5100, loss = 0.134789\nI0817 19:14:46.272573 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:14:46.272588 17615 solver.cpp:244]     Train net output #1: loss = 0.134789 (* 1 = 0.134789 loss)\nI0817 19:14:46.348685 17615 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:17:02.221501 17615 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:18:22.611268 17615 solver.cpp:404]     Test net output #0: accuracy = 0.54744\nI0817 19:18:22.611531 17615 solver.cpp:404]     Test net output #1: loss = 3.33783 (* 1 = 3.33783 loss)\nI0817 19:18:23.919833 17615 solver.cpp:228] Iteration 5200, loss = 0.11709\nI0817 19:18:23.919879 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:18:23.919895 17615 solver.cpp:244]     Train net output #1: loss = 0.11709 (* 1 = 0.11709 loss)\nI0817 19:18:23.997864 17615 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:20:40.013495 17615 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:22:00.396900 
17615 solver.cpp:404]     Test net output #0: accuracy = 0.62536\nI0817 19:22:00.397143 17615 solver.cpp:404]     Test net output #1: loss = 2.0756 (* 1 = 2.0756 loss)\nI0817 19:22:01.705870 17615 solver.cpp:228] Iteration 5300, loss = 0.0938581\nI0817 19:22:01.705919 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:22:01.705936 17615 solver.cpp:244]     Train net output #1: loss = 0.0938581 (* 1 = 0.0938581 loss)\nI0817 19:22:01.784169 17615 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:24:17.681854 17615 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:25:38.073299 17615 solver.cpp:404]     Test net output #0: accuracy = 0.63916\nI0817 19:25:38.073557 17615 solver.cpp:404]     Test net output #1: loss = 1.59788 (* 1 = 1.59788 loss)\nI0817 19:25:39.382215 17615 solver.cpp:228] Iteration 5400, loss = 0.115653\nI0817 19:25:39.382261 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:25:39.382277 17615 solver.cpp:244]     Train net output #1: loss = 0.115653 (* 1 = 0.115653 loss)\nI0817 19:25:39.455895 17615 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:27:55.394278 17615 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:29:15.788214 17615 solver.cpp:404]     Test net output #0: accuracy = 0.59836\nI0817 19:29:15.788480 17615 solver.cpp:404]     Test net output #1: loss = 1.93623 (* 1 = 1.93623 loss)\nI0817 19:29:17.097173 17615 solver.cpp:228] Iteration 5500, loss = 0.136256\nI0817 19:29:17.097218 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:29:17.097234 17615 solver.cpp:244]     Train net output #1: loss = 0.136256 (* 1 = 0.136256 loss)\nI0817 19:29:17.173218 17615 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:31:33.132642 17615 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:32:53.530853 17615 solver.cpp:404]     Test net output #0: accuracy = 0.53588\nI0817 19:32:53.531119 17615 solver.cpp:404]     Test net output #1: loss = 
3.02275 (* 1 = 3.02275 loss)\nI0817 19:32:54.842402 17615 solver.cpp:228] Iteration 5600, loss = 0.0437411\nI0817 19:32:54.842447 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:32:54.842473 17615 solver.cpp:244]     Train net output #1: loss = 0.0437412 (* 1 = 0.0437412 loss)\nI0817 19:32:54.917408 17615 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:35:10.895272 17615 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:36:31.368366 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6878\nI0817 19:36:31.368638 17615 solver.cpp:404]     Test net output #1: loss = 1.38453 (* 1 = 1.38453 loss)\nI0817 19:36:32.677848 17615 solver.cpp:228] Iteration 5700, loss = 0.174944\nI0817 19:36:32.677901 17615 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:36:32.677924 17615 solver.cpp:244]     Train net output #1: loss = 0.174944 (* 1 = 0.174944 loss)\nI0817 19:36:32.755815 17615 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:38:48.739513 17615 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:40:09.280925 17615 solver.cpp:404]     Test net output #0: accuracy = 0.67568\nI0817 19:40:09.281188 17615 solver.cpp:404]     Test net output #1: loss = 1.52353 (* 1 = 1.52353 loss)\nI0817 19:40:10.589933 17615 solver.cpp:228] Iteration 5800, loss = 0.0732769\nI0817 19:40:10.589982 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:40:10.590008 17615 solver.cpp:244]     Train net output #1: loss = 0.073277 (* 1 = 0.073277 loss)\nI0817 19:40:10.668176 17615 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:42:26.604871 17615 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:43:47.083588 17615 solver.cpp:404]     Test net output #0: accuracy = 0.50868\nI0817 19:43:47.083874 17615 solver.cpp:404]     Test net output #1: loss = 3.50085 (* 1 = 3.50085 loss)\nI0817 19:43:48.392879 17615 solver.cpp:228] Iteration 5900, loss = 0.0850568\nI0817 19:43:48.392916 17615 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:43:48.392940 17615 solver.cpp:244]     Train net output #1: loss = 0.0850568 (* 1 = 0.0850568 loss)\nI0817 19:43:48.466342 17615 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:46:04.403923 17615 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:47:24.863912 17615 solver.cpp:404]     Test net output #0: accuracy = 0.62056\nI0817 19:47:24.864181 17615 solver.cpp:404]     Test net output #1: loss = 1.77938 (* 1 = 1.77938 loss)\nI0817 19:47:26.173931 17615 solver.cpp:228] Iteration 6000, loss = 0.106011\nI0817 19:47:26.173979 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:47:26.174003 17615 solver.cpp:244]     Train net output #1: loss = 0.106011 (* 1 = 0.106011 loss)\nI0817 19:47:26.251456 17615 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:49:42.158745 17615 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:51:02.611631 17615 solver.cpp:404]     Test net output #0: accuracy = 0.60628\nI0817 19:51:02.611914 17615 solver.cpp:404]     Test net output #1: loss = 2.44327 (* 1 = 2.44327 loss)\nI0817 19:51:03.919847 17615 solver.cpp:228] Iteration 6100, loss = 0.225082\nI0817 19:51:03.919899 17615 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:51:03.919924 17615 solver.cpp:244]     Train net output #1: loss = 0.225082 (* 1 = 0.225082 loss)\nI0817 19:51:04.002056 17615 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 19:53:19.953951 17615 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:54:40.349647 17615 solver.cpp:404]     Test net output #0: accuracy = 0.66012\nI0817 19:54:40.349939 17615 solver.cpp:404]     Test net output #1: loss = 1.88064 (* 1 = 1.88064 loss)\nI0817 19:54:41.658926 17615 solver.cpp:228] Iteration 6200, loss = 0.193027\nI0817 19:54:41.658974 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:54:41.658999 17615 solver.cpp:244]     Train net output #1: loss = 
0.193027 (* 1 = 0.193027 loss)\nI0817 19:54:41.739699 17615 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 19:56:57.683188 17615 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 19:58:18.170243 17615 solver.cpp:404]     Test net output #0: accuracy = 0.7014\nI0817 19:58:18.170514 17615 solver.cpp:404]     Test net output #1: loss = 1.46176 (* 1 = 1.46176 loss)\nI0817 19:58:19.479076 17615 solver.cpp:228] Iteration 6300, loss = 0.112216\nI0817 19:58:19.479125 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:58:19.479148 17615 solver.cpp:244]     Train net output #1: loss = 0.112216 (* 1 = 0.112216 loss)\nI0817 19:58:19.554738 17615 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:00:35.631395 17615 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:01:56.104632 17615 solver.cpp:404]     Test net output #0: accuracy = 0.56928\nI0817 20:01:56.104915 17615 solver.cpp:404]     Test net output #1: loss = 3.18127 (* 1 = 3.18127 loss)\nI0817 20:01:57.414515 17615 solver.cpp:228] Iteration 6400, loss = 0.137423\nI0817 20:01:57.414564 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:01:57.414588 17615 solver.cpp:244]     Train net output #1: loss = 0.137423 (* 1 = 0.137423 loss)\nI0817 20:01:57.486287 17615 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:04:13.428766 17615 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:05:33.777442 17615 solver.cpp:404]     Test net output #0: accuracy = 0.67192\nI0817 20:05:33.777710 17615 solver.cpp:404]     Test net output #1: loss = 1.67843 (* 1 = 1.67843 loss)\nI0817 20:05:35.085624 17615 solver.cpp:228] Iteration 6500, loss = 0.0856063\nI0817 20:05:35.085669 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:05:35.085690 17615 solver.cpp:244]     Train net output #1: loss = 0.0856063 (* 1 = 0.0856063 loss)\nI0817 20:05:35.164773 17615 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 20:07:51.148720 17615 
solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:09:11.499300 17615 solver.cpp:404]     Test net output #0: accuracy = 0.72548\nI0817 20:09:11.499542 17615 solver.cpp:404]     Test net output #1: loss = 1.28357 (* 1 = 1.28357 loss)\nI0817 20:09:12.807739 17615 solver.cpp:228] Iteration 6600, loss = 0.0835306\nI0817 20:09:12.807773 17615 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:09:12.807788 17615 solver.cpp:244]     Train net output #1: loss = 0.0835306 (* 1 = 0.0835306 loss)\nI0817 20:09:12.888217 17615 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:11:28.843111 17615 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:12:49.188594 17615 solver.cpp:404]     Test net output #0: accuracy = 0.692\nI0817 20:12:49.188861 17615 solver.cpp:404]     Test net output #1: loss = 1.49999 (* 1 = 1.49999 loss)\nI0817 20:12:50.496920 17615 solver.cpp:228] Iteration 6700, loss = 0.102427\nI0817 20:12:50.496968 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:12:50.496982 17615 solver.cpp:244]     Train net output #1: loss = 0.102427 (* 1 = 0.102427 loss)\nI0817 20:12:50.578294 17615 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:15:06.524232 17615 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:16:26.890461 17615 solver.cpp:404]     Test net output #0: accuracy = 0.705\nI0817 20:16:26.890705 17615 solver.cpp:404]     Test net output #1: loss = 1.40997 (* 1 = 1.40997 loss)\nI0817 20:16:28.199481 17615 solver.cpp:228] Iteration 6800, loss = 0.0623898\nI0817 20:16:28.199525 17615 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:16:28.199542 17615 solver.cpp:244]     Train net output #1: loss = 0.0623898 (* 1 = 0.0623898 loss)\nI0817 20:16:28.274919 17615 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:18:44.277838 17615 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:20:04.603688 17615 solver.cpp:404]     Test net output #0: accuracy = 
0.71016\nI0817 20:20:04.603948 17615 solver.cpp:404]     Test net output #1: loss = 1.44054 (* 1 = 1.44054 loss)\nI0817 20:20:05.912689 17615 solver.cpp:228] Iteration 6900, loss = 0.0620003\nI0817 20:20:05.912734 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:20:05.912750 17615 solver.cpp:244]     Train net output #1: loss = 0.0620003 (* 1 = 0.0620003 loss)\nI0817 20:20:05.989729 17615 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:22:22.059581 17615 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:23:42.382583 17615 solver.cpp:404]     Test net output #0: accuracy = 0.71092\nI0817 20:23:42.382846 17615 solver.cpp:404]     Test net output #1: loss = 1.52855 (* 1 = 1.52855 loss)\nI0817 20:23:43.691936 17615 solver.cpp:228] Iteration 7000, loss = 0.156037\nI0817 20:23:43.691979 17615 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:23:43.691995 17615 solver.cpp:244]     Train net output #1: loss = 0.156037 (* 1 = 0.156037 loss)\nI0817 20:23:43.771615 17615 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:25:59.617955 17615 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:27:19.935515 17615 solver.cpp:404]     Test net output #0: accuracy = 0.5942\nI0817 20:27:19.935757 17615 solver.cpp:404]     Test net output #1: loss = 2.84392 (* 1 = 2.84392 loss)\nI0817 20:27:21.243816 17615 solver.cpp:228] Iteration 7100, loss = 0.0210383\nI0817 20:27:21.243858 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:27:21.243875 17615 solver.cpp:244]     Train net output #1: loss = 0.0210384 (* 1 = 0.0210384 loss)\nI0817 20:27:21.316901 17615 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:29:37.285619 17615 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:30:57.613767 17615 solver.cpp:404]     Test net output #0: accuracy = 0.72116\nI0817 20:30:57.614033 17615 solver.cpp:404]     Test net output #1: loss = 1.21323 (* 1 = 1.21323 loss)\nI0817 20:30:58.921895 17615 
solver.cpp:228] Iteration 7200, loss = 0.0298883\nI0817 20:30:58.921926 17615 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:30:58.921941 17615 solver.cpp:244]     Train net output #1: loss = 0.0298884 (* 1 = 0.0298884 loss)\nI0817 20:30:58.998829 17615 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:33:14.905486 17615 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:34:35.239859 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6798\nI0817 20:34:35.240123 17615 solver.cpp:404]     Test net output #1: loss = 1.64806 (* 1 = 1.64806 loss)\nI0817 20:34:36.548135 17615 solver.cpp:228] Iteration 7300, loss = 0.00652792\nI0817 20:34:36.548177 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:34:36.548193 17615 solver.cpp:244]     Train net output #1: loss = 0.006528 (* 1 = 0.006528 loss)\nI0817 20:34:36.621052 17615 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:36:52.565320 17615 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:38:12.917520 17615 solver.cpp:404]     Test net output #0: accuracy = 0.6688\nI0817 20:38:12.917763 17615 solver.cpp:404]     Test net output #1: loss = 1.92345 (* 1 = 1.92345 loss)\nI0817 20:38:14.223702 17615 solver.cpp:228] Iteration 7400, loss = 0.0538204\nI0817 20:38:14.223743 17615 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:38:14.223760 17615 solver.cpp:244]     Train net output #1: loss = 0.0538204 (* 1 = 0.0538204 loss)\nI0817 20:38:14.301043 17615 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:40:29.917003 17615 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:41:50.284319 17615 solver.cpp:404]     Test net output #0: accuracy = 0.7244\nI0817 20:41:50.284579 17615 solver.cpp:404]     Test net output #1: loss = 1.43484 (* 1 = 1.43484 loss)\nI0817 20:41:51.590482 17615 solver.cpp:228] Iteration 7500, loss = 0.0288347\nI0817 20:41:51.590523 17615 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0817 20:41:51.590539 17615 solver.cpp:244]     Train net output #1: loss = 0.0288348 (* 1 = 0.0288348 loss)\nI0817 20:41:51.670272 17615 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 20:44:07.175454 17615 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:45:27.535187 17615 solver.cpp:404]     Test net output #0: accuracy = 0.66744\nI0817 20:45:27.535424 17615 solver.cpp:404]     Test net output #1: loss = 1.50526 (* 1 = 1.50526 loss)\nI0817 20:45:28.841351 17615 solver.cpp:228] Iteration 7600, loss = 0.068058\nI0817 20:45:28.841392 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:45:28.841408 17615 solver.cpp:244]     Train net output #1: loss = 0.068058 (* 1 = 0.068058 loss)\nI0817 20:45:28.920192 17615 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:47:44.423471 17615 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:49:04.802320 17615 solver.cpp:404]     Test net output #0: accuracy = 0.62168\nI0817 20:49:04.802582 17615 solver.cpp:404]     Test net output #1: loss = 2.26438 (* 1 = 2.26438 loss)\nI0817 20:49:06.108891 17615 solver.cpp:228] Iteration 7700, loss = 0.0665288\nI0817 20:49:06.108934 17615 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:49:06.108952 17615 solver.cpp:244]     Train net output #1: loss = 0.0665289 (* 1 = 0.0665289 loss)\nI0817 20:49:06.187912 17615 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:51:21.608445 17615 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:52:41.957648 17615 solver.cpp:404]     Test net output #0: accuracy = 0.7336\nI0817 20:52:41.957937 17615 solver.cpp:404]     Test net output #1: loss = 1.30576 (* 1 = 1.30576 loss)\nI0817 20:52:43.264044 17615 solver.cpp:228] Iteration 7800, loss = 0.00305624\nI0817 20:52:43.264086 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:52:43.264103 17615 solver.cpp:244]     Train net output #1: loss = 0.00305626 (* 1 = 0.00305626 loss)\nI0817 20:52:43.346925 
17615 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 20:54:58.993227 17615 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 20:56:19.348384 17615 solver.cpp:404]     Test net output #0: accuracy = 0.77544\nI0817 20:56:19.348654 17615 solver.cpp:404]     Test net output #1: loss = 0.9825 (* 1 = 0.9825 loss)\nI0817 20:56:20.654626 17615 solver.cpp:228] Iteration 7900, loss = 0.0017525\nI0817 20:56:20.654667 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:56:20.654690 17615 solver.cpp:244]     Train net output #1: loss = 0.00175252 (* 1 = 0.00175252 loss)\nI0817 20:56:20.727948 17615 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 20:58:36.224592 17615 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 20:59:56.588816 17615 solver.cpp:404]     Test net output #0: accuracy = 0.78228\nI0817 20:59:56.589058 17615 solver.cpp:404]     Test net output #1: loss = 0.861207 (* 1 = 0.861207 loss)\nI0817 20:59:57.894837 17615 solver.cpp:228] Iteration 8000, loss = 0.000460804\nI0817 20:59:57.894878 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:59:57.894893 17615 solver.cpp:244]     Train net output #1: loss = 0.000460828 (* 1 = 0.000460828 loss)\nI0817 20:59:57.970016 17615 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:02:13.519630 17615 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:03:33.881876 17615 solver.cpp:404]     Test net output #0: accuracy = 0.78332\nI0817 21:03:33.882122 17615 solver.cpp:404]     Test net output #1: loss = 0.789921 (* 1 = 0.789921 loss)\nI0817 21:03:35.188045 17615 solver.cpp:228] Iteration 8100, loss = 0.000461896\nI0817 21:03:35.188087 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:03:35.188103 17615 solver.cpp:244]     Train net output #1: loss = 0.00046192 (* 1 = 0.00046192 loss)\nI0817 21:03:35.269371 17615 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:05:50.955364 17615 solver.cpp:337] Iteration 8200, Testing net 
(#0)\nI0817 21:07:11.345252 17615 solver.cpp:404]     Test net output #0: accuracy = 0.79032\nI0817 21:07:11.345522 17615 solver.cpp:404]     Test net output #1: loss = 0.748521 (* 1 = 0.748521 loss)\nI0817 21:07:12.651679 17615 solver.cpp:228] Iteration 8200, loss = 0.00049698\nI0817 21:07:12.651721 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:07:12.651736 17615 solver.cpp:244]     Train net output #1: loss = 0.000497004 (* 1 = 0.000497004 loss)\nI0817 21:07:12.732718 17615 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:09:28.205417 17615 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:10:48.562577 17615 solver.cpp:404]     Test net output #0: accuracy = 0.7934\nI0817 21:10:48.562860 17615 solver.cpp:404]     Test net output #1: loss = 0.734797 (* 1 = 0.734797 loss)\nI0817 21:10:49.868965 17615 solver.cpp:228] Iteration 8300, loss = 0.000636655\nI0817 21:10:49.869006 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:10:49.869024 17615 solver.cpp:244]     Train net output #1: loss = 0.00063668 (* 1 = 0.00063668 loss)\nI0817 21:10:49.946967 17615 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:13:05.468938 17615 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:14:25.839380 17615 solver.cpp:404]     Test net output #0: accuracy = 0.7988\nI0817 21:14:25.839638 17615 solver.cpp:404]     Test net output #1: loss = 0.724745 (* 1 = 0.724745 loss)\nI0817 21:14:27.145807 17615 solver.cpp:228] Iteration 8400, loss = 0.000611126\nI0817 21:14:27.145848 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:14:27.145864 17615 solver.cpp:244]     Train net output #1: loss = 0.00061115 (* 1 = 0.00061115 loss)\nI0817 21:14:27.223543 17615 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:16:42.769539 17615 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:18:03.140684 17615 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI0817 21:18:03.140933 17615 
solver.cpp:404]     Test net output #1: loss = 0.71079 (* 1 = 0.71079 loss)\nI0817 21:18:04.447134 17615 solver.cpp:228] Iteration 8500, loss = 0.000479099\nI0817 21:18:04.447175 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:18:04.447192 17615 solver.cpp:244]     Train net output #1: loss = 0.000479124 (* 1 = 0.000479124 loss)\nI0817 21:18:04.522234 17615 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:20:20.117779 17615 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:21:40.483927 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0817 21:21:40.484192 17615 solver.cpp:404]     Test net output #1: loss = 0.713053 (* 1 = 0.713053 loss)\nI0817 21:21:41.790351 17615 solver.cpp:228] Iteration 8600, loss = 0.000478333\nI0817 21:21:41.790395 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:21:41.790411 17615 solver.cpp:244]     Train net output #1: loss = 0.000478358 (* 1 = 0.000478358 loss)\nI0817 21:21:41.868618 17615 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:23:57.414577 17615 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:25:17.801580 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0817 21:25:17.801853 17615 solver.cpp:404]     Test net output #1: loss = 0.705532 (* 1 = 0.705532 loss)\nI0817 21:25:19.108029 17615 solver.cpp:228] Iteration 8700, loss = 0.000449438\nI0817 21:25:19.108063 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:25:19.108078 17615 solver.cpp:244]     Train net output #1: loss = 0.000449462 (* 1 = 0.000449462 loss)\nI0817 21:25:19.190201 17615 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:27:34.674427 17615 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:28:55.067157 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI0817 21:28:55.067400 17615 solver.cpp:404]     Test net output #1: loss = 0.711142 (* 1 = 0.711142 loss)\nI0817 21:28:56.373417 17615 solver.cpp:228] 
Iteration 8800, loss = 0.000435296\nI0817 21:28:56.373461 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:28:56.373478 17615 solver.cpp:244]     Train net output #1: loss = 0.00043532 (* 1 = 0.00043532 loss)\nI0817 21:28:56.453464 17615 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:31:11.926245 17615 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:32:32.307971 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0817 21:32:32.308241 17615 solver.cpp:404]     Test net output #1: loss = 0.722316 (* 1 = 0.722316 loss)\nI0817 21:32:33.614270 17615 solver.cpp:228] Iteration 8900, loss = 0.000333693\nI0817 21:32:33.614313 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:32:33.614329 17615 solver.cpp:244]     Train net output #1: loss = 0.000333717 (* 1 = 0.000333717 loss)\nI0817 21:32:33.693272 17615 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:34:49.160903 17615 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:36:09.556545 17615 solver.cpp:404]     Test net output #0: accuracy = 0.8062\nI0817 21:36:09.556813 17615 solver.cpp:404]     Test net output #1: loss = 0.730183 (* 1 = 0.730183 loss)\nI0817 21:36:10.862890 17615 solver.cpp:228] Iteration 9000, loss = 0.00032467\nI0817 21:36:10.862932 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:36:10.862948 17615 solver.cpp:244]     Train net output #1: loss = 0.000324694 (* 1 = 0.000324694 loss)\nI0817 21:36:10.940136 17615 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:38:26.494556 17615 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:39:46.922209 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80744\nI0817 21:39:46.922467 17615 solver.cpp:404]     Test net output #1: loss = 0.738831 (* 1 = 0.738831 loss)\nI0817 21:39:48.229375 17615 solver.cpp:228] Iteration 9100, loss = 0.000277574\nI0817 21:39:48.229420 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 
21:39:48.229445 17615 solver.cpp:244]     Train net output #1: loss = 0.000277598 (* 1 = 0.000277598 loss)\nI0817 21:39:48.309118 17615 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:42:03.834002 17615 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:43:24.215960 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80556\nI0817 21:43:24.216230 17615 solver.cpp:404]     Test net output #1: loss = 0.752835 (* 1 = 0.752835 loss)\nI0817 21:43:25.523191 17615 solver.cpp:228] Iteration 9200, loss = 0.000254614\nI0817 21:43:25.523224 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:43:25.523239 17615 solver.cpp:244]     Train net output #1: loss = 0.000254638 (* 1 = 0.000254638 loss)\nI0817 21:43:25.599807 17615 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:45:41.106050 17615 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:47:01.489883 17615 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0817 21:47:01.490146 17615 solver.cpp:404]     Test net output #1: loss = 0.760187 (* 1 = 0.760187 loss)\nI0817 21:47:02.795805 17615 solver.cpp:228] Iteration 9300, loss = 0.000258314\nI0817 21:47:02.795847 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:47:02.795863 17615 solver.cpp:244]     Train net output #1: loss = 0.000258338 (* 1 = 0.000258338 loss)\nI0817 21:47:02.873117 17615 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:49:18.397543 17615 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:50:38.781492 17615 solver.cpp:404]     Test net output #0: accuracy = 0.8056\nI0817 21:50:38.781765 17615 solver.cpp:404]     Test net output #1: loss = 0.778294 (* 1 = 0.778294 loss)\nI0817 21:50:40.087589 17615 solver.cpp:228] Iteration 9400, loss = 0.000269384\nI0817 21:50:40.087631 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:50:40.087647 17615 solver.cpp:244]     Train net output #1: loss = 0.000269409 (* 1 = 0.000269409 loss)\nI0817 
21:50:40.169994 17615 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 21:52:55.652209 17615 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 21:54:16.031658 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80788\nI0817 21:54:16.031945 17615 solver.cpp:404]     Test net output #1: loss = 0.782414 (* 1 = 0.782414 loss)\nI0817 21:54:17.338374 17615 solver.cpp:228] Iteration 9500, loss = 0.000247275\nI0817 21:54:17.338416 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:54:17.338431 17615 solver.cpp:244]     Train net output #1: loss = 0.000247299 (* 1 = 0.000247299 loss)\nI0817 21:54:17.411109 17615 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 21:56:32.877022 17615 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 21:57:53.250027 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80748\nI0817 21:57:53.250282 17615 solver.cpp:404]     Test net output #1: loss = 0.797546 (* 1 = 0.797546 loss)\nI0817 21:57:54.555860 17615 solver.cpp:228] Iteration 9600, loss = 0.000254421\nI0817 21:57:54.555901 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:57:54.555915 17615 solver.cpp:244]     Train net output #1: loss = 0.000254446 (* 1 = 0.000254446 loss)\nI0817 21:57:54.631953 17615 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:00:10.188973 17615 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:01:30.543431 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80896\nI0817 22:01:30.543695 17615 solver.cpp:404]     Test net output #1: loss = 0.804328 (* 1 = 0.804328 loss)\nI0817 22:01:31.849874 17615 solver.cpp:228] Iteration 9700, loss = 0.000239619\nI0817 22:01:31.849915 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:01:31.849931 17615 solver.cpp:244]     Train net output #1: loss = 0.000239644 (* 1 = 0.000239644 loss)\nI0817 22:01:31.931984 17615 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:03:47.401037 17615 solver.cpp:337] 
Iteration 9800, Testing net (#0)\nI0817 22:05:07.755475 17615 solver.cpp:404]     Test net output #0: accuracy = 0.8076\nI0817 22:05:07.755733 17615 solver.cpp:404]     Test net output #1: loss = 0.821723 (* 1 = 0.821723 loss)\nI0817 22:05:09.061509 17615 solver.cpp:228] Iteration 9800, loss = 0.000239691\nI0817 22:05:09.061550 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:05:09.061568 17615 solver.cpp:244]     Train net output #1: loss = 0.000239716 (* 1 = 0.000239716 loss)\nI0817 22:05:09.135053 17615 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:07:24.783998 17615 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:08:45.153082 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80832\nI0817 22:08:45.153329 17615 solver.cpp:404]     Test net output #1: loss = 0.821378 (* 1 = 0.821378 loss)\nI0817 22:08:46.459678 17615 solver.cpp:228] Iteration 9900, loss = 0.000239789\nI0817 22:08:46.459722 17615 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:08:46.459738 17615 solver.cpp:244]     Train net output #1: loss = 0.000239814 (* 1 = 0.000239814 loss)\nI0817 22:08:46.531864 17615 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:11:02.001116 17615 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kTr10kTab1_iter_10000.caffemodel\nI0817 22:11:02.217769 17615 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kTr10kTab1_iter_10000.solverstate\nI0817 22:11:02.655589 17615 solver.cpp:317] Iteration 10000, loss = 0.000246585\nI0817 22:11:02.655630 17615 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:12:23.019202 17615 solver.cpp:404]     Test net output #0: accuracy = 0.80592\nI0817 22:12:23.019461 17615 solver.cpp:404]     Test net output #1: loss = 0.83438 (* 1 = 0.83438 loss)\nI0817 22:12:23.019474 17615 solver.cpp:322] Optimization Done.\nI0817 22:12:28.368108 17615 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kTr20kTab1",
    "content": "I0817 16:07:12.060456 17621 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:07:12.062778 17621 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:07:12.063998 17621 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:07:12.065218 17621 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:07:12.066452 17621 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:07:12.067824 17621 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:07:12.069056 17621 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:07:12.070282 17621 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:07:12.071517 17621 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:07:12.487483 17621 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kTr20kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:07:12.490504 17621 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:07:12.512619 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:12.512696 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:12.513763 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:07:12.513823 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:07:12.513846 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:07:12.513866 17621 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:07:12.513886 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:07:12.513905 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:07:12.513921 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:07:12.513941 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:07:12.513962 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:07:12.513979 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:07:12.513999 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:07:12.514014 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:07:12.514034 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:07:12.514053 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:07:12.514072 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:07:12.514091 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:07:12.514108 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:07:12.514127 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:07:12.514147 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:07:12.514165 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:07:12.514199 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:07:12.514219 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:07:12.514245 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:07:12.514264 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:07:12.514283 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:07:12.514298 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:07:12.514318 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:07:12.514334 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:07:12.514353 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:07:12.514371 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:07:12.514392 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:07:12.514410 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:07:12.514428 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:07:12.514444 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:07:12.514463 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:07:12.514482 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:07:12.514502 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:07:12.514519 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:07:12.514538 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:07:12.514555 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:07:12.514580 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:07:12.514598 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:07:12.514616 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:07:12.514634 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:07:12.514654 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:07:12.514672 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:07:12.514691 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:07:12.514706 17621 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:07:12.514726 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:07:12.514744 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:07:12.514770 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:07:12.514798 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:07:12.514819 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:07:12.514838 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:07:12.514858 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:07:12.514873 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:07:12.516609 17621 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train20k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:07:12.518676 17621 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:07:12.520558 17621 net.cpp:100] Creating Layer dataLayer\nI0817 16:07:12.520637 17621 net.cpp:408] dataLayer -> data_top\nI0817 16:07:12.520843 17621 net.cpp:408] dataLayer -> label\nI0817 16:07:12.520959 17621 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:07:12.626410 17626 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train20k_lmdb\nI0817 16:07:12.626857 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:12.634059 17621 net.cpp:150] Setting up dataLayer\nI0817 16:07:12.634122 17621 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:07:12.634137 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.634143 17621 net.cpp:165] Memory required for data: 1536500\nI0817 16:07:12.634158 17621 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:07:12.634174 17621 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:07:12.634183 17621 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:07:12.634205 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:07:12.634222 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:07:12.634290 17621 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:07:12.634304 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.634310 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.634315 17621 net.cpp:165] Memory required for data: 1537500\nI0817 16:07:12.634320 17621 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:07:12.634382 17621 
net.cpp:100] Creating Layer pre_conv\nI0817 16:07:12.634394 17621 net.cpp:434] pre_conv <- data_top\nI0817 16:07:12.634407 17621 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:07:12.636137 17621 net.cpp:150] Setting up pre_conv\nI0817 16:07:12.636157 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.636163 17621 net.cpp:165] Memory required for data: 9729500\nI0817 16:07:12.636231 17621 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:07:12.636301 17621 net.cpp:100] Creating Layer pre_bn\nI0817 16:07:12.636312 17621 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:07:12.636325 17621 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:07:12.636765 17627 blocking_queue.cpp:50] Waiting for data\nI0817 16:07:12.636883 17621 net.cpp:150] Setting up pre_bn\nI0817 16:07:12.636901 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.636907 17621 net.cpp:165] Memory required for data: 17921500\nI0817 16:07:12.636925 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:07:12.636973 17621 net.cpp:100] Creating Layer pre_scale\nI0817 16:07:12.636986 17621 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:07:12.636997 17621 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:07:12.637171 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:07:12.637429 17621 net.cpp:150] Setting up pre_scale\nI0817 16:07:12.637444 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.637449 17621 net.cpp:165] Memory required for data: 26113500\nI0817 16:07:12.637460 17621 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:07:12.637506 17621 net.cpp:100] Creating Layer pre_relu\nI0817 16:07:12.637516 17621 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:07:12.637526 17621 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:07:12.637537 17621 net.cpp:150] Setting up pre_relu\nI0817 16:07:12.637543 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.637548 17621 net.cpp:165] Memory required for data: 
34305500\nI0817 16:07:12.637553 17621 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:07:12.637563 17621 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:07:12.637569 17621 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:07:12.637576 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:07:12.637586 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:07:12.637635 17621 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:07:12.637650 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.637657 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.637661 17621 net.cpp:165] Memory required for data: 50689500\nI0817 16:07:12.637666 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:07:12.637678 17621 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:07:12.637684 17621 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:07:12.637693 17621 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:07:12.638010 17621 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:07:12.638026 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.638031 17621 net.cpp:165] Memory required for data: 58881500\nI0817 16:07:12.638043 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:07:12.638058 17621 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:07:12.638064 17621 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:07:12.638075 17621 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:07:12.638314 17621 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:07:12.638327 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.638332 17621 net.cpp:165] Memory required for data: 67073500\nI0817 16:07:12.638344 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:07:12.638355 17621 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:07:12.638361 17621 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:07:12.638370 17621 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.638419 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:07:12.638556 17621 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:07:12.638568 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.638573 17621 net.cpp:165] Memory required for data: 75265500\nI0817 16:07:12.638582 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:07:12.638599 17621 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:07:12.638605 17621 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:07:12.638615 17621 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.638625 17621 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:07:12.638633 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.638638 17621 net.cpp:165] Memory required for data: 83457500\nI0817 16:07:12.638643 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:07:12.638659 17621 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:07:12.638665 17621 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:07:12.638676 17621 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:07:12.638985 17621 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:07:12.639000 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639005 17621 net.cpp:165] Memory required for data: 91649500\nI0817 16:07:12.639014 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:07:12.639024 17621 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:07:12.639029 17621 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:07:12.639037 17621 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:07:12.639267 17621 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:07:12.639281 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639286 17621 net.cpp:165] Memory required for data: 99841500\nI0817 16:07:12.639303 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:07:12.639313 17621 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:07:12.639318 17621 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:07:12.639328 17621 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.639382 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:07:12.639520 17621 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:07:12.639533 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639539 17621 net.cpp:165] Memory required for data: 108033500\nI0817 16:07:12.639549 17621 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:07:12.639600 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:07:12.639612 17621 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:07:12.639619 17621 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:07:12.639627 17621 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:07:12.639699 17621 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:07:12.639714 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639719 17621 net.cpp:165] Memory required for data: 116225500\nI0817 16:07:12.639725 17621 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:07:12.639734 17621 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:07:12.639739 17621 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:07:12.639750 17621 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.639777 17621 net.cpp:150] Setting up L1_b1_relu\nI0817 16:07:12.639786 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639789 17621 net.cpp:165] Memory required for data: 124417500\nI0817 16:07:12.639794 17621 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.639804 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.639809 17621 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:07:12.639817 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:07:12.639827 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:07:12.639871 17621 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.639883 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639889 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.639901 17621 net.cpp:165] Memory required for data: 140801500\nI0817 16:07:12.639907 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:07:12.639921 17621 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:07:12.639928 17621 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:07:12.639937 17621 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:07:12.640241 17621 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:07:12.640259 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.640264 17621 net.cpp:165] Memory required for data: 148993500\nI0817 16:07:12.640274 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:07:12.640287 17621 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:07:12.640293 17621 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:07:12.640301 17621 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:07:12.640547 17621 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:07:12.640559 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.640565 17621 net.cpp:165] Memory required for data: 157185500\nI0817 16:07:12.640575 17621 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:07:12.640584 17621 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:07:12.640590 17621 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:07:12.640597 17621 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.640651 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:07:12.640799 17621 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:07:12.640812 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.640817 17621 net.cpp:165] Memory required for data: 165377500\nI0817 16:07:12.640826 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:07:12.640836 17621 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:07:12.640841 17621 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:07:12.640851 17621 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.640861 17621 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:07:12.640867 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.640872 17621 net.cpp:165] Memory required for data: 173569500\nI0817 16:07:12.640877 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:07:12.640890 17621 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:07:12.640897 17621 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:07:12.640905 17621 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:07:12.641204 17621 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:07:12.641218 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.641223 17621 net.cpp:165] Memory required for data: 181761500\nI0817 16:07:12.641232 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:07:12.641247 17621 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:07:12.641253 17621 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:07:12.641263 17621 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:07:12.641499 17621 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:07:12.641515 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.641520 17621 net.cpp:165] Memory required for data: 189953500\nI0817 16:07:12.641535 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:07:12.641546 17621 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:07:12.641551 17621 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:07:12.641561 17621 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.641614 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:07:12.641762 17621 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:07:12.641778 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.641784 17621 net.cpp:165] Memory required for data: 198145500\nI0817 16:07:12.641793 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:07:12.641810 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:07:12.641816 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:07:12.641824 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:07:12.641831 17621 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:07:12.641866 17621 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:07:12.641878 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.641883 17621 net.cpp:165] Memory required for data: 206337500\nI0817 16:07:12.641888 17621 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:07:12.641896 17621 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:07:12.641901 17621 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:07:12.641908 17621 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.641917 17621 net.cpp:150] Setting up L1_b2_relu\nI0817 16:07:12.641924 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.641928 17621 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:07:12.641933 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.641943 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.641949 17621 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:07:12.641957 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:07:12.641965 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:07:12.642007 17621 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.642021 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642029 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642032 17621 net.cpp:165] Memory required for data: 230913500\nI0817 16:07:12.642037 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:07:12.642050 17621 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:07:12.642055 17621 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:07:12.642063 17621 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:07:12.642370 17621 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:07:12.642385 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642390 17621 net.cpp:165] Memory required for data: 239105500\nI0817 16:07:12.642398 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:07:12.642410 17621 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:07:12.642416 17621 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:07:12.642424 17621 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:07:12.642658 17621 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:07:12.642671 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642675 17621 net.cpp:165] Memory required for data: 
247297500\nI0817 16:07:12.642686 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:07:12.642695 17621 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:07:12.642701 17621 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:07:12.642711 17621 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.642771 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:07:12.642913 17621 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:07:12.642926 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642931 17621 net.cpp:165] Memory required for data: 255489500\nI0817 16:07:12.642940 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:07:12.642948 17621 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:07:12.642954 17621 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:07:12.642961 17621 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.642971 17621 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:07:12.642985 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.642990 17621 net.cpp:165] Memory required for data: 263681500\nI0817 16:07:12.642995 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:07:12.643009 17621 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:07:12.643015 17621 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:07:12.643026 17621 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:07:12.643337 17621 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:07:12.643352 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.643357 17621 net.cpp:165] Memory required for data: 271873500\nI0817 16:07:12.643364 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:07:12.643383 17621 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:07:12.643388 17621 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:07:12.643396 17621 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:07:12.643630 17621 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:07:12.643642 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.643648 17621 net.cpp:165] Memory required for data: 280065500\nI0817 16:07:12.643658 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:07:12.643667 17621 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:07:12.643673 17621 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:07:12.643683 17621 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.643735 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:07:12.643882 17621 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:07:12.643895 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.643900 17621 net.cpp:165] Memory required for data: 288257500\nI0817 16:07:12.643909 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:07:12.643918 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:07:12.643924 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:07:12.643931 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:07:12.643942 17621 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:07:12.643972 17621 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:07:12.643985 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.643990 17621 net.cpp:165] Memory required for data: 296449500\nI0817 16:07:12.643996 17621 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:07:12.644003 17621 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:07:12.644008 17621 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:07:12.644016 17621 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.644024 17621 net.cpp:150] Setting up L1_b3_relu\nI0817 16:07:12.644032 17621 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:07:12.644035 17621 net.cpp:165] Memory required for data: 304641500\nI0817 16:07:12.644040 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.644050 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.644055 17621 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:07:12.644063 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:07:12.644073 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:07:12.644116 17621 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.644127 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.644134 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.644140 17621 net.cpp:165] Memory required for data: 321025500\nI0817 16:07:12.644145 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:07:12.644155 17621 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:07:12.644161 17621 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:07:12.644189 17621 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:07:12.644502 17621 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:07:12.644517 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.644521 17621 net.cpp:165] Memory required for data: 329217500\nI0817 16:07:12.644531 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:07:12.644539 17621 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:07:12.644546 17621 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:07:12.644554 17621 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:07:12.644806 17621 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:07:12.644819 17621 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:07:12.644825 17621 net.cpp:165] Memory required for data: 337409500\nI0817 16:07:12.644835 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:07:12.644847 17621 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:07:12.644855 17621 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:07:12.644861 17621 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.644917 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:07:12.645058 17621 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:07:12.645071 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.645076 17621 net.cpp:165] Memory required for data: 345601500\nI0817 16:07:12.645086 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:07:12.645093 17621 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:07:12.645098 17621 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:07:12.645109 17621 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.645119 17621 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:07:12.645126 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.645130 17621 net.cpp:165] Memory required for data: 353793500\nI0817 16:07:12.645135 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:07:12.645149 17621 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:07:12.645155 17621 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:07:12.645164 17621 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:07:12.645473 17621 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:07:12.645486 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.645491 17621 net.cpp:165] Memory required for data: 361985500\nI0817 16:07:12.645500 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:07:12.645512 17621 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:07:12.645519 17621 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:07:12.645526 17621 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:07:12.645771 17621 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:07:12.645786 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.645792 17621 net.cpp:165] Memory required for data: 370177500\nI0817 16:07:12.645802 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:07:12.645813 17621 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:07:12.645819 17621 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:07:12.645826 17621 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.645884 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:07:12.646026 17621 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:07:12.646039 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646044 17621 net.cpp:165] Memory required for data: 378369500\nI0817 16:07:12.646054 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:07:12.646062 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:07:12.646069 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:07:12.646075 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:07:12.646085 17621 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:07:12.646128 17621 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:07:12.646138 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646143 17621 net.cpp:165] Memory required for data: 386561500\nI0817 16:07:12.646148 17621 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:07:12.646157 17621 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:07:12.646162 17621 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:07:12.646173 17621 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.646183 17621 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:07:12.646189 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646193 17621 net.cpp:165] Memory required for data: 394753500\nI0817 16:07:12.646198 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.646205 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.646210 17621 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:07:12.646217 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:07:12.646226 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:07:12.646270 17621 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.646282 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646288 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646293 17621 net.cpp:165] Memory required for data: 411137500\nI0817 16:07:12.646298 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:07:12.646309 17621 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:07:12.646314 17621 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:07:12.646327 17621 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:07:12.646637 17621 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:07:12.646651 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646656 17621 net.cpp:165] Memory required for data: 419329500\nI0817 16:07:12.646679 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:07:12.646689 17621 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:07:12.646695 17621 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:07:12.646706 17621 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:07:12.646953 17621 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:07:12.646967 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.646972 17621 net.cpp:165] Memory required for data: 427521500\nI0817 16:07:12.646982 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:07:12.646994 17621 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:07:12.647001 17621 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:07:12.647009 17621 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.647060 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:07:12.647202 17621 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:07:12.647214 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.647219 17621 net.cpp:165] Memory required for data: 435713500\nI0817 16:07:12.647228 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:07:12.647236 17621 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:07:12.647243 17621 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:07:12.647253 17621 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.647263 17621 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:07:12.647269 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.647274 17621 net.cpp:165] Memory required for data: 443905500\nI0817 16:07:12.647279 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:07:12.647292 17621 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:07:12.647298 17621 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:07:12.647313 17621 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:07:12.647627 17621 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:07:12.647642 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.647647 17621 net.cpp:165] Memory required for data: 452097500\nI0817 16:07:12.647655 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:07:12.647670 17621 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:07:12.647676 17621 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:07:12.647685 17621 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:07:12.647931 17621 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:07:12.647945 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.647950 17621 net.cpp:165] Memory required for data: 460289500\nI0817 16:07:12.647961 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:07:12.647969 17621 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:07:12.647975 17621 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:07:12.647986 17621 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.648038 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:07:12.648180 17621 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:07:12.648193 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648198 17621 net.cpp:165] Memory required for data: 468481500\nI0817 16:07:12.648207 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:07:12.648216 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:07:12.648222 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:07:12.648228 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:07:12.648239 17621 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:07:12.648270 17621 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:07:12.648283 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648288 17621 net.cpp:165] Memory required for data: 476673500\nI0817 16:07:12.648293 17621 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:07:12.648300 17621 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:07:12.648305 17621 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:07:12.648313 17621 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.648321 17621 net.cpp:150] Setting up L1_b5_relu\nI0817 16:07:12.648329 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648334 17621 net.cpp:165] Memory required for data: 484865500\nI0817 16:07:12.648337 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.648347 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.648353 17621 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:07:12.648360 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:07:12.648370 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:07:12.648416 17621 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.648427 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648433 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648438 17621 net.cpp:165] Memory required for data: 501249500\nI0817 16:07:12.648443 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:07:12.648453 17621 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:07:12.648459 17621 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:07:12.648471 17621 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:07:12.648787 17621 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:07:12.648802 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.648808 17621 net.cpp:165] Memory required for data: 509441500\nI0817 16:07:12.648823 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:07:12.648833 17621 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:07:12.648838 17621 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:07:12.648846 17621 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:07:12.649091 17621 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:07:12.649103 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.649108 17621 net.cpp:165] Memory required for data: 517633500\nI0817 16:07:12.649118 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:07:12.649130 17621 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:07:12.649137 17621 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:07:12.649144 17621 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.649199 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:07:12.649339 17621 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:07:12.649353 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.649358 17621 net.cpp:165] Memory required for data: 525825500\nI0817 16:07:12.649366 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:07:12.649374 17621 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:07:12.649380 17621 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:07:12.649390 17621 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.649400 17621 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:07:12.649407 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.649412 17621 net.cpp:165] Memory required for data: 534017500\nI0817 16:07:12.649416 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:07:12.649430 17621 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:07:12.649436 17621 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:07:12.649446 17621 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:07:12.649765 17621 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:07:12.649778 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.649783 17621 net.cpp:165] Memory required for data: 542209500\nI0817 16:07:12.649792 17621 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:07:12.649804 17621 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:07:12.649811 17621 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:07:12.649819 17621 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:07:12.650058 17621 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:07:12.650071 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650076 17621 net.cpp:165] Memory required for data: 550401500\nI0817 16:07:12.650086 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:07:12.650095 17621 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:07:12.650101 17621 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:07:12.650111 17621 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.650163 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:07:12.650302 17621 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:07:12.650318 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650323 17621 net.cpp:165] Memory required for data: 558593500\nI0817 16:07:12.650332 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:07:12.650352 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:07:12.650359 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:07:12.650367 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:07:12.650374 17621 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:07:12.650409 17621 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:07:12.650420 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650425 17621 net.cpp:165] Memory required for data: 566785500\nI0817 16:07:12.650431 17621 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:07:12.650449 17621 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:07:12.650454 17621 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:07:12.650465 17621 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.650475 17621 net.cpp:150] Setting up L1_b6_relu\nI0817 16:07:12.650482 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650486 17621 net.cpp:165] Memory required for data: 574977500\nI0817 16:07:12.650491 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.650498 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.650504 17621 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:07:12.650511 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:07:12.650521 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:07:12.650568 17621 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.650578 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650585 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650589 17621 net.cpp:165] Memory required for data: 591361500\nI0817 16:07:12.650595 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:07:12.650606 17621 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:07:12.650612 17621 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:07:12.650624 17621 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:07:12.650948 17621 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:07:12.650962 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.650967 17621 net.cpp:165] Memory required for data: 599553500\nI0817 16:07:12.650976 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:07:12.650985 17621 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:07:12.650991 17621 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:07:12.651002 17621 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:07:12.651242 17621 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:07:12.651258 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.651263 17621 net.cpp:165] Memory required for data: 607745500\nI0817 16:07:12.651274 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:07:12.651283 17621 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:07:12.651289 17621 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:07:12.651296 17621 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.651348 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:07:12.651489 17621 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:07:12.651501 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.651506 17621 net.cpp:165] Memory required for data: 615937500\nI0817 16:07:12.651515 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:07:12.651525 17621 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:07:12.651532 17621 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:07:12.651540 17621 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.651549 17621 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:07:12.651556 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.651561 17621 net.cpp:165] Memory required for data: 624129500\nI0817 16:07:12.651566 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:07:12.651581 17621 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:07:12.651587 17621 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:07:12.651597 17621 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:07:12.651916 17621 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:07:12.651932 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.651937 17621 
net.cpp:165] Memory required for data: 632321500\nI0817 16:07:12.651957 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:07:12.651967 17621 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:07:12.651973 17621 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:07:12.651983 17621 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:07:12.652223 17621 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:07:12.652237 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652242 17621 net.cpp:165] Memory required for data: 640513500\nI0817 16:07:12.652252 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:07:12.652264 17621 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:07:12.652271 17621 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:07:12.652278 17621 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.652331 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:07:12.652470 17621 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:07:12.652483 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652488 17621 net.cpp:165] Memory required for data: 648705500\nI0817 16:07:12.652498 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:07:12.652509 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:07:12.652516 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:07:12.652523 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:07:12.652530 17621 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:07:12.652565 17621 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:07:12.652573 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652578 17621 net.cpp:165] Memory required for data: 656897500\nI0817 16:07:12.652583 17621 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:07:12.652591 17621 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:07:12.652596 17621 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:07:12.652606 17621 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.652616 17621 net.cpp:150] Setting up L1_b7_relu\nI0817 16:07:12.652623 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652627 17621 net.cpp:165] Memory required for data: 665089500\nI0817 16:07:12.652632 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.652639 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.652644 17621 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:07:12.652652 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:07:12.652662 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:07:12.652705 17621 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.652717 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652724 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.652729 17621 net.cpp:165] Memory required for data: 681473500\nI0817 16:07:12.652734 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:07:12.652745 17621 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:07:12.652756 17621 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:07:12.652770 17621 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:07:12.653080 17621 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:07:12.653095 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.653100 17621 net.cpp:165] Memory required for data: 689665500\nI0817 16:07:12.653108 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:07:12.653117 17621 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:07:12.653123 17621 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:07:12.653136 17621 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:07:12.653389 17621 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:07:12.653403 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.653409 17621 net.cpp:165] Memory required for data: 697857500\nI0817 16:07:12.653419 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:07:12.653430 17621 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:07:12.653437 17621 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:07:12.653445 17621 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.653497 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:07:12.653642 17621 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:07:12.653656 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.653661 17621 net.cpp:165] Memory required for data: 706049500\nI0817 16:07:12.653671 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:07:12.653681 17621 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:07:12.653688 17621 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:07:12.653695 17621 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.653705 17621 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:07:12.653712 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.653717 17621 net.cpp:165] Memory required for data: 714241500\nI0817 16:07:12.653722 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:07:12.653735 17621 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:07:12.653741 17621 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:07:12.653758 17621 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:07:12.654073 17621 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:07:12.654088 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654093 17621 net.cpp:165] Memory required for data: 722433500\nI0817 16:07:12.654101 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:07:12.654111 17621 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:07:12.654117 17621 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:07:12.654125 17621 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:07:12.654369 17621 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:07:12.654382 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654387 17621 net.cpp:165] Memory required for data: 730625500\nI0817 16:07:12.654398 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:07:12.654412 17621 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:07:12.654417 17621 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:07:12.654425 17621 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.654480 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:07:12.654620 17621 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:07:12.654633 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654637 17621 net.cpp:165] Memory required for data: 738817500\nI0817 16:07:12.654646 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:07:12.654655 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:07:12.654661 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:07:12.654669 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:07:12.654680 17621 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:07:12.654716 17621 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:07:12.654727 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654732 17621 net.cpp:165] Memory required for data: 747009500\nI0817 16:07:12.654738 17621 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:07:12.654745 17621 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:07:12.654757 17621 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:07:12.654767 17621 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.654778 17621 net.cpp:150] Setting up L1_b8_relu\nI0817 16:07:12.654784 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654796 17621 net.cpp:165] Memory required for data: 755201500\nI0817 16:07:12.654801 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.654808 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.654814 17621 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:07:12.654821 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:07:12.654831 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:07:12.654877 17621 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.654889 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654896 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.654901 17621 net.cpp:165] Memory required for data: 771585500\nI0817 16:07:12.654906 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:07:12.654917 17621 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:07:12.654923 17621 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:07:12.654935 17621 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:07:12.655258 17621 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:07:12.655273 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.655278 17621 net.cpp:165] Memory required for data: 779777500\nI0817 16:07:12.655287 17621 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:07:12.655302 17621 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:07:12.655308 17621 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:07:12.655319 17621 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:07:12.655562 17621 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:07:12.655575 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.655580 17621 net.cpp:165] Memory required for data: 787969500\nI0817 16:07:12.655591 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:07:12.655599 17621 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:07:12.655606 17621 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:07:12.655613 17621 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.655669 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:07:12.655820 17621 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:07:12.655834 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.655839 17621 net.cpp:165] Memory required for data: 796161500\nI0817 16:07:12.655848 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:07:12.655856 17621 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:07:12.655866 17621 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:07:12.655874 17621 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.655884 17621 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:07:12.655890 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.655895 17621 net.cpp:165] Memory required for data: 804353500\nI0817 16:07:12.655900 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:07:12.655915 17621 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:07:12.655920 17621 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:07:12.655931 17621 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:07:12.656252 17621 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:07:12.656266 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.656271 17621 net.cpp:165] Memory required for data: 812545500\nI0817 16:07:12.656280 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:07:12.656292 17621 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:07:12.656299 17621 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:07:12.656307 17621 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:07:12.656559 17621 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:07:12.656574 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.656579 17621 net.cpp:165] Memory required for data: 820737500\nI0817 16:07:12.656610 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:07:12.656620 17621 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:07:12.656626 17621 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:07:12.656633 17621 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.656687 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:07:12.656836 17621 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:07:12.656850 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.656855 17621 net.cpp:165] Memory required for data: 828929500\nI0817 16:07:12.656864 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:07:12.656877 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:07:12.656883 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:07:12.656890 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:07:12.656898 17621 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:07:12.656929 17621 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:07:12.656939 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.656944 17621 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:07:12.656949 17621 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:07:12.656956 17621 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:07:12.656961 17621 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:07:12.656972 17621 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.656982 17621 net.cpp:150] Setting up L1_b9_relu\nI0817 16:07:12.656989 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.656994 17621 net.cpp:165] Memory required for data: 845313500\nI0817 16:07:12.656998 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.657006 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.657016 17621 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:07:12.657023 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:07:12.657033 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:07:12.657078 17621 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.657090 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.657097 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.657102 17621 net.cpp:165] Memory required for data: 861697500\nI0817 16:07:12.657107 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:07:12.657117 17621 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:07:12.657124 17621 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:07:12.657135 17621 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:07:12.657455 17621 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:07:12.657469 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.657474 17621 net.cpp:165] Memory required for data: 
863745500\nI0817 16:07:12.657483 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:07:12.657495 17621 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:07:12.657501 17621 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:07:12.657510 17621 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:07:12.657757 17621 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:07:12.657774 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.657779 17621 net.cpp:165] Memory required for data: 865793500\nI0817 16:07:12.657791 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:07:12.657799 17621 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:07:12.657812 17621 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:07:12.657820 17621 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.657874 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:07:12.658020 17621 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:07:12.658032 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.658037 17621 net.cpp:165] Memory required for data: 867841500\nI0817 16:07:12.658046 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:07:12.658054 17621 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:07:12.658061 17621 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:07:12.658071 17621 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.658080 17621 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:07:12.658087 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.658092 17621 net.cpp:165] Memory required for data: 869889500\nI0817 16:07:12.658097 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:07:12.658108 17621 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:07:12.658114 17621 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:07:12.658125 17621 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:07:12.658440 17621 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:07:12.658454 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.658460 17621 net.cpp:165] Memory required for data: 871937500\nI0817 16:07:12.658469 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:07:12.658478 17621 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:07:12.658484 17621 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:07:12.658498 17621 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:07:12.658742 17621 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:07:12.658762 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.658767 17621 net.cpp:165] Memory required for data: 873985500\nI0817 16:07:12.658778 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:07:12.658790 17621 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:07:12.658797 17621 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:07:12.658805 17621 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.658861 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:07:12.659008 17621 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:07:12.659020 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.659025 17621 net.cpp:165] Memory required for data: 876033500\nI0817 16:07:12.659035 17621 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:07:12.659050 17621 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:07:12.659057 17621 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:07:12.659070 17621 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:07:12.659154 17621 net.cpp:150] Setting up L2_b1_pool\nI0817 16:07:12.659169 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.659175 17621 net.cpp:165] Memory required for data: 878081500\nI0817 16:07:12.659180 17621 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:07:12.659190 17621 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:07:12.659196 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:07:12.659204 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:07:12.659214 17621 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:07:12.659248 17621 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:07:12.659257 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.659261 17621 net.cpp:165] Memory required for data: 880129500\nI0817 16:07:12.659267 17621 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:07:12.659274 17621 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:07:12.659281 17621 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:07:12.659291 17621 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.659307 17621 net.cpp:150] Setting up L2_b1_relu\nI0817 16:07:12.659315 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.659319 17621 net.cpp:165] Memory required for data: 882177500\nI0817 16:07:12.659324 17621 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:07:12.659371 17621 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:07:12.659385 17621 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:07:12.661715 17621 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:07:12.661734 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.661739 17621 net.cpp:165] Memory required for data: 884225500\nI0817 16:07:12.661746 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:07:12.661761 17621 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:07:12.661769 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:07:12.661777 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:07:12.661787 17621 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:07:12.661865 17621 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:07:12.661885 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.661890 17621 net.cpp:165] Memory required for data: 888321500\nI0817 16:07:12.661895 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.661903 17621 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.661909 17621 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:07:12.661917 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:07:12.661931 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:07:12.662005 17621 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.662019 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.662026 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.662031 17621 net.cpp:165] Memory required for data: 896513500\nI0817 16:07:12.662036 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:07:12.662052 17621 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:07:12.662060 17621 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:07:12.662070 17621 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:07:12.663527 17621 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:07:12.663544 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.663549 17621 net.cpp:165] Memory required for data: 900609500\nI0817 16:07:12.663559 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:07:12.663573 17621 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:07:12.663580 17621 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:07:12.663589 17621 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:07:12.663849 17621 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:07:12.663866 17621 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:07:12.663872 17621 net.cpp:165] Memory required for data: 904705500\nI0817 16:07:12.663883 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:07:12.663893 17621 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:07:12.663899 17621 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:07:12.663908 17621 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.663964 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:07:12.664114 17621 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:07:12.664127 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.664132 17621 net.cpp:165] Memory required for data: 908801500\nI0817 16:07:12.664141 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:07:12.664149 17621 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:07:12.664156 17621 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:07:12.664166 17621 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.664186 17621 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:07:12.664192 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.664197 17621 net.cpp:165] Memory required for data: 912897500\nI0817 16:07:12.664202 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:07:12.664216 17621 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:07:12.664222 17621 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:07:12.664232 17621 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:07:12.664687 17621 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:07:12.664702 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.664707 17621 net.cpp:165] Memory required for data: 916993500\nI0817 16:07:12.664716 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:07:12.664726 17621 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:07:12.664731 
17621 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:07:12.664742 17621 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:07:12.664995 17621 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:07:12.665009 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665014 17621 net.cpp:165] Memory required for data: 921089500\nI0817 16:07:12.665025 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:07:12.665037 17621 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:07:12.665045 17621 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:07:12.665052 17621 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.665107 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:07:12.665251 17621 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:07:12.665264 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665269 17621 net.cpp:165] Memory required for data: 925185500\nI0817 16:07:12.665279 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:07:12.665293 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:07:12.665300 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:07:12.665307 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:07:12.665320 17621 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:07:12.665347 17621 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:07:12.665359 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665364 17621 net.cpp:165] Memory required for data: 929281500\nI0817 16:07:12.665369 17621 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:07:12.665376 17621 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:07:12.665382 17621 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:07:12.665392 17621 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.665402 17621 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:07:12.665410 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665415 17621 net.cpp:165] Memory required for data: 933377500\nI0817 16:07:12.665419 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.665426 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.665432 17621 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:07:12.665439 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:07:12.665448 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:07:12.665498 17621 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.665508 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665515 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.665520 17621 net.cpp:165] Memory required for data: 941569500\nI0817 16:07:12.665525 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:07:12.665544 17621 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:07:12.665550 17621 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:07:12.665563 17621 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:07:12.666039 17621 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:07:12.666054 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.666059 17621 net.cpp:165] Memory required for data: 945665500\nI0817 16:07:12.666069 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:07:12.666080 17621 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:07:12.666087 17621 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:07:12.666095 17621 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:07:12.666342 17621 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:07:12.666354 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.666379 17621 net.cpp:165] Memory required for data: 949761500\nI0817 16:07:12.666391 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:07:12.666404 17621 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:07:12.666411 17621 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:07:12.666419 17621 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.666476 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:07:12.666623 17621 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:07:12.666635 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.666640 17621 net.cpp:165] Memory required for data: 953857500\nI0817 16:07:12.666649 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:07:12.666661 17621 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:07:12.666667 17621 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:07:12.666677 17621 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.666687 17621 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:07:12.666693 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.666698 17621 net.cpp:165] Memory required for data: 957953500\nI0817 16:07:12.666703 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:07:12.666714 17621 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:07:12.666719 17621 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:07:12.666731 17621 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:07:12.667197 17621 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:07:12.667212 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.667217 17621 net.cpp:165] Memory required for data: 962049500\nI0817 16:07:12.667227 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:07:12.667235 17621 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:07:12.667243 17621 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:07:12.667253 17621 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:07:12.667505 17621 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:07:12.667520 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.667524 17621 net.cpp:165] Memory required for data: 966145500\nI0817 16:07:12.667534 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:07:12.667546 17621 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:07:12.667553 17621 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:07:12.667560 17621 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.667614 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:07:12.667771 17621 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:07:12.667785 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.667790 17621 net.cpp:165] Memory required for data: 970241500\nI0817 16:07:12.667799 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:07:12.667811 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:07:12.667819 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:07:12.667825 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:07:12.667840 17621 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:07:12.667873 17621 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:07:12.667883 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.667888 17621 net.cpp:165] Memory required for data: 974337500\nI0817 16:07:12.667893 17621 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:07:12.667914 17621 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:07:12.667922 17621 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:07:12.667928 17621 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.667938 17621 net.cpp:150] Setting up L2_b3_relu\nI0817 16:07:12.667945 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.667949 17621 net.cpp:165] Memory required for data: 978433500\nI0817 16:07:12.667955 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.667965 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.667970 17621 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:07:12.667979 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:07:12.667987 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:07:12.668032 17621 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.668047 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.668054 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.668059 17621 net.cpp:165] Memory required for data: 986625500\nI0817 16:07:12.668064 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:07:12.668076 17621 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:07:12.668081 17621 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:07:12.668090 17621 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:07:12.668553 17621 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:07:12.668570 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.668576 17621 net.cpp:165] Memory required for data: 990721500\nI0817 16:07:12.668584 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:07:12.668594 17621 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:07:12.668601 17621 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:07:12.668608 17621 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:07:12.668861 17621 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:07:12.668875 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.668880 17621 net.cpp:165] Memory required for data: 994817500\nI0817 16:07:12.668890 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:07:12.668902 17621 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:07:12.668910 17621 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:07:12.668917 17621 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.668975 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:07:12.669122 17621 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:07:12.669136 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.669140 17621 net.cpp:165] Memory required for data: 998913500\nI0817 16:07:12.669149 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:07:12.669157 17621 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:07:12.669163 17621 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:07:12.669173 17621 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.669183 17621 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:07:12.669190 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.669194 17621 net.cpp:165] Memory required for data: 1003009500\nI0817 16:07:12.669199 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:07:12.669220 17621 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:07:12.669227 17621 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:07:12.669235 17621 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:07:12.669697 17621 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:07:12.669713 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.669718 17621 net.cpp:165] Memory required for data: 1007105500\nI0817 16:07:12.669726 17621 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:07:12.669739 17621 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:07:12.669746 17621 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:07:12.669760 17621 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:07:12.670006 17621 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:07:12.670019 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670024 17621 net.cpp:165] Memory required for data: 1011201500\nI0817 16:07:12.670035 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:07:12.670044 17621 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:07:12.670050 17621 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:07:12.670061 17621 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.670116 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:07:12.670264 17621 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:07:12.670277 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670282 17621 net.cpp:165] Memory required for data: 1015297500\nI0817 16:07:12.670291 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:07:12.670300 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:07:12.670306 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:07:12.670312 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:07:12.670323 17621 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:07:12.670351 17621 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:07:12.670359 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670364 17621 net.cpp:165] Memory required for data: 1019393500\nI0817 16:07:12.670369 17621 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:07:12.670380 17621 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:07:12.670387 17621 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:07:12.670393 17621 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.670403 17621 net.cpp:150] Setting up L2_b4_relu\nI0817 16:07:12.670410 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670414 17621 net.cpp:165] Memory required for data: 1023489500\nI0817 16:07:12.670419 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.670429 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.670435 17621 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:07:12.670442 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:07:12.670452 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:07:12.670496 17621 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.670513 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670521 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.670526 17621 net.cpp:165] Memory required for data: 1031681500\nI0817 16:07:12.670531 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:07:12.670542 17621 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:07:12.670547 17621 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:07:12.670557 17621 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:07:12.671030 17621 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:07:12.671049 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.671056 17621 net.cpp:165] Memory required for data: 1035777500\nI0817 16:07:12.671064 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:07:12.671077 17621 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:07:12.671083 17621 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:07:12.671092 17621 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:07:12.671339 17621 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:07:12.671352 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.671357 17621 net.cpp:165] Memory required for data: 1039873500\nI0817 16:07:12.671367 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:07:12.671376 17621 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:07:12.671382 17621 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:07:12.671393 17621 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.671448 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:07:12.671600 17621 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:07:12.671613 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.671617 17621 net.cpp:165] Memory required for data: 1043969500\nI0817 16:07:12.671627 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:07:12.671634 17621 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:07:12.671640 17621 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:07:12.671651 17621 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.671660 17621 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:07:12.671667 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.671672 17621 net.cpp:165] Memory required for data: 1048065500\nI0817 16:07:12.671677 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:07:12.671691 17621 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:07:12.671697 17621 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:07:12.671706 17621 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:07:12.672178 17621 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:07:12.672191 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.672196 17621 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:07:12.672205 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:07:12.672219 17621 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:07:12.672224 17621 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:07:12.672232 17621 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:07:12.672477 17621 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:07:12.672490 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.672495 17621 net.cpp:165] Memory required for data: 1056257500\nI0817 16:07:12.672507 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:07:12.672514 17621 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:07:12.672520 17621 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:07:12.672531 17621 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.672586 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:07:12.672734 17621 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:07:12.672747 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.672757 17621 net.cpp:165] Memory required for data: 1060353500\nI0817 16:07:12.672767 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:07:12.672776 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:07:12.672783 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:07:12.672791 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:07:12.672801 17621 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:07:12.672828 17621 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:07:12.672837 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.672849 17621 net.cpp:165] Memory required for data: 1064449500\nI0817 16:07:12.672855 17621 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:07:12.672863 17621 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:07:12.672873 17621 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:07:12.672879 17621 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.672889 17621 net.cpp:150] Setting up L2_b5_relu\nI0817 16:07:12.672896 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.672900 17621 net.cpp:165] Memory required for data: 1068545500\nI0817 16:07:12.672905 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.672912 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.672917 17621 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:07:12.672929 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:07:12.672938 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:07:12.672982 17621 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.672994 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.673001 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.673005 17621 net.cpp:165] Memory required for data: 1076737500\nI0817 16:07:12.673010 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:07:12.673024 17621 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:07:12.673032 17621 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:07:12.673040 17621 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:07:12.673509 17621 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:07:12.673523 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.673528 17621 net.cpp:165] Memory required for data: 1080833500\nI0817 16:07:12.673537 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:07:12.673549 17621 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:07:12.673555 17621 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:07:12.673564 17621 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:07:12.673825 17621 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:07:12.673840 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.673844 17621 net.cpp:165] Memory required for data: 1084929500\nI0817 16:07:12.673856 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:07:12.673863 17621 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:07:12.673871 17621 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:07:12.673880 17621 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.673938 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:07:12.674088 17621 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:07:12.674101 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.674106 17621 net.cpp:165] Memory required for data: 1089025500\nI0817 16:07:12.674115 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:07:12.674124 17621 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:07:12.674129 17621 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:07:12.674139 17621 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.674149 17621 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:07:12.674156 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.674161 17621 net.cpp:165] Memory required for data: 1093121500\nI0817 16:07:12.674166 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:07:12.674180 17621 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:07:12.674186 17621 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:07:12.674195 17621 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:07:12.674662 17621 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:07:12.674682 17621 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.674687 17621 net.cpp:165] Memory required for data: 1097217500\nI0817 16:07:12.674696 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:07:12.674710 17621 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:07:12.674716 17621 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:07:12.674724 17621 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:07:12.674976 17621 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:07:12.674989 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.674994 17621 net.cpp:165] Memory required for data: 1101313500\nI0817 16:07:12.675005 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:07:12.675014 17621 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:07:12.675020 17621 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:07:12.675029 17621 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.675086 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:07:12.675232 17621 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:07:12.675248 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.675253 17621 net.cpp:165] Memory required for data: 1105409500\nI0817 16:07:12.675263 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:07:12.675271 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:07:12.675278 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:07:12.675285 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:07:12.675292 17621 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:07:12.675323 17621 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:07:12.675333 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.675338 17621 net.cpp:165] Memory required for data: 1109505500\nI0817 16:07:12.675343 17621 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:07:12.675350 17621 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:07:12.675355 17621 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:07:12.675365 17621 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.675375 17621 net.cpp:150] Setting up L2_b6_relu\nI0817 16:07:12.675382 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.675386 17621 net.cpp:165] Memory required for data: 1113601500\nI0817 16:07:12.675391 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.675398 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.675403 17621 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:07:12.675413 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:07:12.675423 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:07:12.675467 17621 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.675478 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.675484 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.675489 17621 net.cpp:165] Memory required for data: 1121793500\nI0817 16:07:12.675494 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:07:12.675508 17621 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:07:12.675515 17621 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:07:12.675524 17621 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:07:12.676010 17621 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:07:12.676025 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.676030 17621 net.cpp:165] Memory required for data: 1125889500\nI0817 16:07:12.676039 17621 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:07:12.676054 17621 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:07:12.676066 17621 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:07:12.676074 17621 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:07:12.676327 17621 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:07:12.676340 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.676345 17621 net.cpp:165] Memory required for data: 1129985500\nI0817 16:07:12.676357 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:07:12.676365 17621 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:07:12.676371 17621 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:07:12.676381 17621 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.676439 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:07:12.676587 17621 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:07:12.676600 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.676605 17621 net.cpp:165] Memory required for data: 1134081500\nI0817 16:07:12.676614 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:07:12.676621 17621 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:07:12.676627 17621 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:07:12.676635 17621 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.676645 17621 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:07:12.676651 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.676656 17621 net.cpp:165] Memory required for data: 1138177500\nI0817 16:07:12.676661 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:07:12.676676 17621 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:07:12.676682 17621 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:07:12.676693 17621 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:07:12.677165 17621 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:07:12.677178 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.677183 17621 net.cpp:165] Memory required for data: 1142273500\nI0817 16:07:12.677192 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:07:12.677204 17621 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:07:12.677211 17621 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:07:12.677222 17621 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:07:12.677472 17621 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:07:12.677485 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.677490 17621 net.cpp:165] Memory required for data: 1146369500\nI0817 16:07:12.677500 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:07:12.677510 17621 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:07:12.677516 17621 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:07:12.677523 17621 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.677583 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:07:12.677733 17621 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:07:12.677749 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.677760 17621 net.cpp:165] Memory required for data: 1150465500\nI0817 16:07:12.677769 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:07:12.677779 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:07:12.677786 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:07:12.677793 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:07:12.677801 17621 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:07:12.677832 17621 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:07:12.677842 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.677846 17621 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:07:12.677852 17621 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:07:12.677860 17621 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:07:12.677866 17621 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:07:12.677882 17621 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.677893 17621 net.cpp:150] Setting up L2_b7_relu\nI0817 16:07:12.677901 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.677906 17621 net.cpp:165] Memory required for data: 1158657500\nI0817 16:07:12.677911 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.677917 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.677923 17621 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:07:12.677933 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:07:12.677943 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:07:12.677989 17621 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.677999 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.678006 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.678010 17621 net.cpp:165] Memory required for data: 1166849500\nI0817 16:07:12.678016 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:07:12.678030 17621 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:07:12.678037 17621 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:07:12.678046 17621 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:07:12.678520 17621 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:07:12.678534 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.678539 17621 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:07:12.678548 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:07:12.678560 17621 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:07:12.678567 17621 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:07:12.678575 17621 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:07:12.678833 17621 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:07:12.678845 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.678850 17621 net.cpp:165] Memory required for data: 1175041500\nI0817 16:07:12.678861 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:07:12.678870 17621 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:07:12.678876 17621 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:07:12.678884 17621 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.678942 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:07:12.679096 17621 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:07:12.679112 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.679117 17621 net.cpp:165] Memory required for data: 1179137500\nI0817 16:07:12.679127 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:07:12.679134 17621 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:07:12.679141 17621 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:07:12.679148 17621 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.679158 17621 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:07:12.679164 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.679169 17621 net.cpp:165] Memory required for data: 1183233500\nI0817 16:07:12.679173 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:07:12.679188 17621 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:07:12.679194 17621 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:07:12.679205 17621 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:07:12.679672 17621 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:07:12.679687 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.679692 17621 net.cpp:165] Memory required for data: 1187329500\nI0817 16:07:12.679699 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:07:12.679711 17621 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:07:12.679726 17621 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:07:12.679738 17621 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:07:12.680001 17621 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:07:12.680014 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.680019 17621 net.cpp:165] Memory required for data: 1191425500\nI0817 16:07:12.680029 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:07:12.680038 17621 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:07:12.680045 17621 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:07:12.680052 17621 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.680111 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:07:12.680258 17621 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:07:12.680271 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.680276 17621 net.cpp:165] Memory required for data: 1195521500\nI0817 16:07:12.680285 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:07:12.680297 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:07:12.680304 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:07:12.680310 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:07:12.680318 17621 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:07:12.680346 17621 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:07:12.680354 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:07:12.680358 17621 net.cpp:165] Memory required for data: 1199617500\nI0817 16:07:12.680363 17621 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:07:12.680374 17621 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:07:12.680380 17621 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:07:12.680388 17621 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.680397 17621 net.cpp:150] Setting up L2_b8_relu\nI0817 16:07:12.680404 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.680409 17621 net.cpp:165] Memory required for data: 1203713500\nI0817 16:07:12.680413 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.680421 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.680426 17621 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:07:12.680433 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:07:12.680456 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:07:12.680506 17621 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.680518 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.680526 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.680531 17621 net.cpp:165] Memory required for data: 1211905500\nI0817 16:07:12.680536 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:07:12.680549 17621 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:07:12.680557 17621 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:07:12.680569 17621 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:07:12.681051 17621 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:07:12.681066 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:07:12.681071 17621 net.cpp:165] Memory required for data: 1216001500\nI0817 16:07:12.681080 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:07:12.681092 17621 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:07:12.681099 17621 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:07:12.681110 17621 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:07:12.681357 17621 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:07:12.681370 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.681375 17621 net.cpp:165] Memory required for data: 1220097500\nI0817 16:07:12.681392 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:07:12.681402 17621 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:07:12.681408 17621 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:07:12.681416 17621 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.681476 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:07:12.681624 17621 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:07:12.681638 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.681643 17621 net.cpp:165] Memory required for data: 1224193500\nI0817 16:07:12.681651 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:07:12.681661 17621 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:07:12.681668 17621 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:07:12.681675 17621 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.681685 17621 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:07:12.681692 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.681696 17621 net.cpp:165] Memory required for data: 1228289500\nI0817 16:07:12.681701 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:07:12.681715 17621 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:07:12.681721 17621 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:07:12.681730 17621 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:07:12.682204 17621 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:07:12.682217 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.682222 17621 net.cpp:165] Memory required for data: 1232385500\nI0817 16:07:12.682231 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:07:12.682245 17621 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:07:12.682251 17621 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:07:12.682260 17621 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:07:12.682514 17621 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:07:12.682530 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.682536 17621 net.cpp:165] Memory required for data: 1236481500\nI0817 16:07:12.682577 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:07:12.682592 17621 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:07:12.682600 17621 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:07:12.682607 17621 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.682663 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:07:12.682817 17621 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:07:12.682832 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.682837 17621 net.cpp:165] Memory required for data: 1240577500\nI0817 16:07:12.682845 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:07:12.682854 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:07:12.682862 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:07:12.682868 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:07:12.682880 17621 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:07:12.682909 17621 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:07:12.682919 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.682924 17621 net.cpp:165] Memory required for data: 1244673500\nI0817 16:07:12.682929 17621 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:07:12.682940 17621 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:07:12.682945 17621 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:07:12.682952 17621 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.682961 17621 net.cpp:150] Setting up L2_b9_relu\nI0817 16:07:12.682970 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.682973 17621 net.cpp:165] Memory required for data: 1248769500\nI0817 16:07:12.682978 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.682996 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.683001 17621 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:07:12.683009 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:07:12.683019 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:07:12.683090 17621 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.683115 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.683122 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.683126 17621 net.cpp:165] Memory required for data: 1256961500\nI0817 16:07:12.683131 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:07:12.683143 17621 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:07:12.683151 17621 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:07:12.683159 17621 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:07:12.683634 17621 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:07:12.683648 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.683653 17621 net.cpp:165] Memory required for data: 1257985500\nI0817 16:07:12.683661 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:07:12.683675 17621 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:07:12.683681 17621 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:07:12.683689 17621 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:07:12.683961 17621 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:07:12.683975 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.683980 17621 net.cpp:165] Memory required for data: 1259009500\nI0817 16:07:12.683991 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:07:12.684003 17621 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:07:12.684010 17621 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:07:12.684020 17621 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.684075 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:07:12.684236 17621 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:07:12.684248 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.684253 17621 net.cpp:165] Memory required for data: 1260033500\nI0817 16:07:12.684262 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:07:12.684270 17621 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:07:12.684276 17621 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:07:12.684288 17621 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.684296 17621 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:07:12.684304 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.684309 17621 net.cpp:165] Memory required for data: 1261057500\nI0817 16:07:12.684314 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:07:12.684327 17621 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:07:12.684334 17621 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:07:12.684342 17621 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:07:12.684845 17621 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:07:12.684860 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.684865 17621 net.cpp:165] Memory required for data: 1262081500\nI0817 16:07:12.684875 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:07:12.684886 17621 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:07:12.684893 17621 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:07:12.684902 17621 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:07:12.685159 17621 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:07:12.685173 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.685178 17621 net.cpp:165] Memory required for data: 1263105500\nI0817 16:07:12.685195 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:07:12.685205 17621 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:07:12.685211 17621 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:07:12.685219 17621 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.685277 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:07:12.685433 17621 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:07:12.685446 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.685451 17621 net.cpp:165] Memory required for data: 1264129500\nI0817 16:07:12.685461 17621 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:07:12.685470 17621 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:07:12.685477 17621 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:07:12.685488 17621 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:07:12.685523 17621 net.cpp:150] Setting up L3_b1_pool\nI0817 16:07:12.685535 17621 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:07:12.685540 17621 net.cpp:165] Memory required for data: 1265153500\nI0817 16:07:12.685545 17621 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:07:12.685554 17621 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:07:12.685560 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:07:12.685566 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:07:12.685575 17621 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:07:12.685608 17621 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:07:12.685618 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.685623 17621 net.cpp:165] Memory required for data: 1266177500\nI0817 16:07:12.685628 17621 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:07:12.685636 17621 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:07:12.685642 17621 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:07:12.685648 17621 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.685657 17621 net.cpp:150] Setting up L3_b1_relu\nI0817 16:07:12.685664 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.685669 17621 net.cpp:165] Memory required for data: 1267201500\nI0817 16:07:12.685673 17621 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:07:12.685685 17621 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:07:12.685693 17621 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:07:12.686935 17621 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:07:12.686956 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.686962 17621 net.cpp:165] Memory required for data: 1268225500\nI0817 16:07:12.686969 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:07:12.686977 17621 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:07:12.686985 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:07:12.686991 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:07:12.687000 17621 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:07:12.687041 17621 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:07:12.687054 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.687059 17621 net.cpp:165] Memory required for data: 1270273500\nI0817 16:07:12.687064 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.687074 17621 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.687081 17621 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:07:12.687088 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:07:12.687098 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:07:12.687150 17621 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.687162 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.687170 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.687181 17621 net.cpp:165] Memory required for data: 1274369500\nI0817 16:07:12.687187 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:07:12.687202 17621 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:07:12.687208 17621 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:07:12.687218 17621 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:07:12.689204 17621 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:07:12.689224 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.689230 17621 net.cpp:165] Memory required for data: 1276417500\nI0817 16:07:12.689240 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:07:12.689250 17621 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:07:12.689256 17621 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:07:12.689268 17621 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:07:12.689534 17621 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:07:12.689550 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.689555 17621 net.cpp:165] Memory required for data: 1278465500\nI0817 16:07:12.689566 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:07:12.689576 17621 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:07:12.689582 17621 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:07:12.689590 17621 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.689648 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:07:12.689812 17621 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:07:12.689826 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.689831 17621 net.cpp:165] Memory required for data: 1280513500\nI0817 16:07:12.689841 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:07:12.689852 17621 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:07:12.689859 17621 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:07:12.689867 17621 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.689877 17621 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:07:12.689884 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.689888 17621 net.cpp:165] Memory required for data: 1282561500\nI0817 16:07:12.689893 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:07:12.689908 17621 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:07:12.689914 17621 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:07:12.689924 17621 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:07:12.690945 17621 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:07:12.690960 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.690965 17621 net.cpp:165] Memory required for data: 1284609500\nI0817 16:07:12.690975 17621 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:07:12.690987 17621 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:07:12.690995 17621 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:07:12.691006 17621 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:07:12.691272 17621 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:07:12.691285 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691290 17621 net.cpp:165] Memory required for data: 1286657500\nI0817 16:07:12.691300 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:07:12.691309 17621 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:07:12.691316 17621 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:07:12.691326 17621 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.691385 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:07:12.691542 17621 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:07:12.691555 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691560 17621 net.cpp:165] Memory required for data: 1288705500\nI0817 16:07:12.691570 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:07:12.691578 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:07:12.691596 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:07:12.691604 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:07:12.691615 17621 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:07:12.691654 17621 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:07:12.691668 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691673 17621 net.cpp:165] Memory required for data: 1290753500\nI0817 16:07:12.691678 17621 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:07:12.691685 17621 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:07:12.691691 17621 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:07:12.691701 17621 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.691711 17621 net.cpp:150] Setting up L3_b2_relu\nI0817 16:07:12.691720 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691723 17621 net.cpp:165] Memory required for data: 1292801500\nI0817 16:07:12.691728 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.691735 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.691740 17621 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:07:12.691748 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:07:12.691764 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:07:12.691817 17621 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.691829 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691836 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.691840 17621 net.cpp:165] Memory required for data: 1296897500\nI0817 16:07:12.691845 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:07:12.691857 17621 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:07:12.691864 17621 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:07:12.691875 17621 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:07:12.692901 17621 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:07:12.692916 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.692921 17621 net.cpp:165] Memory required for data: 1298945500\nI0817 16:07:12.692930 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:07:12.692939 17621 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:07:12.692946 17621 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:07:12.692957 17621 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:07:12.693219 17621 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:07:12.693236 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.693241 17621 net.cpp:165] Memory required for data: 1300993500\nI0817 16:07:12.693253 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:07:12.693261 17621 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:07:12.693267 17621 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:07:12.693274 17621 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.693331 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:07:12.693486 17621 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:07:12.693498 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.693503 17621 net.cpp:165] Memory required for data: 1303041500\nI0817 16:07:12.693512 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:07:12.693523 17621 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:07:12.693531 17621 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:07:12.693537 17621 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.693547 17621 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:07:12.693554 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.693567 17621 net.cpp:165] Memory required for data: 1305089500\nI0817 16:07:12.693572 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:07:12.693585 17621 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:07:12.693591 17621 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:07:12.693600 17621 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:07:12.694625 17621 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:07:12.694641 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.694646 17621 net.cpp:165] Memory required for data: 1307137500\nI0817 16:07:12.694655 17621 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:07:12.694667 17621 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:07:12.694674 17621 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:07:12.694687 17621 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:07:12.694958 17621 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:07:12.694972 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.694977 17621 net.cpp:165] Memory required for data: 1309185500\nI0817 16:07:12.694988 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:07:12.694998 17621 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:07:12.695003 17621 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:07:12.695014 17621 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.695073 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:07:12.695230 17621 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:07:12.695243 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.695248 17621 net.cpp:165] Memory required for data: 1311233500\nI0817 16:07:12.695258 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:07:12.695267 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:07:12.695273 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:07:12.695284 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:07:12.695293 17621 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:07:12.695329 17621 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:07:12.695341 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.695346 17621 net.cpp:165] Memory required for data: 1313281500\nI0817 16:07:12.695351 17621 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:07:12.695359 17621 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:07:12.695365 17621 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:07:12.695375 17621 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.695385 17621 net.cpp:150] Setting up L3_b3_relu\nI0817 16:07:12.695392 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.695397 17621 net.cpp:165] Memory required for data: 1315329500\nI0817 16:07:12.695402 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.695410 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.695415 17621 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:07:12.695422 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:07:12.695432 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:07:12.695480 17621 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.695492 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.695498 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.695503 17621 net.cpp:165] Memory required for data: 1319425500\nI0817 16:07:12.695508 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:07:12.695519 17621 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:07:12.695525 17621 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:07:12.695538 17621 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:07:12.696569 17621 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:07:12.696584 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.696589 17621 net.cpp:165] Memory required for data: 1321473500\nI0817 16:07:12.696599 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:07:12.696607 17621 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:07:12.696614 17621 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:07:12.696625 17621 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:07:12.696907 17621 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:07:12.696920 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.696925 17621 net.cpp:165] Memory required for data: 1323521500\nI0817 16:07:12.696936 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:07:12.696945 17621 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:07:12.696951 17621 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:07:12.696959 17621 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.697022 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:07:12.697204 17621 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:07:12.697221 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.697227 17621 net.cpp:165] Memory required for data: 1325569500\nI0817 16:07:12.697237 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:07:12.697244 17621 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:07:12.697250 17621 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:07:12.697258 17621 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.697268 17621 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:07:12.697275 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.697279 17621 net.cpp:165] Memory required for data: 1327617500\nI0817 16:07:12.697284 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:07:12.697299 17621 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:07:12.697305 17621 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:07:12.697314 17621 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:07:12.698355 17621 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:07:12.698370 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.698375 17621 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:07:12.698385 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:07:12.698397 17621 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:07:12.698405 17621 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:07:12.698415 17621 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:07:12.698684 17621 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:07:12.698698 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.698703 17621 net.cpp:165] Memory required for data: 1331713500\nI0817 16:07:12.698714 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:07:12.698722 17621 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:07:12.698729 17621 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:07:12.698740 17621 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.698806 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:07:12.698971 17621 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:07:12.698983 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.698988 17621 net.cpp:165] Memory required for data: 1333761500\nI0817 16:07:12.698997 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:07:12.699012 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:07:12.699019 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:07:12.699026 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:07:12.699034 17621 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:07:12.699071 17621 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:07:12.699089 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.699093 17621 net.cpp:165] Memory required for data: 1335809500\nI0817 16:07:12.699098 17621 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:07:12.699106 17621 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:07:12.699112 17621 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:07:12.699122 17621 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.699133 17621 net.cpp:150] Setting up L3_b4_relu\nI0817 16:07:12.699141 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.699144 17621 net.cpp:165] Memory required for data: 1337857500\nI0817 16:07:12.699149 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.699156 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.699162 17621 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:07:12.699169 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:07:12.699178 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:07:12.699229 17621 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.699240 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.699246 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.699251 17621 net.cpp:165] Memory required for data: 1341953500\nI0817 16:07:12.699256 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:07:12.699267 17621 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:07:12.699275 17621 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:07:12.699285 17621 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:07:12.700328 17621 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:07:12.700343 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.700350 17621 net.cpp:165] Memory required for data: 1344001500\nI0817 16:07:12.700358 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:07:12.700371 17621 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:07:12.700378 17621 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:07:12.700387 17621 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:07:12.701647 17621 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:07:12.701665 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.701670 17621 net.cpp:165] Memory required for data: 1346049500\nI0817 16:07:12.701683 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:07:12.701692 17621 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:07:12.701699 17621 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:07:12.701710 17621 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.701778 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:07:12.701942 17621 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:07:12.701956 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.701961 17621 net.cpp:165] Memory required for data: 1348097500\nI0817 16:07:12.701970 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:07:12.701982 17621 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:07:12.701988 17621 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:07:12.701997 17621 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.702006 17621 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:07:12.702013 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.702018 17621 net.cpp:165] Memory required for data: 1350145500\nI0817 16:07:12.702023 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:07:12.702038 17621 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:07:12.702044 17621 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:07:12.702055 17621 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:07:12.704069 17621 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:07:12.704087 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:07:12.704093 17621 net.cpp:165] Memory required for data: 1352193500\nI0817 16:07:12.704103 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:07:12.704116 17621 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:07:12.704123 17621 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:07:12.704135 17621 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:07:12.704393 17621 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:07:12.704406 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704411 17621 net.cpp:165] Memory required for data: 1354241500\nI0817 16:07:12.704422 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:07:12.704432 17621 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:07:12.704438 17621 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:07:12.704448 17621 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.704506 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:07:12.704666 17621 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:07:12.704679 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704684 17621 net.cpp:165] Memory required for data: 1356289500\nI0817 16:07:12.704694 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:07:12.704706 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:07:12.704713 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:07:12.704720 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:07:12.704728 17621 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:07:12.704771 17621 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:07:12.704784 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704789 17621 net.cpp:165] Memory required for data: 1358337500\nI0817 16:07:12.704794 17621 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:07:12.704802 17621 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:07:12.704808 17621 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:07:12.704818 17621 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.704829 17621 net.cpp:150] Setting up L3_b5_relu\nI0817 16:07:12.704836 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704840 17621 net.cpp:165] Memory required for data: 1360385500\nI0817 16:07:12.704845 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.704852 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.704859 17621 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:07:12.704865 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:07:12.704876 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:07:12.704924 17621 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.704936 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704942 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.704947 17621 net.cpp:165] Memory required for data: 1364481500\nI0817 16:07:12.704952 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:07:12.704963 17621 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:07:12.704970 17621 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:07:12.704982 17621 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:07:12.705999 17621 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:07:12.706014 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.706019 17621 net.cpp:165] Memory required for data: 1366529500\nI0817 16:07:12.706028 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:07:12.706038 
17621 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:07:12.706053 17621 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:07:12.706064 17621 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:07:12.706326 17621 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:07:12.706339 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.706344 17621 net.cpp:165] Memory required for data: 1368577500\nI0817 16:07:12.706354 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:07:12.706364 17621 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:07:12.706370 17621 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:07:12.706378 17621 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.706437 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:07:12.706586 17621 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:07:12.706604 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.706609 17621 net.cpp:165] Memory required for data: 1370625500\nI0817 16:07:12.706617 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:07:12.706626 17621 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:07:12.706632 17621 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:07:12.706640 17621 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.706650 17621 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:07:12.706656 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.706660 17621 net.cpp:165] Memory required for data: 1372673500\nI0817 16:07:12.706665 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:07:12.706679 17621 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:07:12.706686 17621 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:07:12.706694 17621 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:07:12.707708 17621 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:07:12.707723 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.707728 17621 net.cpp:165] Memory required for data: 1374721500\nI0817 16:07:12.707737 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:07:12.707757 17621 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:07:12.707765 17621 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:07:12.707777 17621 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:07:12.708031 17621 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:07:12.708045 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708050 17621 net.cpp:165] Memory required for data: 1376769500\nI0817 16:07:12.708060 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:07:12.708070 17621 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:07:12.708076 17621 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:07:12.708087 17621 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.708144 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:07:12.708297 17621 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:07:12.708310 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708315 17621 net.cpp:165] Memory required for data: 1378817500\nI0817 16:07:12.708324 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:07:12.708336 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:07:12.708343 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:07:12.708350 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:07:12.708359 17621 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:07:12.708395 17621 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:07:12.708406 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708411 17621 net.cpp:165] Memory required for data: 1380865500\nI0817 16:07:12.708416 17621 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:07:12.708425 17621 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:07:12.708431 17621 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:07:12.708447 17621 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.708458 17621 net.cpp:150] Setting up L3_b6_relu\nI0817 16:07:12.708465 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708469 17621 net.cpp:165] Memory required for data: 1382913500\nI0817 16:07:12.708474 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.708482 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.708487 17621 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:07:12.708495 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:07:12.708505 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:07:12.708554 17621 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.708565 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708572 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.708576 17621 net.cpp:165] Memory required for data: 1387009500\nI0817 16:07:12.708581 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:07:12.708592 17621 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:07:12.708600 17621 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:07:12.708611 17621 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:07:12.709630 17621 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:07:12.709645 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.709650 17621 net.cpp:165] Memory required for data: 1389057500\nI0817 16:07:12.709659 17621 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:07:12.709671 17621 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:07:12.709678 17621 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:07:12.709687 17621 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:07:12.709959 17621 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:07:12.709971 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.709976 17621 net.cpp:165] Memory required for data: 1391105500\nI0817 16:07:12.709987 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:07:12.709996 17621 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:07:12.710002 17621 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:07:12.710011 17621 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.710070 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:07:12.710227 17621 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:07:12.710242 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.710247 17621 net.cpp:165] Memory required for data: 1393153500\nI0817 16:07:12.710258 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:07:12.710291 17621 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:07:12.710300 17621 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:07:12.710309 17621 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.710319 17621 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:07:12.710326 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.710331 17621 net.cpp:165] Memory required for data: 1395201500\nI0817 16:07:12.710337 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:07:12.710351 17621 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:07:12.710357 17621 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:07:12.710366 17621 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:07:12.711388 17621 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:07:12.711403 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.711408 17621 net.cpp:165] Memory required for data: 1397249500\nI0817 16:07:12.711417 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:07:12.711426 17621 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:07:12.711439 17621 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:07:12.711452 17621 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:07:12.711720 17621 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:07:12.711733 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.711738 17621 net.cpp:165] Memory required for data: 1399297500\nI0817 16:07:12.711750 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:07:12.711765 17621 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:07:12.711771 17621 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:07:12.711779 17621 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.711840 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:07:12.711997 17621 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:07:12.712013 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.712018 17621 net.cpp:165] Memory required for data: 1401345500\nI0817 16:07:12.712028 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:07:12.712036 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:07:12.712043 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:07:12.712049 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:07:12.712057 17621 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:07:12.712093 17621 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:07:12.712105 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.712110 17621 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:07:12.712116 17621 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:07:12.712123 17621 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:07:12.712129 17621 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:07:12.712136 17621 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.712146 17621 net.cpp:150] Setting up L3_b7_relu\nI0817 16:07:12.712152 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.712157 17621 net.cpp:165] Memory required for data: 1405441500\nI0817 16:07:12.712162 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.712168 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.712173 17621 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:07:12.712185 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:07:12.712195 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:07:12.712240 17621 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.712255 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.712262 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.712267 17621 net.cpp:165] Memory required for data: 1409537500\nI0817 16:07:12.712272 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:07:12.712283 17621 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:07:12.712290 17621 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:07:12.712298 17621 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:07:12.713320 17621 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:07:12.713335 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.713340 17621 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:07:12.713349 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:07:12.713361 17621 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:07:12.713368 17621 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:07:12.713376 17621 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:07:12.713639 17621 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:07:12.713652 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.713657 17621 net.cpp:165] Memory required for data: 1413633500\nI0817 16:07:12.713675 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:07:12.713688 17621 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:07:12.713696 17621 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:07:12.713703 17621 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.713770 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:07:12.713928 17621 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:07:12.713942 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.713946 17621 net.cpp:165] Memory required for data: 1415681500\nI0817 16:07:12.713956 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:07:12.713966 17621 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:07:12.713973 17621 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:07:12.713984 17621 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.713994 17621 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:07:12.714000 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.714005 17621 net.cpp:165] Memory required for data: 1417729500\nI0817 16:07:12.714010 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:07:12.714021 17621 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:07:12.714027 17621 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:07:12.714041 17621 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:07:12.715054 17621 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:07:12.715068 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715073 17621 net.cpp:165] Memory required for data: 1419777500\nI0817 16:07:12.715082 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:07:12.715096 17621 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:07:12.715103 17621 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:07:12.715112 17621 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:07:12.715376 17621 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:07:12.715389 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715394 17621 net.cpp:165] Memory required for data: 1421825500\nI0817 16:07:12.715404 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:07:12.715414 17621 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:07:12.715420 17621 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:07:12.715427 17621 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.715487 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:07:12.715642 17621 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:07:12.715659 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715664 17621 net.cpp:165] Memory required for data: 1423873500\nI0817 16:07:12.715673 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:07:12.715682 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:07:12.715689 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:07:12.715697 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:07:12.715704 17621 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:07:12.715739 17621 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:07:12.715757 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:07:12.715762 17621 net.cpp:165] Memory required for data: 1425921500\nI0817 16:07:12.715768 17621 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:07:12.715776 17621 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:07:12.715782 17621 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:07:12.715790 17621 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.715798 17621 net.cpp:150] Setting up L3_b8_relu\nI0817 16:07:12.715806 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715811 17621 net.cpp:165] Memory required for data: 1427969500\nI0817 16:07:12.715816 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.715832 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.715838 17621 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:07:12.715847 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:07:12.715857 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:07:12.715903 17621 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.715917 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715925 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.715929 17621 net.cpp:165] Memory required for data: 1432065500\nI0817 16:07:12.715935 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:07:12.715946 17621 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:07:12.715952 17621 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:07:12.715962 17621 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:07:12.717957 17621 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:07:12.717973 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:07:12.717979 17621 net.cpp:165] Memory required for data: 1434113500\nI0817 16:07:12.717989 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:07:12.718001 17621 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:07:12.718009 17621 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:07:12.718020 17621 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:07:12.718282 17621 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:07:12.718296 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.718300 17621 net.cpp:165] Memory required for data: 1436161500\nI0817 16:07:12.718312 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:07:12.718320 17621 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:07:12.718327 17621 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:07:12.718338 17621 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.718395 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:07:12.718554 17621 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:07:12.718569 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.718574 17621 net.cpp:165] Memory required for data: 1438209500\nI0817 16:07:12.718582 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:07:12.718590 17621 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:07:12.718596 17621 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:07:12.718607 17621 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.718617 17621 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:07:12.718624 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.718628 17621 net.cpp:165] Memory required for data: 1440257500\nI0817 16:07:12.718633 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:07:12.718648 17621 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:07:12.718654 17621 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:07:12.718663 17621 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:07:12.719684 17621 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:07:12.719699 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.719704 17621 net.cpp:165] Memory required for data: 1442305500\nI0817 16:07:12.719713 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:07:12.719725 17621 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:07:12.719733 17621 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:07:12.719741 17621 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:07:12.720015 17621 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:07:12.720028 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.720033 17621 net.cpp:165] Memory required for data: 1444353500\nI0817 16:07:12.720052 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:07:12.720064 17621 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:07:12.720072 17621 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:07:12.720079 17621 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.720140 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:07:12.720296 17621 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:07:12.720309 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.720314 17621 net.cpp:165] Memory required for data: 1446401500\nI0817 16:07:12.720324 17621 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:07:12.720333 17621 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:07:12.720340 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:07:12.720346 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:07:12.720357 17621 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:07:12.720391 17621 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:07:12.720402 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.720407 17621 net.cpp:165] Memory required for data: 1448449500\nI0817 16:07:12.720413 17621 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:07:12.720425 17621 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:07:12.720432 17621 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:07:12.720439 17621 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.720449 17621 net.cpp:150] Setting up L3_b9_relu\nI0817 16:07:12.720456 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.720460 17621 net.cpp:165] Memory required for data: 1450497500\nI0817 16:07:12.720465 17621 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:07:12.720474 17621 net.cpp:100] Creating Layer post_pool\nI0817 16:07:12.720479 17621 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:07:12.720487 17621 net.cpp:408] post_pool -> post_pool\nI0817 16:07:12.720525 17621 net.cpp:150] Setting up post_pool\nI0817 16:07:12.720535 17621 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:07:12.720540 17621 net.cpp:165] Memory required for data: 1450529500\nI0817 16:07:12.720544 17621 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:07:12.720630 17621 net.cpp:100] Creating Layer post_FC\nI0817 16:07:12.720643 17621 net.cpp:434] post_FC <- post_pool\nI0817 16:07:12.720652 17621 net.cpp:408] post_FC -> post_FC_top\nI0817 16:07:12.720904 17621 net.cpp:150] Setting up post_FC\nI0817 16:07:12.720921 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.720926 17621 net.cpp:165] Memory required for data: 1450534500\nI0817 16:07:12.720935 17621 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:07:12.720943 17621 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:07:12.720949 17621 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:07:12.720960 17621 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:07:12.720971 17621 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:07:12.721019 17621 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:07:12.721030 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.721037 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.721041 17621 net.cpp:165] Memory required for data: 1450544500\nI0817 16:07:12.721046 17621 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:07:12.721092 17621 net.cpp:100] Creating Layer accuracy\nI0817 16:07:12.721104 17621 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:07:12.721112 17621 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:07:12.721119 17621 net.cpp:408] accuracy -> accuracy\nI0817 16:07:12.721163 17621 net.cpp:150] Setting up accuracy\nI0817 16:07:12.721174 17621 net.cpp:157] Top shape: (1)\nI0817 16:07:12.721179 17621 net.cpp:165] Memory required for data: 1450544504\nI0817 16:07:12.721185 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:07:12.721200 17621 net.cpp:100] Creating Layer loss\nI0817 16:07:12.721207 17621 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:07:12.721215 17621 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:07:12.721222 17621 net.cpp:408] loss -> loss\nI0817 16:07:12.721269 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:07:12.721431 17621 net.cpp:150] Setting up loss\nI0817 16:07:12.721446 17621 net.cpp:157] Top shape: (1)\nI0817 16:07:12.721451 17621 net.cpp:160]     with loss weight 1\nI0817 16:07:12.721527 17621 net.cpp:165] Memory required for data: 1450544508\nI0817 16:07:12.721536 17621 net.cpp:226] loss needs backward computation.\nI0817 16:07:12.721542 17621 net.cpp:228] accuracy does not need backward computation.\nI0817 16:07:12.721549 17621 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:07:12.721554 17621 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:07:12.721560 17621 net.cpp:226] post_pool needs backward computation.\nI0817 16:07:12.721565 17621 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:07:12.721570 17621 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.721575 17621 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.721580 17621 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.721585 17621 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.721590 17621 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.721595 17621 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.721598 17621 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.721603 17621 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.721608 17621 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:07:12.721614 17621 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:07:12.721619 17621 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.721624 17621 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.721629 17621 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.721634 17621 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.721640 17621 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.721645 17621 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:07:12.721649 17621 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.721654 17621 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.721660 17621 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:07:12.721665 17621 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:07:12.721670 17621 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:07:12.721675 17621 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.721680 17621 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.721685 17621 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.721690 17621 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.721695 17621 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.721700 17621 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.721705 17621 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.721710 17621 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:07:12.721716 17621 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:07:12.721721 17621 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.721726 17621 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.721731 17621 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.721736 17621 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.721748 17621 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.721761 17621 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.721766 17621 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.721771 17621 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.721777 17621 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:07:12.721782 17621 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:07:12.721787 17621 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:07:12.721793 17621 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.721798 17621 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.721807 17621 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.721813 17621 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.721819 17621 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.721824 17621 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.721829 17621 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.721835 17621 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:07:12.721840 17621 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:07:12.721845 17621 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.721851 17621 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.721856 17621 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.721861 17621 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.721868 17621 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.721871 17621 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.721876 17621 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.721882 17621 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.721887 17621 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:07:12.721892 17621 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:07:12.721897 17621 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:07:12.721904 17621 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.721909 17621 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.721913 17621 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.721918 17621 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.721923 17621 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.721928 
17621 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.721933 17621 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.721940 17621 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:07:12.721945 17621 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:07:12.721949 17621 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.721956 17621 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.721961 17621 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.721966 17621 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.721971 17621 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.721976 17621 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.721981 17621 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.721987 17621 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.721992 17621 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:07:12.721997 17621 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:07:12.722003 17621 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:07:12.722013 17621 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:07:12.722019 17621 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.722025 17621 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:07:12.722030 17621 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.722035 17621 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.722041 17621 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.722046 17621 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.722051 17621 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:07:12.722056 17621 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.722061 17621 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.722066 17621 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:07:12.722072 17621 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:07:12.722077 17621 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.722085 17621 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.722091 17621 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.722097 17621 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.722102 17621 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.722108 17621 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.722113 17621 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.722118 17621 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.722123 17621 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:07:12.722129 17621 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:07:12.722134 17621 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.722141 17621 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.722146 17621 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.722151 17621 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.722157 17621 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.722162 17621 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:07:12.722167 17621 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.722172 17621 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.722177 17621 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:07:12.722183 17621 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:07:12.722188 17621 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:07:12.722193 17621 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.722199 17621 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.722204 17621 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.722209 17621 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.722214 17621 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.722220 17621 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.722225 17621 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.722231 17621 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:07:12.722236 17621 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:07:12.722241 17621 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.722247 17621 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.722252 17621 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.722257 17621 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.722263 17621 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.722268 17621 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.722277 17621 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.722283 17621 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.722290 17621 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:07:12.722295 17621 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:07:12.722299 17621 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:07:12.722306 17621 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.722311 17621 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.722316 17621 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.722321 17621 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.722326 17621 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.722332 17621 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.722337 17621 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.722342 17621 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:07:12.722347 17621 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:07:12.722352 17621 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.722358 17621 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.722363 17621 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.722369 17621 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.722374 17621 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.722379 17621 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.722385 17621 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.722390 17621 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.722396 17621 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:07:12.722401 17621 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:07:12.722406 17621 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:07:12.722412 17621 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.722417 17621 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.722424 17621 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.722429 17621 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.722434 17621 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.722440 17621 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.722445 17621 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.722450 17621 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:07:12.722456 17621 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:07:12.722461 17621 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.722470 17621 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.722476 17621 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.722482 17621 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.722487 17621 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.722493 17621 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.722498 17621 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.722503 17621 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.722509 17621 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:07:12.722515 17621 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:07:12.722522 17621 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:07:12.722527 17621 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:07:12.722532 17621 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.722545 17621 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:07:12.722551 17621 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.722556 17621 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.722563 17621 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.722568 17621 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.722574 17621 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:07:12.722579 17621 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.722584 17621 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.722590 17621 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:07:12.722595 17621 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:07:12.722601 17621 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.722607 17621 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.722612 17621 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.722618 17621 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.722623 17621 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.722628 17621 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.722633 17621 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.722640 17621 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.722645 17621 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:07:12.722651 17621 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:07:12.722656 17621 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.722661 17621 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.722666 17621 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.722672 17621 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.722677 17621 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.722683 17621 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:07:12.722688 17621 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.722694 17621 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.722700 17621 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:07:12.722707 17621 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:07:12.722712 17621 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:07:12.722718 17621 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.722723 17621 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.722728 17621 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.722734 17621 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.722739 17621 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.722744 17621 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.722750 17621 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.722762 17621 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:07:12.722769 17621 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:07:12.722774 17621 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.722780 17621 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.722785 17621 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.722791 17621 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.722797 17621 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.722802 17621 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.722807 17621 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.722818 17621 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.722825 17621 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:07:12.722831 17621 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:07:12.722836 17621 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:07:12.722843 17621 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.722848 17621 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.722854 17621 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.722860 17621 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.722865 17621 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.722870 17621 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.722877 17621 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.722882 17621 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:07:12.722888 17621 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:07:12.722893 17621 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.722899 17621 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.722905 17621 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.722911 17621 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.722916 17621 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.722921 17621 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.722928 17621 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.722932 17621 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.722939 17621 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:07:12.722944 17621 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:07:12.722949 17621 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:07:12.722956 17621 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.722961 17621 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.722967 17621 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.722972 17621 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.722978 17621 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.722983 17621 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.722990 17621 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.722995 17621 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:07:12.723001 17621 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:07:12.723007 17621 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.723013 17621 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.723019 17621 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.723026 17621 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.723031 17621 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.723037 17621 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.723042 17621 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.723047 17621 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.723052 17621 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:07:12.723058 17621 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:07:12.723063 17621 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.723070 17621 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.723076 17621 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.723081 17621 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.723093 17621 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.723098 17621 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:07:12.723104 17621 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.723109 17621 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.723115 17621 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:07:12.723121 17621 net.cpp:226] pre_relu needs backward computation.\nI0817 16:07:12.723126 17621 net.cpp:226] pre_scale needs backward computation.\nI0817 16:07:12.723131 17621 net.cpp:226] pre_bn needs backward computation.\nI0817 16:07:12.723136 17621 net.cpp:226] pre_conv needs backward computation.\nI0817 16:07:12.723143 17621 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:07:12.723152 17621 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:07:12.723157 17621 net.cpp:270] This network produces output accuracy\nI0817 16:07:12.723165 17621 net.cpp:270] This network produces output loss\nI0817 16:07:12.723531 17621 net.cpp:283] Network initialization done.\nI0817 16:07:12.733229 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:12.733270 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:12.733330 17621 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:07:12.733711 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:07:12.733728 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:07:12.733739 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:07:12.733748 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:07:12.733767 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:07:12.733777 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:07:12.733785 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:07:12.733794 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:07:12.733804 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:07:12.733813 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:07:12.733821 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:07:12.733829 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:07:12.733839 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:07:12.733847 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:07:12.733856 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:07:12.733865 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:07:12.733875 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:07:12.733882 17621 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:07:12.733891 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:07:12.733911 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:07:12.733922 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:07:12.733930 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:07:12.733942 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:07:12.733952 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:07:12.733960 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:07:12.733968 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:07:12.733978 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:07:12.733985 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:07:12.733994 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:07:12.734002 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:07:12.734011 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:07:12.734019 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:07:12.734028 17621 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:07:12.734036 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:07:12.734045 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:07:12.734053 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:07:12.734062 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:07:12.734071 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:07:12.734079 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:07:12.734088 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:07:12.734099 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:07:12.734108 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:07:12.734117 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:07:12.734125 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:07:12.734134 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:07:12.734143 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:07:12.734151 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:07:12.734159 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:07:12.734169 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:07:12.734176 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:07:12.734194 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:07:12.734202 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:07:12.734211 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:07:12.734220 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:07:12.734228 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:07:12.734236 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:07:12.735888 17621 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:07:12.737476 17621 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:07:12.737720 17621 net.cpp:100] Creating Layer dataLayer\nI0817 16:07:12.737742 17621 net.cpp:408] dataLayer -> data_top\nI0817 16:07:12.737766 17621 net.cpp:408] dataLayer -> label\nI0817 16:07:12.737779 17621 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:07:12.747027 17628 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:07:12.747267 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:12.754622 17621 net.cpp:150] Setting up dataLayer\nI0817 16:07:12.754643 17621 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:07:12.754652 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.754657 17621 net.cpp:165] Memory required for data: 1536500\nI0817 16:07:12.754663 17621 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:07:12.754673 17621 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:07:12.754679 17621 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:07:12.754707 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:07:12.754724 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:07:12.754854 17621 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:07:12.754884 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.754892 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:07:12.754897 17621 net.cpp:165] Memory required for data: 1537500\nI0817 16:07:12.754902 17621 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:07:12.754920 17621 net.cpp:100] Creating Layer pre_conv\nI0817 16:07:12.754928 17621 net.cpp:434] pre_conv <- data_top\nI0817 16:07:12.754938 17621 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:07:12.755384 17621 net.cpp:150] Setting up pre_conv\nI0817 16:07:12.755409 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.755414 17621 net.cpp:165] Memory required for data: 9729500\nI0817 16:07:12.755429 17621 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:07:12.755483 17621 net.cpp:100] Creating Layer pre_bn\nI0817 16:07:12.755491 17621 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:07:12.755501 17621 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:07:12.755915 17621 net.cpp:150] Setting up pre_bn\nI0817 16:07:12.755931 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.755939 17621 net.cpp:165] Memory required for data: 17921500\nI0817 16:07:12.755955 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:07:12.755970 17621 net.cpp:100] Creating Layer pre_scale\nI0817 16:07:12.755978 17621 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:07:12.755986 17621 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:07:12.756057 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:07:12.756263 17621 net.cpp:150] Setting up pre_scale\nI0817 16:07:12.756279 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.756284 17621 net.cpp:165] Memory required for data: 26113500\nI0817 16:07:12.756294 17621 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:07:12.756302 17621 net.cpp:100] Creating Layer pre_relu\nI0817 16:07:12.756309 17621 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:07:12.756319 17621 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:07:12.756330 17621 net.cpp:150] Setting up pre_relu\nI0817 16:07:12.756336 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.756341 17621 net.cpp:165] Memory required for data: 
34305500\nI0817 16:07:12.756346 17621 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:07:12.756359 17621 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:07:12.756366 17621 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:07:12.756372 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:07:12.756382 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:07:12.756438 17621 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:07:12.756448 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.756458 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.756464 17621 net.cpp:165] Memory required for data: 50689500\nI0817 16:07:12.756469 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:07:12.756479 17621 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:07:12.756485 17621 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:07:12.756497 17621 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:07:12.756904 17621 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:07:12.756922 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.756928 17621 net.cpp:165] Memory required for data: 58881500\nI0817 16:07:12.756940 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:07:12.756958 17621 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:07:12.756968 17621 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:07:12.756980 17621 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:07:12.757544 17621 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:07:12.757560 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.757565 17621 net.cpp:165] Memory required for data: 67073500\nI0817 16:07:12.757576 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:07:12.757586 17621 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:07:12.757594 17621 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:07:12.757603 17621 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.757669 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:07:12.757859 17621 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:07:12.757875 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.757881 17621 net.cpp:165] Memory required for data: 75265500\nI0817 16:07:12.757899 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:07:12.757910 17621 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:07:12.757920 17621 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:07:12.757931 17621 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.757941 17621 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:07:12.757949 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.757954 17621 net.cpp:165] Memory required for data: 83457500\nI0817 16:07:12.757959 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:07:12.757969 17621 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:07:12.757975 17621 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:07:12.757990 17621 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:07:12.758417 17621 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:07:12.758432 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.758437 17621 net.cpp:165] Memory required for data: 91649500\nI0817 16:07:12.758450 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:07:12.758460 17621 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:07:12.758466 17621 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:07:12.758478 17621 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:07:12.758793 17621 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:07:12.758808 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.758816 17621 net.cpp:165] Memory required for data: 99841500\nI0817 16:07:12.758836 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:07:12.758844 17621 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:07:12.758853 17621 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:07:12.758862 17621 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.758927 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:07:12.759111 17621 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:07:12.759127 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759132 17621 net.cpp:165] Memory required for data: 108033500\nI0817 16:07:12.759141 17621 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:07:12.759153 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:07:12.759160 17621 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:07:12.759167 17621 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:07:12.759181 17621 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:07:12.759222 17621 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:07:12.759232 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759237 17621 net.cpp:165] Memory required for data: 116225500\nI0817 16:07:12.759241 17621 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:07:12.759258 17621 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:07:12.759264 17621 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:07:12.759272 17621 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.759281 17621 net.cpp:150] Setting up L1_b1_relu\nI0817 16:07:12.759289 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759294 17621 net.cpp:165] Memory required for data: 124417500\nI0817 16:07:12.759299 17621 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.759308 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.759316 17621 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:07:12.759325 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:07:12.759333 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:07:12.759392 17621 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:07:12.759413 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759423 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759428 17621 net.cpp:165] Memory required for data: 140801500\nI0817 16:07:12.759434 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:07:12.759450 17621 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:07:12.759460 17621 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:07:12.759470 17621 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:07:12.759886 17621 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:07:12.759902 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.759907 17621 net.cpp:165] Memory required for data: 148993500\nI0817 16:07:12.759917 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:07:12.759928 17621 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:07:12.759935 17621 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:07:12.759946 17621 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:07:12.760280 17621 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:07:12.760296 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.760301 17621 net.cpp:165] Memory required for data: 157185500\nI0817 16:07:12.760314 17621 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:07:12.760324 17621 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:07:12.760329 17621 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:07:12.760337 17621 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.760404 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:07:12.760820 17621 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:07:12.760835 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.760840 17621 net.cpp:165] Memory required for data: 165377500\nI0817 16:07:12.760854 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:07:12.760862 17621 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:07:12.760869 17621 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:07:12.760879 17621 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.760890 17621 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:07:12.760896 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.760901 17621 net.cpp:165] Memory required for data: 173569500\nI0817 16:07:12.760906 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:07:12.760924 17621 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:07:12.760931 17621 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:07:12.760941 17621 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:07:12.761339 17621 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:07:12.761355 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.761360 17621 net.cpp:165] Memory required for data: 181761500\nI0817 16:07:12.761369 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:07:12.761381 17621 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:07:12.761389 17621 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:07:12.761400 17621 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:07:12.761732 17621 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:07:12.761749 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.761760 17621 net.cpp:165] Memory required for data: 189953500\nI0817 16:07:12.761780 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:07:12.761790 17621 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:07:12.761795 17621 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:07:12.761809 17621 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.761883 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:07:12.762063 17621 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:07:12.762078 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762085 17621 net.cpp:165] Memory required for data: 198145500\nI0817 16:07:12.762104 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:07:12.762118 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:07:12.762125 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:07:12.762132 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:07:12.762140 17621 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:07:12.762184 17621 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:07:12.762197 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762202 17621 net.cpp:165] Memory required for data: 206337500\nI0817 16:07:12.762207 17621 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:07:12.762213 17621 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:07:12.762219 17621 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:07:12.762226 17621 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.762235 17621 net.cpp:150] Setting up L1_b2_relu\nI0817 16:07:12.762243 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762250 17621 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:07:12.762256 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.762264 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.762269 17621 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:07:12.762279 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:07:12.762292 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:07:12.762346 17621 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:07:12.762356 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762362 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762367 17621 net.cpp:165] Memory required for data: 230913500\nI0817 16:07:12.762372 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:07:12.762388 17621 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:07:12.762395 17621 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:07:12.762405 17621 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:07:12.762809 17621 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:07:12.762823 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.762830 17621 net.cpp:165] Memory required for data: 239105500\nI0817 16:07:12.762840 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:07:12.762853 17621 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:07:12.762859 17621 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:07:12.762872 17621 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:07:12.763206 17621 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:07:12.763221 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.763226 17621 net.cpp:165] Memory required for data: 
247297500\nI0817 16:07:12.763239 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:07:12.763249 17621 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:07:12.763255 17621 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:07:12.763267 17621 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.763334 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:07:12.763531 17621 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:07:12.763545 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.763550 17621 net.cpp:165] Memory required for data: 255489500\nI0817 16:07:12.763561 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:07:12.763573 17621 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:07:12.763579 17621 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:07:12.763589 17621 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.763607 17621 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:07:12.763615 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.763622 17621 net.cpp:165] Memory required for data: 263681500\nI0817 16:07:12.763628 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:07:12.763644 17621 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:07:12.763650 17621 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:07:12.763662 17621 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:07:12.764286 17621 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:07:12.764302 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.764307 17621 net.cpp:165] Memory required for data: 271873500\nI0817 16:07:12.764315 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:07:12.764334 17621 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:07:12.764341 17621 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:07:12.764353 17621 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:07:12.764664 17621 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:07:12.764678 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.764683 17621 net.cpp:165] Memory required for data: 280065500\nI0817 16:07:12.764694 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:07:12.764703 17621 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:07:12.764709 17621 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:07:12.764719 17621 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.764798 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:07:12.764987 17621 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:07:12.765002 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.765007 17621 net.cpp:165] Memory required for data: 288257500\nI0817 16:07:12.765020 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:07:12.765033 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:07:12.765039 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:07:12.765046 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:07:12.765058 17621 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:07:12.765100 17621 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:07:12.765111 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.765115 17621 net.cpp:165] Memory required for data: 296449500\nI0817 16:07:12.765121 17621 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:07:12.765128 17621 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:07:12.765135 17621 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:07:12.765144 17621 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.765157 17621 net.cpp:150] Setting up L1_b3_relu\nI0817 16:07:12.765166 17621 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:07:12.765171 17621 net.cpp:165] Memory required for data: 304641500\nI0817 16:07:12.765174 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.765182 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.765187 17621 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:07:12.765198 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:07:12.765208 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:07:12.765257 17621 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:07:12.765269 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.765276 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.765281 17621 net.cpp:165] Memory required for data: 321025500\nI0817 16:07:12.765286 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:07:12.765300 17621 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:07:12.765317 17621 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:07:12.765326 17621 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:07:12.765704 17621 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:07:12.765719 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.765724 17621 net.cpp:165] Memory required for data: 329217500\nI0817 16:07:12.765733 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:07:12.765746 17621 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:07:12.765758 17621 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:07:12.765769 17621 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:07:12.766038 17621 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:07:12.766052 17621 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:07:12.766058 17621 net.cpp:165] Memory required for data: 337409500\nI0817 16:07:12.766068 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:07:12.766077 17621 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:07:12.766083 17621 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:07:12.766090 17621 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.766150 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:07:12.766307 17621 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:07:12.766320 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.766325 17621 net.cpp:165] Memory required for data: 345601500\nI0817 16:07:12.766335 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:07:12.766345 17621 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:07:12.766351 17621 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:07:12.766379 17621 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.766391 17621 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:07:12.766398 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.766403 17621 net.cpp:165] Memory required for data: 353793500\nI0817 16:07:12.766408 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:07:12.766422 17621 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:07:12.766428 17621 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:07:12.766439 17621 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:07:12.766798 17621 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:07:12.766813 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.766818 17621 net.cpp:165] Memory required for data: 361985500\nI0817 16:07:12.766826 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:07:12.766839 17621 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:07:12.766845 17621 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:07:12.766854 17621 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:07:12.767125 17621 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:07:12.767140 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767145 17621 net.cpp:165] Memory required for data: 370177500\nI0817 16:07:12.767155 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:07:12.767164 17621 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:07:12.767170 17621 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:07:12.767177 17621 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.767237 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:07:12.767413 17621 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:07:12.767428 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767433 17621 net.cpp:165] Memory required for data: 378369500\nI0817 16:07:12.767442 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:07:12.767454 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:07:12.767460 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:07:12.767467 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:07:12.767482 17621 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:07:12.767521 17621 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:07:12.767532 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767536 17621 net.cpp:165] Memory required for data: 386561500\nI0817 16:07:12.767541 17621 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:07:12.767549 17621 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:07:12.767555 17621 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:07:12.767565 17621 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.767575 17621 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:07:12.767582 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767586 17621 net.cpp:165] Memory required for data: 394753500\nI0817 16:07:12.767591 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.767598 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.767603 17621 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:07:12.767613 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:07:12.767623 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:07:12.767669 17621 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:07:12.767681 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767688 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.767693 17621 net.cpp:165] Memory required for data: 411137500\nI0817 16:07:12.767698 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:07:12.767711 17621 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:07:12.767719 17621 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:07:12.767727 17621 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:07:12.768088 17621 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:07:12.768103 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.768108 17621 net.cpp:165] Memory required for data: 419329500\nI0817 16:07:12.768131 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:07:12.768144 17621 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:07:12.768151 17621 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:07:12.768159 17621 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:07:12.768425 17621 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:07:12.768440 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.768445 17621 net.cpp:165] Memory required for data: 427521500\nI0817 16:07:12.768455 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:07:12.768463 17621 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:07:12.768470 17621 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:07:12.768477 17621 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.768537 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:07:12.768697 17621 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:07:12.768710 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.768715 17621 net.cpp:165] Memory required for data: 435713500\nI0817 16:07:12.768724 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:07:12.768734 17621 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:07:12.768741 17621 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:07:12.768748 17621 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.768764 17621 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:07:12.768771 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.768775 17621 net.cpp:165] Memory required for data: 443905500\nI0817 16:07:12.768780 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:07:12.768801 17621 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:07:12.768807 17621 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:07:12.768820 17621 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:07:12.769172 17621 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:07:12.769187 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.769192 17621 net.cpp:165] Memory required for data: 452097500\nI0817 16:07:12.769201 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:07:12.769219 17621 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:07:12.769227 17621 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:07:12.769235 17621 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:07:12.769512 17621 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:07:12.769526 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.769531 17621 net.cpp:165] Memory required for data: 460289500\nI0817 16:07:12.769541 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:07:12.769549 17621 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:07:12.769556 17621 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:07:12.769563 17621 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.769624 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:07:12.769791 17621 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:07:12.769805 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.769810 17621 net.cpp:165] Memory required for data: 468481500\nI0817 16:07:12.769819 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:07:12.769829 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:07:12.769834 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:07:12.769841 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:07:12.769852 17621 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:07:12.769886 17621 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:07:12.769896 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.769901 17621 net.cpp:165] Memory required for data: 476673500\nI0817 16:07:12.769906 17621 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:07:12.769917 17621 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:07:12.769924 17621 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:07:12.769932 17621 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.769940 17621 net.cpp:150] Setting up L1_b5_relu\nI0817 16:07:12.769948 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.769953 17621 net.cpp:165] Memory required for data: 484865500\nI0817 16:07:12.769958 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.769964 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.769969 17621 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:07:12.769976 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:07:12.769985 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:07:12.770035 17621 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:07:12.770047 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.770054 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.770059 17621 net.cpp:165] Memory required for data: 501249500\nI0817 16:07:12.770063 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:07:12.770077 17621 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:07:12.770084 17621 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:07:12.770092 17621 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:07:12.770447 17621 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:07:12.770462 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.770474 17621 net.cpp:165] Memory required for data: 509441500\nI0817 16:07:12.770483 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:07:12.770496 17621 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:07:12.770503 17621 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:07:12.770514 17621 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:07:12.770792 17621 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:07:12.770807 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.770812 17621 net.cpp:165] Memory required for data: 517633500\nI0817 16:07:12.770822 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:07:12.770830 17621 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:07:12.770836 17621 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:07:12.770843 17621 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.770905 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:07:12.771090 17621 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:07:12.771105 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.771109 17621 net.cpp:165] Memory required for data: 525825500\nI0817 16:07:12.771119 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:07:12.771127 17621 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:07:12.771136 17621 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:07:12.771144 17621 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.771154 17621 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:07:12.771162 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.771165 17621 net.cpp:165] Memory required for data: 534017500\nI0817 16:07:12.771170 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:07:12.771183 17621 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:07:12.771189 17621 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:07:12.771200 17621 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:07:12.771553 17621 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:07:12.771566 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.771571 17621 net.cpp:165] Memory required for data: 542209500\nI0817 16:07:12.771579 17621 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:07:12.771591 17621 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:07:12.771598 17621 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:07:12.771606 17621 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:07:12.771893 17621 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:07:12.771908 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.771912 17621 net.cpp:165] Memory required for data: 550401500\nI0817 16:07:12.771922 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:07:12.771931 17621 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:07:12.771937 17621 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:07:12.771945 17621 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.772004 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:07:12.772164 17621 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:07:12.772178 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772183 17621 net.cpp:165] Memory required for data: 558593500\nI0817 16:07:12.772192 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:07:12.772209 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:07:12.772215 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:07:12.772222 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:07:12.772233 17621 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:07:12.772269 17621 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:07:12.772277 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772282 17621 net.cpp:165] Memory required for data: 566785500\nI0817 16:07:12.772294 17621 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:07:12.772303 17621 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:07:12.772308 17621 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:07:12.772316 17621 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.772325 17621 net.cpp:150] Setting up L1_b6_relu\nI0817 16:07:12.772333 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772337 17621 net.cpp:165] Memory required for data: 574977500\nI0817 16:07:12.772342 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.772352 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.772358 17621 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:07:12.772366 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:07:12.772374 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:07:12.772423 17621 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:07:12.772438 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772444 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772449 17621 net.cpp:165] Memory required for data: 591361500\nI0817 16:07:12.772454 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:07:12.772464 17621 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:07:12.772470 17621 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:07:12.772480 17621 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:07:12.772842 17621 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:07:12.772856 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.772861 17621 net.cpp:165] Memory required for data: 599553500\nI0817 16:07:12.772871 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:07:12.772882 17621 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:07:12.772888 17621 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:07:12.772897 17621 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:07:12.773195 17621 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:07:12.773211 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.773216 17621 net.cpp:165] Memory required for data: 607745500\nI0817 16:07:12.773226 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:07:12.773236 17621 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:07:12.773241 17621 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:07:12.773252 17621 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.773311 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:07:12.773475 17621 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:07:12.773489 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.773494 17621 net.cpp:165] Memory required for data: 615937500\nI0817 16:07:12.773504 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:07:12.773511 17621 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:07:12.773517 17621 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:07:12.773524 17621 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.773537 17621 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:07:12.773545 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.773550 17621 net.cpp:165] Memory required for data: 624129500\nI0817 16:07:12.773555 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:07:12.773564 17621 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:07:12.773573 17621 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:07:12.773582 17621 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:07:12.773952 17621 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:07:12.773965 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.773977 17621 
net.cpp:165] Memory required for data: 632321500\nI0817 16:07:12.773986 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:07:12.773999 17621 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:07:12.774006 17621 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:07:12.774018 17621 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:07:12.774286 17621 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:07:12.774299 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774304 17621 net.cpp:165] Memory required for data: 640513500\nI0817 16:07:12.774315 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:07:12.774323 17621 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:07:12.774329 17621 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:07:12.774340 17621 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.774399 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:07:12.774557 17621 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:07:12.774574 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774579 17621 net.cpp:165] Memory required for data: 648705500\nI0817 16:07:12.774587 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:07:12.774596 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:07:12.774602 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:07:12.774610 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:07:12.774617 17621 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:07:12.774654 17621 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:07:12.774667 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774670 17621 net.cpp:165] Memory required for data: 656897500\nI0817 16:07:12.774677 17621 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:07:12.774686 17621 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:07:12.774693 17621 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:07:12.774699 17621 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.774708 17621 net.cpp:150] Setting up L1_b7_relu\nI0817 16:07:12.774715 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774720 17621 net.cpp:165] Memory required for data: 665089500\nI0817 16:07:12.774724 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.774731 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.774737 17621 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:07:12.774747 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:07:12.774765 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:07:12.774813 17621 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:07:12.774827 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774835 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.774839 17621 net.cpp:165] Memory required for data: 681473500\nI0817 16:07:12.774844 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:07:12.774855 17621 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:07:12.774862 17621 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:07:12.774870 17621 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:07:12.775236 17621 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:07:12.775250 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.775255 17621 net.cpp:165] Memory required for data: 689665500\nI0817 16:07:12.775264 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:07:12.775276 17621 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:07:12.775283 17621 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:07:12.775298 17621 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:07:12.775573 17621 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:07:12.775586 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.775591 17621 net.cpp:165] Memory required for data: 697857500\nI0817 16:07:12.775602 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:07:12.775610 17621 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:07:12.775616 17621 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:07:12.775627 17621 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.775686 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:07:12.775854 17621 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:07:12.775871 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.775876 17621 net.cpp:165] Memory required for data: 706049500\nI0817 16:07:12.775884 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:07:12.775892 17621 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:07:12.775899 17621 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:07:12.775907 17621 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.775915 17621 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:07:12.775923 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.775928 17621 net.cpp:165] Memory required for data: 714241500\nI0817 16:07:12.775931 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:07:12.775945 17621 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:07:12.775951 17621 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:07:12.775962 17621 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:07:12.776321 17621 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:07:12.776336 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.776341 17621 net.cpp:165] Memory required for data: 722433500\nI0817 16:07:12.776350 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:07:12.776361 17621 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:07:12.776368 17621 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:07:12.776378 17621 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:07:12.776650 17621 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:07:12.776664 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.776669 17621 net.cpp:165] Memory required for data: 730625500\nI0817 16:07:12.776679 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:07:12.776687 17621 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:07:12.776693 17621 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:07:12.776700 17621 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.776772 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:07:12.776934 17621 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:07:12.776947 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.776952 17621 net.cpp:165] Memory required for data: 738817500\nI0817 16:07:12.776962 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:07:12.776973 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:07:12.776979 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:07:12.776986 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:07:12.776994 17621 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:07:12.777031 17621 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:07:12.777043 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777048 17621 net.cpp:165] Memory required for data: 747009500\nI0817 16:07:12.777053 17621 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:07:12.777060 17621 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:07:12.777065 17621 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:07:12.777076 17621 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.777092 17621 net.cpp:150] Setting up L1_b8_relu\nI0817 16:07:12.777101 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777106 17621 net.cpp:165] Memory required for data: 755201500\nI0817 16:07:12.777109 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.777117 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.777122 17621 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:07:12.777132 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:07:12.777143 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:07:12.777189 17621 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:07:12.777201 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777207 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777212 17621 net.cpp:165] Memory required for data: 771585500\nI0817 16:07:12.777217 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:07:12.777230 17621 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:07:12.777237 17621 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:07:12.777246 17621 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:07:12.777611 17621 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:07:12.777627 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777632 17621 net.cpp:165] Memory required for data: 779777500\nI0817 16:07:12.777642 17621 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:07:12.777652 17621 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:07:12.777657 17621 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:07:12.777668 17621 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:07:12.777951 17621 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:07:12.777966 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.777971 17621 net.cpp:165] Memory required for data: 787969500\nI0817 16:07:12.777981 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:07:12.777992 17621 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:07:12.777999 17621 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:07:12.778007 17621 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.778064 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:07:12.778228 17621 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:07:12.778242 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.778247 17621 net.cpp:165] Memory required for data: 796161500\nI0817 16:07:12.778256 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:07:12.778268 17621 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:07:12.778275 17621 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:07:12.778282 17621 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.778292 17621 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:07:12.778300 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.778304 17621 net.cpp:165] Memory required for data: 804353500\nI0817 16:07:12.778309 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:07:12.778322 17621 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:07:12.778328 17621 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:07:12.778339 17621 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:07:12.778692 17621 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:07:12.778707 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.778712 17621 net.cpp:165] Memory required for data: 812545500\nI0817 16:07:12.778720 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:07:12.778729 17621 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:07:12.778735 17621 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:07:12.778756 17621 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:07:12.779033 17621 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:07:12.779047 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779052 17621 net.cpp:165] Memory required for data: 820737500\nI0817 16:07:12.779086 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:07:12.779096 17621 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:07:12.779103 17621 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:07:12.779112 17621 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.779170 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:07:12.779328 17621 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:07:12.779341 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779346 17621 net.cpp:165] Memory required for data: 828929500\nI0817 16:07:12.779356 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:07:12.779364 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:07:12.779371 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:07:12.779377 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:07:12.779386 17621 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:07:12.779507 17621 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:07:12.779521 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779526 17621 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:07:12.779532 17621 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:07:12.779542 17621 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:07:12.779549 17621 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:07:12.779644 17621 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.779659 17621 net.cpp:150] Setting up L1_b9_relu\nI0817 16:07:12.779666 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779671 17621 net.cpp:165] Memory required for data: 845313500\nI0817 16:07:12.779676 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.779686 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.779692 17621 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:07:12.779700 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:07:12.779709 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:07:12.779769 17621 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:07:12.779784 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779793 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:07:12.779796 17621 net.cpp:165] Memory required for data: 861697500\nI0817 16:07:12.779801 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:07:12.779812 17621 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:07:12.779819 17621 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:07:12.779829 17621 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:07:12.780192 17621 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:07:12.780206 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.780211 17621 net.cpp:165] Memory required for data: 
863745500\nI0817 16:07:12.780220 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:07:12.780232 17621 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:07:12.780239 17621 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:07:12.780247 17621 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:07:12.780515 17621 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:07:12.780529 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.780534 17621 net.cpp:165] Memory required for data: 865793500\nI0817 16:07:12.780544 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:07:12.780560 17621 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:07:12.780566 17621 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:07:12.780577 17621 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.780637 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:07:12.780804 17621 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:07:12.780818 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.780823 17621 net.cpp:165] Memory required for data: 867841500\nI0817 16:07:12.780833 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:07:12.780840 17621 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:07:12.780846 17621 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:07:12.780856 17621 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.780867 17621 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:07:12.780874 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.780879 17621 net.cpp:165] Memory required for data: 869889500\nI0817 16:07:12.780884 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:07:12.780896 17621 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:07:12.780903 17621 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:07:12.780911 17621 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:07:12.781266 17621 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:07:12.781280 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.781286 17621 net.cpp:165] Memory required for data: 871937500\nI0817 16:07:12.781294 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:07:12.781306 17621 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:07:12.781313 17621 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:07:12.781322 17621 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:07:12.781586 17621 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:07:12.781599 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.781605 17621 net.cpp:165] Memory required for data: 873985500\nI0817 16:07:12.781615 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:07:12.781623 17621 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:07:12.781630 17621 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:07:12.781642 17621 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.781702 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:07:12.781893 17621 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:07:12.781908 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.781913 17621 net.cpp:165] Memory required for data: 876033500\nI0817 16:07:12.781922 17621 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:07:12.781932 17621 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:07:12.781939 17621 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:07:12.781950 17621 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:07:12.781981 17621 net.cpp:150] Setting up L2_b1_pool\nI0817 16:07:12.781991 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.781996 17621 net.cpp:165] Memory required for data: 878081500\nI0817 16:07:12.782001 17621 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:07:12.782012 17621 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:07:12.782019 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:07:12.782025 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:07:12.782033 17621 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:07:12.782069 17621 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:07:12.782081 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.782086 17621 net.cpp:165] Memory required for data: 880129500\nI0817 16:07:12.782091 17621 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:07:12.782099 17621 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:07:12.782112 17621 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:07:12.782119 17621 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.782130 17621 net.cpp:150] Setting up L2_b1_relu\nI0817 16:07:12.782136 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.782141 17621 net.cpp:165] Memory required for data: 882177500\nI0817 16:07:12.782146 17621 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:07:12.782155 17621 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:07:12.782166 17621 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:07:12.784431 17621 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:07:12.784449 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:07:12.784456 17621 net.cpp:165] Memory required for data: 884225500\nI0817 16:07:12.784461 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:07:12.784473 17621 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:07:12.784481 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:07:12.784487 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:07:12.784495 17621 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:07:12.784543 17621 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:07:12.784554 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.784559 17621 net.cpp:165] Memory required for data: 888321500\nI0817 16:07:12.784564 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.784572 17621 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.784579 17621 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:07:12.784590 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:07:12.784600 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:07:12.784649 17621 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:07:12.784663 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.784670 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.784674 17621 net.cpp:165] Memory required for data: 896513500\nI0817 16:07:12.784679 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:07:12.784693 17621 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:07:12.784700 17621 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:07:12.784709 17621 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:07:12.785214 17621 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:07:12.785228 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.785234 17621 net.cpp:165] Memory required for data: 900609500\nI0817 16:07:12.785243 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:07:12.785255 17621 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:07:12.785262 17621 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:07:12.785274 17621 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:07:12.785545 17621 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:07:12.785562 17621 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:07:12.785567 17621 net.cpp:165] Memory required for data: 904705500\nI0817 16:07:12.785578 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:07:12.785588 17621 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:07:12.785594 17621 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:07:12.785603 17621 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.785661 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:07:12.785825 17621 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:07:12.785840 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.785845 17621 net.cpp:165] Memory required for data: 908801500\nI0817 16:07:12.785853 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:07:12.785861 17621 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:07:12.785876 17621 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:07:12.785887 17621 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.785897 17621 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:07:12.785904 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.785908 17621 net.cpp:165] Memory required for data: 912897500\nI0817 16:07:12.785913 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:07:12.785926 17621 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:07:12.785933 17621 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:07:12.785941 17621 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:07:12.786437 17621 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:07:12.786451 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.786456 17621 net.cpp:165] Memory required for data: 916993500\nI0817 16:07:12.786465 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:07:12.786478 17621 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:07:12.786485 
17621 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:07:12.786494 17621 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:07:12.786763 17621 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:07:12.786777 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.786782 17621 net.cpp:165] Memory required for data: 921089500\nI0817 16:07:12.786793 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:07:12.786804 17621 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:07:12.786811 17621 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:07:12.786819 17621 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.786877 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:07:12.787039 17621 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:07:12.787051 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787056 17621 net.cpp:165] Memory required for data: 925185500\nI0817 16:07:12.787065 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:07:12.787077 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:07:12.787083 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:07:12.787091 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:07:12.787103 17621 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:07:12.787133 17621 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:07:12.787142 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787147 17621 net.cpp:165] Memory required for data: 929281500\nI0817 16:07:12.787153 17621 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:07:12.787159 17621 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:07:12.787165 17621 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:07:12.787175 17621 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.787185 17621 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:07:12.787192 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787197 17621 net.cpp:165] Memory required for data: 933377500\nI0817 16:07:12.787202 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.787209 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.787214 17621 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:07:12.787221 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:07:12.787231 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:07:12.787282 17621 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:07:12.787294 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787302 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787307 17621 net.cpp:165] Memory required for data: 941569500\nI0817 16:07:12.787317 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:07:12.787331 17621 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:07:12.787338 17621 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:07:12.787348 17621 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:07:12.787853 17621 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:07:12.787868 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.787873 17621 net.cpp:165] Memory required for data: 945665500\nI0817 16:07:12.787881 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:07:12.787894 17621 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:07:12.787900 17621 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:07:12.787909 17621 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:07:12.788175 17621 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:07:12.788192 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.788197 17621 net.cpp:165] Memory required for data: 949761500\nI0817 16:07:12.788208 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:07:12.788216 17621 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:07:12.788223 17621 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:07:12.788230 17621 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.788287 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:07:12.788447 17621 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:07:12.788460 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.788465 17621 net.cpp:165] Memory required for data: 953857500\nI0817 16:07:12.788475 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:07:12.788482 17621 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:07:12.788488 17621 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:07:12.788499 17621 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.788509 17621 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:07:12.788516 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.788522 17621 net.cpp:165] Memory required for data: 957953500\nI0817 16:07:12.788527 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:07:12.788540 17621 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:07:12.788547 17621 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:07:12.788556 17621 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:07:12.789055 17621 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:07:12.789069 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789075 17621 net.cpp:165] Memory required for data: 962049500\nI0817 16:07:12.789083 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:07:12.789093 17621 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:07:12.789099 17621 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:07:12.789110 17621 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:07:12.789382 17621 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:07:12.789397 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789400 17621 net.cpp:165] Memory required for data: 966145500\nI0817 16:07:12.789412 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:07:12.789422 17621 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:07:12.789429 17621 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:07:12.789436 17621 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.789494 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:07:12.789655 17621 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:07:12.789669 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789674 17621 net.cpp:165] Memory required for data: 970241500\nI0817 16:07:12.789682 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:07:12.789695 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:07:12.789707 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:07:12.789716 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:07:12.789726 17621 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:07:12.789762 17621 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:07:12.789772 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789777 17621 net.cpp:165] Memory required for data: 974337500\nI0817 16:07:12.789782 17621 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:07:12.789803 17621 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:07:12.789810 17621 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:07:12.789819 17621 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.789827 17621 net.cpp:150] Setting up L2_b3_relu\nI0817 16:07:12.789835 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789839 17621 net.cpp:165] Memory required for data: 978433500\nI0817 16:07:12.789845 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.789855 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.789860 17621 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:07:12.789868 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:07:12.789878 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:07:12.789930 17621 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:07:12.789943 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789950 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.789954 17621 net.cpp:165] Memory required for data: 986625500\nI0817 16:07:12.789959 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:07:12.789970 17621 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:07:12.789976 17621 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:07:12.789988 17621 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:07:12.790483 17621 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:07:12.790498 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.790503 17621 net.cpp:165] Memory required for data: 990721500\nI0817 16:07:12.790511 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:07:12.790520 17621 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:07:12.790526 17621 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:07:12.790539 17621 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:07:12.790818 17621 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:07:12.790832 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.790838 17621 net.cpp:165] Memory required for data: 994817500\nI0817 16:07:12.790848 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:07:12.790863 17621 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:07:12.790868 17621 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:07:12.790876 17621 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.790935 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:07:12.791095 17621 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:07:12.791107 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.791112 17621 net.cpp:165] Memory required for data: 998913500\nI0817 16:07:12.791122 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:07:12.791131 17621 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:07:12.791136 17621 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:07:12.791146 17621 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.791157 17621 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:07:12.791163 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.791168 17621 net.cpp:165] Memory required for data: 1003009500\nI0817 16:07:12.791182 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:07:12.791195 17621 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:07:12.791201 17621 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:07:12.791213 17621 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:07:12.791704 17621 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:07:12.791719 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.791724 17621 net.cpp:165] Memory required for data: 1007105500\nI0817 16:07:12.791733 17621 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:07:12.791743 17621 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:07:12.791749 17621 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:07:12.791764 17621 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:07:12.792032 17621 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:07:12.792044 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792049 17621 net.cpp:165] Memory required for data: 1011201500\nI0817 16:07:12.792060 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:07:12.792069 17621 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:07:12.792075 17621 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:07:12.792088 17621 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.792146 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:07:12.792311 17621 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:07:12.792325 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792330 17621 net.cpp:165] Memory required for data: 1015297500\nI0817 16:07:12.792338 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:07:12.792347 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:07:12.792353 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:07:12.792361 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:07:12.792371 17621 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:07:12.792399 17621 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:07:12.792412 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792418 17621 net.cpp:165] Memory required for data: 1019393500\nI0817 16:07:12.792423 17621 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:07:12.792430 17621 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:07:12.792436 17621 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:07:12.792443 17621 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.792453 17621 net.cpp:150] Setting up L2_b4_relu\nI0817 16:07:12.792459 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792464 17621 net.cpp:165] Memory required for data: 1023489500\nI0817 16:07:12.792469 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.792479 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.792484 17621 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:07:12.792491 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:07:12.792501 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:07:12.792548 17621 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:07:12.792567 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792573 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.792577 17621 net.cpp:165] Memory required for data: 1031681500\nI0817 16:07:12.792583 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:07:12.792593 17621 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:07:12.792599 17621 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:07:12.792608 17621 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:07:12.793123 17621 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:07:12.793141 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.793146 17621 net.cpp:165] Memory required for data: 1035777500\nI0817 16:07:12.793155 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:07:12.793165 17621 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:07:12.793171 17621 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:07:12.793182 17621 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:07:12.793453 17621 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:07:12.793467 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.793471 17621 net.cpp:165] Memory required for data: 1039873500\nI0817 16:07:12.793481 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:07:12.793493 17621 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:07:12.793499 17621 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:07:12.793507 17621 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.793561 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:07:12.793725 17621 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:07:12.793737 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.793742 17621 net.cpp:165] Memory required for data: 1043969500\nI0817 16:07:12.793756 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:07:12.793766 17621 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:07:12.793772 17621 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:07:12.793782 17621 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.793793 17621 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:07:12.793800 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.793804 17621 net.cpp:165] Memory required for data: 1048065500\nI0817 16:07:12.793809 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:07:12.793823 17621 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:07:12.793829 17621 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:07:12.793838 17621 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:07:12.794332 17621 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:07:12.794347 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.794351 17621 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:07:12.794359 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:07:12.794373 17621 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:07:12.794379 17621 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:07:12.794387 17621 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:07:12.794661 17621 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:07:12.794675 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.794680 17621 net.cpp:165] Memory required for data: 1056257500\nI0817 16:07:12.794690 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:07:12.794699 17621 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:07:12.794705 17621 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:07:12.794716 17621 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.794780 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:07:12.794939 17621 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:07:12.794951 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.794956 17621 net.cpp:165] Memory required for data: 1060353500\nI0817 16:07:12.794965 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:07:12.794975 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:07:12.794981 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:07:12.794987 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:07:12.794998 17621 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:07:12.795027 17621 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:07:12.795043 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.795048 17621 net.cpp:165] Memory required for data: 1064449500\nI0817 16:07:12.795053 17621 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:07:12.795064 17621 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:07:12.795070 17621 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:07:12.795078 17621 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.795087 17621 net.cpp:150] Setting up L2_b5_relu\nI0817 16:07:12.795094 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.795099 17621 net.cpp:165] Memory required for data: 1068545500\nI0817 16:07:12.795104 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.795114 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.795120 17621 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:07:12.795127 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:07:12.795136 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:07:12.795184 17621 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:07:12.795199 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.795207 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.795212 17621 net.cpp:165] Memory required for data: 1076737500\nI0817 16:07:12.795217 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:07:12.795228 17621 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:07:12.795233 17621 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:07:12.795243 17621 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:07:12.795742 17621 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:07:12.795763 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.795768 17621 net.cpp:165] Memory required for data: 1080833500\nI0817 16:07:12.795776 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:07:12.795789 17621 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:07:12.795795 17621 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:07:12.795804 17621 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:07:12.796070 17621 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:07:12.796084 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.796089 17621 net.cpp:165] Memory required for data: 1084929500\nI0817 16:07:12.796099 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:07:12.796108 17621 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:07:12.796115 17621 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:07:12.796125 17621 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.796185 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:07:12.796346 17621 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:07:12.796360 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.796365 17621 net.cpp:165] Memory required for data: 1089025500\nI0817 16:07:12.796373 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:07:12.796381 17621 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:07:12.796387 17621 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:07:12.796398 17621 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.796408 17621 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:07:12.796416 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.796419 17621 net.cpp:165] Memory required for data: 1093121500\nI0817 16:07:12.796424 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:07:12.796437 17621 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:07:12.796444 17621 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:07:12.796452 17621 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:07:12.796962 17621 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:07:12.796977 17621 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.796982 17621 net.cpp:165] Memory required for data: 1097217500\nI0817 16:07:12.796990 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:07:12.797003 17621 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:07:12.797009 17621 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:07:12.797018 17621 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:07:12.797307 17621 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:07:12.797322 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797327 17621 net.cpp:165] Memory required for data: 1101313500\nI0817 16:07:12.797338 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:07:12.797346 17621 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:07:12.797353 17621 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:07:12.797363 17621 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.797427 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:07:12.797587 17621 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:07:12.797600 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797605 17621 net.cpp:165] Memory required for data: 1105409500\nI0817 16:07:12.797614 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:07:12.797623 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:07:12.797629 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:07:12.797636 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:07:12.797647 17621 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:07:12.797675 17621 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:07:12.797684 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797689 17621 net.cpp:165] Memory required for data: 1109505500\nI0817 16:07:12.797694 17621 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:07:12.797703 17621 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:07:12.797713 17621 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:07:12.797719 17621 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.797729 17621 net.cpp:150] Setting up L2_b6_relu\nI0817 16:07:12.797735 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797740 17621 net.cpp:165] Memory required for data: 1113601500\nI0817 16:07:12.797745 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.797758 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.797765 17621 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:07:12.797775 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:07:12.797785 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:07:12.797833 17621 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:07:12.797845 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797852 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.797857 17621 net.cpp:165] Memory required for data: 1121793500\nI0817 16:07:12.797863 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:07:12.797878 17621 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:07:12.797884 17621 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:07:12.797894 17621 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:07:12.799392 17621 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:07:12.799409 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.799415 17621 net.cpp:165] Memory required for data: 1125889500\nI0817 16:07:12.799424 17621 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:07:12.799445 17621 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:07:12.799453 17621 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:07:12.799464 17621 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:07:12.799736 17621 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:07:12.799748 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.799760 17621 net.cpp:165] Memory required for data: 1129985500\nI0817 16:07:12.799772 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:07:12.799780 17621 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:07:12.799787 17621 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:07:12.799794 17621 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.799857 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:07:12.800016 17621 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:07:12.800029 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.800034 17621 net.cpp:165] Memory required for data: 1134081500\nI0817 16:07:12.800043 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:07:12.800052 17621 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:07:12.800058 17621 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:07:12.800068 17621 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.800079 17621 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:07:12.800086 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.800091 17621 net.cpp:165] Memory required for data: 1138177500\nI0817 16:07:12.800096 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:07:12.800109 17621 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:07:12.800117 17621 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:07:12.800124 17621 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:07:12.800621 17621 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:07:12.800634 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.800639 17621 net.cpp:165] Memory required for data: 1142273500\nI0817 16:07:12.800648 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:07:12.800660 17621 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:07:12.800668 17621 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:07:12.800675 17621 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:07:12.800953 17621 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:07:12.800971 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.800976 17621 net.cpp:165] Memory required for data: 1146369500\nI0817 16:07:12.800987 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:07:12.800994 17621 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:07:12.801002 17621 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:07:12.801009 17621 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.801067 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:07:12.801229 17621 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:07:12.801242 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.801247 17621 net.cpp:165] Memory required for data: 1150465500\nI0817 16:07:12.801256 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:07:12.801265 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:07:12.801272 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:07:12.801278 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:07:12.801290 17621 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:07:12.801318 17621 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:07:12.801327 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.801332 17621 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:07:12.801337 17621 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:07:12.801345 17621 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:07:12.801358 17621 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:07:12.801368 17621 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.801379 17621 net.cpp:150] Setting up L2_b7_relu\nI0817 16:07:12.801386 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.801391 17621 net.cpp:165] Memory required for data: 1158657500\nI0817 16:07:12.801395 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.801403 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.801409 17621 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:07:12.801415 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:07:12.801425 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:07:12.801479 17621 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:07:12.801491 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.801498 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.801503 17621 net.cpp:165] Memory required for data: 1166849500\nI0817 16:07:12.801508 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:07:12.801524 17621 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:07:12.801532 17621 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:07:12.801540 17621 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:07:12.802039 17621 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:07:12.802053 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.802059 17621 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:07:12.802068 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:07:12.802080 17621 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:07:12.802088 17621 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:07:12.802095 17621 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:07:12.802371 17621 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:07:12.802386 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.802392 17621 net.cpp:165] Memory required for data: 1175041500\nI0817 16:07:12.802402 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:07:12.802410 17621 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:07:12.802417 17621 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:07:12.802424 17621 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.802484 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:07:12.802646 17621 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:07:12.802659 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.802664 17621 net.cpp:165] Memory required for data: 1179137500\nI0817 16:07:12.802673 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:07:12.802681 17621 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:07:12.802687 17621 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:07:12.802698 17621 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.802708 17621 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:07:12.802716 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.802719 17621 net.cpp:165] Memory required for data: 1183233500\nI0817 16:07:12.802724 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:07:12.802738 17621 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:07:12.802744 17621 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:07:12.802758 17621 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:07:12.803243 17621 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:07:12.803257 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.803262 17621 net.cpp:165] Memory required for data: 1187329500\nI0817 16:07:12.803272 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:07:12.803287 17621 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:07:12.803294 17621 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:07:12.803305 17621 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:07:12.803583 17621 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:07:12.803596 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.803602 17621 net.cpp:165] Memory required for data: 1191425500\nI0817 16:07:12.803612 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:07:12.803624 17621 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:07:12.803630 17621 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:07:12.803638 17621 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.803699 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:07:12.803869 17621 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:07:12.803882 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.803887 17621 net.cpp:165] Memory required for data: 1195521500\nI0817 16:07:12.803896 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:07:12.803908 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:07:12.803915 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:07:12.803921 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:07:12.803935 17621 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:07:12.803963 17621 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:07:12.803972 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:07:12.803977 17621 net.cpp:165] Memory required for data: 1199617500\nI0817 16:07:12.803982 17621 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:07:12.803990 17621 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:07:12.803995 17621 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:07:12.804006 17621 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.804016 17621 net.cpp:150] Setting up L2_b8_relu\nI0817 16:07:12.804023 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.804028 17621 net.cpp:165] Memory required for data: 1203713500\nI0817 16:07:12.804033 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.804040 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.804045 17621 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:07:12.804052 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:07:12.804075 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:07:12.804128 17621 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:07:12.804141 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.804147 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.804152 17621 net.cpp:165] Memory required for data: 1211905500\nI0817 16:07:12.804157 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:07:12.804169 17621 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:07:12.804175 17621 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:07:12.804188 17621 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:07:12.804684 17621 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:07:12.804698 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:07:12.804703 17621 net.cpp:165] Memory required for data: 1216001500\nI0817 16:07:12.804713 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:07:12.804721 17621 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:07:12.804728 17621 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:07:12.804738 17621 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:07:12.805024 17621 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:07:12.805044 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.805049 17621 net.cpp:165] Memory required for data: 1220097500\nI0817 16:07:12.805060 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:07:12.805073 17621 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:07:12.805078 17621 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:07:12.805086 17621 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.805145 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:07:12.805310 17621 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:07:12.805322 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.805327 17621 net.cpp:165] Memory required for data: 1224193500\nI0817 16:07:12.805336 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:07:12.805347 17621 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:07:12.805354 17621 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:07:12.805361 17621 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.805371 17621 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:07:12.805378 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.805383 17621 net.cpp:165] Memory required for data: 1228289500\nI0817 16:07:12.805387 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:07:12.805402 17621 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:07:12.805408 17621 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:07:12.805419 17621 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:07:12.806921 17621 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:07:12.806939 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.806944 17621 net.cpp:165] Memory required for data: 1232385500\nI0817 16:07:12.806954 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:07:12.806967 17621 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:07:12.806974 17621 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:07:12.806985 17621 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:07:12.807252 17621 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:07:12.807268 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807274 17621 net.cpp:165] Memory required for data: 1236481500\nI0817 16:07:12.807319 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:07:12.807335 17621 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:07:12.807343 17621 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:07:12.807350 17621 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.807409 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:07:12.807562 17621 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:07:12.807576 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807581 17621 net.cpp:165] Memory required for data: 1240577500\nI0817 16:07:12.807590 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:07:12.807600 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:07:12.807606 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:07:12.807613 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:07:12.807624 17621 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:07:12.807652 17621 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:07:12.807665 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807670 17621 net.cpp:165] Memory required for data: 1244673500\nI0817 16:07:12.807675 17621 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:07:12.807684 17621 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:07:12.807690 17621 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:07:12.807698 17621 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.807706 17621 net.cpp:150] Setting up L2_b9_relu\nI0817 16:07:12.807713 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807726 17621 net.cpp:165] Memory required for data: 1248769500\nI0817 16:07:12.807732 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.807742 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.807749 17621 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:07:12.807762 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:07:12.807773 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:07:12.807833 17621 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:07:12.807845 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807852 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:07:12.807857 17621 net.cpp:165] Memory required for data: 1256961500\nI0817 16:07:12.807862 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:07:12.807873 17621 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:07:12.807879 17621 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:07:12.807893 17621 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:07:12.808393 17621 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:07:12.808408 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.808413 17621 net.cpp:165] Memory required for data: 1257985500\nI0817 16:07:12.808421 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:07:12.808431 17621 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:07:12.808437 17621 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:07:12.808449 17621 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:07:12.808725 17621 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:07:12.808742 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.808748 17621 net.cpp:165] Memory required for data: 1259009500\nI0817 16:07:12.808763 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:07:12.808773 17621 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:07:12.808779 17621 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:07:12.808787 17621 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.808845 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:07:12.809010 17621 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:07:12.809025 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.809029 17621 net.cpp:165] Memory required for data: 1260033500\nI0817 16:07:12.809038 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:07:12.809046 17621 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:07:12.809053 17621 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:07:12.809063 17621 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:07:12.809073 17621 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:07:12.809080 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.809084 17621 net.cpp:165] Memory required for data: 1261057500\nI0817 16:07:12.809089 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:07:12.809103 17621 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:07:12.809109 17621 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:07:12.809118 17621 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:07:12.809605 17621 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:07:12.809619 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.809624 17621 net.cpp:165] Memory required for data: 1262081500\nI0817 16:07:12.809633 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:07:12.809645 17621 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:07:12.809653 17621 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:07:12.809664 17621 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:07:12.809947 17621 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:07:12.809967 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.809973 17621 net.cpp:165] Memory required for data: 1263105500\nI0817 16:07:12.809983 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:07:12.809993 17621 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:07:12.809998 17621 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:07:12.810006 17621 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:07:12.810066 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:07:12.810232 17621 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:07:12.810245 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.810250 17621 net.cpp:165] Memory required for data: 1264129500\nI0817 16:07:12.810259 17621 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:07:12.810268 17621 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:07:12.810276 17621 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:07:12.810286 17621 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:07:12.810324 17621 net.cpp:150] Setting up L3_b1_pool\nI0817 16:07:12.810334 17621 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:07:12.810339 17621 net.cpp:165] Memory required for data: 1265153500\nI0817 16:07:12.810344 17621 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:07:12.810353 17621 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:07:12.810359 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:07:12.810365 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:07:12.810375 17621 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:07:12.810408 17621 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:07:12.810418 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.810422 17621 net.cpp:165] Memory required for data: 1266177500\nI0817 16:07:12.810427 17621 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:07:12.810436 17621 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:07:12.810441 17621 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:07:12.810448 17621 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:07:12.810457 17621 net.cpp:150] Setting up L3_b1_relu\nI0817 16:07:12.810464 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.810468 17621 net.cpp:165] Memory required for data: 1267201500\nI0817 16:07:12.810473 17621 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:07:12.810487 17621 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:07:12.810495 17621 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:07:12.811771 17621 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:07:12.811792 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:07:12.811797 17621 net.cpp:165] Memory required for data: 1268225500\nI0817 16:07:12.811803 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:07:12.811812 17621 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:07:12.811820 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:07:12.811827 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:07:12.811835 17621 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:07:12.811882 17621 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:07:12.811893 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.811900 17621 net.cpp:165] Memory required for data: 1270273500\nI0817 16:07:12.811906 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.811914 17621 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.811920 17621 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:07:12.811928 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:07:12.811939 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:07:12.811995 17621 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:07:12.812008 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.812026 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.812031 17621 net.cpp:165] Memory required for data: 1274369500\nI0817 16:07:12.812036 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:07:12.812050 17621 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:07:12.812057 17621 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:07:12.812067 17621 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:07:12.813120 17621 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:07:12.813135 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.813140 17621 net.cpp:165] Memory required for data: 1276417500\nI0817 16:07:12.813149 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:07:12.813163 17621 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:07:12.813169 17621 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:07:12.813180 17621 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:07:12.813454 17621 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:07:12.813467 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.813472 17621 net.cpp:165] Memory required for data: 1278465500\nI0817 16:07:12.813483 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:07:12.813491 17621 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:07:12.813498 17621 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:07:12.813509 17621 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.813570 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:07:12.813733 17621 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:07:12.813746 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.813756 17621 net.cpp:165] Memory required for data: 1280513500\nI0817 16:07:12.813767 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:07:12.813776 17621 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:07:12.813782 17621 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:07:12.813792 17621 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:07:12.813803 17621 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:07:12.813810 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.813815 17621 net.cpp:165] Memory required for data: 1282561500\nI0817 16:07:12.813820 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:07:12.813834 17621 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:07:12.813840 17621 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:07:12.813853 17621 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:07:12.814903 17621 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:07:12.814918 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.814923 17621 net.cpp:165] Memory required for data: 1284609500\nI0817 16:07:12.814932 17621 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:07:12.814941 17621 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:07:12.814947 17621 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:07:12.814960 17621 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:07:12.815237 17621 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:07:12.815253 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815258 17621 net.cpp:165] Memory required for data: 1286657500\nI0817 16:07:12.815268 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:07:12.815277 17621 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:07:12.815284 17621 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:07:12.815291 17621 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:07:12.815351 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:07:12.815513 17621 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:07:12.815526 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815531 17621 net.cpp:165] Memory required for data: 1288705500\nI0817 16:07:12.815548 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:07:12.815560 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:07:12.815567 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:07:12.815575 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:07:12.815583 17621 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:07:12.815620 17621 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:07:12.815632 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815637 17621 net.cpp:165] Memory required for data: 1290753500\nI0817 16:07:12.815642 17621 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:07:12.815649 17621 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:07:12.815655 17621 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:07:12.815662 17621 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:07:12.815672 17621 net.cpp:150] Setting up L3_b2_relu\nI0817 16:07:12.815680 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815683 17621 net.cpp:165] Memory required for data: 1292801500\nI0817 16:07:12.815688 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.815696 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.815701 17621 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:07:12.815711 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:07:12.815721 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:07:12.815774 17621 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:07:12.815788 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815793 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.815798 17621 net.cpp:165] Memory required for data: 1296897500\nI0817 16:07:12.815804 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:07:12.815819 17621 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:07:12.815825 17621 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:07:12.815834 17621 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:07:12.816876 17621 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:07:12.816891 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.816896 17621 net.cpp:165] Memory required for data: 1298945500\nI0817 16:07:12.816905 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:07:12.816915 17621 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:07:12.816926 17621 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:07:12.816933 17621 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:07:12.817203 17621 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:07:12.817217 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.817222 17621 net.cpp:165] Memory required for data: 1300993500\nI0817 16:07:12.817232 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:07:12.817241 17621 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:07:12.817247 17621 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:07:12.817258 17621 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.817317 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:07:12.817481 17621 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:07:12.817494 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.817499 17621 net.cpp:165] Memory required for data: 1303041500\nI0817 16:07:12.817508 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:07:12.817519 17621 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:07:12.817526 17621 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:07:12.817534 17621 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:07:12.817543 17621 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:07:12.817558 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.817562 17621 net.cpp:165] Memory required for data: 1305089500\nI0817 16:07:12.817567 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:07:12.817584 17621 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:07:12.817589 17621 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:07:12.817600 17621 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:07:12.818645 17621 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:07:12.818660 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.818665 17621 net.cpp:165] Memory required for data: 1307137500\nI0817 16:07:12.818675 17621 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:07:12.818683 17621 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:07:12.818691 17621 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:07:12.818701 17621 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:07:12.818980 17621 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:07:12.818998 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819005 17621 net.cpp:165] Memory required for data: 1309185500\nI0817 16:07:12.819015 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:07:12.819025 17621 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:07:12.819031 17621 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:07:12.819038 17621 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:07:12.819098 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:07:12.819259 17621 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:07:12.819272 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819278 17621 net.cpp:165] Memory required for data: 1311233500\nI0817 16:07:12.819286 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:07:12.819298 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:07:12.819305 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:07:12.819313 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:07:12.819320 17621 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:07:12.819357 17621 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:07:12.819370 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819375 17621 net.cpp:165] Memory required for data: 1313281500\nI0817 16:07:12.819380 17621 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:07:12.819387 17621 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:07:12.819393 17621 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:07:12.819401 17621 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:07:12.819411 17621 net.cpp:150] Setting up L3_b3_relu\nI0817 16:07:12.819417 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819422 17621 net.cpp:165] Memory required for data: 1315329500\nI0817 16:07:12.819427 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.819433 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.819438 17621 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:07:12.819449 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:07:12.819459 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:07:12.819506 17621 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:07:12.819519 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819525 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.819530 17621 net.cpp:165] Memory required for data: 1319425500\nI0817 16:07:12.819535 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:07:12.819550 17621 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:07:12.819555 17621 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:07:12.819572 17621 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:07:12.820627 17621 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:07:12.820642 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.820647 17621 net.cpp:165] Memory required for data: 1321473500\nI0817 16:07:12.820657 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:07:12.820669 17621 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:07:12.820675 17621 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:07:12.820684 17621 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:07:12.820958 17621 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:07:12.820972 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.820977 17621 net.cpp:165] Memory required for data: 1323521500\nI0817 16:07:12.820987 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:07:12.820999 17621 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:07:12.821007 17621 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:07:12.821014 17621 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.821079 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:07:12.821240 17621 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:07:12.821254 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.821259 17621 net.cpp:165] Memory required for data: 1325569500\nI0817 16:07:12.821269 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:07:12.821280 17621 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:07:12.821285 17621 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:07:12.821293 17621 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:07:12.821303 17621 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:07:12.821310 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.821315 17621 net.cpp:165] Memory required for data: 1327617500\nI0817 16:07:12.821319 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:07:12.821333 17621 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:07:12.821339 17621 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:07:12.821350 17621 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:07:12.823375 17621 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:07:12.823393 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.823398 17621 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:07:12.823407 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:07:12.823421 17621 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:07:12.823428 17621 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:07:12.823436 17621 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:07:12.823710 17621 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:07:12.823724 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.823729 17621 net.cpp:165] Memory required for data: 1331713500\nI0817 16:07:12.823740 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:07:12.823748 17621 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:07:12.823760 17621 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:07:12.823772 17621 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:07:12.823833 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:07:12.823998 17621 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:07:12.824012 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.824017 17621 net.cpp:165] Memory required for data: 1333761500\nI0817 16:07:12.824025 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:07:12.824038 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:07:12.824045 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:07:12.824053 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:07:12.824060 17621 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:07:12.824106 17621 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:07:12.824116 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.824121 17621 net.cpp:165] Memory required for data: 1335809500\nI0817 16:07:12.824149 17621 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:07:12.824157 17621 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:07:12.824164 17621 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:07:12.824174 17621 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:07:12.824185 17621 net.cpp:150] Setting up L3_b4_relu\nI0817 16:07:12.824193 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.824196 17621 net.cpp:165] Memory required for data: 1337857500\nI0817 16:07:12.824201 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.824214 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.824219 17621 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:07:12.824228 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:07:12.824237 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:07:12.824288 17621 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:07:12.824301 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.824307 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.824312 17621 net.cpp:165] Memory required for data: 1341953500\nI0817 16:07:12.824317 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:07:12.824328 17621 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:07:12.824335 17621 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:07:12.824347 17621 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:07:12.825390 17621 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:07:12.825407 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.825412 17621 net.cpp:165] Memory required for data: 1344001500\nI0817 16:07:12.825420 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:07:12.825433 17621 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:07:12.825439 17621 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:07:12.825448 17621 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:07:12.825728 17621 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:07:12.825742 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.825747 17621 net.cpp:165] Memory required for data: 1346049500\nI0817 16:07:12.825763 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:07:12.825773 17621 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:07:12.825779 17621 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:07:12.825786 17621 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.825850 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:07:12.826014 17621 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:07:12.826026 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.826031 17621 net.cpp:165] Memory required for data: 1348097500\nI0817 16:07:12.826041 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:07:12.826050 17621 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:07:12.826056 17621 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:07:12.826063 17621 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:07:12.826072 17621 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:07:12.826079 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.826084 17621 net.cpp:165] Memory required for data: 1350145500\nI0817 16:07:12.826088 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:07:12.826104 17621 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:07:12.826112 17621 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:07:12.826129 17621 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:07:12.827165 17621 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:07:12.827180 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:07:12.827185 17621 net.cpp:165] Memory required for data: 1352193500\nI0817 16:07:12.827194 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:07:12.827206 17621 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:07:12.827214 17621 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:07:12.827221 17621 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:07:12.827491 17621 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:07:12.827503 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.827508 17621 net.cpp:165] Memory required for data: 1354241500\nI0817 16:07:12.827519 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:07:12.827531 17621 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:07:12.827539 17621 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:07:12.827546 17621 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:07:12.827610 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:07:12.827782 17621 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:07:12.827795 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.827800 17621 net.cpp:165] Memory required for data: 1356289500\nI0817 16:07:12.827810 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:07:12.827822 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:07:12.827829 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:07:12.827836 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:07:12.827844 17621 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:07:12.827881 17621 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:07:12.827893 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.827898 17621 net.cpp:165] Memory required for data: 1358337500\nI0817 16:07:12.827903 17621 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:07:12.827911 17621 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:07:12.827917 17621 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:07:12.827927 17621 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:07:12.827939 17621 net.cpp:150] Setting up L3_b5_relu\nI0817 16:07:12.827945 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.827950 17621 net.cpp:165] Memory required for data: 1360385500\nI0817 16:07:12.827955 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.827961 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.827967 17621 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:07:12.827975 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:07:12.827985 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:07:12.828033 17621 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:07:12.828045 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.828052 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.828057 17621 net.cpp:165] Memory required for data: 1364481500\nI0817 16:07:12.828061 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:07:12.828074 17621 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:07:12.828081 17621 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:07:12.828090 17621 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:07:12.829147 17621 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:07:12.829164 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.829169 17621 net.cpp:165] Memory required for data: 1366529500\nI0817 16:07:12.829177 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:07:12.829196 
17621 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:07:12.829203 17621 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:07:12.829212 17621 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:07:12.829489 17621 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:07:12.829504 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.829509 17621 net.cpp:165] Memory required for data: 1368577500\nI0817 16:07:12.829519 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:07:12.829527 17621 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:07:12.829535 17621 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:07:12.829541 17621 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.829605 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:07:12.829779 17621 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:07:12.829793 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.829798 17621 net.cpp:165] Memory required for data: 1370625500\nI0817 16:07:12.829807 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:07:12.829815 17621 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:07:12.829821 17621 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:07:12.829833 17621 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:07:12.829843 17621 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:07:12.829849 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.829854 17621 net.cpp:165] Memory required for data: 1372673500\nI0817 16:07:12.829859 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:07:12.829874 17621 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:07:12.829880 17621 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:07:12.829890 17621 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:07:12.830930 17621 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:07:12.830945 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.830950 17621 net.cpp:165] Memory required for data: 1374721500\nI0817 16:07:12.830960 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:07:12.830972 17621 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:07:12.830979 17621 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:07:12.830988 17621 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:07:12.831267 17621 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:07:12.831280 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831285 17621 net.cpp:165] Memory required for data: 1376769500\nI0817 16:07:12.831296 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:07:12.831308 17621 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:07:12.831315 17621 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:07:12.831323 17621 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:07:12.831385 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:07:12.831547 17621 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:07:12.831560 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831565 17621 net.cpp:165] Memory required for data: 1378817500\nI0817 16:07:12.831574 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:07:12.831586 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:07:12.831593 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:07:12.831600 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:07:12.831610 17621 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:07:12.831646 17621 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:07:12.831657 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831662 17621 net.cpp:165] Memory required for data: 1380865500\nI0817 16:07:12.831667 17621 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:07:12.831678 17621 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:07:12.831691 17621 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:07:12.831699 17621 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:07:12.831709 17621 net.cpp:150] Setting up L3_b6_relu\nI0817 16:07:12.831717 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831720 17621 net.cpp:165] Memory required for data: 1382913500\nI0817 16:07:12.831725 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.831732 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.831738 17621 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:07:12.831745 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:07:12.831761 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:07:12.831815 17621 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:07:12.831827 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831835 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.831840 17621 net.cpp:165] Memory required for data: 1387009500\nI0817 16:07:12.831845 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:07:12.831858 17621 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:07:12.831866 17621 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:07:12.831874 17621 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:07:12.832906 17621 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:07:12.832921 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.832926 17621 net.cpp:165] Memory required for data: 1389057500\nI0817 16:07:12.832934 17621 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:07:12.832947 17621 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:07:12.832953 17621 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:07:12.832967 17621 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:07:12.833405 17621 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:07:12.833420 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.833426 17621 net.cpp:165] Memory required for data: 1391105500\nI0817 16:07:12.833436 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:07:12.833446 17621 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:07:12.833452 17621 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:07:12.833461 17621 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.833525 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:07:12.833691 17621 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:07:12.833704 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.833709 17621 net.cpp:165] Memory required for data: 1393153500\nI0817 16:07:12.833719 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:07:12.833760 17621 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:07:12.833771 17621 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:07:12.833780 17621 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:07:12.833791 17621 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:07:12.833797 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.833802 17621 net.cpp:165] Memory required for data: 1395201500\nI0817 16:07:12.833807 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:07:12.833822 17621 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:07:12.833828 17621 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:07:12.833837 17621 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:07:12.834879 17621 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:07:12.834895 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.834900 17621 net.cpp:165] Memory required for data: 1397249500\nI0817 16:07:12.834909 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:07:12.834929 17621 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:07:12.834938 17621 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:07:12.834945 17621 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:07:12.835220 17621 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:07:12.835234 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835239 17621 net.cpp:165] Memory required for data: 1399297500\nI0817 16:07:12.835249 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:07:12.835258 17621 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:07:12.835264 17621 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:07:12.835273 17621 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:07:12.835335 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:07:12.835503 17621 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:07:12.835516 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835521 17621 net.cpp:165] Memory required for data: 1401345500\nI0817 16:07:12.835530 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:07:12.835539 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:07:12.835546 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:07:12.835552 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:07:12.835563 17621 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:07:12.835598 17621 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:07:12.835613 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835618 17621 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:07:12.835623 17621 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:07:12.835631 17621 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:07:12.835636 17621 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:07:12.835644 17621 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:07:12.835654 17621 net.cpp:150] Setting up L3_b7_relu\nI0817 16:07:12.835660 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835664 17621 net.cpp:165] Memory required for data: 1405441500\nI0817 16:07:12.835669 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.835680 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.835685 17621 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:07:12.835693 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:07:12.835703 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:07:12.835758 17621 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:07:12.835772 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835778 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.835783 17621 net.cpp:165] Memory required for data: 1409537500\nI0817 16:07:12.835788 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:07:12.835799 17621 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:07:12.835806 17621 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:07:12.835819 17621 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:07:12.837862 17621 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:07:12.837879 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.837885 17621 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:07:12.837894 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:07:12.837908 17621 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:07:12.837915 17621 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:07:12.837926 17621 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:07:12.838203 17621 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:07:12.838217 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.838229 17621 net.cpp:165] Memory required for data: 1413633500\nI0817 16:07:12.838241 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:07:12.838250 17621 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:07:12.838258 17621 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:07:12.838268 17621 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.838335 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:07:12.838501 17621 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:07:12.838515 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.838520 17621 net.cpp:165] Memory required for data: 1415681500\nI0817 16:07:12.838528 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:07:12.838537 17621 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:07:12.838543 17621 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:07:12.838553 17621 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:07:12.838564 17621 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:07:12.838572 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.838577 17621 net.cpp:165] Memory required for data: 1417729500\nI0817 16:07:12.838582 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:07:12.838595 17621 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:07:12.838601 17621 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:07:12.838610 17621 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:07:12.839644 17621 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:07:12.839659 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.839665 17621 net.cpp:165] Memory required for data: 1419777500\nI0817 16:07:12.839674 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:07:12.839686 17621 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:07:12.839694 17621 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:07:12.839702 17621 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:07:12.839982 17621 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:07:12.839996 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.840003 17621 net.cpp:165] Memory required for data: 1421825500\nI0817 16:07:12.840013 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:07:12.840024 17621 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:07:12.840032 17621 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:07:12.840042 17621 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:07:12.840102 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:07:12.840270 17621 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:07:12.840283 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.840288 17621 net.cpp:165] Memory required for data: 1423873500\nI0817 16:07:12.840297 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:07:12.840306 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:07:12.840313 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:07:12.840320 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:07:12.840332 17621 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:07:12.840365 17621 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:07:12.840378 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:07:12.840381 17621 net.cpp:165] Memory required for data: 1425921500\nI0817 16:07:12.840387 17621 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:07:12.840399 17621 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:07:12.840405 17621 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:07:12.840412 17621 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:07:12.840422 17621 net.cpp:150] Setting up L3_b8_relu\nI0817 16:07:12.840430 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.840435 17621 net.cpp:165] Memory required for data: 1427969500\nI0817 16:07:12.840448 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.840456 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.840461 17621 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:07:12.840468 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:07:12.840478 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:07:12.840530 17621 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:07:12.840543 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.840550 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.840554 17621 net.cpp:165] Memory required for data: 1432065500\nI0817 16:07:12.840560 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:07:12.840574 17621 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:07:12.840580 17621 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:07:12.840590 17621 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:07:12.841614 17621 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:07:12.841629 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:07:12.841634 17621 net.cpp:165] Memory required for data: 1434113500\nI0817 16:07:12.841642 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:07:12.841655 17621 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:07:12.841661 17621 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:07:12.841672 17621 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:07:12.841953 17621 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:07:12.841966 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.841971 17621 net.cpp:165] Memory required for data: 1436161500\nI0817 16:07:12.841982 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:07:12.841991 17621 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:07:12.841998 17621 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:07:12.842008 17621 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.842072 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:07:12.842234 17621 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:07:12.842247 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.842252 17621 net.cpp:165] Memory required for data: 1438209500\nI0817 16:07:12.842262 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:07:12.842269 17621 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:07:12.842275 17621 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:07:12.842285 17621 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:07:12.842296 17621 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:07:12.842303 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.842308 17621 net.cpp:165] Memory required for data: 1440257500\nI0817 16:07:12.842312 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:07:12.842326 17621 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:07:12.842332 17621 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:07:12.842344 17621 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:07:12.843372 17621 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:07:12.843387 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.843392 17621 net.cpp:165] Memory required for data: 1442305500\nI0817 16:07:12.843401 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:07:12.843411 17621 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:07:12.843417 17621 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:07:12.843431 17621 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:07:12.843706 17621 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:07:12.843722 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.843734 17621 net.cpp:165] Memory required for data: 1444353500\nI0817 16:07:12.843745 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:07:12.843760 17621 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:07:12.843767 17621 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:07:12.843775 17621 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:07:12.843835 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:07:12.843996 17621 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:07:12.844008 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.844013 17621 net.cpp:165] Memory required for data: 1446401500\nI0817 16:07:12.844023 17621 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:07:12.844032 17621 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:07:12.844043 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:07:12.844050 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:07:12.844058 17621 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:07:12.844095 17621 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:07:12.844107 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.844112 17621 net.cpp:165] Memory required for data: 1448449500\nI0817 16:07:12.844117 17621 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:07:12.844125 17621 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:07:12.844131 17621 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:07:12.844138 17621 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:07:12.844147 17621 net.cpp:150] Setting up L3_b9_relu\nI0817 16:07:12.844154 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:07:12.844159 17621 net.cpp:165] Memory required for data: 1450497500\nI0817 16:07:12.844163 17621 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:07:12.844172 17621 net.cpp:100] Creating Layer post_pool\nI0817 16:07:12.844177 17621 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:07:12.844187 17621 net.cpp:408] post_pool -> post_pool\nI0817 16:07:12.844224 17621 net.cpp:150] Setting up post_pool\nI0817 16:07:12.844236 17621 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:07:12.844241 17621 net.cpp:165] Memory required for data: 1450529500\nI0817 16:07:12.844246 17621 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:07:12.844259 17621 net.cpp:100] Creating Layer post_FC\nI0817 16:07:12.844264 17621 net.cpp:434] post_FC <- post_pool\nI0817 16:07:12.844274 17621 net.cpp:408] post_FC -> post_FC_top\nI0817 16:07:12.844439 17621 net.cpp:150] Setting up post_FC\nI0817 16:07:12.844452 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.844457 17621 net.cpp:165] Memory required for data: 1450534500\nI0817 16:07:12.844466 17621 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:07:12.844475 17621 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:07:12.844480 17621 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:07:12.844491 17621 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:07:12.844501 17621 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:07:12.844552 17621 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:07:12.844568 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.844573 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:07:12.844578 17621 net.cpp:165] Memory required for data: 1450544500\nI0817 16:07:12.844583 17621 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:07:12.844591 17621 net.cpp:100] Creating Layer accuracy\nI0817 16:07:12.844597 17621 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:07:12.844604 17621 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:07:12.844612 17621 net.cpp:408] accuracy -> accuracy\nI0817 16:07:12.844625 17621 net.cpp:150] Setting up accuracy\nI0817 16:07:12.844632 17621 net.cpp:157] Top shape: (1)\nI0817 16:07:12.844643 17621 net.cpp:165] Memory required for data: 1450544504\nI0817 16:07:12.844648 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:07:12.844656 17621 net.cpp:100] Creating Layer loss\nI0817 16:07:12.844662 17621 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:07:12.844668 17621 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:07:12.844678 17621 net.cpp:408] loss -> loss\nI0817 16:07:12.844691 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:07:12.844825 17621 net.cpp:150] Setting up loss\nI0817 16:07:12.844840 17621 net.cpp:157] Top shape: (1)\nI0817 16:07:12.844844 17621 net.cpp:160]     with loss weight 1\nI0817 16:07:12.844861 17621 net.cpp:165] Memory required for data: 1450544508\nI0817 16:07:12.844866 17621 net.cpp:226] loss needs backward computation.\nI0817 16:07:12.844873 17621 net.cpp:228] accuracy does not need backward computation.\nI0817 16:07:12.844879 17621 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:07:12.844884 17621 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:07:12.844889 17621 net.cpp:226] post_pool needs backward computation.\nI0817 16:07:12.844894 17621 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:07:12.844899 17621 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.844904 17621 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.844909 17621 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.844913 17621 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.844919 17621 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.844923 17621 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.844928 17621 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.844933 17621 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.844938 17621 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:07:12.844944 17621 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:07:12.844949 17621 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.844954 17621 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.844959 17621 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.844964 17621 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.844969 17621 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.844974 17621 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:07:12.844979 17621 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.844983 17621 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.844988 17621 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:07:12.844995 17621 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:07:12.844998 17621 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:07:12.845005 17621 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.845010 17621 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.845015 17621 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.845019 17621 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.845024 17621 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.845028 17621 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.845038 17621 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.845044 17621 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:07:12.845051 17621 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:07:12.845055 17621 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.845060 17621 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.845065 17621 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.845077 17621 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.845083 17621 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.845088 17621 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.845093 17621 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.845098 17621 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.845103 17621 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:07:12.845108 17621 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:07:12.845113 17621 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:07:12.845119 17621 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.845124 17621 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.845129 17621 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.845134 17621 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.845139 17621 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.845144 17621 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.845149 17621 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.845155 17621 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:07:12.845160 17621 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:07:12.845165 17621 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.845171 17621 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.845175 17621 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.845181 17621 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.845187 17621 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.845191 17621 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.845196 17621 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.845201 17621 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.845207 17621 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:07:12.845212 17621 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:07:12.845217 17621 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:07:12.845222 17621 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.845227 17621 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.845233 17621 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.845238 17621 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.845243 17621 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.845247 
17621 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.845253 17621 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.845258 17621 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:07:12.845263 17621 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:07:12.845269 17621 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.845274 17621 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.845279 17621 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.845285 17621 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.845290 17621 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.845295 17621 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.845300 17621 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.845305 17621 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.845310 17621 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:07:12.845319 17621 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:07:12.845331 17621 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:07:12.845336 17621 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:07:12.845341 17621 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.845347 17621 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:07:12.845353 17621 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.845358 17621 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.845363 17621 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.845369 17621 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.845374 17621 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:07:12.845379 17621 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.845384 17621 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.845391 17621 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:07:12.845396 17621 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:07:12.845401 17621 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.845407 17621 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.845412 17621 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.845417 17621 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.845422 17621 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.845427 17621 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.845432 17621 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.845438 17621 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.845443 17621 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:07:12.845448 17621 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:07:12.845453 17621 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.845459 17621 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.845464 17621 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.845470 17621 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.845475 17621 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.845480 17621 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:07:12.845485 17621 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.845491 17621 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.845496 17621 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:07:12.845502 17621 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:07:12.845507 17621 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:07:12.845513 17621 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.845518 17621 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.845525 17621 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.845530 17621 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.845535 17621 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.845540 17621 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.845546 17621 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.845551 17621 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:07:12.845556 17621 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:07:12.845561 17621 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.845567 17621 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.845572 17621 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.845578 17621 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.845583 17621 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.845593 17621 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.845598 17621 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.845604 17621 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.845609 17621 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:07:12.845615 17621 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:07:12.845620 17621 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:07:12.845626 17621 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.845631 17621 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.845638 17621 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.845643 17621 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.845649 17621 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.845654 17621 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.845659 17621 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.845664 17621 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:07:12.845669 17621 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:07:12.845674 17621 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.845687 17621 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.845693 17621 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.845700 17621 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.845705 17621 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.845710 17621 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.845715 17621 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.845721 17621 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.845726 17621 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:07:12.845732 17621 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:07:12.845738 17621 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:07:12.845744 17621 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.845749 17621 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.845762 17621 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.845767 17621 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.845772 17621 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.845777 17621 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.845783 17621 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.845789 17621 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:07:12.845794 17621 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:07:12.845800 17621 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.845806 17621 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.845811 17621 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.845818 17621 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.845823 17621 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.845829 17621 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.845834 17621 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.845839 17621 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.845844 17621 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:07:12.845850 17621 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:07:12.845857 17621 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:07:12.845862 17621 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:07:12.845873 17621 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.845880 17621 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:07:12.845885 17621 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.845891 17621 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.845897 17621 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.845902 17621 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.845908 17621 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:07:12.845913 17621 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.845919 17621 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.845926 17621 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:07:12.845930 17621 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:07:12.845937 17621 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:07:12.845942 17621 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:07:12.845947 17621 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:07:12.845953 17621 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:07:12.845958 17621 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:07:12.845964 17621 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:07:12.845969 17621 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:07:12.845974 17621 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:07:12.845980 17621 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:07:12.845986 17621 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:07:12.845991 17621 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:07:12.845998 17621 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:07:12.846002 17621 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:07:12.846009 17621 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:07:12.846014 17621 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:07:12.846019 17621 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:07:12.846024 17621 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:07:12.846030 17621 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:07:12.846036 17621 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:07:12.846042 17621 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:07:12.846047 17621 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:07:12.846055 17621 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:07:12.846060 17621 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:07:12.846065 17621 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:07:12.846071 17621 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:07:12.846076 17621 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:07:12.846082 17621 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:07:12.846087 17621 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:07:12.846093 17621 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:07:12.846099 17621 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:07:12.846104 17621 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:07:12.846112 17621 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:07:12.846117 17621 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:07:12.846122 17621 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:07:12.846128 17621 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:07:12.846133 17621 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:07:12.846143 17621 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:07:12.846149 17621 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:07:12.846155 17621 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:07:12.846161 17621 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:07:12.846168 17621 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:07:12.846174 17621 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:07:12.846179 17621 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:07:12.846185 17621 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:07:12.846191 17621 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:07:12.846196 17621 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:07:12.846202 17621 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:07:12.846209 17621 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:07:12.846213 17621 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:07:12.846220 17621 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:07:12.846225 17621 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:07:12.846231 17621 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:07:12.846237 17621 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:07:12.846243 17621 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:07:12.846248 17621 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:07:12.846254 17621 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:07:12.846259 17621 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:07:12.846266 17621 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:07:12.846271 17621 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:07:12.846277 17621 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:07:12.846283 17621 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:07:12.846289 17621 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:07:12.846295 17621 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:07:12.846302 17621 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:07:12.846307 17621 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:07:12.846312 17621 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:07:12.846318 17621 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:07:12.846323 17621 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:07:12.846329 17621 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:07:12.846335 17621 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:07:12.846341 17621 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:07:12.846348 17621 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:07:12.846354 17621 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:07:12.846359 17621 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:07:12.846364 17621 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:07:12.846370 17621 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:07:12.846375 17621 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:07:12.846381 17621 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:07:12.846387 17621 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:07:12.846395 17621 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:07:12.846402 17621 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:07:12.846408 17621 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:07:12.846415 17621 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:07:12.846426 17621 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:07:12.846432 17621 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:07:12.846438 17621 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:07:12.846443 17621 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:07:12.846449 17621 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:07:12.846456 17621 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:07:12.846460 17621 net.cpp:226] pre_relu needs backward computation.\nI0817 16:07:12.846465 17621 net.cpp:226] pre_scale needs backward computation.\nI0817 16:07:12.846470 17621 net.cpp:226] pre_bn needs backward computation.\nI0817 16:07:12.846477 17621 net.cpp:226] pre_conv needs backward computation.\nI0817 16:07:12.846483 17621 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:07:12.846489 17621 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:07:12.846494 17621 net.cpp:270] This network produces output accuracy\nI0817 16:07:12.846500 17621 net.cpp:270] This network produces output loss\nI0817 16:07:12.846830 17621 net.cpp:283] Network initialization done.\nI0817 16:07:12.847833 17621 solver.cpp:60] Solver scaffolding done.\nI0817 16:07:13.073093 17621 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:07:13.437458 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:13.437536 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:13.444247 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:13.674471 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:13.674589 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:13.709681 17621 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:07:13.709794 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:14.162582 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:14.162662 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:14.170369 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:14.421990 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:14.422097 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:14.474215 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:14.474319 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:14.991603 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:14.991659 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:15.000500 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:15.267549 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:15.267715 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:15.339535 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:15.339692 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:15.423528 17621 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:07:15.903461 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:15.903513 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:07:15.913276 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:16.207994 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:16.208186 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:16.300206 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:16.300388 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:16.942559 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:16.942620 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:16.952826 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:17.269552 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:17.269779 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:17.382128 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:17.382338 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:18.089045 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:18.089098 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:18.100931 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:18.441802 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:18.442009 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:18.573627 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:18.573832 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:07:19.346989 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:07:19.347056 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:07:19.359390 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:07:19.399466 17635 blocking_queue.cpp:50] Waiting for data\nI0817 16:07:19.448093 17632 blocking_queue.cpp:50] Waiting for data\nI0817 16:07:19.789088 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:07:19.789324 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:07:19.940030 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:07:19.940260 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:07:20.110249 17621 parallel.cpp:425] Starting Optimization\nI0817 16:07:20.111537 17621 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:07:20.111552 17621 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:07:20.116816 17621 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:08:41.277812 17621 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:08:41.278143 17621 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:08:45.319625 17621 solver.cpp:228] Iteration 0, loss = 4.08289\nI0817 16:08:45.319679 17621 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:08:45.319710 17621 solver.cpp:244]     Train net output #1: loss = 4.08289 (* 1 = 4.08289 loss)\nI0817 16:08:45.405177 17621 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:11:03.169293 17621 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:12:24.030776 17621 solver.cpp:404]     Test net output #0: accuracy = 0.28412\nI0817 16:12:24.031097 17621 solver.cpp:404]     Test net output #1: loss = 1.90362 (* 1 = 1.90362 loss)\nI0817 16:12:25.335544 17621 
solver.cpp:228] Iteration 100, loss = 1.51048\nI0817 16:12:25.335608 17621 solver.cpp:244]     Train net output #0: accuracy = 0.368\nI0817 16:12:25.335634 17621 solver.cpp:244]     Train net output #1: loss = 1.51048 (* 1 = 1.51048 loss)\nI0817 16:12:25.452798 17621 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:14:43.045418 17621 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:16:03.936043 17621 solver.cpp:404]     Test net output #0: accuracy = 0.38244\nI0817 16:16:03.936333 17621 solver.cpp:404]     Test net output #1: loss = 1.98755 (* 1 = 1.98755 loss)\nI0817 16:16:05.244102 17621 solver.cpp:228] Iteration 200, loss = 1.09551\nI0817 16:16:05.244165 17621 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0817 16:16:05.244192 17621 solver.cpp:244]     Train net output #1: loss = 1.09551 (* 1 = 1.09551 loss)\nI0817 16:16:05.353272 17621 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:18:23.042929 17621 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:19:43.892679 17621 solver.cpp:404]     Test net output #0: accuracy = 0.49744\nI0817 16:19:43.892982 17621 solver.cpp:404]     Test net output #1: loss = 1.52431 (* 1 = 1.52431 loss)\nI0817 16:19:45.196126 17621 solver.cpp:228] Iteration 300, loss = 0.843874\nI0817 16:19:45.196192 17621 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 16:19:45.196216 17621 solver.cpp:244]     Train net output #1: loss = 0.843874 (* 1 = 0.843874 loss)\nI0817 16:19:45.309044 17621 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:22:03.026474 17621 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:23:23.908020 17621 solver.cpp:404]     Test net output #0: accuracy = 0.66008\nI0817 16:23:23.908326 17621 solver.cpp:404]     Test net output #1: loss = 1.11399 (* 1 = 1.11399 loss)\nI0817 16:23:25.212710 17621 solver.cpp:228] Iteration 400, loss = 0.56318\nI0817 16:23:25.212769 17621 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:23:25.212795 
17621 solver.cpp:244]     Train net output #1: loss = 0.56318 (* 1 = 0.56318 loss)\nI0817 16:23:25.331300 17621 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:25:42.964329 17621 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:27:03.851676 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6608\nI0817 16:27:03.851985 17621 solver.cpp:404]     Test net output #1: loss = 1.09047 (* 1 = 1.09047 loss)\nI0817 16:27:05.155753 17621 solver.cpp:228] Iteration 500, loss = 0.422668\nI0817 16:27:05.155809 17621 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:27:05.155834 17621 solver.cpp:244]     Train net output #1: loss = 0.422668 (* 1 = 0.422668 loss)\nI0817 16:27:05.269006 17621 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:29:22.905968 17621 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:30:43.772452 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71436\nI0817 16:30:43.772756 17621 solver.cpp:404]     Test net output #1: loss = 1.06004 (* 1 = 1.06004 loss)\nI0817 16:30:45.076004 17621 solver.cpp:228] Iteration 600, loss = 0.33385\nI0817 16:30:45.076059 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:30:45.076086 17621 solver.cpp:244]     Train net output #1: loss = 0.33385 (* 1 = 0.33385 loss)\nI0817 16:30:45.190781 17621 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:33:02.732118 17621 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:34:23.514788 17621 solver.cpp:404]     Test net output #0: accuracy = 0.61296\nI0817 16:34:23.515012 17621 solver.cpp:404]     Test net output #1: loss = 1.60178 (* 1 = 1.60178 loss)\nI0817 16:34:24.818449 17621 solver.cpp:228] Iteration 700, loss = 0.334332\nI0817 16:34:24.818505 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:34:24.818531 17621 solver.cpp:244]     Train net output #1: loss = 0.334332 (* 1 = 0.334332 loss)\nI0817 16:34:24.934943 17621 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0817 16:36:42.596616 17621 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:38:03.396867 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7168\nI0817 16:38:03.397117 17621 solver.cpp:404]     Test net output #1: loss = 1.14694 (* 1 = 1.14694 loss)\nI0817 16:38:04.701815 17621 solver.cpp:228] Iteration 800, loss = 0.222196\nI0817 16:38:04.701870 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 16:38:04.701894 17621 solver.cpp:244]     Train net output #1: loss = 0.222196 (* 1 = 0.222196 loss)\nI0817 16:38:04.810348 17621 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:40:22.467887 17621 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:41:43.250540 17621 solver.cpp:404]     Test net output #0: accuracy = 0.75612\nI0817 16:41:43.250844 17621 solver.cpp:404]     Test net output #1: loss = 0.8674 (* 1 = 0.8674 loss)\nI0817 16:41:44.555496 17621 solver.cpp:228] Iteration 900, loss = 0.244758\nI0817 16:41:44.555553 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 16:41:44.555578 17621 solver.cpp:244]     Train net output #1: loss = 0.244758 (* 1 = 0.244758 loss)\nI0817 16:41:44.662947 17621 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:44:02.239779 17621 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:45:22.970518 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68972\nI0817 16:45:22.970736 17621 solver.cpp:404]     Test net output #1: loss = 1.64149 (* 1 = 1.64149 loss)\nI0817 16:45:24.273967 17621 solver.cpp:228] Iteration 1000, loss = 0.168296\nI0817 16:45:24.274027 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:45:24.274052 17621 solver.cpp:244]     Train net output #1: loss = 0.168296 (* 1 = 0.168296 loss)\nI0817 16:45:24.387204 17621 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:47:42.080766 17621 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:49:02.882542 17621 solver.cpp:404]     Test net output #0: 
accuracy = 0.71436\nI0817 16:49:02.882756 17621 solver.cpp:404]     Test net output #1: loss = 1.24426 (* 1 = 1.24426 loss)\nI0817 16:49:04.181620 17621 solver.cpp:228] Iteration 1100, loss = 0.0667781\nI0817 16:49:04.181681 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 16:49:04.181709 17621 solver.cpp:244]     Train net output #1: loss = 0.0667782 (* 1 = 0.0667782 loss)\nI0817 16:49:04.298923 17621 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:51:22.028617 17621 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:52:42.822331 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69624\nI0817 16:52:42.822588 17621 solver.cpp:404]     Test net output #1: loss = 1.36594 (* 1 = 1.36594 loss)\nI0817 16:52:44.120414 17621 solver.cpp:228] Iteration 1200, loss = 0.192135\nI0817 16:52:44.120476 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 16:52:44.120499 17621 solver.cpp:244]     Train net output #1: loss = 0.192135 (* 1 = 0.192135 loss)\nI0817 16:52:44.238368 17621 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:55:01.939692 17621 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:56:22.721182 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74848\nI0817 16:56:22.721400 17621 solver.cpp:404]     Test net output #1: loss = 1.08796 (* 1 = 1.08796 loss)\nI0817 16:56:24.020543 17621 solver.cpp:228] Iteration 1300, loss = 0.19862\nI0817 16:56:24.020606 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 16:56:24.020632 17621 solver.cpp:244]     Train net output #1: loss = 0.198621 (* 1 = 0.198621 loss)\nI0817 16:56:24.140334 17621 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:58:41.880481 17621 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:00:02.681569 17621 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0817 17:00:02.681789 17621 solver.cpp:404]     Test net output #1: loss = 1.32957 (* 1 = 1.32957 loss)\nI0817 
17:00:03.981204 17621 solver.cpp:228] Iteration 1400, loss = 0.0800694\nI0817 17:00:03.981258 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:00:03.981276 17621 solver.cpp:244]     Train net output #1: loss = 0.0800695 (* 1 = 0.0800695 loss)\nI0817 17:00:04.096397 17621 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:02:21.774722 17621 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:03:42.560708 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0817 17:03:42.560955 17621 solver.cpp:404]     Test net output #1: loss = 1.36745 (* 1 = 1.36745 loss)\nI0817 17:03:43.860424 17621 solver.cpp:228] Iteration 1500, loss = 0.189259\nI0817 17:03:43.860483 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:03:43.860501 17621 solver.cpp:244]     Train net output #1: loss = 0.189259 (* 1 = 0.189259 loss)\nI0817 17:03:43.977457 17621 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:06:01.576529 17621 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:07:22.369416 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7408\nI0817 17:07:22.369628 17621 solver.cpp:404]     Test net output #1: loss = 1.20345 (* 1 = 1.20345 loss)\nI0817 17:07:23.668212 17621 solver.cpp:228] Iteration 1600, loss = 0.0561247\nI0817 17:07:23.668272 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:07:23.668289 17621 solver.cpp:244]     Train net output #1: loss = 0.0561248 (* 1 = 0.0561248 loss)\nI0817 17:07:23.782563 17621 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:09:41.438863 17621 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:11:02.208202 17621 solver.cpp:404]     Test net output #0: accuracy = 0.59124\nI0817 17:11:02.208427 17621 solver.cpp:404]     Test net output #1: loss = 2.42323 (* 1 = 2.42323 loss)\nI0817 17:11:03.507170 17621 solver.cpp:228] Iteration 1700, loss = 0.185138\nI0817 17:11:03.507230 17621 solver.cpp:244]     Train net output 
#0: accuracy = 0.936\nI0817 17:11:03.507247 17621 solver.cpp:244]     Train net output #1: loss = 0.185138 (* 1 = 0.185138 loss)\nI0817 17:11:03.624727 17621 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:13:21.277500 17621 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:14:42.083262 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7216\nI0817 17:14:42.083493 17621 solver.cpp:404]     Test net output #1: loss = 1.21886 (* 1 = 1.21886 loss)\nI0817 17:14:43.381497 17621 solver.cpp:228] Iteration 1800, loss = 0.0770961\nI0817 17:14:43.381561 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:14:43.381579 17621 solver.cpp:244]     Train net output #1: loss = 0.0770962 (* 1 = 0.0770962 loss)\nI0817 17:14:43.492430 17621 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:17:01.243186 17621 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:18:22.002431 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74776\nI0817 17:18:22.002641 17621 solver.cpp:404]     Test net output #1: loss = 1.05166 (* 1 = 1.05166 loss)\nI0817 17:18:23.301746 17621 solver.cpp:228] Iteration 1900, loss = 0.152435\nI0817 17:18:23.301805 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:18:23.301822 17621 solver.cpp:244]     Train net output #1: loss = 0.152435 (* 1 = 0.152435 loss)\nI0817 17:18:23.411993 17621 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:20:41.076934 17621 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:22:01.875185 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68656\nI0817 17:22:01.875396 17621 solver.cpp:404]     Test net output #1: loss = 1.4652 (* 1 = 1.4652 loss)\nI0817 17:22:03.173106 17621 solver.cpp:228] Iteration 2000, loss = 0.156384\nI0817 17:22:03.173166 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:22:03.173183 17621 solver.cpp:244]     Train net output #1: loss = 0.156384 (* 1 = 0.156384 loss)\nI0817 
17:22:03.288244 17621 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:24:20.964792 17621 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:25:41.729228 17621 solver.cpp:404]     Test net output #0: accuracy = 0.70276\nI0817 17:25:41.729444 17621 solver.cpp:404]     Test net output #1: loss = 1.34031 (* 1 = 1.34031 loss)\nI0817 17:25:43.028533 17621 solver.cpp:228] Iteration 2100, loss = 0.156751\nI0817 17:25:43.028594 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:25:43.028620 17621 solver.cpp:244]     Train net output #1: loss = 0.156751 (* 1 = 0.156751 loss)\nI0817 17:25:43.145021 17621 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:28:00.826916 17621 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:29:21.588892 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76412\nI0817 17:29:21.589134 17621 solver.cpp:404]     Test net output #1: loss = 0.977782 (* 1 = 0.977782 loss)\nI0817 17:29:22.887991 17621 solver.cpp:228] Iteration 2200, loss = 0.0799985\nI0817 17:29:22.888053 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 17:29:22.888079 17621 solver.cpp:244]     Train net output #1: loss = 0.0799986 (* 1 = 0.0799986 loss)\nI0817 17:29:23.002348 17621 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:31:40.811949 17621 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:33:01.597734 17621 solver.cpp:404]     Test net output #0: accuracy = 0.70168\nI0817 17:33:01.597985 17621 solver.cpp:404]     Test net output #1: loss = 1.424 (* 1 = 1.424 loss)\nI0817 17:33:02.897435 17621 solver.cpp:228] Iteration 2300, loss = 0.114534\nI0817 17:33:02.897495 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:33:02.897521 17621 solver.cpp:244]     Train net output #1: loss = 0.114534 (* 1 = 0.114534 loss)\nI0817 17:33:03.012784 17621 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:35:20.741050 17621 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0817 17:36:41.492730 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73232\nI0817 17:36:41.492954 17621 solver.cpp:404]     Test net output #1: loss = 1.2368 (* 1 = 1.2368 loss)\nI0817 17:36:42.792151 17621 solver.cpp:228] Iteration 2400, loss = 0.125584\nI0817 17:36:42.792212 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:36:42.792238 17621 solver.cpp:244]     Train net output #1: loss = 0.125584 (* 1 = 0.125584 loss)\nI0817 17:36:42.911427 17621 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:39:01.152679 17621 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:40:21.912194 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72372\nI0817 17:40:21.912443 17621 solver.cpp:404]     Test net output #1: loss = 1.15952 (* 1 = 1.15952 loss)\nI0817 17:40:23.217437 17621 solver.cpp:228] Iteration 2500, loss = 0.104322\nI0817 17:40:23.217497 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:40:23.217521 17621 solver.cpp:244]     Train net output #1: loss = 0.104322 (* 1 = 0.104322 loss)\nI0817 17:40:23.335399 17621 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:42:41.758905 17621 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:44:02.528673 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76196\nI0817 17:44:02.528897 17621 solver.cpp:404]     Test net output #1: loss = 0.933548 (* 1 = 0.933548 loss)\nI0817 17:44:03.832259 17621 solver.cpp:228] Iteration 2600, loss = 0.066382\nI0817 17:44:03.832320 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:44:03.832345 17621 solver.cpp:244]     Train net output #1: loss = 0.0663821 (* 1 = 0.0663821 loss)\nI0817 17:44:03.951134 17621 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:46:22.408695 17621 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:47:43.211160 17621 solver.cpp:404]     Test net output #0: accuracy = 0.64428\nI0817 17:47:43.211398 17621 solver.cpp:404]     
Test net output #1: loss = 1.90922 (* 1 = 1.90922 loss)\nI0817 17:47:44.516288 17621 solver.cpp:228] Iteration 2700, loss = 0.0730485\nI0817 17:47:44.516350 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 17:47:44.516376 17621 solver.cpp:244]     Train net output #1: loss = 0.0730486 (* 1 = 0.0730486 loss)\nI0817 17:47:44.627749 17621 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:50:02.800652 17621 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:51:23.613265 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73192\nI0817 17:51:23.613478 17621 solver.cpp:404]     Test net output #1: loss = 1.1117 (* 1 = 1.1117 loss)\nI0817 17:51:24.917971 17621 solver.cpp:228] Iteration 2800, loss = 0.164667\nI0817 17:51:24.918035 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:51:24.918059 17621 solver.cpp:244]     Train net output #1: loss = 0.164667 (* 1 = 0.164667 loss)\nI0817 17:51:25.035188 17621 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:53:43.050840 17621 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:55:03.756132 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71664\nI0817 17:55:03.756345 17621 solver.cpp:404]     Test net output #1: loss = 1.27117 (* 1 = 1.27117 loss)\nI0817 17:55:05.054669 17621 solver.cpp:228] Iteration 2900, loss = 0.17097\nI0817 17:55:05.054733 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:55:05.054757 17621 solver.cpp:244]     Train net output #1: loss = 0.17097 (* 1 = 0.17097 loss)\nI0817 17:55:05.178086 17621 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:57:22.343389 17621 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:58:43.093364 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6566\nI0817 17:58:43.093653 17621 solver.cpp:404]     Test net output #1: loss = 1.82476 (* 1 = 1.82476 loss)\nI0817 17:58:44.392300 17621 solver.cpp:228] Iteration 3000, loss = 0.226575\nI0817 
17:58:44.392364 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:58:44.392390 17621 solver.cpp:244]     Train net output #1: loss = 0.226575 (* 1 = 0.226575 loss)\nI0817 17:58:44.501593 17621 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:01:01.587730 17621 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:02:22.015974 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69208\nI0817 18:02:22.016249 17621 solver.cpp:404]     Test net output #1: loss = 1.93751 (* 1 = 1.93751 loss)\nI0817 18:02:23.314707 17621 solver.cpp:228] Iteration 3100, loss = 0.323878\nI0817 18:02:23.314771 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:02:23.314796 17621 solver.cpp:244]     Train net output #1: loss = 0.323878 (* 1 = 0.323878 loss)\nI0817 18:02:23.432680 17621 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:04:40.610293 17621 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:06:01.093086 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68524\nI0817 18:06:01.093303 17621 solver.cpp:404]     Test net output #1: loss = 1.59931 (* 1 = 1.59931 loss)\nI0817 18:06:02.392349 17621 solver.cpp:228] Iteration 3200, loss = 0.134314\nI0817 18:06:02.392411 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:06:02.392436 17621 solver.cpp:244]     Train net output #1: loss = 0.134315 (* 1 = 0.134315 loss)\nI0817 18:06:02.501215 17621 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:08:19.609381 17621 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:09:40.055022 17621 solver.cpp:404]     Test net output #0: accuracy = 0.65168\nI0817 18:09:40.055255 17621 solver.cpp:404]     Test net output #1: loss = 1.35919 (* 1 = 1.35919 loss)\nI0817 18:09:41.354775 17621 solver.cpp:228] Iteration 3300, loss = 0.157366\nI0817 18:09:41.354842 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:09:41.354869 17621 solver.cpp:244]     Train net 
output #1: loss = 0.157366 (* 1 = 0.157366 loss)\nI0817 18:09:41.467928 17621 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:11:58.597028 17621 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:13:19.320343 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73376\nI0817 18:13:19.320621 17621 solver.cpp:404]     Test net output #1: loss = 1.24663 (* 1 = 1.24663 loss)\nI0817 18:13:20.618263 17621 solver.cpp:228] Iteration 3400, loss = 0.0533247\nI0817 18:13:20.618327 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:13:20.618353 17621 solver.cpp:244]     Train net output #1: loss = 0.0533248 (* 1 = 0.0533248 loss)\nI0817 18:13:20.728943 17621 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:15:37.824870 17621 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:16:58.322878 17621 solver.cpp:404]     Test net output #0: accuracy = 0.66756\nI0817 18:16:58.323096 17621 solver.cpp:404]     Test net output #1: loss = 1.79496 (* 1 = 1.79496 loss)\nI0817 18:16:59.622122 17621 solver.cpp:228] Iteration 3500, loss = 0.205123\nI0817 18:16:59.622189 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:16:59.622215 17621 solver.cpp:244]     Train net output #1: loss = 0.205123 (* 1 = 0.205123 loss)\nI0817 18:16:59.734855 17621 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:19:16.808888 17621 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:20:37.181576 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7602\nI0817 18:20:37.181828 17621 solver.cpp:404]     Test net output #1: loss = 0.821321 (* 1 = 0.821321 loss)\nI0817 18:20:38.480427 17621 solver.cpp:228] Iteration 3600, loss = 0.220295\nI0817 18:20:38.480490 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:20:38.480516 17621 solver.cpp:244]     Train net output #1: loss = 0.220296 (* 1 = 0.220296 loss)\nI0817 18:20:38.586470 17621 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 
18:22:55.664924 17621 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:24:16.442554 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68752\nI0817 18:24:16.442776 17621 solver.cpp:404]     Test net output #1: loss = 1.31623 (* 1 = 1.31623 loss)\nI0817 18:24:17.742094 17621 solver.cpp:228] Iteration 3700, loss = 0.115204\nI0817 18:24:17.742161 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:24:17.742187 17621 solver.cpp:244]     Train net output #1: loss = 0.115204 (* 1 = 0.115204 loss)\nI0817 18:24:17.855846 17621 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:26:34.960589 17621 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:27:55.490602 17621 solver.cpp:404]     Test net output #0: accuracy = 0.66384\nI0817 18:27:55.490792 17621 solver.cpp:404]     Test net output #1: loss = 1.39635 (* 1 = 1.39635 loss)\nI0817 18:27:56.790164 17621 solver.cpp:228] Iteration 3800, loss = 0.236246\nI0817 18:27:56.790226 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:27:56.790251 17621 solver.cpp:244]     Train net output #1: loss = 0.236246 (* 1 = 0.236246 loss)\nI0817 18:27:56.901262 17621 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:30:14.004674 17621 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:31:34.765810 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73592\nI0817 18:31:34.766044 17621 solver.cpp:404]     Test net output #1: loss = 1.06707 (* 1 = 1.06707 loss)\nI0817 18:31:36.064373 17621 solver.cpp:228] Iteration 3900, loss = 0.132712\nI0817 18:31:36.064435 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:31:36.064460 17621 solver.cpp:244]     Train net output #1: loss = 0.132712 (* 1 = 0.132712 loss)\nI0817 18:31:36.178953 17621 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:33:53.335312 17621 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:35:13.751518 17621 solver.cpp:404]     Test net output #0: 
accuracy = 0.63992\nI0817 18:35:13.751751 17621 solver.cpp:404]     Test net output #1: loss = 2.10274 (* 1 = 2.10274 loss)\nI0817 18:35:15.051192 17621 solver.cpp:228] Iteration 4000, loss = 0.176395\nI0817 18:35:15.051255 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:35:15.051280 17621 solver.cpp:244]     Train net output #1: loss = 0.176395 (* 1 = 0.176395 loss)\nI0817 18:35:15.160706 17621 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:37:32.329839 17621 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:38:52.736769 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6114\nI0817 18:38:52.736982 17621 solver.cpp:404]     Test net output #1: loss = 1.96732 (* 1 = 1.96732 loss)\nI0817 18:38:54.037050 17621 solver.cpp:228] Iteration 4100, loss = 0.191699\nI0817 18:38:54.037114 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:38:54.037144 17621 solver.cpp:244]     Train net output #1: loss = 0.191699 (* 1 = 0.191699 loss)\nI0817 18:38:54.143069 17621 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:41:11.395082 17621 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:42:32.142499 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69704\nI0817 18:42:32.142726 17621 solver.cpp:404]     Test net output #1: loss = 1.26863 (* 1 = 1.26863 loss)\nI0817 18:42:33.441835 17621 solver.cpp:228] Iteration 4200, loss = 0.153761\nI0817 18:42:33.441900 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:42:33.441926 17621 solver.cpp:244]     Train net output #1: loss = 0.153761 (* 1 = 0.153761 loss)\nI0817 18:42:33.548252 17621 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:44:50.855762 17621 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:46:11.626406 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68548\nI0817 18:46:11.626622 17621 solver.cpp:404]     Test net output #1: loss = 1.28291 (* 1 = 1.28291 loss)\nI0817 
18:46:12.925364 17621 solver.cpp:228] Iteration 4300, loss = 0.337255\nI0817 18:46:12.925429 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:46:12.925456 17621 solver.cpp:244]     Train net output #1: loss = 0.337255 (* 1 = 0.337255 loss)\nI0817 18:46:13.036341 17621 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:48:30.448503 17621 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:49:51.197688 17621 solver.cpp:404]     Test net output #0: accuracy = 0.75648\nI0817 18:49:51.197933 17621 solver.cpp:404]     Test net output #1: loss = 0.975969 (* 1 = 0.975969 loss)\nI0817 18:49:52.497426 17621 solver.cpp:228] Iteration 4400, loss = 0.25621\nI0817 18:49:52.497488 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:49:52.497514 17621 solver.cpp:244]     Train net output #1: loss = 0.25621 (* 1 = 0.25621 loss)\nI0817 18:49:52.615504 17621 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:52:10.636000 17621 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:53:31.386081 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68536\nI0817 18:53:31.386322 17621 solver.cpp:404]     Test net output #1: loss = 1.34913 (* 1 = 1.34913 loss)\nI0817 18:53:32.685606 17621 solver.cpp:228] Iteration 4500, loss = 0.161514\nI0817 18:53:32.685672 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:53:32.685696 17621 solver.cpp:244]     Train net output #1: loss = 0.161514 (* 1 = 0.161514 loss)\nI0817 18:53:32.802484 17621 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 18:55:50.839483 17621 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:57:11.622711 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72356\nI0817 18:57:11.622949 17621 solver.cpp:404]     Test net output #1: loss = 1.1405 (* 1 = 1.1405 loss)\nI0817 18:57:12.922498 17621 solver.cpp:228] Iteration 4600, loss = 0.236963\nI0817 18:57:12.922562 17621 solver.cpp:244]     Train net output #0: 
accuracy = 0.92\nI0817 18:57:12.922587 17621 solver.cpp:244]     Train net output #1: loss = 0.236963 (* 1 = 0.236963 loss)\nI0817 18:57:13.045501 17621 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 18:59:31.058048 17621 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:00:51.873808 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76908\nI0817 19:00:51.874038 17621 solver.cpp:404]     Test net output #1: loss = 0.82009 (* 1 = 0.82009 loss)\nI0817 19:00:53.173673 17621 solver.cpp:228] Iteration 4700, loss = 0.177328\nI0817 19:00:53.173737 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:00:53.173763 17621 solver.cpp:244]     Train net output #1: loss = 0.177328 (* 1 = 0.177328 loss)\nI0817 19:00:53.289455 17621 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:03:11.368353 17621 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:04:32.152006 17621 solver.cpp:404]     Test net output #0: accuracy = 0.65296\nI0817 19:04:32.152221 17621 solver.cpp:404]     Test net output #1: loss = 1.61946 (* 1 = 1.61946 loss)\nI0817 19:04:33.451972 17621 solver.cpp:228] Iteration 4800, loss = 0.281546\nI0817 19:04:33.452036 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:04:33.452060 17621 solver.cpp:244]     Train net output #1: loss = 0.281546 (* 1 = 0.281546 loss)\nI0817 19:04:33.569365 17621 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:06:51.640723 17621 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:08:12.438688 17621 solver.cpp:404]     Test net output #0: accuracy = 0.60852\nI0817 19:08:12.438933 17621 solver.cpp:404]     Test net output #1: loss = 2.1216 (* 1 = 2.1216 loss)\nI0817 19:08:13.738591 17621 solver.cpp:228] Iteration 4900, loss = 0.187368\nI0817 19:08:13.738656 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:08:13.738680 17621 solver.cpp:244]     Train net output #1: loss = 0.187368 (* 1 = 0.187368 loss)\nI0817 
19:08:13.857744 17621 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:10:31.812059 17621 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:11:52.606209 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68744\nI0817 19:11:52.606447 17621 solver.cpp:404]     Test net output #1: loss = 1.33965 (* 1 = 1.33965 loss)\nI0817 19:11:53.906103 17621 solver.cpp:228] Iteration 5000, loss = 0.156899\nI0817 19:11:53.906170 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:11:53.906198 17621 solver.cpp:244]     Train net output #1: loss = 0.156899 (* 1 = 0.156899 loss)\nI0817 19:11:54.023792 17621 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:14:12.062216 17621 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:15:32.872213 17621 solver.cpp:404]     Test net output #0: accuracy = 0.65044\nI0817 19:15:32.872449 17621 solver.cpp:404]     Test net output #1: loss = 1.4448 (* 1 = 1.4448 loss)\nI0817 19:15:34.172175 17621 solver.cpp:228] Iteration 5100, loss = 0.136385\nI0817 19:15:34.172240 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:15:34.172263 17621 solver.cpp:244]     Train net output #1: loss = 0.136385 (* 1 = 0.136385 loss)\nI0817 19:15:34.286085 17621 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:17:52.291476 17621 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:19:12.934473 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71168\nI0817 19:19:12.934677 17621 solver.cpp:404]     Test net output #1: loss = 1.15633 (* 1 = 1.15633 loss)\nI0817 19:19:14.233973 17621 solver.cpp:228] Iteration 5200, loss = 0.23938\nI0817 19:19:14.234035 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:19:14.234061 17621 solver.cpp:244]     Train net output #1: loss = 0.23938 (* 1 = 0.23938 loss)\nI0817 19:19:14.344902 17621 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:21:31.567016 17621 solver.cpp:337] Iteration 5300, Testing net 
(#0)\nI0817 19:22:52.214624 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6424\nI0817 19:22:52.214897 17621 solver.cpp:404]     Test net output #1: loss = 1.27225 (* 1 = 1.27225 loss)\nI0817 19:22:53.514909 17621 solver.cpp:228] Iteration 5300, loss = 0.431339\nI0817 19:22:53.514972 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:22:53.514998 17621 solver.cpp:244]     Train net output #1: loss = 0.431339 (* 1 = 0.431339 loss)\nI0817 19:22:53.616762 17621 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:25:10.807634 17621 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:26:31.560396 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7218\nI0817 19:26:31.560617 17621 solver.cpp:404]     Test net output #1: loss = 1.05767 (* 1 = 1.05767 loss)\nI0817 19:26:32.860098 17621 solver.cpp:228] Iteration 5400, loss = 0.135613\nI0817 19:26:32.860162 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:26:32.860188 17621 solver.cpp:244]     Train net output #1: loss = 0.135613 (* 1 = 0.135613 loss)\nI0817 19:26:32.970953 17621 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:28:50.170660 17621 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:30:10.930325 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6612\nI0817 19:30:10.930567 17621 solver.cpp:404]     Test net output #1: loss = 1.70668 (* 1 = 1.70668 loss)\nI0817 19:30:12.230312 17621 solver.cpp:228] Iteration 5500, loss = 0.170223\nI0817 19:30:12.230376 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:30:12.230401 17621 solver.cpp:244]     Train net output #1: loss = 0.170223 (* 1 = 0.170223 loss)\nI0817 19:30:12.343194 17621 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:32:29.600774 17621 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:33:50.347605 17621 solver.cpp:404]     Test net output #0: accuracy = 0.40668\nI0817 19:33:50.347822 17621 solver.cpp:404]     Test 
net output #1: loss = 5.48075 (* 1 = 5.48075 loss)\nI0817 19:33:51.647326 17621 solver.cpp:228] Iteration 5600, loss = 0.205138\nI0817 19:33:51.647388 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:33:51.647414 17621 solver.cpp:244]     Train net output #1: loss = 0.205138 (* 1 = 0.205138 loss)\nI0817 19:33:51.757277 17621 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:36:08.931538 17621 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:37:29.689842 17621 solver.cpp:404]     Test net output #0: accuracy = 0.664\nI0817 19:37:29.690064 17621 solver.cpp:404]     Test net output #1: loss = 1.55278 (* 1 = 1.55278 loss)\nI0817 19:37:30.988051 17621 solver.cpp:228] Iteration 5700, loss = 0.215777\nI0817 19:37:30.988113 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:37:30.988143 17621 solver.cpp:244]     Train net output #1: loss = 0.215777 (* 1 = 0.215777 loss)\nI0817 19:37:31.100518 17621 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:39:48.286794 17621 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:41:09.030709 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69216\nI0817 19:41:09.030978 17621 solver.cpp:404]     Test net output #1: loss = 1.46079 (* 1 = 1.46079 loss)\nI0817 19:41:10.329557 17621 solver.cpp:228] Iteration 5800, loss = 0.236603\nI0817 19:41:10.329620 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:41:10.329645 17621 solver.cpp:244]     Train net output #1: loss = 0.236603 (* 1 = 0.236603 loss)\nI0817 19:41:10.440064 17621 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:43:27.623118 17621 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:44:47.922154 17621 solver.cpp:404]     Test net output #0: accuracy = 0.61996\nI0817 19:44:47.922358 17621 solver.cpp:404]     Test net output #1: loss = 1.56753 (* 1 = 1.56753 loss)\nI0817 19:44:49.220253 17621 solver.cpp:228] Iteration 5900, loss = 0.245119\nI0817 
19:44:49.220312 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:44:49.220337 17621 solver.cpp:244]     Train net output #1: loss = 0.245119 (* 1 = 0.245119 loss)\nI0817 19:44:49.333189 17621 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:47:06.585510 17621 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:48:27.009245 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72692\nI0817 19:48:27.009440 17621 solver.cpp:404]     Test net output #1: loss = 1.00177 (* 1 = 1.00177 loss)\nI0817 19:48:28.308898 17621 solver.cpp:228] Iteration 6000, loss = 0.169817\nI0817 19:48:28.308964 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:48:28.308990 17621 solver.cpp:244]     Train net output #1: loss = 0.169817 (* 1 = 0.169817 loss)\nI0817 19:48:28.418855 17621 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:50:45.544832 17621 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:52:05.992349 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73744\nI0817 19:52:05.992599 17621 solver.cpp:404]     Test net output #1: loss = 0.910548 (* 1 = 0.910548 loss)\nI0817 19:52:07.291452 17621 solver.cpp:228] Iteration 6100, loss = 0.200391\nI0817 19:52:07.291512 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:52:07.291538 17621 solver.cpp:244]     Train net output #1: loss = 0.200391 (* 1 = 0.200391 loss)\nI0817 19:52:07.400321 17621 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 19:54:24.639560 17621 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:55:44.993851 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74948\nI0817 19:55:44.994061 17621 solver.cpp:404]     Test net output #1: loss = 1.03776 (* 1 = 1.03776 loss)\nI0817 19:55:46.293298 17621 solver.cpp:228] Iteration 6200, loss = 0.236596\nI0817 19:55:46.293361 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:55:46.293387 17621 solver.cpp:244]     Train net 
output #1: loss = 0.236596 (* 1 = 0.236596 loss)\nI0817 19:55:46.404531 17621 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 19:58:03.668712 17621 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 19:59:24.129395 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7338\nI0817 19:59:24.129616 17621 solver.cpp:404]     Test net output #1: loss = 1.15475 (* 1 = 1.15475 loss)\nI0817 19:59:25.428827 17621 solver.cpp:228] Iteration 6300, loss = 0.0976608\nI0817 19:59:25.428890 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:59:25.428915 17621 solver.cpp:244]     Train net output #1: loss = 0.0976608 (* 1 = 0.0976608 loss)\nI0817 19:59:25.536329 17621 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:01:42.751168 17621 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:03:03.468220 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68196\nI0817 20:03:03.468569 17621 solver.cpp:404]     Test net output #1: loss = 1.52848 (* 1 = 1.52848 loss)\nI0817 20:03:04.768437 17621 solver.cpp:228] Iteration 6400, loss = 0.21206\nI0817 20:03:04.768501 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:03:04.768527 17621 solver.cpp:244]     Train net output #1: loss = 0.21206 (* 1 = 0.21206 loss)\nI0817 20:03:04.883659 17621 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:05:22.076889 17621 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:06:42.670768 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6602\nI0817 20:06:42.671080 17621 solver.cpp:404]     Test net output #1: loss = 1.64997 (* 1 = 1.64997 loss)\nI0817 20:06:43.970623 17621 solver.cpp:228] Iteration 6500, loss = 0.161086\nI0817 20:06:43.970685 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:06:43.970710 17621 solver.cpp:244]     Train net output #1: loss = 0.161086 (* 1 = 0.161086 loss)\nI0817 20:06:44.080917 17621 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 20:09:01.382520 
17621 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:10:22.290395 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76672\nI0817 20:10:22.290674 17621 solver.cpp:404]     Test net output #1: loss = 0.916857 (* 1 = 0.916857 loss)\nI0817 20:10:23.588588 17621 solver.cpp:228] Iteration 6600, loss = 0.12693\nI0817 20:10:23.588651 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:10:23.588676 17621 solver.cpp:244]     Train net output #1: loss = 0.12693 (* 1 = 0.12693 loss)\nI0817 20:10:23.694780 17621 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:12:40.856179 17621 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:14:01.787470 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74652\nI0817 20:14:01.787760 17621 solver.cpp:404]     Test net output #1: loss = 1.1138 (* 1 = 1.1138 loss)\nI0817 20:14:03.087229 17621 solver.cpp:228] Iteration 6700, loss = 0.0878011\nI0817 20:14:03.087290 17621 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:14:03.087316 17621 solver.cpp:244]     Train net output #1: loss = 0.087801 (* 1 = 0.087801 loss)\nI0817 20:14:03.199496 17621 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:16:21.025430 17621 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:17:41.900955 17621 solver.cpp:404]     Test net output #0: accuracy = 0.766999\nI0817 20:17:41.901257 17621 solver.cpp:404]     Test net output #1: loss = 0.861983 (* 1 = 0.861983 loss)\nI0817 20:17:43.200702 17621 solver.cpp:228] Iteration 6800, loss = 0.0667251\nI0817 20:17:43.200759 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:17:43.200785 17621 solver.cpp:244]     Train net output #1: loss = 0.066725 (* 1 = 0.066725 loss)\nI0817 20:17:43.317850 17621 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:20:01.437438 17621 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:21:22.299190 17621 solver.cpp:404]     Test net output #0: accuracy = 
0.57928\nI0817 20:21:22.299489 17621 solver.cpp:404]     Test net output #1: loss = 2.3899 (* 1 = 2.3899 loss)\nI0817 20:21:23.607038 17621 solver.cpp:228] Iteration 6900, loss = 0.177508\nI0817 20:21:23.607095 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:21:23.607120 17621 solver.cpp:244]     Train net output #1: loss = 0.177508 (* 1 = 0.177508 loss)\nI0817 20:21:23.717978 17621 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:23:41.720445 17621 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:25:02.646049 17621 solver.cpp:404]     Test net output #0: accuracy = 0.80116\nI0817 20:25:02.646370 17621 solver.cpp:404]     Test net output #1: loss = 0.769186 (* 1 = 0.769186 loss)\nI0817 20:25:03.946162 17621 solver.cpp:228] Iteration 7000, loss = 0.106902\nI0817 20:25:03.946221 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:25:03.946247 17621 solver.cpp:244]     Train net output #1: loss = 0.106902 (* 1 = 0.106902 loss)\nI0817 20:25:04.063331 17621 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:27:22.127311 17621 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:28:42.987529 17621 solver.cpp:404]     Test net output #0: accuracy = 0.75764\nI0817 20:28:42.987824 17621 solver.cpp:404]     Test net output #1: loss = 1.11441 (* 1 = 1.11441 loss)\nI0817 20:28:44.287456 17621 solver.cpp:228] Iteration 7100, loss = 0.107819\nI0817 20:28:44.287510 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:28:44.287536 17621 solver.cpp:244]     Train net output #1: loss = 0.107819 (* 1 = 0.107819 loss)\nI0817 20:28:44.407452 17621 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:31:02.475733 17621 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:32:23.333318 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72536\nI0817 20:32:23.333627 17621 solver.cpp:404]     Test net output #1: loss = 1.27013 (* 1 = 1.27013 loss)\nI0817 20:32:24.633430 17621 
solver.cpp:228] Iteration 7200, loss = 0.0604574\nI0817 20:32:24.633486 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:32:24.633512 17621 solver.cpp:244]     Train net output #1: loss = 0.0604574 (* 1 = 0.0604574 loss)\nI0817 20:32:24.749290 17621 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:34:42.833230 17621 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:36:03.670094 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76608\nI0817 20:36:03.670449 17621 solver.cpp:404]     Test net output #1: loss = 1.05686 (* 1 = 1.05686 loss)\nI0817 20:36:04.969533 17621 solver.cpp:228] Iteration 7300, loss = 0.0902274\nI0817 20:36:04.969588 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:36:04.969614 17621 solver.cpp:244]     Train net output #1: loss = 0.0902273 (* 1 = 0.0902273 loss)\nI0817 20:36:05.090911 17621 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:38:23.222739 17621 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:39:44.056617 17621 solver.cpp:404]     Test net output #0: accuracy = 0.79016\nI0817 20:39:44.057044 17621 solver.cpp:404]     Test net output #1: loss = 0.868164 (* 1 = 0.868164 loss)\nI0817 20:39:45.355109 17621 solver.cpp:228] Iteration 7400, loss = 0.0591542\nI0817 20:39:45.355166 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:39:45.355191 17621 solver.cpp:244]     Train net output #1: loss = 0.0591542 (* 1 = 0.0591542 loss)\nI0817 20:39:45.476495 17621 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:42:03.571687 17621 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:43:24.384759 17621 solver.cpp:404]     Test net output #0: accuracy = 0.79164\nI0817 20:43:24.385063 17621 solver.cpp:404]     Test net output #1: loss = 0.899965 (* 1 = 0.899965 loss)\nI0817 20:43:25.683590 17621 solver.cpp:228] Iteration 7500, loss = 0.0867834\nI0817 20:43:25.683646 17621 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0817 20:43:25.683663 17621 solver.cpp:244]     Train net output #1: loss = 0.0867833 (* 1 = 0.0867833 loss)\nI0817 20:43:25.803702 17621 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 20:45:43.750954 17621 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:47:04.536078 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72792\nI0817 20:47:04.536324 17621 solver.cpp:404]     Test net output #1: loss = 1.18544 (* 1 = 1.18544 loss)\nI0817 20:47:05.836091 17621 solver.cpp:228] Iteration 7600, loss = 0.0899452\nI0817 20:47:05.836149 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:47:05.836171 17621 solver.cpp:244]     Train net output #1: loss = 0.0899452 (* 1 = 0.0899452 loss)\nI0817 20:47:05.935788 17621 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:49:24.011391 17621 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:50:44.757074 17621 solver.cpp:404]     Test net output #0: accuracy = 0.77608\nI0817 20:50:44.757334 17621 solver.cpp:404]     Test net output #1: loss = 0.953354 (* 1 = 0.953354 loss)\nI0817 20:50:46.055856 17621 solver.cpp:228] Iteration 7700, loss = 0.029522\nI0817 20:50:46.055912 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:50:46.055929 17621 solver.cpp:244]     Train net output #1: loss = 0.029522 (* 1 = 0.029522 loss)\nI0817 20:50:46.161244 17621 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:53:04.290794 17621 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:54:25.041333 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69324\nI0817 20:54:25.041544 17621 solver.cpp:404]     Test net output #1: loss = 1.72792 (* 1 = 1.72792 loss)\nI0817 20:54:26.340900 17621 solver.cpp:228] Iteration 7800, loss = 0.0648893\nI0817 20:54:26.340955 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:54:26.340972 17621 solver.cpp:244]     Train net output #1: loss = 0.0648892 (* 1 = 0.0648892 loss)\nI0817 
20:54:26.442144 17621 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 20:56:44.426533 17621 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 20:58:05.070344 17621 solver.cpp:404]     Test net output #0: accuracy = 0.79192\nI0817 20:58:05.070567 17621 solver.cpp:404]     Test net output #1: loss = 0.869118 (* 1 = 0.869118 loss)\nI0817 20:58:06.369586 17621 solver.cpp:228] Iteration 7900, loss = 0.0341727\nI0817 20:58:06.369637 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:58:06.369654 17621 solver.cpp:244]     Train net output #1: loss = 0.0341727 (* 1 = 0.0341727 loss)\nI0817 20:58:06.472266 17621 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 21:00:24.557824 17621 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:01:44.977146 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7644\nI0817 21:01:44.977396 17621 solver.cpp:404]     Test net output #1: loss = 1.03605 (* 1 = 1.03605 loss)\nI0817 21:01:46.275892 17621 solver.cpp:228] Iteration 8000, loss = 0.0555832\nI0817 21:01:46.275944 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:01:46.275962 17621 solver.cpp:244]     Train net output #1: loss = 0.0555832 (* 1 = 0.0555832 loss)\nI0817 21:01:46.380444 17621 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:04:04.478652 17621 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:05:25.104182 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76456\nI0817 21:05:25.104396 17621 solver.cpp:404]     Test net output #1: loss = 1.10103 (* 1 = 1.10103 loss)\nI0817 21:05:26.402529 17621 solver.cpp:228] Iteration 8100, loss = 0.0674487\nI0817 21:05:26.402590 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:05:26.402608 17621 solver.cpp:244]     Train net output #1: loss = 0.0674487 (* 1 = 0.0674487 loss)\nI0817 21:05:26.507532 17621 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:07:44.513929 17621 solver.cpp:337] Iteration 8200, 
Testing net (#0)\nI0817 21:09:05.075677 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78244\nI0817 21:09:05.075925 17621 solver.cpp:404]     Test net output #1: loss = 0.940302 (* 1 = 0.940302 loss)\nI0817 21:09:06.375176 17621 solver.cpp:228] Iteration 8200, loss = 0.0351043\nI0817 21:09:06.375236 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:09:06.375255 17621 solver.cpp:244]     Train net output #1: loss = 0.0351044 (* 1 = 0.0351044 loss)\nI0817 21:09:06.479151 17621 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:11:24.554937 17621 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:12:45.225486 17621 solver.cpp:404]     Test net output #0: accuracy = 0.84868\nI0817 21:12:45.225744 17621 solver.cpp:404]     Test net output #1: loss = 0.634812 (* 1 = 0.634812 loss)\nI0817 21:12:46.524170 17621 solver.cpp:228] Iteration 8300, loss = 0.0493211\nI0817 21:12:46.524230 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:12:46.524248 17621 solver.cpp:244]     Train net output #1: loss = 0.0493211 (* 1 = 0.0493211 loss)\nI0817 21:12:46.631852 17621 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:15:04.727082 17621 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:16:25.360116 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76616\nI0817 21:16:25.360354 17621 solver.cpp:404]     Test net output #1: loss = 1.17125 (* 1 = 1.17125 loss)\nI0817 21:16:26.660176 17621 solver.cpp:228] Iteration 8400, loss = 0.0208385\nI0817 21:16:26.660238 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:16:26.660257 17621 solver.cpp:244]     Train net output #1: loss = 0.0208385 (* 1 = 0.0208385 loss)\nI0817 21:16:26.764842 17621 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:18:44.758788 17621 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:20:05.505005 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7844\nI0817 21:20:05.505239 
17621 solver.cpp:404]     Test net output #1: loss = 1.0424 (* 1 = 1.0424 loss)\nI0817 21:20:06.803957 17621 solver.cpp:228] Iteration 8500, loss = 0.0547762\nI0817 21:20:06.804013 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:20:06.804031 17621 solver.cpp:244]     Train net output #1: loss = 0.0547762 (* 1 = 0.0547762 loss)\nI0817 21:20:06.905433 17621 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:22:24.989346 17621 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:23:45.746940 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76552\nI0817 21:23:45.747184 17621 solver.cpp:404]     Test net output #1: loss = 1.26242 (* 1 = 1.26242 loss)\nI0817 21:23:47.046283 17621 solver.cpp:228] Iteration 8600, loss = 0.0464737\nI0817 21:23:47.046341 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:23:47.046358 17621 solver.cpp:244]     Train net output #1: loss = 0.0464738 (* 1 = 0.0464738 loss)\nI0817 21:23:47.155182 17621 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:26:05.214870 17621 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:27:25.974005 17621 solver.cpp:404]     Test net output #0: accuracy = 0.8278\nI0817 21:27:25.974241 17621 solver.cpp:404]     Test net output #1: loss = 0.71811 (* 1 = 0.71811 loss)\nI0817 21:27:27.272971 17621 solver.cpp:228] Iteration 8700, loss = 0.0265528\nI0817 21:27:27.273031 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:27:27.273049 17621 solver.cpp:244]     Train net output #1: loss = 0.0265528 (* 1 = 0.0265528 loss)\nI0817 21:27:27.381255 17621 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:29:45.480121 17621 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:31:06.240476 17621 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0817 21:31:06.240727 17621 solver.cpp:404]     Test net output #1: loss = 0.943887 (* 1 = 0.943887 loss)\nI0817 21:31:07.539536 17621 solver.cpp:228] 
Iteration 8800, loss = 0.0487053\nI0817 21:31:07.539597 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:31:07.539614 17621 solver.cpp:244]     Train net output #1: loss = 0.0487053 (* 1 = 0.0487053 loss)\nI0817 21:31:07.647375 17621 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:33:25.565424 17621 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:34:46.134618 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7906\nI0817 21:34:46.134851 17621 solver.cpp:404]     Test net output #1: loss = 1.08697 (* 1 = 1.08697 loss)\nI0817 21:34:47.434801 17621 solver.cpp:228] Iteration 8900, loss = 0.0311413\nI0817 21:34:47.434860 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:34:47.434878 17621 solver.cpp:244]     Train net output #1: loss = 0.0311414 (* 1 = 0.0311414 loss)\nI0817 21:34:47.543104 17621 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:37:05.583267 17621 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:38:26.220520 17621 solver.cpp:404]     Test net output #0: accuracy = 0.824\nI0817 21:38:26.220749 17621 solver.cpp:404]     Test net output #1: loss = 0.797823 (* 1 = 0.797823 loss)\nI0817 21:38:27.519230 17621 solver.cpp:228] Iteration 9000, loss = 0.00582197\nI0817 21:38:27.519286 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:38:27.519304 17621 solver.cpp:244]     Train net output #1: loss = 0.005822 (* 1 = 0.005822 loss)\nI0817 21:38:27.627374 17621 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:40:45.755749 17621 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:42:06.264261 17621 solver.cpp:404]     Test net output #0: accuracy = 0.81288\nI0817 21:42:06.264549 17621 solver.cpp:404]     Test net output #1: loss = 0.974768 (* 1 = 0.974768 loss)\nI0817 21:42:07.562314 17621 solver.cpp:228] Iteration 9100, loss = 0.00395323\nI0817 21:42:07.562371 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 
21:42:07.562388 17621 solver.cpp:244]     Train net output #1: loss = 0.00395325 (* 1 = 0.00395325 loss)\nI0817 21:42:07.673528 17621 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:44:25.755214 17621 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:45:46.288851 17621 solver.cpp:404]     Test net output #0: accuracy = 0.84308\nI0817 21:45:46.289125 17621 solver.cpp:404]     Test net output #1: loss = 0.729022 (* 1 = 0.729022 loss)\nI0817 21:45:47.588649 17621 solver.cpp:228] Iteration 9200, loss = 0.000618838\nI0817 21:45:47.588711 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:45:47.588728 17621 solver.cpp:244]     Train net output #1: loss = 0.00061887 (* 1 = 0.00061887 loss)\nI0817 21:45:47.693295 17621 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:48:05.822926 17621 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:49:26.597290 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87196\nI0817 21:49:26.597576 17621 solver.cpp:404]     Test net output #1: loss = 0.537117 (* 1 = 0.537117 loss)\nI0817 21:49:27.896582 17621 solver.cpp:228] Iteration 9300, loss = 0.000169646\nI0817 21:49:27.896644 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:49:27.896662 17621 solver.cpp:244]     Train net output #1: loss = 0.000169678 (* 1 = 0.000169678 loss)\nI0817 21:49:28.008379 17621 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:51:46.071557 17621 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:53:06.835503 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87208\nI0817 21:53:06.835804 17621 solver.cpp:404]     Test net output #1: loss = 0.512594 (* 1 = 0.512594 loss)\nI0817 21:53:08.135383 17621 solver.cpp:228] Iteration 9400, loss = 0.00017594\nI0817 21:53:08.135447 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:53:08.135473 17621 solver.cpp:244]     Train net output #1: loss = 0.000175971 (* 1 = 0.000175971 loss)\nI0817 
21:53:08.237504 17621 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 21:55:26.322331 17621 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 21:56:47.110509 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87492\nI0817 21:56:47.110790 17621 solver.cpp:404]     Test net output #1: loss = 0.48507 (* 1 = 0.48507 loss)\nI0817 21:56:48.410267 17621 solver.cpp:228] Iteration 9500, loss = 0.000251764\nI0817 21:56:48.410331 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:56:48.410357 17621 solver.cpp:244]     Train net output #1: loss = 0.000251795 (* 1 = 0.000251795 loss)\nI0817 21:56:48.521440 17621 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 21:59:06.640683 17621 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:00:27.407240 17621 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0817 22:00:27.407500 17621 solver.cpp:404]     Test net output #1: loss = 0.479183 (* 1 = 0.479183 loss)\nI0817 22:00:28.706060 17621 solver.cpp:228] Iteration 9600, loss = 0.000259976\nI0817 22:00:28.706121 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:00:28.706140 17621 solver.cpp:244]     Train net output #1: loss = 0.000260008 (* 1 = 0.000260008 loss)\nI0817 22:00:28.815070 17621 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:02:46.928756 17621 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:04:07.693250 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0817 22:04:07.693517 17621 solver.cpp:404]     Test net output #1: loss = 0.462415 (* 1 = 0.462415 loss)\nI0817 22:04:08.992632 17621 solver.cpp:228] Iteration 9700, loss = 0.000352992\nI0817 22:04:08.992694 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:04:08.992712 17621 solver.cpp:244]     Train net output #1: loss = 0.000353023 (* 1 = 0.000353023 loss)\nI0817 22:04:09.099704 17621 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:06:27.239516 17621 solver.cpp:337] Iteration 
9800, Testing net (#0)\nI0817 22:07:47.991775 17621 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0817 22:07:47.992050 17621 solver.cpp:404]     Test net output #1: loss = 0.464012 (* 1 = 0.464012 loss)\nI0817 22:07:49.290359 17621 solver.cpp:228] Iteration 9800, loss = 0.000451388\nI0817 22:07:49.290419 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:07:49.290437 17621 solver.cpp:244]     Train net output #1: loss = 0.000451419 (* 1 = 0.000451419 loss)\nI0817 22:07:49.398821 17621 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:10:07.503428 17621 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:11:28.266757 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0817 22:11:28.267025 17621 solver.cpp:404]     Test net output #1: loss = 0.452552 (* 1 = 0.452552 loss)\nI0817 22:11:29.565629 17621 solver.cpp:228] Iteration 9900, loss = 0.000393066\nI0817 22:11:29.565690 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:11:29.565706 17621 solver.cpp:244]     Train net output #1: loss = 0.000393098 (* 1 = 0.000393098 loss)\nI0817 22:11:29.667955 17621 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:13:47.697530 17621 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kTr20kTab1_iter_10000.caffemodel\nI0817 22:13:47.919103 17621 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kTr20kTab1_iter_10000.solverstate\nI0817 22:13:48.359396 17621 solver.cpp:317] Iteration 10000, loss = 0.000465167\nI0817 22:13:48.359452 17621 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:15:09.124236 17621 solver.cpp:404]     Test net output #0: accuracy = 0.87924\nI0817 22:15:09.124533 17621 solver.cpp:404]     Test net output #1: loss = 0.45792 (* 1 = 0.45792 loss)\nI0817 22:15:09.124546 17621 solver.cpp:322] Optimization Done.\nI0817 22:15:14.435595 17621 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kTr30kTab1",
    "content": "I0817 16:06:11.855854 17619 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:06:11.858067 17619 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:06:11.859459 17619 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:06:11.860671 17619 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:06:11.861891 17619 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:06:11.863118 17619 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:06:11.864343 17619 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:06:11.865571 17619 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:06:11.866801 17619 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:06:12.283774 17619 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kTr30kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:06:12.287794 17619 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:06:12.302937 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:12.303014 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:12.304075 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:06:12.304131 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:06:12.304152 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:06:12.304177 17619 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:06:12.304198 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:06:12.304215 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:06:12.304234 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:06:12.304251 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:06:12.304271 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:06:12.304288 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:06:12.304307 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:06:12.304322 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:06:12.304342 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:06:12.304359 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:06:12.304379 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:06:12.304397 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:06:12.304415 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:06:12.304433 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:06:12.304451 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:06:12.304468 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:06:12.304503 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:06:12.304522 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:06:12.304548 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:06:12.304566 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:06:12.304584 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:06:12.304596 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:06:12.304615 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:06:12.304630 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:06:12.304647 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:06:12.304666 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:06:12.304685 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:06:12.304713 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:06:12.304733 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:06:12.304749 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:06:12.304767 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:06:12.304785 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:06:12.304802 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:06:12.304821 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:06:12.304838 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:06:12.304855 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:06:12.304879 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:06:12.304896 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:06:12.304913 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:06:12.304930 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:06:12.304950 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:06:12.304967 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:06:12.304986 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:06:12.305001 17619 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:06:12.305019 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:06:12.305035 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:06:12.305052 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:06:12.305079 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:06:12.305099 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:06:12.305119 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:06:12.305137 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:06:12.305152 17619 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:06:12.306900 17619 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train30k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:06:12.308953 17619 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:06:12.310138 17619 net.cpp:100] Creating Layer dataLayer\nI0817 16:06:12.310210 17619 net.cpp:408] dataLayer -> data_top\nI0817 16:06:12.310410 17619 net.cpp:408] dataLayer -> label\nI0817 16:06:12.310525 17619 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:06:12.443207 17624 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train30k_lmdb\nI0817 16:06:12.443711 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:12.450789 17619 net.cpp:150] Setting up dataLayer\nI0817 16:06:12.450850 17619 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:06:12.450863 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.450870 17619 net.cpp:165] Memory required for data: 1536500\nI0817 16:06:12.450883 17619 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:06:12.450897 17619 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:06:12.450906 17619 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:06:12.450922 17619 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:06:12.450937 17619 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:06:12.451020 17619 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:06:12.451033 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.451040 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.451045 17619 net.cpp:165] Memory required for data: 1537500\nI0817 16:06:12.451051 17619 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:06:12.451112 17619 
net.cpp:100] Creating Layer pre_conv\nI0817 16:06:12.451123 17619 net.cpp:434] pre_conv <- data_top\nI0817 16:06:12.451136 17619 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:06:12.452854 17619 net.cpp:150] Setting up pre_conv\nI0817 16:06:12.452873 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.452879 17619 net.cpp:165] Memory required for data: 9729500\nI0817 16:06:12.452947 17619 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:06:12.453013 17619 net.cpp:100] Creating Layer pre_bn\nI0817 16:06:12.453025 17619 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:06:12.453034 17619 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:06:12.453506 17625 blocking_queue.cpp:50] Waiting for data\nI0817 16:06:12.453601 17619 net.cpp:150] Setting up pre_bn\nI0817 16:06:12.453621 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.453627 17619 net.cpp:165] Memory required for data: 17921500\nI0817 16:06:12.453644 17619 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:06:12.453694 17619 net.cpp:100] Creating Layer pre_scale\nI0817 16:06:12.453716 17619 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:06:12.453725 17619 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:06:12.453888 17619 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:06:12.454157 17619 net.cpp:150] Setting up pre_scale\nI0817 16:06:12.454174 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.454179 17619 net.cpp:165] Memory required for data: 26113500\nI0817 16:06:12.454188 17619 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:06:12.454229 17619 net.cpp:100] Creating Layer pre_relu\nI0817 16:06:12.454238 17619 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:06:12.454249 17619 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:06:12.454260 17619 net.cpp:150] Setting up pre_relu\nI0817 16:06:12.454267 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.454272 17619 net.cpp:165] Memory required for data: 
34305500\nI0817 16:06:12.454277 17619 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:06:12.454285 17619 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:06:12.454290 17619 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:06:12.454299 17619 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:06:12.454309 17619 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:06:12.454355 17619 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:06:12.454365 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.454372 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.454377 17619 net.cpp:165] Memory required for data: 50689500\nI0817 16:06:12.454382 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:06:12.454397 17619 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:06:12.454403 17619 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:06:12.454412 17619 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:06:12.454744 17619 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:06:12.454761 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.454766 17619 net.cpp:165] Memory required for data: 58881500\nI0817 16:06:12.454778 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:06:12.454792 17619 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:06:12.454798 17619 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:06:12.454807 17619 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:06:12.455034 17619 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:06:12.455047 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.455052 17619 net.cpp:165] Memory required for data: 67073500\nI0817 16:06:12.455063 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:06:12.455078 17619 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:06:12.455083 17619 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:06:12.455091 17619 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.455142 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:06:12.455286 17619 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:06:12.455298 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.455303 17619 net.cpp:165] Memory required for data: 75265500\nI0817 16:06:12.455312 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:06:12.455328 17619 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:06:12.455334 17619 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:06:12.455344 17619 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.455354 17619 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:06:12.455361 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.455366 17619 net.cpp:165] Memory required for data: 83457500\nI0817 16:06:12.455370 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:06:12.455385 17619 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:06:12.455390 17619 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:06:12.455399 17619 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:06:12.455708 17619 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:06:12.455724 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.455729 17619 net.cpp:165] Memory required for data: 91649500\nI0817 16:06:12.455737 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:06:12.455749 17619 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:06:12.455755 17619 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:06:12.455763 17619 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:06:12.455989 17619 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:06:12.456003 17619 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456008 17619 net.cpp:165] Memory required for data: 99841500\nI0817 16:06:12.456022 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:06:12.456035 17619 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:06:12.456040 17619 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:06:12.456048 17619 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.456104 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:06:12.456243 17619 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:06:12.456255 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456260 17619 net.cpp:165] Memory required for data: 108033500\nI0817 16:06:12.456269 17619 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:06:12.456318 17619 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:06:12.456329 17619 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:06:12.456336 17619 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:06:12.456348 17619 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:06:12.456424 17619 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:06:12.456439 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456444 17619 net.cpp:165] Memory required for data: 116225500\nI0817 16:06:12.456449 17619 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:06:12.456459 17619 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:06:12.456465 17619 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:06:12.456475 17619 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.456485 17619 net.cpp:150] Setting up L1_b1_relu\nI0817 16:06:12.456491 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456496 17619 net.cpp:165] Memory required for data: 124417500\nI0817 16:06:12.456501 17619 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.456509 17619 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.456516 17619 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:06:12.456522 17619 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:06:12.456532 17619 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:06:12.456575 17619 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.456588 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456594 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456605 17619 net.cpp:165] Memory required for data: 140801500\nI0817 16:06:12.456611 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:06:12.456624 17619 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:06:12.456629 17619 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:06:12.456640 17619 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:06:12.456959 17619 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:06:12.456974 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.456979 17619 net.cpp:165] Memory required for data: 148993500\nI0817 16:06:12.456987 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:06:12.456996 17619 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:06:12.457002 17619 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:06:12.457015 17619 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:06:12.457262 17619 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:06:12.457278 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.457283 17619 net.cpp:165] Memory required for data: 157185500\nI0817 16:06:12.457294 17619 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:06:12.457304 17619 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:06:12.457309 17619 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:06:12.457316 17619 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.457366 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:06:12.457505 17619 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:06:12.457518 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.457523 17619 net.cpp:165] Memory required for data: 165377500\nI0817 16:06:12.457531 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:06:12.457542 17619 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:06:12.457548 17619 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:06:12.457556 17619 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.457564 17619 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:06:12.457574 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.457579 17619 net.cpp:165] Memory required for data: 173569500\nI0817 16:06:12.457584 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:06:12.457595 17619 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:06:12.457600 17619 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:06:12.457612 17619 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:06:12.457921 17619 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:06:12.457936 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.457940 17619 net.cpp:165] Memory required for data: 181761500\nI0817 16:06:12.457948 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:06:12.457957 17619 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:06:12.457963 17619 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:06:12.457976 17619 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:06:12.458216 17619 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:06:12.458230 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458235 17619 net.cpp:165] Memory required for data: 189953500\nI0817 16:06:12.458252 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:06:12.458262 17619 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:06:12.458268 17619 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:06:12.458276 17619 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.458330 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:06:12.458492 17619 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:06:12.458505 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458510 17619 net.cpp:165] Memory required for data: 198145500\nI0817 16:06:12.458519 17619 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:06:12.458537 17619 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:06:12.458544 17619 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:06:12.458551 17619 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:06:12.458559 17619 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:06:12.458592 17619 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:06:12.458602 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458607 17619 net.cpp:165] Memory required for data: 206337500\nI0817 16:06:12.458612 17619 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:06:12.458621 17619 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:06:12.458626 17619 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:06:12.458632 17619 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.458642 17619 net.cpp:150] Setting up L1_b2_relu\nI0817 16:06:12.458647 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458652 17619 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:06:12.458657 17619 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.458664 17619 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.458669 17619 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:06:12.458679 17619 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:06:12.458688 17619 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:06:12.458735 17619 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.458747 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458755 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.458760 17619 net.cpp:165] Memory required for data: 230913500\nI0817 16:06:12.458765 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:06:12.458780 17619 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:06:12.458786 17619 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:06:12.458794 17619 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:06:12.459100 17619 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:06:12.459113 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.459118 17619 net.cpp:165] Memory required for data: 239105500\nI0817 16:06:12.459127 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:06:12.459138 17619 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:06:12.459144 17619 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:06:12.459156 17619 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:06:12.459386 17619 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:06:12.459399 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.459404 17619 net.cpp:165] Memory required for data: 
247297500\nI0817 16:06:12.459414 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:06:12.459424 17619 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:06:12.459429 17619 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:06:12.459436 17619 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.459493 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:06:12.459632 17619 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:06:12.459645 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.459650 17619 net.cpp:165] Memory required for data: 255489500\nI0817 16:06:12.459659 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:06:12.459669 17619 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:06:12.459676 17619 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:06:12.459683 17619 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.459692 17619 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:06:12.459712 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.459718 17619 net.cpp:165] Memory required for data: 263681500\nI0817 16:06:12.459723 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:06:12.459738 17619 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:06:12.459743 17619 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:06:12.459753 17619 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:06:12.460062 17619 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:06:12.460077 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460081 17619 net.cpp:165] Memory required for data: 271873500\nI0817 16:06:12.460089 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:06:12.460106 17619 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:06:12.460113 17619 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:06:12.460120 17619 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:06:12.460350 17619 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:06:12.460363 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460368 17619 net.cpp:165] Memory required for data: 280065500\nI0817 16:06:12.460378 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:06:12.460387 17619 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:06:12.460392 17619 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:06:12.460403 17619 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.460455 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:06:12.460589 17619 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:06:12.460604 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460609 17619 net.cpp:165] Memory required for data: 288257500\nI0817 16:06:12.460618 17619 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:06:12.460628 17619 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:06:12.460633 17619 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:06:12.460639 17619 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:06:12.460646 17619 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:06:12.460680 17619 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:06:12.460690 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460695 17619 net.cpp:165] Memory required for data: 296449500\nI0817 16:06:12.460707 17619 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:06:12.460719 17619 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:06:12.460726 17619 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:06:12.460732 17619 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.460741 17619 net.cpp:150] Setting up L1_b3_relu\nI0817 16:06:12.460748 17619 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:06:12.460753 17619 net.cpp:165] Memory required for data: 304641500\nI0817 16:06:12.460757 17619 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.460764 17619 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.460769 17619 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:06:12.460779 17619 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:06:12.460788 17619 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:06:12.460829 17619 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.460846 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460853 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.460857 17619 net.cpp:165] Memory required for data: 321025500\nI0817 16:06:12.460862 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:06:12.460875 17619 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:06:12.460880 17619 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:06:12.460896 17619 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:06:12.461206 17619 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:06:12.461220 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.461225 17619 net.cpp:165] Memory required for data: 329217500\nI0817 16:06:12.461235 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:06:12.461246 17619 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:06:12.461251 17619 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:06:12.461259 17619 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:06:12.461498 17619 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:06:12.461513 17619 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:06:12.461518 17619 net.cpp:165] Memory required for data: 337409500\nI0817 16:06:12.461527 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:06:12.461536 17619 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:06:12.461542 17619 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:06:12.461552 17619 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.461606 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:06:12.461752 17619 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:06:12.461768 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.461774 17619 net.cpp:165] Memory required for data: 345601500\nI0817 16:06:12.461783 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:06:12.461791 17619 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:06:12.461796 17619 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:06:12.461803 17619 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.461812 17619 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:06:12.461819 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.461824 17619 net.cpp:165] Memory required for data: 353793500\nI0817 16:06:12.461829 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:06:12.461843 17619 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:06:12.461848 17619 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:06:12.461859 17619 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:06:12.462170 17619 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:06:12.462183 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462188 17619 net.cpp:165] Memory required for data: 361985500\nI0817 16:06:12.462198 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:06:12.462209 17619 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:06:12.462215 17619 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:06:12.462225 17619 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:06:12.462465 17619 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:06:12.462477 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462482 17619 net.cpp:165] Memory required for data: 370177500\nI0817 16:06:12.462493 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:06:12.462504 17619 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:06:12.462510 17619 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:06:12.462517 17619 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.462569 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:06:12.462710 17619 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:06:12.462723 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462729 17619 net.cpp:165] Memory required for data: 378369500\nI0817 16:06:12.462738 17619 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:06:12.462746 17619 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:06:12.462752 17619 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:06:12.462759 17619 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:06:12.462769 17619 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:06:12.462807 17619 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:06:12.462821 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462826 17619 net.cpp:165] Memory required for data: 386561500\nI0817 16:06:12.462831 17619 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:06:12.462838 17619 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:06:12.462844 17619 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:06:12.462852 17619 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.462860 17619 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:06:12.462867 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462872 17619 net.cpp:165] Memory required for data: 394753500\nI0817 16:06:12.462877 17619 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.462888 17619 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.462893 17619 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:06:12.462900 17619 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:06:12.462909 17619 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:06:12.462952 17619 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.462965 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462970 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.462975 17619 net.cpp:165] Memory required for data: 411137500\nI0817 16:06:12.462980 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:06:12.462991 17619 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:06:12.462997 17619 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:06:12.463008 17619 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:06:12.463320 17619 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:06:12.463333 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.463337 17619 net.cpp:165] Memory required for data: 419329500\nI0817 16:06:12.463357 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:06:12.463369 17619 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:06:12.463376 17619 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:06:12.463383 17619 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:06:12.463621 17619 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:06:12.463634 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.463639 17619 net.cpp:165] Memory required for data: 427521500\nI0817 16:06:12.463650 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:06:12.463660 17619 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:06:12.463665 17619 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:06:12.463675 17619 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.463733 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:06:12.463876 17619 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:06:12.463889 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.463893 17619 net.cpp:165] Memory required for data: 435713500\nI0817 16:06:12.463902 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:06:12.463910 17619 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:06:12.463917 17619 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:06:12.463923 17619 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.463933 17619 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:06:12.463939 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.463944 17619 net.cpp:165] Memory required for data: 443905500\nI0817 16:06:12.463949 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:06:12.463963 17619 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:06:12.463968 17619 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:06:12.463986 17619 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:06:12.464298 17619 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:06:12.464311 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.464316 17619 net.cpp:165] Memory required for data: 452097500\nI0817 16:06:12.464324 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:06:12.464339 17619 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:06:12.464344 17619 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:06:12.464354 17619 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:06:12.464586 17619 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:06:12.464597 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.464602 17619 net.cpp:165] Memory required for data: 460289500\nI0817 16:06:12.464612 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:06:12.464620 17619 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:06:12.464627 17619 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:06:12.464637 17619 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.464689 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:06:12.464831 17619 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:06:12.464849 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.464854 17619 net.cpp:165] Memory required for data: 468481500\nI0817 16:06:12.464864 17619 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:06:12.464872 17619 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:06:12.464879 17619 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:06:12.464884 17619 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:06:12.464892 17619 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:06:12.464926 17619 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:06:12.464937 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.464942 17619 net.cpp:165] Memory required for data: 476673500\nI0817 16:06:12.464947 17619 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:06:12.464957 17619 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:06:12.464963 17619 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:06:12.464970 17619 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.464979 17619 net.cpp:150] Setting up L1_b5_relu\nI0817 16:06:12.464987 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.464990 17619 net.cpp:165] Memory required for data: 484865500\nI0817 16:06:12.464995 17619 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.465003 17619 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.465008 17619 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:06:12.465018 17619 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:06:12.465028 17619 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:06:12.465068 17619 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.465080 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.465085 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.465090 17619 net.cpp:165] Memory required for data: 501249500\nI0817 16:06:12.465095 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:06:12.465109 17619 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:06:12.465116 17619 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:06:12.465124 17619 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:06:12.465435 17619 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:06:12.465448 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.465453 17619 net.cpp:165] Memory required for data: 509441500\nI0817 16:06:12.465469 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:06:12.465481 17619 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:06:12.465487 17619 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:06:12.465495 17619 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:06:12.465737 17619 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:06:12.465751 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.465756 17619 net.cpp:165] Memory required for data: 517633500\nI0817 16:06:12.465766 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:06:12.465775 17619 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:06:12.465780 17619 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:06:12.465791 17619 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.465842 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:06:12.465979 17619 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:06:12.465994 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.465999 17619 net.cpp:165] Memory required for data: 525825500\nI0817 16:06:12.466008 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:06:12.466017 17619 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:06:12.466022 17619 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:06:12.466029 17619 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.466038 17619 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:06:12.466045 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.466050 17619 net.cpp:165] Memory required for data: 534017500\nI0817 16:06:12.466054 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:06:12.466068 17619 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:06:12.466074 17619 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:06:12.466085 17619 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:06:12.466398 17619 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:06:12.466411 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.466416 17619 net.cpp:165] Memory required for data: 542209500\nI0817 16:06:12.466424 17619 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:06:12.466436 17619 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:06:12.466442 17619 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:06:12.466452 17619 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:06:12.466688 17619 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:06:12.466707 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.466713 17619 net.cpp:165] Memory required for data: 550401500\nI0817 16:06:12.466723 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:06:12.466732 17619 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:06:12.466738 17619 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:06:12.466747 17619 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.466801 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:06:12.466938 17619 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:06:12.466951 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.466956 17619 net.cpp:165] Memory required for data: 558593500\nI0817 16:06:12.466965 17619 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:06:12.466984 17619 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:06:12.466989 17619 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:06:12.466996 17619 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:06:12.467005 17619 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:06:12.467038 17619 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:06:12.467049 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467054 17619 net.cpp:165] Memory required for data: 566785500\nI0817 16:06:12.467061 17619 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:06:12.467077 17619 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:06:12.467082 17619 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:06:12.467092 17619 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.467103 17619 net.cpp:150] Setting up L1_b6_relu\nI0817 16:06:12.467109 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467113 17619 net.cpp:165] Memory required for data: 574977500\nI0817 16:06:12.467118 17619 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.467125 17619 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.467130 17619 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:06:12.467139 17619 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:06:12.467147 17619 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:06:12.467191 17619 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.467203 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467211 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467214 17619 net.cpp:165] Memory required for data: 591361500\nI0817 16:06:12.467219 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:06:12.467231 17619 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:06:12.467236 17619 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:06:12.467248 17619 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:06:12.467561 17619 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:06:12.467574 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467579 17619 net.cpp:165] Memory required for data: 599553500\nI0817 16:06:12.467588 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:06:12.467597 17619 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:06:12.467603 17619 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:06:12.467613 17619 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:06:12.467860 17619 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:06:12.467874 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.467878 17619 net.cpp:165] Memory required for data: 607745500\nI0817 16:06:12.467890 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:06:12.467900 17619 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:06:12.467906 17619 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:06:12.467914 17619 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.467965 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:06:12.468108 17619 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:06:12.468122 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.468125 17619 net.cpp:165] Memory required for data: 615937500\nI0817 16:06:12.468134 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:06:12.468142 17619 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:06:12.468148 17619 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:06:12.468158 17619 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.468168 17619 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:06:12.468174 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.468179 17619 net.cpp:165] Memory required for data: 624129500\nI0817 16:06:12.468183 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:06:12.468199 17619 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:06:12.468205 17619 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:06:12.468216 17619 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:06:12.468524 17619 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:06:12.468539 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.468544 17619 
net.cpp:165] Memory required for data: 632321500\nI0817 16:06:12.468560 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:06:12.468569 17619 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:06:12.468575 17619 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:06:12.468583 17619 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:06:12.468828 17619 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:06:12.468842 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.468847 17619 net.cpp:165] Memory required for data: 640513500\nI0817 16:06:12.468858 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:06:12.468869 17619 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:06:12.468876 17619 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:06:12.468883 17619 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.468940 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:06:12.469077 17619 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:06:12.469090 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469095 17619 net.cpp:165] Memory required for data: 648705500\nI0817 16:06:12.469105 17619 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:06:12.469113 17619 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:06:12.469120 17619 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:06:12.469125 17619 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:06:12.469136 17619 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:06:12.469166 17619 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:06:12.469179 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469184 17619 net.cpp:165] Memory required for data: 656897500\nI0817 16:06:12.469189 17619 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:06:12.469197 17619 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:06:12.469202 17619 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:06:12.469209 17619 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.469218 17619 net.cpp:150] Setting up L1_b7_relu\nI0817 16:06:12.469225 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469229 17619 net.cpp:165] Memory required for data: 665089500\nI0817 16:06:12.469234 17619 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.469244 17619 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.469249 17619 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:06:12.469256 17619 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:06:12.469266 17619 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:06:12.469310 17619 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.469321 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469328 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469332 17619 net.cpp:165] Memory required for data: 681473500\nI0817 16:06:12.469338 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:06:12.469349 17619 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:06:12.469355 17619 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:06:12.469367 17619 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:06:12.469679 17619 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:06:12.469693 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.469704 17619 net.cpp:165] Memory required for data: 689665500\nI0817 16:06:12.469714 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:06:12.469723 17619 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:06:12.469729 17619 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:06:12.469738 17619 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:06:12.469987 17619 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:06:12.470001 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.470006 17619 net.cpp:165] Memory required for data: 697857500\nI0817 16:06:12.470016 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:06:12.470027 17619 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:06:12.470033 17619 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:06:12.470041 17619 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.470095 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:06:12.470240 17619 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:06:12.470254 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.470259 17619 net.cpp:165] Memory required for data: 706049500\nI0817 16:06:12.470268 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:06:12.470275 17619 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:06:12.470281 17619 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:06:12.470291 17619 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.470301 17619 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:06:12.470309 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.470312 17619 net.cpp:165] Memory required for data: 714241500\nI0817 16:06:12.470317 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:06:12.470331 17619 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:06:12.470337 17619 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:06:12.470345 17619 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:06:12.470655 17619 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:06:12.470669 17619 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.470674 17619 net.cpp:165] Memory required for data: 722433500\nI0817 16:06:12.470683 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:06:12.470695 17619 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:06:12.470707 17619 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:06:12.470716 17619 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:06:12.470964 17619 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:06:12.470978 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.470983 17619 net.cpp:165] Memory required for data: 730625500\nI0817 16:06:12.470993 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:06:12.471002 17619 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:06:12.471009 17619 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:06:12.471019 17619 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.471073 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:06:12.471213 17619 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:06:12.471226 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471231 17619 net.cpp:165] Memory required for data: 738817500\nI0817 16:06:12.471240 17619 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:06:12.471248 17619 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:06:12.471254 17619 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:06:12.471261 17619 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:06:12.471271 17619 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:06:12.471302 17619 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:06:12.471315 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471320 17619 net.cpp:165] Memory required for data: 747009500\nI0817 16:06:12.471325 17619 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:06:12.471333 17619 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:06:12.471338 17619 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:06:12.471345 17619 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.471354 17619 net.cpp:150] Setting up L1_b8_relu\nI0817 16:06:12.471361 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471372 17619 net.cpp:165] Memory required for data: 755201500\nI0817 16:06:12.471379 17619 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.471388 17619 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.471393 17619 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:06:12.471401 17619 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:06:12.471410 17619 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:06:12.471457 17619 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.471467 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471474 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471478 17619 net.cpp:165] Memory required for data: 771585500\nI0817 16:06:12.471484 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:06:12.471494 17619 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:06:12.471500 17619 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:06:12.471513 17619 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:06:12.471844 17619 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:06:12.471859 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.471864 17619 net.cpp:165] Memory required for data: 779777500\nI0817 16:06:12.471873 17619 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:06:12.471887 17619 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:06:12.471894 17619 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:06:12.471902 17619 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:06:12.472151 17619 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:06:12.472163 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.472168 17619 net.cpp:165] Memory required for data: 787969500\nI0817 16:06:12.472178 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:06:12.472187 17619 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:06:12.472193 17619 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:06:12.472200 17619 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.472254 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:06:12.472404 17619 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:06:12.472416 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.472421 17619 net.cpp:165] Memory required for data: 796161500\nI0817 16:06:12.472430 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:06:12.472439 17619 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:06:12.472445 17619 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:06:12.472455 17619 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.472463 17619 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:06:12.472470 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.472476 17619 net.cpp:165] Memory required for data: 804353500\nI0817 16:06:12.472481 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:06:12.472491 17619 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:06:12.472496 17619 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:06:12.472507 17619 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:06:12.472836 17619 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:06:12.472851 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.472856 17619 net.cpp:165] Memory required for data: 812545500\nI0817 16:06:12.472864 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:06:12.472873 17619 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:06:12.472879 17619 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:06:12.472889 17619 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:06:12.473140 17619 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:06:12.473155 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473160 17619 net.cpp:165] Memory required for data: 820737500\nI0817 16:06:12.473188 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:06:12.473201 17619 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:06:12.473206 17619 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:06:12.473214 17619 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.473268 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:06:12.473413 17619 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:06:12.473428 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473433 17619 net.cpp:165] Memory required for data: 828929500\nI0817 16:06:12.473440 17619 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:06:12.473450 17619 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:06:12.473456 17619 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:06:12.473464 17619 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:06:12.473474 17619 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:06:12.473505 17619 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:06:12.473517 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473522 17619 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:06:12.473528 17619 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:06:12.473536 17619 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:06:12.473541 17619 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:06:12.473551 17619 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.473561 17619 net.cpp:150] Setting up L1_b9_relu\nI0817 16:06:12.473567 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473572 17619 net.cpp:165] Memory required for data: 845313500\nI0817 16:06:12.473577 17619 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.473583 17619 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.473588 17619 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:06:12.473599 17619 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:06:12.473609 17619 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:06:12.473655 17619 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.473666 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473673 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.473678 17619 net.cpp:165] Memory required for data: 861697500\nI0817 16:06:12.473683 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:06:12.473695 17619 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:06:12.473706 17619 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:06:12.473719 17619 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:06:12.474040 17619 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:06:12.474054 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.474061 17619 net.cpp:165] Memory required for data: 
863745500\nI0817 16:06:12.474069 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:06:12.474078 17619 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:06:12.474084 17619 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:06:12.474095 17619 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:06:12.474333 17619 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:06:12.474345 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.474350 17619 net.cpp:165] Memory required for data: 865793500\nI0817 16:06:12.474360 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:06:12.474373 17619 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:06:12.474385 17619 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:06:12.474395 17619 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.474447 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:06:12.474587 17619 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:06:12.474601 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.474606 17619 net.cpp:165] Memory required for data: 867841500\nI0817 16:06:12.474614 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:06:12.474624 17619 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:06:12.474630 17619 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:06:12.474638 17619 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.474647 17619 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:06:12.474654 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.474658 17619 net.cpp:165] Memory required for data: 869889500\nI0817 16:06:12.474663 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:06:12.474676 17619 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:06:12.474684 17619 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:06:12.474694 17619 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:06:12.475014 17619 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:06:12.475029 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475034 17619 net.cpp:165] Memory required for data: 871937500\nI0817 16:06:12.475044 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:06:12.475051 17619 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:06:12.475059 17619 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:06:12.475070 17619 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:06:12.475316 17619 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:06:12.475329 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475333 17619 net.cpp:165] Memory required for data: 873985500\nI0817 16:06:12.475344 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:06:12.475356 17619 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:06:12.475363 17619 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:06:12.475369 17619 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.475422 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:06:12.475565 17619 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:06:12.475579 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475584 17619 net.cpp:165] Memory required for data: 876033500\nI0817 16:06:12.475591 17619 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:06:12.475602 17619 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:06:12.475608 17619 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:06:12.475620 17619 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:06:12.475709 17619 net.cpp:150] Setting up L2_b1_pool\nI0817 16:06:12.475728 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475734 17619 net.cpp:165] Memory required for data: 878081500\nI0817 16:06:12.475740 17619 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:06:12.475749 17619 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:06:12.475755 17619 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:06:12.475762 17619 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:06:12.475772 17619 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:06:12.475806 17619 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:06:12.475816 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475821 17619 net.cpp:165] Memory required for data: 880129500\nI0817 16:06:12.475826 17619 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:06:12.475834 17619 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:06:12.475841 17619 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:06:12.475847 17619 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.475864 17619 net.cpp:150] Setting up L2_b1_relu\nI0817 16:06:12.475872 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.475877 17619 net.cpp:165] Memory required for data: 882177500\nI0817 16:06:12.475881 17619 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:06:12.475929 17619 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:06:12.475944 17619 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:06:12.478292 17619 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:06:12.478312 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.478317 17619 net.cpp:165] Memory required for data: 884225500\nI0817 16:06:12.478322 17619 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:06:12.478332 17619 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:06:12.478339 17619 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:06:12.478346 17619 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:06:12.478354 17619 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:06:12.478433 17619 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:06:12.478447 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.478452 17619 net.cpp:165] Memory required for data: 888321500\nI0817 16:06:12.478458 17619 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.478469 17619 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.478476 17619 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:06:12.478483 17619 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:06:12.478493 17619 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:06:12.478543 17619 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.478555 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.478561 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.478566 17619 net.cpp:165] Memory required for data: 896513500\nI0817 16:06:12.478571 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:06:12.478585 17619 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:06:12.478592 17619 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:06:12.478601 17619 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:06:12.480047 17619 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:06:12.480064 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.480070 17619 net.cpp:165] Memory required for data: 900609500\nI0817 16:06:12.480079 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:06:12.480089 17619 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:06:12.480096 17619 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:06:12.480108 17619 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:06:12.480352 17619 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:06:12.480365 17619 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:06:12.480370 17619 net.cpp:165] Memory required for data: 904705500\nI0817 16:06:12.480381 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:06:12.480392 17619 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:06:12.480399 17619 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:06:12.480407 17619 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.480461 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:06:12.480607 17619 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:06:12.480620 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.480626 17619 net.cpp:165] Memory required for data: 908801500\nI0817 16:06:12.480635 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:06:12.480646 17619 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:06:12.480653 17619 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:06:12.480659 17619 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.480677 17619 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:06:12.480685 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.480690 17619 net.cpp:165] Memory required for data: 912897500\nI0817 16:06:12.480695 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:06:12.480715 17619 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:06:12.480723 17619 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:06:12.480734 17619 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:06:12.481187 17619 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:06:12.481201 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481207 17619 net.cpp:165] Memory required for data: 916993500\nI0817 16:06:12.481215 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:06:12.481225 17619 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:06:12.481230 
17619 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:06:12.481242 17619 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:06:12.481487 17619 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:06:12.481499 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481504 17619 net.cpp:165] Memory required for data: 921089500\nI0817 16:06:12.481515 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:06:12.481526 17619 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:06:12.481534 17619 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:06:12.481540 17619 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.481595 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:06:12.481744 17619 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:06:12.481757 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481762 17619 net.cpp:165] Memory required for data: 925185500\nI0817 16:06:12.481771 17619 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:06:12.481783 17619 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:06:12.481789 17619 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:06:12.481797 17619 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:06:12.481804 17619 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:06:12.481834 17619 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:06:12.481844 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481848 17619 net.cpp:165] Memory required for data: 929281500\nI0817 16:06:12.481854 17619 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:06:12.481861 17619 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:06:12.481868 17619 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:06:12.481874 17619 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.481885 17619 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:06:12.481894 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481897 17619 net.cpp:165] Memory required for data: 933377500\nI0817 16:06:12.481902 17619 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.481909 17619 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.481915 17619 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:06:12.481922 17619 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:06:12.481930 17619 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:06:12.481977 17619 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.481989 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.481997 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.482000 17619 net.cpp:165] Memory required for data: 941569500\nI0817 16:06:12.482005 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:06:12.482024 17619 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:06:12.482030 17619 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:06:12.482043 17619 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:06:12.482523 17619 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:06:12.482556 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.482563 17619 net.cpp:165] Memory required for data: 945665500\nI0817 16:06:12.482573 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:06:12.482581 17619 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:06:12.482587 17619 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:06:12.482599 17619 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:06:12.482858 17619 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:06:12.482872 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.482877 17619 net.cpp:165] Memory required for data: 949761500\nI0817 16:06:12.482887 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:06:12.482899 17619 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:06:12.482905 17619 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:06:12.482913 17619 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.482990 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:06:12.483136 17619 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:06:12.483150 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.483155 17619 net.cpp:165] Memory required for data: 953857500\nI0817 16:06:12.483163 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:06:12.483175 17619 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:06:12.483180 17619 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:06:12.483188 17619 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.483197 17619 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:06:12.483204 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.483208 17619 net.cpp:165] Memory required for data: 957953500\nI0817 16:06:12.483213 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:06:12.483227 17619 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:06:12.483233 17619 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:06:12.483244 17619 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:06:12.483691 17619 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:06:12.483713 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.483718 17619 net.cpp:165] Memory required for data: 962049500\nI0817 16:06:12.483727 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:06:12.483736 17619 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:06:12.483742 17619 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:06:12.483750 17619 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:06:12.483994 17619 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:06:12.484007 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484012 17619 net.cpp:165] Memory required for data: 966145500\nI0817 16:06:12.484022 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:06:12.484031 17619 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:06:12.484037 17619 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:06:12.484047 17619 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.484102 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:06:12.484247 17619 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:06:12.484261 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484266 17619 net.cpp:165] Memory required for data: 970241500\nI0817 16:06:12.484274 17619 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:06:12.484283 17619 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:06:12.484289 17619 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:06:12.484297 17619 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:06:12.484313 17619 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:06:12.484342 17619 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:06:12.484354 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484359 17619 net.cpp:165] Memory required for data: 974337500\nI0817 16:06:12.484364 17619 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:06:12.484385 17619 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:06:12.484392 17619 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:06:12.484400 17619 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.484408 17619 net.cpp:150] Setting up L2_b3_relu\nI0817 16:06:12.484416 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484421 17619 net.cpp:165] Memory required for data: 978433500\nI0817 16:06:12.484426 17619 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.484433 17619 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.484438 17619 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:06:12.484448 17619 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:06:12.484458 17619 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:06:12.484503 17619 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.484514 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484520 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.484525 17619 net.cpp:165] Memory required for data: 986625500\nI0817 16:06:12.484530 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:06:12.484547 17619 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:06:12.484553 17619 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:06:12.484562 17619 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:06:12.485033 17619 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:06:12.485047 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.485052 17619 net.cpp:165] Memory required for data: 990721500\nI0817 16:06:12.485061 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:06:12.485074 17619 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:06:12.485080 17619 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:06:12.485088 17619 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:06:12.485330 17619 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:06:12.485343 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.485348 17619 net.cpp:165] Memory required for data: 994817500\nI0817 16:06:12.485358 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:06:12.485368 17619 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:06:12.485373 17619 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:06:12.485383 17619 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.485437 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:06:12.485580 17619 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:06:12.485594 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.485599 17619 net.cpp:165] Memory required for data: 998913500\nI0817 16:06:12.485607 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:06:12.485615 17619 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:06:12.485621 17619 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:06:12.485631 17619 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.485641 17619 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:06:12.485648 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.485652 17619 net.cpp:165] Memory required for data: 1003009500\nI0817 16:06:12.485657 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:06:12.485678 17619 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:06:12.485685 17619 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:06:12.485693 17619 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:06:12.486152 17619 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:06:12.486167 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486173 17619 net.cpp:165] Memory required for data: 1007105500\nI0817 16:06:12.486181 17619 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:06:12.486196 17619 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:06:12.486202 17619 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:06:12.486212 17619 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:06:12.486452 17619 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:06:12.486465 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486469 17619 net.cpp:165] Memory required for data: 1011201500\nI0817 16:06:12.486480 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:06:12.486488 17619 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:06:12.486495 17619 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:06:12.486502 17619 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.486560 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:06:12.486706 17619 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:06:12.486723 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486728 17619 net.cpp:165] Memory required for data: 1015297500\nI0817 16:06:12.486737 17619 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:06:12.486747 17619 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:06:12.486753 17619 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:06:12.486759 17619 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:06:12.486768 17619 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:06:12.486796 17619 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:06:12.486806 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486811 17619 net.cpp:165] Memory required for data: 1019393500\nI0817 16:06:12.486816 17619 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:06:12.486824 17619 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:06:12.486829 17619 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:06:12.486838 17619 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.486848 17619 net.cpp:150] Setting up L2_b4_relu\nI0817 16:06:12.486855 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486860 17619 net.cpp:165] Memory required for data: 1023489500\nI0817 16:06:12.486865 17619 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.486871 17619 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.486876 17619 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:06:12.486886 17619 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:06:12.486896 17619 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:06:12.486938 17619 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.486950 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486956 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.486961 17619 net.cpp:165] Memory required for data: 1031681500\nI0817 16:06:12.486966 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:06:12.486981 17619 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:06:12.486987 17619 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:06:12.486996 17619 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:06:12.487454 17619 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:06:12.487474 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.487480 17619 net.cpp:165] Memory required for data: 1035777500\nI0817 16:06:12.487489 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:06:12.487500 17619 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:06:12.487507 17619 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:06:12.487515 17619 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:06:12.487762 17619 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:06:12.487776 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.487782 17619 net.cpp:165] Memory required for data: 1039873500\nI0817 16:06:12.487792 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:06:12.487800 17619 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:06:12.487807 17619 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:06:12.487813 17619 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.487870 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:06:12.488015 17619 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:06:12.488031 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.488036 17619 net.cpp:165] Memory required for data: 1043969500\nI0817 16:06:12.488045 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:06:12.488054 17619 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:06:12.488059 17619 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:06:12.488066 17619 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.488075 17619 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:06:12.488082 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.488086 17619 net.cpp:165] Memory required for data: 1048065500\nI0817 16:06:12.488091 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:06:12.488107 17619 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:06:12.488113 17619 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:06:12.488124 17619 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:06:12.488584 17619 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:06:12.488598 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.488603 17619 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:06:12.488612 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:06:12.488625 17619 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:06:12.488631 17619 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:06:12.488641 17619 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:06:12.488886 17619 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:06:12.488899 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.488904 17619 net.cpp:165] Memory required for data: 1056257500\nI0817 16:06:12.488915 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:06:12.488924 17619 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:06:12.488930 17619 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:06:12.488937 17619 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.488993 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:06:12.489135 17619 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:06:12.489146 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489151 17619 net.cpp:165] Memory required for data: 1060353500\nI0817 16:06:12.489161 17619 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:06:12.489172 17619 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:06:12.489178 17619 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:06:12.489186 17619 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:06:12.489192 17619 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:06:12.489222 17619 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:06:12.489231 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489243 17619 net.cpp:165] Memory required for data: 1064449500\nI0817 16:06:12.489248 17619 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:06:12.489255 17619 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:06:12.489261 17619 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:06:12.489271 17619 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.489280 17619 net.cpp:150] Setting up L2_b5_relu\nI0817 16:06:12.489287 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489292 17619 net.cpp:165] Memory required for data: 1068545500\nI0817 16:06:12.489296 17619 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.489305 17619 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.489310 17619 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:06:12.489316 17619 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:06:12.489326 17619 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:06:12.489372 17619 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.489383 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489389 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489394 17619 net.cpp:165] Memory required for data: 1076737500\nI0817 16:06:12.489399 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:06:12.489413 17619 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:06:12.489419 17619 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:06:12.489428 17619 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:06:12.489898 17619 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:06:12.489913 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.489918 17619 net.cpp:165] Memory required for data: 1080833500\nI0817 16:06:12.489928 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:06:12.489936 17619 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:06:12.489943 17619 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:06:12.489956 17619 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:06:12.490198 17619 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:06:12.490211 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.490216 17619 net.cpp:165] Memory required for data: 1084929500\nI0817 16:06:12.490226 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:06:12.490236 17619 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:06:12.490242 17619 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:06:12.490249 17619 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.490304 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:06:12.490445 17619 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:06:12.490460 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.490465 17619 net.cpp:165] Memory required for data: 1089025500\nI0817 16:06:12.490474 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:06:12.490483 17619 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:06:12.490489 17619 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:06:12.490495 17619 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.490504 17619 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:06:12.490511 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.490516 17619 net.cpp:165] Memory required for data: 1093121500\nI0817 16:06:12.490521 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:06:12.490535 17619 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:06:12.490540 17619 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:06:12.490551 17619 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:06:12.491016 17619 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:06:12.491037 17619 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491044 17619 net.cpp:165] Memory required for data: 1097217500\nI0817 16:06:12.491052 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:06:12.491065 17619 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:06:12.491070 17619 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:06:12.491081 17619 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:06:12.491322 17619 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:06:12.491335 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491339 17619 net.cpp:165] Memory required for data: 1101313500\nI0817 16:06:12.491350 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:06:12.491358 17619 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:06:12.491364 17619 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:06:12.491372 17619 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.491431 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:06:12.491574 17619 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:06:12.491586 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491591 17619 net.cpp:165] Memory required for data: 1105409500\nI0817 16:06:12.491600 17619 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:06:12.491611 17619 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:06:12.491618 17619 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:06:12.491626 17619 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:06:12.491632 17619 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:06:12.491659 17619 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:06:12.491668 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491673 17619 net.cpp:165] Memory required for data: 1109505500\nI0817 16:06:12.491678 17619 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:06:12.491688 17619 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:06:12.491694 17619 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:06:12.491708 17619 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.491717 17619 net.cpp:150] Setting up L2_b6_relu\nI0817 16:06:12.491725 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491729 17619 net.cpp:165] Memory required for data: 1113601500\nI0817 16:06:12.491734 17619 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.491742 17619 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.491747 17619 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:06:12.491755 17619 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:06:12.491763 17619 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:06:12.491811 17619 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.491822 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491828 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.491833 17619 net.cpp:165] Memory required for data: 1121793500\nI0817 16:06:12.491838 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:06:12.491852 17619 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:06:12.491858 17619 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:06:12.491868 17619 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:06:12.492331 17619 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:06:12.492346 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.492350 17619 net.cpp:165] Memory required for data: 1125889500\nI0817 16:06:12.492358 17619 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:06:12.492370 17619 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:06:12.492383 17619 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:06:12.492396 17619 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:06:12.492640 17619 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:06:12.492653 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.492658 17619 net.cpp:165] Memory required for data: 1129985500\nI0817 16:06:12.492668 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:06:12.492677 17619 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:06:12.492683 17619 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:06:12.492691 17619 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.492756 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:06:12.492903 17619 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:06:12.492916 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.492921 17619 net.cpp:165] Memory required for data: 1134081500\nI0817 16:06:12.492930 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:06:12.492940 17619 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:06:12.492947 17619 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:06:12.492954 17619 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.492964 17619 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:06:12.492970 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.492975 17619 net.cpp:165] Memory required for data: 1138177500\nI0817 16:06:12.492980 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:06:12.492995 17619 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:06:12.493001 17619 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:06:12.493012 17619 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:06:12.493480 17619 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:06:12.493494 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.493499 17619 net.cpp:165] Memory required for data: 1142273500\nI0817 16:06:12.493508 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:06:12.493520 17619 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:06:12.493526 17619 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:06:12.493537 17619 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:06:12.493789 17619 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:06:12.493808 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.493813 17619 net.cpp:165] Memory required for data: 1146369500\nI0817 16:06:12.493822 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:06:12.493831 17619 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:06:12.493837 17619 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:06:12.493845 17619 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.493902 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:06:12.494048 17619 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:06:12.494061 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494066 17619 net.cpp:165] Memory required for data: 1150465500\nI0817 16:06:12.494076 17619 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:06:12.494083 17619 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:06:12.494093 17619 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:06:12.494101 17619 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:06:12.494107 17619 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:06:12.494134 17619 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:06:12.494143 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494148 17619 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:06:12.494153 17619 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:06:12.494163 17619 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:06:12.494169 17619 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:06:12.494184 17619 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.494194 17619 net.cpp:150] Setting up L2_b7_relu\nI0817 16:06:12.494200 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494204 17619 net.cpp:165] Memory required for data: 1158657500\nI0817 16:06:12.494210 17619 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.494216 17619 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.494221 17619 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:06:12.494228 17619 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:06:12.494238 17619 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:06:12.494285 17619 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.494297 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494303 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494308 17619 net.cpp:165] Memory required for data: 1166849500\nI0817 16:06:12.494313 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:06:12.494328 17619 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:06:12.494334 17619 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:06:12.494343 17619 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:06:12.494818 17619 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:06:12.494832 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.494838 17619 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:06:12.494846 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:06:12.494858 17619 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:06:12.494864 17619 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:06:12.494875 17619 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:06:12.495127 17619 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:06:12.495141 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.495146 17619 net.cpp:165] Memory required for data: 1175041500\nI0817 16:06:12.495156 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:06:12.495164 17619 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:06:12.495172 17619 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:06:12.495178 17619 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.495236 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:06:12.495386 17619 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:06:12.495399 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.495404 17619 net.cpp:165] Memory required for data: 1179137500\nI0817 16:06:12.495414 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:06:12.495424 17619 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:06:12.495430 17619 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:06:12.495437 17619 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.495446 17619 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:06:12.495453 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.495458 17619 net.cpp:165] Memory required for data: 1183233500\nI0817 16:06:12.495463 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:06:12.495476 17619 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:06:12.495483 17619 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:06:12.495491 17619 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:06:12.495965 17619 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:06:12.495980 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.495985 17619 net.cpp:165] Memory required for data: 1187329500\nI0817 16:06:12.495993 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:06:12.496004 17619 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:06:12.496019 17619 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:06:12.496027 17619 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:06:12.496282 17619 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:06:12.496299 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.496304 17619 net.cpp:165] Memory required for data: 1191425500\nI0817 16:06:12.496315 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:06:12.496322 17619 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:06:12.496330 17619 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:06:12.496336 17619 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.496392 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:06:12.496546 17619 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:06:12.496558 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.496563 17619 net.cpp:165] Memory required for data: 1195521500\nI0817 16:06:12.496572 17619 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:06:12.496580 17619 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:06:12.496587 17619 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:06:12.496593 17619 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:06:12.496604 17619 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:06:12.496631 17619 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:06:12.496640 17619 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:06:12.496645 17619 net.cpp:165] Memory required for data: 1199617500\nI0817 16:06:12.496650 17619 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:06:12.496661 17619 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:06:12.496667 17619 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:06:12.496675 17619 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.496683 17619 net.cpp:150] Setting up L2_b8_relu\nI0817 16:06:12.496690 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.496695 17619 net.cpp:165] Memory required for data: 1203713500\nI0817 16:06:12.496706 17619 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.496713 17619 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.496719 17619 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:06:12.496726 17619 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:06:12.496749 17619 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:06:12.496799 17619 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.496812 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.496819 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.496824 17619 net.cpp:165] Memory required for data: 1211905500\nI0817 16:06:12.496829 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:06:12.496840 17619 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:06:12.496846 17619 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:06:12.496860 17619 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:06:12.497329 17619 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:06:12.497344 17619 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:06:12.497349 17619 net.cpp:165] Memory required for data: 1216001500\nI0817 16:06:12.497356 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:06:12.497369 17619 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:06:12.497375 17619 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:06:12.497383 17619 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:06:12.497632 17619 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:06:12.497647 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.497651 17619 net.cpp:165] Memory required for data: 1220097500\nI0817 16:06:12.497669 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:06:12.497678 17619 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:06:12.497684 17619 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:06:12.497691 17619 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.497753 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:06:12.497906 17619 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:06:12.497920 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.497925 17619 net.cpp:165] Memory required for data: 1224193500\nI0817 16:06:12.497933 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:06:12.497941 17619 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:06:12.497947 17619 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:06:12.497957 17619 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.497967 17619 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:06:12.497974 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.497979 17619 net.cpp:165] Memory required for data: 1228289500\nI0817 16:06:12.497983 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:06:12.497995 17619 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:06:12.498000 17619 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:06:12.498011 17619 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:06:12.498476 17619 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:06:12.498491 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.498495 17619 net.cpp:165] Memory required for data: 1232385500\nI0817 16:06:12.498504 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:06:12.498513 17619 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:06:12.498520 17619 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:06:12.498531 17619 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:06:12.498795 17619 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:06:12.498808 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.498813 17619 net.cpp:165] Memory required for data: 1236481500\nI0817 16:06:12.498857 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:06:12.498869 17619 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:06:12.498875 17619 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:06:12.498883 17619 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.498944 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:06:12.499091 17619 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:06:12.499109 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.499114 17619 net.cpp:165] Memory required for data: 1240577500\nI0817 16:06:12.499122 17619 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:06:12.499132 17619 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:06:12.499138 17619 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:06:12.499145 17619 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:06:12.499153 17619 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:06:12.499183 17619 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:06:12.499195 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.499200 17619 net.cpp:165] Memory required for data: 1244673500\nI0817 16:06:12.499205 17619 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:06:12.499213 17619 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:06:12.499219 17619 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:06:12.499229 17619 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.499238 17619 net.cpp:150] Setting up L2_b9_relu\nI0817 16:06:12.499245 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.499250 17619 net.cpp:165] Memory required for data: 1248769500\nI0817 16:06:12.499255 17619 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.499269 17619 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.499275 17619 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:06:12.499285 17619 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:06:12.499295 17619 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:06:12.499341 17619 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.499352 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.499359 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.499363 17619 net.cpp:165] Memory required for data: 1256961500\nI0817 16:06:12.499368 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:06:12.499382 17619 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:06:12.499389 17619 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:06:12.499398 17619 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:06:12.499882 17619 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:06:12.499897 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.499902 17619 net.cpp:165] Memory required for data: 1257985500\nI0817 16:06:12.499912 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:06:12.499923 17619 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:06:12.499929 17619 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:06:12.499938 17619 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:06:12.500208 17619 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:06:12.500222 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.500226 17619 net.cpp:165] Memory required for data: 1259009500\nI0817 16:06:12.500237 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:06:12.500248 17619 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:06:12.500255 17619 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:06:12.500262 17619 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.500320 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:06:12.500474 17619 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:06:12.500488 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.500494 17619 net.cpp:165] Memory required for data: 1260033500\nI0817 16:06:12.500501 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:06:12.500512 17619 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:06:12.500519 17619 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:06:12.500526 17619 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.500535 17619 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:06:12.500545 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.500550 17619 net.cpp:165] Memory required for data: 1261057500\nI0817 16:06:12.500555 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:06:12.500566 17619 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:06:12.500571 17619 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:06:12.500581 17619 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:06:12.501060 17619 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:06:12.501075 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.501080 17619 net.cpp:165] Memory required for data: 1262081500\nI0817 16:06:12.501088 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:06:12.501097 17619 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:06:12.501104 17619 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:06:12.501114 17619 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:06:12.501375 17619 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:06:12.501389 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.501394 17619 net.cpp:165] Memory required for data: 1263105500\nI0817 16:06:12.501410 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:06:12.501420 17619 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:06:12.501425 17619 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:06:12.501433 17619 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.501493 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:06:12.501646 17619 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:06:12.501662 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.501667 17619 net.cpp:165] Memory required for data: 1264129500\nI0817 16:06:12.501677 17619 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:06:12.501685 17619 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:06:12.501691 17619 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:06:12.501704 17619 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:06:12.501744 17619 net.cpp:150] Setting up L3_b1_pool\nI0817 16:06:12.501757 17619 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:06:12.501762 17619 net.cpp:165] Memory required for data: 1265153500\nI0817 16:06:12.501767 17619 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:06:12.501776 17619 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:06:12.501782 17619 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:06:12.501788 17619 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:06:12.501796 17619 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:06:12.501832 17619 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:06:12.501842 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.501845 17619 net.cpp:165] Memory required for data: 1266177500\nI0817 16:06:12.501850 17619 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:06:12.501858 17619 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:06:12.501864 17619 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:06:12.501870 17619 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.501880 17619 net.cpp:150] Setting up L3_b1_relu\nI0817 16:06:12.501886 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.501891 17619 net.cpp:165] Memory required for data: 1267201500\nI0817 16:06:12.501896 17619 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:06:12.501905 17619 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:06:12.501915 17619 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:06:12.503167 17619 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:06:12.503186 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.503191 17619 net.cpp:165] Memory required for data: 1268225500\nI0817 16:06:12.503197 17619 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:06:12.503211 17619 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:06:12.503217 17619 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:06:12.503224 17619 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:06:12.503232 17619 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:06:12.503275 17619 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:06:12.503288 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.503293 17619 net.cpp:165] Memory required for data: 1270273500\nI0817 16:06:12.503298 17619 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.503305 17619 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.503311 17619 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:06:12.503321 17619 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:06:12.503331 17619 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:06:12.503382 17619 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.503396 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.503403 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.503415 17619 net.cpp:165] Memory required for data: 1274369500\nI0817 16:06:12.503422 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:06:12.503435 17619 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:06:12.503443 17619 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:06:12.503453 17619 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:06:12.505434 17619 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:06:12.505452 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.505457 17619 net.cpp:165] Memory required for data: 1276417500\nI0817 16:06:12.505467 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:06:12.505481 17619 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:06:12.505486 17619 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:06:12.505496 17619 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:06:12.505766 17619 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:06:12.505779 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.505785 17619 net.cpp:165] Memory required for data: 1278465500\nI0817 16:06:12.505795 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:06:12.505807 17619 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:06:12.505815 17619 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:06:12.505825 17619 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.505882 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:06:12.506038 17619 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:06:12.506052 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.506057 17619 net.cpp:165] Memory required for data: 1280513500\nI0817 16:06:12.506065 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:06:12.506073 17619 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:06:12.506079 17619 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:06:12.506090 17619 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.506100 17619 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:06:12.506108 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.506112 17619 net.cpp:165] Memory required for data: 1282561500\nI0817 16:06:12.506116 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:06:12.506129 17619 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:06:12.506134 17619 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:06:12.506145 17619 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:06:12.507169 17619 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:06:12.507184 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.507189 17619 net.cpp:165] Memory required for data: 1284609500\nI0817 16:06:12.507197 17619 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:06:12.507210 17619 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:06:12.507216 17619 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:06:12.507225 17619 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:06:12.507493 17619 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:06:12.507508 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.507513 17619 net.cpp:165] Memory required for data: 1286657500\nI0817 16:06:12.507522 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:06:12.507531 17619 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:06:12.507539 17619 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:06:12.507545 17619 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.507606 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:06:12.507768 17619 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:06:12.507782 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.507787 17619 net.cpp:165] Memory required for data: 1288705500\nI0817 16:06:12.507797 17619 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:06:12.507805 17619 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:06:12.507820 17619 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:06:12.507827 17619 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:06:12.507838 17619 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:06:12.507874 17619 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:06:12.507885 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.507890 17619 net.cpp:165] Memory required for data: 1290753500\nI0817 16:06:12.507896 17619 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:06:12.507903 17619 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:06:12.507910 17619 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:06:12.507916 17619 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.507925 17619 net.cpp:150] Setting up L3_b2_relu\nI0817 16:06:12.507932 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.507937 17619 net.cpp:165] Memory required for data: 1292801500\nI0817 16:06:12.507941 17619 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.507952 17619 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.507958 17619 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:06:12.507966 17619 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:06:12.507975 17619 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:06:12.508020 17619 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.508036 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.508044 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.508049 17619 net.cpp:165] Memory required for data: 1296897500\nI0817 16:06:12.508054 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:06:12.508064 17619 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:06:12.508071 17619 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:06:12.508080 17619 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:06:12.509099 17619 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:06:12.509116 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.509121 17619 net.cpp:165] Memory required for data: 1298945500\nI0817 16:06:12.509130 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:06:12.509140 17619 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:06:12.509146 17619 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:06:12.509160 17619 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:06:12.509420 17619 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:06:12.509433 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.509438 17619 net.cpp:165] Memory required for data: 1300993500\nI0817 16:06:12.509449 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:06:12.509461 17619 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:06:12.509469 17619 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:06:12.509475 17619 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.509532 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:06:12.509690 17619 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:06:12.509707 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.509713 17619 net.cpp:165] Memory required for data: 1303041500\nI0817 16:06:12.509723 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:06:12.509732 17619 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:06:12.509737 17619 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:06:12.509747 17619 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.509757 17619 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:06:12.509764 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.509776 17619 net.cpp:165] Memory required for data: 1305089500\nI0817 16:06:12.509781 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:06:12.509795 17619 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:06:12.509801 17619 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:06:12.509810 17619 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:06:12.510831 17619 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:06:12.510846 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.510851 17619 net.cpp:165] Memory required for data: 1307137500\nI0817 16:06:12.510860 17619 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:06:12.510872 17619 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:06:12.510879 17619 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:06:12.510887 17619 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:06:12.511152 17619 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:06:12.511164 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511169 17619 net.cpp:165] Memory required for data: 1309185500\nI0817 16:06:12.511179 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:06:12.511188 17619 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:06:12.511195 17619 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:06:12.511203 17619 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.511261 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:06:12.511416 17619 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:06:12.511430 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511435 17619 net.cpp:165] Memory required for data: 1311233500\nI0817 16:06:12.511443 17619 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:06:12.511452 17619 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:06:12.511459 17619 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:06:12.511466 17619 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:06:12.511478 17619 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:06:12.511512 17619 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:06:12.511526 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511531 17619 net.cpp:165] Memory required for data: 1313281500\nI0817 16:06:12.511536 17619 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:06:12.511544 17619 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:06:12.511550 17619 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:06:12.511557 17619 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.511566 17619 net.cpp:150] Setting up L3_b3_relu\nI0817 16:06:12.511574 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511579 17619 net.cpp:165] Memory required for data: 1315329500\nI0817 16:06:12.511584 17619 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.511593 17619 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.511600 17619 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:06:12.511606 17619 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:06:12.511616 17619 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:06:12.511663 17619 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.511674 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511682 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.511685 17619 net.cpp:165] Memory required for data: 1319425500\nI0817 16:06:12.511692 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:06:12.511708 17619 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:06:12.511715 17619 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:06:12.511728 17619 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:06:12.512760 17619 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:06:12.512775 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.512780 17619 net.cpp:165] Memory required for data: 1321473500\nI0817 16:06:12.512789 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:06:12.512799 17619 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:06:12.512805 17619 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:06:12.512816 17619 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:06:12.513083 17619 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:06:12.513098 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.513104 17619 net.cpp:165] Memory required for data: 1323521500\nI0817 16:06:12.513114 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:06:12.513123 17619 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:06:12.513128 17619 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:06:12.513136 17619 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.513195 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:06:12.513352 17619 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:06:12.513365 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.513370 17619 net.cpp:165] Memory required for data: 1325569500\nI0817 16:06:12.513380 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:06:12.513387 17619 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:06:12.513394 17619 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:06:12.513404 17619 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.513415 17619 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:06:12.513422 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.513427 17619 net.cpp:165] Memory required for data: 1327617500\nI0817 16:06:12.513432 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:06:12.513445 17619 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:06:12.513451 17619 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:06:12.513460 17619 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:06:12.514483 17619 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:06:12.514498 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.514503 17619 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:06:12.514513 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:06:12.514524 17619 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:06:12.514531 17619 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:06:12.514541 17619 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:06:12.514814 17619 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:06:12.514828 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.514833 17619 net.cpp:165] Memory required for data: 1331713500\nI0817 16:06:12.514843 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:06:12.514853 17619 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:06:12.514858 17619 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:06:12.514865 17619 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.514926 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:06:12.515087 17619 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:06:12.515101 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.515106 17619 net.cpp:165] Memory required for data: 1333761500\nI0817 16:06:12.515115 17619 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:06:12.515123 17619 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:06:12.515130 17619 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:06:12.515136 17619 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:06:12.515147 17619 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:06:12.515182 17619 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:06:12.515200 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.515206 17619 net.cpp:165] Memory required for data: 1335809500\nI0817 16:06:12.515211 17619 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:06:12.515219 17619 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:06:12.515225 17619 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:06:12.515233 17619 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.515241 17619 net.cpp:150] Setting up L3_b4_relu\nI0817 16:06:12.515249 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.515252 17619 net.cpp:165] Memory required for data: 1337857500\nI0817 16:06:12.515257 17619 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.515267 17619 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.515274 17619 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:06:12.515280 17619 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:06:12.515290 17619 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:06:12.515338 17619 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.515350 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.515357 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.515362 17619 net.cpp:165] Memory required for data: 1341953500\nI0817 16:06:12.515367 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:06:12.515377 17619 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:06:12.515383 17619 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:06:12.515395 17619 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:06:12.516425 17619 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:06:12.516440 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.516445 17619 net.cpp:165] Memory required for data: 1344001500\nI0817 16:06:12.516454 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:06:12.516464 17619 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:06:12.516470 17619 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:06:12.516481 17619 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:06:12.517736 17619 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:06:12.517753 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.517758 17619 net.cpp:165] Memory required for data: 1346049500\nI0817 16:06:12.517769 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:06:12.517779 17619 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:06:12.517786 17619 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:06:12.517797 17619 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.517858 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:06:12.518016 17619 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:06:12.518028 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.518033 17619 net.cpp:165] Memory required for data: 1348097500\nI0817 16:06:12.518043 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:06:12.518051 17619 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:06:12.518057 17619 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:06:12.518067 17619 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.518077 17619 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:06:12.518085 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.518090 17619 net.cpp:165] Memory required for data: 1350145500\nI0817 16:06:12.518095 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:06:12.518108 17619 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:06:12.518115 17619 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:06:12.518123 17619 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:06:12.520153 17619 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:06:12.520171 17619 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:06:12.520176 17619 net.cpp:165] Memory required for data: 1352193500\nI0817 16:06:12.520186 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:06:12.520200 17619 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:06:12.520206 17619 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:06:12.520217 17619 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:06:12.520479 17619 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:06:12.520493 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.520498 17619 net.cpp:165] Memory required for data: 1354241500\nI0817 16:06:12.520509 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:06:12.520517 17619 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:06:12.520524 17619 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:06:12.520531 17619 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.520591 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:06:12.520751 17619 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:06:12.520766 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.520771 17619 net.cpp:165] Memory required for data: 1356289500\nI0817 16:06:12.520779 17619 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:06:12.520788 17619 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:06:12.520795 17619 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:06:12.520802 17619 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:06:12.520813 17619 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:06:12.520846 17619 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:06:12.520861 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.520866 17619 net.cpp:165] Memory required for data: 1358337500\nI0817 16:06:12.520871 17619 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:06:12.520879 17619 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:06:12.520885 17619 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:06:12.520892 17619 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.520902 17619 net.cpp:150] Setting up L3_b5_relu\nI0817 16:06:12.520910 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.520913 17619 net.cpp:165] Memory required for data: 1360385500\nI0817 16:06:12.520918 17619 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.520929 17619 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.520934 17619 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:06:12.520942 17619 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:06:12.520951 17619 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:06:12.521000 17619 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.521011 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.521018 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.521023 17619 net.cpp:165] Memory required for data: 1364481500\nI0817 16:06:12.521028 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:06:12.521039 17619 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:06:12.521045 17619 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:06:12.521057 17619 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:06:12.522075 17619 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:06:12.522090 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.522095 17619 net.cpp:165] Memory required for data: 1366529500\nI0817 16:06:12.522104 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:06:12.522114 
17619 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:06:12.522128 17619 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:06:12.522141 17619 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:06:12.522399 17619 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:06:12.522414 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.522420 17619 net.cpp:165] Memory required for data: 1368577500\nI0817 16:06:12.522430 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:06:12.522439 17619 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:06:12.522446 17619 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:06:12.522454 17619 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.522514 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:06:12.522670 17619 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:06:12.522682 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.522687 17619 net.cpp:165] Memory required for data: 1370625500\nI0817 16:06:12.522696 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:06:12.522711 17619 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:06:12.522720 17619 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:06:12.522728 17619 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.522737 17619 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:06:12.522745 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.522749 17619 net.cpp:165] Memory required for data: 1372673500\nI0817 16:06:12.522754 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:06:12.522768 17619 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:06:12.522774 17619 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:06:12.522783 17619 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:06:12.523797 17619 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:06:12.523811 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.523816 17619 net.cpp:165] Memory required for data: 1374721500\nI0817 16:06:12.523825 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:06:12.523839 17619 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:06:12.523846 17619 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:06:12.523857 17619 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:06:12.524116 17619 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:06:12.524128 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524133 17619 net.cpp:165] Memory required for data: 1376769500\nI0817 16:06:12.524143 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:06:12.524152 17619 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:06:12.524158 17619 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:06:12.524169 17619 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.524225 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:06:12.524379 17619 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:06:12.524392 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524397 17619 net.cpp:165] Memory required for data: 1378817500\nI0817 16:06:12.524406 17619 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:06:12.524415 17619 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:06:12.524422 17619 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:06:12.524428 17619 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:06:12.524440 17619 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:06:12.524475 17619 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:06:12.524487 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524492 17619 net.cpp:165] Memory required for data: 1380865500\nI0817 16:06:12.524497 17619 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:06:12.524504 17619 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:06:12.524510 17619 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:06:12.524524 17619 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.524538 17619 net.cpp:150] Setting up L3_b6_relu\nI0817 16:06:12.524544 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524549 17619 net.cpp:165] Memory required for data: 1382913500\nI0817 16:06:12.524554 17619 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.524561 17619 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.524566 17619 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:06:12.524574 17619 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:06:12.524583 17619 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:06:12.524634 17619 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.524646 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524653 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.524658 17619 net.cpp:165] Memory required for data: 1387009500\nI0817 16:06:12.524663 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:06:12.524674 17619 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:06:12.524680 17619 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:06:12.524693 17619 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:06:12.525717 17619 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:06:12.525733 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.525738 17619 net.cpp:165] Memory required for data: 1389057500\nI0817 16:06:12.525748 17619 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:06:12.525756 17619 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:06:12.525763 17619 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:06:12.525774 17619 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:06:12.526036 17619 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:06:12.526051 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.526055 17619 net.cpp:165] Memory required for data: 1391105500\nI0817 16:06:12.526065 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:06:12.526075 17619 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:06:12.526082 17619 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:06:12.526088 17619 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.526144 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:06:12.526301 17619 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:06:12.526314 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.526319 17619 net.cpp:165] Memory required for data: 1393153500\nI0817 16:06:12.526329 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:06:12.526362 17619 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:06:12.526371 17619 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:06:12.526379 17619 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.526389 17619 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:06:12.526396 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.526401 17619 net.cpp:165] Memory required for data: 1395201500\nI0817 16:06:12.526407 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:06:12.526418 17619 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:06:12.526423 17619 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:06:12.526435 17619 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:06:12.527457 17619 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:06:12.527473 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.527478 17619 net.cpp:165] Memory required for data: 1397249500\nI0817 16:06:12.527487 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:06:12.527496 17619 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:06:12.527509 17619 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:06:12.527521 17619 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:06:12.527792 17619 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:06:12.527809 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.527814 17619 net.cpp:165] Memory required for data: 1399297500\nI0817 16:06:12.527825 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:06:12.527834 17619 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:06:12.527840 17619 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:06:12.527848 17619 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.527904 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:06:12.528059 17619 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:06:12.528071 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.528076 17619 net.cpp:165] Memory required for data: 1401345500\nI0817 16:06:12.528084 17619 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:06:12.528096 17619 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:06:12.528102 17619 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:06:12.528110 17619 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:06:12.528117 17619 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:06:12.528154 17619 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:06:12.528167 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.528172 17619 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:06:12.528177 17619 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:06:12.528184 17619 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:06:12.528189 17619 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:06:12.528197 17619 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.528206 17619 net.cpp:150] Setting up L3_b7_relu\nI0817 16:06:12.528213 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.528218 17619 net.cpp:165] Memory required for data: 1405441500\nI0817 16:06:12.528223 17619 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.528228 17619 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.528234 17619 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:06:12.528244 17619 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:06:12.528254 17619 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:06:12.528298 17619 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.528311 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.528316 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.528321 17619 net.cpp:165] Memory required for data: 1409537500\nI0817 16:06:12.528326 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:06:12.528342 17619 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:06:12.528348 17619 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:06:12.528357 17619 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:06:12.529371 17619 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:06:12.529386 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.529392 17619 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:06:12.529400 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:06:12.529412 17619 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:06:12.529418 17619 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:06:12.529428 17619 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:06:12.529685 17619 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:06:12.529703 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.529709 17619 net.cpp:165] Memory required for data: 1413633500\nI0817 16:06:12.529727 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:06:12.529736 17619 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:06:12.529743 17619 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:06:12.529757 17619 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.529815 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:06:12.529971 17619 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:06:12.529984 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.529989 17619 net.cpp:165] Memory required for data: 1415681500\nI0817 16:06:12.529999 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:06:12.530009 17619 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:06:12.530016 17619 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:06:12.530023 17619 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.530032 17619 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:06:12.530040 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.530045 17619 net.cpp:165] Memory required for data: 1417729500\nI0817 16:06:12.530048 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:06:12.530062 17619 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:06:12.530068 17619 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:06:12.530079 17619 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:06:12.531097 17619 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:06:12.531112 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531117 17619 net.cpp:165] Memory required for data: 1419777500\nI0817 16:06:12.531126 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:06:12.531136 17619 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:06:12.531141 17619 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:06:12.531152 17619 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:06:12.531412 17619 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:06:12.531428 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531433 17619 net.cpp:165] Memory required for data: 1421825500\nI0817 16:06:12.531443 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:06:12.531452 17619 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:06:12.531458 17619 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:06:12.531466 17619 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.531522 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:06:12.531677 17619 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:06:12.531692 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531697 17619 net.cpp:165] Memory required for data: 1423873500\nI0817 16:06:12.531710 17619 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:06:12.531723 17619 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:06:12.531729 17619 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:06:12.531736 17619 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:06:12.531744 17619 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:06:12.531781 17619 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:06:12.531793 17619 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:06:12.531798 17619 net.cpp:165] Memory required for data: 1425921500\nI0817 16:06:12.531803 17619 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:06:12.531811 17619 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:06:12.531816 17619 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:06:12.531824 17619 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.531833 17619 net.cpp:150] Setting up L3_b8_relu\nI0817 16:06:12.531841 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531844 17619 net.cpp:165] Memory required for data: 1427969500\nI0817 16:06:12.531849 17619 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.531865 17619 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.531872 17619 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:06:12.531883 17619 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:06:12.531893 17619 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:06:12.531939 17619 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.531950 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531956 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.531961 17619 net.cpp:165] Memory required for data: 1432065500\nI0817 16:06:12.531966 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:06:12.531981 17619 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:06:12.531988 17619 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:06:12.531997 17619 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:06:12.533991 17619 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:06:12.534008 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:06:12.534014 17619 net.cpp:165] Memory required for data: 1434113500\nI0817 16:06:12.534024 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:06:12.534036 17619 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:06:12.534044 17619 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:06:12.534052 17619 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:06:12.534318 17619 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:06:12.534332 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.534337 17619 net.cpp:165] Memory required for data: 1436161500\nI0817 16:06:12.534346 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:06:12.534355 17619 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:06:12.534361 17619 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:06:12.534369 17619 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.534430 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:06:12.534584 17619 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:06:12.534600 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.534605 17619 net.cpp:165] Memory required for data: 1438209500\nI0817 16:06:12.534615 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:06:12.534622 17619 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:06:12.534628 17619 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:06:12.534636 17619 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.534646 17619 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:06:12.534652 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.534657 17619 net.cpp:165] Memory required for data: 1440257500\nI0817 16:06:12.534662 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:06:12.534679 17619 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:06:12.534685 17619 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:06:12.534696 17619 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:06:12.535907 17619 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:06:12.535923 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.535928 17619 net.cpp:165] Memory required for data: 1442305500\nI0817 16:06:12.535938 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:06:12.535950 17619 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:06:12.535957 17619 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:06:12.535966 17619 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:06:12.536229 17619 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:06:12.536242 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.536247 17619 net.cpp:165] Memory required for data: 1444353500\nI0817 16:06:12.536267 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:06:12.536278 17619 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:06:12.536285 17619 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:06:12.536293 17619 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.536350 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:06:12.536502 17619 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:06:12.536515 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.536520 17619 net.cpp:165] Memory required for data: 1446401500\nI0817 16:06:12.536530 17619 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:06:12.536541 17619 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:06:12.536548 17619 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:06:12.536556 17619 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:06:12.536563 17619 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:06:12.536598 17619 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:06:12.536610 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.536615 17619 net.cpp:165] Memory required for data: 1448449500\nI0817 16:06:12.536620 17619 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:06:12.536628 17619 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:06:12.536633 17619 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:06:12.536643 17619 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.536653 17619 net.cpp:150] Setting up L3_b9_relu\nI0817 16:06:12.536660 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.536665 17619 net.cpp:165] Memory required for data: 1450497500\nI0817 16:06:12.536670 17619 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:06:12.536679 17619 net.cpp:100] Creating Layer post_pool\nI0817 16:06:12.536684 17619 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:06:12.536691 17619 net.cpp:408] post_pool -> post_pool\nI0817 16:06:12.536731 17619 net.cpp:150] Setting up post_pool\nI0817 16:06:12.536746 17619 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:06:12.536752 17619 net.cpp:165] Memory required for data: 1450529500\nI0817 16:06:12.536757 17619 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:06:12.536839 17619 net.cpp:100] Creating Layer post_FC\nI0817 16:06:12.536851 17619 net.cpp:434] post_FC <- post_pool\nI0817 16:06:12.536861 17619 net.cpp:408] post_FC -> post_FC_top\nI0817 16:06:12.537099 17619 net.cpp:150] Setting up post_FC\nI0817 16:06:12.537117 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.537123 17619 net.cpp:165] Memory required for data: 1450534500\nI0817 16:06:12.537132 17619 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:06:12.537142 17619 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:06:12.537147 17619 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:06:12.537155 17619 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:06:12.537165 17619 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:06:12.537214 17619 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:06:12.537226 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.537233 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.537237 17619 net.cpp:165] Memory required for data: 1450544500\nI0817 16:06:12.537243 17619 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:06:12.537284 17619 net.cpp:100] Creating Layer accuracy\nI0817 16:06:12.537295 17619 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:06:12.537309 17619 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:06:12.537317 17619 net.cpp:408] accuracy -> accuracy\nI0817 16:06:12.537359 17619 net.cpp:150] Setting up accuracy\nI0817 16:06:12.537371 17619 net.cpp:157] Top shape: (1)\nI0817 16:06:12.537376 17619 net.cpp:165] Memory required for data: 1450544504\nI0817 16:06:12.537382 17619 layer_factory.hpp:77] Creating layer loss\nI0817 16:06:12.537397 17619 net.cpp:100] Creating Layer loss\nI0817 16:06:12.537405 17619 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:06:12.537411 17619 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:06:12.537418 17619 net.cpp:408] loss -> loss\nI0817 16:06:12.537467 17619 layer_factory.hpp:77] Creating layer loss\nI0817 16:06:12.537624 17619 net.cpp:150] Setting up loss\nI0817 16:06:12.537642 17619 net.cpp:157] Top shape: (1)\nI0817 16:06:12.537647 17619 net.cpp:160]     with loss weight 1\nI0817 16:06:12.537729 17619 net.cpp:165] Memory required for data: 1450544508\nI0817 16:06:12.537737 17619 net.cpp:226] loss needs backward computation.\nI0817 16:06:12.537744 17619 net.cpp:228] accuracy does not need backward computation.\nI0817 16:06:12.537750 17619 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:06:12.537756 17619 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:06:12.537761 17619 net.cpp:226] post_pool needs backward computation.\nI0817 16:06:12.537766 17619 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:06:12.537771 17619 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.537776 17619 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.537781 17619 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.537786 17619 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.537792 17619 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.537797 17619 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.537801 17619 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.537806 17619 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.537812 17619 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:06:12.537817 17619 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:06:12.537822 17619 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.537827 17619 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.537833 17619 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.537838 17619 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.537843 17619 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.537848 17619 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:06:12.537853 17619 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.537858 17619 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.537863 17619 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:06:12.537868 17619 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:06:12.537873 17619 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:06:12.537879 17619 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.537884 17619 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.537889 17619 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.537894 17619 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.537899 17619 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.537904 17619 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.537909 17619 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.537914 17619 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:06:12.537920 17619 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:06:12.537925 17619 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.537930 17619 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.537935 17619 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.537941 17619 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.537953 17619 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.537959 17619 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.537964 17619 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.537971 17619 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.537976 17619 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:06:12.537981 17619 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:06:12.537986 17619 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:06:12.537992 17619 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.537997 17619 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.538002 17619 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.538007 17619 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.538012 17619 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.538017 17619 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.538022 17619 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.538028 17619 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:06:12.538033 17619 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:06:12.538038 17619 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.538044 17619 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.538049 17619 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.538054 17619 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.538059 17619 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.538064 17619 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.538070 17619 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.538075 17619 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.538080 17619 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:06:12.538085 17619 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:06:12.538090 17619 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:06:12.538096 17619 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.538101 17619 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.538107 17619 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.538115 17619 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.538121 17619 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.538126 
17619 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.538131 17619 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.538136 17619 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:06:12.538142 17619 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:06:12.538147 17619 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.538153 17619 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.538158 17619 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.538163 17619 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.538169 17619 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.538174 17619 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.538179 17619 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.538184 17619 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.538190 17619 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:06:12.538195 17619 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:06:12.538202 17619 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:06:12.538211 17619 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:06:12.538218 17619 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.538223 17619 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:06:12.538229 17619 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.538234 17619 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.538239 17619 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.538245 17619 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.538250 17619 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:06:12.538255 17619 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.538260 17619 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.538266 17619 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:06:12.538271 17619 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:06:12.538276 17619 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.538282 17619 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.538287 17619 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.538293 17619 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.538298 17619 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.538303 17619 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.538308 17619 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.538314 17619 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.538321 17619 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:06:12.538326 17619 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:06:12.538331 17619 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.538336 17619 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.538342 17619 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.538347 17619 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.538352 17619 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.538358 17619 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:06:12.538363 17619 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.538368 17619 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.538374 17619 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:06:12.538379 17619 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:06:12.538384 17619 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:06:12.538390 17619 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.538399 17619 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.538404 17619 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.538410 17619 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.538415 17619 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.538420 17619 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.538426 17619 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.538431 17619 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:06:12.538437 17619 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:06:12.538442 17619 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.538449 17619 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.538453 17619 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.538460 17619 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.538465 17619 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.538470 17619 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.538480 17619 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.538486 17619 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.538491 17619 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:06:12.538496 17619 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:06:12.538502 17619 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:06:12.538508 17619 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.538513 17619 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.538518 17619 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.538524 17619 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.538529 17619 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.538535 17619 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.538540 17619 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.538547 17619 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:06:12.538552 17619 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:06:12.538556 17619 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.538563 17619 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.538568 17619 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.538573 17619 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.538578 17619 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.538584 17619 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.538589 17619 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.538594 17619 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.538600 17619 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:06:12.538606 17619 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:06:12.538611 17619 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:06:12.538617 17619 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.538622 17619 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.538628 17619 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.538633 17619 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.538638 17619 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.538645 17619 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.538650 17619 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.538655 17619 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:06:12.538661 17619 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:06:12.538666 17619 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.538672 17619 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.538677 17619 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.538682 17619 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.538688 17619 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.538693 17619 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.538703 17619 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.538710 17619 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.538717 17619 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:06:12.538722 17619 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:06:12.538728 17619 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:06:12.538733 17619 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:06:12.538739 17619 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.538751 17619 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:06:12.538758 17619 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.538763 17619 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.538769 17619 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.538777 17619 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.538784 17619 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:06:12.538789 17619 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.538794 17619 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.538800 17619 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:06:12.538806 17619 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:06:12.538811 17619 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.538817 17619 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.538823 17619 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.538830 17619 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.538835 17619 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.538839 17619 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.538846 17619 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.538851 17619 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.538856 17619 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:06:12.538862 17619 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:06:12.538868 17619 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.538874 17619 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.538879 17619 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.538885 17619 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.538892 17619 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.538897 17619 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:06:12.538902 17619 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.538908 17619 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.538913 17619 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:06:12.538919 17619 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:06:12.538925 17619 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:06:12.538931 17619 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.538936 17619 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.538942 17619 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.538949 17619 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.538954 17619 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.538959 17619 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.538964 17619 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.538970 17619 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:06:12.538976 17619 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:06:12.538981 17619 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.538988 17619 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.538993 17619 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.539000 17619 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.539005 17619 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.539011 17619 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.539016 17619 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.539026 17619 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.539032 17619 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:06:12.539038 17619 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:06:12.539044 17619 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:06:12.539050 17619 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.539057 17619 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.539062 17619 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.539067 17619 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.539073 17619 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.539078 17619 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.539083 17619 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.539090 17619 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:06:12.539095 17619 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:06:12.539101 17619 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.539108 17619 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.539113 17619 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.539119 17619 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.539124 17619 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.539130 17619 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.539135 17619 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.539141 17619 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.539147 17619 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:06:12.539153 17619 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:06:12.539160 17619 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:06:12.539165 17619 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.539170 17619 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.539176 17619 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.539182 17619 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.539187 17619 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.539193 17619 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.539198 17619 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.539204 17619 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:06:12.539211 17619 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:06:12.539216 17619 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.539222 17619 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.539227 17619 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.539233 17619 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.539239 17619 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.539244 17619 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.539250 17619 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.539257 17619 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.539261 17619 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:06:12.539268 17619 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:06:12.539273 17619 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.539279 17619 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.539285 17619 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.539291 17619 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.539301 17619 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.539309 17619 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:06:12.539314 17619 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.539319 17619 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.539325 17619 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:06:12.539331 17619 net.cpp:226] pre_relu needs backward computation.\nI0817 16:06:12.539336 17619 net.cpp:226] pre_scale needs backward computation.\nI0817 16:06:12.539341 17619 net.cpp:226] pre_bn needs backward computation.\nI0817 16:06:12.539347 17619 net.cpp:226] pre_conv needs backward computation.\nI0817 16:06:12.539353 17619 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:06:12.539361 17619 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:06:12.539364 17619 net.cpp:270] This network produces output accuracy\nI0817 16:06:12.539371 17619 net.cpp:270] This network produces output loss\nI0817 16:06:12.539741 17619 net.cpp:283] Network initialization done.\nI0817 16:06:12.549197 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:12.549239 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:12.549304 17619 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:06:12.549687 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:06:12.549715 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:06:12.549726 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:06:12.549736 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:06:12.549746 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:06:12.549753 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:06:12.549762 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:06:12.549772 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:06:12.549780 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:06:12.549789 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:06:12.549798 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:06:12.549806 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:06:12.549816 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:06:12.549825 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:06:12.549834 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:06:12.549842 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:06:12.549851 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:06:12.549860 17619 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:06:12.549870 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:06:12.549888 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:06:12.549898 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:06:12.549906 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:06:12.549919 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:06:12.549928 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:06:12.549937 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:06:12.549945 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:06:12.549953 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:06:12.549962 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:06:12.549970 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:06:12.549979 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:06:12.549988 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:06:12.549996 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:06:12.550006 17619 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:06:12.550014 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:06:12.550022 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:06:12.550031 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:06:12.550040 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:06:12.550048 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:06:12.550057 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:06:12.550065 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:06:12.550077 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:06:12.550086 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:06:12.550094 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:06:12.550102 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:06:12.550112 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:06:12.550120 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:06:12.550129 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:06:12.550137 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:06:12.550145 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:06:12.550154 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:06:12.550171 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:06:12.550180 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:06:12.550189 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:06:12.550199 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:06:12.550207 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:06:12.550215 17619 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:06:12.552079 17619 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:06:12.553676 17619 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:06:12.553930 17619 net.cpp:100] Creating Layer dataLayer\nI0817 16:06:12.553948 17619 net.cpp:408] dataLayer -> data_top\nI0817 16:06:12.553967 17619 net.cpp:408] dataLayer -> label\nI0817 16:06:12.553980 17619 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:06:12.560026 17626 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:06:12.560246 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:12.567975 17619 net.cpp:150] Setting up dataLayer\nI0817 16:06:12.568011 17619 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:06:12.568022 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.568028 17619 net.cpp:165] Memory required for data: 1536500\nI0817 16:06:12.568034 17619 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:06:12.568044 17619 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:06:12.568050 17619 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:06:12.568058 17619 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:06:12.568070 17619 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:06:12.568140 17619 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:06:12.568150 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.568158 17619 net.cpp:157] Top shape: 125 (125)\nI0817 16:06:12.568164 17619 net.cpp:165] Memory required for data: 1537500\nI0817 16:06:12.568169 17619 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:06:12.568187 17619 net.cpp:100] Creating Layer pre_conv\nI0817 16:06:12.568197 17619 net.cpp:434] pre_conv <- data_top\nI0817 16:06:12.568209 17619 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:06:12.568718 17619 net.cpp:150] Setting up pre_conv\nI0817 16:06:12.568745 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.568752 17619 net.cpp:165] Memory required for data: 9729500\nI0817 16:06:12.568771 17619 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:06:12.568781 17619 net.cpp:100] Creating Layer pre_bn\nI0817 16:06:12.568790 17619 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:06:12.568801 17619 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:06:12.569176 17619 net.cpp:150] Setting up pre_bn\nI0817 16:06:12.569192 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.569197 17619 net.cpp:165] Memory required for data: 17921500\nI0817 16:06:12.569216 17619 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:06:12.569226 17619 net.cpp:100] Creating Layer pre_scale\nI0817 16:06:12.569232 17619 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:06:12.569243 17619 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:06:12.569308 17619 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:06:12.569519 17619 net.cpp:150] Setting up pre_scale\nI0817 16:06:12.569535 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.569541 17619 net.cpp:165] Memory required for data: 26113500\nI0817 16:06:12.569550 17619 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:06:12.569558 17619 net.cpp:100] Creating Layer pre_relu\nI0817 16:06:12.569564 17619 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:06:12.569574 17619 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:06:12.569584 17619 net.cpp:150] Setting up pre_relu\nI0817 16:06:12.569591 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.569597 17619 net.cpp:165] Memory required for data: 
34305500\nI0817 16:06:12.569600 17619 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:06:12.569612 17619 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:06:12.569617 17619 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:06:12.569623 17619 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:06:12.569636 17619 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:06:12.569715 17619 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:06:12.569730 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.569736 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.569743 17619 net.cpp:165] Memory required for data: 50689500\nI0817 16:06:12.569749 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:06:12.569761 17619 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:06:12.569766 17619 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:06:12.569778 17619 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:06:12.570185 17619 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:06:12.570204 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.570209 17619 net.cpp:165] Memory required for data: 58881500\nI0817 16:06:12.570222 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:06:12.570241 17619 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:06:12.570251 17619 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:06:12.570261 17619 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:06:12.570565 17619 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:06:12.570582 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.570590 17619 net.cpp:165] Memory required for data: 67073500\nI0817 16:06:12.570601 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:06:12.570611 17619 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:06:12.570616 17619 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:06:12.570626 17619 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.570690 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:06:12.570969 17619 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:06:12.570986 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.570991 17619 net.cpp:165] Memory required for data: 75265500\nI0817 16:06:12.571009 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:06:12.571032 17619 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:06:12.571039 17619 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:06:12.571048 17619 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.571058 17619 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:06:12.571069 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.571074 17619 net.cpp:165] Memory required for data: 83457500\nI0817 16:06:12.571077 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:06:12.571092 17619 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:06:12.571099 17619 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:06:12.571110 17619 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:06:12.571548 17619 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:06:12.571563 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.571570 17619 net.cpp:165] Memory required for data: 91649500\nI0817 16:06:12.571580 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:06:12.571589 17619 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:06:12.571594 17619 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:06:12.571604 17619 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:06:12.571909 17619 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:06:12.571926 17619 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.571931 17619 net.cpp:165] Memory required for data: 99841500\nI0817 16:06:12.571961 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:06:12.571974 17619 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:06:12.571979 17619 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:06:12.571990 17619 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.572054 17619 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:06:12.572238 17619 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:06:12.572254 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.572260 17619 net.cpp:165] Memory required for data: 108033500\nI0817 16:06:12.572269 17619 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:06:12.572283 17619 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:06:12.572290 17619 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:06:12.572299 17619 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:06:12.572306 17619 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:06:12.572343 17619 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:06:12.572355 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.572360 17619 net.cpp:165] Memory required for data: 116225500\nI0817 16:06:12.572366 17619 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:06:12.572373 17619 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:06:12.572378 17619 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:06:12.572392 17619 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.572403 17619 net.cpp:150] Setting up L1_b1_relu\nI0817 16:06:12.572410 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.572415 17619 net.cpp:165] Memory required for data: 124417500\nI0817 16:06:12.572422 17619 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.572432 17619 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.572438 17619 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:06:12.572445 17619 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:06:12.572453 17619 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:06:12.572509 17619 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:06:12.572532 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.572540 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.572544 17619 net.cpp:165] Memory required for data: 140801500\nI0817 16:06:12.572551 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:06:12.572567 17619 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:06:12.572574 17619 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:06:12.572584 17619 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:06:12.572993 17619 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:06:12.573010 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.573016 17619 net.cpp:165] Memory required for data: 148993500\nI0817 16:06:12.573025 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:06:12.573037 17619 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:06:12.573043 17619 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:06:12.573055 17619 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:06:12.573375 17619 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:06:12.573395 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.573401 17619 net.cpp:165] Memory required for data: 157185500\nI0817 16:06:12.573411 17619 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:06:12.573422 17619 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:06:12.573429 17619 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:06:12.573436 17619 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.573498 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:06:12.573712 17619 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:06:12.573727 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.573732 17619 net.cpp:165] Memory required for data: 165377500\nI0817 16:06:12.573745 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:06:12.573756 17619 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:06:12.573762 17619 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:06:12.573770 17619 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.573782 17619 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:06:12.573789 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.573794 17619 net.cpp:165] Memory required for data: 173569500\nI0817 16:06:12.573799 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:06:12.573817 17619 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:06:12.573822 17619 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:06:12.573834 17619 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:06:12.574445 17619 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:06:12.574462 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.574467 17619 net.cpp:165] Memory required for data: 181761500\nI0817 16:06:12.574477 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:06:12.574486 17619 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:06:12.574492 17619 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:06:12.574506 17619 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:06:12.574820 17619 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:06:12.574833 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.574838 17619 net.cpp:165] Memory required for data: 189953500\nI0817 16:06:12.574857 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:06:12.574869 17619 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:06:12.574875 17619 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:06:12.574883 17619 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.574962 17619 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:06:12.575145 17619 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:06:12.575160 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575166 17619 net.cpp:165] Memory required for data: 198145500\nI0817 16:06:12.575183 17619 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:06:12.575192 17619 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:06:12.575201 17619 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:06:12.575208 17619 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:06:12.575219 17619 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:06:12.575258 17619 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:06:12.575268 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575273 17619 net.cpp:165] Memory required for data: 206337500\nI0817 16:06:12.575278 17619 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:06:12.575289 17619 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:06:12.575296 17619 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:06:12.575305 17619 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.575314 17619 net.cpp:150] Setting up L1_b2_relu\nI0817 16:06:12.575322 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575326 17619 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:06:12.575335 17619 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.575341 17619 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.575346 17619 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:06:12.575354 17619 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:06:12.575363 17619 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:06:12.575417 17619 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:06:12.575426 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575436 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575441 17619 net.cpp:165] Memory required for data: 230913500\nI0817 16:06:12.575446 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:06:12.575460 17619 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:06:12.575466 17619 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:06:12.575479 17619 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:06:12.575867 17619 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:06:12.575882 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.575888 17619 net.cpp:165] Memory required for data: 239105500\nI0817 16:06:12.575901 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:06:12.575934 17619 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:06:12.575942 17619 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:06:12.575951 17619 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:06:12.576273 17619 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:06:12.576289 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.576297 17619 net.cpp:165] Memory required for data: 
247297500\nI0817 16:06:12.576308 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:06:12.576316 17619 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:06:12.576323 17619 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:06:12.576329 17619 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.576401 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:06:12.576609 17619 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:06:12.576623 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.576628 17619 net.cpp:165] Memory required for data: 255489500\nI0817 16:06:12.576642 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:06:12.576650 17619 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:06:12.576656 17619 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:06:12.576666 17619 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.576687 17619 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:06:12.576695 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.576719 17619 net.cpp:165] Memory required for data: 263681500\nI0817 16:06:12.576725 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:06:12.576740 17619 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:06:12.576747 17619 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:06:12.576756 17619 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:06:12.577184 17619 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:06:12.577199 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.577204 17619 net.cpp:165] Memory required for data: 271873500\nI0817 16:06:12.577214 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:06:12.577232 17619 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:06:12.577239 17619 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:06:12.577247 17619 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:06:12.577767 17619 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:06:12.577782 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.577787 17619 net.cpp:165] Memory required for data: 280065500\nI0817 16:06:12.577801 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:06:12.577811 17619 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:06:12.577817 17619 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:06:12.577826 17619 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.577893 17619 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:06:12.578101 17619 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:06:12.578115 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.578121 17619 net.cpp:165] Memory required for data: 288257500\nI0817 16:06:12.578130 17619 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:06:12.578142 17619 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:06:12.578148 17619 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:06:12.578155 17619 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:06:12.578166 17619 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:06:12.578205 17619 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:06:12.578214 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.578219 17619 net.cpp:165] Memory required for data: 296449500\nI0817 16:06:12.578224 17619 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:06:12.578238 17619 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:06:12.578244 17619 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:06:12.578253 17619 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.578261 17619 net.cpp:150] Setting up L1_b3_relu\nI0817 16:06:12.578271 17619 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:06:12.578276 17619 net.cpp:165] Memory required for data: 304641500\nI0817 16:06:12.578281 17619 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.578289 17619 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.578295 17619 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:06:12.578305 17619 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:06:12.578316 17619 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:06:12.578367 17619 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:06:12.578380 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.578387 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.578392 17619 net.cpp:165] Memory required for data: 321025500\nI0817 16:06:12.578397 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:06:12.578413 17619 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:06:12.578428 17619 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:06:12.578438 17619 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:06:12.578830 17619 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:06:12.578845 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.578850 17619 net.cpp:165] Memory required for data: 329217500\nI0817 16:06:12.578860 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:06:12.578871 17619 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:06:12.578878 17619 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:06:12.578886 17619 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:06:12.579152 17619 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:06:12.579169 17619 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:06:12.579174 17619 net.cpp:165] Memory required for data: 337409500\nI0817 16:06:12.579185 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:06:12.579192 17619 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:06:12.579198 17619 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:06:12.579205 17619 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.579262 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:06:12.579418 17619 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:06:12.579432 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.579437 17619 net.cpp:165] Memory required for data: 345601500\nI0817 16:06:12.579447 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:06:12.579457 17619 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:06:12.579463 17619 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:06:12.579473 17619 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.579483 17619 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:06:12.579489 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.579493 17619 net.cpp:165] Memory required for data: 353793500\nI0817 16:06:12.579499 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:06:12.579509 17619 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:06:12.579514 17619 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:06:12.579525 17619 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:06:12.579874 17619 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:06:12.579888 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.579895 17619 net.cpp:165] Memory required for data: 361985500\nI0817 16:06:12.579903 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:06:12.579912 17619 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:06:12.579918 17619 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:06:12.579928 17619 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:06:12.580224 17619 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:06:12.580238 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580245 17619 net.cpp:165] Memory required for data: 370177500\nI0817 16:06:12.580255 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:06:12.580263 17619 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:06:12.580269 17619 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:06:12.580276 17619 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.580338 17619 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:06:12.580492 17619 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:06:12.580504 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580509 17619 net.cpp:165] Memory required for data: 378369500\nI0817 16:06:12.580518 17619 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:06:12.580526 17619 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:06:12.580533 17619 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:06:12.580539 17619 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:06:12.580556 17619 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:06:12.580590 17619 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:06:12.580600 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580605 17619 net.cpp:165] Memory required for data: 386561500\nI0817 16:06:12.580610 17619 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:06:12.580620 17619 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:06:12.580626 17619 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:06:12.580632 17619 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.580641 17619 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:06:12.580648 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580653 17619 net.cpp:165] Memory required for data: 394753500\nI0817 16:06:12.580657 17619 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.580664 17619 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.580669 17619 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:06:12.580677 17619 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:06:12.580685 17619 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:06:12.580740 17619 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:06:12.580754 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580760 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.580765 17619 net.cpp:165] Memory required for data: 411137500\nI0817 16:06:12.580770 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:06:12.580783 17619 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:06:12.580790 17619 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:06:12.580799 17619 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:06:12.581146 17619 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:06:12.581161 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.581166 17619 net.cpp:165] Memory required for data: 419329500\nI0817 16:06:12.581188 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:06:12.581200 17619 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:06:12.581207 17619 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:06:12.581217 17619 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:06:12.581485 17619 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:06:12.581498 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.581503 17619 net.cpp:165] Memory required for data: 427521500\nI0817 16:06:12.581513 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:06:12.581522 17619 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:06:12.581528 17619 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:06:12.581535 17619 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.581595 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:06:12.581753 17619 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:06:12.581768 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.581773 17619 net.cpp:165] Memory required for data: 435713500\nI0817 16:06:12.581782 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:06:12.581789 17619 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:06:12.581795 17619 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:06:12.581805 17619 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.581815 17619 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:06:12.581822 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.581827 17619 net.cpp:165] Memory required for data: 443905500\nI0817 16:06:12.581832 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:06:12.581849 17619 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:06:12.581856 17619 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:06:12.581866 17619 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:06:12.582213 17619 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:06:12.582227 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.582233 17619 net.cpp:165] Memory required for data: 452097500\nI0817 16:06:12.582242 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:06:12.582250 17619 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:06:12.582257 17619 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:06:12.582267 17619 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:06:12.582540 17619 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:06:12.582556 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.582561 17619 net.cpp:165] Memory required for data: 460289500\nI0817 16:06:12.582571 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:06:12.582579 17619 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:06:12.582586 17619 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:06:12.582593 17619 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.582650 17619 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:06:12.582815 17619 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:06:12.582829 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.582834 17619 net.cpp:165] Memory required for data: 468481500\nI0817 16:06:12.582844 17619 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:06:12.582855 17619 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:06:12.582861 17619 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:06:12.582891 17619 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:06:12.582903 17619 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:06:12.582938 17619 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:06:12.582948 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.582952 17619 net.cpp:165] Memory required for data: 476673500\nI0817 16:06:12.582958 17619 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:06:12.582969 17619 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:06:12.582975 17619 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:06:12.582983 17619 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.582991 17619 net.cpp:150] Setting up L1_b5_relu\nI0817 16:06:12.582998 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.583003 17619 net.cpp:165] Memory required for data: 484865500\nI0817 16:06:12.583009 17619 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.583014 17619 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.583020 17619 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:06:12.583026 17619 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:06:12.583036 17619 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:06:12.583086 17619 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:06:12.583096 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.583103 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.583107 17619 net.cpp:165] Memory required for data: 501249500\nI0817 16:06:12.583112 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:06:12.583127 17619 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:06:12.583132 17619 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:06:12.583142 17619 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:06:12.583487 17619 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:06:12.583500 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.583513 17619 net.cpp:165] Memory required for data: 509441500\nI0817 16:06:12.583521 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:06:12.583534 17619 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:06:12.583540 17619 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:06:12.583549 17619 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:06:12.583825 17619 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:06:12.583843 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.583848 17619 net.cpp:165] Memory required for data: 517633500\nI0817 16:06:12.583859 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:06:12.583868 17619 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:06:12.583873 17619 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:06:12.583881 17619 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.583937 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:06:12.584105 17619 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:06:12.584117 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.584123 17619 net.cpp:165] Memory required for data: 525825500\nI0817 16:06:12.584132 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:06:12.584142 17619 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:06:12.584149 17619 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:06:12.584158 17619 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.584168 17619 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:06:12.584175 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.584180 17619 net.cpp:165] Memory required for data: 534017500\nI0817 16:06:12.584185 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:06:12.584195 17619 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:06:12.584202 17619 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:06:12.584213 17619 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:06:12.584558 17619 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:06:12.584573 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.584578 17619 net.cpp:165] Memory required for data: 542209500\nI0817 16:06:12.584586 17619 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:06:12.584595 17619 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:06:12.584601 17619 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:06:12.584612 17619 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:06:12.584890 17619 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:06:12.584904 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.584910 17619 net.cpp:165] Memory required for data: 550401500\nI0817 16:06:12.584920 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:06:12.584931 17619 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:06:12.584938 17619 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:06:12.584945 17619 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.585001 17619 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:06:12.585158 17619 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:06:12.585171 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585176 17619 net.cpp:165] Memory required for data: 558593500\nI0817 16:06:12.585186 17619 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:06:12.585206 17619 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:06:12.585212 17619 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:06:12.585219 17619 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:06:12.585227 17619 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:06:12.585264 17619 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:06:12.585273 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585278 17619 net.cpp:165] Memory required for data: 566785500\nI0817 16:06:12.585290 17619 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:06:12.585299 17619 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:06:12.585304 17619 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:06:12.585311 17619 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.585320 17619 net.cpp:150] Setting up L1_b6_relu\nI0817 16:06:12.585327 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585332 17619 net.cpp:165] Memory required for data: 574977500\nI0817 16:06:12.585336 17619 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.585343 17619 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.585348 17619 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:06:12.585358 17619 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:06:12.585367 17619 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:06:12.585414 17619 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:06:12.585425 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585433 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585436 17619 net.cpp:165] Memory required for data: 591361500\nI0817 16:06:12.585441 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:06:12.585455 17619 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:06:12.585463 17619 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:06:12.585470 17619 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:06:12.585825 17619 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:06:12.585839 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.585845 17619 net.cpp:165] Memory required for data: 599553500\nI0817 16:06:12.585853 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:06:12.585878 17619 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:06:12.585886 17619 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:06:12.585898 17619 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:06:12.586169 17619 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:06:12.586184 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.586189 17619 net.cpp:165] Memory required for data: 607745500\nI0817 16:06:12.586199 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:06:12.586207 17619 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:06:12.586213 17619 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:06:12.586220 17619 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.586280 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:06:12.586436 17619 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:06:12.586449 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.586454 17619 net.cpp:165] Memory required for data: 615937500\nI0817 16:06:12.586462 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:06:12.586473 17619 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:06:12.586479 17619 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:06:12.586486 17619 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.586495 17619 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:06:12.586503 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.586508 17619 net.cpp:165] Memory required for data: 624129500\nI0817 16:06:12.586513 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:06:12.586524 17619 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:06:12.586531 17619 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:06:12.586541 17619 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:06:12.586904 17619 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:06:12.586918 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.586930 17619 
net.cpp:165] Memory required for data: 632321500\nI0817 16:06:12.586940 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:06:12.586951 17619 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:06:12.586957 17619 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:06:12.586966 17619 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:06:12.587234 17619 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:06:12.587249 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587252 17619 net.cpp:165] Memory required for data: 640513500\nI0817 16:06:12.587262 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:06:12.587270 17619 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:06:12.587276 17619 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:06:12.587285 17619 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.587343 17619 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:06:12.587527 17619 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:06:12.587543 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587548 17619 net.cpp:165] Memory required for data: 648705500\nI0817 16:06:12.587556 17619 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:06:12.587565 17619 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:06:12.587572 17619 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:06:12.587579 17619 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:06:12.587589 17619 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:06:12.587626 17619 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:06:12.587635 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587641 17619 net.cpp:165] Memory required for data: 656897500\nI0817 16:06:12.587646 17619 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:06:12.587656 17619 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:06:12.587661 17619 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:06:12.587668 17619 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.587677 17619 net.cpp:150] Setting up L1_b7_relu\nI0817 16:06:12.587684 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587689 17619 net.cpp:165] Memory required for data: 665089500\nI0817 16:06:12.587693 17619 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.587705 17619 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.587712 17619 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:06:12.587719 17619 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:06:12.587728 17619 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:06:12.587779 17619 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:06:12.587790 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587797 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.587802 17619 net.cpp:165] Memory required for data: 681473500\nI0817 16:06:12.587807 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:06:12.587821 17619 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:06:12.587827 17619 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:06:12.587836 17619 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:06:12.588191 17619 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:06:12.588204 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.588209 17619 net.cpp:165] Memory required for data: 689665500\nI0817 16:06:12.588218 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:06:12.588230 17619 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:06:12.588237 17619 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:06:12.588254 17619 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:06:12.588526 17619 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:06:12.588539 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.588544 17619 net.cpp:165] Memory required for data: 697857500\nI0817 16:06:12.588554 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:06:12.588562 17619 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:06:12.588568 17619 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:06:12.588577 17619 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.588635 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:06:12.588800 17619 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:06:12.588814 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.588819 17619 net.cpp:165] Memory required for data: 706049500\nI0817 16:06:12.588829 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:06:12.588835 17619 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:06:12.588845 17619 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:06:12.588852 17619 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.588861 17619 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:06:12.588868 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.588873 17619 net.cpp:165] Memory required for data: 714241500\nI0817 16:06:12.588877 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:06:12.588891 17619 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:06:12.588897 17619 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:06:12.588908 17619 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:06:12.589264 17619 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:06:12.589278 17619 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.589283 17619 net.cpp:165] Memory required for data: 722433500\nI0817 16:06:12.589293 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:06:12.589305 17619 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:06:12.589313 17619 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:06:12.589320 17619 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:06:12.589596 17619 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:06:12.589609 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.589614 17619 net.cpp:165] Memory required for data: 730625500\nI0817 16:06:12.589624 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:06:12.589633 17619 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:06:12.589638 17619 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:06:12.589646 17619 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.589711 17619 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:06:12.589874 17619 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:06:12.589886 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.589891 17619 net.cpp:165] Memory required for data: 738817500\nI0817 16:06:12.589900 17619 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:06:12.589910 17619 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:06:12.589915 17619 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:06:12.589922 17619 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:06:12.589932 17619 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:06:12.589965 17619 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:06:12.589978 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.589983 17619 net.cpp:165] Memory required for data: 747009500\nI0817 16:06:12.589988 17619 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:06:12.589999 17619 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:06:12.590005 17619 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:06:12.590013 17619 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.590028 17619 net.cpp:150] Setting up L1_b8_relu\nI0817 16:06:12.590035 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.590040 17619 net.cpp:165] Memory required for data: 755201500\nI0817 16:06:12.590044 17619 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.590051 17619 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.590056 17619 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:06:12.590065 17619 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:06:12.590073 17619 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:06:12.590123 17619 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:06:12.590136 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.590142 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.590147 17619 net.cpp:165] Memory required for data: 771585500\nI0817 16:06:12.590152 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:06:12.590165 17619 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:06:12.590171 17619 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:06:12.590180 17619 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:06:12.590541 17619 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:06:12.590555 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.590560 17619 net.cpp:165] Memory required for data: 779777500\nI0817 16:06:12.590569 17619 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:06:12.590580 17619 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:06:12.590587 17619 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:06:12.590595 17619 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:06:12.590879 17619 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:06:12.590893 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.590898 17619 net.cpp:165] Memory required for data: 787969500\nI0817 16:06:12.590909 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:06:12.590919 17619 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:06:12.590926 17619 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:06:12.590934 17619 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.590994 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:06:12.591153 17619 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:06:12.591166 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.591171 17619 net.cpp:165] Memory required for data: 796161500\nI0817 16:06:12.591181 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:06:12.591188 17619 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:06:12.591194 17619 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:06:12.591205 17619 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.591215 17619 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:06:12.591223 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.591228 17619 net.cpp:165] Memory required for data: 804353500\nI0817 16:06:12.591231 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:06:12.591245 17619 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:06:12.591251 17619 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:06:12.591260 17619 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:06:12.591614 17619 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:06:12.591627 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.591632 17619 net.cpp:165] Memory required for data: 812545500\nI0817 16:06:12.591641 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:06:12.591653 17619 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:06:12.591660 17619 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:06:12.591673 17619 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:06:12.591949 17619 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:06:12.591964 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.591969 17619 net.cpp:165] Memory required for data: 820737500\nI0817 16:06:12.592001 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:06:12.592013 17619 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:06:12.592020 17619 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:06:12.592027 17619 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.592087 17619 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:06:12.592247 17619 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:06:12.592259 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.592264 17619 net.cpp:165] Memory required for data: 828929500\nI0817 16:06:12.592273 17619 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:06:12.592283 17619 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:06:12.592288 17619 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:06:12.592295 17619 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:06:12.592303 17619 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:06:12.592340 17619 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:06:12.592352 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.592357 17619 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:06:12.592362 17619 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:06:12.592370 17619 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:06:12.592375 17619 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:06:12.592384 17619 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.592394 17619 net.cpp:150] Setting up L1_b9_relu\nI0817 16:06:12.592401 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.592406 17619 net.cpp:165] Memory required for data: 845313500\nI0817 16:06:12.592411 17619 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.592417 17619 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.592422 17619 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:06:12.592432 17619 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:06:12.592442 17619 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:06:12.592489 17619 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:06:12.592500 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.592507 17619 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:06:12.592511 17619 net.cpp:165] Memory required for data: 861697500\nI0817 16:06:12.592516 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:06:12.592530 17619 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:06:12.592537 17619 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:06:12.592546 17619 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:06:12.592913 17619 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:06:12.592928 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.592933 17619 net.cpp:165] Memory required for data: 
863745500\nI0817 16:06:12.592941 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:06:12.592953 17619 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:06:12.592959 17619 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:06:12.592968 17619 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:06:12.593231 17619 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:06:12.593245 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.593248 17619 net.cpp:165] Memory required for data: 865793500\nI0817 16:06:12.593258 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:06:12.593273 17619 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:06:12.593281 17619 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:06:12.593288 17619 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.593350 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:06:12.593508 17619 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:06:12.593523 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.593528 17619 net.cpp:165] Memory required for data: 867841500\nI0817 16:06:12.593538 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:06:12.593545 17619 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:06:12.593551 17619 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:06:12.593559 17619 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.593569 17619 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:06:12.593575 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.593580 17619 net.cpp:165] Memory required for data: 869889500\nI0817 16:06:12.593585 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:06:12.593597 17619 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:06:12.593603 17619 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:06:12.593614 17619 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:06:12.593974 17619 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:06:12.593988 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.593994 17619 net.cpp:165] Memory required for data: 871937500\nI0817 16:06:12.594002 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:06:12.594017 17619 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:06:12.594024 17619 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:06:12.594034 17619 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:06:12.594297 17619 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:06:12.594310 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.594316 17619 net.cpp:165] Memory required for data: 873985500\nI0817 16:06:12.594326 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:06:12.594334 17619 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:06:12.594341 17619 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:06:12.594347 17619 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.594408 17619 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:06:12.594563 17619 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:06:12.594575 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.594580 17619 net.cpp:165] Memory required for data: 876033500\nI0817 16:06:12.594589 17619 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:06:12.594600 17619 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:06:12.594607 17619 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:06:12.594615 17619 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:06:12.594647 17619 net.cpp:150] Setting up L2_b1_pool\nI0817 16:06:12.594656 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.594661 17619 net.cpp:165] Memory required for data: 878081500\nI0817 16:06:12.594666 17619 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:06:12.594674 17619 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:06:12.594681 17619 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:06:12.594687 17619 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:06:12.594694 17619 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:06:12.594733 17619 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:06:12.594745 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.594750 17619 net.cpp:165] Memory required for data: 880129500\nI0817 16:06:12.594756 17619 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:06:12.594766 17619 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:06:12.594779 17619 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:06:12.594786 17619 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.594796 17619 net.cpp:150] Setting up L2_b1_relu\nI0817 16:06:12.594804 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.594808 17619 net.cpp:165] Memory required for data: 882177500\nI0817 16:06:12.594812 17619 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:06:12.594821 17619 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:06:12.594830 17619 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:06:12.597028 17619 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:06:12.597045 17619 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:06:12.597050 17619 net.cpp:165] Memory required for data: 884225500\nI0817 16:06:12.597056 17619 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:06:12.597066 17619 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:06:12.597072 17619 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:06:12.597080 17619 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:06:12.597090 17619 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:06:12.597133 17619 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:06:12.597148 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.597153 17619 net.cpp:165] Memory required for data: 888321500\nI0817 16:06:12.597158 17619 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.597167 17619 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.597172 17619 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:06:12.597179 17619 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:06:12.597193 17619 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:06:12.597241 17619 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:06:12.597252 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.597259 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.597263 17619 net.cpp:165] Memory required for data: 896513500\nI0817 16:06:12.597270 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:06:12.597283 17619 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:06:12.597290 17619 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:06:12.597301 17619 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:06:12.597810 17619 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:06:12.597826 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.597831 17619 net.cpp:165] Memory required for data: 900609500\nI0817 16:06:12.597841 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:06:12.597851 17619 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:06:12.597856 17619 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:06:12.597867 17619 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:06:12.598134 17619 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:06:12.598147 17619 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:06:12.598152 17619 net.cpp:165] Memory required for data: 904705500\nI0817 16:06:12.598162 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:06:12.598175 17619 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:06:12.598181 17619 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:06:12.598188 17619 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.598246 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:06:12.598402 17619 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:06:12.598415 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.598420 17619 net.cpp:165] Memory required for data: 908801500\nI0817 16:06:12.598429 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:06:12.598440 17619 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:06:12.598453 17619 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:06:12.598461 17619 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.598471 17619 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:06:12.598479 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.598484 17619 net.cpp:165] Memory required for data: 912897500\nI0817 16:06:12.598489 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:06:12.598502 17619 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:06:12.598508 17619 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:06:12.598518 17619 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:06:12.599014 17619 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:06:12.599028 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599033 17619 net.cpp:165] Memory required for data: 916993500\nI0817 16:06:12.599042 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:06:12.599051 17619 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:06:12.599058 
17619 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:06:12.599068 17619 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:06:12.599328 17619 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:06:12.599341 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599346 17619 net.cpp:165] Memory required for data: 921089500\nI0817 16:06:12.599356 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:06:12.599370 17619 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:06:12.599376 17619 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:06:12.599383 17619 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.599440 17619 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:06:12.599598 17619 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:06:12.599612 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599617 17619 net.cpp:165] Memory required for data: 925185500\nI0817 16:06:12.599627 17619 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:06:12.599638 17619 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:06:12.599644 17619 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:06:12.599651 17619 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:06:12.599659 17619 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:06:12.599690 17619 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:06:12.599706 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599712 17619 net.cpp:165] Memory required for data: 929281500\nI0817 16:06:12.599717 17619 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:06:12.599725 17619 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:06:12.599730 17619 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:06:12.599740 17619 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.599750 17619 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:06:12.599758 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599762 17619 net.cpp:165] Memory required for data: 933377500\nI0817 16:06:12.599767 17619 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.599774 17619 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.599779 17619 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:06:12.599787 17619 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:06:12.599797 17619 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:06:12.599846 17619 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:06:12.599858 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599864 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.599869 17619 net.cpp:165] Memory required for data: 941569500\nI0817 16:06:12.599880 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:06:12.599892 17619 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:06:12.599898 17619 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:06:12.599910 17619 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:06:12.600401 17619 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:06:12.600416 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.600421 17619 net.cpp:165] Memory required for data: 945665500\nI0817 16:06:12.600430 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:06:12.600438 17619 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:06:12.600445 17619 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:06:12.600455 17619 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:06:12.600724 17619 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:06:12.600739 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.600744 17619 net.cpp:165] Memory required for data: 949761500\nI0817 16:06:12.600754 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:06:12.600765 17619 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:06:12.600771 17619 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:06:12.600780 17619 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.600836 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:06:12.600991 17619 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:06:12.601003 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.601008 17619 net.cpp:165] Memory required for data: 953857500\nI0817 16:06:12.601016 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:06:12.601029 17619 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:06:12.601037 17619 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:06:12.601043 17619 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.601053 17619 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:06:12.601059 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.601064 17619 net.cpp:165] Memory required for data: 957953500\nI0817 16:06:12.601069 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:06:12.601084 17619 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:06:12.601090 17619 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:06:12.601101 17619 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:06:12.601588 17619 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:06:12.601603 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.601608 17619 net.cpp:165] Memory required for data: 962049500\nI0817 16:06:12.601616 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:06:12.601625 17619 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:06:12.601631 17619 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:06:12.601644 17619 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:06:12.601917 17619 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:06:12.601932 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.601936 17619 net.cpp:165] Memory required for data: 966145500\nI0817 16:06:12.601946 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:06:12.601958 17619 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:06:12.601964 17619 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:06:12.601971 17619 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.602025 17619 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:06:12.602206 17619 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:06:12.602221 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.602226 17619 net.cpp:165] Memory required for data: 970241500\nI0817 16:06:12.602234 17619 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:06:12.602243 17619 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:06:12.602257 17619 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:06:12.602267 17619 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:06:12.602274 17619 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:06:12.602303 17619 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:06:12.602316 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.602321 17619 net.cpp:165] Memory required for data: 974337500\nI0817 16:06:12.602326 17619 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:06:12.602346 17619 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:06:12.602354 17619 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:06:12.602360 17619 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.602370 17619 net.cpp:150] Setting up L2_b3_relu\nI0817 16:06:12.602377 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.602381 17619 net.cpp:165] Memory required for data: 978433500\nI0817 16:06:12.602387 17619 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.602394 17619 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.602399 17619 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:06:12.602409 17619 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:06:12.602418 17619 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:06:12.602468 17619 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:06:12.602478 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.602485 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.602489 17619 net.cpp:165] Memory required for data: 986625500\nI0817 16:06:12.602495 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:06:12.602511 17619 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:06:12.602517 17619 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:06:12.602526 17619 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:06:12.603025 17619 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:06:12.603040 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.603045 17619 net.cpp:165] Memory required for data: 990721500\nI0817 16:06:12.603055 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:06:12.603067 17619 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:06:12.603073 17619 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:06:12.603081 17619 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:06:12.603353 17619 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:06:12.603366 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.603371 17619 net.cpp:165] Memory required for data: 994817500\nI0817 16:06:12.603381 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:06:12.603390 17619 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:06:12.603396 17619 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:06:12.603406 17619 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.603466 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:06:12.603627 17619 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:06:12.603641 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.603646 17619 net.cpp:165] Memory required for data: 998913500\nI0817 16:06:12.603654 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:06:12.603662 17619 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:06:12.603668 17619 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:06:12.603678 17619 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.603688 17619 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:06:12.603695 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.603705 17619 net.cpp:165] Memory required for data: 1003009500\nI0817 16:06:12.603725 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:06:12.603747 17619 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:06:12.603754 17619 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:06:12.603763 17619 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:06:12.604267 17619 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:06:12.604284 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.604288 17619 net.cpp:165] Memory required for data: 1007105500\nI0817 16:06:12.604296 17619 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:06:12.604305 17619 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:06:12.604315 17619 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:06:12.604323 17619 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:06:12.604584 17619 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:06:12.604598 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.604602 17619 net.cpp:165] Memory required for data: 1011201500\nI0817 16:06:12.604612 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:06:12.604621 17619 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:06:12.604627 17619 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:06:12.604635 17619 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.604696 17619 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:06:12.604862 17619 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:06:12.604878 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.604883 17619 net.cpp:165] Memory required for data: 1015297500\nI0817 16:06:12.604892 17619 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:06:12.604902 17619 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:06:12.604907 17619 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:06:12.604914 17619 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:06:12.604921 17619 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:06:12.604954 17619 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:06:12.604969 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.604974 17619 net.cpp:165] Memory required for data: 1019393500\nI0817 16:06:12.604979 17619 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:06:12.604987 17619 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:06:12.604993 17619 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:06:12.605003 17619 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.605013 17619 net.cpp:150] Setting up L2_b4_relu\nI0817 16:06:12.605020 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.605024 17619 net.cpp:165] Memory required for data: 1023489500\nI0817 16:06:12.605029 17619 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.605036 17619 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.605041 17619 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:06:12.605051 17619 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:06:12.605062 17619 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:06:12.605109 17619 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:06:12.605121 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.605128 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.605132 17619 net.cpp:165] Memory required for data: 1031681500\nI0817 16:06:12.605137 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:06:12.605151 17619 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:06:12.605159 17619 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:06:12.605167 17619 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:06:12.605674 17619 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:06:12.605689 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.605693 17619 net.cpp:165] Memory required for data: 1035777500\nI0817 16:06:12.605708 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:06:12.605721 17619 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:06:12.605728 17619 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:06:12.605736 17619 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:06:12.606011 17619 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:06:12.606025 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.606030 17619 net.cpp:165] Memory required for data: 1039873500\nI0817 16:06:12.606041 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:06:12.606050 17619 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:06:12.606055 17619 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:06:12.606066 17619 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.606125 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:06:12.606284 17619 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:06:12.606297 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.606302 17619 net.cpp:165] Memory required for data: 1043969500\nI0817 16:06:12.606312 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:06:12.606319 17619 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:06:12.606325 17619 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:06:12.606333 17619 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.606341 17619 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:06:12.606348 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.606353 17619 net.cpp:165] Memory required for data: 1048065500\nI0817 16:06:12.606357 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:06:12.606371 17619 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:06:12.606377 17619 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:06:12.606389 17619 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:06:12.606894 17619 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:06:12.606909 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.606914 17619 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:06:12.606921 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:06:12.606933 17619 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:06:12.606940 17619 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:06:12.606952 17619 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:06:12.607223 17619 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:06:12.607237 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607242 17619 net.cpp:165] Memory required for data: 1056257500\nI0817 16:06:12.607252 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:06:12.607260 17619 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:06:12.607267 17619 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:06:12.607275 17619 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.607334 17619 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:06:12.607491 17619 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:06:12.607504 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607509 17619 net.cpp:165] Memory required for data: 1060353500\nI0817 16:06:12.607517 17619 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:06:12.607528 17619 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:06:12.607535 17619 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:06:12.607542 17619 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:06:12.607549 17619 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:06:12.607580 17619 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:06:12.607596 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607601 17619 net.cpp:165] Memory required for data: 1064449500\nI0817 16:06:12.607606 17619 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:06:12.607614 17619 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:06:12.607620 17619 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:06:12.607630 17619 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.607640 17619 net.cpp:150] Setting up L2_b5_relu\nI0817 16:06:12.607646 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607651 17619 net.cpp:165] Memory required for data: 1068545500\nI0817 16:06:12.607656 17619 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.607663 17619 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.607668 17619 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:06:12.607678 17619 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:06:12.607688 17619 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:06:12.607748 17619 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:06:12.607760 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607767 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.607771 17619 net.cpp:165] Memory required for data: 1076737500\nI0817 16:06:12.607777 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:06:12.607791 17619 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:06:12.607798 17619 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:06:12.607807 17619 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:06:12.608345 17619 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:06:12.608361 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.608366 17619 net.cpp:165] Memory required for data: 1080833500\nI0817 16:06:12.608376 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:06:12.608386 17619 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:06:12.608394 17619 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:06:12.608402 17619 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:06:12.608667 17619 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:06:12.608681 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.608685 17619 net.cpp:165] Memory required for data: 1084929500\nI0817 16:06:12.608696 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:06:12.608712 17619 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:06:12.608718 17619 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:06:12.608726 17619 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.608788 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:06:12.608944 17619 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:06:12.608960 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.608965 17619 net.cpp:165] Memory required for data: 1089025500\nI0817 16:06:12.608974 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:06:12.608983 17619 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:06:12.608989 17619 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:06:12.608995 17619 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.609005 17619 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:06:12.609012 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.609016 17619 net.cpp:165] Memory required for data: 1093121500\nI0817 16:06:12.609021 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:06:12.609035 17619 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:06:12.609041 17619 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:06:12.609052 17619 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:06:12.609551 17619 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:06:12.609566 17619 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.609571 17619 net.cpp:165] Memory required for data: 1097217500\nI0817 16:06:12.609580 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:06:12.609592 17619 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:06:12.609598 17619 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:06:12.609609 17619 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:06:12.609882 17619 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:06:12.609896 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.609901 17619 net.cpp:165] Memory required for data: 1101313500\nI0817 16:06:12.609911 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:06:12.609920 17619 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:06:12.609926 17619 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:06:12.609933 17619 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.609993 17619 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:06:12.610152 17619 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:06:12.610167 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.610172 17619 net.cpp:165] Memory required for data: 1105409500\nI0817 16:06:12.610180 17619 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:06:12.610191 17619 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:06:12.610198 17619 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:06:12.610205 17619 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:06:12.610213 17619 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:06:12.610241 17619 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:06:12.610250 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.610255 17619 net.cpp:165] Memory required for data: 1109505500\nI0817 16:06:12.610260 17619 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:06:12.610271 17619 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:06:12.610277 17619 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:06:12.610285 17619 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.610293 17619 net.cpp:150] Setting up L2_b6_relu\nI0817 16:06:12.610301 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.610304 17619 net.cpp:165] Memory required for data: 1113601500\nI0817 16:06:12.610309 17619 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.610316 17619 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.610321 17619 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:06:12.610329 17619 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:06:12.610338 17619 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:06:12.610390 17619 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:06:12.610402 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.610409 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.610414 17619 net.cpp:165] Memory required for data: 1121793500\nI0817 16:06:12.610419 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:06:12.610431 17619 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:06:12.610438 17619 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:06:12.610447 17619 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:06:12.611946 17619 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:06:12.611963 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.611969 17619 net.cpp:165] Memory required for data: 1125889500\nI0817 16:06:12.611979 17619 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:06:12.611996 17619 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:06:12.612004 17619 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:06:12.612015 17619 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:06:12.612284 17619 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:06:12.612298 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.612303 17619 net.cpp:165] Memory required for data: 1129985500\nI0817 16:06:12.612313 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:06:12.612325 17619 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:06:12.612331 17619 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:06:12.612339 17619 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.612397 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:06:12.612560 17619 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:06:12.612573 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.612579 17619 net.cpp:165] Memory required for data: 1134081500\nI0817 16:06:12.612588 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:06:12.612599 17619 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:06:12.612606 17619 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:06:12.612612 17619 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.612625 17619 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:06:12.612632 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.612637 17619 net.cpp:165] Memory required for data: 1138177500\nI0817 16:06:12.612643 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:06:12.612653 17619 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:06:12.612658 17619 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:06:12.612670 17619 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:06:12.613160 17619 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:06:12.613175 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.613181 17619 net.cpp:165] Memory required for data: 1142273500\nI0817 16:06:12.613189 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:06:12.613199 17619 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:06:12.613205 17619 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:06:12.613219 17619 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:06:12.613489 17619 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:06:12.613502 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.613507 17619 net.cpp:165] Memory required for data: 1146369500\nI0817 16:06:12.613517 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:06:12.613529 17619 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:06:12.613535 17619 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:06:12.613543 17619 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.613600 17619 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:06:12.613761 17619 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:06:12.613775 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.613780 17619 net.cpp:165] Memory required for data: 1150465500\nI0817 16:06:12.613790 17619 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:06:12.613801 17619 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:06:12.613807 17619 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:06:12.613814 17619 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:06:12.613822 17619 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:06:12.613854 17619 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:06:12.613867 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.613871 17619 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:06:12.613876 17619 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:06:12.613884 17619 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:06:12.613896 17619 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:06:12.613907 17619 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.613916 17619 net.cpp:150] Setting up L2_b7_relu\nI0817 16:06:12.613924 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.613929 17619 net.cpp:165] Memory required for data: 1158657500\nI0817 16:06:12.613934 17619 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.613940 17619 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.613945 17619 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:06:12.613952 17619 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:06:12.613962 17619 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:06:12.614014 17619 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:06:12.614027 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.614033 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.614037 17619 net.cpp:165] Memory required for data: 1166849500\nI0817 16:06:12.614043 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:06:12.614054 17619 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:06:12.614060 17619 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:06:12.614074 17619 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:06:12.614562 17619 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:06:12.614576 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.614581 17619 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:06:12.614590 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:06:12.614599 17619 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:06:12.614605 17619 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:06:12.614616 17619 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:06:12.614897 17619 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:06:12.614910 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.614915 17619 net.cpp:165] Memory required for data: 1175041500\nI0817 16:06:12.614925 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:06:12.614938 17619 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:06:12.614943 17619 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:06:12.614951 17619 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.615010 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:06:12.615170 17619 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:06:12.615183 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.615188 17619 net.cpp:165] Memory required for data: 1179137500\nI0817 16:06:12.615197 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:06:12.615208 17619 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:06:12.615214 17619 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:06:12.615222 17619 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.615231 17619 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:06:12.615238 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.615243 17619 net.cpp:165] Memory required for data: 1183233500\nI0817 16:06:12.615247 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:06:12.615262 17619 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:06:12.615267 17619 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:06:12.615278 17619 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:06:12.615772 17619 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:06:12.615787 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.615792 17619 net.cpp:165] Memory required for data: 1187329500\nI0817 16:06:12.615802 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:06:12.615816 17619 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:06:12.615823 17619 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:06:12.615834 17619 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:06:12.616114 17619 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:06:12.616127 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.616132 17619 net.cpp:165] Memory required for data: 1191425500\nI0817 16:06:12.616142 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:06:12.616153 17619 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:06:12.616160 17619 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:06:12.616168 17619 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.616225 17619 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:06:12.616384 17619 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:06:12.616397 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.616402 17619 net.cpp:165] Memory required for data: 1195521500\nI0817 16:06:12.616410 17619 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:06:12.616422 17619 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:06:12.616428 17619 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:06:12.616436 17619 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:06:12.616443 17619 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:06:12.616472 17619 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:06:12.616485 17619 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:06:12.616490 17619 net.cpp:165] Memory required for data: 1199617500\nI0817 16:06:12.616495 17619 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:06:12.616503 17619 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:06:12.616508 17619 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:06:12.616515 17619 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.616524 17619 net.cpp:150] Setting up L2_b8_relu\nI0817 16:06:12.616531 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.616536 17619 net.cpp:165] Memory required for data: 1203713500\nI0817 16:06:12.616540 17619 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.616551 17619 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.616556 17619 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:06:12.616564 17619 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:06:12.616587 17619 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:06:12.616637 17619 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:06:12.616652 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.616659 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.616664 17619 net.cpp:165] Memory required for data: 1211905500\nI0817 16:06:12.616669 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:06:12.616680 17619 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:06:12.616688 17619 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:06:12.616696 17619 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:06:12.617203 17619 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:06:12.617220 17619 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:06:12.617226 17619 net.cpp:165] Memory required for data: 1216001500\nI0817 16:06:12.617235 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:06:12.617244 17619 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:06:12.617250 17619 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:06:12.617259 17619 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:06:12.617535 17619 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:06:12.617554 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.617560 17619 net.cpp:165] Memory required for data: 1220097500\nI0817 16:06:12.617570 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:06:12.617583 17619 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:06:12.617589 17619 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:06:12.617596 17619 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.617657 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:06:12.617831 17619 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:06:12.617846 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.617851 17619 net.cpp:165] Memory required for data: 1224193500\nI0817 16:06:12.617861 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:06:12.617868 17619 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:06:12.617874 17619 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:06:12.617884 17619 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.617894 17619 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:06:12.617902 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.617905 17619 net.cpp:165] Memory required for data: 1228289500\nI0817 16:06:12.617911 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:06:12.617925 17619 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:06:12.617931 17619 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:06:12.617939 17619 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:06:12.619436 17619 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:06:12.619453 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.619458 17619 net.cpp:165] Memory required for data: 1232385500\nI0817 16:06:12.619468 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:06:12.619478 17619 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:06:12.619484 17619 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:06:12.619495 17619 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:06:12.619770 17619 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:06:12.619783 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.619788 17619 net.cpp:165] Memory required for data: 1236481500\nI0817 16:06:12.619838 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:06:12.619851 17619 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:06:12.619858 17619 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:06:12.619866 17619 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.619933 17619 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:06:12.620088 17619 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:06:12.620103 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.620110 17619 net.cpp:165] Memory required for data: 1240577500\nI0817 16:06:12.620118 17619 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:06:12.620127 17619 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:06:12.620133 17619 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:06:12.620141 17619 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:06:12.620151 17619 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:06:12.620179 17619 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:06:12.620188 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.620193 17619 net.cpp:165] Memory required for data: 1244673500\nI0817 16:06:12.620198 17619 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:06:12.620206 17619 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:06:12.620213 17619 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:06:12.620223 17619 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.620232 17619 net.cpp:150] Setting up L2_b9_relu\nI0817 16:06:12.620239 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.620251 17619 net.cpp:165] Memory required for data: 1248769500\nI0817 16:06:12.620256 17619 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.620265 17619 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.620270 17619 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:06:12.620280 17619 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:06:12.620290 17619 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:06:12.620337 17619 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:06:12.620349 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.620357 17619 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:06:12.620360 17619 net.cpp:165] Memory required for data: 1256961500\nI0817 16:06:12.620365 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:06:12.620381 17619 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:06:12.620388 17619 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:06:12.620396 17619 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:06:12.620904 17619 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:06:12.620919 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.620924 17619 net.cpp:165] Memory required for data: 1257985500\nI0817 16:06:12.620934 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:06:12.620945 17619 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:06:12.620952 17619 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:06:12.620960 17619 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:06:12.621232 17619 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:06:12.621245 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.621250 17619 net.cpp:165] Memory required for data: 1259009500\nI0817 16:06:12.621260 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:06:12.621273 17619 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:06:12.621279 17619 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:06:12.621286 17619 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.621345 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:06:12.621510 17619 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:06:12.621522 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.621527 17619 net.cpp:165] Memory required for data: 1260033500\nI0817 16:06:12.621537 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:06:12.621547 17619 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:06:12.621554 17619 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:06:12.621562 17619 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:06:12.621570 17619 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:06:12.621583 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.621588 17619 net.cpp:165] Memory required for data: 1261057500\nI0817 16:06:12.621593 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:06:12.621604 17619 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:06:12.621610 17619 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:06:12.621621 17619 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:06:12.622119 17619 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:06:12.622134 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.622140 17619 net.cpp:165] Memory required for data: 1262081500\nI0817 16:06:12.622148 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:06:12.622159 17619 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:06:12.622164 17619 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:06:12.622175 17619 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:06:12.622455 17619 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:06:12.622475 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.622480 17619 net.cpp:165] Memory required for data: 1263105500\nI0817 16:06:12.622491 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:06:12.622500 17619 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:06:12.622506 17619 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:06:12.622514 17619 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:06:12.622575 17619 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:06:12.622746 17619 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:06:12.622762 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.622768 17619 net.cpp:165] Memory required for data: 1264129500\nI0817 16:06:12.622777 17619 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:06:12.622786 17619 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:06:12.622793 17619 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:06:12.622802 17619 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:06:12.622839 17619 net.cpp:150] Setting up L3_b1_pool\nI0817 16:06:12.622851 17619 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:06:12.622856 17619 net.cpp:165] Memory required for data: 1265153500\nI0817 16:06:12.622861 17619 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:06:12.622869 17619 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:06:12.622875 17619 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:06:12.622882 17619 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:06:12.622890 17619 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:06:12.622927 17619 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:06:12.622937 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.622942 17619 net.cpp:165] Memory required for data: 1266177500\nI0817 16:06:12.622947 17619 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:06:12.622956 17619 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:06:12.622961 17619 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:06:12.622967 17619 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:06:12.622977 17619 net.cpp:150] Setting up L3_b1_relu\nI0817 16:06:12.622983 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.622988 17619 net.cpp:165] Memory required for data: 1267201500\nI0817 16:06:12.622992 17619 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:06:12.623003 17619 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:06:12.623013 17619 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:06:12.624264 17619 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:06:12.624281 17619 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:06:12.624287 17619 net.cpp:165] Memory required for data: 1268225500\nI0817 16:06:12.624294 17619 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:06:12.624305 17619 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:06:12.624312 17619 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:06:12.624320 17619 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:06:12.624326 17619 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:06:12.624372 17619 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:06:12.624383 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.624388 17619 net.cpp:165] Memory required for data: 1270273500\nI0817 16:06:12.624394 17619 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.624402 17619 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.624408 17619 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:06:12.624418 17619 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:06:12.624428 17619 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:06:12.624480 17619 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:06:12.624495 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.624508 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.624513 17619 net.cpp:165] Memory required for data: 1274369500\nI0817 16:06:12.624518 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:06:12.624533 17619 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:06:12.624539 17619 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:06:12.624549 17619 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:06:12.625600 17619 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:06:12.625615 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.625620 17619 net.cpp:165] Memory required for data: 1276417500\nI0817 16:06:12.625630 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:06:12.625643 17619 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:06:12.625650 17619 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:06:12.625658 17619 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:06:12.625944 17619 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:06:12.625958 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.625963 17619 net.cpp:165] Memory required for data: 1278465500\nI0817 16:06:12.625973 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:06:12.625983 17619 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:06:12.625989 17619 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:06:12.625998 17619 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.626060 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:06:12.626221 17619 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:06:12.626235 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.626240 17619 net.cpp:165] Memory required for data: 1280513500\nI0817 16:06:12.626248 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:06:12.626256 17619 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:06:12.626263 17619 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:06:12.626273 17619 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:06:12.626283 17619 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:06:12.626291 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.626294 17619 net.cpp:165] Memory required for data: 1282561500\nI0817 16:06:12.626299 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:06:12.626313 17619 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:06:12.626319 17619 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:06:12.626328 17619 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:06:12.627399 17619 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:06:12.627415 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.627420 17619 net.cpp:165] Memory required for data: 1284609500\nI0817 16:06:12.627430 17619 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:06:12.627442 17619 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:06:12.627449 17619 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:06:12.627457 17619 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:06:12.627738 17619 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:06:12.627753 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.627758 17619 net.cpp:165] Memory required for data: 1286657500\nI0817 16:06:12.627768 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:06:12.627780 17619 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:06:12.627786 17619 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:06:12.627794 17619 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:06:12.627856 17619 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:06:12.628018 17619 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:06:12.628031 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.628036 17619 net.cpp:165] Memory required for data: 1288705500\nI0817 16:06:12.628053 17619 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:06:12.628065 17619 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:06:12.628072 17619 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:06:12.628079 17619 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:06:12.628089 17619 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:06:12.628123 17619 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:06:12.628132 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.628137 17619 net.cpp:165] Memory required for data: 1290753500\nI0817 16:06:12.628142 17619 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:06:12.628154 17619 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:06:12.628160 17619 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:06:12.628167 17619 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:06:12.628176 17619 net.cpp:150] Setting up L3_b2_relu\nI0817 16:06:12.628183 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.628188 17619 net.cpp:165] Memory required for data: 1292801500\nI0817 16:06:12.628192 17619 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.628201 17619 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.628206 17619 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:06:12.628212 17619 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:06:12.628221 17619 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:06:12.628271 17619 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:06:12.628283 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.628289 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.628294 17619 net.cpp:165] Memory required for data: 1296897500\nI0817 16:06:12.628299 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:06:12.628314 17619 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:06:12.628320 17619 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:06:12.628329 17619 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:06:12.629371 17619 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:06:12.629387 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.629392 17619 net.cpp:165] Memory required for data: 1298945500\nI0817 16:06:12.629401 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:06:12.629413 17619 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:06:12.629420 17619 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:06:12.629431 17619 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:06:12.629707 17619 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:06:12.629721 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.629727 17619 net.cpp:165] Memory required for data: 1300993500\nI0817 16:06:12.629737 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:06:12.629746 17619 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:06:12.629753 17619 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:06:12.629761 17619 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.629822 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:06:12.629982 17619 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:06:12.629995 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.630000 17619 net.cpp:165] Memory required for data: 1303041500\nI0817 16:06:12.630009 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:06:12.630017 17619 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:06:12.630023 17619 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:06:12.630033 17619 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:06:12.630043 17619 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:06:12.630058 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.630062 17619 net.cpp:165] Memory required for data: 1305089500\nI0817 16:06:12.630066 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:06:12.630082 17619 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:06:12.630089 17619 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:06:12.630097 17619 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:06:12.631144 17619 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:06:12.631158 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.631165 17619 net.cpp:165] Memory required for data: 1307137500\nI0817 16:06:12.631172 17619 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:06:12.631186 17619 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:06:12.631191 17619 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:06:12.631199 17619 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:06:12.631471 17619 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:06:12.631484 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.631489 17619 net.cpp:165] Memory required for data: 1309185500\nI0817 16:06:12.631500 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:06:12.631512 17619 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:06:12.631518 17619 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:06:12.631526 17619 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:06:12.631589 17619 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:06:12.631760 17619 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:06:12.631774 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.631779 17619 net.cpp:165] Memory required for data: 1311233500\nI0817 16:06:12.631789 17619 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:06:12.631801 17619 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:06:12.631808 17619 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:06:12.631815 17619 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:06:12.631825 17619 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:06:12.631860 17619 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:06:12.631870 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.631875 17619 net.cpp:165] Memory required for data: 1313281500\nI0817 16:06:12.631880 17619 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:06:12.631891 17619 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:06:12.631898 17619 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:06:12.631906 17619 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:06:12.631914 17619 net.cpp:150] Setting up L3_b3_relu\nI0817 16:06:12.631922 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.631927 17619 net.cpp:165] Memory required for data: 1315329500\nI0817 16:06:12.631932 17619 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.631938 17619 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.631944 17619 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:06:12.631952 17619 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:06:12.631961 17619 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:06:12.632010 17619 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:06:12.632022 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.632030 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.632033 17619 net.cpp:165] Memory required for data: 1319425500\nI0817 16:06:12.632038 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:06:12.632052 17619 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:06:12.632058 17619 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:06:12.632074 17619 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:06:12.633116 17619 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:06:12.633131 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.633136 17619 net.cpp:165] Memory required for data: 1321473500\nI0817 16:06:12.633146 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:06:12.633157 17619 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:06:12.633164 17619 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:06:12.633175 17619 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:06:12.633446 17619 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:06:12.633462 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.633467 17619 net.cpp:165] Memory required for data: 1323521500\nI0817 16:06:12.633477 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:06:12.633486 17619 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:06:12.633492 17619 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:06:12.633502 17619 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.633561 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:06:12.633726 17619 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:06:12.633740 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.633745 17619 net.cpp:165] Memory required for data: 1325569500\nI0817 16:06:12.633754 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:06:12.633762 17619 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:06:12.633769 17619 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:06:12.633779 17619 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:06:12.633790 17619 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:06:12.633796 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.633800 17619 net.cpp:165] Memory required for data: 1327617500\nI0817 16:06:12.633805 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:06:12.633819 17619 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:06:12.633826 17619 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:06:12.633834 17619 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:06:12.636015 17619 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:06:12.636034 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636039 17619 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:06:12.636049 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:06:12.636061 17619 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:06:12.636068 17619 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:06:12.636080 17619 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:06:12.636354 17619 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:06:12.636368 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636373 17619 net.cpp:165] Memory required for data: 1331713500\nI0817 16:06:12.636384 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:06:12.636392 17619 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:06:12.636399 17619 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:06:12.636409 17619 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:06:12.636469 17619 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:06:12.636632 17619 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:06:12.636646 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636651 17619 net.cpp:165] Memory required for data: 1333761500\nI0817 16:06:12.636659 17619 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:06:12.636669 17619 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:06:12.636675 17619 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:06:12.636682 17619 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:06:12.636693 17619 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:06:12.636744 17619 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:06:12.636757 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636762 17619 net.cpp:165] Memory required for data: 1335809500\nI0817 16:06:12.636768 17619 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:06:12.636775 17619 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:06:12.636781 17619 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:06:12.636788 17619 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:06:12.636798 17619 net.cpp:150] Setting up L3_b4_relu\nI0817 16:06:12.636806 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636809 17619 net.cpp:165] Memory required for data: 1337857500\nI0817 16:06:12.636814 17619 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.636824 17619 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.636831 17619 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:06:12.636837 17619 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:06:12.636847 17619 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:06:12.636898 17619 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:06:12.636909 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636915 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.636919 17619 net.cpp:165] Memory required for data: 1341953500\nI0817 16:06:12.636925 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:06:12.636936 17619 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:06:12.636943 17619 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:06:12.636955 17619 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:06:12.637991 17619 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:06:12.638006 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.638011 17619 net.cpp:165] Memory required for data: 1344001500\nI0817 16:06:12.638020 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:06:12.638031 17619 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:06:12.638037 17619 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:06:12.638049 17619 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:06:12.638322 17619 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:06:12.638337 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.638344 17619 net.cpp:165] Memory required for data: 1346049500\nI0817 16:06:12.638353 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:06:12.638362 17619 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:06:12.638368 17619 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:06:12.638376 17619 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.638437 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:06:12.638597 17619 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:06:12.638612 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.638617 17619 net.cpp:165] Memory required for data: 1348097500\nI0817 16:06:12.638625 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:06:12.638636 17619 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:06:12.638643 17619 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:06:12.638650 17619 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:06:12.638660 17619 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:06:12.638667 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.638672 17619 net.cpp:165] Memory required for data: 1350145500\nI0817 16:06:12.638677 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:06:12.638691 17619 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:06:12.638697 17619 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:06:12.638720 17619 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:06:12.639755 17619 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:06:12.639770 17619 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:06:12.639775 17619 net.cpp:165] Memory required for data: 1352193500\nI0817 16:06:12.639784 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:06:12.639796 17619 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:06:12.639803 17619 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:06:12.639814 17619 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:06:12.640084 17619 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:06:12.640097 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640102 17619 net.cpp:165] Memory required for data: 1354241500\nI0817 16:06:12.640112 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:06:12.640121 17619 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:06:12.640127 17619 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:06:12.640137 17619 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:06:12.640197 17619 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:06:12.640357 17619 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:06:12.640369 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640374 17619 net.cpp:165] Memory required for data: 1356289500\nI0817 16:06:12.640383 17619 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:06:12.640393 17619 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:06:12.640399 17619 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:06:12.640406 17619 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:06:12.640416 17619 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:06:12.640453 17619 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:06:12.640465 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640470 17619 net.cpp:165] Memory required for data: 1358337500\nI0817 16:06:12.640475 17619 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:06:12.640483 17619 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:06:12.640489 17619 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:06:12.640498 17619 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:06:12.640508 17619 net.cpp:150] Setting up L3_b5_relu\nI0817 16:06:12.640516 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640521 17619 net.cpp:165] Memory required for data: 1360385500\nI0817 16:06:12.640525 17619 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.640532 17619 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.640538 17619 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:06:12.640545 17619 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:06:12.640554 17619 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:06:12.640604 17619 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:06:12.640615 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640621 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.640626 17619 net.cpp:165] Memory required for data: 1364481500\nI0817 16:06:12.640631 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:06:12.640642 17619 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:06:12.640650 17619 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:06:12.640661 17619 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:06:12.641690 17619 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:06:12.641710 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.641716 17619 net.cpp:165] Memory required for data: 1366529500\nI0817 16:06:12.641726 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:06:12.641741 
17619 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:06:12.641748 17619 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:06:12.641759 17619 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:06:12.642037 17619 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:06:12.642052 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.642058 17619 net.cpp:165] Memory required for data: 1368577500\nI0817 16:06:12.642068 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:06:12.642077 17619 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:06:12.642084 17619 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:06:12.642091 17619 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.642149 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:06:12.642314 17619 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:06:12.642328 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.642333 17619 net.cpp:165] Memory required for data: 1370625500\nI0817 16:06:12.642343 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:06:12.642354 17619 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:06:12.642359 17619 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:06:12.642366 17619 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:06:12.642376 17619 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:06:12.642383 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.642387 17619 net.cpp:165] Memory required for data: 1372673500\nI0817 16:06:12.642392 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:06:12.642406 17619 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:06:12.642412 17619 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:06:12.642421 17619 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:06:12.643447 17619 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:06:12.643462 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.643467 17619 net.cpp:165] Memory required for data: 1374721500\nI0817 16:06:12.643476 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:06:12.643488 17619 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:06:12.643496 17619 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:06:12.643507 17619 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:06:12.643784 17619 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:06:12.643797 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.643802 17619 net.cpp:165] Memory required for data: 1376769500\nI0817 16:06:12.643812 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:06:12.643821 17619 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:06:12.643827 17619 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:06:12.643838 17619 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:06:12.643898 17619 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:06:12.644059 17619 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:06:12.644073 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.644078 17619 net.cpp:165] Memory required for data: 1378817500\nI0817 16:06:12.644088 17619 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:06:12.644096 17619 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:06:12.644103 17619 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:06:12.644112 17619 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:06:12.644120 17619 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:06:12.644158 17619 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:06:12.644170 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.644174 17619 net.cpp:165] Memory required for data: 1380865500\nI0817 16:06:12.644179 17619 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:06:12.644187 17619 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:06:12.644199 17619 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:06:12.644210 17619 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:06:12.644220 17619 net.cpp:150] Setting up L3_b6_relu\nI0817 16:06:12.644227 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.644232 17619 net.cpp:165] Memory required for data: 1382913500\nI0817 16:06:12.644237 17619 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.644244 17619 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.644249 17619 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:06:12.644256 17619 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:06:12.644266 17619 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:06:12.644320 17619 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:06:12.644331 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.644337 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.644342 17619 net.cpp:165] Memory required for data: 1387009500\nI0817 16:06:12.644347 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:06:12.644358 17619 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:06:12.644366 17619 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:06:12.644377 17619 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:06:12.645418 17619 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:06:12.645433 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.645438 17619 net.cpp:165] Memory required for data: 1389057500\nI0817 16:06:12.645447 17619 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:06:12.645457 17619 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:06:12.645463 17619 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:06:12.645474 17619 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:06:12.645759 17619 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:06:12.645773 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.645778 17619 net.cpp:165] Memory required for data: 1391105500\nI0817 16:06:12.645789 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:06:12.645797 17619 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:06:12.645803 17619 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:06:12.645812 17619 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.645872 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:06:12.646031 17619 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:06:12.646047 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.646052 17619 net.cpp:165] Memory required for data: 1393153500\nI0817 16:06:12.646061 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:06:12.646093 17619 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:06:12.646102 17619 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:06:12.646111 17619 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:06:12.646121 17619 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:06:12.646127 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.646132 17619 net.cpp:165] Memory required for data: 1395201500\nI0817 16:06:12.646138 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:06:12.646149 17619 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:06:12.646155 17619 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:06:12.646167 17619 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:06:12.647205 17619 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:06:12.647220 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.647225 17619 net.cpp:165] Memory required for data: 1397249500\nI0817 16:06:12.647234 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:06:12.647251 17619 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:06:12.647258 17619 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:06:12.647270 17619 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:06:12.647548 17619 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:06:12.647563 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.647569 17619 net.cpp:165] Memory required for data: 1399297500\nI0817 16:06:12.647579 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:06:12.647588 17619 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:06:12.647594 17619 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:06:12.647603 17619 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:06:12.647661 17619 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:06:12.647830 17619 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:06:12.647843 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.647848 17619 net.cpp:165] Memory required for data: 1401345500\nI0817 16:06:12.647857 17619 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:06:12.647869 17619 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:06:12.647876 17619 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:06:12.647883 17619 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:06:12.647891 17619 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:06:12.647927 17619 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:06:12.647939 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.647943 17619 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:06:12.647949 17619 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:06:12.647956 17619 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:06:12.647963 17619 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:06:12.647969 17619 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:06:12.647979 17619 net.cpp:150] Setting up L3_b7_relu\nI0817 16:06:12.647986 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.647990 17619 net.cpp:165] Memory required for data: 1405441500\nI0817 16:06:12.647995 17619 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.648002 17619 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.648007 17619 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:06:12.648017 17619 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:06:12.648027 17619 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:06:12.648075 17619 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:06:12.648087 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.648092 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.648097 17619 net.cpp:165] Memory required for data: 1409537500\nI0817 16:06:12.648102 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:06:12.648116 17619 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:06:12.648123 17619 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:06:12.648133 17619 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:06:12.650146 17619 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:06:12.650163 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.650168 17619 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:06:12.650177 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:06:12.650190 17619 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:06:12.650197 17619 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:06:12.650207 17619 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:06:12.650487 17619 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:06:12.650501 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.650513 17619 net.cpp:165] Memory required for data: 1413633500\nI0817 16:06:12.650524 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:06:12.650533 17619 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:06:12.650539 17619 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:06:12.650547 17619 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.650614 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:06:12.650786 17619 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:06:12.650800 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.650806 17619 net.cpp:165] Memory required for data: 1415681500\nI0817 16:06:12.650815 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:06:12.650823 17619 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:06:12.650830 17619 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:06:12.650836 17619 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:06:12.650846 17619 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:06:12.650853 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.650858 17619 net.cpp:165] Memory required for data: 1417729500\nI0817 16:06:12.650863 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:06:12.650877 17619 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:06:12.650884 17619 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:06:12.650895 17619 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:06:12.651926 17619 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:06:12.651942 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.651947 17619 net.cpp:165] Memory required for data: 1419777500\nI0817 16:06:12.651955 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:06:12.651968 17619 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:06:12.651975 17619 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:06:12.651983 17619 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:06:12.652254 17619 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:06:12.652267 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.652272 17619 net.cpp:165] Memory required for data: 1421825500\nI0817 16:06:12.652282 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:06:12.652294 17619 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:06:12.652302 17619 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:06:12.652308 17619 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:06:12.652370 17619 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:06:12.652531 17619 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:06:12.652544 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.652549 17619 net.cpp:165] Memory required for data: 1423873500\nI0817 16:06:12.652559 17619 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:06:12.652570 17619 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:06:12.652577 17619 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:06:12.652585 17619 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:06:12.652592 17619 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:06:12.652628 17619 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:06:12.652640 17619 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:06:12.652645 17619 net.cpp:165] Memory required for data: 1425921500\nI0817 16:06:12.652650 17619 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:06:12.652657 17619 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:06:12.652663 17619 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:06:12.652674 17619 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:06:12.652684 17619 net.cpp:150] Setting up L3_b8_relu\nI0817 16:06:12.652691 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.652696 17619 net.cpp:165] Memory required for data: 1427969500\nI0817 16:06:12.652714 17619 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.652722 17619 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.652729 17619 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:06:12.652735 17619 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:06:12.652745 17619 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:06:12.652796 17619 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:06:12.652808 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.652814 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.652819 17619 net.cpp:165] Memory required for data: 1432065500\nI0817 16:06:12.652824 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:06:12.652838 17619 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:06:12.652845 17619 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:06:12.652854 17619 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:06:12.653878 17619 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:06:12.653894 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:06:12.653899 17619 net.cpp:165] Memory required for data: 1434113500\nI0817 16:06:12.653908 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:06:12.653920 17619 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:06:12.653926 17619 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:06:12.653934 17619 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:06:12.654211 17619 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:06:12.654223 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.654228 17619 net.cpp:165] Memory required for data: 1436161500\nI0817 16:06:12.654239 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:06:12.654247 17619 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:06:12.654254 17619 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:06:12.654261 17619 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.654325 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:06:12.654489 17619 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:06:12.654501 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.654507 17619 net.cpp:165] Memory required for data: 1438209500\nI0817 16:06:12.654516 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:06:12.654525 17619 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:06:12.654531 17619 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:06:12.654537 17619 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:06:12.654549 17619 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:06:12.654557 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.654561 17619 net.cpp:165] Memory required for data: 1440257500\nI0817 16:06:12.654567 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:06:12.654578 17619 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:06:12.654587 17619 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:06:12.654595 17619 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:06:12.655628 17619 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:06:12.655643 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.655648 17619 net.cpp:165] Memory required for data: 1442305500\nI0817 16:06:12.655658 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:06:12.655669 17619 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:06:12.655676 17619 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:06:12.655684 17619 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:06:12.655967 17619 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:06:12.655982 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.655993 17619 net.cpp:165] Memory required for data: 1444353500\nI0817 16:06:12.656004 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:06:12.656015 17619 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:06:12.656023 17619 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:06:12.656030 17619 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:06:12.656092 17619 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:06:12.656257 17619 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:06:12.656271 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.656276 17619 net.cpp:165] Memory required for data: 1446401500\nI0817 16:06:12.656286 17619 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:06:12.656297 17619 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:06:12.656304 17619 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:06:12.656311 17619 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:06:12.656322 17619 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:06:12.656355 17619 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:06:12.656368 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.656373 17619 net.cpp:165] Memory required for data: 1448449500\nI0817 16:06:12.656378 17619 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:06:12.656388 17619 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:06:12.656394 17619 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:06:12.656401 17619 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:06:12.656410 17619 net.cpp:150] Setting up L3_b9_relu\nI0817 16:06:12.656417 17619 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:06:12.656422 17619 net.cpp:165] Memory required for data: 1450497500\nI0817 16:06:12.656427 17619 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:06:12.656435 17619 net.cpp:100] Creating Layer post_pool\nI0817 16:06:12.656441 17619 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:06:12.656448 17619 net.cpp:408] post_pool -> post_pool\nI0817 16:06:12.656484 17619 net.cpp:150] Setting up post_pool\nI0817 16:06:12.656497 17619 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:06:12.656502 17619 net.cpp:165] Memory required for data: 1450529500\nI0817 16:06:12.656507 17619 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:06:12.656518 17619 net.cpp:100] Creating Layer post_FC\nI0817 16:06:12.656524 17619 net.cpp:434] post_FC <- post_pool\nI0817 16:06:12.656533 17619 net.cpp:408] post_FC -> post_FC_top\nI0817 16:06:12.656694 17619 net.cpp:150] Setting up post_FC\nI0817 16:06:12.656713 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.656719 17619 net.cpp:165] Memory required for data: 1450534500\nI0817 16:06:12.656728 17619 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:06:12.656736 17619 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:06:12.656743 17619 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:06:12.656750 17619 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:06:12.656764 17619 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:06:12.656811 17619 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:06:12.656823 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.656829 17619 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:06:12.656834 17619 net.cpp:165] Memory required for data: 1450544500\nI0817 16:06:12.656839 17619 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:06:12.656850 17619 net.cpp:100] Creating Layer accuracy\nI0817 16:06:12.656857 17619 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:06:12.656863 17619 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:06:12.656872 17619 net.cpp:408] accuracy -> accuracy\nI0817 16:06:12.656883 17619 net.cpp:150] Setting up accuracy\nI0817 16:06:12.656890 17619 net.cpp:157] Top shape: (1)\nI0817 16:06:12.656901 17619 net.cpp:165] Memory required for data: 1450544504\nI0817 16:06:12.656908 17619 layer_factory.hpp:77] Creating layer loss\nI0817 16:06:12.656915 17619 net.cpp:100] Creating Layer loss\nI0817 16:06:12.656921 17619 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:06:12.656927 17619 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:06:12.656934 17619 net.cpp:408] loss -> loss\nI0817 16:06:12.656947 17619 layer_factory.hpp:77] Creating layer loss\nI0817 16:06:12.657079 17619 net.cpp:150] Setting up loss\nI0817 16:06:12.657091 17619 net.cpp:157] Top shape: (1)\nI0817 16:06:12.657096 17619 net.cpp:160]     with loss weight 1\nI0817 16:06:12.657114 17619 net.cpp:165] Memory required for data: 1450544508\nI0817 16:06:12.657120 17619 net.cpp:226] loss needs backward computation.\nI0817 16:06:12.657126 17619 net.cpp:228] accuracy does not need backward computation.\nI0817 16:06:12.657131 17619 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:06:12.657137 17619 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:06:12.657142 17619 net.cpp:226] post_pool needs backward computation.\nI0817 16:06:12.657147 17619 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:06:12.657152 17619 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.657157 17619 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.657162 17619 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.657167 17619 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.657172 17619 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.657177 17619 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.657182 17619 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.657187 17619 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.657192 17619 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:06:12.657197 17619 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:06:12.657202 17619 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.657208 17619 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.657213 17619 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.657218 17619 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.657223 17619 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.657228 17619 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:06:12.657233 17619 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.657238 17619 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.657243 17619 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:06:12.657248 17619 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:06:12.657253 17619 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:06:12.657259 17619 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.657264 17619 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.657269 17619 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.657274 17619 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.657279 17619 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.657284 17619 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.657289 17619 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.657294 17619 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:06:12.657299 17619 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:06:12.657305 17619 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.657310 17619 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.657315 17619 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.657327 17619 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.657332 17619 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.657337 17619 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.657342 17619 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.657348 17619 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.657353 17619 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:06:12.657359 17619 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:06:12.657364 17619 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:06:12.657369 17619 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.657374 17619 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.657380 17619 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.657385 17619 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.657390 17619 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.657395 17619 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.657400 17619 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.657405 17619 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:06:12.657411 17619 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:06:12.657416 17619 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.657421 17619 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.657426 17619 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.657434 17619 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.657440 17619 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.657445 17619 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.657450 17619 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.657455 17619 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.657461 17619 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:06:12.657466 17619 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:06:12.657471 17619 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:06:12.657477 17619 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.657482 17619 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.657487 17619 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.657492 17619 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.657497 17619 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.657502 
17619 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.657507 17619 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.657513 17619 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:06:12.657518 17619 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:06:12.657523 17619 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.657529 17619 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.657534 17619 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.657539 17619 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.657546 17619 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.657551 17619 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.657555 17619 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.657562 17619 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.657567 17619 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:06:12.657572 17619 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:06:12.657583 17619 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:06:12.657588 17619 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:06:12.657594 17619 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.657600 17619 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:06:12.657605 17619 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.657610 17619 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.657616 17619 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.657621 17619 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.657626 17619 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:06:12.657631 17619 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.657636 17619 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.657642 17619 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:06:12.657647 17619 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:06:12.657652 17619 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.657658 17619 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.657663 17619 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.657670 17619 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.657675 17619 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.657680 17619 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.657685 17619 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.657691 17619 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.657696 17619 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:06:12.657707 17619 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:06:12.657713 17619 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.657722 17619 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.657728 17619 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.657733 17619 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.657739 17619 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.657744 17619 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:06:12.657750 17619 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.657755 17619 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.657762 17619 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:06:12.657766 17619 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:06:12.657771 17619 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:06:12.657778 17619 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.657783 17619 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.657788 17619 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.657793 17619 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.657799 17619 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.657804 17619 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.657809 17619 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.657814 17619 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:06:12.657820 17619 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:06:12.657825 17619 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.657831 17619 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.657836 17619 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.657842 17619 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.657847 17619 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.657858 17619 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.657863 17619 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.657869 17619 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.657874 17619 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:06:12.657881 17619 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:06:12.657886 17619 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:06:12.657891 17619 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.657897 17619 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.657902 17619 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.657908 17619 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.657913 17619 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.657918 17619 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.657924 17619 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.657929 17619 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:06:12.657935 17619 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:06:12.657940 17619 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.657946 17619 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.657953 17619 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.657958 17619 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.657963 17619 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.657968 17619 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.657974 17619 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.657979 17619 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.657985 17619 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:06:12.657991 17619 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:06:12.657996 17619 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:06:12.658002 17619 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.658007 17619 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.658013 17619 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.658018 17619 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.658025 17619 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.658030 17619 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.658035 17619 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.658041 17619 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:06:12.658046 17619 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:06:12.658051 17619 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.658058 17619 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.658064 17619 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.658069 17619 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.658076 17619 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.658080 17619 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.658085 17619 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.658092 17619 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.658097 17619 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:06:12.658102 17619 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:06:12.658113 17619 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:06:12.658119 17619 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:06:12.658129 17619 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.658136 17619 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:06:12.658143 17619 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.658149 17619 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.658154 17619 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.658159 17619 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.658164 17619 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:06:12.658170 17619 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.658175 17619 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.658181 17619 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:06:12.658187 17619 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:06:12.658193 17619 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:06:12.658200 17619 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:06:12.658205 17619 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:06:12.658210 17619 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:06:12.658216 17619 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:06:12.658221 17619 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:06:12.658226 17619 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:06:12.658232 17619 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:06:12.658238 17619 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:06:12.658243 17619 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:06:12.658249 17619 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:06:12.658255 17619 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:06:12.658262 17619 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:06:12.658267 17619 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:06:12.658272 17619 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:06:12.658277 17619 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:06:12.658284 17619 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:06:12.658290 17619 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:06:12.658296 17619 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:06:12.658303 17619 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:06:12.658308 17619 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:06:12.658314 17619 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:06:12.658319 17619 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:06:12.658325 17619 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:06:12.658331 17619 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:06:12.658337 17619 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:06:12.658342 17619 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:06:12.658349 17619 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:06:12.658354 17619 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:06:12.658360 17619 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:06:12.658366 17619 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:06:12.658372 17619 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:06:12.658378 17619 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:06:12.658385 17619 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:06:12.658390 17619 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:06:12.658396 17619 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:06:12.658428 17619 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:06:12.658437 17619 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:06:12.658443 17619 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:06:12.658449 17619 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:06:12.658455 17619 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:06:12.658463 17619 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:06:12.658468 17619 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:06:12.658473 17619 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:06:12.658479 17619 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:06:12.658484 17619 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:06:12.658490 17619 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:06:12.658496 17619 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:06:12.658502 17619 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:06:12.658507 17619 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:06:12.658514 17619 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:06:12.658519 17619 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:06:12.658525 17619 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:06:12.658531 17619 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:06:12.658536 17619 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:06:12.658542 17619 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:06:12.658548 17619 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:06:12.658553 17619 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:06:12.658560 17619 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:06:12.658565 17619 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:06:12.658571 17619 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:06:12.658577 17619 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:06:12.658583 17619 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:06:12.658588 17619 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:06:12.658594 17619 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:06:12.658601 17619 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:06:12.658605 17619 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:06:12.658612 17619 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:06:12.658617 17619 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:06:12.658623 17619 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:06:12.658628 17619 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:06:12.658635 17619 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:06:12.658641 17619 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:06:12.658648 17619 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:06:12.658653 17619 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:06:12.658658 17619 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:06:12.658664 17619 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:06:12.658669 17619 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:06:12.658675 17619 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:06:12.658681 17619 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:06:12.658687 17619 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:06:12.658694 17619 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:06:12.658704 17619 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:06:12.658718 17619 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:06:12.658725 17619 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:06:12.658731 17619 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:06:12.658736 17619 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:06:12.658742 17619 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:06:12.658749 17619 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:06:12.658754 17619 net.cpp:226] pre_relu needs backward computation.\nI0817 16:06:12.658761 17619 net.cpp:226] pre_scale needs backward computation.\nI0817 16:06:12.658766 17619 net.cpp:226] pre_bn needs backward computation.\nI0817 16:06:12.658771 17619 net.cpp:226] pre_conv needs backward computation.\nI0817 16:06:12.658778 17619 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:06:12.658784 17619 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:06:12.658789 17619 net.cpp:270] This network produces output accuracy\nI0817 16:06:12.658795 17619 net.cpp:270] This network produces output loss\nI0817 16:06:12.659124 17619 net.cpp:283] Network initialization done.\nI0817 16:06:12.660151 17619 solver.cpp:60] Solver scaffolding done.\nI0817 16:06:12.883235 17619 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:06:13.243937 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:13.244016 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:13.251008 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:13.471704 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:13.471818 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:13.506239 17619 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:06:13.506348 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:13.957129 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:13.957185 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:13.965142 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:14.209784 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:14.209894 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:14.261764 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:14.261869 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:14.766782 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:14.766837 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:14.775645 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:15.046960 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:15.047092 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:15.117617 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:15.117754 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:15.201259 17619 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:06:15.677274 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:15.677343 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:06:15.687399 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:15.975597 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:15.975759 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:16.067817 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:16.067976 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:16.716513 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:16.716590 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:16.727144 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:17.042181 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:17.042404 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:17.155447 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:17.155757 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:17.867763 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:17.867827 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:17.879238 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:18.222592 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:18.222839 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:18.355568 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:18.355801 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:06:19.132371 17619 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:06:19.132442 17619 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:06:19.144490 17619 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:06:19.195443 17625 blocking_queue.cpp:50] Waiting for data\nI0817 16:06:19.244977 17646 blocking_queue.cpp:50] Waiting for data\nI0817 16:06:19.579319 17619 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:06:19.579594 17619 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:06:19.731372 17619 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:06:19.731634 17619 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:06:19.902101 17619 parallel.cpp:425] Starting Optimization\nI0817 16:06:19.903372 17619 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:06:19.903394 17619 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:06:19.907336 17619 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:07:42.537979 17619 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:07:42.538318 17619 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:07:46.663350 17619 solver.cpp:228] Iteration 0, loss = 4.7044\nI0817 16:07:46.663390 17619 solver.cpp:244]     Train net output #0: accuracy = 0.064\nI0817 16:07:46.663410 17619 solver.cpp:244]     Train net output #1: loss = 4.7044 (* 1 = 4.7044 loss)\nI0817 16:07:46.663555 17619 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:10:04.826632 17619 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:11:25.746911 17619 solver.cpp:404]     Test net output #0: accuracy = 0.22264\nI0817 16:11:25.747169 17619 solver.cpp:404]     Test net output #1: loss = 2.05395 (* 1 = 2.05395 loss)\nI0817 16:11:27.062892 17619 
solver.cpp:228] Iteration 100, loss = 1.81709\nI0817 16:11:27.062935 17619 solver.cpp:244]     Train net output #0: accuracy = 0.336\nI0817 16:11:27.062952 17619 solver.cpp:244]     Train net output #1: loss = 1.81709 (* 1 = 1.81709 loss)\nI0817 16:11:27.155699 17619 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:13:45.154752 17619 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:15:06.081311 17619 solver.cpp:404]     Test net output #0: accuracy = 0.46408\nI0817 16:15:06.081564 17619 solver.cpp:404]     Test net output #1: loss = 1.57115 (* 1 = 1.57115 loss)\nI0817 16:15:07.397950 17619 solver.cpp:228] Iteration 200, loss = 1.26286\nI0817 16:15:07.397994 17619 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI0817 16:15:07.398010 17619 solver.cpp:244]     Train net output #1: loss = 1.26286 (* 1 = 1.26286 loss)\nI0817 16:15:07.482551 17619 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:17:25.491067 17619 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:18:46.418196 17619 solver.cpp:404]     Test net output #0: accuracy = 0.57\nI0817 16:18:46.418457 17619 solver.cpp:404]     Test net output #1: loss = 1.31596 (* 1 = 1.31596 loss)\nI0817 16:18:47.733456 17619 solver.cpp:228] Iteration 300, loss = 0.851053\nI0817 16:18:47.733501 17619 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 16:18:47.733517 17619 solver.cpp:244]     Train net output #1: loss = 0.851053 (* 1 = 0.851053 loss)\nI0817 16:18:47.826900 17619 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:21:05.933567 17619 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:22:26.847796 17619 solver.cpp:404]     Test net output #0: accuracy = 0.68748\nI0817 16:22:26.848055 17619 solver.cpp:404]     Test net output #1: loss = 0.929873 (* 1 = 0.929873 loss)\nI0817 16:22:28.163413 17619 solver.cpp:228] Iteration 400, loss = 0.608023\nI0817 16:22:28.163457 17619 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 16:22:28.163475 
17619 solver.cpp:244]     Train net output #1: loss = 0.608023 (* 1 = 0.608023 loss)\nI0817 16:22:28.255164 17619 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:24:46.278062 17619 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:26:07.194967 17619 solver.cpp:404]     Test net output #0: accuracy = 0.6482\nI0817 16:26:07.195225 17619 solver.cpp:404]     Test net output #1: loss = 1.25926 (* 1 = 1.25926 loss)\nI0817 16:26:08.511632 17619 solver.cpp:228] Iteration 500, loss = 0.540085\nI0817 16:26:08.511675 17619 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 16:26:08.511692 17619 solver.cpp:244]     Train net output #1: loss = 0.540085 (* 1 = 0.540085 loss)\nI0817 16:26:08.608263 17619 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:28:26.826879 17619 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:29:47.849133 17619 solver.cpp:404]     Test net output #0: accuracy = 0.72428\nI0817 16:29:47.849385 17619 solver.cpp:404]     Test net output #1: loss = 0.899114 (* 1 = 0.899114 loss)\nI0817 16:29:49.166127 17619 solver.cpp:228] Iteration 600, loss = 0.467435\nI0817 16:29:49.166172 17619 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 16:29:49.166187 17619 solver.cpp:244]     Train net output #1: loss = 0.467435 (* 1 = 0.467435 loss)\nI0817 16:29:49.256464 17619 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:32:07.394793 17619 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:33:28.421245 17619 solver.cpp:404]     Test net output #0: accuracy = 0.74836\nI0817 16:33:28.421504 17619 solver.cpp:404]     Test net output #1: loss = 0.787284 (* 1 = 0.787284 loss)\nI0817 16:33:29.737785 17619 solver.cpp:228] Iteration 700, loss = 0.317001\nI0817 16:33:29.737828 17619 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:33:29.737843 17619 solver.cpp:244]     Train net output #1: loss = 0.317001 (* 1 = 0.317001 loss)\nI0817 16:33:29.829843 17619 sgd_solver.cpp:166] Iteration 700, 
lr = 0.506\nI0817 16:35:47.886198 17619 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:37:08.914644 17619 solver.cpp:404]     Test net output #0: accuracy = 0.68508\nI0817 16:37:08.914914 17619 solver.cpp:404]     Test net output #1: loss = 1.22029 (* 1 = 1.22029 loss)\nI0817 16:37:10.231040 17619 solver.cpp:228] Iteration 800, loss = 0.324364\nI0817 16:37:10.231083 17619 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:37:10.231098 17619 solver.cpp:244]     Train net output #1: loss = 0.324364 (* 1 = 0.324364 loss)\nI0817 16:37:10.315099 17619 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:39:28.407515 17619 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:40:49.446038 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7034\nI0817 16:40:49.446296 17619 solver.cpp:404]     Test net output #1: loss = 1.22688 (* 1 = 1.22688 loss)\nI0817 16:40:50.762079 17619 solver.cpp:228] Iteration 900, loss = 0.413326\nI0817 16:40:50.762122 17619 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:40:50.762137 17619 solver.cpp:244]     Train net output #1: loss = 0.413326 (* 1 = 0.413326 loss)\nI0817 16:40:50.846359 17619 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:43:08.871105 17619 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:44:29.902220 17619 solver.cpp:404]     Test net output #0: accuracy = 0.65744\nI0817 16:44:29.902472 17619 solver.cpp:404]     Test net output #1: loss = 1.32586 (* 1 = 1.32586 loss)\nI0817 16:44:31.218637 17619 solver.cpp:228] Iteration 1000, loss = 0.38108\nI0817 16:44:31.218683 17619 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:44:31.218698 17619 solver.cpp:244]     Train net output #1: loss = 0.38108 (* 1 = 0.38108 loss)\nI0817 16:44:31.300212 17619 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:46:49.339152 17619 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:48:10.932287 17619 solver.cpp:404]     Test net output 
#0: accuracy = 0.69036\nI0817 16:48:10.932584 17619 solver.cpp:404]     Test net output #1: loss = 1.20009 (* 1 = 1.20009 loss)\nI0817 16:48:12.254341 17619 solver.cpp:228] Iteration 1100, loss = 0.269939\nI0817 16:48:12.254390 17619 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 16:48:12.254405 17619 solver.cpp:244]     Train net output #1: loss = 0.269939 (* 1 = 0.269939 loss)\nI0817 16:48:12.330819 17619 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:50:30.564431 17619 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:51:52.588306 17619 solver.cpp:404]     Test net output #0: accuracy = 0.692\nI0817 16:51:52.588553 17619 solver.cpp:404]     Test net output #1: loss = 1.42343 (* 1 = 1.42343 loss)\nI0817 16:51:53.907419 17619 solver.cpp:228] Iteration 1200, loss = 0.229513\nI0817 16:51:53.907461 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 16:51:53.907476 17619 solver.cpp:244]     Train net output #1: loss = 0.229513 (* 1 = 0.229513 loss)\nI0817 16:51:53.997658 17619 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:54:12.303586 17619 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:55:34.236137 17619 solver.cpp:404]     Test net output #0: accuracy = 0.67284\nI0817 16:55:34.236351 17619 solver.cpp:404]     Test net output #1: loss = 1.61979 (* 1 = 1.61979 loss)\nI0817 16:55:35.555060 17619 solver.cpp:228] Iteration 1300, loss = 0.202252\nI0817 16:55:35.555104 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 16:55:35.555119 17619 solver.cpp:244]     Train net output #1: loss = 0.202252 (* 1 = 0.202252 loss)\nI0817 16:55:35.640360 17619 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:57:53.970484 17619 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:59:15.993079 17619 solver.cpp:404]     Test net output #0: accuracy = 0.70056\nI0817 16:59:15.993299 17619 solver.cpp:404]     Test net output #1: loss = 1.34261 (* 1 = 1.34261 loss)\nI0817 
16:59:17.311794 17619 solver.cpp:228] Iteration 1400, loss = 0.215961\nI0817 16:59:17.311838 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:59:17.311854 17619 solver.cpp:244]     Train net output #1: loss = 0.215961 (* 1 = 0.215961 loss)\nI0817 16:59:17.399363 17619 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:01:35.631705 17619 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:02:57.639986 17619 solver.cpp:404]     Test net output #0: accuracy = 0.739\nI0817 17:02:57.640272 17619 solver.cpp:404]     Test net output #1: loss = 0.958026 (* 1 = 0.958026 loss)\nI0817 17:02:58.958860 17619 solver.cpp:228] Iteration 1500, loss = 0.15198\nI0817 17:02:58.958914 17619 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:02:58.958930 17619 solver.cpp:244]     Train net output #1: loss = 0.15198 (* 1 = 0.15198 loss)\nI0817 17:02:59.039551 17619 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:05:17.207912 17619 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:06:38.937209 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7284\nI0817 17:06:38.937433 17619 solver.cpp:404]     Test net output #1: loss = 1.12641 (* 1 = 1.12641 loss)\nI0817 17:06:40.256527 17619 solver.cpp:228] Iteration 1600, loss = 0.162161\nI0817 17:06:40.256569 17619 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:06:40.256584 17619 solver.cpp:244]     Train net output #1: loss = 0.162161 (* 1 = 0.162161 loss)\nI0817 17:06:40.340725 17619 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:08:58.694356 17619 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:10:20.711446 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75296\nI0817 17:10:20.711675 17619 solver.cpp:404]     Test net output #1: loss = 0.933958 (* 1 = 0.933958 loss)\nI0817 17:10:22.030933 17619 solver.cpp:228] Iteration 1700, loss = 0.280673\nI0817 17:10:22.030977 17619 solver.cpp:244]     Train net output #0: 
accuracy = 0.872\nI0817 17:10:22.030992 17619 solver.cpp:244]     Train net output #1: loss = 0.280673 (* 1 = 0.280673 loss)\nI0817 17:10:22.113482 17619 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:12:40.367254 17619 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:14:01.962088 17619 solver.cpp:404]     Test net output #0: accuracy = 0.67644\nI0817 17:14:01.962350 17619 solver.cpp:404]     Test net output #1: loss = 1.63696 (* 1 = 1.63696 loss)\nI0817 17:14:03.282142 17619 solver.cpp:228] Iteration 1800, loss = 0.210412\nI0817 17:14:03.282186 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:14:03.282202 17619 solver.cpp:244]     Train net output #1: loss = 0.210412 (* 1 = 0.210412 loss)\nI0817 17:14:03.363436 17619 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:16:21.558416 17619 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:17:43.515193 17619 solver.cpp:404]     Test net output #0: accuracy = 0.67504\nI0817 17:17:43.515413 17619 solver.cpp:404]     Test net output #1: loss = 1.5425 (* 1 = 1.5425 loss)\nI0817 17:17:44.833679 17619 solver.cpp:228] Iteration 1900, loss = 0.158672\nI0817 17:17:44.833722 17619 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:17:44.833737 17619 solver.cpp:244]     Train net output #1: loss = 0.158672 (* 1 = 0.158672 loss)\nI0817 17:17:44.922197 17619 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:20:03.221282 17619 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:21:25.193740 17619 solver.cpp:404]     Test net output #0: accuracy = 0.73236\nI0817 17:21:25.193992 17619 solver.cpp:404]     Test net output #1: loss = 1.35203 (* 1 = 1.35203 loss)\nI0817 17:21:26.513715 17619 solver.cpp:228] Iteration 2000, loss = 0.179019\nI0817 17:21:26.513758 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:21:26.513774 17619 solver.cpp:244]     Train net output #1: loss = 0.179019 (* 1 = 0.179019 loss)\nI0817 
17:21:26.604753 17619 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:23:44.804684 17619 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:25:06.730219 17619 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI0817 17:25:06.730443 17619 solver.cpp:404]     Test net output #1: loss = 1.03743 (* 1 = 1.03743 loss)\nI0817 17:25:08.049692 17619 solver.cpp:228] Iteration 2100, loss = 0.141762\nI0817 17:25:08.049737 17619 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:25:08.049753 17619 solver.cpp:244]     Train net output #1: loss = 0.141762 (* 1 = 0.141762 loss)\nI0817 17:25:08.132302 17619 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:27:26.502620 17619 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:28:48.119846 17619 solver.cpp:404]     Test net output #0: accuracy = 0.59436\nI0817 17:28:48.120064 17619 solver.cpp:404]     Test net output #1: loss = 2.37066 (* 1 = 2.37066 loss)\nI0817 17:28:49.440142 17619 solver.cpp:228] Iteration 2200, loss = 0.208796\nI0817 17:28:49.440186 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:28:49.440202 17619 solver.cpp:244]     Train net output #1: loss = 0.208796 (* 1 = 0.208796 loss)\nI0817 17:28:49.523602 17619 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:31:07.704974 17619 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:32:29.306391 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75052\nI0817 17:32:29.306648 17619 solver.cpp:404]     Test net output #1: loss = 0.929896 (* 1 = 0.929896 loss)\nI0817 17:32:30.626085 17619 solver.cpp:228] Iteration 2300, loss = 0.210416\nI0817 17:32:30.626130 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:32:30.626150 17619 solver.cpp:244]     Train net output #1: loss = 0.210416 (* 1 = 0.210416 loss)\nI0817 17:32:30.711879 17619 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:34:48.893167 17619 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0817 17:36:10.665169 17619 solver.cpp:404]     Test net output #0: accuracy = 0.68832\nI0817 17:36:10.665462 17619 solver.cpp:404]     Test net output #1: loss = 1.36471 (* 1 = 1.36471 loss)\nI0817 17:36:11.985025 17619 solver.cpp:228] Iteration 2400, loss = 0.284016\nI0817 17:36:11.985070 17619 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:36:11.985086 17619 solver.cpp:244]     Train net output #1: loss = 0.284015 (* 1 = 0.284015 loss)\nI0817 17:36:12.064774 17619 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:38:30.383252 17619 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:39:52.420370 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75612\nI0817 17:39:52.420650 17619 solver.cpp:404]     Test net output #1: loss = 1.07144 (* 1 = 1.07144 loss)\nI0817 17:39:53.740089 17619 solver.cpp:228] Iteration 2500, loss = 0.129375\nI0817 17:39:53.740128 17619 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:39:53.740150 17619 solver.cpp:244]     Train net output #1: loss = 0.129375 (* 1 = 0.129375 loss)\nI0817 17:39:53.822890 17619 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:42:12.083760 17619 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:43:34.142968 17619 solver.cpp:404]     Test net output #0: accuracy = 0.74868\nI0817 17:43:34.143254 17619 solver.cpp:404]     Test net output #1: loss = 1.01844 (* 1 = 1.01844 loss)\nI0817 17:43:35.461390 17619 solver.cpp:228] Iteration 2600, loss = 0.162094\nI0817 17:43:35.461427 17619 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:43:35.461443 17619 solver.cpp:244]     Train net output #1: loss = 0.162094 (* 1 = 0.162094 loss)\nI0817 17:43:35.545775 17619 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:45:53.702517 17619 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:47:15.757304 17619 solver.cpp:404]     Test net output #0: accuracy = 0.76128\nI0817 17:47:15.757578 17619 solver.cpp:404]     
Test net output #1: loss = 1.03861 (* 1 = 1.03861 loss)\nI0817 17:47:17.076247 17619 solver.cpp:228] Iteration 2700, loss = 0.182659\nI0817 17:47:17.076283 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:47:17.076298 17619 solver.cpp:244]     Train net output #1: loss = 0.182659 (* 1 = 0.182659 loss)\nI0817 17:47:17.159432 17619 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:49:35.330742 17619 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:50:57.386912 17619 solver.cpp:404]     Test net output #0: accuracy = 0.78324\nI0817 17:50:57.387205 17619 solver.cpp:404]     Test net output #1: loss = 0.801711 (* 1 = 0.801711 loss)\nI0817 17:50:58.705698 17619 solver.cpp:228] Iteration 2800, loss = 0.147504\nI0817 17:50:58.705740 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:50:58.705755 17619 solver.cpp:244]     Train net output #1: loss = 0.147504 (* 1 = 0.147504 loss)\nI0817 17:50:58.801956 17619 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:53:16.979058 17619 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:54:39.029079 17619 solver.cpp:404]     Test net output #0: accuracy = 0.6248\nI0817 17:54:39.029400 17619 solver.cpp:404]     Test net output #1: loss = 1.73501 (* 1 = 1.73501 loss)\nI0817 17:54:40.348351 17619 solver.cpp:228] Iteration 2900, loss = 0.266133\nI0817 17:54:40.348392 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:54:40.348407 17619 solver.cpp:244]     Train net output #1: loss = 0.266133 (* 1 = 0.266133 loss)\nI0817 17:54:40.435573 17619 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:56:58.710839 17619 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:58:20.765684 17619 solver.cpp:404]     Test net output #0: accuracy = 0.73288\nI0817 17:58:20.765964 17619 solver.cpp:404]     Test net output #1: loss = 0.992896 (* 1 = 0.992896 loss)\nI0817 17:58:22.085995 17619 solver.cpp:228] Iteration 3000, loss = 0.130104\nI0817 
17:58:22.086038 17619 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:58:22.086055 17619 solver.cpp:244]     Train net output #1: loss = 0.130104 (* 1 = 0.130104 loss)\nI0817 17:58:22.172431 17619 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:00:40.374563 17619 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:02:02.420003 17619 solver.cpp:404]     Test net output #0: accuracy = 0.62688\nI0817 18:02:02.420251 17619 solver.cpp:404]     Test net output #1: loss = 2.03623 (* 1 = 2.03623 loss)\nI0817 18:02:03.740286 17619 solver.cpp:228] Iteration 3100, loss = 0.176485\nI0817 18:02:03.740329 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:02:03.740344 17619 solver.cpp:244]     Train net output #1: loss = 0.176486 (* 1 = 0.176486 loss)\nI0817 18:02:03.821211 17619 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:04:22.140204 17619 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:05:44.182999 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75864\nI0817 18:05:44.183305 17619 solver.cpp:404]     Test net output #1: loss = 0.961508 (* 1 = 0.961508 loss)\nI0817 18:05:45.502825 17619 solver.cpp:228] Iteration 3200, loss = 0.22085\nI0817 18:05:45.502864 17619 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:05:45.502881 17619 solver.cpp:244]     Train net output #1: loss = 0.22085 (* 1 = 0.22085 loss)\nI0817 18:05:45.590462 17619 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:08:03.818785 17619 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:09:25.876482 17619 solver.cpp:404]     Test net output #0: accuracy = 0.6432\nI0817 18:09:25.876756 17619 solver.cpp:404]     Test net output #1: loss = 1.76298 (* 1 = 1.76298 loss)\nI0817 18:09:27.195300 17619 solver.cpp:228] Iteration 3300, loss = 0.213758\nI0817 18:09:27.195340 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:09:27.195356 17619 solver.cpp:244]     Train net output 
#1: loss = 0.213758 (* 1 = 0.213758 loss)\nI0817 18:09:27.279469 17619 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:11:45.501561 17619 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:13:07.537585 17619 solver.cpp:404]     Test net output #0: accuracy = 0.68844\nI0817 18:13:07.537892 17619 solver.cpp:404]     Test net output #1: loss = 1.3431 (* 1 = 1.3431 loss)\nI0817 18:13:08.857738 17619 solver.cpp:228] Iteration 3400, loss = 0.19491\nI0817 18:13:08.857781 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:13:08.857797 17619 solver.cpp:244]     Train net output #1: loss = 0.19491 (* 1 = 0.19491 loss)\nI0817 18:13:08.947521 17619 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:15:27.162403 17619 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:16:49.211961 17619 solver.cpp:404]     Test net output #0: accuracy = 0.73244\nI0817 18:16:49.212265 17619 solver.cpp:404]     Test net output #1: loss = 0.953648 (* 1 = 0.953648 loss)\nI0817 18:16:50.531632 17619 solver.cpp:228] Iteration 3500, loss = 0.253283\nI0817 18:16:50.531673 17619 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:16:50.531689 17619 solver.cpp:244]     Train net output #1: loss = 0.253283 (* 1 = 0.253283 loss)\nI0817 18:16:50.611126 17619 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:19:08.776593 17619 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:20:30.822551 17619 solver.cpp:404]     Test net output #0: accuracy = 0.72928\nI0817 18:20:30.822849 17619 solver.cpp:404]     Test net output #1: loss = 1.11355 (* 1 = 1.11355 loss)\nI0817 18:20:32.139937 17619 solver.cpp:228] Iteration 3600, loss = 0.287644\nI0817 18:20:32.139981 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:20:32.139995 17619 solver.cpp:244]     Train net output #1: loss = 0.287644 (* 1 = 0.287644 loss)\nI0817 18:20:32.221647 17619 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 18:22:50.531929 17619 
solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:24:12.577692 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75256\nI0817 18:24:12.577992 17619 solver.cpp:404]     Test net output #1: loss = 0.92251 (* 1 = 0.92251 loss)\nI0817 18:24:13.894662 17619 solver.cpp:228] Iteration 3700, loss = 0.232013\nI0817 18:24:13.894704 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:24:13.894721 17619 solver.cpp:244]     Train net output #1: loss = 0.232013 (* 1 = 0.232013 loss)\nI0817 18:24:13.983531 17619 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:26:32.173960 17619 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:27:54.227907 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7884\nI0817 18:27:54.228210 17619 solver.cpp:404]     Test net output #1: loss = 0.718212 (* 1 = 0.718212 loss)\nI0817 18:27:55.545555 17619 solver.cpp:228] Iteration 3800, loss = 0.265409\nI0817 18:27:55.545594 17619 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:27:55.545610 17619 solver.cpp:244]     Train net output #1: loss = 0.265409 (* 1 = 0.265409 loss)\nI0817 18:27:55.630398 17619 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:30:13.681954 17619 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:31:34.607825 17619 solver.cpp:404]     Test net output #0: accuracy = 0.6128\nI0817 18:31:34.608029 17619 solver.cpp:404]     Test net output #1: loss = 1.92596 (* 1 = 1.92596 loss)\nI0817 18:31:35.920153 17619 solver.cpp:228] Iteration 3900, loss = 0.229058\nI0817 18:31:35.920187 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:31:35.920202 17619 solver.cpp:244]     Train net output #1: loss = 0.229058 (* 1 = 0.229058 loss)\nI0817 18:31:36.010733 17619 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:33:53.951488 17619 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:35:14.876798 17619 solver.cpp:404]     Test net output #0: accuracy = 
0.60432\nI0817 18:35:14.877035 17619 solver.cpp:404]     Test net output #1: loss = 1.60309 (* 1 = 1.60309 loss)\nI0817 18:35:16.188848 17619 solver.cpp:228] Iteration 4000, loss = 0.255182\nI0817 18:35:16.188882 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:35:16.188897 17619 solver.cpp:244]     Train net output #1: loss = 0.255182 (* 1 = 0.255182 loss)\nI0817 18:35:16.281855 17619 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:37:34.283605 17619 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:38:55.211601 17619 solver.cpp:404]     Test net output #0: accuracy = 0.71116\nI0817 18:38:55.211788 17619 solver.cpp:404]     Test net output #1: loss = 1.09014 (* 1 = 1.09014 loss)\nI0817 18:38:56.523890 17619 solver.cpp:228] Iteration 4100, loss = 0.284252\nI0817 18:38:56.523926 17619 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:38:56.523941 17619 solver.cpp:244]     Train net output #1: loss = 0.284252 (* 1 = 0.284252 loss)\nI0817 18:38:56.611359 17619 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:41:14.638566 17619 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:42:35.563037 17619 solver.cpp:404]     Test net output #0: accuracy = 0.69472\nI0817 18:42:35.563282 17619 solver.cpp:404]     Test net output #1: loss = 1.2184 (* 1 = 1.2184 loss)\nI0817 18:42:36.875331 17619 solver.cpp:228] Iteration 4200, loss = 0.262827\nI0817 18:42:36.875365 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:42:36.875380 17619 solver.cpp:244]     Train net output #1: loss = 0.262827 (* 1 = 0.262827 loss)\nI0817 18:42:36.966734 17619 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:44:55.079633 17619 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:46:16.114130 17619 solver.cpp:404]     Test net output #0: accuracy = 0.71272\nI0817 18:46:16.114368 17619 solver.cpp:404]     Test net output #1: loss = 1.09499 (* 1 = 1.09499 loss)\nI0817 18:46:17.425840 17619 
solver.cpp:228] Iteration 4300, loss = 0.192261\nI0817 18:46:17.425874 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:46:17.425889 17619 solver.cpp:244]     Train net output #1: loss = 0.192261 (* 1 = 0.192261 loss)\nI0817 18:46:17.525188 17619 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:48:35.526263 17619 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:49:56.565994 17619 solver.cpp:404]     Test net output #0: accuracy = 0.58404\nI0817 18:49:56.566236 17619 solver.cpp:404]     Test net output #1: loss = 1.56738 (* 1 = 1.56738 loss)\nI0817 18:49:57.877930 17619 solver.cpp:228] Iteration 4400, loss = 0.31711\nI0817 18:49:57.877964 17619 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:49:57.877979 17619 solver.cpp:244]     Train net output #1: loss = 0.31711 (* 1 = 0.31711 loss)\nI0817 18:49:57.967329 17619 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:52:16.003043 17619 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:53:37.044504 17619 solver.cpp:404]     Test net output #0: accuracy = 0.69288\nI0817 18:53:37.044744 17619 solver.cpp:404]     Test net output #1: loss = 1.09942 (* 1 = 1.09942 loss)\nI0817 18:53:38.356827 17619 solver.cpp:228] Iteration 4500, loss = 0.256957\nI0817 18:53:38.356861 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:53:38.356875 17619 solver.cpp:244]     Train net output #1: loss = 0.256957 (* 1 = 0.256957 loss)\nI0817 18:53:38.443863 17619 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 18:55:56.584753 17619 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:57:17.617851 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7362\nI0817 18:57:17.618095 17619 solver.cpp:404]     Test net output #1: loss = 0.9462 (* 1 = 0.9462 loss)\nI0817 18:57:18.933076 17619 solver.cpp:228] Iteration 4600, loss = 0.295367\nI0817 18:57:18.933112 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 
18:57:18.933127 17619 solver.cpp:244]     Train net output #1: loss = 0.295367 (* 1 = 0.295367 loss)\nI0817 18:57:19.021509 17619 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 18:59:37.030767 17619 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:00:58.069398 17619 solver.cpp:404]     Test net output #0: accuracy = 0.69632\nI0817 19:00:58.069638 17619 solver.cpp:404]     Test net output #1: loss = 1.01137 (* 1 = 1.01137 loss)\nI0817 19:00:59.385365 17619 solver.cpp:228] Iteration 4700, loss = 0.490221\nI0817 19:00:59.385401 17619 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:00:59.385416 17619 solver.cpp:244]     Train net output #1: loss = 0.490221 (* 1 = 0.490221 loss)\nI0817 19:00:59.479527 17619 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:03:17.434715 17619 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:04:38.480526 17619 solver.cpp:404]     Test net output #0: accuracy = 0.72668\nI0817 19:04:38.480710 17619 solver.cpp:404]     Test net output #1: loss = 0.992316 (* 1 = 0.992316 loss)\nI0817 19:04:39.796278 17619 solver.cpp:228] Iteration 4800, loss = 0.329379\nI0817 19:04:39.796314 17619 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:04:39.796329 17619 solver.cpp:244]     Train net output #1: loss = 0.329379 (* 1 = 0.329379 loss)\nI0817 19:04:39.889971 17619 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:06:57.958973 17619 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:08:18.992956 17619 solver.cpp:404]     Test net output #0: accuracy = 0.64252\nI0817 19:08:18.993172 17619 solver.cpp:404]     Test net output #1: loss = 1.52452 (* 1 = 1.52452 loss)\nI0817 19:08:20.308997 17619 solver.cpp:228] Iteration 4900, loss = 0.286784\nI0817 19:08:20.309032 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:08:20.309047 17619 solver.cpp:244]     Train net output #1: loss = 0.286783 (* 1 = 0.286783 loss)\nI0817 19:08:20.400945 17619 
sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:10:38.401334 17619 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:11:59.438191 17619 solver.cpp:404]     Test net output #0: accuracy = 0.63508\nI0817 19:11:59.438429 17619 solver.cpp:404]     Test net output #1: loss = 1.6109 (* 1 = 1.6109 loss)\nI0817 19:12:00.753621 17619 solver.cpp:228] Iteration 5000, loss = 0.306568\nI0817 19:12:00.753656 17619 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:12:00.753671 17619 solver.cpp:244]     Train net output #1: loss = 0.306568 (* 1 = 0.306568 loss)\nI0817 19:12:00.848613 17619 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:14:18.921439 17619 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:15:39.964184 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75884\nI0817 19:15:39.964413 17619 solver.cpp:404]     Test net output #1: loss = 0.821871 (* 1 = 0.821871 loss)\nI0817 19:15:41.279194 17619 solver.cpp:228] Iteration 5100, loss = 0.326503\nI0817 19:15:41.279229 17619 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:15:41.279243 17619 solver.cpp:244]     Train net output #1: loss = 0.326503 (* 1 = 0.326503 loss)\nI0817 19:15:41.365398 17619 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:17:59.430735 17619 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:19:20.483485 17619 solver.cpp:404]     Test net output #0: accuracy = 0.69124\nI0817 19:19:20.483731 17619 solver.cpp:404]     Test net output #1: loss = 1.18108 (* 1 = 1.18108 loss)\nI0817 19:19:21.799491 17619 solver.cpp:228] Iteration 5200, loss = 0.302124\nI0817 19:19:21.799526 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:19:21.799540 17619 solver.cpp:244]     Train net output #1: loss = 0.302124 (* 1 = 0.302124 loss)\nI0817 19:19:21.894032 17619 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:21:39.949301 17619 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:23:00.988672 
17619 solver.cpp:404]     Test net output #0: accuracy = 0.72932\nI0817 19:23:00.988916 17619 solver.cpp:404]     Test net output #1: loss = 0.954607 (* 1 = 0.954607 loss)\nI0817 19:23:02.305019 17619 solver.cpp:228] Iteration 5300, loss = 0.248719\nI0817 19:23:02.305054 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:23:02.305069 17619 solver.cpp:244]     Train net output #1: loss = 0.248719 (* 1 = 0.248719 loss)\nI0817 19:23:02.389562 17619 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:25:20.450170 17619 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:26:41.488698 17619 solver.cpp:404]     Test net output #0: accuracy = 0.73824\nI0817 19:26:41.488920 17619 solver.cpp:404]     Test net output #1: loss = 0.973063 (* 1 = 0.973063 loss)\nI0817 19:26:42.804541 17619 solver.cpp:228] Iteration 5400, loss = 0.280432\nI0817 19:26:42.804575 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:26:42.804590 17619 solver.cpp:244]     Train net output #1: loss = 0.280432 (* 1 = 0.280432 loss)\nI0817 19:26:42.893131 17619 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:29:00.959215 17619 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:30:21.992620 17619 solver.cpp:404]     Test net output #0: accuracy = 0.72728\nI0817 19:30:21.992872 17619 solver.cpp:404]     Test net output #1: loss = 1.10866 (* 1 = 1.10866 loss)\nI0817 19:30:23.309054 17619 solver.cpp:228] Iteration 5500, loss = 0.256641\nI0817 19:30:23.309089 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:30:23.309104 17619 solver.cpp:244]     Train net output #1: loss = 0.25664 (* 1 = 0.25664 loss)\nI0817 19:30:23.389976 17619 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:32:41.308099 17619 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:34:02.356756 17619 solver.cpp:404]     Test net output #0: accuracy = 0.70784\nI0817 19:34:02.356992 17619 solver.cpp:404]     Test net output #1: loss = 
1.06723 (* 1 = 1.06723 loss)\nI0817 19:34:03.672632 17619 solver.cpp:228] Iteration 5600, loss = 0.213945\nI0817 19:34:03.672670 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:34:03.672693 17619 solver.cpp:244]     Train net output #1: loss = 0.213945 (* 1 = 0.213945 loss)\nI0817 19:34:03.760674 17619 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:36:21.846516 17619 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:37:42.889530 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7184\nI0817 19:37:42.889744 17619 solver.cpp:404]     Test net output #1: loss = 1.04756 (* 1 = 1.04756 loss)\nI0817 19:37:44.206212 17619 solver.cpp:228] Iteration 5700, loss = 0.259857\nI0817 19:37:44.206250 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:37:44.206272 17619 solver.cpp:244]     Train net output #1: loss = 0.259856 (* 1 = 0.259856 loss)\nI0817 19:37:44.293154 17619 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:40:02.399698 17619 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:41:23.441177 17619 solver.cpp:404]     Test net output #0: accuracy = 0.63448\nI0817 19:41:23.441426 17619 solver.cpp:404]     Test net output #1: loss = 1.77833 (* 1 = 1.77833 loss)\nI0817 19:41:24.757807 17619 solver.cpp:228] Iteration 5800, loss = 0.196006\nI0817 19:41:24.757846 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:41:24.757869 17619 solver.cpp:244]     Train net output #1: loss = 0.196005 (* 1 = 0.196005 loss)\nI0817 19:41:24.845664 17619 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:43:42.760602 17619 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:45:03.803062 17619 solver.cpp:404]     Test net output #0: accuracy = 0.76352\nI0817 19:45:03.803295 17619 solver.cpp:404]     Test net output #1: loss = 0.90574 (* 1 = 0.90574 loss)\nI0817 19:45:05.119443 17619 solver.cpp:228] Iteration 5900, loss = 0.272223\nI0817 19:45:05.119480 17619 
solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:45:05.119501 17619 solver.cpp:244]     Train net output #1: loss = 0.272223 (* 1 = 0.272223 loss)\nI0817 19:45:05.208472 17619 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:47:23.278607 17619 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:48:44.346704 17619 solver.cpp:404]     Test net output #0: accuracy = 0.77732\nI0817 19:48:44.346930 17619 solver.cpp:404]     Test net output #1: loss = 0.811826 (* 1 = 0.811826 loss)\nI0817 19:48:45.663027 17619 solver.cpp:228] Iteration 6000, loss = 0.244286\nI0817 19:48:45.663065 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:48:45.663087 17619 solver.cpp:244]     Train net output #1: loss = 0.244286 (* 1 = 0.244286 loss)\nI0817 19:48:45.747166 17619 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:51:03.676616 17619 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:52:24.742563 17619 solver.cpp:404]     Test net output #0: accuracy = 0.62876\nI0817 19:52:24.742816 17619 solver.cpp:404]     Test net output #1: loss = 1.47478 (* 1 = 1.47478 loss)\nI0817 19:52:26.058989 17619 solver.cpp:228] Iteration 6100, loss = 0.23239\nI0817 19:52:26.059023 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:52:26.059038 17619 solver.cpp:244]     Train net output #1: loss = 0.23239 (* 1 = 0.23239 loss)\nI0817 19:52:26.146834 17619 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 19:54:44.133137 17619 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:56:05.190088 17619 solver.cpp:404]     Test net output #0: accuracy = 0.77476\nI0817 19:56:05.190290 17619 solver.cpp:404]     Test net output #1: loss = 0.772615 (* 1 = 0.772615 loss)\nI0817 19:56:06.506237 17619 solver.cpp:228] Iteration 6200, loss = 0.321976\nI0817 19:56:06.506273 17619 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:56:06.506289 17619 solver.cpp:244]     Train net output #1: loss = 
0.321976 (* 1 = 0.321976 loss)\nI0817 19:56:06.590996 17619 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 19:58:24.584899 17619 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 19:59:45.642309 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7214\nI0817 19:59:45.642552 17619 solver.cpp:404]     Test net output #1: loss = 1.08549 (* 1 = 1.08549 loss)\nI0817 19:59:46.958302 17619 solver.cpp:228] Iteration 6300, loss = 0.201217\nI0817 19:59:46.958336 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:59:46.958351 17619 solver.cpp:244]     Train net output #1: loss = 0.201217 (* 1 = 0.201217 loss)\nI0817 19:59:47.043437 17619 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:02:05.038914 17619 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:03:26.090374 17619 solver.cpp:404]     Test net output #0: accuracy = 0.70136\nI0817 20:03:26.090584 17619 solver.cpp:404]     Test net output #1: loss = 1.40516 (* 1 = 1.40516 loss)\nI0817 20:03:27.406263 17619 solver.cpp:228] Iteration 6400, loss = 0.256742\nI0817 20:03:27.406297 17619 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:03:27.406312 17619 solver.cpp:244]     Train net output #1: loss = 0.256742 (* 1 = 0.256742 loss)\nI0817 20:03:27.493264 17619 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:05:45.586365 17619 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:07:06.650146 17619 solver.cpp:404]     Test net output #0: accuracy = 0.78064\nI0817 20:07:06.650369 17619 solver.cpp:404]     Test net output #1: loss = 0.828075 (* 1 = 0.828075 loss)\nI0817 20:07:07.966408 17619 solver.cpp:228] Iteration 6500, loss = 0.142302\nI0817 20:07:07.966442 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:07:07.966457 17619 solver.cpp:244]     Train net output #1: loss = 0.142302 (* 1 = 0.142302 loss)\nI0817 20:07:08.053377 17619 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 20:09:26.048130 17619 
solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:10:47.098606 17619 solver.cpp:404]     Test net output #0: accuracy = 0.6708\nI0817 20:10:47.098855 17619 solver.cpp:404]     Test net output #1: loss = 1.39187 (* 1 = 1.39187 loss)\nI0817 20:10:48.414366 17619 solver.cpp:228] Iteration 6600, loss = 0.150272\nI0817 20:10:48.414402 17619 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:10:48.414417 17619 solver.cpp:244]     Train net output #1: loss = 0.150272 (* 1 = 0.150272 loss)\nI0817 20:10:48.500144 17619 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:13:06.480598 17619 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:14:27.541725 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75548\nI0817 20:14:27.541970 17619 solver.cpp:404]     Test net output #1: loss = 0.916554 (* 1 = 0.916554 loss)\nI0817 20:14:28.857062 17619 solver.cpp:228] Iteration 6700, loss = 0.314982\nI0817 20:14:28.857096 17619 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 20:14:28.857111 17619 solver.cpp:244]     Train net output #1: loss = 0.314982 (* 1 = 0.314982 loss)\nI0817 20:14:28.951606 17619 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:16:47.088115 17619 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:18:08.151262 17619 solver.cpp:404]     Test net output #0: accuracy = 0.80256\nI0817 20:18:08.151505 17619 solver.cpp:404]     Test net output #1: loss = 0.779385 (* 1 = 0.779385 loss)\nI0817 20:18:09.467100 17619 solver.cpp:228] Iteration 6800, loss = 0.111522\nI0817 20:18:09.467135 17619 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:18:09.467150 17619 solver.cpp:244]     Train net output #1: loss = 0.111521 (* 1 = 0.111521 loss)\nI0817 20:18:09.559963 17619 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:20:27.630033 17619 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:21:48.696866 17619 solver.cpp:404]     Test net output #0: accuracy = 
0.76888\nI0817 20:21:48.697115 17619 solver.cpp:404]     Test net output #1: loss = 0.931821 (* 1 = 0.931821 loss)\nI0817 20:21:50.012231 17619 solver.cpp:228] Iteration 6900, loss = 0.197728\nI0817 20:21:50.012265 17619 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:21:50.012279 17619 solver.cpp:244]     Train net output #1: loss = 0.197728 (* 1 = 0.197728 loss)\nI0817 20:21:50.104626 17619 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:24:08.101265 17619 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:25:29.164165 17619 solver.cpp:404]     Test net output #0: accuracy = 0.77888\nI0817 20:25:29.164405 17619 solver.cpp:404]     Test net output #1: loss = 0.766677 (* 1 = 0.766677 loss)\nI0817 20:25:30.480612 17619 solver.cpp:228] Iteration 7000, loss = 0.172467\nI0817 20:25:30.480645 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:25:30.480660 17619 solver.cpp:244]     Train net output #1: loss = 0.172467 (* 1 = 0.172467 loss)\nI0817 20:25:30.565191 17619 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:27:48.515417 17619 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:29:09.469709 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7886\nI0817 20:29:09.469931 17619 solver.cpp:404]     Test net output #1: loss = 0.822867 (* 1 = 0.822867 loss)\nI0817 20:29:10.785784 17619 solver.cpp:228] Iteration 7100, loss = 0.230616\nI0817 20:29:10.785821 17619 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:29:10.785836 17619 solver.cpp:244]     Train net output #1: loss = 0.230616 (* 1 = 0.230616 loss)\nI0817 20:29:10.872779 17619 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:31:28.984057 17619 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:32:49.941679 17619 solver.cpp:404]     Test net output #0: accuracy = 0.75732\nI0817 20:32:49.941929 17619 solver.cpp:404]     Test net output #1: loss = 1.0059 (* 1 = 1.0059 loss)\nI0817 20:32:51.258205 
17619 solver.cpp:228] Iteration 7200, loss = 0.161389\nI0817 20:32:51.258240 17619 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:32:51.258255 17619 solver.cpp:244]     Train net output #1: loss = 0.161389 (* 1 = 0.161389 loss)\nI0817 20:32:51.345604 17619 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:35:09.436718 17619 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:36:30.399361 17619 solver.cpp:404]     Test net output #0: accuracy = 0.71168\nI0817 20:36:30.399606 17619 solver.cpp:404]     Test net output #1: loss = 1.42146 (* 1 = 1.42146 loss)\nI0817 20:36:31.715816 17619 solver.cpp:228] Iteration 7300, loss = 0.117428\nI0817 20:36:31.715848 17619 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:36:31.715863 17619 solver.cpp:244]     Train net output #1: loss = 0.117428 (* 1 = 0.117428 loss)\nI0817 20:36:31.813367 17619 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:38:49.990792 17619 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:40:10.952383 17619 solver.cpp:404]     Test net output #0: accuracy = 0.81848\nI0817 20:40:10.952625 17619 solver.cpp:404]     Test net output #1: loss = 0.719862 (* 1 = 0.719862 loss)\nI0817 20:40:12.268960 17619 solver.cpp:228] Iteration 7400, loss = 0.0976553\nI0817 20:40:12.268996 17619 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:40:12.269011 17619 solver.cpp:244]     Train net output #1: loss = 0.0976552 (* 1 = 0.0976552 loss)\nI0817 20:40:12.362251 17619 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:42:30.465111 17619 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:43:51.423521 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7692\nI0817 20:43:51.423771 17619 solver.cpp:404]     Test net output #1: loss = 0.872355 (* 1 = 0.872355 loss)\nI0817 20:43:52.739495 17619 solver.cpp:228] Iteration 7500, loss = 0.15638\nI0817 20:43:52.739532 17619 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI0817 20:43:52.739554 17619 solver.cpp:244]     Train net output #1: loss = 0.15638 (* 1 = 0.15638 loss)\nI0817 20:43:52.829896 17619 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 20:46:10.973764 17619 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:47:31.933800 17619 solver.cpp:404]     Test net output #0: accuracy = 0.79752\nI0817 20:47:31.934026 17619 solver.cpp:404]     Test net output #1: loss = 0.823257 (* 1 = 0.823257 loss)\nI0817 20:47:33.251194 17619 solver.cpp:228] Iteration 7600, loss = 0.0777076\nI0817 20:47:33.251231 17619 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:47:33.251253 17619 solver.cpp:244]     Train net output #1: loss = 0.0777075 (* 1 = 0.0777075 loss)\nI0817 20:47:33.331523 17619 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:49:51.392537 17619 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:51:12.351225 17619 solver.cpp:404]     Test net output #0: accuracy = 0.76004\nI0817 20:51:12.351425 17619 solver.cpp:404]     Test net output #1: loss = 1.02141 (* 1 = 1.02141 loss)\nI0817 20:51:13.667850 17619 solver.cpp:228] Iteration 7700, loss = 0.0625461\nI0817 20:51:13.667887 17619 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:51:13.667910 17619 solver.cpp:244]     Train net output #1: loss = 0.0625461 (* 1 = 0.0625461 loss)\nI0817 20:51:13.748734 17619 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:53:31.819265 17619 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:54:54.011687 17619 solver.cpp:404]     Test net output #0: accuracy = 0.73928\nI0817 20:54:54.011996 17619 solver.cpp:404]     Test net output #1: loss = 1.13459 (* 1 = 1.13459 loss)\nI0817 20:54:55.338539 17619 solver.cpp:228] Iteration 7800, loss = 0.0897668\nI0817 20:54:55.338590 17619 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:54:55.338618 17619 solver.cpp:244]     Train net output #1: loss = 0.0897668 (* 1 = 0.0897668 loss)\nI0817 
20:54:55.419313 17619 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 20:57:13.655647 17619 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 20:58:35.806408 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI0817 20:58:35.806694 17619 solver.cpp:404]     Test net output #1: loss = 0.879536 (* 1 = 0.879536 loss)\nI0817 20:58:37.126602 17619 solver.cpp:228] Iteration 7900, loss = 0.145932\nI0817 20:58:37.126657 17619 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:58:37.126680 17619 solver.cpp:244]     Train net output #1: loss = 0.145932 (* 1 = 0.145932 loss)\nI0817 20:58:37.204819 17619 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 21:00:55.428376 17619 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:02:17.628005 17619 solver.cpp:404]     Test net output #0: accuracy = 0.77788\nI0817 21:02:17.628298 17619 solver.cpp:404]     Test net output #1: loss = 0.937829 (* 1 = 0.937829 loss)\nI0817 21:02:18.948856 17619 solver.cpp:228] Iteration 8000, loss = 0.0744413\nI0817 21:02:18.948911 17619 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:02:18.948936 17619 solver.cpp:244]     Train net output #1: loss = 0.0744413 (* 1 = 0.0744413 loss)\nI0817 21:02:19.034857 17619 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:04:37.238807 17619 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:05:59.419435 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7348\nI0817 21:05:59.419740 17619 solver.cpp:404]     Test net output #1: loss = 1.0329 (* 1 = 1.0329 loss)\nI0817 21:06:00.740319 17619 solver.cpp:228] Iteration 8100, loss = 0.0600527\nI0817 21:06:00.740372 17619 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:06:00.740391 17619 solver.cpp:244]     Train net output #1: loss = 0.0600528 (* 1 = 0.0600528 loss)\nI0817 21:06:00.824965 17619 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:08:19.061902 17619 solver.cpp:337] Iteration 8200, Testing 
net (#0)\nI0817 21:09:41.193658 17619 solver.cpp:404]     Test net output #0: accuracy = 0.69176\nI0817 21:09:41.193951 17619 solver.cpp:404]     Test net output #1: loss = 1.51142 (* 1 = 1.51142 loss)\nI0817 21:09:42.514204 17619 solver.cpp:228] Iteration 8200, loss = 0.0329177\nI0817 21:09:42.514248 17619 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:09:42.514264 17619 solver.cpp:244]     Train net output #1: loss = 0.0329178 (* 1 = 0.0329178 loss)\nI0817 21:09:42.599273 17619 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:12:00.774960 17619 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:13:22.905127 17619 solver.cpp:404]     Test net output #0: accuracy = 0.77972\nI0817 21:13:22.905428 17619 solver.cpp:404]     Test net output #1: loss = 1.03276 (* 1 = 1.03276 loss)\nI0817 21:13:24.225456 17619 solver.cpp:228] Iteration 8300, loss = 0.0684564\nI0817 21:13:24.225508 17619 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:13:24.225527 17619 solver.cpp:244]     Train net output #1: loss = 0.0684565 (* 1 = 0.0684565 loss)\nI0817 21:13:24.309741 17619 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:15:42.673178 17619 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:17:04.867396 17619 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0817 21:17:04.867679 17619 solver.cpp:404]     Test net output #1: loss = 0.823419 (* 1 = 0.823419 loss)\nI0817 21:17:06.187657 17619 solver.cpp:228] Iteration 8400, loss = 0.0739082\nI0817 21:17:06.187711 17619 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:17:06.187727 17619 solver.cpp:244]     Train net output #1: loss = 0.0739082 (* 1 = 0.0739082 loss)\nI0817 21:17:06.271932 17619 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:19:24.560901 17619 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:20:46.772650 17619 solver.cpp:404]     Test net output #0: accuracy = 0.81256\nI0817 21:20:46.772927 17619 
solver.cpp:404]     Test net output #1: loss = 0.791223 (* 1 = 0.791223 loss)\nI0817 21:20:48.092943 17619 solver.cpp:228] Iteration 8500, loss = 0.0503193\nI0817 21:20:48.092998 17619 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:20:48.093015 17619 solver.cpp:244]     Train net output #1: loss = 0.0503194 (* 1 = 0.0503194 loss)\nI0817 21:20:48.177439 17619 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:23:06.278944 17619 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:24:28.496342 17619 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI0817 21:24:28.496645 17619 solver.cpp:404]     Test net output #1: loss = 0.851508 (* 1 = 0.851508 loss)\nI0817 21:24:29.816720 17619 solver.cpp:228] Iteration 8600, loss = 0.0742706\nI0817 21:24:29.816774 17619 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:24:29.816792 17619 solver.cpp:244]     Train net output #1: loss = 0.0742706 (* 1 = 0.0742706 loss)\nI0817 21:24:29.902873 17619 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:26:47.921782 17619 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:28:10.136411 17619 solver.cpp:404]     Test net output #0: accuracy = 0.8088\nI0817 21:28:10.136695 17619 solver.cpp:404]     Test net output #1: loss = 0.835531 (* 1 = 0.835531 loss)\nI0817 21:28:11.457696 17619 solver.cpp:228] Iteration 8700, loss = 0.0636272\nI0817 21:28:11.457751 17619 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:28:11.457768 17619 solver.cpp:244]     Train net output #1: loss = 0.0636272 (* 1 = 0.0636272 loss)\nI0817 21:28:11.540736 17619 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:30:29.552965 17619 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:31:51.708611 17619 solver.cpp:404]     Test net output #0: accuracy = 0.7398\nI0817 21:31:51.708896 17619 solver.cpp:404]     Test net output #1: loss = 1.16659 (* 1 = 1.16659 loss)\nI0817 21:31:53.029839 17619 solver.cpp:228] Iteration 
8800, loss = 0.0342006\nI0817 21:31:53.029891 17619 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:31:53.029908 17619 solver.cpp:244]     Train net output #1: loss = 0.0342006 (* 1 = 0.0342006 loss)\nI0817 21:31:53.119837 17619 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:34:11.253926 17619 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:35:33.323199 17619 solver.cpp:404]     Test net output #0: accuracy = 0.81024\nI0817 21:35:33.323482 17619 solver.cpp:404]     Test net output #1: loss = 0.918675 (* 1 = 0.918675 loss)\nI0817 21:35:34.643864 17619 solver.cpp:228] Iteration 8900, loss = 0.0286211\nI0817 21:35:34.643918 17619 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:35:34.643935 17619 solver.cpp:244]     Train net output #1: loss = 0.0286212 (* 1 = 0.0286212 loss)\nI0817 21:35:34.727295 17619 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:37:52.693964 17619 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:39:13.758631 17619 solver.cpp:404]     Test net output #0: accuracy = 0.82036\nI0817 21:39:13.758855 17619 solver.cpp:404]     Test net output #1: loss = 0.789592 (* 1 = 0.789592 loss)\nI0817 21:39:15.074801 17619 solver.cpp:228] Iteration 9000, loss = 0.0278305\nI0817 21:39:15.074833 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:39:15.074848 17619 solver.cpp:244]     Train net output #1: loss = 0.0278305 (* 1 = 0.0278305 loss)\nI0817 21:39:15.161872 17619 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:41:33.026757 17619 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:42:54.091816 17619 solver.cpp:404]     Test net output #0: accuracy = 0.84856\nI0817 21:42:54.092073 17619 solver.cpp:404]     Test net output #1: loss = 0.669651 (* 1 = 0.669651 loss)\nI0817 21:42:55.409075 17619 solver.cpp:228] Iteration 9100, loss = 0.0287215\nI0817 21:42:55.409106 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:42:55.409121 
17619 solver.cpp:244]     Train net output #1: loss = 0.0287216 (* 1 = 0.0287216 loss)\nI0817 21:42:55.499179 17619 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:45:13.347602 17619 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:46:34.405977 17619 solver.cpp:404]     Test net output #0: accuracy = 0.84748\nI0817 21:46:34.406227 17619 solver.cpp:404]     Test net output #1: loss = 0.70277 (* 1 = 0.70277 loss)\nI0817 21:46:35.722029 17619 solver.cpp:228] Iteration 9200, loss = 0.00383642\nI0817 21:46:35.722064 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:46:35.722079 17619 solver.cpp:244]     Train net output #1: loss = 0.00383647 (* 1 = 0.00383647 loss)\nI0817 21:46:35.808096 17619 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:48:53.629923 17619 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:50:14.703985 17619 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0817 21:50:14.704241 17619 solver.cpp:404]     Test net output #1: loss = 0.477105 (* 1 = 0.477105 loss)\nI0817 21:50:16.021196 17619 solver.cpp:228] Iteration 9300, loss = 0.000464644\nI0817 21:50:16.021231 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:50:16.021247 17619 solver.cpp:244]     Train net output #1: loss = 0.000464695 (* 1 = 0.000464695 loss)\nI0817 21:50:16.105245 17619 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:52:33.992765 17619 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:53:55.051138 17619 solver.cpp:404]     Test net output #0: accuracy = 0.89368\nI0817 21:53:55.051400 17619 solver.cpp:404]     Test net output #1: loss = 0.432081 (* 1 = 0.432081 loss)\nI0817 21:53:56.368093 17619 solver.cpp:228] Iteration 9400, loss = 0.000389945\nI0817 21:53:56.368129 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:53:56.368144 17619 solver.cpp:244]     Train net output #1: loss = 0.000389996 (* 1 = 0.000389996 loss)\nI0817 21:53:56.458268 17619 
sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0817 21:56:14.288580 17619 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 21:57:35.349356 17619 solver.cpp:404]     Test net output #0: accuracy = 0.8936\nI0817 21:57:35.349625 17619 solver.cpp:404]     Test net output #1: loss = 0.412583 (* 1 = 0.412583 loss)\nI0817 21:57:36.665678 17619 solver.cpp:228] Iteration 9500, loss = 0.000396698\nI0817 21:57:36.665714 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:57:36.665729 17619 solver.cpp:244]     Train net output #1: loss = 0.000396749 (* 1 = 0.000396749 loss)\nI0817 21:57:36.751893 17619 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 21:59:54.663967 17619 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:01:15.716856 17619 solver.cpp:404]     Test net output #0: accuracy = 0.89504\nI0817 22:01:15.717109 17619 solver.cpp:404]     Test net output #1: loss = 0.408219 (* 1 = 0.408219 loss)\nI0817 22:01:17.033018 17619 solver.cpp:228] Iteration 9600, loss = 0.000447035\nI0817 22:01:17.033053 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:01:17.033068 17619 solver.cpp:244]     Train net output #1: loss = 0.000447087 (* 1 = 0.000447087 loss)\nI0817 22:01:17.122534 17619 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:03:35.086804 17619 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:04:56.142693 17619 solver.cpp:404]     Test net output #0: accuracy = 0.8962\nI0817 22:04:56.142956 17619 solver.cpp:404]     Test net output #1: loss = 0.395007 (* 1 = 0.395007 loss)\nI0817 22:04:57.459700 17619 solver.cpp:228] Iteration 9700, loss = 0.00044678\nI0817 22:04:57.459738 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:04:57.459753 17619 solver.cpp:244]     Train net output #1: loss = 0.000446831 (* 1 = 0.000446831 loss)\nI0817 22:04:57.547541 17619 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:07:15.495504 17619 solver.cpp:337] Iteration 9800, Testing net 
(#0)\nI0817 22:08:36.551023 17619 solver.cpp:404]     Test net output #0: accuracy = 0.89648\nI0817 22:08:36.551287 17619 solver.cpp:404]     Test net output #1: loss = 0.396002 (* 1 = 0.396002 loss)\nI0817 22:08:37.867720 17619 solver.cpp:228] Iteration 9800, loss = 0.000467907\nI0817 22:08:37.867756 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:08:37.867775 17619 solver.cpp:244]     Train net output #1: loss = 0.000467958 (* 1 = 0.000467958 loss)\nI0817 22:08:37.955703 17619 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:10:55.823710 17619 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:12:16.879784 17619 solver.cpp:404]     Test net output #0: accuracy = 0.89568\nI0817 22:12:16.880036 17619 solver.cpp:404]     Test net output #1: loss = 0.390431 (* 1 = 0.390431 loss)\nI0817 22:12:18.196522 17619 solver.cpp:228] Iteration 9900, loss = 0.000363203\nI0817 22:12:18.196558 17619 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:12:18.196573 17619 solver.cpp:244]     Train net output #1: loss = 0.000363255 (* 1 = 0.000363255 loss)\nI0817 22:12:18.282316 17619 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:14:36.199548 17619 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kTr30kTab1_iter_10000.caffemodel\nI0817 22:14:36.416821 17619 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kTr30kTab1_iter_10000.solverstate\nI0817 22:14:36.857905 17619 solver.cpp:317] Iteration 10000, loss = 0.000412098\nI0817 22:14:36.857945 17619 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:15:57.911000 17619 solver.cpp:404]     Test net output #0: accuracy = 0.89644\nI0817 22:15:57.911267 17619 solver.cpp:404]     Test net output #1: loss = 0.393001 (* 1 = 0.393001 loss)\nI0817 22:15:57.911280 17619 solver.cpp:322] Optimization Done.\nI0817 22:16:03.243692 17619 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kTr40kTab1",
    "content": "I0817 16:05:11.654896 17621 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:05:11.657191 17621 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:05:11.658409 17621 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:05:11.659617 17621 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:05:11.660825 17621 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:05:11.662051 17621 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:05:11.663272 17621 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:05:11.664494 17621 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:05:11.665724 17621 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:05:12.082221 17621 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kTr40kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:05:12.086784 17621 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:05:12.102996 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:12.103070 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:12.104120 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:05:12.104178 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:05:12.104200 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:05:12.104220 17621 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:05:12.104239 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:05:12.104255 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:05:12.104274 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:05:12.104292 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:05:12.104312 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:05:12.104331 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:05:12.104351 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:05:12.104365 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:05:12.104384 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:05:12.104403 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:05:12.104423 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:05:12.104440 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:05:12.104459 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:05:12.104476 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:05:12.104496 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:05:12.104514 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:05:12.104548 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:05:12.104568 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:05:12.104594 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:05:12.104614 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:05:12.104631 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:05:12.104646 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:05:12.104665 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:05:12.104681 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:05:12.104710 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:05:12.104728 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:05:12.104748 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:05:12.104765 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:05:12.104784 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:05:12.104799 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:05:12.104818 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:05:12.104836 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:05:12.104856 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:05:12.104873 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:05:12.104892 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:05:12.104908 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:05:12.104933 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:05:12.104950 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:05:12.104969 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:05:12.104987 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:05:12.105006 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:05:12.105024 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:05:12.105043 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:05:12.105060 17621 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:05:12.105079 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:05:12.105095 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:05:12.105113 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:05:12.105140 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:05:12.105161 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:05:12.105180 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:05:12.105197 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:05:12.105213 17621 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:05:12.106993 17621 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train40k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:05:12.109069 17621 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:05:12.110252 17621 net.cpp:100] Creating Layer dataLayer\nI0817 16:05:12.110328 17621 net.cpp:408] dataLayer -> data_top\nI0817 16:05:12.110519 17621 net.cpp:408] dataLayer -> label\nI0817 16:05:12.110635 17621 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:05:12.137941 17627 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train40k_lmdb\nI0817 16:05:12.139029 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:12.145910 17621 net.cpp:150] Setting up dataLayer\nI0817 16:05:12.145982 17621 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:05:12.145999 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.146005 17621 net.cpp:165] Memory required for data: 1536500\nI0817 16:05:12.146020 17621 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:05:12.146034 17621 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:05:12.146042 17621 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:05:12.146059 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:05:12.146078 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:05:12.146188 17621 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:05:12.146203 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.146209 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.146214 17621 net.cpp:165] Memory required for data: 1537500\nI0817 16:05:12.146219 17621 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:05:12.146282 17621 
net.cpp:100] Creating Layer pre_conv\nI0817 16:05:12.146296 17621 net.cpp:434] pre_conv <- data_top\nI0817 16:05:12.146308 17621 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:05:12.148183 17621 net.cpp:150] Setting up pre_conv\nI0817 16:05:12.148203 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.148210 17621 net.cpp:165] Memory required for data: 9729500\nI0817 16:05:12.148267 17621 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:05:12.148331 17621 net.cpp:100] Creating Layer pre_bn\nI0817 16:05:12.148344 17621 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:05:12.148353 17621 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:05:12.148878 17628 blocking_queue.cpp:50] Waiting for data\nI0817 16:05:12.148950 17621 net.cpp:150] Setting up pre_bn\nI0817 16:05:12.148967 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.148972 17621 net.cpp:165] Memory required for data: 17921500\nI0817 16:05:12.148990 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:05:12.149045 17621 net.cpp:100] Creating Layer pre_scale\nI0817 16:05:12.149058 17621 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:05:12.149067 17621 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:05:12.149230 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:05:12.149483 17621 net.cpp:150] Setting up pre_scale\nI0817 16:05:12.149498 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.149504 17621 net.cpp:165] Memory required for data: 26113500\nI0817 16:05:12.149515 17621 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:05:12.149556 17621 net.cpp:100] Creating Layer pre_relu\nI0817 16:05:12.149565 17621 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:05:12.149576 17621 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:05:12.149588 17621 net.cpp:150] Setting up pre_relu\nI0817 16:05:12.149595 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.149600 17621 net.cpp:165] Memory required for data: 
34305500\nI0817 16:05:12.149605 17621 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:05:12.149616 17621 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:05:12.149621 17621 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:05:12.149628 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:05:12.149637 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:05:12.149693 17621 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:05:12.149706 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.149714 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.149719 17621 net.cpp:165] Memory required for data: 50689500\nI0817 16:05:12.149725 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:05:12.149742 17621 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:05:12.149749 17621 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:05:12.149758 17621 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:05:12.150066 17621 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:05:12.150081 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.150086 17621 net.cpp:165] Memory required for data: 58881500\nI0817 16:05:12.150099 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:05:12.150113 17621 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:05:12.150120 17621 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:05:12.150133 17621 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:05:12.150367 17621 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:05:12.150382 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.150387 17621 net.cpp:165] Memory required for data: 67073500\nI0817 16:05:12.150398 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:05:12.150406 17621 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:05:12.150411 17621 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:05:12.150419 17621 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.150473 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:05:12.150609 17621 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:05:12.150621 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.150626 17621 net.cpp:165] Memory required for data: 75265500\nI0817 16:05:12.150635 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:05:12.150651 17621 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:05:12.150657 17621 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:05:12.150668 17621 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.150677 17621 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:05:12.150691 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.150696 17621 net.cpp:165] Memory required for data: 83457500\nI0817 16:05:12.150701 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:05:12.150714 17621 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:05:12.150720 17621 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:05:12.150728 17621 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:05:12.151038 17621 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:05:12.151052 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151057 17621 net.cpp:165] Memory required for data: 91649500\nI0817 16:05:12.151067 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:05:12.151079 17621 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:05:12.151087 17621 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:05:12.151094 17621 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:05:12.151327 17621 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:05:12.151343 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151348 17621 net.cpp:165] Memory required for data: 99841500\nI0817 16:05:12.151363 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:05:12.151372 17621 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:05:12.151377 17621 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:05:12.151386 17621 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.151445 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:05:12.151579 17621 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:05:12.151592 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151597 17621 net.cpp:165] Memory required for data: 108033500\nI0817 16:05:12.151607 17621 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:05:12.151659 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:05:12.151671 17621 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:05:12.151679 17621 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:05:12.151695 17621 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:05:12.151767 17621 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:05:12.151782 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151787 17621 net.cpp:165] Memory required for data: 116225500\nI0817 16:05:12.151793 17621 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:05:12.151803 17621 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:05:12.151808 17621 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:05:12.151814 17621 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.151824 17621 net.cpp:150] Setting up L1_b1_relu\nI0817 16:05:12.151831 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151835 17621 net.cpp:165] Memory required for data: 124417500\nI0817 16:05:12.151840 17621 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.151849 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.151854 17621 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:05:12.151865 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:05:12.151875 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:05:12.151916 17621 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.151931 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151938 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.151949 17621 net.cpp:165] Memory required for data: 140801500\nI0817 16:05:12.151955 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:05:12.151967 17621 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:05:12.151973 17621 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:05:12.151981 17621 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:05:12.152289 17621 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:05:12.152303 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.152308 17621 net.cpp:165] Memory required for data: 148993500\nI0817 16:05:12.152318 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:05:12.152330 17621 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:05:12.152336 17621 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:05:12.152344 17621 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:05:12.152581 17621 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:05:12.152595 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.152600 17621 net.cpp:165] Memory required for data: 157185500\nI0817 16:05:12.152611 17621 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:05:12.152621 17621 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:05:12.152627 17621 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:05:12.152636 17621 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.152693 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:05:12.152832 17621 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:05:12.152844 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.152850 17621 net.cpp:165] Memory required for data: 165377500\nI0817 16:05:12.152858 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:05:12.152866 17621 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:05:12.152873 17621 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:05:12.152882 17621 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.152892 17621 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:05:12.152899 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.152904 17621 net.cpp:165] Memory required for data: 173569500\nI0817 16:05:12.152909 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:05:12.152922 17621 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:05:12.152928 17621 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:05:12.152936 17621 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:05:12.153244 17621 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:05:12.153259 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.153264 17621 net.cpp:165] Memory required for data: 181761500\nI0817 16:05:12.153272 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:05:12.153281 17621 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:05:12.153290 17621 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:05:12.153300 17621 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:05:12.153532 17621 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:05:12.153544 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.153549 17621 net.cpp:165] Memory required for data: 189953500\nI0817 16:05:12.153568 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:05:12.153578 17621 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:05:12.153583 17621 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:05:12.153592 17621 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.153645 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:05:12.153790 17621 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:05:12.153803 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.153810 17621 net.cpp:165] Memory required for data: 198145500\nI0817 16:05:12.153818 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:05:12.153837 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:05:12.153844 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:05:12.153851 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:05:12.153858 17621 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:05:12.153892 17621 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:05:12.153904 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.153908 17621 net.cpp:165] Memory required for data: 206337500\nI0817 16:05:12.153914 17621 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:05:12.153921 17621 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:05:12.153928 17621 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:05:12.153937 17621 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.153946 17621 net.cpp:150] Setting up L1_b2_relu\nI0817 16:05:12.153954 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.153959 17621 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:05:12.153964 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.153970 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.153975 17621 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:05:12.153982 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:05:12.153991 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:05:12.154034 17621 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.154047 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.154053 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.154057 17621 net.cpp:165] Memory required for data: 230913500\nI0817 16:05:12.154062 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:05:12.154073 17621 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:05:12.154079 17621 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:05:12.154090 17621 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:05:12.154397 17621 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:05:12.154410 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.154415 17621 net.cpp:165] Memory required for data: 239105500\nI0817 16:05:12.154424 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:05:12.154433 17621 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:05:12.154439 17621 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:05:12.154450 17621 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:05:12.154695 17621 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:05:12.154711 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.154716 17621 net.cpp:165] Memory required for data: 
247297500\nI0817 16:05:12.154727 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:05:12.154736 17621 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:05:12.154742 17621 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:05:12.154749 17621 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.154800 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:05:12.154939 17621 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:05:12.154953 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.154958 17621 net.cpp:165] Memory required for data: 255489500\nI0817 16:05:12.154966 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:05:12.154978 17621 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:05:12.154983 17621 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:05:12.154990 17621 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.154999 17621 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:05:12.155014 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.155019 17621 net.cpp:165] Memory required for data: 263681500\nI0817 16:05:12.155023 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:05:12.155036 17621 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:05:12.155042 17621 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:05:12.155052 17621 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:05:12.155356 17621 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:05:12.155370 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.155375 17621 net.cpp:165] Memory required for data: 271873500\nI0817 16:05:12.155385 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:05:12.155400 17621 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:05:12.155405 17621 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:05:12.155414 17621 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:05:12.155647 17621 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:05:12.155663 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.155668 17621 net.cpp:165] Memory required for data: 280065500\nI0817 16:05:12.155678 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:05:12.155692 17621 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:05:12.155699 17621 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:05:12.155706 17621 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.155758 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:05:12.155896 17621 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:05:12.155910 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.155915 17621 net.cpp:165] Memory required for data: 288257500\nI0817 16:05:12.155925 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:05:12.155936 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:05:12.155941 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:05:12.155948 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:05:12.155958 17621 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:05:12.155989 17621 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:05:12.155998 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.156002 17621 net.cpp:165] Memory required for data: 296449500\nI0817 16:05:12.156008 17621 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:05:12.156018 17621 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:05:12.156024 17621 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:05:12.156031 17621 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.156040 17621 net.cpp:150] Setting up L1_b3_relu\nI0817 16:05:12.156047 17621 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:05:12.156052 17621 net.cpp:165] Memory required for data: 304641500\nI0817 16:05:12.156056 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.156064 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.156069 17621 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:05:12.156076 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:05:12.156085 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:05:12.156131 17621 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.156142 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.156149 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.156153 17621 net.cpp:165] Memory required for data: 321025500\nI0817 16:05:12.156158 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:05:12.156172 17621 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:05:12.156178 17621 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:05:12.156193 17621 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:05:12.156499 17621 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:05:12.156513 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.156518 17621 net.cpp:165] Memory required for data: 329217500\nI0817 16:05:12.156527 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:05:12.156541 17621 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:05:12.156548 17621 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:05:12.156556 17621 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:05:12.156803 17621 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:05:12.156817 17621 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:05:12.156822 17621 net.cpp:165] Memory required for data: 337409500\nI0817 16:05:12.156832 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:05:12.156841 17621 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:05:12.156847 17621 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:05:12.156854 17621 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.156909 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:05:12.157050 17621 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:05:12.157063 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.157068 17621 net.cpp:165] Memory required for data: 345601500\nI0817 16:05:12.157078 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:05:12.157084 17621 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:05:12.157090 17621 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:05:12.157100 17621 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.157110 17621 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:05:12.157117 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.157122 17621 net.cpp:165] Memory required for data: 353793500\nI0817 16:05:12.157126 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:05:12.157137 17621 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:05:12.157142 17621 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:05:12.157153 17621 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:05:12.157462 17621 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:05:12.157476 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.157481 17621 net.cpp:165] Memory required for data: 361985500\nI0817 16:05:12.157490 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:05:12.157500 17621 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:05:12.157505 17621 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:05:12.157516 17621 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:05:12.157765 17621 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:05:12.157779 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.157784 17621 net.cpp:165] Memory required for data: 370177500\nI0817 16:05:12.157794 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:05:12.157804 17621 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:05:12.157809 17621 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:05:12.157816 17621 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.157871 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:05:12.158012 17621 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:05:12.158025 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158030 17621 net.cpp:165] Memory required for data: 378369500\nI0817 16:05:12.158040 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:05:12.158048 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:05:12.158054 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:05:12.158061 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:05:12.158072 17621 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:05:12.158110 17621 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:05:12.158120 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158125 17621 net.cpp:165] Memory required for data: 386561500\nI0817 16:05:12.158130 17621 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:05:12.158141 17621 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:05:12.158146 17621 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:05:12.158154 17621 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.158162 17621 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:05:12.158169 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158174 17621 net.cpp:165] Memory required for data: 394753500\nI0817 16:05:12.158179 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.158186 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.158191 17621 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:05:12.158200 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:05:12.158208 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:05:12.158252 17621 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.158264 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158270 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158275 17621 net.cpp:165] Memory required for data: 411137500\nI0817 16:05:12.158280 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:05:12.158294 17621 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:05:12.158300 17621 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:05:12.158309 17621 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:05:12.158617 17621 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:05:12.158632 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158637 17621 net.cpp:165] Memory required for data: 419329500\nI0817 16:05:12.158658 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:05:12.158670 17621 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:05:12.158676 17621 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:05:12.158694 17621 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:05:12.158937 17621 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:05:12.158951 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.158957 17621 net.cpp:165] Memory required for data: 427521500\nI0817 16:05:12.158967 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:05:12.158975 17621 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:05:12.158982 17621 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:05:12.158989 17621 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.159044 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:05:12.159183 17621 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:05:12.159196 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.159201 17621 net.cpp:165] Memory required for data: 435713500\nI0817 16:05:12.159210 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:05:12.159217 17621 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:05:12.159224 17621 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:05:12.159234 17621 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.159243 17621 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:05:12.159250 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.159255 17621 net.cpp:165] Memory required for data: 443905500\nI0817 16:05:12.159260 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:05:12.159273 17621 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:05:12.159279 17621 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:05:12.159294 17621 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:05:12.159605 17621 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:05:12.159617 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.159622 17621 net.cpp:165] Memory required for data: 452097500\nI0817 16:05:12.159631 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:05:12.159641 17621 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:05:12.159647 17621 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:05:12.159660 17621 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:05:12.159914 17621 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:05:12.159930 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.159935 17621 net.cpp:165] Memory required for data: 460289500\nI0817 16:05:12.159946 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:05:12.159955 17621 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:05:12.159960 17621 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:05:12.159967 17621 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.160019 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:05:12.160162 17621 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:05:12.160174 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160181 17621 net.cpp:165] Memory required for data: 468481500\nI0817 16:05:12.160189 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:05:12.160200 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:05:12.160207 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:05:12.160213 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:05:12.160223 17621 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:05:12.160254 17621 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:05:12.160262 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160267 17621 net.cpp:165] Memory required for data: 476673500\nI0817 16:05:12.160272 17621 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:05:12.160282 17621 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:05:12.160289 17621 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:05:12.160295 17621 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.160305 17621 net.cpp:150] Setting up L1_b5_relu\nI0817 16:05:12.160311 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160315 17621 net.cpp:165] Memory required for data: 484865500\nI0817 16:05:12.160320 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.160327 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.160332 17621 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:05:12.160339 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:05:12.160348 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:05:12.160393 17621 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.160404 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160411 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160415 17621 net.cpp:165] Memory required for data: 501249500\nI0817 16:05:12.160421 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:05:12.160434 17621 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:05:12.160440 17621 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:05:12.160449 17621 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:05:12.160763 17621 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:05:12.160778 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.160784 17621 net.cpp:165] Memory required for data: 509441500\nI0817 16:05:12.160799 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:05:12.160811 17621 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:05:12.160817 17621 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:05:12.160825 17621 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:05:12.161064 17621 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:05:12.161080 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.161085 17621 net.cpp:165] Memory required for data: 517633500\nI0817 16:05:12.161095 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:05:12.161104 17621 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:05:12.161110 17621 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:05:12.161118 17621 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.161170 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:05:12.161315 17621 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:05:12.161329 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.161334 17621 net.cpp:165] Memory required for data: 525825500\nI0817 16:05:12.161342 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:05:12.161350 17621 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:05:12.161356 17621 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:05:12.161366 17621 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.161375 17621 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:05:12.161382 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.161387 17621 net.cpp:165] Memory required for data: 534017500\nI0817 16:05:12.161392 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:05:12.161403 17621 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:05:12.161408 17621 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:05:12.161419 17621 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:05:12.161746 17621 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:05:12.161759 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.161765 17621 net.cpp:165] Memory required for data: 542209500\nI0817 16:05:12.161774 17621 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:05:12.161783 17621 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:05:12.161789 17621 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:05:12.161800 17621 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:05:12.162037 17621 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:05:12.162050 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162055 17621 net.cpp:165] Memory required for data: 550401500\nI0817 16:05:12.162065 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:05:12.162077 17621 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:05:12.162083 17621 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:05:12.162091 17621 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.162143 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:05:12.162284 17621 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:05:12.162297 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162302 17621 net.cpp:165] Memory required for data: 558593500\nI0817 16:05:12.162310 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:05:12.162329 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:05:12.162336 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:05:12.162343 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:05:12.162350 17621 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:05:12.162385 17621 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:05:12.162397 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162401 17621 net.cpp:165] Memory required for data: 566785500\nI0817 16:05:12.162407 17621 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:05:12.162423 17621 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:05:12.162430 17621 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:05:12.162436 17621 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.162446 17621 net.cpp:150] Setting up L1_b6_relu\nI0817 16:05:12.162452 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162457 17621 net.cpp:165] Memory required for data: 574977500\nI0817 16:05:12.162461 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.162468 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.162473 17621 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:05:12.162483 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:05:12.162493 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:05:12.162536 17621 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.162547 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162554 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162559 17621 net.cpp:165] Memory required for data: 591361500\nI0817 16:05:12.162564 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:05:12.162577 17621 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:05:12.162583 17621 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:05:12.162592 17621 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:05:12.162914 17621 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:05:12.162928 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.162933 17621 net.cpp:165] Memory required for data: 599553500\nI0817 16:05:12.162942 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:05:12.162955 17621 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:05:12.162961 17621 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:05:12.162971 17621 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:05:12.163213 17621 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:05:12.163228 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.163233 17621 net.cpp:165] Memory required for data: 607745500\nI0817 16:05:12.163242 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:05:12.163251 17621 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:05:12.163257 17621 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:05:12.163267 17621 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.163321 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:05:12.163460 17621 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:05:12.163472 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.163477 17621 net.cpp:165] Memory required for data: 615937500\nI0817 16:05:12.163486 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:05:12.163497 17621 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:05:12.163503 17621 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:05:12.163511 17621 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.163520 17621 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:05:12.163527 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.163532 17621 net.cpp:165] Memory required for data: 624129500\nI0817 16:05:12.163537 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:05:12.163549 17621 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:05:12.163555 17621 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:05:12.163568 17621 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:05:12.163890 17621 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:05:12.163904 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.163909 17621 
net.cpp:165] Memory required for data: 632321500\nI0817 16:05:12.163925 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:05:12.163938 17621 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:05:12.163944 17621 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:05:12.163955 17621 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:05:12.164194 17621 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:05:12.164207 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164212 17621 net.cpp:165] Memory required for data: 640513500\nI0817 16:05:12.164223 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:05:12.164232 17621 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:05:12.164237 17621 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:05:12.164244 17621 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.164300 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:05:12.164438 17621 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:05:12.164451 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164456 17621 net.cpp:165] Memory required for data: 648705500\nI0817 16:05:12.164465 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:05:12.164479 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:05:12.164485 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:05:12.164492 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:05:12.164500 17621 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:05:12.164535 17621 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:05:12.164546 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164551 17621 net.cpp:165] Memory required for data: 656897500\nI0817 16:05:12.164556 17621 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:05:12.164564 17621 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:05:12.164569 17621 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:05:12.164579 17621 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.164588 17621 net.cpp:150] Setting up L1_b7_relu\nI0817 16:05:12.164595 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164600 17621 net.cpp:165] Memory required for data: 665089500\nI0817 16:05:12.164605 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.164613 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.164618 17621 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:05:12.164628 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:05:12.164638 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:05:12.164680 17621 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.164697 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164705 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.164710 17621 net.cpp:165] Memory required for data: 681473500\nI0817 16:05:12.164714 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:05:12.164728 17621 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:05:12.164736 17621 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:05:12.164744 17621 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:05:12.165062 17621 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:05:12.165076 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.165081 17621 net.cpp:165] Memory required for data: 689665500\nI0817 16:05:12.165091 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:05:12.165103 17621 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:05:12.165110 17621 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:05:12.165120 17621 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:05:12.165370 17621 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:05:12.165385 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.165390 17621 net.cpp:165] Memory required for data: 697857500\nI0817 16:05:12.165400 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:05:12.165408 17621 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:05:12.165416 17621 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:05:12.165422 17621 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.165477 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:05:12.165617 17621 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:05:12.165630 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.165635 17621 net.cpp:165] Memory required for data: 706049500\nI0817 16:05:12.165644 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:05:12.165657 17621 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:05:12.165663 17621 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:05:12.165669 17621 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.165679 17621 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:05:12.165693 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.165697 17621 net.cpp:165] Memory required for data: 714241500\nI0817 16:05:12.165702 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:05:12.165716 17621 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:05:12.165722 17621 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:05:12.165733 17621 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:05:12.166076 17621 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:05:12.166097 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166103 17621 net.cpp:165] Memory required for data: 722433500\nI0817 16:05:12.166112 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:05:12.166127 17621 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:05:12.166133 17621 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:05:12.166142 17621 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:05:12.166388 17621 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:05:12.166401 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166406 17621 net.cpp:165] Memory required for data: 730625500\nI0817 16:05:12.166416 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:05:12.166425 17621 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:05:12.166431 17621 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:05:12.166438 17621 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.166492 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:05:12.166635 17621 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:05:12.166647 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166652 17621 net.cpp:165] Memory required for data: 738817500\nI0817 16:05:12.166661 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:05:12.166671 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:05:12.166677 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:05:12.166692 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:05:12.166702 17621 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:05:12.166734 17621 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:05:12.166746 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166751 17621 net.cpp:165] Memory required for data: 747009500\nI0817 16:05:12.166757 17621 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:05:12.166769 17621 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:05:12.166774 17621 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:05:12.166781 17621 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.166790 17621 net.cpp:150] Setting up L1_b8_relu\nI0817 16:05:12.166797 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166808 17621 net.cpp:165] Memory required for data: 755201500\nI0817 16:05:12.166813 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.166821 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.166826 17621 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:05:12.166833 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:05:12.166843 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:05:12.166888 17621 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.166900 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166906 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.166911 17621 net.cpp:165] Memory required for data: 771585500\nI0817 16:05:12.166916 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:05:12.166929 17621 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:05:12.166936 17621 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:05:12.166945 17621 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:05:12.167266 17621 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:05:12.167281 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.167286 17621 net.cpp:165] Memory required for data: 779777500\nI0817 16:05:12.167295 17621 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:05:12.167306 17621 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:05:12.167313 17621 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:05:12.167321 17621 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:05:12.167567 17621 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:05:12.167579 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.167584 17621 net.cpp:165] Memory required for data: 787969500\nI0817 16:05:12.167594 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:05:12.167606 17621 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:05:12.167613 17621 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:05:12.167620 17621 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.167675 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:05:12.167827 17621 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:05:12.167840 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.167845 17621 net.cpp:165] Memory required for data: 796161500\nI0817 16:05:12.167855 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:05:12.167862 17621 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:05:12.167868 17621 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:05:12.167878 17621 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.167888 17621 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:05:12.167896 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.167899 17621 net.cpp:165] Memory required for data: 804353500\nI0817 16:05:12.167904 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:05:12.167918 17621 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:05:12.167924 17621 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:05:12.167932 17621 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:05:12.168251 17621 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:05:12.168264 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.168269 17621 net.cpp:165] Memory required for data: 812545500\nI0817 16:05:12.168278 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:05:12.168290 17621 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:05:12.168296 17621 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:05:12.168304 17621 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:05:12.168553 17621 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:05:12.168567 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.168572 17621 net.cpp:165] Memory required for data: 820737500\nI0817 16:05:12.168603 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:05:12.168615 17621 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:05:12.168622 17621 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:05:12.168629 17621 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.168690 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:05:12.168836 17621 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:05:12.168849 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.168855 17621 net.cpp:165] Memory required for data: 828929500\nI0817 16:05:12.168864 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:05:12.168874 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:05:12.168879 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:05:12.168886 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:05:12.168893 17621 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:05:12.168928 17621 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:05:12.168938 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.168942 17621 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:05:12.168947 17621 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:05:12.168956 17621 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:05:12.168962 17621 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:05:12.168970 17621 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.168979 17621 net.cpp:150] Setting up L1_b9_relu\nI0817 16:05:12.168987 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.168990 17621 net.cpp:165] Memory required for data: 845313500\nI0817 16:05:12.168995 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.169003 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.169008 17621 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:05:12.169019 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:05:12.169028 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:05:12.169071 17621 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.169082 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.169090 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.169095 17621 net.cpp:165] Memory required for data: 861697500\nI0817 16:05:12.169100 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:05:12.169113 17621 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:05:12.169119 17621 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:05:12.169128 17621 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:05:12.169451 17621 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:05:12.169466 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.169471 17621 net.cpp:165] Memory required for data: 
863745500\nI0817 16:05:12.169479 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:05:12.169492 17621 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:05:12.169497 17621 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:05:12.169505 17621 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:05:12.169746 17621 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:05:12.169759 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.169764 17621 net.cpp:165] Memory required for data: 865793500\nI0817 16:05:12.169775 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:05:12.169785 17621 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:05:12.169797 17621 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:05:12.169809 17621 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.169863 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:05:12.170006 17621 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:05:12.170017 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.170022 17621 net.cpp:165] Memory required for data: 867841500\nI0817 16:05:12.170032 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:05:12.170039 17621 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:05:12.170045 17621 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:05:12.170053 17621 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.170061 17621 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:05:12.170068 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.170073 17621 net.cpp:165] Memory required for data: 869889500\nI0817 16:05:12.170078 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:05:12.170092 17621 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:05:12.170099 17621 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:05:12.170109 17621 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:05:12.170428 17621 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:05:12.170441 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.170446 17621 net.cpp:165] Memory required for data: 871937500\nI0817 16:05:12.170455 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:05:12.170469 17621 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:05:12.170476 17621 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:05:12.170488 17621 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:05:12.170735 17621 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:05:12.170749 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.170754 17621 net.cpp:165] Memory required for data: 873985500\nI0817 16:05:12.170765 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:05:12.170773 17621 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:05:12.170780 17621 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:05:12.170788 17621 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.170845 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:05:12.170985 17621 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:05:12.170999 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.171003 17621 net.cpp:165] Memory required for data: 876033500\nI0817 16:05:12.171012 17621 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:05:12.171026 17621 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:05:12.171033 17621 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:05:12.171041 17621 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:05:12.171129 17621 net.cpp:150] Setting up L2_b1_pool\nI0817 16:05:12.171144 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.171149 17621 net.cpp:165] Memory required for data: 878081500\nI0817 16:05:12.171155 17621 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:05:12.171164 17621 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:05:12.171170 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:05:12.171177 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:05:12.171188 17621 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:05:12.171221 17621 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:05:12.171231 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.171234 17621 net.cpp:165] Memory required for data: 880129500\nI0817 16:05:12.171241 17621 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:05:12.171252 17621 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:05:12.171257 17621 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:05:12.171264 17621 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.171280 17621 net.cpp:150] Setting up L2_b1_relu\nI0817 16:05:12.171288 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.171293 17621 net.cpp:165] Memory required for data: 882177500\nI0817 16:05:12.171298 17621 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:05:12.171344 17621 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:05:12.171357 17621 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:05:12.173701 17621 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:05:12.173722 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.173727 17621 net.cpp:165] Memory required for data: 884225500\nI0817 16:05:12.173734 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:05:12.173744 17621 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:05:12.173750 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:05:12.173758 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:05:12.173768 17621 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:05:12.173843 17621 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:05:12.173859 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.173864 17621 net.cpp:165] Memory required for data: 888321500\nI0817 16:05:12.173871 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.173882 17621 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.173887 17621 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:05:12.173897 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:05:12.173909 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:05:12.173957 17621 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.173969 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.173976 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.173980 17621 net.cpp:165] Memory required for data: 896513500\nI0817 16:05:12.173985 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:05:12.174000 17621 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:05:12.174006 17621 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:05:12.174018 17621 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:05:12.175532 17621 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:05:12.175551 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.175556 17621 net.cpp:165] Memory required for data: 900609500\nI0817 16:05:12.175566 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:05:12.175580 17621 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:05:12.175587 17621 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:05:12.175595 17621 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:05:12.175853 17621 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:05:12.175868 17621 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:05:12.175873 17621 net.cpp:165] Memory required for data: 904705500\nI0817 16:05:12.175884 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:05:12.175894 17621 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:05:12.175900 17621 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:05:12.175910 17621 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.175967 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:05:12.176116 17621 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:05:12.176129 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.176134 17621 net.cpp:165] Memory required for data: 908801500\nI0817 16:05:12.176143 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:05:12.176151 17621 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:05:12.176157 17621 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:05:12.176164 17621 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.176185 17621 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:05:12.176193 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.176198 17621 net.cpp:165] Memory required for data: 912897500\nI0817 16:05:12.176203 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:05:12.176215 17621 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:05:12.176223 17621 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:05:12.176232 17621 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:05:12.176693 17621 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:05:12.176707 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.176713 17621 net.cpp:165] Memory required for data: 916993500\nI0817 16:05:12.176723 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:05:12.176738 17621 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:05:12.176743 
17621 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:05:12.176754 17621 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:05:12.176997 17621 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:05:12.177011 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177016 17621 net.cpp:165] Memory required for data: 921089500\nI0817 16:05:12.177026 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:05:12.177036 17621 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:05:12.177042 17621 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:05:12.177049 17621 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.177106 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:05:12.177249 17621 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:05:12.177264 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177270 17621 net.cpp:165] Memory required for data: 925185500\nI0817 16:05:12.177279 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:05:12.177289 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:05:12.177295 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:05:12.177302 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:05:12.177309 17621 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:05:12.177340 17621 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:05:12.177348 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177353 17621 net.cpp:165] Memory required for data: 929281500\nI0817 16:05:12.177358 17621 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:05:12.177366 17621 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:05:12.177372 17621 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:05:12.177382 17621 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.177392 17621 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:05:12.177398 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177403 17621 net.cpp:165] Memory required for data: 933377500\nI0817 16:05:12.177408 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.177415 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.177420 17621 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:05:12.177430 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:05:12.177440 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:05:12.177484 17621 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.177495 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177501 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.177506 17621 net.cpp:165] Memory required for data: 941569500\nI0817 16:05:12.177511 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:05:12.177532 17621 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:05:12.177541 17621 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:05:12.177549 17621 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:05:12.178048 17621 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:05:12.178064 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.178069 17621 net.cpp:165] Memory required for data: 945665500\nI0817 16:05:12.178079 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:05:12.178092 17621 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:05:12.178098 17621 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:05:12.178107 17621 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:05:12.178349 17621 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:05:12.178362 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.178367 17621 net.cpp:165] Memory required for data: 949761500\nI0817 16:05:12.178378 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:05:12.178387 17621 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:05:12.178393 17621 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:05:12.178400 17621 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.178457 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:05:12.178601 17621 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:05:12.178617 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.178622 17621 net.cpp:165] Memory required for data: 953857500\nI0817 16:05:12.178630 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:05:12.178638 17621 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:05:12.178644 17621 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:05:12.178652 17621 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.178660 17621 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:05:12.178668 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.178673 17621 net.cpp:165] Memory required for data: 957953500\nI0817 16:05:12.178676 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:05:12.178696 17621 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:05:12.178704 17621 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:05:12.178715 17621 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:05:12.179172 17621 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:05:12.179188 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.179193 17621 net.cpp:165] Memory required for data: 962049500\nI0817 16:05:12.179200 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:05:12.179213 17621 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:05:12.179219 17621 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:05:12.179230 17621 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:05:12.179472 17621 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:05:12.179484 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.179491 17621 net.cpp:165] Memory required for data: 966145500\nI0817 16:05:12.179500 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:05:12.179508 17621 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:05:12.179515 17621 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:05:12.179522 17621 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.179579 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:05:12.179734 17621 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:05:12.179749 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.179754 17621 net.cpp:165] Memory required for data: 970241500\nI0817 16:05:12.179762 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:05:12.179774 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:05:12.179781 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:05:12.179787 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:05:12.179802 17621 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:05:12.179831 17621 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:05:12.179839 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.179844 17621 net.cpp:165] Memory required for data: 974337500\nI0817 16:05:12.179849 17621 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:05:12.179877 17621 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:05:12.179883 17621 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:05:12.179890 17621 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.179899 17621 net.cpp:150] Setting up L2_b3_relu\nI0817 16:05:12.179906 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.179911 17621 net.cpp:165] Memory required for data: 978433500\nI0817 16:05:12.179916 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.179924 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.179929 17621 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:05:12.179935 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:05:12.179945 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:05:12.179996 17621 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.180007 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.180014 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.180018 17621 net.cpp:165] Memory required for data: 986625500\nI0817 16:05:12.180023 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:05:12.180038 17621 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:05:12.180044 17621 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:05:12.180053 17621 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:05:12.180517 17621 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:05:12.180531 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.180536 17621 net.cpp:165] Memory required for data: 990721500\nI0817 16:05:12.180546 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:05:12.180557 17621 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:05:12.180563 17621 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:05:12.180572 17621 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:05:12.180824 17621 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:05:12.180840 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.180846 17621 net.cpp:165] Memory required for data: 994817500\nI0817 16:05:12.180856 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:05:12.180866 17621 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:05:12.180871 17621 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:05:12.180878 17621 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.180932 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:05:12.181079 17621 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:05:12.181092 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.181097 17621 net.cpp:165] Memory required for data: 998913500\nI0817 16:05:12.181107 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:05:12.181114 17621 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:05:12.181120 17621 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:05:12.181130 17621 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.181140 17621 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:05:12.181147 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.181152 17621 net.cpp:165] Memory required for data: 1003009500\nI0817 16:05:12.181157 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:05:12.181177 17621 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:05:12.181185 17621 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:05:12.181193 17621 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:05:12.181646 17621 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:05:12.181660 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.181665 17621 net.cpp:165] Memory required for data: 1007105500\nI0817 16:05:12.181674 17621 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:05:12.181695 17621 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:05:12.181704 17621 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:05:12.181711 17621 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:05:12.181960 17621 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:05:12.181973 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.181978 17621 net.cpp:165] Memory required for data: 1011201500\nI0817 16:05:12.181989 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:05:12.182000 17621 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:05:12.182006 17621 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:05:12.182014 17621 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.182068 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:05:12.182214 17621 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:05:12.182227 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182232 17621 net.cpp:165] Memory required for data: 1015297500\nI0817 16:05:12.182241 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:05:12.182252 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:05:12.182260 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:05:12.182265 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:05:12.182276 17621 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:05:12.182302 17621 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:05:12.182312 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182317 17621 net.cpp:165] Memory required for data: 1019393500\nI0817 16:05:12.182322 17621 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:05:12.182329 17621 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:05:12.182334 17621 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:05:12.182344 17621 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.182354 17621 net.cpp:150] Setting up L2_b4_relu\nI0817 16:05:12.182361 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182365 17621 net.cpp:165] Memory required for data: 1023489500\nI0817 16:05:12.182370 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.182377 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.182382 17621 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:05:12.182389 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:05:12.182399 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:05:12.182446 17621 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.182457 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182463 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182468 17621 net.cpp:165] Memory required for data: 1031681500\nI0817 16:05:12.182473 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:05:12.182484 17621 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:05:12.182490 17621 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:05:12.182502 17621 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:05:12.182968 17621 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:05:12.182989 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.182994 17621 net.cpp:165] Memory required for data: 1035777500\nI0817 16:05:12.183003 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:05:12.183015 17621 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:05:12.183022 17621 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:05:12.183030 17621 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:05:12.183279 17621 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:05:12.183297 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.183302 17621 net.cpp:165] Memory required for data: 1039873500\nI0817 16:05:12.183313 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:05:12.183322 17621 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:05:12.183328 17621 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:05:12.183336 17621 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.183389 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:05:12.183540 17621 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:05:12.183553 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.183558 17621 net.cpp:165] Memory required for data: 1043969500\nI0817 16:05:12.183567 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:05:12.183575 17621 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:05:12.183581 17621 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:05:12.183591 17621 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.183600 17621 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:05:12.183607 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.183612 17621 net.cpp:165] Memory required for data: 1048065500\nI0817 16:05:12.183617 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:05:12.183629 17621 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:05:12.183634 17621 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:05:12.183645 17621 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:05:12.184110 17621 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:05:12.184124 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184130 17621 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:05:12.184139 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:05:12.184149 17621 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:05:12.184154 17621 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:05:12.184165 17621 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:05:12.184411 17621 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:05:12.184423 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184428 17621 net.cpp:165] Memory required for data: 1056257500\nI0817 16:05:12.184438 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:05:12.184450 17621 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:05:12.184456 17621 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:05:12.184464 17621 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.184518 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:05:12.184665 17621 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:05:12.184679 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184689 17621 net.cpp:165] Memory required for data: 1060353500\nI0817 16:05:12.184698 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:05:12.184711 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:05:12.184717 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:05:12.184725 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:05:12.184734 17621 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:05:12.184762 17621 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:05:12.184772 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184783 17621 net.cpp:165] Memory required for data: 1064449500\nI0817 16:05:12.184789 17621 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:05:12.184797 17621 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:05:12.184803 17621 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:05:12.184814 17621 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.184824 17621 net.cpp:150] Setting up L2_b5_relu\nI0817 16:05:12.184831 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184835 17621 net.cpp:165] Memory required for data: 1068545500\nI0817 16:05:12.184840 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.184847 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.184854 17621 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:05:12.184860 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:05:12.184870 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:05:12.184917 17621 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.184929 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184936 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.184940 17621 net.cpp:165] Memory required for data: 1076737500\nI0817 16:05:12.184945 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:05:12.184957 17621 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:05:12.184962 17621 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:05:12.184973 17621 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:05:12.185439 17621 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:05:12.185454 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.185458 17621 net.cpp:165] Memory required for data: 1080833500\nI0817 16:05:12.185468 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:05:12.185480 17621 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:05:12.185487 17621 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:05:12.185494 17621 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:05:12.185755 17621 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:05:12.185770 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.185775 17621 net.cpp:165] Memory required for data: 1084929500\nI0817 16:05:12.185786 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:05:12.185796 17621 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:05:12.185802 17621 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:05:12.185811 17621 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.185865 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:05:12.186012 17621 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:05:12.186025 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.186030 17621 net.cpp:165] Memory required for data: 1089025500\nI0817 16:05:12.186039 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:05:12.186049 17621 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:05:12.186056 17621 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:05:12.186065 17621 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.186075 17621 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:05:12.186082 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.186087 17621 net.cpp:165] Memory required for data: 1093121500\nI0817 16:05:12.186092 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:05:12.186102 17621 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:05:12.186108 17621 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:05:12.186120 17621 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:05:12.186574 17621 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:05:12.186595 17621 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.186600 17621 net.cpp:165] Memory required for data: 1097217500\nI0817 16:05:12.186609 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:05:12.186619 17621 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:05:12.186625 17621 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:05:12.186636 17621 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:05:12.186890 17621 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:05:12.186904 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.186909 17621 net.cpp:165] Memory required for data: 1101313500\nI0817 16:05:12.186920 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:05:12.186931 17621 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:05:12.186938 17621 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:05:12.186945 17621 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.187000 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:05:12.187146 17621 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:05:12.187160 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187165 17621 net.cpp:165] Memory required for data: 1105409500\nI0817 16:05:12.187172 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:05:12.187185 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:05:12.187191 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:05:12.187198 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:05:12.187206 17621 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:05:12.187234 17621 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:05:12.187244 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187248 17621 net.cpp:165] Memory required for data: 1109505500\nI0817 16:05:12.187253 17621 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:05:12.187261 17621 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:05:12.187266 17621 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:05:12.187276 17621 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.187286 17621 net.cpp:150] Setting up L2_b6_relu\nI0817 16:05:12.187294 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187297 17621 net.cpp:165] Memory required for data: 1113601500\nI0817 16:05:12.187302 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.187309 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.187314 17621 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:05:12.187321 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:05:12.187330 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:05:12.187376 17621 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.187388 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187396 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187399 17621 net.cpp:165] Memory required for data: 1121793500\nI0817 16:05:12.187405 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:05:12.187417 17621 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:05:12.187422 17621 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:05:12.187434 17621 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:05:12.187906 17621 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:05:12.187922 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.187927 17621 net.cpp:165] Memory required for data: 1125889500\nI0817 16:05:12.187935 17621 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:05:12.187944 17621 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:05:12.187958 17621 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:05:12.187968 17621 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:05:12.188220 17621 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:05:12.188235 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.188239 17621 net.cpp:165] Memory required for data: 1129985500\nI0817 16:05:12.188249 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:05:12.188261 17621 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:05:12.188266 17621 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:05:12.188274 17621 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.188329 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:05:12.188480 17621 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:05:12.188493 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.188498 17621 net.cpp:165] Memory required for data: 1134081500\nI0817 16:05:12.188508 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:05:12.188519 17621 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:05:12.188525 17621 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:05:12.188534 17621 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.188544 17621 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:05:12.188549 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.188555 17621 net.cpp:165] Memory required for data: 1138177500\nI0817 16:05:12.188560 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:05:12.188573 17621 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:05:12.188580 17621 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:05:12.188591 17621 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:05:12.189064 17621 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:05:12.189077 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189082 17621 net.cpp:165] Memory required for data: 1142273500\nI0817 16:05:12.189091 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:05:12.189100 17621 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:05:12.189106 17621 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:05:12.189118 17621 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:05:12.189368 17621 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:05:12.189380 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189385 17621 net.cpp:165] Memory required for data: 1146369500\nI0817 16:05:12.189395 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:05:12.189409 17621 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:05:12.189414 17621 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:05:12.189422 17621 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.189476 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:05:12.189626 17621 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:05:12.189640 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189644 17621 net.cpp:165] Memory required for data: 1150465500\nI0817 16:05:12.189653 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:05:12.189664 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:05:12.189671 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:05:12.189678 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:05:12.189692 17621 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:05:12.189723 17621 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:05:12.189733 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189738 17621 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:05:12.189743 17621 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:05:12.189751 17621 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:05:12.189756 17621 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:05:12.189770 17621 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.189784 17621 net.cpp:150] Setting up L2_b7_relu\nI0817 16:05:12.189790 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189795 17621 net.cpp:165] Memory required for data: 1158657500\nI0817 16:05:12.189800 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.189806 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.189812 17621 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:05:12.189820 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:05:12.189829 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:05:12.189878 17621 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.189890 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189896 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.189901 17621 net.cpp:165] Memory required for data: 1166849500\nI0817 16:05:12.189906 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:05:12.189918 17621 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:05:12.189924 17621 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:05:12.189937 17621 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:05:12.190405 17621 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:05:12.190419 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.190424 17621 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:05:12.190433 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:05:12.190443 17621 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:05:12.190448 17621 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:05:12.190459 17621 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:05:12.190717 17621 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:05:12.190732 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.190737 17621 net.cpp:165] Memory required for data: 1175041500\nI0817 16:05:12.190747 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:05:12.190758 17621 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:05:12.190764 17621 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:05:12.190773 17621 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.190826 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:05:12.190978 17621 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:05:12.190990 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.190995 17621 net.cpp:165] Memory required for data: 1179137500\nI0817 16:05:12.191004 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:05:12.191015 17621 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:05:12.191021 17621 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:05:12.191028 17621 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.191037 17621 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:05:12.191045 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.191049 17621 net.cpp:165] Memory required for data: 1183233500\nI0817 16:05:12.191054 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:05:12.191067 17621 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:05:12.191074 17621 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:05:12.191084 17621 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:05:12.191547 17621 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:05:12.191562 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.191567 17621 net.cpp:165] Memory required for data: 1187329500\nI0817 16:05:12.191576 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:05:12.191586 17621 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:05:12.191599 17621 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:05:12.191609 17621 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:05:12.191874 17621 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:05:12.191886 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.191891 17621 net.cpp:165] Memory required for data: 1191425500\nI0817 16:05:12.191902 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:05:12.191915 17621 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:05:12.191921 17621 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:05:12.191928 17621 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.191985 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:05:12.192133 17621 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:05:12.192147 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.192152 17621 net.cpp:165] Memory required for data: 1195521500\nI0817 16:05:12.192160 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:05:12.192168 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:05:12.192175 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:05:12.192181 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:05:12.192193 17621 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:05:12.192219 17621 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:05:12.192232 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:05:12.192236 17621 net.cpp:165] Memory required for data: 1199617500\nI0817 16:05:12.192242 17621 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:05:12.192250 17621 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:05:12.192255 17621 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:05:12.192262 17621 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.192271 17621 net.cpp:150] Setting up L2_b8_relu\nI0817 16:05:12.192278 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.192282 17621 net.cpp:165] Memory required for data: 1203713500\nI0817 16:05:12.192287 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.192297 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.192303 17621 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:05:12.192311 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:05:12.192334 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:05:12.192384 17621 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.192399 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.192406 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.192411 17621 net.cpp:165] Memory required for data: 1211905500\nI0817 16:05:12.192416 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:05:12.192430 17621 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:05:12.192437 17621 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:05:12.192446 17621 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:05:12.192922 17621 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:05:12.192937 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:05:12.192942 17621 net.cpp:165] Memory required for data: 1216001500\nI0817 16:05:12.192951 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:05:12.192963 17621 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:05:12.192970 17621 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:05:12.192978 17621 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:05:12.193228 17621 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:05:12.193240 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.193245 17621 net.cpp:165] Memory required for data: 1220097500\nI0817 16:05:12.193264 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:05:12.193274 17621 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:05:12.193279 17621 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:05:12.193289 17621 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.193346 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:05:12.193502 17621 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:05:12.193516 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.193521 17621 net.cpp:165] Memory required for data: 1224193500\nI0817 16:05:12.193529 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:05:12.193537 17621 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:05:12.193543 17621 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:05:12.193553 17621 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.193563 17621 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:05:12.193570 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.193575 17621 net.cpp:165] Memory required for data: 1228289500\nI0817 16:05:12.193580 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:05:12.193594 17621 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:05:12.193600 17621 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:05:12.193608 17621 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:05:12.194077 17621 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:05:12.194092 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194097 17621 net.cpp:165] Memory required for data: 1232385500\nI0817 16:05:12.194106 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:05:12.194118 17621 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:05:12.194125 17621 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:05:12.194133 17621 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:05:12.194382 17621 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:05:12.194396 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194401 17621 net.cpp:165] Memory required for data: 1236481500\nI0817 16:05:12.194444 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:05:12.194458 17621 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:05:12.194463 17621 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:05:12.194471 17621 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.194530 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:05:12.194680 17621 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:05:12.194700 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194705 17621 net.cpp:165] Memory required for data: 1240577500\nI0817 16:05:12.194713 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:05:12.194723 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:05:12.194730 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:05:12.194736 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:05:12.194747 17621 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:05:12.194775 17621 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:05:12.194784 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194789 17621 net.cpp:165] Memory required for data: 1244673500\nI0817 16:05:12.194794 17621 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:05:12.194805 17621 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:05:12.194811 17621 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:05:12.194818 17621 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.194828 17621 net.cpp:150] Setting up L2_b9_relu\nI0817 16:05:12.194834 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194839 17621 net.cpp:165] Memory required for data: 1248769500\nI0817 16:05:12.194844 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.194864 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.194869 17621 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:05:12.194877 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:05:12.194886 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:05:12.194934 17621 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.194947 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194953 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.194957 17621 net.cpp:165] Memory required for data: 1256961500\nI0817 16:05:12.194962 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:05:12.194977 17621 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:05:12.194983 17621 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:05:12.194993 17621 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:05:12.195459 17621 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:05:12.195474 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.195480 17621 net.cpp:165] Memory required for data: 1257985500\nI0817 16:05:12.195488 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:05:12.195500 17621 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:05:12.195508 17621 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:05:12.195515 17621 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:05:12.195782 17621 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:05:12.195796 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.195801 17621 net.cpp:165] Memory required for data: 1259009500\nI0817 16:05:12.195812 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:05:12.195821 17621 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:05:12.195827 17621 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:05:12.195834 17621 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.195891 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:05:12.196045 17621 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:05:12.196058 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.196063 17621 net.cpp:165] Memory required for data: 1260033500\nI0817 16:05:12.196072 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:05:12.196080 17621 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:05:12.196086 17621 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:05:12.196096 17621 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.196106 17621 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:05:12.196113 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.196117 17621 net.cpp:165] Memory required for data: 1261057500\nI0817 16:05:12.196122 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:05:12.196136 17621 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:05:12.196142 17621 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:05:12.196151 17621 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:05:12.196619 17621 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:05:12.196633 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.196638 17621 net.cpp:165] Memory required for data: 1262081500\nI0817 16:05:12.196647 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:05:12.196660 17621 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:05:12.196666 17621 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:05:12.196674 17621 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:05:12.196943 17621 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:05:12.196957 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.196962 17621 net.cpp:165] Memory required for data: 1263105500\nI0817 16:05:12.196979 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:05:12.196991 17621 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:05:12.196997 17621 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:05:12.197005 17621 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.197063 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:05:12.197221 17621 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:05:12.197234 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.197239 17621 net.cpp:165] Memory required for data: 1264129500\nI0817 16:05:12.197248 17621 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:05:12.197260 17621 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:05:12.197266 17621 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:05:12.197278 17621 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:05:12.197311 17621 net.cpp:150] Setting up L3_b1_pool\nI0817 16:05:12.197321 17621 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:05:12.197325 17621 net.cpp:165] Memory required for data: 1265153500\nI0817 16:05:12.197331 17621 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:05:12.197343 17621 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:05:12.197350 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:05:12.197355 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:05:12.197363 17621 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:05:12.197394 17621 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:05:12.197404 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.197408 17621 net.cpp:165] Memory required for data: 1266177500\nI0817 16:05:12.197413 17621 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:05:12.197422 17621 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:05:12.197427 17621 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:05:12.197438 17621 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.197446 17621 net.cpp:150] Setting up L3_b1_relu\nI0817 16:05:12.197453 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.197458 17621 net.cpp:165] Memory required for data: 1267201500\nI0817 16:05:12.197463 17621 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:05:12.197473 17621 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:05:12.197479 17621 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:05:12.198704 17621 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:05:12.198721 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.198726 17621 net.cpp:165] Memory required for data: 1268225500\nI0817 16:05:12.198732 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:05:12.198745 17621 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:05:12.198751 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:05:12.198760 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:05:12.198767 17621 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:05:12.198810 17621 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:05:12.198822 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.198827 17621 net.cpp:165] Memory required for data: 1270273500\nI0817 16:05:12.198832 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.198840 17621 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.198845 17621 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:05:12.198856 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:05:12.198866 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:05:12.198915 17621 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.198930 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.198936 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.198951 17621 net.cpp:165] Memory required for data: 1274369500\nI0817 16:05:12.198956 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:05:12.198967 17621 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:05:12.198974 17621 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:05:12.198983 17621 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:05:12.200968 17621 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:05:12.200984 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.200990 17621 net.cpp:165] Memory required for data: 1276417500\nI0817 16:05:12.200999 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:05:12.201012 17621 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:05:12.201020 17621 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:05:12.201030 17621 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:05:12.201292 17621 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:05:12.201305 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.201310 17621 net.cpp:165] Memory required for data: 1278465500\nI0817 16:05:12.201320 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:05:12.201329 17621 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:05:12.201336 17621 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:05:12.201347 17621 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.201406 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:05:12.201561 17621 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:05:12.201575 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.201580 17621 net.cpp:165] Memory required for data: 1280513500\nI0817 16:05:12.201588 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:05:12.201596 17621 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:05:12.201603 17621 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:05:12.201613 17621 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.201624 17621 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:05:12.201632 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.201637 17621 net.cpp:165] Memory required for data: 1282561500\nI0817 16:05:12.201640 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:05:12.201655 17621 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:05:12.201660 17621 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:05:12.201669 17621 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:05:12.202694 17621 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:05:12.202709 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.202714 17621 net.cpp:165] Memory required for data: 1284609500\nI0817 16:05:12.202724 17621 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:05:12.202736 17621 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:05:12.202744 17621 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:05:12.202751 17621 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:05:12.203016 17621 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:05:12.203029 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203034 17621 net.cpp:165] Memory required for data: 1286657500\nI0817 16:05:12.203044 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:05:12.203057 17621 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:05:12.203063 17621 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:05:12.203073 17621 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.203130 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:05:12.203286 17621 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:05:12.203299 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203305 17621 net.cpp:165] Memory required for data: 1288705500\nI0817 16:05:12.203313 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:05:12.203322 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:05:12.203336 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:05:12.203344 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:05:12.203356 17621 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:05:12.203390 17621 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:05:12.203399 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203404 17621 net.cpp:165] Memory required for data: 1290753500\nI0817 16:05:12.203409 17621 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:05:12.203421 17621 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:05:12.203428 17621 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:05:12.203435 17621 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.203444 17621 net.cpp:150] Setting up L3_b2_relu\nI0817 16:05:12.203451 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203456 17621 net.cpp:165] Memory required for data: 1292801500\nI0817 16:05:12.203460 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.203469 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.203474 17621 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:05:12.203481 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:05:12.203491 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:05:12.203541 17621 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.203552 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203558 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.203563 17621 net.cpp:165] Memory required for data: 1296897500\nI0817 16:05:12.203568 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:05:12.203583 17621 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:05:12.203589 17621 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:05:12.203598 17621 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:05:12.204617 17621 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:05:12.204632 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.204638 17621 net.cpp:165] Memory required for data: 1298945500\nI0817 16:05:12.204648 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:05:12.204659 17621 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:05:12.204665 17621 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:05:12.204676 17621 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:05:12.204944 17621 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:05:12.204957 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.204962 17621 net.cpp:165] Memory required for data: 1300993500\nI0817 16:05:12.204973 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:05:12.204982 17621 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:05:12.204988 17621 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:05:12.205000 17621 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.205057 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:05:12.205214 17621 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:05:12.205229 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.205234 17621 net.cpp:165] Memory required for data: 1303041500\nI0817 16:05:12.205242 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:05:12.205250 17621 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:05:12.205256 17621 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:05:12.205266 17621 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.205276 17621 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:05:12.205284 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.205296 17621 net.cpp:165] Memory required for data: 1305089500\nI0817 16:05:12.205301 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:05:12.205314 17621 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:05:12.205320 17621 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:05:12.205332 17621 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:05:12.206351 17621 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:05:12.206367 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.206372 17621 net.cpp:165] Memory required for data: 1307137500\nI0817 16:05:12.206380 17621 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:05:12.206389 17621 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:05:12.206396 17621 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:05:12.206408 17621 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:05:12.206676 17621 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:05:12.206696 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.206701 17621 net.cpp:165] Memory required for data: 1309185500\nI0817 16:05:12.206712 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:05:12.206723 17621 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:05:12.206729 17621 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:05:12.206737 17621 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.206794 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:05:12.206956 17621 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:05:12.206970 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.206975 17621 net.cpp:165] Memory required for data: 1311233500\nI0817 16:05:12.206984 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:05:12.206993 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:05:12.207000 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:05:12.207007 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:05:12.207018 17621 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:05:12.207051 17621 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:05:12.207063 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.207067 17621 net.cpp:165] Memory required for data: 1313281500\nI0817 16:05:12.207072 17621 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:05:12.207083 17621 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:05:12.207089 17621 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:05:12.207096 17621 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.207105 17621 net.cpp:150] Setting up L3_b3_relu\nI0817 16:05:12.207113 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.207118 17621 net.cpp:165] Memory required for data: 1315329500\nI0817 16:05:12.207123 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.207129 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.207134 17621 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:05:12.207141 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:05:12.207151 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:05:12.207201 17621 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.207212 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.207218 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.207223 17621 net.cpp:165] Memory required for data: 1319425500\nI0817 16:05:12.207228 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:05:12.207242 17621 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:05:12.207248 17621 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:05:12.207257 17621 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:05:12.208283 17621 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:05:12.208298 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.208303 17621 net.cpp:165] Memory required for data: 1321473500\nI0817 16:05:12.208312 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:05:12.208328 17621 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:05:12.208334 17621 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:05:12.208345 17621 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:05:12.208613 17621 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:05:12.208627 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.208632 17621 net.cpp:165] Memory required for data: 1323521500\nI0817 16:05:12.208642 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:05:12.208652 17621 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:05:12.208657 17621 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:05:12.208667 17621 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.208732 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:05:12.208890 17621 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:05:12.208904 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.208909 17621 net.cpp:165] Memory required for data: 1325569500\nI0817 16:05:12.208917 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:05:12.208928 17621 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:05:12.208935 17621 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:05:12.208942 17621 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.208951 17621 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:05:12.208958 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.208963 17621 net.cpp:165] Memory required for data: 1327617500\nI0817 16:05:12.208968 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:05:12.208982 17621 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:05:12.208988 17621 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:05:12.208998 17621 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:05:12.210031 17621 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:05:12.210047 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210052 17621 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:05:12.210060 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:05:12.210069 17621 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:05:12.210077 17621 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:05:12.210088 17621 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:05:12.210358 17621 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:05:12.210374 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210379 17621 net.cpp:165] Memory required for data: 1331713500\nI0817 16:05:12.210389 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:05:12.210398 17621 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:05:12.210404 17621 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:05:12.210412 17621 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.210471 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:05:12.210634 17621 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:05:12.210647 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210652 17621 net.cpp:165] Memory required for data: 1333761500\nI0817 16:05:12.210661 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:05:12.210674 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:05:12.210680 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:05:12.210693 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:05:12.210701 17621 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:05:12.210739 17621 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:05:12.210757 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210762 17621 net.cpp:165] Memory required for data: 1335809500\nI0817 16:05:12.210767 17621 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:05:12.210774 17621 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:05:12.210779 17621 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:05:12.210788 17621 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.210798 17621 net.cpp:150] Setting up L3_b4_relu\nI0817 16:05:12.210804 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210808 17621 net.cpp:165] Memory required for data: 1337857500\nI0817 16:05:12.210813 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.210820 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.210826 17621 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:05:12.210836 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:05:12.210846 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:05:12.210893 17621 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.210906 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210911 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.210916 17621 net.cpp:165] Memory required for data: 1341953500\nI0817 16:05:12.210922 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:05:12.210937 17621 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:05:12.210943 17621 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:05:12.210952 17621 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:05:12.211982 17621 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:05:12.211998 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.212003 17621 net.cpp:165] Memory required for data: 1344001500\nI0817 16:05:12.212011 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:05:12.212023 17621 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:05:12.212030 17621 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:05:12.212038 17621 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:05:12.213290 17621 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:05:12.213310 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.213316 17621 net.cpp:165] Memory required for data: 1346049500\nI0817 16:05:12.213328 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:05:12.213337 17621 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:05:12.213345 17621 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:05:12.213356 17621 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.213416 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:05:12.213574 17621 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:05:12.213587 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.213593 17621 net.cpp:165] Memory required for data: 1348097500\nI0817 16:05:12.213603 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:05:12.213610 17621 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:05:12.213620 17621 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:05:12.213627 17621 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.213637 17621 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:05:12.213644 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.213649 17621 net.cpp:165] Memory required for data: 1350145500\nI0817 16:05:12.213654 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:05:12.213668 17621 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:05:12.213675 17621 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:05:12.213688 17621 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:05:12.215986 17621 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:05:12.216007 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:05:12.216013 17621 net.cpp:165] Memory required for data: 1352193500\nI0817 16:05:12.216022 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:05:12.216033 17621 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:05:12.216040 17621 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:05:12.216051 17621 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:05:12.216315 17621 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:05:12.216331 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216336 17621 net.cpp:165] Memory required for data: 1354241500\nI0817 16:05:12.216346 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:05:12.216356 17621 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:05:12.216361 17621 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:05:12.216369 17621 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.216428 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:05:12.216578 17621 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:05:12.216591 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216596 17621 net.cpp:165] Memory required for data: 1356289500\nI0817 16:05:12.216605 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:05:12.216615 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:05:12.216625 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:05:12.216632 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:05:12.216640 17621 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:05:12.216675 17621 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:05:12.216692 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216697 17621 net.cpp:165] Memory required for data: 1358337500\nI0817 16:05:12.216703 17621 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:05:12.216711 17621 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:05:12.216717 17621 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:05:12.216724 17621 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.216734 17621 net.cpp:150] Setting up L3_b5_relu\nI0817 16:05:12.216742 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216745 17621 net.cpp:165] Memory required for data: 1360385500\nI0817 16:05:12.216750 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.216758 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.216763 17621 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:05:12.216773 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:05:12.216784 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:05:12.216830 17621 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.216840 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216847 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.216851 17621 net.cpp:165] Memory required for data: 1364481500\nI0817 16:05:12.216856 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:05:12.216871 17621 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:05:12.216877 17621 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:05:12.216887 17621 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:05:12.217900 17621 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:05:12.217916 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.217921 17621 net.cpp:165] Memory required for data: 1366529500\nI0817 16:05:12.217931 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:05:12.217939 
17621 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:05:12.217953 17621 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:05:12.217965 17621 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:05:12.218224 17621 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:05:12.218236 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.218241 17621 net.cpp:165] Memory required for data: 1368577500\nI0817 16:05:12.218252 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:05:12.218261 17621 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:05:12.218267 17621 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:05:12.218277 17621 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.218334 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:05:12.218489 17621 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:05:12.218503 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.218508 17621 net.cpp:165] Memory required for data: 1370625500\nI0817 16:05:12.218518 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:05:12.218528 17621 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:05:12.218535 17621 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:05:12.218542 17621 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.218552 17621 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:05:12.218559 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.218564 17621 net.cpp:165] Memory required for data: 1372673500\nI0817 16:05:12.218569 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:05:12.218583 17621 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:05:12.218590 17621 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:05:12.218600 17621 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:05:12.219607 17621 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:05:12.219622 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.219629 17621 net.cpp:165] Memory required for data: 1374721500\nI0817 16:05:12.219637 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:05:12.219646 17621 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:05:12.219652 17621 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:05:12.219663 17621 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:05:12.219930 17621 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:05:12.219946 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.219951 17621 net.cpp:165] Memory required for data: 1376769500\nI0817 16:05:12.219962 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:05:12.219971 17621 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:05:12.219977 17621 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:05:12.219985 17621 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.220041 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:05:12.220198 17621 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:05:12.220211 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.220216 17621 net.cpp:165] Memory required for data: 1378817500\nI0817 16:05:12.220226 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:05:12.220237 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:05:12.220244 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:05:12.220252 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:05:12.220259 17621 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:05:12.220295 17621 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:05:12.220306 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.220311 17621 net.cpp:165] Memory required for data: 1380865500\nI0817 16:05:12.220316 17621 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:05:12.220324 17621 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:05:12.220330 17621 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:05:12.220343 17621 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.220353 17621 net.cpp:150] Setting up L3_b6_relu\nI0817 16:05:12.220361 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.220366 17621 net.cpp:165] Memory required for data: 1382913500\nI0817 16:05:12.220371 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.220378 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.220383 17621 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:05:12.220393 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:05:12.220403 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:05:12.220449 17621 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.220461 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.220468 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.220473 17621 net.cpp:165] Memory required for data: 1387009500\nI0817 16:05:12.220477 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:05:12.220491 17621 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:05:12.220499 17621 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:05:12.220507 17621 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:05:12.221525 17621 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:05:12.221541 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.221546 17621 net.cpp:165] Memory required for data: 1389057500\nI0817 16:05:12.221555 17621 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:05:12.221567 17621 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:05:12.221575 17621 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:05:12.221582 17621 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:05:12.221848 17621 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:05:12.221863 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.221866 17621 net.cpp:165] Memory required for data: 1391105500\nI0817 16:05:12.221877 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:05:12.221889 17621 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:05:12.221895 17621 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:05:12.221904 17621 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.221963 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:05:12.222118 17621 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:05:12.222132 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.222137 17621 net.cpp:165] Memory required for data: 1393153500\nI0817 16:05:12.222146 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:05:12.222183 17621 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:05:12.222193 17621 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:05:12.222200 17621 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.222210 17621 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:05:12.222218 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.222223 17621 net.cpp:165] Memory required for data: 1395201500\nI0817 16:05:12.222228 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:05:12.222239 17621 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:05:12.222245 17621 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:05:12.222254 17621 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:05:12.223281 17621 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:05:12.223296 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.223301 17621 net.cpp:165] Memory required for data: 1397249500\nI0817 16:05:12.223310 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:05:12.223322 17621 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:05:12.223336 17621 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:05:12.223347 17621 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:05:12.223614 17621 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:05:12.223628 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.223633 17621 net.cpp:165] Memory required for data: 1399297500\nI0817 16:05:12.223642 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:05:12.223651 17621 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:05:12.223657 17621 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:05:12.223668 17621 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.223733 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:05:12.223891 17621 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:05:12.223906 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.223911 17621 net.cpp:165] Memory required for data: 1401345500\nI0817 16:05:12.223919 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:05:12.223932 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:05:12.223938 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:05:12.223945 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:05:12.223953 17621 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:05:12.223990 17621 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:05:12.224002 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.224007 17621 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:05:12.224012 17621 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:05:12.224020 17621 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:05:12.224026 17621 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:05:12.224036 17621 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.224045 17621 net.cpp:150] Setting up L3_b7_relu\nI0817 16:05:12.224053 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.224057 17621 net.cpp:165] Memory required for data: 1405441500\nI0817 16:05:12.224062 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.224069 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.224074 17621 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:05:12.224082 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:05:12.224092 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:05:12.224140 17621 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.224151 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.224159 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.224162 17621 net.cpp:165] Memory required for data: 1409537500\nI0817 16:05:12.224169 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:05:12.224179 17621 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:05:12.224185 17621 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:05:12.224197 17621 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:05:12.225214 17621 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:05:12.225229 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.225234 17621 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:05:12.225244 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:05:12.225256 17621 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:05:12.225262 17621 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:05:12.225271 17621 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:05:12.225540 17621 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:05:12.225553 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.225558 17621 net.cpp:165] Memory required for data: 1413633500\nI0817 16:05:12.225576 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:05:12.225585 17621 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:05:12.225592 17621 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:05:12.225600 17621 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.225659 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:05:12.225819 17621 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:05:12.225836 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.225841 17621 net.cpp:165] Memory required for data: 1415681500\nI0817 16:05:12.225850 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:05:12.225858 17621 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:05:12.225864 17621 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:05:12.225872 17621 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.225881 17621 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:05:12.225888 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.225893 17621 net.cpp:165] Memory required for data: 1417729500\nI0817 16:05:12.225898 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:05:12.225911 17621 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:05:12.225917 17621 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:05:12.225929 17621 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:05:12.226948 17621 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:05:12.226963 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.226969 17621 net.cpp:165] Memory required for data: 1419777500\nI0817 16:05:12.226977 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:05:12.226989 17621 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:05:12.226996 17621 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:05:12.227005 17621 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:05:12.227267 17621 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:05:12.227279 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.227284 17621 net.cpp:165] Memory required for data: 1421825500\nI0817 16:05:12.227295 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:05:12.227306 17621 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:05:12.227313 17621 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:05:12.227320 17621 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.227377 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:05:12.227535 17621 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:05:12.227550 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.227555 17621 net.cpp:165] Memory required for data: 1423873500\nI0817 16:05:12.227563 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:05:12.227576 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:05:12.227582 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:05:12.227589 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:05:12.227597 17621 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:05:12.227633 17621 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:05:12.227644 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:05:12.227649 17621 net.cpp:165] Memory required for data: 1425921500\nI0817 16:05:12.227654 17621 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:05:12.227661 17621 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:05:12.227668 17621 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:05:12.227677 17621 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.227692 17621 net.cpp:150] Setting up L3_b8_relu\nI0817 16:05:12.227700 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.227705 17621 net.cpp:165] Memory required for data: 1427969500\nI0817 16:05:12.227710 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.227725 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.227730 17621 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:05:12.227737 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:05:12.227747 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:05:12.227798 17621 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.227810 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.227816 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.227821 17621 net.cpp:165] Memory required for data: 1432065500\nI0817 16:05:12.227826 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:05:12.227838 17621 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:05:12.227844 17621 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:05:12.227856 17621 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:05:12.229919 17621 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:05:12.229938 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:05:12.229943 17621 net.cpp:165] Memory required for data: 1434113500\nI0817 16:05:12.229953 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:05:12.229965 17621 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:05:12.229972 17621 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:05:12.229981 17621 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:05:12.230248 17621 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:05:12.230262 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.230267 17621 net.cpp:165] Memory required for data: 1436161500\nI0817 16:05:12.230278 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:05:12.230288 17621 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:05:12.230295 17621 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:05:12.230303 17621 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.230368 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:05:12.230530 17621 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:05:12.230543 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.230550 17621 net.cpp:165] Memory required for data: 1438209500\nI0817 16:05:12.230558 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:05:12.230566 17621 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:05:12.230572 17621 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:05:12.230583 17621 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.230593 17621 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:05:12.230600 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.230605 17621 net.cpp:165] Memory required for data: 1440257500\nI0817 16:05:12.230610 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:05:12.230621 17621 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:05:12.230628 17621 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:05:12.230638 17621 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:05:12.231657 17621 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:05:12.231673 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.231678 17621 net.cpp:165] Memory required for data: 1442305500\nI0817 16:05:12.231691 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:05:12.231705 17621 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:05:12.231712 17621 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:05:12.231720 17621 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:05:12.231987 17621 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:05:12.232007 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.232012 17621 net.cpp:165] Memory required for data: 1444353500\nI0817 16:05:12.232030 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:05:12.232039 17621 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:05:12.232046 17621 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:05:12.232053 17621 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.232117 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:05:12.232270 17621 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:05:12.232285 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.232290 17621 net.cpp:165] Memory required for data: 1446401500\nI0817 16:05:12.232300 17621 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:05:12.232308 17621 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:05:12.232316 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:05:12.232322 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:05:12.232332 17621 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:05:12.232365 17621 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:05:12.232378 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.232383 17621 net.cpp:165] Memory required for data: 1448449500\nI0817 16:05:12.232388 17621 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:05:12.232398 17621 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:05:12.232404 17621 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:05:12.232411 17621 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.232420 17621 net.cpp:150] Setting up L3_b9_relu\nI0817 16:05:12.232427 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.232432 17621 net.cpp:165] Memory required for data: 1450497500\nI0817 16:05:12.232437 17621 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:05:12.232472 17621 net.cpp:100] Creating Layer post_pool\nI0817 16:05:12.232482 17621 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:05:12.232491 17621 net.cpp:408] post_pool -> post_pool\nI0817 16:05:12.232527 17621 net.cpp:150] Setting up post_pool\nI0817 16:05:12.232539 17621 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:05:12.232544 17621 net.cpp:165] Memory required for data: 1450529500\nI0817 16:05:12.232549 17621 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:05:12.232632 17621 net.cpp:100] Creating Layer post_FC\nI0817 16:05:12.232645 17621 net.cpp:434] post_FC <- post_pool\nI0817 16:05:12.232661 17621 net.cpp:408] post_FC -> post_FC_top\nI0817 16:05:12.232914 17621 net.cpp:150] Setting up post_FC\nI0817 16:05:12.232931 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.232936 17621 net.cpp:165] Memory required for data: 1450534500\nI0817 16:05:12.232946 17621 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:05:12.232954 17621 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:05:12.232960 17621 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:05:12.232971 17621 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:05:12.232982 17621 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:05:12.233033 17621 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:05:12.233045 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.233052 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.233057 17621 net.cpp:165] Memory required for data: 1450544500\nI0817 16:05:12.233062 17621 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:05:12.233104 17621 net.cpp:100] Creating Layer accuracy\nI0817 16:05:12.233115 17621 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:05:12.233124 17621 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:05:12.233131 17621 net.cpp:408] accuracy -> accuracy\nI0817 16:05:12.233172 17621 net.cpp:150] Setting up accuracy\nI0817 16:05:12.233186 17621 net.cpp:157] Top shape: (1)\nI0817 16:05:12.233191 17621 net.cpp:165] Memory required for data: 1450544504\nI0817 16:05:12.233196 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:05:12.233217 17621 net.cpp:100] Creating Layer loss\nI0817 16:05:12.233224 17621 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:05:12.233232 17621 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:05:12.233238 17621 net.cpp:408] loss -> loss\nI0817 16:05:12.233286 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:05:12.233446 17621 net.cpp:150] Setting up loss\nI0817 16:05:12.233464 17621 net.cpp:157] Top shape: (1)\nI0817 16:05:12.233469 17621 net.cpp:160]     with loss weight 1\nI0817 16:05:12.233544 17621 net.cpp:165] Memory required for data: 1450544508\nI0817 16:05:12.233553 17621 net.cpp:226] loss needs backward computation.\nI0817 16:05:12.233561 17621 net.cpp:228] accuracy does not need backward computation.\nI0817 16:05:12.233566 17621 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:05:12.233572 17621 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:05:12.233577 17621 net.cpp:226] post_pool needs backward computation.\nI0817 16:05:12.233582 17621 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:05:12.233587 17621 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.233592 17621 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.233597 17621 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.233602 17621 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.233606 17621 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.233611 17621 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.233616 17621 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.233621 17621 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.233626 17621 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:05:12.233631 17621 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:05:12.233636 17621 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.233642 17621 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.233647 17621 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.233652 17621 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.233657 17621 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.233662 17621 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:05:12.233667 17621 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.233672 17621 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.233677 17621 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:05:12.233690 17621 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:05:12.233695 17621 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:05:12.233701 17621 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.233706 17621 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.233712 17621 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.233717 17621 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.233722 17621 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.233726 17621 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.233732 17621 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.233737 17621 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:05:12.233742 17621 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:05:12.233747 17621 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.233753 17621 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.233758 17621 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.233763 17621 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.233777 17621 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.233781 17621 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.233786 17621 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.233793 17621 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.233798 17621 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:05:12.233803 17621 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:05:12.233808 17621 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:05:12.233814 17621 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.233819 17621 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.233824 17621 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.233832 17621 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.233839 17621 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.233844 17621 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.233850 17621 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.233855 17621 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:05:12.233860 17621 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:05:12.233865 17621 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.233870 17621 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.233876 17621 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.233881 17621 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.233886 17621 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.233891 17621 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.233896 17621 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.233902 17621 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.233907 17621 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:05:12.233912 17621 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:05:12.233917 17621 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:05:12.233923 17621 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.233928 17621 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.233934 17621 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.233939 17621 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.233944 17621 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.233949 
17621 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.233954 17621 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.233959 17621 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:05:12.233965 17621 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:05:12.233970 17621 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.233976 17621 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.233981 17621 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.233988 17621 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.233992 17621 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.233997 17621 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.234002 17621 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.234007 17621 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.234014 17621 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:05:12.234019 17621 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:05:12.234025 17621 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:05:12.234035 17621 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:05:12.234040 17621 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.234046 17621 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:05:12.234052 17621 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.234057 17621 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.234062 17621 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.234068 17621 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.234073 17621 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:05:12.234078 17621 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.234083 17621 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.234089 17621 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:05:12.234094 17621 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:05:12.234099 17621 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.234105 17621 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.234110 17621 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.234117 17621 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.234122 17621 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.234127 17621 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.234133 17621 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.234138 17621 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.234143 17621 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:05:12.234149 17621 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:05:12.234154 17621 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.234160 17621 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.234166 17621 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.234171 17621 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.234177 17621 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.234182 17621 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:05:12.234187 17621 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.234194 17621 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.234201 17621 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:05:12.234207 17621 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:05:12.234213 17621 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:05:12.234220 17621 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.234225 17621 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.234230 17621 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.234236 17621 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.234241 17621 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.234246 17621 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.234252 17621 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.234257 17621 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:05:12.234263 17621 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:05:12.234268 17621 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.234274 17621 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.234279 17621 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.234285 17621 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.234290 17621 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.234295 17621 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.234307 17621 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.234311 17621 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.234318 17621 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:05:12.234323 17621 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:05:12.234328 17621 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:05:12.234334 17621 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.234339 17621 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.234345 17621 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.234350 17621 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.234355 17621 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.234360 17621 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.234366 17621 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.234371 17621 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:05:12.234377 17621 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:05:12.234382 17621 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.234388 17621 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.234393 17621 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.234400 17621 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.234405 17621 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.234411 17621 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.234416 17621 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.234421 17621 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.234426 17621 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:05:12.234432 17621 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:05:12.234437 17621 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:05:12.234444 17621 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.234450 17621 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.234455 17621 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.234462 17621 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.234467 17621 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.234472 17621 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.234477 17621 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.234483 17621 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:05:12.234489 17621 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:05:12.234494 17621 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.234500 17621 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.234505 17621 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.234511 17621 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.234518 17621 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.234522 17621 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.234527 17621 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.234534 17621 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.234539 17621 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:05:12.234544 17621 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:05:12.234550 17621 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:05:12.234555 17621 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:05:12.234561 17621 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.234572 17621 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:05:12.234578 17621 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.234585 17621 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.234591 17621 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.234596 17621 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.234601 17621 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:05:12.234607 17621 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.234613 17621 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.234618 17621 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:05:12.234624 17621 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:05:12.234629 17621 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.234635 17621 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.234642 17621 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.234647 17621 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.234652 17621 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.234658 17621 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.234663 17621 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.234669 17621 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.234675 17621 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:05:12.234680 17621 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:05:12.234693 17621 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.234699 17621 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.234704 17621 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.234710 17621 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.234716 17621 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.234722 17621 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:05:12.234727 17621 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.234733 17621 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.234740 17621 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:05:12.234745 17621 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:05:12.234750 17621 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:05:12.234757 17621 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.234762 17621 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.234768 17621 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.234774 17621 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.234779 17621 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.234786 17621 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.234791 17621 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.234797 17621 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:05:12.234803 17621 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:05:12.234808 17621 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.234815 17621 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.234820 17621 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.234827 17621 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.234833 17621 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.234838 17621 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.234843 17621 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.234854 17621 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.234860 17621 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:05:12.234866 17621 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:05:12.234872 17621 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:05:12.234879 17621 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.234884 17621 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.234892 17621 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.234899 17621 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.234905 17621 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.234910 17621 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.234915 17621 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.234921 17621 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:05:12.234927 17621 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:05:12.234933 17621 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.234941 17621 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.234946 17621 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.234951 17621 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.234957 17621 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.234962 17621 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.234968 17621 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.234974 17621 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.234979 17621 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:05:12.234985 17621 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:05:12.234992 17621 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:05:12.234997 17621 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.235003 17621 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.235008 17621 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.235014 17621 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.235020 17621 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.235025 17621 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.235031 17621 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.235036 17621 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:05:12.235043 17621 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:05:12.235047 17621 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.235054 17621 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.235059 17621 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.235065 17621 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.235070 17621 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.235076 17621 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.235081 17621 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.235087 17621 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.235093 17621 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:05:12.235100 17621 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:05:12.235105 17621 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.235111 17621 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.235116 17621 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.235122 17621 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.235133 17621 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.235139 17621 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:05:12.235144 17621 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.235150 17621 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.235157 17621 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:05:12.235162 17621 net.cpp:226] pre_relu needs backward computation.\nI0817 16:05:12.235167 17621 net.cpp:226] pre_scale needs backward computation.\nI0817 16:05:12.235172 17621 net.cpp:226] pre_bn needs backward computation.\nI0817 16:05:12.235177 17621 net.cpp:226] pre_conv needs backward computation.\nI0817 16:05:12.235184 17621 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:05:12.235191 17621 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:05:12.235196 17621 net.cpp:270] This network produces output accuracy\nI0817 16:05:12.235203 17621 net.cpp:270] This network produces output loss\nI0817 16:05:12.235563 17621 net.cpp:283] Network initialization done.\nI0817 16:05:12.245051 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:12.245090 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:12.245147 17621 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:05:12.245524 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:05:12.245543 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:05:12.245553 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:05:12.245563 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:05:12.245573 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:05:12.245581 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:05:12.245590 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:05:12.245599 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:05:12.245609 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:05:12.245616 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:05:12.245626 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:05:12.245635 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:05:12.245643 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:05:12.245651 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:05:12.245661 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:05:12.245669 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:05:12.245678 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:05:12.245695 17621 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:05:12.245705 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:05:12.245724 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:05:12.245734 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:05:12.245743 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:05:12.245755 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:05:12.245764 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:05:12.245774 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:05:12.245781 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:05:12.245790 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:05:12.245798 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:05:12.245806 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:05:12.245815 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:05:12.245824 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:05:12.245832 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:05:12.245842 17621 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:05:12.245849 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:05:12.245858 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:05:12.245867 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:05:12.245877 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:05:12.245884 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:05:12.245893 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:05:12.245903 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:05:12.245914 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:05:12.245923 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:05:12.245931 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:05:12.245939 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:05:12.245949 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:05:12.245957 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:05:12.245966 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:05:12.245975 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:05:12.245982 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:05:12.245991 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:05:12.246008 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:05:12.246017 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:05:12.246026 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:05:12.246034 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:05:12.246044 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:05:12.246052 17621 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:05:12.247712 17621 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0817 16:05:12.249326 17621 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:05:12.249565 17621 net.cpp:100] Creating Layer dataLayer\nI0817 16:05:12.249588 17621 net.cpp:408] dataLayer -> data_top\nI0817 16:05:12.249604 17621 net.cpp:408] dataLayer -> label\nI0817 16:05:12.249616 17621 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:05:12.259485 17629 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:05:12.259742 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:12.267010 17621 net.cpp:150] Setting up dataLayer\nI0817 16:05:12.267060 17621 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:05:12.267073 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.267079 17621 net.cpp:165] Memory required for data: 1536500\nI0817 16:05:12.267086 17621 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:05:12.267098 17621 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:05:12.267104 17621 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:05:12.267117 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:05:12.267128 17621 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:05:12.267271 17621 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:05:12.267287 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.267294 17621 net.cpp:157] Top shape: 125 (125)\nI0817 16:05:12.267299 17621 net.cpp:165] Memory required for data: 1537500\nI0817 16:05:12.267304 17621 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:05:12.267334 17621 net.cpp:100] Creating Layer pre_conv\nI0817 16:05:12.267343 17621 net.cpp:434] pre_conv <- data_top\nI0817 16:05:12.267354 17621 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:05:12.267771 17621 net.cpp:150] Setting up pre_conv\nI0817 16:05:12.267798 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.267804 17621 net.cpp:165] Memory required for data: 9729500\nI0817 16:05:12.267819 17621 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:05:12.267833 17621 net.cpp:100] Creating Layer pre_bn\nI0817 16:05:12.267839 17621 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:05:12.267848 17621 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:05:12.268189 17621 net.cpp:150] Setting up pre_bn\nI0817 16:05:12.268204 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.268211 17621 net.cpp:165] Memory required for data: 17921500\nI0817 16:05:12.268229 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:05:12.268239 17621 net.cpp:100] Creating Layer pre_scale\nI0817 16:05:12.268244 17621 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:05:12.268251 17621 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:05:12.268318 17621 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:05:12.268533 17621 net.cpp:150] Setting up pre_scale\nI0817 16:05:12.268548 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.268553 17621 net.cpp:165] Memory required for data: 26113500\nI0817 16:05:12.268563 17621 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:05:12.268571 17621 net.cpp:100] Creating Layer pre_relu\nI0817 16:05:12.268577 17621 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:05:12.268590 17621 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:05:12.268601 17621 net.cpp:150] Setting up pre_relu\nI0817 16:05:12.268609 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.268613 17621 net.cpp:165] Memory required for data: 
34305500\nI0817 16:05:12.268618 17621 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:05:12.268627 17621 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:05:12.268633 17621 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:05:12.268653 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:05:12.268666 17621 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:05:12.268725 17621 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:05:12.268736 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.268743 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.268748 17621 net.cpp:165] Memory required for data: 50689500\nI0817 16:05:12.268754 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:05:12.268767 17621 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:05:12.268774 17621 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:05:12.268786 17621 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:05:12.269210 17621 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:05:12.269227 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.269232 17621 net.cpp:165] Memory required for data: 58881500\nI0817 16:05:12.269249 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:05:12.269263 17621 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:05:12.269269 17621 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:05:12.269279 17621 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:05:12.269884 17621 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:05:12.269901 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.269908 17621 net.cpp:165] Memory required for data: 67073500\nI0817 16:05:12.269922 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:05:12.269929 17621 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:05:12.269935 17621 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:05:12.269946 17621 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.270015 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:05:12.270192 17621 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:05:12.270208 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.270215 17621 net.cpp:165] Memory required for data: 75265500\nI0817 16:05:12.270236 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:05:12.270244 17621 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:05:12.270249 17621 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:05:12.270261 17621 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.270270 17621 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:05:12.270277 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.270282 17621 net.cpp:165] Memory required for data: 83457500\nI0817 16:05:12.270287 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:05:12.270304 17621 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:05:12.270310 17621 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:05:12.270323 17621 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:05:12.270726 17621 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:05:12.270745 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.270750 17621 net.cpp:165] Memory required for data: 91649500\nI0817 16:05:12.270758 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:05:12.270774 17621 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:05:12.270781 17621 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:05:12.270794 17621 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:05:12.271098 17621 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:05:12.271111 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271117 17621 net.cpp:165] Memory required for data: 99841500\nI0817 16:05:12.271131 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:05:12.271147 17621 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:05:12.271153 17621 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:05:12.271162 17621 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.271227 17621 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:05:12.271409 17621 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:05:12.271428 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271435 17621 net.cpp:165] Memory required for data: 108033500\nI0817 16:05:12.271445 17621 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:05:12.271453 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:05:12.271459 17621 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:05:12.271469 17621 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:05:12.271477 17621 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:05:12.271515 17621 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:05:12.271525 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271534 17621 net.cpp:165] Memory required for data: 116225500\nI0817 16:05:12.271539 17621 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:05:12.271549 17621 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:05:12.271555 17621 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:05:12.271565 17621 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.271575 17621 net.cpp:150] Setting up L1_b1_relu\nI0817 16:05:12.271582 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271587 17621 net.cpp:165] Memory required for data: 124417500\nI0817 16:05:12.271591 17621 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.271606 17621 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.271613 17621 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:05:12.271621 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:05:12.271631 17621 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:05:12.271688 17621 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:05:12.271713 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271720 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.271725 17621 net.cpp:165] Memory required for data: 140801500\nI0817 16:05:12.271733 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:05:12.271744 17621 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:05:12.271750 17621 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:05:12.271759 17621 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:05:12.272166 17621 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:05:12.272182 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.272191 17621 net.cpp:165] Memory required for data: 148993500\nI0817 16:05:12.272199 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:05:12.272212 17621 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:05:12.272217 17621 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:05:12.272230 17621 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:05:12.272544 17621 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:05:12.272558 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.272563 17621 net.cpp:165] Memory required for data: 157185500\nI0817 16:05:12.272578 17621 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:05:12.272593 17621 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:05:12.272599 17621 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:05:12.272608 17621 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.272691 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:05:12.273071 17621 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:05:12.273085 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.273092 17621 net.cpp:165] Memory required for data: 165377500\nI0817 16:05:12.273104 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:05:12.273113 17621 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:05:12.273118 17621 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:05:12.273131 17621 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.273145 17621 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:05:12.273154 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.273159 17621 net.cpp:165] Memory required for data: 173569500\nI0817 16:05:12.273162 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:05:12.273188 17621 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:05:12.273196 17621 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:05:12.273206 17621 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:05:12.273617 17621 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:05:12.273633 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.273638 17621 net.cpp:165] Memory required for data: 181761500\nI0817 16:05:12.273648 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:05:12.273658 17621 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:05:12.273665 17621 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:05:12.273677 17621 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:05:12.274008 17621 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:05:12.274024 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274031 17621 net.cpp:165] Memory required for data: 189953500\nI0817 16:05:12.274049 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:05:12.274060 17621 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:05:12.274070 17621 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:05:12.274078 17621 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.274149 17621 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:05:12.274338 17621 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:05:12.274353 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274358 17621 net.cpp:165] Memory required for data: 198145500\nI0817 16:05:12.274374 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:05:12.274387 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:05:12.274394 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:05:12.274400 17621 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:05:12.274411 17621 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:05:12.274459 17621 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:05:12.274471 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274475 17621 net.cpp:165] Memory required for data: 206337500\nI0817 16:05:12.274480 17621 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:05:12.274488 17621 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:05:12.274497 17621 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:05:12.274507 17621 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.274518 17621 net.cpp:150] Setting up L1_b2_relu\nI0817 16:05:12.274524 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274528 17621 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:05:12.274533 17621 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.274540 17621 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.274545 17621 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:05:12.274554 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:05:12.274565 17621 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:05:12.274621 17621 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:05:12.274636 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274643 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.274647 17621 net.cpp:165] Memory required for data: 230913500\nI0817 16:05:12.274652 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:05:12.274664 17621 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:05:12.274670 17621 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:05:12.274691 17621 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:05:12.275161 17621 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:05:12.275177 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.275182 17621 net.cpp:165] Memory required for data: 239105500\nI0817 16:05:12.275192 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:05:12.275204 17621 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:05:12.275212 17621 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:05:12.275224 17621 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:05:12.275537 17621 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:05:12.275552 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.275557 17621 net.cpp:165] Memory required for data: 
247297500\nI0817 16:05:12.275568 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:05:12.275579 17621 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:05:12.275588 17621 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:05:12.275598 17621 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.275663 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:05:12.275890 17621 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:05:12.275905 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.275910 17621 net.cpp:165] Memory required for data: 255489500\nI0817 16:05:12.275919 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:05:12.275930 17621 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:05:12.275938 17621 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:05:12.275949 17621 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.275969 17621 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:05:12.275976 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.275981 17621 net.cpp:165] Memory required for data: 263681500\nI0817 16:05:12.275986 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:05:12.276006 17621 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:05:12.276013 17621 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:05:12.276021 17621 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:05:12.276582 17621 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:05:12.276597 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.276602 17621 net.cpp:165] Memory required for data: 271873500\nI0817 16:05:12.276614 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:05:12.276631 17621 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:05:12.276639 17621 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:05:12.276654 17621 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:05:12.277000 17621 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:05:12.277016 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.277024 17621 net.cpp:165] Memory required for data: 280065500\nI0817 16:05:12.277036 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:05:12.277048 17621 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:05:12.277055 17621 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:05:12.277065 17621 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.277127 17621 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:05:12.277312 17621 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:05:12.277328 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.277333 17621 net.cpp:165] Memory required for data: 288257500\nI0817 16:05:12.277343 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:05:12.277354 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:05:12.277360 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:05:12.277367 17621 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:05:12.277379 17621 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:05:12.277420 17621 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:05:12.277431 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.277436 17621 net.cpp:165] Memory required for data: 296449500\nI0817 16:05:12.277441 17621 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:05:12.277449 17621 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:05:12.277454 17621 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:05:12.277464 17621 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.277474 17621 net.cpp:150] Setting up L1_b3_relu\nI0817 16:05:12.277482 17621 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:05:12.277487 17621 net.cpp:165] Memory required for data: 304641500\nI0817 16:05:12.277492 17621 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.277498 17621 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.277503 17621 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:05:12.277510 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:05:12.277519 17621 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:05:12.277592 17621 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:05:12.277606 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.277612 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.277617 17621 net.cpp:165] Memory required for data: 321025500\nI0817 16:05:12.277622 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:05:12.277633 17621 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:05:12.277647 17621 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:05:12.277659 17621 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:05:12.278024 17621 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:05:12.278039 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.278045 17621 net.cpp:165] Memory required for data: 329217500\nI0817 16:05:12.278055 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:05:12.278064 17621 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:05:12.278070 17621 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:05:12.278079 17621 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:05:12.278353 17621 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:05:12.278367 17621 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:05:12.278373 17621 net.cpp:165] Memory required for data: 337409500\nI0817 16:05:12.278383 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:05:12.278394 17621 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:05:12.278401 17621 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:05:12.278409 17621 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.278473 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:05:12.278658 17621 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:05:12.278674 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.278679 17621 net.cpp:165] Memory required for data: 345601500\nI0817 16:05:12.278694 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:05:12.278703 17621 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:05:12.278709 17621 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:05:12.278719 17621 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.278730 17621 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:05:12.278738 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.278743 17621 net.cpp:165] Memory required for data: 353793500\nI0817 16:05:12.278746 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:05:12.278761 17621 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:05:12.278766 17621 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:05:12.278775 17621 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:05:12.279145 17621 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:05:12.279160 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.279165 17621 net.cpp:165] Memory required for data: 361985500\nI0817 16:05:12.279175 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:05:12.279187 17621 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:05:12.279193 17621 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:05:12.279201 17621 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:05:12.279474 17621 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:05:12.279487 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.279492 17621 net.cpp:165] Memory required for data: 370177500\nI0817 16:05:12.279503 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:05:12.279515 17621 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:05:12.279520 17621 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:05:12.279528 17621 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.279588 17621 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:05:12.279752 17621 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:05:12.279765 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.279770 17621 net.cpp:165] Memory required for data: 378369500\nI0817 16:05:12.279779 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:05:12.279791 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:05:12.279798 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:05:12.279804 17621 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:05:12.279819 17621 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:05:12.279857 17621 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:05:12.279867 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.279871 17621 net.cpp:165] Memory required for data: 386561500\nI0817 16:05:12.279877 17621 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:05:12.279884 17621 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:05:12.279891 17621 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:05:12.279899 17621 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.279909 17621 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:05:12.279917 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.279922 17621 net.cpp:165] Memory required for data: 394753500\nI0817 16:05:12.279925 17621 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.279932 17621 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.279937 17621 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:05:12.279945 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:05:12.279954 17621 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:05:12.280006 17621 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:05:12.280019 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.280025 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.280030 17621 net.cpp:165] Memory required for data: 411137500\nI0817 16:05:12.280035 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:05:12.280046 17621 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:05:12.280052 17621 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:05:12.280066 17621 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:05:12.280416 17621 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:05:12.280431 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.280436 17621 net.cpp:165] Memory required for data: 419329500\nI0817 16:05:12.280480 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:05:12.280493 17621 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:05:12.280499 17621 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:05:12.280510 17621 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:05:12.280791 17621 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:05:12.280823 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.280830 17621 net.cpp:165] Memory required for data: 427521500\nI0817 16:05:12.280841 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:05:12.280853 17621 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:05:12.280859 17621 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:05:12.280867 17621 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.280928 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:05:12.281086 17621 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:05:12.281100 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.281105 17621 net.cpp:165] Memory required for data: 435713500\nI0817 16:05:12.281113 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:05:12.281121 17621 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:05:12.281127 17621 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:05:12.281137 17621 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.281147 17621 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:05:12.281154 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.281159 17621 net.cpp:165] Memory required for data: 443905500\nI0817 16:05:12.281163 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:05:12.281184 17621 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:05:12.281190 17621 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:05:12.281199 17621 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:05:12.281571 17621 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:05:12.281587 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.281592 17621 net.cpp:165] Memory required for data: 452097500\nI0817 16:05:12.281601 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:05:12.281613 17621 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:05:12.281620 17621 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:05:12.281627 17621 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:05:12.281919 17621 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:05:12.281932 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.281937 17621 net.cpp:165] Memory required for data: 460289500\nI0817 16:05:12.281947 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:05:12.281956 17621 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:05:12.281962 17621 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:05:12.281972 17621 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.282032 17621 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:05:12.282213 17621 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:05:12.282228 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282233 17621 net.cpp:165] Memory required for data: 468481500\nI0817 16:05:12.282243 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:05:12.282251 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:05:12.282258 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:05:12.282265 17621 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:05:12.282275 17621 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:05:12.282312 17621 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:05:12.282327 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282332 17621 net.cpp:165] Memory required for data: 476673500\nI0817 16:05:12.282337 17621 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:05:12.282346 17621 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:05:12.282351 17621 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:05:12.282357 17621 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.282366 17621 net.cpp:150] Setting up L1_b5_relu\nI0817 16:05:12.282373 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282378 17621 net.cpp:165] Memory required for data: 484865500\nI0817 16:05:12.282383 17621 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.282394 17621 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.282400 17621 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:05:12.282407 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:05:12.282418 17621 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:05:12.282474 17621 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:05:12.282486 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282493 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282497 17621 net.cpp:165] Memory required for data: 501249500\nI0817 16:05:12.282502 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:05:12.282513 17621 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:05:12.282519 17621 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:05:12.282531 17621 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:05:12.282896 17621 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:05:12.282910 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.282922 17621 net.cpp:165] Memory required for data: 509441500\nI0817 16:05:12.282932 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:05:12.282941 17621 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:05:12.282948 17621 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:05:12.282955 17621 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:05:12.283231 17621 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:05:12.283246 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.283251 17621 net.cpp:165] Memory required for data: 517633500\nI0817 16:05:12.283260 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:05:12.283273 17621 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:05:12.283279 17621 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:05:12.283288 17621 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.283347 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:05:12.283511 17621 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:05:12.283524 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.283529 17621 net.cpp:165] Memory required for data: 525825500\nI0817 16:05:12.283538 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:05:12.283545 17621 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:05:12.283551 17621 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:05:12.283561 17621 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.283571 17621 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:05:12.283578 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.283583 17621 net.cpp:165] Memory required for data: 534017500\nI0817 16:05:12.283588 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:05:12.283601 17621 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:05:12.283607 17621 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:05:12.283615 17621 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:05:12.283977 17621 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:05:12.283993 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.283998 17621 net.cpp:165] Memory required for data: 542209500\nI0817 16:05:12.284006 17621 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:05:12.284019 17621 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:05:12.284026 17621 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:05:12.284034 17621 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:05:12.284333 17621 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:05:12.284348 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284353 17621 net.cpp:165] Memory required for data: 550401500\nI0817 16:05:12.284364 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:05:12.284373 17621 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:05:12.284379 17621 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:05:12.284389 17621 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.284448 17621 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:05:12.284608 17621 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:05:12.284621 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284626 17621 net.cpp:165] Memory required for data: 558593500\nI0817 16:05:12.284636 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:05:12.284653 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:05:12.284660 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:05:12.284667 17621 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:05:12.284677 17621 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:05:12.284719 17621 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:05:12.284730 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284734 17621 net.cpp:165] Memory required for data: 566785500\nI0817 16:05:12.284747 17621 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:05:12.284755 17621 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:05:12.284761 17621 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:05:12.284771 17621 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.284781 17621 net.cpp:150] Setting up L1_b6_relu\nI0817 16:05:12.284788 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284793 17621 net.cpp:165] Memory required for data: 574977500\nI0817 16:05:12.284798 17621 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.284804 17621 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.284811 17621 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:05:12.284817 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:05:12.284826 17621 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:05:12.284878 17621 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:05:12.284889 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284896 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.284901 17621 net.cpp:165] Memory required for data: 591361500\nI0817 16:05:12.284906 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:05:12.284919 17621 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:05:12.284926 17621 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:05:12.284935 17621 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:05:12.285298 17621 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:05:12.285312 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.285317 17621 net.cpp:165] Memory required for data: 599553500\nI0817 16:05:12.285326 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:05:12.285336 17621 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:05:12.285341 17621 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:05:12.285352 17621 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:05:12.285630 17621 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:05:12.285647 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.285652 17621 net.cpp:165] Memory required for data: 607745500\nI0817 16:05:12.285663 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:05:12.285671 17621 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:05:12.285677 17621 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:05:12.285691 17621 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.285749 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:05:12.285912 17621 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:05:12.285925 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.285930 17621 net.cpp:165] Memory required for data: 615937500\nI0817 16:05:12.285939 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:05:12.285950 17621 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:05:12.285956 17621 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:05:12.285964 17621 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.285974 17621 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:05:12.285984 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.285989 17621 net.cpp:165] Memory required for data: 624129500\nI0817 16:05:12.285993 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:05:12.286003 17621 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:05:12.286010 17621 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:05:12.286020 17621 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:05:12.286383 17621 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:05:12.286397 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.286409 17621 
net.cpp:165] Memory required for data: 632321500\nI0817 16:05:12.286418 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:05:12.286427 17621 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:05:12.286433 17621 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:05:12.286444 17621 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:05:12.286741 17621 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:05:12.286756 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.286761 17621 net.cpp:165] Memory required for data: 640513500\nI0817 16:05:12.286772 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:05:12.286783 17621 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:05:12.286789 17621 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:05:12.286798 17621 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.286855 17621 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:05:12.287016 17621 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:05:12.287030 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287035 17621 net.cpp:165] Memory required for data: 648705500\nI0817 16:05:12.287045 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:05:12.287055 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:05:12.287062 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:05:12.287068 17621 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:05:12.287076 17621 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:05:12.287113 17621 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:05:12.287124 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287129 17621 net.cpp:165] Memory required for data: 656897500\nI0817 16:05:12.287134 17621 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:05:12.287142 17621 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:05:12.287147 17621 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:05:12.287158 17621 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.287168 17621 net.cpp:150] Setting up L1_b7_relu\nI0817 16:05:12.287174 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287178 17621 net.cpp:165] Memory required for data: 665089500\nI0817 16:05:12.287184 17621 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.287189 17621 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.287195 17621 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:05:12.287202 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:05:12.287211 17621 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:05:12.287261 17621 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:05:12.287273 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287281 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287284 17621 net.cpp:165] Memory required for data: 681473500\nI0817 16:05:12.287289 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:05:12.287300 17621 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:05:12.287307 17621 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:05:12.287317 17621 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:05:12.287673 17621 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:05:12.287693 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.287698 17621 net.cpp:165] Memory required for data: 689665500\nI0817 16:05:12.287708 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:05:12.287715 17621 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:05:12.287721 17621 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:05:12.287739 17621 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:05:12.288018 17621 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:05:12.288031 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.288036 17621 net.cpp:165] Memory required for data: 697857500\nI0817 16:05:12.288048 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:05:12.288058 17621 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:05:12.288064 17621 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:05:12.288072 17621 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.288130 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:05:12.288295 17621 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:05:12.288308 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.288313 17621 net.cpp:165] Memory required for data: 706049500\nI0817 16:05:12.288322 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:05:12.288333 17621 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:05:12.288339 17621 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:05:12.288347 17621 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.288357 17621 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:05:12.288363 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.288367 17621 net.cpp:165] Memory required for data: 714241500\nI0817 16:05:12.288372 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:05:12.288385 17621 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:05:12.288391 17621 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:05:12.288404 17621 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:05:12.288765 17621 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:05:12.288780 17621 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.288785 17621 net.cpp:165] Memory required for data: 722433500\nI0817 16:05:12.288794 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:05:12.288803 17621 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:05:12.288810 17621 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:05:12.288820 17621 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:05:12.289098 17621 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:05:12.289111 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289116 17621 net.cpp:165] Memory required for data: 730625500\nI0817 16:05:12.289126 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:05:12.289137 17621 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:05:12.289144 17621 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:05:12.289151 17621 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.289209 17621 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:05:12.289369 17621 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:05:12.289382 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289387 17621 net.cpp:165] Memory required for data: 738817500\nI0817 16:05:12.289397 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:05:12.289404 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:05:12.289412 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:05:12.289420 17621 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:05:12.289428 17621 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:05:12.289465 17621 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:05:12.289477 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289481 17621 net.cpp:165] Memory required for data: 747009500\nI0817 16:05:12.289486 17621 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:05:12.289494 17621 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:05:12.289500 17621 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:05:12.289510 17621 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.289526 17621 net.cpp:150] Setting up L1_b8_relu\nI0817 16:05:12.289535 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289538 17621 net.cpp:165] Memory required for data: 755201500\nI0817 16:05:12.289543 17621 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.289551 17621 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.289556 17621 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:05:12.289562 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:05:12.289572 17621 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:05:12.289623 17621 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:05:12.289634 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289640 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.289645 17621 net.cpp:165] Memory required for data: 771585500\nI0817 16:05:12.289650 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:05:12.289661 17621 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:05:12.289667 17621 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:05:12.289679 17621 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:05:12.290055 17621 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:05:12.290069 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.290074 17621 net.cpp:165] Memory required for data: 779777500\nI0817 16:05:12.290083 17621 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:05:12.290097 17621 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:05:12.290104 17621 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:05:12.290117 17621 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:05:12.290391 17621 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:05:12.290405 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.290410 17621 net.cpp:165] Memory required for data: 787969500\nI0817 16:05:12.290421 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:05:12.290429 17621 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:05:12.290436 17621 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:05:12.290443 17621 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.290504 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:05:12.290668 17621 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:05:12.290681 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.290693 17621 net.cpp:165] Memory required for data: 796161500\nI0817 16:05:12.290702 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:05:12.290714 17621 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:05:12.290720 17621 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:05:12.290729 17621 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.290737 17621 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:05:12.290745 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.290750 17621 net.cpp:165] Memory required for data: 804353500\nI0817 16:05:12.290753 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:05:12.290768 17621 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:05:12.290774 17621 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:05:12.290786 17621 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:05:12.291143 17621 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:05:12.291157 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.291162 17621 net.cpp:165] Memory required for data: 812545500\nI0817 16:05:12.291172 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:05:12.291183 17621 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:05:12.291189 17621 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:05:12.291204 17621 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:05:12.291489 17621 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:05:12.291503 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.291508 17621 net.cpp:165] Memory required for data: 820737500\nI0817 16:05:12.291540 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:05:12.291549 17621 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:05:12.291555 17621 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:05:12.291565 17621 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.291623 17621 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:05:12.291795 17621 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:05:12.291808 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.291815 17621 net.cpp:165] Memory required for data: 828929500\nI0817 16:05:12.291822 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:05:12.291834 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:05:12.291841 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:05:12.291848 17621 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:05:12.291857 17621 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:05:12.291890 17621 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:05:12.291903 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.291908 17621 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:05:12.291913 17621 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:05:12.291919 17621 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:05:12.291925 17621 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:05:12.291935 17621 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.291945 17621 net.cpp:150] Setting up L1_b9_relu\nI0817 16:05:12.291952 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.291957 17621 net.cpp:165] Memory required for data: 845313500\nI0817 16:05:12.291961 17621 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.291971 17621 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.291976 17621 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:05:12.291983 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:05:12.291993 17621 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:05:12.292045 17621 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:05:12.292057 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.292063 17621 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:05:12.292068 17621 net.cpp:165] Memory required for data: 861697500\nI0817 16:05:12.292073 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:05:12.292086 17621 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:05:12.292093 17621 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:05:12.292102 17621 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:05:12.292462 17621 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:05:12.292476 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.292482 17621 net.cpp:165] Memory required for data: 
863745500\nI0817 16:05:12.292490 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:05:12.292502 17621 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:05:12.292508 17621 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:05:12.292516 17621 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:05:12.292795 17621 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:05:12.292812 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.292817 17621 net.cpp:165] Memory required for data: 865793500\nI0817 16:05:12.292827 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:05:12.292842 17621 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:05:12.292850 17621 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:05:12.292857 17621 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.292917 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:05:12.293081 17621 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:05:12.293095 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.293100 17621 net.cpp:165] Memory required for data: 867841500\nI0817 16:05:12.293109 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:05:12.293117 17621 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:05:12.293123 17621 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:05:12.293133 17621 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.293143 17621 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:05:12.293149 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.293154 17621 net.cpp:165] Memory required for data: 869889500\nI0817 16:05:12.293159 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:05:12.293169 17621 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:05:12.293174 17621 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:05:12.293185 17621 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:05:12.293539 17621 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:05:12.293552 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.293557 17621 net.cpp:165] Memory required for data: 871937500\nI0817 16:05:12.293566 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:05:12.293576 17621 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:05:12.293581 17621 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:05:12.293594 17621 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:05:12.293869 17621 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:05:12.293882 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.293887 17621 net.cpp:165] Memory required for data: 873985500\nI0817 16:05:12.293898 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:05:12.293910 17621 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:05:12.293915 17621 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:05:12.293923 17621 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.293983 17621 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:05:12.294142 17621 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:05:12.294155 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.294160 17621 net.cpp:165] Memory required for data: 876033500\nI0817 16:05:12.294169 17621 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:05:12.294183 17621 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:05:12.294188 17621 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:05:12.294199 17621 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:05:12.294229 17621 net.cpp:150] Setting up L2_b1_pool\nI0817 16:05:12.294239 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.294242 17621 net.cpp:165] Memory required for data: 878081500\nI0817 16:05:12.294247 17621 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:05:12.294256 17621 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:05:12.294261 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:05:12.294268 17621 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:05:12.294278 17621 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:05:12.294312 17621 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:05:12.294322 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.294327 17621 net.cpp:165] Memory required for data: 880129500\nI0817 16:05:12.294332 17621 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:05:12.294338 17621 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:05:12.294351 17621 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:05:12.294363 17621 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.294373 17621 net.cpp:150] Setting up L2_b1_relu\nI0817 16:05:12.294380 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.294385 17621 net.cpp:165] Memory required for data: 882177500\nI0817 16:05:12.294389 17621 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:05:12.294399 17621 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:05:12.294406 17621 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:05:12.296648 17621 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:05:12.296666 17621 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:05:12.296672 17621 net.cpp:165] Memory required for data: 884225500\nI0817 16:05:12.296677 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:05:12.296694 17621 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:05:12.296700 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:05:12.296707 17621 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:05:12.296718 17621 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:05:12.296766 17621 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:05:12.296778 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.296783 17621 net.cpp:165] Memory required for data: 888321500\nI0817 16:05:12.296788 17621 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.296797 17621 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.296802 17621 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:05:12.296813 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:05:12.296823 17621 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:05:12.296875 17621 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:05:12.296890 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.296896 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.296901 17621 net.cpp:165] Memory required for data: 896513500\nI0817 16:05:12.296906 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:05:12.296917 17621 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:05:12.296923 17621 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:05:12.296932 17621 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:05:12.297438 17621 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:05:12.297453 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.297458 17621 net.cpp:165] Memory required for data: 900609500\nI0817 16:05:12.297467 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:05:12.297479 17621 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:05:12.297487 17621 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:05:12.297494 17621 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:05:12.297775 17621 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:05:12.297790 17621 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:05:12.297794 17621 net.cpp:165] Memory required for data: 904705500\nI0817 16:05:12.297806 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:05:12.297814 17621 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:05:12.297821 17621 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:05:12.297832 17621 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.297893 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:05:12.298055 17621 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:05:12.298069 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.298074 17621 net.cpp:165] Memory required for data: 908801500\nI0817 16:05:12.298082 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:05:12.298090 17621 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:05:12.298105 17621 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:05:12.298115 17621 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.298125 17621 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:05:12.298132 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.298137 17621 net.cpp:165] Memory required for data: 912897500\nI0817 16:05:12.298141 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:05:12.298156 17621 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:05:12.298161 17621 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:05:12.298171 17621 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:05:12.298674 17621 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:05:12.298694 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.298701 17621 net.cpp:165] Memory required for data: 916993500\nI0817 16:05:12.298709 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:05:12.298722 17621 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:05:12.298728 
17621 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:05:12.298737 17621 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:05:12.299000 17621 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:05:12.299013 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299018 17621 net.cpp:165] Memory required for data: 921089500\nI0817 16:05:12.299028 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:05:12.299037 17621 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:05:12.299043 17621 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:05:12.299051 17621 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.299113 17621 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:05:12.299269 17621 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:05:12.299285 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299290 17621 net.cpp:165] Memory required for data: 925185500\nI0817 16:05:12.299299 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:05:12.299307 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:05:12.299314 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:05:12.299321 17621 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:05:12.299329 17621 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:05:12.299360 17621 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:05:12.299370 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299374 17621 net.cpp:165] Memory required for data: 929281500\nI0817 16:05:12.299379 17621 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:05:12.299387 17621 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:05:12.299393 17621 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:05:12.299403 17621 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.299412 17621 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:05:12.299419 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299424 17621 net.cpp:165] Memory required for data: 933377500\nI0817 16:05:12.299429 17621 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.299435 17621 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.299441 17621 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:05:12.299450 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:05:12.299460 17621 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:05:12.299507 17621 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:05:12.299520 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299526 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.299530 17621 net.cpp:165] Memory required for data: 941569500\nI0817 16:05:12.299542 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:05:12.299556 17621 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:05:12.299563 17621 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:05:12.299572 17621 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:05:12.300082 17621 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:05:12.300097 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.300102 17621 net.cpp:165] Memory required for data: 945665500\nI0817 16:05:12.300112 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:05:12.300124 17621 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:05:12.300130 17621 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:05:12.300138 17621 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:05:12.300405 17621 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:05:12.300420 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.300424 17621 net.cpp:165] Memory required for data: 949761500\nI0817 16:05:12.300434 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:05:12.300443 17621 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:05:12.300449 17621 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:05:12.300459 17621 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.300518 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:05:12.300676 17621 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:05:12.300699 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.300709 17621 net.cpp:165] Memory required for data: 953857500\nI0817 16:05:12.300724 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:05:12.300734 17621 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:05:12.300740 17621 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:05:12.300746 17621 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.300757 17621 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:05:12.300765 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.300768 17621 net.cpp:165] Memory required for data: 957953500\nI0817 16:05:12.300773 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:05:12.300787 17621 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:05:12.300793 17621 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:05:12.300804 17621 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:05:12.301301 17621 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:05:12.301314 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.301319 17621 net.cpp:165] Memory required for data: 962049500\nI0817 16:05:12.301328 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:05:12.301342 17621 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:05:12.301347 17621 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:05:12.301358 17621 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:05:12.301627 17621 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:05:12.301641 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.301646 17621 net.cpp:165] Memory required for data: 966145500\nI0817 16:05:12.301656 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:05:12.301666 17621 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:05:12.301671 17621 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:05:12.301678 17621 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.301748 17621 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:05:12.301911 17621 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:05:12.301928 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.301933 17621 net.cpp:165] Memory required for data: 970241500\nI0817 16:05:12.301941 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:05:12.301950 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:05:12.301964 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:05:12.301971 17621 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:05:12.301980 17621 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:05:12.302011 17621 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:05:12.302023 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.302028 17621 net.cpp:165] Memory required for data: 974337500\nI0817 16:05:12.302033 17621 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:05:12.302058 17621 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:05:12.302064 17621 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:05:12.302073 17621 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.302081 17621 net.cpp:150] Setting up L2_b3_relu\nI0817 16:05:12.302089 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.302093 17621 net.cpp:165] Memory required for data: 978433500\nI0817 16:05:12.302098 17621 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.302106 17621 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.302111 17621 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:05:12.302119 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:05:12.302129 17621 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:05:12.302182 17621 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:05:12.302194 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.302201 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.302206 17621 net.cpp:165] Memory required for data: 986625500\nI0817 16:05:12.302211 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:05:12.302224 17621 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:05:12.302232 17621 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:05:12.302243 17621 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:05:12.302747 17621 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:05:12.302762 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.302767 17621 net.cpp:165] Memory required for data: 990721500\nI0817 16:05:12.302775 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:05:12.302788 17621 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:05:12.302794 17621 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:05:12.302806 17621 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:05:12.303078 17621 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:05:12.303092 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.303097 17621 net.cpp:165] Memory required for data: 994817500\nI0817 16:05:12.303107 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:05:12.303115 17621 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:05:12.303122 17621 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:05:12.303129 17621 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.303190 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:05:12.303349 17621 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:05:12.303362 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.303367 17621 net.cpp:165] Memory required for data: 998913500\nI0817 16:05:12.303376 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:05:12.303385 17621 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:05:12.303390 17621 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:05:12.303401 17621 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.303411 17621 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:05:12.303418 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.303423 17621 net.cpp:165] Memory required for data: 1003009500\nI0817 16:05:12.303436 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:05:12.303450 17621 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:05:12.303457 17621 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:05:12.303465 17621 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:05:12.303970 17621 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:05:12.303985 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.303990 17621 net.cpp:165] Memory required for data: 1007105500\nI0817 16:05:12.303999 17621 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:05:12.304013 17621 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:05:12.304020 17621 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:05:12.304028 17621 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:05:12.304301 17621 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:05:12.304317 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304323 17621 net.cpp:165] Memory required for data: 1011201500\nI0817 16:05:12.304333 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:05:12.304342 17621 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:05:12.304348 17621 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:05:12.304355 17621 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.304414 17621 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:05:12.304579 17621 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:05:12.304592 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304597 17621 net.cpp:165] Memory required for data: 1015297500\nI0817 16:05:12.304606 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:05:12.304615 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:05:12.304621 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:05:12.304628 17621 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:05:12.304638 17621 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:05:12.304667 17621 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:05:12.304677 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304682 17621 net.cpp:165] Memory required for data: 1019393500\nI0817 16:05:12.304692 17621 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:05:12.304700 17621 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:05:12.304707 17621 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:05:12.304716 17621 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.304726 17621 net.cpp:150] Setting up L2_b4_relu\nI0817 16:05:12.304733 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304738 17621 net.cpp:165] Memory required for data: 1023489500\nI0817 16:05:12.304744 17621 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.304749 17621 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.304755 17621 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:05:12.304762 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:05:12.304771 17621 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:05:12.304823 17621 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:05:12.304836 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304842 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.304847 17621 net.cpp:165] Memory required for data: 1031681500\nI0817 16:05:12.304852 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:05:12.304865 17621 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:05:12.304872 17621 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:05:12.304882 17621 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:05:12.305384 17621 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:05:12.305399 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.305404 17621 net.cpp:165] Memory required for data: 1035777500\nI0817 16:05:12.305413 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:05:12.305425 17621 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:05:12.305431 17621 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:05:12.305439 17621 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:05:12.305717 17621 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:05:12.305737 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.305742 17621 net.cpp:165] Memory required for data: 1039873500\nI0817 16:05:12.305752 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:05:12.305761 17621 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:05:12.305768 17621 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:05:12.305775 17621 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.305835 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:05:12.306004 17621 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:05:12.306016 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.306021 17621 net.cpp:165] Memory required for data: 1043969500\nI0817 16:05:12.306030 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:05:12.306038 17621 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:05:12.306044 17621 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:05:12.306054 17621 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.306064 17621 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:05:12.306071 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.306077 17621 net.cpp:165] Memory required for data: 1048065500\nI0817 16:05:12.306082 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:05:12.306094 17621 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:05:12.306100 17621 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:05:12.306108 17621 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:05:12.306604 17621 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:05:12.306619 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.306623 17621 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:05:12.306632 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:05:12.306645 17621 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:05:12.306651 17621 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:05:12.306659 17621 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:05:12.306938 17621 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:05:12.306952 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.306957 17621 net.cpp:165] Memory required for data: 1056257500\nI0817 16:05:12.306967 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:05:12.306978 17621 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:05:12.306985 17621 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:05:12.306993 17621 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.307052 17621 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:05:12.307212 17621 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:05:12.307225 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.307230 17621 net.cpp:165] Memory required for data: 1060353500\nI0817 16:05:12.307240 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:05:12.307250 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:05:12.307257 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:05:12.307265 17621 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:05:12.307274 17621 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:05:12.307302 17621 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:05:12.307319 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.307324 17621 net.cpp:165] Memory required for data: 1064449500\nI0817 16:05:12.307329 17621 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:05:12.307337 17621 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:05:12.307343 17621 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:05:12.307355 17621 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.307365 17621 net.cpp:150] Setting up L2_b5_relu\nI0817 16:05:12.307373 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.307377 17621 net.cpp:165] Memory required for data: 1068545500\nI0817 16:05:12.307382 17621 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.307389 17621 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.307394 17621 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:05:12.307401 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:05:12.307411 17621 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:05:12.307464 17621 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:05:12.307476 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.307483 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.307487 17621 net.cpp:165] Memory required for data: 1076737500\nI0817 16:05:12.307492 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:05:12.307503 17621 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:05:12.307509 17621 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:05:12.307521 17621 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:05:12.308028 17621 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:05:12.308043 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.308048 17621 net.cpp:165] Memory required for data: 1080833500\nI0817 16:05:12.308058 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:05:12.308069 17621 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:05:12.308075 17621 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:05:12.308084 17621 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:05:12.308353 17621 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:05:12.308369 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.308374 17621 net.cpp:165] Memory required for data: 1084929500\nI0817 16:05:12.308385 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:05:12.308394 17621 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:05:12.308400 17621 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:05:12.308408 17621 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.308465 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:05:12.308629 17621 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:05:12.308642 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.308647 17621 net.cpp:165] Memory required for data: 1089025500\nI0817 16:05:12.308656 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:05:12.308663 17621 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:05:12.308670 17621 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:05:12.308681 17621 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.308696 17621 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:05:12.308703 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.308708 17621 net.cpp:165] Memory required for data: 1093121500\nI0817 16:05:12.308713 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:05:12.308723 17621 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:05:12.308729 17621 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:05:12.308740 17621 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:05:12.309234 17621 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:05:12.309249 17621 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.309254 17621 net.cpp:165] Memory required for data: 1097217500\nI0817 16:05:12.309263 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:05:12.309273 17621 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:05:12.309279 17621 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:05:12.309290 17621 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:05:12.309561 17621 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:05:12.309574 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.309579 17621 net.cpp:165] Memory required for data: 1101313500\nI0817 16:05:12.309589 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:05:12.309602 17621 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:05:12.309607 17621 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:05:12.309615 17621 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.309674 17621 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:05:12.309839 17621 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:05:12.309854 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.309859 17621 net.cpp:165] Memory required for data: 1105409500\nI0817 16:05:12.309867 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:05:12.309878 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:05:12.309885 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:05:12.309892 17621 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:05:12.309902 17621 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:05:12.309931 17621 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:05:12.309940 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.309945 17621 net.cpp:165] Memory required for data: 1109505500\nI0817 16:05:12.309950 17621 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:05:12.309957 17621 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:05:12.309963 17621 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:05:12.309973 17621 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.309983 17621 net.cpp:150] Setting up L2_b6_relu\nI0817 16:05:12.309990 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.309994 17621 net.cpp:165] Memory required for data: 1113601500\nI0817 16:05:12.309999 17621 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.310006 17621 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.310011 17621 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:05:12.310019 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:05:12.310027 17621 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:05:12.310079 17621 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:05:12.310092 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.310098 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.310103 17621 net.cpp:165] Memory required for data: 1121793500\nI0817 16:05:12.310108 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:05:12.310118 17621 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:05:12.310125 17621 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:05:12.310137 17621 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:05:12.311744 17621 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:05:12.311764 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.311769 17621 net.cpp:165] Memory required for data: 1125889500\nI0817 16:05:12.311779 17621 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:05:12.311799 17621 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:05:12.311806 17621 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:05:12.311815 17621 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:05:12.312149 17621 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:05:12.312165 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.312170 17621 net.cpp:165] Memory required for data: 1129985500\nI0817 16:05:12.312181 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:05:12.312191 17621 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:05:12.312196 17621 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:05:12.312207 17621 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.312268 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:05:12.312429 17621 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:05:12.312443 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.312448 17621 net.cpp:165] Memory required for data: 1134081500\nI0817 16:05:12.312458 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:05:12.312465 17621 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:05:12.312472 17621 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:05:12.312482 17621 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.312492 17621 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:05:12.312500 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.312505 17621 net.cpp:165] Memory required for data: 1138177500\nI0817 16:05:12.312510 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:05:12.312525 17621 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:05:12.312530 17621 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:05:12.312538 17621 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:05:12.313040 17621 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:05:12.313055 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313060 17621 net.cpp:165] Memory required for data: 1142273500\nI0817 16:05:12.313068 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:05:12.313081 17621 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:05:12.313088 17621 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:05:12.313097 17621 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:05:12.313364 17621 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:05:12.313376 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313381 17621 net.cpp:165] Memory required for data: 1146369500\nI0817 16:05:12.313391 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:05:12.313400 17621 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:05:12.313406 17621 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:05:12.313415 17621 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.313474 17621 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:05:12.313635 17621 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:05:12.313652 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313657 17621 net.cpp:165] Memory required for data: 1150465500\nI0817 16:05:12.313665 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:05:12.313674 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:05:12.313681 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:05:12.313694 17621 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:05:12.313704 17621 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:05:12.313736 17621 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:05:12.313748 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313753 17621 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:05:12.313758 17621 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:05:12.313766 17621 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:05:12.313781 17621 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:05:12.313791 17621 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.313801 17621 net.cpp:150] Setting up L2_b7_relu\nI0817 16:05:12.313807 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313812 17621 net.cpp:165] Memory required for data: 1158657500\nI0817 16:05:12.313817 17621 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.313824 17621 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.313829 17621 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:05:12.313839 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:05:12.313850 17621 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:05:12.313899 17621 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:05:12.313911 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313918 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.313923 17621 net.cpp:165] Memory required for data: 1166849500\nI0817 16:05:12.313928 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:05:12.313942 17621 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:05:12.313949 17621 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:05:12.313958 17621 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:05:12.314481 17621 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:05:12.314496 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.314501 17621 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:05:12.314509 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:05:12.314522 17621 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:05:12.314528 17621 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:05:12.314537 17621 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:05:12.314823 17621 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:05:12.314838 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.314843 17621 net.cpp:165] Memory required for data: 1175041500\nI0817 16:05:12.314853 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:05:12.314862 17621 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:05:12.314868 17621 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:05:12.314879 17621 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.314939 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:05:12.315100 17621 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:05:12.315114 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.315119 17621 net.cpp:165] Memory required for data: 1179137500\nI0817 16:05:12.315127 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:05:12.315135 17621 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:05:12.315141 17621 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:05:12.315150 17621 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.315161 17621 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:05:12.315170 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.315173 17621 net.cpp:165] Memory required for data: 1183233500\nI0817 16:05:12.315178 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:05:12.315189 17621 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:05:12.315198 17621 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:05:12.315207 17621 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:05:12.315707 17621 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:05:12.315721 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.315726 17621 net.cpp:165] Memory required for data: 1187329500\nI0817 16:05:12.315735 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:05:12.315753 17621 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:05:12.315760 17621 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:05:12.315771 17621 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:05:12.316047 17621 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:05:12.316061 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.316066 17621 net.cpp:165] Memory required for data: 1191425500\nI0817 16:05:12.316076 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:05:12.316084 17621 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:05:12.316092 17621 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:05:12.316099 17621 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.316161 17621 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:05:12.316319 17621 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:05:12.316335 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.316340 17621 net.cpp:165] Memory required for data: 1195521500\nI0817 16:05:12.316350 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:05:12.316359 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:05:12.316365 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:05:12.316372 17621 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:05:12.316380 17621 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:05:12.316411 17621 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:05:12.316421 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:05:12.316426 17621 net.cpp:165] Memory required for data: 1199617500\nI0817 16:05:12.316431 17621 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:05:12.316439 17621 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:05:12.316445 17621 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:05:12.316454 17621 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.316464 17621 net.cpp:150] Setting up L2_b8_relu\nI0817 16:05:12.316471 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.316476 17621 net.cpp:165] Memory required for data: 1203713500\nI0817 16:05:12.316480 17621 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.316488 17621 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.316493 17621 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:05:12.316504 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:05:12.316527 17621 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:05:12.316579 17621 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:05:12.316591 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.316598 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.316603 17621 net.cpp:165] Memory required for data: 1211905500\nI0817 16:05:12.316608 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:05:12.316623 17621 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:05:12.316630 17621 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:05:12.316642 17621 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:05:12.317140 17621 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:05:12.317155 17621 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:05:12.317160 17621 net.cpp:165] Memory required for data: 1216001500\nI0817 16:05:12.317169 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:05:12.317181 17621 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:05:12.317188 17621 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:05:12.317198 17621 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:05:12.317477 17621 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:05:12.317497 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.317502 17621 net.cpp:165] Memory required for data: 1220097500\nI0817 16:05:12.317513 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:05:12.317522 17621 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:05:12.317528 17621 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:05:12.317535 17621 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.317598 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:05:12.317767 17621 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:05:12.317785 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.317790 17621 net.cpp:165] Memory required for data: 1224193500\nI0817 16:05:12.317798 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:05:12.317806 17621 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:05:12.317812 17621 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:05:12.317821 17621 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.317829 17621 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:05:12.317836 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.317842 17621 net.cpp:165] Memory required for data: 1228289500\nI0817 16:05:12.317847 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:05:12.317860 17621 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:05:12.317867 17621 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:05:12.317878 17621 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:05:12.319355 17621 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:05:12.319373 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.319380 17621 net.cpp:165] Memory required for data: 1232385500\nI0817 16:05:12.319388 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:05:12.319401 17621 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:05:12.319408 17621 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:05:12.319416 17621 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:05:12.319697 17621 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:05:12.319711 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.319716 17621 net.cpp:165] Memory required for data: 1236481500\nI0817 16:05:12.319767 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:05:12.319782 17621 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:05:12.319789 17621 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:05:12.319797 17621 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.319857 17621 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:05:12.320017 17621 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:05:12.320030 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.320035 17621 net.cpp:165] Memory required for data: 1240577500\nI0817 16:05:12.320044 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:05:12.320055 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:05:12.320060 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:05:12.320068 17621 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:05:12.320080 17621 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:05:12.320107 17621 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:05:12.320116 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.320122 17621 net.cpp:165] Memory required for data: 1244673500\nI0817 16:05:12.320127 17621 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:05:12.320137 17621 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:05:12.320142 17621 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:05:12.320150 17621 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.320159 17621 net.cpp:150] Setting up L2_b9_relu\nI0817 16:05:12.320166 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.320179 17621 net.cpp:165] Memory required for data: 1248769500\nI0817 16:05:12.320184 17621 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.320194 17621 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.320200 17621 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:05:12.320207 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:05:12.320217 17621 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:05:12.320269 17621 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:05:12.320281 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.320288 17621 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:05:12.320292 17621 net.cpp:165] Memory required for data: 1256961500\nI0817 16:05:12.320297 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:05:12.320312 17621 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:05:12.320317 17621 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:05:12.320327 17621 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:05:12.320832 17621 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:05:12.320847 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.320853 17621 net.cpp:165] Memory required for data: 1257985500\nI0817 16:05:12.320863 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:05:12.320874 17621 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:05:12.320880 17621 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:05:12.320891 17621 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:05:12.321167 17621 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:05:12.321182 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.321187 17621 net.cpp:165] Memory required for data: 1259009500\nI0817 16:05:12.321197 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:05:12.321205 17621 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:05:12.321211 17621 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:05:12.321219 17621 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.321282 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:05:12.321451 17621 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:05:12.321465 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.321470 17621 net.cpp:165] Memory required for data: 1260033500\nI0817 16:05:12.321478 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:05:12.321487 17621 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:05:12.321493 17621 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:05:12.321503 17621 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:05:12.321513 17621 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:05:12.321521 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.321526 17621 net.cpp:165] Memory required for data: 1261057500\nI0817 16:05:12.321530 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:05:12.321544 17621 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:05:12.321550 17621 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:05:12.321558 17621 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:05:12.322062 17621 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:05:12.322077 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.322082 17621 net.cpp:165] Memory required for data: 1262081500\nI0817 16:05:12.322090 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:05:12.322103 17621 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:05:12.322109 17621 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:05:12.322118 17621 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:05:12.322394 17621 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:05:12.322414 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.322419 17621 net.cpp:165] Memory required for data: 1263105500\nI0817 16:05:12.322430 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:05:12.322443 17621 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:05:12.322449 17621 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:05:12.322456 17621 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:05:12.322515 17621 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:05:12.322681 17621 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:05:12.322701 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.322706 17621 net.cpp:165] Memory required for data: 1264129500\nI0817 16:05:12.322715 17621 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:05:12.322727 17621 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:05:12.322734 17621 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:05:12.322748 17621 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:05:12.322784 17621 net.cpp:150] Setting up L3_b1_pool\nI0817 16:05:12.322793 17621 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:05:12.322798 17621 net.cpp:165] Memory required for data: 1265153500\nI0817 16:05:12.322803 17621 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:05:12.322818 17621 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:05:12.322824 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:05:12.322830 17621 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:05:12.322837 17621 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:05:12.322870 17621 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:05:12.322882 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.322887 17621 net.cpp:165] Memory required for data: 1266177500\nI0817 16:05:12.322892 17621 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:05:12.322903 17621 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:05:12.322909 17621 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:05:12.322916 17621 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:05:12.322926 17621 net.cpp:150] Setting up L3_b1_relu\nI0817 16:05:12.322932 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.322937 17621 net.cpp:165] Memory required for data: 1267201500\nI0817 16:05:12.322942 17621 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:05:12.322952 17621 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:05:12.322958 17621 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:05:12.324178 17621 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:05:12.324198 17621 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:05:12.324204 17621 net.cpp:165] Memory required for data: 1268225500\nI0817 16:05:12.324210 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:05:12.324220 17621 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:05:12.324228 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:05:12.324234 17621 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:05:12.324241 17621 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:05:12.324287 17621 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:05:12.324301 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.324304 17621 net.cpp:165] Memory required for data: 1270273500\nI0817 16:05:12.324311 17621 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.324317 17621 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.324323 17621 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:05:12.324334 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:05:12.324344 17621 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:05:12.324403 17621 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:05:12.324415 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.324430 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.324435 17621 net.cpp:165] Memory required for data: 1274369500\nI0817 16:05:12.324440 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:05:12.324452 17621 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:05:12.324458 17621 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:05:12.324470 17621 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:05:12.325526 17621 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:05:12.325542 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.325547 17621 net.cpp:165] Memory required for data: 1276417500\nI0817 16:05:12.325556 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:05:12.325565 17621 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:05:12.325572 17621 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:05:12.325583 17621 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:05:12.325867 17621 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:05:12.325884 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.325891 17621 net.cpp:165] Memory required for data: 1278465500\nI0817 16:05:12.325901 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:05:12.325909 17621 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:05:12.325917 17621 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:05:12.325923 17621 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.325984 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:05:12.326153 17621 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:05:12.326165 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.326171 17621 net.cpp:165] Memory required for data: 1280513500\nI0817 16:05:12.326180 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:05:12.326189 17621 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:05:12.326195 17621 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:05:12.326205 17621 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:05:12.326215 17621 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:05:12.326222 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.326227 17621 net.cpp:165] Memory required for data: 1282561500\nI0817 16:05:12.326232 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:05:12.326246 17621 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:05:12.326252 17621 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:05:12.326261 17621 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:05:12.327311 17621 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:05:12.327325 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.327332 17621 net.cpp:165] Memory required for data: 1284609500\nI0817 16:05:12.327340 17621 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:05:12.327353 17621 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:05:12.327358 17621 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:05:12.327369 17621 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:05:12.327643 17621 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:05:12.327657 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.327662 17621 net.cpp:165] Memory required for data: 1286657500\nI0817 16:05:12.327672 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:05:12.327682 17621 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:05:12.327694 17621 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:05:12.327702 17621 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:05:12.327765 17621 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:05:12.327929 17621 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:05:12.327942 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.327947 17621 net.cpp:165] Memory required for data: 1288705500\nI0817 16:05:12.327963 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:05:12.327973 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:05:12.327980 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:05:12.327987 17621 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:05:12.327997 17621 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:05:12.328033 17621 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:05:12.328045 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.328050 17621 net.cpp:165] Memory required for data: 1290753500\nI0817 16:05:12.328055 17621 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:05:12.328063 17621 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:05:12.328069 17621 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:05:12.328076 17621 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:05:12.328086 17621 net.cpp:150] Setting up L3_b2_relu\nI0817 16:05:12.328094 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.328097 17621 net.cpp:165] Memory required for data: 1292801500\nI0817 16:05:12.328102 17621 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.328112 17621 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.328117 17621 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:05:12.328125 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:05:12.328135 17621 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:05:12.328186 17621 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:05:12.328197 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.328204 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.328208 17621 net.cpp:165] Memory required for data: 1296897500\nI0817 16:05:12.328214 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:05:12.328225 17621 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:05:12.328232 17621 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:05:12.328243 17621 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:05:12.329296 17621 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:05:12.329313 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.329318 17621 net.cpp:165] Memory required for data: 1298945500\nI0817 16:05:12.329326 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:05:12.329335 17621 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:05:12.329342 17621 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:05:12.329354 17621 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:05:12.329627 17621 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:05:12.329643 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.329648 17621 net.cpp:165] Memory required for data: 1300993500\nI0817 16:05:12.329658 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:05:12.329668 17621 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:05:12.329674 17621 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:05:12.329680 17621 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.329747 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:05:12.329910 17621 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:05:12.329922 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.329927 17621 net.cpp:165] Memory required for data: 1303041500\nI0817 16:05:12.329936 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:05:12.329947 17621 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:05:12.329953 17621 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:05:12.329962 17621 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:05:12.329970 17621 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:05:12.329985 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.329990 17621 net.cpp:165] Memory required for data: 1305089500\nI0817 16:05:12.329994 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:05:12.330009 17621 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:05:12.330016 17621 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:05:12.330025 17621 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:05:12.331076 17621 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:05:12.331091 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331096 17621 net.cpp:165] Memory required for data: 1307137500\nI0817 16:05:12.331105 17621 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:05:12.331118 17621 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:05:12.331125 17621 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:05:12.331135 17621 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:05:12.331408 17621 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:05:12.331421 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331425 17621 net.cpp:165] Memory required for data: 1309185500\nI0817 16:05:12.331436 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:05:12.331444 17621 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:05:12.331451 17621 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:05:12.331461 17621 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:05:12.331523 17621 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:05:12.331687 17621 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:05:12.331701 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331707 17621 net.cpp:165] Memory required for data: 1311233500\nI0817 16:05:12.331715 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:05:12.331724 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:05:12.331732 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:05:12.331738 17621 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:05:12.331749 17621 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:05:12.331786 17621 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:05:12.331800 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331805 17621 net.cpp:165] Memory required for data: 1313281500\nI0817 16:05:12.331810 17621 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:05:12.331818 17621 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:05:12.331823 17621 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:05:12.331833 17621 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:05:12.331843 17621 net.cpp:150] Setting up L3_b3_relu\nI0817 16:05:12.331851 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331856 17621 net.cpp:165] Memory required for data: 1315329500\nI0817 16:05:12.331861 17621 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.331867 17621 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.331872 17621 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:05:12.331879 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:05:12.331889 17621 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:05:12.331939 17621 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:05:12.331953 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331959 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.331964 17621 net.cpp:165] Memory required for data: 1319425500\nI0817 16:05:12.331969 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:05:12.331979 17621 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:05:12.331985 17621 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:05:12.332012 17621 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:05:12.333086 17621 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:05:12.333102 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.333107 17621 net.cpp:165] Memory required for data: 1321473500\nI0817 16:05:12.333117 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:05:12.333127 17621 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:05:12.333133 17621 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:05:12.333144 17621 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:05:12.333415 17621 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:05:12.333431 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.333436 17621 net.cpp:165] Memory required for data: 1323521500\nI0817 16:05:12.333447 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:05:12.333456 17621 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:05:12.333462 17621 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:05:12.333469 17621 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.333529 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:05:12.333703 17621 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:05:12.333716 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.333721 17621 net.cpp:165] Memory required for data: 1325569500\nI0817 16:05:12.333730 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:05:12.333741 17621 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:05:12.333748 17621 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:05:12.333756 17621 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:05:12.333766 17621 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:05:12.333773 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.333778 17621 net.cpp:165] Memory required for data: 1327617500\nI0817 16:05:12.333783 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:05:12.333796 17621 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:05:12.333802 17621 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:05:12.333811 17621 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:05:12.335832 17621 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:05:12.335853 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.335858 17621 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:05:12.335868 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:05:12.335878 17621 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:05:12.335886 17621 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:05:12.335896 17621 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:05:12.336177 17621 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:05:12.336194 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336199 17621 net.cpp:165] Memory required for data: 1331713500\nI0817 16:05:12.336210 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:05:12.336218 17621 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:05:12.336225 17621 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:05:12.336232 17621 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:05:12.336293 17621 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:05:12.336462 17621 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:05:12.336475 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336479 17621 net.cpp:165] Memory required for data: 1333761500\nI0817 16:05:12.336489 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:05:12.336501 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:05:12.336508 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:05:12.336515 17621 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:05:12.336524 17621 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:05:12.336570 17621 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:05:12.336580 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336585 17621 net.cpp:165] Memory required for data: 1335809500\nI0817 16:05:12.336589 17621 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:05:12.336597 17621 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:05:12.336603 17621 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:05:12.336611 17621 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:05:12.336621 17621 net.cpp:150] Setting up L3_b4_relu\nI0817 16:05:12.336627 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336632 17621 net.cpp:165] Memory required for data: 1337857500\nI0817 16:05:12.336637 17621 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.336643 17621 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.336649 17621 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:05:12.336659 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:05:12.336669 17621 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:05:12.336724 17621 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:05:12.336736 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336743 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.336748 17621 net.cpp:165] Memory required for data: 1341953500\nI0817 16:05:12.336753 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:05:12.336768 17621 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:05:12.336776 17621 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:05:12.336784 17621 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:05:12.337829 17621 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:05:12.337844 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.337849 17621 net.cpp:165] Memory required for data: 1344001500\nI0817 16:05:12.337858 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:05:12.337872 17621 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:05:12.337878 17621 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:05:12.337887 17621 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:05:12.338158 17621 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:05:12.338172 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.338177 17621 net.cpp:165] Memory required for data: 1346049500\nI0817 16:05:12.338187 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:05:12.338199 17621 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:05:12.338207 17621 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:05:12.338214 17621 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.338276 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:05:12.338438 17621 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:05:12.338451 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.338456 17621 net.cpp:165] Memory required for data: 1348097500\nI0817 16:05:12.338465 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:05:12.338476 17621 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:05:12.338482 17621 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:05:12.338490 17621 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:05:12.338500 17621 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:05:12.338506 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.338511 17621 net.cpp:165] Memory required for data: 1350145500\nI0817 16:05:12.338516 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:05:12.338531 17621 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:05:12.338536 17621 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:05:12.338557 17621 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:05:12.339586 17621 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:05:12.339601 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:05:12.339606 17621 net.cpp:165] Memory required for data: 1352193500\nI0817 16:05:12.339614 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:05:12.339623 17621 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:05:12.339629 17621 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:05:12.339642 17621 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:05:12.339918 17621 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:05:12.339934 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.339941 17621 net.cpp:165] Memory required for data: 1354241500\nI0817 16:05:12.339951 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:05:12.339959 17621 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:05:12.339965 17621 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:05:12.339972 17621 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:05:12.340031 17621 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:05:12.340193 17621 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:05:12.340207 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.340212 17621 net.cpp:165] Memory required for data: 1356289500\nI0817 16:05:12.340221 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:05:12.340234 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:05:12.340240 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:05:12.340247 17621 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:05:12.340255 17621 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:05:12.340292 17621 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:05:12.340304 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.340309 17621 net.cpp:165] Memory required for data: 1358337500\nI0817 16:05:12.340314 17621 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:05:12.340322 17621 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:05:12.340327 17621 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:05:12.340335 17621 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:05:12.340344 17621 net.cpp:150] Setting up L3_b5_relu\nI0817 16:05:12.340351 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.340356 17621 net.cpp:165] Memory required for data: 1360385500\nI0817 16:05:12.340360 17621 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.340368 17621 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.340373 17621 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:05:12.340384 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:05:12.340394 17621 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:05:12.340440 17621 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:05:12.340451 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.340458 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.340462 17621 net.cpp:165] Memory required for data: 1364481500\nI0817 16:05:12.340467 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:05:12.340482 17621 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:05:12.340488 17621 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:05:12.340498 17621 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:05:12.341526 17621 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:05:12.341542 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.341547 17621 net.cpp:165] Memory required for data: 1366529500\nI0817 16:05:12.341555 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:05:12.341574 
17621 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:05:12.341581 17621 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:05:12.341590 17621 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:05:12.341876 17621 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:05:12.341889 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.341894 17621 net.cpp:165] Memory required for data: 1368577500\nI0817 16:05:12.341905 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:05:12.341917 17621 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:05:12.341923 17621 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:05:12.341931 17621 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.341994 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:05:12.342156 17621 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:05:12.342170 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.342175 17621 net.cpp:165] Memory required for data: 1370625500\nI0817 16:05:12.342183 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:05:12.342195 17621 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:05:12.342201 17621 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:05:12.342208 17621 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:05:12.342218 17621 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:05:12.342228 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.342233 17621 net.cpp:165] Memory required for data: 1372673500\nI0817 16:05:12.342237 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:05:12.342249 17621 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:05:12.342255 17621 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:05:12.342267 17621 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:05:12.343286 17621 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:05:12.343302 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.343307 17621 net.cpp:165] Memory required for data: 1374721500\nI0817 16:05:12.343315 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:05:12.343324 17621 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:05:12.343330 17621 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:05:12.343343 17621 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:05:12.343619 17621 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:05:12.343633 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.343638 17621 net.cpp:165] Memory required for data: 1376769500\nI0817 16:05:12.343648 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:05:12.343657 17621 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:05:12.343663 17621 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:05:12.343670 17621 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:05:12.343739 17621 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:05:12.343901 17621 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:05:12.343917 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.343922 17621 net.cpp:165] Memory required for data: 1378817500\nI0817 16:05:12.343931 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:05:12.343940 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:05:12.343947 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:05:12.343955 17621 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:05:12.343962 17621 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:05:12.344000 17621 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:05:12.344012 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.344017 17621 net.cpp:165] Memory required for data: 1380865500\nI0817 16:05:12.344022 17621 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:05:12.344030 17621 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:05:12.344043 17621 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:05:12.344050 17621 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:05:12.344059 17621 net.cpp:150] Setting up L3_b6_relu\nI0817 16:05:12.344066 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.344071 17621 net.cpp:165] Memory required for data: 1382913500\nI0817 16:05:12.344076 17621 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.344084 17621 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.344089 17621 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:05:12.344100 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:05:12.344110 17621 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:05:12.344159 17621 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:05:12.344174 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.344182 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.344187 17621 net.cpp:165] Memory required for data: 1387009500\nI0817 16:05:12.344192 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:05:12.344202 17621 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:05:12.344209 17621 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:05:12.344218 17621 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:05:12.345257 17621 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:05:12.345271 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.345276 17621 net.cpp:165] Memory required for data: 1389057500\nI0817 16:05:12.345285 17621 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:05:12.345297 17621 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:05:12.345304 17621 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:05:12.345312 17621 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:05:12.345587 17621 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:05:12.345600 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.345605 17621 net.cpp:165] Memory required for data: 1391105500\nI0817 16:05:12.345615 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:05:12.345628 17621 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:05:12.345634 17621 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:05:12.345643 17621 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.345710 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:05:12.345876 17621 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:05:12.345890 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.345896 17621 net.cpp:165] Memory required for data: 1393153500\nI0817 16:05:12.345904 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:05:12.345939 17621 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:05:12.345948 17621 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:05:12.345957 17621 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:05:12.345966 17621 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:05:12.345974 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.345978 17621 net.cpp:165] Memory required for data: 1395201500\nI0817 16:05:12.345984 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:05:12.345995 17621 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:05:12.346001 17621 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:05:12.346010 17621 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:05:12.347051 17621 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:05:12.347067 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347072 17621 net.cpp:165] Memory required for data: 1397249500\nI0817 16:05:12.347081 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:05:12.347100 17621 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:05:12.347107 17621 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:05:12.347115 17621 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:05:12.347393 17621 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:05:12.347405 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347410 17621 net.cpp:165] Memory required for data: 1399297500\nI0817 16:05:12.347421 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:05:12.347432 17621 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:05:12.347440 17621 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:05:12.347447 17621 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:05:12.347510 17621 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:05:12.347673 17621 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:05:12.347692 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347697 17621 net.cpp:165] Memory required for data: 1401345500\nI0817 16:05:12.347707 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:05:12.347718 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:05:12.347725 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:05:12.347733 17621 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:05:12.347743 17621 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:05:12.347779 17621 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:05:12.347790 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347795 17621 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:05:12.347800 17621 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:05:12.347810 17621 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:05:12.347817 17621 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:05:12.347825 17621 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:05:12.347833 17621 net.cpp:150] Setting up L3_b7_relu\nI0817 16:05:12.347841 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347846 17621 net.cpp:165] Memory required for data: 1405441500\nI0817 16:05:12.347851 17621 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.347857 17621 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.347862 17621 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:05:12.347869 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:05:12.347878 17621 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:05:12.347929 17621 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:05:12.347941 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347949 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.347952 17621 net.cpp:165] Memory required for data: 1409537500\nI0817 16:05:12.347957 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:05:12.347971 17621 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:05:12.347978 17621 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:05:12.347988 17621 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:05:12.349995 17621 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:05:12.350013 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.350018 17621 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:05:12.350028 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:05:12.350040 17621 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:05:12.350047 17621 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:05:12.350056 17621 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:05:12.350333 17621 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:05:12.350347 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.350359 17621 net.cpp:165] Memory required for data: 1413633500\nI0817 16:05:12.350371 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:05:12.350383 17621 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:05:12.350390 17621 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:05:12.350400 17621 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.350462 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:05:12.350628 17621 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:05:12.350641 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.350646 17621 net.cpp:165] Memory required for data: 1415681500\nI0817 16:05:12.350656 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:05:12.350663 17621 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:05:12.350670 17621 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:05:12.350680 17621 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:05:12.350697 17621 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:05:12.350704 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.350709 17621 net.cpp:165] Memory required for data: 1417729500\nI0817 16:05:12.350714 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:05:12.350728 17621 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:05:12.350735 17621 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:05:12.350744 17621 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:05:12.351771 17621 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:05:12.351786 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.351791 17621 net.cpp:165] Memory required for data: 1419777500\nI0817 16:05:12.351800 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:05:12.351814 17621 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:05:12.351819 17621 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:05:12.351828 17621 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:05:12.352108 17621 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:05:12.352120 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.352125 17621 net.cpp:165] Memory required for data: 1421825500\nI0817 16:05:12.352135 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:05:12.352144 17621 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:05:12.352150 17621 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:05:12.352159 17621 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:05:12.352221 17621 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:05:12.352385 17621 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:05:12.352398 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.352403 17621 net.cpp:165] Memory required for data: 1423873500\nI0817 16:05:12.352412 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:05:12.352421 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:05:12.352428 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:05:12.352435 17621 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:05:12.352445 17621 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:05:12.352480 17621 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:05:12.352495 17621 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:05:12.352500 17621 net.cpp:165] Memory required for data: 1425921500\nI0817 16:05:12.352506 17621 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:05:12.352514 17621 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:05:12.352520 17621 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:05:12.352527 17621 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:05:12.352536 17621 net.cpp:150] Setting up L3_b8_relu\nI0817 16:05:12.352543 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.352548 17621 net.cpp:165] Memory required for data: 1427969500\nI0817 16:05:12.352560 17621 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.352571 17621 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.352576 17621 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:05:12.352584 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:05:12.352594 17621 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:05:12.352646 17621 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:05:12.352659 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.352665 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.352670 17621 net.cpp:165] Memory required for data: 1432065500\nI0817 16:05:12.352675 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:05:12.352691 17621 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:05:12.352699 17621 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:05:12.352711 17621 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:05:12.353744 17621 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:05:12.353759 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:05:12.353763 17621 net.cpp:165] Memory required for data: 1434113500\nI0817 16:05:12.353772 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:05:12.353781 17621 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:05:12.353788 17621 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:05:12.353799 17621 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:05:12.354079 17621 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:05:12.354099 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.354104 17621 net.cpp:165] Memory required for data: 1436161500\nI0817 16:05:12.354115 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:05:12.354123 17621 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:05:12.354130 17621 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:05:12.354137 17621 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.354197 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:05:12.354360 17621 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:05:12.354373 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.354378 17621 net.cpp:165] Memory required for data: 1438209500\nI0817 16:05:12.354387 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:05:12.354395 17621 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:05:12.354401 17621 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:05:12.354411 17621 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:05:12.354421 17621 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:05:12.354429 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.354434 17621 net.cpp:165] Memory required for data: 1440257500\nI0817 16:05:12.354439 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:05:12.354451 17621 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:05:12.354457 17621 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:05:12.354466 17621 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:05:12.355500 17621 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:05:12.355515 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.355520 17621 net.cpp:165] Memory required for data: 1442305500\nI0817 16:05:12.355530 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:05:12.355543 17621 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:05:12.355551 17621 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:05:12.355562 17621 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:05:12.355842 17621 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:05:12.355856 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.355868 17621 net.cpp:165] Memory required for data: 1444353500\nI0817 16:05:12.355880 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:05:12.355888 17621 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:05:12.355895 17621 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:05:12.355902 17621 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:05:12.355965 17621 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:05:12.356129 17621 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:05:12.356142 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.356148 17621 net.cpp:165] Memory required for data: 1446401500\nI0817 16:05:12.356156 17621 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:05:12.356165 17621 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:05:12.356173 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:05:12.356179 17621 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:05:12.356191 17621 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:05:12.356225 17621 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:05:12.356240 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.356245 17621 net.cpp:165] Memory required for data: 1448449500\nI0817 16:05:12.356251 17621 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:05:12.356258 17621 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:05:12.356264 17621 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:05:12.356271 17621 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:05:12.356281 17621 net.cpp:150] Setting up L3_b9_relu\nI0817 16:05:12.356287 17621 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:05:12.356292 17621 net.cpp:165] Memory required for data: 1450497500\nI0817 16:05:12.356297 17621 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:05:12.356310 17621 net.cpp:100] Creating Layer post_pool\nI0817 16:05:12.356317 17621 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:05:12.356324 17621 net.cpp:408] post_pool -> post_pool\nI0817 16:05:12.356359 17621 net.cpp:150] Setting up post_pool\nI0817 16:05:12.356372 17621 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:05:12.356377 17621 net.cpp:165] Memory required for data: 1450529500\nI0817 16:05:12.356382 17621 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:05:12.356396 17621 net.cpp:100] Creating Layer post_FC\nI0817 16:05:12.356403 17621 net.cpp:434] post_FC <- post_pool\nI0817 16:05:12.356411 17621 net.cpp:408] post_FC -> post_FC_top\nI0817 16:05:12.356578 17621 net.cpp:150] Setting up post_FC\nI0817 16:05:12.356591 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.356597 17621 net.cpp:165] Memory required for data: 1450534500\nI0817 16:05:12.356606 17621 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:05:12.356614 17621 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:05:12.356621 17621 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:05:12.356631 17621 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:05:12.356640 17621 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:05:12.356698 17621 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:05:12.356711 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.356717 17621 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:05:12.356722 17621 net.cpp:165] Memory required for data: 1450544500\nI0817 16:05:12.356727 17621 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:05:12.356735 17621 net.cpp:100] Creating Layer accuracy\nI0817 16:05:12.356742 17621 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:05:12.356750 17621 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:05:12.356756 17621 net.cpp:408] accuracy -> accuracy\nI0817 16:05:12.356772 17621 net.cpp:150] Setting up accuracy\nI0817 16:05:12.356779 17621 net.cpp:157] Top shape: (1)\nI0817 16:05:12.356791 17621 net.cpp:165] Memory required for data: 1450544504\nI0817 16:05:12.356796 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:05:12.356804 17621 net.cpp:100] Creating Layer loss\nI0817 16:05:12.356811 17621 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:05:12.356817 17621 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:05:12.356823 17621 net.cpp:408] loss -> loss\nI0817 16:05:12.356835 17621 layer_factory.hpp:77] Creating layer loss\nI0817 16:05:12.356963 17621 net.cpp:150] Setting up loss\nI0817 16:05:12.356976 17621 net.cpp:157] Top shape: (1)\nI0817 16:05:12.356981 17621 net.cpp:160]     with loss weight 1\nI0817 16:05:12.356997 17621 net.cpp:165] Memory required for data: 1450544508\nI0817 16:05:12.357003 17621 net.cpp:226] loss needs backward computation.\nI0817 16:05:12.357009 17621 net.cpp:228] accuracy does not need backward computation.\nI0817 16:05:12.357015 17621 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:05:12.357022 17621 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:05:12.357026 17621 net.cpp:226] post_pool needs backward computation.\nI0817 16:05:12.357031 17621 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:05:12.357036 17621 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.357041 17621 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.357046 17621 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.357051 17621 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.357056 17621 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.357061 17621 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.357066 17621 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.357071 17621 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.357076 17621 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:05:12.357081 17621 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:05:12.357086 17621 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.357092 17621 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.357097 17621 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.357102 17621 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.357107 17621 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.357112 17621 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:05:12.357117 17621 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.357122 17621 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.357128 17621 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:05:12.357134 17621 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:05:12.357139 17621 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:05:12.357144 17621 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.357149 17621 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.357156 17621 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.357161 17621 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.357164 17621 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.357169 17621 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.357175 17621 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.357180 17621 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:05:12.357189 17621 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:05:12.357194 17621 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.357200 17621 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.357205 17621 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.357216 17621 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.357223 17621 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.357228 17621 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.357233 17621 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.357237 17621 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.357244 17621 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:05:12.357249 17621 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:05:12.357254 17621 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:05:12.357259 17621 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.357264 17621 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.357270 17621 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.357275 17621 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.357280 17621 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.357285 17621 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.357290 17621 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.357295 17621 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:05:12.357300 17621 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:05:12.357306 17621 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.357311 17621 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.357316 17621 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.357321 17621 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.357326 17621 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.357331 17621 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.357336 17621 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.357342 17621 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.357347 17621 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:05:12.357352 17621 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:05:12.357357 17621 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:05:12.357363 17621 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.357368 17621 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.357373 17621 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.357379 17621 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.357384 17621 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.357388 
17621 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.357394 17621 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.357399 17621 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:05:12.357405 17621 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:05:12.357410 17621 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.357415 17621 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.357421 17621 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.357426 17621 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.357432 17621 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.357437 17621 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.357442 17621 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.357447 17621 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.357452 17621 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:05:12.357458 17621 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:05:12.357470 17621 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:05:12.357475 17621 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:05:12.357481 17621 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.357486 17621 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:05:12.357491 17621 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.357496 17621 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.357502 17621 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.357507 17621 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.357512 17621 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:05:12.357517 17621 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.357522 17621 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.357528 17621 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:05:12.357533 17621 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:05:12.357538 17621 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.357547 17621 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.357553 17621 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.357558 17621 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.357564 17621 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.357569 17621 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.357574 17621 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.357580 17621 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.357585 17621 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:05:12.357591 17621 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:05:12.357596 17621 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.357602 17621 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.357607 17621 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.357612 17621 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.357619 17621 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.357623 17621 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:05:12.357628 17621 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.357633 17621 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.357640 17621 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:05:12.357645 17621 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:05:12.357650 17621 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:05:12.357656 17621 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.357661 17621 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.357666 17621 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.357671 17621 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.357677 17621 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.357687 17621 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.357694 17621 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.357700 17621 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:05:12.357705 17621 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:05:12.357712 17621 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.357717 17621 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.357722 17621 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.357728 17621 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.357733 17621 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.357744 17621 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.357750 17621 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.357756 17621 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.357761 17621 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:05:12.357767 17621 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:05:12.357772 17621 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:05:12.357779 17621 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.357784 17621 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.357789 17621 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.357795 17621 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.357801 17621 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.357806 17621 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.357811 17621 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.357817 17621 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:05:12.357822 17621 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:05:12.357828 17621 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.357834 17621 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.357839 17621 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.357846 17621 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.357856 17621 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.357861 17621 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.357867 17621 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.357872 17621 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.357878 17621 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:05:12.357884 17621 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:05:12.357889 17621 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:05:12.357895 17621 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.357902 17621 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.357906 17621 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.357913 17621 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.357918 17621 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.357923 17621 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.357928 17621 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.357934 17621 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:05:12.357939 17621 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:05:12.357945 17621 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.357951 17621 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.357957 17621 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.357962 17621 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.357969 17621 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.357973 17621 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.357978 17621 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.357985 17621 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.357990 17621 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:05:12.357996 17621 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:05:12.358002 17621 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:05:12.358007 17621 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:05:12.358017 17621 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.358024 17621 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:05:12.358031 17621 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.358036 17621 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.358042 17621 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.358047 17621 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.358053 17621 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:05:12.358058 17621 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.358064 17621 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.358069 17621 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:05:12.358075 17621 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:05:12.358080 17621 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:05:12.358088 17621 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:05:12.358093 17621 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:05:12.358098 17621 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:05:12.358103 17621 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:05:12.358108 17621 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:05:12.358114 17621 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:05:12.358120 17621 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:05:12.358125 17621 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:05:12.358131 17621 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:05:12.358137 17621 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:05:12.358144 17621 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:05:12.358148 17621 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:05:12.358155 17621 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:05:12.358160 17621 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:05:12.358165 17621 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:05:12.358171 17621 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:05:12.358177 17621 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:05:12.358182 17621 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:05:12.358188 17621 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:05:12.358194 17621 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:05:12.358201 17621 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:05:12.358206 17621 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:05:12.358211 17621 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:05:12.358217 17621 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:05:12.358223 17621 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:05:12.358228 17621 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:05:12.358234 17621 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:05:12.358243 17621 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:05:12.358249 17621 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:05:12.358254 17621 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:05:12.358261 17621 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:05:12.358266 17621 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:05:12.358273 17621 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:05:12.358278 17621 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:05:12.358284 17621 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:05:12.358294 17621 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:05:12.358300 17621 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:05:12.358306 17621 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:05:12.358311 17621 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:05:12.358317 17621 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:05:12.358325 17621 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:05:12.358330 17621 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:05:12.358335 17621 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:05:12.358341 17621 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:05:12.358346 17621 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:05:12.358352 17621 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:05:12.358358 17621 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:05:12.358363 17621 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:05:12.358369 17621 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:05:12.358376 17621 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:05:12.358381 17621 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:05:12.358386 17621 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:05:12.358392 17621 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:05:12.358398 17621 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:05:12.358404 17621 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:05:12.358409 17621 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:05:12.358415 17621 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:05:12.358422 17621 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:05:12.358427 17621 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:05:12.358433 17621 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:05:12.358438 17621 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:05:12.358444 17621 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:05:12.358450 17621 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:05:12.358456 17621 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:05:12.358461 17621 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:05:12.358467 17621 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:05:12.358472 17621 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:05:12.358479 17621 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:05:12.358484 17621 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:05:12.358490 17621 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:05:12.358496 17621 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:05:12.358502 17621 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:05:12.358508 17621 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:05:12.358513 17621 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:05:12.358520 17621 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:05:12.358525 17621 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:05:12.358530 17621 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:05:12.358536 17621 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:05:12.358542 17621 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:05:12.358547 17621 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:05:12.358554 17621 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:05:12.358561 17621 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:05:12.358572 17621 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:05:12.358579 17621 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:05:12.358584 17621 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:05:12.358590 17621 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:05:12.358597 17621 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:05:12.358603 17621 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:05:12.358608 17621 net.cpp:226] pre_relu needs backward computation.\nI0817 16:05:12.358613 17621 net.cpp:226] pre_scale needs backward computation.\nI0817 16:05:12.358618 17621 net.cpp:226] pre_bn needs backward computation.\nI0817 16:05:12.358623 17621 net.cpp:226] pre_conv needs backward computation.\nI0817 16:05:12.358630 17621 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:05:12.358638 17621 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:05:12.358641 17621 net.cpp:270] This network produces output accuracy\nI0817 16:05:12.358649 17621 net.cpp:270] This network produces output loss\nI0817 16:05:12.358978 17621 net.cpp:283] Network initialization done.\nI0817 16:05:12.359992 17621 solver.cpp:60] Solver scaffolding done.\nI0817 16:05:12.584482 17621 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:05:12.938554 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:12.938606 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:12.945492 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:13.178833 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:13.178920 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:13.213294 17621 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:05:13.213376 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:13.654002 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:13.654090 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:13.662113 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:13.904808 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:13.904945 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:13.956745 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:13.956881 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:14.470948 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:14.471011 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:14.479845 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:14.747097 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:14.747227 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:14.819239 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:14.819368 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:14.902839 17621 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:05:15.383561 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:15.383641 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:05:15.393342 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:15.681056 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:15.681254 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:15.773259 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:15.773439 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:16.425405 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:16.425462 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:16.435767 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:16.747369 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:16.747556 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:16.860541 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:16.860725 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:17.567625 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:17.567678 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:17.578840 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:17.917577 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:17.917795 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:18.051136 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:18.051344 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:05:18.832064 17621 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:05:18.832118 17621 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:05:18.844295 17621 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:05:18.883504 17633 blocking_queue.cpp:50] Waiting for data\nI0817 16:05:18.931571 17636 blocking_queue.cpp:50] Waiting for data\nI0817 16:05:19.275936 17621 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:05:19.276185 17621 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:05:19.435434 17621 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:05:19.435667 17621 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:05:19.606187 17621 parallel.cpp:425] Starting Optimization\nI0817 16:05:19.607514 17621 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:05:19.607529 17621 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:05:19.612434 17621 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:06:42.429648 17621 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:06:42.429950 17621 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:06:46.409657 17621 solver.cpp:228] Iteration 0, loss = 6.83357\nI0817 16:06:46.409699 17621 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:06:46.409725 17621 solver.cpp:244]     Train net output #1: loss = 6.83357 (* 1 = 6.83357 loss)\nI0817 16:06:46.503199 17621 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0817 16:09:05.184540 17621 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:10:28.363365 17621 solver.cpp:404]     Test net output #0: accuracy = 0.1088\nI0817 16:10:28.363623 17621 solver.cpp:404]     Test net output #1: loss = 2.30213 (* 1 = 2.30213 loss)\nI0817 16:10:29.715476 17621 
solver.cpp:228] Iteration 100, loss = 2.32272\nI0817 16:10:29.715510 17621 solver.cpp:244]     Train net output #0: accuracy = 0.136\nI0817 16:10:29.715526 17621 solver.cpp:244]     Train net output #1: loss = 2.32272 (* 1 = 2.32272 loss)\nI0817 16:10:29.759393 17621 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0817 16:12:48.912114 17621 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:14:11.081451 17621 solver.cpp:404]     Test net output #0: accuracy = 0.13572\nI0817 16:14:11.081712 17621 solver.cpp:404]     Test net output #1: loss = 2.26722 (* 1 = 2.26722 loss)\nI0817 16:14:12.417426 17621 solver.cpp:228] Iteration 200, loss = 2.24292\nI0817 16:14:12.417459 17621 solver.cpp:244]     Train net output #0: accuracy = 0.176\nI0817 16:14:12.417474 17621 solver.cpp:244]     Train net output #1: loss = 2.24292 (* 1 = 2.24292 loss)\nI0817 16:14:12.487385 17621 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0817 16:16:30.445088 17621 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:17:52.594841 17621 solver.cpp:404]     Test net output #0: accuracy = 0.27528\nI0817 16:17:52.595094 17621 solver.cpp:404]     Test net output #1: loss = 2.14089 (* 1 = 2.14089 loss)\nI0817 16:17:53.932086 17621 solver.cpp:228] Iteration 300, loss = 1.68765\nI0817 16:17:53.932118 17621 solver.cpp:244]     Train net output #0: accuracy = 0.392\nI0817 16:17:53.932133 17621 solver.cpp:244]     Train net output #1: loss = 1.68765 (* 1 = 1.68765 loss)\nI0817 16:17:54.005491 17621 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0817 16:20:12.214165 17621 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:21:34.382392 17621 solver.cpp:404]     Test net output #0: accuracy = 0.45184\nI0817 16:21:34.382648 17621 solver.cpp:404]     Test net output #1: loss = 1.51888 (* 1 = 1.51888 loss)\nI0817 16:21:35.719856 17621 solver.cpp:228] Iteration 400, loss = 1.16644\nI0817 16:21:35.719890 17621 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0817 16:21:35.719907 
17621 solver.cpp:244]     Train net output #1: loss = 1.16644 (* 1 = 1.16644 loss)\nI0817 16:21:35.796205 17621 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0817 16:23:54.142616 17621 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:25:16.315382 17621 solver.cpp:404]     Test net output #0: accuracy = 0.5992\nI0817 16:25:16.315642 17621 solver.cpp:404]     Test net output #1: loss = 1.11513 (* 1 = 1.11513 loss)\nI0817 16:25:17.653865 17621 solver.cpp:228] Iteration 500, loss = 1.14427\nI0817 16:25:17.653899 17621 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0817 16:25:17.653915 17621 solver.cpp:244]     Train net output #1: loss = 1.14427 (* 1 = 1.14427 loss)\nI0817 16:25:17.728199 17621 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0817 16:27:36.068886 17621 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:28:58.229487 17621 solver.cpp:404]     Test net output #0: accuracy = 0.52728\nI0817 16:28:58.229745 17621 solver.cpp:404]     Test net output #1: loss = 1.4768 (* 1 = 1.4768 loss)\nI0817 16:28:59.567787 17621 solver.cpp:228] Iteration 600, loss = 0.981666\nI0817 16:28:59.567822 17621 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 16:28:59.567837 17621 solver.cpp:244]     Train net output #1: loss = 0.981666 (* 1 = 0.981666 loss)\nI0817 16:28:59.642103 17621 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0817 16:31:18.030555 17621 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:32:40.197360 17621 solver.cpp:404]     Test net output #0: accuracy = 0.46136\nI0817 16:32:40.197638 17621 solver.cpp:404]     Test net output #1: loss = 1.94073 (* 1 = 1.94073 loss)\nI0817 16:32:41.535396 17621 solver.cpp:228] Iteration 700, loss = 0.927895\nI0817 16:32:41.535434 17621 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 16:32:41.535457 17621 solver.cpp:244]     Train net output #1: loss = 0.927895 (* 1 = 0.927895 loss)\nI0817 16:32:41.607723 17621 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0817 16:34:59.871613 17621 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:36:22.043153 17621 solver.cpp:404]     Test net output #0: accuracy = 0.64024\nI0817 16:36:22.043431 17621 solver.cpp:404]     Test net output #1: loss = 1.10925 (* 1 = 1.10925 loss)\nI0817 16:36:23.381197 17621 solver.cpp:228] Iteration 800, loss = 0.708151\nI0817 16:36:23.381233 17621 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 16:36:23.381256 17621 solver.cpp:244]     Train net output #1: loss = 0.708151 (* 1 = 0.708151 loss)\nI0817 16:36:23.455492 17621 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0817 16:38:41.746587 17621 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:40:03.904839 17621 solver.cpp:404]     Test net output #0: accuracy = 0.62268\nI0817 16:40:03.905094 17621 solver.cpp:404]     Test net output #1: loss = 1.22447 (* 1 = 1.22447 loss)\nI0817 16:40:05.242317 17621 solver.cpp:228] Iteration 900, loss = 0.746925\nI0817 16:40:05.242350 17621 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 16:40:05.242364 17621 solver.cpp:244]     Train net output #1: loss = 0.746925 (* 1 = 0.746925 loss)\nI0817 16:40:05.310901 17621 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0817 16:42:23.554414 17621 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:43:45.700740 17621 solver.cpp:404]     Test net output #0: accuracy = 0.56876\nI0817 16:43:45.701004 17621 solver.cpp:404]     Test net output #1: loss = 1.57736 (* 1 = 1.57736 loss)\nI0817 16:43:47.037935 17621 solver.cpp:228] Iteration 1000, loss = 0.607538\nI0817 16:43:47.037968 17621 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:43:47.037982 17621 solver.cpp:244]     Train net output #1: loss = 0.607538 (* 1 = 0.607538 loss)\nI0817 16:43:47.107106 17621 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0817 16:46:05.353782 17621 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:47:27.502686 17621 solver.cpp:404]     Test net output 
#0: accuracy = 0.59544\nI0817 16:47:27.502945 17621 solver.cpp:404]     Test net output #1: loss = 1.29983 (* 1 = 1.29983 loss)\nI0817 16:47:28.840497 17621 solver.cpp:228] Iteration 1100, loss = 0.599476\nI0817 16:47:28.840529 17621 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 16:47:28.840544 17621 solver.cpp:244]     Train net output #1: loss = 0.599476 (* 1 = 0.599476 loss)\nI0817 16:47:28.915403 17621 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0817 16:49:47.157924 17621 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:51:09.302836 17621 solver.cpp:404]     Test net output #0: accuracy = 0.59928\nI0817 16:51:09.303079 17621 solver.cpp:404]     Test net output #1: loss = 1.32767 (* 1 = 1.32767 loss)\nI0817 16:51:10.640694 17621 solver.cpp:228] Iteration 1200, loss = 0.479087\nI0817 16:51:10.640727 17621 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 16:51:10.640741 17621 solver.cpp:244]     Train net output #1: loss = 0.479087 (* 1 = 0.479087 loss)\nI0817 16:51:10.710212 17621 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0817 16:53:28.929517 17621 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:54:51.075978 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6174\nI0817 16:54:51.076231 17621 solver.cpp:404]     Test net output #1: loss = 1.37418 (* 1 = 1.37418 loss)\nI0817 16:54:52.414170 17621 solver.cpp:228] Iteration 1300, loss = 0.451753\nI0817 16:54:52.414206 17621 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 16:54:52.414222 17621 solver.cpp:244]     Train net output #1: loss = 0.451753 (* 1 = 0.451753 loss)\nI0817 16:54:52.482692 17621 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0817 16:57:11.088335 17621 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:58:33.983095 17621 solver.cpp:404]     Test net output #0: accuracy = 0.62888\nI0817 16:58:33.983325 17621 solver.cpp:404]     Test net output #1: loss = 1.37278 (* 1 = 1.37278 loss)\nI0817 
16:58:35.323606 17621 solver.cpp:228] Iteration 1400, loss = 0.428229\nI0817 16:58:35.323649 17621 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 16:58:35.323663 17621 solver.cpp:244]     Train net output #1: loss = 0.428229 (* 1 = 0.428229 loss)\nI0817 16:58:35.393504 17621 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0817 17:00:54.005442 17621 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:02:16.882390 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68904\nI0817 17:02:16.882668 17621 solver.cpp:404]     Test net output #1: loss = 1.10065 (* 1 = 1.10065 loss)\nI0817 17:02:18.222908 17621 solver.cpp:228] Iteration 1500, loss = 0.413776\nI0817 17:02:18.222951 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:02:18.222967 17621 solver.cpp:244]     Train net output #1: loss = 0.413776 (* 1 = 0.413776 loss)\nI0817 17:02:18.295065 17621 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0817 17:04:36.640172 17621 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:05:59.593296 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68584\nI0817 17:05:59.593575 17621 solver.cpp:404]     Test net output #1: loss = 1.05631 (* 1 = 1.05631 loss)\nI0817 17:06:00.931627 17621 solver.cpp:228] Iteration 1600, loss = 0.356642\nI0817 17:06:00.931679 17621 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 17:06:00.931695 17621 solver.cpp:244]     Train net output #1: loss = 0.356642 (* 1 = 0.356642 loss)\nI0817 17:06:00.990129 17621 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0817 17:08:19.175817 17621 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:09:42.161190 17621 solver.cpp:404]     Test net output #0: accuracy = 0.56388\nI0817 17:09:42.161514 17621 solver.cpp:404]     Test net output #1: loss = 2.02906 (* 1 = 2.02906 loss)\nI0817 17:09:43.499258 17621 solver.cpp:228] Iteration 1700, loss = 0.388036\nI0817 17:09:43.499311 17621 solver.cpp:244]     Train net output #0: 
accuracy = 0.872\nI0817 17:09:43.499328 17621 solver.cpp:244]     Train net output #1: loss = 0.388036 (* 1 = 0.388036 loss)\nI0817 17:09:43.560675 17621 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0817 17:12:01.809319 17621 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:13:24.812216 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7036\nI0817 17:13:24.812492 17621 solver.cpp:404]     Test net output #1: loss = 1.13321 (* 1 = 1.13321 loss)\nI0817 17:13:26.150354 17621 solver.cpp:228] Iteration 1800, loss = 0.244744\nI0817 17:13:26.150400 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:13:26.150416 17621 solver.cpp:244]     Train net output #1: loss = 0.244744 (* 1 = 0.244744 loss)\nI0817 17:13:26.222282 17621 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0817 17:15:44.358460 17621 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:17:07.269423 17621 solver.cpp:404]     Test net output #0: accuracy = 0.58484\nI0817 17:17:07.269706 17621 solver.cpp:404]     Test net output #1: loss = 1.72238 (* 1 = 1.72238 loss)\nI0817 17:17:08.606714 17621 solver.cpp:228] Iteration 1900, loss = 0.307238\nI0817 17:17:08.606757 17621 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 17:17:08.606772 17621 solver.cpp:244]     Train net output #1: loss = 0.307238 (* 1 = 0.307238 loss)\nI0817 17:17:08.671797 17621 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0817 17:19:26.834570 17621 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:20:49.730070 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74368\nI0817 17:20:49.730347 17621 solver.cpp:404]     Test net output #1: loss = 0.90088 (* 1 = 0.90088 loss)\nI0817 17:20:51.067755 17621 solver.cpp:228] Iteration 2000, loss = 0.31159\nI0817 17:20:51.067798 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:20:51.067813 17621 solver.cpp:244]     Train net output #1: loss = 0.31159 (* 1 = 0.31159 loss)\nI0817 
17:20:51.131618 17621 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0817 17:23:09.337606 17621 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:24:32.229683 17621 solver.cpp:404]     Test net output #0: accuracy = 0.61484\nI0817 17:24:32.229962 17621 solver.cpp:404]     Test net output #1: loss = 1.7748 (* 1 = 1.7748 loss)\nI0817 17:24:33.567585 17621 solver.cpp:228] Iteration 2100, loss = 0.416521\nI0817 17:24:33.567641 17621 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 17:24:33.567656 17621 solver.cpp:244]     Train net output #1: loss = 0.416521 (* 1 = 0.416521 loss)\nI0817 17:24:33.630794 17621 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0817 17:26:51.740873 17621 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:28:14.634832 17621 solver.cpp:404]     Test net output #0: accuracy = 0.63096\nI0817 17:28:14.635119 17621 solver.cpp:404]     Test net output #1: loss = 1.42958 (* 1 = 1.42958 loss)\nI0817 17:28:15.971668 17621 solver.cpp:228] Iteration 2200, loss = 0.304643\nI0817 17:28:15.971711 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:28:15.971725 17621 solver.cpp:244]     Train net output #1: loss = 0.304643 (* 1 = 0.304643 loss)\nI0817 17:28:16.039398 17621 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0817 17:30:34.217288 17621 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:31:57.115603 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68736\nI0817 17:31:57.115886 17621 solver.cpp:404]     Test net output #1: loss = 1.13786 (* 1 = 1.13786 loss)\nI0817 17:31:58.453898 17621 solver.cpp:228] Iteration 2300, loss = 0.270399\nI0817 17:31:58.453941 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:31:58.453958 17621 solver.cpp:244]     Train net output #1: loss = 0.270399 (* 1 = 0.270399 loss)\nI0817 17:31:58.517042 17621 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0817 17:34:16.739241 17621 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0817 17:35:39.635778 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73444\nI0817 17:35:39.636092 17621 solver.cpp:404]     Test net output #1: loss = 0.93226 (* 1 = 0.93226 loss)\nI0817 17:35:40.972705 17621 solver.cpp:228] Iteration 2400, loss = 0.365573\nI0817 17:35:40.972745 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:35:40.972760 17621 solver.cpp:244]     Train net output #1: loss = 0.365573 (* 1 = 0.365573 loss)\nI0817 17:35:41.043867 17621 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0817 17:37:59.214623 17621 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:39:22.118767 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68964\nI0817 17:39:22.119076 17621 solver.cpp:404]     Test net output #1: loss = 1.30505 (* 1 = 1.30505 loss)\nI0817 17:39:23.455600 17621 solver.cpp:228] Iteration 2500, loss = 0.323764\nI0817 17:39:23.455639 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:39:23.455654 17621 solver.cpp:244]     Train net output #1: loss = 0.323764 (* 1 = 0.323764 loss)\nI0817 17:39:23.521334 17621 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0817 17:41:41.656344 17621 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:43:04.541544 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74396\nI0817 17:43:04.541836 17621 solver.cpp:404]     Test net output #1: loss = 1.02956 (* 1 = 1.02956 loss)\nI0817 17:43:05.878523 17621 solver.cpp:228] Iteration 2600, loss = 0.239691\nI0817 17:43:05.878573 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:43:05.878590 17621 solver.cpp:244]     Train net output #1: loss = 0.239692 (* 1 = 0.239692 loss)\nI0817 17:43:05.947049 17621 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0817 17:45:24.098536 17621 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:46:46.996567 17621 solver.cpp:404]     Test net output #0: accuracy = 0.70832\nI0817 17:46:46.996881 17621 solver.cpp:404]     
Test net output #1: loss = 1.0548 (* 1 = 1.0548 loss)\nI0817 17:46:48.334578 17621 solver.cpp:228] Iteration 2700, loss = 0.327419\nI0817 17:46:48.334620 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 17:46:48.334635 17621 solver.cpp:244]     Train net output #1: loss = 0.327419 (* 1 = 0.327419 loss)\nI0817 17:46:48.400991 17621 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0817 17:49:06.616755 17621 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:50:29.486814 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68212\nI0817 17:50:29.487072 17621 solver.cpp:404]     Test net output #1: loss = 1.1518 (* 1 = 1.1518 loss)\nI0817 17:50:30.824041 17621 solver.cpp:228] Iteration 2800, loss = 0.226367\nI0817 17:50:30.824084 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:50:30.824100 17621 solver.cpp:244]     Train net output #1: loss = 0.226367 (* 1 = 0.226367 loss)\nI0817 17:50:30.885109 17621 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0817 17:52:49.064821 17621 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:54:11.894067 17621 solver.cpp:404]     Test net output #0: accuracy = 0.643\nI0817 17:54:11.894281 17621 solver.cpp:404]     Test net output #1: loss = 1.37997 (* 1 = 1.37997 loss)\nI0817 17:54:13.231881 17621 solver.cpp:228] Iteration 2900, loss = 0.274295\nI0817 17:54:13.231926 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 17:54:13.231941 17621 solver.cpp:244]     Train net output #1: loss = 0.274295 (* 1 = 0.274295 loss)\nI0817 17:54:13.293546 17621 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0817 17:56:31.525446 17621 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:57:54.359076 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73348\nI0817 17:57:54.359292 17621 solver.cpp:404]     Test net output #1: loss = 0.845525 (* 1 = 0.845525 loss)\nI0817 17:57:55.696280 17621 solver.cpp:228] Iteration 3000, loss = 0.297935\nI0817 
17:57:55.696321 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:57:55.696336 17621 solver.cpp:244]     Train net output #1: loss = 0.297935 (* 1 = 0.297935 loss)\nI0817 17:57:55.758934 17621 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0817 18:00:13.946965 17621 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:01:36.595625 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69648\nI0817 18:01:36.595834 17621 solver.cpp:404]     Test net output #1: loss = 0.975185 (* 1 = 0.975185 loss)\nI0817 18:01:37.933027 17621 solver.cpp:228] Iteration 3100, loss = 0.323253\nI0817 18:01:37.933073 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:01:37.933100 17621 solver.cpp:244]     Train net output #1: loss = 0.323253 (* 1 = 0.323253 loss)\nI0817 18:01:37.996829 17621 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0817 18:03:56.138386 17621 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:05:18.927613 17621 solver.cpp:404]     Test net output #0: accuracy = 0.67156\nI0817 18:05:18.927829 17621 solver.cpp:404]     Test net output #1: loss = 1.29345 (* 1 = 1.29345 loss)\nI0817 18:05:20.265429 17621 solver.cpp:228] Iteration 3200, loss = 0.34572\nI0817 18:05:20.265476 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:05:20.265499 17621 solver.cpp:244]     Train net output #1: loss = 0.34572 (* 1 = 0.34572 loss)\nI0817 18:05:20.336333 17621 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0817 18:07:38.544214 17621 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:09:01.411501 17621 solver.cpp:404]     Test net output #0: accuracy = 0.5294\nI0817 18:09:01.411746 17621 solver.cpp:404]     Test net output #1: loss = 2.63062 (* 1 = 2.63062 loss)\nI0817 18:09:02.748831 17621 solver.cpp:228] Iteration 3300, loss = 0.361991\nI0817 18:09:02.748888 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:09:02.748911 17621 solver.cpp:244]     Train net output 
#1: loss = 0.361991 (* 1 = 0.361991 loss)\nI0817 18:09:02.820319 17621 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0817 18:11:21.014539 17621 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:12:43.883719 17621 solver.cpp:404]     Test net output #0: accuracy = 0.574\nI0817 18:12:43.884035 17621 solver.cpp:404]     Test net output #1: loss = 1.77076 (* 1 = 1.77076 loss)\nI0817 18:12:45.222461 17621 solver.cpp:228] Iteration 3400, loss = 0.30723\nI0817 18:12:45.222508 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:12:45.222532 17621 solver.cpp:244]     Train net output #1: loss = 0.30723 (* 1 = 0.30723 loss)\nI0817 18:12:45.283984 17621 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0817 18:15:03.511235 17621 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:16:26.391517 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71012\nI0817 18:16:26.391794 17621 solver.cpp:404]     Test net output #1: loss = 1.07859 (* 1 = 1.07859 loss)\nI0817 18:16:27.729544 17621 solver.cpp:228] Iteration 3500, loss = 0.397942\nI0817 18:16:27.729590 17621 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 18:16:27.729614 17621 solver.cpp:244]     Train net output #1: loss = 0.397942 (* 1 = 0.397942 loss)\nI0817 18:16:27.796866 17621 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0817 18:18:46.333964 17621 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:20:09.190574 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69612\nI0817 18:20:09.190874 17621 solver.cpp:404]     Test net output #1: loss = 1.00857 (* 1 = 1.00857 loss)\nI0817 18:20:10.530238 17621 solver.cpp:228] Iteration 3600, loss = 0.439758\nI0817 18:20:10.530287 17621 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 18:20:10.530309 17621 solver.cpp:244]     Train net output #1: loss = 0.439758 (* 1 = 0.439758 loss)\nI0817 18:20:10.600522 17621 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0817 18:22:29.084677 17621 
solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:23:51.944020 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73428\nI0817 18:23:51.944288 17621 solver.cpp:404]     Test net output #1: loss = 0.826187 (* 1 = 0.826187 loss)\nI0817 18:23:53.284142 17621 solver.cpp:228] Iteration 3700, loss = 0.422196\nI0817 18:23:53.284186 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:23:53.284202 17621 solver.cpp:244]     Train net output #1: loss = 0.422196 (* 1 = 0.422196 loss)\nI0817 18:23:53.353041 17621 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0817 18:26:11.976822 17621 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:27:34.832701 17621 solver.cpp:404]     Test net output #0: accuracy = 0.67476\nI0817 18:27:34.832988 17621 solver.cpp:404]     Test net output #1: loss = 1.03836 (* 1 = 1.03836 loss)\nI0817 18:27:36.173302 17621 solver.cpp:228] Iteration 3800, loss = 0.180606\nI0817 18:27:36.173346 17621 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:27:36.173362 17621 solver.cpp:244]     Train net output #1: loss = 0.180607 (* 1 = 0.180607 loss)\nI0817 18:27:36.244758 17621 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0817 18:29:54.822017 17621 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:31:17.669720 17621 solver.cpp:404]     Test net output #0: accuracy = 0.70952\nI0817 18:31:17.670006 17621 solver.cpp:404]     Test net output #1: loss = 1.06061 (* 1 = 1.06061 loss)\nI0817 18:31:19.010421 17621 solver.cpp:228] Iteration 3900, loss = 0.307718\nI0817 18:31:19.010462 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:31:19.010478 17621 solver.cpp:244]     Train net output #1: loss = 0.307718 (* 1 = 0.307718 loss)\nI0817 18:31:19.076335 17621 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0817 18:33:37.603299 17621 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:35:00.458631 17621 solver.cpp:404]     Test net output #0: accuracy = 
0.64508\nI0817 18:35:00.458945 17621 solver.cpp:404]     Test net output #1: loss = 1.31589 (* 1 = 1.31589 loss)\nI0817 18:35:01.799721 17621 solver.cpp:228] Iteration 4000, loss = 0.256404\nI0817 18:35:01.799760 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:35:01.799777 17621 solver.cpp:244]     Train net output #1: loss = 0.256404 (* 1 = 0.256404 loss)\nI0817 18:35:01.859802 17621 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0817 18:37:20.380522 17621 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:38:43.241170 17621 solver.cpp:404]     Test net output #0: accuracy = 0.63116\nI0817 18:38:43.241453 17621 solver.cpp:404]     Test net output #1: loss = 1.61601 (* 1 = 1.61601 loss)\nI0817 18:38:44.581984 17621 solver.cpp:228] Iteration 4100, loss = 0.408211\nI0817 18:38:44.582023 17621 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 18:38:44.582039 17621 solver.cpp:244]     Train net output #1: loss = 0.408211 (* 1 = 0.408211 loss)\nI0817 18:38:44.645807 17621 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0817 18:41:03.285207 17621 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:42:26.135978 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71052\nI0817 18:42:26.136272 17621 solver.cpp:404]     Test net output #1: loss = 1.07789 (* 1 = 1.07789 loss)\nI0817 18:42:27.477262 17621 solver.cpp:228] Iteration 4200, loss = 0.301338\nI0817 18:42:27.477303 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:42:27.477319 17621 solver.cpp:244]     Train net output #1: loss = 0.301338 (* 1 = 0.301338 loss)\nI0817 18:42:27.543452 17621 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0817 18:44:46.217090 17621 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:46:09.074354 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71884\nI0817 18:46:09.074635 17621 solver.cpp:404]     Test net output #1: loss = 0.929472 (* 1 = 0.929472 loss)\nI0817 18:46:10.415109 17621 
solver.cpp:228] Iteration 4300, loss = 0.328462\nI0817 18:46:10.415151 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:46:10.415166 17621 solver.cpp:244]     Train net output #1: loss = 0.328462 (* 1 = 0.328462 loss)\nI0817 18:46:10.484761 17621 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0817 18:48:28.996186 17621 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:49:51.847630 17621 solver.cpp:404]     Test net output #0: accuracy = 0.634\nI0817 18:49:51.847932 17621 solver.cpp:404]     Test net output #1: loss = 1.3843 (* 1 = 1.3843 loss)\nI0817 18:49:53.188244 17621 solver.cpp:228] Iteration 4400, loss = 0.305962\nI0817 18:49:53.188287 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:49:53.188302 17621 solver.cpp:244]     Train net output #1: loss = 0.305963 (* 1 = 0.305963 loss)\nI0817 18:49:53.254709 17621 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0817 18:52:11.799139 17621 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:53:34.646503 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6958\nI0817 18:53:34.646808 17621 solver.cpp:404]     Test net output #1: loss = 1.04184 (* 1 = 1.04184 loss)\nI0817 18:53:35.987658 17621 solver.cpp:228] Iteration 4500, loss = 0.287788\nI0817 18:53:35.987699 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:53:35.987715 17621 solver.cpp:244]     Train net output #1: loss = 0.287788 (* 1 = 0.287788 loss)\nI0817 18:53:36.049126 17621 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0817 18:55:54.632407 17621 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:57:17.505084 17621 solver.cpp:404]     Test net output #0: accuracy = 0.65052\nI0817 18:57:17.505369 17621 solver.cpp:404]     Test net output #1: loss = 1.15447 (* 1 = 1.15447 loss)\nI0817 18:57:18.844511 17621 solver.cpp:228] Iteration 4600, loss = 0.428522\nI0817 18:57:18.844557 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 
18:57:18.844579 17621 solver.cpp:244]     Train net output #1: loss = 0.428522 (* 1 = 0.428522 loss)\nI0817 18:57:18.913048 17621 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0817 18:59:37.371464 17621 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:01:00.245507 17621 solver.cpp:404]     Test net output #0: accuracy = 0.71556\nI0817 19:01:00.245805 17621 solver.cpp:404]     Test net output #1: loss = 1.05516 (* 1 = 1.05516 loss)\nI0817 19:01:01.587970 17621 solver.cpp:228] Iteration 4700, loss = 0.341207\nI0817 19:01:01.588011 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:01:01.588035 17621 solver.cpp:244]     Train net output #1: loss = 0.341207 (* 1 = 0.341207 loss)\nI0817 19:01:01.655180 17621 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0817 19:03:20.193346 17621 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:04:43.063474 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6004\nI0817 19:04:43.063781 17621 solver.cpp:404]     Test net output #1: loss = 1.51505 (* 1 = 1.51505 loss)\nI0817 19:04:44.404235 17621 solver.cpp:228] Iteration 4800, loss = 0.362234\nI0817 19:04:44.404283 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:04:44.404305 17621 solver.cpp:244]     Train net output #1: loss = 0.362234 (* 1 = 0.362234 loss)\nI0817 19:04:44.468124 17621 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0817 19:07:02.995332 17621 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:08:25.871587 17621 solver.cpp:404]     Test net output #0: accuracy = 0.67216\nI0817 19:08:25.871882 17621 solver.cpp:404]     Test net output #1: loss = 1.22297 (* 1 = 1.22297 loss)\nI0817 19:08:27.213224 17621 solver.cpp:228] Iteration 4900, loss = 0.374725\nI0817 19:08:27.213269 17621 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 19:08:27.213292 17621 solver.cpp:244]     Train net output #1: loss = 0.374725 (* 1 = 0.374725 loss)\nI0817 19:08:27.283175 17621 
sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0817 19:10:45.831001 17621 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:12:08.732969 17621 solver.cpp:404]     Test net output #0: accuracy = 0.63868\nI0817 19:12:08.733265 17621 solver.cpp:404]     Test net output #1: loss = 1.26025 (* 1 = 1.26025 loss)\nI0817 19:12:10.073673 17621 solver.cpp:228] Iteration 5000, loss = 0.387631\nI0817 19:12:10.073717 17621 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 19:12:10.073734 17621 solver.cpp:244]     Train net output #1: loss = 0.387631 (* 1 = 0.387631 loss)\nI0817 19:12:10.140038 17621 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0817 19:14:28.727196 17621 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:15:51.606072 17621 solver.cpp:404]     Test net output #0: accuracy = 0.76516\nI0817 19:15:51.606375 17621 solver.cpp:404]     Test net output #1: loss = 0.769357 (* 1 = 0.769357 loss)\nI0817 19:15:52.945930 17621 solver.cpp:228] Iteration 5100, loss = 0.325335\nI0817 19:15:52.945973 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:15:52.945988 17621 solver.cpp:244]     Train net output #1: loss = 0.325335 (* 1 = 0.325335 loss)\nI0817 19:15:53.016765 17621 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0817 19:18:11.544008 17621 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:19:34.484122 17621 solver.cpp:404]     Test net output #0: accuracy = 0.68388\nI0817 19:19:34.484407 17621 solver.cpp:404]     Test net output #1: loss = 1.21392 (* 1 = 1.21392 loss)\nI0817 19:19:35.824134 17621 solver.cpp:228] Iteration 5200, loss = 0.283056\nI0817 19:19:35.824175 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:19:35.824190 17621 solver.cpp:244]     Train net output #1: loss = 0.283056 (* 1 = 0.283056 loss)\nI0817 19:19:35.897554 17621 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0817 19:21:54.527698 17621 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 
19:23:17.446666 17621 solver.cpp:404]     Test net output #0: accuracy = 0.75748\nI0817 19:23:17.446961 17621 solver.cpp:404]     Test net output #1: loss = 0.854917 (* 1 = 0.854917 loss)\nI0817 19:23:18.786203 17621 solver.cpp:228] Iteration 5300, loss = 0.256943\nI0817 19:23:18.786244 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:23:18.786259 17621 solver.cpp:244]     Train net output #1: loss = 0.256943 (* 1 = 0.256943 loss)\nI0817 19:23:18.862856 17621 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0817 19:25:37.388417 17621 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:27:00.270385 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74672\nI0817 19:27:00.270689 17621 solver.cpp:404]     Test net output #1: loss = 0.753813 (* 1 = 0.753813 loss)\nI0817 19:27:01.612215 17621 solver.cpp:228] Iteration 5400, loss = 0.321861\nI0817 19:27:01.612252 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:27:01.612267 17621 solver.cpp:244]     Train net output #1: loss = 0.321861 (* 1 = 0.321861 loss)\nI0817 19:27:01.678983 17621 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0817 19:29:20.234227 17621 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:30:43.113528 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73448\nI0817 19:30:43.113824 17621 solver.cpp:404]     Test net output #1: loss = 0.929464 (* 1 = 0.929464 loss)\nI0817 19:30:44.454413 17621 solver.cpp:228] Iteration 5500, loss = 0.31798\nI0817 19:30:44.454454 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:30:44.454478 17621 solver.cpp:244]     Train net output #1: loss = 0.31798 (* 1 = 0.31798 loss)\nI0817 19:30:44.528143 17621 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0817 19:33:03.010331 17621 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:34:25.885736 17621 solver.cpp:404]     Test net output #0: accuracy = 0.56612\nI0817 19:34:25.886023 17621 solver.cpp:404]     Test net 
output #1: loss = 1.81776 (* 1 = 1.81776 loss)\nI0817 19:34:27.226747 17621 solver.cpp:228] Iteration 5600, loss = 0.327118\nI0817 19:34:27.226789 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:34:27.226819 17621 solver.cpp:244]     Train net output #1: loss = 0.327118 (* 1 = 0.327118 loss)\nI0817 19:34:27.298396 17621 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0817 19:36:45.817143 17621 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:38:08.691213 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7718\nI0817 19:38:08.691525 17621 solver.cpp:404]     Test net output #1: loss = 0.702516 (* 1 = 0.702516 loss)\nI0817 19:38:10.031774 17621 solver.cpp:228] Iteration 5700, loss = 0.333571\nI0817 19:38:10.031822 17621 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:38:10.031846 17621 solver.cpp:244]     Train net output #1: loss = 0.333572 (* 1 = 0.333572 loss)\nI0817 19:38:10.102047 17621 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0817 19:40:28.655866 17621 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:41:51.533565 17621 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0817 19:41:51.533869 17621 solver.cpp:404]     Test net output #1: loss = 0.976494 (* 1 = 0.976494 loss)\nI0817 19:41:52.873731 17621 solver.cpp:228] Iteration 5800, loss = 0.236998\nI0817 19:41:52.873775 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:41:52.873797 17621 solver.cpp:244]     Train net output #1: loss = 0.236998 (* 1 = 0.236998 loss)\nI0817 19:41:52.937224 17621 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0817 19:44:11.438441 17621 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:45:34.315687 17621 solver.cpp:404]     Test net output #0: accuracy = 0.66548\nI0817 19:45:34.315964 17621 solver.cpp:404]     Test net output #1: loss = 1.19535 (* 1 = 1.19535 loss)\nI0817 19:45:35.656034 17621 solver.cpp:228] Iteration 5900, loss = 0.444351\nI0817 
19:45:35.656078 17621 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 19:45:35.656100 17621 solver.cpp:244]     Train net output #1: loss = 0.444351 (* 1 = 0.444351 loss)\nI0817 19:45:35.728855 17621 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0817 19:47:54.226296 17621 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:49:17.091361 17621 solver.cpp:404]     Test net output #0: accuracy = 0.6794\nI0817 19:49:17.091665 17621 solver.cpp:404]     Test net output #1: loss = 1.14376 (* 1 = 1.14376 loss)\nI0817 19:49:18.432209 17621 solver.cpp:228] Iteration 6000, loss = 0.279201\nI0817 19:49:18.432248 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 19:49:18.432263 17621 solver.cpp:244]     Train net output #1: loss = 0.279201 (* 1 = 0.279201 loss)\nI0817 19:49:18.504333 17621 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0817 19:51:36.967272 17621 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:52:59.853946 17621 solver.cpp:404]     Test net output #0: accuracy = 0.66384\nI0817 19:52:59.854203 17621 solver.cpp:404]     Test net output #1: loss = 1.34236 (* 1 = 1.34236 loss)\nI0817 19:53:01.193667 17621 solver.cpp:228] Iteration 6100, loss = 0.318815\nI0817 19:53:01.193709 17621 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 19:53:01.193724 17621 solver.cpp:244]     Train net output #1: loss = 0.318815 (* 1 = 0.318815 loss)\nI0817 19:53:01.265218 17621 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0817 19:55:19.804901 17621 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:56:42.734405 17621 solver.cpp:404]     Test net output #0: accuracy = 0.63624\nI0817 19:56:42.734700 17621 solver.cpp:404]     Test net output #1: loss = 1.55111 (* 1 = 1.55111 loss)\nI0817 19:56:44.075986 17621 solver.cpp:228] Iteration 6200, loss = 0.314675\nI0817 19:56:44.076026 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:56:44.076042 17621 solver.cpp:244]     Train net 
output #1: loss = 0.314675 (* 1 = 0.314675 loss)\nI0817 19:56:44.139356 17621 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0817 19:59:02.662456 17621 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:00:25.562414 17621 solver.cpp:404]     Test net output #0: accuracy = 0.77296\nI0817 20:00:25.562714 17621 solver.cpp:404]     Test net output #1: loss = 0.833763 (* 1 = 0.833763 loss)\nI0817 20:00:26.903095 17621 solver.cpp:228] Iteration 6300, loss = 0.289508\nI0817 20:00:26.903139 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:00:26.903156 17621 solver.cpp:244]     Train net output #1: loss = 0.289508 (* 1 = 0.289508 loss)\nI0817 20:00:26.974545 17621 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0817 20:02:45.449394 17621 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:04:08.326020 17621 solver.cpp:404]     Test net output #0: accuracy = 0.77212\nI0817 20:04:08.326325 17621 solver.cpp:404]     Test net output #1: loss = 0.754735 (* 1 = 0.754735 loss)\nI0817 20:04:09.666942 17621 solver.cpp:228] Iteration 6400, loss = 0.209688\nI0817 20:04:09.666985 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:04:09.667001 17621 solver.cpp:244]     Train net output #1: loss = 0.209688 (* 1 = 0.209688 loss)\nI0817 20:04:09.736979 17621 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0817 20:06:28.277238 17621 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:07:51.145181 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73152\nI0817 20:07:51.145470 17621 solver.cpp:404]     Test net output #1: loss = 0.879976 (* 1 = 0.879976 loss)\nI0817 20:07:52.485790 17621 solver.cpp:228] Iteration 6500, loss = 0.33055\nI0817 20:07:52.485833 17621 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:07:52.485849 17621 solver.cpp:244]     Train net output #1: loss = 0.33055 (* 1 = 0.33055 loss)\nI0817 20:07:52.553445 17621 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0817 
20:10:11.000818 17621 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:11:33.867214 17621 solver.cpp:404]     Test net output #0: accuracy = 0.80628\nI0817 20:11:33.867496 17621 solver.cpp:404]     Test net output #1: loss = 0.654978 (* 1 = 0.654978 loss)\nI0817 20:11:35.206588 17621 solver.cpp:228] Iteration 6600, loss = 0.21025\nI0817 20:11:35.206629 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:11:35.206643 17621 solver.cpp:244]     Train net output #1: loss = 0.21025 (* 1 = 0.21025 loss)\nI0817 20:11:35.278537 17621 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0817 20:13:53.706961 17621 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:15:16.573169 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72472\nI0817 20:15:16.573454 17621 solver.cpp:404]     Test net output #1: loss = 1.03318 (* 1 = 1.03318 loss)\nI0817 20:15:17.912523 17621 solver.cpp:228] Iteration 6700, loss = 0.270941\nI0817 20:15:17.912564 17621 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 20:15:17.912580 17621 solver.cpp:244]     Train net output #1: loss = 0.270941 (* 1 = 0.270941 loss)\nI0817 20:15:17.987524 17621 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0817 20:17:36.427091 17621 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:18:59.295657 17621 solver.cpp:404]     Test net output #0: accuracy = 0.67344\nI0817 20:18:59.295944 17621 solver.cpp:404]     Test net output #1: loss = 1.42904 (* 1 = 1.42904 loss)\nI0817 20:19:00.635334 17621 solver.cpp:228] Iteration 6800, loss = 0.117274\nI0817 20:19:00.635375 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:19:00.635391 17621 solver.cpp:244]     Train net output #1: loss = 0.117274 (* 1 = 0.117274 loss)\nI0817 20:19:00.701395 17621 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0817 20:21:19.121861 17621 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:22:41.987056 17621 solver.cpp:404]     Test net output #0: 
accuracy = 0.75664\nI0817 20:22:41.987351 17621 solver.cpp:404]     Test net output #1: loss = 0.881757 (* 1 = 0.881757 loss)\nI0817 20:22:43.326860 17621 solver.cpp:228] Iteration 6900, loss = 0.248739\nI0817 20:22:43.326905 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:22:43.326921 17621 solver.cpp:244]     Train net output #1: loss = 0.248739 (* 1 = 0.248739 loss)\nI0817 20:22:43.397336 17621 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0817 20:25:01.824107 17621 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:26:24.685860 17621 solver.cpp:404]     Test net output #0: accuracy = 0.69952\nI0817 20:26:24.686174 17621 solver.cpp:404]     Test net output #1: loss = 1.10589 (* 1 = 1.10589 loss)\nI0817 20:26:26.025406 17621 solver.cpp:228] Iteration 7000, loss = 0.173028\nI0817 20:26:26.025446 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:26:26.025462 17621 solver.cpp:244]     Train net output #1: loss = 0.173028 (* 1 = 0.173028 loss)\nI0817 20:26:26.099272 17621 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0817 20:28:44.521670 17621 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:30:07.391124 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78304\nI0817 20:30:07.391397 17621 solver.cpp:404]     Test net output #1: loss = 0.739801 (* 1 = 0.739801 loss)\nI0817 20:30:08.729960 17621 solver.cpp:228] Iteration 7100, loss = 0.226338\nI0817 20:30:08.730001 17621 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 20:30:08.730016 17621 solver.cpp:244]     Train net output #1: loss = 0.226338 (* 1 = 0.226338 loss)\nI0817 20:30:08.794417 17621 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0817 20:32:27.191258 17621 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:33:50.058629 17621 solver.cpp:404]     Test net output #0: accuracy = 0.74988\nI0817 20:33:50.058920 17621 solver.cpp:404]     Test net output #1: loss = 0.978449 (* 1 = 0.978449 loss)\nI0817 
20:33:51.398341 17621 solver.cpp:228] Iteration 7200, loss = 0.237316\nI0817 20:33:51.398382 17621 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:33:51.398397 17621 solver.cpp:244]     Train net output #1: loss = 0.237316 (* 1 = 0.237316 loss)\nI0817 20:33:51.469070 17621 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0817 20:36:10.875888 17621 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:37:34.615792 17621 solver.cpp:404]     Test net output #0: accuracy = 0.77452\nI0817 20:37:34.616103 17621 solver.cpp:404]     Test net output #1: loss = 0.769234 (* 1 = 0.769234 loss)\nI0817 20:37:35.969979 17621 solver.cpp:228] Iteration 7300, loss = 0.227877\nI0817 20:37:35.970018 17621 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:37:35.970033 17621 solver.cpp:244]     Train net output #1: loss = 0.227877 (* 1 = 0.227877 loss)\nI0817 20:37:36.023157 17621 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0817 20:39:55.771772 17621 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:41:19.491498 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78164\nI0817 20:41:19.491801 17621 solver.cpp:404]     Test net output #1: loss = 0.787621 (* 1 = 0.787621 loss)\nI0817 20:41:20.844494 17621 solver.cpp:228] Iteration 7400, loss = 0.172875\nI0817 20:41:20.844532 17621 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:41:20.844547 17621 solver.cpp:244]     Train net output #1: loss = 0.172875 (* 1 = 0.172875 loss)\nI0817 20:41:20.905846 17621 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0817 20:43:40.631649 17621 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:45:04.349334 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73996\nI0817 20:45:04.349633 17621 solver.cpp:404]     Test net output #1: loss = 0.964547 (* 1 = 0.964547 loss)\nI0817 20:45:05.703464 17621 solver.cpp:228] Iteration 7500, loss = 0.165322\nI0817 20:45:05.703505 17621 solver.cpp:244]     Train net output 
#0: accuracy = 0.944\nI0817 20:45:05.703521 17621 solver.cpp:244]     Train net output #1: loss = 0.165322 (* 1 = 0.165322 loss)\nI0817 20:45:05.760560 17621 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0817 20:47:25.514644 17621 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:48:49.231523 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78032\nI0817 20:48:49.231808 17621 solver.cpp:404]     Test net output #1: loss = 0.805574 (* 1 = 0.805574 loss)\nI0817 20:48:50.585340 17621 solver.cpp:228] Iteration 7600, loss = 0.301629\nI0817 20:48:50.585378 17621 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 20:48:50.585392 17621 solver.cpp:244]     Train net output #1: loss = 0.301629 (* 1 = 0.301629 loss)\nI0817 20:48:50.640965 17621 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0817 20:51:10.417011 17621 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:52:34.141950 17621 solver.cpp:404]     Test net output #0: accuracy = 0.79748\nI0817 20:52:34.142217 17621 solver.cpp:404]     Test net output #1: loss = 0.72571 (* 1 = 0.72571 loss)\nI0817 20:52:35.495899 17621 solver.cpp:228] Iteration 7700, loss = 0.164705\nI0817 20:52:35.495940 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:52:35.495955 17621 solver.cpp:244]     Train net output #1: loss = 0.164705 (* 1 = 0.164705 loss)\nI0817 20:52:35.557479 17621 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0817 20:54:55.324153 17621 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:56:19.053082 17621 solver.cpp:404]     Test net output #0: accuracy = 0.75684\nI0817 20:56:19.061780 17621 solver.cpp:404]     Test net output #1: loss = 0.944465 (* 1 = 0.944465 loss)\nI0817 20:56:20.414520 17621 solver.cpp:228] Iteration 7800, loss = 0.125689\nI0817 20:56:20.414559 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:56:20.414573 17621 solver.cpp:244]     Train net output #1: loss = 0.125689 (* 1 = 0.125689 loss)\nI0817 
20:56:20.476053 17621 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0817 20:58:40.249212 17621 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:00:03.990614 17621 solver.cpp:404]     Test net output #0: accuracy = 0.72544\nI0817 21:00:03.990907 17621 solver.cpp:404]     Test net output #1: loss = 1.39692 (* 1 = 1.39692 loss)\nI0817 21:00:05.344720 17621 solver.cpp:228] Iteration 7900, loss = 0.106059\nI0817 21:00:05.344760 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:00:05.344775 17621 solver.cpp:244]     Train net output #1: loss = 0.106059 (* 1 = 0.106059 loss)\nI0817 21:00:05.403363 17621 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0817 21:02:25.176189 17621 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:03:48.915954 17621 solver.cpp:404]     Test net output #0: accuracy = 0.80752\nI0817 21:03:48.916244 17621 solver.cpp:404]     Test net output #1: loss = 0.714281 (* 1 = 0.714281 loss)\nI0817 21:03:50.269862 17621 solver.cpp:228] Iteration 8000, loss = 0.118952\nI0817 21:03:50.269904 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:03:50.269920 17621 solver.cpp:244]     Train net output #1: loss = 0.118952 (* 1 = 0.118952 loss)\nI0817 21:03:50.323812 17621 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0817 21:06:10.101940 17621 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:07:33.842331 17621 solver.cpp:404]     Test net output #0: accuracy = 0.80356\nI0817 21:07:33.842643 17621 solver.cpp:404]     Test net output #1: loss = 0.745346 (* 1 = 0.745346 loss)\nI0817 21:07:35.196121 17621 solver.cpp:228] Iteration 8100, loss = 0.144818\nI0817 21:07:35.196161 17621 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 21:07:35.196177 17621 solver.cpp:244]     Train net output #1: loss = 0.144818 (* 1 = 0.144818 loss)\nI0817 21:07:35.254134 17621 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0817 21:09:55.028352 17621 solver.cpp:337] Iteration 8200, Testing 
net (#0)\nI0817 21:11:18.752797 17621 solver.cpp:404]     Test net output #0: accuracy = 0.77304\nI0817 21:11:18.753109 17621 solver.cpp:404]     Test net output #1: loss = 0.866575 (* 1 = 0.866575 loss)\nI0817 21:11:20.106858 17621 solver.cpp:228] Iteration 8200, loss = 0.0889016\nI0817 21:11:20.106904 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:11:20.106920 17621 solver.cpp:244]     Train net output #1: loss = 0.0889015 (* 1 = 0.0889015 loss)\nI0817 21:11:20.167837 17621 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0817 21:13:39.938880 17621 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:15:03.665356 17621 solver.cpp:404]     Test net output #0: accuracy = 0.81588\nI0817 21:15:03.665639 17621 solver.cpp:404]     Test net output #1: loss = 0.720982 (* 1 = 0.720982 loss)\nI0817 21:15:05.019516 17621 solver.cpp:228] Iteration 8300, loss = 0.110096\nI0817 21:15:05.019557 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:15:05.019572 17621 solver.cpp:244]     Train net output #1: loss = 0.110096 (* 1 = 0.110096 loss)\nI0817 21:15:05.081110 17621 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0817 21:17:24.851442 17621 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:18:48.580648 17621 solver.cpp:404]     Test net output #0: accuracy = 0.73788\nI0817 21:18:48.580947 17621 solver.cpp:404]     Test net output #1: loss = 1.30167 (* 1 = 1.30167 loss)\nI0817 21:18:49.934033 17621 solver.cpp:228] Iteration 8400, loss = 0.0801664\nI0817 21:18:49.934072 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:18:49.934087 17621 solver.cpp:244]     Train net output #1: loss = 0.0801664 (* 1 = 0.0801664 loss)\nI0817 21:18:49.995137 17621 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0817 21:21:09.719250 17621 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:22:33.441715 17621 solver.cpp:404]     Test net output #0: accuracy = 0.79496\nI0817 21:22:33.442013 17621 
solver.cpp:404]     Test net output #1: loss = 0.767027 (* 1 = 0.767027 loss)\nI0817 21:22:34.795824 17621 solver.cpp:228] Iteration 8500, loss = 0.0401013\nI0817 21:22:34.795861 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:22:34.795876 17621 solver.cpp:244]     Train net output #1: loss = 0.0401013 (* 1 = 0.0401013 loss)\nI0817 21:22:34.850361 17621 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0817 21:24:54.598793 17621 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:26:18.335883 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78672\nI0817 21:26:18.336199 17621 solver.cpp:404]     Test net output #1: loss = 1.00405 (* 1 = 1.00405 loss)\nI0817 21:26:19.689872 17621 solver.cpp:228] Iteration 8600, loss = 0.0880791\nI0817 21:26:19.689919 17621 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:26:19.689935 17621 solver.cpp:244]     Train net output #1: loss = 0.088079 (* 1 = 0.088079 loss)\nI0817 21:26:19.751119 17621 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0817 21:28:39.499037 17621 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:30:03.233268 17621 solver.cpp:404]     Test net output #0: accuracy = 0.78948\nI0817 21:30:03.233530 17621 solver.cpp:404]     Test net output #1: loss = 0.793672 (* 1 = 0.793672 loss)\nI0817 21:30:04.586908 17621 solver.cpp:228] Iteration 8700, loss = 0.0599695\nI0817 21:30:04.586947 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:30:04.586963 17621 solver.cpp:244]     Train net output #1: loss = 0.0599695 (* 1 = 0.0599695 loss)\nI0817 21:30:04.651993 17621 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0817 21:32:24.387617 17621 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:33:48.105008 17621 solver.cpp:404]     Test net output #0: accuracy = 0.8374\nI0817 21:33:48.105382 17621 solver.cpp:404]     Test net output #1: loss = 0.643694 (* 1 = 0.643694 loss)\nI0817 21:33:49.459082 17621 solver.cpp:228] Iteration 
8800, loss = 0.0132627\nI0817 21:33:49.459122 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:33:49.459137 17621 solver.cpp:244]     Train net output #1: loss = 0.0132627 (* 1 = 0.0132627 loss)\nI0817 21:33:49.517647 17621 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0817 21:36:09.228965 17621 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:37:32.942296 17621 solver.cpp:404]     Test net output #0: accuracy = 0.85228\nI0817 21:37:32.942606 17621 solver.cpp:404]     Test net output #1: loss = 0.525902 (* 1 = 0.525902 loss)\nI0817 21:37:34.296630 17621 solver.cpp:228] Iteration 8900, loss = 0.11735\nI0817 21:37:34.296669 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:37:34.296685 17621 solver.cpp:244]     Train net output #1: loss = 0.11735 (* 1 = 0.11735 loss)\nI0817 21:37:34.360524 17621 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0817 21:39:54.081640 17621 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:41:17.799237 17621 solver.cpp:404]     Test net output #0: accuracy = 0.84644\nI0817 21:41:17.799563 17621 solver.cpp:404]     Test net output #1: loss = 0.683973 (* 1 = 0.683973 loss)\nI0817 21:41:19.152829 17621 solver.cpp:228] Iteration 9000, loss = 0.0373602\nI0817 21:41:19.152870 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:41:19.152886 17621 solver.cpp:244]     Train net output #1: loss = 0.0373602 (* 1 = 0.0373602 loss)\nI0817 21:41:19.213274 17621 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0817 21:43:38.939556 17621 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:45:02.659714 17621 solver.cpp:404]     Test net output #0: accuracy = 0.85204\nI0817 21:45:02.660022 17621 solver.cpp:404]     Test net output #1: loss = 0.606981 (* 1 = 0.606981 loss)\nI0817 21:45:04.013485 17621 solver.cpp:228] Iteration 9100, loss = 0.0895845\nI0817 21:45:04.013525 17621 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:45:04.013540 17621 
solver.cpp:244]     Train net output #1: loss = 0.0895844 (* 1 = 0.0895844 loss)\nI0817 21:45:04.070242 17621 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0817 21:47:23.778162 17621 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 21:48:47.506335 17621 solver.cpp:404]     Test net output #0: accuracy = 0.85072\nI0817 21:48:47.506645 17621 solver.cpp:404]     Test net output #1: loss = 0.665532 (* 1 = 0.665532 loss)\nI0817 21:48:48.860247 17621 solver.cpp:228] Iteration 9200, loss = 0.022562\nI0817 21:48:48.860286 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:48:48.860302 17621 solver.cpp:244]     Train net output #1: loss = 0.0225619 (* 1 = 0.0225619 loss)\nI0817 21:48:48.923200 17621 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0817 21:51:08.643728 17621 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:52:32.358958 17621 solver.cpp:404]     Test net output #0: accuracy = 0.8436\nI0817 21:52:32.359261 17621 solver.cpp:404]     Test net output #1: loss = 0.693474 (* 1 = 0.693474 loss)\nI0817 21:52:33.712599 17621 solver.cpp:228] Iteration 9300, loss = 0.038228\nI0817 21:52:33.712641 17621 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:52:33.712656 17621 solver.cpp:244]     Train net output #1: loss = 0.038228 (* 1 = 0.038228 loss)\nI0817 21:52:33.768829 17621 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0817 21:54:53.485273 17621 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:56:17.195613 17621 solver.cpp:404]     Test net output #0: accuracy = 0.83084\nI0817 21:56:17.195909 17621 solver.cpp:404]     Test net output #1: loss = 0.756643 (* 1 = 0.756643 loss)\nI0817 21:56:18.548832 17621 solver.cpp:228] Iteration 9400, loss = 0.0131586\nI0817 21:56:18.548871 17621 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:56:18.548887 17621 solver.cpp:244]     Train net output #1: loss = 0.0131586 (* 1 = 0.0131586 loss)\nI0817 21:56:18.616528 17621 sgd_solver.cpp:166] 
Iteration 9400, lr = 0.448\nI0817 21:58:38.336628 17621 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:00:02.053841 17621 solver.cpp:404]     Test net output #0: accuracy = 0.89228\nI0817 22:00:02.054126 17621 solver.cpp:404]     Test net output #1: loss = 0.504428 (* 1 = 0.504428 loss)\nI0817 22:00:03.407790 17621 solver.cpp:228] Iteration 9500, loss = 0.00181439\nI0817 22:00:03.407832 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:00:03.407847 17621 solver.cpp:244]     Train net output #1: loss = 0.00181437 (* 1 = 0.00181437 loss)\nI0817 22:00:03.465955 17621 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0817 22:02:23.192953 17621 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:03:46.909126 17621 solver.cpp:404]     Test net output #0: accuracy = 0.90508\nI0817 22:03:46.909436 17621 solver.cpp:404]     Test net output #1: loss = 0.443354 (* 1 = 0.443354 loss)\nI0817 22:03:48.262704 17621 solver.cpp:228] Iteration 9600, loss = 0.00111915\nI0817 22:03:48.262745 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:03:48.262759 17621 solver.cpp:244]     Train net output #1: loss = 0.00111914 (* 1 = 0.00111914 loss)\nI0817 22:03:48.326122 17621 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0817 22:06:08.061539 17621 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:07:31.772467 17621 solver.cpp:404]     Test net output #0: accuracy = 0.9084\nI0817 22:07:31.772779 17621 solver.cpp:404]     Test net output #1: loss = 0.41972 (* 1 = 0.41972 loss)\nI0817 22:07:33.125814 17621 solver.cpp:228] Iteration 9700, loss = 0.000315094\nI0817 22:07:33.125854 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:07:33.125867 17621 solver.cpp:244]     Train net output #1: loss = 0.000315082 (* 1 = 0.000315082 loss)\nI0817 22:07:33.180276 17621 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0817 22:09:52.890776 17621 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:11:16.611958 
17621 solver.cpp:404]     Test net output #0: accuracy = 0.9094\nI0817 22:11:16.612249 17621 solver.cpp:404]     Test net output #1: loss = 0.415031 (* 1 = 0.415031 loss)\nI0817 22:11:17.964963 17621 solver.cpp:228] Iteration 9800, loss = 0.000234102\nI0817 22:11:17.965004 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:11:17.965018 17621 solver.cpp:244]     Train net output #1: loss = 0.00023409 (* 1 = 0.00023409 loss)\nI0817 22:11:18.027122 17621 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0817 22:13:37.756090 17621 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:15:01.477136 17621 solver.cpp:404]     Test net output #0: accuracy = 0.91052\nI0817 22:15:01.477419 17621 solver.cpp:404]     Test net output #1: loss = 0.400372 (* 1 = 0.400372 loss)\nI0817 22:15:02.830984 17621 solver.cpp:228] Iteration 9900, loss = 0.000209662\nI0817 22:15:02.831023 17621 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:15:02.831038 17621 solver.cpp:244]     Train net output #1: loss = 0.000209649 (* 1 = 0.000209649 loss)\nI0817 22:15:02.889437 17621 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0817 22:17:22.631440 17621 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kTr40kTab1_iter_10000.caffemodel\nI0817 22:17:22.851143 17621 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kTr40kTab1_iter_10000.solverstate\nI0817 22:17:23.305609 17621 solver.cpp:317] Iteration 10000, loss = 0.000185516\nI0817 22:17:23.305661 17621 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:18:47.023762 17621 solver.cpp:404]     Test net output #0: accuracy = 0.91104\nI0817 22:18:47.024083 17621 solver.cpp:404]     Test net output #1: loss = 0.408686 (* 1 = 0.408686 loss)\nI0817 22:18:47.024096 17621 solver.cpp:322] Optimization Done.\nI0817 22:18:52.364759 17621 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kWD-3Fig11",
    "content": "I0821 06:48:43.064079 32543 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:48:43.066614 32543 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:48:43.067888 32543 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:48:43.070372 32543 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:48:43.071738 32543 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:48:43.072966 32543 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:48:43.074200 32543 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:48:43.075428 32543 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:48:43.076654 32543 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:48:43.494364 32543 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.001\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kWD-3Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:48:43.498262 32543 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:48:43.517555 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:43.517630 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:43.518719 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:48:43.518780 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:48:43.518800 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:48:43.518826 32543 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:48:43.518846 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:48:43.518863 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:48:43.518882 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:48:43.518901 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:48:43.518921 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:48:43.518940 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:48:43.518959 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:48:43.518975 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:48:43.518996 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:48:43.519014 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:48:43.519034 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:48:43.519052 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:48:43.519071 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:48:43.519090 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:48:43.519109 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:48:43.519137 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:48:43.519171 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:48:43.519191 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:48:43.519215 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:48:43.519235 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:48:43.519253 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:48:43.519269 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:48:43.519289 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:48:43.519305 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:48:43.519322 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:48:43.519342 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:48:43.519362 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:48:43.519379 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:48:43.519399 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:48:43.519415 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:48:43.519435 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:48:43.519454 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:48:43.519474 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:48:43.519491 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:48:43.519510 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:48:43.519527 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:48:43.519552 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:48:43.519570 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:48:43.519588 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:48:43.519606 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:48:43.519628 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:48:43.519645 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:48:43.519665 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:48:43.519681 32543 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:48:43.519701 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:48:43.519717 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:48:43.519734 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:48:43.519762 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:48:43.519783 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:48:43.519801 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:48:43.519822 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:48:43.519837 32543 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:48:43.521600 32543 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:48:43.523707 32543 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:48:43.524942 32543 net.cpp:100] Creating Layer dataLayer\nI0821 06:48:43.525020 32543 net.cpp:408] dataLayer -> data_top\nI0821 06:48:43.525243 32543 net.cpp:408] dataLayer -> label\nI0821 06:48:43.525370 32543 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:48:43.532778 32549 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:48:43.584661 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:43.592945 32543 net.cpp:150] Setting up dataLayer\nI0821 06:48:43.593008 32543 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:48:43.593019 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.593025 32543 net.cpp:165] Memory required for data: 1536500\nI0821 06:48:43.593040 32543 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:48:43.593055 32543 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:48:43.593063 32543 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:48:43.593082 32543 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:48:43.593098 32543 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:48:43.593178 32543 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:48:43.593197 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.593205 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.593209 32543 net.cpp:165] Memory required for data: 1537500\nI0821 06:48:43.593215 32543 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:48:43.593277 32543 
net.cpp:100] Creating Layer pre_conv\nI0821 06:48:43.593289 32543 net.cpp:434] pre_conv <- data_top\nI0821 06:48:43.593299 32543 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:48:43.596437 32550 blocking_queue.cpp:50] Waiting for data\nI0821 06:48:43.596457 32543 net.cpp:150] Setting up pre_conv\nI0821 06:48:43.596478 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.596484 32543 net.cpp:165] Memory required for data: 9729500\nI0821 06:48:43.596559 32543 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:48:43.597776 32543 net.cpp:100] Creating Layer pre_bn\nI0821 06:48:43.597791 32543 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:48:43.597801 32543 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:48:43.598114 32543 net.cpp:150] Setting up pre_bn\nI0821 06:48:43.598137 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.598143 32543 net.cpp:165] Memory required for data: 17921500\nI0821 06:48:43.598161 32543 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:48:43.598214 32543 net.cpp:100] Creating Layer pre_scale\nI0821 06:48:43.598224 32543 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:48:43.598237 32543 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:48:43.598410 32543 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:48:43.599231 32543 net.cpp:150] Setting up pre_scale\nI0821 06:48:43.599247 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.599253 32543 net.cpp:165] Memory required for data: 26113500\nI0821 06:48:43.599264 32543 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:48:43.599313 32543 net.cpp:100] Creating Layer pre_relu\nI0821 06:48:43.599323 32543 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:48:43.599334 32543 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:48:43.599345 32543 net.cpp:150] Setting up pre_relu\nI0821 06:48:43.599354 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.599359 32543 net.cpp:165] Memory required for data: 
34305500\nI0821 06:48:43.599364 32543 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:48:43.599370 32543 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:48:43.599375 32543 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:48:43.599385 32543 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:48:43.599395 32543 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:48:43.599444 32543 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:48:43.599457 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.599462 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.599467 32543 net.cpp:165] Memory required for data: 50689500\nI0821 06:48:43.599472 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:48:43.599484 32543 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:48:43.599490 32543 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:48:43.599501 32543 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:48:43.599815 32543 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:48:43.599829 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.599834 32543 net.cpp:165] Memory required for data: 58881500\nI0821 06:48:43.599846 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:48:43.599862 32543 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:48:43.599869 32543 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:48:43.599877 32543 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:48:43.600109 32543 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:48:43.600132 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.600138 32543 net.cpp:165] Memory required for data: 67073500\nI0821 06:48:43.600149 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:48:43.600158 32543 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:48:43.600163 32543 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:48:43.600172 32543 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.600224 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:48:43.600360 32543 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:48:43.600373 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.600378 32543 net.cpp:165] Memory required for data: 75265500\nI0821 06:48:43.600386 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:48:43.600405 32543 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:48:43.600411 32543 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:48:43.600419 32543 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.600432 32543 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:48:43.600440 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.600445 32543 net.cpp:165] Memory required for data: 83457500\nI0821 06:48:43.600450 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:48:43.600461 32543 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:48:43.600466 32543 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:48:43.600477 32543 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:48:43.600782 32543 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:48:43.600796 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.600801 32543 net.cpp:165] Memory required for data: 91649500\nI0821 06:48:43.600811 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:48:43.600819 32543 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:48:43.600826 32543 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:48:43.600836 32543 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:48:43.601068 32543 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:48:43.601080 32543 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601085 32543 net.cpp:165] Memory required for data: 99841500\nI0821 06:48:43.601102 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:48:43.601112 32543 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:48:43.601122 32543 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:48:43.601131 32543 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.601188 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:48:43.601327 32543 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:48:43.601341 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601346 32543 net.cpp:165] Memory required for data: 108033500\nI0821 06:48:43.601354 32543 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:48:43.601403 32543 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:48:43.601415 32543 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:48:43.601423 32543 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:48:43.601434 32543 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:48:43.601505 32543 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:48:43.601519 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601524 32543 net.cpp:165] Memory required for data: 116225500\nI0821 06:48:43.601531 32543 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:48:43.601542 32543 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:48:43.601548 32543 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:48:43.601555 32543 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.601565 32543 net.cpp:150] Setting up L1_b1_relu\nI0821 06:48:43.601572 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601577 32543 net.cpp:165] Memory required for data: 124417500\nI0821 06:48:43.601582 32543 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.601590 32543 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.601596 32543 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:48:43.601603 32543 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:48:43.601613 32543 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:48:43.601657 32543 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.601668 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601675 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.601686 32543 net.cpp:165] Memory required for data: 140801500\nI0821 06:48:43.601692 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:48:43.601706 32543 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:48:43.601713 32543 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:48:43.601722 32543 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:48:43.602032 32543 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:48:43.602046 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.602051 32543 net.cpp:165] Memory required for data: 148993500\nI0821 06:48:43.602061 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:48:43.602074 32543 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:48:43.602080 32543 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:48:43.602090 32543 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:48:43.602331 32543 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:48:43.602346 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.602351 32543 net.cpp:165] Memory required for data: 157185500\nI0821 06:48:43.602361 32543 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:48:43.602370 32543 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:48:43.602376 32543 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:48:43.602387 32543 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.602439 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:48:43.602572 32543 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:48:43.602589 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.602594 32543 net.cpp:165] Memory required for data: 165377500\nI0821 06:48:43.602603 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:48:43.602612 32543 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:48:43.602617 32543 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:48:43.602624 32543 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.602633 32543 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:48:43.602640 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.602645 32543 net.cpp:165] Memory required for data: 173569500\nI0821 06:48:43.602649 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:48:43.602663 32543 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:48:43.602669 32543 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:48:43.602680 32543 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:48:43.602987 32543 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:48:43.602999 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603004 32543 net.cpp:165] Memory required for data: 181761500\nI0821 06:48:43.603013 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:48:43.603029 32543 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:48:43.603034 32543 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:48:43.603045 32543 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:48:43.603284 32543 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:48:43.603298 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603303 32543 net.cpp:165] Memory required for data: 189953500\nI0821 06:48:43.603318 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:48:43.603332 32543 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:48:43.603338 32543 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:48:43.603345 32543 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.603399 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:48:43.603539 32543 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:48:43.603552 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603557 32543 net.cpp:165] Memory required for data: 198145500\nI0821 06:48:43.603566 32543 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:48:43.603582 32543 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:48:43.603588 32543 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:48:43.603595 32543 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:48:43.603606 32543 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:48:43.603641 32543 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:48:43.603651 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603655 32543 net.cpp:165] Memory required for data: 206337500\nI0821 06:48:43.603660 32543 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:48:43.603668 32543 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:48:43.603673 32543 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:48:43.603680 32543 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.603693 32543 net.cpp:150] Setting up L1_b2_relu\nI0821 06:48:43.603699 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603704 32543 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:48:43.603709 32543 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.603716 32543 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.603721 32543 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:48:43.603729 32543 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:48:43.603737 32543 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:48:43.603780 32543 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.603792 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603798 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.603803 32543 net.cpp:165] Memory required for data: 230913500\nI0821 06:48:43.603808 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:48:43.603819 32543 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:48:43.603826 32543 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:48:43.603837 32543 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:48:43.604148 32543 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:48:43.604162 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.604167 32543 net.cpp:165] Memory required for data: 239105500\nI0821 06:48:43.604176 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:48:43.604185 32543 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:48:43.604192 32543 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:48:43.604202 32543 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:48:43.604440 32543 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:48:43.604454 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.604459 32543 net.cpp:165] Memory required for data: 
247297500\nI0821 06:48:43.604468 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:48:43.604480 32543 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:48:43.604485 32543 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:48:43.604493 32543 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.604543 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:48:43.604681 32543 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:48:43.604694 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.604699 32543 net.cpp:165] Memory required for data: 255489500\nI0821 06:48:43.604708 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:48:43.604717 32543 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:48:43.604722 32543 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:48:43.604732 32543 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.604742 32543 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:48:43.604755 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.604760 32543 net.cpp:165] Memory required for data: 263681500\nI0821 06:48:43.604765 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:48:43.604779 32543 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:48:43.604785 32543 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:48:43.604794 32543 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:48:43.605099 32543 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:48:43.605113 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605123 32543 net.cpp:165] Memory required for data: 271873500\nI0821 06:48:43.605134 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:48:43.605147 32543 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:48:43.605154 32543 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:48:43.605166 32543 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:48:43.605399 32543 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:48:43.605412 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605417 32543 net.cpp:165] Memory required for data: 280065500\nI0821 06:48:43.605427 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:48:43.605439 32543 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:48:43.605445 32543 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:48:43.605453 32543 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.605504 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:48:43.605639 32543 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:48:43.605653 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605657 32543 net.cpp:165] Memory required for data: 288257500\nI0821 06:48:43.605665 32543 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:48:43.605680 32543 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:48:43.605686 32543 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:48:43.605693 32543 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:48:43.605700 32543 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:48:43.605733 32543 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:48:43.605746 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605751 32543 net.cpp:165] Memory required for data: 296449500\nI0821 06:48:43.605756 32543 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:48:43.605763 32543 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:48:43.605768 32543 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:48:43.605778 32543 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.605787 32543 net.cpp:150] Setting up L1_b3_relu\nI0821 06:48:43.605794 32543 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:48:43.605799 32543 net.cpp:165] Memory required for data: 304641500\nI0821 06:48:43.605804 32543 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.605811 32543 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.605816 32543 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:48:43.605823 32543 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:48:43.605834 32543 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:48:43.605878 32543 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.605890 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605896 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.605901 32543 net.cpp:165] Memory required for data: 321025500\nI0821 06:48:43.605906 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:48:43.605917 32543 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:48:43.605923 32543 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:48:43.605942 32543 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:48:43.606266 32543 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:48:43.606281 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.606286 32543 net.cpp:165] Memory required for data: 329217500\nI0821 06:48:43.606294 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:48:43.606303 32543 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:48:43.606309 32543 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:48:43.606320 32543 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:48:43.606556 32543 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:48:43.606570 32543 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:48:43.606575 32543 net.cpp:165] Memory required for data: 337409500\nI0821 06:48:43.606583 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:48:43.606597 32543 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:48:43.606603 32543 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:48:43.606611 32543 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.606663 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:48:43.606803 32543 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:48:43.606817 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.606822 32543 net.cpp:165] Memory required for data: 345601500\nI0821 06:48:43.606832 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:48:43.606842 32543 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:48:43.606848 32543 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:48:43.606855 32543 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.606864 32543 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:48:43.606870 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.606875 32543 net.cpp:165] Memory required for data: 353793500\nI0821 06:48:43.606880 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:48:43.606894 32543 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:48:43.606900 32543 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:48:43.606911 32543 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:48:43.607234 32543 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:48:43.607247 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.607252 32543 net.cpp:165] Memory required for data: 361985500\nI0821 06:48:43.607261 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:48:43.607270 32543 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:48:43.607276 32543 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:48:43.607285 32543 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:48:43.607525 32543 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:48:43.607539 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.607544 32543 net.cpp:165] Memory required for data: 370177500\nI0821 06:48:43.607558 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:48:43.607566 32543 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:48:43.607573 32543 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:48:43.607581 32543 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.607633 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:48:43.607771 32543 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:48:43.607784 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.607789 32543 net.cpp:165] Memory required for data: 378369500\nI0821 06:48:43.607797 32543 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:48:43.607810 32543 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:48:43.607816 32543 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:48:43.607823 32543 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:48:43.607834 32543 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:48:43.607872 32543 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:48:43.607882 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.607887 32543 net.cpp:165] Memory required for data: 386561500\nI0821 06:48:43.607892 32543 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:48:43.607903 32543 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:48:43.607908 32543 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:48:43.607916 32543 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.607924 32543 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:48:43.607931 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.607936 32543 net.cpp:165] Memory required for data: 394753500\nI0821 06:48:43.607940 32543 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.607949 32543 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.607954 32543 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:48:43.607960 32543 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:48:43.607969 32543 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:48:43.608013 32543 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.608024 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.608031 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.608036 32543 net.cpp:165] Memory required for data: 411137500\nI0821 06:48:43.608042 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:48:43.608054 32543 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:48:43.608062 32543 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:48:43.608069 32543 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:48:43.608388 32543 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:48:43.608402 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.608407 32543 net.cpp:165] Memory required for data: 419329500\nI0821 06:48:43.608431 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:48:43.608444 32543 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:48:43.608450 32543 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:48:43.608458 32543 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:48:43.608700 32543 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:48:43.608716 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.608721 32543 net.cpp:165] Memory required for data: 427521500\nI0821 06:48:43.608732 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:48:43.608741 32543 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:48:43.608747 32543 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:48:43.608755 32543 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.608806 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:48:43.608947 32543 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:48:43.608959 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.608964 32543 net.cpp:165] Memory required for data: 435713500\nI0821 06:48:43.608973 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:48:43.608984 32543 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:48:43.608990 32543 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:48:43.608997 32543 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.609007 32543 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:48:43.609014 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.609019 32543 net.cpp:165] Memory required for data: 443905500\nI0821 06:48:43.609024 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:48:43.609037 32543 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:48:43.609043 32543 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:48:43.609061 32543 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:48:43.609385 32543 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:48:43.609398 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.609405 32543 net.cpp:165] Memory required for data: 452097500\nI0821 06:48:43.609413 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:48:43.609422 32543 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:48:43.609428 32543 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:48:43.609439 32543 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:48:43.609675 32543 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:48:43.609688 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.609694 32543 net.cpp:165] Memory required for data: 460289500\nI0821 06:48:43.609704 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:48:43.609714 32543 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:48:43.609720 32543 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:48:43.609728 32543 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.609779 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:48:43.609920 32543 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:48:43.609933 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.609938 32543 net.cpp:165] Memory required for data: 468481500\nI0821 06:48:43.609947 32543 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:48:43.609959 32543 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:48:43.609966 32543 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:48:43.609972 32543 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:48:43.609980 32543 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:48:43.610015 32543 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:48:43.610025 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610029 32543 net.cpp:165] Memory required for data: 476673500\nI0821 06:48:43.610035 32543 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:48:43.610043 32543 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:48:43.610049 32543 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:48:43.610057 32543 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.610067 32543 net.cpp:150] Setting up L1_b5_relu\nI0821 06:48:43.610074 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610079 32543 net.cpp:165] Memory required for data: 484865500\nI0821 06:48:43.610083 32543 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.610090 32543 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.610095 32543 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:48:43.610102 32543 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:48:43.610111 32543 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:48:43.610164 32543 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.610177 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610183 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610188 32543 net.cpp:165] Memory required for data: 501249500\nI0821 06:48:43.610193 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:48:43.610203 32543 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:48:43.610209 32543 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:48:43.610221 32543 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:48:43.610532 32543 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:48:43.610545 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610550 32543 net.cpp:165] Memory required for data: 509441500\nI0821 06:48:43.610566 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:48:43.610575 32543 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:48:43.610581 32543 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:48:43.610592 32543 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:48:43.610828 32543 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:48:43.610841 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.610846 32543 net.cpp:165] Memory required for data: 517633500\nI0821 06:48:43.610857 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:48:43.610867 32543 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:48:43.610873 32543 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:48:43.610882 32543 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.610932 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:48:43.611071 32543 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:48:43.611084 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.611089 32543 net.cpp:165] Memory required for data: 525825500\nI0821 06:48:43.611099 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:48:43.611109 32543 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:48:43.611115 32543 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:48:43.611129 32543 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.611140 32543 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:48:43.611146 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.611151 32543 net.cpp:165] Memory required for data: 534017500\nI0821 06:48:43.611155 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:48:43.611171 32543 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:48:43.611176 32543 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:48:43.611186 32543 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:48:43.611505 32543 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:48:43.611518 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.611523 32543 net.cpp:165] Memory required for data: 542209500\nI0821 06:48:43.611531 32543 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:48:43.611541 32543 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:48:43.611546 32543 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:48:43.611554 32543 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:48:43.611796 32543 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:48:43.611809 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.611814 32543 net.cpp:165] Memory required for data: 550401500\nI0821 06:48:43.611824 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:48:43.611836 32543 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:48:43.611842 32543 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:48:43.611850 32543 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.611904 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:48:43.612046 32543 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:48:43.612057 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612062 32543 net.cpp:165] Memory required for data: 558593500\nI0821 06:48:43.612071 32543 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:48:43.612088 32543 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:48:43.612097 32543 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:48:43.612104 32543 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:48:43.612112 32543 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:48:43.612150 32543 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:48:43.612164 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612169 32543 net.cpp:165] Memory required for data: 566785500\nI0821 06:48:43.612174 32543 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:48:43.612192 32543 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:48:43.612200 32543 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:48:43.612206 32543 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.612215 32543 net.cpp:150] Setting up L1_b6_relu\nI0821 06:48:43.612222 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612227 32543 net.cpp:165] Memory required for data: 574977500\nI0821 06:48:43.612232 32543 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.612239 32543 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.612244 32543 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:48:43.612251 32543 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:48:43.612262 32543 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:48:43.612308 32543 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.612318 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612325 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612330 32543 net.cpp:165] Memory required for data: 591361500\nI0821 06:48:43.612335 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:48:43.612349 32543 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:48:43.612355 32543 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:48:43.612365 32543 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:48:43.612680 32543 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:48:43.612694 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612699 32543 net.cpp:165] Memory required for data: 599553500\nI0821 06:48:43.612709 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:48:43.612720 32543 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:48:43.612726 32543 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:48:43.612735 32543 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:48:43.612973 32543 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:48:43.612987 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.612992 32543 net.cpp:165] Memory required for data: 607745500\nI0821 06:48:43.613001 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:48:43.613010 32543 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:48:43.613016 32543 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:48:43.613024 32543 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.613080 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:48:43.613227 32543 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:48:43.613241 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.613246 32543 net.cpp:165] Memory required for data: 615937500\nI0821 06:48:43.613255 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:48:43.613263 32543 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:48:43.613270 32543 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:48:43.613281 32543 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.613291 32543 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:48:43.613297 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.613301 32543 net.cpp:165] Memory required for data: 624129500\nI0821 06:48:43.613306 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:48:43.613320 32543 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:48:43.613327 32543 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:48:43.613334 32543 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:48:43.613651 32543 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:48:43.613664 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.613669 32543 
net.cpp:165] Memory required for data: 632321500\nI0821 06:48:43.613685 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:48:43.613698 32543 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:48:43.613703 32543 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:48:43.613713 32543 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:48:43.613950 32543 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:48:43.613966 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.613971 32543 net.cpp:165] Memory required for data: 640513500\nI0821 06:48:43.613981 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:48:43.613991 32543 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:48:43.613996 32543 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:48:43.614004 32543 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.614058 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:48:43.614204 32543 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:48:43.614218 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614223 32543 net.cpp:165] Memory required for data: 648705500\nI0821 06:48:43.614233 32543 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:48:43.614244 32543 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:48:43.614250 32543 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:48:43.614258 32543 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:48:43.614267 32543 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:48:43.614298 32543 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:48:43.614307 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614311 32543 net.cpp:165] Memory required for data: 656897500\nI0821 06:48:43.614317 32543 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:48:43.614327 32543 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:48:43.614333 32543 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:48:43.614341 32543 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.614349 32543 net.cpp:150] Setting up L1_b7_relu\nI0821 06:48:43.614356 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614362 32543 net.cpp:165] Memory required for data: 665089500\nI0821 06:48:43.614365 32543 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.614372 32543 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.614378 32543 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:48:43.614385 32543 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:48:43.614394 32543 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:48:43.614441 32543 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.614454 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614459 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614464 32543 net.cpp:165] Memory required for data: 681473500\nI0821 06:48:43.614470 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:48:43.614482 32543 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:48:43.614490 32543 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:48:43.614497 32543 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:48:43.614814 32543 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:48:43.614827 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.614832 32543 net.cpp:165] Memory required for data: 689665500\nI0821 06:48:43.614841 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:48:43.614855 32543 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:48:43.614861 32543 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:48:43.614869 32543 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:48:43.615128 32543 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:48:43.615141 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.615146 32543 net.cpp:165] Memory required for data: 697857500\nI0821 06:48:43.615156 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:48:43.615165 32543 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:48:43.615171 32543 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:48:43.615180 32543 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.615234 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:48:43.615381 32543 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:48:43.615393 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.615398 32543 net.cpp:165] Memory required for data: 706049500\nI0821 06:48:43.615407 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:48:43.615416 32543 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:48:43.615422 32543 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:48:43.615432 32543 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.615440 32543 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:48:43.615447 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.615453 32543 net.cpp:165] Memory required for data: 714241500\nI0821 06:48:43.615458 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:48:43.615468 32543 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:48:43.615473 32543 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:48:43.615484 32543 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:48:43.615805 32543 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:48:43.615818 32543 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.615823 32543 net.cpp:165] Memory required for data: 722433500\nI0821 06:48:43.615833 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:48:43.615841 32543 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:48:43.615847 32543 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:48:43.615859 32543 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:48:43.616107 32543 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:48:43.616128 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616134 32543 net.cpp:165] Memory required for data: 730625500\nI0821 06:48:43.616145 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:48:43.616154 32543 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:48:43.616160 32543 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:48:43.616168 32543 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.616220 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:48:43.616361 32543 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:48:43.616374 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616379 32543 net.cpp:165] Memory required for data: 738817500\nI0821 06:48:43.616389 32543 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:48:43.616400 32543 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:48:43.616406 32543 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:48:43.616413 32543 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:48:43.616425 32543 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:48:43.616456 32543 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:48:43.616466 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616469 32543 net.cpp:165] Memory required for data: 747009500\nI0821 06:48:43.616474 32543 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:48:43.616482 32543 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:48:43.616487 32543 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:48:43.616497 32543 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.616508 32543 net.cpp:150] Setting up L1_b8_relu\nI0821 06:48:43.616514 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616524 32543 net.cpp:165] Memory required for data: 755201500\nI0821 06:48:43.616529 32543 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.616538 32543 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.616542 32543 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:48:43.616550 32543 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:48:43.616559 32543 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:48:43.616605 32543 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.616617 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616624 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.616628 32543 net.cpp:165] Memory required for data: 771585500\nI0821 06:48:43.616633 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:48:43.616647 32543 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:48:43.616653 32543 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:48:43.616662 32543 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:48:43.616987 32543 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:48:43.617004 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.617009 32543 net.cpp:165] Memory required for data: 779777500\nI0821 06:48:43.617018 32543 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:48:43.617030 32543 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:48:43.617036 32543 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:48:43.617044 32543 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:48:43.617297 32543 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:48:43.617311 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.617316 32543 net.cpp:165] Memory required for data: 787969500\nI0821 06:48:43.617327 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:48:43.617336 32543 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:48:43.617341 32543 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:48:43.617352 32543 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.617406 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:48:43.617552 32543 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:48:43.617568 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.617573 32543 net.cpp:165] Memory required for data: 796161500\nI0821 06:48:43.617581 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:48:43.617589 32543 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:48:43.617595 32543 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:48:43.617602 32543 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.617611 32543 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:48:43.617619 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.617624 32543 net.cpp:165] Memory required for data: 804353500\nI0821 06:48:43.617627 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:48:43.617641 32543 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:48:43.617647 32543 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:48:43.617658 32543 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:48:43.617985 32543 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:48:43.618000 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618005 32543 net.cpp:165] Memory required for data: 812545500\nI0821 06:48:43.618012 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:48:43.618024 32543 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:48:43.618031 32543 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:48:43.618041 32543 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:48:43.618295 32543 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:48:43.618309 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618314 32543 net.cpp:165] Memory required for data: 820737500\nI0821 06:48:43.618345 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:48:43.618355 32543 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:48:43.618361 32543 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:48:43.618371 32543 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.618427 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:48:43.618568 32543 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:48:43.618582 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618587 32543 net.cpp:165] Memory required for data: 828929500\nI0821 06:48:43.618595 32543 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:48:43.618604 32543 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:48:43.618610 32543 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:48:43.618618 32543 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:48:43.618624 32543 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:48:43.618655 32543 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:48:43.618664 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618669 32543 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:48:43.618674 32543 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:48:43.618685 32543 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:48:43.618691 32543 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:48:43.618698 32543 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.618707 32543 net.cpp:150] Setting up L1_b9_relu\nI0821 06:48:43.618715 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618718 32543 net.cpp:165] Memory required for data: 845313500\nI0821 06:48:43.618723 32543 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.618734 32543 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.618741 32543 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:48:43.618747 32543 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:48:43.618757 32543 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:48:43.618804 32543 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.618816 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618824 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.618827 32543 net.cpp:165] Memory required for data: 861697500\nI0821 06:48:43.618832 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:48:43.618846 32543 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:48:43.618852 32543 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:48:43.618861 32543 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:48:43.619194 32543 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:48:43.619209 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.619215 32543 net.cpp:165] Memory required for data: 
863745500\nI0821 06:48:43.619223 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:48:43.619235 32543 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:48:43.619241 32543 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:48:43.619252 32543 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:48:43.619490 32543 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:48:43.619503 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.619508 32543 net.cpp:165] Memory required for data: 865793500\nI0821 06:48:43.619518 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:48:43.619527 32543 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:48:43.619540 32543 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:48:43.619549 32543 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.619604 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:48:43.619743 32543 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:48:43.619755 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.619760 32543 net.cpp:165] Memory required for data: 867841500\nI0821 06:48:43.619770 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:48:43.619781 32543 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:48:43.619786 32543 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:48:43.619793 32543 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.619803 32543 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:48:43.619809 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.619814 32543 net.cpp:165] Memory required for data: 869889500\nI0821 06:48:43.619819 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:48:43.619833 32543 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:48:43.619839 32543 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:48:43.619848 32543 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:48:43.620177 32543 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:48:43.620192 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.620196 32543 net.cpp:165] Memory required for data: 871937500\nI0821 06:48:43.620205 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:48:43.620218 32543 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:48:43.620223 32543 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:48:43.620232 32543 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:48:43.620477 32543 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:48:43.620493 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.620498 32543 net.cpp:165] Memory required for data: 873985500\nI0821 06:48:43.620508 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:48:43.620517 32543 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:48:43.620522 32543 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:48:43.620530 32543 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.620584 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:48:43.620731 32543 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:48:43.620744 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.620749 32543 net.cpp:165] Memory required for data: 876033500\nI0821 06:48:43.620759 32543 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:48:43.620769 32543 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:48:43.620775 32543 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:48:43.620786 32543 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:48:43.620872 32543 net.cpp:150] Setting up L2_b1_pool\nI0821 06:48:43.620887 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.620893 32543 net.cpp:165] Memory required for data: 878081500\nI0821 06:48:43.620898 32543 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:48:43.620910 32543 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:48:43.620918 32543 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:48:43.620924 32543 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:48:43.620931 32543 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:48:43.620965 32543 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:48:43.620973 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.620978 32543 net.cpp:165] Memory required for data: 880129500\nI0821 06:48:43.620983 32543 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:48:43.620990 32543 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:48:43.620996 32543 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:48:43.621007 32543 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.621023 32543 net.cpp:150] Setting up L2_b1_relu\nI0821 06:48:43.621031 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.621035 32543 net.cpp:165] Memory required for data: 882177500\nI0821 06:48:43.621040 32543 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:48:43.621093 32543 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:48:43.621107 32543 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:48:43.623419 32543 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:48:43.623437 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.623443 32543 net.cpp:165] Memory required for data: 884225500\nI0821 06:48:43.623450 32543 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:48:43.623462 32543 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:48:43.623469 32543 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:48:43.623476 32543 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:48:43.623484 32543 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:48:43.623565 32543 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:48:43.623580 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.623585 32543 net.cpp:165] Memory required for data: 888321500\nI0821 06:48:43.623591 32543 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.623600 32543 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.623605 32543 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:48:43.623616 32543 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:48:43.623627 32543 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:48:43.623677 32543 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.623690 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.623697 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.623702 32543 net.cpp:165] Memory required for data: 896513500\nI0821 06:48:43.623708 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:48:43.623718 32543 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:48:43.623725 32543 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:48:43.623734 32543 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:48:43.625198 32543 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:48:43.625216 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.625221 32543 net.cpp:165] Memory required for data: 900609500\nI0821 06:48:43.625231 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:48:43.625244 32543 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:48:43.625252 32543 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:48:43.625263 32543 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:48:43.625505 32543 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:48:43.625519 32543 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:48:43.625524 32543 net.cpp:165] Memory required for data: 904705500\nI0821 06:48:43.625535 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:48:43.625543 32543 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:48:43.625550 32543 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:48:43.625557 32543 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.625615 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:48:43.625761 32543 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:48:43.625774 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.625779 32543 net.cpp:165] Memory required for data: 908801500\nI0821 06:48:43.625789 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:48:43.625800 32543 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:48:43.625807 32543 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:48:43.625814 32543 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.625831 32543 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:48:43.625839 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.625844 32543 net.cpp:165] Memory required for data: 912897500\nI0821 06:48:43.625849 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:48:43.625864 32543 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:48:43.625869 32543 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:48:43.625881 32543 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:48:43.626379 32543 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:48:43.626394 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.626399 32543 net.cpp:165] Memory required for data: 916993500\nI0821 06:48:43.626407 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:48:43.626420 32543 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:48:43.626426 
32543 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:48:43.626437 32543 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:48:43.626684 32543 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:48:43.626700 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.626705 32543 net.cpp:165] Memory required for data: 921089500\nI0821 06:48:43.626715 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:48:43.626724 32543 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:48:43.626730 32543 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:48:43.626739 32543 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.626792 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:48:43.626937 32543 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:48:43.626950 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.626955 32543 net.cpp:165] Memory required for data: 925185500\nI0821 06:48:43.626963 32543 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:48:43.626973 32543 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:48:43.626979 32543 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:48:43.626989 32543 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:48:43.626997 32543 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:48:43.627023 32543 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:48:43.627032 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.627038 32543 net.cpp:165] Memory required for data: 929281500\nI0821 06:48:43.627043 32543 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:48:43.627053 32543 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:48:43.627060 32543 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:48:43.627068 32543 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.627076 32543 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:48:43.627082 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.627087 32543 net.cpp:165] Memory required for data: 933377500\nI0821 06:48:43.627092 32543 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.627099 32543 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.627104 32543 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:48:43.627111 32543 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:48:43.627127 32543 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:48:43.627177 32543 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.627188 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.627194 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.627199 32543 net.cpp:165] Memory required for data: 941569500\nI0821 06:48:43.627204 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:48:43.627226 32543 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:48:43.627233 32543 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:48:43.627243 32543 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:48:43.627733 32543 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:48:43.627748 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.627753 32543 net.cpp:165] Memory required for data: 945665500\nI0821 06:48:43.627760 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:48:43.627773 32543 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:48:43.627779 32543 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:48:43.627790 32543 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:48:43.628031 32543 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:48:43.628044 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.628049 32543 net.cpp:165] Memory required for data: 949761500\nI0821 06:48:43.628059 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:48:43.628068 32543 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:48:43.628074 32543 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:48:43.628082 32543 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.628145 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:48:43.628294 32543 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:48:43.628309 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.628314 32543 net.cpp:165] Memory required for data: 953857500\nI0821 06:48:43.628322 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:48:43.628334 32543 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:48:43.628340 32543 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:48:43.628346 32543 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.628356 32543 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:48:43.628363 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.628367 32543 net.cpp:165] Memory required for data: 957953500\nI0821 06:48:43.628372 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:48:43.628386 32543 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:48:43.628392 32543 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:48:43.628401 32543 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:48:43.628883 32543 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:48:43.628896 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.628901 32543 net.cpp:165] Memory required for data: 962049500\nI0821 06:48:43.628911 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:48:43.628922 32543 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:48:43.628929 32543 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:48:43.628937 32543 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:48:43.629192 32543 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:48:43.629209 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629214 32543 net.cpp:165] Memory required for data: 966145500\nI0821 06:48:43.629225 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:48:43.629233 32543 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:48:43.629240 32543 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:48:43.629247 32543 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.629302 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:48:43.629452 32543 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:48:43.629464 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629469 32543 net.cpp:165] Memory required for data: 970241500\nI0821 06:48:43.629478 32543 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:48:43.629487 32543 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:48:43.629493 32543 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:48:43.629500 32543 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:48:43.629519 32543 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:48:43.629547 32543 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:48:43.629557 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629561 32543 net.cpp:165] Memory required for data: 974337500\nI0821 06:48:43.629567 32543 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:48:43.629590 32543 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:48:43.629596 32543 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:48:43.629603 32543 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.629612 32543 net.cpp:150] Setting up L2_b3_relu\nI0821 06:48:43.629619 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629624 32543 net.cpp:165] Memory required for data: 978433500\nI0821 06:48:43.629629 32543 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.629637 32543 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.629642 32543 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:48:43.629649 32543 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:48:43.629658 32543 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:48:43.629707 32543 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.629719 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629725 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.629730 32543 net.cpp:165] Memory required for data: 986625500\nI0821 06:48:43.629735 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:48:43.629746 32543 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:48:43.629752 32543 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:48:43.629766 32543 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:48:43.630259 32543 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:48:43.630273 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.630278 32543 net.cpp:165] Memory required for data: 990721500\nI0821 06:48:43.630287 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:48:43.630297 32543 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:48:43.630303 32543 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:48:43.630316 32543 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:48:43.630563 32543 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:48:43.630576 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.630581 32543 net.cpp:165] Memory required for data: 994817500\nI0821 06:48:43.630591 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:48:43.630604 32543 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:48:43.630609 32543 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:48:43.630617 32543 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.630671 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:48:43.630816 32543 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:48:43.630830 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.630834 32543 net.cpp:165] Memory required for data: 998913500\nI0821 06:48:43.630843 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:48:43.630854 32543 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:48:43.630861 32543 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:48:43.630867 32543 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.630877 32543 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:48:43.630884 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.630888 32543 net.cpp:165] Memory required for data: 1003009500\nI0821 06:48:43.630894 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:48:43.630914 32543 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:48:43.630921 32543 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:48:43.630933 32543 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:48:43.631418 32543 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:48:43.631433 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.631438 32543 net.cpp:165] Memory required for data: 1007105500\nI0821 06:48:43.631448 32543 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:48:43.631456 32543 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:48:43.631463 32543 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:48:43.631474 32543 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:48:43.631721 32543 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:48:43.631734 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.631739 32543 net.cpp:165] Memory required for data: 1011201500\nI0821 06:48:43.631749 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:48:43.631760 32543 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:48:43.631767 32543 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:48:43.631774 32543 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.631829 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:48:43.631975 32543 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:48:43.631989 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.631994 32543 net.cpp:165] Memory required for data: 1015297500\nI0821 06:48:43.632001 32543 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:48:43.632015 32543 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:48:43.632022 32543 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:48:43.632030 32543 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:48:43.632037 32543 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:48:43.632067 32543 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:48:43.632076 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.632081 32543 net.cpp:165] Memory required for data: 1019393500\nI0821 06:48:43.632086 32543 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:48:43.632094 32543 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:48:43.632100 32543 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:48:43.632107 32543 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.632125 32543 net.cpp:150] Setting up L2_b4_relu\nI0821 06:48:43.632133 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.632138 32543 net.cpp:165] Memory required for data: 1023489500\nI0821 06:48:43.632143 32543 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.632150 32543 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.632156 32543 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:48:43.632164 32543 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:48:43.632174 32543 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:48:43.632221 32543 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.632233 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.632241 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.632244 32543 net.cpp:165] Memory required for data: 1031681500\nI0821 06:48:43.632249 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:48:43.632261 32543 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:48:43.632267 32543 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:48:43.632278 32543 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:48:43.632766 32543 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:48:43.632786 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.632791 32543 net.cpp:165] Memory required for data: 1035777500\nI0821 06:48:43.632799 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:48:43.632808 32543 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:48:43.632814 32543 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:48:43.632827 32543 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:48:43.633078 32543 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:48:43.633092 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.633097 32543 net.cpp:165] Memory required for data: 1039873500\nI0821 06:48:43.633107 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:48:43.633126 32543 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:48:43.633132 32543 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:48:43.633141 32543 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.633196 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:48:43.633348 32543 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:48:43.633361 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.633366 32543 net.cpp:165] Memory required for data: 1043969500\nI0821 06:48:43.633376 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:48:43.633388 32543 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:48:43.633394 32543 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:48:43.633400 32543 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.633410 32543 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:48:43.633416 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.633421 32543 net.cpp:165] Memory required for data: 1048065500\nI0821 06:48:43.633426 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:48:43.633440 32543 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:48:43.633446 32543 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:48:43.633456 32543 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:48:43.633944 32543 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:48:43.633956 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.633961 32543 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:48:43.633970 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:48:43.633980 32543 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:48:43.633985 32543 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:48:43.633996 32543 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:48:43.634253 32543 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:48:43.634268 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634272 32543 net.cpp:165] Memory required for data: 1056257500\nI0821 06:48:43.634282 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:48:43.634294 32543 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:48:43.634300 32543 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:48:43.634308 32543 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.634359 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:48:43.634531 32543 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:48:43.634546 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634551 32543 net.cpp:165] Memory required for data: 1060353500\nI0821 06:48:43.634559 32543 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:48:43.634569 32543 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:48:43.634575 32543 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:48:43.634582 32543 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:48:43.634593 32543 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:48:43.634620 32543 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:48:43.634632 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634644 32543 net.cpp:165] Memory required for data: 1064449500\nI0821 06:48:43.634649 32543 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:48:43.634657 32543 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:48:43.634663 32543 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:48:43.634670 32543 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.634680 32543 net.cpp:150] Setting up L2_b5_relu\nI0821 06:48:43.634686 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634691 32543 net.cpp:165] Memory required for data: 1068545500\nI0821 06:48:43.634696 32543 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.634706 32543 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.634711 32543 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:48:43.634718 32543 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:48:43.634728 32543 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:48:43.634775 32543 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.634788 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634794 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.634799 32543 net.cpp:165] Memory required for data: 1076737500\nI0821 06:48:43.634804 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:48:43.634815 32543 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:48:43.634821 32543 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:48:43.634834 32543 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:48:43.635337 32543 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:48:43.635351 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.635357 32543 net.cpp:165] Memory required for data: 1080833500\nI0821 06:48:43.635366 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:48:43.635375 32543 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:48:43.635381 32543 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:48:43.635392 32543 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:48:43.635643 32543 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:48:43.635655 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.635660 32543 net.cpp:165] Memory required for data: 1084929500\nI0821 06:48:43.635670 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:48:43.635682 32543 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:48:43.635689 32543 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:48:43.635696 32543 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.635751 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:48:43.635896 32543 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:48:43.635910 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.635915 32543 net.cpp:165] Memory required for data: 1089025500\nI0821 06:48:43.635922 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:48:43.635933 32543 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:48:43.635941 32543 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:48:43.635947 32543 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.635957 32543 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:48:43.635963 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.635968 32543 net.cpp:165] Memory required for data: 1093121500\nI0821 06:48:43.635972 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:48:43.635988 32543 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:48:43.635994 32543 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:48:43.636005 32543 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:48:43.636495 32543 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:48:43.636517 32543 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.636521 32543 net.cpp:165] Memory required for data: 1097217500\nI0821 06:48:43.636530 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:48:43.636540 32543 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:48:43.636546 32543 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:48:43.636554 32543 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:48:43.636803 32543 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:48:43.636816 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.636821 32543 net.cpp:165] Memory required for data: 1101313500\nI0821 06:48:43.636831 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:48:43.636840 32543 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:48:43.636847 32543 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:48:43.636860 32543 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.636914 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:48:43.637063 32543 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:48:43.637075 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637079 32543 net.cpp:165] Memory required for data: 1105409500\nI0821 06:48:43.637089 32543 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:48:43.637097 32543 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:48:43.637104 32543 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:48:43.637111 32543 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:48:43.637127 32543 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:48:43.637156 32543 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:48:43.637169 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637174 32543 net.cpp:165] Memory required for data: 1109505500\nI0821 06:48:43.637179 32543 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:48:43.637187 32543 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:48:43.637193 32543 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:48:43.637200 32543 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.637209 32543 net.cpp:150] Setting up L2_b6_relu\nI0821 06:48:43.637217 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637220 32543 net.cpp:165] Memory required for data: 1113601500\nI0821 06:48:43.637225 32543 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.637235 32543 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.637241 32543 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:48:43.637248 32543 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:48:43.637259 32543 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:48:43.637302 32543 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.637316 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637323 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637328 32543 net.cpp:165] Memory required for data: 1121793500\nI0821 06:48:43.637333 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:48:43.637346 32543 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:48:43.637352 32543 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:48:43.637361 32543 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:48:43.637854 32543 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:48:43.637871 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.637876 32543 net.cpp:165] Memory required for data: 1125889500\nI0821 06:48:43.637886 32543 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:48:43.637894 32543 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:48:43.637907 32543 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:48:43.637920 32543 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:48:43.638182 32543 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:48:43.638196 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.638201 32543 net.cpp:165] Memory required for data: 1129985500\nI0821 06:48:43.638211 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:48:43.638223 32543 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:48:43.638231 32543 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:48:43.638237 32543 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.638290 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:48:43.638442 32543 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:48:43.638455 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.638460 32543 net.cpp:165] Memory required for data: 1134081500\nI0821 06:48:43.638469 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:48:43.638478 32543 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:48:43.638484 32543 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:48:43.638499 32543 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.638509 32543 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:48:43.638516 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.638520 32543 net.cpp:165] Memory required for data: 1138177500\nI0821 06:48:43.638525 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:48:43.638540 32543 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:48:43.638545 32543 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:48:43.638555 32543 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:48:43.639044 32543 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:48:43.639057 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639062 32543 net.cpp:165] Memory required for data: 1142273500\nI0821 06:48:43.639071 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:48:43.639083 32543 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:48:43.639091 32543 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:48:43.639098 32543 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:48:43.639358 32543 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:48:43.639371 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639376 32543 net.cpp:165] Memory required for data: 1146369500\nI0821 06:48:43.639386 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:48:43.639395 32543 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:48:43.639401 32543 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:48:43.639413 32543 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.639470 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:48:43.639621 32543 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:48:43.639633 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639638 32543 net.cpp:165] Memory required for data: 1150465500\nI0821 06:48:43.639647 32543 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:48:43.639657 32543 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:48:43.639663 32543 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:48:43.639670 32543 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:48:43.639680 32543 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:48:43.639708 32543 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:48:43.639717 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639721 32543 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:48:43.639727 32543 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:48:43.639737 32543 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:48:43.639744 32543 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:48:43.639750 32543 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.639766 32543 net.cpp:150] Setting up L2_b7_relu\nI0821 06:48:43.639775 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639778 32543 net.cpp:165] Memory required for data: 1158657500\nI0821 06:48:43.639783 32543 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.639793 32543 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.639799 32543 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:48:43.639806 32543 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:48:43.639816 32543 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:48:43.639861 32543 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.639876 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639883 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.639889 32543 net.cpp:165] Memory required for data: 1166849500\nI0821 06:48:43.639894 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:48:43.639904 32543 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:48:43.639910 32543 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:48:43.639919 32543 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:48:43.640424 32543 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:48:43.640439 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.640444 32543 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:48:43.640452 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:48:43.640465 32543 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:48:43.640470 32543 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:48:43.640480 32543 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:48:43.640738 32543 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:48:43.640750 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.640755 32543 net.cpp:165] Memory required for data: 1175041500\nI0821 06:48:43.640766 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:48:43.640774 32543 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:48:43.640780 32543 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:48:43.640791 32543 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.640847 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:48:43.641001 32543 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:48:43.641014 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.641019 32543 net.cpp:165] Memory required for data: 1179137500\nI0821 06:48:43.641028 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:48:43.641036 32543 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:48:43.641042 32543 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:48:43.641052 32543 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.641062 32543 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:48:43.641069 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.641073 32543 net.cpp:165] Memory required for data: 1183233500\nI0821 06:48:43.641078 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:48:43.641093 32543 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:48:43.641098 32543 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:48:43.641106 32543 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:48:43.641607 32543 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:48:43.641621 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.641626 32543 net.cpp:165] Memory required for data: 1187329500\nI0821 06:48:43.641635 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:48:43.641647 32543 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:48:43.641661 32543 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:48:43.641670 32543 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:48:43.641923 32543 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:48:43.641937 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.641942 32543 net.cpp:165] Memory required for data: 1191425500\nI0821 06:48:43.641952 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:48:43.641960 32543 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:48:43.641966 32543 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:48:43.641976 32543 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.642033 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:48:43.642194 32543 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:48:43.642210 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.642215 32543 net.cpp:165] Memory required for data: 1195521500\nI0821 06:48:43.642222 32543 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:48:43.642231 32543 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:48:43.642237 32543 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:48:43.642244 32543 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:48:43.642256 32543 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:48:43.642282 32543 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:48:43.642292 32543 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:48:43.642297 32543 net.cpp:165] Memory required for data: 1199617500\nI0821 06:48:43.642302 32543 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:48:43.642313 32543 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:48:43.642319 32543 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:48:43.642326 32543 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.642335 32543 net.cpp:150] Setting up L2_b8_relu\nI0821 06:48:43.642343 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.642348 32543 net.cpp:165] Memory required for data: 1203713500\nI0821 06:48:43.642352 32543 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.642359 32543 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.642364 32543 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:48:43.642374 32543 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:48:43.642396 32543 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:48:43.642443 32543 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.642457 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.642462 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.642467 32543 net.cpp:165] Memory required for data: 1211905500\nI0821 06:48:43.642472 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:48:43.642488 32543 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:48:43.642494 32543 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:48:43.642508 32543 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:48:43.643005 32543 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:48:43.643019 32543 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:48:43.643024 32543 net.cpp:165] Memory required for data: 1216001500\nI0821 06:48:43.643033 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:48:43.643044 32543 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:48:43.643051 32543 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:48:43.643059 32543 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:48:43.643337 32543 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:48:43.643350 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.643355 32543 net.cpp:165] Memory required for data: 1220097500\nI0821 06:48:43.643373 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:48:43.643383 32543 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:48:43.643389 32543 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:48:43.643398 32543 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.643456 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:48:43.643605 32543 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:48:43.643620 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.643625 32543 net.cpp:165] Memory required for data: 1224193500\nI0821 06:48:43.643635 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:48:43.643642 32543 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:48:43.643648 32543 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:48:43.643656 32543 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.643666 32543 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:48:43.643672 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.643676 32543 net.cpp:165] Memory required for data: 1228289500\nI0821 06:48:43.643682 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:48:43.643694 32543 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:48:43.643702 32543 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:48:43.643714 32543 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:48:43.644212 32543 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:48:43.644227 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.644232 32543 net.cpp:165] Memory required for data: 1232385500\nI0821 06:48:43.644240 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:48:43.644253 32543 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:48:43.644258 32543 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:48:43.644269 32543 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:48:43.644520 32543 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:48:43.644532 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.644536 32543 net.cpp:165] Memory required for data: 1236481500\nI0821 06:48:43.644579 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:48:43.644594 32543 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:48:43.644601 32543 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:48:43.644608 32543 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.644670 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:48:43.644821 32543 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:48:43.644834 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.644839 32543 net.cpp:165] Memory required for data: 1240577500\nI0821 06:48:43.644848 32543 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:48:43.644860 32543 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:48:43.644867 32543 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:48:43.644875 32543 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:48:43.644882 32543 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:48:43.644912 32543 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:48:43.644922 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.644927 32543 net.cpp:165] Memory required for data: 1244673500\nI0821 06:48:43.644932 32543 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:48:43.644939 32543 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:48:43.644945 32543 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:48:43.644955 32543 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.644964 32543 net.cpp:150] Setting up L2_b9_relu\nI0821 06:48:43.644971 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.644976 32543 net.cpp:165] Memory required for data: 1248769500\nI0821 06:48:43.644980 32543 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.644994 32543 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.645000 32543 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:48:43.645011 32543 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:48:43.645021 32543 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:48:43.645071 32543 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.645082 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.645088 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.645093 32543 net.cpp:165] Memory required for data: 1256961500\nI0821 06:48:43.645098 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:48:43.645109 32543 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:48:43.645117 32543 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:48:43.645134 32543 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:48:43.645635 32543 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:48:43.645650 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.645655 32543 net.cpp:165] Memory required for data: 1257985500\nI0821 06:48:43.645664 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:48:43.645673 32543 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:48:43.645679 32543 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:48:43.645690 32543 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:48:43.645956 32543 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:48:43.645969 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.645973 32543 net.cpp:165] Memory required for data: 1259009500\nI0821 06:48:43.645983 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:48:43.645992 32543 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:48:43.645999 32543 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:48:43.646006 32543 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.646064 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:48:43.646224 32543 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:48:43.646240 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.646246 32543 net.cpp:165] Memory required for data: 1260033500\nI0821 06:48:43.646255 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:48:43.646263 32543 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:48:43.646270 32543 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:48:43.646276 32543 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.646286 32543 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:48:43.646292 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.646297 32543 net.cpp:165] Memory required for data: 1261057500\nI0821 06:48:43.646302 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:48:43.646315 32543 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:48:43.646322 32543 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:48:43.646330 32543 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:48:43.646833 32543 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:48:43.646847 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.646852 32543 net.cpp:165] Memory required for data: 1262081500\nI0821 06:48:43.646862 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:48:43.646873 32543 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:48:43.646879 32543 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:48:43.646890 32543 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:48:43.647156 32543 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:48:43.647171 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.647176 32543 net.cpp:165] Memory required for data: 1263105500\nI0821 06:48:43.647194 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:48:43.647203 32543 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:48:43.647210 32543 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:48:43.647222 32543 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.647279 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:48:43.647435 32543 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:48:43.647449 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.647452 32543 net.cpp:165] Memory required for data: 1264129500\nI0821 06:48:43.647462 32543 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:48:43.647475 32543 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:48:43.647481 32543 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:48:43.647490 32543 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:48:43.647527 32543 net.cpp:150] Setting up L3_b1_pool\nI0821 06:48:43.647537 32543 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:48:43.647541 32543 net.cpp:165] Memory required for data: 1265153500\nI0821 06:48:43.647547 32543 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:48:43.647555 32543 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:48:43.647562 32543 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:48:43.647568 32543 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:48:43.647578 32543 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:48:43.647610 32543 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:48:43.647619 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.647624 32543 net.cpp:165] Memory required for data: 1266177500\nI0821 06:48:43.647629 32543 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:48:43.647637 32543 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:48:43.647642 32543 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:48:43.647652 32543 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.647662 32543 net.cpp:150] Setting up L3_b1_relu\nI0821 06:48:43.647670 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.647673 32543 net.cpp:165] Memory required for data: 1267201500\nI0821 06:48:43.647678 32543 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:48:43.647687 32543 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:48:43.647694 32543 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:48:43.648927 32543 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:48:43.648947 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.648952 32543 net.cpp:165] Memory required for data: 1268225500\nI0821 06:48:43.648957 32543 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:48:43.648967 32543 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:48:43.648973 32543 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:48:43.648980 32543 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:48:43.648991 32543 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:48:43.649032 32543 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:48:43.649047 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.649052 32543 net.cpp:165] Memory required for data: 1270273500\nI0821 06:48:43.649057 32543 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.649065 32543 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.649071 32543 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:48:43.649080 32543 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:48:43.649091 32543 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:48:43.649147 32543 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.649160 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.649166 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.649178 32543 net.cpp:165] Memory required for data: 1274369500\nI0821 06:48:43.649184 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:48:43.649199 32543 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:48:43.649206 32543 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:48:43.649215 32543 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:48:43.651309 32543 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:48:43.651327 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.651332 32543 net.cpp:165] Memory required for data: 1276417500\nI0821 06:48:43.651342 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:48:43.651355 32543 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:48:43.651362 32543 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:48:43.651371 32543 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:48:43.651636 32543 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:48:43.651649 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.651654 32543 net.cpp:165] Memory required for data: 1278465500\nI0821 06:48:43.651665 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:48:43.651674 32543 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:48:43.651681 32543 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:48:43.651690 32543 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.651751 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:48:43.651906 32543 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:48:43.651918 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.651923 32543 net.cpp:165] Memory required for data: 1280513500\nI0821 06:48:43.651933 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:48:43.651942 32543 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:48:43.651947 32543 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:48:43.651954 32543 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.651964 32543 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:48:43.651971 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.651976 32543 net.cpp:165] Memory required for data: 1282561500\nI0821 06:48:43.651980 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:48:43.651995 32543 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:48:43.652001 32543 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:48:43.652012 32543 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:48:43.653137 32543 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:48:43.653152 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.653157 32543 net.cpp:165] Memory required for data: 1284609500\nI0821 06:48:43.653167 32543 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:48:43.653178 32543 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:48:43.653185 32543 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:48:43.653194 32543 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:48:43.653460 32543 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:48:43.653472 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.653477 32543 net.cpp:165] Memory required for data: 1286657500\nI0821 06:48:43.653488 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:48:43.653499 32543 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:48:43.653506 32543 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:48:43.653514 32543 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.653573 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:48:43.653728 32543 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:48:43.653741 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.653746 32543 net.cpp:165] Memory required for data: 1288705500\nI0821 06:48:43.653754 32543 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:48:43.653767 32543 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:48:43.653780 32543 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:48:43.653789 32543 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:48:43.653796 32543 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:48:43.653833 32543 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:48:43.653843 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.653847 32543 net.cpp:165] Memory required for data: 1290753500\nI0821 06:48:43.653852 32543 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:48:43.653861 32543 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:48:43.653867 32543 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:48:43.653877 32543 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.653887 32543 net.cpp:150] Setting up L3_b2_relu\nI0821 06:48:43.653894 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.653898 32543 net.cpp:165] Memory required for data: 1292801500\nI0821 06:48:43.653903 32543 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.653910 32543 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.653915 32543 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:48:43.653923 32543 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:48:43.653933 32543 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:48:43.653981 32543 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.653993 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.654000 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.654005 32543 net.cpp:165] Memory required for data: 1296897500\nI0821 06:48:43.654009 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:48:43.654023 32543 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:48:43.654031 32543 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:48:43.654039 32543 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:48:43.655164 32543 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:48:43.655180 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.655185 32543 net.cpp:165] Memory required for data: 1298945500\nI0821 06:48:43.655194 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:48:43.655206 32543 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:48:43.655213 32543 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:48:43.655221 32543 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:48:43.655484 32543 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:48:43.655498 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.655503 32543 net.cpp:165] Memory required for data: 1300993500\nI0821 06:48:43.655513 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:48:43.655521 32543 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:48:43.655527 32543 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:48:43.655535 32543 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.655598 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:48:43.655755 32543 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:48:43.655767 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.655772 32543 net.cpp:165] Memory required for data: 1303041500\nI0821 06:48:43.655781 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:48:43.655789 32543 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:48:43.655797 32543 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:48:43.655803 32543 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.655817 32543 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:48:43.655823 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.655834 32543 net.cpp:165] Memory required for data: 1305089500\nI0821 06:48:43.655839 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:48:43.655851 32543 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:48:43.655860 32543 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:48:43.655869 32543 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:48:43.656989 32543 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:48:43.657004 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657009 32543 net.cpp:165] Memory required for data: 1307137500\nI0821 06:48:43.657019 32543 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:48:43.657032 32543 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:48:43.657037 32543 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:48:43.657045 32543 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:48:43.657322 32543 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:48:43.657336 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657341 32543 net.cpp:165] Memory required for data: 1309185500\nI0821 06:48:43.657351 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:48:43.657363 32543 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:48:43.657371 32543 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:48:43.657378 32543 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.657438 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:48:43.657603 32543 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:48:43.657615 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657620 32543 net.cpp:165] Memory required for data: 1311233500\nI0821 06:48:43.657629 32543 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:48:43.657641 32543 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:48:43.657649 32543 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:48:43.657655 32543 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:48:43.657666 32543 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:48:43.657699 32543 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:48:43.657711 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657716 32543 net.cpp:165] Memory required for data: 1313281500\nI0821 06:48:43.657721 32543 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:48:43.657735 32543 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:48:43.657742 32543 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:48:43.657749 32543 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.657758 32543 net.cpp:150] Setting up L3_b3_relu\nI0821 06:48:43.657765 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657770 32543 net.cpp:165] Memory required for data: 1315329500\nI0821 06:48:43.657774 32543 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.657781 32543 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.657788 32543 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:48:43.657794 32543 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:48:43.657804 32543 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:48:43.657853 32543 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.657865 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657871 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.657876 32543 net.cpp:165] Memory required for data: 1319425500\nI0821 06:48:43.657881 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:48:43.657894 32543 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:48:43.657902 32543 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:48:43.657910 32543 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:48:43.659039 32543 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:48:43.659054 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.659060 32543 net.cpp:165] Memory required for data: 1321473500\nI0821 06:48:43.659068 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:48:43.659080 32543 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:48:43.659087 32543 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:48:43.659098 32543 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:48:43.659374 32543 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:48:43.659389 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.659394 32543 net.cpp:165] Memory required for data: 1323521500\nI0821 06:48:43.659404 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:48:43.659413 32543 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:48:43.659420 32543 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:48:43.659427 32543 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.659487 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:48:43.659646 32543 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:48:43.659658 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.659663 32543 net.cpp:165] Memory required for data: 1325569500\nI0821 06:48:43.659673 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:48:43.659682 32543 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:48:43.659687 32543 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:48:43.659698 32543 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.659708 32543 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:48:43.659714 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.659718 32543 net.cpp:165] Memory required for data: 1327617500\nI0821 06:48:43.659723 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:48:43.659737 32543 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:48:43.659744 32543 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:48:43.659752 32543 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:48:43.660879 32543 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:48:43.660894 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.660899 32543 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:48:43.660909 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:48:43.660922 32543 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:48:43.660928 32543 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:48:43.660935 32543 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:48:43.661217 32543 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:48:43.661231 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661236 32543 net.cpp:165] Memory required for data: 1331713500\nI0821 06:48:43.661247 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:48:43.661257 32543 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:48:43.661264 32543 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:48:43.661273 32543 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.661334 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:48:43.661496 32543 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:48:43.661509 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661514 32543 net.cpp:165] Memory required for data: 1333761500\nI0821 06:48:43.661522 32543 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:48:43.661535 32543 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:48:43.661541 32543 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:48:43.661548 32543 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:48:43.661558 32543 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:48:43.661592 32543 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:48:43.661608 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661612 32543 net.cpp:165] Memory required for data: 1335809500\nI0821 06:48:43.661618 32543 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:48:43.661628 32543 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:48:43.661634 32543 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:48:43.661641 32543 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.661651 32543 net.cpp:150] Setting up L3_b4_relu\nI0821 06:48:43.661659 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661662 32543 net.cpp:165] Memory required for data: 1337857500\nI0821 06:48:43.661667 32543 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.661674 32543 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.661679 32543 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:48:43.661687 32543 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:48:43.661696 32543 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:48:43.661747 32543 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.661758 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661765 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.661769 32543 net.cpp:165] Memory required for data: 1341953500\nI0821 06:48:43.661774 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:48:43.661789 32543 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:48:43.661795 32543 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:48:43.661804 32543 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:48:43.662957 32543 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:48:43.662973 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.662978 32543 net.cpp:165] Memory required for data: 1344001500\nI0821 06:48:43.662987 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:48:43.662999 32543 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:48:43.663007 32543 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:48:43.663017 32543 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:48:43.664284 32543 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:48:43.664302 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.664307 32543 net.cpp:165] Memory required for data: 1346049500\nI0821 06:48:43.664319 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:48:43.664332 32543 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:48:43.664340 32543 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:48:43.664347 32543 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.664413 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:48:43.664574 32543 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:48:43.664587 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.664592 32543 net.cpp:165] Memory required for data: 1348097500\nI0821 06:48:43.664602 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:48:43.664613 32543 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:48:43.664619 32543 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:48:43.664629 32543 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.664639 32543 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:48:43.664646 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.664651 32543 net.cpp:165] Memory required for data: 1350145500\nI0821 06:48:43.664656 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:48:43.664667 32543 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:48:43.664674 32543 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:48:43.664685 32543 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:48:43.666831 32543 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:48:43.666848 32543 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:48:43.666854 32543 net.cpp:165] Memory required for data: 1352193500\nI0821 06:48:43.666863 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:48:43.666877 32543 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:48:43.666884 32543 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:48:43.666893 32543 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:48:43.667165 32543 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:48:43.667179 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667184 32543 net.cpp:165] Memory required for data: 1354241500\nI0821 06:48:43.667196 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:48:43.667207 32543 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:48:43.667214 32543 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:48:43.667222 32543 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.667282 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:48:43.667435 32543 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:48:43.667448 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667454 32543 net.cpp:165] Memory required for data: 1356289500\nI0821 06:48:43.667462 32543 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:48:43.667474 32543 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:48:43.667481 32543 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:48:43.667490 32543 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:48:43.667500 32543 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:48:43.667532 32543 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:48:43.667543 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667547 32543 net.cpp:165] Memory required for data: 1358337500\nI0821 06:48:43.667553 32543 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:48:43.667564 32543 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:48:43.667570 32543 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:48:43.667578 32543 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.667587 32543 net.cpp:150] Setting up L3_b5_relu\nI0821 06:48:43.667594 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667599 32543 net.cpp:165] Memory required for data: 1360385500\nI0821 06:48:43.667603 32543 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.667611 32543 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.667616 32543 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:48:43.667623 32543 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:48:43.667634 32543 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:48:43.667680 32543 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.667690 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667697 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.667701 32543 net.cpp:165] Memory required for data: 1364481500\nI0821 06:48:43.667706 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:48:43.667721 32543 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:48:43.667745 32543 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:48:43.667757 32543 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:48:43.668870 32543 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:48:43.668886 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.668891 32543 net.cpp:165] Memory required for data: 1366529500\nI0821 06:48:43.668900 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:48:43.668913 
32543 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:48:43.668927 32543 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:48:43.668941 32543 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:48:43.669206 32543 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:48:43.669220 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.669225 32543 net.cpp:165] Memory required for data: 1368577500\nI0821 06:48:43.669235 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:48:43.669245 32543 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:48:43.669250 32543 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:48:43.669262 32543 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.669319 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:48:43.669471 32543 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:48:43.669483 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.669488 32543 net.cpp:165] Memory required for data: 1370625500\nI0821 06:48:43.669497 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:48:43.669505 32543 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:48:43.669512 32543 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:48:43.669523 32543 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.669533 32543 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:48:43.669540 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.669544 32543 net.cpp:165] Memory required for data: 1372673500\nI0821 06:48:43.669549 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:48:43.669564 32543 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:48:43.669570 32543 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:48:43.669579 32543 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:48:43.670744 32543 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:48:43.670761 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.670766 32543 net.cpp:165] Memory required for data: 1374721500\nI0821 06:48:43.670775 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:48:43.670789 32543 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:48:43.670795 32543 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:48:43.670804 32543 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:48:43.671061 32543 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:48:43.671073 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671078 32543 net.cpp:165] Memory required for data: 1376769500\nI0821 06:48:43.671088 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:48:43.671103 32543 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:48:43.671108 32543 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:48:43.671116 32543 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.671185 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:48:43.671342 32543 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:48:43.671355 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671360 32543 net.cpp:165] Memory required for data: 1378817500\nI0821 06:48:43.671370 32543 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:48:43.671380 32543 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:48:43.671386 32543 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:48:43.671393 32543 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:48:43.671406 32543 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:48:43.671439 32543 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:48:43.671450 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671455 32543 net.cpp:165] Memory required for data: 1380865500\nI0821 06:48:43.671460 32543 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:48:43.671473 32543 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:48:43.671478 32543 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:48:43.671492 32543 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.671502 32543 net.cpp:150] Setting up L3_b6_relu\nI0821 06:48:43.671509 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671514 32543 net.cpp:165] Memory required for data: 1382913500\nI0821 06:48:43.671519 32543 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.671526 32543 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.671532 32543 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:48:43.671540 32543 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:48:43.671550 32543 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:48:43.671597 32543 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.671608 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671615 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.671619 32543 net.cpp:165] Memory required for data: 1387009500\nI0821 06:48:43.671624 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:48:43.671638 32543 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:48:43.671645 32543 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:48:43.671655 32543 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:48:43.672760 32543 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:48:43.672775 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.672780 32543 net.cpp:165] Memory required for data: 1389057500\nI0821 06:48:43.672788 32543 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:48:43.672801 32543 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:48:43.672807 32543 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:48:43.672818 32543 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:48:43.673071 32543 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:48:43.673084 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.673089 32543 net.cpp:165] Memory required for data: 1391105500\nI0821 06:48:43.673100 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:48:43.673110 32543 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:48:43.673115 32543 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:48:43.673132 32543 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.673190 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:48:43.673346 32543 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:48:43.673359 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.673364 32543 net.cpp:165] Memory required for data: 1393153500\nI0821 06:48:43.673373 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:48:43.673406 32543 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:48:43.673414 32543 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:48:43.673422 32543 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.673434 32543 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:48:43.673440 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.673444 32543 net.cpp:165] Memory required for data: 1395201500\nI0821 06:48:43.673450 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:48:43.673465 32543 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:48:43.673471 32543 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:48:43.673480 32543 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:48:43.674604 32543 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:48:43.674619 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.674624 32543 net.cpp:165] Memory required for data: 1397249500\nI0821 06:48:43.674633 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:48:43.674648 32543 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:48:43.674661 32543 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:48:43.674672 32543 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:48:43.674937 32543 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:48:43.674949 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.674954 32543 net.cpp:165] Memory required for data: 1399297500\nI0821 06:48:43.674964 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:48:43.674973 32543 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:48:43.674979 32543 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:48:43.674988 32543 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.675047 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:48:43.675209 32543 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:48:43.675223 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.675228 32543 net.cpp:165] Memory required for data: 1401345500\nI0821 06:48:43.675237 32543 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:48:43.675246 32543 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:48:43.675253 32543 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:48:43.675261 32543 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:48:43.675271 32543 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:48:43.675304 32543 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:48:43.675317 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.675321 32543 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:48:43.675326 32543 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:48:43.675334 32543 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:48:43.675340 32543 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:48:43.675348 32543 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.675356 32543 net.cpp:150] Setting up L3_b7_relu\nI0821 06:48:43.675364 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.675369 32543 net.cpp:165] Memory required for data: 1405441500\nI0821 06:48:43.675372 32543 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.675384 32543 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.675390 32543 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:48:43.675397 32543 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:48:43.675407 32543 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:48:43.675456 32543 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.675467 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.675472 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.675477 32543 net.cpp:165] Memory required for data: 1409537500\nI0821 06:48:43.675482 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:48:43.675493 32543 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:48:43.675500 32543 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:48:43.675511 32543 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:48:43.676626 32543 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:48:43.676641 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.676646 32543 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:48:43.676656 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:48:43.676663 32543 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:48:43.676671 32543 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:48:43.676681 32543 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:48:43.676947 32543 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:48:43.676964 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.676968 32543 net.cpp:165] Memory required for data: 1413633500\nI0821 06:48:43.676985 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:48:43.676995 32543 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:48:43.677000 32543 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:48:43.677008 32543 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.677067 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:48:43.677232 32543 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:48:43.677247 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.677251 32543 net.cpp:165] Memory required for data: 1415681500\nI0821 06:48:43.677260 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:48:43.677271 32543 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:48:43.677278 32543 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:48:43.677285 32543 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.677295 32543 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:48:43.677302 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.677306 32543 net.cpp:165] Memory required for data: 1417729500\nI0821 06:48:43.677311 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:48:43.677325 32543 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:48:43.677331 32543 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:48:43.677340 32543 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:48:43.678457 32543 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:48:43.678472 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.678478 32543 net.cpp:165] Memory required for data: 1419777500\nI0821 06:48:43.678485 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:48:43.678499 32543 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:48:43.678506 32543 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:48:43.678517 32543 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:48:43.678777 32543 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:48:43.678791 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.678795 32543 net.cpp:165] Memory required for data: 1421825500\nI0821 06:48:43.678805 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:48:43.678815 32543 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:48:43.678822 32543 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:48:43.678833 32543 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.678889 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:48:43.679046 32543 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:48:43.679059 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.679064 32543 net.cpp:165] Memory required for data: 1423873500\nI0821 06:48:43.679074 32543 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:48:43.679082 32543 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:48:43.679088 32543 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:48:43.679095 32543 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:48:43.679108 32543 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:48:43.679152 32543 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:48:43.679164 32543 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:48:43.679168 32543 net.cpp:165] Memory required for data: 1425921500\nI0821 06:48:43.679174 32543 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:48:43.679181 32543 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:48:43.679188 32543 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:48:43.679198 32543 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.679208 32543 net.cpp:150] Setting up L3_b8_relu\nI0821 06:48:43.679214 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.679219 32543 net.cpp:165] Memory required for data: 1427969500\nI0821 06:48:43.679224 32543 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.679237 32543 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.679244 32543 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:48:43.679251 32543 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:48:43.679260 32543 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:48:43.679309 32543 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.679322 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.679327 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.679332 32543 net.cpp:165] Memory required for data: 1432065500\nI0821 06:48:43.679337 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:48:43.679348 32543 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:48:43.679354 32543 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:48:43.679368 32543 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:48:43.681480 32543 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:48:43.681499 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:48:43.681504 32543 net.cpp:165] Memory required for data: 1434113500\nI0821 06:48:43.681514 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:48:43.681524 32543 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:48:43.681532 32543 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:48:43.681541 32543 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:48:43.681804 32543 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:48:43.681818 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.681823 32543 net.cpp:165] Memory required for data: 1436161500\nI0821 06:48:43.681833 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:48:43.681841 32543 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:48:43.681848 32543 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:48:43.681859 32543 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.681917 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:48:43.682078 32543 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:48:43.682092 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.682097 32543 net.cpp:165] Memory required for data: 1438209500\nI0821 06:48:43.682106 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:48:43.682117 32543 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:48:43.682129 32543 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:48:43.682137 32543 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.682147 32543 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:48:43.682154 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.682159 32543 net.cpp:165] Memory required for data: 1440257500\nI0821 06:48:43.682164 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:48:43.682179 32543 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:48:43.682185 32543 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:48:43.682198 32543 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:48:43.683316 32543 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:48:43.683331 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.683336 32543 net.cpp:165] Memory required for data: 1442305500\nI0821 06:48:43.683346 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:48:43.683354 32543 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:48:43.683360 32543 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:48:43.683372 32543 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:48:43.683634 32543 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:48:43.683650 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.683655 32543 net.cpp:165] Memory required for data: 1444353500\nI0821 06:48:43.683671 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:48:43.683681 32543 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:48:43.683687 32543 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:48:43.683696 32543 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.683753 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:48:43.683908 32543 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:48:43.683921 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.683926 32543 net.cpp:165] Memory required for data: 1446401500\nI0821 06:48:43.683935 32543 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:48:43.683948 32543 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:48:43.683954 32543 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:48:43.683961 32543 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:48:43.683969 32543 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:48:43.684005 32543 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:48:43.684017 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.684022 32543 net.cpp:165] Memory required for data: 1448449500\nI0821 06:48:43.684026 32543 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:48:43.684034 32543 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:48:43.684041 32543 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:48:43.684047 32543 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.684057 32543 net.cpp:150] Setting up L3_b9_relu\nI0821 06:48:43.684063 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.684068 32543 net.cpp:165] Memory required for data: 1450497500\nI0821 06:48:43.684073 32543 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:48:43.684082 32543 net.cpp:100] Creating Layer post_pool\nI0821 06:48:43.684087 32543 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:48:43.684098 32543 net.cpp:408] post_pool -> post_pool\nI0821 06:48:43.684139 32543 net.cpp:150] Setting up post_pool\nI0821 06:48:43.684151 32543 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:48:43.684155 32543 net.cpp:165] Memory required for data: 1450529500\nI0821 06:48:43.684161 32543 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:48:43.684257 32543 net.cpp:100] Creating Layer post_FC\nI0821 06:48:43.684269 32543 net.cpp:434] post_FC <- post_pool\nI0821 06:48:43.684284 32543 net.cpp:408] post_FC -> post_FC_top\nI0821 06:48:43.684545 32543 net.cpp:150] Setting up post_FC\nI0821 06:48:43.684561 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.684566 32543 net.cpp:165] Memory required for data: 1450534500\nI0821 06:48:43.684576 32543 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:48:43.684587 32543 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:48:43.684593 32543 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:48:43.684602 32543 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:48:43.684612 32543 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:48:43.684664 32543 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:48:43.684676 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.684682 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.684687 32543 net.cpp:165] Memory required for data: 1450544500\nI0821 06:48:43.684692 32543 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:48:43.684736 32543 net.cpp:100] Creating Layer accuracy\nI0821 06:48:43.684747 32543 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:48:43.684756 32543 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:48:43.684763 32543 net.cpp:408] accuracy -> accuracy\nI0821 06:48:43.684806 32543 net.cpp:150] Setting up accuracy\nI0821 06:48:43.684819 32543 net.cpp:157] Top shape: (1)\nI0821 06:48:43.684823 32543 net.cpp:165] Memory required for data: 1450544504\nI0821 06:48:43.684829 32543 layer_factory.hpp:77] Creating layer loss\nI0821 06:48:43.684845 32543 net.cpp:100] Creating Layer loss\nI0821 06:48:43.684851 32543 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:48:43.684859 32543 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:48:43.684870 32543 net.cpp:408] loss -> loss\nI0821 06:48:43.685897 32543 layer_factory.hpp:77] Creating layer loss\nI0821 06:48:43.686060 32543 net.cpp:150] Setting up loss\nI0821 06:48:43.686076 32543 net.cpp:157] Top shape: (1)\nI0821 06:48:43.686081 32543 net.cpp:160]     with loss weight 1\nI0821 06:48:43.686177 32543 net.cpp:165] Memory required for data: 1450544508\nI0821 06:48:43.686187 32543 net.cpp:226] loss needs backward computation.\nI0821 06:48:43.686193 32543 net.cpp:228] accuracy does not need backward computation.\nI0821 06:48:43.686199 32543 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:48:43.686204 32543 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:48:43.686209 32543 net.cpp:226] post_pool needs backward computation.\nI0821 06:48:43.686214 32543 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:48:43.686219 32543 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.686225 32543 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.686229 32543 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.686235 32543 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.686240 32543 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.686245 32543 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.686250 32543 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.686255 32543 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.686260 32543 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:48:43.686265 32543 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:48:43.686270 32543 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.686276 32543 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.686281 32543 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.686286 32543 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.686291 32543 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.686296 32543 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:48:43.686301 32543 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.686311 32543 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.686316 32543 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:48:43.686321 32543 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:48:43.686326 32543 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:48:43.686332 32543 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.686337 32543 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.686342 32543 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.686347 32543 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.686352 32543 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.686357 32543 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.686362 32543 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.686367 32543 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:48:43.686372 32543 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:48:43.686378 32543 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.686383 32543 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.686388 32543 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.686393 32543 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.686398 32543 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.686410 32543 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.686416 32543 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.686421 32543 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.686427 32543 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:48:43.686432 32543 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:48:43.686437 32543 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:48:43.686444 32543 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.686449 32543 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.686453 32543 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.686458 32543 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.686465 32543 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.686468 32543 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.686475 32543 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.686480 32543 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:48:43.686486 32543 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:48:43.686491 32543 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.686496 32543 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.686501 32543 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.686506 32543 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.686511 32543 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.686517 32543 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.686522 32543 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.686527 32543 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.686532 32543 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:48:43.686537 32543 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:48:43.686542 32543 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:48:43.686548 32543 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.686553 32543 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.686558 32543 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.686563 32543 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.686568 32543 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.686573 
32543 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.686578 32543 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.686583 32543 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:48:43.686592 32543 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:48:43.686599 32543 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.686604 32543 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.686609 32543 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.686615 32543 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.686620 32543 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.686625 32543 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.686630 32543 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.686636 32543 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.686641 32543 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:48:43.686646 32543 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:48:43.686653 32543 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:48:43.686658 32543 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:48:43.686668 32543 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.686676 32543 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:48:43.686681 32543 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.686686 32543 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.686691 32543 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.686697 32543 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.686702 32543 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:48:43.686707 32543 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.686712 32543 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.686718 32543 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:48:43.686723 32543 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:48:43.686728 32543 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.686734 32543 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.686739 32543 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.686745 32543 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.686750 32543 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.686755 32543 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.686760 32543 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.686765 32543 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.686771 32543 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:48:43.686776 32543 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:48:43.686782 32543 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.686789 32543 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.686794 32543 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.686799 32543 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.686805 32543 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.686810 32543 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:48:43.686815 32543 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.686820 32543 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.686825 32543 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:48:43.686831 32543 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:48:43.686836 32543 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:48:43.686841 32543 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.686847 32543 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.686852 32543 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.686857 32543 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.686863 32543 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.686868 32543 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.686873 32543 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.686879 32543 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:48:43.686884 32543 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:48:43.686889 32543 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.686895 32543 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.686900 32543 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.686906 32543 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.686911 32543 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.686916 32543 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.686926 32543 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.686933 32543 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.686939 32543 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:48:43.686944 32543 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:48:43.686949 32543 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:48:43.686954 32543 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.686959 32543 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.686969 32543 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.686975 32543 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.686980 32543 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.686985 32543 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.686990 32543 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.686995 32543 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:48:43.687001 32543 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:48:43.687006 32543 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.687012 32543 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.687017 32543 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.687023 32543 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.687028 32543 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.687034 32543 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.687039 32543 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.687046 32543 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.687050 32543 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:48:43.687057 32543 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:48:43.687062 32543 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:48:43.687068 32543 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.687073 32543 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.687079 32543 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.687084 32543 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.687089 32543 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.687095 32543 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.687100 32543 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.687106 32543 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:48:43.687111 32543 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:48:43.687122 32543 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.687130 32543 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.687135 32543 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.687141 32543 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.687147 32543 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.687152 32543 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.687158 32543 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.687163 32543 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.687170 32543 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:48:43.687175 32543 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:48:43.687181 32543 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:48:43.687186 32543 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:48:43.687192 32543 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.687204 32543 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:48:43.687211 32543 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.687216 32543 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.687222 32543 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.687227 32543 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.687232 32543 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:48:43.687238 32543 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.687243 32543 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.687249 32543 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:48:43.687255 32543 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:48:43.687260 32543 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.687266 32543 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.687273 32543 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.687278 32543 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.687283 32543 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.687289 32543 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.687295 32543 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.687300 32543 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.687306 32543 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:48:43.687311 32543 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:48:43.687317 32543 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.687324 32543 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.687330 32543 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.687335 32543 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.687340 32543 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.687346 32543 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:48:43.687352 32543 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.687357 32543 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.687363 32543 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:48:43.687369 32543 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:48:43.687376 32543 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:48:43.687381 32543 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.687387 32543 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.687393 32543 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.687398 32543 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.687404 32543 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.687410 32543 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.687417 32543 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.687422 32543 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:48:43.687427 32543 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:48:43.687433 32543 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.687439 32543 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.687444 32543 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.687450 32543 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.687456 32543 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.687461 32543 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.687467 32543 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.687477 32543 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.687484 32543 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:48:43.687489 32543 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:48:43.687495 32543 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:48:43.687502 32543 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.687507 32543 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.687513 32543 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.687520 32543 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.687525 32543 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.687530 32543 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.687536 32543 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.687542 32543 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:48:43.687548 32543 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:48:43.687553 32543 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.687561 32543 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.687566 32543 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.687572 32543 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.687577 32543 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.687583 32543 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.687588 32543 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.687594 32543 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.687600 32543 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:48:43.687607 32543 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:48:43.687611 32543 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:48:43.687618 32543 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.687623 32543 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.687629 32543 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.687635 32543 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.687640 32543 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.687646 32543 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.687651 32543 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.687657 32543 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:48:43.687666 32543 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:48:43.687672 32543 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.687680 32543 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.687685 32543 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.687690 32543 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.687696 32543 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.687702 32543 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.687707 32543 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.687713 32543 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.687719 32543 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:48:43.687724 32543 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:48:43.687731 32543 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.687737 32543 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.687742 32543 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.687748 32543 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.687759 32543 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.687765 32543 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:48:43.687770 32543 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.687777 32543 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.687783 32543 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:48:43.687788 32543 net.cpp:226] pre_relu needs backward computation.\nI0821 06:48:43.687793 32543 net.cpp:226] pre_scale needs backward computation.\nI0821 06:48:43.687798 32543 net.cpp:226] pre_bn needs backward computation.\nI0821 06:48:43.687804 32543 net.cpp:226] pre_conv needs backward computation.\nI0821 06:48:43.687811 32543 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:48:43.687818 32543 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:48:43.687822 32543 net.cpp:270] This network produces output accuracy\nI0821 06:48:43.687829 32543 net.cpp:270] This network produces output loss\nI0821 06:48:43.688196 32543 net.cpp:283] Network initialization done.\nI0821 06:48:43.697439 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:43.697477 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:43.697536 32543 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:48:43.697921 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:48:43.697939 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:48:43.697950 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:48:43.697960 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:48:43.697970 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:48:43.697979 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:48:43.697988 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:48:43.697998 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:48:43.698007 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:48:43.698015 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:48:43.698025 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:48:43.698034 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:48:43.698043 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:48:43.698052 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:48:43.698061 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:48:43.698071 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:48:43.698081 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:48:43.698089 32543 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:48:43.698098 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:48:43.698117 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:48:43.698135 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:48:43.698144 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:48:43.698156 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:48:43.698166 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:48:43.698175 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:48:43.698184 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:48:43.698192 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:48:43.698200 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:48:43.698210 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:48:43.698218 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:48:43.698227 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:48:43.698235 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:48:43.698246 32543 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:48:43.698252 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:48:43.698262 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:48:43.698271 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:48:43.698279 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:48:43.698288 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:48:43.698297 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:48:43.698305 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:48:43.698318 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:48:43.698326 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:48:43.698335 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:48:43.698343 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:48:43.698352 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:48:43.698361 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:48:43.698370 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:48:43.698379 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:48:43.698387 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:48:43.698395 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:48:43.698412 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:48:43.698422 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:48:43.698431 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:48:43.698441 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:48:43.698449 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:48:43.698457 32543 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:48:43.700111 32543 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:48:43.701714 32543 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:48:43.701947 32543 net.cpp:100] Creating Layer dataLayer\nI0821 06:48:43.701966 32543 net.cpp:408] dataLayer -> data_top\nI0821 06:48:43.701985 32543 net.cpp:408] dataLayer -> label\nI0821 06:48:43.701997 32543 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:48:43.709633 32551 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:48:43.709908 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:43.717272 32543 net.cpp:150] Setting up dataLayer\nI0821 06:48:43.717295 32543 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:48:43.717303 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.717309 32543 net.cpp:165] Memory required for data: 1536500\nI0821 06:48:43.717315 32543 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:48:43.717344 32543 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:48:43.717355 32543 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:48:43.717370 32543 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:48:43.717383 32543 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:48:43.717507 32543 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:48:43.717521 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.717528 32543 net.cpp:157] Top shape: 125 (125)\nI0821 06:48:43.717532 32543 net.cpp:165] Memory required for data: 1537500\nI0821 06:48:43.717538 32543 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:48:43.717557 32543 net.cpp:100] Creating Layer pre_conv\nI0821 06:48:43.717564 32543 net.cpp:434] pre_conv <- data_top\nI0821 06:48:43.717576 32543 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:48:43.718010 32543 net.cpp:150] Setting up pre_conv\nI0821 06:48:43.718034 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718040 32543 net.cpp:165] Memory required for data: 9729500\nI0821 06:48:43.718056 32543 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:48:43.718068 32543 net.cpp:100] Creating Layer pre_bn\nI0821 06:48:43.718075 32543 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:48:43.718086 32543 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:48:43.718427 32543 net.cpp:150] Setting up pre_bn\nI0821 06:48:43.718442 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718448 32543 net.cpp:165] Memory required for data: 17921500\nI0821 06:48:43.718466 32543 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:48:43.718480 32543 net.cpp:100] Creating Layer pre_scale\nI0821 06:48:43.718487 32543 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:48:43.718497 32543 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:48:43.718559 32543 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:48:43.718778 32543 net.cpp:150] Setting up pre_scale\nI0821 06:48:43.718793 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718798 32543 net.cpp:165] Memory required for data: 26113500\nI0821 06:48:43.718811 32543 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:48:43.718829 32543 net.cpp:100] Creating Layer pre_relu\nI0821 06:48:43.718837 32543 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:48:43.718844 32543 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:48:43.718854 32543 net.cpp:150] Setting up pre_relu\nI0821 06:48:43.718861 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718866 32543 net.cpp:165] Memory required for data: 
34305500\nI0821 06:48:43.718870 32543 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:48:43.718881 32543 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:48:43.718889 32543 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:48:43.718897 32543 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:48:43.718906 32543 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:48:43.718966 32543 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:48:43.718978 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718987 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.718992 32543 net.cpp:165] Memory required for data: 50689500\nI0821 06:48:43.718997 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:48:43.719029 32543 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:48:43.719038 32543 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:48:43.719048 32543 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:48:43.719447 32543 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:48:43.719465 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.719470 32543 net.cpp:165] Memory required for data: 58881500\nI0821 06:48:43.719482 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:48:43.719498 32543 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:48:43.719504 32543 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:48:43.719513 32543 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:48:43.719816 32543 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:48:43.719831 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.719837 32543 net.cpp:165] Memory required for data: 67073500\nI0821 06:48:43.719848 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:48:43.719857 32543 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:48:43.719864 32543 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:48:43.719877 32543 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.719992 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:48:43.720451 32543 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:48:43.720468 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.720473 32543 net.cpp:165] Memory required for data: 75265500\nI0821 06:48:43.720489 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:48:43.720504 32543 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:48:43.720512 32543 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:48:43.720521 32543 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.720531 32543 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:48:43.720538 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.720542 32543 net.cpp:165] Memory required for data: 83457500\nI0821 06:48:43.720547 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:48:43.720564 32543 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:48:43.720571 32543 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:48:43.720582 32543 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:48:43.720978 32543 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:48:43.720994 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.720999 32543 net.cpp:165] Memory required for data: 91649500\nI0821 06:48:43.721009 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:48:43.721021 32543 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:48:43.721027 32543 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:48:43.721037 32543 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:48:43.721377 32543 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:48:43.721392 32543 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.721398 32543 net.cpp:165] Memory required for data: 99841500\nI0821 06:48:43.721413 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:48:43.721422 32543 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:48:43.721442 32543 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:48:43.721456 32543 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.721531 32543 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:48:43.721711 32543 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:48:43.721727 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.721732 32543 net.cpp:165] Memory required for data: 108033500\nI0821 06:48:43.721742 32543 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:48:43.721755 32543 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:48:43.721762 32543 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:48:43.721771 32543 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:48:43.721781 32543 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:48:43.721818 32543 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:48:43.721832 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.721837 32543 net.cpp:165] Memory required for data: 116225500\nI0821 06:48:43.721842 32543 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:48:43.721849 32543 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:48:43.721854 32543 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:48:43.721865 32543 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.721874 32543 net.cpp:150] Setting up L1_b1_relu\nI0821 06:48:43.721881 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.721885 32543 net.cpp:165] Memory required for data: 124417500\nI0821 06:48:43.721890 32543 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.721902 32543 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.721907 32543 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:48:43.721915 32543 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:48:43.721927 32543 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:48:43.721978 32543 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:48:43.721987 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.722007 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.722012 32543 net.cpp:165] Memory required for data: 140801500\nI0821 06:48:43.722017 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:48:43.722034 32543 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:48:43.722041 32543 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:48:43.722051 32543 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:48:43.722465 32543 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:48:43.722479 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.722486 32543 net.cpp:165] Memory required for data: 148993500\nI0821 06:48:43.722494 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:48:43.722509 32543 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:48:43.722517 32543 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:48:43.722527 32543 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:48:43.722841 32543 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:48:43.722856 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.722860 32543 net.cpp:165] Memory required for data: 157185500\nI0821 06:48:43.722872 32543 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:48:43.722880 32543 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:48:43.722885 32543 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:48:43.722898 32543 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.722968 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:48:43.723162 32543 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:48:43.723178 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.723184 32543 net.cpp:165] Memory required for data: 165377500\nI0821 06:48:43.723193 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:48:43.723206 32543 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:48:43.723213 32543 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:48:43.723222 32543 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.723232 32543 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:48:43.723239 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.723243 32543 net.cpp:165] Memory required for data: 173569500\nI0821 06:48:43.723249 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:48:43.723265 32543 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:48:43.723271 32543 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:48:43.723284 32543 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:48:43.723932 32543 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:48:43.723948 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.723953 32543 net.cpp:165] Memory required for data: 181761500\nI0821 06:48:43.723963 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:48:43.723975 32543 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:48:43.723981 32543 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:48:43.724002 32543 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:48:43.724339 32543 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:48:43.724355 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724361 32543 net.cpp:165] Memory required for data: 189953500\nI0821 06:48:43.724377 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:48:43.724390 32543 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:48:43.724396 32543 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:48:43.724405 32543 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.724478 32543 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:48:43.724658 32543 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:48:43.724673 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724678 32543 net.cpp:165] Memory required for data: 198145500\nI0821 06:48:43.724695 32543 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:48:43.724709 32543 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:48:43.724714 32543 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:48:43.724721 32543 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:48:43.724732 32543 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:48:43.724772 32543 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:48:43.724782 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724787 32543 net.cpp:165] Memory required for data: 206337500\nI0821 06:48:43.724792 32543 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:48:43.724799 32543 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:48:43.724805 32543 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:48:43.724815 32543 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.724824 32543 net.cpp:150] Setting up L1_b2_relu\nI0821 06:48:43.724831 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724836 32543 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:48:43.724841 32543 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.724855 32543 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.724862 32543 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:48:43.724869 32543 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:48:43.724879 32543 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:48:43.724928 32543 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:48:43.724941 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724948 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.724956 32543 net.cpp:165] Memory required for data: 230913500\nI0821 06:48:43.724961 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:48:43.724980 32543 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:48:43.724988 32543 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:48:43.724999 32543 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:48:43.725419 32543 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:48:43.725435 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.725440 32543 net.cpp:165] Memory required for data: 239105500\nI0821 06:48:43.725450 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:48:43.725462 32543 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:48:43.725468 32543 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:48:43.725476 32543 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:48:43.725802 32543 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:48:43.725818 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.725826 32543 net.cpp:165] Memory required for data: 
247297500\nI0821 06:48:43.725836 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:48:43.725850 32543 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:48:43.725857 32543 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:48:43.725865 32543 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.725930 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:48:43.726150 32543 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:48:43.726166 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.726171 32543 net.cpp:165] Memory required for data: 255489500\nI0821 06:48:43.726181 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:48:43.726188 32543 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:48:43.726194 32543 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:48:43.726207 32543 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.726227 32543 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:48:43.726235 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.726239 32543 net.cpp:165] Memory required for data: 263681500\nI0821 06:48:43.726244 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:48:43.726261 32543 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:48:43.726269 32543 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:48:43.726277 32543 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:48:43.726907 32543 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:48:43.726923 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.726928 32543 net.cpp:165] Memory required for data: 271873500\nI0821 06:48:43.726938 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:48:43.726961 32543 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:48:43.726969 32543 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:48:43.726977 32543 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:48:43.727303 32543 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:48:43.727319 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.727324 32543 net.cpp:165] Memory required for data: 280065500\nI0821 06:48:43.727335 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:48:43.727344 32543 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:48:43.727350 32543 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:48:43.727362 32543 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.727432 32543 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:48:43.727615 32543 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:48:43.727628 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.727633 32543 net.cpp:165] Memory required for data: 288257500\nI0821 06:48:43.727643 32543 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:48:43.727651 32543 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:48:43.727660 32543 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:48:43.727668 32543 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:48:43.727679 32543 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:48:43.727716 32543 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:48:43.727740 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.727746 32543 net.cpp:165] Memory required for data: 296449500\nI0821 06:48:43.727751 32543 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:48:43.727759 32543 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:48:43.727766 32543 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:48:43.727773 32543 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.727783 32543 net.cpp:150] Setting up L1_b3_relu\nI0821 06:48:43.727790 32543 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:48:43.727794 32543 net.cpp:165] Memory required for data: 304641500\nI0821 06:48:43.727802 32543 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.727813 32543 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.727826 32543 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:48:43.727838 32543 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:48:43.727847 32543 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:48:43.727902 32543 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:48:43.727916 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.727922 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.727927 32543 net.cpp:165] Memory required for data: 321025500\nI0821 06:48:43.727933 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:48:43.727946 32543 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:48:43.727959 32543 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:48:43.727972 32543 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:48:43.728365 32543 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:48:43.728381 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.728386 32543 net.cpp:165] Memory required for data: 329217500\nI0821 06:48:43.728397 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:48:43.728410 32543 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:48:43.728415 32543 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:48:43.728425 32543 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:48:43.728698 32543 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:48:43.728711 32543 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:48:43.728716 32543 net.cpp:165] Memory required for data: 337409500\nI0821 06:48:43.728726 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:48:43.728736 32543 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:48:43.728742 32543 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:48:43.728754 32543 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.728811 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:48:43.728971 32543 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:48:43.728986 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.728991 32543 net.cpp:165] Memory required for data: 345601500\nI0821 06:48:43.729001 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:48:43.729008 32543 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:48:43.729014 32543 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:48:43.729022 32543 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.729032 32543 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:48:43.729038 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.729043 32543 net.cpp:165] Memory required for data: 353793500\nI0821 06:48:43.729048 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:48:43.729060 32543 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:48:43.729066 32543 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:48:43.729077 32543 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:48:43.729441 32543 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:48:43.729456 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.729461 32543 net.cpp:165] Memory required for data: 361985500\nI0821 06:48:43.729470 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:48:43.729482 32543 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:48:43.729488 32543 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:48:43.729499 32543 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:48:43.729769 32543 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:48:43.729784 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.729789 32543 net.cpp:165] Memory required for data: 370177500\nI0821 06:48:43.729799 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:48:43.729807 32543 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:48:43.729813 32543 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:48:43.729825 32543 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.729882 32543 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:48:43.730038 32543 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:48:43.730051 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730057 32543 net.cpp:165] Memory required for data: 378369500\nI0821 06:48:43.730065 32543 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:48:43.730074 32543 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:48:43.730080 32543 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:48:43.730087 32543 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:48:43.730104 32543 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:48:43.730146 32543 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:48:43.730160 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730165 32543 net.cpp:165] Memory required for data: 386561500\nI0821 06:48:43.730171 32543 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:48:43.730178 32543 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:48:43.730185 32543 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:48:43.730191 32543 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.730201 32543 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:48:43.730207 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730211 32543 net.cpp:165] Memory required for data: 394753500\nI0821 06:48:43.730216 32543 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.730226 32543 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.730232 32543 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:48:43.730238 32543 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:48:43.730248 32543 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:48:43.730296 32543 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:48:43.730309 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730315 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730319 32543 net.cpp:165] Memory required for data: 411137500\nI0821 06:48:43.730324 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:48:43.730335 32543 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:48:43.730341 32543 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:48:43.730353 32543 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:48:43.730732 32543 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:48:43.730748 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.730753 32543 net.cpp:165] Memory required for data: 419329500\nI0821 06:48:43.730773 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:48:43.730785 32543 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:48:43.730792 32543 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:48:43.730800 32543 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:48:43.731071 32543 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:48:43.731084 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.731089 32543 net.cpp:165] Memory required for data: 427521500\nI0821 06:48:43.731099 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:48:43.731108 32543 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:48:43.731114 32543 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:48:43.731132 32543 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.731191 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:48:43.731351 32543 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:48:43.731364 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.731369 32543 net.cpp:165] Memory required for data: 435713500\nI0821 06:48:43.731377 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:48:43.731385 32543 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:48:43.731391 32543 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:48:43.731398 32543 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.731408 32543 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:48:43.731415 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.731420 32543 net.cpp:165] Memory required for data: 443905500\nI0821 06:48:43.731423 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:48:43.731444 32543 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:48:43.731451 32543 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:48:43.731462 32543 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:48:43.731815 32543 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:48:43.731829 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.731834 32543 net.cpp:165] Memory required for data: 452097500\nI0821 06:48:43.731843 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:48:43.731855 32543 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:48:43.731861 32543 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:48:43.731873 32543 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:48:43.732154 32543 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:48:43.732168 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732173 32543 net.cpp:165] Memory required for data: 460289500\nI0821 06:48:43.732183 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:48:43.732192 32543 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:48:43.732198 32543 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:48:43.732208 32543 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.732269 32543 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:48:43.732445 32543 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:48:43.732462 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732467 32543 net.cpp:165] Memory required for data: 468481500\nI0821 06:48:43.732476 32543 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:48:43.732486 32543 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:48:43.732491 32543 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:48:43.732497 32543 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:48:43.732506 32543 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:48:43.732543 32543 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:48:43.732555 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732560 32543 net.cpp:165] Memory required for data: 476673500\nI0821 06:48:43.732565 32543 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:48:43.732573 32543 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:48:43.732581 32543 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:48:43.732589 32543 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.732597 32543 net.cpp:150] Setting up L1_b5_relu\nI0821 06:48:43.732604 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732609 32543 net.cpp:165] Memory required for data: 484865500\nI0821 06:48:43.732614 32543 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.732620 32543 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.732625 32543 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:48:43.732636 32543 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:48:43.732646 32543 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:48:43.732693 32543 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:48:43.732705 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732712 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.732717 32543 net.cpp:165] Memory required for data: 501249500\nI0821 06:48:43.732722 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:48:43.732735 32543 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:48:43.732741 32543 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:48:43.732750 32543 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:48:43.733108 32543 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:48:43.733130 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.733142 32543 net.cpp:165] Memory required for data: 509441500\nI0821 06:48:43.733151 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:48:43.733163 32543 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:48:43.733170 32543 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:48:43.733178 32543 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:48:43.733459 32543 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:48:43.733472 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.733477 32543 net.cpp:165] Memory required for data: 517633500\nI0821 06:48:43.733489 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:48:43.733496 32543 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:48:43.733502 32543 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:48:43.733515 32543 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.733573 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:48:43.733732 32543 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:48:43.733752 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.733757 32543 net.cpp:165] Memory required for data: 525825500\nI0821 06:48:43.733765 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:48:43.733773 32543 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:48:43.733779 32543 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:48:43.733786 32543 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.733795 32543 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:48:43.733803 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.733806 32543 net.cpp:165] Memory required for data: 534017500\nI0821 06:48:43.733811 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:48:43.733824 32543 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:48:43.733830 32543 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:48:43.733841 32543 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:48:43.734200 32543 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:48:43.734215 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.734220 32543 net.cpp:165] Memory required for data: 542209500\nI0821 06:48:43.734230 32543 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:48:43.734241 32543 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:48:43.734247 32543 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:48:43.734259 32543 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:48:43.734581 32543 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:48:43.734596 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.734601 32543 net.cpp:165] Memory required for data: 550401500\nI0821 06:48:43.734612 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:48:43.734622 32543 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:48:43.734627 32543 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:48:43.734635 32543 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.734699 32543 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:48:43.734855 32543 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:48:43.734869 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.734874 32543 net.cpp:165] Memory required for data: 558593500\nI0821 06:48:43.734882 32543 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:48:43.734904 32543 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:48:43.734910 32543 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:48:43.734916 32543 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:48:43.734925 32543 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:48:43.734963 32543 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:48:43.734973 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.734978 32543 net.cpp:165] Memory required for data: 566785500\nI0821 06:48:43.734990 32543 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:48:43.734998 32543 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:48:43.735003 32543 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:48:43.735013 32543 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.735023 32543 net.cpp:150] Setting up L1_b6_relu\nI0821 06:48:43.735030 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.735034 32543 net.cpp:165] Memory required for data: 574977500\nI0821 06:48:43.735039 32543 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.735046 32543 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.735051 32543 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:48:43.735059 32543 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:48:43.735069 32543 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:48:43.735124 32543 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:48:43.735136 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.735143 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.735148 32543 net.cpp:165] Memory required for data: 591361500\nI0821 06:48:43.735153 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:48:43.735163 32543 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:48:43.735169 32543 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:48:43.735182 32543 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:48:43.735540 32543 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:48:43.735554 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.735559 32543 net.cpp:165] Memory required for data: 599553500\nI0821 06:48:43.735569 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:48:43.735577 32543 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:48:43.735584 32543 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:48:43.735594 32543 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:48:43.735868 32543 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:48:43.735882 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.735887 32543 net.cpp:165] Memory required for data: 607745500\nI0821 06:48:43.735896 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:48:43.735908 32543 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:48:43.735914 32543 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:48:43.735921 32543 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.735980 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:48:43.736147 32543 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:48:43.736161 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.736166 32543 net.cpp:165] Memory required for data: 615937500\nI0821 06:48:43.736176 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:48:43.736182 32543 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:48:43.736188 32543 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:48:43.736199 32543 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.736208 32543 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:48:43.736215 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.736220 32543 net.cpp:165] Memory required for data: 624129500\nI0821 06:48:43.736224 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:48:43.736238 32543 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:48:43.736244 32543 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:48:43.736253 32543 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:48:43.736618 32543 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:48:43.736634 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.736644 32543 
net.cpp:165] Memory required for data: 632321500\nI0821 06:48:43.736654 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:48:43.736666 32543 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:48:43.736672 32543 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:48:43.736680 32543 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:48:43.736955 32543 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:48:43.736968 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.736974 32543 net.cpp:165] Memory required for data: 640513500\nI0821 06:48:43.736984 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:48:43.736994 32543 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:48:43.737001 32543 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:48:43.737009 32543 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.737072 32543 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:48:43.737242 32543 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:48:43.737257 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737262 32543 net.cpp:165] Memory required for data: 648705500\nI0821 06:48:43.737270 32543 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:48:43.737279 32543 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:48:43.737285 32543 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:48:43.737293 32543 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:48:43.737303 32543 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:48:43.737337 32543 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:48:43.737352 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737357 32543 net.cpp:165] Memory required for data: 656897500\nI0821 06:48:43.737362 32543 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:48:43.737370 32543 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:48:43.737375 32543 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:48:43.737382 32543 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.737392 32543 net.cpp:150] Setting up L1_b7_relu\nI0821 06:48:43.737398 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737403 32543 net.cpp:165] Memory required for data: 665089500\nI0821 06:48:43.737407 32543 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.737417 32543 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.737423 32543 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:48:43.737431 32543 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:48:43.737440 32543 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:48:43.737490 32543 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:48:43.737501 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737509 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737512 32543 net.cpp:165] Memory required for data: 681473500\nI0821 06:48:43.737517 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:48:43.737529 32543 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:48:43.737534 32543 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:48:43.737546 32543 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:48:43.737897 32543 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:48:43.737911 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.737916 32543 net.cpp:165] Memory required for data: 689665500\nI0821 06:48:43.737924 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:48:43.737933 32543 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:48:43.737939 32543 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:48:43.737954 32543 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:48:43.738241 32543 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:48:43.738255 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.738260 32543 net.cpp:165] Memory required for data: 697857500\nI0821 06:48:43.738270 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:48:43.738281 32543 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:48:43.738287 32543 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:48:43.738296 32543 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.738355 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:48:43.738517 32543 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:48:43.738530 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.738535 32543 net.cpp:165] Memory required for data: 706049500\nI0821 06:48:43.738544 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:48:43.738551 32543 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:48:43.738557 32543 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:48:43.738570 32543 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.738580 32543 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:48:43.738587 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.738592 32543 net.cpp:165] Memory required for data: 714241500\nI0821 06:48:43.738597 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:48:43.738610 32543 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:48:43.738616 32543 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:48:43.738625 32543 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:48:43.738979 32543 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:48:43.738993 32543 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.738998 32543 net.cpp:165] Memory required for data: 722433500\nI0821 06:48:43.739006 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:48:43.739018 32543 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:48:43.739025 32543 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:48:43.739033 32543 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:48:43.739316 32543 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:48:43.739331 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739336 32543 net.cpp:165] Memory required for data: 730625500\nI0821 06:48:43.739346 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:48:43.739354 32543 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:48:43.739361 32543 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:48:43.739372 32543 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.739429 32543 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:48:43.739590 32543 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:48:43.739603 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739609 32543 net.cpp:165] Memory required for data: 738817500\nI0821 06:48:43.739616 32543 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:48:43.739625 32543 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:48:43.739631 32543 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:48:43.739639 32543 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:48:43.739648 32543 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:48:43.739683 32543 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:48:43.739696 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739701 32543 net.cpp:165] Memory required for data: 747009500\nI0821 06:48:43.739706 32543 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:48:43.739713 32543 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:48:43.739718 32543 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:48:43.739725 32543 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.739742 32543 net.cpp:150] Setting up L1_b8_relu\nI0821 06:48:43.739748 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739753 32543 net.cpp:165] Memory required for data: 755201500\nI0821 06:48:43.739758 32543 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.739768 32543 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.739773 32543 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:48:43.739780 32543 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:48:43.739790 32543 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:48:43.739840 32543 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:48:43.739851 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739858 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.739862 32543 net.cpp:165] Memory required for data: 771585500\nI0821 06:48:43.739867 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:48:43.739878 32543 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:48:43.739884 32543 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:48:43.739895 32543 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:48:43.740268 32543 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:48:43.740284 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.740289 32543 net.cpp:165] Memory required for data: 779777500\nI0821 06:48:43.740298 32543 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:48:43.740311 32543 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:48:43.740319 32543 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:48:43.740327 32543 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:48:43.740609 32543 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:48:43.740622 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.740628 32543 net.cpp:165] Memory required for data: 787969500\nI0821 06:48:43.740638 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:48:43.740646 32543 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:48:43.740653 32543 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:48:43.740660 32543 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.740720 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:48:43.740887 32543 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:48:43.740901 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.740906 32543 net.cpp:165] Memory required for data: 796161500\nI0821 06:48:43.740914 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:48:43.740922 32543 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:48:43.740928 32543 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:48:43.740938 32543 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.740948 32543 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:48:43.740955 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.740960 32543 net.cpp:165] Memory required for data: 804353500\nI0821 06:48:43.740964 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:48:43.740975 32543 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:48:43.740980 32543 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:48:43.740991 32543 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:48:43.741354 32543 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:48:43.741369 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.741374 32543 net.cpp:165] Memory required for data: 812545500\nI0821 06:48:43.741384 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:48:43.741391 32543 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:48:43.741399 32543 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:48:43.741415 32543 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:48:43.741693 32543 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:48:43.741710 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.741715 32543 net.cpp:165] Memory required for data: 820737500\nI0821 06:48:43.741746 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:48:43.741760 32543 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:48:43.741766 32543 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:48:43.741775 32543 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.741835 32543 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:48:43.741997 32543 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:48:43.742010 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.742015 32543 net.cpp:165] Memory required for data: 828929500\nI0821 06:48:43.742024 32543 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:48:43.742033 32543 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:48:43.742039 32543 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:48:43.742046 32543 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:48:43.742056 32543 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:48:43.742091 32543 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:48:43.742103 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.742107 32543 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:48:43.742112 32543 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:48:43.742125 32543 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:48:43.742132 32543 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:48:43.742143 32543 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.742153 32543 net.cpp:150] Setting up L1_b9_relu\nI0821 06:48:43.742161 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.742166 32543 net.cpp:165] Memory required for data: 845313500\nI0821 06:48:43.742169 32543 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.742177 32543 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.742182 32543 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:48:43.742192 32543 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:48:43.742202 32543 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:48:43.742254 32543 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:48:43.742266 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.742274 32543 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:48:43.742277 32543 net.cpp:165] Memory required for data: 861697500\nI0821 06:48:43.742282 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:48:43.742293 32543 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:48:43.742300 32543 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:48:43.742311 32543 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:48:43.742671 32543 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:48:43.742684 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.742689 32543 net.cpp:165] Memory required for data: 
863745500\nI0821 06:48:43.742697 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:48:43.742707 32543 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:48:43.742713 32543 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:48:43.742723 32543 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:48:43.742995 32543 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:48:43.743007 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.743012 32543 net.cpp:165] Memory required for data: 865793500\nI0821 06:48:43.743022 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:48:43.743041 32543 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:48:43.743047 32543 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:48:43.743054 32543 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.743140 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:48:43.743305 32543 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:48:43.743319 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.743324 32543 net.cpp:165] Memory required for data: 867841500\nI0821 06:48:43.743333 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:48:43.743347 32543 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:48:43.743353 32543 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:48:43.743361 32543 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.743371 32543 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:48:43.743377 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.743382 32543 net.cpp:165] Memory required for data: 869889500\nI0821 06:48:43.743387 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:48:43.743401 32543 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:48:43.743407 32543 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:48:43.743417 32543 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:48:43.743772 32543 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:48:43.743787 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.743791 32543 net.cpp:165] Memory required for data: 871937500\nI0821 06:48:43.743800 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:48:43.743808 32543 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:48:43.743815 32543 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:48:43.743829 32543 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:48:43.744096 32543 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:48:43.744109 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.744114 32543 net.cpp:165] Memory required for data: 873985500\nI0821 06:48:43.744132 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:48:43.744143 32543 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:48:43.744149 32543 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:48:43.744158 32543 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.744215 32543 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:48:43.744374 32543 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:48:43.744385 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.744390 32543 net.cpp:165] Memory required for data: 876033500\nI0821 06:48:43.744400 32543 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:48:43.744408 32543 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:48:43.744415 32543 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:48:43.744426 32543 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:48:43.744458 32543 net.cpp:150] Setting up L2_b1_pool\nI0821 06:48:43.744472 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.744477 32543 net.cpp:165] Memory required for data: 878081500\nI0821 06:48:43.744482 32543 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:48:43.744491 32543 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:48:43.744496 32543 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:48:43.744503 32543 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:48:43.744513 32543 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:48:43.744549 32543 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:48:43.744557 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.744561 32543 net.cpp:165] Memory required for data: 880129500\nI0821 06:48:43.744566 32543 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:48:43.744575 32543 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:48:43.744586 32543 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:48:43.744595 32543 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.744603 32543 net.cpp:150] Setting up L2_b1_relu\nI0821 06:48:43.744611 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.744616 32543 net.cpp:165] Memory required for data: 882177500\nI0821 06:48:43.744619 32543 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:48:43.744632 32543 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:48:43.744639 32543 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:48:43.746886 32543 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:48:43.746904 32543 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:48:43.746909 32543 net.cpp:165] Memory required for data: 884225500\nI0821 06:48:43.746915 32543 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:48:43.746925 32543 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:48:43.746932 32543 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:48:43.746938 32543 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:48:43.746953 32543 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:48:43.746997 32543 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:48:43.747012 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.747017 32543 net.cpp:165] Memory required for data: 888321500\nI0821 06:48:43.747022 32543 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.747030 32543 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.747036 32543 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:48:43.747043 32543 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:48:43.747053 32543 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:48:43.747107 32543 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:48:43.747124 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.747131 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.747136 32543 net.cpp:165] Memory required for data: 896513500\nI0821 06:48:43.747141 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:48:43.747156 32543 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:48:43.747164 32543 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:48:43.747172 32543 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:48:43.747675 32543 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:48:43.747689 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.747694 32543 net.cpp:165] Memory required for data: 900609500\nI0821 06:48:43.747704 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:48:43.747715 32543 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:48:43.747722 32543 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:48:43.747733 32543 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:48:43.748000 32543 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:48:43.748014 32543 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:48:43.748019 32543 net.cpp:165] Memory required for data: 904705500\nI0821 06:48:43.748029 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:48:43.748039 32543 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:48:43.748044 32543 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:48:43.748052 32543 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.748116 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:48:43.748281 32543 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:48:43.748297 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.748302 32543 net.cpp:165] Memory required for data: 908801500\nI0821 06:48:43.748311 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:48:43.748319 32543 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:48:43.748333 32543 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:48:43.748342 32543 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.748352 32543 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:48:43.748358 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.748363 32543 net.cpp:165] Memory required for data: 912897500\nI0821 06:48:43.748368 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:48:43.748381 32543 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:48:43.748387 32543 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:48:43.748399 32543 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:48:43.748888 32543 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:48:43.748903 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.748908 32543 net.cpp:165] Memory required for data: 916993500\nI0821 06:48:43.748916 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:48:43.748927 32543 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:48:43.748934 
32543 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:48:43.748945 32543 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:48:43.749218 32543 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:48:43.749233 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749238 32543 net.cpp:165] Memory required for data: 921089500\nI0821 06:48:43.749248 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:48:43.749256 32543 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:48:43.749263 32543 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:48:43.749270 32543 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.749330 32543 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:48:43.749485 32543 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:48:43.749497 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749502 32543 net.cpp:165] Memory required for data: 925185500\nI0821 06:48:43.749511 32543 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:48:43.749523 32543 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:48:43.749529 32543 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:48:43.749537 32543 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:48:43.749544 32543 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:48:43.749572 32543 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:48:43.749581 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749586 32543 net.cpp:165] Memory required for data: 929281500\nI0821 06:48:43.749591 32543 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:48:43.749601 32543 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:48:43.749608 32543 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:48:43.749614 32543 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.749624 32543 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:48:43.749630 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749635 32543 net.cpp:165] Memory required for data: 933377500\nI0821 06:48:43.749639 32543 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.749646 32543 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.749651 32543 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:48:43.749660 32543 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:48:43.749668 32543 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:48:43.749718 32543 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:48:43.749729 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749737 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.749740 32543 net.cpp:165] Memory required for data: 941569500\nI0821 06:48:43.749752 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:48:43.749766 32543 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:48:43.749773 32543 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:48:43.749783 32543 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:48:43.750288 32543 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:48:43.750303 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.750308 32543 net.cpp:165] Memory required for data: 945665500\nI0821 06:48:43.750318 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:48:43.750329 32543 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:48:43.750335 32543 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:48:43.750346 32543 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:48:43.750612 32543 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:48:43.750624 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.750629 32543 net.cpp:165] Memory required for data: 949761500\nI0821 06:48:43.750639 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:48:43.750648 32543 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:48:43.750654 32543 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:48:43.750661 32543 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.750723 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:48:43.750880 32543 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:48:43.750892 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.750897 32543 net.cpp:165] Memory required for data: 953857500\nI0821 06:48:43.750905 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:48:43.750916 32543 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:48:43.750923 32543 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:48:43.750931 32543 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.750939 32543 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:48:43.750946 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.750952 32543 net.cpp:165] Memory required for data: 957953500\nI0821 06:48:43.750955 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:48:43.750968 32543 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:48:43.750974 32543 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:48:43.750983 32543 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:48:43.751485 32543 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:48:43.751500 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.751505 32543 net.cpp:165] Memory required for data: 962049500\nI0821 06:48:43.751514 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:48:43.751525 32543 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:48:43.751533 32543 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:48:43.751541 32543 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:48:43.751808 32543 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:48:43.751824 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.751829 32543 net.cpp:165] Memory required for data: 966145500\nI0821 06:48:43.751839 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:48:43.751847 32543 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:48:43.751853 32543 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:48:43.751862 32543 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.751919 32543 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:48:43.752079 32543 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:48:43.752091 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752096 32543 net.cpp:165] Memory required for data: 970241500\nI0821 06:48:43.752105 32543 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:48:43.752115 32543 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:48:43.752133 32543 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:48:43.752141 32543 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:48:43.752152 32543 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:48:43.752182 32543 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:48:43.752193 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752198 32543 net.cpp:165] Memory required for data: 974337500\nI0821 06:48:43.752203 32543 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:48:43.752226 32543 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:48:43.752233 32543 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:48:43.752239 32543 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.752249 32543 net.cpp:150] Setting up L2_b3_relu\nI0821 06:48:43.752256 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752260 32543 net.cpp:165] Memory required for data: 978433500\nI0821 06:48:43.752266 32543 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.752274 32543 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.752279 32543 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:48:43.752285 32543 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:48:43.752295 32543 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:48:43.752347 32543 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:48:43.752359 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752365 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752370 32543 net.cpp:165] Memory required for data: 986625500\nI0821 06:48:43.752375 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:48:43.752385 32543 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:48:43.752393 32543 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:48:43.752405 32543 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:48:43.752899 32543 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:48:43.752913 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.752918 32543 net.cpp:165] Memory required for data: 990721500\nI0821 06:48:43.752928 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:48:43.752940 32543 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:48:43.752948 32543 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:48:43.752955 32543 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:48:43.753237 32543 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:48:43.753250 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.753255 32543 net.cpp:165] Memory required for data: 994817500\nI0821 06:48:43.753265 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:48:43.753276 32543 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:48:43.753283 32543 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:48:43.753291 32543 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.753350 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:48:43.753510 32543 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:48:43.753523 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.753528 32543 net.cpp:165] Memory required for data: 998913500\nI0821 06:48:43.753537 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:48:43.753547 32543 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:48:43.753554 32543 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:48:43.753563 32543 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.753573 32543 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:48:43.753581 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.753585 32543 net.cpp:165] Memory required for data: 1003009500\nI0821 06:48:43.753597 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:48:43.753608 32543 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:48:43.753614 32543 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:48:43.753625 32543 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:48:43.754123 32543 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:48:43.754138 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754143 32543 net.cpp:165] Memory required for data: 1007105500\nI0821 06:48:43.754153 32543 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:48:43.754161 32543 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:48:43.754168 32543 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:48:43.754180 32543 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:48:43.754453 32543 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:48:43.754467 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754472 32543 net.cpp:165] Memory required for data: 1011201500\nI0821 06:48:43.754482 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:48:43.754493 32543 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:48:43.754499 32543 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:48:43.754508 32543 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.754565 32543 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:48:43.754725 32543 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:48:43.754739 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754743 32543 net.cpp:165] Memory required for data: 1015297500\nI0821 06:48:43.754752 32543 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:48:43.754763 32543 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:48:43.754770 32543 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:48:43.754777 32543 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:48:43.754786 32543 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:48:43.754817 32543 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:48:43.754825 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754830 32543 net.cpp:165] Memory required for data: 1019393500\nI0821 06:48:43.754835 32543 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:48:43.754842 32543 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:48:43.754848 32543 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:48:43.754858 32543 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.754868 32543 net.cpp:150] Setting up L2_b4_relu\nI0821 06:48:43.754875 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754879 32543 net.cpp:165] Memory required for data: 1023489500\nI0821 06:48:43.754884 32543 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.754891 32543 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.754896 32543 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:48:43.754904 32543 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:48:43.754914 32543 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:48:43.754964 32543 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:48:43.754976 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754982 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.754987 32543 net.cpp:165] Memory required for data: 1031681500\nI0821 06:48:43.754992 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:48:43.755002 32543 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:48:43.755008 32543 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:48:43.755020 32543 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:48:43.755532 32543 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:48:43.755548 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.755553 32543 net.cpp:165] Memory required for data: 1035777500\nI0821 06:48:43.755561 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:48:43.755570 32543 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:48:43.755576 32543 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:48:43.755587 32543 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:48:43.755857 32543 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:48:43.755870 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.755875 32543 net.cpp:165] Memory required for data: 1039873500\nI0821 06:48:43.755885 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:48:43.755897 32543 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:48:43.755903 32543 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:48:43.755910 32543 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.755969 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:48:43.756140 32543 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:48:43.756155 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.756160 32543 net.cpp:165] Memory required for data: 1043969500\nI0821 06:48:43.756168 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:48:43.756180 32543 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:48:43.756186 32543 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:48:43.756193 32543 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.756203 32543 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:48:43.756211 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.756214 32543 net.cpp:165] Memory required for data: 1048065500\nI0821 06:48:43.756219 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:48:43.756232 32543 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:48:43.756238 32543 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:48:43.756249 32543 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:48:43.756742 32543 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:48:43.756755 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.756760 32543 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:48:43.756769 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:48:43.756778 32543 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:48:43.756784 32543 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:48:43.756795 32543 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:48:43.757067 32543 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:48:43.757081 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757086 32543 net.cpp:165] Memory required for data: 1056257500\nI0821 06:48:43.757095 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:48:43.757107 32543 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:48:43.757112 32543 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:48:43.757127 32543 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.757186 32543 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:48:43.757344 32543 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:48:43.757357 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757362 32543 net.cpp:165] Memory required for data: 1060353500\nI0821 06:48:43.757371 32543 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:48:43.757383 32543 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:48:43.757390 32543 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:48:43.757396 32543 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:48:43.757405 32543 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:48:43.757436 32543 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:48:43.757452 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757457 32543 net.cpp:165] Memory required for data: 1064449500\nI0821 06:48:43.757462 32543 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:48:43.757469 32543 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:48:43.757474 32543 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:48:43.757481 32543 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.757490 32543 net.cpp:150] Setting up L2_b5_relu\nI0821 06:48:43.757498 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757503 32543 net.cpp:165] Memory required for data: 1068545500\nI0821 06:48:43.757506 32543 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.757516 32543 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.757521 32543 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:48:43.757529 32543 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:48:43.757539 32543 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:48:43.757591 32543 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:48:43.757602 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757608 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.757613 32543 net.cpp:165] Memory required for data: 1076737500\nI0821 06:48:43.757618 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:48:43.757629 32543 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:48:43.757635 32543 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:48:43.757648 32543 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:48:43.758155 32543 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:48:43.758169 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.758174 32543 net.cpp:165] Memory required for data: 1080833500\nI0821 06:48:43.758183 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:48:43.758193 32543 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:48:43.758198 32543 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:48:43.758211 32543 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:48:43.758478 32543 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:48:43.758491 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.758496 32543 net.cpp:165] Memory required for data: 1084929500\nI0821 06:48:43.758507 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:48:43.758518 32543 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:48:43.758524 32543 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:48:43.758533 32543 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.758589 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:48:43.758747 32543 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:48:43.758760 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.758765 32543 net.cpp:165] Memory required for data: 1089025500\nI0821 06:48:43.758774 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:48:43.758785 32543 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:48:43.758791 32543 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:48:43.758798 32543 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.758808 32543 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:48:43.758815 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.758819 32543 net.cpp:165] Memory required for data: 1093121500\nI0821 06:48:43.758823 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:48:43.758837 32543 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:48:43.758843 32543 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:48:43.758854 32543 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:48:43.759361 32543 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:48:43.759374 32543 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.759379 32543 net.cpp:165] Memory required for data: 1097217500\nI0821 06:48:43.759388 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:48:43.759397 32543 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:48:43.759404 32543 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:48:43.759413 32543 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:48:43.759683 32543 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:48:43.759696 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.759701 32543 net.cpp:165] Memory required for data: 1101313500\nI0821 06:48:43.759711 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:48:43.759719 32543 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:48:43.759726 32543 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:48:43.759735 32543 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.759793 32543 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:48:43.759949 32543 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:48:43.759963 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.759968 32543 net.cpp:165] Memory required for data: 1105409500\nI0821 06:48:43.759976 32543 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:48:43.759984 32543 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:48:43.759991 32543 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:48:43.759997 32543 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:48:43.760010 32543 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:48:43.760038 32543 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:48:43.760051 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.760056 32543 net.cpp:165] Memory required for data: 1109505500\nI0821 06:48:43.760061 32543 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:48:43.760068 32543 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:48:43.760074 32543 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:48:43.760082 32543 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.760090 32543 net.cpp:150] Setting up L2_b6_relu\nI0821 06:48:43.760097 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.760102 32543 net.cpp:165] Memory required for data: 1113601500\nI0821 06:48:43.760107 32543 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.760115 32543 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.760128 32543 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:48:43.760136 32543 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:48:43.760146 32543 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:48:43.760200 32543 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:48:43.760212 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.760218 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.760223 32543 net.cpp:165] Memory required for data: 1121793500\nI0821 06:48:43.760228 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:48:43.760239 32543 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:48:43.760246 32543 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:48:43.760257 32543 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:48:43.761770 32543 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:48:43.761787 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.761792 32543 net.cpp:165] Memory required for data: 1125889500\nI0821 06:48:43.761802 32543 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:48:43.761821 32543 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:48:43.761827 32543 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:48:43.761838 32543 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:48:43.762109 32543 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:48:43.762127 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.762133 32543 net.cpp:165] Memory required for data: 1129985500\nI0821 06:48:43.762143 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:48:43.762152 32543 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:48:43.762158 32543 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:48:43.762166 32543 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.762229 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:48:43.762387 32543 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:48:43.762403 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.762408 32543 net.cpp:165] Memory required for data: 1134081500\nI0821 06:48:43.762416 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:48:43.762424 32543 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:48:43.762430 32543 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:48:43.762439 32543 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.762447 32543 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:48:43.762454 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.762459 32543 net.cpp:165] Memory required for data: 1138177500\nI0821 06:48:43.762464 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:48:43.762477 32543 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:48:43.762483 32543 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:48:43.762495 32543 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:48:43.762981 32543 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:48:43.762995 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763000 32543 net.cpp:165] Memory required for data: 1142273500\nI0821 06:48:43.763010 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:48:43.763021 32543 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:48:43.763027 32543 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:48:43.763038 32543 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:48:43.763319 32543 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:48:43.763334 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763339 32543 net.cpp:165] Memory required for data: 1146369500\nI0821 06:48:43.763350 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:48:43.763358 32543 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:48:43.763365 32543 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:48:43.763372 32543 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.763433 32543 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:48:43.763589 32543 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:48:43.763602 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763607 32543 net.cpp:165] Memory required for data: 1150465500\nI0821 06:48:43.763617 32543 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:48:43.763631 32543 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:48:43.763638 32543 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:48:43.763645 32543 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:48:43.763653 32543 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:48:43.763681 32543 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:48:43.763690 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763695 32543 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:48:43.763700 32543 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:48:43.763710 32543 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:48:43.763723 32543 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:48:43.763731 32543 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.763741 32543 net.cpp:150] Setting up L2_b7_relu\nI0821 06:48:43.763748 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763752 32543 net.cpp:165] Memory required for data: 1158657500\nI0821 06:48:43.763757 32543 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.763764 32543 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.763769 32543 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:48:43.763777 32543 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:48:43.763787 32543 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:48:43.763839 32543 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:48:43.763849 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763856 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.763860 32543 net.cpp:165] Memory required for data: 1166849500\nI0821 06:48:43.763866 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:48:43.763880 32543 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:48:43.763886 32543 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:48:43.763895 32543 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:48:43.764398 32543 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:48:43.764413 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.764418 32543 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:48:43.764427 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:48:43.764439 32543 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:48:43.764446 32543 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:48:43.764456 32543 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:48:43.764729 32543 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:48:43.764741 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.764745 32543 net.cpp:165] Memory required for data: 1175041500\nI0821 06:48:43.764756 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:48:43.764765 32543 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:48:43.764770 32543 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:48:43.764778 32543 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.764839 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:48:43.765025 32543 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:48:43.765039 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.765044 32543 net.cpp:165] Memory required for data: 1179137500\nI0821 06:48:43.765053 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:48:43.765064 32543 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:48:43.765071 32543 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:48:43.765079 32543 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.765089 32543 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:48:43.765095 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.765100 32543 net.cpp:165] Memory required for data: 1183233500\nI0821 06:48:43.765105 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:48:43.765125 32543 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:48:43.765132 32543 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:48:43.765144 32543 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:48:43.765637 32543 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:48:43.765651 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.765656 32543 net.cpp:165] Memory required for data: 1187329500\nI0821 06:48:43.765664 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:48:43.765686 32543 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:48:43.765693 32543 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:48:43.765704 32543 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:48:43.765980 32543 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:48:43.765995 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.766000 32543 net.cpp:165] Memory required for data: 1191425500\nI0821 06:48:43.766010 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:48:43.766018 32543 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:48:43.766026 32543 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:48:43.766032 32543 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.766094 32543 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:48:43.766259 32543 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:48:43.766273 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.766278 32543 net.cpp:165] Memory required for data: 1195521500\nI0821 06:48:43.766288 32543 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:48:43.766296 32543 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:48:43.766306 32543 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:48:43.766314 32543 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:48:43.766321 32543 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:48:43.766350 32543 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:48:43.766358 32543 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:48:43.766362 32543 net.cpp:165] Memory required for data: 1199617500\nI0821 06:48:43.766368 32543 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:48:43.766378 32543 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:48:43.766384 32543 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:48:43.766391 32543 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.766402 32543 net.cpp:150] Setting up L2_b8_relu\nI0821 06:48:43.766407 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.766412 32543 net.cpp:165] Memory required for data: 1203713500\nI0821 06:48:43.766417 32543 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.766424 32543 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.766429 32543 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:48:43.766436 32543 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:48:43.766461 32543 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:48:43.766515 32543 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:48:43.766527 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.766535 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.766538 32543 net.cpp:165] Memory required for data: 1211905500\nI0821 06:48:43.766543 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:48:43.766558 32543 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:48:43.766566 32543 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:48:43.766574 32543 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:48:43.767069 32543 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:48:43.767083 32543 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:48:43.767088 32543 net.cpp:165] Memory required for data: 1216001500\nI0821 06:48:43.767097 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:48:43.767109 32543 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:48:43.767115 32543 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:48:43.767130 32543 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:48:43.767415 32543 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:48:43.767436 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.767442 32543 net.cpp:165] Memory required for data: 1220097500\nI0821 06:48:43.767452 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:48:43.767462 32543 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:48:43.767467 32543 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:48:43.767475 32543 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.767537 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:48:43.767702 32543 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:48:43.767715 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.767720 32543 net.cpp:165] Memory required for data: 1224193500\nI0821 06:48:43.767751 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:48:43.767761 32543 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:48:43.767767 32543 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:48:43.767778 32543 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.767789 32543 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:48:43.767796 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.767801 32543 net.cpp:165] Memory required for data: 1228289500\nI0821 06:48:43.767805 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:48:43.767819 32543 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:48:43.767825 32543 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:48:43.767833 32543 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:48:43.769309 32543 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:48:43.769327 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.769332 32543 net.cpp:165] Memory required for data: 1232385500\nI0821 06:48:43.769341 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:48:43.769354 32543 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:48:43.769361 32543 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:48:43.769372 32543 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:48:43.769637 32543 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:48:43.769650 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.769655 32543 net.cpp:165] Memory required for data: 1236481500\nI0821 06:48:43.769706 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:48:43.769718 32543 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:48:43.769726 32543 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:48:43.769733 32543 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.769795 32543 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:48:43.769953 32543 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:48:43.769966 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.769971 32543 net.cpp:165] Memory required for data: 1240577500\nI0821 06:48:43.769980 32543 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:48:43.769992 32543 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:48:43.769999 32543 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:48:43.770006 32543 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:48:43.770015 32543 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:48:43.770045 32543 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:48:43.770054 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.770058 32543 net.cpp:165] Memory required for data: 1244673500\nI0821 06:48:43.770063 32543 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:48:43.770071 32543 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:48:43.770077 32543 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:48:43.770087 32543 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.770097 32543 net.cpp:150] Setting up L2_b9_relu\nI0821 06:48:43.770104 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.770115 32543 net.cpp:165] Memory required for data: 1248769500\nI0821 06:48:43.770128 32543 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.770135 32543 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.770141 32543 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:48:43.770153 32543 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:48:43.770162 32543 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:48:43.770215 32543 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:48:43.770226 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.770232 32543 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:48:43.770237 32543 net.cpp:165] Memory required for data: 1256961500\nI0821 06:48:43.770242 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:48:43.770253 32543 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:48:43.770261 32543 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:48:43.770272 32543 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:48:43.770844 32543 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:48:43.770861 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.770866 32543 net.cpp:165] Memory required for data: 1257985500\nI0821 06:48:43.770875 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:48:43.770884 32543 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:48:43.770891 32543 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:48:43.770902 32543 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:48:43.771189 32543 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:48:43.771203 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.771209 32543 net.cpp:165] Memory required for data: 1259009500\nI0821 06:48:43.771219 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:48:43.771229 32543 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:48:43.771234 32543 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:48:43.771242 32543 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.771302 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:48:43.771471 32543 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:48:43.771486 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.771492 32543 net.cpp:165] Memory required for data: 1260033500\nI0821 06:48:43.771500 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:48:43.771508 32543 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:48:43.771515 32543 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:48:43.771522 32543 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:48:43.771531 32543 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:48:43.771538 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.771543 32543 net.cpp:165] Memory required for data: 1261057500\nI0821 06:48:43.771548 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:48:43.771561 32543 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:48:43.771567 32543 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:48:43.771576 32543 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:48:43.772070 32543 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:48:43.772084 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.772089 32543 net.cpp:165] Memory required for data: 1262081500\nI0821 06:48:43.772099 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:48:43.772110 32543 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:48:43.772116 32543 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:48:43.772135 32543 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:48:43.772410 32543 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:48:43.772430 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.772436 32543 net.cpp:165] Memory required for data: 1263105500\nI0821 06:48:43.772446 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:48:43.772455 32543 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:48:43.772461 32543 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:48:43.772472 32543 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:48:43.772531 32543 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:48:43.772698 32543 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:48:43.772712 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.772717 32543 net.cpp:165] Memory required for data: 1264129500\nI0821 06:48:43.772727 32543 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:48:43.772738 32543 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:48:43.772745 32543 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:48:43.772753 32543 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:48:43.772792 32543 net.cpp:150] Setting up L3_b1_pool\nI0821 06:48:43.772804 32543 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:48:43.772809 32543 net.cpp:165] Memory required for data: 1265153500\nI0821 06:48:43.772814 32543 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:48:43.772824 32543 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:48:43.772830 32543 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:48:43.772836 32543 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:48:43.772847 32543 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:48:43.772881 32543 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:48:43.772889 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.772894 32543 net.cpp:165] Memory required for data: 1266177500\nI0821 06:48:43.772899 32543 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:48:43.772907 32543 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:48:43.772913 32543 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:48:43.772922 32543 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:48:43.772933 32543 net.cpp:150] Setting up L3_b1_relu\nI0821 06:48:43.772939 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.772943 32543 net.cpp:165] Memory required for data: 1267201500\nI0821 06:48:43.772948 32543 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:48:43.772958 32543 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:48:43.772964 32543 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:48:43.774202 32543 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:48:43.774221 32543 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:48:43.774226 32543 net.cpp:165] Memory required for data: 1268225500\nI0821 06:48:43.774232 32543 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:48:43.774241 32543 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:48:43.774248 32543 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:48:43.774255 32543 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:48:43.774266 32543 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:48:43.774312 32543 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:48:43.774324 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.774329 32543 net.cpp:165] Memory required for data: 1270273500\nI0821 06:48:43.774334 32543 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.774343 32543 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.774348 32543 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:48:43.774358 32543 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:48:43.774369 32543 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:48:43.774422 32543 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:48:43.774433 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.774448 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.774453 32543 net.cpp:165] Memory required for data: 1274369500\nI0821 06:48:43.774458 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:48:43.774472 32543 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:48:43.774479 32543 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:48:43.774488 32543 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:48:43.775537 32543 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:48:43.775552 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.775557 32543 net.cpp:165] Memory required for data: 1276417500\nI0821 06:48:43.775566 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:48:43.775578 32543 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:48:43.775584 32543 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:48:43.775593 32543 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:48:43.775869 32543 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:48:43.775882 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.775887 32543 net.cpp:165] Memory required for data: 1278465500\nI0821 06:48:43.775897 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:48:43.775909 32543 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:48:43.775916 32543 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:48:43.775924 32543 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.775986 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:48:43.776160 32543 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:48:43.776173 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.776178 32543 net.cpp:165] Memory required for data: 1280513500\nI0821 06:48:43.776187 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:48:43.776199 32543 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:48:43.776206 32543 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:48:43.776213 32543 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:48:43.776226 32543 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:48:43.776233 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.776237 32543 net.cpp:165] Memory required for data: 1282561500\nI0821 06:48:43.776242 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:48:43.776253 32543 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:48:43.776259 32543 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:48:43.776270 32543 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:48:43.777318 32543 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:48:43.777333 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.777338 32543 net.cpp:165] Memory required for data: 1284609500\nI0821 06:48:43.777348 32543 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:48:43.777357 32543 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:48:43.777364 32543 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:48:43.777376 32543 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:48:43.777650 32543 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:48:43.777663 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.777668 32543 net.cpp:165] Memory required for data: 1286657500\nI0821 06:48:43.777678 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:48:43.777688 32543 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:48:43.777693 32543 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:48:43.777700 32543 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:48:43.777762 32543 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:48:43.777921 32543 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:48:43.777936 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.777942 32543 net.cpp:165] Memory required for data: 1288705500\nI0821 06:48:43.777957 32543 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:48:43.777967 32543 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:48:43.777974 32543 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:48:43.777981 32543 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:48:43.777989 32543 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:48:43.778026 32543 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:48:43.778038 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.778043 32543 net.cpp:165] Memory required for data: 1290753500\nI0821 06:48:43.778069 32543 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:48:43.778079 32543 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:48:43.778084 32543 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:48:43.778091 32543 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:48:43.778101 32543 net.cpp:150] Setting up L3_b2_relu\nI0821 06:48:43.778108 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.778113 32543 net.cpp:165] Memory required for data: 1292801500\nI0821 06:48:43.778125 32543 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.778136 32543 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.778141 32543 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:48:43.778149 32543 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:48:43.778159 32543 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:48:43.778209 32543 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:48:43.778224 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.778231 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.778236 32543 net.cpp:165] Memory required for data: 1296897500\nI0821 06:48:43.778241 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:48:43.778252 32543 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:48:43.778259 32543 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:48:43.778268 32543 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:48:43.779314 32543 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:48:43.779330 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.779335 32543 net.cpp:165] Memory required for data: 1298945500\nI0821 06:48:43.779343 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:48:43.779357 32543 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:48:43.779363 32543 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:48:43.779372 32543 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:48:43.779642 32543 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:48:43.779655 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.779660 32543 net.cpp:165] Memory required for data: 1300993500\nI0821 06:48:43.779670 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:48:43.779682 32543 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:48:43.779688 32543 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:48:43.779696 32543 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.779759 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:48:43.779924 32543 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:48:43.779937 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.779942 32543 net.cpp:165] Memory required for data: 1303041500\nI0821 06:48:43.779952 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:48:43.779959 32543 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:48:43.779965 32543 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:48:43.779975 32543 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:48:43.779986 32543 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:48:43.779999 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.780004 32543 net.cpp:165] Memory required for data: 1305089500\nI0821 06:48:43.780009 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:48:43.780020 32543 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:48:43.780026 32543 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:48:43.780037 32543 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:48:43.781086 32543 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:48:43.781101 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781106 32543 net.cpp:165] Memory required for data: 1307137500\nI0821 06:48:43.781116 32543 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:48:43.781134 32543 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:48:43.781141 32543 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:48:43.781149 32543 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:48:43.781420 32543 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:48:43.781433 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781438 32543 net.cpp:165] Memory required for data: 1309185500\nI0821 06:48:43.781450 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:48:43.781457 32543 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:48:43.781464 32543 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:48:43.781471 32543 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:48:43.781539 32543 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:48:43.781697 32543 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:48:43.781713 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781718 32543 net.cpp:165] Memory required for data: 1311233500\nI0821 06:48:43.781728 32543 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:48:43.781736 32543 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:48:43.781744 32543 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:48:43.781750 32543 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:48:43.781759 32543 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:48:43.781795 32543 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:48:43.781806 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781810 32543 net.cpp:165] Memory required for data: 1313281500\nI0821 06:48:43.781816 32543 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:48:43.781826 32543 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:48:43.781833 32543 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:48:43.781841 32543 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:48:43.781849 32543 net.cpp:150] Setting up L3_b3_relu\nI0821 06:48:43.781857 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781860 32543 net.cpp:165] Memory required for data: 1315329500\nI0821 06:48:43.781865 32543 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.781875 32543 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.781880 32543 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:48:43.781888 32543 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:48:43.781898 32543 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:48:43.781945 32543 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:48:43.781960 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781966 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.781970 32543 net.cpp:165] Memory required for data: 1319425500\nI0821 06:48:43.781975 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:48:43.781987 32543 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:48:43.781993 32543 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:48:43.782011 32543 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:48:43.783056 32543 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:48:43.783071 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.783077 32543 net.cpp:165] Memory required for data: 1321473500\nI0821 06:48:43.783085 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:48:43.783097 32543 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:48:43.783104 32543 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:48:43.783113 32543 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:48:43.783391 32543 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:48:43.783406 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.783411 32543 net.cpp:165] Memory required for data: 1323521500\nI0821 06:48:43.783421 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:48:43.783433 32543 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:48:43.783439 32543 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:48:43.783450 32543 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.783509 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:48:43.783674 32543 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:48:43.783687 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.783692 32543 net.cpp:165] Memory required for data: 1325569500\nI0821 06:48:43.783701 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:48:43.783710 32543 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:48:43.783716 32543 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:48:43.783726 32543 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:48:43.783736 32543 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:48:43.783743 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.783748 32543 net.cpp:165] Memory required for data: 1327617500\nI0821 06:48:43.783752 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:48:43.783763 32543 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:48:43.783769 32543 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:48:43.783780 32543 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:48:43.785787 32543 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:48:43.785805 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.785810 32543 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:48:43.785820 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:48:43.785832 32543 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:48:43.785840 32543 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:48:43.785848 32543 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:48:43.786129 32543 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:48:43.786144 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786149 32543 net.cpp:165] Memory required for data: 1331713500\nI0821 06:48:43.786159 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:48:43.786170 32543 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:48:43.786177 32543 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:48:43.786185 32543 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:48:43.786247 32543 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:48:43.786418 32543 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:48:43.786432 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786437 32543 net.cpp:165] Memory required for data: 1333761500\nI0821 06:48:43.786445 32543 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:48:43.786455 32543 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:48:43.786461 32543 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:48:43.786468 32543 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:48:43.786479 32543 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:48:43.786522 32543 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:48:43.786532 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786537 32543 net.cpp:165] Memory required for data: 1335809500\nI0821 06:48:43.786542 32543 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:48:43.786553 32543 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:48:43.786559 32543 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:48:43.786566 32543 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:48:43.786576 32543 net.cpp:150] Setting up L3_b4_relu\nI0821 06:48:43.786583 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786587 32543 net.cpp:165] Memory required for data: 1337857500\nI0821 06:48:43.786592 32543 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.786599 32543 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.786605 32543 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:48:43.786612 32543 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:48:43.786623 32543 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:48:43.786672 32543 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:48:43.786684 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786691 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.786695 32543 net.cpp:165] Memory required for data: 1341953500\nI0821 06:48:43.786700 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:48:43.786715 32543 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:48:43.786721 32543 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:48:43.786731 32543 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:48:43.787760 32543 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:48:43.787775 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.787781 32543 net.cpp:165] Memory required for data: 1344001500\nI0821 06:48:43.787789 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:48:43.787801 32543 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:48:43.787807 32543 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:48:43.787820 32543 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:48:43.788091 32543 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:48:43.788105 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.788110 32543 net.cpp:165] Memory required for data: 1346049500\nI0821 06:48:43.788125 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:48:43.788134 32543 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:48:43.788141 32543 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:48:43.788151 32543 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.788213 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:48:43.788374 32543 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:48:43.788388 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.788393 32543 net.cpp:165] Memory required for data: 1348097500\nI0821 06:48:43.788401 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:48:43.788409 32543 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:48:43.788415 32543 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:48:43.788427 32543 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:48:43.788437 32543 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:48:43.788444 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.788450 32543 net.cpp:165] Memory required for data: 1350145500\nI0821 06:48:43.788453 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:48:43.788467 32543 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:48:43.788473 32543 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:48:43.788489 32543 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:48:43.789549 32543 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:48:43.789564 32543 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:48:43.789569 32543 net.cpp:165] Memory required for data: 1352193500\nI0821 06:48:43.789578 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:48:43.789593 32543 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:48:43.789600 32543 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:48:43.789609 32543 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:48:43.789880 32543 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:48:43.789893 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.789898 32543 net.cpp:165] Memory required for data: 1354241500\nI0821 06:48:43.789909 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:48:43.789924 32543 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:48:43.789937 32543 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:48:43.789957 32543 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:48:43.790035 32543 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:48:43.790212 32543 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:48:43.790233 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.790242 32543 net.cpp:165] Memory required for data: 1356289500\nI0821 06:48:43.790261 32543 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:48:43.790276 32543 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:48:43.790287 32543 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:48:43.790297 32543 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:48:43.790314 32543 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:48:43.790359 32543 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:48:43.790371 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.790376 32543 net.cpp:165] Memory required for data: 1358337500\nI0821 06:48:43.790382 32543 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:48:43.790393 32543 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:48:43.790400 32543 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:48:43.790406 32543 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:48:43.790416 32543 net.cpp:150] Setting up L3_b5_relu\nI0821 06:48:43.790423 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.790427 32543 net.cpp:165] Memory required for data: 1360385500\nI0821 06:48:43.790432 32543 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.790441 32543 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.790446 32543 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:48:43.790453 32543 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:48:43.790463 32543 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:48:43.790513 32543 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:48:43.790524 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.790531 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.790535 32543 net.cpp:165] Memory required for data: 1364481500\nI0821 06:48:43.790541 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:48:43.790555 32543 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:48:43.790562 32543 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:48:43.790571 32543 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:48:43.791621 32543 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:48:43.791637 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.791642 32543 net.cpp:165] Memory required for data: 1366529500\nI0821 06:48:43.791651 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:48:43.791671 
32543 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:48:43.791678 32543 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:48:43.791689 32543 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:48:43.791965 32543 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:48:43.791977 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.791982 32543 net.cpp:165] Memory required for data: 1368577500\nI0821 06:48:43.791992 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:48:43.792001 32543 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:48:43.792007 32543 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:48:43.792018 32543 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.792078 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:48:43.792249 32543 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:48:43.792263 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.792268 32543 net.cpp:165] Memory required for data: 1370625500\nI0821 06:48:43.792279 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:48:43.792286 32543 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:48:43.792292 32543 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:48:43.792302 32543 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:48:43.792313 32543 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:48:43.792320 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.792325 32543 net.cpp:165] Memory required for data: 1372673500\nI0821 06:48:43.792330 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:48:43.792343 32543 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:48:43.792349 32543 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:48:43.792361 32543 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:48:43.793392 32543 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:48:43.793407 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.793412 32543 net.cpp:165] Memory required for data: 1374721500\nI0821 06:48:43.793421 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:48:43.793431 32543 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:48:43.793437 32543 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:48:43.793448 32543 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:48:43.793720 32543 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:48:43.793733 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.793738 32543 net.cpp:165] Memory required for data: 1376769500\nI0821 06:48:43.793750 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:48:43.793761 32543 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:48:43.793767 32543 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:48:43.793776 32543 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:48:43.793833 32543 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:48:43.793992 32543 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:48:43.794005 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.794010 32543 net.cpp:165] Memory required for data: 1378817500\nI0821 06:48:43.794018 32543 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:48:43.794028 32543 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:48:43.794034 32543 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:48:43.794041 32543 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:48:43.794054 32543 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:48:43.794090 32543 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:48:43.794100 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.794106 32543 net.cpp:165] Memory required for data: 1380865500\nI0821 06:48:43.794111 32543 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:48:43.794127 32543 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:48:43.794142 32543 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:48:43.794149 32543 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:48:43.794159 32543 net.cpp:150] Setting up L3_b6_relu\nI0821 06:48:43.794167 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.794170 32543 net.cpp:165] Memory required for data: 1382913500\nI0821 06:48:43.794175 32543 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.794183 32543 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.794188 32543 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:48:43.794196 32543 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:48:43.794205 32543 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:48:43.794260 32543 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:48:43.794271 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.794277 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.794282 32543 net.cpp:165] Memory required for data: 1387009500\nI0821 06:48:43.794287 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:48:43.794303 32543 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:48:43.794309 32543 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:48:43.794318 32543 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:48:43.795359 32543 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:48:43.795374 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.795379 32543 net.cpp:165] Memory required for data: 1389057500\nI0821 06:48:43.795388 32543 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:48:43.795399 32543 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:48:43.795406 32543 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:48:43.795418 32543 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:48:43.795686 32543 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:48:43.795698 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.795703 32543 net.cpp:165] Memory required for data: 1391105500\nI0821 06:48:43.795714 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:48:43.795722 32543 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:48:43.795729 32543 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:48:43.795739 32543 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.795800 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:48:43.795963 32543 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:48:43.795976 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.795981 32543 net.cpp:165] Memory required for data: 1393153500\nI0821 06:48:43.795990 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:48:43.796026 32543 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:48:43.796036 32543 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:48:43.796043 32543 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:48:43.796053 32543 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:48:43.796061 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.796066 32543 net.cpp:165] Memory required for data: 1395201500\nI0821 06:48:43.796070 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:48:43.796082 32543 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:48:43.796087 32543 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:48:43.796097 32543 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:48:43.797142 32543 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:48:43.797158 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.797163 32543 net.cpp:165] Memory required for data: 1397249500\nI0821 06:48:43.797171 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:48:43.797190 32543 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:48:43.797199 32543 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:48:43.797209 32543 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:48:43.797480 32543 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:48:43.797493 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.797498 32543 net.cpp:165] Memory required for data: 1399297500\nI0821 06:48:43.797508 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:48:43.797518 32543 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:48:43.797524 32543 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:48:43.797534 32543 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:48:43.797595 32543 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:48:43.797757 32543 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:48:43.797770 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.797775 32543 net.cpp:165] Memory required for data: 1401345500\nI0821 06:48:43.797783 32543 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:48:43.797792 32543 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:48:43.797799 32543 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:48:43.797809 32543 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:48:43.797817 32543 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:48:43.797854 32543 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:48:43.797866 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.797870 32543 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:48:43.797876 32543 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:48:43.797883 32543 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:48:43.797889 32543 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:48:43.797899 32543 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:48:43.797909 32543 net.cpp:150] Setting up L3_b7_relu\nI0821 06:48:43.797916 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.797920 32543 net.cpp:165] Memory required for data: 1405441500\nI0821 06:48:43.797925 32543 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.797932 32543 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.797937 32543 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:48:43.797945 32543 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:48:43.797955 32543 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:48:43.798003 32543 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:48:43.798015 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.798022 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.798027 32543 net.cpp:165] Memory required for data: 1409537500\nI0821 06:48:43.798032 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:48:43.798043 32543 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:48:43.798049 32543 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:48:43.798061 32543 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:48:43.800066 32543 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:48:43.800084 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.800089 32543 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:48:43.800099 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:48:43.800112 32543 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:48:43.800124 32543 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:48:43.800134 32543 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:48:43.800412 32543 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:48:43.800426 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.800438 32543 net.cpp:165] Memory required for data: 1413633500\nI0821 06:48:43.800449 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:48:43.800462 32543 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:48:43.800468 32543 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:48:43.800477 32543 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.800539 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:48:43.800704 32543 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:48:43.800717 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.800722 32543 net.cpp:165] Memory required for data: 1415681500\nI0821 06:48:43.800732 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:48:43.800743 32543 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:48:43.800750 32543 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:48:43.800757 32543 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:48:43.800766 32543 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:48:43.800773 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.800778 32543 net.cpp:165] Memory required for data: 1417729500\nI0821 06:48:43.800782 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:48:43.800797 32543 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:48:43.800803 32543 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:48:43.800814 32543 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:48:43.801846 32543 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:48:43.801861 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.801865 32543 net.cpp:165] Memory required for data: 1419777500\nI0821 06:48:43.801874 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:48:43.801883 32543 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:48:43.801890 32543 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:48:43.801901 32543 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:48:43.802189 32543 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:48:43.802203 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.802208 32543 net.cpp:165] Memory required for data: 1421825500\nI0821 06:48:43.802218 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:48:43.802227 32543 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:48:43.802234 32543 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:48:43.802242 32543 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:48:43.802302 32543 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:48:43.802464 32543 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:48:43.802477 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.802482 32543 net.cpp:165] Memory required for data: 1423873500\nI0821 06:48:43.802492 32543 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:48:43.802505 32543 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:48:43.802510 32543 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:48:43.802518 32543 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:48:43.802525 32543 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:48:43.802562 32543 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:48:43.802574 32543 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:48:43.802579 32543 net.cpp:165] Memory required for data: 1425921500\nI0821 06:48:43.802584 32543 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:48:43.802592 32543 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:48:43.802598 32543 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:48:43.802605 32543 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:48:43.802615 32543 net.cpp:150] Setting up L3_b8_relu\nI0821 06:48:43.802621 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.802626 32543 net.cpp:165] Memory required for data: 1427969500\nI0821 06:48:43.802637 32543 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.802645 32543 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.802651 32543 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:48:43.802661 32543 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:48:43.802672 32543 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:48:43.802719 32543 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:48:43.802731 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.802738 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.802742 32543 net.cpp:165] Memory required for data: 1432065500\nI0821 06:48:43.802747 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:48:43.802763 32543 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:48:43.802769 32543 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:48:43.802778 32543 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:48:43.803805 32543 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:48:43.803822 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:48:43.803827 32543 net.cpp:165] Memory required for data: 1434113500\nI0821 06:48:43.803835 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:48:43.803848 32543 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:48:43.803853 32543 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:48:43.803863 32543 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:48:43.804145 32543 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:48:43.804159 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.804164 32543 net.cpp:165] Memory required for data: 1436161500\nI0821 06:48:43.804174 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:48:43.804186 32543 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:48:43.804193 32543 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:48:43.804200 32543 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.804265 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:48:43.804428 32543 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:48:43.804442 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.804447 32543 net.cpp:165] Memory required for data: 1438209500\nI0821 06:48:43.804455 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:48:43.804466 32543 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:48:43.804473 32543 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:48:43.804481 32543 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:48:43.804493 32543 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:48:43.804500 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.804504 32543 net.cpp:165] Memory required for data: 1440257500\nI0821 06:48:43.804509 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:48:43.804520 32543 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:48:43.804527 32543 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:48:43.804538 32543 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:48:43.805569 32543 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:48:43.805584 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.805589 32543 net.cpp:165] Memory required for data: 1442305500\nI0821 06:48:43.805598 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:48:43.805608 32543 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:48:43.805613 32543 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:48:43.805627 32543 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:48:43.805903 32543 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:48:43.805917 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.805927 32543 net.cpp:165] Memory required for data: 1444353500\nI0821 06:48:43.805938 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:48:43.805948 32543 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:48:43.805954 32543 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:48:43.805961 32543 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:48:43.806025 32543 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:48:43.806191 32543 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:48:43.806210 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.806215 32543 net.cpp:165] Memory required for data: 1446401500\nI0821 06:48:43.806223 32543 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:48:43.806233 32543 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:48:43.806239 32543 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:48:43.806246 32543 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:48:43.806254 32543 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:48:43.806293 32543 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:48:43.806304 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.806308 32543 net.cpp:165] Memory required for data: 1448449500\nI0821 06:48:43.806313 32543 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:48:43.806321 32543 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:48:43.806327 32543 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:48:43.806334 32543 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:48:43.806344 32543 net.cpp:150] Setting up L3_b9_relu\nI0821 06:48:43.806350 32543 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:48:43.806355 32543 net.cpp:165] Memory required for data: 1450497500\nI0821 06:48:43.806360 32543 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:48:43.806370 32543 net.cpp:100] Creating Layer post_pool\nI0821 06:48:43.806376 32543 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:48:43.806385 32543 net.cpp:408] post_pool -> post_pool\nI0821 06:48:43.806421 32543 net.cpp:150] Setting up post_pool\nI0821 06:48:43.806432 32543 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:48:43.806437 32543 net.cpp:165] Memory required for data: 1450529500\nI0821 06:48:43.806443 32543 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:48:43.806454 32543 net.cpp:100] Creating Layer post_FC\nI0821 06:48:43.806460 32543 net.cpp:434] post_FC <- post_pool\nI0821 06:48:43.806473 32543 net.cpp:408] post_FC -> post_FC_top\nI0821 06:48:43.806643 32543 net.cpp:150] Setting up post_FC\nI0821 06:48:43.806656 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.806661 32543 net.cpp:165] Memory required for data: 1450534500\nI0821 06:48:43.806670 32543 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:48:43.806680 32543 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:48:43.806687 32543 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:48:43.806695 32543 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:48:43.806707 32543 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:48:43.806757 32543 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:48:43.806769 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.806776 32543 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:48:43.806780 32543 net.cpp:165] Memory required for data: 1450544500\nI0821 06:48:43.806785 32543 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:48:43.806793 32543 net.cpp:100] Creating Layer accuracy\nI0821 06:48:43.806799 32543 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:48:43.806807 32543 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:48:43.806813 32543 net.cpp:408] accuracy -> accuracy\nI0821 06:48:43.806825 32543 net.cpp:150] Setting up accuracy\nI0821 06:48:43.806833 32543 net.cpp:157] Top shape: (1)\nI0821 06:48:43.806843 32543 net.cpp:165] Memory required for data: 1450544504\nI0821 06:48:43.806849 32543 layer_factory.hpp:77] Creating layer loss\nI0821 06:48:43.806859 32543 net.cpp:100] Creating Layer loss\nI0821 06:48:43.806865 32543 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:48:43.806872 32543 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:48:43.806879 32543 net.cpp:408] loss -> loss\nI0821 06:48:43.806891 32543 layer_factory.hpp:77] Creating layer loss\nI0821 06:48:43.807018 32543 net.cpp:150] Setting up loss\nI0821 06:48:43.807032 32543 net.cpp:157] Top shape: (1)\nI0821 06:48:43.807037 32543 net.cpp:160]     with loss weight 1\nI0821 06:48:43.807054 32543 net.cpp:165] Memory required for data: 1450544508\nI0821 06:48:43.807061 32543 net.cpp:226] loss needs backward computation.\nI0821 06:48:43.807067 32543 net.cpp:228] accuracy does not need backward computation.\nI0821 06:48:43.807073 32543 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:48:43.807078 32543 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:48:43.807083 32543 net.cpp:226] post_pool needs backward computation.\nI0821 06:48:43.807088 32543 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:48:43.807093 32543 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.807098 32543 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.807103 32543 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.807108 32543 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.807114 32543 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.807126 32543 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.807130 32543 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.807135 32543 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.807142 32543 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:48:43.807147 32543 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:48:43.807152 32543 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.807157 32543 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.807163 32543 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.807168 32543 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.807173 32543 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.807178 32543 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:48:43.807183 32543 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.807188 32543 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.807193 32543 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:48:43.807198 32543 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:48:43.807202 32543 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:48:43.807209 32543 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.807214 32543 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.807219 32543 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.807224 32543 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.807227 32543 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.807234 32543 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.807238 32543 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.807243 32543 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:48:43.807248 32543 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:48:43.807253 32543 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.807260 32543 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.807265 32543 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.807276 32543 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.807282 32543 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.807287 32543 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.807292 32543 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.807297 32543 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.807302 32543 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:48:43.807308 32543 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:48:43.807313 32543 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:48:43.807319 32543 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.807324 32543 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.807329 32543 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.807334 32543 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.807339 32543 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.807344 32543 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.807350 32543 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.807355 32543 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:48:43.807361 32543 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:48:43.807366 32543 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.807371 32543 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.807376 32543 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.807382 32543 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.807390 32543 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.807396 32543 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.807401 32543 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.807406 32543 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.807412 32543 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:48:43.807417 32543 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:48:43.807422 32543 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:48:43.807428 32543 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.807433 32543 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.807438 32543 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.807443 32543 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.807449 32543 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.807453 
32543 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.807459 32543 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.807464 32543 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:48:43.807471 32543 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:48:43.807476 32543 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.807482 32543 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.807487 32543 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.807492 32543 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.807497 32543 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.807502 32543 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.807507 32543 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.807512 32543 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.807518 32543 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:48:43.807523 32543 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:48:43.807534 32543 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:48:43.807539 32543 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:48:43.807544 32543 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.807550 32543 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:48:43.807556 32543 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.807561 32543 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.807566 32543 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.807572 32543 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.807577 32543 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:48:43.807582 32543 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.807587 32543 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.807593 32543 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:48:43.807598 32543 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:48:43.807603 32543 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.807610 32543 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.807615 32543 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.807621 32543 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.807626 32543 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.807631 32543 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.807636 32543 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.807642 32543 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.807647 32543 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:48:43.807653 32543 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:48:43.807658 32543 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.807664 32543 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.807669 32543 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.807675 32543 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.807680 32543 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.807685 32543 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:48:43.807690 32543 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.807696 32543 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.807701 32543 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:48:43.807706 32543 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:48:43.807713 32543 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:48:43.807718 32543 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.807723 32543 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.807729 32543 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.807734 32543 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.807739 32543 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.807744 32543 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.807749 32543 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.807760 32543 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:48:43.807765 32543 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:48:43.807771 32543 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.807777 32543 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.807782 32543 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.807788 32543 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.807795 32543 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.807804 32543 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.807809 32543 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.807816 32543 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.807821 32543 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:48:43.807826 32543 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:48:43.807832 32543 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:48:43.807838 32543 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.807843 32543 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.807849 32543 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.807854 32543 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.807860 32543 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.807865 32543 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.807871 32543 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.807876 32543 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:48:43.807883 32543 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:48:43.807888 32543 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.807893 32543 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.807898 32543 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.807904 32543 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.807910 32543 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.807915 32543 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.807920 32543 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.807926 32543 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.807931 32543 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:48:43.807937 32543 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:48:43.807942 32543 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:48:43.807950 32543 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.807955 32543 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.807960 32543 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.807965 32543 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.807971 32543 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.807976 32543 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.807982 32543 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.807988 32543 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:48:43.807994 32543 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:48:43.807999 32543 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.808006 32543 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.808012 32543 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.808017 32543 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.808022 32543 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.808027 32543 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.808033 32543 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.808038 32543 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.808044 32543 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:48:43.808049 32543 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:48:43.808056 32543 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:48:43.808061 32543 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:48:43.808071 32543 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.808079 32543 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:48:43.808084 32543 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.808090 32543 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.808095 32543 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.808101 32543 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.808106 32543 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:48:43.808112 32543 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.808123 32543 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.808130 32543 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:48:43.808136 32543 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:48:43.808141 32543 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:48:43.808148 32543 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:48:43.808153 32543 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:48:43.808159 32543 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:48:43.808164 32543 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:48:43.808171 32543 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:48:43.808176 32543 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:48:43.808182 32543 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:48:43.808187 32543 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:48:43.808192 32543 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:48:43.808198 32543 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:48:43.808204 32543 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:48:43.808209 32543 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:48:43.808215 32543 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:48:43.808221 32543 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:48:43.808226 32543 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:48:43.808233 32543 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:48:43.808238 32543 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:48:43.808243 32543 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:48:43.808249 32543 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:48:43.808255 32543 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:48:43.808261 32543 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:48:43.808267 32543 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:48:43.808272 32543 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:48:43.808279 32543 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:48:43.808284 32543 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:48:43.808290 32543 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:48:43.808295 32543 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:48:43.808301 32543 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:48:43.808306 32543 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:48:43.808312 32543 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:48:43.808318 32543 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:48:43.808324 32543 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:48:43.808331 32543 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:48:43.808336 32543 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:48:43.808341 32543 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:48:43.808352 32543 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:48:43.808358 32543 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:48:43.808364 32543 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:48:43.808370 32543 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:48:43.808377 32543 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:48:43.808382 32543 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:48:43.808388 32543 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:48:43.808394 32543 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:48:43.808399 32543 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:48:43.808405 32543 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:48:43.808410 32543 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:48:43.808416 32543 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:48:43.808423 32543 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:48:43.808429 32543 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:48:43.808434 32543 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:48:43.808440 32543 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:48:43.808445 32543 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:48:43.808454 32543 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:48:43.808460 32543 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:48:43.808465 32543 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:48:43.808471 32543 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:48:43.808477 32543 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:48:43.808483 32543 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:48:43.808490 32543 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:48:43.808495 32543 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:48:43.808501 32543 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:48:43.808506 32543 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:48:43.808512 32543 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:48:43.808518 32543 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:48:43.808523 32543 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:48:43.808529 32543 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:48:43.808535 32543 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:48:43.808542 32543 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:48:43.808547 32543 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:48:43.808553 32543 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:48:43.808559 32543 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:48:43.808565 32543 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:48:43.808571 32543 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:48:43.808576 32543 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:48:43.808583 32543 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:48:43.808588 32543 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:48:43.808593 32543 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:48:43.808599 32543 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:48:43.808605 32543 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:48:43.808611 32543 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:48:43.808619 32543 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:48:43.808624 32543 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:48:43.808636 32543 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:48:43.808642 32543 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:48:43.808648 32543 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:48:43.808653 32543 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:48:43.808660 32543 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:48:43.808665 32543 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:48:43.808671 32543 net.cpp:226] pre_relu needs backward computation.\nI0821 06:48:43.808676 32543 net.cpp:226] pre_scale needs backward computation.\nI0821 06:48:43.808682 32543 net.cpp:226] pre_bn needs backward computation.\nI0821 06:48:43.808687 32543 net.cpp:226] pre_conv needs backward computation.\nI0821 06:48:43.808694 32543 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:48:43.808701 32543 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:48:43.808706 32543 net.cpp:270] This network produces output accuracy\nI0821 06:48:43.808712 32543 net.cpp:270] This network produces output loss\nI0821 06:48:43.809039 32543 net.cpp:283] Network initialization done.\nI0821 06:48:43.810045 32543 solver.cpp:60] Solver scaffolding done.\nI0821 06:48:44.030874 32543 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:48:44.391577 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:44.391654 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:44.398578 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:44.622498 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:44.622612 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:44.656919 32543 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:48:44.657028 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:45.113175 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:45.113236 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:45.121273 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:45.366127 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:45.366235 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:45.418210 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:45.418313 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:45.922705 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:45.922771 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:45.931520 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:46.201747 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:46.201875 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:46.273032 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:46.273169 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:46.356014 32543 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:48:46.839817 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:46.839890 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:48:46.849485 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:47.140156 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:47.140355 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:47.232368 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:47.232553 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:47.882689 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:47.882747 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:47.893460 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:48.206429 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:48.206611 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:48.320003 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:48.320189 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:49.027709 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:49.027773 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:49.038846 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:49.384735 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:49.384948 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:49.518074 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:49.518288 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:48:50.293462 32543 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:48:50.293526 32543 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:48:50.305866 32543 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:48:50.359997 32570 blocking_queue.cpp:50] Waiting for data\nI0821 06:48:50.415386 32567 blocking_queue.cpp:50] Waiting for data\nI0821 06:48:50.740725 32543 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:48:50.740962 32543 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:48:50.892158 32543 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:48:50.892392 32543 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:48:51.063241 32543 parallel.cpp:425] Starting Optimization\nI0821 06:48:51.065565 32543 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:48:51.065582 32543 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:48:51.069874 32543 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:50:10.739503 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:50:10.739804 32543 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:50:14.787983 32543 solver.cpp:228] Iteration 0, loss = 4.74978\nI0821 06:50:14.788028 32543 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0821 06:50:14.788043 32543 solver.cpp:244]     Train net output #1: loss = 4.74978 (* 1 = 4.74978 loss)\nI0821 06:50:14.788197 32543 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:52:32.891837 32543 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:53:52.965178 32543 solver.cpp:404]     Test net output #0: accuracy = 0.20748\nI0821 06:53:52.965457 32543 solver.cpp:404]     Test net output #1: loss = 2.10685 (* 1 = 2.10685 loss)\nI0821 06:53:54.250967 32543 
solver.cpp:228] Iteration 100, loss = 1.98746\nI0821 06:53:54.251024 32543 solver.cpp:244]     Train net output #0: accuracy = 0.304\nI0821 06:53:54.251041 32543 solver.cpp:244]     Train net output #1: loss = 1.98746 (* 1 = 1.98746 loss)\nI0821 06:53:54.379398 32543 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:56:12.085185 32543 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:57:32.169014 32543 solver.cpp:404]     Test net output #0: accuracy = 0.36004\nI0821 06:57:32.169306 32543 solver.cpp:404]     Test net output #1: loss = 1.72746 (* 1 = 1.72746 loss)\nI0821 06:57:33.455222 32543 solver.cpp:228] Iteration 200, loss = 1.55146\nI0821 06:57:33.455281 32543 solver.cpp:244]     Train net output #0: accuracy = 0.368\nI0821 06:57:33.455305 32543 solver.cpp:244]     Train net output #1: loss = 1.55146 (* 1 = 1.55146 loss)\nI0821 06:57:33.583840 32543 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 06:59:51.553375 32543 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 07:01:11.686297 32543 solver.cpp:404]     Test net output #0: accuracy = 0.3266\nI0821 07:01:11.686573 32543 solver.cpp:404]     Test net output #1: loss = 2.21808 (* 1 = 2.21808 loss)\nI0821 07:01:12.973112 32543 solver.cpp:228] Iteration 300, loss = 1.18283\nI0821 07:01:12.973171 32543 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0821 07:01:12.973198 32543 solver.cpp:244]     Train net output #1: loss = 1.18283 (* 1 = 1.18283 loss)\nI0821 07:01:13.096635 32543 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:03:30.984421 32543 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:04:51.110590 32543 solver.cpp:404]     Test net output #0: accuracy = 0.46168\nI0821 07:04:51.110829 32543 solver.cpp:404]     Test net output #1: loss = 1.61229 (* 1 = 1.61229 loss)\nI0821 07:04:52.397861 32543 solver.cpp:228] Iteration 400, loss = 0.816534\nI0821 07:04:52.397923 32543 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0821 07:04:52.397948 
32543 solver.cpp:244]     Train net output #1: loss = 0.816534 (* 1 = 0.816534 loss)\nI0821 07:04:52.518546 32543 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:07:10.669128 32543 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:08:30.771491 32543 solver.cpp:404]     Test net output #0: accuracy = 0.47272\nI0821 07:08:30.771713 32543 solver.cpp:404]     Test net output #1: loss = 1.69458 (* 1 = 1.69458 loss)\nI0821 07:08:32.058521 32543 solver.cpp:228] Iteration 500, loss = 0.704245\nI0821 07:08:32.058576 32543 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 07:08:32.058601 32543 solver.cpp:244]     Train net output #1: loss = 0.704245 (* 1 = 0.704245 loss)\nI0821 07:08:32.187927 32543 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:10:50.158535 32543 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:12:10.222707 32543 solver.cpp:404]     Test net output #0: accuracy = 0.53608\nI0821 07:12:10.222971 32543 solver.cpp:404]     Test net output #1: loss = 1.45789 (* 1 = 1.45789 loss)\nI0821 07:12:11.508626 32543 solver.cpp:228] Iteration 600, loss = 0.597541\nI0821 07:12:11.508684 32543 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 07:12:11.508709 32543 solver.cpp:244]     Train net output #1: loss = 0.597541 (* 1 = 0.597541 loss)\nI0821 07:12:11.633143 32543 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:14:29.601308 32543 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:15:49.722571 32543 solver.cpp:404]     Test net output #0: accuracy = 0.45032\nI0821 07:15:49.722810 32543 solver.cpp:404]     Test net output #1: loss = 2.19011 (* 1 = 2.19011 loss)\nI0821 07:15:51.008934 32543 solver.cpp:228] Iteration 700, loss = 0.58912\nI0821 07:15:51.008993 32543 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 07:15:51.009018 32543 solver.cpp:244]     Train net output #1: loss = 0.58912 (* 1 = 0.58912 loss)\nI0821 07:15:51.131636 32543 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0821 07:18:09.247380 32543 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:19:29.378944 32543 solver.cpp:404]     Test net output #0: accuracy = 0.65868\nI0821 07:19:29.379191 32543 solver.cpp:404]     Test net output #1: loss = 1.07836 (* 1 = 1.07836 loss)\nI0821 07:19:30.664650 32543 solver.cpp:228] Iteration 800, loss = 0.548606\nI0821 07:19:30.664708 32543 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 07:19:30.664734 32543 solver.cpp:244]     Train net output #1: loss = 0.548606 (* 1 = 0.548606 loss)\nI0821 07:19:30.794291 32543 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:21:48.778576 32543 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:23:08.864244 32543 solver.cpp:404]     Test net output #0: accuracy = 0.61348\nI0821 07:23:08.864511 32543 solver.cpp:404]     Test net output #1: loss = 1.23883 (* 1 = 1.23883 loss)\nI0821 07:23:10.150781 32543 solver.cpp:228] Iteration 900, loss = 0.527091\nI0821 07:23:10.150840 32543 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 07:23:10.150864 32543 solver.cpp:244]     Train net output #1: loss = 0.527091 (* 1 = 0.527091 loss)\nI0821 07:23:10.276590 32543 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:25:28.454195 32543 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:26:48.550755 32543 solver.cpp:404]     Test net output #0: accuracy = 0.43536\nI0821 07:26:48.550985 32543 solver.cpp:404]     Test net output #1: loss = 2.12549 (* 1 = 2.12549 loss)\nI0821 07:26:49.836462 32543 solver.cpp:228] Iteration 1000, loss = 0.561354\nI0821 07:26:49.836525 32543 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 07:26:49.836551 32543 solver.cpp:244]     Train net output #1: loss = 0.561354 (* 1 = 0.561354 loss)\nI0821 07:26:49.961568 32543 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:29:08.014269 32543 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:30:28.106487 32543 solver.cpp:404]     Test net output 
#0: accuracy = 0.50212\nI0821 07:30:28.106716 32543 solver.cpp:404]     Test net output #1: loss = 1.69002 (* 1 = 1.69002 loss)\nI0821 07:30:29.392889 32543 solver.cpp:228] Iteration 1100, loss = 0.621786\nI0821 07:30:29.392947 32543 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 07:30:29.392971 32543 solver.cpp:244]     Train net output #1: loss = 0.621786 (* 1 = 0.621786 loss)\nI0821 07:30:29.521529 32543 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:32:47.675709 32543 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:34:07.730408 32543 solver.cpp:404]     Test net output #0: accuracy = 0.49588\nI0821 07:34:07.730648 32543 solver.cpp:404]     Test net output #1: loss = 1.61463 (* 1 = 1.61463 loss)\nI0821 07:34:09.016527 32543 solver.cpp:228] Iteration 1200, loss = 0.507083\nI0821 07:34:09.016584 32543 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 07:34:09.016609 32543 solver.cpp:244]     Train net output #1: loss = 0.507083 (* 1 = 0.507083 loss)\nI0821 07:34:09.144213 32543 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:36:27.069442 32543 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:37:46.943863 32543 solver.cpp:404]     Test net output #0: accuracy = 0.39916\nI0821 07:37:46.944108 32543 solver.cpp:404]     Test net output #1: loss = 2.23759 (* 1 = 2.23759 loss)\nI0821 07:37:48.229074 32543 solver.cpp:228] Iteration 1300, loss = 0.438288\nI0821 07:37:48.229132 32543 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 07:37:48.229157 32543 solver.cpp:244]     Train net output #1: loss = 0.438288 (* 1 = 0.438288 loss)\nI0821 07:37:48.358850 32543 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:40:06.386832 32543 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:41:26.459894 32543 solver.cpp:404]     Test net output #0: accuracy = 0.52004\nI0821 07:41:26.460114 32543 solver.cpp:404]     Test net output #1: loss = 1.75909 (* 1 = 1.75909 loss)\nI0821 
07:41:27.745879 32543 solver.cpp:228] Iteration 1400, loss = 0.500682\nI0821 07:41:27.745936 32543 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 07:41:27.745962 32543 solver.cpp:244]     Train net output #1: loss = 0.500682 (* 1 = 0.500682 loss)\nI0821 07:41:27.870390 32543 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:43:46.016348 32543 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:45:06.079931 32543 solver.cpp:404]     Test net output #0: accuracy = 0.56432\nI0821 07:45:06.080144 32543 solver.cpp:404]     Test net output #1: loss = 1.32574 (* 1 = 1.32574 loss)\nI0821 07:45:07.365173 32543 solver.cpp:228] Iteration 1500, loss = 0.64552\nI0821 07:45:07.365227 32543 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 07:45:07.365249 32543 solver.cpp:244]     Train net output #1: loss = 0.64552 (* 1 = 0.64552 loss)\nI0821 07:45:07.490789 32543 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:47:25.497191 32543 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:48:45.581193 32543 solver.cpp:404]     Test net output #0: accuracy = 0.37272\nI0821 07:48:45.581439 32543 solver.cpp:404]     Test net output #1: loss = 1.89991 (* 1 = 1.89991 loss)\nI0821 07:48:46.866775 32543 solver.cpp:228] Iteration 1600, loss = 0.503389\nI0821 07:48:46.866828 32543 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 07:48:46.866852 32543 solver.cpp:244]     Train net output #1: loss = 0.503389 (* 1 = 0.503389 loss)\nI0821 07:48:46.994153 32543 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:51:04.823060 32543 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:52:24.794478 32543 solver.cpp:404]     Test net output #0: accuracy = 0.4998\nI0821 07:52:24.794716 32543 solver.cpp:404]     Test net output #1: loss = 1.84778 (* 1 = 1.84778 loss)\nI0821 07:52:26.080740 32543 solver.cpp:228] Iteration 1700, loss = 0.670312\nI0821 07:52:26.080796 32543 solver.cpp:244]     Train net output #0: accuracy 
= 0.768\nI0821 07:52:26.080821 32543 solver.cpp:244]     Train net output #1: loss = 0.670312 (* 1 = 0.670312 loss)\nI0821 07:52:26.203934 32543 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:54:44.204428 32543 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:56:03.394364 32543 solver.cpp:404]     Test net output #0: accuracy = 0.44612\nI0821 07:56:03.394628 32543 solver.cpp:404]     Test net output #1: loss = 2.14205 (* 1 = 2.14205 loss)\nI0821 07:56:04.676033 32543 solver.cpp:228] Iteration 1800, loss = 0.714446\nI0821 07:56:04.676081 32543 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 07:56:04.676097 32543 solver.cpp:244]     Train net output #1: loss = 0.714446 (* 1 = 0.714446 loss)\nI0821 07:56:04.808841 32543 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:58:22.916585 32543 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 07:59:42.023172 32543 solver.cpp:404]     Test net output #0: accuracy = 0.56844\nI0821 07:59:42.023437 32543 solver.cpp:404]     Test net output #1: loss = 1.32198 (* 1 = 1.32198 loss)\nI0821 07:59:43.304463 32543 solver.cpp:228] Iteration 1900, loss = 0.572333\nI0821 07:59:43.304512 32543 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 07:59:43.304527 32543 solver.cpp:244]     Train net output #1: loss = 0.572333 (* 1 = 0.572333 loss)\nI0821 07:59:43.439664 32543 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 08:02:01.282104 32543 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 08:03:20.391294 32543 solver.cpp:404]     Test net output #0: accuracy = 0.46704\nI0821 08:03:20.391547 32543 solver.cpp:404]     Test net output #1: loss = 2.12375 (* 1 = 2.12375 loss)\nI0821 08:03:21.673406 32543 solver.cpp:228] Iteration 2000, loss = 0.551541\nI0821 08:03:21.673455 32543 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 08:03:21.673471 32543 solver.cpp:244]     Train net output #1: loss = 0.551541 (* 1 = 0.551541 loss)\nI0821 08:03:21.799964 
32543 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:05:39.629571 32543 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:06:58.734575 32543 solver.cpp:404]     Test net output #0: accuracy = 0.29852\nI0821 08:06:58.734836 32543 solver.cpp:404]     Test net output #1: loss = 3.83267 (* 1 = 3.83267 loss)\nI0821 08:07:00.016633 32543 solver.cpp:228] Iteration 2100, loss = 0.668789\nI0821 08:07:00.016680 32543 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 08:07:00.016696 32543 solver.cpp:244]     Train net output #1: loss = 0.668789 (* 1 = 0.668789 loss)\nI0821 08:07:00.145460 32543 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:09:18.240109 32543 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:10:37.341987 32543 solver.cpp:404]     Test net output #0: accuracy = 0.43544\nI0821 08:10:37.342262 32543 solver.cpp:404]     Test net output #1: loss = 1.79784 (* 1 = 1.79784 loss)\nI0821 08:10:38.624272 32543 solver.cpp:228] Iteration 2200, loss = 0.626583\nI0821 08:10:38.624320 32543 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 08:10:38.624336 32543 solver.cpp:244]     Train net output #1: loss = 0.626583 (* 1 = 0.626583 loss)\nI0821 08:10:38.753458 32543 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:12:56.634507 32543 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:14:15.741284 32543 solver.cpp:404]     Test net output #0: accuracy = 0.34892\nI0821 08:14:15.741554 32543 solver.cpp:404]     Test net output #1: loss = 2.13084 (* 1 = 2.13084 loss)\nI0821 08:14:17.023298 32543 solver.cpp:228] Iteration 2300, loss = 0.724591\nI0821 08:14:17.023345 32543 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0821 08:14:17.023360 32543 solver.cpp:244]     Train net output #1: loss = 0.724591 (* 1 = 0.724591 loss)\nI0821 08:14:17.151640 32543 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:16:34.972424 32543 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 
08:17:54.115459 32543 solver.cpp:404]     Test net output #0: accuracy = 0.44448\nI0821 08:17:54.115717 32543 solver.cpp:404]     Test net output #1: loss = 2.59971 (* 1 = 2.59971 loss)\nI0821 08:17:55.401334 32543 solver.cpp:228] Iteration 2400, loss = 0.804736\nI0821 08:17:55.401389 32543 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0821 08:17:55.401413 32543 solver.cpp:244]     Train net output #1: loss = 0.804736 (* 1 = 0.804736 loss)\nI0821 08:17:55.523350 32543 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:20:13.331013 32543 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:21:32.516834 32543 solver.cpp:404]     Test net output #0: accuracy = 0.43948\nI0821 08:21:32.517103 32543 solver.cpp:404]     Test net output #1: loss = 1.69399 (* 1 = 1.69399 loss)\nI0821 08:21:33.799449 32543 solver.cpp:228] Iteration 2500, loss = 0.772671\nI0821 08:21:33.799501 32543 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0821 08:21:33.799525 32543 solver.cpp:244]     Train net output #1: loss = 0.772671 (* 1 = 0.772671 loss)\nI0821 08:21:33.934087 32543 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:23:51.738746 32543 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:25:10.941994 32543 solver.cpp:404]     Test net output #0: accuracy = 0.38424\nI0821 08:25:10.942245 32543 solver.cpp:404]     Test net output #1: loss = 1.95912 (* 1 = 1.95912 loss)\nI0821 08:25:12.224097 32543 solver.cpp:228] Iteration 2600, loss = 0.733005\nI0821 08:25:12.224144 32543 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 08:25:12.224161 32543 solver.cpp:244]     Train net output #1: loss = 0.733005 (* 1 = 0.733005 loss)\nI0821 08:25:12.359045 32543 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:27:30.022753 32543 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:28:49.133204 32543 solver.cpp:404]     Test net output #0: accuracy = 0.25148\nI0821 08:28:49.133473 32543 solver.cpp:404]     Test net 
output #1: loss = 3.18374 (* 1 = 3.18374 loss)\nI0821 08:28:50.415211 32543 solver.cpp:228] Iteration 2700, loss = 0.755258\nI0821 08:28:50.415263 32543 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 08:28:50.415279 32543 solver.cpp:244]     Train net output #1: loss = 0.755258 (* 1 = 0.755258 loss)\nI0821 08:28:50.538985 32543 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:31:08.302819 32543 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:32:27.411593 32543 solver.cpp:404]     Test net output #0: accuracy = 0.34924\nI0821 08:32:27.411864 32543 solver.cpp:404]     Test net output #1: loss = 2.93491 (* 1 = 2.93491 loss)\nI0821 08:32:28.693481 32543 solver.cpp:228] Iteration 2800, loss = 0.747218\nI0821 08:32:28.693529 32543 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0821 08:32:28.693545 32543 solver.cpp:244]     Train net output #1: loss = 0.747218 (* 1 = 0.747218 loss)\nI0821 08:32:28.820869 32543 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:34:46.379031 32543 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:36:05.492640 32543 solver.cpp:404]     Test net output #0: accuracy = 0.43944\nI0821 08:36:05.492894 32543 solver.cpp:404]     Test net output #1: loss = 1.73557 (* 1 = 1.73557 loss)\nI0821 08:36:06.774734 32543 solver.cpp:228] Iteration 2900, loss = 0.804474\nI0821 08:36:06.774781 32543 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0821 08:36:06.774797 32543 solver.cpp:244]     Train net output #1: loss = 0.804474 (* 1 = 0.804474 loss)\nI0821 08:36:06.909643 32543 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:38:25.478339 32543 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:39:45.547803 32543 solver.cpp:404]     Test net output #0: accuracy = 0.31596\nI0821 08:39:45.548020 32543 solver.cpp:404]     Test net output #1: loss = 2.40867 (* 1 = 2.40867 loss)\nI0821 08:39:46.833608 32543 solver.cpp:228] Iteration 3000, loss = 0.848863\nI0821 
08:39:46.833663 32543 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0821 08:39:46.833680 32543 solver.cpp:244]     Train net output #1: loss = 0.848863 (* 1 = 0.848863 loss)\nI0821 08:39:46.960860 32543 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:42:05.401849 32543 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:43:25.440552 32543 solver.cpp:404]     Test net output #0: accuracy = 0.29564\nI0821 08:43:25.440800 32543 solver.cpp:404]     Test net output #1: loss = 2.55567 (* 1 = 2.55567 loss)\nI0821 08:43:26.726363 32543 solver.cpp:228] Iteration 3100, loss = 0.850024\nI0821 08:43:26.726418 32543 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0821 08:43:26.726435 32543 solver.cpp:244]     Train net output #1: loss = 0.850024 (* 1 = 0.850024 loss)\nI0821 08:43:26.855679 32543 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:45:45.438879 32543 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:47:05.496258 32543 solver.cpp:404]     Test net output #0: accuracy = 0.14772\nI0821 08:47:05.496505 32543 solver.cpp:404]     Test net output #1: loss = 4.36606 (* 1 = 4.36606 loss)\nI0821 08:47:06.782277 32543 solver.cpp:228] Iteration 3200, loss = 0.929723\nI0821 08:47:06.782332 32543 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0821 08:47:06.782357 32543 solver.cpp:244]     Train net output #1: loss = 0.929723 (* 1 = 0.929723 loss)\nI0821 08:47:06.915838 32543 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:49:25.603709 32543 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:50:45.484589 32543 solver.cpp:404]     Test net output #0: accuracy = 0.30864\nI0821 08:50:45.484881 32543 solver.cpp:404]     Test net output #1: loss = 2.38048 (* 1 = 2.38048 loss)\nI0821 08:50:46.770943 32543 solver.cpp:228] Iteration 3300, loss = 0.715982\nI0821 08:50:46.770998 32543 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 08:50:46.771023 32543 solver.cpp:244]     Train net 
output #1: loss = 0.715982 (* 1 = 0.715982 loss)\nI0821 08:50:46.899529 32543 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:53:05.525717 32543 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:54:25.584403 32543 solver.cpp:404]     Test net output #0: accuracy = 0.29156\nI0821 08:54:25.584691 32543 solver.cpp:404]     Test net output #1: loss = 2.02631 (* 1 = 2.02631 loss)\nI0821 08:54:26.868594 32543 solver.cpp:228] Iteration 3400, loss = 0.818084\nI0821 08:54:26.868646 32543 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 08:54:26.868664 32543 solver.cpp:244]     Train net output #1: loss = 0.818084 (* 1 = 0.818084 loss)\nI0821 08:54:27.000133 32543 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:56:45.544123 32543 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:58:05.613865 32543 solver.cpp:404]     Test net output #0: accuracy = 0.33008\nI0821 08:58:05.614161 32543 solver.cpp:404]     Test net output #1: loss = 1.86941 (* 1 = 1.86941 loss)\nI0821 08:58:06.896453 32543 solver.cpp:228] Iteration 3500, loss = 0.859101\nI0821 08:58:06.896503 32543 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0821 08:58:06.896520 32543 solver.cpp:244]     Train net output #1: loss = 0.859101 (* 1 = 0.859101 loss)\nI0821 08:58:07.027653 32543 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 09:00:25.470772 32543 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 09:01:45.548787 32543 solver.cpp:404]     Test net output #0: accuracy = 0.36776\nI0821 09:01:45.549053 32543 solver.cpp:404]     Test net output #1: loss = 2.28133 (* 1 = 2.28133 loss)\nI0821 09:01:46.831217 32543 solver.cpp:228] Iteration 3600, loss = 0.815461\nI0821 09:01:46.831269 32543 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0821 09:01:46.831286 32543 solver.cpp:244]     Train net output #1: loss = 0.815461 (* 1 = 0.815461 loss)\nI0821 09:01:46.960175 32543 sgd_solver.cpp:166] Iteration 3600, lr = 2.188\nI0821 
09:04:05.482537 32543 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:05:25.582262 32543 solver.cpp:404]     Test net output #0: accuracy = 0.29484\nI0821 09:05:25.582566 32543 solver.cpp:404]     Test net output #1: loss = 2.2065 (* 1 = 2.2065 loss)\nI0821 09:05:26.864398 32543 solver.cpp:228] Iteration 3700, loss = 0.897881\nI0821 09:05:26.864449 32543 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0821 09:05:26.864465 32543 solver.cpp:244]     Train net output #1: loss = 0.897881 (* 1 = 0.897881 loss)\nI0821 09:05:26.996302 32543 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:07:45.230377 32543 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:09:05.320719 32543 solver.cpp:404]     Test net output #0: accuracy = 0.28948\nI0821 09:09:05.321017 32543 solver.cpp:404]     Test net output #1: loss = 3.27782 (* 1 = 3.27782 loss)\nI0821 09:09:06.602280 32543 solver.cpp:228] Iteration 3800, loss = 0.801331\nI0821 09:09:06.602327 32543 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0821 09:09:06.602344 32543 solver.cpp:244]     Train net output #1: loss = 0.801331 (* 1 = 0.801331 loss)\nI0821 09:09:06.735291 32543 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:11:25.111338 32543 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:12:45.194175 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10072\nI0821 09:12:45.194447 32543 solver.cpp:404]     Test net output #1: loss = 56.5017 (* 1 = 56.5017 loss)\nI0821 09:12:46.476217 32543 solver.cpp:228] Iteration 3900, loss = 1.78739\nI0821 09:12:46.476271 32543 solver.cpp:244]     Train net output #0: accuracy = 0.408\nI0821 09:12:46.476290 32543 solver.cpp:244]     Train net output #1: loss = 1.78739 (* 1 = 1.78739 loss)\nI0821 09:12:46.609685 32543 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:15:05.330627 32543 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:16:25.423800 32543 solver.cpp:404]     Test net output #0: 
accuracy = 0.15204\nI0821 09:16:25.424096 32543 solver.cpp:404]     Test net output #1: loss = 2.93493 (* 1 = 2.93493 loss)\nI0821 09:16:26.705874 32543 solver.cpp:228] Iteration 4000, loss = 1.04813\nI0821 09:16:26.705924 32543 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0821 09:16:26.705941 32543 solver.cpp:244]     Train net output #1: loss = 1.04813 (* 1 = 1.04813 loss)\nI0821 09:16:26.840227 32543 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:18:45.430462 32543 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:20:05.534710 32543 solver.cpp:404]     Test net output #0: accuracy = 0.21204\nI0821 09:20:05.534976 32543 solver.cpp:404]     Test net output #1: loss = 3.56956 (* 1 = 3.56956 loss)\nI0821 09:20:06.816395 32543 solver.cpp:228] Iteration 4100, loss = 1.03371\nI0821 09:20:06.816442 32543 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0821 09:20:06.816459 32543 solver.cpp:244]     Train net output #1: loss = 1.03371 (* 1 = 1.03371 loss)\nI0821 09:20:06.952870 32543 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:22:25.331970 32543 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:23:45.428617 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10192\nI0821 09:23:45.428891 32543 solver.cpp:404]     Test net output #1: loss = 3.48857 (* 1 = 3.48857 loss)\nI0821 09:23:46.710911 32543 solver.cpp:228] Iteration 4200, loss = 0.92531\nI0821 09:23:46.710958 32543 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0821 09:23:46.710974 32543 solver.cpp:244]     Train net output #1: loss = 0.92531 (* 1 = 0.92531 loss)\nI0821 09:23:46.844967 32543 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:26:05.588148 32543 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:27:25.700553 32543 solver.cpp:404]     Test net output #0: accuracy = 0.15076\nI0821 09:27:25.700846 32543 solver.cpp:404]     Test net output #1: loss = 9.21344 (* 1 = 9.21344 loss)\nI0821 09:27:26.983516 32543 
solver.cpp:228] Iteration 4300, loss = 1.03201\nI0821 09:27:26.983564 32543 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0821 09:27:26.983582 32543 solver.cpp:244]     Train net output #1: loss = 1.03201 (* 1 = 1.03201 loss)\nI0821 09:27:27.123314 32543 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:29:45.935401 32543 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:31:06.166579 32543 solver.cpp:404]     Test net output #0: accuracy = 0.17612\nI0821 09:31:06.166868 32543 solver.cpp:404]     Test net output #1: loss = 4.24645 (* 1 = 4.24645 loss)\nI0821 09:31:07.448369 32543 solver.cpp:228] Iteration 4400, loss = 1.22888\nI0821 09:31:07.448420 32543 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI0821 09:31:07.448436 32543 solver.cpp:244]     Train net output #1: loss = 1.22888 (* 1 = 1.22888 loss)\nI0821 09:31:07.581131 32543 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:33:26.358464 32543 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:34:46.604377 32543 solver.cpp:404]     Test net output #0: accuracy = 0.15312\nI0821 09:34:46.604668 32543 solver.cpp:404]     Test net output #1: loss = 15.9061 (* 1 = 15.9061 loss)\nI0821 09:34:47.886744 32543 solver.cpp:228] Iteration 4500, loss = 1.33707\nI0821 09:34:47.886796 32543 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0821 09:34:47.886814 32543 solver.cpp:244]     Train net output #1: loss = 1.33707 (* 1 = 1.33707 loss)\nI0821 09:34:48.019039 32543 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:37:06.484266 32543 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:38:26.572012 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09256\nI0821 09:38:26.572325 32543 solver.cpp:404]     Test net output #1: loss = 35.3642 (* 1 = 35.3642 loss)\nI0821 09:38:27.864483 32543 solver.cpp:228] Iteration 4600, loss = 1.81268\nI0821 09:38:27.864538 32543 solver.cpp:244]     Train net output #0: accuracy = 0.376\nI0821 
09:38:27.864563 32543 solver.cpp:244]     Train net output #1: loss = 1.81268 (* 1 = 1.81268 loss)\nI0821 09:38:27.980911 32543 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:40:46.376482 32543 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:42:06.468592 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 09:42:06.468870 32543 solver.cpp:404]     Test net output #1: loss = 2.5038 (* 1 = 2.5038 loss)\nI0821 09:42:07.751938 32543 solver.cpp:228] Iteration 4700, loss = 1.35933\nI0821 09:42:07.751992 32543 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0821 09:42:07.752010 32543 solver.cpp:244]     Train net output #1: loss = 1.35933 (* 1 = 1.35933 loss)\nI0821 09:42:07.878360 32543 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:44:26.227875 32543 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:45:46.293618 32543 solver.cpp:404]     Test net output #0: accuracy = 0.153\nI0821 09:45:46.293915 32543 solver.cpp:404]     Test net output #1: loss = 3.06596 (* 1 = 3.06596 loss)\nI0821 09:45:47.576220 32543 solver.cpp:228] Iteration 4800, loss = 1.5324\nI0821 09:45:47.576273 32543 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0821 09:45:47.576295 32543 solver.cpp:244]     Train net output #1: loss = 1.5324 (* 1 = 1.5324 loss)\nI0821 09:45:47.709419 32543 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:48:06.224593 32543 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:49:26.295004 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10308\nI0821 09:49:26.295368 32543 solver.cpp:404]     Test net output #1: loss = 4.6776 (* 1 = 4.6776 loss)\nI0821 09:49:27.576309 32543 solver.cpp:228] Iteration 4900, loss = 1.45549\nI0821 09:49:27.576359 32543 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI0821 09:49:27.576377 32543 solver.cpp:244]     Train net output #1: loss = 1.45549 (* 1 = 1.45549 loss)\nI0821 09:49:27.705633 32543 sgd_solver.cpp:166] Iteration 
4900, lr = 2.942\nI0821 09:51:46.028229 32543 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:53:06.048918 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0821 09:53:06.049192 32543 solver.cpp:404]     Test net output #1: loss = 13.2998 (* 1 = 13.2998 loss)\nI0821 09:53:07.332047 32543 solver.cpp:228] Iteration 5000, loss = 1.53572\nI0821 09:53:07.332095 32543 solver.cpp:244]     Train net output #0: accuracy = 0.448\nI0821 09:53:07.332113 32543 solver.cpp:244]     Train net output #1: loss = 1.53572 (* 1 = 1.53572 loss)\nI0821 09:53:07.460546 32543 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:55:26.322825 32543 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:56:46.341483 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 09:56:46.341769 32543 solver.cpp:404]     Test net output #1: loss = 21.3704 (* 1 = 21.3704 loss)\nI0821 09:56:47.624428 32543 solver.cpp:228] Iteration 5100, loss = 2.30327\nI0821 09:56:47.624480 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 09:56:47.624497 32543 solver.cpp:244]     Train net output #1: loss = 2.30327 (* 1 = 2.30327 loss)\nI0821 09:56:47.755827 32543 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 09:59:06.253320 32543 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 10:00:26.314309 32543 solver.cpp:404]     Test net output #0: accuracy = 0.11572\nI0821 10:00:26.314604 32543 solver.cpp:404]     Test net output #1: loss = 2.43723 (* 1 = 2.43723 loss)\nI0821 10:00:27.596544 32543 solver.cpp:228] Iteration 5200, loss = 2.29915\nI0821 10:00:27.596595 32543 solver.cpp:244]     Train net output #0: accuracy = 0.136\nI0821 10:00:27.596612 32543 solver.cpp:244]     Train net output #1: loss = 2.29915 (* 1 = 2.29915 loss)\nI0821 10:00:27.723445 32543 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 10:02:46.187115 32543 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 10:04:06.246529 32543 solver.cpp:404]     Test net 
output #0: accuracy = 0.09952\nI0821 10:04:06.246798 32543 solver.cpp:404]     Test net output #1: loss = 2.4683 (* 1 = 2.4683 loss)\nI0821 10:04:07.528808 32543 solver.cpp:228] Iteration 5300, loss = 2.75865\nI0821 10:04:07.528861 32543 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0821 10:04:07.528878 32543 solver.cpp:244]     Train net output #1: loss = 2.75865 (* 1 = 2.75865 loss)\nI0821 10:04:07.656386 32543 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:06:26.285223 32543 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:07:46.208310 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10944\nI0821 10:07:46.208606 32543 solver.cpp:404]     Test net output #1: loss = 2.32898 (* 1 = 2.32898 loss)\nI0821 10:07:47.490939 32543 solver.cpp:228] Iteration 5400, loss = 1.97296\nI0821 10:07:47.490989 32543 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI0821 10:07:47.491008 32543 solver.cpp:244]     Train net output #1: loss = 1.97296 (* 1 = 1.97296 loss)\nI0821 10:07:47.614128 32543 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:10:06.172200 32543 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:11:26.107530 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:11:26.107810 32543 solver.cpp:404]     Test net output #1: loss = 2.32296 (* 1 = 2.32296 loss)\nI0821 10:11:27.391374 32543 solver.cpp:228] Iteration 5500, loss = 2.30545\nI0821 10:11:27.391429 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:11:27.391449 32543 solver.cpp:244]     Train net output #1: loss = 2.30545 (* 1 = 2.30545 loss)\nI0821 10:11:27.515004 32543 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:13:46.264822 32543 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:15:06.316684 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:15:06.316975 32543 solver.cpp:404]     Test net output #1: loss = 2.32988 (* 1 = 2.32988 loss)\nI0821 
10:15:07.608064 32543 solver.cpp:228] Iteration 5600, loss = 2.3193\nI0821 10:15:07.608121 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:15:07.608140 32543 solver.cpp:244]     Train net output #1: loss = 2.3193 (* 1 = 2.3193 loss)\nI0821 10:15:07.729540 32543 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:17:26.112855 32543 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:18:46.277396 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:18:46.277669 32543 solver.cpp:404]     Test net output #1: loss = 2.33024 (* 1 = 2.33024 loss)\nI0821 10:18:47.561092 32543 solver.cpp:228] Iteration 5700, loss = 2.3183\nI0821 10:18:47.561151 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:18:47.561169 32543 solver.cpp:244]     Train net output #1: loss = 2.3183 (* 1 = 2.3183 loss)\nI0821 10:18:47.691282 32543 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:21:06.053230 32543 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:22:26.107996 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:22:26.108276 32543 solver.cpp:404]     Test net output #1: loss = 2.32889 (* 1 = 2.32889 loss)\nI0821 10:22:27.390172 32543 solver.cpp:228] Iteration 5800, loss = 2.31707\nI0821 10:22:27.390226 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:22:27.390246 32543 solver.cpp:244]     Train net output #1: loss = 2.31707 (* 1 = 2.31707 loss)\nI0821 10:22:27.524423 32543 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:24:46.067430 32543 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:26:06.081454 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:26:06.081737 32543 solver.cpp:404]     Test net output #1: loss = 2.32691 (* 1 = 2.32691 loss)\nI0821 10:26:07.364086 32543 solver.cpp:228] Iteration 5900, loss = 2.31576\nI0821 10:26:07.364142 32543 solver.cpp:244]     Train net output #0: accuracy = 
0.12\nI0821 10:26:07.364162 32543 solver.cpp:244]     Train net output #1: loss = 2.31576 (* 1 = 2.31576 loss)\nI0821 10:26:07.498306 32543 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:28:26.254132 32543 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:29:46.296962 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:29:46.297206 32543 solver.cpp:404]     Test net output #1: loss = 2.32392 (* 1 = 2.32392 loss)\nI0821 10:29:47.579051 32543 solver.cpp:228] Iteration 6000, loss = 2.31467\nI0821 10:29:47.579105 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:29:47.579124 32543 solver.cpp:244]     Train net output #1: loss = 2.31467 (* 1 = 2.31467 loss)\nI0821 10:29:47.711771 32543 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:32:06.240134 32543 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:33:26.289364 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:33:26.289631 32543 solver.cpp:404]     Test net output #1: loss = 2.32155 (* 1 = 2.32155 loss)\nI0821 10:33:27.571776 32543 solver.cpp:228] Iteration 6100, loss = 2.31401\nI0821 10:33:27.571831 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:33:27.571851 32543 solver.cpp:244]     Train net output #1: loss = 2.31401 (* 1 = 2.31401 loss)\nI0821 10:33:27.699631 32543 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:35:46.041649 32543 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:37:06.097849 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:37:06.098122 32543 solver.cpp:404]     Test net output #1: loss = 2.31913 (* 1 = 2.31913 loss)\nI0821 10:37:07.380735 32543 solver.cpp:228] Iteration 6200, loss = 2.31382\nI0821 10:37:07.380789 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:37:07.380807 32543 solver.cpp:244]     Train net output #1: loss = 2.31382 (* 1 = 2.31382 loss)\nI0821 10:37:07.516568 32543 
sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:39:25.683193 32543 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:40:45.635143 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:40:45.635417 32543 solver.cpp:404]     Test net output #1: loss = 2.3176 (* 1 = 2.3176 loss)\nI0821 10:40:46.917400 32543 solver.cpp:228] Iteration 6300, loss = 2.31405\nI0821 10:40:46.917454 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:40:46.917472 32543 solver.cpp:244]     Train net output #1: loss = 2.31405 (* 1 = 2.31405 loss)\nI0821 10:40:47.044932 32543 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:43:05.645843 32543 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:44:25.698706 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:44:25.698977 32543 solver.cpp:404]     Test net output #1: loss = 2.31613 (* 1 = 2.31613 loss)\nI0821 10:44:26.980849 32543 solver.cpp:228] Iteration 6400, loss = 2.31465\nI0821 10:44:26.980904 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:44:26.980922 32543 solver.cpp:244]     Train net output #1: loss = 2.31465 (* 1 = 2.31465 loss)\nI0821 10:44:27.107455 32543 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:46:45.705287 32543 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:48:05.735980 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:48:05.736276 32543 solver.cpp:404]     Test net output #1: loss = 2.31523 (* 1 = 2.31523 loss)\nI0821 10:48:07.017895 32543 solver.cpp:228] Iteration 6500, loss = 2.31563\nI0821 10:48:07.017949 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:48:07.017968 32543 solver.cpp:244]     Train net output #1: loss = 2.31563 (* 1 = 2.31563 loss)\nI0821 10:48:07.146925 32543 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 10:50:25.757521 32543 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:51:45.811218 32543 
solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:51:45.811517 32543 solver.cpp:404]     Test net output #1: loss = 2.31426 (* 1 = 2.31426 loss)\nI0821 10:51:47.093713 32543 solver.cpp:228] Iteration 6600, loss = 2.31705\nI0821 10:51:47.093767 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:51:47.093786 32543 solver.cpp:244]     Train net output #1: loss = 2.31705 (* 1 = 2.31705 loss)\nI0821 10:51:47.225317 32543 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:54:05.828680 32543 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:55:25.865247 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 10:55:25.865523 32543 solver.cpp:404]     Test net output #1: loss = 2.31348 (* 1 = 2.31348 loss)\nI0821 10:55:27.147917 32543 solver.cpp:228] Iteration 6700, loss = 2.31903\nI0821 10:55:27.147971 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:55:27.147990 32543 solver.cpp:244]     Train net output #1: loss = 2.31903 (* 1 = 2.31903 loss)\nI0821 10:55:27.278703 32543 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:57:45.858132 32543 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 10:59:05.907352 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 10:59:05.907622 32543 solver.cpp:404]     Test net output #1: loss = 2.31267 (* 1 = 2.31267 loss)\nI0821 10:59:07.189337 32543 solver.cpp:228] Iteration 6800, loss = 2.32169\nI0821 10:59:07.189390 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 10:59:07.189409 32543 solver.cpp:244]     Train net output #1: loss = 2.32169 (* 1 = 2.32169 loss)\nI0821 10:59:07.316720 32543 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 11:01:25.841020 32543 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 11:02:45.865031 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 11:02:45.865332 32543 solver.cpp:404]     Test net output #1: loss = 2.31189 (* 1 = 
2.31189 loss)\nI0821 11:02:47.147519 32543 solver.cpp:228] Iteration 6900, loss = 2.32508\nI0821 11:02:47.147573 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 11:02:47.147593 32543 solver.cpp:244]     Train net output #1: loss = 2.32508 (* 1 = 2.32508 loss)\nI0821 11:02:47.280629 32543 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:05:05.999053 32543 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:06:26.060840 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 11:06:26.061131 32543 solver.cpp:404]     Test net output #1: loss = 2.3116 (* 1 = 2.3116 loss)\nI0821 11:06:27.343410 32543 solver.cpp:228] Iteration 7000, loss = 2.32894\nI0821 11:06:27.343464 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 11:06:27.343482 32543 solver.cpp:244]     Train net output #1: loss = 2.32894 (* 1 = 2.32894 loss)\nI0821 11:06:27.471673 32543 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:08:46.011462 32543 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:10:06.043681 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 11:10:06.043962 32543 solver.cpp:404]     Test net output #1: loss = 2.31133 (* 1 = 2.31133 loss)\nI0821 11:10:07.325827 32543 solver.cpp:228] Iteration 7100, loss = 2.33246\nI0821 11:10:07.325883 32543 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 11:10:07.325902 32543 solver.cpp:244]     Train net output #1: loss = 2.33246 (* 1 = 2.33246 loss)\nI0821 11:10:07.459867 32543 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:12:25.480191 32543 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:13:45.547399 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 11:13:45.547670 32543 solver.cpp:404]     Test net output #1: loss = 2.31161 (* 1 = 2.31161 loss)\nI0821 11:13:46.829478 32543 solver.cpp:228] Iteration 7200, loss = 2.33449\nI0821 11:13:46.829531 32543 solver.cpp:244]     Train net output 
#0: accuracy = 0.072\nI0821 11:13:46.829551 32543 solver.cpp:244]     Train net output #1: loss = 2.33449 (* 1 = 2.33449 loss)\nI0821 11:13:46.956595 32543 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:16:05.006289 32543 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:17:25.064891 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 11:17:25.065181 32543 solver.cpp:404]     Test net output #1: loss = 2.31119 (* 1 = 2.31119 loss)\nI0821 11:17:26.346374 32543 solver.cpp:228] Iteration 7300, loss = 2.33448\nI0821 11:17:26.346428 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:17:26.346447 32543 solver.cpp:244]     Train net output #1: loss = 2.33448 (* 1 = 2.33448 loss)\nI0821 11:17:26.479377 32543 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:19:44.386379 32543 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:21:04.444016 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 11:21:04.444283 32543 solver.cpp:404]     Test net output #1: loss = 2.31111 (* 1 = 2.31111 loss)\nI0821 11:21:05.726316 32543 solver.cpp:228] Iteration 7400, loss = 2.33288\nI0821 11:21:05.726375 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:21:05.726394 32543 solver.cpp:244]     Train net output #1: loss = 2.33288 (* 1 = 2.33288 loss)\nI0821 11:21:05.852068 32543 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:23:23.683523 32543 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:24:43.717558 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 11:24:43.717831 32543 solver.cpp:404]     Test net output #1: loss = 2.31042 (* 1 = 2.31042 loss)\nI0821 11:24:44.999372 32543 solver.cpp:228] Iteration 7500, loss = 2.33063\nI0821 11:24:44.999426 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:24:44.999446 32543 solver.cpp:244]     Train net output #1: loss = 2.33063 (* 1 = 2.33063 loss)\nI0821 11:24:45.130128 
32543 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:27:02.998185 32543 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:28:23.028245 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 11:28:23.028543 32543 solver.cpp:404]     Test net output #1: loss = 2.31043 (* 1 = 2.31043 loss)\nI0821 11:28:24.310520 32543 solver.cpp:228] Iteration 7600, loss = 2.32845\nI0821 11:28:24.310575 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:28:24.310592 32543 solver.cpp:244]     Train net output #1: loss = 2.32845 (* 1 = 2.32845 loss)\nI0821 11:28:24.440315 32543 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:30:42.513116 32543 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:32:02.555156 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 11:32:02.555387 32543 solver.cpp:404]     Test net output #1: loss = 2.31008 (* 1 = 2.31008 loss)\nI0821 11:32:03.837205 32543 solver.cpp:228] Iteration 7700, loss = 2.32668\nI0821 11:32:03.837256 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:32:03.837275 32543 solver.cpp:244]     Train net output #1: loss = 2.32668 (* 1 = 2.32668 loss)\nI0821 11:32:03.957974 32543 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:34:22.115555 32543 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:35:42.172385 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 11:35:42.172683 32543 solver.cpp:404]     Test net output #1: loss = 2.31058 (* 1 = 2.31058 loss)\nI0821 11:35:43.454355 32543 solver.cpp:228] Iteration 7800, loss = 2.32545\nI0821 11:35:43.454409 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:35:43.454429 32543 solver.cpp:244]     Train net output #1: loss = 2.32545 (* 1 = 2.32545 loss)\nI0821 11:35:43.665074 32543 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:38:01.837438 32543 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 
11:39:21.896742 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 11:39:21.897017 32543 solver.cpp:404]     Test net output #1: loss = 2.31062 (* 1 = 2.31062 loss)\nI0821 11:39:23.178789 32543 solver.cpp:228] Iteration 7900, loss = 2.3247\nI0821 11:39:23.178843 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 11:39:23.178861 32543 solver.cpp:244]     Train net output #1: loss = 2.3247 (* 1 = 2.3247 loss)\nI0821 11:39:23.309370 32543 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:41:41.324465 32543 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:43:01.387883 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 11:43:01.388177 32543 solver.cpp:404]     Test net output #1: loss = 2.3113 (* 1 = 2.3113 loss)\nI0821 11:43:02.669545 32543 solver.cpp:228] Iteration 8000, loss = 2.32412\nI0821 11:43:02.669600 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 11:43:02.669618 32543 solver.cpp:244]     Train net output #1: loss = 2.32412 (* 1 = 2.32412 loss)\nI0821 11:43:02.798842 32543 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:45:20.683682 32543 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:46:40.770380 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 11:46:40.770685 32543 solver.cpp:404]     Test net output #1: loss = 2.31076 (* 1 = 2.31076 loss)\nI0821 11:46:42.052683 32543 solver.cpp:228] Iteration 8100, loss = 2.3231\nI0821 11:46:42.052737 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 11:46:42.052755 32543 solver.cpp:244]     Train net output #1: loss = 2.3231 (* 1 = 2.3231 loss)\nI0821 11:46:42.184445 32543 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:49:00.164343 32543 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 11:50:20.251647 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 11:50:20.251933 32543 solver.cpp:404]     Test net output #1: loss = 
2.31003 (* 1 = 2.31003 loss)\nI0821 11:50:21.534605 32543 solver.cpp:228] Iteration 8200, loss = 2.32103\nI0821 11:50:21.534658 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 11:50:21.534677 32543 solver.cpp:244]     Train net output #1: loss = 2.32103 (* 1 = 2.32103 loss)\nI0821 11:50:21.661602 32543 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:52:39.566396 32543 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:53:59.642096 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 11:53:59.642403 32543 solver.cpp:404]     Test net output #1: loss = 2.30796 (* 1 = 2.30796 loss)\nI0821 11:54:00.924684 32543 solver.cpp:228] Iteration 8300, loss = 2.31807\nI0821 11:54:00.924737 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:54:00.924757 32543 solver.cpp:244]     Train net output #1: loss = 2.31807 (* 1 = 2.31807 loss)\nI0821 11:54:01.054349 32543 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:56:18.986207 32543 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:57:39.042291 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 11:57:39.042577 32543 solver.cpp:404]     Test net output #1: loss = 2.3066 (* 1 = 2.3066 loss)\nI0821 11:57:40.325276 32543 solver.cpp:228] Iteration 8400, loss = 2.31517\nI0821 11:57:40.325330 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 11:57:40.325348 32543 solver.cpp:244]     Train net output #1: loss = 2.31517 (* 1 = 2.31517 loss)\nI0821 11:57:40.453372 32543 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 11:59:58.605768 32543 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 12:01:18.671648 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:01:18.671932 32543 solver.cpp:404]     Test net output #1: loss = 2.30547 (* 1 = 2.30547 loss)\nI0821 12:01:19.953331 32543 solver.cpp:228] Iteration 8500, loss = 2.313\nI0821 12:01:19.953382 32543 solver.cpp:244]     
Train net output #0: accuracy = 0.072\nI0821 12:01:19.953400 32543 solver.cpp:244]     Train net output #1: loss = 2.313 (* 1 = 2.313 loss)\nI0821 12:01:20.080831 32543 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 12:03:38.006661 32543 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:04:58.067121 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:04:58.067404 32543 solver.cpp:404]     Test net output #1: loss = 2.30514 (* 1 = 2.30514 loss)\nI0821 12:04:59.349088 32543 solver.cpp:228] Iteration 8600, loss = 2.31157\nI0821 12:04:59.349138 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:04:59.349156 32543 solver.cpp:244]     Train net output #1: loss = 2.31157 (* 1 = 2.31157 loss)\nI0821 12:04:59.476284 32543 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:07:17.323619 32543 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:08:37.385702 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:08:37.385982 32543 solver.cpp:404]     Test net output #1: loss = 2.30499 (* 1 = 2.30499 loss)\nI0821 12:08:38.667701 32543 solver.cpp:228] Iteration 8700, loss = 2.31069\nI0821 12:08:38.667753 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:08:38.667773 32543 solver.cpp:244]     Train net output #1: loss = 2.31069 (* 1 = 2.31069 loss)\nI0821 12:08:38.792263 32543 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:10:57.082808 32543 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:12:17.139849 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:12:17.140141 32543 solver.cpp:404]     Test net output #1: loss = 2.30503 (* 1 = 2.30503 loss)\nI0821 12:12:18.422718 32543 solver.cpp:228] Iteration 8800, loss = 2.31027\nI0821 12:12:18.422770 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:12:18.422788 32543 solver.cpp:244]     Train net output #1: loss = 2.31027 (* 1 = 2.31027 loss)\nI0821 
12:12:18.546087 32543 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:14:36.468371 32543 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:15:56.526211 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:15:56.526521 32543 solver.cpp:404]     Test net output #1: loss = 2.30507 (* 1 = 2.30507 loss)\nI0821 12:15:57.808392 32543 solver.cpp:228] Iteration 8900, loss = 2.31037\nI0821 12:15:57.808441 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:15:57.808459 32543 solver.cpp:244]     Train net output #1: loss = 2.31037 (* 1 = 2.31037 loss)\nI0821 12:15:57.933411 32543 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:18:15.921671 32543 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:19:35.965235 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:19:35.965533 32543 solver.cpp:404]     Test net output #1: loss = 2.30505 (* 1 = 2.30505 loss)\nI0821 12:19:37.247097 32543 solver.cpp:228] Iteration 9000, loss = 2.31108\nI0821 12:19:37.247145 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:19:37.247164 32543 solver.cpp:244]     Train net output #1: loss = 2.31108 (* 1 = 2.31108 loss)\nI0821 12:19:37.371963 32543 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:21:55.169829 32543 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:23:15.209771 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:23:15.210072 32543 solver.cpp:404]     Test net output #1: loss = 2.30485 (* 1 = 2.30485 loss)\nI0821 12:23:16.491740 32543 solver.cpp:228] Iteration 9100, loss = 2.31227\nI0821 12:23:16.491791 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:23:16.491808 32543 solver.cpp:244]     Train net output #1: loss = 2.31227 (* 1 = 2.31227 loss)\nI0821 12:23:16.617903 32543 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:25:34.357282 32543 solver.cpp:337] Iteration 9200, Testing net 
(#0)\nI0821 12:26:54.419978 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:26:54.420261 32543 solver.cpp:404]     Test net output #1: loss = 2.3047 (* 1 = 2.3047 loss)\nI0821 12:26:55.702595 32543 solver.cpp:228] Iteration 9200, loss = 2.31333\nI0821 12:26:55.702644 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:26:55.702663 32543 solver.cpp:244]     Train net output #1: loss = 2.31333 (* 1 = 2.31333 loss)\nI0821 12:26:55.829967 32543 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:29:13.568233 32543 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:30:33.608034 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:30:33.608342 32543 solver.cpp:404]     Test net output #1: loss = 2.30434 (* 1 = 2.30434 loss)\nI0821 12:30:34.890633 32543 solver.cpp:228] Iteration 9300, loss = 2.3139\nI0821 12:30:34.890681 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:30:34.890700 32543 solver.cpp:244]     Train net output #1: loss = 2.3139 (* 1 = 2.3139 loss)\nI0821 12:30:35.021076 32543 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:32:52.672297 32543 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:34:12.704466 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:34:12.704756 32543 solver.cpp:404]     Test net output #1: loss = 2.30439 (* 1 = 2.30439 loss)\nI0821 12:34:13.986876 32543 solver.cpp:228] Iteration 9400, loss = 2.31424\nI0821 12:34:13.986927 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:34:13.986944 32543 solver.cpp:244]     Train net output #1: loss = 2.31424 (* 1 = 2.31424 loss)\nI0821 12:34:14.116559 32543 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 12:36:31.843463 32543 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:37:51.901018 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:37:51.901332 32543 solver.cpp:404]     Test net 
output #1: loss = 2.30424 (* 1 = 2.30424 loss)\nI0821 12:37:53.183482 32543 solver.cpp:228] Iteration 9500, loss = 2.31436\nI0821 12:37:53.183532 32543 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0821 12:37:53.183552 32543 solver.cpp:244]     Train net output #1: loss = 2.31436 (* 1 = 2.31436 loss)\nI0821 12:37:53.306231 32543 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:40:11.178454 32543 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:41:31.212810 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 12:41:31.213101 32543 solver.cpp:404]     Test net output #1: loss = 2.30418 (* 1 = 2.30418 loss)\nI0821 12:41:32.494401 32543 solver.cpp:228] Iteration 9600, loss = 2.31337\nI0821 12:41:32.494454 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 12:41:32.494473 32543 solver.cpp:244]     Train net output #1: loss = 2.31337 (* 1 = 2.31337 loss)\nI0821 12:41:32.624858 32543 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:43:50.450800 32543 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:45:10.497848 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 12:45:10.498157 32543 solver.cpp:404]     Test net output #1: loss = 2.3035 (* 1 = 2.3035 loss)\nI0821 12:45:11.780062 32543 solver.cpp:228] Iteration 9700, loss = 2.30992\nI0821 12:45:11.780114 32543 solver.cpp:244]     Train net output #0: accuracy = 0.048\nI0821 12:45:11.780133 32543 solver.cpp:244]     Train net output #1: loss = 2.30992 (* 1 = 2.30992 loss)\nI0821 12:45:11.910694 32543 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:47:29.704815 32543 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 12:48:49.749959 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:48:49.750267 32543 solver.cpp:404]     Test net output #1: loss = 2.30303 (* 1 = 2.30303 loss)\nI0821 12:48:51.032873 32543 solver.cpp:228] Iteration 9800, loss = 2.30584\nI0821 12:48:51.032927 32543 
solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0821 12:48:51.032946 32543 solver.cpp:244]     Train net output #1: loss = 2.30584 (* 1 = 2.30584 loss)\nI0821 12:48:51.163260 32543 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:51:09.120591 32543 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:52:29.173310 32543 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 12:52:29.173609 32543 solver.cpp:404]     Test net output #1: loss = 2.30277 (* 1 = 2.30277 loss)\nI0821 12:52:30.455652 32543 solver.cpp:228] Iteration 9900, loss = 2.30446\nI0821 12:52:30.455708 32543 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0821 12:52:30.455726 32543 solver.cpp:244]     Train net output #1: loss = 2.30446 (* 1 = 2.30446 loss)\nI0821 12:52:30.586414 32543 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:54:48.458139 32543 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kWD-3Fig11_iter_10000.caffemodel\nI0821 12:54:48.678573 32543 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kWD-3Fig11_iter_10000.solverstate\nI0821 12:54:49.112004 32543 solver.cpp:317] Iteration 10000, loss = 2.3046\nI0821 12:54:49.112051 32543 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:56:09.157970 32543 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 12:56:09.158306 32543 solver.cpp:404]     Test net output #1: loss = 2.3028 (* 1 = 2.3028 loss)\nI0821 12:56:09.158319 32543 solver.cpp:322] Optimization Done.\nI0821 12:56:14.462976 32543 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kWD-5Fig11",
    "content": "I0821 06:49:14.501749 32405 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:49:14.504400 32405 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:49:14.505616 32405 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:49:14.506826 32405 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:49:14.508045 32405 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:49:14.509275 32405 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:49:14.510669 32405 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:49:14.511899 32405 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:49:14.513134 32405 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:49:14.931164 32405 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 1e-05\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kWD-5Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:49:14.935619 32405 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:49:14.948756 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:14.948837 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:14.949975 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:49:14.950036 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:49:14.950064 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:49:14.950086 32405 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:49:14.950105 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:49:14.950124 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:49:14.950140 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:49:14.950160 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:49:14.950179 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:49:14.950196 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:49:14.950215 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:49:14.950230 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:49:14.950250 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:49:14.950269 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:49:14.950289 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:49:14.950306 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:49:14.950323 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:49:14.950340 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:49:14.950359 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:49:14.950377 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:49:14.950412 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:49:14.950430 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:49:14.950455 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:49:14.950474 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:49:14.950492 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:49:14.950506 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:49:14.950525 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:49:14.950541 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:49:14.950559 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:49:14.950577 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:49:14.950597 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:49:14.950614 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:49:14.950634 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:49:14.950650 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:49:14.950670 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:49:14.950688 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:49:14.950707 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:49:14.950724 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:49:14.950742 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:49:14.950759 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:49:14.950783 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:49:14.950801 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:49:14.950819 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:49:14.950837 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:49:14.950857 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:49:14.950886 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:49:14.950906 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:49:14.950922 32405 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:49:14.950940 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:49:14.950956 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:49:14.950974 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:49:14.951006 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:49:14.951027 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:49:14.951045 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:49:14.951063 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:49:14.951079 32405 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:49:14.952821 32405 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:49:14.955097 32405 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:49:14.960546 32405 net.cpp:100] Creating Layer dataLayer\nI0821 06:49:14.960624 32405 net.cpp:408] dataLayer -> data_top\nI0821 06:49:14.960829 32405 net.cpp:408] dataLayer -> label\nI0821 06:49:14.960963 32405 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:49:15.007768 32410 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:49:15.039824 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:15.046919 32405 net.cpp:150] Setting up dataLayer\nI0821 06:49:15.046990 32405 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:49:15.047004 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.047010 32405 net.cpp:165] Memory required for data: 1536500\nI0821 06:49:15.047026 32405 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:49:15.047044 32405 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:49:15.047052 32405 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:49:15.047072 32405 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:49:15.047087 32405 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:49:15.047161 32405 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:49:15.047174 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.047181 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.047185 32405 net.cpp:165] Memory required for data: 1537500\nI0821 06:49:15.047191 32405 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:49:15.047257 32405 
net.cpp:100] Creating Layer pre_conv\nI0821 06:49:15.047269 32405 net.cpp:434] pre_conv <- data_top\nI0821 06:49:15.047282 32405 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:49:15.049120 32405 net.cpp:150] Setting up pre_conv\nI0821 06:49:15.049140 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.049146 32405 net.cpp:165] Memory required for data: 9729500\nI0821 06:49:15.049217 32405 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:49:15.049295 32405 net.cpp:100] Creating Layer pre_bn\nI0821 06:49:15.049309 32405 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:49:15.049319 32405 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:49:15.049429 32411 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:15.049654 32405 net.cpp:150] Setting up pre_bn\nI0821 06:49:15.049670 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.049676 32405 net.cpp:165] Memory required for data: 17921500\nI0821 06:49:15.049695 32405 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:15.049744 32405 net.cpp:100] Creating Layer pre_scale\nI0821 06:49:15.049754 32405 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:49:15.049762 32405 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:49:15.049950 32405 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:15.058204 32405 net.cpp:150] Setting up pre_scale\nI0821 06:49:15.058223 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.058228 32405 net.cpp:165] Memory required for data: 26113500\nI0821 06:49:15.058238 32405 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:49:15.058286 32405 net.cpp:100] Creating Layer pre_relu\nI0821 06:49:15.058295 32405 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:49:15.058307 32405 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:49:15.058318 32405 net.cpp:150] Setting up pre_relu\nI0821 06:49:15.058326 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.058331 32405 net.cpp:165] Memory required for data: 
34305500\nI0821 06:49:15.058336 32405 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:49:15.058343 32405 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:49:15.058348 32405 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:49:15.058358 32405 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:49:15.058368 32405 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:49:15.058414 32405 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:49:15.058428 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.058434 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.058439 32405 net.cpp:165] Memory required for data: 50689500\nI0821 06:49:15.058444 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:49:15.058459 32405 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:49:15.058465 32405 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:49:15.058473 32405 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:49:15.058797 32405 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:49:15.058812 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.058818 32405 net.cpp:165] Memory required for data: 58881500\nI0821 06:49:15.058830 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:49:15.058845 32405 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:49:15.058851 32405 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:49:15.058866 32405 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:49:15.059095 32405 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:49:15.059108 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.059113 32405 net.cpp:165] Memory required for data: 67073500\nI0821 06:49:15.059123 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:15.059136 32405 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:49:15.059141 32405 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:49:15.059149 32405 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.059203 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:15.059342 32405 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:49:15.059355 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.059360 32405 net.cpp:165] Memory required for data: 75265500\nI0821 06:49:15.059368 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:49:15.059384 32405 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:49:15.059391 32405 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:49:15.059401 32405 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.059411 32405 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:49:15.059417 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.059422 32405 net.cpp:165] Memory required for data: 83457500\nI0821 06:49:15.059427 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:49:15.059442 32405 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:49:15.059448 32405 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:49:15.059456 32405 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:49:15.059762 32405 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:49:15.059777 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.059782 32405 net.cpp:165] Memory required for data: 91649500\nI0821 06:49:15.059790 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:49:15.059803 32405 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:49:15.059809 32405 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:49:15.059818 32405 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:49:15.060053 32405 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:49:15.060066 32405 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060071 32405 net.cpp:165] Memory required for data: 99841500\nI0821 06:49:15.060086 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:15.060098 32405 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:49:15.060104 32405 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:49:15.060112 32405 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.060168 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:15.060307 32405 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:49:15.060320 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060325 32405 net.cpp:165] Memory required for data: 108033500\nI0821 06:49:15.060334 32405 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:49:15.060389 32405 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:49:15.060401 32405 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:49:15.060410 32405 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:49:15.060420 32405 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:49:15.060500 32405 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:49:15.060515 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060520 32405 net.cpp:165] Memory required for data: 116225500\nI0821 06:49:15.060525 32405 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:49:15.060534 32405 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:49:15.060539 32405 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:49:15.060550 32405 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.060560 32405 net.cpp:150] Setting up L1_b1_relu\nI0821 06:49:15.060567 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060571 32405 net.cpp:165] Memory required for data: 124417500\nI0821 06:49:15.060576 32405 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.060585 32405 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.060590 32405 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:49:15.060598 32405 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:15.060607 32405 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:15.060652 32405 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.060663 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060670 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.060683 32405 net.cpp:165] Memory required for data: 140801500\nI0821 06:49:15.060688 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:49:15.060699 32405 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:49:15.060705 32405 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:15.060716 32405 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:49:15.061033 32405 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:49:15.061048 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.061053 32405 net.cpp:165] Memory required for data: 148993500\nI0821 06:49:15.061061 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:49:15.061071 32405 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:49:15.061077 32405 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:49:15.061089 32405 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:49:15.061331 32405 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:49:15.061347 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.061352 32405 net.cpp:165] Memory required for data: 157185500\nI0821 06:49:15.061363 32405 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:49:15.061372 32405 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:49:15.061378 32405 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:49:15.061385 32405 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.061437 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:49:15.061578 32405 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:49:15.061590 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.061595 32405 net.cpp:165] Memory required for data: 165377500\nI0821 06:49:15.061604 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:49:15.061615 32405 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:49:15.061621 32405 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:49:15.061628 32405 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.061640 32405 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:49:15.061647 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.061651 32405 net.cpp:165] Memory required for data: 173569500\nI0821 06:49:15.061656 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:49:15.061667 32405 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:49:15.061673 32405 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:49:15.061684 32405 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:49:15.061995 32405 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:49:15.062007 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062012 32405 net.cpp:165] Memory required for data: 181761500\nI0821 06:49:15.062021 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:49:15.062031 32405 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:49:15.062036 32405 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:49:15.062047 32405 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:49:15.062284 32405 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:49:15.062297 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062302 32405 net.cpp:165] Memory required for data: 189953500\nI0821 06:49:15.062320 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:15.062330 32405 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:49:15.062335 32405 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:49:15.062342 32405 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.062397 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:15.062537 32405 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:49:15.062549 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062554 32405 net.cpp:165] Memory required for data: 198145500\nI0821 06:49:15.062563 32405 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:49:15.062582 32405 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:49:15.062588 32405 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:49:15.062595 32405 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:15.062603 32405 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:49:15.062636 32405 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:49:15.062649 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062654 32405 net.cpp:165] Memory required for data: 206337500\nI0821 06:49:15.062659 32405 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:49:15.062665 32405 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:49:15.062671 32405 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:49:15.062678 32405 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.062687 32405 net.cpp:150] Setting up L1_b2_relu\nI0821 06:49:15.062695 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062700 32405 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:49:15.062703 32405 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.062711 32405 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.062716 32405 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:49:15.062726 32405 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:15.062736 32405 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:15.062777 32405 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.062788 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062794 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.062798 32405 net.cpp:165] Memory required for data: 230913500\nI0821 06:49:15.062803 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:49:15.062818 32405 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:49:15.062824 32405 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:15.062832 32405 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:49:15.063140 32405 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:49:15.063154 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.063159 32405 net.cpp:165] Memory required for data: 239105500\nI0821 06:49:15.063169 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:49:15.063182 32405 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:49:15.063189 32405 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:49:15.063199 32405 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:49:15.063431 32405 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:49:15.063443 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.063448 32405 net.cpp:165] Memory required for data: 
247297500\nI0821 06:49:15.063458 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:15.063467 32405 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:49:15.063473 32405 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:49:15.063483 32405 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.063534 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:15.063670 32405 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:49:15.063683 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.063688 32405 net.cpp:165] Memory required for data: 255489500\nI0821 06:49:15.063697 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:49:15.063707 32405 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:49:15.063714 32405 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:49:15.063720 32405 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.063730 32405 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:49:15.063743 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.063748 32405 net.cpp:165] Memory required for data: 263681500\nI0821 06:49:15.063753 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:49:15.063767 32405 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:49:15.063773 32405 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:49:15.063784 32405 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:49:15.064100 32405 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:49:15.064113 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064119 32405 net.cpp:165] Memory required for data: 271873500\nI0821 06:49:15.064127 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:49:15.064144 32405 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:49:15.064152 32405 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:49:15.064159 32405 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:49:15.064390 32405 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:49:15.064404 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064409 32405 net.cpp:165] Memory required for data: 280065500\nI0821 06:49:15.064419 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:15.064427 32405 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:49:15.064433 32405 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:49:15.064443 32405 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.064496 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:15.064630 32405 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:49:15.064646 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064651 32405 net.cpp:165] Memory required for data: 288257500\nI0821 06:49:15.064661 32405 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:49:15.064669 32405 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:49:15.064674 32405 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:49:15.064682 32405 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:15.064689 32405 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:49:15.064723 32405 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:49:15.064731 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064735 32405 net.cpp:165] Memory required for data: 296449500\nI0821 06:49:15.064741 32405 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:49:15.064751 32405 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:49:15.064756 32405 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:49:15.064764 32405 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.064774 32405 net.cpp:150] Setting up L1_b3_relu\nI0821 06:49:15.064780 32405 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:49:15.064785 32405 net.cpp:165] Memory required for data: 304641500\nI0821 06:49:15.064790 32405 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.064796 32405 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.064801 32405 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:49:15.064811 32405 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:15.064821 32405 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:15.064867 32405 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.064883 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064890 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.064894 32405 net.cpp:165] Memory required for data: 321025500\nI0821 06:49:15.064899 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:49:15.064910 32405 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:49:15.064916 32405 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:15.064931 32405 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:49:15.065243 32405 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:49:15.065256 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.065261 32405 net.cpp:165] Memory required for data: 329217500\nI0821 06:49:15.065270 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:49:15.065282 32405 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:49:15.065289 32405 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:49:15.065296 32405 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:49:15.065532 32405 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:49:15.065546 32405 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:49:15.065549 32405 net.cpp:165] Memory required for data: 337409500\nI0821 06:49:15.065560 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:15.065569 32405 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:49:15.065575 32405 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:49:15.065587 32405 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.065639 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:15.065778 32405 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:49:15.065793 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.065798 32405 net.cpp:165] Memory required for data: 345601500\nI0821 06:49:15.065807 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:49:15.065815 32405 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:49:15.065820 32405 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:49:15.065829 32405 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.065837 32405 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:49:15.065845 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.065848 32405 net.cpp:165] Memory required for data: 353793500\nI0821 06:49:15.065853 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:49:15.065873 32405 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:49:15.065881 32405 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:49:15.065891 32405 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:49:15.066200 32405 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:49:15.066215 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.066218 32405 net.cpp:165] Memory required for data: 361985500\nI0821 06:49:15.066227 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:49:15.066238 32405 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:49:15.066246 32405 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:49:15.066256 32405 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:49:15.066495 32405 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:49:15.066509 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.066514 32405 net.cpp:165] Memory required for data: 370177500\nI0821 06:49:15.066524 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:15.066535 32405 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:49:15.066541 32405 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:49:15.066548 32405 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.066601 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:15.066738 32405 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:49:15.066751 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.066756 32405 net.cpp:165] Memory required for data: 378369500\nI0821 06:49:15.066766 32405 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:49:15.066773 32405 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:49:15.066779 32405 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:49:15.066786 32405 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:15.066799 32405 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:49:15.066838 32405 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:49:15.066850 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.066855 32405 net.cpp:165] Memory required for data: 386561500\nI0821 06:49:15.066866 32405 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:49:15.066875 32405 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:49:15.066880 32405 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:49:15.066889 32405 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.066897 32405 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:49:15.066905 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.066910 32405 net.cpp:165] Memory required for data: 394753500\nI0821 06:49:15.066915 32405 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.066926 32405 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.066931 32405 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:49:15.066941 32405 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:15.066949 32405 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:15.066994 32405 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.067008 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067013 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067018 32405 net.cpp:165] Memory required for data: 411137500\nI0821 06:49:15.067023 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:49:15.067039 32405 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:49:15.067045 32405 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:15.067057 32405 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:49:15.067369 32405 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:49:15.067383 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067387 32405 net.cpp:165] Memory required for data: 419329500\nI0821 06:49:15.067406 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:49:15.067420 32405 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:49:15.067425 32405 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:49:15.067433 32405 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:49:15.067670 32405 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:49:15.067683 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067688 32405 net.cpp:165] Memory required for data: 427521500\nI0821 06:49:15.067698 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:15.067708 32405 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:49:15.067713 32405 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:49:15.067723 32405 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.067775 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:15.067925 32405 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:49:15.067939 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067944 32405 net.cpp:165] Memory required for data: 435713500\nI0821 06:49:15.067952 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:49:15.067960 32405 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:49:15.067966 32405 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:49:15.067973 32405 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.067983 32405 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:49:15.067989 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.067994 32405 net.cpp:165] Memory required for data: 443905500\nI0821 06:49:15.067998 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:49:15.068012 32405 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:49:15.068018 32405 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:49:15.068035 32405 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:49:15.068349 32405 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:49:15.068363 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.068368 32405 net.cpp:165] Memory required for data: 452097500\nI0821 06:49:15.068377 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:49:15.068390 32405 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:49:15.068397 32405 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:49:15.068408 32405 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:49:15.068640 32405 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:49:15.068653 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.068658 32405 net.cpp:165] Memory required for data: 460289500\nI0821 06:49:15.068668 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:15.068677 32405 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:49:15.068683 32405 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:49:15.068693 32405 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.068745 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:15.068888 32405 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:49:15.068907 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.068912 32405 net.cpp:165] Memory required for data: 468481500\nI0821 06:49:15.068920 32405 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:49:15.068929 32405 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:49:15.068935 32405 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:49:15.068943 32405 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:15.068950 32405 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:49:15.068982 32405 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:49:15.068994 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069000 32405 net.cpp:165] Memory required for data: 476673500\nI0821 06:49:15.069005 32405 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:49:15.069015 32405 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:49:15.069020 32405 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:49:15.069027 32405 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.069036 32405 net.cpp:150] Setting up L1_b5_relu\nI0821 06:49:15.069043 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069048 32405 net.cpp:165] Memory required for data: 484865500\nI0821 06:49:15.069052 32405 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.069059 32405 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.069064 32405 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:49:15.069074 32405 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:15.069084 32405 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:15.069125 32405 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.069139 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069146 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069151 32405 net.cpp:165] Memory required for data: 501249500\nI0821 06:49:15.069156 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:49:15.069166 32405 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:49:15.069172 32405 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:15.069181 32405 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:49:15.069490 32405 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:49:15.069504 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069509 32405 net.cpp:165] Memory required for data: 509441500\nI0821 06:49:15.069525 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:49:15.069536 32405 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:49:15.069542 32405 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:49:15.069550 32405 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:49:15.069788 32405 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:49:15.069802 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.069806 32405 net.cpp:165] Memory required for data: 517633500\nI0821 06:49:15.069816 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:15.069825 32405 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:49:15.069831 32405 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:49:15.069841 32405 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.069900 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:15.070050 32405 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:49:15.070066 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.070071 32405 net.cpp:165] Memory required for data: 525825500\nI0821 06:49:15.070080 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:49:15.070088 32405 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:49:15.070093 32405 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:49:15.070101 32405 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.070111 32405 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:49:15.070117 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.070122 32405 net.cpp:165] Memory required for data: 534017500\nI0821 06:49:15.070127 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:49:15.070140 32405 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:49:15.070147 32405 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:49:15.070157 32405 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:49:15.070472 32405 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:49:15.070485 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.070490 32405 net.cpp:165] Memory required for data: 542209500\nI0821 06:49:15.070499 32405 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:49:15.070510 32405 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:49:15.070518 32405 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:49:15.070528 32405 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:49:15.070762 32405 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:49:15.070775 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.070780 32405 net.cpp:165] Memory required for data: 550401500\nI0821 06:49:15.070791 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:15.070798 32405 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:49:15.070804 32405 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:49:15.070812 32405 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.070874 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:15.071015 32405 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:49:15.071028 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071033 32405 net.cpp:165] Memory required for data: 558593500\nI0821 06:49:15.071043 32405 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:49:15.071061 32405 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:49:15.071068 32405 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:49:15.071074 32405 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:15.071082 32405 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:49:15.071120 32405 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:49:15.071131 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071136 32405 net.cpp:165] Memory required for data: 566785500\nI0821 06:49:15.071141 32405 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:49:15.071157 32405 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:49:15.071163 32405 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:49:15.071173 32405 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.071183 32405 net.cpp:150] Setting up L1_b6_relu\nI0821 06:49:15.071190 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071194 32405 net.cpp:165] Memory required for data: 574977500\nI0821 06:49:15.071199 32405 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.071207 32405 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.071211 32405 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:49:15.071218 32405 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:15.071228 32405 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:15.071274 32405 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.071285 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071291 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071296 32405 net.cpp:165] Memory required for data: 591361500\nI0821 06:49:15.071301 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:49:15.071312 32405 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:49:15.071318 32405 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:15.071329 32405 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:49:15.071642 32405 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:49:15.071656 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071661 32405 net.cpp:165] Memory required for data: 599553500\nI0821 06:49:15.071671 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:49:15.071678 32405 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:49:15.071684 32405 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:49:15.071697 32405 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:49:15.071947 32405 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:49:15.071961 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.071966 32405 net.cpp:165] Memory required for data: 607745500\nI0821 06:49:15.071976 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:15.071988 32405 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:49:15.071995 32405 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:49:15.072002 32405 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.072054 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:15.072193 32405 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:49:15.072206 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.072211 32405 net.cpp:165] Memory required for data: 615937500\nI0821 06:49:15.072244 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:49:15.072257 32405 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:49:15.072263 32405 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:49:15.072273 32405 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.072283 32405 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:49:15.072290 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.072294 32405 net.cpp:165] Memory required for data: 624129500\nI0821 06:49:15.072301 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:49:15.072316 32405 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:49:15.072322 32405 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:49:15.072332 32405 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:49:15.072644 32405 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:49:15.072659 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.072664 32405 
net.cpp:165] Memory required for data: 632321500\nI0821 06:49:15.072686 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:49:15.072696 32405 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:49:15.072702 32405 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:49:15.072710 32405 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:49:15.072963 32405 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:49:15.072978 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.072983 32405 net.cpp:165] Memory required for data: 640513500\nI0821 06:49:15.072993 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:15.073004 32405 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:49:15.073010 32405 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:49:15.073019 32405 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.073073 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:15.073215 32405 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:49:15.073227 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073232 32405 net.cpp:165] Memory required for data: 648705500\nI0821 06:49:15.073241 32405 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:49:15.073249 32405 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:49:15.073256 32405 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:49:15.073262 32405 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:15.073276 32405 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:49:15.073307 32405 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:49:15.073321 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073328 32405 net.cpp:165] Memory required for data: 656897500\nI0821 06:49:15.073333 32405 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:49:15.073340 32405 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:49:15.073345 32405 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:49:15.073352 32405 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.073361 32405 net.cpp:150] Setting up L1_b7_relu\nI0821 06:49:15.073369 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073374 32405 net.cpp:165] Memory required for data: 665089500\nI0821 06:49:15.073377 32405 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.073387 32405 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.073393 32405 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:49:15.073400 32405 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:15.073410 32405 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:15.073456 32405 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.073467 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073472 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073477 32405 net.cpp:165] Memory required for data: 681473500\nI0821 06:49:15.073482 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:49:15.073493 32405 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:49:15.073499 32405 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:15.073510 32405 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:49:15.073827 32405 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:49:15.073840 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.073845 32405 net.cpp:165] Memory required for data: 689665500\nI0821 06:49:15.073854 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:49:15.073868 32405 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:49:15.073875 32405 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:49:15.073884 32405 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:49:15.074138 32405 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:49:15.074151 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.074156 32405 net.cpp:165] Memory required for data: 697857500\nI0821 06:49:15.074167 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:15.074182 32405 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:49:15.074187 32405 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:49:15.074195 32405 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.074251 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:15.074395 32405 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:49:15.074409 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.074414 32405 net.cpp:165] Memory required for data: 706049500\nI0821 06:49:15.074422 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:49:15.074430 32405 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:49:15.074436 32405 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:49:15.074446 32405 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.074456 32405 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:49:15.074463 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.074467 32405 net.cpp:165] Memory required for data: 714241500\nI0821 06:49:15.074472 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:49:15.074486 32405 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:49:15.074492 32405 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:49:15.074501 32405 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:49:15.074848 32405 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:49:15.074869 32405 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.074875 32405 net.cpp:165] Memory required for data: 722433500\nI0821 06:49:15.074884 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:49:15.074897 32405 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:49:15.074903 32405 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:49:15.074910 32405 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:49:15.075168 32405 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:49:15.075181 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075186 32405 net.cpp:165] Memory required for data: 730625500\nI0821 06:49:15.075197 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:15.075206 32405 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:49:15.075212 32405 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:49:15.075223 32405 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.075276 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:15.075417 32405 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:49:15.075430 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075435 32405 net.cpp:165] Memory required for data: 738817500\nI0821 06:49:15.075443 32405 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:49:15.075453 32405 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:49:15.075459 32405 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:49:15.075465 32405 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:15.075475 32405 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:49:15.075507 32405 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:49:15.075520 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075525 32405 net.cpp:165] Memory required for data: 747009500\nI0821 06:49:15.075529 32405 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:49:15.075537 32405 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:49:15.075542 32405 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:49:15.075549 32405 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.075558 32405 net.cpp:150] Setting up L1_b8_relu\nI0821 06:49:15.075565 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075577 32405 net.cpp:165] Memory required for data: 755201500\nI0821 06:49:15.075582 32405 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.075594 32405 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.075600 32405 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:49:15.075608 32405 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:15.075618 32405 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:15.075664 32405 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.075675 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075681 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.075686 32405 net.cpp:165] Memory required for data: 771585500\nI0821 06:49:15.075691 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:49:15.075702 32405 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:49:15.075708 32405 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:15.075721 32405 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:49:15.076051 32405 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:49:15.076066 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.076071 32405 net.cpp:165] Memory required for data: 779777500\nI0821 06:49:15.076081 32405 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:49:15.076092 32405 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:49:15.076098 32405 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:49:15.076107 32405 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:49:15.076350 32405 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:49:15.076362 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.076367 32405 net.cpp:165] Memory required for data: 787969500\nI0821 06:49:15.076377 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:15.076386 32405 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:49:15.076392 32405 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:49:15.076400 32405 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.076454 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:15.076598 32405 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:49:15.076611 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.076617 32405 net.cpp:165] Memory required for data: 796161500\nI0821 06:49:15.076625 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:49:15.076633 32405 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:49:15.076638 32405 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:49:15.076649 32405 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.076658 32405 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:49:15.076665 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.076670 32405 net.cpp:165] Memory required for data: 804353500\nI0821 06:49:15.076675 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:49:15.076685 32405 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:49:15.076691 32405 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:49:15.076702 32405 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:49:15.077031 32405 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:49:15.077045 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077050 32405 net.cpp:165] Memory required for data: 812545500\nI0821 06:49:15.077059 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:49:15.077067 32405 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:49:15.077074 32405 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:49:15.077085 32405 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:49:15.077337 32405 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:49:15.077353 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077358 32405 net.cpp:165] Memory required for data: 820737500\nI0821 06:49:15.077385 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:15.077397 32405 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:49:15.077404 32405 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:49:15.077411 32405 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.077466 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:15.077605 32405 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:49:15.077616 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077621 32405 net.cpp:165] Memory required for data: 828929500\nI0821 06:49:15.077630 32405 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:49:15.077639 32405 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:49:15.077646 32405 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:49:15.077651 32405 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:15.077662 32405 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:49:15.077693 32405 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:49:15.077702 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077708 32405 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:49:15.077713 32405 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:49:15.077720 32405 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:49:15.077725 32405 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:49:15.077735 32405 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.077745 32405 net.cpp:150] Setting up L1_b9_relu\nI0821 06:49:15.077751 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077756 32405 net.cpp:165] Memory required for data: 845313500\nI0821 06:49:15.077761 32405 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.077767 32405 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.077774 32405 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:49:15.077785 32405 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:15.077795 32405 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:15.077838 32405 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.077850 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077857 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.077867 32405 net.cpp:165] Memory required for data: 861697500\nI0821 06:49:15.077872 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:49:15.077884 32405 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:49:15.077890 32405 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:15.077901 32405 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:49:15.078217 32405 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:49:15.078230 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.078235 32405 net.cpp:165] Memory required for data: 
863745500\nI0821 06:49:15.078244 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:49:15.078253 32405 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:49:15.078259 32405 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:49:15.078270 32405 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:49:15.078506 32405 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:49:15.078519 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.078524 32405 net.cpp:165] Memory required for data: 865793500\nI0821 06:49:15.078536 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:15.078552 32405 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:49:15.078565 32405 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:49:15.078573 32405 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.078626 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:15.078768 32405 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:49:15.078781 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.078786 32405 net.cpp:165] Memory required for data: 867841500\nI0821 06:49:15.078794 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:49:15.078805 32405 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:49:15.078811 32405 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:49:15.078819 32405 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.078829 32405 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:49:15.078835 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.078840 32405 net.cpp:165] Memory required for data: 869889500\nI0821 06:49:15.078845 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:49:15.078858 32405 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:49:15.078871 32405 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:49:15.078883 32405 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:49:15.079198 32405 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:49:15.079212 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.079217 32405 net.cpp:165] Memory required for data: 871937500\nI0821 06:49:15.079226 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:49:15.079234 32405 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:49:15.079241 32405 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:49:15.079252 32405 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:49:15.079496 32405 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:49:15.079509 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.079514 32405 net.cpp:165] Memory required for data: 873985500\nI0821 06:49:15.079524 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:15.079535 32405 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:49:15.079541 32405 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:49:15.079548 32405 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.079602 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:15.079746 32405 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:49:15.079759 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.079764 32405 net.cpp:165] Memory required for data: 876033500\nI0821 06:49:15.079773 32405 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:49:15.079787 32405 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:49:15.079793 32405 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:15.079802 32405 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:49:15.079900 32405 net.cpp:150] Setting up L2_b1_pool\nI0821 06:49:15.079916 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.079922 32405 net.cpp:165] Memory required for data: 878081500\nI0821 06:49:15.079928 32405 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:49:15.079937 32405 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:49:15.079943 32405 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:49:15.079949 32405 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:49:15.079963 32405 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:49:15.079998 32405 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:49:15.080008 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.080013 32405 net.cpp:165] Memory required for data: 880129500\nI0821 06:49:15.080018 32405 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:49:15.080025 32405 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:49:15.080031 32405 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:49:15.080039 32405 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.080055 32405 net.cpp:150] Setting up L2_b1_relu\nI0821 06:49:15.080062 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.080067 32405 net.cpp:165] Memory required for data: 882177500\nI0821 06:49:15.080071 32405 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:49:15.080127 32405 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:49:15.080142 32405 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:49:15.082499 32405 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:49:15.082518 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.082523 32405 net.cpp:165] Memory required for data: 884225500\nI0821 06:49:15.082530 32405 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:49:15.082540 32405 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:49:15.082546 32405 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:49:15.082553 32405 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:49:15.082561 32405 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:49:15.082646 32405 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:49:15.082661 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.082666 32405 net.cpp:165] Memory required for data: 888321500\nI0821 06:49:15.082672 32405 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.082684 32405 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.082690 32405 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:49:15.082697 32405 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:15.082707 32405 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:15.082758 32405 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.082770 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.082777 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.082782 32405 net.cpp:165] Memory required for data: 896513500\nI0821 06:49:15.082787 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:49:15.082801 32405 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:49:15.082808 32405 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:15.082816 32405 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:49:15.084249 32405 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:49:15.084266 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.084272 32405 net.cpp:165] Memory required for data: 900609500\nI0821 06:49:15.084281 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:49:15.084292 32405 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:49:15.084298 32405 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:49:15.084311 32405 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:49:15.084558 32405 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:49:15.084571 32405 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:49:15.084576 32405 net.cpp:165] Memory required for data: 904705500\nI0821 06:49:15.084588 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:15.084601 32405 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:49:15.084607 32405 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:49:15.084615 32405 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.084671 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:15.084821 32405 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:49:15.084834 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.084839 32405 net.cpp:165] Memory required for data: 908801500\nI0821 06:49:15.084848 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:49:15.084866 32405 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:49:15.084872 32405 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:49:15.084880 32405 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.084898 32405 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:49:15.084906 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.084910 32405 net.cpp:165] Memory required for data: 912897500\nI0821 06:49:15.084915 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:49:15.084930 32405 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:49:15.084936 32405 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:49:15.084947 32405 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:49:15.085407 32405 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:49:15.085420 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.085425 32405 net.cpp:165] Memory required for data: 916993500\nI0821 06:49:15.085433 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:49:15.085443 32405 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:49:15.085449 
32405 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:49:15.085460 32405 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:49:15.085708 32405 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:49:15.085721 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.085726 32405 net.cpp:165] Memory required for data: 921089500\nI0821 06:49:15.085736 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:15.085748 32405 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:49:15.085754 32405 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:49:15.085762 32405 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.085817 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:15.085970 32405 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:49:15.085984 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.085989 32405 net.cpp:165] Memory required for data: 925185500\nI0821 06:49:15.085999 32405 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:49:15.086010 32405 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:49:15.086016 32405 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:49:15.086024 32405 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:15.086031 32405 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:49:15.086061 32405 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:49:15.086071 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.086076 32405 net.cpp:165] Memory required for data: 929281500\nI0821 06:49:15.086081 32405 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:49:15.086088 32405 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:49:15.086094 32405 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:49:15.086102 32405 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.086117 32405 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:49:15.086123 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.086128 32405 net.cpp:165] Memory required for data: 933377500\nI0821 06:49:15.086133 32405 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.086139 32405 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.086145 32405 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:49:15.086153 32405 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:15.086161 32405 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:15.086210 32405 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.086221 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.086228 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.086232 32405 net.cpp:165] Memory required for data: 941569500\nI0821 06:49:15.086237 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:49:15.086256 32405 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:49:15.086262 32405 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:15.086273 32405 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:49:15.086735 32405 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:49:15.086748 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.086753 32405 net.cpp:165] Memory required for data: 945665500\nI0821 06:49:15.086762 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:49:15.086771 32405 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:49:15.086777 32405 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:49:15.086788 32405 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:49:15.087039 32405 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:49:15.087052 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.087057 32405 net.cpp:165] Memory required for data: 949761500\nI0821 06:49:15.087067 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:15.087080 32405 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:49:15.087085 32405 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:49:15.087093 32405 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.087148 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:15.087293 32405 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:49:15.087306 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.087311 32405 net.cpp:165] Memory required for data: 953857500\nI0821 06:49:15.087319 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:49:15.087330 32405 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:49:15.087337 32405 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:49:15.087343 32405 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.087353 32405 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:49:15.087360 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.087364 32405 net.cpp:165] Memory required for data: 957953500\nI0821 06:49:15.087369 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:49:15.087383 32405 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:49:15.087389 32405 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:49:15.087399 32405 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:49:15.087855 32405 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:49:15.087875 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.087880 32405 net.cpp:165] Memory required for data: 962049500\nI0821 06:49:15.087889 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:49:15.087898 32405 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:49:15.087904 32405 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:49:15.087916 32405 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:49:15.088169 32405 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:49:15.088182 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088187 32405 net.cpp:165] Memory required for data: 966145500\nI0821 06:49:15.088197 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:15.088209 32405 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:49:15.088215 32405 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:49:15.088223 32405 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.088274 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:15.088423 32405 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:49:15.088435 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088439 32405 net.cpp:165] Memory required for data: 970241500\nI0821 06:49:15.088449 32405 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:49:15.088457 32405 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:49:15.088464 32405 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:49:15.088470 32405 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:15.088487 32405 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:49:15.088515 32405 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:49:15.088527 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088532 32405 net.cpp:165] Memory required for data: 974337500\nI0821 06:49:15.088538 32405 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:49:15.088559 32405 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:49:15.088565 32405 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:49:15.088573 32405 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.088582 32405 net.cpp:150] Setting up L2_b3_relu\nI0821 06:49:15.088589 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088594 32405 net.cpp:165] Memory required for data: 978433500\nI0821 06:49:15.088599 32405 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.088606 32405 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.088611 32405 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:49:15.088621 32405 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:15.088631 32405 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:15.088677 32405 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.088688 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088695 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.088699 32405 net.cpp:165] Memory required for data: 986625500\nI0821 06:49:15.088704 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:49:15.088721 32405 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:49:15.088727 32405 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:15.088737 32405 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:49:15.089206 32405 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:49:15.089221 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.089226 32405 net.cpp:165] Memory required for data: 990721500\nI0821 06:49:15.089233 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:49:15.089246 32405 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:49:15.089251 32405 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:49:15.089260 32405 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:49:15.089506 32405 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:49:15.089519 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.089524 32405 net.cpp:165] Memory required for data: 994817500\nI0821 06:49:15.089534 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:15.089543 32405 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:49:15.089550 32405 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:49:15.089560 32405 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.089614 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:15.089767 32405 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:49:15.089781 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.089785 32405 net.cpp:165] Memory required for data: 998913500\nI0821 06:49:15.089793 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:49:15.089802 32405 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:49:15.089807 32405 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:49:15.089818 32405 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.089828 32405 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:49:15.089834 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.089838 32405 net.cpp:165] Memory required for data: 1003009500\nI0821 06:49:15.089843 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:49:15.089869 32405 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:49:15.089877 32405 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:49:15.089886 32405 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:49:15.090348 32405 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:49:15.090363 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.090368 32405 net.cpp:165] Memory required for data: 1007105500\nI0821 06:49:15.090376 32405 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:49:15.090385 32405 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:49:15.090391 32405 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:49:15.090402 32405 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:49:15.090644 32405 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:49:15.090657 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.090662 32405 net.cpp:165] Memory required for data: 1011201500\nI0821 06:49:15.090672 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:15.090682 32405 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:49:15.090687 32405 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:49:15.090694 32405 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.090751 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:15.090903 32405 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:49:15.090919 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.090924 32405 net.cpp:165] Memory required for data: 1015297500\nI0821 06:49:15.090934 32405 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:49:15.090943 32405 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:49:15.090950 32405 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:49:15.090955 32405 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:15.090963 32405 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:49:15.090992 32405 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:49:15.091002 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091006 32405 net.cpp:165] Memory required for data: 1019393500\nI0821 06:49:15.091012 32405 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:49:15.091019 32405 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:49:15.091024 32405 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:49:15.091034 32405 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.091044 32405 net.cpp:150] Setting up L2_b4_relu\nI0821 06:49:15.091051 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091055 32405 net.cpp:165] Memory required for data: 1023489500\nI0821 06:49:15.091060 32405 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.091068 32405 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.091073 32405 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:49:15.091083 32405 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:15.091092 32405 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:15.091135 32405 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.091146 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091153 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091157 32405 net.cpp:165] Memory required for data: 1031681500\nI0821 06:49:15.091163 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:49:15.091177 32405 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:49:15.091183 32405 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:15.091192 32405 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:49:15.091656 32405 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:49:15.091676 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091681 32405 net.cpp:165] Memory required for data: 1035777500\nI0821 06:49:15.091691 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:49:15.091702 32405 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:49:15.091708 32405 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:49:15.091717 32405 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:49:15.091972 32405 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:49:15.091986 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.091991 32405 net.cpp:165] Memory required for data: 1039873500\nI0821 06:49:15.092001 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:15.092010 32405 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:49:15.092016 32405 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:49:15.092027 32405 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.092082 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:15.092231 32405 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:49:15.092244 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.092249 32405 net.cpp:165] Memory required for data: 1043969500\nI0821 06:49:15.092257 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:49:15.092265 32405 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:49:15.092272 32405 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:49:15.092279 32405 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.092288 32405 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:49:15.092295 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.092299 32405 net.cpp:165] Memory required for data: 1048065500\nI0821 06:49:15.092304 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:49:15.092319 32405 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:49:15.092324 32405 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:49:15.092335 32405 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:49:15.092794 32405 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:49:15.092808 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.092813 32405 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:49:15.092821 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:49:15.092833 32405 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:49:15.092840 32405 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:49:15.092850 32405 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:49:15.093102 32405 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:49:15.093116 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093122 32405 net.cpp:165] Memory required for data: 1056257500\nI0821 06:49:15.093132 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:15.093140 32405 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:49:15.093147 32405 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:49:15.093154 32405 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.093210 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:15.093356 32405 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:49:15.093369 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093374 32405 net.cpp:165] Memory required for data: 1060353500\nI0821 06:49:15.093382 32405 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:49:15.093394 32405 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:49:15.093400 32405 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:49:15.093407 32405 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:15.093415 32405 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:49:15.093444 32405 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:49:15.093453 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093464 32405 net.cpp:165] Memory required for data: 1064449500\nI0821 06:49:15.093469 32405 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:49:15.093477 32405 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:49:15.093483 32405 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:49:15.093492 32405 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.093502 32405 net.cpp:150] Setting up L2_b5_relu\nI0821 06:49:15.093509 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093513 32405 net.cpp:165] Memory required for data: 1068545500\nI0821 06:49:15.093518 32405 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.093525 32405 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.093530 32405 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:49:15.093540 32405 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:15.093550 32405 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:15.093593 32405 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.093605 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093612 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.093616 32405 net.cpp:165] Memory required for data: 1076737500\nI0821 06:49:15.093621 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:49:15.093636 32405 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:49:15.093642 32405 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:15.093652 32405 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:49:15.094128 32405 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:49:15.094142 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.094147 32405 net.cpp:165] Memory required for data: 1080833500\nI0821 06:49:15.094156 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:49:15.094166 32405 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:49:15.094174 32405 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:49:15.094183 32405 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:49:15.094429 32405 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:49:15.094442 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.094447 32405 net.cpp:165] Memory required for data: 1084929500\nI0821 06:49:15.094457 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:15.094466 32405 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:49:15.094472 32405 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:49:15.094480 32405 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.094537 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:15.094683 32405 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:49:15.094698 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.094703 32405 net.cpp:165] Memory required for data: 1089025500\nI0821 06:49:15.094712 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:49:15.094720 32405 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:49:15.094727 32405 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:49:15.094733 32405 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.094743 32405 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:49:15.094749 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.094754 32405 net.cpp:165] Memory required for data: 1093121500\nI0821 06:49:15.094758 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:49:15.094772 32405 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:49:15.094779 32405 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:49:15.094789 32405 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:49:15.095260 32405 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:49:15.095280 32405 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.095285 32405 net.cpp:165] Memory required for data: 1097217500\nI0821 06:49:15.095294 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:49:15.095306 32405 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:49:15.095314 32405 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:49:15.095324 32405 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:49:15.095571 32405 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:49:15.095582 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.095587 32405 net.cpp:165] Memory required for data: 1101313500\nI0821 06:49:15.095598 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:15.095607 32405 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:49:15.095613 32405 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:49:15.095620 32405 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.095679 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:15.095825 32405 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:49:15.095839 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.095844 32405 net.cpp:165] Memory required for data: 1105409500\nI0821 06:49:15.095852 32405 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:49:15.095870 32405 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:49:15.095876 32405 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:49:15.095885 32405 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:15.095891 32405 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:49:15.095919 32405 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:49:15.095928 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.095932 32405 net.cpp:165] Memory required for data: 1109505500\nI0821 06:49:15.095937 32405 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:49:15.095948 32405 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:49:15.095954 32405 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:49:15.095962 32405 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.095970 32405 net.cpp:150] Setting up L2_b6_relu\nI0821 06:49:15.095978 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.095981 32405 net.cpp:165] Memory required for data: 1113601500\nI0821 06:49:15.095986 32405 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.095993 32405 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.095999 32405 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:49:15.096005 32405 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:15.096015 32405 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:15.096062 32405 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.096073 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.096081 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.096084 32405 net.cpp:165] Memory required for data: 1121793500\nI0821 06:49:15.096089 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:49:15.096103 32405 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:49:15.096109 32405 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:15.096118 32405 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:49:15.096587 32405 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:49:15.096601 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.096606 32405 net.cpp:165] Memory required for data: 1125889500\nI0821 06:49:15.096616 32405 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:49:15.096627 32405 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:49:15.096639 32405 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:49:15.096652 32405 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:49:15.096912 32405 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:49:15.096925 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.096930 32405 net.cpp:165] Memory required for data: 1129985500\nI0821 06:49:15.096940 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:15.096949 32405 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:49:15.096956 32405 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:49:15.096963 32405 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.097021 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:15.097170 32405 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:49:15.097183 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.097188 32405 net.cpp:165] Memory required for data: 1134081500\nI0821 06:49:15.097198 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:49:15.097208 32405 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:49:15.097214 32405 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:49:15.097221 32405 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.097230 32405 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:49:15.097237 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.097242 32405 net.cpp:165] Memory required for data: 1138177500\nI0821 06:49:15.097246 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:49:15.097261 32405 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:49:15.097267 32405 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:49:15.097280 32405 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:49:15.097743 32405 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:49:15.097757 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.097761 32405 net.cpp:165] Memory required for data: 1142273500\nI0821 06:49:15.097770 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:49:15.097782 32405 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:49:15.097789 32405 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:49:15.097800 32405 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:49:15.098062 32405 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:49:15.098076 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098080 32405 net.cpp:165] Memory required for data: 1146369500\nI0821 06:49:15.098091 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:15.098100 32405 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:49:15.098106 32405 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:49:15.098114 32405 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.098171 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:15.098316 32405 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:49:15.098328 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098333 32405 net.cpp:165] Memory required for data: 1150465500\nI0821 06:49:15.098342 32405 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:49:15.098354 32405 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:49:15.098361 32405 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:49:15.098367 32405 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:15.098374 32405 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:49:15.098402 32405 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:49:15.098410 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098414 32405 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:49:15.098420 32405 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:49:15.098431 32405 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:49:15.098436 32405 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:49:15.098444 32405 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.098459 32405 net.cpp:150] Setting up L2_b7_relu\nI0821 06:49:15.098467 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098471 32405 net.cpp:165] Memory required for data: 1158657500\nI0821 06:49:15.098476 32405 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.098484 32405 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.098489 32405 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:49:15.098495 32405 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:15.098505 32405 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:15.098552 32405 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.098564 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098570 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.098575 32405 net.cpp:165] Memory required for data: 1166849500\nI0821 06:49:15.098580 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:49:15.098593 32405 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:49:15.098600 32405 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:15.098609 32405 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:49:15.099086 32405 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:49:15.099102 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.099107 32405 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:49:15.099114 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:49:15.099126 32405 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:49:15.099133 32405 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:49:15.099143 32405 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:49:15.099391 32405 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:49:15.099405 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.099409 32405 net.cpp:165] Memory required for data: 1175041500\nI0821 06:49:15.099419 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:15.099428 32405 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:49:15.099434 32405 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:49:15.099442 32405 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.099498 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:15.099651 32405 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:49:15.099664 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.099669 32405 net.cpp:165] Memory required for data: 1179137500\nI0821 06:49:15.099678 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:49:15.099689 32405 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:49:15.099695 32405 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:49:15.099702 32405 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.099712 32405 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:49:15.099719 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.099723 32405 net.cpp:165] Memory required for data: 1183233500\nI0821 06:49:15.099727 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:49:15.099741 32405 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:49:15.099747 32405 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:49:15.099756 32405 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:49:15.100230 32405 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:49:15.100245 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.100250 32405 net.cpp:165] Memory required for data: 1187329500\nI0821 06:49:15.100258 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:49:15.100270 32405 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:49:15.100286 32405 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:49:15.100294 32405 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:49:15.100548 32405 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:49:15.100564 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.100569 32405 net.cpp:165] Memory required for data: 1191425500\nI0821 06:49:15.100579 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:15.100589 32405 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:49:15.100594 32405 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:49:15.100601 32405 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.100657 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:15.100807 32405 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:49:15.100821 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.100826 32405 net.cpp:165] Memory required for data: 1195521500\nI0821 06:49:15.100833 32405 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:49:15.100842 32405 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:49:15.100848 32405 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:49:15.100855 32405 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:15.100872 32405 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:49:15.100900 32405 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:49:15.100910 32405 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:15.100914 32405 net.cpp:165] Memory required for data: 1199617500\nI0821 06:49:15.100920 32405 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:49:15.100930 32405 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:49:15.100936 32405 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:49:15.100944 32405 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.100953 32405 net.cpp:150] Setting up L2_b8_relu\nI0821 06:49:15.100960 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.100965 32405 net.cpp:165] Memory required for data: 1203713500\nI0821 06:49:15.100970 32405 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.100976 32405 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.100981 32405 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:49:15.100988 32405 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:15.101011 32405 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:15.101061 32405 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.101073 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.101080 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.101085 32405 net.cpp:165] Memory required for data: 1211905500\nI0821 06:49:15.101089 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:49:15.101105 32405 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:49:15.101111 32405 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:15.101120 32405 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:49:15.101593 32405 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:49:15.101606 32405 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:15.101610 32405 net.cpp:165] Memory required for data: 1216001500\nI0821 06:49:15.101619 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:49:15.101631 32405 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:49:15.101639 32405 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:49:15.101646 32405 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:49:15.101903 32405 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:49:15.101919 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.101924 32405 net.cpp:165] Memory required for data: 1220097500\nI0821 06:49:15.101943 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:15.101951 32405 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:49:15.101958 32405 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:49:15.101965 32405 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.102023 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:15.102177 32405 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:49:15.102190 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.102195 32405 net.cpp:165] Memory required for data: 1224193500\nI0821 06:49:15.102203 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:49:15.102211 32405 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:49:15.102217 32405 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:49:15.102227 32405 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.102237 32405 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:49:15.102244 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.102248 32405 net.cpp:165] Memory required for data: 1228289500\nI0821 06:49:15.102253 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:49:15.102264 32405 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:49:15.102269 32405 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:49:15.102280 32405 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:49:15.102742 32405 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:49:15.102756 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.102761 32405 net.cpp:165] Memory required for data: 1232385500\nI0821 06:49:15.102771 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:49:15.102779 32405 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:49:15.102785 32405 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:49:15.102797 32405 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:49:15.103060 32405 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:49:15.103075 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103080 32405 net.cpp:165] Memory required for data: 1236481500\nI0821 06:49:15.103127 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:15.103139 32405 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:49:15.103145 32405 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:49:15.103153 32405 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.103214 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:15.103361 32405 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:49:15.103377 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103382 32405 net.cpp:165] Memory required for data: 1240577500\nI0821 06:49:15.103392 32405 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:49:15.103401 32405 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:49:15.103407 32405 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:49:15.103415 32405 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:15.103425 32405 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:49:15.103452 32405 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:49:15.103461 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103466 32405 net.cpp:165] Memory required for data: 1244673500\nI0821 06:49:15.103471 32405 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:49:15.103478 32405 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:49:15.103483 32405 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:49:15.103493 32405 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.103503 32405 net.cpp:150] Setting up L2_b9_relu\nI0821 06:49:15.103510 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103514 32405 net.cpp:165] Memory required for data: 1248769500\nI0821 06:49:15.103519 32405 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.103533 32405 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.103538 32405 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:49:15.103549 32405 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:15.103559 32405 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:15.103603 32405 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.103615 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103621 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.103626 32405 net.cpp:165] Memory required for data: 1256961500\nI0821 06:49:15.103631 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:49:15.103644 32405 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:49:15.103652 32405 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:15.103660 32405 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:49:15.104146 32405 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:49:15.104161 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.104166 32405 net.cpp:165] Memory required for data: 1257985500\nI0821 06:49:15.104174 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:49:15.104187 32405 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:49:15.104192 32405 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:49:15.104200 32405 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:49:15.104462 32405 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:49:15.104475 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.104480 32405 net.cpp:165] Memory required for data: 1259009500\nI0821 06:49:15.104490 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:15.104501 32405 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:49:15.104507 32405 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:49:15.104516 32405 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.104573 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:15.104729 32405 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:49:15.104743 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.104748 32405 net.cpp:165] Memory required for data: 1260033500\nI0821 06:49:15.104755 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:49:15.104768 32405 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:49:15.104774 32405 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:49:15.104782 32405 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.104794 32405 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:49:15.104801 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.104805 32405 net.cpp:165] Memory required for data: 1261057500\nI0821 06:49:15.104810 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:49:15.104821 32405 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:49:15.104827 32405 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:49:15.104838 32405 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:49:15.105315 32405 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:49:15.105330 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.105335 32405 net.cpp:165] Memory required for data: 1262081500\nI0821 06:49:15.105343 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:49:15.105352 32405 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:49:15.105360 32405 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:49:15.105370 32405 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:49:15.105633 32405 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:49:15.105646 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.105650 32405 net.cpp:165] Memory required for data: 1263105500\nI0821 06:49:15.105667 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:15.105676 32405 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:49:15.105684 32405 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:49:15.105690 32405 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.105748 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:15.105912 32405 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:49:15.105929 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.105934 32405 net.cpp:165] Memory required for data: 1264129500\nI0821 06:49:15.105943 32405 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:49:15.105952 32405 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:49:15.105958 32405 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:15.105967 32405 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:49:15.106004 32405 net.cpp:150] Setting up L3_b1_pool\nI0821 06:49:15.106014 32405 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:49:15.106019 32405 net.cpp:165] Memory required for data: 1265153500\nI0821 06:49:15.106024 32405 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:49:15.106032 32405 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:49:15.106039 32405 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:49:15.106045 32405 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:49:15.106052 32405 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:49:15.106086 32405 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:49:15.106096 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.106101 32405 net.cpp:165] Memory required for data: 1266177500\nI0821 06:49:15.106106 32405 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:49:15.106112 32405 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:49:15.106118 32405 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:49:15.106125 32405 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.106134 32405 net.cpp:150] Setting up L3_b1_relu\nI0821 06:49:15.106142 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.106145 32405 net.cpp:165] Memory required for data: 1267201500\nI0821 06:49:15.106150 32405 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:49:15.106159 32405 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:49:15.106169 32405 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:49:15.107383 32405 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:49:15.107401 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.107406 32405 net.cpp:165] Memory required for data: 1268225500\nI0821 06:49:15.107412 32405 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:49:15.107425 32405 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:49:15.107431 32405 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:49:15.107439 32405 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:49:15.107447 32405 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:49:15.107491 32405 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:49:15.107503 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.107508 32405 net.cpp:165] Memory required for data: 1270273500\nI0821 06:49:15.107513 32405 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.107522 32405 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.107527 32405 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:49:15.107537 32405 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:15.107547 32405 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:15.107595 32405 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.107609 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.107616 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.107628 32405 net.cpp:165] Memory required for data: 1274369500\nI0821 06:49:15.107633 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:49:15.107648 32405 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:49:15.107656 32405 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:15.107664 32405 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:49:15.109635 32405 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:49:15.109652 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.109658 32405 net.cpp:165] Memory required for data: 1276417500\nI0821 06:49:15.109668 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:49:15.109680 32405 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:49:15.109688 32405 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:49:15.109696 32405 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:49:15.109971 32405 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:49:15.109984 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.109989 32405 net.cpp:165] Memory required for data: 1278465500\nI0821 06:49:15.110000 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:15.110013 32405 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:49:15.110019 32405 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:49:15.110030 32405 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.110087 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:15.110246 32405 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:49:15.110260 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.110265 32405 net.cpp:165] Memory required for data: 1280513500\nI0821 06:49:15.110275 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:49:15.110282 32405 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:49:15.110288 32405 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:49:15.110299 32405 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.110309 32405 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:49:15.110316 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.110321 32405 net.cpp:165] Memory required for data: 1282561500\nI0821 06:49:15.110325 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:49:15.110339 32405 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:49:15.110345 32405 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:49:15.110354 32405 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:49:15.111380 32405 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:49:15.111395 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.111400 32405 net.cpp:165] Memory required for data: 1284609500\nI0821 06:49:15.111409 32405 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:49:15.111421 32405 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:49:15.111428 32405 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:49:15.111436 32405 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:49:15.111701 32405 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:49:15.111714 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.111719 32405 net.cpp:165] Memory required for data: 1286657500\nI0821 06:49:15.111730 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:15.111738 32405 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:49:15.111744 32405 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:49:15.111752 32405 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.111812 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:15.111977 32405 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:49:15.111991 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.111996 32405 net.cpp:165] Memory required for data: 1288705500\nI0821 06:49:15.112005 32405 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:49:15.112015 32405 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:49:15.112028 32405 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:49:15.112036 32405 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:15.112049 32405 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:49:15.112084 32405 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:49:15.112097 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.112102 32405 net.cpp:165] Memory required for data: 1290753500\nI0821 06:49:15.112107 32405 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:49:15.112114 32405 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:49:15.112120 32405 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:49:15.112128 32405 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.112136 32405 net.cpp:150] Setting up L3_b2_relu\nI0821 06:49:15.112143 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.112148 32405 net.cpp:165] Memory required for data: 1292801500\nI0821 06:49:15.112152 32405 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.112162 32405 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.112169 32405 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:49:15.112175 32405 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:15.112185 32405 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:15.112231 32405 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.112247 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.112254 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.112258 32405 net.cpp:165] Memory required for data: 1296897500\nI0821 06:49:15.112263 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:49:15.112275 32405 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:49:15.112282 32405 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:15.112290 32405 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:49:15.113307 32405 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:49:15.113325 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.113330 32405 net.cpp:165] Memory required for data: 1298945500\nI0821 06:49:15.113339 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:49:15.113348 32405 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:49:15.113355 32405 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:49:15.113366 32405 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:49:15.113626 32405 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:49:15.113639 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.113644 32405 net.cpp:165] Memory required for data: 1300993500\nI0821 06:49:15.113654 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:15.113667 32405 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:49:15.113674 32405 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:49:15.113682 32405 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.113739 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:15.113901 32405 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:49:15.113915 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.113920 32405 net.cpp:165] Memory required for data: 1303041500\nI0821 06:49:15.113929 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:49:15.113937 32405 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:49:15.113943 32405 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:49:15.113953 32405 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.113963 32405 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:49:15.113970 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.113981 32405 net.cpp:165] Memory required for data: 1305089500\nI0821 06:49:15.113986 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:49:15.114001 32405 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:49:15.114006 32405 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:49:15.114015 32405 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:49:15.115027 32405 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:49:15.115042 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115047 32405 net.cpp:165] Memory required for data: 1307137500\nI0821 06:49:15.115056 32405 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:49:15.115070 32405 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:49:15.115077 32405 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:49:15.115085 32405 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:49:15.115351 32405 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:49:15.115365 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115370 32405 net.cpp:165] Memory required for data: 1309185500\nI0821 06:49:15.115380 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:15.115388 32405 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:49:15.115394 32405 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:49:15.115401 32405 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.115461 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:15.115618 32405 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:49:15.115631 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115636 32405 net.cpp:165] Memory required for data: 1311233500\nI0821 06:49:15.115644 32405 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:49:15.115653 32405 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:49:15.115660 32405 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:49:15.115666 32405 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:15.115679 32405 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:49:15.115712 32405 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:49:15.115726 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115731 32405 net.cpp:165] Memory required for data: 1313281500\nI0821 06:49:15.115737 32405 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:49:15.115744 32405 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:49:15.115751 32405 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:49:15.115757 32405 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.115767 32405 net.cpp:150] Setting up L3_b3_relu\nI0821 06:49:15.115773 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115777 32405 net.cpp:165] Memory required for data: 1315329500\nI0821 06:49:15.115782 32405 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.115792 32405 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.115797 32405 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:49:15.115805 32405 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:15.115814 32405 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:15.115869 32405 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.115881 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115888 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.115892 32405 net.cpp:165] Memory required for data: 1319425500\nI0821 06:49:15.115897 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:49:15.115908 32405 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:49:15.115916 32405 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:15.115926 32405 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:49:15.116957 32405 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:49:15.116972 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.116977 32405 net.cpp:165] Memory required for data: 1321473500\nI0821 06:49:15.116986 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:49:15.116996 32405 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:49:15.117002 32405 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:49:15.117013 32405 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:49:15.117283 32405 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:49:15.117300 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.117305 32405 net.cpp:165] Memory required for data: 1323521500\nI0821 06:49:15.117316 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:15.117323 32405 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:49:15.117331 32405 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:49:15.117337 32405 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.117395 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:15.117552 32405 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:49:15.117564 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.117569 32405 net.cpp:165] Memory required for data: 1325569500\nI0821 06:49:15.117578 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:49:15.117586 32405 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:49:15.117593 32405 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:49:15.117602 32405 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.117612 32405 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:49:15.117619 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.117624 32405 net.cpp:165] Memory required for data: 1327617500\nI0821 06:49:15.117628 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:49:15.117642 32405 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:49:15.117648 32405 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:49:15.117656 32405 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:49:15.118681 32405 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:49:15.118696 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.118701 32405 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:49:15.118710 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:49:15.118722 32405 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:49:15.118728 32405 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:49:15.118739 32405 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:49:15.119014 32405 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:49:15.119027 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119032 32405 net.cpp:165] Memory required for data: 1331713500\nI0821 06:49:15.119042 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:15.119050 32405 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:49:15.119057 32405 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:49:15.119067 32405 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.119127 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:15.119290 32405 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:49:15.119303 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119308 32405 net.cpp:165] Memory required for data: 1333761500\nI0821 06:49:15.119318 32405 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:49:15.119326 32405 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:49:15.119333 32405 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:49:15.119339 32405 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:15.119350 32405 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:49:15.119392 32405 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:49:15.119411 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119416 32405 net.cpp:165] Memory required for data: 1335809500\nI0821 06:49:15.119422 32405 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:49:15.119429 32405 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:49:15.119436 32405 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:49:15.119443 32405 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.119452 32405 net.cpp:150] Setting up L3_b4_relu\nI0821 06:49:15.119459 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119464 32405 net.cpp:165] Memory required for data: 1337857500\nI0821 06:49:15.119468 32405 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.119478 32405 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.119484 32405 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:49:15.119491 32405 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:15.119501 32405 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:15.119551 32405 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.119562 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119570 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.119573 32405 net.cpp:165] Memory required for data: 1341953500\nI0821 06:49:15.119578 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:49:15.119590 32405 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:49:15.119596 32405 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:15.119607 32405 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:49:15.120636 32405 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:49:15.120651 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.120656 32405 net.cpp:165] Memory required for data: 1344001500\nI0821 06:49:15.120664 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:49:15.120674 32405 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:49:15.120681 32405 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:49:15.120692 32405 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:49:15.121932 32405 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:49:15.121949 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.121954 32405 net.cpp:165] Memory required for data: 1346049500\nI0821 06:49:15.121965 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:15.121975 32405 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:49:15.121982 32405 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:49:15.121994 32405 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.122056 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:15.122216 32405 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:49:15.122231 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.122236 32405 net.cpp:165] Memory required for data: 1348097500\nI0821 06:49:15.122243 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:49:15.122252 32405 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:49:15.122258 32405 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:49:15.122268 32405 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.122279 32405 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:49:15.122287 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.122290 32405 net.cpp:165] Memory required for data: 1350145500\nI0821 06:49:15.122295 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:49:15.122309 32405 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:49:15.122315 32405 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:49:15.122324 32405 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:49:15.124312 32405 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:49:15.124330 32405 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:15.124336 32405 net.cpp:165] Memory required for data: 1352193500\nI0821 06:49:15.124344 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:49:15.124357 32405 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:49:15.124364 32405 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:49:15.124375 32405 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:49:15.124660 32405 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:49:15.124673 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.124678 32405 net.cpp:165] Memory required for data: 1354241500\nI0821 06:49:15.124689 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:15.124698 32405 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:49:15.124706 32405 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:49:15.124712 32405 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.124773 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:15.124938 32405 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:49:15.124953 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.124958 32405 net.cpp:165] Memory required for data: 1356289500\nI0821 06:49:15.124966 32405 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:49:15.124976 32405 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:49:15.124982 32405 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:49:15.124989 32405 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:15.125000 32405 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:49:15.125035 32405 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:49:15.125048 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.125053 32405 net.cpp:165] Memory required for data: 1358337500\nI0821 06:49:15.125058 32405 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:49:15.125066 32405 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:49:15.125072 32405 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:49:15.125079 32405 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.125088 32405 net.cpp:150] Setting up L3_b5_relu\nI0821 06:49:15.125095 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.125100 32405 net.cpp:165] Memory required for data: 1360385500\nI0821 06:49:15.125104 32405 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.125114 32405 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.125120 32405 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:49:15.125128 32405 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:15.125138 32405 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:15.125185 32405 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.125196 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.125203 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.125207 32405 net.cpp:165] Memory required for data: 1364481500\nI0821 06:49:15.125212 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:49:15.125224 32405 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:49:15.125231 32405 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:15.125242 32405 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:49:15.126262 32405 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:49:15.126277 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.126282 32405 net.cpp:165] Memory required for data: 1366529500\nI0821 06:49:15.126291 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:49:15.126301 
32405 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:49:15.126314 32405 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:49:15.126327 32405 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:49:15.126586 32405 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:49:15.126601 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.126606 32405 net.cpp:165] Memory required for data: 1368577500\nI0821 06:49:15.126617 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:15.126626 32405 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:49:15.126632 32405 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:49:15.126639 32405 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.126698 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:15.126854 32405 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:49:15.126873 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.126878 32405 net.cpp:165] Memory required for data: 1370625500\nI0821 06:49:15.126888 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:49:15.126895 32405 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:49:15.126904 32405 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:49:15.126912 32405 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.126922 32405 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:49:15.126929 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.126933 32405 net.cpp:165] Memory required for data: 1372673500\nI0821 06:49:15.126938 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:49:15.126951 32405 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:49:15.126957 32405 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:49:15.126966 32405 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:49:15.127980 32405 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:49:15.127995 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128000 32405 net.cpp:165] Memory required for data: 1374721500\nI0821 06:49:15.128008 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:49:15.128021 32405 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:49:15.128026 32405 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:49:15.128037 32405 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:49:15.128295 32405 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:49:15.128309 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128314 32405 net.cpp:165] Memory required for data: 1376769500\nI0821 06:49:15.128324 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:15.128332 32405 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:49:15.128338 32405 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:49:15.128350 32405 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.128407 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:15.128561 32405 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:49:15.128574 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128579 32405 net.cpp:165] Memory required for data: 1378817500\nI0821 06:49:15.128588 32405 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:49:15.128597 32405 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:49:15.128603 32405 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:49:15.128610 32405 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:15.128620 32405 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:49:15.128657 32405 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:49:15.128669 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128674 32405 net.cpp:165] Memory required for data: 1380865500\nI0821 06:49:15.128679 32405 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:49:15.128685 32405 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:49:15.128691 32405 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:49:15.128705 32405 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.128718 32405 net.cpp:150] Setting up L3_b6_relu\nI0821 06:49:15.128726 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128729 32405 net.cpp:165] Memory required for data: 1382913500\nI0821 06:49:15.128734 32405 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.128741 32405 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.128746 32405 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:49:15.128753 32405 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:15.128762 32405 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:15.128811 32405 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.128823 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128829 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.128834 32405 net.cpp:165] Memory required for data: 1387009500\nI0821 06:49:15.128839 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:49:15.128849 32405 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:49:15.128856 32405 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:15.128875 32405 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:49:15.129899 32405 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:49:15.129912 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.129917 32405 net.cpp:165] Memory required for data: 1389057500\nI0821 06:49:15.129926 32405 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:49:15.129935 32405 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:49:15.129941 32405 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:49:15.129952 32405 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:49:15.130218 32405 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:49:15.130234 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.130239 32405 net.cpp:165] Memory required for data: 1391105500\nI0821 06:49:15.130249 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:15.130257 32405 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:49:15.130264 32405 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:49:15.130271 32405 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.130328 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:15.130484 32405 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:49:15.130497 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.130502 32405 net.cpp:165] Memory required for data: 1393153500\nI0821 06:49:15.130511 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:49:15.130547 32405 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:49:15.130554 32405 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:49:15.130563 32405 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.130573 32405 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:49:15.130580 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.130584 32405 net.cpp:165] Memory required for data: 1395201500\nI0821 06:49:15.130589 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:49:15.130601 32405 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:49:15.130606 32405 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:49:15.130619 32405 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:49:15.131638 32405 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:49:15.131652 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.131657 32405 net.cpp:165] Memory required for data: 1397249500\nI0821 06:49:15.131665 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:49:15.131675 32405 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:49:15.131687 32405 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:49:15.131700 32405 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:49:15.131971 32405 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:49:15.131988 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.131992 32405 net.cpp:165] Memory required for data: 1399297500\nI0821 06:49:15.132002 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:15.132011 32405 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:49:15.132017 32405 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:49:15.132025 32405 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.132082 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:15.132242 32405 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:49:15.132256 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.132261 32405 net.cpp:165] Memory required for data: 1401345500\nI0821 06:49:15.132269 32405 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:49:15.132282 32405 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:49:15.132288 32405 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:49:15.132295 32405 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:15.132302 32405 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:49:15.132340 32405 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:49:15.132351 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.132356 32405 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:49:15.132361 32405 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:49:15.132369 32405 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:49:15.132375 32405 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:49:15.132381 32405 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.132390 32405 net.cpp:150] Setting up L3_b7_relu\nI0821 06:49:15.132397 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.132402 32405 net.cpp:165] Memory required for data: 1405441500\nI0821 06:49:15.132406 32405 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.132413 32405 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.132418 32405 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:49:15.132428 32405 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:15.132438 32405 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:15.132484 32405 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.132495 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.132503 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.132506 32405 net.cpp:165] Memory required for data: 1409537500\nI0821 06:49:15.132511 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:49:15.132525 32405 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:49:15.132532 32405 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:15.132541 32405 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:49:15.133558 32405 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:49:15.133572 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.133576 32405 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:49:15.133585 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:49:15.133597 32405 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:49:15.133605 32405 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:49:15.133613 32405 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:49:15.133883 32405 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:49:15.133898 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.133903 32405 net.cpp:165] Memory required for data: 1413633500\nI0821 06:49:15.133919 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:15.133931 32405 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:49:15.133939 32405 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:49:15.133945 32405 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.134004 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:15.134161 32405 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:49:15.134174 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.134179 32405 net.cpp:165] Memory required for data: 1415681500\nI0821 06:49:15.134187 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:49:15.134198 32405 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:49:15.134204 32405 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:49:15.134212 32405 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.134222 32405 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:49:15.134228 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.134233 32405 net.cpp:165] Memory required for data: 1417729500\nI0821 06:49:15.134238 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:49:15.134251 32405 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:49:15.134258 32405 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:49:15.134268 32405 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:49:15.135289 32405 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:49:15.135304 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.135309 32405 net.cpp:165] Memory required for data: 1419777500\nI0821 06:49:15.135318 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:49:15.135326 32405 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:49:15.135334 32405 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:49:15.135346 32405 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:49:15.135610 32405 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:49:15.135627 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.135632 32405 net.cpp:165] Memory required for data: 1421825500\nI0821 06:49:15.135641 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:15.135650 32405 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:49:15.135656 32405 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:49:15.135663 32405 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.135720 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:15.135882 32405 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:49:15.135896 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.135901 32405 net.cpp:165] Memory required for data: 1423873500\nI0821 06:49:15.135910 32405 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:49:15.135921 32405 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:49:15.135927 32405 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:49:15.135934 32405 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:15.135942 32405 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:49:15.135978 32405 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:49:15.135989 32405 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:15.135994 32405 net.cpp:165] Memory required for data: 1425921500\nI0821 06:49:15.135999 32405 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:49:15.136008 32405 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:49:15.136013 32405 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:49:15.136019 32405 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.136029 32405 net.cpp:150] Setting up L3_b8_relu\nI0821 06:49:15.136035 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.136040 32405 net.cpp:165] Memory required for data: 1427969500\nI0821 06:49:15.136044 32405 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.136059 32405 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.136065 32405 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:49:15.136075 32405 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:15.136085 32405 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:15.136131 32405 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.136142 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.136148 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.136152 32405 net.cpp:165] Memory required for data: 1432065500\nI0821 06:49:15.136157 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:49:15.136173 32405 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:49:15.136178 32405 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:15.136188 32405 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:49:15.138185 32405 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:49:15.138201 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:49:15.138207 32405 net.cpp:165] Memory required for data: 1434113500\nI0821 06:49:15.138216 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:49:15.138229 32405 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:49:15.138236 32405 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:49:15.138244 32405 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:49:15.138510 32405 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:49:15.138523 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.138528 32405 net.cpp:165] Memory required for data: 1436161500\nI0821 06:49:15.138540 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:15.138548 32405 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:49:15.138555 32405 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:49:15.138561 32405 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.138622 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:15.138782 32405 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:49:15.138797 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.138801 32405 net.cpp:165] Memory required for data: 1438209500\nI0821 06:49:15.138810 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:49:15.138819 32405 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:49:15.138825 32405 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:49:15.138833 32405 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.138842 32405 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:49:15.138849 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.138854 32405 net.cpp:165] Memory required for data: 1440257500\nI0821 06:49:15.138859 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:49:15.138880 32405 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:49:15.138886 32405 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:49:15.138897 32405 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:49:15.139945 32405 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:49:15.139960 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.139964 32405 net.cpp:165] Memory required for data: 1442305500\nI0821 06:49:15.139973 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:49:15.139986 32405 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:49:15.139992 32405 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:49:15.140000 32405 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:49:15.140269 32405 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:49:15.140282 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.140286 32405 net.cpp:165] Memory required for data: 1444353500\nI0821 06:49:15.140305 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:15.140317 32405 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:49:15.140324 32405 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:49:15.140331 32405 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.140393 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:15.140547 32405 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:49:15.140560 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.140565 32405 net.cpp:165] Memory required for data: 1446401500\nI0821 06:49:15.140574 32405 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:49:15.140585 32405 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:49:15.140592 32405 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:49:15.140599 32405 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:15.140607 32405 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:49:15.140642 32405 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:49:15.140655 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.140660 32405 net.cpp:165] Memory required for data: 1448449500\nI0821 06:49:15.140664 32405 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:49:15.140672 32405 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:49:15.140677 32405 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:49:15.140689 32405 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.140699 32405 net.cpp:150] Setting up L3_b9_relu\nI0821 06:49:15.140707 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.140710 32405 net.cpp:165] Memory required for data: 1450497500\nI0821 06:49:15.140715 32405 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:49:15.140724 32405 net.cpp:100] Creating Layer post_pool\nI0821 06:49:15.140729 32405 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:49:15.140738 32405 net.cpp:408] post_pool -> post_pool\nI0821 06:49:15.140770 32405 net.cpp:150] Setting up post_pool\nI0821 06:49:15.140784 32405 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:49:15.140789 32405 net.cpp:165] Memory required for data: 1450529500\nI0821 06:49:15.140794 32405 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:49:15.140894 32405 net.cpp:100] Creating Layer post_FC\nI0821 06:49:15.140908 32405 net.cpp:434] post_FC <- post_pool\nI0821 06:49:15.140918 32405 net.cpp:408] post_FC -> post_FC_top\nI0821 06:49:15.141180 32405 net.cpp:150] Setting up post_FC\nI0821 06:49:15.141199 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.141206 32405 net.cpp:165] Memory required for data: 1450534500\nI0821 06:49:15.141214 32405 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:49:15.141223 32405 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:49:15.141229 32405 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:49:15.141237 32405 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:49:15.141247 32405 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:49:15.141296 32405 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:49:15.141307 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.141314 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.141319 32405 net.cpp:165] Memory required for data: 1450544500\nI0821 06:49:15.141324 32405 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:49:15.141422 32405 net.cpp:100] Creating Layer accuracy\nI0821 06:49:15.141435 32405 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:49:15.141443 32405 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:49:15.141451 32405 net.cpp:408] accuracy -> accuracy\nI0821 06:49:15.141499 32405 net.cpp:150] Setting up accuracy\nI0821 06:49:15.141512 32405 net.cpp:157] Top shape: (1)\nI0821 06:49:15.141517 32405 net.cpp:165] Memory required for data: 1450544504\nI0821 06:49:15.141522 32405 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:15.141538 32405 net.cpp:100] Creating Layer loss\nI0821 06:49:15.141546 32405 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:49:15.141552 32405 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:49:15.141561 32405 net.cpp:408] loss -> loss\nI0821 06:49:15.142943 32405 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:15.147545 32405 net.cpp:150] Setting up loss\nI0821 06:49:15.147572 32405 net.cpp:157] Top shape: (1)\nI0821 06:49:15.147578 32405 net.cpp:160]     with loss weight 1\nI0821 06:49:15.147665 32405 net.cpp:165] Memory required for data: 1450544508\nI0821 06:49:15.147675 32405 net.cpp:226] loss needs backward computation.\nI0821 06:49:15.147681 32405 net.cpp:228] accuracy does not need backward computation.\nI0821 06:49:15.147687 32405 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:49:15.147693 32405 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:49:15.147698 32405 net.cpp:226] post_pool needs backward computation.\nI0821 06:49:15.147703 32405 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:49:15.147707 32405 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.147713 32405 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.147717 32405 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.147722 32405 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.147727 32405 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.147732 32405 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.147737 32405 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.147742 32405 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.147747 32405 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:49:15.147752 32405 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:49:15.147756 32405 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.147763 32405 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.147768 32405 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.147773 32405 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.147778 32405 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.147783 32405 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:49:15.147786 32405 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.147791 32405 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.147797 32405 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:49:15.147802 32405 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:49:15.147807 32405 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:49:15.147812 32405 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.147817 32405 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.147822 32405 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.147827 32405 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.147832 32405 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.147836 32405 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.147841 32405 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.147847 32405 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:49:15.147852 32405 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:49:15.147857 32405 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.147873 32405 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.147878 32405 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.147884 32405 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.147889 32405 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.147902 32405 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.147908 32405 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.147913 32405 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.147919 32405 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:49:15.147924 32405 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:49:15.147929 32405 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:49:15.147935 32405 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.147940 32405 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.147945 32405 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.147953 32405 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.147958 32405 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.147963 32405 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.147967 32405 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.147972 32405 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:49:15.147977 32405 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:49:15.147982 32405 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.147989 32405 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.147994 32405 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.147999 32405 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.148003 32405 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.148008 32405 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.148013 32405 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.148018 32405 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.148023 32405 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:49:15.148028 32405 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:49:15.148033 32405 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:49:15.148039 32405 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.148044 32405 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.148056 32405 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.148062 32405 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.148067 32405 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.148072 
32405 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.148077 32405 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.148083 32405 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:49:15.148088 32405 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:49:15.148093 32405 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.148099 32405 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.148104 32405 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.148110 32405 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.148115 32405 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.148120 32405 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.148125 32405 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.148130 32405 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.148136 32405 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:49:15.148141 32405 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:49:15.148147 32405 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:49:15.148152 32405 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:49:15.148162 32405 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.148169 32405 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:49:15.148175 32405 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.148180 32405 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.148185 32405 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.148190 32405 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.148195 32405 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:49:15.148200 32405 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.148205 32405 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.148211 32405 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:49:15.148216 32405 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:49:15.148221 32405 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.148227 32405 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.148232 32405 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.148237 32405 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.148242 32405 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.148247 32405 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.148252 32405 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.148258 32405 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.148264 32405 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:49:15.148269 32405 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:49:15.148275 32405 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.148280 32405 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.148286 32405 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.148291 32405 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.148296 32405 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.148301 32405 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:49:15.148308 32405 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.148313 32405 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.148319 32405 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:49:15.148324 32405 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:49:15.148332 32405 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:49:15.148339 32405 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.148344 32405 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.148350 32405 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.148355 32405 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.148360 32405 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.148365 32405 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.148370 32405 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.148376 32405 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:49:15.148381 32405 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:49:15.148386 32405 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.148392 32405 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.148398 32405 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.148403 32405 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.148408 32405 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.148414 32405 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.148424 32405 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.148429 32405 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.148435 32405 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:49:15.148440 32405 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:49:15.148447 32405 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:49:15.148452 32405 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.148458 32405 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.148463 32405 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.148468 32405 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.148473 32405 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.148478 32405 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.148483 32405 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.148488 32405 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:49:15.148494 32405 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:49:15.148499 32405 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.148505 32405 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.148510 32405 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.148515 32405 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.148520 32405 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.148526 32405 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.148531 32405 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.148537 32405 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.148543 32405 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:49:15.148548 32405 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:49:15.148555 32405 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:49:15.148561 32405 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.148566 32405 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.148571 32405 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.148576 32405 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.148582 32405 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.148587 32405 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.148593 32405 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.148598 32405 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:49:15.148604 32405 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:49:15.148609 32405 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.148615 32405 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.148620 32405 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.148627 32405 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.148632 32405 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.148638 32405 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.148643 32405 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.148648 32405 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.148653 32405 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:49:15.148658 32405 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:49:15.148665 32405 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:49:15.148669 32405 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:49:15.148675 32405 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.148690 32405 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:49:15.148696 32405 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.148701 32405 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.148710 32405 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.148716 32405 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.148721 32405 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:49:15.148727 32405 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.148733 32405 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.148738 32405 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:49:15.148744 32405 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:49:15.148749 32405 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.148756 32405 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.148761 32405 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.148766 32405 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.148772 32405 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.148777 32405 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.148783 32405 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.148788 32405 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.148793 32405 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:49:15.148799 32405 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:49:15.148804 32405 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.148810 32405 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.148815 32405 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.148821 32405 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.148828 32405 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.148833 32405 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:49:15.148838 32405 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.148844 32405 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.148849 32405 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:49:15.148855 32405 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:49:15.148867 32405 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:49:15.148875 32405 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.148881 32405 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.148887 32405 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.148893 32405 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.148898 32405 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.148903 32405 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.148910 32405 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.148916 32405 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:49:15.148921 32405 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:49:15.148926 32405 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.148936 32405 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.148941 32405 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.148947 32405 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.148952 32405 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.148957 32405 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.148963 32405 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.148974 32405 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.148980 32405 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:49:15.148986 32405 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:49:15.148991 32405 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:49:15.148998 32405 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.149003 32405 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.149009 32405 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.149014 32405 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.149020 32405 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.149025 32405 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.149035 32405 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.149041 32405 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:49:15.149046 32405 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:49:15.149051 32405 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.149058 32405 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.149063 32405 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.149070 32405 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.149075 32405 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.149080 32405 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.149085 32405 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.149091 32405 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.149096 32405 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:49:15.149102 32405 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:49:15.149108 32405 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:49:15.149114 32405 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.149119 32405 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.149125 32405 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.149130 32405 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.149137 32405 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.149142 32405 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.149147 32405 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.149152 32405 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:49:15.149158 32405 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:49:15.149163 32405 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.149169 32405 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.149174 32405 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.149181 32405 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.149186 32405 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.149193 32405 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.149197 32405 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.149204 32405 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.149209 32405 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:49:15.149214 32405 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:49:15.149220 32405 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.149226 32405 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.149232 32405 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.149237 32405 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.149248 32405 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.149255 32405 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:49:15.149260 32405 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.149266 32405 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.149271 32405 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:49:15.149277 32405 net.cpp:226] pre_relu needs backward computation.\nI0821 06:49:15.149282 32405 net.cpp:226] pre_scale needs backward computation.\nI0821 06:49:15.149287 32405 net.cpp:226] pre_bn needs backward computation.\nI0821 06:49:15.149292 32405 net.cpp:226] pre_conv needs backward computation.\nI0821 06:49:15.149299 32405 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:49:15.149307 32405 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:49:15.149310 32405 net.cpp:270] This network produces output accuracy\nI0821 06:49:15.149317 32405 net.cpp:270] This network produces output loss\nI0821 06:49:15.149693 32405 net.cpp:283] Network initialization done.\nI0821 06:49:15.159309 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:15.159351 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:15.159420 32405 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:49:15.159816 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:49:15.159832 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:49:15.159843 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:49:15.159852 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:49:15.159870 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:49:15.159880 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:49:15.159889 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:49:15.159898 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:49:15.159907 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:49:15.159915 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:49:15.159925 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:49:15.159934 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:49:15.159942 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:49:15.159950 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:49:15.159960 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:49:15.159968 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:49:15.159977 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:49:15.159986 32405 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:49:15.159996 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:49:15.160014 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:49:15.160024 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:49:15.160032 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:49:15.160044 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:49:15.160053 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:49:15.160063 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:49:15.160070 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:49:15.160079 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:49:15.160087 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:49:15.160095 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:49:15.160104 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:49:15.160112 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:49:15.160121 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:49:15.160130 32405 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:49:15.160138 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:49:15.160146 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:49:15.160156 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:49:15.160163 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:49:15.160172 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:49:15.160181 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:49:15.160189 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:49:15.160200 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:49:15.160209 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:49:15.160218 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:49:15.160225 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:49:15.160234 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:49:15.160243 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:49:15.160251 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:49:15.160259 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:49:15.160269 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:49:15.160275 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:49:15.160292 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:49:15.160302 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:49:15.160310 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:49:15.160320 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:49:15.160327 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:49:15.160336 32405 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:49:15.161984 32405 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:49:15.163581 32405 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:49:15.163820 32405 net.cpp:100] Creating Layer dataLayer\nI0821 06:49:15.163844 32405 net.cpp:408] dataLayer -> data_top\nI0821 06:49:15.163866 32405 net.cpp:408] dataLayer -> label\nI0821 06:49:15.163880 32405 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:49:15.217573 32412 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:49:15.217890 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:15.226166 32405 net.cpp:150] Setting up dataLayer\nI0821 06:49:15.226189 32405 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:49:15.226197 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.226202 32405 net.cpp:165] Memory required for data: 1536500\nI0821 06:49:15.226209 32405 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:49:15.226219 32405 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:49:15.226225 32405 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:49:15.226236 32405 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:49:15.226249 32405 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:49:15.226354 32405 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:49:15.226368 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.226374 32405 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:15.226379 32405 net.cpp:165] Memory required for data: 1537500\nI0821 06:49:15.226385 32405 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:49:15.226400 32405 net.cpp:100] Creating Layer pre_conv\nI0821 06:49:15.226407 32405 net.cpp:434] pre_conv <- data_top\nI0821 06:49:15.226420 32405 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:49:15.226915 32405 net.cpp:150] Setting up pre_conv\nI0821 06:49:15.226940 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.226946 32405 net.cpp:165] Memory required for data: 9729500\nI0821 06:49:15.226969 32405 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:49:15.226986 32405 net.cpp:100] Creating Layer pre_bn\nI0821 06:49:15.226995 32405 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:49:15.227005 32405 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:49:15.227330 32405 net.cpp:150] Setting up pre_bn\nI0821 06:49:15.227346 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.227352 32405 net.cpp:165] Memory required for data: 17921500\nI0821 06:49:15.227368 32405 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:15.227378 32405 net.cpp:100] Creating Layer pre_scale\nI0821 06:49:15.227385 32405 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:49:15.227396 32405 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:49:15.227460 32405 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:15.227635 32405 net.cpp:150] Setting up pre_scale\nI0821 06:49:15.227649 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.227654 32405 net.cpp:165] Memory required for data: 26113500\nI0821 06:49:15.227663 32405 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:49:15.227671 32405 net.cpp:100] Creating Layer pre_relu\nI0821 06:49:15.227677 32405 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:49:15.227687 32405 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:49:15.227746 32405 net.cpp:150] Setting up pre_relu\nI0821 06:49:15.227756 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.227761 32405 net.cpp:165] Memory required for data: 
34305500\nI0821 06:49:15.227766 32405 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:49:15.227773 32405 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:49:15.227782 32405 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:49:15.227807 32405 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:49:15.227819 32405 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:49:15.227885 32405 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:49:15.227897 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.227905 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.227908 32405 net.cpp:165] Memory required for data: 50689500\nI0821 06:49:15.227913 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:49:15.227924 32405 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:49:15.227933 32405 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:49:15.227946 32405 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:49:15.228351 32405 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:49:15.228368 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.228374 32405 net.cpp:165] Memory required for data: 58881500\nI0821 06:49:15.228385 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:49:15.228402 32405 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:49:15.228408 32405 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:49:15.228417 32405 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:49:15.228721 32405 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:49:15.228737 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.228742 32405 net.cpp:165] Memory required for data: 67073500\nI0821 06:49:15.228754 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:15.228765 32405 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:49:15.228770 32405 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:49:15.228778 32405 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.228845 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:15.230015 32405 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:49:15.230031 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.230036 32405 net.cpp:165] Memory required for data: 75265500\nI0821 06:49:15.230053 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:49:15.230064 32405 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:49:15.230072 32405 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:49:15.230080 32405 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.230089 32405 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:49:15.230098 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.230101 32405 net.cpp:165] Memory required for data: 83457500\nI0821 06:49:15.230106 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:49:15.230120 32405 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:49:15.230126 32405 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:49:15.230136 32405 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:49:15.230487 32405 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:49:15.230501 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.230506 32405 net.cpp:165] Memory required for data: 91649500\nI0821 06:49:15.230515 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:49:15.230525 32405 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:49:15.230531 32405 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:49:15.230542 32405 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:49:15.230808 32405 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:49:15.230821 32405 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.230826 32405 net.cpp:165] Memory required for data: 99841500\nI0821 06:49:15.230844 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:15.230854 32405 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:49:15.230866 32405 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:49:15.230875 32405 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.230934 32405 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:15.231092 32405 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:49:15.231106 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231111 32405 net.cpp:165] Memory required for data: 108033500\nI0821 06:49:15.231119 32405 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:49:15.231132 32405 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:49:15.231137 32405 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:49:15.231144 32405 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:49:15.231151 32405 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:49:15.231187 32405 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:49:15.231196 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231201 32405 net.cpp:165] Memory required for data: 116225500\nI0821 06:49:15.231206 32405 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:49:15.231214 32405 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:49:15.231220 32405 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:49:15.231230 32405 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.231238 32405 net.cpp:150] Setting up L1_b1_relu\nI0821 06:49:15.231246 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231251 32405 net.cpp:165] Memory required for data: 124417500\nI0821 06:49:15.231256 32405 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.231263 32405 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.231268 32405 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:49:15.231276 32405 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:15.231284 32405 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:15.231336 32405 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:15.231348 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231364 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231369 32405 net.cpp:165] Memory required for data: 140801500\nI0821 06:49:15.231374 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:49:15.231389 32405 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:49:15.231395 32405 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:15.231403 32405 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:49:15.231752 32405 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:49:15.231767 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.231772 32405 net.cpp:165] Memory required for data: 148993500\nI0821 06:49:15.231781 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:49:15.231794 32405 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:49:15.231801 32405 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:49:15.231809 32405 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:49:15.232094 32405 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:49:15.232107 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.232112 32405 net.cpp:165] Memory required for data: 157185500\nI0821 06:49:15.232123 32405 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:49:15.232131 32405 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:49:15.232137 32405 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:49:15.232144 32405 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.232203 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:49:15.232368 32405 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:49:15.232380 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.232385 32405 net.cpp:165] Memory required for data: 165377500\nI0821 06:49:15.232394 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:49:15.232403 32405 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:49:15.232408 32405 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:49:15.232417 32405 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.232427 32405 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:49:15.232434 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.232439 32405 net.cpp:165] Memory required for data: 173569500\nI0821 06:49:15.232444 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:49:15.232456 32405 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:49:15.232462 32405 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:49:15.232472 32405 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:49:15.233023 32405 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:49:15.233038 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233043 32405 net.cpp:165] Memory required for data: 181761500\nI0821 06:49:15.233052 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:49:15.233064 32405 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:49:15.233070 32405 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:49:15.233078 32405 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:49:15.233350 32405 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:49:15.233366 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233371 32405 net.cpp:165] Memory required for data: 189953500\nI0821 06:49:15.233386 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:15.233395 32405 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:49:15.233402 32405 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:49:15.233412 32405 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.233471 32405 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:15.233625 32405 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:49:15.233641 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233646 32405 net.cpp:165] Memory required for data: 198145500\nI0821 06:49:15.233664 32405 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:49:15.233672 32405 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:49:15.233678 32405 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:49:15.233685 32405 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:15.233696 32405 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:49:15.233731 32405 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:49:15.233741 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233744 32405 net.cpp:165] Memory required for data: 206337500\nI0821 06:49:15.233750 32405 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:49:15.233757 32405 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:49:15.233762 32405 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:49:15.233769 32405 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.233778 32405 net.cpp:150] Setting up L1_b2_relu\nI0821 06:49:15.233785 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233789 32405 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:49:15.233794 32405 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.233804 32405 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.233809 32405 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:49:15.233817 32405 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:15.233827 32405 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:15.233880 32405 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:15.233893 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233901 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.233906 32405 net.cpp:165] Memory required for data: 230913500\nI0821 06:49:15.233909 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:49:15.233922 32405 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:49:15.233927 32405 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:15.233935 32405 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:49:15.234280 32405 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:49:15.234295 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.234300 32405 net.cpp:165] Memory required for data: 239105500\nI0821 06:49:15.234308 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:49:15.234320 32405 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:49:15.234326 32405 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:49:15.234334 32405 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:49:15.234602 32405 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:49:15.234616 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.234621 32405 net.cpp:165] Memory required for data: 
247297500\nI0821 06:49:15.234630 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:15.234639 32405 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:49:15.234644 32405 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:49:15.234655 32405 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.234714 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:15.234881 32405 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:49:15.234895 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.234900 32405 net.cpp:165] Memory required for data: 255489500\nI0821 06:49:15.234910 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:49:15.234917 32405 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:49:15.234923 32405 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:49:15.234935 32405 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.234951 32405 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:49:15.234961 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.234964 32405 net.cpp:165] Memory required for data: 263681500\nI0821 06:49:15.234969 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:49:15.234982 32405 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:49:15.234988 32405 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:49:15.234997 32405 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:49:15.235380 32405 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:49:15.235394 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.235399 32405 net.cpp:165] Memory required for data: 271873500\nI0821 06:49:15.235409 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:49:15.235424 32405 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:49:15.235430 32405 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:49:15.235437 32405 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:49:15.235924 32405 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:49:15.235939 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.235944 32405 net.cpp:165] Memory required for data: 280065500\nI0821 06:49:15.235954 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:15.235967 32405 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:49:15.235973 32405 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:49:15.235981 32405 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.236042 32405 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:15.236202 32405 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:49:15.236217 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.236222 32405 net.cpp:165] Memory required for data: 288257500\nI0821 06:49:15.236229 32405 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:49:15.236238 32405 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:49:15.236244 32405 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:49:15.236251 32405 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:15.236261 32405 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:49:15.236295 32405 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:49:15.236308 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.236313 32405 net.cpp:165] Memory required for data: 296449500\nI0821 06:49:15.236318 32405 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:49:15.236325 32405 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:49:15.236331 32405 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:49:15.236338 32405 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.236346 32405 net.cpp:150] Setting up L1_b3_relu\nI0821 06:49:15.236353 32405 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:49:15.236357 32405 net.cpp:165] Memory required for data: 304641500\nI0821 06:49:15.236362 32405 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.236372 32405 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.236377 32405 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:49:15.236384 32405 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:15.236394 32405 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:15.236443 32405 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:15.236455 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.236461 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.236466 32405 net.cpp:165] Memory required for data: 321025500\nI0821 06:49:15.236471 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:49:15.236481 32405 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:49:15.236496 32405 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:15.236508 32405 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:49:15.236876 32405 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:49:15.236891 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.236896 32405 net.cpp:165] Memory required for data: 329217500\nI0821 06:49:15.236904 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:49:15.236913 32405 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:49:15.236919 32405 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:49:15.236927 32405 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:49:15.237251 32405 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:49:15.237265 32405 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:49:15.237270 32405 net.cpp:165] Memory required for data: 337409500\nI0821 06:49:15.237282 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:15.237290 32405 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:49:15.237295 32405 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:49:15.237306 32405 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.237372 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:15.237551 32405 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:49:15.237565 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.237571 32405 net.cpp:165] Memory required for data: 345601500\nI0821 06:49:15.237581 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:49:15.237589 32405 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:49:15.237596 32405 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:49:15.237602 32405 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.237612 32405 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:49:15.237619 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.237624 32405 net.cpp:165] Memory required for data: 353793500\nI0821 06:49:15.237628 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:49:15.237643 32405 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:49:15.237649 32405 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:49:15.237665 32405 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:49:15.238334 32405 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:49:15.238350 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.238356 32405 net.cpp:165] Memory required for data: 361985500\nI0821 06:49:15.238365 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:49:15.238380 32405 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:49:15.238387 32405 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:49:15.238399 32405 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:49:15.238709 32405 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:49:15.238723 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.238729 32405 net.cpp:165] Memory required for data: 370177500\nI0821 06:49:15.238741 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:15.238754 32405 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:49:15.238760 32405 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:49:15.238768 32405 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.238833 32405 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:15.239022 32405 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:49:15.239038 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239042 32405 net.cpp:165] Memory required for data: 378369500\nI0821 06:49:15.239053 32405 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:49:15.239060 32405 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:49:15.239069 32405 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:49:15.239076 32405 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:15.239095 32405 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:49:15.239135 32405 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:49:15.239150 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239157 32405 net.cpp:165] Memory required for data: 386561500\nI0821 06:49:15.239162 32405 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:49:15.239171 32405 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:49:15.239176 32405 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:49:15.239183 32405 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.239192 32405 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:49:15.239199 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239203 32405 net.cpp:165] Memory required for data: 394753500\nI0821 06:49:15.239208 32405 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.239218 32405 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.239223 32405 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:49:15.239233 32405 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:15.239244 32405 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:15.239300 32405 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:15.239311 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239317 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239322 32405 net.cpp:165] Memory required for data: 411137500\nI0821 06:49:15.239329 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:49:15.239341 32405 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:49:15.239347 32405 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:15.239362 32405 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:49:15.239751 32405 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:49:15.239768 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.239773 32405 net.cpp:165] Memory required for data: 419329500\nI0821 06:49:15.239794 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:49:15.239809 32405 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:49:15.239816 32405 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:49:15.239825 32405 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:49:15.240145 32405 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:49:15.240159 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.240164 32405 net.cpp:165] Memory required for data: 427521500\nI0821 06:49:15.240175 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:15.240185 32405 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:49:15.240190 32405 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:49:15.240214 32405 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.240291 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:15.240468 32405 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:49:15.240487 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.240495 32405 net.cpp:165] Memory required for data: 435713500\nI0821 06:49:15.240505 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:49:15.240512 32405 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:49:15.240519 32405 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:49:15.240526 32405 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.240538 32405 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:49:15.240545 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.240550 32405 net.cpp:165] Memory required for data: 443905500\nI0821 06:49:15.240556 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:49:15.240577 32405 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:49:15.240584 32405 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:49:15.240595 32405 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:49:15.240996 32405 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:49:15.241014 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241019 32405 net.cpp:165] Memory required for data: 452097500\nI0821 06:49:15.241036 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:49:15.241050 32405 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:49:15.241056 32405 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:49:15.241067 32405 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:49:15.241380 32405 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:49:15.241394 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241400 32405 net.cpp:165] Memory required for data: 460289500\nI0821 06:49:15.241410 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:15.241421 32405 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:49:15.241427 32405 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:49:15.241436 32405 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.241504 32405 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:15.241683 32405 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:49:15.241696 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241703 32405 net.cpp:165] Memory required for data: 468481500\nI0821 06:49:15.241714 32405 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:49:15.241725 32405 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:49:15.241731 32405 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:49:15.241739 32405 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:15.241745 32405 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:49:15.241787 32405 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:49:15.241798 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241802 32405 net.cpp:165] Memory required for data: 476673500\nI0821 06:49:15.241807 32405 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:49:15.241816 32405 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:49:15.241823 32405 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:49:15.241837 32405 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.241847 32405 net.cpp:150] Setting up L1_b5_relu\nI0821 06:49:15.241853 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241858 32405 net.cpp:165] Memory required for data: 484865500\nI0821 06:49:15.241873 32405 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.241881 32405 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.241886 32405 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:49:15.241900 32405 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:15.241910 32405 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:15.241966 32405 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:15.241977 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241984 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.241988 32405 net.cpp:165] Memory required for data: 501249500\nI0821 06:49:15.241996 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:49:15.242012 32405 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:49:15.242017 32405 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:15.242027 32405 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:49:15.242466 32405 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:49:15.242480 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.242492 32405 net.cpp:165] Memory required for data: 509441500\nI0821 06:49:15.242504 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:49:15.242518 32405 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:49:15.242524 32405 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:49:15.242537 32405 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:49:15.242851 32405 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:49:15.242872 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.242878 32405 net.cpp:165] Memory required for data: 517633500\nI0821 06:49:15.242888 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:15.242897 32405 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:49:15.242905 32405 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:49:15.242914 32405 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.242985 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:15.243190 32405 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:49:15.243204 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.243209 32405 net.cpp:165] Memory required for data: 525825500\nI0821 06:49:15.243221 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:49:15.243233 32405 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:49:15.243239 32405 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:49:15.243247 32405 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.243257 32405 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:49:15.243263 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.243268 32405 net.cpp:165] Memory required for data: 534017500\nI0821 06:49:15.243273 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:49:15.243286 32405 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:49:15.243293 32405 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:49:15.243307 32405 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:49:15.243705 32405 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:49:15.243722 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.243727 32405 net.cpp:165] Memory required for data: 542209500\nI0821 06:49:15.243736 32405 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:49:15.243748 32405 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:49:15.243757 32405 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:49:15.243767 32405 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:49:15.244105 32405 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:49:15.244120 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244125 32405 net.cpp:165] Memory required for data: 550401500\nI0821 06:49:15.244137 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:15.244145 32405 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:49:15.244151 32405 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:49:15.244158 32405 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.244230 32405 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:15.244436 32405 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:49:15.244451 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244455 32405 net.cpp:165] Memory required for data: 558593500\nI0821 06:49:15.244465 32405 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:49:15.244487 32405 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:49:15.244493 32405 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:49:15.244500 32405 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:15.244515 32405 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:49:15.244559 32405 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:49:15.244570 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244575 32405 net.cpp:165] Memory required for data: 566785500\nI0821 06:49:15.244588 32405 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:49:15.244596 32405 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:49:15.244602 32405 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:49:15.244609 32405 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.244622 32405 net.cpp:150] Setting up L1_b6_relu\nI0821 06:49:15.244629 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244633 32405 net.cpp:165] Memory required for data: 574977500\nI0821 06:49:15.244638 32405 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.244645 32405 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.244650 32405 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:49:15.244664 32405 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:15.244675 32405 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:15.244730 32405 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:15.244742 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244748 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.244753 32405 net.cpp:165] Memory required for data: 591361500\nI0821 06:49:15.244758 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:49:15.244776 32405 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:49:15.244781 32405 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:15.244791 32405 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:49:15.245203 32405 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:49:15.245219 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.245224 32405 net.cpp:165] Memory required for data: 599553500\nI0821 06:49:15.245234 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:49:15.245246 32405 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:49:15.245252 32405 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:49:15.245265 32405 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:49:15.245574 32405 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:49:15.245587 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.245592 32405 net.cpp:165] Memory required for data: 607745500\nI0821 06:49:15.245604 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:15.245611 32405 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:49:15.245620 32405 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:49:15.245632 32405 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.245700 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:15.245895 32405 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:49:15.245916 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.245921 32405 net.cpp:165] Memory required for data: 615937500\nI0821 06:49:15.245934 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:49:15.245942 32405 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:49:15.245949 32405 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:49:15.245955 32405 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.245967 32405 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:49:15.245975 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.245980 32405 net.cpp:165] Memory required for data: 624129500\nI0821 06:49:15.245985 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:49:15.245997 32405 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:49:15.246004 32405 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:49:15.246016 32405 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:49:15.246404 32405 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:49:15.246418 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.246430 32405 
net.cpp:165] Memory required for data: 632321500\nI0821 06:49:15.246439 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:49:15.246453 32405 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:49:15.246459 32405 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:49:15.246469 32405 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:49:15.247123 32405 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:49:15.247136 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247143 32405 net.cpp:165] Memory required for data: 640513500\nI0821 06:49:15.247153 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:15.247161 32405 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:49:15.247167 32405 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:49:15.247174 32405 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.247237 32405 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:15.247398 32405 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:49:15.247412 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247417 32405 net.cpp:165] Memory required for data: 648705500\nI0821 06:49:15.247426 32405 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:49:15.247437 32405 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:49:15.247444 32405 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:49:15.247452 32405 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:15.247459 32405 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:49:15.247496 32405 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:49:15.247508 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247512 32405 net.cpp:165] Memory required for data: 656897500\nI0821 06:49:15.247517 32405 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:49:15.247525 32405 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:49:15.247530 32405 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:49:15.247540 32405 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.247550 32405 net.cpp:150] Setting up L1_b7_relu\nI0821 06:49:15.247556 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247561 32405 net.cpp:165] Memory required for data: 665089500\nI0821 06:49:15.247566 32405 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.247573 32405 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.247578 32405 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:49:15.247588 32405 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:15.247597 32405 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:15.247645 32405 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:15.247656 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247663 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.247668 32405 net.cpp:165] Memory required for data: 681473500\nI0821 06:49:15.247673 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:49:15.247685 32405 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:49:15.247692 32405 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:15.247701 32405 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:49:15.248064 32405 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:49:15.248080 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.248085 32405 net.cpp:165] Memory required for data: 689665500\nI0821 06:49:15.248092 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:49:15.248107 32405 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:49:15.248114 32405 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:49:15.248131 32405 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:49:15.248406 32405 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:49:15.248420 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.248425 32405 net.cpp:165] Memory required for data: 697857500\nI0821 06:49:15.248435 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:15.248445 32405 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:49:15.248450 32405 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:49:15.248461 32405 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.248518 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:15.248675 32405 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:49:15.248692 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.248697 32405 net.cpp:165] Memory required for data: 706049500\nI0821 06:49:15.248705 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:49:15.248713 32405 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:49:15.248719 32405 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:49:15.248726 32405 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.248736 32405 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:49:15.248742 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.248747 32405 net.cpp:165] Memory required for data: 714241500\nI0821 06:49:15.248751 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:49:15.248765 32405 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:49:15.248771 32405 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:49:15.248782 32405 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:49:15.249150 32405 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:49:15.249166 32405 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.249171 32405 net.cpp:165] Memory required for data: 722433500\nI0821 06:49:15.249178 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:49:15.249191 32405 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:49:15.249197 32405 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:49:15.249207 32405 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:49:15.249480 32405 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:49:15.249493 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.249498 32405 net.cpp:165] Memory required for data: 730625500\nI0821 06:49:15.249508 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:15.249516 32405 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:49:15.249522 32405 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:49:15.249529 32405 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.249593 32405 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:15.249752 32405 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:49:15.249765 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.249770 32405 net.cpp:165] Memory required for data: 738817500\nI0821 06:49:15.249778 32405 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:49:15.249790 32405 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:49:15.249796 32405 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:49:15.249804 32405 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:15.249810 32405 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:49:15.249848 32405 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:49:15.249864 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.249871 32405 net.cpp:165] Memory required for data: 747009500\nI0821 06:49:15.249876 32405 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:49:15.249883 32405 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:49:15.249889 32405 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:49:15.249899 32405 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.249917 32405 net.cpp:150] Setting up L1_b8_relu\nI0821 06:49:15.249923 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.249927 32405 net.cpp:165] Memory required for data: 755201500\nI0821 06:49:15.249933 32405 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.249939 32405 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.249944 32405 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:49:15.249954 32405 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:15.249965 32405 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:15.250013 32405 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:15.250025 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.250031 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.250036 32405 net.cpp:165] Memory required for data: 771585500\nI0821 06:49:15.250041 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:49:15.250056 32405 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:49:15.250061 32405 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:15.250071 32405 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:49:15.250433 32405 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:49:15.250450 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.250455 32405 net.cpp:165] Memory required for data: 779777500\nI0821 06:49:15.250464 32405 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:49:15.250473 32405 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:49:15.250479 32405 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:49:15.250489 32405 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:49:15.250766 32405 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:49:15.250778 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.250783 32405 net.cpp:165] Memory required for data: 787969500\nI0821 06:49:15.250794 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:15.250805 32405 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:49:15.250811 32405 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:49:15.250819 32405 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.250885 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:15.251050 32405 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:49:15.251063 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.251068 32405 net.cpp:165] Memory required for data: 796161500\nI0821 06:49:15.251077 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:49:15.251085 32405 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:49:15.251091 32405 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:49:15.251106 32405 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.251117 32405 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:49:15.251124 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.251128 32405 net.cpp:165] Memory required for data: 804353500\nI0821 06:49:15.251133 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:49:15.251147 32405 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:49:15.251152 32405 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:49:15.251160 32405 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:49:15.251513 32405 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:49:15.251525 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.251530 32405 net.cpp:165] Memory required for data: 812545500\nI0821 06:49:15.251539 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:49:15.251551 32405 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:49:15.251557 32405 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:49:15.251572 32405 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:49:15.251847 32405 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:49:15.251865 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.251871 32405 net.cpp:165] Memory required for data: 820737500\nI0821 06:49:15.251904 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:15.251916 32405 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:49:15.251924 32405 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:49:15.251933 32405 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.251991 32405 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:15.252151 32405 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:49:15.252163 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.252168 32405 net.cpp:165] Memory required for data: 828929500\nI0821 06:49:15.252177 32405 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:49:15.252187 32405 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:49:15.252192 32405 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:49:15.252199 32405 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:15.252207 32405 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:49:15.252244 32405 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:49:15.252255 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.252260 32405 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:49:15.252265 32405 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:49:15.252272 32405 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:49:15.252281 32405 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:49:15.252288 32405 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.252297 32405 net.cpp:150] Setting up L1_b9_relu\nI0821 06:49:15.252305 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.252310 32405 net.cpp:165] Memory required for data: 845313500\nI0821 06:49:15.252315 32405 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.252321 32405 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.252326 32405 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:49:15.252336 32405 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:15.252346 32405 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:15.252394 32405 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:15.252405 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.252413 32405 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:15.252416 32405 net.cpp:165] Memory required for data: 861697500\nI0821 06:49:15.252421 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:49:15.252435 32405 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:49:15.252442 32405 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:15.252450 32405 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:49:15.252815 32405 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:49:15.252827 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.252832 32405 net.cpp:165] Memory required for data: 
863745500\nI0821 06:49:15.252841 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:49:15.252856 32405 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:49:15.252868 32405 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:49:15.252877 32405 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:49:15.253149 32405 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:49:15.253161 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.253166 32405 net.cpp:165] Memory required for data: 865793500\nI0821 06:49:15.253177 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:15.253192 32405 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:49:15.253198 32405 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:49:15.253209 32405 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.253268 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:15.253432 32405 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:49:15.253444 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.253449 32405 net.cpp:165] Memory required for data: 867841500\nI0821 06:49:15.253458 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:49:15.253465 32405 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:49:15.253471 32405 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:49:15.253481 32405 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.253491 32405 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:49:15.253499 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.253502 32405 net.cpp:165] Memory required for data: 869889500\nI0821 06:49:15.253506 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:49:15.253520 32405 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:49:15.253525 32405 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:49:15.253535 32405 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:49:15.253900 32405 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:49:15.253913 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.253917 32405 net.cpp:165] Memory required for data: 871937500\nI0821 06:49:15.253926 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:49:15.253937 32405 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:49:15.253944 32405 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:49:15.253952 32405 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:49:15.254218 32405 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:49:15.254231 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.254236 32405 net.cpp:165] Memory required for data: 873985500\nI0821 06:49:15.254246 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:15.254254 32405 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:49:15.254261 32405 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:49:15.254268 32405 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.254333 32405 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:15.254493 32405 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:49:15.254509 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.254514 32405 net.cpp:165] Memory required for data: 876033500\nI0821 06:49:15.254523 32405 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:49:15.254532 32405 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:49:15.254539 32405 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:15.254546 32405 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:49:15.254580 32405 net.cpp:150] Setting up L2_b1_pool\nI0821 06:49:15.254590 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.254595 32405 net.cpp:165] Memory required for data: 878081500\nI0821 06:49:15.254600 32405 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:49:15.254607 32405 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:49:15.254616 32405 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:49:15.254623 32405 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:49:15.254631 32405 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:49:15.254848 32405 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:49:15.254868 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.254874 32405 net.cpp:165] Memory required for data: 880129500\nI0821 06:49:15.254880 32405 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:49:15.254889 32405 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:49:15.254901 32405 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:49:15.254909 32405 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.254920 32405 net.cpp:150] Setting up L2_b1_relu\nI0821 06:49:15.254926 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.254931 32405 net.cpp:165] Memory required for data: 882177500\nI0821 06:49:15.254935 32405 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:49:15.254945 32405 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:49:15.254956 32405 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:49:15.257246 32405 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:49:15.257264 32405 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:15.257269 32405 net.cpp:165] Memory required for data: 884225500\nI0821 06:49:15.257275 32405 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:49:15.257288 32405 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:49:15.257294 32405 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:49:15.257302 32405 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:49:15.257310 32405 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:49:15.257352 32405 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:49:15.257367 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.257374 32405 net.cpp:165] Memory required for data: 888321500\nI0821 06:49:15.257378 32405 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.257386 32405 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.257392 32405 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:49:15.257402 32405 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:15.257412 32405 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:15.257462 32405 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:15.257477 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.257483 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.257488 32405 net.cpp:165] Memory required for data: 896513500\nI0821 06:49:15.257493 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:49:15.257503 32405 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:49:15.257510 32405 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:15.257522 32405 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:49:15.258035 32405 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:49:15.258050 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.258055 32405 net.cpp:165] Memory required for data: 900609500\nI0821 06:49:15.258064 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:49:15.258076 32405 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:49:15.258082 32405 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:49:15.258090 32405 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:49:15.258361 32405 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:49:15.258381 32405 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:49:15.258386 32405 net.cpp:165] Memory required for data: 904705500\nI0821 06:49:15.258397 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:15.258406 32405 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:49:15.258412 32405 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:49:15.258419 32405 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.258479 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:15.258638 32405 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:49:15.258651 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.258656 32405 net.cpp:165] Memory required for data: 908801500\nI0821 06:49:15.258666 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:49:15.258673 32405 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:49:15.258687 32405 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:49:15.258698 32405 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.258708 32405 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:49:15.258715 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.258719 32405 net.cpp:165] Memory required for data: 912897500\nI0821 06:49:15.258724 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:49:15.258734 32405 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:49:15.258740 32405 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:49:15.258751 32405 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:49:15.259254 32405 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:49:15.259269 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.259274 32405 net.cpp:165] Memory required for data: 916993500\nI0821 06:49:15.259284 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:49:15.259291 32405 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:49:15.259299 
32405 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:49:15.259310 32405 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:49:15.259573 32405 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:49:15.259587 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.259590 32405 net.cpp:165] Memory required for data: 921089500\nI0821 06:49:15.259601 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:15.259613 32405 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:49:15.259618 32405 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:49:15.259626 32405 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.259685 32405 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:15.259842 32405 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:49:15.259855 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.259866 32405 net.cpp:165] Memory required for data: 925185500\nI0821 06:49:15.259876 32405 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:49:15.259888 32405 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:49:15.259896 32405 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:49:15.259902 32405 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:15.259912 32405 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:49:15.259941 32405 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:49:15.259953 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.259958 32405 net.cpp:165] Memory required for data: 929281500\nI0821 06:49:15.259963 32405 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:49:15.259971 32405 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:49:15.259977 32405 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:49:15.259990 32405 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.260000 32405 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:49:15.260007 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.260011 32405 net.cpp:165] Memory required for data: 933377500\nI0821 06:49:15.260017 32405 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.260025 32405 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.260030 32405 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:49:15.260037 32405 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:15.260046 32405 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:15.260097 32405 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:15.260109 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.260115 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.260119 32405 net.cpp:165] Memory required for data: 941569500\nI0821 06:49:15.260133 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:49:15.260143 32405 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:49:15.260149 32405 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:15.260161 32405 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:49:15.260659 32405 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:49:15.260673 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.260679 32405 net.cpp:165] Memory required for data: 945665500\nI0821 06:49:15.260686 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:49:15.260699 32405 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:49:15.260705 32405 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:49:15.260713 32405 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:49:15.260987 32405 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:49:15.261000 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.261005 32405 net.cpp:165] Memory required for data: 949761500\nI0821 06:49:15.261016 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:15.261027 32405 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:49:15.261034 32405 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:49:15.261041 32405 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.261099 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:15.261260 32405 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:49:15.261273 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.261277 32405 net.cpp:165] Memory required for data: 953857500\nI0821 06:49:15.261286 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:49:15.261297 32405 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:49:15.261303 32405 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:49:15.261313 32405 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.261322 32405 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:49:15.261329 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.261334 32405 net.cpp:165] Memory required for data: 957953500\nI0821 06:49:15.261339 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:49:15.261349 32405 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:49:15.261355 32405 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:49:15.261368 32405 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:49:15.261858 32405 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:49:15.261878 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.261883 32405 net.cpp:165] Memory required for data: 962049500\nI0821 06:49:15.261893 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:49:15.261901 32405 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:49:15.261909 32405 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:49:15.261919 32405 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:49:15.262194 32405 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:49:15.262207 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262212 32405 net.cpp:165] Memory required for data: 966145500\nI0821 06:49:15.262223 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:15.262234 32405 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:49:15.262240 32405 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:49:15.262248 32405 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.262306 32405 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:15.262465 32405 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:49:15.262477 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262481 32405 net.cpp:165] Memory required for data: 970241500\nI0821 06:49:15.262490 32405 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:49:15.262502 32405 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:49:15.262516 32405 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:49:15.262522 32405 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:15.262531 32405 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:49:15.262562 32405 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:49:15.262574 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262578 32405 net.cpp:165] Memory required for data: 974337500\nI0821 06:49:15.262584 32405 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:49:15.262604 32405 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:49:15.262610 32405 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:49:15.262617 32405 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.262627 32405 net.cpp:150] Setting up L2_b3_relu\nI0821 06:49:15.262634 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262639 32405 net.cpp:165] Memory required for data: 978433500\nI0821 06:49:15.262643 32405 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.262653 32405 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.262660 32405 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:49:15.262667 32405 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:15.262676 32405 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:15.262727 32405 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:15.262742 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262748 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.262753 32405 net.cpp:165] Memory required for data: 986625500\nI0821 06:49:15.262758 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:49:15.262768 32405 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:49:15.262775 32405 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:15.262784 32405 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:49:15.263288 32405 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:49:15.263301 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.263306 32405 net.cpp:165] Memory required for data: 990721500\nI0821 06:49:15.263315 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:49:15.263327 32405 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:49:15.263334 32405 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:49:15.263342 32405 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:49:15.263617 32405 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:49:15.263629 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.263634 32405 net.cpp:165] Memory required for data: 994817500\nI0821 06:49:15.263644 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:15.263653 32405 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:49:15.263659 32405 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:49:15.263669 32405 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.263730 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:15.263898 32405 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:49:15.263912 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.263916 32405 net.cpp:165] Memory required for data: 998913500\nI0821 06:49:15.263926 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:49:15.263933 32405 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:49:15.263939 32405 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:49:15.263952 32405 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.263962 32405 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:49:15.263968 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.263973 32405 net.cpp:165] Memory required for data: 1003009500\nI0821 06:49:15.263985 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:49:15.263999 32405 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:49:15.264005 32405 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:49:15.264014 32405 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:49:15.264509 32405 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:49:15.264523 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.264528 32405 net.cpp:165] Memory required for data: 1007105500\nI0821 06:49:15.264538 32405 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:49:15.264549 32405 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:49:15.264555 32405 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:49:15.264564 32405 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:49:15.264832 32405 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:49:15.264845 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.264850 32405 net.cpp:165] Memory required for data: 1011201500\nI0821 06:49:15.264866 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:15.264876 32405 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:49:15.264883 32405 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:49:15.264894 32405 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.264955 32405 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:15.265125 32405 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:49:15.265138 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265143 32405 net.cpp:165] Memory required for data: 1015297500\nI0821 06:49:15.265152 32405 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:49:15.265161 32405 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:49:15.265167 32405 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:49:15.265174 32405 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:15.265184 32405 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:49:15.265213 32405 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:49:15.265223 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265228 32405 net.cpp:165] Memory required for data: 1019393500\nI0821 06:49:15.265233 32405 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:49:15.265244 32405 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:49:15.265250 32405 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:49:15.265256 32405 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.265266 32405 net.cpp:150] Setting up L2_b4_relu\nI0821 06:49:15.265274 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265277 32405 net.cpp:165] Memory required for data: 1023489500\nI0821 06:49:15.265282 32405 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.265291 32405 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.265297 32405 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:49:15.265305 32405 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:15.265314 32405 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:15.265362 32405 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:15.265377 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265383 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265388 32405 net.cpp:165] Memory required for data: 1031681500\nI0821 06:49:15.265393 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:49:15.265404 32405 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:49:15.265410 32405 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:15.265419 32405 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:49:15.265936 32405 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:49:15.265950 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.265955 32405 net.cpp:165] Memory required for data: 1035777500\nI0821 06:49:15.265964 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:49:15.265976 32405 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:49:15.265983 32405 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:49:15.265991 32405 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:49:15.266268 32405 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:49:15.266280 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.266285 32405 net.cpp:165] Memory required for data: 1039873500\nI0821 06:49:15.266296 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:15.266304 32405 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:49:15.266310 32405 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:49:15.266321 32405 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.266381 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:15.266544 32405 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:49:15.266557 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.266562 32405 net.cpp:165] Memory required for data: 1043969500\nI0821 06:49:15.266571 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:49:15.266578 32405 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:49:15.266584 32405 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:49:15.266592 32405 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.266602 32405 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:49:15.266608 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.266613 32405 net.cpp:165] Memory required for data: 1048065500\nI0821 06:49:15.266618 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:49:15.266631 32405 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:49:15.266638 32405 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:49:15.266649 32405 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:49:15.267151 32405 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:49:15.267166 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.267171 32405 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:49:15.267180 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:49:15.267191 32405 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:49:15.267199 32405 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:49:15.267210 32405 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:49:15.267534 32405 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:49:15.267560 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.267580 32405 net.cpp:165] Memory required for data: 1056257500\nI0821 06:49:15.267590 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:15.267599 32405 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:49:15.267606 32405 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:49:15.267612 32405 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.267676 32405 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:15.267835 32405 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:49:15.267848 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.267853 32405 net.cpp:165] Memory required for data: 1060353500\nI0821 06:49:15.267869 32405 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:49:15.267882 32405 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:49:15.267889 32405 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:49:15.267895 32405 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:15.267904 32405 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:49:15.267935 32405 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:49:15.267952 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.267957 32405 net.cpp:165] Memory required for data: 1064449500\nI0821 06:49:15.267963 32405 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:49:15.267971 32405 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:49:15.267976 32405 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:49:15.267987 32405 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.267997 32405 net.cpp:150] Setting up L2_b5_relu\nI0821 06:49:15.268003 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.268008 32405 net.cpp:165] Memory required for data: 1068545500\nI0821 06:49:15.268013 32405 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.268019 32405 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.268025 32405 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:49:15.268035 32405 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:15.268049 32405 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:15.268102 32405 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:15.268115 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.268121 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.268126 32405 net.cpp:165] Memory required for data: 1076737500\nI0821 06:49:15.268131 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:49:15.268144 32405 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:49:15.268151 32405 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:15.268160 32405 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:49:15.268666 32405 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:49:15.268679 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.268684 32405 net.cpp:165] Memory required for data: 1080833500\nI0821 06:49:15.268693 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:49:15.268702 32405 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:49:15.268708 32405 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:49:15.268720 32405 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:49:15.268996 32405 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:49:15.269009 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.269014 32405 net.cpp:165] Memory required for data: 1084929500\nI0821 06:49:15.269024 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:15.269033 32405 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:49:15.269039 32405 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:49:15.269047 32405 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.269107 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:15.269273 32405 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:49:15.269289 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.269294 32405 net.cpp:165] Memory required for data: 1089025500\nI0821 06:49:15.269302 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:49:15.269309 32405 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:49:15.269316 32405 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:49:15.269323 32405 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.269332 32405 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:49:15.269340 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.269345 32405 net.cpp:165] Memory required for data: 1093121500\nI0821 06:49:15.269348 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:49:15.269362 32405 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:49:15.269368 32405 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:49:15.269379 32405 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:49:15.269915 32405 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:49:15.269930 32405 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.269935 32405 net.cpp:165] Memory required for data: 1097217500\nI0821 06:49:15.269944 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:49:15.269958 32405 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:49:15.269964 32405 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:49:15.269974 32405 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:49:15.270247 32405 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:49:15.270262 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270267 32405 net.cpp:165] Memory required for data: 1101313500\nI0821 06:49:15.270277 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:15.270284 32405 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:49:15.270292 32405 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:49:15.270298 32405 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.270359 32405 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:15.270517 32405 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:49:15.270530 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270535 32405 net.cpp:165] Memory required for data: 1105409500\nI0821 06:49:15.270545 32405 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:49:15.270556 32405 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:49:15.270563 32405 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:49:15.270570 32405 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:15.270577 32405 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:49:15.270606 32405 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:49:15.270615 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270620 32405 net.cpp:165] Memory required for data: 1109505500\nI0821 06:49:15.270625 32405 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:49:15.270637 32405 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:49:15.270642 32405 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:49:15.270649 32405 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.270658 32405 net.cpp:150] Setting up L2_b6_relu\nI0821 06:49:15.270665 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270670 32405 net.cpp:165] Memory required for data: 1113601500\nI0821 06:49:15.270674 32405 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.270681 32405 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.270686 32405 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:49:15.270694 32405 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:15.270704 32405 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:15.270754 32405 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:15.270766 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270773 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.270777 32405 net.cpp:165] Memory required for data: 1121793500\nI0821 06:49:15.270782 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:49:15.270797 32405 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:49:15.270804 32405 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:15.270813 32405 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:49:15.272344 32405 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:49:15.272362 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.272367 32405 net.cpp:165] Memory required for data: 1125889500\nI0821 06:49:15.272377 32405 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:49:15.272394 32405 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:49:15.272402 32405 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:49:15.272413 32405 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:49:15.272686 32405 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:49:15.272699 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.272704 32405 net.cpp:165] Memory required for data: 1129985500\nI0821 06:49:15.272714 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:15.272727 32405 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:49:15.272733 32405 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:49:15.272740 32405 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.272824 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:15.272996 32405 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:49:15.273010 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.273015 32405 net.cpp:165] Memory required for data: 1134081500\nI0821 06:49:15.273025 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:49:15.273036 32405 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:49:15.273042 32405 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:49:15.273051 32405 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.273059 32405 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:49:15.273066 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.273072 32405 net.cpp:165] Memory required for data: 1138177500\nI0821 06:49:15.273077 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:49:15.273090 32405 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:49:15.273097 32405 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:49:15.273108 32405 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:49:15.273596 32405 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:49:15.273610 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.273615 32405 net.cpp:165] Memory required for data: 1142273500\nI0821 06:49:15.273624 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:49:15.273633 32405 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:49:15.273639 32405 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:49:15.273651 32405 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:49:15.273931 32405 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:49:15.273943 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.273948 32405 net.cpp:165] Memory required for data: 1146369500\nI0821 06:49:15.273959 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:15.273970 32405 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:49:15.273977 32405 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:49:15.273984 32405 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.274042 32405 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:15.274199 32405 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:49:15.274211 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.274216 32405 net.cpp:165] Memory required for data: 1150465500\nI0821 06:49:15.274225 32405 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:49:15.274237 32405 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:49:15.274243 32405 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:49:15.274251 32405 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:15.274258 32405 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:49:15.274291 32405 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:49:15.274302 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.274307 32405 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:49:15.274312 32405 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:49:15.274320 32405 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:49:15.274333 32405 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:49:15.274343 32405 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.274353 32405 net.cpp:150] Setting up L2_b7_relu\nI0821 06:49:15.274360 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.274364 32405 net.cpp:165] Memory required for data: 1158657500\nI0821 06:49:15.274369 32405 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.274376 32405 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.274381 32405 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:49:15.274389 32405 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:15.274399 32405 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:15.274453 32405 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:15.274466 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.274472 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.274477 32405 net.cpp:165] Memory required for data: 1166849500\nI0821 06:49:15.274482 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:49:15.274492 32405 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:49:15.274498 32405 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:15.274510 32405 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:49:15.275039 32405 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:49:15.275055 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.275060 32405 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:49:15.275069 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:49:15.275079 32405 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:49:15.275084 32405 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:49:15.275095 32405 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:49:15.275372 32405 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:49:15.275384 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.275389 32405 net.cpp:165] Memory required for data: 1175041500\nI0821 06:49:15.275400 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:15.275411 32405 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:49:15.275418 32405 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:49:15.275426 32405 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.275486 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:15.275642 32405 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:49:15.275655 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.275660 32405 net.cpp:165] Memory required for data: 1179137500\nI0821 06:49:15.275669 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:49:15.275681 32405 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:49:15.275686 32405 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:49:15.275694 32405 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.275703 32405 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:49:15.275710 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.275715 32405 net.cpp:165] Memory required for data: 1183233500\nI0821 06:49:15.275719 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:49:15.275733 32405 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:49:15.275739 32405 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:49:15.275750 32405 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:49:15.276248 32405 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:49:15.276262 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.276268 32405 net.cpp:165] Memory required for data: 1187329500\nI0821 06:49:15.276276 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:49:15.276293 32405 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:49:15.276298 32405 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:49:15.276309 32405 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:49:15.276587 32405 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:49:15.276599 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.276604 32405 net.cpp:165] Memory required for data: 1191425500\nI0821 06:49:15.276614 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:15.276625 32405 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:49:15.276633 32405 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:49:15.276639 32405 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.276695 32405 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:15.276856 32405 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:49:15.276875 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.276880 32405 net.cpp:165] Memory required for data: 1195521500\nI0821 06:49:15.276890 32405 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:49:15.276898 32405 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:49:15.276904 32405 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:49:15.276917 32405 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:15.276926 32405 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:49:15.276955 32405 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:49:15.276968 32405 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:15.276973 32405 net.cpp:165] Memory required for data: 1199617500\nI0821 06:49:15.276978 32405 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:49:15.276986 32405 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:49:15.276993 32405 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:49:15.276998 32405 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.277009 32405 net.cpp:150] Setting up L2_b8_relu\nI0821 06:49:15.277015 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.277019 32405 net.cpp:165] Memory required for data: 1203713500\nI0821 06:49:15.277024 32405 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.277035 32405 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.277041 32405 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:49:15.277048 32405 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:15.277072 32405 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:15.277122 32405 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:15.277138 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.277145 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.277149 32405 net.cpp:165] Memory required for data: 1211905500\nI0821 06:49:15.277154 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:49:15.277168 32405 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:49:15.277175 32405 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:15.277184 32405 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:49:15.277681 32405 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:49:15.277695 32405 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:15.277700 32405 net.cpp:165] Memory required for data: 1216001500\nI0821 06:49:15.277709 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:49:15.277721 32405 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:49:15.277729 32405 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:49:15.277736 32405 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:49:15.278023 32405 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:49:15.278043 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.278048 32405 net.cpp:165] Memory required for data: 1220097500\nI0821 06:49:15.278059 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:15.278069 32405 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:49:15.278074 32405 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:49:15.278085 32405 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.278146 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:15.278311 32405 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:49:15.278322 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.278327 32405 net.cpp:165] Memory required for data: 1224193500\nI0821 06:49:15.278337 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:49:15.278344 32405 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:49:15.278350 32405 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:49:15.278360 32405 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.278370 32405 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:49:15.278378 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.278381 32405 net.cpp:165] Memory required for data: 1228289500\nI0821 06:49:15.278386 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:49:15.278399 32405 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:49:15.278405 32405 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:49:15.278414 32405 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:49:15.279917 32405 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:49:15.279933 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.279938 32405 net.cpp:165] Memory required for data: 1232385500\nI0821 06:49:15.279948 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:49:15.279958 32405 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:49:15.279964 32405 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:49:15.279976 32405 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:49:15.280244 32405 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:49:15.280258 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280263 32405 net.cpp:165] Memory required for data: 1236481500\nI0821 06:49:15.280314 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:15.280328 32405 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:49:15.280333 32405 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:49:15.280341 32405 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.280405 32405 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:15.280561 32405 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:49:15.280577 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280582 32405 net.cpp:165] Memory required for data: 1240577500\nI0821 06:49:15.280591 32405 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:49:15.280601 32405 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:49:15.280607 32405 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:49:15.280614 32405 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:15.280622 32405 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:49:15.280653 32405 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:49:15.280666 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280671 32405 net.cpp:165] Memory required for data: 1244673500\nI0821 06:49:15.280676 32405 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:49:15.280684 32405 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:49:15.280690 32405 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:49:15.280700 32405 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.280709 32405 net.cpp:150] Setting up L2_b9_relu\nI0821 06:49:15.280716 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280730 32405 net.cpp:165] Memory required for data: 1248769500\nI0821 06:49:15.280735 32405 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.280741 32405 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.280747 32405 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:49:15.280757 32405 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:15.280767 32405 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:15.280817 32405 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:15.280828 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280834 32405 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:15.280839 32405 net.cpp:165] Memory required for data: 1256961500\nI0821 06:49:15.280874 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:49:15.280894 32405 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:49:15.280900 32405 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:15.280910 32405 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:49:15.281414 32405 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:49:15.281430 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.281435 32405 net.cpp:165] Memory required for data: 1257985500\nI0821 06:49:15.281443 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:49:15.281456 32405 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:49:15.281462 32405 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:49:15.281471 32405 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:49:15.281746 32405 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:49:15.281759 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.281764 32405 net.cpp:165] Memory required for data: 1259009500\nI0821 06:49:15.281775 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:15.281787 32405 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:49:15.281793 32405 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:49:15.281801 32405 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.281869 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:15.282039 32405 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:49:15.282053 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.282058 32405 net.cpp:165] Memory required for data: 1260033500\nI0821 06:49:15.282068 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:49:15.282078 32405 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:49:15.282084 32405 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:49:15.282093 32405 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:15.282102 32405 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:49:15.282109 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.282114 32405 net.cpp:165] Memory required for data: 1261057500\nI0821 06:49:15.282119 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:49:15.282132 32405 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:49:15.282138 32405 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:49:15.282150 32405 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:49:15.282640 32405 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:49:15.282655 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.282660 32405 net.cpp:165] Memory required for data: 1262081500\nI0821 06:49:15.282668 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:49:15.282677 32405 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:49:15.282683 32405 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:49:15.282694 32405 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:49:15.282984 32405 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:49:15.283004 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.283010 32405 net.cpp:165] Memory required for data: 1263105500\nI0821 06:49:15.283020 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:15.283028 32405 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:49:15.283035 32405 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:49:15.283042 32405 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:49:15.283103 32405 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:15.283267 32405 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:49:15.283282 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.283288 32405 net.cpp:165] Memory required for data: 1264129500\nI0821 06:49:15.283298 32405 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:49:15.283306 32405 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:49:15.283313 32405 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:15.283321 32405 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:49:15.283360 32405 net.cpp:150] Setting up L3_b1_pool\nI0821 06:49:15.283371 32405 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:49:15.283375 32405 net.cpp:165] Memory required for data: 1265153500\nI0821 06:49:15.283381 32405 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:49:15.283390 32405 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:49:15.283396 32405 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:49:15.283402 32405 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:49:15.283409 32405 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:49:15.283448 32405 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:49:15.283460 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.283465 32405 net.cpp:165] Memory required for data: 1266177500\nI0821 06:49:15.283470 32405 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:49:15.283478 32405 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:49:15.283483 32405 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:49:15.283490 32405 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:49:15.283499 32405 net.cpp:150] Setting up L3_b1_relu\nI0821 06:49:15.283507 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.283511 32405 net.cpp:165] Memory required for data: 1267201500\nI0821 06:49:15.283516 32405 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:49:15.283525 32405 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:49:15.283535 32405 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:49:15.284775 32405 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:49:15.284792 32405 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:15.284797 32405 net.cpp:165] Memory required for data: 1268225500\nI0821 06:49:15.284803 32405 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:49:15.284816 32405 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:49:15.284822 32405 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:49:15.284831 32405 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:49:15.284838 32405 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:49:15.284890 32405 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:49:15.284904 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.284909 32405 net.cpp:165] Memory required for data: 1270273500\nI0821 06:49:15.284914 32405 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.284921 32405 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.284927 32405 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:49:15.284939 32405 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:15.284948 32405 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:15.285003 32405 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:15.285018 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.285039 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.285044 32405 net.cpp:165] Memory required for data: 1274369500\nI0821 06:49:15.285049 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:49:15.285066 32405 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:49:15.285073 32405 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:15.285082 32405 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:49:15.286134 32405 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:49:15.286149 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.286154 32405 net.cpp:165] Memory required for data: 1276417500\nI0821 06:49:15.286164 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:49:15.286175 32405 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:49:15.286182 32405 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:49:15.286190 32405 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:49:15.286468 32405 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:49:15.286479 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.286484 32405 net.cpp:165] Memory required for data: 1278465500\nI0821 06:49:15.286495 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:15.286504 32405 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:49:15.286510 32405 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:49:15.286517 32405 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.286581 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:15.286742 32405 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:49:15.286756 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.286761 32405 net.cpp:165] Memory required for data: 1280513500\nI0821 06:49:15.286769 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:49:15.286777 32405 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:49:15.286783 32405 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:49:15.286792 32405 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:15.286800 32405 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:49:15.286808 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.286811 32405 net.cpp:165] Memory required for data: 1282561500\nI0821 06:49:15.286816 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:49:15.286829 32405 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:49:15.286835 32405 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:49:15.286846 32405 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:49:15.287897 32405 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:49:15.287912 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.287917 32405 net.cpp:165] Memory required for data: 1284609500\nI0821 06:49:15.287926 32405 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:49:15.287938 32405 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:49:15.287945 32405 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:49:15.287953 32405 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:49:15.288282 32405 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:49:15.288307 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288328 32405 net.cpp:165] Memory required for data: 1286657500\nI0821 06:49:15.288339 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:15.288353 32405 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:49:15.288360 32405 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:49:15.288368 32405 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:49:15.288432 32405 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:15.288596 32405 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:49:15.288609 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288614 32405 net.cpp:165] Memory required for data: 1288705500\nI0821 06:49:15.288630 32405 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:49:15.288642 32405 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:49:15.288650 32405 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:49:15.288656 32405 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:15.288667 32405 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:49:15.288702 32405 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:49:15.288710 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288715 32405 net.cpp:165] Memory required for data: 1290753500\nI0821 06:49:15.288720 32405 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:49:15.288731 32405 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:49:15.288738 32405 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:49:15.288744 32405 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:49:15.288754 32405 net.cpp:150] Setting up L3_b2_relu\nI0821 06:49:15.288760 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288765 32405 net.cpp:165] Memory required for data: 1292801500\nI0821 06:49:15.288769 32405 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.288776 32405 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.288781 32405 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:49:15.288789 32405 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:15.288799 32405 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:15.288848 32405 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:15.288866 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288873 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.288877 32405 net.cpp:165] Memory required for data: 1296897500\nI0821 06:49:15.288883 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:49:15.288897 32405 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:49:15.288904 32405 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:15.288913 32405 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:49:15.289959 32405 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:49:15.289974 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.289979 32405 net.cpp:165] Memory required for data: 1298945500\nI0821 06:49:15.289988 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:49:15.290000 32405 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:49:15.290007 32405 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:49:15.290016 32405 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:49:15.290292 32405 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:49:15.290304 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.290309 32405 net.cpp:165] Memory required for data: 1300993500\nI0821 06:49:15.290319 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:15.290328 32405 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:49:15.290334 32405 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:49:15.290341 32405 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.290403 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:15.290567 32405 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:49:15.290580 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.290585 32405 net.cpp:165] Memory required for data: 1303041500\nI0821 06:49:15.290594 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:49:15.290602 32405 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:49:15.290608 32405 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:49:15.290618 32405 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:15.290628 32405 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:49:15.290642 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.290647 32405 net.cpp:165] Memory required for data: 1305089500\nI0821 06:49:15.290652 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:49:15.290666 32405 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:49:15.290673 32405 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:49:15.290681 32405 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:49:15.291725 32405 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:49:15.291740 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.291745 32405 net.cpp:165] Memory required for data: 1307137500\nI0821 06:49:15.291754 32405 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:49:15.291766 32405 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:49:15.291774 32405 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:49:15.291781 32405 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:49:15.292062 32405 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:49:15.292076 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292081 32405 net.cpp:165] Memory required for data: 1309185500\nI0821 06:49:15.292091 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:15.292104 32405 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:49:15.292110 32405 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:49:15.292117 32405 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:49:15.292181 32405 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:15.292341 32405 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:49:15.292354 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292359 32405 net.cpp:165] Memory required for data: 1311233500\nI0821 06:49:15.292368 32405 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:49:15.292381 32405 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:49:15.292387 32405 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:49:15.292394 32405 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:15.292405 32405 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:49:15.292439 32405 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:49:15.292450 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292455 32405 net.cpp:165] Memory required for data: 1313281500\nI0821 06:49:15.292460 32405 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:49:15.292471 32405 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:49:15.292477 32405 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:49:15.292484 32405 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:49:15.292493 32405 net.cpp:150] Setting up L3_b3_relu\nI0821 06:49:15.292500 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292505 32405 net.cpp:165] Memory required for data: 1315329500\nI0821 06:49:15.292510 32405 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.292516 32405 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.292522 32405 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:49:15.292529 32405 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:15.292538 32405 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:15.292589 32405 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:15.292601 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292608 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.292613 32405 net.cpp:165] Memory required for data: 1319425500\nI0821 06:49:15.292616 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:49:15.292630 32405 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:49:15.292637 32405 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:15.292652 32405 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:49:15.293705 32405 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:49:15.293721 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.293726 32405 net.cpp:165] Memory required for data: 1321473500\nI0821 06:49:15.293735 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:49:15.293747 32405 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:49:15.293753 32405 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:49:15.293764 32405 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:49:15.294042 32405 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:49:15.294056 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.294061 32405 net.cpp:165] Memory required for data: 1323521500\nI0821 06:49:15.294071 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:15.294080 32405 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:49:15.294086 32405 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:49:15.294097 32405 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.294157 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:15.294320 32405 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:49:15.294333 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.294338 32405 net.cpp:165] Memory required for data: 1325569500\nI0821 06:49:15.294348 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:49:15.294355 32405 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:49:15.294361 32405 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:49:15.294371 32405 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:15.294381 32405 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:49:15.294389 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.294394 32405 net.cpp:165] Memory required for data: 1327617500\nI0821 06:49:15.294397 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:49:15.294411 32405 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:49:15.294417 32405 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:49:15.294425 32405 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:49:15.296456 32405 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:49:15.296474 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.296479 32405 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:49:15.296489 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:49:15.296502 32405 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:49:15.296509 32405 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:49:15.296521 32405 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:49:15.296793 32405 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:49:15.296808 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.296813 32405 net.cpp:165] Memory required for data: 1331713500\nI0821 06:49:15.296823 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:15.296833 32405 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:49:15.296838 32405 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:49:15.296846 32405 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:49:15.296916 32405 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:15.297080 32405 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:49:15.297094 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.297099 32405 net.cpp:165] Memory required for data: 1333761500\nI0821 06:49:15.297108 32405 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:49:15.297117 32405 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:49:15.297123 32405 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:49:15.297130 32405 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:15.297142 32405 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:49:15.297184 32405 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:49:15.297199 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.297202 32405 net.cpp:165] Memory required for data: 1335809500\nI0821 06:49:15.297209 32405 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:49:15.297216 32405 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:49:15.297222 32405 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:49:15.297230 32405 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:49:15.297238 32405 net.cpp:150] Setting up L3_b4_relu\nI0821 06:49:15.297245 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.297250 32405 net.cpp:165] Memory required for data: 1337857500\nI0821 06:49:15.297255 32405 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.297264 32405 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.297271 32405 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:49:15.297277 32405 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:15.297287 32405 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:15.297338 32405 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:15.297349 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.297356 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.297360 32405 net.cpp:165] Memory required for data: 1341953500\nI0821 06:49:15.297365 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:49:15.297377 32405 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:49:15.297384 32405 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:15.297395 32405 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:49:15.298429 32405 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:49:15.298444 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.298449 32405 net.cpp:165] Memory required for data: 1344001500\nI0821 06:49:15.298458 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:49:15.298467 32405 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:49:15.298475 32405 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:49:15.298486 32405 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:49:15.298763 32405 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:49:15.298779 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.298784 32405 net.cpp:165] Memory required for data: 1346049500\nI0821 06:49:15.298795 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:15.298804 32405 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:49:15.298810 32405 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:49:15.298817 32405 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.298883 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:15.299049 32405 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:49:15.299062 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.299067 32405 net.cpp:165] Memory required for data: 1348097500\nI0821 06:49:15.299077 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:49:15.299084 32405 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:49:15.299094 32405 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:49:15.299101 32405 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:15.299111 32405 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:49:15.299118 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.299123 32405 net.cpp:165] Memory required for data: 1350145500\nI0821 06:49:15.299127 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:49:15.299141 32405 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:49:15.299147 32405 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:49:15.299162 32405 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:49:15.300187 32405 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:49:15.300202 32405 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:15.300207 32405 net.cpp:165] Memory required for data: 1352193500\nI0821 06:49:15.300216 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:49:15.300228 32405 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:49:15.300235 32405 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:49:15.300246 32405 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:49:15.300513 32405 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:49:15.300526 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.300531 32405 net.cpp:165] Memory required for data: 1354241500\nI0821 06:49:15.300541 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:15.300550 32405 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:49:15.300557 32405 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:49:15.300567 32405 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:49:15.300626 32405 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:15.300793 32405 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:49:15.300806 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.300812 32405 net.cpp:165] Memory required for data: 1356289500\nI0821 06:49:15.300820 32405 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:49:15.300829 32405 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:49:15.300837 32405 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:49:15.300843 32405 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:15.300853 32405 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:49:15.300897 32405 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:49:15.300909 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.300915 32405 net.cpp:165] Memory required for data: 1358337500\nI0821 06:49:15.300920 32405 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:49:15.300927 32405 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:49:15.300933 32405 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:49:15.300940 32405 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:49:15.300952 32405 net.cpp:150] Setting up L3_b5_relu\nI0821 06:49:15.300961 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.300964 32405 net.cpp:165] Memory required for data: 1360385500\nI0821 06:49:15.300969 32405 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.300976 32405 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.300982 32405 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:49:15.300989 32405 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:15.300999 32405 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:15.301048 32405 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:15.301060 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.301066 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.301071 32405 net.cpp:165] Memory required for data: 1364481500\nI0821 06:49:15.301076 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:49:15.301087 32405 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:49:15.301093 32405 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:15.301105 32405 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:49:15.302134 32405 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:49:15.302148 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.302153 32405 net.cpp:165] Memory required for data: 1366529500\nI0821 06:49:15.302162 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:49:15.302178 
32405 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:49:15.302184 32405 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:49:15.302196 32405 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:49:15.302472 32405 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:49:15.302489 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.302494 32405 net.cpp:165] Memory required for data: 1368577500\nI0821 06:49:15.302505 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:15.302513 32405 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:49:15.302520 32405 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:49:15.302527 32405 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.302587 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:15.302748 32405 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:49:15.302762 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.302767 32405 net.cpp:165] Memory required for data: 1370625500\nI0821 06:49:15.302775 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:49:15.302786 32405 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:49:15.302793 32405 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:49:15.302800 32405 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:15.302810 32405 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:49:15.302817 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.302821 32405 net.cpp:165] Memory required for data: 1372673500\nI0821 06:49:15.302826 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:49:15.302839 32405 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:49:15.302846 32405 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:49:15.302855 32405 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:49:15.303894 32405 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:49:15.303907 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.303912 32405 net.cpp:165] Memory required for data: 1374721500\nI0821 06:49:15.303921 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:49:15.303936 32405 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:49:15.303942 32405 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:49:15.303953 32405 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:49:15.304227 32405 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:49:15.304240 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304245 32405 net.cpp:165] Memory required for data: 1376769500\nI0821 06:49:15.304255 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:15.304265 32405 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:49:15.304270 32405 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:49:15.304281 32405 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:49:15.304340 32405 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:15.304502 32405 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:49:15.304514 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304519 32405 net.cpp:165] Memory required for data: 1378817500\nI0821 06:49:15.304528 32405 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:49:15.304536 32405 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:49:15.304543 32405 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:49:15.304550 32405 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:15.304561 32405 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:49:15.304600 32405 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:49:15.304611 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304616 32405 net.cpp:165] Memory required for data: 1380865500\nI0821 06:49:15.304621 32405 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:49:15.304628 32405 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:49:15.304641 32405 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:49:15.304651 32405 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:49:15.304661 32405 net.cpp:150] Setting up L3_b6_relu\nI0821 06:49:15.304668 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304673 32405 net.cpp:165] Memory required for data: 1382913500\nI0821 06:49:15.304677 32405 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.304685 32405 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.304690 32405 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:49:15.304698 32405 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:15.304708 32405 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:15.304761 32405 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:15.304774 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304780 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.304785 32405 net.cpp:165] Memory required for data: 1387009500\nI0821 06:49:15.304790 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:49:15.304800 32405 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:49:15.304807 32405 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:15.304819 32405 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:49:15.305851 32405 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:49:15.305871 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.305876 32405 net.cpp:165] Memory required for data: 1389057500\nI0821 06:49:15.305886 32405 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:49:15.305894 32405 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:49:15.305902 32405 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:49:15.305912 32405 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:49:15.306193 32405 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:49:15.306206 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.306211 32405 net.cpp:165] Memory required for data: 1391105500\nI0821 06:49:15.306221 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:15.306231 32405 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:49:15.306237 32405 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:49:15.306246 32405 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.306305 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:15.306466 32405 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:49:15.306479 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.306484 32405 net.cpp:165] Memory required for data: 1393153500\nI0821 06:49:15.306493 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:49:15.306529 32405 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:49:15.306538 32405 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:49:15.306546 32405 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:15.306556 32405 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:49:15.306563 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.306568 32405 net.cpp:165] Memory required for data: 1395201500\nI0821 06:49:15.306573 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:49:15.306584 32405 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:49:15.306589 32405 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:49:15.306602 32405 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:49:15.307633 32405 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:49:15.307648 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.307653 32405 net.cpp:165] Memory required for data: 1397249500\nI0821 06:49:15.307662 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:49:15.307677 32405 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:49:15.307684 32405 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:49:15.307696 32405 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:49:15.307981 32405 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:49:15.307996 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308001 32405 net.cpp:165] Memory required for data: 1399297500\nI0821 06:49:15.308012 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:15.308022 32405 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:49:15.308027 32405 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:49:15.308035 32405 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:49:15.308095 32405 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:15.308261 32405 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:49:15.308275 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308280 32405 net.cpp:165] Memory required for data: 1401345500\nI0821 06:49:15.308290 32405 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:49:15.308300 32405 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:49:15.308307 32405 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:49:15.308315 32405 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:15.308322 32405 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:49:15.308359 32405 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:49:15.308370 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308375 32405 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:49:15.308380 32405 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:49:15.308388 32405 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:49:15.308394 32405 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:49:15.308401 32405 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:49:15.308410 32405 net.cpp:150] Setting up L3_b7_relu\nI0821 06:49:15.308418 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308423 32405 net.cpp:165] Memory required for data: 1405441500\nI0821 06:49:15.308426 32405 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.308434 32405 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.308439 32405 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:49:15.308449 32405 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:15.308459 32405 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:15.308506 32405 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:15.308517 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308524 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.308528 32405 net.cpp:165] Memory required for data: 1409537500\nI0821 06:49:15.308533 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:49:15.308548 32405 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:49:15.308554 32405 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:15.308563 32405 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:49:15.310591 32405 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:49:15.310608 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.310613 32405 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:49:15.310622 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:49:15.310636 32405 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:49:15.310642 32405 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:49:15.310652 32405 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:49:15.310940 32405 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:49:15.310952 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.310966 32405 net.cpp:165] Memory required for data: 1413633500\nI0821 06:49:15.310976 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:15.310986 32405 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:49:15.310992 32405 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:49:15.311000 32405 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.311065 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:15.311231 32405 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:49:15.311247 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.311252 32405 net.cpp:165] Memory required for data: 1415681500\nI0821 06:49:15.311261 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:49:15.311269 32405 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:49:15.311275 32405 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:49:15.311283 32405 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:15.311292 32405 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:49:15.311300 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.311305 32405 net.cpp:165] Memory required for data: 1417729500\nI0821 06:49:15.311308 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:49:15.311322 32405 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:49:15.311328 32405 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:49:15.311339 32405 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:49:15.312371 32405 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:49:15.312386 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.312391 32405 net.cpp:165] Memory required for data: 1419777500\nI0821 06:49:15.312400 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:49:15.312412 32405 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:49:15.312419 32405 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:49:15.312428 32405 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:49:15.312700 32405 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:49:15.312713 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.312717 32405 net.cpp:165] Memory required for data: 1421825500\nI0821 06:49:15.312728 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:15.312741 32405 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:49:15.312747 32405 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:49:15.312754 32405 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:49:15.312813 32405 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:15.312985 32405 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:49:15.312999 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.313004 32405 net.cpp:165] Memory required for data: 1423873500\nI0821 06:49:15.313012 32405 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:49:15.313025 32405 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:49:15.313031 32405 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:49:15.313040 32405 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:15.313047 32405 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:49:15.313083 32405 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:49:15.313096 32405 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:15.313100 32405 net.cpp:165] Memory required for data: 1425921500\nI0821 06:49:15.313105 32405 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:49:15.313112 32405 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:49:15.313118 32405 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:49:15.313128 32405 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:49:15.313138 32405 net.cpp:150] Setting up L3_b8_relu\nI0821 06:49:15.313145 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.313150 32405 net.cpp:165] Memory required for data: 1427969500\nI0821 06:49:15.313161 32405 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.313169 32405 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.313174 32405 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:49:15.313182 32405 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:15.313192 32405 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:15.313243 32405 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:15.313256 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.313261 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.313266 32405 net.cpp:165] Memory required for data: 1432065500\nI0821 06:49:15.313271 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:49:15.313282 32405 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:49:15.313288 32405 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:15.313305 32405 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:49:15.314329 32405 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:49:15.314344 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:49:15.314349 32405 net.cpp:165] Memory required for data: 1434113500\nI0821 06:49:15.314358 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:49:15.314370 32405 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:49:15.314376 32405 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:49:15.314385 32405 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:49:15.314661 32405 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:49:15.314673 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.314678 32405 net.cpp:165] Memory required for data: 1436161500\nI0821 06:49:15.314688 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:15.314697 32405 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:49:15.314704 32405 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:49:15.314712 32405 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.314776 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:15.314947 32405 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:49:15.314961 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.314966 32405 net.cpp:165] Memory required for data: 1438209500\nI0821 06:49:15.314975 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:49:15.314983 32405 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:49:15.314990 32405 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:49:15.314996 32405 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:15.315006 32405 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:49:15.315013 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.315017 32405 net.cpp:165] Memory required for data: 1440257500\nI0821 06:49:15.315022 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:49:15.315035 32405 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:49:15.315042 32405 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:49:15.315053 32405 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:49:15.316089 32405 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:49:15.316104 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.316109 32405 net.cpp:165] Memory required for data: 1442305500\nI0821 06:49:15.316118 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:49:15.316131 32405 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:49:15.316138 32405 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:49:15.316146 32405 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:49:15.316421 32405 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:49:15.316434 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.316445 32405 net.cpp:165] Memory required for data: 1444353500\nI0821 06:49:15.316457 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:15.316468 32405 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:49:15.316475 32405 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:49:15.316483 32405 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:49:15.316545 32405 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:15.316710 32405 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:49:15.316723 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.316728 32405 net.cpp:165] Memory required for data: 1446401500\nI0821 06:49:15.316737 32405 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:49:15.316750 32405 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:49:15.316756 32405 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:49:15.316763 32405 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:15.316773 32405 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:49:15.316807 32405 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:49:15.316819 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.316824 32405 net.cpp:165] Memory required for data: 1448449500\nI0821 06:49:15.316829 32405 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:49:15.316839 32405 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:49:15.316845 32405 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:49:15.316854 32405 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:49:15.316867 32405 net.cpp:150] Setting up L3_b9_relu\nI0821 06:49:15.316875 32405 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:15.316880 32405 net.cpp:165] Memory required for data: 1450497500\nI0821 06:49:15.316885 32405 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:49:15.316893 32405 net.cpp:100] Creating Layer post_pool\nI0821 06:49:15.316900 32405 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:49:15.316907 32405 net.cpp:408] post_pool -> post_pool\nI0821 06:49:15.316943 32405 net.cpp:150] Setting up post_pool\nI0821 06:49:15.316958 32405 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:49:15.316963 32405 net.cpp:165] Memory required for data: 1450529500\nI0821 06:49:15.316968 32405 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:49:15.316979 32405 net.cpp:100] Creating Layer post_FC\nI0821 06:49:15.316985 32405 net.cpp:434] post_FC <- post_pool\nI0821 06:49:15.316994 32405 net.cpp:408] post_FC -> post_FC_top\nI0821 06:49:15.317162 32405 net.cpp:150] Setting up post_FC\nI0821 06:49:15.317175 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.317180 32405 net.cpp:165] Memory required for data: 1450534500\nI0821 06:49:15.317189 32405 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:49:15.317198 32405 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:49:15.317203 32405 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:49:15.317211 32405 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:49:15.317224 32405 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:49:15.317272 32405 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:49:15.317283 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.317291 32405 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:15.317294 32405 net.cpp:165] Memory required for data: 1450544500\nI0821 06:49:15.317299 32405 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:49:15.317312 32405 net.cpp:100] Creating Layer accuracy\nI0821 06:49:15.317317 32405 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:49:15.317324 32405 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:49:15.317332 32405 net.cpp:408] accuracy -> accuracy\nI0821 06:49:15.317344 32405 net.cpp:150] Setting up accuracy\nI0821 06:49:15.317351 32405 net.cpp:157] Top shape: (1)\nI0821 06:49:15.317363 32405 net.cpp:165] Memory required for data: 1450544504\nI0821 06:49:15.317368 32405 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:15.317374 32405 net.cpp:100] Creating Layer loss\nI0821 06:49:15.317380 32405 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:49:15.317386 32405 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:49:15.317394 32405 net.cpp:408] loss -> loss\nI0821 06:49:15.317406 32405 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:15.317531 32405 net.cpp:150] Setting up loss\nI0821 06:49:15.317546 32405 net.cpp:157] Top shape: (1)\nI0821 06:49:15.317551 32405 net.cpp:160]     with loss weight 1\nI0821 06:49:15.317569 32405 net.cpp:165] Memory required for data: 1450544508\nI0821 06:49:15.317574 32405 net.cpp:226] loss needs backward computation.\nI0821 06:49:15.317580 32405 net.cpp:228] accuracy does not need backward computation.\nI0821 06:49:15.317586 32405 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:49:15.317592 32405 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:49:15.317597 32405 net.cpp:226] post_pool needs backward computation.\nI0821 06:49:15.317602 32405 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:49:15.317606 32405 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.317612 32405 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.317616 32405 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.317621 32405 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.317626 32405 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.317631 32405 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.317636 32405 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.317641 32405 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.317646 32405 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:49:15.317651 32405 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:49:15.317656 32405 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.317662 32405 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.317667 32405 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.317672 32405 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.317677 32405 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.317682 32405 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:49:15.317687 32405 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.317692 32405 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.317697 32405 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:49:15.317701 32405 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:49:15.317706 32405 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:49:15.317713 32405 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.317718 32405 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.317723 32405 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.317728 32405 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.317731 32405 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.317736 32405 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.317741 32405 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.317747 32405 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:49:15.317752 32405 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:49:15.317757 32405 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.317762 32405 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.317767 32405 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.317780 32405 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.317785 32405 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.317790 32405 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.317795 32405 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.317800 32405 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.317804 32405 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:49:15.317809 32405 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:49:15.317814 32405 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:49:15.317821 32405 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.317826 32405 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.317831 32405 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.317836 32405 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.317840 32405 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.317845 32405 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.317850 32405 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.317855 32405 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:49:15.317867 32405 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:49:15.317873 32405 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.317879 32405 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.317884 32405 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.317889 32405 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.317895 32405 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.317900 32405 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.317905 32405 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.317910 32405 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.317919 32405 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:49:15.317924 32405 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:49:15.317930 32405 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:49:15.317935 32405 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.317940 32405 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.317945 32405 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.317951 32405 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.317955 32405 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.317960 
32405 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.317966 32405 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.317971 32405 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:49:15.317976 32405 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:49:15.317981 32405 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.317987 32405 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.317992 32405 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.317998 32405 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.318003 32405 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.318008 32405 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.318013 32405 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.318018 32405 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.318023 32405 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:49:15.318028 32405 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:49:15.318040 32405 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:49:15.318045 32405 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:49:15.318051 32405 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.318058 32405 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:49:15.318063 32405 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.318068 32405 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.318073 32405 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.318078 32405 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.318084 32405 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:49:15.318089 32405 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.318094 32405 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.318099 32405 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:49:15.318104 32405 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:49:15.318109 32405 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.318115 32405 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.318120 32405 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.318125 32405 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.318130 32405 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.318136 32405 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.318140 32405 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.318146 32405 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.318151 32405 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:49:15.318157 32405 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:49:15.318162 32405 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.318167 32405 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.318173 32405 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.318178 32405 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.318183 32405 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.318192 32405 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:49:15.318197 32405 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.318202 32405 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.318208 32405 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:49:15.318214 32405 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:49:15.318219 32405 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:49:15.318225 32405 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.318230 32405 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.318235 32405 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.318241 32405 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.318246 32405 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.318251 32405 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.318256 32405 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.318262 32405 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:49:15.318267 32405 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:49:15.318272 32405 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.318279 32405 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.318284 32405 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.318289 32405 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.318295 32405 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.318305 32405 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.318310 32405 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.318315 32405 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.318321 32405 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:49:15.318326 32405 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:49:15.318331 32405 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:49:15.318338 32405 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.318343 32405 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.318348 32405 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.318353 32405 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.318359 32405 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.318364 32405 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.318369 32405 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.318374 32405 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:49:15.318380 32405 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:49:15.318385 32405 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.318392 32405 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.318397 32405 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.318403 32405 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.318408 32405 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.318413 32405 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.318418 32405 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.318423 32405 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.318429 32405 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:49:15.318434 32405 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:49:15.318439 32405 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:49:15.318446 32405 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.318451 32405 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.318457 32405 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.318462 32405 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.318469 32405 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.318473 32405 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.318480 32405 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.318485 32405 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:49:15.318490 32405 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:49:15.318495 32405 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.318502 32405 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.318507 32405 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.318512 32405 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.318517 32405 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.318523 32405 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.318528 32405 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.318533 32405 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.318539 32405 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:49:15.318544 32405 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:49:15.318552 32405 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:49:15.318557 32405 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:49:15.318565 32405 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.318578 32405 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:49:15.318583 32405 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.318588 32405 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.318594 32405 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.318600 32405 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.318605 32405 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:49:15.318610 32405 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.318616 32405 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.318622 32405 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:49:15.318627 32405 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:49:15.318632 32405 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:49:15.318639 32405 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:49:15.318645 32405 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:49:15.318650 32405 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:49:15.318655 32405 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:49:15.318660 32405 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:49:15.318666 32405 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:49:15.318671 32405 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:49:15.318677 32405 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:49:15.318683 32405 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:49:15.318688 32405 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:49:15.318694 32405 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:49:15.318699 32405 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:49:15.318706 32405 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:49:15.318711 32405 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:49:15.318717 32405 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:49:15.318722 32405 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:49:15.318727 32405 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:49:15.318733 32405 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:49:15.318738 32405 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:49:15.318744 32405 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:49:15.318750 32405 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:49:15.318756 32405 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:49:15.318761 32405 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:49:15.318768 32405 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:49:15.318773 32405 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:49:15.318778 32405 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:49:15.318783 32405 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:49:15.318789 32405 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:49:15.318794 32405 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:49:15.318800 32405 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:49:15.318806 32405 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:49:15.318811 32405 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:49:15.318817 32405 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:49:15.318822 32405 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:49:15.318828 32405 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:49:15.318841 32405 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:49:15.318847 32405 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:49:15.318853 32405 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:49:15.318864 32405 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:49:15.318871 32405 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:49:15.318877 32405 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:49:15.318883 32405 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:49:15.318889 32405 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:49:15.318895 32405 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:49:15.318900 32405 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:49:15.318907 32405 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:49:15.318912 32405 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:49:15.318918 32405 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:49:15.318923 32405 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:49:15.318929 32405 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:49:15.318935 32405 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:49:15.318940 32405 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:49:15.318946 32405 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:49:15.318953 32405 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:49:15.318958 32405 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:49:15.318963 32405 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:49:15.318969 32405 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:49:15.318974 32405 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:49:15.318979 32405 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:49:15.318985 32405 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:49:15.318991 32405 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:49:15.318996 32405 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:49:15.319002 32405 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:49:15.319007 32405 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:49:15.319013 32405 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:49:15.319018 32405 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:49:15.319025 32405 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:49:15.319030 32405 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:49:15.319036 32405 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:49:15.319041 32405 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:49:15.319047 32405 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:49:15.319052 32405 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:49:15.319058 32405 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:49:15.319064 32405 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:49:15.319069 32405 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:49:15.319074 32405 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:49:15.319080 32405 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:49:15.319085 32405 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:49:15.319092 32405 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:49:15.319097 32405 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:49:15.319103 32405 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:49:15.319109 32405 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:49:15.319121 32405 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:49:15.319128 32405 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:49:15.319133 32405 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:49:15.319139 32405 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:49:15.319144 32405 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:49:15.319150 32405 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:49:15.319155 32405 net.cpp:226] pre_relu needs backward computation.\nI0821 06:49:15.319161 32405 net.cpp:226] pre_scale needs backward computation.\nI0821 06:49:15.319166 32405 net.cpp:226] pre_bn needs backward computation.\nI0821 06:49:15.319171 32405 net.cpp:226] pre_conv needs backward computation.\nI0821 06:49:15.319178 32405 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:49:15.319185 32405 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:49:15.319190 32405 net.cpp:270] This network produces output accuracy\nI0821 06:49:15.319196 32405 net.cpp:270] This network produces output loss\nI0821 06:49:15.319527 32405 net.cpp:283] Network initialization done.\nI0821 06:49:15.320536 32405 solver.cpp:60] Solver scaffolding done.\nI0821 06:49:15.544598 32405 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:49:15.908267 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:15.908339 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:15.915436 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:16.138577 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:16.138660 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:16.173912 32405 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:49:16.173991 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:16.635673 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:16.635738 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:16.643337 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:16.885942 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:16.886077 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:16.937077 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:16.937208 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:17.445698 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:17.445762 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:17.454408 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:17.727839 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:17.728003 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:17.799599 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:17.799757 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:17.884217 32405 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:49:18.366082 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:18.366159 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:49:18.376215 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:18.665729 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:18.665921 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:18.757613 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:18.757792 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:19.407771 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:19.407835 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:19.418262 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:19.724472 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:19.724689 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:19.837491 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:19.837695 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:20.545097 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:20.545161 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:20.556965 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:20.898551 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:20.898797 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:21.031400 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:21.031636 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:49:21.807078 32405 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:21.807142 32405 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:21.819257 32405 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:21.907825 32423 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:22.012699 32419 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:22.263715 32405 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:22.263957 32405 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:22.416460 32405 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:22.416687 32405 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:22.587580 32405 parallel.cpp:425] Starting Optimization\nI0821 06:49:22.588788 32405 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:49:22.588804 32405 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:49:22.593605 32405 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:50:42.251674 32405 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:50:42.251972 32405 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:50:46.298082 32405 solver.cpp:228] Iteration 0, loss = 3.45608\nI0821 06:50:46.298126 32405 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI0821 06:50:46.298148 32405 solver.cpp:244]     Train net output #1: loss = 3.45608 (* 1 = 3.45608 loss)\nI0821 06:50:46.348443 32405 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:53:04.296870 32405 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:54:23.478951 32405 solver.cpp:404]     Test net output #0: accuracy = 0.23628\nI0821 06:54:23.479180 32405 solver.cpp:404]     Test net output #1: loss = 2.05126 (* 1 = 2.05126 loss)\nI0821 06:54:24.764936 32405 
solver.cpp:228] Iteration 100, loss = 1.67486\nI0821 06:54:24.764982 32405 solver.cpp:244]     Train net output #0: accuracy = 0.352\nI0821 06:54:24.764998 32405 solver.cpp:244]     Train net output #1: loss = 1.67486 (* 1 = 1.67486 loss)\nI0821 06:54:24.887809 32405 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:56:42.742434 32405 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:58:01.926841 32405 solver.cpp:404]     Test net output #0: accuracy = 0.46876\nI0821 06:58:01.927093 32405 solver.cpp:404]     Test net output #1: loss = 1.46264 (* 1 = 1.46264 loss)\nI0821 06:58:03.213733 32405 solver.cpp:228] Iteration 200, loss = 1.2191\nI0821 06:58:03.213778 32405 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0821 06:58:03.213793 32405 solver.cpp:244]     Train net output #1: loss = 1.2191 (* 1 = 1.2191 loss)\nI0821 06:58:03.333410 32405 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 07:00:21.195358 32405 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 07:01:40.398164 32405 solver.cpp:404]     Test net output #0: accuracy = 0.58728\nI0821 07:01:40.398391 32405 solver.cpp:404]     Test net output #1: loss = 1.19745 (* 1 = 1.19745 loss)\nI0821 07:01:41.684499 32405 solver.cpp:228] Iteration 300, loss = 1.02408\nI0821 07:01:41.684541 32405 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0821 07:01:41.684557 32405 solver.cpp:244]     Train net output #1: loss = 1.02408 (* 1 = 1.02408 loss)\nI0821 07:01:41.807703 32405 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:03:59.622092 32405 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:05:18.821023 32405 solver.cpp:404]     Test net output #0: accuracy = 0.62436\nI0821 07:05:18.821276 32405 solver.cpp:404]     Test net output #1: loss = 1.17206 (* 1 = 1.17206 loss)\nI0821 07:05:20.107923 32405 solver.cpp:228] Iteration 400, loss = 0.801417\nI0821 07:05:20.107970 32405 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0821 07:05:20.107986 32405 
solver.cpp:244]     Train net output #1: loss = 0.801417 (* 1 = 0.801417 loss)\nI0821 07:05:20.226209 32405 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:07:38.104310 32405 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:08:57.311014 32405 solver.cpp:404]     Test net output #0: accuracy = 0.66192\nI0821 07:08:57.311249 32405 solver.cpp:404]     Test net output #1: loss = 1.04029 (* 1 = 1.04029 loss)\nI0821 07:08:58.597626 32405 solver.cpp:228] Iteration 500, loss = 0.647091\nI0821 07:08:58.597667 32405 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 07:08:58.597681 32405 solver.cpp:244]     Train net output #1: loss = 0.647091 (* 1 = 0.647091 loss)\nI0821 07:08:58.723274 32405 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:11:16.686630 32405 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:12:35.896230 32405 solver.cpp:404]     Test net output #0: accuracy = 0.67976\nI0821 07:12:35.896488 32405 solver.cpp:404]     Test net output #1: loss = 0.972064 (* 1 = 0.972064 loss)\nI0821 07:12:37.181979 32405 solver.cpp:228] Iteration 600, loss = 0.549513\nI0821 07:12:37.182019 32405 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 07:12:37.182034 32405 solver.cpp:244]     Train net output #1: loss = 0.549513 (* 1 = 0.549513 loss)\nI0821 07:12:37.304131 32405 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:14:55.166501 32405 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:16:14.378695 32405 solver.cpp:404]     Test net output #0: accuracy = 0.68488\nI0821 07:16:14.378952 32405 solver.cpp:404]     Test net output #1: loss = 1.02853 (* 1 = 1.02853 loss)\nI0821 07:16:15.665205 32405 solver.cpp:228] Iteration 700, loss = 0.500962\nI0821 07:16:15.665243 32405 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 07:16:15.665257 32405 solver.cpp:244]     Train net output #1: loss = 0.500962 (* 1 = 0.500962 loss)\nI0821 07:16:15.788425 32405 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0821 07:18:33.632520 32405 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:19:52.836534 32405 solver.cpp:404]     Test net output #0: accuracy = 0.63448\nI0821 07:19:52.836802 32405 solver.cpp:404]     Test net output #1: loss = 1.31402 (* 1 = 1.31402 loss)\nI0821 07:19:54.122836 32405 solver.cpp:228] Iteration 800, loss = 0.471955\nI0821 07:19:54.122875 32405 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 07:19:54.122890 32405 solver.cpp:244]     Train net output #1: loss = 0.471955 (* 1 = 0.471955 loss)\nI0821 07:19:54.244046 32405 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:22:12.207367 32405 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:23:31.449453 32405 solver.cpp:404]     Test net output #0: accuracy = 0.6736\nI0821 07:23:31.449697 32405 solver.cpp:404]     Test net output #1: loss = 1.08637 (* 1 = 1.08637 loss)\nI0821 07:23:32.736254 32405 solver.cpp:228] Iteration 900, loss = 0.42895\nI0821 07:23:32.736299 32405 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 07:23:32.736323 32405 solver.cpp:244]     Train net output #1: loss = 0.42895 (* 1 = 0.42895 loss)\nI0821 07:23:32.861220 32405 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:25:50.650614 32405 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:27:09.863955 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7608\nI0821 07:27:09.864198 32405 solver.cpp:404]     Test net output #1: loss = 0.787123 (* 1 = 0.787123 loss)\nI0821 07:27:11.150492 32405 solver.cpp:228] Iteration 1000, loss = 0.33261\nI0821 07:27:11.150537 32405 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 07:27:11.150559 32405 solver.cpp:244]     Train net output #1: loss = 0.33261 (* 1 = 0.33261 loss)\nI0821 07:27:11.267408 32405 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:29:29.142251 32405 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:30:48.434118 32405 solver.cpp:404]     Test net output #0: 
accuracy = 0.7128\nI0821 07:30:48.434398 32405 solver.cpp:404]     Test net output #1: loss = 1.12784 (* 1 = 1.12784 loss)\nI0821 07:30:49.721511 32405 solver.cpp:228] Iteration 1100, loss = 0.340457\nI0821 07:30:49.721554 32405 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 07:30:49.721578 32405 solver.cpp:244]     Train net output #1: loss = 0.340456 (* 1 = 0.340456 loss)\nI0821 07:30:49.838953 32405 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:33:07.841807 32405 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:34:27.158972 32405 solver.cpp:404]     Test net output #0: accuracy = 0.639\nI0821 07:34:27.159232 32405 solver.cpp:404]     Test net output #1: loss = 1.52562 (* 1 = 1.52562 loss)\nI0821 07:34:28.445741 32405 solver.cpp:228] Iteration 1200, loss = 0.210529\nI0821 07:34:28.445785 32405 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 07:34:28.445807 32405 solver.cpp:244]     Train net output #1: loss = 0.210529 (* 1 = 0.210529 loss)\nI0821 07:34:28.571405 32405 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:36:46.402638 32405 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:38:05.633409 32405 solver.cpp:404]     Test net output #0: accuracy = 0.73712\nI0821 07:38:05.633673 32405 solver.cpp:404]     Test net output #1: loss = 0.897689 (* 1 = 0.897689 loss)\nI0821 07:38:06.920505 32405 solver.cpp:228] Iteration 1300, loss = 0.318603\nI0821 07:38:06.920545 32405 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:38:06.920569 32405 solver.cpp:244]     Train net output #1: loss = 0.318603 (* 1 = 0.318603 loss)\nI0821 07:38:07.046288 32405 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:40:25.025264 32405 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:41:44.326937 32405 solver.cpp:404]     Test net output #0: accuracy = 0.73304\nI0821 07:41:44.327205 32405 solver.cpp:404]     Test net output #1: loss = 1.04238 (* 1 = 1.04238 loss)\nI0821 
07:41:45.614951 32405 solver.cpp:228] Iteration 1400, loss = 0.188597\nI0821 07:41:45.614994 32405 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 07:41:45.615018 32405 solver.cpp:244]     Train net output #1: loss = 0.188597 (* 1 = 0.188597 loss)\nI0821 07:41:45.733551 32405 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:44:03.650660 32405 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:45:22.906956 32405 solver.cpp:404]     Test net output #0: accuracy = 0.71908\nI0821 07:45:22.907224 32405 solver.cpp:404]     Test net output #1: loss = 1.10713 (* 1 = 1.10713 loss)\nI0821 07:45:24.193701 32405 solver.cpp:228] Iteration 1500, loss = 0.264466\nI0821 07:45:24.193742 32405 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 07:45:24.193765 32405 solver.cpp:244]     Train net output #1: loss = 0.264466 (* 1 = 0.264466 loss)\nI0821 07:45:24.319986 32405 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:47:42.226480 32405 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:49:01.434062 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7376\nI0821 07:49:01.434337 32405 solver.cpp:404]     Test net output #1: loss = 0.975711 (* 1 = 0.975711 loss)\nI0821 07:49:02.722462 32405 solver.cpp:228] Iteration 1600, loss = 0.216321\nI0821 07:49:02.722507 32405 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 07:49:02.722530 32405 solver.cpp:244]     Train net output #1: loss = 0.216321 (* 1 = 0.216321 loss)\nI0821 07:49:02.842365 32405 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:51:20.852550 32405 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:52:40.044272 32405 solver.cpp:404]     Test net output #0: accuracy = 0.76964\nI0821 07:52:40.044531 32405 solver.cpp:404]     Test net output #1: loss = 0.886513 (* 1 = 0.886513 loss)\nI0821 07:52:41.334370 32405 solver.cpp:228] Iteration 1700, loss = 0.276898\nI0821 07:52:41.334415 32405 solver.cpp:244]     Train net output #0: 
accuracy = 0.92\nI0821 07:52:41.334439 32405 solver.cpp:244]     Train net output #1: loss = 0.276898 (* 1 = 0.276898 loss)\nI0821 07:52:41.458665 32405 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:54:59.328800 32405 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:56:18.498832 32405 solver.cpp:404]     Test net output #0: accuracy = 0.77104\nI0821 07:56:18.499095 32405 solver.cpp:404]     Test net output #1: loss = 1.07193 (* 1 = 1.07193 loss)\nI0821 07:56:19.786595 32405 solver.cpp:228] Iteration 1800, loss = 0.272881\nI0821 07:56:19.786638 32405 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:56:19.786653 32405 solver.cpp:244]     Train net output #1: loss = 0.272881 (* 1 = 0.272881 loss)\nI0821 07:56:19.906774 32405 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:58:37.788704 32405 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 07:59:56.964879 32405 solver.cpp:404]     Test net output #0: accuracy = 0.74216\nI0821 07:59:56.965145 32405 solver.cpp:404]     Test net output #1: loss = 1.13745 (* 1 = 1.13745 loss)\nI0821 07:59:58.252070 32405 solver.cpp:228] Iteration 1900, loss = 0.237753\nI0821 07:59:58.252112 32405 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 07:59:58.252127 32405 solver.cpp:244]     Train net output #1: loss = 0.237753 (* 1 = 0.237753 loss)\nI0821 07:59:58.377429 32405 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 08:02:16.178532 32405 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 08:03:35.421917 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7558\nI0821 08:03:35.422184 32405 solver.cpp:404]     Test net output #1: loss = 1.08862 (* 1 = 1.08862 loss)\nI0821 08:03:36.709359 32405 solver.cpp:228] Iteration 2000, loss = 0.214801\nI0821 08:03:36.709403 32405 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 08:03:36.709417 32405 solver.cpp:244]     Train net output #1: loss = 0.214801 (* 1 = 0.214801 loss)\nI0821 
08:03:36.829062 32405 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:05:54.713639 32405 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:07:13.944480 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78296\nI0821 08:07:13.944744 32405 solver.cpp:404]     Test net output #1: loss = 0.998743 (* 1 = 0.998743 loss)\nI0821 08:07:15.230902 32405 solver.cpp:228] Iteration 2100, loss = 0.14381\nI0821 08:07:15.230942 32405 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:07:15.230957 32405 solver.cpp:244]     Train net output #1: loss = 0.14381 (* 1 = 0.14381 loss)\nI0821 08:07:15.353802 32405 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:09:33.134565 32405 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:10:52.368852 32405 solver.cpp:404]     Test net output #0: accuracy = 0.68724\nI0821 08:10:52.369117 32405 solver.cpp:404]     Test net output #1: loss = 1.58464 (* 1 = 1.58464 loss)\nI0821 08:10:53.655946 32405 solver.cpp:228] Iteration 2200, loss = 0.171965\nI0821 08:10:53.655989 32405 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:10:53.656005 32405 solver.cpp:244]     Train net output #1: loss = 0.171965 (* 1 = 0.171965 loss)\nI0821 08:10:53.776998 32405 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:13:11.664672 32405 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:14:30.845993 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7638\nI0821 08:14:30.846257 32405 solver.cpp:404]     Test net output #1: loss = 1.12295 (* 1 = 1.12295 loss)\nI0821 08:14:32.132491 32405 solver.cpp:228] Iteration 2300, loss = 0.132535\nI0821 08:14:32.132535 32405 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:14:32.132550 32405 solver.cpp:244]     Train net output #1: loss = 0.132535 (* 1 = 0.132535 loss)\nI0821 08:14:32.252928 32405 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:16:50.128501 32405 solver.cpp:337] Iteration 2400, Testing net 
(#0)\nI0821 08:18:09.310408 32405 solver.cpp:404]     Test net output #0: accuracy = 0.76176\nI0821 08:18:09.310672 32405 solver.cpp:404]     Test net output #1: loss = 1.15725 (* 1 = 1.15725 loss)\nI0821 08:18:10.596729 32405 solver.cpp:228] Iteration 2400, loss = 0.23397\nI0821 08:18:10.596772 32405 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:18:10.596787 32405 solver.cpp:244]     Train net output #1: loss = 0.23397 (* 1 = 0.23397 loss)\nI0821 08:18:10.723160 32405 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:20:28.566613 32405 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:21:47.738056 32405 solver.cpp:404]     Test net output #0: accuracy = 0.75864\nI0821 08:21:47.738318 32405 solver.cpp:404]     Test net output #1: loss = 0.9322 (* 1 = 0.9322 loss)\nI0821 08:21:49.024477 32405 solver.cpp:228] Iteration 2500, loss = 0.14371\nI0821 08:21:49.024521 32405 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:21:49.024536 32405 solver.cpp:244]     Train net output #1: loss = 0.14371 (* 1 = 0.14371 loss)\nI0821 08:21:49.150130 32405 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:24:07.035864 32405 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:25:26.302840 32405 solver.cpp:404]     Test net output #0: accuracy = 0.77208\nI0821 08:25:26.303104 32405 solver.cpp:404]     Test net output #1: loss = 0.920669 (* 1 = 0.920669 loss)\nI0821 08:25:27.590364 32405 solver.cpp:228] Iteration 2600, loss = 0.106151\nI0821 08:25:27.590411 32405 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:25:27.590436 32405 solver.cpp:244]     Train net output #1: loss = 0.106151 (* 1 = 0.106151 loss)\nI0821 08:25:27.703301 32405 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:27:45.733211 32405 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:29:05.010545 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78432\nI0821 08:29:05.010812 32405 solver.cpp:404]     Test net 
output #1: loss = 0.965428 (* 1 = 0.965428 loss)\nI0821 08:29:06.297879 32405 solver.cpp:228] Iteration 2700, loss = 0.12896\nI0821 08:29:06.297924 32405 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:29:06.297947 32405 solver.cpp:244]     Train net output #1: loss = 0.12896 (* 1 = 0.12896 loss)\nI0821 08:29:06.415277 32405 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:31:24.215766 32405 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:32:43.535377 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7712\nI0821 08:32:43.535643 32405 solver.cpp:404]     Test net output #1: loss = 1.00822 (* 1 = 1.00822 loss)\nI0821 08:32:44.823503 32405 solver.cpp:228] Iteration 2800, loss = 0.082069\nI0821 08:32:44.823547 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 08:32:44.823571 32405 solver.cpp:244]     Train net output #1: loss = 0.082069 (* 1 = 0.082069 loss)\nI0821 08:32:44.945219 32405 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:35:02.703272 32405 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:36:22.014715 32405 solver.cpp:404]     Test net output #0: accuracy = 0.77612\nI0821 08:36:22.014981 32405 solver.cpp:404]     Test net output #1: loss = 1.20093 (* 1 = 1.20093 loss)\nI0821 08:36:23.301803 32405 solver.cpp:228] Iteration 2900, loss = 0.165938\nI0821 08:36:23.301846 32405 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:36:23.301870 32405 solver.cpp:244]     Train net output #1: loss = 0.165938 (* 1 = 0.165938 loss)\nI0821 08:36:23.418936 32405 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:38:41.229861 32405 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:40:00.399798 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7968\nI0821 08:40:00.400061 32405 solver.cpp:404]     Test net output #1: loss = 0.978995 (* 1 = 0.978995 loss)\nI0821 08:40:01.685670 32405 solver.cpp:228] Iteration 3000, loss = 0.094853\nI0821 
08:40:01.685712 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 08:40:01.685729 32405 solver.cpp:244]     Train net output #1: loss = 0.094853 (* 1 = 0.094853 loss)\nI0821 08:40:01.806932 32405 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:42:19.691429 32405 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:43:38.868398 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78692\nI0821 08:43:38.868661 32405 solver.cpp:404]     Test net output #1: loss = 1.05523 (* 1 = 1.05523 loss)\nI0821 08:43:40.154657 32405 solver.cpp:228] Iteration 3100, loss = 0.0585055\nI0821 08:43:40.154698 32405 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 08:43:40.154714 32405 solver.cpp:244]     Train net output #1: loss = 0.0585055 (* 1 = 0.0585055 loss)\nI0821 08:43:40.274307 32405 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:45:58.221156 32405 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:47:17.386073 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78044\nI0821 08:47:17.386334 32405 solver.cpp:404]     Test net output #1: loss = 1.09714 (* 1 = 1.09714 loss)\nI0821 08:47:18.671809 32405 solver.cpp:228] Iteration 3200, loss = 0.0941738\nI0821 08:47:18.671849 32405 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:47:18.671865 32405 solver.cpp:244]     Train net output #1: loss = 0.0941738 (* 1 = 0.0941738 loss)\nI0821 08:47:18.795321 32405 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:49:36.805318 32405 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:50:55.977783 32405 solver.cpp:404]     Test net output #0: accuracy = 0.79996\nI0821 08:50:55.978045 32405 solver.cpp:404]     Test net output #1: loss = 0.838492 (* 1 = 0.838492 loss)\nI0821 08:50:57.263566 32405 solver.cpp:228] Iteration 3300, loss = 0.170973\nI0821 08:50:57.263607 32405 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 08:50:57.263622 32405 solver.cpp:244]     Train 
net output #1: loss = 0.170973 (* 1 = 0.170973 loss)\nI0821 08:50:57.385257 32405 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:53:15.234580 32405 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:54:34.409874 32405 solver.cpp:404]     Test net output #0: accuracy = 0.803\nI0821 08:54:34.410151 32405 solver.cpp:404]     Test net output #1: loss = 0.954375 (* 1 = 0.954375 loss)\nI0821 08:54:35.696414 32405 solver.cpp:228] Iteration 3400, loss = 0.0912706\nI0821 08:54:35.696455 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 08:54:35.696471 32405 solver.cpp:244]     Train net output #1: loss = 0.0912706 (* 1 = 0.0912706 loss)\nI0821 08:54:35.814254 32405 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:56:53.704340 32405 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:58:12.875244 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80808\nI0821 08:58:12.875491 32405 solver.cpp:404]     Test net output #1: loss = 0.849542 (* 1 = 0.849542 loss)\nI0821 08:58:14.161182 32405 solver.cpp:228] Iteration 3500, loss = 0.0621638\nI0821 08:58:14.161223 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 08:58:14.161239 32405 solver.cpp:244]     Train net output #1: loss = 0.0621638 (* 1 = 0.0621638 loss)\nI0821 08:58:14.277740 32405 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 09:00:32.205169 32405 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 09:01:51.377111 32405 solver.cpp:404]     Test net output #0: accuracy = 0.76604\nI0821 09:01:51.377349 32405 solver.cpp:404]     Test net output #1: loss = 1.21503 (* 1 = 1.21503 loss)\nI0821 09:01:52.663141 32405 solver.cpp:228] Iteration 3600, loss = 0.0566429\nI0821 09:01:52.663183 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 09:01:52.663199 32405 solver.cpp:244]     Train net output #1: loss = 0.0566429 (* 1 = 0.0566429 loss)\nI0821 09:01:52.781548 32405 sgd_solver.cpp:166] Iteration 3600, lr = 
2.188\nI0821 09:04:10.681063 32405 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:05:29.870259 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78136\nI0821 09:05:29.870517 32405 solver.cpp:404]     Test net output #1: loss = 1.03811 (* 1 = 1.03811 loss)\nI0821 09:05:31.156105 32405 solver.cpp:228] Iteration 3700, loss = 0.117045\nI0821 09:05:31.156147 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:05:31.156163 32405 solver.cpp:244]     Train net output #1: loss = 0.117045 (* 1 = 0.117045 loss)\nI0821 09:05:31.276875 32405 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:07:49.107203 32405 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:09:08.298218 32405 solver.cpp:404]     Test net output #0: accuracy = 0.81072\nI0821 09:09:08.298482 32405 solver.cpp:404]     Test net output #1: loss = 0.898091 (* 1 = 0.898091 loss)\nI0821 09:09:09.585100 32405 solver.cpp:228] Iteration 3800, loss = 0.0505925\nI0821 09:09:09.585142 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 09:09:09.585158 32405 solver.cpp:244]     Train net output #1: loss = 0.0505925 (* 1 = 0.0505925 loss)\nI0821 09:09:09.705389 32405 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:11:27.277878 32405 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:12:46.471690 32405 solver.cpp:404]     Test net output #0: accuracy = 0.74876\nI0821 09:12:46.471954 32405 solver.cpp:404]     Test net output #1: loss = 1.3361 (* 1 = 1.3361 loss)\nI0821 09:12:47.758638 32405 solver.cpp:228] Iteration 3900, loss = 0.0818959\nI0821 09:12:47.758682 32405 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 09:12:47.758697 32405 solver.cpp:244]     Train net output #1: loss = 0.0818959 (* 1 = 0.0818959 loss)\nI0821 09:12:47.880576 32405 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:15:05.446936 32405 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:16:24.646661 32405 solver.cpp:404]     
Test net output #0: accuracy = 0.76904\nI0821 09:16:24.646906 32405 solver.cpp:404]     Test net output #1: loss = 1.18452 (* 1 = 1.18452 loss)\nI0821 09:16:25.933990 32405 solver.cpp:228] Iteration 4000, loss = 0.144907\nI0821 09:16:25.934031 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:16:25.934046 32405 solver.cpp:244]     Train net output #1: loss = 0.144907 (* 1 = 0.144907 loss)\nI0821 09:16:26.053443 32405 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:18:43.719442 32405 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:20:02.919155 32405 solver.cpp:404]     Test net output #0: accuracy = 0.7348\nI0821 09:20:02.919404 32405 solver.cpp:404]     Test net output #1: loss = 1.77868 (* 1 = 1.77868 loss)\nI0821 09:20:04.205605 32405 solver.cpp:228] Iteration 4100, loss = 0.0359816\nI0821 09:20:04.205646 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:20:04.205662 32405 solver.cpp:244]     Train net output #1: loss = 0.0359816 (* 1 = 0.0359816 loss)\nI0821 09:20:04.323276 32405 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:22:21.989742 32405 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:23:41.183660 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0821 09:23:41.183902 32405 solver.cpp:404]     Test net output #1: loss = 0.944673 (* 1 = 0.944673 loss)\nI0821 09:23:42.470073 32405 solver.cpp:228] Iteration 4200, loss = 0.146287\nI0821 09:23:42.470114 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:23:42.470130 32405 solver.cpp:244]     Train net output #1: loss = 0.146287 (* 1 = 0.146287 loss)\nI0821 09:23:42.587560 32405 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:26:00.231561 32405 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:27:19.421677 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8148\nI0821 09:27:19.421952 32405 solver.cpp:404]     Test net output #1: loss = 0.905289 (* 1 = 0.905289 
loss)\nI0821 09:27:20.707901 32405 solver.cpp:228] Iteration 4300, loss = 0.119137\nI0821 09:27:20.707947 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:27:20.707963 32405 solver.cpp:244]     Train net output #1: loss = 0.119137 (* 1 = 0.119137 loss)\nI0821 09:27:20.823662 32405 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:29:38.386685 32405 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:30:57.579646 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80492\nI0821 09:30:57.579913 32405 solver.cpp:404]     Test net output #1: loss = 0.966904 (* 1 = 0.966904 loss)\nI0821 09:30:58.866186 32405 solver.cpp:228] Iteration 4400, loss = 0.0867628\nI0821 09:30:58.866231 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:30:58.866247 32405 solver.cpp:244]     Train net output #1: loss = 0.0867627 (* 1 = 0.0867627 loss)\nI0821 09:30:58.984498 32405 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:33:16.617341 32405 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:34:35.822744 32405 solver.cpp:404]     Test net output #0: accuracy = 0.79924\nI0821 09:34:35.823006 32405 solver.cpp:404]     Test net output #1: loss = 1.0749 (* 1 = 1.0749 loss)\nI0821 09:34:37.109622 32405 solver.cpp:228] Iteration 4500, loss = 0.0990959\nI0821 09:34:37.109664 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:34:37.109679 32405 solver.cpp:244]     Train net output #1: loss = 0.0990958 (* 1 = 0.0990958 loss)\nI0821 09:34:37.226490 32405 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:36:54.888662 32405 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:38:14.084051 32405 solver.cpp:404]     Test net output #0: accuracy = 0.76616\nI0821 09:38:14.084316 32405 solver.cpp:404]     Test net output #1: loss = 1.1629 (* 1 = 1.1629 loss)\nI0821 09:38:15.370321 32405 solver.cpp:228] Iteration 4600, loss = 0.0483959\nI0821 09:38:15.370367 32405 solver.cpp:244]     Train 
net output #0: accuracy = 0.984\nI0821 09:38:15.370383 32405 solver.cpp:244]     Train net output #1: loss = 0.0483958 (* 1 = 0.0483958 loss)\nI0821 09:38:15.487954 32405 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:40:33.127650 32405 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:41:52.333376 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80176\nI0821 09:41:52.333600 32405 solver.cpp:404]     Test net output #1: loss = 0.980751 (* 1 = 0.980751 loss)\nI0821 09:41:53.620090 32405 solver.cpp:228] Iteration 4700, loss = 0.118542\nI0821 09:41:53.620132 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:41:53.620148 32405 solver.cpp:244]     Train net output #1: loss = 0.118542 (* 1 = 0.118542 loss)\nI0821 09:41:53.737104 32405 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:44:11.462316 32405 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:45:30.657783 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80508\nI0821 09:45:30.658025 32405 solver.cpp:404]     Test net output #1: loss = 0.970896 (* 1 = 0.970896 loss)\nI0821 09:45:31.943964 32405 solver.cpp:228] Iteration 4800, loss = 0.125061\nI0821 09:45:31.944008 32405 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 09:45:31.944025 32405 solver.cpp:244]     Train net output #1: loss = 0.125061 (* 1 = 0.125061 loss)\nI0821 09:45:32.062835 32405 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:47:49.895604 32405 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:49:09.086586 32405 solver.cpp:404]     Test net output #0: accuracy = 0.75988\nI0821 09:49:09.086846 32405 solver.cpp:404]     Test net output #1: loss = 1.28897 (* 1 = 1.28897 loss)\nI0821 09:49:10.373389 32405 solver.cpp:228] Iteration 4900, loss = 0.1286\nI0821 09:49:10.373433 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:49:10.373450 32405 solver.cpp:244]     Train net output #1: loss = 0.1286 (* 1 = 0.1286 
loss)\nI0821 09:49:10.490582 32405 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 09:51:28.009500 32405 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:52:47.169754 32405 solver.cpp:404]     Test net output #0: accuracy = 0.77464\nI0821 09:52:47.170022 32405 solver.cpp:404]     Test net output #1: loss = 1.23872 (* 1 = 1.23872 loss)\nI0821 09:52:48.456555 32405 solver.cpp:228] Iteration 5000, loss = 0.0671536\nI0821 09:52:48.456599 32405 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 09:52:48.456615 32405 solver.cpp:244]     Train net output #1: loss = 0.0671536 (* 1 = 0.0671536 loss)\nI0821 09:52:48.572713 32405 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:55:06.175097 32405 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:56:25.328397 32405 solver.cpp:404]     Test net output #0: accuracy = 0.79776\nI0821 09:56:25.328635 32405 solver.cpp:404]     Test net output #1: loss = 1.02946 (* 1 = 1.02946 loss)\nI0821 09:56:26.614387 32405 solver.cpp:228] Iteration 5100, loss = 0.0767431\nI0821 09:56:26.614433 32405 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 09:56:26.614449 32405 solver.cpp:244]     Train net output #1: loss = 0.0767431 (* 1 = 0.0767431 loss)\nI0821 09:56:26.734730 32405 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 09:58:44.385068 32405 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 10:00:03.538075 32405 solver.cpp:404]     Test net output #0: accuracy = 0.767\nI0821 10:00:03.538321 32405 solver.cpp:404]     Test net output #1: loss = 1.20336 (* 1 = 1.20336 loss)\nI0821 10:00:04.824570 32405 solver.cpp:228] Iteration 5200, loss = 0.0674753\nI0821 10:00:04.824614 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:00:04.824630 32405 solver.cpp:244]     Train net output #1: loss = 0.0674752 (* 1 = 0.0674752 loss)\nI0821 10:00:04.945243 32405 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 10:02:22.521608 32405 solver.cpp:337] Iteration 
5300, Testing net (#0)\nI0821 10:03:41.671185 32405 solver.cpp:404]     Test net output #0: accuracy = 0.78352\nI0821 10:03:41.671427 32405 solver.cpp:404]     Test net output #1: loss = 1.15753 (* 1 = 1.15753 loss)\nI0821 10:03:42.958082 32405 solver.cpp:228] Iteration 5300, loss = 0.0857166\nI0821 10:03:42.958127 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:03:42.958143 32405 solver.cpp:244]     Train net output #1: loss = 0.0857165 (* 1 = 0.0857165 loss)\nI0821 10:03:43.075278 32405 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:06:00.693979 32405 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:07:19.852460 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83504\nI0821 10:07:19.852730 32405 solver.cpp:404]     Test net output #1: loss = 0.847826 (* 1 = 0.847826 loss)\nI0821 10:07:21.138712 32405 solver.cpp:228] Iteration 5400, loss = 0.0751818\nI0821 10:07:21.138756 32405 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:07:21.138772 32405 solver.cpp:244]     Train net output #1: loss = 0.0751818 (* 1 = 0.0751818 loss)\nI0821 10:07:21.263455 32405 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:09:38.810654 32405 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:10:57.963480 32405 solver.cpp:404]     Test net output #0: accuracy = 0.80756\nI0821 10:10:57.963740 32405 solver.cpp:404]     Test net output #1: loss = 0.955809 (* 1 = 0.955809 loss)\nI0821 10:10:59.249732 32405 solver.cpp:228] Iteration 5500, loss = 0.0357322\nI0821 10:10:59.249778 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:10:59.249794 32405 solver.cpp:244]     Train net output #1: loss = 0.0357321 (* 1 = 0.0357321 loss)\nI0821 10:10:59.369076 32405 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:13:17.123412 32405 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:14:36.272521 32405 solver.cpp:404]     Test net output #0: accuracy = 0.79588\nI0821 
10:14:36.272763 32405 solver.cpp:404]     Test net output #1: loss = 1.07997 (* 1 = 1.07997 loss)\nI0821 10:14:37.559146 32405 solver.cpp:228] Iteration 5600, loss = 0.0672835\nI0821 10:14:37.559188 32405 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:14:37.559204 32405 solver.cpp:244]     Train net output #1: loss = 0.0672835 (* 1 = 0.0672835 loss)\nI0821 10:14:37.679149 32405 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:16:55.270632 32405 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:18:14.429222 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8188\nI0821 10:18:14.429461 32405 solver.cpp:404]     Test net output #1: loss = 0.795271 (* 1 = 0.795271 loss)\nI0821 10:18:15.715821 32405 solver.cpp:228] Iteration 5700, loss = 0.0357664\nI0821 10:18:15.715865 32405 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:18:15.715881 32405 solver.cpp:244]     Train net output #1: loss = 0.0357664 (* 1 = 0.0357664 loss)\nI0821 10:18:15.831292 32405 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:20:33.415956 32405 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:21:52.571442 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8084\nI0821 10:21:52.571707 32405 solver.cpp:404]     Test net output #1: loss = 1.02025 (* 1 = 1.02025 loss)\nI0821 10:21:53.857601 32405 solver.cpp:228] Iteration 5800, loss = 0.13623\nI0821 10:21:53.857645 32405 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:21:53.857661 32405 solver.cpp:244]     Train net output #1: loss = 0.13623 (* 1 = 0.13623 loss)\nI0821 10:21:53.976866 32405 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:24:11.526594 32405 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:25:30.692950 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8248\nI0821 10:25:30.693207 32405 solver.cpp:404]     Test net output #1: loss = 0.894352 (* 1 = 0.894352 loss)\nI0821 10:25:31.979444 32405 
solver.cpp:228] Iteration 5900, loss = 0.0165782\nI0821 10:25:31.979486 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:25:31.979502 32405 solver.cpp:244]     Train net output #1: loss = 0.0165782 (* 1 = 0.0165782 loss)\nI0821 10:25:32.095446 32405 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:27:49.658550 32405 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:29:08.839388 32405 solver.cpp:404]     Test net output #0: accuracy = 0.79872\nI0821 10:29:08.839654 32405 solver.cpp:404]     Test net output #1: loss = 1.24819 (* 1 = 1.24819 loss)\nI0821 10:29:10.125780 32405 solver.cpp:228] Iteration 6000, loss = 0.0570303\nI0821 10:29:10.125823 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:29:10.125838 32405 solver.cpp:244]     Train net output #1: loss = 0.0570302 (* 1 = 0.0570302 loss)\nI0821 10:29:10.249330 32405 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:31:27.966528 32405 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:32:47.160570 32405 solver.cpp:404]     Test net output #0: accuracy = 0.84744\nI0821 10:32:47.160815 32405 solver.cpp:404]     Test net output #1: loss = 0.827548 (* 1 = 0.827548 loss)\nI0821 10:32:48.447414 32405 solver.cpp:228] Iteration 6100, loss = 0.00581722\nI0821 10:32:48.447459 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:32:48.447475 32405 solver.cpp:244]     Train net output #1: loss = 0.00581713 (* 1 = 0.00581713 loss)\nI0821 10:32:48.570089 32405 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:35:06.141786 32405 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:36:25.350867 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83184\nI0821 10:36:25.351130 32405 solver.cpp:404]     Test net output #1: loss = 0.881933 (* 1 = 0.881933 loss)\nI0821 10:36:26.637266 32405 solver.cpp:228] Iteration 6200, loss = 0.0409333\nI0821 10:36:26.637306 32405 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI0821 10:36:26.637322 32405 solver.cpp:244]     Train net output #1: loss = 0.0409332 (* 1 = 0.0409332 loss)\nI0821 10:36:26.757299 32405 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:38:44.299285 32405 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:40:03.497017 32405 solver.cpp:404]     Test net output #0: accuracy = 0.82716\nI0821 10:40:03.497278 32405 solver.cpp:404]     Test net output #1: loss = 0.961269 (* 1 = 0.961269 loss)\nI0821 10:40:04.783501 32405 solver.cpp:228] Iteration 6300, loss = 0.0114765\nI0821 10:40:04.783543 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:40:04.783557 32405 solver.cpp:244]     Train net output #1: loss = 0.0114764 (* 1 = 0.0114764 loss)\nI0821 10:40:04.901314 32405 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:42:22.384853 32405 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:43:41.581181 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83772\nI0821 10:43:41.581449 32405 solver.cpp:404]     Test net output #1: loss = 0.924176 (* 1 = 0.924176 loss)\nI0821 10:43:42.868901 32405 solver.cpp:228] Iteration 6400, loss = 0.0206308\nI0821 10:43:42.868948 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:43:42.868965 32405 solver.cpp:244]     Train net output #1: loss = 0.0206307 (* 1 = 0.0206307 loss)\nI0821 10:43:42.983772 32405 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:46:00.529332 32405 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:47:19.700212 32405 solver.cpp:404]     Test net output #0: accuracy = 0.84172\nI0821 10:47:19.700476 32405 solver.cpp:404]     Test net output #1: loss = 0.889745 (* 1 = 0.889745 loss)\nI0821 10:47:20.986560 32405 solver.cpp:228] Iteration 6500, loss = 0.00700465\nI0821 10:47:20.986603 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:47:20.986619 32405 solver.cpp:244]     Train net output #1: loss = 0.00700455 (* 1 = 0.00700455 loss)\nI0821 
10:47:21.099830 32405 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 10:49:38.601665 32405 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:50:57.796761 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83856\nI0821 10:50:57.797019 32405 solver.cpp:404]     Test net output #1: loss = 0.884502 (* 1 = 0.884502 loss)\nI0821 10:50:59.085352 32405 solver.cpp:228] Iteration 6600, loss = 0.00509176\nI0821 10:50:59.085402 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:50:59.085425 32405 solver.cpp:244]     Train net output #1: loss = 0.00509166 (* 1 = 0.00509166 loss)\nI0821 10:50:59.202407 32405 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:53:16.799624 32405 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:54:35.990504 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83884\nI0821 10:54:35.990772 32405 solver.cpp:404]     Test net output #1: loss = 0.984793 (* 1 = 0.984793 loss)\nI0821 10:54:37.277987 32405 solver.cpp:228] Iteration 6700, loss = 0.0426557\nI0821 10:54:37.278033 32405 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:54:37.278048 32405 solver.cpp:244]     Train net output #1: loss = 0.0426556 (* 1 = 0.0426556 loss)\nI0821 10:54:37.397856 32405 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:56:54.882517 32405 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 10:58:14.075750 32405 solver.cpp:404]     Test net output #0: accuracy = 0.85256\nI0821 10:58:14.076004 32405 solver.cpp:404]     Test net output #1: loss = 0.855338 (* 1 = 0.855338 loss)\nI0821 10:58:15.362437 32405 solver.cpp:228] Iteration 6800, loss = 0.0129353\nI0821 10:58:15.362483 32405 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:58:15.362498 32405 solver.cpp:244]     Train net output #1: loss = 0.0129352 (* 1 = 0.0129352 loss)\nI0821 10:58:15.481452 32405 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 11:00:33.003077 32405 solver.cpp:337] Iteration 6900, 
Testing net (#0)\nI0821 11:01:52.208946 32405 solver.cpp:404]     Test net output #0: accuracy = 0.83504\nI0821 11:01:52.209208 32405 solver.cpp:404]     Test net output #1: loss = 1.03236 (* 1 = 1.03236 loss)\nI0821 11:01:53.495451 32405 solver.cpp:228] Iteration 6900, loss = 0.0110871\nI0821 11:01:53.495496 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:01:53.495512 32405 solver.cpp:244]     Train net output #1: loss = 0.011087 (* 1 = 0.011087 loss)\nI0821 11:01:53.614765 32405 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:04:11.297871 32405 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:05:30.505519 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8364\nI0821 11:05:30.505795 32405 solver.cpp:404]     Test net output #1: loss = 1.11769 (* 1 = 1.11769 loss)\nI0821 11:05:31.792242 32405 solver.cpp:228] Iteration 7000, loss = 0.00340702\nI0821 11:05:31.792284 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:05:31.792299 32405 solver.cpp:244]     Train net output #1: loss = 0.00340694 (* 1 = 0.00340694 loss)\nI0821 11:05:31.914144 32405 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:07:49.502595 32405 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:09:08.702006 32405 solver.cpp:404]     Test net output #0: accuracy = 0.85272\nI0821 11:09:08.702262 32405 solver.cpp:404]     Test net output #1: loss = 0.977807 (* 1 = 0.977807 loss)\nI0821 11:09:09.987634 32405 solver.cpp:228] Iteration 7100, loss = 0.00347376\nI0821 11:09:09.987674 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:09:09.987689 32405 solver.cpp:244]     Train net output #1: loss = 0.00347367 (* 1 = 0.00347367 loss)\nI0821 11:09:10.103529 32405 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:11:27.771641 32405 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:12:46.979900 32405 solver.cpp:404]     Test net output #0: accuracy = 0.84328\nI0821 11:12:46.980175 32405 
solver.cpp:404]     Test net output #1: loss = 1.00573 (* 1 = 1.00573 loss)\nI0821 11:12:48.265880 32405 solver.cpp:228] Iteration 7200, loss = 0.022836\nI0821 11:12:48.265925 32405 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 11:12:48.265943 32405 solver.cpp:244]     Train net output #1: loss = 0.0228359 (* 1 = 0.0228359 loss)\nI0821 11:12:48.387163 32405 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:15:05.970368 32405 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:16:25.170379 32405 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0821 11:16:25.170627 32405 solver.cpp:404]     Test net output #1: loss = 0.834205 (* 1 = 0.834205 loss)\nI0821 11:16:26.456269 32405 solver.cpp:228] Iteration 7300, loss = 0.000265531\nI0821 11:16:26.456310 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:16:26.456324 32405 solver.cpp:244]     Train net output #1: loss = 0.000265441 (* 1 = 0.000265441 loss)\nI0821 11:16:26.572182 32405 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:18:44.297650 32405 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:20:03.488855 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8616\nI0821 11:20:03.489073 32405 solver.cpp:404]     Test net output #1: loss = 0.901893 (* 1 = 0.901893 loss)\nI0821 11:20:04.775159 32405 solver.cpp:228] Iteration 7400, loss = 0.000967222\nI0821 11:20:04.775202 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:20:04.775218 32405 solver.cpp:244]     Train net output #1: loss = 0.000967138 (* 1 = 0.000967138 loss)\nI0821 11:20:04.895411 32405 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:22:22.526731 32405 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:23:41.716014 32405 solver.cpp:404]     Test net output #0: accuracy = 0.85972\nI0821 11:23:41.716264 32405 solver.cpp:404]     Test net output #1: loss = 0.920344 (* 1 = 0.920344 loss)\nI0821 11:23:43.002022 32405 solver.cpp:228] 
Iteration 7500, loss = 0.00384087\nI0821 11:23:43.002066 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:23:43.002081 32405 solver.cpp:244]     Train net output #1: loss = 0.00384079 (* 1 = 0.00384079 loss)\nI0821 11:23:43.123610 32405 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:26:00.644915 32405 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:27:19.836689 32405 solver.cpp:404]     Test net output #0: accuracy = 0.863401\nI0821 11:27:19.836967 32405 solver.cpp:404]     Test net output #1: loss = 0.882686 (* 1 = 0.882686 loss)\nI0821 11:27:21.123919 32405 solver.cpp:228] Iteration 7600, loss = 0.000127433\nI0821 11:27:21.123963 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:27:21.123980 32405 solver.cpp:244]     Train net output #1: loss = 0.000127347 (* 1 = 0.000127347 loss)\nI0821 11:27:21.241498 32405 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:29:39.083046 32405 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:30:58.267793 32405 solver.cpp:404]     Test net output #0: accuracy = 0.87412\nI0821 11:30:58.268036 32405 solver.cpp:404]     Test net output #1: loss = 0.847085 (* 1 = 0.847085 loss)\nI0821 11:30:59.554661 32405 solver.cpp:228] Iteration 7700, loss = 0.000149044\nI0821 11:30:59.554702 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:30:59.554715 32405 solver.cpp:244]     Train net output #1: loss = 0.000148963 (* 1 = 0.000148963 loss)\nI0821 11:30:59.675142 32405 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:33:17.307951 32405 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:34:36.506685 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88004\nI0821 11:34:36.506958 32405 solver.cpp:404]     Test net output #1: loss = 0.801847 (* 1 = 0.801847 loss)\nI0821 11:34:37.793738 32405 solver.cpp:228] Iteration 7800, loss = 9.5104e-05\nI0821 11:34:37.793782 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
11:34:37.793797 32405 solver.cpp:244]     Train net output #1: loss = 9.50211e-05 (* 1 = 9.50211e-05 loss)\nI0821 11:34:37.913519 32405 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:36:55.475376 32405 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 11:38:14.669898 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0821 11:38:14.670150 32405 solver.cpp:404]     Test net output #1: loss = 0.755653 (* 1 = 0.755653 loss)\nI0821 11:38:15.956008 32405 solver.cpp:228] Iteration 7900, loss = 6.17621e-05\nI0821 11:38:15.956051 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:38:15.956068 32405 solver.cpp:244]     Train net output #1: loss = 6.16792e-05 (* 1 = 6.16792e-05 loss)\nI0821 11:38:16.082563 32405 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:40:33.568958 32405 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:41:52.766708 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0821 11:41:52.766970 32405 solver.cpp:404]     Test net output #1: loss = 0.757384 (* 1 = 0.757384 loss)\nI0821 11:41:54.054054 32405 solver.cpp:228] Iteration 8000, loss = 6.17275e-05\nI0821 11:41:54.054095 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:41:54.054111 32405 solver.cpp:244]     Train net output #1: loss = 6.16447e-05 (* 1 = 6.16447e-05 loss)\nI0821 11:41:54.175679 32405 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:44:11.877462 32405 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:45:31.073420 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88256\nI0821 11:45:31.073686 32405 solver.cpp:404]     Test net output #1: loss = 0.733205 (* 1 = 0.733205 loss)\nI0821 11:45:32.360098 32405 solver.cpp:228] Iteration 8100, loss = 2.87121e-05\nI0821 11:45:32.360139 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:45:32.360155 32405 solver.cpp:244]     Train net output #1: loss = 2.86292e-05 (* 1 = 2.86292e-05 loss)\nI0821 
11:45:32.475656 32405 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:47:50.084539 32405 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 11:49:09.292807 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0821 11:49:09.293077 32405 solver.cpp:404]     Test net output #1: loss = 0.736629 (* 1 = 0.736629 loss)\nI0821 11:49:10.579782 32405 solver.cpp:228] Iteration 8200, loss = 3.64262e-05\nI0821 11:49:10.579826 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:49:10.579841 32405 solver.cpp:244]     Train net output #1: loss = 3.63433e-05 (* 1 = 3.63433e-05 loss)\nI0821 11:49:10.696638 32405 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:51:28.460891 32405 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:52:47.662133 32405 solver.cpp:404]     Test net output #0: accuracy = 0.883201\nI0821 11:52:47.662415 32405 solver.cpp:404]     Test net output #1: loss = 0.716846 (* 1 = 0.716846 loss)\nI0821 11:52:48.948935 32405 solver.cpp:228] Iteration 8300, loss = 7.27505e-05\nI0821 11:52:48.948977 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:52:48.948992 32405 solver.cpp:244]     Train net output #1: loss = 7.26676e-05 (* 1 = 7.26676e-05 loss)\nI0821 11:52:49.069102 32405 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:55:06.686036 32405 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:56:25.886142 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88176\nI0821 11:56:25.886404 32405 solver.cpp:404]     Test net output #1: loss = 0.719387 (* 1 = 0.719387 loss)\nI0821 11:56:27.172823 32405 solver.cpp:228] Iteration 8400, loss = 4.13973e-05\nI0821 11:56:27.172866 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:56:27.172881 32405 solver.cpp:244]     Train net output #1: loss = 4.13143e-05 (* 1 = 4.13143e-05 loss)\nI0821 11:56:27.294531 32405 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 11:58:44.782120 32405 solver.cpp:337] 
Iteration 8500, Testing net (#0)\nI0821 12:00:03.987083 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0821 12:00:03.987336 32405 solver.cpp:404]     Test net output #1: loss = 0.701446 (* 1 = 0.701446 loss)\nI0821 12:00:05.274178 32405 solver.cpp:228] Iteration 8500, loss = 5.44778e-05\nI0821 12:00:05.274220 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:00:05.274236 32405 solver.cpp:244]     Train net output #1: loss = 5.43948e-05 (* 1 = 5.43948e-05 loss)\nI0821 12:00:05.393546 32405 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 12:02:22.921845 32405 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:03:42.204349 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0821 12:03:42.204624 32405 solver.cpp:404]     Test net output #1: loss = 0.706271 (* 1 = 0.706271 loss)\nI0821 12:03:43.491585 32405 solver.cpp:228] Iteration 8600, loss = 6.59422e-05\nI0821 12:03:43.491629 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:03:43.491644 32405 solver.cpp:244]     Train net output #1: loss = 6.58593e-05 (* 1 = 6.58593e-05 loss)\nI0821 12:03:43.608403 32405 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:06:01.218228 32405 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:07:20.486917 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88304\nI0821 12:07:20.487191 32405 solver.cpp:404]     Test net output #1: loss = 0.687979 (* 1 = 0.687979 loss)\nI0821 12:07:21.773756 32405 solver.cpp:228] Iteration 8700, loss = 6.93143e-05\nI0821 12:07:21.773802 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:07:21.773818 32405 solver.cpp:244]     Train net output #1: loss = 6.92313e-05 (* 1 = 6.92313e-05 loss)\nI0821 12:07:21.895762 32405 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:09:39.594563 32405 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:10:58.781756 32405 solver.cpp:404]     Test net output #0: accuracy = 
0.88228\nI0821 12:10:58.782025 32405 solver.cpp:404]     Test net output #1: loss = 0.69481 (* 1 = 0.69481 loss)\nI0821 12:11:00.069357 32405 solver.cpp:228] Iteration 8800, loss = 6.37328e-05\nI0821 12:11:00.069402 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:11:00.069417 32405 solver.cpp:244]     Train net output #1: loss = 6.36499e-05 (* 1 = 6.36499e-05 loss)\nI0821 12:11:00.185376 32405 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:13:17.889922 32405 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:14:37.064682 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88488\nI0821 12:14:37.064960 32405 solver.cpp:404]     Test net output #1: loss = 0.677942 (* 1 = 0.677942 loss)\nI0821 12:14:38.351822 32405 solver.cpp:228] Iteration 8900, loss = 6.96014e-05\nI0821 12:14:38.351868 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:14:38.351884 32405 solver.cpp:244]     Train net output #1: loss = 6.95185e-05 (* 1 = 6.95185e-05 loss)\nI0821 12:14:38.470922 32405 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:16:56.141460 32405 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:18:15.298272 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0821 12:18:15.298480 32405 solver.cpp:404]     Test net output #1: loss = 0.685403 (* 1 = 0.685403 loss)\nI0821 12:18:16.585161 32405 solver.cpp:228] Iteration 9000, loss = 6.71956e-05\nI0821 12:18:16.585204 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:18:16.585219 32405 solver.cpp:244]     Train net output #1: loss = 6.71127e-05 (* 1 = 6.71127e-05 loss)\nI0821 12:18:16.702145 32405 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:20:34.379494 32405 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:21:53.527410 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0821 12:21:53.527602 32405 solver.cpp:404]     Test net output #1: loss = 0.671443 (* 1 = 0.671443 loss)\nI0821 
12:21:54.813750 32405 solver.cpp:228] Iteration 9100, loss = 6.02411e-05\nI0821 12:21:54.813796 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:21:54.813810 32405 solver.cpp:244]     Train net output #1: loss = 6.01582e-05 (* 1 = 6.01582e-05 loss)\nI0821 12:21:54.931141 32405 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:24:12.604151 32405 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 12:25:31.759841 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0821 12:25:31.760054 32405 solver.cpp:404]     Test net output #1: loss = 0.679127 (* 1 = 0.679127 loss)\nI0821 12:25:33.045959 32405 solver.cpp:228] Iteration 9200, loss = 4.62852e-05\nI0821 12:25:33.046007 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:25:33.046022 32405 solver.cpp:244]     Train net output #1: loss = 4.62023e-05 (* 1 = 4.62023e-05 loss)\nI0821 12:25:33.162230 32405 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:27:50.880152 32405 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:29:10.030897 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8848\nI0821 12:29:10.031119 32405 solver.cpp:404]     Test net output #1: loss = 0.665473 (* 1 = 0.665473 loss)\nI0821 12:29:11.317157 32405 solver.cpp:228] Iteration 9300, loss = 6.7877e-05\nI0821 12:29:11.317199 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:29:11.317214 32405 solver.cpp:244]     Train net output #1: loss = 6.7794e-05 (* 1 = 6.7794e-05 loss)\nI0821 12:29:11.431473 32405 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:31:29.024871 32405 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:32:48.173880 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0821 12:32:48.174140 32405 solver.cpp:404]     Test net output #1: loss = 0.672579 (* 1 = 0.672579 loss)\nI0821 12:32:49.460758 32405 solver.cpp:228] Iteration 9400, loss = 5.28555e-05\nI0821 12:32:49.460803 32405 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0821 12:32:49.460819 32405 solver.cpp:244]     Train net output #1: loss = 5.27725e-05 (* 1 = 5.27725e-05 loss)\nI0821 12:32:49.583920 32405 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 12:35:07.202955 32405 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:36:26.357010 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88468\nI0821 12:36:26.357270 32405 solver.cpp:404]     Test net output #1: loss = 0.659542 (* 1 = 0.659542 loss)\nI0821 12:36:27.643208 32405 solver.cpp:228] Iteration 9500, loss = 7.44583e-05\nI0821 12:36:27.643251 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:36:27.643267 32405 solver.cpp:244]     Train net output #1: loss = 7.43753e-05 (* 1 = 7.43753e-05 loss)\nI0821 12:36:27.758934 32405 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:38:45.715740 32405 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:40:04.861678 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0821 12:40:04.861919 32405 solver.cpp:404]     Test net output #1: loss = 0.667654 (* 1 = 0.667654 loss)\nI0821 12:40:06.147853 32405 solver.cpp:228] Iteration 9600, loss = 7.30968e-05\nI0821 12:40:06.147898 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:40:06.147914 32405 solver.cpp:244]     Train net output #1: loss = 7.30138e-05 (* 1 = 7.30138e-05 loss)\nI0821 12:40:06.265910 32405 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:42:23.936250 32405 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:43:43.123878 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88496\nI0821 12:43:43.124128 32405 solver.cpp:404]     Test net output #1: loss = 0.65676 (* 1 = 0.65676 loss)\nI0821 12:43:44.409981 32405 solver.cpp:228] Iteration 9700, loss = 6.93815e-05\nI0821 12:43:44.410027 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:43:44.410042 32405 solver.cpp:244]     Train net output #1: loss = 6.92985e-05 (* 
1 = 6.92985e-05 loss)\nI0821 12:43:44.531442 32405 sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:46:02.115322 32405 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 12:47:21.303061 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88268\nI0821 12:47:21.303289 32405 solver.cpp:404]     Test net output #1: loss = 0.666411 (* 1 = 0.666411 loss)\nI0821 12:47:22.589248 32405 solver.cpp:228] Iteration 9800, loss = 7.98029e-05\nI0821 12:47:22.589293 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:47:22.589309 32405 solver.cpp:244]     Train net output #1: loss = 7.97199e-05 (* 1 = 7.97199e-05 loss)\nI0821 12:47:22.708678 32405 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:49:40.322453 32405 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:50:59.509531 32405 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0821 12:50:59.509770 32405 solver.cpp:404]     Test net output #1: loss = 0.653995 (* 1 = 0.653995 loss)\nI0821 12:51:00.795835 32405 solver.cpp:228] Iteration 9900, loss = 6.776e-05\nI0821 12:51:00.795879 32405 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:51:00.795895 32405 solver.cpp:244]     Train net output #1: loss = 6.7677e-05 (* 1 = 6.7677e-05 loss)\nI0821 12:51:00.914131 32405 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:53:18.618510 32405 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kWD-5Fig11_iter_10000.caffemodel\nI0821 12:53:18.833737 32405 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kWD-5Fig11_iter_10000.solverstate\nI0821 12:53:19.265955 32405 solver.cpp:317] Iteration 10000, loss = 8.3291e-05\nI0821 12:53:19.266000 32405 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:54:38.455269 32405 solver.cpp:404]     Test net output #0: accuracy = 0.8828\nI0821 12:54:38.455534 32405 solver.cpp:404]     Test net output #1: loss = 0.6653 (* 1 = 0.6653 
loss)\nI0821 12:54:38.455546 32405 solver.cpp:322] Optimization Done.\nI0821 12:54:44.148492 32405 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/clr3SS5kWD-6Fig11",
    "content": "I0821 06:49:43.292289 32299 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 06:49:43.295130 32299 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 06:49:43.296357 32299 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 06:49:43.297572 32299 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 06:49:43.298982 32299 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 06:49:43.300212 32299 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 06:49:43.301443 32299 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 06:49:43.302675 32299 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 06:49:43.304008 32299 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 06:49:43.727957 32299 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.1\ndisplay: 100\nmax_iter: 10000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 1e-06\nstepsize: 5000\nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/clr3SS5kWD-6Fig11\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0821 06:49:43.731345 32299 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 06:49:43.743849 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:43.743926 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:43.745039 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 06:49:43.745105 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 06:49:43.745127 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 06:49:43.745146 32299 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:49:43.745165 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:49:43.745182 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:49:43.745200 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:49:43.745218 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:49:43.745239 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:49:43.745256 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:49:43.745275 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:49:43.745290 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:49:43.745311 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:49:43.745338 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:49:43.745362 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:49:43.745380 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:49:43.745398 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:49:43.745416 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0821 
06:49:43.745435 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:49:43.745453 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:49:43.745483 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:49:43.745501 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:49:43.745527 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:49:43.745546 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:49:43.745564 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:49:43.745579 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:49:43.745597 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:49:43.745613 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:49:43.745631 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:49:43.745649 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:49:43.745667 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:49:43.745685 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:49:43.745703 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0821 06:49:43.745719 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:49:43.745738 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:49:43.745755 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:49:43.745775 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:49:43.745791 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:49:43.745810 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:49:43.745828 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:49:43.745852 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:49:43.745869 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:49:43.745887 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:49:43.745905 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:49:43.745923 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:49:43.745941 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:49:43.745959 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 06:49:43.745975 32299 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:49:43.745995 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:49:43.746011 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:49:43.746028 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:49:43.746057 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:49:43.746078 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:49:43.746095 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:49:43.746114 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:49:43.746129 32299 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:49:43.747900 32299 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0821 06:49:43.749997 32299 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:49:43.751224 32299 net.cpp:100] Creating Layer dataLayer\nI0821 06:49:43.751312 32299 net.cpp:408] dataLayer -> data_top\nI0821 06:49:43.751523 32299 net.cpp:408] dataLayer -> label\nI0821 06:49:43.751653 32299 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:49:43.762519 32305 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 06:49:43.848953 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:43.855955 32299 net.cpp:150] Setting up dataLayer\nI0821 06:49:43.856019 32299 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:49:43.856031 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.856036 32299 net.cpp:165] Memory required for data: 1536500\nI0821 06:49:43.856053 32299 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:49:43.856070 32299 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:49:43.856078 32299 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:49:43.856103 32299 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:49:43.856120 32299 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:49:43.856709 32299 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:49:43.856734 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.856740 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.856745 32299 net.cpp:165] Memory required for data: 1537500\nI0821 06:49:43.856752 32299 layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:49:43.856828 32299 
net.cpp:100] Creating Layer pre_conv\nI0821 06:49:43.856840 32299 net.cpp:434] pre_conv <- data_top\nI0821 06:49:43.856853 32299 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:49:43.859066 32306 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:43.860371 32299 net.cpp:150] Setting up pre_conv\nI0821 06:49:43.860399 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.860405 32299 net.cpp:165] Memory required for data: 9729500\nI0821 06:49:43.860476 32299 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:49:43.861855 32299 net.cpp:100] Creating Layer pre_bn\nI0821 06:49:43.861871 32299 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:49:43.861881 32299 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:49:43.862190 32299 net.cpp:150] Setting up pre_bn\nI0821 06:49:43.862206 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.862211 32299 net.cpp:165] Memory required for data: 17921500\nI0821 06:49:43.862228 32299 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:43.862285 32299 net.cpp:100] Creating Layer pre_scale\nI0821 06:49:43.862295 32299 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:49:43.862308 32299 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:49:43.862489 32299 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:43.863308 32299 net.cpp:150] Setting up pre_scale\nI0821 06:49:43.863327 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.863337 32299 net.cpp:165] Memory required for data: 26113500\nI0821 06:49:43.863349 32299 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:49:43.863400 32299 net.cpp:100] Creating Layer pre_relu\nI0821 06:49:43.863410 32299 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:49:43.863422 32299 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:49:43.863435 32299 net.cpp:150] Setting up pre_relu\nI0821 06:49:43.863442 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.863447 32299 net.cpp:165] Memory required for data: 
34305500\nI0821 06:49:43.863453 32299 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:49:43.863463 32299 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:49:43.863469 32299 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:49:43.863476 32299 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:49:43.863487 32299 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:49:43.863535 32299 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:49:43.863548 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.863555 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.863559 32299 net.cpp:165] Memory required for data: 50689500\nI0821 06:49:43.863565 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:49:43.863582 32299 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:49:43.863589 32299 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:49:43.863598 32299 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:49:43.863898 32299 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:49:43.863914 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.863919 32299 net.cpp:165] Memory required for data: 58881500\nI0821 06:49:43.863931 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:49:43.863946 32299 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:49:43.863952 32299 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:49:43.863965 32299 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:49:43.864192 32299 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:49:43.864205 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.864210 32299 net.cpp:165] Memory required for data: 67073500\nI0821 06:49:43.864222 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:43.864231 32299 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:49:43.864238 32299 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:49:43.864245 32299 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.864298 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:43.864439 32299 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:49:43.864452 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.864457 32299 net.cpp:165] Memory required for data: 75265500\nI0821 06:49:43.864467 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:49:43.864483 32299 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:49:43.864490 32299 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:49:43.864500 32299 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.864511 32299 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:49:43.864517 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.864522 32299 net.cpp:165] Memory required for data: 83457500\nI0821 06:49:43.864527 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:49:43.864542 32299 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:49:43.864547 32299 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:49:43.864560 32299 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:49:43.864859 32299 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:49:43.864874 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.864879 32299 net.cpp:165] Memory required for data: 91649500\nI0821 06:49:43.864888 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:49:43.864900 32299 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:49:43.864907 32299 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:49:43.864915 32299 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:49:43.865149 32299 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:49:43.865161 32299 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865166 32299 net.cpp:165] Memory required for data: 99841500\nI0821 06:49:43.865181 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:43.865191 32299 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:49:43.865196 32299 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:49:43.865208 32299 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:49:43.865262 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:43.865406 32299 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:49:43.865418 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865424 32299 net.cpp:165] Memory required for data: 108033500\nI0821 06:49:43.865433 32299 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:49:43.865487 32299 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:49:43.865496 32299 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:49:43.865504 32299 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:49:43.865514 32299 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:49:43.865586 32299 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:49:43.865600 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865605 32299 net.cpp:165] Memory required for data: 116225500\nI0821 06:49:43.865612 32299 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:49:43.865620 32299 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:49:43.865625 32299 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:49:43.865633 32299 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:49:43.865643 32299 net.cpp:150] Setting up L1_b1_relu\nI0821 06:49:43.865650 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865655 32299 net.cpp:165] Memory required for data: 124417500\nI0821 06:49:43.865659 32299 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.865669 32299 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.865674 32299 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:49:43.865684 32299 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:43.865694 32299 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:43.865736 32299 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.865751 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865757 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.865769 32299 net.cpp:165] Memory required for data: 140801500\nI0821 06:49:43.865775 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:49:43.865787 32299 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:49:43.865792 32299 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:43.865802 32299 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:49:43.866125 32299 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:49:43.866139 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.866145 32299 net.cpp:165] Memory required for data: 148993500\nI0821 06:49:43.866154 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:49:43.866168 32299 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:49:43.866173 32299 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:49:43.866183 32299 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:49:43.866420 32299 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:49:43.866435 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.866439 32299 net.cpp:165] Memory required for data: 157185500\nI0821 06:49:43.866451 32299 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:49:43.866463 32299 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:49:43.866469 32299 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:49:43.866477 32299 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.866528 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:49:43.866664 32299 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:49:43.866677 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.866683 32299 net.cpp:165] Memory required for data: 165377500\nI0821 06:49:43.866691 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:49:43.866699 32299 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:49:43.866705 32299 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:49:43.866716 32299 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.866726 32299 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:49:43.866734 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.866739 32299 net.cpp:165] Memory required for data: 173569500\nI0821 06:49:43.866744 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:49:43.866757 32299 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:49:43.866763 32299 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:49:43.866771 32299 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:49:43.867079 32299 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:49:43.867092 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867097 32299 net.cpp:165] Memory required for data: 181761500\nI0821 06:49:43.867106 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:49:43.867120 32299 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:49:43.867125 32299 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:49:43.867133 32299 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:49:43.867372 32299 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:49:43.867385 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867390 32299 net.cpp:165] Memory required for data: 189953500\nI0821 06:49:43.867410 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:43.867420 32299 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:49:43.867426 32299 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:49:43.867436 32299 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:49:43.867489 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:43.867626 32299 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:49:43.867640 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867645 32299 net.cpp:165] Memory required for data: 198145500\nI0821 06:49:43.867655 32299 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:49:43.867673 32299 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:49:43.867679 32299 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:49:43.867687 32299 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:43.867696 32299 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:49:43.867728 32299 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:49:43.867741 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867745 32299 net.cpp:165] Memory required for data: 206337500\nI0821 06:49:43.867751 32299 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:49:43.867759 32299 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:49:43.867764 32299 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:49:43.867774 32299 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:49:43.867784 32299 net.cpp:150] Setting up L1_b2_relu\nI0821 06:49:43.867791 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867796 32299 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:49:43.867801 32299 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.867808 32299 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.867813 32299 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:49:43.867821 32299 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:43.867831 32299 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:43.867875 32299 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.867887 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867894 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.867898 32299 net.cpp:165] Memory required for data: 230913500\nI0821 06:49:43.867903 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:49:43.867918 32299 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:49:43.867924 32299 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:43.867933 32299 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:49:43.868232 32299 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:49:43.868245 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.868250 32299 net.cpp:165] Memory required for data: 239105500\nI0821 06:49:43.868259 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:49:43.868269 32299 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:49:43.868275 32299 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:49:43.868286 32299 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:49:43.868528 32299 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:49:43.868544 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.868551 32299 net.cpp:165] Memory required for data: 
247297500\nI0821 06:49:43.868561 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:43.868569 32299 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:49:43.868577 32299 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:49:43.868584 32299 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.868635 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:43.868777 32299 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:49:43.868789 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.868794 32299 net.cpp:165] Memory required for data: 255489500\nI0821 06:49:43.868803 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:49:43.868815 32299 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:49:43.868821 32299 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:49:43.868829 32299 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.868839 32299 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:49:43.868856 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.868861 32299 net.cpp:165] Memory required for data: 263681500\nI0821 06:49:43.868865 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:49:43.868877 32299 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:49:43.868883 32299 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:49:43.868894 32299 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:49:43.869199 32299 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:49:43.869212 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.869217 32299 net.cpp:165] Memory required for data: 271873500\nI0821 06:49:43.869226 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:49:43.869242 32299 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:49:43.869248 32299 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:49:43.869257 32299 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:49:43.869496 32299 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:49:43.869513 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.869518 32299 net.cpp:165] Memory required for data: 280065500\nI0821 06:49:43.869529 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:43.869539 32299 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:49:43.869544 32299 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:49:43.869552 32299 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:49:43.869603 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:43.869740 32299 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:49:43.869753 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.869758 32299 net.cpp:165] Memory required for data: 288257500\nI0821 06:49:43.869768 32299 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:49:43.869781 32299 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:49:43.869786 32299 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:49:43.869794 32299 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:43.869804 32299 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:49:43.869835 32299 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:49:43.869844 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.869849 32299 net.cpp:165] Memory required for data: 296449500\nI0821 06:49:43.869854 32299 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:49:43.869865 32299 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:49:43.869871 32299 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:49:43.869879 32299 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:49:43.869889 32299 net.cpp:150] Setting up L1_b3_relu\nI0821 06:49:43.869895 32299 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:49:43.869899 32299 net.cpp:165] Memory required for data: 304641500\nI0821 06:49:43.869904 32299 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.869911 32299 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.869916 32299 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:49:43.869925 32299 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:43.870093 32299 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:43.870149 32299 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.870162 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.870168 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.870173 32299 net.cpp:165] Memory required for data: 321025500\nI0821 06:49:43.870179 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:49:43.870193 32299 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:49:43.870199 32299 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:43.870215 32299 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:49:43.870535 32299 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:49:43.870550 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.870555 32299 net.cpp:165] Memory required for data: 329217500\nI0821 06:49:43.870565 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:49:43.870579 32299 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:49:43.870586 32299 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:49:43.870595 32299 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:49:43.870831 32299 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:49:43.870846 32299 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:49:43.870849 32299 net.cpp:165] Memory required for data: 337409500\nI0821 06:49:43.870860 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:43.870870 32299 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:49:43.870877 32299 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:49:43.870883 32299 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.870939 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:43.871076 32299 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:49:43.871089 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.871094 32299 net.cpp:165] Memory required for data: 345601500\nI0821 06:49:43.871104 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:49:43.871111 32299 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:49:43.871117 32299 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:49:43.871129 32299 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.871139 32299 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:49:43.871145 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.871150 32299 net.cpp:165] Memory required for data: 353793500\nI0821 06:49:43.871155 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:49:43.871166 32299 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:49:43.871172 32299 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:49:43.871183 32299 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:49:43.871500 32299 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:49:43.871515 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.871520 32299 net.cpp:165] Memory required for data: 361985500\nI0821 06:49:43.871529 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:49:43.871538 32299 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:49:43.871544 32299 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:49:43.871559 32299 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:49:43.871798 32299 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:49:43.871811 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.871816 32299 net.cpp:165] Memory required for data: 370177500\nI0821 06:49:43.871827 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:43.871836 32299 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:49:43.871842 32299 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:49:43.871850 32299 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:49:43.871904 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:43.872040 32299 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:49:43.872053 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872058 32299 net.cpp:165] Memory required for data: 378369500\nI0821 06:49:43.872067 32299 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:49:43.872076 32299 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:49:43.872082 32299 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:49:43.872090 32299 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:43.872102 32299 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:49:43.872139 32299 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:49:43.872149 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872154 32299 net.cpp:165] Memory required for data: 386561500\nI0821 06:49:43.872159 32299 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:49:43.872172 32299 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:49:43.872179 32299 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:49:43.872185 32299 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:49:43.872195 32299 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:49:43.872202 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872207 32299 net.cpp:165] Memory required for data: 394753500\nI0821 06:49:43.872212 32299 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.872220 32299 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.872225 32299 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:49:43.872232 32299 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:43.872242 32299 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:43.872287 32299 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.872298 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872305 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872309 32299 net.cpp:165] Memory required for data: 411137500\nI0821 06:49:43.872314 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:49:43.872334 32299 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:49:43.872341 32299 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:43.872351 32299 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:49:43.872660 32299 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:49:43.872674 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872679 32299 net.cpp:165] Memory required for data: 419329500\nI0821 06:49:43.872702 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:49:43.872714 32299 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:49:43.872721 32299 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:49:43.872732 32299 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:49:43.872972 32299 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:49:43.872985 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.872990 32299 net.cpp:165] Memory required for data: 427521500\nI0821 06:49:43.873001 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:43.873010 32299 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:49:43.873016 32299 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:49:43.873024 32299 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.873080 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:43.873217 32299 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:49:43.873230 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.873235 32299 net.cpp:165] Memory required for data: 435713500\nI0821 06:49:43.873245 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:49:43.873252 32299 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:49:43.873258 32299 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:49:43.873270 32299 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.873280 32299 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:49:43.873286 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.873291 32299 net.cpp:165] Memory required for data: 443905500\nI0821 06:49:43.873296 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:49:43.873309 32299 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:49:43.873316 32299 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:49:43.873337 32299 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:49:43.873652 32299 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:49:43.873667 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.873672 32299 net.cpp:165] Memory required for data: 452097500\nI0821 06:49:43.873682 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:49:43.873695 32299 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:49:43.873703 32299 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:49:43.873711 32299 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:49:43.873946 32299 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:49:43.873961 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.873966 32299 net.cpp:165] Memory required for data: 460289500\nI0821 06:49:43.873977 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:43.873987 32299 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:49:43.873993 32299 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:49:43.874001 32299 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:49:43.874053 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:43.874191 32299 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:49:43.874203 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874208 32299 net.cpp:165] Memory required for data: 468481500\nI0821 06:49:43.874218 32299 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:49:43.874233 32299 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:49:43.874239 32299 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:49:43.874246 32299 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:43.874258 32299 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:49:43.874287 32299 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:49:43.874300 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874305 32299 net.cpp:165] Memory required for data: 476673500\nI0821 06:49:43.874310 32299 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:49:43.874320 32299 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:49:43.874326 32299 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:49:43.874341 32299 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:49:43.874351 32299 net.cpp:150] Setting up L1_b5_relu\nI0821 06:49:43.874357 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874362 32299 net.cpp:165] Memory required for data: 484865500\nI0821 06:49:43.874367 32299 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.874374 32299 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.874379 32299 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:49:43.874387 32299 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:43.874397 32299 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:43.874444 32299 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.874455 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874461 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874466 32299 net.cpp:165] Memory required for data: 501249500\nI0821 06:49:43.874471 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:49:43.874485 32299 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:49:43.874491 32299 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:43.874501 32299 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:49:43.874806 32299 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:49:43.874819 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.874825 32299 net.cpp:165] Memory required for data: 509441500\nI0821 06:49:43.874840 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:49:43.874853 32299 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:49:43.874861 32299 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:49:43.874868 32299 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:49:43.875108 32299 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:49:43.875120 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.875125 32299 net.cpp:165] Memory required for data: 517633500\nI0821 06:49:43.875135 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:43.875144 32299 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:49:43.875150 32299 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:49:43.875159 32299 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.875212 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:43.875363 32299 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:49:43.875377 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.875382 32299 net.cpp:165] Memory required for data: 525825500\nI0821 06:49:43.875391 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:49:43.875401 32299 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:49:43.875406 32299 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:49:43.875416 32299 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.875427 32299 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:49:43.875434 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.875438 32299 net.cpp:165] Memory required for data: 534017500\nI0821 06:49:43.875443 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:49:43.875454 32299 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:49:43.875459 32299 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:49:43.875471 32299 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:49:43.875782 32299 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:49:43.875797 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.875802 32299 net.cpp:165] Memory required for data: 542209500\nI0821 06:49:43.875810 32299 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:49:43.875820 32299 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:49:43.875826 32299 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:49:43.875838 32299 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:49:43.876077 32299 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:49:43.876093 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876098 32299 net.cpp:165] Memory required for data: 550401500\nI0821 06:49:43.876109 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:43.876118 32299 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:49:43.876124 32299 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:49:43.876132 32299 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:49:43.876185 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:43.876325 32299 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:49:43.876344 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876349 32299 net.cpp:165] Memory required for data: 558593500\nI0821 06:49:43.876359 32299 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:49:43.876379 32299 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:49:43.876384 32299 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:49:43.876392 32299 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:43.876400 32299 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:49:43.876435 32299 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:49:43.876447 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876452 32299 net.cpp:165] Memory required for data: 566785500\nI0821 06:49:43.876458 32299 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:49:43.876474 32299 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:49:43.876480 32299 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:49:43.876487 32299 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:49:43.876497 32299 net.cpp:150] Setting up L1_b6_relu\nI0821 06:49:43.876504 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876509 32299 net.cpp:165] Memory required for data: 574977500\nI0821 06:49:43.876514 32299 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:43.876521 32299 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:43.876526 32299 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:49:43.876538 32299 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:43.876547 32299 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:43.876590 32299 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:43.876600 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876607 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876612 32299 net.cpp:165] Memory required for data: 591361500\nI0821 06:49:43.876617 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:49:43.876631 32299 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:49:43.876637 32299 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:43.876647 32299 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:49:43.876958 32299 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:49:43.876972 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.876977 32299 net.cpp:165] Memory required for data: 599553500\nI0821 06:49:43.876986 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:49:43.876998 32299 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:49:43.877005 32299 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:49:43.877017 32299 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:49:43.877255 32299 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:49:43.877269 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.877274 32299 net.cpp:165] Memory required for data: 607745500\nI0821 06:49:43.877285 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:43.877293 32299 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:49:43.877300 32299 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:49:43.877315 32299 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.877373 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:43.877513 32299 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:49:43.877529 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.877534 32299 net.cpp:165] Memory required for data: 615937500\nI0821 06:49:43.877544 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:49:43.877552 32299 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:49:43.877559 32299 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:49:43.877566 32299 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.877576 32299 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:49:43.877583 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.877588 32299 net.cpp:165] Memory required for data: 624129500\nI0821 06:49:43.877593 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:49:43.877606 32299 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:49:43.877612 32299 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:49:43.877625 32299 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:49:43.877938 32299 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:49:43.877952 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.877957 32299 
net.cpp:165] Memory required for data: 632321500\nI0821 06:49:43.877974 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:49:43.877987 32299 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:49:43.877993 32299 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:49:43.878005 32299 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:49:43.878242 32299 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:49:43.878255 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878260 32299 net.cpp:165] Memory required for data: 640513500\nI0821 06:49:43.878271 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:43.878280 32299 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:49:43.878286 32299 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:49:43.878294 32299 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:49:43.878357 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:43.878497 32299 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:49:43.878510 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878515 32299 net.cpp:165] Memory required for data: 648705500\nI0821 06:49:43.878525 32299 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:49:43.878537 32299 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:49:43.878543 32299 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:49:43.878551 32299 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:43.878559 32299 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:49:43.878593 32299 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:49:43.878605 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878610 32299 net.cpp:165] Memory required for data: 656897500\nI0821 06:49:43.878615 32299 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:49:43.878623 32299 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:49:43.878628 32299 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:49:43.878638 32299 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:49:43.878648 32299 net.cpp:150] Setting up L1_b7_relu\nI0821 06:49:43.878655 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878660 32299 net.cpp:165] Memory required for data: 665089500\nI0821 06:49:43.878665 32299 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:43.878672 32299 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:43.878677 32299 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:49:43.878690 32299 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:43.878700 32299 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:43.878742 32299 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:43.878753 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878760 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.878765 32299 net.cpp:165] Memory required for data: 681473500\nI0821 06:49:43.878770 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:49:43.878784 32299 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:49:43.878792 32299 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:43.878800 32299 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:49:43.879117 32299 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:49:43.879132 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.879137 32299 net.cpp:165] Memory required for data: 689665500\nI0821 06:49:43.879146 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:49:43.879160 32299 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:49:43.879168 32299 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:49:43.879179 32299 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:49:43.879433 32299 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:49:43.879447 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.879452 32299 net.cpp:165] Memory required for data: 697857500\nI0821 06:49:43.879463 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:43.879472 32299 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:49:43.879478 32299 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:49:43.879487 32299 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.879541 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:43.879679 32299 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:49:43.879693 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.879698 32299 net.cpp:165] Memory required for data: 706049500\nI0821 06:49:43.879706 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:49:43.879719 32299 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:49:43.879725 32299 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:49:43.879734 32299 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.879743 32299 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:49:43.879750 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.879755 32299 net.cpp:165] Memory required for data: 714241500\nI0821 06:49:43.879760 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:49:43.879773 32299 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:49:43.879781 32299 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:49:43.879791 32299 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:49:43.880110 32299 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:49:43.880123 32299 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880128 32299 net.cpp:165] Memory required for data: 722433500\nI0821 06:49:43.880137 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:49:43.880149 32299 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:49:43.880156 32299 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:49:43.880165 32299 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:49:43.880424 32299 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:49:43.880437 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880442 32299 net.cpp:165] Memory required for data: 730625500\nI0821 06:49:43.880453 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:43.880462 32299 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:49:43.880468 32299 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:49:43.880476 32299 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:49:43.880532 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:43.880676 32299 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:49:43.880688 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880693 32299 net.cpp:165] Memory required for data: 738817500\nI0821 06:49:43.880703 32299 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:49:43.880712 32299 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:49:43.880719 32299 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:49:43.880726 32299 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:43.880738 32299 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:49:43.880769 32299 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:49:43.880780 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880785 32299 net.cpp:165] Memory required for data: 747009500\nI0821 06:49:43.880791 32299 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:49:43.880801 32299 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:49:43.880808 32299 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:49:43.880815 32299 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:49:43.880825 32299 net.cpp:150] Setting up L1_b8_relu\nI0821 06:49:43.880832 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880843 32299 net.cpp:165] Memory required for data: 755201500\nI0821 06:49:43.880848 32299 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:43.880856 32299 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:43.880861 32299 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:49:43.880868 32299 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:43.880878 32299 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:43.880925 32299 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:43.880937 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880944 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.880949 32299 net.cpp:165] Memory required for data: 771585500\nI0821 06:49:43.880954 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:49:43.880967 32299 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:49:43.880973 32299 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:43.880983 32299 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:49:43.881309 32299 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:49:43.881326 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.881337 32299 net.cpp:165] Memory required for data: 779777500\nI0821 06:49:43.881347 32299 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:49:43.881357 32299 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:49:43.881363 32299 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:49:43.881372 32299 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:49:43.881620 32299 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:49:43.881633 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.881639 32299 net.cpp:165] Memory required for data: 787969500\nI0821 06:49:43.881649 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:43.881661 32299 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:49:43.881667 32299 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:49:43.881675 32299 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.881736 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:43.881882 32299 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:49:43.881894 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.881899 32299 net.cpp:165] Memory required for data: 796161500\nI0821 06:49:43.881909 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:49:43.881918 32299 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:49:43.881924 32299 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:49:43.881934 32299 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.881944 32299 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:49:43.881953 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.881956 32299 net.cpp:165] Memory required for data: 804353500\nI0821 06:49:43.881961 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:49:43.881974 32299 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:49:43.881980 32299 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:49:43.881989 32299 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:49:43.882308 32299 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:49:43.882320 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.882325 32299 net.cpp:165] Memory required for data: 812545500\nI0821 06:49:43.882341 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:49:43.882354 32299 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:49:43.882361 32299 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:49:43.882369 32299 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:49:43.882844 32299 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:49:43.882860 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.882865 32299 net.cpp:165] Memory required for data: 820737500\nI0821 06:49:43.882899 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:43.882910 32299 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:49:43.882917 32299 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:49:43.882925 32299 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:49:43.882984 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:43.883232 32299 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:49:43.883246 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.883252 32299 net.cpp:165] Memory required for data: 828929500\nI0821 06:49:43.883262 32299 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:49:43.883272 32299 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:49:43.883278 32299 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:49:43.883285 32299 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:43.883294 32299 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:49:43.883337 32299 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:49:43.883349 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.883354 32299 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:49:43.883359 32299 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:49:43.883368 32299 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:49:43.883373 32299 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:49:43.883384 32299 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:49:43.883394 32299 net.cpp:150] Setting up L1_b9_relu\nI0821 06:49:43.883402 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.883406 32299 net.cpp:165] Memory required for data: 845313500\nI0821 06:49:43.883411 32299 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:43.883419 32299 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:43.883424 32299 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:49:43.883438 32299 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:43.883450 32299 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:43.883494 32299 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:43.883507 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.883513 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.883517 32299 net.cpp:165] Memory required for data: 861697500\nI0821 06:49:43.883522 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:49:43.883538 32299 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:49:43.883543 32299 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:43.883553 32299 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:49:43.883878 32299 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:49:43.883893 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.883898 32299 net.cpp:165] Memory required for data: 
863745500\nI0821 06:49:43.883906 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:49:43.883919 32299 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:49:43.883925 32299 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:49:43.883934 32299 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:49:43.884171 32299 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:49:43.884183 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.884188 32299 net.cpp:165] Memory required for data: 865793500\nI0821 06:49:43.884199 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:43.884208 32299 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:49:43.884220 32299 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:49:43.884233 32299 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.884289 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:43.884439 32299 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:49:43.884454 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.884459 32299 net.cpp:165] Memory required for data: 867841500\nI0821 06:49:43.884469 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:49:43.884476 32299 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:49:43.884482 32299 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:49:43.884490 32299 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.884500 32299 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:49:43.884507 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.884511 32299 net.cpp:165] Memory required for data: 869889500\nI0821 06:49:43.884516 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:49:43.884531 32299 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:49:43.884536 32299 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:49:43.884547 32299 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:49:43.884871 32299 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:49:43.884886 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.884891 32299 net.cpp:165] Memory required for data: 871937500\nI0821 06:49:43.884901 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:49:43.884912 32299 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:49:43.884918 32299 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:49:43.884932 32299 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:49:43.885174 32299 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:49:43.885187 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.885192 32299 net.cpp:165] Memory required for data: 873985500\nI0821 06:49:43.885203 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:43.885212 32299 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:49:43.885218 32299 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:49:43.885226 32299 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:49:43.885283 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:43.885435 32299 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:49:43.885452 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.885457 32299 net.cpp:165] Memory required for data: 876033500\nI0821 06:49:43.885468 32299 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:49:43.885478 32299 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:49:43.885483 32299 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:43.885493 32299 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:49:43.885584 32299 net.cpp:150] Setting up L2_b1_pool\nI0821 06:49:43.885599 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.885604 32299 net.cpp:165] Memory required for data: 878081500\nI0821 06:49:43.885610 32299 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:49:43.885620 32299 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:49:43.885627 32299 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:49:43.885633 32299 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:49:43.885644 32299 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:49:43.885679 32299 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:49:43.885690 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.885695 32299 net.cpp:165] Memory required for data: 880129500\nI0821 06:49:43.885701 32299 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:49:43.885712 32299 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:49:43.885718 32299 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:49:43.885725 32299 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:49:43.885742 32299 net.cpp:150] Setting up L2_b1_relu\nI0821 06:49:43.885751 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.885754 32299 net.cpp:165] Memory required for data: 882177500\nI0821 06:49:43.885759 32299 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:49:43.885812 32299 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:49:43.885829 32299 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:49:43.888128 32299 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:49:43.888147 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:43.888152 32299 net.cpp:165] Memory required for data: 884225500\nI0821 06:49:43.888159 32299 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:49:43.888169 32299 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:49:43.888175 32299 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:49:43.888182 32299 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:49:43.888195 32299 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:49:43.888273 32299 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:49:43.888290 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.888295 32299 net.cpp:165] Memory required for data: 888321500\nI0821 06:49:43.888301 32299 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:43.888310 32299 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:43.888316 32299 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:49:43.888324 32299 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:43.888345 32299 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:43.888396 32299 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:43.888407 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.888414 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.888418 32299 net.cpp:165] Memory required for data: 896513500\nI0821 06:49:43.888424 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:49:43.888438 32299 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:49:43.888445 32299 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:43.888458 32299 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:49:43.889894 32299 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:49:43.889910 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.889915 32299 net.cpp:165] Memory required for data: 900609500\nI0821 06:49:43.889926 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:49:43.889940 32299 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:49:43.889946 32299 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:49:43.889955 32299 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:49:43.890203 32299 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:49:43.890216 32299 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:49:43.890221 32299 net.cpp:165] Memory required for data: 904705500\nI0821 06:49:43.890233 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:43.890242 32299 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:49:43.890249 32299 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:49:43.890259 32299 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.890316 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:43.890472 32299 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:49:43.890486 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.890491 32299 net.cpp:165] Memory required for data: 908801500\nI0821 06:49:43.890501 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:49:43.890509 32299 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:49:43.890516 32299 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:49:43.890527 32299 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.890545 32299 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:49:43.890552 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.890558 32299 net.cpp:165] Memory required for data: 912897500\nI0821 06:49:43.890563 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:49:43.890576 32299 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:49:43.890583 32299 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:49:43.890592 32299 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:49:43.891050 32299 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:49:43.891064 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891070 32299 net.cpp:165] Memory required for data: 916993500\nI0821 06:49:43.891079 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:49:43.891089 32299 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:49:43.891095 
32299 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:49:43.891108 32299 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:49:43.891360 32299 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:49:43.891373 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891378 32299 net.cpp:165] Memory required for data: 921089500\nI0821 06:49:43.891389 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:43.891398 32299 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:49:43.891405 32299 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:49:43.891412 32299 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:49:43.891470 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:43.891613 32299 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:49:43.891628 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891633 32299 net.cpp:165] Memory required for data: 925185500\nI0821 06:49:43.891643 32299 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:49:43.891652 32299 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:49:43.891659 32299 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:49:43.891666 32299 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:43.891674 32299 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:49:43.891705 32299 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:49:43.891715 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891719 32299 net.cpp:165] Memory required for data: 929281500\nI0821 06:49:43.891724 32299 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:49:43.891732 32299 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:49:43.891739 32299 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:49:43.891749 32299 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:49:43.891759 32299 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:49:43.891767 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891772 32299 net.cpp:165] Memory required for data: 933377500\nI0821 06:49:43.891777 32299 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:43.891783 32299 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:43.891789 32299 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:49:43.891799 32299 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:43.891809 32299 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:43.891852 32299 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:43.891865 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891870 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.891875 32299 net.cpp:165] Memory required for data: 941569500\nI0821 06:49:43.891880 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:49:43.891901 32299 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:49:43.891908 32299 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:43.891918 32299 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:49:43.892397 32299 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:49:43.892412 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.892417 32299 net.cpp:165] Memory required for data: 945665500\nI0821 06:49:43.892427 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:49:43.892439 32299 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:49:43.892446 32299 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:49:43.892454 32299 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:49:43.892699 32299 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:49:43.892711 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.892716 32299 net.cpp:165] Memory required for data: 949761500\nI0821 06:49:43.892727 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:43.892735 32299 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:49:43.892742 32299 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:49:43.892752 32299 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.892807 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:43.892958 32299 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:49:43.892971 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.892976 32299 net.cpp:165] Memory required for data: 953857500\nI0821 06:49:43.892985 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:49:43.892994 32299 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:49:43.893000 32299 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:49:43.893007 32299 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.893018 32299 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:49:43.893024 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.893028 32299 net.cpp:165] Memory required for data: 957953500\nI0821 06:49:43.893033 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:49:43.893048 32299 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:49:43.893054 32299 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:49:43.893064 32299 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:49:43.893537 32299 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:49:43.893551 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.893556 32299 net.cpp:165] Memory required for data: 962049500\nI0821 06:49:43.893566 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:49:43.893579 32299 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:49:43.893585 32299 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:49:43.893596 32299 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:49:43.893842 32299 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:49:43.893856 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.893860 32299 net.cpp:165] Memory required for data: 966145500\nI0821 06:49:43.893872 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:43.893880 32299 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:49:43.893887 32299 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:49:43.893894 32299 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:49:43.893952 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:43.894100 32299 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:49:43.894114 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894119 32299 net.cpp:165] Memory required for data: 970241500\nI0821 06:49:43.894129 32299 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:49:43.894140 32299 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:49:43.894146 32299 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:49:43.894155 32299 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:43.894170 32299 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:49:43.894201 32299 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:49:43.894212 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894217 32299 net.cpp:165] Memory required for data: 974337500\nI0821 06:49:43.894222 32299 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:49:43.894244 32299 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:49:43.894251 32299 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:49:43.894259 32299 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:49:43.894268 32299 net.cpp:150] Setting up L2_b3_relu\nI0821 06:49:43.894276 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894280 32299 net.cpp:165] Memory required for data: 978433500\nI0821 06:49:43.894286 32299 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:43.894294 32299 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:43.894299 32299 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:49:43.894306 32299 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:43.894316 32299 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:43.894372 32299 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:43.894385 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894392 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894397 32299 net.cpp:165] Memory required for data: 986625500\nI0821 06:49:43.894402 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:49:43.894418 32299 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:49:43.894425 32299 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:43.894435 32299 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:49:43.894896 32299 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:49:43.894909 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.894914 32299 net.cpp:165] Memory required for data: 990721500\nI0821 06:49:43.894923 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:49:43.894937 32299 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:49:43.894942 32299 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:49:43.894953 32299 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:49:43.895198 32299 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:49:43.895215 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.895220 32299 net.cpp:165] Memory required for data: 994817500\nI0821 06:49:43.895231 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:43.895239 32299 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:49:43.895246 32299 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:49:43.895253 32299 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.895308 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:43.895462 32299 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:49:43.895475 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.895479 32299 net.cpp:165] Memory required for data: 998913500\nI0821 06:49:43.895489 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:49:43.895498 32299 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:49:43.895504 32299 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:49:43.895514 32299 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.895525 32299 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:49:43.895532 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.895536 32299 net.cpp:165] Memory required for data: 1003009500\nI0821 06:49:43.895541 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:49:43.895562 32299 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:49:43.895568 32299 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:49:43.895577 32299 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:49:43.896031 32299 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:49:43.896045 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896050 32299 net.cpp:165] Memory required for data: 1007105500\nI0821 06:49:43.896059 32299 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:49:43.896075 32299 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:49:43.896082 32299 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:49:43.896091 32299 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:49:43.896344 32299 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:49:43.896358 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896363 32299 net.cpp:165] Memory required for data: 1011201500\nI0821 06:49:43.896373 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:43.896385 32299 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:49:43.896392 32299 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:49:43.896400 32299 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:49:43.896455 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:43.896600 32299 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:49:43.896613 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896617 32299 net.cpp:165] Memory required for data: 1015297500\nI0821 06:49:43.896627 32299 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:49:43.896639 32299 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:49:43.896646 32299 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:49:43.896653 32299 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:43.896663 32299 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:49:43.896692 32299 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:49:43.896700 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896705 32299 net.cpp:165] Memory required for data: 1019393500\nI0821 06:49:43.896710 32299 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:49:43.896718 32299 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:49:43.896724 32299 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:49:43.896734 32299 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:49:43.896745 32299 net.cpp:150] Setting up L2_b4_relu\nI0821 06:49:43.896752 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896756 32299 net.cpp:165] Memory required for data: 1023489500\nI0821 06:49:43.896762 32299 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:43.896770 32299 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:43.896775 32299 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:49:43.896782 32299 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:43.896791 32299 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:43.896838 32299 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:43.896850 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896857 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.896862 32299 net.cpp:165] Memory required for data: 1031681500\nI0821 06:49:43.896867 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:49:43.896880 32299 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:49:43.896888 32299 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:43.896896 32299 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:49:43.897361 32299 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:49:43.897382 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.897387 32299 net.cpp:165] Memory required for data: 1035777500\nI0821 06:49:43.897397 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:49:43.897409 32299 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:49:43.897416 32299 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:49:43.897424 32299 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:49:43.897671 32299 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:49:43.897687 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.897692 32299 net.cpp:165] Memory required for data: 1039873500\nI0821 06:49:43.897702 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:43.897711 32299 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:49:43.897717 32299 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:49:43.897725 32299 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.897779 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:43.897933 32299 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:49:43.897946 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.897951 32299 net.cpp:165] Memory required for data: 1043969500\nI0821 06:49:43.897961 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:49:43.897969 32299 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:49:43.897975 32299 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:49:43.897986 32299 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.897996 32299 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:49:43.898003 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.898008 32299 net.cpp:165] Memory required for data: 1048065500\nI0821 06:49:43.898013 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:49:43.898026 32299 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:49:43.898033 32299 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:49:43.898041 32299 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:49:43.898531 32299 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:49:43.898546 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.898551 32299 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:49:43.898561 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:49:43.898571 32299 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:49:43.898576 32299 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:49:43.898588 32299 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:49:43.898838 32299 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:49:43.898850 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.898855 32299 net.cpp:165] Memory required for data: 1056257500\nI0821 06:49:43.898866 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:43.898878 32299 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:49:43.898885 32299 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:49:43.898893 32299 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:49:43.898947 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:43.899094 32299 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:49:43.899107 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899112 32299 net.cpp:165] Memory required for data: 1060353500\nI0821 06:49:43.899121 32299 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:49:43.899134 32299 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:49:43.899140 32299 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:49:43.899147 32299 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:43.899158 32299 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:49:43.899185 32299 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:49:43.899194 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899206 32299 net.cpp:165] Memory required for data: 1064449500\nI0821 06:49:43.899211 32299 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:49:43.899219 32299 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:49:43.899225 32299 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:49:43.899235 32299 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:49:43.899245 32299 net.cpp:150] Setting up L2_b5_relu\nI0821 06:49:43.899253 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899257 32299 net.cpp:165] Memory required for data: 1068545500\nI0821 06:49:43.899262 32299 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:43.899269 32299 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:43.899276 32299 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:49:43.899282 32299 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:43.899292 32299 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:43.899346 32299 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:43.899359 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899366 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899370 32299 net.cpp:165] Memory required for data: 1076737500\nI0821 06:49:43.899376 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:49:43.899387 32299 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:49:43.899394 32299 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:43.899405 32299 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:49:43.899873 32299 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:49:43.899888 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.899893 32299 net.cpp:165] Memory required for data: 1080833500\nI0821 06:49:43.899901 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:49:43.899914 32299 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:49:43.899921 32299 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:49:43.899930 32299 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:49:43.900180 32299 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:49:43.900193 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.900198 32299 net.cpp:165] Memory required for data: 1084929500\nI0821 06:49:43.900209 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:43.900221 32299 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:49:43.900228 32299 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:49:43.900236 32299 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.900291 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:43.900444 32299 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:49:43.900457 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.900462 32299 net.cpp:165] Memory required for data: 1089025500\nI0821 06:49:43.900471 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:49:43.900482 32299 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:49:43.900490 32299 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:49:43.900499 32299 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.900511 32299 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:49:43.900517 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.900521 32299 net.cpp:165] Memory required for data: 1093121500\nI0821 06:49:43.900527 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:49:43.900538 32299 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:49:43.900544 32299 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:49:43.900557 32299 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:49:43.901011 32299 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:49:43.901031 32299 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901037 32299 net.cpp:165] Memory required for data: 1097217500\nI0821 06:49:43.901046 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:49:43.901057 32299 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:49:43.901062 32299 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:49:43.901073 32299 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:49:43.901321 32299 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:49:43.901340 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901346 32299 net.cpp:165] Memory required for data: 1101313500\nI0821 06:49:43.901357 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:43.901370 32299 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:49:43.901376 32299 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:49:43.901383 32299 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:49:43.901438 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:43.901584 32299 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:49:43.901597 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901602 32299 net.cpp:165] Memory required for data: 1105409500\nI0821 06:49:43.901612 32299 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:49:43.901624 32299 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:49:43.901630 32299 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:49:43.901638 32299 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:43.901646 32299 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:49:43.901675 32299 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:49:43.901685 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901690 32299 net.cpp:165] Memory required for data: 1109505500\nI0821 06:49:43.901695 32299 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:49:43.901703 32299 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:49:43.901710 32299 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:49:43.901720 32299 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:49:43.901729 32299 net.cpp:150] Setting up L2_b6_relu\nI0821 06:49:43.901737 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901742 32299 net.cpp:165] Memory required for data: 1113601500\nI0821 06:49:43.901746 32299 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:43.901754 32299 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:43.901759 32299 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:49:43.901767 32299 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:43.901777 32299 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:43.901823 32299 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:43.901835 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901841 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.901846 32299 net.cpp:165] Memory required for data: 1121793500\nI0821 06:49:43.901851 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:49:43.901862 32299 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:49:43.901868 32299 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:43.901880 32299 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:49:43.902350 32299 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:49:43.902364 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.902369 32299 net.cpp:165] Memory required for data: 1125889500\nI0821 06:49:43.902379 32299 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:49:43.902389 32299 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:49:43.902401 32299 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:49:43.902413 32299 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:49:43.902662 32299 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:49:43.902675 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.902680 32299 net.cpp:165] Memory required for data: 1129985500\nI0821 06:49:43.902691 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:43.902704 32299 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:49:43.902709 32299 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:49:43.902717 32299 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.902772 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:43.902923 32299 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:49:43.902936 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.902941 32299 net.cpp:165] Memory required for data: 1134081500\nI0821 06:49:43.902951 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:49:43.902963 32299 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:49:43.902969 32299 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:49:43.902977 32299 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.902987 32299 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:49:43.902997 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.903002 32299 net.cpp:165] Memory required for data: 1138177500\nI0821 06:49:43.903007 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:49:43.903018 32299 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:49:43.903024 32299 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:49:43.903035 32299 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:49:43.903504 32299 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:49:43.903519 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.903524 32299 net.cpp:165] Memory required for data: 1142273500\nI0821 06:49:43.903533 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:49:43.903543 32299 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:49:43.903549 32299 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:49:43.903560 32299 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:49:43.903831 32299 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:49:43.903846 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.903851 32299 net.cpp:165] Memory required for data: 1146369500\nI0821 06:49:43.903861 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:43.903874 32299 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:49:43.903882 32299 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:49:43.903889 32299 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:49:43.903944 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:43.904093 32299 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:49:43.904106 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904111 32299 net.cpp:165] Memory required for data: 1150465500\nI0821 06:49:43.904121 32299 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:49:43.904134 32299 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:49:43.904140 32299 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:49:43.904148 32299 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:43.904156 32299 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:49:43.904187 32299 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:49:43.904199 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904203 32299 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:49:43.904209 32299 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:49:43.904217 32299 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:49:43.904223 32299 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:49:43.904233 32299 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:49:43.904249 32299 net.cpp:150] Setting up L2_b7_relu\nI0821 06:49:43.904258 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904263 32299 net.cpp:165] Memory required for data: 1158657500\nI0821 06:49:43.904268 32299 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:43.904273 32299 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:43.904279 32299 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:49:43.904286 32299 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:43.904296 32299 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:43.904353 32299 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:43.904366 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904373 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904377 32299 net.cpp:165] Memory required for data: 1166849500\nI0821 06:49:43.904383 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:49:43.904394 32299 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:49:43.904402 32299 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:43.904413 32299 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:49:43.904886 32299 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:49:43.904901 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.904906 32299 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:49:43.904916 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:49:43.904925 32299 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:49:43.904932 32299 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:49:43.904942 32299 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:49:43.905196 32299 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:49:43.905210 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.905215 32299 net.cpp:165] Memory required for data: 1175041500\nI0821 06:49:43.905225 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:43.905237 32299 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:49:43.905244 32299 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:49:43.905252 32299 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.905308 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:43.905467 32299 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:49:43.905480 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.905485 32299 net.cpp:165] Memory required for data: 1179137500\nI0821 06:49:43.905494 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:49:43.905506 32299 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:49:43.905513 32299 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:49:43.905520 32299 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.905530 32299 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:49:43.905537 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.905542 32299 net.cpp:165] Memory required for data: 1183233500\nI0821 06:49:43.905547 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:49:43.905560 32299 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:49:43.905567 32299 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:49:43.905578 32299 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:49:43.906044 32299 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:49:43.906057 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906062 32299 net.cpp:165] Memory required for data: 1187329500\nI0821 06:49:43.906071 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:49:43.906081 32299 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:49:43.906095 32299 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:49:43.906110 32299 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:49:43.906370 32299 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:49:43.906384 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906390 32299 net.cpp:165] Memory required for data: 1191425500\nI0821 06:49:43.906400 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:43.906415 32299 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:49:43.906422 32299 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:49:43.906430 32299 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:49:43.906486 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:43.906637 32299 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:49:43.906651 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906656 32299 net.cpp:165] Memory required for data: 1195521500\nI0821 06:49:43.906664 32299 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:49:43.906674 32299 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:49:43.906680 32299 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:49:43.906690 32299 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:43.906699 32299 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:49:43.906726 32299 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:49:43.906739 32299 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:43.906744 32299 net.cpp:165] Memory required for data: 1199617500\nI0821 06:49:43.906749 32299 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:49:43.906759 32299 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:49:43.906764 32299 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:49:43.906771 32299 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:49:43.906781 32299 net.cpp:150] Setting up L2_b8_relu\nI0821 06:49:43.906788 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906792 32299 net.cpp:165] Memory required for data: 1203713500\nI0821 06:49:43.906797 32299 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:43.906807 32299 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:43.906813 32299 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:49:43.906821 32299 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:43.906844 32299 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:43.906893 32299 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:43.906908 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906914 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.906919 32299 net.cpp:165] Memory required for data: 1211905500\nI0821 06:49:43.906924 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:49:43.906936 32299 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:49:43.906942 32299 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:43.906952 32299 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:49:43.907428 32299 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:49:43.907441 32299 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:43.907446 32299 net.cpp:165] Memory required for data: 1216001500\nI0821 06:49:43.907456 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:49:43.907469 32299 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:49:43.907475 32299 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:49:43.907485 32299 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:49:43.907735 32299 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:49:43.907748 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.907753 32299 net.cpp:165] Memory required for data: 1220097500\nI0821 06:49:43.907771 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:43.907780 32299 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:49:43.907788 32299 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:49:43.907799 32299 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.907855 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:43.908007 32299 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:49:43.908020 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.908025 32299 net.cpp:165] Memory required for data: 1224193500\nI0821 06:49:43.908035 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:49:43.908043 32299 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:49:43.908049 32299 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:49:43.908062 32299 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.908072 32299 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:49:43.908080 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.908084 32299 net.cpp:165] Memory required for data: 1228289500\nI0821 06:49:43.908089 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:49:43.908103 32299 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:49:43.908109 32299 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:49:43.908118 32299 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:49:43.908591 32299 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:49:43.908604 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.908609 32299 net.cpp:165] Memory required for data: 1232385500\nI0821 06:49:43.908619 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:49:43.908632 32299 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:49:43.908638 32299 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:49:43.908646 32299 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:49:43.908900 32299 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:49:43.908912 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.908917 32299 net.cpp:165] Memory required for data: 1236481500\nI0821 06:49:43.908962 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:43.908974 32299 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:49:43.908982 32299 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:49:43.908989 32299 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:49:43.909049 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:43.909201 32299 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:49:43.909214 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.909219 32299 net.cpp:165] Memory required for data: 1240577500\nI0821 06:49:43.909229 32299 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:49:43.909238 32299 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:49:43.909245 32299 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:49:43.909251 32299 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:43.909262 32299 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:49:43.909291 32299 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:49:43.909301 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.909306 32299 net.cpp:165] Memory required for data: 1244673500\nI0821 06:49:43.909310 32299 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:49:43.909322 32299 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:49:43.909332 32299 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:49:43.909342 32299 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:49:43.909351 32299 net.cpp:150] Setting up L2_b9_relu\nI0821 06:49:43.909359 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.909363 32299 net.cpp:165] Memory required for data: 1248769500\nI0821 06:49:43.909368 32299 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:43.909385 32299 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:43.909391 32299 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:49:43.909399 32299 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:43.909409 32299 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:43.909458 32299 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:43.909471 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.909477 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:43.909482 32299 net.cpp:165] Memory required for data: 1256961500\nI0821 06:49:43.909487 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:49:43.909502 32299 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:49:43.909508 32299 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:43.909518 32299 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:49:43.909996 32299 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:49:43.910009 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.910014 32299 net.cpp:165] Memory required for data: 1257985500\nI0821 06:49:43.910023 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:49:43.910035 32299 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:49:43.910043 32299 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:49:43.910050 32299 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:49:43.910316 32299 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:49:43.910334 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.910341 32299 net.cpp:165] Memory required for data: 1259009500\nI0821 06:49:43.910351 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:43.910360 32299 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:49:43.910367 32299 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:49:43.910375 32299 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.910434 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:43.910590 32299 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:49:43.910604 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.910609 32299 net.cpp:165] Memory required for data: 1260033500\nI0821 06:49:43.910617 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:49:43.910626 32299 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:49:43.910632 32299 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:49:43.910642 32299 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.910653 32299 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:49:43.910660 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.910665 32299 net.cpp:165] Memory required for data: 1261057500\nI0821 06:49:43.910670 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:49:43.910683 32299 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:49:43.910689 32299 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:49:43.910699 32299 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:49:43.911176 32299 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:49:43.911190 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.911195 32299 net.cpp:165] Memory required for data: 1262081500\nI0821 06:49:43.911204 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:49:43.911216 32299 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:49:43.911223 32299 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:49:43.911231 32299 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:49:43.911502 32299 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:49:43.911516 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.911521 32299 net.cpp:165] Memory required for data: 1263105500\nI0821 06:49:43.911538 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:43.911550 32299 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:49:43.911557 32299 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:49:43.911566 32299 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:49:43.911625 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:43.911783 32299 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:49:43.911797 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.911801 32299 net.cpp:165] Memory required for data: 1264129500\nI0821 06:49:43.911810 32299 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:49:43.911823 32299 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:49:43.911829 32299 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:43.911841 32299 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:49:43.911876 32299 net.cpp:150] Setting up L3_b1_pool\nI0821 06:49:43.911888 32299 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:49:43.911893 32299 net.cpp:165] Memory required for data: 1265153500\nI0821 06:49:43.911898 32299 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:49:43.911911 32299 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:49:43.911917 32299 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:49:43.911924 32299 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:49:43.911932 32299 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:49:43.911964 32299 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:49:43.911974 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.911979 32299 net.cpp:165] Memory required for data: 1266177500\nI0821 06:49:43.911984 32299 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:49:43.911993 32299 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:49:43.911998 32299 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:49:43.912009 32299 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:49:43.912019 32299 net.cpp:150] Setting up L3_b1_relu\nI0821 06:49:43.912026 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.912030 32299 net.cpp:165] Memory required for data: 1267201500\nI0821 06:49:43.912035 32299 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:49:43.912045 32299 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:49:43.912052 32299 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:49:43.913270 32299 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:49:43.913287 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:43.913293 32299 net.cpp:165] Memory required for data: 1268225500\nI0821 06:49:43.913300 32299 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:49:43.913311 32299 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:49:43.913318 32299 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:49:43.913326 32299 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:49:43.913341 32299 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:49:43.913384 32299 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:49:43.913396 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.913401 32299 net.cpp:165] Memory required for data: 1270273500\nI0821 06:49:43.913408 32299 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:43.913415 32299 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:43.913421 32299 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:49:43.913432 32299 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:43.913444 32299 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:43.913494 32299 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:43.913506 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.913512 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.913524 32299 net.cpp:165] Memory required for data: 1274369500\nI0821 06:49:43.913529 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:49:43.913542 32299 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:49:43.913548 32299 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:43.913560 32299 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:49:43.915539 32299 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:49:43.915556 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.915562 32299 net.cpp:165] Memory required for data: 1276417500\nI0821 06:49:43.915572 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:49:43.915585 32299 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:49:43.915592 32299 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:49:43.915603 32299 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:49:43.915863 32299 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:49:43.915875 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.915880 32299 net.cpp:165] Memory required for data: 1278465500\nI0821 06:49:43.915891 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:43.915901 32299 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:49:43.915907 32299 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:49:43.915920 32299 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.915977 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:43.916131 32299 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:49:43.916143 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.916148 32299 net.cpp:165] Memory required for data: 1280513500\nI0821 06:49:43.916157 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:49:43.916167 32299 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:49:43.916173 32299 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:49:43.916183 32299 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.916194 32299 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:49:43.916201 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.916206 32299 net.cpp:165] Memory required for data: 1282561500\nI0821 06:49:43.916211 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:49:43.916225 32299 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:49:43.916232 32299 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:49:43.916241 32299 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:49:43.917265 32299 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:49:43.917280 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.917285 32299 net.cpp:165] Memory required for data: 1284609500\nI0821 06:49:43.917295 32299 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:49:43.917307 32299 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:49:43.917315 32299 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:49:43.917323 32299 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:49:43.917594 32299 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:49:43.917608 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.917613 32299 net.cpp:165] Memory required for data: 1286657500\nI0821 06:49:43.917624 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:43.917636 32299 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:49:43.917644 32299 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:49:43.917654 32299 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:49:43.917711 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:43.917867 32299 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:49:43.917881 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.917884 32299 net.cpp:165] Memory required for data: 1288705500\nI0821 06:49:43.917894 32299 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:49:43.917904 32299 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:49:43.917918 32299 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:49:43.917927 32299 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:43.917937 32299 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:49:43.917973 32299 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:49:43.917982 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.917986 32299 net.cpp:165] Memory required for data: 1290753500\nI0821 06:49:43.917992 32299 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:49:43.918004 32299 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:49:43.918009 32299 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:49:43.918016 32299 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:49:43.918027 32299 net.cpp:150] Setting up L3_b2_relu\nI0821 06:49:43.918035 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.918040 32299 net.cpp:165] Memory required for data: 1292801500\nI0821 06:49:43.918043 32299 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:43.918051 32299 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:43.918056 32299 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:49:43.918064 32299 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:43.918074 32299 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:43.918125 32299 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:43.918138 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.918144 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.918148 32299 net.cpp:165] Memory required for data: 1296897500\nI0821 06:49:43.918154 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:49:43.918169 32299 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:49:43.918175 32299 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:43.918185 32299 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:49:43.919200 32299 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:49:43.919215 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.919221 32299 net.cpp:165] Memory required for data: 1298945500\nI0821 06:49:43.919230 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:49:43.919244 32299 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:49:43.919250 32299 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:49:43.919260 32299 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:49:43.919528 32299 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:49:43.919541 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.919546 32299 net.cpp:165] Memory required for data: 1300993500\nI0821 06:49:43.919558 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:43.919566 32299 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:49:43.919574 32299 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:49:43.919585 32299 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.919646 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:43.919802 32299 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:49:43.919816 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.919821 32299 net.cpp:165] Memory required for data: 1303041500\nI0821 06:49:43.919829 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:49:43.919838 32299 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:49:43.919844 32299 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:49:43.919854 32299 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.919865 32299 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:49:43.919872 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.919883 32299 net.cpp:165] Memory required for data: 1305089500\nI0821 06:49:43.919889 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:49:43.919903 32299 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:49:43.919910 32299 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:49:43.919921 32299 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:49:43.920946 32299 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:49:43.920961 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.920966 32299 net.cpp:165] Memory required for data: 1307137500\nI0821 06:49:43.920975 32299 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:49:43.920985 32299 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:49:43.920991 32299 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:49:43.921003 32299 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:49:43.921272 32299 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:49:43.921288 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921293 32299 net.cpp:165] Memory required for data: 1309185500\nI0821 06:49:43.921303 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:43.921313 32299 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:49:43.921319 32299 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:49:43.921331 32299 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:49:43.921392 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:43.921547 32299 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:49:43.921561 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921566 32299 net.cpp:165] Memory required for data: 1311233500\nI0821 06:49:43.921574 32299 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:49:43.921584 32299 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:49:43.921596 32299 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:49:43.921603 32299 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:43.921612 32299 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:49:43.921650 32299 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:49:43.921663 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921669 32299 net.cpp:165] Memory required for data: 1313281500\nI0821 06:49:43.921674 32299 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:49:43.921681 32299 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:49:43.921687 32299 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:49:43.921695 32299 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:49:43.921705 32299 net.cpp:150] Setting up L3_b3_relu\nI0821 06:49:43.921712 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921716 32299 net.cpp:165] Memory required for data: 1315329500\nI0821 06:49:43.921721 32299 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:43.921730 32299 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:43.921735 32299 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:49:43.921741 32299 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:43.921752 32299 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:43.921802 32299 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:43.921813 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921819 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.921824 32299 net.cpp:165] Memory required for data: 1319425500\nI0821 06:49:43.921829 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:49:43.921844 32299 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:49:43.921850 32299 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:43.921860 32299 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:49:43.922915 32299 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:49:43.922931 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.922936 32299 net.cpp:165] Memory required for data: 1321473500\nI0821 06:49:43.922946 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:49:43.922955 32299 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:49:43.922962 32299 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:49:43.922974 32299 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:49:43.923243 32299 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:49:43.923255 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.923260 32299 net.cpp:165] Memory required for data: 1323521500\nI0821 06:49:43.923272 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:43.923281 32299 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:49:43.923287 32299 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:49:43.923298 32299 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.923363 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:43.923522 32299 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:49:43.923535 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.923540 32299 net.cpp:165] Memory required for data: 1325569500\nI0821 06:49:43.923549 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:49:43.923562 32299 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:49:43.923568 32299 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:49:43.923575 32299 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.923585 32299 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:49:43.923593 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.923598 32299 net.cpp:165] Memory required for data: 1327617500\nI0821 06:49:43.923602 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:49:43.923616 32299 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:49:43.923622 32299 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:49:43.923633 32299 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:49:43.924667 32299 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:49:43.924682 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.924688 32299 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:49:43.924697 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:49:43.924707 32299 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:49:43.924713 32299 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:49:43.924726 32299 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:49:43.924998 32299 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:49:43.925014 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925019 32299 net.cpp:165] Memory required for data: 1331713500\nI0821 06:49:43.925029 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:43.925038 32299 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:49:43.925045 32299 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:49:43.925052 32299 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:49:43.925110 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:43.925274 32299 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:49:43.925287 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925292 32299 net.cpp:165] Memory required for data: 1333761500\nI0821 06:49:43.925302 32299 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:49:43.925314 32299 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:49:43.925320 32299 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:49:43.925333 32299 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:43.925343 32299 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:49:43.925382 32299 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:49:43.925398 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925405 32299 net.cpp:165] Memory required for data: 1335809500\nI0821 06:49:43.925410 32299 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:49:43.925417 32299 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:49:43.925423 32299 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:49:43.925431 32299 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:49:43.925441 32299 net.cpp:150] Setting up L3_b4_relu\nI0821 06:49:43.925448 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925452 32299 net.cpp:165] Memory required for data: 1337857500\nI0821 06:49:43.925457 32299 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:43.925464 32299 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:43.925469 32299 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:49:43.925480 32299 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:43.925492 32299 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:43.925539 32299 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:43.925550 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925557 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.925561 32299 net.cpp:165] Memory required for data: 1341953500\nI0821 06:49:43.925567 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:49:43.925581 32299 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:49:43.925588 32299 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:43.925597 32299 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:49:43.926646 32299 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:49:43.926662 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.926667 32299 net.cpp:165] Memory required for data: 1344001500\nI0821 06:49:43.926677 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:49:43.926690 32299 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:49:43.926697 32299 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:49:43.926707 32299 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:49:43.927944 32299 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:49:43.927963 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.927969 32299 net.cpp:165] Memory required for data: 1346049500\nI0821 06:49:43.927981 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:43.927991 32299 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:49:43.927999 32299 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:49:43.928009 32299 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.928071 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:43.928230 32299 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:49:43.928242 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.928247 32299 net.cpp:165] Memory required for data: 1348097500\nI0821 06:49:43.928257 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:49:43.928268 32299 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:49:43.928275 32299 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:49:43.928282 32299 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.928293 32299 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:49:43.928300 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.928305 32299 net.cpp:165] Memory required for data: 1350145500\nI0821 06:49:43.928310 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:49:43.928324 32299 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:49:43.928336 32299 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:49:43.928347 32299 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:49:43.930343 32299 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:49:43.930366 32299 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:43.930371 32299 net.cpp:165] Memory required for data: 1352193500\nI0821 06:49:43.930382 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:49:43.930392 32299 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:49:43.930399 32299 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:49:43.930410 32299 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:49:43.930680 32299 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:49:43.930696 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.930701 32299 net.cpp:165] Memory required for data: 1354241500\nI0821 06:49:43.930711 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:43.930721 32299 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:49:43.930727 32299 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:49:43.930735 32299 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:49:43.930794 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:43.930946 32299 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:49:43.930959 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.930964 32299 net.cpp:165] Memory required for data: 1356289500\nI0821 06:49:43.930974 32299 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:49:43.930986 32299 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:49:43.930994 32299 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:49:43.931001 32299 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:43.931010 32299 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:49:43.931046 32299 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:49:43.931056 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.931061 32299 net.cpp:165] Memory required for data: 1358337500\nI0821 06:49:43.931067 32299 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:49:43.931076 32299 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:49:43.931082 32299 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:49:43.931088 32299 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:49:43.931098 32299 net.cpp:150] Setting up L3_b5_relu\nI0821 06:49:43.931105 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.931110 32299 net.cpp:165] Memory required for data: 1360385500\nI0821 06:49:43.931114 32299 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:43.931121 32299 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:43.931128 32299 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:49:43.931138 32299 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:43.931149 32299 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:43.931195 32299 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:43.931205 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.931212 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.931216 32299 net.cpp:165] Memory required for data: 1364481500\nI0821 06:49:43.931221 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:49:43.931236 32299 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:49:43.931242 32299 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:43.931252 32299 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:49:43.932273 32299 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:49:43.932288 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.932294 32299 net.cpp:165] Memory required for data: 1366529500\nI0821 06:49:43.932303 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:49:43.932317 
32299 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:49:43.932335 32299 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:49:43.932346 32299 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:49:43.932612 32299 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:49:43.932626 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.932631 32299 net.cpp:165] Memory required for data: 1368577500\nI0821 06:49:43.932641 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:43.932651 32299 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:49:43.932657 32299 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:49:43.932668 32299 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.932725 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:43.932883 32299 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:49:43.932895 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.932900 32299 net.cpp:165] Memory required for data: 1370625500\nI0821 06:49:43.932910 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:49:43.932921 32299 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:49:43.932929 32299 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:49:43.932935 32299 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:43.932945 32299 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:49:43.932953 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.932957 32299 net.cpp:165] Memory required for data: 1372673500\nI0821 06:49:43.932962 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:49:43.932977 32299 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:49:43.932983 32299 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:49:43.932994 32299 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:49:43.934010 32299 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:49:43.934025 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934029 32299 net.cpp:165] Memory required for data: 1374721500\nI0821 06:49:43.934038 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:49:43.934048 32299 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:49:43.934056 32299 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:49:43.934067 32299 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:49:43.934334 32299 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:49:43.934350 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934355 32299 net.cpp:165] Memory required for data: 1376769500\nI0821 06:49:43.934366 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:43.934376 32299 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:49:43.934382 32299 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:49:43.934391 32299 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:49:43.934448 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:43.934600 32299 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:49:43.934612 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934617 32299 net.cpp:165] Memory required for data: 1378817500\nI0821 06:49:43.934626 32299 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:49:43.934641 32299 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:49:43.934648 32299 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:49:43.934655 32299 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:43.934664 32299 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:49:43.934700 32299 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:49:43.934711 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934716 32299 net.cpp:165] Memory required for data: 1380865500\nI0821 06:49:43.934721 32299 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:49:43.934729 32299 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:49:43.934736 32299 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:49:43.934749 32299 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:49:43.934759 32299 net.cpp:150] Setting up L3_b6_relu\nI0821 06:49:43.934767 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934772 32299 net.cpp:165] Memory required for data: 1382913500\nI0821 06:49:43.934777 32299 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:43.934784 32299 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:43.934789 32299 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:49:43.934800 32299 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:43.934810 32299 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:43.934857 32299 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:43.934870 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934876 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.934880 32299 net.cpp:165] Memory required for data: 1387009500\nI0821 06:49:43.934885 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:49:43.934900 32299 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:49:43.934906 32299 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:43.934916 32299 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:49:43.935940 32299 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:49:43.935955 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.935961 32299 net.cpp:165] Memory required for data: 1389057500\nI0821 06:49:43.935969 32299 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:49:43.935982 32299 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:49:43.935989 32299 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:49:43.935997 32299 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:49:43.936259 32299 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:49:43.936272 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.936277 32299 net.cpp:165] Memory required for data: 1391105500\nI0821 06:49:43.936287 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:43.936300 32299 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:49:43.936306 32299 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:49:43.936314 32299 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.936381 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:43.936542 32299 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:49:43.936554 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.936559 32299 net.cpp:165] Memory required for data: 1393153500\nI0821 06:49:43.936568 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:49:43.936604 32299 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:49:43.936614 32299 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:49:43.936622 32299 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:43.936632 32299 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:49:43.936640 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.936645 32299 net.cpp:165] Memory required for data: 1395201500\nI0821 06:49:43.936650 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:49:43.936662 32299 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:49:43.936667 32299 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:49:43.936676 32299 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:49:43.937702 32299 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:49:43.937717 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.937722 32299 net.cpp:165] Memory required for data: 1397249500\nI0821 06:49:43.937732 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:49:43.937741 32299 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:49:43.937754 32299 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:49:43.937767 32299 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:49:43.938030 32299 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:49:43.938042 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938047 32299 net.cpp:165] Memory required for data: 1399297500\nI0821 06:49:43.938058 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:43.938067 32299 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:49:43.938073 32299 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:49:43.938084 32299 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:49:43.938141 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:43.938298 32299 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:49:43.938311 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938316 32299 net.cpp:165] Memory required for data: 1401345500\nI0821 06:49:43.938325 32299 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:49:43.938344 32299 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:49:43.938351 32299 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:49:43.938359 32299 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:43.938367 32299 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:49:43.938406 32299 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:49:43.938418 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938423 32299 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:49:43.938428 32299 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:49:43.938436 32299 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:49:43.938442 32299 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:49:43.938452 32299 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:49:43.938462 32299 net.cpp:150] Setting up L3_b7_relu\nI0821 06:49:43.938469 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938474 32299 net.cpp:165] Memory required for data: 1405441500\nI0821 06:49:43.938479 32299 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:43.938486 32299 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:43.938493 32299 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:49:43.938499 32299 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:43.938510 32299 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:43.938560 32299 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:43.938571 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938577 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.938582 32299 net.cpp:165] Memory required for data: 1409537500\nI0821 06:49:43.938587 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:49:43.938598 32299 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:49:43.938604 32299 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:43.938616 32299 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:49:43.939633 32299 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:49:43.939648 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.939653 32299 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:49:43.939663 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:49:43.939677 32299 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:49:43.939682 32299 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:49:43.939692 32299 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:49:43.939959 32299 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:49:43.939970 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.939975 32299 net.cpp:165] Memory required for data: 1413633500\nI0821 06:49:43.939993 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:43.940002 32299 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:49:43.940008 32299 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:49:43.940016 32299 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.940078 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:43.940230 32299 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:49:43.940245 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.940250 32299 net.cpp:165] Memory required for data: 1415681500\nI0821 06:49:43.940259 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:49:43.940268 32299 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:49:43.940274 32299 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:49:43.940281 32299 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:43.940291 32299 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:49:43.940299 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.940304 32299 net.cpp:165] Memory required for data: 1417729500\nI0821 06:49:43.940307 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:49:43.940321 32299 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:49:43.940333 32299 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:49:43.940346 32299 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:49:43.941364 32299 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:49:43.941378 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.941383 32299 net.cpp:165] Memory required for data: 1419777500\nI0821 06:49:43.941392 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:49:43.941406 32299 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:49:43.941411 32299 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:49:43.941421 32299 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:49:43.941676 32299 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:49:43.941689 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.941694 32299 net.cpp:165] Memory required for data: 1421825500\nI0821 06:49:43.941705 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:43.941717 32299 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:49:43.941725 32299 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:49:43.941732 32299 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:49:43.941792 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:43.941948 32299 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:49:43.941961 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.941965 32299 net.cpp:165] Memory required for data: 1423873500\nI0821 06:49:43.941975 32299 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:49:43.941987 32299 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:49:43.941993 32299 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:49:43.942001 32299 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:43.942009 32299 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:49:43.942044 32299 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:49:43.942056 32299 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:43.942061 32299 net.cpp:165] Memory required for data: 1425921500\nI0821 06:49:43.942066 32299 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:49:43.942075 32299 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:49:43.942080 32299 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:49:43.942090 32299 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:49:43.942101 32299 net.cpp:150] Setting up L3_b8_relu\nI0821 06:49:43.942108 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.942112 32299 net.cpp:165] Memory required for data: 1427969500\nI0821 06:49:43.942117 32299 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:43.942131 32299 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:43.942137 32299 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:49:43.942145 32299 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:43.942157 32299 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:43.942205 32299 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:43.942217 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.942224 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.942229 32299 net.cpp:165] Memory required for data: 1432065500\nI0821 06:49:43.942234 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:49:43.942247 32299 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:49:43.942255 32299 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:43.942265 32299 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:49:43.944237 32299 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:49:43.944254 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:49:43.944259 32299 net.cpp:165] Memory required for data: 1434113500\nI0821 06:49:43.944269 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:49:43.944283 32299 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:49:43.944289 32299 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:49:43.944298 32299 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:49:43.944571 32299 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:49:43.944583 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.944588 32299 net.cpp:165] Memory required for data: 1436161500\nI0821 06:49:43.944599 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:43.944612 32299 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:49:43.944618 32299 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:49:43.944629 32299 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.944689 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:43.944854 32299 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:49:43.944866 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.944871 32299 net.cpp:165] Memory required for data: 1438209500\nI0821 06:49:43.944880 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:49:43.944890 32299 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:49:43.944895 32299 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:49:43.944906 32299 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:43.944917 32299 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:49:43.944924 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.944929 32299 net.cpp:165] Memory required for data: 1440257500\nI0821 06:49:43.944934 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:49:43.944946 32299 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:49:43.944952 32299 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:49:43.944963 32299 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:49:43.945976 32299 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:49:43.945991 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.945996 32299 net.cpp:165] Memory required for data: 1442305500\nI0821 06:49:43.946005 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:49:43.946017 32299 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:49:43.946024 32299 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:49:43.946033 32299 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:49:43.946295 32299 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:49:43.946308 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.946313 32299 net.cpp:165] Memory required for data: 1444353500\nI0821 06:49:43.946338 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:43.946348 32299 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:49:43.946355 32299 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:49:43.946363 32299 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:49:43.946424 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:43.946578 32299 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:49:43.946590 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.946595 32299 net.cpp:165] Memory required for data: 1446401500\nI0821 06:49:43.946604 32299 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:49:43.946614 32299 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:49:43.946620 32299 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:49:43.946629 32299 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:43.946640 32299 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:49:43.946672 32299 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:49:43.946684 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.946689 32299 net.cpp:165] Memory required for data: 1448449500\nI0821 06:49:43.946694 32299 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:49:43.946705 32299 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:49:43.946712 32299 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:49:43.946719 32299 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:49:43.946729 32299 net.cpp:150] Setting up L3_b9_relu\nI0821 06:49:43.946738 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:43.946741 32299 net.cpp:165] Memory required for data: 1450497500\nI0821 06:49:43.946746 32299 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:49:43.946758 32299 net.cpp:100] Creating Layer post_pool\nI0821 06:49:43.946763 32299 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:49:43.946771 32299 net.cpp:408] post_pool -> post_pool\nI0821 06:49:43.946805 32299 net.cpp:150] Setting up post_pool\nI0821 06:49:43.946815 32299 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:49:43.946820 32299 net.cpp:165] Memory required for data: 1450529500\nI0821 06:49:43.946825 32299 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:49:43.946918 32299 net.cpp:100] Creating Layer post_FC\nI0821 06:49:43.946931 32299 net.cpp:434] post_FC <- post_pool\nI0821 06:49:43.946946 32299 net.cpp:408] post_FC -> post_FC_top\nI0821 06:49:43.947203 32299 net.cpp:150] Setting up post_FC\nI0821 06:49:43.947219 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:43.947224 32299 net.cpp:165] Memory required for data: 1450534500\nI0821 06:49:43.947234 32299 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:49:43.947243 32299 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:49:43.947249 32299 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:49:43.947262 32299 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:49:43.947273 32299 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:49:43.947322 32299 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:49:43.947340 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:43.947347 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:43.947352 32299 net.cpp:165] Memory required for data: 1450544500\nI0821 06:49:43.947357 32299 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:49:43.947402 32299 net.cpp:100] Creating Layer accuracy\nI0821 06:49:43.947413 32299 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:49:43.947422 32299 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:49:43.947430 32299 net.cpp:408] accuracy -> accuracy\nI0821 06:49:43.947474 32299 net.cpp:150] Setting up accuracy\nI0821 06:49:43.947486 32299 net.cpp:157] Top shape: (1)\nI0821 06:49:43.947491 32299 net.cpp:165] Memory required for data: 1450544504\nI0821 06:49:43.947497 32299 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:43.947518 32299 net.cpp:100] Creating Layer loss\nI0821 06:49:43.947525 32299 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:49:43.947532 32299 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:49:43.947541 32299 net.cpp:408] loss -> loss\nI0821 06:49:43.948349 32299 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:43.948518 32299 net.cpp:150] Setting up loss\nI0821 06:49:43.948534 32299 net.cpp:157] Top shape: (1)\nI0821 06:49:43.948539 32299 net.cpp:160]     with loss weight 1\nI0821 06:49:43.948616 32299 net.cpp:165] Memory required for data: 1450544508\nI0821 06:49:43.948624 32299 net.cpp:226] loss needs backward computation.\nI0821 06:49:43.948632 32299 net.cpp:228] accuracy does not need backward computation.\nI0821 06:49:43.948637 32299 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:49:43.948642 32299 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:49:43.948647 32299 net.cpp:226] post_pool needs backward computation.\nI0821 06:49:43.948652 32299 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:49:43.948657 32299 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:49:43.948663 32299 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:49:43.948668 32299 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:49:43.948673 32299 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:49:43.948678 32299 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:49:43.948683 32299 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:49:43.948688 32299 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:49:43.948693 32299 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:49:43.948698 32299 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:49:43.948704 32299 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:49:43.948709 32299 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:49:43.948714 32299 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:49:43.948719 32299 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:49:43.948724 32299 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:49:43.948729 32299 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:49:43.948734 32299 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:49:43.948740 32299 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:49:43.948745 32299 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:49:43.948750 32299 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:49:43.948755 32299 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:49:43.948760 32299 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:49:43.948765 32299 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:49:43.948770 32299 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:49:43.948776 32299 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:49:43.948781 32299 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:49:43.948786 32299 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:49:43.948791 32299 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:49:43.948796 32299 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:49:43.948801 32299 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:49:43.948806 32299 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:49:43.948810 32299 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:49:43.948815 32299 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:49:43.948820 32299 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:49:43.948827 32299 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:49:43.948832 32299 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:49:43.948843 32299 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:49:43.948849 32299 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:49:43.948854 32299 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:49:43.948860 32299 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:49:43.948865 32299 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:49:43.948870 32299 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:49:43.948879 32299 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:49:43.948884 32299 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:49:43.948890 32299 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:49:43.948895 32299 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:49:43.948900 32299 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:49:43.948905 32299 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:49:43.948911 32299 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:49:43.948916 32299 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:49:43.948921 32299 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:49:43.948927 32299 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:49:43.948932 32299 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:49:43.948937 32299 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:49:43.948943 32299 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:49:43.948948 32299 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:49:43.948953 32299 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:49:43.948958 32299 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:49:43.948963 32299 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:49:43.948968 32299 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:49:43.948973 32299 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:49:43.948978 32299 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:49:43.948984 32299 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:49:43.948989 32299 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:49:43.948994 32299 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:49:43.948999 32299 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:49:43.949004 32299 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:49:43.949009 
32299 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:49:43.949014 32299 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:49:43.949020 32299 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:49:43.949025 32299 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:49:43.949030 32299 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:49:43.949036 32299 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:49:43.949041 32299 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:49:43.949048 32299 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:49:43.949053 32299 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:49:43.949057 32299 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:49:43.949062 32299 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:49:43.949067 32299 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:49:43.949074 32299 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:49:43.949079 32299 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:49:43.949085 32299 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:49:43.949090 32299 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:49:43.949100 32299 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:49:43.949106 32299 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:49:43.949111 32299 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:49:43.949116 32299 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:49:43.949122 32299 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:49:43.949127 32299 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:49:43.949132 32299 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:49:43.949137 32299 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:49:43.949143 32299 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:49:43.949148 32299 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:49:43.949153 32299 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:49:43.949159 32299 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:49:43.949165 32299 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:49:43.949170 32299 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:49:43.949175 32299 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:49:43.949182 32299 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:49:43.949187 32299 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:49:43.949192 32299 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:49:43.949196 32299 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:49:43.949203 32299 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:49:43.949208 32299 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:49:43.949213 32299 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:49:43.949219 32299 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:49:43.949224 32299 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:49:43.949230 32299 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:49:43.949239 32299 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:49:43.949244 32299 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:49:43.949249 32299 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:49:43.949254 32299 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:49:43.949260 32299 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:49:43.949265 32299 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:49:43.949270 32299 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:49:43.949276 32299 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:49:43.949281 32299 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:49:43.949287 32299 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:49:43.949292 32299 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:49:43.949297 32299 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:49:43.949302 32299 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:49:43.949308 32299 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:49:43.949313 32299 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:49:43.949319 32299 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:49:43.949324 32299 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:49:43.949337 32299 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:49:43.949343 32299 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:49:43.949349 32299 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:49:43.949355 32299 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:49:43.949360 32299 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:49:43.949371 32299 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:49:43.949378 32299 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:49:43.949383 32299 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:49:43.949389 32299 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:49:43.949394 32299 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:49:43.949400 32299 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:49:43.949406 32299 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:49:43.949411 32299 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:49:43.949417 32299 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:49:43.949422 32299 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:49:43.949427 32299 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:49:43.949434 32299 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:49:43.949439 32299 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:49:43.949445 32299 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:49:43.949450 32299 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:49:43.949455 32299 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:49:43.949461 32299 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:49:43.949466 32299 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:49:43.949472 32299 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:49:43.949477 32299 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:49:43.949483 32299 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:49:43.949488 32299 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:49:43.949494 32299 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:49:43.949499 32299 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:49:43.949504 32299 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:49:43.949512 32299 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:49:43.949517 32299 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:49:43.949522 32299 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:49:43.949527 32299 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:49:43.949532 32299 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:49:43.949538 32299 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:49:43.949543 32299 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:49:43.949549 32299 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:49:43.949555 32299 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:49:43.949560 32299 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:49:43.949566 32299 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:49:43.949573 32299 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:49:43.949578 32299 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:49:43.949584 32299 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:49:43.949589 32299 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:49:43.949594 32299 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:49:43.949600 32299 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:49:43.949605 32299 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:49:43.949611 32299 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:49:43.949617 32299 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:49:43.949622 32299 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:49:43.949628 32299 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:49:43.949640 32299 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:49:43.949645 32299 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:49:43.949651 32299 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:49:43.949657 32299 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:49:43.949663 32299 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:49:43.949668 32299 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:49:43.949674 32299 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:49:43.949679 32299 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:49:43.949686 32299 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:49:43.949690 32299 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:49:43.949697 32299 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:49:43.949702 32299 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:49:43.949707 32299 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:49:43.949713 32299 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:49:43.949719 32299 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:49:43.949724 32299 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:49:43.949729 32299 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:49:43.949735 32299 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:49:43.949740 32299 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:49:43.949746 32299 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:49:43.949753 32299 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:49:43.949759 32299 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:49:43.949764 32299 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:49:43.949769 32299 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:49:43.949775 32299 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:49:43.949780 32299 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:49:43.949786 32299 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:49:43.949791 32299 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:49:43.949797 32299 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:49:43.949803 32299 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:49:43.949808 32299 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:49:43.949815 32299 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:49:43.949820 32299 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:49:43.949826 32299 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:49:43.949831 32299 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:49:43.949837 32299 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:49:43.949842 32299 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:49:43.949848 32299 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:49:43.949854 32299 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:49:43.949861 32299 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:49:43.949865 32299 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:49:43.949872 32299 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:49:43.949877 32299 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:49:43.949882 32299 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:49:43.949888 32299 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:49:43.949894 32299 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:49:43.949899 32299 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:49:43.949909 32299 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:49:43.949915 32299 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:49:43.949921 32299 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:49:43.949928 32299 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:49:43.949936 32299 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:49:43.949942 32299 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:49:43.949949 32299 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:49:43.949954 32299 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:49:43.949960 32299 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:49:43.949966 32299 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:49:43.949972 32299 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:49:43.949978 32299 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:49:43.949983 32299 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:49:43.949990 32299 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:49:43.949996 32299 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:49:43.950001 32299 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:49:43.950007 32299 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:49:43.950013 32299 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:49:43.950018 32299 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:49:43.950024 32299 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:49:43.950029 32299 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:49:43.950037 32299 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:49:43.950042 32299 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:49:43.950047 32299 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:49:43.950053 32299 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:49:43.950059 32299 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:49:43.950064 32299 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:49:43.950070 32299 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:49:43.950076 32299 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:49:43.950083 32299 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:49:43.950088 32299 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:49:43.950093 32299 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:49:43.950099 32299 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:49:43.950104 32299 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:49:43.950110 32299 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:49:43.950116 32299 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:49:43.950121 32299 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:49:43.950127 32299 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:49:43.950132 32299 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:49:43.950137 32299 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:49:43.950143 32299 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:49:43.950150 32299 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:49:43.950155 32299 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:49:43.950160 32299 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:49:43.950167 32299 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:49:43.950172 32299 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:49:43.950178 32299 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:49:43.950188 32299 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:49:43.950194 32299 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:49:43.950201 32299 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:49:43.950206 32299 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:49:43.950212 32299 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:49:43.950217 32299 net.cpp:226] pre_relu needs backward computation.\nI0821 06:49:43.950222 32299 net.cpp:226] pre_scale needs backward computation.\nI0821 06:49:43.950227 32299 net.cpp:226] pre_bn needs backward computation.\nI0821 06:49:43.950233 32299 net.cpp:226] pre_conv needs backward computation.\nI0821 06:49:43.950240 32299 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:49:43.950247 32299 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:49:43.950251 32299 net.cpp:270] This network produces output accuracy\nI0821 06:49:43.950258 32299 net.cpp:270] This network produces output loss\nI0821 06:49:43.950649 32299 net.cpp:283] Network initialization done.\nI0821 06:49:43.959947 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:43.959990 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:43.960052 32299 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 06:49:43.960439 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 06:49:43.960458 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 06:49:43.960469 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 06:49:43.960477 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 06:49:43.960487 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 06:49:43.960495 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 06:49:43.960505 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 06:49:43.960513 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 06:49:43.960522 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 06:49:43.960531 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 06:49:43.960541 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 06:49:43.960548 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 06:49:43.960557 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 06:49:43.960566 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 06:49:43.960574 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 06:49:43.960583 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 06:49:43.960592 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 06:49:43.960600 32299 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 06:49:43.960609 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 06:49:43.960628 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 06:49:43.960638 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 06:49:43.960646 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 06:49:43.960659 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 06:49:43.960667 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 06:49:43.960676 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 06:49:43.960685 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 06:49:43.960693 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 06:49:43.960701 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 06:49:43.960711 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 06:49:43.960718 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 06:49:43.960727 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 06:49:43.960736 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 06:49:43.960746 32299 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 06:49:43.960753 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 06:49:43.960762 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 06:49:43.960770 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 06:49:43.960779 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 06:49:43.960788 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 06:49:43.960796 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 06:49:43.960804 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 06:49:43.960816 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 06:49:43.960825 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 06:49:43.960834 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 06:49:43.960841 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 06:49:43.960851 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 06:49:43.960860 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 06:49:43.960868 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 06:49:43.960876 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 06:49:43.960886 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 06:49:43.960893 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 06:49:43.960909 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 06:49:43.960918 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 06:49:43.960928 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 06:49:43.960937 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 06:49:43.960945 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 06:49:43.960953 32299 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 06:49:43.962596 32299 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI0821 06:49:43.964220 32299 layer_factory.hpp:77] Creating layer dataLayer\nI0821 06:49:43.964920 32299 net.cpp:100] Creating Layer dataLayer\nI0821 06:49:43.964943 32299 net.cpp:408] dataLayer -> data_top\nI0821 06:49:43.964959 32299 net.cpp:408] dataLayer -> label\nI0821 06:49:43.964972 32299 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 06:49:43.975847 32307 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 06:49:43.976130 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:43.983996 32299 net.cpp:150] Setting up dataLayer\nI0821 06:49:43.984043 32299 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 06:49:43.984053 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.984060 32299 net.cpp:165] Memory required for data: 1536500\nI0821 06:49:43.984068 32299 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 06:49:43.984079 32299 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 06:49:43.984086 32299 net.cpp:434] label_dataLayer_1_split <- label\nI0821 06:49:43.984097 32299 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 06:49:43.984112 32299 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 06:49:43.984223 32299 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 06:49:43.984242 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.984249 32299 net.cpp:157] Top shape: 125 (125)\nI0821 06:49:43.984254 32299 net.cpp:165] Memory required for data: 1537500\nI0821 06:49:43.984259 32299 
layer_factory.hpp:77] Creating layer pre_conv\nI0821 06:49:43.984275 32299 net.cpp:100] Creating Layer pre_conv\nI0821 06:49:43.984282 32299 net.cpp:434] pre_conv <- data_top\nI0821 06:49:43.984295 32299 net.cpp:408] pre_conv -> pre_conv_top\nI0821 06:49:43.984728 32299 net.cpp:150] Setting up pre_conv\nI0821 06:49:43.984755 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.984761 32299 net.cpp:165] Memory required for data: 9729500\nI0821 06:49:43.984776 32299 layer_factory.hpp:77] Creating layer pre_bn\nI0821 06:49:43.984793 32299 net.cpp:100] Creating Layer pre_bn\nI0821 06:49:43.984799 32299 net.cpp:434] pre_bn <- pre_conv_top\nI0821 06:49:43.984808 32299 net.cpp:408] pre_bn -> pre_bn_top\nI0821 06:49:43.985157 32299 net.cpp:150] Setting up pre_bn\nI0821 06:49:43.985172 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.985177 32299 net.cpp:165] Memory required for data: 17921500\nI0821 06:49:43.985196 32299 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:43.985208 32299 net.cpp:100] Creating Layer pre_scale\nI0821 06:49:43.985213 32299 net.cpp:434] pre_scale <- pre_bn_top\nI0821 06:49:43.985221 32299 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 06:49:43.985323 32299 layer_factory.hpp:77] Creating layer pre_scale\nI0821 06:49:43.985551 32299 net.cpp:150] Setting up pre_scale\nI0821 06:49:43.985566 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.985571 32299 net.cpp:165] Memory required for data: 26113500\nI0821 06:49:43.985581 32299 layer_factory.hpp:77] Creating layer pre_relu\nI0821 06:49:43.985590 32299 net.cpp:100] Creating Layer pre_relu\nI0821 06:49:43.985596 32299 net.cpp:434] pre_relu <- pre_bn_top\nI0821 06:49:43.985610 32299 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 06:49:43.985621 32299 net.cpp:150] Setting up pre_relu\nI0821 06:49:43.985628 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.985633 32299 net.cpp:165] Memory required for data: 
34305500\nI0821 06:49:43.985640 32299 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 06:49:43.985648 32299 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 06:49:43.985654 32299 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 06:49:43.985666 32299 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 06:49:43.985676 32299 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 06:49:43.985728 32299 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 06:49:43.985740 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.985747 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.985752 32299 net.cpp:165] Memory required for data: 50689500\nI0821 06:49:43.985759 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 06:49:43.985775 32299 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 06:49:43.985782 32299 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 06:49:43.985791 32299 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 06:49:43.986230 32299 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 06:49:43.986246 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.986251 32299 net.cpp:165] Memory required for data: 58881500\nI0821 06:49:43.986263 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 06:49:43.986279 32299 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 06:49:43.986287 32299 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 06:49:43.986296 32299 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 06:49:43.986876 32299 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 06:49:43.986894 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.986899 32299 net.cpp:165] Memory required for data: 67073500\nI0821 06:49:43.986912 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:43.986922 32299 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0821 06:49:43.986927 32299 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 06:49:43.986939 32299 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.987010 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 06:49:43.987185 32299 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 06:49:43.987200 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.987208 32299 net.cpp:165] Memory required for data: 75265500\nI0821 06:49:43.987226 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 06:49:43.987234 32299 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 06:49:43.987243 32299 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 06:49:43.987251 32299 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 06:49:43.987263 32299 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 06:49:43.987270 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.987275 32299 net.cpp:165] Memory required for data: 83457500\nI0821 06:49:43.987291 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 06:49:43.987306 32299 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 06:49:43.987313 32299 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 06:49:43.987324 32299 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 06:49:43.987722 32299 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 06:49:43.987740 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.987746 32299 net.cpp:165] Memory required for data: 91649500\nI0821 06:49:43.987754 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 06:49:43.987767 32299 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 06:49:43.987776 32299 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 06:49:43.987789 32299 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 06:49:43.988090 32299 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 06:49:43.988104 32299 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988109 32299 net.cpp:165] Memory required for data: 99841500\nI0821 06:49:43.988124 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:43.988137 32299 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 06:49:43.988145 32299 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 06:49:43.988154 32299 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 06:49:43.988222 32299 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 06:49:43.988406 32299 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 06:49:43.988425 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988430 32299 net.cpp:165] Memory required for data: 108033500\nI0821 06:49:43.988443 32299 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 06:49:43.988453 32299 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 06:49:43.988459 32299 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 06:49:43.988467 32299 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 06:49:43.988477 32299 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 06:49:43.988518 32299 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 06:49:43.988528 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988533 32299 net.cpp:165] Memory required for data: 116225500\nI0821 06:49:43.988540 32299 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 06:49:43.988553 32299 net.cpp:100] Creating Layer L1_b1_relu\nI0821 06:49:43.988559 32299 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 06:49:43.988566 32299 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 06:49:43.988579 32299 net.cpp:150] Setting up L1_b1_relu\nI0821 06:49:43.988586 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988591 32299 net.cpp:165] Memory required for data: 124417500\nI0821 06:49:43.988596 32299 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.988608 32299 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.988618 32299 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 06:49:43.988626 32299 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:43.988636 32299 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:43.988687 32299 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 06:49:43.988700 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988718 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.988723 32299 net.cpp:165] Memory required for data: 140801500\nI0821 06:49:43.988729 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 06:49:43.988741 32299 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 06:49:43.988747 32299 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 06:49:43.988757 32299 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 06:49:43.989159 32299 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 06:49:43.989174 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.989181 32299 net.cpp:165] Memory required for data: 148993500\nI0821 06:49:43.989192 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 06:49:43.989204 32299 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 06:49:43.989210 32299 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 06:49:43.989223 32299 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 06:49:43.989537 32299 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 06:49:43.989554 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.989559 32299 net.cpp:165] Memory required for data: 157185500\nI0821 06:49:43.989570 32299 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 06:49:43.989583 32299 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 06:49:43.989589 32299 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 06:49:43.989598 32299 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.989665 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 06:49:43.990039 32299 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 06:49:43.990053 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.990059 32299 net.cpp:165] Memory required for data: 165377500\nI0821 06:49:43.990068 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 06:49:43.990077 32299 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 06:49:43.990083 32299 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 06:49:43.990097 32299 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 06:49:43.990108 32299 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 06:49:43.990115 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.990120 32299 net.cpp:165] Memory required for data: 173569500\nI0821 06:49:43.990125 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 06:49:43.990142 32299 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 06:49:43.990149 32299 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 06:49:43.990159 32299 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 06:49:43.990571 32299 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 06:49:43.990584 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.990592 32299 net.cpp:165] Memory required for data: 181761500\nI0821 06:49:43.990602 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 06:49:43.990610 32299 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 06:49:43.990625 32299 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 06:49:43.990635 32299 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 06:49:43.990954 32299 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 06:49:43.990969 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.990974 32299 net.cpp:165] Memory required for data: 189953500\nI0821 06:49:43.990993 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:43.991006 32299 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 06:49:43.991014 32299 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 06:49:43.991024 32299 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 06:49:43.991093 32299 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 06:49:43.991281 32299 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 06:49:43.991293 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.991298 32299 net.cpp:165] Memory required for data: 198145500\nI0821 06:49:43.991319 32299 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 06:49:43.991334 32299 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 06:49:43.991341 32299 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 06:49:43.991354 32299 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 06:49:43.991364 32299 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 06:49:43.991408 32299 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 06:49:43.991418 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.991423 32299 net.cpp:165] Memory required for data: 206337500\nI0821 06:49:43.991428 32299 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 06:49:43.991436 32299 net.cpp:100] Creating Layer L1_b2_relu\nI0821 06:49:43.991442 32299 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 06:49:43.991456 32299 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 06:49:43.991466 32299 net.cpp:150] Setting up L1_b2_relu\nI0821 06:49:43.991474 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.991478 32299 net.cpp:165] Memory required for 
data: 214529500\nI0821 06:49:43.991485 32299 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.991493 32299 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.991498 32299 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 06:49:43.991506 32299 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:43.991516 32299 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:43.991575 32299 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 06:49:43.991586 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.991592 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.991597 32299 net.cpp:165] Memory required for data: 230913500\nI0821 06:49:43.991603 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 06:49:43.991616 32299 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 06:49:43.991623 32299 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 06:49:43.991636 32299 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 06:49:43.992029 32299 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 06:49:43.992044 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.992053 32299 net.cpp:165] Memory required for data: 239105500\nI0821 06:49:43.992061 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 06:49:43.992070 32299 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 06:49:43.992076 32299 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 06:49:43.992089 32299 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 06:49:43.992405 32299 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 06:49:43.992421 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.992429 32299 net.cpp:165] Memory required for data: 
247297500\nI0821 06:49:43.992439 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:43.992455 32299 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 06:49:43.992461 32299 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 06:49:43.992470 32299 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.992533 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 06:49:43.992724 32299 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 06:49:43.992738 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.992744 32299 net.cpp:165] Memory required for data: 255489500\nI0821 06:49:43.992755 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 06:49:43.992769 32299 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 06:49:43.992775 32299 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 06:49:43.992784 32299 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 06:49:43.992800 32299 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 06:49:43.992810 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.992816 32299 net.cpp:165] Memory required for data: 263681500\nI0821 06:49:43.992821 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 06:49:43.992835 32299 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 06:49:43.992843 32299 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 06:49:43.992856 32299 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 06:49:43.993485 32299 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 06:49:43.993501 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.993506 32299 net.cpp:165] Memory required for data: 271873500\nI0821 06:49:43.993516 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 06:49:43.993532 32299 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 06:49:43.993541 32299 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 06:49:43.993554 32299 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 06:49:43.993867 32299 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 06:49:43.993881 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.993886 32299 net.cpp:165] Memory required for data: 280065500\nI0821 06:49:43.993897 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:43.993914 32299 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 06:49:43.993921 32299 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 06:49:43.993929 32299 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 06:49:43.993993 32299 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 06:49:43.994189 32299 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 06:49:43.994204 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.994209 32299 net.cpp:165] Memory required for data: 288257500\nI0821 06:49:43.994220 32299 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 06:49:43.994228 32299 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 06:49:43.994236 32299 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 06:49:43.994248 32299 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 06:49:43.994257 32299 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 06:49:43.994299 32299 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 06:49:43.994313 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.994318 32299 net.cpp:165] Memory required for data: 296449500\nI0821 06:49:43.994323 32299 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 06:49:43.994335 32299 net.cpp:100] Creating Layer L1_b3_relu\nI0821 06:49:43.994343 32299 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 06:49:43.994352 32299 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 06:49:43.994364 32299 net.cpp:150] Setting up L1_b3_relu\nI0821 06:49:43.994374 32299 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 06:49:43.994379 32299 net.cpp:165] Memory required for data: 304641500\nI0821 06:49:43.994384 32299 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.994391 32299 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.994396 32299 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 06:49:43.994405 32299 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:43.994417 32299 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:43.994475 32299 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 06:49:43.994489 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.994496 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.994503 32299 net.cpp:165] Memory required for data: 321025500\nI0821 06:49:43.994508 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 06:49:43.994519 32299 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 06:49:43.994532 32299 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 06:49:43.994549 32299 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 06:49:43.994904 32299 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 06:49:43.994917 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.994922 32299 net.cpp:165] Memory required for data: 329217500\nI0821 06:49:43.994931 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 06:49:43.994941 32299 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 06:49:43.994947 32299 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 06:49:43.994956 32299 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 06:49:43.995226 32299 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 06:49:43.995240 32299 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 06:49:43.995245 32299 net.cpp:165] Memory required for data: 337409500\nI0821 06:49:43.995255 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:43.995267 32299 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 06:49:43.995275 32299 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 06:49:43.995282 32299 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.995348 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 06:49:43.995522 32299 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 06:49:43.995535 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.995540 32299 net.cpp:165] Memory required for data: 345601500\nI0821 06:49:43.995550 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 06:49:43.995558 32299 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 06:49:43.995564 32299 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 06:49:43.995575 32299 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 06:49:43.995585 32299 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 06:49:43.995592 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.995597 32299 net.cpp:165] Memory required for data: 353793500\nI0821 06:49:43.995602 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 06:49:43.995616 32299 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 06:49:43.995622 32299 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 06:49:43.995631 32299 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 06:49:43.995980 32299 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 06:49:43.995995 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996001 32299 net.cpp:165] Memory required for data: 361985500\nI0821 06:49:43.996009 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 06:49:43.996021 32299 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 06:49:43.996027 32299 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 06:49:43.996037 32299 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 06:49:43.996309 32299 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 06:49:43.996322 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996333 32299 net.cpp:165] Memory required for data: 370177500\nI0821 06:49:43.996345 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:43.996357 32299 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 06:49:43.996364 32299 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 06:49:43.996372 32299 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 06:49:43.996433 32299 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 06:49:43.996589 32299 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 06:49:43.996603 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996608 32299 net.cpp:165] Memory required for data: 378369500\nI0821 06:49:43.996618 32299 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 06:49:43.996629 32299 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 06:49:43.996635 32299 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 06:49:43.996644 32299 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 06:49:43.996659 32299 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 06:49:43.996695 32299 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 06:49:43.996706 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996711 32299 net.cpp:165] Memory required for data: 386561500\nI0821 06:49:43.996716 32299 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 06:49:43.996723 32299 net.cpp:100] Creating Layer L1_b4_relu\nI0821 06:49:43.996729 32299 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 06:49:43.996739 32299 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 06:49:43.996749 32299 net.cpp:150] 
Setting up L1_b4_relu\nI0821 06:49:43.996757 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996760 32299 net.cpp:165] Memory required for data: 394753500\nI0821 06:49:43.996765 32299 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.996773 32299 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.996778 32299 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 06:49:43.996785 32299 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:43.996795 32299 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:43.996845 32299 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 06:49:43.996856 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996862 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.996867 32299 net.cpp:165] Memory required for data: 411137500\nI0821 06:49:43.996872 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 06:49:43.996883 32299 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 06:49:43.996889 32299 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 06:49:43.996901 32299 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 06:49:43.997252 32299 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 06:49:43.997267 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.997272 32299 net.cpp:165] Memory required for data: 419329500\nI0821 06:49:43.997318 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 06:49:43.997337 32299 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 06:49:43.997344 32299 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 06:49:43.997356 32299 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 06:49:43.997632 32299 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 06:49:43.997647 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.997651 32299 net.cpp:165] Memory required for data: 427521500\nI0821 06:49:43.997663 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:43.997675 32299 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 06:49:43.997681 32299 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 06:49:43.997689 32299 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.997747 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 06:49:43.997906 32299 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 06:49:43.997920 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.997925 32299 net.cpp:165] Memory required for data: 435713500\nI0821 06:49:43.997933 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 06:49:43.997942 32299 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 06:49:43.997948 32299 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 06:49:43.997958 32299 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 06:49:43.997969 32299 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 06:49:43.997977 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.997982 32299 net.cpp:165] Memory required for data: 443905500\nI0821 06:49:43.997985 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 06:49:43.998006 32299 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 06:49:43.998013 32299 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 06:49:43.998021 32299 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 06:49:43.998410 32299 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 06:49:43.998426 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.998431 32299 net.cpp:165] Memory required for data: 452097500\nI0821 06:49:43.998441 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 06:49:43.998453 32299 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 06:49:43.998461 32299 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 06:49:43.998469 32299 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 06:49:43.998744 32299 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 06:49:43.998759 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.998764 32299 net.cpp:165] Memory required for data: 460289500\nI0821 06:49:43.998773 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:43.998785 32299 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 06:49:43.998792 32299 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 06:49:43.998800 32299 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 06:49:43.998862 32299 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 06:49:43.999049 32299 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 06:49:43.999063 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999068 32299 net.cpp:165] Memory required for data: 468481500\nI0821 06:49:43.999078 32299 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 06:49:43.999088 32299 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 06:49:43.999094 32299 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 06:49:43.999100 32299 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 06:49:43.999112 32299 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 06:49:43.999148 32299 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 06:49:43.999163 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999168 32299 net.cpp:165] Memory required for data: 476673500\nI0821 06:49:43.999174 32299 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 06:49:43.999181 32299 net.cpp:100] Creating Layer L1_b5_relu\nI0821 06:49:43.999187 32299 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 06:49:43.999194 32299 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 06:49:43.999204 32299 net.cpp:150] Setting up L1_b5_relu\nI0821 06:49:43.999212 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999217 32299 net.cpp:165] Memory required for data: 484865500\nI0821 06:49:43.999222 32299 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.999231 32299 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.999238 32299 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 06:49:43.999244 32299 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:43.999254 32299 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:43.999306 32299 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 06:49:43.999318 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999325 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999336 32299 net.cpp:165] Memory required for data: 501249500\nI0821 06:49:43.999342 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 06:49:43.999353 32299 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 06:49:43.999359 32299 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 06:49:43.999372 32299 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 06:49:43.999737 32299 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 06:49:43.999750 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:43.999763 32299 net.cpp:165] Memory required for data: 509441500\nI0821 06:49:43.999773 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 06:49:43.999781 32299 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 06:49:43.999788 32299 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 06:49:43.999796 32299 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 06:49:44.000073 32299 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 06:49:44.000087 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.000092 32299 net.cpp:165] Memory required for data: 517633500\nI0821 06:49:44.000103 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:44.000115 32299 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 06:49:44.000121 32299 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 06:49:44.000129 32299 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.000192 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 06:49:44.000361 32299 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 06:49:44.000375 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.000380 32299 net.cpp:165] Memory required for data: 525825500\nI0821 06:49:44.000389 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 06:49:44.000398 32299 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 06:49:44.000404 32299 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 06:49:44.000416 32299 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.000427 32299 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 06:49:44.000434 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.000439 32299 net.cpp:165] Memory required for data: 534017500\nI0821 06:49:44.000444 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 06:49:44.000458 32299 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 06:49:44.000463 32299 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 06:49:44.000473 32299 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 06:49:44.000826 32299 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 06:49:44.000841 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.000846 32299 net.cpp:165] Memory required for data: 542209500\nI0821 06:49:44.000855 32299 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 06:49:44.000867 32299 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 06:49:44.000874 32299 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 06:49:44.000882 32299 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 06:49:44.001159 32299 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 06:49:44.001173 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001178 32299 net.cpp:165] Memory required for data: 550401500\nI0821 06:49:44.001189 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:44.001199 32299 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 06:49:44.001204 32299 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 06:49:44.001215 32299 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 06:49:44.001273 32299 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 06:49:44.001441 32299 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 06:49:44.001454 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001459 32299 net.cpp:165] Memory required for data: 558593500\nI0821 06:49:44.001469 32299 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 06:49:44.001487 32299 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 06:49:44.001494 32299 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 06:49:44.001502 32299 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 06:49:44.001514 32299 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 06:49:44.001549 32299 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 06:49:44.001559 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001564 32299 net.cpp:165] Memory required for data: 566785500\nI0821 06:49:44.001577 32299 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 06:49:44.001585 32299 net.cpp:100] Creating Layer L1_b6_relu\nI0821 06:49:44.001591 32299 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 06:49:44.001602 32299 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 06:49:44.001612 32299 net.cpp:150] Setting up L1_b6_relu\nI0821 06:49:44.001619 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001624 32299 net.cpp:165] Memory required for data: 574977500\nI0821 06:49:44.001628 32299 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:44.001636 32299 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:44.001641 32299 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 06:49:44.001648 32299 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:44.001658 32299 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:44.001710 32299 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 06:49:44.001723 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001729 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.001734 32299 net.cpp:165] Memory required for data: 591361500\nI0821 06:49:44.001739 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 06:49:44.001752 32299 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 06:49:44.001760 32299 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 06:49:44.001768 32299 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 06:49:44.002126 32299 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 06:49:44.002141 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.002146 32299 net.cpp:165] Memory required for data: 599553500\nI0821 06:49:44.002156 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 06:49:44.002164 32299 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 06:49:44.002171 32299 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 06:49:44.002182 32299 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 06:49:44.002472 32299 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 06:49:44.002490 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.002495 32299 net.cpp:165] Memory required for data: 607745500\nI0821 06:49:44.002506 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:44.002514 32299 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 06:49:44.002521 32299 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 06:49:44.002528 32299 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.002588 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 06:49:44.002748 32299 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 06:49:44.002761 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.002766 32299 net.cpp:165] Memory required for data: 615937500\nI0821 06:49:44.002775 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 06:49:44.002787 32299 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 06:49:44.002794 32299 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 06:49:44.002801 32299 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.002811 32299 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 06:49:44.002821 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.002825 32299 net.cpp:165] Memory required for data: 624129500\nI0821 06:49:44.002830 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 06:49:44.002841 32299 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 06:49:44.002847 32299 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 06:49:44.002858 32299 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 06:49:44.003239 32299 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 06:49:44.003254 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.003265 32299 
net.cpp:165] Memory required for data: 632321500\nI0821 06:49:44.003275 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 06:49:44.003285 32299 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 06:49:44.003291 32299 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 06:49:44.003304 32299 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 06:49:44.003588 32299 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 06:49:44.003603 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.003608 32299 net.cpp:165] Memory required for data: 640513500\nI0821 06:49:44.003619 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:44.003631 32299 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 06:49:44.003638 32299 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 06:49:44.003645 32299 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 06:49:44.003705 32299 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 06:49:44.003895 32299 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 06:49:44.003908 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.003913 32299 net.cpp:165] Memory required for data: 648705500\nI0821 06:49:44.003923 32299 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 06:49:44.003935 32299 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 06:49:44.003942 32299 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 06:49:44.003949 32299 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 06:49:44.003957 32299 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 06:49:44.003994 32299 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 06:49:44.004006 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004011 32299 net.cpp:165] Memory required for data: 656897500\nI0821 06:49:44.004017 32299 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 06:49:44.004025 32299 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 06:49:44.004031 32299 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 06:49:44.004041 32299 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 06:49:44.004051 32299 net.cpp:150] Setting up L1_b7_relu\nI0821 06:49:44.004060 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004063 32299 net.cpp:165] Memory required for data: 665089500\nI0821 06:49:44.004068 32299 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:44.004076 32299 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:44.004081 32299 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 06:49:44.004088 32299 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:44.004098 32299 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:44.004148 32299 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 06:49:44.004160 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004168 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004171 32299 net.cpp:165] Memory required for data: 681473500\nI0821 06:49:44.004176 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 06:49:44.004189 32299 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 06:49:44.004195 32299 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 06:49:44.004206 32299 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 06:49:44.004571 32299 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 06:49:44.004586 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004591 32299 net.cpp:165] Memory required for data: 689665500\nI0821 06:49:44.004601 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 06:49:44.004611 32299 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 06:49:44.004617 32299 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 06:49:44.004636 32299 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 06:49:44.004917 32299 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 06:49:44.004933 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.004938 32299 net.cpp:165] Memory required for data: 697857500\nI0821 06:49:44.004950 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:44.004958 32299 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 06:49:44.004964 32299 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 06:49:44.004972 32299 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.005030 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 06:49:44.005195 32299 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 06:49:44.005208 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.005213 32299 net.cpp:165] Memory required for data: 706049500\nI0821 06:49:44.005223 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 06:49:44.005234 32299 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 06:49:44.005240 32299 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 06:49:44.005249 32299 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.005259 32299 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 06:49:44.005265 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.005270 32299 net.cpp:165] Memory required for data: 714241500\nI0821 06:49:44.005275 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 06:49:44.005290 32299 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 06:49:44.005295 32299 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 06:49:44.005308 32299 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 06:49:44.005677 32299 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 06:49:44.005692 32299 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.005697 32299 net.cpp:165] Memory required for data: 722433500\nI0821 06:49:44.005707 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 06:49:44.005717 32299 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 06:49:44.005722 32299 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 06:49:44.005733 32299 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 06:49:44.006011 32299 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 06:49:44.006024 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006031 32299 net.cpp:165] Memory required for data: 730625500\nI0821 06:49:44.006041 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:44.006052 32299 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 06:49:44.006058 32299 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 06:49:44.006067 32299 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 06:49:44.006125 32299 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 06:49:44.006285 32299 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 06:49:44.006299 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006304 32299 net.cpp:165] Memory required for data: 738817500\nI0821 06:49:44.006312 32299 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 06:49:44.006325 32299 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 06:49:44.006337 32299 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 06:49:44.006345 32299 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 06:49:44.006353 32299 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 06:49:44.006392 32299 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 06:49:44.006404 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006409 32299 net.cpp:165] Memory required for data: 747009500\nI0821 06:49:44.006414 32299 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 06:49:44.006422 32299 net.cpp:100] Creating Layer L1_b8_relu\nI0821 06:49:44.006428 32299 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 06:49:44.006438 32299 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 06:49:44.006455 32299 net.cpp:150] Setting up L1_b8_relu\nI0821 06:49:44.006463 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006467 32299 net.cpp:165] Memory required for data: 755201500\nI0821 06:49:44.006472 32299 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:44.006479 32299 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:44.006484 32299 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 06:49:44.006492 32299 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:44.006502 32299 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:44.006553 32299 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 06:49:44.006566 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006572 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006577 32299 net.cpp:165] Memory required for data: 771585500\nI0821 06:49:44.006582 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 06:49:44.006592 32299 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 06:49:44.006599 32299 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 06:49:44.006610 32299 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 06:49:44.006974 32299 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 06:49:44.006989 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.006994 32299 net.cpp:165] Memory required for data: 779777500\nI0821 06:49:44.007004 32299 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 06:49:44.007019 32299 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 06:49:44.007025 32299 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 06:49:44.007036 32299 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 06:49:44.007313 32299 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 06:49:44.007326 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.007338 32299 net.cpp:165] Memory required for data: 787969500\nI0821 06:49:44.007349 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:44.007359 32299 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 06:49:44.007364 32299 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 06:49:44.007372 32299 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.007434 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 06:49:44.007593 32299 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 06:49:44.007606 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.007611 32299 net.cpp:165] Memory required for data: 796161500\nI0821 06:49:44.007622 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 06:49:44.007632 32299 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 06:49:44.007638 32299 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 06:49:44.007645 32299 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.007657 32299 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 06:49:44.007663 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.007668 32299 net.cpp:165] Memory required for data: 804353500\nI0821 06:49:44.007673 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 06:49:44.007686 32299 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 06:49:44.007692 32299 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 06:49:44.007705 32299 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 06:49:44.008062 32299 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 06:49:44.008076 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008081 32299 net.cpp:165] Memory required for data: 812545500\nI0821 06:49:44.008091 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 06:49:44.008103 32299 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 06:49:44.008111 32299 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 06:49:44.008126 32299 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 06:49:44.008409 32299 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 06:49:44.008424 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008430 32299 net.cpp:165] Memory required for data: 820737500\nI0821 06:49:44.008461 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:44.008471 32299 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 06:49:44.008477 32299 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 06:49:44.008491 32299 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 06:49:44.008548 32299 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 06:49:44.008710 32299 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 06:49:44.008723 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008728 32299 net.cpp:165] Memory required for data: 828929500\nI0821 06:49:44.008738 32299 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 06:49:44.008750 32299 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 06:49:44.008756 32299 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 06:49:44.008764 32299 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 06:49:44.008772 32299 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 06:49:44.008807 32299 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 06:49:44.008818 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008823 32299 net.cpp:165] Memory required for 
data: 837121500\nI0821 06:49:44.008828 32299 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 06:49:44.008836 32299 net.cpp:100] Creating Layer L1_b9_relu\nI0821 06:49:44.008842 32299 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 06:49:44.008852 32299 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 06:49:44.008862 32299 net.cpp:150] Setting up L1_b9_relu\nI0821 06:49:44.008870 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008874 32299 net.cpp:165] Memory required for data: 845313500\nI0821 06:49:44.008879 32299 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:44.008889 32299 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:44.008894 32299 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 06:49:44.008903 32299 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:44.008913 32299 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:44.008965 32299 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 06:49:44.008976 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008982 32299 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 06:49:44.008987 32299 net.cpp:165] Memory required for data: 861697500\nI0821 06:49:44.008992 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 06:49:44.009006 32299 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 06:49:44.009012 32299 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 06:49:44.009021 32299 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 06:49:44.009388 32299 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 06:49:44.009402 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.009407 32299 net.cpp:165] Memory required for data: 
863745500\nI0821 06:49:44.009418 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 06:49:44.009429 32299 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 06:49:44.009436 32299 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 06:49:44.009444 32299 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 06:49:44.009713 32299 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 06:49:44.009729 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.009734 32299 net.cpp:165] Memory required for data: 865793500\nI0821 06:49:44.009745 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:44.009760 32299 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 06:49:44.009768 32299 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 06:49:44.009775 32299 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:44.009834 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 06:49:44.009994 32299 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 06:49:44.010007 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.010012 32299 net.cpp:165] Memory required for data: 867841500\nI0821 06:49:44.010022 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 06:49:44.010031 32299 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 06:49:44.010037 32299 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 06:49:44.010047 32299 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 06:49:44.010058 32299 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 06:49:44.010066 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.010069 32299 net.cpp:165] Memory required for data: 869889500\nI0821 06:49:44.010074 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 06:49:44.010090 32299 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 06:49:44.010097 32299 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 06:49:44.010105 32299 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 06:49:44.010462 32299 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 06:49:44.010476 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.010481 32299 net.cpp:165] Memory required for data: 871937500\nI0821 06:49:44.010490 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 06:49:44.010500 32299 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 06:49:44.010506 32299 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 06:49:44.010520 32299 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 06:49:44.010787 32299 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 06:49:44.010800 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.010805 32299 net.cpp:165] Memory required for data: 873985500\nI0821 06:49:44.010817 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:44.010828 32299 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 06:49:44.010834 32299 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 06:49:44.010843 32299 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 06:49:44.010901 32299 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 06:49:44.011059 32299 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 06:49:44.011070 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.011075 32299 net.cpp:165] Memory required for data: 876033500\nI0821 06:49:44.011085 32299 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 06:49:44.011097 32299 net.cpp:100] Creating Layer L2_b1_pool\nI0821 06:49:44.011104 32299 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 06:49:44.011116 32299 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 06:49:44.011147 32299 net.cpp:150] Setting up L2_b1_pool\nI0821 06:49:44.011157 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.011160 32299 net.cpp:165] Memory required for data: 878081500\nI0821 06:49:44.011165 32299 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 06:49:44.011174 32299 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 06:49:44.011180 32299 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 06:49:44.011188 32299 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 06:49:44.011198 32299 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 06:49:44.011232 32299 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 06:49:44.011241 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.011246 32299 net.cpp:165] Memory required for data: 880129500\nI0821 06:49:44.011251 32299 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 06:49:44.011260 32299 net.cpp:100] Creating Layer L2_b1_relu\nI0821 06:49:44.011272 32299 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 06:49:44.011282 32299 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 06:49:44.011293 32299 net.cpp:150] Setting up L2_b1_relu\nI0821 06:49:44.011301 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.011304 32299 net.cpp:165] Memory required for data: 882177500\nI0821 06:49:44.011309 32299 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 06:49:44.011318 32299 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 06:49:44.011327 32299 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 06:49:44.013589 32299 net.cpp:150] Setting up L2_b1_zeros\nI0821 06:49:44.013607 32299 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 06:49:44.013613 32299 net.cpp:165] Memory required for data: 884225500\nI0821 06:49:44.013619 32299 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 06:49:44.013629 32299 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 06:49:44.013635 32299 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 06:49:44.013646 32299 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 06:49:44.013654 32299 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 06:49:44.013701 32299 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 06:49:44.013713 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.013718 32299 net.cpp:165] Memory required for data: 888321500\nI0821 06:49:44.013725 32299 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:44.013731 32299 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:44.013737 32299 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 06:49:44.013751 32299 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:44.013761 32299 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:44.013811 32299 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 06:49:44.013826 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.013833 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.013839 32299 net.cpp:165] Memory required for data: 896513500\nI0821 06:49:44.013844 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 06:49:44.013855 32299 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 06:49:44.013861 32299 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 06:49:44.013870 32299 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 06:49:44.014384 32299 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 06:49:44.014398 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.014403 32299 net.cpp:165] Memory required for data: 900609500\nI0821 06:49:44.014413 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 06:49:44.014426 32299 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 06:49:44.014432 32299 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 06:49:44.014441 32299 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 06:49:44.014717 32299 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 06:49:44.014730 32299 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 06:49:44.014735 32299 net.cpp:165] Memory required for data: 904705500\nI0821 06:49:44.014746 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:44.014756 32299 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 06:49:44.014762 32299 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 06:49:44.014773 32299 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:44.014832 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 06:49:44.014991 32299 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 06:49:44.015003 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.015008 32299 net.cpp:165] Memory required for data: 908801500\nI0821 06:49:44.015017 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 06:49:44.015027 32299 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 06:49:44.015040 32299 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 06:49:44.015051 32299 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 06:49:44.015061 32299 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 06:49:44.015069 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.015074 32299 net.cpp:165] Memory required for data: 912897500\nI0821 06:49:44.015079 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 06:49:44.015092 32299 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 06:49:44.015099 32299 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 06:49:44.015107 32299 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 06:49:44.015614 32299 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 06:49:44.015628 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.015635 32299 net.cpp:165] Memory required for data: 916993500\nI0821 06:49:44.015643 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 06:49:44.015656 32299 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 06:49:44.015663 
32299 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 06:49:44.015671 32299 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 06:49:44.015939 32299 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 06:49:44.015952 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.015957 32299 net.cpp:165] Memory required for data: 921089500\nI0821 06:49:44.015969 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:44.015977 32299 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 06:49:44.015983 32299 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 06:49:44.015991 32299 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 06:49:44.016053 32299 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 06:49:44.016209 32299 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 06:49:44.016225 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.016230 32299 net.cpp:165] Memory required for data: 925185500\nI0821 06:49:44.016240 32299 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 06:49:44.016249 32299 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 06:49:44.016255 32299 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 06:49:44.016263 32299 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 06:49:44.016271 32299 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 06:49:44.016304 32299 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 06:49:44.016315 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.016320 32299 net.cpp:165] Memory required for data: 929281500\nI0821 06:49:44.016325 32299 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 06:49:44.016340 32299 net.cpp:100] Creating Layer L2_b2_relu\nI0821 06:49:44.016345 32299 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 06:49:44.016356 32299 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 06:49:44.016366 32299 net.cpp:150] 
Setting up L2_b2_relu\nI0821 06:49:44.016374 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.016378 32299 net.cpp:165] Memory required for data: 933377500\nI0821 06:49:44.016383 32299 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:44.016391 32299 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:44.016396 32299 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 06:49:44.016407 32299 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:44.016417 32299 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:44.016465 32299 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 06:49:44.016476 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.016484 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.016487 32299 net.cpp:165] Memory required for data: 941569500\nI0821 06:49:44.016499 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 06:49:44.016515 32299 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 06:49:44.016521 32299 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 06:49:44.016531 32299 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 06:49:44.017032 32299 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 06:49:44.017047 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.017052 32299 net.cpp:165] Memory required for data: 945665500\nI0821 06:49:44.017061 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 06:49:44.017073 32299 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 06:49:44.017081 32299 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 06:49:44.017089 32299 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 06:49:44.017361 32299 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 06:49:44.017375 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.017380 32299 net.cpp:165] Memory required for data: 949761500\nI0821 06:49:44.017392 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:44.017401 32299 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 06:49:44.017407 32299 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 06:49:44.017418 32299 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:44.017475 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 06:49:44.017637 32299 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 06:49:44.017650 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.017655 32299 net.cpp:165] Memory required for data: 953857500\nI0821 06:49:44.017664 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 06:49:44.017673 32299 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 06:49:44.017679 32299 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 06:49:44.017686 32299 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 06:49:44.017699 32299 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 06:49:44.017707 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.017711 32299 net.cpp:165] Memory required for data: 957953500\nI0821 06:49:44.017716 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 06:49:44.017727 32299 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 06:49:44.017736 32299 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 06:49:44.017745 32299 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 06:49:44.018235 32299 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 06:49:44.018250 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.018255 32299 net.cpp:165] Memory required for data: 962049500\nI0821 06:49:44.018265 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 06:49:44.018277 32299 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 06:49:44.018283 32299 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 06:49:44.018295 32299 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 06:49:44.018568 32299 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 06:49:44.018581 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.018586 32299 net.cpp:165] Memory required for data: 966145500\nI0821 06:49:44.018597 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:44.018606 32299 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 06:49:44.018613 32299 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 06:49:44.018621 32299 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 06:49:44.018682 32299 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 06:49:44.018841 32299 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 06:49:44.018857 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.018862 32299 net.cpp:165] Memory required for data: 970241500\nI0821 06:49:44.018870 32299 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 06:49:44.018880 32299 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 06:49:44.018893 32299 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 06:49:44.018901 32299 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 06:49:44.018909 32299 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 06:49:44.018941 32299 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 06:49:44.018951 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.018956 32299 net.cpp:165] Memory required for data: 974337500\nI0821 06:49:44.018961 32299 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 06:49:44.018985 32299 net.cpp:100] Creating Layer L2_b3_relu\nI0821 06:49:44.018990 32299 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 06:49:44.018998 32299 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 06:49:44.019008 32299 net.cpp:150] Setting up L2_b3_relu\nI0821 06:49:44.019016 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.019021 32299 net.cpp:165] Memory required for data: 978433500\nI0821 06:49:44.019026 32299 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:44.019033 32299 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:44.019038 32299 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 06:49:44.019047 32299 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:44.019057 32299 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:44.019109 32299 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 06:49:44.019121 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.019127 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.019132 32299 net.cpp:165] Memory required for data: 986625500\nI0821 06:49:44.019137 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 06:49:44.019151 32299 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 06:49:44.019157 32299 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 06:49:44.019170 32299 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 06:49:44.019681 32299 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 06:49:44.019696 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.019701 32299 net.cpp:165] Memory required for data: 990721500\nI0821 06:49:44.019711 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 06:49:44.019723 32299 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 06:49:44.019729 32299 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 06:49:44.019742 32299 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 06:49:44.020014 32299 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 06:49:44.020026 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.020031 32299 net.cpp:165] Memory required for data: 994817500\nI0821 06:49:44.020042 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:44.020051 32299 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 06:49:44.020057 32299 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 06:49:44.020066 32299 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:44.020125 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 06:49:44.020284 32299 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 06:49:44.020297 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.020301 32299 net.cpp:165] Memory required for data: 998913500\nI0821 06:49:44.020311 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 06:49:44.020319 32299 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 06:49:44.020325 32299 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 06:49:44.020344 32299 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 06:49:44.020354 32299 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 06:49:44.020362 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.020366 32299 net.cpp:165] Memory required for data: 1003009500\nI0821 06:49:44.020380 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 06:49:44.020395 32299 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 06:49:44.020401 32299 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 06:49:44.020409 32299 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 06:49:44.020905 32299 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 06:49:44.020918 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.020923 32299 net.cpp:165] Memory required for data: 1007105500\nI0821 06:49:44.020933 32299 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 06:49:44.020947 32299 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 06:49:44.020954 32299 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 06:49:44.020962 32299 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 06:49:44.021232 32299 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 06:49:44.021250 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021255 32299 net.cpp:165] Memory required for data: 1011201500\nI0821 06:49:44.021266 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:44.021275 32299 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 06:49:44.021281 32299 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 06:49:44.021289 32299 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 06:49:44.021353 32299 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 06:49:44.021518 32299 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 06:49:44.021531 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021536 32299 net.cpp:165] Memory required for data: 1015297500\nI0821 06:49:44.021545 32299 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 06:49:44.021554 32299 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 06:49:44.021560 32299 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 06:49:44.021569 32299 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 06:49:44.021579 32299 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 06:49:44.021607 32299 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 06:49:44.021617 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021622 32299 net.cpp:165] Memory required for data: 1019393500\nI0821 06:49:44.021627 32299 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 06:49:44.021638 32299 net.cpp:100] Creating Layer L2_b4_relu\nI0821 06:49:44.021644 32299 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 06:49:44.021651 32299 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 06:49:44.021661 32299 net.cpp:150] Setting up L2_b4_relu\nI0821 06:49:44.021669 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021674 32299 net.cpp:165] Memory required for data: 1023489500\nI0821 06:49:44.021678 32299 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:44.021687 32299 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:44.021692 32299 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 06:49:44.021699 32299 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:44.021709 32299 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:44.021760 32299 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 06:49:44.021772 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021778 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.021783 32299 net.cpp:165] Memory required for data: 1031681500\nI0821 06:49:44.021788 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 06:49:44.021802 32299 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 06:49:44.021809 32299 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 06:49:44.021818 32299 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 06:49:44.022322 32299 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 06:49:44.022342 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.022347 32299 net.cpp:165] Memory required for data: 1035777500\nI0821 06:49:44.022357 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 06:49:44.022369 32299 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 06:49:44.022377 32299 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 06:49:44.022385 32299 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 06:49:44.022655 32299 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 06:49:44.022670 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.022675 32299 net.cpp:165] Memory required for data: 1039873500\nI0821 06:49:44.022686 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:44.022694 32299 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 06:49:44.022701 32299 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 06:49:44.022708 32299 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:44.022766 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 06:49:44.022934 32299 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 06:49:44.022948 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.022953 32299 net.cpp:165] Memory required for data: 1043969500\nI0821 06:49:44.022963 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 06:49:44.022970 32299 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 06:49:44.022977 32299 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 06:49:44.022987 32299 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 06:49:44.022999 32299 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 06:49:44.023005 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.023010 32299 net.cpp:165] Memory required for data: 1048065500\nI0821 06:49:44.023015 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 06:49:44.023028 32299 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 06:49:44.023036 32299 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 06:49:44.023043 32299 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 06:49:44.023545 32299 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 06:49:44.023560 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.023564 32299 
net.cpp:165] Memory required for data: 1052161500\nI0821 06:49:44.023573 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 06:49:44.023586 32299 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 06:49:44.023592 32299 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 06:49:44.023602 32299 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 06:49:44.023871 32299 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 06:49:44.023885 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.023890 32299 net.cpp:165] Memory required for data: 1056257500\nI0821 06:49:44.023901 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:44.023913 32299 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 06:49:44.023919 32299 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 06:49:44.023927 32299 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 06:49:44.023985 32299 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 06:49:44.024143 32299 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 06:49:44.024157 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024161 32299 net.cpp:165] Memory required for data: 1060353500\nI0821 06:49:44.024170 32299 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 06:49:44.024183 32299 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 06:49:44.024189 32299 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 06:49:44.024197 32299 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 06:49:44.024207 32299 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 06:49:44.024236 32299 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 06:49:44.024253 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024258 32299 net.cpp:165] Memory required for data: 1064449500\nI0821 06:49:44.024263 32299 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 06:49:44.024271 32299 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 06:49:44.024277 32299 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 06:49:44.024288 32299 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 06:49:44.024298 32299 net.cpp:150] Setting up L2_b5_relu\nI0821 06:49:44.024305 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024310 32299 net.cpp:165] Memory required for data: 1068545500\nI0821 06:49:44.024315 32299 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:44.024322 32299 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:44.024333 32299 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 06:49:44.024343 32299 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:44.024353 32299 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:44.024405 32299 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 06:49:44.024417 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024423 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024428 32299 net.cpp:165] Memory required for data: 1076737500\nI0821 06:49:44.024433 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 06:49:44.024444 32299 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 06:49:44.024451 32299 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 06:49:44.024462 32299 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 06:49:44.024963 32299 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 06:49:44.024978 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.024983 32299 net.cpp:165] Memory required for data: 1080833500\nI0821 06:49:44.024992 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 06:49:44.025004 32299 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 06:49:44.025012 32299 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 06:49:44.025019 32299 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 06:49:44.025288 32299 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 06:49:44.025305 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.025310 32299 net.cpp:165] Memory required for data: 1084929500\nI0821 06:49:44.025321 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:44.025336 32299 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 06:49:44.025342 32299 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 06:49:44.025351 32299 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.025409 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 06:49:44.025573 32299 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 06:49:44.025585 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.025590 32299 net.cpp:165] Memory required for data: 1089025500\nI0821 06:49:44.025599 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 06:49:44.025609 32299 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 06:49:44.025614 32299 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 06:49:44.025624 32299 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.025635 32299 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 06:49:44.025642 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.025647 32299 net.cpp:165] Memory required for data: 1093121500\nI0821 06:49:44.025651 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 06:49:44.025662 32299 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 06:49:44.025668 32299 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 06:49:44.025679 32299 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 06:49:44.026176 32299 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 06:49:44.026190 32299 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.026196 32299 net.cpp:165] Memory required for data: 1097217500\nI0821 06:49:44.026206 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 06:49:44.026214 32299 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 06:49:44.026221 32299 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 06:49:44.026232 32299 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 06:49:44.026525 32299 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 06:49:44.026540 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.026546 32299 net.cpp:165] Memory required for data: 1101313500\nI0821 06:49:44.026557 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:44.026572 32299 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 06:49:44.026579 32299 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 06:49:44.026587 32299 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 06:49:44.026646 32299 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 06:49:44.026803 32299 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 06:49:44.026816 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.026821 32299 net.cpp:165] Memory required for data: 1105409500\nI0821 06:49:44.026831 32299 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 06:49:44.026844 32299 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 06:49:44.026850 32299 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 06:49:44.026857 32299 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 06:49:44.026868 32299 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 06:49:44.026897 32299 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 06:49:44.026907 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.026911 32299 net.cpp:165] Memory required for data: 1109505500\nI0821 06:49:44.026917 32299 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 06:49:44.026924 32299 net.cpp:100] Creating Layer L2_b6_relu\nI0821 06:49:44.026931 32299 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 06:49:44.026942 32299 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 06:49:44.026952 32299 net.cpp:150] Setting up L2_b6_relu\nI0821 06:49:44.026958 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.026962 32299 net.cpp:165] Memory required for data: 1113601500\nI0821 06:49:44.026968 32299 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:44.026975 32299 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:44.026980 32299 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 06:49:44.026988 32299 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:44.026998 32299 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:44.027050 32299 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 06:49:44.027061 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.027068 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.027073 32299 net.cpp:165] Memory required for data: 1121793500\nI0821 06:49:44.027078 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 06:49:44.027089 32299 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 06:49:44.027096 32299 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 06:49:44.027108 32299 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 06:49:44.028590 32299 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 06:49:44.028607 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.028612 32299 net.cpp:165] Memory required for data: 1125889500\nI0821 06:49:44.028622 32299 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 06:49:44.028643 32299 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 06:49:44.028650 32299 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 06:49:44.028659 32299 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 06:49:44.028934 32299 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 06:49:44.028947 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.028952 32299 net.cpp:165] Memory required for data: 1129985500\nI0821 06:49:44.028964 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:44.028973 32299 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 06:49:44.028980 32299 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 06:49:44.028990 32299 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.029052 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 06:49:44.029213 32299 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 06:49:44.029227 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.029232 32299 net.cpp:165] Memory required for data: 1134081500\nI0821 06:49:44.029242 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 06:49:44.029249 32299 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 06:49:44.029256 32299 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 06:49:44.029266 32299 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.029278 32299 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 06:49:44.029284 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.029289 32299 net.cpp:165] Memory required for data: 1138177500\nI0821 06:49:44.029294 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 06:49:44.029309 32299 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 06:49:44.029314 32299 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 06:49:44.029323 32299 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 06:49:44.029819 32299 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 06:49:44.029834 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.029839 32299 net.cpp:165] Memory required for data: 1142273500\nI0821 06:49:44.029848 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 06:49:44.029861 32299 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 06:49:44.029867 32299 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 06:49:44.029876 32299 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 06:49:44.030143 32299 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 06:49:44.030156 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030161 32299 net.cpp:165] Memory required for data: 1146369500\nI0821 06:49:44.030172 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:44.030181 32299 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 06:49:44.030187 32299 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 06:49:44.030195 32299 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 06:49:44.030256 32299 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 06:49:44.030421 32299 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 06:49:44.030437 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030442 32299 net.cpp:165] Memory required for data: 1150465500\nI0821 06:49:44.030452 32299 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 06:49:44.030462 32299 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 06:49:44.030468 32299 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 06:49:44.030475 32299 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 06:49:44.030486 32299 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 06:49:44.030516 32299 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 06:49:44.030526 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030530 32299 net.cpp:165] Memory required for 
data: 1154561500\nI0821 06:49:44.030536 32299 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 06:49:44.030544 32299 net.cpp:100] Creating Layer L2_b7_relu\nI0821 06:49:44.030557 32299 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 06:49:44.030570 32299 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 06:49:44.030580 32299 net.cpp:150] Setting up L2_b7_relu\nI0821 06:49:44.030587 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030592 32299 net.cpp:165] Memory required for data: 1158657500\nI0821 06:49:44.030597 32299 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:44.030604 32299 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:44.030611 32299 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 06:49:44.030620 32299 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:44.030632 32299 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:44.030680 32299 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 06:49:44.030692 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030699 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.030704 32299 net.cpp:165] Memory required for data: 1166849500\nI0821 06:49:44.030709 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 06:49:44.030724 32299 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 06:49:44.030730 32299 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 06:49:44.030740 32299 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 06:49:44.031231 32299 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 06:49:44.031245 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.031250 32299 net.cpp:165] Memory required for data: 
1170945500\nI0821 06:49:44.031260 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 06:49:44.031272 32299 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 06:49:44.031278 32299 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 06:49:44.031287 32299 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 06:49:44.031565 32299 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 06:49:44.031579 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.031584 32299 net.cpp:165] Memory required for data: 1175041500\nI0821 06:49:44.031595 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:44.031605 32299 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 06:49:44.031611 32299 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 06:49:44.031621 32299 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.031680 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 06:49:44.031839 32299 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 06:49:44.031852 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.031857 32299 net.cpp:165] Memory required for data: 1179137500\nI0821 06:49:44.031867 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 06:49:44.031875 32299 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 06:49:44.031882 32299 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 06:49:44.031893 32299 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.031903 32299 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 06:49:44.031910 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.031914 32299 net.cpp:165] Memory required for data: 1183233500\nI0821 06:49:44.031919 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 06:49:44.031934 32299 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 06:49:44.031939 32299 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 06:49:44.031949 32299 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 06:49:44.032444 32299 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 06:49:44.032459 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.032464 32299 net.cpp:165] Memory required for data: 1187329500\nI0821 06:49:44.032472 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 06:49:44.032490 32299 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 06:49:44.032496 32299 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 06:49:44.032507 32299 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 06:49:44.032781 32299 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 06:49:44.032795 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.032800 32299 net.cpp:165] Memory required for data: 1191425500\nI0821 06:49:44.032811 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:44.032820 32299 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 06:49:44.032826 32299 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 06:49:44.032835 32299 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 06:49:44.032896 32299 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 06:49:44.033054 32299 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 06:49:44.033069 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.033076 32299 net.cpp:165] Memory required for data: 1195521500\nI0821 06:49:44.033084 32299 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 06:49:44.033093 32299 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 06:49:44.033100 32299 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 06:49:44.033107 32299 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 06:49:44.033116 32299 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 06:49:44.033148 32299 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 06:49:44.033157 32299 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:44.033162 32299 net.cpp:165] Memory required for data: 1199617500\nI0821 06:49:44.033167 32299 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 06:49:44.033175 32299 net.cpp:100] Creating Layer L2_b8_relu\nI0821 06:49:44.033181 32299 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 06:49:44.033191 32299 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 06:49:44.033201 32299 net.cpp:150] Setting up L2_b8_relu\nI0821 06:49:44.033208 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.033213 32299 net.cpp:165] Memory required for data: 1203713500\nI0821 06:49:44.033218 32299 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:44.033226 32299 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:44.033231 32299 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 06:49:44.033241 32299 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:44.033265 32299 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:44.033316 32299 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 06:49:44.033334 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.033342 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.033346 32299 net.cpp:165] Memory required for data: 1211905500\nI0821 06:49:44.033352 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 06:49:44.033367 32299 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 06:49:44.033375 32299 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 06:49:44.033386 32299 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 06:49:44.033884 32299 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 06:49:44.033897 32299 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 06:49:44.033902 32299 net.cpp:165] Memory required for data: 1216001500\nI0821 06:49:44.033911 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 06:49:44.033924 32299 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 06:49:44.033931 32299 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 06:49:44.033941 32299 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 06:49:44.034216 32299 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 06:49:44.034236 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.034242 32299 net.cpp:165] Memory required for data: 1220097500\nI0821 06:49:44.034253 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:44.034262 32299 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 06:49:44.034268 32299 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 06:49:44.034277 32299 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.034348 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 06:49:44.034510 32299 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 06:49:44.034525 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.034530 32299 net.cpp:165] Memory required for data: 1224193500\nI0821 06:49:44.034540 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 06:49:44.034549 32299 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 06:49:44.034555 32299 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 06:49:44.034564 32299 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.034574 32299 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 06:49:44.034580 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.034585 32299 net.cpp:165] Memory required for data: 1228289500\nI0821 06:49:44.034590 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 06:49:44.034605 32299 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 06:49:44.034610 32299 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 06:49:44.034622 32299 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 06:49:44.036099 32299 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 06:49:44.036116 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.036121 32299 net.cpp:165] Memory required for data: 1232385500\nI0821 06:49:44.036131 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 06:49:44.036144 32299 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 06:49:44.036151 32299 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 06:49:44.036160 32299 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 06:49:44.036437 32299 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 06:49:44.036451 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.036456 32299 net.cpp:165] Memory required for data: 1236481500\nI0821 06:49:44.036506 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:44.036521 32299 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 06:49:44.036528 32299 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 06:49:44.036536 32299 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 06:49:44.036595 32299 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 06:49:44.036753 32299 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 06:49:44.036765 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.036770 32299 net.cpp:165] Memory required for data: 1240577500\nI0821 06:49:44.036780 32299 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 06:49:44.036790 32299 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 06:49:44.036798 32299 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 06:49:44.036804 32299 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 06:49:44.036815 32299 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 06:49:44.036844 32299 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 06:49:44.036854 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.036859 32299 net.cpp:165] Memory required for data: 1244673500\nI0821 06:49:44.036864 32299 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 06:49:44.036875 32299 net.cpp:100] Creating Layer L2_b9_relu\nI0821 06:49:44.036881 32299 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 06:49:44.036888 32299 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 06:49:44.036898 32299 net.cpp:150] Setting up L2_b9_relu\nI0821 06:49:44.036906 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.036918 32299 net.cpp:165] Memory required for data: 1248769500\nI0821 06:49:44.036923 32299 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:44.036933 32299 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:44.036939 32299 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 06:49:44.036947 32299 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:44.036958 32299 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:44.037009 32299 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 06:49:44.037021 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.037027 32299 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 06:49:44.037032 32299 net.cpp:165] Memory required for data: 1256961500\nI0821 06:49:44.037037 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 06:49:44.037051 32299 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 06:49:44.037058 32299 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 06:49:44.037067 32299 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 06:49:44.037570 32299 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 06:49:44.037585 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.037590 32299 net.cpp:165] Memory required for data: 1257985500\nI0821 06:49:44.037600 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 06:49:44.037612 32299 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 06:49:44.037619 32299 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 06:49:44.037631 32299 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 06:49:44.037904 32299 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 06:49:44.037916 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.037921 32299 net.cpp:165] Memory required for data: 1259009500\nI0821 06:49:44.037932 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:44.037942 32299 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 06:49:44.037948 32299 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 06:49:44.037956 32299 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:44.038017 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 06:49:44.038184 32299 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 06:49:44.038197 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.038203 32299 net.cpp:165] Memory required for data: 1260033500\nI0821 06:49:44.038213 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 06:49:44.038220 32299 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 06:49:44.038228 32299 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 06:49:44.038238 32299 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 06:49:44.038247 32299 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 06:49:44.038255 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.038259 32299 net.cpp:165] Memory required for data: 1261057500\nI0821 06:49:44.038264 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 06:49:44.038278 32299 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 06:49:44.038285 32299 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 06:49:44.038295 32299 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 06:49:44.038792 32299 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 06:49:44.038807 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.038812 32299 net.cpp:165] Memory required for data: 1262081500\nI0821 06:49:44.038821 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 06:49:44.038835 32299 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 06:49:44.038841 32299 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 06:49:44.038851 32299 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 06:49:44.039129 32299 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 06:49:44.039149 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.039155 32299 net.cpp:165] Memory required for data: 1263105500\nI0821 06:49:44.039165 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:44.039177 32299 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 06:49:44.039185 32299 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 06:49:44.039192 32299 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 06:49:44.039253 32299 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 06:49:44.039427 32299 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 06:49:44.039441 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.039446 32299 net.cpp:165] Memory required for data: 1264129500\nI0821 06:49:44.039456 32299 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 06:49:44.039469 32299 net.cpp:100] Creating Layer L3_b1_pool\nI0821 06:49:44.039476 32299 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 06:49:44.039487 32299 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 06:49:44.039523 32299 net.cpp:150] Setting up L3_b1_pool\nI0821 06:49:44.039535 32299 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 06:49:44.039539 32299 net.cpp:165] Memory required for data: 1265153500\nI0821 06:49:44.039544 32299 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 06:49:44.039558 32299 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 06:49:44.039564 32299 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 06:49:44.039572 32299 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 06:49:44.039579 32299 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 06:49:44.039613 32299 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 06:49:44.039623 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.039628 32299 net.cpp:165] Memory required for data: 1266177500\nI0821 06:49:44.039633 32299 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 06:49:44.039644 32299 net.cpp:100] Creating Layer L3_b1_relu\nI0821 06:49:44.039650 32299 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 06:49:44.039657 32299 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 06:49:44.039667 32299 net.cpp:150] Setting up L3_b1_relu\nI0821 06:49:44.039674 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.039680 32299 net.cpp:165] Memory required for data: 1267201500\nI0821 06:49:44.039685 32299 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 06:49:44.039693 32299 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 06:49:44.039700 32299 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 06:49:44.041652 32299 net.cpp:150] Setting up L3_b1_zeros\nI0821 06:49:44.041673 32299 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 06:49:44.041679 32299 net.cpp:165] Memory required for data: 1268225500\nI0821 06:49:44.041685 32299 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 06:49:44.041695 32299 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 06:49:44.041702 32299 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 06:49:44.041709 32299 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
06:49:44.041718 32299 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 06:49:44.041764 32299 net.cpp:150] Setting up L3_b1_concat0\nI0821 06:49:44.041775 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.041780 32299 net.cpp:165] Memory required for data: 1270273500\nI0821 06:49:44.041785 32299 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:44.041795 32299 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:44.041800 32299 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 06:49:44.041811 32299 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:44.041821 32299 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:44.041880 32299 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 06:49:44.041893 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.041906 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.041911 32299 net.cpp:165] Memory required for data: 1274369500\nI0821 06:49:44.041918 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 06:49:44.041929 32299 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 06:49:44.041935 32299 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 06:49:44.041949 32299 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 06:49:44.042994 32299 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 06:49:44.043009 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.043015 32299 net.cpp:165] Memory required for data: 1276417500\nI0821 06:49:44.043025 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 06:49:44.043035 32299 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 06:49:44.043041 32299 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 06:49:44.043053 32299 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
06:49:44.043335 32299 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 06:49:44.043352 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.043359 32299 net.cpp:165] Memory required for data: 1278465500\nI0821 06:49:44.043370 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:44.043378 32299 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 06:49:44.043385 32299 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 06:49:44.043393 32299 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:44.043454 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 06:49:44.043618 32299 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 06:49:44.043632 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.043635 32299 net.cpp:165] Memory required for data: 1280513500\nI0821 06:49:44.043645 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 06:49:44.043654 32299 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 06:49:44.043660 32299 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 06:49:44.043670 32299 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 06:49:44.043681 32299 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 06:49:44.043689 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.043694 32299 net.cpp:165] Memory required for data: 1282561500\nI0821 06:49:44.043699 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 06:49:44.043715 32299 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 06:49:44.043721 32299 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 06:49:44.043730 32299 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 06:49:44.044780 32299 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 06:49:44.044795 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.044800 32299 net.cpp:165] Memory required for data: 1284609500\nI0821 06:49:44.044811 32299 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 06:49:44.044822 32299 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 06:49:44.044829 32299 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 06:49:44.044841 32299 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 06:49:44.045112 32299 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 06:49:44.045125 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045130 32299 net.cpp:165] Memory required for data: 1286657500\nI0821 06:49:44.045141 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:44.045150 32299 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 06:49:44.045156 32299 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 06:49:44.045166 32299 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 06:49:44.045228 32299 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 06:49:44.045397 32299 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 06:49:44.045411 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045416 32299 net.cpp:165] Memory required for data: 1288705500\nI0821 06:49:44.045433 32299 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 06:49:44.045442 32299 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 06:49:44.045449 32299 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 06:49:44.045456 32299 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 06:49:44.045467 32299 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 06:49:44.045502 32299 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 06:49:44.045516 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045521 32299 net.cpp:165] Memory required for data: 1290753500\nI0821 06:49:44.045527 32299 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 06:49:44.045534 32299 net.cpp:100] Creating Layer L3_b2_relu\nI0821 06:49:44.045541 32299 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 06:49:44.045547 32299 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 06:49:44.045558 32299 net.cpp:150] Setting up L3_b2_relu\nI0821 06:49:44.045565 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045569 32299 net.cpp:165] Memory required for data: 1292801500\nI0821 06:49:44.045574 32299 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:44.045584 32299 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:44.045590 32299 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 06:49:44.045598 32299 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:44.045608 32299 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:44.045658 32299 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 06:49:44.045670 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045677 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.045681 32299 net.cpp:165] Memory required for data: 1296897500\nI0821 06:49:44.045686 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 06:49:44.045698 32299 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 06:49:44.045704 32299 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 06:49:44.045717 32299 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 06:49:44.046761 32299 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 06:49:44.046775 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.046780 32299 net.cpp:165] Memory required for data: 1298945500\nI0821 06:49:44.046790 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 06:49:44.046800 32299 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 06:49:44.046807 32299 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 06:49:44.046818 32299 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 06:49:44.047091 32299 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 06:49:44.047106 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.047112 32299 net.cpp:165] Memory required for data: 1300993500\nI0821 06:49:44.047123 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:44.047132 32299 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 06:49:44.047139 32299 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 06:49:44.047147 32299 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:44.047204 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 06:49:44.047371 32299 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 06:49:44.047385 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.047390 32299 net.cpp:165] Memory required for data: 1303041500\nI0821 06:49:44.047400 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 06:49:44.047411 32299 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 06:49:44.047418 32299 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 06:49:44.047425 32299 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 06:49:44.047436 32299 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 06:49:44.047449 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.047454 32299 net.cpp:165] Memory required for data: 1305089500\nI0821 06:49:44.047459 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 06:49:44.047475 32299 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 06:49:44.047482 32299 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 06:49:44.047490 32299 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 06:49:44.048537 32299 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 06:49:44.048552 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.048558 32299 net.cpp:165] Memory required for data: 1307137500\nI0821 06:49:44.048568 32299 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 06:49:44.048580 32299 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 06:49:44.048588 32299 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 06:49:44.048599 32299 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 06:49:44.048868 32299 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 06:49:44.048882 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.048887 32299 net.cpp:165] Memory required for data: 1309185500\nI0821 06:49:44.048897 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:44.048907 32299 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 06:49:44.048913 32299 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 06:49:44.048923 32299 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 06:49:44.048985 32299 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 06:49:44.049146 32299 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 06:49:44.049160 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.049165 32299 net.cpp:165] Memory required for data: 1311233500\nI0821 06:49:44.049173 32299 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 06:49:44.049183 32299 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 06:49:44.049190 32299 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 06:49:44.049197 32299 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 06:49:44.049208 32299 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 06:49:44.049245 32299 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 06:49:44.049257 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.049263 32299 net.cpp:165] Memory required for data: 1313281500\nI0821 06:49:44.049268 32299 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 06:49:44.049275 32299 net.cpp:100] Creating Layer L3_b3_relu\nI0821 06:49:44.049281 32299 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 06:49:44.049291 32299 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 06:49:44.049302 32299 net.cpp:150] Setting up L3_b3_relu\nI0821 06:49:44.049309 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.049314 32299 net.cpp:165] Memory required for data: 1315329500\nI0821 06:49:44.049319 32299 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:44.049332 32299 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:44.049340 32299 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 06:49:44.049347 32299 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:44.049357 32299 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:44.049409 32299 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 06:49:44.049422 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.049428 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.049432 32299 net.cpp:165] Memory required for data: 1319425500\nI0821 06:49:44.049438 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 06:49:44.049449 32299 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 06:49:44.049455 32299 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 06:49:44.049474 32299 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 06:49:44.050524 32299 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 06:49:44.050559 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.050565 32299 net.cpp:165] Memory required for data: 1321473500\nI0821 06:49:44.050575 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 06:49:44.050585 32299 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 06:49:44.050591 32299 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 06:49:44.050603 32299 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 06:49:44.050878 32299 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 06:49:44.050894 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.050899 32299 net.cpp:165] Memory required for data: 1323521500\nI0821 06:49:44.050909 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:44.050920 32299 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 06:49:44.050925 32299 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 06:49:44.050933 32299 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:44.050993 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 06:49:44.051156 32299 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 06:49:44.051168 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.051173 32299 net.cpp:165] Memory required for data: 1325569500\nI0821 06:49:44.051182 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 06:49:44.051193 32299 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 06:49:44.051200 32299 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 06:49:44.051208 32299 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 06:49:44.051218 32299 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 06:49:44.051226 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.051230 32299 net.cpp:165] Memory required for data: 1327617500\nI0821 06:49:44.051235 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 06:49:44.051252 32299 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 06:49:44.051259 32299 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 06:49:44.051267 32299 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 06:49:44.053269 32299 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 06:49:44.053287 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.053292 32299 net.cpp:165] Memory 
required for data: 1329665500\nI0821 06:49:44.053303 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 06:49:44.053313 32299 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 06:49:44.053319 32299 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 06:49:44.053336 32299 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 06:49:44.053613 32299 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 06:49:44.053629 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.053634 32299 net.cpp:165] Memory required for data: 1331713500\nI0821 06:49:44.053647 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:44.053655 32299 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 06:49:44.053663 32299 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 06:49:44.053670 32299 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 06:49:44.053731 32299 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 06:49:44.053892 32299 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 06:49:44.053906 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.053911 32299 net.cpp:165] Memory required for data: 1333761500\nI0821 06:49:44.053920 32299 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 06:49:44.053933 32299 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 06:49:44.053939 32299 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 06:49:44.053947 32299 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 06:49:44.053956 32299 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 06:49:44.054003 32299 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 06:49:44.054015 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.054020 32299 net.cpp:165] Memory required for data: 1335809500\nI0821 06:49:44.054025 32299 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 06:49:44.054034 32299 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
06:49:44.054040 32299 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 06:49:44.054047 32299 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 06:49:44.054059 32299 net.cpp:150] Setting up L3_b4_relu\nI0821 06:49:44.054065 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.054070 32299 net.cpp:165] Memory required for data: 1337857500\nI0821 06:49:44.054075 32299 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:44.054082 32299 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:44.054087 32299 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 06:49:44.054098 32299 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:44.054110 32299 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:44.054157 32299 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 06:49:44.054168 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.054175 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.054179 32299 net.cpp:165] Memory required for data: 1341953500\nI0821 06:49:44.054184 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 06:49:44.054199 32299 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 06:49:44.054206 32299 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 06:49:44.054215 32299 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 06:49:44.055251 32299 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 06:49:44.055266 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.055271 32299 net.cpp:165] Memory required for data: 1344001500\nI0821 06:49:44.055281 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 06:49:44.055294 32299 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
06:49:44.055301 32299 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 06:49:44.055310 32299 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 06:49:44.055588 32299 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 06:49:44.055601 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.055606 32299 net.cpp:165] Memory required for data: 1346049500\nI0821 06:49:44.055619 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:44.055630 32299 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 06:49:44.055636 32299 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 06:49:44.055645 32299 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:44.055706 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 06:49:44.055868 32299 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 06:49:44.055881 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.055886 32299 net.cpp:165] Memory required for data: 1348097500\nI0821 06:49:44.055896 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 06:49:44.055907 32299 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 06:49:44.055914 32299 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 06:49:44.055922 32299 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 06:49:44.055932 32299 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 06:49:44.055939 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.055943 32299 net.cpp:165] Memory required for data: 1350145500\nI0821 06:49:44.055948 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 06:49:44.055963 32299 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 06:49:44.055968 32299 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 06:49:44.055986 32299 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 06:49:44.057013 32299 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 06:49:44.057029 32299 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:44.057034 32299 net.cpp:165] Memory required for data: 1352193500\nI0821 06:49:44.057042 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 06:49:44.057051 32299 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 06:49:44.057059 32299 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 06:49:44.057070 32299 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 06:49:44.057351 32299 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 06:49:44.057365 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057370 32299 net.cpp:165] Memory required for data: 1354241500\nI0821 06:49:44.057381 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:44.057390 32299 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 06:49:44.057397 32299 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 06:49:44.057405 32299 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 06:49:44.057466 32299 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 06:49:44.057626 32299 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 06:49:44.057638 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057643 32299 net.cpp:165] Memory required for data: 1356289500\nI0821 06:49:44.057653 32299 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 06:49:44.057665 32299 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 06:49:44.057672 32299 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 06:49:44.057680 32299 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 06:49:44.057688 32299 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 06:49:44.057725 32299 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 06:49:44.057737 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057741 32299 net.cpp:165] Memory required for data: 1358337500\nI0821 06:49:44.057747 32299 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
06:49:44.057755 32299 net.cpp:100] Creating Layer L3_b5_relu\nI0821 06:49:44.057761 32299 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 06:49:44.057768 32299 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 06:49:44.057778 32299 net.cpp:150] Setting up L3_b5_relu\nI0821 06:49:44.057786 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057790 32299 net.cpp:165] Memory required for data: 1360385500\nI0821 06:49:44.057796 32299 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:44.057803 32299 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:44.057808 32299 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 06:49:44.057818 32299 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:44.057831 32299 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:44.057876 32299 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 06:49:44.057888 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057893 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.057898 32299 net.cpp:165] Memory required for data: 1364481500\nI0821 06:49:44.057904 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 06:49:44.057919 32299 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 06:49:44.057924 32299 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 06:49:44.057934 32299 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 06:49:44.058969 32299 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 06:49:44.058982 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.058989 32299 net.cpp:165] Memory required for data: 1366529500\nI0821 06:49:44.058997 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 06:49:44.059016 
32299 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 06:49:44.059023 32299 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 06:49:44.059032 32299 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 06:49:44.059305 32299 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 06:49:44.059319 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.059324 32299 net.cpp:165] Memory required for data: 1368577500\nI0821 06:49:44.059341 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:44.059356 32299 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 06:49:44.059363 32299 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 06:49:44.059371 32299 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.059433 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 06:49:44.059598 32299 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 06:49:44.059612 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.059617 32299 net.cpp:165] Memory required for data: 1370625500\nI0821 06:49:44.059625 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 06:49:44.059638 32299 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 06:49:44.059643 32299 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 06:49:44.059651 32299 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 06:49:44.059664 32299 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 06:49:44.059671 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.059676 32299 net.cpp:165] Memory required for data: 1372673500\nI0821 06:49:44.059681 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 06:49:44.059692 32299 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 06:49:44.059698 32299 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 06:49:44.059710 32299 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 06:49:44.060731 32299 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
06:49:44.060746 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.060751 32299 net.cpp:165] Memory required for data: 1374721500\nI0821 06:49:44.060760 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 06:49:44.060770 32299 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 06:49:44.060777 32299 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 06:49:44.060791 32299 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 06:49:44.061065 32299 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 06:49:44.061079 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061084 32299 net.cpp:165] Memory required for data: 1376769500\nI0821 06:49:44.061094 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:44.061105 32299 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 06:49:44.061110 32299 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 06:49:44.061118 32299 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 06:49:44.061178 32299 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 06:49:44.061342 32299 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 06:49:44.061359 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061364 32299 net.cpp:165] Memory required for data: 1378817500\nI0821 06:49:44.061373 32299 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 06:49:44.061383 32299 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 06:49:44.061389 32299 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 06:49:44.061398 32299 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 06:49:44.061405 32299 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 06:49:44.061444 32299 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 06:49:44.061456 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061461 32299 net.cpp:165] Memory required for data: 1380865500\nI0821 06:49:44.061466 32299 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 06:49:44.061475 32299 net.cpp:100] Creating Layer L3_b6_relu\nI0821 06:49:44.061491 32299 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 06:49:44.061498 32299 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 06:49:44.061508 32299 net.cpp:150] Setting up L3_b6_relu\nI0821 06:49:44.061516 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061520 32299 net.cpp:165] Memory required for data: 1382913500\nI0821 06:49:44.061525 32299 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:44.061532 32299 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:44.061537 32299 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 06:49:44.061549 32299 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:44.061560 32299 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:44.061610 32299 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 06:49:44.061625 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061631 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.061636 32299 net.cpp:165] Memory required for data: 1387009500\nI0821 06:49:44.061641 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 06:49:44.061653 32299 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 06:49:44.061659 32299 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 06:49:44.061668 32299 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 06:49:44.062706 32299 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 06:49:44.062741 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.062748 32299 net.cpp:165] Memory required for data: 1389057500\nI0821 06:49:44.062759 32299 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 06:49:44.062772 32299 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 06:49:44.062779 32299 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 06:49:44.062788 32299 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 06:49:44.063063 32299 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 06:49:44.063076 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.063081 32299 net.cpp:165] Memory required for data: 1391105500\nI0821 06:49:44.063092 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:44.063105 32299 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 06:49:44.063112 32299 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 06:49:44.063120 32299 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.063181 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 06:49:44.063351 32299 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 06:49:44.063365 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.063370 32299 net.cpp:165] Memory required for data: 1393153500\nI0821 06:49:44.063380 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 06:49:44.063415 32299 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 06:49:44.063423 32299 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 06:49:44.063432 32299 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 06:49:44.063443 32299 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 06:49:44.063452 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.063455 32299 net.cpp:165] Memory required for data: 1395201500\nI0821 06:49:44.063462 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 06:49:44.063472 32299 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 06:49:44.063478 32299 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 06:49:44.063488 32299 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
06:49:44.064525 32299 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 06:49:44.064540 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.064546 32299 net.cpp:165] Memory required for data: 1397249500\nI0821 06:49:44.064555 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 06:49:44.064576 32299 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 06:49:44.064584 32299 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 06:49:44.064592 32299 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 06:49:44.064863 32299 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 06:49:44.064877 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.064882 32299 net.cpp:165] Memory required for data: 1399297500\nI0821 06:49:44.064891 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:44.064903 32299 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 06:49:44.064910 32299 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 06:49:44.064918 32299 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 06:49:44.064980 32299 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 06:49:44.065142 32299 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 06:49:44.065155 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.065160 32299 net.cpp:165] Memory required for data: 1401345500\nI0821 06:49:44.065170 32299 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 06:49:44.065181 32299 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 06:49:44.065188 32299 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 06:49:44.065196 32299 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 06:49:44.065207 32299 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 06:49:44.065241 32299 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 06:49:44.065253 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.065258 32299 net.cpp:165] Memory 
required for data: 1403393500\nI0821 06:49:44.065263 32299 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 06:49:44.065274 32299 net.cpp:100] Creating Layer L3_b7_relu\nI0821 06:49:44.065281 32299 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 06:49:44.065289 32299 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 06:49:44.065299 32299 net.cpp:150] Setting up L3_b7_relu\nI0821 06:49:44.065306 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.065310 32299 net.cpp:165] Memory required for data: 1405441500\nI0821 06:49:44.065315 32299 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:44.065322 32299 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:44.065333 32299 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 06:49:44.065342 32299 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:44.065352 32299 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:44.065405 32299 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 06:49:44.065418 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.065425 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.065429 32299 net.cpp:165] Memory required for data: 1409537500\nI0821 06:49:44.065434 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 06:49:44.065449 32299 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 06:49:44.065455 32299 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 06:49:44.065465 32299 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 06:49:44.067469 32299 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 06:49:44.067489 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.067495 32299 net.cpp:165] Memory required for data: 
1411585500\nI0821 06:49:44.067505 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 06:49:44.067517 32299 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 06:49:44.067523 32299 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 06:49:44.067534 32299 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 06:49:44.067811 32299 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 06:49:44.067824 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.067836 32299 net.cpp:165] Memory required for data: 1413633500\nI0821 06:49:44.067848 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:44.067860 32299 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 06:49:44.067867 32299 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 06:49:44.067876 32299 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.067937 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 06:49:44.068101 32299 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 06:49:44.068114 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.068120 32299 net.cpp:165] Memory required for data: 1415681500\nI0821 06:49:44.068130 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 06:49:44.068138 32299 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 06:49:44.068145 32299 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 06:49:44.068156 32299 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 06:49:44.068166 32299 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 06:49:44.068173 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.068178 32299 net.cpp:165] Memory required for data: 1417729500\nI0821 06:49:44.068182 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 06:49:44.068197 32299 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 06:49:44.068203 32299 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 06:49:44.068212 32299 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 06:49:44.069238 32299 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 06:49:44.069253 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.069258 32299 net.cpp:165] Memory required for data: 1419777500\nI0821 06:49:44.069267 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 06:49:44.069280 32299 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 06:49:44.069288 32299 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 06:49:44.069296 32299 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 06:49:44.069582 32299 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 06:49:44.069597 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.069602 32299 net.cpp:165] Memory required for data: 1421825500\nI0821 06:49:44.069612 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:44.069622 32299 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 06:49:44.069628 32299 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 06:49:44.069636 32299 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 06:49:44.069700 32299 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 06:49:44.069864 32299 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 06:49:44.069876 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.069881 32299 net.cpp:165] Memory required for data: 1423873500\nI0821 06:49:44.069890 32299 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 06:49:44.069900 32299 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 06:49:44.069907 32299 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 06:49:44.069914 32299 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 06:49:44.069926 32299 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 06:49:44.070122 32299 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 06:49:44.070139 32299 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 06:49:44.070145 32299 net.cpp:165] Memory required for data: 1425921500\nI0821 06:49:44.070150 32299 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 06:49:44.070159 32299 net.cpp:100] Creating Layer L3_b8_relu\nI0821 06:49:44.070165 32299 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 06:49:44.070173 32299 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 06:49:44.070183 32299 net.cpp:150] Setting up L3_b8_relu\nI0821 06:49:44.070190 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.070194 32299 net.cpp:165] Memory required for data: 1427969500\nI0821 06:49:44.070206 32299 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:44.070217 32299 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:44.070222 32299 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 06:49:44.070230 32299 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:44.070241 32299 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:44.070294 32299 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 06:49:44.070307 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.070313 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.070318 32299 net.cpp:165] Memory required for data: 1432065500\nI0821 06:49:44.070323 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 06:49:44.070341 32299 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 06:49:44.070349 32299 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 06:49:44.070361 32299 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 06:49:44.071395 32299 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 06:49:44.071410 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
06:49:44.071415 32299 net.cpp:165] Memory required for data: 1434113500\nI0821 06:49:44.071425 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 06:49:44.071435 32299 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 06:49:44.071442 32299 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 06:49:44.071454 32299 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 06:49:44.071732 32299 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 06:49:44.071748 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.071754 32299 net.cpp:165] Memory required for data: 1436161500\nI0821 06:49:44.071765 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:44.071774 32299 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 06:49:44.071780 32299 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 06:49:44.071789 32299 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.071848 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 06:49:44.072012 32299 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 06:49:44.072026 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.072031 32299 net.cpp:165] Memory required for data: 1438209500\nI0821 06:49:44.072039 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 06:49:44.072048 32299 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 06:49:44.072054 32299 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 06:49:44.072065 32299 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 06:49:44.072077 32299 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 06:49:44.072083 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.072088 32299 net.cpp:165] Memory required for data: 1440257500\nI0821 06:49:44.072093 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 06:49:44.072108 32299 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 06:49:44.072113 32299 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 06:49:44.072123 32299 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 06:49:44.073151 32299 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 06:49:44.073166 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.073173 32299 net.cpp:165] Memory required for data: 1442305500\nI0821 06:49:44.073181 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 06:49:44.073196 32299 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 06:49:44.073204 32299 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 06:49:44.073216 32299 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 06:49:44.073496 32299 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 06:49:44.073510 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.073521 32299 net.cpp:165] Memory required for data: 1444353500\nI0821 06:49:44.073532 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:44.073542 32299 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 06:49:44.073549 32299 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 06:49:44.073559 32299 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 06:49:44.073621 32299 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 06:49:44.073782 32299 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 06:49:44.073796 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.073801 32299 net.cpp:165] Memory required for data: 1446401500\nI0821 06:49:44.073810 32299 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 06:49:44.073820 32299 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 06:49:44.073827 32299 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 06:49:44.073833 32299 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 06:49:44.073848 32299 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 06:49:44.073881 32299 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
06:49:44.073896 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.073901 32299 net.cpp:165] Memory required for data: 1448449500\nI0821 06:49:44.073907 32299 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 06:49:44.073915 32299 net.cpp:100] Creating Layer L3_b9_relu\nI0821 06:49:44.073920 32299 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 06:49:44.073928 32299 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 06:49:44.073938 32299 net.cpp:150] Setting up L3_b9_relu\nI0821 06:49:44.073945 32299 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 06:49:44.073949 32299 net.cpp:165] Memory required for data: 1450497500\nI0821 06:49:44.073954 32299 layer_factory.hpp:77] Creating layer post_pool\nI0821 06:49:44.073966 32299 net.cpp:100] Creating Layer post_pool\nI0821 06:49:44.073971 32299 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 06:49:44.073979 32299 net.cpp:408] post_pool -> post_pool\nI0821 06:49:44.074015 32299 net.cpp:150] Setting up post_pool\nI0821 06:49:44.074028 32299 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 06:49:44.074033 32299 net.cpp:165] Memory required for data: 1450529500\nI0821 06:49:44.074038 32299 layer_factory.hpp:77] Creating layer post_FC\nI0821 06:49:44.074051 32299 net.cpp:100] Creating Layer post_FC\nI0821 06:49:44.074059 32299 net.cpp:434] post_FC <- post_pool\nI0821 06:49:44.074066 32299 net.cpp:408] post_FC -> post_FC_top\nI0821 06:49:44.074229 32299 net.cpp:150] Setting up post_FC\nI0821 06:49:44.074242 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:44.074247 32299 net.cpp:165] Memory required for data: 1450534500\nI0821 06:49:44.074256 32299 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 06:49:44.074265 32299 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 06:49:44.074271 32299 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 06:49:44.074285 32299 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 06:49:44.074295 32299 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 06:49:44.074353 32299 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 06:49:44.074367 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:44.074373 32299 net.cpp:157] Top shape: 125 10 (1250)\nI0821 06:49:44.074378 32299 net.cpp:165] Memory required for data: 1450544500\nI0821 06:49:44.074383 32299 layer_factory.hpp:77] Creating layer accuracy\nI0821 06:49:44.074390 32299 net.cpp:100] Creating Layer accuracy\nI0821 06:49:44.074396 32299 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 06:49:44.074404 32299 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 06:49:44.074412 32299 net.cpp:408] accuracy -> accuracy\nI0821 06:49:44.074429 32299 net.cpp:150] Setting up accuracy\nI0821 06:49:44.074437 32299 net.cpp:157] Top shape: (1)\nI0821 06:49:44.074448 32299 net.cpp:165] Memory required for data: 1450544504\nI0821 06:49:44.074453 32299 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:44.074461 32299 net.cpp:100] Creating Layer loss\nI0821 06:49:44.074467 32299 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 06:49:44.074475 32299 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 06:49:44.074482 32299 net.cpp:408] loss -> loss\nI0821 06:49:44.074496 32299 layer_factory.hpp:77] Creating layer loss\nI0821 06:49:44.074620 32299 net.cpp:150] Setting up loss\nI0821 06:49:44.074633 32299 net.cpp:157] Top shape: (1)\nI0821 06:49:44.074638 32299 net.cpp:160]     with loss weight 1\nI0821 06:49:44.074656 32299 net.cpp:165] Memory required for data: 1450544508\nI0821 06:49:44.074661 32299 net.cpp:226] loss needs backward computation.\nI0821 06:49:44.074667 32299 net.cpp:228] accuracy does not need backward computation.\nI0821 06:49:44.074674 32299 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 06:49:44.074679 32299 net.cpp:226] post_FC needs backward 
computation.\nI0821 06:49:44.074684 32299 net.cpp:226] post_pool needs backward computation.\nI0821 06:49:44.074689 32299 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 06:49:44.074694 32299 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 06:49:44.074700 32299 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 06:49:44.074705 32299 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 06:49:44.074710 32299 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 06:49:44.074715 32299 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 06:49:44.074720 32299 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 06:49:44.074725 32299 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 06:49:44.074730 32299 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 06:49:44.074735 32299 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 06:49:44.074741 32299 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 06:49:44.074746 32299 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 06:49:44.074751 32299 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 06:49:44.074757 32299 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 06:49:44.074762 32299 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 06:49:44.074767 32299 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 06:49:44.074772 32299 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 06:49:44.074777 32299 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 06:49:44.074782 32299 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 06:49:44.074789 32299 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 06:49:44.074793 32299 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 06:49:44.074798 32299 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 06:49:44.074803 32299 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 06:49:44.074810 32299 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 06:49:44.074815 32299 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 06:49:44.074820 32299 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 06:49:44.074825 32299 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 06:49:44.074829 32299 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 06:49:44.074834 32299 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 06:49:44.074843 32299 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 06:49:44.074849 32299 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 06:49:44.074854 32299 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 06:49:44.074859 32299 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 06:49:44.074865 32299 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 06:49:44.074877 32299 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 06:49:44.074882 32299 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 06:49:44.074887 32299 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 06:49:44.074892 32299 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 06:49:44.074898 32299 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 06:49:44.074903 32299 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 06:49:44.074908 32299 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 06:49:44.074913 32299 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 06:49:44.074919 32299 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 06:49:44.074924 32299 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 06:49:44.074930 32299 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 06:49:44.074935 32299 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 06:49:44.074940 32299 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 06:49:44.074945 32299 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 06:49:44.074950 32299 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 06:49:44.074956 32299 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 06:49:44.074961 32299 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 06:49:44.074966 32299 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 06:49:44.074972 32299 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 06:49:44.074977 32299 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 06:49:44.074982 32299 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 06:49:44.074987 32299 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 06:49:44.074992 32299 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 06:49:44.074997 32299 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 06:49:44.075003 32299 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 06:49:44.075008 32299 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 06:49:44.075014 32299 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 06:49:44.075019 32299 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 06:49:44.075026 32299 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 06:49:44.075031 32299 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 06:49:44.075036 32299 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 06:49:44.075040 32299 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 06:49:44.075045 32299 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 06:49:44.075050 
32299 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 06:49:44.075055 32299 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 06:49:44.075062 32299 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 06:49:44.075067 32299 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 06:49:44.075072 32299 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 06:49:44.075078 32299 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 06:49:44.075083 32299 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 06:49:44.075088 32299 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 06:49:44.075093 32299 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 06:49:44.075098 32299 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 06:49:44.075103 32299 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 06:49:44.075109 32299 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 06:49:44.075114 32299 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 06:49:44.075120 32299 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 06:49:44.075131 32299 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 06:49:44.075136 32299 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 06:49:44.075141 32299 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 06:49:44.075148 32299 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 06:49:44.075153 32299 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 06:49:44.075158 32299 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 06:49:44.075165 32299 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 06:49:44.075170 32299 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 06:49:44.075175 32299 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
06:49:44.075179 32299 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 06:49:44.075186 32299 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 06:49:44.075191 32299 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 06:49:44.075196 32299 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 06:49:44.075201 32299 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 06:49:44.075209 32299 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 06:49:44.075215 32299 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 06:49:44.075222 32299 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 06:49:44.075227 32299 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 06:49:44.075232 32299 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 06:49:44.075237 32299 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 06:49:44.075242 32299 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 06:49:44.075248 32299 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 06:49:44.075254 32299 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 06:49:44.075259 32299 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 06:49:44.075265 32299 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 06:49:44.075270 32299 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 06:49:44.075276 32299 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 06:49:44.075281 32299 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 06:49:44.075287 32299 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 06:49:44.075292 32299 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 06:49:44.075297 32299 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 06:49:44.075304 32299 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 06:49:44.075309 32299 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 06:49:44.075314 32299 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 06:49:44.075320 32299 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 06:49:44.075325 32299 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 06:49:44.075337 32299 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 06:49:44.075343 32299 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 06:49:44.075350 32299 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 06:49:44.075354 32299 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 06:49:44.075361 32299 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 06:49:44.075366 32299 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 06:49:44.075371 32299 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 06:49:44.075377 32299 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 06:49:44.075383 32299 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 06:49:44.075388 32299 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 06:49:44.075394 32299 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 06:49:44.075399 32299 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 06:49:44.075410 32299 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 06:49:44.075415 32299 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 06:49:44.075422 32299 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 06:49:44.075428 32299 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 06:49:44.075433 32299 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 06:49:44.075438 32299 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 06:49:44.075444 32299 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 06:49:44.075449 32299 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 06:49:44.075455 32299 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 06:49:44.075461 32299 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 06:49:44.075466 32299 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 06:49:44.075471 32299 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 06:49:44.075477 32299 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 06:49:44.075482 32299 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 06:49:44.075489 32299 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 06:49:44.075494 32299 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 06:49:44.075500 32299 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 06:49:44.075510 32299 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 06:49:44.075515 32299 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 06:49:44.075521 32299 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 06:49:44.075526 32299 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 06:49:44.075532 32299 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 06:49:44.075537 32299 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 06:49:44.075543 32299 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 06:49:44.075549 32299 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 06:49:44.075556 32299 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 06:49:44.075562 32299 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 06:49:44.075567 32299 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 06:49:44.075572 32299 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 06:49:44.075578 32299 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 06:49:44.075583 32299 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 06:49:44.075589 32299 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 06:49:44.075595 32299 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 06:49:44.075600 32299 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 06:49:44.075606 32299 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 06:49:44.075611 32299 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 06:49:44.075618 32299 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 06:49:44.075623 32299 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 06:49:44.075629 32299 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 06:49:44.075634 32299 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 06:49:44.075640 32299 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 06:49:44.075645 32299 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 06:49:44.075651 32299 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 06:49:44.075656 32299 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 06:49:44.075662 32299 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 06:49:44.075670 32299 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 06:49:44.075675 32299 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 06:49:44.075685 32299 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 06:49:44.075691 32299 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 06:49:44.075697 32299 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 06:49:44.075702 32299 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 06:49:44.075708 32299 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 06:49:44.075714 32299 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 06:49:44.075721 32299 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 06:49:44.075726 32299 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 06:49:44.075731 32299 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 06:49:44.075737 32299 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 06:49:44.075743 32299 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 06:49:44.075749 32299 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 06:49:44.075755 32299 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 06:49:44.075762 32299 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 06:49:44.075767 32299 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 06:49:44.075773 32299 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 06:49:44.075778 32299 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 06:49:44.075783 32299 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 06:49:44.075788 32299 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 06:49:44.075794 32299 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 06:49:44.075800 32299 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 06:49:44.075806 32299 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 06:49:44.075812 32299 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 06:49:44.075817 32299 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 06:49:44.075824 32299 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 06:49:44.075829 32299 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 06:49:44.075835 32299 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
06:49:44.075840 32299 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 06:49:44.075846 32299 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 06:49:44.075852 32299 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 06:49:44.075858 32299 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 06:49:44.075865 32299 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 06:49:44.075870 32299 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 06:49:44.075876 32299 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 06:49:44.075882 32299 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 06:49:44.075887 32299 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 06:49:44.075893 32299 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 06:49:44.075899 32299 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 06:49:44.075907 32299 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 06:49:44.075913 32299 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 06:49:44.075919 32299 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 06:49:44.075925 32299 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 06:49:44.075932 32299 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 06:49:44.075937 32299 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 06:49:44.075943 32299 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 06:49:44.075949 32299 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 06:49:44.075954 32299 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 06:49:44.075964 32299 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 06:49:44.075970 32299 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 06:49:44.075976 32299 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 06:49:44.075983 32299 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 06:49:44.075987 32299 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 06:49:44.075994 32299 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 06:49:44.075999 32299 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 06:49:44.076005 32299 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 06:49:44.076011 32299 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 06:49:44.076016 32299 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 06:49:44.076022 32299 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 06:49:44.076028 32299 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 06:49:44.076035 32299 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 06:49:44.076040 32299 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 06:49:44.076045 32299 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 06:49:44.076051 32299 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 06:49:44.076057 32299 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 06:49:44.076063 32299 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 06:49:44.076069 32299 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 06:49:44.076074 32299 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 06:49:44.076081 32299 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 06:49:44.076086 32299 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 06:49:44.076092 32299 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 06:49:44.076097 32299 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 06:49:44.076103 32299 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 06:49:44.076110 32299 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 06:49:44.076115 32299 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 06:49:44.076122 32299 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 06:49:44.076128 32299 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 06:49:44.076133 32299 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 06:49:44.076138 32299 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 06:49:44.076144 32299 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 06:49:44.076150 32299 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 06:49:44.076156 32299 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 06:49:44.076162 32299 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 06:49:44.076169 32299 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 06:49:44.076174 32299 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 06:49:44.076180 32299 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 06:49:44.076186 32299 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 06:49:44.076191 32299 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 06:49:44.076197 32299 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 06:49:44.076203 32299 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 06:49:44.076210 32299 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 06:49:44.076215 32299 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 06:49:44.076220 32299 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 06:49:44.076227 32299 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 06:49:44.076233 32299 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 06:49:44.076246 32299 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 06:49:44.076252 32299 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 06:49:44.076257 32299 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 06:49:44.076263 32299 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 06:49:44.076269 32299 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 06:49:44.076275 32299 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 06:49:44.076282 32299 net.cpp:226] pre_relu needs backward computation.\nI0821 06:49:44.076287 32299 net.cpp:226] pre_scale needs backward computation.\nI0821 06:49:44.076292 32299 net.cpp:226] pre_bn needs backward computation.\nI0821 06:49:44.076297 32299 net.cpp:226] pre_conv needs backward computation.\nI0821 06:49:44.076304 32299 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 06:49:44.076311 32299 net.cpp:228] dataLayer does not need backward computation.\nI0821 06:49:44.076315 32299 net.cpp:270] This network produces output accuracy\nI0821 06:49:44.076323 32299 net.cpp:270] This network produces output loss\nI0821 06:49:44.076656 32299 net.cpp:283] Network initialization done.\nI0821 06:49:44.077674 32299 solver.cpp:60] Solver scaffolding done.\nI0821 06:49:44.299794 32299 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 06:49:44.654764 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:44.654829 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:44.661684 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:44.897193 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:44.897307 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:44.931725 32299 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 06:49:44.931835 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:45.380625 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:45.380702 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:45.388432 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:45.634863 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:45.635004 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:45.687214 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:45.687347 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:46.205250 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:46.205307 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:46.213824 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:46.482022 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:46.482151 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:46.553171 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:46.553300 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:46.636219 32299 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 06:49:47.121820 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:47.121894 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 06:49:47.131450 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:47.419353 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:47.419549 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:47.510402 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:47.510586 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:48.162377 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:48.162451 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:48.173120 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:48.485620 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:48.485832 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:48.598269 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:48.598479 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:49.309662 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:49.309728 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:49.320821 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:49.662792 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:49.663049 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:49.795863 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:49.796108 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
06:49:50.578373 32299 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 06:49:50.578435 32299 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 06:49:50.590560 32299 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 06:49:50.641106 32326 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:50.696836 32311 blocking_queue.cpp:50] Waiting for data\nI0821 06:49:51.017663 32299 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 06:49:51.017902 32299 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 06:49:51.170032 32299 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 06:49:51.170423 32299 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 06:49:51.341702 32299 parallel.cpp:425] Starting Optimization\nI0821 06:49:51.342954 32299 solver.cpp:279] Solving Cifar-Resnet\nI0821 06:49:51.342969 32299 solver.cpp:280] Learning Rate Policy: triangular\nI0821 06:49:51.347882 32299 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 06:51:11.595374 32299 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 06:51:11.595669 32299 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 06:51:15.567769 32299 solver.cpp:228] Iteration 0, loss = 4.90926\nI0821 06:51:15.567819 32299 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0821 06:51:15.567837 32299 solver.cpp:244]     Train net output #1: loss = 4.90926 (* 1 = 4.90926 loss)\nI0821 06:51:15.611233 32299 sgd_solver.cpp:166] Iteration 0, lr = 0.1\nI0821 06:53:33.949141 32299 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 06:54:54.662864 32299 solver.cpp:404]     Test net output #0: accuracy = 0.22776\nI0821 06:54:54.663147 32299 solver.cpp:404]     Test net output #1: loss = 2.0485 (* 1 = 2.0485 loss)\nI0821 06:54:55.961722 32299 
solver.cpp:228] Iteration 100, loss = 1.95118\nI0821 06:54:55.961783 32299 solver.cpp:244]     Train net output #0: accuracy = 0.264\nI0821 06:54:55.961802 32299 solver.cpp:244]     Train net output #1: loss = 1.95118 (* 1 = 1.95118 loss)\nI0821 06:54:56.065129 32299 sgd_solver.cpp:166] Iteration 100, lr = 0.158\nI0821 06:57:14.338539 32299 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 06:58:35.040057 32299 solver.cpp:404]     Test net output #0: accuracy = 0.40348\nI0821 06:58:35.040319 32299 solver.cpp:404]     Test net output #1: loss = 1.72153 (* 1 = 1.72153 loss)\nI0821 06:58:36.338587 32299 solver.cpp:228] Iteration 200, loss = 1.47267\nI0821 06:58:36.338649 32299 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0821 06:58:36.338666 32299 solver.cpp:244]     Train net output #1: loss = 1.47267 (* 1 = 1.47267 loss)\nI0821 06:58:36.441745 32299 sgd_solver.cpp:166] Iteration 200, lr = 0.216\nI0821 07:00:54.638818 32299 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 07:02:15.262956 32299 solver.cpp:404]     Test net output #0: accuracy = 0.44696\nI0821 07:02:15.263221 32299 solver.cpp:404]     Test net output #1: loss = 1.75573 (* 1 = 1.75573 loss)\nI0821 07:02:16.560837 32299 solver.cpp:228] Iteration 300, loss = 1.20898\nI0821 07:02:16.560897 32299 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0821 07:02:16.560916 32299 solver.cpp:244]     Train net output #1: loss = 1.20898 (* 1 = 1.20898 loss)\nI0821 07:02:16.668175 32299 sgd_solver.cpp:166] Iteration 300, lr = 0.274\nI0821 07:04:34.856142 32299 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 07:05:55.542754 32299 solver.cpp:404]     Test net output #0: accuracy = 0.6092\nI0821 07:05:55.542995 32299 solver.cpp:404]     Test net output #1: loss = 1.18656 (* 1 = 1.18656 loss)\nI0821 07:05:56.840199 32299 solver.cpp:228] Iteration 400, loss = 0.895879\nI0821 07:05:56.840260 32299 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0821 07:05:56.840277 
32299 solver.cpp:244]     Train net output #1: loss = 0.895879 (* 1 = 0.895879 loss)\nI0821 07:05:56.945941 32299 sgd_solver.cpp:166] Iteration 400, lr = 0.332\nI0821 07:08:15.186218 32299 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 07:09:35.853824 32299 solver.cpp:404]     Test net output #0: accuracy = 0.49824\nI0821 07:09:35.854041 32299 solver.cpp:404]     Test net output #1: loss = 1.78973 (* 1 = 1.78973 loss)\nI0821 07:09:37.152505 32299 solver.cpp:228] Iteration 500, loss = 0.826505\nI0821 07:09:37.152570 32299 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 07:09:37.152588 32299 solver.cpp:244]     Train net output #1: loss = 0.826505 (* 1 = 0.826505 loss)\nI0821 07:09:37.254710 32299 sgd_solver.cpp:166] Iteration 500, lr = 0.39\nI0821 07:11:55.472074 32299 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 07:13:16.131248 32299 solver.cpp:404]     Test net output #0: accuracy = 0.66384\nI0821 07:13:16.131541 32299 solver.cpp:404]     Test net output #1: loss = 1.08443 (* 1 = 1.08443 loss)\nI0821 07:13:17.428717 32299 solver.cpp:228] Iteration 600, loss = 0.632025\nI0821 07:13:17.428777 32299 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0821 07:13:17.428795 32299 solver.cpp:244]     Train net output #1: loss = 0.632025 (* 1 = 0.632025 loss)\nI0821 07:13:17.528156 32299 sgd_solver.cpp:166] Iteration 600, lr = 0.448\nI0821 07:15:35.719384 32299 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 07:16:56.397081 32299 solver.cpp:404]     Test net output #0: accuracy = 0.70856\nI0821 07:16:56.397305 32299 solver.cpp:404]     Test net output #1: loss = 0.9062 (* 1 = 0.9062 loss)\nI0821 07:16:57.694125 32299 solver.cpp:228] Iteration 700, loss = 0.461976\nI0821 07:16:57.694185 32299 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 07:16:57.694202 32299 solver.cpp:244]     Train net output #1: loss = 0.461976 (* 1 = 0.461976 loss)\nI0821 07:16:57.803534 32299 sgd_solver.cpp:166] Iteration 700, lr = 
0.506\nI0821 07:19:16.086017 32299 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 07:20:36.618479 32299 solver.cpp:404]     Test net output #0: accuracy = 0.74988\nI0821 07:20:36.618746 32299 solver.cpp:404]     Test net output #1: loss = 0.763901 (* 1 = 0.763901 loss)\nI0821 07:20:37.915366 32299 solver.cpp:228] Iteration 800, loss = 0.424252\nI0821 07:20:37.915427 32299 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 07:20:37.915444 32299 solver.cpp:244]     Train net output #1: loss = 0.424252 (* 1 = 0.424252 loss)\nI0821 07:20:38.018154 32299 sgd_solver.cpp:166] Iteration 800, lr = 0.564\nI0821 07:22:56.368419 32299 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 07:24:16.474010 32299 solver.cpp:404]     Test net output #0: accuracy = 0.69372\nI0821 07:24:16.474225 32299 solver.cpp:404]     Test net output #1: loss = 1.01921 (* 1 = 1.01921 loss)\nI0821 07:24:17.770797 32299 solver.cpp:228] Iteration 900, loss = 0.400838\nI0821 07:24:17.770858 32299 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 07:24:17.770875 32299 solver.cpp:244]     Train net output #1: loss = 0.400838 (* 1 = 0.400838 loss)\nI0821 07:24:17.874593 32299 sgd_solver.cpp:166] Iteration 900, lr = 0.622\nI0821 07:26:36.118290 32299 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 07:27:56.729876 32299 solver.cpp:404]     Test net output #0: accuracy = 0.73888\nI0821 07:27:56.730103 32299 solver.cpp:404]     Test net output #1: loss = 0.884056 (* 1 = 0.884056 loss)\nI0821 07:27:58.026952 32299 solver.cpp:228] Iteration 1000, loss = 0.318195\nI0821 07:27:58.027009 32299 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 07:27:58.027027 32299 solver.cpp:244]     Train net output #1: loss = 0.318195 (* 1 = 0.318195 loss)\nI0821 07:27:58.130053 32299 sgd_solver.cpp:166] Iteration 1000, lr = 0.68\nI0821 07:30:16.327872 32299 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 07:31:36.928449 32299 solver.cpp:404]     Test net 
output #0: accuracy = 0.72824\nI0821 07:31:36.928658 32299 solver.cpp:404]     Test net output #1: loss = 1.0446 (* 1 = 1.0446 loss)\nI0821 07:31:38.225723 32299 solver.cpp:228] Iteration 1100, loss = 0.333616\nI0821 07:31:38.225781 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:31:38.225800 32299 solver.cpp:244]     Train net output #1: loss = 0.333616 (* 1 = 0.333616 loss)\nI0821 07:31:38.333000 32299 sgd_solver.cpp:166] Iteration 1100, lr = 0.738\nI0821 07:33:56.505946 32299 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 07:35:17.114879 32299 solver.cpp:404]     Test net output #0: accuracy = 0.74224\nI0821 07:35:17.115108 32299 solver.cpp:404]     Test net output #1: loss = 0.893752 (* 1 = 0.893752 loss)\nI0821 07:35:18.412523 32299 solver.cpp:228] Iteration 1200, loss = 0.456183\nI0821 07:35:18.412581 32299 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 07:35:18.412600 32299 solver.cpp:244]     Train net output #1: loss = 0.456183 (* 1 = 0.456183 loss)\nI0821 07:35:18.511389 32299 sgd_solver.cpp:166] Iteration 1200, lr = 0.796\nI0821 07:37:36.712602 32299 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 07:38:57.383344 32299 solver.cpp:404]     Test net output #0: accuracy = 0.76784\nI0821 07:38:57.383586 32299 solver.cpp:404]     Test net output #1: loss = 0.766301 (* 1 = 0.766301 loss)\nI0821 07:38:58.680399 32299 solver.cpp:228] Iteration 1300, loss = 0.268883\nI0821 07:38:58.680456 32299 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 07:38:58.680474 32299 solver.cpp:244]     Train net output #1: loss = 0.268883 (* 1 = 0.268883 loss)\nI0821 07:38:58.790176 32299 sgd_solver.cpp:166] Iteration 1300, lr = 0.854\nI0821 07:41:16.976434 32299 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 07:42:37.605736 32299 solver.cpp:404]     Test net output #0: accuracy = 0.67408\nI0821 07:42:37.606014 32299 solver.cpp:404]     Test net output #1: loss = 1.29332 (* 1 = 1.29332 
loss)\nI0821 07:42:38.902173 32299 solver.cpp:228] Iteration 1400, loss = 0.276625\nI0821 07:42:38.902230 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:42:38.902248 32299 solver.cpp:244]     Train net output #1: loss = 0.276625 (* 1 = 0.276625 loss)\nI0821 07:42:39.005190 32299 sgd_solver.cpp:166] Iteration 1400, lr = 0.912\nI0821 07:44:57.281697 32299 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 07:46:17.995997 32299 solver.cpp:404]     Test net output #0: accuracy = 0.74676\nI0821 07:46:17.996243 32299 solver.cpp:404]     Test net output #1: loss = 0.876776 (* 1 = 0.876776 loss)\nI0821 07:46:19.292575 32299 solver.cpp:228] Iteration 1500, loss = 0.217449\nI0821 07:46:19.292634 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:46:19.292651 32299 solver.cpp:244]     Train net output #1: loss = 0.217449 (* 1 = 0.217449 loss)\nI0821 07:46:19.397596 32299 sgd_solver.cpp:166] Iteration 1500, lr = 0.97\nI0821 07:48:37.602056 32299 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 07:49:58.004722 32299 solver.cpp:404]     Test net output #0: accuracy = 0.7552\nI0821 07:49:58.004940 32299 solver.cpp:404]     Test net output #1: loss = 0.875536 (* 1 = 0.875536 loss)\nI0821 07:49:59.301981 32299 solver.cpp:228] Iteration 1600, loss = 0.230044\nI0821 07:49:59.302040 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:49:59.302058 32299 solver.cpp:244]     Train net output #1: loss = 0.230044 (* 1 = 0.230044 loss)\nI0821 07:49:59.403653 32299 sgd_solver.cpp:166] Iteration 1600, lr = 1.028\nI0821 07:52:17.678481 32299 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 07:53:38.320484 32299 solver.cpp:404]     Test net output #0: accuracy = 0.72712\nI0821 07:53:38.320731 32299 solver.cpp:404]     Test net output #1: loss = 1.04938 (* 1 = 1.04938 loss)\nI0821 07:53:39.616724 32299 solver.cpp:228] Iteration 1700, loss = 0.286167\nI0821 07:53:39.616783 32299 solver.cpp:244]     Train 
net output #0: accuracy = 0.92\nI0821 07:53:39.616802 32299 solver.cpp:244]     Train net output #1: loss = 0.286167 (* 1 = 0.286167 loss)\nI0821 07:53:39.723182 32299 sgd_solver.cpp:166] Iteration 1700, lr = 1.086\nI0821 07:55:57.881537 32299 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 07:57:18.522655 32299 solver.cpp:404]     Test net output #0: accuracy = 0.69948\nI0821 07:57:18.522891 32299 solver.cpp:404]     Test net output #1: loss = 1.45173 (* 1 = 1.45173 loss)\nI0821 07:57:19.819121 32299 solver.cpp:228] Iteration 1800, loss = 0.32247\nI0821 07:57:19.819181 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 07:57:19.819197 32299 solver.cpp:244]     Train net output #1: loss = 0.32247 (* 1 = 0.32247 loss)\nI0821 07:57:19.929467 32299 sgd_solver.cpp:166] Iteration 1800, lr = 1.144\nI0821 07:59:38.201186 32299 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 08:00:58.833504 32299 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0821 08:00:58.833765 32299 solver.cpp:404]     Test net output #1: loss = 1.03163 (* 1 = 1.03163 loss)\nI0821 08:01:00.130713 32299 solver.cpp:228] Iteration 1900, loss = 0.230359\nI0821 08:01:00.130774 32299 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 08:01:00.130791 32299 solver.cpp:244]     Train net output #1: loss = 0.230359 (* 1 = 0.230359 loss)\nI0821 08:01:00.234766 32299 sgd_solver.cpp:166] Iteration 1900, lr = 1.202\nI0821 08:03:18.383821 32299 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 08:04:39.071142 32299 solver.cpp:404]     Test net output #0: accuracy = 0.7916\nI0821 08:04:39.071359 32299 solver.cpp:404]     Test net output #1: loss = 0.856187 (* 1 = 0.856187 loss)\nI0821 08:04:40.369684 32299 solver.cpp:228] Iteration 2000, loss = 0.189914\nI0821 08:04:40.369743 32299 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:04:40.369761 32299 solver.cpp:244]     Train net output #1: loss = 0.189914 (* 1 = 0.189914 loss)\nI0821 
08:04:40.474012 32299 sgd_solver.cpp:166] Iteration 2000, lr = 1.26\nI0821 08:06:58.662192 32299 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 08:08:19.285056 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80288\nI0821 08:08:19.285300 32299 solver.cpp:404]     Test net output #1: loss = 0.725182 (* 1 = 0.725182 loss)\nI0821 08:08:20.581933 32299 solver.cpp:228] Iteration 2100, loss = 0.125779\nI0821 08:08:20.581995 32299 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:08:20.582012 32299 solver.cpp:244]     Train net output #1: loss = 0.125779 (* 1 = 0.125779 loss)\nI0821 08:08:20.683523 32299 sgd_solver.cpp:166] Iteration 2100, lr = 1.318\nI0821 08:10:38.960625 32299 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 08:11:59.601219 32299 solver.cpp:404]     Test net output #0: accuracy = 0.77292\nI0821 08:11:59.601459 32299 solver.cpp:404]     Test net output #1: loss = 1.08192 (* 1 = 1.08192 loss)\nI0821 08:12:00.896167 32299 solver.cpp:228] Iteration 2200, loss = 0.140996\nI0821 08:12:00.896227 32299 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:12:00.896244 32299 solver.cpp:244]     Train net output #1: loss = 0.140996 (* 1 = 0.140996 loss)\nI0821 08:12:01.003837 32299 sgd_solver.cpp:166] Iteration 2200, lr = 1.376\nI0821 08:14:19.090129 32299 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 08:15:39.758965 32299 solver.cpp:404]     Test net output #0: accuracy = 0.77816\nI0821 08:15:39.759168 32299 solver.cpp:404]     Test net output #1: loss = 0.930088 (* 1 = 0.930088 loss)\nI0821 08:15:41.054311 32299 solver.cpp:228] Iteration 2300, loss = 0.231607\nI0821 08:15:41.054373 32299 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 08:15:41.054389 32299 solver.cpp:244]     Train net output #1: loss = 0.231607 (* 1 = 0.231607 loss)\nI0821 08:15:41.157230 32299 sgd_solver.cpp:166] Iteration 2300, lr = 1.434\nI0821 08:17:59.244752 32299 solver.cpp:337] Iteration 2400, Testing 
net (#0)\nI0821 08:19:19.896378 32299 solver.cpp:404]     Test net output #0: accuracy = 0.76372\nI0821 08:19:19.896625 32299 solver.cpp:404]     Test net output #1: loss = 1.13609 (* 1 = 1.13609 loss)\nI0821 08:19:21.191489 32299 solver.cpp:228] Iteration 2400, loss = 0.144333\nI0821 08:19:21.191555 32299 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:19:21.191571 32299 solver.cpp:244]     Train net output #1: loss = 0.144333 (* 1 = 0.144333 loss)\nI0821 08:19:21.301719 32299 sgd_solver.cpp:166] Iteration 2400, lr = 1.492\nI0821 08:21:39.380142 32299 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 08:23:00.021250 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79152\nI0821 08:23:00.021531 32299 solver.cpp:404]     Test net output #1: loss = 0.897733 (* 1 = 0.897733 loss)\nI0821 08:23:01.316417 32299 solver.cpp:228] Iteration 2500, loss = 0.183009\nI0821 08:23:01.316473 32299 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 08:23:01.316490 32299 solver.cpp:244]     Train net output #1: loss = 0.183009 (* 1 = 0.183009 loss)\nI0821 08:23:01.424962 32299 sgd_solver.cpp:166] Iteration 2500, lr = 1.55\nI0821 08:25:19.595343 32299 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 08:26:40.214207 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79672\nI0821 08:26:40.214514 32299 solver.cpp:404]     Test net output #1: loss = 0.844296 (* 1 = 0.844296 loss)\nI0821 08:26:41.509060 32299 solver.cpp:228] Iteration 2600, loss = 0.0683991\nI0821 08:26:41.509121 32299 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 08:26:41.509138 32299 solver.cpp:244]     Train net output #1: loss = 0.0683991 (* 1 = 0.0683991 loss)\nI0821 08:26:41.611296 32299 sgd_solver.cpp:166] Iteration 2600, lr = 1.608\nI0821 08:28:59.743336 32299 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 08:30:20.358966 32299 solver.cpp:404]     Test net output #0: accuracy = 0.77968\nI0821 08:30:20.359235 32299 
solver.cpp:404]     Test net output #1: loss = 1.00226 (* 1 = 1.00226 loss)\nI0821 08:30:21.653858 32299 solver.cpp:228] Iteration 2700, loss = 0.107709\nI0821 08:30:21.653918 32299 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 08:30:21.653935 32299 solver.cpp:244]     Train net output #1: loss = 0.107709 (* 1 = 0.107709 loss)\nI0821 08:30:21.761672 32299 sgd_solver.cpp:166] Iteration 2700, lr = 1.666\nI0821 08:32:39.938367 32299 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 08:34:00.885545 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79032\nI0821 08:34:00.885787 32299 solver.cpp:404]     Test net output #1: loss = 0.955747 (* 1 = 0.955747 loss)\nI0821 08:34:02.194588 32299 solver.cpp:228] Iteration 2800, loss = 0.0769274\nI0821 08:34:02.194648 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 08:34:02.194664 32299 solver.cpp:244]     Train net output #1: loss = 0.0769274 (* 1 = 0.0769274 loss)\nI0821 08:34:02.284807 32299 sgd_solver.cpp:166] Iteration 2800, lr = 1.724\nI0821 08:36:20.362288 32299 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 08:37:41.656500 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79516\nI0821 08:37:41.656759 32299 solver.cpp:404]     Test net output #1: loss = 0.948728 (* 1 = 0.948728 loss)\nI0821 08:37:42.963961 32299 solver.cpp:228] Iteration 2900, loss = 0.0795301\nI0821 08:37:42.964023 32299 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:37:42.964040 32299 solver.cpp:244]     Train net output #1: loss = 0.0795301 (* 1 = 0.0795301 loss)\nI0821 08:37:43.061760 32299 sgd_solver.cpp:166] Iteration 2900, lr = 1.782\nI0821 08:40:01.230392 32299 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 08:41:22.497542 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79944\nI0821 08:41:22.497786 32299 solver.cpp:404]     Test net output #1: loss = 1.03914 (* 1 = 1.03914 loss)\nI0821 08:41:23.804528 32299 solver.cpp:228] Iteration 
3000, loss = 0.100754\nI0821 08:41:23.804591 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:41:23.804610 32299 solver.cpp:244]     Train net output #1: loss = 0.100754 (* 1 = 0.100754 loss)\nI0821 08:41:23.898064 32299 sgd_solver.cpp:166] Iteration 3000, lr = 1.84\nI0821 08:43:42.031210 32299 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 08:45:03.363929 32299 solver.cpp:404]     Test net output #0: accuracy = 0.78588\nI0821 08:45:03.364166 32299 solver.cpp:404]     Test net output #1: loss = 1.09992 (* 1 = 1.09992 loss)\nI0821 08:45:04.671149 32299 solver.cpp:228] Iteration 3100, loss = 0.090319\nI0821 08:45:04.671205 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:45:04.671223 32299 solver.cpp:244]     Train net output #1: loss = 0.0903189 (* 1 = 0.0903189 loss)\nI0821 08:45:04.766577 32299 sgd_solver.cpp:166] Iteration 3100, lr = 1.898\nI0821 08:47:23.041477 32299 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 08:48:44.376886 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80548\nI0821 08:48:44.377133 32299 solver.cpp:404]     Test net output #1: loss = 0.908446 (* 1 = 0.908446 loss)\nI0821 08:48:45.684443 32299 solver.cpp:228] Iteration 3200, loss = 0.154003\nI0821 08:48:45.684501 32299 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 08:48:45.684523 32299 solver.cpp:244]     Train net output #1: loss = 0.154003 (* 1 = 0.154003 loss)\nI0821 08:48:45.777076 32299 sgd_solver.cpp:166] Iteration 3200, lr = 1.956\nI0821 08:51:04.000072 32299 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 08:52:25.162571 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8026\nI0821 08:52:25.162852 32299 solver.cpp:404]     Test net output #1: loss = 0.94425 (* 1 = 0.94425 loss)\nI0821 08:52:26.469831 32299 solver.cpp:228] Iteration 3300, loss = 0.0851581\nI0821 08:52:26.469887 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 08:52:26.469904 32299 
solver.cpp:244]     Train net output #1: loss = 0.085158 (* 1 = 0.085158 loss)\nI0821 08:52:26.560083 32299 sgd_solver.cpp:166] Iteration 3300, lr = 2.014\nI0821 08:54:44.775077 32299 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 08:56:06.104171 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80704\nI0821 08:56:06.104411 32299 solver.cpp:404]     Test net output #1: loss = 0.989146 (* 1 = 0.989146 loss)\nI0821 08:56:07.411542 32299 solver.cpp:228] Iteration 3400, loss = 0.0834907\nI0821 08:56:07.411599 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:56:07.411617 32299 solver.cpp:244]     Train net output #1: loss = 0.0834906 (* 1 = 0.0834906 loss)\nI0821 08:56:07.510185 32299 sgd_solver.cpp:166] Iteration 3400, lr = 2.072\nI0821 08:58:25.756183 32299 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 08:59:47.067145 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79456\nI0821 08:59:47.067392 32299 solver.cpp:404]     Test net output #1: loss = 1.04806 (* 1 = 1.04806 loss)\nI0821 08:59:48.375077 32299 solver.cpp:228] Iteration 3500, loss = 0.115955\nI0821 08:59:48.375131 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 08:59:48.375149 32299 solver.cpp:244]     Train net output #1: loss = 0.115955 (* 1 = 0.115955 loss)\nI0821 08:59:48.469789 32299 sgd_solver.cpp:166] Iteration 3500, lr = 2.13\nI0821 09:02:06.481673 32299 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 09:03:27.877238 32299 solver.cpp:404]     Test net output #0: accuracy = 0.807\nI0821 09:03:27.877498 32299 solver.cpp:404]     Test net output #1: loss = 0.966925 (* 1 = 0.966925 loss)\nI0821 09:03:29.185953 32299 solver.cpp:228] Iteration 3600, loss = 0.0665708\nI0821 09:03:29.185997 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 09:03:29.186020 32299 solver.cpp:244]     Train net output #1: loss = 0.0665706 (* 1 = 0.0665706 loss)\nI0821 09:03:29.272841 32299 sgd_solver.cpp:166] Iteration 
3600, lr = 2.188\nI0821 09:05:47.040540 32299 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 09:07:08.422250 32299 solver.cpp:404]     Test net output #0: accuracy = 0.77656\nI0821 09:07:08.422507 32299 solver.cpp:404]     Test net output #1: loss = 1.44705 (* 1 = 1.44705 loss)\nI0821 09:07:09.729914 32299 solver.cpp:228] Iteration 3700, loss = 0.0782741\nI0821 09:07:09.729972 32299 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:07:09.729998 32299 solver.cpp:244]     Train net output #1: loss = 0.078274 (* 1 = 0.078274 loss)\nI0821 09:07:09.822463 32299 sgd_solver.cpp:166] Iteration 3700, lr = 2.246\nI0821 09:09:27.555261 32299 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 09:10:48.914400 32299 solver.cpp:404]     Test net output #0: accuracy = 0.77476\nI0821 09:10:48.914700 32299 solver.cpp:404]     Test net output #1: loss = 1.32938 (* 1 = 1.32938 loss)\nI0821 09:10:50.223372 32299 solver.cpp:228] Iteration 3800, loss = 0.152833\nI0821 09:10:50.223417 32299 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 09:10:50.223439 32299 solver.cpp:244]     Train net output #1: loss = 0.152833 (* 1 = 0.152833 loss)\nI0821 09:10:50.306690 32299 sgd_solver.cpp:166] Iteration 3800, lr = 2.304\nI0821 09:13:07.991745 32299 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 09:14:29.344847 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79276\nI0821 09:14:29.345072 32299 solver.cpp:404]     Test net output #1: loss = 1.1682 (* 1 = 1.1682 loss)\nI0821 09:14:30.651723 32299 solver.cpp:228] Iteration 3900, loss = 0.0982011\nI0821 09:14:30.651765 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:14:30.651782 32299 solver.cpp:244]     Train net output #1: loss = 0.0982009 (* 1 = 0.0982009 loss)\nI0821 09:14:30.738174 32299 sgd_solver.cpp:166] Iteration 3900, lr = 2.362\nI0821 09:16:48.527470 32299 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 09:18:09.877775 32299 solver.cpp:404]   
  Test net output #0: accuracy = 0.82696\nI0821 09:18:09.878026 32299 solver.cpp:404]     Test net output #1: loss = 1.004 (* 1 = 1.004 loss)\nI0821 09:18:11.185417 32299 solver.cpp:228] Iteration 4000, loss = 0.0461584\nI0821 09:18:11.185473 32299 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 09:18:11.185490 32299 solver.cpp:244]     Train net output #1: loss = 0.0461582 (* 1 = 0.0461582 loss)\nI0821 09:18:11.274983 32299 sgd_solver.cpp:166] Iteration 4000, lr = 2.42\nI0821 09:20:28.970335 32299 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 09:21:50.301861 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0821 09:21:50.302079 32299 solver.cpp:404]     Test net output #1: loss = 1.11746 (* 1 = 1.11746 loss)\nI0821 09:21:51.609431 32299 solver.cpp:228] Iteration 4100, loss = 0.0178559\nI0821 09:21:51.609493 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:21:51.609510 32299 solver.cpp:244]     Train net output #1: loss = 0.0178558 (* 1 = 0.0178558 loss)\nI0821 09:21:51.698648 32299 sgd_solver.cpp:166] Iteration 4100, lr = 2.478\nI0821 09:24:09.509922 32299 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 09:25:30.826321 32299 solver.cpp:404]     Test net output #0: accuracy = 0.7946\nI0821 09:25:30.826565 32299 solver.cpp:404]     Test net output #1: loss = 1.22967 (* 1 = 1.22967 loss)\nI0821 09:25:32.133843 32299 solver.cpp:228] Iteration 4200, loss = 0.0567204\nI0821 09:25:32.133904 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 09:25:32.133920 32299 solver.cpp:244]     Train net output #1: loss = 0.0567203 (* 1 = 0.0567203 loss)\nI0821 09:25:32.229816 32299 sgd_solver.cpp:166] Iteration 4200, lr = 2.536\nI0821 09:27:50.480610 32299 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 09:29:11.802363 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80344\nI0821 09:29:11.802590 32299 solver.cpp:404]     Test net output #1: loss = 1.13135 (* 1 = 1.13135 
loss)\nI0821 09:29:13.110747 32299 solver.cpp:228] Iteration 4300, loss = 0.092367\nI0821 09:29:13.110807 32299 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:29:13.110823 32299 solver.cpp:244]     Train net output #1: loss = 0.0923668 (* 1 = 0.0923668 loss)\nI0821 09:29:13.202988 32299 sgd_solver.cpp:166] Iteration 4300, lr = 2.594\nI0821 09:31:31.414463 32299 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 09:32:52.741279 32299 solver.cpp:404]     Test net output #0: accuracy = 0.7948\nI0821 09:32:52.741539 32299 solver.cpp:404]     Test net output #1: loss = 1.19365 (* 1 = 1.19365 loss)\nI0821 09:32:54.049507 32299 solver.cpp:228] Iteration 4400, loss = 0.0792604\nI0821 09:32:54.049572 32299 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:32:54.049589 32299 solver.cpp:244]     Train net output #1: loss = 0.0792602 (* 1 = 0.0792602 loss)\nI0821 09:32:54.141455 32299 sgd_solver.cpp:166] Iteration 4400, lr = 2.652\nI0821 09:35:12.344175 32299 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 09:36:33.654208 32299 solver.cpp:404]     Test net output #0: accuracy = 0.81892\nI0821 09:36:33.654444 32299 solver.cpp:404]     Test net output #1: loss = 0.910449 (* 1 = 0.910449 loss)\nI0821 09:36:34.962648 32299 solver.cpp:228] Iteration 4500, loss = 0.113562\nI0821 09:36:34.962693 32299 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:36:34.962708 32299 solver.cpp:244]     Train net output #1: loss = 0.113562 (* 1 = 0.113562 loss)\nI0821 09:36:35.054404 32299 sgd_solver.cpp:166] Iteration 4500, lr = 2.71\nI0821 09:38:52.950425 32299 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 09:40:14.268731 32299 solver.cpp:404]     Test net output #0: accuracy = 0.78716\nI0821 09:40:14.268981 32299 solver.cpp:404]     Test net output #1: loss = 1.39248 (* 1 = 1.39248 loss)\nI0821 09:40:15.578543 32299 solver.cpp:228] Iteration 4600, loss = 0.117306\nI0821 09:40:15.578588 32299 solver.cpp:244]     Train 
net output #0: accuracy = 0.968\nI0821 09:40:15.578603 32299 solver.cpp:244]     Train net output #1: loss = 0.117306 (* 1 = 0.117306 loss)\nI0821 09:40:15.665303 32299 sgd_solver.cpp:166] Iteration 4600, lr = 2.768\nI0821 09:42:33.333806 32299 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 09:43:54.611557 32299 solver.cpp:404]     Test net output #0: accuracy = 0.81112\nI0821 09:43:54.611755 32299 solver.cpp:404]     Test net output #1: loss = 1.01891 (* 1 = 1.01891 loss)\nI0821 09:43:55.920996 32299 solver.cpp:228] Iteration 4700, loss = 0.0527374\nI0821 09:43:55.921058 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 09:43:55.921075 32299 solver.cpp:244]     Train net output #1: loss = 0.0527373 (* 1 = 0.0527373 loss)\nI0821 09:43:56.010933 32299 sgd_solver.cpp:166] Iteration 4700, lr = 2.826\nI0821 09:46:13.703330 32299 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 09:47:34.967630 32299 solver.cpp:404]     Test net output #0: accuracy = 0.79248\nI0821 09:47:34.967870 32299 solver.cpp:404]     Test net output #1: loss = 1.30495 (* 1 = 1.30495 loss)\nI0821 09:47:36.277271 32299 solver.cpp:228] Iteration 4800, loss = 0.0492631\nI0821 09:47:36.277315 32299 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 09:47:36.277330 32299 solver.cpp:244]     Train net output #1: loss = 0.0492629 (* 1 = 0.0492629 loss)\nI0821 09:47:36.368307 32299 sgd_solver.cpp:166] Iteration 4800, lr = 2.884\nI0821 09:49:54.033529 32299 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 09:51:15.366243 32299 solver.cpp:404]     Test net output #0: accuracy = 0.82984\nI0821 09:51:15.366505 32299 solver.cpp:404]     Test net output #1: loss = 0.974017 (* 1 = 0.974017 loss)\nI0821 09:51:16.677206 32299 solver.cpp:228] Iteration 4900, loss = 0.0526924\nI0821 09:51:16.677268 32299 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 09:51:16.677284 32299 solver.cpp:244]     Train net output #1: loss = 0.0526922 (* 1 = 
0.0526922 loss)\nI0821 09:51:16.766554 32299 sgd_solver.cpp:166] Iteration 4900, lr = 2.942\nI0821 09:53:34.409863 32299 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 09:54:55.721604 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80972\nI0821 09:54:55.721846 32299 solver.cpp:404]     Test net output #1: loss = 1.13745 (* 1 = 1.13745 loss)\nI0821 09:54:57.032758 32299 solver.cpp:228] Iteration 5000, loss = 0.0122571\nI0821 09:54:57.032801 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:54:57.032817 32299 solver.cpp:244]     Train net output #1: loss = 0.0122569 (* 1 = 0.0122569 loss)\nI0821 09:54:57.116674 32299 sgd_solver.cpp:166] Iteration 5000, lr = 3\nI0821 09:57:14.819558 32299 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 09:58:36.088059 32299 solver.cpp:404]     Test net output #0: accuracy = 0.82928\nI0821 09:58:36.088289 32299 solver.cpp:404]     Test net output #1: loss = 0.982931 (* 1 = 0.982931 loss)\nI0821 09:58:37.399250 32299 solver.cpp:228] Iteration 5100, loss = 0.00923625\nI0821 09:58:37.399294 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:58:37.399317 32299 solver.cpp:244]     Train net output #1: loss = 0.00923604 (* 1 = 0.00923604 loss)\nI0821 09:58:37.486779 32299 sgd_solver.cpp:166] Iteration 5100, lr = 2.942\nI0821 10:00:55.205638 32299 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 10:02:16.275811 32299 solver.cpp:404]     Test net output #0: accuracy = 0.81604\nI0821 10:02:16.276080 32299 solver.cpp:404]     Test net output #1: loss = 1.23414 (* 1 = 1.23414 loss)\nI0821 10:02:17.585136 32299 solver.cpp:228] Iteration 5200, loss = 0.0324493\nI0821 10:02:17.585178 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:02:17.585194 32299 solver.cpp:244]     Train net output #1: loss = 0.0324491 (* 1 = 0.0324491 loss)\nI0821 10:02:17.671609 32299 sgd_solver.cpp:166] Iteration 5200, lr = 2.884\nI0821 10:04:35.409942 32299 solver.cpp:337] 
Iteration 5300, Testing net (#0)\nI0821 10:05:56.694499 32299 solver.cpp:404]     Test net output #0: accuracy = 0.810441\nI0821 10:05:56.694720 32299 solver.cpp:404]     Test net output #1: loss = 1.04111 (* 1 = 1.04111 loss)\nI0821 10:05:58.005049 32299 solver.cpp:228] Iteration 5300, loss = 0.0455222\nI0821 10:05:58.005091 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:05:58.005107 32299 solver.cpp:244]     Train net output #1: loss = 0.045522 (* 1 = 0.045522 loss)\nI0821 10:05:58.089013 32299 sgd_solver.cpp:166] Iteration 5300, lr = 2.826\nI0821 10:08:15.772686 32299 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 10:09:37.100172 32299 solver.cpp:404]     Test net output #0: accuracy = 0.82416\nI0821 10:09:37.100420 32299 solver.cpp:404]     Test net output #1: loss = 1.02441 (* 1 = 1.02441 loss)\nI0821 10:09:38.410362 32299 solver.cpp:228] Iteration 5400, loss = 0.0372345\nI0821 10:09:38.410418 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:09:38.410435 32299 solver.cpp:244]     Train net output #1: loss = 0.0372343 (* 1 = 0.0372343 loss)\nI0821 10:09:38.496095 32299 sgd_solver.cpp:166] Iteration 5400, lr = 2.768\nI0821 10:11:56.231215 32299 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 10:13:17.563103 32299 solver.cpp:404]     Test net output #0: accuracy = 0.80464\nI0821 10:13:17.563390 32299 solver.cpp:404]     Test net output #1: loss = 1.28346 (* 1 = 1.28346 loss)\nI0821 10:13:18.873816 32299 solver.cpp:228] Iteration 5500, loss = 0.0446906\nI0821 10:13:18.873859 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:13:18.873875 32299 solver.cpp:244]     Train net output #1: loss = 0.0446904 (* 1 = 0.0446904 loss)\nI0821 10:13:18.965126 32299 sgd_solver.cpp:166] Iteration 5500, lr = 2.71\nI0821 10:15:36.659497 32299 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 10:16:57.999583 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8384\nI0821 
10:16:57.999835 32299 solver.cpp:404]     Test net output #1: loss = 1.08716 (* 1 = 1.08716 loss)\nI0821 10:16:59.309993 32299 solver.cpp:228] Iteration 5600, loss = 0.0104168\nI0821 10:16:59.310034 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:16:59.310050 32299 solver.cpp:244]     Train net output #1: loss = 0.0104165 (* 1 = 0.0104165 loss)\nI0821 10:16:59.398996 32299 sgd_solver.cpp:166] Iteration 5600, lr = 2.652\nI0821 10:19:17.145918 32299 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 10:20:38.342859 32299 solver.cpp:404]     Test net output #0: accuracy = 0.84208\nI0821 10:20:38.343085 32299 solver.cpp:404]     Test net output #1: loss = 1.05452 (* 1 = 1.05452 loss)\nI0821 10:20:39.652822 32299 solver.cpp:228] Iteration 5700, loss = 0.0530097\nI0821 10:20:39.652878 32299 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:20:39.652895 32299 solver.cpp:244]     Train net output #1: loss = 0.0530094 (* 1 = 0.0530094 loss)\nI0821 10:20:39.735975 32299 sgd_solver.cpp:166] Iteration 5700, lr = 2.594\nI0821 10:22:57.428362 32299 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 10:24:18.791728 32299 solver.cpp:404]     Test net output #0: accuracy = 0.85236\nI0821 10:24:18.791957 32299 solver.cpp:404]     Test net output #1: loss = 1.00329 (* 1 = 1.00329 loss)\nI0821 10:24:20.101991 32299 solver.cpp:228] Iteration 5800, loss = 0.00451819\nI0821 10:24:20.102048 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:24:20.102066 32299 solver.cpp:244]     Train net output #1: loss = 0.00451796 (* 1 = 0.00451796 loss)\nI0821 10:24:20.194916 32299 sgd_solver.cpp:166] Iteration 5800, lr = 2.536\nI0821 10:26:37.957489 32299 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 10:27:59.203740 32299 solver.cpp:404]     Test net output #0: accuracy = 0.85224\nI0821 10:27:59.203969 32299 solver.cpp:404]     Test net output #1: loss = 0.977845 (* 1 = 0.977845 loss)\nI0821 10:28:00.513846 32299 
solver.cpp:228] Iteration 5900, loss = 0.00101155\nI0821 10:28:00.513900 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:28:00.513917 32299 solver.cpp:244]     Train net output #1: loss = 0.00101132 (* 1 = 0.00101132 loss)\nI0821 10:28:00.607154 32299 sgd_solver.cpp:166] Iteration 5900, lr = 2.478\nI0821 10:30:18.331920 32299 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 10:31:39.649576 32299 solver.cpp:404]     Test net output #0: accuracy = 0.84616\nI0821 10:31:39.649827 32299 solver.cpp:404]     Test net output #1: loss = 1.18746 (* 1 = 1.18746 loss)\nI0821 10:31:40.959694 32299 solver.cpp:228] Iteration 6000, loss = 0.0106732\nI0821 10:31:40.959735 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:31:40.959751 32299 solver.cpp:244]     Train net output #1: loss = 0.010673 (* 1 = 0.010673 loss)\nI0821 10:31:41.052785 32299 sgd_solver.cpp:166] Iteration 6000, lr = 2.42\nI0821 10:33:58.842151 32299 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 10:35:20.178753 32299 solver.cpp:404]     Test net output #0: accuracy = 0.85868\nI0821 10:35:20.179003 32299 solver.cpp:404]     Test net output #1: loss = 1.10118 (* 1 = 1.10118 loss)\nI0821 10:35:21.489150 32299 solver.cpp:228] Iteration 6100, loss = 0.000567471\nI0821 10:35:21.489210 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:35:21.489228 32299 solver.cpp:244]     Train net output #1: loss = 0.000567232 (* 1 = 0.000567232 loss)\nI0821 10:35:21.572674 32299 sgd_solver.cpp:166] Iteration 6100, lr = 2.362\nI0821 10:37:39.284802 32299 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 10:39:00.579329 32299 solver.cpp:404]     Test net output #0: accuracy = 0.84956\nI0821 10:39:00.579576 32299 solver.cpp:404]     Test net output #1: loss = 1.1826 (* 1 = 1.1826 loss)\nI0821 10:39:01.890909 32299 solver.cpp:228] Iteration 6200, loss = 0.0176615\nI0821 10:39:01.890970 32299 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0821 10:39:01.890988 32299 solver.cpp:244]     Train net output #1: loss = 0.0176613 (* 1 = 0.0176613 loss)\nI0821 10:39:01.976897 32299 sgd_solver.cpp:166] Iteration 6200, lr = 2.304\nI0821 10:41:19.704123 32299 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 10:42:40.966761 32299 solver.cpp:404]     Test net output #0: accuracy = 0.84572\nI0821 10:42:40.967053 32299 solver.cpp:404]     Test net output #1: loss = 1.25967 (* 1 = 1.25967 loss)\nI0821 10:42:42.278594 32299 solver.cpp:228] Iteration 6300, loss = 0.0285463\nI0821 10:42:42.278636 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:42:42.278653 32299 solver.cpp:244]     Train net output #1: loss = 0.028546 (* 1 = 0.028546 loss)\nI0821 10:42:42.366412 32299 sgd_solver.cpp:166] Iteration 6300, lr = 2.246\nI0821 10:45:00.102843 32299 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 10:46:21.425329 32299 solver.cpp:404]     Test net output #0: accuracy = 0.86112\nI0821 10:46:21.425572 32299 solver.cpp:404]     Test net output #1: loss = 1.02976 (* 1 = 1.02976 loss)\nI0821 10:46:22.736289 32299 solver.cpp:228] Iteration 6400, loss = 3.97586e-05\nI0821 10:46:22.736333 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:46:22.736349 32299 solver.cpp:244]     Train net output #1: loss = 3.95071e-05 (* 1 = 3.95071e-05 loss)\nI0821 10:46:22.821928 32299 sgd_solver.cpp:166] Iteration 6400, lr = 2.188\nI0821 10:48:40.541846 32299 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 10:50:01.841586 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8648\nI0821 10:50:01.841802 32299 solver.cpp:404]     Test net output #1: loss = 1.0602 (* 1 = 1.0602 loss)\nI0821 10:50:03.152338 32299 solver.cpp:228] Iteration 6500, loss = 0.00617512\nI0821 10:50:03.152395 32299 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:50:03.152412 32299 solver.cpp:244]     Train net output #1: loss = 0.00617487 (* 1 = 0.00617487 loss)\nI0821 
10:50:03.237411 32299 sgd_solver.cpp:166] Iteration 6500, lr = 2.13\nI0821 10:52:20.961782 32299 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 10:53:42.262485 32299 solver.cpp:404]     Test net output #0: accuracy = 0.866921\nI0821 10:53:42.262743 32299 solver.cpp:404]     Test net output #1: loss = 1.05702 (* 1 = 1.05702 loss)\nI0821 10:53:43.572906 32299 solver.cpp:228] Iteration 6600, loss = 0.00373924\nI0821 10:53:43.572952 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:53:43.572975 32299 solver.cpp:244]     Train net output #1: loss = 0.00373899 (* 1 = 0.00373899 loss)\nI0821 10:53:43.657923 32299 sgd_solver.cpp:166] Iteration 6600, lr = 2.072\nI0821 10:56:01.416486 32299 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 10:57:22.747701 32299 solver.cpp:404]     Test net output #0: accuracy = 0.870281\nI0821 10:57:22.747946 32299 solver.cpp:404]     Test net output #1: loss = 1.03224 (* 1 = 1.03224 loss)\nI0821 10:57:24.057713 32299 solver.cpp:228] Iteration 6700, loss = 7.09408e-05\nI0821 10:57:24.057760 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:57:24.057783 32299 solver.cpp:244]     Train net output #1: loss = 7.06897e-05 (* 1 = 7.06897e-05 loss)\nI0821 10:57:24.146606 32299 sgd_solver.cpp:166] Iteration 6700, lr = 2.014\nI0821 10:59:41.875066 32299 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 11:01:03.187445 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87092\nI0821 11:01:03.187711 32299 solver.cpp:404]     Test net output #1: loss = 1.05402 (* 1 = 1.05402 loss)\nI0821 11:01:04.498870 32299 solver.cpp:228] Iteration 6800, loss = 1.80248e-05\nI0821 11:01:04.498915 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:01:04.498939 32299 solver.cpp:244]     Train net output #1: loss = 1.77753e-05 (* 1 = 1.77753e-05 loss)\nI0821 11:01:04.588462 32299 sgd_solver.cpp:166] Iteration 6800, lr = 1.956\nI0821 11:03:22.410174 32299 solver.cpp:337] Iteration 6900, 
Testing net (#0)\nI0821 11:04:43.723121 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87168\nI0821 11:04:43.723361 32299 solver.cpp:404]     Test net output #1: loss = 1.04282 (* 1 = 1.04282 loss)\nI0821 11:04:45.034695 32299 solver.cpp:228] Iteration 6900, loss = 1.32616e-05\nI0821 11:04:45.034759 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:04:45.034785 32299 solver.cpp:244]     Train net output #1: loss = 1.30121e-05 (* 1 = 1.30121e-05 loss)\nI0821 11:04:45.124356 32299 sgd_solver.cpp:166] Iteration 6900, lr = 1.898\nI0821 11:07:02.899363 32299 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 11:08:24.229521 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87068\nI0821 11:08:24.229779 32299 solver.cpp:404]     Test net output #1: loss = 1.06381 (* 1 = 1.06381 loss)\nI0821 11:08:25.541460 32299 solver.cpp:228] Iteration 7000, loss = 2.11814e-05\nI0821 11:08:25.541522 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:08:25.541546 32299 solver.cpp:244]     Train net output #1: loss = 2.09319e-05 (* 1 = 2.09319e-05 loss)\nI0821 11:08:25.625870 32299 sgd_solver.cpp:166] Iteration 7000, lr = 1.84\nI0821 11:10:43.393663 32299 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 11:12:04.715337 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87164\nI0821 11:12:04.715610 32299 solver.cpp:404]     Test net output #1: loss = 1.04889 (* 1 = 1.04889 loss)\nI0821 11:12:06.025308 32299 solver.cpp:228] Iteration 7100, loss = 2.03713e-05\nI0821 11:12:06.025372 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:12:06.025398 32299 solver.cpp:244]     Train net output #1: loss = 2.01218e-05 (* 1 = 2.01218e-05 loss)\nI0821 11:12:06.111719 32299 sgd_solver.cpp:166] Iteration 7100, lr = 1.782\nI0821 11:14:23.868335 32299 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 11:15:45.188362 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87036\nI0821 11:15:45.188578 
32299 solver.cpp:404]     Test net output #1: loss = 1.06663 (* 1 = 1.06663 loss)\nI0821 11:15:46.499131 32299 solver.cpp:228] Iteration 7200, loss = 1.60335e-05\nI0821 11:15:46.499179 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:15:46.499202 32299 solver.cpp:244]     Train net output #1: loss = 1.5784e-05 (* 1 = 1.5784e-05 loss)\nI0821 11:15:46.589288 32299 sgd_solver.cpp:166] Iteration 7200, lr = 1.724\nI0821 11:18:04.358736 32299 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 11:19:25.495565 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87176\nI0821 11:19:25.495795 32299 solver.cpp:404]     Test net output #1: loss = 1.05331 (* 1 = 1.05331 loss)\nI0821 11:19:26.805932 32299 solver.cpp:228] Iteration 7300, loss = 2.33865e-05\nI0821 11:19:26.805979 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:19:26.806001 32299 solver.cpp:244]     Train net output #1: loss = 2.31369e-05 (* 1 = 2.31369e-05 loss)\nI0821 11:19:26.895289 32299 sgd_solver.cpp:166] Iteration 7300, lr = 1.666\nI0821 11:21:44.612051 32299 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 11:23:05.930045 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87036\nI0821 11:23:05.930335 32299 solver.cpp:404]     Test net output #1: loss = 1.07161 (* 1 = 1.07161 loss)\nI0821 11:23:07.241658 32299 solver.cpp:228] Iteration 7400, loss = 6.31994e-06\nI0821 11:23:07.241705 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:23:07.241734 32299 solver.cpp:244]     Train net output #1: loss = 6.07041e-06 (* 1 = 6.07041e-06 loss)\nI0821 11:23:07.327127 32299 sgd_solver.cpp:166] Iteration 7400, lr = 1.608\nI0821 11:25:25.056578 32299 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 11:26:46.397544 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0821 11:26:46.397799 32299 solver.cpp:404]     Test net output #1: loss = 1.05767 (* 1 = 1.05767 loss)\nI0821 11:26:47.708122 32299 solver.cpp:228] 
Iteration 7500, loss = 1.7195e-05\nI0821 11:26:47.708168 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:26:47.708192 32299 solver.cpp:244]     Train net output #1: loss = 1.69455e-05 (* 1 = 1.69455e-05 loss)\nI0821 11:26:47.795284 32299 sgd_solver.cpp:166] Iteration 7500, lr = 1.55\nI0821 11:29:05.578824 32299 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 11:30:26.936787 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8708\nI0821 11:30:26.937052 32299 solver.cpp:404]     Test net output #1: loss = 1.07347 (* 1 = 1.07347 loss)\nI0821 11:30:28.248522 32299 solver.cpp:228] Iteration 7600, loss = 2.4481e-05\nI0821 11:30:28.248566 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:30:28.248589 32299 solver.cpp:244]     Train net output #1: loss = 2.42315e-05 (* 1 = 2.42315e-05 loss)\nI0821 11:30:28.333735 32299 sgd_solver.cpp:166] Iteration 7600, lr = 1.492\nI0821 11:32:46.095528 32299 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 11:34:07.469751 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87228\nI0821 11:34:07.470048 32299 solver.cpp:404]     Test net output #1: loss = 1.05925 (* 1 = 1.05925 loss)\nI0821 11:34:08.781688 32299 solver.cpp:228] Iteration 7700, loss = 1.08994e-05\nI0821 11:34:08.781747 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:34:08.781765 32299 solver.cpp:244]     Train net output #1: loss = 1.06499e-05 (* 1 = 1.06499e-05 loss)\nI0821 11:34:08.871098 32299 sgd_solver.cpp:166] Iteration 7700, lr = 1.434\nI0821 11:36:26.694185 32299 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 11:37:48.069018 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87132\nI0821 11:37:48.069326 32299 solver.cpp:404]     Test net output #1: loss = 1.07739 (* 1 = 1.07739 loss)\nI0821 11:37:49.379307 32299 solver.cpp:228] Iteration 7800, loss = 8.53133e-06\nI0821 11:37:49.379369 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
11:37:49.379385 32299 solver.cpp:244]     Train net output #1: loss = 8.28183e-06 (* 1 = 8.28183e-06 loss)\nI0821 11:37:49.470109 32299 sgd_solver.cpp:166] Iteration 7800, lr = 1.376\nI0821 11:40:07.135326 32299 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 11:41:28.508074 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0821 11:41:28.508384 32299 solver.cpp:404]     Test net output #1: loss = 1.06154 (* 1 = 1.06154 loss)\nI0821 11:41:29.819064 32299 solver.cpp:228] Iteration 7900, loss = 1.07184e-05\nI0821 11:41:29.819123 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:41:29.819149 32299 solver.cpp:244]     Train net output #1: loss = 1.04689e-05 (* 1 = 1.04689e-05 loss)\nI0821 11:41:29.906867 32299 sgd_solver.cpp:166] Iteration 7900, lr = 1.318\nI0821 11:43:47.568887 32299 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 11:45:08.982007 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8716\nI0821 11:45:08.982292 32299 solver.cpp:404]     Test net output #1: loss = 1.07663 (* 1 = 1.07663 loss)\nI0821 11:45:10.293258 32299 solver.cpp:228] Iteration 8000, loss = 1.41177e-05\nI0821 11:45:10.293318 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:45:10.293344 32299 solver.cpp:244]     Train net output #1: loss = 1.38682e-05 (* 1 = 1.38682e-05 loss)\nI0821 11:45:10.374094 32299 sgd_solver.cpp:166] Iteration 8000, lr = 1.26\nI0821 11:47:28.035573 32299 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 11:48:49.422425 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87244\nI0821 11:48:49.422741 32299 solver.cpp:404]     Test net output #1: loss = 1.06053 (* 1 = 1.06053 loss)\nI0821 11:48:50.736436 32299 solver.cpp:228] Iteration 8100, loss = 1.89682e-05\nI0821 11:48:50.736681 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:48:50.736802 32299 solver.cpp:244]     Train net output #1: loss = 1.87187e-05 (* 1 = 1.87187e-05 loss)\nI0821 11:48:50.816936 
32299 sgd_solver.cpp:166] Iteration 8100, lr = 1.202\nI0821 11:51:08.456195 32299 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 11:52:29.816221 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87164\nI0821 11:52:29.816529 32299 solver.cpp:404]     Test net output #1: loss = 1.07849 (* 1 = 1.07849 loss)\nI0821 11:52:31.127871 32299 solver.cpp:228] Iteration 8200, loss = 7.92383e-06\nI0821 11:52:31.127925 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:52:31.127943 32299 solver.cpp:244]     Train net output #1: loss = 7.67433e-06 (* 1 = 7.67433e-06 loss)\nI0821 11:52:31.213407 32299 sgd_solver.cpp:166] Iteration 8200, lr = 1.144\nI0821 11:54:48.914726 32299 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 11:56:10.272341 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87328\nI0821 11:56:10.272632 32299 solver.cpp:404]     Test net output #1: loss = 1.06315 (* 1 = 1.06315 loss)\nI0821 11:56:11.583278 32299 solver.cpp:228] Iteration 8300, loss = 8.555e-06\nI0821 11:56:11.583333 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:56:11.583350 32299 solver.cpp:244]     Train net output #1: loss = 8.3055e-06 (* 1 = 8.3055e-06 loss)\nI0821 11:56:11.668436 32299 sgd_solver.cpp:166] Iteration 8300, lr = 1.086\nI0821 11:58:29.392393 32299 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 11:59:50.790794 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8716\nI0821 11:59:50.791101 32299 solver.cpp:404]     Test net output #1: loss = 1.07559 (* 1 = 1.07559 loss)\nI0821 11:59:52.101171 32299 solver.cpp:228] Iteration 8400, loss = 7.96409e-06\nI0821 11:59:52.101227 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:59:52.101243 32299 solver.cpp:244]     Train net output #1: loss = 7.71459e-06 (* 1 = 7.71459e-06 loss)\nI0821 11:59:52.192914 32299 sgd_solver.cpp:166] Iteration 8400, lr = 1.028\nI0821 12:02:09.875784 32299 solver.cpp:337] Iteration 8500, Testing net 
(#0)\nI0821 12:03:31.248241 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87324\nI0821 12:03:31.248553 32299 solver.cpp:404]     Test net output #1: loss = 1.06418 (* 1 = 1.06418 loss)\nI0821 12:03:32.558900 32299 solver.cpp:228] Iteration 8500, loss = 1.42537e-05\nI0821 12:03:32.558941 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:03:32.558956 32299 solver.cpp:244]     Train net output #1: loss = 1.40042e-05 (* 1 = 1.40042e-05 loss)\nI0821 12:03:32.645922 32299 sgd_solver.cpp:166] Iteration 8500, lr = 0.97\nI0821 12:05:50.339627 32299 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 12:07:11.694434 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87188\nI0821 12:07:11.694721 32299 solver.cpp:404]     Test net output #1: loss = 1.07889 (* 1 = 1.07889 loss)\nI0821 12:07:13.005828 32299 solver.cpp:228] Iteration 8600, loss = 1.09005e-05\nI0821 12:07:13.005882 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:07:13.005898 32299 solver.cpp:244]     Train net output #1: loss = 1.0651e-05 (* 1 = 1.0651e-05 loss)\nI0821 12:07:13.089715 32299 sgd_solver.cpp:166] Iteration 8600, lr = 0.912\nI0821 12:09:30.734123 32299 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 12:10:52.079130 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87308\nI0821 12:10:52.079427 32299 solver.cpp:404]     Test net output #1: loss = 1.0632 (* 1 = 1.0632 loss)\nI0821 12:10:53.389256 32299 solver.cpp:228] Iteration 8700, loss = 1.31433e-05\nI0821 12:10:53.389310 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:10:53.389328 32299 solver.cpp:244]     Train net output #1: loss = 1.28938e-05 (* 1 = 1.28938e-05 loss)\nI0821 12:10:53.477432 32299 sgd_solver.cpp:166] Iteration 8700, lr = 0.854\nI0821 12:13:11.143957 32299 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 12:14:32.522084 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87188\nI0821 12:14:32.522372 32299 
solver.cpp:404]     Test net output #1: loss = 1.07805 (* 1 = 1.07805 loss)\nI0821 12:14:33.834277 32299 solver.cpp:228] Iteration 8800, loss = 8.42413e-06\nI0821 12:14:33.834319 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:14:33.834334 32299 solver.cpp:244]     Train net output #1: loss = 8.17463e-06 (* 1 = 8.17463e-06 loss)\nI0821 12:14:33.919517 32299 sgd_solver.cpp:166] Iteration 8800, lr = 0.796\nI0821 12:16:51.561059 32299 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 12:18:12.929199 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87376\nI0821 12:18:12.929512 32299 solver.cpp:404]     Test net output #1: loss = 1.06319 (* 1 = 1.06319 loss)\nI0821 12:18:14.240978 32299 solver.cpp:228] Iteration 8900, loss = 1.7328e-05\nI0821 12:18:14.241022 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:18:14.241037 32299 solver.cpp:244]     Train net output #1: loss = 1.70785e-05 (* 1 = 1.70785e-05 loss)\nI0821 12:18:14.326473 32299 sgd_solver.cpp:166] Iteration 8900, lr = 0.738\nI0821 12:20:32.005683 32299 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 12:21:53.384083 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87196\nI0821 12:21:53.384394 32299 solver.cpp:404]     Test net output #1: loss = 1.07871 (* 1 = 1.07871 loss)\nI0821 12:21:54.695065 32299 solver.cpp:228] Iteration 9000, loss = 6.22795e-06\nI0821 12:21:54.695107 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:21:54.695123 32299 solver.cpp:244]     Train net output #1: loss = 5.97845e-06 (* 1 = 5.97845e-06 loss)\nI0821 12:21:54.779305 32299 sgd_solver.cpp:166] Iteration 9000, lr = 0.68\nI0821 12:24:12.422652 32299 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 12:25:33.775027 32299 solver.cpp:404]     Test net output #0: accuracy = 0.874\nI0821 12:25:33.775300 32299 solver.cpp:404]     Test net output #1: loss = 1.06338 (* 1 = 1.06338 loss)\nI0821 12:25:35.085888 32299 solver.cpp:228] Iteration 
9100, loss = 1.34707e-05\nI0821 12:25:35.085948 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:25:35.085966 32299 solver.cpp:244]     Train net output #1: loss = 1.32212e-05 (* 1 = 1.32212e-05 loss)\nI0821 12:25:35.172330 32299 sgd_solver.cpp:166] Iteration 9100, lr = 0.622\nI0821 12:27:52.910327 32299 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 12:29:14.260578 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87232\nI0821 12:29:14.261059 32299 solver.cpp:404]     Test net output #1: loss = 1.08025 (* 1 = 1.08025 loss)\nI0821 12:29:15.572361 32299 solver.cpp:228] Iteration 9200, loss = 1.4596e-05\nI0821 12:29:15.572404 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:29:15.572419 32299 solver.cpp:244]     Train net output #1: loss = 1.43465e-05 (* 1 = 1.43465e-05 loss)\nI0821 12:29:15.663012 32299 sgd_solver.cpp:166] Iteration 9200, lr = 0.564\nI0821 12:31:33.376745 32299 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 12:32:54.708237 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87392\nI0821 12:32:54.708536 32299 solver.cpp:404]     Test net output #1: loss = 1.06164 (* 1 = 1.06164 loss)\nI0821 12:32:56.019593 32299 solver.cpp:228] Iteration 9300, loss = 1.34967e-05\nI0821 12:32:56.019634 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:32:56.019649 32299 solver.cpp:244]     Train net output #1: loss = 1.32472e-05 (* 1 = 1.32472e-05 loss)\nI0821 12:32:56.108388 32299 sgd_solver.cpp:166] Iteration 9300, lr = 0.506\nI0821 12:35:13.747982 32299 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 12:36:35.119537 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0821 12:36:35.119828 32299 solver.cpp:404]     Test net output #1: loss = 1.0785 (* 1 = 1.0785 loss)\nI0821 12:36:36.430881 32299 solver.cpp:228] Iteration 9400, loss = 1.10012e-05\nI0821 12:36:36.430922 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:36:36.430938 
32299 solver.cpp:244]     Train net output #1: loss = 1.07517e-05 (* 1 = 1.07517e-05 loss)\nI0821 12:36:36.519557 32299 sgd_solver.cpp:166] Iteration 9400, lr = 0.448\nI0821 12:38:54.184989 32299 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 12:40:15.536963 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0821 12:40:15.537264 32299 solver.cpp:404]     Test net output #1: loss = 1.0629 (* 1 = 1.0629 loss)\nI0821 12:40:16.848420 32299 solver.cpp:228] Iteration 9500, loss = 1.08498e-05\nI0821 12:40:16.848479 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:40:16.848497 32299 solver.cpp:244]     Train net output #1: loss = 1.06003e-05 (* 1 = 1.06003e-05 loss)\nI0821 12:40:16.937367 32299 sgd_solver.cpp:166] Iteration 9500, lr = 0.39\nI0821 12:42:34.595618 32299 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 12:43:55.941175 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87216\nI0821 12:43:55.941481 32299 solver.cpp:404]     Test net output #1: loss = 1.07989 (* 1 = 1.07989 loss)\nI0821 12:43:57.251442 32299 solver.cpp:228] Iteration 9600, loss = 1.18543e-05\nI0821 12:43:57.251500 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:43:57.251518 32299 solver.cpp:244]     Train net output #1: loss = 1.16048e-05 (* 1 = 1.16048e-05 loss)\nI0821 12:43:57.343041 32299 sgd_solver.cpp:166] Iteration 9600, lr = 0.332\nI0821 12:46:15.100765 32299 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 12:47:36.450233 32299 solver.cpp:404]     Test net output #0: accuracy = 0.8736\nI0821 12:47:36.450516 32299 solver.cpp:404]     Test net output #1: loss = 1.06237 (* 1 = 1.06237 loss)\nI0821 12:47:37.760630 32299 solver.cpp:228] Iteration 9700, loss = 7.91059e-06\nI0821 12:47:37.760673 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:47:37.760687 32299 solver.cpp:244]     Train net output #1: loss = 7.66109e-06 (* 1 = 7.66109e-06 loss)\nI0821 12:47:37.845052 32299 
sgd_solver.cpp:166] Iteration 9700, lr = 0.274\nI0821 12:49:55.584285 32299 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 12:51:16.940465 32299 solver.cpp:404]     Test net output #0: accuracy = 0.872\nI0821 12:51:16.940778 32299 solver.cpp:404]     Test net output #1: loss = 1.08028 (* 1 = 1.08028 loss)\nI0821 12:51:18.251933 32299 solver.cpp:228] Iteration 9800, loss = 1.03251e-05\nI0821 12:51:18.251974 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:51:18.251991 32299 solver.cpp:244]     Train net output #1: loss = 1.00756e-05 (* 1 = 1.00756e-05 loss)\nI0821 12:51:18.341641 32299 sgd_solver.cpp:166] Iteration 9800, lr = 0.216\nI0821 12:53:36.072396 32299 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 12:54:57.456650 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87352\nI0821 12:54:57.456961 32299 solver.cpp:404]     Test net output #1: loss = 1.06343 (* 1 = 1.06343 loss)\nI0821 12:54:58.767725 32299 solver.cpp:228] Iteration 9900, loss = 5.86894e-06\nI0821 12:54:58.767783 32299 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:54:58.767808 32299 solver.cpp:244]     Train net output #1: loss = 5.61944e-06 (* 1 = 5.61944e-06 loss)\nI0821 12:54:58.853639 32299 sgd_solver.cpp:166] Iteration 9900, lr = 0.158\nI0821 12:57:16.631351 32299 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/clr3SS5kWD-6Fig11_iter_10000.caffemodel\nI0821 12:57:16.853155 32299 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/clr3SS5kWD-6Fig11_iter_10000.solverstate\nI0821 12:57:17.295766 32299 solver.cpp:317] Iteration 10000, loss = 8.07594e-06\nI0821 12:57:17.295820 32299 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 12:58:38.687479 32299 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0821 12:58:38.687800 32299 solver.cpp:404]     Test net output #1: loss = 1.07828 (* 1 = 1.07828 loss)\nI0821 12:58:38.687824 32299 solver.cpp:322] 
Optimization Done.\nI0821 12:58:44.015338 32299 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35AdaDeltaFig9",
    "content": "I0817 16:29:01.227229 17472 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:29:01.229779 17472 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:29:01.231009 17472 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:29:01.232223 17472 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:29:01.233443 17472 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:29:01.234668 17472 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:29:01.235894 17472 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:29:01.237316 17472 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:29:01.238541 17472 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:29:01.671294 17472 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35AdaDeltaFig9\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\ntype: \"AdaDelta\"\nI0817 16:29:01.674533 17472 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:29:01.686070 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:01.686146 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:01.687211 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:29:01.687265 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:29:01.687286 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:29:01.687306 17472 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:29:01.687325 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:29:01.687343 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:29:01.687361 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:29:01.687379 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:29:01.687398 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:29:01.687417 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:29:01.687435 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:29:01.687453 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:29:01.687471 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:29:01.687489 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:29:01.687510 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:29:01.687526 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:29:01.687544 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:29:01.687561 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:29:01.687580 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:29:01.687598 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:29:01.687634 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:29:01.687654 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:29:01.687678 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:29:01.687697 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:29:01.687716 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:29:01.687731 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:29:01.687749 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:29:01.687767 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:29:01.687784 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:29:01.687810 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:29:01.687834 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:29:01.687851 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:29:01.687870 17472 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:29:01.687885 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:29:01.687904 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:29:01.687922 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:29:01.687942 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:29:01.687958 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:29:01.687976 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:29:01.687994 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:29:01.688019 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:29:01.688035 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:29:01.688053 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:29:01.688069 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:29:01.688088 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:29:01.688107 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:29:01.688123 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:29:01.688139 17472 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:29:01.688158 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:29:01.688174 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:29:01.688191 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:29:01.688218 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:29:01.688239 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:29:01.688257 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:29:01.688275 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:29:01.688292 17472 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:29:01.690037 17472 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\nI0817 16:29:01.692093 17472 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:29:01.693295 17472 net.cpp:100] Creating Layer dataLayer\nI0817 16:29:01.693369 17472 net.cpp:408] dataLayer -> data_top\nI0817 16:29:01.693550 17472 net.cpp:408] dataLayer -> label\nI0817 16:29:01.693665 17472 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:29:01.703145 17477 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:29:01.726948 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:01.734000 17472 net.cpp:150] Setting up dataLayer\nI0817 16:29:01.734067 17472 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:29:01.734081 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.734086 17472 net.cpp:165] Memory required for data: 1536500\nI0817 16:29:01.734103 17472 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:29:01.734117 17472 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:29:01.734125 17472 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:29:01.734146 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:29:01.734161 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:29:01.734285 17472 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:29:01.734302 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.734308 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.734313 17472 net.cpp:165] Memory required for data: 1537500\nI0817 16:29:01.734319 17472 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:29:01.734377 17472 net.cpp:100] Creating Layer pre_conv\nI0817 16:29:01.734390 17472 net.cpp:434] pre_conv <- data_top\nI0817 16:29:01.734402 17472 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:29:01.736255 17472 net.cpp:150] Setting up pre_conv\nI0817 16:29:01.736277 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.736284 17472 net.cpp:165] Memory required for data: 9729500\nI0817 16:29:01.736343 17472 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:29:01.736407 17472 net.cpp:100] Creating Layer pre_bn\nI0817 16:29:01.736418 17472 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:29:01.736430 17472 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:29:01.736953 17472 net.cpp:150] Setting up pre_bn\nI0817 16:29:01.736970 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.736986 17472 net.cpp:165] Memory required for data: 17921500\nI0817 16:29:01.737005 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:01.737058 17472 net.cpp:100] Creating Layer pre_scale\nI0817 16:29:01.737069 17472 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:29:01.737079 17472 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:29:01.737267 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:01.737365 17478 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:01.737529 17472 net.cpp:150] Setting up pre_scale\nI0817 16:29:01.737546 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.737552 17472 net.cpp:165] Memory required for data: 26113500\nI0817 16:29:01.737563 17472 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:29:01.737608 17472 net.cpp:100] Creating Layer pre_relu\nI0817 16:29:01.737617 17472 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:29:01.737627 17472 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:29:01.737637 17472 net.cpp:150] Setting up pre_relu\nI0817 16:29:01.737644 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.737649 17472 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:29:01.737654 17472 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:29:01.737664 17472 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:29:01.737669 17472 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:29:01.737678 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:29:01.737687 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:29:01.737735 17472 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:29:01.737746 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.737752 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.737757 17472 net.cpp:165] Memory required for data: 50689500\nI0817 16:29:01.737762 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:29:01.737777 17472 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:29:01.737783 17472 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:29:01.737792 17472 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:29:01.738099 17472 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:29:01.738113 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.738118 17472 net.cpp:165] Memory required for data: 58881500\nI0817 16:29:01.738131 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:29:01.738147 17472 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:29:01.738154 17472 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:29:01.738163 17472 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:29:01.738384 17472 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:29:01.738397 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.738402 17472 net.cpp:165] Memory required for data: 67073500\nI0817 16:29:01.738414 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:01.738422 17472 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:29:01.738428 17472 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:29:01.738441 17472 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.738490 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:01.738622 17472 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:29:01.738638 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.738643 17472 net.cpp:165] Memory required for data: 75265500\nI0817 16:29:01.738653 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:29:01.738669 17472 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:29:01.738677 17472 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:29:01.738683 17472 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.738693 17472 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:29:01.738700 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.738704 17472 net.cpp:165] Memory required for data: 83457500\nI0817 16:29:01.738709 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:29:01.738725 17472 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:29:01.738731 17472 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:29:01.738744 17472 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:29:01.739071 17472 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:29:01.739085 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739090 17472 net.cpp:165] Memory required for data: 91649500\nI0817 16:29:01.739099 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:29:01.739112 17472 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:29:01.739118 17472 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:29:01.739130 17472 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:29:01.739356 17472 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:29:01.739368 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739373 17472 net.cpp:165] Memory required for data: 99841500\nI0817 16:29:01.739388 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:01.739398 17472 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:29:01.739403 17472 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:29:01.739413 17472 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.739470 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:01.739603 17472 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:29:01.739619 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739624 17472 net.cpp:165] Memory required for data: 108033500\nI0817 16:29:01.739632 17472 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:29:01.739681 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:29:01.739692 17472 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:29:01.739701 17472 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:29:01.739708 17472 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:29:01.739779 17472 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:29:01.739794 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739805 17472 net.cpp:165] Memory required for data: 116225500\nI0817 16:29:01.739811 17472 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:29:01.739822 17472 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:29:01.739828 17472 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:29:01.739835 17472 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.739845 17472 net.cpp:150] Setting up L1_b1_relu\nI0817 16:29:01.739852 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739857 17472 net.cpp:165] Memory required for data: 124417500\nI0817 16:29:01.739862 17472 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.739873 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.739879 17472 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:29:01.739886 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:01.739895 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:01.739939 17472 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.739950 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739958 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.739969 17472 net.cpp:165] Memory required for data: 140801500\nI0817 16:29:01.739975 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:29:01.739987 17472 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:29:01.739994 17472 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:01.740005 17472 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:29:01.740310 17472 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:29:01.740326 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.740331 17472 net.cpp:165] Memory required for data: 148993500\nI0817 16:29:01.740340 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:29:01.740350 17472 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:29:01.740355 17472 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:29:01.740362 17472 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:29:01.740602 17472 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:29:01.740614 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.740619 17472 net.cpp:165] Memory required for data: 157185500\nI0817 16:29:01.740630 17472 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:01.740643 17472 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:29:01.740648 17472 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:29:01.740655 17472 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.740711 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:01.740855 17472 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:29:01.740869 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.740875 17472 net.cpp:165] Memory required for data: 165377500\nI0817 16:29:01.740883 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:29:01.740892 17472 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:29:01.740897 17472 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:29:01.740907 17472 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.740917 17472 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:29:01.740924 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.740929 17472 net.cpp:165] Memory required for data: 173569500\nI0817 16:29:01.740934 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:29:01.740947 17472 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:29:01.740953 17472 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:29:01.740962 17472 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:29:01.741256 17472 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:29:01.741271 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.741276 17472 net.cpp:165] Memory required for data: 181761500\nI0817 16:29:01.741284 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:29:01.741295 17472 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:29:01.741302 17472 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:29:01.741309 17472 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:29:01.741539 17472 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:29:01.741552 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.741557 17472 net.cpp:165] Memory required for data: 189953500\nI0817 16:29:01.741575 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:01.741587 17472 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:29:01.741593 17472 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:29:01.741600 17472 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.741652 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:01.741794 17472 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:29:01.741813 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.741819 17472 net.cpp:165] Memory required for data: 198145500\nI0817 16:29:01.741828 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:29:01.741848 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:29:01.741854 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:29:01.741861 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:01.741873 17472 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:29:01.741904 17472 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:29:01.741915 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.741920 17472 net.cpp:165] Memory required for data: 206337500\nI0817 16:29:01.741925 17472 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:29:01.741935 17472 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:29:01.741941 17472 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:29:01.741948 17472 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:29:01.741957 17472 net.cpp:150] Setting up L1_b2_relu\nI0817 16:29:01.741964 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:29:01.741968 17472 net.cpp:165] Memory required for data: 214529500\nI0817 16:29:01.741973 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.741981 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.741986 17472 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:29:01.741992 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:01.742002 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:01.742044 17472 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.742056 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.742063 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.742067 17472 net.cpp:165] Memory required for data: 230913500\nI0817 16:29:01.742072 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:29:01.742085 17472 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:29:01.742092 17472 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:01.742100 17472 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:29:01.742403 17472 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:29:01.742416 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.742421 17472 net.cpp:165] Memory required for data: 239105500\nI0817 16:29:01.742429 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:29:01.742441 17472 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:29:01.742447 17472 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:29:01.742455 17472 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:29:01.742688 17472 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:29:01.742702 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:29:01.742707 17472 net.cpp:165] Memory required for data: 247297500\nI0817 16:29:01.742717 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:01.742724 17472 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:29:01.742730 17472 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:29:01.742738 17472 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.742791 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:01.742938 17472 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:29:01.742950 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.742956 17472 net.cpp:165] Memory required for data: 255489500\nI0817 16:29:01.742964 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:29:01.742972 17472 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:29:01.742979 17472 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:29:01.742990 17472 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.742998 17472 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:29:01.743015 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.743019 17472 net.cpp:165] Memory required for data: 263681500\nI0817 16:29:01.743024 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:29:01.743037 17472 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:29:01.743043 17472 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:29:01.743052 17472 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:29:01.743352 17472 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:29:01.743366 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.743371 17472 net.cpp:165] Memory required for data: 271873500\nI0817 16:29:01.743379 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:29:01.743394 17472 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:29:01.743401 17472 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:29:01.743408 17472 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:29:01.743643 17472 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:29:01.743656 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.743661 17472 net.cpp:165] Memory required for data: 280065500\nI0817 16:29:01.743671 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:01.743680 17472 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:29:01.743685 17472 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:29:01.743692 17472 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.743746 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:01.743896 17472 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:29:01.743911 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.743916 17472 net.cpp:165] Memory required for data: 288257500\nI0817 16:29:01.743924 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:29:01.743933 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:29:01.743939 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:29:01.743947 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:01.743957 17472 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:29:01.743988 17472 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:29:01.743996 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744000 17472 net.cpp:165] Memory required for data: 296449500\nI0817 16:29:01.744006 17472 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:29:01.744016 17472 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:29:01.744022 17472 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:29:01.744029 17472 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.744038 17472 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:29:01.744045 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744050 17472 net.cpp:165] Memory required for data: 304641500\nI0817 16:29:01.744055 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.744061 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.744066 17472 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:29:01.744073 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:01.744083 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:01.744128 17472 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.744140 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744146 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744150 17472 net.cpp:165] Memory required for data: 321025500\nI0817 16:29:01.744155 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:29:01.744169 17472 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:29:01.744175 17472 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:01.744190 17472 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:29:01.744493 17472 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:29:01.744508 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744513 17472 net.cpp:165] Memory required for data: 329217500\nI0817 16:29:01.744521 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:29:01.744534 17472 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:29:01.744539 17472 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:29:01.744550 17472 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:29:01.744786 17472 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:29:01.744804 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.744810 17472 net.cpp:165] Memory required for data: 337409500\nI0817 16:29:01.744822 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:01.744829 17472 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:29:01.744835 17472 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:29:01.744843 17472 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.744899 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:01.745038 17472 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:29:01.745050 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.745055 17472 net.cpp:165] Memory required for data: 345601500\nI0817 16:29:01.745065 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:29:01.745074 17472 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:29:01.745079 17472 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:29:01.745090 17472 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.745098 17472 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:29:01.745105 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.745110 17472 net.cpp:165] Memory required for data: 353793500\nI0817 16:29:01.745115 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:29:01.745128 17472 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:29:01.745134 17472 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:29:01.745146 17472 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:29:01.745455 17472 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:29:01.745468 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.745473 17472 net.cpp:165] Memory required for data: 361985500\nI0817 16:29:01.745481 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:29:01.745493 17472 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:29:01.745499 17472 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:29:01.745507 17472 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:29:01.745748 17472 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:29:01.745760 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.745765 17472 net.cpp:165] Memory required for data: 370177500\nI0817 16:29:01.745776 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:01.745784 17472 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:29:01.745790 17472 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:29:01.745797 17472 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.745859 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:01.745996 17472 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:29:01.746009 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746014 17472 net.cpp:165] Memory required for data: 378369500\nI0817 16:29:01.746023 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:29:01.746035 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:29:01.746042 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:29:01.746048 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:01.746055 17472 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:29:01.746098 17472 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:29:01.746107 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746112 17472 net.cpp:165] Memory required for data: 386561500\nI0817 16:29:01.746117 17472 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:29:01.746125 17472 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:29:01.746130 17472 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:29:01.746142 17472 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:29:01.746151 17472 net.cpp:150] Setting up L1_b4_relu\nI0817 16:29:01.746158 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746163 17472 net.cpp:165] Memory required for data: 394753500\nI0817 16:29:01.746167 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.746176 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.746181 17472 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:29:01.746191 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:01.746201 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:01.746240 17472 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.746253 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746259 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746263 17472 net.cpp:165] Memory required for data: 411137500\nI0817 16:29:01.746268 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:29:01.746282 17472 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:29:01.746289 17472 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:01.746297 17472 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:29:01.746604 17472 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:29:01.746618 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746623 17472 net.cpp:165] Memory required for data: 419329500\nI0817 16:29:01.746645 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:29:01.746657 17472 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:29:01.746664 17472 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:29:01.746671 17472 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:29:01.746914 17472 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:29:01.746928 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.746933 17472 net.cpp:165] Memory required for data: 427521500\nI0817 16:29:01.746944 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:01.746953 17472 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:29:01.746958 17472 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:29:01.746966 17472 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.747020 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:01.747158 17472 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:29:01.747169 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.747174 17472 net.cpp:165] Memory required for data: 435713500\nI0817 16:29:01.747184 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:29:01.747195 17472 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:29:01.747200 17472 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:29:01.747207 17472 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.747217 17472 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:29:01.747225 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.747228 17472 net.cpp:165] Memory required for data: 443905500\nI0817 16:29:01.747233 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:29:01.747246 17472 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:29:01.747259 17472 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:29:01.747272 17472 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:29:01.747591 17472 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:29:01.747604 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.747609 17472 net.cpp:165] Memory required for data: 452097500\nI0817 16:29:01.747618 17472 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:29:01.747630 17472 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:29:01.747637 17472 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:29:01.747644 17472 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:29:01.747889 17472 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:29:01.747902 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.747907 17472 net.cpp:165] Memory required for data: 460289500\nI0817 16:29:01.747917 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:01.747926 17472 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:29:01.747932 17472 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:29:01.747939 17472 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.747993 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:01.748129 17472 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:29:01.748142 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748147 17472 net.cpp:165] Memory required for data: 468481500\nI0817 16:29:01.748157 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:29:01.748165 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:29:01.748172 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:29:01.748178 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:01.748188 17472 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:29:01.748219 17472 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:29:01.748227 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748232 17472 net.cpp:165] Memory required for data: 476673500\nI0817 16:29:01.748237 17472 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:29:01.748248 17472 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:29:01.748253 17472 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:29:01.748261 17472 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.748270 17472 net.cpp:150] Setting up L1_b5_relu\nI0817 16:29:01.748277 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748281 17472 net.cpp:165] Memory required for data: 484865500\nI0817 16:29:01.748286 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.748293 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.748298 17472 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:29:01.748306 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:01.748314 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:01.748358 17472 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.748370 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748378 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748381 17472 net.cpp:165] Memory required for data: 501249500\nI0817 16:29:01.748386 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:29:01.748400 17472 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:29:01.748406 17472 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:01.748415 17472 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:29:01.748718 17472 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:29:01.748733 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.748738 17472 net.cpp:165] Memory required for data: 509441500\nI0817 16:29:01.748754 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:29:01.748765 17472 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:29:01.748771 17472 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:29:01.748781 17472 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:29:01.749025 17472 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:29:01.749039 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.749044 17472 net.cpp:165] Memory required for data: 517633500\nI0817 16:29:01.749054 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:01.749063 17472 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:29:01.749068 17472 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:29:01.749076 17472 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.749130 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:01.749269 17472 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:29:01.749282 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.749287 17472 net.cpp:165] Memory required for data: 525825500\nI0817 16:29:01.749296 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:29:01.749305 17472 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:29:01.749310 17472 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:29:01.749320 17472 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.749330 17472 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:29:01.749337 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.749342 17472 net.cpp:165] Memory required for data: 534017500\nI0817 16:29:01.749346 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:29:01.749361 17472 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:29:01.749366 17472 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:29:01.749377 17472 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:29:01.749686 17472 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:29:01.749701 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.749706 17472 
net.cpp:165] Memory required for data: 542209500\nI0817 16:29:01.749714 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:29:01.749725 17472 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:29:01.749732 17472 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:29:01.749740 17472 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:29:01.749985 17472 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:29:01.750000 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750005 17472 net.cpp:165] Memory required for data: 550401500\nI0817 16:29:01.750015 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:01.750022 17472 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:29:01.750028 17472 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:29:01.750036 17472 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.750089 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:01.750231 17472 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:29:01.750243 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750248 17472 net.cpp:165] Memory required for data: 558593500\nI0817 16:29:01.750257 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:29:01.750274 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:29:01.750280 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:29:01.750288 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:01.750298 17472 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:29:01.750329 17472 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:29:01.750340 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750345 17472 net.cpp:165] Memory required for data: 566785500\nI0817 16:29:01.750350 17472 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:29:01.750366 17472 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:29:01.750373 17472 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:29:01.750380 17472 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.750389 17472 net.cpp:150] Setting up L1_b6_relu\nI0817 16:29:01.750396 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750401 17472 net.cpp:165] Memory required for data: 574977500\nI0817 16:29:01.750406 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.750416 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.750422 17472 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:29:01.750428 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:01.750438 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:01.750480 17472 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.750494 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750501 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750506 17472 net.cpp:165] Memory required for data: 591361500\nI0817 16:29:01.750510 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:29:01.750521 17472 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:29:01.750527 17472 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:01.750536 17472 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:29:01.750851 17472 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:29:01.750865 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.750870 17472 net.cpp:165] Memory required for data: 599553500\nI0817 16:29:01.750880 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:29:01.750891 17472 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:29:01.750897 17472 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:29:01.750905 17472 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:29:01.751142 17472 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:29:01.751155 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.751160 17472 net.cpp:165] Memory required for data: 607745500\nI0817 16:29:01.751171 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:01.751179 17472 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:29:01.751185 17472 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:29:01.751196 17472 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.751248 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:01.751386 17472 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:29:01.751399 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.751405 17472 net.cpp:165] Memory required for data: 615937500\nI0817 16:29:01.751413 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:29:01.751421 17472 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:29:01.751427 17472 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:29:01.751435 17472 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.751444 17472 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:29:01.751451 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.751456 17472 net.cpp:165] Memory required for data: 624129500\nI0817 16:29:01.751461 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:29:01.751474 17472 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:29:01.751480 17472 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:29:01.751493 17472 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:29:01.751808 17472 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:29:01.751822 17472 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.751828 17472 net.cpp:165] Memory required for data: 632321500\nI0817 16:29:01.751844 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:29:01.751857 17472 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:29:01.751863 17472 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:29:01.751873 17472 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:29:01.752106 17472 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:29:01.752120 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752125 17472 net.cpp:165] Memory required for data: 640513500\nI0817 16:29:01.752135 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:01.752143 17472 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:29:01.752149 17472 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:29:01.752159 17472 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.752213 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:01.752347 17472 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:29:01.752363 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752368 17472 net.cpp:165] Memory required for data: 648705500\nI0817 16:29:01.752377 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:29:01.752388 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:29:01.752393 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:29:01.752399 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:01.752408 17472 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:29:01.752440 17472 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:29:01.752450 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752455 17472 net.cpp:165] Memory required for data: 656897500\nI0817 16:29:01.752460 17472 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:29:01.752467 17472 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:29:01.752476 17472 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:29:01.752483 17472 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.752492 17472 net.cpp:150] Setting up L1_b7_relu\nI0817 16:29:01.752499 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752504 17472 net.cpp:165] Memory required for data: 665089500\nI0817 16:29:01.752508 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.752516 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.752521 17472 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:29:01.752530 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:01.752540 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:01.752580 17472 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.752593 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752599 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752604 17472 net.cpp:165] Memory required for data: 681473500\nI0817 16:29:01.752609 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:29:01.752621 17472 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:29:01.752629 17472 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:01.752637 17472 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:29:01.752961 17472 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:29:01.752975 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.752980 17472 net.cpp:165] Memory required for data: 689665500\nI0817 16:29:01.752990 17472 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:29:01.753000 17472 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:29:01.753007 17472 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:29:01.753015 17472 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:29:01.753262 17472 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:29:01.753276 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.753281 17472 net.cpp:165] Memory required for data: 697857500\nI0817 16:29:01.753291 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:01.753300 17472 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:29:01.753306 17472 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:29:01.753316 17472 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.753370 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:01.753506 17472 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:29:01.753523 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.753530 17472 net.cpp:165] Memory required for data: 706049500\nI0817 16:29:01.753538 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:29:01.753546 17472 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:29:01.753552 17472 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:29:01.753559 17472 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.753569 17472 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:29:01.753576 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.753581 17472 net.cpp:165] Memory required for data: 714241500\nI0817 16:29:01.753585 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:29:01.753599 17472 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:29:01.753605 17472 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:29:01.753617 17472 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:29:01.753938 17472 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:29:01.753953 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.753958 17472 net.cpp:165] Memory required for data: 722433500\nI0817 16:29:01.753967 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:29:01.753978 17472 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:29:01.753984 17472 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:29:01.753995 17472 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:29:01.754238 17472 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:29:01.754251 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754256 17472 net.cpp:165] Memory required for data: 730625500\nI0817 16:29:01.754266 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:01.754276 17472 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:29:01.754281 17472 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:29:01.754288 17472 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.754343 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:01.754484 17472 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:29:01.754498 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754503 17472 net.cpp:165] Memory required for data: 738817500\nI0817 16:29:01.754511 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:29:01.754523 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:29:01.754529 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:29:01.754536 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:01.754544 17472 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:29:01.754580 17472 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:29:01.754591 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754596 17472 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:29:01.754601 17472 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:29:01.754609 17472 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:29:01.754614 17472 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:29:01.754624 17472 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.754634 17472 net.cpp:150] Setting up L1_b8_relu\nI0817 16:29:01.754647 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754653 17472 net.cpp:165] Memory required for data: 755201500\nI0817 16:29:01.754657 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.754665 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.754670 17472 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:29:01.754680 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:01.754691 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:01.754734 17472 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.754746 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754753 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.754757 17472 net.cpp:165] Memory required for data: 771585500\nI0817 16:29:01.754762 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:29:01.754777 17472 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:29:01.754783 17472 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:01.754792 17472 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:29:01.755125 17472 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:29:01.755142 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.755147 17472 net.cpp:165] Memory required for data: 
779777500\nI0817 16:29:01.755156 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:29:01.755165 17472 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:29:01.755172 17472 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:29:01.755182 17472 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:29:01.755424 17472 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:29:01.755437 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.755442 17472 net.cpp:165] Memory required for data: 787969500\nI0817 16:29:01.755453 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:01.755465 17472 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:29:01.755470 17472 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:29:01.755478 17472 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.755532 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:01.755674 17472 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:29:01.755687 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.755692 17472 net.cpp:165] Memory required for data: 796161500\nI0817 16:29:01.755702 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:29:01.755712 17472 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:29:01.755719 17472 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:29:01.755726 17472 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.755736 17472 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:29:01.755743 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.755748 17472 net.cpp:165] Memory required for data: 804353500\nI0817 16:29:01.755753 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:29:01.755766 17472 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:29:01.755772 17472 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:29:01.755784 17472 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:29:01.756109 17472 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:29:01.756124 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756129 17472 net.cpp:165] Memory required for data: 812545500\nI0817 16:29:01.756137 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:29:01.756147 17472 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:29:01.756153 17472 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:29:01.756161 17472 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:29:01.756414 17472 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:29:01.756428 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756433 17472 net.cpp:165] Memory required for data: 820737500\nI0817 16:29:01.756465 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:01.756476 17472 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:29:01.756482 17472 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:29:01.756492 17472 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.756544 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:01.756685 17472 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:29:01.756698 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756703 17472 net.cpp:165] Memory required for data: 828929500\nI0817 16:29:01.756713 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:29:01.756722 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:29:01.756728 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:29:01.756736 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:01.756742 17472 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:29:01.756777 17472 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:29:01.756786 17472 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:29:01.756790 17472 net.cpp:165] Memory required for data: 837121500\nI0817 16:29:01.756795 17472 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:29:01.756811 17472 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:29:01.756819 17472 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:29:01.756825 17472 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.756835 17472 net.cpp:150] Setting up L1_b9_relu\nI0817 16:29:01.756844 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756847 17472 net.cpp:165] Memory required for data: 845313500\nI0817 16:29:01.756852 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.756862 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.756868 17472 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:29:01.756876 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:01.756886 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:01.756929 17472 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.756943 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756950 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.756955 17472 net.cpp:165] Memory required for data: 861697500\nI0817 16:29:01.756960 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:29:01.756971 17472 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:29:01.756978 17472 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:01.756986 17472 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:29:01.757303 17472 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:29:01.757318 17472 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:29:01.757321 17472 net.cpp:165] Memory required for data: 863745500\nI0817 16:29:01.757330 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:29:01.757342 17472 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:29:01.757349 17472 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:29:01.757356 17472 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:29:01.757593 17472 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:29:01.757606 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.757611 17472 net.cpp:165] Memory required for data: 865793500\nI0817 16:29:01.757622 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:01.757630 17472 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:29:01.757643 17472 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:29:01.757657 17472 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.757710 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:01.757858 17472 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:29:01.757871 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.757876 17472 net.cpp:165] Memory required for data: 867841500\nI0817 16:29:01.757885 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:29:01.757894 17472 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:29:01.757900 17472 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:29:01.757910 17472 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.757920 17472 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:29:01.757927 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.757932 17472 net.cpp:165] Memory required for data: 869889500\nI0817 16:29:01.757937 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:29:01.757951 17472 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:29:01.757956 17472 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:29:01.757966 17472 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:29:01.758283 17472 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:29:01.758296 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.758301 17472 net.cpp:165] Memory required for data: 871937500\nI0817 16:29:01.758311 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:29:01.758323 17472 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:29:01.758330 17472 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:29:01.758338 17472 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:29:01.758575 17472 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:29:01.758589 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.758592 17472 net.cpp:165] Memory required for data: 873985500\nI0817 16:29:01.758604 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:01.758612 17472 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:29:01.758618 17472 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:29:01.758628 17472 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.758682 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:01.758828 17472 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:29:01.758842 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.758847 17472 net.cpp:165] Memory required for data: 876033500\nI0817 16:29:01.758857 17472 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:29:01.758867 17472 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:29:01.758872 17472 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:01.758888 17472 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:29:01.758970 17472 net.cpp:150] Setting up L2_b1_pool\nI0817 16:29:01.758985 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.758990 17472 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:29:01.758996 17472 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:29:01.759008 17472 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:29:01.759014 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:29:01.759021 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:29:01.759029 17472 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:29:01.759063 17472 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:29:01.759076 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.759081 17472 net.cpp:165] Memory required for data: 880129500\nI0817 16:29:01.759086 17472 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:29:01.759093 17472 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:29:01.759099 17472 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:29:01.759114 17472 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.759124 17472 net.cpp:150] Setting up L2_b1_relu\nI0817 16:29:01.759131 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.759136 17472 net.cpp:165] Memory required for data: 882177500\nI0817 16:29:01.759140 17472 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:29:01.759186 17472 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:29:01.759203 17472 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:29:01.761540 17472 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:29:01.761559 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.761564 17472 net.cpp:165] Memory required for data: 884225500\nI0817 16:29:01.761570 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:29:01.761584 17472 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:29:01.761590 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:29:01.761597 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:29:01.761605 17472 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:29:01.761683 17472 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:29:01.761698 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.761703 17472 net.cpp:165] Memory required for data: 888321500\nI0817 16:29:01.761709 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.761718 17472 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.761723 17472 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:29:01.761734 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:01.761745 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:01.761793 17472 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.761813 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.761821 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.761826 17472 net.cpp:165] Memory required for data: 896513500\nI0817 16:29:01.761831 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:29:01.761843 17472 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:29:01.761849 17472 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:01.761862 17472 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:29:01.763301 17472 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:29:01.763319 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.763324 17472 net.cpp:165] Memory required for data: 900609500\nI0817 16:29:01.763334 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:29:01.763346 17472 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:29:01.763352 17472 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:29:01.763361 17472 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:29:01.763609 17472 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:29:01.763623 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.763628 17472 net.cpp:165] Memory required for data: 904705500\nI0817 16:29:01.763639 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:01.763648 17472 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:29:01.763654 17472 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:29:01.763665 17472 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.763720 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:01.763875 17472 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:29:01.763888 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.763893 17472 net.cpp:165] Memory required for data: 908801500\nI0817 16:29:01.763903 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:29:01.763911 17472 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:29:01.763917 17472 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:29:01.763936 17472 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.763947 17472 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:29:01.763953 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.763958 17472 net.cpp:165] Memory required for data: 912897500\nI0817 16:29:01.763963 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:29:01.763978 17472 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:29:01.763983 17472 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:29:01.763991 17472 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:29:01.764442 17472 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:29:01.764456 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.764461 17472 net.cpp:165] Memory required for data: 916993500\nI0817 16:29:01.764470 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:29:01.764482 17472 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:29:01.764489 17472 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:29:01.764497 17472 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:29:01.764735 17472 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:29:01.764749 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.764753 17472 net.cpp:165] Memory required for data: 921089500\nI0817 16:29:01.764763 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:01.764772 17472 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:29:01.764778 17472 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:29:01.764789 17472 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.764849 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:01.764992 17472 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:29:01.765005 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765010 17472 net.cpp:165] Memory required for data: 925185500\nI0817 16:29:01.765019 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:29:01.765029 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:29:01.765035 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:29:01.765043 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:01.765055 17472 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:29:01.765082 17472 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:29:01.765091 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765095 17472 net.cpp:165] Memory required for data: 929281500\nI0817 16:29:01.765101 17472 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:29:01.765111 17472 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:29:01.765118 17472 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:29:01.765125 17472 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:29:01.765135 17472 net.cpp:150] Setting up L2_b2_relu\nI0817 16:29:01.765141 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765146 17472 net.cpp:165] Memory required for data: 933377500\nI0817 16:29:01.765151 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.765161 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.765166 17472 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:29:01.765173 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:01.765182 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:01.765224 17472 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.765239 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765245 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765250 17472 net.cpp:165] Memory required for data: 941569500\nI0817 16:29:01.765255 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:29:01.765274 17472 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:29:01.765280 17472 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:01.765290 17472 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:29:01.765753 17472 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:29:01.765766 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.765771 17472 net.cpp:165] Memory required for data: 945665500\nI0817 16:29:01.765780 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:29:01.765792 17472 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:29:01.765805 17472 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:29:01.765813 17472 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:29:01.766054 17472 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:29:01.766067 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.766072 17472 net.cpp:165] Memory required for data: 949761500\nI0817 16:29:01.766083 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:01.766091 17472 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:29:01.766098 17472 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:29:01.766108 17472 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.766161 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:01.766305 17472 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:29:01.766319 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.766324 17472 net.cpp:165] Memory required for data: 953857500\nI0817 16:29:01.766332 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:29:01.766340 17472 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:29:01.766346 17472 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:29:01.766357 17472 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.766367 17472 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:29:01.766374 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.766378 17472 net.cpp:165] Memory required for data: 957953500\nI0817 16:29:01.766383 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:29:01.766396 17472 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:29:01.766402 17472 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:29:01.766412 17472 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:29:01.766875 17472 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:29:01.766890 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.766894 17472 net.cpp:165] Memory required for data: 962049500\nI0817 16:29:01.766903 17472 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:29:01.766917 17472 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:29:01.766924 17472 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:29:01.766932 17472 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:29:01.767174 17472 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:29:01.767186 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767191 17472 net.cpp:165] Memory required for data: 966145500\nI0817 16:29:01.767202 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:01.767210 17472 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:29:01.767216 17472 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:29:01.767225 17472 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.767280 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:01.767427 17472 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:29:01.767442 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767447 17472 net.cpp:165] Memory required for data: 970241500\nI0817 16:29:01.767457 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:29:01.767465 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:29:01.767472 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:29:01.767485 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:01.767496 17472 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:29:01.767525 17472 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:29:01.767534 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767539 17472 net.cpp:165] Memory required for data: 974337500\nI0817 16:29:01.767544 17472 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:29:01.767568 17472 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:29:01.767575 17472 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:29:01.767582 17472 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.767591 17472 net.cpp:150] Setting up L2_b3_relu\nI0817 16:29:01.767598 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767603 17472 net.cpp:165] Memory required for data: 978433500\nI0817 16:29:01.767608 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.767616 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.767621 17472 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:29:01.767627 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:01.767637 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:01.767685 17472 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.767696 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767704 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.767707 17472 net.cpp:165] Memory required for data: 986625500\nI0817 16:29:01.767714 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:29:01.767726 17472 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:29:01.767734 17472 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:01.767745 17472 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:29:01.768208 17472 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:29:01.768223 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.768227 17472 net.cpp:165] Memory required for data: 990721500\nI0817 16:29:01.768235 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:29:01.768249 17472 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:29:01.768254 17472 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:29:01.768265 17472 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:29:01.768507 17472 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:29:01.768520 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.768525 17472 net.cpp:165] Memory required for data: 994817500\nI0817 16:29:01.768535 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:01.768544 17472 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:29:01.768550 17472 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:29:01.768558 17472 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.768613 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:01.768754 17472 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:29:01.768765 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.768770 17472 net.cpp:165] Memory required for data: 998913500\nI0817 16:29:01.768779 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:29:01.768790 17472 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:29:01.768796 17472 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:29:01.768810 17472 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.768821 17472 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:29:01.768827 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.768832 17472 net.cpp:165] Memory required for data: 1003009500\nI0817 16:29:01.768837 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:29:01.768858 17472 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:29:01.768864 17472 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:29:01.768875 17472 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:29:01.769331 17472 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:29:01.769345 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.769351 17472 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:29:01.769358 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:29:01.769374 17472 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:29:01.769381 17472 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:29:01.769392 17472 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:29:01.769629 17472 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:29:01.769644 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.769650 17472 net.cpp:165] Memory required for data: 1011201500\nI0817 16:29:01.769660 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:01.769670 17472 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:29:01.769675 17472 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:29:01.769683 17472 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.769737 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:01.769887 17472 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:29:01.769901 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.769906 17472 net.cpp:165] Memory required for data: 1015297500\nI0817 16:29:01.769914 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:29:01.769923 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:29:01.769929 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:29:01.769937 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:01.769948 17472 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:29:01.769974 17472 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:29:01.769984 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.769987 17472 net.cpp:165] Memory required for data: 1019393500\nI0817 16:29:01.769994 17472 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:29:01.770005 17472 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:29:01.770011 17472 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:29:01.770020 17472 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:29:01.770028 17472 net.cpp:150] Setting up L2_b4_relu\nI0817 16:29:01.770035 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.770040 17472 net.cpp:165] Memory required for data: 1023489500\nI0817 16:29:01.770045 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.770051 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.770056 17472 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:29:01.770063 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:01.770072 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:01.770117 17472 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.770129 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.770135 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.770140 17472 net.cpp:165] Memory required for data: 1031681500\nI0817 16:29:01.770145 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:29:01.770159 17472 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:29:01.770164 17472 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:01.770174 17472 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:29:01.770628 17472 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:29:01.770649 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.770654 17472 net.cpp:165] Memory required for data: 1035777500\nI0817 16:29:01.770663 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:29:01.770674 17472 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:29:01.770681 17472 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:29:01.770692 17472 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:29:01.770942 17472 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:29:01.770956 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.770961 17472 net.cpp:165] Memory required for data: 1039873500\nI0817 16:29:01.770970 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:01.770979 17472 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:29:01.770985 17472 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:29:01.770992 17472 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.771049 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:01.771195 17472 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:29:01.771209 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.771212 17472 net.cpp:165] Memory required for data: 1043969500\nI0817 16:29:01.771222 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:29:01.771234 17472 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:29:01.771239 17472 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:29:01.771246 17472 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.771256 17472 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:29:01.771263 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.771267 17472 net.cpp:165] Memory required for data: 1048065500\nI0817 16:29:01.771272 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:29:01.771286 17472 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:29:01.771291 17472 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:29:01.771301 17472 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:29:01.771754 17472 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:29:01.771769 17472 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.771773 17472 net.cpp:165] Memory required for data: 1052161500\nI0817 16:29:01.771782 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:29:01.771795 17472 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:29:01.771806 17472 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:29:01.771814 17472 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:29:01.772058 17472 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:29:01.772073 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772078 17472 net.cpp:165] Memory required for data: 1056257500\nI0817 16:29:01.772089 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:01.772097 17472 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:29:01.772104 17472 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:29:01.772110 17472 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.772163 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:01.772310 17472 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:29:01.772323 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772328 17472 net.cpp:165] Memory required for data: 1060353500\nI0817 16:29:01.772337 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:29:01.772346 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:29:01.772352 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:29:01.772359 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:01.772369 17472 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:29:01.772397 17472 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:29:01.772405 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772416 17472 net.cpp:165] Memory required for data: 1064449500\nI0817 16:29:01.772423 17472 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:29:01.772433 17472 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:29:01.772439 17472 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:29:01.772446 17472 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.772455 17472 net.cpp:150] Setting up L2_b5_relu\nI0817 16:29:01.772462 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772467 17472 net.cpp:165] Memory required for data: 1068545500\nI0817 16:29:01.772471 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.772478 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.772485 17472 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:29:01.772491 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:01.772500 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:01.772547 17472 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.772558 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772565 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.772569 17472 net.cpp:165] Memory required for data: 1076737500\nI0817 16:29:01.772574 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:29:01.772588 17472 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:29:01.772594 17472 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:01.772603 17472 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:29:01.773071 17472 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:29:01.773084 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.773089 17472 net.cpp:165] Memory required for data: 1080833500\nI0817 16:29:01.773098 17472 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:29:01.773110 17472 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:29:01.773118 17472 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:29:01.773128 17472 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:29:01.773370 17472 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:29:01.773385 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.773389 17472 net.cpp:165] Memory required for data: 1084929500\nI0817 16:29:01.773401 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:01.773409 17472 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:29:01.773416 17472 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:29:01.773423 17472 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.773478 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:01.773622 17472 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:29:01.773634 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.773639 17472 net.cpp:165] Memory required for data: 1089025500\nI0817 16:29:01.773648 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:29:01.773656 17472 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:29:01.773663 17472 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:29:01.773672 17472 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.773682 17472 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:29:01.773689 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.773694 17472 net.cpp:165] Memory required for data: 1093121500\nI0817 16:29:01.773699 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:29:01.773712 17472 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:29:01.773720 17472 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:29:01.773727 17472 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:29:01.774191 17472 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:29:01.774212 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.774217 17472 net.cpp:165] Memory required for data: 1097217500\nI0817 16:29:01.774226 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:29:01.774238 17472 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:29:01.774245 17472 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:29:01.774252 17472 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:29:01.774492 17472 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:29:01.774505 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.774510 17472 net.cpp:165] Memory required for data: 1101313500\nI0817 16:29:01.774520 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:01.774533 17472 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:29:01.774538 17472 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:29:01.774546 17472 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.774600 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:01.774744 17472 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:29:01.774757 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.774762 17472 net.cpp:165] Memory required for data: 1105409500\nI0817 16:29:01.774771 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:29:01.774782 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:29:01.774790 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:29:01.774796 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:01.774812 17472 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:29:01.774840 17472 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:29:01.774849 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.774854 17472 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:29:01.774859 17472 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:29:01.774866 17472 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:29:01.774873 17472 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:29:01.774883 17472 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.774893 17472 net.cpp:150] Setting up L2_b6_relu\nI0817 16:29:01.774899 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.774904 17472 net.cpp:165] Memory required for data: 1113601500\nI0817 16:29:01.774909 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.774915 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.774920 17472 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:29:01.774927 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:01.774937 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:01.774986 17472 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.774997 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.775004 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.775009 17472 net.cpp:165] Memory required for data: 1121793500\nI0817 16:29:01.775014 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:29:01.775027 17472 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:29:01.775034 17472 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:01.775043 17472 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:29:01.775501 17472 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:29:01.775514 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.775519 17472 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:29:01.775528 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:29:01.775540 17472 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:29:01.775553 17472 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:29:01.775562 17472 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:29:01.775813 17472 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:29:01.775830 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.775835 17472 net.cpp:165] Memory required for data: 1129985500\nI0817 16:29:01.775846 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:01.775854 17472 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:29:01.775861 17472 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:29:01.775868 17472 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.775923 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:01.776072 17472 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:29:01.776085 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.776089 17472 net.cpp:165] Memory required for data: 1134081500\nI0817 16:29:01.776099 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:29:01.776108 17472 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:29:01.776113 17472 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:29:01.776124 17472 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.776134 17472 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:29:01.776140 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.776144 17472 net.cpp:165] Memory required for data: 1138177500\nI0817 16:29:01.776149 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:29:01.776162 17472 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:29:01.776168 17472 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:29:01.776177 17472 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:29:01.776634 17472 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:29:01.776648 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.776654 17472 net.cpp:165] Memory required for data: 1142273500\nI0817 16:29:01.776661 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:29:01.776670 17472 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:29:01.776676 17472 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:29:01.776687 17472 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:29:01.776945 17472 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:29:01.776959 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.776964 17472 net.cpp:165] Memory required for data: 1146369500\nI0817 16:29:01.776974 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:01.776986 17472 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:29:01.776993 17472 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:29:01.777001 17472 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.777056 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:01.777202 17472 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:29:01.777215 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.777220 17472 net.cpp:165] Memory required for data: 1150465500\nI0817 16:29:01.777230 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:29:01.777241 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:29:01.777248 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:29:01.777256 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:01.777266 17472 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:29:01.777292 17472 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:29:01.777302 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:01.777307 17472 net.cpp:165] Memory required for data: 1154561500\nI0817 16:29:01.777312 17472 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:29:01.777319 17472 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:29:01.777324 17472 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:29:01.777341 17472 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.777351 17472 net.cpp:150] Setting up L2_b7_relu\nI0817 16:29:01.777359 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.777364 17472 net.cpp:165] Memory required for data: 1158657500\nI0817 16:29:01.777367 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.777374 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.777379 17472 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:29:01.777386 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:01.777396 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:01.777443 17472 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.777454 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.777462 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.777467 17472 net.cpp:165] Memory required for data: 1166849500\nI0817 16:29:01.777472 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:29:01.777482 17472 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:29:01.777488 17472 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:01.777500 17472 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:29:01.777971 17472 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:29:01.777984 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:01.777989 17472 net.cpp:165] Memory required for data: 1170945500\nI0817 16:29:01.777998 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:29:01.778010 17472 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:29:01.778017 17472 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:29:01.778025 17472 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:29:01.778272 17472 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:29:01.778285 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.778290 17472 net.cpp:165] Memory required for data: 1175041500\nI0817 16:29:01.778301 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:01.778312 17472 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:29:01.778318 17472 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:29:01.778326 17472 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.778380 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:01.778533 17472 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:29:01.778545 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.778550 17472 net.cpp:165] Memory required for data: 1179137500\nI0817 16:29:01.778560 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:29:01.778571 17472 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:29:01.778578 17472 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:29:01.778587 17472 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.778597 17472 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:29:01.778604 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.778609 17472 net.cpp:165] Memory required for data: 1183233500\nI0817 16:29:01.778614 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:29:01.778625 17472 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:29:01.778630 17472 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:29:01.778640 17472 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:29:01.779105 17472 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:29:01.779119 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779124 17472 net.cpp:165] Memory required for data: 1187329500\nI0817 16:29:01.779134 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:29:01.779142 17472 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:29:01.779157 17472 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:29:01.779170 17472 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:29:01.779419 17472 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:29:01.779433 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779438 17472 net.cpp:165] Memory required for data: 1191425500\nI0817 16:29:01.779448 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:01.779459 17472 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:29:01.779466 17472 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:29:01.779474 17472 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.779528 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:01.779675 17472 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:29:01.779688 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779693 17472 net.cpp:165] Memory required for data: 1195521500\nI0817 16:29:01.779702 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:29:01.779714 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:29:01.779721 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:29:01.779727 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:01.779736 17472 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:29:01.779765 17472 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:29:01.779774 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779779 17472 net.cpp:165] Memory required for data: 1199617500\nI0817 16:29:01.779784 17472 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:29:01.779791 17472 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:29:01.779798 17472 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:29:01.779814 17472 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.779825 17472 net.cpp:150] Setting up L2_b8_relu\nI0817 16:29:01.779832 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779836 17472 net.cpp:165] Memory required for data: 1203713500\nI0817 16:29:01.779841 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.779848 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.779853 17472 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:29:01.779860 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:01.779883 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:01.779930 17472 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.779945 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779953 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.779958 17472 net.cpp:165] Memory required for data: 1211905500\nI0817 16:29:01.779963 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:29:01.779974 17472 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:29:01.779980 17472 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:01.779995 17472 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:29:01.780455 17472 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:29:01.780469 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.780473 17472 net.cpp:165] Memory required for data: 1216001500\nI0817 16:29:01.780483 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:29:01.780493 17472 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:29:01.780498 17472 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:29:01.780509 17472 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:29:01.780756 17472 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:29:01.780768 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.780781 17472 net.cpp:165] Memory required for data: 1220097500\nI0817 16:29:01.780792 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:01.780809 17472 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:29:01.780817 17472 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:29:01.780825 17472 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.780881 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:01.781033 17472 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:29:01.781045 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.781050 17472 net.cpp:165] Memory required for data: 1224193500\nI0817 16:29:01.781060 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:29:01.781070 17472 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:29:01.781077 17472 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:29:01.781085 17472 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.781095 17472 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:29:01.781101 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.781106 17472 net.cpp:165] Memory required for data: 1228289500\nI0817 16:29:01.781111 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:29:01.781124 17472 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:29:01.781131 17472 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:29:01.781141 17472 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:29:01.781595 17472 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:29:01.781608 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.781613 17472 net.cpp:165] Memory required for data: 1232385500\nI0817 16:29:01.781622 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:29:01.781631 17472 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:29:01.781637 17472 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:29:01.781646 17472 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:29:01.781903 17472 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:29:01.781915 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.781920 17472 net.cpp:165] Memory required for data: 1236481500\nI0817 16:29:01.781965 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:01.781980 17472 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:29:01.781987 17472 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:29:01.781996 17472 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.782055 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:01.782200 17472 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:29:01.782213 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.782218 17472 net.cpp:165] Memory required for data: 1240577500\nI0817 16:29:01.782227 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:29:01.782241 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:29:01.782248 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:29:01.782255 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:01.782263 17472 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:29:01.782290 17472 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:29:01.782299 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.782305 17472 net.cpp:165] Memory required for data: 1244673500\nI0817 16:29:01.782310 17472 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:29:01.782320 17472 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:29:01.782326 17472 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:29:01.782333 17472 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.782346 17472 net.cpp:150] Setting up L2_b9_relu\nI0817 16:29:01.782352 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.782357 17472 net.cpp:165] Memory required for data: 1248769500\nI0817 16:29:01.782368 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.782377 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.782382 17472 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:29:01.782389 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:01.782399 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:01.782447 17472 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.782459 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.782466 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.782470 17472 net.cpp:165] Memory required for data: 1256961500\nI0817 16:29:01.782475 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:29:01.782490 17472 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:29:01.782495 17472 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:01.782505 17472 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:29:01.782984 17472 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:29:01.782999 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.783004 17472 net.cpp:165] Memory required for data: 1257985500\nI0817 16:29:01.783013 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:29:01.783025 17472 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:29:01.783031 17472 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:29:01.783042 17472 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:29:01.783301 17472 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:29:01.783313 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.783318 17472 net.cpp:165] Memory required for data: 1259009500\nI0817 16:29:01.783329 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:01.783339 17472 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:29:01.783344 17472 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:29:01.783355 17472 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.783413 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:01.783563 17472 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:29:01.783576 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.783581 17472 net.cpp:165] Memory required for data: 1260033500\nI0817 16:29:01.783591 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:29:01.783598 17472 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:29:01.783604 17472 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:29:01.783615 17472 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.783625 17472 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:29:01.783632 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.783637 17472 net.cpp:165] Memory required for data: 1261057500\nI0817 16:29:01.783641 17472 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:29:01.783655 17472 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:29:01.783661 17472 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:29:01.783669 17472 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:29:01.784145 17472 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:29:01.784162 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784168 17472 net.cpp:165] Memory required for data: 1262081500\nI0817 16:29:01.784176 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:29:01.784186 17472 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:29:01.784193 17472 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:29:01.784204 17472 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:29:01.784459 17472 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:29:01.784472 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784483 17472 net.cpp:165] Memory required for data: 1263105500\nI0817 16:29:01.784495 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:01.784507 17472 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:29:01.784513 17472 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:29:01.784521 17472 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.784576 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:01.784734 17472 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:29:01.784746 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784751 17472 net.cpp:165] Memory required for data: 1264129500\nI0817 16:29:01.784760 17472 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:29:01.784770 17472 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:29:01.784776 17472 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:01.784787 17472 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:29:01.784827 17472 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:29:01.784838 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784842 17472 net.cpp:165] Memory required for data: 1265153500\nI0817 16:29:01.784848 17472 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:29:01.784862 17472 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:29:01.784868 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:29:01.784875 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:29:01.784883 17472 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:29:01.784914 17472 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:29:01.784924 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784929 17472 net.cpp:165] Memory required for data: 1266177500\nI0817 16:29:01.784934 17472 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:29:01.784945 17472 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:29:01.784950 17472 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:29:01.784957 17472 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.784966 17472 net.cpp:150] Setting up L3_b1_relu\nI0817 16:29:01.784973 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.784978 17472 net.cpp:165] Memory required for data: 1267201500\nI0817 16:29:01.784982 17472 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:29:01.784991 17472 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:29:01.784998 17472 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:29:01.786223 17472 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:29:01.786243 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.786249 17472 net.cpp:165] Memory required for data: 1268225500\nI0817 16:29:01.786255 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:29:01.786264 17472 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:29:01.786270 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:29:01.786278 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:29:01.786288 17472 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:29:01.786329 17472 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:29:01.786339 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.786345 17472 net.cpp:165] Memory required for data: 1270273500\nI0817 16:29:01.786351 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.786361 17472 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.786368 17472 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:29:01.786375 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:01.786386 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:01.786437 17472 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.786448 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.786454 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.786466 17472 net.cpp:165] Memory required for data: 1274369500\nI0817 16:29:01.786473 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:29:01.786484 17472 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:29:01.786490 17472 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:01.786502 17472 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:29:01.788489 17472 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:29:01.788507 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.788512 17472 net.cpp:165] Memory required for data: 1276417500\nI0817 16:29:01.788522 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:29:01.788535 17472 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:29:01.788542 17472 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:29:01.788552 17472 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:29:01.788816 17472 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:29:01.788830 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.788836 17472 net.cpp:165] Memory required for data: 1278465500\nI0817 16:29:01.788846 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:01.788858 17472 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:29:01.788866 17472 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:29:01.788873 17472 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.788930 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:01.789085 17472 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:29:01.789098 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.789104 17472 net.cpp:165] Memory required for data: 1280513500\nI0817 16:29:01.789113 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:29:01.789124 17472 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:29:01.789130 17472 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:29:01.789139 17472 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.789149 17472 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:29:01.789155 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.789160 17472 net.cpp:165] Memory required for data: 1282561500\nI0817 16:29:01.789165 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:29:01.789178 17472 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:29:01.789185 17472 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:29:01.789196 17472 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:29:01.790223 17472 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:29:01.790238 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.790243 17472 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:29:01.790252 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:29:01.790261 17472 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:29:01.790267 17472 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:29:01.790278 17472 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:29:01.790539 17472 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:29:01.790554 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.790560 17472 net.cpp:165] Memory required for data: 1286657500\nI0817 16:29:01.790570 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:01.790578 17472 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:29:01.790585 17472 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:29:01.790592 17472 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.790649 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:01.790817 17472 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:29:01.790832 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.790837 17472 net.cpp:165] Memory required for data: 1288705500\nI0817 16:29:01.790845 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:29:01.790865 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:29:01.790874 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:29:01.790880 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:01.790889 17472 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:29:01.790925 17472 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:29:01.790935 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.790940 17472 net.cpp:165] Memory required for data: 1290753500\nI0817 16:29:01.790946 17472 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:29:01.790952 17472 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:29:01.790958 17472 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:29:01.790966 17472 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:29:01.790976 17472 net.cpp:150] Setting up L3_b2_relu\nI0817 16:29:01.790982 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.790987 17472 net.cpp:165] Memory required for data: 1292801500\nI0817 16:29:01.790992 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.790998 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.791003 17472 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:29:01.791013 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:01.791024 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:01.791069 17472 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.791080 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.791087 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.791091 17472 net.cpp:165] Memory required for data: 1296897500\nI0817 16:29:01.791096 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:29:01.791112 17472 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:29:01.791119 17472 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:01.791128 17472 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:29:01.792150 17472 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:29:01.792165 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.792171 17472 net.cpp:165] Memory required for data: 1298945500\nI0817 16:29:01.792178 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:29:01.792191 17472 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:29:01.792197 17472 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:29:01.792206 17472 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:29:01.792461 17472 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:29:01.792474 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.792479 17472 net.cpp:165] Memory required for data: 1300993500\nI0817 16:29:01.792490 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:01.792501 17472 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:29:01.792507 17472 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:29:01.792515 17472 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.792578 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:01.792732 17472 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:29:01.792744 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.792749 17472 net.cpp:165] Memory required for data: 1303041500\nI0817 16:29:01.792758 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:29:01.792769 17472 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:29:01.792776 17472 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:29:01.792784 17472 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.792794 17472 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:29:01.792806 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.792819 17472 net.cpp:165] Memory required for data: 1305089500\nI0817 16:29:01.792824 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:29:01.792839 17472 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:29:01.792845 17472 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:29:01.792856 17472 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:29:01.793910 17472 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:29:01.793926 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.793931 17472 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:29:01.793941 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:29:01.793951 17472 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:29:01.793956 17472 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:29:01.793967 17472 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:29:01.794234 17472 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:29:01.794246 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794251 17472 net.cpp:165] Memory required for data: 1309185500\nI0817 16:29:01.794262 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:01.794271 17472 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:29:01.794277 17472 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:29:01.794286 17472 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.794344 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:01.794497 17472 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:29:01.794509 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794514 17472 net.cpp:165] Memory required for data: 1311233500\nI0817 16:29:01.794523 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:29:01.794536 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:29:01.794543 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:29:01.794550 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:01.794559 17472 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:29:01.794595 17472 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:29:01.794606 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794611 17472 net.cpp:165] Memory required for data: 1313281500\nI0817 16:29:01.794616 17472 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:29:01.794625 17472 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:29:01.794631 17472 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:29:01.794637 17472 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.794646 17472 net.cpp:150] Setting up L3_b3_relu\nI0817 16:29:01.794653 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794657 17472 net.cpp:165] Memory required for data: 1315329500\nI0817 16:29:01.794662 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.794669 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.794674 17472 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:29:01.794685 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:01.794694 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:01.794739 17472 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.794750 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794757 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.794762 17472 net.cpp:165] Memory required for data: 1319425500\nI0817 16:29:01.794766 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:29:01.794781 17472 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:29:01.794787 17472 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:01.794796 17472 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:29:01.795827 17472 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:29:01.795842 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.795847 17472 net.cpp:165] Memory required for data: 1321473500\nI0817 16:29:01.795856 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:29:01.795868 17472 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:29:01.795876 17472 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:29:01.795883 17472 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:29:01.796154 17472 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:29:01.796167 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.796172 17472 net.cpp:165] Memory required for data: 1323521500\nI0817 16:29:01.796182 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:01.796193 17472 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:29:01.796200 17472 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:29:01.796208 17472 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.796267 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:01.796422 17472 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:29:01.796435 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.796440 17472 net.cpp:165] Memory required for data: 1325569500\nI0817 16:29:01.796450 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:29:01.796461 17472 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:29:01.796468 17472 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:29:01.796474 17472 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.796488 17472 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:29:01.796494 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.796499 17472 net.cpp:165] Memory required for data: 1327617500\nI0817 16:29:01.796504 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:29:01.796514 17472 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:29:01.796520 17472 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:29:01.796531 17472 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:29:01.797557 17472 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:29:01.797572 17472 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:29:01.797576 17472 net.cpp:165] Memory required for data: 1329665500\nI0817 16:29:01.797585 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:29:01.797595 17472 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:29:01.797601 17472 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:29:01.797612 17472 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:29:01.797886 17472 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:29:01.797900 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.797905 17472 net.cpp:165] Memory required for data: 1331713500\nI0817 16:29:01.797915 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:01.797924 17472 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:29:01.797930 17472 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:29:01.797938 17472 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.797999 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:01.798164 17472 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:29:01.798180 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.798185 17472 net.cpp:165] Memory required for data: 1333761500\nI0817 16:29:01.798194 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:29:01.798203 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:29:01.798209 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:29:01.798216 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:01.798224 17472 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:29:01.798261 17472 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:29:01.798277 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.798282 17472 net.cpp:165] Memory required for data: 1335809500\nI0817 16:29:01.798287 17472 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:29:01.798295 17472 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:29:01.798301 17472 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:29:01.798308 17472 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:29:01.798318 17472 net.cpp:150] Setting up L3_b4_relu\nI0817 16:29:01.798326 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.798329 17472 net.cpp:165] Memory required for data: 1337857500\nI0817 16:29:01.798333 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.798341 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.798346 17472 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:29:01.798357 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:01.798367 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:01.798413 17472 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.798427 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.798434 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.798439 17472 net.cpp:165] Memory required for data: 1341953500\nI0817 16:29:01.798444 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:29:01.798454 17472 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:29:01.798461 17472 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:01.798470 17472 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:29:01.799489 17472 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:29:01.799504 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.799510 17472 net.cpp:165] Memory required for data: 1344001500\nI0817 16:29:01.799518 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:29:01.799530 17472 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:29:01.799536 17472 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:29:01.799545 17472 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:29:01.800796 17472 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:29:01.800817 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.800822 17472 net.cpp:165] Memory required for data: 1346049500\nI0817 16:29:01.800834 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:01.800848 17472 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:29:01.800854 17472 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:29:01.800863 17472 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.800925 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:01.801084 17472 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:29:01.801100 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.801105 17472 net.cpp:165] Memory required for data: 1348097500\nI0817 16:29:01.801115 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:29:01.801123 17472 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:29:01.801129 17472 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:29:01.801137 17472 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.801147 17472 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:29:01.801154 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.801158 17472 net.cpp:165] Memory required for data: 1350145500\nI0817 16:29:01.801163 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:29:01.801178 17472 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:29:01.801184 17472 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:29:01.801193 17472 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:29:01.803369 17472 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:29:01.803386 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.803392 17472 net.cpp:165] Memory required for data: 1352193500\nI0817 16:29:01.803402 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:29:01.803411 17472 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:29:01.803418 17472 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:29:01.803431 17472 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:29:01.803694 17472 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:29:01.803706 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.803711 17472 net.cpp:165] Memory required for data: 1354241500\nI0817 16:29:01.803722 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:01.803731 17472 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:29:01.803737 17472 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:29:01.803745 17472 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.803812 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:01.803967 17472 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:29:01.803983 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.803988 17472 net.cpp:165] Memory required for data: 1356289500\nI0817 16:29:01.803998 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:29:01.804008 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:29:01.804015 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:29:01.804023 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:01.804030 17472 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:29:01.804066 17472 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:29:01.804078 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.804082 17472 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:29:01.804088 17472 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:29:01.804095 17472 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:29:01.804101 17472 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:29:01.804110 17472 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.804118 17472 net.cpp:150] Setting up L3_b5_relu\nI0817 16:29:01.804126 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.804131 17472 net.cpp:165] Memory required for data: 1360385500\nI0817 16:29:01.804134 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.804141 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.804147 17472 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:29:01.804157 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:01.804167 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:01.804213 17472 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.804224 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.804230 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.804234 17472 net.cpp:165] Memory required for data: 1364481500\nI0817 16:29:01.804239 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:29:01.804255 17472 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:29:01.804260 17472 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:01.804270 17472 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:29:01.805317 17472 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:29:01.805332 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.805337 17472 net.cpp:165] Memory required for data: 1366529500\nI0817 16:29:01.805347 
17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:29:01.805359 17472 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:29:01.805373 17472 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:29:01.805382 17472 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:29:01.805645 17472 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:29:01.805658 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.805663 17472 net.cpp:165] Memory required for data: 1368577500\nI0817 16:29:01.805675 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:01.805685 17472 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:29:01.805692 17472 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:29:01.805701 17472 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.805763 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:01.805927 17472 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:29:01.805939 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.805945 17472 net.cpp:165] Memory required for data: 1370625500\nI0817 16:29:01.805954 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:29:01.805965 17472 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:29:01.805972 17472 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:29:01.805981 17472 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.805992 17472 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:29:01.805999 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.806004 17472 net.cpp:165] Memory required for data: 1372673500\nI0817 16:29:01.806008 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:29:01.806020 17472 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:29:01.806025 17472 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:29:01.806040 17472 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:29:01.807054 17472 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:29:01.807067 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807072 17472 net.cpp:165] Memory required for data: 1374721500\nI0817 16:29:01.807081 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:29:01.807093 17472 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:29:01.807101 17472 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:29:01.807109 17472 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:29:01.807369 17472 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:29:01.807384 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807389 17472 net.cpp:165] Memory required for data: 1376769500\nI0817 16:29:01.807399 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:01.807407 17472 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:29:01.807413 17472 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:29:01.807421 17472 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.807481 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:01.807629 17472 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:29:01.807646 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807651 17472 net.cpp:165] Memory required for data: 1378817500\nI0817 16:29:01.807660 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:29:01.807669 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:29:01.807675 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:29:01.807682 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:01.807690 17472 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:29:01.807725 17472 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:29:01.807737 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807742 
17472 net.cpp:165] Memory required for data: 1380865500\nI0817 16:29:01.807747 17472 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:29:01.807754 17472 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:29:01.807760 17472 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:29:01.807775 17472 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.807785 17472 net.cpp:150] Setting up L3_b6_relu\nI0817 16:29:01.807792 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807797 17472 net.cpp:165] Memory required for data: 1382913500\nI0817 16:29:01.807807 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.807818 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.807824 17472 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:29:01.807832 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:01.807842 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:01.807886 17472 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.807901 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807909 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.807912 17472 net.cpp:165] Memory required for data: 1387009500\nI0817 16:29:01.807917 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:29:01.807929 17472 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:29:01.807935 17472 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:01.807945 17472 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:29:01.808960 17472 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:29:01.808975 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.808980 17472 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:29:01.808990 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:29:01.809001 17472 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:29:01.809007 17472 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:29:01.809015 17472 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:29:01.809280 17472 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:29:01.809293 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.809298 17472 net.cpp:165] Memory required for data: 1391105500\nI0817 16:29:01.809309 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:01.809319 17472 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:29:01.809326 17472 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:29:01.809334 17472 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.809393 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:01.809551 17472 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:29:01.809563 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.809568 17472 net.cpp:165] Memory required for data: 1393153500\nI0817 16:29:01.809577 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:29:01.809612 17472 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:29:01.809619 17472 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:29:01.809628 17472 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.809638 17472 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:29:01.809644 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.809649 17472 net.cpp:165] Memory required for data: 1395201500\nI0817 16:29:01.809654 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:29:01.809666 17472 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:29:01.809672 17472 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:29:01.809680 
17472 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:29:01.810696 17472 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:29:01.810711 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.810716 17472 net.cpp:165] Memory required for data: 1397249500\nI0817 16:29:01.810725 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:29:01.810737 17472 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:29:01.810750 17472 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:29:01.810760 17472 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:29:01.811028 17472 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:29:01.811040 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.811045 17472 net.cpp:165] Memory required for data: 1399297500\nI0817 16:29:01.811056 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:01.811067 17472 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:29:01.811074 17472 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:29:01.811081 17472 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.811144 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:01.811298 17472 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:29:01.811312 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.811317 17472 net.cpp:165] Memory required for data: 1401345500\nI0817 16:29:01.811326 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:29:01.811338 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:29:01.811344 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:29:01.811352 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:01.811363 17472 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:29:01.811398 17472 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:29:01.811408 17472 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:29:01.811414 17472 net.cpp:165] Memory required for data: 1403393500\nI0817 16:29:01.811419 17472 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:29:01.811429 17472 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:29:01.811435 17472 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:29:01.811442 17472 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.811452 17472 net.cpp:150] Setting up L3_b7_relu\nI0817 16:29:01.811460 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.811463 17472 net.cpp:165] Memory required for data: 1405441500\nI0817 16:29:01.811468 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.811475 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.811480 17472 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:29:01.811487 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:01.811497 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:01.811544 17472 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.811556 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.811563 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.811568 17472 net.cpp:165] Memory required for data: 1409537500\nI0817 16:29:01.811573 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:29:01.811585 17472 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:29:01.811592 17472 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:01.811601 17472 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:29:01.812611 17472 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:29:01.812626 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:29:01.812631 17472 net.cpp:165] Memory required for data: 1411585500\nI0817 16:29:01.812640 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:29:01.812652 17472 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:29:01.812659 17472 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:29:01.812669 17472 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:29:01.812934 17472 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:29:01.812947 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.812952 17472 net.cpp:165] Memory required for data: 1413633500\nI0817 16:29:01.812970 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:01.812979 17472 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:29:01.812986 17472 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:29:01.812994 17472 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.813055 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:01.813208 17472 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:29:01.813221 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.813226 17472 net.cpp:165] Memory required for data: 1415681500\nI0817 16:29:01.813235 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:29:01.813243 17472 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:29:01.813249 17472 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:29:01.813261 17472 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.813271 17472 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:29:01.813277 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.813282 17472 net.cpp:165] Memory required for data: 1417729500\nI0817 16:29:01.813287 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:29:01.813303 17472 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:29:01.813309 17472 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:29:01.813318 17472 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:29:01.814329 17472 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:29:01.814344 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.814349 17472 net.cpp:165] Memory required for data: 1419777500\nI0817 16:29:01.814358 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:29:01.814371 17472 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:29:01.814378 17472 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:29:01.814386 17472 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:29:01.814646 17472 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:29:01.814659 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.814664 17472 net.cpp:165] Memory required for data: 1421825500\nI0817 16:29:01.814676 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:01.814687 17472 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:29:01.814692 17472 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:29:01.814700 17472 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.814759 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:01.815096 17472 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:29:01.815112 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.815117 17472 net.cpp:165] Memory required for data: 1423873500\nI0817 16:29:01.815127 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:29:01.815140 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:29:01.815146 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:29:01.815153 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:01.815165 17472 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:29:01.815197 17472 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:29:01.815209 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.815214 17472 net.cpp:165] Memory required for data: 1425921500\nI0817 16:29:01.815219 17472 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:29:01.815229 17472 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:29:01.815237 17472 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:29:01.815243 17472 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.815253 17472 net.cpp:150] Setting up L3_b8_relu\nI0817 16:29:01.815259 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.815264 17472 net.cpp:165] Memory required for data: 1427969500\nI0817 16:29:01.815268 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.815282 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.815289 17472 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:29:01.815296 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:01.815306 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:01.815356 17472 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.815367 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.815374 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.815378 17472 net.cpp:165] Memory required for data: 1432065500\nI0817 16:29:01.815383 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:29:01.815398 17472 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:29:01.815404 17472 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:01.815413 17472 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:29:01.817406 17472 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:29:01.817426 
17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.817431 17472 net.cpp:165] Memory required for data: 1434113500\nI0817 16:29:01.817441 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:29:01.817451 17472 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:29:01.817457 17472 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:29:01.817468 17472 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:29:01.817734 17472 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:29:01.817749 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.817754 17472 net.cpp:165] Memory required for data: 1436161500\nI0817 16:29:01.817765 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:01.817775 17472 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:29:01.817780 17472 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:29:01.817788 17472 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.817854 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:01.818012 17472 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:29:01.818027 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.818032 17472 net.cpp:165] Memory required for data: 1438209500\nI0817 16:29:01.818040 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:29:01.818049 17472 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:29:01.818055 17472 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:29:01.818065 17472 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.818076 17472 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:29:01.818084 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.818089 17472 net.cpp:165] Memory required for data: 1440257500\nI0817 16:29:01.818094 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:29:01.818107 17472 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:29:01.818114 17472 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:29:01.818122 17472 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:29:01.819142 17472 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:29:01.819156 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.819161 17472 net.cpp:165] Memory required for data: 1442305500\nI0817 16:29:01.819170 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:29:01.819182 17472 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:29:01.819190 17472 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:29:01.819200 17472 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:29:01.819464 17472 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:29:01.819478 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.819483 17472 net.cpp:165] Memory required for data: 1444353500\nI0817 16:29:01.819501 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:01.819510 17472 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:29:01.819517 17472 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:29:01.819527 17472 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.819586 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:01.819739 17472 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:29:01.819752 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.819757 17472 net.cpp:165] Memory required for data: 1446401500\nI0817 16:29:01.819767 17472 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:29:01.819777 17472 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:29:01.819782 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:29:01.819789 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:01.819805 17472 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:29:01.819844 17472 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:29:01.819856 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.819861 17472 net.cpp:165] Memory required for data: 1448449500\nI0817 16:29:01.819866 17472 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:29:01.819875 17472 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:29:01.819880 17472 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:29:01.819887 17472 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.819897 17472 net.cpp:150] Setting up L3_b9_relu\nI0817 16:29:01.819905 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.819908 17472 net.cpp:165] Memory required for data: 1450497500\nI0817 16:29:01.819913 17472 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:29:01.819924 17472 net.cpp:100] Creating Layer post_pool\nI0817 16:29:01.819931 17472 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:29:01.819938 17472 net.cpp:408] post_pool -> post_pool\nI0817 16:29:01.819972 17472 net.cpp:150] Setting up post_pool\nI0817 16:29:01.819980 17472 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:29:01.819985 17472 net.cpp:165] Memory required for data: 1450529500\nI0817 16:29:01.819990 17472 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:29:01.820081 17472 net.cpp:100] Creating Layer post_FC\nI0817 16:29:01.820094 17472 net.cpp:434] post_FC <- post_pool\nI0817 16:29:01.820104 17472 net.cpp:408] post_FC -> post_FC_top\nI0817 16:29:01.820344 17472 net.cpp:150] Setting up post_FC\nI0817 16:29:01.820359 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.820364 17472 net.cpp:165] Memory required for data: 1450534500\nI0817 16:29:01.820374 17472 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:29:01.820382 17472 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:29:01.820389 17472 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:29:01.820399 17472 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:29:01.820410 17472 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:29:01.820461 17472 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:29:01.820472 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.820478 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.820483 17472 net.cpp:165] Memory required for data: 1450544500\nI0817 16:29:01.820488 17472 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:29:01.820531 17472 net.cpp:100] Creating Layer accuracy\nI0817 16:29:01.820543 17472 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:29:01.820550 17472 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:29:01.820564 17472 net.cpp:408] accuracy -> accuracy\nI0817 16:29:01.820607 17472 net.cpp:150] Setting up accuracy\nI0817 16:29:01.820621 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:01.820626 17472 net.cpp:165] Memory required for data: 1450544504\nI0817 16:29:01.820638 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:01.820647 17472 net.cpp:100] Creating Layer loss\nI0817 16:29:01.820653 17472 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:29:01.820662 17472 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:29:01.820668 17472 net.cpp:408] loss -> loss\nI0817 16:29:01.820714 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:01.820880 17472 net.cpp:150] Setting up loss\nI0817 16:29:01.820895 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:01.820901 17472 net.cpp:160]     with loss weight 1\nI0817 16:29:01.820974 17472 net.cpp:165] Memory required for data: 1450544508\nI0817 16:29:01.820983 17472 net.cpp:226] loss needs backward computation.\nI0817 16:29:01.820989 17472 net.cpp:228] accuracy does not need backward computation.\nI0817 16:29:01.820996 17472 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:29:01.821000 17472 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:29:01.821005 17472 net.cpp:226] post_pool needs backward computation.\nI0817 16:29:01.821010 17472 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:29:01.821015 17472 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.821022 17472 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.821027 17472 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.821030 17472 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.821035 17472 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.821040 17472 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.821045 17472 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.821049 17472 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.821054 17472 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:29:01.821059 17472 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:29:01.821064 17472 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.821069 17472 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.821074 17472 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.821079 17472 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.821084 17472 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.821089 17472 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:29:01.821094 17472 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.821099 17472 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.821105 17472 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:29:01.821110 17472 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:29:01.821115 17472 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:29:01.821120 17472 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.821128 17472 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.821133 17472 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.821138 17472 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.821143 17472 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.821148 17472 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.821153 17472 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.821158 17472 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:29:01.821163 17472 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:29:01.821168 17472 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.821174 17472 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.821179 17472 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.821184 17472 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.821195 17472 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.821202 17472 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.821207 17472 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.821211 17472 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.821218 17472 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:29:01.821223 17472 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:29:01.821228 17472 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:29:01.821233 17472 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.821238 17472 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.821243 
17472 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.821247 17472 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.821252 17472 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.821257 17472 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.821264 17472 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.821269 17472 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:29:01.821274 17472 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:29:01.821279 17472 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.821285 17472 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.821290 17472 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.821295 17472 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.821300 17472 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.821305 17472 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.821308 17472 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.821315 17472 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.821319 17472 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:29:01.821324 17472 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:29:01.821329 17472 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:29:01.821334 17472 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.821339 17472 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.821344 17472 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.821349 17472 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.821354 17472 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:29:01.821359 17472 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.821364 17472 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.821369 17472 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:29:01.821374 17472 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:29:01.821379 17472 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.821385 17472 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.821390 17472 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.821395 17472 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.821401 17472 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.821405 17472 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.821411 17472 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.821416 17472 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.821421 17472 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:29:01.821426 17472 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:29:01.821432 17472 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:29:01.821442 17472 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:29:01.821449 17472 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.821455 17472 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:29:01.821460 17472 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.821465 17472 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.821470 17472 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.821475 17472 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.821480 17472 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:29:01.821485 17472 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.821493 17472 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.821499 17472 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:29:01.821504 17472 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:29:01.821509 17472 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.821516 17472 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.821521 17472 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.821527 17472 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.821532 17472 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.821537 17472 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.821542 17472 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.821547 17472 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.821552 17472 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:29:01.821558 17472 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:29:01.821563 17472 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.821569 17472 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.821574 17472 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.821580 17472 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.821585 17472 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.821590 17472 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:29:01.821595 17472 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.821601 17472 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.821606 17472 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:29:01.821612 17472 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:29:01.821617 17472 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:29:01.821624 17472 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.821629 17472 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.821633 17472 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.821640 17472 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.821645 17472 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.821650 17472 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.821655 17472 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.821660 17472 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:29:01.821666 17472 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:29:01.821671 17472 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.821676 17472 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.821681 17472 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.821686 17472 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.821691 17472 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.821696 17472 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.821707 17472 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.821713 17472 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.821718 17472 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:29:01.821723 17472 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:29:01.821728 17472 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:29:01.821734 17472 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.821739 17472 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.821745 17472 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.821750 17472 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.821755 17472 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.821760 17472 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.821766 17472 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.821771 17472 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:29:01.821782 17472 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:29:01.821789 17472 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.821794 17472 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.821805 17472 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.821811 17472 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.821817 17472 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.821822 17472 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.821828 17472 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.821833 17472 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.821840 17472 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:29:01.821844 17472 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:29:01.821851 17472 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:29:01.821856 17472 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.821861 17472 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.821867 17472 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.821872 17472 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.821878 17472 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:29:01.821883 17472 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.821889 17472 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.821894 17472 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:29:01.821900 17472 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:29:01.821905 17472 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.821912 17472 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.821916 17472 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.821923 17472 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.821928 17472 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.821933 17472 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.821938 17472 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.821944 17472 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.821950 17472 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:29:01.821955 17472 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:29:01.821962 17472 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:29:01.821967 17472 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:29:01.821974 17472 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.821985 17472 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:29:01.821991 17472 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.821997 17472 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.822003 17472 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.822010 17472 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.822015 17472 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:29:01.822019 17472 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.822026 17472 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.822031 17472 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:29:01.822036 17472 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:29:01.822042 17472 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.822048 17472 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.822053 17472 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.822058 17472 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.822064 17472 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.822069 17472 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.822074 17472 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.822080 17472 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.822087 17472 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:29:01.822091 17472 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:29:01.822098 17472 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.822103 17472 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.822108 17472 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.822114 17472 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.822120 17472 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.822125 17472 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:29:01.822130 17472 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.822136 17472 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.822142 17472 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:29:01.822147 17472 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:29:01.822154 17472 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:29:01.822160 17472 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.822165 17472 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.822173 17472 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.822180 17472 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.822185 17472 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.822191 17472 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.822197 17472 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.822203 17472 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:29:01.822208 17472 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:29:01.822214 17472 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.822221 17472 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.822227 17472 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.822232 17472 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.822237 17472 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.822242 17472 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.822248 17472 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.822258 17472 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.822264 17472 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:29:01.822270 17472 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:29:01.822276 17472 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:29:01.822283 17472 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.822288 17472 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.822294 17472 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.822299 17472 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.822305 17472 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.822310 17472 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.822316 17472 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.822322 17472 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:29:01.822327 17472 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:29:01.822334 17472 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.822340 17472 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.822345 17472 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.822351 17472 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.822356 17472 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.822362 17472 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.822367 17472 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.822373 17472 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.822379 17472 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:29:01.822384 17472 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:29:01.822391 17472 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:29:01.822396 17472 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.822402 17472 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.822407 17472 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.822413 17472 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.822418 17472 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:29:01.822424 17472 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.822430 17472 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.822437 17472 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:29:01.822441 17472 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:29:01.822448 17472 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.822453 17472 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.822459 17472 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.822464 17472 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.822470 17472 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.822475 17472 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.822481 17472 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.822487 17472 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.822494 17472 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:29:01.822499 17472 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:29:01.822504 17472 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.822510 17472 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.822516 17472 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.822522 17472 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.822532 17472 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.822540 17472 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:29:01.822544 17472 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.822551 17472 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.822556 17472 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:29:01.822561 17472 net.cpp:226] pre_relu needs backward computation.\nI0817 16:29:01.822566 17472 net.cpp:226] pre_scale needs backward computation.\nI0817 16:29:01.822572 17472 net.cpp:226] pre_bn needs backward computation.\nI0817 16:29:01.822577 17472 net.cpp:226] pre_conv needs backward computation.\nI0817 16:29:01.822583 17472 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:29:01.822590 17472 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:29:01.822594 17472 net.cpp:270] This network produces output accuracy\nI0817 16:29:01.822602 17472 net.cpp:270] This network produces output loss\nI0817 16:29:01.822971 17472 net.cpp:283] Network initialization done.\nI0817 16:29:01.832172 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:01.832211 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:01.832260 17472 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:29:01.832653 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:29:01.832670 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:29:01.832680 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:29:01.832690 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:29:01.832700 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:29:01.832708 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:29:01.832717 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:29:01.832726 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:29:01.832736 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:29:01.832743 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:29:01.832753 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:29:01.832761 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:29:01.832769 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:29:01.832778 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:29:01.832787 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:29:01.832795 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:29:01.832813 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:29:01.832823 17472 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:29:01.832831 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:29:01.832849 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:29:01.832859 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:29:01.832867 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:29:01.832880 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:29:01.832890 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:29:01.832898 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:29:01.832906 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:29:01.832914 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:29:01.832923 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:29:01.832931 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:29:01.832939 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:29:01.832948 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:29:01.832957 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:29:01.832967 17472 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:29:01.832973 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:29:01.832983 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:29:01.832990 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:29:01.832999 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:29:01.833008 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:29:01.833016 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:29:01.833024 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:29:01.833036 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:29:01.833045 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:29:01.833052 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:29:01.833061 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:29:01.833070 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:29:01.833078 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:29:01.833087 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:29:01.833094 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:29:01.833104 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:29:01.833122 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:29:01.833130 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:29:01.833138 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:29:01.833148 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:29:01.833156 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:29:01.833165 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:29:01.833173 17472 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:29:01.834818 17472 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0817 16:29:01.836421 17472 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:29:01.836622 17472 net.cpp:100] Creating Layer dataLayer\nI0817 16:29:01.836642 17472 net.cpp:408] dataLayer -> data_top\nI0817 16:29:01.836658 17472 net.cpp:408] dataLayer -> label\nI0817 16:29:01.836669 17472 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:29:01.846402 17479 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:29:01.846658 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:01.854619 17472 net.cpp:150] Setting up dataLayer\nI0817 16:29:01.854640 17472 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:29:01.854651 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.854656 17472 net.cpp:165] Memory required for data: 1536500\nI0817 16:29:01.854663 17472 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:29:01.854673 17472 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:29:01.854678 17472 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:29:01.854693 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:29:01.854706 17472 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:29:01.854806 17472 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:29:01.854826 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.854833 17472 net.cpp:157] Top shape: 125 (125)\nI0817 16:29:01.854838 17472 net.cpp:165] Memory required for data: 1537500\nI0817 16:29:01.854846 17472 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:29:01.854861 17472 net.cpp:100] Creating Layer pre_conv\nI0817 16:29:01.854867 17472 net.cpp:434] pre_conv <- data_top\nI0817 16:29:01.854883 17472 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:29:01.855304 17472 net.cpp:150] Setting up pre_conv\nI0817 16:29:01.855329 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.855336 17472 net.cpp:165] Memory required for data: 9729500\nI0817 16:29:01.855351 17472 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:29:01.855370 17472 net.cpp:100] Creating Layer pre_bn\nI0817 16:29:01.855376 17472 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:29:01.855383 17472 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:29:01.855681 17472 net.cpp:150] Setting up pre_bn\nI0817 16:29:01.855695 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.855700 17472 net.cpp:165] Memory required for data: 17921500\nI0817 16:29:01.855718 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:01.855729 17472 net.cpp:100] Creating Layer pre_scale\nI0817 16:29:01.855734 17472 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:29:01.855742 17472 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:29:01.855850 17472 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:29:01.856075 17472 net.cpp:150] Setting up pre_scale\nI0817 16:29:01.856089 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.856094 17472 net.cpp:165] Memory required for data: 26113500\nI0817 16:29:01.856104 17472 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:29:01.856117 17472 net.cpp:100] Creating Layer pre_relu\nI0817 16:29:01.856124 17472 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:29:01.856133 17472 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:29:01.856143 17472 net.cpp:150] Setting up pre_relu\nI0817 16:29:01.856150 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.856154 17472 net.cpp:165] Memory required for data: 34305500\nI0817 16:29:01.856159 
17472 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:29:01.856168 17472 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:29:01.856174 17472 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:29:01.856209 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:29:01.856222 17472 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:29:01.856277 17472 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:29:01.856287 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.856293 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.856298 17472 net.cpp:165] Memory required for data: 50689500\nI0817 16:29:01.856303 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:29:01.856318 17472 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:29:01.856324 17472 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:29:01.856336 17472 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:29:01.856732 17472 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:29:01.856750 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.856755 17472 net.cpp:165] Memory required for data: 58881500\nI0817 16:29:01.856766 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:29:01.856783 17472 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:29:01.856791 17472 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:29:01.856806 17472 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:29:01.857393 17472 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:29:01.857409 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.857414 17472 net.cpp:165] Memory required for data: 67073500\nI0817 16:29:01.857429 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:01.857441 17472 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:29:01.857447 17472 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:29:01.857458 17472 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.857524 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:29:01.857709 17472 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:29:01.857722 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.857736 17472 net.cpp:165] Memory required for data: 75265500\nI0817 16:29:01.857746 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:29:01.857754 17472 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:29:01.857762 17472 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:29:01.857774 17472 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.857785 17472 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:29:01.857792 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.857803 17472 net.cpp:165] Memory required for data: 83457500\nI0817 16:29:01.857810 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:29:01.857825 17472 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:29:01.857831 17472 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:29:01.857842 17472 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:29:01.858244 17472 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:29:01.858259 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.858265 17472 net.cpp:165] Memory required for data: 91649500\nI0817 16:29:01.858274 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:29:01.858283 17472 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:29:01.858289 17472 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:29:01.858300 17472 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:29:01.858600 17472 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:29:01.858615 17472 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:29:01.858621 17472 net.cpp:165] Memory required for data: 99841500\nI0817 16:29:01.858639 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:01.858651 17472 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:29:01.858657 17472 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:29:01.858665 17472 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.858731 17472 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:29:01.858922 17472 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:29:01.858937 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.858942 17472 net.cpp:165] Memory required for data: 108033500\nI0817 16:29:01.858955 17472 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:29:01.858964 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:29:01.858970 17472 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:29:01.858979 17472 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:29:01.858992 17472 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:29:01.859033 17472 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:29:01.859046 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.859051 17472 net.cpp:165] Memory required for data: 116225500\nI0817 16:29:01.859057 17472 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:29:01.859064 17472 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:29:01.859071 17472 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:29:01.859081 17472 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.859091 17472 net.cpp:150] Setting up L1_b1_relu\nI0817 16:29:01.859097 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.859102 17472 net.cpp:165] Memory required for data: 124417500\nI0817 16:29:01.859107 17472 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.859123 17472 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.859130 17472 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:29:01.859138 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:01.859149 17472 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:01.859201 17472 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:29:01.859220 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.859230 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.859235 17472 net.cpp:165] Memory required for data: 140801500\nI0817 16:29:01.859239 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:29:01.859251 17472 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:29:01.859259 17472 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:29:01.859272 17472 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:29:01.859699 17472 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:29:01.859714 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.859722 17472 net.cpp:165] Memory required for data: 148993500\nI0817 16:29:01.859731 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:29:01.859740 17472 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:29:01.859746 17472 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:29:01.859757 17472 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:29:01.860082 17472 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:29:01.860100 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.860105 17472 net.cpp:165] Memory required for data: 157185500\nI0817 16:29:01.860116 17472 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:29:01.860127 17472 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:29:01.860133 17472 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:29:01.860141 17472 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.860211 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:29:01.860582 17472 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:29:01.860597 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.860602 17472 net.cpp:165] Memory required for data: 165377500\nI0817 16:29:01.860612 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:29:01.860620 17472 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:29:01.860625 17472 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:29:01.860641 17472 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.860651 17472 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:29:01.860658 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.860662 17472 net.cpp:165] Memory required for data: 173569500\nI0817 16:29:01.860669 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:29:01.860684 17472 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:29:01.860690 17472 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:29:01.860699 17472 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:29:01.861106 17472 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:29:01.861120 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.861125 17472 net.cpp:165] Memory required for data: 181761500\nI0817 16:29:01.861135 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:29:01.861146 17472 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:29:01.861155 17472 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:29:01.861165 17472 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:29:01.861476 17472 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:29:01.861491 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.861497 17472 net.cpp:165] Memory required for data: 189953500\nI0817 16:29:01.861516 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:01.861526 17472 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:29:01.861531 17472 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:29:01.861541 17472 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.861608 17472 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:29:01.861793 17472 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:29:01.861814 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.861821 17472 net.cpp:165] Memory required for data: 198145500\nI0817 16:29:01.861840 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:29:01.861856 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:29:01.861863 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:29:01.861871 17472 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:29:01.861878 17472 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:29:01.861922 17472 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:29:01.861932 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.861935 17472 net.cpp:165] Memory required for data: 206337500\nI0817 16:29:01.861941 17472 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:29:01.861951 17472 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:29:01.861958 17472 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:29:01.861968 17472 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:29:01.861977 17472 net.cpp:150] Setting up L1_b2_relu\nI0817 16:29:01.861987 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.861991 17472 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:29:01.861996 17472 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.862004 17472 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.862009 17472 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:29:01.862018 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:01.862028 17472 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:01.862084 17472 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:29:01.862094 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.862100 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.862105 17472 net.cpp:165] Memory required for data: 230913500\nI0817 16:29:01.862112 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:29:01.862124 17472 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:29:01.862130 17472 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:29:01.862143 17472 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:29:01.862560 17472 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:29:01.862574 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.862579 17472 net.cpp:165] Memory required for data: 239105500\nI0817 16:29:01.862588 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:29:01.862601 17472 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:29:01.862607 17472 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:29:01.862618 17472 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:29:01.862944 17472 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:29:01.862959 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.862965 17472 net.cpp:165] Memory required for data: 
247297500\nI0817 16:29:01.862977 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:01.862987 17472 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:29:01.862996 17472 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:29:01.863005 17472 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.863071 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:29:01.863266 17472 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:29:01.863281 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.863287 17472 net.cpp:165] Memory required for data: 255489500\nI0817 16:29:01.863297 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:29:01.863309 17472 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:29:01.863317 17472 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:29:01.863328 17472 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.863346 17472 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:29:01.863356 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.863363 17472 net.cpp:165] Memory required for data: 263681500\nI0817 16:29:01.863368 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:29:01.863381 17472 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:29:01.863389 17472 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:29:01.863402 17472 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:29:01.864008 17472 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:29:01.864027 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.864032 17472 net.cpp:165] Memory required for data: 271873500\nI0817 16:29:01.864042 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:29:01.864054 17472 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:29:01.864063 17472 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:29:01.864115 17472 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:29:01.864429 17472 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:29:01.864445 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.864451 17472 net.cpp:165] Memory required for data: 280065500\nI0817 16:29:01.864461 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:01.864478 17472 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:29:01.864488 17472 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:29:01.864498 17472 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.864562 17472 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:29:01.864742 17472 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:29:01.864756 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.864763 17472 net.cpp:165] Memory required for data: 288257500\nI0817 16:29:01.864773 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:29:01.864786 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:29:01.864794 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:29:01.864811 17472 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:29:01.864820 17472 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:29:01.864868 17472 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:29:01.864878 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.864883 17472 net.cpp:165] Memory required for data: 296449500\nI0817 16:29:01.864888 17472 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:29:01.864898 17472 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:29:01.864907 17472 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:29:01.864917 17472 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.864926 17472 net.cpp:150] Setting up L1_b3_relu\nI0817 16:29:01.864936 17472 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:29:01.864940 17472 net.cpp:165] Memory required for data: 304641500\nI0817 16:29:01.864945 17472 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.864954 17472 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.864959 17472 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:29:01.864966 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:01.864976 17472 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:01.865031 17472 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:29:01.865041 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.865048 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.865052 17472 net.cpp:165] Memory required for data: 321025500\nI0817 16:29:01.865057 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:29:01.865068 17472 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:29:01.865083 17472 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:29:01.865097 17472 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:29:01.865444 17472 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:29:01.865458 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.865463 17472 net.cpp:165] Memory required for data: 329217500\nI0817 16:29:01.865473 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:29:01.865481 17472 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:29:01.865487 17472 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:29:01.865497 17472 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:29:01.865766 17472 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:29:01.865778 17472 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:29:01.865783 17472 net.cpp:165] Memory required for data: 337409500\nI0817 16:29:01.865793 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:01.865811 17472 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:29:01.865818 17472 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:29:01.865826 17472 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.865883 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:29:01.866066 17472 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:29:01.866081 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.866087 17472 net.cpp:165] Memory required for data: 345601500\nI0817 16:29:01.866096 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:29:01.866104 17472 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:29:01.866111 17472 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:29:01.866122 17472 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.866132 17472 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:29:01.866138 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.866143 17472 net.cpp:165] Memory required for data: 353793500\nI0817 16:29:01.866147 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:29:01.866161 17472 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:29:01.866168 17472 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:29:01.866178 17472 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:29:01.866523 17472 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:29:01.866535 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.866540 17472 net.cpp:165] Memory required for data: 361985500\nI0817 16:29:01.866549 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:29:01.866559 17472 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:29:01.866564 17472 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:29:01.866572 17472 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:29:01.866849 17472 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:29:01.866864 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.866869 17472 net.cpp:165] Memory required for data: 370177500\nI0817 16:29:01.866879 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:01.866890 17472 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:29:01.866897 17472 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:29:01.866907 17472 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.866963 17472 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:29:01.867116 17472 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:29:01.867130 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867135 17472 net.cpp:165] Memory required for data: 378369500\nI0817 16:29:01.867143 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:29:01.867156 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:29:01.867161 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:29:01.867168 17472 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:29:01.867183 17472 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:29:01.867221 17472 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:29:01.867231 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867235 17472 net.cpp:165] Memory required for data: 386561500\nI0817 16:29:01.867240 17472 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:29:01.867247 17472 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:29:01.867254 17472 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:29:01.867264 17472 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:29:01.867272 17472 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:29:01.867280 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867285 17472 net.cpp:165] Memory required for data: 394753500\nI0817 16:29:01.867290 17472 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.867296 17472 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.867301 17472 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:29:01.867308 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:01.867317 17472 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:01.867365 17472 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:29:01.867377 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867384 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867388 17472 net.cpp:165] Memory required for data: 411137500\nI0817 16:29:01.867393 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:29:01.867403 17472 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:29:01.867409 17472 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:29:01.867422 17472 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:29:01.867794 17472 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:29:01.867816 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.867821 17472 net.cpp:165] Memory required for data: 419329500\nI0817 16:29:01.867871 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:29:01.867887 17472 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:29:01.867893 17472 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:29:01.867902 17472 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:29:01.868175 17472 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:29:01.868187 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.868192 17472 net.cpp:165] Memory required for data: 427521500\nI0817 16:29:01.868203 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:01.868216 17472 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:29:01.868221 17472 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:29:01.868229 17472 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.868286 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:29:01.868450 17472 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:29:01.868463 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.868468 17472 net.cpp:165] Memory required for data: 435713500\nI0817 16:29:01.868479 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:29:01.868489 17472 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:29:01.868495 17472 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:29:01.868502 17472 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.868511 17472 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:29:01.868518 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.868523 17472 net.cpp:165] Memory required for data: 443905500\nI0817 16:29:01.868527 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:29:01.868548 17472 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:29:01.868556 17472 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:29:01.868566 17472 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:29:01.868922 17472 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:29:01.868937 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.868942 17472 net.cpp:165] Memory required for data: 452097500\nI0817 16:29:01.868952 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:29:01.868959 17472 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:29:01.868965 17472 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:29:01.868973 17472 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:29:01.869246 17472 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:29:01.869261 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869266 17472 net.cpp:165] Memory required for data: 460289500\nI0817 16:29:01.869276 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:01.869287 17472 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:29:01.869292 17472 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:29:01.869300 17472 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.869360 17472 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:29:01.869536 17472 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:29:01.869550 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869555 17472 net.cpp:165] Memory required for data: 468481500\nI0817 16:29:01.869565 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:29:01.869573 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:29:01.869580 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:29:01.869586 17472 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:29:01.869597 17472 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:29:01.869637 17472 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:29:01.869648 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869653 17472 net.cpp:165] Memory required for data: 476673500\nI0817 16:29:01.869658 17472 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:29:01.869665 17472 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:29:01.869670 17472 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:29:01.869683 17472 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.869693 17472 net.cpp:150] Setting up L1_b5_relu\nI0817 16:29:01.869699 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869704 17472 net.cpp:165] Memory required for data: 484865500\nI0817 16:29:01.869707 17472 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.869714 17472 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.869719 17472 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:29:01.869726 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:01.869736 17472 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:01.869786 17472 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:29:01.869797 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869809 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.869814 17472 net.cpp:165] Memory required for data: 501249500\nI0817 16:29:01.869819 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:29:01.869830 17472 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:29:01.869837 17472 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:29:01.869848 17472 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:29:01.870210 17472 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:29:01.870225 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.870237 17472 net.cpp:165] Memory required for data: 509441500\nI0817 16:29:01.870246 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:29:01.870255 17472 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:29:01.870261 17472 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:29:01.870272 17472 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:29:01.870546 17472 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:29:01.870559 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.870564 17472 net.cpp:165] Memory required for data: 517633500\nI0817 16:29:01.870574 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:01.870587 17472 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:29:01.870594 17472 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:29:01.870601 17472 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.870658 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:29:01.870826 17472 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:29:01.870839 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.870844 17472 net.cpp:165] Memory required for data: 525825500\nI0817 16:29:01.870853 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:29:01.870862 17472 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:29:01.870867 17472 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:29:01.870877 17472 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.870887 17472 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:29:01.870894 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.870899 17472 net.cpp:165] Memory required for data: 534017500\nI0817 16:29:01.870904 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:29:01.870918 17472 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:29:01.870923 17472 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:29:01.870931 17472 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:29:01.871281 17472 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:29:01.871295 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.871300 17472 net.cpp:165] Memory required for data: 542209500\nI0817 16:29:01.871309 17472 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:29:01.871321 17472 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:29:01.871328 17472 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:29:01.871336 17472 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:29:01.871629 17472 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:29:01.871644 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.871649 17472 net.cpp:165] Memory required for data: 550401500\nI0817 16:29:01.871660 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:01.871672 17472 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:29:01.871678 17472 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:29:01.871686 17472 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.871744 17472 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:29:01.871912 17472 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:29:01.871925 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.871930 17472 net.cpp:165] Memory required for data: 558593500\nI0817 16:29:01.871939 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:29:01.871956 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:29:01.871963 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:29:01.871970 17472 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:29:01.871981 17472 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:29:01.872017 17472 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:29:01.872026 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872031 17472 net.cpp:165] Memory required for data: 566785500\nI0817 16:29:01.872045 17472 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:29:01.872056 17472 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:29:01.872061 17472 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:29:01.872068 17472 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.872077 17472 net.cpp:150] Setting up L1_b6_relu\nI0817 16:29:01.872084 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872089 17472 net.cpp:165] Memory required for data: 574977500\nI0817 16:29:01.872094 17472 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.872100 17472 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.872105 17472 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:29:01.872112 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:01.872123 17472 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:01.872172 17472 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:29:01.872185 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872191 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872196 17472 net.cpp:165] Memory required for data: 591361500\nI0817 16:29:01.872201 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:29:01.872215 17472 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:29:01.872221 17472 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:29:01.872232 17472 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:29:01.872586 17472 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:29:01.872601 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872606 17472 net.cpp:165] Memory required for data: 599553500\nI0817 16:29:01.872613 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:29:01.872625 17472 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:29:01.872632 17472 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:29:01.872639 17472 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:29:01.872921 17472 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:29:01.872937 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.872942 17472 net.cpp:165] Memory required for data: 607745500\nI0817 16:29:01.872953 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:01.872961 17472 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:29:01.872967 17472 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:29:01.872974 17472 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.873034 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:29:01.873198 17472 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:29:01.873210 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.873215 17472 net.cpp:165] Memory required for data: 615937500\nI0817 16:29:01.873225 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:29:01.873232 17472 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:29:01.873239 17472 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:29:01.873250 17472 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.873258 17472 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:29:01.873265 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.873270 17472 net.cpp:165] Memory required for data: 624129500\nI0817 16:29:01.873275 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:29:01.873286 17472 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:29:01.873291 17472 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:29:01.873301 17472 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:29:01.873658 17472 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:29:01.873679 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.873684 17472 
net.cpp:165] Memory required for data: 632321500\nI0817 16:29:01.873694 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:29:01.873703 17472 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:29:01.873708 17472 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:29:01.873719 17472 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:29:01.873998 17472 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:29:01.874014 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874020 17472 net.cpp:165] Memory required for data: 640513500\nI0817 16:29:01.874030 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:01.874039 17472 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:29:01.874045 17472 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:29:01.874053 17472 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.874110 17472 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:29:01.874271 17472 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:29:01.874284 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874289 17472 net.cpp:165] Memory required for data: 648705500\nI0817 16:29:01.874299 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:29:01.874310 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:29:01.874316 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:29:01.874323 17472 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:29:01.874330 17472 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:29:01.874367 17472 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:29:01.874378 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874383 17472 net.cpp:165] Memory required for data: 656897500\nI0817 16:29:01.874388 17472 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:29:01.874395 17472 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:29:01.874402 17472 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:29:01.874411 17472 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.874420 17472 net.cpp:150] Setting up L1_b7_relu\nI0817 16:29:01.874428 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874433 17472 net.cpp:165] Memory required for data: 665089500\nI0817 16:29:01.874436 17472 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.874444 17472 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.874449 17472 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:29:01.874455 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:01.874464 17472 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:01.874514 17472 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:29:01.874526 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874531 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874536 17472 net.cpp:165] Memory required for data: 681473500\nI0817 16:29:01.874541 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:29:01.874554 17472 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:29:01.874560 17472 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:29:01.874569 17472 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:29:01.874933 17472 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:29:01.874948 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.874953 17472 net.cpp:165] Memory required for data: 689665500\nI0817 16:29:01.874961 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:29:01.874970 17472 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:29:01.874976 17472 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:29:01.874994 17472 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:29:01.875269 17472 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:29:01.875285 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.875291 17472 net.cpp:165] Memory required for data: 697857500\nI0817 16:29:01.875301 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:01.875309 17472 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:29:01.875315 17472 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:29:01.875324 17472 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.875380 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:29:01.875541 17472 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:29:01.875555 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.875560 17472 net.cpp:165] Memory required for data: 706049500\nI0817 16:29:01.875569 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:29:01.875579 17472 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:29:01.875586 17472 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:29:01.875592 17472 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.875605 17472 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:29:01.875612 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.875617 17472 net.cpp:165] Memory required for data: 714241500\nI0817 16:29:01.875622 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:29:01.875632 17472 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:29:01.875638 17472 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:29:01.875648 17472 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:29:01.876006 17472 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:29:01.876021 17472 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876025 17472 net.cpp:165] Memory required for data: 722433500\nI0817 16:29:01.876034 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:29:01.876044 17472 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:29:01.876049 17472 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:29:01.876060 17472 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:29:01.876334 17472 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:29:01.876348 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876353 17472 net.cpp:165] Memory required for data: 730625500\nI0817 16:29:01.876363 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:01.876376 17472 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:29:01.876382 17472 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:29:01.876390 17472 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.876447 17472 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:29:01.876607 17472 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:29:01.876621 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876626 17472 net.cpp:165] Memory required for data: 738817500\nI0817 16:29:01.876634 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:29:01.876646 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:29:01.876652 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:29:01.876659 17472 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:29:01.876667 17472 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:29:01.876703 17472 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:29:01.876715 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876719 17472 net.cpp:165] Memory required for data: 747009500\nI0817 16:29:01.876724 17472 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:29:01.876732 17472 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:29:01.876737 17472 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:29:01.876747 17472 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.876763 17472 net.cpp:150] Setting up L1_b8_relu\nI0817 16:29:01.876771 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876776 17472 net.cpp:165] Memory required for data: 755201500\nI0817 16:29:01.876780 17472 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.876787 17472 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.876792 17472 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:29:01.876806 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:01.876816 17472 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:01.876868 17472 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:29:01.876879 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876886 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.876890 17472 net.cpp:165] Memory required for data: 771585500\nI0817 16:29:01.876895 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:29:01.876906 17472 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:29:01.876912 17472 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:29:01.876924 17472 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:29:01.877285 17472 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:29:01.877305 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.877310 17472 net.cpp:165] Memory required for data: 779777500\nI0817 16:29:01.877318 17472 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:29:01.877327 17472 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:29:01.877333 17472 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:29:01.877343 17472 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:29:01.877617 17472 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:29:01.877631 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.877636 17472 net.cpp:165] Memory required for data: 787969500\nI0817 16:29:01.877646 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:01.877655 17472 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:29:01.877660 17472 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:29:01.877671 17472 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.877728 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:29:01.877897 17472 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:29:01.877914 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.877919 17472 net.cpp:165] Memory required for data: 796161500\nI0817 16:29:01.877928 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:29:01.877936 17472 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:29:01.877943 17472 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:29:01.877950 17472 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.877960 17472 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:29:01.877967 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.877971 17472 net.cpp:165] Memory required for data: 804353500\nI0817 16:29:01.877976 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:29:01.877991 17472 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:29:01.877997 17472 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:29:01.878010 17472 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:29:01.878368 17472 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:29:01.878382 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.878387 17472 net.cpp:165] Memory required for data: 812545500\nI0817 16:29:01.878396 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:29:01.878407 17472 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:29:01.878414 17472 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:29:01.878432 17472 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:29:01.878705 17472 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:29:01.878717 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.878722 17472 net.cpp:165] Memory required for data: 820737500\nI0817 16:29:01.878754 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:01.878763 17472 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:29:01.878769 17472 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:29:01.878779 17472 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.878842 17472 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:29:01.879006 17472 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:29:01.879020 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.879025 17472 net.cpp:165] Memory required for data: 828929500\nI0817 16:29:01.879034 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:29:01.879045 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:29:01.879052 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:29:01.879060 17472 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:29:01.879066 17472 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:29:01.879101 17472 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:29:01.879112 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.879117 17472 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:29:01.879122 17472 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:29:01.879132 17472 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:29:01.879139 17472 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:29:01.879146 17472 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.879155 17472 net.cpp:150] Setting up L1_b9_relu\nI0817 16:29:01.879163 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.879166 17472 net.cpp:165] Memory required for data: 845313500\nI0817 16:29:01.879171 17472 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.879181 17472 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.879186 17472 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:29:01.879194 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:01.879204 17472 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:01.879254 17472 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:29:01.879266 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.879272 17472 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:29:01.879276 17472 net.cpp:165] Memory required for data: 861697500\nI0817 16:29:01.879281 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:29:01.879294 17472 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:29:01.879300 17472 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:29:01.879309 17472 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:29:01.879668 17472 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:29:01.879681 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.879686 17472 net.cpp:165] Memory required for data: 
863745500\nI0817 16:29:01.879695 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:29:01.879707 17472 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:29:01.879714 17472 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:29:01.879724 17472 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:29:01.879995 17472 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:29:01.880012 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.880017 17472 net.cpp:165] Memory required for data: 865793500\nI0817 16:29:01.880034 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:01.880043 17472 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:29:01.880049 17472 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:29:01.880058 17472 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.880117 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:29:01.880275 17472 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:29:01.880288 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.880293 17472 net.cpp:165] Memory required for data: 867841500\nI0817 16:29:01.880302 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:29:01.880309 17472 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:29:01.880316 17472 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:29:01.880326 17472 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.880336 17472 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:29:01.880342 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.880347 17472 net.cpp:165] Memory required for data: 869889500\nI0817 16:29:01.880352 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:29:01.880365 17472 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:29:01.880372 17472 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:29:01.880380 17472 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:29:01.880734 17472 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:29:01.880748 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.880753 17472 net.cpp:165] Memory required for data: 871937500\nI0817 16:29:01.880761 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:29:01.880775 17472 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:29:01.880782 17472 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:29:01.880790 17472 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:29:01.881063 17472 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:29:01.881078 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.881083 17472 net.cpp:165] Memory required for data: 873985500\nI0817 16:29:01.881093 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:01.881108 17472 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:29:01.881114 17472 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:29:01.881121 17472 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.881181 17472 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:29:01.881337 17472 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:29:01.881350 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.881356 17472 net.cpp:165] Memory required for data: 876033500\nI0817 16:29:01.881364 17472 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:29:01.881376 17472 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:29:01.881383 17472 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:29:01.881394 17472 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:29:01.881423 17472 net.cpp:150] Setting up L2_b1_pool\nI0817 16:29:01.881433 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.881438 17472 net.cpp:165] Memory required for data: 878081500\nI0817 16:29:01.881443 17472 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:29:01.881454 17472 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:29:01.881461 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:29:01.881467 17472 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:29:01.881475 17472 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:29:01.881507 17472 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:29:01.881518 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.881523 17472 net.cpp:165] Memory required for data: 880129500\nI0817 16:29:01.881528 17472 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:29:01.881536 17472 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:29:01.881548 17472 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:29:01.881559 17472 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.881569 17472 net.cpp:150] Setting up L2_b1_relu\nI0817 16:29:01.881577 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.881582 17472 net.cpp:165] Memory required for data: 882177500\nI0817 16:29:01.881587 17472 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:29:01.881595 17472 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:29:01.881603 17472 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:29:01.883807 17472 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:29:01.883826 17472 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:29:01.883831 17472 net.cpp:165] Memory required for data: 884225500\nI0817 16:29:01.883836 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:29:01.883848 17472 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:29:01.883855 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:29:01.883863 17472 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:29:01.883872 17472 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:29:01.883919 17472 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:29:01.883931 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.883935 17472 net.cpp:165] Memory required for data: 888321500\nI0817 16:29:01.883941 17472 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.883949 17472 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.883955 17472 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:29:01.883965 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:01.883975 17472 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:01.884026 17472 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:29:01.884039 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.884045 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.884050 17472 net.cpp:165] Memory required for data: 896513500\nI0817 16:29:01.884057 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:29:01.884068 17472 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:29:01.884073 17472 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:29:01.884083 17472 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:29:01.884587 17472 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:29:01.884600 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.884605 17472 net.cpp:165] Memory required for data: 900609500\nI0817 16:29:01.884614 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:29:01.884626 17472 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:29:01.884634 17472 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:29:01.884641 17472 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:29:01.884925 17472 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:29:01.884938 17472 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:29:01.884943 17472 net.cpp:165] Memory required for data: 904705500\nI0817 16:29:01.884954 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:01.884963 17472 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:29:01.884969 17472 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:29:01.884982 17472 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.885042 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:29:01.885201 17472 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:29:01.885215 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.885220 17472 net.cpp:165] Memory required for data: 908801500\nI0817 16:29:01.885228 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:29:01.885236 17472 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:29:01.885251 17472 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:29:01.885262 17472 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.885272 17472 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:29:01.885279 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.885283 17472 net.cpp:165] Memory required for data: 912897500\nI0817 16:29:01.885288 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:29:01.885303 17472 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:29:01.885308 17472 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:29:01.885316 17472 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:29:01.885812 17472 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:29:01.885826 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.885831 17472 net.cpp:165] Memory required for data: 916993500\nI0817 16:29:01.885840 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:29:01.885852 17472 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:29:01.885859 
17472 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:29:01.885867 17472 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:29:01.886129 17472 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:29:01.886142 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886147 17472 net.cpp:165] Memory required for data: 921089500\nI0817 16:29:01.886157 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:01.886167 17472 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:29:01.886173 17472 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:29:01.886183 17472 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.886241 17472 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:29:01.886395 17472 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:29:01.886409 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886414 17472 net.cpp:165] Memory required for data: 925185500\nI0817 16:29:01.886422 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:29:01.886431 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:29:01.886437 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:29:01.886445 17472 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:29:01.886456 17472 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:29:01.886483 17472 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:29:01.886492 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886497 17472 net.cpp:165] Memory required for data: 929281500\nI0817 16:29:01.886502 17472 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:29:01.886512 17472 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:29:01.886519 17472 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:29:01.886526 17472 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:29:01.886535 17472 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:29:01.886543 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886548 17472 net.cpp:165] Memory required for data: 933377500\nI0817 16:29:01.886553 17472 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.886559 17472 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.886564 17472 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:29:01.886576 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:01.886586 17472 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:01.886631 17472 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:29:01.886646 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886653 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.886657 17472 net.cpp:165] Memory required for data: 941569500\nI0817 16:29:01.886669 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:29:01.886680 17472 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:29:01.886687 17472 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:29:01.886696 17472 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:29:01.887202 17472 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:29:01.887217 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.887221 17472 net.cpp:165] Memory required for data: 945665500\nI0817 16:29:01.887231 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:29:01.887243 17472 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:29:01.887250 17472 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:29:01.887259 17472 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:29:01.887523 17472 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:29:01.887537 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.887542 17472 net.cpp:165] Memory required for data: 949761500\nI0817 16:29:01.887552 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:01.887560 17472 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:29:01.887567 17472 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:29:01.887576 17472 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.887634 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:29:01.887791 17472 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:29:01.887809 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.887815 17472 net.cpp:165] Memory required for data: 953857500\nI0817 16:29:01.887825 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:29:01.887832 17472 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:29:01.887838 17472 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:29:01.887850 17472 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.887859 17472 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:29:01.887866 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.887871 17472 net.cpp:165] Memory required for data: 957953500\nI0817 16:29:01.887876 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:29:01.887888 17472 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:29:01.887894 17472 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:29:01.887903 17472 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:29:01.888396 17472 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:29:01.888411 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.888415 17472 net.cpp:165] Memory required for data: 962049500\nI0817 16:29:01.888424 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:29:01.888437 17472 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:29:01.888442 17472 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:29:01.888451 17472 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:29:01.888720 17472 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:29:01.888732 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.888737 17472 net.cpp:165] Memory required for data: 966145500\nI0817 16:29:01.888748 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:01.888756 17472 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:29:01.888763 17472 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:29:01.888770 17472 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.888837 17472 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:29:01.888996 17472 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:29:01.889012 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889017 17472 net.cpp:165] Memory required for data: 970241500\nI0817 16:29:01.889026 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:29:01.889035 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:29:01.889048 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:29:01.889056 17472 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:29:01.889065 17472 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:29:01.889096 17472 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:29:01.889108 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889113 17472 net.cpp:165] Memory required for data: 974337500\nI0817 16:29:01.889118 17472 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:29:01.889140 17472 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:29:01.889147 17472 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:29:01.889154 17472 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.889164 17472 net.cpp:150] Setting up L2_b3_relu\nI0817 16:29:01.889170 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889175 17472 net.cpp:165] Memory required for data: 978433500\nI0817 16:29:01.889180 17472 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.889189 17472 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.889194 17472 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:29:01.889200 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:01.889210 17472 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:01.889262 17472 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:29:01.889274 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889281 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889286 17472 net.cpp:165] Memory required for data: 986625500\nI0817 16:29:01.889289 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:29:01.889303 17472 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:29:01.889310 17472 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:29:01.889322 17472 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:29:01.889819 17472 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:29:01.889833 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.889838 17472 net.cpp:165] Memory required for data: 990721500\nI0817 16:29:01.889847 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:29:01.889859 17472 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:29:01.889866 17472 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:29:01.889878 17472 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:29:01.890148 17472 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:29:01.890161 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.890166 17472 net.cpp:165] Memory required for data: 994817500\nI0817 16:29:01.890177 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:01.890185 17472 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:29:01.890192 17472 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:29:01.890199 17472 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.890264 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:29:01.890421 17472 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:29:01.890434 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.890439 17472 net.cpp:165] Memory required for data: 998913500\nI0817 16:29:01.890449 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:29:01.890460 17472 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:29:01.890467 17472 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:29:01.890475 17472 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.890485 17472 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:29:01.890491 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.890503 17472 net.cpp:165] Memory required for data: 1003009500\nI0817 16:29:01.890509 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:29:01.890522 17472 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:29:01.890529 17472 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:29:01.890538 17472 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:29:01.891036 17472 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:29:01.891050 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891055 17472 net.cpp:165] Memory required for data: 1007105500\nI0817 16:29:01.891064 17472 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:29:01.891078 17472 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:29:01.891085 17472 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:29:01.891093 17472 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:29:01.891360 17472 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:29:01.891376 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891381 17472 net.cpp:165] Memory required for data: 1011201500\nI0817 16:29:01.891392 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:01.891402 17472 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:29:01.891407 17472 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:29:01.891414 17472 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.891472 17472 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:29:01.891635 17472 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:29:01.891649 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891654 17472 net.cpp:165] Memory required for data: 1015297500\nI0817 16:29:01.891662 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:29:01.891671 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:29:01.891677 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:29:01.891685 17472 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:29:01.891695 17472 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:29:01.891723 17472 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:29:01.891732 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891737 17472 net.cpp:165] Memory required for data: 1019393500\nI0817 16:29:01.891742 17472 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:29:01.891753 17472 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:29:01.891759 17472 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:29:01.891767 17472 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:29:01.891775 17472 net.cpp:150] Setting up L2_b4_relu\nI0817 16:29:01.891782 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891788 17472 net.cpp:165] Memory required for data: 1023489500\nI0817 16:29:01.891793 17472 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.891804 17472 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.891810 17472 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:29:01.891819 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:01.891829 17472 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:01.891880 17472 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:29:01.891892 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891899 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.891904 17472 net.cpp:165] Memory required for data: 1031681500\nI0817 16:29:01.891909 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:29:01.891923 17472 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:29:01.891929 17472 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:29:01.891938 17472 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:29:01.892442 17472 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:29:01.892457 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.892462 17472 net.cpp:165] Memory required for data: 1035777500\nI0817 16:29:01.892470 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:29:01.892482 17472 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:29:01.892488 17472 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:29:01.892499 17472 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:29:01.892766 17472 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:29:01.892779 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.892784 17472 net.cpp:165] Memory required for data: 1039873500\nI0817 16:29:01.892796 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:01.892809 17472 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:29:01.892815 17472 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:29:01.892823 17472 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.892884 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:29:01.893045 17472 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:29:01.893059 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.893064 17472 net.cpp:165] Memory required for data: 1043969500\nI0817 16:29:01.893074 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:29:01.893101 17472 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:29:01.893107 17472 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:29:01.893118 17472 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.893128 17472 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:29:01.893136 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.893141 17472 net.cpp:165] Memory required for data: 1048065500\nI0817 16:29:01.893146 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:29:01.893158 17472 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:29:01.893164 17472 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:29:01.893172 17472 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:29:01.893668 17472 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:29:01.893682 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.893687 17472 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:29:01.893697 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:29:01.893708 17472 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:29:01.893714 17472 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:29:01.893723 17472 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:29:01.894004 17472 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:29:01.894021 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894026 17472 net.cpp:165] Memory required for data: 1056257500\nI0817 16:29:01.894037 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:01.894044 17472 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:29:01.894052 17472 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:29:01.894058 17472 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.894117 17472 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:29:01.894276 17472 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:29:01.894289 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894294 17472 net.cpp:165] Memory required for data: 1060353500\nI0817 16:29:01.894304 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:29:01.894312 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:29:01.894320 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:29:01.894326 17472 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:29:01.894336 17472 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:29:01.894366 17472 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:29:01.894381 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894387 17472 net.cpp:165] Memory required for data: 1064449500\nI0817 16:29:01.894392 17472 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:29:01.894402 17472 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:29:01.894409 17472 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:29:01.894417 17472 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.894426 17472 net.cpp:150] Setting up L2_b5_relu\nI0817 16:29:01.894433 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894438 17472 net.cpp:165] Memory required for data: 1068545500\nI0817 16:29:01.894443 17472 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.894448 17472 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.894454 17472 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:29:01.894461 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:01.894470 17472 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:01.894521 17472 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:29:01.894533 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894541 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.894544 17472 net.cpp:165] Memory required for data: 1076737500\nI0817 16:29:01.894549 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:29:01.894563 17472 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:29:01.894569 17472 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:29:01.894579 17472 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:29:01.895081 17472 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:29:01.895097 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.895102 17472 net.cpp:165] Memory required for data: 1080833500\nI0817 16:29:01.895110 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:29:01.895123 17472 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:29:01.895128 17472 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:29:01.895136 17472 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:29:01.895403 17472 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:29:01.895418 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.895423 17472 net.cpp:165] Memory required for data: 1084929500\nI0817 16:29:01.895434 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:01.895443 17472 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:29:01.895449 17472 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:29:01.895457 17472 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.895514 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:29:01.895678 17472 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:29:01.895691 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.895696 17472 net.cpp:165] Memory required for data: 1089025500\nI0817 16:29:01.895705 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:29:01.895714 17472 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:29:01.895720 17472 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:29:01.895730 17472 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.895740 17472 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:29:01.895747 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.895752 17472 net.cpp:165] Memory required for data: 1093121500\nI0817 16:29:01.895756 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:29:01.895771 17472 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:29:01.895776 17472 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:29:01.895792 17472 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:29:01.896296 17472 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:29:01.896311 17472 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.896315 17472 net.cpp:165] Memory required for data: 1097217500\nI0817 16:29:01.896324 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:29:01.896335 17472 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:29:01.896342 17472 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:29:01.896350 17472 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:29:01.896616 17472 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:29:01.896630 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.896634 17472 net.cpp:165] Memory required for data: 1101313500\nI0817 16:29:01.896644 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:01.896656 17472 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:29:01.896662 17472 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:29:01.896669 17472 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.896728 17472 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:29:01.896891 17472 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:29:01.896905 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.896910 17472 net.cpp:165] Memory required for data: 1105409500\nI0817 16:29:01.896919 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:29:01.896931 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:29:01.896937 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:29:01.896944 17472 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:29:01.896955 17472 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:29:01.896983 17472 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:29:01.896992 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.896997 17472 net.cpp:165] Memory required for data: 1109505500\nI0817 16:29:01.897003 17472 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:29:01.897011 17472 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:29:01.897017 17472 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:29:01.897027 17472 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.897037 17472 net.cpp:150] Setting up L2_b6_relu\nI0817 16:29:01.897043 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.897048 17472 net.cpp:165] Memory required for data: 1113601500\nI0817 16:29:01.897053 17472 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.897059 17472 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.897064 17472 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:29:01.897073 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:01.897081 17472 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:01.897131 17472 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:29:01.897143 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.897150 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.897155 17472 net.cpp:165] Memory required for data: 1121793500\nI0817 16:29:01.897159 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:29:01.897171 17472 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:29:01.897176 17472 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:29:01.897188 17472 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:29:01.898685 17472 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:29:01.898702 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.898708 17472 net.cpp:165] Memory required for data: 1125889500\nI0817 16:29:01.898717 17472 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:29:01.898738 17472 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:29:01.898746 17472 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:29:01.898754 17472 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:29:01.899034 17472 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:29:01.899047 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.899052 17472 net.cpp:165] Memory required for data: 1129985500\nI0817 16:29:01.899063 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:01.899072 17472 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:29:01.899078 17472 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:29:01.899089 17472 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.899148 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:29:01.899309 17472 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:29:01.899323 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.899328 17472 net.cpp:165] Memory required for data: 1134081500\nI0817 16:29:01.899338 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:29:01.899345 17472 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:29:01.899353 17472 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:29:01.899363 17472 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.899372 17472 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:29:01.899379 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.899384 17472 net.cpp:165] Memory required for data: 1138177500\nI0817 16:29:01.899389 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:29:01.899404 17472 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:29:01.899410 17472 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:29:01.899418 17472 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:29:01.899915 17472 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:29:01.899930 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.899935 17472 net.cpp:165] Memory required for data: 1142273500\nI0817 16:29:01.899943 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:29:01.899955 17472 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:29:01.899962 17472 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:29:01.899971 17472 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:29:01.900240 17472 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:29:01.900254 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900259 17472 net.cpp:165] Memory required for data: 1146369500\nI0817 16:29:01.900269 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:01.900279 17472 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:29:01.900285 17472 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:29:01.900295 17472 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.900353 17472 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:29:01.900512 17472 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:29:01.900526 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900530 17472 net.cpp:165] Memory required for data: 1150465500\nI0817 16:29:01.900539 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:29:01.900548 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:29:01.900555 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:29:01.900563 17472 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:29:01.900573 17472 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:29:01.900602 17472 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:29:01.900611 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900616 17472 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:29:01.900621 17472 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:29:01.900632 17472 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:29:01.900645 17472 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:29:01.900652 17472 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.900662 17472 net.cpp:150] Setting up L2_b7_relu\nI0817 16:29:01.900669 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900674 17472 net.cpp:165] Memory required for data: 1158657500\nI0817 16:29:01.900679 17472 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.900689 17472 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.900694 17472 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:29:01.900702 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:01.900712 17472 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:01.900760 17472 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:29:01.900775 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900782 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.900787 17472 net.cpp:165] Memory required for data: 1166849500\nI0817 16:29:01.900791 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:29:01.900810 17472 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:29:01.900816 17472 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:29:01.900825 17472 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:29:01.901314 17472 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:29:01.901329 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.901334 17472 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:29:01.901342 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:29:01.901355 17472 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:29:01.901360 17472 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:29:01.901370 17472 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:29:01.901643 17472 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:29:01.901655 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.901660 17472 net.cpp:165] Memory required for data: 1175041500\nI0817 16:29:01.901671 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:01.901679 17472 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:29:01.901686 17472 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:29:01.901696 17472 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.901756 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:29:01.901923 17472 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:29:01.901937 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.901942 17472 net.cpp:165] Memory required for data: 1179137500\nI0817 16:29:01.901952 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:29:01.901959 17472 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:29:01.901965 17472 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:29:01.901975 17472 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.901986 17472 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:29:01.901993 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.901998 17472 net.cpp:165] Memory required for data: 1183233500\nI0817 16:29:01.902003 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:29:01.902015 17472 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:29:01.902022 17472 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:29:01.902030 17472 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:29:01.902523 17472 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:29:01.902537 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.902542 17472 net.cpp:165] Memory required for data: 1187329500\nI0817 16:29:01.902551 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:29:01.902570 17472 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:29:01.902576 17472 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:29:01.902585 17472 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:29:01.902868 17472 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:29:01.902882 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.902887 17472 net.cpp:165] Memory required for data: 1191425500\nI0817 16:29:01.902899 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:01.902906 17472 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:29:01.902914 17472 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:29:01.902923 17472 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.902984 17472 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:29:01.903148 17472 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:29:01.903162 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.903167 17472 net.cpp:165] Memory required for data: 1195521500\nI0817 16:29:01.903177 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:29:01.903185 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:29:01.903192 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:29:01.903199 17472 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:29:01.903209 17472 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:29:01.903239 17472 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:29:01.903249 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:01.903254 17472 net.cpp:165] Memory required for data: 1199617500\nI0817 16:29:01.903259 17472 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:29:01.903265 17472 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:29:01.903271 17472 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:29:01.903281 17472 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.903291 17472 net.cpp:150] Setting up L2_b8_relu\nI0817 16:29:01.903298 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.903303 17472 net.cpp:165] Memory required for data: 1203713500\nI0817 16:29:01.903307 17472 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.903314 17472 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.903321 17472 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:29:01.903331 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:01.903354 17472 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:01.903406 17472 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:29:01.903419 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.903425 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.903429 17472 net.cpp:165] Memory required for data: 1211905500\nI0817 16:29:01.903434 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:29:01.903450 17472 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:29:01.903455 17472 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:29:01.903467 17472 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:29:01.903973 17472 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:29:01.903987 17472 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:29:01.903992 17472 net.cpp:165] Memory required for data: 1216001500\nI0817 16:29:01.904001 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:29:01.904014 17472 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:29:01.904021 17472 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:29:01.904029 17472 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:29:01.904305 17472 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:29:01.904325 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.904330 17472 net.cpp:165] Memory required for data: 1220097500\nI0817 16:29:01.904341 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:01.904350 17472 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:29:01.904356 17472 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:29:01.904364 17472 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.904425 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:29:01.904585 17472 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:29:01.904602 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.904606 17472 net.cpp:165] Memory required for data: 1224193500\nI0817 16:29:01.904615 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:29:01.904623 17472 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:29:01.904630 17472 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:29:01.904637 17472 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.904647 17472 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:29:01.904654 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.904659 17472 net.cpp:165] Memory required for data: 1228289500\nI0817 16:29:01.904662 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:29:01.904676 17472 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:29:01.904682 17472 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:29:01.904693 17472 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:29:01.906214 17472 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:29:01.906231 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.906236 17472 net.cpp:165] Memory required for data: 1232385500\nI0817 16:29:01.906246 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:29:01.906258 17472 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:29:01.906265 17472 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:29:01.906275 17472 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:29:01.906541 17472 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:29:01.906554 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.906559 17472 net.cpp:165] Memory required for data: 1236481500\nI0817 16:29:01.906607 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:01.906622 17472 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:29:01.906630 17472 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:29:01.906637 17472 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.906699 17472 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:29:01.906860 17472 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:29:01.906874 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.906879 17472 net.cpp:165] Memory required for data: 1240577500\nI0817 16:29:01.906889 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:29:01.906901 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:29:01.906908 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:29:01.906915 17472 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:29:01.906924 17472 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:29:01.906951 17472 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:29:01.906960 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.906965 17472 net.cpp:165] Memory required for data: 1244673500\nI0817 16:29:01.906970 17472 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:29:01.906981 17472 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:29:01.906987 17472 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:29:01.906994 17472 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.907003 17472 net.cpp:150] Setting up L2_b9_relu\nI0817 16:29:01.907011 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.907023 17472 net.cpp:165] Memory required for data: 1248769500\nI0817 16:29:01.907029 17472 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.907039 17472 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.907045 17472 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:29:01.907052 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:01.907063 17472 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:01.907116 17472 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:29:01.907129 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.907135 17472 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:29:01.907140 17472 net.cpp:165] Memory required for data: 1256961500\nI0817 16:29:01.907145 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:29:01.907158 17472 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:29:01.907166 17472 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:29:01.907174 17472 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:29:01.907671 17472 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:29:01.907686 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.907691 17472 net.cpp:165] Memory required for data: 1257985500\nI0817 16:29:01.907699 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:29:01.907711 17472 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:29:01.907718 17472 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:29:01.907728 17472 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:29:01.908010 17472 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:29:01.908023 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.908028 17472 net.cpp:165] Memory required for data: 1259009500\nI0817 16:29:01.908038 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:01.908047 17472 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:29:01.908053 17472 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:29:01.908064 17472 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.908121 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:29:01.908288 17472 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:29:01.908300 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.908305 17472 net.cpp:165] Memory required for data: 1260033500\nI0817 16:29:01.908314 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:29:01.908324 17472 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:29:01.908329 17472 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:29:01.908339 17472 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:29:01.908350 17472 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:29:01.908357 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.908362 17472 net.cpp:165] Memory required for data: 1261057500\nI0817 16:29:01.908366 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:29:01.908380 17472 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:29:01.908386 17472 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:29:01.908394 17472 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:29:01.908891 17472 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:29:01.908905 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.908910 17472 net.cpp:165] Memory required for data: 1262081500\nI0817 16:29:01.908920 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:29:01.908931 17472 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:29:01.908938 17472 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:29:01.908946 17472 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:29:01.909225 17472 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:29:01.909245 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.909250 17472 net.cpp:165] Memory required for data: 1263105500\nI0817 16:29:01.909260 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:01.909272 17472 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:29:01.909279 17472 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:29:01.909289 17472 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:29:01.909346 17472 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:29:01.909513 17472 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:29:01.909526 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.909531 17472 net.cpp:165] Memory required for data: 1264129500\nI0817 16:29:01.909540 17472 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:29:01.909550 17472 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:29:01.909556 17472 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:29:01.909569 17472 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:29:01.909603 17472 net.cpp:150] Setting up L3_b1_pool\nI0817 16:29:01.909612 17472 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:29:01.909617 17472 net.cpp:165] Memory required for data: 1265153500\nI0817 16:29:01.909622 17472 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:29:01.909636 17472 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:29:01.909642 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:29:01.909649 17472 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:29:01.909657 17472 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:29:01.909689 17472 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:29:01.909698 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.909703 17472 net.cpp:165] Memory required for data: 1266177500\nI0817 16:29:01.909708 17472 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:29:01.909719 17472 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:29:01.909725 17472 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:29:01.909732 17472 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:29:01.909741 17472 net.cpp:150] Setting up L3_b1_relu\nI0817 16:29:01.909749 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.909754 17472 net.cpp:165] Memory required for data: 1267201500\nI0817 16:29:01.909757 17472 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:29:01.909766 17472 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:29:01.909773 17472 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:29:01.910998 17472 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:29:01.911018 17472 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:29:01.911023 17472 net.cpp:165] Memory required for data: 1268225500\nI0817 16:29:01.911029 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:29:01.911038 17472 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:29:01.911046 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:29:01.911052 17472 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:29:01.911062 17472 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:29:01.911104 17472 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:29:01.911116 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.911121 17472 net.cpp:165] Memory required for data: 1270273500\nI0817 16:29:01.911126 17472 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.911137 17472 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.911144 17472 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:29:01.911151 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:01.911161 17472 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:01.911217 17472 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:29:01.911229 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.911247 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.911252 17472 net.cpp:165] Memory required for data: 1274369500\nI0817 16:29:01.911257 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:29:01.911268 17472 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:29:01.911275 17472 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:29:01.911288 17472 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:29:01.912338 17472 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:29:01.912353 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.912358 17472 net.cpp:165] Memory required for data: 1276417500\nI0817 16:29:01.912367 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:29:01.912377 17472 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:29:01.912384 17472 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:29:01.912395 17472 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:29:01.912667 17472 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:29:01.912683 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.912688 17472 net.cpp:165] Memory required for data: 1278465500\nI0817 16:29:01.912698 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:01.912708 17472 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:29:01.912714 17472 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:29:01.912721 17472 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.912781 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:29:01.912947 17472 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:29:01.912961 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.912966 17472 net.cpp:165] Memory required for data: 1280513500\nI0817 16:29:01.912976 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:29:01.912987 17472 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:29:01.912994 17472 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:29:01.913002 17472 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:29:01.913012 17472 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:29:01.913018 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.913022 17472 net.cpp:165] Memory required for data: 1282561500\nI0817 16:29:01.913028 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:29:01.913041 17472 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:29:01.913048 17472 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:29:01.913056 17472 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:29:01.914095 17472 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:29:01.914110 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.914115 17472 net.cpp:165] Memory required for data: 1284609500\nI0817 16:29:01.914124 17472 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:29:01.914136 17472 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:29:01.914144 17472 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:29:01.914155 17472 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:29:01.914427 17472 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:29:01.914440 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.914445 17472 net.cpp:165] Memory required for data: 1286657500\nI0817 16:29:01.914456 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:01.914465 17472 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:29:01.914471 17472 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:29:01.914481 17472 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:29:01.914541 17472 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:29:01.914703 17472 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:29:01.914716 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.914721 17472 net.cpp:165] Memory required for data: 1288705500\nI0817 16:29:01.914737 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:29:01.914747 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:29:01.914753 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:29:01.914762 17472 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:29:01.914772 17472 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:29:01.914815 17472 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:29:01.914827 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.915005 17472 net.cpp:165] Memory required for data: 1290753500\nI0817 16:29:01.915015 17472 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:29:01.915024 17472 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:29:01.915031 17472 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:29:01.915042 17472 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:29:01.915053 17472 net.cpp:150] Setting up L3_b2_relu\nI0817 16:29:01.915061 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.915066 17472 net.cpp:165] Memory required for data: 1292801500\nI0817 16:29:01.915071 17472 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.915077 17472 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.915083 17472 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:29:01.915091 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:01.915100 17472 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:01.915154 17472 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:29:01.915166 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.915172 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.915177 17472 net.cpp:165] Memory required for data: 1296897500\nI0817 16:29:01.915182 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:29:01.915194 17472 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:29:01.915200 17472 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:29:01.915212 17472 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:29:01.916256 17472 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:29:01.916271 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.916276 17472 net.cpp:165] Memory required for data: 1298945500\nI0817 16:29:01.916285 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:29:01.916294 17472 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:29:01.916301 17472 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:29:01.916312 17472 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:29:01.916584 17472 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:29:01.916597 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.916602 17472 net.cpp:165] Memory required for data: 1300993500\nI0817 16:29:01.916613 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:01.916621 17472 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:29:01.916628 17472 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:29:01.916635 17472 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.916695 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:29:01.916864 17472 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:29:01.916878 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.916883 17472 net.cpp:165] Memory required for data: 1303041500\nI0817 16:29:01.916893 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:29:01.916903 17472 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:29:01.916910 17472 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:29:01.916918 17472 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:29:01.916927 17472 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:29:01.916942 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.916947 17472 net.cpp:165] Memory required for data: 1305089500\nI0817 16:29:01.916952 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:29:01.916967 17472 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:29:01.916973 17472 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:29:01.916982 17472 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:29:01.918028 17472 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:29:01.918042 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918047 17472 net.cpp:165] Memory required for data: 1307137500\nI0817 16:29:01.918056 17472 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:29:01.918068 17472 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:29:01.918076 17472 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:29:01.918087 17472 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:29:01.918352 17472 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:29:01.918365 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918370 17472 net.cpp:165] Memory required for data: 1309185500\nI0817 16:29:01.918381 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:01.918390 17472 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:29:01.918396 17472 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:29:01.918406 17472 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:29:01.918467 17472 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:29:01.918625 17472 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:29:01.918638 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918643 17472 net.cpp:165] Memory required for data: 1311233500\nI0817 16:29:01.918653 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:29:01.918664 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:29:01.918671 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:29:01.918678 17472 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:29:01.918686 17472 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:29:01.918723 17472 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:29:01.918735 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918740 17472 net.cpp:165] Memory required for data: 1313281500\nI0817 16:29:01.918745 17472 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:29:01.918752 17472 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:29:01.918758 17472 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:29:01.918768 17472 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:29:01.918778 17472 net.cpp:150] Setting up L3_b3_relu\nI0817 16:29:01.918786 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918789 17472 net.cpp:165] Memory required for data: 1315329500\nI0817 16:29:01.918794 17472 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.918807 17472 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.918813 17472 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:29:01.918822 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:01.918830 17472 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:01.918881 17472 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:29:01.918898 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918905 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.918910 17472 net.cpp:165] Memory required for data: 1319425500\nI0817 16:29:01.918915 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:29:01.918926 17472 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:29:01.918932 17472 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:29:01.918951 17472 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:29:01.920006 17472 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:29:01.920020 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.920025 17472 net.cpp:165] Memory required for data: 1321473500\nI0817 16:29:01.920034 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:29:01.920043 17472 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:29:01.920050 17472 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:29:01.920061 17472 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:29:01.920331 17472 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:29:01.920343 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.920348 17472 net.cpp:165] Memory required for data: 1323521500\nI0817 16:29:01.920359 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:01.920367 17472 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:29:01.920373 17472 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:29:01.920382 17472 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.920444 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:29:01.920605 17472 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:29:01.920620 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.920626 17472 net.cpp:165] Memory required for data: 1325569500\nI0817 16:29:01.920635 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:29:01.920644 17472 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:29:01.920650 17472 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:29:01.920657 17472 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:29:01.920666 17472 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:29:01.920673 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.920678 17472 net.cpp:165] Memory required for data: 1327617500\nI0817 16:29:01.920683 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:29:01.920696 17472 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:29:01.920703 17472 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:29:01.920711 17472 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:29:01.922731 17472 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:29:01.922749 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.922755 17472 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:29:01.922763 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:29:01.922773 17472 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:29:01.922780 17472 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:29:01.922792 17472 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:29:01.923074 17472 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:29:01.923089 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923094 17472 net.cpp:165] Memory required for data: 1331713500\nI0817 16:29:01.923104 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:01.923112 17472 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:29:01.923120 17472 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:29:01.923126 17472 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:29:01.923189 17472 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:29:01.923349 17472 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:29:01.923367 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923372 17472 net.cpp:165] Memory required for data: 1333761500\nI0817 16:29:01.923380 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:29:01.923389 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:29:01.923396 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:29:01.923403 17472 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:29:01.923411 17472 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:29:01.923457 17472 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:29:01.923467 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923472 17472 net.cpp:165] Memory required for data: 1335809500\nI0817 16:29:01.923477 17472 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:29:01.923485 17472 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:29:01.923491 17472 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:29:01.923498 17472 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:29:01.923507 17472 net.cpp:150] Setting up L3_b4_relu\nI0817 16:29:01.923514 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923519 17472 net.cpp:165] Memory required for data: 1337857500\nI0817 16:29:01.923523 17472 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.923530 17472 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.923537 17472 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:29:01.923547 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:01.923557 17472 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:01.923604 17472 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:29:01.923615 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923621 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.923626 17472 net.cpp:165] Memory required for data: 1341953500\nI0817 16:29:01.923631 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:29:01.923646 17472 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:29:01.923652 17472 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:29:01.923661 17472 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:29:01.924700 17472 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:29:01.924713 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.924718 17472 net.cpp:165] Memory required for data: 1344001500\nI0817 16:29:01.924728 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:29:01.924741 17472 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:29:01.924746 17472 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:29:01.924756 17472 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:29:01.925040 17472 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:29:01.925053 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.925057 17472 net.cpp:165] Memory required for data: 1346049500\nI0817 16:29:01.925068 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:01.925081 17472 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:29:01.925086 17472 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:29:01.925094 17472 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.925155 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:29:01.925317 17472 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:29:01.925329 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.925334 17472 net.cpp:165] Memory required for data: 1348097500\nI0817 16:29:01.925344 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:29:01.925355 17472 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:29:01.925361 17472 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:29:01.925371 17472 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:29:01.925381 17472 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:29:01.925388 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.925393 17472 net.cpp:165] Memory required for data: 1350145500\nI0817 16:29:01.925397 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:29:01.925408 17472 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:29:01.925415 17472 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:29:01.925433 17472 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:29:01.926457 17472 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:29:01.926471 17472 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:29:01.926476 17472 net.cpp:165] Memory required for data: 1352193500\nI0817 16:29:01.926486 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:29:01.926498 17472 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:29:01.926504 17472 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:29:01.926513 17472 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:29:01.926784 17472 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:29:01.926797 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.926807 17472 net.cpp:165] Memory required for data: 1354241500\nI0817 16:29:01.926818 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:01.926826 17472 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:29:01.926833 17472 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:29:01.926841 17472 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:29:01.926901 17472 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:29:01.927063 17472 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:29:01.927079 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.927084 17472 net.cpp:165] Memory required for data: 1356289500\nI0817 16:29:01.927093 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:29:01.927103 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:29:01.927109 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:29:01.927116 17472 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:29:01.927124 17472 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:29:01.927160 17472 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:29:01.927172 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.927177 17472 net.cpp:165] Memory required for data: 1358337500\nI0817 16:29:01.927182 17472 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:29:01.927189 17472 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:29:01.927196 17472 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:29:01.927202 17472 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:29:01.927212 17472 net.cpp:150] Setting up L3_b5_relu\nI0817 16:29:01.927219 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.927223 17472 net.cpp:165] Memory required for data: 1360385500\nI0817 16:29:01.927228 17472 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.927237 17472 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.927243 17472 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:29:01.927251 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:01.927260 17472 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:01.927306 17472 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:29:01.927320 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.927327 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.927332 17472 net.cpp:165] Memory required for data: 1364481500\nI0817 16:29:01.927337 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:29:01.927350 17472 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:29:01.927356 17472 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:29:01.927364 17472 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:29:01.928426 17472 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:29:01.928442 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.928447 17472 net.cpp:165] Memory required for data: 1366529500\nI0817 16:29:01.928462 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:29:01.928475 
17472 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:29:01.928483 17472 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:29:01.928490 17472 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:29:01.928762 17472 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:29:01.928776 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.928781 17472 net.cpp:165] Memory required for data: 1368577500\nI0817 16:29:01.928791 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:01.928807 17472 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:29:01.928815 17472 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:29:01.928823 17472 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.928886 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:29:01.929080 17472 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:29:01.929095 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.929100 17472 net.cpp:165] Memory required for data: 1370625500\nI0817 16:29:01.929108 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:29:01.929116 17472 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:29:01.929123 17472 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:29:01.929133 17472 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:29:01.929144 17472 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:29:01.929152 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.929157 17472 net.cpp:165] Memory required for data: 1372673500\nI0817 16:29:01.929160 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:29:01.929172 17472 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:29:01.929177 17472 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:29:01.929188 17472 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:29:01.930217 17472 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:29:01.930233 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.930238 17472 net.cpp:165] Memory required for data: 1374721500\nI0817 16:29:01.930246 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:29:01.930260 17472 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:29:01.930268 17472 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:29:01.930276 17472 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:29:01.930550 17472 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:29:01.930563 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.930568 17472 net.cpp:165] Memory required for data: 1376769500\nI0817 16:29:01.930578 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:01.930588 17472 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:29:01.930594 17472 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:29:01.930601 17472 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:29:01.930662 17472 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:29:01.930826 17472 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:29:01.930842 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.930848 17472 net.cpp:165] Memory required for data: 1378817500\nI0817 16:29:01.930857 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:29:01.930867 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:29:01.930873 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:29:01.930881 17472 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:29:01.930888 17472 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:29:01.930927 17472 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:29:01.930938 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.930943 17472 net.cpp:165] Memory required for data: 1380865500\nI0817 16:29:01.930948 17472 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:29:01.930958 17472 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:29:01.930971 17472 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:29:01.930979 17472 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:29:01.930989 17472 net.cpp:150] Setting up L3_b6_relu\nI0817 16:29:01.930996 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.931000 17472 net.cpp:165] Memory required for data: 1382913500\nI0817 16:29:01.931005 17472 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.931015 17472 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.931021 17472 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:29:01.931028 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:01.931038 17472 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:01.931088 17472 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:29:01.931104 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.931112 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.931115 17472 net.cpp:165] Memory required for data: 1387009500\nI0817 16:29:01.931121 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:29:01.931133 17472 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:29:01.931138 17472 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:29:01.931147 17472 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:29:01.932175 17472 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:29:01.932189 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.932194 17472 net.cpp:165] Memory required for data: 1389057500\nI0817 16:29:01.932204 17472 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:29:01.932216 17472 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:29:01.932224 17472 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:29:01.932231 17472 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:29:01.932507 17472 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:29:01.932519 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.932524 17472 net.cpp:165] Memory required for data: 1391105500\nI0817 16:29:01.932534 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:01.932546 17472 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:29:01.932552 17472 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:29:01.932564 17472 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.932622 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:29:01.932788 17472 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:29:01.932806 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.932812 17472 net.cpp:165] Memory required for data: 1393153500\nI0817 16:29:01.932822 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:29:01.932857 17472 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:29:01.932867 17472 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:29:01.932874 17472 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:29:01.932885 17472 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:29:01.932893 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.932896 17472 net.cpp:165] Memory required for data: 1395201500\nI0817 16:29:01.932902 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:29:01.932914 17472 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:29:01.932919 17472 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:29:01.932929 17472 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:29:01.933954 17472 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:29:01.933969 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.933974 17472 net.cpp:165] Memory required for data: 1397249500\nI0817 16:29:01.933984 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:29:01.934003 17472 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:29:01.934011 17472 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:29:01.934020 17472 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:29:01.934291 17472 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:29:01.934304 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934309 17472 net.cpp:165] Memory required for data: 1399297500\nI0817 16:29:01.934319 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:01.934331 17472 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:29:01.934337 17472 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:29:01.934345 17472 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:29:01.934407 17472 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:29:01.934572 17472 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:29:01.934587 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934592 17472 net.cpp:165] Memory required for data: 1401345500\nI0817 16:29:01.934600 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:29:01.934612 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:29:01.934619 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:29:01.934626 17472 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:29:01.934636 17472 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:29:01.934670 17472 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:29:01.934682 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934686 17472 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:29:01.934691 17472 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:29:01.934702 17472 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:29:01.934708 17472 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:29:01.934716 17472 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:29:01.934725 17472 net.cpp:150] Setting up L3_b7_relu\nI0817 16:29:01.934732 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934737 17472 net.cpp:165] Memory required for data: 1405441500\nI0817 16:29:01.934742 17472 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.934748 17472 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.934754 17472 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:29:01.934762 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:01.934770 17472 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:01.934828 17472 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:29:01.934839 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934846 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.934851 17472 net.cpp:165] Memory required for data: 1409537500\nI0817 16:29:01.934856 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:29:01.934870 17472 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:29:01.934876 17472 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:29:01.934886 17472 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:29:01.936894 17472 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:29:01.936914 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.936919 17472 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:29:01.936929 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:29:01.936939 17472 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:29:01.936946 17472 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:29:01.936957 17472 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:29:01.937234 17472 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:29:01.937258 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.937263 17472 net.cpp:165] Memory required for data: 1413633500\nI0817 16:29:01.937274 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:01.937283 17472 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:29:01.937289 17472 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:29:01.937297 17472 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.937361 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:29:01.937526 17472 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:29:01.937541 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.937546 17472 net.cpp:165] Memory required for data: 1415681500\nI0817 16:29:01.937554 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:29:01.937562 17472 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:29:01.937569 17472 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:29:01.937579 17472 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:29:01.937589 17472 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:29:01.937597 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.937602 17472 net.cpp:165] Memory required for data: 1417729500\nI0817 16:29:01.937607 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:29:01.937620 17472 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:29:01.937626 17472 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:29:01.937635 17472 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:29:01.938668 17472 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:29:01.938683 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.938688 17472 net.cpp:165] Memory required for data: 1419777500\nI0817 16:29:01.938696 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:29:01.938709 17472 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:29:01.938715 17472 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:29:01.938726 17472 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:29:01.939005 17472 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:29:01.939018 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.939023 17472 net.cpp:165] Memory required for data: 1421825500\nI0817 16:29:01.939034 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:01.939043 17472 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:29:01.939049 17472 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:29:01.939059 17472 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:29:01.939119 17472 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:29:01.939283 17472 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:29:01.939296 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.939301 17472 net.cpp:165] Memory required for data: 1423873500\nI0817 16:29:01.939311 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:29:01.939321 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:29:01.939327 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:29:01.939333 17472 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:29:01.939344 17472 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:29:01.939381 17472 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:29:01.939393 17472 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:29:01.939398 17472 net.cpp:165] Memory required for data: 1425921500\nI0817 16:29:01.939402 17472 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:29:01.939410 17472 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:29:01.939416 17472 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:29:01.939424 17472 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:29:01.939432 17472 net.cpp:150] Setting up L3_b8_relu\nI0817 16:29:01.939440 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.939445 17472 net.cpp:165] Memory required for data: 1427969500\nI0817 16:29:01.939456 17472 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.939466 17472 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.939472 17472 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:29:01.939479 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:01.939491 17472 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:01.939545 17472 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:29:01.939558 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.939564 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.939568 17472 net.cpp:165] Memory required for data: 1432065500\nI0817 16:29:01.939574 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:29:01.939585 17472 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:29:01.939591 17472 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:29:01.939604 17472 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:29:01.940634 17472 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:29:01.940649 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:29:01.940654 17472 net.cpp:165] Memory required for data: 1434113500\nI0817 16:29:01.940663 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:29:01.940672 17472 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:29:01.940678 17472 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:29:01.940690 17472 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:29:01.940971 17472 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:29:01.940989 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.940994 17472 net.cpp:165] Memory required for data: 1436161500\nI0817 16:29:01.941005 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:01.941015 17472 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:29:01.941020 17472 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:29:01.941027 17472 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.941087 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:29:01.941248 17472 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:29:01.941262 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.941267 17472 net.cpp:165] Memory required for data: 1438209500\nI0817 16:29:01.941275 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:29:01.941287 17472 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:29:01.941293 17472 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:29:01.941300 17472 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:29:01.941310 17472 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:29:01.941318 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.941321 17472 net.cpp:165] Memory required for data: 1440257500\nI0817 16:29:01.941326 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:29:01.941339 17472 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:29:01.941346 17472 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:29:01.941354 17472 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:29:01.942384 17472 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:29:01.942399 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.942404 17472 net.cpp:165] Memory required for data: 1442305500\nI0817 16:29:01.942414 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:29:01.942427 17472 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:29:01.942435 17472 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:29:01.942447 17472 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:29:01.942721 17472 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:29:01.942734 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.942745 17472 net.cpp:165] Memory required for data: 1444353500\nI0817 16:29:01.942756 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:01.942765 17472 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:29:01.942772 17472 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:29:01.942782 17472 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:29:01.942849 17472 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:29:01.943012 17472 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:29:01.943025 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.943030 17472 net.cpp:165] Memory required for data: 1446401500\nI0817 16:29:01.943039 17472 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:29:01.943048 17472 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:29:01.943055 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:29:01.943063 17472 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:29:01.943074 17472 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:29:01.943112 17472 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:29:01.943125 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.943128 17472 net.cpp:165] Memory required for data: 1448449500\nI0817 16:29:01.943135 17472 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:29:01.943142 17472 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:29:01.943148 17472 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:29:01.943158 17472 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:29:01.943168 17472 net.cpp:150] Setting up L3_b9_relu\nI0817 16:29:01.943176 17472 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:29:01.943179 17472 net.cpp:165] Memory required for data: 1450497500\nI0817 16:29:01.943184 17472 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:29:01.943192 17472 net.cpp:100] Creating Layer post_pool\nI0817 16:29:01.943197 17472 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:29:01.943205 17472 net.cpp:408] post_pool -> post_pool\nI0817 16:29:01.943240 17472 net.cpp:150] Setting up post_pool\nI0817 16:29:01.943253 17472 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:29:01.943256 17472 net.cpp:165] Memory required for data: 1450529500\nI0817 16:29:01.943262 17472 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:29:01.943276 17472 net.cpp:100] Creating Layer post_FC\nI0817 16:29:01.943282 17472 net.cpp:434] post_FC <- post_pool\nI0817 16:29:01.943291 17472 net.cpp:408] post_FC -> post_FC_top\nI0817 16:29:01.943454 17472 net.cpp:150] Setting up post_FC\nI0817 16:29:01.943469 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.943472 17472 net.cpp:165] Memory required for data: 1450534500\nI0817 16:29:01.943481 17472 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:29:01.943492 17472 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:29:01.943498 17472 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:29:01.943506 17472 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:29:01.943516 17472 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:29:01.943567 17472 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:29:01.943578 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.943584 17472 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:29:01.943589 17472 net.cpp:165] Memory required for data: 1450544500\nI0817 16:29:01.943594 17472 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:29:01.943601 17472 net.cpp:100] Creating Layer accuracy\nI0817 16:29:01.943608 17472 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:29:01.943614 17472 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:29:01.943624 17472 net.cpp:408] accuracy -> accuracy\nI0817 16:29:01.943637 17472 net.cpp:150] Setting up accuracy\nI0817 16:29:01.943645 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:01.943656 17472 net.cpp:165] Memory required for data: 1450544504\nI0817 16:29:01.943661 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:01.943670 17472 net.cpp:100] Creating Layer loss\nI0817 16:29:01.943675 17472 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:29:01.943681 17472 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:29:01.943688 17472 net.cpp:408] loss -> loss\nI0817 16:29:01.943701 17472 layer_factory.hpp:77] Creating layer loss\nI0817 16:29:01.943831 17472 net.cpp:150] Setting up loss\nI0817 16:29:01.943845 17472 net.cpp:157] Top shape: (1)\nI0817 16:29:01.943850 17472 net.cpp:160]     with loss weight 1\nI0817 16:29:01.943866 17472 net.cpp:165] Memory required for data: 1450544508\nI0817 16:29:01.943872 17472 net.cpp:226] loss needs backward computation.\nI0817 16:29:01.943878 17472 net.cpp:228] accuracy does not need backward computation.\nI0817 16:29:01.943884 17472 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:29:01.943889 17472 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:29:01.943894 17472 net.cpp:226] post_pool needs backward computation.\nI0817 16:29:01.943899 17472 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:29:01.943903 17472 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.943909 17472 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.943913 17472 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.943918 17472 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.943923 17472 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.943928 17472 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.943933 17472 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.943938 17472 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.943943 17472 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:29:01.943948 17472 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:29:01.943953 17472 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.943958 17472 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.943964 17472 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.943969 17472 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.943974 17472 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.943979 17472 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:29:01.943984 17472 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.943989 17472 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.943997 17472 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:29:01.944002 17472 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:29:01.944007 17472 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:29:01.944012 17472 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.944017 17472 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.944022 17472 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.944027 17472 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.944032 17472 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.944037 17472 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.944042 17472 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.944047 17472 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:29:01.944052 17472 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:29:01.944057 17472 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.944063 17472 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.944068 17472 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.944080 17472 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.944087 17472 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.944092 17472 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.944097 17472 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.944102 17472 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.944106 17472 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:29:01.944111 17472 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:29:01.944116 17472 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:29:01.944123 17472 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.944128 17472 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.944133 17472 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.944138 17472 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.944142 17472 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.944147 17472 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.944152 17472 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.944157 17472 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:29:01.944164 17472 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:29:01.944169 17472 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.944175 17472 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.944180 17472 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.944185 17472 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.944190 17472 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.944195 17472 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.944200 17472 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.944205 17472 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.944209 17472 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:29:01.944214 17472 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:29:01.944219 17472 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:29:01.944226 17472 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.944231 17472 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.944236 17472 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.944241 17472 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.944247 17472 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:29:01.944252 
17472 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.944257 17472 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.944262 17472 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:29:01.944267 17472 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:29:01.944272 17472 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.944278 17472 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.944283 17472 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.944288 17472 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.944293 17472 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.944298 17472 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.944303 17472 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.944308 17472 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.944314 17472 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:29:01.944319 17472 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:29:01.944330 17472 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:29:01.944335 17472 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:29:01.944341 17472 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.944350 17472 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:29:01.944355 17472 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.944360 17472 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.944366 17472 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.944372 17472 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.944377 17472 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:29:01.944382 17472 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.944387 17472 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.944392 17472 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:29:01.944398 17472 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:29:01.944403 17472 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.944408 17472 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.944413 17472 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.944419 17472 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.944424 17472 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.944429 17472 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.944434 17472 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.944440 17472 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.944445 17472 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:29:01.944450 17472 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:29:01.944456 17472 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.944461 17472 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.944466 17472 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.944473 17472 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.944478 17472 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.944483 17472 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:29:01.944488 17472 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.944492 17472 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.944499 17472 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:29:01.944504 17472 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:29:01.944509 17472 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:29:01.944514 17472 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.944519 17472 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.944525 17472 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.944530 17472 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.944535 17472 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.944540 17472 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.944545 17472 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.944550 17472 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:29:01.944556 17472 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:29:01.944561 17472 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.944567 17472 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.944572 17472 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.944577 17472 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.944587 17472 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.944593 17472 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.944598 17472 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.944604 17472 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.944609 17472 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:29:01.944614 17472 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:29:01.944620 17472 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:29:01.944625 17472 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.944631 17472 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.944636 17472 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.944644 17472 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.944650 17472 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.944655 17472 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.944660 17472 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.944666 17472 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:29:01.944672 17472 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:29:01.944677 17472 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.944684 17472 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.944689 17472 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.944694 17472 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.944700 17472 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.944705 17472 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.944710 17472 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.944715 17472 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.944720 17472 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:29:01.944725 17472 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:29:01.944731 17472 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:29:01.944737 17472 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.944742 17472 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.944747 17472 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.944753 17472 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.944758 17472 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:29:01.944764 17472 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.944769 17472 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.944775 17472 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:29:01.944780 17472 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:29:01.944787 17472 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.944792 17472 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.944797 17472 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.944809 17472 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.944815 17472 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.944820 17472 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.944826 17472 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.944831 17472 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.944838 17472 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:29:01.944844 17472 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:29:01.944849 17472 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:29:01.944859 17472 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:29:01.944865 17472 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.944872 17472 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:29:01.944877 17472 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.944883 17472 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.944888 17472 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.944895 17472 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.944900 17472 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:29:01.944905 17472 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.944911 17472 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.944916 17472 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:29:01.944921 17472 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:29:01.944927 17472 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:29:01.944933 17472 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:29:01.944938 17472 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:29:01.944944 17472 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:29:01.944949 17472 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:29:01.944954 17472 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:29:01.944960 17472 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:29:01.944965 17472 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:29:01.944972 17472 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:29:01.944977 17472 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:29:01.944983 17472 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:29:01.944988 17472 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:29:01.944994 17472 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:29:01.944999 17472 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:29:01.945005 17472 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:29:01.945010 17472 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:29:01.945016 17472 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:29:01.945021 17472 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:29:01.945034 17472 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:29:01.945041 17472 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:29:01.945046 17472 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:29:01.945053 17472 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:29:01.945058 17472 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:29:01.945065 17472 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:29:01.945070 17472 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:29:01.945076 17472 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:29:01.945081 17472 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:29:01.945087 17472 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:29:01.945093 17472 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:29:01.945098 17472 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:29:01.945104 17472 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:29:01.945111 17472 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:29:01.945116 17472 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:29:01.945122 17472 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:29:01.945127 17472 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:29:01.945132 17472 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:29:01.945142 17472 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:29:01.945149 17472 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:29:01.945154 17472 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:29:01.945160 17472 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:29:01.945166 17472 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:29:01.945173 17472 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:29:01.945178 17472 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:29:01.945183 17472 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:29:01.945189 17472 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:29:01.945194 17472 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:29:01.945200 17472 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:29:01.945206 17472 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:29:01.945211 17472 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:29:01.945217 17472 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:29:01.945224 17472 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:29:01.945230 17472 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:29:01.945235 17472 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:29:01.945240 17472 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:29:01.945246 17472 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:29:01.945251 17472 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:29:01.945257 17472 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:29:01.945263 17472 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:29:01.945269 17472 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:29:01.945274 17472 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:29:01.945281 17472 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:29:01.945286 17472 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:29:01.945292 17472 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:29:01.945298 17472 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:29:01.945303 17472 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:29:01.945309 17472 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:29:01.945315 17472 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:29:01.945320 17472 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:29:01.945327 17472 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:29:01.945333 17472 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:29:01.945338 17472 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:29:01.945344 17472 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:29:01.945350 17472 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:29:01.945356 17472 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:29:01.945363 17472 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:29:01.945367 17472 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:29:01.945374 17472 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:29:01.945379 17472 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:29:01.945385 17472 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:29:01.945390 17472 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:29:01.945396 17472 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:29:01.945403 17472 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:29:01.945410 17472 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:29:01.945421 17472 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:29:01.945427 17472 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:29:01.945433 17472 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:29:01.945439 17472 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:29:01.945444 17472 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:29:01.945451 17472 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:29:01.945456 17472 net.cpp:226] pre_relu needs backward computation.\nI0817 16:29:01.945461 17472 net.cpp:226] pre_scale needs backward computation.\nI0817 16:29:01.945466 17472 net.cpp:226] pre_bn needs backward computation.\nI0817 16:29:01.945472 17472 net.cpp:226] pre_conv needs backward computation.\nI0817 16:29:01.945478 17472 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:29:01.945485 17472 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:29:01.945490 17472 net.cpp:270] This network produces output accuracy\nI0817 16:29:01.945497 17472 net.cpp:270] This network produces output loss\nI0817 16:29:01.945832 17472 net.cpp:283] Network initialization done.\nI0817 16:29:01.946842 17472 solver.cpp:60] Solver scaffolding done.\nI0817 16:29:02.179828 17472 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:29:02.553550 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:02.553606 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:02.560438 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:02.786285 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:02.786368 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:02.821137 17472 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:29:02.821216 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:03.273350 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:03.273429 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:03.280951 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:03.524529 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:03.524667 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:03.576011 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:03.576144 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:04.094014 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:04.094068 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:04.102748 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:04.368436 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:04.368567 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:04.440541 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:04.440670 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:04.524531 17472 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:29:05.010115 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:05.010170 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:29:05.020093 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:05.315546 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:05.315739 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:05.407021 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:05.407204 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:06.050788 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:06.050863 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:06.061041 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:06.379572 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:06.379753 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:06.492383 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:06.492560 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:07.201262 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:07.201314 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:07.212301 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:07.555274 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:07.555480 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:07.688580 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:07.688782 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:29:08.474941 17472 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:29:08.475019 17472 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:29:08.488077 17472 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:29:08.525647 17495 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:08.571805 17495 blocking_queue.cpp:50] Waiting for data\nI0817 16:29:08.893786 17472 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:29:08.894078 17472 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:29:09.047266 17472 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:29:09.047535 17472 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:29:09.220800 17472 parallel.cpp:425] Starting Optimization\nI0817 16:29:09.222144 17472 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:29:09.222163 17472 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:29:09.226140 17472 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:30:31.283010 17472 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:30:31.283349 17472 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:30:35.401746 17472 solver.cpp:228] Iteration 0, loss = 3.63738\nI0817 16:30:35.401787 17472 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI0817 16:30:35.401805 17472 solver.cpp:244]     Train net output #1: loss = 3.63738 (* 1 = 3.63738 loss)\nI0817 16:30:35.401973 17472 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:32:53.859154 17472 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:34:16.172734 17472 solver.cpp:404]     Test net output #0: accuracy = 0.31188\nI0817 16:34:16.173007 17472 solver.cpp:404]     Test net output #1: loss = 2.09299 (* 1 = 2.09299 loss)\nI0817 16:34:17.502527 17472 
solver.cpp:228] Iteration 100, loss = 1.49734\nI0817 16:34:17.502568 17472 solver.cpp:244]     Train net output #0: accuracy = 0.432\nI0817 16:34:17.502584 17472 solver.cpp:244]     Train net output #1: loss = 1.49734 (* 1 = 1.49734 loss)\nI0817 16:34:17.595067 17472 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:36:35.342422 17472 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:37:57.629873 17472 solver.cpp:404]     Test net output #0: accuracy = 0.33512\nI0817 16:37:57.630158 17472 solver.cpp:404]     Test net output #1: loss = 1.9235 (* 1 = 1.9235 loss)\nI0817 16:37:58.959818 17472 solver.cpp:228] Iteration 200, loss = 1.3929\nI0817 16:37:58.959866 17472 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0817 16:37:58.959882 17472 solver.cpp:244]     Train net output #1: loss = 1.3929 (* 1 = 1.3929 loss)\nI0817 16:37:59.049502 17472 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:40:16.764662 17472 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:41:39.037693 17472 solver.cpp:404]     Test net output #0: accuracy = 0.3798\nI0817 16:41:39.037982 17472 solver.cpp:404]     Test net output #1: loss = 1.68351 (* 1 = 1.68351 loss)\nI0817 16:41:40.366870 17472 solver.cpp:228] Iteration 300, loss = 1.22973\nI0817 16:41:40.366920 17472 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI0817 16:41:40.366936 17472 solver.cpp:244]     Train net output #1: loss = 1.22973 (* 1 = 1.22973 loss)\nI0817 16:41:40.456353 17472 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:43:58.215996 17472 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:45:20.476516 17472 solver.cpp:404]     Test net output #0: accuracy = 0.457\nI0817 16:45:20.476805 17472 solver.cpp:404]     Test net output #1: loss = 1.4774 (* 1 = 1.4774 loss)\nI0817 16:45:21.806455 17472 solver.cpp:228] Iteration 400, loss = 1.19445\nI0817 16:45:21.806494 17472 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0817 16:45:21.806510 17472 
solver.cpp:244]     Train net output #1: loss = 1.19445 (* 1 = 1.19445 loss)\nI0817 16:45:21.891587 17472 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:47:39.557267 17472 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:49:01.619150 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4734\nI0817 16:49:01.619388 17472 solver.cpp:404]     Test net output #1: loss = 1.52279 (* 1 = 1.52279 loss)\nI0817 16:49:02.948778 17472 solver.cpp:228] Iteration 500, loss = 1.10902\nI0817 16:49:02.948827 17472 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0817 16:49:02.948845 17472 solver.cpp:244]     Train net output #1: loss = 1.10902 (* 1 = 1.10902 loss)\nI0817 16:49:03.042954 17472 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 16:51:20.787473 17472 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:52:42.919373 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50712\nI0817 16:52:42.919623 17472 solver.cpp:404]     Test net output #1: loss = 1.41591 (* 1 = 1.41591 loss)\nI0817 16:52:44.248872 17472 solver.cpp:228] Iteration 600, loss = 1.02057\nI0817 16:52:44.248919 17472 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0817 16:52:44.248937 17472 solver.cpp:244]     Train net output #1: loss = 1.02057 (* 1 = 1.02057 loss)\nI0817 16:52:44.341805 17472 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:55:01.941397 17472 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:56:24.189419 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5466\nI0817 16:56:24.189659 17472 solver.cpp:404]     Test net output #1: loss = 1.28219 (* 1 = 1.28219 loss)\nI0817 16:56:25.518092 17472 solver.cpp:228] Iteration 700, loss = 0.933764\nI0817 16:56:25.518138 17472 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 16:56:25.518154 17472 solver.cpp:244]     Train net output #1: loss = 0.933764 (* 1 = 0.933764 loss)\nI0817 16:56:25.613344 17472 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 
16:58:43.293457 17472 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 17:00:05.546016 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52704\nI0817 17:00:05.546296 17472 solver.cpp:404]     Test net output #1: loss = 1.38185 (* 1 = 1.38185 loss)\nI0817 17:00:06.875757 17472 solver.cpp:228] Iteration 800, loss = 0.914017\nI0817 17:00:06.875805 17472 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 17:00:06.875821 17472 solver.cpp:244]     Train net output #1: loss = 0.914017 (* 1 = 0.914017 loss)\nI0817 17:00:06.961800 17472 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 17:02:24.538978 17472 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 17:03:46.776309 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5472\nI0817 17:03:46.776531 17472 solver.cpp:404]     Test net output #1: loss = 1.27038 (* 1 = 1.27038 loss)\nI0817 17:03:48.105842 17472 solver.cpp:228] Iteration 900, loss = 0.888176\nI0817 17:03:48.105890 17472 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0817 17:03:48.105906 17472 solver.cpp:244]     Train net output #1: loss = 0.888176 (* 1 = 0.888176 loss)\nI0817 17:03:48.193727 17472 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 17:06:06.394872 17472 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 17:07:28.591495 17472 solver.cpp:404]     Test net output #0: accuracy = 0.54788\nI0817 17:07:28.591733 17472 solver.cpp:404]     Test net output #1: loss = 1.30597 (* 1 = 1.30597 loss)\nI0817 17:07:29.920631 17472 solver.cpp:228] Iteration 1000, loss = 0.763522\nI0817 17:07:29.920681 17472 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:07:29.920697 17472 solver.cpp:244]     Train net output #1: loss = 0.763522 (* 1 = 0.763522 loss)\nI0817 17:07:30.008396 17472 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 17:09:47.644160 17472 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 17:11:09.833284 17472 solver.cpp:404]     Test net output #0: accuracy = 
0.57996\nI0817 17:11:09.833523 17472 solver.cpp:404]     Test net output #1: loss = 1.19466 (* 1 = 1.19466 loss)\nI0817 17:11:11.163293 17472 solver.cpp:228] Iteration 1100, loss = 0.760757\nI0817 17:11:11.163343 17472 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:11:11.163360 17472 solver.cpp:244]     Train net output #1: loss = 0.760757 (* 1 = 0.760757 loss)\nI0817 17:11:11.247263 17472 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 17:13:28.864557 17472 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 17:14:51.040443 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6066\nI0817 17:14:51.040681 17472 solver.cpp:404]     Test net output #1: loss = 1.12636 (* 1 = 1.12636 loss)\nI0817 17:14:52.369556 17472 solver.cpp:228] Iteration 1200, loss = 0.738878\nI0817 17:14:52.369604 17472 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 17:14:52.369621 17472 solver.cpp:244]     Train net output #1: loss = 0.738878 (* 1 = 0.738878 loss)\nI0817 17:14:52.458890 17472 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 17:17:10.916709 17472 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:18:33.095141 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61708\nI0817 17:18:33.095389 17472 solver.cpp:404]     Test net output #1: loss = 1.09122 (* 1 = 1.09122 loss)\nI0817 17:18:34.424612 17472 solver.cpp:228] Iteration 1300, loss = 0.734753\nI0817 17:18:34.424660 17472 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:18:34.424676 17472 solver.cpp:244]     Train net output #1: loss = 0.734753 (* 1 = 0.734753 loss)\nI0817 17:18:34.512841 17472 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 17:20:52.176455 17472 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:22:14.324290 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5934\nI0817 17:22:14.324556 17472 solver.cpp:404]     Test net output #1: loss = 1.16724 (* 1 = 1.16724 loss)\nI0817 17:22:15.653502 17472 
solver.cpp:228] Iteration 1400, loss = 0.70723\nI0817 17:22:15.653544 17472 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 17:22:15.653559 17472 solver.cpp:244]     Train net output #1: loss = 0.70723 (* 1 = 0.70723 loss)\nI0817 17:22:15.742118 17472 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 17:24:33.484140 17472 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:25:55.501538 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5534\nI0817 17:25:55.501737 17472 solver.cpp:404]     Test net output #1: loss = 1.36731 (* 1 = 1.36731 loss)\nI0817 17:25:56.830760 17472 solver.cpp:228] Iteration 1500, loss = 0.711798\nI0817 17:25:56.830811 17472 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:25:56.830827 17472 solver.cpp:244]     Train net output #1: loss = 0.711798 (* 1 = 0.711798 loss)\nI0817 17:25:56.915854 17472 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 17:28:14.545411 17472 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:29:36.534344 17472 solver.cpp:404]     Test net output #0: accuracy = 0.56176\nI0817 17:29:36.534545 17472 solver.cpp:404]     Test net output #1: loss = 1.34306 (* 1 = 1.34306 loss)\nI0817 17:29:37.863307 17472 solver.cpp:228] Iteration 1600, loss = 0.71751\nI0817 17:29:37.863358 17472 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 17:29:37.863373 17472 solver.cpp:244]     Train net output #1: loss = 0.71751 (* 1 = 0.71751 loss)\nI0817 17:29:37.950958 17472 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 17:31:56.344477 17472 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:33:18.485976 17472 solver.cpp:404]     Test net output #0: accuracy = 0.54472\nI0817 17:33:18.486289 17472 solver.cpp:404]     Test net output #1: loss = 1.43618 (* 1 = 1.43618 loss)\nI0817 17:33:19.815560 17472 solver.cpp:228] Iteration 1700, loss = 0.605863\nI0817 17:33:19.815610 17472 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 
17:33:19.815626 17472 solver.cpp:244]     Train net output #1: loss = 0.605863 (* 1 = 0.605863 loss)\nI0817 17:33:19.903281 17472 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 17:35:38.174722 17472 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:37:00.206499 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53748\nI0817 17:37:00.206696 17472 solver.cpp:404]     Test net output #1: loss = 1.48065 (* 1 = 1.48065 loss)\nI0817 17:37:01.536165 17472 solver.cpp:228] Iteration 1800, loss = 0.653636\nI0817 17:37:01.536216 17472 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 17:37:01.536232 17472 solver.cpp:244]     Train net output #1: loss = 0.653636 (* 1 = 0.653636 loss)\nI0817 17:37:01.624141 17472 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 17:39:19.919999 17472 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:40:42.039182 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50352\nI0817 17:40:42.039439 17472 solver.cpp:404]     Test net output #1: loss = 1.78313 (* 1 = 1.78313 loss)\nI0817 17:40:43.368039 17472 solver.cpp:228] Iteration 1900, loss = 0.660455\nI0817 17:40:43.368089 17472 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:40:43.368105 17472 solver.cpp:244]     Train net output #1: loss = 0.660455 (* 1 = 0.660455 loss)\nI0817 17:40:43.461571 17472 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 17:43:01.623512 17472 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:44:23.671286 17472 solver.cpp:404]     Test net output #0: accuracy = 0.57392\nI0817 17:44:23.671489 17472 solver.cpp:404]     Test net output #1: loss = 1.40674 (* 1 = 1.40674 loss)\nI0817 17:44:25.000511 17472 solver.cpp:228] Iteration 2000, loss = 0.603479\nI0817 17:44:25.000562 17472 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 17:44:25.000581 17472 solver.cpp:244]     Train net output #1: loss = 0.603479 (* 1 = 0.603479 loss)\nI0817 17:44:25.091347 17472 
sgd_solver.cpp:166] Iteration 2000, lr = 0.35\nI0817 17:46:43.553591 17472 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:48:05.505300 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51296\nI0817 17:48:05.505496 17472 solver.cpp:404]     Test net output #1: loss = 1.71282 (* 1 = 1.71282 loss)\nI0817 17:48:06.834662 17472 solver.cpp:228] Iteration 2100, loss = 0.647234\nI0817 17:48:06.834712 17472 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 17:48:06.834729 17472 solver.cpp:244]     Train net output #1: loss = 0.647234 (* 1 = 0.647234 loss)\nI0817 17:48:06.922076 17472 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 17:50:25.370674 17472 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:51:47.313446 17472 solver.cpp:404]     Test net output #0: accuracy = 0.47268\nI0817 17:51:47.313663 17472 solver.cpp:404]     Test net output #1: loss = 2.14245 (* 1 = 2.14245 loss)\nI0817 17:51:48.642490 17472 solver.cpp:228] Iteration 2200, loss = 0.542839\nI0817 17:51:48.642541 17472 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:51:48.642558 17472 solver.cpp:244]     Train net output #1: loss = 0.542839 (* 1 = 0.542839 loss)\nI0817 17:51:48.731413 17472 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 17:54:06.412179 17472 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:55:28.440641 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4746\nI0817 17:55:28.440877 17472 solver.cpp:404]     Test net output #1: loss = 2.07404 (* 1 = 2.07404 loss)\nI0817 17:55:29.769549 17472 solver.cpp:228] Iteration 2300, loss = 0.487139\nI0817 17:55:29.769598 17472 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 17:55:29.769616 17472 solver.cpp:244]     Train net output #1: loss = 0.487139 (* 1 = 0.487139 loss)\nI0817 17:55:29.858582 17472 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 17:57:48.212990 17472 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:59:10.183430 
17472 solver.cpp:404]     Test net output #0: accuracy = 0.491\nI0817 17:59:10.183662 17472 solver.cpp:404]     Test net output #1: loss = 1.94848 (* 1 = 1.94848 loss)\nI0817 17:59:11.512444 17472 solver.cpp:228] Iteration 2400, loss = 0.5028\nI0817 17:59:11.512495 17472 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 17:59:11.512511 17472 solver.cpp:244]     Train net output #1: loss = 0.5028 (* 1 = 0.5028 loss)\nI0817 17:59:11.599704 17472 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 18:01:29.942183 17472 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 18:02:52.091894 17472 solver.cpp:404]     Test net output #0: accuracy = 0.45528\nI0817 18:02:52.092164 17472 solver.cpp:404]     Test net output #1: loss = 2.3668 (* 1 = 2.3668 loss)\nI0817 18:02:53.421304 17472 solver.cpp:228] Iteration 2500, loss = 0.478747\nI0817 18:02:53.421352 17472 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 18:02:53.421368 17472 solver.cpp:244]     Train net output #1: loss = 0.478747 (* 1 = 0.478747 loss)\nI0817 18:02:53.506566 17472 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 18:05:11.798734 17472 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 18:06:33.833183 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50144\nI0817 18:06:33.833401 17472 solver.cpp:404]     Test net output #1: loss = 2.00545 (* 1 = 2.00545 loss)\nI0817 18:06:35.162421 17472 solver.cpp:228] Iteration 2600, loss = 0.499571\nI0817 18:06:35.162470 17472 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 18:06:35.162487 17472 solver.cpp:244]     Train net output #1: loss = 0.499571 (* 1 = 0.499571 loss)\nI0817 18:06:35.247941 17472 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 18:08:53.536036 17472 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 18:10:15.640187 17472 solver.cpp:404]     Test net output #0: accuracy = 0.38328\nI0817 18:10:15.640422 17472 solver.cpp:404]     Test net output #1: loss = 3.2592 (* 1 = 
3.2592 loss)\nI0817 18:10:16.969753 17472 solver.cpp:228] Iteration 2700, loss = 0.411393\nI0817 18:10:16.969805 17472 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 18:10:16.969820 17472 solver.cpp:244]     Train net output #1: loss = 0.411393 (* 1 = 0.411393 loss)\nI0817 18:10:17.060783 17472 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 18:12:34.760422 17472 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 18:13:56.835810 17472 solver.cpp:404]     Test net output #0: accuracy = 0.42216\nI0817 18:13:56.836050 17472 solver.cpp:404]     Test net output #1: loss = 2.72878 (* 1 = 2.72878 loss)\nI0817 18:13:58.165478 17472 solver.cpp:228] Iteration 2800, loss = 0.390923\nI0817 18:13:58.165529 17472 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:13:58.165545 17472 solver.cpp:244]     Train net output #1: loss = 0.390923 (* 1 = 0.390923 loss)\nI0817 18:13:58.252710 17472 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 18:16:16.637200 17472 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:17:38.642551 17472 solver.cpp:404]     Test net output #0: accuracy = 0.42944\nI0817 18:17:38.642784 17472 solver.cpp:404]     Test net output #1: loss = 2.69928 (* 1 = 2.69928 loss)\nI0817 18:17:39.971379 17472 solver.cpp:228] Iteration 2900, loss = 0.392667\nI0817 18:17:39.971429 17472 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 18:17:39.971446 17472 solver.cpp:244]     Train net output #1: loss = 0.392667 (* 1 = 0.392667 loss)\nI0817 18:17:40.064792 17472 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 18:19:58.429231 17472 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:21:20.476843 17472 solver.cpp:404]     Test net output #0: accuracy = 0.41368\nI0817 18:21:20.477057 17472 solver.cpp:404]     Test net output #1: loss = 2.93343 (* 1 = 2.93343 loss)\nI0817 18:21:21.806680 17472 solver.cpp:228] Iteration 3000, loss = 0.355905\nI0817 18:21:21.806731 17472 solver.cpp:244]     Train 
net output #0: accuracy = 0.856\nI0817 18:21:21.806748 17472 solver.cpp:244]     Train net output #1: loss = 0.355905 (* 1 = 0.355905 loss)\nI0817 18:21:21.914377 17472 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 18:23:40.239853 17472 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:25:02.333017 17472 solver.cpp:404]     Test net output #0: accuracy = 0.46656\nI0817 18:25:02.333228 17472 solver.cpp:404]     Test net output #1: loss = 2.47925 (* 1 = 2.47925 loss)\nI0817 18:25:03.662869 17472 solver.cpp:228] Iteration 3100, loss = 0.369502\nI0817 18:25:03.662923 17472 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:25:03.662941 17472 solver.cpp:244]     Train net output #1: loss = 0.369502 (* 1 = 0.369502 loss)\nI0817 18:25:03.753774 17472 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 18:27:21.383483 17472 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:28:43.518332 17472 solver.cpp:404]     Test net output #0: accuracy = 0.3562\nI0817 18:28:43.518577 17472 solver.cpp:404]     Test net output #1: loss = 3.89359 (* 1 = 3.89359 loss)\nI0817 18:28:44.847842 17472 solver.cpp:228] Iteration 3200, loss = 0.313114\nI0817 18:28:44.847882 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:28:44.847898 17472 solver.cpp:244]     Train net output #1: loss = 0.313114 (* 1 = 0.313114 loss)\nI0817 18:28:44.933337 17472 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 18:31:03.088589 17472 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:32:25.267779 17472 solver.cpp:404]     Test net output #0: accuracy = 0.42232\nI0817 18:32:25.268133 17472 solver.cpp:404]     Test net output #1: loss = 3.22414 (* 1 = 3.22414 loss)\nI0817 18:32:26.597285 17472 solver.cpp:228] Iteration 3300, loss = 0.397803\nI0817 18:32:26.597324 17472 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 18:32:26.597339 17472 solver.cpp:244]     Train net output #1: loss = 0.397803 (* 1 = 0.397803 loss)\nI0817 
18:32:26.683897 17472 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0817 18:34:44.330612 17472 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:36:06.493490 17472 solver.cpp:404]     Test net output #0: accuracy = 0.41864\nI0817 18:36:06.493731 17472 solver.cpp:404]     Test net output #1: loss = 3.1699 (* 1 = 3.1699 loss)\nI0817 18:36:07.823379 17472 solver.cpp:228] Iteration 3400, loss = 0.291838\nI0817 18:36:07.823426 17472 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:36:07.823442 17472 solver.cpp:244]     Train net output #1: loss = 0.291838 (* 1 = 0.291838 loss)\nI0817 18:36:07.910389 17472 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 18:38:25.532097 17472 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:39:47.716941 17472 solver.cpp:404]     Test net output #0: accuracy = 0.35888\nI0817 18:39:47.717164 17472 solver.cpp:404]     Test net output #1: loss = 3.9849 (* 1 = 3.9849 loss)\nI0817 18:39:49.046881 17472 solver.cpp:228] Iteration 3500, loss = 0.365406\nI0817 18:39:49.046931 17472 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:39:49.046948 17472 solver.cpp:244]     Train net output #1: loss = 0.365406 (* 1 = 0.365406 loss)\nI0817 18:39:49.135277 17472 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 18:42:06.754364 17472 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:43:28.889912 17472 solver.cpp:404]     Test net output #0: accuracy = 0.41148\nI0817 18:43:28.890153 17472 solver.cpp:404]     Test net output #1: loss = 3.45672 (* 1 = 3.45672 loss)\nI0817 18:43:30.219681 17472 solver.cpp:228] Iteration 3600, loss = 0.249951\nI0817 18:43:30.219727 17472 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:43:30.219743 17472 solver.cpp:244]     Train net output #1: loss = 0.249951 (* 1 = 0.249951 loss)\nI0817 18:43:30.311467 17472 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 18:45:47.946158 17472 solver.cpp:337] Iteration 3700, Testing net 
(#0)\nI0817 18:47:10.065333 17472 solver.cpp:404]     Test net output #0: accuracy = 0.34308\nI0817 18:47:10.065552 17472 solver.cpp:404]     Test net output #1: loss = 4.47328 (* 1 = 4.47328 loss)\nI0817 18:47:11.396184 17472 solver.cpp:228] Iteration 3700, loss = 0.26073\nI0817 18:47:11.396234 17472 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:47:11.396250 17472 solver.cpp:244]     Train net output #1: loss = 0.26073 (* 1 = 0.26073 loss)\nI0817 18:47:11.480540 17472 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 18:49:29.132072 17472 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:50:51.312453 17472 solver.cpp:404]     Test net output #0: accuracy = 0.43452\nI0817 18:50:51.312683 17472 solver.cpp:404]     Test net output #1: loss = 3.47748 (* 1 = 3.47748 loss)\nI0817 18:50:52.641499 17472 solver.cpp:228] Iteration 3800, loss = 0.182251\nI0817 18:50:52.641549 17472 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:50:52.641567 17472 solver.cpp:244]     Train net output #1: loss = 0.182251 (* 1 = 0.182251 loss)\nI0817 18:50:52.730897 17472 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 18:53:10.409762 17472 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:54:32.633915 17472 solver.cpp:404]     Test net output #0: accuracy = 0.42244\nI0817 18:54:32.634160 17472 solver.cpp:404]     Test net output #1: loss = 3.46212 (* 1 = 3.46212 loss)\nI0817 18:54:33.963976 17472 solver.cpp:228] Iteration 3900, loss = 0.24618\nI0817 18:54:33.964028 17472 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:54:33.964045 17472 solver.cpp:244]     Train net output #1: loss = 0.24618 (* 1 = 0.24618 loss)\nI0817 18:54:34.054354 17472 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 18:56:51.879932 17472 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:58:14.115864 17472 solver.cpp:404]     Test net output #0: accuracy = 0.45192\nI0817 18:58:14.116086 17472 solver.cpp:404]     Test net 
output #1: loss = 3.40705 (* 1 = 3.40705 loss)\nI0817 18:58:15.445703 17472 solver.cpp:228] Iteration 4000, loss = 0.203048\nI0817 18:58:15.445750 17472 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:58:15.445767 17472 solver.cpp:244]     Train net output #1: loss = 0.203048 (* 1 = 0.203048 loss)\nI0817 18:58:15.535311 17472 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 19:00:33.125349 17472 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 19:01:55.350880 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50656\nI0817 19:01:55.351164 17472 solver.cpp:404]     Test net output #1: loss = 2.74196 (* 1 = 2.74196 loss)\nI0817 19:01:56.679564 17472 solver.cpp:228] Iteration 4100, loss = 0.197853\nI0817 19:01:56.679611 17472 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:01:56.679627 17472 solver.cpp:244]     Train net output #1: loss = 0.197853 (* 1 = 0.197853 loss)\nI0817 19:01:56.773133 17472 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 19:04:14.485122 17472 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 19:05:36.662214 17472 solver.cpp:404]     Test net output #0: accuracy = 0.42844\nI0817 19:05:36.662441 17472 solver.cpp:404]     Test net output #1: loss = 3.64575 (* 1 = 3.64575 loss)\nI0817 19:05:37.991912 17472 solver.cpp:228] Iteration 4200, loss = 0.129973\nI0817 19:05:37.991963 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:05:37.991981 17472 solver.cpp:244]     Train net output #1: loss = 0.129973 (* 1 = 0.129973 loss)\nI0817 19:05:38.082384 17472 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 19:07:55.809409 17472 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 19:09:17.993510 17472 solver.cpp:404]     Test net output #0: accuracy = 0.47872\nI0817 19:09:17.993748 17472 solver.cpp:404]     Test net output #1: loss = 3.05639 (* 1 = 3.05639 loss)\nI0817 19:09:19.323623 17472 solver.cpp:228] Iteration 4300, loss = 0.150928\nI0817 
19:09:19.323671 17472 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:09:19.323688 17472 solver.cpp:244]     Train net output #1: loss = 0.150928 (* 1 = 0.150928 loss)\nI0817 19:09:19.409745 17472 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 19:11:37.060806 17472 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 19:12:59.257688 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5004\nI0817 19:12:59.257951 17472 solver.cpp:404]     Test net output #1: loss = 3.19631 (* 1 = 3.19631 loss)\nI0817 19:13:00.586977 17472 solver.cpp:228] Iteration 4400, loss = 0.11527\nI0817 19:13:00.587028 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:13:00.587045 17472 solver.cpp:244]     Train net output #1: loss = 0.11527 (* 1 = 0.11527 loss)\nI0817 19:13:00.673075 17472 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 19:15:18.339166 17472 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:16:40.551686 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48812\nI0817 19:16:40.551926 17472 solver.cpp:404]     Test net output #1: loss = 3.27163 (* 1 = 3.27163 loss)\nI0817 19:16:41.881408 17472 solver.cpp:228] Iteration 4500, loss = 0.137358\nI0817 19:16:41.881458 17472 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:16:41.881474 17472 solver.cpp:244]     Train net output #1: loss = 0.137358 (* 1 = 0.137358 loss)\nI0817 19:16:41.971195 17472 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 19:18:59.583681 17472 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:20:21.740998 17472 solver.cpp:404]     Test net output #0: accuracy = 0.44004\nI0817 19:20:21.741227 17472 solver.cpp:404]     Test net output #1: loss = 4.50503 (* 1 = 4.50503 loss)\nI0817 19:20:23.070487 17472 solver.cpp:228] Iteration 4600, loss = 0.0910356\nI0817 19:20:23.070539 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:20:23.070556 17472 solver.cpp:244]     Train net output 
#1: loss = 0.0910355 (* 1 = 0.0910355 loss)\nI0817 19:20:23.159934 17472 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 19:22:41.019995 17472 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:24:03.246747 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49812\nI0817 19:24:03.247052 17472 solver.cpp:404]     Test net output #1: loss = 3.37012 (* 1 = 3.37012 loss)\nI0817 19:24:04.575608 17472 solver.cpp:228] Iteration 4700, loss = 0.0936654\nI0817 19:24:04.575660 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:24:04.575678 17472 solver.cpp:244]     Train net output #1: loss = 0.0936653 (* 1 = 0.0936653 loss)\nI0817 19:24:04.667152 17472 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 19:26:22.526821 17472 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:27:44.746284 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53464\nI0817 19:27:44.746594 17472 solver.cpp:404]     Test net output #1: loss = 2.8408 (* 1 = 2.8408 loss)\nI0817 19:27:46.076037 17472 solver.cpp:228] Iteration 4800, loss = 0.0868641\nI0817 19:27:46.076086 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:27:46.076104 17472 solver.cpp:244]     Train net output #1: loss = 0.086864 (* 1 = 0.086864 loss)\nI0817 19:27:46.165344 17472 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 19:30:03.760866 17472 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:31:25.983564 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4998\nI0817 19:31:25.983872 17472 solver.cpp:404]     Test net output #1: loss = 3.73155 (* 1 = 3.73155 loss)\nI0817 19:31:27.313150 17472 solver.cpp:228] Iteration 4900, loss = 0.0613938\nI0817 19:31:27.313194 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:31:27.313211 17472 solver.cpp:244]     Train net output #1: loss = 0.0613938 (* 1 = 0.0613938 loss)\nI0817 19:31:27.402384 17472 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 19:33:45.234699 
17472 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:35:07.464370 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49276\nI0817 19:35:07.464656 17472 solver.cpp:404]     Test net output #1: loss = 3.69684 (* 1 = 3.69684 loss)\nI0817 19:35:08.794118 17472 solver.cpp:228] Iteration 5000, loss = 0.0733307\nI0817 19:35:08.794160 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:35:08.794176 17472 solver.cpp:244]     Train net output #1: loss = 0.0733307 (* 1 = 0.0733307 loss)\nI0817 19:35:08.881417 17472 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 19:37:26.482846 17472 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:38:48.722061 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62612\nI0817 19:38:48.722369 17472 solver.cpp:404]     Test net output #1: loss = 2.15713 (* 1 = 2.15713 loss)\nI0817 19:38:50.052083 17472 solver.cpp:228] Iteration 5100, loss = 0.0375117\nI0817 19:38:50.052121 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 19:38:50.052136 17472 solver.cpp:244]     Train net output #1: loss = 0.0375117 (* 1 = 0.0375117 loss)\nI0817 19:38:50.141757 17472 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 19:41:07.761680 17472 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:42:29.975224 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61276\nI0817 19:42:29.975525 17472 solver.cpp:404]     Test net output #1: loss = 2.20249 (* 1 = 2.20249 loss)\nI0817 19:42:31.304829 17472 solver.cpp:228] Iteration 5200, loss = 0.101592\nI0817 19:42:31.304867 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:42:31.304882 17472 solver.cpp:244]     Train net output #1: loss = 0.101592 (* 1 = 0.101592 loss)\nI0817 19:42:31.392608 17472 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 19:44:49.128353 17472 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:46:11.241173 17472 solver.cpp:404]     Test net output #0: accuracy = 
0.60212\nI0817 19:46:11.241458 17472 solver.cpp:404]     Test net output #1: loss = 2.33636 (* 1 = 2.33636 loss)\nI0817 19:46:12.570745 17472 solver.cpp:228] Iteration 5300, loss = 0.0842478\nI0817 19:46:12.570791 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:46:12.570808 17472 solver.cpp:244]     Train net output #1: loss = 0.0842478 (* 1 = 0.0842478 loss)\nI0817 19:46:12.658599 17472 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 19:48:30.427266 17472 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:49:52.452468 17472 solver.cpp:404]     Test net output #0: accuracy = 0.59456\nI0817 19:49:52.452678 17472 solver.cpp:404]     Test net output #1: loss = 2.54082 (* 1 = 2.54082 loss)\nI0817 19:49:53.782080 17472 solver.cpp:228] Iteration 5400, loss = 0.0416478\nI0817 19:49:53.782131 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:49:53.782148 17472 solver.cpp:244]     Train net output #1: loss = 0.0416478 (* 1 = 0.0416478 loss)\nI0817 19:49:53.871558 17472 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 19:52:11.482936 17472 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:53:33.663198 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6284\nI0817 19:53:33.663444 17472 solver.cpp:404]     Test net output #1: loss = 2.31551 (* 1 = 2.31551 loss)\nI0817 19:53:34.993208 17472 solver.cpp:228] Iteration 5500, loss = 0.0533834\nI0817 19:53:34.993257 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:53:34.993274 17472 solver.cpp:244]     Train net output #1: loss = 0.0533834 (* 1 = 0.0533834 loss)\nI0817 19:53:35.083823 17472 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 19:55:52.918555 17472 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:57:14.934095 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64276\nI0817 19:57:14.934310 17472 solver.cpp:404]     Test net output #1: loss = 2.10133 (* 1 = 2.10133 loss)\nI0817 19:57:16.263823 
17472 solver.cpp:228] Iteration 5600, loss = 0.0330325\nI0817 19:57:16.263875 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 19:57:16.263890 17472 solver.cpp:244]     Train net output #1: loss = 0.0330325 (* 1 = 0.0330325 loss)\nI0817 19:57:16.354379 17472 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 19:59:34.016755 17472 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 20:00:55.937588 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64456\nI0817 20:00:55.937813 17472 solver.cpp:404]     Test net output #1: loss = 2.16916 (* 1 = 2.16916 loss)\nI0817 20:00:57.266979 17472 solver.cpp:228] Iteration 5700, loss = 0.0535896\nI0817 20:00:57.267035 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:00:57.267052 17472 solver.cpp:244]     Train net output #1: loss = 0.0535896 (* 1 = 0.0535896 loss)\nI0817 20:00:57.355562 17472 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 20:03:15.029074 17472 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 20:04:36.991770 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67344\nI0817 20:04:36.991973 17472 solver.cpp:404]     Test net output #1: loss = 1.90441 (* 1 = 1.90441 loss)\nI0817 20:04:38.321575 17472 solver.cpp:228] Iteration 5800, loss = 0.0657278\nI0817 20:04:38.321615 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:04:38.321631 17472 solver.cpp:244]     Train net output #1: loss = 0.0657278 (* 1 = 0.0657278 loss)\nI0817 20:04:38.410274 17472 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 20:06:56.021368 17472 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 20:08:17.973033 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62476\nI0817 20:08:17.973249 17472 solver.cpp:404]     Test net output #1: loss = 2.47945 (* 1 = 2.47945 loss)\nI0817 20:08:19.303020 17472 solver.cpp:228] Iteration 5900, loss = 0.0613827\nI0817 20:08:19.303068 17472 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI0817 20:08:19.303086 17472 solver.cpp:244]     Train net output #1: loss = 0.0613827 (* 1 = 0.0613827 loss)\nI0817 20:08:19.388037 17472 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 20:10:36.877543 17472 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 20:11:58.997931 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62576\nI0817 20:11:58.998176 17472 solver.cpp:404]     Test net output #1: loss = 2.44019 (* 1 = 2.44019 loss)\nI0817 20:12:00.327762 17472 solver.cpp:228] Iteration 6000, loss = 0.0607104\nI0817 20:12:00.327805 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:12:00.327821 17472 solver.cpp:244]     Train net output #1: loss = 0.0607104 (* 1 = 0.0607104 loss)\nI0817 20:12:00.417325 17472 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 20:14:18.079409 17472 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 20:15:40.028722 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61816\nI0817 20:15:40.028940 17472 solver.cpp:404]     Test net output #1: loss = 2.55699 (* 1 = 2.55699 loss)\nI0817 20:15:41.359125 17472 solver.cpp:228] Iteration 6100, loss = 0.0887389\nI0817 20:15:41.359177 17472 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:15:41.359194 17472 solver.cpp:244]     Train net output #1: loss = 0.0887389 (* 1 = 0.0887389 loss)\nI0817 20:15:41.444497 17472 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 20:17:59.088927 17472 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:19:21.007407 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61972\nI0817 20:19:21.007609 17472 solver.cpp:404]     Test net output #1: loss = 2.50589 (* 1 = 2.50589 loss)\nI0817 20:19:22.337038 17472 solver.cpp:228] Iteration 6200, loss = 0.052741\nI0817 20:19:22.337088 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:19:22.337105 17472 solver.cpp:244]     Train net output #1: loss = 0.0527409 (* 1 = 0.0527409 loss)\nI0817 20:19:22.428586 
17472 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0817 20:21:40.055729 17472 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:23:02.132372 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48256\nI0817 20:23:02.132637 17472 solver.cpp:404]     Test net output #1: loss = 4.90029 (* 1 = 4.90029 loss)\nI0817 20:23:03.461961 17472 solver.cpp:228] Iteration 6300, loss = 0.0624609\nI0817 20:23:03.462016 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:23:03.462033 17472 solver.cpp:244]     Train net output #1: loss = 0.0624609 (* 1 = 0.0624609 loss)\nI0817 20:23:03.550869 17472 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 20:25:21.305059 17472 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:26:43.458160 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68368\nI0817 20:26:43.458377 17472 solver.cpp:404]     Test net output #1: loss = 1.90833 (* 1 = 1.90833 loss)\nI0817 20:26:44.787978 17472 solver.cpp:228] Iteration 6400, loss = 0.0108885\nI0817 20:26:44.788033 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:26:44.788048 17472 solver.cpp:244]     Train net output #1: loss = 0.0108885 (* 1 = 0.0108885 loss)\nI0817 20:26:44.871573 17472 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 20:29:02.403614 17472 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:30:24.563580 17472 solver.cpp:404]     Test net output #0: accuracy = 0.636\nI0817 20:30:24.563822 17472 solver.cpp:404]     Test net output #1: loss = 2.27807 (* 1 = 2.27807 loss)\nI0817 20:30:25.892580 17472 solver.cpp:228] Iteration 6500, loss = 0.0227156\nI0817 20:30:25.892632 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:30:25.892648 17472 solver.cpp:244]     Train net output #1: loss = 0.0227157 (* 1 = 0.0227157 loss)\nI0817 20:30:25.987139 17472 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 20:32:43.919191 17472 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 
20:34:05.891716 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6374\nI0817 20:34:05.891947 17472 solver.cpp:404]     Test net output #1: loss = 2.41584 (* 1 = 2.41584 loss)\nI0817 20:34:07.221474 17472 solver.cpp:228] Iteration 6600, loss = 0.0809191\nI0817 20:34:07.221527 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:34:07.221546 17472 solver.cpp:244]     Train net output #1: loss = 0.0809192 (* 1 = 0.0809192 loss)\nI0817 20:34:07.312896 17472 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 20:36:24.964325 17472 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:37:46.970309 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6492\nI0817 20:37:46.970520 17472 solver.cpp:404]     Test net output #1: loss = 2.46176 (* 1 = 2.46176 loss)\nI0817 20:37:48.300457 17472 solver.cpp:228] Iteration 6700, loss = 0.0342062\nI0817 20:37:48.300509 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:37:48.300528 17472 solver.cpp:244]     Train net output #1: loss = 0.0342062 (* 1 = 0.0342062 loss)\nI0817 20:37:48.391523 17472 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 20:40:06.160784 17472 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:41:28.203117 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61644\nI0817 20:41:28.203352 17472 solver.cpp:404]     Test net output #1: loss = 2.85434 (* 1 = 2.85434 loss)\nI0817 20:41:29.533891 17472 solver.cpp:228] Iteration 6800, loss = 0.026851\nI0817 20:41:29.533944 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:41:29.533962 17472 solver.cpp:244]     Train net output #1: loss = 0.0268511 (* 1 = 0.0268511 loss)\nI0817 20:41:29.627465 17472 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 20:43:47.305207 17472 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:45:09.430166 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61616\nI0817 20:45:09.430400 17472 solver.cpp:404]     Test net output 
#1: loss = 2.84144 (* 1 = 2.84144 loss)\nI0817 20:45:10.761116 17472 solver.cpp:228] Iteration 6900, loss = 0.0416747\nI0817 20:45:10.761169 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:45:10.761186 17472 solver.cpp:244]     Train net output #1: loss = 0.0416747 (* 1 = 0.0416747 loss)\nI0817 20:45:10.845202 17472 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 20:47:28.412905 17472 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:48:50.361781 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6546\nI0817 20:48:50.362021 17472 solver.cpp:404]     Test net output #1: loss = 2.46073 (* 1 = 2.46073 loss)\nI0817 20:48:51.692638 17472 solver.cpp:228] Iteration 7000, loss = 0.022348\nI0817 20:48:51.692692 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:48:51.692708 17472 solver.cpp:244]     Train net output #1: loss = 0.022348 (* 1 = 0.022348 loss)\nI0817 20:48:51.778712 17472 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 20:51:09.366139 17472 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:52:31.439967 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66444\nI0817 20:52:31.440207 17472 solver.cpp:404]     Test net output #1: loss = 2.17136 (* 1 = 2.17136 loss)\nI0817 20:52:32.770015 17472 solver.cpp:228] Iteration 7100, loss = 0.0071285\nI0817 20:52:32.770067 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:52:32.770084 17472 solver.cpp:244]     Train net output #1: loss = 0.00712852 (* 1 = 0.00712852 loss)\nI0817 20:52:32.857993 17472 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 20:54:50.588179 17472 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:56:12.806411 17472 solver.cpp:404]     Test net output #0: accuracy = 0.60548\nI0817 20:56:12.806622 17472 solver.cpp:404]     Test net output #1: loss = 3.18019 (* 1 = 3.18019 loss)\nI0817 20:56:14.137193 17472 solver.cpp:228] Iteration 7200, loss = 0.0130529\nI0817 20:56:14.137248 
17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:56:14.137264 17472 solver.cpp:244]     Train net output #1: loss = 0.013053 (* 1 = 0.013053 loss)\nI0817 20:56:14.229753 17472 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 20:58:31.903981 17472 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:59:54.122355 17472 solver.cpp:404]     Test net output #0: accuracy = 0.662\nI0817 20:59:54.122584 17472 solver.cpp:404]     Test net output #1: loss = 2.26437 (* 1 = 2.26437 loss)\nI0817 20:59:55.452836 17472 solver.cpp:228] Iteration 7300, loss = 0.0217165\nI0817 20:59:55.452889 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:59:55.452906 17472 solver.cpp:244]     Train net output #1: loss = 0.0217165 (* 1 = 0.0217165 loss)\nI0817 20:59:55.539661 17472 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 21:02:13.106025 17472 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 21:03:35.327661 17472 solver.cpp:404]     Test net output #0: accuracy = 0.59412\nI0817 21:03:35.327877 17472 solver.cpp:404]     Test net output #1: loss = 3.442 (* 1 = 3.442 loss)\nI0817 21:03:36.657891 17472 solver.cpp:228] Iteration 7400, loss = 0.0596804\nI0817 21:03:36.657944 17472 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:03:36.657961 17472 solver.cpp:244]     Train net output #1: loss = 0.0596805 (* 1 = 0.0596805 loss)\nI0817 21:03:36.748697 17472 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 21:05:54.522130 17472 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 21:07:16.741075 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65\nI0817 21:07:16.741293 17472 solver.cpp:404]     Test net output #1: loss = 2.74947 (* 1 = 2.74947 loss)\nI0817 21:07:18.070703 17472 solver.cpp:228] Iteration 7500, loss = 0.0389487\nI0817 21:07:18.070757 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:07:18.070775 17472 solver.cpp:244]     Train net output #1: loss = 0.0389488 
(* 1 = 0.0389488 loss)\nI0817 21:07:18.161803 17472 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0817 21:09:35.817495 17472 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 21:10:57.902482 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67704\nI0817 21:10:57.902688 17472 solver.cpp:404]     Test net output #1: loss = 2.10195 (* 1 = 2.10195 loss)\nI0817 21:10:59.233651 17472 solver.cpp:228] Iteration 7600, loss = 0.0225034\nI0817 21:10:59.233695 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:10:59.233711 17472 solver.cpp:244]     Train net output #1: loss = 0.0225034 (* 1 = 0.0225034 loss)\nI0817 21:10:59.318312 17472 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 21:13:16.871512 17472 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 21:14:38.980674 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66396\nI0817 21:14:38.980900 17472 solver.cpp:404]     Test net output #1: loss = 2.32623 (* 1 = 2.32623 loss)\nI0817 21:14:40.310238 17472 solver.cpp:228] Iteration 7700, loss = 0.00828277\nI0817 21:14:40.310292 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:14:40.310310 17472 solver.cpp:244]     Train net output #1: loss = 0.0082828 (* 1 = 0.0082828 loss)\nI0817 21:14:40.397959 17472 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 21:16:58.045939 17472 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:18:20.192438 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62944\nI0817 21:18:20.192652 17472 solver.cpp:404]     Test net output #1: loss = 2.81448 (* 1 = 2.81448 loss)\nI0817 21:18:21.521896 17472 solver.cpp:228] Iteration 7800, loss = 0.0217787\nI0817 21:18:21.521950 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:18:21.521967 17472 solver.cpp:244]     Train net output #1: loss = 0.0217787 (* 1 = 0.0217787 loss)\nI0817 21:18:21.612515 17472 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 21:20:39.261441 17472 
solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:22:01.449156 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64912\nI0817 21:22:01.449364 17472 solver.cpp:404]     Test net output #1: loss = 2.72818 (* 1 = 2.72818 loss)\nI0817 21:22:02.779206 17472 solver.cpp:228] Iteration 7900, loss = 0.0497488\nI0817 21:22:02.779255 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:22:02.779273 17472 solver.cpp:244]     Train net output #1: loss = 0.0497488 (* 1 = 0.0497488 loss)\nI0817 21:22:02.868347 17472 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 21:24:20.533219 17472 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:25:42.721385 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62184\nI0817 21:25:42.721621 17472 solver.cpp:404]     Test net output #1: loss = 3.09444 (* 1 = 3.09444 loss)\nI0817 21:25:44.051317 17472 solver.cpp:228] Iteration 8000, loss = 0.0245892\nI0817 21:25:44.051371 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:25:44.051388 17472 solver.cpp:244]     Train net output #1: loss = 0.0245892 (* 1 = 0.0245892 loss)\nI0817 21:25:44.140278 17472 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 21:28:01.832942 17472 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:29:24.010679 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67008\nI0817 21:29:24.010910 17472 solver.cpp:404]     Test net output #1: loss = 2.32917 (* 1 = 2.32917 loss)\nI0817 21:29:25.340865 17472 solver.cpp:228] Iteration 8100, loss = 0.0361696\nI0817 21:29:25.340909 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:29:25.340924 17472 solver.cpp:244]     Train net output #1: loss = 0.0361697 (* 1 = 0.0361697 loss)\nI0817 21:29:25.427099 17472 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 21:31:43.078457 17472 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:33:05.286171 17472 solver.cpp:404]     Test net output #0: accuracy = 
0.63872\nI0817 21:33:05.286429 17472 solver.cpp:404]     Test net output #1: loss = 2.67954 (* 1 = 2.67954 loss)\nI0817 21:33:06.616672 17472 solver.cpp:228] Iteration 8200, loss = 0.0373406\nI0817 21:33:06.616715 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:33:06.616731 17472 solver.cpp:244]     Train net output #1: loss = 0.0373406 (* 1 = 0.0373406 loss)\nI0817 21:33:06.704121 17472 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 21:35:24.539489 17472 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:36:46.766793 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64316\nI0817 21:36:46.767012 17472 solver.cpp:404]     Test net output #1: loss = 2.70344 (* 1 = 2.70344 loss)\nI0817 21:36:48.096226 17472 solver.cpp:228] Iteration 8300, loss = 0.0296568\nI0817 21:36:48.096280 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:36:48.096298 17472 solver.cpp:244]     Train net output #1: loss = 0.0296568 (* 1 = 0.0296568 loss)\nI0817 21:36:48.182567 17472 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 21:39:05.997987 17472 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:40:28.176398 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64988\nI0817 21:40:28.176625 17472 solver.cpp:404]     Test net output #1: loss = 2.50144 (* 1 = 2.50144 loss)\nI0817 21:40:29.506211 17472 solver.cpp:228] Iteration 8400, loss = 0.0147666\nI0817 21:40:29.506261 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:40:29.506279 17472 solver.cpp:244]     Train net output #1: loss = 0.0147666 (* 1 = 0.0147666 loss)\nI0817 21:40:29.593416 17472 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 21:42:47.274663 17472 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:44:09.497938 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66504\nI0817 21:44:09.498186 17472 solver.cpp:404]     Test net output #1: loss = 2.37229 (* 1 = 2.37229 loss)\nI0817 21:44:10.827296 
17472 solver.cpp:228] Iteration 8500, loss = 0.0190726\nI0817 21:44:10.827347 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:44:10.827363 17472 solver.cpp:244]     Train net output #1: loss = 0.0190727 (* 1 = 0.0190727 loss)\nI0817 21:44:10.919085 17472 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 21:46:28.594318 17472 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:47:50.811386 17472 solver.cpp:404]     Test net output #0: accuracy = 0.57164\nI0817 21:47:50.811604 17472 solver.cpp:404]     Test net output #1: loss = 3.84165 (* 1 = 3.84165 loss)\nI0817 21:47:52.141165 17472 solver.cpp:228] Iteration 8600, loss = 0.070782\nI0817 21:47:52.141208 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:47:52.141224 17472 solver.cpp:244]     Train net output #1: loss = 0.070782 (* 1 = 0.070782 loss)\nI0817 21:47:52.229733 17472 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 21:50:09.882447 17472 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:51:32.058262 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66088\nI0817 21:51:32.058534 17472 solver.cpp:404]     Test net output #1: loss = 2.36539 (* 1 = 2.36539 loss)\nI0817 21:51:33.388790 17472 solver.cpp:228] Iteration 8700, loss = 0.0117044\nI0817 21:51:33.388842 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:51:33.388859 17472 solver.cpp:244]     Train net output #1: loss = 0.0117045 (* 1 = 0.0117045 loss)\nI0817 21:51:33.475545 17472 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 21:53:51.188578 17472 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:55:13.366323 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65184\nI0817 21:55:13.366561 17472 solver.cpp:404]     Test net output #1: loss = 2.39887 (* 1 = 2.39887 loss)\nI0817 21:55:14.696657 17472 solver.cpp:228] Iteration 8800, loss = 0.0162437\nI0817 21:55:14.696701 17472 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0817 21:55:14.696715 17472 solver.cpp:244]     Train net output #1: loss = 0.0162438 (* 1 = 0.0162438 loss)\nI0817 21:55:14.782464 17472 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 21:57:32.514019 17472 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:58:54.665987 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69064\nI0817 21:58:54.666213 17472 solver.cpp:404]     Test net output #1: loss = 2.00455 (* 1 = 2.00455 loss)\nI0817 21:58:55.995759 17472 solver.cpp:228] Iteration 8900, loss = 0.0128602\nI0817 21:58:55.995811 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:58:55.995828 17472 solver.cpp:244]     Train net output #1: loss = 0.0128602 (* 1 = 0.0128602 loss)\nI0817 21:58:56.087813 17472 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 22:01:13.825361 17472 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 22:02:35.987941 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64808\nI0817 22:02:35.988210 17472 solver.cpp:404]     Test net output #1: loss = 2.45871 (* 1 = 2.45871 loss)\nI0817 22:02:37.317529 17472 solver.cpp:228] Iteration 9000, loss = 0.0151493\nI0817 22:02:37.317580 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:02:37.317597 17472 solver.cpp:244]     Train net output #1: loss = 0.0151493 (* 1 = 0.0151493 loss)\nI0817 22:02:37.491766 17472 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 22:04:55.292724 17472 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 22:06:17.473654 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66828\nI0817 22:06:17.473902 17472 solver.cpp:404]     Test net output #1: loss = 2.33543 (* 1 = 2.33543 loss)\nI0817 22:06:18.802924 17472 solver.cpp:228] Iteration 9100, loss = 0.0119111\nI0817 22:06:18.802973 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:06:18.802991 17472 solver.cpp:244]     Train net output #1: loss = 0.0119112 (* 1 = 0.0119112 loss)\nI0817 22:06:18.892148 17472 
sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 22:08:36.722236 17472 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 22:09:58.904433 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68116\nI0817 22:09:58.904649 17472 solver.cpp:404]     Test net output #1: loss = 2.28264 (* 1 = 2.28264 loss)\nI0817 22:10:00.235327 17472 solver.cpp:228] Iteration 9200, loss = 0.0095072\nI0817 22:10:00.235379 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:10:00.235396 17472 solver.cpp:244]     Train net output #1: loss = 0.00950724 (* 1 = 0.00950724 loss)\nI0817 22:10:00.324009 17472 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 22:12:18.094444 17472 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 22:13:40.288012 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65976\nI0817 22:13:40.288239 17472 solver.cpp:404]     Test net output #1: loss = 2.53858 (* 1 = 2.53858 loss)\nI0817 22:13:41.617161 17472 solver.cpp:228] Iteration 9300, loss = 0.0978636\nI0817 22:13:41.617213 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:13:41.617230 17472 solver.cpp:244]     Train net output #1: loss = 0.0978637 (* 1 = 0.0978637 loss)\nI0817 22:13:41.707391 17472 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 22:15:59.328289 17472 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:17:21.525614 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6838\nI0817 22:17:21.525833 17472 solver.cpp:404]     Test net output #1: loss = 2.19092 (* 1 = 2.19092 loss)\nI0817 22:17:22.856149 17472 solver.cpp:228] Iteration 9400, loss = 0.00502137\nI0817 22:17:22.856202 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:17:22.856220 17472 solver.cpp:244]     Train net output #1: loss = 0.00502144 (* 1 = 0.00502144 loss)\nI0817 22:17:22.945139 17472 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 22:19:40.618257 17472 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 
22:21:02.836113 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66596\nI0817 22:21:02.836393 17472 solver.cpp:404]     Test net output #1: loss = 2.47985 (* 1 = 2.47985 loss)\nI0817 22:21:04.166775 17472 solver.cpp:228] Iteration 9500, loss = 0.0160097\nI0817 22:21:04.166831 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:21:04.166847 17472 solver.cpp:244]     Train net output #1: loss = 0.0160098 (* 1 = 0.0160098 loss)\nI0817 22:21:04.253692 17472 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 22:23:22.033705 17472 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:24:44.244451 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67392\nI0817 22:24:44.244671 17472 solver.cpp:404]     Test net output #1: loss = 2.32675 (* 1 = 2.32675 loss)\nI0817 22:24:45.574379 17472 solver.cpp:228] Iteration 9600, loss = 0.00877635\nI0817 22:24:45.574431 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:24:45.574448 17472 solver.cpp:244]     Train net output #1: loss = 0.00877641 (* 1 = 0.00877641 loss)\nI0817 22:24:45.662771 17472 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 22:27:03.540647 17472 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:28:25.751458 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64448\nI0817 22:28:25.751699 17472 solver.cpp:404]     Test net output #1: loss = 2.95006 (* 1 = 2.95006 loss)\nI0817 22:28:27.080842 17472 solver.cpp:228] Iteration 9700, loss = 0.0173992\nI0817 22:28:27.080893 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:28:27.080909 17472 solver.cpp:244]     Train net output #1: loss = 0.0173993 (* 1 = 0.0173993 loss)\nI0817 22:28:27.173830 17472 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 22:30:44.953624 17472 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:32:07.182432 17472 solver.cpp:404]     Test net output #0: accuracy = 0.60596\nI0817 22:32:07.182678 17472 solver.cpp:404]     Test net output 
#1: loss = 3.37903 (* 1 = 3.37903 loss)\nI0817 22:32:08.513234 17472 solver.cpp:228] Iteration 9800, loss = 0.0317373\nI0817 22:32:08.513284 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:32:08.513301 17472 solver.cpp:244]     Train net output #1: loss = 0.0317374 (* 1 = 0.0317374 loss)\nI0817 22:32:08.602244 17472 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 22:34:26.169999 17472 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:35:48.385648 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6498\nI0817 22:35:48.385896 17472 solver.cpp:404]     Test net output #1: loss = 2.62389 (* 1 = 2.62389 loss)\nI0817 22:35:49.715605 17472 solver.cpp:228] Iteration 9900, loss = 0.00529173\nI0817 22:35:49.715654 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:35:49.715672 17472 solver.cpp:244]     Train net output #1: loss = 0.0052918 (* 1 = 0.0052918 loss)\nI0817 22:35:49.802271 17472 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 22:38:07.475571 17472 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:39:29.675935 17472 solver.cpp:404]     Test net output #0: accuracy = 0.55136\nI0817 22:39:29.676188 17472 solver.cpp:404]     Test net output #1: loss = 4.31915 (* 1 = 4.31915 loss)\nI0817 22:39:31.005404 17472 solver.cpp:228] Iteration 10000, loss = 0.0269076\nI0817 22:39:31.005455 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:39:31.005472 17472 solver.cpp:244]     Train net output #1: loss = 0.0269077 (* 1 = 0.0269077 loss)\nI0817 22:39:31.091743 17472 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 22:41:48.919579 17472 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:43:11.126961 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62532\nI0817 22:43:11.127246 17472 solver.cpp:404]     Test net output #1: loss = 3.04085 (* 1 = 3.04085 loss)\nI0817 22:43:12.457430 17472 solver.cpp:228] Iteration 10100, loss = 0.00868711\nI0817 
22:43:12.457484 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:43:12.457500 17472 solver.cpp:244]     Train net output #1: loss = 0.00868719 (* 1 = 0.00868719 loss)\nI0817 22:43:12.547790 17472 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 22:45:30.372733 17472 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:46:52.612210 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66068\nI0817 22:46:52.612437 17472 solver.cpp:404]     Test net output #1: loss = 2.62509 (* 1 = 2.62509 loss)\nI0817 22:46:53.942018 17472 solver.cpp:228] Iteration 10200, loss = 0.017882\nI0817 22:46:53.942070 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:46:53.942086 17472 solver.cpp:244]     Train net output #1: loss = 0.017882 (* 1 = 0.017882 loss)\nI0817 22:46:54.030267 17472 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 22:49:11.718444 17472 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:50:33.962246 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64156\nI0817 22:50:33.962502 17472 solver.cpp:404]     Test net output #1: loss = 2.85227 (* 1 = 2.85227 loss)\nI0817 22:50:35.292165 17472 solver.cpp:228] Iteration 10300, loss = 0.0146046\nI0817 22:50:35.292217 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:50:35.292233 17472 solver.cpp:244]     Train net output #1: loss = 0.0146047 (* 1 = 0.0146047 loss)\nI0817 22:50:35.377334 17472 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 22:52:53.064095 17472 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:54:15.298873 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64376\nI0817 22:54:15.299120 17472 solver.cpp:404]     Test net output #1: loss = 2.63739 (* 1 = 2.63739 loss)\nI0817 22:54:16.628844 17472 solver.cpp:228] Iteration 10400, loss = 0.0261263\nI0817 22:54:16.628897 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:54:16.628914 17472 solver.cpp:244]     Train 
net output #1: loss = 0.0261264 (* 1 = 0.0261264 loss)\nI0817 22:54:16.719410 17472 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0817 22:56:34.507666 17472 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:57:56.742965 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66616\nI0817 22:57:56.743185 17472 solver.cpp:404]     Test net output #1: loss = 2.54108 (* 1 = 2.54108 loss)\nI0817 22:57:58.073060 17472 solver.cpp:228] Iteration 10500, loss = 0.0164278\nI0817 22:57:58.073112 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:57:58.073129 17472 solver.cpp:244]     Train net output #1: loss = 0.0164278 (* 1 = 0.0164278 loss)\nI0817 22:57:58.162227 17472 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0817 23:00:15.937094 17472 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 23:01:38.177290 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61972\nI0817 23:01:38.177572 17472 solver.cpp:404]     Test net output #1: loss = 3.11534 (* 1 = 3.11534 loss)\nI0817 23:01:39.507983 17472 solver.cpp:228] Iteration 10600, loss = 0.0416271\nI0817 23:01:39.508028 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:01:39.508044 17472 solver.cpp:244]     Train net output #1: loss = 0.0416272 (* 1 = 0.0416272 loss)\nI0817 23:01:39.594907 17472 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 23:03:57.291788 17472 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 23:05:19.522311 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66092\nI0817 23:05:19.522552 17472 solver.cpp:404]     Test net output #1: loss = 2.51203 (* 1 = 2.51203 loss)\nI0817 23:05:20.852191 17472 solver.cpp:228] Iteration 10700, loss = 0.0189453\nI0817 23:05:20.852243 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:05:20.852260 17472 solver.cpp:244]     Train net output #1: loss = 0.0189454 (* 1 = 0.0189454 loss)\nI0817 23:05:20.943565 17472 sgd_solver.cpp:166] Iteration 10700, lr = 
0.35\nI0817 23:07:38.751211 17472 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 23:09:00.985138 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65192\nI0817 23:09:00.985360 17472 solver.cpp:404]     Test net output #1: loss = 2.75388 (* 1 = 2.75388 loss)\nI0817 23:09:02.316419 17472 solver.cpp:228] Iteration 10800, loss = 0.0137819\nI0817 23:09:02.316473 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:09:02.316490 17472 solver.cpp:244]     Train net output #1: loss = 0.013782 (* 1 = 0.013782 loss)\nI0817 23:09:02.404995 17472 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 23:11:20.111197 17472 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 23:12:42.339298 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66264\nI0817 23:12:42.339582 17472 solver.cpp:404]     Test net output #1: loss = 2.56933 (* 1 = 2.56933 loss)\nI0817 23:12:43.669072 17472 solver.cpp:228] Iteration 10900, loss = 0.0125403\nI0817 23:12:43.669123 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:12:43.669139 17472 solver.cpp:244]     Train net output #1: loss = 0.0125404 (* 1 = 0.0125404 loss)\nI0817 23:12:43.755625 17472 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 23:15:01.323813 17472 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 23:16:23.549996 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68684\nI0817 23:16:23.550240 17472 solver.cpp:404]     Test net output #1: loss = 2.17627 (* 1 = 2.17627 loss)\nI0817 23:16:24.880378 17472 solver.cpp:228] Iteration 11000, loss = 0.0443576\nI0817 23:16:24.880419 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:16:24.880435 17472 solver.cpp:244]     Train net output #1: loss = 0.0443577 (* 1 = 0.0443577 loss)\nI0817 23:16:24.968336 17472 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 23:18:42.577275 17472 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 23:20:04.802752 17472 solver.cpp:404]     
Test net output #0: accuracy = 0.65352\nI0817 23:20:04.802994 17472 solver.cpp:404]     Test net output #1: loss = 2.77851 (* 1 = 2.77851 loss)\nI0817 23:20:06.131764 17472 solver.cpp:228] Iteration 11100, loss = 0.0308416\nI0817 23:20:06.131816 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:20:06.131834 17472 solver.cpp:244]     Train net output #1: loss = 0.0308417 (* 1 = 0.0308417 loss)\nI0817 23:20:06.222865 17472 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 23:22:23.941190 17472 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:23:46.180373 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6664\nI0817 23:23:46.180651 17472 solver.cpp:404]     Test net output #1: loss = 2.53939 (* 1 = 2.53939 loss)\nI0817 23:23:47.509856 17472 solver.cpp:228] Iteration 11200, loss = 0.00742375\nI0817 23:23:47.509907 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:23:47.509924 17472 solver.cpp:244]     Train net output #1: loss = 0.00742381 (* 1 = 0.00742381 loss)\nI0817 23:23:47.600690 17472 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 23:26:05.201957 17472 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:27:27.440868 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65848\nI0817 23:27:27.441100 17472 solver.cpp:404]     Test net output #1: loss = 2.55624 (* 1 = 2.55624 loss)\nI0817 23:27:28.770491 17472 solver.cpp:228] Iteration 11300, loss = 0.00703158\nI0817 23:27:28.770541 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:27:28.770558 17472 solver.cpp:244]     Train net output #1: loss = 0.00703164 (* 1 = 0.00703164 loss)\nI0817 23:27:28.861065 17472 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 23:29:46.560504 17472 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:31:08.790164 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64084\nI0817 23:31:08.790423 17472 solver.cpp:404]     Test net output #1: loss = 2.82502 (* 1 = 
2.82502 loss)\nI0817 23:31:10.120239 17472 solver.cpp:228] Iteration 11400, loss = 0.0144918\nI0817 23:31:10.120287 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:31:10.120304 17472 solver.cpp:244]     Train net output #1: loss = 0.0144918 (* 1 = 0.0144918 loss)\nI0817 23:31:10.206218 17472 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 23:33:27.910284 17472 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:34:50.119298 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62932\nI0817 23:34:50.119525 17472 solver.cpp:404]     Test net output #1: loss = 3.12132 (* 1 = 3.12132 loss)\nI0817 23:34:51.448858 17472 solver.cpp:228] Iteration 11500, loss = 0.0155173\nI0817 23:34:51.448907 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:34:51.448925 17472 solver.cpp:244]     Train net output #1: loss = 0.0155174 (* 1 = 0.0155174 loss)\nI0817 23:34:51.540856 17472 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 23:37:09.314435 17472 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:38:31.538326 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64008\nI0817 23:38:31.538554 17472 solver.cpp:404]     Test net output #1: loss = 3.00964 (* 1 = 3.00964 loss)\nI0817 23:38:32.867861 17472 solver.cpp:228] Iteration 11600, loss = 0.00755094\nI0817 23:38:32.867909 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:38:32.867926 17472 solver.cpp:244]     Train net output #1: loss = 0.00755101 (* 1 = 0.00755101 loss)\nI0817 23:38:32.960397 17472 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 23:40:50.737048 17472 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:42:12.956110 17472 solver.cpp:404]     Test net output #0: accuracy = 0.535\nI0817 23:42:12.956369 17472 solver.cpp:404]     Test net output #1: loss = 5.15504 (* 1 = 5.15504 loss)\nI0817 23:42:14.286248 17472 solver.cpp:228] Iteration 11700, loss = 0.0385441\nI0817 23:42:14.286299 17472 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:42:14.286316 17472 solver.cpp:244]     Train net output #1: loss = 0.0385442 (* 1 = 0.0385442 loss)\nI0817 23:42:14.371397 17472 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 23:44:32.141722 17472 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:45:54.362736 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64588\nI0817 23:45:54.362967 17472 solver.cpp:404]     Test net output #1: loss = 2.80215 (* 1 = 2.80215 loss)\nI0817 23:45:55.694010 17472 solver.cpp:228] Iteration 11800, loss = 0.00722332\nI0817 23:45:55.694061 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:45:55.694077 17472 solver.cpp:244]     Train net output #1: loss = 0.0072234 (* 1 = 0.0072234 loss)\nI0817 23:45:55.777268 17472 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 23:48:13.333081 17472 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:49:35.579898 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66044\nI0817 23:49:35.580123 17472 solver.cpp:404]     Test net output #1: loss = 2.75177 (* 1 = 2.75177 loss)\nI0817 23:49:36.910323 17472 solver.cpp:228] Iteration 11900, loss = 0.00939957\nI0817 23:49:36.910378 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:49:36.910395 17472 solver.cpp:244]     Train net output #1: loss = 0.00939966 (* 1 = 0.00939966 loss)\nI0817 23:49:37.009562 17472 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 23:51:54.684096 17472 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:53:16.934373 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6398\nI0817 23:53:16.934656 17472 solver.cpp:404]     Test net output #1: loss = 3.26316 (* 1 = 3.26316 loss)\nI0817 23:53:18.264339 17472 solver.cpp:228] Iteration 12000, loss = 0.0190283\nI0817 23:53:18.264392 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:53:18.264410 17472 solver.cpp:244]     Train net output #1: loss 
= 0.0190284 (* 1 = 0.0190284 loss)\nI0817 23:53:18.355834 17472 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0817 23:55:36.021514 17472 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 23:56:58.276741 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7048\nI0817 23:56:58.277037 17472 solver.cpp:404]     Test net output #1: loss = 2.08157 (* 1 = 2.08157 loss)\nI0817 23:56:59.607815 17472 solver.cpp:228] Iteration 12100, loss = 0.0229905\nI0817 23:56:59.607872 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:56:59.607887 17472 solver.cpp:244]     Train net output #1: loss = 0.0229905 (* 1 = 0.0229905 loss)\nI0817 23:56:59.697268 17472 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0817 23:59:17.305580 17472 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0818 00:00:39.549237 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68756\nI0818 00:00:39.549515 17472 solver.cpp:404]     Test net output #1: loss = 2.36123 (* 1 = 2.36123 loss)\nI0818 00:00:40.879743 17472 solver.cpp:228] Iteration 12200, loss = 0.00770505\nI0818 00:00:40.879787 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:00:40.879803 17472 solver.cpp:244]     Train net output #1: loss = 0.00770515 (* 1 = 0.00770515 loss)\nI0818 00:00:40.966367 17472 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0818 00:02:58.586398 17472 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 00:04:20.818331 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67844\nI0818 00:04:20.818625 17472 solver.cpp:404]     Test net output #1: loss = 2.42195 (* 1 = 2.42195 loss)\nI0818 00:04:22.148547 17472 solver.cpp:228] Iteration 12300, loss = 0.0025236\nI0818 00:04:22.148598 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:04:22.148615 17472 solver.cpp:244]     Train net output #1: loss = 0.00252369 (* 1 = 0.00252369 loss)\nI0818 00:04:22.234669 17472 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0818 
00:06:39.940901 17472 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 00:08:02.190815 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67732\nI0818 00:08:02.191103 17472 solver.cpp:404]     Test net output #1: loss = 2.48521 (* 1 = 2.48521 loss)\nI0818 00:08:03.520823 17472 solver.cpp:228] Iteration 12400, loss = 0.00615556\nI0818 00:08:03.520876 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:08:03.520894 17472 solver.cpp:244]     Train net output #1: loss = 0.00615567 (* 1 = 0.00615567 loss)\nI0818 00:08:03.610582 17472 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0818 00:10:21.228873 17472 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 00:11:43.465201 17472 solver.cpp:404]     Test net output #0: accuracy = 0.60136\nI0818 00:11:43.465486 17472 solver.cpp:404]     Test net output #1: loss = 3.98514 (* 1 = 3.98514 loss)\nI0818 00:11:44.794858 17472 solver.cpp:228] Iteration 12500, loss = 0.0118468\nI0818 00:11:44.794909 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:11:44.794926 17472 solver.cpp:244]     Train net output #1: loss = 0.0118469 (* 1 = 0.0118469 loss)\nI0818 00:11:44.885040 17472 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0818 00:14:02.537125 17472 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 00:15:24.770416 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66756\nI0818 00:15:24.770716 17472 solver.cpp:404]     Test net output #1: loss = 2.71812 (* 1 = 2.71812 loss)\nI0818 00:15:26.099928 17472 solver.cpp:228] Iteration 12600, loss = 0.0273763\nI0818 00:15:26.099969 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:15:26.099985 17472 solver.cpp:244]     Train net output #1: loss = 0.0273763 (* 1 = 0.0273763 loss)\nI0818 00:15:26.186727 17472 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0818 00:17:43.784549 17472 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:19:06.025701 17472 solver.cpp:404]     Test net 
output #0: accuracy = 0.64292\nI0818 00:19:06.025995 17472 solver.cpp:404]     Test net output #1: loss = 3.15635 (* 1 = 3.15635 loss)\nI0818 00:19:07.355536 17472 solver.cpp:228] Iteration 12700, loss = 0.0249243\nI0818 00:19:07.355581 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:19:07.355597 17472 solver.cpp:244]     Train net output #1: loss = 0.0249244 (* 1 = 0.0249244 loss)\nI0818 00:19:07.441546 17472 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0818 00:21:25.009359 17472 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:22:47.257638 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63052\nI0818 00:22:47.257941 17472 solver.cpp:404]     Test net output #1: loss = 3.40482 (* 1 = 3.40482 loss)\nI0818 00:22:48.587518 17472 solver.cpp:228] Iteration 12800, loss = 0.00660873\nI0818 00:22:48.587556 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:22:48.587571 17472 solver.cpp:244]     Train net output #1: loss = 0.00660884 (* 1 = 0.00660884 loss)\nI0818 00:22:48.674715 17472 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0818 00:25:06.307879 17472 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:26:28.555977 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68252\nI0818 00:26:28.556285 17472 solver.cpp:404]     Test net output #1: loss = 2.54898 (* 1 = 2.54898 loss)\nI0818 00:26:29.886675 17472 solver.cpp:228] Iteration 12900, loss = 0.00497169\nI0818 00:26:29.886718 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:26:29.886734 17472 solver.cpp:244]     Train net output #1: loss = 0.00497181 (* 1 = 0.00497181 loss)\nI0818 00:26:29.971499 17472 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0818 00:28:47.540822 17472 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:30:09.792398 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66688\nI0818 00:30:09.792691 17472 solver.cpp:404]     Test net output #1: loss = 2.63559 (* 1 = 2.63559 
loss)\nI0818 00:30:11.122206 17472 solver.cpp:228] Iteration 13000, loss = 0.0150544\nI0818 00:30:11.122248 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:30:11.122264 17472 solver.cpp:244]     Train net output #1: loss = 0.0150545 (* 1 = 0.0150545 loss)\nI0818 00:30:11.208477 17472 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0818 00:32:29.163056 17472 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:33:51.420900 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68256\nI0818 00:33:51.421188 17472 solver.cpp:404]     Test net output #1: loss = 2.40628 (* 1 = 2.40628 loss)\nI0818 00:33:52.751096 17472 solver.cpp:228] Iteration 13100, loss = 0.017137\nI0818 00:33:52.751137 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:33:52.751152 17472 solver.cpp:244]     Train net output #1: loss = 0.0171371 (* 1 = 0.0171371 loss)\nI0818 00:33:52.836227 17472 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0818 00:36:10.651268 17472 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:37:32.886157 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6572\nI0818 00:37:32.886437 17472 solver.cpp:404]     Test net output #1: loss = 2.95695 (* 1 = 2.95695 loss)\nI0818 00:37:34.215979 17472 solver.cpp:228] Iteration 13200, loss = 0.0143775\nI0818 00:37:34.216022 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:37:34.216042 17472 solver.cpp:244]     Train net output #1: loss = 0.0143776 (* 1 = 0.0143776 loss)\nI0818 00:37:34.305796 17472 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0818 00:39:52.046329 17472 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:41:14.269574 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65976\nI0818 00:41:14.269883 17472 solver.cpp:404]     Test net output #1: loss = 2.77017 (* 1 = 2.77017 loss)\nI0818 00:41:15.599735 17472 solver.cpp:228] Iteration 13300, loss = 0.00473772\nI0818 00:41:15.599774 17472 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0818 00:41:15.599789 17472 solver.cpp:244]     Train net output #1: loss = 0.00473782 (* 1 = 0.00473782 loss)\nI0818 00:41:15.691747 17472 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0818 00:43:33.420692 17472 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:44:55.315241 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67376\nI0818 00:44:55.315507 17472 solver.cpp:404]     Test net output #1: loss = 2.61373 (* 1 = 2.61373 loss)\nI0818 00:44:56.646237 17472 solver.cpp:228] Iteration 13400, loss = 0.00146168\nI0818 00:44:56.646281 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:44:56.646297 17472 solver.cpp:244]     Train net output #1: loss = 0.00146179 (* 1 = 0.00146179 loss)\nI0818 00:44:56.730608 17472 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0818 00:47:14.370088 17472 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:48:35.794389 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69376\nI0818 00:48:35.794622 17472 solver.cpp:404]     Test net output #1: loss = 2.32791 (* 1 = 2.32791 loss)\nI0818 00:48:37.116686 17472 solver.cpp:228] Iteration 13500, loss = 0.00660889\nI0818 00:48:37.116734 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:48:37.116751 17472 solver.cpp:244]     Train net output #1: loss = 0.006609 (* 1 = 0.006609 loss)\nI0818 00:48:37.211833 17472 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0818 00:50:54.701465 17472 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:52:16.130784 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65076\nI0818 00:52:16.131013 17472 solver.cpp:404]     Test net output #1: loss = 2.91049 (* 1 = 2.91049 loss)\nI0818 00:52:17.454293 17472 solver.cpp:228] Iteration 13600, loss = 0.00973701\nI0818 00:52:17.454336 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:52:17.454352 17472 solver.cpp:244]     Train net output #1: loss = 0.00973711 (* 1 = 
0.00973711 loss)\nI0818 00:52:17.549674 17472 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0818 00:54:34.987004 17472 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:55:56.400302 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67696\nI0818 00:55:56.400519 17472 solver.cpp:404]     Test net output #1: loss = 2.54718 (* 1 = 2.54718 loss)\nI0818 00:55:57.723493 17472 solver.cpp:228] Iteration 13700, loss = 0.00516961\nI0818 00:55:57.723538 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:55:57.723554 17472 solver.cpp:244]     Train net output #1: loss = 0.00516971 (* 1 = 0.00516971 loss)\nI0818 00:55:57.812665 17472 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0818 00:58:15.267807 17472 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:59:36.678552 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67628\nI0818 00:59:36.678767 17472 solver.cpp:404]     Test net output #1: loss = 2.6064 (* 1 = 2.6064 loss)\nI0818 00:59:38.001565 17472 solver.cpp:228] Iteration 13800, loss = 0.0227302\nI0818 00:59:38.001611 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:59:38.001628 17472 solver.cpp:244]     Train net output #1: loss = 0.0227303 (* 1 = 0.0227303 loss)\nI0818 00:59:38.091428 17472 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0818 01:01:55.443130 17472 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 01:03:16.856215 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69164\nI0818 01:03:16.856441 17472 solver.cpp:404]     Test net output #1: loss = 2.44772 (* 1 = 2.44772 loss)\nI0818 01:03:18.179163 17472 solver.cpp:228] Iteration 13900, loss = 0.035385\nI0818 01:03:18.179208 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:03:18.179225 17472 solver.cpp:244]     Train net output #1: loss = 0.0353851 (* 1 = 0.0353851 loss)\nI0818 01:03:18.270999 17472 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0818 01:05:35.705529 17472 
solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 01:06:57.101671 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65824\nI0818 01:06:57.101923 17472 solver.cpp:404]     Test net output #1: loss = 2.8662 (* 1 = 2.8662 loss)\nI0818 01:06:58.424741 17472 solver.cpp:228] Iteration 14000, loss = 0.00686679\nI0818 01:06:58.424785 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:06:58.424803 17472 solver.cpp:244]     Train net output #1: loss = 0.00686689 (* 1 = 0.00686689 loss)\nI0818 01:06:58.516513 17472 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 01:09:15.937209 17472 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 01:10:37.358281 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66112\nI0818 01:10:37.358536 17472 solver.cpp:404]     Test net output #1: loss = 2.81947 (* 1 = 2.81947 loss)\nI0818 01:10:38.681335 17472 solver.cpp:228] Iteration 14100, loss = 0.0113953\nI0818 01:10:38.681382 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:10:38.681397 17472 solver.cpp:244]     Train net output #1: loss = 0.0113954 (* 1 = 0.0113954 loss)\nI0818 01:10:38.773102 17472 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0818 01:12:56.158216 17472 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 01:14:17.567608 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66608\nI0818 01:14:17.567821 17472 solver.cpp:404]     Test net output #1: loss = 2.80114 (* 1 = 2.80114 loss)\nI0818 01:14:18.889645 17472 solver.cpp:228] Iteration 14200, loss = 0.0303059\nI0818 01:14:18.889689 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:14:18.889705 17472 solver.cpp:244]     Train net output #1: loss = 0.030306 (* 1 = 0.030306 loss)\nI0818 01:14:18.988031 17472 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0818 01:16:36.523558 17472 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 01:17:57.927871 17472 solver.cpp:404]     Test net output #0: accuracy = 
0.67536\nI0818 01:17:57.928119 17472 solver.cpp:404]     Test net output #1: loss = 2.6209 (* 1 = 2.6209 loss)\nI0818 01:17:59.251194 17472 solver.cpp:228] Iteration 14300, loss = 0.012489\nI0818 01:17:59.251240 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:17:59.251255 17472 solver.cpp:244]     Train net output #1: loss = 0.0124891 (* 1 = 0.0124891 loss)\nI0818 01:17:59.341295 17472 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0818 01:20:16.760784 17472 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:21:38.173413 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65696\nI0818 01:21:38.173653 17472 solver.cpp:404]     Test net output #1: loss = 2.81645 (* 1 = 2.81645 loss)\nI0818 01:21:39.497314 17472 solver.cpp:228] Iteration 14400, loss = 0.0132125\nI0818 01:21:39.497359 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:21:39.497375 17472 solver.cpp:244]     Train net output #1: loss = 0.0132126 (* 1 = 0.0132126 loss)\nI0818 01:21:39.585001 17472 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0818 01:23:57.136782 17472 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:25:18.550495 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6628\nI0818 01:25:18.550734 17472 solver.cpp:404]     Test net output #1: loss = 2.74375 (* 1 = 2.74375 loss)\nI0818 01:25:19.873222 17472 solver.cpp:228] Iteration 14500, loss = 0.01373\nI0818 01:25:19.873272 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:25:19.873298 17472 solver.cpp:244]     Train net output #1: loss = 0.0137301 (* 1 = 0.0137301 loss)\nI0818 01:25:19.965507 17472 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0818 01:27:37.511615 17472 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:28:58.922801 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68048\nI0818 01:28:58.923027 17472 solver.cpp:404]     Test net output #1: loss = 2.46142 (* 1 = 2.46142 loss)\nI0818 01:29:00.244832 
17472 solver.cpp:228] Iteration 14600, loss = 0.00519187\nI0818 01:29:00.244881 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:29:00.244899 17472 solver.cpp:244]     Train net output #1: loss = 0.00519199 (* 1 = 0.00519199 loss)\nI0818 01:29:00.338891 17472 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0818 01:31:17.963809 17472 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:32:39.389758 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62484\nI0818 01:32:39.390031 17472 solver.cpp:404]     Test net output #1: loss = 3.19546 (* 1 = 3.19546 loss)\nI0818 01:32:40.712942 17472 solver.cpp:228] Iteration 14700, loss = 0.0145929\nI0818 01:32:40.712987 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:32:40.713004 17472 solver.cpp:244]     Train net output #1: loss = 0.0145931 (* 1 = 0.0145931 loss)\nI0818 01:32:40.806121 17472 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0818 01:34:58.279243 17472 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:36:19.690867 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68996\nI0818 01:36:19.691108 17472 solver.cpp:404]     Test net output #1: loss = 2.3956 (* 1 = 2.3956 loss)\nI0818 01:36:21.013497 17472 solver.cpp:228] Iteration 14800, loss = 0.00750605\nI0818 01:36:21.013542 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:36:21.013558 17472 solver.cpp:244]     Train net output #1: loss = 0.00750616 (* 1 = 0.00750616 loss)\nI0818 01:36:21.104223 17472 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0818 01:38:38.613019 17472 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:40:00.012426 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69604\nI0818 01:40:00.012681 17472 solver.cpp:404]     Test net output #1: loss = 2.25251 (* 1 = 2.25251 loss)\nI0818 01:40:01.335120 17472 solver.cpp:228] Iteration 14900, loss = 0.0127931\nI0818 01:40:01.335162 17472 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0818 01:40:01.335180 17472 solver.cpp:244]     Train net output #1: loss = 0.0127932 (* 1 = 0.0127932 loss)\nI0818 01:40:01.432590 17472 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0818 01:42:18.947942 17472 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:43:40.349006 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68044\nI0818 01:43:40.349231 17472 solver.cpp:404]     Test net output #1: loss = 2.36575 (* 1 = 2.36575 loss)\nI0818 01:43:41.672248 17472 solver.cpp:228] Iteration 15000, loss = 0.0174089\nI0818 01:43:41.672294 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:43:41.672310 17472 solver.cpp:244]     Train net output #1: loss = 0.017409 (* 1 = 0.017409 loss)\nI0818 01:43:41.761133 17472 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0818 01:45:59.301450 17472 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:47:20.712725 17472 solver.cpp:404]     Test net output #0: accuracy = 0.657\nI0818 01:47:20.712947 17472 solver.cpp:404]     Test net output #1: loss = 2.68094 (* 1 = 2.68094 loss)\nI0818 01:47:22.035815 17472 solver.cpp:228] Iteration 15100, loss = 0.00927125\nI0818 01:47:22.035861 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:47:22.035883 17472 solver.cpp:244]     Train net output #1: loss = 0.00927136 (* 1 = 0.00927136 loss)\nI0818 01:47:22.128931 17472 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0818 01:49:39.683728 17472 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:51:01.100163 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66604\nI0818 01:51:01.100383 17472 solver.cpp:404]     Test net output #1: loss = 2.58348 (* 1 = 2.58348 loss)\nI0818 01:51:02.423069 17472 solver.cpp:228] Iteration 15200, loss = 0.0199876\nI0818 01:51:02.423111 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:51:02.423128 17472 solver.cpp:244]     Train net output #1: loss = 0.0199877 (* 1 = 0.0199877 loss)\nI0818 
01:51:02.517114 17472 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0818 01:53:19.989393 17472 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:54:41.398743 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66\nI0818 01:54:41.398958 17472 solver.cpp:404]     Test net output #1: loss = 2.79482 (* 1 = 2.79482 loss)\nI0818 01:54:42.721660 17472 solver.cpp:228] Iteration 15300, loss = 0.0238431\nI0818 01:54:42.721705 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:54:42.721722 17472 solver.cpp:244]     Train net output #1: loss = 0.0238432 (* 1 = 0.0238432 loss)\nI0818 01:54:42.815294 17472 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0818 01:57:00.350162 17472 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:58:21.759346 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70144\nI0818 01:58:21.759557 17472 solver.cpp:404]     Test net output #1: loss = 2.36843 (* 1 = 2.36843 loss)\nI0818 01:58:23.082000 17472 solver.cpp:228] Iteration 15400, loss = 0.0174112\nI0818 01:58:23.082046 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:58:23.082062 17472 solver.cpp:244]     Train net output #1: loss = 0.0174113 (* 1 = 0.0174113 loss)\nI0818 01:58:23.176700 17472 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0818 02:00:40.736171 17472 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 02:02:02.145588 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68004\nI0818 02:02:02.145866 17472 solver.cpp:404]     Test net output #1: loss = 2.46998 (* 1 = 2.46998 loss)\nI0818 02:02:03.470091 17472 solver.cpp:228] Iteration 15500, loss = 0.0180336\nI0818 02:02:03.470134 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:02:03.470149 17472 solver.cpp:244]     Train net output #1: loss = 0.0180337 (* 1 = 0.0180337 loss)\nI0818 02:02:03.558225 17472 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0818 02:04:21.078969 17472 solver.cpp:337] Iteration 15600, 
Testing net (#0)\nI0818 02:05:42.480093 17472 solver.cpp:404]     Test net output #0: accuracy = 0.679\nI0818 02:05:42.480360 17472 solver.cpp:404]     Test net output #1: loss = 2.50866 (* 1 = 2.50866 loss)\nI0818 02:05:43.802786 17472 solver.cpp:228] Iteration 15600, loss = 0.00317398\nI0818 02:05:43.802831 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:05:43.802847 17472 solver.cpp:244]     Train net output #1: loss = 0.00317411 (* 1 = 0.00317411 loss)\nI0818 02:05:43.898849 17472 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0818 02:08:01.486908 17472 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 02:09:22.895650 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63892\nI0818 02:09:22.895900 17472 solver.cpp:404]     Test net output #1: loss = 3.23941 (* 1 = 3.23941 loss)\nI0818 02:09:24.218951 17472 solver.cpp:228] Iteration 15700, loss = 0.00732647\nI0818 02:09:24.218997 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:09:24.219012 17472 solver.cpp:244]     Train net output #1: loss = 0.00732659 (* 1 = 0.00732659 loss)\nI0818 02:09:24.314995 17472 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0818 02:11:41.877907 17472 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 02:13:03.286406 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68332\nI0818 02:13:03.286664 17472 solver.cpp:404]     Test net output #1: loss = 2.51414 (* 1 = 2.51414 loss)\nI0818 02:13:04.609184 17472 solver.cpp:228] Iteration 15800, loss = 0.0187072\nI0818 02:13:04.609228 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:13:04.609246 17472 solver.cpp:244]     Train net output #1: loss = 0.0187073 (* 1 = 0.0187073 loss)\nI0818 02:13:04.699127 17472 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0818 02:15:22.256852 17472 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 02:16:43.667114 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67952\nI0818 02:16:43.667346 
17472 solver.cpp:404]     Test net output #1: loss = 2.6604 (* 1 = 2.6604 loss)\nI0818 02:16:44.989786 17472 solver.cpp:228] Iteration 15900, loss = 0.00293352\nI0818 02:16:44.989830 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:16:44.989846 17472 solver.cpp:244]     Train net output #1: loss = 0.00293363 (* 1 = 0.00293363 loss)\nI0818 02:16:45.081073 17472 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0818 02:19:02.645445 17472 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:20:24.047749 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69388\nI0818 02:20:24.048002 17472 solver.cpp:404]     Test net output #1: loss = 2.55671 (* 1 = 2.55671 loss)\nI0818 02:20:25.370348 17472 solver.cpp:228] Iteration 16000, loss = 0.0594146\nI0818 02:20:25.370391 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:20:25.370407 17472 solver.cpp:244]     Train net output #1: loss = 0.0594147 (* 1 = 0.0594147 loss)\nI0818 02:20:25.462941 17472 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0818 02:22:42.975623 17472 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:24:04.381775 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66928\nI0818 02:24:04.382035 17472 solver.cpp:404]     Test net output #1: loss = 2.83941 (* 1 = 2.83941 loss)\nI0818 02:24:05.703531 17472 solver.cpp:228] Iteration 16100, loss = 0.0166429\nI0818 02:24:05.703573 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:24:05.703589 17472 solver.cpp:244]     Train net output #1: loss = 0.016643 (* 1 = 0.016643 loss)\nI0818 02:24:05.795724 17472 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0818 02:26:23.319634 17472 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:27:44.730473 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68056\nI0818 02:27:44.730720 17472 solver.cpp:404]     Test net output #1: loss = 2.51381 (* 1 = 2.51381 loss)\nI0818 02:27:46.052741 17472 solver.cpp:228] 
Iteration 16200, loss = 0.00333367\nI0818 02:27:46.052783 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:27:46.052798 17472 solver.cpp:244]     Train net output #1: loss = 0.00333378 (* 1 = 0.00333378 loss)\nI0818 02:27:46.148193 17472 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0818 02:30:03.695154 17472 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:31:25.101809 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67924\nI0818 02:31:25.102057 17472 solver.cpp:404]     Test net output #1: loss = 2.46041 (* 1 = 2.46041 loss)\nI0818 02:31:26.424124 17472 solver.cpp:228] Iteration 16300, loss = 0.0155451\nI0818 02:31:26.424165 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:31:26.424181 17472 solver.cpp:244]     Train net output #1: loss = 0.0155452 (* 1 = 0.0155452 loss)\nI0818 02:31:26.512403 17472 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0818 02:33:44.106114 17472 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:35:05.514953 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68396\nI0818 02:35:05.515167 17472 solver.cpp:404]     Test net output #1: loss = 2.52728 (* 1 = 2.52728 loss)\nI0818 02:35:06.836947 17472 solver.cpp:228] Iteration 16400, loss = 0.0044757\nI0818 02:35:06.836989 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:35:06.837007 17472 solver.cpp:244]     Train net output #1: loss = 0.00447581 (* 1 = 0.00447581 loss)\nI0818 02:35:06.925853 17472 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0818 02:37:24.452929 17472 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:38:45.860121 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6812\nI0818 02:38:45.860401 17472 solver.cpp:404]     Test net output #1: loss = 2.48556 (* 1 = 2.48556 loss)\nI0818 02:38:47.182922 17472 solver.cpp:228] Iteration 16500, loss = 0.00856908\nI0818 02:38:47.182963 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
02:38:47.182979 17472 solver.cpp:244]     Train net output #1: loss = 0.00856919 (* 1 = 0.00856919 loss)\nI0818 02:38:47.273636 17472 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0818 02:41:04.797701 17472 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:42:26.227213 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62836\nI0818 02:42:26.227471 17472 solver.cpp:404]     Test net output #1: loss = 3.26476 (* 1 = 3.26476 loss)\nI0818 02:42:27.549630 17472 solver.cpp:228] Iteration 16600, loss = 0.0112066\nI0818 02:42:27.549669 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:42:27.549685 17472 solver.cpp:244]     Train net output #1: loss = 0.0112067 (* 1 = 0.0112067 loss)\nI0818 02:42:27.643218 17472 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0818 02:44:45.158720 17472 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:46:06.567971 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67688\nI0818 02:46:06.568249 17472 solver.cpp:404]     Test net output #1: loss = 2.47296 (* 1 = 2.47296 loss)\nI0818 02:46:07.890362 17472 solver.cpp:228] Iteration 16700, loss = 0.00501426\nI0818 02:46:07.890401 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:46:07.890419 17472 solver.cpp:244]     Train net output #1: loss = 0.00501437 (* 1 = 0.00501437 loss)\nI0818 02:46:07.981626 17472 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0818 02:48:25.594106 17472 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:49:47.003075 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66132\nI0818 02:49:47.003376 17472 solver.cpp:404]     Test net output #1: loss = 2.81411 (* 1 = 2.81411 loss)\nI0818 02:49:48.325971 17472 solver.cpp:228] Iteration 16800, loss = 0.0219366\nI0818 02:49:48.326011 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:49:48.326027 17472 solver.cpp:244]     Train net output #1: loss = 0.0219367 (* 1 = 0.0219367 loss)\nI0818 02:49:48.415627 
17472 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0818 02:52:05.955451 17472 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:53:27.364321 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68192\nI0818 02:53:27.364603 17472 solver.cpp:404]     Test net output #1: loss = 2.55968 (* 1 = 2.55968 loss)\nI0818 02:53:28.686568 17472 solver.cpp:228] Iteration 16900, loss = 0.00497984\nI0818 02:53:28.686609 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:53:28.686625 17472 solver.cpp:244]     Train net output #1: loss = 0.00497995 (* 1 = 0.00497995 loss)\nI0818 02:53:28.783783 17472 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0818 02:55:46.306205 17472 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:57:07.724200 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64508\nI0818 02:57:07.724478 17472 solver.cpp:404]     Test net output #1: loss = 3.33617 (* 1 = 3.33617 loss)\nI0818 02:57:09.047298 17472 solver.cpp:228] Iteration 17000, loss = 0.00575578\nI0818 02:57:09.047338 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:57:09.047353 17472 solver.cpp:244]     Train net output #1: loss = 0.00575588 (* 1 = 0.00575588 loss)\nI0818 02:57:09.136734 17472 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0818 02:59:26.702416 17472 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 03:00:48.124382 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65388\nI0818 03:00:48.124650 17472 solver.cpp:404]     Test net output #1: loss = 2.86052 (* 1 = 2.86052 loss)\nI0818 03:00:49.446914 17472 solver.cpp:228] Iteration 17100, loss = 0.000967282\nI0818 03:00:49.446954 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:00:49.446970 17472 solver.cpp:244]     Train net output #1: loss = 0.000967386 (* 1 = 0.000967386 loss)\nI0818 03:00:49.538584 17472 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0818 03:03:07.089588 17472 solver.cpp:337] Iteration 17200, Testing net 
(#0)\nI0818 03:04:28.498752 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66204\nI0818 03:04:28.499006 17472 solver.cpp:404]     Test net output #1: loss = 3.01944 (* 1 = 3.01944 loss)\nI0818 03:04:29.821954 17472 solver.cpp:228] Iteration 17200, loss = 0.0047589\nI0818 03:04:29.821995 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:04:29.822011 17472 solver.cpp:244]     Train net output #1: loss = 0.004759 (* 1 = 0.004759 loss)\nI0818 03:04:29.912989 17472 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0818 03:06:47.417071 17472 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 03:08:08.835988 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6676\nI0818 03:08:08.836267 17472 solver.cpp:404]     Test net output #1: loss = 2.9384 (* 1 = 2.9384 loss)\nI0818 03:08:10.158632 17472 solver.cpp:228] Iteration 17300, loss = 0.00789384\nI0818 03:08:10.158673 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:08:10.158689 17472 solver.cpp:244]     Train net output #1: loss = 0.00789395 (* 1 = 0.00789395 loss)\nI0818 03:08:10.253729 17472 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0818 03:10:27.755483 17472 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 03:11:49.150357 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67272\nI0818 03:11:49.150617 17472 solver.cpp:404]     Test net output #1: loss = 2.81741 (* 1 = 2.81741 loss)\nI0818 03:11:50.471997 17472 solver.cpp:228] Iteration 17400, loss = 0.0011723\nI0818 03:11:50.472041 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:11:50.472057 17472 solver.cpp:244]     Train net output #1: loss = 0.0011724 (* 1 = 0.0011724 loss)\nI0818 03:11:50.565410 17472 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0818 03:14:08.038738 17472 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 03:15:29.446765 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67676\nI0818 03:15:29.447031 17472 solver.cpp:404]     
Test net output #1: loss = 2.65028 (* 1 = 2.65028 loss)\nI0818 03:15:30.769371 17472 solver.cpp:228] Iteration 17500, loss = 0.00683586\nI0818 03:15:30.769415 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:15:30.769433 17472 solver.cpp:244]     Train net output #1: loss = 0.00683596 (* 1 = 0.00683596 loss)\nI0818 03:15:30.859315 17472 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0818 03:17:48.322932 17472 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:19:09.737808 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6912\nI0818 03:19:09.738096 17472 solver.cpp:404]     Test net output #1: loss = 2.56712 (* 1 = 2.56712 loss)\nI0818 03:19:11.059788 17472 solver.cpp:228] Iteration 17600, loss = 0.0057569\nI0818 03:19:11.059834 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:19:11.059849 17472 solver.cpp:244]     Train net output #1: loss = 0.00575701 (* 1 = 0.00575701 loss)\nI0818 03:19:11.155966 17472 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0818 03:21:28.799280 17472 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:22:50.207664 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67024\nI0818 03:22:50.207947 17472 solver.cpp:404]     Test net output #1: loss = 2.54175 (* 1 = 2.54175 loss)\nI0818 03:22:51.530813 17472 solver.cpp:228] Iteration 17700, loss = 0.00603886\nI0818 03:22:51.530859 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:22:51.530880 17472 solver.cpp:244]     Train net output #1: loss = 0.00603897 (* 1 = 0.00603897 loss)\nI0818 03:22:51.624584 17472 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0818 03:25:09.129636 17472 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:26:30.535845 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67416\nI0818 03:26:30.536113 17472 solver.cpp:404]     Test net output #1: loss = 2.67031 (* 1 = 2.67031 loss)\nI0818 03:26:31.857467 17472 solver.cpp:228] Iteration 17800, loss = 
0.00730089\nI0818 03:26:31.857512 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:26:31.857528 17472 solver.cpp:244]     Train net output #1: loss = 0.00730099 (* 1 = 0.00730099 loss)\nI0818 03:26:31.950690 17472 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0818 03:28:49.531021 17472 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:30:10.941145 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63508\nI0818 03:30:10.941427 17472 solver.cpp:404]     Test net output #1: loss = 3.57383 (* 1 = 3.57383 loss)\nI0818 03:30:12.263269 17472 solver.cpp:228] Iteration 17900, loss = 0.00604909\nI0818 03:30:12.263315 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:30:12.263332 17472 solver.cpp:244]     Train net output #1: loss = 0.00604919 (* 1 = 0.00604919 loss)\nI0818 03:30:12.356062 17472 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0818 03:32:29.823346 17472 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:33:51.239379 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63996\nI0818 03:33:51.239646 17472 solver.cpp:404]     Test net output #1: loss = 3.34284 (* 1 = 3.34284 loss)\nI0818 03:33:52.562747 17472 solver.cpp:228] Iteration 18000, loss = 0.00900357\nI0818 03:33:52.562793 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:33:52.562808 17472 solver.cpp:244]     Train net output #1: loss = 0.00900367 (* 1 = 0.00900367 loss)\nI0818 03:33:52.652249 17472 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0818 03:36:10.116600 17472 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:37:31.528854 17472 solver.cpp:404]     Test net output #0: accuracy = 0.615\nI0818 03:37:31.529111 17472 solver.cpp:404]     Test net output #1: loss = 3.9512 (* 1 = 3.9512 loss)\nI0818 03:37:32.851119 17472 solver.cpp:228] Iteration 18100, loss = 0.00471752\nI0818 03:37:32.851163 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:37:32.851179 17472 
solver.cpp:244]     Train net output #1: loss = 0.00471761 (* 1 = 0.00471761 loss)\nI0818 03:37:32.945747 17472 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0818 03:39:50.716646 17472 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:41:12.126075 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67576\nI0818 03:41:12.126341 17472 solver.cpp:404]     Test net output #1: loss = 2.63546 (* 1 = 2.63546 loss)\nI0818 03:41:13.448534 17472 solver.cpp:228] Iteration 18200, loss = 0.0283901\nI0818 03:41:13.448580 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:41:13.448597 17472 solver.cpp:244]     Train net output #1: loss = 0.0283902 (* 1 = 0.0283902 loss)\nI0818 03:41:13.545373 17472 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0818 03:43:31.007942 17472 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:44:52.423780 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69608\nI0818 03:44:52.424082 17472 solver.cpp:404]     Test net output #1: loss = 2.2865 (* 1 = 2.2865 loss)\nI0818 03:44:53.746760 17472 solver.cpp:228] Iteration 18300, loss = 0.00917385\nI0818 03:44:53.746805 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:44:53.746821 17472 solver.cpp:244]     Train net output #1: loss = 0.00917394 (* 1 = 0.00917394 loss)\nI0818 03:44:53.839841 17472 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0818 03:47:11.330008 17472 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:48:32.746054 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64048\nI0818 03:48:32.746311 17472 solver.cpp:404]     Test net output #1: loss = 3.23693 (* 1 = 3.23693 loss)\nI0818 03:48:34.069046 17472 solver.cpp:228] Iteration 18400, loss = 0.00244121\nI0818 03:48:34.069089 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:48:34.069105 17472 solver.cpp:244]     Train net output #1: loss = 0.0024413 (* 1 = 0.0024413 loss)\nI0818 03:48:34.159104 17472 
sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0818 03:50:51.682289 17472 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:52:13.094480 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6876\nI0818 03:52:13.094763 17472 solver.cpp:404]     Test net output #1: loss = 2.55069 (* 1 = 2.55069 loss)\nI0818 03:52:14.417906 17472 solver.cpp:228] Iteration 18500, loss = 0.0192066\nI0818 03:52:14.417950 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:52:14.417968 17472 solver.cpp:244]     Train net output #1: loss = 0.0192067 (* 1 = 0.0192067 loss)\nI0818 03:52:14.510187 17472 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0818 03:54:32.135874 17472 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:55:53.544996 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67748\nI0818 03:55:53.545285 17472 solver.cpp:404]     Test net output #1: loss = 2.66697 (* 1 = 2.66697 loss)\nI0818 03:55:54.868638 17472 solver.cpp:228] Iteration 18600, loss = 0.00650688\nI0818 03:55:54.868681 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:55:54.868698 17472 solver.cpp:244]     Train net output #1: loss = 0.00650698 (* 1 = 0.00650698 loss)\nI0818 03:55:54.958627 17472 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0818 03:58:12.507041 17472 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:59:33.916384 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64092\nI0818 03:59:33.916671 17472 solver.cpp:404]     Test net output #1: loss = 3.5528 (* 1 = 3.5528 loss)\nI0818 03:59:35.238839 17472 solver.cpp:228] Iteration 18700, loss = 0.0229298\nI0818 03:59:35.238888 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:59:35.238904 17472 solver.cpp:244]     Train net output #1: loss = 0.0229299 (* 1 = 0.0229299 loss)\nI0818 03:59:35.334839 17472 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0818 04:01:52.873427 17472 solver.cpp:337] Iteration 18800, Testing net 
(#0)\nI0818 04:03:14.291141 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63092\nI0818 04:03:14.291424 17472 solver.cpp:404]     Test net output #1: loss = 3.2982 (* 1 = 3.2982 loss)\nI0818 04:03:15.614449 17472 solver.cpp:228] Iteration 18800, loss = 0.0132966\nI0818 04:03:15.614495 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:03:15.614511 17472 solver.cpp:244]     Train net output #1: loss = 0.0132967 (* 1 = 0.0132967 loss)\nI0818 04:03:15.705248 17472 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0818 04:05:33.283104 17472 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 04:06:54.704651 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66664\nI0818 04:06:54.704934 17472 solver.cpp:404]     Test net output #1: loss = 2.87307 (* 1 = 2.87307 loss)\nI0818 04:06:56.026423 17472 solver.cpp:228] Iteration 18900, loss = 0.00319036\nI0818 04:06:56.026468 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:06:56.026484 17472 solver.cpp:244]     Train net output #1: loss = 0.00319046 (* 1 = 0.00319046 loss)\nI0818 04:06:56.118474 17472 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0818 04:09:13.638746 17472 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 04:10:35.414558 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69712\nI0818 04:10:35.414876 17472 solver.cpp:404]     Test net output #1: loss = 2.45136 (* 1 = 2.45136 loss)\nI0818 04:10:36.744683 17472 solver.cpp:228] Iteration 19000, loss = 0.00323193\nI0818 04:10:36.744729 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:10:36.744743 17472 solver.cpp:244]     Train net output #1: loss = 0.00323202 (* 1 = 0.00323202 loss)\nI0818 04:10:36.832994 17472 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0818 04:12:54.490298 17472 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 04:14:16.771600 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6616\nI0818 04:14:16.771920 17472 
solver.cpp:404]     Test net output #1: loss = 2.89525 (* 1 = 2.89525 loss)\nI0818 04:14:18.102635 17472 solver.cpp:228] Iteration 19100, loss = 0.0353014\nI0818 04:14:18.102679 17472 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:14:18.102695 17472 solver.cpp:244]     Train net output #1: loss = 0.0353015 (* 1 = 0.0353015 loss)\nI0818 04:14:18.186249 17472 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0818 04:16:35.830727 17472 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 04:17:58.093366 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67324\nI0818 04:17:58.093684 17472 solver.cpp:404]     Test net output #1: loss = 2.89875 (* 1 = 2.89875 loss)\nI0818 04:17:59.423499 17472 solver.cpp:228] Iteration 19200, loss = 0.00590275\nI0818 04:17:59.423539 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:17:59.423555 17472 solver.cpp:244]     Train net output #1: loss = 0.00590284 (* 1 = 0.00590284 loss)\nI0818 04:17:59.513788 17472 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0818 04:20:17.153244 17472 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:21:39.420799 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66944\nI0818 04:21:39.421088 17472 solver.cpp:404]     Test net output #1: loss = 2.78318 (* 1 = 2.78318 loss)\nI0818 04:21:40.751713 17472 solver.cpp:228] Iteration 19300, loss = 0.00745812\nI0818 04:21:40.751754 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:21:40.751770 17472 solver.cpp:244]     Train net output #1: loss = 0.00745821 (* 1 = 0.00745821 loss)\nI0818 04:21:40.837159 17472 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0818 04:23:58.753813 17472 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:25:21.022900 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69232\nI0818 04:25:21.023247 17472 solver.cpp:404]     Test net output #1: loss = 2.44984 (* 1 = 2.44984 loss)\nI0818 04:25:22.352676 17472 solver.cpp:228] 
Iteration 19400, loss = 0.00125375\nI0818 04:25:22.352720 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:25:22.352735 17472 solver.cpp:244]     Train net output #1: loss = 0.00125383 (* 1 = 0.00125383 loss)\nI0818 04:25:22.439851 17472 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0818 04:27:40.122395 17472 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:29:02.393321 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66888\nI0818 04:29:02.393651 17472 solver.cpp:404]     Test net output #1: loss = 2.75506 (* 1 = 2.75506 loss)\nI0818 04:29:03.724076 17472 solver.cpp:228] Iteration 19500, loss = 0.00784546\nI0818 04:29:03.724118 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:29:03.724134 17472 solver.cpp:244]     Train net output #1: loss = 0.00784554 (* 1 = 0.00784554 loss)\nI0818 04:29:03.811132 17472 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0818 04:31:21.501823 17472 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:32:43.772111 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69616\nI0818 04:32:43.772404 17472 solver.cpp:404]     Test net output #1: loss = 2.47733 (* 1 = 2.47733 loss)\nI0818 04:32:45.102962 17472 solver.cpp:228] Iteration 19600, loss = 0.00252983\nI0818 04:32:45.103009 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:32:45.103025 17472 solver.cpp:244]     Train net output #1: loss = 0.00252991 (* 1 = 0.00252991 loss)\nI0818 04:32:45.192873 17472 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0818 04:35:02.753558 17472 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:36:25.017640 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70848\nI0818 04:36:25.017976 17472 solver.cpp:404]     Test net output #1: loss = 2.35195 (* 1 = 2.35195 loss)\nI0818 04:36:26.347791 17472 solver.cpp:228] Iteration 19700, loss = 0.00442041\nI0818 04:36:26.347836 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
04:36:26.347852 17472 solver.cpp:244]     Train net output #1: loss = 0.0044205 (* 1 = 0.0044205 loss)\nI0818 04:36:26.438264 17472 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0818 04:38:44.076272 17472 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:40:06.352146 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69728\nI0818 04:40:06.352459 17472 solver.cpp:404]     Test net output #1: loss = 2.48026 (* 1 = 2.48026 loss)\nI0818 04:40:07.683719 17472 solver.cpp:228] Iteration 19800, loss = 0.00242081\nI0818 04:40:07.683763 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:40:07.683780 17472 solver.cpp:244]     Train net output #1: loss = 0.0024209 (* 1 = 0.0024209 loss)\nI0818 04:40:07.768113 17472 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0818 04:42:25.356056 17472 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:43:47.629238 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68468\nI0818 04:43:47.629513 17472 solver.cpp:404]     Test net output #1: loss = 2.52683 (* 1 = 2.52683 loss)\nI0818 04:43:48.959981 17472 solver.cpp:228] Iteration 19900, loss = 0.0050165\nI0818 04:43:48.960024 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:43:48.960041 17472 solver.cpp:244]     Train net output #1: loss = 0.00501658 (* 1 = 0.00501658 loss)\nI0818 04:43:49.046412 17472 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0818 04:46:06.720372 17472 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:47:28.978281 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70236\nI0818 04:47:28.978602 17472 solver.cpp:404]     Test net output #1: loss = 2.4342 (* 1 = 2.4342 loss)\nI0818 04:47:30.308208 17472 solver.cpp:228] Iteration 20000, loss = 0.0442442\nI0818 04:47:30.308251 17472 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:47:30.308267 17472 solver.cpp:244]     Train net output #1: loss = 0.0442443 (* 1 = 0.0442443 loss)\nI0818 04:47:30.398661 17472 
sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0818 04:49:48.067634 17472 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:51:10.323901 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66136\nI0818 04:51:10.324226 17472 solver.cpp:404]     Test net output #1: loss = 3.02759 (* 1 = 3.02759 loss)\nI0818 04:51:11.654481 17472 solver.cpp:228] Iteration 20100, loss = 0.00785264\nI0818 04:51:11.654523 17472 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:51:11.654541 17472 solver.cpp:244]     Train net output #1: loss = 0.00785272 (* 1 = 0.00785272 loss)\nI0818 04:51:11.740852 17472 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0818 04:53:29.357414 17472 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:54:51.616631 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68184\nI0818 04:54:51.616961 17472 solver.cpp:404]     Test net output #1: loss = 2.69157 (* 1 = 2.69157 loss)\nI0818 04:54:52.946058 17472 solver.cpp:228] Iteration 20200, loss = 0.00299795\nI0818 04:54:52.946101 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:54:52.946116 17472 solver.cpp:244]     Train net output #1: loss = 0.00299803 (* 1 = 0.00299803 loss)\nI0818 04:54:53.036892 17472 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0818 04:57:10.719462 17472 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 04:58:32.967499 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6618\nI0818 04:58:32.967802 17472 solver.cpp:404]     Test net output #1: loss = 3.28375 (* 1 = 3.28375 loss)\nI0818 04:58:34.298364 17472 solver.cpp:228] Iteration 20300, loss = 0.00982298\nI0818 04:58:34.298409 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:58:34.298425 17472 solver.cpp:244]     Train net output #1: loss = 0.00982306 (* 1 = 0.00982306 loss)\nI0818 04:58:34.384213 17472 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0818 05:00:52.100570 17472 solver.cpp:337] Iteration 20400, Testing net 
(#0)\nI0818 05:02:14.342818 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6974\nI0818 05:02:14.343132 17472 solver.cpp:404]     Test net output #1: loss = 2.50061 (* 1 = 2.50061 loss)\nI0818 05:02:15.673666 17472 solver.cpp:228] Iteration 20400, loss = 0.00335538\nI0818 05:02:15.673712 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:02:15.673727 17472 solver.cpp:244]     Train net output #1: loss = 0.00335546 (* 1 = 0.00335546 loss)\nI0818 05:02:15.762498 17472 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0818 05:04:33.465940 17472 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 05:05:55.705888 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69124\nI0818 05:05:55.706218 17472 solver.cpp:404]     Test net output #1: loss = 2.61211 (* 1 = 2.61211 loss)\nI0818 05:05:57.037004 17472 solver.cpp:228] Iteration 20500, loss = 0.000867599\nI0818 05:05:57.037050 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:05:57.037065 17472 solver.cpp:244]     Train net output #1: loss = 0.000867681 (* 1 = 0.000867681 loss)\nI0818 05:05:57.131335 17472 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0818 05:08:14.884568 17472 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 05:09:37.117413 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70204\nI0818 05:09:37.117723 17472 solver.cpp:404]     Test net output #1: loss = 2.34833 (* 1 = 2.34833 loss)\nI0818 05:09:38.445878 17472 solver.cpp:228] Iteration 20600, loss = 0.000137538\nI0818 05:09:38.445921 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:09:38.445936 17472 solver.cpp:244]     Train net output #1: loss = 0.000137623 (* 1 = 0.000137623 loss)\nI0818 05:09:38.529002 17472 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0818 05:11:55.654000 17472 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 05:13:17.894498 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71332\nI0818 05:13:17.894821 17472 
solver.cpp:404]     Test net output #1: loss = 2.27893 (* 1 = 2.27893 loss)\nI0818 05:13:19.223049 17472 solver.cpp:228] Iteration 20700, loss = 0.000157308\nI0818 05:13:19.223093 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:13:19.223107 17472 solver.cpp:244]     Train net output #1: loss = 0.000157393 (* 1 = 0.000157393 loss)\nI0818 05:13:19.311763 17472 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0818 05:15:36.411237 17472 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 05:16:58.662084 17472 solver.cpp:404]     Test net output #0: accuracy = 0.714\nI0818 05:16:58.662406 17472 solver.cpp:404]     Test net output #1: loss = 2.25551 (* 1 = 2.25551 loss)\nI0818 05:16:59.989845 17472 solver.cpp:228] Iteration 20800, loss = 0.0001547\nI0818 05:16:59.989888 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:16:59.989903 17472 solver.cpp:244]     Train net output #1: loss = 0.000154785 (* 1 = 0.000154785 loss)\nI0818 05:17:00.077265 17472 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0818 05:19:17.119809 17472 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 05:20:39.360133 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0818 05:20:39.360457 17472 solver.cpp:404]     Test net output #1: loss = 2.26222 (* 1 = 2.26222 loss)\nI0818 05:20:40.687791 17472 solver.cpp:228] Iteration 20900, loss = 7.21514e-05\nI0818 05:20:40.687836 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:20:40.687851 17472 solver.cpp:244]     Train net output #1: loss = 7.22364e-05 (* 1 = 7.22364e-05 loss)\nI0818 05:20:40.769723 17472 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0818 05:22:57.784674 17472 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 05:24:20.037883 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70028\nI0818 05:24:20.038224 17472 solver.cpp:404]     Test net output #1: loss = 2.30407 (* 1 = 2.30407 loss)\nI0818 05:24:21.365530 17472 solver.cpp:228] 
Iteration 21000, loss = 7.18285e-05\nI0818 05:24:21.365574 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:24:21.365592 17472 solver.cpp:244]     Train net output #1: loss = 7.19135e-05 (* 1 = 7.19135e-05 loss)\nI0818 05:24:21.452471 17472 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0818 05:26:38.536839 17472 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:28:00.765637 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69308\nI0818 05:28:00.765919 17472 solver.cpp:404]     Test net output #1: loss = 2.32336 (* 1 = 2.32336 loss)\nI0818 05:28:02.092617 17472 solver.cpp:228] Iteration 21100, loss = 6.52712e-05\nI0818 05:28:02.092660 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:28:02.092676 17472 solver.cpp:244]     Train net output #1: loss = 6.53562e-05 (* 1 = 6.53562e-05 loss)\nI0818 05:28:02.180112 17472 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0818 05:30:19.211608 17472 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:31:41.176316 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68368\nI0818 05:31:41.176564 17472 solver.cpp:404]     Test net output #1: loss = 2.37287 (* 1 = 2.37287 loss)\nI0818 05:31:42.503993 17472 solver.cpp:228] Iteration 21200, loss = 4.96815e-05\nI0818 05:31:42.504039 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:31:42.504055 17472 solver.cpp:244]     Train net output #1: loss = 4.97665e-05 (* 1 = 4.97665e-05 loss)\nI0818 05:31:42.591532 17472 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0818 05:33:59.571286 17472 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:35:21.517294 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66988\nI0818 05:35:21.517498 17472 solver.cpp:404]     Test net output #1: loss = 2.47174 (* 1 = 2.47174 loss)\nI0818 05:35:22.844032 17472 solver.cpp:228] Iteration 21300, loss = 5.49318e-05\nI0818 05:35:22.844075 17472 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0818 05:35:22.844089 17472 solver.cpp:244]     Train net output #1: loss = 5.50168e-05 (* 1 = 5.50168e-05 loss)\nI0818 05:35:22.927438 17472 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0818 05:37:40.028082 17472 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 05:39:01.982733 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66168\nI0818 05:39:01.982993 17472 solver.cpp:404]     Test net output #1: loss = 2.52848 (* 1 = 2.52848 loss)\nI0818 05:39:03.309871 17472 solver.cpp:228] Iteration 21400, loss = 7.17606e-05\nI0818 05:39:03.309914 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:39:03.309931 17472 solver.cpp:244]     Train net output #1: loss = 7.18456e-05 (* 1 = 7.18456e-05 loss)\nI0818 05:39:03.399905 17472 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0818 05:41:20.391927 17472 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:42:42.586236 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64952\nI0818 05:42:42.586498 17472 solver.cpp:404]     Test net output #1: loss = 2.61779 (* 1 = 2.61779 loss)\nI0818 05:42:43.913852 17472 solver.cpp:228] Iteration 21500, loss = 6.62086e-05\nI0818 05:42:43.913897 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:42:43.913913 17472 solver.cpp:244]     Train net output #1: loss = 6.62935e-05 (* 1 = 6.62935e-05 loss)\nI0818 05:42:44.002406 17472 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0818 05:45:00.966807 17472 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 05:46:23.173079 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6364\nI0818 05:46:23.173334 17472 solver.cpp:404]     Test net output #1: loss = 2.67968 (* 1 = 2.67968 loss)\nI0818 05:46:24.503247 17472 solver.cpp:228] Iteration 21600, loss = 5.15929e-05\nI0818 05:46:24.503291 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:46:24.503306 17472 solver.cpp:244]     Train net output #1: loss = 5.16779e-05 (* 1 = 5.16779e-05 loss)\nI0818 
05:46:24.587695 17472 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0818 05:48:41.730420 17472 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:50:03.726651 17472 solver.cpp:404]     Test net output #0: accuracy = 0.62184\nI0818 05:50:03.726872 17472 solver.cpp:404]     Test net output #1: loss = 2.8065 (* 1 = 2.8065 loss)\nI0818 05:50:05.053892 17472 solver.cpp:228] Iteration 21700, loss = 7.17012e-05\nI0818 05:50:05.053937 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:50:05.053953 17472 solver.cpp:244]     Train net output #1: loss = 7.17862e-05 (* 1 = 7.17862e-05 loss)\nI0818 05:50:05.139562 17472 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0818 05:52:22.220175 17472 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:53:44.159768 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61416\nI0818 05:53:44.159986 17472 solver.cpp:404]     Test net output #1: loss = 2.86289 (* 1 = 2.86289 loss)\nI0818 05:53:45.487193 17472 solver.cpp:228] Iteration 21800, loss = 7.43985e-05\nI0818 05:53:45.487242 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:53:45.487257 17472 solver.cpp:244]     Train net output #1: loss = 7.44835e-05 (* 1 = 7.44835e-05 loss)\nI0818 05:53:45.574877 17472 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0818 05:56:02.667920 17472 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 05:57:24.602577 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5994\nI0818 05:57:24.602785 17472 solver.cpp:404]     Test net output #1: loss = 2.96071 (* 1 = 2.96071 loss)\nI0818 05:57:25.931064 17472 solver.cpp:228] Iteration 21900, loss = 5.11943e-05\nI0818 05:57:25.931107 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:57:25.931123 17472 solver.cpp:244]     Train net output #1: loss = 5.12793e-05 (* 1 = 5.12793e-05 loss)\nI0818 05:57:26.018546 17472 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0818 05:59:43.103888 17472 solver.cpp:337] Iteration 
22000, Testing net (#0)\nI0818 06:01:05.107460 17472 solver.cpp:404]     Test net output #0: accuracy = 0.59056\nI0818 06:01:05.107722 17472 solver.cpp:404]     Test net output #1: loss = 3.03462 (* 1 = 3.03462 loss)\nI0818 06:01:06.435454 17472 solver.cpp:228] Iteration 22000, loss = 5.7031e-05\nI0818 06:01:06.435499 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:01:06.435515 17472 solver.cpp:244]     Train net output #1: loss = 5.7116e-05 (* 1 = 5.7116e-05 loss)\nI0818 06:01:06.517599 17472 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0818 06:03:23.719785 17472 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 06:04:45.668041 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5802\nI0818 06:04:45.668253 17472 solver.cpp:404]     Test net output #1: loss = 3.11538 (* 1 = 3.11538 loss)\nI0818 06:04:46.995841 17472 solver.cpp:228] Iteration 22100, loss = 4.49251e-05\nI0818 06:04:46.995887 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:04:46.995903 17472 solver.cpp:244]     Train net output #1: loss = 4.50101e-05 (* 1 = 4.50101e-05 loss)\nI0818 06:04:47.078989 17472 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0818 06:07:04.141247 17472 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 06:08:26.117216 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5746\nI0818 06:08:26.117449 17472 solver.cpp:404]     Test net output #1: loss = 3.10725 (* 1 = 3.10725 loss)\nI0818 06:08:27.444672 17472 solver.cpp:228] Iteration 22200, loss = 7.27526e-05\nI0818 06:08:27.444716 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:08:27.444732 17472 solver.cpp:244]     Train net output #1: loss = 7.28376e-05 (* 1 = 7.28376e-05 loss)\nI0818 06:08:27.524821 17472 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0818 06:10:44.516038 17472 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 06:12:06.620267 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5646\nI0818 
06:12:06.620510 17472 solver.cpp:404]     Test net output #1: loss = 3.22827 (* 1 = 3.22827 loss)\nI0818 06:12:07.948074 17472 solver.cpp:228] Iteration 22300, loss = 7.9213e-05\nI0818 06:12:07.948119 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:12:07.948134 17472 solver.cpp:244]     Train net output #1: loss = 7.9298e-05 (* 1 = 7.9298e-05 loss)\nI0818 06:12:08.034126 17472 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0818 06:14:25.081413 17472 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 06:15:47.170040 17472 solver.cpp:404]     Test net output #0: accuracy = 0.55608\nI0818 06:15:47.170262 17472 solver.cpp:404]     Test net output #1: loss = 3.27138 (* 1 = 3.27138 loss)\nI0818 06:15:48.498283 17472 solver.cpp:228] Iteration 22400, loss = 8.52804e-05\nI0818 06:15:48.498327 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:15:48.498344 17472 solver.cpp:244]     Train net output #1: loss = 8.53655e-05 (* 1 = 8.53655e-05 loss)\nI0818 06:15:48.584774 17472 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0818 06:18:05.627336 17472 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 06:19:27.800276 17472 solver.cpp:404]     Test net output #0: accuracy = 0.54764\nI0818 06:19:27.800521 17472 solver.cpp:404]     Test net output #1: loss = 3.33609 (* 1 = 3.33609 loss)\nI0818 06:19:29.128655 17472 solver.cpp:228] Iteration 22500, loss = 6.24146e-05\nI0818 06:19:29.128698 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:19:29.128715 17472 solver.cpp:244]     Train net output #1: loss = 6.24996e-05 (* 1 = 6.24996e-05 loss)\nI0818 06:19:29.212574 17472 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0818 06:21:46.411193 17472 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 06:23:08.398733 17472 solver.cpp:404]     Test net output #0: accuracy = 0.54096\nI0818 06:23:08.398993 17472 solver.cpp:404]     Test net output #1: loss = 3.35947 (* 1 = 3.35947 loss)\nI0818 06:23:09.719617 
17472 solver.cpp:228] Iteration 22600, loss = 6.47412e-05\nI0818 06:23:09.719666 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:23:09.719681 17472 solver.cpp:244]     Train net output #1: loss = 6.48262e-05 (* 1 = 6.48262e-05 loss)\nI0818 06:23:09.804924 17472 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0818 06:25:26.933089 17472 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:26:48.343538 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53448\nI0818 06:26:48.343827 17472 solver.cpp:404]     Test net output #1: loss = 3.4131 (* 1 = 3.4131 loss)\nI0818 06:26:49.662248 17472 solver.cpp:228] Iteration 22700, loss = 6.03755e-05\nI0818 06:26:49.662293 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:26:49.662309 17472 solver.cpp:244]     Train net output #1: loss = 6.04606e-05 (* 1 = 6.04606e-05 loss)\nI0818 06:26:49.756665 17472 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0818 06:29:06.891872 17472 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:30:28.295691 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52788\nI0818 06:30:28.295979 17472 solver.cpp:404]     Test net output #1: loss = 3.44362 (* 1 = 3.44362 loss)\nI0818 06:30:29.614941 17472 solver.cpp:228] Iteration 22800, loss = 7.11216e-05\nI0818 06:30:29.614982 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:30:29.614998 17472 solver.cpp:244]     Train net output #1: loss = 7.12066e-05 (* 1 = 7.12066e-05 loss)\nI0818 06:30:29.700780 17472 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0818 06:32:46.857875 17472 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:34:08.254508 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52312\nI0818 06:34:08.254766 17472 solver.cpp:404]     Test net output #1: loss = 3.48081 (* 1 = 3.48081 loss)\nI0818 06:34:09.573081 17472 solver.cpp:228] Iteration 22900, loss = 6.22015e-05\nI0818 06:34:09.573125 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0818 06:34:09.573141 17472 solver.cpp:244]     Train net output #1: loss = 6.22865e-05 (* 1 = 6.22865e-05 loss)\nI0818 06:34:09.658849 17472 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0818 06:36:26.761132 17472 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 06:37:48.161504 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51916\nI0818 06:37:48.161790 17472 solver.cpp:404]     Test net output #1: loss = 3.50638 (* 1 = 3.50638 loss)\nI0818 06:37:49.480128 17472 solver.cpp:228] Iteration 23000, loss = 7.69817e-05\nI0818 06:37:49.480170 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:37:49.480185 17472 solver.cpp:244]     Train net output #1: loss = 7.70667e-05 (* 1 = 7.70667e-05 loss)\nI0818 06:37:49.567121 17472 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0818 06:40:06.719980 17472 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:41:28.127486 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51556\nI0818 06:41:28.127756 17472 solver.cpp:404]     Test net output #1: loss = 3.55081 (* 1 = 3.55081 loss)\nI0818 06:41:29.446296 17472 solver.cpp:228] Iteration 23100, loss = 6.7031e-05\nI0818 06:41:29.446338 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:41:29.446354 17472 solver.cpp:244]     Train net output #1: loss = 6.7116e-05 (* 1 = 6.7116e-05 loss)\nI0818 06:41:29.537899 17472 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0818 06:43:46.671656 17472 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:45:08.079497 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51188\nI0818 06:45:08.079783 17472 solver.cpp:404]     Test net output #1: loss = 3.51793 (* 1 = 3.51793 loss)\nI0818 06:45:09.398275 17472 solver.cpp:228] Iteration 23200, loss = 8.18426e-05\nI0818 06:45:09.398317 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:45:09.398334 17472 solver.cpp:244]     Train net output #1: loss = 8.19276e-05 (* 1 = 8.19276e-05 
loss)\nI0818 06:45:09.490591 17472 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0818 06:47:26.566597 17472 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:48:47.963722 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51012\nI0818 06:48:47.963999 17472 solver.cpp:404]     Test net output #1: loss = 3.56448 (* 1 = 3.56448 loss)\nI0818 06:48:49.282464 17472 solver.cpp:228] Iteration 23300, loss = 6.22333e-05\nI0818 06:48:49.282506 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:48:49.282521 17472 solver.cpp:244]     Train net output #1: loss = 6.23183e-05 (* 1 = 6.23183e-05 loss)\nI0818 06:48:49.375510 17472 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0818 06:51:06.550005 17472 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:52:27.940054 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5066\nI0818 06:52:27.940338 17472 solver.cpp:404]     Test net output #1: loss = 3.55608 (* 1 = 3.55608 loss)\nI0818 06:52:29.258298 17472 solver.cpp:228] Iteration 23400, loss = 8.1172e-05\nI0818 06:52:29.258340 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:52:29.258357 17472 solver.cpp:244]     Train net output #1: loss = 8.1257e-05 (* 1 = 8.1257e-05 loss)\nI0818 06:52:29.347004 17472 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0818 06:54:46.604895 17472 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 06:56:08.006376 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5042\nI0818 06:56:08.006665 17472 solver.cpp:404]     Test net output #1: loss = 3.59419 (* 1 = 3.59419 loss)\nI0818 06:56:09.324736 17472 solver.cpp:228] Iteration 23500, loss = 7.34375e-05\nI0818 06:56:09.324779 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:56:09.324793 17472 solver.cpp:244]     Train net output #1: loss = 7.35225e-05 (* 1 = 7.35225e-05 loss)\nI0818 06:56:09.413893 17472 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0818 06:58:26.615241 17472 solver.cpp:337] 
Iteration 23600, Testing net (#0)\nI0818 06:59:48.013803 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50288\nI0818 06:59:48.014118 17472 solver.cpp:404]     Test net output #1: loss = 3.577 (* 1 = 3.577 loss)\nI0818 06:59:49.332484 17472 solver.cpp:228] Iteration 23600, loss = 8.09473e-05\nI0818 06:59:49.332525 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:59:49.332541 17472 solver.cpp:244]     Train net output #1: loss = 8.10323e-05 (* 1 = 8.10323e-05 loss)\nI0818 06:59:49.424656 17472 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0818 07:02:06.465376 17472 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 07:03:27.856796 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49604\nI0818 07:03:27.857072 17472 solver.cpp:404]     Test net output #1: loss = 3.64835 (* 1 = 3.64835 loss)\nI0818 07:03:29.175359 17472 solver.cpp:228] Iteration 23700, loss = 8.92926e-05\nI0818 07:03:29.175403 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:03:29.175420 17472 solver.cpp:244]     Train net output #1: loss = 8.93776e-05 (* 1 = 8.93776e-05 loss)\nI0818 07:03:29.264066 17472 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0818 07:05:46.390220 17472 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 07:07:07.788689 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49656\nI0818 07:07:07.788961 17472 solver.cpp:404]     Test net output #1: loss = 3.60563 (* 1 = 3.60563 loss)\nI0818 07:07:09.107470 17472 solver.cpp:228] Iteration 23800, loss = 9.12544e-05\nI0818 07:07:09.107512 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:07:09.107528 17472 solver.cpp:244]     Train net output #1: loss = 9.13394e-05 (* 1 = 9.13394e-05 loss)\nI0818 07:07:09.200031 17472 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0818 07:09:26.283879 17472 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 07:10:47.675721 17472 solver.cpp:404]     Test net output #0: accuracy = 
0.49544\nI0818 07:10:47.676008 17472 solver.cpp:404]     Test net output #1: loss = 3.61672 (* 1 = 3.61672 loss)\nI0818 07:10:48.993894 17472 solver.cpp:228] Iteration 23900, loss = 8.37221e-05\nI0818 07:10:48.993937 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:10:48.993952 17472 solver.cpp:244]     Train net output #1: loss = 8.38071e-05 (* 1 = 8.38071e-05 loss)\nI0818 07:10:49.084240 17472 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0818 07:13:06.179723 17472 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 07:14:27.576731 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4886\nI0818 07:14:27.577023 17472 solver.cpp:404]     Test net output #1: loss = 3.63105 (* 1 = 3.63105 loss)\nI0818 07:14:28.895027 17472 solver.cpp:228] Iteration 24000, loss = 8.36411e-05\nI0818 07:14:28.895066 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:14:28.895083 17472 solver.cpp:244]     Train net output #1: loss = 8.37261e-05 (* 1 = 8.37261e-05 loss)\nI0818 07:14:28.986052 17472 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0818 07:16:46.079965 17472 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 07:18:07.469974 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48944\nI0818 07:18:07.470319 17472 solver.cpp:404]     Test net output #1: loss = 3.64371 (* 1 = 3.64371 loss)\nI0818 07:18:08.788079 17472 solver.cpp:228] Iteration 24100, loss = 7.41193e-05\nI0818 07:18:08.788120 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:18:08.788136 17472 solver.cpp:244]     Train net output #1: loss = 7.42043e-05 (* 1 = 7.42043e-05 loss)\nI0818 07:18:08.879355 17472 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0818 07:20:26.152251 17472 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 07:21:47.593546 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4886\nI0818 07:21:47.593823 17472 solver.cpp:404]     Test net output #1: loss = 3.59659 (* 1 = 3.59659 loss)\nI0818 
07:21:48.913193 17472 solver.cpp:228] Iteration 24200, loss = 8.38218e-05\nI0818 07:21:48.913239 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:21:48.913262 17472 solver.cpp:244]     Train net output #1: loss = 8.39068e-05 (* 1 = 8.39068e-05 loss)\nI0818 07:21:49.001519 17472 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0818 07:24:06.122336 17472 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 07:25:27.535150 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4894\nI0818 07:25:27.535441 17472 solver.cpp:404]     Test net output #1: loss = 3.59472 (* 1 = 3.59472 loss)\nI0818 07:25:28.854689 17472 solver.cpp:228] Iteration 24300, loss = 9.92796e-05\nI0818 07:25:28.854732 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:25:28.854755 17472 solver.cpp:244]     Train net output #1: loss = 9.93646e-05 (* 1 = 9.93646e-05 loss)\nI0818 07:25:28.942536 17472 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0818 07:27:46.166296 17472 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:29:07.580278 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48696\nI0818 07:29:07.580567 17472 solver.cpp:404]     Test net output #1: loss = 3.56995 (* 1 = 3.56995 loss)\nI0818 07:29:08.898850 17472 solver.cpp:228] Iteration 24400, loss = 0.000121676\nI0818 07:29:08.898893 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:29:08.898916 17472 solver.cpp:244]     Train net output #1: loss = 0.000121761 (* 1 = 0.000121761 loss)\nI0818 07:29:08.989470 17472 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0818 07:31:26.048758 17472 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:32:47.482496 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48432\nI0818 07:32:47.482764 17472 solver.cpp:404]     Test net output #1: loss = 3.61073 (* 1 = 3.61073 loss)\nI0818 07:32:48.800926 17472 solver.cpp:228] Iteration 24500, loss = 9.68419e-05\nI0818 07:32:48.800969 17472 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0818 07:32:48.800992 17472 solver.cpp:244]     Train net output #1: loss = 9.6927e-05 (* 1 = 9.6927e-05 loss)\nI0818 07:32:48.894228 17472 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0818 07:35:05.981137 17472 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 07:36:27.395215 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48492\nI0818 07:36:27.395481 17472 solver.cpp:404]     Test net output #1: loss = 3.56436 (* 1 = 3.56436 loss)\nI0818 07:36:28.714520 17472 solver.cpp:228] Iteration 24600, loss = 8.59475e-05\nI0818 07:36:28.714562 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:36:28.714586 17472 solver.cpp:244]     Train net output #1: loss = 8.60326e-05 (* 1 = 8.60326e-05 loss)\nI0818 07:36:28.806195 17472 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0818 07:38:46.099858 17472 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:40:07.518596 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48596\nI0818 07:40:07.518887 17472 solver.cpp:404]     Test net output #1: loss = 3.57129 (* 1 = 3.57129 loss)\nI0818 07:40:08.838160 17472 solver.cpp:228] Iteration 24700, loss = 9.82197e-05\nI0818 07:40:08.838204 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:40:08.838228 17472 solver.cpp:244]     Train net output #1: loss = 9.83048e-05 (* 1 = 9.83048e-05 loss)\nI0818 07:40:08.926923 17472 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0818 07:42:26.010759 17472 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:43:47.422590 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48768\nI0818 07:43:47.422881 17472 solver.cpp:404]     Test net output #1: loss = 3.50593 (* 1 = 3.50593 loss)\nI0818 07:43:48.741044 17472 solver.cpp:228] Iteration 24800, loss = 0.000113512\nI0818 07:43:48.741089 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:43:48.741112 17472 solver.cpp:244]     Train net output #1: loss = 0.000113597 
(* 1 = 0.000113597 loss)\nI0818 07:43:48.833375 17472 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0818 07:46:06.067308 17472 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:47:27.491472 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4856\nI0818 07:47:27.491761 17472 solver.cpp:404]     Test net output #1: loss = 3.54194 (* 1 = 3.54194 loss)\nI0818 07:47:28.811532 17472 solver.cpp:228] Iteration 24900, loss = 0.00011206\nI0818 07:47:28.811578 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:47:28.811602 17472 solver.cpp:244]     Train net output #1: loss = 0.000112145 (* 1 = 0.000112145 loss)\nI0818 07:47:28.895630 17472 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0818 07:49:45.970805 17472 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:51:07.391301 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48272\nI0818 07:51:07.391592 17472 solver.cpp:404]     Test net output #1: loss = 3.52994 (* 1 = 3.52994 loss)\nI0818 07:51:08.710887 17472 solver.cpp:228] Iteration 25000, loss = 0.000105387\nI0818 07:51:08.710933 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:51:08.710955 17472 solver.cpp:244]     Train net output #1: loss = 0.000105472 (* 1 = 0.000105472 loss)\nI0818 07:51:08.794788 17472 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0818 07:53:25.873123 17472 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 07:54:47.288816 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4836\nI0818 07:54:47.289120 17472 solver.cpp:404]     Test net output #1: loss = 3.5206 (* 1 = 3.5206 loss)\nI0818 07:54:48.607331 17472 solver.cpp:228] Iteration 25100, loss = 9.97986e-05\nI0818 07:54:48.607372 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:54:48.607388 17472 solver.cpp:244]     Train net output #1: loss = 9.98836e-05 (* 1 = 9.98836e-05 loss)\nI0818 07:54:48.696992 17472 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0818 07:57:05.702986 
17472 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 07:58:27.110138 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4808\nI0818 07:58:27.110414 17472 solver.cpp:404]     Test net output #1: loss = 3.53133 (* 1 = 3.53133 loss)\nI0818 07:58:28.428457 17472 solver.cpp:228] Iteration 25200, loss = 9.60191e-05\nI0818 07:58:28.428500 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:58:28.428516 17472 solver.cpp:244]     Train net output #1: loss = 9.61041e-05 (* 1 = 9.61041e-05 loss)\nI0818 07:58:28.515085 17472 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0818 08:00:45.504592 17472 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 08:02:06.916132 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48176\nI0818 08:02:06.916409 17472 solver.cpp:404]     Test net output #1: loss = 3.53761 (* 1 = 3.53761 loss)\nI0818 08:02:08.234506 17472 solver.cpp:228] Iteration 25300, loss = 0.000100028\nI0818 08:02:08.234549 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:02:08.234565 17472 solver.cpp:244]     Train net output #1: loss = 0.000100113 (* 1 = 0.000100113 loss)\nI0818 08:02:08.324872 17472 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0818 08:04:25.488219 17472 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 08:05:46.888883 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48132\nI0818 08:05:46.889171 17472 solver.cpp:404]     Test net output #1: loss = 3.51218 (* 1 = 3.51218 loss)\nI0818 08:05:48.207716 17472 solver.cpp:228] Iteration 25400, loss = 9.87277e-05\nI0818 08:05:48.207756 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:05:48.207773 17472 solver.cpp:244]     Train net output #1: loss = 9.88127e-05 (* 1 = 9.88127e-05 loss)\nI0818 08:05:48.295055 17472 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0818 08:08:05.378042 17472 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 08:09:26.788746 17472 solver.cpp:404]     Test net output 
#0: accuracy = 0.4808\nI0818 08:09:26.789036 17472 solver.cpp:404]     Test net output #1: loss = 3.54528 (* 1 = 3.54528 loss)\nI0818 08:09:28.107774 17472 solver.cpp:228] Iteration 25500, loss = 9.7636e-05\nI0818 08:09:28.107818 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:09:28.107834 17472 solver.cpp:244]     Train net output #1: loss = 9.7721e-05 (* 1 = 9.7721e-05 loss)\nI0818 08:09:28.196112 17472 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0818 08:11:45.396572 17472 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 08:13:06.791982 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48296\nI0818 08:13:06.792274 17472 solver.cpp:404]     Test net output #1: loss = 3.468 (* 1 = 3.468 loss)\nI0818 08:13:08.110581 17472 solver.cpp:228] Iteration 25600, loss = 9.25744e-05\nI0818 08:13:08.110622 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:13:08.110637 17472 solver.cpp:244]     Train net output #1: loss = 9.26594e-05 (* 1 = 9.26594e-05 loss)\nI0818 08:13:08.202656 17472 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0818 08:15:25.255002 17472 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 08:16:46.646282 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4826\nI0818 08:16:46.646555 17472 solver.cpp:404]     Test net output #1: loss = 3.4891 (* 1 = 3.4891 loss)\nI0818 08:16:47.964862 17472 solver.cpp:228] Iteration 25700, loss = 0.000134419\nI0818 08:16:47.964902 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:16:47.964923 17472 solver.cpp:244]     Train net output #1: loss = 0.000134504 (* 1 = 0.000134504 loss)\nI0818 08:16:48.051204 17472 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0818 08:19:05.198313 17472 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 08:20:26.603896 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4846\nI0818 08:20:26.604188 17472 solver.cpp:404]     Test net output #1: loss = 3.4009 (* 1 = 3.4009 loss)\nI0818 
08:20:27.922741 17472 solver.cpp:228] Iteration 25800, loss = 0.000137429\nI0818 08:20:27.922782 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:20:27.922797 17472 solver.cpp:244]     Train net output #1: loss = 0.000137514 (* 1 = 0.000137514 loss)\nI0818 08:20:28.015807 17472 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0818 08:22:45.030720 17472 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 08:24:06.432080 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48244\nI0818 08:24:06.432370 17472 solver.cpp:404]     Test net output #1: loss = 3.45069 (* 1 = 3.45069 loss)\nI0818 08:24:07.750520 17472 solver.cpp:228] Iteration 25900, loss = 0.000103901\nI0818 08:24:07.750563 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:24:07.750579 17472 solver.cpp:244]     Train net output #1: loss = 0.000103986 (* 1 = 0.000103986 loss)\nI0818 08:24:07.844264 17472 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0818 08:26:24.936367 17472 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:27:46.343757 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4852\nI0818 08:27:46.344054 17472 solver.cpp:404]     Test net output #1: loss = 3.37312 (* 1 = 3.37312 loss)\nI0818 08:27:47.662266 17472 solver.cpp:228] Iteration 26000, loss = 0.000117083\nI0818 08:27:47.662307 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:27:47.662323 17472 solver.cpp:244]     Train net output #1: loss = 0.000117168 (* 1 = 0.000117168 loss)\nI0818 08:27:47.753525 17472 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0818 08:30:05.012377 17472 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:31:26.406703 17472 solver.cpp:404]     Test net output #0: accuracy = 0.483\nI0818 08:31:26.406971 17472 solver.cpp:404]     Test net output #1: loss = 3.4368 (* 1 = 3.4368 loss)\nI0818 08:31:27.725572 17472 solver.cpp:228] Iteration 26100, loss = 0.000134564\nI0818 08:31:27.725615 17472 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0818 08:31:27.725631 17472 solver.cpp:244]     Train net output #1: loss = 0.000134649 (* 1 = 0.000134649 loss)\nI0818 08:31:27.817028 17472 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0818 08:33:45.048840 17472 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 08:35:06.451042 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4818\nI0818 08:35:06.451306 17472 solver.cpp:404]     Test net output #1: loss = 3.38247 (* 1 = 3.38247 loss)\nI0818 08:35:07.769728 17472 solver.cpp:228] Iteration 26200, loss = 0.000124009\nI0818 08:35:07.769770 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:35:07.769786 17472 solver.cpp:244]     Train net output #1: loss = 0.000124094 (* 1 = 0.000124094 loss)\nI0818 08:35:07.856016 17472 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0818 08:37:25.001189 17472 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:38:46.415304 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4836\nI0818 08:38:46.415593 17472 solver.cpp:404]     Test net output #1: loss = 3.38671 (* 1 = 3.38671 loss)\nI0818 08:38:47.734349 17472 solver.cpp:228] Iteration 26300, loss = 0.000102227\nI0818 08:38:47.734391 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:38:47.734407 17472 solver.cpp:244]     Train net output #1: loss = 0.000102312 (* 1 = 0.000102312 loss)\nI0818 08:38:47.824393 17472 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0818 08:41:05.001955 17472 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:42:26.418781 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48328\nI0818 08:42:26.419064 17472 solver.cpp:404]     Test net output #1: loss = 3.35725 (* 1 = 3.35725 loss)\nI0818 08:42:27.737519 17472 solver.cpp:228] Iteration 26400, loss = 0.000129979\nI0818 08:42:27.737560 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:42:27.737576 17472 solver.cpp:244]     Train net output #1: loss = 0.000130064 
(* 1 = 0.000130064 loss)\nI0818 08:42:27.824219 17472 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0818 08:44:44.980849 17472 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:46:06.395444 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48672\nI0818 08:46:06.395732 17472 solver.cpp:404]     Test net output #1: loss = 3.33675 (* 1 = 3.33675 loss)\nI0818 08:46:07.714344 17472 solver.cpp:228] Iteration 26500, loss = 0.000122775\nI0818 08:46:07.714386 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:46:07.714402 17472 solver.cpp:244]     Train net output #1: loss = 0.00012286 (* 1 = 0.00012286 loss)\nI0818 08:46:07.805995 17472 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0818 08:48:24.824028 17472 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:49:46.223793 17472 solver.cpp:404]     Test net output #0: accuracy = 0.484\nI0818 08:49:46.224064 17472 solver.cpp:404]     Test net output #1: loss = 3.33156 (* 1 = 3.33156 loss)\nI0818 08:49:47.541990 17472 solver.cpp:228] Iteration 26600, loss = 0.000119909\nI0818 08:49:47.542031 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:49:47.542047 17472 solver.cpp:244]     Train net output #1: loss = 0.000119994 (* 1 = 0.000119994 loss)\nI0818 08:49:47.630949 17472 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0818 08:52:04.743252 17472 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 08:53:26.283396 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48588\nI0818 08:53:26.283686 17472 solver.cpp:404]     Test net output #1: loss = 3.31094 (* 1 = 3.31094 loss)\nI0818 08:53:27.602381 17472 solver.cpp:228] Iteration 26700, loss = 0.00012044\nI0818 08:53:27.602422 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:53:27.602438 17472 solver.cpp:244]     Train net output #1: loss = 0.000120525 (* 1 = 0.000120525 loss)\nI0818 08:53:27.690129 17472 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0818 08:55:44.694694 
17472 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 08:57:06.110491 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48416\nI0818 08:57:06.110788 17472 solver.cpp:404]     Test net output #1: loss = 3.31321 (* 1 = 3.31321 loss)\nI0818 08:57:07.428575 17472 solver.cpp:228] Iteration 26800, loss = 0.000108817\nI0818 08:57:07.428616 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:57:07.428632 17472 solver.cpp:244]     Train net output #1: loss = 0.000108902 (* 1 = 0.000108902 loss)\nI0818 08:57:07.517148 17472 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0818 08:59:24.754674 17472 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 09:00:46.163069 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48272\nI0818 09:00:46.163360 17472 solver.cpp:404]     Test net output #1: loss = 3.36258 (* 1 = 3.36258 loss)\nI0818 09:00:47.481659 17472 solver.cpp:228] Iteration 26900, loss = 0.000136906\nI0818 09:00:47.481701 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:00:47.481717 17472 solver.cpp:244]     Train net output #1: loss = 0.000136991 (* 1 = 0.000136991 loss)\nI0818 09:00:47.571028 17472 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0818 09:03:04.644723 17472 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 09:04:26.048890 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48296\nI0818 09:04:26.049180 17472 solver.cpp:404]     Test net output #1: loss = 3.30444 (* 1 = 3.30444 loss)\nI0818 09:04:27.367113 17472 solver.cpp:228] Iteration 27000, loss = 0.000123503\nI0818 09:04:27.367156 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:04:27.367172 17472 solver.cpp:244]     Train net output #1: loss = 0.000123588 (* 1 = 0.000123588 loss)\nI0818 09:04:27.458159 17472 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0818 09:06:44.541321 17472 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 09:08:05.945637 17472 solver.cpp:404]     Test net output 
#0: accuracy = 0.48464\nI0818 09:08:05.945925 17472 solver.cpp:404]     Test net output #1: loss = 3.31745 (* 1 = 3.31745 loss)\nI0818 09:08:07.263645 17472 solver.cpp:228] Iteration 27100, loss = 0.000145979\nI0818 09:08:07.263686 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:08:07.263702 17472 solver.cpp:244]     Train net output #1: loss = 0.000146064 (* 1 = 0.000146064 loss)\nI0818 09:08:07.354859 17472 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0818 09:10:24.488983 17472 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 09:11:45.905521 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48492\nI0818 09:11:45.905805 17472 solver.cpp:404]     Test net output #1: loss = 3.28071 (* 1 = 3.28071 loss)\nI0818 09:11:47.224704 17472 solver.cpp:228] Iteration 27200, loss = 0.000141042\nI0818 09:11:47.224745 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:11:47.224761 17472 solver.cpp:244]     Train net output #1: loss = 0.000141127 (* 1 = 0.000141127 loss)\nI0818 09:11:47.313680 17472 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0818 09:14:04.467628 17472 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 09:15:25.864679 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48476\nI0818 09:15:25.864970 17472 solver.cpp:404]     Test net output #1: loss = 3.28885 (* 1 = 3.28885 loss)\nI0818 09:15:27.183329 17472 solver.cpp:228] Iteration 27300, loss = 0.000174227\nI0818 09:15:27.183369 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:15:27.183385 17472 solver.cpp:244]     Train net output #1: loss = 0.000174312 (* 1 = 0.000174312 loss)\nI0818 09:15:27.270957 17472 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0818 09:17:44.323930 17472 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 09:19:05.736239 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48748\nI0818 09:19:05.736526 17472 solver.cpp:404]     Test net output #1: loss = 3.22043 (* 1 = 3.22043 
loss)\nI0818 09:19:07.054772 17472 solver.cpp:228] Iteration 27400, loss = 0.000149141\nI0818 09:19:07.054811 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:19:07.054826 17472 solver.cpp:244]     Train net output #1: loss = 0.000149226 (* 1 = 0.000149226 loss)\nI0818 09:19:07.142590 17472 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0818 09:21:24.210512 17472 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 09:22:45.625855 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48508\nI0818 09:22:45.626148 17472 solver.cpp:404]     Test net output #1: loss = 3.27484 (* 1 = 3.27484 loss)\nI0818 09:22:46.944252 17472 solver.cpp:228] Iteration 27500, loss = 0.000148512\nI0818 09:22:46.944289 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:22:46.944304 17472 solver.cpp:244]     Train net output #1: loss = 0.000148598 (* 1 = 0.000148598 loss)\nI0818 09:22:47.033476 17472 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0818 09:25:04.101704 17472 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:26:25.498903 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4852\nI0818 09:26:25.499176 17472 solver.cpp:404]     Test net output #1: loss = 3.23556 (* 1 = 3.23556 loss)\nI0818 09:26:26.817845 17472 solver.cpp:228] Iteration 27600, loss = 0.000181828\nI0818 09:26:26.817888 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:26:26.817903 17472 solver.cpp:244]     Train net output #1: loss = 0.000181913 (* 1 = 0.000181913 loss)\nI0818 09:26:26.909461 17472 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0818 09:28:44.025416 17472 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:30:05.442517 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48668\nI0818 09:30:05.442803 17472 solver.cpp:404]     Test net output #1: loss = 3.24245 (* 1 = 3.24245 loss)\nI0818 09:30:06.761437 17472 solver.cpp:228] Iteration 27700, loss = 0.00013426\nI0818 09:30:06.761478 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:30:06.761494 17472 solver.cpp:244]     Train net output #1: loss = 0.000134345 (* 1 = 0.000134345 loss)\nI0818 09:30:06.851737 17472 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0818 09:32:23.933997 17472 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 09:33:45.356865 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48984\nI0818 09:33:45.357158 17472 solver.cpp:404]     Test net output #1: loss = 3.17447 (* 1 = 3.17447 loss)\nI0818 09:33:46.675298 17472 solver.cpp:228] Iteration 27800, loss = 0.000108588\nI0818 09:33:46.675341 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:33:46.675356 17472 solver.cpp:244]     Train net output #1: loss = 0.000108673 (* 1 = 0.000108673 loss)\nI0818 09:33:46.766155 17472 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0818 09:36:03.930793 17472 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:37:25.351208 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48844\nI0818 09:37:25.351595 17472 solver.cpp:404]     Test net output #1: loss = 3.20694 (* 1 = 3.20694 loss)\nI0818 09:37:26.670063 17472 solver.cpp:228] Iteration 27900, loss = 0.000157483\nI0818 09:37:26.670104 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:37:26.670119 17472 solver.cpp:244]     Train net output #1: loss = 0.000157568 (* 1 = 0.000157568 loss)\nI0818 09:37:26.757621 17472 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0818 09:39:43.924975 17472 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:41:05.340801 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48944\nI0818 09:41:05.341068 17472 solver.cpp:404]     Test net output #1: loss = 3.16112 (* 1 = 3.16112 loss)\nI0818 09:41:06.659617 17472 solver.cpp:228] Iteration 28000, loss = 0.000156488\nI0818 09:41:06.659659 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:41:06.659675 17472 solver.cpp:244]     Train net output 
#1: loss = 0.000156573 (* 1 = 0.000156573 loss)\nI0818 09:41:06.750607 17472 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0818 09:43:23.864717 17472 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:44:45.277441 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48924\nI0818 09:44:45.277734 17472 solver.cpp:404]     Test net output #1: loss = 3.18101 (* 1 = 3.18101 loss)\nI0818 09:44:46.596496 17472 solver.cpp:228] Iteration 28100, loss = 0.00016312\nI0818 09:44:46.596539 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:44:46.596554 17472 solver.cpp:244]     Train net output #1: loss = 0.000163205 (* 1 = 0.000163205 loss)\nI0818 09:44:46.686136 17472 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0818 09:47:03.711856 17472 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:48:25.152542 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48872\nI0818 09:48:25.152832 17472 solver.cpp:404]     Test net output #1: loss = 3.15101 (* 1 = 3.15101 loss)\nI0818 09:48:26.471513 17472 solver.cpp:228] Iteration 28200, loss = 0.000168918\nI0818 09:48:26.471554 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:48:26.471570 17472 solver.cpp:244]     Train net output #1: loss = 0.000169003 (* 1 = 0.000169003 loss)\nI0818 09:48:26.562813 17472 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0818 09:50:43.695832 17472 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 09:52:05.109490 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49152\nI0818 09:52:05.109771 17472 solver.cpp:404]     Test net output #1: loss = 3.15121 (* 1 = 3.15121 loss)\nI0818 09:52:06.428225 17472 solver.cpp:228] Iteration 28300, loss = 0.000151989\nI0818 09:52:06.428264 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:52:06.428280 17472 solver.cpp:244]     Train net output #1: loss = 0.000152074 (* 1 = 0.000152074 loss)\nI0818 09:52:06.519086 17472 sgd_solver.cpp:166] Iteration 28300, lr = 
0.35\nI0818 09:54:23.564254 17472 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 09:55:44.966478 17472 solver.cpp:404]     Test net output #0: accuracy = 0.496\nI0818 09:55:44.966773 17472 solver.cpp:404]     Test net output #1: loss = 3.05862 (* 1 = 3.05862 loss)\nI0818 09:55:46.285004 17472 solver.cpp:228] Iteration 28400, loss = 0.000135154\nI0818 09:55:46.285045 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:55:46.285061 17472 solver.cpp:244]     Train net output #1: loss = 0.000135239 (* 1 = 0.000135239 loss)\nI0818 09:55:46.372174 17472 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0818 09:58:03.455337 17472 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 09:59:24.867677 17472 solver.cpp:404]     Test net output #0: accuracy = 0.491\nI0818 09:59:24.867969 17472 solver.cpp:404]     Test net output #1: loss = 3.12373 (* 1 = 3.12373 loss)\nI0818 09:59:26.186764 17472 solver.cpp:228] Iteration 28500, loss = 0.000181365\nI0818 09:59:26.186806 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:59:26.186822 17472 solver.cpp:244]     Train net output #1: loss = 0.00018145 (* 1 = 0.00018145 loss)\nI0818 09:59:26.272936 17472 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0818 10:01:43.331478 17472 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 10:03:04.740514 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49128\nI0818 10:03:04.740804 17472 solver.cpp:404]     Test net output #1: loss = 3.08697 (* 1 = 3.08697 loss)\nI0818 10:03:06.059500 17472 solver.cpp:228] Iteration 28600, loss = 0.000131713\nI0818 10:03:06.059542 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:03:06.059558 17472 solver.cpp:244]     Train net output #1: loss = 0.000131798 (* 1 = 0.000131798 loss)\nI0818 10:03:06.148492 17472 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0818 10:05:23.284009 17472 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 10:06:44.696568 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.4936\nI0818 10:06:44.696817 17472 solver.cpp:404]     Test net output #1: loss = 3.08327 (* 1 = 3.08327 loss)\nI0818 10:06:46.015305 17472 solver.cpp:228] Iteration 28700, loss = 0.000145998\nI0818 10:06:46.015347 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:06:46.015362 17472 solver.cpp:244]     Train net output #1: loss = 0.000146083 (* 1 = 0.000146083 loss)\nI0818 10:06:46.102402 17472 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0818 10:09:03.221101 17472 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 10:10:24.626777 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49452\nI0818 10:10:24.627085 17472 solver.cpp:404]     Test net output #1: loss = 3.05691 (* 1 = 3.05691 loss)\nI0818 10:10:25.945854 17472 solver.cpp:228] Iteration 28800, loss = 0.000157279\nI0818 10:10:25.945896 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:10:25.945917 17472 solver.cpp:244]     Train net output #1: loss = 0.000157364 (* 1 = 0.000157364 loss)\nI0818 10:10:26.038952 17472 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0818 10:12:43.289748 17472 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 10:14:04.700017 17472 solver.cpp:404]     Test net output #0: accuracy = 0.48988\nI0818 10:14:04.700305 17472 solver.cpp:404]     Test net output #1: loss = 3.12176 (* 1 = 3.12176 loss)\nI0818 10:14:06.018752 17472 solver.cpp:228] Iteration 28900, loss = 0.000184313\nI0818 10:14:06.018795 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:14:06.018810 17472 solver.cpp:244]     Train net output #1: loss = 0.000184398 (* 1 = 0.000184398 loss)\nI0818 10:14:06.111240 17472 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0818 10:16:23.303278 17472 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 10:17:44.722817 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49484\nI0818 10:17:44.723086 17472 solver.cpp:404]     Test net 
output #1: loss = 3.02155 (* 1 = 3.02155 loss)\nI0818 10:17:46.041234 17472 solver.cpp:228] Iteration 29000, loss = 0.000159112\nI0818 10:17:46.041276 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:17:46.041292 17472 solver.cpp:244]     Train net output #1: loss = 0.000159197 (* 1 = 0.000159197 loss)\nI0818 10:17:46.133059 17472 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0818 10:20:03.343412 17472 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 10:21:24.764451 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49212\nI0818 10:21:24.764739 17472 solver.cpp:404]     Test net output #1: loss = 3.07669 (* 1 = 3.07669 loss)\nI0818 10:21:26.083283 17472 solver.cpp:228] Iteration 29100, loss = 0.000175628\nI0818 10:21:26.083326 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:21:26.083343 17472 solver.cpp:244]     Train net output #1: loss = 0.000175713 (* 1 = 0.000175713 loss)\nI0818 10:21:26.169344 17472 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0818 10:23:43.251646 17472 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:25:04.675940 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49628\nI0818 10:25:04.676231 17472 solver.cpp:404]     Test net output #1: loss = 3.01658 (* 1 = 3.01658 loss)\nI0818 10:25:05.994804 17472 solver.cpp:228] Iteration 29200, loss = 0.000240681\nI0818 10:25:05.994846 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:25:05.994863 17472 solver.cpp:244]     Train net output #1: loss = 0.000240766 (* 1 = 0.000240766 loss)\nI0818 10:25:06.079324 17472 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0818 10:27:23.193084 17472 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:28:44.616971 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49148\nI0818 10:28:44.617272 17472 solver.cpp:404]     Test net output #1: loss = 3.07232 (* 1 = 3.07232 loss)\nI0818 10:28:45.935622 17472 solver.cpp:228] Iteration 29300, loss = 
0.000154949\nI0818 10:28:45.935662 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:28:45.935678 17472 solver.cpp:244]     Train net output #1: loss = 0.000155034 (* 1 = 0.000155034 loss)\nI0818 10:28:46.032716 17472 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0818 10:31:03.142457 17472 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:32:24.602583 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49452\nI0818 10:32:24.602880 17472 solver.cpp:404]     Test net output #1: loss = 3.01876 (* 1 = 3.01876 loss)\nI0818 10:32:25.922875 17472 solver.cpp:228] Iteration 29400, loss = 0.000148634\nI0818 10:32:25.922920 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:32:25.922945 17472 solver.cpp:244]     Train net output #1: loss = 0.000148719 (* 1 = 0.000148719 loss)\nI0818 10:32:26.017365 17472 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0818 10:34:43.168802 17472 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:36:04.767354 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49424\nI0818 10:36:04.767652 17472 solver.cpp:404]     Test net output #1: loss = 3.03655 (* 1 = 3.03655 loss)\nI0818 10:36:06.087504 17472 solver.cpp:228] Iteration 29500, loss = 0.00019127\nI0818 10:36:06.087549 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:36:06.087574 17472 solver.cpp:244]     Train net output #1: loss = 0.000191355 (* 1 = 0.000191355 loss)\nI0818 10:36:06.178460 17472 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0818 10:38:23.196437 17472 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:39:44.749249 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49372\nI0818 10:39:44.749552 17472 solver.cpp:404]     Test net output #1: loss = 3.03216 (* 1 = 3.03216 loss)\nI0818 10:39:46.069625 17472 solver.cpp:228] Iteration 29600, loss = 0.000143077\nI0818 10:39:46.069670 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:39:46.069695 
17472 solver.cpp:244]     Train net output #1: loss = 0.000143162 (* 1 = 0.000143162 loss)\nI0818 10:39:46.157596 17472 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0818 10:42:03.167536 17472 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:43:24.621759 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49648\nI0818 10:43:24.622061 17472 solver.cpp:404]     Test net output #1: loss = 3.0096 (* 1 = 3.0096 loss)\nI0818 10:43:25.942036 17472 solver.cpp:228] Iteration 29700, loss = 0.000161085\nI0818 10:43:25.942085 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:43:25.942111 17472 solver.cpp:244]     Train net output #1: loss = 0.00016117 (* 1 = 0.00016117 loss)\nI0818 10:43:26.030004 17472 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0818 10:45:43.120177 17472 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 10:47:04.544996 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49424\nI0818 10:47:04.545303 17472 solver.cpp:404]     Test net output #1: loss = 3.00986 (* 1 = 3.00986 loss)\nI0818 10:47:05.864954 17472 solver.cpp:228] Iteration 29800, loss = 0.000151583\nI0818 10:47:05.865000 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:47:05.865023 17472 solver.cpp:244]     Train net output #1: loss = 0.000151668 (* 1 = 0.000151668 loss)\nI0818 10:47:05.953127 17472 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0818 10:49:23.007408 17472 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:50:44.438019 17472 solver.cpp:404]     Test net output #0: accuracy = 0.497\nI0818 10:50:44.438335 17472 solver.cpp:404]     Test net output #1: loss = 2.99789 (* 1 = 2.99789 loss)\nI0818 10:50:45.757593 17472 solver.cpp:228] Iteration 29900, loss = 0.00013637\nI0818 10:50:45.757638 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:50:45.757663 17472 solver.cpp:244]     Train net output #1: loss = 0.000136455 (* 1 = 0.000136455 loss)\nI0818 10:50:45.850617 17472 
sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0818 10:53:02.966154 17472 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 10:54:24.462265 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49512\nI0818 10:54:24.462569 17472 solver.cpp:404]     Test net output #1: loss = 2.98637 (* 1 = 2.98637 loss)\nI0818 10:54:25.782702 17472 solver.cpp:228] Iteration 30000, loss = 0.000182819\nI0818 10:54:25.782749 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:54:25.782773 17472 solver.cpp:244]     Train net output #1: loss = 0.000182904 (* 1 = 0.000182904 loss)\nI0818 10:54:25.867600 17472 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0818 10:56:43.163748 17472 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 10:58:04.605790 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49372\nI0818 10:58:04.606104 17472 solver.cpp:404]     Test net output #1: loss = 2.9998 (* 1 = 2.9998 loss)\nI0818 10:58:05.925765 17472 solver.cpp:228] Iteration 30100, loss = 0.000145835\nI0818 10:58:05.925812 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:58:05.925837 17472 solver.cpp:244]     Train net output #1: loss = 0.00014592 (* 1 = 0.00014592 loss)\nI0818 10:58:06.014472 17472 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0818 11:00:23.253993 17472 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 11:01:44.699126 17472 solver.cpp:404]     Test net output #0: accuracy = 0.493\nI0818 11:01:44.699421 17472 solver.cpp:404]     Test net output #1: loss = 3.00399 (* 1 = 3.00399 loss)\nI0818 11:01:46.019693 17472 solver.cpp:228] Iteration 30200, loss = 0.000164722\nI0818 11:01:46.019740 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:01:46.019765 17472 solver.cpp:244]     Train net output #1: loss = 0.000164807 (* 1 = 0.000164807 loss)\nI0818 11:01:46.108487 17472 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0818 11:04:03.389801 17472 solver.cpp:337] Iteration 30300, Testing net 
(#0)\nI0818 11:05:24.827510 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49296\nI0818 11:05:24.827810 17472 solver.cpp:404]     Test net output #1: loss = 3.01787 (* 1 = 3.01787 loss)\nI0818 11:05:26.146872 17472 solver.cpp:228] Iteration 30300, loss = 0.000159287\nI0818 11:05:26.146919 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:05:26.146944 17472 solver.cpp:244]     Train net output #1: loss = 0.000159372 (* 1 = 0.000159372 loss)\nI0818 11:05:26.236008 17472 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0818 11:07:43.296969 17472 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 11:09:04.759311 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49764\nI0818 11:09:04.759604 17472 solver.cpp:404]     Test net output #1: loss = 2.93644 (* 1 = 2.93644 loss)\nI0818 11:09:06.079543 17472 solver.cpp:228] Iteration 30400, loss = 0.000203092\nI0818 11:09:06.079589 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:09:06.079612 17472 solver.cpp:244]     Train net output #1: loss = 0.000203177 (* 1 = 0.000203177 loss)\nI0818 11:09:06.169589 17472 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0818 11:11:23.267776 17472 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 11:12:44.785754 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49832\nI0818 11:12:44.786059 17472 solver.cpp:404]     Test net output #1: loss = 2.96751 (* 1 = 2.96751 loss)\nI0818 11:12:46.106258 17472 solver.cpp:228] Iteration 30500, loss = 0.000212328\nI0818 11:12:46.106307 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:12:46.106328 17472 solver.cpp:244]     Train net output #1: loss = 0.000212413 (* 1 = 0.000212413 loss)\nI0818 11:12:46.191501 17472 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0818 11:15:03.325373 17472 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 11:16:24.752662 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49808\nI0818 11:16:24.752962 17472 
solver.cpp:404]     Test net output #1: loss = 2.92986 (* 1 = 2.92986 loss)\nI0818 11:16:26.072075 17472 solver.cpp:228] Iteration 30600, loss = 0.000244207\nI0818 11:16:26.072113 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:16:26.072135 17472 solver.cpp:244]     Train net output #1: loss = 0.000244292 (* 1 = 0.000244292 loss)\nI0818 11:16:26.160974 17472 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0818 11:18:43.287848 17472 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 11:20:04.767746 17472 solver.cpp:404]     Test net output #0: accuracy = 0.4938\nI0818 11:20:04.768039 17472 solver.cpp:404]     Test net output #1: loss = 2.97163 (* 1 = 2.97163 loss)\nI0818 11:20:06.087782 17472 solver.cpp:228] Iteration 30700, loss = 0.000161671\nI0818 11:20:06.087817 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:20:06.087841 17472 solver.cpp:244]     Train net output #1: loss = 0.000161756 (* 1 = 0.000161756 loss)\nI0818 11:20:06.173506 17472 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0818 11:22:23.370249 17472 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 11:23:44.818135 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49468\nI0818 11:23:44.818423 17472 solver.cpp:404]     Test net output #1: loss = 2.95762 (* 1 = 2.95762 loss)\nI0818 11:23:46.137465 17472 solver.cpp:228] Iteration 30800, loss = 0.000188594\nI0818 11:23:46.137501 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:23:46.137523 17472 solver.cpp:244]     Train net output #1: loss = 0.000188679 (* 1 = 0.000188679 loss)\nI0818 11:23:46.225137 17472 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0818 11:26:03.153646 17472 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:27:24.581737 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49688\nI0818 11:27:24.582036 17472 solver.cpp:404]     Test net output #1: loss = 2.96032 (* 1 = 2.96032 loss)\nI0818 11:27:25.901408 17472 solver.cpp:228] 
Iteration 30900, loss = 0.000193104\nI0818 11:27:25.901445 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:27:25.901468 17472 solver.cpp:244]     Train net output #1: loss = 0.000193189 (* 1 = 0.000193189 loss)\nI0818 11:27:25.983911 17472 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0818 11:29:42.923357 17472 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:31:04.381697 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49848\nI0818 11:31:04.381997 17472 solver.cpp:404]     Test net output #1: loss = 2.91662 (* 1 = 2.91662 loss)\nI0818 11:31:05.701437 17472 solver.cpp:228] Iteration 31000, loss = 0.000192002\nI0818 11:31:05.701474 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:31:05.701498 17472 solver.cpp:244]     Train net output #1: loss = 0.000192087 (* 1 = 0.000192087 loss)\nI0818 11:31:05.790995 17472 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0818 11:33:22.720160 17472 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:34:44.227969 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49864\nI0818 11:34:44.228276 17472 solver.cpp:404]     Test net output #1: loss = 2.92855 (* 1 = 2.92855 loss)\nI0818 11:34:45.546461 17472 solver.cpp:228] Iteration 31100, loss = 0.000208987\nI0818 11:34:45.546499 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:34:45.546521 17472 solver.cpp:244]     Train net output #1: loss = 0.000209072 (* 1 = 0.000209072 loss)\nI0818 11:34:45.634490 17472 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0818 11:37:02.482079 17472 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:38:23.949790 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50072\nI0818 11:38:23.950078 17472 solver.cpp:404]     Test net output #1: loss = 2.88304 (* 1 = 2.88304 loss)\nI0818 11:38:25.270089 17472 solver.cpp:228] Iteration 31200, loss = 0.000197538\nI0818 11:38:25.270128 17472 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0818 11:38:25.270149 17472 solver.cpp:244]     Train net output #1: loss = 0.000197623 (* 1 = 0.000197623 loss)\nI0818 11:38:25.358317 17472 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0818 11:40:42.216434 17472 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:42:03.684530 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49544\nI0818 11:42:03.684815 17472 solver.cpp:404]     Test net output #1: loss = 2.94309 (* 1 = 2.94309 loss)\nI0818 11:42:05.004580 17472 solver.cpp:228] Iteration 31300, loss = 0.000184558\nI0818 11:42:05.004617 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:42:05.004639 17472 solver.cpp:244]     Train net output #1: loss = 0.000184643 (* 1 = 0.000184643 loss)\nI0818 11:42:05.092710 17472 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0818 11:44:22.240550 17472 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:45:43.730406 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50228\nI0818 11:45:43.730708 17472 solver.cpp:404]     Test net output #1: loss = 2.85998 (* 1 = 2.85998 loss)\nI0818 11:45:45.049655 17472 solver.cpp:228] Iteration 31400, loss = 0.000180766\nI0818 11:45:45.049691 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:45:45.049715 17472 solver.cpp:244]     Train net output #1: loss = 0.000180851 (* 1 = 0.000180851 loss)\nI0818 11:45:45.139963 17472 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0818 11:48:02.021893 17472 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:49:23.496296 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49876\nI0818 11:49:23.496598 17472 solver.cpp:404]     Test net output #1: loss = 2.91734 (* 1 = 2.91734 loss)\nI0818 11:49:24.816210 17472 solver.cpp:228] Iteration 31500, loss = 0.000183706\nI0818 11:49:24.816246 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:49:24.816267 17472 solver.cpp:244]     Train net output #1: loss = 0.000183791 (* 1 = 0.000183791 
loss)\nI0818 11:49:24.904922 17472 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0818 11:51:42.048436 17472 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 11:53:03.507185 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5014\nI0818 11:53:03.507489 17472 solver.cpp:404]     Test net output #1: loss = 2.85884 (* 1 = 2.85884 loss)\nI0818 11:53:04.827220 17472 solver.cpp:228] Iteration 31600, loss = 0.000196534\nI0818 11:53:04.827257 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:53:04.827280 17472 solver.cpp:244]     Train net output #1: loss = 0.000196619 (* 1 = 0.000196619 loss)\nI0818 11:53:04.909842 17472 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0818 11:55:21.738591 17472 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 11:56:43.238462 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50192\nI0818 11:56:43.238765 17472 solver.cpp:404]     Test net output #1: loss = 2.87027 (* 1 = 2.87027 loss)\nI0818 11:56:44.558058 17472 solver.cpp:228] Iteration 31700, loss = 0.000208454\nI0818 11:56:44.558099 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:56:44.558121 17472 solver.cpp:244]     Train net output #1: loss = 0.000208539 (* 1 = 0.000208539 loss)\nI0818 11:56:44.642801 17472 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0818 11:59:01.516077 17472 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 12:00:22.941905 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50168\nI0818 12:00:22.942205 17472 solver.cpp:404]     Test net output #1: loss = 2.8645 (* 1 = 2.8645 loss)\nI0818 12:00:24.261238 17472 solver.cpp:228] Iteration 31800, loss = 0.000205999\nI0818 12:00:24.261276 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:00:24.261299 17472 solver.cpp:244]     Train net output #1: loss = 0.000206084 (* 1 = 0.000206084 loss)\nI0818 12:00:24.350409 17472 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0818 12:02:41.216826 17472 
solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 12:04:02.674569 17472 solver.cpp:404]     Test net output #0: accuracy = 0.501\nI0818 12:04:02.674865 17472 solver.cpp:404]     Test net output #1: loss = 2.87387 (* 1 = 2.87387 loss)\nI0818 12:04:03.994748 17472 solver.cpp:228] Iteration 31900, loss = 0.000223661\nI0818 12:04:03.994786 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:04:03.994809 17472 solver.cpp:244]     Train net output #1: loss = 0.000223746 (* 1 = 0.000223746 loss)\nI0818 12:04:04.083572 17472 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0818 12:06:21.202289 17472 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 12:07:42.650970 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49636\nI0818 12:07:42.651283 17472 solver.cpp:404]     Test net output #1: loss = 2.88551 (* 1 = 2.88551 loss)\nI0818 12:07:43.970734 17472 solver.cpp:228] Iteration 32000, loss = 0.000194309\nI0818 12:07:43.970772 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:07:43.970794 17472 solver.cpp:244]     Train net output #1: loss = 0.000194394 (* 1 = 0.000194394 loss)\nI0818 12:07:44.058804 17472 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0818 12:10:00.990612 17472 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 12:11:22.409428 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50056\nI0818 12:11:22.409721 17472 solver.cpp:404]     Test net output #1: loss = 2.86229 (* 1 = 2.86229 loss)\nI0818 12:11:23.727944 17472 solver.cpp:228] Iteration 32100, loss = 0.000199152\nI0818 12:11:23.727977 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:11:23.727993 17472 solver.cpp:244]     Train net output #1: loss = 0.000199237 (* 1 = 0.000199237 loss)\nI0818 12:11:23.818359 17472 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0818 12:13:40.917820 17472 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 12:15:02.323294 17472 solver.cpp:404]     Test net output #0: 
accuracy = 0.50276\nI0818 12:15:02.323582 17472 solver.cpp:404]     Test net output #1: loss = 2.81368 (* 1 = 2.81368 loss)\nI0818 12:15:03.642176 17472 solver.cpp:228] Iteration 32200, loss = 0.000156203\nI0818 12:15:03.642210 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:15:03.642225 17472 solver.cpp:244]     Train net output #1: loss = 0.000156288 (* 1 = 0.000156288 loss)\nI0818 12:15:03.732987 17472 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0818 12:17:20.899085 17472 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 12:18:42.318126 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5024\nI0818 12:18:42.318419 17472 solver.cpp:404]     Test net output #1: loss = 2.82363 (* 1 = 2.82363 loss)\nI0818 12:18:43.636554 17472 solver.cpp:228] Iteration 32300, loss = 0.000174439\nI0818 12:18:43.636584 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:18:43.636598 17472 solver.cpp:244]     Train net output #1: loss = 0.000174524 (* 1 = 0.000174524 loss)\nI0818 12:18:43.723816 17472 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0818 12:21:00.686705 17472 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 12:22:22.094290 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50024\nI0818 12:22:22.094586 17472 solver.cpp:404]     Test net output #1: loss = 2.85906 (* 1 = 2.85906 loss)\nI0818 12:22:23.412413 17472 solver.cpp:228] Iteration 32400, loss = 0.000208814\nI0818 12:22:23.412446 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:22:23.412461 17472 solver.cpp:244]     Train net output #1: loss = 0.000208899 (* 1 = 0.000208899 loss)\nI0818 12:22:23.504187 17472 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0818 12:24:40.587652 17472 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:26:01.990747 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50528\nI0818 12:26:01.991034 17472 solver.cpp:404]     Test net output #1: loss = 2.81904 (* 1 = 2.81904 
loss)\nI0818 12:26:03.308990 17472 solver.cpp:228] Iteration 32500, loss = 0.000177607\nI0818 12:26:03.309022 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:26:03.309036 17472 solver.cpp:244]     Train net output #1: loss = 0.000177692 (* 1 = 0.000177692 loss)\nI0818 12:26:03.396857 17472 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0818 12:28:20.326444 17472 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 12:29:41.734902 17472 solver.cpp:404]     Test net output #0: accuracy = 0.49872\nI0818 12:29:41.735205 17472 solver.cpp:404]     Test net output #1: loss = 2.8621 (* 1 = 2.8621 loss)\nI0818 12:29:43.053648 17472 solver.cpp:228] Iteration 32600, loss = 0.000186152\nI0818 12:29:43.053681 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:29:43.053697 17472 solver.cpp:244]     Train net output #1: loss = 0.000186237 (* 1 = 0.000186237 loss)\nI0818 12:29:43.141355 17472 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0818 12:32:00.238910 17472 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 12:33:21.660845 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50076\nI0818 12:33:21.661149 17472 solver.cpp:404]     Test net output #1: loss = 2.86657 (* 1 = 2.86657 loss)\nI0818 12:33:22.979751 17472 solver.cpp:228] Iteration 32700, loss = 0.000184981\nI0818 12:33:22.979784 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:33:22.979799 17472 solver.cpp:244]     Train net output #1: loss = 0.000185066 (* 1 = 0.000185066 loss)\nI0818 12:33:23.067062 17472 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0818 12:35:40.295979 17472 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:37:01.715039 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50204\nI0818 12:37:01.715337 17472 solver.cpp:404]     Test net output #1: loss = 2.831 (* 1 = 2.831 loss)\nI0818 12:37:03.033941 17472 solver.cpp:228] Iteration 32800, loss = 0.000175255\nI0818 12:37:03.033975 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:37:03.033989 17472 solver.cpp:244]     Train net output #1: loss = 0.00017534 (* 1 = 0.00017534 loss)\nI0818 12:37:03.121804 17472 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0818 12:39:20.102211 17472 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:40:41.517642 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50068\nI0818 12:40:41.517921 17472 solver.cpp:404]     Test net output #1: loss = 2.85259 (* 1 = 2.85259 loss)\nI0818 12:40:42.836295 17472 solver.cpp:228] Iteration 32900, loss = 0.000162741\nI0818 12:40:42.836328 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:40:42.836341 17472 solver.cpp:244]     Train net output #1: loss = 0.000162826 (* 1 = 0.000162826 loss)\nI0818 12:40:42.924037 17472 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0818 12:42:59.837539 17472 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:44:21.253684 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5012\nI0818 12:44:21.253968 17472 solver.cpp:404]     Test net output #1: loss = 2.8109 (* 1 = 2.8109 loss)\nI0818 12:44:22.572630 17472 solver.cpp:228] Iteration 33000, loss = 0.000217024\nI0818 12:44:22.572664 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:44:22.572679 17472 solver.cpp:244]     Train net output #1: loss = 0.000217109 (* 1 = 0.000217109 loss)\nI0818 12:44:22.662634 17472 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0818 12:46:39.613481 17472 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:48:01.036806 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5042\nI0818 12:48:01.037123 17472 solver.cpp:404]     Test net output #1: loss = 2.78698 (* 1 = 2.78698 loss)\nI0818 12:48:02.355162 17472 solver.cpp:228] Iteration 33100, loss = 0.000190471\nI0818 12:48:02.355195 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:48:02.355208 17472 solver.cpp:244]     Train net output #1: 
loss = 0.000190556 (* 1 = 0.000190556 loss)\nI0818 12:48:02.443307 17472 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0818 12:50:19.386646 17472 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 12:51:40.798979 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50596\nI0818 12:51:40.799263 17472 solver.cpp:404]     Test net output #1: loss = 2.74258 (* 1 = 2.74258 loss)\nI0818 12:51:42.118175 17472 solver.cpp:228] Iteration 33200, loss = 0.000229571\nI0818 12:51:42.118207 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:51:42.118222 17472 solver.cpp:244]     Train net output #1: loss = 0.000229656 (* 1 = 0.000229656 loss)\nI0818 12:51:42.205307 17472 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0818 12:53:59.074054 17472 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 12:55:20.489068 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50688\nI0818 12:55:20.489347 17472 solver.cpp:404]     Test net output #1: loss = 2.76852 (* 1 = 2.76852 loss)\nI0818 12:55:21.807334 17472 solver.cpp:228] Iteration 33300, loss = 0.000209946\nI0818 12:55:21.807374 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:55:21.807390 17472 solver.cpp:244]     Train net output #1: loss = 0.000210031 (* 1 = 0.000210031 loss)\nI0818 12:55:21.899186 17472 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0818 12:57:38.746865 17472 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 12:59:00.155984 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50768\nI0818 12:59:00.156280 17472 solver.cpp:404]     Test net output #1: loss = 2.72604 (* 1 = 2.72604 loss)\nI0818 12:59:01.474738 17472 solver.cpp:228] Iteration 33400, loss = 0.000177235\nI0818 12:59:01.474769 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:59:01.474783 17472 solver.cpp:244]     Train net output #1: loss = 0.00017732 (* 1 = 0.00017732 loss)\nI0818 12:59:01.560222 17472 sgd_solver.cpp:166] Iteration 33400, lr = 
0.35\nI0818 13:01:18.253088 17472 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 13:02:39.672461 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50288\nI0818 13:02:39.672749 17472 solver.cpp:404]     Test net output #1: loss = 2.79073 (* 1 = 2.79073 loss)\nI0818 13:02:40.991214 17472 solver.cpp:228] Iteration 33500, loss = 0.000182607\nI0818 13:02:40.991245 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:02:40.991261 17472 solver.cpp:244]     Train net output #1: loss = 0.000182692 (* 1 = 0.000182692 loss)\nI0818 13:02:41.080675 17472 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0818 13:04:57.752300 17472 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 13:06:19.165411 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5086\nI0818 13:06:19.165710 17472 solver.cpp:404]     Test net output #1: loss = 2.73235 (* 1 = 2.73235 loss)\nI0818 13:06:20.484277 17472 solver.cpp:228] Iteration 33600, loss = 0.000204008\nI0818 13:06:20.484308 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:06:20.484323 17472 solver.cpp:244]     Train net output #1: loss = 0.000204093 (* 1 = 0.000204093 loss)\nI0818 13:06:20.570834 17472 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0818 13:08:37.250023 17472 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 13:09:58.665180 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50384\nI0818 13:09:58.665477 17472 solver.cpp:404]     Test net output #1: loss = 2.78931 (* 1 = 2.78931 loss)\nI0818 13:09:59.984060 17472 solver.cpp:228] Iteration 33700, loss = 0.000196971\nI0818 13:09:59.984093 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:09:59.984108 17472 solver.cpp:244]     Train net output #1: loss = 0.000197056 (* 1 = 0.000197056 loss)\nI0818 13:10:00.073282 17472 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0818 13:12:16.689265 17472 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 13:13:38.096191 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.50364\nI0818 13:13:38.096483 17472 solver.cpp:404]     Test net output #1: loss = 2.77731 (* 1 = 2.77731 loss)\nI0818 13:13:39.415153 17472 solver.cpp:228] Iteration 33800, loss = 0.000203668\nI0818 13:13:39.415196 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:13:39.415212 17472 solver.cpp:244]     Train net output #1: loss = 0.000203753 (* 1 = 0.000203753 loss)\nI0818 13:13:39.502709 17472 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0818 13:15:56.195149 17472 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 13:17:17.603902 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50392\nI0818 13:17:17.604204 17472 solver.cpp:404]     Test net output #1: loss = 2.76734 (* 1 = 2.76734 loss)\nI0818 13:17:18.922782 17472 solver.cpp:228] Iteration 33900, loss = 0.000214532\nI0818 13:17:18.922816 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:17:18.922830 17472 solver.cpp:244]     Train net output #1: loss = 0.000214617 (* 1 = 0.000214617 loss)\nI0818 13:17:19.013064 17472 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0818 13:19:35.543159 17472 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 13:20:56.953023 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50584\nI0818 13:20:56.953305 17472 solver.cpp:404]     Test net output #1: loss = 2.7305 (* 1 = 2.7305 loss)\nI0818 13:20:58.271287 17472 solver.cpp:228] Iteration 34000, loss = 0.000180344\nI0818 13:20:58.271320 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:20:58.271334 17472 solver.cpp:244]     Train net output #1: loss = 0.000180429 (* 1 = 0.000180429 loss)\nI0818 13:20:58.358496 17472 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0818 13:23:14.958705 17472 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:24:36.379513 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5034\nI0818 13:24:36.379787 17472 solver.cpp:404]     Test net output 
#1: loss = 2.75603 (* 1 = 2.75603 loss)\nI0818 13:24:37.698457 17472 solver.cpp:228] Iteration 34100, loss = 0.000242133\nI0818 13:24:37.698490 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:24:37.698505 17472 solver.cpp:244]     Train net output #1: loss = 0.000242218 (* 1 = 0.000242218 loss)\nI0818 13:24:37.786150 17472 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0818 13:26:54.461463 17472 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 13:28:15.885308 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50728\nI0818 13:28:15.885599 17472 solver.cpp:404]     Test net output #1: loss = 2.71714 (* 1 = 2.71714 loss)\nI0818 13:28:17.205557 17472 solver.cpp:228] Iteration 34200, loss = 0.000213102\nI0818 13:28:17.205593 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:28:17.205608 17472 solver.cpp:244]     Train net output #1: loss = 0.000213187 (* 1 = 0.000213187 loss)\nI0818 13:28:17.294919 17472 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0818 13:30:33.949519 17472 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:31:55.506620 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50744\nI0818 13:31:55.506914 17472 solver.cpp:404]     Test net output #1: loss = 2.73673 (* 1 = 2.73673 loss)\nI0818 13:31:56.826560 17472 solver.cpp:228] Iteration 34300, loss = 0.000191662\nI0818 13:31:56.826596 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:31:56.826611 17472 solver.cpp:244]     Train net output #1: loss = 0.000191747 (* 1 = 0.000191747 loss)\nI0818 13:31:56.917243 17472 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0818 13:34:13.564348 17472 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:35:35.115553 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50812\nI0818 13:35:35.115859 17472 solver.cpp:404]     Test net output #1: loss = 2.70597 (* 1 = 2.70597 loss)\nI0818 13:35:36.434221 17472 solver.cpp:228] Iteration 34400, loss = 
0.000165037\nI0818 13:35:36.434260 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:35:36.434283 17472 solver.cpp:244]     Train net output #1: loss = 0.000165122 (* 1 = 0.000165122 loss)\nI0818 13:35:36.525475 17472 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0818 13:37:53.361901 17472 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:39:15.558578 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5028\nI0818 13:39:15.558848 17472 solver.cpp:404]     Test net output #1: loss = 2.77819 (* 1 = 2.77819 loss)\nI0818 13:39:16.885694 17472 solver.cpp:228] Iteration 34500, loss = 0.000228992\nI0818 13:39:16.885743 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:39:16.885759 17472 solver.cpp:244]     Train net output #1: loss = 0.000229077 (* 1 = 0.000229077 loss)\nI0818 13:39:16.971316 17472 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0818 13:41:33.902303 17472 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:42:56.108597 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50548\nI0818 13:42:56.108908 17472 solver.cpp:404]     Test net output #1: loss = 2.73766 (* 1 = 2.73766 loss)\nI0818 13:42:57.436012 17472 solver.cpp:228] Iteration 34600, loss = 0.000183655\nI0818 13:42:57.436060 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:42:57.436076 17472 solver.cpp:244]     Train net output #1: loss = 0.00018374 (* 1 = 0.00018374 loss)\nI0818 13:42:57.522349 17472 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0818 13:45:14.529268 17472 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:46:36.782654 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5066\nI0818 13:46:36.783007 17472 solver.cpp:404]     Test net output #1: loss = 2.73456 (* 1 = 2.73456 loss)\nI0818 13:46:38.109743 17472 solver.cpp:228] Iteration 34700, loss = 0.000233815\nI0818 13:46:38.109791 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:46:38.109807 17472 
solver.cpp:244]     Train net output #1: loss = 0.0002339 (* 1 = 0.0002339 loss)\nI0818 13:46:38.193228 17472 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0818 13:48:55.461777 17472 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 13:50:17.720943 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50776\nI0818 13:50:17.721261 17472 solver.cpp:404]     Test net output #1: loss = 2.68545 (* 1 = 2.68545 loss)\nI0818 13:50:19.048725 17472 solver.cpp:228] Iteration 34800, loss = 0.000202905\nI0818 13:50:19.048768 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:50:19.048784 17472 solver.cpp:244]     Train net output #1: loss = 0.00020299 (* 1 = 0.00020299 loss)\nI0818 13:50:19.132028 17472 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0818 13:52:36.456202 17472 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 13:53:58.706218 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50692\nI0818 13:53:58.706567 17472 solver.cpp:404]     Test net output #1: loss = 2.73912 (* 1 = 2.73912 loss)\nI0818 13:54:00.033673 17472 solver.cpp:228] Iteration 34900, loss = 0.000192281\nI0818 13:54:00.033716 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:54:00.033733 17472 solver.cpp:244]     Train net output #1: loss = 0.000192366 (* 1 = 0.000192366 loss)\nI0818 13:54:00.121100 17472 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0818 13:56:17.112664 17472 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 13:57:39.364106 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50952\nI0818 13:57:39.364429 17472 solver.cpp:404]     Test net output #1: loss = 2.68514 (* 1 = 2.68514 loss)\nI0818 13:57:40.692138 17472 solver.cpp:228] Iteration 35000, loss = 0.000197543\nI0818 13:57:40.692183 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:57:40.692198 17472 solver.cpp:244]     Train net output #1: loss = 0.000197628 (* 1 = 0.000197628 loss)\nI0818 13:57:40.775367 17472 
sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0818 13:59:57.814301 17472 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 14:01:20.069847 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50168\nI0818 14:01:20.070188 17472 solver.cpp:404]     Test net output #1: loss = 2.77102 (* 1 = 2.77102 loss)\nI0818 14:01:21.397302 17472 solver.cpp:228] Iteration 35100, loss = 0.000185734\nI0818 14:01:21.397344 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:01:21.397361 17472 solver.cpp:244]     Train net output #1: loss = 0.000185819 (* 1 = 0.000185819 loss)\nI0818 14:01:21.482846 17472 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0818 14:03:38.392951 17472 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 14:05:00.647933 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50856\nI0818 14:05:00.648258 17472 solver.cpp:404]     Test net output #1: loss = 2.67673 (* 1 = 2.67673 loss)\nI0818 14:05:01.975836 17472 solver.cpp:228] Iteration 35200, loss = 0.000194288\nI0818 14:05:01.975881 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:05:01.975898 17472 solver.cpp:244]     Train net output #1: loss = 0.000194373 (* 1 = 0.000194373 loss)\nI0818 14:05:02.062548 17472 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0818 14:07:19.047276 17472 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 14:08:41.295516 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50712\nI0818 14:08:41.295835 17472 solver.cpp:404]     Test net output #1: loss = 2.69826 (* 1 = 2.69826 loss)\nI0818 14:08:42.623877 17472 solver.cpp:228] Iteration 35300, loss = 0.000208811\nI0818 14:08:42.623926 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:08:42.623942 17472 solver.cpp:244]     Train net output #1: loss = 0.000208896 (* 1 = 0.000208896 loss)\nI0818 14:08:42.709370 17472 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0818 14:10:59.709709 17472 solver.cpp:337] Iteration 35400, Testing net 
(#0)\nI0818 14:12:21.852172 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51204\nI0818 14:12:21.852455 17472 solver.cpp:404]     Test net output #1: loss = 2.64614 (* 1 = 2.64614 loss)\nI0818 14:12:23.179643 17472 solver.cpp:228] Iteration 35400, loss = 0.000210349\nI0818 14:12:23.179695 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:12:23.179713 17472 solver.cpp:244]     Train net output #1: loss = 0.000210434 (* 1 = 0.000210434 loss)\nI0818 14:12:23.264771 17472 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0818 14:14:40.154808 17472 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 14:16:02.363466 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5074\nI0818 14:16:02.363732 17472 solver.cpp:404]     Test net output #1: loss = 2.71342 (* 1 = 2.71342 loss)\nI0818 14:16:03.691087 17472 solver.cpp:228] Iteration 35500, loss = 0.000228441\nI0818 14:16:03.691138 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:16:03.691154 17472 solver.cpp:244]     Train net output #1: loss = 0.000228526 (* 1 = 0.000228526 loss)\nI0818 14:16:03.782476 17472 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0818 14:18:20.832262 17472 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 14:19:43.078166 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51016\nI0818 14:19:43.078395 17472 solver.cpp:404]     Test net output #1: loss = 2.6607 (* 1 = 2.6607 loss)\nI0818 14:19:44.406322 17472 solver.cpp:228] Iteration 35600, loss = 0.000175267\nI0818 14:19:44.406373 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:19:44.406388 17472 solver.cpp:244]     Train net output #1: loss = 0.000175352 (* 1 = 0.000175352 loss)\nI0818 14:19:44.488970 17472 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0818 14:22:01.452239 17472 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 14:23:23.673573 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51096\nI0818 14:23:23.673842 17472 
solver.cpp:404]     Test net output #1: loss = 2.66913 (* 1 = 2.66913 loss)\nI0818 14:23:25.002645 17472 solver.cpp:228] Iteration 35700, loss = 0.000218101\nI0818 14:23:25.002684 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:23:25.002699 17472 solver.cpp:244]     Train net output #1: loss = 0.000218186 (* 1 = 0.000218186 loss)\nI0818 14:23:25.085250 17472 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0818 14:25:42.457049 17472 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 14:27:04.676837 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51056\nI0818 14:27:04.677073 17472 solver.cpp:404]     Test net output #1: loss = 2.64837 (* 1 = 2.64837 loss)\nI0818 14:27:06.004757 17472 solver.cpp:228] Iteration 35800, loss = 0.000178444\nI0818 14:27:06.004797 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:27:06.004812 17472 solver.cpp:244]     Train net output #1: loss = 0.000178529 (* 1 = 0.000178529 loss)\nI0818 14:27:06.087816 17472 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0818 14:29:23.077766 17472 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:30:45.261173 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50868\nI0818 14:30:45.261440 17472 solver.cpp:404]     Test net output #1: loss = 2.68121 (* 1 = 2.68121 loss)\nI0818 14:30:46.589269 17472 solver.cpp:228] Iteration 35900, loss = 0.00024878\nI0818 14:30:46.589308 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:30:46.589323 17472 solver.cpp:244]     Train net output #1: loss = 0.000248865 (* 1 = 0.000248865 loss)\nI0818 14:30:46.671434 17472 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0818 14:33:03.724725 17472 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:34:25.927443 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51292\nI0818 14:34:25.927700 17472 solver.cpp:404]     Test net output #1: loss = 2.63724 (* 1 = 2.63724 loss)\nI0818 14:34:27.254747 17472 solver.cpp:228] 
Iteration 36000, loss = 0.000192709\nI0818 14:34:27.254788 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:34:27.254803 17472 solver.cpp:244]     Train net output #1: loss = 0.000192794 (* 1 = 0.000192794 loss)\nI0818 14:34:27.341446 17472 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0818 14:39:25.901180 17472 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:40:47.526581 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50876\nI0818 14:40:47.528568 17472 solver.cpp:404]     Test net output #1: loss = 2.67254 (* 1 = 2.67254 loss)\nI0818 14:40:48.858530 17472 solver.cpp:228] Iteration 36100, loss = 0.000220686\nI0818 14:40:48.858568 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:40:48.858583 17472 solver.cpp:244]     Train net output #1: loss = 0.000220771 (* 1 = 0.000220771 loss)\nI0818 14:40:48.942049 17472 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0818 14:43:06.616701 17472 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:44:38.825561 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50888\nI0818 14:44:38.826088 17472 solver.cpp:404]     Test net output #1: loss = 2.65351 (* 1 = 2.65351 loss)\nI0818 14:44:40.156281 17472 solver.cpp:228] Iteration 36200, loss = 0.000215151\nI0818 14:44:40.156360 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:44:40.156378 17472 solver.cpp:244]     Train net output #1: loss = 0.000215236 (* 1 = 0.000215236 loss)\nI0818 14:44:40.239200 17472 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0818 14:46:57.772830 17472 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 14:48:20.062427 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50088\nI0818 14:48:20.062674 17472 solver.cpp:404]     Test net output #1: loss = 2.7495 (* 1 = 2.7495 loss)\nI0818 14:48:21.391666 17472 solver.cpp:228] Iteration 36300, loss = 0.000189893\nI0818 14:48:21.391705 17472 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0818 14:48:21.391721 17472 solver.cpp:244]     Train net output #1: loss = 0.000189978 (* 1 = 0.000189978 loss)\nI0818 14:48:21.476239 17472 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0818 14:50:38.526021 17472 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 14:52:00.821686 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50784\nI0818 14:52:00.822015 17472 solver.cpp:404]     Test net output #1: loss = 2.68678 (* 1 = 2.68678 loss)\nI0818 14:52:02.150820 17472 solver.cpp:228] Iteration 36400, loss = 0.000181564\nI0818 14:52:02.150859 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:52:02.150876 17472 solver.cpp:244]     Train net output #1: loss = 0.000181649 (* 1 = 0.000181649 loss)\nI0818 14:52:02.236738 17472 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 14:54:19.311892 17472 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 14:55:41.598943 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50908\nI0818 14:55:41.599228 17472 solver.cpp:404]     Test net output #1: loss = 2.6696 (* 1 = 2.6696 loss)\nI0818 14:55:42.928021 17472 solver.cpp:228] Iteration 36500, loss = 0.000190555\nI0818 14:55:42.928058 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:55:42.928073 17472 solver.cpp:244]     Train net output #1: loss = 0.00019064 (* 1 = 0.00019064 loss)\nI0818 14:55:43.011420 17472 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 14:58:00.176690 17472 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 14:59:22.467666 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51048\nI0818 14:59:22.467900 17472 solver.cpp:404]     Test net output #1: loss = 2.63271 (* 1 = 2.63271 loss)\nI0818 14:59:23.796062 17472 solver.cpp:228] Iteration 36600, loss = 0.000222902\nI0818 14:59:23.796102 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:59:23.796116 17472 solver.cpp:244]     Train net output #1: loss = 0.000222987 (* 1 = 0.000222987 loss)\nI0818 
14:59:23.876322 17472 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 15:01:41.072289 17472 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 15:03:03.382905 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50736\nI0818 15:03:03.383220 17472 solver.cpp:404]     Test net output #1: loss = 2.68306 (* 1 = 2.68306 loss)\nI0818 15:03:04.712461 17472 solver.cpp:228] Iteration 36700, loss = 0.000190629\nI0818 15:03:04.712501 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:03:04.712515 17472 solver.cpp:244]     Train net output #1: loss = 0.000190714 (* 1 = 0.000190714 loss)\nI0818 15:03:04.793021 17472 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 15:05:21.957716 17472 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 15:06:44.128772 17472 solver.cpp:404]     Test net output #0: accuracy = 0.50868\nI0818 15:06:44.129019 17472 solver.cpp:404]     Test net output #1: loss = 2.65816 (* 1 = 2.65816 loss)\nI0818 15:06:45.457316 17472 solver.cpp:228] Iteration 36800, loss = 0.000205918\nI0818 15:06:45.457356 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:06:45.457371 17472 solver.cpp:244]     Train net output #1: loss = 0.000206003 (* 1 = 0.000206003 loss)\nI0818 15:06:45.539717 17472 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 15:09:02.617853 17472 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 15:10:24.778044 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51072\nI0818 15:10:24.778342 17472 solver.cpp:404]     Test net output #1: loss = 2.61276 (* 1 = 2.61276 loss)\nI0818 15:10:26.107913 17472 solver.cpp:228] Iteration 36900, loss = 0.000192155\nI0818 15:10:26.107954 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:10:26.107969 17472 solver.cpp:244]     Train net output #1: loss = 0.00019224 (* 1 = 0.00019224 loss)\nI0818 15:10:26.193622 17472 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 15:12:43.422174 17472 solver.cpp:337] Iteration 
37000, Testing net (#0)\nI0818 15:14:05.589152 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51136\nI0818 15:14:05.589381 17472 solver.cpp:404]     Test net output #1: loss = 2.6213 (* 1 = 2.6213 loss)\nI0818 15:14:06.919092 17472 solver.cpp:228] Iteration 37000, loss = 0.000205508\nI0818 15:14:06.919131 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:14:06.919147 17472 solver.cpp:244]     Train net output #1: loss = 0.000205593 (* 1 = 0.000205593 loss)\nI0818 15:14:07.001960 17472 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 15:16:24.144333 17472 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 15:17:46.251940 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51196\nI0818 15:17:46.252177 17472 solver.cpp:404]     Test net output #1: loss = 2.61051 (* 1 = 2.61051 loss)\nI0818 15:17:47.582036 17472 solver.cpp:228] Iteration 37100, loss = 0.000201217\nI0818 15:17:47.582072 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:17:47.582088 17472 solver.cpp:244]     Train net output #1: loss = 0.000201302 (* 1 = 0.000201302 loss)\nI0818 15:17:47.660543 17472 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 15:20:04.751219 17472 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 15:21:26.824472 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51252\nI0818 15:21:26.824697 17472 solver.cpp:404]     Test net output #1: loss = 2.60841 (* 1 = 2.60841 loss)\nI0818 15:21:28.152791 17472 solver.cpp:228] Iteration 37200, loss = 0.000232085\nI0818 15:21:28.152827 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:21:28.152842 17472 solver.cpp:244]     Train net output #1: loss = 0.00023217 (* 1 = 0.00023217 loss)\nI0818 15:21:28.236294 17472 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 15:23:45.271168 17472 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:25:07.411623 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51112\nI0818 
15:25:07.411869 17472 solver.cpp:404]     Test net output #1: loss = 2.63246 (* 1 = 2.63246 loss)\nI0818 15:25:08.740070 17472 solver.cpp:228] Iteration 37300, loss = 0.000203616\nI0818 15:25:08.740108 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:25:08.740123 17472 solver.cpp:244]     Train net output #1: loss = 0.000203701 (* 1 = 0.000203701 loss)\nI0818 15:25:08.826658 17472 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 15:27:25.862988 17472 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 15:28:47.937191 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51012\nI0818 15:28:47.937476 17472 solver.cpp:404]     Test net output #1: loss = 2.63131 (* 1 = 2.63131 loss)\nI0818 15:28:49.265390 17472 solver.cpp:228] Iteration 37400, loss = 0.000208748\nI0818 15:28:49.265426 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:28:49.265442 17472 solver.cpp:244]     Train net output #1: loss = 0.000208833 (* 1 = 0.000208833 loss)\nI0818 15:28:49.351631 17472 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 15:31:06.489848 17472 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:32:28.751451 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51192\nI0818 15:32:28.751755 17472 solver.cpp:404]     Test net output #1: loss = 2.60838 (* 1 = 2.60838 loss)\nI0818 15:32:30.081269 17472 solver.cpp:228] Iteration 37500, loss = 0.000210651\nI0818 15:32:30.081308 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:32:30.081322 17472 solver.cpp:244]     Train net output #1: loss = 0.000210736 (* 1 = 0.000210736 loss)\nI0818 15:32:30.163522 17472 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 15:34:47.265918 17472 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:36:09.540338 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51344\nI0818 15:36:09.540642 17472 solver.cpp:404]     Test net output #1: loss = 2.59155 (* 1 = 2.59155 loss)\nI0818 15:36:10.869515 
17472 solver.cpp:228] Iteration 37600, loss = 0.000167321\nI0818 15:36:10.869555 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:36:10.869571 17472 solver.cpp:244]     Train net output #1: loss = 0.000167406 (* 1 = 0.000167406 loss)\nI0818 15:36:10.949779 17472 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 15:38:28.167848 17472 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:39:50.398635 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51068\nI0818 15:39:50.398903 17472 solver.cpp:404]     Test net output #1: loss = 2.62207 (* 1 = 2.62207 loss)\nI0818 15:39:51.726671 17472 solver.cpp:228] Iteration 37700, loss = 0.000200151\nI0818 15:39:51.726711 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:39:51.726727 17472 solver.cpp:244]     Train net output #1: loss = 0.000200236 (* 1 = 0.000200236 loss)\nI0818 15:39:51.812669 17472 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 15:42:08.839121 17472 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:43:30.898391 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51288\nI0818 15:43:30.898668 17472 solver.cpp:404]     Test net output #1: loss = 2.59954 (* 1 = 2.59954 loss)\nI0818 15:43:32.226820 17472 solver.cpp:228] Iteration 37800, loss = 0.000197973\nI0818 15:43:32.226860 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:43:32.226874 17472 solver.cpp:244]     Train net output #1: loss = 0.000198058 (* 1 = 0.000198058 loss)\nI0818 15:43:32.308091 17472 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 15:45:49.357018 17472 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:47:11.461416 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51228\nI0818 15:47:11.461670 17472 solver.cpp:404]     Test net output #1: loss = 2.60481 (* 1 = 2.60481 loss)\nI0818 15:47:12.790052 17472 solver.cpp:228] Iteration 37900, loss = 0.000198711\nI0818 15:47:12.790092 17472 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0818 15:47:12.790107 17472 solver.cpp:244]     Train net output #1: loss = 0.000198796 (* 1 = 0.000198796 loss)\nI0818 15:47:12.872097 17472 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 15:49:30.052628 17472 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 15:50:52.076614 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51336\nI0818 15:50:52.076859 17472 solver.cpp:404]     Test net output #1: loss = 2.56796 (* 1 = 2.56796 loss)\nI0818 15:50:53.405531 17472 solver.cpp:228] Iteration 38000, loss = 0.000181779\nI0818 15:50:53.405572 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:50:53.405588 17472 solver.cpp:244]     Train net output #1: loss = 0.000181864 (* 1 = 0.000181864 loss)\nI0818 15:50:53.484621 17472 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 15:53:10.603377 17472 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 15:54:32.703138 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51644\nI0818 15:54:32.703400 17472 solver.cpp:404]     Test net output #1: loss = 2.55518 (* 1 = 2.55518 loss)\nI0818 15:54:34.031749 17472 solver.cpp:228] Iteration 38100, loss = 0.000194547\nI0818 15:54:34.031788 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:54:34.031805 17472 solver.cpp:244]     Train net output #1: loss = 0.000194632 (* 1 = 0.000194632 loss)\nI0818 15:54:34.118279 17472 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 15:56:51.244148 17472 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 15:58:13.450445 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51388\nI0818 15:58:13.450682 17472 solver.cpp:404]     Test net output #1: loss = 2.57442 (* 1 = 2.57442 loss)\nI0818 15:58:14.779263 17472 solver.cpp:228] Iteration 38200, loss = 0.000262786\nI0818 15:58:14.779302 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:58:14.779319 17472 solver.cpp:244]     Train net output #1: loss = 0.000262871 (* 1 = 
0.000262871 loss)\nI0818 15:58:14.864722 17472 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 16:00:31.922308 17472 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 16:01:54.158788 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51304\nI0818 16:01:54.159075 17472 solver.cpp:404]     Test net output #1: loss = 2.57591 (* 1 = 2.57591 loss)\nI0818 16:01:55.486419 17472 solver.cpp:228] Iteration 38300, loss = 0.000193482\nI0818 16:01:55.486457 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:01:55.486474 17472 solver.cpp:244]     Train net output #1: loss = 0.000193567 (* 1 = 0.000193567 loss)\nI0818 16:01:55.566815 17472 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 16:04:12.575364 17472 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 16:05:34.600894 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51612\nI0818 16:05:34.601158 17472 solver.cpp:404]     Test net output #1: loss = 2.54677 (* 1 = 2.54677 loss)\nI0818 16:05:35.929303 17472 solver.cpp:228] Iteration 38400, loss = 0.0002028\nI0818 16:05:35.929343 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:05:35.929358 17472 solver.cpp:244]     Train net output #1: loss = 0.000202885 (* 1 = 0.000202885 loss)\nI0818 16:05:36.012569 17472 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0818 16:07:53.152503 17472 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 16:09:15.149729 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51588\nI0818 16:09:15.149976 17472 solver.cpp:404]     Test net output #1: loss = 2.55216 (* 1 = 2.55216 loss)\nI0818 16:09:16.478929 17472 solver.cpp:228] Iteration 38500, loss = 0.000189296\nI0818 16:09:16.478968 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:09:16.478984 17472 solver.cpp:244]     Train net output #1: loss = 0.000189381 (* 1 = 0.000189381 loss)\nI0818 16:09:16.558392 17472 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 16:11:33.797430 17472 
solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 16:12:55.920068 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51816\nI0818 16:12:55.920343 17472 solver.cpp:404]     Test net output #1: loss = 2.5233 (* 1 = 2.5233 loss)\nI0818 16:12:57.249579 17472 solver.cpp:228] Iteration 38600, loss = 0.000207473\nI0818 16:12:57.249620 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:12:57.249636 17472 solver.cpp:244]     Train net output #1: loss = 0.000207558 (* 1 = 0.000207558 loss)\nI0818 16:12:57.326948 17472 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 16:15:14.552693 17472 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 16:16:36.665005 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51372\nI0818 16:16:36.665274 17472 solver.cpp:404]     Test net output #1: loss = 2.57433 (* 1 = 2.57433 loss)\nI0818 16:16:37.994706 17472 solver.cpp:228] Iteration 38700, loss = 0.000178673\nI0818 16:16:37.994746 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:16:37.994762 17472 solver.cpp:244]     Train net output #1: loss = 0.000178758 (* 1 = 0.000178758 loss)\nI0818 16:16:38.071718 17472 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 16:18:55.293102 17472 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 16:20:17.464408 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51112\nI0818 16:20:17.464648 17472 solver.cpp:404]     Test net output #1: loss = 2.58563 (* 1 = 2.58563 loss)\nI0818 16:20:18.793540 17472 solver.cpp:228] Iteration 38800, loss = 0.000186744\nI0818 16:20:18.793582 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:20:18.793597 17472 solver.cpp:244]     Train net output #1: loss = 0.000186829 (* 1 = 0.000186829 loss)\nI0818 16:20:18.876278 17472 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 16:22:35.992875 17472 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:23:58.226037 17472 solver.cpp:404]     Test net output #0: 
accuracy = 0.51464\nI0818 16:23:58.226277 17472 solver.cpp:404]     Test net output #1: loss = 2.54944 (* 1 = 2.54944 loss)\nI0818 16:23:59.554282 17472 solver.cpp:228] Iteration 38900, loss = 0.000233474\nI0818 16:23:59.554323 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:23:59.554339 17472 solver.cpp:244]     Train net output #1: loss = 0.000233559 (* 1 = 0.000233559 loss)\nI0818 16:23:59.638378 17472 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 16:26:16.765970 17472 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:27:39.031818 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51628\nI0818 16:27:39.032088 17472 solver.cpp:404]     Test net output #1: loss = 2.53663 (* 1 = 2.53663 loss)\nI0818 16:27:40.360028 17472 solver.cpp:228] Iteration 39000, loss = 0.000201707\nI0818 16:27:40.360067 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:27:40.360082 17472 solver.cpp:244]     Train net output #1: loss = 0.000201792 (* 1 = 0.000201792 loss)\nI0818 16:27:40.440959 17472 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 16:29:57.484232 17472 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:31:19.775192 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51612\nI0818 16:31:19.775465 17472 solver.cpp:404]     Test net output #1: loss = 2.55396 (* 1 = 2.55396 loss)\nI0818 16:31:21.103847 17472 solver.cpp:228] Iteration 39100, loss = 0.000214474\nI0818 16:31:21.103884 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:31:21.103899 17472 solver.cpp:244]     Train net output #1: loss = 0.000214559 (* 1 = 0.000214559 loss)\nI0818 16:31:21.189996 17472 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 16:33:38.338145 17472 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:35:00.445122 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51764\nI0818 16:35:00.445382 17472 solver.cpp:404]     Test net output #1: loss = 2.53054 (* 1 = 2.53054 
loss)\nI0818 16:35:01.773303 17472 solver.cpp:228] Iteration 39200, loss = 0.000174509\nI0818 16:35:01.773340 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:35:01.773355 17472 solver.cpp:244]     Train net output #1: loss = 0.000174594 (* 1 = 0.000174594 loss)\nI0818 16:35:01.852982 17472 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 16:37:18.988410 17472 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:38:41.277951 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51532\nI0818 16:38:41.278197 17472 solver.cpp:404]     Test net output #1: loss = 2.55206 (* 1 = 2.55206 loss)\nI0818 16:38:42.606540 17472 solver.cpp:228] Iteration 39300, loss = 0.000207354\nI0818 16:38:42.606577 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:38:42.606592 17472 solver.cpp:244]     Train net output #1: loss = 0.000207439 (* 1 = 0.000207439 loss)\nI0818 16:38:42.692394 17472 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 16:40:59.765072 17472 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:42:21.771361 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51648\nI0818 16:42:21.771605 17472 solver.cpp:404]     Test net output #1: loss = 2.52111 (* 1 = 2.52111 loss)\nI0818 16:42:23.099362 17472 solver.cpp:228] Iteration 39400, loss = 0.000209676\nI0818 16:42:23.099401 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:42:23.099416 17472 solver.cpp:244]     Train net output #1: loss = 0.000209761 (* 1 = 0.000209761 loss)\nI0818 16:42:23.179071 17472 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 16:44:40.306375 17472 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:46:02.402285 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51756\nI0818 16:46:02.402513 17472 solver.cpp:404]     Test net output #1: loss = 2.54127 (* 1 = 2.54127 loss)\nI0818 16:46:03.730473 17472 solver.cpp:228] Iteration 39500, loss = 0.000197592\nI0818 16:46:03.730511 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:46:03.730527 17472 solver.cpp:244]     Train net output #1: loss = 0.000197677 (* 1 = 0.000197677 loss)\nI0818 16:46:03.812417 17472 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 16:48:20.829006 17472 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 16:49:43.027541 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51252\nI0818 16:49:43.027814 17472 solver.cpp:404]     Test net output #1: loss = 2.56675 (* 1 = 2.56675 loss)\nI0818 16:49:44.356034 17472 solver.cpp:228] Iteration 39600, loss = 0.000205681\nI0818 16:49:44.356071 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:49:44.356086 17472 solver.cpp:244]     Train net output #1: loss = 0.000205766 (* 1 = 0.000205766 loss)\nI0818 16:49:44.440743 17472 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 16:52:01.473354 17472 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 16:53:23.661442 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51568\nI0818 16:53:23.661701 17472 solver.cpp:404]     Test net output #1: loss = 2.52439 (* 1 = 2.52439 loss)\nI0818 16:53:24.989564 17472 solver.cpp:228] Iteration 39700, loss = 0.00020031\nI0818 16:53:24.989603 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:53:24.989617 17472 solver.cpp:244]     Train net output #1: loss = 0.000200395 (* 1 = 0.000200395 loss)\nI0818 16:53:25.071957 17472 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 16:55:42.046968 17472 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 16:57:04.015103 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51372\nI0818 16:57:04.015322 17472 solver.cpp:404]     Test net output #1: loss = 2.54529 (* 1 = 2.54529 loss)\nI0818 16:57:05.343513 17472 solver.cpp:228] Iteration 39800, loss = 0.000239881\nI0818 16:57:05.343551 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:57:05.343567 17472 solver.cpp:244]     Train net output 
#1: loss = 0.000239966 (* 1 = 0.000239966 loss)\nI0818 16:57:05.431026 17472 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 16:59:22.460711 17472 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 17:00:44.658780 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51908\nI0818 17:00:44.659054 17472 solver.cpp:404]     Test net output #1: loss = 2.51621 (* 1 = 2.51621 loss)\nI0818 17:00:45.986901 17472 solver.cpp:228] Iteration 39900, loss = 0.000206162\nI0818 17:00:45.986938 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:00:45.986953 17472 solver.cpp:244]     Train net output #1: loss = 0.000206247 (* 1 = 0.000206247 loss)\nI0818 17:00:46.072222 17472 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 17:03:03.093711 17472 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 17:04:25.282624 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51708\nI0818 17:04:25.282958 17472 solver.cpp:404]     Test net output #1: loss = 2.48856 (* 1 = 2.48856 loss)\nI0818 17:04:26.611040 17472 solver.cpp:228] Iteration 40000, loss = 0.000205347\nI0818 17:04:26.611079 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:04:26.611094 17472 solver.cpp:244]     Train net output #1: loss = 0.000205432 (* 1 = 0.000205432 loss)\nI0818 17:04:26.694806 17472 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 17:06:43.708438 17472 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 17:08:05.714404 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51272\nI0818 17:08:05.714627 17472 solver.cpp:404]     Test net output #1: loss = 2.55922 (* 1 = 2.55922 loss)\nI0818 17:08:07.043169 17472 solver.cpp:228] Iteration 40100, loss = 0.000214464\nI0818 17:08:07.043208 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:08:07.043223 17472 solver.cpp:244]     Train net output #1: loss = 0.000214549 (* 1 = 0.000214549 loss)\nI0818 17:08:07.126549 17472 sgd_solver.cpp:166] Iteration 40100, lr = 
0.35\nI0818 17:10:24.128594 17472 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 17:11:46.131254 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5192\nI0818 17:11:46.131510 17472 solver.cpp:404]     Test net output #1: loss = 2.49217 (* 1 = 2.49217 loss)\nI0818 17:11:47.460237 17472 solver.cpp:228] Iteration 40200, loss = 0.000186743\nI0818 17:11:47.460279 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:11:47.460295 17472 solver.cpp:244]     Train net output #1: loss = 0.000186828 (* 1 = 0.000186828 loss)\nI0818 17:11:47.543612 17472 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 17:14:04.758781 17472 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 17:15:26.783280 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51472\nI0818 17:15:26.783507 17472 solver.cpp:404]     Test net output #1: loss = 2.53421 (* 1 = 2.53421 loss)\nI0818 17:15:28.112406 17472 solver.cpp:228] Iteration 40300, loss = 0.000204235\nI0818 17:15:28.112445 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:15:28.112462 17472 solver.cpp:244]     Train net output #1: loss = 0.00020432 (* 1 = 0.00020432 loss)\nI0818 17:15:28.193238 17472 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 17:17:45.349839 17472 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 17:19:07.410079 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51816\nI0818 17:19:07.410362 17472 solver.cpp:404]     Test net output #1: loss = 2.49055 (* 1 = 2.49055 loss)\nI0818 17:19:08.739574 17472 solver.cpp:228] Iteration 40400, loss = 0.000203477\nI0818 17:19:08.739614 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:19:08.739630 17472 solver.cpp:244]     Train net output #1: loss = 0.000203562 (* 1 = 0.000203562 loss)\nI0818 17:19:08.825569 17472 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 17:21:25.810158 17472 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 17:22:47.953809 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.52076\nI0818 17:22:47.954103 17472 solver.cpp:404]     Test net output #1: loss = 2.48937 (* 1 = 2.48937 loss)\nI0818 17:22:49.282193 17472 solver.cpp:228] Iteration 40500, loss = 0.000223141\nI0818 17:22:49.282236 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:22:49.282253 17472 solver.cpp:244]     Train net output #1: loss = 0.000223226 (* 1 = 0.000223226 loss)\nI0818 17:22:49.364990 17472 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 17:25:06.582672 17472 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:26:28.608804 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51096\nI0818 17:26:28.609045 17472 solver.cpp:404]     Test net output #1: loss = 2.55989 (* 1 = 2.55989 loss)\nI0818 17:26:29.936828 17472 solver.cpp:228] Iteration 40600, loss = 0.000192671\nI0818 17:26:29.936868 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:26:29.936884 17472 solver.cpp:244]     Train net output #1: loss = 0.000192756 (* 1 = 0.000192756 loss)\nI0818 17:26:30.019973 17472 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 17:28:47.377046 17472 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:30:09.419348 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5094\nI0818 17:30:09.419642 17472 solver.cpp:404]     Test net output #1: loss = 2.59518 (* 1 = 2.59518 loss)\nI0818 17:30:10.747740 17472 solver.cpp:228] Iteration 40700, loss = 0.000207499\nI0818 17:30:10.747781 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:30:10.747797 17472 solver.cpp:244]     Train net output #1: loss = 0.000207584 (* 1 = 0.000207584 loss)\nI0818 17:30:10.833887 17472 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 17:32:28.094305 17472 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:33:50.209713 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51556\nI0818 17:33:50.209988 17472 solver.cpp:404]     Test net 
output #1: loss = 2.53528 (* 1 = 2.53528 loss)\nI0818 17:33:51.538372 17472 solver.cpp:228] Iteration 40800, loss = 0.000195679\nI0818 17:33:51.538414 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:33:51.538430 17472 solver.cpp:244]     Train net output #1: loss = 0.000195764 (* 1 = 0.000195764 loss)\nI0818 17:33:51.624058 17472 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 17:36:08.729887 17472 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:37:30.835252 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51352\nI0818 17:37:30.835513 17472 solver.cpp:404]     Test net output #1: loss = 2.54443 (* 1 = 2.54443 loss)\nI0818 17:37:32.163458 17472 solver.cpp:228] Iteration 40900, loss = 0.000187571\nI0818 17:37:32.163501 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:37:32.163517 17472 solver.cpp:244]     Train net output #1: loss = 0.000187656 (* 1 = 0.000187656 loss)\nI0818 17:37:32.246508 17472 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 17:39:49.424052 17472 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:41:11.472280 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5144\nI0818 17:41:11.472520 17472 solver.cpp:404]     Test net output #1: loss = 2.53648 (* 1 = 2.53648 loss)\nI0818 17:41:12.801482 17472 solver.cpp:228] Iteration 41000, loss = 0.0001792\nI0818 17:41:12.801522 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:41:12.801538 17472 solver.cpp:244]     Train net output #1: loss = 0.000179285 (* 1 = 0.000179285 loss)\nI0818 17:41:12.881618 17472 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 17:43:29.994550 17472 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:44:52.078730 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5142\nI0818 17:44:52.078965 17472 solver.cpp:404]     Test net output #1: loss = 2.55096 (* 1 = 2.55096 loss)\nI0818 17:44:53.406666 17472 solver.cpp:228] Iteration 41100, loss = 
0.000210837\nI0818 17:44:53.406707 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:44:53.406721 17472 solver.cpp:244]     Train net output #1: loss = 0.000210922 (* 1 = 0.000210922 loss)\nI0818 17:44:53.487639 17472 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 17:47:10.586973 17472 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 17:48:32.733497 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51272\nI0818 17:48:32.733736 17472 solver.cpp:404]     Test net output #1: loss = 2.53941 (* 1 = 2.53941 loss)\nI0818 17:48:34.062346 17472 solver.cpp:228] Iteration 41200, loss = 0.000231631\nI0818 17:48:34.062388 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:48:34.062404 17472 solver.cpp:244]     Train net output #1: loss = 0.000231716 (* 1 = 0.000231716 loss)\nI0818 17:48:34.145108 17472 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0818 17:50:51.305625 17472 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 17:52:13.584601 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51836\nI0818 17:52:13.584915 17472 solver.cpp:404]     Test net output #1: loss = 2.50127 (* 1 = 2.50127 loss)\nI0818 17:52:14.913779 17472 solver.cpp:228] Iteration 41300, loss = 0.000213204\nI0818 17:52:14.913820 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:52:14.913836 17472 solver.cpp:244]     Train net output #1: loss = 0.000213289 (* 1 = 0.000213289 loss)\nI0818 17:52:14.995542 17472 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 17:54:32.208024 17472 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 17:55:54.324748 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51292\nI0818 17:55:54.324997 17472 solver.cpp:404]     Test net output #1: loss = 2.50771 (* 1 = 2.50771 loss)\nI0818 17:55:55.653265 17472 solver.cpp:228] Iteration 41400, loss = 0.000185654\nI0818 17:55:55.653306 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:55:55.653329 
17472 solver.cpp:244]     Train net output #1: loss = 0.000185739 (* 1 = 0.000185739 loss)\nI0818 17:55:55.738466 17472 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 17:58:13.008271 17472 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 17:59:35.216440 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5166\nI0818 17:59:35.216686 17472 solver.cpp:404]     Test net output #1: loss = 2.53089 (* 1 = 2.53089 loss)\nI0818 17:59:36.544888 17472 solver.cpp:228] Iteration 41500, loss = 0.000211172\nI0818 17:59:36.544927 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:59:36.544944 17472 solver.cpp:244]     Train net output #1: loss = 0.000211257 (* 1 = 0.000211257 loss)\nI0818 17:59:36.629495 17472 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 18:01:53.702081 17472 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 18:03:16.011611 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5176\nI0818 18:03:16.011894 17472 solver.cpp:404]     Test net output #1: loss = 2.48678 (* 1 = 2.48678 loss)\nI0818 18:03:17.340149 17472 solver.cpp:228] Iteration 41600, loss = 0.000219715\nI0818 18:03:17.340191 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:03:17.340207 17472 solver.cpp:244]     Train net output #1: loss = 0.0002198 (* 1 = 0.0002198 loss)\nI0818 18:03:17.422194 17472 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 18:05:34.646735 17472 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 18:06:56.980554 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51732\nI0818 18:06:56.980799 17472 solver.cpp:404]     Test net output #1: loss = 2.50382 (* 1 = 2.50382 loss)\nI0818 18:06:58.309864 17472 solver.cpp:228] Iteration 41700, loss = 0.000211368\nI0818 18:06:58.309903 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:06:58.309919 17472 solver.cpp:244]     Train net output #1: loss = 0.000211453 (* 1 = 0.000211453 loss)\nI0818 18:06:58.396128 17472 
sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 18:09:15.585852 17472 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 18:10:37.920940 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51564\nI0818 18:10:37.921221 17472 solver.cpp:404]     Test net output #1: loss = 2.4863 (* 1 = 2.4863 loss)\nI0818 18:10:39.250588 17472 solver.cpp:228] Iteration 41800, loss = 0.000171436\nI0818 18:10:39.250630 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:10:39.250646 17472 solver.cpp:244]     Train net output #1: loss = 0.000171521 (* 1 = 0.000171521 loss)\nI0818 18:10:39.334096 17472 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 18:12:56.505136 17472 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 18:14:18.827613 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51484\nI0818 18:14:18.827863 17472 solver.cpp:404]     Test net output #1: loss = 2.50623 (* 1 = 2.50623 loss)\nI0818 18:14:20.156148 17472 solver.cpp:228] Iteration 41900, loss = 0.000160663\nI0818 18:14:20.156189 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:14:20.156205 17472 solver.cpp:244]     Train net output #1: loss = 0.000160748 (* 1 = 0.000160748 loss)\nI0818 18:14:20.241294 17472 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 18:16:37.311575 17472 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 18:17:59.622798 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51376\nI0818 18:17:59.623054 17472 solver.cpp:404]     Test net output #1: loss = 2.49552 (* 1 = 2.49552 loss)\nI0818 18:18:00.950748 17472 solver.cpp:228] Iteration 42000, loss = 0.000207771\nI0818 18:18:00.950784 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:18:00.950800 17472 solver.cpp:244]     Train net output #1: loss = 0.000207856 (* 1 = 0.000207856 loss)\nI0818 18:18:01.037154 17472 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 18:20:18.249151 17472 solver.cpp:337] Iteration 42100, Testing net 
(#0)\nI0818 18:21:40.562294 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51604\nI0818 18:21:40.562547 17472 solver.cpp:404]     Test net output #1: loss = 2.49617 (* 1 = 2.49617 loss)\nI0818 18:21:41.890389 17472 solver.cpp:228] Iteration 42100, loss = 0.00021351\nI0818 18:21:41.890427 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:21:41.890442 17472 solver.cpp:244]     Train net output #1: loss = 0.000213595 (* 1 = 0.000213595 loss)\nI0818 18:21:41.978706 17472 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 18:23:59.211056 17472 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:25:21.519397 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51712\nI0818 18:25:21.519644 17472 solver.cpp:404]     Test net output #1: loss = 2.50254 (* 1 = 2.50254 loss)\nI0818 18:25:22.847995 17472 solver.cpp:228] Iteration 42200, loss = 0.000201291\nI0818 18:25:22.848038 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:25:22.848054 17472 solver.cpp:244]     Train net output #1: loss = 0.000201376 (* 1 = 0.000201376 loss)\nI0818 18:25:22.932251 17472 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 18:27:40.182894 17472 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:29:02.487457 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51556\nI0818 18:29:02.487717 17472 solver.cpp:404]     Test net output #1: loss = 2.48962 (* 1 = 2.48962 loss)\nI0818 18:29:03.815901 17472 solver.cpp:228] Iteration 42300, loss = 0.000202063\nI0818 18:29:03.815939 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:29:03.815954 17472 solver.cpp:244]     Train net output #1: loss = 0.000202148 (* 1 = 0.000202148 loss)\nI0818 18:29:03.897102 17472 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 18:31:21.065001 17472 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:32:43.379958 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51624\nI0818 18:32:43.380254 17472 
solver.cpp:404]     Test net output #1: loss = 2.47145 (* 1 = 2.47145 loss)\nI0818 18:32:44.709149 17472 solver.cpp:228] Iteration 42400, loss = 0.000191201\nI0818 18:32:44.709190 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:32:44.709206 17472 solver.cpp:244]     Train net output #1: loss = 0.000191286 (* 1 = 0.000191286 loss)\nI0818 18:32:44.791949 17472 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 18:35:01.805449 17472 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:36:24.067126 17472 solver.cpp:404]     Test net output #0: accuracy = 0.516\nI0818 18:36:24.067380 17472 solver.cpp:404]     Test net output #1: loss = 2.47367 (* 1 = 2.47367 loss)\nI0818 18:36:25.396361 17472 solver.cpp:228] Iteration 42500, loss = 0.000205086\nI0818 18:36:25.396402 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:36:25.396419 17472 solver.cpp:244]     Train net output #1: loss = 0.000205171 (* 1 = 0.000205171 loss)\nI0818 18:36:25.485774 17472 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 18:38:42.626197 17472 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:40:04.903527 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52148\nI0818 18:40:04.903801 17472 solver.cpp:404]     Test net output #1: loss = 2.44767 (* 1 = 2.44767 loss)\nI0818 18:40:06.233233 17472 solver.cpp:228] Iteration 42600, loss = 0.00017539\nI0818 18:40:06.233274 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:40:06.233290 17472 solver.cpp:244]     Train net output #1: loss = 0.000175475 (* 1 = 0.000175475 loss)\nI0818 18:40:06.313910 17472 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 18:42:23.525506 17472 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:43:45.849112 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51656\nI0818 18:43:45.849447 17472 solver.cpp:404]     Test net output #1: loss = 2.48899 (* 1 = 2.48899 loss)\nI0818 18:43:47.178861 17472 solver.cpp:228] 
Iteration 42700, loss = 0.000203862\nI0818 18:43:47.178901 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:43:47.178917 17472 solver.cpp:244]     Train net output #1: loss = 0.000203947 (* 1 = 0.000203947 loss)\nI0818 18:43:47.258668 17472 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 18:46:04.474753 17472 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 18:47:26.796322 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52004\nI0818 18:47:26.796648 17472 solver.cpp:404]     Test net output #1: loss = 2.44947 (* 1 = 2.44947 loss)\nI0818 18:47:28.125855 17472 solver.cpp:228] Iteration 42800, loss = 0.000203093\nI0818 18:47:28.125895 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:47:28.125911 17472 solver.cpp:244]     Train net output #1: loss = 0.000203178 (* 1 = 0.000203178 loss)\nI0818 18:47:28.210872 17472 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 18:49:45.441332 17472 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 18:51:07.758801 17472 solver.cpp:404]     Test net output #0: accuracy = 0.518\nI0818 18:51:07.759156 17472 solver.cpp:404]     Test net output #1: loss = 2.48216 (* 1 = 2.48216 loss)\nI0818 18:51:09.089012 17472 solver.cpp:228] Iteration 42900, loss = 0.000188161\nI0818 18:51:09.089052 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:51:09.089067 17472 solver.cpp:244]     Train net output #1: loss = 0.000188246 (* 1 = 0.000188246 loss)\nI0818 18:51:09.170977 17472 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 18:53:26.400674 17472 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 18:54:48.658516 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52252\nI0818 18:54:48.658783 17472 solver.cpp:404]     Test net output #1: loss = 2.4354 (* 1 = 2.4354 loss)\nI0818 18:54:49.988111 17472 solver.cpp:228] Iteration 43000, loss = 0.000193728\nI0818 18:54:49.988150 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
18:54:49.988165 17472 solver.cpp:244]     Train net output #1: loss = 0.000193813 (* 1 = 0.000193813 loss)\nI0818 18:54:50.072509 17472 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 18:57:07.323876 17472 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 18:58:29.581781 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51824\nI0818 18:58:29.582037 17472 solver.cpp:404]     Test net output #1: loss = 2.46221 (* 1 = 2.46221 loss)\nI0818 18:58:30.911795 17472 solver.cpp:228] Iteration 43100, loss = 0.000183263\nI0818 18:58:30.911837 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:58:30.911854 17472 solver.cpp:244]     Train net output #1: loss = 0.000183348 (* 1 = 0.000183348 loss)\nI0818 18:58:30.994777 17472 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 19:00:48.198150 17472 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 19:02:10.463707 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52812\nI0818 19:02:10.464020 17472 solver.cpp:404]     Test net output #1: loss = 2.38012 (* 1 = 2.38012 loss)\nI0818 19:02:11.793172 17472 solver.cpp:228] Iteration 43200, loss = 0.000189129\nI0818 19:02:11.793215 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:02:11.793231 17472 solver.cpp:244]     Train net output #1: loss = 0.000189214 (* 1 = 0.000189214 loss)\nI0818 19:02:11.880030 17472 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 19:04:28.992810 17472 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 19:05:51.321281 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5216\nI0818 19:05:51.321524 17472 solver.cpp:404]     Test net output #1: loss = 2.43668 (* 1 = 2.43668 loss)\nI0818 19:05:52.650173 17472 solver.cpp:228] Iteration 43300, loss = 0.000224362\nI0818 19:05:52.650213 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:05:52.650230 17472 solver.cpp:244]     Train net output #1: loss = 0.000224447 (* 1 = 0.000224447 loss)\nI0818 
19:05:52.730998 17472 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 19:08:09.900161 17472 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 19:09:32.172170 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52316\nI0818 19:09:32.172413 17472 solver.cpp:404]     Test net output #1: loss = 2.41298 (* 1 = 2.41298 loss)\nI0818 19:09:33.502144 17472 solver.cpp:228] Iteration 43400, loss = 0.000198045\nI0818 19:09:33.502187 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:09:33.502203 17472 solver.cpp:244]     Train net output #1: loss = 0.00019813 (* 1 = 0.00019813 loss)\nI0818 19:09:33.584938 17472 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 19:11:50.715829 17472 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 19:13:13.044744 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5248\nI0818 19:13:13.045014 17472 solver.cpp:404]     Test net output #1: loss = 2.40371 (* 1 = 2.40371 loss)\nI0818 19:13:14.372642 17472 solver.cpp:228] Iteration 43500, loss = 0.000216925\nI0818 19:13:14.372683 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:13:14.372697 17472 solver.cpp:244]     Train net output #1: loss = 0.00021701 (* 1 = 0.00021701 loss)\nI0818 19:13:14.460470 17472 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 19:15:31.784621 17472 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 19:16:54.047008 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52696\nI0818 19:16:54.047257 17472 solver.cpp:404]     Test net output #1: loss = 2.38805 (* 1 = 2.38805 loss)\nI0818 19:16:55.376621 17472 solver.cpp:228] Iteration 43600, loss = 0.000191921\nI0818 19:16:55.376662 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:16:55.376677 17472 solver.cpp:244]     Train net output #1: loss = 0.000192006 (* 1 = 0.000192006 loss)\nI0818 19:16:55.457911 17472 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 19:19:12.568382 17472 solver.cpp:337] Iteration 
43700, Testing net (#0)\nI0818 19:20:34.830253 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52096\nI0818 19:20:34.830507 17472 solver.cpp:404]     Test net output #1: loss = 2.4462 (* 1 = 2.4462 loss)\nI0818 19:20:36.160075 17472 solver.cpp:228] Iteration 43700, loss = 0.00019676\nI0818 19:20:36.160112 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:20:36.160128 17472 solver.cpp:244]     Train net output #1: loss = 0.000196845 (* 1 = 0.000196845 loss)\nI0818 19:20:36.238494 17472 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 19:22:53.447561 17472 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:24:15.764394 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51868\nI0818 19:24:15.764669 17472 solver.cpp:404]     Test net output #1: loss = 2.44559 (* 1 = 2.44559 loss)\nI0818 19:24:17.094017 17472 solver.cpp:228] Iteration 43800, loss = 0.000198808\nI0818 19:24:17.094055 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:24:17.094070 17472 solver.cpp:244]     Train net output #1: loss = 0.000198893 (* 1 = 0.000198893 loss)\nI0818 19:24:17.171679 17472 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 19:26:34.332056 17472 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:27:56.598978 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5226\nI0818 19:27:56.599270 17472 solver.cpp:404]     Test net output #1: loss = 2.41489 (* 1 = 2.41489 loss)\nI0818 19:27:57.927793 17472 solver.cpp:228] Iteration 43900, loss = 0.000216864\nI0818 19:27:57.927831 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:27:57.927847 17472 solver.cpp:244]     Train net output #1: loss = 0.000216949 (* 1 = 0.000216949 loss)\nI0818 19:27:58.011085 17472 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 19:30:15.205945 17472 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:31:37.501394 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52224\nI0818 
19:31:37.501644 17472 solver.cpp:404]     Test net output #1: loss = 2.40873 (* 1 = 2.40873 loss)\nI0818 19:31:38.830889 17472 solver.cpp:228] Iteration 44000, loss = 0.000182476\nI0818 19:31:38.830929 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:31:38.830945 17472 solver.cpp:244]     Train net output #1: loss = 0.000182561 (* 1 = 0.000182561 loss)\nI0818 19:31:38.919356 17472 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 19:33:56.369786 17472 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:35:17.895392 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52752\nI0818 19:35:17.895685 17472 solver.cpp:404]     Test net output #1: loss = 2.40214 (* 1 = 2.40214 loss)\nI0818 19:35:19.220039 17472 solver.cpp:228] Iteration 44100, loss = 0.000198951\nI0818 19:35:19.220088 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:35:19.220104 17472 solver.cpp:244]     Train net output #1: loss = 0.000199036 (* 1 = 0.000199036 loss)\nI0818 19:35:19.313910 17472 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 19:37:36.750226 17472 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:38:58.259697 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52148\nI0818 19:38:58.260028 17472 solver.cpp:404]     Test net output #1: loss = 2.42109 (* 1 = 2.42109 loss)\nI0818 19:38:59.584995 17472 solver.cpp:228] Iteration 44200, loss = 0.000195933\nI0818 19:38:59.585045 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:38:59.585062 17472 solver.cpp:244]     Train net output #1: loss = 0.000196018 (* 1 = 0.000196018 loss)\nI0818 19:38:59.673682 17472 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 19:41:17.078999 17472 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:42:38.568707 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52136\nI0818 19:42:38.569028 17472 solver.cpp:404]     Test net output #1: loss = 2.43303 (* 1 = 2.43303 loss)\nI0818 19:42:39.889384 
17472 solver.cpp:228] Iteration 44300, loss = 0.000231619\nI0818 19:42:39.889434 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:42:39.889451 17472 solver.cpp:244]     Train net output #1: loss = 0.000231704 (* 1 = 0.000231704 loss)\nI0818 19:42:39.980823 17472 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 19:44:57.010210 17472 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 19:46:18.489573 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51888\nI0818 19:46:18.489892 17472 solver.cpp:404]     Test net output #1: loss = 2.43641 (* 1 = 2.43641 loss)\nI0818 19:46:19.814617 17472 solver.cpp:228] Iteration 44400, loss = 0.000226006\nI0818 19:46:19.814666 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:46:19.814682 17472 solver.cpp:244]     Train net output #1: loss = 0.000226091 (* 1 = 0.000226091 loss)\nI0818 19:46:19.909273 17472 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 19:48:37.423382 17472 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 19:49:58.909951 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5178\nI0818 19:49:58.910270 17472 solver.cpp:404]     Test net output #1: loss = 2.44745 (* 1 = 2.44745 loss)\nI0818 19:50:00.233667 17472 solver.cpp:228] Iteration 44500, loss = 0.000202641\nI0818 19:50:00.233716 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:50:00.233732 17472 solver.cpp:244]     Train net output #1: loss = 0.000202726 (* 1 = 0.000202726 loss)\nI0818 19:50:00.327971 17472 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 19:52:17.812757 17472 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 19:53:39.295809 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5254\nI0818 19:53:39.296135 17472 solver.cpp:404]     Test net output #1: loss = 2.37541 (* 1 = 2.37541 loss)\nI0818 19:53:40.619614 17472 solver.cpp:228] Iteration 44600, loss = 0.000190492\nI0818 19:53:40.619663 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0818 19:53:40.619678 17472 solver.cpp:244]     Train net output #1: loss = 0.000190577 (* 1 = 0.000190577 loss)\nI0818 19:53:40.707417 17472 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 19:55:58.216104 17472 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 19:57:19.690279 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52056\nI0818 19:57:19.690601 17472 solver.cpp:404]     Test net output #1: loss = 2.42619 (* 1 = 2.42619 loss)\nI0818 19:57:21.014094 17472 solver.cpp:228] Iteration 44700, loss = 0.000185381\nI0818 19:57:21.014142 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:57:21.014158 17472 solver.cpp:244]     Train net output #1: loss = 0.000185466 (* 1 = 0.000185466 loss)\nI0818 19:57:21.101940 17472 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 19:59:38.380172 17472 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 20:00:59.864648 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52048\nI0818 20:00:59.864967 17472 solver.cpp:404]     Test net output #1: loss = 2.41033 (* 1 = 2.41033 loss)\nI0818 20:01:01.188289 17472 solver.cpp:228] Iteration 44800, loss = 0.000226622\nI0818 20:01:01.188336 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:01:01.188352 17472 solver.cpp:244]     Train net output #1: loss = 0.000226707 (* 1 = 0.000226707 loss)\nI0818 20:01:01.283047 17472 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 20:03:18.717147 17472 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 20:04:40.189455 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52412\nI0818 20:04:40.189738 17472 solver.cpp:404]     Test net output #1: loss = 2.41092 (* 1 = 2.41092 loss)\nI0818 20:04:41.512548 17472 solver.cpp:228] Iteration 44900, loss = 0.000197761\nI0818 20:04:41.512594 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:04:41.512610 17472 solver.cpp:244]     Train net output #1: loss = 0.000197846 (* 1 = 
0.000197846 loss)\nI0818 20:04:41.601927 17472 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 20:06:59.106840 17472 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 20:08:20.576320 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52472\nI0818 20:08:20.576627 17472 solver.cpp:404]     Test net output #1: loss = 2.39575 (* 1 = 2.39575 loss)\nI0818 20:08:21.899924 17472 solver.cpp:228] Iteration 45000, loss = 0.000184841\nI0818 20:08:21.899973 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:08:21.899988 17472 solver.cpp:244]     Train net output #1: loss = 0.000184926 (* 1 = 0.000184926 loss)\nI0818 20:08:21.994390 17472 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 20:10:39.430186 17472 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 20:12:00.901376 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52404\nI0818 20:12:00.901690 17472 solver.cpp:404]     Test net output #1: loss = 2.40937 (* 1 = 2.40937 loss)\nI0818 20:12:02.224927 17472 solver.cpp:228] Iteration 45100, loss = 0.000193036\nI0818 20:12:02.224980 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:12:02.224997 17472 solver.cpp:244]     Train net output #1: loss = 0.000193121 (* 1 = 0.000193121 loss)\nI0818 20:12:02.317831 17472 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 20:14:19.845876 17472 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 20:15:41.328944 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5276\nI0818 20:15:41.329263 17472 solver.cpp:404]     Test net output #1: loss = 2.38785 (* 1 = 2.38785 loss)\nI0818 20:15:42.652348 17472 solver.cpp:228] Iteration 45200, loss = 0.000209877\nI0818 20:15:42.652395 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:15:42.652410 17472 solver.cpp:244]     Train net output #1: loss = 0.000209962 (* 1 = 0.000209962 loss)\nI0818 20:15:42.744460 17472 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 20:18:00.295646 17472 
solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 20:19:21.786386 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51724\nI0818 20:19:21.786691 17472 solver.cpp:404]     Test net output #1: loss = 2.44724 (* 1 = 2.44724 loss)\nI0818 20:19:23.110267 17472 solver.cpp:228] Iteration 45300, loss = 0.000196305\nI0818 20:19:23.110316 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:19:23.110332 17472 solver.cpp:244]     Train net output #1: loss = 0.00019639 (* 1 = 0.00019639 loss)\nI0818 20:19:23.202525 17472 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 20:21:40.647955 17472 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 20:23:02.132371 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5192\nI0818 20:23:02.132675 17472 solver.cpp:404]     Test net output #1: loss = 2.42839 (* 1 = 2.42839 loss)\nI0818 20:23:03.456476 17472 solver.cpp:228] Iteration 45400, loss = 0.000199997\nI0818 20:23:03.456524 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:23:03.456539 17472 solver.cpp:244]     Train net output #1: loss = 0.000200082 (* 1 = 0.000200082 loss)\nI0818 20:23:03.551828 17472 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 20:25:21.292204 17472 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:26:42.783190 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5194\nI0818 20:26:42.783507 17472 solver.cpp:404]     Test net output #1: loss = 2.44357 (* 1 = 2.44357 loss)\nI0818 20:26:44.106561 17472 solver.cpp:228] Iteration 45500, loss = 0.000183202\nI0818 20:26:44.106598 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:26:44.106613 17472 solver.cpp:244]     Train net output #1: loss = 0.000183287 (* 1 = 0.000183287 loss)\nI0818 20:26:44.197337 17472 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 20:29:01.540285 17472 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:30:23.034235 17472 solver.cpp:404]     Test net output #0: 
accuracy = 0.52548\nI0818 20:30:23.034555 17472 solver.cpp:404]     Test net output #1: loss = 2.38622 (* 1 = 2.38622 loss)\nI0818 20:30:24.359042 17472 solver.cpp:228] Iteration 45600, loss = 0.000210047\nI0818 20:30:24.359081 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:30:24.359097 17472 solver.cpp:244]     Train net output #1: loss = 0.000210132 (* 1 = 0.000210132 loss)\nI0818 20:30:24.449750 17472 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 20:32:41.832484 17472 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:34:03.346978 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52144\nI0818 20:34:03.347283 17472 solver.cpp:404]     Test net output #1: loss = 2.43598 (* 1 = 2.43598 loss)\nI0818 20:34:04.671775 17472 solver.cpp:228] Iteration 45700, loss = 0.000198854\nI0818 20:34:04.671813 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:34:04.671826 17472 solver.cpp:244]     Train net output #1: loss = 0.000198939 (* 1 = 0.000198939 loss)\nI0818 20:34:04.758291 17472 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 20:36:22.233938 17472 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:37:43.730955 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52448\nI0818 20:37:43.731274 17472 solver.cpp:404]     Test net output #1: loss = 2.41374 (* 1 = 2.41374 loss)\nI0818 20:37:45.055168 17472 solver.cpp:228] Iteration 45800, loss = 0.000192892\nI0818 20:37:45.055205 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:37:45.055220 17472 solver.cpp:244]     Train net output #1: loss = 0.000192977 (* 1 = 0.000192977 loss)\nI0818 20:37:45.143468 17472 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0818 20:40:02.650718 17472 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:41:24.158949 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5216\nI0818 20:41:24.159256 17472 solver.cpp:404]     Test net output #1: loss = 2.44106 (* 1 = 2.44106 
loss)\nI0818 20:41:25.482375 17472 solver.cpp:228] Iteration 45900, loss = 0.000206464\nI0818 20:41:25.482410 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:41:25.482424 17472 solver.cpp:244]     Train net output #1: loss = 0.00020655 (* 1 = 0.00020655 loss)\nI0818 20:41:25.571982 17472 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 20:43:43.177572 17472 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 20:45:04.674351 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52828\nI0818 20:45:04.674661 17472 solver.cpp:404]     Test net output #1: loss = 2.38438 (* 1 = 2.38438 loss)\nI0818 20:45:05.998168 17472 solver.cpp:228] Iteration 46000, loss = 0.000202768\nI0818 20:45:05.998203 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:45:05.998216 17472 solver.cpp:244]     Train net output #1: loss = 0.000202853 (* 1 = 0.000202853 loss)\nI0818 20:45:06.091365 17472 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 20:47:23.699087 17472 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 20:48:45.191702 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52184\nI0818 20:48:45.192021 17472 solver.cpp:404]     Test net output #1: loss = 2.43034 (* 1 = 2.43034 loss)\nI0818 20:48:46.515509 17472 solver.cpp:228] Iteration 46100, loss = 0.000190205\nI0818 20:48:46.515542 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:48:46.515555 17472 solver.cpp:244]     Train net output #1: loss = 0.00019029 (* 1 = 0.00019029 loss)\nI0818 20:48:46.601711 17472 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 20:51:04.102665 17472 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 20:52:25.601799 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5244\nI0818 20:52:25.602135 17472 solver.cpp:404]     Test net output #1: loss = 2.40516 (* 1 = 2.40516 loss)\nI0818 20:52:26.926738 17472 solver.cpp:228] Iteration 46200, loss = 0.000194246\nI0818 20:52:26.926771 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:52:26.926786 17472 solver.cpp:244]     Train net output #1: loss = 0.000194331 (* 1 = 0.000194331 loss)\nI0818 20:52:27.013015 17472 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 20:54:44.525343 17472 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 20:56:06.002347 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52144\nI0818 20:56:06.002667 17472 solver.cpp:404]     Test net output #1: loss = 2.41875 (* 1 = 2.41875 loss)\nI0818 20:56:07.326491 17472 solver.cpp:228] Iteration 46300, loss = 0.000199713\nI0818 20:56:07.326526 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:56:07.326541 17472 solver.cpp:244]     Train net output #1: loss = 0.000199798 (* 1 = 0.000199798 loss)\nI0818 20:56:07.415268 17472 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 20:58:24.859166 17472 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 20:59:46.341820 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52592\nI0818 20:59:46.342131 17472 solver.cpp:404]     Test net output #1: loss = 2.37854 (* 1 = 2.37854 loss)\nI0818 20:59:47.665129 17472 solver.cpp:228] Iteration 46400, loss = 0.000171807\nI0818 20:59:47.665163 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:59:47.665179 17472 solver.cpp:244]     Train net output #1: loss = 0.000171892 (* 1 = 0.000171892 loss)\nI0818 20:59:47.758776 17472 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 21:02:05.322072 17472 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 21:03:26.810778 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52072\nI0818 21:03:26.811122 17472 solver.cpp:404]     Test net output #1: loss = 2.41964 (* 1 = 2.41964 loss)\nI0818 21:03:28.134626 17472 solver.cpp:228] Iteration 46500, loss = 0.00022588\nI0818 21:03:28.134662 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:03:28.134677 17472 solver.cpp:244]     Train net output 
#1: loss = 0.000225965 (* 1 = 0.000225965 loss)\nI0818 21:03:28.224406 17472 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 21:05:45.689765 17472 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 21:07:07.167743 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52264\nI0818 21:07:07.168045 17472 solver.cpp:404]     Test net output #1: loss = 2.39149 (* 1 = 2.39149 loss)\nI0818 21:07:08.491297 17472 solver.cpp:228] Iteration 46600, loss = 0.000183644\nI0818 21:07:08.491330 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:07:08.491344 17472 solver.cpp:244]     Train net output #1: loss = 0.000183729 (* 1 = 0.000183729 loss)\nI0818 21:07:08.578805 17472 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 21:09:26.060580 17472 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 21:10:47.538168 17472 solver.cpp:404]     Test net output #0: accuracy = 0.51984\nI0818 21:10:47.538476 17472 solver.cpp:404]     Test net output #1: loss = 2.43746 (* 1 = 2.43746 loss)\nI0818 21:10:48.862041 17472 solver.cpp:228] Iteration 46700, loss = 0.000196282\nI0818 21:10:48.862076 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:10:48.862092 17472 solver.cpp:244]     Train net output #1: loss = 0.000196367 (* 1 = 0.000196367 loss)\nI0818 21:10:48.950546 17472 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 21:13:06.540221 17472 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 21:14:28.022534 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52376\nI0818 21:14:28.022858 17472 solver.cpp:404]     Test net output #1: loss = 2.39516 (* 1 = 2.39516 loss)\nI0818 21:14:29.345906 17472 solver.cpp:228] Iteration 46800, loss = 0.000186968\nI0818 21:14:29.345943 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:14:29.345957 17472 solver.cpp:244]     Train net output #1: loss = 0.000187053 (* 1 = 0.000187053 loss)\nI0818 21:14:29.440754 17472 sgd_solver.cpp:166] Iteration 46800, lr = 
0.35\nI0818 21:16:46.886478 17472 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 21:18:08.366128 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52464\nI0818 21:18:08.366441 17472 solver.cpp:404]     Test net output #1: loss = 2.37678 (* 1 = 2.37678 loss)\nI0818 21:18:09.690258 17472 solver.cpp:228] Iteration 46900, loss = 0.000192087\nI0818 21:18:09.690294 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:18:09.690307 17472 solver.cpp:244]     Train net output #1: loss = 0.000192172 (* 1 = 0.000192172 loss)\nI0818 21:18:09.779506 17472 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 21:20:27.290622 17472 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 21:21:48.782176 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52464\nI0818 21:21:48.782488 17472 solver.cpp:404]     Test net output #1: loss = 2.39799 (* 1 = 2.39799 loss)\nI0818 21:21:50.106456 17472 solver.cpp:228] Iteration 47000, loss = 0.000211571\nI0818 21:21:50.106497 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:21:50.106511 17472 solver.cpp:244]     Train net output #1: loss = 0.000211656 (* 1 = 0.000211656 loss)\nI0818 21:21:50.195006 17472 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 21:24:07.625898 17472 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:25:29.115751 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52588\nI0818 21:25:29.116076 17472 solver.cpp:404]     Test net output #1: loss = 2.38198 (* 1 = 2.38198 loss)\nI0818 21:25:30.440299 17472 solver.cpp:228] Iteration 47100, loss = 0.000165243\nI0818 21:25:30.440335 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:25:30.440351 17472 solver.cpp:244]     Train net output #1: loss = 0.000165328 (* 1 = 0.000165328 loss)\nI0818 21:25:30.535181 17472 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 21:27:47.959501 17472 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:29:09.453467 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.53384\nI0818 21:29:09.453778 17472 solver.cpp:404]     Test net output #1: loss = 2.33761 (* 1 = 2.33761 loss)\nI0818 21:29:10.777703 17472 solver.cpp:228] Iteration 47200, loss = 0.000198935\nI0818 21:29:10.777739 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:29:10.777753 17472 solver.cpp:244]     Train net output #1: loss = 0.00019902 (* 1 = 0.00019902 loss)\nI0818 21:29:10.868259 17472 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 21:31:28.349011 17472 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:32:49.850157 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53112\nI0818 21:32:49.850481 17472 solver.cpp:404]     Test net output #1: loss = 2.35755 (* 1 = 2.35755 loss)\nI0818 21:32:51.174633 17472 solver.cpp:228] Iteration 47300, loss = 0.000190793\nI0818 21:32:51.174671 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:32:51.174686 17472 solver.cpp:244]     Train net output #1: loss = 0.000190878 (* 1 = 0.000190878 loss)\nI0818 21:32:51.265470 17472 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 21:35:08.722338 17472 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:36:30.217250 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53284\nI0818 21:36:30.217576 17472 solver.cpp:404]     Test net output #1: loss = 2.33525 (* 1 = 2.33525 loss)\nI0818 21:36:31.540377 17472 solver.cpp:228] Iteration 47400, loss = 0.000181418\nI0818 21:36:31.540416 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:36:31.540429 17472 solver.cpp:244]     Train net output #1: loss = 0.000181503 (* 1 = 0.000181503 loss)\nI0818 21:36:31.630039 17472 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 21:38:49.105572 17472 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:40:10.596438 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52572\nI0818 21:40:10.596757 17472 solver.cpp:404]     Test net output 
#1: loss = 2.39096 (* 1 = 2.39096 loss)\nI0818 21:40:11.921109 17472 solver.cpp:228] Iteration 47500, loss = 0.000178156\nI0818 21:40:11.921149 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:40:11.921164 17472 solver.cpp:244]     Train net output #1: loss = 0.000178241 (* 1 = 0.000178241 loss)\nI0818 21:40:12.008838 17472 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 21:42:29.634660 17472 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:43:51.146126 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53044\nI0818 21:43:51.146446 17472 solver.cpp:404]     Test net output #1: loss = 2.34118 (* 1 = 2.34118 loss)\nI0818 21:43:52.470181 17472 solver.cpp:228] Iteration 47600, loss = 0.000174817\nI0818 21:43:52.470218 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:43:52.470232 17472 solver.cpp:244]     Train net output #1: loss = 0.000174902 (* 1 = 0.000174902 loss)\nI0818 21:43:52.561141 17472 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 21:46:10.016793 17472 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 21:47:31.532552 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5224\nI0818 21:47:31.532882 17472 solver.cpp:404]     Test net output #1: loss = 2.39087 (* 1 = 2.39087 loss)\nI0818 21:47:32.856243 17472 solver.cpp:228] Iteration 47700, loss = 0.000187919\nI0818 21:47:32.856281 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:47:32.856297 17472 solver.cpp:244]     Train net output #1: loss = 0.000188004 (* 1 = 0.000188004 loss)\nI0818 21:47:32.951622 17472 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 21:49:50.420439 17472 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 21:51:11.926152 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52528\nI0818 21:51:11.926462 17472 solver.cpp:404]     Test net output #1: loss = 2.35718 (* 1 = 2.35718 loss)\nI0818 21:51:13.250629 17472 solver.cpp:228] Iteration 47800, loss = 
0.000214048\nI0818 21:51:13.250669 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:51:13.250684 17472 solver.cpp:244]     Train net output #1: loss = 0.000214133 (* 1 = 0.000214133 loss)\nI0818 21:51:13.336932 17472 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 21:53:30.861268 17472 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 21:54:52.362975 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52464\nI0818 21:54:52.363303 17472 solver.cpp:404]     Test net output #1: loss = 2.39247 (* 1 = 2.39247 loss)\nI0818 21:54:53.687155 17472 solver.cpp:228] Iteration 47900, loss = 0.000202945\nI0818 21:54:53.687194 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:54:53.687209 17472 solver.cpp:244]     Train net output #1: loss = 0.00020303 (* 1 = 0.00020303 loss)\nI0818 21:54:53.781548 17472 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 21:57:11.317862 17472 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 21:58:32.837816 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5264\nI0818 21:58:32.838142 17472 solver.cpp:404]     Test net output #1: loss = 2.36205 (* 1 = 2.36205 loss)\nI0818 21:58:34.161878 17472 solver.cpp:228] Iteration 48000, loss = 0.000190188\nI0818 21:58:34.161917 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:58:34.161932 17472 solver.cpp:244]     Train net output #1: loss = 0.000190273 (* 1 = 0.000190273 loss)\nI0818 21:58:34.254088 17472 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 22:00:51.751471 17472 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 22:02:13.262013 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5312\nI0818 22:02:13.262326 17472 solver.cpp:404]     Test net output #1: loss = 2.32969 (* 1 = 2.32969 loss)\nI0818 22:02:14.587213 17472 solver.cpp:228] Iteration 48100, loss = 0.000197377\nI0818 22:02:14.587252 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:02:14.587267 17472 
solver.cpp:244]     Train net output #1: loss = 0.000197462 (* 1 = 0.000197462 loss)\nI0818 22:02:14.678946 17472 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 22:04:32.080695 17472 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 22:05:53.581949 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52752\nI0818 22:05:53.582283 17472 solver.cpp:404]     Test net output #1: loss = 2.371 (* 1 = 2.371 loss)\nI0818 22:05:54.907038 17472 solver.cpp:228] Iteration 48200, loss = 0.000180695\nI0818 22:05:54.907078 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:05:54.907096 17472 solver.cpp:244]     Train net output #1: loss = 0.00018078 (* 1 = 0.00018078 loss)\nI0818 22:05:54.994671 17472 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 22:08:12.489284 17472 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 22:09:33.998605 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5298\nI0818 22:09:33.998917 17472 solver.cpp:404]     Test net output #1: loss = 2.35484 (* 1 = 2.35484 loss)\nI0818 22:09:35.324070 17472 solver.cpp:228] Iteration 48300, loss = 0.000203744\nI0818 22:09:35.324112 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:09:35.324128 17472 solver.cpp:244]     Train net output #1: loss = 0.000203829 (* 1 = 0.000203829 loss)\nI0818 22:09:35.411981 17472 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 22:11:52.978576 17472 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 22:13:14.486112 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52844\nI0818 22:13:14.486440 17472 solver.cpp:404]     Test net output #1: loss = 2.39719 (* 1 = 2.39719 loss)\nI0818 22:13:15.810792 17472 solver.cpp:228] Iteration 48400, loss = 0.000157223\nI0818 22:13:15.810829 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:13:15.810844 17472 solver.cpp:244]     Train net output #1: loss = 0.000157308 (* 1 = 0.000157308 loss)\nI0818 22:13:15.904280 17472 
sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 22:15:33.470541 17472 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 22:16:54.990171 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52628\nI0818 22:16:54.990499 17472 solver.cpp:404]     Test net output #1: loss = 2.38106 (* 1 = 2.38106 loss)\nI0818 22:16:56.315726 17472 solver.cpp:228] Iteration 48500, loss = 0.000196811\nI0818 22:16:56.315763 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:16:56.315778 17472 solver.cpp:244]     Train net output #1: loss = 0.000196896 (* 1 = 0.000196896 loss)\nI0818 22:16:56.403674 17472 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 22:19:13.890050 17472 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 22:20:35.399782 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52604\nI0818 22:20:35.400117 17472 solver.cpp:404]     Test net output #1: loss = 2.36084 (* 1 = 2.36084 loss)\nI0818 22:20:36.724612 17472 solver.cpp:228] Iteration 48600, loss = 0.000176182\nI0818 22:20:36.724650 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:20:36.724665 17472 solver.cpp:244]     Train net output #1: loss = 0.000176267 (* 1 = 0.000176267 loss)\nI0818 22:20:36.814455 17472 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 22:22:54.402806 17472 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:24:16.017302 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5272\nI0818 22:24:16.017618 17472 solver.cpp:404]     Test net output #1: loss = 2.39177 (* 1 = 2.39177 loss)\nI0818 22:24:17.342038 17472 solver.cpp:228] Iteration 48700, loss = 0.00020891\nI0818 22:24:17.342075 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:24:17.342095 17472 solver.cpp:244]     Train net output #1: loss = 0.000208995 (* 1 = 0.000208995 loss)\nI0818 22:24:17.429040 17472 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 22:26:34.836076 17472 solver.cpp:337] Iteration 48800, Testing net 
(#0)\nI0818 22:27:56.331338 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52664\nI0818 22:27:56.331647 17472 solver.cpp:404]     Test net output #1: loss = 2.35611 (* 1 = 2.35611 loss)\nI0818 22:27:57.655536 17472 solver.cpp:228] Iteration 48800, loss = 0.000169011\nI0818 22:27:57.655573 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:27:57.655588 17472 solver.cpp:244]     Train net output #1: loss = 0.000169096 (* 1 = 0.000169096 loss)\nI0818 22:27:57.747874 17472 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 22:30:15.301640 17472 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:31:36.797771 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52356\nI0818 22:31:36.798089 17472 solver.cpp:404]     Test net output #1: loss = 2.3873 (* 1 = 2.3873 loss)\nI0818 22:31:38.121992 17472 solver.cpp:228] Iteration 48900, loss = 0.000170972\nI0818 22:31:38.122026 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:31:38.122041 17472 solver.cpp:244]     Train net output #1: loss = 0.000171057 (* 1 = 0.000171057 loss)\nI0818 22:31:38.213985 17472 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 22:33:55.711825 17472 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:35:17.213449 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52496\nI0818 22:35:17.213757 17472 solver.cpp:404]     Test net output #1: loss = 2.38797 (* 1 = 2.38797 loss)\nI0818 22:35:18.537138 17472 solver.cpp:228] Iteration 49000, loss = 0.000172005\nI0818 22:35:18.537173 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:35:18.537189 17472 solver.cpp:244]     Train net output #1: loss = 0.00017209 (* 1 = 0.00017209 loss)\nI0818 22:35:18.630739 17472 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 22:37:36.193361 17472 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 22:38:57.689923 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52804\nI0818 22:38:57.690244 17472 
solver.cpp:404]     Test net output #1: loss = 2.3602 (* 1 = 2.3602 loss)\nI0818 22:38:59.014327 17472 solver.cpp:228] Iteration 49100, loss = 0.000171641\nI0818 22:38:59.014365 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:38:59.014380 17472 solver.cpp:244]     Train net output #1: loss = 0.000171726 (* 1 = 0.000171726 loss)\nI0818 22:38:59.104941 17472 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 22:41:16.670115 17472 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:42:38.165243 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52496\nI0818 22:42:38.165572 17472 solver.cpp:404]     Test net output #1: loss = 2.37904 (* 1 = 2.37904 loss)\nI0818 22:42:39.489287 17472 solver.cpp:228] Iteration 49200, loss = 0.000185749\nI0818 22:42:39.489326 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:42:39.489341 17472 solver.cpp:244]     Train net output #1: loss = 0.000185834 (* 1 = 0.000185834 loss)\nI0818 22:42:39.576689 17472 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0818 22:44:57.006238 17472 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 22:46:18.515585 17472 solver.cpp:404]     Test net output #0: accuracy = 0.528\nI0818 22:46:18.515893 17472 solver.cpp:404]     Test net output #1: loss = 2.36691 (* 1 = 2.36691 loss)\nI0818 22:46:19.840375 17472 solver.cpp:228] Iteration 49300, loss = 0.000173355\nI0818 22:46:19.840414 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:46:19.840430 17472 solver.cpp:244]     Train net output #1: loss = 0.00017344 (* 1 = 0.00017344 loss)\nI0818 22:46:19.934206 17472 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 22:48:37.696069 17472 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 22:49:59.183652 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5286\nI0818 22:49:59.183996 17472 solver.cpp:404]     Test net output #1: loss = 2.37199 (* 1 = 2.37199 loss)\nI0818 22:50:00.508309 17472 solver.cpp:228] 
Iteration 49400, loss = 0.00018626\nI0818 22:50:00.508348 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:50:00.508363 17472 solver.cpp:244]     Train net output #1: loss = 0.000186345 (* 1 = 0.000186345 loss)\nI0818 22:50:00.597092 17472 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 22:52:18.128264 17472 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 22:53:39.616593 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53304\nI0818 22:53:39.616920 17472 solver.cpp:404]     Test net output #1: loss = 2.3591 (* 1 = 2.3591 loss)\nI0818 22:53:40.942411 17472 solver.cpp:228] Iteration 49500, loss = 0.000203217\nI0818 22:53:40.942451 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:53:40.942466 17472 solver.cpp:244]     Train net output #1: loss = 0.000203302 (* 1 = 0.000203302 loss)\nI0818 22:53:41.029917 17472 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 22:55:58.552083 17472 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 22:57:20.053391 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53224\nI0818 22:57:20.053701 17472 solver.cpp:404]     Test net output #1: loss = 2.33794 (* 1 = 2.33794 loss)\nI0818 22:57:21.376926 17472 solver.cpp:228] Iteration 49600, loss = 0.000169257\nI0818 22:57:21.376963 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:57:21.376978 17472 solver.cpp:244]     Train net output #1: loss = 0.000169342 (* 1 = 0.000169342 loss)\nI0818 22:57:21.464056 17472 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 22:59:38.871439 17472 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 23:01:00.369559 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5326\nI0818 23:01:00.369871 17472 solver.cpp:404]     Test net output #1: loss = 2.34708 (* 1 = 2.34708 loss)\nI0818 23:01:01.693048 17472 solver.cpp:228] Iteration 49700, loss = 0.000184693\nI0818 23:01:01.693083 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
23:01:01.693097 17472 solver.cpp:244]     Train net output #1: loss = 0.000184778 (* 1 = 0.000184778 loss)\nI0818 23:01:01.785853 17472 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 23:03:19.307638 17472 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 23:04:40.813704 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52852\nI0818 23:04:40.814013 17472 solver.cpp:404]     Test net output #1: loss = 2.37173 (* 1 = 2.37173 loss)\nI0818 23:04:42.137997 17472 solver.cpp:228] Iteration 49800, loss = 0.000184907\nI0818 23:04:42.138036 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:04:42.138049 17472 solver.cpp:244]     Train net output #1: loss = 0.000184992 (* 1 = 0.000184992 loss)\nI0818 23:04:42.233393 17472 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 23:06:59.755151 17472 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 23:08:21.254031 17472 solver.cpp:404]     Test net output #0: accuracy = 0.53052\nI0818 23:08:21.254357 17472 solver.cpp:404]     Test net output #1: loss = 2.36605 (* 1 = 2.36605 loss)\nI0818 23:08:22.578218 17472 solver.cpp:228] Iteration 49900, loss = 0.000196825\nI0818 23:08:22.578256 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:08:22.578271 17472 solver.cpp:244]     Train net output #1: loss = 0.00019691 (* 1 = 0.00019691 loss)\nI0818 23:08:22.670749 17472 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 23:10:40.193392 17472 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 23:12:01.683796 17472 solver.cpp:404]     Test net output #0: accuracy = 0.52848\nI0818 23:12:01.684084 17472 solver.cpp:404]     Test net output #1: loss = 2.37308 (* 1 = 2.37308 loss)\nI0818 23:12:03.007509 17472 solver.cpp:228] Iteration 50000, loss = 0.000202062\nI0818 23:12:03.007549 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:12:03.007565 17472 solver.cpp:244]     Train net output #1: loss = 0.000202147 (* 1 = 0.000202147 loss)\nI0818 
23:12:03.102203 17472 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 23:12:03.102226 17472 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 23:14:20.659824 17472 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 23:15:42.156910 17472 solver.cpp:404]     Test net output #0: accuracy = 0.55012\nI0818 23:15:42.157239 17472 solver.cpp:404]     Test net output #1: loss = 2.24895 (* 1 = 2.24895 loss)\nI0818 23:15:43.482355 17472 solver.cpp:228] Iteration 50100, loss = 0.000197896\nI0818 23:15:43.482394 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:15:43.482409 17472 solver.cpp:244]     Train net output #1: loss = 0.000197981 (* 1 = 0.000197981 loss)\nI0818 23:15:43.570744 17472 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 23:18:01.002759 17472 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 23:19:22.482343 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5792\nI0818 23:19:22.482673 17472 solver.cpp:404]     Test net output #1: loss = 2.09738 (* 1 = 2.09738 loss)\nI0818 23:19:23.805667 17472 solver.cpp:228] Iteration 50200, loss = 0.000181952\nI0818 23:19:23.805707 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:19:23.805722 17472 solver.cpp:244]     Train net output #1: loss = 0.000182037 (* 1 = 0.000182037 loss)\nI0818 23:19:23.901935 17472 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 23:21:41.382702 17472 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:23:02.865017 17472 solver.cpp:404]     Test net output #0: accuracy = 0.5962\nI0818 23:23:02.865355 17472 solver.cpp:404]     Test net output #1: loss = 2.002 (* 1 = 2.002 loss)\nI0818 23:23:04.191048 17472 solver.cpp:228] Iteration 50300, loss = 0.000168652\nI0818 23:23:04.191087 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:23:04.191102 17472 solver.cpp:244]     Train net output #1: loss = 0.000168737 (* 1 = 0.000168737 loss)\nI0818 23:23:04.275619 17472 
sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 23:25:21.750895 17472 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:26:43.224989 17472 solver.cpp:404]     Test net output #0: accuracy = 0.61952\nI0818 23:26:43.225344 17472 solver.cpp:404]     Test net output #1: loss = 1.89511 (* 1 = 1.89511 loss)\nI0818 23:26:44.549116 17472 solver.cpp:228] Iteration 50400, loss = 0.000186574\nI0818 23:26:44.549155 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:26:44.549170 17472 solver.cpp:244]     Train net output #1: loss = 0.000186659 (* 1 = 0.000186659 loss)\nI0818 23:26:44.638763 17472 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 23:29:02.045624 17472 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:30:23.527674 17472 solver.cpp:404]     Test net output #0: accuracy = 0.63328\nI0818 23:30:23.528003 17472 solver.cpp:404]     Test net output #1: loss = 1.83093 (* 1 = 1.83093 loss)\nI0818 23:30:24.852078 17472 solver.cpp:228] Iteration 50500, loss = 0.000185801\nI0818 23:30:24.852120 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:30:24.852136 17472 solver.cpp:244]     Train net output #1: loss = 0.000185886 (* 1 = 0.000185886 loss)\nI0818 23:30:24.941253 17472 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 23:32:42.373837 17472 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:34:03.868610 17472 solver.cpp:404]     Test net output #0: accuracy = 0.64928\nI0818 23:34:03.868935 17472 solver.cpp:404]     Test net output #1: loss = 1.75421 (* 1 = 1.75421 loss)\nI0818 23:34:05.189087 17472 solver.cpp:228] Iteration 50600, loss = 0.000153658\nI0818 23:34:05.189138 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:34:05.189155 17472 solver.cpp:244]     Train net output #1: loss = 0.000153743 (* 1 = 0.000153743 loss)\nI0818 23:34:05.279872 17472 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 23:36:22.332912 17472 solver.cpp:337] Iteration 50700, Testing 
net (#0)\nI0818 23:37:43.818073 17472 solver.cpp:404]     Test net output #0: accuracy = 0.65544\nI0818 23:37:43.818408 17472 solver.cpp:404]     Test net output #1: loss = 1.7203 (* 1 = 1.7203 loss)\nI0818 23:37:45.139293 17472 solver.cpp:228] Iteration 50700, loss = 0.000195102\nI0818 23:37:45.139343 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:37:45.139358 17472 solver.cpp:244]     Train net output #1: loss = 0.000195187 (* 1 = 0.000195187 loss)\nI0818 23:37:45.223551 17472 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 23:40:02.242444 17472 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:41:23.727367 17472 solver.cpp:404]     Test net output #0: accuracy = 0.66888\nI0818 23:41:23.727672 17472 solver.cpp:404]     Test net output #1: loss = 1.66181 (* 1 = 1.66181 loss)\nI0818 23:41:25.048385 17472 solver.cpp:228] Iteration 50800, loss = 0.000189086\nI0818 23:41:25.048432 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:41:25.048449 17472 solver.cpp:244]     Train net output #1: loss = 0.000189171 (* 1 = 0.000189171 loss)\nI0818 23:41:25.132621 17472 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0818 23:43:42.666543 17472 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 23:45:04.157508 17472 solver.cpp:404]     Test net output #0: accuracy = 0.67308\nI0818 23:45:04.157817 17472 solver.cpp:404]     Test net output #1: loss = 1.64205 (* 1 = 1.64205 loss)\nI0818 23:45:05.478794 17472 solver.cpp:228] Iteration 50900, loss = 0.000166785\nI0818 23:45:05.478843 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:45:05.478860 17472 solver.cpp:244]     Train net output #1: loss = 0.00016687 (* 1 = 0.00016687 loss)\nI0818 23:45:05.564613 17472 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0818 23:47:22.573915 17472 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 23:48:44.056572 17472 solver.cpp:404]     Test net output #0: accuracy = 0.684\nI0818 23:48:44.056897 
17472 solver.cpp:404]     Test net output #1: loss = 1.60302 (* 1 = 1.60302 loss)\nI0818 23:48:45.377465 17472 solver.cpp:228] Iteration 51000, loss = 0.000166707\nI0818 23:48:45.377511 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:48:45.377527 17472 solver.cpp:244]     Train net output #1: loss = 0.000166792 (* 1 = 0.000166792 loss)\nI0818 23:48:45.463518 17472 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0818 23:51:02.480191 17472 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 23:52:23.969092 17472 solver.cpp:404]     Test net output #0: accuracy = 0.68524\nI0818 23:52:23.969430 17472 solver.cpp:404]     Test net output #1: loss = 1.59126 (* 1 = 1.59126 loss)\nI0818 23:52:25.289067 17472 solver.cpp:228] Iteration 51100, loss = 0.000221549\nI0818 23:52:25.289119 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:52:25.289136 17472 solver.cpp:244]     Train net output #1: loss = 0.000221634 (* 1 = 0.000221634 loss)\nI0818 23:52:25.376502 17472 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0818 23:54:42.403069 17472 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 23:56:03.876693 17472 solver.cpp:404]     Test net output #0: accuracy = 0.6942\nI0818 23:56:03.877017 17472 solver.cpp:404]     Test net output #1: loss = 1.56552 (* 1 = 1.56552 loss)\nI0818 23:56:05.197837 17472 solver.cpp:228] Iteration 51200, loss = 0.000203175\nI0818 23:56:05.197885 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:56:05.197901 17472 solver.cpp:244]     Train net output #1: loss = 0.00020326 (* 1 = 0.00020326 loss)\nI0818 23:56:05.287199 17472 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0818 23:58:22.268796 17472 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 23:59:43.750149 17472 solver.cpp:404]     Test net output #0: accuracy = 0.69484\nI0818 23:59:43.750490 17472 solver.cpp:404]     Test net output #1: loss = 1.55837 (* 1 = 1.55837 loss)\nI0818 23:59:45.071350 17472 
solver.cpp:228] Iteration 51300, loss = 0.000203907\nI0818 23:59:45.071399 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:59:45.071415 17472 solver.cpp:244]     Train net output #1: loss = 0.000203992 (* 1 = 0.000203992 loss)\nI0818 23:59:45.159360 17472 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0819 00:02:02.167414 17472 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 00:03:23.659175 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70188\nI0819 00:03:23.659507 17472 solver.cpp:404]     Test net output #1: loss = 1.54698 (* 1 = 1.54698 loss)\nI0819 00:03:24.980357 17472 solver.cpp:228] Iteration 51400, loss = 0.000189654\nI0819 00:03:24.980403 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:03:24.980418 17472 solver.cpp:244]     Train net output #1: loss = 0.000189739 (* 1 = 0.000189739 loss)\nI0819 00:03:25.068485 17472 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0819 00:05:42.115147 17472 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 00:07:03.607532 17472 solver.cpp:404]     Test net output #0: accuracy = 0.704\nI0819 00:07:03.607858 17472 solver.cpp:404]     Test net output #1: loss = 1.54368 (* 1 = 1.54368 loss)\nI0819 00:07:04.928061 17472 solver.cpp:228] Iteration 51500, loss = 0.000194204\nI0819 00:07:04.928107 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:07:04.928128 17472 solver.cpp:244]     Train net output #1: loss = 0.000194289 (* 1 = 0.000194289 loss)\nI0819 00:07:05.015266 17472 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0819 00:09:22.049576 17472 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 00:10:43.524435 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70544\nI0819 00:10:43.524765 17472 solver.cpp:404]     Test net output #1: loss = 1.5351 (* 1 = 1.5351 loss)\nI0819 00:10:44.844704 17472 solver.cpp:228] Iteration 51600, loss = 0.000209576\nI0819 00:10:44.844750 17472 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 00:10:44.844768 17472 solver.cpp:244]     Train net output #1: loss = 0.000209661 (* 1 = 0.000209661 loss)\nI0819 00:10:44.931958 17472 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0819 00:13:02.264313 17472 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:14:23.748363 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70604\nI0819 00:14:23.748687 17472 solver.cpp:404]     Test net output #1: loss = 1.53047 (* 1 = 1.53047 loss)\nI0819 00:14:25.068594 17472 solver.cpp:228] Iteration 51700, loss = 0.000194071\nI0819 00:14:25.068639 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:14:25.068655 17472 solver.cpp:244]     Train net output #1: loss = 0.000194156 (* 1 = 0.000194156 loss)\nI0819 00:14:25.159287 17472 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0819 00:16:42.027703 17472 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:18:03.529299 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70608\nI0819 00:18:03.529651 17472 solver.cpp:404]     Test net output #1: loss = 1.53158 (* 1 = 1.53158 loss)\nI0819 00:18:04.849196 17472 solver.cpp:228] Iteration 51800, loss = 0.000174637\nI0819 00:18:04.849241 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:18:04.849256 17472 solver.cpp:244]     Train net output #1: loss = 0.000174722 (* 1 = 0.000174722 loss)\nI0819 00:18:04.937831 17472 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0819 00:20:21.870502 17472 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 00:21:43.361605 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7076\nI0819 00:21:43.361943 17472 solver.cpp:404]     Test net output #1: loss = 1.52492 (* 1 = 1.52492 loss)\nI0819 00:21:44.681265 17472 solver.cpp:228] Iteration 51900, loss = 0.000173166\nI0819 00:21:44.681311 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:21:44.681327 17472 solver.cpp:244]     Train net output #1: loss = 0.000173251 (* 1 = 
0.000173251 loss)\nI0819 00:21:44.767786 17472 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0819 00:24:01.720548 17472 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:25:23.205993 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0819 00:25:23.206307 17472 solver.cpp:404]     Test net output #1: loss = 1.52323 (* 1 = 1.52323 loss)\nI0819 00:25:24.526057 17472 solver.cpp:228] Iteration 52000, loss = 0.000186593\nI0819 00:25:24.526101 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:25:24.526118 17472 solver.cpp:244]     Train net output #1: loss = 0.000186678 (* 1 = 0.000186678 loss)\nI0819 00:25:24.613924 17472 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0819 00:27:41.982276 17472 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:29:03.470326 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70844\nI0819 00:29:03.470628 17472 solver.cpp:404]     Test net output #1: loss = 1.52163 (* 1 = 1.52163 loss)\nI0819 00:29:04.790309 17472 solver.cpp:228] Iteration 52100, loss = 0.000218349\nI0819 00:29:04.790356 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:29:04.790372 17472 solver.cpp:244]     Train net output #1: loss = 0.000218434 (* 1 = 0.000218434 loss)\nI0819 00:29:04.878360 17472 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0819 00:31:22.038288 17472 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:32:43.509266 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70944\nI0819 00:32:43.509594 17472 solver.cpp:404]     Test net output #1: loss = 1.52092 (* 1 = 1.52092 loss)\nI0819 00:32:44.829329 17472 solver.cpp:228] Iteration 52200, loss = 0.000190121\nI0819 00:32:44.829375 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:32:44.829391 17472 solver.cpp:244]     Train net output #1: loss = 0.000190206 (* 1 = 0.000190206 loss)\nI0819 00:32:44.918577 17472 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0819 00:35:01.842435 
17472 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:36:23.325233 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70864\nI0819 00:36:23.325549 17472 solver.cpp:404]     Test net output #1: loss = 1.52184 (* 1 = 1.52184 loss)\nI0819 00:36:24.645231 17472 solver.cpp:228] Iteration 52300, loss = 0.000169202\nI0819 00:36:24.645277 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:36:24.645294 17472 solver.cpp:244]     Train net output #1: loss = 0.000169287 (* 1 = 0.000169287 loss)\nI0819 00:36:24.733564 17472 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0819 00:38:41.738528 17472 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 00:40:03.233176 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7122\nI0819 00:40:03.233466 17472 solver.cpp:404]     Test net output #1: loss = 1.5205 (* 1 = 1.5205 loss)\nI0819 00:40:04.553294 17472 solver.cpp:228] Iteration 52400, loss = 0.000182624\nI0819 00:40:04.553338 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:40:04.553354 17472 solver.cpp:244]     Train net output #1: loss = 0.000182709 (* 1 = 0.000182709 loss)\nI0819 00:40:04.639080 17472 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0819 00:42:21.738343 17472 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 00:43:43.679776 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71048\nI0819 00:43:43.680083 17472 solver.cpp:404]     Test net output #1: loss = 1.51838 (* 1 = 1.51838 loss)\nI0819 00:43:45.008980 17472 solver.cpp:228] Iteration 52500, loss = 0.000192435\nI0819 00:43:45.009043 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:43:45.009061 17472 solver.cpp:244]     Train net output #1: loss = 0.00019252 (* 1 = 0.00019252 loss)\nI0819 00:43:45.088843 17472 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0819 00:46:02.287683 17472 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 00:47:24.094760 17472 solver.cpp:404]     Test net output 
#0: accuracy = 0.71292\nI0819 00:47:24.095064 17472 solver.cpp:404]     Test net output #1: loss = 1.51879 (* 1 = 1.51879 loss)\nI0819 00:47:25.424353 17472 solver.cpp:228] Iteration 52600, loss = 0.000174231\nI0819 00:47:25.424404 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:47:25.424422 17472 solver.cpp:244]     Train net output #1: loss = 0.000174316 (* 1 = 0.000174316 loss)\nI0819 00:47:25.504606 17472 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0819 00:49:42.794955 17472 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 00:51:04.768898 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0819 00:51:04.769178 17472 solver.cpp:404]     Test net output #1: loss = 1.521 (* 1 = 1.521 loss)\nI0819 00:51:06.100836 17472 solver.cpp:228] Iteration 52700, loss = 0.000166852\nI0819 00:51:06.100886 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:51:06.100903 17472 solver.cpp:244]     Train net output #1: loss = 0.000166937 (* 1 = 0.000166937 loss)\nI0819 00:51:06.189616 17472 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0819 00:53:23.674152 17472 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 00:54:45.698695 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71276\nI0819 00:54:45.698930 17472 solver.cpp:404]     Test net output #1: loss = 1.52299 (* 1 = 1.52299 loss)\nI0819 00:54:47.028084 17472 solver.cpp:228] Iteration 52800, loss = 0.0002061\nI0819 00:54:47.028136 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:54:47.028152 17472 solver.cpp:244]     Train net output #1: loss = 0.000206185 (* 1 = 0.000206185 loss)\nI0819 00:54:47.112643 17472 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0819 00:57:04.448462 17472 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 00:58:26.620384 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71228\nI0819 00:58:26.620613 17472 solver.cpp:404]     Test net output #1: loss = 1.52127 (* 1 = 1.52127 
loss)\nI0819 00:58:27.949383 17472 solver.cpp:228] Iteration 52900, loss = 0.000177324\nI0819 00:58:27.949434 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:58:27.949451 17472 solver.cpp:244]     Train net output #1: loss = 0.000177409 (* 1 = 0.000177409 loss)\nI0819 00:58:28.032799 17472 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0819 01:00:45.285698 17472 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 01:02:07.376576 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71224\nI0819 01:02:07.376902 17472 solver.cpp:404]     Test net output #1: loss = 1.52105 (* 1 = 1.52105 loss)\nI0819 01:02:08.705005 17472 solver.cpp:228] Iteration 53000, loss = 0.000170781\nI0819 01:02:08.705051 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:02:08.705068 17472 solver.cpp:244]     Train net output #1: loss = 0.000170866 (* 1 = 0.000170866 loss)\nI0819 01:02:08.788377 17472 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0819 01:04:25.865070 17472 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 01:05:48.139096 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71208\nI0819 01:05:48.139375 17472 solver.cpp:404]     Test net output #1: loss = 1.51845 (* 1 = 1.51845 loss)\nI0819 01:05:49.468422 17472 solver.cpp:228] Iteration 53100, loss = 0.000168142\nI0819 01:05:49.468472 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:05:49.468488 17472 solver.cpp:244]     Train net output #1: loss = 0.000168227 (* 1 = 0.000168227 loss)\nI0819 01:05:49.551331 17472 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0819 01:08:06.555438 17472 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 01:09:28.829537 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7124\nI0819 01:09:28.829818 17472 solver.cpp:404]     Test net output #1: loss = 1.52042 (* 1 = 1.52042 loss)\nI0819 01:09:30.158696 17472 solver.cpp:228] Iteration 53200, loss = 0.000185432\nI0819 01:09:30.158735 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:09:30.158751 17472 solver.cpp:244]     Train net output #1: loss = 0.000185517 (* 1 = 0.000185517 loss)\nI0819 01:09:30.245417 17472 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0819 01:11:47.237949 17472 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 01:13:09.530445 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 01:13:09.530740 17472 solver.cpp:404]     Test net output #1: loss = 1.52005 (* 1 = 1.52005 loss)\nI0819 01:13:10.858914 17472 solver.cpp:228] Iteration 53300, loss = 0.000164992\nI0819 01:13:10.858961 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:13:10.858978 17472 solver.cpp:244]     Train net output #1: loss = 0.000165077 (* 1 = 0.000165077 loss)\nI0819 01:13:10.939144 17472 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0819 01:15:28.177006 17472 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:16:50.512174 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71324\nI0819 01:16:50.512460 17472 solver.cpp:404]     Test net output #1: loss = 1.51935 (* 1 = 1.51935 loss)\nI0819 01:16:51.840500 17472 solver.cpp:228] Iteration 53400, loss = 0.000201302\nI0819 01:16:51.840548 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:16:51.840564 17472 solver.cpp:244]     Train net output #1: loss = 0.000201387 (* 1 = 0.000201387 loss)\nI0819 01:16:51.927003 17472 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0819 01:19:08.924551 17472 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:20:31.203018 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71368\nI0819 01:20:31.203299 17472 solver.cpp:404]     Test net output #1: loss = 1.52059 (* 1 = 1.52059 loss)\nI0819 01:20:32.532225 17472 solver.cpp:228] Iteration 53500, loss = 0.000187745\nI0819 01:20:32.532270 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:20:32.532287 17472 solver.cpp:244]     Train net 
output #1: loss = 0.00018783 (* 1 = 0.00018783 loss)\nI0819 01:20:32.610983 17472 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0819 01:22:49.862015 17472 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:24:11.963368 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0819 01:24:11.963626 17472 solver.cpp:404]     Test net output #1: loss = 1.52229 (* 1 = 1.52229 loss)\nI0819 01:24:13.292155 17472 solver.cpp:228] Iteration 53600, loss = 0.000162605\nI0819 01:24:13.292201 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:24:13.292217 17472 solver.cpp:244]     Train net output #1: loss = 0.00016269 (* 1 = 0.00016269 loss)\nI0819 01:24:13.373309 17472 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0819 01:26:30.378018 17472 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:27:52.373047 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71408\nI0819 01:27:52.373301 17472 solver.cpp:404]     Test net output #1: loss = 1.52365 (* 1 = 1.52365 loss)\nI0819 01:27:53.701581 17472 solver.cpp:228] Iteration 53700, loss = 0.000183262\nI0819 01:27:53.701627 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:27:53.701643 17472 solver.cpp:244]     Train net output #1: loss = 0.000183347 (* 1 = 0.000183347 loss)\nI0819 01:27:53.783419 17472 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0819 01:30:10.899596 17472 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:31:33.125145 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71416\nI0819 01:31:33.125403 17472 solver.cpp:404]     Test net output #1: loss = 1.52278 (* 1 = 1.52278 loss)\nI0819 01:31:34.453809 17472 solver.cpp:228] Iteration 53800, loss = 0.000202004\nI0819 01:31:34.453853 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:31:34.453869 17472 solver.cpp:244]     Train net output #1: loss = 0.000202089 (* 1 = 0.000202089 loss)\nI0819 01:31:34.537230 17472 sgd_solver.cpp:166] Iteration 53800, lr 
= 0.035\nI0819 01:33:51.545639 17472 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:35:13.736766 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0819 01:35:13.737025 17472 solver.cpp:404]     Test net output #1: loss = 1.52208 (* 1 = 1.52208 loss)\nI0819 01:35:15.065774 17472 solver.cpp:228] Iteration 53900, loss = 0.000166101\nI0819 01:35:15.065820 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:35:15.065837 17472 solver.cpp:244]     Train net output #1: loss = 0.000166186 (* 1 = 0.000166186 loss)\nI0819 01:35:15.145318 17472 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0819 01:37:32.398749 17472 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:38:54.410460 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 01:38:54.410696 17472 solver.cpp:404]     Test net output #1: loss = 1.52419 (* 1 = 1.52419 loss)\nI0819 01:38:55.739190 17472 solver.cpp:228] Iteration 54000, loss = 0.000183266\nI0819 01:38:55.739235 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:38:55.739251 17472 solver.cpp:244]     Train net output #1: loss = 0.000183351 (* 1 = 0.000183351 loss)\nI0819 01:38:55.823858 17472 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0819 01:41:12.808696 17472 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:42:34.878614 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 01:42:34.878927 17472 solver.cpp:404]     Test net output #1: loss = 1.52443 (* 1 = 1.52443 loss)\nI0819 01:42:36.207726 17472 solver.cpp:228] Iteration 54100, loss = 0.000185573\nI0819 01:42:36.207772 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:42:36.207787 17472 solver.cpp:244]     Train net output #1: loss = 0.000185658 (* 1 = 0.000185658 loss)\nI0819 01:42:36.288004 17472 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0819 01:44:53.236348 17472 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:46:15.302880 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 01:46:15.303164 17472 solver.cpp:404]     Test net output #1: loss = 1.52417 (* 1 = 1.52417 loss)\nI0819 01:46:16.631050 17472 solver.cpp:228] Iteration 54200, loss = 0.000186259\nI0819 01:46:16.631098 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:46:16.631115 17472 solver.cpp:244]     Train net output #1: loss = 0.000186344 (* 1 = 0.000186344 loss)\nI0819 01:46:16.713795 17472 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0819 01:48:33.676517 17472 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 01:49:55.973423 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 01:49:55.973786 17472 solver.cpp:404]     Test net output #1: loss = 1.52469 (* 1 = 1.52469 loss)\nI0819 01:49:57.301702 17472 solver.cpp:228] Iteration 54300, loss = 0.000160787\nI0819 01:49:57.301744 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:49:57.301761 17472 solver.cpp:244]     Train net output #1: loss = 0.000160872 (* 1 = 0.000160872 loss)\nI0819 01:49:57.386174 17472 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0819 01:52:14.403379 17472 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:53:36.700103 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71484\nI0819 01:53:36.700520 17472 solver.cpp:404]     Test net output #1: loss = 1.52318 (* 1 = 1.52318 loss)\nI0819 01:53:38.028308 17472 solver.cpp:228] Iteration 54400, loss = 0.000179117\nI0819 01:53:38.028352 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:53:38.028369 17472 solver.cpp:244]     Train net output #1: loss = 0.000179202 (* 1 = 0.000179202 loss)\nI0819 01:53:38.114428 17472 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0819 01:55:55.179237 17472 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:57:17.448532 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 01:57:17.448868 17472 solver.cpp:404]     Test net 
output #1: loss = 1.52486 (* 1 = 1.52486 loss)\nI0819 01:57:18.778043 17472 solver.cpp:228] Iteration 54500, loss = 0.000202291\nI0819 01:57:18.778079 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:57:18.778095 17472 solver.cpp:244]     Train net output #1: loss = 0.000202376 (* 1 = 0.000202376 loss)\nI0819 01:57:18.862063 17472 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0819 01:59:35.939649 17472 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 02:00:58.191084 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71448\nI0819 02:00:58.191380 17472 solver.cpp:404]     Test net output #1: loss = 1.52312 (* 1 = 1.52312 loss)\nI0819 02:00:59.519062 17472 solver.cpp:228] Iteration 54600, loss = 0.000184964\nI0819 02:00:59.519105 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:00:59.519122 17472 solver.cpp:244]     Train net output #1: loss = 0.000185049 (* 1 = 0.000185049 loss)\nI0819 02:00:59.605979 17472 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0819 02:03:16.823175 17472 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 02:04:39.086302 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7142\nI0819 02:04:39.086549 17472 solver.cpp:404]     Test net output #1: loss = 1.52431 (* 1 = 1.52431 loss)\nI0819 02:04:40.414618 17472 solver.cpp:228] Iteration 54700, loss = 0.000172986\nI0819 02:04:40.414660 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:04:40.414677 17472 solver.cpp:244]     Train net output #1: loss = 0.000173071 (* 1 = 0.000173071 loss)\nI0819 02:04:40.498168 17472 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0819 02:06:57.561697 17472 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 02:08:19.842572 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71348\nI0819 02:08:19.842823 17472 solver.cpp:404]     Test net output #1: loss = 1.52676 (* 1 = 1.52676 loss)\nI0819 02:08:21.170811 17472 solver.cpp:228] Iteration 54800, loss = 
0.000187568\nI0819 02:08:21.170859 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:08:21.170876 17472 solver.cpp:244]     Train net output #1: loss = 0.000187653 (* 1 = 0.000187653 loss)\nI0819 02:08:21.257697 17472 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0819 02:10:38.243086 17472 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 02:12:00.540865 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 02:12:00.541118 17472 solver.cpp:404]     Test net output #1: loss = 1.52437 (* 1 = 1.52437 loss)\nI0819 02:12:01.869467 17472 solver.cpp:228] Iteration 54900, loss = 0.000178528\nI0819 02:12:01.869514 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:12:01.869531 17472 solver.cpp:244]     Train net output #1: loss = 0.000178613 (* 1 = 0.000178613 loss)\nI0819 02:12:01.949731 17472 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0819 02:14:19.225608 17472 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 02:15:41.547287 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 02:15:41.547561 17472 solver.cpp:404]     Test net output #1: loss = 1.52272 (* 1 = 1.52272 loss)\nI0819 02:15:42.876229 17472 solver.cpp:228] Iteration 55000, loss = 0.000186829\nI0819 02:15:42.876279 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:15:42.876296 17472 solver.cpp:244]     Train net output #1: loss = 0.000186914 (* 1 = 0.000186914 loss)\nI0819 02:15:42.960052 17472 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0819 02:18:00.202754 17472 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 02:19:22.479184 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 02:19:22.479434 17472 solver.cpp:404]     Test net output #1: loss = 1.52265 (* 1 = 1.52265 loss)\nI0819 02:19:23.807876 17472 solver.cpp:228] Iteration 55100, loss = 0.000164799\nI0819 02:19:23.807926 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:19:23.807943 
17472 solver.cpp:244]     Train net output #1: loss = 0.000164884 (* 1 = 0.000164884 loss)\nI0819 02:19:23.894701 17472 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0819 02:21:41.203991 17472 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:23:03.310613 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71352\nI0819 02:23:03.310933 17472 solver.cpp:404]     Test net output #1: loss = 1.52468 (* 1 = 1.52468 loss)\nI0819 02:23:04.639291 17472 solver.cpp:228] Iteration 55200, loss = 0.000179642\nI0819 02:23:04.639341 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:23:04.639358 17472 solver.cpp:244]     Train net output #1: loss = 0.000179727 (* 1 = 0.000179727 loss)\nI0819 02:23:04.722246 17472 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0819 02:25:22.095125 17472 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:26:44.298424 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 02:26:44.298699 17472 solver.cpp:404]     Test net output #1: loss = 1.52676 (* 1 = 1.52676 loss)\nI0819 02:26:45.627018 17472 solver.cpp:228] Iteration 55300, loss = 0.000183815\nI0819 02:26:45.627071 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:26:45.627089 17472 solver.cpp:244]     Train net output #1: loss = 0.0001839 (* 1 = 0.0001839 loss)\nI0819 02:26:45.707087 17472 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0819 02:29:02.813050 17472 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:30:25.043078 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 02:30:25.043318 17472 solver.cpp:404]     Test net output #1: loss = 1.52506 (* 1 = 1.52506 loss)\nI0819 02:30:26.371503 17472 solver.cpp:228] Iteration 55400, loss = 0.000186053\nI0819 02:30:26.371552 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:30:26.371569 17472 solver.cpp:244]     Train net output #1: loss = 0.000186138 (* 1 = 0.000186138 loss)\nI0819 02:30:26.451947 17472 
sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0819 02:32:43.478682 17472 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 02:34:05.553424 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0819 02:34:05.553656 17472 solver.cpp:404]     Test net output #1: loss = 1.52563 (* 1 = 1.52563 loss)\nI0819 02:34:06.881846 17472 solver.cpp:228] Iteration 55500, loss = 0.000174763\nI0819 02:34:06.881896 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:34:06.881911 17472 solver.cpp:244]     Train net output #1: loss = 0.000174848 (* 1 = 0.000174848 loss)\nI0819 02:34:06.966267 17472 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0819 02:36:24.179301 17472 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:37:46.342952 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71532\nI0819 02:37:46.343231 17472 solver.cpp:404]     Test net output #1: loss = 1.5251 (* 1 = 1.5251 loss)\nI0819 02:37:47.671875 17472 solver.cpp:228] Iteration 55600, loss = 0.000168387\nI0819 02:37:47.671926 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:37:47.671941 17472 solver.cpp:244]     Train net output #1: loss = 0.000168472 (* 1 = 0.000168472 loss)\nI0819 02:37:47.753466 17472 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0819 02:40:05.013176 17472 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 02:41:27.333784 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7154\nI0819 02:41:27.334060 17472 solver.cpp:404]     Test net output #1: loss = 1.52256 (* 1 = 1.52256 loss)\nI0819 02:41:28.663614 17472 solver.cpp:228] Iteration 55700, loss = 0.000176566\nI0819 02:41:28.663664 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:41:28.663681 17472 solver.cpp:244]     Train net output #1: loss = 0.000176651 (* 1 = 0.000176651 loss)\nI0819 02:41:28.749727 17472 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0819 02:43:46.227138 17472 solver.cpp:337] Iteration 55800, Testing net 
(#0)\nI0819 02:45:08.574132 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71508\nI0819 02:45:08.574384 17472 solver.cpp:404]     Test net output #1: loss = 1.52488 (* 1 = 1.52488 loss)\nI0819 02:45:09.904078 17472 solver.cpp:228] Iteration 55800, loss = 0.00020541\nI0819 02:45:09.904130 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:45:09.904146 17472 solver.cpp:244]     Train net output #1: loss = 0.000205495 (* 1 = 0.000205495 loss)\nI0819 02:45:09.988251 17472 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0819 02:47:27.116714 17472 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 02:48:49.431324 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71492\nI0819 02:48:49.431601 17472 solver.cpp:404]     Test net output #1: loss = 1.52539 (* 1 = 1.52539 loss)\nI0819 02:48:50.760141 17472 solver.cpp:228] Iteration 55900, loss = 0.000158516\nI0819 02:48:50.760185 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:48:50.760201 17472 solver.cpp:244]     Train net output #1: loss = 0.000158601 (* 1 = 0.000158601 loss)\nI0819 02:48:50.843555 17472 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0819 02:51:07.890722 17472 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 02:52:30.192212 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71488\nI0819 02:52:30.192488 17472 solver.cpp:404]     Test net output #1: loss = 1.52378 (* 1 = 1.52378 loss)\nI0819 02:52:31.520982 17472 solver.cpp:228] Iteration 56000, loss = 0.000196558\nI0819 02:52:31.521029 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:52:31.521045 17472 solver.cpp:244]     Train net output #1: loss = 0.000196643 (* 1 = 0.000196643 loss)\nI0819 02:52:31.604768 17472 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0819 02:54:48.771826 17472 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 02:56:11.117714 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 02:56:11.118024 
17472 solver.cpp:404]     Test net output #1: loss = 1.52507 (* 1 = 1.52507 loss)\nI0819 02:56:12.446892 17472 solver.cpp:228] Iteration 56100, loss = 0.000177333\nI0819 02:56:12.446933 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:56:12.446952 17472 solver.cpp:244]     Train net output #1: loss = 0.000177418 (* 1 = 0.000177418 loss)\nI0819 02:56:12.528712 17472 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0819 02:58:29.640903 17472 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 02:59:51.926239 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7154\nI0819 02:59:51.926522 17472 solver.cpp:404]     Test net output #1: loss = 1.52322 (* 1 = 1.52322 loss)\nI0819 02:59:53.255571 17472 solver.cpp:228] Iteration 56200, loss = 0.000156706\nI0819 02:59:53.255611 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:59:53.255627 17472 solver.cpp:244]     Train net output #1: loss = 0.000156791 (* 1 = 0.000156791 loss)\nI0819 02:59:53.341470 17472 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0819 03:02:10.761265 17472 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 03:03:33.054947 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71408\nI0819 03:03:33.055217 17472 solver.cpp:404]     Test net output #1: loss = 1.52744 (* 1 = 1.52744 loss)\nI0819 03:03:34.384109 17472 solver.cpp:228] Iteration 56300, loss = 0.000192648\nI0819 03:03:34.384148 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:03:34.384163 17472 solver.cpp:244]     Train net output #1: loss = 0.000192733 (* 1 = 0.000192733 loss)\nI0819 03:03:34.469660 17472 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0819 03:05:51.606806 17472 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 03:07:13.895851 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 03:07:13.896106 17472 solver.cpp:404]     Test net output #1: loss = 1.52545 (* 1 = 1.52545 loss)\nI0819 03:07:15.224411 17472 
solver.cpp:228] Iteration 56400, loss = 0.00018763\nI0819 03:07:15.224450 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:07:15.224467 17472 solver.cpp:244]     Train net output #1: loss = 0.000187715 (* 1 = 0.000187715 loss)\nI0819 03:07:15.305955 17472 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0819 03:09:32.349036 17472 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 03:10:54.626389 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71412\nI0819 03:10:54.626667 17472 solver.cpp:404]     Test net output #1: loss = 1.52667 (* 1 = 1.52667 loss)\nI0819 03:10:55.955319 17472 solver.cpp:228] Iteration 56500, loss = 0.000169488\nI0819 03:10:55.955360 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:10:55.955376 17472 solver.cpp:244]     Train net output #1: loss = 0.000169573 (* 1 = 0.000169573 loss)\nI0819 03:10:56.035825 17472 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0819 03:13:13.185698 17472 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 03:14:35.475203 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71448\nI0819 03:14:35.475476 17472 solver.cpp:404]     Test net output #1: loss = 1.52756 (* 1 = 1.52756 loss)\nI0819 03:14:36.803895 17472 solver.cpp:228] Iteration 56600, loss = 0.000180859\nI0819 03:14:36.803937 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:14:36.803952 17472 solver.cpp:244]     Train net output #1: loss = 0.000180944 (* 1 = 0.000180944 loss)\nI0819 03:14:36.888113 17472 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0819 03:16:54.010097 17472 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 03:18:16.288954 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71468\nI0819 03:18:16.289211 17472 solver.cpp:404]     Test net output #1: loss = 1.52847 (* 1 = 1.52847 loss)\nI0819 03:18:17.618091 17472 solver.cpp:228] Iteration 56700, loss = 0.000171622\nI0819 03:18:17.618130 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 03:18:17.618146 17472 solver.cpp:244]     Train net output #1: loss = 0.000171707 (* 1 = 0.000171707 loss)\nI0819 03:18:17.699074 17472 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0819 03:20:34.974915 17472 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 03:21:57.248991 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7152\nI0819 03:21:57.249251 17472 solver.cpp:404]     Test net output #1: loss = 1.52766 (* 1 = 1.52766 loss)\nI0819 03:21:58.577628 17472 solver.cpp:228] Iteration 56800, loss = 0.000173792\nI0819 03:21:58.577668 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:21:58.577684 17472 solver.cpp:244]     Train net output #1: loss = 0.000173877 (* 1 = 0.000173877 loss)\nI0819 03:21:58.664495 17472 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0819 03:24:15.746573 17472 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:25:38.028697 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71464\nI0819 03:25:38.028949 17472 solver.cpp:404]     Test net output #1: loss = 1.53126 (* 1 = 1.53126 loss)\nI0819 03:25:39.357236 17472 solver.cpp:228] Iteration 56900, loss = 0.000160258\nI0819 03:25:39.357277 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:25:39.357293 17472 solver.cpp:244]     Train net output #1: loss = 0.000160343 (* 1 = 0.000160343 loss)\nI0819 03:25:39.441648 17472 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0819 03:27:56.692075 17472 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:29:18.992223 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71396\nI0819 03:29:18.992544 17472 solver.cpp:404]     Test net output #1: loss = 1.52995 (* 1 = 1.52995 loss)\nI0819 03:29:20.321187 17472 solver.cpp:228] Iteration 57000, loss = 0.000185817\nI0819 03:29:20.321228 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:29:20.321244 17472 solver.cpp:244]     Train net output #1: loss = 0.000185902 (* 1 = 
0.000185902 loss)\nI0819 03:29:20.403913 17472 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0819 03:31:37.449347 17472 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 03:32:59.769696 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71364\nI0819 03:32:59.770066 17472 solver.cpp:404]     Test net output #1: loss = 1.53021 (* 1 = 1.53021 loss)\nI0819 03:33:01.098544 17472 solver.cpp:228] Iteration 57100, loss = 0.000173548\nI0819 03:33:01.098584 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:33:01.098599 17472 solver.cpp:244]     Train net output #1: loss = 0.000173633 (* 1 = 0.000173633 loss)\nI0819 03:33:01.181566 17472 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0819 03:35:18.248447 17472 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:36:40.592561 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 03:36:40.592913 17472 solver.cpp:404]     Test net output #1: loss = 1.53022 (* 1 = 1.53022 loss)\nI0819 03:36:41.921967 17472 solver.cpp:228] Iteration 57200, loss = 0.000181532\nI0819 03:36:41.922006 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:36:41.922022 17472 solver.cpp:244]     Train net output #1: loss = 0.000181617 (* 1 = 0.000181617 loss)\nI0819 03:36:42.007469 17472 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0819 03:38:59.045733 17472 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 03:40:21.391712 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7138\nI0819 03:40:21.392069 17472 solver.cpp:404]     Test net output #1: loss = 1.52824 (* 1 = 1.52824 loss)\nI0819 03:40:22.720968 17472 solver.cpp:228] Iteration 57300, loss = 0.000179971\nI0819 03:40:22.721007 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:40:22.721022 17472 solver.cpp:244]     Train net output #1: loss = 0.000180056 (* 1 = 0.000180056 loss)\nI0819 03:40:22.806203 17472 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0819 03:42:39.825191 
17472 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 03:44:02.160301 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 03:44:02.160660 17472 solver.cpp:404]     Test net output #1: loss = 1.52992 (* 1 = 1.52992 loss)\nI0819 03:44:03.489329 17472 solver.cpp:228] Iteration 57400, loss = 0.000183868\nI0819 03:44:03.489369 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:44:03.489385 17472 solver.cpp:244]     Train net output #1: loss = 0.000183953 (* 1 = 0.000183953 loss)\nI0819 03:44:03.568982 17472 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0819 03:46:20.650995 17472 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 03:47:42.980473 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71436\nI0819 03:47:42.980841 17472 solver.cpp:404]     Test net output #1: loss = 1.52724 (* 1 = 1.52724 loss)\nI0819 03:47:44.308882 17472 solver.cpp:228] Iteration 57500, loss = 0.00020222\nI0819 03:47:44.308917 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:47:44.308933 17472 solver.cpp:244]     Train net output #1: loss = 0.000202305 (* 1 = 0.000202305 loss)\nI0819 03:47:44.393193 17472 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0819 03:50:01.699095 17472 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 03:51:24.016394 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71452\nI0819 03:51:24.016744 17472 solver.cpp:404]     Test net output #1: loss = 1.52917 (* 1 = 1.52917 loss)\nI0819 03:51:25.345531 17472 solver.cpp:228] Iteration 57600, loss = 0.00015868\nI0819 03:51:25.345566 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:51:25.345582 17472 solver.cpp:244]     Train net output #1: loss = 0.000158765 (* 1 = 0.000158765 loss)\nI0819 03:51:25.430855 17472 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0819 03:53:42.548758 17472 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 03:55:04.875102 17472 solver.cpp:404]     Test net output 
#0: accuracy = 0.71364\nI0819 03:55:04.875387 17472 solver.cpp:404]     Test net output #1: loss = 1.52824 (* 1 = 1.52824 loss)\nI0819 03:55:06.204463 17472 solver.cpp:228] Iteration 57700, loss = 0.000191736\nI0819 03:55:06.204499 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:55:06.204514 17472 solver.cpp:244]     Train net output #1: loss = 0.000191821 (* 1 = 0.000191821 loss)\nI0819 03:55:06.287557 17472 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0819 03:57:23.344964 17472 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 03:58:45.656615 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71396\nI0819 03:58:45.656908 17472 solver.cpp:404]     Test net output #1: loss = 1.52914 (* 1 = 1.52914 loss)\nI0819 03:58:46.985553 17472 solver.cpp:228] Iteration 57800, loss = 0.000183251\nI0819 03:58:46.985590 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:58:46.985605 17472 solver.cpp:244]     Train net output #1: loss = 0.000183336 (* 1 = 0.000183336 loss)\nI0819 03:58:47.063635 17472 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0819 04:01:04.354311 17472 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 04:02:26.666463 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71408\nI0819 04:02:26.666776 17472 solver.cpp:404]     Test net output #1: loss = 1.52886 (* 1 = 1.52886 loss)\nI0819 04:02:27.995312 17472 solver.cpp:228] Iteration 57900, loss = 0.000193337\nI0819 04:02:27.995352 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:02:27.995368 17472 solver.cpp:244]     Train net output #1: loss = 0.000193422 (* 1 = 0.000193422 loss)\nI0819 04:02:28.078132 17472 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0819 04:04:45.169575 17472 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 04:06:07.482142 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71536\nI0819 04:06:07.482396 17472 solver.cpp:404]     Test net output #1: loss = 1.52778 (* 1 = 
1.52778 loss)\nI0819 04:06:08.810986 17472 solver.cpp:228] Iteration 58000, loss = 0.000180746\nI0819 04:06:08.811028 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:06:08.811048 17472 solver.cpp:244]     Train net output #1: loss = 0.000180831 (* 1 = 0.000180831 loss)\nI0819 04:06:08.891777 17472 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0819 04:08:26.210631 17472 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 04:09:48.490855 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71516\nI0819 04:09:48.491112 17472 solver.cpp:404]     Test net output #1: loss = 1.52687 (* 1 = 1.52687 loss)\nI0819 04:09:49.819733 17472 solver.cpp:228] Iteration 58100, loss = 0.000172182\nI0819 04:09:49.819772 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:09:49.819787 17472 solver.cpp:244]     Train net output #1: loss = 0.000172267 (* 1 = 0.000172267 loss)\nI0819 04:09:49.906131 17472 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0819 04:12:06.994959 17472 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 04:13:29.283174 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71448\nI0819 04:13:29.283452 17472 solver.cpp:404]     Test net output #1: loss = 1.52755 (* 1 = 1.52755 loss)\nI0819 04:13:30.612368 17472 solver.cpp:228] Iteration 58200, loss = 0.000206435\nI0819 04:13:30.612409 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:13:30.612426 17472 solver.cpp:244]     Train net output #1: loss = 0.00020652 (* 1 = 0.00020652 loss)\nI0819 04:13:30.696853 17472 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0819 04:15:47.766242 17472 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 04:17:10.040343 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0819 04:17:10.040606 17472 solver.cpp:404]     Test net output #1: loss = 1.52579 (* 1 = 1.52579 loss)\nI0819 04:17:11.368966 17472 solver.cpp:228] Iteration 58300, loss = 0.000176687\nI0819 04:17:11.369007 
17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:17:11.369024 17472 solver.cpp:244]     Train net output #1: loss = 0.000176772 (* 1 = 0.000176772 loss)\nI0819 04:17:11.453239 17472 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0819 04:19:28.415040 17472 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 04:20:50.699573 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71396\nI0819 04:20:50.699856 17472 solver.cpp:404]     Test net output #1: loss = 1.52955 (* 1 = 1.52955 loss)\nI0819 04:20:52.028409 17472 solver.cpp:228] Iteration 58400, loss = 0.000192468\nI0819 04:20:52.028450 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:20:52.028465 17472 solver.cpp:244]     Train net output #1: loss = 0.000192553 (* 1 = 0.000192553 loss)\nI0819 04:20:52.110870 17472 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0819 04:23:09.413321 17472 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:24:30.891014 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71488\nI0819 04:24:30.891324 17472 solver.cpp:404]     Test net output #1: loss = 1.52648 (* 1 = 1.52648 loss)\nI0819 04:24:32.212122 17472 solver.cpp:228] Iteration 58500, loss = 0.000193086\nI0819 04:24:32.212174 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:24:32.212190 17472 solver.cpp:244]     Train net output #1: loss = 0.000193171 (* 1 = 0.000193171 loss)\nI0819 04:24:32.297991 17472 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0819 04:26:49.048439 17472 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:28:10.536394 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71576\nI0819 04:28:10.536733 17472 solver.cpp:404]     Test net output #1: loss = 1.52467 (* 1 = 1.52467 loss)\nI0819 04:28:11.857775 17472 solver.cpp:228] Iteration 58600, loss = 0.000155226\nI0819 04:28:11.857823 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:28:11.857839 17472 solver.cpp:244]     Train net 
output #1: loss = 0.000155311 (* 1 = 0.000155311 loss)\nI0819 04:28:11.939308 17472 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0819 04:30:28.838639 17472 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 04:31:50.315978 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 04:31:50.316323 17472 solver.cpp:404]     Test net output #1: loss = 1.52756 (* 1 = 1.52756 loss)\nI0819 04:31:51.636699 17472 solver.cpp:228] Iteration 58700, loss = 0.00017839\nI0819 04:31:51.636747 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:31:51.636765 17472 solver.cpp:244]     Train net output #1: loss = 0.000178475 (* 1 = 0.000178475 loss)\nI0819 04:31:51.725487 17472 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0819 04:34:08.611610 17472 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:35:30.094730 17472 solver.cpp:404]     Test net output #0: accuracy = 0.714\nI0819 04:35:30.095054 17472 solver.cpp:404]     Test net output #1: loss = 1.52826 (* 1 = 1.52826 loss)\nI0819 04:35:31.415405 17472 solver.cpp:228] Iteration 58800, loss = 0.000173302\nI0819 04:35:31.415452 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:35:31.415468 17472 solver.cpp:244]     Train net output #1: loss = 0.000173387 (* 1 = 0.000173387 loss)\nI0819 04:35:31.506261 17472 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0819 04:37:48.122865 17472 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 04:39:09.602377 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0819 04:39:09.602710 17472 solver.cpp:404]     Test net output #1: loss = 1.52674 (* 1 = 1.52674 loss)\nI0819 04:39:10.922998 17472 solver.cpp:228] Iteration 58900, loss = 0.00016265\nI0819 04:39:10.923046 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:39:10.923063 17472 solver.cpp:244]     Train net output #1: loss = 0.000162735 (* 1 = 0.000162735 loss)\nI0819 04:39:11.008729 17472 sgd_solver.cpp:166] Iteration 58900, 
lr = 0.035\nI0819 04:41:27.899507 17472 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 04:42:49.394456 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0819 04:42:49.394794 17472 solver.cpp:404]     Test net output #1: loss = 1.52615 (* 1 = 1.52615 loss)\nI0819 04:42:50.715708 17472 solver.cpp:228] Iteration 59000, loss = 0.000169971\nI0819 04:42:50.715754 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:42:50.715771 17472 solver.cpp:244]     Train net output #1: loss = 0.000170056 (* 1 = 0.000170056 loss)\nI0819 04:42:50.805452 17472 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0819 04:45:07.483744 17472 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 04:46:28.978906 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7142\nI0819 04:46:28.979243 17472 solver.cpp:404]     Test net output #1: loss = 1.52828 (* 1 = 1.52828 loss)\nI0819 04:46:30.299576 17472 solver.cpp:228] Iteration 59100, loss = 0.000192122\nI0819 04:46:30.299623 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:46:30.299638 17472 solver.cpp:244]     Train net output #1: loss = 0.000192207 (* 1 = 0.000192207 loss)\nI0819 04:46:30.391227 17472 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0819 04:48:47.128808 17472 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 04:50:08.607417 17472 solver.cpp:404]     Test net output #0: accuracy = 0.715\nI0819 04:50:08.607755 17472 solver.cpp:404]     Test net output #1: loss = 1.52937 (* 1 = 1.52937 loss)\nI0819 04:50:09.928459 17472 solver.cpp:228] Iteration 59200, loss = 0.000172992\nI0819 04:50:09.928506 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:50:09.928522 17472 solver.cpp:244]     Train net output #1: loss = 0.000173077 (* 1 = 0.000173077 loss)\nI0819 04:50:10.015492 17472 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0819 04:52:26.919188 17472 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 04:53:48.406302 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.71476\nI0819 04:53:48.406632 17472 solver.cpp:404]     Test net output #1: loss = 1.52677 (* 1 = 1.52677 loss)\nI0819 04:53:49.727319 17472 solver.cpp:228] Iteration 59300, loss = 0.000174725\nI0819 04:53:49.727365 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:53:49.727380 17472 solver.cpp:244]     Train net output #1: loss = 0.00017481 (* 1 = 0.00017481 loss)\nI0819 04:53:49.813702 17472 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0819 04:56:06.356536 17472 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 04:57:27.847604 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71484\nI0819 04:57:27.847940 17472 solver.cpp:404]     Test net output #1: loss = 1.52747 (* 1 = 1.52747 loss)\nI0819 04:57:29.168577 17472 solver.cpp:228] Iteration 59400, loss = 0.00017805\nI0819 04:57:29.168622 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:57:29.168637 17472 solver.cpp:244]     Train net output #1: loss = 0.000178135 (* 1 = 0.000178135 loss)\nI0819 04:57:29.255923 17472 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0819 04:59:46.066776 17472 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 05:01:07.564646 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71408\nI0819 05:01:07.564983 17472 solver.cpp:404]     Test net output #1: loss = 1.52843 (* 1 = 1.52843 loss)\nI0819 05:01:08.885548 17472 solver.cpp:228] Iteration 59500, loss = 0.000172765\nI0819 05:01:08.885594 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:01:08.885610 17472 solver.cpp:244]     Train net output #1: loss = 0.00017285 (* 1 = 0.00017285 loss)\nI0819 05:01:08.973489 17472 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0819 05:03:25.506348 17472 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 05:04:47.035145 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 05:04:47.035482 17472 solver.cpp:404]     Test net output 
#1: loss = 1.52748 (* 1 = 1.52748 loss)\nI0819 05:04:48.356916 17472 solver.cpp:228] Iteration 59600, loss = 0.000159811\nI0819 05:04:48.356963 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:04:48.356986 17472 solver.cpp:244]     Train net output #1: loss = 0.000159896 (* 1 = 0.000159896 loss)\nI0819 05:04:48.440719 17472 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0819 05:07:05.033722 17472 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 05:08:26.547950 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71476\nI0819 05:08:26.548267 17472 solver.cpp:404]     Test net output #1: loss = 1.52731 (* 1 = 1.52731 loss)\nI0819 05:08:27.868819 17472 solver.cpp:228] Iteration 59700, loss = 0.000168052\nI0819 05:08:27.868866 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:08:27.868891 17472 solver.cpp:244]     Train net output #1: loss = 0.000168137 (* 1 = 0.000168137 loss)\nI0819 05:08:27.956713 17472 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0819 05:10:44.757935 17472 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 05:12:06.261699 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71556\nI0819 05:12:06.262038 17472 solver.cpp:404]     Test net output #1: loss = 1.52651 (* 1 = 1.52651 loss)\nI0819 05:12:07.584035 17472 solver.cpp:228] Iteration 59800, loss = 0.00020042\nI0819 05:12:07.584084 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:12:07.584108 17472 solver.cpp:244]     Train net output #1: loss = 0.000200505 (* 1 = 0.000200505 loss)\nI0819 05:12:07.671521 17472 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0819 05:14:24.602949 17472 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 05:15:46.109881 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71376\nI0819 05:15:46.110211 17472 solver.cpp:404]     Test net output #1: loss = 1.52857 (* 1 = 1.52857 loss)\nI0819 05:15:47.431109 17472 solver.cpp:228] Iteration 59900, loss = 
0.000205929\nI0819 05:15:47.431159 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:15:47.431182 17472 solver.cpp:244]     Train net output #1: loss = 0.000206014 (* 1 = 0.000206014 loss)\nI0819 05:15:47.515480 17472 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0819 05:18:04.289973 17472 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 05:19:25.777573 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0819 05:19:25.777895 17472 solver.cpp:404]     Test net output #1: loss = 1.52947 (* 1 = 1.52947 loss)\nI0819 05:19:27.097734 17472 solver.cpp:228] Iteration 60000, loss = 0.000166221\nI0819 05:19:27.097780 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:19:27.097795 17472 solver.cpp:244]     Train net output #1: loss = 0.000166306 (* 1 = 0.000166306 loss)\nI0819 05:19:27.188617 17472 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0819 05:21:43.821976 17472 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:23:05.314676 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 05:23:05.314997 17472 solver.cpp:404]     Test net output #1: loss = 1.52785 (* 1 = 1.52785 loss)\nI0819 05:23:06.635385 17472 solver.cpp:228] Iteration 60100, loss = 0.000153946\nI0819 05:23:06.635432 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:23:06.635448 17472 solver.cpp:244]     Train net output #1: loss = 0.000154031 (* 1 = 0.000154031 loss)\nI0819 05:23:06.717620 17472 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0819 05:25:23.506170 17472 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:26:44.994771 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71376\nI0819 05:26:44.995126 17472 solver.cpp:404]     Test net output #1: loss = 1.53075 (* 1 = 1.53075 loss)\nI0819 05:26:46.315722 17472 solver.cpp:228] Iteration 60200, loss = 0.00018708\nI0819 05:26:46.315768 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:26:46.315784 
17472 solver.cpp:244]     Train net output #1: loss = 0.000187165 (* 1 = 0.000187165 loss)\nI0819 05:26:46.402122 17472 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0819 05:29:03.323632 17472 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 05:30:24.809634 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 05:30:24.809963 17472 solver.cpp:404]     Test net output #1: loss = 1.53142 (* 1 = 1.53142 loss)\nI0819 05:30:26.130002 17472 solver.cpp:228] Iteration 60300, loss = 0.000195621\nI0819 05:30:26.130048 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:30:26.130062 17472 solver.cpp:244]     Train net output #1: loss = 0.000195706 (* 1 = 0.000195706 loss)\nI0819 05:30:26.213425 17472 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0819 05:32:42.960577 17472 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:34:04.453958 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71412\nI0819 05:34:04.454300 17472 solver.cpp:404]     Test net output #1: loss = 1.5287 (* 1 = 1.5287 loss)\nI0819 05:34:05.774039 17472 solver.cpp:228] Iteration 60400, loss = 0.0001637\nI0819 05:34:05.774085 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:34:05.774101 17472 solver.cpp:244]     Train net output #1: loss = 0.000163785 (* 1 = 0.000163785 loss)\nI0819 05:34:05.862951 17472 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0819 05:36:22.743440 17472 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 05:37:44.223208 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71344\nI0819 05:37:44.223539 17472 solver.cpp:404]     Test net output #1: loss = 1.5295 (* 1 = 1.5295 loss)\nI0819 05:37:45.542659 17472 solver.cpp:228] Iteration 60500, loss = 0.000174081\nI0819 05:37:45.542706 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:37:45.542721 17472 solver.cpp:244]     Train net output #1: loss = 0.000174166 (* 1 = 0.000174166 loss)\nI0819 05:37:45.630084 17472 
sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0819 05:40:02.441973 17472 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 05:41:23.934394 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71416\nI0819 05:41:23.934723 17472 solver.cpp:404]     Test net output #1: loss = 1.53082 (* 1 = 1.53082 loss)\nI0819 05:41:25.254750 17472 solver.cpp:228] Iteration 60600, loss = 0.000184197\nI0819 05:41:25.254797 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:41:25.254813 17472 solver.cpp:244]     Train net output #1: loss = 0.000184282 (* 1 = 0.000184282 loss)\nI0819 05:41:25.344101 17472 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0819 05:43:42.211940 17472 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 05:45:03.699831 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71448\nI0819 05:45:03.700161 17472 solver.cpp:404]     Test net output #1: loss = 1.52803 (* 1 = 1.52803 loss)\nI0819 05:45:05.020452 17472 solver.cpp:228] Iteration 60700, loss = 0.000161594\nI0819 05:45:05.020500 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:45:05.020516 17472 solver.cpp:244]     Train net output #1: loss = 0.000161679 (* 1 = 0.000161679 loss)\nI0819 05:45:05.108824 17472 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0819 05:47:22.004204 17472 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 05:48:43.502089 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71516\nI0819 05:48:43.502408 17472 solver.cpp:404]     Test net output #1: loss = 1.52859 (* 1 = 1.52859 loss)\nI0819 05:48:44.822242 17472 solver.cpp:228] Iteration 60800, loss = 0.000174301\nI0819 05:48:44.822290 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:48:44.822306 17472 solver.cpp:244]     Train net output #1: loss = 0.000174386 (* 1 = 0.000174386 loss)\nI0819 05:48:44.907786 17472 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0819 05:51:01.825511 17472 solver.cpp:337] Iteration 60900, Testing 
net (#0)\nI0819 05:52:23.308890 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71412\nI0819 05:52:23.309228 17472 solver.cpp:404]     Test net output #1: loss = 1.52916 (* 1 = 1.52916 loss)\nI0819 05:52:24.628777 17472 solver.cpp:228] Iteration 60900, loss = 0.000179918\nI0819 05:52:24.628826 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:52:24.628844 17472 solver.cpp:244]     Train net output #1: loss = 0.000180003 (* 1 = 0.000180003 loss)\nI0819 05:52:24.718928 17472 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0819 05:54:41.657171 17472 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 05:56:03.135543 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0819 05:56:03.135896 17472 solver.cpp:404]     Test net output #1: loss = 1.52758 (* 1 = 1.52758 loss)\nI0819 05:56:04.456240 17472 solver.cpp:228] Iteration 61000, loss = 0.000216885\nI0819 05:56:04.456288 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:56:04.456305 17472 solver.cpp:244]     Train net output #1: loss = 0.00021697 (* 1 = 0.00021697 loss)\nI0819 05:56:04.542410 17472 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0819 05:58:21.319113 17472 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 05:59:42.804574 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7132\nI0819 05:59:42.804910 17472 solver.cpp:404]     Test net output #1: loss = 1.53323 (* 1 = 1.53323 loss)\nI0819 05:59:44.124969 17472 solver.cpp:228] Iteration 61100, loss = 0.000164356\nI0819 05:59:44.125018 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:59:44.125035 17472 solver.cpp:244]     Train net output #1: loss = 0.000164441 (* 1 = 0.000164441 loss)\nI0819 05:59:44.214920 17472 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0819 06:02:00.975775 17472 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 06:03:22.469010 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71236\nI0819 06:03:22.469362 
17472 solver.cpp:404]     Test net output #1: loss = 1.5345 (* 1 = 1.5345 loss)\nI0819 06:03:23.789283 17472 solver.cpp:228] Iteration 61200, loss = 0.000188177\nI0819 06:03:23.789331 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:03:23.789348 17472 solver.cpp:244]     Train net output #1: loss = 0.000188262 (* 1 = 0.000188262 loss)\nI0819 06:03:23.875376 17472 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0819 06:05:40.550580 17472 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 06:07:02.033505 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71288\nI0819 06:07:02.033844 17472 solver.cpp:404]     Test net output #1: loss = 1.53222 (* 1 = 1.53222 loss)\nI0819 06:07:03.353468 17472 solver.cpp:228] Iteration 61300, loss = 0.000191074\nI0819 06:07:03.353518 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:07:03.353533 17472 solver.cpp:244]     Train net output #1: loss = 0.000191159 (* 1 = 0.000191159 loss)\nI0819 06:07:03.440919 17472 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0819 06:09:20.219607 17472 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 06:10:41.697872 17472 solver.cpp:404]     Test net output #0: accuracy = 0.714\nI0819 06:10:41.698216 17472 solver.cpp:404]     Test net output #1: loss = 1.53128 (* 1 = 1.53128 loss)\nI0819 06:10:43.017681 17472 solver.cpp:228] Iteration 61400, loss = 0.000206541\nI0819 06:10:43.017729 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:10:43.017745 17472 solver.cpp:244]     Train net output #1: loss = 0.000206626 (* 1 = 0.000206626 loss)\nI0819 06:10:43.109402 17472 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0819 06:12:59.998181 17472 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 06:14:21.473484 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0819 06:14:21.473803 17472 solver.cpp:404]     Test net output #1: loss = 1.53111 (* 1 = 1.53111 loss)\nI0819 06:14:22.792973 17472 
solver.cpp:228] Iteration 61500, loss = 0.000186098\nI0819 06:14:22.793020 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:14:22.793035 17472 solver.cpp:244]     Train net output #1: loss = 0.000186183 (* 1 = 0.000186183 loss)\nI0819 06:14:22.885854 17472 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0819 06:16:39.533532 17472 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 06:18:01.003742 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71436\nI0819 06:18:01.004061 17472 solver.cpp:404]     Test net output #1: loss = 1.52976 (* 1 = 1.52976 loss)\nI0819 06:18:02.323560 17472 solver.cpp:228] Iteration 61600, loss = 0.00017939\nI0819 06:18:02.323606 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:18:02.323622 17472 solver.cpp:244]     Train net output #1: loss = 0.000179475 (* 1 = 0.000179475 loss)\nI0819 06:18:02.410552 17472 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0819 06:20:19.181736 17472 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:21:40.660086 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71412\nI0819 06:21:40.660429 17472 solver.cpp:404]     Test net output #1: loss = 1.52914 (* 1 = 1.52914 loss)\nI0819 06:21:41.979724 17472 solver.cpp:228] Iteration 61700, loss = 0.000167749\nI0819 06:21:41.979773 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:21:41.979790 17472 solver.cpp:244]     Train net output #1: loss = 0.000167834 (* 1 = 0.000167834 loss)\nI0819 06:21:42.070179 17472 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0819 06:23:58.956655 17472 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:25:20.444110 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 06:25:20.444428 17472 solver.cpp:404]     Test net output #1: loss = 1.52936 (* 1 = 1.52936 loss)\nI0819 06:25:21.764250 17472 solver.cpp:228] Iteration 61800, loss = 0.000189286\nI0819 06:25:21.764298 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 06:25:21.764313 17472 solver.cpp:244]     Train net output #1: loss = 0.000189371 (* 1 = 0.000189371 loss)\nI0819 06:25:21.852493 17472 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0819 06:27:38.549453 17472 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 06:29:00.010347 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71572\nI0819 06:29:00.010627 17472 solver.cpp:404]     Test net output #1: loss = 1.52555 (* 1 = 1.52555 loss)\nI0819 06:29:01.330111 17472 solver.cpp:228] Iteration 61900, loss = 0.000191332\nI0819 06:29:01.330157 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:29:01.330173 17472 solver.cpp:244]     Train net output #1: loss = 0.000191417 (* 1 = 0.000191417 loss)\nI0819 06:29:01.418344 17472 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0819 06:31:18.341663 17472 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:32:39.810138 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71556\nI0819 06:32:39.810468 17472 solver.cpp:404]     Test net output #1: loss = 1.52776 (* 1 = 1.52776 loss)\nI0819 06:32:41.129452 17472 solver.cpp:228] Iteration 62000, loss = 0.000186289\nI0819 06:32:41.129498 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:32:41.129513 17472 solver.cpp:244]     Train net output #1: loss = 0.000186374 (* 1 = 0.000186374 loss)\nI0819 06:32:41.215360 17472 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0819 06:34:58.087200 17472 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 06:36:19.546726 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0819 06:36:19.547034 17472 solver.cpp:404]     Test net output #1: loss = 1.52714 (* 1 = 1.52714 loss)\nI0819 06:36:20.866948 17472 solver.cpp:228] Iteration 62100, loss = 0.000208848\nI0819 06:36:20.866994 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:36:20.867010 17472 solver.cpp:244]     Train net output #1: loss = 0.000208933 (* 1 = 
0.000208933 loss)\nI0819 06:36:20.955693 17472 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0819 06:38:37.594878 17472 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 06:39:59.049604 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7158\nI0819 06:39:59.049924 17472 solver.cpp:404]     Test net output #1: loss = 1.52723 (* 1 = 1.52723 loss)\nI0819 06:40:00.369127 17472 solver.cpp:228] Iteration 62200, loss = 0.000181494\nI0819 06:40:00.369174 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:40:00.369189 17472 solver.cpp:244]     Train net output #1: loss = 0.000181579 (* 1 = 0.000181579 loss)\nI0819 06:40:00.453495 17472 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0819 06:42:17.578418 17472 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 06:43:39.042223 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71508\nI0819 06:43:39.042503 17472 solver.cpp:404]     Test net output #1: loss = 1.52546 (* 1 = 1.52546 loss)\nI0819 06:43:40.361721 17472 solver.cpp:228] Iteration 62300, loss = 0.000175171\nI0819 06:43:40.361768 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:43:40.361784 17472 solver.cpp:244]     Train net output #1: loss = 0.000175256 (* 1 = 0.000175256 loss)\nI0819 06:43:40.450731 17472 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0819 06:45:57.158515 17472 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 06:47:18.618778 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71448\nI0819 06:47:18.619091 17472 solver.cpp:404]     Test net output #1: loss = 1.5267 (* 1 = 1.5267 loss)\nI0819 06:47:19.939007 17472 solver.cpp:228] Iteration 62400, loss = 0.000178463\nI0819 06:47:19.939054 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:47:19.939070 17472 solver.cpp:244]     Train net output #1: loss = 0.000178548 (* 1 = 0.000178548 loss)\nI0819 06:47:20.028424 17472 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0819 06:49:36.975082 
17472 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 06:50:58.445832 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71628\nI0819 06:50:58.446143 17472 solver.cpp:404]     Test net output #1: loss = 1.52213 (* 1 = 1.52213 loss)\nI0819 06:50:59.765360 17472 solver.cpp:228] Iteration 62500, loss = 0.000191184\nI0819 06:50:59.765406 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:50:59.765424 17472 solver.cpp:244]     Train net output #1: loss = 0.000191269 (* 1 = 0.000191269 loss)\nI0819 06:50:59.850610 17472 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0819 06:53:16.529399 17472 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 06:54:38.006646 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0819 06:54:38.006944 17472 solver.cpp:404]     Test net output #1: loss = 1.52648 (* 1 = 1.52648 loss)\nI0819 06:54:39.326071 17472 solver.cpp:228] Iteration 62600, loss = 0.000168959\nI0819 06:54:39.326117 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:54:39.326133 17472 solver.cpp:244]     Train net output #1: loss = 0.000169044 (* 1 = 0.000169044 loss)\nI0819 06:54:39.413421 17472 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0819 06:56:56.458036 17472 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 06:58:17.937263 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71408\nI0819 06:58:17.937577 17472 solver.cpp:404]     Test net output #1: loss = 1.52892 (* 1 = 1.52892 loss)\nI0819 06:58:19.256750 17472 solver.cpp:228] Iteration 62700, loss = 0.000194372\nI0819 06:58:19.256796 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:58:19.256813 17472 solver.cpp:244]     Train net output #1: loss = 0.000194457 (* 1 = 0.000194457 loss)\nI0819 06:58:19.343576 17472 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0819 07:00:36.205399 17472 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 07:01:57.667373 17472 solver.cpp:404]     Test net 
output #0: accuracy = 0.71432\nI0819 07:01:57.667672 17472 solver.cpp:404]     Test net output #1: loss = 1.52783 (* 1 = 1.52783 loss)\nI0819 07:01:58.986580 17472 solver.cpp:228] Iteration 62800, loss = 0.00018309\nI0819 07:01:58.986625 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:01:58.986640 17472 solver.cpp:244]     Train net output #1: loss = 0.000183175 (* 1 = 0.000183175 loss)\nI0819 07:01:59.073776 17472 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0819 07:04:15.906222 17472 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 07:05:37.360774 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0819 07:05:37.361057 17472 solver.cpp:404]     Test net output #1: loss = 1.52742 (* 1 = 1.52742 loss)\nI0819 07:05:38.680196 17472 solver.cpp:228] Iteration 62900, loss = 0.00017111\nI0819 07:05:38.680240 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:05:38.680256 17472 solver.cpp:244]     Train net output #1: loss = 0.000171195 (* 1 = 0.000171195 loss)\nI0819 07:05:38.769992 17472 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0819 07:07:55.654099 17472 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 07:09:17.118952 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71556\nI0819 07:09:17.119249 17472 solver.cpp:404]     Test net output #1: loss = 1.52535 (* 1 = 1.52535 loss)\nI0819 07:09:18.438834 17472 solver.cpp:228] Iteration 63000, loss = 0.000188006\nI0819 07:09:18.438885 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:09:18.438901 17472 solver.cpp:244]     Train net output #1: loss = 0.000188091 (* 1 = 0.000188091 loss)\nI0819 07:09:18.526418 17472 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0819 07:11:35.183372 17472 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 07:12:56.693385 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 07:12:56.693677 17472 solver.cpp:404]     Test net output #1: loss = 1.52691 (* 1 = 
1.52691 loss)\nI0819 07:12:58.014235 17472 solver.cpp:228] Iteration 63100, loss = 0.000188279\nI0819 07:12:58.014284 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:12:58.014309 17472 solver.cpp:244]     Train net output #1: loss = 0.000188364 (* 1 = 0.000188364 loss)\nI0819 07:12:58.101194 17472 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0819 07:15:14.939452 17472 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 07:16:36.433926 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71504\nI0819 07:16:36.434222 17472 solver.cpp:404]     Test net output #1: loss = 1.52591 (* 1 = 1.52591 loss)\nI0819 07:16:37.755028 17472 solver.cpp:228] Iteration 63200, loss = 0.000151271\nI0819 07:16:37.755079 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:16:37.755103 17472 solver.cpp:244]     Train net output #1: loss = 0.000151356 (* 1 = 0.000151356 loss)\nI0819 07:16:37.843114 17472 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0819 07:18:54.433914 17472 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 07:20:15.943019 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71452\nI0819 07:20:15.943372 17472 solver.cpp:404]     Test net output #1: loss = 1.52627 (* 1 = 1.52627 loss)\nI0819 07:20:17.263399 17472 solver.cpp:228] Iteration 63300, loss = 0.000166233\nI0819 07:20:17.263450 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:20:17.263475 17472 solver.cpp:244]     Train net output #1: loss = 0.000166318 (* 1 = 0.000166318 loss)\nI0819 07:20:17.347985 17472 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0819 07:22:34.290055 17472 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:23:55.786273 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71544\nI0819 07:23:55.786532 17472 solver.cpp:404]     Test net output #1: loss = 1.52572 (* 1 = 1.52572 loss)\nI0819 07:23:57.106467 17472 solver.cpp:228] Iteration 63400, loss = 0.000194043\nI0819 07:23:57.106518 
17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:23:57.106542 17472 solver.cpp:244]     Train net output #1: loss = 0.000194128 (* 1 = 0.000194128 loss)\nI0819 07:23:57.197013 17472 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0819 07:26:14.085407 17472 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 07:27:35.566212 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71436\nI0819 07:27:35.566460 17472 solver.cpp:404]     Test net output #1: loss = 1.52804 (* 1 = 1.52804 loss)\nI0819 07:27:36.886873 17472 solver.cpp:228] Iteration 63500, loss = 0.000176901\nI0819 07:27:36.886914 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:27:36.886939 17472 solver.cpp:244]     Train net output #1: loss = 0.000176986 (* 1 = 0.000176986 loss)\nI0819 07:27:36.975473 17472 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0819 07:29:53.916939 17472 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:31:15.409965 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71552\nI0819 07:31:15.410310 17472 solver.cpp:404]     Test net output #1: loss = 1.53021 (* 1 = 1.53021 loss)\nI0819 07:31:16.730986 17472 solver.cpp:228] Iteration 63600, loss = 0.000168359\nI0819 07:31:16.731029 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:31:16.731056 17472 solver.cpp:244]     Train net output #1: loss = 0.000168444 (* 1 = 0.000168444 loss)\nI0819 07:31:16.811954 17472 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0819 07:33:33.520965 17472 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 07:34:55.873549 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7154\nI0819 07:34:55.873885 17472 solver.cpp:404]     Test net output #1: loss = 1.52545 (* 1 = 1.52545 loss)\nI0819 07:34:57.202509 17472 solver.cpp:228] Iteration 63700, loss = 0.000167151\nI0819 07:34:57.202560 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:34:57.202576 17472 solver.cpp:244]     Train net 
output #1: loss = 0.000167236 (* 1 = 0.000167236 loss)\nI0819 07:34:57.286787 17472 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0819 07:37:14.564321 17472 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 07:38:36.907327 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7152\nI0819 07:38:36.907716 17472 solver.cpp:404]     Test net output #1: loss = 1.53042 (* 1 = 1.53042 loss)\nI0819 07:38:38.236009 17472 solver.cpp:228] Iteration 63800, loss = 0.000147749\nI0819 07:38:38.236059 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:38:38.236076 17472 solver.cpp:244]     Train net output #1: loss = 0.000147834 (* 1 = 0.000147834 loss)\nI0819 07:38:38.318354 17472 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0819 07:40:55.700242 17472 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 07:42:18.041460 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71452\nI0819 07:42:18.041797 17472 solver.cpp:404]     Test net output #1: loss = 1.53022 (* 1 = 1.53022 loss)\nI0819 07:42:19.371048 17472 solver.cpp:228] Iteration 63900, loss = 0.000189177\nI0819 07:42:19.371099 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:42:19.371116 17472 solver.cpp:244]     Train net output #1: loss = 0.000189262 (* 1 = 0.000189262 loss)\nI0819 07:42:19.454510 17472 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0819 07:44:36.883900 17472 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 07:45:59.229562 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 07:45:59.229936 17472 solver.cpp:404]     Test net output #1: loss = 1.53121 (* 1 = 1.53121 loss)\nI0819 07:46:00.558787 17472 solver.cpp:228] Iteration 64000, loss = 0.000187471\nI0819 07:46:00.558836 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:46:00.558851 17472 solver.cpp:244]     Train net output #1: loss = 0.000187556 (* 1 = 0.000187556 loss)\nI0819 07:46:00.639845 17472 sgd_solver.cpp:166] Iteration 64000, 
lr = 0.035\nI0819 07:48:18.028841 17472 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 07:49:40.367519 17472 solver.cpp:404]     Test net output #0: accuracy = 0.712881\nI0819 07:49:40.367908 17472 solver.cpp:404]     Test net output #1: loss = 1.53212 (* 1 = 1.53212 loss)\nI0819 07:49:41.697222 17472 solver.cpp:228] Iteration 64100, loss = 0.000179389\nI0819 07:49:41.697271 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:49:41.697288 17472 solver.cpp:244]     Train net output #1: loss = 0.000179474 (* 1 = 0.000179474 loss)\nI0819 07:49:41.774804 17472 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0819 07:51:59.165282 17472 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 07:53:21.499116 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71376\nI0819 07:53:21.499495 17472 solver.cpp:404]     Test net output #1: loss = 1.53199 (* 1 = 1.53199 loss)\nI0819 07:53:22.828850 17472 solver.cpp:228] Iteration 64200, loss = 0.000172624\nI0819 07:53:22.828900 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:53:22.828917 17472 solver.cpp:244]     Train net output #1: loss = 0.000172709 (* 1 = 0.000172709 loss)\nI0819 07:53:22.912536 17472 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0819 07:55:40.285549 17472 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 07:57:02.631266 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71464\nI0819 07:57:02.631640 17472 solver.cpp:404]     Test net output #1: loss = 1.53041 (* 1 = 1.53041 loss)\nI0819 07:57:03.960989 17472 solver.cpp:228] Iteration 64300, loss = 0.000187266\nI0819 07:57:03.961040 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:57:03.961056 17472 solver.cpp:244]     Train net output #1: loss = 0.000187351 (* 1 = 0.000187351 loss)\nI0819 07:57:04.045078 17472 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0819 07:59:21.402460 17472 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 08:00:43.750532 
17472 solver.cpp:404]     Test net output #0: accuracy = 0.71356\nI0819 08:00:43.750869 17472 solver.cpp:404]     Test net output #1: loss = 1.53385 (* 1 = 1.53385 loss)\nI0819 08:00:45.079068 17472 solver.cpp:228] Iteration 64400, loss = 0.00017893\nI0819 08:00:45.079118 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:00:45.079134 17472 solver.cpp:244]     Train net output #1: loss = 0.000179015 (* 1 = 0.000179015 loss)\nI0819 08:00:45.164891 17472 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0819 08:03:02.519037 17472 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 08:04:24.868890 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71332\nI0819 08:04:24.869266 17472 solver.cpp:404]     Test net output #1: loss = 1.53407 (* 1 = 1.53407 loss)\nI0819 08:04:26.198693 17472 solver.cpp:228] Iteration 64500, loss = 0.000167832\nI0819 08:04:26.198738 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:04:26.198755 17472 solver.cpp:244]     Train net output #1: loss = 0.000167917 (* 1 = 0.000167917 loss)\nI0819 08:04:26.280588 17472 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0819 08:06:43.556180 17472 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 08:08:05.900315 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71376\nI0819 08:08:05.900665 17472 solver.cpp:404]     Test net output #1: loss = 1.53357 (* 1 = 1.53357 loss)\nI0819 08:08:07.229732 17472 solver.cpp:228] Iteration 64600, loss = 0.000168475\nI0819 08:08:07.229779 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:08:07.229796 17472 solver.cpp:244]     Train net output #1: loss = 0.00016856 (* 1 = 0.00016856 loss)\nI0819 08:08:07.314832 17472 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0819 08:10:24.613616 17472 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 08:11:46.945063 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71228\nI0819 08:11:46.945444 17472 solver.cpp:404]     Test 
net output #1: loss = 1.53267 (* 1 = 1.53267 loss)\nI0819 08:11:48.274296 17472 solver.cpp:228] Iteration 64700, loss = 0.000201337\nI0819 08:11:48.274345 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:11:48.274361 17472 solver.cpp:244]     Train net output #1: loss = 0.000201422 (* 1 = 0.000201422 loss)\nI0819 08:11:48.361416 17472 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0819 08:14:05.639564 17472 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 08:15:27.976110 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 08:15:27.976482 17472 solver.cpp:404]     Test net output #1: loss = 1.53245 (* 1 = 1.53245 loss)\nI0819 08:15:29.304929 17472 solver.cpp:228] Iteration 64800, loss = 0.000182986\nI0819 08:15:29.304985 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:15:29.305001 17472 solver.cpp:244]     Train net output #1: loss = 0.000183071 (* 1 = 0.000183071 loss)\nI0819 08:15:29.388645 17472 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0819 08:17:46.719825 17472 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 08:19:09.042449 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0819 08:19:09.042821 17472 solver.cpp:404]     Test net output #1: loss = 1.53047 (* 1 = 1.53047 loss)\nI0819 08:19:10.371399 17472 solver.cpp:228] Iteration 64900, loss = 0.000195186\nI0819 08:19:10.371449 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:19:10.371465 17472 solver.cpp:244]     Train net output #1: loss = 0.000195271 (* 1 = 0.000195271 loss)\nI0819 08:19:10.454375 17472 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0819 08:21:27.886253 17472 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:22:50.212177 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71516\nI0819 08:22:50.212541 17472 solver.cpp:404]     Test net output #1: loss = 1.52883 (* 1 = 1.52883 loss)\nI0819 08:22:51.541050 17472 solver.cpp:228] Iteration 65000, loss = 
0.000197386\nI0819 08:22:51.541100 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:22:51.541116 17472 solver.cpp:244]     Train net output #1: loss = 0.000197471 (* 1 = 0.000197471 loss)\nI0819 08:22:51.626490 17472 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0819 08:25:08.854086 17472 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 08:26:31.191381 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71444\nI0819 08:26:31.191751 17472 solver.cpp:404]     Test net output #1: loss = 1.52959 (* 1 = 1.52959 loss)\nI0819 08:26:32.521026 17472 solver.cpp:228] Iteration 65100, loss = 0.000177721\nI0819 08:26:32.521075 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:26:32.521091 17472 solver.cpp:244]     Train net output #1: loss = 0.000177806 (* 1 = 0.000177806 loss)\nI0819 08:26:32.607501 17472 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0819 08:28:50.072990 17472 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:30:12.398731 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 08:30:12.399075 17472 solver.cpp:404]     Test net output #1: loss = 1.53101 (* 1 = 1.53101 loss)\nI0819 08:30:13.727947 17472 solver.cpp:228] Iteration 65200, loss = 0.000184978\nI0819 08:30:13.728000 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:30:13.728018 17472 solver.cpp:244]     Train net output #1: loss = 0.000185063 (* 1 = 0.000185063 loss)\nI0819 08:30:13.805531 17472 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0819 08:32:31.060561 17472 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 08:33:53.379400 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71432\nI0819 08:33:53.379753 17472 solver.cpp:404]     Test net output #1: loss = 1.5313 (* 1 = 1.5313 loss)\nI0819 08:33:54.707592 17472 solver.cpp:228] Iteration 65300, loss = 0.000168133\nI0819 08:33:54.707639 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:33:54.707655 
17472 solver.cpp:244]     Train net output #1: loss = 0.000168218 (* 1 = 0.000168218 loss)\nI0819 08:33:54.792547 17472 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0819 08:36:12.016588 17472 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 08:37:34.324031 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7134\nI0819 08:37:34.324404 17472 solver.cpp:404]     Test net output #1: loss = 1.53239 (* 1 = 1.53239 loss)\nI0819 08:37:35.652164 17472 solver.cpp:228] Iteration 65400, loss = 0.000191111\nI0819 08:37:35.652214 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:37:35.652230 17472 solver.cpp:244]     Train net output #1: loss = 0.000191196 (* 1 = 0.000191196 loss)\nI0819 08:37:35.737579 17472 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0819 08:39:52.984550 17472 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 08:41:15.288766 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 08:41:15.289144 17472 solver.cpp:404]     Test net output #1: loss = 1.53051 (* 1 = 1.53051 loss)\nI0819 08:41:16.617060 17472 solver.cpp:228] Iteration 65500, loss = 0.00017573\nI0819 08:41:16.617110 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:41:16.617126 17472 solver.cpp:244]     Train net output #1: loss = 0.000175815 (* 1 = 0.000175815 loss)\nI0819 08:41:16.696591 17472 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0819 08:43:33.990810 17472 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 08:44:56.412155 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7132\nI0819 08:44:56.412478 17472 solver.cpp:404]     Test net output #1: loss = 1.53285 (* 1 = 1.53285 loss)\nI0819 08:44:57.740252 17472 solver.cpp:228] Iteration 65600, loss = 0.000171681\nI0819 08:44:57.740300 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:44:57.740316 17472 solver.cpp:244]     Train net output #1: loss = 0.000171766 (* 1 = 0.000171766 loss)\nI0819 08:44:57.821089 17472 
sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0819 08:47:15.146898 17472 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 08:48:37.327527 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71292\nI0819 08:48:37.327904 17472 solver.cpp:404]     Test net output #1: loss = 1.53314 (* 1 = 1.53314 loss)\nI0819 08:48:38.656599 17472 solver.cpp:228] Iteration 65700, loss = 0.000200829\nI0819 08:48:38.656647 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:48:38.656664 17472 solver.cpp:244]     Train net output #1: loss = 0.000200914 (* 1 = 0.000200914 loss)\nI0819 08:48:38.734534 17472 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0819 08:50:56.041831 17472 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 08:52:18.366065 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 08:52:18.366430 17472 solver.cpp:404]     Test net output #1: loss = 1.53141 (* 1 = 1.53141 loss)\nI0819 08:52:19.694680 17472 solver.cpp:228] Iteration 65800, loss = 0.000174177\nI0819 08:52:19.694723 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:52:19.694739 17472 solver.cpp:244]     Train net output #1: loss = 0.000174262 (* 1 = 0.000174262 loss)\nI0819 08:52:19.778664 17472 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0819 08:54:37.072129 17472 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 08:55:59.456992 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 08:55:59.457362 17472 solver.cpp:404]     Test net output #1: loss = 1.53007 (* 1 = 1.53007 loss)\nI0819 08:56:00.785440 17472 solver.cpp:228] Iteration 65900, loss = 0.000187222\nI0819 08:56:00.785481 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:56:00.785496 17472 solver.cpp:244]     Train net output #1: loss = 0.000187307 (* 1 = 0.000187307 loss)\nI0819 08:56:00.869294 17472 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0819 08:58:18.317261 17472 solver.cpp:337] Iteration 66000, Testing 
net (#0)\nI0819 08:59:40.656633 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 08:59:40.656987 17472 solver.cpp:404]     Test net output #1: loss = 1.52755 (* 1 = 1.52755 loss)\nI0819 08:59:41.985013 17472 solver.cpp:228] Iteration 66000, loss = 0.000188026\nI0819 08:59:41.985052 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:59:41.985069 17472 solver.cpp:244]     Train net output #1: loss = 0.000188111 (* 1 = 0.000188111 loss)\nI0819 08:59:42.071447 17472 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0819 09:01:59.548282 17472 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 09:03:21.893326 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71344\nI0819 09:03:21.893709 17472 solver.cpp:404]     Test net output #1: loss = 1.52894 (* 1 = 1.52894 loss)\nI0819 09:03:23.222107 17472 solver.cpp:228] Iteration 66100, loss = 0.000190082\nI0819 09:03:23.222149 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:03:23.222165 17472 solver.cpp:244]     Train net output #1: loss = 0.000190167 (* 1 = 0.000190167 loss)\nI0819 09:03:23.306625 17472 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0819 09:05:40.624733 17472 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 09:07:02.955265 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0819 09:07:02.955615 17472 solver.cpp:404]     Test net output #1: loss = 1.52941 (* 1 = 1.52941 loss)\nI0819 09:07:04.283838 17472 solver.cpp:228] Iteration 66200, loss = 0.000166087\nI0819 09:07:04.283879 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:07:04.283895 17472 solver.cpp:244]     Train net output #1: loss = 0.000166172 (* 1 = 0.000166172 loss)\nI0819 09:07:04.378782 17472 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0819 09:09:21.858363 17472 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 09:10:44.207815 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 09:10:44.208209 
17472 solver.cpp:404]     Test net output #1: loss = 1.52958 (* 1 = 1.52958 loss)\nI0819 09:10:45.536252 17472 solver.cpp:228] Iteration 66300, loss = 0.000175591\nI0819 09:10:45.536293 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:10:45.536309 17472 solver.cpp:244]     Train net output #1: loss = 0.000175676 (* 1 = 0.000175676 loss)\nI0819 09:10:45.623520 17472 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0819 09:13:02.882510 17472 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 09:14:25.226871 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7142\nI0819 09:14:25.227247 17472 solver.cpp:404]     Test net output #1: loss = 1.53048 (* 1 = 1.53048 loss)\nI0819 09:14:26.555272 17472 solver.cpp:228] Iteration 66400, loss = 0.000174244\nI0819 09:14:26.555313 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:14:26.555330 17472 solver.cpp:244]     Train net output #1: loss = 0.000174329 (* 1 = 0.000174329 loss)\nI0819 09:14:26.640643 17472 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0819 09:16:44.089133 17472 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 09:18:06.422763 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 09:18:06.423151 17472 solver.cpp:404]     Test net output #1: loss = 1.53073 (* 1 = 1.53073 loss)\nI0819 09:18:07.751238 17472 solver.cpp:228] Iteration 66500, loss = 0.000178123\nI0819 09:18:07.751279 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:18:07.751296 17472 solver.cpp:244]     Train net output #1: loss = 0.000178208 (* 1 = 0.000178208 loss)\nI0819 09:18:07.836072 17472 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0819 09:20:25.201441 17472 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:21:47.543097 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 09:21:47.543444 17472 solver.cpp:404]     Test net output #1: loss = 1.53312 (* 1 = 1.53312 loss)\nI0819 09:21:48.872164 17472 
solver.cpp:228] Iteration 66600, loss = 0.000169334\nI0819 09:21:48.872205 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:21:48.872220 17472 solver.cpp:244]     Train net output #1: loss = 0.000169419 (* 1 = 0.000169419 loss)\nI0819 09:21:48.956337 17472 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0819 09:24:06.558053 17472 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 09:25:28.890882 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 09:25:28.891263 17472 solver.cpp:404]     Test net output #1: loss = 1.53158 (* 1 = 1.53158 loss)\nI0819 09:25:30.219288 17472 solver.cpp:228] Iteration 66700, loss = 0.000190867\nI0819 09:25:30.219331 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:25:30.219347 17472 solver.cpp:244]     Train net output #1: loss = 0.000190952 (* 1 = 0.000190952 loss)\nI0819 09:25:30.305029 17472 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0819 09:27:47.596705 17472 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:29:09.931294 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 09:29:09.931648 17472 solver.cpp:404]     Test net output #1: loss = 1.52921 (* 1 = 1.52921 loss)\nI0819 09:29:11.259667 17472 solver.cpp:228] Iteration 66800, loss = 0.000199522\nI0819 09:29:11.259711 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:29:11.259726 17472 solver.cpp:244]     Train net output #1: loss = 0.000199607 (* 1 = 0.000199607 loss)\nI0819 09:29:11.339489 17472 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0819 09:31:28.703685 17472 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 09:32:51.033589 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 09:32:51.033964 17472 solver.cpp:404]     Test net output #1: loss = 1.53068 (* 1 = 1.53068 loss)\nI0819 09:32:52.363811 17472 solver.cpp:228] Iteration 66900, loss = 0.000178119\nI0819 09:32:52.363852 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 09:32:52.363867 17472 solver.cpp:244]     Train net output #1: loss = 0.000178204 (* 1 = 0.000178204 loss)\nI0819 09:32:52.441987 17472 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0819 09:35:09.955986 17472 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 09:36:32.291103 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71492\nI0819 09:36:32.291481 17472 solver.cpp:404]     Test net output #1: loss = 1.52854 (* 1 = 1.52854 loss)\nI0819 09:36:33.619627 17472 solver.cpp:228] Iteration 67000, loss = 0.000183146\nI0819 09:36:33.619669 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:36:33.619685 17472 solver.cpp:244]     Train net output #1: loss = 0.000183231 (* 1 = 0.000183231 loss)\nI0819 09:36:33.697304 17472 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0819 09:38:50.831656 17472 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 09:40:13.169883 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0819 09:40:13.170234 17472 solver.cpp:404]     Test net output #1: loss = 1.534 (* 1 = 1.534 loss)\nI0819 09:40:14.498317 17472 solver.cpp:228] Iteration 67100, loss = 0.000184663\nI0819 09:40:14.498353 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:40:14.498369 17472 solver.cpp:244]     Train net output #1: loss = 0.000184748 (* 1 = 0.000184748 loss)\nI0819 09:40:14.580521 17472 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0819 09:42:31.824996 17472 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 09:43:54.162124 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 09:43:54.162473 17472 solver.cpp:404]     Test net output #1: loss = 1.53236 (* 1 = 1.53236 loss)\nI0819 09:43:55.489943 17472 solver.cpp:228] Iteration 67200, loss = 0.000183114\nI0819 09:43:55.489981 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:43:55.489997 17472 solver.cpp:244]     Train net output #1: loss = 0.000183199 (* 1 = 
0.000183199 loss)\nI0819 09:43:55.576598 17472 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0819 09:46:12.918258 17472 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 09:47:35.249255 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71336\nI0819 09:47:35.249634 17472 solver.cpp:404]     Test net output #1: loss = 1.53153 (* 1 = 1.53153 loss)\nI0819 09:47:36.577530 17472 solver.cpp:228] Iteration 67300, loss = 0.00017258\nI0819 09:47:36.577567 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:47:36.577582 17472 solver.cpp:244]     Train net output #1: loss = 0.000172665 (* 1 = 0.000172665 loss)\nI0819 09:47:36.663266 17472 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0819 09:49:54.119298 17472 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 09:51:16.460734 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0819 09:51:16.461103 17472 solver.cpp:404]     Test net output #1: loss = 1.52997 (* 1 = 1.52997 loss)\nI0819 09:51:17.789077 17472 solver.cpp:228] Iteration 67400, loss = 0.000188165\nI0819 09:51:17.789117 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:51:17.789134 17472 solver.cpp:244]     Train net output #1: loss = 0.00018825 (* 1 = 0.00018825 loss)\nI0819 09:51:17.873699 17472 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0819 09:53:35.133850 17472 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 09:54:57.464224 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 09:54:57.464573 17472 solver.cpp:404]     Test net output #1: loss = 1.52951 (* 1 = 1.52951 loss)\nI0819 09:54:58.792963 17472 solver.cpp:228] Iteration 67500, loss = 0.00018536\nI0819 09:54:58.793001 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:54:58.793017 17472 solver.cpp:244]     Train net output #1: loss = 0.000185445 (* 1 = 0.000185445 loss)\nI0819 09:54:58.877856 17472 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0819 09:57:16.218252 
17472 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 09:58:37.724004 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0819 09:58:37.724328 17472 solver.cpp:404]     Test net output #1: loss = 1.529 (* 1 = 1.529 loss)\nI0819 09:58:39.046946 17472 solver.cpp:228] Iteration 67600, loss = 0.000196275\nI0819 09:58:39.046995 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:58:39.047019 17472 solver.cpp:244]     Train net output #1: loss = 0.00019636 (* 1 = 0.00019636 loss)\nI0819 09:58:39.132946 17472 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0819 10:00:56.331691 17472 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 10:02:17.814469 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 10:02:17.814805 17472 solver.cpp:404]     Test net output #1: loss = 1.52903 (* 1 = 1.52903 loss)\nI0819 10:02:19.134160 17472 solver.cpp:228] Iteration 67700, loss = 0.00017403\nI0819 10:02:19.134208 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:02:19.134232 17472 solver.cpp:244]     Train net output #1: loss = 0.000174115 (* 1 = 0.000174115 loss)\nI0819 10:02:19.224969 17472 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0819 10:04:36.472707 17472 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 10:05:57.948009 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 10:05:57.948309 17472 solver.cpp:404]     Test net output #1: loss = 1.52997 (* 1 = 1.52997 loss)\nI0819 10:05:59.267812 17472 solver.cpp:228] Iteration 67800, loss = 0.000189491\nI0819 10:05:59.267861 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:05:59.267885 17472 solver.cpp:244]     Train net output #1: loss = 0.000189576 (* 1 = 0.000189576 loss)\nI0819 10:05:59.364820 17472 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0819 10:08:16.551950 17472 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 10:09:38.033942 17472 solver.cpp:404]     Test net output #0: 
accuracy = 0.71384\nI0819 10:09:38.034231 17472 solver.cpp:404]     Test net output #1: loss = 1.52957 (* 1 = 1.52957 loss)\nI0819 10:09:39.353853 17472 solver.cpp:228] Iteration 67900, loss = 0.000199228\nI0819 10:09:39.353901 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:09:39.353925 17472 solver.cpp:244]     Train net output #1: loss = 0.000199313 (* 1 = 0.000199313 loss)\nI0819 10:09:39.439770 17472 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0819 10:11:56.485816 17472 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 10:13:17.962484 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71336\nI0819 10:13:17.962815 17472 solver.cpp:404]     Test net output #1: loss = 1.53168 (* 1 = 1.53168 loss)\nI0819 10:13:19.282184 17472 solver.cpp:228] Iteration 68000, loss = 0.000176022\nI0819 10:13:19.282232 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:13:19.282256 17472 solver.cpp:244]     Train net output #1: loss = 0.000176107 (* 1 = 0.000176107 loss)\nI0819 10:13:19.367784 17472 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0819 10:15:36.490811 17472 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 10:16:57.956223 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7128\nI0819 10:16:57.956534 17472 solver.cpp:404]     Test net output #1: loss = 1.53389 (* 1 = 1.53389 loss)\nI0819 10:16:59.275022 17472 solver.cpp:228] Iteration 68100, loss = 0.000189794\nI0819 10:16:59.275076 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:16:59.275101 17472 solver.cpp:244]     Train net output #1: loss = 0.000189879 (* 1 = 0.000189879 loss)\nI0819 10:16:59.366364 17472 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0819 10:19:16.546217 17472 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:20:38.017215 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 10:20:38.017557 17472 solver.cpp:404]     Test net output #1: loss = 1.53313 (* 1 = 1.53313 
loss)\nI0819 10:20:39.337147 17472 solver.cpp:228] Iteration 68200, loss = 0.000163015\nI0819 10:20:39.337193 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:20:39.337216 17472 solver.cpp:244]     Train net output #1: loss = 0.0001631 (* 1 = 0.0001631 loss)\nI0819 10:20:39.426831 17472 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0819 10:22:56.754999 17472 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 10:24:18.245216 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71424\nI0819 10:24:18.245507 17472 solver.cpp:404]     Test net output #1: loss = 1.53 (* 1 = 1.53 loss)\nI0819 10:24:19.565754 17472 solver.cpp:228] Iteration 68300, loss = 0.000189657\nI0819 10:24:19.565799 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:24:19.565822 17472 solver.cpp:244]     Train net output #1: loss = 0.000189742 (* 1 = 0.000189742 loss)\nI0819 10:24:19.647524 17472 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0819 10:26:36.733778 17472 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:27:58.220435 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71488\nI0819 10:27:58.220736 17472 solver.cpp:404]     Test net output #1: loss = 1.53131 (* 1 = 1.53131 loss)\nI0819 10:27:59.540720 17472 solver.cpp:228] Iteration 68400, loss = 0.00017692\nI0819 10:27:59.540768 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:27:59.540792 17472 solver.cpp:244]     Train net output #1: loss = 0.000177005 (* 1 = 0.000177005 loss)\nI0819 10:27:59.628307 17472 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0819 10:30:16.790787 17472 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 10:31:38.264911 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71468\nI0819 10:31:38.265211 17472 solver.cpp:404]     Test net output #1: loss = 1.53266 (* 1 = 1.53266 loss)\nI0819 10:31:39.584290 17472 solver.cpp:228] Iteration 68500, loss = 0.000174558\nI0819 10:31:39.584338 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:31:39.584362 17472 solver.cpp:244]     Train net output #1: loss = 0.000174643 (* 1 = 0.000174643 loss)\nI0819 10:31:39.668061 17472 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0819 10:33:56.869180 17472 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 10:35:18.348028 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71456\nI0819 10:35:18.348383 17472 solver.cpp:404]     Test net output #1: loss = 1.5303 (* 1 = 1.5303 loss)\nI0819 10:35:19.667879 17472 solver.cpp:228] Iteration 68600, loss = 0.000178946\nI0819 10:35:19.667929 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:35:19.667954 17472 solver.cpp:244]     Train net output #1: loss = 0.000179031 (* 1 = 0.000179031 loss)\nI0819 10:35:19.749758 17472 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0819 10:37:36.898246 17472 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 10:38:58.371527 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0819 10:38:58.371816 17472 solver.cpp:404]     Test net output #1: loss = 1.53153 (* 1 = 1.53153 loss)\nI0819 10:38:59.692109 17472 solver.cpp:228] Iteration 68700, loss = 0.000184807\nI0819 10:38:59.692159 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:38:59.692183 17472 solver.cpp:244]     Train net output #1: loss = 0.000184892 (* 1 = 0.000184892 loss)\nI0819 10:38:59.776201 17472 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0819 10:41:16.995457 17472 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 10:42:38.472525 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71352\nI0819 10:42:38.472818 17472 solver.cpp:404]     Test net output #1: loss = 1.53289 (* 1 = 1.53289 loss)\nI0819 10:42:39.793428 17472 solver.cpp:228] Iteration 68800, loss = 0.000183759\nI0819 10:42:39.793478 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:42:39.793501 17472 solver.cpp:244]     Train net output 
#1: loss = 0.000183844 (* 1 = 0.000183844 loss)\nI0819 10:42:39.877517 17472 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0819 10:44:57.046617 17472 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 10:46:18.516540 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7134\nI0819 10:46:18.516876 17472 solver.cpp:404]     Test net output #1: loss = 1.5321 (* 1 = 1.5321 loss)\nI0819 10:46:19.836784 17472 solver.cpp:228] Iteration 68900, loss = 0.0001869\nI0819 10:46:19.836834 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:46:19.836859 17472 solver.cpp:244]     Train net output #1: loss = 0.000186985 (* 1 = 0.000186985 loss)\nI0819 10:46:19.928690 17472 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0819 10:48:37.023937 17472 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 10:49:58.493319 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71372\nI0819 10:49:58.493624 17472 solver.cpp:404]     Test net output #1: loss = 1.53073 (* 1 = 1.53073 loss)\nI0819 10:49:59.812683 17472 solver.cpp:228] Iteration 69000, loss = 0.000165686\nI0819 10:49:59.812733 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:49:59.812757 17472 solver.cpp:244]     Train net output #1: loss = 0.000165771 (* 1 = 0.000165771 loss)\nI0819 10:49:59.898339 17472 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0819 10:52:17.261332 17472 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 10:53:38.729441 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71352\nI0819 10:53:38.729739 17472 solver.cpp:404]     Test net output #1: loss = 1.53397 (* 1 = 1.53397 loss)\nI0819 10:53:40.050396 17472 solver.cpp:228] Iteration 69100, loss = 0.000204581\nI0819 10:53:40.050447 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:53:40.050472 17472 solver.cpp:244]     Train net output #1: loss = 0.000204666 (* 1 = 0.000204666 loss)\nI0819 10:53:40.134382 17472 sgd_solver.cpp:166] Iteration 69100, lr = 
0.035\nI0819 10:55:57.273254 17472 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 10:57:18.741762 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0819 10:57:18.742100 17472 solver.cpp:404]     Test net output #1: loss = 1.53325 (* 1 = 1.53325 loss)\nI0819 10:57:20.061396 17472 solver.cpp:228] Iteration 69200, loss = 0.000176031\nI0819 10:57:20.061447 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:57:20.061471 17472 solver.cpp:244]     Train net output #1: loss = 0.000176116 (* 1 = 0.000176116 loss)\nI0819 10:57:20.151931 17472 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0819 10:59:37.476989 17472 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 11:00:58.949192 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71344\nI0819 11:00:58.949514 17472 solver.cpp:404]     Test net output #1: loss = 1.53448 (* 1 = 1.53448 loss)\nI0819 11:01:00.269209 17472 solver.cpp:228] Iteration 69300, loss = 0.000173055\nI0819 11:01:00.269258 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:01:00.269282 17472 solver.cpp:244]     Train net output #1: loss = 0.00017314 (* 1 = 0.00017314 loss)\nI0819 11:01:00.359556 17472 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0819 11:03:17.418455 17472 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 11:04:38.884436 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71428\nI0819 11:04:38.884778 17472 solver.cpp:404]     Test net output #1: loss = 1.53399 (* 1 = 1.53399 loss)\nI0819 11:04:40.204890 17472 solver.cpp:228] Iteration 69400, loss = 0.000180942\nI0819 11:04:40.204938 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:04:40.204962 17472 solver.cpp:244]     Train net output #1: loss = 0.000181027 (* 1 = 0.000181027 loss)\nI0819 11:04:40.294872 17472 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0819 11:06:57.315884 17472 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 11:08:18.783447 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.71272\nI0819 11:08:18.783759 17472 solver.cpp:404]     Test net output #1: loss = 1.53603 (* 1 = 1.53603 loss)\nI0819 11:08:20.103792 17472 solver.cpp:228] Iteration 69500, loss = 0.000167024\nI0819 11:08:20.103842 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:08:20.103866 17472 solver.cpp:244]     Train net output #1: loss = 0.000167109 (* 1 = 0.000167109 loss)\nI0819 11:08:20.194088 17472 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0819 11:10:37.388303 17472 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 11:11:58.856498 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7146\nI0819 11:11:58.856796 17472 solver.cpp:404]     Test net output #1: loss = 1.53166 (* 1 = 1.53166 loss)\nI0819 11:12:00.177505 17472 solver.cpp:228] Iteration 69600, loss = 0.000192859\nI0819 11:12:00.177554 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:12:00.177578 17472 solver.cpp:244]     Train net output #1: loss = 0.000192944 (* 1 = 0.000192944 loss)\nI0819 11:12:00.265521 17472 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0819 11:14:17.211087 17472 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 11:15:38.694456 17472 solver.cpp:404]     Test net output #0: accuracy = 0.713521\nI0819 11:15:38.694742 17472 solver.cpp:404]     Test net output #1: loss = 1.5333 (* 1 = 1.5333 loss)\nI0819 11:15:40.016242 17472 solver.cpp:228] Iteration 69700, loss = 0.000160994\nI0819 11:15:40.016290 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:15:40.016314 17472 solver.cpp:244]     Train net output #1: loss = 0.000161079 (* 1 = 0.000161079 loss)\nI0819 11:15:40.099720 17472 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0819 11:17:57.213454 17472 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 11:19:18.705173 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71316\nI0819 11:19:18.705479 17472 solver.cpp:404]     Test net 
output #1: loss = 1.5353 (* 1 = 1.5353 loss)\nI0819 11:19:20.025967 17472 solver.cpp:228] Iteration 69800, loss = 0.000180808\nI0819 11:19:20.026017 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:19:20.026041 17472 solver.cpp:244]     Train net output #1: loss = 0.000180893 (* 1 = 0.000180893 loss)\nI0819 11:19:20.113477 17472 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0819 11:21:37.180853 17472 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 11:22:58.674458 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71316\nI0819 11:22:58.674800 17472 solver.cpp:404]     Test net output #1: loss = 1.53415 (* 1 = 1.53415 loss)\nI0819 11:22:59.995293 17472 solver.cpp:228] Iteration 69900, loss = 0.000207488\nI0819 11:22:59.995344 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:22:59.995368 17472 solver.cpp:244]     Train net output #1: loss = 0.000207573 (* 1 = 0.000207573 loss)\nI0819 11:23:00.078976 17472 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0819 11:25:17.067795 17472 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 11:26:38.552767 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0819 11:26:38.553099 17472 solver.cpp:404]     Test net output #1: loss = 1.53187 (* 1 = 1.53187 loss)\nI0819 11:26:39.873200 17472 solver.cpp:228] Iteration 70000, loss = 0.000191591\nI0819 11:26:39.873247 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:26:39.873272 17472 solver.cpp:244]     Train net output #1: loss = 0.000191676 (* 1 = 0.000191676 loss)\nI0819 11:26:39.958843 17472 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:26:39.958865 17472 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0819 11:28:57.163276 17472 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:30:18.644433 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71284\nI0819 11:30:18.644780 17472 solver.cpp:404]     Test net output #1: loss = 1.53352 
(* 1 = 1.53352 loss)\nI0819 11:30:19.965124 17472 solver.cpp:228] Iteration 70100, loss = 0.000202908\nI0819 11:30:19.965173 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:30:19.965196 17472 solver.cpp:244]     Train net output #1: loss = 0.000202993 (* 1 = 0.000202993 loss)\nI0819 11:30:20.049870 17472 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0819 11:32:37.262683 17472 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 11:33:58.744038 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71308\nI0819 11:33:58.744340 17472 solver.cpp:404]     Test net output #1: loss = 1.53399 (* 1 = 1.53399 loss)\nI0819 11:34:00.064906 17472 solver.cpp:228] Iteration 70200, loss = 0.000193074\nI0819 11:34:00.064959 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:34:00.064982 17472 solver.cpp:244]     Train net output #1: loss = 0.000193159 (* 1 = 0.000193159 loss)\nI0819 11:34:00.147717 17472 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0819 11:36:17.296788 17472 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 11:37:38.781795 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI0819 11:37:38.782127 17472 solver.cpp:404]     Test net output #1: loss = 1.53659 (* 1 = 1.53659 loss)\nI0819 11:37:40.102542 17472 solver.cpp:228] Iteration 70300, loss = 0.00019525\nI0819 11:37:40.102592 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:37:40.102617 17472 solver.cpp:244]     Train net output #1: loss = 0.000195335 (* 1 = 0.000195335 loss)\nI0819 11:37:40.189661 17472 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0819 11:39:57.339715 17472 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 11:41:18.832643 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71264\nI0819 11:41:18.833214 17472 solver.cpp:404]     Test net output #1: loss = 1.53694 (* 1 = 1.53694 loss)\nI0819 11:41:20.152954 17472 solver.cpp:228] Iteration 70400, loss = 0.000177483\nI0819 
11:41:20.153002 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:41:20.153026 17472 solver.cpp:244]     Train net output #1: loss = 0.000177568 (* 1 = 0.000177568 loss)\nI0819 11:41:20.242707 17472 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0819 11:43:37.496906 17472 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 11:44:58.982516 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71216\nI0819 11:44:58.982874 17472 solver.cpp:404]     Test net output #1: loss = 1.53955 (* 1 = 1.53955 loss)\nI0819 11:45:00.302093 17472 solver.cpp:228] Iteration 70500, loss = 0.000200534\nI0819 11:45:00.302141 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:45:00.302165 17472 solver.cpp:244]     Train net output #1: loss = 0.000200619 (* 1 = 0.000200619 loss)\nI0819 11:45:00.391094 17472 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0819 11:47:17.760846 17472 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 11:48:39.249184 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71228\nI0819 11:48:39.249526 17472 solver.cpp:404]     Test net output #1: loss = 1.53984 (* 1 = 1.53984 loss)\nI0819 11:48:40.570206 17472 solver.cpp:228] Iteration 70600, loss = 0.00019322\nI0819 11:48:40.570253 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:48:40.570277 17472 solver.cpp:244]     Train net output #1: loss = 0.000193305 (* 1 = 0.000193305 loss)\nI0819 11:48:40.662516 17472 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0819 11:50:58.086625 17472 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 11:52:19.639950 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0819 11:52:19.640328 17472 solver.cpp:404]     Test net output #1: loss = 1.54211 (* 1 = 1.54211 loss)\nI0819 11:52:20.959575 17472 solver.cpp:228] Iteration 70700, loss = 0.000192416\nI0819 11:52:20.959623 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:52:20.959647 17472 
solver.cpp:244]     Train net output #1: loss = 0.000192501 (* 1 = 0.000192501 loss)\nI0819 11:52:21.056002 17472 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0819 11:54:38.459931 17472 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0819 11:55:59.944135 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71232\nI0819 11:55:59.944485 17472 solver.cpp:404]     Test net output #1: loss = 1.54246 (* 1 = 1.54246 loss)\nI0819 11:56:01.264744 17472 solver.cpp:228] Iteration 70800, loss = 0.000186447\nI0819 11:56:01.264792 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:56:01.264816 17472 solver.cpp:244]     Train net output #1: loss = 0.000186532 (* 1 = 0.000186532 loss)\nI0819 11:56:01.350937 17472 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0819 11:58:18.579893 17472 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 11:59:40.189713 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71216\nI0819 11:59:40.190091 17472 solver.cpp:404]     Test net output #1: loss = 1.54423 (* 1 = 1.54423 loss)\nI0819 11:59:41.510423 17472 solver.cpp:228] Iteration 70900, loss = 0.000171967\nI0819 11:59:41.510470 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:59:41.510494 17472 solver.cpp:244]     Train net output #1: loss = 0.000172052 (* 1 = 0.000172052 loss)\nI0819 11:59:41.595051 17472 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0819 12:01:58.737655 17472 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 12:03:20.309654 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7126\nI0819 12:03:20.309969 17472 solver.cpp:404]     Test net output #1: loss = 1.54379 (* 1 = 1.54379 loss)\nI0819 12:03:21.629853 17472 solver.cpp:228] Iteration 71000, loss = 0.000173085\nI0819 12:03:21.629906 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:03:21.629930 17472 solver.cpp:244]     Train net output #1: loss = 0.00017317 (* 1 = 0.00017317 loss)\nI0819 12:03:21.718914 17472 
sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0819 12:05:38.786860 17472 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 12:07:00.255167 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0819 12:07:00.255457 17472 solver.cpp:404]     Test net output #1: loss = 1.54575 (* 1 = 1.54575 loss)\nI0819 12:07:01.575659 17472 solver.cpp:228] Iteration 71100, loss = 0.000176332\nI0819 12:07:01.575711 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:07:01.575736 17472 solver.cpp:244]     Train net output #1: loss = 0.000176417 (* 1 = 0.000176417 loss)\nI0819 12:07:01.664136 17472 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0819 12:09:19.110733 17472 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 12:10:40.588913 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71164\nI0819 12:10:40.589221 17472 solver.cpp:404]     Test net output #1: loss = 1.54632 (* 1 = 1.54632 loss)\nI0819 12:10:41.909194 17472 solver.cpp:228] Iteration 71200, loss = 0.000178218\nI0819 12:10:41.909245 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:10:41.909271 17472 solver.cpp:244]     Train net output #1: loss = 0.000178303 (* 1 = 0.000178303 loss)\nI0819 12:10:41.993172 17472 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0819 12:12:59.337697 17472 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 12:14:20.808593 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71144\nI0819 12:14:20.808886 17472 solver.cpp:404]     Test net output #1: loss = 1.54784 (* 1 = 1.54784 loss)\nI0819 12:14:22.128973 17472 solver.cpp:228] Iteration 71300, loss = 0.000174638\nI0819 12:14:22.129024 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:14:22.129048 17472 solver.cpp:244]     Train net output #1: loss = 0.000174723 (* 1 = 0.000174723 loss)\nI0819 12:14:22.214539 17472 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0819 12:16:39.482245 17472 solver.cpp:337] Iteration 71400, 
Testing net (#0)\nI0819 12:18:00.954704 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI0819 12:18:00.955010 17472 solver.cpp:404]     Test net output #1: loss = 1.54811 (* 1 = 1.54811 loss)\nI0819 12:18:02.275226 17472 solver.cpp:228] Iteration 71400, loss = 0.000179333\nI0819 12:18:02.275271 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:18:02.275295 17472 solver.cpp:244]     Train net output #1: loss = 0.000179418 (* 1 = 0.000179418 loss)\nI0819 12:18:02.357796 17472 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0819 12:20:19.670984 17472 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:21:41.136848 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0819 12:21:41.137176 17472 solver.cpp:404]     Test net output #1: loss = 1.54877 (* 1 = 1.54877 loss)\nI0819 12:21:42.456784 17472 solver.cpp:228] Iteration 71500, loss = 0.000188122\nI0819 12:21:42.456831 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:21:42.456856 17472 solver.cpp:244]     Train net output #1: loss = 0.000188208 (* 1 = 0.000188208 loss)\nI0819 12:21:42.543483 17472 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0819 12:23:59.795436 17472 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:25:21.269508 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7112\nI0819 12:25:21.269843 17472 solver.cpp:404]     Test net output #1: loss = 1.5489 (* 1 = 1.5489 loss)\nI0819 12:25:22.589607 17472 solver.cpp:228] Iteration 71600, loss = 0.000174977\nI0819 12:25:22.589658 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:25:22.589680 17472 solver.cpp:244]     Train net output #1: loss = 0.000175062 (* 1 = 0.000175062 loss)\nI0819 12:25:22.676162 17472 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0819 12:27:40.094095 17472 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:29:01.571451 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71084\nI0819 
12:29:01.571796 17472 solver.cpp:404]     Test net output #1: loss = 1.5501 (* 1 = 1.5501 loss)\nI0819 12:29:02.891029 17472 solver.cpp:228] Iteration 71700, loss = 0.000166812\nI0819 12:29:02.891079 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:29:02.891103 17472 solver.cpp:244]     Train net output #1: loss = 0.000166897 (* 1 = 0.000166897 loss)\nI0819 12:29:02.974654 17472 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0819 12:31:20.407785 17472 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 12:32:41.886970 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7108\nI0819 12:32:41.887284 17472 solver.cpp:404]     Test net output #1: loss = 1.55031 (* 1 = 1.55031 loss)\nI0819 12:32:43.207691 17472 solver.cpp:228] Iteration 71800, loss = 0.000187839\nI0819 12:32:43.207741 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:32:43.207767 17472 solver.cpp:244]     Train net output #1: loss = 0.000187924 (* 1 = 0.000187924 loss)\nI0819 12:32:43.295584 17472 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0819 12:35:00.635785 17472 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 12:36:22.119618 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71104\nI0819 12:36:22.119932 17472 solver.cpp:404]     Test net output #1: loss = 1.55112 (* 1 = 1.55112 loss)\nI0819 12:36:23.440330 17472 solver.cpp:228] Iteration 71900, loss = 0.000178114\nI0819 12:36:23.440376 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:36:23.440399 17472 solver.cpp:244]     Train net output #1: loss = 0.000178199 (* 1 = 0.000178199 loss)\nI0819 12:36:23.530145 17472 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0819 12:38:40.627143 17472 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 12:40:02.107597 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71112\nI0819 12:40:02.107981 17472 solver.cpp:404]     Test net output #1: loss = 1.55104 (* 1 = 1.55104 loss)\nI0819 
12:40:03.427522 17472 solver.cpp:228] Iteration 72000, loss = 0.000168971\nI0819 12:40:03.427569 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:40:03.427594 17472 solver.cpp:244]     Train net output #1: loss = 0.000169056 (* 1 = 0.000169056 loss)\nI0819 12:40:03.513083 17472 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0819 12:42:20.697324 17472 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 12:43:42.191558 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71132\nI0819 12:43:42.191851 17472 solver.cpp:404]     Test net output #1: loss = 1.55197 (* 1 = 1.55197 loss)\nI0819 12:43:43.512894 17472 solver.cpp:228] Iteration 72100, loss = 0.000159361\nI0819 12:43:43.512944 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:43:43.512969 17472 solver.cpp:244]     Train net output #1: loss = 0.000159446 (* 1 = 0.000159446 loss)\nI0819 12:43:43.598858 17472 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0819 12:46:00.792404 17472 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 12:47:22.284467 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71112\nI0819 12:47:22.284802 17472 solver.cpp:404]     Test net output #1: loss = 1.55257 (* 1 = 1.55257 loss)\nI0819 12:47:23.604924 17472 solver.cpp:228] Iteration 72200, loss = 0.000190657\nI0819 12:47:23.604974 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:47:23.604998 17472 solver.cpp:244]     Train net output #1: loss = 0.000190742 (* 1 = 0.000190742 loss)\nI0819 12:47:23.689537 17472 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0819 12:49:40.775738 17472 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 12:51:02.263329 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7114\nI0819 12:51:02.263643 17472 solver.cpp:404]     Test net output #1: loss = 1.55297 (* 1 = 1.55297 loss)\nI0819 12:51:03.583863 17472 solver.cpp:228] Iteration 72300, loss = 0.000168482\nI0819 12:51:03.583914 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:51:03.583940 17472 solver.cpp:244]     Train net output #1: loss = 0.000168567 (* 1 = 0.000168567 loss)\nI0819 12:51:03.673679 17472 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0819 12:53:20.649152 17472 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 12:54:42.146208 17472 solver.cpp:404]     Test net output #0: accuracy = 0.711\nI0819 12:54:42.146530 17472 solver.cpp:404]     Test net output #1: loss = 1.55284 (* 1 = 1.55284 loss)\nI0819 12:54:43.466449 17472 solver.cpp:228] Iteration 72400, loss = 0.000191852\nI0819 12:54:43.466497 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:54:43.466521 17472 solver.cpp:244]     Train net output #1: loss = 0.000191937 (* 1 = 0.000191937 loss)\nI0819 12:54:43.550617 17472 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0819 12:57:00.774660 17472 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 12:58:22.264158 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71072\nI0819 12:58:22.264421 17472 solver.cpp:404]     Test net output #1: loss = 1.55376 (* 1 = 1.55376 loss)\nI0819 12:58:23.584743 17472 solver.cpp:228] Iteration 72500, loss = 0.000174\nI0819 12:58:23.584791 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:58:23.584815 17472 solver.cpp:244]     Train net output #1: loss = 0.000174085 (* 1 = 0.000174085 loss)\nI0819 12:58:23.676235 17472 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0819 13:00:40.964520 17472 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 13:02:02.430230 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71076\nI0819 13:02:02.430543 17472 solver.cpp:404]     Test net output #1: loss = 1.55389 (* 1 = 1.55389 loss)\nI0819 13:02:03.749725 17472 solver.cpp:228] Iteration 72600, loss = 0.000195337\nI0819 13:02:03.749769 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:02:03.749785 17472 solver.cpp:244]     Train net output 
#1: loss = 0.000195422 (* 1 = 0.000195422 loss)\nI0819 13:02:03.835788 17472 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0819 13:04:20.693009 17472 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 13:05:42.169396 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 13:05:42.169706 17472 solver.cpp:404]     Test net output #1: loss = 1.555 (* 1 = 1.555 loss)\nI0819 13:05:43.489565 17472 solver.cpp:228] Iteration 72700, loss = 0.000203208\nI0819 13:05:43.489612 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:05:43.489629 17472 solver.cpp:244]     Train net output #1: loss = 0.000203293 (* 1 = 0.000203293 loss)\nI0819 13:05:43.574504 17472 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0819 13:08:00.190523 17472 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 13:09:21.661658 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71024\nI0819 13:09:21.661978 17472 solver.cpp:404]     Test net output #1: loss = 1.55478 (* 1 = 1.55478 loss)\nI0819 13:09:22.981132 17472 solver.cpp:228] Iteration 72800, loss = 0.000170563\nI0819 13:09:22.981178 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:09:22.981194 17472 solver.cpp:244]     Train net output #1: loss = 0.000170648 (* 1 = 0.000170648 loss)\nI0819 13:09:23.072088 17472 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0819 13:11:39.887639 17472 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 13:13:01.374577 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71092\nI0819 13:13:01.374902 17472 solver.cpp:404]     Test net output #1: loss = 1.55535 (* 1 = 1.55535 loss)\nI0819 13:13:02.694649 17472 solver.cpp:228] Iteration 72900, loss = 0.000191668\nI0819 13:13:02.694685 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:13:02.694700 17472 solver.cpp:244]     Train net output #1: loss = 0.000191753 (* 1 = 0.000191753 loss)\nI0819 13:13:02.778152 17472 sgd_solver.cpp:166] Iteration 72900, lr = 
0.0035\nI0819 13:15:19.440616 17472 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 13:16:40.909191 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71072\nI0819 13:16:40.909507 17472 solver.cpp:404]     Test net output #1: loss = 1.55478 (* 1 = 1.55478 loss)\nI0819 13:16:42.228688 17472 solver.cpp:228] Iteration 73000, loss = 0.000180623\nI0819 13:16:42.228725 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:16:42.228739 17472 solver.cpp:244]     Train net output #1: loss = 0.000180708 (* 1 = 0.000180708 loss)\nI0819 13:16:42.313693 17472 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0819 13:18:58.805164 17472 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:20:20.274747 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71024\nI0819 13:20:20.275066 17472 solver.cpp:404]     Test net output #1: loss = 1.55563 (* 1 = 1.55563 loss)\nI0819 13:20:21.594084 17472 solver.cpp:228] Iteration 73100, loss = 0.000156944\nI0819 13:20:21.594121 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:20:21.594135 17472 solver.cpp:244]     Train net output #1: loss = 0.000157029 (* 1 = 0.000157029 loss)\nI0819 13:20:21.681859 17472 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0819 13:22:38.383700 17472 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:23:59.831430 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 13:23:59.831725 17472 solver.cpp:404]     Test net output #1: loss = 1.55546 (* 1 = 1.55546 loss)\nI0819 13:24:01.151064 17472 solver.cpp:228] Iteration 73200, loss = 0.000212676\nI0819 13:24:01.151100 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:24:01.151115 17472 solver.cpp:244]     Train net output #1: loss = 0.000212761 (* 1 = 0.000212761 loss)\nI0819 13:24:01.239444 17472 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0819 13:26:17.957571 17472 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:27:39.418539 17472 
solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 13:27:39.418887 17472 solver.cpp:404]     Test net output #1: loss = 1.55595 (* 1 = 1.55595 loss)\nI0819 13:27:40.737956 17472 solver.cpp:228] Iteration 73300, loss = 0.000184692\nI0819 13:27:40.737990 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:27:40.738005 17472 solver.cpp:244]     Train net output #1: loss = 0.000184777 (* 1 = 0.000184777 loss)\nI0819 13:27:40.825762 17472 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0819 13:29:57.555312 17472 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 13:31:19.019301 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0819 13:31:19.019651 17472 solver.cpp:404]     Test net output #1: loss = 1.55586 (* 1 = 1.55586 loss)\nI0819 13:31:20.338675 17472 solver.cpp:228] Iteration 73400, loss = 0.000180949\nI0819 13:31:20.338711 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:31:20.338727 17472 solver.cpp:244]     Train net output #1: loss = 0.000181034 (* 1 = 0.000181034 loss)\nI0819 13:31:20.425307 17472 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0819 13:33:36.913918 17472 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 13:34:58.400583 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71012\nI0819 13:34:58.400949 17472 solver.cpp:404]     Test net output #1: loss = 1.55615 (* 1 = 1.55615 loss)\nI0819 13:34:59.720254 17472 solver.cpp:228] Iteration 73500, loss = 0.000182912\nI0819 13:34:59.720293 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:34:59.720307 17472 solver.cpp:244]     Train net output #1: loss = 0.000182997 (* 1 = 0.000182997 loss)\nI0819 13:34:59.811897 17472 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0819 13:37:16.551450 17472 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 13:38:38.030422 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71016\nI0819 13:38:38.030760 17472 solver.cpp:404]     Test net 
output #1: loss = 1.55577 (* 1 = 1.55577 loss)\nI0819 13:38:39.350004 17472 solver.cpp:228] Iteration 73600, loss = 0.000180748\nI0819 13:38:39.350042 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:38:39.350057 17472 solver.cpp:244]     Train net output #1: loss = 0.000180833 (* 1 = 0.000180833 loss)\nI0819 13:38:39.435789 17472 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0819 13:40:55.915347 17472 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 13:42:17.401000 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71024\nI0819 13:42:17.401340 17472 solver.cpp:404]     Test net output #1: loss = 1.55638 (* 1 = 1.55638 loss)\nI0819 13:42:18.721031 17472 solver.cpp:228] Iteration 73700, loss = 0.000204101\nI0819 13:42:18.721067 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:42:18.721082 17472 solver.cpp:244]     Train net output #1: loss = 0.000204186 (* 1 = 0.000204186 loss)\nI0819 13:42:18.810719 17472 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0819 13:44:35.572371 17472 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 13:45:57.055971 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71036\nI0819 13:45:57.056337 17472 solver.cpp:404]     Test net output #1: loss = 1.55619 (* 1 = 1.55619 loss)\nI0819 13:45:58.376019 17472 solver.cpp:228] Iteration 73800, loss = 0.000171535\nI0819 13:45:58.376058 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:45:58.376073 17472 solver.cpp:244]     Train net output #1: loss = 0.00017162 (* 1 = 0.00017162 loss)\nI0819 13:45:58.465241 17472 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0819 13:48:15.165808 17472 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 13:49:36.653630 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71016\nI0819 13:49:36.653985 17472 solver.cpp:404]     Test net output #1: loss = 1.55621 (* 1 = 1.55621 loss)\nI0819 13:49:37.973522 17472 solver.cpp:228] Iteration 73900, loss = 
0.000180607\nI0819 13:49:37.973559 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:49:37.973574 17472 solver.cpp:244]     Train net output #1: loss = 0.000180692 (* 1 = 0.000180692 loss)\nI0819 13:49:38.060923 17472 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0819 13:51:54.547574 17472 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 13:53:16.025682 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71048\nI0819 13:53:16.026053 17472 solver.cpp:404]     Test net output #1: loss = 1.55584 (* 1 = 1.55584 loss)\nI0819 13:53:17.345010 17472 solver.cpp:228] Iteration 74000, loss = 0.000204704\nI0819 13:53:17.345047 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:53:17.345063 17472 solver.cpp:244]     Train net output #1: loss = 0.000204789 (* 1 = 0.000204789 loss)\nI0819 13:53:17.436250 17472 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0819 13:55:34.169651 17472 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 13:56:55.636092 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 13:56:55.636461 17472 solver.cpp:404]     Test net output #1: loss = 1.5563 (* 1 = 1.5563 loss)\nI0819 13:56:56.956809 17472 solver.cpp:228] Iteration 74100, loss = 0.000176424\nI0819 13:56:56.956845 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:56:56.956861 17472 solver.cpp:244]     Train net output #1: loss = 0.000176509 (* 1 = 0.000176509 loss)\nI0819 13:56:57.045991 17472 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0819 13:59:13.743793 17472 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 14:00:35.225802 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71028\nI0819 14:00:35.226145 17472 solver.cpp:404]     Test net output #1: loss = 1.55679 (* 1 = 1.55679 loss)\nI0819 14:00:36.546486 17472 solver.cpp:228] Iteration 74200, loss = 0.000166753\nI0819 14:00:36.546521 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:00:36.546536 
17472 solver.cpp:244]     Train net output #1: loss = 0.000166838 (* 1 = 0.000166838 loss)\nI0819 14:00:36.635646 17472 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0819 14:02:53.201876 17472 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 14:04:14.677325 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 14:04:14.677685 17472 solver.cpp:404]     Test net output #1: loss = 1.55659 (* 1 = 1.55659 loss)\nI0819 14:04:15.997345 17472 solver.cpp:228] Iteration 74300, loss = 0.000178549\nI0819 14:04:15.997380 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:04:15.997395 17472 solver.cpp:244]     Train net output #1: loss = 0.000178634 (* 1 = 0.000178634 loss)\nI0819 14:04:16.084898 17472 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0819 14:06:32.785308 17472 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 14:07:54.261644 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70996\nI0819 14:07:54.261966 17472 solver.cpp:404]     Test net output #1: loss = 1.55643 (* 1 = 1.55643 loss)\nI0819 14:07:55.582448 17472 solver.cpp:228] Iteration 74400, loss = 0.000181255\nI0819 14:07:55.582484 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:07:55.582500 17472 solver.cpp:244]     Train net output #1: loss = 0.00018134 (* 1 = 0.00018134 loss)\nI0819 14:07:55.669775 17472 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0819 14:10:12.441246 17472 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 14:11:33.922829 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71044\nI0819 14:11:33.923135 17472 solver.cpp:404]     Test net output #1: loss = 1.5567 (* 1 = 1.5567 loss)\nI0819 14:11:35.247174 17472 solver.cpp:228] Iteration 74500, loss = 0.000198744\nI0819 14:11:35.247215 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:11:35.247237 17472 solver.cpp:244]     Train net output #1: loss = 0.000198829 (* 1 = 0.000198829 loss)\nI0819 14:11:35.329424 17472 
sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0819 14:13:51.952694 17472 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 14:15:13.440980 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71028\nI0819 14:15:13.441331 17472 solver.cpp:404]     Test net output #1: loss = 1.55601 (* 1 = 1.55601 loss)\nI0819 14:15:14.761551 17472 solver.cpp:228] Iteration 74600, loss = 0.0001832\nI0819 14:15:14.761591 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:15:14.761615 17472 solver.cpp:244]     Train net output #1: loss = 0.000183285 (* 1 = 0.000183285 loss)\nI0819 14:15:14.848727 17472 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0819 14:17:31.673373 17472 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 14:18:53.166749 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0819 14:18:53.167098 17472 solver.cpp:404]     Test net output #1: loss = 1.55678 (* 1 = 1.55678 loss)\nI0819 14:18:54.487613 17472 solver.cpp:228] Iteration 74700, loss = 0.000178209\nI0819 14:18:54.487654 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:18:54.487678 17472 solver.cpp:244]     Train net output #1: loss = 0.000178294 (* 1 = 0.000178294 loss)\nI0819 14:18:54.574733 17472 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0819 14:21:11.191226 17472 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:22:32.681191 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0819 14:22:32.681505 17472 solver.cpp:404]     Test net output #1: loss = 1.55678 (* 1 = 1.55678 loss)\nI0819 14:22:34.002298 17472 solver.cpp:228] Iteration 74800, loss = 0.000173725\nI0819 14:22:34.002341 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:22:34.002363 17472 solver.cpp:244]     Train net output #1: loss = 0.00017381 (* 1 = 0.00017381 loss)\nI0819 14:22:34.083797 17472 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0819 14:24:50.521764 17472 solver.cpp:337] Iteration 74900, Testing 
net (#0)\nI0819 14:26:11.997184 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71064\nI0819 14:26:11.997468 17472 solver.cpp:404]     Test net output #1: loss = 1.55736 (* 1 = 1.55736 loss)\nI0819 14:26:13.317600 17472 solver.cpp:228] Iteration 74900, loss = 0.000173935\nI0819 14:26:13.317642 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:26:13.317665 17472 solver.cpp:244]     Train net output #1: loss = 0.00017402 (* 1 = 0.00017402 loss)\nI0819 14:26:13.405035 17472 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0819 14:28:30.250653 17472 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 14:29:51.736388 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71052\nI0819 14:29:51.736688 17472 solver.cpp:404]     Test net output #1: loss = 1.55705 (* 1 = 1.55705 loss)\nI0819 14:29:53.057339 17472 solver.cpp:228] Iteration 75000, loss = 0.000174881\nI0819 14:29:53.057380 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:29:53.057404 17472 solver.cpp:244]     Train net output #1: loss = 0.000174966 (* 1 = 0.000174966 loss)\nI0819 14:29:53.144481 17472 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0819 14:32:09.904356 17472 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 14:33:31.386617 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71048\nI0819 14:33:31.386921 17472 solver.cpp:404]     Test net output #1: loss = 1.55772 (* 1 = 1.55772 loss)\nI0819 14:33:32.708012 17472 solver.cpp:228] Iteration 75100, loss = 0.000187015\nI0819 14:33:32.708053 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:33:32.708082 17472 solver.cpp:244]     Train net output #1: loss = 0.0001871 (* 1 = 0.0001871 loss)\nI0819 14:33:32.791635 17472 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0819 14:35:49.635690 17472 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 14:37:11.126139 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71\nI0819 14:37:11.126456 
17472 solver.cpp:404]     Test net output #1: loss = 1.55737 (* 1 = 1.55737 loss)\nI0819 14:37:12.447456 17472 solver.cpp:228] Iteration 75200, loss = 0.000201144\nI0819 14:37:12.447499 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:37:12.447520 17472 solver.cpp:244]     Train net output #1: loss = 0.000201229 (* 1 = 0.000201229 loss)\nI0819 14:37:12.531093 17472 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0819 14:39:29.257216 17472 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 14:40:50.751436 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70992\nI0819 14:40:50.751736 17472 solver.cpp:404]     Test net output #1: loss = 1.55711 (* 1 = 1.55711 loss)\nI0819 14:40:52.072859 17472 solver.cpp:228] Iteration 75300, loss = 0.000196076\nI0819 14:40:52.072899 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:40:52.072922 17472 solver.cpp:244]     Train net output #1: loss = 0.000196161 (* 1 = 0.000196161 loss)\nI0819 14:40:52.162317 17472 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0819 14:43:08.736963 17472 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 14:44:30.225718 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71036\nI0819 14:44:30.226024 17472 solver.cpp:404]     Test net output #1: loss = 1.55642 (* 1 = 1.55642 loss)\nI0819 14:44:31.546211 17472 solver.cpp:228] Iteration 75400, loss = 0.000184893\nI0819 14:44:31.546252 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:44:31.546274 17472 solver.cpp:244]     Train net output #1: loss = 0.000184978 (* 1 = 0.000184978 loss)\nI0819 14:44:31.634222 17472 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0819 14:46:48.159137 17472 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 14:48:09.655074 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 14:48:09.655380 17472 solver.cpp:404]     Test net output #1: loss = 1.55723 (* 1 = 1.55723 loss)\nI0819 14:48:10.975618 17472 
solver.cpp:228] Iteration 75500, loss = 0.000182954\nI0819 14:48:10.975661 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:48:10.975682 17472 solver.cpp:244]     Train net output #1: loss = 0.000183039 (* 1 = 0.000183039 loss)\nI0819 14:48:11.061650 17472 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0819 14:50:27.836598 17472 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 14:51:49.345142 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0819 14:51:49.345444 17472 solver.cpp:404]     Test net output #1: loss = 1.55694 (* 1 = 1.55694 loss)\nI0819 14:51:50.666028 17472 solver.cpp:228] Iteration 75600, loss = 0.000185981\nI0819 14:51:50.666074 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:51:50.666098 17472 solver.cpp:244]     Train net output #1: loss = 0.000186066 (* 1 = 0.000186066 loss)\nI0819 14:51:50.747758 17472 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0819 14:54:07.497684 17472 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 14:55:28.987601 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71044\nI0819 14:55:28.987896 17472 solver.cpp:404]     Test net output #1: loss = 1.55723 (* 1 = 1.55723 loss)\nI0819 14:55:30.309228 17472 solver.cpp:228] Iteration 75700, loss = 0.000171397\nI0819 14:55:30.309270 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:55:30.309293 17472 solver.cpp:244]     Train net output #1: loss = 0.000171482 (* 1 = 0.000171482 loss)\nI0819 14:55:30.397590 17472 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0819 14:57:46.931721 17472 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 14:59:08.424494 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7102\nI0819 14:59:08.424764 17472 solver.cpp:404]     Test net output #1: loss = 1.55708 (* 1 = 1.55708 loss)\nI0819 14:59:09.745667 17472 solver.cpp:228] Iteration 75800, loss = 0.000173565\nI0819 14:59:09.745708 17472 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 14:59:09.745731 17472 solver.cpp:244]     Train net output #1: loss = 0.00017365 (* 1 = 0.00017365 loss)\nI0819 14:59:09.833498 17472 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0819 15:01:26.595793 17472 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 15:02:48.098547 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0819 15:02:48.098901 17472 solver.cpp:404]     Test net output #1: loss = 1.55737 (* 1 = 1.55737 loss)\nI0819 15:02:49.419596 17472 solver.cpp:228] Iteration 75900, loss = 0.000170456\nI0819 15:02:49.419637 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:02:49.419659 17472 solver.cpp:244]     Train net output #1: loss = 0.000170541 (* 1 = 0.000170541 loss)\nI0819 15:02:49.504374 17472 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0819 15:05:06.060840 17472 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 15:06:27.552661 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71\nI0819 15:06:27.552923 17472 solver.cpp:404]     Test net output #1: loss = 1.55715 (* 1 = 1.55715 loss)\nI0819 15:06:28.873652 17472 solver.cpp:228] Iteration 76000, loss = 0.000183413\nI0819 15:06:28.873695 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:06:28.873718 17472 solver.cpp:244]     Train net output #1: loss = 0.000183498 (* 1 = 0.000183498 loss)\nI0819 15:06:28.965664 17472 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0819 15:08:45.551615 17472 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 15:10:07.022120 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70988\nI0819 15:10:07.022456 17472 solver.cpp:404]     Test net output #1: loss = 1.5577 (* 1 = 1.5577 loss)\nI0819 15:10:08.343055 17472 solver.cpp:228] Iteration 76100, loss = 0.000183883\nI0819 15:10:08.343094 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:10:08.343111 17472 solver.cpp:244]     Train net output #1: loss = 0.000183968 (* 1 = 
0.000183968 loss)\nI0819 15:10:08.430656 17472 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0819 15:12:25.046461 17472 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 15:13:46.504384 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71016\nI0819 15:13:46.504669 17472 solver.cpp:404]     Test net output #1: loss = 1.55714 (* 1 = 1.55714 loss)\nI0819 15:13:47.825351 17472 solver.cpp:228] Iteration 76200, loss = 0.00020173\nI0819 15:13:47.825390 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:13:47.825407 17472 solver.cpp:244]     Train net output #1: loss = 0.000201815 (* 1 = 0.000201815 loss)\nI0819 15:13:47.909137 17472 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0819 15:16:04.657342 17472 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 15:17:26.132213 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0819 15:17:26.132581 17472 solver.cpp:404]     Test net output #1: loss = 1.55742 (* 1 = 1.55742 loss)\nI0819 15:17:27.453162 17472 solver.cpp:228] Iteration 76300, loss = 0.000189177\nI0819 15:17:27.453200 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:17:27.453215 17472 solver.cpp:244]     Train net output #1: loss = 0.000189262 (* 1 = 0.000189262 loss)\nI0819 15:17:27.545992 17472 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0819 15:19:44.281056 17472 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:21:05.748653 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71036\nI0819 15:21:05.749007 17472 solver.cpp:404]     Test net output #1: loss = 1.55751 (* 1 = 1.55751 loss)\nI0819 15:21:07.069586 17472 solver.cpp:228] Iteration 76400, loss = 0.000200477\nI0819 15:21:07.069624 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:21:07.069640 17472 solver.cpp:244]     Train net output #1: loss = 0.000200562 (* 1 = 0.000200562 loss)\nI0819 15:21:07.159754 17472 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0819 
15:23:23.955047 17472 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:24:45.436965 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70988\nI0819 15:24:45.437255 17472 solver.cpp:404]     Test net output #1: loss = 1.55742 (* 1 = 1.55742 loss)\nI0819 15:24:46.757930 17472 solver.cpp:228] Iteration 76500, loss = 0.000187969\nI0819 15:24:46.757972 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:24:46.757993 17472 solver.cpp:244]     Train net output #1: loss = 0.000188054 (* 1 = 0.000188054 loss)\nI0819 15:24:46.843277 17472 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0819 15:27:03.619945 17472 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 15:28:25.112354 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7102\nI0819 15:28:25.112653 17472 solver.cpp:404]     Test net output #1: loss = 1.55685 (* 1 = 1.55685 loss)\nI0819 15:28:26.433580 17472 solver.cpp:228] Iteration 76600, loss = 0.000192818\nI0819 15:28:26.433622 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:28:26.433645 17472 solver.cpp:244]     Train net output #1: loss = 0.000192903 (* 1 = 0.000192903 loss)\nI0819 15:28:26.521154 17472 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0819 15:30:43.299577 17472 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 15:32:04.787695 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0819 15:32:04.788048 17472 solver.cpp:404]     Test net output #1: loss = 1.55723 (* 1 = 1.55723 loss)\nI0819 15:32:06.108412 17472 solver.cpp:228] Iteration 76700, loss = 0.000187192\nI0819 15:32:06.108453 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:32:06.108474 17472 solver.cpp:244]     Train net output #1: loss = 0.000187277 (* 1 = 0.000187277 loss)\nI0819 15:32:06.194515 17472 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0819 15:34:22.684952 17472 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 15:35:44.177296 17472 solver.cpp:404] 
    Test net output #0: accuracy = 0.71\nI0819 15:35:44.177629 17472 solver.cpp:404]     Test net output #1: loss = 1.55701 (* 1 = 1.55701 loss)\nI0819 15:35:45.498296 17472 solver.cpp:228] Iteration 76800, loss = 0.000202991\nI0819 15:35:45.498337 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:35:45.498359 17472 solver.cpp:244]     Train net output #1: loss = 0.000203076 (* 1 = 0.000203076 loss)\nI0819 15:35:45.587684 17472 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0819 15:38:02.161957 17472 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 15:39:23.655086 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0819 15:39:23.655419 17472 solver.cpp:404]     Test net output #1: loss = 1.55764 (* 1 = 1.55764 loss)\nI0819 15:39:24.976460 17472 solver.cpp:228] Iteration 76900, loss = 0.000186351\nI0819 15:39:24.976502 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:39:24.976524 17472 solver.cpp:244]     Train net output #1: loss = 0.000186436 (* 1 = 0.000186436 loss)\nI0819 15:39:25.061835 17472 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0819 15:41:41.703320 17472 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 15:43:03.196545 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 15:43:03.196904 17472 solver.cpp:404]     Test net output #1: loss = 1.55738 (* 1 = 1.55738 loss)\nI0819 15:43:04.517383 17472 solver.cpp:228] Iteration 77000, loss = 0.000160461\nI0819 15:43:04.517426 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:43:04.517447 17472 solver.cpp:244]     Train net output #1: loss = 0.000160546 (* 1 = 0.000160546 loss)\nI0819 15:43:04.603591 17472 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0819 15:45:21.457734 17472 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 15:46:42.940956 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70996\nI0819 15:46:42.941277 17472 solver.cpp:404]     Test net output #1: loss = 
1.55756 (* 1 = 1.55756 loss)\nI0819 15:46:44.261925 17472 solver.cpp:228] Iteration 77100, loss = 0.000192136\nI0819 15:46:44.261968 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:46:44.261991 17472 solver.cpp:244]     Train net output #1: loss = 0.000192221 (* 1 = 0.000192221 loss)\nI0819 15:46:44.348500 17472 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0819 15:49:01.185056 17472 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 15:50:22.676281 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0819 15:50:22.676625 17472 solver.cpp:404]     Test net output #1: loss = 1.55748 (* 1 = 1.55748 loss)\nI0819 15:50:23.997584 17472 solver.cpp:228] Iteration 77200, loss = 0.000174665\nI0819 15:50:23.997627 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:50:23.997649 17472 solver.cpp:244]     Train net output #1: loss = 0.00017475 (* 1 = 0.00017475 loss)\nI0819 15:50:24.083827 17472 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0819 15:52:41.038924 17472 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 15:54:02.544724 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70992\nI0819 15:54:02.545055 17472 solver.cpp:404]     Test net output #1: loss = 1.55772 (* 1 = 1.55772 loss)\nI0819 15:54:03.866508 17472 solver.cpp:228] Iteration 77300, loss = 0.000183602\nI0819 15:54:03.866551 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:54:03.866575 17472 solver.cpp:244]     Train net output #1: loss = 0.000183687 (* 1 = 0.000183687 loss)\nI0819 15:54:03.956290 17472 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0819 15:56:20.856706 17472 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 15:57:42.354012 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71012\nI0819 15:57:42.354351 17472 solver.cpp:404]     Test net output #1: loss = 1.55736 (* 1 = 1.55736 loss)\nI0819 15:57:43.675339 17472 solver.cpp:228] Iteration 77400, loss = 0.000185811\nI0819 
15:57:43.675381 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:57:43.675405 17472 solver.cpp:244]     Train net output #1: loss = 0.000185896 (* 1 = 0.000185896 loss)\nI0819 15:57:43.760960 17472 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0819 16:00:00.355226 17472 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 16:01:21.844328 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71012\nI0819 16:01:21.844667 17472 solver.cpp:404]     Test net output #1: loss = 1.55739 (* 1 = 1.55739 loss)\nI0819 16:01:23.165246 17472 solver.cpp:228] Iteration 77500, loss = 0.000174727\nI0819 16:01:23.165288 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:01:23.165311 17472 solver.cpp:244]     Train net output #1: loss = 0.000174812 (* 1 = 0.000174812 loss)\nI0819 16:01:23.245714 17472 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0819 16:03:39.961387 17472 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 16:05:01.453492 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0819 16:05:01.453805 17472 solver.cpp:404]     Test net output #1: loss = 1.55692 (* 1 = 1.55692 loss)\nI0819 16:05:02.774543 17472 solver.cpp:228] Iteration 77600, loss = 0.00017277\nI0819 16:05:02.774585 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:05:02.774607 17472 solver.cpp:244]     Train net output #1: loss = 0.000172855 (* 1 = 0.000172855 loss)\nI0819 16:05:02.863046 17472 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0819 16:07:19.515449 17472 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 16:08:41.005220 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7104\nI0819 16:08:41.005519 17472 solver.cpp:404]     Test net output #1: loss = 1.5575 (* 1 = 1.5575 loss)\nI0819 16:08:42.326119 17472 solver.cpp:228] Iteration 77700, loss = 0.000163891\nI0819 16:08:42.326162 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:08:42.326186 17472 
solver.cpp:244]     Train net output #1: loss = 0.000163976 (* 1 = 0.000163976 loss)\nI0819 16:08:42.409312 17472 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0819 16:10:59.285549 17472 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 16:12:20.781982 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71044\nI0819 16:12:20.782335 17472 solver.cpp:404]     Test net output #1: loss = 1.55771 (* 1 = 1.55771 loss)\nI0819 16:12:22.102789 17472 solver.cpp:228] Iteration 77800, loss = 0.000192863\nI0819 16:12:22.102830 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:12:22.102854 17472 solver.cpp:244]     Train net output #1: loss = 0.000192948 (* 1 = 0.000192948 loss)\nI0819 16:12:22.190794 17472 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0819 16:14:39.039181 17472 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 16:16:00.511543 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 16:16:00.511880 17472 solver.cpp:404]     Test net output #1: loss = 1.55804 (* 1 = 1.55804 loss)\nI0819 16:16:01.832334 17472 solver.cpp:228] Iteration 77900, loss = 0.000168375\nI0819 16:16:01.832371 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:16:01.832387 17472 solver.cpp:244]     Train net output #1: loss = 0.00016846 (* 1 = 0.00016846 loss)\nI0819 16:16:01.922791 17472 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0819 16:18:18.447372 17472 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:19:39.925200 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 16:19:39.925492 17472 solver.cpp:404]     Test net output #1: loss = 1.55763 (* 1 = 1.55763 loss)\nI0819 16:19:41.244549 17472 solver.cpp:228] Iteration 78000, loss = 0.000188177\nI0819 16:19:41.244585 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:19:41.244599 17472 solver.cpp:244]     Train net output #1: loss = 0.000188262 (* 1 = 0.000188262 loss)\nI0819 16:19:41.336889 17472 
sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0819 16:21:57.808712 17472 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:23:19.278600 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 16:23:19.278961 17472 solver.cpp:404]     Test net output #1: loss = 1.55768 (* 1 = 1.55768 loss)\nI0819 16:23:20.598295 17472 solver.cpp:228] Iteration 78100, loss = 0.000176037\nI0819 16:23:20.598331 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:23:20.598345 17472 solver.cpp:244]     Train net output #1: loss = 0.000176122 (* 1 = 0.000176122 loss)\nI0819 16:23:20.684054 17472 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0819 16:25:37.177049 17472 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 16:26:58.638556 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0819 16:26:58.638814 17472 solver.cpp:404]     Test net output #1: loss = 1.55731 (* 1 = 1.55731 loss)\nI0819 16:26:59.958519 17472 solver.cpp:228] Iteration 78200, loss = 0.000170952\nI0819 16:26:59.958557 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:26:59.958573 17472 solver.cpp:244]     Train net output #1: loss = 0.000171037 (* 1 = 0.000171037 loss)\nI0819 16:27:00.044816 17472 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0819 16:29:16.794487 17472 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 16:30:38.256974 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70992\nI0819 16:30:38.257272 17472 solver.cpp:404]     Test net output #1: loss = 1.55776 (* 1 = 1.55776 loss)\nI0819 16:30:39.576468 17472 solver.cpp:228] Iteration 78300, loss = 0.000172216\nI0819 16:30:39.576505 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:30:39.576521 17472 solver.cpp:244]     Train net output #1: loss = 0.000172301 (* 1 = 0.000172301 loss)\nI0819 16:30:39.661803 17472 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0819 16:32:56.163300 17472 solver.cpp:337] Iteration 78400, 
Testing net (#0)\nI0819 16:34:17.647016 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71016\nI0819 16:34:17.647395 17472 solver.cpp:404]     Test net output #1: loss = 1.55788 (* 1 = 1.55788 loss)\nI0819 16:34:18.967389 17472 solver.cpp:228] Iteration 78400, loss = 0.000179223\nI0819 16:34:18.967427 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:34:18.967440 17472 solver.cpp:244]     Train net output #1: loss = 0.000179308 (* 1 = 0.000179308 loss)\nI0819 16:34:19.052731 17472 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0819 16:36:35.873085 17472 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 16:37:57.371436 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70928\nI0819 16:37:57.371803 17472 solver.cpp:404]     Test net output #1: loss = 1.55798 (* 1 = 1.55798 loss)\nI0819 16:37:58.692548 17472 solver.cpp:228] Iteration 78500, loss = 0.000165452\nI0819 16:37:58.692589 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:37:58.692611 17472 solver.cpp:244]     Train net output #1: loss = 0.000165537 (* 1 = 0.000165537 loss)\nI0819 16:37:58.779422 17472 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0819 16:40:15.536499 17472 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 16:41:37.052786 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70996\nI0819 16:41:37.053165 17472 solver.cpp:404]     Test net output #1: loss = 1.55754 (* 1 = 1.55754 loss)\nI0819 16:41:38.373535 17472 solver.cpp:228] Iteration 78600, loss = 0.000181974\nI0819 16:41:38.373574 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:41:38.373597 17472 solver.cpp:244]     Train net output #1: loss = 0.000182059 (* 1 = 0.000182059 loss)\nI0819 16:41:38.463951 17472 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0819 16:43:55.027410 17472 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 16:45:16.575285 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70972\nI0819 
16:45:16.575650 17472 solver.cpp:404]     Test net output #1: loss = 1.55779 (* 1 = 1.55779 loss)\nI0819 16:45:17.896085 17472 solver.cpp:228] Iteration 78700, loss = 0.000181241\nI0819 16:45:17.896124 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:45:17.896147 17472 solver.cpp:244]     Train net output #1: loss = 0.000181326 (* 1 = 0.000181326 loss)\nI0819 16:45:17.980343 17472 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0819 16:47:34.806098 17472 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 16:48:56.439888 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7102\nI0819 16:48:56.440271 17472 solver.cpp:404]     Test net output #1: loss = 1.55736 (* 1 = 1.55736 loss)\nI0819 16:48:57.760864 17472 solver.cpp:228] Iteration 78800, loss = 0.000190026\nI0819 16:48:57.760905 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:48:57.760928 17472 solver.cpp:244]     Train net output #1: loss = 0.000190111 (* 1 = 0.000190111 loss)\nI0819 16:48:57.845907 17472 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0819 16:51:14.616998 17472 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 16:52:36.147277 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70976\nI0819 16:52:36.147645 17472 solver.cpp:404]     Test net output #1: loss = 1.55783 (* 1 = 1.55783 loss)\nI0819 16:52:37.468027 17472 solver.cpp:228] Iteration 78900, loss = 0.000170002\nI0819 16:52:37.468067 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:52:37.468096 17472 solver.cpp:244]     Train net output #1: loss = 0.000170087 (* 1 = 0.000170087 loss)\nI0819 16:52:37.557190 17472 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0819 16:54:54.277429 17472 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 16:56:15.773270 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71012\nI0819 16:56:15.773641 17472 solver.cpp:404]     Test net output #1: loss = 1.55713 (* 1 = 1.55713 loss)\nI0819 
16:56:17.093855 17472 solver.cpp:228] Iteration 79000, loss = 0.000207984\nI0819 16:56:17.093895 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:56:17.093917 17472 solver.cpp:244]     Train net output #1: loss = 0.000208069 (* 1 = 0.000208069 loss)\nI0819 16:56:17.179544 17472 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0819 16:58:33.652160 17472 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 16:59:55.137486 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70968\nI0819 16:59:55.137852 17472 solver.cpp:404]     Test net output #1: loss = 1.55782 (* 1 = 1.55782 loss)\nI0819 16:59:56.458390 17472 solver.cpp:228] Iteration 79100, loss = 0.000176437\nI0819 16:59:56.458431 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:59:56.458454 17472 solver.cpp:244]     Train net output #1: loss = 0.000176522 (* 1 = 0.000176522 loss)\nI0819 16:59:56.545833 17472 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0819 17:02:13.034688 17472 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 17:03:34.525615 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70956\nI0819 17:03:34.525987 17472 solver.cpp:404]     Test net output #1: loss = 1.55766 (* 1 = 1.55766 loss)\nI0819 17:03:35.846392 17472 solver.cpp:228] Iteration 79200, loss = 0.0001704\nI0819 17:03:35.846431 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:03:35.846453 17472 solver.cpp:244]     Train net output #1: loss = 0.000170485 (* 1 = 0.000170485 loss)\nI0819 17:03:35.935726 17472 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0819 17:05:52.694615 17472 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 17:07:14.226714 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70992\nI0819 17:07:14.227088 17472 solver.cpp:404]     Test net output #1: loss = 1.55772 (* 1 = 1.55772 loss)\nI0819 17:07:15.546972 17472 solver.cpp:228] Iteration 79300, loss = 0.000188412\nI0819 17:07:15.547009 17472 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:07:15.547034 17472 solver.cpp:244]     Train net output #1: loss = 0.000188497 (* 1 = 0.000188497 loss)\nI0819 17:07:15.630049 17472 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0819 17:09:32.355381 17472 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 17:10:53.863533 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7098\nI0819 17:10:53.863912 17472 solver.cpp:404]     Test net output #1: loss = 1.55711 (* 1 = 1.55711 loss)\nI0819 17:10:55.184214 17472 solver.cpp:228] Iteration 79400, loss = 0.000212036\nI0819 17:10:55.184254 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:10:55.184276 17472 solver.cpp:244]     Train net output #1: loss = 0.000212121 (* 1 = 0.000212121 loss)\nI0819 17:10:55.266715 17472 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0819 17:13:11.912482 17472 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 17:14:33.420168 17472 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0819 17:14:33.420548 17472 solver.cpp:404]     Test net output #1: loss = 1.55701 (* 1 = 1.55701 loss)\nI0819 17:14:34.740751 17472 solver.cpp:228] Iteration 79500, loss = 0.000191327\nI0819 17:14:34.740789 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:14:34.740813 17472 solver.cpp:244]     Train net output #1: loss = 0.000191412 (* 1 = 0.000191412 loss)\nI0819 17:14:34.828171 17472 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0819 17:16:51.721963 17472 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 17:18:13.235329 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70972\nI0819 17:18:13.235707 17472 solver.cpp:404]     Test net output #1: loss = 1.55775 (* 1 = 1.55775 loss)\nI0819 17:18:14.556625 17472 solver.cpp:228] Iteration 79600, loss = 0.000171789\nI0819 17:18:14.556663 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:18:14.556679 17472 solver.cpp:244]     Train net 
output #1: loss = 0.000171874 (* 1 = 0.000171874 loss)\nI0819 17:18:14.645334 17472 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0819 17:20:31.661733 17472 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:21:53.147940 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70976\nI0819 17:21:53.148315 17472 solver.cpp:404]     Test net output #1: loss = 1.55818 (* 1 = 1.55818 loss)\nI0819 17:21:54.467942 17472 solver.cpp:228] Iteration 79700, loss = 0.000177069\nI0819 17:21:54.467978 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:21:54.467993 17472 solver.cpp:244]     Train net output #1: loss = 0.000177154 (* 1 = 0.000177154 loss)\nI0819 17:21:54.557747 17472 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0819 17:24:11.430588 17472 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 17:25:32.915563 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70964\nI0819 17:25:32.915940 17472 solver.cpp:404]     Test net output #1: loss = 1.55803 (* 1 = 1.55803 loss)\nI0819 17:25:34.235672 17472 solver.cpp:228] Iteration 79800, loss = 0.000192497\nI0819 17:25:34.235709 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:25:34.235724 17472 solver.cpp:244]     Train net output #1: loss = 0.000192582 (* 1 = 0.000192582 loss)\nI0819 17:25:34.323603 17472 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0819 17:27:50.914536 17472 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 17:29:12.400585 17472 solver.cpp:404]     Test net output #0: accuracy = 0.7096\nI0819 17:29:12.400959 17472 solver.cpp:404]     Test net output #1: loss = 1.55848 (* 1 = 1.55848 loss)\nI0819 17:29:13.720710 17472 solver.cpp:228] Iteration 79900, loss = 0.000175704\nI0819 17:29:13.720746 17472 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:29:13.720762 17472 solver.cpp:244]     Train net output #1: loss = 0.000175789 (* 1 = 0.000175789 loss)\nI0819 17:29:13.804931 17472 sgd_solver.cpp:166] Iteration 
79900, lr = 0.0035\nI0819 17:31:30.719733 17472 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35AdaDeltaFig9_iter_80000.caffemodel\nI0819 17:31:31.129222 17472 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35AdaDeltaFig9_iter_80000.solverstate\nI0819 17:31:31.593834 17472 solver.cpp:317] Iteration 80000, loss = 0.000183374\nI0819 17:31:31.593889 17472 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 17:32:53.068418 17472 solver.cpp:404]     Test net output #0: accuracy = 0.70968\nI0819 17:32:53.068795 17472 solver.cpp:404]     Test net output #1: loss = 1.55791 (* 1 = 1.55791 loss)\nI0819 17:32:53.068807 17472 solver.cpp:322] Optimization Done.\nI0819 17:32:58.453364 17472 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35AdaGradFig9",
    "content": "I0817 16:27:30.955082 17344 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:27:30.957391 17344 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:27:30.958602 17344 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:27:30.959817 17344 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:27:30.961024 17344 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:27:30.962247 17344 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:27:30.963469 17344 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:27:30.964694 17344 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:27:30.965922 17344 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:27:31.379016 17344 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35AdaGradFig9\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\ntype: \"AdaGrad\"\nI0817 16:27:31.382930 17344 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:27:31.400795 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:31.400882 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:31.401937 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:27:31.401993 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:27:31.402019 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:27:31.402040 17344 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:27:31.402060 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:27:31.402076 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:27:31.402093 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:27:31.402112 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:27:31.402132 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:27:31.402149 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:27:31.402168 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:27:31.402184 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:27:31.402201 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:27:31.402220 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:27:31.402238 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:27:31.402256 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:27:31.402274 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:27:31.402292 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:27:31.402309 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:27:31.402326 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:27:31.402361 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:27:31.402379 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:27:31.402403 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:27:31.402421 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:27:31.402439 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:27:31.402454 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:27:31.402473 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:27:31.402490 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:27:31.402508 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:27:31.402524 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:27:31.402544 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:27:31.402560 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:27:31.402578 17344 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:27:31.402593 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:27:31.402613 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:27:31.402631 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:27:31.402649 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:27:31.402667 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:27:31.402684 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:27:31.402703 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:27:31.402725 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:27:31.402742 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:27:31.402758 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:27:31.402776 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:27:31.402796 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:27:31.402822 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:27:31.402842 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:27:31.402858 17344 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:27:31.402878 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:27:31.402894 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:27:31.402910 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:27:31.402938 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:27:31.402958 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:27:31.402976 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:27:31.402994 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:27:31.403009 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:27:31.404763 17344 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\nI0817 16:27:31.406841 17344 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:27:31.408052 17344 net.cpp:100] Creating Layer dataLayer\nI0817 16:27:31.408125 17344 net.cpp:408] dataLayer -> data_top\nI0817 16:27:31.408318 17344 net.cpp:408] dataLayer -> label\nI0817 16:27:31.408433 17344 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:27:31.418701 17349 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:27:31.440558 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:31.447453 17344 net.cpp:150] Setting up dataLayer\nI0817 16:27:31.447515 17344 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:27:31.447527 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.447533 17344 net.cpp:165] Memory required for data: 1536500\nI0817 16:27:31.447549 17344 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:27:31.447563 17344 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:27:31.447572 17344 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:27:31.447590 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:27:31.447605 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:27:31.447692 17344 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:27:31.447705 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.447712 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.447717 17344 net.cpp:165] Memory required for data: 1537500\nI0817 16:27:31.447722 17344 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:27:31.447784 17344 net.cpp:100] Creating Layer pre_conv\nI0817 16:27:31.447798 17344 net.cpp:434] pre_conv <- data_top\nI0817 16:27:31.447818 17344 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:27:31.449592 17344 net.cpp:150] Setting up pre_conv\nI0817 16:27:31.449614 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.449620 17344 net.cpp:165] Memory required for data: 9729500\nI0817 16:27:31.449681 17344 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:27:31.449749 17344 net.cpp:100] Creating Layer pre_bn\nI0817 16:27:31.449761 17344 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:27:31.449770 17344 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:27:31.449916 17350 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:31.450100 17344 net.cpp:150] Setting up pre_bn\nI0817 16:27:31.450119 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.450124 17344 net.cpp:165] Memory required for data: 17921500\nI0817 16:27:31.450141 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:31.450187 17344 net.cpp:100] Creating Layer pre_scale\nI0817 16:27:31.450196 17344 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:27:31.450206 17344 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:27:31.450366 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:31.450613 17344 net.cpp:150] Setting up pre_scale\nI0817 16:27:31.450628 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.450634 17344 net.cpp:165] Memory required for data: 26113500\nI0817 16:27:31.450644 17344 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:27:31.450686 17344 net.cpp:100] Creating Layer pre_relu\nI0817 16:27:31.450697 17344 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:27:31.450709 17344 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:27:31.450721 17344 net.cpp:150] Setting up pre_relu\nI0817 16:27:31.450728 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.450732 17344 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:27:31.450737 17344 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:27:31.450744 17344 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:27:31.450749 17344 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:27:31.450759 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:27:31.450769 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:27:31.450822 17344 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:27:31.450834 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.450841 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.450845 17344 net.cpp:165] Memory required for data: 50689500\nI0817 16:27:31.450850 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:27:31.450866 17344 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:27:31.450872 17344 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:27:31.450881 17344 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:27:31.451189 17344 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:27:31.451203 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.451208 17344 net.cpp:165] Memory required for data: 58881500\nI0817 16:27:31.451220 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:27:31.451234 17344 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:27:31.451241 17344 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:27:31.451249 17344 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:27:31.451474 17344 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:27:31.451488 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.451493 17344 net.cpp:165] Memory required for data: 67073500\nI0817 16:27:31.451503 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:31.451515 17344 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:27:31.451520 17344 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:27:31.451529 17344 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.451584 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:31.451721 17344 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:27:31.451735 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.451740 17344 net.cpp:165] Memory required for data: 75265500\nI0817 16:27:31.451748 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:27:31.451764 17344 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:27:31.451771 17344 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:27:31.451781 17344 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.451791 17344 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:27:31.451797 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.451808 17344 net.cpp:165] Memory required for data: 83457500\nI0817 16:27:31.451814 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:27:31.451830 17344 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:27:31.451836 17344 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:27:31.451845 17344 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:27:31.452159 17344 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:27:31.452174 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.452179 17344 net.cpp:165] Memory required for data: 91649500\nI0817 16:27:31.452188 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:27:31.452200 17344 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:27:31.452205 17344 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:27:31.452214 17344 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:27:31.452440 17344 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:27:31.452453 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.452458 17344 net.cpp:165] Memory required for data: 99841500\nI0817 16:27:31.452472 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:31.452484 17344 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:27:31.452489 17344 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:27:31.452497 17344 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.452551 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:31.452687 17344 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:27:31.452700 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.452705 17344 net.cpp:165] Memory required for data: 108033500\nI0817 16:27:31.452714 17344 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:27:31.452764 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:27:31.452775 17344 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:27:31.452786 17344 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:27:31.452795 17344 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:27:31.452883 17344 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:27:31.452899 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.452904 17344 net.cpp:165] Memory required for data: 116225500\nI0817 16:27:31.452910 17344 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:27:31.452919 17344 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:27:31.452924 17344 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:27:31.452935 17344 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.452944 17344 net.cpp:150] Setting up L1_b1_relu\nI0817 16:27:31.452951 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.452956 17344 net.cpp:165] Memory required for data: 124417500\nI0817 16:27:31.452961 17344 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.452971 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.452976 17344 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:27:31.452983 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:31.452992 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:31.453038 17344 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.453050 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.453057 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.453068 17344 net.cpp:165] Memory required for data: 140801500\nI0817 16:27:31.453074 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:27:31.453085 17344 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:27:31.453091 17344 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:31.453104 17344 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:27:31.453403 17344 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:27:31.453418 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.453423 17344 net.cpp:165] Memory required for data: 148993500\nI0817 16:27:31.453431 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:27:31.453440 17344 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:27:31.453446 17344 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:27:31.453459 17344 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:27:31.453694 17344 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:27:31.453709 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.453714 17344 net.cpp:165] Memory required for data: 157185500\nI0817 16:27:31.453725 17344 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:31.453734 17344 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:27:31.453740 17344 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:27:31.453747 17344 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.453799 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:31.453945 17344 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:27:31.453958 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.453963 17344 net.cpp:165] Memory required for data: 165377500\nI0817 16:27:31.453972 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:27:31.453984 17344 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:27:31.453989 17344 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:27:31.453999 17344 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.454008 17344 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:27:31.454015 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.454020 17344 net.cpp:165] Memory required for data: 173569500\nI0817 16:27:31.454025 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:27:31.454036 17344 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:27:31.454041 17344 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:27:31.454052 17344 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:27:31.454350 17344 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:27:31.454365 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.454370 17344 net.cpp:165] Memory required for data: 181761500\nI0817 16:27:31.454377 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:27:31.454386 17344 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:27:31.454392 17344 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:27:31.454406 17344 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:27:31.454643 17344 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:27:31.454655 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.454660 17344 net.cpp:165] Memory required for data: 189953500\nI0817 16:27:31.454679 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:31.454687 17344 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:27:31.454694 17344 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:27:31.454704 17344 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.454756 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:31.454897 17344 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:27:31.454910 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.454916 17344 net.cpp:165] Memory required for data: 198145500\nI0817 16:27:31.454924 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:27:31.454942 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:27:31.454949 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:27:31.454957 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:31.454963 17344 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:27:31.454996 17344 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:27:31.455008 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.455013 17344 net.cpp:165] Memory required for data: 206337500\nI0817 16:27:31.455018 17344 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:27:31.455025 17344 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:27:31.455030 17344 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:27:31.455037 17344 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:27:31.455046 17344 net.cpp:150] Setting up L1_b2_relu\nI0817 16:27:31.455052 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:27:31.455057 17344 net.cpp:165] Memory required for data: 214529500\nI0817 16:27:31.455062 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.455070 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.455075 17344 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:27:31.455083 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:31.455093 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:31.455133 17344 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.455144 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.455152 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.455155 17344 net.cpp:165] Memory required for data: 230913500\nI0817 16:27:31.455160 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:27:31.455174 17344 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:27:31.455181 17344 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:31.455189 17344 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:27:31.455490 17344 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:27:31.455504 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.455509 17344 net.cpp:165] Memory required for data: 239105500\nI0817 16:27:31.455518 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:27:31.455530 17344 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:27:31.455536 17344 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:27:31.455546 17344 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:27:31.455777 17344 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:27:31.455790 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:27:31.455795 17344 net.cpp:165] Memory required for data: 247297500\nI0817 16:27:31.455811 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:31.455821 17344 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:27:31.455826 17344 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:27:31.455837 17344 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.455888 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:31.456030 17344 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:27:31.456046 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.456051 17344 net.cpp:165] Memory required for data: 255489500\nI0817 16:27:31.456060 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:27:31.456068 17344 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:27:31.456074 17344 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:27:31.456081 17344 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.456090 17344 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:27:31.456104 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.456110 17344 net.cpp:165] Memory required for data: 263681500\nI0817 16:27:31.456115 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:27:31.456128 17344 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:27:31.456133 17344 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:27:31.456145 17344 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:27:31.456452 17344 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:27:31.456465 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.456470 17344 net.cpp:165] Memory required for data: 271873500\nI0817 16:27:31.456480 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:27:31.456496 17344 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:27:31.456501 17344 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:27:31.456509 17344 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:27:31.456740 17344 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:27:31.456753 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.456758 17344 net.cpp:165] Memory required for data: 280065500\nI0817 16:27:31.456768 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:31.456778 17344 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:27:31.456782 17344 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:27:31.456794 17344 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.456852 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:31.456989 17344 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:27:31.457003 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457008 17344 net.cpp:165] Memory required for data: 288257500\nI0817 16:27:31.457017 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:27:31.457026 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:27:31.457031 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:27:31.457038 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:31.457046 17344 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:27:31.457078 17344 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:27:31.457087 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457093 17344 net.cpp:165] Memory required for data: 296449500\nI0817 16:27:31.457098 17344 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:27:31.457108 17344 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:27:31.457113 17344 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:27:31.457120 17344 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.457129 17344 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:27:31.457135 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457139 17344 net.cpp:165] Memory required for data: 304641500\nI0817 16:27:31.457144 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.457154 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.457159 17344 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:27:31.457166 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:31.457175 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:31.457216 17344 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.457229 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457237 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457242 17344 net.cpp:165] Memory required for data: 321025500\nI0817 16:27:31.457245 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:27:31.457257 17344 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:27:31.457263 17344 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:31.457278 17344 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:27:31.457592 17344 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:27:31.457605 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457610 17344 net.cpp:165] Memory required for data: 329217500\nI0817 16:27:31.457619 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:27:31.457630 17344 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:27:31.457636 17344 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:27:31.457644 17344 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:27:31.457890 17344 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:27:31.457904 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.457909 17344 net.cpp:165] Memory required for data: 337409500\nI0817 16:27:31.457921 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:27:31.457928 17344 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:27:31.457934 17344 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:27:31.457945 17344 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.457998 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:27:31.458137 17344 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:27:31.458149 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.458155 17344 net.cpp:165] Memory required for data: 345601500\nI0817 16:27:31.458163 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:27:31.458171 17344 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:27:31.458178 17344 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:27:31.458184 17344 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.458194 17344 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:27:31.458200 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.458205 17344 net.cpp:165] Memory required for data: 353793500\nI0817 16:27:31.458209 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:27:31.458223 17344 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:27:31.458230 17344 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:27:31.458240 17344 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:27:31.458545 17344 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:27:31.458559 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.458564 17344 net.cpp:165] Memory required for data: 361985500\nI0817 16:27:31.458572 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:27:31.458583 17344 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:27:31.458590 17344 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:27:31.458600 17344 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:27:31.458847 17344 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:27:31.458860 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.458865 17344 net.cpp:165] Memory required for data: 370177500\nI0817 16:27:31.458875 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:27:31.458886 17344 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:27:31.458894 17344 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:27:31.458900 17344 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.458953 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:27:31.459087 17344 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:27:31.459100 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459105 17344 net.cpp:165] Memory required for data: 378369500\nI0817 16:27:31.459115 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:27:31.459122 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:27:31.459128 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:27:31.459136 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:31.459146 17344 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:27:31.459183 17344 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:27:31.459197 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459203 17344 net.cpp:165] Memory required for data: 386561500\nI0817 16:27:31.459208 17344 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:27:31.459215 17344 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:27:31.459221 17344 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:27:31.459228 17344 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:27:31.459237 17344 net.cpp:150] Setting up L1_b4_relu\nI0817 16:27:31.459244 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459249 17344 net.cpp:165] Memory required for data: 394753500\nI0817 16:27:31.459254 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.459264 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.459270 17344 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:27:31.459276 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:27:31.459285 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:27:31.459329 17344 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.459341 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459347 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459352 17344 net.cpp:165] Memory required for data: 411137500\nI0817 16:27:31.459357 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:27:31.459367 17344 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:27:31.459372 17344 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:27:31.459384 17344 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:27:31.459689 17344 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:27:31.459703 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.459708 17344 net.cpp:165] Memory required for data: 419329500\nI0817 16:27:31.459728 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:27:31.459739 17344 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:27:31.459745 17344 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:27:31.459756 17344 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:27:31.460000 17344 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:27:31.460013 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.460018 17344 net.cpp:165] Memory required for data: 427521500\nI0817 16:27:31.460029 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:27:31.460038 17344 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:27:31.460043 17344 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:27:31.460054 17344 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.460103 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:27:31.460244 17344 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:27:31.460258 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.460263 17344 net.cpp:165] Memory required for data: 435713500\nI0817 16:27:31.460271 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:27:31.460279 17344 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:27:31.460285 17344 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:27:31.460295 17344 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.460305 17344 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:27:31.460311 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.460316 17344 net.cpp:165] Memory required for data: 443905500\nI0817 16:27:31.460321 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:27:31.460335 17344 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:27:31.460347 17344 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:27:31.460356 17344 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:27:31.460666 17344 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:27:31.460680 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.460685 17344 net.cpp:165] Memory required for data: 452097500\nI0817 16:27:31.460695 17344 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:27:31.460703 17344 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:27:31.460712 17344 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:27:31.460721 17344 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:27:31.460959 17344 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:27:31.460973 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.460978 17344 net.cpp:165] Memory required for data: 460289500\nI0817 16:27:31.460988 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:27:31.460997 17344 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:27:31.461004 17344 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:27:31.461014 17344 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.461064 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:27:31.461200 17344 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:27:31.461216 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461221 17344 net.cpp:165] Memory required for data: 468481500\nI0817 16:27:31.461231 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:27:31.461239 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:27:31.461246 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:27:31.461252 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:27:31.461261 17344 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:27:31.461294 17344 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:27:31.461307 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461311 17344 net.cpp:165] Memory required for data: 476673500\nI0817 16:27:31.461316 17344 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:27:31.461326 17344 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:27:31.461333 17344 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:27:31.461338 17344 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.461347 17344 net.cpp:150] Setting up L1_b5_relu\nI0817 16:27:31.461354 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461359 17344 net.cpp:165] Memory required for data: 484865500\nI0817 16:27:31.461364 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.461374 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.461378 17344 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:27:31.461386 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:27:31.461395 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:27:31.461437 17344 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.461452 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461458 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461462 17344 net.cpp:165] Memory required for data: 501249500\nI0817 16:27:31.461467 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:27:31.461478 17344 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:27:31.461484 17344 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:27:31.461493 17344 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:27:31.461809 17344 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:27:31.461824 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.461829 17344 net.cpp:165] Memory required for data: 509441500\nI0817 16:27:31.461845 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:27:31.461858 17344 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:27:31.461863 17344 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:27:31.461871 17344 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:27:31.462105 17344 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:27:31.462118 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.462123 17344 net.cpp:165] Memory required for data: 517633500\nI0817 16:27:31.462133 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:27:31.462142 17344 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:27:31.462148 17344 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:27:31.462158 17344 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.462210 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:27:31.462348 17344 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:27:31.462360 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.462365 17344 net.cpp:165] Memory required for data: 525825500\nI0817 16:27:31.462374 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:27:31.462383 17344 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:27:31.462388 17344 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:27:31.462395 17344 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.462404 17344 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:27:31.462411 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.462415 17344 net.cpp:165] Memory required for data: 534017500\nI0817 16:27:31.462420 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:27:31.462435 17344 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:27:31.462440 17344 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:27:31.462450 17344 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:27:31.462759 17344 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:27:31.462774 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.462777 17344 
net.cpp:165] Memory required for data: 542209500\nI0817 16:27:31.462786 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:27:31.462798 17344 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:27:31.462810 17344 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:27:31.462821 17344 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:27:31.463063 17344 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:27:31.463075 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463080 17344 net.cpp:165] Memory required for data: 550401500\nI0817 16:27:31.463090 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:27:31.463099 17344 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:27:31.463105 17344 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:27:31.463115 17344 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.463168 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:27:31.463305 17344 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:27:31.463320 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463325 17344 net.cpp:165] Memory required for data: 558593500\nI0817 16:27:31.463335 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:27:31.463351 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:27:31.463357 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:27:31.463364 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:27:31.463372 17344 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:27:31.463407 17344 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:27:31.463418 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463423 17344 net.cpp:165] Memory required for data: 566785500\nI0817 16:27:31.463429 17344 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:27:31.463444 17344 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:27:31.463450 17344 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:27:31.463460 17344 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.463470 17344 net.cpp:150] Setting up L1_b6_relu\nI0817 16:27:31.463477 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463482 17344 net.cpp:165] Memory required for data: 574977500\nI0817 16:27:31.463486 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.463493 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.463498 17344 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:27:31.463506 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:27:31.463515 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:27:31.463562 17344 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.463572 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463579 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463583 17344 net.cpp:165] Memory required for data: 591361500\nI0817 16:27:31.463588 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:27:31.463599 17344 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:27:31.463605 17344 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:27:31.463618 17344 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:27:31.463937 17344 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:27:31.463951 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.463956 17344 net.cpp:165] Memory required for data: 599553500\nI0817 16:27:31.463964 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:27:31.463973 17344 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:27:31.463979 17344 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:27:31.463990 17344 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:27:31.464228 17344 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:27:31.464241 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.464246 17344 net.cpp:165] Memory required for data: 607745500\nI0817 16:27:31.464257 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:27:31.464267 17344 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:27:31.464273 17344 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:27:31.464280 17344 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.464332 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:27:31.464474 17344 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:27:31.464488 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.464493 17344 net.cpp:165] Memory required for data: 615937500\nI0817 16:27:31.464500 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:27:31.464511 17344 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:27:31.464517 17344 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:27:31.464525 17344 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.464534 17344 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:27:31.464541 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.464545 17344 net.cpp:165] Memory required for data: 624129500\nI0817 16:27:31.464550 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:27:31.464565 17344 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:27:31.464571 17344 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:27:31.464581 17344 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:27:31.464895 17344 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:27:31.464910 17344 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.464915 17344 net.cpp:165] Memory required for data: 632321500\nI0817 16:27:31.464931 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:27:31.464941 17344 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:27:31.464946 17344 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:27:31.464954 17344 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:27:31.465193 17344 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:27:31.465205 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465210 17344 net.cpp:165] Memory required for data: 640513500\nI0817 16:27:31.465220 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:27:31.465231 17344 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:27:31.465237 17344 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:27:31.465245 17344 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.465301 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:27:31.465440 17344 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:27:31.465451 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465456 17344 net.cpp:165] Memory required for data: 648705500\nI0817 16:27:31.465466 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:27:31.465474 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:27:31.465481 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:27:31.465487 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:27:31.465497 17344 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:27:31.465533 17344 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:27:31.465543 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465548 17344 net.cpp:165] Memory required for data: 656897500\nI0817 16:27:31.465553 17344 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:27:31.465561 17344 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:27:31.465566 17344 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:27:31.465574 17344 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.465585 17344 net.cpp:150] Setting up L1_b7_relu\nI0817 16:27:31.465592 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465596 17344 net.cpp:165] Memory required for data: 665089500\nI0817 16:27:31.465601 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.465608 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.465615 17344 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:27:31.465620 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:27:31.465629 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:27:31.465674 17344 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.465685 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465692 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.465697 17344 net.cpp:165] Memory required for data: 681473500\nI0817 16:27:31.465701 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:27:31.465713 17344 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:27:31.465719 17344 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:27:31.465730 17344 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:27:31.466048 17344 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:27:31.466063 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.466068 17344 net.cpp:165] Memory required for data: 689665500\nI0817 16:27:31.466076 17344 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:27:31.466085 17344 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:27:31.466091 17344 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:27:31.466104 17344 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:27:31.466351 17344 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:27:31.466364 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.466369 17344 net.cpp:165] Memory required for data: 697857500\nI0817 16:27:31.466379 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:27:31.466392 17344 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:27:31.466398 17344 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:27:31.466405 17344 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.466457 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:27:31.466600 17344 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:27:31.466614 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.466619 17344 net.cpp:165] Memory required for data: 706049500\nI0817 16:27:31.466627 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:27:31.466635 17344 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:27:31.466641 17344 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:27:31.466651 17344 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.466661 17344 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:27:31.466668 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.466672 17344 net.cpp:165] Memory required for data: 714241500\nI0817 16:27:31.466677 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:27:31.466691 17344 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:27:31.466697 17344 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:27:31.466706 17344 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:27:31.467049 17344 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:27:31.467064 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467069 17344 net.cpp:165] Memory required for data: 722433500\nI0817 16:27:31.467078 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:27:31.467090 17344 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:27:31.467097 17344 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:27:31.467104 17344 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:27:31.467350 17344 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:27:31.467361 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467366 17344 net.cpp:165] Memory required for data: 730625500\nI0817 16:27:31.467376 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:27:31.467389 17344 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:27:31.467396 17344 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:27:31.467403 17344 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.467459 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:27:31.467598 17344 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:27:31.467612 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467617 17344 net.cpp:165] Memory required for data: 738817500\nI0817 16:27:31.467625 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:27:31.467634 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:27:31.467640 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:27:31.467648 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:27:31.467658 17344 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:27:31.467689 17344 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:27:31.467701 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467706 17344 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:27:31.467711 17344 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:27:31.467718 17344 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:27:31.467725 17344 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:27:31.467731 17344 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.467739 17344 net.cpp:150] Setting up L1_b8_relu\nI0817 16:27:31.467753 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467758 17344 net.cpp:165] Memory required for data: 755201500\nI0817 16:27:31.467763 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.467773 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.467779 17344 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:27:31.467787 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:27:31.467797 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:27:31.467850 17344 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.467862 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467869 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.467874 17344 net.cpp:165] Memory required for data: 771585500\nI0817 16:27:31.467878 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:27:31.467890 17344 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:27:31.467896 17344 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:27:31.467908 17344 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:27:31.468230 17344 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:27:31.468245 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.468250 17344 net.cpp:165] Memory required for data: 
779777500\nI0817 16:27:31.468258 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:27:31.468272 17344 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:27:31.468279 17344 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:27:31.468287 17344 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:27:31.468533 17344 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:27:31.468546 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.468551 17344 net.cpp:165] Memory required for data: 787969500\nI0817 16:27:31.468561 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:27:31.468570 17344 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:27:31.468576 17344 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:27:31.468583 17344 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.468638 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:27:31.468786 17344 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:27:31.468798 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.468809 17344 net.cpp:165] Memory required for data: 796161500\nI0817 16:27:31.468818 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:27:31.468827 17344 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:27:31.468833 17344 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:27:31.468843 17344 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.468853 17344 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:27:31.468860 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.468865 17344 net.cpp:165] Memory required for data: 804353500\nI0817 16:27:31.468869 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:27:31.468883 17344 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:27:31.468889 17344 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:27:31.468897 17344 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:27:31.469213 17344 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:27:31.469228 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.469233 17344 net.cpp:165] Memory required for data: 812545500\nI0817 16:27:31.469241 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:27:31.469249 17344 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:27:31.469256 17344 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:27:31.469266 17344 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:27:31.469516 17344 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:27:31.469532 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.469537 17344 net.cpp:165] Memory required for data: 820737500\nI0817 16:27:31.469568 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:27:31.469576 17344 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:27:31.469583 17344 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:27:31.469590 17344 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.469645 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:27:31.469784 17344 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:27:31.469796 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.469801 17344 net.cpp:165] Memory required for data: 828929500\nI0817 16:27:31.469816 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:27:31.469826 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:27:31.469832 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:27:31.469840 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:27:31.469851 17344 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:27:31.469882 17344 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:27:31.469892 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:27:31.469897 17344 net.cpp:165] Memory required for data: 837121500\nI0817 16:27:31.469902 17344 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:27:31.469909 17344 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:27:31.469914 17344 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:27:31.469924 17344 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.469934 17344 net.cpp:150] Setting up L1_b9_relu\nI0817 16:27:31.469941 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.469945 17344 net.cpp:165] Memory required for data: 845313500\nI0817 16:27:31.469950 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.469957 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.469962 17344 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:27:31.469974 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:27:31.469983 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:27:31.470028 17344 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.470039 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.470046 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.470051 17344 net.cpp:165] Memory required for data: 861697500\nI0817 16:27:31.470054 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:27:31.470067 17344 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:27:31.470072 17344 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:27:31.470084 17344 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:27:31.470399 17344 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:27:31.470413 17344 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:27:31.470418 17344 net.cpp:165] Memory required for data: 863745500\nI0817 16:27:31.470427 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:27:31.470437 17344 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:27:31.470443 17344 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:27:31.470453 17344 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:27:31.470688 17344 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:27:31.470701 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.470706 17344 net.cpp:165] Memory required for data: 865793500\nI0817 16:27:31.470716 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:31.470729 17344 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:27:31.470741 17344 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:27:31.470749 17344 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.470808 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:31.470949 17344 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:27:31.470963 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.470968 17344 net.cpp:165] Memory required for data: 867841500\nI0817 16:27:31.470976 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:27:31.470988 17344 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:27:31.470993 17344 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:27:31.471001 17344 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.471010 17344 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:27:31.471017 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.471021 17344 net.cpp:165] Memory required for data: 869889500\nI0817 16:27:31.471026 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:27:31.471040 17344 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:27:31.471046 17344 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:27:31.471056 17344 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:27:31.471366 17344 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:27:31.471379 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.471385 17344 net.cpp:165] Memory required for data: 871937500\nI0817 16:27:31.471392 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:27:31.471401 17344 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:27:31.471407 17344 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:27:31.471421 17344 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:27:31.471664 17344 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:27:31.471676 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.471681 17344 net.cpp:165] Memory required for data: 873985500\nI0817 16:27:31.471693 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:31.471706 17344 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:27:31.471712 17344 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:27:31.471719 17344 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.471772 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:31.471923 17344 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:27:31.471936 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.471941 17344 net.cpp:165] Memory required for data: 876033500\nI0817 16:27:31.471951 17344 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:27:31.471964 17344 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:27:31.471971 17344 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:27:31.471979 17344 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:27:31.472064 17344 net.cpp:150] Setting up L2_b1_pool\nI0817 16:27:31.472079 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.472084 17344 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:27:31.472090 17344 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:27:31.472100 17344 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:27:31.472105 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:27:31.472111 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:27:31.472122 17344 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:27:31.472156 17344 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:27:31.472164 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.472169 17344 net.cpp:165] Memory required for data: 880129500\nI0817 16:27:31.472174 17344 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:27:31.472182 17344 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:27:31.472187 17344 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:27:31.472203 17344 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.472213 17344 net.cpp:150] Setting up L2_b1_relu\nI0817 16:27:31.472219 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.472223 17344 net.cpp:165] Memory required for data: 882177500\nI0817 16:27:31.472229 17344 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:27:31.472277 17344 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:27:31.472291 17344 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:27:31.474565 17344 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:27:31.474583 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.474588 17344 net.cpp:165] Memory required for data: 884225500\nI0817 16:27:31.474594 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:27:31.474606 17344 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:27:31.474611 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:27:31.474619 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:27:31.474630 17344 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:27:31.474705 17344 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:27:31.474720 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.474730 17344 net.cpp:165] Memory required for data: 888321500\nI0817 16:27:31.474735 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.474745 17344 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.474750 17344 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:27:31.474757 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:31.474767 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:31.474824 17344 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.474838 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.474843 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.474848 17344 net.cpp:165] Memory required for data: 896513500\nI0817 16:27:31.474853 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:27:31.474869 17344 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:27:31.474875 17344 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:31.474884 17344 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:27:31.476302 17344 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:27:31.476320 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.476325 17344 net.cpp:165] Memory required for data: 900609500\nI0817 16:27:31.476336 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:27:31.476348 17344 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:27:31.476354 17344 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:27:31.476363 17344 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:27:31.476610 17344 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:27:31.476624 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.476629 17344 net.cpp:165] Memory required for data: 904705500\nI0817 16:27:31.476639 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:31.476650 17344 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:27:31.476657 17344 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:27:31.476665 17344 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.476719 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:31.476874 17344 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:27:31.476888 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.476893 17344 net.cpp:165] Memory required for data: 908801500\nI0817 16:27:31.476902 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:27:31.476913 17344 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:27:31.476920 17344 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:27:31.476935 17344 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.476948 17344 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:27:31.476956 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.476961 17344 net.cpp:165] Memory required for data: 912897500\nI0817 16:27:31.476965 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:27:31.476977 17344 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:27:31.476984 17344 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:27:31.476994 17344 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:27:31.477447 17344 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:27:31.477461 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.477466 17344 net.cpp:165] Memory required for data: 916993500\nI0817 16:27:31.477475 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:27:31.477484 17344 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:27:31.477490 17344 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:27:31.477501 17344 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:27:31.477744 17344 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:27:31.477757 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.477762 17344 net.cpp:165] Memory required for data: 921089500\nI0817 16:27:31.477772 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:31.477787 17344 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:27:31.477793 17344 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:27:31.477800 17344 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.477864 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:31.478006 17344 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:27:31.478019 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478024 17344 net.cpp:165] Memory required for data: 925185500\nI0817 16:27:31.478034 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:27:31.478045 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:27:31.478052 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:27:31.478060 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:31.478067 17344 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:27:31.478096 17344 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:27:31.478106 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478111 17344 net.cpp:165] Memory required for data: 929281500\nI0817 16:27:31.478116 17344 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:27:31.478123 17344 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:27:31.478129 17344 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:27:31.478139 17344 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:27:31.478148 17344 net.cpp:150] Setting up L2_b2_relu\nI0817 16:27:31.478155 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478160 17344 net.cpp:165] Memory required for data: 933377500\nI0817 16:27:31.478164 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.478171 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.478176 17344 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:27:31.478184 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:31.478193 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:31.478240 17344 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.478251 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478258 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478263 17344 net.cpp:165] Memory required for data: 941569500\nI0817 16:27:31.478268 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:27:31.478286 17344 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:27:31.478293 17344 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:31.478305 17344 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:27:31.478765 17344 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:27:31.478780 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.478785 17344 net.cpp:165] Memory required for data: 945665500\nI0817 16:27:31.478793 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:27:31.478808 17344 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:27:31.478816 17344 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:27:31.478826 17344 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:27:31.479070 17344 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:27:31.479084 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.479089 17344 net.cpp:165] Memory required for data: 949761500\nI0817 16:27:31.479099 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:31.479110 17344 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:27:31.479116 17344 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:27:31.479123 17344 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.479177 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:31.479321 17344 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:27:31.479333 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.479338 17344 net.cpp:165] Memory required for data: 953857500\nI0817 16:27:31.479347 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:27:31.479360 17344 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:27:31.479367 17344 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:27:31.479374 17344 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.479383 17344 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:27:31.479390 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.479395 17344 net.cpp:165] Memory required for data: 957953500\nI0817 16:27:31.479400 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:27:31.479414 17344 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:27:31.479420 17344 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:27:31.479431 17344 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:27:31.479890 17344 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:27:31.479904 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.479909 17344 net.cpp:165] Memory required for data: 962049500\nI0817 16:27:31.479918 17344 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:27:31.479928 17344 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:27:31.479933 17344 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:27:31.479944 17344 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:27:31.480190 17344 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:27:31.480202 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480207 17344 net.cpp:165] Memory required for data: 966145500\nI0817 16:27:31.480217 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:31.480228 17344 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:27:31.480234 17344 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:27:31.480242 17344 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.480295 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:31.480443 17344 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:27:31.480454 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480459 17344 net.cpp:165] Memory required for data: 970241500\nI0817 16:27:31.480468 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:27:31.480480 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:27:31.480486 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:27:31.480500 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:31.480509 17344 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:27:31.480536 17344 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:27:31.480551 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480556 17344 net.cpp:165] Memory required for data: 974337500\nI0817 16:27:31.480561 17344 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:27:31.480581 17344 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:27:31.480587 17344 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:27:31.480594 17344 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.480603 17344 net.cpp:150] Setting up L2_b3_relu\nI0817 16:27:31.480610 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480614 17344 net.cpp:165] Memory required for data: 978433500\nI0817 16:27:31.480620 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.480628 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.480633 17344 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:27:31.480643 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:31.480653 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:31.480697 17344 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.480712 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480718 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.480723 17344 net.cpp:165] Memory required for data: 986625500\nI0817 16:27:31.480728 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:27:31.480739 17344 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:27:31.480746 17344 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:31.480754 17344 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:27:31.481220 17344 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:27:31.481233 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.481238 17344 net.cpp:165] Memory required for data: 990721500\nI0817 16:27:31.481247 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:27:31.481259 17344 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:27:31.481266 17344 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:27:31.481273 17344 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:27:31.481519 17344 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:27:31.481531 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.481536 17344 net.cpp:165] Memory required for data: 994817500\nI0817 16:27:31.481546 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:27:31.481555 17344 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:27:31.481561 17344 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:27:31.481571 17344 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.481626 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:27:31.481767 17344 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:27:31.481781 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.481784 17344 net.cpp:165] Memory required for data: 998913500\nI0817 16:27:31.481793 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:27:31.481806 17344 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:27:31.481813 17344 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:27:31.481824 17344 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.481837 17344 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:27:31.481843 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.481848 17344 net.cpp:165] Memory required for data: 1003009500\nI0817 16:27:31.481853 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:27:31.481873 17344 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:27:31.481879 17344 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:27:31.481889 17344 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:27:31.482348 17344 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:27:31.482363 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.482368 17344 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:27:31.482376 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:27:31.482388 17344 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:27:31.482395 17344 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:27:31.482403 17344 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:27:31.482643 17344 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:27:31.482656 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.482661 17344 net.cpp:165] Memory required for data: 1011201500\nI0817 16:27:31.482671 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:27:31.482679 17344 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:27:31.482686 17344 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:27:31.482692 17344 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.482748 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:27:31.482903 17344 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:27:31.482920 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.482925 17344 net.cpp:165] Memory required for data: 1015297500\nI0817 16:27:31.482934 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:27:31.482944 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:27:31.482949 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:27:31.482956 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:31.482964 17344 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:27:31.482993 17344 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:27:31.483002 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483007 17344 net.cpp:165] Memory required for data: 1019393500\nI0817 16:27:31.483012 17344 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:27:31.483021 17344 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:27:31.483026 17344 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:27:31.483036 17344 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:27:31.483044 17344 net.cpp:150] Setting up L2_b4_relu\nI0817 16:27:31.483052 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483057 17344 net.cpp:165] Memory required for data: 1023489500\nI0817 16:27:31.483060 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.483068 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.483073 17344 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:27:31.483083 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:27:31.483093 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:27:31.483136 17344 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.483147 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483153 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483158 17344 net.cpp:165] Memory required for data: 1031681500\nI0817 16:27:31.483163 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:27:31.483177 17344 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:27:31.483184 17344 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:27:31.483193 17344 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:27:31.483649 17344 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:27:31.483669 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483675 17344 net.cpp:165] Memory required for data: 1035777500\nI0817 16:27:31.483685 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:27:31.483696 17344 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:27:31.483702 17344 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:27:31.483711 17344 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:27:31.483968 17344 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:27:31.483980 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.483985 17344 net.cpp:165] Memory required for data: 1039873500\nI0817 16:27:31.483995 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:27:31.484004 17344 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:27:31.484010 17344 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:27:31.484020 17344 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.484074 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:27:31.484225 17344 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:27:31.484237 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.484242 17344 net.cpp:165] Memory required for data: 1043969500\nI0817 16:27:31.484251 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:27:31.484259 17344 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:27:31.484266 17344 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:27:31.484272 17344 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.484284 17344 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:27:31.484292 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.484297 17344 net.cpp:165] Memory required for data: 1048065500\nI0817 16:27:31.484300 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:27:31.484311 17344 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:27:31.484320 17344 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:27:31.484328 17344 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:27:31.484793 17344 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:27:31.484812 17344 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.484818 17344 net.cpp:165] Memory required for data: 1052161500\nI0817 16:27:31.484827 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:27:31.484839 17344 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:27:31.484845 17344 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:27:31.484856 17344 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:27:31.485097 17344 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:27:31.485110 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485116 17344 net.cpp:165] Memory required for data: 1056257500\nI0817 16:27:31.485126 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:27:31.485134 17344 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:27:31.485141 17344 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:27:31.485148 17344 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.485203 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:27:31.485347 17344 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:27:31.485363 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485368 17344 net.cpp:165] Memory required for data: 1060353500\nI0817 16:27:31.485375 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:27:31.485384 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:27:31.485390 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:27:31.485397 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:27:31.485405 17344 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:27:31.485435 17344 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:27:31.485443 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485455 17344 net.cpp:165] Memory required for data: 1064449500\nI0817 16:27:31.485460 17344 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:27:31.485467 17344 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:27:31.485473 17344 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:27:31.485482 17344 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.485492 17344 net.cpp:150] Setting up L2_b5_relu\nI0817 16:27:31.485499 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485503 17344 net.cpp:165] Memory required for data: 1068545500\nI0817 16:27:31.485508 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.485515 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.485520 17344 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:27:31.485530 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:27:31.485539 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:27:31.485584 17344 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.485595 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485601 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.485606 17344 net.cpp:165] Memory required for data: 1076737500\nI0817 16:27:31.485610 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:27:31.485625 17344 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:27:31.485631 17344 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:27:31.485641 17344 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:27:31.486116 17344 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:27:31.486130 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.486135 17344 net.cpp:165] Memory required for data: 1080833500\nI0817 16:27:31.486145 17344 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:27:31.486156 17344 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:27:31.486162 17344 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:27:31.486171 17344 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:27:31.486421 17344 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:27:31.486434 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.486439 17344 net.cpp:165] Memory required for data: 1084929500\nI0817 16:27:31.486449 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:27:31.486459 17344 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:27:31.486464 17344 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:27:31.486471 17344 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.486528 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:27:31.486670 17344 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:27:31.486685 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.486690 17344 net.cpp:165] Memory required for data: 1089025500\nI0817 16:27:31.486699 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:27:31.486706 17344 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:27:31.486712 17344 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:27:31.486719 17344 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.486729 17344 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:27:31.486737 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.486740 17344 net.cpp:165] Memory required for data: 1093121500\nI0817 16:27:31.486745 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:27:31.486759 17344 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:27:31.486765 17344 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:27:31.486776 17344 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:27:31.487241 17344 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:27:31.487260 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.487267 17344 net.cpp:165] Memory required for data: 1097217500\nI0817 16:27:31.487274 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:27:31.487287 17344 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:27:31.487293 17344 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:27:31.487304 17344 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:27:31.487545 17344 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:27:31.487557 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.487561 17344 net.cpp:165] Memory required for data: 1101313500\nI0817 16:27:31.487571 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:27:31.487581 17344 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:27:31.487586 17344 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:27:31.487593 17344 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.487649 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:27:31.487797 17344 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:27:31.487817 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.487821 17344 net.cpp:165] Memory required for data: 1105409500\nI0817 16:27:31.487829 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:27:31.487841 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:27:31.487848 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:27:31.487855 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:27:31.487862 17344 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:27:31.487890 17344 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:27:31.487900 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.487905 17344 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:27:31.487910 17344 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:27:31.487921 17344 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:27:31.487927 17344 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:27:31.487936 17344 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.487946 17344 net.cpp:150] Setting up L2_b6_relu\nI0817 16:27:31.487952 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.487957 17344 net.cpp:165] Memory required for data: 1113601500\nI0817 16:27:31.487962 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.487969 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.487974 17344 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:27:31.487982 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:27:31.487990 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:27:31.488037 17344 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.488049 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.488055 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.488059 17344 net.cpp:165] Memory required for data: 1121793500\nI0817 16:27:31.488065 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:27:31.488078 17344 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:27:31.488085 17344 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:27:31.488093 17344 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:27:31.488560 17344 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:27:31.488574 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.488579 17344 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:27:31.488587 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:27:31.488600 17344 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:27:31.488612 17344 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:27:31.488625 17344 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:27:31.488881 17344 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:27:31.488895 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.488900 17344 net.cpp:165] Memory required for data: 1129985500\nI0817 16:27:31.488910 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:27:31.488919 17344 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:27:31.488925 17344 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:27:31.488932 17344 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.488989 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:27:31.489135 17344 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:27:31.489150 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.489154 17344 net.cpp:165] Memory required for data: 1134081500\nI0817 16:27:31.489163 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:27:31.489171 17344 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:27:31.489177 17344 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:27:31.489184 17344 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.489194 17344 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:27:31.489202 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.489205 17344 net.cpp:165] Memory required for data: 1138177500\nI0817 16:27:31.489210 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:27:31.489225 17344 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:27:31.489231 17344 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:27:31.489243 17344 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:27:31.489708 17344 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:27:31.489722 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.489727 17344 net.cpp:165] Memory required for data: 1142273500\nI0817 16:27:31.489735 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:27:31.489748 17344 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:27:31.489754 17344 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:27:31.489764 17344 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:27:31.490022 17344 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:27:31.490036 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.490041 17344 net.cpp:165] Memory required for data: 1146369500\nI0817 16:27:31.490051 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:27:31.490059 17344 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:27:31.490065 17344 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:27:31.490073 17344 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.490130 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:27:31.490276 17344 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:27:31.490289 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.490294 17344 net.cpp:165] Memory required for data: 1150465500\nI0817 16:27:31.490303 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:27:31.490314 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:27:31.490321 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:27:31.490327 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:27:31.490335 17344 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:27:31.490362 17344 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:27:31.490371 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:31.490375 17344 net.cpp:165] Memory required for data: 1154561500\nI0817 16:27:31.490381 17344 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:27:31.490392 17344 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:27:31.490397 17344 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:27:31.490411 17344 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.490422 17344 net.cpp:150] Setting up L2_b7_relu\nI0817 16:27:31.490428 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.490433 17344 net.cpp:165] Memory required for data: 1158657500\nI0817 16:27:31.490438 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.490445 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.490450 17344 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:27:31.490458 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:27:31.490468 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:27:31.490514 17344 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.490526 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.490532 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.490537 17344 net.cpp:165] Memory required for data: 1166849500\nI0817 16:27:31.490542 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:27:31.490556 17344 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:27:31.490562 17344 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:27:31.490571 17344 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:27:31.491048 17344 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:27:31.491062 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:31.491068 17344 net.cpp:165] Memory required for data: 1170945500\nI0817 16:27:31.491076 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:27:31.491088 17344 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:27:31.491096 17344 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:27:31.491106 17344 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:27:31.491355 17344 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:27:31.491369 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.491372 17344 net.cpp:165] Memory required for data: 1175041500\nI0817 16:27:31.491382 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:27:31.491391 17344 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:27:31.491397 17344 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:27:31.491405 17344 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.491461 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:27:31.491608 17344 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:27:31.491621 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.491626 17344 net.cpp:165] Memory required for data: 1179137500\nI0817 16:27:31.491634 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:27:31.491645 17344 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:27:31.491652 17344 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:27:31.491658 17344 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.491668 17344 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:27:31.491674 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.491679 17344 net.cpp:165] Memory required for data: 1183233500\nI0817 16:27:31.491684 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:27:31.491698 17344 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:27:31.491703 17344 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:27:31.491714 17344 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:27:31.492185 17344 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:27:31.492199 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.492204 17344 net.cpp:165] Memory required for data: 1187329500\nI0817 16:27:31.492213 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:27:31.492224 17344 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:27:31.492239 17344 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:27:31.492250 17344 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:27:31.492501 17344 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:27:31.492516 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.492522 17344 net.cpp:165] Memory required for data: 1191425500\nI0817 16:27:31.492532 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:27:31.492540 17344 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:27:31.492547 17344 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:27:31.492554 17344 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.492609 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:27:31.492760 17344 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:27:31.492772 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.492777 17344 net.cpp:165] Memory required for data: 1195521500\nI0817 16:27:31.492786 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:27:31.492795 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:27:31.492801 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:27:31.492817 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:27:31.492825 17344 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:27:31.492853 17344 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:27:31.492862 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.492867 17344 net.cpp:165] Memory required for data: 1199617500\nI0817 16:27:31.492872 17344 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:27:31.492883 17344 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:27:31.492889 17344 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:27:31.492897 17344 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.492905 17344 net.cpp:150] Setting up L2_b8_relu\nI0817 16:27:31.492913 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.492916 17344 net.cpp:165] Memory required for data: 1203713500\nI0817 16:27:31.492921 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.492928 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.492933 17344 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:27:31.492941 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:27:31.492964 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:27:31.493013 17344 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.493026 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.493032 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.493036 17344 net.cpp:165] Memory required for data: 1211905500\nI0817 16:27:31.493041 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:27:31.493055 17344 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:27:31.493062 17344 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:27:31.493072 17344 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:27:31.493540 17344 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:27:31.493553 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.493558 17344 net.cpp:165] Memory required for data: 1216001500\nI0817 16:27:31.493566 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:27:31.493578 17344 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:27:31.493585 17344 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:27:31.493593 17344 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:27:31.493851 17344 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:27:31.493867 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.493880 17344 net.cpp:165] Memory required for data: 1220097500\nI0817 16:27:31.493891 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:27:31.493899 17344 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:27:31.493906 17344 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:27:31.493913 17344 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.493968 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:27:31.494122 17344 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:27:31.494134 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.494139 17344 net.cpp:165] Memory required for data: 1224193500\nI0817 16:27:31.494148 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:27:31.494156 17344 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:27:31.494163 17344 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:27:31.494173 17344 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.494182 17344 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:27:31.494189 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.494194 17344 net.cpp:165] Memory required for data: 1228289500\nI0817 16:27:31.494199 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:27:31.494212 17344 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:27:31.494218 17344 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:27:31.494226 17344 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:27:31.494691 17344 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:27:31.494705 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.494710 17344 net.cpp:165] Memory required for data: 1232385500\nI0817 16:27:31.494719 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:27:31.494731 17344 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:27:31.494738 17344 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:27:31.494746 17344 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:27:31.495004 17344 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:27:31.495018 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495023 17344 net.cpp:165] Memory required for data: 1236481500\nI0817 16:27:31.495066 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:27:31.495079 17344 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:27:31.495085 17344 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:27:31.495092 17344 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.495151 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:27:31.495301 17344 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:27:31.495314 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495319 17344 net.cpp:165] Memory required for data: 1240577500\nI0817 16:27:31.495328 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:27:31.495337 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:27:31.495344 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:27:31.495352 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:27:31.495362 17344 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:27:31.495389 17344 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:27:31.495398 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495404 17344 net.cpp:165] Memory required for data: 1244673500\nI0817 16:27:31.495409 17344 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:27:31.495419 17344 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:27:31.495424 17344 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:27:31.495431 17344 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.495440 17344 net.cpp:150] Setting up L2_b9_relu\nI0817 16:27:31.495447 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495452 17344 net.cpp:165] Memory required for data: 1248769500\nI0817 16:27:31.495463 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.495471 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.495476 17344 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:27:31.495486 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:27:31.495496 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:27:31.495543 17344 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.495558 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495564 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.495568 17344 net.cpp:165] Memory required for data: 1256961500\nI0817 16:27:31.495573 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:27:31.495585 17344 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:27:31.495591 17344 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:27:31.495600 17344 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:27:31.496078 17344 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:27:31.496093 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.496098 17344 net.cpp:165] Memory required for data: 1257985500\nI0817 16:27:31.496106 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:27:31.496119 17344 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:27:31.496125 17344 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:27:31.496134 17344 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:27:31.496397 17344 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:27:31.496410 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.496415 17344 net.cpp:165] Memory required for data: 1259009500\nI0817 16:27:31.496426 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:31.496438 17344 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:27:31.496444 17344 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:27:31.496451 17344 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.496508 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:31.496659 17344 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:27:31.496672 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.496677 17344 net.cpp:165] Memory required for data: 1260033500\nI0817 16:27:31.496686 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:27:31.496696 17344 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:27:31.496703 17344 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:27:31.496712 17344 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.496722 17344 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:27:31.496729 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.496734 17344 net.cpp:165] Memory required for data: 1261057500\nI0817 16:27:31.496738 17344 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:27:31.496749 17344 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:27:31.496755 17344 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:27:31.496767 17344 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:27:31.497241 17344 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:27:31.497256 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.497262 17344 net.cpp:165] Memory required for data: 1262081500\nI0817 16:27:31.497270 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:27:31.497282 17344 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:27:31.497288 17344 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:27:31.497297 17344 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:27:31.497555 17344 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:27:31.497568 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.497579 17344 net.cpp:165] Memory required for data: 1263105500\nI0817 16:27:31.497591 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:31.497599 17344 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:27:31.497606 17344 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:27:31.497612 17344 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.497670 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:31.497834 17344 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:27:31.497850 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.497855 17344 net.cpp:165] Memory required for data: 1264129500\nI0817 16:27:31.497864 17344 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:27:31.497874 17344 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:27:31.497880 17344 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:27:31.497889 17344 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:27:31.497925 17344 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:27:31.497934 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.497939 17344 net.cpp:165] Memory required for data: 1265153500\nI0817 16:27:31.497944 17344 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:27:31.497953 17344 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:27:31.497959 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:27:31.497966 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:27:31.497972 17344 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:27:31.498006 17344 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:27:31.498016 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.498021 17344 net.cpp:165] Memory required for data: 1266177500\nI0817 16:27:31.498026 17344 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:27:31.498034 17344 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:27:31.498039 17344 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:27:31.498045 17344 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.498054 17344 net.cpp:150] Setting up L3_b1_relu\nI0817 16:27:31.498061 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.498066 17344 net.cpp:165] Memory required for data: 1267201500\nI0817 16:27:31.498070 17344 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:27:31.498080 17344 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:27:31.498090 17344 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:27:31.499300 17344 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:27:31.499317 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.499322 17344 net.cpp:165] Memory required for data: 1268225500\nI0817 16:27:31.499328 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:27:31.499341 17344 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:27:31.499347 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:27:31.499356 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:27:31.499362 17344 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:27:31.499405 17344 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:27:31.499418 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.499423 17344 net.cpp:165] Memory required for data: 1270273500\nI0817 16:27:31.499428 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.499435 17344 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.499441 17344 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:27:31.499451 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:31.499461 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:31.499511 17344 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.499526 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.499532 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.499544 17344 net.cpp:165] Memory required for data: 1274369500\nI0817 16:27:31.499550 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:27:31.499565 17344 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:27:31.499572 17344 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:31.499581 17344 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:27:31.501560 17344 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:27:31.501580 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.501586 17344 net.cpp:165] Memory required for data: 1276417500\nI0817 16:27:31.501595 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:27:31.501606 17344 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:27:31.501612 17344 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:27:31.501623 17344 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:27:31.501893 17344 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:27:31.501906 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.501911 17344 net.cpp:165] Memory required for data: 1278465500\nI0817 16:27:31.501921 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:31.501933 17344 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:27:31.501940 17344 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:27:31.501947 17344 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.502004 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:31.502159 17344 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:27:31.502172 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.502177 17344 net.cpp:165] Memory required for data: 1280513500\nI0817 16:27:31.502187 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:27:31.502194 17344 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:27:31.502200 17344 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:27:31.502210 17344 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.502220 17344 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:27:31.502228 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.502233 17344 net.cpp:165] Memory required for data: 1282561500\nI0817 16:27:31.502238 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:27:31.502251 17344 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:27:31.502257 17344 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:27:31.502266 17344 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:27:31.503293 17344 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:27:31.503309 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.503314 17344 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:27:31.503322 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:27:31.503334 17344 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:27:31.503341 17344 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:27:31.503350 17344 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:27:31.503615 17344 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:27:31.503629 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.503633 17344 net.cpp:165] Memory required for data: 1286657500\nI0817 16:27:31.503644 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:31.503654 17344 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:27:31.503659 17344 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:27:31.503666 17344 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.503726 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:31.503886 17344 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:27:31.503900 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.503904 17344 net.cpp:165] Memory required for data: 1288705500\nI0817 16:27:31.503913 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:27:31.503931 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:27:31.503938 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:27:31.503945 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:31.503955 17344 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:27:31.503990 17344 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:27:31.504005 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.504010 17344 net.cpp:165] Memory required for data: 1290753500\nI0817 16:27:31.504015 17344 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:27:31.504024 17344 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:27:31.504029 17344 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:27:31.504036 17344 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:27:31.504045 17344 net.cpp:150] Setting up L3_b2_relu\nI0817 16:27:31.504052 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.504056 17344 net.cpp:165] Memory required for data: 1292801500\nI0817 16:27:31.504061 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.504071 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.504077 17344 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:27:31.504084 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:31.504094 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:31.504143 17344 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.504155 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.504163 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.504166 17344 net.cpp:165] Memory required for data: 1296897500\nI0817 16:27:31.504171 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:27:31.504184 17344 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:27:31.504189 17344 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:31.504201 17344 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:27:31.505213 17344 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:27:31.505228 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.505233 17344 net.cpp:165] Memory required for data: 1298945500\nI0817 16:27:31.505241 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:27:31.505250 17344 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:27:31.505256 17344 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:27:31.505267 17344 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:27:31.505527 17344 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:27:31.505543 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.505549 17344 net.cpp:165] Memory required for data: 1300993500\nI0817 16:27:31.505559 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:31.505568 17344 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:27:31.505574 17344 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:27:31.505581 17344 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.505636 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:31.505792 17344 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:27:31.505810 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.505815 17344 net.cpp:165] Memory required for data: 1303041500\nI0817 16:27:31.505825 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:27:31.505833 17344 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:27:31.505839 17344 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:27:31.505849 17344 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.505861 17344 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:27:31.505867 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.505878 17344 net.cpp:165] Memory required for data: 1305089500\nI0817 16:27:31.505883 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:27:31.505898 17344 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:27:31.505904 17344 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:27:31.505913 17344 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:27:31.506939 17344 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:27:31.506953 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.506958 17344 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:27:31.506968 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:27:31.506979 17344 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:27:31.506986 17344 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:27:31.506999 17344 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:27:31.507259 17344 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:27:31.507272 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507277 17344 net.cpp:165] Memory required for data: 1309185500\nI0817 16:27:31.507287 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:31.507295 17344 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:27:31.507302 17344 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:27:31.507309 17344 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.507367 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:31.507519 17344 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:27:31.507532 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507537 17344 net.cpp:165] Memory required for data: 1311233500\nI0817 16:27:31.507546 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:27:31.507555 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:27:31.507561 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:27:31.507568 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:31.507580 17344 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:27:31.507613 17344 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:27:31.507627 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507632 17344 net.cpp:165] Memory required for data: 1313281500\nI0817 16:27:31.507637 17344 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:27:31.507645 17344 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:27:31.507652 17344 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:27:31.507658 17344 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.507668 17344 net.cpp:150] Setting up L3_b3_relu\nI0817 16:27:31.507674 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507678 17344 net.cpp:165] Memory required for data: 1315329500\nI0817 16:27:31.507683 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.507694 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.507699 17344 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:27:31.507705 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:27:31.507715 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:27:31.507763 17344 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.507776 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507781 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.507786 17344 net.cpp:165] Memory required for data: 1319425500\nI0817 16:27:31.507791 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:27:31.507807 17344 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:27:31.507815 17344 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:27:31.507827 17344 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:27:31.508855 17344 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:27:31.508870 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.508875 17344 net.cpp:165] Memory required for data: 1321473500\nI0817 16:27:31.508884 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:27:31.508893 17344 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:27:31.508899 17344 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:27:31.508910 17344 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:27:31.509178 17344 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:27:31.509193 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.509198 17344 net.cpp:165] Memory required for data: 1323521500\nI0817 16:27:31.509208 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:27:31.509217 17344 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:27:31.509222 17344 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:27:31.509230 17344 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.509289 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:27:31.509443 17344 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:27:31.509455 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.509460 17344 net.cpp:165] Memory required for data: 1325569500\nI0817 16:27:31.509469 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:27:31.509479 17344 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:27:31.509486 17344 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:27:31.509493 17344 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.509503 17344 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:27:31.509510 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.509515 17344 net.cpp:165] Memory required for data: 1327617500\nI0817 16:27:31.509519 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:27:31.509532 17344 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:27:31.509539 17344 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:27:31.509547 17344 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:27:31.510603 17344 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:27:31.510618 17344 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:27:31.510624 17344 net.cpp:165] Memory required for data: 1329665500\nI0817 16:27:31.510632 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:27:31.510644 17344 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:27:31.510651 17344 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:27:31.510663 17344 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:27:31.510936 17344 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:27:31.510951 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.510956 17344 net.cpp:165] Memory required for data: 1331713500\nI0817 16:27:31.510965 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:27:31.510973 17344 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:27:31.510980 17344 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:27:31.510990 17344 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.511049 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:27:31.511211 17344 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:27:31.511224 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.511229 17344 net.cpp:165] Memory required for data: 1333761500\nI0817 16:27:31.511237 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:27:31.511246 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:27:31.511253 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:27:31.511260 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:27:31.511271 17344 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:27:31.511307 17344 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:27:31.511323 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.511328 17344 net.cpp:165] Memory required for data: 1335809500\nI0817 16:27:31.511334 17344 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:27:31.511342 17344 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:27:31.511348 17344 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:27:31.511358 17344 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:27:31.511368 17344 net.cpp:150] Setting up L3_b4_relu\nI0817 16:27:31.511374 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.511379 17344 net.cpp:165] Memory required for data: 1337857500\nI0817 16:27:31.511384 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.511390 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.511396 17344 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:27:31.511404 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:27:31.511412 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:27:31.511462 17344 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.511473 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.511481 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.511484 17344 net.cpp:165] Memory required for data: 1341953500\nI0817 16:27:31.511489 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:27:31.511502 17344 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:27:31.511507 17344 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:27:31.511519 17344 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:27:31.512552 17344 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:27:31.512567 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.512572 17344 net.cpp:165] Memory required for data: 1344001500\nI0817 16:27:31.512580 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:27:31.512589 17344 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:27:31.512596 17344 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:27:31.512607 17344 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:27:31.513839 17344 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:27:31.513856 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.513862 17344 net.cpp:165] Memory required for data: 1346049500\nI0817 16:27:31.513875 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:27:31.513883 17344 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:27:31.513890 17344 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:27:31.513901 17344 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.513962 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:27:31.514120 17344 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:27:31.514132 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.514137 17344 net.cpp:165] Memory required for data: 1348097500\nI0817 16:27:31.514147 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:27:31.514155 17344 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:27:31.514161 17344 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:27:31.514173 17344 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.514183 17344 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:27:31.514189 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.514194 17344 net.cpp:165] Memory required for data: 1350145500\nI0817 16:27:31.514199 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:27:31.514214 17344 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:27:31.514219 17344 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:27:31.514227 17344 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:27:31.516240 17344 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:27:31.516258 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.516263 17344 net.cpp:165] Memory required for data: 1352193500\nI0817 16:27:31.516273 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:27:31.516286 17344 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:27:31.516294 17344 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:27:31.516304 17344 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:27:31.516564 17344 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:27:31.516577 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.516582 17344 net.cpp:165] Memory required for data: 1354241500\nI0817 16:27:31.516593 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:27:31.516602 17344 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:27:31.516608 17344 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:27:31.516618 17344 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.516675 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:27:31.516837 17344 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:27:31.516851 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.516856 17344 net.cpp:165] Memory required for data: 1356289500\nI0817 16:27:31.516865 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:27:31.516875 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:27:31.516881 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:27:31.516888 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:27:31.516899 17344 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:27:31.516935 17344 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:27:31.516947 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.516952 17344 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:27:31.516957 17344 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:27:31.516964 17344 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:27:31.516970 17344 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:27:31.516978 17344 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.516989 17344 net.cpp:150] Setting up L3_b5_relu\nI0817 16:27:31.516996 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.517001 17344 net.cpp:165] Memory required for data: 1360385500\nI0817 16:27:31.517005 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.517014 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.517019 17344 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:27:31.517025 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:27:31.517035 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:27:31.517082 17344 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.517093 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.517101 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.517104 17344 net.cpp:165] Memory required for data: 1364481500\nI0817 16:27:31.517109 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:27:31.517122 17344 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:27:31.517128 17344 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:27:31.517139 17344 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:27:31.518158 17344 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:27:31.518173 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.518178 17344 net.cpp:165] Memory required for data: 1366529500\nI0817 16:27:31.518187 
17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:27:31.518196 17344 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:27:31.518211 17344 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:27:31.518224 17344 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:27:31.518484 17344 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:27:31.518501 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.518506 17344 net.cpp:165] Memory required for data: 1368577500\nI0817 16:27:31.518515 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:27:31.518524 17344 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:27:31.518530 17344 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:27:31.518538 17344 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.518594 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:27:31.518750 17344 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:27:31.518764 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.518769 17344 net.cpp:165] Memory required for data: 1370625500\nI0817 16:27:31.518777 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:27:31.518788 17344 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:27:31.518795 17344 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:27:31.518807 17344 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.518818 17344 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:27:31.518826 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.518831 17344 net.cpp:165] Memory required for data: 1372673500\nI0817 16:27:31.518834 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:27:31.518848 17344 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:27:31.518856 17344 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:27:31.518864 17344 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:27:31.519882 17344 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:27:31.519897 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.519902 17344 net.cpp:165] Memory required for data: 1374721500\nI0817 16:27:31.519912 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:27:31.519925 17344 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:27:31.519932 17344 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:27:31.519943 17344 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:27:31.520205 17344 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:27:31.520218 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520223 17344 net.cpp:165] Memory required for data: 1376769500\nI0817 16:27:31.520234 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:27:31.520242 17344 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:27:31.520248 17344 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:27:31.520259 17344 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.520316 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:27:31.520470 17344 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:27:31.520483 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520488 17344 net.cpp:165] Memory required for data: 1378817500\nI0817 16:27:31.520498 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:27:31.520506 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:27:31.520512 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:27:31.520519 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:27:31.520530 17344 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:27:31.520566 17344 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:27:31.520577 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520582 
17344 net.cpp:165] Memory required for data: 1380865500\nI0817 16:27:31.520587 17344 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:27:31.520596 17344 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:27:31.520601 17344 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:27:31.520617 17344 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.520628 17344 net.cpp:150] Setting up L3_b6_relu\nI0817 16:27:31.520635 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520640 17344 net.cpp:165] Memory required for data: 1382913500\nI0817 16:27:31.520645 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.520653 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.520658 17344 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:27:31.520665 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:27:31.520675 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:27:31.520725 17344 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.520735 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520742 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.520746 17344 net.cpp:165] Memory required for data: 1387009500\nI0817 16:27:31.520751 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:27:31.520762 17344 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:27:31.520769 17344 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:27:31.520781 17344 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:27:31.521809 17344 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:27:31.521824 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.521829 17344 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:27:31.521838 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:27:31.521847 17344 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:27:31.521853 17344 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:27:31.521865 17344 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:27:31.522130 17344 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:27:31.522142 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.522147 17344 net.cpp:165] Memory required for data: 1391105500\nI0817 16:27:31.522157 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:27:31.522166 17344 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:27:31.522172 17344 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:27:31.522179 17344 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.522238 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:27:31.522392 17344 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:27:31.522405 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.522410 17344 net.cpp:165] Memory required for data: 1393153500\nI0817 16:27:31.522419 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:27:31.522454 17344 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:27:31.522462 17344 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:27:31.522471 17344 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.522481 17344 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:27:31.522488 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.522493 17344 net.cpp:165] Memory required for data: 1395201500\nI0817 16:27:31.522498 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:27:31.522511 17344 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:27:31.522516 17344 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:27:31.522528 
17344 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:27:31.523556 17344 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:27:31.523571 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.523576 17344 net.cpp:165] Memory required for data: 1397249500\nI0817 16:27:31.523586 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:27:31.523594 17344 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:27:31.523607 17344 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:27:31.523619 17344 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:27:31.523895 17344 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:27:31.523912 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.523917 17344 net.cpp:165] Memory required for data: 1399297500\nI0817 16:27:31.523927 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:27:31.523936 17344 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:27:31.523942 17344 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:27:31.523950 17344 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.524008 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:27:31.524161 17344 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:27:31.524174 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.524179 17344 net.cpp:165] Memory required for data: 1401345500\nI0817 16:27:31.524188 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:27:31.524199 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:27:31.524206 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:27:31.524214 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:27:31.524221 17344 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:27:31.524257 17344 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:27:31.524269 17344 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:27:31.524274 17344 net.cpp:165] Memory required for data: 1403393500\nI0817 16:27:31.524279 17344 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:27:31.524286 17344 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:27:31.524292 17344 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:27:31.524299 17344 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.524308 17344 net.cpp:150] Setting up L3_b7_relu\nI0817 16:27:31.524315 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.524320 17344 net.cpp:165] Memory required for data: 1405441500\nI0817 16:27:31.524324 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.524332 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.524336 17344 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:27:31.524346 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:27:31.524356 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:27:31.524401 17344 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.524412 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.524418 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.524423 17344 net.cpp:165] Memory required for data: 1409537500\nI0817 16:27:31.524428 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:27:31.524442 17344 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:27:31.524449 17344 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:27:31.524458 17344 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:27:31.525481 17344 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:27:31.525496 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:27:31.525501 17344 net.cpp:165] Memory required for data: 1411585500\nI0817 16:27:31.525511 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:27:31.525522 17344 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:27:31.525528 17344 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:27:31.525537 17344 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:27:31.525799 17344 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:27:31.525817 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.525822 17344 net.cpp:165] Memory required for data: 1413633500\nI0817 16:27:31.525840 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:27:31.525852 17344 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:27:31.525859 17344 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:27:31.525867 17344 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.525933 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:27:31.526090 17344 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:27:31.526103 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.526108 17344 net.cpp:165] Memory required for data: 1415681500\nI0817 16:27:31.526116 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:27:31.526127 17344 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:27:31.526134 17344 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:27:31.526141 17344 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.526150 17344 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:27:31.526157 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.526162 17344 net.cpp:165] Memory required for data: 1417729500\nI0817 16:27:31.526167 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:27:31.526180 17344 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:27:31.526187 17344 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:27:31.526197 17344 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:27:31.527221 17344 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:27:31.527235 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.527240 17344 net.cpp:165] Memory required for data: 1419777500\nI0817 16:27:31.527249 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:27:31.527258 17344 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:27:31.527264 17344 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:27:31.527276 17344 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:27:31.527539 17344 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:27:31.527554 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.527559 17344 net.cpp:165] Memory required for data: 1421825500\nI0817 16:27:31.527570 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:27:31.527577 17344 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:27:31.527583 17344 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:27:31.527591 17344 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.527648 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:27:31.527813 17344 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:27:31.527827 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.527832 17344 net.cpp:165] Memory required for data: 1423873500\nI0817 16:27:31.527842 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:27:31.527853 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:27:31.527859 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:27:31.527868 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:27:31.527874 17344 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:27:31.527910 17344 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:27:31.527920 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.527925 17344 net.cpp:165] Memory required for data: 1425921500\nI0817 16:27:31.527930 17344 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:27:31.527938 17344 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:27:31.527943 17344 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:27:31.527951 17344 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.527959 17344 net.cpp:150] Setting up L3_b8_relu\nI0817 16:27:31.527966 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.527971 17344 net.cpp:165] Memory required for data: 1427969500\nI0817 16:27:31.527976 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.527992 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.527998 17344 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:27:31.528008 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:27:31.528018 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:27:31.528064 17344 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.528076 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.528082 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.528087 17344 net.cpp:165] Memory required for data: 1432065500\nI0817 16:27:31.528092 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:27:31.528108 17344 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:27:31.528115 17344 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:27:31.528125 17344 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:27:31.530122 17344 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:27:31.530139 
17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.530144 17344 net.cpp:165] Memory required for data: 1434113500\nI0817 16:27:31.530153 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:27:31.530166 17344 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:27:31.530174 17344 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:27:31.530182 17344 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:27:31.530448 17344 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:27:31.530462 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.530467 17344 net.cpp:165] Memory required for data: 1436161500\nI0817 16:27:31.530477 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:27:31.530485 17344 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:27:31.530493 17344 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:27:31.530499 17344 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.530560 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:27:31.530716 17344 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:27:31.530730 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.530735 17344 net.cpp:165] Memory required for data: 1438209500\nI0817 16:27:31.530743 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:27:31.530752 17344 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:27:31.530758 17344 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:27:31.530766 17344 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.530776 17344 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:27:31.530782 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.530786 17344 net.cpp:165] Memory required for data: 1440257500\nI0817 16:27:31.530791 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:27:31.530812 17344 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:27:31.530818 17344 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:27:31.530830 17344 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:27:31.531867 17344 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:27:31.531882 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.531886 17344 net.cpp:165] Memory required for data: 1442305500\nI0817 16:27:31.531895 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:27:31.531908 17344 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:27:31.531913 17344 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:27:31.531922 17344 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:27:31.532184 17344 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:27:31.532197 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.532202 17344 net.cpp:165] Memory required for data: 1444353500\nI0817 16:27:31.532222 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:27:31.532233 17344 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:27:31.532239 17344 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:27:31.532248 17344 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.532307 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:27:31.532461 17344 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:27:31.532474 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.532479 17344 net.cpp:165] Memory required for data: 1446401500\nI0817 16:27:31.532488 17344 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:27:31.532500 17344 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:27:31.532507 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:27:31.532515 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:27:31.532524 17344 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:27:31.532557 17344 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:27:31.532568 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.532573 17344 net.cpp:165] Memory required for data: 1448449500\nI0817 16:27:31.532578 17344 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:27:31.532591 17344 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:27:31.532598 17344 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:27:31.532604 17344 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.532613 17344 net.cpp:150] Setting up L3_b9_relu\nI0817 16:27:31.532620 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.532625 17344 net.cpp:165] Memory required for data: 1450497500\nI0817 16:27:31.532630 17344 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:27:31.532639 17344 net.cpp:100] Creating Layer post_pool\nI0817 16:27:31.532644 17344 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:27:31.532651 17344 net.cpp:408] post_pool -> post_pool\nI0817 16:27:31.532685 17344 net.cpp:150] Setting up post_pool\nI0817 16:27:31.532698 17344 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:27:31.532703 17344 net.cpp:165] Memory required for data: 1450529500\nI0817 16:27:31.532708 17344 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:27:31.532793 17344 net.cpp:100] Creating Layer post_FC\nI0817 16:27:31.532812 17344 net.cpp:434] post_FC <- post_pool\nI0817 16:27:31.532822 17344 net.cpp:408] post_FC -> post_FC_top\nI0817 16:27:31.533062 17344 net.cpp:150] Setting up post_FC\nI0817 16:27:31.533077 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.533083 17344 net.cpp:165] Memory required for data: 1450534500\nI0817 16:27:31.533092 17344 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:27:31.533102 17344 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:27:31.533107 17344 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:27:31.533115 17344 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:27:31.533128 17344 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:27:31.533175 17344 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:27:31.533186 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.533193 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.533197 17344 net.cpp:165] Memory required for data: 1450544500\nI0817 16:27:31.533203 17344 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:27:31.533249 17344 net.cpp:100] Creating Layer accuracy\nI0817 16:27:31.533262 17344 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:27:31.533269 17344 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:27:31.533277 17344 net.cpp:408] accuracy -> accuracy\nI0817 16:27:31.533318 17344 net.cpp:150] Setting up accuracy\nI0817 16:27:31.533331 17344 net.cpp:157] Top shape: (1)\nI0817 16:27:31.533336 17344 net.cpp:165] Memory required for data: 1450544504\nI0817 16:27:31.533349 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:31.533359 17344 net.cpp:100] Creating Layer loss\nI0817 16:27:31.533365 17344 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:27:31.533371 17344 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:27:31.533380 17344 net.cpp:408] loss -> loss\nI0817 16:27:31.533426 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:31.533584 17344 net.cpp:150] Setting up loss\nI0817 16:27:31.533601 17344 net.cpp:157] Top shape: (1)\nI0817 16:27:31.533607 17344 net.cpp:160]     with loss weight 1\nI0817 16:27:31.533680 17344 net.cpp:165] Memory required for data: 1450544508\nI0817 16:27:31.533689 17344 net.cpp:226] loss needs backward computation.\nI0817 16:27:31.533695 17344 net.cpp:228] accuracy does not need backward computation.\nI0817 16:27:31.533702 17344 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:27:31.533707 17344 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:27:31.533712 17344 net.cpp:226] post_pool needs backward computation.\nI0817 16:27:31.533717 17344 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:27:31.533722 17344 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.533727 17344 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.533732 17344 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.533737 17344 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.533743 17344 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.533748 17344 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.533752 17344 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.533757 17344 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.533763 17344 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:27:31.533768 17344 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:27:31.533773 17344 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.533779 17344 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.533783 17344 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.533789 17344 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.533794 17344 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.533799 17344 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:27:31.533812 17344 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.533818 17344 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.533823 17344 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:27:31.533828 17344 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:27:31.533833 17344 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:27:31.533838 17344 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.533843 17344 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.533849 17344 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.533854 17344 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.533859 17344 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.533864 17344 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.533869 17344 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.533874 17344 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:27:31.533879 17344 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:27:31.533885 17344 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.533890 17344 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.533895 17344 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.533901 17344 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.533913 17344 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.533920 17344 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.533924 17344 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.533931 17344 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.533936 17344 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:27:31.533941 17344 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:27:31.533946 17344 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:27:31.533951 17344 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.533957 17344 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.533962 
17344 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.533967 17344 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.533972 17344 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.533977 17344 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.533983 17344 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.533988 17344 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:27:31.533993 17344 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:27:31.533998 17344 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.534004 17344 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.534009 17344 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.534015 17344 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.534020 17344 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.534025 17344 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.534030 17344 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.534035 17344 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.534044 17344 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:27:31.534049 17344 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:27:31.534054 17344 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:27:31.534060 17344 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.534065 17344 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.534070 17344 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.534075 17344 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.534080 17344 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:27:31.534085 17344 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.534090 17344 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.534096 17344 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:27:31.534101 17344 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:27:31.534106 17344 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.534112 17344 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.534117 17344 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.534122 17344 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.534127 17344 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.534132 17344 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.534138 17344 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.534143 17344 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.534148 17344 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:27:31.534153 17344 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:27:31.534160 17344 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:27:31.534170 17344 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:27:31.534176 17344 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.534183 17344 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:27:31.534188 17344 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.534193 17344 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.534198 17344 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.534204 17344 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.534209 17344 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:27:31.534214 17344 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.534219 17344 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.534224 17344 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:27:31.534230 17344 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:27:31.534235 17344 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.534241 17344 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.534246 17344 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.534252 17344 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.534257 17344 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.534263 17344 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.534268 17344 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.534273 17344 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.534279 17344 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:27:31.534284 17344 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:27:31.534289 17344 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.534296 17344 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.534301 17344 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.534307 17344 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.534312 17344 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.534320 17344 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:27:31.534325 17344 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.534330 17344 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.534337 17344 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:27:31.534342 17344 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:27:31.534348 17344 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:27:31.534353 17344 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.534358 17344 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.534363 17344 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.534369 17344 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.534374 17344 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.534379 17344 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.534384 17344 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.534390 17344 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:27:31.534395 17344 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:27:31.534400 17344 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.534406 17344 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.534412 17344 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.534417 17344 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.534422 17344 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.534427 17344 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.534437 17344 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.534443 17344 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.534449 17344 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:27:31.534454 17344 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:27:31.534461 17344 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:27:31.534466 17344 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.534471 17344 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.534476 17344 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.534482 17344 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.534487 17344 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.534492 17344 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.534497 17344 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.534503 17344 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:27:31.534508 17344 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:27:31.534514 17344 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.534519 17344 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.534525 17344 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.534530 17344 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.534535 17344 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.534541 17344 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.534546 17344 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.534551 17344 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.534557 17344 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:27:31.534564 17344 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:27:31.534569 17344 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:27:31.534574 17344 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.534579 17344 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.534585 17344 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.534590 17344 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.534596 17344 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:27:31.534601 17344 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.534606 17344 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.534612 17344 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:27:31.534618 17344 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:27:31.534623 17344 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.534629 17344 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.534634 17344 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.534641 17344 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.534646 17344 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.534651 17344 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.534657 17344 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.534662 17344 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.534667 17344 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:27:31.534672 17344 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:27:31.534679 17344 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:27:31.534684 17344 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:27:31.534690 17344 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.534703 17344 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:27:31.534710 17344 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.534716 17344 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.534721 17344 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.534728 17344 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.534734 17344 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:27:31.534739 17344 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.534744 17344 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.534750 17344 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:27:31.534755 17344 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:27:31.534761 17344 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.534767 17344 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.534772 17344 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.534778 17344 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.534783 17344 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.534790 17344 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.534795 17344 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.534801 17344 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.534811 17344 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:27:31.534817 17344 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:27:31.534823 17344 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.534829 17344 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.534834 17344 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.534840 17344 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.534847 17344 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.534852 17344 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:27:31.534857 17344 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.534863 17344 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.534869 17344 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:27:31.534874 17344 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:27:31.534880 17344 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:27:31.534886 17344 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.534891 17344 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.534898 17344 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.534904 17344 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.534909 17344 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.534914 17344 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.534919 17344 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.534925 17344 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:27:31.534930 17344 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:27:31.534936 17344 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.534942 17344 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.534947 17344 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.534953 17344 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.534960 17344 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.534965 17344 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.534970 17344 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.534981 17344 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.534987 17344 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:27:31.534993 17344 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:27:31.534999 17344 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:27:31.535006 17344 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.535012 17344 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.535017 17344 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.535022 17344 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.535028 17344 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.535033 17344 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.535039 17344 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.535044 17344 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:27:31.535050 17344 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:27:31.535055 17344 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.535063 17344 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.535068 17344 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.535073 17344 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.535079 17344 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.535084 17344 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.535090 17344 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.535096 17344 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.535101 17344 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:27:31.535107 17344 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:27:31.535112 17344 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:27:31.535120 17344 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.535125 17344 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.535130 17344 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.535136 17344 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.535141 17344 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:27:31.535147 17344 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.535152 17344 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.535158 17344 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:27:31.535164 17344 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:27:31.535171 17344 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.535176 17344 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.535181 17344 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.535187 17344 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.535193 17344 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.535199 17344 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.535204 17344 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.535210 17344 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.535217 17344 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:27:31.535221 17344 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:27:31.535228 17344 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.535233 17344 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.535239 17344 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.535244 17344 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.535255 17344 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.535261 17344 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:27:31.535267 17344 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.535274 17344 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.535279 17344 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:27:31.535284 17344 net.cpp:226] pre_relu needs backward computation.\nI0817 16:27:31.535290 17344 net.cpp:226] pre_scale needs backward computation.\nI0817 16:27:31.535295 17344 net.cpp:226] pre_bn needs backward computation.\nI0817 16:27:31.535300 17344 net.cpp:226] pre_conv needs backward computation.\nI0817 16:27:31.535307 17344 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:27:31.535315 17344 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:27:31.535318 17344 net.cpp:270] This network produces output accuracy\nI0817 16:27:31.535326 17344 net.cpp:270] This network produces output loss\nI0817 16:27:31.535691 17344 net.cpp:283] Network initialization done.\nI0817 16:27:31.544924 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:31.544961 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:31.545011 17344 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:27:31.545388 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:27:31.545406 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:27:31.545418 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:27:31.545426 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:27:31.545435 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:27:31.545444 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:27:31.545454 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:27:31.545462 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:27:31.545471 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:27:31.545480 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:27:31.545490 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:27:31.545497 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:27:31.545506 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:27:31.545514 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:27:31.545523 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:27:31.545533 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:27:31.545542 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:27:31.545550 17344 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:27:31.545559 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:27:31.545578 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:27:31.545588 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:27:31.545595 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:27:31.545608 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:27:31.545616 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:27:31.545625 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:27:31.545634 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:27:31.545642 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:27:31.545650 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:27:31.545658 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:27:31.545667 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:27:31.545676 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:27:31.545686 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:27:31.545694 17344 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:27:31.545701 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:27:31.545711 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:27:31.545718 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:27:31.545727 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:27:31.545737 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:27:31.545744 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:27:31.545753 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:27:31.545764 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:27:31.545773 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:27:31.545781 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:27:31.545789 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:27:31.545799 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:27:31.545816 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:27:31.545825 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:27:31.545833 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:27:31.545842 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:27:31.545859 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:27:31.545868 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:27:31.545876 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:27:31.545886 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:27:31.545894 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:27:31.545903 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:27:31.545910 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:27:31.547565 17344 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0817 16:27:31.549173 17344 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:27:31.549957 17344 net.cpp:100] Creating Layer dataLayer\nI0817 16:27:31.549975 17344 net.cpp:408] dataLayer -> data_top\nI0817 16:27:31.549995 17344 net.cpp:408] dataLayer -> label\nI0817 16:27:31.550009 17344 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:27:31.560428 17351 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:27:31.560720 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:31.567989 17344 net.cpp:150] Setting up dataLayer\nI0817 16:27:31.568015 17344 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:27:31.568024 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.568029 17344 net.cpp:165] Memory required for data: 1536500\nI0817 16:27:31.568037 17344 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:27:31.568048 17344 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:27:31.568055 17344 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:27:31.568063 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:27:31.568075 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:27:31.568145 17344 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:27:31.568161 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.568167 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:27:31.568171 17344 net.cpp:165] Memory required for data: 1537500\nI0817 16:27:31.568177 17344 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:27:31.568197 17344 net.cpp:100] Creating Layer pre_conv\nI0817 16:27:31.568205 17344 net.cpp:434] pre_conv <- data_top\nI0817 16:27:31.568218 17344 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:27:31.568711 17344 net.cpp:150] Setting up pre_conv\nI0817 16:27:31.568734 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.568740 17344 net.cpp:165] Memory required for data: 9729500\nI0817 16:27:31.568768 17344 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:27:31.568783 17344 net.cpp:100] Creating Layer pre_bn\nI0817 16:27:31.568789 17344 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:27:31.568797 17344 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:27:31.569238 17344 net.cpp:150] Setting up pre_bn\nI0817 16:27:31.569254 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.569259 17344 net.cpp:165] Memory required for data: 17921500\nI0817 16:27:31.569278 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:31.569288 17344 net.cpp:100] Creating Layer pre_scale\nI0817 16:27:31.569294 17344 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:27:31.569305 17344 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:27:31.569371 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:27:31.569598 17344 net.cpp:150] Setting up pre_scale\nI0817 16:27:31.569614 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.569620 17344 net.cpp:165] Memory required for data: 26113500\nI0817 16:27:31.569630 17344 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:27:31.569638 17344 net.cpp:100] Creating Layer pre_relu\nI0817 16:27:31.569644 17344 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:27:31.569651 17344 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:27:31.569663 17344 net.cpp:150] Setting up pre_relu\nI0817 16:27:31.569670 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.569674 17344 net.cpp:165] Memory required for data: 34305500\nI0817 16:27:31.569679 
17344 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:27:31.569700 17344 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:27:31.569707 17344 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:27:31.569718 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:27:31.569728 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:27:31.569787 17344 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:27:31.569799 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.569813 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.569818 17344 net.cpp:165] Memory required for data: 50689500\nI0817 16:27:31.569823 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:27:31.569836 17344 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:27:31.569842 17344 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:27:31.569854 17344 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:27:31.570261 17344 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:27:31.570277 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.570283 17344 net.cpp:165] Memory required for data: 58881500\nI0817 16:27:31.570296 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:27:31.570308 17344 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:27:31.570314 17344 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:27:31.570323 17344 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:27:31.570713 17344 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:27:31.570729 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.570734 17344 net.cpp:165] Memory required for data: 67073500\nI0817 16:27:31.570749 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:31.570757 17344 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:27:31.570763 17344 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:27:31.570771 17344 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.570958 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:27:31.571144 17344 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:27:31.571158 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.571172 17344 net.cpp:165] Memory required for data: 75265500\nI0817 16:27:31.571183 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:27:31.571195 17344 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:27:31.571202 17344 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:27:31.571220 17344 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.571231 17344 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:27:31.571239 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.571244 17344 net.cpp:165] Memory required for data: 83457500\nI0817 16:27:31.571249 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:27:31.571264 17344 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:27:31.571269 17344 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:27:31.571283 17344 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:27:31.571719 17344 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:27:31.571734 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.571739 17344 net.cpp:165] Memory required for data: 91649500\nI0817 16:27:31.571751 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:27:31.571763 17344 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:27:31.571768 17344 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:27:31.571775 17344 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:27:31.572103 17344 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:27:31.572119 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:27:31.572124 17344 net.cpp:165] Memory required for data: 99841500\nI0817 16:27:31.572144 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:31.572152 17344 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:27:31.572158 17344 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:27:31.572170 17344 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.572237 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:27:31.572425 17344 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:27:31.572440 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.572445 17344 net.cpp:165] Memory required for data: 108033500\nI0817 16:27:31.572455 17344 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:27:31.572468 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:27:31.572474 17344 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:27:31.572480 17344 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:27:31.572487 17344 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:27:31.572531 17344 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:27:31.572541 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.572546 17344 net.cpp:165] Memory required for data: 116225500\nI0817 16:27:31.572551 17344 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:27:31.572559 17344 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:27:31.572567 17344 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:27:31.572578 17344 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.572587 17344 net.cpp:150] Setting up L1_b1_relu\nI0817 16:27:31.572594 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.572599 17344 net.cpp:165] Memory required for data: 124417500\nI0817 16:27:31.572603 17344 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.572613 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.572618 17344 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:27:31.572624 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:31.572636 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:31.572692 17344 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:27:31.572713 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.572721 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.572726 17344 net.cpp:165] Memory required for data: 140801500\nI0817 16:27:31.572731 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:27:31.572746 17344 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:27:31.572752 17344 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:27:31.572764 17344 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:27:31.573174 17344 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:27:31.573191 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.573196 17344 net.cpp:165] Memory required for data: 148993500\nI0817 16:27:31.573205 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:27:31.573215 17344 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:27:31.573220 17344 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:27:31.573232 17344 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:27:31.573551 17344 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:27:31.573570 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.573575 17344 net.cpp:165] Memory required for data: 157185500\nI0817 16:27:31.573585 17344 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:27:31.573596 17344 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:27:31.573601 17344 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:27:31.573608 17344 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.573678 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:27:31.573930 17344 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:27:31.573945 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.573951 17344 net.cpp:165] Memory required for data: 165377500\nI0817 16:27:31.573962 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:27:31.573974 17344 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:27:31.573981 17344 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:27:31.573989 17344 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.574000 17344 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:27:31.574007 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.574012 17344 net.cpp:165] Memory required for data: 173569500\nI0817 16:27:31.574017 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:27:31.574033 17344 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:27:31.574039 17344 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:27:31.574050 17344 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:27:31.574466 17344 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:27:31.574483 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.574488 17344 net.cpp:165] Memory required for data: 181761500\nI0817 16:27:31.574498 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:27:31.574509 17344 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:27:31.574515 17344 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:27:31.574527 17344 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:27:31.574847 17344 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:27:31.574861 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.574868 17344 net.cpp:165] Memory required for data: 189953500\nI0817 16:27:31.574888 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:31.574898 17344 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:27:31.574903 17344 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:27:31.574910 17344 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.574985 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:27:31.575166 17344 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:27:31.575179 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575184 17344 net.cpp:165] Memory required for data: 198145500\nI0817 16:27:31.575201 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:27:31.575212 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:27:31.575219 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:27:31.575227 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:27:31.575238 17344 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:27:31.575278 17344 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:27:31.575290 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575295 17344 net.cpp:165] Memory required for data: 206337500\nI0817 16:27:31.575300 17344 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:27:31.575311 17344 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:27:31.575317 17344 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:27:31.575325 17344 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:27:31.575333 17344 net.cpp:150] Setting up L1_b2_relu\nI0817 16:27:31.575340 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575345 17344 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:27:31.575350 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.575361 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.575366 17344 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:27:31.575372 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:31.575382 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:31.575441 17344 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:27:31.575451 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575458 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575462 17344 net.cpp:165] Memory required for data: 230913500\nI0817 16:27:31.575467 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:27:31.575480 17344 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:27:31.575489 17344 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:27:31.575500 17344 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:27:31.575898 17344 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:27:31.575913 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.575918 17344 net.cpp:165] Memory required for data: 239105500\nI0817 16:27:31.575927 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:27:31.575947 17344 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:27:31.575953 17344 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:27:31.575961 17344 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:27:31.576279 17344 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:27:31.576297 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.576301 17344 net.cpp:165] Memory required for data: 
247297500\nI0817 16:27:31.576313 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:31.576323 17344 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:27:31.576329 17344 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:27:31.576337 17344 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.576406 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:27:31.576586 17344 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:27:31.576601 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.576606 17344 net.cpp:165] Memory required for data: 255489500\nI0817 16:27:31.576616 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:27:31.576623 17344 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:27:31.576632 17344 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:27:31.576643 17344 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.576660 17344 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:27:31.576668 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.576673 17344 net.cpp:165] Memory required for data: 263681500\nI0817 16:27:31.576678 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:27:31.576694 17344 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:27:31.576701 17344 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:27:31.576710 17344 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:27:31.577353 17344 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:27:31.577368 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.577376 17344 net.cpp:165] Memory required for data: 271873500\nI0817 16:27:31.577385 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:27:31.577401 17344 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:27:31.577409 17344 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:27:31.577416 17344 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:27:31.577745 17344 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:27:31.577759 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.577764 17344 net.cpp:165] Memory required for data: 280065500\nI0817 16:27:31.577775 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:31.577783 17344 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:27:31.577790 17344 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:27:31.577800 17344 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.577875 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:27:31.578060 17344 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:27:31.578075 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.578081 17344 net.cpp:165] Memory required for data: 288257500\nI0817 16:27:31.578090 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:27:31.578099 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:27:31.578105 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:27:31.578114 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:27:31.578127 17344 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:27:31.578166 17344 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:27:31.578176 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.578181 17344 net.cpp:165] Memory required for data: 296449500\nI0817 16:27:31.578186 17344 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:27:31.578197 17344 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:27:31.578202 17344 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:27:31.578209 17344 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.578222 17344 net.cpp:150] Setting up L1_b3_relu\nI0817 16:27:31.578229 17344 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:27:31.578233 17344 net.cpp:165] Memory required for data: 304641500\nI0817 16:27:31.578238 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.578245 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.578253 17344 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:27:31.578261 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:31.578270 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:31.578326 17344 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:27:31.578338 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.578347 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.578352 17344 net.cpp:165] Memory required for data: 321025500\nI0817 16:27:31.578358 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:27:31.578373 17344 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:27:31.578389 17344 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:27:31.578400 17344 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:27:31.578779 17344 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:27:31.578794 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.578799 17344 net.cpp:165] Memory required for data: 329217500\nI0817 16:27:31.578814 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:27:31.578825 17344 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:27:31.578832 17344 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:27:31.578840 17344 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:27:31.579115 17344 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:27:31.579131 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:27:31.579138 17344 net.cpp:165] Memory required for data: 337409500\nI0817 16:27:31.579149 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:27:31.579157 17344 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:27:31.579162 17344 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:27:31.579170 17344 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.579227 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:27:31.579385 17344 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:27:31.579399 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.579404 17344 net.cpp:165] Memory required for data: 345601500\nI0817 16:27:31.579412 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:27:31.579424 17344 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:27:31.579430 17344 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:27:31.579440 17344 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.579449 17344 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:27:31.579457 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.579461 17344 net.cpp:165] Memory required for data: 353793500\nI0817 16:27:31.579466 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:27:31.579478 17344 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:27:31.579483 17344 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:27:31.579493 17344 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:27:31.579851 17344 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:27:31.579865 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.579870 17344 net.cpp:165] Memory required for data: 361985500\nI0817 16:27:31.579880 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:27:31.579890 17344 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:27:31.579895 17344 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:27:31.579906 17344 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:27:31.580219 17344 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:27:31.580234 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580238 17344 net.cpp:165] Memory required for data: 370177500\nI0817 16:27:31.580250 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:27:31.580258 17344 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:27:31.580265 17344 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:27:31.580271 17344 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.580332 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:27:31.580487 17344 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:27:31.580502 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580505 17344 net.cpp:165] Memory required for data: 378369500\nI0817 16:27:31.580514 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:27:31.580523 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:27:31.580529 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:27:31.580536 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:27:31.580554 17344 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:27:31.580588 17344 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:27:31.580597 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580602 17344 net.cpp:165] Memory required for data: 386561500\nI0817 16:27:31.580607 17344 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:27:31.580618 17344 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:27:31.580624 17344 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:27:31.580631 17344 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:27:31.580639 17344 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:27:31.580646 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580651 17344 net.cpp:165] Memory required for data: 394753500\nI0817 16:27:31.580655 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.580662 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.580667 17344 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:27:31.580674 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:27:31.580684 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:27:31.580732 17344 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:27:31.580744 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580750 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.580755 17344 net.cpp:165] Memory required for data: 411137500\nI0817 16:27:31.580760 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:27:31.580775 17344 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:27:31.580781 17344 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:27:31.580790 17344 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:27:31.581147 17344 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:27:31.581162 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.581167 17344 net.cpp:165] Memory required for data: 419329500\nI0817 16:27:31.581189 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:27:31.581202 17344 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:27:31.581207 17344 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:27:31.581218 17344 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:27:31.581493 17344 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:27:31.581506 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.581511 17344 net.cpp:165] Memory required for data: 427521500\nI0817 16:27:31.581521 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:27:31.581531 17344 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:27:31.581537 17344 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:27:31.581543 17344 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.581604 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:27:31.581760 17344 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:27:31.581773 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.581779 17344 net.cpp:165] Memory required for data: 435713500\nI0817 16:27:31.581787 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:27:31.581794 17344 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:27:31.581800 17344 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:27:31.581816 17344 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.581826 17344 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:27:31.581833 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.581838 17344 net.cpp:165] Memory required for data: 443905500\nI0817 16:27:31.581843 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:27:31.581861 17344 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:27:31.581867 17344 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:27:31.581878 17344 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:27:31.582244 17344 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:27:31.582259 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.582264 17344 net.cpp:165] Memory required for data: 452097500\nI0817 16:27:31.582273 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:27:31.582283 17344 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:27:31.582288 17344 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:27:31.582299 17344 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:27:31.582578 17344 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:27:31.582594 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.582599 17344 net.cpp:165] Memory required for data: 460289500\nI0817 16:27:31.582610 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:27:31.582618 17344 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:27:31.582624 17344 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:27:31.582631 17344 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.582690 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:27:31.582859 17344 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:27:31.582872 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.582877 17344 net.cpp:165] Memory required for data: 468481500\nI0817 16:27:31.582886 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:27:31.582898 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:27:31.582904 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:27:31.582911 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:27:31.582923 17344 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:27:31.582957 17344 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:27:31.582968 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.582973 17344 net.cpp:165] Memory required for data: 476673500\nI0817 16:27:31.582978 17344 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:27:31.582988 17344 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:27:31.582994 17344 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:27:31.583003 17344 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.583011 17344 net.cpp:150] Setting up L1_b5_relu\nI0817 16:27:31.583017 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.583022 17344 net.cpp:165] Memory required for data: 484865500\nI0817 16:27:31.583026 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.583034 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.583039 17344 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:27:31.583045 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:27:31.583055 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:27:31.583104 17344 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:27:31.583117 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.583122 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.583127 17344 net.cpp:165] Memory required for data: 501249500\nI0817 16:27:31.583132 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:27:31.583147 17344 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:27:31.583153 17344 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:27:31.583161 17344 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:27:31.583509 17344 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:27:31.583523 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.583534 17344 net.cpp:165] Memory required for data: 509441500\nI0817 16:27:31.583544 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:27:31.583556 17344 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:27:31.583562 17344 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:27:31.583570 17344 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:27:31.583863 17344 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:27:31.583879 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.583884 17344 net.cpp:165] Memory required for data: 517633500\nI0817 16:27:31.583895 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:27:31.583904 17344 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:27:31.583909 17344 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:27:31.583917 17344 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.583974 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:27:31.584152 17344 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:27:31.584167 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.584172 17344 net.cpp:165] Memory required for data: 525825500\nI0817 16:27:31.584182 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:27:31.584192 17344 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:27:31.584198 17344 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:27:31.584208 17344 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.584218 17344 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:27:31.584225 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.584230 17344 net.cpp:165] Memory required for data: 534017500\nI0817 16:27:31.584234 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:27:31.584245 17344 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:27:31.584250 17344 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:27:31.584261 17344 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:27:31.584612 17344 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:27:31.584626 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.584631 17344 net.cpp:165] Memory required for data: 542209500\nI0817 16:27:31.584640 17344 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:27:31.584648 17344 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:27:31.584655 17344 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:27:31.584669 17344 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:27:31.584951 17344 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:27:31.584964 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.584969 17344 net.cpp:165] Memory required for data: 550401500\nI0817 16:27:31.584980 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:27:31.584991 17344 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:27:31.584997 17344 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:27:31.585005 17344 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.585062 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:27:31.585222 17344 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:27:31.585234 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585239 17344 net.cpp:165] Memory required for data: 558593500\nI0817 16:27:31.585248 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:27:31.585268 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:27:31.585274 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:27:31.585281 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:27:31.585289 17344 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:27:31.585325 17344 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:27:31.585335 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585340 17344 net.cpp:165] Memory required for data: 566785500\nI0817 16:27:31.585352 17344 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:27:31.585361 17344 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:27:31.585366 17344 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:27:31.585373 17344 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.585382 17344 net.cpp:150] Setting up L1_b6_relu\nI0817 16:27:31.585389 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585393 17344 net.cpp:165] Memory required for data: 574977500\nI0817 16:27:31.585398 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.585405 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.585410 17344 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:27:31.585420 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:27:31.585430 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:27:31.585477 17344 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:27:31.585487 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585494 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585499 17344 net.cpp:165] Memory required for data: 591361500\nI0817 16:27:31.585503 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:27:31.585517 17344 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:27:31.585523 17344 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:27:31.585532 17344 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:27:31.585891 17344 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:27:31.585906 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.585911 17344 net.cpp:165] Memory required for data: 599553500\nI0817 16:27:31.585919 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:27:31.585932 17344 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:27:31.585939 17344 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:27:31.585950 17344 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:27:31.586225 17344 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:27:31.586239 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.586244 17344 net.cpp:165] Memory required for data: 607745500\nI0817 16:27:31.586254 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:27:31.586262 17344 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:27:31.586268 17344 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:27:31.586275 17344 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.586338 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:27:31.586494 17344 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:27:31.586508 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.586513 17344 net.cpp:165] Memory required for data: 615937500\nI0817 16:27:31.586520 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:27:31.586531 17344 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:27:31.586537 17344 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:27:31.586545 17344 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.586555 17344 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:27:31.586561 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.586565 17344 net.cpp:165] Memory required for data: 624129500\nI0817 16:27:31.586570 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:27:31.586583 17344 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:27:31.586589 17344 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:27:31.586601 17344 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:27:31.586966 17344 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:27:31.586980 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.586992 17344 
net.cpp:165] Memory required for data: 632321500\nI0817 16:27:31.587002 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:27:31.587013 17344 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:27:31.587020 17344 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:27:31.587030 17344 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:27:31.587306 17344 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:27:31.587318 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587323 17344 net.cpp:165] Memory required for data: 640513500\nI0817 16:27:31.587333 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:27:31.587342 17344 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:27:31.587347 17344 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:27:31.587355 17344 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.587416 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:27:31.587594 17344 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:27:31.587608 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587613 17344 net.cpp:165] Memory required for data: 648705500\nI0817 16:27:31.587622 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:27:31.587631 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:27:31.587637 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:27:31.587644 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:27:31.587656 17344 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:27:31.587690 17344 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:27:31.587702 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587707 17344 net.cpp:165] Memory required for data: 656897500\nI0817 16:27:31.587712 17344 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:27:31.587724 17344 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:27:31.587730 17344 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:27:31.587738 17344 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.587746 17344 net.cpp:150] Setting up L1_b7_relu\nI0817 16:27:31.587754 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587757 17344 net.cpp:165] Memory required for data: 665089500\nI0817 16:27:31.587762 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.587769 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.587774 17344 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:27:31.587781 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:27:31.587790 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:27:31.587848 17344 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:27:31.587862 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587868 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.587873 17344 net.cpp:165] Memory required for data: 681473500\nI0817 16:27:31.587878 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:27:31.587891 17344 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:27:31.587898 17344 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:27:31.587908 17344 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:27:31.588263 17344 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:27:31.588275 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.588280 17344 net.cpp:165] Memory required for data: 689665500\nI0817 16:27:31.588289 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:27:31.588300 17344 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:27:31.588307 17344 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:27:31.588325 17344 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:27:31.588601 17344 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:27:31.588614 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.588619 17344 net.cpp:165] Memory required for data: 697857500\nI0817 16:27:31.588629 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:27:31.588639 17344 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:27:31.588644 17344 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:27:31.588651 17344 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.588711 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:27:31.588876 17344 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:27:31.588891 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.588896 17344 net.cpp:165] Memory required for data: 706049500\nI0817 16:27:31.588904 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:27:31.588914 17344 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:27:31.588920 17344 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:27:31.588927 17344 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.588937 17344 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:27:31.588944 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.588948 17344 net.cpp:165] Memory required for data: 714241500\nI0817 16:27:31.588953 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:27:31.588966 17344 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:27:31.588973 17344 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:27:31.588984 17344 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:27:31.589340 17344 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:27:31.589354 17344 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.589359 17344 net.cpp:165] Memory required for data: 722433500\nI0817 16:27:31.589367 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:27:31.589378 17344 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:27:31.589385 17344 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:27:31.589393 17344 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:27:31.589674 17344 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:27:31.589687 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.589692 17344 net.cpp:165] Memory required for data: 730625500\nI0817 16:27:31.589702 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:27:31.589711 17344 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:27:31.589716 17344 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:27:31.589725 17344 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.589785 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:27:31.589951 17344 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:27:31.589964 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.589969 17344 net.cpp:165] Memory required for data: 738817500\nI0817 16:27:31.589978 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:27:31.589987 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:27:31.589993 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:27:31.589999 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:27:31.590010 17344 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:27:31.590044 17344 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:27:31.590054 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590057 17344 net.cpp:165] Memory required for data: 747009500\nI0817 16:27:31.590062 17344 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:27:31.590073 17344 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:27:31.590080 17344 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:27:31.590086 17344 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.590101 17344 net.cpp:150] Setting up L1_b8_relu\nI0817 16:27:31.590109 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590114 17344 net.cpp:165] Memory required for data: 755201500\nI0817 16:27:31.590118 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.590126 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.590131 17344 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:27:31.590138 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:27:31.590147 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:27:31.590198 17344 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:27:31.590209 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590215 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590220 17344 net.cpp:165] Memory required for data: 771585500\nI0817 16:27:31.590224 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:27:31.590239 17344 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:27:31.590245 17344 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:27:31.590253 17344 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:27:31.590615 17344 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:27:31.590629 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590634 17344 net.cpp:165] Memory required for data: 779777500\nI0817 16:27:31.590642 17344 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:27:31.590654 17344 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:27:31.590661 17344 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:27:31.590668 17344 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:27:31.590952 17344 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:27:31.590966 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.590971 17344 net.cpp:165] Memory required for data: 787969500\nI0817 16:27:31.590981 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:27:31.590992 17344 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:27:31.590999 17344 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:27:31.591006 17344 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.591069 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:27:31.591233 17344 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:27:31.591246 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.591251 17344 net.cpp:165] Memory required for data: 796161500\nI0817 16:27:31.591260 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:27:31.591267 17344 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:27:31.591274 17344 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:27:31.591284 17344 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.591294 17344 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:27:31.591300 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.591305 17344 net.cpp:165] Memory required for data: 804353500\nI0817 16:27:31.591310 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:27:31.591322 17344 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:27:31.591329 17344 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:27:31.591337 17344 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:27:31.591694 17344 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:27:31.591708 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.591713 17344 net.cpp:165] Memory required for data: 812545500\nI0817 16:27:31.591722 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:27:31.591733 17344 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:27:31.591739 17344 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:27:31.591754 17344 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:27:31.592037 17344 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:27:31.592051 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592056 17344 net.cpp:165] Memory required for data: 820737500\nI0817 16:27:31.592089 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:27:31.592102 17344 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:27:31.592108 17344 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:27:31.592114 17344 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.592176 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:27:31.592335 17344 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:27:31.592348 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592353 17344 net.cpp:165] Memory required for data: 828929500\nI0817 16:27:31.592362 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:27:31.592370 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:27:31.592377 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:27:31.592383 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:27:31.592391 17344 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:27:31.592428 17344 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:27:31.592438 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592442 17344 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:27:31.592447 17344 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:27:31.592455 17344 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:27:31.592460 17344 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:27:31.592473 17344 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.592483 17344 net.cpp:150] Setting up L1_b9_relu\nI0817 16:27:31.592490 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592495 17344 net.cpp:165] Memory required for data: 845313500\nI0817 16:27:31.592499 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.592506 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.592511 17344 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:27:31.592521 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:27:31.592531 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:27:31.592578 17344 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:27:31.592591 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592597 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:27:31.592600 17344 net.cpp:165] Memory required for data: 861697500\nI0817 16:27:31.592605 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:27:31.592619 17344 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:27:31.592627 17344 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:27:31.592634 17344 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:27:31.593005 17344 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:27:31.593020 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.593025 17344 net.cpp:165] Memory required for data: 
863745500\nI0817 16:27:31.593034 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:27:31.593045 17344 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:27:31.593051 17344 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:27:31.593060 17344 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:27:31.593323 17344 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:27:31.593336 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.593341 17344 net.cpp:165] Memory required for data: 865793500\nI0817 16:27:31.593358 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:31.593367 17344 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:27:31.593374 17344 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:27:31.593381 17344 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.593444 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:27:31.593602 17344 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:27:31.593618 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.593623 17344 net.cpp:165] Memory required for data: 867841500\nI0817 16:27:31.593632 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:27:31.593641 17344 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:27:31.593647 17344 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:27:31.593653 17344 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.593663 17344 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:27:31.593669 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.593674 17344 net.cpp:165] Memory required for data: 869889500\nI0817 16:27:31.593678 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:27:31.593693 17344 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:27:31.593698 17344 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:27:31.593708 17344 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:27:31.594071 17344 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:27:31.594085 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594090 17344 net.cpp:165] Memory required for data: 871937500\nI0817 16:27:31.594099 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:27:31.594111 17344 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:27:31.594117 17344 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:27:31.594128 17344 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:27:31.594394 17344 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:27:31.594406 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594411 17344 net.cpp:165] Memory required for data: 873985500\nI0817 16:27:31.594421 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:31.594430 17344 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:27:31.594436 17344 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:27:31.594444 17344 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.594506 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:27:31.594660 17344 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:27:31.594673 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594678 17344 net.cpp:165] Memory required for data: 876033500\nI0817 16:27:31.594686 17344 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:27:31.594698 17344 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:27:31.594705 17344 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:27:31.594713 17344 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:27:31.594746 17344 net.cpp:150] Setting up L2_b1_pool\nI0817 16:27:31.594755 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594760 17344 net.cpp:165] Memory required for data: 878081500\nI0817 16:27:31.594765 17344 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:27:31.594774 17344 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:27:31.594779 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:27:31.594785 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:27:31.594795 17344 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:27:31.594836 17344 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:27:31.594846 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594851 17344 net.cpp:165] Memory required for data: 880129500\nI0817 16:27:31.594856 17344 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:27:31.594867 17344 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:27:31.594879 17344 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:27:31.594887 17344 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.594897 17344 net.cpp:150] Setting up L2_b1_relu\nI0817 16:27:31.594903 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.594908 17344 net.cpp:165] Memory required for data: 882177500\nI0817 16:27:31.594913 17344 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:27:31.594923 17344 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:27:31.594929 17344 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:27:31.597127 17344 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:27:31.597146 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:27:31.597152 17344 net.cpp:165] Memory required for data: 884225500\nI0817 16:27:31.597158 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:27:31.597167 17344 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:27:31.597173 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:27:31.597182 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:27:31.597192 17344 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:27:31.597234 17344 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:27:31.597249 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.597255 17344 net.cpp:165] Memory required for data: 888321500\nI0817 16:27:31.597260 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.597268 17344 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.597273 17344 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:27:31.597281 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:31.597295 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:31.597344 17344 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:27:31.597355 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.597362 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.597367 17344 net.cpp:165] Memory required for data: 896513500\nI0817 16:27:31.597373 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:27:31.597386 17344 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:27:31.597393 17344 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:27:31.597404 17344 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:27:31.597918 17344 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:27:31.597932 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.597937 17344 net.cpp:165] Memory required for data: 900609500\nI0817 16:27:31.597946 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:27:31.597956 17344 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:27:31.597962 17344 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:27:31.597973 17344 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:27:31.598244 17344 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:27:31.598258 17344 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:27:31.598263 17344 net.cpp:165] Memory required for data: 904705500\nI0817 16:27:31.598273 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:31.598285 17344 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:27:31.598291 17344 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:27:31.598299 17344 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.598357 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:27:31.598513 17344 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:27:31.598526 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.598531 17344 net.cpp:165] Memory required for data: 908801500\nI0817 16:27:31.598541 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:27:31.598551 17344 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:27:31.598565 17344 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:27:31.598573 17344 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.598583 17344 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:27:31.598593 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.598598 17344 net.cpp:165] Memory required for data: 912897500\nI0817 16:27:31.598603 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:27:31.598614 17344 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:27:31.598619 17344 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:27:31.598630 17344 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:27:31.599128 17344 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:27:31.599143 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599148 17344 net.cpp:165] Memory required for data: 916993500\nI0817 16:27:31.599158 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:27:31.599166 17344 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:27:31.599172 
17344 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:27:31.599184 17344 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:27:31.599448 17344 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:27:31.599462 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599467 17344 net.cpp:165] Memory required for data: 921089500\nI0817 16:27:31.599478 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:31.599488 17344 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:27:31.599495 17344 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:27:31.599503 17344 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.599560 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:27:31.599722 17344 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:27:31.599735 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599740 17344 net.cpp:165] Memory required for data: 925185500\nI0817 16:27:31.599750 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:27:31.599761 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:27:31.599767 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:27:31.599774 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:27:31.599782 17344 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:27:31.599820 17344 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:27:31.599833 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599838 17344 net.cpp:165] Memory required for data: 929281500\nI0817 16:27:31.599843 17344 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:27:31.599850 17344 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:27:31.599856 17344 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:27:31.599866 17344 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:27:31.599875 17344 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:27:31.599882 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599887 17344 net.cpp:165] Memory required for data: 933377500\nI0817 16:27:31.599891 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.599898 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.599905 17344 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:27:31.599911 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:31.599920 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:31.599972 17344 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:27:31.599983 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599990 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.599994 17344 net.cpp:165] Memory required for data: 941569500\nI0817 16:27:31.600006 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:27:31.600018 17344 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:27:31.600024 17344 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:27:31.600036 17344 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:27:31.600530 17344 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:27:31.600544 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.600549 17344 net.cpp:165] Memory required for data: 945665500\nI0817 16:27:31.600558 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:27:31.600567 17344 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:27:31.600574 17344 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:27:31.600585 17344 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:27:31.600883 17344 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:27:31.600898 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.600903 17344 net.cpp:165] Memory required for data: 949761500\nI0817 16:27:31.600914 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:31.600925 17344 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:27:31.600932 17344 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:27:31.600939 17344 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.600997 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:27:31.601155 17344 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:27:31.601168 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.601173 17344 net.cpp:165] Memory required for data: 953857500\nI0817 16:27:31.601182 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:27:31.601192 17344 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:27:31.601199 17344 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:27:31.601207 17344 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.601217 17344 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:27:31.601223 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.601227 17344 net.cpp:165] Memory required for data: 957953500\nI0817 16:27:31.601233 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:27:31.601248 17344 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:27:31.601253 17344 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:27:31.601266 17344 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:27:31.601752 17344 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:27:31.601766 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.601770 17344 net.cpp:165] Memory required for data: 962049500\nI0817 16:27:31.601779 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:27:31.601788 17344 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:27:31.601794 17344 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:27:31.601812 17344 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:27:31.602083 17344 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:27:31.602097 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602102 17344 net.cpp:165] Memory required for data: 966145500\nI0817 16:27:31.602113 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:31.602123 17344 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:27:31.602129 17344 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:27:31.602138 17344 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.602196 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:27:31.602351 17344 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:27:31.602365 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602370 17344 net.cpp:165] Memory required for data: 970241500\nI0817 16:27:31.602378 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:27:31.602387 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:27:31.602399 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:27:31.602409 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:27:31.602418 17344 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:27:31.602447 17344 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:27:31.602461 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602466 17344 net.cpp:165] Memory required for data: 974337500\nI0817 16:27:31.602471 17344 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:27:31.602491 17344 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:27:31.602497 17344 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:27:31.602504 17344 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.602514 17344 net.cpp:150] Setting up L2_b3_relu\nI0817 16:27:31.602521 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602525 17344 net.cpp:165] Memory required for data: 978433500\nI0817 16:27:31.602530 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.602537 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.602543 17344 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:27:31.602552 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:31.602562 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:31.602612 17344 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:27:31.602623 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602629 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.602633 17344 net.cpp:165] Memory required for data: 986625500\nI0817 16:27:31.602639 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:27:31.602653 17344 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:27:31.602659 17344 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:27:31.602669 17344 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:27:31.603170 17344 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:27:31.603184 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.603189 17344 net.cpp:165] Memory required for data: 990721500\nI0817 16:27:31.603199 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:27:31.603211 17344 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:27:31.603217 17344 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:27:31.603225 17344 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:27:31.603500 17344 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:27:31.603513 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.603518 17344 net.cpp:165] Memory required for data: 994817500\nI0817 16:27:31.603528 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:27:31.603536 17344 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:27:31.603543 17344 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:27:31.603552 17344 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.603612 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:27:31.603775 17344 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:27:31.603787 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.603792 17344 net.cpp:165] Memory required for data: 998913500\nI0817 16:27:31.603801 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:27:31.603816 17344 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:27:31.603822 17344 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:27:31.603832 17344 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.603842 17344 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:27:31.603849 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.603854 17344 net.cpp:165] Memory required for data: 1003009500\nI0817 16:27:31.603868 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:27:31.603881 17344 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:27:31.603888 17344 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:27:31.603896 17344 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:27:31.604394 17344 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:27:31.604409 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.604414 17344 net.cpp:165] Memory required for data: 1007105500\nI0817 16:27:31.604423 17344 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:27:31.604432 17344 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:27:31.604441 17344 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:27:31.604449 17344 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:27:31.604713 17344 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:27:31.604727 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.604732 17344 net.cpp:165] Memory required for data: 1011201500\nI0817 16:27:31.604742 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:27:31.604749 17344 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:27:31.604755 17344 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:27:31.604763 17344 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.604830 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:27:31.604990 17344 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:27:31.605006 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605011 17344 net.cpp:165] Memory required for data: 1015297500\nI0817 16:27:31.605020 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:27:31.605029 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:27:31.605036 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:27:31.605042 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:27:31.605051 17344 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:27:31.605080 17344 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:27:31.605090 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605094 17344 net.cpp:165] Memory required for data: 1019393500\nI0817 16:27:31.605099 17344 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:27:31.605106 17344 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:27:31.605113 17344 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:27:31.605121 17344 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:27:31.605130 17344 net.cpp:150] Setting up L2_b4_relu\nI0817 16:27:31.605137 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605141 17344 net.cpp:165] Memory required for data: 1023489500\nI0817 16:27:31.605146 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.605154 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.605159 17344 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:27:31.605168 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:27:31.605178 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:27:31.605226 17344 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:27:31.605237 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605243 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605247 17344 net.cpp:165] Memory required for data: 1031681500\nI0817 16:27:31.605252 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:27:31.605267 17344 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:27:31.605273 17344 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:27:31.605281 17344 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:27:31.605789 17344 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:27:31.605809 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.605815 17344 net.cpp:165] Memory required for data: 1035777500\nI0817 16:27:31.605823 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:27:31.605836 17344 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:27:31.605842 17344 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:27:31.605850 17344 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:27:31.606120 17344 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:27:31.606134 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.606139 17344 net.cpp:165] Memory required for data: 1039873500\nI0817 16:27:31.606149 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:27:31.606158 17344 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:27:31.606163 17344 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:27:31.606174 17344 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.606232 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:27:31.606392 17344 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:27:31.606405 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.606410 17344 net.cpp:165] Memory required for data: 1043969500\nI0817 16:27:31.606420 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:27:31.606427 17344 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:27:31.606433 17344 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:27:31.606441 17344 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.606451 17344 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:27:31.606457 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.606462 17344 net.cpp:165] Memory required for data: 1048065500\nI0817 16:27:31.606467 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:27:31.606480 17344 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:27:31.606487 17344 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:27:31.606498 17344 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:27:31.606999 17344 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:27:31.607014 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607019 17344 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:27:31.607028 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:27:31.607039 17344 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:27:31.607046 17344 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:27:31.607058 17344 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:27:31.607324 17344 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:27:31.607337 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607342 17344 net.cpp:165] Memory required for data: 1056257500\nI0817 16:27:31.607352 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:27:31.607360 17344 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:27:31.607367 17344 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:27:31.607374 17344 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.607434 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:27:31.607589 17344 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:27:31.607604 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607610 17344 net.cpp:165] Memory required for data: 1060353500\nI0817 16:27:31.607619 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:27:31.607627 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:27:31.607633 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:27:31.607640 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:27:31.607648 17344 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:27:31.607679 17344 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:27:31.607695 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607700 17344 net.cpp:165] Memory required for data: 1064449500\nI0817 16:27:31.607705 17344 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:27:31.607713 17344 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:27:31.607719 17344 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:27:31.607728 17344 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.607738 17344 net.cpp:150] Setting up L2_b5_relu\nI0817 16:27:31.607745 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607749 17344 net.cpp:165] Memory required for data: 1068545500\nI0817 16:27:31.607754 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.607761 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.607766 17344 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:27:31.607776 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:27:31.607785 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:27:31.607842 17344 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:27:31.607854 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607861 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.607866 17344 net.cpp:165] Memory required for data: 1076737500\nI0817 16:27:31.607870 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:27:31.607885 17344 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:27:31.607892 17344 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:27:31.607902 17344 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:27:31.608402 17344 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:27:31.608417 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.608422 17344 net.cpp:165] Memory required for data: 1080833500\nI0817 16:27:31.608430 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:27:31.608441 17344 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:27:31.608448 17344 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:27:31.608456 17344 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:27:31.608722 17344 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:27:31.608736 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.608741 17344 net.cpp:165] Memory required for data: 1084929500\nI0817 16:27:31.608750 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:27:31.608759 17344 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:27:31.608765 17344 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:27:31.608773 17344 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.608840 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:27:31.608999 17344 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:27:31.609014 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.609019 17344 net.cpp:165] Memory required for data: 1089025500\nI0817 16:27:31.609028 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:27:31.609036 17344 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:27:31.609043 17344 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:27:31.609050 17344 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.609060 17344 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:27:31.609066 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.609071 17344 net.cpp:165] Memory required for data: 1093121500\nI0817 16:27:31.609076 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:27:31.609088 17344 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:27:31.609096 17344 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:27:31.609105 17344 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:27:31.609632 17344 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:27:31.609647 17344 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.609652 17344 net.cpp:165] Memory required for data: 1097217500\nI0817 16:27:31.609660 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:27:31.609673 17344 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:27:31.609679 17344 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:27:31.609689 17344 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:27:31.609967 17344 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:27:31.609982 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.609987 17344 net.cpp:165] Memory required for data: 1101313500\nI0817 16:27:31.609997 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:27:31.610005 17344 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:27:31.610011 17344 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:27:31.610018 17344 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.610080 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:27:31.610262 17344 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:27:31.610276 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.610281 17344 net.cpp:165] Memory required for data: 1105409500\nI0817 16:27:31.610291 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:27:31.610302 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:27:31.610309 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:27:31.610316 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:27:31.610324 17344 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:27:31.610354 17344 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:27:31.610363 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.610368 17344 net.cpp:165] Memory required for data: 1109505500\nI0817 16:27:31.610373 17344 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:27:31.610384 17344 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:27:31.610390 17344 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:27:31.610396 17344 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.610406 17344 net.cpp:150] Setting up L2_b6_relu\nI0817 16:27:31.610414 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.610417 17344 net.cpp:165] Memory required for data: 1113601500\nI0817 16:27:31.610422 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.610430 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.610435 17344 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:27:31.610441 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:27:31.610451 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:27:31.610504 17344 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:27:31.610515 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.610522 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.610527 17344 net.cpp:165] Memory required for data: 1121793500\nI0817 16:27:31.610532 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:27:31.610545 17344 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:27:31.610553 17344 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:27:31.610561 17344 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:27:31.612052 17344 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:27:31.612071 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.612076 17344 net.cpp:165] Memory required for data: 1125889500\nI0817 16:27:31.612085 17344 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:27:31.612102 17344 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:27:31.612109 17344 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:27:31.612121 17344 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:27:31.612390 17344 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:27:31.612404 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.612409 17344 net.cpp:165] Memory required for data: 1129985500\nI0817 16:27:31.612419 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:27:31.612432 17344 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:27:31.612437 17344 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:27:31.612445 17344 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.612504 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:27:31.612665 17344 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:27:31.612679 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.612684 17344 net.cpp:165] Memory required for data: 1134081500\nI0817 16:27:31.612694 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:27:31.612704 17344 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:27:31.612710 17344 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:27:31.612717 17344 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.612730 17344 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:27:31.612737 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.612742 17344 net.cpp:165] Memory required for data: 1138177500\nI0817 16:27:31.612747 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:27:31.612758 17344 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:27:31.612764 17344 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:27:31.612776 17344 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:27:31.613287 17344 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:27:31.613303 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.613308 17344 net.cpp:165] Memory required for data: 1142273500\nI0817 16:27:31.613317 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:27:31.613327 17344 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:27:31.613332 17344 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:27:31.613346 17344 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:27:31.613616 17344 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:27:31.613629 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.613634 17344 net.cpp:165] Memory required for data: 1146369500\nI0817 16:27:31.613644 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:27:31.613656 17344 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:27:31.613662 17344 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:27:31.613670 17344 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.613729 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:27:31.613893 17344 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:27:31.613906 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.613911 17344 net.cpp:165] Memory required for data: 1150465500\nI0817 16:27:31.613920 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:27:31.613932 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:27:31.613940 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:27:31.613946 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:27:31.613955 17344 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:27:31.613986 17344 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:27:31.613996 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.614001 17344 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:27:31.614006 17344 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:27:31.614013 17344 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:27:31.614029 17344 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:27:31.614040 17344 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.614050 17344 net.cpp:150] Setting up L2_b7_relu\nI0817 16:27:31.614056 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.614061 17344 net.cpp:165] Memory required for data: 1158657500\nI0817 16:27:31.614066 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.614073 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.614078 17344 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:27:31.614085 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:27:31.614095 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:27:31.614148 17344 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:27:31.614157 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.614164 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.614168 17344 net.cpp:165] Memory required for data: 1166849500\nI0817 16:27:31.614174 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:27:31.614184 17344 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:27:31.614192 17344 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:27:31.614203 17344 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:27:31.614691 17344 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:27:31.614706 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.614711 17344 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:27:31.614719 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:27:31.614728 17344 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:27:31.614734 17344 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:27:31.614745 17344 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:27:31.615023 17344 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:27:31.615037 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.615041 17344 net.cpp:165] Memory required for data: 1175041500\nI0817 16:27:31.615051 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:27:31.615063 17344 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:27:31.615069 17344 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:27:31.615077 17344 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.615137 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:27:31.615319 17344 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:27:31.615342 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.615347 17344 net.cpp:165] Memory required for data: 1179137500\nI0817 16:27:31.615357 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:27:31.615368 17344 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:27:31.615375 17344 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:27:31.615382 17344 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.615391 17344 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:27:31.615398 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.615403 17344 net.cpp:165] Memory required for data: 1183233500\nI0817 16:27:31.615407 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:27:31.615422 17344 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:27:31.615428 17344 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:27:31.615439 17344 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:27:31.615970 17344 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:27:31.615985 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.615990 17344 net.cpp:165] Memory required for data: 1187329500\nI0817 16:27:31.615999 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:27:31.616015 17344 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:27:31.616022 17344 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:27:31.616034 17344 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:27:31.616308 17344 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:27:31.616322 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.616328 17344 net.cpp:165] Memory required for data: 1191425500\nI0817 16:27:31.616338 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:27:31.616348 17344 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:27:31.616355 17344 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:27:31.616363 17344 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.616422 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:27:31.616581 17344 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:27:31.616593 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.616598 17344 net.cpp:165] Memory required for data: 1195521500\nI0817 16:27:31.616607 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:27:31.616619 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:27:31.616626 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:27:31.616632 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:27:31.616641 17344 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:27:31.616668 17344 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:27:31.616683 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:31.616688 17344 net.cpp:165] Memory required for data: 1199617500\nI0817 16:27:31.616693 17344 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:27:31.616699 17344 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:27:31.616705 17344 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:27:31.616713 17344 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.616721 17344 net.cpp:150] Setting up L2_b8_relu\nI0817 16:27:31.616729 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.616732 17344 net.cpp:165] Memory required for data: 1203713500\nI0817 16:27:31.616737 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.616747 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.616753 17344 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:27:31.616760 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:27:31.616782 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:27:31.616842 17344 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:27:31.616858 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.616864 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.616869 17344 net.cpp:165] Memory required for data: 1211905500\nI0817 16:27:31.616874 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:27:31.616886 17344 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:27:31.616894 17344 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:27:31.616902 17344 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:27:31.617398 17344 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:27:31.617414 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:27:31.617420 17344 net.cpp:165] Memory required for data: 1216001500\nI0817 16:27:31.617429 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:27:31.617439 17344 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:27:31.617444 17344 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:27:31.617455 17344 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:27:31.617735 17344 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:27:31.617754 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.617759 17344 net.cpp:165] Memory required for data: 1220097500\nI0817 16:27:31.617770 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:27:31.617781 17344 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:27:31.617787 17344 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:27:31.617795 17344 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.617859 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:27:31.618026 17344 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:27:31.618038 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.618043 17344 net.cpp:165] Memory required for data: 1224193500\nI0817 16:27:31.618052 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:27:31.618060 17344 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:27:31.618067 17344 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:27:31.618077 17344 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.618086 17344 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:27:31.618093 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.618098 17344 net.cpp:165] Memory required for data: 1228289500\nI0817 16:27:31.618103 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:27:31.618116 17344 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:27:31.618122 17344 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:27:31.618131 17344 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:27:31.619606 17344 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:27:31.619622 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.619628 17344 net.cpp:165] Memory required for data: 1232385500\nI0817 16:27:31.619637 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:27:31.619647 17344 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:27:31.619653 17344 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:27:31.619665 17344 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:27:31.619945 17344 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:27:31.619958 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.619963 17344 net.cpp:165] Memory required for data: 1236481500\nI0817 16:27:31.620014 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:27:31.620025 17344 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:27:31.620031 17344 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:27:31.620038 17344 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.620102 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:27:31.620260 17344 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:27:31.620276 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.620281 17344 net.cpp:165] Memory required for data: 1240577500\nI0817 16:27:31.620290 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:27:31.620301 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:27:31.620306 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:27:31.620313 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:27:31.620324 17344 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:27:31.620352 17344 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:27:31.620360 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.620365 17344 net.cpp:165] Memory required for data: 1244673500\nI0817 16:27:31.620370 17344 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:27:31.620378 17344 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:27:31.620384 17344 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:27:31.620394 17344 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.620404 17344 net.cpp:150] Setting up L2_b9_relu\nI0817 16:27:31.620410 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.620424 17344 net.cpp:165] Memory required for data: 1248769500\nI0817 16:27:31.620429 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.620436 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.620441 17344 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:27:31.620452 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:27:31.620462 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:27:31.620512 17344 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:27:31.620522 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.620528 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:27:31.620533 17344 net.cpp:165] Memory required for data: 1256961500\nI0817 16:27:31.620538 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:27:31.620553 17344 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:27:31.620560 17344 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:27:31.620569 17344 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:27:31.621075 17344 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:27:31.621089 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.621095 17344 net.cpp:165] Memory required for data: 1257985500\nI0817 16:27:31.621104 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:27:31.621115 17344 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:27:31.621122 17344 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:27:31.621130 17344 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:27:31.621408 17344 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:27:31.621421 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.621426 17344 net.cpp:165] Memory required for data: 1259009500\nI0817 16:27:31.621436 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:31.621448 17344 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:27:31.621454 17344 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:27:31.621462 17344 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.621521 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:27:31.621687 17344 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:27:31.621700 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.621704 17344 net.cpp:165] Memory required for data: 1260033500\nI0817 16:27:31.621713 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:27:31.621724 17344 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:27:31.621731 17344 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:27:31.621738 17344 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:27:31.621750 17344 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:27:31.621757 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.621762 17344 net.cpp:165] Memory required for data: 1261057500\nI0817 16:27:31.621767 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:27:31.621778 17344 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:27:31.621783 17344 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:27:31.621794 17344 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:27:31.622294 17344 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:27:31.622309 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.622315 17344 net.cpp:165] Memory required for data: 1262081500\nI0817 16:27:31.622323 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:27:31.622333 17344 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:27:31.622339 17344 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:27:31.622350 17344 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:27:31.622630 17344 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:27:31.622650 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.622655 17344 net.cpp:165] Memory required for data: 1263105500\nI0817 16:27:31.622666 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:31.622675 17344 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:27:31.622681 17344 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:27:31.622689 17344 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:27:31.622750 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:27:31.622920 17344 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:27:31.622937 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.622942 17344 net.cpp:165] Memory required for data: 1264129500\nI0817 16:27:31.622951 17344 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:27:31.622961 17344 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:27:31.622967 17344 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:27:31.622975 17344 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:27:31.623013 17344 net.cpp:150] Setting up L3_b1_pool\nI0817 16:27:31.623021 17344 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:27:31.623026 17344 net.cpp:165] Memory required for data: 1265153500\nI0817 16:27:31.623033 17344 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:27:31.623040 17344 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:27:31.623046 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:27:31.623054 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:27:31.623060 17344 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:27:31.623097 17344 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:27:31.623111 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.623116 17344 net.cpp:165] Memory required for data: 1266177500\nI0817 16:27:31.623121 17344 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:27:31.623128 17344 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:27:31.623134 17344 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:27:31.623142 17344 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:27:31.623150 17344 net.cpp:150] Setting up L3_b1_relu\nI0817 16:27:31.623157 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.623162 17344 net.cpp:165] Memory required for data: 1267201500\nI0817 16:27:31.623167 17344 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:27:31.623175 17344 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:27:31.623185 17344 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:27:31.624416 17344 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:27:31.624434 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:27:31.624439 17344 net.cpp:165] Memory required for data: 1268225500\nI0817 16:27:31.624445 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:27:31.624459 17344 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:27:31.624465 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:27:31.624472 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:27:31.624480 17344 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:27:31.624526 17344 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:27:31.624538 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.624542 17344 net.cpp:165] Memory required for data: 1270273500\nI0817 16:27:31.624548 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.624557 17344 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.624562 17344 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:27:31.624572 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:31.624583 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:31.624635 17344 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:27:31.624650 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.624665 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.624670 17344 net.cpp:165] Memory required for data: 1274369500\nI0817 16:27:31.624675 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:27:31.624689 17344 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:27:31.624696 17344 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:27:31.624704 17344 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:27:31.625757 17344 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:27:31.625772 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.625777 17344 net.cpp:165] Memory required for data: 1276417500\nI0817 16:27:31.625787 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:27:31.625798 17344 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:27:31.625811 17344 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:27:31.625819 17344 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:27:31.626098 17344 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:27:31.626112 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.626117 17344 net.cpp:165] Memory required for data: 1278465500\nI0817 16:27:31.626127 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:31.626137 17344 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:27:31.626142 17344 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:27:31.626150 17344 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.626212 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:27:31.626374 17344 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:27:31.626387 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.626392 17344 net.cpp:165] Memory required for data: 1280513500\nI0817 16:27:31.626401 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:27:31.626410 17344 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:27:31.626416 17344 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:27:31.626426 17344 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:27:31.626436 17344 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:27:31.626443 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.626447 17344 net.cpp:165] Memory required for data: 1282561500\nI0817 16:27:31.626452 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:27:31.626466 17344 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:27:31.626472 17344 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:27:31.626480 17344 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:27:31.627526 17344 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:27:31.627540 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.627545 17344 net.cpp:165] Memory required for data: 1284609500\nI0817 16:27:31.627554 17344 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:27:31.627566 17344 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:27:31.627573 17344 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:27:31.627581 17344 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:27:31.627863 17344 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:27:31.627876 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.627882 17344 net.cpp:165] Memory required for data: 1286657500\nI0817 16:27:31.627892 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:31.627903 17344 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:27:31.627909 17344 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:27:31.627918 17344 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:27:31.627979 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:27:31.628141 17344 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:27:31.628154 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.628159 17344 net.cpp:165] Memory required for data: 1288705500\nI0817 16:27:31.628175 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:27:31.628187 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:27:31.628195 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:27:31.628202 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:27:31.628212 17344 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:27:31.628247 17344 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:27:31.628255 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.628259 17344 net.cpp:165] Memory required for data: 1290753500\nI0817 16:27:31.628265 17344 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:27:31.628275 17344 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:27:31.628283 17344 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:27:31.628289 17344 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:27:31.628298 17344 net.cpp:150] Setting up L3_b2_relu\nI0817 16:27:31.628305 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.628309 17344 net.cpp:165] Memory required for data: 1292801500\nI0817 16:27:31.628314 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.628321 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.628327 17344 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:27:31.628334 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:31.628343 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:31.628393 17344 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:27:31.628404 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.628412 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.628417 17344 net.cpp:165] Memory required for data: 1296897500\nI0817 16:27:31.628422 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:27:31.628435 17344 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:27:31.628443 17344 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:27:31.628451 17344 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:27:31.629499 17344 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:27:31.629514 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.629519 17344 net.cpp:165] Memory required for data: 1298945500\nI0817 16:27:31.629528 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:27:31.629540 17344 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:27:31.629547 17344 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:27:31.629559 17344 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:27:31.629842 17344 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:27:31.629855 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.629859 17344 net.cpp:165] Memory required for data: 1300993500\nI0817 16:27:31.629870 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:31.629879 17344 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:27:31.629885 17344 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:27:31.629894 17344 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.629954 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:27:31.630117 17344 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:27:31.630131 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.630136 17344 net.cpp:165] Memory required for data: 1303041500\nI0817 16:27:31.630144 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:27:31.630152 17344 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:27:31.630158 17344 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:27:31.630168 17344 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:27:31.630178 17344 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:27:31.630192 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.630198 17344 net.cpp:165] Memory required for data: 1305089500\nI0817 16:27:31.630203 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:27:31.630218 17344 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:27:31.630224 17344 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:27:31.630233 17344 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:27:31.631278 17344 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:27:31.631292 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.631297 17344 net.cpp:165] Memory required for data: 1307137500\nI0817 16:27:31.631306 17344 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:27:31.631319 17344 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:27:31.631325 17344 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:27:31.631333 17344 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:27:31.631605 17344 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:27:31.631618 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.631623 17344 net.cpp:165] Memory required for data: 1309185500\nI0817 16:27:31.631633 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:31.631645 17344 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:27:31.631652 17344 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:27:31.631660 17344 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:27:31.631723 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:27:31.631891 17344 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:27:31.631906 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.631911 17344 net.cpp:165] Memory required for data: 1311233500\nI0817 16:27:31.631919 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:27:31.631933 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:27:31.631940 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:27:31.631947 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:27:31.631958 17344 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:27:31.631991 17344 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:27:31.632002 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.632007 17344 net.cpp:165] Memory required for data: 1313281500\nI0817 16:27:31.632014 17344 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:27:31.632025 17344 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:27:31.632030 17344 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:27:31.632037 17344 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:27:31.632046 17344 net.cpp:150] Setting up L3_b3_relu\nI0817 16:27:31.632053 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.632057 17344 net.cpp:165] Memory required for data: 1315329500\nI0817 16:27:31.632062 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.632069 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.632074 17344 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:27:31.632082 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:27:31.632091 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:27:31.632143 17344 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:27:31.632154 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.632160 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.632165 17344 net.cpp:165] Memory required for data: 1319425500\nI0817 16:27:31.632170 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:27:31.632184 17344 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:27:31.632191 17344 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:27:31.632208 17344 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:27:31.633278 17344 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:27:31.633293 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.633298 17344 net.cpp:165] Memory required for data: 1321473500\nI0817 16:27:31.633307 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:27:31.633319 17344 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:27:31.633327 17344 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:27:31.633337 17344 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:27:31.633605 17344 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:27:31.633618 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.633623 17344 net.cpp:165] Memory required for data: 1323521500\nI0817 16:27:31.633633 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:27:31.633642 17344 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:27:31.633648 17344 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:27:31.633659 17344 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.633725 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:27:31.633965 17344 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:27:31.633981 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.633986 17344 net.cpp:165] Memory required for data: 1325569500\nI0817 16:27:31.633996 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:27:31.634004 17344 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:27:31.634011 17344 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:27:31.634021 17344 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:27:31.634032 17344 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:27:31.634039 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.634043 17344 net.cpp:165] Memory required for data: 1327617500\nI0817 16:27:31.634048 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:27:31.634063 17344 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:27:31.634069 17344 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:27:31.634078 17344 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:27:31.636097 17344 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:27:31.636116 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.636121 17344 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:27:31.636131 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:27:31.636143 17344 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:27:31.636150 17344 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:27:31.636162 17344 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:27:31.636482 17344 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:27:31.636504 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.636513 17344 net.cpp:165] Memory required for data: 1331713500\nI0817 16:27:31.636533 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:27:31.636548 17344 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:27:31.636555 17344 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:27:31.636566 17344 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:27:31.636631 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:27:31.636793 17344 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:27:31.636812 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.636818 17344 net.cpp:165] Memory required for data: 1333761500\nI0817 16:27:31.636828 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:27:31.636837 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:27:31.636844 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:27:31.636852 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:27:31.636862 17344 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:27:31.636909 17344 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:27:31.636919 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.636924 17344 net.cpp:165] Memory required for data: 1335809500\nI0817 16:27:31.636929 17344 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:27:31.636937 17344 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:27:31.636943 17344 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:27:31.636950 17344 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:27:31.636960 17344 net.cpp:150] Setting up L3_b4_relu\nI0817 16:27:31.636966 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.636970 17344 net.cpp:165] Memory required for data: 1337857500\nI0817 16:27:31.636976 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.636994 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.636999 17344 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:27:31.637007 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:27:31.637017 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:27:31.637066 17344 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:27:31.637079 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.637085 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.637089 17344 net.cpp:165] Memory required for data: 1341953500\nI0817 16:27:31.637095 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:27:31.637106 17344 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:27:31.637112 17344 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:27:31.637125 17344 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:27:31.638169 17344 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:27:31.638183 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.638188 17344 net.cpp:165] Memory required for data: 1344001500\nI0817 16:27:31.638197 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:27:31.638206 17344 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:27:31.638213 17344 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:27:31.638226 17344 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:27:31.638501 17344 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:27:31.638516 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.638521 17344 net.cpp:165] Memory required for data: 1346049500\nI0817 16:27:31.638532 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:27:31.638540 17344 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:27:31.638546 17344 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:27:31.638555 17344 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.638617 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:27:31.638780 17344 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:27:31.638792 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.638797 17344 net.cpp:165] Memory required for data: 1348097500\nI0817 16:27:31.638813 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:27:31.638824 17344 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:27:31.638831 17344 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:27:31.638839 17344 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:27:31.638849 17344 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:27:31.638856 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.638861 17344 net.cpp:165] Memory required for data: 1350145500\nI0817 16:27:31.638866 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:27:31.638880 17344 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:27:31.638886 17344 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:27:31.638906 17344 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:27:31.639940 17344 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:27:31.639955 17344 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:27:31.639961 17344 net.cpp:165] Memory required for data: 1352193500\nI0817 16:27:31.639969 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:27:31.639981 17344 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:27:31.639988 17344 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:27:31.639999 17344 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:27:31.640271 17344 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:27:31.640285 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640290 17344 net.cpp:165] Memory required for data: 1354241500\nI0817 16:27:31.640300 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:27:31.640308 17344 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:27:31.640316 17344 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:27:31.640326 17344 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:27:31.640384 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:27:31.640543 17344 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:27:31.640557 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640561 17344 net.cpp:165] Memory required for data: 1356289500\nI0817 16:27:31.640570 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:27:31.640579 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:27:31.640585 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:27:31.640592 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:27:31.640604 17344 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:27:31.640640 17344 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:27:31.640650 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640655 17344 net.cpp:165] Memory required for data: 1358337500\nI0817 16:27:31.640661 17344 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:27:31.640668 17344 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:27:31.640674 17344 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:27:31.640684 17344 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:27:31.640694 17344 net.cpp:150] Setting up L3_b5_relu\nI0817 16:27:31.640702 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640705 17344 net.cpp:165] Memory required for data: 1360385500\nI0817 16:27:31.640710 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.640717 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.640723 17344 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:27:31.640730 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:27:31.640739 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:27:31.640789 17344 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:27:31.640800 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640812 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.640817 17344 net.cpp:165] Memory required for data: 1364481500\nI0817 16:27:31.640822 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:27:31.640835 17344 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:27:31.640841 17344 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:27:31.640853 17344 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:27:31.641898 17344 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:27:31.641913 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.641918 17344 net.cpp:165] Memory required for data: 1366529500\nI0817 16:27:31.641934 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:27:31.641944 
17344 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:27:31.641952 17344 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:27:31.641963 17344 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:27:31.642238 17344 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:27:31.642254 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.642259 17344 net.cpp:165] Memory required for data: 1368577500\nI0817 16:27:31.642269 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:27:31.642278 17344 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:27:31.642284 17344 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:27:31.642292 17344 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.642354 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:27:31.642519 17344 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:27:31.642532 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.642537 17344 net.cpp:165] Memory required for data: 1370625500\nI0817 16:27:31.642546 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:27:31.642557 17344 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:27:31.642565 17344 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:27:31.642571 17344 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:27:31.642581 17344 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:27:31.642588 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.642592 17344 net.cpp:165] Memory required for data: 1372673500\nI0817 16:27:31.642597 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:27:31.642611 17344 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:27:31.642618 17344 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:27:31.642627 17344 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:27:31.643656 17344 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:27:31.643671 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.643676 17344 net.cpp:165] Memory required for data: 1374721500\nI0817 16:27:31.643683 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:27:31.643695 17344 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:27:31.643702 17344 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:27:31.643713 17344 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:27:31.643995 17344 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:27:31.644007 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644012 17344 net.cpp:165] Memory required for data: 1376769500\nI0817 16:27:31.644023 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:27:31.644032 17344 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:27:31.644038 17344 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:27:31.644048 17344 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:27:31.644107 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:27:31.644270 17344 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:27:31.644284 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644289 17344 net.cpp:165] Memory required for data: 1378817500\nI0817 16:27:31.644297 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:27:31.644306 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:27:31.644314 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:27:31.644323 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:27:31.644331 17344 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:27:31.644371 17344 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:27:31.644381 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644387 17344 net.cpp:165] Memory required for data: 1380865500\nI0817 16:27:31.644392 17344 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:27:31.644399 17344 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:27:31.644412 17344 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:27:31.644423 17344 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:27:31.644433 17344 net.cpp:150] Setting up L3_b6_relu\nI0817 16:27:31.644439 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644444 17344 net.cpp:165] Memory required for data: 1382913500\nI0817 16:27:31.644448 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.644456 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.644461 17344 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:27:31.644469 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:27:31.644479 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:27:31.644531 17344 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:27:31.644543 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644549 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.644554 17344 net.cpp:165] Memory required for data: 1387009500\nI0817 16:27:31.644559 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:27:31.644570 17344 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:27:31.644577 17344 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:27:31.644588 17344 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:27:31.645630 17344 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:27:31.645645 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.645650 17344 net.cpp:165] Memory required for data: 1389057500\nI0817 16:27:31.645658 17344 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:27:31.645668 17344 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:27:31.645674 17344 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:27:31.645685 17344 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:27:31.645969 17344 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:27:31.645982 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.645987 17344 net.cpp:165] Memory required for data: 1391105500\nI0817 16:27:31.645998 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:27:31.646006 17344 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:27:31.646013 17344 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:27:31.646020 17344 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.646081 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:27:31.646240 17344 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:27:31.646256 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.646261 17344 net.cpp:165] Memory required for data: 1393153500\nI0817 16:27:31.646270 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:27:31.646303 17344 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:27:31.646312 17344 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:27:31.646320 17344 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:27:31.646330 17344 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:27:31.646337 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.646342 17344 net.cpp:165] Memory required for data: 1395201500\nI0817 16:27:31.646347 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:27:31.646358 17344 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:27:31.646364 17344 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:27:31.646376 17344 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:27:31.647411 17344 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:27:31.647426 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.647431 17344 net.cpp:165] Memory required for data: 1397249500\nI0817 16:27:31.647439 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:27:31.647456 17344 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:27:31.647464 17344 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:27:31.647475 17344 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:27:31.647753 17344 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:27:31.647769 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.647774 17344 net.cpp:165] Memory required for data: 1399297500\nI0817 16:27:31.647784 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:27:31.647794 17344 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:27:31.647799 17344 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:27:31.647812 17344 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:27:31.647874 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:27:31.648038 17344 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:27:31.648051 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.648056 17344 net.cpp:165] Memory required for data: 1401345500\nI0817 16:27:31.648066 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:27:31.648077 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:27:31.648084 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:27:31.648092 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:27:31.648099 17344 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:27:31.648136 17344 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:27:31.648147 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.648152 17344 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:27:31.648157 17344 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:27:31.648165 17344 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:27:31.648171 17344 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:27:31.648178 17344 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:27:31.648187 17344 net.cpp:150] Setting up L3_b7_relu\nI0817 16:27:31.648195 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.648200 17344 net.cpp:165] Memory required for data: 1405441500\nI0817 16:27:31.648205 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.648211 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.648216 17344 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:27:31.648226 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:27:31.648236 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:27:31.648284 17344 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:27:31.648295 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.648301 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.648306 17344 net.cpp:165] Memory required for data: 1409537500\nI0817 16:27:31.648311 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:27:31.648325 17344 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:27:31.648332 17344 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:27:31.648341 17344 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:27:31.650357 17344 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:27:31.650374 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.650379 17344 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:27:31.650389 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:27:31.650401 17344 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:27:31.650408 17344 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:27:31.650418 17344 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:27:31.650697 17344 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:27:31.650718 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.650724 17344 net.cpp:165] Memory required for data: 1413633500\nI0817 16:27:31.650734 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:27:31.650743 17344 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:27:31.650750 17344 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:27:31.650758 17344 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.650830 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:27:31.650997 17344 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:27:31.651011 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.651016 17344 net.cpp:165] Memory required for data: 1415681500\nI0817 16:27:31.651026 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:27:31.651033 17344 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:27:31.651039 17344 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:27:31.651047 17344 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:27:31.651057 17344 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:27:31.651063 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.651068 17344 net.cpp:165] Memory required for data: 1417729500\nI0817 16:27:31.651073 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:27:31.651087 17344 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:27:31.651094 17344 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:27:31.651105 17344 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:27:31.652139 17344 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:27:31.652154 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.652159 17344 net.cpp:165] Memory required for data: 1419777500\nI0817 16:27:31.652168 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:27:31.652181 17344 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:27:31.652189 17344 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:27:31.652196 17344 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:27:31.652467 17344 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:27:31.652480 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.652485 17344 net.cpp:165] Memory required for data: 1421825500\nI0817 16:27:31.652495 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:27:31.652508 17344 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:27:31.652514 17344 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:27:31.652521 17344 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:27:31.652583 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:27:31.652746 17344 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:27:31.652760 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.652765 17344 net.cpp:165] Memory required for data: 1423873500\nI0817 16:27:31.652773 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:27:31.652786 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:27:31.652792 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:27:31.652799 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:27:31.652813 17344 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:27:31.652851 17344 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:27:31.652863 17344 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:27:31.652868 17344 net.cpp:165] Memory required for data: 1425921500\nI0817 16:27:31.652873 17344 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:27:31.652881 17344 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:27:31.652887 17344 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:27:31.652899 17344 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:27:31.652909 17344 net.cpp:150] Setting up L3_b8_relu\nI0817 16:27:31.652915 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.652920 17344 net.cpp:165] Memory required for data: 1427969500\nI0817 16:27:31.652931 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.652940 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.652945 17344 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:27:31.652953 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:27:31.652962 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:27:31.653012 17344 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:27:31.653024 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.653031 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.653036 17344 net.cpp:165] Memory required for data: 1432065500\nI0817 16:27:31.653041 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:27:31.653055 17344 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:27:31.653062 17344 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:27:31.653071 17344 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:27:31.654099 17344 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:27:31.654114 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:27:31.654119 17344 net.cpp:165] Memory required for data: 1434113500\nI0817 16:27:31.654129 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:27:31.654140 17344 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:27:31.654147 17344 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:27:31.654155 17344 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:27:31.654433 17344 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:27:31.654445 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.654450 17344 net.cpp:165] Memory required for data: 1436161500\nI0817 16:27:31.654461 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:27:31.654469 17344 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:27:31.654475 17344 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:27:31.654482 17344 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.654546 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:27:31.654709 17344 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:27:31.654722 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.654727 17344 net.cpp:165] Memory required for data: 1438209500\nI0817 16:27:31.654736 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:27:31.654744 17344 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:27:31.654750 17344 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:27:31.654760 17344 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:27:31.654770 17344 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:27:31.654778 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.654783 17344 net.cpp:165] Memory required for data: 1440257500\nI0817 16:27:31.654786 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:27:31.654800 17344 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:27:31.654814 17344 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:27:31.654822 17344 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:27:31.655870 17344 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:27:31.655885 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.655890 17344 net.cpp:165] Memory required for data: 1442305500\nI0817 16:27:31.655900 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:27:31.655911 17344 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:27:31.655918 17344 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:27:31.655926 17344 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:27:31.656201 17344 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:27:31.656214 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.656226 17344 net.cpp:165] Memory required for data: 1444353500\nI0817 16:27:31.656237 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:27:31.656249 17344 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:27:31.656255 17344 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:27:31.656263 17344 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:27:31.656327 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:27:31.656491 17344 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:27:31.656504 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.656509 17344 net.cpp:165] Memory required for data: 1446401500\nI0817 16:27:31.656518 17344 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:27:31.656530 17344 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:27:31.656538 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:27:31.656544 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:27:31.656555 17344 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:27:31.656589 17344 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:27:31.656600 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.656605 17344 net.cpp:165] Memory required for data: 1448449500\nI0817 16:27:31.656610 17344 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:27:31.656621 17344 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:27:31.656628 17344 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:27:31.656635 17344 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:27:31.656644 17344 net.cpp:150] Setting up L3_b9_relu\nI0817 16:27:31.656651 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:27:31.656656 17344 net.cpp:165] Memory required for data: 1450497500\nI0817 16:27:31.656661 17344 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:27:31.656668 17344 net.cpp:100] Creating Layer post_pool\nI0817 16:27:31.656674 17344 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:27:31.656682 17344 net.cpp:408] post_pool -> post_pool\nI0817 16:27:31.656720 17344 net.cpp:150] Setting up post_pool\nI0817 16:27:31.656730 17344 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:27:31.656735 17344 net.cpp:165] Memory required for data: 1450529500\nI0817 16:27:31.656740 17344 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:27:31.656751 17344 net.cpp:100] Creating Layer post_FC\nI0817 16:27:31.656757 17344 net.cpp:434] post_FC <- post_pool\nI0817 16:27:31.656765 17344 net.cpp:408] post_FC -> post_FC_top\nI0817 16:27:31.656939 17344 net.cpp:150] Setting up post_FC\nI0817 16:27:31.656952 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.656957 17344 net.cpp:165] Memory required for data: 1450534500\nI0817 16:27:31.656966 17344 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:27:31.656975 17344 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:27:31.656980 17344 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:27:31.656988 17344 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:27:31.657001 17344 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:27:31.657049 17344 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:27:31.657060 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.657066 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:27:31.657071 17344 net.cpp:165] Memory required for data: 1450544500\nI0817 16:27:31.657076 17344 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:27:31.657088 17344 net.cpp:100] Creating Layer accuracy\nI0817 16:27:31.657094 17344 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:27:31.657101 17344 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:27:31.657109 17344 net.cpp:408] accuracy -> accuracy\nI0817 16:27:31.657121 17344 net.cpp:150] Setting up accuracy\nI0817 16:27:31.657130 17344 net.cpp:157] Top shape: (1)\nI0817 16:27:31.657140 17344 net.cpp:165] Memory required for data: 1450544504\nI0817 16:27:31.657146 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:31.657155 17344 net.cpp:100] Creating Layer loss\nI0817 16:27:31.657160 17344 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:27:31.657166 17344 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:27:31.657173 17344 net.cpp:408] loss -> loss\nI0817 16:27:31.657186 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:27:31.657310 17344 net.cpp:150] Setting up loss\nI0817 16:27:31.657323 17344 net.cpp:157] Top shape: (1)\nI0817 16:27:31.657328 17344 net.cpp:160]     with loss weight 1\nI0817 16:27:31.657346 17344 net.cpp:165] Memory required for data: 1450544508\nI0817 16:27:31.657351 17344 net.cpp:226] loss needs backward computation.\nI0817 16:27:31.657357 17344 net.cpp:228] accuracy does not need backward computation.\nI0817 16:27:31.657363 17344 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:27:31.657369 17344 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:27:31.657374 17344 net.cpp:226] post_pool needs backward computation.\nI0817 16:27:31.657379 17344 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:27:31.657384 17344 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.657389 17344 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.657394 17344 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.657399 17344 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.657404 17344 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.657409 17344 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.657414 17344 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.657419 17344 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.657424 17344 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:27:31.657429 17344 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:27:31.657434 17344 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.657440 17344 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.657445 17344 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.657450 17344 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.657455 17344 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.657460 17344 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:27:31.657465 17344 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.657470 17344 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.657475 17344 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:27:31.657480 17344 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:27:31.657485 17344 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:27:31.657491 17344 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.657496 17344 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.657501 17344 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.657506 17344 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.657511 17344 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.657516 17344 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.657521 17344 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.657526 17344 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:27:31.657531 17344 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:27:31.657536 17344 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.657541 17344 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.657546 17344 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.657562 17344 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.657568 17344 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.657573 17344 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.657578 17344 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.657583 17344 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.657588 17344 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:27:31.657594 17344 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:27:31.657599 17344 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:27:31.657604 17344 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.657609 17344 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.657615 17344 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.657620 17344 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.657625 17344 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.657630 17344 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.657635 17344 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.657641 17344 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:27:31.657646 17344 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:27:31.657651 17344 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.657657 17344 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.657662 17344 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.657670 17344 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.657676 17344 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.657681 17344 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.657686 17344 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.657691 17344 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.657696 17344 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:27:31.657702 17344 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:27:31.657707 17344 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:27:31.657712 17344 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.657717 17344 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.657723 17344 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.657728 17344 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.657733 17344 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:27:31.657738 
17344 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.657743 17344 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.657748 17344 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:27:31.657753 17344 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:27:31.657759 17344 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.657764 17344 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.657770 17344 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.657775 17344 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.657780 17344 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.657785 17344 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.657790 17344 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.657795 17344 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.657801 17344 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:27:31.657812 17344 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:27:31.657825 17344 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:27:31.657830 17344 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:27:31.657835 17344 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.657841 17344 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:27:31.657847 17344 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.657852 17344 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.657857 17344 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.657863 17344 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.657868 17344 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:27:31.657873 17344 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.657878 17344 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.657884 17344 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:27:31.657889 17344 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:27:31.657894 17344 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.657901 17344 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.657905 17344 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.657910 17344 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.657917 17344 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.657922 17344 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.657927 17344 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.657932 17344 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.657938 17344 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:27:31.657946 17344 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:27:31.657951 17344 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.657958 17344 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.657963 17344 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.657968 17344 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.657974 17344 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.657979 17344 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:27:31.657984 17344 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.657989 17344 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.657994 17344 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:27:31.657999 17344 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:27:31.658004 17344 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:27:31.658010 17344 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.658015 17344 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.658021 17344 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.658026 17344 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.658031 17344 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.658036 17344 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.658041 17344 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.658047 17344 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:27:31.658052 17344 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:27:31.658057 17344 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.658063 17344 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.658068 17344 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.658074 17344 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.658084 17344 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.658090 17344 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.658095 17344 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.658102 17344 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.658107 17344 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:27:31.658113 17344 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:27:31.658118 17344 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:27:31.658123 17344 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.658128 17344 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.658134 17344 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.658139 17344 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.658144 17344 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.658149 17344 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.658155 17344 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.658160 17344 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:27:31.658166 17344 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:27:31.658171 17344 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.658177 17344 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.658182 17344 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.658187 17344 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.658193 17344 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.658198 17344 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.658203 17344 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.658210 17344 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.658215 17344 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:27:31.658221 17344 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:27:31.658226 17344 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:27:31.658232 17344 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.658237 17344 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.658242 17344 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.658248 17344 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.658254 17344 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:27:31.658259 17344 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.658265 17344 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.658270 17344 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:27:31.658277 17344 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:27:31.658282 17344 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.658288 17344 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.658293 17344 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.658298 17344 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.658303 17344 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.658308 17344 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.658314 17344 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.658320 17344 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.658325 17344 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:27:31.658335 17344 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:27:31.658344 17344 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:27:31.658352 17344 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:27:31.658359 17344 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.658365 17344 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:27:31.658370 17344 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.658376 17344 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.658381 17344 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.658387 17344 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.658392 17344 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:27:31.658397 17344 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.658403 17344 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.658408 17344 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:27:31.658414 17344 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:27:31.658419 17344 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:27:31.658427 17344 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:27:31.658432 17344 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:27:31.658437 17344 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:27:31.658442 17344 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:27:31.658448 17344 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:27:31.658453 17344 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:27:31.658459 17344 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:27:31.658464 17344 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:27:31.658470 17344 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:27:31.658475 17344 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:27:31.658483 17344 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:27:31.658488 17344 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:27:31.658493 17344 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:27:31.658499 17344 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:27:31.658504 17344 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:27:31.658509 17344 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:27:31.658515 17344 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:27:31.658521 17344 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:27:31.658527 17344 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:27:31.658534 17344 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:27:31.658540 17344 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:27:31.658545 17344 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:27:31.658550 17344 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:27:31.658555 17344 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:27:31.658561 17344 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:27:31.658566 17344 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:27:31.658572 17344 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:27:31.658577 17344 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:27:31.658583 17344 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:27:31.658589 17344 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:27:31.658596 17344 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:27:31.658601 17344 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:27:31.658607 17344 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:27:31.658612 17344 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:27:31.658617 17344 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:27:31.658628 17344 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:27:31.658634 17344 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:27:31.658640 17344 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:27:31.658646 17344 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:27:31.658651 17344 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:27:31.658658 17344 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:27:31.658663 17344 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:27:31.658669 17344 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:27:31.658675 17344 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:27:31.658680 17344 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:27:31.658686 17344 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:27:31.658692 17344 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:27:31.658699 17344 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:27:31.658704 17344 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:27:31.658710 17344 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:27:31.658715 17344 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:27:31.658721 17344 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:27:31.658726 17344 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:27:31.658732 17344 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:27:31.658738 17344 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:27:31.658743 17344 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:27:31.658749 17344 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:27:31.658756 17344 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:27:31.658761 17344 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:27:31.658766 17344 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:27:31.658772 17344 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:27:31.658777 17344 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:27:31.658783 17344 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:27:31.658789 17344 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:27:31.658794 17344 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:27:31.658800 17344 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:27:31.658812 17344 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:27:31.658818 17344 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:27:31.658824 17344 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:27:31.658830 17344 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:27:31.658836 17344 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:27:31.658843 17344 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:27:31.658849 17344 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:27:31.658854 17344 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:27:31.658859 17344 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:27:31.658865 17344 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:27:31.658871 17344 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:27:31.658877 17344 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:27:31.658884 17344 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:27:31.658888 17344 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:27:31.658895 17344 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:27:31.658901 17344 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:27:31.658913 17344 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:27:31.658920 17344 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:27:31.658926 17344 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:27:31.658931 17344 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:27:31.658937 17344 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:27:31.658943 17344 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:27:31.658948 17344 net.cpp:226] pre_relu needs backward computation.\nI0817 16:27:31.658954 17344 net.cpp:226] pre_scale needs backward computation.\nI0817 16:27:31.658959 17344 net.cpp:226] pre_bn needs backward computation.\nI0817 16:27:31.658964 17344 net.cpp:226] pre_conv needs backward computation.\nI0817 16:27:31.658972 17344 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:27:31.658978 17344 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:27:31.658982 17344 net.cpp:270] This network produces output accuracy\nI0817 16:27:31.658989 17344 net.cpp:270] This network produces output loss\nI0817 16:27:31.659317 17344 net.cpp:283] Network initialization done.\nI0817 16:27:31.660334 17344 solver.cpp:60] Solver scaffolding done.\nI0817 16:27:31.882531 17344 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:27:32.240245 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:32.240324 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:32.247205 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:32.469579 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:32.469694 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:32.504199 17344 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:27:32.504308 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:32.952000 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:32.952077 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:32.959838 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:33.210099 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:33.210244 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:33.262079 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:33.262217 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:33.779583 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:33.779637 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:33.788269 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:34.054653 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:34.054783 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:34.125820 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:34.125953 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:34.209132 17344 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:27:34.691344 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:34.691421 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:27:34.700639 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:34.992178 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:34.992372 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:35.084281 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:35.084463 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:35.733697 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:35.733774 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:35.743974 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:36.062516 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:36.062737 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:36.174552 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:36.174762 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:36.894636 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:36.894690 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:36.905977 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:37.239616 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:37.239830 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:37.372028 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:37.372232 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:27:38.151279 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:27:38.151330 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:27:38.163902 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:27:38.210099 17371 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:38.254375 17365 blocking_queue.cpp:50] Waiting for data\nI0817 16:27:38.580286 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:27:38.580557 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:27:38.730927 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:27:38.731192 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:27:38.901624 17344 parallel.cpp:425] Starting Optimization\nI0817 16:27:38.904201 17344 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:27:38.904217 17344 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:27:38.908700 17344 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:29:02.183187 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:29:02.183465 17344 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:29:06.067999 17344 solver.cpp:228] Iteration 0, loss = 4.14198\nI0817 16:29:06.068053 17344 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:29:06.068069 17344 solver.cpp:244]     Train net output #1: loss = 4.14198 (* 1 = 4.14198 loss)\nI0817 16:29:06.155992 17344 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:31:25.419260 17344 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:32:49.133316 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14232\nI0817 16:32:49.133601 17344 solver.cpp:404]     Test net output #1: loss = 2.42663 (* 1 = 2.42663 loss)\nI0817 16:32:50.479109 17344 
solver.cpp:228] Iteration 100, loss = 1.99202\nI0817 16:32:50.479151 17344 solver.cpp:244]     Train net output #0: accuracy = 0.264\nI0817 16:32:50.479167 17344 solver.cpp:244]     Train net output #1: loss = 1.99202 (* 1 = 1.99202 loss)\nI0817 16:32:50.548002 17344 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:35:09.528440 17344 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:36:32.330847 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18592\nI0817 16:36:32.331117 17344 solver.cpp:404]     Test net output #1: loss = 2.19004 (* 1 = 2.19004 loss)\nI0817 16:36:33.675662 17344 solver.cpp:228] Iteration 200, loss = 1.80065\nI0817 16:36:33.675696 17344 solver.cpp:244]     Train net output #0: accuracy = 0.32\nI0817 16:36:33.675711 17344 solver.cpp:244]     Train net output #1: loss = 1.80065 (* 1 = 1.80065 loss)\nI0817 16:36:33.729939 17344 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:38:52.424376 17344 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:40:15.203631 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18812\nI0817 16:40:15.203902 17344 solver.cpp:404]     Test net output #1: loss = 2.25502 (* 1 = 2.25502 loss)\nI0817 16:40:16.547655 17344 solver.cpp:228] Iteration 300, loss = 1.64207\nI0817 16:40:16.547688 17344 solver.cpp:244]     Train net output #0: accuracy = 0.36\nI0817 16:40:16.547703 17344 solver.cpp:244]     Train net output #1: loss = 1.64207 (* 1 = 1.64207 loss)\nI0817 16:40:16.607820 17344 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:42:35.264667 17344 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:43:58.042794 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13508\nI0817 16:43:58.043068 17344 solver.cpp:404]     Test net output #1: loss = 2.40567 (* 1 = 2.40567 loss)\nI0817 16:43:59.387742 17344 solver.cpp:228] Iteration 400, loss = 1.55299\nI0817 16:43:59.387775 17344 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0817 16:43:59.387789 17344 
solver.cpp:244]     Train net output #1: loss = 1.55299 (* 1 = 1.55299 loss)\nI0817 16:43:59.441046 17344 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:46:18.122416 17344 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:47:40.873256 17344 solver.cpp:404]     Test net output #0: accuracy = 0.101\nI0817 16:47:40.873517 17344 solver.cpp:404]     Test net output #1: loss = 3.0481 (* 1 = 3.0481 loss)\nI0817 16:47:42.217805 17344 solver.cpp:228] Iteration 500, loss = 1.48494\nI0817 16:47:42.217840 17344 solver.cpp:244]     Train net output #0: accuracy = 0.424\nI0817 16:47:42.217856 17344 solver.cpp:244]     Train net output #1: loss = 1.48494 (* 1 = 1.48494 loss)\nI0817 16:47:42.278754 17344 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 16:50:01.009675 17344 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:51:22.848917 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0817 16:51:22.849212 17344 solver.cpp:404]     Test net output #1: loss = 3.37515 (* 1 = 3.37515 loss)\nI0817 16:51:24.177600 17344 solver.cpp:228] Iteration 600, loss = 1.27282\nI0817 16:51:24.177634 17344 solver.cpp:244]     Train net output #0: accuracy = 0.472\nI0817 16:51:24.177649 17344 solver.cpp:244]     Train net output #1: loss = 1.27282 (* 1 = 1.27282 loss)\nI0817 16:51:24.253475 17344 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:53:41.276396 17344 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:55:03.177045 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0817 16:55:03.177310 17344 solver.cpp:404]     Test net output #1: loss = 3.23402 (* 1 = 3.23402 loss)\nI0817 16:55:04.506249 17344 solver.cpp:228] Iteration 700, loss = 1.35054\nI0817 16:55:04.506283 17344 solver.cpp:244]     Train net output #0: accuracy = 0.448\nI0817 16:55:04.506299 17344 solver.cpp:244]     Train net output #1: loss = 1.35054 (* 1 = 1.35054 loss)\nI0817 16:55:04.585088 17344 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 
16:57:21.592674 17344 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:58:43.634369 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0817 16:58:43.634639 17344 solver.cpp:404]     Test net output #1: loss = 3.71648 (* 1 = 3.71648 loss)\nI0817 16:58:44.963502 17344 solver.cpp:228] Iteration 800, loss = 1.05855\nI0817 16:58:44.963536 17344 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0817 16:58:44.963551 17344 solver.cpp:244]     Train net output #1: loss = 1.05855 (* 1 = 1.05855 loss)\nI0817 16:58:45.040997 17344 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 17:01:02.283993 17344 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 17:02:24.280213 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0817 17:02:24.280472 17344 solver.cpp:404]     Test net output #1: loss = 3.50141 (* 1 = 3.50141 loss)\nI0817 17:02:25.609269 17344 solver.cpp:228] Iteration 900, loss = 1.02527\nI0817 17:02:25.609302 17344 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0817 17:02:25.609318 17344 solver.cpp:244]     Train net output #1: loss = 1.02526 (* 1 = 1.02526 loss)\nI0817 17:02:25.692240 17344 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 17:04:42.999383 17344 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 17:06:04.974588 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0817 17:06:04.974851 17344 solver.cpp:404]     Test net output #1: loss = 4.25069 (* 1 = 4.25069 loss)\nI0817 17:06:06.303261 17344 solver.cpp:228] Iteration 1000, loss = 0.847799\nI0817 17:06:06.303292 17344 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 17:06:06.303308 17344 solver.cpp:244]     Train net output #1: loss = 0.847798 (* 1 = 0.847798 loss)\nI0817 17:06:06.390229 17344 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 17:08:23.635540 17344 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 17:09:45.650290 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.10048\nI0817 17:09:45.650555 17344 solver.cpp:404]     Test net output #1: loss = 4.49386 (* 1 = 4.49386 loss)\nI0817 17:09:46.979653 17344 solver.cpp:228] Iteration 1100, loss = 0.828682\nI0817 17:09:46.979686 17344 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 17:09:46.979701 17344 solver.cpp:244]     Train net output #1: loss = 0.828682 (* 1 = 0.828682 loss)\nI0817 17:09:47.064110 17344 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 17:12:04.536422 17344 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 17:13:26.516177 17344 solver.cpp:404]     Test net output #0: accuracy = 0.102\nI0817 17:13:26.516441 17344 solver.cpp:404]     Test net output #1: loss = 4.66776 (* 1 = 4.66776 loss)\nI0817 17:13:27.845466 17344 solver.cpp:228] Iteration 1200, loss = 0.762817\nI0817 17:13:27.845500 17344 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 17:13:27.845515 17344 solver.cpp:244]     Train net output #1: loss = 0.762816 (* 1 = 0.762816 loss)\nI0817 17:13:27.931782 17344 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 17:15:45.210538 17344 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:17:07.188303 17344 solver.cpp:404]     Test net output #0: accuracy = 0.112\nI0817 17:17:07.188558 17344 solver.cpp:404]     Test net output #1: loss = 4.91861 (* 1 = 4.91861 loss)\nI0817 17:17:08.517845 17344 solver.cpp:228] Iteration 1300, loss = 0.677907\nI0817 17:17:08.517879 17344 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 17:17:08.517894 17344 solver.cpp:244]     Train net output #1: loss = 0.677907 (* 1 = 0.677907 loss)\nI0817 17:17:08.604317 17344 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 17:19:25.757237 17344 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:20:47.817754 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11548\nI0817 17:20:47.818018 17344 solver.cpp:404]     Test net output #1: loss = 4.96041 (* 1 = 4.96041 loss)\nI0817 17:20:49.146893 17344 
solver.cpp:228] Iteration 1400, loss = 0.686201\nI0817 17:20:49.146926 17344 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:20:49.146941 17344 solver.cpp:244]     Train net output #1: loss = 0.686201 (* 1 = 0.686201 loss)\nI0817 17:20:49.228309 17344 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 17:23:06.416709 17344 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:24:28.451756 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10244\nI0817 17:24:28.452015 17344 solver.cpp:404]     Test net output #1: loss = 5.05925 (* 1 = 5.05925 loss)\nI0817 17:24:29.782014 17344 solver.cpp:228] Iteration 1500, loss = 0.613843\nI0817 17:24:29.782047 17344 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:24:29.782063 17344 solver.cpp:244]     Train net output #1: loss = 0.613842 (* 1 = 0.613842 loss)\nI0817 17:24:29.867638 17344 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 17:26:47.177361 17344 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:28:09.244797 17344 solver.cpp:404]     Test net output #0: accuracy = 0.119\nI0817 17:28:09.245046 17344 solver.cpp:404]     Test net output #1: loss = 4.77524 (* 1 = 4.77524 loss)\nI0817 17:28:10.574687 17344 solver.cpp:228] Iteration 1600, loss = 0.461162\nI0817 17:28:10.574719 17344 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 17:28:10.574735 17344 solver.cpp:244]     Train net output #1: loss = 0.461162 (* 1 = 0.461162 loss)\nI0817 17:28:10.657573 17344 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 17:30:27.864704 17344 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:31:49.946249 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1204\nI0817 17:31:49.946512 17344 solver.cpp:404]     Test net output #1: loss = 4.7742 (* 1 = 4.7742 loss)\nI0817 17:31:51.276160 17344 solver.cpp:228] Iteration 1700, loss = 0.515106\nI0817 17:31:51.276193 17344 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 
17:31:51.276208 17344 solver.cpp:244]     Train net output #1: loss = 0.515106 (* 1 = 0.515106 loss)\nI0817 17:31:51.354636 17344 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 17:34:08.656780 17344 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:35:30.733680 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11408\nI0817 17:35:30.733938 17344 solver.cpp:404]     Test net output #1: loss = 4.80529 (* 1 = 4.80529 loss)\nI0817 17:35:32.063212 17344 solver.cpp:228] Iteration 1800, loss = 0.635891\nI0817 17:35:32.063246 17344 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 17:35:32.063261 17344 solver.cpp:244]     Train net output #1: loss = 0.63589 (* 1 = 0.63589 loss)\nI0817 17:35:32.142774 17344 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 17:37:49.349951 17344 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:39:11.352205 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10008\nI0817 17:39:11.352464 17344 solver.cpp:404]     Test net output #1: loss = 4.85625 (* 1 = 4.85625 loss)\nI0817 17:39:12.681318 17344 solver.cpp:228] Iteration 1900, loss = 0.452141\nI0817 17:39:12.681350 17344 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 17:39:12.681366 17344 solver.cpp:244]     Train net output #1: loss = 0.45214 (* 1 = 0.45214 loss)\nI0817 17:39:12.765213 17344 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 17:41:29.984838 17344 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:42:51.963549 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11076\nI0817 17:42:51.963795 17344 solver.cpp:404]     Test net output #1: loss = 4.66336 (* 1 = 4.66336 loss)\nI0817 17:42:53.292208 17344 solver.cpp:228] Iteration 2000, loss = 0.44121\nI0817 17:42:53.292244 17344 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 17:42:53.292268 17344 solver.cpp:244]     Train net output #1: loss = 0.441209 (* 1 = 0.441209 loss)\nI0817 17:42:53.381216 17344 sgd_solver.cpp:166] 
Iteration 2000, lr = 0.35\nI0817 17:45:10.592363 17344 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:46:32.611948 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09916\nI0817 17:46:32.612202 17344 solver.cpp:404]     Test net output #1: loss = 4.88406 (* 1 = 4.88406 loss)\nI0817 17:46:33.941968 17344 solver.cpp:228] Iteration 2100, loss = 0.504884\nI0817 17:46:33.942004 17344 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 17:46:33.942028 17344 solver.cpp:244]     Train net output #1: loss = 0.504884 (* 1 = 0.504884 loss)\nI0817 17:46:34.028197 17344 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 17:48:51.224072 17344 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:50:13.287823 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1334\nI0817 17:50:13.288084 17344 solver.cpp:404]     Test net output #1: loss = 4.97503 (* 1 = 4.97503 loss)\nI0817 17:50:14.617758 17344 solver.cpp:228] Iteration 2200, loss = 0.518441\nI0817 17:50:14.617795 17344 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 17:50:14.617818 17344 solver.cpp:244]     Train net output #1: loss = 0.518441 (* 1 = 0.518441 loss)\nI0817 17:50:14.696401 17344 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 17:52:31.895032 17344 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:53:53.867332 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13008\nI0817 17:53:53.867599 17344 solver.cpp:404]     Test net output #1: loss = 4.90783 (* 1 = 4.90783 loss)\nI0817 17:53:55.196354 17344 solver.cpp:228] Iteration 2300, loss = 0.367074\nI0817 17:53:55.196393 17344 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 17:53:55.196415 17344 solver.cpp:244]     Train net output #1: loss = 0.367073 (* 1 = 0.367073 loss)\nI0817 17:53:55.277035 17344 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 17:56:12.593500 17344 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:57:34.562885 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.1246\nI0817 17:57:34.563155 17344 solver.cpp:404]     Test net output #1: loss = 5.19408 (* 1 = 5.19408 loss)\nI0817 17:57:35.892376 17344 solver.cpp:228] Iteration 2400, loss = 0.407805\nI0817 17:57:35.892416 17344 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 17:57:35.892437 17344 solver.cpp:244]     Train net output #1: loss = 0.407805 (* 1 = 0.407805 loss)\nI0817 17:57:35.977715 17344 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 17:59:53.153684 17344 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 18:01:15.118643 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10724\nI0817 18:01:15.118893 17344 solver.cpp:404]     Test net output #1: loss = 5.03898 (* 1 = 5.03898 loss)\nI0817 18:01:16.448298 17344 solver.cpp:228] Iteration 2500, loss = 0.411201\nI0817 18:01:16.448335 17344 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 18:01:16.448359 17344 solver.cpp:244]     Train net output #1: loss = 0.411201 (* 1 = 0.411201 loss)\nI0817 18:01:16.533893 17344 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 18:03:33.732396 17344 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 18:04:55.697046 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10176\nI0817 18:04:55.697317 17344 solver.cpp:404]     Test net output #1: loss = 5.21517 (* 1 = 5.21517 loss)\nI0817 18:04:57.026581 17344 solver.cpp:228] Iteration 2600, loss = 0.357802\nI0817 18:04:57.026617 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:04:57.026639 17344 solver.cpp:244]     Train net output #1: loss = 0.357801 (* 1 = 0.357801 loss)\nI0817 18:04:57.113417 17344 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 18:07:14.285679 17344 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 18:08:36.237366 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10064\nI0817 18:08:36.237632 17344 solver.cpp:404]     Test net output #1: loss = 5.48455 (* 1 
= 5.48455 loss)\nI0817 18:08:37.566862 17344 solver.cpp:228] Iteration 2700, loss = 0.318764\nI0817 18:08:37.566901 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:08:37.566922 17344 solver.cpp:244]     Train net output #1: loss = 0.318763 (* 1 = 0.318763 loss)\nI0817 18:08:37.647215 17344 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 18:10:54.986135 17344 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 18:12:16.945777 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10252\nI0817 18:12:16.946050 17344 solver.cpp:404]     Test net output #1: loss = 5.74372 (* 1 = 5.74372 loss)\nI0817 18:12:18.274829 17344 solver.cpp:228] Iteration 2800, loss = 0.406165\nI0817 18:12:18.274868 17344 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 18:12:18.274891 17344 solver.cpp:244]     Train net output #1: loss = 0.406165 (* 1 = 0.406165 loss)\nI0817 18:12:18.358454 17344 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 18:14:35.673048 17344 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:15:57.708541 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1\nI0817 18:15:57.708811 17344 solver.cpp:404]     Test net output #1: loss = 6.66852 (* 1 = 6.66852 loss)\nI0817 18:15:59.037667 17344 solver.cpp:228] Iteration 2900, loss = 0.350492\nI0817 18:15:59.037705 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:15:59.037727 17344 solver.cpp:244]     Train net output #1: loss = 0.350491 (* 1 = 0.350491 loss)\nI0817 18:15:59.125226 17344 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 18:18:16.292457 17344 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:19:38.336900 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0817 18:19:38.337174 17344 solver.cpp:404]     Test net output #1: loss = 6.88244 (* 1 = 6.88244 loss)\nI0817 18:19:39.666144 17344 solver.cpp:228] Iteration 3000, loss = 0.28488\nI0817 18:19:39.666182 17344 solver.cpp:244]     Train net 
output #0: accuracy = 0.888\nI0817 18:19:39.666205 17344 solver.cpp:244]     Train net output #1: loss = 0.28488 (* 1 = 0.28488 loss)\nI0817 18:19:39.755661 17344 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 18:21:56.906774 17344 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:23:18.940464 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10004\nI0817 18:23:18.940735 17344 solver.cpp:404]     Test net output #1: loss = 6.44708 (* 1 = 6.44708 loss)\nI0817 18:23:20.269794 17344 solver.cpp:228] Iteration 3100, loss = 0.249305\nI0817 18:23:20.269831 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:23:20.269855 17344 solver.cpp:244]     Train net output #1: loss = 0.249304 (* 1 = 0.249304 loss)\nI0817 18:23:20.353479 17344 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 18:25:37.547915 17344 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:26:59.569386 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09888\nI0817 18:26:59.569655 17344 solver.cpp:404]     Test net output #1: loss = 6.96875 (* 1 = 6.96875 loss)\nI0817 18:27:00.898463 17344 solver.cpp:228] Iteration 3200, loss = 0.302878\nI0817 18:27:00.898501 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:27:00.898524 17344 solver.cpp:244]     Train net output #1: loss = 0.302878 (* 1 = 0.302878 loss)\nI0817 18:27:00.984040 17344 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 18:29:18.339859 17344 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:30:40.384737 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11296\nI0817 18:30:40.385007 17344 solver.cpp:404]     Test net output #1: loss = 6.92005 (* 1 = 6.92005 loss)\nI0817 18:30:41.713896 17344 solver.cpp:228] Iteration 3300, loss = 0.461117\nI0817 18:30:41.713933 17344 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 18:30:41.713958 17344 solver.cpp:244]     Train net output #1: loss = 0.461117 (* 1 = 0.461117 loss)\nI0817 
18:30:41.792670 17344 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0817 18:32:59.005879 17344 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:34:20.995239 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10104\nI0817 18:34:20.995504 17344 solver.cpp:404]     Test net output #1: loss = 7.14747 (* 1 = 7.14747 loss)\nI0817 18:34:22.324538 17344 solver.cpp:228] Iteration 3400, loss = 0.302753\nI0817 18:34:22.324576 17344 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:34:22.324599 17344 solver.cpp:244]     Train net output #1: loss = 0.302752 (* 1 = 0.302752 loss)\nI0817 18:34:22.413398 17344 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 18:36:39.706354 17344 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:38:01.693236 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09888\nI0817 18:38:01.693470 17344 solver.cpp:404]     Test net output #1: loss = 7.18666 (* 1 = 7.18666 loss)\nI0817 18:38:03.022238 17344 solver.cpp:228] Iteration 3500, loss = 0.263989\nI0817 18:38:03.022276 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:38:03.022300 17344 solver.cpp:244]     Train net output #1: loss = 0.263989 (* 1 = 0.263989 loss)\nI0817 18:38:03.106546 17344 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 18:40:20.324775 17344 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:41:42.194916 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12528\nI0817 18:41:42.195183 17344 solver.cpp:404]     Test net output #1: loss = 6.19615 (* 1 = 6.19615 loss)\nI0817 18:41:43.524399 17344 solver.cpp:228] Iteration 3600, loss = 0.262286\nI0817 18:41:43.524437 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:41:43.524461 17344 solver.cpp:244]     Train net output #1: loss = 0.262286 (* 1 = 0.262286 loss)\nI0817 18:41:43.603683 17344 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 18:44:01.047124 17344 solver.cpp:337] Iteration 3700, Testing net 
(#0)\nI0817 18:45:22.948933 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09976\nI0817 18:45:22.949297 17344 solver.cpp:404]     Test net output #1: loss = 7.17092 (* 1 = 7.17092 loss)\nI0817 18:45:24.278311 17344 solver.cpp:228] Iteration 3700, loss = 0.223757\nI0817 18:45:24.278348 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:45:24.278370 17344 solver.cpp:244]     Train net output #1: loss = 0.223756 (* 1 = 0.223756 loss)\nI0817 18:45:24.358530 17344 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 18:47:41.808941 17344 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:49:03.622246 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12844\nI0817 18:49:03.622516 17344 solver.cpp:404]     Test net output #1: loss = 6.62814 (* 1 = 6.62814 loss)\nI0817 18:49:04.951668 17344 solver.cpp:228] Iteration 3800, loss = 0.239997\nI0817 18:49:04.951707 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:49:04.951730 17344 solver.cpp:244]     Train net output #1: loss = 0.239996 (* 1 = 0.239996 loss)\nI0817 18:49:05.038480 17344 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 18:51:22.369314 17344 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:52:44.256958 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10468\nI0817 18:52:44.257220 17344 solver.cpp:404]     Test net output #1: loss = 7.32809 (* 1 = 7.32809 loss)\nI0817 18:52:45.586299 17344 solver.cpp:228] Iteration 3900, loss = 0.302542\nI0817 18:52:45.586338 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 18:52:45.586360 17344 solver.cpp:244]     Train net output #1: loss = 0.302542 (* 1 = 0.302542 loss)\nI0817 18:52:45.667482 17344 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 18:55:02.931893 17344 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:56:24.842102 17344 solver.cpp:404]     Test net output #0: accuracy = 0.135\nI0817 18:56:24.842376 17344 solver.cpp:404]     Test 
net output #1: loss = 6.3815 (* 1 = 6.3815 loss)\nI0817 18:56:26.171106 17344 solver.cpp:228] Iteration 4000, loss = 0.254295\nI0817 18:56:26.171144 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:56:26.171165 17344 solver.cpp:244]     Train net output #1: loss = 0.254294 (* 1 = 0.254294 loss)\nI0817 18:56:26.254570 17344 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 18:58:43.512044 17344 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 19:00:05.457211 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13412\nI0817 19:00:05.457476 17344 solver.cpp:404]     Test net output #1: loss = 6.70838 (* 1 = 6.70838 loss)\nI0817 19:00:06.786875 17344 solver.cpp:228] Iteration 4100, loss = 0.16006\nI0817 19:00:06.786912 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:00:06.786937 17344 solver.cpp:244]     Train net output #1: loss = 0.16006 (* 1 = 0.16006 loss)\nI0817 19:00:06.873308 17344 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 19:02:24.148849 17344 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 19:03:46.084132 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12348\nI0817 19:03:46.084398 17344 solver.cpp:404]     Test net output #1: loss = 6.72607 (* 1 = 6.72607 loss)\nI0817 19:03:47.414167 17344 solver.cpp:228] Iteration 4200, loss = 0.172682\nI0817 19:03:47.414206 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:03:47.414228 17344 solver.cpp:244]     Train net output #1: loss = 0.172682 (* 1 = 0.172682 loss)\nI0817 19:03:47.492605 17344 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 19:06:04.770433 17344 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 19:07:26.597645 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12376\nI0817 19:07:26.597909 17344 solver.cpp:404]     Test net output #1: loss = 6.58111 (* 1 = 6.58111 loss)\nI0817 19:07:27.927162 17344 solver.cpp:228] Iteration 4300, loss = 0.238214\nI0817 19:07:27.927201 
17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:07:27.927223 17344 solver.cpp:244]     Train net output #1: loss = 0.238213 (* 1 = 0.238213 loss)\nI0817 19:07:28.011242 17344 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 19:09:45.308323 17344 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 19:11:07.132400 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14076\nI0817 19:11:07.132666 17344 solver.cpp:404]     Test net output #1: loss = 6.62068 (* 1 = 6.62068 loss)\nI0817 19:11:08.461516 17344 solver.cpp:228] Iteration 4400, loss = 0.220786\nI0817 19:11:08.461555 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:11:08.461577 17344 solver.cpp:244]     Train net output #1: loss = 0.220786 (* 1 = 0.220786 loss)\nI0817 19:11:08.544363 17344 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 19:13:25.740912 17344 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:14:47.700117 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14296\nI0817 19:14:47.700394 17344 solver.cpp:404]     Test net output #1: loss = 6.03451 (* 1 = 6.03451 loss)\nI0817 19:14:49.028997 17344 solver.cpp:228] Iteration 4500, loss = 0.19092\nI0817 19:14:49.029036 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:14:49.029058 17344 solver.cpp:244]     Train net output #1: loss = 0.190919 (* 1 = 0.190919 loss)\nI0817 19:14:49.110946 17344 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 19:17:06.305801 17344 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:18:28.299417 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10864\nI0817 19:18:28.299712 17344 solver.cpp:404]     Test net output #1: loss = 6.60014 (* 1 = 6.60014 loss)\nI0817 19:18:29.628330 17344 solver.cpp:228] Iteration 4600, loss = 0.258538\nI0817 19:18:29.628366 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:18:29.628382 17344 solver.cpp:244]     Train net output #1: loss = 
0.258537 (* 1 = 0.258537 loss)\nI0817 19:18:29.708577 17344 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 19:20:47.004971 17344 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:22:08.947028 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11364\nI0817 19:22:08.947288 17344 solver.cpp:404]     Test net output #1: loss = 7.56312 (* 1 = 7.56312 loss)\nI0817 19:22:10.276080 17344 solver.cpp:228] Iteration 4700, loss = 0.185226\nI0817 19:22:10.276113 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:22:10.276129 17344 solver.cpp:244]     Train net output #1: loss = 0.185226 (* 1 = 0.185226 loss)\nI0817 19:22:10.355021 17344 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 19:24:27.601418 17344 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:25:49.552981 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10752\nI0817 19:25:49.553236 17344 solver.cpp:404]     Test net output #1: loss = 9.12851 (* 1 = 9.12851 loss)\nI0817 19:25:50.881849 17344 solver.cpp:228] Iteration 4800, loss = 0.189855\nI0817 19:25:50.881886 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:25:50.881902 17344 solver.cpp:244]     Train net output #1: loss = 0.189855 (* 1 = 0.189855 loss)\nI0817 19:25:50.966126 17344 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 19:28:08.457605 17344 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:29:30.406788 17344 solver.cpp:404]     Test net output #0: accuracy = 0.135\nI0817 19:29:30.407059 17344 solver.cpp:404]     Test net output #1: loss = 8.70986 (* 1 = 8.70986 loss)\nI0817 19:29:31.735049 17344 solver.cpp:228] Iteration 4900, loss = 0.207645\nI0817 19:29:31.735083 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:29:31.735100 17344 solver.cpp:244]     Train net output #1: loss = 0.207645 (* 1 = 0.207645 loss)\nI0817 19:29:31.818368 17344 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 19:31:49.116523 17344 
solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:33:11.111521 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0817 19:33:11.111779 17344 solver.cpp:404]     Test net output #1: loss = 10.275 (* 1 = 10.275 loss)\nI0817 19:33:12.440835 17344 solver.cpp:228] Iteration 5000, loss = 0.182504\nI0817 19:33:12.440871 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:33:12.440887 17344 solver.cpp:244]     Train net output #1: loss = 0.182503 (* 1 = 0.182503 loss)\nI0817 19:33:12.526087 17344 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 19:35:29.710770 17344 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:36:51.697921 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10908\nI0817 19:36:51.698189 17344 solver.cpp:404]     Test net output #1: loss = 10.2475 (* 1 = 10.2475 loss)\nI0817 19:36:53.027192 17344 solver.cpp:228] Iteration 5100, loss = 0.205912\nI0817 19:36:53.027226 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:36:53.027240 17344 solver.cpp:244]     Train net output #1: loss = 0.205911 (* 1 = 0.205911 loss)\nI0817 19:36:53.116698 17344 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 19:39:10.574959 17344 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:40:32.563426 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10204\nI0817 19:40:32.563685 17344 solver.cpp:404]     Test net output #1: loss = 9.96818 (* 1 = 9.96818 loss)\nI0817 19:40:33.892714 17344 solver.cpp:228] Iteration 5200, loss = 0.229524\nI0817 19:40:33.892748 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 19:40:33.892762 17344 solver.cpp:244]     Train net output #1: loss = 0.229523 (* 1 = 0.229523 loss)\nI0817 19:40:33.971413 17344 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 19:42:51.150691 17344 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:44:13.152070 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0817 
19:44:13.152324 17344 solver.cpp:404]     Test net output #1: loss = 10.6043 (* 1 = 10.6043 loss)\nI0817 19:44:14.481256 17344 solver.cpp:228] Iteration 5300, loss = 0.178279\nI0817 19:44:14.481292 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:44:14.481307 17344 solver.cpp:244]     Train net output #1: loss = 0.178279 (* 1 = 0.178279 loss)\nI0817 19:44:14.560940 17344 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 19:46:31.686439 17344 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:47:53.665632 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0817 19:47:53.665911 17344 solver.cpp:404]     Test net output #1: loss = 10.6103 (* 1 = 10.6103 loss)\nI0817 19:47:54.995023 17344 solver.cpp:228] Iteration 5400, loss = 0.155785\nI0817 19:47:54.995061 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:47:54.995084 17344 solver.cpp:244]     Train net output #1: loss = 0.155785 (* 1 = 0.155785 loss)\nI0817 19:47:55.079588 17344 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 19:50:12.237774 17344 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:51:34.232297 17344 solver.cpp:404]     Test net output #0: accuracy = 0.118\nI0817 19:51:34.232570 17344 solver.cpp:404]     Test net output #1: loss = 8.62228 (* 1 = 8.62228 loss)\nI0817 19:51:35.561229 17344 solver.cpp:228] Iteration 5500, loss = 0.197771\nI0817 19:51:35.561266 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:51:35.561288 17344 solver.cpp:244]     Train net output #1: loss = 0.197771 (* 1 = 0.197771 loss)\nI0817 19:51:35.647711 17344 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 19:53:52.870151 17344 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:55:14.859696 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0817 19:55:14.859975 17344 solver.cpp:404]     Test net output #1: loss = 11.6334 (* 1 = 11.6334 loss)\nI0817 19:55:16.189061 17344 solver.cpp:228] 
Iteration 5600, loss = 0.175906\nI0817 19:55:16.189100 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:55:16.189122 17344 solver.cpp:244]     Train net output #1: loss = 0.175906 (* 1 = 0.175906 loss)\nI0817 19:55:16.275872 17344 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 19:57:33.536002 17344 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:58:55.515173 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10068\nI0817 19:58:55.515445 17344 solver.cpp:404]     Test net output #1: loss = 11.0484 (* 1 = 11.0484 loss)\nI0817 19:58:56.844779 17344 solver.cpp:228] Iteration 5700, loss = 0.16225\nI0817 19:58:56.844821 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:58:56.844844 17344 solver.cpp:244]     Train net output #1: loss = 0.162249 (* 1 = 0.162249 loss)\nI0817 19:58:56.923523 17344 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 20:01:14.033665 17344 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 20:02:36.032810 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0817 20:02:36.033089 17344 solver.cpp:404]     Test net output #1: loss = 15.1472 (* 1 = 15.1472 loss)\nI0817 20:02:37.362438 17344 solver.cpp:228] Iteration 5800, loss = 0.228182\nI0817 20:02:37.362476 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:02:37.362498 17344 solver.cpp:244]     Train net output #1: loss = 0.228181 (* 1 = 0.228181 loss)\nI0817 20:02:37.446578 17344 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 20:04:54.590569 17344 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 20:06:16.576989 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0817 20:06:16.577270 17344 solver.cpp:404]     Test net output #1: loss = 13.1844 (* 1 = 13.1844 loss)\nI0817 20:06:17.906307 17344 solver.cpp:228] Iteration 5900, loss = 0.170811\nI0817 20:06:17.906344 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:06:17.906366 17344 
solver.cpp:244]     Train net output #1: loss = 0.170811 (* 1 = 0.170811 loss)\nI0817 20:06:17.982559 17344 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 20:08:35.342931 17344 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 20:09:57.320190 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10228\nI0817 20:09:57.320467 17344 solver.cpp:404]     Test net output #1: loss = 10.9378 (* 1 = 10.9378 loss)\nI0817 20:09:58.648632 17344 solver.cpp:228] Iteration 6000, loss = 0.169174\nI0817 20:09:58.648669 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:09:58.648691 17344 solver.cpp:244]     Train net output #1: loss = 0.169173 (* 1 = 0.169173 loss)\nI0817 20:09:58.729056 17344 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 20:12:15.977879 17344 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 20:13:37.949748 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10088\nI0817 20:13:37.950031 17344 solver.cpp:404]     Test net output #1: loss = 11.1893 (* 1 = 11.1893 loss)\nI0817 20:13:39.279232 17344 solver.cpp:228] Iteration 6100, loss = 0.184942\nI0817 20:13:39.279269 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:13:39.279291 17344 solver.cpp:244]     Train net output #1: loss = 0.184942 (* 1 = 0.184942 loss)\nI0817 20:13:39.366752 17344 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 20:15:56.608378 17344 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:17:18.575212 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0817 20:17:18.575479 17344 solver.cpp:404]     Test net output #1: loss = 10.7468 (* 1 = 10.7468 loss)\nI0817 20:17:19.904405 17344 solver.cpp:228] Iteration 6200, loss = 0.130581\nI0817 20:17:19.904443 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:17:19.904464 17344 solver.cpp:244]     Train net output #1: loss = 0.13058 (* 1 = 0.13058 loss)\nI0817 20:17:19.998106 17344 sgd_solver.cpp:166] Iteration 6200, lr = 
0.35\nI0817 20:19:37.276588 17344 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:20:59.235976 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1154\nI0817 20:20:59.236261 17344 solver.cpp:404]     Test net output #1: loss = 8.07614 (* 1 = 8.07614 loss)\nI0817 20:21:00.564780 17344 solver.cpp:228] Iteration 6300, loss = 0.178473\nI0817 20:21:00.564824 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:21:00.564846 17344 solver.cpp:244]     Train net output #1: loss = 0.178473 (* 1 = 0.178473 loss)\nI0817 20:21:00.649698 17344 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 20:23:17.941802 17344 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:24:39.901343 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16572\nI0817 20:24:39.901614 17344 solver.cpp:404]     Test net output #1: loss = 6.85796 (* 1 = 6.85796 loss)\nI0817 20:24:41.229879 17344 solver.cpp:228] Iteration 6400, loss = 0.143764\nI0817 20:24:41.229913 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:24:41.229928 17344 solver.cpp:244]     Train net output #1: loss = 0.143764 (* 1 = 0.143764 loss)\nI0817 20:24:41.309253 17344 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 20:26:58.559767 17344 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:28:20.528424 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1744\nI0817 20:28:20.528674 17344 solver.cpp:404]     Test net output #1: loss = 6.50741 (* 1 = 6.50741 loss)\nI0817 20:28:21.856851 17344 solver.cpp:228] Iteration 6500, loss = 0.108046\nI0817 20:28:21.856885 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:28:21.856901 17344 solver.cpp:244]     Train net output #1: loss = 0.108046 (* 1 = 0.108046 loss)\nI0817 20:28:21.946348 17344 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 20:30:39.168885 17344 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:32:01.125152 17344 solver.cpp:404]     Test net output 
#0: accuracy = 0.15724\nI0817 20:32:01.125396 17344 solver.cpp:404]     Test net output #1: loss = 6.88988 (* 1 = 6.88988 loss)\nI0817 20:32:02.454181 17344 solver.cpp:228] Iteration 6600, loss = 0.215044\nI0817 20:32:02.454218 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 20:32:02.454232 17344 solver.cpp:244]     Train net output #1: loss = 0.215044 (* 1 = 0.215044 loss)\nI0817 20:32:02.541648 17344 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 20:34:19.738517 17344 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:35:41.693354 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1614\nI0817 20:35:41.693617 17344 solver.cpp:404]     Test net output #1: loss = 6.56615 (* 1 = 6.56615 loss)\nI0817 20:35:43.021093 17344 solver.cpp:228] Iteration 6700, loss = 0.149073\nI0817 20:35:43.021127 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:35:43.021143 17344 solver.cpp:244]     Train net output #1: loss = 0.149072 (* 1 = 0.149072 loss)\nI0817 20:35:43.109814 17344 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 20:38:00.325742 17344 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:39:22.284677 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22564\nI0817 20:39:22.284943 17344 solver.cpp:404]     Test net output #1: loss = 6.31129 (* 1 = 6.31129 loss)\nI0817 20:39:23.613658 17344 solver.cpp:228] Iteration 6800, loss = 0.180471\nI0817 20:39:23.613692 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:39:23.613706 17344 solver.cpp:244]     Train net output #1: loss = 0.18047 (* 1 = 0.18047 loss)\nI0817 20:39:23.694087 17344 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 20:41:40.923437 17344 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:43:02.885196 17344 solver.cpp:404]     Test net output #0: accuracy = 0.294\nI0817 20:43:02.885473 17344 solver.cpp:404]     Test net output #1: loss = 4.68506 (* 1 = 4.68506 loss)\nI0817 20:43:04.214475 
17344 solver.cpp:228] Iteration 6900, loss = 0.145858\nI0817 20:43:04.214509 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:43:04.214522 17344 solver.cpp:244]     Train net output #1: loss = 0.145857 (* 1 = 0.145857 loss)\nI0817 20:43:04.294906 17344 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 20:45:22.639377 17344 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:46:45.478260 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31048\nI0817 20:46:45.478515 17344 solver.cpp:404]     Test net output #1: loss = 4.98287 (* 1 = 4.98287 loss)\nI0817 20:46:46.821151 17344 solver.cpp:228] Iteration 7000, loss = 0.185658\nI0817 20:46:46.821185 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:46:46.821202 17344 solver.cpp:244]     Train net output #1: loss = 0.185657 (* 1 = 0.185657 loss)\nI0817 20:46:46.892873 17344 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 20:49:05.494441 17344 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:50:28.343530 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25896\nI0817 20:50:28.343808 17344 solver.cpp:404]     Test net output #1: loss = 6.67401 (* 1 = 6.67401 loss)\nI0817 20:50:29.687006 17344 solver.cpp:228] Iteration 7100, loss = 0.121585\nI0817 20:50:29.687041 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:50:29.687055 17344 solver.cpp:244]     Train net output #1: loss = 0.121585 (* 1 = 0.121585 loss)\nI0817 20:50:29.750784 17344 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 20:52:48.361207 17344 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:54:11.208401 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26424\nI0817 20:54:11.208670 17344 solver.cpp:404]     Test net output #1: loss = 6.84132 (* 1 = 6.84132 loss)\nI0817 20:54:12.552132 17344 solver.cpp:228] Iteration 7200, loss = 0.0919481\nI0817 20:54:12.552165 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0817 20:54:12.552181 17344 solver.cpp:244]     Train net output #1: loss = 0.0919477 (* 1 = 0.0919477 loss)\nI0817 20:54:12.621598 17344 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 20:56:31.137059 17344 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:57:53.883311 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25308\nI0817 20:57:53.883579 17344 solver.cpp:404]     Test net output #1: loss = 6.54634 (* 1 = 6.54634 loss)\nI0817 20:57:55.225400 17344 solver.cpp:228] Iteration 7300, loss = 0.119227\nI0817 20:57:55.225436 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:57:55.225451 17344 solver.cpp:244]     Train net output #1: loss = 0.119227 (* 1 = 0.119227 loss)\nI0817 20:57:55.292327 17344 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 21:00:13.871934 17344 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 21:01:36.632267 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19324\nI0817 21:01:36.632534 17344 solver.cpp:404]     Test net output #1: loss = 8.37691 (* 1 = 8.37691 loss)\nI0817 21:01:37.975186 17344 solver.cpp:228] Iteration 7400, loss = 0.114844\nI0817 21:01:37.975220 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:01:37.975235 17344 solver.cpp:244]     Train net output #1: loss = 0.114844 (* 1 = 0.114844 loss)\nI0817 21:01:38.045200 17344 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 21:03:56.619976 17344 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 21:05:19.951484 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20608\nI0817 21:05:19.951707 17344 solver.cpp:404]     Test net output #1: loss = 7.20341 (* 1 = 7.20341 loss)\nI0817 21:05:21.296861 17344 solver.cpp:228] Iteration 7500, loss = 0.134541\nI0817 21:05:21.296905 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:05:21.296929 17344 solver.cpp:244]     Train net output #1: loss = 0.13454 (* 1 = 0.13454 loss)\nI0817 21:05:21.369784 17344 
sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0817 21:07:40.407395 17344 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 21:09:03.690860 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21872\nI0817 21:09:03.691076 17344 solver.cpp:404]     Test net output #1: loss = 6.48972 (* 1 = 6.48972 loss)\nI0817 21:09:05.036538 17344 solver.cpp:228] Iteration 7600, loss = 0.147994\nI0817 21:09:05.036586 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:09:05.036609 17344 solver.cpp:244]     Train net output #1: loss = 0.147994 (* 1 = 0.147994 loss)\nI0817 21:09:05.100509 17344 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 21:11:24.057636 17344 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 21:12:47.513401 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17928\nI0817 21:12:47.513690 17344 solver.cpp:404]     Test net output #1: loss = 7.62181 (* 1 = 7.62181 loss)\nI0817 21:12:48.858662 17344 solver.cpp:228] Iteration 7700, loss = 0.139817\nI0817 21:12:48.858706 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:12:48.858731 17344 solver.cpp:244]     Train net output #1: loss = 0.139817 (* 1 = 0.139817 loss)\nI0817 21:12:48.923143 17344 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 21:15:07.940549 17344 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:16:31.597852 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14992\nI0817 21:16:31.598098 17344 solver.cpp:404]     Test net output #1: loss = 10.5344 (* 1 = 10.5344 loss)\nI0817 21:16:32.943137 17344 solver.cpp:228] Iteration 7800, loss = 0.085517\nI0817 21:16:32.943188 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:16:32.943212 17344 solver.cpp:244]     Train net output #1: loss = 0.0855167 (* 1 = 0.0855167 loss)\nI0817 21:16:33.007843 17344 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 21:18:52.044083 17344 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 
21:20:15.724071 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10656\nI0817 21:20:15.724375 17344 solver.cpp:404]     Test net output #1: loss = 9.63034 (* 1 = 9.63034 loss)\nI0817 21:20:17.069232 17344 solver.cpp:228] Iteration 7900, loss = 0.140977\nI0817 21:20:17.069279 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:20:17.069301 17344 solver.cpp:244]     Train net output #1: loss = 0.140976 (* 1 = 0.140976 loss)\nI0817 21:20:17.132674 17344 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 21:22:36.208220 17344 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:23:59.917130 17344 solver.cpp:404]     Test net output #0: accuracy = 0.143\nI0817 21:23:59.917429 17344 solver.cpp:404]     Test net output #1: loss = 7.48201 (* 1 = 7.48201 loss)\nI0817 21:24:01.262841 17344 solver.cpp:228] Iteration 8000, loss = 0.161288\nI0817 21:24:01.262887 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 21:24:01.262909 17344 solver.cpp:244]     Train net output #1: loss = 0.161288 (* 1 = 0.161288 loss)\nI0817 21:24:01.337386 17344 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 21:26:20.139173 17344 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:27:42.904978 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17104\nI0817 21:27:42.905261 17344 solver.cpp:404]     Test net output #1: loss = 7.16985 (* 1 = 7.16985 loss)\nI0817 21:27:44.248571 17344 solver.cpp:228] Iteration 8100, loss = 0.116597\nI0817 21:27:44.248605 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:27:44.248620 17344 solver.cpp:244]     Train net output #1: loss = 0.116596 (* 1 = 0.116596 loss)\nI0817 21:27:44.323711 17344 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 21:30:02.886720 17344 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:31:25.787406 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11756\nI0817 21:31:25.787683 17344 solver.cpp:404]     Test net output #1: 
loss = 8.57863 (* 1 = 8.57863 loss)\nI0817 21:31:27.130818 17344 solver.cpp:228] Iteration 8200, loss = 0.102223\nI0817 21:31:27.130854 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:31:27.130878 17344 solver.cpp:244]     Train net output #1: loss = 0.102222 (* 1 = 0.102222 loss)\nI0817 21:31:27.198015 17344 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 21:33:45.696514 17344 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:35:08.551862 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24128\nI0817 21:35:08.552121 17344 solver.cpp:404]     Test net output #1: loss = 6.63711 (* 1 = 6.63711 loss)\nI0817 21:35:09.894285 17344 solver.cpp:228] Iteration 8300, loss = 0.108743\nI0817 21:35:09.894321 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:35:09.894336 17344 solver.cpp:244]     Train net output #1: loss = 0.108743 (* 1 = 0.108743 loss)\nI0817 21:35:09.965543 17344 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 21:37:28.445281 17344 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:38:51.293318 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28\nI0817 21:38:51.293589 17344 solver.cpp:404]     Test net output #1: loss = 5.53413 (* 1 = 5.53413 loss)\nI0817 21:38:52.635690 17344 solver.cpp:228] Iteration 8400, loss = 0.139734\nI0817 21:38:52.635725 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:38:52.635740 17344 solver.cpp:244]     Train net output #1: loss = 0.139733 (* 1 = 0.139733 loss)\nI0817 21:38:52.700134 17344 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 21:41:11.172068 17344 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:42:34.029448 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24444\nI0817 21:42:34.029726 17344 solver.cpp:404]     Test net output #1: loss = 7.88541 (* 1 = 7.88541 loss)\nI0817 21:42:35.372153 17344 solver.cpp:228] Iteration 8500, loss = 0.141886\nI0817 21:42:35.372189 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:42:35.372202 17344 solver.cpp:244]     Train net output #1: loss = 0.141885 (* 1 = 0.141885 loss)\nI0817 21:42:35.437933 17344 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 21:44:53.934103 17344 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:46:16.782419 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36272\nI0817 21:46:16.782680 17344 solver.cpp:404]     Test net output #1: loss = 5.12981 (* 1 = 5.12981 loss)\nI0817 21:46:18.124621 17344 solver.cpp:228] Iteration 8600, loss = 0.189095\nI0817 21:46:18.124655 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:46:18.124670 17344 solver.cpp:244]     Train net output #1: loss = 0.189094 (* 1 = 0.189094 loss)\nI0817 21:46:18.191249 17344 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 21:48:36.684623 17344 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:49:59.517885 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28056\nI0817 21:49:59.518132 17344 solver.cpp:404]     Test net output #1: loss = 7.41293 (* 1 = 7.41293 loss)\nI0817 21:50:00.860085 17344 solver.cpp:228] Iteration 8700, loss = 0.0955079\nI0817 21:50:00.860117 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:50:00.860132 17344 solver.cpp:244]     Train net output #1: loss = 0.0955076 (* 1 = 0.0955076 loss)\nI0817 21:50:00.929258 17344 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 21:52:19.402624 17344 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:53:42.236709 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35952\nI0817 21:53:42.236917 17344 solver.cpp:404]     Test net output #1: loss = 4.88463 (* 1 = 4.88463 loss)\nI0817 21:53:43.578820 17344 solver.cpp:228] Iteration 8800, loss = 0.081563\nI0817 21:53:43.578855 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:53:43.578874 17344 solver.cpp:244]     Train net output #1: loss = 
0.0815626 (* 1 = 0.0815626 loss)\nI0817 21:53:43.642071 17344 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 21:56:02.087718 17344 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:57:24.937225 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46616\nI0817 21:57:24.937461 17344 solver.cpp:404]     Test net output #1: loss = 3.42529 (* 1 = 3.42529 loss)\nI0817 21:57:26.279853 17344 solver.cpp:228] Iteration 8900, loss = 0.105434\nI0817 21:57:26.279893 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:57:26.279908 17344 solver.cpp:244]     Train net output #1: loss = 0.105434 (* 1 = 0.105434 loss)\nI0817 21:57:26.342526 17344 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 21:59:44.824944 17344 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 22:01:07.676568 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35976\nI0817 22:01:07.676801 17344 solver.cpp:404]     Test net output #1: loss = 3.80827 (* 1 = 3.80827 loss)\nI0817 22:01:09.018236 17344 solver.cpp:228] Iteration 9000, loss = 0.169611\nI0817 22:01:09.018270 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 22:01:09.018285 17344 solver.cpp:244]     Train net output #1: loss = 0.169611 (* 1 = 0.169611 loss)\nI0817 22:01:09.084514 17344 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 22:03:27.555884 17344 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 22:04:50.394453 17344 solver.cpp:404]     Test net output #0: accuracy = 0.45428\nI0817 22:04:50.394680 17344 solver.cpp:404]     Test net output #1: loss = 3.30808 (* 1 = 3.30808 loss)\nI0817 22:04:51.736654 17344 solver.cpp:228] Iteration 9100, loss = 0.20963\nI0817 22:04:51.736690 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:04:51.736704 17344 solver.cpp:244]     Train net output #1: loss = 0.209629 (* 1 = 0.209629 loss)\nI0817 22:04:51.803221 17344 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 22:07:10.269156 17344 
solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 22:08:33.129262 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42504\nI0817 22:08:33.129477 17344 solver.cpp:404]     Test net output #1: loss = 3.49735 (* 1 = 3.49735 loss)\nI0817 22:08:34.471676 17344 solver.cpp:228] Iteration 9200, loss = 0.0561705\nI0817 22:08:34.471710 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:08:34.471725 17344 solver.cpp:244]     Train net output #1: loss = 0.0561701 (* 1 = 0.0561701 loss)\nI0817 22:08:34.533258 17344 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 22:10:53.011077 17344 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 22:12:15.862924 17344 solver.cpp:404]     Test net output #0: accuracy = 0.33844\nI0817 22:12:15.863164 17344 solver.cpp:404]     Test net output #1: loss = 4.23581 (* 1 = 4.23581 loss)\nI0817 22:12:17.205530 17344 solver.cpp:228] Iteration 9300, loss = 0.118813\nI0817 22:12:17.205565 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:12:17.205580 17344 solver.cpp:244]     Train net output #1: loss = 0.118813 (* 1 = 0.118813 loss)\nI0817 22:12:17.271132 17344 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 22:14:35.736702 17344 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:15:58.594197 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20564\nI0817 22:15:58.594441 17344 solver.cpp:404]     Test net output #1: loss = 7.10424 (* 1 = 7.10424 loss)\nI0817 22:15:59.936502 17344 solver.cpp:228] Iteration 9400, loss = 0.10847\nI0817 22:15:59.936537 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:15:59.936553 17344 solver.cpp:244]     Train net output #1: loss = 0.10847 (* 1 = 0.10847 loss)\nI0817 22:16:00.002187 17344 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 22:18:18.497800 17344 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:19:41.353824 17344 solver.cpp:404]     Test net output #0: accuracy = 0.325\nI0817 
22:19:41.354082 17344 solver.cpp:404]     Test net output #1: loss = 4.56469 (* 1 = 4.56469 loss)\nI0817 22:19:42.696707 17344 solver.cpp:228] Iteration 9500, loss = 0.0505277\nI0817 22:19:42.696740 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:19:42.696755 17344 solver.cpp:244]     Train net output #1: loss = 0.0505273 (* 1 = 0.0505273 loss)\nI0817 22:19:42.761922 17344 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 22:22:01.271813 17344 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:23:24.128731 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19928\nI0817 22:23:24.129102 17344 solver.cpp:404]     Test net output #1: loss = 6.07994 (* 1 = 6.07994 loss)\nI0817 22:23:25.471447 17344 solver.cpp:228] Iteration 9600, loss = 0.146885\nI0817 22:23:25.471482 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:23:25.471498 17344 solver.cpp:244]     Train net output #1: loss = 0.146884 (* 1 = 0.146884 loss)\nI0817 22:23:25.541967 17344 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 22:25:44.103267 17344 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:27:06.958119 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37588\nI0817 22:27:06.958370 17344 solver.cpp:404]     Test net output #1: loss = 3.86316 (* 1 = 3.86316 loss)\nI0817 22:27:08.301720 17344 solver.cpp:228] Iteration 9700, loss = 0.0821434\nI0817 22:27:08.301754 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:27:08.301770 17344 solver.cpp:244]     Train net output #1: loss = 0.082143 (* 1 = 0.082143 loss)\nI0817 22:27:08.364964 17344 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 22:29:27.052908 17344 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:30:49.901856 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2752\nI0817 22:30:49.902130 17344 solver.cpp:404]     Test net output #1: loss = 5.57597 (* 1 = 5.57597 loss)\nI0817 22:30:51.245506 17344 solver.cpp:228] 
Iteration 9800, loss = 0.0909413\nI0817 22:30:51.245538 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:30:51.245554 17344 solver.cpp:244]     Train net output #1: loss = 0.090941 (* 1 = 0.090941 loss)\nI0817 22:30:51.304517 17344 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 22:33:09.956756 17344 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:34:32.820121 17344 solver.cpp:404]     Test net output #0: accuracy = 0.40004\nI0817 22:34:32.820353 17344 solver.cpp:404]     Test net output #1: loss = 3.70604 (* 1 = 3.70604 loss)\nI0817 22:34:34.163056 17344 solver.cpp:228] Iteration 9900, loss = 0.127765\nI0817 22:34:34.163091 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:34:34.163111 17344 solver.cpp:244]     Train net output #1: loss = 0.127764 (* 1 = 0.127764 loss)\nI0817 22:34:34.229147 17344 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 22:36:52.869083 17344 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:38:15.729110 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4646\nI0817 22:38:15.729346 17344 solver.cpp:404]     Test net output #1: loss = 2.69676 (* 1 = 2.69676 loss)\nI0817 22:38:17.071416 17344 solver.cpp:228] Iteration 10000, loss = 0.204332\nI0817 22:38:17.071451 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 22:38:17.071466 17344 solver.cpp:244]     Train net output #1: loss = 0.204331 (* 1 = 0.204331 loss)\nI0817 22:38:17.131168 17344 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 22:40:35.723546 17344 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:41:58.592144 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36668\nI0817 22:41:58.592332 17344 solver.cpp:404]     Test net output #1: loss = 6.335 (* 1 = 6.335 loss)\nI0817 22:41:59.935652 17344 solver.cpp:228] Iteration 10100, loss = 0.132101\nI0817 22:41:59.935686 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:41:59.935701 
17344 solver.cpp:244]     Train net output #1: loss = 0.1321 (* 1 = 0.1321 loss)\nI0817 22:42:00.006196 17344 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 22:44:18.625478 17344 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:45:41.479496 17344 solver.cpp:404]     Test net output #0: accuracy = 0.153\nI0817 22:45:41.479758 17344 solver.cpp:404]     Test net output #1: loss = 19.0962 (* 1 = 19.0962 loss)\nI0817 22:45:42.822005 17344 solver.cpp:228] Iteration 10200, loss = 0.0412637\nI0817 22:45:42.822039 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:45:42.822054 17344 solver.cpp:244]     Train net output #1: loss = 0.0412634 (* 1 = 0.0412634 loss)\nI0817 22:45:42.896903 17344 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 22:48:01.482729 17344 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:49:24.332801 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16856\nI0817 22:49:24.333029 17344 solver.cpp:404]     Test net output #1: loss = 16.8236 (* 1 = 16.8236 loss)\nI0817 22:49:25.676090 17344 solver.cpp:228] Iteration 10300, loss = 0.079479\nI0817 22:49:25.676126 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:49:25.676141 17344 solver.cpp:244]     Train net output #1: loss = 0.0794787 (* 1 = 0.0794787 loss)\nI0817 22:49:25.744431 17344 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 22:51:44.314376 17344 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:53:07.176226 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26744\nI0817 22:53:07.176443 17344 solver.cpp:404]     Test net output #1: loss = 9.94541 (* 1 = 9.94541 loss)\nI0817 22:53:08.519951 17344 solver.cpp:228] Iteration 10400, loss = 0.147839\nI0817 22:53:08.519986 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 22:53:08.520001 17344 solver.cpp:244]     Train net output #1: loss = 0.147838 (* 1 = 0.147838 loss)\nI0817 22:53:08.581889 17344 sgd_solver.cpp:166] 
Iteration 10400, lr = 0.35\nI0817 22:55:27.180794 17344 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:56:50.048669 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16988\nI0817 22:56:50.048888 17344 solver.cpp:404]     Test net output #1: loss = 13.0241 (* 1 = 13.0241 loss)\nI0817 22:56:51.391764 17344 solver.cpp:228] Iteration 10500, loss = 0.0672864\nI0817 22:56:51.391799 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:56:51.391814 17344 solver.cpp:244]     Train net output #1: loss = 0.0672861 (* 1 = 0.0672861 loss)\nI0817 22:56:51.458698 17344 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0817 22:59:09.955664 17344 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 23:00:32.822474 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29172\nI0817 23:00:32.822721 17344 solver.cpp:404]     Test net output #1: loss = 5.21716 (* 1 = 5.21716 loss)\nI0817 23:00:34.166031 17344 solver.cpp:228] Iteration 10600, loss = 0.135794\nI0817 23:00:34.166065 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:00:34.166080 17344 solver.cpp:244]     Train net output #1: loss = 0.135793 (* 1 = 0.135793 loss)\nI0817 23:00:34.231883 17344 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 23:02:52.820694 17344 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 23:04:15.691052 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24444\nI0817 23:04:15.691298 17344 solver.cpp:404]     Test net output #1: loss = 8.30037 (* 1 = 8.30037 loss)\nI0817 23:04:17.034183 17344 solver.cpp:228] Iteration 10700, loss = 0.0439917\nI0817 23:04:17.034217 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:04:17.034232 17344 solver.cpp:244]     Train net output #1: loss = 0.0439914 (* 1 = 0.0439914 loss)\nI0817 23:04:17.103828 17344 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0817 23:06:35.711246 17344 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 23:07:58.576581 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.21296\nI0817 23:07:58.576836 17344 solver.cpp:404]     Test net output #1: loss = 8.29207 (* 1 = 8.29207 loss)\nI0817 23:07:59.920280 17344 solver.cpp:228] Iteration 10800, loss = 0.102228\nI0817 23:07:59.920315 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:07:59.920330 17344 solver.cpp:244]     Train net output #1: loss = 0.102228 (* 1 = 0.102228 loss)\nI0817 23:07:59.986896 17344 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 23:10:18.603562 17344 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 23:11:41.457352 17344 solver.cpp:404]     Test net output #0: accuracy = 0.164\nI0817 23:11:41.457581 17344 solver.cpp:404]     Test net output #1: loss = 8.86359 (* 1 = 8.86359 loss)\nI0817 23:11:42.799996 17344 solver.cpp:228] Iteration 10900, loss = 0.0724215\nI0817 23:11:42.800031 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:11:42.800047 17344 solver.cpp:244]     Train net output #1: loss = 0.0724211 (* 1 = 0.0724211 loss)\nI0817 23:11:42.871899 17344 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 23:14:01.436195 17344 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 23:15:24.208991 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24208\nI0817 23:15:24.209254 17344 solver.cpp:404]     Test net output #1: loss = 8.99902 (* 1 = 8.99902 loss)\nI0817 23:15:25.552088 17344 solver.cpp:228] Iteration 11000, loss = 0.120025\nI0817 23:15:25.552126 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:15:25.552141 17344 solver.cpp:244]     Train net output #1: loss = 0.120024 (* 1 = 0.120024 loss)\nI0817 23:15:25.625388 17344 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 23:17:44.218755 17344 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 23:19:06.992635 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11444\nI0817 23:19:06.992899 17344 solver.cpp:404]     Test net output #1: loss = 
22.9125 (* 1 = 22.9125 loss)\nI0817 23:19:08.336033 17344 solver.cpp:228] Iteration 11100, loss = 0.18359\nI0817 23:19:08.336067 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:19:08.336082 17344 solver.cpp:244]     Train net output #1: loss = 0.18359 (* 1 = 0.18359 loss)\nI0817 23:19:08.404027 17344 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 23:21:26.972323 17344 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:22:50.297828 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14028\nI0817 23:22:50.298138 17344 solver.cpp:404]     Test net output #1: loss = 39.9849 (* 1 = 39.9849 loss)\nI0817 23:22:51.644093 17344 solver.cpp:228] Iteration 11200, loss = 0.103898\nI0817 23:22:51.644130 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:22:51.644146 17344 solver.cpp:244]     Train net output #1: loss = 0.103897 (* 1 = 0.103897 loss)\nI0817 23:22:51.706401 17344 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 23:25:10.276381 17344 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:26:33.050045 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14336\nI0817 23:26:33.050281 17344 solver.cpp:404]     Test net output #1: loss = 36.3571 (* 1 = 36.3571 loss)\nI0817 23:26:34.392227 17344 solver.cpp:228] Iteration 11300, loss = 0.0541281\nI0817 23:26:34.392261 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:26:34.392276 17344 solver.cpp:244]     Train net output #1: loss = 0.0541277 (* 1 = 0.0541277 loss)\nI0817 23:26:34.464038 17344 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 23:28:52.862004 17344 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:30:15.616125 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17932\nI0817 23:30:15.616376 17344 solver.cpp:404]     Test net output #1: loss = 28.2194 (* 1 = 28.2194 loss)\nI0817 23:30:16.959136 17344 solver.cpp:228] Iteration 11400, loss = 0.0749887\nI0817 23:30:16.959168 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:30:16.959184 17344 solver.cpp:244]     Train net output #1: loss = 0.0749883 (* 1 = 0.0749883 loss)\nI0817 23:30:17.031752 17344 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 23:32:35.488840 17344 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:33:58.254211 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17944\nI0817 23:33:58.254431 17344 solver.cpp:404]     Test net output #1: loss = 35.6048 (* 1 = 35.6048 loss)\nI0817 23:33:59.596698 17344 solver.cpp:228] Iteration 11500, loss = 0.119338\nI0817 23:33:59.596731 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:33:59.596746 17344 solver.cpp:244]     Train net output #1: loss = 0.119338 (* 1 = 0.119338 loss)\nI0817 23:33:59.666195 17344 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 23:36:18.090123 17344 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:37:40.852296 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15604\nI0817 23:37:40.852555 17344 solver.cpp:404]     Test net output #1: loss = 25.9657 (* 1 = 25.9657 loss)\nI0817 23:37:42.194962 17344 solver.cpp:228] Iteration 11600, loss = 0.0873689\nI0817 23:37:42.194996 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:37:42.195011 17344 solver.cpp:244]     Train net output #1: loss = 0.0873686 (* 1 = 0.0873686 loss)\nI0817 23:37:42.265590 17344 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 23:40:00.746644 17344 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:41:23.496417 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28668\nI0817 23:41:23.496670 17344 solver.cpp:404]     Test net output #1: loss = 7.34428 (* 1 = 7.34428 loss)\nI0817 23:41:24.839385 17344 solver.cpp:228] Iteration 11700, loss = 0.104739\nI0817 23:41:24.839418 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:41:24.839434 17344 solver.cpp:244]     Train net output #1: loss 
= 0.104739 (* 1 = 0.104739 loss)\nI0817 23:41:24.907508 17344 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 23:43:43.366611 17344 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:45:06.104178 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27404\nI0817 23:45:06.104421 17344 solver.cpp:404]     Test net output #1: loss = 7.54115 (* 1 = 7.54115 loss)\nI0817 23:45:07.446861 17344 solver.cpp:228] Iteration 11800, loss = 0.120453\nI0817 23:45:07.446895 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:45:07.446915 17344 solver.cpp:244]     Train net output #1: loss = 0.120453 (* 1 = 0.120453 loss)\nI0817 23:45:07.515943 17344 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 23:47:25.979447 17344 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:48:48.822558 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13532\nI0817 23:48:48.822793 17344 solver.cpp:404]     Test net output #1: loss = 10.4836 (* 1 = 10.4836 loss)\nI0817 23:48:50.164767 17344 solver.cpp:228] Iteration 11900, loss = 0.126423\nI0817 23:48:50.164799 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:48:50.164813 17344 solver.cpp:244]     Train net output #1: loss = 0.126422 (* 1 = 0.126422 loss)\nI0817 23:48:50.236922 17344 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 23:51:08.677556 17344 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:52:31.541081 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34588\nI0817 23:52:31.541335 17344 solver.cpp:404]     Test net output #1: loss = 6.90094 (* 1 = 6.90094 loss)\nI0817 23:52:32.882989 17344 solver.cpp:228] Iteration 12000, loss = 0.0588107\nI0817 23:52:32.883023 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:52:32.883038 17344 solver.cpp:244]     Train net output #1: loss = 0.0588104 (* 1 = 0.0588104 loss)\nI0817 23:52:32.958348 17344 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0817 23:54:51.391269 
17344 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 23:56:14.245159 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35352\nI0817 23:56:14.245411 17344 solver.cpp:404]     Test net output #1: loss = 7.50772 (* 1 = 7.50772 loss)\nI0817 23:56:15.588400 17344 solver.cpp:228] Iteration 12100, loss = 0.0361772\nI0817 23:56:15.588435 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:56:15.588449 17344 solver.cpp:244]     Train net output #1: loss = 0.0361769 (* 1 = 0.0361769 loss)\nI0817 23:56:15.663812 17344 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0817 23:58:34.126085 17344 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:59:56.981972 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38008\nI0817 23:59:56.982229 17344 solver.cpp:404]     Test net output #1: loss = 5.07634 (* 1 = 5.07634 loss)\nI0817 23:59:58.324697 17344 solver.cpp:228] Iteration 12200, loss = 0.0514578\nI0817 23:59:58.324730 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:59:58.324745 17344 solver.cpp:244]     Train net output #1: loss = 0.0514575 (* 1 = 0.0514575 loss)\nI0817 23:59:58.393409 17344 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0818 00:02:16.830343 17344 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 00:03:39.677561 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35096\nI0818 00:03:39.677794 17344 solver.cpp:404]     Test net output #1: loss = 6.1106 (* 1 = 6.1106 loss)\nI0818 00:03:41.020406 17344 solver.cpp:228] Iteration 12300, loss = 0.102698\nI0818 00:03:41.020439 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:03:41.020454 17344 solver.cpp:244]     Train net output #1: loss = 0.102698 (* 1 = 0.102698 loss)\nI0818 00:03:41.094075 17344 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0818 00:05:59.417098 17344 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 00:07:22.254887 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.24848\nI0818 00:07:22.255146 17344 solver.cpp:404]     Test net output #1: loss = 6.56482 (* 1 = 6.56482 loss)\nI0818 00:07:23.597374 17344 solver.cpp:228] Iteration 12400, loss = 0.0937903\nI0818 00:07:23.597409 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:07:23.597424 17344 solver.cpp:244]     Train net output #1: loss = 0.09379 (* 1 = 0.09379 loss)\nI0818 00:07:23.672318 17344 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0818 00:09:42.117460 17344 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 00:11:04.971222 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28324\nI0818 00:11:04.971472 17344 solver.cpp:404]     Test net output #1: loss = 5.87298 (* 1 = 5.87298 loss)\nI0818 00:11:06.313695 17344 solver.cpp:228] Iteration 12500, loss = 0.134183\nI0818 00:11:06.313730 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:11:06.313745 17344 solver.cpp:244]     Train net output #1: loss = 0.134183 (* 1 = 0.134183 loss)\nI0818 00:11:06.384399 17344 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0818 00:13:24.836184 17344 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 00:14:47.684016 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28016\nI0818 00:14:47.684247 17344 solver.cpp:404]     Test net output #1: loss = 5.84282 (* 1 = 5.84282 loss)\nI0818 00:14:49.026655 17344 solver.cpp:228] Iteration 12600, loss = 0.0877491\nI0818 00:14:49.026690 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:14:49.026705 17344 solver.cpp:244]     Train net output #1: loss = 0.0877488 (* 1 = 0.0877488 loss)\nI0818 00:14:49.100853 17344 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0818 00:17:07.575248 17344 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:18:30.429404 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32464\nI0818 00:18:30.429623 17344 solver.cpp:404]     Test net output #1: loss = 5.2255 (* 1 = 5.2255 loss)\nI0818 00:18:31.771574 
17344 solver.cpp:228] Iteration 12700, loss = 0.0540156\nI0818 00:18:31.771608 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:18:31.771625 17344 solver.cpp:244]     Train net output #1: loss = 0.0540153 (* 1 = 0.0540153 loss)\nI0818 00:18:31.845332 17344 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0818 00:20:50.284375 17344 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:22:13.134151 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18432\nI0818 00:22:13.134409 17344 solver.cpp:404]     Test net output #1: loss = 10.0622 (* 1 = 10.0622 loss)\nI0818 00:22:14.476866 17344 solver.cpp:228] Iteration 12800, loss = 0.0580903\nI0818 00:22:14.476899 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:22:14.476920 17344 solver.cpp:244]     Train net output #1: loss = 0.05809 (* 1 = 0.05809 loss)\nI0818 00:22:14.550745 17344 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0818 00:24:32.981937 17344 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:25:55.847268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16336\nI0818 00:25:55.847524 17344 solver.cpp:404]     Test net output #1: loss = 14.1512 (* 1 = 14.1512 loss)\nI0818 00:25:57.189429 17344 solver.cpp:228] Iteration 12900, loss = 0.0524363\nI0818 00:25:57.189465 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:25:57.189479 17344 solver.cpp:244]     Train net output #1: loss = 0.052436 (* 1 = 0.052436 loss)\nI0818 00:25:57.260095 17344 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0818 00:28:15.699615 17344 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:29:38.549584 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 00:29:38.549844 17344 solver.cpp:404]     Test net output #1: loss = 39.6247 (* 1 = 39.6247 loss)\nI0818 00:29:39.892130 17344 solver.cpp:228] Iteration 13000, loss = 0.105231\nI0818 00:29:39.892166 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0818 00:29:39.892181 17344 solver.cpp:244]     Train net output #1: loss = 0.105231 (* 1 = 0.105231 loss)\nI0818 00:29:39.970373 17344 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0818 00:31:58.415633 17344 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:33:21.259691 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10896\nI0818 00:33:21.259956 17344 solver.cpp:404]     Test net output #1: loss = 33.5629 (* 1 = 33.5629 loss)\nI0818 00:33:22.601398 17344 solver.cpp:228] Iteration 13100, loss = 0.108509\nI0818 00:33:22.601433 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:33:22.601449 17344 solver.cpp:244]     Train net output #1: loss = 0.108509 (* 1 = 0.108509 loss)\nI0818 00:33:22.673122 17344 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0818 00:35:41.123447 17344 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:37:03.969686 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17592\nI0818 00:37:03.969943 17344 solver.cpp:404]     Test net output #1: loss = 15.652 (* 1 = 15.652 loss)\nI0818 00:37:05.312645 17344 solver.cpp:228] Iteration 13200, loss = 0.0506837\nI0818 00:37:05.312680 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:37:05.312695 17344 solver.cpp:244]     Train net output #1: loss = 0.0506833 (* 1 = 0.0506833 loss)\nI0818 00:37:05.388443 17344 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0818 00:39:23.737485 17344 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:40:46.584038 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17708\nI0818 00:40:46.584300 17344 solver.cpp:404]     Test net output #1: loss = 12.8426 (* 1 = 12.8426 loss)\nI0818 00:40:47.925576 17344 solver.cpp:228] Iteration 13300, loss = 0.11052\nI0818 00:40:47.925611 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:40:47.925626 17344 solver.cpp:244]     Train net output #1: loss = 0.11052 (* 1 = 0.11052 loss)\nI0818 00:40:48.007280 
17344 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0818 00:43:06.624277 17344 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:44:29.730693 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14644\nI0818 00:44:29.730967 17344 solver.cpp:404]     Test net output #1: loss = 19.8163 (* 1 = 19.8163 loss)\nI0818 00:44:31.075752 17344 solver.cpp:228] Iteration 13400, loss = 0.0333789\nI0818 00:44:31.075793 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:44:31.075808 17344 solver.cpp:244]     Train net output #1: loss = 0.0333786 (* 1 = 0.0333786 loss)\nI0818 00:44:31.143538 17344 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0818 00:46:50.039722 17344 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:48:13.113512 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14256\nI0818 00:48:13.113782 17344 solver.cpp:404]     Test net output #1: loss = 30.3126 (* 1 = 30.3126 loss)\nI0818 00:48:14.458781 17344 solver.cpp:228] Iteration 13500, loss = 0.0742372\nI0818 00:48:14.458824 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:48:14.458839 17344 solver.cpp:244]     Train net output #1: loss = 0.0742369 (* 1 = 0.0742369 loss)\nI0818 00:48:14.532901 17344 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0818 00:50:33.333217 17344 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:51:56.468921 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25712\nI0818 00:51:56.469195 17344 solver.cpp:404]     Test net output #1: loss = 12.8623 (* 1 = 12.8623 loss)\nI0818 00:51:57.813072 17344 solver.cpp:228] Iteration 13600, loss = 0.0622477\nI0818 00:51:57.813112 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:51:57.813128 17344 solver.cpp:244]     Train net output #1: loss = 0.0622474 (* 1 = 0.0622474 loss)\nI0818 00:51:57.881254 17344 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0818 00:54:16.941452 17344 solver.cpp:337] Iteration 13700, Testing net 
(#0)\nI0818 00:55:40.657995 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21488\nI0818 00:55:40.658298 17344 solver.cpp:404]     Test net output #1: loss = 16.3111 (* 1 = 16.3111 loss)\nI0818 00:55:42.003818 17344 solver.cpp:228] Iteration 13700, loss = 0.0799528\nI0818 00:55:42.003861 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:55:42.003877 17344 solver.cpp:244]     Train net output #1: loss = 0.0799524 (* 1 = 0.0799524 loss)\nI0818 00:55:42.074688 17344 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0818 00:58:00.941547 17344 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:59:24.638463 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23284\nI0818 00:59:24.638761 17344 solver.cpp:404]     Test net output #1: loss = 12.2484 (* 1 = 12.2484 loss)\nI0818 00:59:25.983368 17344 solver.cpp:228] Iteration 13800, loss = 0.0760452\nI0818 00:59:25.983412 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:59:25.983428 17344 solver.cpp:244]     Train net output #1: loss = 0.0760449 (* 1 = 0.0760449 loss)\nI0818 00:59:26.056155 17344 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0818 01:01:44.965450 17344 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 01:03:08.665421 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27432\nI0818 01:03:08.665731 17344 solver.cpp:404]     Test net output #1: loss = 14.2725 (* 1 = 14.2725 loss)\nI0818 01:03:10.009749 17344 solver.cpp:228] Iteration 13900, loss = 0.0622478\nI0818 01:03:10.009791 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:03:10.009807 17344 solver.cpp:244]     Train net output #1: loss = 0.0622475 (* 1 = 0.0622475 loss)\nI0818 01:03:10.074167 17344 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0818 01:05:29.060161 17344 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 01:06:52.776032 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28156\nI0818 01:06:52.776355 17344 
solver.cpp:404]     Test net output #1: loss = 12.302 (* 1 = 12.302 loss)\nI0818 01:06:54.121840 17344 solver.cpp:228] Iteration 14000, loss = 0.115288\nI0818 01:06:54.121884 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 01:06:54.121901 17344 solver.cpp:244]     Train net output #1: loss = 0.115288 (* 1 = 0.115288 loss)\nI0818 01:06:54.185600 17344 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 01:09:13.120414 17344 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 01:10:36.826256 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15272\nI0818 01:10:36.826562 17344 solver.cpp:404]     Test net output #1: loss = 20.5267 (* 1 = 20.5267 loss)\nI0818 01:10:38.171994 17344 solver.cpp:228] Iteration 14100, loss = 0.111199\nI0818 01:10:38.172037 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:10:38.172052 17344 solver.cpp:244]     Train net output #1: loss = 0.111198 (* 1 = 0.111198 loss)\nI0818 01:10:38.242101 17344 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0818 01:12:57.236572 17344 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 01:14:20.941208 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23872\nI0818 01:14:20.941520 17344 solver.cpp:404]     Test net output #1: loss = 10.0462 (* 1 = 10.0462 loss)\nI0818 01:14:22.285629 17344 solver.cpp:228] Iteration 14200, loss = 0.0595204\nI0818 01:14:22.285670 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:14:22.285684 17344 solver.cpp:244]     Train net output #1: loss = 0.0595201 (* 1 = 0.0595201 loss)\nI0818 01:14:22.361871 17344 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0818 01:16:41.410328 17344 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 01:18:05.107030 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28984\nI0818 01:18:05.107355 17344 solver.cpp:404]     Test net output #1: loss = 7.0332 (* 1 = 7.0332 loss)\nI0818 01:18:06.452421 17344 solver.cpp:228] Iteration 14300, 
loss = 0.106023\nI0818 01:18:06.452462 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:18:06.452477 17344 solver.cpp:244]     Train net output #1: loss = 0.106023 (* 1 = 0.106023 loss)\nI0818 01:18:06.520406 17344 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0818 01:20:25.442844 17344 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:21:49.140563 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27164\nI0818 01:21:49.140933 17344 solver.cpp:404]     Test net output #1: loss = 7.57097 (* 1 = 7.57097 loss)\nI0818 01:21:50.486225 17344 solver.cpp:228] Iteration 14400, loss = 0.0745201\nI0818 01:21:50.486266 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:21:50.486282 17344 solver.cpp:244]     Train net output #1: loss = 0.0745198 (* 1 = 0.0745198 loss)\nI0818 01:21:50.558559 17344 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0818 01:24:09.630097 17344 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:25:33.323662 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1662\nI0818 01:25:33.323995 17344 solver.cpp:404]     Test net output #1: loss = 18.1691 (* 1 = 18.1691 loss)\nI0818 01:25:34.668982 17344 solver.cpp:228] Iteration 14500, loss = 0.0499644\nI0818 01:25:34.669025 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:25:34.669040 17344 solver.cpp:244]     Train net output #1: loss = 0.0499641 (* 1 = 0.0499641 loss)\nI0818 01:25:34.735966 17344 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0818 01:27:53.645050 17344 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:29:17.325182 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13088\nI0818 01:29:17.325546 17344 solver.cpp:404]     Test net output #1: loss = 26.0046 (* 1 = 26.0046 loss)\nI0818 01:29:18.670747 17344 solver.cpp:228] Iteration 14600, loss = 0.0922412\nI0818 01:29:18.670789 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:29:18.670805 17344 
solver.cpp:244]     Train net output #1: loss = 0.092241 (* 1 = 0.092241 loss)\nI0818 01:29:18.733623 17344 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0818 01:31:37.635839 17344 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:33:01.318315 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1454\nI0818 01:33:01.318629 17344 solver.cpp:404]     Test net output #1: loss = 30.9097 (* 1 = 30.9097 loss)\nI0818 01:33:02.664098 17344 solver.cpp:228] Iteration 14700, loss = 0.0807777\nI0818 01:33:02.664139 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:33:02.664155 17344 solver.cpp:244]     Train net output #1: loss = 0.0807774 (* 1 = 0.0807774 loss)\nI0818 01:33:02.736382 17344 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0818 01:35:21.802829 17344 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:36:45.487869 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15504\nI0818 01:36:45.488198 17344 solver.cpp:404]     Test net output #1: loss = 36.8024 (* 1 = 36.8024 loss)\nI0818 01:36:46.831879 17344 solver.cpp:228] Iteration 14800, loss = 0.0451957\nI0818 01:36:46.831923 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:36:46.831938 17344 solver.cpp:244]     Train net output #1: loss = 0.0451954 (* 1 = 0.0451954 loss)\nI0818 01:36:46.900231 17344 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0818 01:39:05.758316 17344 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:40:29.435261 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13268\nI0818 01:40:29.435547 17344 solver.cpp:404]     Test net output #1: loss = 35.8956 (* 1 = 35.8956 loss)\nI0818 01:40:30.779932 17344 solver.cpp:228] Iteration 14900, loss = 0.07591\nI0818 01:40:30.779973 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 01:40:30.779989 17344 solver.cpp:244]     Train net output #1: loss = 0.0759097 (* 1 = 0.0759097 loss)\nI0818 01:40:30.851706 17344 sgd_solver.cpp:166] 
Iteration 14900, lr = 0.35\nI0818 01:42:49.707116 17344 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:44:13.393623 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18708\nI0818 01:44:13.393930 17344 solver.cpp:404]     Test net output #1: loss = 15.4106 (* 1 = 15.4106 loss)\nI0818 01:44:14.738234 17344 solver.cpp:228] Iteration 15000, loss = 0.032062\nI0818 01:44:14.738273 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:44:14.738288 17344 solver.cpp:244]     Train net output #1: loss = 0.0320617 (* 1 = 0.0320617 loss)\nI0818 01:44:14.805773 17344 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0818 01:46:33.675225 17344 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:47:57.370380 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17904\nI0818 01:47:57.370702 17344 solver.cpp:404]     Test net output #1: loss = 15.3161 (* 1 = 15.3161 loss)\nI0818 01:47:58.714727 17344 solver.cpp:228] Iteration 15100, loss = 0.0830622\nI0818 01:47:58.714771 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:47:58.714785 17344 solver.cpp:244]     Train net output #1: loss = 0.0830619 (* 1 = 0.0830619 loss)\nI0818 01:47:58.783818 17344 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0818 01:50:17.672081 17344 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:51:41.353186 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15732\nI0818 01:51:41.353529 17344 solver.cpp:404]     Test net output #1: loss = 15.4227 (* 1 = 15.4227 loss)\nI0818 01:51:42.697983 17344 solver.cpp:228] Iteration 15200, loss = 0.0572819\nI0818 01:51:42.698026 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:51:42.698047 17344 solver.cpp:244]     Train net output #1: loss = 0.0572817 (* 1 = 0.0572817 loss)\nI0818 01:51:42.764217 17344 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0818 01:54:01.652632 17344 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:55:25.345500 
17344 solver.cpp:404]     Test net output #0: accuracy = 0.14244\nI0818 01:55:25.345799 17344 solver.cpp:404]     Test net output #1: loss = 14.8176 (* 1 = 14.8176 loss)\nI0818 01:55:26.689584 17344 solver.cpp:228] Iteration 15300, loss = 0.0864979\nI0818 01:55:26.689625 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:55:26.689640 17344 solver.cpp:244]     Train net output #1: loss = 0.0864977 (* 1 = 0.0864977 loss)\nI0818 01:55:26.762233 17344 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0818 01:57:45.714846 17344 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:59:09.405627 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16596\nI0818 01:59:09.405941 17344 solver.cpp:404]     Test net output #1: loss = 9.82132 (* 1 = 9.82132 loss)\nI0818 01:59:10.749866 17344 solver.cpp:228] Iteration 15400, loss = 0.0808177\nI0818 01:59:10.749905 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:59:10.749922 17344 solver.cpp:244]     Train net output #1: loss = 0.0808174 (* 1 = 0.0808174 loss)\nI0818 01:59:10.816074 17344 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0818 02:01:29.759344 17344 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 02:02:53.434993 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23844\nI0818 02:02:53.435283 17344 solver.cpp:404]     Test net output #1: loss = 8.7197 (* 1 = 8.7197 loss)\nI0818 02:02:54.779599 17344 solver.cpp:228] Iteration 15500, loss = 0.0474428\nI0818 02:02:54.779641 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:02:54.779657 17344 solver.cpp:244]     Train net output #1: loss = 0.0474426 (* 1 = 0.0474426 loss)\nI0818 02:02:54.850144 17344 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0818 02:05:13.729794 17344 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 02:06:37.421650 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19472\nI0818 02:06:37.421977 17344 solver.cpp:404]     Test net output 
#1: loss = 9.4411 (* 1 = 9.4411 loss)\nI0818 02:06:38.766427 17344 solver.cpp:228] Iteration 15600, loss = 0.0589516\nI0818 02:06:38.766464 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:06:38.766480 17344 solver.cpp:244]     Train net output #1: loss = 0.0589513 (* 1 = 0.0589513 loss)\nI0818 02:06:38.841941 17344 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0818 02:08:57.711781 17344 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 02:10:21.405248 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21328\nI0818 02:10:21.405563 17344 solver.cpp:404]     Test net output #1: loss = 10.2539 (* 1 = 10.2539 loss)\nI0818 02:10:22.749922 17344 solver.cpp:228] Iteration 15700, loss = 0.0925869\nI0818 02:10:22.749960 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:10:22.749977 17344 solver.cpp:244]     Train net output #1: loss = 0.0925866 (* 1 = 0.0925866 loss)\nI0818 02:10:22.814715 17344 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0818 02:12:41.720584 17344 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 02:14:05.427623 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18604\nI0818 02:14:05.427949 17344 solver.cpp:404]     Test net output #1: loss = 14.8846 (* 1 = 14.8846 loss)\nI0818 02:14:06.772414 17344 solver.cpp:228] Iteration 15800, loss = 0.0453475\nI0818 02:14:06.772452 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:14:06.772467 17344 solver.cpp:244]     Train net output #1: loss = 0.0453472 (* 1 = 0.0453472 loss)\nI0818 02:14:06.844103 17344 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0818 02:16:25.758286 17344 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 02:17:49.458341 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17024\nI0818 02:17:49.458644 17344 solver.cpp:404]     Test net output #1: loss = 14.261 (* 1 = 14.261 loss)\nI0818 02:17:50.803287 17344 solver.cpp:228] Iteration 15900, loss = 0.0787128\nI0818 
02:17:50.803324 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:17:50.803339 17344 solver.cpp:244]     Train net output #1: loss = 0.0787126 (* 1 = 0.0787126 loss)\nI0818 02:17:50.869487 17344 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0818 02:20:09.776075 17344 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:21:33.464476 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13972\nI0818 02:21:33.464778 17344 solver.cpp:404]     Test net output #1: loss = 13.2981 (* 1 = 13.2981 loss)\nI0818 02:21:34.808327 17344 solver.cpp:228] Iteration 16000, loss = 0.0919004\nI0818 02:21:34.808363 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:21:34.808378 17344 solver.cpp:244]     Train net output #1: loss = 0.0919002 (* 1 = 0.0919002 loss)\nI0818 02:21:34.886857 17344 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0818 02:23:53.784612 17344 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:25:17.477742 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22296\nI0818 02:25:17.478047 17344 solver.cpp:404]     Test net output #1: loss = 8.75807 (* 1 = 8.75807 loss)\nI0818 02:25:18.822080 17344 solver.cpp:228] Iteration 16100, loss = 0.0529905\nI0818 02:25:18.822118 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:25:18.822134 17344 solver.cpp:244]     Train net output #1: loss = 0.0529903 (* 1 = 0.0529903 loss)\nI0818 02:25:18.889948 17344 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0818 02:27:37.743954 17344 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:29:01.432042 17344 solver.cpp:404]     Test net output #0: accuracy = 0.209\nI0818 02:29:01.432356 17344 solver.cpp:404]     Test net output #1: loss = 7.64615 (* 1 = 7.64615 loss)\nI0818 02:29:02.776157 17344 solver.cpp:228] Iteration 16200, loss = 0.0275413\nI0818 02:29:02.776192 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:29:02.776207 17344 solver.cpp:244]     
Train net output #1: loss = 0.0275411 (* 1 = 0.0275411 loss)\nI0818 02:29:02.845058 17344 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0818 02:31:21.747506 17344 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:32:45.429553 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18744\nI0818 02:32:45.429873 17344 solver.cpp:404]     Test net output #1: loss = 7.15595 (* 1 = 7.15595 loss)\nI0818 02:32:46.774180 17344 solver.cpp:228] Iteration 16300, loss = 0.088007\nI0818 02:32:46.774217 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:32:46.774232 17344 solver.cpp:244]     Train net output #1: loss = 0.0880068 (* 1 = 0.0880068 loss)\nI0818 02:32:46.848363 17344 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0818 02:35:05.723309 17344 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:36:29.404095 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15624\nI0818 02:36:29.404393 17344 solver.cpp:404]     Test net output #1: loss = 7.90889 (* 1 = 7.90889 loss)\nI0818 02:36:30.748700 17344 solver.cpp:228] Iteration 16400, loss = 0.0635109\nI0818 02:36:30.748738 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:36:30.748752 17344 solver.cpp:244]     Train net output #1: loss = 0.0635106 (* 1 = 0.0635106 loss)\nI0818 02:36:30.819159 17344 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0818 02:38:49.686645 17344 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:40:13.384080 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12192\nI0818 02:40:13.384371 17344 solver.cpp:404]     Test net output #1: loss = 11.4276 (* 1 = 11.4276 loss)\nI0818 02:40:14.728742 17344 solver.cpp:228] Iteration 16500, loss = 0.0487115\nI0818 02:40:14.728781 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:40:14.728796 17344 solver.cpp:244]     Train net output #1: loss = 0.0487113 (* 1 = 0.0487113 loss)\nI0818 02:40:14.799465 17344 sgd_solver.cpp:166] Iteration 16500, 
lr = 0.35\nI0818 02:42:33.667171 17344 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:43:57.376196 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1596\nI0818 02:43:57.376518 17344 solver.cpp:404]     Test net output #1: loss = 11.092 (* 1 = 11.092 loss)\nI0818 02:43:58.720651 17344 solver.cpp:228] Iteration 16600, loss = 0.0495132\nI0818 02:43:58.720690 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:43:58.720705 17344 solver.cpp:244]     Train net output #1: loss = 0.049513 (* 1 = 0.049513 loss)\nI0818 02:43:58.790067 17344 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0818 02:46:17.663589 17344 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:47:41.371953 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15336\nI0818 02:47:41.372277 17344 solver.cpp:404]     Test net output #1: loss = 10.9264 (* 1 = 10.9264 loss)\nI0818 02:47:42.716728 17344 solver.cpp:228] Iteration 16700, loss = 0.137666\nI0818 02:47:42.716768 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 02:47:42.716784 17344 solver.cpp:244]     Train net output #1: loss = 0.137666 (* 1 = 0.137666 loss)\nI0818 02:47:42.791641 17344 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0818 02:50:01.675921 17344 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:51:25.392407 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1584\nI0818 02:51:25.392726 17344 solver.cpp:404]     Test net output #1: loss = 11.289 (* 1 = 11.289 loss)\nI0818 02:51:26.736878 17344 solver.cpp:228] Iteration 16800, loss = 0.030704\nI0818 02:51:26.736920 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:51:26.736937 17344 solver.cpp:244]     Train net output #1: loss = 0.0307037 (* 1 = 0.0307037 loss)\nI0818 02:51:26.807525 17344 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0818 02:53:45.690728 17344 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:55:09.403051 17344 solver.cpp:404]     
Test net output #0: accuracy = 0.14364\nI0818 02:55:09.403381 17344 solver.cpp:404]     Test net output #1: loss = 9.90545 (* 1 = 9.90545 loss)\nI0818 02:55:10.747375 17344 solver.cpp:228] Iteration 16900, loss = 0.0297878\nI0818 02:55:10.747413 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:55:10.747431 17344 solver.cpp:244]     Train net output #1: loss = 0.0297876 (* 1 = 0.0297876 loss)\nI0818 02:55:10.813506 17344 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0818 02:57:29.683838 17344 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:58:53.391144 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11588\nI0818 02:58:53.391450 17344 solver.cpp:404]     Test net output #1: loss = 10.6907 (* 1 = 10.6907 loss)\nI0818 02:58:54.735646 17344 solver.cpp:228] Iteration 17000, loss = 0.0826588\nI0818 02:58:54.735687 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:58:54.735702 17344 solver.cpp:244]     Train net output #1: loss = 0.0826585 (* 1 = 0.0826585 loss)\nI0818 02:58:54.796547 17344 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0818 03:01:13.676355 17344 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 03:02:37.368839 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23148\nI0818 03:02:37.369164 17344 solver.cpp:404]     Test net output #1: loss = 6.01458 (* 1 = 6.01458 loss)\nI0818 03:02:38.713865 17344 solver.cpp:228] Iteration 17100, loss = 0.0795157\nI0818 03:02:38.713906 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:02:38.713922 17344 solver.cpp:244]     Train net output #1: loss = 0.0795154 (* 1 = 0.0795154 loss)\nI0818 03:02:38.779314 17344 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0818 03:04:57.686321 17344 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 03:06:21.386888 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22924\nI0818 03:06:21.387223 17344 solver.cpp:404]     Test net output #1: loss = 8.29233 (* 1 
= 8.29233 loss)\nI0818 03:06:22.732065 17344 solver.cpp:228] Iteration 17200, loss = 0.0172153\nI0818 03:06:22.732108 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:06:22.732123 17344 solver.cpp:244]     Train net output #1: loss = 0.017215 (* 1 = 0.017215 loss)\nI0818 03:06:22.802243 17344 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0818 03:08:41.711935 17344 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 03:10:05.400565 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17192\nI0818 03:10:05.400848 17344 solver.cpp:404]     Test net output #1: loss = 8.30136 (* 1 = 8.30136 loss)\nI0818 03:10:06.745045 17344 solver.cpp:228] Iteration 17300, loss = 0.0466576\nI0818 03:10:06.745086 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:10:06.745102 17344 solver.cpp:244]     Train net output #1: loss = 0.0466573 (* 1 = 0.0466573 loss)\nI0818 03:10:06.808699 17344 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0818 03:12:25.684375 17344 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 03:13:49.384001 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18328\nI0818 03:13:49.384331 17344 solver.cpp:404]     Test net output #1: loss = 8.48357 (* 1 = 8.48357 loss)\nI0818 03:13:50.728508 17344 solver.cpp:228] Iteration 17400, loss = 0.245897\nI0818 03:13:50.728549 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 03:13:50.728564 17344 solver.cpp:244]     Train net output #1: loss = 0.245896 (* 1 = 0.245896 loss)\nI0818 03:13:50.793941 17344 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0818 03:16:09.666141 17344 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 03:17:33.359277 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14104\nI0818 03:17:33.359603 17344 solver.cpp:404]     Test net output #1: loss = 8.23956 (* 1 = 8.23956 loss)\nI0818 03:17:34.703629 17344 solver.cpp:228] Iteration 17500, loss = 0.0375866\nI0818 03:17:34.703670 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:17:34.703686 17344 solver.cpp:244]     Train net output #1: loss = 0.0375863 (* 1 = 0.0375863 loss)\nI0818 03:17:34.780762 17344 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0818 03:19:53.773603 17344 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:21:17.466964 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18064\nI0818 03:21:17.467267 17344 solver.cpp:404]     Test net output #1: loss = 7.99649 (* 1 = 7.99649 loss)\nI0818 03:21:18.812736 17344 solver.cpp:228] Iteration 17600, loss = 0.0273695\nI0818 03:21:18.812778 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:21:18.812794 17344 solver.cpp:244]     Train net output #1: loss = 0.0273692 (* 1 = 0.0273692 loss)\nI0818 03:21:18.874644 17344 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0818 03:23:37.911485 17344 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:25:01.582254 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19252\nI0818 03:25:01.582564 17344 solver.cpp:404]     Test net output #1: loss = 9.18963 (* 1 = 9.18963 loss)\nI0818 03:25:02.927088 17344 solver.cpp:228] Iteration 17700, loss = 0.0335046\nI0818 03:25:02.927130 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:25:02.927146 17344 solver.cpp:244]     Train net output #1: loss = 0.0335043 (* 1 = 0.0335043 loss)\nI0818 03:25:02.992538 17344 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0818 03:27:21.897569 17344 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:28:45.577661 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17652\nI0818 03:28:45.577981 17344 solver.cpp:404]     Test net output #1: loss = 8.80649 (* 1 = 8.80649 loss)\nI0818 03:28:46.921844 17344 solver.cpp:228] Iteration 17800, loss = 0.0214149\nI0818 03:28:46.921885 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:28:46.921900 17344 solver.cpp:244]     Train net output #1: loss = 
0.0214146 (* 1 = 0.0214146 loss)\nI0818 03:28:46.998524 17344 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0818 03:31:05.861513 17344 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:32:29.525465 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14004\nI0818 03:32:29.525771 17344 solver.cpp:404]     Test net output #1: loss = 16.8343 (* 1 = 16.8343 loss)\nI0818 03:32:30.870187 17344 solver.cpp:228] Iteration 17900, loss = 0.0516059\nI0818 03:32:30.870229 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:32:30.870245 17344 solver.cpp:244]     Train net output #1: loss = 0.0516056 (* 1 = 0.0516056 loss)\nI0818 03:32:30.943724 17344 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0818 03:34:49.799712 17344 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:36:13.479907 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22768\nI0818 03:36:13.480219 17344 solver.cpp:404]     Test net output #1: loss = 5.34525 (* 1 = 5.34525 loss)\nI0818 03:36:14.824167 17344 solver.cpp:228] Iteration 18000, loss = 0.09985\nI0818 03:36:14.824208 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:36:14.824224 17344 solver.cpp:244]     Train net output #1: loss = 0.0998498 (* 1 = 0.0998498 loss)\nI0818 03:36:14.897266 17344 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0818 03:38:33.763641 17344 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:39:57.447923 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15152\nI0818 03:39:57.448235 17344 solver.cpp:404]     Test net output #1: loss = 19.4474 (* 1 = 19.4474 loss)\nI0818 03:39:58.792567 17344 solver.cpp:228] Iteration 18100, loss = 0.0297653\nI0818 03:39:58.792608 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:39:58.792625 17344 solver.cpp:244]     Train net output #1: loss = 0.029765 (* 1 = 0.029765 loss)\nI0818 03:39:58.862612 17344 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0818 03:42:17.622072 
17344 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:43:40.492462 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 03:43:40.492755 17344 solver.cpp:404]     Test net output #1: loss = 36.4421 (* 1 = 36.4421 loss)\nI0818 03:43:41.835083 17344 solver.cpp:228] Iteration 18200, loss = 0.0869537\nI0818 03:43:41.835124 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:43:41.835139 17344 solver.cpp:244]     Train net output #1: loss = 0.0869535 (* 1 = 0.0869535 loss)\nI0818 03:43:41.903182 17344 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0818 03:46:00.497612 17344 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:47:23.275727 17344 solver.cpp:404]     Test net output #0: accuracy = 0.122\nI0818 03:47:23.276015 17344 solver.cpp:404]     Test net output #1: loss = 31.4192 (* 1 = 31.4192 loss)\nI0818 03:47:24.618793 17344 solver.cpp:228] Iteration 18300, loss = 0.0671831\nI0818 03:47:24.618829 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:47:24.618844 17344 solver.cpp:244]     Train net output #1: loss = 0.0671828 (* 1 = 0.0671828 loss)\nI0818 03:47:24.684192 17344 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0818 03:49:43.233567 17344 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:51:06.004720 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15684\nI0818 03:51:06.004999 17344 solver.cpp:404]     Test net output #1: loss = 13.2956 (* 1 = 13.2956 loss)\nI0818 03:51:07.348297 17344 solver.cpp:228] Iteration 18400, loss = 0.0824705\nI0818 03:51:07.348335 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:51:07.348351 17344 solver.cpp:244]     Train net output #1: loss = 0.0824703 (* 1 = 0.0824703 loss)\nI0818 03:51:07.414659 17344 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0818 03:53:25.908813 17344 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:54:48.677530 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.2602\nI0818 03:54:48.677812 17344 solver.cpp:404]     Test net output #1: loss = 8.05627 (* 1 = 8.05627 loss)\nI0818 03:54:50.020912 17344 solver.cpp:228] Iteration 18500, loss = 0.117731\nI0818 03:54:50.020946 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:54:50.020961 17344 solver.cpp:244]     Train net output #1: loss = 0.117731 (* 1 = 0.117731 loss)\nI0818 03:54:50.085921 17344 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0818 03:57:08.643064 17344 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:58:31.395011 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31572\nI0818 03:58:31.395265 17344 solver.cpp:404]     Test net output #1: loss = 5.66519 (* 1 = 5.66519 loss)\nI0818 03:58:32.738582 17344 solver.cpp:228] Iteration 18600, loss = 0.098437\nI0818 03:58:32.738615 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:58:32.738631 17344 solver.cpp:244]     Train net output #1: loss = 0.0984367 (* 1 = 0.0984367 loss)\nI0818 03:58:32.803853 17344 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0818 04:00:51.364300 17344 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 04:02:14.110270 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13196\nI0818 04:02:14.110496 17344 solver.cpp:404]     Test net output #1: loss = 20.2782 (* 1 = 20.2782 loss)\nI0818 04:02:15.454035 17344 solver.cpp:228] Iteration 18700, loss = 0.0494469\nI0818 04:02:15.454071 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:02:15.454085 17344 solver.cpp:244]     Train net output #1: loss = 0.0494466 (* 1 = 0.0494466 loss)\nI0818 04:02:15.508684 17344 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0818 04:04:34.084748 17344 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 04:05:56.846487 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21428\nI0818 04:05:56.846740 17344 solver.cpp:404]     Test net output #1: loss = 9.79542 (* 1 = 9.79542 loss)\nI0818 
04:05:58.189374 17344 solver.cpp:228] Iteration 18800, loss = 0.0791597\nI0818 04:05:58.189409 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:05:58.189424 17344 solver.cpp:244]     Train net output #1: loss = 0.0791593 (* 1 = 0.0791593 loss)\nI0818 04:05:58.249176 17344 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0818 04:08:16.823369 17344 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 04:09:39.592854 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2408\nI0818 04:09:39.593123 17344 solver.cpp:404]     Test net output #1: loss = 7.78815 (* 1 = 7.78815 loss)\nI0818 04:09:40.936043 17344 solver.cpp:228] Iteration 18900, loss = 0.0419682\nI0818 04:09:40.936076 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:09:40.936092 17344 solver.cpp:244]     Train net output #1: loss = 0.0419679 (* 1 = 0.0419679 loss)\nI0818 04:09:40.999557 17344 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0818 04:11:59.599449 17344 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 04:13:22.371631 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20208\nI0818 04:13:22.371903 17344 solver.cpp:404]     Test net output #1: loss = 12.3657 (* 1 = 12.3657 loss)\nI0818 04:13:23.715548 17344 solver.cpp:228] Iteration 19000, loss = 0.0574545\nI0818 04:13:23.715581 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:13:23.715596 17344 solver.cpp:244]     Train net output #1: loss = 0.0574542 (* 1 = 0.0574542 loss)\nI0818 04:13:23.779510 17344 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0818 04:15:42.343559 17344 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 04:17:05.105659 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30224\nI0818 04:17:05.105903 17344 solver.cpp:404]     Test net output #1: loss = 10.8642 (* 1 = 10.8642 loss)\nI0818 04:17:06.449092 17344 solver.cpp:228] Iteration 19100, loss = 0.161321\nI0818 04:17:06.449126 17344 solver.cpp:244]     Train net 
output #0: accuracy = 0.944\nI0818 04:17:06.449146 17344 solver.cpp:244]     Train net output #1: loss = 0.161321 (* 1 = 0.161321 loss)\nI0818 04:17:06.516808 17344 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0818 04:19:25.090037 17344 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 04:20:47.949651 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13588\nI0818 04:20:47.949911 17344 solver.cpp:404]     Test net output #1: loss = 20.232 (* 1 = 20.232 loss)\nI0818 04:20:49.293253 17344 solver.cpp:228] Iteration 19200, loss = 0.0539527\nI0818 04:20:49.293287 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:20:49.293303 17344 solver.cpp:244]     Train net output #1: loss = 0.0539524 (* 1 = 0.0539524 loss)\nI0818 04:20:49.354193 17344 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0818 04:23:07.966835 17344 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:24:30.830550 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1866\nI0818 04:24:30.830816 17344 solver.cpp:404]     Test net output #1: loss = 20.5137 (* 1 = 20.5137 loss)\nI0818 04:24:32.174156 17344 solver.cpp:228] Iteration 19300, loss = 0.0348067\nI0818 04:24:32.174190 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:24:32.174206 17344 solver.cpp:244]     Train net output #1: loss = 0.0348064 (* 1 = 0.0348064 loss)\nI0818 04:24:32.243005 17344 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0818 04:26:50.842993 17344 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:28:13.698747 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22924\nI0818 04:28:13.699018 17344 solver.cpp:404]     Test net output #1: loss = 15.6565 (* 1 = 15.6565 loss)\nI0818 04:28:15.042006 17344 solver.cpp:228] Iteration 19400, loss = 0.0258001\nI0818 04:28:15.042042 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:28:15.042057 17344 solver.cpp:244]     Train net output #1: loss = 0.0257997 (* 1 = 0.0257997 
loss)\nI0818 04:28:15.106612 17344 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0818 04:30:33.749171 17344 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:31:56.608650 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18892\nI0818 04:31:56.608909 17344 solver.cpp:404]     Test net output #1: loss = 20.5328 (* 1 = 20.5328 loss)\nI0818 04:31:57.951833 17344 solver.cpp:228] Iteration 19500, loss = 0.0426427\nI0818 04:31:57.951867 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:31:57.951882 17344 solver.cpp:244]     Train net output #1: loss = 0.0426424 (* 1 = 0.0426424 loss)\nI0818 04:31:58.012023 17344 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0818 04:34:16.640563 17344 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:35:39.484700 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16932\nI0818 04:35:39.484926 17344 solver.cpp:404]     Test net output #1: loss = 32.6855 (* 1 = 32.6855 loss)\nI0818 04:35:40.827816 17344 solver.cpp:228] Iteration 19600, loss = 0.110835\nI0818 04:35:40.827848 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:35:40.827863 17344 solver.cpp:244]     Train net output #1: loss = 0.110835 (* 1 = 0.110835 loss)\nI0818 04:35:40.897099 17344 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0818 04:37:59.517242 17344 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:39:22.362325 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29672\nI0818 04:39:22.362573 17344 solver.cpp:404]     Test net output #1: loss = 15.3891 (* 1 = 15.3891 loss)\nI0818 04:39:23.706369 17344 solver.cpp:228] Iteration 19700, loss = 0.0590121\nI0818 04:39:23.706403 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:39:23.706419 17344 solver.cpp:244]     Train net output #1: loss = 0.0590118 (* 1 = 0.0590118 loss)\nI0818 04:39:23.764703 17344 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0818 04:41:42.458709 17344 solver.cpp:337] 
Iteration 19800, Testing net (#0)\nI0818 04:43:05.313724 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16292\nI0818 04:43:05.313971 17344 solver.cpp:404]     Test net output #1: loss = 21.6329 (* 1 = 21.6329 loss)\nI0818 04:43:06.657624 17344 solver.cpp:228] Iteration 19800, loss = 0.0235881\nI0818 04:43:06.657661 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:43:06.657683 17344 solver.cpp:244]     Train net output #1: loss = 0.0235878 (* 1 = 0.0235878 loss)\nI0818 04:43:06.726333 17344 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0818 04:45:25.299886 17344 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:46:48.147588 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13648\nI0818 04:46:48.147842 17344 solver.cpp:404]     Test net output #1: loss = 12.982 (* 1 = 12.982 loss)\nI0818 04:46:49.490797 17344 solver.cpp:228] Iteration 19900, loss = 0.0441646\nI0818 04:46:49.490842 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:46:49.490866 17344 solver.cpp:244]     Train net output #1: loss = 0.0441642 (* 1 = 0.0441642 loss)\nI0818 04:46:49.556126 17344 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0818 04:49:08.177852 17344 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:50:31.024520 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1684\nI0818 04:50:31.024741 17344 solver.cpp:404]     Test net output #1: loss = 13.9465 (* 1 = 13.9465 loss)\nI0818 04:50:32.368238 17344 solver.cpp:228] Iteration 20000, loss = 0.0135077\nI0818 04:50:32.368278 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:50:32.368299 17344 solver.cpp:244]     Train net output #1: loss = 0.0135074 (* 1 = 0.0135074 loss)\nI0818 04:50:32.439779 17344 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0818 04:52:51.110266 17344 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:54:13.959166 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1536\nI0818 
04:54:13.959436 17344 solver.cpp:404]     Test net output #1: loss = 13.308 (* 1 = 13.308 loss)\nI0818 04:54:15.302559 17344 solver.cpp:228] Iteration 20100, loss = 0.0331326\nI0818 04:54:15.302595 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:54:15.302610 17344 solver.cpp:244]     Train net output #1: loss = 0.0331323 (* 1 = 0.0331323 loss)\nI0818 04:54:15.370218 17344 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0818 04:56:33.983304 17344 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:57:56.819151 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1412\nI0818 04:57:56.819397 17344 solver.cpp:404]     Test net output #1: loss = 13.8578 (* 1 = 13.8578 loss)\nI0818 04:57:58.162261 17344 solver.cpp:228] Iteration 20200, loss = 0.0284043\nI0818 04:57:58.162297 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:57:58.162312 17344 solver.cpp:244]     Train net output #1: loss = 0.028404 (* 1 = 0.028404 loss)\nI0818 04:57:58.229076 17344 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0818 05:00:16.831272 17344 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 05:01:39.667084 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16956\nI0818 05:01:39.667343 17344 solver.cpp:404]     Test net output #1: loss = 13.8052 (* 1 = 13.8052 loss)\nI0818 05:01:41.015908 17344 solver.cpp:228] Iteration 20300, loss = 0.106997\nI0818 05:01:41.015943 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 05:01:41.015959 17344 solver.cpp:244]     Train net output #1: loss = 0.106997 (* 1 = 0.106997 loss)\nI0818 05:01:41.075486 17344 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0818 05:03:59.680672 17344 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0818 05:05:22.513108 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15624\nI0818 05:05:22.513381 17344 solver.cpp:404]     Test net output #1: loss = 11.0641 (* 1 = 11.0641 loss)\nI0818 05:05:23.856389 17344 
solver.cpp:228] Iteration 20400, loss = 0.0364892\nI0818 05:05:23.856422 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:05:23.856437 17344 solver.cpp:244]     Train net output #1: loss = 0.0364889 (* 1 = 0.0364889 loss)\nI0818 05:05:23.920353 17344 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0818 05:07:42.460083 17344 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 05:09:05.298233 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24492\nI0818 05:09:05.298491 17344 solver.cpp:404]     Test net output #1: loss = 8.53638 (* 1 = 8.53638 loss)\nI0818 05:09:06.640887 17344 solver.cpp:228] Iteration 20500, loss = 0.029874\nI0818 05:09:06.640926 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:09:06.640943 17344 solver.cpp:244]     Train net output #1: loss = 0.0298737 (* 1 = 0.0298737 loss)\nI0818 05:09:06.703124 17344 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0818 05:11:25.273593 17344 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 05:12:48.093533 17344 solver.cpp:404]     Test net output #0: accuracy = 0.171\nI0818 05:12:48.093791 17344 solver.cpp:404]     Test net output #1: loss = 9.09257 (* 1 = 9.09257 loss)\nI0818 05:12:49.436588 17344 solver.cpp:228] Iteration 20600, loss = 0.0221368\nI0818 05:12:49.436622 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:12:49.436637 17344 solver.cpp:244]     Train net output #1: loss = 0.0221366 (* 1 = 0.0221366 loss)\nI0818 05:12:49.493947 17344 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0818 05:15:08.107712 17344 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 05:16:30.929268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2066\nI0818 05:16:30.929535 17344 solver.cpp:404]     Test net output #1: loss = 14.0189 (* 1 = 14.0189 loss)\nI0818 05:16:32.272068 17344 solver.cpp:228] Iteration 20700, loss = 0.0285465\nI0818 05:16:32.272104 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0818 05:16:32.272119 17344 solver.cpp:244]     Train net output #1: loss = 0.0285462 (* 1 = 0.0285462 loss)\nI0818 05:16:32.340581 17344 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0818 05:18:50.936641 17344 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 05:20:13.803859 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1124\nI0818 05:20:13.804159 17344 solver.cpp:404]     Test net output #1: loss = 29.597 (* 1 = 29.597 loss)\nI0818 05:20:15.147192 17344 solver.cpp:228] Iteration 20800, loss = 0.028967\nI0818 05:20:15.147230 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:20:15.147253 17344 solver.cpp:244]     Train net output #1: loss = 0.0289667 (* 1 = 0.0289667 loss)\nI0818 05:20:15.207283 17344 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0818 05:22:33.744761 17344 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 05:23:56.581630 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23352\nI0818 05:23:56.581897 17344 solver.cpp:404]     Test net output #1: loss = 8.2417 (* 1 = 8.2417 loss)\nI0818 05:23:57.924664 17344 solver.cpp:228] Iteration 20900, loss = 0.0368108\nI0818 05:23:57.924700 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:23:57.924715 17344 solver.cpp:244]     Train net output #1: loss = 0.0368105 (* 1 = 0.0368105 loss)\nI0818 05:23:57.992419 17344 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0818 05:26:16.565754 17344 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 05:27:39.372023 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16044\nI0818 05:27:39.372289 17344 solver.cpp:404]     Test net output #1: loss = 20.9586 (* 1 = 20.9586 loss)\nI0818 05:27:40.714895 17344 solver.cpp:228] Iteration 21000, loss = 0.0342127\nI0818 05:27:40.714932 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:27:40.714947 17344 solver.cpp:244]     Train net output #1: loss = 0.0342124 (* 1 = 0.0342124 loss)\nI0818 
05:27:40.784586 17344 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0818 05:29:59.420054 17344 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:31:22.222177 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15696\nI0818 05:31:22.222457 17344 solver.cpp:404]     Test net output #1: loss = 22.4374 (* 1 = 22.4374 loss)\nI0818 05:31:23.565544 17344 solver.cpp:228] Iteration 21100, loss = 0.0650768\nI0818 05:31:23.565579 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:31:23.565594 17344 solver.cpp:244]     Train net output #1: loss = 0.0650765 (* 1 = 0.0650765 loss)\nI0818 05:31:23.636293 17344 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0818 05:33:42.140094 17344 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:35:04.923357 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11332\nI0818 05:35:04.923632 17344 solver.cpp:404]     Test net output #1: loss = 20.3795 (* 1 = 20.3795 loss)\nI0818 05:35:06.265617 17344 solver.cpp:228] Iteration 21200, loss = 0.0854553\nI0818 05:35:06.265651 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:35:06.265666 17344 solver.cpp:244]     Train net output #1: loss = 0.085455 (* 1 = 0.085455 loss)\nI0818 05:35:06.333155 17344 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0818 05:37:24.782788 17344 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:38:47.561589 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14756\nI0818 05:38:47.561872 17344 solver.cpp:404]     Test net output #1: loss = 15.921 (* 1 = 15.921 loss)\nI0818 05:38:48.904526 17344 solver.cpp:228] Iteration 21300, loss = 0.0345271\nI0818 05:38:48.904561 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:38:48.904575 17344 solver.cpp:244]     Train net output #1: loss = 0.0345268 (* 1 = 0.0345268 loss)\nI0818 05:38:48.972158 17344 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0818 05:41:07.536134 17344 solver.cpp:337] Iteration 21400, 
Testing net (#0)\nI0818 05:42:30.320773 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14064\nI0818 05:42:30.321058 17344 solver.cpp:404]     Test net output #1: loss = 15.0602 (* 1 = 15.0602 loss)\nI0818 05:42:31.664245 17344 solver.cpp:228] Iteration 21400, loss = 0.044743\nI0818 05:42:31.664279 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:42:31.664295 17344 solver.cpp:244]     Train net output #1: loss = 0.0447427 (* 1 = 0.0447427 loss)\nI0818 05:42:31.732424 17344 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0818 05:44:50.378506 17344 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:46:13.142244 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14296\nI0818 05:46:13.142527 17344 solver.cpp:404]     Test net output #1: loss = 20.9508 (* 1 = 20.9508 loss)\nI0818 05:46:14.485721 17344 solver.cpp:228] Iteration 21500, loss = 0.0192995\nI0818 05:46:14.485756 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:46:14.485771 17344 solver.cpp:244]     Train net output #1: loss = 0.0192992 (* 1 = 0.0192992 loss)\nI0818 05:46:14.550557 17344 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0818 05:48:33.101583 17344 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 05:49:55.859498 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17648\nI0818 05:49:55.859786 17344 solver.cpp:404]     Test net output #1: loss = 10.931 (* 1 = 10.931 loss)\nI0818 05:49:57.202857 17344 solver.cpp:228] Iteration 21600, loss = 0.0234939\nI0818 05:49:57.202895 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:49:57.202911 17344 solver.cpp:244]     Train net output #1: loss = 0.0234936 (* 1 = 0.0234936 loss)\nI0818 05:49:57.263721 17344 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0818 05:52:15.907644 17344 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:53:38.657410 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19952\nI0818 05:53:38.657685 17344 
solver.cpp:404]     Test net output #1: loss = 8.9988 (* 1 = 8.9988 loss)\nI0818 05:53:40.000247 17344 solver.cpp:228] Iteration 21700, loss = 0.0452894\nI0818 05:53:40.000283 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:53:40.000298 17344 solver.cpp:244]     Train net output #1: loss = 0.0452891 (* 1 = 0.0452891 loss)\nI0818 05:53:40.068807 17344 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0818 05:55:58.676693 17344 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:57:21.419742 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14668\nI0818 05:57:21.420048 17344 solver.cpp:404]     Test net output #1: loss = 11.6401 (* 1 = 11.6401 loss)\nI0818 05:57:22.762715 17344 solver.cpp:228] Iteration 21800, loss = 0.0464932\nI0818 05:57:22.762750 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:57:22.762765 17344 solver.cpp:244]     Train net output #1: loss = 0.0464929 (* 1 = 0.0464929 loss)\nI0818 05:57:22.824214 17344 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0818 05:59:41.408098 17344 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 06:01:04.156877 17344 solver.cpp:404]     Test net output #0: accuracy = 0.163\nI0818 06:01:04.157150 17344 solver.cpp:404]     Test net output #1: loss = 13.8341 (* 1 = 13.8341 loss)\nI0818 06:01:05.500311 17344 solver.cpp:228] Iteration 21900, loss = 0.0285214\nI0818 06:01:05.500345 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:01:05.500360 17344 solver.cpp:244]     Train net output #1: loss = 0.0285211 (* 1 = 0.0285211 loss)\nI0818 06:01:05.560899 17344 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0818 06:03:24.177523 17344 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0818 06:04:46.835315 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2312\nI0818 06:04:46.835597 17344 solver.cpp:404]     Test net output #1: loss = 9.90348 (* 1 = 9.90348 loss)\nI0818 06:04:48.178750 17344 solver.cpp:228] Iteration 
22000, loss = 0.046329\nI0818 06:04:48.178784 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:04:48.178800 17344 solver.cpp:244]     Train net output #1: loss = 0.0463287 (* 1 = 0.0463287 loss)\nI0818 06:04:48.246449 17344 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0818 06:07:06.772032 17344 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 06:08:29.440539 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20144\nI0818 06:08:29.440827 17344 solver.cpp:404]     Test net output #1: loss = 17.1326 (* 1 = 17.1326 loss)\nI0818 06:08:30.783435 17344 solver.cpp:228] Iteration 22100, loss = 0.0265225\nI0818 06:08:30.783468 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:08:30.783483 17344 solver.cpp:244]     Train net output #1: loss = 0.0265221 (* 1 = 0.0265221 loss)\nI0818 06:08:30.845391 17344 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0818 06:10:49.414777 17344 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 06:12:12.081940 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1698\nI0818 06:12:12.082227 17344 solver.cpp:404]     Test net output #1: loss = 22.3535 (* 1 = 22.3535 loss)\nI0818 06:12:13.425376 17344 solver.cpp:228] Iteration 22200, loss = 0.0367829\nI0818 06:12:13.425410 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:12:13.425426 17344 solver.cpp:244]     Train net output #1: loss = 0.0367826 (* 1 = 0.0367826 loss)\nI0818 06:12:13.493620 17344 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0818 06:14:32.101588 17344 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 06:15:54.757442 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11228\nI0818 06:15:54.757721 17344 solver.cpp:404]     Test net output #1: loss = 27.1375 (* 1 = 27.1375 loss)\nI0818 06:15:56.100630 17344 solver.cpp:228] Iteration 22300, loss = 0.0715881\nI0818 06:15:56.100664 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 
06:15:56.100679 17344 solver.cpp:244]     Train net output #1: loss = 0.0715878 (* 1 = 0.0715878 loss)\nI0818 06:15:56.174507 17344 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0818 06:18:14.651566 17344 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 06:19:37.304165 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1556\nI0818 06:19:37.304445 17344 solver.cpp:404]     Test net output #1: loss = 18.7784 (* 1 = 18.7784 loss)\nI0818 06:19:38.646745 17344 solver.cpp:228] Iteration 22400, loss = 0.0606079\nI0818 06:19:38.646780 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:19:38.646795 17344 solver.cpp:244]     Train net output #1: loss = 0.0606076 (* 1 = 0.0606076 loss)\nI0818 06:19:38.719754 17344 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0818 06:21:57.165026 17344 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 06:23:20.696352 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1382\nI0818 06:23:20.696636 17344 solver.cpp:404]     Test net output #1: loss = 14.169 (* 1 = 14.169 loss)\nI0818 06:23:22.041436 17344 solver.cpp:228] Iteration 22500, loss = 0.0464608\nI0818 06:23:22.041481 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:23:22.041496 17344 solver.cpp:244]     Train net output #1: loss = 0.0464605 (* 1 = 0.0464605 loss)\nI0818 06:23:22.129215 17344 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0818 06:25:41.113515 17344 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 06:27:04.664299 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2228\nI0818 06:27:04.664525 17344 solver.cpp:404]     Test net output #1: loss = 13.6796 (* 1 = 13.6796 loss)\nI0818 06:27:06.008682 17344 solver.cpp:228] Iteration 22600, loss = 0.0434447\nI0818 06:27:06.008721 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:27:06.008738 17344 solver.cpp:244]     Train net output #1: loss = 0.0434444 (* 1 = 0.0434444 loss)\nI0818 06:27:06.082398 17344 
sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0818 06:29:25.050792 17344 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:30:48.644644 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27036\nI0818 06:30:48.644938 17344 solver.cpp:404]     Test net output #1: loss = 11.5602 (* 1 = 11.5602 loss)\nI0818 06:30:49.988559 17344 solver.cpp:228] Iteration 22700, loss = 0.0350025\nI0818 06:30:49.988601 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:30:49.988618 17344 solver.cpp:244]     Train net output #1: loss = 0.0350022 (* 1 = 0.0350022 loss)\nI0818 06:30:50.058485 17344 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0818 06:33:09.094519 17344 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:34:32.680356 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15504\nI0818 06:34:32.680622 17344 solver.cpp:404]     Test net output #1: loss = 15.2624 (* 1 = 15.2624 loss)\nI0818 06:34:34.024472 17344 solver.cpp:228] Iteration 22800, loss = 0.101142\nI0818 06:34:34.024514 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 06:34:34.024530 17344 solver.cpp:244]     Train net output #1: loss = 0.101142 (* 1 = 0.101142 loss)\nI0818 06:34:34.098893 17344 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0818 06:36:53.197537 17344 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:38:16.807384 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18988\nI0818 06:38:16.807632 17344 solver.cpp:404]     Test net output #1: loss = 15.0762 (* 1 = 15.0762 loss)\nI0818 06:38:18.152353 17344 solver.cpp:228] Iteration 22900, loss = 0.0385056\nI0818 06:38:18.152393 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:38:18.152410 17344 solver.cpp:244]     Train net output #1: loss = 0.0385052 (* 1 = 0.0385052 loss)\nI0818 06:38:18.224004 17344 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0818 06:40:37.075330 17344 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 
06:42:00.275743 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15076\nI0818 06:42:00.275997 17344 solver.cpp:404]     Test net output #1: loss = 20.7362 (* 1 = 20.7362 loss)\nI0818 06:42:01.619724 17344 solver.cpp:228] Iteration 23000, loss = 0.0511255\nI0818 06:42:01.619767 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:42:01.619783 17344 solver.cpp:244]     Train net output #1: loss = 0.0511252 (* 1 = 0.0511252 loss)\nI0818 06:42:01.692497 17344 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0818 06:44:20.781494 17344 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:45:43.924757 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17436\nI0818 06:45:43.925019 17344 solver.cpp:404]     Test net output #1: loss = 26.5122 (* 1 = 26.5122 loss)\nI0818 06:45:45.270023 17344 solver.cpp:228] Iteration 23100, loss = 0.0565176\nI0818 06:45:45.270063 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:45:45.270079 17344 solver.cpp:244]     Train net output #1: loss = 0.0565172 (* 1 = 0.0565172 loss)\nI0818 06:45:45.338052 17344 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0818 06:48:04.396311 17344 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:49:27.631418 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15508\nI0818 06:49:27.631665 17344 solver.cpp:404]     Test net output #1: loss = 18.4994 (* 1 = 18.4994 loss)\nI0818 06:49:28.974840 17344 solver.cpp:228] Iteration 23200, loss = 0.0286735\nI0818 06:49:28.974884 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:49:28.974900 17344 solver.cpp:244]     Train net output #1: loss = 0.0286732 (* 1 = 0.0286732 loss)\nI0818 06:49:29.047904 17344 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0818 06:51:47.893204 17344 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:53:11.479373 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19932\nI0818 06:53:11.479631 17344 solver.cpp:404]   
  Test net output #1: loss = 15.5133 (* 1 = 15.5133 loss)\nI0818 06:53:12.823724 17344 solver.cpp:228] Iteration 23300, loss = 0.0299106\nI0818 06:53:12.823765 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:53:12.823781 17344 solver.cpp:244]     Train net output #1: loss = 0.0299103 (* 1 = 0.0299103 loss)\nI0818 06:53:12.891746 17344 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0818 06:55:31.882130 17344 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:56:55.072777 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22024\nI0818 06:56:55.073031 17344 solver.cpp:404]     Test net output #1: loss = 13.9478 (* 1 = 13.9478 loss)\nI0818 06:56:56.417645 17344 solver.cpp:228] Iteration 23400, loss = 0.0118918\nI0818 06:56:56.417686 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:56:56.417703 17344 solver.cpp:244]     Train net output #1: loss = 0.0118914 (* 1 = 0.0118914 loss)\nI0818 06:56:56.485595 17344 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0818 06:59:15.585389 17344 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 07:00:39.085549 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2274\nI0818 07:00:39.085834 17344 solver.cpp:404]     Test net output #1: loss = 16.5957 (* 1 = 16.5957 loss)\nI0818 07:00:40.430807 17344 solver.cpp:228] Iteration 23500, loss = 0.0141106\nI0818 07:00:40.430846 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:00:40.430862 17344 solver.cpp:244]     Train net output #1: loss = 0.0141103 (* 1 = 0.0141103 loss)\nI0818 07:00:40.498167 17344 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0818 07:02:59.466663 17344 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0818 07:04:22.958950 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26392\nI0818 07:04:22.959193 17344 solver.cpp:404]     Test net output #1: loss = 14.4972 (* 1 = 14.4972 loss)\nI0818 07:04:24.304193 17344 solver.cpp:228] Iteration 23600, loss = 
0.00813628\nI0818 07:04:24.304236 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:04:24.304252 17344 solver.cpp:244]     Train net output #1: loss = 0.00813598 (* 1 = 0.00813598 loss)\nI0818 07:04:24.368605 17344 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0818 07:06:43.470674 17344 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 07:08:07.062141 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26088\nI0818 07:08:07.062422 17344 solver.cpp:404]     Test net output #1: loss = 14.3642 (* 1 = 14.3642 loss)\nI0818 07:08:08.408957 17344 solver.cpp:228] Iteration 23700, loss = 0.00305229\nI0818 07:08:08.408999 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:08:08.409023 17344 solver.cpp:244]     Train net output #1: loss = 0.00305199 (* 1 = 0.00305199 loss)\nI0818 07:08:08.480937 17344 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0818 07:10:27.445423 17344 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 07:11:50.672570 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26492\nI0818 07:11:50.672878 17344 solver.cpp:404]     Test net output #1: loss = 13.4121 (* 1 = 13.4121 loss)\nI0818 07:11:52.018188 17344 solver.cpp:228] Iteration 23800, loss = 0.0023124\nI0818 07:11:52.018231 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:11:52.018254 17344 solver.cpp:244]     Train net output #1: loss = 0.0023121 (* 1 = 0.0023121 loss)\nI0818 07:11:52.090024 17344 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0818 07:14:11.024862 17344 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 07:15:34.368352 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24208\nI0818 07:15:34.368597 17344 solver.cpp:404]     Test net output #1: loss = 13.5755 (* 1 = 13.5755 loss)\nI0818 07:15:35.713011 17344 solver.cpp:228] Iteration 23900, loss = 0.00121869\nI0818 07:15:35.713055 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:15:35.713078 17344 
solver.cpp:244]     Train net output #1: loss = 0.00121838 (* 1 = 0.00121838 loss)\nI0818 07:15:35.782228 17344 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0818 07:17:54.716471 17344 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 07:19:18.061100 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25632\nI0818 07:19:18.061383 17344 solver.cpp:404]     Test net output #1: loss = 12.29 (* 1 = 12.29 loss)\nI0818 07:19:19.405311 17344 solver.cpp:228] Iteration 24000, loss = 0.00170645\nI0818 07:19:19.405357 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:19:19.405380 17344 solver.cpp:244]     Train net output #1: loss = 0.00170614 (* 1 = 0.00170614 loss)\nI0818 07:19:19.476840 17344 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0818 07:21:38.456724 17344 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 07:23:01.897008 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26976\nI0818 07:23:01.897301 17344 solver.cpp:404]     Test net output #1: loss = 11.7406 (* 1 = 11.7406 loss)\nI0818 07:23:03.241567 17344 solver.cpp:228] Iteration 24100, loss = 0.00158986\nI0818 07:23:03.241613 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:23:03.241636 17344 solver.cpp:244]     Train net output #1: loss = 0.00158956 (* 1 = 0.00158956 loss)\nI0818 07:23:03.306733 17344 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0818 07:25:22.220935 17344 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 07:26:45.441875 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27632\nI0818 07:26:45.442147 17344 solver.cpp:404]     Test net output #1: loss = 11.2242 (* 1 = 11.2242 loss)\nI0818 07:26:46.786768 17344 solver.cpp:228] Iteration 24200, loss = 0.00102381\nI0818 07:26:46.786810 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:26:46.786834 17344 solver.cpp:244]     Train net output #1: loss = 0.0010235 (* 1 = 0.0010235 loss)\nI0818 07:26:46.864387 17344 sgd_solver.cpp:166] 
Iteration 24200, lr = 0.35\nI0818 07:29:05.783593 17344 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 07:30:29.316066 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27764\nI0818 07:30:29.316354 17344 solver.cpp:404]     Test net output #1: loss = 10.6955 (* 1 = 10.6955 loss)\nI0818 07:30:30.661237 17344 solver.cpp:228] Iteration 24300, loss = 0.00129912\nI0818 07:30:30.661281 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:30:30.661303 17344 solver.cpp:244]     Train net output #1: loss = 0.00129882 (* 1 = 0.00129882 loss)\nI0818 07:30:30.727605 17344 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0818 07:32:49.670220 17344 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:34:13.070255 17344 solver.cpp:404]     Test net output #0: accuracy = 0.271\nI0818 07:34:13.070503 17344 solver.cpp:404]     Test net output #1: loss = 10.2337 (* 1 = 10.2337 loss)\nI0818 07:34:14.414626 17344 solver.cpp:228] Iteration 24400, loss = 0.000875336\nI0818 07:34:14.414670 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:34:14.414693 17344 solver.cpp:244]     Train net output #1: loss = 0.000875032 (* 1 = 0.000875032 loss)\nI0818 07:34:14.486057 17344 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0818 07:36:33.566252 17344 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:37:56.977295 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25304\nI0818 07:37:56.977543 17344 solver.cpp:404]     Test net output #1: loss = 10.3576 (* 1 = 10.3576 loss)\nI0818 07:37:58.323148 17344 solver.cpp:228] Iteration 24500, loss = 0.000786446\nI0818 07:37:58.323191 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:37:58.323213 17344 solver.cpp:244]     Train net output #1: loss = 0.000786142 (* 1 = 0.000786142 loss)\nI0818 07:37:58.395436 17344 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0818 07:40:17.485345 17344 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 
07:41:41.046212 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19436\nI0818 07:41:41.046465 17344 solver.cpp:404]     Test net output #1: loss = 11.0121 (* 1 = 11.0121 loss)\nI0818 07:41:42.390955 17344 solver.cpp:228] Iteration 24600, loss = 0.000868205\nI0818 07:41:42.391000 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:41:42.391023 17344 solver.cpp:244]     Train net output #1: loss = 0.000867901 (* 1 = 0.000867901 loss)\nI0818 07:41:42.463855 17344 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0818 07:44:01.398912 17344 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:45:24.971456 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17404\nI0818 07:45:24.971735 17344 solver.cpp:404]     Test net output #1: loss = 13.3585 (* 1 = 13.3585 loss)\nI0818 07:45:26.315974 17344 solver.cpp:228] Iteration 24700, loss = 0.0010367\nI0818 07:45:26.316016 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:45:26.316040 17344 solver.cpp:244]     Train net output #1: loss = 0.0010364 (* 1 = 0.0010364 loss)\nI0818 07:45:26.383096 17344 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0818 07:47:45.237787 17344 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:49:08.568917 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18116\nI0818 07:49:08.569198 17344 solver.cpp:404]     Test net output #1: loss = 15.7296 (* 1 = 15.7296 loss)\nI0818 07:49:09.914759 17344 solver.cpp:228] Iteration 24800, loss = 0.00122208\nI0818 07:49:09.914801 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:49:09.914819 17344 solver.cpp:244]     Train net output #1: loss = 0.00122177 (* 1 = 0.00122177 loss)\nI0818 07:49:09.978035 17344 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0818 07:51:28.862906 17344 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:52:52.254474 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14892\nI0818 07:52:52.254751 17344 solver.cpp:404]     
Test net output #1: loss = 18.5357 (* 1 = 18.5357 loss)\nI0818 07:52:53.598742 17344 solver.cpp:228] Iteration 24900, loss = 0.00075793\nI0818 07:52:53.598786 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:52:53.598809 17344 solver.cpp:244]     Train net output #1: loss = 0.000757626 (* 1 = 0.000757626 loss)\nI0818 07:52:53.670682 17344 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0818 07:55:12.554611 17344 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:56:35.963076 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10088\nI0818 07:56:35.963347 17344 solver.cpp:404]     Test net output #1: loss = 24.7396 (* 1 = 24.7396 loss)\nI0818 07:56:37.307232 17344 solver.cpp:228] Iteration 25000, loss = 0.00110494\nI0818 07:56:37.307276 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:56:37.307299 17344 solver.cpp:244]     Train net output #1: loss = 0.00110464 (* 1 = 0.00110464 loss)\nI0818 07:56:37.375572 17344 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0818 07:58:56.342332 17344 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 08:00:19.971829 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0818 08:00:19.972126 17344 solver.cpp:404]     Test net output #1: loss = 35.8135 (* 1 = 35.8135 loss)\nI0818 08:00:21.316145 17344 solver.cpp:228] Iteration 25100, loss = 0.000868455\nI0818 08:00:21.316187 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:00:21.316210 17344 solver.cpp:244]     Train net output #1: loss = 0.000868151 (* 1 = 0.000868151 loss)\nI0818 08:00:21.381012 17344 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0818 08:02:39.945740 17344 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 08:04:02.695036 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 08:04:02.695332 17344 solver.cpp:404]     Test net output #1: loss = 36.4228 (* 1 = 36.4228 loss)\nI0818 08:04:04.038846 17344 solver.cpp:228] Iteration 25200, loss = 
0.000792967\nI0818 08:04:04.038879 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:04:04.038894 17344 solver.cpp:244]     Train net output #1: loss = 0.000792662 (* 1 = 0.000792662 loss)\nI0818 08:04:04.113173 17344 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0818 08:06:22.699287 17344 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 08:07:45.452424 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0818 08:07:45.452718 17344 solver.cpp:404]     Test net output #1: loss = 41.4035 (* 1 = 41.4035 loss)\nI0818 08:07:46.795850 17344 solver.cpp:228] Iteration 25300, loss = 0.000853754\nI0818 08:07:46.795886 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:07:46.795900 17344 solver.cpp:244]     Train net output #1: loss = 0.00085345 (* 1 = 0.00085345 loss)\nI0818 08:07:46.866230 17344 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0818 08:10:05.391582 17344 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 08:11:28.131636 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 08:11:28.131904 17344 solver.cpp:404]     Test net output #1: loss = 52.601 (* 1 = 52.601 loss)\nI0818 08:11:29.475004 17344 solver.cpp:228] Iteration 25400, loss = 0.00133802\nI0818 08:11:29.475040 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:11:29.475055 17344 solver.cpp:244]     Train net output #1: loss = 0.00133771 (* 1 = 0.00133771 loss)\nI0818 08:11:29.553809 17344 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0818 08:13:48.143359 17344 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 08:15:10.884047 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0818 08:15:10.884359 17344 solver.cpp:404]     Test net output #1: loss = 48.4124 (* 1 = 48.4124 loss)\nI0818 08:15:12.227151 17344 solver.cpp:228] Iteration 25500, loss = 0.000764245\nI0818 08:15:12.227187 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:15:12.227202 17344 
solver.cpp:244]     Train net output #1: loss = 0.000763941 (* 1 = 0.000763941 loss)\nI0818 08:15:12.301319 17344 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0818 08:17:30.826581 17344 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 08:18:53.538877 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 08:18:53.539175 17344 solver.cpp:404]     Test net output #1: loss = 41.2001 (* 1 = 41.2001 loss)\nI0818 08:18:54.882108 17344 solver.cpp:228] Iteration 25600, loss = 0.000768835\nI0818 08:18:54.882144 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:18:54.882160 17344 solver.cpp:244]     Train net output #1: loss = 0.000768531 (* 1 = 0.000768531 loss)\nI0818 08:18:54.957487 17344 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0818 08:21:13.521886 17344 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 08:22:36.182015 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0818 08:22:36.182328 17344 solver.cpp:404]     Test net output #1: loss = 42.424 (* 1 = 42.424 loss)\nI0818 08:22:37.525264 17344 solver.cpp:228] Iteration 25700, loss = 0.00109276\nI0818 08:22:37.525297 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:22:37.525312 17344 solver.cpp:244]     Train net output #1: loss = 0.00109246 (* 1 = 0.00109246 loss)\nI0818 08:22:37.591333 17344 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0818 08:24:56.339438 17344 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 08:26:19.953917 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10156\nI0818 08:26:19.954181 17344 solver.cpp:404]     Test net output #1: loss = 41.0778 (* 1 = 41.0778 loss)\nI0818 08:26:21.299471 17344 solver.cpp:228] Iteration 25800, loss = 0.000660719\nI0818 08:26:21.299513 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:26:21.299528 17344 solver.cpp:244]     Train net output #1: loss = 0.000660415 (* 1 = 0.000660415 loss)\nI0818 08:26:21.369343 17344 
sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0818 08:28:40.434245 17344 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 08:30:03.985319 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12368\nI0818 08:30:03.985597 17344 solver.cpp:404]     Test net output #1: loss = 38.7934 (* 1 = 38.7934 loss)\nI0818 08:30:05.330796 17344 solver.cpp:228] Iteration 25900, loss = 0.000729525\nI0818 08:30:05.330840 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:30:05.330855 17344 solver.cpp:244]     Train net output #1: loss = 0.00072922 (* 1 = 0.00072922 loss)\nI0818 08:30:05.393829 17344 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0818 08:32:24.467540 17344 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:33:47.788743 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11364\nI0818 08:33:47.788972 17344 solver.cpp:404]     Test net output #1: loss = 42.2665 (* 1 = 42.2665 loss)\nI0818 08:33:49.133283 17344 solver.cpp:228] Iteration 26000, loss = 0.0009268\nI0818 08:33:49.133327 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:33:49.133342 17344 solver.cpp:244]     Train net output #1: loss = 0.000926496 (* 1 = 0.000926496 loss)\nI0818 08:33:49.208487 17344 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0818 08:36:08.236337 17344 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:37:31.423393 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1022\nI0818 08:37:31.423622 17344 solver.cpp:404]     Test net output #1: loss = 43.5885 (* 1 = 43.5885 loss)\nI0818 08:37:32.769008 17344 solver.cpp:228] Iteration 26100, loss = 0.000721067\nI0818 08:37:32.769052 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:37:32.769068 17344 solver.cpp:244]     Train net output #1: loss = 0.000720763 (* 1 = 0.000720763 loss)\nI0818 08:37:32.842070 17344 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0818 08:39:51.818512 17344 solver.cpp:337] Iteration 26200, Testing net 
(#0)\nI0818 08:41:15.312124 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18056\nI0818 08:41:15.312407 17344 solver.cpp:404]     Test net output #1: loss = 45.2951 (* 1 = 45.2951 loss)\nI0818 08:41:16.657712 17344 solver.cpp:228] Iteration 26200, loss = 0.000925275\nI0818 08:41:16.657757 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:41:16.657773 17344 solver.cpp:244]     Train net output #1: loss = 0.000924971 (* 1 = 0.000924971 loss)\nI0818 08:41:16.731690 17344 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0818 08:43:35.794242 17344 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:44:59.419911 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10804\nI0818 08:44:59.420186 17344 solver.cpp:404]     Test net output #1: loss = 52.9033 (* 1 = 52.9033 loss)\nI0818 08:45:00.765971 17344 solver.cpp:228] Iteration 26300, loss = 0.000802429\nI0818 08:45:00.766016 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:45:00.766031 17344 solver.cpp:244]     Train net output #1: loss = 0.000802125 (* 1 = 0.000802125 loss)\nI0818 08:45:00.842692 17344 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0818 08:47:19.720796 17344 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:48:43.355136 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0818 08:48:43.355389 17344 solver.cpp:404]     Test net output #1: loss = 51.963 (* 1 = 51.963 loss)\nI0818 08:48:44.700796 17344 solver.cpp:228] Iteration 26400, loss = 0.000913849\nI0818 08:48:44.700841 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:48:44.700865 17344 solver.cpp:244]     Train net output #1: loss = 0.000913545 (* 1 = 0.000913545 loss)\nI0818 08:48:44.775650 17344 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0818 08:51:03.731856 17344 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:52:27.340370 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 08:52:27.340685 17344 
solver.cpp:404]     Test net output #1: loss = 49.2688 (* 1 = 49.2688 loss)\nI0818 08:52:28.686194 17344 solver.cpp:228] Iteration 26500, loss = 0.000772765\nI0818 08:52:28.686238 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:52:28.686260 17344 solver.cpp:244]     Train net output #1: loss = 0.000772461 (* 1 = 0.000772461 loss)\nI0818 08:52:28.755759 17344 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0818 08:54:47.798606 17344 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:56:11.417271 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1064\nI0818 08:56:11.417541 17344 solver.cpp:404]     Test net output #1: loss = 49.9056 (* 1 = 49.9056 loss)\nI0818 08:56:12.761350 17344 solver.cpp:228] Iteration 26600, loss = 0.000774824\nI0818 08:56:12.761396 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:56:12.761420 17344 solver.cpp:244]     Train net output #1: loss = 0.000774519 (* 1 = 0.000774519 loss)\nI0818 08:56:12.835074 17344 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0818 08:58:31.916965 17344 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 08:59:55.530645 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09932\nI0818 08:59:55.530899 17344 solver.cpp:404]     Test net output #1: loss = 51.1516 (* 1 = 51.1516 loss)\nI0818 08:59:56.875903 17344 solver.cpp:228] Iteration 26700, loss = 0.000671076\nI0818 08:59:56.875948 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:59:56.875972 17344 solver.cpp:244]     Train net output #1: loss = 0.000670772 (* 1 = 0.000670772 loss)\nI0818 08:59:56.942083 17344 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0818 09:02:16.008580 17344 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 09:03:39.613040 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 09:03:39.613307 17344 solver.cpp:404]     Test net output #1: loss = 55.9026 (* 1 = 55.9026 loss)\nI0818 09:03:40.958516 17344 solver.cpp:228] 
Iteration 26800, loss = 0.000919127\nI0818 09:03:40.958559 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:03:40.958585 17344 solver.cpp:244]     Train net output #1: loss = 0.000918823 (* 1 = 0.000918823 loss)\nI0818 09:03:41.025656 17344 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0818 09:06:00.094689 17344 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 09:07:23.691170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09468\nI0818 09:07:23.691422 17344 solver.cpp:404]     Test net output #1: loss = 52.1333 (* 1 = 52.1333 loss)\nI0818 09:07:25.036649 17344 solver.cpp:228] Iteration 26900, loss = 0.000697137\nI0818 09:07:25.036695 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:07:25.036717 17344 solver.cpp:244]     Train net output #1: loss = 0.000696833 (* 1 = 0.000696833 loss)\nI0818 09:07:25.104008 17344 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0818 09:09:43.948762 17344 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 09:11:07.559569 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1244\nI0818 09:11:07.559861 17344 solver.cpp:404]     Test net output #1: loss = 76.4692 (* 1 = 76.4692 loss)\nI0818 09:11:08.904027 17344 solver.cpp:228] Iteration 27000, loss = 1.54839\nI0818 09:11:08.904072 17344 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0818 09:11:08.904095 17344 solver.cpp:244]     Train net output #1: loss = 1.54839 (* 1 = 1.54839 loss)\nI0818 09:11:08.975612 17344 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0818 09:13:27.877805 17344 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 09:14:51.178390 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10584\nI0818 09:14:51.178665 17344 solver.cpp:404]     Test net output #1: loss = 77.0544 (* 1 = 77.0544 loss)\nI0818 09:14:52.523691 17344 solver.cpp:228] Iteration 27100, loss = 1.02004\nI0818 09:14:52.523737 17344 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0818 
09:14:52.523762 17344 solver.cpp:244]     Train net output #1: loss = 1.02004 (* 1 = 1.02004 loss)\nI0818 09:14:52.594305 17344 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0818 09:17:11.673457 17344 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 09:18:35.285940 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 09:18:35.286198 17344 solver.cpp:404]     Test net output #1: loss = 63.7403 (* 1 = 63.7403 loss)\nI0818 09:18:36.630303 17344 solver.cpp:228] Iteration 27200, loss = 0.638871\nI0818 09:18:36.630347 17344 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0818 09:18:36.630370 17344 solver.cpp:244]     Train net output #1: loss = 0.638871 (* 1 = 0.638871 loss)\nI0818 09:18:36.703011 17344 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0818 09:20:55.709414 17344 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 09:22:19.015307 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 09:22:19.015616 17344 solver.cpp:404]     Test net output #1: loss = 18.5677 (* 1 = 18.5677 loss)\nI0818 09:22:20.360766 17344 solver.cpp:228] Iteration 27300, loss = 0.481369\nI0818 09:22:20.360812 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 09:22:20.360837 17344 solver.cpp:244]     Train net output #1: loss = 0.481369 (* 1 = 0.481369 loss)\nI0818 09:22:20.434597 17344 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0818 09:24:39.522121 17344 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 09:26:02.683910 17344 solver.cpp:404]     Test net output #0: accuracy = 0.111\nI0818 09:26:02.684166 17344 solver.cpp:404]     Test net output #1: loss = 16.67 (* 1 = 16.67 loss)\nI0818 09:26:04.029500 17344 solver.cpp:228] Iteration 27400, loss = 0.453367\nI0818 09:26:04.029543 17344 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0818 09:26:04.029566 17344 solver.cpp:244]     Train net output #1: loss = 0.453367 (* 1 = 0.453367 loss)\nI0818 09:26:04.098251 17344 
sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0818 09:28:23.179677 17344 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 09:29:46.714912 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10892\nI0818 09:29:46.715284 17344 solver.cpp:404]     Test net output #1: loss = 14.5034 (* 1 = 14.5034 loss)\nI0818 09:29:48.060529 17344 solver.cpp:228] Iteration 27500, loss = 0.501366\nI0818 09:29:48.060573 17344 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 09:29:48.060596 17344 solver.cpp:244]     Train net output #1: loss = 0.501366 (* 1 = 0.501366 loss)\nI0818 09:29:48.133309 17344 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0818 09:32:07.210204 17344 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:33:30.737774 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 09:33:30.738055 17344 solver.cpp:404]     Test net output #1: loss = 8.55121 (* 1 = 8.55121 loss)\nI0818 09:33:32.083130 17344 solver.cpp:228] Iteration 27600, loss = 0.404555\nI0818 09:33:32.083173 17344 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 09:33:32.083196 17344 solver.cpp:244]     Train net output #1: loss = 0.404555 (* 1 = 0.404555 loss)\nI0818 09:33:32.152837 17344 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0818 09:35:51.200572 17344 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:37:14.825280 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 09:37:14.825551 17344 solver.cpp:404]     Test net output #1: loss = 6.29175 (* 1 = 6.29175 loss)\nI0818 09:37:16.171077 17344 solver.cpp:228] Iteration 27700, loss = 0.327226\nI0818 09:37:16.171120 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 09:37:16.171144 17344 solver.cpp:244]     Train net output #1: loss = 0.327226 (* 1 = 0.327226 loss)\nI0818 09:37:16.246429 17344 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0818 09:39:35.196784 17344 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 
09:40:58.563210 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0818 09:40:58.563478 17344 solver.cpp:404]     Test net output #1: loss = 4.65783 (* 1 = 4.65783 loss)\nI0818 09:40:59.908807 17344 solver.cpp:228] Iteration 27800, loss = 0.475048\nI0818 09:40:59.908851 17344 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 09:40:59.908875 17344 solver.cpp:244]     Train net output #1: loss = 0.475048 (* 1 = 0.475048 loss)\nI0818 09:40:59.973063 17344 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0818 09:43:19.012409 17344 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:44:42.391352 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12876\nI0818 09:44:42.391623 17344 solver.cpp:404]     Test net output #1: loss = 4.26261 (* 1 = 4.26261 loss)\nI0818 09:44:43.736451 17344 solver.cpp:228] Iteration 27900, loss = 0.320169\nI0818 09:44:43.736500 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 09:44:43.736521 17344 solver.cpp:244]     Train net output #1: loss = 0.320169 (* 1 = 0.320169 loss)\nI0818 09:44:43.812024 17344 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0818 09:47:02.887442 17344 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:48:26.428720 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0818 09:48:26.428988 17344 solver.cpp:404]     Test net output #1: loss = 7.0194 (* 1 = 7.0194 loss)\nI0818 09:48:27.774813 17344 solver.cpp:228] Iteration 28000, loss = 0.189494\nI0818 09:48:27.774854 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 09:48:27.774871 17344 solver.cpp:244]     Train net output #1: loss = 0.189494 (* 1 = 0.189494 loss)\nI0818 09:48:27.842885 17344 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0818 09:50:46.882781 17344 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:52:10.460590 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1386\nI0818 09:52:10.460903 17344 solver.cpp:404]     Test net 
output #1: loss = 4.12016 (* 1 = 4.12016 loss)\nI0818 09:52:11.805865 17344 solver.cpp:228] Iteration 28100, loss = 0.29494\nI0818 09:52:11.805910 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 09:52:11.805940 17344 solver.cpp:244]     Train net output #1: loss = 0.29494 (* 1 = 0.29494 loss)\nI0818 09:52:11.874711 17344 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0818 09:54:30.796694 17344 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:55:54.145148 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17332\nI0818 09:55:54.145418 17344 solver.cpp:404]     Test net output #1: loss = 3.98516 (* 1 = 3.98516 loss)\nI0818 09:55:55.489955 17344 solver.cpp:228] Iteration 28200, loss = 0.228478\nI0818 09:55:55.490001 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 09:55:55.490025 17344 solver.cpp:244]     Train net output #1: loss = 0.228478 (* 1 = 0.228478 loss)\nI0818 09:55:55.556192 17344 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0818 09:58:14.457742 17344 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 09:59:37.771910 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17116\nI0818 09:59:37.772157 17344 solver.cpp:404]     Test net output #1: loss = 3.6612 (* 1 = 3.6612 loss)\nI0818 09:59:39.115264 17344 solver.cpp:228] Iteration 28300, loss = 0.161251\nI0818 09:59:39.115308 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 09:59:39.115321 17344 solver.cpp:244]     Train net output #1: loss = 0.161251 (* 1 = 0.161251 loss)\nI0818 09:59:39.182746 17344 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0818 10:01:58.018084 17344 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 10:03:21.370645 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17516\nI0818 10:03:21.370978 17344 solver.cpp:404]     Test net output #1: loss = 4.44827 (* 1 = 4.44827 loss)\nI0818 10:03:22.714856 17344 solver.cpp:228] Iteration 28400, loss = 0.180401\nI0818 
10:03:22.714897 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:03:22.714912 17344 solver.cpp:244]     Train net output #1: loss = 0.180401 (* 1 = 0.180401 loss)\nI0818 10:03:22.783190 17344 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0818 10:05:41.641659 17344 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 10:07:05.140133 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14536\nI0818 10:07:05.140403 17344 solver.cpp:404]     Test net output #1: loss = 5.24714 (* 1 = 5.24714 loss)\nI0818 10:07:06.484484 17344 solver.cpp:228] Iteration 28500, loss = 0.2097\nI0818 10:07:06.484526 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:07:06.484542 17344 solver.cpp:244]     Train net output #1: loss = 0.2097 (* 1 = 0.2097 loss)\nI0818 10:07:06.550464 17344 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0818 10:09:25.400213 17344 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 10:10:48.730849 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1118\nI0818 10:10:48.731111 17344 solver.cpp:404]     Test net output #1: loss = 4.2269 (* 1 = 4.2269 loss)\nI0818 10:10:50.075146 17344 solver.cpp:228] Iteration 28600, loss = 0.17556\nI0818 10:10:50.075187 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 10:10:50.075203 17344 solver.cpp:244]     Train net output #1: loss = 0.17556 (* 1 = 0.17556 loss)\nI0818 10:10:50.144582 17344 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0818 10:13:09.018023 17344 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 10:14:32.550408 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11872\nI0818 10:14:32.550698 17344 solver.cpp:404]     Test net output #1: loss = 5.9278 (* 1 = 5.9278 loss)\nI0818 10:14:33.894484 17344 solver.cpp:228] Iteration 28700, loss = 0.159761\nI0818 10:14:33.894526 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 10:14:33.894541 17344 solver.cpp:244]     Train net output #1: 
loss = 0.159761 (* 1 = 0.159761 loss)\nI0818 10:14:33.967854 17344 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0818 10:16:52.862977 17344 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 10:18:16.462303 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11044\nI0818 10:18:16.462546 17344 solver.cpp:404]     Test net output #1: loss = 5.06827 (* 1 = 5.06827 loss)\nI0818 10:18:17.806362 17344 solver.cpp:228] Iteration 28800, loss = 0.218183\nI0818 10:18:17.806406 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 10:18:17.806421 17344 solver.cpp:244]     Train net output #1: loss = 0.218183 (* 1 = 0.218183 loss)\nI0818 10:18:17.873082 17344 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0818 10:20:36.773007 17344 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 10:22:00.322540 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1016\nI0818 10:22:00.322790 17344 solver.cpp:404]     Test net output #1: loss = 4.65971 (* 1 = 4.65971 loss)\nI0818 10:22:01.667062 17344 solver.cpp:228] Iteration 28900, loss = 0.144584\nI0818 10:22:01.667101 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:22:01.667117 17344 solver.cpp:244]     Train net output #1: loss = 0.144584 (* 1 = 0.144584 loss)\nI0818 10:22:01.739650 17344 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0818 10:24:20.794308 17344 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 10:25:44.082948 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17972\nI0818 10:25:44.083237 17344 solver.cpp:404]     Test net output #1: loss = 5.39251 (* 1 = 5.39251 loss)\nI0818 10:25:45.428493 17344 solver.cpp:228] Iteration 29000, loss = 0.198499\nI0818 10:25:45.428539 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 10:25:45.428561 17344 solver.cpp:244]     Train net output #1: loss = 0.198499 (* 1 = 0.198499 loss)\nI0818 10:25:45.503398 17344 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0818 10:28:04.563891 
17344 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 10:29:27.759992 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13804\nI0818 10:29:27.760241 17344 solver.cpp:404]     Test net output #1: loss = 4.62037 (* 1 = 4.62037 loss)\nI0818 10:29:29.104755 17344 solver.cpp:228] Iteration 29100, loss = 0.18661\nI0818 10:29:29.104800 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:29:29.104822 17344 solver.cpp:244]     Train net output #1: loss = 0.18661 (* 1 = 0.18661 loss)\nI0818 10:29:29.176388 17344 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0818 10:31:48.283645 17344 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:33:11.843235 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20416\nI0818 10:33:11.843551 17344 solver.cpp:404]     Test net output #1: loss = 4.36021 (* 1 = 4.36021 loss)\nI0818 10:33:13.188637 17344 solver.cpp:228] Iteration 29200, loss = 0.120223\nI0818 10:33:13.188680 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:33:13.188704 17344 solver.cpp:244]     Train net output #1: loss = 0.120223 (* 1 = 0.120223 loss)\nI0818 10:33:13.262030 17344 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0818 10:35:32.284456 17344 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:36:55.839136 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23216\nI0818 10:36:55.839426 17344 solver.cpp:404]     Test net output #1: loss = 4.17146 (* 1 = 4.17146 loss)\nI0818 10:36:57.183364 17344 solver.cpp:228] Iteration 29300, loss = 0.106046\nI0818 10:36:57.183408 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:36:57.183431 17344 solver.cpp:244]     Train net output #1: loss = 0.106046 (* 1 = 0.106046 loss)\nI0818 10:36:57.252236 17344 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0818 10:39:16.160697 17344 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:40:39.455495 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.12556\nI0818 10:40:39.455804 17344 solver.cpp:404]     Test net output #1: loss = 4.63805 (* 1 = 4.63805 loss)\nI0818 10:40:40.800819 17344 solver.cpp:228] Iteration 29400, loss = 0.118976\nI0818 10:40:40.800863 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:40:40.800879 17344 solver.cpp:244]     Train net output #1: loss = 0.118976 (* 1 = 0.118976 loss)\nI0818 10:40:40.866566 17344 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0818 10:42:59.961715 17344 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:44:23.499114 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19856\nI0818 10:44:23.499358 17344 solver.cpp:404]     Test net output #1: loss = 3.42405 (* 1 = 3.42405 loss)\nI0818 10:44:24.844252 17344 solver.cpp:228] Iteration 29500, loss = 0.0911749\nI0818 10:44:24.844295 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:44:24.844312 17344 solver.cpp:244]     Train net output #1: loss = 0.0911749 (* 1 = 0.0911749 loss)\nI0818 10:44:24.914681 17344 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0818 10:46:43.860263 17344 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:48:07.428542 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15\nI0818 10:48:07.428786 17344 solver.cpp:404]     Test net output #1: loss = 6.14072 (* 1 = 6.14072 loss)\nI0818 10:48:08.773546 17344 solver.cpp:228] Iteration 29600, loss = 0.112286\nI0818 10:48:08.773591 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:48:08.773607 17344 solver.cpp:244]     Train net output #1: loss = 0.112286 (* 1 = 0.112286 loss)\nI0818 10:48:08.846575 17344 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0818 10:50:27.815454 17344 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:51:51.404232 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19232\nI0818 10:51:51.404516 17344 solver.cpp:404]     Test net output #1: loss = 4.86292 (* 1 = 4.86292 loss)\nI0818 10:51:52.748976 
17344 solver.cpp:228] Iteration 29700, loss = 0.101162\nI0818 10:51:52.749020 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:51:52.749037 17344 solver.cpp:244]     Train net output #1: loss = 0.101162 (* 1 = 0.101162 loss)\nI0818 10:51:52.821585 17344 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0818 10:54:11.669018 17344 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 10:55:35.267268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25196\nI0818 10:55:35.267526 17344 solver.cpp:404]     Test net output #1: loss = 4.10614 (* 1 = 4.10614 loss)\nI0818 10:55:36.612190 17344 solver.cpp:228] Iteration 29800, loss = 0.0523306\nI0818 10:55:36.612237 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:55:36.612254 17344 solver.cpp:244]     Train net output #1: loss = 0.0523306 (* 1 = 0.0523306 loss)\nI0818 10:55:36.678686 17344 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0818 10:57:55.595155 17344 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:59:19.198904 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1606\nI0818 10:59:19.199153 17344 solver.cpp:404]     Test net output #1: loss = 6.28548 (* 1 = 6.28548 loss)\nI0818 10:59:20.542999 17344 solver.cpp:228] Iteration 29900, loss = 0.146787\nI0818 10:59:20.543042 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:59:20.543057 17344 solver.cpp:244]     Train net output #1: loss = 0.146787 (* 1 = 0.146787 loss)\nI0818 10:59:20.611207 17344 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0818 11:01:39.460458 17344 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 11:03:03.035282 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27244\nI0818 11:03:03.035571 17344 solver.cpp:404]     Test net output #1: loss = 3.78552 (* 1 = 3.78552 loss)\nI0818 11:03:04.379905 17344 solver.cpp:228] Iteration 30000, loss = 0.111739\nI0818 11:03:04.379948 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI0818 11:03:04.379964 17344 solver.cpp:244]     Train net output #1: loss = 0.111739 (* 1 = 0.111739 loss)\nI0818 11:03:04.455010 17344 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0818 11:05:23.434386 17344 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 11:06:47.015246 17344 solver.cpp:404]     Test net output #0: accuracy = 0.52856\nI0818 11:06:47.015499 17344 solver.cpp:404]     Test net output #1: loss = 1.76225 (* 1 = 1.76225 loss)\nI0818 11:06:48.360563 17344 solver.cpp:228] Iteration 30100, loss = 0.0966422\nI0818 11:06:48.360605 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:06:48.360620 17344 solver.cpp:244]     Train net output #1: loss = 0.0966422 (* 1 = 0.0966422 loss)\nI0818 11:06:48.430058 17344 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0818 11:09:07.457747 17344 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 11:10:31.053719 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39784\nI0818 11:10:31.054009 17344 solver.cpp:404]     Test net output #1: loss = 2.57772 (* 1 = 2.57772 loss)\nI0818 11:10:32.397828 17344 solver.cpp:228] Iteration 30200, loss = 0.0881015\nI0818 11:10:32.397872 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 11:10:32.397888 17344 solver.cpp:244]     Train net output #1: loss = 0.0881016 (* 1 = 0.0881016 loss)\nI0818 11:10:32.466452 17344 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0818 11:12:51.456868 17344 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 11:14:14.991783 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42828\nI0818 11:14:14.992038 17344 solver.cpp:404]     Test net output #1: loss = 2.30645 (* 1 = 2.30645 loss)\nI0818 11:14:16.336609 17344 solver.cpp:228] Iteration 30300, loss = 0.0645306\nI0818 11:14:16.336654 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:14:16.336678 17344 solver.cpp:244]     Train net output #1: loss = 0.0645306 (* 1 = 0.0645306 loss)\nI0818 
11:14:16.402453 17344 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0818 11:16:35.344153 17344 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 11:17:58.598570 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28536\nI0818 11:17:58.598843 17344 solver.cpp:404]     Test net output #1: loss = 3.93286 (* 1 = 3.93286 loss)\nI0818 11:17:59.943219 17344 solver.cpp:228] Iteration 30400, loss = 0.0976628\nI0818 11:17:59.943266 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:17:59.943289 17344 solver.cpp:244]     Train net output #1: loss = 0.0976628 (* 1 = 0.0976628 loss)\nI0818 11:18:00.012045 17344 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0818 11:20:18.881757 17344 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 11:21:42.059470 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38664\nI0818 11:21:42.059737 17344 solver.cpp:404]     Test net output #1: loss = 3.20278 (* 1 = 3.20278 loss)\nI0818 11:21:43.404005 17344 solver.cpp:228] Iteration 30500, loss = 0.116772\nI0818 11:21:43.404050 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:21:43.404074 17344 solver.cpp:244]     Train net output #1: loss = 0.116772 (* 1 = 0.116772 loss)\nI0818 11:21:43.469045 17344 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0818 11:24:02.550647 17344 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 11:25:25.775995 17344 solver.cpp:404]     Test net output #0: accuracy = 0.276\nI0818 11:25:25.776283 17344 solver.cpp:404]     Test net output #1: loss = 4.33351 (* 1 = 4.33351 loss)\nI0818 11:25:27.121579 17344 solver.cpp:228] Iteration 30600, loss = 0.0686134\nI0818 11:25:27.121624 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:25:27.121647 17344 solver.cpp:244]     Train net output #1: loss = 0.0686134 (* 1 = 0.0686134 loss)\nI0818 11:25:27.191623 17344 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0818 11:27:46.179318 17344 solver.cpp:337] Iteration 30700, 
Testing net (#0)\nI0818 11:29:09.461084 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2058\nI0818 11:29:09.461323 17344 solver.cpp:404]     Test net output #1: loss = 6.84153 (* 1 = 6.84153 loss)\nI0818 11:29:10.806406 17344 solver.cpp:228] Iteration 30700, loss = 0.0671525\nI0818 11:29:10.806449 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:29:10.806473 17344 solver.cpp:244]     Train net output #1: loss = 0.0671525 (* 1 = 0.0671525 loss)\nI0818 11:29:10.881431 17344 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0818 11:31:29.827786 17344 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 11:32:53.266122 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2332\nI0818 11:32:53.266422 17344 solver.cpp:404]     Test net output #1: loss = 7.77318 (* 1 = 7.77318 loss)\nI0818 11:32:54.611423 17344 solver.cpp:228] Iteration 30800, loss = 0.0516326\nI0818 11:32:54.611464 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:32:54.611487 17344 solver.cpp:244]     Train net output #1: loss = 0.0516327 (* 1 = 0.0516327 loss)\nI0818 11:32:54.682159 17344 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0818 11:35:13.577392 17344 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:36:37.159656 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25652\nI0818 11:36:37.159910 17344 solver.cpp:404]     Test net output #1: loss = 4.71168 (* 1 = 4.71168 loss)\nI0818 11:36:38.504729 17344 solver.cpp:228] Iteration 30900, loss = 0.123582\nI0818 11:36:38.504775 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:36:38.504797 17344 solver.cpp:244]     Train net output #1: loss = 0.123582 (* 1 = 0.123582 loss)\nI0818 11:36:38.571671 17344 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0818 11:38:57.499888 17344 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:40:21.079560 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3586\nI0818 11:40:21.079852 17344 
solver.cpp:404]     Test net output #1: loss = 3.27278 (* 1 = 3.27278 loss)\nI0818 11:40:22.423306 17344 solver.cpp:228] Iteration 31000, loss = 0.100602\nI0818 11:40:22.423352 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:40:22.423375 17344 solver.cpp:244]     Train net output #1: loss = 0.100602 (* 1 = 0.100602 loss)\nI0818 11:40:22.495692 17344 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0818 11:42:41.428390 17344 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:44:04.968140 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2828\nI0818 11:44:04.968417 17344 solver.cpp:404]     Test net output #1: loss = 5.6096 (* 1 = 5.6096 loss)\nI0818 11:44:06.312312 17344 solver.cpp:228] Iteration 31100, loss = 0.0506881\nI0818 11:44:06.312356 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:44:06.312379 17344 solver.cpp:244]     Train net output #1: loss = 0.0506882 (* 1 = 0.0506882 loss)\nI0818 11:44:06.388895 17344 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0818 11:46:25.216612 17344 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:47:48.812464 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14344\nI0818 11:47:48.812719 17344 solver.cpp:404]     Test net output #1: loss = 10.8953 (* 1 = 10.8953 loss)\nI0818 11:47:50.156831 17344 solver.cpp:228] Iteration 31200, loss = 0.0287076\nI0818 11:47:50.156872 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:47:50.156895 17344 solver.cpp:244]     Train net output #1: loss = 0.0287076 (* 1 = 0.0287076 loss)\nI0818 11:47:50.227471 17344 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0818 11:50:09.081785 17344 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:51:32.696589 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18028\nI0818 11:51:32.696909 17344 solver.cpp:404]     Test net output #1: loss = 15.2097 (* 1 = 15.2097 loss)\nI0818 11:51:34.041826 17344 solver.cpp:228] Iteration 31300, 
loss = 0.0769807\nI0818 11:51:34.041872 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:51:34.041895 17344 solver.cpp:244]     Train net output #1: loss = 0.0769807 (* 1 = 0.0769807 loss)\nI0818 11:51:34.107355 17344 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0818 11:53:53.047632 17344 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:55:16.666293 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29192\nI0818 11:55:16.666553 17344 solver.cpp:404]     Test net output #1: loss = 5.81311 (* 1 = 5.81311 loss)\nI0818 11:55:18.010151 17344 solver.cpp:228] Iteration 31400, loss = 0.0667621\nI0818 11:55:18.010202 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:55:18.010227 17344 solver.cpp:244]     Train net output #1: loss = 0.0667622 (* 1 = 0.0667622 loss)\nI0818 11:55:18.085147 17344 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0818 11:57:36.939265 17344 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:59:00.503955 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2954\nI0818 11:59:00.504212 17344 solver.cpp:404]     Test net output #1: loss = 5.44322 (* 1 = 5.44322 loss)\nI0818 11:59:01.848803 17344 solver.cpp:228] Iteration 31500, loss = 0.105809\nI0818 11:59:01.848846 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:59:01.848870 17344 solver.cpp:244]     Train net output #1: loss = 0.105809 (* 1 = 0.105809 loss)\nI0818 11:59:01.919047 17344 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0818 12:01:20.911053 17344 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 12:02:44.507983 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4384\nI0818 12:02:44.508291 17344 solver.cpp:404]     Test net output #1: loss = 3.43733 (* 1 = 3.43733 loss)\nI0818 12:02:45.852083 17344 solver.cpp:228] Iteration 31600, loss = 0.0356747\nI0818 12:02:45.852128 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:02:45.852151 17344 
solver.cpp:244]     Train net output #1: loss = 0.0356747 (* 1 = 0.0356747 loss)\nI0818 12:02:45.918332 17344 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0818 12:05:04.774493 17344 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 12:06:28.351569 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41064\nI0818 12:06:28.351817 17344 solver.cpp:404]     Test net output #1: loss = 4.53731 (* 1 = 4.53731 loss)\nI0818 12:06:29.696662 17344 solver.cpp:228] Iteration 31700, loss = 0.134371\nI0818 12:06:29.696709 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 12:06:29.696732 17344 solver.cpp:244]     Train net output #1: loss = 0.134371 (* 1 = 0.134371 loss)\nI0818 12:06:29.775684 17344 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0818 12:08:48.755527 17344 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 12:10:12.343926 17344 solver.cpp:404]     Test net output #0: accuracy = 0.48336\nI0818 12:10:12.344244 17344 solver.cpp:404]     Test net output #1: loss = 2.77695 (* 1 = 2.77695 loss)\nI0818 12:10:13.688179 17344 solver.cpp:228] Iteration 31800, loss = 0.0406018\nI0818 12:10:13.688228 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:10:13.688252 17344 solver.cpp:244]     Train net output #1: loss = 0.0406018 (* 1 = 0.0406018 loss)\nI0818 12:10:13.759642 17344 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0818 12:12:32.833256 17344 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 12:13:56.439018 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4708\nI0818 12:13:56.439311 17344 solver.cpp:404]     Test net output #1: loss = 2.60949 (* 1 = 2.60949 loss)\nI0818 12:13:57.783965 17344 solver.cpp:228] Iteration 31900, loss = 0.0239695\nI0818 12:13:57.784013 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:13:57.784035 17344 solver.cpp:244]     Train net output #1: loss = 0.0239695 (* 1 = 0.0239695 loss)\nI0818 12:13:57.856578 17344 sgd_solver.cpp:166] 
Iteration 31900, lr = 0.35\nI0818 12:16:16.853541 17344 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 12:17:40.448130 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3562\nI0818 12:17:40.448405 17344 solver.cpp:404]     Test net output #1: loss = 3.76144 (* 1 = 3.76144 loss)\nI0818 12:17:41.792301 17344 solver.cpp:228] Iteration 32000, loss = 0.0171198\nI0818 12:17:41.792348 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:17:41.792371 17344 solver.cpp:244]     Train net output #1: loss = 0.0171198 (* 1 = 0.0171198 loss)\nI0818 12:17:41.861845 17344 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0818 12:20:00.914654 17344 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 12:21:24.523591 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41556\nI0818 12:21:24.523914 17344 solver.cpp:404]     Test net output #1: loss = 3.33975 (* 1 = 3.33975 loss)\nI0818 12:21:25.868515 17344 solver.cpp:228] Iteration 32100, loss = 0.0888399\nI0818 12:21:25.868561 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:21:25.868583 17344 solver.cpp:244]     Train net output #1: loss = 0.0888399 (* 1 = 0.0888399 loss)\nI0818 12:21:25.942574 17344 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0818 12:23:44.918876 17344 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 12:25:08.522522 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2276\nI0818 12:25:08.522773 17344 solver.cpp:404]     Test net output #1: loss = 6.55877 (* 1 = 6.55877 loss)\nI0818 12:25:09.867071 17344 solver.cpp:228] Iteration 32200, loss = 0.0756621\nI0818 12:25:09.867116 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:25:09.867141 17344 solver.cpp:244]     Train net output #1: loss = 0.0756621 (* 1 = 0.0756621 loss)\nI0818 12:25:09.937412 17344 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0818 12:27:28.881271 17344 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 12:28:52.486374 
17344 solver.cpp:404]     Test net output #0: accuracy = 0.32136\nI0818 12:28:52.486621 17344 solver.cpp:404]     Test net output #1: loss = 5.71911 (* 1 = 5.71911 loss)\nI0818 12:28:53.830111 17344 solver.cpp:228] Iteration 32300, loss = 0.0226739\nI0818 12:28:53.830157 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:28:53.830180 17344 solver.cpp:244]     Train net output #1: loss = 0.0226739 (* 1 = 0.0226739 loss)\nI0818 12:28:53.899025 17344 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0818 12:31:12.932615 17344 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 12:32:36.537251 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37056\nI0818 12:32:36.537600 17344 solver.cpp:404]     Test net output #1: loss = 3.63789 (* 1 = 3.63789 loss)\nI0818 12:32:37.882432 17344 solver.cpp:228] Iteration 32400, loss = 0.0404621\nI0818 12:32:37.882475 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:32:37.882498 17344 solver.cpp:244]     Train net output #1: loss = 0.0404621 (* 1 = 0.0404621 loss)\nI0818 12:32:37.956665 17344 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0818 12:34:56.912660 17344 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:36:20.568984 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34988\nI0818 12:36:20.569330 17344 solver.cpp:404]     Test net output #1: loss = 3.42319 (* 1 = 3.42319 loss)\nI0818 12:36:21.913278 17344 solver.cpp:228] Iteration 32500, loss = 0.0290149\nI0818 12:36:21.913321 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:36:21.913345 17344 solver.cpp:244]     Train net output #1: loss = 0.0290149 (* 1 = 0.0290149 loss)\nI0818 12:36:21.982808 17344 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0818 12:38:40.895823 17344 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 12:40:04.543853 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34704\nI0818 12:40:04.544169 17344 solver.cpp:404]     Test net output 
#1: loss = 3.37771 (* 1 = 3.37771 loss)\nI0818 12:40:05.889586 17344 solver.cpp:228] Iteration 32600, loss = 0.0176383\nI0818 12:40:05.889628 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:40:05.889652 17344 solver.cpp:244]     Train net output #1: loss = 0.0176383 (* 1 = 0.0176383 loss)\nI0818 12:40:05.955080 17344 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0818 12:42:24.860093 17344 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 12:43:48.504508 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39932\nI0818 12:43:48.504855 17344 solver.cpp:404]     Test net output #1: loss = 4.03503 (* 1 = 4.03503 loss)\nI0818 12:43:49.848804 17344 solver.cpp:228] Iteration 32700, loss = 0.0446384\nI0818 12:43:49.848845 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:43:49.848867 17344 solver.cpp:244]     Train net output #1: loss = 0.0446384 (* 1 = 0.0446384 loss)\nI0818 12:43:49.923748 17344 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0818 12:46:08.911317 17344 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:47:32.560987 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35456\nI0818 12:47:32.561316 17344 solver.cpp:404]     Test net output #1: loss = 4.87178 (* 1 = 4.87178 loss)\nI0818 12:47:33.905853 17344 solver.cpp:228] Iteration 32800, loss = 0.0531061\nI0818 12:47:33.905895 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:47:33.905920 17344 solver.cpp:244]     Train net output #1: loss = 0.0531061 (* 1 = 0.0531061 loss)\nI0818 12:47:33.972352 17344 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0818 12:49:52.937122 17344 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:51:16.567737 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1572\nI0818 12:51:16.568089 17344 solver.cpp:404]     Test net output #1: loss = 8.99613 (* 1 = 8.99613 loss)\nI0818 12:51:17.911629 17344 solver.cpp:228] Iteration 32900, loss = 0.0303768\nI0818 
12:51:17.911669 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:51:17.911684 17344 solver.cpp:244]     Train net output #1: loss = 0.0303767 (* 1 = 0.0303767 loss)\nI0818 12:51:17.984239 17344 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0818 12:53:36.822301 17344 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:55:00.429793 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1794\nI0818 12:55:00.430110 17344 solver.cpp:404]     Test net output #1: loss = 8.52723 (* 1 = 8.52723 loss)\nI0818 12:55:01.774157 17344 solver.cpp:228] Iteration 33000, loss = 0.059529\nI0818 12:55:01.774195 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:55:01.774211 17344 solver.cpp:244]     Train net output #1: loss = 0.0595289 (* 1 = 0.0595289 loss)\nI0818 12:55:01.843562 17344 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0818 12:57:20.664929 17344 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:58:44.277319 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25396\nI0818 12:58:44.277588 17344 solver.cpp:404]     Test net output #1: loss = 6.47232 (* 1 = 6.47232 loss)\nI0818 12:58:45.621726 17344 solver.cpp:228] Iteration 33100, loss = 0.0472129\nI0818 12:58:45.621768 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:58:45.621784 17344 solver.cpp:244]     Train net output #1: loss = 0.0472128 (* 1 = 0.0472128 loss)\nI0818 12:58:45.693943 17344 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0818 13:01:04.622505 17344 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 13:02:28.221901 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2856\nI0818 13:02:28.222211 17344 solver.cpp:404]     Test net output #1: loss = 7.15631 (* 1 = 7.15631 loss)\nI0818 13:02:29.567404 17344 solver.cpp:228] Iteration 33200, loss = 0.0124738\nI0818 13:02:29.567452 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:02:29.567474 17344 solver.cpp:244]     Train 
net output #1: loss = 0.0124738 (* 1 = 0.0124738 loss)\nI0818 13:02:29.638403 17344 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0818 13:04:48.625099 17344 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 13:06:12.210438 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26988\nI0818 13:06:12.210765 17344 solver.cpp:404]     Test net output #1: loss = 8.60908 (* 1 = 8.60908 loss)\nI0818 13:06:13.555186 17344 solver.cpp:228] Iteration 33300, loss = 0.0195563\nI0818 13:06:13.555232 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:06:13.555254 17344 solver.cpp:244]     Train net output #1: loss = 0.0195563 (* 1 = 0.0195563 loss)\nI0818 13:06:13.624312 17344 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0818 13:08:32.520774 17344 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 13:09:56.091080 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15552\nI0818 13:09:56.091394 17344 solver.cpp:404]     Test net output #1: loss = 12.3669 (* 1 = 12.3669 loss)\nI0818 13:09:57.435608 17344 solver.cpp:228] Iteration 33400, loss = 0.0272986\nI0818 13:09:57.435655 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:09:57.435678 17344 solver.cpp:244]     Train net output #1: loss = 0.0272985 (* 1 = 0.0272985 loss)\nI0818 13:09:57.508678 17344 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0818 13:12:16.569134 17344 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 13:13:40.166654 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14156\nI0818 13:13:40.166923 17344 solver.cpp:404]     Test net output #1: loss = 15.4169 (* 1 = 15.4169 loss)\nI0818 13:13:41.511977 17344 solver.cpp:228] Iteration 33500, loss = 0.035314\nI0818 13:13:41.512024 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:13:41.512048 17344 solver.cpp:244]     Train net output #1: loss = 0.035314 (* 1 = 0.035314 loss)\nI0818 13:13:41.578636 17344 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0818 
13:16:00.661860 17344 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 13:17:24.255091 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25364\nI0818 13:17:24.255378 17344 solver.cpp:404]     Test net output #1: loss = 9.22798 (* 1 = 9.22798 loss)\nI0818 13:17:25.600284 17344 solver.cpp:228] Iteration 33600, loss = 0.033803\nI0818 13:17:25.600332 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:17:25.600354 17344 solver.cpp:244]     Train net output #1: loss = 0.033803 (* 1 = 0.033803 loss)\nI0818 13:17:25.667758 17344 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0818 13:19:44.654911 17344 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 13:21:08.229852 17344 solver.cpp:404]     Test net output #0: accuracy = 0.354\nI0818 13:21:08.230137 17344 solver.cpp:404]     Test net output #1: loss = 5.09916 (* 1 = 5.09916 loss)\nI0818 13:21:09.573894 17344 solver.cpp:228] Iteration 33700, loss = 0.0724222\nI0818 13:21:09.573937 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:21:09.573952 17344 solver.cpp:244]     Train net output #1: loss = 0.0724222 (* 1 = 0.0724222 loss)\nI0818 13:21:09.638204 17344 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0818 13:23:28.270201 17344 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 13:24:51.855756 17344 solver.cpp:404]     Test net output #0: accuracy = 0.43588\nI0818 13:24:51.856021 17344 solver.cpp:404]     Test net output #1: loss = 3.95037 (* 1 = 3.95037 loss)\nI0818 13:24:53.196604 17344 solver.cpp:228] Iteration 33800, loss = 0.041916\nI0818 13:24:53.196645 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:24:53.196661 17344 solver.cpp:244]     Train net output #1: loss = 0.041916 (* 1 = 0.041916 loss)\nI0818 13:24:53.265152 17344 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0818 13:27:11.774884 17344 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 13:28:35.378522 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.45456\nI0818 13:28:35.378789 17344 solver.cpp:404]     Test net output #1: loss = 3.00405 (* 1 = 3.00405 loss)\nI0818 13:28:36.719379 17344 solver.cpp:228] Iteration 33900, loss = 0.025612\nI0818 13:28:36.719421 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:28:36.719437 17344 solver.cpp:244]     Train net output #1: loss = 0.025612 (* 1 = 0.025612 loss)\nI0818 13:28:36.800014 17344 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0818 13:30:55.358340 17344 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 13:32:18.948710 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5004\nI0818 13:32:18.949012 17344 solver.cpp:404]     Test net output #1: loss = 2.84605 (* 1 = 2.84605 loss)\nI0818 13:32:20.290190 17344 solver.cpp:228] Iteration 34000, loss = 0.0568945\nI0818 13:32:20.290231 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:32:20.290246 17344 solver.cpp:244]     Train net output #1: loss = 0.0568945 (* 1 = 0.0568945 loss)\nI0818 13:32:20.366071 17344 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0818 13:34:38.884160 17344 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:36:02.488553 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35944\nI0818 13:36:02.488862 17344 solver.cpp:404]     Test net output #1: loss = 5.22518 (* 1 = 5.22518 loss)\nI0818 13:36:03.829290 17344 solver.cpp:228] Iteration 34100, loss = 0.0748563\nI0818 13:36:03.829334 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:36:03.829349 17344 solver.cpp:244]     Train net output #1: loss = 0.0748563 (* 1 = 0.0748563 loss)\nI0818 13:36:03.898782 17344 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0818 13:38:22.425065 17344 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 13:39:46.003170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.33736\nI0818 13:39:46.003419 17344 solver.cpp:404]     Test net output #1: loss = 5.59718 (* 1 = 5.59718 
loss)\nI0818 13:39:47.344130 17344 solver.cpp:228] Iteration 34200, loss = 0.070112\nI0818 13:39:47.344174 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:39:47.344189 17344 solver.cpp:244]     Train net output #1: loss = 0.070112 (* 1 = 0.070112 loss)\nI0818 13:39:47.420078 17344 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0818 13:42:06.026103 17344 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:43:28.793611 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35828\nI0818 13:43:28.793910 17344 solver.cpp:404]     Test net output #1: loss = 5.15209 (* 1 = 5.15209 loss)\nI0818 13:43:30.133859 17344 solver.cpp:228] Iteration 34300, loss = 0.0274772\nI0818 13:43:30.133898 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:43:30.133913 17344 solver.cpp:244]     Train net output #1: loss = 0.0274772 (* 1 = 0.0274772 loss)\nI0818 13:43:30.216109 17344 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0818 13:45:48.671857 17344 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:47:11.414171 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41428\nI0818 13:47:11.414455 17344 solver.cpp:404]     Test net output #1: loss = 4.34262 (* 1 = 4.34262 loss)\nI0818 13:47:12.753855 17344 solver.cpp:228] Iteration 34400, loss = 0.0268857\nI0818 13:47:12.753893 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:47:12.753908 17344 solver.cpp:244]     Train net output #1: loss = 0.0268857 (* 1 = 0.0268857 loss)\nI0818 13:47:12.820495 17344 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0818 13:49:31.038920 17344 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:50:53.792500 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44336\nI0818 13:50:53.792827 17344 solver.cpp:404]     Test net output #1: loss = 4.0044 (* 1 = 4.0044 loss)\nI0818 13:50:55.131645 17344 solver.cpp:228] Iteration 34500, loss = 0.0292304\nI0818 13:50:55.131682 17344 solver.cpp:244]     
Train net output #0: accuracy = 0.992\nI0818 13:50:55.131706 17344 solver.cpp:244]     Train net output #1: loss = 0.0292304 (* 1 = 0.0292304 loss)\nI0818 13:50:55.194242 17344 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0818 13:53:13.345213 17344 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:54:36.103255 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39508\nI0818 13:54:36.103559 17344 solver.cpp:404]     Test net output #1: loss = 4.74973 (* 1 = 4.74973 loss)\nI0818 13:54:37.443792 17344 solver.cpp:228] Iteration 34600, loss = 0.0173614\nI0818 13:54:37.443835 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:54:37.443856 17344 solver.cpp:244]     Train net output #1: loss = 0.0173614 (* 1 = 0.0173614 loss)\nI0818 13:54:37.514129 17344 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0818 13:56:55.776497 17344 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:58:18.533689 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3838\nI0818 13:58:18.534006 17344 solver.cpp:404]     Test net output #1: loss = 4.60168 (* 1 = 4.60168 loss)\nI0818 13:58:19.874045 17344 solver.cpp:228] Iteration 34700, loss = 0.0120013\nI0818 13:58:19.874083 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:58:19.874104 17344 solver.cpp:244]     Train net output #1: loss = 0.0120013 (* 1 = 0.0120013 loss)\nI0818 13:58:19.944236 17344 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0818 14:00:38.237460 17344 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 14:02:01.012125 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37892\nI0818 14:02:01.012435 17344 solver.cpp:404]     Test net output #1: loss = 5.49459 (* 1 = 5.49459 loss)\nI0818 14:02:02.352128 17344 solver.cpp:228] Iteration 34800, loss = 0.0128361\nI0818 14:02:02.352169 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:02:02.352191 17344 solver.cpp:244]     Train net output #1: loss = 0.0128361 (* 1 = 
0.0128361 loss)\nI0818 14:02:02.418251 17344 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0818 14:04:20.648617 17344 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 14:05:43.410884 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31648\nI0818 14:05:43.411202 17344 solver.cpp:404]     Test net output #1: loss = 6.72473 (* 1 = 6.72473 loss)\nI0818 14:05:44.751636 17344 solver.cpp:228] Iteration 34900, loss = 0.0269325\nI0818 14:05:44.751672 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:05:44.751694 17344 solver.cpp:244]     Train net output #1: loss = 0.0269325 (* 1 = 0.0269325 loss)\nI0818 14:05:44.811144 17344 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0818 14:08:02.972003 17344 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 14:09:25.712674 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24064\nI0818 14:09:25.712985 17344 solver.cpp:404]     Test net output #1: loss = 9.3674 (* 1 = 9.3674 loss)\nI0818 14:09:27.058251 17344 solver.cpp:228] Iteration 35000, loss = 0.173711\nI0818 14:09:27.058289 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 14:09:27.058310 17344 solver.cpp:244]     Train net output #1: loss = 0.173711 (* 1 = 0.173711 loss)\nI0818 14:09:27.113410 17344 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0818 14:11:45.288136 17344 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 14:13:08.038547 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32752\nI0818 14:13:08.039014 17344 solver.cpp:404]     Test net output #1: loss = 6.01305 (* 1 = 6.01305 loss)\nI0818 14:13:09.378111 17344 solver.cpp:228] Iteration 35100, loss = 0.0420206\nI0818 14:13:09.378146 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:13:09.378170 17344 solver.cpp:244]     Train net output #1: loss = 0.0420206 (* 1 = 0.0420206 loss)\nI0818 14:13:09.448117 17344 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0818 14:15:27.545580 17344 
solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 14:16:50.294665 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44044\nI0818 14:16:50.294975 17344 solver.cpp:404]     Test net output #1: loss = 5.05653 (* 1 = 5.05653 loss)\nI0818 14:16:51.633543 17344 solver.cpp:228] Iteration 35200, loss = 0.026737\nI0818 14:16:51.633579 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:16:51.633601 17344 solver.cpp:244]     Train net output #1: loss = 0.026737 (* 1 = 0.026737 loss)\nI0818 14:16:51.703403 17344 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0818 14:19:09.889587 17344 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 14:20:32.635249 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36568\nI0818 14:20:32.635551 17344 solver.cpp:404]     Test net output #1: loss = 5.01371 (* 1 = 5.01371 loss)\nI0818 14:20:33.975153 17344 solver.cpp:228] Iteration 35300, loss = 0.0205644\nI0818 14:20:33.975186 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:20:33.975200 17344 solver.cpp:244]     Train net output #1: loss = 0.0205645 (* 1 = 0.0205645 loss)\nI0818 14:20:34.044981 17344 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0818 14:22:52.278079 17344 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 14:24:15.039492 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30104\nI0818 14:24:15.039810 17344 solver.cpp:404]     Test net output #1: loss = 6.17768 (* 1 = 6.17768 loss)\nI0818 14:24:16.380039 17344 solver.cpp:228] Iteration 35400, loss = 0.0214905\nI0818 14:24:16.380074 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:24:16.380089 17344 solver.cpp:244]     Train net output #1: loss = 0.0214906 (* 1 = 0.0214906 loss)\nI0818 14:24:16.451644 17344 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0818 14:26:34.711750 17344 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 14:27:57.468119 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.29952\nI0818 14:27:57.468405 17344 solver.cpp:404]     Test net output #1: loss = 6.73589 (* 1 = 6.73589 loss)\nI0818 14:27:58.807879 17344 solver.cpp:228] Iteration 35500, loss = 0.0252516\nI0818 14:27:58.807912 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:27:58.807926 17344 solver.cpp:244]     Train net output #1: loss = 0.0252516 (* 1 = 0.0252516 loss)\nI0818 14:27:58.884251 17344 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0818 14:30:17.048513 17344 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 14:31:39.804725 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39824\nI0818 14:31:39.805018 17344 solver.cpp:404]     Test net output #1: loss = 4.42854 (* 1 = 4.42854 loss)\nI0818 14:31:41.144080 17344 solver.cpp:228] Iteration 35600, loss = 0.18997\nI0818 14:31:41.144112 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 14:31:41.144127 17344 solver.cpp:244]     Train net output #1: loss = 0.18997 (* 1 = 0.18997 loss)\nI0818 14:31:41.208639 17344 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0818 14:33:59.417361 17344 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 14:37:52.267462 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2844\nI0818 14:37:52.268842 17344 solver.cpp:404]     Test net output #1: loss = 6.81013 (* 1 = 6.81013 loss)\nI0818 14:37:53.612116 17344 solver.cpp:228] Iteration 35700, loss = 0.0391033\nI0818 14:37:53.612202 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:37:53.612227 17344 solver.cpp:244]     Train net output #1: loss = 0.0391033 (* 1 = 0.0391033 loss)\nI0818 14:37:53.674823 17344 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0818 14:40:11.785816 17344 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 14:41:34.517769 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20376\nI0818 14:41:34.518081 17344 solver.cpp:404]     Test net output #1: loss = 10.0494 (* 1 = 10.0494 loss)\nI0818 14:41:35.856997 
17344 solver.cpp:228] Iteration 35800, loss = 0.0503206\nI0818 14:41:35.857035 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:41:35.857059 17344 solver.cpp:244]     Train net output #1: loss = 0.0503206 (* 1 = 0.0503206 loss)\nI0818 14:41:35.922507 17344 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0818 14:44:06.574476 17344 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:45:29.313598 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29848\nI0818 14:45:29.313952 17344 solver.cpp:404]     Test net output #1: loss = 6.27455 (* 1 = 6.27455 loss)\nI0818 14:45:30.653404 17344 solver.cpp:228] Iteration 35900, loss = 0.0454485\nI0818 14:45:30.653442 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:45:30.653466 17344 solver.cpp:244]     Train net output #1: loss = 0.0454486 (* 1 = 0.0454486 loss)\nI0818 14:45:30.726608 17344 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0818 14:47:48.798079 17344 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:49:11.530876 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2826\nI0818 14:49:11.531198 17344 solver.cpp:404]     Test net output #1: loss = 6.70625 (* 1 = 6.70625 loss)\nI0818 14:49:12.873220 17344 solver.cpp:228] Iteration 36000, loss = 0.0100548\nI0818 14:49:12.873260 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:49:12.873283 17344 solver.cpp:244]     Train net output #1: loss = 0.0100549 (* 1 = 0.0100549 loss)\nI0818 14:49:12.940372 17344 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0818 14:51:31.166268 17344 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:52:53.900387 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31876\nI0818 14:52:53.900691 17344 solver.cpp:404]     Test net output #1: loss = 6.31311 (* 1 = 6.31311 loss)\nI0818 14:52:55.240039 17344 solver.cpp:228] Iteration 36100, loss = 0.00915522\nI0818 14:52:55.240077 17344 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0818 14:52:55.240090 17344 solver.cpp:244]     Train net output #1: loss = 0.00915526 (* 1 = 0.00915526 loss)\nI0818 14:52:55.308533 17344 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0818 14:55:13.356299 17344 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:56:36.077683 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30664\nI0818 14:56:36.077994 17344 solver.cpp:404]     Test net output #1: loss = 6.91859 (* 1 = 6.91859 loss)\nI0818 14:56:37.416374 17344 solver.cpp:228] Iteration 36200, loss = 0.00260594\nI0818 14:56:37.416409 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:56:37.416424 17344 solver.cpp:244]     Train net output #1: loss = 0.00260597 (* 1 = 0.00260597 loss)\nI0818 14:56:37.479440 17344 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0818 14:58:55.531244 17344 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 15:00:18.258761 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41624\nI0818 15:00:18.259071 17344 solver.cpp:404]     Test net output #1: loss = 4.50724 (* 1 = 4.50724 loss)\nI0818 15:00:19.597116 17344 solver.cpp:228] Iteration 36300, loss = 0.00136342\nI0818 15:00:19.597151 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:00:19.597167 17344 solver.cpp:244]     Train net output #1: loss = 0.00136345 (* 1 = 0.00136345 loss)\nI0818 15:00:19.664656 17344 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0818 15:02:37.712487 17344 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 15:04:00.448907 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39784\nI0818 15:04:00.449203 17344 solver.cpp:404]     Test net output #1: loss = 4.64983 (* 1 = 4.64983 loss)\nI0818 15:04:01.788270 17344 solver.cpp:228] Iteration 36400, loss = 0.0010842\nI0818 15:04:01.788305 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:04:01.788321 17344 solver.cpp:244]     Train net output #1: loss = 0.00108423 (* 1 = 0.00108423 loss)\nI0818 
15:04:01.849867 17344 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 15:06:19.950016 17344 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 15:07:42.684631 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38064\nI0818 15:07:42.684923 17344 solver.cpp:404]     Test net output #1: loss = 4.75066 (* 1 = 4.75066 loss)\nI0818 15:07:44.023452 17344 solver.cpp:228] Iteration 36500, loss = 0.000899202\nI0818 15:07:44.023488 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:07:44.023504 17344 solver.cpp:244]     Train net output #1: loss = 0.00089924 (* 1 = 0.00089924 loss)\nI0818 15:07:44.089778 17344 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 15:10:02.178474 17344 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 15:11:24.870307 17344 solver.cpp:404]     Test net output #0: accuracy = 0.33972\nI0818 15:11:24.870616 17344 solver.cpp:404]     Test net output #1: loss = 5.10534 (* 1 = 5.10534 loss)\nI0818 15:11:26.209497 17344 solver.cpp:228] Iteration 36600, loss = 0.000834713\nI0818 15:11:26.209533 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:11:26.209548 17344 solver.cpp:244]     Train net output #1: loss = 0.00083475 (* 1 = 0.00083475 loss)\nI0818 15:11:26.275123 17344 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 15:13:44.361382 17344 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 15:15:07.016242 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30272\nI0818 15:15:07.016549 17344 solver.cpp:404]     Test net output #1: loss = 5.43143 (* 1 = 5.43143 loss)\nI0818 15:15:08.355145 17344 solver.cpp:228] Iteration 36700, loss = 0.00126606\nI0818 15:15:08.355180 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:15:08.355196 17344 solver.cpp:244]     Train net output #1: loss = 0.0012661 (* 1 = 0.0012661 loss)\nI0818 15:15:08.418970 17344 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 15:17:26.490766 17344 solver.cpp:337] Iteration 36800, 
Testing net (#0)\nI0818 15:18:49.138808 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27344\nI0818 15:18:49.139082 17344 solver.cpp:404]     Test net output #1: loss = 5.95756 (* 1 = 5.95756 loss)\nI0818 15:18:50.477710 17344 solver.cpp:228] Iteration 36800, loss = 0.000887513\nI0818 15:18:50.477743 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:18:50.477758 17344 solver.cpp:244]     Train net output #1: loss = 0.000887551 (* 1 = 0.000887551 loss)\nI0818 15:18:50.540493 17344 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 15:21:08.605640 17344 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 15:22:31.261335 17344 solver.cpp:404]     Test net output #0: accuracy = 0.248\nI0818 15:22:31.261631 17344 solver.cpp:404]     Test net output #1: loss = 6.76447 (* 1 = 6.76447 loss)\nI0818 15:22:32.600455 17344 solver.cpp:228] Iteration 36900, loss = 0.000618445\nI0818 15:22:32.600488 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:22:32.600504 17344 solver.cpp:244]     Train net output #1: loss = 0.000618482 (* 1 = 0.000618482 loss)\nI0818 15:22:32.671105 17344 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 15:24:50.764339 17344 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 15:26:13.424335 17344 solver.cpp:404]     Test net output #0: accuracy = 0.236\nI0818 15:26:13.424628 17344 solver.cpp:404]     Test net output #1: loss = 7.82225 (* 1 = 7.82225 loss)\nI0818 15:26:14.763828 17344 solver.cpp:228] Iteration 37000, loss = 0.000844625\nI0818 15:26:14.763864 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:26:14.763880 17344 solver.cpp:244]     Train net output #1: loss = 0.000844663 (* 1 = 0.000844663 loss)\nI0818 15:26:14.840018 17344 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 15:28:32.921648 17344 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 15:29:55.569963 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22876\nI0818 
15:29:55.570255 17344 solver.cpp:404]     Test net output #1: loss = 9.01805 (* 1 = 9.01805 loss)\nI0818 15:29:56.909544 17344 solver.cpp:228] Iteration 37100, loss = 0.001026\nI0818 15:29:56.909579 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:29:56.909595 17344 solver.cpp:244]     Train net output #1: loss = 0.00102604 (* 1 = 0.00102604 loss)\nI0818 15:29:56.982214 17344 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 15:32:15.051551 17344 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 15:33:37.696934 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24544\nI0818 15:33:37.697234 17344 solver.cpp:404]     Test net output #1: loss = 9.91648 (* 1 = 9.91648 loss)\nI0818 15:33:39.035605 17344 solver.cpp:228] Iteration 37200, loss = 0.00139605\nI0818 15:33:39.035640 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:33:39.035655 17344 solver.cpp:244]     Train net output #1: loss = 0.00139609 (* 1 = 0.00139609 loss)\nI0818 15:33:39.107986 17344 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 15:35:57.195408 17344 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:37:19.839431 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24656\nI0818 15:37:19.839725 17344 solver.cpp:404]     Test net output #1: loss = 11.3202 (* 1 = 11.3202 loss)\nI0818 15:37:21.178755 17344 solver.cpp:228] Iteration 37300, loss = 0.00116555\nI0818 15:37:21.178786 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:37:21.178802 17344 solver.cpp:244]     Train net output #1: loss = 0.00116559 (* 1 = 0.00116559 loss)\nI0818 15:37:21.255105 17344 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 15:39:39.340085 17344 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 15:41:01.985111 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27268\nI0818 15:41:01.985402 17344 solver.cpp:404]     Test net output #1: loss = 12.5702 (* 1 = 12.5702 loss)\nI0818 15:41:03.323506 17344 
solver.cpp:228] Iteration 37400, loss = 0.000508922\nI0818 15:41:03.323539 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:41:03.323554 17344 solver.cpp:244]     Train net output #1: loss = 0.00050896 (* 1 = 0.00050896 loss)\nI0818 15:41:03.402930 17344 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 15:43:21.448132 17344 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:44:44.113849 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26036\nI0818 15:44:44.114166 17344 solver.cpp:404]     Test net output #1: loss = 14.1759 (* 1 = 14.1759 loss)\nI0818 15:44:45.452728 17344 solver.cpp:228] Iteration 37500, loss = 0.000495662\nI0818 15:44:45.452760 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:44:45.452775 17344 solver.cpp:244]     Train net output #1: loss = 0.000495699 (* 1 = 0.000495699 loss)\nI0818 15:44:45.527185 17344 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 15:47:03.574745 17344 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:48:26.310348 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2394\nI0818 15:48:26.310659 17344 solver.cpp:404]     Test net output #1: loss = 16.2157 (* 1 = 16.2157 loss)\nI0818 15:48:27.649531 17344 solver.cpp:228] Iteration 37600, loss = 0.000624957\nI0818 15:48:27.649564 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:48:27.649580 17344 solver.cpp:244]     Train net output #1: loss = 0.000624994 (* 1 = 0.000624994 loss)\nI0818 15:48:27.719197 17344 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 15:50:45.786495 17344 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:52:08.534852 17344 solver.cpp:404]     Test net output #0: accuracy = 0.20744\nI0818 15:52:08.535159 17344 solver.cpp:404]     Test net output #1: loss = 18.3959 (* 1 = 18.3959 loss)\nI0818 15:52:09.873850 17344 solver.cpp:228] Iteration 37700, loss = 0.000826404\nI0818 15:52:09.873883 17344 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0818 15:52:09.873898 17344 solver.cpp:244]     Train net output #1: loss = 0.000826441 (* 1 = 0.000826441 loss)\nI0818 15:52:09.947913 17344 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 15:54:28.008601 17344 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:55:50.747153 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15248\nI0818 15:55:50.747459 17344 solver.cpp:404]     Test net output #1: loss = 20.2636 (* 1 = 20.2636 loss)\nI0818 15:55:52.085922 17344 solver.cpp:228] Iteration 37800, loss = 0.000836015\nI0818 15:55:52.085958 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:55:52.085973 17344 solver.cpp:244]     Train net output #1: loss = 0.000836052 (* 1 = 0.000836052 loss)\nI0818 15:55:52.160588 17344 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 15:58:10.217540 17344 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:59:32.957096 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13352\nI0818 15:59:32.957391 17344 solver.cpp:404]     Test net output #1: loss = 22.5852 (* 1 = 22.5852 loss)\nI0818 15:59:34.295941 17344 solver.cpp:228] Iteration 37900, loss = 0.000841513\nI0818 15:59:34.295977 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:59:34.295992 17344 solver.cpp:244]     Train net output #1: loss = 0.000841551 (* 1 = 0.000841551 loss)\nI0818 15:59:34.365069 17344 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 16:01:52.416663 17344 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 16:03:15.159170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10688\nI0818 16:03:15.159464 17344 solver.cpp:404]     Test net output #1: loss = 25.7535 (* 1 = 25.7535 loss)\nI0818 16:03:16.497550 17344 solver.cpp:228] Iteration 38000, loss = 0.000598515\nI0818 16:03:16.497587 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:03:16.497602 17344 solver.cpp:244]     Train net output #1: loss = 0.000598552 (* 1 = 0.000598552 
loss)\nI0818 16:03:16.573037 17344 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 16:05:34.620033 17344 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 16:06:57.355880 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10104\nI0818 16:06:57.356184 17344 solver.cpp:404]     Test net output #1: loss = 29.4253 (* 1 = 29.4253 loss)\nI0818 16:06:58.694658 17344 solver.cpp:228] Iteration 38100, loss = 0.000829301\nI0818 16:06:58.694694 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:06:58.694710 17344 solver.cpp:244]     Train net output #1: loss = 0.000829339 (* 1 = 0.000829339 loss)\nI0818 16:06:58.762590 17344 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 16:09:16.828637 17344 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 16:10:39.565134 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09976\nI0818 16:10:39.565438 17344 solver.cpp:404]     Test net output #1: loss = 32.0297 (* 1 = 32.0297 loss)\nI0818 16:10:40.903941 17344 solver.cpp:228] Iteration 38200, loss = 0.000961026\nI0818 16:10:40.903973 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:10:40.903988 17344 solver.cpp:244]     Train net output #1: loss = 0.000961064 (* 1 = 0.000961064 loss)\nI0818 16:10:40.973302 17344 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 16:12:59.020812 17344 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 16:14:21.765557 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 16:14:21.765864 17344 solver.cpp:404]     Test net output #1: loss = 34.089 (* 1 = 34.089 loss)\nI0818 16:14:23.104358 17344 solver.cpp:228] Iteration 38300, loss = 0.000884521\nI0818 16:14:23.104393 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:14:23.104408 17344 solver.cpp:244]     Train net output #1: loss = 0.000884559 (* 1 = 0.000884559 loss)\nI0818 16:14:23.184774 17344 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 16:16:41.241430 17344 
solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 16:18:03.985811 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0818 16:18:03.986143 17344 solver.cpp:404]     Test net output #1: loss = 36.076 (* 1 = 36.076 loss)\nI0818 16:18:05.325081 17344 solver.cpp:228] Iteration 38400, loss = 0.000698337\nI0818 16:18:05.325115 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:18:05.325129 17344 solver.cpp:244]     Train net output #1: loss = 0.000698375 (* 1 = 0.000698375 loss)\nI0818 16:18:05.402371 17344 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0818 16:20:23.432449 17344 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 16:21:46.170210 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 16:21:46.170536 17344 solver.cpp:404]     Test net output #1: loss = 37.4229 (* 1 = 37.4229 loss)\nI0818 16:21:47.509809 17344 solver.cpp:228] Iteration 38500, loss = 0.000584924\nI0818 16:21:47.509846 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:21:47.509868 17344 solver.cpp:244]     Train net output #1: loss = 0.000584961 (* 1 = 0.000584961 loss)\nI0818 16:21:47.584671 17344 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 16:24:05.787325 17344 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 16:25:28.530303 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0818 16:25:28.530632 17344 solver.cpp:404]     Test net output #1: loss = 40.9661 (* 1 = 40.9661 loss)\nI0818 16:25:29.869949 17344 solver.cpp:228] Iteration 38600, loss = 0.000751167\nI0818 16:25:29.869987 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:25:29.870009 17344 solver.cpp:244]     Train net output #1: loss = 0.000751205 (* 1 = 0.000751205 loss)\nI0818 16:25:29.942445 17344 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 16:27:48.101959 17344 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 16:29:10.853986 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.10036\nI0818 16:29:10.854266 17344 solver.cpp:404]     Test net output #1: loss = 41.6348 (* 1 = 41.6348 loss)\nI0818 16:29:12.193862 17344 solver.cpp:228] Iteration 38700, loss = 0.000607726\nI0818 16:29:12.193902 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:29:12.193917 17344 solver.cpp:244]     Train net output #1: loss = 0.000607763 (* 1 = 0.000607763 loss)\nI0818 16:29:12.271237 17344 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 16:31:30.437029 17344 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 16:32:53.169486 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0818 16:32:53.169796 17344 solver.cpp:404]     Test net output #1: loss = 44.2654 (* 1 = 44.2654 loss)\nI0818 16:32:54.509306 17344 solver.cpp:228] Iteration 38800, loss = 0.000652613\nI0818 16:32:54.509341 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:32:54.509356 17344 solver.cpp:244]     Train net output #1: loss = 0.000652651 (* 1 = 0.000652651 loss)\nI0818 16:32:54.584553 17344 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 16:35:12.720297 17344 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:36:35.443464 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 16:36:35.443752 17344 solver.cpp:404]     Test net output #1: loss = 45.8244 (* 1 = 45.8244 loss)\nI0818 16:36:36.783737 17344 solver.cpp:228] Iteration 38900, loss = 0.000563589\nI0818 16:36:36.783771 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:36:36.783785 17344 solver.cpp:244]     Train net output #1: loss = 0.000563627 (* 1 = 0.000563627 loss)\nI0818 16:36:36.856876 17344 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 16:38:55.059610 17344 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:40:17.789645 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0818 16:40:17.789939 17344 solver.cpp:404]     Test net output #1: loss = 46.6953 (* 1 = 46.6953 
loss)\nI0818 16:40:19.129020 17344 solver.cpp:228] Iteration 39000, loss = 0.000751129\nI0818 16:40:19.129055 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:40:19.129070 17344 solver.cpp:244]     Train net output #1: loss = 0.000751167 (* 1 = 0.000751167 loss)\nI0818 16:40:19.198588 17344 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 16:42:37.332608 17344 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:44:00.061604 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 16:44:00.061920 17344 solver.cpp:404]     Test net output #1: loss = 47.2099 (* 1 = 47.2099 loss)\nI0818 16:44:01.400257 17344 solver.cpp:228] Iteration 39100, loss = 0.000584991\nI0818 16:44:01.400292 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:44:01.400307 17344 solver.cpp:244]     Train net output #1: loss = 0.000585029 (* 1 = 0.000585029 loss)\nI0818 16:44:01.472923 17344 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 16:46:19.508744 17344 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:47:42.208050 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 16:47:42.208360 17344 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0818 16:47:43.546496 17344 solver.cpp:228] Iteration 39200, loss = 2.07966\nI0818 16:47:43.546528 17344 solver.cpp:244]     Train net output #0: accuracy = 0.152\nI0818 16:47:43.546543 17344 solver.cpp:244]     Train net output #1: loss = 2.07966 (* 1 = 2.07966 loss)\nI0818 16:47:43.622382 17344 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 16:50:01.649930 17344 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:51:24.369058 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 16:51:24.369362 17344 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0818 16:51:25.707931 17344 solver.cpp:228] Iteration 39300, loss = 1.58559\nI0818 16:51:25.707962 17344 solver.cpp:244]     
Train net output #0: accuracy = 0.408\nI0818 16:51:25.707978 17344 solver.cpp:244]     Train net output #1: loss = 1.58559 (* 1 = 1.58559 loss)\nI0818 16:51:25.781180 17344 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 16:53:43.800380 17344 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:55:06.516428 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0818 16:55:06.516737 17344 solver.cpp:404]     Test net output #1: loss = 78.5974 (* 1 = 78.5974 loss)\nI0818 16:55:07.854663 17344 solver.cpp:228] Iteration 39400, loss = 1.25263\nI0818 16:55:07.854694 17344 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0818 16:55:07.854710 17344 solver.cpp:244]     Train net output #1: loss = 1.25263 (* 1 = 1.25263 loss)\nI0818 16:55:07.926964 17344 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 16:57:25.963007 17344 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:58:48.687564 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10284\nI0818 16:58:48.687885 17344 solver.cpp:404]     Test net output #1: loss = 24.516 (* 1 = 24.516 loss)\nI0818 16:58:50.026279 17344 solver.cpp:228] Iteration 39500, loss = 0.773733\nI0818 16:58:50.026309 17344 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 16:58:50.026324 17344 solver.cpp:244]     Train net output #1: loss = 0.773733 (* 1 = 0.773733 loss)\nI0818 16:58:50.102311 17344 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 17:01:08.134008 17344 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 17:02:30.846531 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10404\nI0818 17:02:30.846843 17344 solver.cpp:404]     Test net output #1: loss = 9.83641 (* 1 = 9.83641 loss)\nI0818 17:02:32.185281 17344 solver.cpp:228] Iteration 39600, loss = 0.567656\nI0818 17:02:32.185314 17344 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0818 17:02:32.185328 17344 solver.cpp:244]     Train net output #1: loss = 0.567656 (* 1 = 0.567656 
loss)\nI0818 17:02:32.259788 17344 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 17:04:50.287436 17344 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 17:06:13.001525 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10928\nI0818 17:06:13.001830 17344 solver.cpp:404]     Test net output #1: loss = 6.04773 (* 1 = 6.04773 loss)\nI0818 17:06:14.339694 17344 solver.cpp:228] Iteration 39700, loss = 0.371125\nI0818 17:06:14.339726 17344 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 17:06:14.339742 17344 solver.cpp:244]     Train net output #1: loss = 0.371125 (* 1 = 0.371125 loss)\nI0818 17:06:14.410473 17344 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 17:08:32.446964 17344 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 17:09:55.166678 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10208\nI0818 17:09:55.166988 17344 solver.cpp:404]     Test net output #1: loss = 5.39209 (* 1 = 5.39209 loss)\nI0818 17:09:56.505676 17344 solver.cpp:228] Iteration 39800, loss = 0.354821\nI0818 17:09:56.505709 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 17:09:56.505724 17344 solver.cpp:244]     Train net output #1: loss = 0.354821 (* 1 = 0.354821 loss)\nI0818 17:09:56.580358 17344 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 17:12:14.730350 17344 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 17:13:37.444716 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13896\nI0818 17:13:37.445032 17344 solver.cpp:404]     Test net output #1: loss = 3.99556 (* 1 = 3.99556 loss)\nI0818 17:13:38.784267 17344 solver.cpp:228] Iteration 39900, loss = 0.33776\nI0818 17:13:38.784301 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 17:13:38.784317 17344 solver.cpp:244]     Train net output #1: loss = 0.33776 (* 1 = 0.33776 loss)\nI0818 17:13:38.858021 17344 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 17:15:56.976608 17344 solver.cpp:337] Iteration 
40000, Testing net (#0)\nI0818 17:17:19.695453 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1166\nI0818 17:17:19.695752 17344 solver.cpp:404]     Test net output #1: loss = 5.29713 (* 1 = 5.29713 loss)\nI0818 17:17:21.035589 17344 solver.cpp:228] Iteration 40000, loss = 0.277205\nI0818 17:17:21.035622 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 17:17:21.035637 17344 solver.cpp:244]     Train net output #1: loss = 0.277205 (* 1 = 0.277205 loss)\nI0818 17:17:21.109131 17344 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 17:19:39.349223 17344 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 17:21:02.678066 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10504\nI0818 17:21:02.678398 17344 solver.cpp:404]     Test net output #1: loss = 4.75615 (* 1 = 4.75615 loss)\nI0818 17:21:04.018211 17344 solver.cpp:228] Iteration 40100, loss = 0.222879\nI0818 17:21:04.018254 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 17:21:04.018270 17344 solver.cpp:244]     Train net output #1: loss = 0.222879 (* 1 = 0.222879 loss)\nI0818 17:21:04.090293 17344 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0818 17:23:22.556242 17344 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 17:24:46.118330 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10572\nI0818 17:24:46.118667 17344 solver.cpp:404]     Test net output #1: loss = 4.69767 (* 1 = 4.69767 loss)\nI0818 17:24:47.459409 17344 solver.cpp:228] Iteration 40200, loss = 0.21507\nI0818 17:24:47.459450 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:24:47.459466 17344 solver.cpp:244]     Train net output #1: loss = 0.21507 (* 1 = 0.21507 loss)\nI0818 17:24:47.531478 17344 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 17:27:06.021631 17344 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 17:28:29.586645 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14652\nI0818 17:28:29.587030 17344 
solver.cpp:404]     Test net output #1: loss = 9.23236 (* 1 = 9.23236 loss)\nI0818 17:28:30.926789 17344 solver.cpp:228] Iteration 40300, loss = 0.263981\nI0818 17:28:30.926829 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 17:28:30.926846 17344 solver.cpp:244]     Train net output #1: loss = 0.263981 (* 1 = 0.263981 loss)\nI0818 17:28:30.996686 17344 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 17:30:49.445057 17344 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 17:32:13.000382 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14108\nI0818 17:32:13.000705 17344 solver.cpp:404]     Test net output #1: loss = 6.4633 (* 1 = 6.4633 loss)\nI0818 17:32:14.340859 17344 solver.cpp:228] Iteration 40400, loss = 0.266416\nI0818 17:32:14.340901 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:32:14.340917 17344 solver.cpp:244]     Train net output #1: loss = 0.266416 (* 1 = 0.266416 loss)\nI0818 17:32:14.412920 17344 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 17:34:32.851755 17344 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 17:35:56.419600 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1104\nI0818 17:35:56.419958 17344 solver.cpp:404]     Test net output #1: loss = 5.54214 (* 1 = 5.54214 loss)\nI0818 17:35:57.760224 17344 solver.cpp:228] Iteration 40500, loss = 0.226391\nI0818 17:35:57.760265 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:35:57.760282 17344 solver.cpp:244]     Train net output #1: loss = 0.226391 (* 1 = 0.226391 loss)\nI0818 17:35:57.834529 17344 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 17:38:16.491552 17344 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:39:40.041404 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10648\nI0818 17:39:40.041762 17344 solver.cpp:404]     Test net output #1: loss = 7.66395 (* 1 = 7.66395 loss)\nI0818 17:39:41.382850 17344 solver.cpp:228] Iteration 40600, 
loss = 0.17365\nI0818 17:39:41.382891 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:39:41.382907 17344 solver.cpp:244]     Train net output #1: loss = 0.17365 (* 1 = 0.17365 loss)\nI0818 17:39:41.452738 17344 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 17:41:59.928963 17344 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:43:23.501329 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11328\nI0818 17:43:23.501677 17344 solver.cpp:404]     Test net output #1: loss = 8.85593 (* 1 = 8.85593 loss)\nI0818 17:43:24.841802 17344 solver.cpp:228] Iteration 40700, loss = 0.105642\nI0818 17:43:24.841843 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:43:24.841858 17344 solver.cpp:244]     Train net output #1: loss = 0.105642 (* 1 = 0.105642 loss)\nI0818 17:43:24.911325 17344 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 17:45:43.553706 17344 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:47:07.130769 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14844\nI0818 17:47:07.131129 17344 solver.cpp:404]     Test net output #1: loss = 7.40927 (* 1 = 7.40927 loss)\nI0818 17:47:08.471423 17344 solver.cpp:228] Iteration 40800, loss = 0.161175\nI0818 17:47:08.471463 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:47:08.471478 17344 solver.cpp:244]     Train net output #1: loss = 0.161175 (* 1 = 0.161175 loss)\nI0818 17:47:08.546046 17344 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 17:49:26.998473 17344 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:50:49.723016 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10488\nI0818 17:50:49.723351 17344 solver.cpp:404]     Test net output #1: loss = 9.71417 (* 1 = 9.71417 loss)\nI0818 17:50:51.062405 17344 solver.cpp:228] Iteration 40900, loss = 0.195479\nI0818 17:50:51.062440 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:50:51.062455 17344 
solver.cpp:244]     Train net output #1: loss = 0.195479 (* 1 = 0.195479 loss)\nI0818 17:50:51.134574 17344 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 17:53:09.309655 17344 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:54:31.945809 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1014\nI0818 17:54:31.946063 17344 solver.cpp:404]     Test net output #1: loss = 10.7545 (* 1 = 10.7545 loss)\nI0818 17:54:33.284756 17344 solver.cpp:228] Iteration 41000, loss = 0.163464\nI0818 17:54:33.284791 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:54:33.284806 17344 solver.cpp:244]     Train net output #1: loss = 0.163464 (* 1 = 0.163464 loss)\nI0818 17:54:33.355330 17344 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 17:56:51.401881 17344 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:58:14.050632 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12988\nI0818 17:58:14.050900 17344 solver.cpp:404]     Test net output #1: loss = 6.07015 (* 1 = 6.07015 loss)\nI0818 17:58:15.389514 17344 solver.cpp:228] Iteration 41100, loss = 0.105435\nI0818 17:58:15.389547 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:58:15.389562 17344 solver.cpp:244]     Train net output #1: loss = 0.105435 (* 1 = 0.105435 loss)\nI0818 17:58:15.462895 17344 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 18:00:33.544942 17344 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 18:01:56.227721 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 18:01:56.227994 17344 solver.cpp:404]     Test net output #1: loss = 11.228 (* 1 = 11.228 loss)\nI0818 18:01:57.566679 17344 solver.cpp:228] Iteration 41200, loss = 0.157081\nI0818 18:01:57.566715 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:01:57.566730 17344 solver.cpp:244]     Train net output #1: loss = 0.157081 (* 1 = 0.157081 loss)\nI0818 18:01:57.632062 17344 sgd_solver.cpp:166] Iteration 
41200, lr = 0.35\nI0818 18:04:15.711750 17344 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 18:05:38.440593 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 18:05:38.440843 17344 solver.cpp:404]     Test net output #1: loss = 14.9431 (* 1 = 14.9431 loss)\nI0818 18:05:39.778740 17344 solver.cpp:228] Iteration 41300, loss = 0.132927\nI0818 18:05:39.778775 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:05:39.778789 17344 solver.cpp:244]     Train net output #1: loss = 0.132927 (* 1 = 0.132927 loss)\nI0818 18:05:39.848016 17344 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 18:07:58.002034 17344 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 18:09:20.739687 17344 solver.cpp:404]     Test net output #0: accuracy = 0.112\nI0818 18:09:20.739965 17344 solver.cpp:404]     Test net output #1: loss = 4.71374 (* 1 = 4.71374 loss)\nI0818 18:09:22.079249 17344 solver.cpp:228] Iteration 41400, loss = 0.0889277\nI0818 18:09:22.079283 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:09:22.079299 17344 solver.cpp:244]     Train net output #1: loss = 0.0889277 (* 1 = 0.0889277 loss)\nI0818 18:09:22.144376 17344 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 18:11:40.299294 17344 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 18:13:03.027287 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14992\nI0818 18:13:03.027565 17344 solver.cpp:404]     Test net output #1: loss = 6.71903 (* 1 = 6.71903 loss)\nI0818 18:13:04.367148 17344 solver.cpp:228] Iteration 41500, loss = 0.0921964\nI0818 18:13:04.367184 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:13:04.367199 17344 solver.cpp:244]     Train net output #1: loss = 0.0921965 (* 1 = 0.0921965 loss)\nI0818 18:13:04.430399 17344 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 18:15:22.525853 17344 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 18:16:45.265456 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.13212\nI0818 18:16:45.265730 17344 solver.cpp:404]     Test net output #1: loss = 5.03028 (* 1 = 5.03028 loss)\nI0818 18:16:46.604993 17344 solver.cpp:228] Iteration 41600, loss = 0.132716\nI0818 18:16:46.605028 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:16:46.605043 17344 solver.cpp:244]     Train net output #1: loss = 0.132716 (* 1 = 0.132716 loss)\nI0818 18:16:46.674320 17344 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 18:19:04.814069 17344 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 18:20:27.549666 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10196\nI0818 18:20:27.549963 17344 solver.cpp:404]     Test net output #1: loss = 11.627 (* 1 = 11.627 loss)\nI0818 18:20:28.889591 17344 solver.cpp:228] Iteration 41700, loss = 0.093767\nI0818 18:20:28.889627 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:20:28.889642 17344 solver.cpp:244]     Train net output #1: loss = 0.093767 (* 1 = 0.093767 loss)\nI0818 18:20:28.956961 17344 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 18:22:47.125574 17344 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 18:24:09.858705 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10484\nI0818 18:24:09.859005 17344 solver.cpp:404]     Test net output #1: loss = 13.4813 (* 1 = 13.4813 loss)\nI0818 18:24:11.198257 17344 solver.cpp:228] Iteration 41800, loss = 0.160122\nI0818 18:24:11.198289 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:24:11.198304 17344 solver.cpp:244]     Train net output #1: loss = 0.160122 (* 1 = 0.160122 loss)\nI0818 18:24:11.262053 17344 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 18:26:29.359861 17344 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 18:27:52.098026 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1506\nI0818 18:27:52.098292 17344 solver.cpp:404]     Test net output #1: loss = 
9.35388 (* 1 = 9.35388 loss)\nI0818 18:27:53.438024 17344 solver.cpp:228] Iteration 41900, loss = 0.0807902\nI0818 18:27:53.438058 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:27:53.438074 17344 solver.cpp:244]     Train net output #1: loss = 0.0807903 (* 1 = 0.0807903 loss)\nI0818 18:27:53.505542 17344 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 18:30:11.726008 17344 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 18:31:34.461262 17344 solver.cpp:404]     Test net output #0: accuracy = 0.198\nI0818 18:31:34.461558 17344 solver.cpp:404]     Test net output #1: loss = 3.99771 (* 1 = 3.99771 loss)\nI0818 18:31:35.799293 17344 solver.cpp:228] Iteration 42000, loss = 0.12227\nI0818 18:31:35.799329 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:31:35.799343 17344 solver.cpp:244]     Train net output #1: loss = 0.12227 (* 1 = 0.12227 loss)\nI0818 18:31:35.871407 17344 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 18:33:54.077649 17344 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 18:35:16.815632 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10652\nI0818 18:35:16.815953 17344 solver.cpp:404]     Test net output #1: loss = 18.2449 (* 1 = 18.2449 loss)\nI0818 18:35:18.156054 17344 solver.cpp:228] Iteration 42100, loss = 0.146852\nI0818 18:35:18.156088 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:35:18.156103 17344 solver.cpp:244]     Train net output #1: loss = 0.146852 (* 1 = 0.146852 loss)\nI0818 18:35:18.219508 17344 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 18:37:36.411892 17344 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:38:59.151177 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11228\nI0818 18:38:59.151489 17344 solver.cpp:404]     Test net output #1: loss = 15.7579 (* 1 = 15.7579 loss)\nI0818 18:39:00.490101 17344 solver.cpp:228] Iteration 42200, loss = 0.0886275\nI0818 18:39:00.490135 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:39:00.490150 17344 solver.cpp:244]     Train net output #1: loss = 0.0886275 (* 1 = 0.0886275 loss)\nI0818 18:39:00.561229 17344 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 18:41:18.777482 17344 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:42:41.517354 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15372\nI0818 18:42:41.517647 17344 solver.cpp:404]     Test net output #1: loss = 9.50898 (* 1 = 9.50898 loss)\nI0818 18:42:42.856248 17344 solver.cpp:228] Iteration 42300, loss = 0.0919038\nI0818 18:42:42.856283 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:42:42.856299 17344 solver.cpp:244]     Train net output #1: loss = 0.0919038 (* 1 = 0.0919038 loss)\nI0818 18:42:42.922727 17344 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 18:45:01.030170 17344 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:46:23.765529 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10168\nI0818 18:46:23.765852 17344 solver.cpp:404]     Test net output #1: loss = 20.0719 (* 1 = 20.0719 loss)\nI0818 18:46:25.105499 17344 solver.cpp:228] Iteration 42400, loss = 0.0744302\nI0818 18:46:25.105533 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:46:25.105548 17344 solver.cpp:244]     Train net output #1: loss = 0.0744303 (* 1 = 0.0744303 loss)\nI0818 18:46:25.170061 17344 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 18:48:43.326467 17344 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:50:06.055846 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 18:50:06.056161 17344 solver.cpp:404]     Test net output #1: loss = 18.5135 (* 1 = 18.5135 loss)\nI0818 18:50:07.395536 17344 solver.cpp:228] Iteration 42500, loss = 0.108752\nI0818 18:50:07.395571 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:50:07.395586 17344 solver.cpp:244]     Train net output #1: 
loss = 0.108752 (* 1 = 0.108752 loss)\nI0818 18:50:07.459656 17344 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 18:52:25.646716 17344 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:53:48.387315 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0818 18:53:48.387634 17344 solver.cpp:404]     Test net output #1: loss = 30.186 (* 1 = 30.186 loss)\nI0818 18:53:49.726263 17344 solver.cpp:228] Iteration 42600, loss = 0.0513104\nI0818 18:53:49.726295 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:53:49.726310 17344 solver.cpp:244]     Train net output #1: loss = 0.0513105 (* 1 = 0.0513105 loss)\nI0818 18:53:49.794895 17344 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 18:56:07.948712 17344 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:57:30.676949 17344 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 18:57:30.677259 17344 solver.cpp:404]     Test net output #1: loss = 17.9129 (* 1 = 17.9129 loss)\nI0818 18:57:32.016125 17344 solver.cpp:228] Iteration 42700, loss = 0.132101\nI0818 18:57:32.016158 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:57:32.016173 17344 solver.cpp:244]     Train net output #1: loss = 0.132101 (* 1 = 0.132101 loss)\nI0818 18:57:32.090068 17344 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 18:59:50.254405 17344 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 19:01:12.990453 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21148\nI0818 19:01:12.990746 17344 solver.cpp:404]     Test net output #1: loss = 4.48277 (* 1 = 4.48277 loss)\nI0818 19:01:14.330039 17344 solver.cpp:228] Iteration 42800, loss = 0.0507259\nI0818 19:01:14.330073 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:01:14.330088 17344 solver.cpp:244]     Train net output #1: loss = 0.0507259 (* 1 = 0.0507259 loss)\nI0818 19:01:14.395979 17344 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 
19:03:32.505600 17344 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 19:04:55.242182 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10496\nI0818 19:04:55.242501 17344 solver.cpp:404]     Test net output #1: loss = 17.5942 (* 1 = 17.5942 loss)\nI0818 19:04:56.580580 17344 solver.cpp:228] Iteration 42900, loss = 0.0986694\nI0818 19:04:56.580612 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:04:56.580628 17344 solver.cpp:244]     Train net output #1: loss = 0.0986694 (* 1 = 0.0986694 loss)\nI0818 19:04:56.652771 17344 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 19:07:14.701016 17344 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 19:08:37.434114 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14952\nI0818 19:08:37.434428 17344 solver.cpp:404]     Test net output #1: loss = 6.93648 (* 1 = 6.93648 loss)\nI0818 19:08:38.773067 17344 solver.cpp:228] Iteration 43000, loss = 0.0647476\nI0818 19:08:38.773102 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:08:38.773118 17344 solver.cpp:244]     Train net output #1: loss = 0.0647476 (* 1 = 0.0647476 loss)\nI0818 19:08:38.838737 17344 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 19:10:57.530416 17344 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 19:12:20.773677 17344 solver.cpp:404]     Test net output #0: accuracy = 0.14404\nI0818 19:12:20.773959 17344 solver.cpp:404]     Test net output #1: loss = 7.23124 (* 1 = 7.23124 loss)\nI0818 19:12:22.115888 17344 solver.cpp:228] Iteration 43100, loss = 0.0246478\nI0818 19:12:22.115932 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:12:22.115948 17344 solver.cpp:244]     Train net output #1: loss = 0.0246478 (* 1 = 0.0246478 loss)\nI0818 19:12:22.190714 17344 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 19:14:40.914538 17344 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 19:16:04.368124 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.25896\nI0818 19:16:04.368408 17344 solver.cpp:404]     Test net output #1: loss = 6.20563 (* 1 = 6.20563 loss)\nI0818 19:16:05.709164 17344 solver.cpp:228] Iteration 43200, loss = 0.0495092\nI0818 19:16:05.709208 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:16:05.709224 17344 solver.cpp:244]     Train net output #1: loss = 0.0495093 (* 1 = 0.0495093 loss)\nI0818 19:16:05.780633 17344 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 19:18:24.322360 17344 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 19:19:47.896055 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15892\nI0818 19:19:47.896344 17344 solver.cpp:404]     Test net output #1: loss = 9.30194 (* 1 = 9.30194 loss)\nI0818 19:19:49.237674 17344 solver.cpp:228] Iteration 43300, loss = 0.105271\nI0818 19:19:49.237716 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:19:49.237732 17344 solver.cpp:244]     Train net output #1: loss = 0.105271 (* 1 = 0.105271 loss)\nI0818 19:19:49.311962 17344 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 19:22:07.951767 17344 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 19:23:31.527601 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10568\nI0818 19:23:31.527925 17344 solver.cpp:404]     Test net output #1: loss = 18.6851 (* 1 = 18.6851 loss)\nI0818 19:23:32.869943 17344 solver.cpp:228] Iteration 43400, loss = 0.0248519\nI0818 19:23:32.869987 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:23:32.870003 17344 solver.cpp:244]     Train net output #1: loss = 0.024852 (* 1 = 0.024852 loss)\nI0818 19:23:32.945921 17344 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 19:25:51.459254 17344 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 19:27:14.831543 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 19:27:14.831818 17344 solver.cpp:404]     Test net output #1: loss = 42.5153 (* 1 = 42.5153 
loss)\nI0818 19:27:16.171813 17344 solver.cpp:228] Iteration 43500, loss = 0.0994626\nI0818 19:27:16.171855 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:27:16.171871 17344 solver.cpp:244]     Train net output #1: loss = 0.0994626 (* 1 = 0.0994626 loss)\nI0818 19:27:16.243302 17344 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 19:29:34.773766 17344 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 19:30:58.031759 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 19:30:58.032013 17344 solver.cpp:404]     Test net output #1: loss = 43.6299 (* 1 = 43.6299 loss)\nI0818 19:30:59.372890 17344 solver.cpp:228] Iteration 43600, loss = 0.0469778\nI0818 19:30:59.372932 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:30:59.372948 17344 solver.cpp:244]     Train net output #1: loss = 0.0469779 (* 1 = 0.0469779 loss)\nI0818 19:30:59.445427 17344 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 19:33:17.952770 17344 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 19:34:41.419665 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10176\nI0818 19:34:41.419940 17344 solver.cpp:404]     Test net output #1: loss = 28.2562 (* 1 = 28.2562 loss)\nI0818 19:34:42.760608 17344 solver.cpp:228] Iteration 43700, loss = 0.0249526\nI0818 19:34:42.760648 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:34:42.760665 17344 solver.cpp:244]     Train net output #1: loss = 0.0249527 (* 1 = 0.0249527 loss)\nI0818 19:34:42.843127 17344 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 19:37:01.395514 17344 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:38:24.773609 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0818 19:38:24.773897 17344 solver.cpp:404]     Test net output #1: loss = 29.1687 (* 1 = 29.1687 loss)\nI0818 19:38:26.115001 17344 solver.cpp:228] Iteration 43800, loss = 0.0470074\nI0818 19:38:26.115042 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:38:26.115058 17344 solver.cpp:244]     Train net output #1: loss = 0.0470075 (* 1 = 0.0470075 loss)\nI0818 19:38:26.186430 17344 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 19:40:44.702479 17344 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:42:07.941872 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10692\nI0818 19:42:07.942140 17344 solver.cpp:404]     Test net output #1: loss = 17.0471 (* 1 = 17.0471 loss)\nI0818 19:42:09.283262 17344 solver.cpp:228] Iteration 43900, loss = 0.0228077\nI0818 19:42:09.283303 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:42:09.283319 17344 solver.cpp:244]     Train net output #1: loss = 0.0228077 (* 1 = 0.0228077 loss)\nI0818 19:42:09.349025 17344 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 19:44:28.058812 17344 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:45:51.417835 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12176\nI0818 19:45:51.418120 17344 solver.cpp:404]     Test net output #1: loss = 12.3597 (* 1 = 12.3597 loss)\nI0818 19:45:52.758288 17344 solver.cpp:228] Iteration 44000, loss = 0.0670368\nI0818 19:45:52.758329 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:45:52.758347 17344 solver.cpp:244]     Train net output #1: loss = 0.0670368 (* 1 = 0.0670368 loss)\nI0818 19:45:52.836896 17344 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 19:48:11.568467 17344 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:49:34.869127 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12528\nI0818 19:49:34.869386 17344 solver.cpp:404]     Test net output #1: loss = 19.2084 (* 1 = 19.2084 loss)\nI0818 19:49:36.210479 17344 solver.cpp:228] Iteration 44100, loss = 0.0363097\nI0818 19:49:36.210520 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:49:36.210537 17344 solver.cpp:244]     Train net output #1: 
loss = 0.0363097 (* 1 = 0.0363097 loss)\nI0818 19:49:36.285202 17344 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 19:51:54.928495 17344 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:53:18.169153 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10248\nI0818 19:53:18.169442 17344 solver.cpp:404]     Test net output #1: loss = 25.6354 (* 1 = 25.6354 loss)\nI0818 19:53:19.511183 17344 solver.cpp:228] Iteration 44200, loss = 0.03088\nI0818 19:53:19.511227 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:53:19.511243 17344 solver.cpp:244]     Train net output #1: loss = 0.03088 (* 1 = 0.03088 loss)\nI0818 19:53:19.583748 17344 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 19:55:38.271262 17344 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:57:01.439800 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12068\nI0818 19:57:01.440083 17344 solver.cpp:404]     Test net output #1: loss = 20.1696 (* 1 = 20.1696 loss)\nI0818 19:57:02.781534 17344 solver.cpp:228] Iteration 44300, loss = 0.0438135\nI0818 19:57:02.781579 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:57:02.781595 17344 solver.cpp:244]     Train net output #1: loss = 0.0438136 (* 1 = 0.0438136 loss)\nI0818 19:57:02.855056 17344 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 19:59:21.565860 17344 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 20:00:44.808707 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 20:00:44.809011 17344 solver.cpp:404]     Test net output #1: loss = 63.3444 (* 1 = 63.3444 loss)\nI0818 20:00:46.149605 17344 solver.cpp:228] Iteration 44400, loss = 0.0617797\nI0818 20:00:46.149646 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:00:46.149662 17344 solver.cpp:244]     Train net output #1: loss = 0.0617797 (* 1 = 0.0617797 loss)\nI0818 20:00:46.229137 17344 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 
20:03:04.845352 17344 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 20:04:28.253823 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 20:04:28.254112 17344 solver.cpp:404]     Test net output #1: loss = 59.4328 (* 1 = 59.4328 loss)\nI0818 20:04:29.594646 17344 solver.cpp:228] Iteration 44500, loss = 0.0351419\nI0818 20:04:29.594688 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:04:29.594704 17344 solver.cpp:244]     Train net output #1: loss = 0.0351419 (* 1 = 0.0351419 loss)\nI0818 20:04:29.670441 17344 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 20:06:48.286262 17344 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 20:08:11.487807 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10204\nI0818 20:08:11.488067 17344 solver.cpp:404]     Test net output #1: loss = 38.6283 (* 1 = 38.6283 loss)\nI0818 20:08:12.829674 17344 solver.cpp:228] Iteration 44600, loss = 0.0108762\nI0818 20:08:12.829716 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:08:12.829732 17344 solver.cpp:244]     Train net output #1: loss = 0.0108762 (* 1 = 0.0108762 loss)\nI0818 20:08:12.897033 17344 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 20:10:31.577906 17344 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 20:11:54.706959 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15464\nI0818 20:11:54.707258 17344 solver.cpp:404]     Test net output #1: loss = 17.1412 (* 1 = 17.1412 loss)\nI0818 20:11:56.048794 17344 solver.cpp:228] Iteration 44700, loss = 0.0403203\nI0818 20:11:56.048836 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:11:56.048852 17344 solver.cpp:244]     Train net output #1: loss = 0.0403203 (* 1 = 0.0403203 loss)\nI0818 20:11:56.119127 17344 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 20:14:14.699640 17344 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 20:15:37.954016 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.1444\nI0818 20:15:37.954298 17344 solver.cpp:404]     Test net output #1: loss = 18.3859 (* 1 = 18.3859 loss)\nI0818 20:15:39.295516 17344 solver.cpp:228] Iteration 44800, loss = 0.0445981\nI0818 20:15:39.295557 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:15:39.295573 17344 solver.cpp:244]     Train net output #1: loss = 0.0445981 (* 1 = 0.0445981 loss)\nI0818 20:15:39.370445 17344 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 20:17:58.005172 17344 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 20:19:21.532595 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27004\nI0818 20:19:21.532884 17344 solver.cpp:404]     Test net output #1: loss = 7.83969 (* 1 = 7.83969 loss)\nI0818 20:19:22.874987 17344 solver.cpp:228] Iteration 44900, loss = 0.0113413\nI0818 20:19:22.875028 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:19:22.875044 17344 solver.cpp:244]     Train net output #1: loss = 0.0113413 (* 1 = 0.0113413 loss)\nI0818 20:19:22.947660 17344 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 20:21:41.448973 17344 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 20:23:04.981061 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3772\nI0818 20:23:04.981393 17344 solver.cpp:404]     Test net output #1: loss = 4.13973 (* 1 = 4.13973 loss)\nI0818 20:23:06.321192 17344 solver.cpp:228] Iteration 45000, loss = 0.0326219\nI0818 20:23:06.321234 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:23:06.321250 17344 solver.cpp:244]     Train net output #1: loss = 0.032622 (* 1 = 0.032622 loss)\nI0818 20:23:06.394767 17344 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 20:25:24.921609 17344 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 20:26:48.358222 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3014\nI0818 20:26:48.358475 17344 solver.cpp:404]     Test net output #1: loss = 7.14141 (* 1 = 7.14141 
loss)\nI0818 20:26:49.698935 17344 solver.cpp:228] Iteration 45100, loss = 0.0342976\nI0818 20:26:49.698978 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:26:49.698994 17344 solver.cpp:244]     Train net output #1: loss = 0.0342977 (* 1 = 0.0342977 loss)\nI0818 20:26:49.775030 17344 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 20:29:08.456722 17344 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 20:30:31.900998 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36588\nI0818 20:30:31.901275 17344 solver.cpp:404]     Test net output #1: loss = 5.37179 (* 1 = 5.37179 loss)\nI0818 20:30:33.242302 17344 solver.cpp:228] Iteration 45200, loss = 0.0821494\nI0818 20:30:33.242346 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:30:33.242360 17344 solver.cpp:244]     Train net output #1: loss = 0.0821495 (* 1 = 0.0821495 loss)\nI0818 20:30:33.312773 17344 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 20:32:51.881276 17344 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 20:34:15.424528 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24212\nI0818 20:34:15.424809 17344 solver.cpp:404]     Test net output #1: loss = 6.17095 (* 1 = 6.17095 loss)\nI0818 20:34:16.765205 17344 solver.cpp:228] Iteration 45300, loss = 0.0494193\nI0818 20:34:16.765247 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:34:16.765264 17344 solver.cpp:244]     Train net output #1: loss = 0.0494194 (* 1 = 0.0494194 loss)\nI0818 20:34:16.840096 17344 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 20:36:35.432983 17344 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 20:37:58.944914 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1804\nI0818 20:37:58.945199 17344 solver.cpp:404]     Test net output #1: loss = 7.33117 (* 1 = 7.33117 loss)\nI0818 20:38:00.284991 17344 solver.cpp:228] Iteration 45400, loss = 0.052019\nI0818 20:38:00.285035 17344 solver.cpp:244]  
   Train net output #0: accuracy = 0.976\nI0818 20:38:00.285053 17344 solver.cpp:244]     Train net output #1: loss = 0.0520191 (* 1 = 0.0520191 loss)\nI0818 20:38:00.363373 17344 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 20:40:18.933394 17344 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:41:42.460655 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27576\nI0818 20:41:42.460994 17344 solver.cpp:404]     Test net output #1: loss = 6.41172 (* 1 = 6.41172 loss)\nI0818 20:41:43.802407 17344 solver.cpp:228] Iteration 45500, loss = 0.0237388\nI0818 20:41:43.802450 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:41:43.802466 17344 solver.cpp:244]     Train net output #1: loss = 0.0237389 (* 1 = 0.0237389 loss)\nI0818 20:41:43.869004 17344 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 20:44:02.552927 17344 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:45:26.093544 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27052\nI0818 20:45:26.093804 17344 solver.cpp:404]     Test net output #1: loss = 6.96677 (* 1 = 6.96677 loss)\nI0818 20:45:27.435497 17344 solver.cpp:228] Iteration 45600, loss = 0.0136594\nI0818 20:45:27.435539 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:45:27.435554 17344 solver.cpp:244]     Train net output #1: loss = 0.0136595 (* 1 = 0.0136595 loss)\nI0818 20:45:27.512073 17344 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 20:47:46.147224 17344 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:49:09.450932 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27976\nI0818 20:49:09.451180 17344 solver.cpp:404]     Test net output #1: loss = 6.28396 (* 1 = 6.28396 loss)\nI0818 20:49:10.791999 17344 solver.cpp:228] Iteration 45700, loss = 0.00932937\nI0818 20:49:10.792043 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:49:10.792059 17344 solver.cpp:244]     Train net output #1: loss = 0.00932951 (* 1 
= 0.00932951 loss)\nI0818 20:49:10.866039 17344 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 20:51:29.335263 17344 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:52:52.835855 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30444\nI0818 20:52:52.836197 17344 solver.cpp:404]     Test net output #1: loss = 7.13816 (* 1 = 7.13816 loss)\nI0818 20:52:54.177223 17344 solver.cpp:228] Iteration 45800, loss = 0.0141202\nI0818 20:52:54.177266 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:52:54.177281 17344 solver.cpp:244]     Train net output #1: loss = 0.0141203 (* 1 = 0.0141203 loss)\nI0818 20:52:54.253751 17344 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0818 20:55:12.834117 17344 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:56:36.380700 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37452\nI0818 20:56:36.380962 17344 solver.cpp:404]     Test net output #1: loss = 5.64317 (* 1 = 5.64317 loss)\nI0818 20:56:37.722242 17344 solver.cpp:228] Iteration 45900, loss = 0.00613298\nI0818 20:56:37.722285 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:56:37.722299 17344 solver.cpp:244]     Train net output #1: loss = 0.00613313 (* 1 = 0.00613313 loss)\nI0818 20:56:37.797457 17344 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 20:58:56.467322 17344 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 21:00:19.758360 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30484\nI0818 21:00:19.758615 17344 solver.cpp:404]     Test net output #1: loss = 6.52804 (* 1 = 6.52804 loss)\nI0818 21:00:21.100002 17344 solver.cpp:228] Iteration 46000, loss = 0.00500754\nI0818 21:00:21.100045 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:00:21.100061 17344 solver.cpp:244]     Train net output #1: loss = 0.00500768 (* 1 = 0.00500768 loss)\nI0818 21:00:21.171803 17344 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 21:02:39.647755 17344 
solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 21:04:02.861420 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25524\nI0818 21:04:02.861649 17344 solver.cpp:404]     Test net output #1: loss = 7.84346 (* 1 = 7.84346 loss)\nI0818 21:04:04.202047 17344 solver.cpp:228] Iteration 46100, loss = 0.00247497\nI0818 21:04:04.202087 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:04:04.202102 17344 solver.cpp:244]     Train net output #1: loss = 0.00247512 (* 1 = 0.00247512 loss)\nI0818 21:04:04.277179 17344 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 21:06:22.737932 17344 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 21:07:45.985692 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28804\nI0818 21:07:45.985956 17344 solver.cpp:404]     Test net output #1: loss = 7.10424 (* 1 = 7.10424 loss)\nI0818 21:07:47.326087 17344 solver.cpp:228] Iteration 46200, loss = 0.00272802\nI0818 21:07:47.326128 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:07:47.326143 17344 solver.cpp:244]     Train net output #1: loss = 0.00272817 (* 1 = 0.00272817 loss)\nI0818 21:07:47.398203 17344 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 21:10:05.867422 17344 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 21:11:29.104822 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27612\nI0818 21:11:29.105144 17344 solver.cpp:404]     Test net output #1: loss = 7.51267 (* 1 = 7.51267 loss)\nI0818 21:11:30.445235 17344 solver.cpp:228] Iteration 46300, loss = 0.00171384\nI0818 21:11:30.445273 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:11:30.445289 17344 solver.cpp:244]     Train net output #1: loss = 0.00171398 (* 1 = 0.00171398 loss)\nI0818 21:11:30.513505 17344 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 21:13:48.964206 17344 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 21:15:12.200683 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.29156\nI0818 21:15:12.200953 17344 solver.cpp:404]     Test net output #1: loss = 7.45808 (* 1 = 7.45808 loss)\nI0818 21:15:13.541162 17344 solver.cpp:228] Iteration 46400, loss = 0.000871388\nI0818 21:15:13.541201 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:15:13.541215 17344 solver.cpp:244]     Train net output #1: loss = 0.000871532 (* 1 = 0.000871532 loss)\nI0818 21:15:13.610580 17344 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 21:17:32.050443 17344 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 21:18:55.412544 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28936\nI0818 21:18:55.412832 17344 solver.cpp:404]     Test net output #1: loss = 8.20768 (* 1 = 8.20768 loss)\nI0818 21:18:56.753126 17344 solver.cpp:228] Iteration 46500, loss = 0.00103603\nI0818 21:18:56.753168 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:18:56.753185 17344 solver.cpp:244]     Train net output #1: loss = 0.00103617 (* 1 = 0.00103617 loss)\nI0818 21:18:56.827356 17344 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 21:21:15.294736 17344 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 21:22:38.833596 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26324\nI0818 21:22:38.833920 17344 solver.cpp:404]     Test net output #1: loss = 10.3272 (* 1 = 10.3272 loss)\nI0818 21:22:40.174160 17344 solver.cpp:228] Iteration 46600, loss = 0.00114175\nI0818 21:22:40.174199 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:22:40.174216 17344 solver.cpp:244]     Train net output #1: loss = 0.0011419 (* 1 = 0.0011419 loss)\nI0818 21:22:40.251142 17344 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 21:24:58.709290 17344 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 21:26:22.230741 17344 solver.cpp:404]     Test net output #0: accuracy = 0.19512\nI0818 21:26:22.231045 17344 solver.cpp:404]     Test net output #1: loss = 14.6733 (* 1 = 14.6733 loss)\nI0818 
21:26:23.571679 17344 solver.cpp:228] Iteration 46700, loss = 0.00060045\nI0818 21:26:23.571722 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:26:23.571738 17344 solver.cpp:244]     Train net output #1: loss = 0.000600594 (* 1 = 0.000600594 loss)\nI0818 21:26:23.646678 17344 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 21:28:42.141574 17344 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 21:30:05.673177 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1462\nI0818 21:30:05.673460 17344 solver.cpp:404]     Test net output #1: loss = 19.6112 (* 1 = 19.6112 loss)\nI0818 21:30:07.013895 17344 solver.cpp:228] Iteration 46800, loss = 0.00101564\nI0818 21:30:07.013933 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:30:07.013949 17344 solver.cpp:244]     Train net output #1: loss = 0.00101578 (* 1 = 0.00101578 loss)\nI0818 21:30:07.092852 17344 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0818 21:32:25.551955 17344 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 21:33:49.077219 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12436\nI0818 21:33:49.077548 17344 solver.cpp:404]     Test net output #1: loss = 23.7183 (* 1 = 23.7183 loss)\nI0818 21:33:50.418802 17344 solver.cpp:228] Iteration 46900, loss = 0.000722988\nI0818 21:33:50.418840 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:33:50.418857 17344 solver.cpp:244]     Train net output #1: loss = 0.000723132 (* 1 = 0.000723132 loss)\nI0818 21:33:50.487196 17344 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 21:36:08.985244 17344 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 21:37:32.287398 17344 solver.cpp:404]     Test net output #0: accuracy = 0.11264\nI0818 21:37:32.287667 17344 solver.cpp:404]     Test net output #1: loss = 27.994 (* 1 = 27.994 loss)\nI0818 21:37:33.627956 17344 solver.cpp:228] Iteration 47000, loss = 0.000637113\nI0818 21:37:33.627996 17344 solver.cpp:244]     Train 
net output #0: accuracy = 1\nI0818 21:37:33.628016 17344 solver.cpp:244]     Train net output #1: loss = 0.000637257 (* 1 = 0.000637257 loss)\nI0818 21:37:33.704018 17344 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 21:39:51.991183 17344 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:41:14.728353 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10556\nI0818 21:41:14.728682 17344 solver.cpp:404]     Test net output #1: loss = 32.5729 (* 1 = 32.5729 loss)\nI0818 21:41:16.067018 17344 solver.cpp:228] Iteration 47100, loss = 0.000715118\nI0818 21:41:16.067054 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:41:16.067070 17344 solver.cpp:244]     Train net output #1: loss = 0.000715261 (* 1 = 0.000715261 loss)\nI0818 21:41:16.142313 17344 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 21:43:34.239866 17344 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:44:56.977325 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10172\nI0818 21:44:56.977632 17344 solver.cpp:404]     Test net output #1: loss = 38.4604 (* 1 = 38.4604 loss)\nI0818 21:44:58.317410 17344 solver.cpp:228] Iteration 47200, loss = 0.000428252\nI0818 21:44:58.317445 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:44:58.317461 17344 solver.cpp:244]     Train net output #1: loss = 0.000428396 (* 1 = 0.000428396 loss)\nI0818 21:44:58.390321 17344 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 21:47:16.557674 17344 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:48:39.297509 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09996\nI0818 21:48:39.297833 17344 solver.cpp:404]     Test net output #1: loss = 42.2449 (* 1 = 42.2449 loss)\nI0818 21:48:40.637913 17344 solver.cpp:228] Iteration 47300, loss = 0.000695341\nI0818 21:48:40.637948 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:48:40.637962 17344 solver.cpp:244]     Train net output #1: loss = 0.000695484 (* 1 
= 0.000695484 loss)\nI0818 21:48:40.701387 17344 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 21:50:58.768790 17344 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:52:21.515944 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0818 21:52:21.516261 17344 solver.cpp:404]     Test net output #1: loss = 45.4169 (* 1 = 45.4169 loss)\nI0818 21:52:22.855402 17344 solver.cpp:228] Iteration 47400, loss = 0.000705081\nI0818 21:52:22.855437 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:52:22.855451 17344 solver.cpp:244]     Train net output #1: loss = 0.000705224 (* 1 = 0.000705224 loss)\nI0818 21:52:22.926707 17344 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 21:54:41.056125 17344 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:56:03.795377 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 21:56:03.795701 17344 solver.cpp:404]     Test net output #1: loss = 47.0726 (* 1 = 47.0726 loss)\nI0818 21:56:05.135146 17344 solver.cpp:228] Iteration 47500, loss = 0.000861003\nI0818 21:56:05.135182 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:56:05.135197 17344 solver.cpp:244]     Train net output #1: loss = 0.000861146 (* 1 = 0.000861146 loss)\nI0818 21:56:05.200814 17344 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 21:58:23.269646 17344 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:59:46.002796 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 21:59:46.003144 17344 solver.cpp:404]     Test net output #1: loss = 49.0733 (* 1 = 49.0733 loss)\nI0818 21:59:47.341128 17344 solver.cpp:228] Iteration 47600, loss = 0.000787196\nI0818 21:59:47.341161 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:59:47.341176 17344 solver.cpp:244]     Train net output #1: loss = 0.000787339 (* 1 = 0.000787339 loss)\nI0818 21:59:47.404254 17344 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 22:02:05.459197 
17344 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 22:03:28.103122 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:03:28.103441 17344 solver.cpp:404]     Test net output #1: loss = 50.8312 (* 1 = 50.8312 loss)\nI0818 22:03:29.442059 17344 solver.cpp:228] Iteration 47700, loss = 0.00094283\nI0818 22:03:29.442092 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:03:29.442108 17344 solver.cpp:244]     Train net output #1: loss = 0.000942973 (* 1 = 0.000942973 loss)\nI0818 22:03:29.508749 17344 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 22:05:47.547407 17344 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 22:07:10.201217 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:07:10.201535 17344 solver.cpp:404]     Test net output #1: loss = 53.1529 (* 1 = 53.1529 loss)\nI0818 22:07:11.539762 17344 solver.cpp:228] Iteration 47800, loss = 0.000586986\nI0818 22:07:11.539798 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:07:11.539813 17344 solver.cpp:244]     Train net output #1: loss = 0.000587129 (* 1 = 0.000587129 loss)\nI0818 22:07:11.610177 17344 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 22:09:29.665793 17344 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 22:10:52.305250 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:10:52.305573 17344 solver.cpp:404]     Test net output #1: loss = 56.4699 (* 1 = 56.4699 loss)\nI0818 22:10:53.644317 17344 solver.cpp:228] Iteration 47900, loss = 0.000702368\nI0818 22:10:53.644352 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:10:53.644367 17344 solver.cpp:244]     Train net output #1: loss = 0.000702511 (* 1 = 0.000702511 loss)\nI0818 22:10:53.714018 17344 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 22:13:11.777724 17344 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 22:14:34.433619 17344 solver.cpp:404]     Test net output 
#0: accuracy = 0.10016\nI0818 22:14:34.433910 17344 solver.cpp:404]     Test net output #1: loss = 62.5025 (* 1 = 62.5025 loss)\nI0818 22:14:35.772382 17344 solver.cpp:228] Iteration 48000, loss = 0.00080828\nI0818 22:14:35.772416 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:14:35.772431 17344 solver.cpp:244]     Train net output #1: loss = 0.000808423 (* 1 = 0.000808423 loss)\nI0818 22:14:35.846650 17344 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 22:16:54.453670 17344 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 22:18:17.962292 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:18:17.962560 17344 solver.cpp:404]     Test net output #1: loss = 62.1892 (* 1 = 62.1892 loss)\nI0818 22:18:19.304304 17344 solver.cpp:228] Iteration 48100, loss = 0.000566657\nI0818 22:18:19.304347 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:18:19.304364 17344 solver.cpp:244]     Train net output #1: loss = 0.0005668 (* 1 = 0.0005668 loss)\nI0818 22:18:19.371840 17344 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 22:20:37.982914 17344 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 22:22:01.524065 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:22:01.524345 17344 solver.cpp:404]     Test net output #1: loss = 64.3957 (* 1 = 64.3957 loss)\nI0818 22:22:02.865288 17344 solver.cpp:228] Iteration 48200, loss = 0.000891516\nI0818 22:22:02.865329 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:22:02.865345 17344 solver.cpp:244]     Train net output #1: loss = 0.000891659 (* 1 = 0.000891659 loss)\nI0818 22:22:02.933795 17344 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 22:24:21.606168 17344 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 22:25:45.132406 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:25:45.132652 17344 solver.cpp:404]     Test net output #1: loss = 73.7614 (* 1 = 73.7614 
loss)\nI0818 22:25:46.473801 17344 solver.cpp:228] Iteration 48300, loss = 0.000544009\nI0818 22:25:46.473846 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:25:46.473862 17344 solver.cpp:244]     Train net output #1: loss = 0.000544152 (* 1 = 0.000544152 loss)\nI0818 22:25:46.539047 17344 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 22:28:05.159060 17344 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 22:29:28.450204 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:29:28.450485 17344 solver.cpp:404]     Test net output #1: loss = 75.9347 (* 1 = 75.9347 loss)\nI0818 22:29:29.791726 17344 solver.cpp:228] Iteration 48400, loss = 0.000731664\nI0818 22:29:29.791770 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:29:29.791786 17344 solver.cpp:244]     Train net output #1: loss = 0.000731807 (* 1 = 0.000731807 loss)\nI0818 22:29:29.858597 17344 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 22:31:48.427011 17344 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 22:33:11.960491 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:33:11.960813 17344 solver.cpp:404]     Test net output #1: loss = 76.6663 (* 1 = 76.6663 loss)\nI0818 22:33:13.302139 17344 solver.cpp:228] Iteration 48500, loss = 0.000905271\nI0818 22:33:13.302182 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:33:13.302199 17344 solver.cpp:244]     Train net output #1: loss = 0.000905414 (* 1 = 0.000905414 loss)\nI0818 22:33:13.370846 17344 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 22:35:31.885540 17344 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 22:36:55.443085 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:36:55.443364 17344 solver.cpp:404]     Test net output #1: loss = 77.1476 (* 1 = 77.1476 loss)\nI0818 22:36:56.783985 17344 solver.cpp:228] Iteration 48600, loss = 0.000555823\nI0818 22:36:56.784030 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:36:56.784045 17344 solver.cpp:244]     Train net output #1: loss = 0.000555966 (* 1 = 0.000555966 loss)\nI0818 22:36:56.855455 17344 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 22:39:15.512879 17344 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:40:39.088099 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:40:39.088399 17344 solver.cpp:404]     Test net output #1: loss = 76.854 (* 1 = 76.854 loss)\nI0818 22:40:40.429139 17344 solver.cpp:228] Iteration 48700, loss = 0.000940372\nI0818 22:40:40.429181 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:40:40.429198 17344 solver.cpp:244]     Train net output #1: loss = 0.000940515 (* 1 = 0.000940515 loss)\nI0818 22:40:40.499809 17344 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 22:42:59.061710 17344 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 22:44:22.629025 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:44:22.629293 17344 solver.cpp:404]     Test net output #1: loss = 77.5576 (* 1 = 77.5576 loss)\nI0818 22:44:23.969995 17344 solver.cpp:228] Iteration 48800, loss = 0.000972123\nI0818 22:44:23.970038 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:44:23.970054 17344 solver.cpp:244]     Train net output #1: loss = 0.000972267 (* 1 = 0.000972267 loss)\nI0818 22:44:24.034052 17344 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 22:46:42.597326 17344 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:48:05.871354 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:48:05.871613 17344 solver.cpp:404]     Test net output #1: loss = 78.6169 (* 1 = 78.6169 loss)\nI0818 22:48:07.212783 17344 solver.cpp:228] Iteration 48900, loss = 0.00103797\nI0818 22:48:07.212829 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:48:07.212846 17344 solver.cpp:244]     Train net output #1: 
loss = 0.00103811 (* 1 = 0.00103811 loss)\nI0818 22:48:07.279228 17344 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 22:50:25.894625 17344 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:51:49.158335 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:51:49.158582 17344 solver.cpp:404]     Test net output #1: loss = 78.5889 (* 1 = 78.5889 loss)\nI0818 22:51:50.500349 17344 solver.cpp:228] Iteration 49000, loss = 0.000704084\nI0818 22:51:50.500391 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:51:50.500406 17344 solver.cpp:244]     Train net output #1: loss = 0.000704227 (* 1 = 0.000704227 loss)\nI0818 22:51:50.566406 17344 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 22:54:09.080859 17344 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 22:55:32.420403 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 22:55:32.420670 17344 solver.cpp:404]     Test net output #1: loss = 78.6169 (* 1 = 78.6169 loss)\nI0818 22:55:33.761107 17344 solver.cpp:228] Iteration 49100, loss = 0.000465594\nI0818 22:55:33.761152 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:55:33.761168 17344 solver.cpp:244]     Train net output #1: loss = 0.000465737 (* 1 = 0.000465737 loss)\nI0818 22:55:33.826941 17344 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 22:57:52.435317 17344 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:59:15.706383 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0818 22:59:15.706634 17344 solver.cpp:404]     Test net output #1: loss = 78.5889 (* 1 = 78.5889 loss)\nI0818 22:59:17.047493 17344 solver.cpp:228] Iteration 49200, loss = 0.000691405\nI0818 22:59:17.047536 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:59:17.047552 17344 solver.cpp:244]     Train net output #1: loss = 0.000691548 (* 1 = 0.000691548 loss)\nI0818 22:59:17.117221 17344 sgd_solver.cpp:166] Iteration 49200, lr = 
0.35\nI0818 23:01:35.684687 17344 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 23:02:58.890417 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0818 23:02:58.890684 17344 solver.cpp:404]     Test net output #1: loss = 78.2161 (* 1 = 78.2161 loss)\nI0818 23:03:00.232367 17344 solver.cpp:228] Iteration 49300, loss = 0.000893332\nI0818 23:03:00.232409 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:03:00.232425 17344 solver.cpp:244]     Train net output #1: loss = 0.000893475 (* 1 = 0.000893475 loss)\nI0818 23:03:00.290997 17344 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 23:05:18.910488 17344 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 23:06:42.480299 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10132\nI0818 23:06:42.480571 17344 solver.cpp:404]     Test net output #1: loss = 71.1136 (* 1 = 71.1136 loss)\nI0818 23:06:43.820870 17344 solver.cpp:228] Iteration 49400, loss = 0.00077819\nI0818 23:06:43.820911 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:06:43.820929 17344 solver.cpp:244]     Train net output #1: loss = 0.000778333 (* 1 = 0.000778333 loss)\nI0818 23:06:43.881872 17344 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 23:09:02.372653 17344 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 23:10:25.976361 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0818 23:10:25.976655 17344 solver.cpp:404]     Test net output #1: loss = 78.5715 (* 1 = 78.5715 loss)\nI0818 23:10:27.322057 17344 solver.cpp:228] Iteration 49500, loss = 1.47589\nI0818 23:10:27.322098 17344 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI0818 23:10:27.322113 17344 solver.cpp:244]     Train net output #1: loss = 1.47589 (* 1 = 1.47589 loss)\nI0818 23:10:27.387753 17344 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 23:12:45.863988 17344 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 23:14:09.448225 17344 solver.cpp:404]    
 Test net output #0: accuracy = 0.09964\nI0818 23:14:09.448524 17344 solver.cpp:404]     Test net output #1: loss = 78.6344 (* 1 = 78.6344 loss)\nI0818 23:14:10.788868 17344 solver.cpp:228] Iteration 49600, loss = 0.680468\nI0818 23:14:10.788907 17344 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0818 23:14:10.788921 17344 solver.cpp:244]     Train net output #1: loss = 0.680467 (* 1 = 0.680467 loss)\nI0818 23:14:10.857079 17344 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 23:16:29.327965 17344 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 23:17:52.915397 17344 solver.cpp:404]     Test net output #0: accuracy = 0.07492\nI0818 23:17:52.915699 17344 solver.cpp:404]     Test net output #1: loss = 33.2878 (* 1 = 33.2878 loss)\nI0818 23:17:54.256762 17344 solver.cpp:228] Iteration 49700, loss = 0.490177\nI0818 23:17:54.256803 17344 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 23:17:54.256817 17344 solver.cpp:244]     Train net output #1: loss = 0.490176 (* 1 = 0.490176 loss)\nI0818 23:17:54.320560 17344 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 23:20:12.797106 17344 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 23:21:36.300815 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1372\nI0818 23:21:36.301080 17344 solver.cpp:404]     Test net output #1: loss = 19.3857 (* 1 = 19.3857 loss)\nI0818 23:21:37.641695 17344 solver.cpp:228] Iteration 49800, loss = 0.443111\nI0818 23:21:37.641733 17344 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 23:21:37.641749 17344 solver.cpp:244]     Train net output #1: loss = 0.44311 (* 1 = 0.44311 loss)\nI0818 23:21:37.709925 17344 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 23:23:56.179157 17344 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 23:25:19.449684 17344 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0818 23:25:19.449961 17344 solver.cpp:404]     Test net output #1: loss = 18.7514 (* 1 = 18.7514 
loss)\nI0818 23:25:20.789945 17344 solver.cpp:228] Iteration 49900, loss = 0.274531\nI0818 23:25:20.789990 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 23:25:20.790010 17344 solver.cpp:244]     Train net output #1: loss = 0.274531 (* 1 = 0.274531 loss)\nI0818 23:25:20.855024 17344 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 23:27:39.346931 17344 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 23:29:02.790119 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10088\nI0818 23:29:02.790375 17344 solver.cpp:404]     Test net output #1: loss = 19.2045 (* 1 = 19.2045 loss)\nI0818 23:29:04.130627 17344 solver.cpp:228] Iteration 50000, loss = 0.221383\nI0818 23:29:04.130666 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 23:29:04.130681 17344 solver.cpp:244]     Train net output #1: loss = 0.221383 (* 1 = 0.221383 loss)\nI0818 23:29:04.191625 17344 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 23:29:04.191651 17344 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 23:31:22.658360 17344 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 23:32:46.050424 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10584\nI0818 23:32:46.050763 17344 solver.cpp:404]     Test net output #1: loss = 15.6881 (* 1 = 15.6881 loss)\nI0818 23:32:47.391073 17344 solver.cpp:228] Iteration 50100, loss = 0.175974\nI0818 23:32:47.391113 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:32:47.391129 17344 solver.cpp:244]     Train net output #1: loss = 0.175974 (* 1 = 0.175974 loss)\nI0818 23:32:47.451936 17344 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 23:35:05.930851 17344 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 23:36:29.475324 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12276\nI0818 23:36:29.475606 17344 solver.cpp:404]     Test net output #1: loss = 12.5439 (* 1 = 12.5439 loss)\nI0818 23:36:30.816020 17344 
solver.cpp:228] Iteration 50200, loss = 0.216932\nI0818 23:36:30.816059 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:36:30.816074 17344 solver.cpp:244]     Train net output #1: loss = 0.216931 (* 1 = 0.216931 loss)\nI0818 23:36:30.876106 17344 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 23:38:49.379050 17344 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:40:12.943372 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12128\nI0818 23:40:12.943661 17344 solver.cpp:404]     Test net output #1: loss = 11.315 (* 1 = 11.315 loss)\nI0818 23:40:14.284536 17344 solver.cpp:228] Iteration 50300, loss = 0.162308\nI0818 23:40:14.284575 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:40:14.284591 17344 solver.cpp:244]     Train net output #1: loss = 0.162307 (* 1 = 0.162307 loss)\nI0818 23:40:14.354260 17344 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 23:42:32.815462 17344 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:43:56.359174 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12944\nI0818 23:43:56.359468 17344 solver.cpp:404]     Test net output #1: loss = 10.7098 (* 1 = 10.7098 loss)\nI0818 23:43:57.699779 17344 solver.cpp:228] Iteration 50400, loss = 0.155147\nI0818 23:43:57.699821 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:43:57.699837 17344 solver.cpp:244]     Train net output #1: loss = 0.155147 (* 1 = 0.155147 loss)\nI0818 23:43:57.766849 17344 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 23:46:16.235388 17344 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:47:39.475216 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15752\nI0818 23:47:39.475494 17344 solver.cpp:404]     Test net output #1: loss = 10.103 (* 1 = 10.103 loss)\nI0818 23:47:40.816509 17344 solver.cpp:228] Iteration 50500, loss = 0.13369\nI0818 23:47:40.816546 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI0818 23:47:40.816562 17344 solver.cpp:244]     Train net output #1: loss = 0.133689 (* 1 = 0.133689 loss)\nI0818 23:47:40.886389 17344 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 23:49:59.543391 17344 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:51:22.851482 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1612\nI0818 23:51:22.851789 17344 solver.cpp:404]     Test net output #1: loss = 9.42963 (* 1 = 9.42963 loss)\nI0818 23:51:24.193315 17344 solver.cpp:228] Iteration 50600, loss = 0.129909\nI0818 23:51:24.193357 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:51:24.193373 17344 solver.cpp:244]     Train net output #1: loss = 0.129909 (* 1 = 0.129909 loss)\nI0818 23:51:24.253983 17344 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 23:53:42.967234 17344 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0818 23:55:06.356333 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18408\nI0818 23:55:06.356592 17344 solver.cpp:404]     Test net output #1: loss = 8.03675 (* 1 = 8.03675 loss)\nI0818 23:55:07.698339 17344 solver.cpp:228] Iteration 50700, loss = 0.119945\nI0818 23:55:07.698381 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:55:07.698396 17344 solver.cpp:244]     Train net output #1: loss = 0.119944 (* 1 = 0.119944 loss)\nI0818 23:55:07.776424 17344 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 23:57:26.506355 17344 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:58:49.707939 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21156\nI0818 23:58:49.708245 17344 solver.cpp:404]     Test net output #1: loss = 7.42969 (* 1 = 7.42969 loss)\nI0818 23:58:51.050565 17344 solver.cpp:228] Iteration 50800, loss = 0.12449\nI0818 23:58:51.050608 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:58:51.050623 17344 solver.cpp:244]     Train net output #1: loss = 0.12449 (* 1 = 0.12449 loss)\nI0818 23:58:51.116724 
17344 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0819 00:01:09.728924 17344 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0819 00:02:33.017652 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23032\nI0819 00:02:33.017933 17344 solver.cpp:404]     Test net output #1: loss = 5.98307 (* 1 = 5.98307 loss)\nI0819 00:02:34.360031 17344 solver.cpp:228] Iteration 50900, loss = 0.117666\nI0819 00:02:34.360075 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:02:34.360091 17344 solver.cpp:244]     Train net output #1: loss = 0.117665 (* 1 = 0.117665 loss)\nI0819 00:02:34.422555 17344 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0819 00:04:53.203249 17344 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0819 00:06:16.458163 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25396\nI0819 00:06:16.458426 17344 solver.cpp:404]     Test net output #1: loss = 5.00221 (* 1 = 5.00221 loss)\nI0819 00:06:17.803063 17344 solver.cpp:228] Iteration 51000, loss = 0.10627\nI0819 00:06:17.803107 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:06:17.803122 17344 solver.cpp:244]     Train net output #1: loss = 0.10627 (* 1 = 0.10627 loss)\nI0819 00:06:17.874053 17344 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0819 00:08:36.854482 17344 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0819 00:10:00.310752 17344 solver.cpp:404]     Test net output #0: accuracy = 0.275\nI0819 00:10:00.311020 17344 solver.cpp:404]     Test net output #1: loss = 4.71325 (* 1 = 4.71325 loss)\nI0819 00:10:01.656059 17344 solver.cpp:228] Iteration 51100, loss = 0.094625\nI0819 00:10:01.656096 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:10:01.656112 17344 solver.cpp:244]     Train net output #1: loss = 0.0946244 (* 1 = 0.0946244 loss)\nI0819 00:10:01.728750 17344 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0819 00:12:20.600653 17344 solver.cpp:337] Iteration 51200, Testing net 
(#0)\nI0819 00:13:44.130273 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31176\nI0819 00:13:44.130522 17344 solver.cpp:404]     Test net output #1: loss = 4.13526 (* 1 = 4.13526 loss)\nI0819 00:13:45.473506 17344 solver.cpp:228] Iteration 51200, loss = 0.0937778\nI0819 00:13:45.473549 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:13:45.473565 17344 solver.cpp:244]     Train net output #1: loss = 0.0937773 (* 1 = 0.0937773 loss)\nI0819 00:13:45.546588 17344 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0819 00:16:04.365494 17344 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0819 00:17:27.595654 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35328\nI0819 00:17:27.595903 17344 solver.cpp:404]     Test net output #1: loss = 3.68793 (* 1 = 3.68793 loss)\nI0819 00:17:28.939031 17344 solver.cpp:228] Iteration 51300, loss = 0.0903888\nI0819 00:17:28.939074 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:17:28.939091 17344 solver.cpp:244]     Train net output #1: loss = 0.0903882 (* 1 = 0.0903882 loss)\nI0819 00:17:29.014190 17344 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0819 00:19:47.819651 17344 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 00:21:10.963526 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37556\nI0819 00:21:10.963790 17344 solver.cpp:404]     Test net output #1: loss = 3.5733 (* 1 = 3.5733 loss)\nI0819 00:21:12.307343 17344 solver.cpp:228] Iteration 51400, loss = 0.0600041\nI0819 00:21:12.307387 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:21:12.307404 17344 solver.cpp:244]     Train net output #1: loss = 0.0600035 (* 1 = 0.0600035 loss)\nI0819 00:21:12.373054 17344 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0819 00:23:31.190904 17344 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 00:24:54.341881 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39912\nI0819 00:24:54.342149 17344 
solver.cpp:404]     Test net output #1: loss = 3.36696 (* 1 = 3.36696 loss)\nI0819 00:24:55.685320 17344 solver.cpp:228] Iteration 51500, loss = 0.0548711\nI0819 00:24:55.685362 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:24:55.685379 17344 solver.cpp:244]     Train net output #1: loss = 0.0548706 (* 1 = 0.0548706 loss)\nI0819 00:24:55.753345 17344 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0819 00:27:14.564718 17344 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 00:28:37.843811 17344 solver.cpp:404]     Test net output #0: accuracy = 0.40356\nI0819 00:28:37.844072 17344 solver.cpp:404]     Test net output #1: loss = 3.3703 (* 1 = 3.3703 loss)\nI0819 00:28:39.187698 17344 solver.cpp:228] Iteration 51600, loss = 0.0774857\nI0819 00:28:39.187742 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:28:39.187758 17344 solver.cpp:244]     Train net output #1: loss = 0.0774851 (* 1 = 0.0774851 loss)\nI0819 00:28:39.259421 17344 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0819 00:30:58.069481 17344 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:32:21.621263 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49172\nI0819 00:32:21.621587 17344 solver.cpp:404]     Test net output #1: loss = 2.52079 (* 1 = 2.52079 loss)\nI0819 00:32:22.965157 17344 solver.cpp:228] Iteration 51700, loss = 0.0767273\nI0819 00:32:22.965199 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:32:22.965215 17344 solver.cpp:244]     Train net output #1: loss = 0.0767267 (* 1 = 0.0767267 loss)\nI0819 00:32:23.041280 17344 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0819 00:34:41.848152 17344 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:36:05.402638 17344 solver.cpp:404]     Test net output #0: accuracy = 0.48044\nI0819 00:36:05.402950 17344 solver.cpp:404]     Test net output #1: loss = 2.72897 (* 1 = 2.72897 loss)\nI0819 00:36:06.746525 17344 solver.cpp:228] 
Iteration 51800, loss = 0.0551626\nI0819 00:36:06.746565 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:36:06.746582 17344 solver.cpp:244]     Train net output #1: loss = 0.055162 (* 1 = 0.055162 loss)\nI0819 00:36:06.821285 17344 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0819 00:38:25.598176 17344 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 00:39:48.976372 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53568\nI0819 00:39:48.976677 17344 solver.cpp:404]     Test net output #1: loss = 2.37088 (* 1 = 2.37088 loss)\nI0819 00:39:50.319905 17344 solver.cpp:228] Iteration 51900, loss = 0.0448961\nI0819 00:39:50.319943 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:39:50.319959 17344 solver.cpp:244]     Train net output #1: loss = 0.0448955 (* 1 = 0.0448955 loss)\nI0819 00:39:50.387161 17344 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0819 00:42:09.181002 17344 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:43:32.232347 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57276\nI0819 00:43:32.232637 17344 solver.cpp:404]     Test net output #1: loss = 2.1765 (* 1 = 2.1765 loss)\nI0819 00:43:33.576011 17344 solver.cpp:228] Iteration 52000, loss = 0.0785406\nI0819 00:43:33.576051 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:43:33.576066 17344 solver.cpp:244]     Train net output #1: loss = 0.07854 (* 1 = 0.07854 loss)\nI0819 00:43:33.643223 17344 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0819 00:45:52.358906 17344 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:47:15.266396 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63884\nI0819 00:47:15.266705 17344 solver.cpp:404]     Test net output #1: loss = 1.67636 (* 1 = 1.67636 loss)\nI0819 00:47:16.610131 17344 solver.cpp:228] Iteration 52100, loss = 0.069103\nI0819 00:47:16.610172 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 
00:47:16.610188 17344 solver.cpp:244]     Train net output #1: loss = 0.0691025 (* 1 = 0.0691025 loss)\nI0819 00:47:16.681677 17344 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0819 00:49:35.416497 17344 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:50:58.338891 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67364\nI0819 00:50:58.339215 17344 solver.cpp:404]     Test net output #1: loss = 1.47047 (* 1 = 1.47047 loss)\nI0819 00:50:59.682832 17344 solver.cpp:228] Iteration 52200, loss = 0.0386372\nI0819 00:50:59.682870 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:50:59.682885 17344 solver.cpp:244]     Train net output #1: loss = 0.0386366 (* 1 = 0.0386366 loss)\nI0819 00:50:59.756505 17344 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0819 00:53:18.473870 17344 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:54:41.885712 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70136\nI0819 00:54:41.885983 17344 solver.cpp:404]     Test net output #1: loss = 1.35881 (* 1 = 1.35881 loss)\nI0819 00:54:43.228830 17344 solver.cpp:228] Iteration 52300, loss = 0.0526583\nI0819 00:54:43.228873 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:54:43.228888 17344 solver.cpp:244]     Train net output #1: loss = 0.0526577 (* 1 = 0.0526577 loss)\nI0819 00:54:43.295542 17344 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0819 00:57:02.087435 17344 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 00:58:25.666456 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71492\nI0819 00:58:25.666723 17344 solver.cpp:404]     Test net output #1: loss = 1.26631 (* 1 = 1.26631 loss)\nI0819 00:58:27.010432 17344 solver.cpp:228] Iteration 52400, loss = 0.0522138\nI0819 00:58:27.010473 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:58:27.010488 17344 solver.cpp:244]     Train net output #1: loss = 0.0522132 (* 1 = 0.0522132 loss)\nI0819 00:58:27.082197 
17344 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0819 01:00:46.013303 17344 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 01:02:09.610330 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73916\nI0819 01:02:09.610659 17344 solver.cpp:404]     Test net output #1: loss = 1.17835 (* 1 = 1.17835 loss)\nI0819 01:02:10.955286 17344 solver.cpp:228] Iteration 52500, loss = 0.0349274\nI0819 01:02:10.955327 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:02:10.955343 17344 solver.cpp:244]     Train net output #1: loss = 0.0349269 (* 1 = 0.0349269 loss)\nI0819 01:02:11.026477 17344 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0819 01:04:30.000967 17344 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 01:05:53.577281 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74752\nI0819 01:05:53.577556 17344 solver.cpp:404]     Test net output #1: loss = 1.14147 (* 1 = 1.14147 loss)\nI0819 01:05:54.922809 17344 solver.cpp:228] Iteration 52600, loss = 0.0306744\nI0819 01:05:54.922855 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:05:54.922879 17344 solver.cpp:244]     Train net output #1: loss = 0.0306739 (* 1 = 0.0306739 loss)\nI0819 01:05:54.994674 17344 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0819 01:08:13.964805 17344 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 01:09:37.523138 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72944\nI0819 01:09:37.523427 17344 solver.cpp:404]     Test net output #1: loss = 1.31522 (* 1 = 1.31522 loss)\nI0819 01:09:38.869388 17344 solver.cpp:228] Iteration 52700, loss = 0.0499619\nI0819 01:09:38.869429 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:09:38.869444 17344 solver.cpp:244]     Train net output #1: loss = 0.0499613 (* 1 = 0.0499613 loss)\nI0819 01:09:38.938412 17344 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0819 01:11:57.879305 17344 solver.cpp:337] Iteration 52800, Testing 
net (#0)\nI0819 01:13:21.478060 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73376\nI0819 01:13:21.478404 17344 solver.cpp:404]     Test net output #1: loss = 1.27039 (* 1 = 1.27039 loss)\nI0819 01:13:22.825170 17344 solver.cpp:228] Iteration 52800, loss = 0.0310991\nI0819 01:13:22.825217 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:13:22.825232 17344 solver.cpp:244]     Train net output #1: loss = 0.0310985 (* 1 = 0.0310985 loss)\nI0819 01:13:22.894734 17344 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0819 01:15:41.828323 17344 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 01:17:05.436136 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76148\nI0819 01:17:05.436432 17344 solver.cpp:404]     Test net output #1: loss = 1.14349 (* 1 = 1.14349 loss)\nI0819 01:17:06.781066 17344 solver.cpp:228] Iteration 52900, loss = 0.0230161\nI0819 01:17:06.781111 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:17:06.781126 17344 solver.cpp:244]     Train net output #1: loss = 0.0230155 (* 1 = 0.0230155 loss)\nI0819 01:17:06.849107 17344 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0819 01:19:25.800531 17344 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 01:20:49.165314 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78068\nI0819 01:20:49.165577 17344 solver.cpp:404]     Test net output #1: loss = 1.03175 (* 1 = 1.03175 loss)\nI0819 01:20:50.509620 17344 solver.cpp:228] Iteration 53000, loss = 0.0410776\nI0819 01:20:50.509662 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:20:50.509677 17344 solver.cpp:244]     Train net output #1: loss = 0.041077 (* 1 = 0.041077 loss)\nI0819 01:20:50.582128 17344 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0819 01:23:09.558676 17344 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 01:24:32.754158 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78476\nI0819 01:24:32.754416 17344 
solver.cpp:404]     Test net output #1: loss = 0.994419 (* 1 = 0.994419 loss)\nI0819 01:24:34.098425 17344 solver.cpp:228] Iteration 53100, loss = 0.0219613\nI0819 01:24:34.098469 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:24:34.098492 17344 solver.cpp:244]     Train net output #1: loss = 0.0219608 (* 1 = 0.0219608 loss)\nI0819 01:24:34.172286 17344 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0819 01:26:53.200107 17344 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 01:28:16.776772 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78796\nI0819 01:28:16.777046 17344 solver.cpp:404]     Test net output #1: loss = 1.0205 (* 1 = 1.0205 loss)\nI0819 01:28:18.121596 17344 solver.cpp:228] Iteration 53200, loss = 0.0253412\nI0819 01:28:18.121639 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:28:18.121662 17344 solver.cpp:244]     Train net output #1: loss = 0.0253406 (* 1 = 0.0253406 loss)\nI0819 01:28:18.190979 17344 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0819 01:30:37.103950 17344 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 01:32:00.506021 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78696\nI0819 01:32:00.506295 17344 solver.cpp:404]     Test net output #1: loss = 1.04044 (* 1 = 1.04044 loss)\nI0819 01:32:01.849786 17344 solver.cpp:228] Iteration 53300, loss = 0.0200822\nI0819 01:32:01.849831 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:32:01.849854 17344 solver.cpp:244]     Train net output #1: loss = 0.0200816 (* 1 = 0.0200816 loss)\nI0819 01:32:01.923635 17344 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0819 01:34:20.926271 17344 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:35:44.507586 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78588\nI0819 01:35:44.507851 17344 solver.cpp:404]     Test net output #1: loss = 1.06445 (* 1 = 1.06445 loss)\nI0819 01:35:45.853189 17344 solver.cpp:228] Iteration 
53400, loss = 0.0189924\nI0819 01:35:45.853235 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:35:45.853257 17344 solver.cpp:244]     Train net output #1: loss = 0.0189919 (* 1 = 0.0189919 loss)\nI0819 01:35:45.923394 17344 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0819 01:38:04.924227 17344 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:39:28.315924 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80032\nI0819 01:39:28.316205 17344 solver.cpp:404]     Test net output #1: loss = 0.983033 (* 1 = 0.983033 loss)\nI0819 01:39:29.659510 17344 solver.cpp:228] Iteration 53500, loss = 0.0171267\nI0819 01:39:29.659551 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:39:29.659567 17344 solver.cpp:244]     Train net output #1: loss = 0.0171261 (* 1 = 0.0171261 loss)\nI0819 01:39:29.731921 17344 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0819 01:41:48.581229 17344 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:43:12.099020 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79176\nI0819 01:43:12.099349 17344 solver.cpp:404]     Test net output #1: loss = 1.05283 (* 1 = 1.05283 loss)\nI0819 01:43:13.443876 17344 solver.cpp:228] Iteration 53600, loss = 0.0172947\nI0819 01:43:13.443915 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:43:13.443931 17344 solver.cpp:244]     Train net output #1: loss = 0.0172942 (* 1 = 0.0172942 loss)\nI0819 01:43:13.511857 17344 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0819 01:45:32.374349 17344 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:46:55.887816 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79088\nI0819 01:46:55.888106 17344 solver.cpp:404]     Test net output #1: loss = 1.06712 (* 1 = 1.06712 loss)\nI0819 01:46:57.231878 17344 solver.cpp:228] Iteration 53700, loss = 0.0145642\nI0819 01:46:57.231920 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:46:57.231935 
17344 solver.cpp:244]     Train net output #1: loss = 0.0145636 (* 1 = 0.0145636 loss)\nI0819 01:46:57.300488 17344 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0819 01:49:16.130645 17344 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:50:39.587219 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79428\nI0819 01:50:39.587523 17344 solver.cpp:404]     Test net output #1: loss = 1.0437 (* 1 = 1.0437 loss)\nI0819 01:50:40.932463 17344 solver.cpp:228] Iteration 53800, loss = 0.0190817\nI0819 01:50:40.932502 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:50:40.932518 17344 solver.cpp:244]     Train net output #1: loss = 0.0190811 (* 1 = 0.0190811 loss)\nI0819 01:50:41.005892 17344 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0819 01:53:00.016526 17344 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:54:23.566321 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79772\nI0819 01:54:23.566588 17344 solver.cpp:404]     Test net output #1: loss = 1.0319 (* 1 = 1.0319 loss)\nI0819 01:54:24.911510 17344 solver.cpp:228] Iteration 53900, loss = 0.0212697\nI0819 01:54:24.911556 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:54:24.911571 17344 solver.cpp:244]     Train net output #1: loss = 0.0212692 (* 1 = 0.0212692 loss)\nI0819 01:54:24.981381 17344 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0819 01:56:43.920812 17344 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:58:07.128545 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80044\nI0819 01:58:07.128794 17344 solver.cpp:404]     Test net output #1: loss = 1.0339 (* 1 = 1.0339 loss)\nI0819 01:58:08.472085 17344 solver.cpp:228] Iteration 54000, loss = 0.00815327\nI0819 01:58:08.472129 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:58:08.472146 17344 solver.cpp:244]     Train net output #1: loss = 0.0081527 (* 1 = 0.0081527 loss)\nI0819 01:58:08.545400 17344 sgd_solver.cpp:166] 
Iteration 54000, lr = 0.035\nI0819 02:00:27.508214 17344 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 02:01:51.060988 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79768\nI0819 02:01:51.061317 17344 solver.cpp:404]     Test net output #1: loss = 1.06113 (* 1 = 1.06113 loss)\nI0819 02:01:52.406137 17344 solver.cpp:228] Iteration 54100, loss = 0.0182318\nI0819 02:01:52.406182 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:01:52.406198 17344 solver.cpp:244]     Train net output #1: loss = 0.0182313 (* 1 = 0.0182313 loss)\nI0819 02:01:52.476728 17344 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0819 02:04:11.365530 17344 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 02:05:34.867511 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79848\nI0819 02:05:34.867780 17344 solver.cpp:404]     Test net output #1: loss = 1.08431 (* 1 = 1.08431 loss)\nI0819 02:05:36.212654 17344 solver.cpp:228] Iteration 54200, loss = 0.0123884\nI0819 02:05:36.212695 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:05:36.212710 17344 solver.cpp:244]     Train net output #1: loss = 0.0123878 (* 1 = 0.0123878 loss)\nI0819 02:05:36.281275 17344 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0819 02:07:55.286480 17344 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 02:09:18.520808 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78444\nI0819 02:09:18.521114 17344 solver.cpp:404]     Test net output #1: loss = 1.21924 (* 1 = 1.21924 loss)\nI0819 02:09:19.864935 17344 solver.cpp:228] Iteration 54300, loss = 0.0100824\nI0819 02:09:19.864979 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:09:19.865003 17344 solver.cpp:244]     Train net output #1: loss = 0.0100818 (* 1 = 0.0100818 loss)\nI0819 02:09:19.934447 17344 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0819 02:11:38.794580 17344 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 02:13:02.065764 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.79776\nI0819 02:13:02.066083 17344 solver.cpp:404]     Test net output #1: loss = 1.13797 (* 1 = 1.13797 loss)\nI0819 02:13:03.409930 17344 solver.cpp:228] Iteration 54400, loss = 0.011335\nI0819 02:13:03.409974 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:13:03.409991 17344 solver.cpp:244]     Train net output #1: loss = 0.0113344 (* 1 = 0.0113344 loss)\nI0819 02:13:03.480032 17344 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0819 02:15:22.417601 17344 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 02:16:45.674194 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80724\nI0819 02:16:45.674496 17344 solver.cpp:404]     Test net output #1: loss = 1.07238 (* 1 = 1.07238 loss)\nI0819 02:16:47.019022 17344 solver.cpp:228] Iteration 54500, loss = 0.0152791\nI0819 02:16:47.019068 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:16:47.019083 17344 solver.cpp:244]     Train net output #1: loss = 0.0152786 (* 1 = 0.0152786 loss)\nI0819 02:16:47.087368 17344 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0819 02:19:05.938120 17344 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 02:20:29.214478 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78664\nI0819 02:20:29.214802 17344 solver.cpp:404]     Test net output #1: loss = 1.24378 (* 1 = 1.24378 loss)\nI0819 02:20:30.559211 17344 solver.cpp:228] Iteration 54600, loss = 0.00972183\nI0819 02:20:30.559255 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:20:30.559272 17344 solver.cpp:244]     Train net output #1: loss = 0.00972126 (* 1 = 0.00972126 loss)\nI0819 02:20:30.635582 17344 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0819 02:22:49.577354 17344 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 02:24:13.209125 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78604\nI0819 02:24:13.209476 17344 solver.cpp:404]     Test net output #1: loss = 
1.25844 (* 1 = 1.25844 loss)\nI0819 02:24:14.552654 17344 solver.cpp:228] Iteration 54700, loss = 0.0123082\nI0819 02:24:14.552696 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:24:14.552712 17344 solver.cpp:244]     Train net output #1: loss = 0.0123077 (* 1 = 0.0123077 loss)\nI0819 02:24:14.625959 17344 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0819 02:26:33.518220 17344 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 02:27:57.196766 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78964\nI0819 02:27:57.197147 17344 solver.cpp:404]     Test net output #1: loss = 1.22239 (* 1 = 1.22239 loss)\nI0819 02:27:58.539988 17344 solver.cpp:228] Iteration 54800, loss = 0.010595\nI0819 02:27:58.540033 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:27:58.540050 17344 solver.cpp:244]     Train net output #1: loss = 0.0105944 (* 1 = 0.0105944 loss)\nI0819 02:27:58.609812 17344 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0819 02:30:17.359395 17344 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 02:31:41.057003 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7744\nI0819 02:31:41.057369 17344 solver.cpp:404]     Test net output #1: loss = 1.37501 (* 1 = 1.37501 loss)\nI0819 02:31:42.401160 17344 solver.cpp:228] Iteration 54900, loss = 0.00930488\nI0819 02:31:42.401206 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:31:42.401222 17344 solver.cpp:244]     Train net output #1: loss = 0.00930432 (* 1 = 0.00930432 loss)\nI0819 02:31:42.476593 17344 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0819 02:34:01.390676 17344 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 02:35:25.068503 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76504\nI0819 02:35:25.068850 17344 solver.cpp:404]     Test net output #1: loss = 1.45452 (* 1 = 1.45452 loss)\nI0819 02:35:26.411999 17344 solver.cpp:228] Iteration 55000, loss = 0.00873171\nI0819 02:35:26.412042 
17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:35:26.412058 17344 solver.cpp:244]     Train net output #1: loss = 0.00873114 (* 1 = 0.00873114 loss)\nI0819 02:35:26.478715 17344 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0819 02:37:45.444129 17344 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 02:39:09.127478 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7538\nI0819 02:39:09.127836 17344 solver.cpp:404]     Test net output #1: loss = 1.57448 (* 1 = 1.57448 loss)\nI0819 02:39:10.472532 17344 solver.cpp:228] Iteration 55100, loss = 0.00764265\nI0819 02:39:10.472573 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:39:10.472589 17344 solver.cpp:244]     Train net output #1: loss = 0.00764209 (* 1 = 0.00764209 loss)\nI0819 02:39:10.546067 17344 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0819 02:41:29.394884 17344 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:42:53.142540 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76532\nI0819 02:42:53.142882 17344 solver.cpp:404]     Test net output #1: loss = 1.47071 (* 1 = 1.47071 loss)\nI0819 02:42:54.486311 17344 solver.cpp:228] Iteration 55200, loss = 0.0110961\nI0819 02:42:54.486354 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:42:54.486371 17344 solver.cpp:244]     Train net output #1: loss = 0.0110955 (* 1 = 0.0110955 loss)\nI0819 02:42:54.555692 17344 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0819 02:45:13.475600 17344 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:46:37.210566 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76656\nI0819 02:46:37.210937 17344 solver.cpp:404]     Test net output #1: loss = 1.44814 (* 1 = 1.44814 loss)\nI0819 02:46:38.554390 17344 solver.cpp:228] Iteration 55300, loss = 0.00987544\nI0819 02:46:38.554432 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:46:38.554450 17344 solver.cpp:244]     Train net output #1: 
loss = 0.00987488 (* 1 = 0.00987488 loss)\nI0819 02:46:38.626245 17344 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0819 02:48:57.634699 17344 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:50:21.374331 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76136\nI0819 02:50:21.374670 17344 solver.cpp:404]     Test net output #1: loss = 1.48867 (* 1 = 1.48867 loss)\nI0819 02:50:22.719306 17344 solver.cpp:228] Iteration 55400, loss = 0.0112629\nI0819 02:50:22.719347 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:50:22.719362 17344 solver.cpp:244]     Train net output #1: loss = 0.0112623 (* 1 = 0.0112623 loss)\nI0819 02:50:22.787685 17344 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0819 02:52:41.600502 17344 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 02:54:05.279330 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76188\nI0819 02:54:05.279705 17344 solver.cpp:404]     Test net output #1: loss = 1.51348 (* 1 = 1.51348 loss)\nI0819 02:54:06.623250 17344 solver.cpp:228] Iteration 55500, loss = 0.00883199\nI0819 02:54:06.623291 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:54:06.623306 17344 solver.cpp:244]     Train net output #1: loss = 0.00883143 (* 1 = 0.00883143 loss)\nI0819 02:54:06.693176 17344 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0819 02:56:25.630269 17344 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:57:49.360735 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75648\nI0819 02:57:49.361114 17344 solver.cpp:404]     Test net output #1: loss = 1.55504 (* 1 = 1.55504 loss)\nI0819 02:57:50.705687 17344 solver.cpp:228] Iteration 55600, loss = 0.00853188\nI0819 02:57:50.705724 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:57:50.705739 17344 solver.cpp:244]     Train net output #1: loss = 0.00853132 (* 1 = 0.00853132 loss)\nI0819 02:57:50.778813 17344 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0819 
03:00:09.602780 17344 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 03:01:33.377003 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7534\nI0819 03:01:33.377346 17344 solver.cpp:404]     Test net output #1: loss = 1.59424 (* 1 = 1.59424 loss)\nI0819 03:01:34.721606 17344 solver.cpp:228] Iteration 55700, loss = 0.00802999\nI0819 03:01:34.721645 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:01:34.721660 17344 solver.cpp:244]     Train net output #1: loss = 0.00802943 (* 1 = 0.00802943 loss)\nI0819 03:01:34.787418 17344 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0819 03:03:53.647212 17344 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 03:05:17.263916 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75348\nI0819 03:05:17.264292 17344 solver.cpp:404]     Test net output #1: loss = 1.6041 (* 1 = 1.6041 loss)\nI0819 03:05:18.606048 17344 solver.cpp:228] Iteration 55800, loss = 0.0086298\nI0819 03:05:18.606093 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:05:18.606117 17344 solver.cpp:244]     Train net output #1: loss = 0.00862924 (* 1 = 0.00862924 loss)\nI0819 03:05:18.675817 17344 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0819 03:07:37.577980 17344 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 03:09:01.295328 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75888\nI0819 03:09:01.295694 17344 solver.cpp:404]     Test net output #1: loss = 1.53779 (* 1 = 1.53779 loss)\nI0819 03:09:02.640128 17344 solver.cpp:228] Iteration 55900, loss = 0.0064813\nI0819 03:09:02.640171 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:09:02.640195 17344 solver.cpp:244]     Train net output #1: loss = 0.00648074 (* 1 = 0.00648074 loss)\nI0819 03:09:02.705803 17344 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0819 03:11:21.665280 17344 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 03:12:45.363742 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.75508\nI0819 03:12:45.364111 17344 solver.cpp:404]     Test net output #1: loss = 1.6039 (* 1 = 1.6039 loss)\nI0819 03:12:46.708616 17344 solver.cpp:228] Iteration 56000, loss = 0.00512627\nI0819 03:12:46.708662 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:12:46.708686 17344 solver.cpp:244]     Train net output #1: loss = 0.00512571 (* 1 = 0.00512571 loss)\nI0819 03:12:46.778970 17344 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0819 03:15:05.714994 17344 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 03:16:29.446022 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74476\nI0819 03:16:29.446382 17344 solver.cpp:404]     Test net output #1: loss = 1.68797 (* 1 = 1.68797 loss)\nI0819 03:16:30.790966 17344 solver.cpp:228] Iteration 56100, loss = 0.00964141\nI0819 03:16:30.791009 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:16:30.791033 17344 solver.cpp:244]     Train net output #1: loss = 0.00964085 (* 1 = 0.00964085 loss)\nI0819 03:16:30.865259 17344 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0819 03:18:49.780756 17344 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 03:20:13.410550 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75092\nI0819 03:20:13.410918 17344 solver.cpp:404]     Test net output #1: loss = 1.62892 (* 1 = 1.62892 loss)\nI0819 03:20:14.755498 17344 solver.cpp:228] Iteration 56200, loss = 0.00732409\nI0819 03:20:14.755543 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:20:14.755566 17344 solver.cpp:244]     Train net output #1: loss = 0.00732353 (* 1 = 0.00732353 loss)\nI0819 03:20:14.824822 17344 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0819 03:22:33.721698 17344 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 03:23:57.351378 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75152\nI0819 03:23:57.351752 17344 solver.cpp:404]     Test net output #1: loss = 1.61226 (* 1 = 1.61226 
loss)\nI0819 03:23:58.696828 17344 solver.cpp:228] Iteration 56300, loss = 0.00714983\nI0819 03:23:58.696874 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:23:58.696897 17344 solver.cpp:244]     Train net output #1: loss = 0.00714927 (* 1 = 0.00714927 loss)\nI0819 03:23:58.766625 17344 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0819 03:26:17.803728 17344 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 03:27:41.438311 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75136\nI0819 03:27:41.438665 17344 solver.cpp:404]     Test net output #1: loss = 1.63517 (* 1 = 1.63517 loss)\nI0819 03:27:42.783891 17344 solver.cpp:228] Iteration 56400, loss = 0.00595376\nI0819 03:27:42.783931 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:27:42.783946 17344 solver.cpp:244]     Train net output #1: loss = 0.0059532 (* 1 = 0.0059532 loss)\nI0819 03:27:42.854753 17344 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0819 03:30:01.728865 17344 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 03:31:25.355630 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74696\nI0819 03:31:25.356003 17344 solver.cpp:404]     Test net output #1: loss = 1.65851 (* 1 = 1.65851 loss)\nI0819 03:31:26.700517 17344 solver.cpp:228] Iteration 56500, loss = 0.00443963\nI0819 03:31:26.700558 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:31:26.700573 17344 solver.cpp:244]     Train net output #1: loss = 0.00443907 (* 1 = 0.00443907 loss)\nI0819 03:31:26.770686 17344 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0819 03:33:45.572098 17344 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 03:35:09.195147 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74896\nI0819 03:35:09.195524 17344 solver.cpp:404]     Test net output #1: loss = 1.64775 (* 1 = 1.64775 loss)\nI0819 03:35:10.540421 17344 solver.cpp:228] Iteration 56600, loss = 0.00554986\nI0819 03:35:10.540462 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:35:10.540478 17344 solver.cpp:244]     Train net output #1: loss = 0.0055493 (* 1 = 0.0055493 loss)\nI0819 03:35:10.609231 17344 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0819 03:37:29.566035 17344 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 03:38:53.193356 17344 solver.cpp:404]     Test net output #0: accuracy = 0.748\nI0819 03:38:53.193722 17344 solver.cpp:404]     Test net output #1: loss = 1.66372 (* 1 = 1.66372 loss)\nI0819 03:38:54.538233 17344 solver.cpp:228] Iteration 56700, loss = 0.00794967\nI0819 03:38:54.538275 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:38:54.538290 17344 solver.cpp:244]     Train net output #1: loss = 0.00794911 (* 1 = 0.00794911 loss)\nI0819 03:38:54.612973 17344 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0819 03:41:13.374567 17344 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 03:42:37.005491 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75756\nI0819 03:42:37.005839 17344 solver.cpp:404]     Test net output #1: loss = 1.55745 (* 1 = 1.55745 loss)\nI0819 03:42:38.350734 17344 solver.cpp:228] Iteration 56800, loss = 0.00581869\nI0819 03:42:38.350776 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:42:38.350800 17344 solver.cpp:244]     Train net output #1: loss = 0.00581813 (* 1 = 0.00581813 loss)\nI0819 03:42:38.423281 17344 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0819 03:44:57.412804 17344 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:46:21.025624 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0819 03:46:21.026003 17344 solver.cpp:404]     Test net output #1: loss = 1.63484 (* 1 = 1.63484 loss)\nI0819 03:46:22.373163 17344 solver.cpp:228] Iteration 56900, loss = 0.00651272\nI0819 03:46:22.373203 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:46:22.373219 17344 solver.cpp:244]     Train net output #1: loss = 
0.00651216 (* 1 = 0.00651216 loss)\nI0819 03:46:22.435101 17344 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0819 03:48:41.233513 17344 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:50:04.877627 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75832\nI0819 03:50:04.877979 17344 solver.cpp:404]     Test net output #1: loss = 1.58541 (* 1 = 1.58541 loss)\nI0819 03:50:06.221761 17344 solver.cpp:228] Iteration 57000, loss = 0.00480377\nI0819 03:50:06.221801 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:50:06.221817 17344 solver.cpp:244]     Train net output #1: loss = 0.00480321 (* 1 = 0.00480321 loss)\nI0819 03:50:06.293226 17344 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0819 03:52:25.084978 17344 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 03:53:48.713491 17344 solver.cpp:404]     Test net output #0: accuracy = 0.745\nI0819 03:53:48.713866 17344 solver.cpp:404]     Test net output #1: loss = 1.72009 (* 1 = 1.72009 loss)\nI0819 03:53:50.056967 17344 solver.cpp:228] Iteration 57100, loss = 0.0045157\nI0819 03:53:50.057004 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:53:50.057020 17344 solver.cpp:244]     Train net output #1: loss = 0.00451514 (* 1 = 0.00451514 loss)\nI0819 03:53:50.128449 17344 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0819 03:56:08.897411 17344 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:57:32.523093 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73888\nI0819 03:57:32.523470 17344 solver.cpp:404]     Test net output #1: loss = 1.73019 (* 1 = 1.73019 loss)\nI0819 03:57:33.866892 17344 solver.cpp:228] Iteration 57200, loss = 0.00393318\nI0819 03:57:33.866931 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:57:33.866946 17344 solver.cpp:244]     Train net output #1: loss = 0.00393262 (* 1 = 0.00393262 loss)\nI0819 03:57:33.933636 17344 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0819 
03:59:52.726588 17344 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 04:01:16.349120 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74256\nI0819 04:01:16.349493 17344 solver.cpp:404]     Test net output #1: loss = 1.73272 (* 1 = 1.73272 loss)\nI0819 04:01:17.692639 17344 solver.cpp:228] Iteration 57300, loss = 0.00588969\nI0819 04:01:17.692677 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:01:17.692693 17344 solver.cpp:244]     Train net output #1: loss = 0.00588914 (* 1 = 0.00588914 loss)\nI0819 04:01:17.768018 17344 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0819 04:03:36.565584 17344 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 04:05:00.191750 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74624\nI0819 04:05:00.192129 17344 solver.cpp:404]     Test net output #1: loss = 1.73726 (* 1 = 1.73726 loss)\nI0819 04:05:01.535670 17344 solver.cpp:228] Iteration 57400, loss = 0.00565763\nI0819 04:05:01.535706 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:05:01.535722 17344 solver.cpp:244]     Train net output #1: loss = 0.00565708 (* 1 = 0.00565708 loss)\nI0819 04:05:01.603819 17344 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0819 04:07:20.404613 17344 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 04:08:44.021870 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74512\nI0819 04:08:44.022269 17344 solver.cpp:404]     Test net output #1: loss = 1.75192 (* 1 = 1.75192 loss)\nI0819 04:08:45.365484 17344 solver.cpp:228] Iteration 57500, loss = 0.00593126\nI0819 04:08:45.365525 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:08:45.365541 17344 solver.cpp:244]     Train net output #1: loss = 0.0059307 (* 1 = 0.0059307 loss)\nI0819 04:08:45.441408 17344 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0819 04:11:04.229990 17344 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 04:12:27.852361 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.739\nI0819 04:12:27.852725 17344 solver.cpp:404]     Test net output #1: loss = 1.80141 (* 1 = 1.80141 loss)\nI0819 04:12:29.195926 17344 solver.cpp:228] Iteration 57600, loss = 0.00381545\nI0819 04:12:29.195971 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:12:29.195987 17344 solver.cpp:244]     Train net output #1: loss = 0.0038149 (* 1 = 0.0038149 loss)\nI0819 04:12:29.271003 17344 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0819 04:14:48.040860 17344 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 04:16:11.663226 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7524\nI0819 04:16:11.663596 17344 solver.cpp:404]     Test net output #1: loss = 1.66577 (* 1 = 1.66577 loss)\nI0819 04:16:13.006927 17344 solver.cpp:228] Iteration 57700, loss = 0.0044167\nI0819 04:16:13.006970 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:16:13.006986 17344 solver.cpp:244]     Train net output #1: loss = 0.00441614 (* 1 = 0.00441614 loss)\nI0819 04:16:13.078774 17344 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0819 04:18:31.964529 17344 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 04:19:55.592053 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74572\nI0819 04:19:55.592427 17344 solver.cpp:404]     Test net output #1: loss = 1.71212 (* 1 = 1.71212 loss)\nI0819 04:19:56.937314 17344 solver.cpp:228] Iteration 57800, loss = 0.00542877\nI0819 04:19:56.937355 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:19:56.937371 17344 solver.cpp:244]     Train net output #1: loss = 0.00542822 (* 1 = 0.00542822 loss)\nI0819 04:19:57.014092 17344 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0819 04:22:15.838201 17344 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 04:23:39.470475 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74304\nI0819 04:23:39.470850 17344 solver.cpp:404]     Test net output #1: loss = 1.79708 (* 1 = 1.79708 
loss)\nI0819 04:23:40.814188 17344 solver.cpp:228] Iteration 57900, loss = 0.00420153\nI0819 04:23:40.814232 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:23:40.814249 17344 solver.cpp:244]     Train net output #1: loss = 0.00420097 (* 1 = 0.00420097 loss)\nI0819 04:23:40.879762 17344 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0819 04:25:59.667552 17344 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 04:27:23.289767 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74916\nI0819 04:27:23.290124 17344 solver.cpp:404]     Test net output #1: loss = 1.71287 (* 1 = 1.71287 loss)\nI0819 04:27:24.633234 17344 solver.cpp:228] Iteration 58000, loss = 0.00537836\nI0819 04:27:24.633275 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:27:24.633291 17344 solver.cpp:244]     Train net output #1: loss = 0.0053778 (* 1 = 0.0053778 loss)\nI0819 04:27:24.707912 17344 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0819 04:29:43.498592 17344 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 04:31:07.141285 17344 solver.cpp:404]     Test net output #0: accuracy = 0.748\nI0819 04:31:07.141650 17344 solver.cpp:404]     Test net output #1: loss = 1.70903 (* 1 = 1.70903 loss)\nI0819 04:31:08.485020 17344 solver.cpp:228] Iteration 58100, loss = 0.00583379\nI0819 04:31:08.485060 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:31:08.485076 17344 solver.cpp:244]     Train net output #1: loss = 0.00583323 (* 1 = 0.00583323 loss)\nI0819 04:31:08.555444 17344 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0819 04:33:27.390723 17344 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 04:34:51.035758 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74796\nI0819 04:34:51.036136 17344 solver.cpp:404]     Test net output #1: loss = 1.71524 (* 1 = 1.71524 loss)\nI0819 04:34:52.379765 17344 solver.cpp:228] Iteration 58200, loss = 0.00374109\nI0819 04:34:52.379802 17344 solver.cpp:244]  
   Train net output #0: accuracy = 1\nI0819 04:34:52.379817 17344 solver.cpp:244]     Train net output #1: loss = 0.00374053 (* 1 = 0.00374053 loss)\nI0819 04:34:52.450534 17344 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0819 04:37:11.277876 17344 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 04:38:34.919312 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7372\nI0819 04:38:34.919682 17344 solver.cpp:404]     Test net output #1: loss = 1.82527 (* 1 = 1.82527 loss)\nI0819 04:38:36.263088 17344 solver.cpp:228] Iteration 58300, loss = 0.00411575\nI0819 04:38:36.263126 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:38:36.263142 17344 solver.cpp:244]     Train net output #1: loss = 0.00411519 (* 1 = 0.00411519 loss)\nI0819 04:38:36.336364 17344 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0819 04:40:55.133296 17344 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 04:42:18.763051 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74324\nI0819 04:42:18.763411 17344 solver.cpp:404]     Test net output #1: loss = 1.74328 (* 1 = 1.74328 loss)\nI0819 04:42:20.107898 17344 solver.cpp:228] Iteration 58400, loss = 0.00532521\nI0819 04:42:20.107937 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:42:20.107959 17344 solver.cpp:244]     Train net output #1: loss = 0.00532466 (* 1 = 0.00532466 loss)\nI0819 04:42:20.177423 17344 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0819 04:44:39.012181 17344 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:46:02.639487 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74808\nI0819 04:46:02.639858 17344 solver.cpp:404]     Test net output #1: loss = 1.70103 (* 1 = 1.70103 loss)\nI0819 04:46:03.983921 17344 solver.cpp:228] Iteration 58500, loss = 0.00314822\nI0819 04:46:03.983963 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:46:03.983979 17344 solver.cpp:244]     Train net output #1: loss = 0.00314766 (* 
1 = 0.00314766 loss)\nI0819 04:46:04.050647 17344 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0819 04:48:23.029918 17344 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:49:46.664003 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73924\nI0819 04:49:46.664355 17344 solver.cpp:404]     Test net output #1: loss = 1.75623 (* 1 = 1.75623 loss)\nI0819 04:49:48.007681 17344 solver.cpp:228] Iteration 58600, loss = 0.0051377\nI0819 04:49:48.007725 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:49:48.007741 17344 solver.cpp:244]     Train net output #1: loss = 0.00513714 (* 1 = 0.00513714 loss)\nI0819 04:49:48.082738 17344 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0819 04:52:06.997561 17344 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 04:53:30.624927 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74164\nI0819 04:53:30.625285 17344 solver.cpp:404]     Test net output #1: loss = 1.75991 (* 1 = 1.75991 loss)\nI0819 04:53:31.970397 17344 solver.cpp:228] Iteration 58700, loss = 0.00412929\nI0819 04:53:31.970440 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:53:31.970456 17344 solver.cpp:244]     Train net output #1: loss = 0.00412874 (* 1 = 0.00412874 loss)\nI0819 04:53:32.040119 17344 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0819 04:55:50.915393 17344 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:57:14.543344 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73576\nI0819 04:57:14.543696 17344 solver.cpp:404]     Test net output #1: loss = 1.80212 (* 1 = 1.80212 loss)\nI0819 04:57:15.887151 17344 solver.cpp:228] Iteration 58800, loss = 0.0047422\nI0819 04:57:15.887194 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:57:15.887212 17344 solver.cpp:244]     Train net output #1: loss = 0.00474165 (* 1 = 0.00474165 loss)\nI0819 04:57:15.961892 17344 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0819 04:59:34.850266 17344 
solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 05:00:58.483244 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73768\nI0819 05:00:58.483609 17344 solver.cpp:404]     Test net output #1: loss = 1.77041 (* 1 = 1.77041 loss)\nI0819 05:00:59.827215 17344 solver.cpp:228] Iteration 58900, loss = 0.00538236\nI0819 05:00:59.827257 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:00:59.827273 17344 solver.cpp:244]     Train net output #1: loss = 0.00538181 (* 1 = 0.00538181 loss)\nI0819 05:00:59.899441 17344 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0819 05:03:18.718927 17344 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 05:04:42.323343 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72824\nI0819 05:04:42.323725 17344 solver.cpp:404]     Test net output #1: loss = 1.91021 (* 1 = 1.91021 loss)\nI0819 05:04:43.666803 17344 solver.cpp:228] Iteration 59000, loss = 0.00348213\nI0819 05:04:43.666844 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:04:43.666860 17344 solver.cpp:244]     Train net output #1: loss = 0.00348157 (* 1 = 0.00348157 loss)\nI0819 05:04:43.735968 17344 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0819 05:07:02.493784 17344 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 05:08:26.092155 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74128\nI0819 05:08:26.092506 17344 solver.cpp:404]     Test net output #1: loss = 1.7544 (* 1 = 1.7544 loss)\nI0819 05:08:27.436496 17344 solver.cpp:228] Iteration 59100, loss = 0.00406065\nI0819 05:08:27.436537 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:08:27.436553 17344 solver.cpp:244]     Train net output #1: loss = 0.00406009 (* 1 = 0.00406009 loss)\nI0819 05:08:27.502708 17344 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0819 05:10:46.295387 17344 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 05:12:09.886122 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.73808\nI0819 05:12:09.886487 17344 solver.cpp:404]     Test net output #1: loss = 1.76984 (* 1 = 1.76984 loss)\nI0819 05:12:11.230438 17344 solver.cpp:228] Iteration 59200, loss = 0.00298922\nI0819 05:12:11.230479 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:12:11.230495 17344 solver.cpp:244]     Train net output #1: loss = 0.00298866 (* 1 = 0.00298866 loss)\nI0819 05:12:11.297994 17344 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0819 05:14:30.097296 17344 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 05:15:53.684810 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73948\nI0819 05:15:53.685174 17344 solver.cpp:404]     Test net output #1: loss = 1.79938 (* 1 = 1.79938 loss)\nI0819 05:15:55.028445 17344 solver.cpp:228] Iteration 59300, loss = 0.00283144\nI0819 05:15:55.028486 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:15:55.028501 17344 solver.cpp:244]     Train net output #1: loss = 0.00283088 (* 1 = 0.00283088 loss)\nI0819 05:15:55.101850 17344 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0819 05:18:14.018862 17344 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 05:19:37.612488 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73648\nI0819 05:19:37.612862 17344 solver.cpp:404]     Test net output #1: loss = 1.81756 (* 1 = 1.81756 loss)\nI0819 05:19:38.957545 17344 solver.cpp:228] Iteration 59400, loss = 0.00324732\nI0819 05:19:38.957587 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:19:38.957602 17344 solver.cpp:244]     Train net output #1: loss = 0.00324677 (* 1 = 0.00324677 loss)\nI0819 05:19:39.032692 17344 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0819 05:21:58.033555 17344 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 05:23:21.624666 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73316\nI0819 05:23:21.625030 17344 solver.cpp:404]     Test net output #1: loss = 1.8619 (* 1 = 1.8619 loss)\nI0819 
05:23:22.969540 17344 solver.cpp:228] Iteration 59500, loss = 0.00357661\nI0819 05:23:22.969583 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:23:22.969599 17344 solver.cpp:244]     Train net output #1: loss = 0.00357605 (* 1 = 0.00357605 loss)\nI0819 05:23:23.038982 17344 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0819 05:25:41.919708 17344 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 05:27:05.520404 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73052\nI0819 05:27:05.520756 17344 solver.cpp:404]     Test net output #1: loss = 1.87275 (* 1 = 1.87275 loss)\nI0819 05:27:06.865147 17344 solver.cpp:228] Iteration 59600, loss = 0.00286426\nI0819 05:27:06.865188 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:27:06.865205 17344 solver.cpp:244]     Train net output #1: loss = 0.0028637 (* 1 = 0.0028637 loss)\nI0819 05:27:06.938102 17344 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0819 05:29:25.866680 17344 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 05:30:49.465728 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72432\nI0819 05:30:49.466096 17344 solver.cpp:404]     Test net output #1: loss = 1.97038 (* 1 = 1.97038 loss)\nI0819 05:30:50.810313 17344 solver.cpp:228] Iteration 59700, loss = 0.00378149\nI0819 05:30:50.810356 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:30:50.810372 17344 solver.cpp:244]     Train net output #1: loss = 0.00378094 (* 1 = 0.00378094 loss)\nI0819 05:30:50.880338 17344 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0819 05:33:09.807965 17344 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 05:34:33.426069 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73012\nI0819 05:34:33.426443 17344 solver.cpp:404]     Test net output #1: loss = 1.9186 (* 1 = 1.9186 loss)\nI0819 05:34:34.770789 17344 solver.cpp:228] Iteration 59800, loss = 0.0051579\nI0819 05:34:34.770833 17344 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 05:34:34.770848 17344 solver.cpp:244]     Train net output #1: loss = 0.00515734 (* 1 = 0.00515734 loss)\nI0819 05:34:34.841339 17344 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0819 05:36:53.764772 17344 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 05:38:17.361234 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72624\nI0819 05:38:17.361618 17344 solver.cpp:404]     Test net output #1: loss = 1.94554 (* 1 = 1.94554 loss)\nI0819 05:38:18.705564 17344 solver.cpp:228] Iteration 59900, loss = 0.00662283\nI0819 05:38:18.705606 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:38:18.705621 17344 solver.cpp:244]     Train net output #1: loss = 0.00662227 (* 1 = 0.00662227 loss)\nI0819 05:38:18.775863 17344 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0819 05:40:37.620759 17344 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 05:42:01.218829 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72372\nI0819 05:42:01.219223 17344 solver.cpp:404]     Test net output #1: loss = 1.96879 (* 1 = 1.96879 loss)\nI0819 05:42:02.563717 17344 solver.cpp:228] Iteration 60000, loss = 0.00247717\nI0819 05:42:02.563757 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:42:02.563773 17344 solver.cpp:244]     Train net output #1: loss = 0.00247662 (* 1 = 0.00247662 loss)\nI0819 05:42:02.631880 17344 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0819 05:44:21.484921 17344 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:45:45.096779 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72968\nI0819 05:45:45.097168 17344 solver.cpp:404]     Test net output #1: loss = 1.92189 (* 1 = 1.92189 loss)\nI0819 05:45:46.440354 17344 solver.cpp:228] Iteration 60100, loss = 0.00382194\nI0819 05:45:46.440397 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:45:46.440412 17344 solver.cpp:244]     Train net output #1: loss = 0.00382139 (* 1 = 
0.00382139 loss)\nI0819 05:45:46.512015 17344 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0819 05:48:05.331848 17344 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:49:28.932780 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72244\nI0819 05:49:28.933161 17344 solver.cpp:404]     Test net output #1: loss = 1.98956 (* 1 = 1.98956 loss)\nI0819 05:49:30.277633 17344 solver.cpp:228] Iteration 60200, loss = 0.00425858\nI0819 05:49:30.277671 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:49:30.277688 17344 solver.cpp:244]     Train net output #1: loss = 0.00425803 (* 1 = 0.00425803 loss)\nI0819 05:49:30.341699 17344 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0819 05:51:49.108346 17344 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 05:53:12.716506 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71668\nI0819 05:53:12.716872 17344 solver.cpp:404]     Test net output #1: loss = 2.03922 (* 1 = 2.03922 loss)\nI0819 05:53:14.061305 17344 solver.cpp:228] Iteration 60300, loss = 0.00339586\nI0819 05:53:14.061347 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:53:14.061362 17344 solver.cpp:244]     Train net output #1: loss = 0.0033953 (* 1 = 0.0033953 loss)\nI0819 05:53:14.129223 17344 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0819 05:55:32.938108 17344 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:56:56.552358 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71788\nI0819 05:56:56.552736 17344 solver.cpp:404]     Test net output #1: loss = 2.02968 (* 1 = 2.02968 loss)\nI0819 05:56:57.897258 17344 solver.cpp:228] Iteration 60400, loss = 0.00486515\nI0819 05:56:57.897296 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:56:57.897312 17344 solver.cpp:244]     Train net output #1: loss = 0.00486459 (* 1 = 0.00486459 loss)\nI0819 05:56:57.971637 17344 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0819 05:59:16.784320 17344 
solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 06:00:40.405474 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70356\nI0819 06:00:40.405843 17344 solver.cpp:404]     Test net output #1: loss = 2.2282 (* 1 = 2.2282 loss)\nI0819 06:00:41.748765 17344 solver.cpp:228] Iteration 60500, loss = 0.00247824\nI0819 06:00:41.748806 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:00:41.748821 17344 solver.cpp:244]     Train net output #1: loss = 0.00247768 (* 1 = 0.00247768 loss)\nI0819 06:00:41.818819 17344 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0819 06:03:00.631336 17344 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 06:04:24.240324 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70316\nI0819 06:04:24.240679 17344 solver.cpp:404]     Test net output #1: loss = 2.27919 (* 1 = 2.27919 loss)\nI0819 06:04:25.583833 17344 solver.cpp:228] Iteration 60600, loss = 0.00401764\nI0819 06:04:25.583875 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:04:25.583891 17344 solver.cpp:244]     Train net output #1: loss = 0.00401708 (* 1 = 0.00401708 loss)\nI0819 06:04:25.654816 17344 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0819 06:06:44.584488 17344 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 06:08:08.194254 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7148\nI0819 06:08:08.194631 17344 solver.cpp:404]     Test net output #1: loss = 2.14451 (* 1 = 2.14451 loss)\nI0819 06:08:09.538269 17344 solver.cpp:228] Iteration 60700, loss = 0.00345546\nI0819 06:08:09.538311 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:08:09.538326 17344 solver.cpp:244]     Train net output #1: loss = 0.00345491 (* 1 = 0.00345491 loss)\nI0819 06:08:09.604003 17344 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0819 06:10:28.444794 17344 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 06:11:52.057931 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.72052\nI0819 06:11:52.058316 17344 solver.cpp:404]     Test net output #1: loss = 2.03127 (* 1 = 2.03127 loss)\nI0819 06:11:53.401758 17344 solver.cpp:228] Iteration 60800, loss = 0.00312241\nI0819 06:11:53.401798 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:11:53.401813 17344 solver.cpp:244]     Train net output #1: loss = 0.00312186 (* 1 = 0.00312186 loss)\nI0819 06:11:53.473645 17344 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0819 06:14:12.362365 17344 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 06:15:35.963265 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72104\nI0819 06:15:35.963644 17344 solver.cpp:404]     Test net output #1: loss = 2.02997 (* 1 = 2.02997 loss)\nI0819 06:15:37.307902 17344 solver.cpp:228] Iteration 60900, loss = 0.00424567\nI0819 06:15:37.307943 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:15:37.307960 17344 solver.cpp:244]     Train net output #1: loss = 0.00424512 (* 1 = 0.00424512 loss)\nI0819 06:15:37.380267 17344 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0819 06:17:56.288647 17344 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 06:19:19.887074 17344 solver.cpp:404]     Test net output #0: accuracy = 0.721\nI0819 06:19:19.887426 17344 solver.cpp:404]     Test net output #1: loss = 2.01176 (* 1 = 2.01176 loss)\nI0819 06:19:21.231171 17344 solver.cpp:228] Iteration 61000, loss = 0.00293201\nI0819 06:19:21.231212 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:19:21.231228 17344 solver.cpp:244]     Train net output #1: loss = 0.00293145 (* 1 = 0.00293145 loss)\nI0819 06:19:21.302552 17344 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0819 06:21:40.165995 17344 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 06:23:03.754189 17344 solver.cpp:404]     Test net output #0: accuracy = 0.722\nI0819 06:23:03.754518 17344 solver.cpp:404]     Test net output #1: loss = 2.00716 (* 1 = 2.00716 loss)\nI0819 
06:23:05.098176 17344 solver.cpp:228] Iteration 61100, loss = 0.00332617\nI0819 06:23:05.098219 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:23:05.098235 17344 solver.cpp:244]     Train net output #1: loss = 0.00332561 (* 1 = 0.00332561 loss)\nI0819 06:23:05.169764 17344 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0819 06:25:24.015786 17344 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 06:26:47.616703 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7286\nI0819 06:26:47.617054 17344 solver.cpp:404]     Test net output #1: loss = 1.98247 (* 1 = 1.98247 loss)\nI0819 06:26:48.960289 17344 solver.cpp:228] Iteration 61200, loss = 0.00402607\nI0819 06:26:48.960330 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:26:48.960345 17344 solver.cpp:244]     Train net output #1: loss = 0.00402552 (* 1 = 0.00402552 loss)\nI0819 06:26:49.034849 17344 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0819 06:29:07.925055 17344 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 06:30:31.527813 17344 solver.cpp:404]     Test net output #0: accuracy = 0.722\nI0819 06:30:31.528190 17344 solver.cpp:404]     Test net output #1: loss = 2.04763 (* 1 = 2.04763 loss)\nI0819 06:30:32.872835 17344 solver.cpp:228] Iteration 61300, loss = 0.00258144\nI0819 06:30:32.872879 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:30:32.872895 17344 solver.cpp:244]     Train net output #1: loss = 0.00258088 (* 1 = 0.00258088 loss)\nI0819 06:30:32.939872 17344 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0819 06:32:51.815712 17344 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 06:34:15.414968 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73356\nI0819 06:34:15.415400 17344 solver.cpp:404]     Test net output #1: loss = 1.95372 (* 1 = 1.95372 loss)\nI0819 06:34:16.759630 17344 solver.cpp:228] Iteration 61400, loss = 0.0035481\nI0819 06:34:16.759672 17344 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 06:34:16.759688 17344 solver.cpp:244]     Train net output #1: loss = 0.00354755 (* 1 = 0.00354755 loss)\nI0819 06:34:16.826601 17344 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0819 06:36:35.641690 17344 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 06:37:59.241986 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73732\nI0819 06:37:59.242367 17344 solver.cpp:404]     Test net output #1: loss = 1.90509 (* 1 = 1.90509 loss)\nI0819 06:38:00.584991 17344 solver.cpp:228] Iteration 61500, loss = 0.00202137\nI0819 06:38:00.585032 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:38:00.585049 17344 solver.cpp:244]     Train net output #1: loss = 0.00202081 (* 1 = 0.00202081 loss)\nI0819 06:38:00.662145 17344 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0819 06:40:19.588209 17344 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 06:41:43.190551 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74464\nI0819 06:41:43.190918 17344 solver.cpp:404]     Test net output #1: loss = 1.82893 (* 1 = 1.82893 loss)\nI0819 06:41:44.534020 17344 solver.cpp:228] Iteration 61600, loss = 0.00270981\nI0819 06:41:44.534061 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:41:44.534077 17344 solver.cpp:244]     Train net output #1: loss = 0.00270925 (* 1 = 0.00270925 loss)\nI0819 06:41:44.607867 17344 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0819 06:44:03.407699 17344 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:45:27.009353 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74328\nI0819 06:45:27.009729 17344 solver.cpp:404]     Test net output #1: loss = 1.84801 (* 1 = 1.84801 loss)\nI0819 06:45:28.353157 17344 solver.cpp:228] Iteration 61700, loss = 0.00331472\nI0819 06:45:28.353199 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:45:28.353215 17344 solver.cpp:244]     Train net output #1: loss = 0.00331416 (* 1 = 
0.00331416 loss)\nI0819 06:45:28.423509 17344 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0819 06:47:47.283740 17344 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:49:10.971981 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74332\nI0819 06:49:10.972355 17344 solver.cpp:404]     Test net output #1: loss = 1.84901 (* 1 = 1.84901 loss)\nI0819 06:49:12.316669 17344 solver.cpp:228] Iteration 61800, loss = 0.00224472\nI0819 06:49:12.316709 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:49:12.316725 17344 solver.cpp:244]     Train net output #1: loss = 0.00224416 (* 1 = 0.00224416 loss)\nI0819 06:49:12.386572 17344 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0819 06:51:31.315326 17344 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 06:52:54.900394 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74032\nI0819 06:52:54.900764 17344 solver.cpp:404]     Test net output #1: loss = 1.89421 (* 1 = 1.89421 loss)\nI0819 06:52:56.246685 17344 solver.cpp:228] Iteration 61900, loss = 0.00334194\nI0819 06:52:56.246731 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:52:56.246778 17344 solver.cpp:244]     Train net output #1: loss = 0.00334138 (* 1 = 0.00334138 loss)\nI0819 06:52:56.315418 17344 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0819 06:55:15.119319 17344 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:56:38.704002 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7422\nI0819 06:56:38.704387 17344 solver.cpp:404]     Test net output #1: loss = 1.86859 (* 1 = 1.86859 loss)\nI0819 06:56:40.047740 17344 solver.cpp:228] Iteration 62000, loss = 0.00282446\nI0819 06:56:40.047780 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:56:40.047796 17344 solver.cpp:244]     Train net output #1: loss = 0.0028239 (* 1 = 0.0028239 loss)\nI0819 06:56:40.114743 17344 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0819 06:58:58.890871 17344 
solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 07:00:22.472116 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73996\nI0819 07:00:22.472460 17344 solver.cpp:404]     Test net output #1: loss = 1.90474 (* 1 = 1.90474 loss)\nI0819 07:00:23.814932 17344 solver.cpp:228] Iteration 62100, loss = 0.00408767\nI0819 07:00:23.814971 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:00:23.814986 17344 solver.cpp:244]     Train net output #1: loss = 0.00408711 (* 1 = 0.00408711 loss)\nI0819 07:00:23.884656 17344 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0819 07:02:42.641551 17344 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 07:04:06.225486 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73516\nI0819 07:04:06.225860 17344 solver.cpp:404]     Test net output #1: loss = 1.95545 (* 1 = 1.95545 loss)\nI0819 07:04:07.568900 17344 solver.cpp:228] Iteration 62200, loss = 0.0033526\nI0819 07:04:07.568938 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:04:07.568954 17344 solver.cpp:244]     Train net output #1: loss = 0.00335204 (* 1 = 0.00335204 loss)\nI0819 07:04:07.639119 17344 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0819 07:06:26.414819 17344 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 07:07:50.004542 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73644\nI0819 07:07:50.004920 17344 solver.cpp:404]     Test net output #1: loss = 1.92122 (* 1 = 1.92122 loss)\nI0819 07:07:51.348021 17344 solver.cpp:228] Iteration 62300, loss = 0.0030455\nI0819 07:07:51.348062 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:07:51.348078 17344 solver.cpp:244]     Train net output #1: loss = 0.00304494 (* 1 = 0.00304494 loss)\nI0819 07:07:51.415835 17344 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0819 07:10:10.388540 17344 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 07:11:33.975452 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.7294\nI0819 07:11:33.975837 17344 solver.cpp:404]     Test net output #1: loss = 2.01492 (* 1 = 2.01492 loss)\nI0819 07:11:35.319350 17344 solver.cpp:228] Iteration 62400, loss = 0.00311303\nI0819 07:11:35.319394 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:11:35.319409 17344 solver.cpp:244]     Train net output #1: loss = 0.00311247 (* 1 = 0.00311247 loss)\nI0819 07:11:35.384956 17344 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0819 07:13:54.360252 17344 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 07:15:17.953848 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72432\nI0819 07:15:17.954255 17344 solver.cpp:404]     Test net output #1: loss = 2.08933 (* 1 = 2.08933 loss)\nI0819 07:15:19.298527 17344 solver.cpp:228] Iteration 62500, loss = 0.00275892\nI0819 07:15:19.298568 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:15:19.298583 17344 solver.cpp:244]     Train net output #1: loss = 0.00275836 (* 1 = 0.00275836 loss)\nI0819 07:15:19.373086 17344 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0819 07:17:38.304255 17344 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 07:19:01.891309 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71976\nI0819 07:19:01.891710 17344 solver.cpp:404]     Test net output #1: loss = 2.13159 (* 1 = 2.13159 loss)\nI0819 07:19:03.236166 17344 solver.cpp:228] Iteration 62600, loss = 0.0025488\nI0819 07:19:03.236209 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:19:03.236225 17344 solver.cpp:244]     Train net output #1: loss = 0.00254824 (* 1 = 0.00254824 loss)\nI0819 07:19:03.310281 17344 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0819 07:21:22.283866 17344 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 07:22:45.854499 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71664\nI0819 07:22:45.854872 17344 solver.cpp:404]     Test net output #1: loss = 2.13236 (* 1 = 2.13236 loss)\nI0819 
07:22:47.199017 17344 solver.cpp:228] Iteration 62700, loss = 0.00320608\nI0819 07:22:47.199059 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:22:47.199074 17344 solver.cpp:244]     Train net output #1: loss = 0.00320552 (* 1 = 0.00320552 loss)\nI0819 07:22:47.280212 17344 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0819 07:25:06.317313 17344 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 07:26:29.903131 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7212\nI0819 07:26:29.903511 17344 solver.cpp:404]     Test net output #1: loss = 2.15416 (* 1 = 2.15416 loss)\nI0819 07:26:31.248214 17344 solver.cpp:228] Iteration 62800, loss = 0.00249051\nI0819 07:26:31.248255 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:26:31.248270 17344 solver.cpp:244]     Train net output #1: loss = 0.00248995 (* 1 = 0.00248995 loss)\nI0819 07:26:31.315935 17344 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0819 07:28:50.373654 17344 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 07:30:13.948282 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71332\nI0819 07:30:13.948621 17344 solver.cpp:404]     Test net output #1: loss = 2.23938 (* 1 = 2.23938 loss)\nI0819 07:30:15.292773 17344 solver.cpp:228] Iteration 62900, loss = 0.00226526\nI0819 07:30:15.292815 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:30:15.292831 17344 solver.cpp:244]     Train net output #1: loss = 0.0022647 (* 1 = 0.0022647 loss)\nI0819 07:30:15.364408 17344 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0819 07:32:34.340045 17344 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 07:33:57.926604 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71232\nI0819 07:33:57.926964 17344 solver.cpp:404]     Test net output #1: loss = 2.27586 (* 1 = 2.27586 loss)\nI0819 07:33:59.271245 17344 solver.cpp:228] Iteration 63000, loss = 0.00283825\nI0819 07:33:59.271287 17344 solver.cpp:244]     Train 
net output #0: accuracy = 1\nI0819 07:33:59.271303 17344 solver.cpp:244]     Train net output #1: loss = 0.00283769 (* 1 = 0.00283769 loss)\nI0819 07:33:59.338810 17344 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0819 07:36:18.241164 17344 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 07:37:41.828374 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72212\nI0819 07:37:41.828754 17344 solver.cpp:404]     Test net output #1: loss = 2.11213 (* 1 = 2.11213 loss)\nI0819 07:37:43.171972 17344 solver.cpp:228] Iteration 63100, loss = 0.00213136\nI0819 07:37:43.172019 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:37:43.172034 17344 solver.cpp:244]     Train net output #1: loss = 0.0021308 (* 1 = 0.0021308 loss)\nI0819 07:37:43.243553 17344 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0819 07:40:02.017985 17344 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 07:41:25.605237 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0819 07:41:25.605617 17344 solver.cpp:404]     Test net output #1: loss = 2.25131 (* 1 = 2.25131 loss)\nI0819 07:41:26.948210 17344 solver.cpp:228] Iteration 63200, loss = 0.00258141\nI0819 07:41:26.948249 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:41:26.948266 17344 solver.cpp:244]     Train net output #1: loss = 0.00258085 (* 1 = 0.00258085 loss)\nI0819 07:41:27.018967 17344 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0819 07:43:45.854354 17344 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 07:45:09.454705 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0819 07:45:09.455070 17344 solver.cpp:404]     Test net output #1: loss = 2.16913 (* 1 = 2.16913 loss)\nI0819 07:45:10.798576 17344 solver.cpp:228] Iteration 63300, loss = 0.00214274\nI0819 07:45:10.798614 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:45:10.798629 17344 solver.cpp:244]     Train net output #1: loss = 0.00214218 (* 1 = 
0.00214218 loss)\nI0819 07:45:10.870759 17344 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0819 07:47:29.754529 17344 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:48:53.365759 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72016\nI0819 07:48:53.366117 17344 solver.cpp:404]     Test net output #1: loss = 2.10072 (* 1 = 2.10072 loss)\nI0819 07:48:54.709364 17344 solver.cpp:228] Iteration 63400, loss = 0.00208098\nI0819 07:48:54.709404 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:48:54.709420 17344 solver.cpp:244]     Train net output #1: loss = 0.00208042 (* 1 = 0.00208042 loss)\nI0819 07:48:54.776402 17344 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0819 07:51:13.550307 17344 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 07:52:37.159972 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72356\nI0819 07:52:37.160328 17344 solver.cpp:404]     Test net output #1: loss = 2.06132 (* 1 = 2.06132 loss)\nI0819 07:52:38.503954 17344 solver.cpp:228] Iteration 63500, loss = 0.00309938\nI0819 07:52:38.503998 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:52:38.504014 17344 solver.cpp:244]     Train net output #1: loss = 0.00309882 (* 1 = 0.00309882 loss)\nI0819 07:52:38.579352 17344 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0819 07:54:57.485958 17344 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:56:21.161478 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72432\nI0819 07:56:21.161837 17344 solver.cpp:404]     Test net output #1: loss = 2.08953 (* 1 = 2.08953 loss)\nI0819 07:56:22.506047 17344 solver.cpp:228] Iteration 63600, loss = 0.00259548\nI0819 07:56:22.506089 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:56:22.506104 17344 solver.cpp:244]     Train net output #1: loss = 0.00259492 (* 1 = 0.00259492 loss)\nI0819 07:56:22.577600 17344 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0819 07:58:41.417830 17344 
solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 08:00:05.017715 17344 solver.cpp:404]     Test net output #0: accuracy = 0.727\nI0819 08:00:05.018051 17344 solver.cpp:404]     Test net output #1: loss = 2.06962 (* 1 = 2.06962 loss)\nI0819 08:00:06.361284 17344 solver.cpp:228] Iteration 63700, loss = 0.00164898\nI0819 08:00:06.361325 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:00:06.361340 17344 solver.cpp:244]     Train net output #1: loss = 0.00164842 (* 1 = 0.00164842 loss)\nI0819 08:00:06.429718 17344 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0819 08:02:25.198717 17344 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 08:03:48.794677 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73036\nI0819 08:03:48.795070 17344 solver.cpp:404]     Test net output #1: loss = 2.05271 (* 1 = 2.05271 loss)\nI0819 08:03:50.137814 17344 solver.cpp:228] Iteration 63800, loss = 0.00309533\nI0819 08:03:50.137852 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:03:50.137867 17344 solver.cpp:244]     Train net output #1: loss = 0.00309478 (* 1 = 0.00309478 loss)\nI0819 08:03:50.204988 17344 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0819 08:06:09.025653 17344 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 08:07:32.616855 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7364\nI0819 08:07:32.617214 17344 solver.cpp:404]     Test net output #1: loss = 1.93972 (* 1 = 1.93972 loss)\nI0819 08:07:33.960279 17344 solver.cpp:228] Iteration 63900, loss = 0.00295724\nI0819 08:07:33.960319 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:07:33.960335 17344 solver.cpp:244]     Train net output #1: loss = 0.00295668 (* 1 = 0.00295668 loss)\nI0819 08:07:34.035935 17344 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0819 08:09:53.020891 17344 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 08:11:16.621701 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.73848\nI0819 08:11:16.622052 17344 solver.cpp:404]     Test net output #1: loss = 1.92261 (* 1 = 1.92261 loss)\nI0819 08:11:17.966251 17344 solver.cpp:228] Iteration 64000, loss = 0.00224476\nI0819 08:11:17.966289 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:11:17.966305 17344 solver.cpp:244]     Train net output #1: loss = 0.0022442 (* 1 = 0.0022442 loss)\nI0819 08:11:18.035507 17344 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0819 08:13:36.998796 17344 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 08:15:00.589124 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73232\nI0819 08:15:00.589493 17344 solver.cpp:404]     Test net output #1: loss = 1.97986 (* 1 = 1.97986 loss)\nI0819 08:15:01.938853 17344 solver.cpp:228] Iteration 64100, loss = 0.00293451\nI0819 08:15:01.938887 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:15:01.938902 17344 solver.cpp:244]     Train net output #1: loss = 0.00293395 (* 1 = 0.00293395 loss)\nI0819 08:15:02.002034 17344 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0819 08:17:20.972944 17344 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 08:18:44.553256 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74148\nI0819 08:18:44.553616 17344 solver.cpp:404]     Test net output #1: loss = 1.92426 (* 1 = 1.92426 loss)\nI0819 08:18:45.897125 17344 solver.cpp:228] Iteration 64200, loss = 0.00189613\nI0819 08:18:45.897162 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:18:45.897178 17344 solver.cpp:244]     Train net output #1: loss = 0.00189557 (* 1 = 0.00189557 loss)\nI0819 08:18:45.966626 17344 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0819 08:21:04.917776 17344 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 08:22:28.496412 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74128\nI0819 08:22:28.496791 17344 solver.cpp:404]     Test net output #1: loss = 1.89155 (* 1 = 1.89155 loss)\nI0819 
08:22:29.840942 17344 solver.cpp:228] Iteration 64300, loss = 0.00195164\nI0819 08:22:29.840981 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:22:29.841001 17344 solver.cpp:244]     Train net output #1: loss = 0.00195108 (* 1 = 0.00195108 loss)\nI0819 08:22:29.910471 17344 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0819 08:24:48.757140 17344 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 08:26:12.336333 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7482\nI0819 08:26:12.336693 17344 solver.cpp:404]     Test net output #1: loss = 1.8227 (* 1 = 1.8227 loss)\nI0819 08:26:13.679836 17344 solver.cpp:228] Iteration 64400, loss = 0.00155183\nI0819 08:26:13.679873 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:26:13.679888 17344 solver.cpp:244]     Train net output #1: loss = 0.00155127 (* 1 = 0.00155127 loss)\nI0819 08:26:13.755914 17344 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0819 08:28:32.530745 17344 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 08:29:56.114439 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74348\nI0819 08:29:56.114809 17344 solver.cpp:404]     Test net output #1: loss = 1.85609 (* 1 = 1.85609 loss)\nI0819 08:29:57.458204 17344 solver.cpp:228] Iteration 64500, loss = 0.00194889\nI0819 08:29:57.458245 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:29:57.458261 17344 solver.cpp:244]     Train net output #1: loss = 0.00194833 (* 1 = 0.00194833 loss)\nI0819 08:29:57.534731 17344 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0819 08:32:16.310050 17344 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 08:33:39.885620 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73912\nI0819 08:33:39.886077 17344 solver.cpp:404]     Test net output #1: loss = 1.94138 (* 1 = 1.94138 loss)\nI0819 08:33:41.229019 17344 solver.cpp:228] Iteration 64600, loss = 0.00282356\nI0819 08:33:41.229058 17344 solver.cpp:244]     Train 
net output #0: accuracy = 1\nI0819 08:33:41.229074 17344 solver.cpp:244]     Train net output #1: loss = 0.002823 (* 1 = 0.002823 loss)\nI0819 08:33:41.304836 17344 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0819 08:36:00.079022 17344 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 08:37:23.654806 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73956\nI0819 08:37:23.655174 17344 solver.cpp:404]     Test net output #1: loss = 1.92436 (* 1 = 1.92436 loss)\nI0819 08:37:24.998374 17344 solver.cpp:228] Iteration 64700, loss = 0.00181584\nI0819 08:37:24.998412 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:37:24.998427 17344 solver.cpp:244]     Train net output #1: loss = 0.00181528 (* 1 = 0.00181528 loss)\nI0819 08:37:25.064177 17344 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0819 08:39:43.809643 17344 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 08:41:07.397771 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73364\nI0819 08:41:07.398123 17344 solver.cpp:404]     Test net output #1: loss = 2.04065 (* 1 = 2.04065 loss)\nI0819 08:41:08.741291 17344 solver.cpp:228] Iteration 64800, loss = 0.00185126\nI0819 08:41:08.741331 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:41:08.741348 17344 solver.cpp:244]     Train net output #1: loss = 0.0018507 (* 1 = 0.0018507 loss)\nI0819 08:41:08.813261 17344 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0819 08:43:27.566821 17344 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 08:44:50.680665 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72868\nI0819 08:44:50.681008 17344 solver.cpp:404]     Test net output #1: loss = 2.095 (* 1 = 2.095 loss)\nI0819 08:44:52.024039 17344 solver.cpp:228] Iteration 64900, loss = 0.0023628\nI0819 08:44:52.024075 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:44:52.024091 17344 solver.cpp:244]     Train net output #1: loss = 0.00236224 (* 1 = 0.00236224 
loss)\nI0819 08:44:52.090621 17344 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0819 08:47:10.813946 17344 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:48:34.038390 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74048\nI0819 08:48:34.038736 17344 solver.cpp:404]     Test net output #1: loss = 1.93758 (* 1 = 1.93758 loss)\nI0819 08:48:35.382066 17344 solver.cpp:228] Iteration 65000, loss = 0.00129744\nI0819 08:48:35.382104 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:48:35.382120 17344 solver.cpp:244]     Train net output #1: loss = 0.00129688 (* 1 = 0.00129688 loss)\nI0819 08:48:35.454797 17344 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0819 08:50:54.236460 17344 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 08:52:17.827559 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7324\nI0819 08:52:17.827953 17344 solver.cpp:404]     Test net output #1: loss = 2.04743 (* 1 = 2.04743 loss)\nI0819 08:52:19.171105 17344 solver.cpp:228] Iteration 65100, loss = 0.00219069\nI0819 08:52:19.171144 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:52:19.171159 17344 solver.cpp:244]     Train net output #1: loss = 0.00219013 (* 1 = 0.00219013 loss)\nI0819 08:52:19.242594 17344 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0819 08:54:38.082687 17344 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:56:01.674089 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0819 08:56:01.674453 17344 solver.cpp:404]     Test net output #1: loss = 2.27387 (* 1 = 2.27387 loss)\nI0819 08:56:03.019598 17344 solver.cpp:228] Iteration 65200, loss = 0.00132226\nI0819 08:56:03.019640 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:56:03.019656 17344 solver.cpp:244]     Train net output #1: loss = 0.00132171 (* 1 = 0.00132171 loss)\nI0819 08:56:03.084395 17344 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0819 08:58:22.067178 17344 solver.cpp:337] 
Iteration 65300, Testing net (#0)\nI0819 08:59:45.662945 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70852\nI0819 08:59:45.663318 17344 solver.cpp:404]     Test net output #1: loss = 2.30902 (* 1 = 2.30902 loss)\nI0819 08:59:47.008065 17344 solver.cpp:228] Iteration 65300, loss = 0.00307298\nI0819 08:59:47.008108 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:59:47.008124 17344 solver.cpp:244]     Train net output #1: loss = 0.00307242 (* 1 = 0.00307242 loss)\nI0819 08:59:47.080269 17344 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0819 09:02:05.992029 17344 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 09:03:29.587525 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70928\nI0819 09:03:29.587895 17344 solver.cpp:404]     Test net output #1: loss = 2.34915 (* 1 = 2.34915 loss)\nI0819 09:03:30.932018 17344 solver.cpp:228] Iteration 65400, loss = 0.00191989\nI0819 09:03:30.932060 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:03:30.932076 17344 solver.cpp:244]     Train net output #1: loss = 0.00191933 (* 1 = 0.00191933 loss)\nI0819 09:03:31.003715 17344 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0819 09:05:49.963296 17344 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 09:07:13.563674 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7072\nI0819 09:07:13.564075 17344 solver.cpp:404]     Test net output #1: loss = 2.37558 (* 1 = 2.37558 loss)\nI0819 09:07:14.908488 17344 solver.cpp:228] Iteration 65500, loss = 0.00209219\nI0819 09:07:14.908532 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:07:14.908548 17344 solver.cpp:244]     Train net output #1: loss = 0.00209163 (* 1 = 0.00209163 loss)\nI0819 09:07:14.979295 17344 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0819 09:09:34.034251 17344 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 09:10:57.631633 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70368\nI0819 
09:10:57.632030 17344 solver.cpp:404]     Test net output #1: loss = 2.53178 (* 1 = 2.53178 loss)\nI0819 09:10:58.977114 17344 solver.cpp:228] Iteration 65600, loss = 0.00119523\nI0819 09:10:58.977154 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:10:58.977169 17344 solver.cpp:244]     Train net output #1: loss = 0.00119467 (* 1 = 0.00119467 loss)\nI0819 09:10:59.051208 17344 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0819 09:13:18.054688 17344 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 09:14:41.646948 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69256\nI0819 09:14:41.647315 17344 solver.cpp:404]     Test net output #1: loss = 2.64001 (* 1 = 2.64001 loss)\nI0819 09:14:42.991897 17344 solver.cpp:228] Iteration 65700, loss = 0.00187895\nI0819 09:14:42.991937 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:14:42.991953 17344 solver.cpp:244]     Train net output #1: loss = 0.00187839 (* 1 = 0.00187839 loss)\nI0819 09:14:43.063614 17344 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0819 09:17:01.987596 17344 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 09:18:25.577020 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69012\nI0819 09:18:25.577407 17344 solver.cpp:404]     Test net output #1: loss = 2.74656 (* 1 = 2.74656 loss)\nI0819 09:18:26.921057 17344 solver.cpp:228] Iteration 65800, loss = 0.00175345\nI0819 09:18:26.921097 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:18:26.921113 17344 solver.cpp:244]     Train net output #1: loss = 0.00175289 (* 1 = 0.00175289 loss)\nI0819 09:18:26.994894 17344 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0819 09:20:45.835363 17344 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 09:22:09.422439 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68744\nI0819 09:22:09.422811 17344 solver.cpp:404]     Test net output #1: loss = 2.68654 (* 1 = 2.68654 loss)\nI0819 09:22:10.766161 17344 
solver.cpp:228] Iteration 65900, loss = 0.00203205\nI0819 09:22:10.766201 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:22:10.766216 17344 solver.cpp:244]     Train net output #1: loss = 0.00203149 (* 1 = 0.00203149 loss)\nI0819 09:22:10.833449 17344 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0819 09:24:29.683498 17344 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 09:25:53.262079 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67068\nI0819 09:25:53.262475 17344 solver.cpp:404]     Test net output #1: loss = 2.87769 (* 1 = 2.87769 loss)\nI0819 09:25:54.606281 17344 solver.cpp:228] Iteration 66000, loss = 0.00138225\nI0819 09:25:54.606322 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:25:54.606336 17344 solver.cpp:244]     Train net output #1: loss = 0.00138169 (* 1 = 0.00138169 loss)\nI0819 09:25:54.684610 17344 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0819 09:28:13.497750 17344 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 09:29:37.087359 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66356\nI0819 09:29:37.087734 17344 solver.cpp:404]     Test net output #1: loss = 2.92422 (* 1 = 2.92422 loss)\nI0819 09:29:38.430840 17344 solver.cpp:228] Iteration 66100, loss = 0.00149661\nI0819 09:29:38.430882 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:29:38.430901 17344 solver.cpp:244]     Train net output #1: loss = 0.00149605 (* 1 = 0.00149605 loss)\nI0819 09:29:38.503347 17344 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0819 09:31:57.342737 17344 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 09:33:20.924952 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67184\nI0819 09:33:20.925372 17344 solver.cpp:404]     Test net output #1: loss = 2.81874 (* 1 = 2.81874 loss)\nI0819 09:33:22.268383 17344 solver.cpp:228] Iteration 66200, loss = 0.0016949\nI0819 09:33:22.268422 17344 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 09:33:22.268438 17344 solver.cpp:244]     Train net output #1: loss = 0.00169434 (* 1 = 0.00169434 loss)\nI0819 09:33:22.342087 17344 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0819 09:35:41.313530 17344 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 09:37:04.891803 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66508\nI0819 09:37:04.892165 17344 solver.cpp:404]     Test net output #1: loss = 2.92884 (* 1 = 2.92884 loss)\nI0819 09:37:06.236814 17344 solver.cpp:228] Iteration 66300, loss = 0.00229135\nI0819 09:37:06.236851 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:37:06.236866 17344 solver.cpp:244]     Train net output #1: loss = 0.00229079 (* 1 = 0.00229079 loss)\nI0819 09:37:06.313271 17344 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0819 09:39:25.247113 17344 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 09:40:48.834733 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65328\nI0819 09:40:48.835083 17344 solver.cpp:404]     Test net output #1: loss = 2.99856 (* 1 = 2.99856 loss)\nI0819 09:40:50.179255 17344 solver.cpp:228] Iteration 66400, loss = 0.00271055\nI0819 09:40:50.179292 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:40:50.179307 17344 solver.cpp:244]     Train net output #1: loss = 0.00270999 (* 1 = 0.00270999 loss)\nI0819 09:40:50.252408 17344 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0819 09:43:09.260527 17344 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 09:44:32.883641 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6624\nI0819 09:44:32.884032 17344 solver.cpp:404]     Test net output #1: loss = 2.89102 (* 1 = 2.89102 loss)\nI0819 09:44:34.228760 17344 solver.cpp:228] Iteration 66500, loss = 0.00164058\nI0819 09:44:34.228801 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:44:34.228816 17344 solver.cpp:244]     Train net output #1: loss = 0.00164002 (* 1 = 0.00164002 
loss)\nI0819 09:44:34.289912 17344 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0819 09:46:53.124346 17344 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:48:16.730955 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6684\nI0819 09:48:16.731324 17344 solver.cpp:404]     Test net output #1: loss = 2.80747 (* 1 = 2.80747 loss)\nI0819 09:48:18.074828 17344 solver.cpp:228] Iteration 66600, loss = 0.00166722\nI0819 09:48:18.074867 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:48:18.074882 17344 solver.cpp:244]     Train net output #1: loss = 0.00166666 (* 1 = 0.00166666 loss)\nI0819 09:48:18.143471 17344 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0819 09:50:36.867635 17344 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 09:51:59.522161 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66848\nI0819 09:51:59.522445 17344 solver.cpp:404]     Test net output #1: loss = 2.78254 (* 1 = 2.78254 loss)\nI0819 09:52:00.864722 17344 solver.cpp:228] Iteration 66700, loss = 0.00125822\nI0819 09:52:00.864760 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:52:00.864784 17344 solver.cpp:244]     Train net output #1: loss = 0.00125766 (* 1 = 0.00125766 loss)\nI0819 09:52:00.937937 17344 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0819 09:54:19.348585 17344 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:55:42.001883 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67376\nI0819 09:55:42.002207 17344 solver.cpp:404]     Test net output #1: loss = 2.67306 (* 1 = 2.67306 loss)\nI0819 09:55:43.344223 17344 solver.cpp:228] Iteration 66800, loss = 0.0020789\nI0819 09:55:43.344256 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:55:43.344271 17344 solver.cpp:244]     Train net output #1: loss = 0.00207834 (* 1 = 0.00207834 loss)\nI0819 09:55:43.413041 17344 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0819 09:58:01.843374 17344 solver.cpp:337] 
Iteration 66900, Testing net (#0)\nI0819 09:59:24.584923 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67844\nI0819 09:59:24.585255 17344 solver.cpp:404]     Test net output #1: loss = 2.66652 (* 1 = 2.66652 loss)\nI0819 09:59:25.927247 17344 solver.cpp:228] Iteration 66900, loss = 0.00199374\nI0819 09:59:25.927283 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:59:25.927297 17344 solver.cpp:244]     Train net output #1: loss = 0.00199318 (* 1 = 0.00199318 loss)\nI0819 09:59:25.998147 17344 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0819 10:01:44.404222 17344 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 10:03:07.135417 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6804\nI0819 10:03:07.135742 17344 solver.cpp:404]     Test net output #1: loss = 2.61488 (* 1 = 2.61488 loss)\nI0819 10:03:08.477267 17344 solver.cpp:228] Iteration 67000, loss = 0.0019304\nI0819 10:03:08.477303 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:03:08.477319 17344 solver.cpp:244]     Train net output #1: loss = 0.00192984 (* 1 = 0.00192984 loss)\nI0819 10:03:08.541821 17344 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0819 10:05:26.961568 17344 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 10:06:49.699290 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67324\nI0819 10:06:49.699568 17344 solver.cpp:404]     Test net output #1: loss = 2.71061 (* 1 = 2.71061 loss)\nI0819 10:06:51.040977 17344 solver.cpp:228] Iteration 67100, loss = 0.00196848\nI0819 10:06:51.041008 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:06:51.041023 17344 solver.cpp:244]     Train net output #1: loss = 0.00196792 (* 1 = 0.00196792 loss)\nI0819 10:06:51.113704 17344 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0819 10:09:09.541151 17344 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 10:10:32.301514 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6624\nI0819 
10:10:32.301818 17344 solver.cpp:404]     Test net output #1: loss = 2.82138 (* 1 = 2.82138 loss)\nI0819 10:10:33.644165 17344 solver.cpp:228] Iteration 67200, loss = 0.00189372\nI0819 10:10:33.644201 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:10:33.644223 17344 solver.cpp:244]     Train net output #1: loss = 0.00189317 (* 1 = 0.00189317 loss)\nI0819 10:10:33.711300 17344 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0819 10:12:52.175657 17344 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 10:14:14.919468 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66284\nI0819 10:14:14.919729 17344 solver.cpp:404]     Test net output #1: loss = 2.78235 (* 1 = 2.78235 loss)\nI0819 10:14:16.261237 17344 solver.cpp:228] Iteration 67300, loss = 0.00142573\nI0819 10:14:16.261271 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:14:16.261287 17344 solver.cpp:244]     Train net output #1: loss = 0.00142517 (* 1 = 0.00142517 loss)\nI0819 10:14:16.327124 17344 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0819 10:16:34.739194 17344 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 10:17:57.480660 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65244\nI0819 10:17:57.480973 17344 solver.cpp:404]     Test net output #1: loss = 2.91892 (* 1 = 2.91892 loss)\nI0819 10:17:58.822682 17344 solver.cpp:228] Iteration 67400, loss = 0.00143851\nI0819 10:17:58.822715 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:17:58.822731 17344 solver.cpp:244]     Train net output #1: loss = 0.00143795 (* 1 = 0.00143795 loss)\nI0819 10:17:58.890357 17344 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0819 10:20:17.301923 17344 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 10:21:40.041591 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64912\nI0819 10:21:40.041898 17344 solver.cpp:404]     Test net output #1: loss = 2.99158 (* 1 = 2.99158 loss)\nI0819 10:21:41.383195 17344 
solver.cpp:228] Iteration 67500, loss = 0.00117908\nI0819 10:21:41.383227 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:21:41.383242 17344 solver.cpp:244]     Train net output #1: loss = 0.00117852 (* 1 = 0.00117852 loss)\nI0819 10:21:41.454210 17344 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0819 10:23:59.850277 17344 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 10:25:22.587258 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63188\nI0819 10:25:22.587582 17344 solver.cpp:404]     Test net output #1: loss = 3.17931 (* 1 = 3.17931 loss)\nI0819 10:25:23.928913 17344 solver.cpp:228] Iteration 67600, loss = 0.00168965\nI0819 10:25:23.928946 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:25:23.928961 17344 solver.cpp:244]     Train net output #1: loss = 0.00168909 (* 1 = 0.00168909 loss)\nI0819 10:25:23.989822 17344 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0819 10:27:42.393338 17344 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 10:29:05.129329 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63168\nI0819 10:29:05.129658 17344 solver.cpp:404]     Test net output #1: loss = 3.24773 (* 1 = 3.24773 loss)\nI0819 10:29:06.471344 17344 solver.cpp:228] Iteration 67700, loss = 0.00222275\nI0819 10:29:06.471377 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:29:06.471392 17344 solver.cpp:244]     Train net output #1: loss = 0.00222219 (* 1 = 0.00222219 loss)\nI0819 10:29:06.539613 17344 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0819 10:31:24.943668 17344 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 10:32:47.680807 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63772\nI0819 10:32:47.681131 17344 solver.cpp:404]     Test net output #1: loss = 3.2168 (* 1 = 3.2168 loss)\nI0819 10:32:49.021787 17344 solver.cpp:228] Iteration 67800, loss = 0.00204937\nI0819 10:32:49.021821 17344 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0819 10:32:49.021836 17344 solver.cpp:244]     Train net output #1: loss = 0.00204881 (* 1 = 0.00204881 loss)\nI0819 10:32:49.096058 17344 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0819 10:35:07.480470 17344 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 10:36:30.203428 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62536\nI0819 10:36:30.203723 17344 solver.cpp:404]     Test net output #1: loss = 3.40314 (* 1 = 3.40314 loss)\nI0819 10:36:31.544654 17344 solver.cpp:228] Iteration 67900, loss = 0.00160785\nI0819 10:36:31.544685 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:36:31.544700 17344 solver.cpp:244]     Train net output #1: loss = 0.00160729 (* 1 = 0.00160729 loss)\nI0819 10:36:31.606178 17344 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0819 10:38:49.975486 17344 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 10:40:12.697504 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6032\nI0819 10:40:12.697813 17344 solver.cpp:404]     Test net output #1: loss = 3.69228 (* 1 = 3.69228 loss)\nI0819 10:40:14.039474 17344 solver.cpp:228] Iteration 68000, loss = 0.00254675\nI0819 10:40:14.039506 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:40:14.039521 17344 solver.cpp:244]     Train net output #1: loss = 0.00254619 (* 1 = 0.00254619 loss)\nI0819 10:40:14.100898 17344 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0819 10:42:32.511005 17344 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 10:43:55.237737 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58936\nI0819 10:43:55.238003 17344 solver.cpp:404]     Test net output #1: loss = 3.90702 (* 1 = 3.90702 loss)\nI0819 10:43:56.579967 17344 solver.cpp:228] Iteration 68100, loss = 0.00137945\nI0819 10:43:56.579999 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:43:56.580014 17344 solver.cpp:244]     Train net output #1: loss = 0.00137889 (* 1 = 0.00137889 loss)\nI0819 
10:43:56.646769 17344 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0819 10:46:15.028492 17344 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:47:37.745313 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5812\nI0819 10:47:37.745599 17344 solver.cpp:404]     Test net output #1: loss = 3.95363 (* 1 = 3.95363 loss)\nI0819 10:47:39.087154 17344 solver.cpp:228] Iteration 68200, loss = 0.00158528\nI0819 10:47:39.087188 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:47:39.087203 17344 solver.cpp:244]     Train net output #1: loss = 0.00158472 (* 1 = 0.00158472 loss)\nI0819 10:47:39.159903 17344 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0819 10:49:57.534945 17344 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 10:51:20.249369 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58048\nI0819 10:51:20.249724 17344 solver.cpp:404]     Test net output #1: loss = 3.8969 (* 1 = 3.8969 loss)\nI0819 10:51:21.590821 17344 solver.cpp:228] Iteration 68300, loss = 0.00159229\nI0819 10:51:21.590853 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:51:21.590873 17344 solver.cpp:244]     Train net output #1: loss = 0.00159173 (* 1 = 0.00159173 loss)\nI0819 10:51:21.658821 17344 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0819 10:53:40.053741 17344 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:55:02.777555 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57948\nI0819 10:55:02.777904 17344 solver.cpp:404]     Test net output #1: loss = 4.01321 (* 1 = 4.01321 loss)\nI0819 10:55:04.119571 17344 solver.cpp:228] Iteration 68400, loss = 0.00167055\nI0819 10:55:04.119604 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:55:04.119619 17344 solver.cpp:244]     Train net output #1: loss = 0.00166999 (* 1 = 0.00166999 loss)\nI0819 10:55:04.191102 17344 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0819 10:57:22.529659 17344 solver.cpp:337] Iteration 
68500, Testing net (#0)\nI0819 10:58:45.250288 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58384\nI0819 10:58:45.250643 17344 solver.cpp:404]     Test net output #1: loss = 3.94518 (* 1 = 3.94518 loss)\nI0819 10:58:46.592092 17344 solver.cpp:228] Iteration 68500, loss = 0.00149055\nI0819 10:58:46.592125 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:58:46.592140 17344 solver.cpp:244]     Train net output #1: loss = 0.00148999 (* 1 = 0.00148999 loss)\nI0819 10:58:46.660315 17344 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0819 11:01:05.013933 17344 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 11:02:27.738355 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5752\nI0819 11:02:27.738720 17344 solver.cpp:404]     Test net output #1: loss = 4.05045 (* 1 = 4.05045 loss)\nI0819 11:02:29.080243 17344 solver.cpp:228] Iteration 68600, loss = 0.00186942\nI0819 11:02:29.080278 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:02:29.080293 17344 solver.cpp:244]     Train net output #1: loss = 0.00186886 (* 1 = 0.00186886 loss)\nI0819 11:02:29.144322 17344 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0819 11:04:47.486261 17344 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 11:06:10.203869 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55716\nI0819 11:06:10.204223 17344 solver.cpp:404]     Test net output #1: loss = 4.31786 (* 1 = 4.31786 loss)\nI0819 11:06:11.545862 17344 solver.cpp:228] Iteration 68700, loss = 0.00109591\nI0819 11:06:11.545902 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:06:11.545922 17344 solver.cpp:244]     Train net output #1: loss = 0.00109535 (* 1 = 0.00109535 loss)\nI0819 11:06:11.608996 17344 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0819 11:08:29.992183 17344 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 11:09:52.715750 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55604\nI0819 
11:09:52.716120 17344 solver.cpp:404]     Test net output #1: loss = 4.5751 (* 1 = 4.5751 loss)\nI0819 11:09:54.057696 17344 solver.cpp:228] Iteration 68800, loss = 0.00197689\nI0819 11:09:54.057729 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:09:54.057744 17344 solver.cpp:244]     Train net output #1: loss = 0.00197633 (* 1 = 0.00197633 loss)\nI0819 11:09:54.126405 17344 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0819 11:12:12.504259 17344 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 11:13:35.244637 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56512\nI0819 11:13:35.244984 17344 solver.cpp:404]     Test net output #1: loss = 4.39938 (* 1 = 4.39938 loss)\nI0819 11:13:36.586463 17344 solver.cpp:228] Iteration 68900, loss = 0.00237678\nI0819 11:13:36.586498 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:13:36.586513 17344 solver.cpp:244]     Train net output #1: loss = 0.00237622 (* 1 = 0.00237622 loss)\nI0819 11:13:36.647171 17344 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0819 11:15:55.045059 17344 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 11:17:17.771302 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56328\nI0819 11:17:17.771639 17344 solver.cpp:404]     Test net output #1: loss = 4.37621 (* 1 = 4.37621 loss)\nI0819 11:17:19.112859 17344 solver.cpp:228] Iteration 69000, loss = 0.00217977\nI0819 11:17:19.112896 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:17:19.112912 17344 solver.cpp:244]     Train net output #1: loss = 0.00217921 (* 1 = 0.00217921 loss)\nI0819 11:17:19.180315 17344 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0819 11:19:37.531499 17344 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 11:21:00.260532 17344 solver.cpp:404]     Test net output #0: accuracy = 0.563\nI0819 11:21:00.260900 17344 solver.cpp:404]     Test net output #1: loss = 4.47603 (* 1 = 4.47603 loss)\nI0819 11:21:01.602480 17344 
solver.cpp:228] Iteration 69100, loss = 0.00148793\nI0819 11:21:01.602514 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:21:01.602529 17344 solver.cpp:244]     Train net output #1: loss = 0.00148737 (* 1 = 0.00148737 loss)\nI0819 11:21:01.667031 17344 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0819 11:23:20.039211 17344 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 11:24:42.769152 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5668\nI0819 11:24:42.769501 17344 solver.cpp:404]     Test net output #1: loss = 4.37107 (* 1 = 4.37107 loss)\nI0819 11:24:44.111227 17344 solver.cpp:228] Iteration 69200, loss = 0.00137597\nI0819 11:24:44.111259 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:24:44.111274 17344 solver.cpp:244]     Train net output #1: loss = 0.00137541 (* 1 = 0.00137541 loss)\nI0819 11:24:44.179185 17344 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0819 11:27:02.531158 17344 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 11:28:25.256403 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5606\nI0819 11:28:25.256757 17344 solver.cpp:404]     Test net output #1: loss = 4.52292 (* 1 = 4.52292 loss)\nI0819 11:28:26.598074 17344 solver.cpp:228] Iteration 69300, loss = 0.00130631\nI0819 11:28:26.598107 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:28:26.598122 17344 solver.cpp:244]     Train net output #1: loss = 0.00130575 (* 1 = 0.00130575 loss)\nI0819 11:28:26.663692 17344 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0819 11:30:45.035353 17344 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 11:32:07.767012 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5492\nI0819 11:32:07.767361 17344 solver.cpp:404]     Test net output #1: loss = 4.64544 (* 1 = 4.64544 loss)\nI0819 11:32:09.109251 17344 solver.cpp:228] Iteration 69400, loss = 0.00246261\nI0819 11:32:09.109284 17344 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0819 11:32:09.109299 17344 solver.cpp:244]     Train net output #1: loss = 0.00246205 (* 1 = 0.00246205 loss)\nI0819 11:32:09.184638 17344 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0819 11:34:27.570122 17344 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 11:35:50.297556 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53936\nI0819 11:35:50.297924 17344 solver.cpp:404]     Test net output #1: loss = 4.85331 (* 1 = 4.85331 loss)\nI0819 11:35:51.639875 17344 solver.cpp:228] Iteration 69500, loss = 0.00170632\nI0819 11:35:51.639907 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:35:51.639922 17344 solver.cpp:244]     Train net output #1: loss = 0.00170576 (* 1 = 0.00170576 loss)\nI0819 11:35:51.704754 17344 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0819 11:38:10.086045 17344 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 11:39:32.811938 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51464\nI0819 11:39:32.812279 17344 solver.cpp:404]     Test net output #1: loss = 5.19238 (* 1 = 5.19238 loss)\nI0819 11:39:34.153036 17344 solver.cpp:228] Iteration 69600, loss = 0.0012561\nI0819 11:39:34.153067 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:39:34.153082 17344 solver.cpp:244]     Train net output #1: loss = 0.00125554 (* 1 = 0.00125554 loss)\nI0819 11:39:34.222340 17344 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0819 11:41:52.593349 17344 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 11:43:15.219904 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51336\nI0819 11:43:15.220263 17344 solver.cpp:404]     Test net output #1: loss = 5.11898 (* 1 = 5.11898 loss)\nI0819 11:43:16.561553 17344 solver.cpp:228] Iteration 69700, loss = 0.00121234\nI0819 11:43:16.561585 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:43:16.561600 17344 solver.cpp:244]     Train net output #1: loss = 0.00121178 (* 1 = 0.00121178 loss)\nI0819 
11:43:16.628110 17344 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0819 11:45:34.932087 17344 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 11:46:57.561856 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51336\nI0819 11:46:57.562207 17344 solver.cpp:404]     Test net output #1: loss = 5.14864 (* 1 = 5.14864 loss)\nI0819 11:46:58.903173 17344 solver.cpp:228] Iteration 69800, loss = 0.00171278\nI0819 11:46:58.903208 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:46:58.903223 17344 solver.cpp:244]     Train net output #1: loss = 0.00171222 (* 1 = 0.00171222 loss)\nI0819 11:46:58.969560 17344 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0819 11:49:17.442158 17344 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 11:50:40.088376 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50868\nI0819 11:50:40.088737 17344 solver.cpp:404]     Test net output #1: loss = 5.21491 (* 1 = 5.21491 loss)\nI0819 11:50:41.431053 17344 solver.cpp:228] Iteration 69900, loss = 0.00136961\nI0819 11:50:41.431087 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:50:41.431103 17344 solver.cpp:244]     Train net output #1: loss = 0.00136905 (* 1 = 0.00136905 loss)\nI0819 11:50:41.491998 17344 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0819 11:52:59.911420 17344 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 11:54:22.551175 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50428\nI0819 11:54:22.551514 17344 solver.cpp:404]     Test net output #1: loss = 5.22806 (* 1 = 5.22806 loss)\nI0819 11:54:23.893267 17344 solver.cpp:228] Iteration 70000, loss = 0.00140266\nI0819 11:54:23.893301 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:54:23.893317 17344 solver.cpp:244]     Train net output #1: loss = 0.00140211 (* 1 = 0.00140211 loss)\nI0819 11:54:23.960929 17344 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:54:23.960950 17344 
sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0819 11:56:42.341781 17344 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:58:04.980887 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55156\nI0819 11:58:04.981232 17344 solver.cpp:404]     Test net output #1: loss = 4.28951 (* 1 = 4.28951 loss)\nI0819 11:58:06.322801 17344 solver.cpp:228] Iteration 70100, loss = 0.00212338\nI0819 11:58:06.322841 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:58:06.322857 17344 solver.cpp:244]     Train net output #1: loss = 0.00212282 (* 1 = 0.00212282 loss)\nI0819 11:58:06.394436 17344 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0819 12:00:24.799976 17344 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 12:01:47.442991 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59388\nI0819 12:01:47.443331 17344 solver.cpp:404]     Test net output #1: loss = 3.54219 (* 1 = 3.54219 loss)\nI0819 12:01:48.784621 17344 solver.cpp:228] Iteration 70200, loss = 0.00105636\nI0819 12:01:48.784652 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:01:48.784668 17344 solver.cpp:244]     Train net output #1: loss = 0.0010558 (* 1 = 0.0010558 loss)\nI0819 12:01:48.853060 17344 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0819 12:04:07.222360 17344 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 12:05:29.857614 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63568\nI0819 12:05:29.857967 17344 solver.cpp:404]     Test net output #1: loss = 3.0157 (* 1 = 3.0157 loss)\nI0819 12:05:31.200139 17344 solver.cpp:228] Iteration 70300, loss = 0.00170018\nI0819 12:05:31.200171 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:05:31.200186 17344 solver.cpp:244]     Train net output #1: loss = 0.00169962 (* 1 = 0.00169962 loss)\nI0819 12:05:31.266767 17344 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0819 12:07:49.645226 17344 solver.cpp:337] Iteration 70400, Testing net 
(#0)\nI0819 12:09:12.279872 17344 solver.cpp:404]     Test net output #0: accuracy = 0.669\nI0819 12:09:12.280223 17344 solver.cpp:404]     Test net output #1: loss = 2.58428 (* 1 = 2.58428 loss)\nI0819 12:09:13.621245 17344 solver.cpp:228] Iteration 70400, loss = 0.00122966\nI0819 12:09:13.621279 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:09:13.621295 17344 solver.cpp:244]     Train net output #1: loss = 0.0012291 (* 1 = 0.0012291 loss)\nI0819 12:09:13.690135 17344 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0819 12:11:32.179086 17344 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 12:12:54.825817 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70004\nI0819 12:12:54.826164 17344 solver.cpp:404]     Test net output #1: loss = 2.2736 (* 1 = 2.2736 loss)\nI0819 12:12:56.168407 17344 solver.cpp:228] Iteration 70500, loss = 0.00219055\nI0819 12:12:56.168443 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:12:56.168458 17344 solver.cpp:244]     Train net output #1: loss = 0.00218999 (* 1 = 0.00218999 loss)\nI0819 12:12:56.233649 17344 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0819 12:15:14.700918 17344 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 12:16:37.450402 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72852\nI0819 12:16:37.450757 17344 solver.cpp:404]     Test net output #1: loss = 2.01301 (* 1 = 2.01301 loss)\nI0819 12:16:38.791952 17344 solver.cpp:228] Iteration 70600, loss = 0.00134719\nI0819 12:16:38.791986 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:16:38.792001 17344 solver.cpp:244]     Train net output #1: loss = 0.00134663 (* 1 = 0.00134663 loss)\nI0819 12:16:38.863195 17344 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0819 12:18:57.250562 17344 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 12:20:19.971599 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75192\nI0819 12:20:19.971947 17344 
solver.cpp:404]     Test net output #1: loss = 1.81532 (* 1 = 1.81532 loss)\nI0819 12:20:21.313474 17344 solver.cpp:228] Iteration 70700, loss = 0.0021254\nI0819 12:20:21.313508 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:20:21.313524 17344 solver.cpp:244]     Train net output #1: loss = 0.00212484 (* 1 = 0.00212484 loss)\nI0819 12:20:21.377647 17344 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0819 12:22:39.757652 17344 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0819 12:24:02.491595 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7652\nI0819 12:24:02.491962 17344 solver.cpp:404]     Test net output #1: loss = 1.67051 (* 1 = 1.67051 loss)\nI0819 12:24:03.833245 17344 solver.cpp:228] Iteration 70800, loss = 0.00162059\nI0819 12:24:03.833278 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:24:03.833293 17344 solver.cpp:244]     Train net output #1: loss = 0.00162003 (* 1 = 0.00162003 loss)\nI0819 12:24:03.902366 17344 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0819 12:26:22.299309 17344 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 12:27:45.040597 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI0819 12:27:45.040963 17344 solver.cpp:404]     Test net output #1: loss = 1.56097 (* 1 = 1.56097 loss)\nI0819 12:27:46.382267 17344 solver.cpp:228] Iteration 70900, loss = 0.00142686\nI0819 12:27:46.382300 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:27:46.382315 17344 solver.cpp:244]     Train net output #1: loss = 0.0014263 (* 1 = 0.0014263 loss)\nI0819 12:27:46.445596 17344 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0819 12:30:04.724834 17344 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 12:31:27.459311 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79104\nI0819 12:31:27.459645 17344 solver.cpp:404]     Test net output #1: loss = 1.45766 (* 1 = 1.45766 loss)\nI0819 12:31:28.801481 17344 solver.cpp:228] 
Iteration 71000, loss = 0.00105363\nI0819 12:31:28.801515 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:31:28.801529 17344 solver.cpp:244]     Train net output #1: loss = 0.00105307 (* 1 = 0.00105307 loss)\nI0819 12:31:28.867786 17344 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0819 12:33:47.257994 17344 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 12:35:09.993531 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79888\nI0819 12:35:09.993916 17344 solver.cpp:404]     Test net output #1: loss = 1.38954 (* 1 = 1.38954 loss)\nI0819 12:35:11.335824 17344 solver.cpp:228] Iteration 71100, loss = 0.00136583\nI0819 12:35:11.335857 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:35:11.335871 17344 solver.cpp:244]     Train net output #1: loss = 0.00136527 (* 1 = 0.00136527 loss)\nI0819 12:35:11.403564 17344 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0819 12:37:29.793803 17344 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 12:38:52.518101 17344 solver.cpp:404]     Test net output #0: accuracy = 0.804\nI0819 12:38:52.518453 17344 solver.cpp:404]     Test net output #1: loss = 1.33099 (* 1 = 1.33099 loss)\nI0819 12:38:53.859884 17344 solver.cpp:228] Iteration 71200, loss = 0.00173867\nI0819 12:38:53.859925 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:38:53.859939 17344 solver.cpp:244]     Train net output #1: loss = 0.00173811 (* 1 = 0.00173811 loss)\nI0819 12:38:53.924748 17344 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0819 12:41:12.267271 17344 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 12:42:35.006274 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80876\nI0819 12:42:35.006631 17344 solver.cpp:404]     Test net output #1: loss = 1.28296 (* 1 = 1.28296 loss)\nI0819 12:42:36.344805 17344 solver.cpp:228] Iteration 71300, loss = 0.0010443\nI0819 12:42:36.344840 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
12:42:36.344853 17344 solver.cpp:244]     Train net output #1: loss = 0.00104374 (* 1 = 0.00104374 loss)\nI0819 12:42:36.414647 17344 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0819 12:44:55.084854 17344 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 12:46:18.648437 17344 solver.cpp:404]     Test net output #0: accuracy = 0.812921\nI0819 12:46:18.648735 17344 solver.cpp:404]     Test net output #1: loss = 1.23925 (* 1 = 1.23925 loss)\nI0819 12:46:19.990468 17344 solver.cpp:228] Iteration 71400, loss = 0.00139947\nI0819 12:46:19.990511 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:46:19.990527 17344 solver.cpp:244]     Train net output #1: loss = 0.00139891 (* 1 = 0.00139891 loss)\nI0819 12:46:20.057945 17344 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0819 12:48:38.672880 17344 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:50:02.240952 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81724\nI0819 12:50:02.241283 17344 solver.cpp:404]     Test net output #1: loss = 1.21019 (* 1 = 1.21019 loss)\nI0819 12:50:03.583089 17344 solver.cpp:228] Iteration 71500, loss = 0.00134917\nI0819 12:50:03.583132 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:50:03.583148 17344 solver.cpp:244]     Train net output #1: loss = 0.00134861 (* 1 = 0.00134861 loss)\nI0819 12:50:03.657225 17344 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0819 12:52:22.418879 17344 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:53:45.989490 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI0819 12:53:45.989768 17344 solver.cpp:404]     Test net output #1: loss = 1.18881 (* 1 = 1.18881 loss)\nI0819 12:53:47.330976 17344 solver.cpp:228] Iteration 71600, loss = 0.00130877\nI0819 12:53:47.331023 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:53:47.331039 17344 solver.cpp:244]     Train net output #1: loss = 0.00130821 (* 1 = 0.00130821 loss)\nI0819 
12:53:47.401202 17344 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0819 12:56:06.163961 17344 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:57:29.605226 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82088\nI0819 12:57:29.605530 17344 solver.cpp:404]     Test net output #1: loss = 1.16512 (* 1 = 1.16512 loss)\nI0819 12:57:30.946686 17344 solver.cpp:228] Iteration 71700, loss = 0.00143581\nI0819 12:57:30.946727 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:57:30.946743 17344 solver.cpp:244]     Train net output #1: loss = 0.00143525 (* 1 = 0.00143525 loss)\nI0819 12:57:31.017192 17344 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0819 12:59:49.743319 17344 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 13:01:13.070475 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82496\nI0819 13:01:13.070839 17344 solver.cpp:404]     Test net output #1: loss = 1.15037 (* 1 = 1.15037 loss)\nI0819 13:01:14.416227 17344 solver.cpp:228] Iteration 71800, loss = 0.00122506\nI0819 13:01:14.416271 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:01:14.416287 17344 solver.cpp:244]     Train net output #1: loss = 0.0012245 (* 1 = 0.0012245 loss)\nI0819 13:01:14.480886 17344 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0819 13:03:33.038915 17344 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 13:04:56.623095 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82636\nI0819 13:04:56.623370 17344 solver.cpp:404]     Test net output #1: loss = 1.13604 (* 1 = 1.13604 loss)\nI0819 13:04:57.963500 17344 solver.cpp:228] Iteration 71900, loss = 0.00139094\nI0819 13:04:57.963542 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:04:57.963558 17344 solver.cpp:244]     Train net output #1: loss = 0.00139038 (* 1 = 0.00139038 loss)\nI0819 13:04:58.030102 17344 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0819 13:07:16.535964 17344 solver.cpp:337] Iteration 
72000, Testing net (#0)\nI0819 13:08:40.032140 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82764\nI0819 13:08:40.032436 17344 solver.cpp:404]     Test net output #1: loss = 1.12307 (* 1 = 1.12307 loss)\nI0819 13:08:41.372596 17344 solver.cpp:228] Iteration 72000, loss = 0.00121073\nI0819 13:08:41.372639 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:08:41.372655 17344 solver.cpp:244]     Train net output #1: loss = 0.00121016 (* 1 = 0.00121016 loss)\nI0819 13:08:41.436199 17344 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0819 13:10:59.926204 17344 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 13:12:23.370369 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82952\nI0819 13:12:23.370724 17344 solver.cpp:404]     Test net output #1: loss = 1.11006 (* 1 = 1.11006 loss)\nI0819 13:12:24.711202 17344 solver.cpp:228] Iteration 72100, loss = 0.00242565\nI0819 13:12:24.711247 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:12:24.711262 17344 solver.cpp:244]     Train net output #1: loss = 0.00242509 (* 1 = 0.00242509 loss)\nI0819 13:12:24.777812 17344 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0819 13:14:43.337271 17344 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 13:16:06.480314 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82988\nI0819 13:16:06.480618 17344 solver.cpp:404]     Test net output #1: loss = 1.10341 (* 1 = 1.10341 loss)\nI0819 13:16:07.820624 17344 solver.cpp:228] Iteration 72200, loss = 0.00110278\nI0819 13:16:07.820667 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:16:07.820683 17344 solver.cpp:244]     Train net output #1: loss = 0.00110222 (* 1 = 0.00110222 loss)\nI0819 13:16:07.889328 17344 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0819 13:18:26.407933 17344 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 13:19:49.552445 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82996\nI0819 
13:19:49.552754 17344 solver.cpp:404]     Test net output #1: loss = 1.09382 (* 1 = 1.09382 loss)\nI0819 13:19:50.893285 17344 solver.cpp:228] Iteration 72300, loss = 0.00131698\nI0819 13:19:50.893326 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:19:50.893342 17344 solver.cpp:244]     Train net output #1: loss = 0.00131642 (* 1 = 0.00131642 loss)\nI0819 13:19:50.965188 17344 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0819 13:22:09.477766 17344 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 13:23:32.735733 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83068\nI0819 13:23:32.736029 17344 solver.cpp:404]     Test net output #1: loss = 1.08994 (* 1 = 1.08994 loss)\nI0819 13:23:34.076370 17344 solver.cpp:228] Iteration 72400, loss = 0.00112082\nI0819 13:23:34.076412 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:23:34.076428 17344 solver.cpp:244]     Train net output #1: loss = 0.00112026 (* 1 = 0.00112026 loss)\nI0819 13:23:34.142470 17344 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0819 13:25:52.632997 17344 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 13:27:15.962296 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83048\nI0819 13:27:15.962585 17344 solver.cpp:404]     Test net output #1: loss = 1.08194 (* 1 = 1.08194 loss)\nI0819 13:27:17.302587 17344 solver.cpp:228] Iteration 72500, loss = 0.00144922\nI0819 13:27:17.302631 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:27:17.302645 17344 solver.cpp:244]     Train net output #1: loss = 0.00144866 (* 1 = 0.00144866 loss)\nI0819 13:27:17.376765 17344 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0819 13:29:35.877882 17344 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 13:30:59.335705 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83168\nI0819 13:30:59.336024 17344 solver.cpp:404]     Test net output #1: loss = 1.07642 (* 1 = 1.07642 loss)\nI0819 13:31:00.677229 
17344 solver.cpp:228] Iteration 72600, loss = 0.00159078\nI0819 13:31:00.677270 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:31:00.677287 17344 solver.cpp:244]     Train net output #1: loss = 0.00159022 (* 1 = 0.00159022 loss)\nI0819 13:31:00.741217 17344 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0819 13:33:19.231079 17344 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 13:34:42.826047 17344 solver.cpp:404]     Test net output #0: accuracy = 0.832841\nI0819 13:34:42.826318 17344 solver.cpp:404]     Test net output #1: loss = 1.0732 (* 1 = 1.0732 loss)\nI0819 13:34:44.167382 17344 solver.cpp:228] Iteration 72700, loss = 0.00127149\nI0819 13:34:44.167424 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:34:44.167440 17344 solver.cpp:244]     Train net output #1: loss = 0.00127093 (* 1 = 0.00127093 loss)\nI0819 13:34:44.230861 17344 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0819 13:37:02.706730 17344 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 13:38:25.938817 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83236\nI0819 13:38:25.939082 17344 solver.cpp:404]     Test net output #1: loss = 1.07444 (* 1 = 1.07444 loss)\nI0819 13:38:27.279528 17344 solver.cpp:228] Iteration 72800, loss = 0.00112513\nI0819 13:38:27.279572 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:38:27.279587 17344 solver.cpp:244]     Train net output #1: loss = 0.00112457 (* 1 = 0.00112457 loss)\nI0819 13:38:27.351135 17344 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0819 13:40:45.825193 17344 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 13:42:09.059365 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83348\nI0819 13:42:09.059669 17344 solver.cpp:404]     Test net output #1: loss = 1.06813 (* 1 = 1.06813 loss)\nI0819 13:42:10.400121 17344 solver.cpp:228] Iteration 72900, loss = 0.00129055\nI0819 13:42:10.400163 17344 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 13:42:10.400179 17344 solver.cpp:244]     Train net output #1: loss = 0.00128999 (* 1 = 0.00128999 loss)\nI0819 13:42:10.471344 17344 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0819 13:44:28.949013 17344 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 13:45:52.201308 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8338\nI0819 13:45:52.201573 17344 solver.cpp:404]     Test net output #1: loss = 1.06976 (* 1 = 1.06976 loss)\nI0819 13:45:53.542227 17344 solver.cpp:228] Iteration 73000, loss = 0.00168444\nI0819 13:45:53.542269 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:45:53.542285 17344 solver.cpp:244]     Train net output #1: loss = 0.00168388 (* 1 = 0.00168388 loss)\nI0819 13:45:53.609410 17344 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0819 13:48:12.097295 17344 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:49:35.406163 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8344\nI0819 13:49:35.406463 17344 solver.cpp:404]     Test net output #1: loss = 1.06344 (* 1 = 1.06344 loss)\nI0819 13:49:36.747135 17344 solver.cpp:228] Iteration 73100, loss = 0.00121018\nI0819 13:49:36.747179 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:49:36.747195 17344 solver.cpp:244]     Train net output #1: loss = 0.00120962 (* 1 = 0.00120962 loss)\nI0819 13:49:36.807072 17344 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0819 13:51:55.294795 17344 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:53:18.587834 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83528\nI0819 13:53:18.588193 17344 solver.cpp:404]     Test net output #1: loss = 1.06257 (* 1 = 1.06257 loss)\nI0819 13:53:19.929106 17344 solver.cpp:228] Iteration 73200, loss = 0.0015135\nI0819 13:53:19.929150 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:53:19.929167 17344 solver.cpp:244]     Train net output #1: loss = 0.00151294 (* 1 = 0.00151294 
loss)\nI0819 13:53:19.994822 17344 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0819 13:55:38.565549 17344 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:57:01.792506 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83516\nI0819 13:57:01.792773 17344 solver.cpp:404]     Test net output #1: loss = 1.06102 (* 1 = 1.06102 loss)\nI0819 13:57:03.133528 17344 solver.cpp:228] Iteration 73300, loss = 0.00130193\nI0819 13:57:03.133572 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:57:03.133587 17344 solver.cpp:244]     Train net output #1: loss = 0.00130137 (* 1 = 0.00130137 loss)\nI0819 13:57:03.196760 17344 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0819 13:59:21.904453 17344 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 14:00:45.466851 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8346\nI0819 14:00:45.467180 17344 solver.cpp:404]     Test net output #1: loss = 1.06268 (* 1 = 1.06268 loss)\nI0819 14:00:46.808897 17344 solver.cpp:228] Iteration 73400, loss = 0.00163843\nI0819 14:00:46.808940 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:00:46.808955 17344 solver.cpp:244]     Train net output #1: loss = 0.00163787 (* 1 = 0.00163787 loss)\nI0819 14:00:46.877373 17344 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0819 14:03:05.550192 17344 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 14:04:29.111825 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83476\nI0819 14:04:29.112097 17344 solver.cpp:404]     Test net output #1: loss = 1.05545 (* 1 = 1.05545 loss)\nI0819 14:04:30.454556 17344 solver.cpp:228] Iteration 73500, loss = 0.0015419\nI0819 14:04:30.454599 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:04:30.454615 17344 solver.cpp:244]     Train net output #1: loss = 0.00154134 (* 1 = 0.00154134 loss)\nI0819 14:04:30.519765 17344 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0819 14:06:49.176159 17344 
solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 14:08:12.742851 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83476\nI0819 14:08:12.743157 17344 solver.cpp:404]     Test net output #1: loss = 1.05744 (* 1 = 1.05744 loss)\nI0819 14:08:14.083658 17344 solver.cpp:228] Iteration 73600, loss = 0.00133828\nI0819 14:08:14.083703 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:08:14.083719 17344 solver.cpp:244]     Train net output #1: loss = 0.00133772 (* 1 = 0.00133772 loss)\nI0819 14:08:14.149776 17344 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0819 14:10:32.711259 17344 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 14:11:55.964598 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83532\nI0819 14:11:55.964864 17344 solver.cpp:404]     Test net output #1: loss = 1.05182 (* 1 = 1.05182 loss)\nI0819 14:11:57.305424 17344 solver.cpp:228] Iteration 73700, loss = 0.000987703\nI0819 14:11:57.305467 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:11:57.305482 17344 solver.cpp:244]     Train net output #1: loss = 0.000987143 (* 1 = 0.000987143 loss)\nI0819 14:11:57.374918 17344 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0819 14:14:15.947480 17344 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 14:15:39.156393 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8352\nI0819 14:15:39.156649 17344 solver.cpp:404]     Test net output #1: loss = 1.0563 (* 1 = 1.0563 loss)\nI0819 14:15:40.497182 17344 solver.cpp:228] Iteration 73800, loss = 0.00145057\nI0819 14:15:40.497223 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:15:40.497239 17344 solver.cpp:244]     Train net output #1: loss = 0.00145001 (* 1 = 0.00145001 loss)\nI0819 14:15:40.561287 17344 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0819 14:17:59.075865 17344 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 14:19:22.331022 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.83436\nI0819 14:19:22.331324 17344 solver.cpp:404]     Test net output #1: loss = 1.05726 (* 1 = 1.05726 loss)\nI0819 14:19:23.671284 17344 solver.cpp:228] Iteration 73900, loss = 0.00161639\nI0819 14:19:23.671326 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:19:23.671342 17344 solver.cpp:244]     Train net output #1: loss = 0.00161583 (* 1 = 0.00161583 loss)\nI0819 14:19:23.733114 17344 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0819 14:21:42.233573 17344 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 14:23:05.348203 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83476\nI0819 14:23:05.348556 17344 solver.cpp:404]     Test net output #1: loss = 1.06118 (* 1 = 1.06118 loss)\nI0819 14:23:06.688894 17344 solver.cpp:228] Iteration 74000, loss = 0.00130076\nI0819 14:23:06.688936 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:23:06.688951 17344 solver.cpp:244]     Train net output #1: loss = 0.0013002 (* 1 = 0.0013002 loss)\nI0819 14:23:06.761538 17344 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0819 14:25:25.243263 17344 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 14:26:48.457864 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83516\nI0819 14:26:48.458145 17344 solver.cpp:404]     Test net output #1: loss = 1.05786 (* 1 = 1.05786 loss)\nI0819 14:26:49.798187 17344 solver.cpp:228] Iteration 74100, loss = 0.00115685\nI0819 14:26:49.798228 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:26:49.798243 17344 solver.cpp:244]     Train net output #1: loss = 0.00115629 (* 1 = 0.00115629 loss)\nI0819 14:26:49.877053 17344 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0819 14:29:08.363036 17344 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 14:30:31.646081 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83444\nI0819 14:30:31.646375 17344 solver.cpp:404]     Test net output #1: loss = 1.062 (* 1 = 1.062 loss)\nI0819 
14:30:32.986874 17344 solver.cpp:228] Iteration 74200, loss = 0.00136811\nI0819 14:30:32.986920 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:30:32.986949 17344 solver.cpp:244]     Train net output #1: loss = 0.00136755 (* 1 = 0.00136755 loss)\nI0819 14:30:33.059121 17344 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0819 14:32:51.664808 17344 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 14:34:14.904176 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83588\nI0819 14:34:14.904459 17344 solver.cpp:404]     Test net output #1: loss = 1.05724 (* 1 = 1.05724 loss)\nI0819 14:34:16.244539 17344 solver.cpp:228] Iteration 74300, loss = 0.00122378\nI0819 14:34:16.244582 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:34:16.244606 17344 solver.cpp:244]     Train net output #1: loss = 0.00122321 (* 1 = 0.00122321 loss)\nI0819 14:34:16.320564 17344 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0819 14:36:34.931736 17344 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 14:37:58.417313 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83592\nI0819 14:37:58.417703 17344 solver.cpp:404]     Test net output #1: loss = 1.05917 (* 1 = 1.05917 loss)\nI0819 14:37:59.757530 17344 solver.cpp:228] Iteration 74400, loss = 0.0015503\nI0819 14:37:59.757575 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:37:59.757597 17344 solver.cpp:244]     Train net output #1: loss = 0.00154973 (* 1 = 0.00154973 loss)\nI0819 14:37:59.827584 17344 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0819 14:40:18.458843 17344 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 14:41:42.054239 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83676\nI0819 14:41:42.054627 17344 solver.cpp:404]     Test net output #1: loss = 1.05335 (* 1 = 1.05335 loss)\nI0819 14:41:43.395941 17344 solver.cpp:228] Iteration 74500, loss = 0.00161886\nI0819 14:41:43.395985 17344 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0819 14:41:43.396008 17344 solver.cpp:244]     Train net output #1: loss = 0.0016183 (* 1 = 0.0016183 loss)\nI0819 14:41:43.463732 17344 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0819 14:44:02.077311 17344 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 14:45:25.707917 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83608\nI0819 14:45:25.708302 17344 solver.cpp:404]     Test net output #1: loss = 1.05343 (* 1 = 1.05343 loss)\nI0819 14:45:27.049091 17344 solver.cpp:228] Iteration 74600, loss = 0.00160282\nI0819 14:45:27.049135 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:45:27.049160 17344 solver.cpp:244]     Train net output #1: loss = 0.00160226 (* 1 = 0.00160226 loss)\nI0819 14:45:27.127599 17344 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0819 14:47:45.580094 17344 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 14:49:09.195576 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83708\nI0819 14:49:09.195929 17344 solver.cpp:404]     Test net output #1: loss = 1.04922 (* 1 = 1.04922 loss)\nI0819 14:49:10.536334 17344 solver.cpp:228] Iteration 74700, loss = 0.00138141\nI0819 14:49:10.536373 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:49:10.536389 17344 solver.cpp:244]     Train net output #1: loss = 0.00138085 (* 1 = 0.00138085 loss)\nI0819 14:49:10.611811 17344 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0819 14:51:29.092378 17344 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:52:52.702375 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83532\nI0819 14:52:52.702761 17344 solver.cpp:404]     Test net output #1: loss = 1.05273 (* 1 = 1.05273 loss)\nI0819 14:52:54.043151 17344 solver.cpp:228] Iteration 74800, loss = 0.00128653\nI0819 14:52:54.043193 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:52:54.043207 17344 solver.cpp:244]     Train net output #1: loss = 0.00128597 (* 1 
= 0.00128597 loss)\nI0819 14:52:54.113847 17344 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0819 14:55:12.568270 17344 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 14:56:36.173593 17344 solver.cpp:404]     Test net output #0: accuracy = 0.836\nI0819 14:56:36.173918 17344 solver.cpp:404]     Test net output #1: loss = 1.05142 (* 1 = 1.05142 loss)\nI0819 14:56:37.514338 17344 solver.cpp:228] Iteration 74900, loss = 0.00105942\nI0819 14:56:37.514376 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:56:37.514391 17344 solver.cpp:244]     Train net output #1: loss = 0.00105886 (* 1 = 0.00105886 loss)\nI0819 14:56:37.594389 17344 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0819 14:58:56.011790 17344 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 15:00:19.608392 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8358\nI0819 15:00:19.608736 17344 solver.cpp:404]     Test net output #1: loss = 1.05542 (* 1 = 1.05542 loss)\nI0819 15:00:20.949180 17344 solver.cpp:228] Iteration 75000, loss = 0.00115511\nI0819 15:00:20.949218 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:00:20.949234 17344 solver.cpp:244]     Train net output #1: loss = 0.00115455 (* 1 = 0.00115455 loss)\nI0819 15:00:21.021476 17344 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0819 15:02:39.472203 17344 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 15:04:03.089469 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83624\nI0819 15:04:03.089835 17344 solver.cpp:404]     Test net output #1: loss = 1.05182 (* 1 = 1.05182 loss)\nI0819 15:04:04.429898 17344 solver.cpp:228] Iteration 75100, loss = 0.00103608\nI0819 15:04:04.429939 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:04:04.429955 17344 solver.cpp:244]     Train net output #1: loss = 0.00103552 (* 1 = 0.00103552 loss)\nI0819 15:04:04.511688 17344 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0819 15:06:23.015817 17344 
solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 15:07:46.624572 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83604\nI0819 15:07:46.624938 17344 solver.cpp:404]     Test net output #1: loss = 1.05623 (* 1 = 1.05623 loss)\nI0819 15:07:47.965817 17344 solver.cpp:228] Iteration 75200, loss = 0.00149188\nI0819 15:07:47.965862 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:07:47.965878 17344 solver.cpp:244]     Train net output #1: loss = 0.00149132 (* 1 = 0.00149132 loss)\nI0819 15:07:48.039955 17344 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0819 15:10:06.571929 17344 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 15:11:30.190109 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83712\nI0819 15:11:30.190464 17344 solver.cpp:404]     Test net output #1: loss = 1.05241 (* 1 = 1.05241 loss)\nI0819 15:11:31.530707 17344 solver.cpp:228] Iteration 75300, loss = 0.00176331\nI0819 15:11:31.530750 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:11:31.530766 17344 solver.cpp:244]     Train net output #1: loss = 0.00176275 (* 1 = 0.00176275 loss)\nI0819 15:11:31.608512 17344 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0819 15:13:50.093794 17344 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 15:15:13.701684 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0819 15:15:13.702041 17344 solver.cpp:404]     Test net output #1: loss = 1.05042 (* 1 = 1.05042 loss)\nI0819 15:15:15.042168 17344 solver.cpp:228] Iteration 75400, loss = 0.001261\nI0819 15:15:15.042212 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:15:15.042227 17344 solver.cpp:244]     Train net output #1: loss = 0.00126044 (* 1 = 0.00126044 loss)\nI0819 15:15:15.112864 17344 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0819 15:17:33.597621 17344 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 15:18:57.216861 17344 solver.cpp:404]     Test net output #0: accuracy 
= 0.83708\nI0819 15:18:57.217241 17344 solver.cpp:404]     Test net output #1: loss = 1.049 (* 1 = 1.049 loss)\nI0819 15:18:58.557343 17344 solver.cpp:228] Iteration 75500, loss = 0.00144659\nI0819 15:18:58.557385 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:18:58.557401 17344 solver.cpp:244]     Train net output #1: loss = 0.00144603 (* 1 = 0.00144603 loss)\nI0819 15:18:58.630563 17344 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0819 15:21:17.171203 17344 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 15:22:40.796620 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83648\nI0819 15:22:40.797014 17344 solver.cpp:404]     Test net output #1: loss = 1.0502 (* 1 = 1.0502 loss)\nI0819 15:22:42.137770 17344 solver.cpp:228] Iteration 75600, loss = 0.00150431\nI0819 15:22:42.137814 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:22:42.137830 17344 solver.cpp:244]     Train net output #1: loss = 0.00150375 (* 1 = 0.00150375 loss)\nI0819 15:22:42.215719 17344 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0819 15:25:00.768816 17344 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 15:26:24.396473 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83672\nI0819 15:26:24.396867 17344 solver.cpp:404]     Test net output #1: loss = 1.0502 (* 1 = 1.0502 loss)\nI0819 15:26:25.737785 17344 solver.cpp:228] Iteration 75700, loss = 0.00170613\nI0819 15:26:25.737828 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:26:25.737843 17344 solver.cpp:244]     Train net output #1: loss = 0.00170557 (* 1 = 0.00170557 loss)\nI0819 15:26:25.807397 17344 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0819 15:28:44.396373 17344 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 15:30:08.116212 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8362\nI0819 15:30:08.116583 17344 solver.cpp:404]     Test net output #1: loss = 1.05488 (* 1 = 1.05488 loss)\nI0819 
15:30:09.457909 17344 solver.cpp:228] Iteration 75800, loss = 0.00129102\nI0819 15:30:09.457953 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:30:09.457969 17344 solver.cpp:244]     Train net output #1: loss = 0.00129046 (* 1 = 0.00129046 loss)\nI0819 15:30:09.534747 17344 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0819 15:32:28.206353 17344 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 15:33:51.822227 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83676\nI0819 15:33:51.822578 17344 solver.cpp:404]     Test net output #1: loss = 1.05001 (* 1 = 1.05001 loss)\nI0819 15:33:53.163664 17344 solver.cpp:228] Iteration 75900, loss = 0.00196403\nI0819 15:33:53.163705 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:33:53.163722 17344 solver.cpp:244]     Train net output #1: loss = 0.00196347 (* 1 = 0.00196347 loss)\nI0819 15:33:53.234182 17344 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0819 15:36:11.866816 17344 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 15:37:35.458451 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8372\nI0819 15:37:35.458748 17344 solver.cpp:404]     Test net output #1: loss = 1.05038 (* 1 = 1.05038 loss)\nI0819 15:37:36.800374 17344 solver.cpp:228] Iteration 76000, loss = 0.00100539\nI0819 15:37:36.800417 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:37:36.800441 17344 solver.cpp:244]     Train net output #1: loss = 0.00100483 (* 1 = 0.00100483 loss)\nI0819 15:37:36.876557 17344 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0819 15:39:55.366600 17344 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 15:41:18.647054 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83764\nI0819 15:41:18.647397 17344 solver.cpp:404]     Test net output #1: loss = 1.04639 (* 1 = 1.04639 loss)\nI0819 15:41:19.987588 17344 solver.cpp:228] Iteration 76100, loss = 0.00114451\nI0819 15:41:19.987633 17344 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0819 15:41:19.987658 17344 solver.cpp:244]     Train net output #1: loss = 0.00114395 (* 1 = 0.00114395 loss)\nI0819 15:41:20.060060 17344 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0819 15:43:38.562387 17344 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 15:45:01.810817 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83728\nI0819 15:45:01.811113 17344 solver.cpp:404]     Test net output #1: loss = 1.04983 (* 1 = 1.04983 loss)\nI0819 15:45:03.153086 17344 solver.cpp:228] Iteration 76200, loss = 0.0017394\nI0819 15:45:03.153134 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:45:03.153156 17344 solver.cpp:244]     Train net output #1: loss = 0.00173884 (* 1 = 0.00173884 loss)\nI0819 15:45:03.223958 17344 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0819 15:47:21.895560 17344 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 15:48:45.110754 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83736\nI0819 15:48:45.111040 17344 solver.cpp:404]     Test net output #1: loss = 1.04637 (* 1 = 1.04637 loss)\nI0819 15:48:46.452853 17344 solver.cpp:228] Iteration 76300, loss = 0.00106299\nI0819 15:48:46.452896 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:48:46.452913 17344 solver.cpp:244]     Train net output #1: loss = 0.00106243 (* 1 = 0.00106243 loss)\nI0819 15:48:46.527876 17344 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0819 15:51:05.241539 17344 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:52:28.538023 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83616\nI0819 15:52:28.538383 17344 solver.cpp:404]     Test net output #1: loss = 1.04907 (* 1 = 1.04907 loss)\nI0819 15:52:29.880522 17344 solver.cpp:228] Iteration 76400, loss = 0.00129893\nI0819 15:52:29.880565 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:52:29.880581 17344 solver.cpp:244]     Train net output #1: loss = 0.00129837 (* 
1 = 0.00129837 loss)\nI0819 15:52:29.952082 17344 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0819 15:54:48.693472 17344 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:56:12.142789 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83592\nI0819 15:56:12.143102 17344 solver.cpp:404]     Test net output #1: loss = 1.04794 (* 1 = 1.04794 loss)\nI0819 15:56:13.483793 17344 solver.cpp:228] Iteration 76500, loss = 0.00127648\nI0819 15:56:13.483837 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:56:13.483852 17344 solver.cpp:244]     Train net output #1: loss = 0.00127592 (* 1 = 0.00127592 loss)\nI0819 15:56:13.552796 17344 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0819 15:58:32.105984 17344 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 15:59:55.365058 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0819 15:59:55.365340 17344 solver.cpp:404]     Test net output #1: loss = 1.04801 (* 1 = 1.04801 loss)\nI0819 15:59:56.705672 17344 solver.cpp:228] Iteration 76600, loss = 0.00175588\nI0819 15:59:56.705713 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:59:56.705729 17344 solver.cpp:244]     Train net output #1: loss = 0.00175532 (* 1 = 0.00175532 loss)\nI0819 15:59:56.773499 17344 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0819 16:02:15.362062 17344 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 16:03:38.646765 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83776\nI0819 16:03:38.647056 17344 solver.cpp:404]     Test net output #1: loss = 1.04709 (* 1 = 1.04709 loss)\nI0819 16:03:39.993248 17344 solver.cpp:228] Iteration 76700, loss = 0.00124148\nI0819 16:03:39.993293 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:03:39.993309 17344 solver.cpp:244]     Train net output #1: loss = 0.00124092 (* 1 = 0.00124092 loss)\nI0819 16:03:40.053472 17344 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0819 16:05:58.816088 
17344 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 16:07:22.309273 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83668\nI0819 16:07:22.309564 17344 solver.cpp:404]     Test net output #1: loss = 1.05018 (* 1 = 1.05018 loss)\nI0819 16:07:23.651470 17344 solver.cpp:228] Iteration 76800, loss = 0.00130744\nI0819 16:07:23.651515 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:07:23.651530 17344 solver.cpp:244]     Train net output #1: loss = 0.00130688 (* 1 = 0.00130688 loss)\nI0819 16:07:23.712725 17344 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0819 16:09:42.443238 17344 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 16:11:05.915824 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83832\nI0819 16:11:05.916155 17344 solver.cpp:404]     Test net output #1: loss = 1.04587 (* 1 = 1.04587 loss)\nI0819 16:11:07.257923 17344 solver.cpp:228] Iteration 76900, loss = 0.00185518\nI0819 16:11:07.257972 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:11:07.257988 17344 solver.cpp:244]     Train net output #1: loss = 0.00185462 (* 1 = 0.00185462 loss)\nI0819 16:11:07.325501 17344 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0819 16:13:26.077579 17344 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 16:14:49.265388 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83724\nI0819 16:14:49.265661 17344 solver.cpp:404]     Test net output #1: loss = 1.04769 (* 1 = 1.04769 loss)\nI0819 16:14:50.607434 17344 solver.cpp:228] Iteration 77000, loss = 0.00160286\nI0819 16:14:50.607476 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:14:50.607492 17344 solver.cpp:244]     Train net output #1: loss = 0.0016023 (* 1 = 0.0016023 loss)\nI0819 16:14:50.671633 17344 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0819 16:17:09.270804 17344 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 16:18:32.415073 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.83808\nI0819 16:18:32.415375 17344 solver.cpp:404]     Test net output #1: loss = 1.04696 (* 1 = 1.04696 loss)\nI0819 16:18:33.759414 17344 solver.cpp:228] Iteration 77100, loss = 0.00118888\nI0819 16:18:33.759457 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:18:33.759474 17344 solver.cpp:244]     Train net output #1: loss = 0.00118832 (* 1 = 0.00118832 loss)\nI0819 16:18:33.819257 17344 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0819 16:20:52.495342 17344 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 16:22:15.933360 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8364\nI0819 16:22:15.933665 17344 solver.cpp:404]     Test net output #1: loss = 1.04945 (* 1 = 1.04945 loss)\nI0819 16:22:17.275569 17344 solver.cpp:228] Iteration 77200, loss = 0.0018013\nI0819 16:22:17.275614 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:22:17.275629 17344 solver.cpp:244]     Train net output #1: loss = 0.00180074 (* 1 = 0.00180074 loss)\nI0819 16:22:17.344563 17344 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0819 16:24:35.922307 17344 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 16:25:59.261198 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83632\nI0819 16:25:59.261512 17344 solver.cpp:404]     Test net output #1: loss = 1.04884 (* 1 = 1.04884 loss)\nI0819 16:26:00.601805 17344 solver.cpp:228] Iteration 77300, loss = 0.00162545\nI0819 16:26:00.601847 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:26:00.601863 17344 solver.cpp:244]     Train net output #1: loss = 0.00162489 (* 1 = 0.00162489 loss)\nI0819 16:26:00.666540 17344 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0819 16:28:19.334852 17344 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 16:29:42.539345 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 16:29:42.539625 17344 solver.cpp:404]     Test net output #1: loss = 1.04942 (* 1 = 1.04942 
loss)\nI0819 16:29:43.881206 17344 solver.cpp:228] Iteration 77400, loss = 0.0016667\nI0819 16:29:43.881249 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:29:43.881266 17344 solver.cpp:244]     Train net output #1: loss = 0.00166614 (* 1 = 0.00166614 loss)\nI0819 16:29:43.946282 17344 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0819 16:32:02.598386 17344 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 16:33:25.882423 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8376\nI0819 16:33:25.882737 17344 solver.cpp:404]     Test net output #1: loss = 1.04879 (* 1 = 1.04879 loss)\nI0819 16:33:27.224524 17344 solver.cpp:228] Iteration 77500, loss = 0.00106722\nI0819 16:33:27.224568 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:33:27.224584 17344 solver.cpp:244]     Train net output #1: loss = 0.00106666 (* 1 = 0.00106666 loss)\nI0819 16:33:27.293797 17344 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0819 16:35:45.921952 17344 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 16:37:08.560274 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83692\nI0819 16:37:08.560650 17344 solver.cpp:404]     Test net output #1: loss = 1.05301 (* 1 = 1.05301 loss)\nI0819 16:37:09.899013 17344 solver.cpp:228] Iteration 77600, loss = 0.00178286\nI0819 16:37:09.899047 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:37:09.899062 17344 solver.cpp:244]     Train net output #1: loss = 0.0017823 (* 1 = 0.0017823 loss)\nI0819 16:37:09.966449 17344 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0819 16:39:28.014551 17344 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 16:40:50.673974 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83752\nI0819 16:40:50.674326 17344 solver.cpp:404]     Test net output #1: loss = 1.05285 (* 1 = 1.05285 loss)\nI0819 16:40:52.012771 17344 solver.cpp:228] Iteration 77700, loss = 0.00146868\nI0819 16:40:52.012805 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:40:52.012821 17344 solver.cpp:244]     Train net output #1: loss = 0.00146813 (* 1 = 0.00146813 loss)\nI0819 16:40:52.079802 17344 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0819 16:43:10.127259 17344 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 16:44:32.781116 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83612\nI0819 16:44:32.781452 17344 solver.cpp:404]     Test net output #1: loss = 1.05285 (* 1 = 1.05285 loss)\nI0819 16:44:34.119740 17344 solver.cpp:228] Iteration 77800, loss = 0.00143813\nI0819 16:44:34.119773 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:44:34.119788 17344 solver.cpp:244]     Train net output #1: loss = 0.00143757 (* 1 = 0.00143757 loss)\nI0819 16:44:34.187191 17344 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0819 16:46:52.249493 17344 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 16:48:14.987170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83672\nI0819 16:48:14.987529 17344 solver.cpp:404]     Test net output #1: loss = 1.05016 (* 1 = 1.05016 loss)\nI0819 16:48:16.326315 17344 solver.cpp:228] Iteration 77900, loss = 0.000938567\nI0819 16:48:16.326349 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:48:16.326364 17344 solver.cpp:244]     Train net output #1: loss = 0.000938009 (* 1 = 0.000938009 loss)\nI0819 16:48:16.395694 17344 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0819 16:50:34.497463 17344 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:51:57.246649 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83608\nI0819 16:51:57.247012 17344 solver.cpp:404]     Test net output #1: loss = 1.05604 (* 1 = 1.05604 loss)\nI0819 16:51:58.585822 17344 solver.cpp:228] Iteration 78000, loss = 0.00146539\nI0819 16:51:58.585861 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:51:58.585877 17344 solver.cpp:244]     Train net output 
#1: loss = 0.00146483 (* 1 = 0.00146483 loss)\nI0819 16:51:58.655391 17344 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0819 16:54:16.714968 17344 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:55:39.453519 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83664\nI0819 16:55:39.453886 17344 solver.cpp:404]     Test net output #1: loss = 1.05353 (* 1 = 1.05353 loss)\nI0819 16:55:40.792624 17344 solver.cpp:228] Iteration 78100, loss = 0.00171305\nI0819 16:55:40.792659 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:55:40.792675 17344 solver.cpp:244]     Train net output #1: loss = 0.00171249 (* 1 = 0.00171249 loss)\nI0819 16:55:40.857370 17344 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0819 16:57:58.904016 17344 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 16:59:21.642199 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83668\nI0819 16:59:21.642576 17344 solver.cpp:404]     Test net output #1: loss = 1.05445 (* 1 = 1.05445 loss)\nI0819 16:59:22.981248 17344 solver.cpp:228] Iteration 78200, loss = 0.00125\nI0819 16:59:22.981284 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:59:22.981299 17344 solver.cpp:244]     Train net output #1: loss = 0.00124944 (* 1 = 0.00124944 loss)\nI0819 16:59:23.049633 17344 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0819 17:01:41.106036 17344 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 17:03:03.845356 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83584\nI0819 17:03:03.845706 17344 solver.cpp:404]     Test net output #1: loss = 1.05396 (* 1 = 1.05396 loss)\nI0819 17:03:05.183352 17344 solver.cpp:228] Iteration 78300, loss = 0.00110275\nI0819 17:03:05.183387 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:03:05.183403 17344 solver.cpp:244]     Train net output #1: loss = 0.00110219 (* 1 = 0.00110219 loss)\nI0819 17:03:05.259117 17344 sgd_solver.cpp:166] Iteration 78300, lr = 
0.0035\nI0819 17:05:23.302734 17344 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 17:06:46.040287 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83552\nI0819 17:06:46.040652 17344 solver.cpp:404]     Test net output #1: loss = 1.05428 (* 1 = 1.05428 loss)\nI0819 17:06:47.378923 17344 solver.cpp:228] Iteration 78400, loss = 0.00132431\nI0819 17:06:47.378959 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:06:47.378974 17344 solver.cpp:244]     Train net output #1: loss = 0.00132375 (* 1 = 0.00132375 loss)\nI0819 17:06:47.443125 17344 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0819 17:09:05.493784 17344 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 17:10:28.232416 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83716\nI0819 17:10:28.232792 17344 solver.cpp:404]     Test net output #1: loss = 1.05113 (* 1 = 1.05113 loss)\nI0819 17:10:29.571059 17344 solver.cpp:228] Iteration 78500, loss = 0.00181629\nI0819 17:10:29.571094 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:10:29.571110 17344 solver.cpp:244]     Train net output #1: loss = 0.00181574 (* 1 = 0.00181574 loss)\nI0819 17:10:29.643864 17344 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0819 17:12:47.754480 17344 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 17:14:10.478075 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 17:14:10.478435 17344 solver.cpp:404]     Test net output #1: loss = 1.05271 (* 1 = 1.05271 loss)\nI0819 17:14:11.817487 17344 solver.cpp:228] Iteration 78600, loss = 0.00110737\nI0819 17:14:11.817523 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:14:11.817538 17344 solver.cpp:244]     Train net output #1: loss = 0.00110681 (* 1 = 0.00110681 loss)\nI0819 17:14:11.882695 17344 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0819 17:16:30.016001 17344 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 17:17:52.750710 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.83708\nI0819 17:17:52.751078 17344 solver.cpp:404]     Test net output #1: loss = 1.05081 (* 1 = 1.05081 loss)\nI0819 17:17:54.090492 17344 solver.cpp:228] Iteration 78700, loss = 0.00123856\nI0819 17:17:54.090526 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:17:54.090541 17344 solver.cpp:244]     Train net output #1: loss = 0.001238 (* 1 = 0.001238 loss)\nI0819 17:17:54.153641 17344 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0819 17:20:12.533416 17344 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 17:21:35.266784 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83668\nI0819 17:21:35.267129 17344 solver.cpp:404]     Test net output #1: loss = 1.05399 (* 1 = 1.05399 loss)\nI0819 17:21:36.608324 17344 solver.cpp:228] Iteration 78800, loss = 0.00221307\nI0819 17:21:36.608358 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:21:36.608373 17344 solver.cpp:244]     Train net output #1: loss = 0.00221251 (* 1 = 0.00221251 loss)\nI0819 17:21:36.675626 17344 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0819 17:23:55.061712 17344 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 17:25:17.800959 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0819 17:25:17.801326 17344 solver.cpp:404]     Test net output #1: loss = 1.05033 (* 1 = 1.05033 loss)\nI0819 17:25:19.143342 17344 solver.cpp:228] Iteration 78900, loss = 0.0014534\nI0819 17:25:19.143378 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:25:19.143393 17344 solver.cpp:244]     Train net output #1: loss = 0.00145284 (* 1 = 0.00145284 loss)\nI0819 17:25:19.214241 17344 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0819 17:27:37.629705 17344 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 17:29:00.368353 17344 solver.cpp:404]     Test net output #0: accuracy = 0.836\nI0819 17:29:00.368721 17344 solver.cpp:404]     Test net output #1: loss 
= 1.05063 (* 1 = 1.05063 loss)\nI0819 17:29:01.710491 17344 solver.cpp:228] Iteration 79000, loss = 0.00124217\nI0819 17:29:01.710526 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:29:01.710541 17344 solver.cpp:244]     Train net output #1: loss = 0.00124161 (* 1 = 0.00124161 loss)\nI0819 17:29:01.774104 17344 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0819 17:31:20.182092 17344 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 17:32:42.921602 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83756\nI0819 17:32:42.921969 17344 solver.cpp:404]     Test net output #1: loss = 1.04876 (* 1 = 1.04876 loss)\nI0819 17:32:44.263010 17344 solver.cpp:228] Iteration 79100, loss = 0.00115807\nI0819 17:32:44.263046 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:32:44.263061 17344 solver.cpp:244]     Train net output #1: loss = 0.00115752 (* 1 = 0.00115752 loss)\nI0819 17:32:44.331862 17344 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0819 17:35:02.733770 17344 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 17:36:25.476264 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83708\nI0819 17:36:25.476627 17344 solver.cpp:404]     Test net output #1: loss = 1.04826 (* 1 = 1.04826 loss)\nI0819 17:36:26.818809 17344 solver.cpp:228] Iteration 79200, loss = 0.000932926\nI0819 17:36:26.818847 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:36:26.818863 17344 solver.cpp:244]     Train net output #1: loss = 0.000932368 (* 1 = 0.000932368 loss)\nI0819 17:36:26.888404 17344 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0819 17:38:45.432411 17344 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 17:40:08.162322 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8384\nI0819 17:40:08.162663 17344 solver.cpp:404]     Test net output #1: loss = 1.04394 (* 1 = 1.04394 loss)\nI0819 17:40:09.504765 17344 solver.cpp:228] Iteration 79300, loss = 0.00147989\nI0819 
17:40:09.504801 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:40:09.504814 17344 solver.cpp:244]     Train net output #1: loss = 0.00147933 (* 1 = 0.00147933 loss)\nI0819 17:40:09.570698 17344 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0819 17:42:28.020036 17344 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 17:43:50.763573 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 17:43:50.763947 17344 solver.cpp:404]     Test net output #1: loss = 1.04759 (* 1 = 1.04759 loss)\nI0819 17:43:52.106963 17344 solver.cpp:228] Iteration 79400, loss = 0.00151453\nI0819 17:43:52.106997 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:43:52.107012 17344 solver.cpp:244]     Train net output #1: loss = 0.00151397 (* 1 = 0.00151397 loss)\nI0819 17:43:52.169536 17344 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0819 17:46:10.583890 17344 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 17:47:33.328377 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8386\nI0819 17:47:33.328742 17344 solver.cpp:404]     Test net output #1: loss = 1.04558 (* 1 = 1.04558 loss)\nI0819 17:47:34.670404 17344 solver.cpp:228] Iteration 79500, loss = 0.00128705\nI0819 17:47:34.670437 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:47:34.670452 17344 solver.cpp:244]     Train net output #1: loss = 0.00128649 (* 1 = 0.00128649 loss)\nI0819 17:47:34.735944 17344 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0819 17:49:53.038748 17344 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 17:51:15.773491 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8372\nI0819 17:51:15.773852 17344 solver.cpp:404]     Test net output #1: loss = 1.05176 (* 1 = 1.05176 loss)\nI0819 17:51:17.114682 17344 solver.cpp:228] Iteration 79600, loss = 0.00138525\nI0819 17:51:17.114715 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:51:17.114729 17344 solver.cpp:244]     
Train net output #1: loss = 0.00138469 (* 1 = 0.00138469 loss)\nI0819 17:51:17.179419 17344 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0819 17:53:35.526821 17344 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:54:58.260892 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83716\nI0819 17:54:58.261237 17344 solver.cpp:404]     Test net output #1: loss = 1.05073 (* 1 = 1.05073 loss)\nI0819 17:54:59.602934 17344 solver.cpp:228] Iteration 79700, loss = 0.00145211\nI0819 17:54:59.602967 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:54:59.602982 17344 solver.cpp:244]     Train net output #1: loss = 0.00145155 (* 1 = 0.00145155 loss)\nI0819 17:54:59.664922 17344 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0819 17:57:17.966358 17344 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 17:58:40.707092 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 17:58:40.707448 17344 solver.cpp:404]     Test net output #1: loss = 1.05173 (* 1 = 1.05173 loss)\nI0819 17:58:42.049398 17344 solver.cpp:228] Iteration 79800, loss = 0.00130701\nI0819 17:58:42.049430 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:58:42.049445 17344 solver.cpp:244]     Train net output #1: loss = 0.00130646 (* 1 = 0.00130646 loss)\nI0819 17:58:42.112510 17344 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0819 18:01:00.483160 17344 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 18:02:23.228641 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83744\nI0819 18:02:23.229020 17344 solver.cpp:404]     Test net output #1: loss = 1.05083 (* 1 = 1.05083 loss)\nI0819 18:02:24.570314 17344 solver.cpp:228] Iteration 79900, loss = 0.00139012\nI0819 18:02:24.570346 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 18:02:24.570361 17344 solver.cpp:244]     Train net output #1: loss = 0.00138956 (* 1 = 0.00138956 loss)\nI0819 18:02:24.636102 17344 sgd_solver.cpp:166] Iteration 
79900, lr = 0.0035\nI0819 18:04:43.084895 17344 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35AdaGradFig9_iter_80000.caffemodel\nI0819 18:04:43.359634 17344 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35AdaGradFig9_iter_80000.solverstate\nI0819 18:04:43.812196 17344 solver.cpp:317] Iteration 80000, loss = 0.00108621\nI0819 18:04:43.812234 17344 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 18:06:06.549095 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83736\nI0819 18:06:06.549473 17344 solver.cpp:404]     Test net output #1: loss = 1.04974 (* 1 = 1.04974 loss)\nI0819 18:06:06.549485 17344 solver.cpp:322] Optimization Done.\nI0819 18:06:11.926137 17344 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35AdamFig9",
    "content": "I0817 16:28:31.169255 17344 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:28:31.171392 17344 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:28:31.172585 17344 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:28:31.173780 17344 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:28:31.175487 17344 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:28:31.176707 17344 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:28:31.178196 17344 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:28:31.179405 17344 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:28:31.180608 17344 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:28:31.595432 17344 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.0035\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35AdamFig9\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\ntype: \"Adam\"\nI0817 16:28:31.598819 17344 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:28:31.610014 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:31.610079 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:31.611119 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:28:31.611169 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:28:31.611181 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 
16:28:31.611189 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:28:31.611203 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:28:31.611212 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:28:31.611220 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:28:31.611229 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:28:31.611239 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:28:31.611248 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:28:31.611258 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:28:31.611265 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:28:31.611274 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:28:31.611284 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:28:31.611292 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:28:31.611301 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:28:31.611310 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:28:31.611318 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L1_b8_cbr2_bn\nI0817 16:28:31.611328 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:28:31.611336 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:28:31.611357 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:28:31.611366 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:28:31.611379 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:28:31.611388 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:28:31.611397 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:28:31.611405 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:28:31.611414 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:28:31.611423 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:28:31.611430 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:28:31.611439 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:28:31.611449 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:28:31.611456 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:28:31.611465 17344 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:28:31.611474 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:28:31.611482 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:28:31.611490 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:28:31.611500 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:28:31.611508 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:28:31.611516 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:28:31.611526 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:28:31.611537 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:28:31.611546 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:28:31.611553 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:28:31.611562 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:28:31.611572 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:28:31.611579 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:28:31.611588 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 
16:28:31.611595 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:28:31.611605 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:28:31.611613 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:28:31.611621 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:28:31.611637 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:28:31.611646 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:28:31.611654 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:28:31.611665 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:28:31.611671 17344 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:28:31.613404 17344 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n 
   bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n 
   lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\nI0817 16:28:31.615329 17344 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:28:31.617028 17344 net.cpp:100] Creating Layer dataLayer\nI0817 16:28:31.617089 17344 net.cpp:408] dataLayer -> data_top\nI0817 16:28:31.617274 17344 net.cpp:408] dataLayer -> label\nI0817 16:28:31.617362 17344 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:28:31.626590 17349 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:28:31.649118 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:31.656270 17344 net.cpp:150] Setting up dataLayer\nI0817 16:28:31.656332 17344 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:28:31.656344 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.656350 17344 net.cpp:165] Memory required for data: 1536500\nI0817 16:28:31.656365 17344 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:28:31.656379 17344 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:28:31.656388 17344 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:28:31.656410 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:28:31.656425 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:28:31.656496 17344 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:28:31.656512 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.656518 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.656523 17344 net.cpp:165] Memory required for data: 1537500\nI0817 16:28:31.656528 17344 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:28:31.656590 17344 net.cpp:100] Creating Layer pre_conv\nI0817 16:28:31.656601 17344 net.cpp:434] pre_conv <- data_top\nI0817 16:28:31.656611 17344 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:28:31.658330 17344 net.cpp:150] Setting up pre_conv\nI0817 16:28:31.658350 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.658356 17344 net.cpp:165] Memory required for data: 9729500\nI0817 16:28:31.658423 17344 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:28:31.658491 17344 net.cpp:100] Creating Layer pre_bn\nI0817 16:28:31.658504 17344 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:28:31.658512 17344 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:28:31.658937 17350 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:31.659061 17344 net.cpp:150] Setting up pre_bn\nI0817 16:28:31.659082 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.659087 17344 net.cpp:165] Memory required for data: 17921500\nI0817 16:28:31.659104 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:31.659150 17344 net.cpp:100] Creating Layer pre_scale\nI0817 16:28:31.659160 17344 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:28:31.659173 17344 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:28:31.659340 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:31.659595 17344 net.cpp:150] Setting up pre_scale\nI0817 16:28:31.659610 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.659615 17344 net.cpp:165] Memory required for data: 26113500\nI0817 16:28:31.659626 17344 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:28:31.659668 17344 net.cpp:100] Creating Layer pre_relu\nI0817 16:28:31.659684 17344 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:28:31.659693 17344 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:28:31.659703 17344 net.cpp:150] Setting up pre_relu\nI0817 16:28:31.659713 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.659716 17344 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:28:31.659721 17344 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:28:31.659734 17344 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:28:31.659739 17344 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:28:31.659752 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:28:31.659765 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:28:31.659811 17344 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:28:31.659823 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.659831 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.659835 17344 net.cpp:165] Memory required for data: 50689500\nI0817 16:28:31.659840 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:28:31.659860 17344 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:28:31.659865 17344 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:28:31.659879 17344 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:28:31.660192 17344 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:28:31.660205 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.660210 17344 net.cpp:165] Memory required for data: 58881500\nI0817 16:28:31.660223 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:28:31.660236 17344 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:28:31.660243 17344 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:28:31.660250 17344 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:28:31.660480 17344 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:28:31.660496 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.660501 17344 net.cpp:165] Memory required for data: 67073500\nI0817 16:28:31.660512 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:31.660521 17344 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:28:31.660526 17344 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:28:31.660533 17344 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.660584 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:31.660728 17344 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:28:31.660742 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.660748 17344 net.cpp:165] Memory required for data: 75265500\nI0817 16:28:31.660756 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:28:31.660774 17344 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:28:31.660781 17344 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:28:31.660789 17344 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.660797 17344 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:28:31.660804 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.660809 17344 net.cpp:165] Memory required for data: 83457500\nI0817 16:28:31.660814 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:28:31.660828 17344 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:28:31.660835 17344 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:28:31.660845 17344 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:28:31.661145 17344 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:28:31.661159 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.661164 17344 net.cpp:165] Memory required for data: 91649500\nI0817 16:28:31.661178 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:28:31.661187 17344 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:28:31.661193 17344 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:28:31.661204 17344 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:28:31.661440 17344 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:28:31.661453 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.661458 17344 net.cpp:165] Memory required for data: 99841500\nI0817 16:28:31.661475 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:31.661485 17344 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:28:31.661490 17344 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:28:31.661499 17344 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.661551 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:31.661691 17344 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:28:31.661705 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.661710 17344 net.cpp:165] Memory required for data: 108033500\nI0817 16:28:31.661720 17344 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:28:31.661772 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:28:31.661783 17344 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:28:31.661792 17344 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:28:31.661801 17344 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:28:31.661871 17344 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:28:31.661885 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.661891 17344 net.cpp:165] Memory required for data: 116225500\nI0817 16:28:31.661897 17344 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:28:31.661908 17344 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:28:31.661914 17344 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:28:31.661922 17344 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.661931 17344 net.cpp:150] Setting up L1_b1_relu\nI0817 16:28:31.661938 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.661943 17344 net.cpp:165] Memory required for data: 124417500\nI0817 16:28:31.661947 17344 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.661957 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.661962 17344 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:28:31.661969 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:31.661978 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:31.662021 17344 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.662034 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.662047 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.662052 17344 net.cpp:165] Memory required for data: 140801500\nI0817 16:28:31.662057 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:28:31.662071 17344 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:28:31.662078 17344 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:31.662087 17344 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:28:31.662391 17344 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:28:31.662405 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.662410 17344 net.cpp:165] Memory required for data: 148993500\nI0817 16:28:31.662420 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:28:31.662432 17344 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:28:31.662438 17344 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:28:31.662449 17344 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:28:31.662693 17344 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:28:31.662706 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.662711 17344 net.cpp:165] Memory required for data: 157185500\nI0817 16:28:31.662722 17344 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:31.662736 17344 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:28:31.662742 17344 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:28:31.662750 17344 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.662804 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:31.662938 17344 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:28:31.662951 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.662956 17344 net.cpp:165] Memory required for data: 165377500\nI0817 16:28:31.662966 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:28:31.662981 17344 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:28:31.662986 17344 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:28:31.662993 17344 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.663002 17344 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:28:31.663009 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.663014 17344 net.cpp:165] Memory required for data: 173569500\nI0817 16:28:31.663019 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:28:31.663033 17344 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:28:31.663038 17344 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:28:31.663049 17344 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:28:31.663353 17344 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:28:31.663367 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.663372 17344 net.cpp:165] Memory required for data: 181761500\nI0817 16:28:31.663381 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:28:31.663393 17344 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:28:31.663399 17344 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:28:31.663408 17344 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:28:31.663643 17344 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:28:31.663656 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.663661 17344 net.cpp:165] Memory required for data: 189953500\nI0817 16:28:31.663681 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:31.663694 17344 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:28:31.663700 17344 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:28:31.663707 17344 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.663763 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:31.663898 17344 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:28:31.663911 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.663916 17344 net.cpp:165] Memory required for data: 198145500\nI0817 16:28:31.663925 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:28:31.663942 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:28:31.663949 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:28:31.663955 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:31.663965 17344 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:28:31.663996 17344 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:28:31.664005 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.664010 17344 net.cpp:165] Memory required for data: 206337500\nI0817 16:28:31.664014 17344 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:28:31.664021 17344 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:28:31.664026 17344 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:28:31.664033 17344 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:28:31.664042 17344 net.cpp:150] Setting up L1_b2_relu\nI0817 16:28:31.664048 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:28:31.664053 17344 net.cpp:165] Memory required for data: 214529500\nI0817 16:28:31.664057 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.664067 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.664072 17344 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:28:31.664080 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:31.664088 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:31.664131 17344 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.664142 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.664149 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.664153 17344 net.cpp:165] Memory required for data: 230913500\nI0817 16:28:31.664160 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:28:31.664170 17344 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:28:31.664175 17344 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:31.664192 17344 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:28:31.664510 17344 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:28:31.664523 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.664528 17344 net.cpp:165] Memory required for data: 239105500\nI0817 16:28:31.664537 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:28:31.664546 17344 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:28:31.664552 17344 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:28:31.664561 17344 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:28:31.664804 17344 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:28:31.664819 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:28:31.664824 17344 net.cpp:165] Memory required for data: 247297500\nI0817 16:28:31.664834 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:31.664844 17344 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:28:31.664850 17344 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:28:31.664858 17344 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.664914 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:31.665051 17344 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:28:31.665065 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.665069 17344 net.cpp:165] Memory required for data: 255489500\nI0817 16:28:31.665078 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:28:31.665086 17344 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:28:31.665091 17344 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:28:31.665102 17344 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.665110 17344 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:28:31.665124 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.665129 17344 net.cpp:165] Memory required for data: 263681500\nI0817 16:28:31.665133 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:28:31.665148 17344 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:28:31.665153 17344 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:28:31.665161 17344 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:28:31.665464 17344 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:28:31.665477 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.665482 17344 net.cpp:165] Memory required for data: 271873500\nI0817 16:28:31.665491 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:28:31.665505 17344 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:28:31.665511 17344 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:28:31.665522 17344 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:28:31.665765 17344 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:28:31.665778 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.665783 17344 net.cpp:165] Memory required for data: 280065500\nI0817 16:28:31.665793 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:31.665807 17344 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:28:31.665813 17344 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:28:31.665822 17344 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.665873 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:31.666007 17344 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:28:31.666019 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666024 17344 net.cpp:165] Memory required for data: 288257500\nI0817 16:28:31.666033 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:28:31.666043 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:28:31.666048 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:28:31.666057 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:31.666065 17344 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:28:31.666098 17344 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:28:31.666107 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666111 17344 net.cpp:165] Memory required for data: 296449500\nI0817 16:28:31.666116 17344 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:28:31.666124 17344 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:28:31.666129 17344 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:28:31.666138 17344 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.666147 17344 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:28:31.666154 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666159 17344 net.cpp:165] Memory required for data: 304641500\nI0817 16:28:31.666164 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.666172 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.666177 17344 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:28:31.666183 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:31.666191 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:31.666236 17344 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.666247 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666254 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666259 17344 net.cpp:165] Memory required for data: 321025500\nI0817 16:28:31.666263 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:28:31.666275 17344 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:28:31.666280 17344 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:31.666298 17344 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:28:31.666607 17344 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:28:31.666621 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666626 17344 net.cpp:165] Memory required for data: 329217500\nI0817 16:28:31.666635 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:28:31.666643 17344 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:28:31.666649 17344 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:28:31.666661 17344 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:28:31.666904 17344 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:28:31.666918 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.666923 17344 net.cpp:165] Memory required for data: 337409500\nI0817 16:28:31.666934 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:31.666944 17344 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:28:31.666950 17344 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:28:31.666959 17344 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.667011 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:31.667150 17344 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:28:31.667162 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.667167 17344 net.cpp:165] Memory required for data: 345601500\nI0817 16:28:31.667176 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:28:31.667189 17344 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:28:31.667196 17344 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:28:31.667202 17344 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.667212 17344 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:28:31.667218 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.667222 17344 net.cpp:165] Memory required for data: 353793500\nI0817 16:28:31.667227 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:28:31.667242 17344 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:28:31.667246 17344 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:28:31.667258 17344 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:28:31.667563 17344 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:28:31.667577 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.667582 17344 net.cpp:165] Memory required for data: 361985500\nI0817 16:28:31.667592 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:28:31.667599 17344 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:28:31.667605 17344 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:28:31.667613 17344 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:28:31.667866 17344 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:28:31.667881 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.667886 17344 net.cpp:165] Memory required for data: 370177500\nI0817 16:28:31.667898 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:31.667907 17344 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:28:31.667913 17344 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:28:31.667922 17344 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.667974 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:31.668112 17344 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:28:31.668125 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668130 17344 net.cpp:165] Memory required for data: 378369500\nI0817 16:28:31.668139 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:28:31.668150 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:28:31.668156 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:28:31.668164 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:31.668170 17344 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:28:31.668212 17344 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:28:31.668222 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668226 17344 net.cpp:165] Memory required for data: 386561500\nI0817 16:28:31.668231 17344 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:28:31.668239 17344 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:28:31.668244 17344 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:28:31.668254 17344 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:28:31.668263 17344 net.cpp:150] Setting up L1_b4_relu\nI0817 16:28:31.668270 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668274 17344 net.cpp:165] Memory required for data: 394753500\nI0817 16:28:31.668279 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.668287 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.668292 17344 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:28:31.668298 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:31.668308 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:31.668350 17344 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.668362 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668368 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668373 17344 net.cpp:165] Memory required for data: 411137500\nI0817 16:28:31.668378 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:28:31.668391 17344 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:28:31.668398 17344 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:31.668406 17344 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:28:31.668720 17344 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:28:31.668735 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.668740 17344 net.cpp:165] Memory required for data: 419329500\nI0817 16:28:31.668763 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:28:31.668776 17344 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:28:31.668782 17344 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:28:31.668790 17344 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:28:31.669030 17344 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:28:31.669042 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.669047 17344 net.cpp:165] Memory required for data: 427521500\nI0817 16:28:31.669059 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:31.669070 17344 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:28:31.669075 17344 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:28:31.669083 17344 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.669134 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:31.669272 17344 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:28:31.669286 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.669289 17344 net.cpp:165] Memory required for data: 435713500\nI0817 16:28:31.669298 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:28:31.669308 17344 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:28:31.669314 17344 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:28:31.669322 17344 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.669330 17344 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:28:31.669337 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.669342 17344 net.cpp:165] Memory required for data: 443905500\nI0817 16:28:31.669347 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:28:31.669359 17344 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:28:31.669373 17344 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:28:31.669384 17344 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:28:31.669698 17344 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:28:31.669713 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.669718 17344 net.cpp:165] Memory required for data: 452097500\nI0817 16:28:31.669726 17344 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:28:31.669735 17344 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:28:31.669741 17344 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:28:31.669749 17344 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:28:31.669984 17344 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:28:31.669997 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670001 17344 net.cpp:165] Memory required for data: 460289500\nI0817 16:28:31.670011 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:31.670023 17344 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:28:31.670029 17344 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:28:31.670037 17344 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.670094 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:31.670230 17344 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:28:31.670243 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670248 17344 net.cpp:165] Memory required for data: 468481500\nI0817 16:28:31.670256 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:28:31.670265 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:28:31.670271 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:28:31.670279 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:31.670289 17344 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:28:31.670321 17344 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:28:31.670331 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670336 17344 net.cpp:165] Memory required for data: 476673500\nI0817 16:28:31.670341 17344 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:28:31.670347 17344 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:28:31.670353 17344 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:28:31.670362 17344 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.670372 17344 net.cpp:150] Setting up L1_b5_relu\nI0817 16:28:31.670378 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670382 17344 net.cpp:165] Memory required for data: 484865500\nI0817 16:28:31.670387 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.670394 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.670399 17344 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:28:31.670406 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:31.670415 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:31.670459 17344 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.670471 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670477 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670482 17344 net.cpp:165] Memory required for data: 501249500\nI0817 16:28:31.670487 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:28:31.670497 17344 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:28:31.670503 17344 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:31.670514 17344 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:28:31.670830 17344 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:28:31.670843 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.670848 17344 net.cpp:165] Memory required for data: 509441500\nI0817 16:28:31.670864 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:28:31.670874 17344 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:28:31.670879 17344 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:28:31.670891 17344 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:28:31.671130 17344 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:28:31.671144 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.671149 17344 net.cpp:165] Memory required for data: 517633500\nI0817 16:28:31.671159 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:31.671170 17344 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:28:31.671176 17344 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:28:31.671183 17344 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.671236 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:31.671377 17344 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:28:31.671389 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.671394 17344 net.cpp:165] Memory required for data: 525825500\nI0817 16:28:31.671403 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:28:31.671411 17344 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:28:31.671416 17344 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:28:31.671427 17344 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.671435 17344 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:28:31.671442 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.671447 17344 net.cpp:165] Memory required for data: 534017500\nI0817 16:28:31.671452 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:28:31.671468 17344 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:28:31.671473 17344 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:28:31.671484 17344 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:28:31.671803 17344 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:28:31.671818 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.671823 17344 
net.cpp:165] Memory required for data: 542209500\nI0817 16:28:31.671831 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:28:31.671839 17344 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:28:31.671845 17344 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:28:31.671854 17344 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:28:31.672091 17344 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:28:31.672102 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672107 17344 net.cpp:165] Memory required for data: 550401500\nI0817 16:28:31.672117 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:31.672128 17344 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:28:31.672134 17344 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:28:31.672142 17344 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.672196 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:31.672338 17344 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:28:31.672349 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672354 17344 net.cpp:165] Memory required for data: 558593500\nI0817 16:28:31.672363 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:28:31.672380 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:28:31.672386 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:28:31.672394 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:31.672404 17344 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:28:31.672435 17344 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:28:31.672446 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672451 17344 net.cpp:165] Memory required for data: 566785500\nI0817 16:28:31.672456 17344 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:28:31.672474 17344 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:28:31.672482 17344 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:28:31.672488 17344 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.672497 17344 net.cpp:150] Setting up L1_b6_relu\nI0817 16:28:31.672504 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672508 17344 net.cpp:165] Memory required for data: 574977500\nI0817 16:28:31.672513 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.672520 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.672525 17344 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:28:31.672533 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:31.672541 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:31.672586 17344 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.672598 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672605 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672610 17344 net.cpp:165] Memory required for data: 591361500\nI0817 16:28:31.672614 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:28:31.672627 17344 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:28:31.672634 17344 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:31.672642 17344 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:28:31.672962 17344 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:28:31.672977 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.672982 17344 net.cpp:165] Memory required for data: 599553500\nI0817 16:28:31.672991 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:28:31.673004 17344 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:28:31.673010 17344 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:28:31.673019 17344 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:28:31.673261 17344 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:28:31.673274 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.673279 17344 net.cpp:165] Memory required for data: 607745500\nI0817 16:28:31.673290 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:31.673297 17344 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:28:31.673302 17344 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:28:31.673310 17344 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.673364 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:31.673503 17344 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:28:31.673516 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.673521 17344 net.cpp:165] Memory required for data: 615937500\nI0817 16:28:31.673529 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:28:31.673537 17344 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:28:31.673542 17344 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:28:31.673553 17344 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.673563 17344 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:28:31.673569 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.673574 17344 net.cpp:165] Memory required for data: 624129500\nI0817 16:28:31.673578 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:28:31.673589 17344 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:28:31.673594 17344 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:28:31.673605 17344 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:28:31.673923 17344 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:28:31.673936 17344 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.673941 17344 net.cpp:165] Memory required for data: 632321500\nI0817 16:28:31.673957 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:28:31.673966 17344 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:28:31.673972 17344 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:28:31.673984 17344 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:28:31.674222 17344 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:28:31.674238 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674243 17344 net.cpp:165] Memory required for data: 640513500\nI0817 16:28:31.674253 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:31.674262 17344 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:28:31.674268 17344 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:28:31.674274 17344 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.674329 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:31.674470 17344 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:28:31.674484 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674489 17344 net.cpp:165] Memory required for data: 648705500\nI0817 16:28:31.674496 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:28:31.674510 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:28:31.674516 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:28:31.674523 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:31.674533 17344 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:28:31.674564 17344 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:28:31.674573 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674578 17344 net.cpp:165] Memory required for data: 656897500\nI0817 16:28:31.674583 17344 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:28:31.674594 17344 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:28:31.674600 17344 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:28:31.674607 17344 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.674615 17344 net.cpp:150] Setting up L1_b7_relu\nI0817 16:28:31.674623 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674626 17344 net.cpp:165] Memory required for data: 665089500\nI0817 16:28:31.674631 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.674638 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.674643 17344 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:28:31.674650 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:31.674659 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:31.674711 17344 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.674723 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674731 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.674736 17344 net.cpp:165] Memory required for data: 681473500\nI0817 16:28:31.674739 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:28:31.674758 17344 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:28:31.674764 17344 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:31.674773 17344 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:28:31.675087 17344 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:28:31.675101 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.675107 17344 net.cpp:165] Memory required for data: 689665500\nI0817 16:28:31.675115 17344 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:28:31.675128 17344 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:28:31.675134 17344 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:28:31.675143 17344 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:28:31.675392 17344 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:28:31.675408 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.675413 17344 net.cpp:165] Memory required for data: 697857500\nI0817 16:28:31.675423 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:31.675432 17344 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:28:31.675438 17344 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:28:31.675446 17344 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.675498 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:31.675639 17344 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:28:31.675652 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.675657 17344 net.cpp:165] Memory required for data: 706049500\nI0817 16:28:31.675665 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:28:31.675683 17344 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:28:31.675689 17344 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:28:31.675699 17344 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.675709 17344 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:28:31.675716 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.675720 17344 net.cpp:165] Memory required for data: 714241500\nI0817 16:28:31.675725 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:28:31.675736 17344 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:28:31.675741 17344 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:28:31.675757 17344 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:28:31.676102 17344 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:28:31.676117 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676122 17344 net.cpp:165] Memory required for data: 722433500\nI0817 16:28:31.676131 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:28:31.676139 17344 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:28:31.676146 17344 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:28:31.676156 17344 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:28:31.676404 17344 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:28:31.676417 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676422 17344 net.cpp:165] Memory required for data: 730625500\nI0817 16:28:31.676434 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:31.676445 17344 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:28:31.676450 17344 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:28:31.676457 17344 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.676511 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:31.676673 17344 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:28:31.676694 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676699 17344 net.cpp:165] Memory required for data: 738817500\nI0817 16:28:31.676708 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:28:31.676726 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:28:31.676733 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:28:31.676739 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:31.676748 17344 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:28:31.676781 17344 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:28:31.676795 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676798 17344 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:28:31.676803 17344 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:28:31.676811 17344 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:28:31.676817 17344 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:28:31.676827 17344 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.676836 17344 net.cpp:150] Setting up L1_b8_relu\nI0817 16:28:31.676851 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676856 17344 net.cpp:165] Memory required for data: 755201500\nI0817 16:28:31.676859 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.676867 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.676872 17344 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:28:31.676879 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:31.676889 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:31.676935 17344 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.676951 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676959 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.676964 17344 net.cpp:165] Memory required for data: 771585500\nI0817 16:28:31.676968 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:28:31.676981 17344 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:28:31.676988 17344 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:31.676997 17344 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:28:31.677320 17344 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:28:31.677336 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.677341 17344 net.cpp:165] Memory required for data: 
779777500\nI0817 16:28:31.677350 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:28:31.677359 17344 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:28:31.677368 17344 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:28:31.677376 17344 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:28:31.677624 17344 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:28:31.677637 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.677642 17344 net.cpp:165] Memory required for data: 787969500\nI0817 16:28:31.677652 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:31.677661 17344 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:28:31.677666 17344 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:28:31.677683 17344 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.677741 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:31.677884 17344 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:28:31.677901 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.677906 17344 net.cpp:165] Memory required for data: 796161500\nI0817 16:28:31.677914 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:28:31.677922 17344 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:28:31.677927 17344 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:28:31.677934 17344 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.677943 17344 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:28:31.677950 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.677954 17344 net.cpp:165] Memory required for data: 804353500\nI0817 16:28:31.677959 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:28:31.677973 17344 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:28:31.677978 17344 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:28:31.677989 17344 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:28:31.678313 17344 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:28:31.678328 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.678333 17344 net.cpp:165] Memory required for data: 812545500\nI0817 16:28:31.678340 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:28:31.678352 17344 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:28:31.678359 17344 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:28:31.678369 17344 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:28:31.678618 17344 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:28:31.678632 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.678637 17344 net.cpp:165] Memory required for data: 820737500\nI0817 16:28:31.678668 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:31.678683 17344 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:28:31.678689 17344 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:28:31.678700 17344 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.678753 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:31.678900 17344 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:28:31.678912 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.678917 17344 net.cpp:165] Memory required for data: 828929500\nI0817 16:28:31.678925 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:28:31.678938 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:28:31.678944 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:28:31.678951 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:31.678958 17344 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:28:31.678989 17344 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:28:31.678998 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:31.679003 17344 net.cpp:165] Memory required for data: 837121500\nI0817 16:28:31.679008 17344 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:28:31.679018 17344 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:28:31.679024 17344 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:28:31.679031 17344 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.679040 17344 net.cpp:150] Setting up L1_b9_relu\nI0817 16:28:31.679046 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.679051 17344 net.cpp:165] Memory required for data: 845313500\nI0817 16:28:31.679056 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.679066 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.679072 17344 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:28:31.679080 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:31.679088 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:31.679134 17344 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.679145 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.679152 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.679157 17344 net.cpp:165] Memory required for data: 861697500\nI0817 16:28:31.679162 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:28:31.679175 17344 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:28:31.679181 17344 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:31.679190 17344 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:28:31.679508 17344 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:28:31.679522 17344 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:28:31.679527 17344 net.cpp:165] Memory required for data: 863745500\nI0817 16:28:31.679536 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:28:31.679548 17344 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:28:31.679554 17344 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:28:31.679564 17344 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:28:31.679813 17344 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:28:31.679827 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.679832 17344 net.cpp:165] Memory required for data: 865793500\nI0817 16:28:31.679842 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:31.679858 17344 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:28:31.679864 17344 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:28:31.679872 17344 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.679929 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:31.680068 17344 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:28:31.680080 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.680085 17344 net.cpp:165] Memory required for data: 867841500\nI0817 16:28:31.680094 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:28:31.680101 17344 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:28:31.680110 17344 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:28:31.680119 17344 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.680127 17344 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:28:31.680135 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.680138 17344 net.cpp:165] Memory required for data: 869889500\nI0817 16:28:31.680143 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:28:31.680157 17344 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:28:31.680163 17344 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:28:31.680171 17344 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:28:31.680492 17344 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:28:31.680505 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.680510 17344 net.cpp:165] Memory required for data: 871937500\nI0817 16:28:31.680519 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:28:31.680531 17344 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:28:31.680536 17344 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:28:31.680546 17344 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:28:31.680796 17344 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:28:31.680812 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.680817 17344 net.cpp:165] Memory required for data: 873985500\nI0817 16:28:31.680829 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:31.680836 17344 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:28:31.680842 17344 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:28:31.680850 17344 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.680903 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:31.681051 17344 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:28:31.681062 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.681067 17344 net.cpp:165] Memory required for data: 876033500\nI0817 16:28:31.681077 17344 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:28:31.681087 17344 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:28:31.681092 17344 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:31.681103 17344 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:28:31.681186 17344 net.cpp:150] Setting up L2_b1_pool\nI0817 16:28:31.681201 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.681206 17344 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:28:31.681211 17344 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:28:31.681226 17344 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:28:31.681231 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:28:31.681238 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:28:31.681246 17344 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:28:31.681278 17344 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:28:31.681288 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.681293 17344 net.cpp:165] Memory required for data: 880129500\nI0817 16:28:31.681298 17344 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:28:31.681304 17344 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:28:31.681310 17344 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:28:31.681327 17344 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.681337 17344 net.cpp:150] Setting up L2_b1_relu\nI0817 16:28:31.681344 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.681349 17344 net.cpp:165] Memory required for data: 882177500\nI0817 16:28:31.681354 17344 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:28:31.681401 17344 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:28:31.681413 17344 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:28:31.683738 17344 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:28:31.683756 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.683763 17344 net.cpp:165] Memory required for data: 884225500\nI0817 16:28:31.683768 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:28:31.683781 17344 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:28:31.683787 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:28:31.683795 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:28:31.683802 17344 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:28:31.683882 17344 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:28:31.683897 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.683902 17344 net.cpp:165] Memory required for data: 888321500\nI0817 16:28:31.683908 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.683917 17344 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.683923 17344 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:28:31.683933 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:31.683943 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:31.683995 17344 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.684010 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.684017 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.684021 17344 net.cpp:165] Memory required for data: 896513500\nI0817 16:28:31.684026 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:28:31.684039 17344 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:28:31.684046 17344 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:31.684054 17344 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:28:31.685696 17344 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:28:31.685714 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.685719 17344 net.cpp:165] Memory required for data: 900609500\nI0817 16:28:31.685729 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:28:31.685742 17344 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:28:31.685748 17344 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:28:31.685760 17344 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:28:31.686005 17344 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:28:31.686018 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.686023 17344 net.cpp:165] Memory required for data: 904705500\nI0817 16:28:31.686033 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:31.686043 17344 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:28:31.686049 17344 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:28:31.686056 17344 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.686113 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:31.686261 17344 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:28:31.686275 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.686280 17344 net.cpp:165] Memory required for data: 908801500\nI0817 16:28:31.686287 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:28:31.686298 17344 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:28:31.686305 17344 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:28:31.686321 17344 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.686331 17344 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:28:31.686337 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.686342 17344 net.cpp:165] Memory required for data: 912897500\nI0817 16:28:31.686347 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:28:31.686362 17344 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:28:31.686367 17344 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:28:31.686375 17344 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:28:31.686848 17344 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:28:31.686863 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.686868 17344 net.cpp:165] Memory required for data: 916993500\nI0817 16:28:31.686877 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:28:31.686888 17344 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:28:31.686895 17344 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:28:31.686903 17344 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:28:31.687150 17344 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:28:31.687165 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687170 17344 net.cpp:165] Memory required for data: 921089500\nI0817 16:28:31.687180 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:31.687188 17344 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:28:31.687194 17344 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:28:31.687202 17344 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.687258 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:31.687403 17344 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:28:31.687415 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687420 17344 net.cpp:165] Memory required for data: 925185500\nI0817 16:28:31.687429 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:28:31.687438 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:28:31.687443 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:28:31.687450 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:31.687461 17344 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:28:31.687487 17344 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:28:31.687496 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687501 17344 net.cpp:165] Memory required for data: 929281500\nI0817 16:28:31.687506 17344 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:28:31.687516 17344 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:28:31.687522 17344 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:28:31.687551 17344 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:28:31.687562 17344 net.cpp:150] Setting up L2_b2_relu\nI0817 16:28:31.687569 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687574 17344 net.cpp:165] Memory required for data: 933377500\nI0817 16:28:31.687579 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.687587 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.687592 17344 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:28:31.687599 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:31.687608 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:31.687657 17344 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.687669 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687682 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.687688 17344 net.cpp:165] Memory required for data: 941569500\nI0817 16:28:31.687693 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:28:31.687714 17344 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:28:31.687721 17344 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:31.687731 17344 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:28:31.688192 17344 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:28:31.688206 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.688211 17344 net.cpp:165] Memory required for data: 945665500\nI0817 16:28:31.688220 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:28:31.688232 17344 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:28:31.688238 17344 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:28:31.688249 17344 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:28:31.688498 17344 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:28:31.688510 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.688515 17344 net.cpp:165] Memory required for data: 949761500\nI0817 16:28:31.688525 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:31.688534 17344 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:28:31.688539 17344 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:28:31.688547 17344 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.688604 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:31.688756 17344 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:28:31.688771 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.688776 17344 net.cpp:165] Memory required for data: 953857500\nI0817 16:28:31.688784 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:28:31.688791 17344 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:28:31.688797 17344 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:28:31.688807 17344 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.688817 17344 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:28:31.688824 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.688828 17344 net.cpp:165] Memory required for data: 957953500\nI0817 16:28:31.688833 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:28:31.688846 17344 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:28:31.688853 17344 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:28:31.688860 17344 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:28:31.689324 17344 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:28:31.689338 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.689343 17344 net.cpp:165] Memory required for data: 962049500\nI0817 16:28:31.689352 17344 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:28:31.689363 17344 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:28:31.689369 17344 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:28:31.689378 17344 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:28:31.689627 17344 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:28:31.689642 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.689647 17344 net.cpp:165] Memory required for data: 966145500\nI0817 16:28:31.689658 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:31.689666 17344 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:28:31.689672 17344 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:28:31.689685 17344 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.689741 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:31.689895 17344 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:28:31.689908 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.689913 17344 net.cpp:165] Memory required for data: 970241500\nI0817 16:28:31.689921 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:28:31.689930 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:28:31.689936 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:28:31.689950 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:31.689962 17344 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:28:31.689990 17344 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:28:31.689999 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690004 17344 net.cpp:165] Memory required for data: 974337500\nI0817 16:28:31.690009 17344 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:28:31.690032 17344 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:28:31.690037 17344 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:28:31.690044 17344 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.690054 17344 net.cpp:150] Setting up L2_b3_relu\nI0817 16:28:31.690062 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690065 17344 net.cpp:165] Memory required for data: 978433500\nI0817 16:28:31.690070 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.690078 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.690083 17344 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:28:31.690090 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:31.690099 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:31.690147 17344 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.690160 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690166 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690171 17344 net.cpp:165] Memory required for data: 986625500\nI0817 16:28:31.690176 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:28:31.690187 17344 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:28:31.690193 17344 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:31.690204 17344 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:28:31.690666 17344 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:28:31.690686 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690692 17344 net.cpp:165] Memory required for data: 990721500\nI0817 16:28:31.690701 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:28:31.690711 17344 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:28:31.690716 17344 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:28:31.690726 17344 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:28:31.690975 17344 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:28:31.690989 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.690994 17344 net.cpp:165] Memory required for data: 994817500\nI0817 16:28:31.691004 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:31.691015 17344 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:28:31.691020 17344 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:28:31.691028 17344 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.691084 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:31.691232 17344 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:28:31.691244 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.691249 17344 net.cpp:165] Memory required for data: 998913500\nI0817 16:28:31.691258 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:28:31.691269 17344 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:28:31.691275 17344 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:28:31.691282 17344 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.691292 17344 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:28:31.691298 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.691303 17344 net.cpp:165] Memory required for data: 1003009500\nI0817 16:28:31.691308 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:28:31.691329 17344 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:28:31.691335 17344 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:28:31.691345 17344 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:28:31.691815 17344 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:28:31.691830 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.691835 17344 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:28:31.691843 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:28:31.691853 17344 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:28:31.691859 17344 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:28:31.691869 17344 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:28:31.692116 17344 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:28:31.692129 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692133 17344 net.cpp:165] Memory required for data: 1011201500\nI0817 16:28:31.692143 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:31.692154 17344 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:28:31.692160 17344 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:28:31.692168 17344 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.692221 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:31.692368 17344 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:28:31.692381 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692385 17344 net.cpp:165] Memory required for data: 1015297500\nI0817 16:28:31.692394 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:28:31.692404 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:28:31.692409 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:28:31.692418 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:31.692426 17344 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:28:31.692452 17344 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:28:31.692464 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692469 17344 net.cpp:165] Memory required for data: 1019393500\nI0817 16:28:31.692474 17344 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:28:31.692482 17344 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:28:31.692487 17344 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:28:31.692494 17344 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:28:31.692503 17344 net.cpp:150] Setting up L2_b4_relu\nI0817 16:28:31.692509 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692514 17344 net.cpp:165] Memory required for data: 1023489500\nI0817 16:28:31.692518 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.692528 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.692533 17344 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:28:31.692541 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:31.692550 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:31.692596 17344 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.692607 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692615 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.692618 17344 net.cpp:165] Memory required for data: 1031681500\nI0817 16:28:31.692623 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:28:31.692634 17344 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:28:31.692641 17344 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:31.692651 17344 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:28:31.693120 17344 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:28:31.693140 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.693145 17344 net.cpp:165] Memory required for data: 1035777500\nI0817 16:28:31.693155 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:28:31.693163 17344 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:28:31.693169 17344 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:28:31.693181 17344 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:28:31.693434 17344 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:28:31.693447 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.693452 17344 net.cpp:165] Memory required for data: 1039873500\nI0817 16:28:31.693462 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:31.693473 17344 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:28:31.693480 17344 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:28:31.693487 17344 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.693541 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:31.693694 17344 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:28:31.693708 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.693713 17344 net.cpp:165] Memory required for data: 1043969500\nI0817 16:28:31.693722 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:28:31.693732 17344 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:28:31.693738 17344 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:28:31.693745 17344 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.693755 17344 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:28:31.693761 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.693766 17344 net.cpp:165] Memory required for data: 1048065500\nI0817 16:28:31.693770 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:28:31.693784 17344 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:28:31.693790 17344 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:28:31.693800 17344 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:28:31.694258 17344 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:28:31.694272 17344 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.694278 17344 net.cpp:165] Memory required for data: 1052161500\nI0817 16:28:31.694285 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:28:31.694294 17344 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:28:31.694300 17344 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:28:31.694308 17344 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:28:31.694553 17344 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:28:31.694566 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.694571 17344 net.cpp:165] Memory required for data: 1056257500\nI0817 16:28:31.694581 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:31.694589 17344 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:28:31.694595 17344 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:28:31.694605 17344 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.694660 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:31.694815 17344 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:28:31.694828 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.694833 17344 net.cpp:165] Memory required for data: 1060353500\nI0817 16:28:31.694842 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:28:31.694851 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:28:31.694857 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:28:31.694864 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:31.694875 17344 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:28:31.694901 17344 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:28:31.694916 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.694927 17344 net.cpp:165] Memory required for data: 1064449500\nI0817 16:28:31.694932 17344 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:28:31.694941 17344 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:28:31.694946 17344 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:28:31.694952 17344 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.694962 17344 net.cpp:150] Setting up L2_b5_relu\nI0817 16:28:31.694968 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.694973 17344 net.cpp:165] Memory required for data: 1068545500\nI0817 16:28:31.694978 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.694988 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.694993 17344 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:28:31.695000 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:31.695009 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:31.695056 17344 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.695068 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.695075 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.695080 17344 net.cpp:165] Memory required for data: 1076737500\nI0817 16:28:31.695085 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:28:31.695094 17344 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:28:31.695101 17344 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:31.695112 17344 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:28:31.695578 17344 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:28:31.695592 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.695597 17344 net.cpp:165] Memory required for data: 1080833500\nI0817 16:28:31.695605 17344 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:28:31.695614 17344 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:28:31.695621 17344 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:28:31.695631 17344 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:28:31.695889 17344 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:28:31.695902 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.695907 17344 net.cpp:165] Memory required for data: 1084929500\nI0817 16:28:31.695917 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:31.695930 17344 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:28:31.695935 17344 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:28:31.695943 17344 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.695997 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:31.696142 17344 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:28:31.696156 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.696159 17344 net.cpp:165] Memory required for data: 1089025500\nI0817 16:28:31.696168 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:28:31.696177 17344 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:28:31.696182 17344 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:28:31.696192 17344 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.696202 17344 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:28:31.696208 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.696213 17344 net.cpp:165] Memory required for data: 1093121500\nI0817 16:28:31.696218 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:28:31.696233 17344 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:28:31.696238 17344 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:28:31.696249 17344 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:28:31.696710 17344 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:28:31.696730 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.696737 17344 net.cpp:165] Memory required for data: 1097217500\nI0817 16:28:31.696744 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:28:31.696753 17344 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:28:31.696759 17344 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:28:31.696768 17344 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:28:31.697019 17344 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:28:31.697032 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697037 17344 net.cpp:165] Memory required for data: 1101313500\nI0817 16:28:31.697047 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:31.697057 17344 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:28:31.697062 17344 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:28:31.697072 17344 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.697126 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:31.697273 17344 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:28:31.697285 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697290 17344 net.cpp:165] Memory required for data: 1105409500\nI0817 16:28:31.697299 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:28:31.697309 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:28:31.697314 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:28:31.697320 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:31.697331 17344 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:28:31.697357 17344 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:28:31.697366 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697371 17344 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:28:31.697376 17344 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:28:31.697386 17344 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:28:31.697392 17344 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:28:31.697399 17344 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.697408 17344 net.cpp:150] Setting up L2_b6_relu\nI0817 16:28:31.697415 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697419 17344 net.cpp:165] Memory required for data: 1113601500\nI0817 16:28:31.697424 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.697433 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.697439 17344 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:28:31.697446 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:31.697455 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:31.697499 17344 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.697513 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697520 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.697525 17344 net.cpp:165] Memory required for data: 1121793500\nI0817 16:28:31.697530 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:28:31.697540 17344 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:28:31.697546 17344 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:31.697556 17344 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:28:31.698029 17344 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:28:31.698042 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.698047 17344 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:28:31.698056 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:28:31.698068 17344 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:28:31.698081 17344 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:28:31.698091 17344 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:28:31.698343 17344 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:28:31.698356 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.698361 17344 net.cpp:165] Memory required for data: 1129985500\nI0817 16:28:31.698371 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:31.698380 17344 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:28:31.698386 17344 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:28:31.698398 17344 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.698456 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:31.698604 17344 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:28:31.698616 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.698621 17344 net.cpp:165] Memory required for data: 1134081500\nI0817 16:28:31.698631 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:28:31.698638 17344 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:28:31.698644 17344 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:28:31.698655 17344 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.698665 17344 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:28:31.698673 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.698683 17344 net.cpp:165] Memory required for data: 1138177500\nI0817 16:28:31.698688 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:28:31.698703 17344 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:28:31.698709 17344 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:28:31.698717 17344 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:28:31.699188 17344 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:28:31.699203 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.699208 17344 net.cpp:165] Memory required for data: 1142273500\nI0817 16:28:31.699216 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:28:31.699229 17344 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:28:31.699234 17344 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:28:31.699242 17344 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:28:31.699492 17344 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:28:31.699504 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.699509 17344 net.cpp:165] Memory required for data: 1146369500\nI0817 16:28:31.699519 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:31.699528 17344 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:28:31.699534 17344 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:28:31.699545 17344 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.699601 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:31.699757 17344 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:28:31.699771 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.699776 17344 net.cpp:165] Memory required for data: 1150465500\nI0817 16:28:31.699785 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:28:31.699793 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:28:31.699800 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:28:31.699806 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:31.699817 17344 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:28:31.699844 17344 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:28:31.699853 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:31.699857 17344 net.cpp:165] Memory required for data: 1154561500\nI0817 16:28:31.699862 17344 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:28:31.699873 17344 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:28:31.699879 17344 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:28:31.699892 17344 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.699903 17344 net.cpp:150] Setting up L2_b7_relu\nI0817 16:28:31.699909 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.699913 17344 net.cpp:165] Memory required for data: 1158657500\nI0817 16:28:31.699918 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.699925 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.699930 17344 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:28:31.699944 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:31.699954 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:31.699998 17344 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.700013 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.700019 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.700024 17344 net.cpp:165] Memory required for data: 1166849500\nI0817 16:28:31.700029 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:28:31.700040 17344 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:28:31.700047 17344 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:31.700055 17344 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:28:31.700525 17344 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:28:31.700539 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:31.700544 17344 net.cpp:165] Memory required for data: 1170945500\nI0817 16:28:31.700553 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:28:31.700564 17344 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:28:31.700572 17344 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:28:31.700579 17344 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:28:31.700837 17344 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:28:31.700851 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.700855 17344 net.cpp:165] Memory required for data: 1175041500\nI0817 16:28:31.700866 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:31.700875 17344 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:28:31.700881 17344 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:28:31.700891 17344 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.700947 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:31.701124 17344 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:28:31.701139 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.701144 17344 net.cpp:165] Memory required for data: 1179137500\nI0817 16:28:31.701153 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:28:31.701160 17344 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:28:31.701166 17344 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:28:31.701176 17344 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.701186 17344 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:28:31.701194 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.701197 17344 net.cpp:165] Memory required for data: 1183233500\nI0817 16:28:31.701202 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:28:31.701215 17344 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:28:31.701222 17344 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:28:31.701231 17344 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:28:31.701704 17344 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:28:31.701717 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.701722 17344 net.cpp:165] Memory required for data: 1187329500\nI0817 16:28:31.701731 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:28:31.701743 17344 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:28:31.701757 17344 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:28:31.701766 17344 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:28:31.702019 17344 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:28:31.702033 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702038 17344 net.cpp:165] Memory required for data: 1191425500\nI0817 16:28:31.702047 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:31.702056 17344 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:28:31.702062 17344 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:28:31.702069 17344 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.702127 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:31.702273 17344 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:28:31.702289 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702294 17344 net.cpp:165] Memory required for data: 1195521500\nI0817 16:28:31.702302 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:28:31.702311 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:28:31.702317 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:28:31.702324 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:31.702332 17344 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:28:31.702361 17344 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:28:31.702371 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702375 17344 net.cpp:165] Memory required for data: 1199617500\nI0817 16:28:31.702381 17344 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:28:31.702389 17344 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:28:31.702394 17344 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:28:31.702404 17344 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.702412 17344 net.cpp:150] Setting up L2_b8_relu\nI0817 16:28:31.702419 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702425 17344 net.cpp:165] Memory required for data: 1203713500\nI0817 16:28:31.702428 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.702435 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.702440 17344 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:28:31.702450 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:31.702472 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:31.702518 17344 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.702530 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702538 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.702541 17344 net.cpp:165] Memory required for data: 1211905500\nI0817 16:28:31.702548 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:28:31.702561 17344 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:28:31.702567 17344 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:31.702580 17344 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:28:31.703052 17344 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:28:31.703068 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.703073 17344 net.cpp:165] Memory required for data: 1216001500\nI0817 16:28:31.703080 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:28:31.703092 17344 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:28:31.703099 17344 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:28:31.703107 17344 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:28:31.703351 17344 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:28:31.703364 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.703377 17344 net.cpp:165] Memory required for data: 1220097500\nI0817 16:28:31.703387 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:31.703395 17344 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:28:31.703402 17344 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:28:31.703409 17344 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.703470 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:31.703619 17344 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:28:31.703634 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.703639 17344 net.cpp:165] Memory required for data: 1224193500\nI0817 16:28:31.703647 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:28:31.703655 17344 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:28:31.703660 17344 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:28:31.703667 17344 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.703682 17344 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:28:31.703691 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.703696 17344 net.cpp:165] Memory required for data: 1228289500\nI0817 16:28:31.703701 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:28:31.703714 17344 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:28:31.703721 17344 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:28:31.703732 17344 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:28:31.704200 17344 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:28:31.704213 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.704218 17344 net.cpp:165] Memory required for data: 1232385500\nI0817 16:28:31.704227 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:28:31.704238 17344 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:28:31.704246 17344 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:28:31.704255 17344 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:28:31.704505 17344 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:28:31.704519 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.704524 17344 net.cpp:165] Memory required for data: 1236481500\nI0817 16:28:31.704565 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:31.704578 17344 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:28:31.704586 17344 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:28:31.704592 17344 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.704653 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:31.704809 17344 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:28:31.704823 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.704828 17344 net.cpp:165] Memory required for data: 1240577500\nI0817 16:28:31.704836 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:28:31.704849 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:28:31.704855 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:28:31.704862 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:31.704869 17344 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:28:31.704903 17344 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:28:31.704913 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.704918 17344 net.cpp:165] Memory required for data: 1244673500\nI0817 16:28:31.704923 17344 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:28:31.704931 17344 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:28:31.704936 17344 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:28:31.704946 17344 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.704955 17344 net.cpp:150] Setting up L2_b9_relu\nI0817 16:28:31.704962 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.704967 17344 net.cpp:165] Memory required for data: 1248769500\nI0817 16:28:31.704977 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.704985 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.704990 17344 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:28:31.705000 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:31.705010 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:31.705060 17344 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.705072 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.705078 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.705083 17344 net.cpp:165] Memory required for data: 1256961500\nI0817 16:28:31.705087 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:28:31.705098 17344 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:28:31.705104 17344 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:31.705116 17344 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:28:31.705587 17344 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:28:31.705601 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.705606 17344 net.cpp:165] Memory required for data: 1257985500\nI0817 16:28:31.705615 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:28:31.705624 17344 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:28:31.705631 17344 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:28:31.705641 17344 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:28:31.706094 17344 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:28:31.706112 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.706117 17344 net.cpp:165] Memory required for data: 1259009500\nI0817 16:28:31.706128 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:31.706137 17344 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:28:31.706145 17344 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:28:31.706152 17344 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.706208 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:31.706362 17344 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:28:31.706374 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.706379 17344 net.cpp:165] Memory required for data: 1260033500\nI0817 16:28:31.706388 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:28:31.706399 17344 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:28:31.706405 17344 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:28:31.706413 17344 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.706423 17344 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:28:31.706429 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.706434 17344 net.cpp:165] Memory required for data: 1261057500\nI0817 16:28:31.706437 17344 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:28:31.706452 17344 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:28:31.706459 17344 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:28:31.706466 17344 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:28:31.706951 17344 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:28:31.706966 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.706971 17344 net.cpp:165] Memory required for data: 1262081500\nI0817 16:28:31.706980 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:28:31.706992 17344 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:28:31.706998 17344 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:28:31.707012 17344 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:28:31.707267 17344 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:28:31.707279 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.707291 17344 net.cpp:165] Memory required for data: 1263105500\nI0817 16:28:31.707303 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:31.707311 17344 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:28:31.707317 17344 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:28:31.707329 17344 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.707384 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:31.707538 17344 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:28:31.707551 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.707556 17344 net.cpp:165] Memory required for data: 1264129500\nI0817 16:28:31.707566 17344 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:28:31.707574 17344 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:28:31.707581 17344 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:31.707592 17344 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:28:31.707628 17344 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:28:31.707639 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.707644 17344 net.cpp:165] Memory required for data: 1265153500\nI0817 16:28:31.707649 17344 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:28:31.707659 17344 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:28:31.707664 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:28:31.707670 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:28:31.707686 17344 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:28:31.707721 17344 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:28:31.707731 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.707736 17344 net.cpp:165] Memory required for data: 1266177500\nI0817 16:28:31.707741 17344 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:28:31.707753 17344 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:28:31.707758 17344 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:28:31.707765 17344 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.707774 17344 net.cpp:150] Setting up L3_b1_relu\nI0817 16:28:31.707782 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.707785 17344 net.cpp:165] Memory required for data: 1267201500\nI0817 16:28:31.707790 17344 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:28:31.707803 17344 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:28:31.707809 17344 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:28:31.709061 17344 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:28:31.709079 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.709084 17344 net.cpp:165] Memory required for data: 1268225500\nI0817 16:28:31.709090 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:28:31.709100 17344 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:28:31.709106 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:28:31.709113 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:28:31.709125 17344 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:28:31.709164 17344 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:28:31.709180 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.709185 17344 net.cpp:165] Memory required for data: 1270273500\nI0817 16:28:31.709190 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.709198 17344 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.709204 17344 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:28:31.709211 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:31.709226 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:31.709275 17344 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.709286 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.709293 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.709306 17344 net.cpp:165] Memory required for data: 1274369500\nI0817 16:28:31.709311 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:28:31.709326 17344 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:28:31.709332 17344 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:31.709342 17344 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:28:31.711325 17344 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:28:31.711344 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.711349 17344 net.cpp:165] Memory required for data: 1276417500\nI0817 16:28:31.711359 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:28:31.711370 17344 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:28:31.711377 17344 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:28:31.711385 17344 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:28:31.711652 17344 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:28:31.711664 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.711669 17344 net.cpp:165] Memory required for data: 1278465500\nI0817 16:28:31.711688 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:31.711697 17344 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:28:31.711704 17344 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:28:31.711711 17344 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.711778 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:31.711937 17344 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:28:31.711953 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.711958 17344 net.cpp:165] Memory required for data: 1280513500\nI0817 16:28:31.711967 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:28:31.711975 17344 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:28:31.711982 17344 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:28:31.711988 17344 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.711997 17344 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:28:31.712004 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.712009 17344 net.cpp:165] Memory required for data: 1282561500\nI0817 16:28:31.712013 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:28:31.712028 17344 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:28:31.712034 17344 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:28:31.712043 17344 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:28:31.713075 17344 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:28:31.713090 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713095 17344 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:28:31.713104 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:28:31.713116 17344 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:28:31.713122 17344 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:28:31.713130 17344 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:28:31.713392 17344 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:28:31.713404 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713409 17344 net.cpp:165] Memory required for data: 1286657500\nI0817 16:28:31.713419 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:31.713428 17344 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:28:31.713434 17344 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:28:31.713444 17344 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.713501 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:31.713656 17344 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:28:31.713668 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713673 17344 net.cpp:165] Memory required for data: 1288705500\nI0817 16:28:31.713690 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:28:31.713709 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:28:31.713716 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:28:31.713723 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:31.713732 17344 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:28:31.713778 17344 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:28:31.713788 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713793 17344 net.cpp:165] Memory required for data: 1290753500\nI0817 16:28:31.713798 17344 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:28:31.713806 17344 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:28:31.713812 17344 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:28:31.713826 17344 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:28:31.713835 17344 net.cpp:150] Setting up L3_b2_relu\nI0817 16:28:31.713842 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713847 17344 net.cpp:165] Memory required for data: 1292801500\nI0817 16:28:31.713851 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.713858 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.713865 17344 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:28:31.713871 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:31.713881 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:31.713929 17344 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.713942 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713948 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.713953 17344 net.cpp:165] Memory required for data: 1296897500\nI0817 16:28:31.713958 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:28:31.713968 17344 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:28:31.713974 17344 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:31.713986 17344 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:28:31.715008 17344 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:28:31.715023 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.715029 17344 net.cpp:165] Memory required for data: 1298945500\nI0817 16:28:31.715036 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:28:31.715049 17344 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:28:31.715055 17344 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:28:31.715064 17344 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:28:31.715327 17344 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:28:31.715339 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.715344 17344 net.cpp:165] Memory required for data: 1300993500\nI0817 16:28:31.715354 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:31.715363 17344 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:28:31.715368 17344 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:28:31.715376 17344 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.715436 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:31.715590 17344 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:28:31.715602 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.715607 17344 net.cpp:165] Memory required for data: 1303041500\nI0817 16:28:31.715615 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:28:31.715623 17344 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:28:31.715629 17344 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:28:31.715636 17344 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.715646 17344 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:28:31.715652 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.715663 17344 net.cpp:165] Memory required for data: 1305089500\nI0817 16:28:31.715669 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:28:31.715689 17344 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:28:31.715697 17344 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:28:31.715708 17344 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:28:31.716747 17344 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:28:31.716761 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.716766 17344 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:28:31.716774 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:28:31.716789 17344 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:28:31.716794 17344 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:28:31.716804 17344 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:28:31.717066 17344 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:28:31.717078 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717083 17344 net.cpp:165] Memory required for data: 1309185500\nI0817 16:28:31.717092 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:31.717104 17344 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:28:31.717111 17344 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:28:31.717118 17344 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.717177 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:31.717339 17344 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:28:31.717351 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717356 17344 net.cpp:165] Memory required for data: 1311233500\nI0817 16:28:31.717365 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:28:31.717376 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:28:31.717383 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:28:31.717391 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:31.717398 17344 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:28:31.717434 17344 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:28:31.717443 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717448 17344 net.cpp:165] Memory required for data: 1313281500\nI0817 16:28:31.717453 17344 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:28:31.717460 17344 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:28:31.717466 17344 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:28:31.717476 17344 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.717485 17344 net.cpp:150] Setting up L3_b3_relu\nI0817 16:28:31.717492 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717496 17344 net.cpp:165] Memory required for data: 1315329500\nI0817 16:28:31.717501 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.717509 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.717514 17344 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:28:31.717520 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:31.717530 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:31.717578 17344 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.717591 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717597 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.717602 17344 net.cpp:165] Memory required for data: 1319425500\nI0817 16:28:31.717607 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:28:31.717620 17344 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:28:31.717627 17344 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:31.717643 17344 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:28:31.718689 17344 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:28:31.718705 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.718710 17344 net.cpp:165] Memory required for data: 1321473500\nI0817 16:28:31.718719 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:28:31.718731 17344 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:28:31.718737 17344 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:28:31.718746 17344 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:28:31.719014 17344 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:28:31.719027 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.719032 17344 net.cpp:165] Memory required for data: 1323521500\nI0817 16:28:31.719041 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:31.719050 17344 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:28:31.719056 17344 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:28:31.719064 17344 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.719123 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:31.719280 17344 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:28:31.719291 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.719296 17344 net.cpp:165] Memory required for data: 1325569500\nI0817 16:28:31.719305 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:28:31.719313 17344 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:28:31.719319 17344 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:28:31.719329 17344 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.719338 17344 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:28:31.719346 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.719350 17344 net.cpp:165] Memory required for data: 1327617500\nI0817 16:28:31.719354 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:28:31.719368 17344 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:28:31.719374 17344 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:28:31.719383 17344 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:28:31.720422 17344 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:28:31.720438 17344 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:28:31.720443 17344 net.cpp:165] Memory required for data: 1329665500\nI0817 16:28:31.720450 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:28:31.720463 17344 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:28:31.720469 17344 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:28:31.720479 17344 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:28:31.720758 17344 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:28:31.720772 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.720777 17344 net.cpp:165] Memory required for data: 1331713500\nI0817 16:28:31.720788 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:31.720798 17344 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:28:31.720805 17344 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:28:31.720813 17344 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.720875 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:31.721035 17344 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:28:31.721048 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.721053 17344 net.cpp:165] Memory required for data: 1333761500\nI0817 16:28:31.721061 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:28:31.721073 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:28:31.721081 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:28:31.721087 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:31.721097 17344 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:28:31.721132 17344 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:28:31.721148 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.721153 17344 net.cpp:165] Memory required for data: 1335809500\nI0817 16:28:31.721158 17344 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:28:31.721168 17344 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:28:31.721174 17344 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:28:31.721181 17344 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:28:31.721190 17344 net.cpp:150] Setting up L3_b4_relu\nI0817 16:28:31.721197 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.721201 17344 net.cpp:165] Memory required for data: 1337857500\nI0817 16:28:31.721206 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.721213 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.721218 17344 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:28:31.721225 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:31.721235 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:31.721284 17344 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.721297 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.721302 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.721307 17344 net.cpp:165] Memory required for data: 1341953500\nI0817 16:28:31.721312 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:28:31.721325 17344 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:28:31.721333 17344 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:31.721340 17344 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:28:31.722371 17344 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:28:31.722388 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.722393 17344 net.cpp:165] Memory required for data: 1344001500\nI0817 16:28:31.722400 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:28:31.722412 17344 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:28:31.722419 17344 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:28:31.722429 17344 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:28:31.723697 17344 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:28:31.723714 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.723719 17344 net.cpp:165] Memory required for data: 1346049500\nI0817 16:28:31.723731 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:31.723743 17344 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:28:31.723750 17344 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:28:31.723758 17344 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.723824 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:31.723984 17344 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:28:31.723996 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.724000 17344 net.cpp:165] Memory required for data: 1348097500\nI0817 16:28:31.724009 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:28:31.724020 17344 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:28:31.724027 17344 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:28:31.724035 17344 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.724046 17344 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:28:31.724053 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.724058 17344 net.cpp:165] Memory required for data: 1350145500\nI0817 16:28:31.724062 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:28:31.724073 17344 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:28:31.724079 17344 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:28:31.724090 17344 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:28:31.726100 17344 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:28:31.726119 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726124 17344 net.cpp:165] Memory required for data: 1352193500\nI0817 16:28:31.726133 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:28:31.726146 17344 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:28:31.726153 17344 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:28:31.726161 17344 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:28:31.726419 17344 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:28:31.726433 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726438 17344 net.cpp:165] Memory required for data: 1354241500\nI0817 16:28:31.726449 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:31.726460 17344 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:28:31.726466 17344 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:28:31.726475 17344 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.726533 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:31.726692 17344 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:28:31.726706 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726711 17344 net.cpp:165] Memory required for data: 1356289500\nI0817 16:28:31.726719 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:28:31.726732 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:28:31.726738 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:28:31.726745 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:31.726755 17344 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:28:31.726788 17344 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:28:31.726797 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726801 17344 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:28:31.726806 17344 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:28:31.726817 17344 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:28:31.726824 17344 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:28:31.726830 17344 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.726840 17344 net.cpp:150] Setting up L3_b5_relu\nI0817 16:28:31.726846 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726850 17344 net.cpp:165] Memory required for data: 1360385500\nI0817 16:28:31.726855 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.726862 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.726868 17344 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:28:31.726876 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:31.726884 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:31.726933 17344 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.726943 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726950 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.726954 17344 net.cpp:165] Memory required for data: 1364481500\nI0817 16:28:31.726959 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:28:31.726974 17344 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:28:31.726980 17344 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:31.726989 17344 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:28:31.728008 17344 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:28:31.728024 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.728029 17344 net.cpp:165] Memory required for data: 1366529500\nI0817 16:28:31.728037 
17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:28:31.728060 17344 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:28:31.728067 17344 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:28:31.728075 17344 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:28:31.728335 17344 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:28:31.728348 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.728353 17344 net.cpp:165] Memory required for data: 1368577500\nI0817 16:28:31.728363 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:31.728373 17344 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:28:31.728379 17344 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:28:31.728386 17344 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.728446 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:31.728597 17344 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:28:31.728610 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.728615 17344 net.cpp:165] Memory required for data: 1370625500\nI0817 16:28:31.728623 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:28:31.728631 17344 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:28:31.728638 17344 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:28:31.728649 17344 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.728659 17344 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:28:31.728667 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.728670 17344 net.cpp:165] Memory required for data: 1372673500\nI0817 16:28:31.728680 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:28:31.728696 17344 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:28:31.728703 17344 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:28:31.728711 17344 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:28:31.729722 17344 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:28:31.729737 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.729743 17344 net.cpp:165] Memory required for data: 1374721500\nI0817 16:28:31.729751 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:28:31.729763 17344 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:28:31.729770 17344 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:28:31.729779 17344 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:28:31.730033 17344 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:28:31.730046 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730051 17344 net.cpp:165] Memory required for data: 1376769500\nI0817 16:28:31.730060 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:31.730073 17344 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:28:31.730079 17344 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:28:31.730087 17344 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.730150 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:31.730306 17344 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:28:31.730319 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730324 17344 net.cpp:165] Memory required for data: 1378817500\nI0817 16:28:31.730334 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:28:31.730345 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:28:31.730351 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:28:31.730358 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:31.730368 17344 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:28:31.730401 17344 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:28:31.730409 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730413 
17344 net.cpp:165] Memory required for data: 1380865500\nI0817 16:28:31.730418 17344 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:28:31.730429 17344 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:28:31.730435 17344 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:28:31.730449 17344 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.730459 17344 net.cpp:150] Setting up L3_b6_relu\nI0817 16:28:31.730466 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730470 17344 net.cpp:165] Memory required for data: 1382913500\nI0817 16:28:31.730475 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.730482 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.730487 17344 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:28:31.730494 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:31.730504 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:31.730551 17344 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.730563 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730569 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.730574 17344 net.cpp:165] Memory required for data: 1387009500\nI0817 16:28:31.730579 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:28:31.730592 17344 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:28:31.730599 17344 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:31.730608 17344 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:28:31.731627 17344 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:28:31.731643 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.731648 17344 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:28:31.731657 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:28:31.731668 17344 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:28:31.731680 17344 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:28:31.731693 17344 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:28:31.731956 17344 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:28:31.731968 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.731973 17344 net.cpp:165] Memory required for data: 1391105500\nI0817 16:28:31.731983 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:31.731992 17344 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:28:31.731998 17344 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:28:31.732009 17344 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.732066 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:31.732220 17344 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:28:31.732234 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.732239 17344 net.cpp:165] Memory required for data: 1393153500\nI0817 16:28:31.732247 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:28:31.732280 17344 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:28:31.732290 17344 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:28:31.732297 17344 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.732307 17344 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:28:31.732313 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.732318 17344 net.cpp:165] Memory required for data: 1395201500\nI0817 16:28:31.732323 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:28:31.732338 17344 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:28:31.732344 17344 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:28:31.732352 
17344 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:28:31.733384 17344 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:28:31.733399 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.733404 17344 net.cpp:165] Memory required for data: 1397249500\nI0817 16:28:31.733413 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:28:31.733425 17344 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:28:31.733438 17344 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:28:31.733448 17344 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:28:31.733721 17344 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:28:31.733734 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.733739 17344 net.cpp:165] Memory required for data: 1399297500\nI0817 16:28:31.733749 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:31.733757 17344 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:28:31.733764 17344 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:28:31.733772 17344 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.733832 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:31.733988 17344 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:28:31.733999 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.734004 17344 net.cpp:165] Memory required for data: 1401345500\nI0817 16:28:31.734014 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:28:31.734022 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:28:31.734028 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:28:31.734035 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:31.734046 17344 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:28:31.734078 17344 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:28:31.734091 17344 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:28:31.734096 17344 net.cpp:165] Memory required for data: 1403393500\nI0817 16:28:31.734102 17344 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:28:31.734108 17344 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:28:31.734114 17344 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:28:31.734122 17344 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.734130 17344 net.cpp:150] Setting up L3_b7_relu\nI0817 16:28:31.734138 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.734141 17344 net.cpp:165] Memory required for data: 1405441500\nI0817 16:28:31.734146 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.734158 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.734163 17344 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:28:31.734170 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:31.734179 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:31.734226 17344 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.734237 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.734244 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.734248 17344 net.cpp:165] Memory required for data: 1409537500\nI0817 16:28:31.734253 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:28:31.734264 17344 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:28:31.734271 17344 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:31.734282 17344 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:28:31.735306 17344 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:28:31.735321 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:28:31.735327 17344 net.cpp:165] Memory required for data: 1411585500\nI0817 16:28:31.735334 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:28:31.735343 17344 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:28:31.735350 17344 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:28:31.735361 17344 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:28:31.735626 17344 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:28:31.735641 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.735647 17344 net.cpp:165] Memory required for data: 1413633500\nI0817 16:28:31.735664 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:31.735672 17344 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:28:31.735685 17344 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:28:31.735693 17344 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.735754 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:31.735910 17344 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:28:31.735924 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.735929 17344 net.cpp:165] Memory required for data: 1415681500\nI0817 16:28:31.735936 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:28:31.735944 17344 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:28:31.735950 17344 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:28:31.735960 17344 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.735970 17344 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:28:31.735977 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.735981 17344 net.cpp:165] Memory required for data: 1417729500\nI0817 16:28:31.735986 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:28:31.736001 17344 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:28:31.736006 17344 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:28:31.736014 17344 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:28:31.737033 17344 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:28:31.737048 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737053 17344 net.cpp:165] Memory required for data: 1419777500\nI0817 16:28:31.737062 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:28:31.737076 17344 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:28:31.737082 17344 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:28:31.737093 17344 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:28:31.737352 17344 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:28:31.737365 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737370 17344 net.cpp:165] Memory required for data: 1421825500\nI0817 16:28:31.737381 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:31.737390 17344 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:28:31.737396 17344 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:28:31.737403 17344 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.737467 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:31.737619 17344 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:28:31.737632 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737637 17344 net.cpp:165] Memory required for data: 1423873500\nI0817 16:28:31.737646 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:28:31.737655 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:28:31.737661 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:28:31.737668 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:31.737684 17344 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:28:31.737720 17344 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:28:31.737735 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737740 17344 net.cpp:165] Memory required for data: 1425921500\nI0817 16:28:31.737745 17344 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:28:31.737757 17344 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:28:31.737762 17344 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:28:31.737769 17344 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.737779 17344 net.cpp:150] Setting up L3_b8_relu\nI0817 16:28:31.737785 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737790 17344 net.cpp:165] Memory required for data: 1427969500\nI0817 16:28:31.737794 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.737812 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.737818 17344 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:28:31.737825 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:31.737834 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:31.737882 17344 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.737895 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737900 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.737905 17344 net.cpp:165] Memory required for data: 1432065500\nI0817 16:28:31.737910 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:28:31.737921 17344 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:28:31.737927 17344 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:31.737938 17344 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:28:31.739948 17344 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:28:31.739966 
17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.739971 17344 net.cpp:165] Memory required for data: 1434113500\nI0817 16:28:31.739980 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:28:31.739994 17344 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:28:31.740000 17344 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:28:31.740011 17344 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:28:31.740276 17344 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:28:31.740289 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.740294 17344 net.cpp:165] Memory required for data: 1436161500\nI0817 16:28:31.740304 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:31.740314 17344 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:28:31.740319 17344 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:28:31.740330 17344 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.740388 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:31.740547 17344 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:28:31.740559 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.740564 17344 net.cpp:165] Memory required for data: 1438209500\nI0817 16:28:31.740573 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:28:31.740584 17344 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:28:31.740591 17344 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:28:31.740598 17344 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.740607 17344 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:28:31.740614 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.740618 17344 net.cpp:165] Memory required for data: 1440257500\nI0817 16:28:31.740623 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:28:31.740638 17344 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:28:31.740644 17344 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:28:31.740654 17344 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:28:31.741684 17344 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:28:31.741699 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.741704 17344 net.cpp:165] Memory required for data: 1442305500\nI0817 16:28:31.741714 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:28:31.741722 17344 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:28:31.741729 17344 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:28:31.741740 17344 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:28:31.742013 17344 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:28:31.742029 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.742034 17344 net.cpp:165] Memory required for data: 1444353500\nI0817 16:28:31.742053 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:31.742061 17344 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:28:31.742069 17344 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:28:31.742075 17344 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.742133 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:31.742292 17344 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:28:31.742305 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.742311 17344 net.cpp:165] Memory required for data: 1446401500\nI0817 16:28:31.742318 17344 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:28:31.742331 17344 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:28:31.742337 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:28:31.742344 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:31.742352 17344 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:28:31.742388 17344 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:28:31.742399 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.742404 17344 net.cpp:165] Memory required for data: 1448449500\nI0817 16:28:31.742409 17344 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:28:31.742416 17344 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:28:31.742422 17344 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:28:31.742429 17344 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.742439 17344 net.cpp:150] Setting up L3_b9_relu\nI0817 16:28:31.742446 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.742450 17344 net.cpp:165] Memory required for data: 1450497500\nI0817 16:28:31.742455 17344 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:28:31.742462 17344 net.cpp:100] Creating Layer post_pool\nI0817 16:28:31.742468 17344 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:28:31.742478 17344 net.cpp:408] post_pool -> post_pool\nI0817 16:28:31.742513 17344 net.cpp:150] Setting up post_pool\nI0817 16:28:31.742524 17344 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:28:31.742529 17344 net.cpp:165] Memory required for data: 1450529500\nI0817 16:28:31.742534 17344 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:28:31.742617 17344 net.cpp:100] Creating Layer post_FC\nI0817 16:28:31.742630 17344 net.cpp:434] post_FC <- post_pool\nI0817 16:28:31.742645 17344 net.cpp:408] post_FC -> post_FC_top\nI0817 16:28:31.742894 17344 net.cpp:150] Setting up post_FC\nI0817 16:28:31.742910 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.742916 17344 net.cpp:165] Memory required for data: 1450534500\nI0817 16:28:31.742925 17344 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:28:31.742934 17344 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:28:31.742940 17344 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:28:31.742951 17344 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:28:31.742962 17344 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:28:31.743015 17344 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:28:31.743026 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.743032 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.743037 17344 net.cpp:165] Memory required for data: 1450544500\nI0817 16:28:31.743042 17344 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:28:31.743084 17344 net.cpp:100] Creating Layer accuracy\nI0817 16:28:31.743095 17344 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:28:31.743103 17344 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:28:31.743110 17344 net.cpp:408] accuracy -> accuracy\nI0817 16:28:31.743152 17344 net.cpp:150] Setting up accuracy\nI0817 16:28:31.743165 17344 net.cpp:157] Top shape: (1)\nI0817 16:28:31.743170 17344 net.cpp:165] Memory required for data: 1450544504\nI0817 16:28:31.743182 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:31.743191 17344 net.cpp:100] Creating Layer loss\nI0817 16:28:31.743196 17344 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:28:31.743203 17344 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:28:31.743214 17344 net.cpp:408] loss -> loss\nI0817 16:28:31.743261 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:31.743420 17344 net.cpp:150] Setting up loss\nI0817 16:28:31.743435 17344 net.cpp:157] Top shape: (1)\nI0817 16:28:31.743440 17344 net.cpp:160]     with loss weight 1\nI0817 16:28:31.743515 17344 net.cpp:165] Memory required for data: 1450544508\nI0817 16:28:31.743523 17344 net.cpp:226] loss needs backward computation.\nI0817 16:28:31.743530 17344 net.cpp:228] accuracy does not need backward computation.\nI0817 16:28:31.743535 17344 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:28:31.743541 17344 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:28:31.743546 17344 net.cpp:226] post_pool needs backward computation.\nI0817 16:28:31.743551 17344 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:28:31.743556 17344 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.743561 17344 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.743566 17344 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.743571 17344 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.743577 17344 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.743582 17344 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.743587 17344 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.743592 17344 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.743597 17344 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:28:31.743602 17344 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:28:31.743607 17344 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.743613 17344 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.743618 17344 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.743623 17344 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.743628 17344 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.743633 17344 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:28:31.743638 17344 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.743643 17344 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.743647 17344 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:28:31.743652 17344 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:28:31.743657 17344 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:28:31.743666 17344 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.743672 17344 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.743683 17344 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.743690 17344 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.743695 17344 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.743700 17344 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.743705 17344 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.743710 17344 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:28:31.743716 17344 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:28:31.743721 17344 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.743727 17344 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.743732 17344 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.743737 17344 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.743757 17344 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.743762 17344 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.743767 17344 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.743772 17344 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.743778 17344 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:28:31.743783 17344 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:28:31.743788 17344 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:28:31.743793 17344 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.743798 17344 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.743804 
17344 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.743809 17344 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.743814 17344 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.743819 17344 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.743824 17344 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.743829 17344 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:28:31.743835 17344 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:28:31.743840 17344 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.743845 17344 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.743850 17344 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.743856 17344 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.743862 17344 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.743867 17344 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.743871 17344 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.743877 17344 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.743882 17344 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:28:31.743887 17344 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:28:31.743892 17344 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:28:31.743898 17344 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.743903 17344 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.743908 17344 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.743913 17344 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.743918 17344 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:28:31.743923 17344 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.743928 17344 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.743933 17344 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:28:31.743938 17344 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:28:31.743944 17344 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.743949 17344 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.743954 17344 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.743960 17344 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.743965 17344 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.743973 17344 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.743978 17344 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.743984 17344 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.743989 17344 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:28:31.743994 17344 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:28:31.744001 17344 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:28:31.744010 17344 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:28:31.744016 17344 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.744022 17344 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:28:31.744027 17344 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.744033 17344 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.744038 17344 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.744043 17344 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.744048 17344 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:28:31.744053 17344 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.744060 17344 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.744065 17344 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:28:31.744069 17344 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:28:31.744074 17344 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.744081 17344 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.744086 17344 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.744091 17344 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.744096 17344 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.744102 17344 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.744107 17344 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.744112 17344 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.744117 17344 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:28:31.744123 17344 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:28:31.744128 17344 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.744133 17344 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.744139 17344 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.744144 17344 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.744149 17344 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.744154 17344 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:28:31.744159 17344 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.744165 17344 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.744170 17344 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:28:31.744175 17344 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:28:31.744180 17344 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:28:31.744186 17344 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.744191 17344 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.744197 17344 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.744202 17344 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.744207 17344 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.744212 17344 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.744217 17344 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.744223 17344 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:28:31.744228 17344 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:28:31.744233 17344 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.744240 17344 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.744244 17344 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.744251 17344 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.744256 17344 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.744261 17344 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.744271 17344 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.744277 17344 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.744282 17344 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:28:31.744287 17344 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:28:31.744292 17344 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:28:31.744298 17344 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.744303 17344 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.744308 17344 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.744314 17344 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.744319 17344 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.744324 17344 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.744329 17344 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.744335 17344 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:28:31.744343 17344 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:28:31.744349 17344 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.744354 17344 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.744360 17344 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.744365 17344 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.744371 17344 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.744376 17344 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.744381 17344 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.744387 17344 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.744392 17344 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:28:31.744398 17344 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:28:31.744403 17344 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:28:31.744410 17344 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.744415 17344 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.744421 17344 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.744426 17344 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.744431 17344 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:28:31.744436 17344 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.744441 17344 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.744446 17344 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:28:31.744452 17344 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:28:31.744457 17344 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.744463 17344 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.744468 17344 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.744474 17344 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.744479 17344 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.744484 17344 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.744490 17344 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.744495 17344 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.744501 17344 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:28:31.744506 17344 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:28:31.744513 17344 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:28:31.744518 17344 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:28:31.744524 17344 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.744535 17344 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:28:31.744541 17344 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.744546 17344 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.744552 17344 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.744559 17344 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.744563 17344 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:28:31.744568 17344 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.744575 17344 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.744580 17344 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:28:31.744585 17344 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:28:31.744591 17344 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.744597 17344 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.744602 17344 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.744608 17344 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.744613 17344 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.744618 17344 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.744624 17344 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.744629 17344 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.744635 17344 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:28:31.744642 17344 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:28:31.744647 17344 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.744653 17344 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.744658 17344 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.744664 17344 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.744669 17344 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.744680 17344 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:28:31.744688 17344 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.744693 17344 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.744699 17344 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:28:31.744705 17344 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:28:31.744711 17344 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:28:31.744717 17344 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.744724 17344 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.744729 17344 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.744735 17344 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.744740 17344 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.744745 17344 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.744751 17344 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.744757 17344 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:28:31.744762 17344 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:28:31.744768 17344 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.744774 17344 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.744781 17344 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.744786 17344 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.744791 17344 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.744797 17344 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.744802 17344 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.744813 17344 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.744820 17344 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:28:31.744825 17344 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:28:31.744832 17344 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:28:31.744838 17344 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.744843 17344 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.744849 17344 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.744854 17344 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.744860 17344 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.744865 17344 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.744871 17344 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.744877 17344 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:28:31.744882 17344 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:28:31.744889 17344 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.744894 17344 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.744900 17344 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.744906 17344 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.744911 17344 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.744917 17344 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.744922 17344 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.744928 17344 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.744933 17344 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:28:31.744940 17344 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:28:31.744945 17344 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:28:31.744951 17344 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.744956 17344 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.744962 17344 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.744968 17344 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.744973 17344 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:28:31.744979 17344 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.744984 17344 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.744990 17344 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:28:31.744997 17344 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:28:31.745002 17344 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.745008 17344 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.745013 17344 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.745019 17344 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.745028 17344 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.745033 17344 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.745039 17344 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.745045 17344 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.745051 17344 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:28:31.745056 17344 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:28:31.745062 17344 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.745069 17344 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.745074 17344 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.745080 17344 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.745090 17344 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.745097 17344 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:28:31.745102 17344 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.745108 17344 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.745115 17344 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:28:31.745120 17344 net.cpp:226] pre_relu needs backward computation.\nI0817 16:28:31.745124 17344 net.cpp:226] pre_scale needs backward computation.\nI0817 16:28:31.745131 17344 net.cpp:226] pre_bn needs backward computation.\nI0817 16:28:31.745136 17344 net.cpp:226] pre_conv needs backward computation.\nI0817 16:28:31.745142 17344 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:28:31.745149 17344 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:28:31.745153 17344 net.cpp:270] This network produces output accuracy\nI0817 16:28:31.745160 17344 net.cpp:270] This network produces output loss\nI0817 16:28:31.745522 17344 net.cpp:283] Network initialization done.\nI0817 16:28:31.754736 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:31.754775 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:31.754822 17344 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:28:31.755199 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:28:31.755218 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:28:31.755228 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:28:31.755237 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:28:31.755247 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:28:31.755255 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:28:31.755264 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:28:31.755273 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:28:31.755282 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:28:31.755290 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:28:31.755300 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:28:31.755307 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:28:31.755316 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:28:31.755326 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:28:31.755334 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:28:31.755342 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:28:31.755352 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:28:31.755359 17344 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:28:31.755368 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:28:31.755386 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:28:31.755396 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:28:31.755404 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:28:31.755416 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:28:31.755424 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:28:31.755434 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:28:31.755441 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:28:31.755450 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:28:31.755458 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:28:31.755467 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:28:31.755475 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:28:31.755484 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:28:31.755493 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:28:31.755502 17344 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:28:31.755509 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:28:31.755518 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:28:31.755527 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:28:31.755535 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:28:31.755544 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:28:31.755553 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:28:31.755560 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:28:31.755573 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:28:31.755580 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:28:31.755589 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:28:31.755597 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:28:31.755606 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:28:31.755614 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:28:31.755623 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:28:31.755631 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:28:31.755640 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:28:31.755656 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:28:31.755664 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:28:31.755673 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:28:31.755690 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:28:31.755699 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:28:31.755708 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:28:31.755717 17344 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:28:31.757375 17344 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0817 16:28:31.758993 17344 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:28:31.759229 17344 net.cpp:100] Creating Layer dataLayer\nI0817 16:28:31.759250 17344 net.cpp:408] dataLayer -> data_top\nI0817 16:28:31.759266 17344 net.cpp:408] dataLayer -> label\nI0817 16:28:31.759279 17344 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:28:31.768821 17351 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:28:31.769150 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:31.776461 17344 net.cpp:150] Setting up dataLayer\nI0817 16:28:31.776485 17344 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:28:31.776494 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.776499 17344 net.cpp:165] Memory required for data: 1536500\nI0817 16:28:31.776505 17344 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:28:31.776515 17344 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:28:31.776521 17344 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:28:31.776571 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:28:31.776590 17344 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:28:31.776711 17344 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:28:31.776731 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.776737 17344 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:31.776741 17344 net.cpp:165] Memory required for data: 1537500\nI0817 16:28:31.776747 17344 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:28:31.776767 17344 net.cpp:100] Creating Layer pre_conv\nI0817 16:28:31.776773 17344 net.cpp:434] pre_conv <- data_top\nI0817 16:28:31.776782 17344 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:28:31.777235 17344 net.cpp:150] Setting up pre_conv\nI0817 16:28:31.777261 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.777268 17344 net.cpp:165] Memory required for data: 9729500\nI0817 16:28:31.777282 17344 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:28:31.777300 17344 net.cpp:100] Creating Layer pre_bn\nI0817 16:28:31.777307 17344 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:28:31.777315 17344 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:28:31.777617 17344 net.cpp:150] Setting up pre_bn\nI0817 16:28:31.777631 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.777637 17344 net.cpp:165] Memory required for data: 17921500\nI0817 16:28:31.777653 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:31.777686 17344 net.cpp:100] Creating Layer pre_scale\nI0817 16:28:31.777695 17344 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:28:31.777704 17344 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:28:31.777776 17344 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:31.778007 17344 net.cpp:150] Setting up pre_scale\nI0817 16:28:31.778023 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.778028 17344 net.cpp:165] Memory required for data: 26113500\nI0817 16:28:31.778036 17344 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:28:31.778044 17344 net.cpp:100] Creating Layer pre_relu\nI0817 16:28:31.778050 17344 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:28:31.778061 17344 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:28:31.778071 17344 net.cpp:150] Setting up pre_relu\nI0817 16:28:31.778077 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.778082 17344 net.cpp:165] Memory required for data: 34305500\nI0817 16:28:31.778087 
17344 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:28:31.778100 17344 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:28:31.778108 17344 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:28:31.778116 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:28:31.778126 17344 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:28:31.778180 17344 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:28:31.778190 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.778198 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.778203 17344 net.cpp:165] Memory required for data: 50689500\nI0817 16:28:31.778209 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:28:31.778240 17344 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:28:31.778249 17344 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:28:31.778259 17344 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:28:31.778652 17344 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:28:31.778668 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.778673 17344 net.cpp:165] Memory required for data: 58881500\nI0817 16:28:31.778699 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:28:31.778717 17344 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:28:31.778724 17344 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:28:31.778735 17344 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:28:31.779048 17344 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:28:31.779065 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.779070 17344 net.cpp:165] Memory required for data: 67073500\nI0817 16:28:31.779081 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:31.779090 17344 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:28:31.779098 17344 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:28:31.779106 17344 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.779220 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:31.779567 17344 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:28:31.779583 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.779597 17344 net.cpp:165] Memory required for data: 75265500\nI0817 16:28:31.779608 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:28:31.779615 17344 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:28:31.779621 17344 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:28:31.779631 17344 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.779644 17344 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:28:31.779654 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.779657 17344 net.cpp:165] Memory required for data: 83457500\nI0817 16:28:31.779664 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:28:31.779675 17344 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:28:31.779691 17344 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:28:31.779705 17344 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:28:31.780120 17344 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:28:31.780138 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.780143 17344 net.cpp:165] Memory required for data: 91649500\nI0817 16:28:31.780153 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:28:31.780161 17344 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:28:31.780167 17344 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:28:31.780179 17344 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:28:31.780516 17344 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:28:31.780534 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:31.780539 17344 net.cpp:165] Memory required for data: 99841500\nI0817 16:28:31.780553 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:31.780562 17344 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:28:31.780568 17344 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:28:31.780580 17344 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.780649 17344 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:31.780838 17344 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:28:31.780853 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.780858 17344 net.cpp:165] Memory required for data: 108033500\nI0817 16:28:31.780867 17344 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:28:31.780882 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:28:31.780892 17344 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:28:31.780900 17344 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:28:31.780910 17344 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:28:31.780948 17344 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:28:31.780957 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.780964 17344 net.cpp:165] Memory required for data: 116225500\nI0817 16:28:31.780971 17344 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:28:31.780980 17344 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:28:31.780987 17344 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:28:31.780993 17344 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.781002 17344 net.cpp:150] Setting up L1_b1_relu\nI0817 16:28:31.781009 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.781013 17344 net.cpp:165] Memory required for data: 124417500\nI0817 16:28:31.781018 17344 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.781026 17344 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.781035 17344 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:28:31.781044 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:31.781051 17344 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:31.781108 17344 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:31.781127 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.781133 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.781138 17344 net.cpp:165] Memory required for data: 140801500\nI0817 16:28:31.781143 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:28:31.781157 17344 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:28:31.781167 17344 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:31.781177 17344 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:28:31.781601 17344 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:28:31.781616 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.781621 17344 net.cpp:165] Memory required for data: 148993500\nI0817 16:28:31.781630 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:28:31.781642 17344 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:28:31.781648 17344 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:28:31.781659 17344 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:28:31.781992 17344 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:28:31.782007 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.782012 17344 net.cpp:165] Memory required for data: 157185500\nI0817 16:28:31.782023 17344 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:28:31.782048 17344 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:28:31.782058 17344 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:28:31.782068 17344 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.782140 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:31.782341 17344 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:28:31.782356 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.782362 17344 net.cpp:165] Memory required for data: 165377500\nI0817 16:28:31.782373 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:28:31.782382 17344 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:28:31.782388 17344 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:28:31.782398 17344 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.782413 17344 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:28:31.782419 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.782424 17344 net.cpp:165] Memory required for data: 173569500\nI0817 16:28:31.782429 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:28:31.782444 17344 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:28:31.782449 17344 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:28:31.782457 17344 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:28:31.782943 17344 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:28:31.782959 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.782964 17344 net.cpp:165] Memory required for data: 181761500\nI0817 16:28:31.782974 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:28:31.782985 17344 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:28:31.782991 17344 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:28:31.783001 17344 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:28:31.783326 17344 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:28:31.783342 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783349 17344 net.cpp:165] Memory required for data: 189953500\nI0817 16:28:31.783365 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:31.783375 17344 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:28:31.783381 17344 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:28:31.783392 17344 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.783464 17344 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:31.783638 17344 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:28:31.783650 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783669 17344 net.cpp:165] Memory required for data: 198145500\nI0817 16:28:31.783686 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:28:31.783702 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:28:31.783710 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:28:31.783716 17344 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:31.783723 17344 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:28:31.783766 17344 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:28:31.783777 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783782 17344 net.cpp:165] Memory required for data: 206337500\nI0817 16:28:31.783787 17344 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:28:31.783797 17344 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:28:31.783803 17344 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:28:31.783810 17344 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:28:31.783819 17344 net.cpp:150] Setting up L1_b2_relu\nI0817 16:28:31.783826 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783833 17344 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:28:31.783838 17344 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.783846 17344 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.783851 17344 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:28:31.783862 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:31.783876 17344 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:31.783923 17344 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:31.783936 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783943 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.783947 17344 net.cpp:165] Memory required for data: 230913500\nI0817 16:28:31.783952 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:28:31.783977 17344 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:28:31.783985 17344 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:31.783995 17344 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:28:31.784392 17344 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:28:31.784409 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.784415 17344 net.cpp:165] Memory required for data: 239105500\nI0817 16:28:31.784423 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:28:31.784432 17344 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:28:31.784442 17344 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:28:31.784451 17344 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:28:31.784766 17344 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:28:31.784781 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.784788 17344 net.cpp:165] Memory required for data: 
247297500\nI0817 16:28:31.784799 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:31.784808 17344 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:28:31.784813 17344 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:28:31.784824 17344 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.784893 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:31.785095 17344 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:28:31.785115 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.785121 17344 net.cpp:165] Memory required for data: 255489500\nI0817 16:28:31.785132 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:28:31.785140 17344 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:28:31.785146 17344 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:28:31.785158 17344 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.785176 17344 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:28:31.785184 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.785192 17344 net.cpp:165] Memory required for data: 263681500\nI0817 16:28:31.785197 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:28:31.785212 17344 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:28:31.785220 17344 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:28:31.785234 17344 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:28:31.785796 17344 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:28:31.785815 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.785820 17344 net.cpp:165] Memory required for data: 271873500\nI0817 16:28:31.785828 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:28:31.785850 17344 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:28:31.785858 17344 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:28:31.785869 17344 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:28:31.786192 17344 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:28:31.786208 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.786214 17344 net.cpp:165] Memory required for data: 280065500\nI0817 16:28:31.786226 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:31.786234 17344 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:28:31.786240 17344 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:28:31.786247 17344 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.786320 17344 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:31.786501 17344 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:28:31.786514 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.786520 17344 net.cpp:165] Memory required for data: 288257500\nI0817 16:28:31.786528 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:28:31.786540 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:28:31.786550 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:28:31.786557 17344 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:31.786566 17344 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:28:31.786607 17344 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:28:31.786617 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.786623 17344 net.cpp:165] Memory required for data: 296449500\nI0817 16:28:31.786630 17344 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:28:31.786638 17344 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:28:31.786643 17344 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:28:31.786653 17344 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.786662 17344 net.cpp:150] Setting up L1_b3_relu\nI0817 16:28:31.786669 17344 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:28:31.786674 17344 net.cpp:165] Memory required for data: 304641500\nI0817 16:28:31.786695 17344 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.786702 17344 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.786708 17344 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:28:31.786720 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:31.786733 17344 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:31.786789 17344 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:31.786801 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.786808 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.786813 17344 net.cpp:165] Memory required for data: 321025500\nI0817 16:28:31.786819 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:28:31.786842 17344 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:28:31.786849 17344 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:31.786861 17344 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:28:31.787253 17344 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:28:31.787268 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.787273 17344 net.cpp:165] Memory required for data: 329217500\nI0817 16:28:31.787282 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:28:31.787291 17344 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:28:31.787297 17344 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:28:31.787308 17344 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:28:31.787622 17344 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:28:31.787638 17344 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:31.787643 17344 net.cpp:165] Memory required for data: 337409500\nI0817 16:28:31.787654 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:31.787662 17344 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:28:31.787668 17344 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:28:31.787683 17344 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.787747 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:31.787904 17344 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:28:31.787917 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.787922 17344 net.cpp:165] Memory required for data: 345601500\nI0817 16:28:31.787931 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:28:31.787941 17344 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:28:31.787947 17344 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:28:31.787955 17344 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.787964 17344 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:28:31.787972 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.787976 17344 net.cpp:165] Memory required for data: 353793500\nI0817 16:28:31.787981 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:28:31.787994 17344 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:28:31.788000 17344 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:28:31.788012 17344 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:28:31.788390 17344 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:28:31.788405 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.788410 17344 net.cpp:165] Memory required for data: 361985500\nI0817 16:28:31.788419 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:28:31.788430 17344 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:28:31.788436 17344 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:28:31.788444 17344 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:28:31.788722 17344 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:28:31.788738 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.788743 17344 net.cpp:165] Memory required for data: 370177500\nI0817 16:28:31.788753 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:31.788760 17344 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:28:31.788766 17344 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:28:31.788776 17344 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.788835 17344 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:31.788986 17344 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:28:31.789002 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789007 17344 net.cpp:165] Memory required for data: 378369500\nI0817 16:28:31.789016 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:28:31.789024 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:28:31.789031 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:28:31.789036 17344 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:31.789052 17344 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:28:31.789089 17344 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:28:31.789098 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789103 17344 net.cpp:165] Memory required for data: 386561500\nI0817 16:28:31.789108 17344 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:28:31.789115 17344 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:28:31.789126 17344 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:28:31.789134 17344 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:28:31.789142 17344 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:28:31.789149 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789153 17344 net.cpp:165] Memory required for data: 394753500\nI0817 16:28:31.789158 17344 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.789165 17344 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.789170 17344 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:28:31.789180 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:31.789189 17344 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:31.789234 17344 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:31.789245 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789252 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789257 17344 net.cpp:165] Memory required for data: 411137500\nI0817 16:28:31.789261 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:28:31.789275 17344 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:28:31.789281 17344 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:31.789290 17344 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:28:31.789654 17344 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:28:31.789669 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.789674 17344 net.cpp:165] Memory required for data: 419329500\nI0817 16:28:31.789702 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:28:31.789716 17344 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:28:31.789722 17344 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:28:31.789731 17344 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:28:31.789997 17344 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:28:31.790011 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.790016 17344 net.cpp:165] Memory required for data: 427521500\nI0817 16:28:31.790026 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:31.790035 17344 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:28:31.790040 17344 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:28:31.790048 17344 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.790107 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:31.790263 17344 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:28:31.790277 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.790282 17344 net.cpp:165] Memory required for data: 435713500\nI0817 16:28:31.790289 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:28:31.790300 17344 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:28:31.790307 17344 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:28:31.790313 17344 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.790323 17344 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:28:31.790329 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.790334 17344 net.cpp:165] Memory required for data: 443905500\nI0817 16:28:31.790339 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:28:31.790359 17344 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:28:31.790365 17344 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:28:31.790376 17344 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:28:31.790736 17344 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:28:31.790751 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.790756 17344 net.cpp:165] Memory required for data: 452097500\nI0817 16:28:31.790766 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:28:31.790776 17344 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:28:31.790783 17344 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:28:31.790796 17344 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:28:31.791067 17344 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:28:31.791080 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791085 17344 net.cpp:165] Memory required for data: 460289500\nI0817 16:28:31.791095 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:31.791103 17344 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:28:31.791110 17344 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:28:31.791116 17344 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.791177 17344 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:31.791335 17344 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:28:31.791348 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791353 17344 net.cpp:165] Memory required for data: 468481500\nI0817 16:28:31.791362 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:28:31.791370 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:28:31.791379 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:28:31.791386 17344 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:31.791393 17344 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:28:31.791430 17344 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:28:31.791441 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791446 17344 net.cpp:165] Memory required for data: 476673500\nI0817 16:28:31.791451 17344 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:28:31.791458 17344 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:28:31.791463 17344 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:28:31.791473 17344 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.791482 17344 net.cpp:150] Setting up L1_b5_relu\nI0817 16:28:31.791489 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791494 17344 net.cpp:165] Memory required for data: 484865500\nI0817 16:28:31.791498 17344 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.791504 17344 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.791509 17344 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:28:31.791517 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:31.791527 17344 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:31.791577 17344 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:31.791586 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791592 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.791597 17344 net.cpp:165] Memory required for data: 501249500\nI0817 16:28:31.791602 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:28:31.791615 17344 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:28:31.791621 17344 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:31.791630 17344 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:28:31.791988 17344 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:28:31.792009 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.792014 17344 net.cpp:165] Memory required for data: 509441500\nI0817 16:28:31.792023 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:28:31.792035 17344 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:28:31.792042 17344 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:28:31.792052 17344 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:28:31.792331 17344 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:28:31.792345 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.792349 17344 net.cpp:165] Memory required for data: 517633500\nI0817 16:28:31.792361 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:31.792368 17344 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:28:31.792374 17344 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:28:31.792382 17344 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.792448 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:31.792605 17344 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:28:31.792618 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.792623 17344 net.cpp:165] Memory required for data: 525825500\nI0817 16:28:31.792632 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:28:31.792642 17344 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:28:31.792649 17344 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:28:31.792656 17344 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.792665 17344 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:28:31.792672 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.792685 17344 net.cpp:165] Memory required for data: 534017500\nI0817 16:28:31.792690 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:28:31.792703 17344 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:28:31.792709 17344 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:28:31.792721 17344 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:28:31.793071 17344 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:28:31.793085 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793090 17344 net.cpp:165] Memory required for data: 542209500\nI0817 16:28:31.793099 17344 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:28:31.793112 17344 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:28:31.793118 17344 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:28:31.793125 17344 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:28:31.793421 17344 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:28:31.793437 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793442 17344 net.cpp:165] Memory required for data: 550401500\nI0817 16:28:31.793452 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:31.793459 17344 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:28:31.793465 17344 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:28:31.793473 17344 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.793534 17344 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:31.793704 17344 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:28:31.793717 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793722 17344 net.cpp:165] Memory required for data: 558593500\nI0817 16:28:31.793730 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:28:31.793747 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:28:31.793754 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:28:31.793761 17344 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:31.793771 17344 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:28:31.793807 17344 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:28:31.793818 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793823 17344 net.cpp:165] Memory required for data: 566785500\nI0817 16:28:31.793838 17344 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:28:31.793846 17344 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:28:31.793853 17344 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:28:31.793859 17344 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.793867 17344 net.cpp:150] Setting up L1_b6_relu\nI0817 16:28:31.793874 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793879 17344 net.cpp:165] Memory required for data: 574977500\nI0817 16:28:31.793884 17344 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.793893 17344 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.793898 17344 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:28:31.793905 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:31.793915 17344 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:31.793962 17344 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:31.793977 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793984 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.793988 17344 net.cpp:165] Memory required for data: 591361500\nI0817 16:28:31.793993 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:28:31.794004 17344 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:28:31.794010 17344 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:31.794018 17344 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:28:31.794374 17344 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:28:31.794389 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.794394 17344 net.cpp:165] Memory required for data: 599553500\nI0817 16:28:31.794402 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:28:31.794414 17344 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:28:31.794420 17344 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:28:31.794428 17344 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:28:31.794713 17344 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:28:31.794728 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.794733 17344 net.cpp:165] Memory required for data: 607745500\nI0817 16:28:31.794742 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:31.794751 17344 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:28:31.794757 17344 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:28:31.794767 17344 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.794826 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:31.794987 17344 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:28:31.795001 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.795006 17344 net.cpp:165] Memory required for data: 615937500\nI0817 16:28:31.795014 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:28:31.795022 17344 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:28:31.795027 17344 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:28:31.795037 17344 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.795047 17344 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:28:31.795054 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.795058 17344 net.cpp:165] Memory required for data: 624129500\nI0817 16:28:31.795063 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:28:31.795076 17344 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:28:31.795083 17344 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:28:31.795090 17344 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:28:31.795466 17344 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:28:31.795488 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.795493 17344 
net.cpp:165] Memory required for data: 632321500\nI0817 16:28:31.795502 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:28:31.795511 17344 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:28:31.795521 17344 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:28:31.795528 17344 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:28:31.795814 17344 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:28:31.795827 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.795832 17344 net.cpp:165] Memory required for data: 640513500\nI0817 16:28:31.795842 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:31.795850 17344 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:28:31.795856 17344 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:28:31.795867 17344 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.795925 17344 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:31.796085 17344 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:28:31.796102 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796106 17344 net.cpp:165] Memory required for data: 648705500\nI0817 16:28:31.796115 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:28:31.796123 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:28:31.796129 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:28:31.796135 17344 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:31.796144 17344 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:28:31.796180 17344 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:28:31.796191 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796196 17344 net.cpp:165] Memory required for data: 656897500\nI0817 16:28:31.796201 17344 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:28:31.796211 17344 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:28:31.796216 17344 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:28:31.796223 17344 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.796232 17344 net.cpp:150] Setting up L1_b7_relu\nI0817 16:28:31.796239 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796243 17344 net.cpp:165] Memory required for data: 665089500\nI0817 16:28:31.796248 17344 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.796257 17344 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.796263 17344 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:28:31.796270 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:31.796279 17344 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:31.796325 17344 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:31.796340 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796347 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796351 17344 net.cpp:165] Memory required for data: 681473500\nI0817 16:28:31.796356 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:28:31.796367 17344 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:28:31.796373 17344 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:31.796382 17344 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:28:31.796752 17344 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:28:31.796766 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.796772 17344 net.cpp:165] Memory required for data: 689665500\nI0817 16:28:31.796780 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:28:31.796792 17344 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:28:31.796798 17344 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:28:31.796813 17344 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:28:31.797092 17344 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:28:31.797106 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.797111 17344 net.cpp:165] Memory required for data: 697857500\nI0817 16:28:31.797121 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:31.797128 17344 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:28:31.797135 17344 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:28:31.797145 17344 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.797204 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:31.797369 17344 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:28:31.797381 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.797386 17344 net.cpp:165] Memory required for data: 706049500\nI0817 16:28:31.797395 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:28:31.797402 17344 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:28:31.797408 17344 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:28:31.797415 17344 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.797425 17344 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:28:31.797431 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.797435 17344 net.cpp:165] Memory required for data: 714241500\nI0817 16:28:31.797441 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:28:31.797453 17344 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:28:31.797459 17344 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:28:31.797471 17344 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:28:31.797840 17344 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:28:31.797855 17344 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.797860 17344 net.cpp:165] Memory required for data: 722433500\nI0817 16:28:31.797868 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:28:31.797880 17344 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:28:31.797886 17344 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:28:31.797896 17344 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:28:31.798171 17344 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:28:31.798183 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798187 17344 net.cpp:165] Memory required for data: 730625500\nI0817 16:28:31.798197 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:31.798205 17344 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:28:31.798211 17344 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:28:31.798223 17344 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.798280 17344 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:31.798436 17344 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:28:31.798452 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798457 17344 net.cpp:165] Memory required for data: 738817500\nI0817 16:28:31.798466 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:28:31.798475 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:28:31.798480 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:28:31.798487 17344 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:31.798494 17344 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:28:31.798530 17344 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:28:31.798539 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798544 17344 net.cpp:165] Memory required for data: 747009500\nI0817 16:28:31.798549 17344 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:28:31.798557 17344 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:28:31.798565 17344 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:28:31.798573 17344 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.798588 17344 net.cpp:150] Setting up L1_b8_relu\nI0817 16:28:31.798595 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798599 17344 net.cpp:165] Memory required for data: 755201500\nI0817 16:28:31.798604 17344 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.798610 17344 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.798615 17344 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:28:31.798625 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:31.798635 17344 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:31.798689 17344 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:31.798702 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798708 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.798713 17344 net.cpp:165] Memory required for data: 771585500\nI0817 16:28:31.798718 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:28:31.798732 17344 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:28:31.798738 17344 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:31.798748 17344 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:28:31.799115 17344 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:28:31.799132 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.799137 17344 net.cpp:165] Memory required for data: 779777500\nI0817 16:28:31.799146 17344 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:28:31.799155 17344 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:28:31.799161 17344 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:28:31.799175 17344 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:28:31.799449 17344 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:28:31.799463 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.799468 17344 net.cpp:165] Memory required for data: 787969500\nI0817 16:28:31.799479 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:31.799489 17344 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:28:31.799495 17344 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:28:31.799502 17344 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.799561 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:31.799728 17344 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:28:31.799741 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.799746 17344 net.cpp:165] Memory required for data: 796161500\nI0817 16:28:31.799756 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:28:31.799767 17344 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:28:31.799773 17344 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:28:31.799780 17344 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.799790 17344 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:28:31.799796 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.799801 17344 net.cpp:165] Memory required for data: 804353500\nI0817 16:28:31.799805 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:28:31.799818 17344 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:28:31.799824 17344 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:28:31.799835 17344 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:28:31.800186 17344 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:28:31.800200 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.800205 17344 net.cpp:165] Memory required for data: 812545500\nI0817 16:28:31.800213 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:28:31.800222 17344 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:28:31.800228 17344 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:28:31.800246 17344 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:28:31.800526 17344 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:28:31.800540 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.800545 17344 net.cpp:165] Memory required for data: 820737500\nI0817 16:28:31.800577 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:31.800587 17344 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:28:31.800593 17344 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:28:31.800603 17344 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.800659 17344 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:31.800829 17344 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:28:31.800843 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.800848 17344 net.cpp:165] Memory required for data: 828929500\nI0817 16:28:31.800858 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:28:31.800866 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:28:31.800873 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:28:31.800879 17344 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:31.800889 17344 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:28:31.800925 17344 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:28:31.800937 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.800941 17344 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:28:31.800947 17344 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:28:31.800957 17344 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:28:31.800988 17344 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:28:31.800997 17344 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.801007 17344 net.cpp:150] Setting up L1_b9_relu\nI0817 16:28:31.801014 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.801019 17344 net.cpp:165] Memory required for data: 845313500\nI0817 16:28:31.801023 17344 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.801033 17344 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.801039 17344 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:28:31.801046 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:31.801055 17344 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:31.801106 17344 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:31.801120 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.801127 17344 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:31.801131 17344 net.cpp:165] Memory required for data: 861697500\nI0817 16:28:31.801136 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:28:31.801147 17344 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:28:31.801153 17344 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:31.801162 17344 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:28:31.801525 17344 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:28:31.801539 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.801544 17344 net.cpp:165] Memory required for data: 
863745500\nI0817 16:28:31.801553 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:28:31.801564 17344 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:28:31.801570 17344 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:28:31.801579 17344 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:28:31.801854 17344 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:28:31.801868 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.801873 17344 net.cpp:165] Memory required for data: 865793500\nI0817 16:28:31.801890 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:31.801899 17344 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:28:31.801905 17344 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:28:31.801915 17344 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.801975 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:31.802134 17344 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:28:31.802146 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.802151 17344 net.cpp:165] Memory required for data: 867841500\nI0817 16:28:31.802160 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:28:31.802168 17344 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:28:31.802175 17344 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:28:31.802183 17344 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.802193 17344 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:28:31.802201 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.802204 17344 net.cpp:165] Memory required for data: 869889500\nI0817 16:28:31.802208 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:28:31.802222 17344 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:28:31.802227 17344 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:28:31.802237 17344 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:28:31.802590 17344 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:28:31.802604 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.802609 17344 net.cpp:165] Memory required for data: 871937500\nI0817 16:28:31.802618 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:28:31.802629 17344 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:28:31.802635 17344 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:28:31.802644 17344 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:28:31.802918 17344 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:28:31.802932 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.802937 17344 net.cpp:165] Memory required for data: 873985500\nI0817 16:28:31.802948 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:31.802956 17344 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:28:31.802963 17344 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:28:31.802973 17344 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.803033 17344 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:31.803190 17344 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:28:31.803203 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.803208 17344 net.cpp:165] Memory required for data: 876033500\nI0817 16:28:31.803216 17344 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:28:31.803226 17344 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:28:31.803232 17344 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:31.803243 17344 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:28:31.803272 17344 net.cpp:150] Setting up L2_b1_pool\nI0817 16:28:31.803283 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.803288 17344 net.cpp:165] Memory required for data: 878081500\nI0817 16:28:31.803293 17344 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:28:31.803304 17344 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:28:31.803310 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:28:31.803318 17344 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:28:31.803324 17344 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:28:31.803361 17344 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:28:31.803372 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.803377 17344 net.cpp:165] Memory required for data: 880129500\nI0817 16:28:31.803382 17344 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:28:31.803390 17344 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:28:31.803401 17344 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:28:31.803409 17344 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.803418 17344 net.cpp:150] Setting up L2_b1_relu\nI0817 16:28:31.803424 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.803429 17344 net.cpp:165] Memory required for data: 882177500\nI0817 16:28:31.803433 17344 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:28:31.803442 17344 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:28:31.803452 17344 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:28:31.805660 17344 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:28:31.805683 17344 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:31.805692 17344 net.cpp:165] Memory required for data: 884225500\nI0817 16:28:31.805698 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:28:31.805707 17344 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:28:31.805713 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:28:31.805721 17344 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:28:31.805728 17344 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:28:31.805939 17344 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:28:31.805953 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.805958 17344 net.cpp:165] Memory required for data: 888321500\nI0817 16:28:31.805963 17344 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.805971 17344 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.805977 17344 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:28:31.805987 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:31.805997 17344 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:31.806051 17344 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:31.806061 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.806068 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.806073 17344 net.cpp:165] Memory required for data: 896513500\nI0817 16:28:31.806078 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:28:31.806092 17344 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:28:31.806098 17344 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:31.806107 17344 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:28:31.806607 17344 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:28:31.806622 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.806627 17344 net.cpp:165] Memory required for data: 900609500\nI0817 16:28:31.806635 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:28:31.806648 17344 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:28:31.806653 17344 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:28:31.806664 17344 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:28:31.806943 17344 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:28:31.806957 17344 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:28:31.806962 17344 net.cpp:165] Memory required for data: 904705500\nI0817 16:28:31.806972 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:31.806982 17344 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:28:31.806988 17344 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:28:31.806994 17344 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.807056 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:31.807211 17344 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:28:31.807224 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.807229 17344 net.cpp:165] Memory required for data: 908801500\nI0817 16:28:31.807237 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:28:31.807245 17344 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:28:31.807260 17344 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:28:31.807271 17344 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.807281 17344 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:28:31.807287 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.807291 17344 net.cpp:165] Memory required for data: 912897500\nI0817 16:28:31.807296 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:28:31.807312 17344 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:28:31.807317 17344 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:28:31.807327 17344 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:28:31.807834 17344 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:28:31.807849 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.807854 17344 net.cpp:165] Memory required for data: 916993500\nI0817 16:28:31.807863 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:28:31.807875 17344 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:28:31.807881 
17344 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:28:31.807889 17344 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:28:31.808151 17344 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:28:31.808166 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808171 17344 net.cpp:165] Memory required for data: 921089500\nI0817 16:28:31.808182 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:31.808190 17344 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:28:31.808197 17344 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:28:31.808203 17344 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.808262 17344 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:31.808423 17344 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:28:31.808435 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808440 17344 net.cpp:165] Memory required for data: 925185500\nI0817 16:28:31.808449 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:28:31.808457 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:28:31.808464 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:28:31.808470 17344 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:31.808481 17344 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:28:31.808509 17344 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:28:31.808518 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808523 17344 net.cpp:165] Memory required for data: 929281500\nI0817 16:28:31.808528 17344 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:28:31.808535 17344 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:28:31.808540 17344 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:28:31.808552 17344 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:28:31.808560 17344 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:28:31.808568 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808571 17344 net.cpp:165] Memory required for data: 933377500\nI0817 16:28:31.808576 17344 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.808583 17344 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.808588 17344 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:28:31.808595 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:31.808604 17344 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:31.808655 17344 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:31.808666 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808673 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.808683 17344 net.cpp:165] Memory required for data: 941569500\nI0817 16:28:31.808696 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:28:31.808712 17344 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:28:31.808718 17344 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:31.808727 17344 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:28:31.809221 17344 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:28:31.809236 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.809240 17344 net.cpp:165] Memory required for data: 945665500\nI0817 16:28:31.809249 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:28:31.809260 17344 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:28:31.809267 17344 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:28:31.809275 17344 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:28:31.809548 17344 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:28:31.809563 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.809568 17344 net.cpp:165] Memory required for data: 949761500\nI0817 16:28:31.809579 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:31.809587 17344 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:28:31.809593 17344 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:28:31.809600 17344 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.809659 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:31.809828 17344 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:28:31.809842 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.809847 17344 net.cpp:165] Memory required for data: 953857500\nI0817 16:28:31.809856 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:28:31.809864 17344 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:28:31.809870 17344 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:28:31.809880 17344 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.809890 17344 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:28:31.809896 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.809901 17344 net.cpp:165] Memory required for data: 957953500\nI0817 16:28:31.809906 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:28:31.809921 17344 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:28:31.809926 17344 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:28:31.809934 17344 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:28:31.810427 17344 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:28:31.810441 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.810446 17344 net.cpp:165] Memory required for data: 962049500\nI0817 16:28:31.810454 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:28:31.810466 17344 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:28:31.810472 17344 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:28:31.810480 17344 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:28:31.810761 17344 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:28:31.810775 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.810781 17344 net.cpp:165] Memory required for data: 966145500\nI0817 16:28:31.810791 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:31.810801 17344 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:28:31.810808 17344 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:28:31.810816 17344 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.810873 17344 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:31.811033 17344 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:28:31.811045 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811050 17344 net.cpp:165] Memory required for data: 970241500\nI0817 16:28:31.811059 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:28:31.811070 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:28:31.811084 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:28:31.811090 17344 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:31.811100 17344 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:28:31.811130 17344 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:28:31.811138 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811142 17344 net.cpp:165] Memory required for data: 974337500\nI0817 16:28:31.811147 17344 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:28:31.811167 17344 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:28:31.811174 17344 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:28:31.811182 17344 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.811190 17344 net.cpp:150] Setting up L2_b3_relu\nI0817 16:28:31.811197 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811202 17344 net.cpp:165] Memory required for data: 978433500\nI0817 16:28:31.811208 17344 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.811216 17344 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.811223 17344 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:28:31.811229 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:31.811239 17344 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:31.811290 17344 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:31.811302 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811308 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811313 17344 net.cpp:165] Memory required for data: 986625500\nI0817 16:28:31.811317 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:28:31.811328 17344 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:28:31.811334 17344 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:31.811347 17344 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:28:31.811854 17344 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:28:31.811869 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.811874 17344 net.cpp:165] Memory required for data: 990721500\nI0817 16:28:31.811883 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:28:31.811892 17344 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:28:31.811898 17344 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:28:31.811910 17344 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:28:31.812185 17344 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:28:31.812196 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.812201 17344 net.cpp:165] Memory required for data: 994817500\nI0817 16:28:31.812211 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:31.812222 17344 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:28:31.812228 17344 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:28:31.812237 17344 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.812294 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:31.812455 17344 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:28:31.812467 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.812472 17344 net.cpp:165] Memory required for data: 998913500\nI0817 16:28:31.812481 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:28:31.812491 17344 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:28:31.812497 17344 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:28:31.812505 17344 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.812515 17344 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:28:31.812520 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.812533 17344 net.cpp:165] Memory required for data: 1003009500\nI0817 16:28:31.812538 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:28:31.812552 17344 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:28:31.812557 17344 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:28:31.812568 17344 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:28:31.813068 17344 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:28:31.813083 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813088 17344 net.cpp:165] Memory required for data: 1007105500\nI0817 16:28:31.813097 17344 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:28:31.813107 17344 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:28:31.813112 17344 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:28:31.813120 17344 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:28:31.813385 17344 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:28:31.813397 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813402 17344 net.cpp:165] Memory required for data: 1011201500\nI0817 16:28:31.813412 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:31.813421 17344 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:28:31.813426 17344 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:28:31.813438 17344 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.813498 17344 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:31.813658 17344 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:28:31.813671 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813681 17344 net.cpp:165] Memory required for data: 1015297500\nI0817 16:28:31.813691 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:28:31.813700 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:28:31.813706 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:28:31.813714 17344 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:31.813724 17344 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:28:31.813751 17344 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:28:31.813765 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813768 17344 net.cpp:165] Memory required for data: 1019393500\nI0817 16:28:31.813773 17344 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:28:31.813781 17344 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:28:31.813786 17344 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:28:31.813793 17344 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:28:31.813802 17344 net.cpp:150] Setting up L2_b4_relu\nI0817 16:28:31.813809 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813813 17344 net.cpp:165] Memory required for data: 1023489500\nI0817 16:28:31.813818 17344 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.813827 17344 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.813833 17344 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:28:31.813840 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:31.813849 17344 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:31.813899 17344 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:31.813911 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813917 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.813922 17344 net.cpp:165] Memory required for data: 1031681500\nI0817 16:28:31.813927 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:28:31.813937 17344 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:28:31.813943 17344 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:31.813956 17344 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:28:31.814462 17344 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:28:31.814476 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.814481 17344 net.cpp:165] Memory required for data: 1035777500\nI0817 16:28:31.814489 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:28:31.814498 17344 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:28:31.814504 17344 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:28:31.814515 17344 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:28:31.814796 17344 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:28:31.814810 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.814815 17344 net.cpp:165] Memory required for data: 1039873500\nI0817 16:28:31.814826 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:31.814837 17344 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:28:31.814843 17344 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:28:31.814851 17344 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.814909 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:31.815069 17344 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:28:31.815081 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.815086 17344 net.cpp:165] Memory required for data: 1043969500\nI0817 16:28:31.815095 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:28:31.815102 17344 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:28:31.815109 17344 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:28:31.815119 17344 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.815129 17344 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:28:31.815135 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.815140 17344 net.cpp:165] Memory required for data: 1048065500\nI0817 16:28:31.815145 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:28:31.815158 17344 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:28:31.815165 17344 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:28:31.815174 17344 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:28:31.815665 17344 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:28:31.815685 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.815690 17344 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:28:31.815698 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:28:31.815707 17344 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:28:31.815713 17344 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:28:31.815722 17344 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:28:31.815995 17344 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:28:31.816009 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816013 17344 net.cpp:165] Memory required for data: 1056257500\nI0817 16:28:31.816023 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:31.816031 17344 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:28:31.816037 17344 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:28:31.816047 17344 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.816107 17344 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:31.816264 17344 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:28:31.816277 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816282 17344 net.cpp:165] Memory required for data: 1060353500\nI0817 16:28:31.816290 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:28:31.816298 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:28:31.816304 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:28:31.816311 17344 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:31.816321 17344 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:28:31.816349 17344 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:28:31.816367 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816372 17344 net.cpp:165] Memory required for data: 1064449500\nI0817 16:28:31.816377 17344 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:28:31.816387 17344 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:28:31.816393 17344 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:28:31.816401 17344 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.816409 17344 net.cpp:150] Setting up L2_b5_relu\nI0817 16:28:31.816416 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816421 17344 net.cpp:165] Memory required for data: 1068545500\nI0817 16:28:31.816426 17344 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.816435 17344 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.816440 17344 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:28:31.816447 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:31.816457 17344 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:31.816506 17344 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:31.816520 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816527 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.816531 17344 net.cpp:165] Memory required for data: 1076737500\nI0817 16:28:31.816536 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:28:31.816547 17344 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:28:31.816553 17344 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:31.816562 17344 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:28:31.817073 17344 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:28:31.817087 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.817093 17344 net.cpp:165] Memory required for data: 1080833500\nI0817 16:28:31.817101 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:28:31.817113 17344 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:28:31.817119 17344 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:28:31.817127 17344 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:28:31.817394 17344 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:28:31.817405 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.817410 17344 net.cpp:165] Memory required for data: 1084929500\nI0817 16:28:31.817420 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:31.817428 17344 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:28:31.817435 17344 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:28:31.817445 17344 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.817503 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:31.817662 17344 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:28:31.817674 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.817685 17344 net.cpp:165] Memory required for data: 1089025500\nI0817 16:28:31.817694 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:28:31.817703 17344 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:28:31.817709 17344 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:28:31.817719 17344 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.817729 17344 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:28:31.817736 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.817740 17344 net.cpp:165] Memory required for data: 1093121500\nI0817 16:28:31.817745 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:28:31.817759 17344 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:28:31.817764 17344 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:28:31.817780 17344 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:28:31.818274 17344 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:28:31.818287 17344 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.818292 17344 net.cpp:165] Memory required for data: 1097217500\nI0817 16:28:31.818300 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:28:31.818312 17344 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:28:31.818318 17344 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:28:31.818326 17344 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:28:31.818596 17344 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:28:31.818609 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.818614 17344 net.cpp:165] Memory required for data: 1101313500\nI0817 16:28:31.818624 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:31.818632 17344 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:28:31.818639 17344 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:28:31.818648 17344 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.818716 17344 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:31.818874 17344 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:28:31.818886 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.818892 17344 net.cpp:165] Memory required for data: 1105409500\nI0817 16:28:31.818900 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:28:31.818908 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:28:31.818914 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:28:31.818922 17344 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:31.818931 17344 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:28:31.818960 17344 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:28:31.818969 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.818974 17344 net.cpp:165] Memory required for data: 1109505500\nI0817 16:28:31.818979 17344 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:28:31.818989 17344 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:28:31.818994 17344 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:28:31.819001 17344 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.819010 17344 net.cpp:150] Setting up L2_b6_relu\nI0817 16:28:31.819017 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.819021 17344 net.cpp:165] Memory required for data: 1113601500\nI0817 16:28:31.819026 17344 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.819033 17344 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.819038 17344 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:28:31.819048 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:31.819058 17344 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:31.819105 17344 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:31.819121 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.819128 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.819133 17344 net.cpp:165] Memory required for data: 1121793500\nI0817 16:28:31.819138 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:28:31.819149 17344 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:28:31.819154 17344 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:31.819162 17344 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:28:31.820623 17344 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:28:31.820641 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.820647 17344 net.cpp:165] Memory required for data: 1125889500\nI0817 16:28:31.820655 17344 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:28:31.820683 17344 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:28:31.820691 17344 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:28:31.820703 17344 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:28:31.820973 17344 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:28:31.820987 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.820992 17344 net.cpp:165] Memory required for data: 1129985500\nI0817 16:28:31.821002 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:31.821010 17344 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:28:31.821017 17344 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:28:31.821024 17344 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.821086 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:31.821245 17344 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:28:31.821259 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.821264 17344 net.cpp:165] Memory required for data: 1134081500\nI0817 16:28:31.821272 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:28:31.821280 17344 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:28:31.821286 17344 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:28:31.821296 17344 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.821306 17344 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:28:31.821313 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.821317 17344 net.cpp:165] Memory required for data: 1138177500\nI0817 16:28:31.821322 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:28:31.821336 17344 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:28:31.821342 17344 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:28:31.821350 17344 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:28:31.821847 17344 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:28:31.821862 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.821867 17344 net.cpp:165] Memory required for data: 1142273500\nI0817 16:28:31.821876 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:28:31.821887 17344 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:28:31.821894 17344 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:28:31.821902 17344 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:28:31.822170 17344 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:28:31.822186 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822191 17344 net.cpp:165] Memory required for data: 1146369500\nI0817 16:28:31.822201 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:31.822209 17344 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:28:31.822216 17344 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:28:31.822223 17344 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.822283 17344 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:31.822441 17344 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:28:31.822454 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822459 17344 net.cpp:165] Memory required for data: 1150465500\nI0817 16:28:31.822468 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:28:31.822476 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:28:31.822484 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:28:31.822490 17344 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:31.822500 17344 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:28:31.822530 17344 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:28:31.822537 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822542 17344 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:28:31.822547 17344 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:28:31.822557 17344 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:28:31.822571 17344 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:28:31.822578 17344 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.822587 17344 net.cpp:150] Setting up L2_b7_relu\nI0817 16:28:31.822594 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822599 17344 net.cpp:165] Memory required for data: 1158657500\nI0817 16:28:31.822604 17344 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.822612 17344 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.822616 17344 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:28:31.822623 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:31.822633 17344 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:31.822695 17344 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:31.822707 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822713 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.822718 17344 net.cpp:165] Memory required for data: 1166849500\nI0817 16:28:31.822723 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:28:31.822737 17344 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:28:31.822744 17344 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:31.822753 17344 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:28:31.823240 17344 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:28:31.823253 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.823258 17344 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:28:31.823267 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:28:31.823278 17344 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:28:31.823285 17344 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:28:31.823293 17344 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:28:31.823567 17344 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:28:31.823583 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.823588 17344 net.cpp:165] Memory required for data: 1175041500\nI0817 16:28:31.823598 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:31.823607 17344 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:28:31.823613 17344 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:28:31.823621 17344 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.823686 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:31.823849 17344 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:28:31.823863 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.823868 17344 net.cpp:165] Memory required for data: 1179137500\nI0817 16:28:31.823876 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:28:31.823884 17344 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:28:31.823889 17344 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:28:31.823902 17344 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.823912 17344 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:28:31.823920 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.823923 17344 net.cpp:165] Memory required for data: 1183233500\nI0817 16:28:31.823928 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:28:31.823941 17344 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:28:31.823948 17344 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:28:31.823956 17344 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:28:31.824441 17344 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:28:31.824455 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.824460 17344 net.cpp:165] Memory required for data: 1187329500\nI0817 16:28:31.824476 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:28:31.824487 17344 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:28:31.824494 17344 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:28:31.824502 17344 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:28:31.824785 17344 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:28:31.824800 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.824805 17344 net.cpp:165] Memory required for data: 1191425500\nI0817 16:28:31.824815 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:31.824826 17344 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:28:31.824832 17344 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:28:31.824841 17344 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.824900 17344 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:31.825059 17344 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:28:31.825073 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.825078 17344 net.cpp:165] Memory required for data: 1195521500\nI0817 16:28:31.825085 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:28:31.825098 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:28:31.825103 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:28:31.825110 17344 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:31.825121 17344 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:28:31.825150 17344 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:28:31.825158 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:31.825163 17344 net.cpp:165] Memory required for data: 1199617500\nI0817 16:28:31.825168 17344 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:28:31.825176 17344 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:28:31.825181 17344 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:28:31.825191 17344 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.825201 17344 net.cpp:150] Setting up L2_b8_relu\nI0817 16:28:31.825207 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.825212 17344 net.cpp:165] Memory required for data: 1203713500\nI0817 16:28:31.825217 17344 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.825223 17344 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.825228 17344 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:28:31.825235 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:31.825258 17344 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:31.825312 17344 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:31.825325 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.825331 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.825335 17344 net.cpp:165] Memory required for data: 1211905500\nI0817 16:28:31.825340 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:28:31.825352 17344 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:28:31.825358 17344 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:31.825371 17344 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:28:31.825882 17344 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:28:31.825896 17344 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:31.825901 17344 net.cpp:165] Memory required for data: 1216001500\nI0817 16:28:31.825911 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:28:31.825920 17344 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:28:31.825927 17344 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:28:31.825937 17344 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:28:31.826217 17344 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:28:31.826237 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.826242 17344 net.cpp:165] Memory required for data: 1220097500\nI0817 16:28:31.826252 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:31.826264 17344 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:28:31.826270 17344 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:28:31.826278 17344 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.826337 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:31.826499 17344 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:28:31.826512 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.826517 17344 net.cpp:165] Memory required for data: 1224193500\nI0817 16:28:31.826525 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:28:31.826536 17344 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:28:31.826542 17344 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:28:31.826550 17344 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.826561 17344 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:28:31.826568 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.826573 17344 net.cpp:165] Memory required for data: 1228289500\nI0817 16:28:31.826577 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:28:31.826588 17344 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:28:31.826593 17344 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:28:31.826604 17344 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:28:31.828074 17344 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:28:31.828092 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828097 17344 net.cpp:165] Memory required for data: 1232385500\nI0817 16:28:31.828106 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:28:31.828119 17344 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:28:31.828126 17344 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:28:31.828137 17344 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:28:31.828408 17344 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:28:31.828421 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828426 17344 net.cpp:165] Memory required for data: 1236481500\nI0817 16:28:31.828474 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:31.828488 17344 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:28:31.828495 17344 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:28:31.828502 17344 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.828562 17344 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:31.828723 17344 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:28:31.828737 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828742 17344 net.cpp:165] Memory required for data: 1240577500\nI0817 16:28:31.828752 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:28:31.828760 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:28:31.828766 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:28:31.828773 17344 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:31.828784 17344 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:28:31.828812 17344 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:28:31.828825 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828830 17344 net.cpp:165] Memory required for data: 1244673500\nI0817 16:28:31.828835 17344 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:28:31.828842 17344 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:28:31.828848 17344 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:28:31.828855 17344 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.828864 17344 net.cpp:150] Setting up L2_b9_relu\nI0817 16:28:31.828871 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828883 17344 net.cpp:165] Memory required for data: 1248769500\nI0817 16:28:31.828888 17344 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.828898 17344 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.828903 17344 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:28:31.828912 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:31.828922 17344 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:31.828974 17344 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:31.828986 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828994 17344 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:31.828997 17344 net.cpp:165] Memory required for data: 1256961500\nI0817 16:28:31.829002 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:28:31.829013 17344 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:28:31.829020 17344 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:31.829031 17344 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:28:31.829529 17344 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:28:31.829542 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.829547 17344 net.cpp:165] Memory required for data: 1257985500\nI0817 16:28:31.829555 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:28:31.829566 17344 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:28:31.829571 17344 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:28:31.829582 17344 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:28:31.829869 17344 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:28:31.829885 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.829891 17344 net.cpp:165] Memory required for data: 1259009500\nI0817 16:28:31.829901 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:31.829910 17344 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:28:31.829916 17344 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:28:31.829923 17344 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.829980 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:31.830145 17344 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:28:31.830158 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.830163 17344 net.cpp:165] Memory required for data: 1260033500\nI0817 16:28:31.830173 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:28:31.830179 17344 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:28:31.830185 17344 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:28:31.830195 17344 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:31.830205 17344 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:28:31.830212 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.830216 17344 net.cpp:165] Memory required for data: 1261057500\nI0817 16:28:31.830221 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:28:31.830235 17344 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:28:31.830241 17344 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:28:31.830250 17344 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:28:31.830749 17344 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:28:31.830765 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.830770 17344 net.cpp:165] Memory required for data: 1262081500\nI0817 16:28:31.830778 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:28:31.830790 17344 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:28:31.830797 17344 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:28:31.830808 17344 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:28:31.831082 17344 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:28:31.831101 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.831106 17344 net.cpp:165] Memory required for data: 1263105500\nI0817 16:28:31.831116 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:31.831125 17344 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:28:31.831131 17344 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:28:31.831142 17344 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:28:31.831199 17344 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:31.831367 17344 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:28:31.831379 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.831384 17344 net.cpp:165] Memory required for data: 1264129500\nI0817 16:28:31.831393 17344 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:28:31.831403 17344 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:28:31.831408 17344 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:31.831419 17344 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:28:31.831459 17344 net.cpp:150] Setting up L3_b1_pool\nI0817 16:28:31.831470 17344 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:28:31.831475 17344 net.cpp:165] Memory required for data: 1265153500\nI0817 16:28:31.831480 17344 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:28:31.831487 17344 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:28:31.831493 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:28:31.831501 17344 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:28:31.831511 17344 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:28:31.831543 17344 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:28:31.831554 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.831559 17344 net.cpp:165] Memory required for data: 1266177500\nI0817 16:28:31.831563 17344 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:28:31.831571 17344 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:28:31.831578 17344 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:28:31.831583 17344 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:28:31.831593 17344 net.cpp:150] Setting up L3_b1_relu\nI0817 16:28:31.831599 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.831604 17344 net.cpp:165] Memory required for data: 1267201500\nI0817 16:28:31.831609 17344 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:28:31.831621 17344 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:28:31.831629 17344 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:28:31.832857 17344 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:28:31.832875 17344 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:31.832881 17344 net.cpp:165] Memory required for data: 1268225500\nI0817 16:28:31.832886 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:28:31.832896 17344 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:28:31.832902 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:28:31.832909 17344 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:28:31.832921 17344 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:28:31.832963 17344 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:28:31.832978 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.832983 17344 net.cpp:165] Memory required for data: 1270273500\nI0817 16:28:31.832988 17344 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.832996 17344 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.833001 17344 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:28:31.833009 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:31.833019 17344 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:31.833073 17344 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:31.833092 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.833099 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.833103 17344 net.cpp:165] Memory required for data: 1274369500\nI0817 16:28:31.833109 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:28:31.833123 17344 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:28:31.833129 17344 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:31.833139 17344 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:28:31.834193 17344 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:28:31.834208 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.834213 17344 net.cpp:165] Memory required for data: 1276417500\nI0817 16:28:31.834223 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:28:31.834234 17344 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:28:31.834241 17344 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:28:31.834252 17344 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:28:31.834527 17344 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:28:31.834540 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.834545 17344 net.cpp:165] Memory required for data: 1278465500\nI0817 16:28:31.834555 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:31.834564 17344 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:28:31.834570 17344 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:28:31.834581 17344 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.834643 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:31.834815 17344 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:28:31.834828 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.834833 17344 net.cpp:165] Memory required for data: 1280513500\nI0817 16:28:31.834842 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:28:31.834853 17344 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:28:31.834859 17344 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:28:31.834867 17344 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:31.834877 17344 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:28:31.834883 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.834887 17344 net.cpp:165] Memory required for data: 1282561500\nI0817 16:28:31.834892 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:28:31.834905 17344 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:28:31.834911 17344 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:28:31.834923 17344 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:28:31.835965 17344 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:28:31.835980 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.835985 17344 net.cpp:165] Memory required for data: 1284609500\nI0817 16:28:31.835994 17344 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:28:31.836004 17344 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:28:31.836010 17344 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:28:31.836020 17344 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:28:31.836293 17344 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:28:31.836308 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836313 17344 net.cpp:165] Memory required for data: 1286657500\nI0817 16:28:31.836323 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:31.836333 17344 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:28:31.836338 17344 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:28:31.836346 17344 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:28:31.836405 17344 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:31.836567 17344 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:28:31.836580 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836585 17344 net.cpp:165] Memory required for data: 1288705500\nI0817 16:28:31.836601 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:28:31.836613 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:28:31.836621 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:28:31.836627 17344 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:31.836634 17344 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:28:31.836673 17344 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:28:31.836690 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836695 17344 net.cpp:165] Memory required for data: 1290753500\nI0817 16:28:31.836700 17344 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:28:31.836707 17344 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:28:31.836714 17344 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:28:31.836720 17344 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:28:31.836730 17344 net.cpp:150] Setting up L3_b2_relu\nI0817 16:28:31.836737 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836741 17344 net.cpp:165] Memory required for data: 1292801500\nI0817 16:28:31.836746 17344 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.836752 17344 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.836758 17344 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:28:31.836771 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:31.836781 17344 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:31.836828 17344 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:31.836840 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836846 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.836851 17344 net.cpp:165] Memory required for data: 1296897500\nI0817 16:28:31.836855 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:28:31.836870 17344 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:28:31.836876 17344 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:31.836885 17344 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:28:31.837934 17344 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:28:31.837949 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.837954 17344 net.cpp:165] Memory required for data: 1298945500\nI0817 16:28:31.837962 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:28:31.837975 17344 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:28:31.837981 17344 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:28:31.837990 17344 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:28:31.838261 17344 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:28:31.838274 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.838279 17344 net.cpp:165] Memory required for data: 1300993500\nI0817 16:28:31.838289 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:31.838301 17344 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:28:31.838307 17344 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:28:31.838315 17344 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.838373 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:31.838534 17344 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:28:31.838546 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.838551 17344 net.cpp:165] Memory required for data: 1303041500\nI0817 16:28:31.838560 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:28:31.838572 17344 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:28:31.838577 17344 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:28:31.838584 17344 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:31.838601 17344 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:28:31.838608 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.838613 17344 net.cpp:165] Memory required for data: 1305089500\nI0817 16:28:31.838618 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:28:31.838632 17344 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:28:31.838639 17344 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:28:31.838649 17344 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:28:31.839701 17344 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:28:31.839716 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.839721 17344 net.cpp:165] Memory required for data: 1307137500\nI0817 16:28:31.839730 17344 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:28:31.839740 17344 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:28:31.839745 17344 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:28:31.839756 17344 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:28:31.840032 17344 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:28:31.840049 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840054 17344 net.cpp:165] Memory required for data: 1309185500\nI0817 16:28:31.840065 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:31.840073 17344 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:28:31.840080 17344 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:28:31.840087 17344 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:28:31.840147 17344 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:31.840307 17344 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:28:31.840320 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840325 17344 net.cpp:165] Memory required for data: 1311233500\nI0817 16:28:31.840334 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:28:31.840346 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:28:31.840353 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:28:31.840359 17344 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:31.840368 17344 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:28:31.840404 17344 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:28:31.840415 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840420 17344 net.cpp:165] Memory required for data: 1313281500\nI0817 16:28:31.840425 17344 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:28:31.840432 17344 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:28:31.840438 17344 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:28:31.840445 17344 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:28:31.840454 17344 net.cpp:150] Setting up L3_b3_relu\nI0817 16:28:31.840461 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840466 17344 net.cpp:165] Memory required for data: 1315329500\nI0817 16:28:31.840471 17344 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.840477 17344 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.840482 17344 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:28:31.840492 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:31.840502 17344 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:31.840549 17344 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:31.840560 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840566 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.840571 17344 net.cpp:165] Memory required for data: 1319425500\nI0817 16:28:31.840576 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:28:31.840590 17344 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:28:31.840596 17344 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:31.840612 17344 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:28:31.841665 17344 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:28:31.841686 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.841691 17344 net.cpp:165] Memory required for data: 1321473500\nI0817 16:28:31.841701 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:28:31.841711 17344 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:28:31.841718 17344 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:28:31.841727 17344 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:28:31.841995 17344 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:28:31.842008 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.842013 17344 net.cpp:165] Memory required for data: 1323521500\nI0817 16:28:31.842023 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:31.842036 17344 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:28:31.842041 17344 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:28:31.842049 17344 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.842113 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:31.842273 17344 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:28:31.842285 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.842290 17344 net.cpp:165] Memory required for data: 1325569500\nI0817 16:28:31.842299 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:28:31.842310 17344 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:28:31.842316 17344 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:28:31.842324 17344 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:31.842332 17344 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:28:31.842339 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.842344 17344 net.cpp:165] Memory required for data: 1327617500\nI0817 16:28:31.842348 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:28:31.842362 17344 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:28:31.842368 17344 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:28:31.842378 17344 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:28:31.844385 17344 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:28:31.844403 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.844408 17344 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:28:31.844418 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:28:31.844430 17344 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:28:31.844437 17344 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:28:31.844446 17344 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:28:31.844725 17344 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:28:31.844738 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.844743 17344 net.cpp:165] Memory required for data: 1331713500\nI0817 16:28:31.844754 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:31.844765 17344 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:28:31.844772 17344 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:28:31.844780 17344 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:28:31.844844 17344 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:31.845005 17344 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:28:31.845018 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.845023 17344 net.cpp:165] Memory required for data: 1333761500\nI0817 16:28:31.845032 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:28:31.845043 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:28:31.845051 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:28:31.845057 17344 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:31.845072 17344 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:28:31.845109 17344 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:28:31.845119 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.845124 17344 net.cpp:165] Memory required for data: 1335809500\nI0817 16:28:31.845129 17344 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:28:31.845136 17344 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:28:31.845142 17344 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:28:31.845152 17344 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:28:31.845162 17344 net.cpp:150] Setting up L3_b4_relu\nI0817 16:28:31.845168 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.845173 17344 net.cpp:165] Memory required for data: 1337857500\nI0817 16:28:31.845177 17344 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.845185 17344 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.845190 17344 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:28:31.845196 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:31.845206 17344 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:31.845257 17344 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:31.845268 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.845274 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.845279 17344 net.cpp:165] Memory required for data: 1341953500\nI0817 16:28:31.845284 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:28:31.845299 17344 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:28:31.845304 17344 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:31.845314 17344 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:28:31.846343 17344 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:28:31.846359 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.846364 17344 net.cpp:165] Memory required for data: 1344001500\nI0817 16:28:31.846372 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:28:31.846385 17344 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:28:31.846391 17344 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:28:31.846400 17344 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:28:31.846673 17344 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:28:31.846693 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.846698 17344 net.cpp:165] Memory required for data: 1346049500\nI0817 16:28:31.846709 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:31.846716 17344 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:28:31.846724 17344 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:28:31.846731 17344 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.846793 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:31.846958 17344 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:28:31.846971 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.846976 17344 net.cpp:165] Memory required for data: 1348097500\nI0817 16:28:31.846985 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:28:31.846993 17344 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:28:31.846999 17344 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:28:31.847007 17344 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:31.847015 17344 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:28:31.847023 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.847028 17344 net.cpp:165] Memory required for data: 1350145500\nI0817 16:28:31.847031 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:28:31.847046 17344 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:28:31.847053 17344 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:28:31.847074 17344 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:28:31.848281 17344 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:28:31.848299 17344 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:28:31.848304 17344 net.cpp:165] Memory required for data: 1352193500\nI0817 16:28:31.848312 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:28:31.848325 17344 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:28:31.848331 17344 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:28:31.848340 17344 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:28:31.848608 17344 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:28:31.848621 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.848626 17344 net.cpp:165] Memory required for data: 1354241500\nI0817 16:28:31.848636 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:31.848649 17344 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:28:31.848656 17344 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:28:31.848664 17344 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:28:31.848731 17344 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:31.848893 17344 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:28:31.848906 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.848912 17344 net.cpp:165] Memory required for data: 1356289500\nI0817 16:28:31.848920 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:28:31.848932 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:28:31.848938 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:28:31.848945 17344 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:31.848956 17344 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:28:31.848989 17344 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:28:31.849001 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.849006 17344 net.cpp:165] Memory required for data: 1358337500\nI0817 16:28:31.849011 17344 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:28:31.849022 17344 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:28:31.849028 17344 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:28:31.849035 17344 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:28:31.849045 17344 net.cpp:150] Setting up L3_b5_relu\nI0817 16:28:31.849051 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.849056 17344 net.cpp:165] Memory required for data: 1360385500\nI0817 16:28:31.849061 17344 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.849067 17344 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.849072 17344 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:28:31.849079 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:31.849089 17344 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:31.849138 17344 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:31.849149 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.849155 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.849160 17344 net.cpp:165] Memory required for data: 1364481500\nI0817 16:28:31.849165 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:28:31.849179 17344 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:28:31.849185 17344 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:31.849195 17344 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:28:31.850227 17344 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:28:31.850242 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.850247 17344 net.cpp:165] Memory required for data: 1366529500\nI0817 16:28:31.850263 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:28:31.850281 
17344 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:28:31.850288 17344 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:28:31.850296 17344 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:28:31.850572 17344 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:28:31.850585 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.850590 17344 net.cpp:165] Memory required for data: 1368577500\nI0817 16:28:31.850600 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:31.850608 17344 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:28:31.850615 17344 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:28:31.850622 17344 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.850692 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:31.850858 17344 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:28:31.850870 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.850875 17344 net.cpp:165] Memory required for data: 1370625500\nI0817 16:28:31.850884 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:28:31.850893 17344 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:28:31.850899 17344 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:28:31.850909 17344 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:31.850919 17344 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:28:31.850925 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.850930 17344 net.cpp:165] Memory required for data: 1372673500\nI0817 16:28:31.850935 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:28:31.850949 17344 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:28:31.850955 17344 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:28:31.850962 17344 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:28:31.851999 17344 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:28:31.852015 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852020 17344 net.cpp:165] Memory required for data: 1374721500\nI0817 16:28:31.852027 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:28:31.852039 17344 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:28:31.852046 17344 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:28:31.852054 17344 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:28:31.852327 17344 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:28:31.852341 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852345 17344 net.cpp:165] Memory required for data: 1376769500\nI0817 16:28:31.852355 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:31.852367 17344 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:28:31.852373 17344 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:28:31.852380 17344 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:28:31.852443 17344 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:31.852607 17344 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:28:31.852619 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852624 17344 net.cpp:165] Memory required for data: 1378817500\nI0817 16:28:31.852632 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:28:31.852644 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:28:31.852651 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:28:31.852658 17344 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:31.852669 17344 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:28:31.852710 17344 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:28:31.852721 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852726 17344 net.cpp:165] Memory required for data: 1380865500\nI0817 16:28:31.852731 17344 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:28:31.852748 17344 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:28:31.852756 17344 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:28:31.852762 17344 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:28:31.852772 17344 net.cpp:150] Setting up L3_b6_relu\nI0817 16:28:31.852779 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852784 17344 net.cpp:165] Memory required for data: 1382913500\nI0817 16:28:31.852788 17344 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.852797 17344 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.852802 17344 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:28:31.852808 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:31.852818 17344 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:31.852870 17344 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:31.852882 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852888 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.852893 17344 net.cpp:165] Memory required for data: 1387009500\nI0817 16:28:31.852898 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:28:31.852911 17344 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:28:31.852918 17344 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:31.852927 17344 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:28:31.853986 17344 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:28:31.854002 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.854007 17344 net.cpp:165] Memory required for data: 1389057500\nI0817 16:28:31.854017 17344 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:28:31.854029 17344 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:28:31.854037 17344 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:28:31.854048 17344 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:28:31.854318 17344 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:28:31.854331 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.854336 17344 net.cpp:165] Memory required for data: 1391105500\nI0817 16:28:31.854346 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:31.854354 17344 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:28:31.854360 17344 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:28:31.854372 17344 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.854431 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:31.854590 17344 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:28:31.854602 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.854607 17344 net.cpp:165] Memory required for data: 1393153500\nI0817 16:28:31.854616 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:28:31.854652 17344 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:28:31.854661 17344 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:28:31.854670 17344 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:31.854686 17344 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:28:31.854693 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.854698 17344 net.cpp:165] Memory required for data: 1395201500\nI0817 16:28:31.854704 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:28:31.854718 17344 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:28:31.854724 17344 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:28:31.854733 17344 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:28:31.855773 17344 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:28:31.855788 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.855793 17344 net.cpp:165] Memory required for data: 1397249500\nI0817 16:28:31.855809 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:28:31.855823 17344 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:28:31.855830 17344 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:28:31.855839 17344 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:28:31.856114 17344 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:28:31.856127 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856132 17344 net.cpp:165] Memory required for data: 1399297500\nI0817 16:28:31.856142 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:31.856151 17344 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:28:31.856158 17344 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:28:31.856164 17344 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:28:31.856228 17344 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:31.856390 17344 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:28:31.856403 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856408 17344 net.cpp:165] Memory required for data: 1401345500\nI0817 16:28:31.856417 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:28:31.856426 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:28:31.856432 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:28:31.856439 17344 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:31.856451 17344 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:28:31.856484 17344 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:28:31.856497 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856501 17344 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:28:31.856506 17344 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:28:31.856513 17344 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:28:31.856519 17344 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:28:31.856526 17344 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:28:31.856536 17344 net.cpp:150] Setting up L3_b7_relu\nI0817 16:28:31.856542 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856547 17344 net.cpp:165] Memory required for data: 1405441500\nI0817 16:28:31.856551 17344 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.856561 17344 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.856566 17344 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:28:31.856575 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:31.856583 17344 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:31.856633 17344 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:31.856645 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856652 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.856655 17344 net.cpp:165] Memory required for data: 1409537500\nI0817 16:28:31.856660 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:28:31.856672 17344 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:28:31.856683 17344 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:31.856696 17344 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:28:31.858697 17344 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:28:31.858714 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.858721 17344 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:28:31.858729 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:28:31.858742 17344 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:28:31.858749 17344 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:28:31.858760 17344 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:28:31.859036 17344 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:28:31.859057 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.859062 17344 net.cpp:165] Memory required for data: 1413633500\nI0817 16:28:31.859072 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:31.859081 17344 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:28:31.859087 17344 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:28:31.859099 17344 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.859163 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:31.859328 17344 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:28:31.859340 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.859345 17344 net.cpp:165] Memory required for data: 1415681500\nI0817 16:28:31.859354 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:28:31.859362 17344 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:28:31.859369 17344 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:28:31.859378 17344 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:31.859388 17344 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:28:31.859395 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.859400 17344 net.cpp:165] Memory required for data: 1417729500\nI0817 16:28:31.859405 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:28:31.859418 17344 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:28:31.859426 17344 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:28:31.859433 17344 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:28:31.860463 17344 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:28:31.860486 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.860491 17344 net.cpp:165] Memory required for data: 1419777500\nI0817 16:28:31.860499 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:28:31.860508 17344 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:28:31.860514 17344 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:28:31.860525 17344 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:28:31.860807 17344 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:28:31.860821 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.860826 17344 net.cpp:165] Memory required for data: 1421825500\nI0817 16:28:31.860836 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:31.860848 17344 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:28:31.860854 17344 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:28:31.860862 17344 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:28:31.860921 17344 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:31.861083 17344 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:28:31.861094 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.861099 17344 net.cpp:165] Memory required for data: 1423873500\nI0817 16:28:31.861109 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:28:31.861117 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:28:31.861124 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:28:31.861130 17344 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:31.861141 17344 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:28:31.861176 17344 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:28:31.861183 17344 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:28:31.861188 17344 net.cpp:165] Memory required for data: 1425921500\nI0817 16:28:31.861193 17344 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:28:31.861203 17344 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:28:31.861210 17344 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:28:31.861217 17344 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:28:31.861227 17344 net.cpp:150] Setting up L3_b8_relu\nI0817 16:28:31.861233 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.861238 17344 net.cpp:165] Memory required for data: 1427969500\nI0817 16:28:31.861248 17344 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.861256 17344 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.861261 17344 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:28:31.861268 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:31.861279 17344 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:31.861328 17344 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:31.861340 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.861347 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.861351 17344 net.cpp:165] Memory required for data: 1432065500\nI0817 16:28:31.861356 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:28:31.861371 17344 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:28:31.861377 17344 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:31.861385 17344 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:28:31.862413 17344 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:28:31.862428 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:28:31.862433 17344 net.cpp:165] Memory required for data: 1434113500\nI0817 16:28:31.862442 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:28:31.862454 17344 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:28:31.862460 17344 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:28:31.862471 17344 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:28:31.862753 17344 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:28:31.862767 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.862772 17344 net.cpp:165] Memory required for data: 1436161500\nI0817 16:28:31.862782 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:31.862792 17344 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:28:31.862797 17344 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:28:31.862807 17344 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.862869 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:31.863028 17344 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:28:31.863040 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.863045 17344 net.cpp:165] Memory required for data: 1438209500\nI0817 16:28:31.863054 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:28:31.863061 17344 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:28:31.863067 17344 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:28:31.863077 17344 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:31.863087 17344 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:28:31.863095 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.863098 17344 net.cpp:165] Memory required for data: 1440257500\nI0817 16:28:31.863103 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:28:31.863116 17344 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:28:31.863123 17344 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:28:31.863133 17344 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:28:31.864161 17344 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:28:31.864176 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.864181 17344 net.cpp:165] Memory required for data: 1442305500\nI0817 16:28:31.864189 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:28:31.864198 17344 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:28:31.864205 17344 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:28:31.864215 17344 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:28:31.864483 17344 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:28:31.864498 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.864511 17344 net.cpp:165] Memory required for data: 1444353500\nI0817 16:28:31.864521 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:31.864529 17344 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:28:31.864536 17344 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:28:31.864543 17344 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:28:31.864601 17344 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:31.864773 17344 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:28:31.864787 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.864792 17344 net.cpp:165] Memory required for data: 1446401500\nI0817 16:28:31.864801 17344 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:28:31.864814 17344 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:28:31.864821 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:28:31.864828 17344 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:31.864836 17344 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:28:31.864874 17344 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:28:31.864886 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.864890 17344 net.cpp:165] Memory required for data: 1448449500\nI0817 16:28:31.864897 17344 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:28:31.864903 17344 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:28:31.864909 17344 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:28:31.864917 17344 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:28:31.864925 17344 net.cpp:150] Setting up L3_b9_relu\nI0817 16:28:31.864933 17344 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:31.864936 17344 net.cpp:165] Memory required for data: 1450497500\nI0817 16:28:31.864941 17344 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:28:31.864949 17344 net.cpp:100] Creating Layer post_pool\nI0817 16:28:31.864954 17344 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:28:31.864966 17344 net.cpp:408] post_pool -> post_pool\nI0817 16:28:31.865001 17344 net.cpp:150] Setting up post_pool\nI0817 16:28:31.865012 17344 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:28:31.865017 17344 net.cpp:165] Memory required for data: 1450529500\nI0817 16:28:31.865022 17344 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:28:31.865034 17344 net.cpp:100] Creating Layer post_FC\nI0817 16:28:31.865041 17344 net.cpp:434] post_FC <- post_pool\nI0817 16:28:31.865051 17344 net.cpp:408] post_FC -> post_FC_top\nI0817 16:28:31.865217 17344 net.cpp:150] Setting up post_FC\nI0817 16:28:31.865231 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.865236 17344 net.cpp:165] Memory required for data: 1450534500\nI0817 16:28:31.865244 17344 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:28:31.865252 17344 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:28:31.865257 17344 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:28:31.865268 17344 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:28:31.865278 17344 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:28:31.865331 17344 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:28:31.865342 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.865348 17344 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:31.865353 17344 net.cpp:165] Memory required for data: 1450544500\nI0817 16:28:31.865358 17344 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:28:31.865366 17344 net.cpp:100] Creating Layer accuracy\nI0817 16:28:31.865372 17344 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:28:31.865380 17344 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:28:31.865386 17344 net.cpp:408] accuracy -> accuracy\nI0817 16:28:31.865398 17344 net.cpp:150] Setting up accuracy\nI0817 16:28:31.865404 17344 net.cpp:157] Top shape: (1)\nI0817 16:28:31.865417 17344 net.cpp:165] Memory required for data: 1450544504\nI0817 16:28:31.865422 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:31.865429 17344 net.cpp:100] Creating Layer loss\nI0817 16:28:31.865435 17344 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:28:31.865442 17344 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:28:31.865452 17344 net.cpp:408] loss -> loss\nI0817 16:28:31.865464 17344 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:31.865588 17344 net.cpp:150] Setting up loss\nI0817 16:28:31.865600 17344 net.cpp:157] Top shape: (1)\nI0817 16:28:31.865605 17344 net.cpp:160]     with loss weight 1\nI0817 16:28:31.865622 17344 net.cpp:165] Memory required for data: 1450544508\nI0817 16:28:31.865628 17344 net.cpp:226] loss needs backward computation.\nI0817 16:28:31.865634 17344 net.cpp:228] accuracy does not need backward computation.\nI0817 16:28:31.865640 17344 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:28:31.865645 17344 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:28:31.865650 17344 net.cpp:226] post_pool needs backward computation.\nI0817 16:28:31.865655 17344 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:28:31.865659 17344 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.865665 17344 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.865669 17344 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.865674 17344 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.865686 17344 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.865691 17344 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.865696 17344 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.865701 17344 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.865707 17344 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:28:31.865712 17344 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:28:31.865717 17344 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.865722 17344 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.865727 17344 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.865732 17344 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.865737 17344 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.865742 17344 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:28:31.865747 17344 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.865752 17344 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.865757 17344 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:28:31.865762 17344 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:28:31.865767 17344 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:28:31.865773 17344 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.865778 17344 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.865787 17344 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.865792 17344 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.865797 17344 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.865802 17344 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.865806 17344 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.865813 17344 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:28:31.865818 17344 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:28:31.865823 17344 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.865828 17344 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.865833 17344 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.865844 17344 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.865850 17344 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.865855 17344 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.865860 17344 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.865865 17344 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.865870 17344 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:28:31.865875 17344 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:28:31.865880 17344 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:28:31.865886 17344 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.865891 17344 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.865896 17344 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.865901 17344 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.865906 17344 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.865911 17344 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.865916 17344 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.865921 17344 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:28:31.865926 17344 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:28:31.865932 17344 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.865937 17344 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.865942 17344 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.865948 17344 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.865953 17344 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.865958 17344 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.865962 17344 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.865968 17344 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.865973 17344 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:28:31.865978 17344 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:28:31.865983 17344 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:28:31.865988 17344 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.865993 17344 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.865999 17344 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.866004 17344 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.866008 17344 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:28:31.866014 
17344 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.866019 17344 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.866024 17344 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:28:31.866029 17344 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:28:31.866034 17344 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.866040 17344 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.866046 17344 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.866051 17344 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.866056 17344 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.866065 17344 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.866070 17344 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.866075 17344 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.866080 17344 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:28:31.866086 17344 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:28:31.866097 17344 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:28:31.866102 17344 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:28:31.866108 17344 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.866114 17344 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:28:31.866119 17344 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.866124 17344 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.866129 17344 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.866134 17344 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.866140 17344 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:28:31.866145 17344 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.866150 17344 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.866156 17344 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:28:31.866161 17344 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:28:31.866166 17344 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.866173 17344 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.866178 17344 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.866183 17344 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.866189 17344 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.866194 17344 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.866199 17344 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.866204 17344 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.866209 17344 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:28:31.866214 17344 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:28:31.866220 17344 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.866225 17344 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.866231 17344 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.866236 17344 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.866241 17344 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.866246 17344 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:28:31.866251 17344 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.866257 17344 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.866262 17344 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:28:31.866267 17344 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:28:31.866272 17344 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:28:31.866278 17344 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.866283 17344 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.866289 17344 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.866294 17344 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.866299 17344 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.866304 17344 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.866309 17344 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.866315 17344 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:28:31.866320 17344 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:28:31.866325 17344 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.866331 17344 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.866338 17344 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.866343 17344 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.866353 17344 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.866358 17344 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.866364 17344 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.866369 17344 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.866374 17344 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:28:31.866379 17344 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:28:31.866384 17344 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:28:31.866390 17344 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.866396 17344 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.866401 17344 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.866406 17344 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.866411 17344 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.866417 17344 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.866422 17344 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.866428 17344 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:28:31.866437 17344 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:28:31.866443 17344 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.866449 17344 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.866454 17344 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.866461 17344 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.866466 17344 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.866472 17344 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.866477 17344 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.866482 17344 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.866487 17344 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:28:31.866493 17344 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:28:31.866498 17344 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:28:31.866504 17344 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.866510 17344 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.866515 17344 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.866521 17344 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.866526 17344 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:28:31.866533 17344 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.866538 17344 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.866544 17344 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:28:31.866549 17344 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:28:31.866554 17344 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.866560 17344 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.866565 17344 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.866570 17344 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.866576 17344 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.866581 17344 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.866586 17344 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.866592 17344 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.866598 17344 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:28:31.866603 17344 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:28:31.866611 17344 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:28:31.866619 17344 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:28:31.866626 17344 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.866632 17344 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:28:31.866637 17344 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.866643 17344 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.866649 17344 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.866654 17344 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.866660 17344 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:28:31.866665 17344 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.866670 17344 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.866683 17344 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:28:31.866689 17344 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:28:31.866695 17344 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:28:31.866701 17344 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:28:31.866708 17344 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:28:31.866713 17344 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:28:31.866719 17344 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:28:31.866724 17344 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:28:31.866729 17344 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:28:31.866735 17344 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:28:31.866741 17344 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:28:31.866747 17344 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:28:31.866752 17344 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:28:31.866758 17344 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:28:31.866765 17344 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:28:31.866770 17344 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:28:31.866775 17344 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:28:31.866780 17344 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:28:31.866786 17344 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:28:31.866792 17344 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:28:31.866798 17344 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:28:31.866803 17344 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:28:31.866809 17344 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:28:31.866816 17344 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:28:31.866822 17344 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:28:31.866827 17344 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:28:31.866833 17344 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:28:31.866839 17344 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:28:31.866844 17344 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:28:31.866850 17344 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:28:31.866856 17344 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:28:31.866863 17344 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:28:31.866868 17344 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:28:31.866873 17344 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:28:31.866879 17344 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:28:31.866885 17344 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:28:31.866891 17344 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:28:31.866896 17344 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:28:31.866907 17344 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:28:31.866914 17344 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:28:31.866919 17344 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:28:31.866925 17344 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:28:31.866931 17344 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:28:31.866937 17344 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:28:31.866943 17344 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:28:31.866950 17344 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:28:31.866955 17344 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:28:31.866961 17344 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:28:31.866966 17344 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:28:31.866971 17344 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:28:31.866977 17344 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:28:31.866983 17344 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:28:31.866988 17344 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:28:31.866994 17344 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:28:31.867000 17344 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:28:31.867007 17344 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:28:31.867012 17344 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:28:31.867017 17344 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:28:31.867022 17344 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:28:31.867028 17344 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:28:31.867034 17344 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:28:31.867039 17344 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:28:31.867046 17344 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:28:31.867053 17344 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:28:31.867058 17344 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:28:31.867063 17344 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:28:31.867069 17344 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:28:31.867074 17344 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:28:31.867080 17344 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:28:31.867086 17344 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:28:31.867092 17344 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:28:31.867099 17344 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:28:31.867103 17344 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:28:31.867110 17344 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:28:31.867115 17344 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:28:31.867121 17344 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:28:31.867130 17344 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:28:31.867136 17344 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:28:31.867141 17344 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:28:31.867147 17344 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:28:31.867153 17344 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:28:31.867158 17344 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:28:31.867164 17344 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:28:31.867172 17344 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:28:31.867177 17344 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:28:31.867188 17344 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:28:31.867194 17344 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:28:31.867200 17344 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:28:31.867207 17344 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:28:31.867211 17344 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:28:31.867218 17344 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:28:31.867223 17344 net.cpp:226] pre_relu needs backward computation.\nI0817 16:28:31.867228 17344 net.cpp:226] pre_scale needs backward computation.\nI0817 16:28:31.867233 17344 net.cpp:226] pre_bn needs backward computation.\nI0817 16:28:31.867239 17344 net.cpp:226] pre_conv needs backward computation.\nI0817 16:28:31.867245 17344 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:28:31.867252 17344 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:28:31.867256 17344 net.cpp:270] This network produces output accuracy\nI0817 16:28:31.867264 17344 net.cpp:270] This network produces output loss\nI0817 16:28:31.867588 17344 net.cpp:283] Network initialization done.\nI0817 16:28:31.868587 17344 solver.cpp:60] Solver scaffolding done.\nI0817 16:28:32.096266 17344 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:28:32.462072 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:32.462149 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:32.469110 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:32.693536 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:32.693655 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:32.729099 17344 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:28:32.729207 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:33.187424 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:33.187479 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:33.195483 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:33.441906 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:33.442018 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:33.493685 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:33.493793 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:34.006250 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:34.006312 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:34.014741 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:34.291121 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:34.291254 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:34.363314 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:34.363445 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:34.447515 17344 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:28:34.932662 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:34.932739 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:28:34.942234 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:35.230468 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:35.230661 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:35.322640 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:35.322825 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:35.978947 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:35.979007 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:35.989825 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:36.306751 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:36.306968 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:36.420578 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:36.420792 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:37.135277 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:37.135345 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:37.146350 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:37.483019 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:37.483278 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:37.617419 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:37.617663 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:28:38.417013 17344 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:38.417078 17344 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:38.429313 17344 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:38.474267 17350 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:38.528187 17372 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:38.851897 17344 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:38.852171 17344 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:39.004619 17344 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:39.004884 17344 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:39.176342 17344 parallel.cpp:425] Starting Optimization\nI0817 16:28:39.177673 17344 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:28:39.177693 17344 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:28:39.181874 17344 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:30:00.187687 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:30:00.187985 17344 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:30:04.180085 17344 solver.cpp:228] Iteration 0, loss = 5.37523\nI0817 16:30:04.180124 17344 solver.cpp:244]     Train net output #0: accuracy = 0.112\nI0817 16:30:04.180140 17344 solver.cpp:244]     Train net output #1: loss = 5.37523 (* 1 = 5.37523 loss)\nI0817 16:30:04.180308 17344 sgd_solver.cpp:166] Iteration 0, lr = 0.0035\nI0817 16:32:21.196066 17344 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:33:41.659505 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21316\nI0817 16:33:41.659778 17344 solver.cpp:404]     Test net output #1: loss = 2.72322 (* 1 = 2.72322 loss)\nI0817 16:33:42.965890 
17344 solver.cpp:228] Iteration 100, loss = 1.09424\nI0817 16:33:42.965925 17344 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0817 16:33:42.965940 17344 solver.cpp:244]     Train net output #1: loss = 1.09424 (* 1 = 1.09424 loss)\nI0817 16:33:43.069293 17344 sgd_solver.cpp:166] Iteration 100, lr = 0.0035\nI0817 16:35:59.716998 17344 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:37:20.049091 17344 solver.cpp:404]     Test net output #0: accuracy = 0.43708\nI0817 16:37:20.049352 17344 solver.cpp:404]     Test net output #1: loss = 1.83767 (* 1 = 1.83767 loss)\nI0817 16:37:21.355286 17344 solver.cpp:228] Iteration 200, loss = 0.773618\nI0817 16:37:21.355325 17344 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 16:37:21.355340 17344 solver.cpp:244]     Train net output #1: loss = 0.773618 (* 1 = 0.773618 loss)\nI0817 16:37:21.455905 17344 sgd_solver.cpp:166] Iteration 200, lr = 0.0035\nI0817 16:39:38.224401 17344 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:40:58.538489 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35448\nI0817 16:40:58.538733 17344 solver.cpp:404]     Test net output #1: loss = 2.29267 (* 1 = 2.29267 loss)\nI0817 16:40:59.844420 17344 solver.cpp:228] Iteration 300, loss = 0.658915\nI0817 16:40:59.844462 17344 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 16:40:59.844478 17344 solver.cpp:244]     Train net output #1: loss = 0.658915 (* 1 = 0.658915 loss)\nI0817 16:40:59.939013 17344 sgd_solver.cpp:166] Iteration 300, lr = 0.0035\nI0817 16:43:16.611513 17344 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:44:36.950932 17344 solver.cpp:404]     Test net output #0: accuracy = 0.21608\nI0817 16:44:36.951195 17344 solver.cpp:404]     Test net output #1: loss = 5.23741 (* 1 = 5.23741 loss)\nI0817 16:44:38.257619 17344 solver.cpp:228] Iteration 400, loss = 0.548134\nI0817 16:44:38.257660 17344 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 
16:44:38.257675 17344 solver.cpp:244]     Train net output #1: loss = 0.548134 (* 1 = 0.548134 loss)\nI0817 16:44:38.350256 17344 sgd_solver.cpp:166] Iteration 400, lr = 0.0035\nI0817 16:46:55.030285 17344 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:48:15.363375 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50412\nI0817 16:48:15.363632 17344 solver.cpp:404]     Test net output #1: loss = 2.07708 (* 1 = 2.07708 loss)\nI0817 16:48:16.669451 17344 solver.cpp:228] Iteration 500, loss = 0.498619\nI0817 16:48:16.669492 17344 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:48:16.669507 17344 solver.cpp:244]     Train net output #1: loss = 0.498619 (* 1 = 0.498619 loss)\nI0817 16:48:16.765346 17344 sgd_solver.cpp:166] Iteration 500, lr = 0.0035\nI0817 16:50:33.390431 17344 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:51:53.721653 17344 solver.cpp:404]     Test net output #0: accuracy = 0.40648\nI0817 16:51:53.721941 17344 solver.cpp:404]     Test net output #1: loss = 2.87681 (* 1 = 2.87681 loss)\nI0817 16:51:55.028084 17344 solver.cpp:228] Iteration 600, loss = 0.373111\nI0817 16:51:55.028127 17344 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:51:55.028142 17344 solver.cpp:244]     Train net output #1: loss = 0.373111 (* 1 = 0.373111 loss)\nI0817 16:51:55.123486 17344 sgd_solver.cpp:166] Iteration 600, lr = 0.0035\nI0817 16:54:11.700883 17344 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:55:32.058159 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28804\nI0817 16:55:32.058421 17344 solver.cpp:404]     Test net output #1: loss = 4.10924 (* 1 = 4.10924 loss)\nI0817 16:55:33.364704 17344 solver.cpp:228] Iteration 700, loss = 0.400322\nI0817 16:55:33.364753 17344 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 16:55:33.364769 17344 solver.cpp:244]     Train net output #1: loss = 0.400322 (* 1 = 0.400322 loss)\nI0817 16:55:33.463815 17344 
sgd_solver.cpp:166] Iteration 700, lr = 0.0035\nI0817 16:57:50.036481 17344 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:59:10.379658 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32268\nI0817 16:59:10.379936 17344 solver.cpp:404]     Test net output #1: loss = 4.36484 (* 1 = 4.36484 loss)\nI0817 16:59:11.685591 17344 solver.cpp:228] Iteration 800, loss = 0.335632\nI0817 16:59:11.685632 17344 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:59:11.685648 17344 solver.cpp:244]     Train net output #1: loss = 0.335632 (* 1 = 0.335632 loss)\nI0817 16:59:11.783869 17344 sgd_solver.cpp:166] Iteration 800, lr = 0.0035\nI0817 17:01:28.369104 17344 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 17:02:48.711009 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24496\nI0817 17:02:48.711269 17344 solver.cpp:404]     Test net output #1: loss = 4.89038 (* 1 = 4.89038 loss)\nI0817 17:02:50.017297 17344 solver.cpp:228] Iteration 900, loss = 0.206424\nI0817 17:02:50.017339 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:02:50.017355 17344 solver.cpp:244]     Train net output #1: loss = 0.206424 (* 1 = 0.206424 loss)\nI0817 17:02:50.118394 17344 sgd_solver.cpp:166] Iteration 900, lr = 0.0035\nI0817 17:05:06.787665 17344 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 17:06:27.115995 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24744\nI0817 17:06:27.116228 17344 solver.cpp:404]     Test net output #1: loss = 5.261 (* 1 = 5.261 loss)\nI0817 17:06:28.421936 17344 solver.cpp:228] Iteration 1000, loss = 0.25802\nI0817 17:06:28.421978 17344 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:06:28.421993 17344 solver.cpp:244]     Train net output #1: loss = 0.25802 (* 1 = 0.25802 loss)\nI0817 17:06:28.529928 17344 sgd_solver.cpp:166] Iteration 1000, lr = 0.0035\nI0817 17:08:45.160326 17344 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 17:10:05.485200 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.30868\nI0817 17:10:05.485448 17344 solver.cpp:404]     Test net output #1: loss = 4.00648 (* 1 = 4.00648 loss)\nI0817 17:10:06.791074 17344 solver.cpp:228] Iteration 1100, loss = 0.226215\nI0817 17:10:06.791113 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:10:06.791129 17344 solver.cpp:244]     Train net output #1: loss = 0.226215 (* 1 = 0.226215 loss)\nI0817 17:10:06.884966 17344 sgd_solver.cpp:166] Iteration 1100, lr = 0.0035\nI0817 17:12:23.510802 17344 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 17:13:43.965200 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22796\nI0817 17:13:43.965435 17344 solver.cpp:404]     Test net output #1: loss = 5.04911 (* 1 = 5.04911 loss)\nI0817 17:13:45.271112 17344 solver.cpp:228] Iteration 1200, loss = 0.263101\nI0817 17:13:45.271153 17344 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:13:45.271169 17344 solver.cpp:244]     Train net output #1: loss = 0.263101 (* 1 = 0.263101 loss)\nI0817 17:13:45.373548 17344 sgd_solver.cpp:166] Iteration 1200, lr = 0.0035\nI0817 17:16:01.916391 17344 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:17:22.373674 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26196\nI0817 17:17:22.373920 17344 solver.cpp:404]     Test net output #1: loss = 3.3227 (* 1 = 3.3227 loss)\nI0817 17:17:23.679692 17344 solver.cpp:228] Iteration 1300, loss = 0.274825\nI0817 17:17:23.679733 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:17:23.679749 17344 solver.cpp:244]     Train net output #1: loss = 0.274825 (* 1 = 0.274825 loss)\nI0817 17:17:23.777591 17344 sgd_solver.cpp:166] Iteration 1300, lr = 0.0035\nI0817 17:19:40.250509 17344 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:21:00.708992 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2606\nI0817 17:21:00.709245 17344 solver.cpp:404]     Test net output #1: loss = 4.16586 
(* 1 = 4.16586 loss)\nI0817 17:21:02.015202 17344 solver.cpp:228] Iteration 1400, loss = 0.321468\nI0817 17:21:02.015239 17344 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:21:02.015254 17344 solver.cpp:244]     Train net output #1: loss = 0.321468 (* 1 = 0.321468 loss)\nI0817 17:21:02.106828 17344 sgd_solver.cpp:166] Iteration 1400, lr = 0.0035\nI0817 17:23:18.623668 17344 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:24:39.085276 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28692\nI0817 17:24:39.085538 17344 solver.cpp:404]     Test net output #1: loss = 3.45777 (* 1 = 3.45777 loss)\nI0817 17:24:40.391333 17344 solver.cpp:228] Iteration 1500, loss = 0.165389\nI0817 17:24:40.391372 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:24:40.391388 17344 solver.cpp:244]     Train net output #1: loss = 0.165389 (* 1 = 0.165389 loss)\nI0817 17:24:40.485303 17344 sgd_solver.cpp:166] Iteration 1500, lr = 0.0035\nI0817 17:26:56.964732 17344 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:28:17.411725 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16112\nI0817 17:28:17.411996 17344 solver.cpp:404]     Test net output #1: loss = 4.42588 (* 1 = 4.42588 loss)\nI0817 17:28:18.717329 17344 solver.cpp:228] Iteration 1600, loss = 0.183875\nI0817 17:28:18.717370 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:28:18.717384 17344 solver.cpp:244]     Train net output #1: loss = 0.183875 (* 1 = 0.183875 loss)\nI0817 17:28:18.817013 17344 sgd_solver.cpp:166] Iteration 1600, lr = 0.0035\nI0817 17:30:35.308480 17344 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:31:55.746912 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1828\nI0817 17:31:55.747102 17344 solver.cpp:404]     Test net output #1: loss = 5.13626 (* 1 = 5.13626 loss)\nI0817 17:31:57.052733 17344 solver.cpp:228] Iteration 1700, loss = 0.167872\nI0817 17:31:57.052773 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:31:57.052793 17344 solver.cpp:244]     Train net output #1: loss = 0.167872 (* 1 = 0.167872 loss)\nI0817 17:31:57.146080 17344 sgd_solver.cpp:166] Iteration 1700, lr = 0.0035\nI0817 17:34:13.603772 17344 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:35:34.034579 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17916\nI0817 17:35:34.034832 17344 solver.cpp:404]     Test net output #1: loss = 4.18906 (* 1 = 4.18906 loss)\nI0817 17:35:35.340575 17344 solver.cpp:228] Iteration 1800, loss = 0.189084\nI0817 17:35:35.340615 17344 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:35:35.340631 17344 solver.cpp:244]     Train net output #1: loss = 0.189084 (* 1 = 0.189084 loss)\nI0817 17:35:35.432535 17344 sgd_solver.cpp:166] Iteration 1800, lr = 0.0035\nI0817 17:37:51.954664 17344 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:39:12.386375 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15328\nI0817 17:39:12.386584 17344 solver.cpp:404]     Test net output #1: loss = 5.11118 (* 1 = 5.11118 loss)\nI0817 17:39:13.691989 17344 solver.cpp:228] Iteration 1900, loss = 0.21197\nI0817 17:39:13.692031 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:39:13.692047 17344 solver.cpp:244]     Train net output #1: loss = 0.21197 (* 1 = 0.21197 loss)\nI0817 17:39:13.785300 17344 sgd_solver.cpp:166] Iteration 1900, lr = 0.0035\nI0817 17:41:30.218291 17344 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:42:50.651170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27568\nI0817 17:42:50.651370 17344 solver.cpp:404]     Test net output #1: loss = 3.32202 (* 1 = 3.32202 loss)\nI0817 17:42:51.956887 17344 solver.cpp:228] Iteration 2000, loss = 0.147139\nI0817 17:42:51.956928 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:42:51.956943 17344 solver.cpp:244]     Train net output #1: loss = 0.147139 
(* 1 = 0.147139 loss)\nI0817 17:42:52.048614 17344 sgd_solver.cpp:166] Iteration 2000, lr = 0.0035\nI0817 17:45:08.419706 17344 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:46:28.843032 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22644\nI0817 17:46:28.843255 17344 solver.cpp:404]     Test net output #1: loss = 4.50972 (* 1 = 4.50972 loss)\nI0817 17:46:30.148831 17344 solver.cpp:228] Iteration 2100, loss = 0.139751\nI0817 17:46:30.148874 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:46:30.148890 17344 solver.cpp:244]     Train net output #1: loss = 0.139751 (* 1 = 0.139751 loss)\nI0817 17:46:30.243686 17344 sgd_solver.cpp:166] Iteration 2100, lr = 0.0035\nI0817 17:48:46.606717 17344 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:50:07.049479 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32216\nI0817 17:50:07.049713 17344 solver.cpp:404]     Test net output #1: loss = 2.82432 (* 1 = 2.82432 loss)\nI0817 17:50:08.354988 17344 solver.cpp:228] Iteration 2200, loss = 0.174578\nI0817 17:50:08.355031 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:50:08.355047 17344 solver.cpp:244]     Train net output #1: loss = 0.174578 (* 1 = 0.174578 loss)\nI0817 17:50:08.454040 17344 sgd_solver.cpp:166] Iteration 2200, lr = 0.0035\nI0817 17:52:24.781679 17344 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:53:45.225555 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27736\nI0817 17:53:45.225749 17344 solver.cpp:404]     Test net output #1: loss = 3.03978 (* 1 = 3.03978 loss)\nI0817 17:53:46.531378 17344 solver.cpp:228] Iteration 2300, loss = 0.173107\nI0817 17:53:46.531419 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:53:46.531435 17344 solver.cpp:244]     Train net output #1: loss = 0.173107 (* 1 = 0.173107 loss)\nI0817 17:53:46.623059 17344 sgd_solver.cpp:166] Iteration 2300, lr = 0.0035\nI0817 17:56:03.070798 17344 
solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:57:23.506224 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23472\nI0817 17:57:23.506428 17344 solver.cpp:404]     Test net output #1: loss = 3.94939 (* 1 = 3.94939 loss)\nI0817 17:57:24.812166 17344 solver.cpp:228] Iteration 2400, loss = 0.1692\nI0817 17:57:24.812207 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:57:24.812223 17344 solver.cpp:244]     Train net output #1: loss = 0.1692 (* 1 = 0.1692 loss)\nI0817 17:57:24.906585 17344 sgd_solver.cpp:166] Iteration 2400, lr = 0.0035\nI0817 17:59:41.363674 17344 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 18:01:01.798319 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1828\nI0817 18:01:01.798560 17344 solver.cpp:404]     Test net output #1: loss = 5.06945 (* 1 = 5.06945 loss)\nI0817 18:01:03.103577 17344 solver.cpp:228] Iteration 2500, loss = 0.119131\nI0817 18:01:03.103621 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:01:03.103636 17344 solver.cpp:244]     Train net output #1: loss = 0.119131 (* 1 = 0.119131 loss)\nI0817 18:01:03.201973 17344 sgd_solver.cpp:166] Iteration 2500, lr = 0.0035\nI0817 18:03:19.506057 17344 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 18:04:39.944581 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2104\nI0817 18:04:39.944772 17344 solver.cpp:404]     Test net output #1: loss = 4.27436 (* 1 = 4.27436 loss)\nI0817 18:04:41.250310 17344 solver.cpp:228] Iteration 2600, loss = 0.156368\nI0817 18:04:41.250350 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:04:41.250366 17344 solver.cpp:244]     Train net output #1: loss = 0.156368 (* 1 = 0.156368 loss)\nI0817 18:04:41.343014 17344 sgd_solver.cpp:166] Iteration 2600, lr = 0.0035\nI0817 18:06:57.858508 17344 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 18:08:18.295588 17344 solver.cpp:404]     Test net output #0: accuracy = 0.13716\nI0817 
18:08:18.295811 17344 solver.cpp:404]     Test net output #1: loss = 5.05374 (* 1 = 5.05374 loss)\nI0817 18:08:19.601341 17344 solver.cpp:228] Iteration 2700, loss = 0.11895\nI0817 18:08:19.601384 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:08:19.601400 17344 solver.cpp:244]     Train net output #1: loss = 0.11895 (* 1 = 0.11895 loss)\nI0817 18:08:19.695708 17344 sgd_solver.cpp:166] Iteration 2700, lr = 0.0035\nI0817 18:10:36.070994 17344 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 18:11:56.506690 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17884\nI0817 18:11:56.506927 17344 solver.cpp:404]     Test net output #1: loss = 4.15747 (* 1 = 4.15747 loss)\nI0817 18:11:57.812214 17344 solver.cpp:228] Iteration 2800, loss = 0.0800687\nI0817 18:11:57.812254 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:11:57.812270 17344 solver.cpp:244]     Train net output #1: loss = 0.0800687 (* 1 = 0.0800687 loss)\nI0817 18:11:57.911671 17344 sgd_solver.cpp:166] Iteration 2800, lr = 0.0035\nI0817 18:14:14.303534 17344 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:15:34.748170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.17532\nI0817 18:15:34.748395 17344 solver.cpp:404]     Test net output #1: loss = 4.04596 (* 1 = 4.04596 loss)\nI0817 18:15:36.053529 17344 solver.cpp:228] Iteration 2900, loss = 0.172446\nI0817 18:15:36.053570 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:15:36.053586 17344 solver.cpp:244]     Train net output #1: loss = 0.172446 (* 1 = 0.172446 loss)\nI0817 18:15:36.151716 17344 sgd_solver.cpp:166] Iteration 2900, lr = 0.0035\nI0817 18:17:52.704133 17344 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:19:13.141376 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3232\nI0817 18:19:13.141618 17344 solver.cpp:404]     Test net output #1: loss = 2.94632 (* 1 = 2.94632 loss)\nI0817 18:19:14.447192 17344 
solver.cpp:228] Iteration 3000, loss = 0.0864478\nI0817 18:19:14.447232 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:19:14.447248 17344 solver.cpp:244]     Train net output #1: loss = 0.0864478 (* 1 = 0.0864478 loss)\nI0817 18:19:14.545816 17344 sgd_solver.cpp:166] Iteration 3000, lr = 0.0035\nI0817 18:21:31.120860 17344 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:22:51.550961 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2906\nI0817 18:22:51.551204 17344 solver.cpp:404]     Test net output #1: loss = 3.18708 (* 1 = 3.18708 loss)\nI0817 18:22:52.856665 17344 solver.cpp:228] Iteration 3100, loss = 0.134259\nI0817 18:22:52.856706 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:22:52.856722 17344 solver.cpp:244]     Train net output #1: loss = 0.134259 (* 1 = 0.134259 loss)\nI0817 18:22:52.957249 17344 sgd_solver.cpp:166] Iteration 3100, lr = 0.0035\nI0817 18:25:09.473628 17344 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:26:29.914621 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32308\nI0817 18:26:29.914849 17344 solver.cpp:404]     Test net output #1: loss = 2.76997 (* 1 = 2.76997 loss)\nI0817 18:26:31.220490 17344 solver.cpp:228] Iteration 3200, loss = 0.172652\nI0817 18:26:31.220531 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:26:31.220547 17344 solver.cpp:244]     Train net output #1: loss = 0.172652 (* 1 = 0.172652 loss)\nI0817 18:26:31.313748 17344 sgd_solver.cpp:166] Iteration 3200, lr = 0.0035\nI0817 18:28:47.822983 17344 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:30:08.262734 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32536\nI0817 18:30:08.262965 17344 solver.cpp:404]     Test net output #1: loss = 2.5829 (* 1 = 2.5829 loss)\nI0817 18:30:09.568094 17344 solver.cpp:228] Iteration 3300, loss = 0.158911\nI0817 18:30:09.568136 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI0817 18:30:09.568159 17344 solver.cpp:244]     Train net output #1: loss = 0.158911 (* 1 = 0.158911 loss)\nI0817 18:30:09.664252 17344 sgd_solver.cpp:166] Iteration 3300, lr = 0.0035\nI0817 18:32:26.127650 17344 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:33:46.557626 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25736\nI0817 18:33:46.557868 17344 solver.cpp:404]     Test net output #1: loss = 3.53029 (* 1 = 3.53029 loss)\nI0817 18:33:47.863661 17344 solver.cpp:228] Iteration 3400, loss = 0.126236\nI0817 18:33:47.863703 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:33:47.863718 17344 solver.cpp:244]     Train net output #1: loss = 0.126236 (* 1 = 0.126236 loss)\nI0817 18:33:47.955298 17344 sgd_solver.cpp:166] Iteration 3400, lr = 0.0035\nI0817 18:36:04.437958 17344 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:37:24.866217 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26968\nI0817 18:37:24.866462 17344 solver.cpp:404]     Test net output #1: loss = 3.59176 (* 1 = 3.59176 loss)\nI0817 18:37:26.172935 17344 solver.cpp:228] Iteration 3500, loss = 0.118885\nI0817 18:37:26.172976 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:37:26.172997 17344 solver.cpp:244]     Train net output #1: loss = 0.118885 (* 1 = 0.118885 loss)\nI0817 18:37:26.270969 17344 sgd_solver.cpp:166] Iteration 3500, lr = 0.0035\nI0817 18:39:42.875592 17344 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:41:03.307510 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26052\nI0817 18:41:03.307760 17344 solver.cpp:404]     Test net output #1: loss = 3.57941 (* 1 = 3.57941 loss)\nI0817 18:41:04.614187 17344 solver.cpp:228] Iteration 3600, loss = 0.0569366\nI0817 18:41:04.614228 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:41:04.614243 17344 solver.cpp:244]     Train net output #1: loss = 0.0569365 (* 1 = 0.0569365 loss)\nI0817 18:41:04.708299 
17344 sgd_solver.cpp:166] Iteration 3600, lr = 0.0035\nI0817 18:43:21.246924 17344 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:44:41.681327 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3076\nI0817 18:44:41.681555 17344 solver.cpp:404]     Test net output #1: loss = 2.8081 (* 1 = 2.8081 loss)\nI0817 18:44:42.988513 17344 solver.cpp:228] Iteration 3700, loss = 0.0988409\nI0817 18:44:42.988557 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:44:42.988574 17344 solver.cpp:244]     Train net output #1: loss = 0.0988409 (* 1 = 0.0988409 loss)\nI0817 18:44:43.084415 17344 sgd_solver.cpp:166] Iteration 3700, lr = 0.0035\nI0817 18:47:00.241912 17344 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:48:20.676112 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4456\nI0817 18:48:20.676358 17344 solver.cpp:404]     Test net output #1: loss = 2.03061 (* 1 = 2.03061 loss)\nI0817 18:48:21.982866 17344 solver.cpp:228] Iteration 3800, loss = 0.16873\nI0817 18:48:21.982905 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:48:21.982920 17344 solver.cpp:244]     Train net output #1: loss = 0.16873 (* 1 = 0.16873 loss)\nI0817 18:48:22.073206 17344 sgd_solver.cpp:166] Iteration 3800, lr = 0.0035\nI0817 18:50:39.231889 17344 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:51:59.662120 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29868\nI0817 18:51:59.662340 17344 solver.cpp:404]     Test net output #1: loss = 3.47347 (* 1 = 3.47347 loss)\nI0817 18:52:00.969123 17344 solver.cpp:228] Iteration 3900, loss = 0.0681398\nI0817 18:52:00.969163 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:52:00.969178 17344 solver.cpp:244]     Train net output #1: loss = 0.0681398 (* 1 = 0.0681398 loss)\nI0817 18:52:01.065333 17344 sgd_solver.cpp:166] Iteration 3900, lr = 0.0035\nI0817 18:54:18.187276 17344 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 
18:55:38.509244 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3448\nI0817 18:55:38.509486 17344 solver.cpp:404]     Test net output #1: loss = 2.68828 (* 1 = 2.68828 loss)\nI0817 18:55:39.815806 17344 solver.cpp:228] Iteration 4000, loss = 0.0633444\nI0817 18:55:39.815846 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:55:39.815862 17344 solver.cpp:244]     Train net output #1: loss = 0.0633444 (* 1 = 0.0633444 loss)\nI0817 18:55:39.904829 17344 sgd_solver.cpp:166] Iteration 4000, lr = 0.0035\nI0817 18:57:57.068231 17344 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:59:17.379196 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35416\nI0817 18:59:17.379449 17344 solver.cpp:404]     Test net output #1: loss = 3.31521 (* 1 = 3.31521 loss)\nI0817 18:59:18.685472 17344 solver.cpp:228] Iteration 4100, loss = 0.153743\nI0817 18:59:18.685511 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:59:18.685526 17344 solver.cpp:244]     Train net output #1: loss = 0.153743 (* 1 = 0.153743 loss)\nI0817 18:59:18.778373 17344 sgd_solver.cpp:166] Iteration 4100, lr = 0.0035\nI0817 19:01:36.042723 17344 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 19:02:56.370849 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32112\nI0817 19:02:56.371104 17344 solver.cpp:404]     Test net output #1: loss = 3.04822 (* 1 = 3.04822 loss)\nI0817 19:02:57.677179 17344 solver.cpp:228] Iteration 4200, loss = 0.090448\nI0817 19:02:57.677218 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:02:57.677234 17344 solver.cpp:244]     Train net output #1: loss = 0.0904479 (* 1 = 0.0904479 loss)\nI0817 19:02:57.772424 17344 sgd_solver.cpp:166] Iteration 4200, lr = 0.0035\nI0817 19:05:14.936569 17344 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 19:06:35.255719 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29884\nI0817 19:06:35.255945 17344 solver.cpp:404]     Test 
net output #1: loss = 3.57193 (* 1 = 3.57193 loss)\nI0817 19:06:36.562911 17344 solver.cpp:228] Iteration 4300, loss = 0.103411\nI0817 19:06:36.562955 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:06:36.562979 17344 solver.cpp:244]     Train net output #1: loss = 0.103411 (* 1 = 0.103411 loss)\nI0817 19:06:36.656244 17344 sgd_solver.cpp:166] Iteration 4300, lr = 0.0035\nI0817 19:08:53.800719 17344 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 19:10:14.121181 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38296\nI0817 19:10:14.121425 17344 solver.cpp:404]     Test net output #1: loss = 2.16462 (* 1 = 2.16462 loss)\nI0817 19:10:15.427033 17344 solver.cpp:228] Iteration 4400, loss = 0.0944362\nI0817 19:10:15.427078 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:10:15.427101 17344 solver.cpp:244]     Train net output #1: loss = 0.0944361 (* 1 = 0.0944361 loss)\nI0817 19:10:15.524152 17344 sgd_solver.cpp:166] Iteration 4400, lr = 0.0035\nI0817 19:12:32.813091 17344 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:13:53.143375 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50436\nI0817 19:13:53.143628 17344 solver.cpp:404]     Test net output #1: loss = 1.89431 (* 1 = 1.89431 loss)\nI0817 19:13:54.449640 17344 solver.cpp:228] Iteration 4500, loss = 0.160522\nI0817 19:13:54.449681 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:13:54.449697 17344 solver.cpp:244]     Train net output #1: loss = 0.160522 (* 1 = 0.160522 loss)\nI0817 19:13:54.543735 17344 sgd_solver.cpp:166] Iteration 4500, lr = 0.0035\nI0817 19:16:11.763819 17344 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:17:32.104878 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39272\nI0817 19:17:32.105103 17344 solver.cpp:404]     Test net output #1: loss = 2.42448 (* 1 = 2.42448 loss)\nI0817 19:17:33.410868 17344 solver.cpp:228] Iteration 4600, loss = 0.096158\nI0817 
19:17:33.410908 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:17:33.410924 17344 solver.cpp:244]     Train net output #1: loss = 0.0961579 (* 1 = 0.0961579 loss)\nI0817 19:17:33.502856 17344 sgd_solver.cpp:166] Iteration 4600, lr = 0.0035\nI0817 19:19:50.630690 17344 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:21:10.961787 17344 solver.cpp:404]     Test net output #0: accuracy = 0.345\nI0817 19:21:10.962028 17344 solver.cpp:404]     Test net output #1: loss = 2.77843 (* 1 = 2.77843 loss)\nI0817 19:21:12.267377 17344 solver.cpp:228] Iteration 4700, loss = 0.0720923\nI0817 19:21:12.267418 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:21:12.267436 17344 solver.cpp:244]     Train net output #1: loss = 0.0720922 (* 1 = 0.0720922 loss)\nI0817 19:21:12.362370 17344 sgd_solver.cpp:166] Iteration 4700, lr = 0.0035\nI0817 19:23:29.472684 17344 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:24:49.812260 17344 solver.cpp:404]     Test net output #0: accuracy = 0.43748\nI0817 19:24:49.812466 17344 solver.cpp:404]     Test net output #1: loss = 2.1785 (* 1 = 2.1785 loss)\nI0817 19:24:51.117699 17344 solver.cpp:228] Iteration 4800, loss = 0.14133\nI0817 19:24:51.117748 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:24:51.117764 17344 solver.cpp:244]     Train net output #1: loss = 0.14133 (* 1 = 0.14133 loss)\nI0817 19:24:51.214500 17344 sgd_solver.cpp:166] Iteration 4800, lr = 0.0035\nI0817 19:27:08.353072 17344 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:28:28.789463 17344 solver.cpp:404]     Test net output #0: accuracy = 0.54312\nI0817 19:28:28.789692 17344 solver.cpp:404]     Test net output #1: loss = 1.64996 (* 1 = 1.64996 loss)\nI0817 19:28:30.095480 17344 solver.cpp:228] Iteration 4900, loss = 0.0444829\nI0817 19:28:30.095522 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:28:30.095538 17344 solver.cpp:244]     Train net 
output #1: loss = 0.0444828 (* 1 = 0.0444828 loss)\nI0817 19:28:30.190153 17344 sgd_solver.cpp:166] Iteration 4900, lr = 0.0035\nI0817 19:30:47.768462 17344 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:32:08.210440 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49432\nI0817 19:32:08.210686 17344 solver.cpp:404]     Test net output #1: loss = 2.07209 (* 1 = 2.07209 loss)\nI0817 19:32:09.516371 17344 solver.cpp:228] Iteration 5000, loss = 0.046329\nI0817 19:32:09.516414 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:32:09.516430 17344 solver.cpp:244]     Train net output #1: loss = 0.046329 (* 1 = 0.046329 loss)\nI0817 19:32:09.611208 17344 sgd_solver.cpp:166] Iteration 5000, lr = 0.0035\nI0817 19:34:26.731758 17344 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:35:47.170007 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56848\nI0817 19:35:47.170248 17344 solver.cpp:404]     Test net output #1: loss = 1.66344 (* 1 = 1.66344 loss)\nI0817 19:35:48.476125 17344 solver.cpp:228] Iteration 5100, loss = 0.0664868\nI0817 19:35:48.476167 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:35:48.476183 17344 solver.cpp:244]     Train net output #1: loss = 0.0664867 (* 1 = 0.0664867 loss)\nI0817 19:35:48.567828 17344 sgd_solver.cpp:166] Iteration 5100, lr = 0.0035\nI0817 19:38:05.646399 17344 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:39:26.092365 17344 solver.cpp:404]     Test net output #0: accuracy = 0.39924\nI0817 19:39:26.092617 17344 solver.cpp:404]     Test net output #1: loss = 2.5271 (* 1 = 2.5271 loss)\nI0817 19:39:27.398732 17344 solver.cpp:228] Iteration 5200, loss = 0.0754271\nI0817 19:39:27.398774 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:39:27.398792 17344 solver.cpp:244]     Train net output #1: loss = 0.075427 (* 1 = 0.075427 loss)\nI0817 19:39:27.488283 17344 sgd_solver.cpp:166] Iteration 5200, lr = 0.0035\nI0817 
19:41:44.690462 17344 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:43:05.128453 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41608\nI0817 19:43:05.128664 17344 solver.cpp:404]     Test net output #1: loss = 2.47385 (* 1 = 2.47385 loss)\nI0817 19:43:06.433688 17344 solver.cpp:228] Iteration 5300, loss = 0.101771\nI0817 19:43:06.433735 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:43:06.433751 17344 solver.cpp:244]     Train net output #1: loss = 0.101771 (* 1 = 0.101771 loss)\nI0817 19:43:06.527756 17344 sgd_solver.cpp:166] Iteration 5300, lr = 0.0035\nI0817 19:45:23.702878 17344 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:46:44.148787 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56092\nI0817 19:46:44.148988 17344 solver.cpp:404]     Test net output #1: loss = 1.78175 (* 1 = 1.78175 loss)\nI0817 19:46:45.454779 17344 solver.cpp:228] Iteration 5400, loss = 0.0601059\nI0817 19:46:45.454820 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:46:45.454836 17344 solver.cpp:244]     Train net output #1: loss = 0.0601058 (* 1 = 0.0601058 loss)\nI0817 19:46:45.550130 17344 sgd_solver.cpp:166] Iteration 5400, lr = 0.0035\nI0817 19:49:02.776919 17344 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:50:23.205490 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51264\nI0817 19:50:23.205734 17344 solver.cpp:404]     Test net output #1: loss = 1.61398 (* 1 = 1.61398 loss)\nI0817 19:50:24.511019 17344 solver.cpp:228] Iteration 5500, loss = 0.108573\nI0817 19:50:24.511061 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:50:24.511077 17344 solver.cpp:244]     Train net output #1: loss = 0.108573 (* 1 = 0.108573 loss)\nI0817 19:50:24.611332 17344 sgd_solver.cpp:166] Iteration 5500, lr = 0.0035\nI0817 19:52:41.741324 17344 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:54:02.176832 17344 solver.cpp:404]     Test net output 
#0: accuracy = 0.54476\nI0817 19:54:02.177074 17344 solver.cpp:404]     Test net output #1: loss = 1.5733 (* 1 = 1.5733 loss)\nI0817 19:54:03.482401 17344 solver.cpp:228] Iteration 5600, loss = 0.0521429\nI0817 19:54:03.482442 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:54:03.482458 17344 solver.cpp:244]     Train net output #1: loss = 0.0521428 (* 1 = 0.0521428 loss)\nI0817 19:54:03.577956 17344 sgd_solver.cpp:166] Iteration 5600, lr = 0.0035\nI0817 19:56:20.829551 17344 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:57:41.268916 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42704\nI0817 19:57:41.269168 17344 solver.cpp:404]     Test net output #1: loss = 2.28285 (* 1 = 2.28285 loss)\nI0817 19:57:42.574827 17344 solver.cpp:228] Iteration 5700, loss = 0.0881249\nI0817 19:57:42.574868 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:57:42.574884 17344 solver.cpp:244]     Train net output #1: loss = 0.0881248 (* 1 = 0.0881248 loss)\nI0817 19:57:42.669872 17344 sgd_solver.cpp:166] Iteration 5700, lr = 0.0035\nI0817 19:59:59.894829 17344 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 20:01:20.313915 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60984\nI0817 20:01:20.314170 17344 solver.cpp:404]     Test net output #1: loss = 1.45916 (* 1 = 1.45916 loss)\nI0817 20:01:21.619678 17344 solver.cpp:228] Iteration 5800, loss = 0.0768223\nI0817 20:01:21.619725 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:01:21.619742 17344 solver.cpp:244]     Train net output #1: loss = 0.0768222 (* 1 = 0.0768222 loss)\nI0817 20:01:21.718502 17344 sgd_solver.cpp:166] Iteration 5800, lr = 0.0035\nI0817 20:03:38.906097 17344 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 20:04:59.328186 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51672\nI0817 20:04:59.328383 17344 solver.cpp:404]     Test net output #1: loss = 1.6703 (* 1 = 1.6703 loss)\nI0817 
20:05:00.633967 17344 solver.cpp:228] Iteration 5900, loss = 0.123691\nI0817 20:05:00.634007 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:05:00.634023 17344 solver.cpp:244]     Train net output #1: loss = 0.123691 (* 1 = 0.123691 loss)\nI0817 20:05:00.725261 17344 sgd_solver.cpp:166] Iteration 5900, lr = 0.0035\nI0817 20:07:17.949759 17344 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 20:08:38.379586 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58544\nI0817 20:08:38.379819 17344 solver.cpp:404]     Test net output #1: loss = 1.42762 (* 1 = 1.42762 loss)\nI0817 20:08:39.685241 17344 solver.cpp:228] Iteration 6000, loss = 0.217481\nI0817 20:08:39.685282 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:08:39.685297 17344 solver.cpp:244]     Train net output #1: loss = 0.217481 (* 1 = 0.217481 loss)\nI0817 20:08:39.777823 17344 sgd_solver.cpp:166] Iteration 6000, lr = 0.0035\nI0817 20:10:57.026690 17344 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 20:12:17.464251 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60376\nI0817 20:12:17.464480 17344 solver.cpp:404]     Test net output #1: loss = 1.42299 (* 1 = 1.42299 loss)\nI0817 20:12:18.770037 17344 solver.cpp:228] Iteration 6100, loss = 0.139702\nI0817 20:12:18.770076 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:12:18.770092 17344 solver.cpp:244]     Train net output #1: loss = 0.139702 (* 1 = 0.139702 loss)\nI0817 20:12:18.863569 17344 sgd_solver.cpp:166] Iteration 6100, lr = 0.0035\nI0817 20:14:36.154822 17344 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:15:56.587131 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55368\nI0817 20:15:56.587376 17344 solver.cpp:404]     Test net output #1: loss = 1.93053 (* 1 = 1.93053 loss)\nI0817 20:15:57.893204 17344 solver.cpp:228] Iteration 6200, loss = 0.121503\nI0817 20:15:57.893244 17344 solver.cpp:244]     Train net output #0: 
accuracy = 0.96\nI0817 20:15:57.893260 17344 solver.cpp:244]     Train net output #1: loss = 0.121503 (* 1 = 0.121503 loss)\nI0817 20:15:57.982487 17344 sgd_solver.cpp:166] Iteration 6200, lr = 0.0035\nI0817 20:18:15.064527 17344 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:19:35.495625 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67652\nI0817 20:19:35.495867 17344 solver.cpp:404]     Test net output #1: loss = 1.27479 (* 1 = 1.27479 loss)\nI0817 20:19:36.801525 17344 solver.cpp:228] Iteration 6300, loss = 0.0792173\nI0817 20:19:36.801566 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:19:36.801581 17344 solver.cpp:244]     Train net output #1: loss = 0.0792172 (* 1 = 0.0792172 loss)\nI0817 20:19:36.890956 17344 sgd_solver.cpp:166] Iteration 6300, lr = 0.0035\nI0817 20:21:53.954759 17344 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:23:14.396591 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67764\nI0817 20:23:14.396802 17344 solver.cpp:404]     Test net output #1: loss = 1.13545 (* 1 = 1.13545 loss)\nI0817 20:23:15.701988 17344 solver.cpp:228] Iteration 6400, loss = 0.0992995\nI0817 20:23:15.702029 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:23:15.702045 17344 solver.cpp:244]     Train net output #1: loss = 0.0992994 (* 1 = 0.0992994 loss)\nI0817 20:23:15.792456 17344 sgd_solver.cpp:166] Iteration 6400, lr = 0.0035\nI0817 20:25:33.178879 17344 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:26:53.606760 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69252\nI0817 20:26:53.606956 17344 solver.cpp:404]     Test net output #1: loss = 1.01408 (* 1 = 1.01408 loss)\nI0817 20:26:54.912405 17344 solver.cpp:228] Iteration 6500, loss = 0.0647915\nI0817 20:26:54.912446 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:26:54.912461 17344 solver.cpp:244]     Train net output #1: loss = 0.0647914 (* 1 = 0.0647914 loss)\nI0817 
20:26:55.003620 17344 sgd_solver.cpp:166] Iteration 6500, lr = 0.0035\nI0817 20:29:12.122792 17344 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:30:32.552289 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69292\nI0817 20:30:32.552495 17344 solver.cpp:404]     Test net output #1: loss = 1.34918 (* 1 = 1.34918 loss)\nI0817 20:30:33.858057 17344 solver.cpp:228] Iteration 6600, loss = 0.0683998\nI0817 20:30:33.858098 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:30:33.858115 17344 solver.cpp:244]     Train net output #1: loss = 0.0683997 (* 1 = 0.0683997 loss)\nI0817 20:30:33.954675 17344 sgd_solver.cpp:166] Iteration 6600, lr = 0.0035\nI0817 20:32:51.090100 17344 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:34:11.519305 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65048\nI0817 20:34:11.519562 17344 solver.cpp:404]     Test net output #1: loss = 1.2325 (* 1 = 1.2325 loss)\nI0817 20:34:12.824656 17344 solver.cpp:228] Iteration 6700, loss = 0.0694201\nI0817 20:34:12.824698 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:34:12.824715 17344 solver.cpp:244]     Train net output #1: loss = 0.06942 (* 1 = 0.06942 loss)\nI0817 20:34:12.924298 17344 sgd_solver.cpp:166] Iteration 6700, lr = 0.0035\nI0817 20:36:30.030886 17344 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:37:50.468473 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6284\nI0817 20:37:50.468729 17344 solver.cpp:404]     Test net output #1: loss = 1.47336 (* 1 = 1.47336 loss)\nI0817 20:37:51.774268 17344 solver.cpp:228] Iteration 6800, loss = 0.0726018\nI0817 20:37:51.774309 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:37:51.774325 17344 solver.cpp:244]     Train net output #1: loss = 0.0726017 (* 1 = 0.0726017 loss)\nI0817 20:37:51.872040 17344 sgd_solver.cpp:166] Iteration 6800, lr = 0.0035\nI0817 20:40:09.077870 17344 solver.cpp:337] Iteration 6900, 
Testing net (#0)\nI0817 20:41:29.515195 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68088\nI0817 20:41:29.515456 17344 solver.cpp:404]     Test net output #1: loss = 1.10069 (* 1 = 1.10069 loss)\nI0817 20:41:30.820974 17344 solver.cpp:228] Iteration 6900, loss = 0.0676031\nI0817 20:41:30.821015 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:41:30.821032 17344 solver.cpp:244]     Train net output #1: loss = 0.067603 (* 1 = 0.067603 loss)\nI0817 20:41:30.917541 17344 sgd_solver.cpp:166] Iteration 6900, lr = 0.0035\nI0817 20:43:48.203817 17344 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:45:08.642012 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67124\nI0817 20:45:08.642256 17344 solver.cpp:404]     Test net output #1: loss = 1.2447 (* 1 = 1.2447 loss)\nI0817 20:45:09.948006 17344 solver.cpp:228] Iteration 7000, loss = 0.0864759\nI0817 20:45:09.948047 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:45:09.948063 17344 solver.cpp:244]     Train net output #1: loss = 0.0864758 (* 1 = 0.0864758 loss)\nI0817 20:45:10.043974 17344 sgd_solver.cpp:166] Iteration 7000, lr = 0.0035\nI0817 20:47:27.195996 17344 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:48:47.639453 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0817 20:48:47.639724 17344 solver.cpp:404]     Test net output #1: loss = 1.06861 (* 1 = 1.06861 loss)\nI0817 20:48:48.945417 17344 solver.cpp:228] Iteration 7100, loss = 0.0672213\nI0817 20:48:48.945457 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:48:48.945474 17344 solver.cpp:244]     Train net output #1: loss = 0.0672212 (* 1 = 0.0672212 loss)\nI0817 20:48:49.040488 17344 sgd_solver.cpp:166] Iteration 7100, lr = 0.0035\nI0817 20:51:06.240842 17344 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:52:26.685470 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64784\nI0817 20:52:26.685721 17344 
solver.cpp:404]     Test net output #1: loss = 1.43562 (* 1 = 1.43562 loss)\nI0817 20:52:27.991606 17344 solver.cpp:228] Iteration 7200, loss = 0.130241\nI0817 20:52:27.991644 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:52:27.991660 17344 solver.cpp:244]     Train net output #1: loss = 0.130241 (* 1 = 0.130241 loss)\nI0817 20:52:28.084868 17344 sgd_solver.cpp:166] Iteration 7200, lr = 0.0035\nI0817 20:54:45.275494 17344 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:56:05.712599 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61408\nI0817 20:56:05.712859 17344 solver.cpp:404]     Test net output #1: loss = 1.55128 (* 1 = 1.55128 loss)\nI0817 20:56:07.018851 17344 solver.cpp:228] Iteration 7300, loss = 0.086771\nI0817 20:56:07.018888 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:56:07.018903 17344 solver.cpp:244]     Train net output #1: loss = 0.0867709 (* 1 = 0.0867709 loss)\nI0817 20:56:07.112632 17344 sgd_solver.cpp:166] Iteration 7300, lr = 0.0035\nI0817 20:58:24.420040 17344 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:59:44.862350 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59796\nI0817 20:59:44.862591 17344 solver.cpp:404]     Test net output #1: loss = 1.69885 (* 1 = 1.69885 loss)\nI0817 20:59:46.168687 17344 solver.cpp:228] Iteration 7400, loss = 0.134425\nI0817 20:59:46.168730 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:59:46.168746 17344 solver.cpp:244]     Train net output #1: loss = 0.134424 (* 1 = 0.134424 loss)\nI0817 20:59:46.265357 17344 sgd_solver.cpp:166] Iteration 7400, lr = 0.0035\nI0817 21:02:03.431324 17344 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 21:03:23.882323 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58156\nI0817 21:03:23.882560 17344 solver.cpp:404]     Test net output #1: loss = 1.90615 (* 1 = 1.90615 loss)\nI0817 21:03:25.189699 17344 solver.cpp:228] Iteration 7500, 
loss = 0.0507545\nI0817 21:03:25.189745 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:03:25.189762 17344 solver.cpp:244]     Train net output #1: loss = 0.0507543 (* 1 = 0.0507543 loss)\nI0817 21:03:25.280411 17344 sgd_solver.cpp:166] Iteration 7500, lr = 0.0035\nI0817 21:05:42.479712 17344 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 21:07:02.958292 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69072\nI0817 21:07:02.958560 17344 solver.cpp:404]     Test net output #1: loss = 1.08029 (* 1 = 1.08029 loss)\nI0817 21:07:04.266000 17344 solver.cpp:228] Iteration 7600, loss = 0.170889\nI0817 21:07:04.266041 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:07:04.266057 17344 solver.cpp:244]     Train net output #1: loss = 0.170889 (* 1 = 0.170889 loss)\nI0817 21:07:04.359489 17344 sgd_solver.cpp:166] Iteration 7600, lr = 0.0035\nI0817 21:09:21.663530 17344 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 21:10:42.012296 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7308\nI0817 21:10:42.012555 17344 solver.cpp:404]     Test net output #1: loss = 1.00168 (* 1 = 1.00168 loss)\nI0817 21:10:43.321370 17344 solver.cpp:228] Iteration 7700, loss = 0.101092\nI0817 21:10:43.321414 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:10:43.321439 17344 solver.cpp:244]     Train net output #1: loss = 0.101092 (* 1 = 0.101092 loss)\nI0817 21:10:43.412983 17344 sgd_solver.cpp:166] Iteration 7700, lr = 0.0035\nI0817 21:13:00.714612 17344 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:14:21.061161 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75696\nI0817 21:14:21.061434 17344 solver.cpp:404]     Test net output #1: loss = 0.914644 (* 1 = 0.914644 loss)\nI0817 21:14:22.367491 17344 solver.cpp:228] Iteration 7800, loss = 0.158092\nI0817 21:14:22.367534 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:14:22.367558 17344 
solver.cpp:244]     Train net output #1: loss = 0.158092 (* 1 = 0.158092 loss)\nI0817 21:14:22.465214 17344 sgd_solver.cpp:166] Iteration 7800, lr = 0.0035\nI0817 21:16:39.634701 17344 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:17:59.981693 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73104\nI0817 21:17:59.981976 17344 solver.cpp:404]     Test net output #1: loss = 0.969216 (* 1 = 0.969216 loss)\nI0817 21:18:01.288113 17344 solver.cpp:228] Iteration 7900, loss = 0.0376514\nI0817 21:18:01.288159 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:18:01.288184 17344 solver.cpp:244]     Train net output #1: loss = 0.0376512 (* 1 = 0.0376512 loss)\nI0817 21:18:01.386184 17344 sgd_solver.cpp:166] Iteration 7900, lr = 0.0035\nI0817 21:20:18.740478 17344 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:21:39.112306 17344 solver.cpp:404]     Test net output #0: accuracy = 0.722\nI0817 21:21:39.112581 17344 solver.cpp:404]     Test net output #1: loss = 1.13509 (* 1 = 1.13509 loss)\nI0817 21:21:40.419544 17344 solver.cpp:228] Iteration 8000, loss = 0.107654\nI0817 21:21:40.419591 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:21:40.419616 17344 solver.cpp:244]     Train net output #1: loss = 0.107654 (* 1 = 0.107654 loss)\nI0817 21:21:40.513041 17344 sgd_solver.cpp:166] Iteration 8000, lr = 0.0035\nI0817 21:23:57.837188 17344 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:25:18.251197 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7078\nI0817 21:25:18.251464 17344 solver.cpp:404]     Test net output #1: loss = 1.09184 (* 1 = 1.09184 loss)\nI0817 21:25:19.556897 17344 solver.cpp:228] Iteration 8100, loss = 0.0898353\nI0817 21:25:19.556941 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:25:19.556957 17344 solver.cpp:244]     Train net output #1: loss = 0.0898351 (* 1 = 0.0898351 loss)\nI0817 21:25:19.654274 17344 sgd_solver.cpp:166] 
Iteration 8100, lr = 0.0035\nI0817 21:27:36.972337 17344 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:28:57.319824 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75124\nI0817 21:28:57.320091 17344 solver.cpp:404]     Test net output #1: loss = 0.989867 (* 1 = 0.989867 loss)\nI0817 21:28:58.626078 17344 solver.cpp:228] Iteration 8200, loss = 0.0931472\nI0817 21:28:58.626122 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:28:58.626138 17344 solver.cpp:244]     Train net output #1: loss = 0.093147 (* 1 = 0.093147 loss)\nI0817 21:28:58.723158 17344 sgd_solver.cpp:166] Iteration 8200, lr = 0.0035\nI0817 21:31:15.968328 17344 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:32:36.629717 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68096\nI0817 21:32:36.629967 17344 solver.cpp:404]     Test net output #1: loss = 1.48381 (* 1 = 1.48381 loss)\nI0817 21:32:37.941023 17344 solver.cpp:228] Iteration 8300, loss = 0.150336\nI0817 21:32:37.941076 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:32:37.941098 17344 solver.cpp:244]     Train net output #1: loss = 0.150336 (* 1 = 0.150336 loss)\nI0817 21:32:38.031127 17344 sgd_solver.cpp:166] Iteration 8300, lr = 0.0035\nI0817 21:34:55.439221 17344 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:36:16.929261 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72912\nI0817 21:36:16.929500 17344 solver.cpp:404]     Test net output #1: loss = 1.05519 (* 1 = 1.05519 loss)\nI0817 21:36:18.240432 17344 solver.cpp:228] Iteration 8400, loss = 0.0417904\nI0817 21:36:18.240478 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:36:18.240500 17344 solver.cpp:244]     Train net output #1: loss = 0.0417901 (* 1 = 0.0417901 loss)\nI0817 21:36:18.332823 17344 sgd_solver.cpp:166] Iteration 8400, lr = 0.0035\nI0817 21:38:35.789182 17344 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:39:57.142627 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.72444\nI0817 21:39:57.142870 17344 solver.cpp:404]     Test net output #1: loss = 1.09898 (* 1 = 1.09898 loss)\nI0817 21:39:58.454233 17344 solver.cpp:228] Iteration 8500, loss = 0.0645235\nI0817 21:39:58.454282 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:39:58.454306 17344 solver.cpp:244]     Train net output #1: loss = 0.0645233 (* 1 = 0.0645233 loss)\nI0817 21:39:58.546237 17344 sgd_solver.cpp:166] Iteration 8500, lr = 0.0035\nI0817 21:42:16.032078 17344 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:43:37.271045 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73004\nI0817 21:43:37.271262 17344 solver.cpp:404]     Test net output #1: loss = 1.10543 (* 1 = 1.10543 loss)\nI0817 21:43:38.582129 17344 solver.cpp:228] Iteration 8600, loss = 0.0684847\nI0817 21:43:38.582175 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:43:38.582191 17344 solver.cpp:244]     Train net output #1: loss = 0.0684845 (* 1 = 0.0684845 loss)\nI0817 21:43:38.672708 17344 sgd_solver.cpp:166] Iteration 8600, lr = 0.0035\nI0817 21:45:56.457067 17344 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:47:17.748152 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75328\nI0817 21:47:17.748371 17344 solver.cpp:404]     Test net output #1: loss = 0.886619 (* 1 = 0.886619 loss)\nI0817 21:47:19.058336 17344 solver.cpp:228] Iteration 8700, loss = 0.0713018\nI0817 21:47:19.058395 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:47:19.058413 17344 solver.cpp:244]     Train net output #1: loss = 0.0713016 (* 1 = 0.0713016 loss)\nI0817 21:47:19.150518 17344 sgd_solver.cpp:166] Iteration 8700, lr = 0.0035\nI0817 21:49:36.959444 17344 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:50:58.324611 17344 solver.cpp:404]     Test net output #0: accuracy = 0.765119\nI0817 21:50:58.324868 17344 solver.cpp:404]     Test net output #1: 
loss = 0.92729 (* 1 = 0.92729 loss)\nI0817 21:50:59.635118 17344 solver.cpp:228] Iteration 8800, loss = 0.0456466\nI0817 21:50:59.635179 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:50:59.635197 17344 solver.cpp:244]     Train net output #1: loss = 0.0456463 (* 1 = 0.0456463 loss)\nI0817 21:50:59.728906 17344 sgd_solver.cpp:166] Iteration 8800, lr = 0.0035\nI0817 21:53:17.399607 17344 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:54:38.363713 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78172\nI0817 21:54:38.363926 17344 solver.cpp:404]     Test net output #1: loss = 0.765338 (* 1 = 0.765338 loss)\nI0817 21:54:39.674405 17344 solver.cpp:228] Iteration 8900, loss = 0.0742768\nI0817 21:54:39.674448 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:54:39.674464 17344 solver.cpp:244]     Train net output #1: loss = 0.0742765 (* 1 = 0.0742765 loss)\nI0817 21:54:39.769166 17344 sgd_solver.cpp:166] Iteration 8900, lr = 0.0035\nI0817 21:56:57.431509 17344 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:58:18.455548 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74056\nI0817 21:58:18.455780 17344 solver.cpp:404]     Test net output #1: loss = 1.03098 (* 1 = 1.03098 loss)\nI0817 21:58:19.766279 17344 solver.cpp:228] Iteration 9000, loss = 0.0690534\nI0817 21:58:19.766324 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:58:19.766338 17344 solver.cpp:244]     Train net output #1: loss = 0.0690531 (* 1 = 0.0690531 loss)\nI0817 21:58:19.861157 17344 sgd_solver.cpp:166] Iteration 9000, lr = 0.0035\nI0817 22:00:37.442634 17344 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 22:01:58.678287 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7884\nI0817 22:01:58.678536 17344 solver.cpp:404]     Test net output #1: loss = 0.743522 (* 1 = 0.743522 loss)\nI0817 22:01:59.988492 17344 solver.cpp:228] Iteration 9100, loss = 0.0776462\nI0817 
22:01:59.988551 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:01:59.988567 17344 solver.cpp:244]     Train net output #1: loss = 0.0776459 (* 1 = 0.0776459 loss)\nI0817 22:02:00.084342 17344 sgd_solver.cpp:166] Iteration 9100, lr = 0.0035\nI0817 22:04:17.721510 17344 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 22:05:39.173285 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78328\nI0817 22:05:39.173498 17344 solver.cpp:404]     Test net output #1: loss = 0.818985 (* 1 = 0.818985 loss)\nI0817 22:05:40.487782 17344 solver.cpp:228] Iteration 9200, loss = 0.0983967\nI0817 22:05:40.487838 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:05:40.487866 17344 solver.cpp:244]     Train net output #1: loss = 0.0983964 (* 1 = 0.0983964 loss)\nI0817 22:05:40.580858 17344 sgd_solver.cpp:166] Iteration 9200, lr = 0.0035\nI0817 22:07:58.330148 17344 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 22:09:19.774019 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77364\nI0817 22:09:19.774253 17344 solver.cpp:404]     Test net output #1: loss = 0.882111 (* 1 = 0.882111 loss)\nI0817 22:09:21.085182 17344 solver.cpp:228] Iteration 9300, loss = 0.136413\nI0817 22:09:21.085243 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:09:21.085260 17344 solver.cpp:244]     Train net output #1: loss = 0.136413 (* 1 = 0.136413 loss)\nI0817 22:09:21.179396 17344 sgd_solver.cpp:166] Iteration 9300, lr = 0.0035\nI0817 22:11:38.713727 17344 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:12:59.161088 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76588\nI0817 22:12:59.161356 17344 solver.cpp:404]     Test net output #1: loss = 0.945957 (* 1 = 0.945957 loss)\nI0817 22:13:00.467067 17344 solver.cpp:228] Iteration 9400, loss = 0.0493444\nI0817 22:13:00.467108 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:13:00.467124 17344 solver.cpp:244]     
Train net output #1: loss = 0.0493441 (* 1 = 0.0493441 loss)\nI0817 22:13:00.559473 17344 sgd_solver.cpp:166] Iteration 9400, lr = 0.0035\nI0817 22:15:17.712353 17344 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:16:38.160920 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74324\nI0817 22:16:38.161188 17344 solver.cpp:404]     Test net output #1: loss = 1.06182 (* 1 = 1.06182 loss)\nI0817 22:16:39.467105 17344 solver.cpp:228] Iteration 9500, loss = 0.0982866\nI0817 22:16:39.467145 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:16:39.467161 17344 solver.cpp:244]     Train net output #1: loss = 0.0982863 (* 1 = 0.0982863 loss)\nI0817 22:16:39.566476 17344 sgd_solver.cpp:166] Iteration 9500, lr = 0.0035\nI0817 22:18:56.775285 17344 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:20:17.216327 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71532\nI0817 22:20:17.216598 17344 solver.cpp:404]     Test net output #1: loss = 1.21936 (* 1 = 1.21936 loss)\nI0817 22:20:18.522451 17344 solver.cpp:228] Iteration 9600, loss = 0.113927\nI0817 22:20:18.522495 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:20:18.522511 17344 solver.cpp:244]     Train net output #1: loss = 0.113927 (* 1 = 0.113927 loss)\nI0817 22:20:18.616576 17344 sgd_solver.cpp:166] Iteration 9600, lr = 0.0035\nI0817 22:22:35.772722 17344 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:23:56.221735 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7254\nI0817 22:23:56.222003 17344 solver.cpp:404]     Test net output #1: loss = 1.02412 (* 1 = 1.02412 loss)\nI0817 22:23:57.527592 17344 solver.cpp:228] Iteration 9700, loss = 0.0835431\nI0817 22:23:57.527634 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:23:57.527652 17344 solver.cpp:244]     Train net output #1: loss = 0.0835428 (* 1 = 0.0835428 loss)\nI0817 22:23:57.621348 17344 sgd_solver.cpp:166] Iteration 9700, lr = 
0.0035\nI0817 22:26:14.820030 17344 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:27:35.266923 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73756\nI0817 22:27:35.267168 17344 solver.cpp:404]     Test net output #1: loss = 1.00855 (* 1 = 1.00855 loss)\nI0817 22:27:36.573185 17344 solver.cpp:228] Iteration 9800, loss = 0.0313414\nI0817 22:27:36.573228 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:27:36.573245 17344 solver.cpp:244]     Train net output #1: loss = 0.0313411 (* 1 = 0.0313411 loss)\nI0817 22:27:36.669945 17344 sgd_solver.cpp:166] Iteration 9800, lr = 0.0035\nI0817 22:29:53.908198 17344 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:31:14.357317 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7388\nI0817 22:31:14.357589 17344 solver.cpp:404]     Test net output #1: loss = 1.0676 (* 1 = 1.0676 loss)\nI0817 22:31:15.663254 17344 solver.cpp:228] Iteration 9900, loss = 0.0598206\nI0817 22:31:15.663298 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:31:15.663314 17344 solver.cpp:244]     Train net output #1: loss = 0.0598203 (* 1 = 0.0598203 loss)\nI0817 22:31:15.759057 17344 sgd_solver.cpp:166] Iteration 9900, lr = 0.0035\nI0817 22:33:33.055708 17344 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:34:53.504308 17344 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI0817 22:34:53.504571 17344 solver.cpp:404]     Test net output #1: loss = 0.710419 (* 1 = 0.710419 loss)\nI0817 22:34:54.810206 17344 solver.cpp:228] Iteration 10000, loss = 0.0778241\nI0817 22:34:54.810248 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:34:54.810266 17344 solver.cpp:244]     Train net output #1: loss = 0.0778238 (* 1 = 0.0778238 loss)\nI0817 22:34:54.905460 17344 sgd_solver.cpp:166] Iteration 10000, lr = 0.0035\nI0817 22:37:12.101989 17344 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:38:32.558380 17344 solver.cpp:404] 
    Test net output #0: accuracy = 0.75288\nI0817 22:38:32.558648 17344 solver.cpp:404]     Test net output #1: loss = 1.11824 (* 1 = 1.11824 loss)\nI0817 22:38:33.864207 17344 solver.cpp:228] Iteration 10100, loss = 0.0304281\nI0817 22:38:33.864248 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:38:33.864264 17344 solver.cpp:244]     Train net output #1: loss = 0.0304278 (* 1 = 0.0304278 loss)\nI0817 22:38:33.960315 17344 sgd_solver.cpp:166] Iteration 10100, lr = 0.0035\nI0817 22:40:51.169745 17344 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:42:11.623817 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69808\nI0817 22:42:11.624083 17344 solver.cpp:404]     Test net output #1: loss = 1.22731 (* 1 = 1.22731 loss)\nI0817 22:42:12.929800 17344 solver.cpp:228] Iteration 10200, loss = 0.128681\nI0817 22:42:12.929842 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:42:12.929858 17344 solver.cpp:244]     Train net output #1: loss = 0.128681 (* 1 = 0.128681 loss)\nI0817 22:42:13.029522 17344 sgd_solver.cpp:166] Iteration 10200, lr = 0.0035\nI0817 22:44:30.333027 17344 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:45:50.791142 17344 solver.cpp:404]     Test net output #0: accuracy = 0.645321\nI0817 22:45:50.791414 17344 solver.cpp:404]     Test net output #1: loss = 1.75996 (* 1 = 1.75996 loss)\nI0817 22:45:52.097277 17344 solver.cpp:228] Iteration 10300, loss = 0.0519846\nI0817 22:45:52.097321 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:45:52.097337 17344 solver.cpp:244]     Train net output #1: loss = 0.0519843 (* 1 = 0.0519843 loss)\nI0817 22:45:52.189458 17344 sgd_solver.cpp:166] Iteration 10300, lr = 0.0035\nI0817 22:48:09.490552 17344 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:49:29.941711 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61252\nI0817 22:49:29.941995 17344 solver.cpp:404]     Test net output #1: loss = 
2.05446 (* 1 = 2.05446 loss)\nI0817 22:49:31.247586 17344 solver.cpp:228] Iteration 10400, loss = 0.0843165\nI0817 22:49:31.247628 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:49:31.247644 17344 solver.cpp:244]     Train net output #1: loss = 0.0843162 (* 1 = 0.0843162 loss)\nI0817 22:49:31.342080 17344 sgd_solver.cpp:166] Iteration 10400, lr = 0.0035\nI0817 22:51:48.535038 17344 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:53:09.736011 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59808\nI0817 22:53:09.736276 17344 solver.cpp:404]     Test net output #1: loss = 2.25698 (* 1 = 2.25698 loss)\nI0817 22:53:11.047427 17344 solver.cpp:228] Iteration 10500, loss = 0.0602567\nI0817 22:53:11.047472 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:53:11.047488 17344 solver.cpp:244]     Train net output #1: loss = 0.0602564 (* 1 = 0.0602564 loss)\nI0817 22:53:11.138178 17344 sgd_solver.cpp:166] Iteration 10500, lr = 0.0035\nI0817 22:55:28.535521 17344 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 22:56:49.995792 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7512\nI0817 22:56:49.996042 17344 solver.cpp:404]     Test net output #1: loss = 1.04052 (* 1 = 1.04052 loss)\nI0817 22:56:51.306977 17344 solver.cpp:228] Iteration 10600, loss = 0.062788\nI0817 22:56:51.307020 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:56:51.307036 17344 solver.cpp:244]     Train net output #1: loss = 0.0627877 (* 1 = 0.0627877 loss)\nI0817 22:56:51.395669 17344 sgd_solver.cpp:166] Iteration 10600, lr = 0.0035\nI0817 22:59:08.806565 17344 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 23:00:29.978407 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73148\nI0817 23:00:29.978617 17344 solver.cpp:404]     Test net output #1: loss = 1.15267 (* 1 = 1.15267 loss)\nI0817 23:00:31.288648 17344 solver.cpp:228] Iteration 10700, loss = 0.0757312\nI0817 
23:00:31.288692 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:00:31.288708 17344 solver.cpp:244]     Train net output #1: loss = 0.0757309 (* 1 = 0.0757309 loss)\nI0817 23:00:31.379562 17344 sgd_solver.cpp:166] Iteration 10700, lr = 0.0035\nI0817 23:02:48.663471 17344 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 23:04:09.114622 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6924\nI0817 23:04:09.114883 17344 solver.cpp:404]     Test net output #1: loss = 1.35824 (* 1 = 1.35824 loss)\nI0817 23:04:10.420526 17344 solver.cpp:228] Iteration 10800, loss = 0.088507\nI0817 23:04:10.420565 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:04:10.420580 17344 solver.cpp:244]     Train net output #1: loss = 0.0885067 (* 1 = 0.0885067 loss)\nI0817 23:04:10.516469 17344 sgd_solver.cpp:166] Iteration 10800, lr = 0.0035\nI0817 23:06:27.845803 17344 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 23:07:48.300297 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73468\nI0817 23:07:48.300568 17344 solver.cpp:404]     Test net output #1: loss = 1.13963 (* 1 = 1.13963 loss)\nI0817 23:07:49.606709 17344 solver.cpp:228] Iteration 10900, loss = 0.0255542\nI0817 23:07:49.606750 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:07:49.606765 17344 solver.cpp:244]     Train net output #1: loss = 0.0255539 (* 1 = 0.0255539 loss)\nI0817 23:07:49.705364 17344 sgd_solver.cpp:166] Iteration 10900, lr = 0.0035\nI0817 23:10:07.029036 17344 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 23:11:27.486268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7568\nI0817 23:11:27.486531 17344 solver.cpp:404]     Test net output #1: loss = 0.961509 (* 1 = 0.961509 loss)\nI0817 23:11:28.792315 17344 solver.cpp:228] Iteration 11000, loss = 0.0859144\nI0817 23:11:28.792356 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:11:28.792371 17344 
solver.cpp:244]     Train net output #1: loss = 0.0859141 (* 1 = 0.0859141 loss)\nI0817 23:11:28.885308 17344 sgd_solver.cpp:166] Iteration 11000, lr = 0.0035\nI0817 23:13:46.114053 17344 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 23:15:06.576045 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7448\nI0817 23:15:06.576314 17344 solver.cpp:404]     Test net output #1: loss = 1.1132 (* 1 = 1.1132 loss)\nI0817 23:15:07.882021 17344 solver.cpp:228] Iteration 11100, loss = 0.0976326\nI0817 23:15:07.882061 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:15:07.882076 17344 solver.cpp:244]     Train net output #1: loss = 0.0976323 (* 1 = 0.0976323 loss)\nI0817 23:15:07.976915 17344 sgd_solver.cpp:166] Iteration 11100, lr = 0.0035\nI0817 23:17:25.216989 17344 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:18:45.665410 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77148\nI0817 23:18:45.665678 17344 solver.cpp:404]     Test net output #1: loss = 0.958732 (* 1 = 0.958732 loss)\nI0817 23:18:46.971043 17344 solver.cpp:228] Iteration 11200, loss = 0.154822\nI0817 23:18:46.971082 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:18:46.971097 17344 solver.cpp:244]     Train net output #1: loss = 0.154821 (* 1 = 0.154821 loss)\nI0817 23:18:47.068336 17344 sgd_solver.cpp:166] Iteration 11200, lr = 0.0035\nI0817 23:21:04.306759 17344 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:22:24.751121 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78864\nI0817 23:22:24.751397 17344 solver.cpp:404]     Test net output #1: loss = 0.853211 (* 1 = 0.853211 loss)\nI0817 23:22:26.057106 17344 solver.cpp:228] Iteration 11300, loss = 0.0454971\nI0817 23:22:26.057144 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:22:26.057159 17344 solver.cpp:244]     Train net output #1: loss = 0.0454968 (* 1 = 0.0454968 loss)\nI0817 23:22:26.153818 17344 
sgd_solver.cpp:166] Iteration 11300, lr = 0.0035\nI0817 23:24:43.525401 17344 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:26:03.951114 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI0817 23:26:03.951359 17344 solver.cpp:404]     Test net output #1: loss = 0.736329 (* 1 = 0.736329 loss)\nI0817 23:26:05.257382 17344 solver.cpp:228] Iteration 11400, loss = 0.106578\nI0817 23:26:05.257422 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:26:05.257438 17344 solver.cpp:244]     Train net output #1: loss = 0.106578 (* 1 = 0.106578 loss)\nI0817 23:26:05.355069 17344 sgd_solver.cpp:166] Iteration 11400, lr = 0.0035\nI0817 23:28:22.766476 17344 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:29:43.109264 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7672\nI0817 23:29:43.109534 17344 solver.cpp:404]     Test net output #1: loss = 0.987173 (* 1 = 0.987173 loss)\nI0817 23:29:44.415570 17344 solver.cpp:228] Iteration 11500, loss = 0.0799264\nI0817 23:29:44.415612 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:29:44.415628 17344 solver.cpp:244]     Train net output #1: loss = 0.0799261 (* 1 = 0.0799261 loss)\nI0817 23:29:44.510160 17344 sgd_solver.cpp:166] Iteration 11500, lr = 0.0035\nI0817 23:32:01.817787 17344 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:33:22.172880 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76988\nI0817 23:33:22.173151 17344 solver.cpp:404]     Test net output #1: loss = 0.984199 (* 1 = 0.984199 loss)\nI0817 23:33:23.478961 17344 solver.cpp:228] Iteration 11600, loss = 0.0452273\nI0817 23:33:23.479002 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:33:23.479018 17344 solver.cpp:244]     Train net output #1: loss = 0.045227 (* 1 = 0.045227 loss)\nI0817 23:33:23.571579 17344 sgd_solver.cpp:166] Iteration 11600, lr = 0.0035\nI0817 23:35:40.748447 17344 solver.cpp:337] Iteration 11700, Testing net 
(#0)\nI0817 23:37:01.104590 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78672\nI0817 23:37:01.104864 17344 solver.cpp:404]     Test net output #1: loss = 0.853212 (* 1 = 0.853212 loss)\nI0817 23:37:02.410531 17344 solver.cpp:228] Iteration 11700, loss = 0.0382608\nI0817 23:37:02.410569 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:37:02.410585 17344 solver.cpp:244]     Train net output #1: loss = 0.0382606 (* 1 = 0.0382606 loss)\nI0817 23:37:02.503417 17344 sgd_solver.cpp:166] Iteration 11700, lr = 0.0035\nI0817 23:39:19.686122 17344 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:40:40.029757 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7902\nI0817 23:40:40.030015 17344 solver.cpp:404]     Test net output #1: loss = 0.853289 (* 1 = 0.853289 loss)\nI0817 23:40:41.335605 17344 solver.cpp:228] Iteration 11800, loss = 0.0462834\nI0817 23:40:41.335644 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:40:41.335659 17344 solver.cpp:244]     Train net output #1: loss = 0.0462832 (* 1 = 0.0462832 loss)\nI0817 23:40:41.427563 17344 sgd_solver.cpp:166] Iteration 11800, lr = 0.0035\nI0817 23:42:58.696301 17344 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:44:19.043503 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78508\nI0817 23:44:19.043751 17344 solver.cpp:404]     Test net output #1: loss = 0.821596 (* 1 = 0.821596 loss)\nI0817 23:44:20.349447 17344 solver.cpp:228] Iteration 11900, loss = 0.0534326\nI0817 23:44:20.349485 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:44:20.349500 17344 solver.cpp:244]     Train net output #1: loss = 0.0534323 (* 1 = 0.0534323 loss)\nI0817 23:44:20.445392 17344 sgd_solver.cpp:166] Iteration 11900, lr = 0.0035\nI0817 23:46:37.701473 17344 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:47:58.047111 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80396\nI0817 23:47:58.047379 
17344 solver.cpp:404]     Test net output #1: loss = 0.774923 (* 1 = 0.774923 loss)\nI0817 23:47:59.353127 17344 solver.cpp:228] Iteration 12000, loss = 0.136055\nI0817 23:47:59.353168 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:47:59.353183 17344 solver.cpp:244]     Train net output #1: loss = 0.136055 (* 1 = 0.136055 loss)\nI0817 23:47:59.450178 17344 sgd_solver.cpp:166] Iteration 12000, lr = 0.0035\nI0817 23:50:16.722069 17344 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 23:51:37.067847 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78296\nI0817 23:51:37.068115 17344 solver.cpp:404]     Test net output #1: loss = 0.907836 (* 1 = 0.907836 loss)\nI0817 23:51:38.373971 17344 solver.cpp:228] Iteration 12100, loss = 0.0869297\nI0817 23:51:38.374016 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:51:38.374032 17344 solver.cpp:244]     Train net output #1: loss = 0.0869295 (* 1 = 0.0869295 loss)\nI0817 23:51:38.472543 17344 sgd_solver.cpp:166] Iteration 12100, lr = 0.0035\nI0817 23:53:55.889868 17344 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:55:16.235409 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72724\nI0817 23:55:16.235669 17344 solver.cpp:404]     Test net output #1: loss = 1.07463 (* 1 = 1.07463 loss)\nI0817 23:55:17.541589 17344 solver.cpp:228] Iteration 12200, loss = 0.0838685\nI0817 23:55:17.541632 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:55:17.541648 17344 solver.cpp:244]     Train net output #1: loss = 0.0838682 (* 1 = 0.0838682 loss)\nI0817 23:55:17.635443 17344 sgd_solver.cpp:166] Iteration 12200, lr = 0.0035\nI0817 23:57:34.839891 17344 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0817 23:58:55.191570 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73496\nI0817 23:58:55.191823 17344 solver.cpp:404]     Test net output #1: loss = 1.01963 (* 1 = 1.01963 loss)\nI0817 23:58:56.497772 17344 
solver.cpp:228] Iteration 12300, loss = 0.0611113\nI0817 23:58:56.497817 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:58:56.497833 17344 solver.cpp:244]     Train net output #1: loss = 0.0611111 (* 1 = 0.0611111 loss)\nI0817 23:58:56.589650 17344 sgd_solver.cpp:166] Iteration 12300, lr = 0.0035\nI0818 00:01:13.846639 17344 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 00:02:34.312054 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72156\nI0818 00:02:34.312319 17344 solver.cpp:404]     Test net output #1: loss = 1.18387 (* 1 = 1.18387 loss)\nI0818 00:02:35.618665 17344 solver.cpp:228] Iteration 12400, loss = 0.143876\nI0818 00:02:35.618710 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:02:35.618737 17344 solver.cpp:244]     Train net output #1: loss = 0.143876 (* 1 = 0.143876 loss)\nI0818 00:02:35.714277 17344 sgd_solver.cpp:166] Iteration 12400, lr = 0.0035\nI0818 00:04:52.864742 17344 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 00:06:13.323866 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72792\nI0818 00:06:13.324137 17344 solver.cpp:404]     Test net output #1: loss = 1.09395 (* 1 = 1.09395 loss)\nI0818 00:06:14.630931 17344 solver.cpp:228] Iteration 12500, loss = 0.0966356\nI0818 00:06:14.630975 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:06:14.630992 17344 solver.cpp:244]     Train net output #1: loss = 0.0966354 (* 1 = 0.0966354 loss)\nI0818 00:06:14.724053 17344 sgd_solver.cpp:166] Iteration 12500, lr = 0.0035\nI0818 00:08:31.962417 17344 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 00:09:52.418319 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78124\nI0818 00:09:52.418599 17344 solver.cpp:404]     Test net output #1: loss = 0.89425 (* 1 = 0.89425 loss)\nI0818 00:09:53.725438 17344 solver.cpp:228] Iteration 12600, loss = 0.0946863\nI0818 00:09:53.725483 17344 solver.cpp:244]     Train net output #0: 
accuracy = 0.96\nI0818 00:09:53.725500 17344 solver.cpp:244]     Train net output #1: loss = 0.0946862 (* 1 = 0.0946862 loss)\nI0818 00:09:53.817148 17344 sgd_solver.cpp:166] Iteration 12600, lr = 0.0035\nI0818 00:12:11.118594 17344 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:13:31.561925 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71936\nI0818 00:13:31.562196 17344 solver.cpp:404]     Test net output #1: loss = 1.24019 (* 1 = 1.24019 loss)\nI0818 00:13:32.871094 17344 solver.cpp:228] Iteration 12700, loss = 0.122898\nI0818 00:13:32.871141 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:13:32.871157 17344 solver.cpp:244]     Train net output #1: loss = 0.122898 (* 1 = 0.122898 loss)\nI0818 00:13:32.963984 17344 sgd_solver.cpp:166] Iteration 12700, lr = 0.0035\nI0818 00:15:50.277535 17344 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:17:10.751232 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67924\nI0818 00:17:10.751505 17344 solver.cpp:404]     Test net output #1: loss = 1.50437 (* 1 = 1.50437 loss)\nI0818 00:17:12.059697 17344 solver.cpp:228] Iteration 12800, loss = 0.0896931\nI0818 00:17:12.059743 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:17:12.059759 17344 solver.cpp:244]     Train net output #1: loss = 0.0896929 (* 1 = 0.0896929 loss)\nI0818 00:17:12.154134 17344 sgd_solver.cpp:166] Iteration 12800, lr = 0.0035\nI0818 00:19:29.405470 17344 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:20:49.860064 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75924\nI0818 00:20:49.860339 17344 solver.cpp:404]     Test net output #1: loss = 1.05414 (* 1 = 1.05414 loss)\nI0818 00:20:51.166270 17344 solver.cpp:228] Iteration 12900, loss = 0.0268874\nI0818 00:20:51.166316 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:20:51.166332 17344 solver.cpp:244]     Train net output #1: loss = 0.0268873 (* 1 = 0.0268873 
loss)\nI0818 00:20:51.257671 17344 sgd_solver.cpp:166] Iteration 12900, lr = 0.0035\nI0818 00:23:08.445017 17344 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:24:28.902532 17344 solver.cpp:404]     Test net output #0: accuracy = 0.744\nI0818 00:24:28.902798 17344 solver.cpp:404]     Test net output #1: loss = 0.943047 (* 1 = 0.943047 loss)\nI0818 00:24:30.209936 17344 solver.cpp:228] Iteration 13000, loss = 0.0942003\nI0818 00:24:30.209981 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:24:30.209997 17344 solver.cpp:244]     Train net output #1: loss = 0.0942002 (* 1 = 0.0942002 loss)\nI0818 00:24:30.300914 17344 sgd_solver.cpp:166] Iteration 13000, lr = 0.0035\nI0818 00:26:47.526854 17344 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:28:07.982023 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5836\nI0818 00:28:07.982293 17344 solver.cpp:404]     Test net output #1: loss = 2.11575 (* 1 = 2.11575 loss)\nI0818 00:28:09.288894 17344 solver.cpp:228] Iteration 13100, loss = 0.0894707\nI0818 00:28:09.288939 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:28:09.288955 17344 solver.cpp:244]     Train net output #1: loss = 0.0894706 (* 1 = 0.0894706 loss)\nI0818 00:28:09.381572 17344 sgd_solver.cpp:166] Iteration 13100, lr = 0.0035\nI0818 00:30:26.627310 17344 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:31:47.083953 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6994\nI0818 00:31:47.084206 17344 solver.cpp:404]     Test net output #1: loss = 1.34306 (* 1 = 1.34306 loss)\nI0818 00:31:48.391540 17344 solver.cpp:228] Iteration 13200, loss = 0.133445\nI0818 00:31:48.391584 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:31:48.391602 17344 solver.cpp:244]     Train net output #1: loss = 0.133444 (* 1 = 0.133444 loss)\nI0818 00:31:48.481676 17344 sgd_solver.cpp:166] Iteration 13200, lr = 0.0035\nI0818 00:34:05.706060 17344 solver.cpp:337] 
Iteration 13300, Testing net (#0)\nI0818 00:35:26.168459 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79284\nI0818 00:35:26.168738 17344 solver.cpp:404]     Test net output #1: loss = 0.82522 (* 1 = 0.82522 loss)\nI0818 00:35:27.475369 17344 solver.cpp:228] Iteration 13300, loss = 0.0459922\nI0818 00:35:27.475414 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:35:27.475430 17344 solver.cpp:244]     Train net output #1: loss = 0.0459921 (* 1 = 0.0459921 loss)\nI0818 00:35:27.571123 17344 sgd_solver.cpp:166] Iteration 13300, lr = 0.0035\nI0818 00:37:44.798342 17344 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:39:05.255404 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76232\nI0818 00:39:05.255677 17344 solver.cpp:404]     Test net output #1: loss = 1.02485 (* 1 = 1.02485 loss)\nI0818 00:39:06.561599 17344 solver.cpp:228] Iteration 13400, loss = 0.0779047\nI0818 00:39:06.561650 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:39:06.561676 17344 solver.cpp:244]     Train net output #1: loss = 0.0779046 (* 1 = 0.0779046 loss)\nI0818 00:39:06.652863 17344 sgd_solver.cpp:166] Iteration 13400, lr = 0.0035\nI0818 00:41:23.878274 17344 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:42:44.316164 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71748\nI0818 00:42:44.316387 17344 solver.cpp:404]     Test net output #1: loss = 1.28115 (* 1 = 1.28115 loss)\nI0818 00:42:45.622117 17344 solver.cpp:228] Iteration 13500, loss = 0.0989589\nI0818 00:42:45.622165 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:42:45.622181 17344 solver.cpp:244]     Train net output #1: loss = 0.0989588 (* 1 = 0.0989588 loss)\nI0818 00:42:45.714558 17344 sgd_solver.cpp:166] Iteration 13500, lr = 0.0035\nI0818 00:45:02.842730 17344 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:46:23.292647 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.76376\nI0818 00:46:23.292914 17344 solver.cpp:404]     Test net output #1: loss = 1.22663 (* 1 = 1.22663 loss)\nI0818 00:46:24.604367 17344 solver.cpp:228] Iteration 13600, loss = 0.0516153\nI0818 00:46:24.604429 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:46:24.604446 17344 solver.cpp:244]     Train net output #1: loss = 0.0516151 (* 1 = 0.0516151 loss)\nI0818 00:46:24.696331 17344 sgd_solver.cpp:166] Iteration 13600, lr = 0.0035\nI0818 00:48:41.954372 17344 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:50:02.601006 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74984\nI0818 00:50:02.601251 17344 solver.cpp:404]     Test net output #1: loss = 1.24615 (* 1 = 1.24615 loss)\nI0818 00:50:03.912783 17344 solver.cpp:228] Iteration 13700, loss = 0.0309765\nI0818 00:50:03.912855 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:50:03.912880 17344 solver.cpp:244]     Train net output #1: loss = 0.0309764 (* 1 = 0.0309764 loss)\nI0818 00:50:04.003909 17344 sgd_solver.cpp:166] Iteration 13700, lr = 0.0035\nI0818 00:52:21.412837 17344 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:53:42.337589 17344 solver.cpp:404]     Test net output #0: accuracy = 0.762\nI0818 00:53:42.337888 17344 solver.cpp:404]     Test net output #1: loss = 1.12458 (* 1 = 1.12458 loss)\nI0818 00:53:43.648957 17344 solver.cpp:228] Iteration 13800, loss = 0.0560824\nI0818 00:53:43.649020 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:53:43.649047 17344 solver.cpp:244]     Train net output #1: loss = 0.0560823 (* 1 = 0.0560823 loss)\nI0818 00:53:43.741180 17344 sgd_solver.cpp:166] Iteration 13800, lr = 0.0035\nI0818 00:56:01.255383 17344 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 00:57:22.687278 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70796\nI0818 00:57:22.687556 17344 solver.cpp:404]     Test net output #1: loss = 1.30356 (* 1 = 1.30356 loss)\nI0818 
00:57:23.998816 17344 solver.cpp:228] Iteration 13900, loss = 0.0497555\nI0818 00:57:23.998886 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:57:23.998911 17344 solver.cpp:244]     Train net output #1: loss = 0.0497553 (* 1 = 0.0497553 loss)\nI0818 00:57:24.093147 17344 sgd_solver.cpp:166] Iteration 13900, lr = 0.0035\nI0818 00:59:41.688238 17344 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 01:01:03.130378 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75576\nI0818 01:01:03.130671 17344 solver.cpp:404]     Test net output #1: loss = 1.06445 (* 1 = 1.06445 loss)\nI0818 01:01:04.440712 17344 solver.cpp:228] Iteration 14000, loss = 0.103396\nI0818 01:01:04.440775 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:01:04.440800 17344 solver.cpp:244]     Train net output #1: loss = 0.103396 (* 1 = 0.103396 loss)\nI0818 01:01:04.535310 17344 sgd_solver.cpp:166] Iteration 14000, lr = 0.0035\nI0818 01:03:22.141783 17344 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 01:04:43.604430 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7548\nI0818 01:04:43.604727 17344 solver.cpp:404]     Test net output #1: loss = 1.24814 (* 1 = 1.24814 loss)\nI0818 01:04:44.915901 17344 solver.cpp:228] Iteration 14100, loss = 0.0209355\nI0818 01:04:44.915964 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:04:44.915990 17344 solver.cpp:244]     Train net output #1: loss = 0.0209354 (* 1 = 0.0209354 loss)\nI0818 01:04:45.008507 17344 sgd_solver.cpp:166] Iteration 14100, lr = 0.0035\nI0818 01:07:02.650811 17344 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 01:08:24.115778 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7964\nI0818 01:08:24.116075 17344 solver.cpp:404]     Test net output #1: loss = 1.02284 (* 1 = 1.02284 loss)\nI0818 01:08:25.425730 17344 solver.cpp:228] Iteration 14200, loss = 0.0107422\nI0818 01:08:25.425792 17344 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0818 01:08:25.425817 17344 solver.cpp:244]     Train net output #1: loss = 0.0107421 (* 1 = 0.0107421 loss)\nI0818 01:08:25.519769 17344 sgd_solver.cpp:166] Iteration 14200, lr = 0.0035\nI0818 01:10:43.175879 17344 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 01:12:04.619293 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37676\nI0818 01:12:04.619582 17344 solver.cpp:404]     Test net output #1: loss = 9.80292 (* 1 = 9.80292 loss)\nI0818 01:12:05.930379 17344 solver.cpp:228] Iteration 14300, loss = 0.0485394\nI0818 01:12:05.930443 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:12:05.930469 17344 solver.cpp:244]     Train net output #1: loss = 0.0485394 (* 1 = 0.0485394 loss)\nI0818 01:12:06.024705 17344 sgd_solver.cpp:166] Iteration 14300, lr = 0.0035\nI0818 01:14:23.599761 17344 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:15:45.045804 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70236\nI0818 01:15:45.046113 17344 solver.cpp:404]     Test net output #1: loss = 1.4929 (* 1 = 1.4929 loss)\nI0818 01:15:46.356603 17344 solver.cpp:228] Iteration 14400, loss = 0.0381694\nI0818 01:15:46.356667 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:15:46.356691 17344 solver.cpp:244]     Train net output #1: loss = 0.0381694 (* 1 = 0.0381694 loss)\nI0818 01:15:46.446763 17344 sgd_solver.cpp:166] Iteration 14400, lr = 0.0035\nI0818 01:18:04.056226 17344 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:19:25.513067 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7282\nI0818 01:19:25.513365 17344 solver.cpp:404]     Test net output #1: loss = 1.26015 (* 1 = 1.26015 loss)\nI0818 01:19:26.823413 17344 solver.cpp:228] Iteration 14500, loss = 0.160237\nI0818 01:19:26.823460 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:19:26.823477 17344 solver.cpp:244]     Train net output #1: loss = 0.160237 (* 1 = 0.160237 
loss)\nI0818 01:19:26.913099 17344 sgd_solver.cpp:166] Iteration 14500, lr = 0.0035\nI0818 01:21:43.851444 17344 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:23:04.275012 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78444\nI0818 01:23:04.275271 17344 solver.cpp:404]     Test net output #1: loss = 0.969309 (* 1 = 0.969309 loss)\nI0818 01:23:05.580972 17344 solver.cpp:228] Iteration 14600, loss = 0.0917253\nI0818 01:23:05.581012 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:23:05.581028 17344 solver.cpp:244]     Train net output #1: loss = 0.0917252 (* 1 = 0.0917252 loss)\nI0818 01:23:05.680158 17344 sgd_solver.cpp:166] Iteration 14600, lr = 0.0035\nI0818 01:25:22.651705 17344 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:26:43.082334 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2218\nI0818 01:26:43.082604 17344 solver.cpp:404]     Test net output #1: loss = 7.44433 (* 1 = 7.44433 loss)\nI0818 01:26:44.387514 17344 solver.cpp:228] Iteration 14700, loss = 0.0463805\nI0818 01:26:44.387553 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:26:44.387569 17344 solver.cpp:244]     Train net output #1: loss = 0.0463804 (* 1 = 0.0463804 loss)\nI0818 01:26:44.485450 17344 sgd_solver.cpp:166] Iteration 14700, lr = 0.0035\nI0818 01:29:01.380383 17344 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:30:21.802562 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77596\nI0818 01:30:21.802839 17344 solver.cpp:404]     Test net output #1: loss = 0.849651 (* 1 = 0.849651 loss)\nI0818 01:30:23.108659 17344 solver.cpp:228] Iteration 14800, loss = 0.0177103\nI0818 01:30:23.108698 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:30:23.108714 17344 solver.cpp:244]     Train net output #1: loss = 0.0177102 (* 1 = 0.0177102 loss)\nI0818 01:30:23.205781 17344 sgd_solver.cpp:166] Iteration 14800, lr = 0.0035\nI0818 01:32:40.161181 17344 
solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:34:00.581189 17344 solver.cpp:404]     Test net output #0: accuracy = 0.728\nI0818 01:34:00.581460 17344 solver.cpp:404]     Test net output #1: loss = 1.24953 (* 1 = 1.24953 loss)\nI0818 01:34:01.887171 17344 solver.cpp:228] Iteration 14900, loss = 0.029842\nI0818 01:34:01.887208 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:34:01.887224 17344 solver.cpp:244]     Train net output #1: loss = 0.0298419 (* 1 = 0.0298419 loss)\nI0818 01:34:01.983956 17344 sgd_solver.cpp:166] Iteration 14900, lr = 0.0035\nI0818 01:36:18.853997 17344 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:37:39.262928 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74648\nI0818 01:37:39.263193 17344 solver.cpp:404]     Test net output #1: loss = 1.17398 (* 1 = 1.17398 loss)\nI0818 01:37:40.568922 17344 solver.cpp:228] Iteration 15000, loss = 0.0600869\nI0818 01:37:40.568963 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:37:40.568979 17344 solver.cpp:244]     Train net output #1: loss = 0.0600869 (* 1 = 0.0600869 loss)\nI0818 01:37:40.668085 17344 sgd_solver.cpp:166] Iteration 15000, lr = 0.0035\nI0818 01:39:57.643014 17344 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:41:18.074826 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32852\nI0818 01:41:18.075090 17344 solver.cpp:404]     Test net output #1: loss = 11.4882 (* 1 = 11.4882 loss)\nI0818 01:41:19.380846 17344 solver.cpp:228] Iteration 15100, loss = 0.0706823\nI0818 01:41:19.380887 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:41:19.380903 17344 solver.cpp:244]     Train net output #1: loss = 0.0706822 (* 1 = 0.0706822 loss)\nI0818 01:41:19.479007 17344 sgd_solver.cpp:166] Iteration 15100, lr = 0.0035\nI0818 01:43:36.398492 17344 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:44:56.725836 17344 solver.cpp:404]     Test net output #0: accuracy 
= 0.79488\nI0818 01:44:56.726092 17344 solver.cpp:404]     Test net output #1: loss = 0.887619 (* 1 = 0.887619 loss)\nI0818 01:44:58.031955 17344 solver.cpp:228] Iteration 15200, loss = 0.0458401\nI0818 01:44:58.031996 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:44:58.032012 17344 solver.cpp:244]     Train net output #1: loss = 0.04584 (* 1 = 0.04584 loss)\nI0818 01:44:58.129194 17344 sgd_solver.cpp:166] Iteration 15200, lr = 0.0035\nI0818 01:47:14.905848 17344 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:48:35.220940 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75128\nI0818 01:48:35.221158 17344 solver.cpp:404]     Test net output #1: loss = 1.21328 (* 1 = 1.21328 loss)\nI0818 01:48:36.526885 17344 solver.cpp:228] Iteration 15300, loss = 0.0780177\nI0818 01:48:36.526927 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:48:36.526943 17344 solver.cpp:244]     Train net output #1: loss = 0.0780176 (* 1 = 0.0780176 loss)\nI0818 01:48:36.626008 17344 sgd_solver.cpp:166] Iteration 15300, lr = 0.0035\nI0818 01:50:53.566498 17344 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:52:13.905393 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77404\nI0818 01:52:13.905645 17344 solver.cpp:404]     Test net output #1: loss = 0.951005 (* 1 = 0.951005 loss)\nI0818 01:52:15.211952 17344 solver.cpp:228] Iteration 15400, loss = 0.0758743\nI0818 01:52:15.211997 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:52:15.212013 17344 solver.cpp:244]     Train net output #1: loss = 0.0758742 (* 1 = 0.0758742 loss)\nI0818 01:52:15.309391 17344 sgd_solver.cpp:166] Iteration 15400, lr = 0.0035\nI0818 01:54:32.244174 17344 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 01:55:52.565346 17344 solver.cpp:404]     Test net output #0: accuracy = 0.831\nI0818 01:55:52.565587 17344 solver.cpp:404]     Test net output #1: loss = 0.73267 (* 1 = 0.73267 loss)\nI0818 
01:55:53.871763 17344 solver.cpp:228] Iteration 15500, loss = 0.0432989\nI0818 01:55:53.871806 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:55:53.871822 17344 solver.cpp:244]     Train net output #1: loss = 0.0432988 (* 1 = 0.0432988 loss)\nI0818 01:55:53.969257 17344 sgd_solver.cpp:166] Iteration 15500, lr = 0.0035\nI0818 01:58:10.955140 17344 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 01:59:31.250676 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71092\nI0818 01:59:31.250896 17344 solver.cpp:404]     Test net output #1: loss = 1.71442 (* 1 = 1.71442 loss)\nI0818 01:59:32.557195 17344 solver.cpp:228] Iteration 15600, loss = 0.0549847\nI0818 01:59:32.557240 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:59:32.557255 17344 solver.cpp:244]     Train net output #1: loss = 0.0549846 (* 1 = 0.0549846 loss)\nI0818 01:59:32.652963 17344 sgd_solver.cpp:166] Iteration 15600, lr = 0.0035\nI0818 02:01:49.421633 17344 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 02:03:09.709712 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72848\nI0818 02:03:09.709986 17344 solver.cpp:404]     Test net output #1: loss = 1.58177 (* 1 = 1.58177 loss)\nI0818 02:03:11.015983 17344 solver.cpp:228] Iteration 15700, loss = 0.0383575\nI0818 02:03:11.016027 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:03:11.016043 17344 solver.cpp:244]     Train net output #1: loss = 0.0383574 (* 1 = 0.0383574 loss)\nI0818 02:03:11.113023 17344 sgd_solver.cpp:166] Iteration 15700, lr = 0.0035\nI0818 02:05:27.919188 17344 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 02:06:48.204628 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72916\nI0818 02:06:48.204893 17344 solver.cpp:404]     Test net output #1: loss = 1.59546 (* 1 = 1.59546 loss)\nI0818 02:06:49.511729 17344 solver.cpp:228] Iteration 15800, loss = 0.0422582\nI0818 02:06:49.511778 17344 solver.cpp:244]     
Train net output #0: accuracy = 0.984\nI0818 02:06:49.511795 17344 solver.cpp:244]     Train net output #1: loss = 0.0422581 (* 1 = 0.0422581 loss)\nI0818 02:06:49.610767 17344 sgd_solver.cpp:166] Iteration 15800, lr = 0.0035\nI0818 02:09:06.498581 17344 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 02:10:26.813444 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76376\nI0818 02:10:26.813673 17344 solver.cpp:404]     Test net output #1: loss = 1.10844 (* 1 = 1.10844 loss)\nI0818 02:10:28.120054 17344 solver.cpp:228] Iteration 15900, loss = 0.0658862\nI0818 02:10:28.120100 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:10:28.120116 17344 solver.cpp:244]     Train net output #1: loss = 0.0658861 (* 1 = 0.0658861 loss)\nI0818 02:10:28.217711 17344 sgd_solver.cpp:166] Iteration 15900, lr = 0.0035\nI0818 02:12:45.018039 17344 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:14:05.341729 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81244\nI0818 02:14:05.341986 17344 solver.cpp:404]     Test net output #1: loss = 0.847771 (* 1 = 0.847771 loss)\nI0818 02:14:06.648399 17344 solver.cpp:228] Iteration 16000, loss = 0.0300133\nI0818 02:14:06.648444 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:14:06.648468 17344 solver.cpp:244]     Train net output #1: loss = 0.0300133 (* 1 = 0.0300133 loss)\nI0818 02:14:06.750840 17344 sgd_solver.cpp:166] Iteration 16000, lr = 0.0035\nI0818 02:16:23.611599 17344 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:17:44.025121 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82188\nI0818 02:17:44.025382 17344 solver.cpp:404]     Test net output #1: loss = 0.733299 (* 1 = 0.733299 loss)\nI0818 02:17:45.330739 17344 solver.cpp:228] Iteration 16100, loss = 0.0531512\nI0818 02:17:45.330785 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:17:45.330809 17344 solver.cpp:244]     Train net output #1: loss = 
0.0531512 (* 1 = 0.0531512 loss)\nI0818 02:17:45.434198 17344 sgd_solver.cpp:166] Iteration 16100, lr = 0.0035\nI0818 02:20:02.349562 17344 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:21:22.770151 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73868\nI0818 02:21:22.770365 17344 solver.cpp:404]     Test net output #1: loss = 1.22499 (* 1 = 1.22499 loss)\nI0818 02:21:24.076109 17344 solver.cpp:228] Iteration 16200, loss = 0.078562\nI0818 02:21:24.076155 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:21:24.076179 17344 solver.cpp:244]     Train net output #1: loss = 0.078562 (* 1 = 0.078562 loss)\nI0818 02:21:24.175164 17344 sgd_solver.cpp:166] Iteration 16200, lr = 0.0035\nI0818 02:23:41.111253 17344 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:25:01.524148 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63204\nI0818 02:25:01.524400 17344 solver.cpp:404]     Test net output #1: loss = 2.48722 (* 1 = 2.48722 loss)\nI0818 02:25:02.830561 17344 solver.cpp:228] Iteration 16300, loss = 0.0591679\nI0818 02:25:02.830606 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:25:02.830629 17344 solver.cpp:244]     Train net output #1: loss = 0.0591679 (* 1 = 0.0591679 loss)\nI0818 02:25:02.928771 17344 sgd_solver.cpp:166] Iteration 16300, lr = 0.0035\nI0818 02:27:19.835239 17344 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:28:40.265507 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70692\nI0818 02:28:40.265764 17344 solver.cpp:404]     Test net output #1: loss = 1.50494 (* 1 = 1.50494 loss)\nI0818 02:28:41.572017 17344 solver.cpp:228] Iteration 16400, loss = 0.0796095\nI0818 02:28:41.572062 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:28:41.572084 17344 solver.cpp:244]     Train net output #1: loss = 0.0796095 (* 1 = 0.0796095 loss)\nI0818 02:28:41.669286 17344 sgd_solver.cpp:166] Iteration 16400, lr = 0.0035\nI0818 
02:30:58.618813 17344 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:32:19.045907 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67972\nI0818 02:32:19.046175 17344 solver.cpp:404]     Test net output #1: loss = 1.68505 (* 1 = 1.68505 loss)\nI0818 02:32:20.351667 17344 solver.cpp:228] Iteration 16500, loss = 0.0563288\nI0818 02:32:20.351719 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:32:20.351744 17344 solver.cpp:244]     Train net output #1: loss = 0.0563288 (* 1 = 0.0563288 loss)\nI0818 02:32:20.447579 17344 sgd_solver.cpp:166] Iteration 16500, lr = 0.0035\nI0818 02:34:37.318470 17344 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:35:57.736482 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61692\nI0818 02:35:57.736748 17344 solver.cpp:404]     Test net output #1: loss = 2.23704 (* 1 = 2.23704 loss)\nI0818 02:35:59.042412 17344 solver.cpp:228] Iteration 16600, loss = 0.0671448\nI0818 02:35:59.042454 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:35:59.042476 17344 solver.cpp:244]     Train net output #1: loss = 0.0671448 (* 1 = 0.0671448 loss)\nI0818 02:35:59.140234 17344 sgd_solver.cpp:166] Iteration 16600, lr = 0.0035\nI0818 02:38:16.104586 17344 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:39:36.525591 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68712\nI0818 02:39:36.525863 17344 solver.cpp:404]     Test net output #1: loss = 1.72298 (* 1 = 1.72298 loss)\nI0818 02:39:37.831442 17344 solver.cpp:228] Iteration 16700, loss = 0.0873907\nI0818 02:39:37.831486 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:39:37.831511 17344 solver.cpp:244]     Train net output #1: loss = 0.0873907 (* 1 = 0.0873907 loss)\nI0818 02:39:37.931273 17344 sgd_solver.cpp:166] Iteration 16700, lr = 0.0035\nI0818 02:41:54.833209 17344 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:43:15.255242 17344 solver.cpp:404]     
Test net output #0: accuracy = 0.72104\nI0818 02:43:15.255511 17344 solver.cpp:404]     Test net output #1: loss = 1.56602 (* 1 = 1.56602 loss)\nI0818 02:43:16.560822 17344 solver.cpp:228] Iteration 16800, loss = 0.066007\nI0818 02:43:16.560865 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:43:16.560880 17344 solver.cpp:244]     Train net output #1: loss = 0.066007 (* 1 = 0.066007 loss)\nI0818 02:43:16.660728 17344 sgd_solver.cpp:166] Iteration 16800, lr = 0.0035\nI0818 02:45:33.595355 17344 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:46:53.997680 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63168\nI0818 02:46:53.997951 17344 solver.cpp:404]     Test net output #1: loss = 2.18271 (* 1 = 2.18271 loss)\nI0818 02:46:55.304637 17344 solver.cpp:228] Iteration 16900, loss = 0.104894\nI0818 02:46:55.304678 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:46:55.304694 17344 solver.cpp:244]     Train net output #1: loss = 0.104894 (* 1 = 0.104894 loss)\nI0818 02:46:55.401161 17344 sgd_solver.cpp:166] Iteration 16900, lr = 0.0035\nI0818 02:49:12.347528 17344 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:50:32.766497 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73184\nI0818 02:50:32.766763 17344 solver.cpp:404]     Test net output #1: loss = 1.54779 (* 1 = 1.54779 loss)\nI0818 02:50:34.073539 17344 solver.cpp:228] Iteration 17000, loss = 0.0574255\nI0818 02:50:34.073583 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:50:34.073608 17344 solver.cpp:244]     Train net output #1: loss = 0.0574255 (* 1 = 0.0574255 loss)\nI0818 02:50:34.171874 17344 sgd_solver.cpp:166] Iteration 17000, lr = 0.0035\nI0818 02:52:51.070858 17344 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 02:54:11.477308 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60224\nI0818 02:54:11.477566 17344 solver.cpp:404]     Test net output #1: loss = 2.41674 (* 1 
= 2.41674 loss)\nI0818 02:54:12.784153 17344 solver.cpp:228] Iteration 17100, loss = 0.0472862\nI0818 02:54:12.784199 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:54:12.784224 17344 solver.cpp:244]     Train net output #1: loss = 0.0472862 (* 1 = 0.0472862 loss)\nI0818 02:54:12.883519 17344 sgd_solver.cpp:166] Iteration 17100, lr = 0.0035\nI0818 02:56:29.774062 17344 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 02:57:50.186624 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75264\nI0818 02:57:50.186892 17344 solver.cpp:404]     Test net output #1: loss = 1.1194 (* 1 = 1.1194 loss)\nI0818 02:57:51.493181 17344 solver.cpp:228] Iteration 17200, loss = 0.0296287\nI0818 02:57:51.493228 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:57:51.493253 17344 solver.cpp:244]     Train net output #1: loss = 0.0296286 (* 1 = 0.0296286 loss)\nI0818 02:57:51.592944 17344 sgd_solver.cpp:166] Iteration 17200, lr = 0.0035\nI0818 03:00:08.459395 17344 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 03:01:28.857589 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7798\nI0818 03:01:28.857846 17344 solver.cpp:404]     Test net output #1: loss = 0.867039 (* 1 = 0.867039 loss)\nI0818 03:01:30.164839 17344 solver.cpp:228] Iteration 17300, loss = 0.0740438\nI0818 03:01:30.164885 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:01:30.164901 17344 solver.cpp:244]     Train net output #1: loss = 0.0740438 (* 1 = 0.0740438 loss)\nI0818 03:01:30.257927 17344 sgd_solver.cpp:166] Iteration 17300, lr = 0.0035\nI0818 03:03:47.168901 17344 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 03:05:07.575474 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76456\nI0818 03:05:07.575752 17344 solver.cpp:404]     Test net output #1: loss = 0.980696 (* 1 = 0.980696 loss)\nI0818 03:05:08.882205 17344 solver.cpp:228] Iteration 17400, loss = 0.0663141\nI0818 03:05:08.882252 
17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:05:08.882277 17344 solver.cpp:244]     Train net output #1: loss = 0.066314 (* 1 = 0.066314 loss)\nI0818 03:05:08.985535 17344 sgd_solver.cpp:166] Iteration 17400, lr = 0.0035\nI0818 03:07:25.660305 17344 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 03:08:46.057266 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77624\nI0818 03:08:46.057481 17344 solver.cpp:404]     Test net output #1: loss = 0.928292 (* 1 = 0.928292 loss)\nI0818 03:08:47.363937 17344 solver.cpp:228] Iteration 17500, loss = 0.0583437\nI0818 03:08:47.363984 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:08:47.364008 17344 solver.cpp:244]     Train net output #1: loss = 0.0583437 (* 1 = 0.0583437 loss)\nI0818 03:08:47.458501 17344 sgd_solver.cpp:166] Iteration 17500, lr = 0.0035\nI0818 03:11:04.274375 17344 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:12:24.679513 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81716\nI0818 03:12:24.679777 17344 solver.cpp:404]     Test net output #1: loss = 0.752976 (* 1 = 0.752976 loss)\nI0818 03:12:25.986069 17344 solver.cpp:228] Iteration 17600, loss = 0.0478856\nI0818 03:12:25.986116 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:12:25.986140 17344 solver.cpp:244]     Train net output #1: loss = 0.0478856 (* 1 = 0.0478856 loss)\nI0818 03:12:26.083183 17344 sgd_solver.cpp:166] Iteration 17600, lr = 0.0035\nI0818 03:14:42.840718 17344 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:16:03.239157 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76544\nI0818 03:16:03.239424 17344 solver.cpp:404]     Test net output #1: loss = 1.04201 (* 1 = 1.04201 loss)\nI0818 03:16:04.546118 17344 solver.cpp:228] Iteration 17700, loss = 0.0726387\nI0818 03:16:04.546164 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:16:04.546188 17344 solver.cpp:244]     Train 
net output #1: loss = 0.0726386 (* 1 = 0.0726386 loss)\nI0818 03:16:04.637743 17344 sgd_solver.cpp:166] Iteration 17700, lr = 0.0035\nI0818 03:18:21.555253 17344 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:19:41.951624 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80912\nI0818 03:19:41.951887 17344 solver.cpp:404]     Test net output #1: loss = 0.818983 (* 1 = 0.818983 loss)\nI0818 03:19:43.258774 17344 solver.cpp:228] Iteration 17800, loss = 0.0297639\nI0818 03:19:43.258821 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:19:43.258844 17344 solver.cpp:244]     Train net output #1: loss = 0.0297639 (* 1 = 0.0297639 loss)\nI0818 03:19:43.350087 17344 sgd_solver.cpp:166] Iteration 17800, lr = 0.0035\nI0818 03:22:00.199538 17344 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:23:20.583868 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73696\nI0818 03:23:20.584107 17344 solver.cpp:404]     Test net output #1: loss = 1.1085 (* 1 = 1.1085 loss)\nI0818 03:23:21.890388 17344 solver.cpp:228] Iteration 17900, loss = 0.104392\nI0818 03:23:21.890432 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:23:21.890449 17344 solver.cpp:244]     Train net output #1: loss = 0.104392 (* 1 = 0.104392 loss)\nI0818 03:23:21.988258 17344 sgd_solver.cpp:166] Iteration 17900, lr = 0.0035\nI0818 03:25:38.707743 17344 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:26:59.103510 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76156\nI0818 03:26:59.103767 17344 solver.cpp:404]     Test net output #1: loss = 0.964748 (* 1 = 0.964748 loss)\nI0818 03:27:00.409497 17344 solver.cpp:228] Iteration 18000, loss = 0.0745007\nI0818 03:27:00.409540 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:27:00.409556 17344 solver.cpp:244]     Train net output #1: loss = 0.0745007 (* 1 = 0.0745007 loss)\nI0818 03:27:00.503533 17344 sgd_solver.cpp:166] Iteration 18000, 
lr = 0.0035\nI0818 03:29:17.088609 17344 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:30:37.492914 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74028\nI0818 03:30:37.493171 17344 solver.cpp:404]     Test net output #1: loss = 1.14394 (* 1 = 1.14394 loss)\nI0818 03:30:38.799155 17344 solver.cpp:228] Iteration 18100, loss = 0.035928\nI0818 03:30:38.799198 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:30:38.799214 17344 solver.cpp:244]     Train net output #1: loss = 0.035928 (* 1 = 0.035928 loss)\nI0818 03:30:38.891957 17344 sgd_solver.cpp:166] Iteration 18100, lr = 0.0035\nI0818 03:32:55.366610 17344 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:34:15.765074 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69464\nI0818 03:34:15.765321 17344 solver.cpp:404]     Test net output #1: loss = 1.98252 (* 1 = 1.98252 loss)\nI0818 03:34:17.071023 17344 solver.cpp:228] Iteration 18200, loss = 0.0494985\nI0818 03:34:17.071065 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:34:17.071081 17344 solver.cpp:244]     Train net output #1: loss = 0.0494984 (* 1 = 0.0494984 loss)\nI0818 03:34:17.166573 17344 sgd_solver.cpp:166] Iteration 18200, lr = 0.0035\nI0818 03:36:33.652936 17344 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:37:54.063717 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72432\nI0818 03:37:54.063969 17344 solver.cpp:404]     Test net output #1: loss = 1.51964 (* 1 = 1.51964 loss)\nI0818 03:37:55.370008 17344 solver.cpp:228] Iteration 18300, loss = 0.0839723\nI0818 03:37:55.370054 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:37:55.370079 17344 solver.cpp:244]     Train net output #1: loss = 0.0839722 (* 1 = 0.0839722 loss)\nI0818 03:37:55.467736 17344 sgd_solver.cpp:166] Iteration 18300, lr = 0.0035\nI0818 03:40:12.084537 17344 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:41:32.477535 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.42008\nI0818 03:41:32.477782 17344 solver.cpp:404]     Test net output #1: loss = 5.55454 (* 1 = 5.55454 loss)\nI0818 03:41:33.783500 17344 solver.cpp:228] Iteration 18400, loss = 0.026141\nI0818 03:41:33.783546 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:41:33.783570 17344 solver.cpp:244]     Train net output #1: loss = 0.0261409 (* 1 = 0.0261409 loss)\nI0818 03:41:33.879835 17344 sgd_solver.cpp:166] Iteration 18400, lr = 0.0035\nI0818 03:43:50.456091 17344 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:45:10.904590 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0818 03:45:10.904855 17344 solver.cpp:404]     Test net output #1: loss = 1.49445 (* 1 = 1.49445 loss)\nI0818 03:45:12.210237 17344 solver.cpp:228] Iteration 18500, loss = 0.0327245\nI0818 03:45:12.210283 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:45:12.210307 17344 solver.cpp:244]     Train net output #1: loss = 0.0327245 (* 1 = 0.0327245 loss)\nI0818 03:45:12.306483 17344 sgd_solver.cpp:166] Iteration 18500, lr = 0.0035\nI0818 03:47:28.858647 17344 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:48:49.254138 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6654\nI0818 03:48:49.254396 17344 solver.cpp:404]     Test net output #1: loss = 1.68622 (* 1 = 1.68622 loss)\nI0818 03:48:50.559546 17344 solver.cpp:228] Iteration 18600, loss = 0.0658098\nI0818 03:48:50.559593 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 03:48:50.559617 17344 solver.cpp:244]     Train net output #1: loss = 0.0658098 (* 1 = 0.0658098 loss)\nI0818 03:48:50.657655 17344 sgd_solver.cpp:166] Iteration 18600, lr = 0.0035\nI0818 03:51:07.193188 17344 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:52:27.590313 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58468\nI0818 03:52:27.590584 17344 solver.cpp:404]     Test net output 
#1: loss = 3.11964 (* 1 = 3.11964 loss)\nI0818 03:52:28.896108 17344 solver.cpp:228] Iteration 18700, loss = 0.0274048\nI0818 03:52:28.896154 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:52:28.896169 17344 solver.cpp:244]     Train net output #1: loss = 0.0274047 (* 1 = 0.0274047 loss)\nI0818 03:52:28.991549 17344 sgd_solver.cpp:166] Iteration 18700, lr = 0.0035\nI0818 03:54:45.533959 17344 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 03:56:05.942873 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67392\nI0818 03:56:05.943130 17344 solver.cpp:404]     Test net output #1: loss = 1.68717 (* 1 = 1.68717 loss)\nI0818 03:56:07.248731 17344 solver.cpp:228] Iteration 18800, loss = 0.0509964\nI0818 03:56:07.248776 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:56:07.248797 17344 solver.cpp:244]     Train net output #1: loss = 0.0509964 (* 1 = 0.0509964 loss)\nI0818 03:56:07.343610 17344 sgd_solver.cpp:166] Iteration 18800, lr = 0.0035\nI0818 03:58:23.914371 17344 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 03:59:44.261620 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75448\nI0818 03:59:44.261881 17344 solver.cpp:404]     Test net output #1: loss = 1.26103 (* 1 = 1.26103 loss)\nI0818 03:59:45.567394 17344 solver.cpp:228] Iteration 18900, loss = 0.0864456\nI0818 03:59:45.567438 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:59:45.567454 17344 solver.cpp:244]     Train net output #1: loss = 0.0864456 (* 1 = 0.0864456 loss)\nI0818 03:59:45.666940 17344 sgd_solver.cpp:166] Iteration 18900, lr = 0.0035\nI0818 04:02:02.262476 17344 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 04:03:22.564590 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69496\nI0818 04:03:22.564864 17344 solver.cpp:404]     Test net output #1: loss = 1.78896 (* 1 = 1.78896 loss)\nI0818 04:03:23.870754 17344 solver.cpp:228] Iteration 19000, loss = 
0.105319\nI0818 04:03:23.870803 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:03:23.870820 17344 solver.cpp:244]     Train net output #1: loss = 0.105319 (* 1 = 0.105319 loss)\nI0818 04:03:23.965895 17344 sgd_solver.cpp:166] Iteration 19000, lr = 0.0035\nI0818 04:05:40.486598 17344 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 04:07:00.809583 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77692\nI0818 04:07:00.809824 17344 solver.cpp:404]     Test net output #1: loss = 0.994458 (* 1 = 0.994458 loss)\nI0818 04:07:02.115375 17344 solver.cpp:228] Iteration 19100, loss = 0.0293278\nI0818 04:07:02.115417 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:07:02.115432 17344 solver.cpp:244]     Train net output #1: loss = 0.0293278 (* 1 = 0.0293278 loss)\nI0818 04:07:02.210901 17344 sgd_solver.cpp:166] Iteration 19100, lr = 0.0035\nI0818 04:09:18.744163 17344 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 04:10:39.022822 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74256\nI0818 04:10:39.023051 17344 solver.cpp:404]     Test net output #1: loss = 1.28116 (* 1 = 1.28116 loss)\nI0818 04:10:40.328776 17344 solver.cpp:228] Iteration 19200, loss = 0.0616434\nI0818 04:10:40.328824 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:10:40.328840 17344 solver.cpp:244]     Train net output #1: loss = 0.0616434 (* 1 = 0.0616434 loss)\nI0818 04:10:40.424461 17344 sgd_solver.cpp:166] Iteration 19200, lr = 0.0035\nI0818 04:12:56.985220 17344 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:14:17.275907 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53772\nI0818 04:14:17.276170 17344 solver.cpp:404]     Test net output #1: loss = 3.10796 (* 1 = 3.10796 loss)\nI0818 04:14:18.581636 17344 solver.cpp:228] Iteration 19300, loss = 0.114471\nI0818 04:14:18.581681 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:14:18.581697 
17344 solver.cpp:244]     Train net output #1: loss = 0.114471 (* 1 = 0.114471 loss)\nI0818 04:14:18.674202 17344 sgd_solver.cpp:166] Iteration 19300, lr = 0.0035\nI0818 04:16:35.194147 17344 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:17:55.489912 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57024\nI0818 04:17:55.490170 17344 solver.cpp:404]     Test net output #1: loss = 2.52346 (* 1 = 2.52346 loss)\nI0818 04:17:56.798571 17344 solver.cpp:228] Iteration 19400, loss = 0.0279581\nI0818 04:17:56.798610 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:17:56.798626 17344 solver.cpp:244]     Train net output #1: loss = 0.0279581 (* 1 = 0.0279581 loss)\nI0818 04:17:56.890223 17344 sgd_solver.cpp:166] Iteration 19400, lr = 0.0035\nI0818 04:20:13.451032 17344 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:21:33.736129 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0818 04:21:33.736353 17344 solver.cpp:404]     Test net output #1: loss = 52.8567 (* 1 = 52.8567 loss)\nI0818 04:21:35.044701 17344 solver.cpp:228] Iteration 19500, loss = 0.0801954\nI0818 04:21:35.044744 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:21:35.044760 17344 solver.cpp:244]     Train net output #1: loss = 0.0801954 (* 1 = 0.0801954 loss)\nI0818 04:21:35.134874 17344 sgd_solver.cpp:166] Iteration 19500, lr = 0.0035\nI0818 04:23:51.721140 17344 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:25:12.005940 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79504\nI0818 04:25:12.006218 17344 solver.cpp:404]     Test net output #1: loss = 0.965639 (* 1 = 0.965639 loss)\nI0818 04:25:13.311683 17344 solver.cpp:228] Iteration 19600, loss = 0.0265869\nI0818 04:25:13.311720 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:25:13.311736 17344 solver.cpp:244]     Train net output #1: loss = 0.0265869 (* 1 = 0.0265869 loss)\nI0818 04:25:13.402537 17344 
sgd_solver.cpp:166] Iteration 19600, lr = 0.0035\nI0818 04:27:29.888393 17344 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:28:50.206737 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65148\nI0818 04:28:50.207011 17344 solver.cpp:404]     Test net output #1: loss = 1.92382 (* 1 = 1.92382 loss)\nI0818 04:28:51.512385 17344 solver.cpp:228] Iteration 19700, loss = 0.0643991\nI0818 04:28:51.512425 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:28:51.512441 17344 solver.cpp:244]     Train net output #1: loss = 0.0643991 (* 1 = 0.0643991 loss)\nI0818 04:28:51.604758 17344 sgd_solver.cpp:166] Iteration 19700, lr = 0.0035\nI0818 04:31:08.234941 17344 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:32:28.514966 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69076\nI0818 04:32:28.515249 17344 solver.cpp:404]     Test net output #1: loss = 1.47312 (* 1 = 1.47312 loss)\nI0818 04:32:29.820920 17344 solver.cpp:228] Iteration 19800, loss = 0.0243627\nI0818 04:32:29.820961 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:32:29.820978 17344 solver.cpp:244]     Train net output #1: loss = 0.0243627 (* 1 = 0.0243627 loss)\nI0818 04:32:29.918982 17344 sgd_solver.cpp:166] Iteration 19800, lr = 0.0035\nI0818 04:34:46.665988 17344 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:36:07.066795 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66772\nI0818 04:36:07.067064 17344 solver.cpp:404]     Test net output #1: loss = 2.17085 (* 1 = 2.17085 loss)\nI0818 04:36:08.376354 17344 solver.cpp:228] Iteration 19900, loss = 0.0850795\nI0818 04:36:08.376396 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:36:08.376413 17344 solver.cpp:244]     Train net output #1: loss = 0.0850795 (* 1 = 0.0850795 loss)\nI0818 04:36:08.472929 17344 sgd_solver.cpp:166] Iteration 19900, lr = 0.0035\nI0818 04:38:25.366514 17344 solver.cpp:337] Iteration 20000, Testing net 
(#0)\nI0818 04:39:45.758473 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80164\nI0818 04:39:45.758752 17344 solver.cpp:404]     Test net output #1: loss = 0.897863 (* 1 = 0.897863 loss)\nI0818 04:39:47.067646 17344 solver.cpp:228] Iteration 20000, loss = 0.0457858\nI0818 04:39:47.067687 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:39:47.067703 17344 solver.cpp:244]     Train net output #1: loss = 0.0457858 (* 1 = 0.0457858 loss)\nI0818 04:39:47.160089 17344 sgd_solver.cpp:166] Iteration 20000, lr = 0.0035\nI0818 04:42:04.103689 17344 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:43:24.505859 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78484\nI0818 04:43:24.506124 17344 solver.cpp:404]     Test net output #1: loss = 0.986078 (* 1 = 0.986078 loss)\nI0818 04:43:25.814694 17344 solver.cpp:228] Iteration 20100, loss = 0.0247327\nI0818 04:43:25.814738 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:43:25.814752 17344 solver.cpp:244]     Train net output #1: loss = 0.0247327 (* 1 = 0.0247327 loss)\nI0818 04:43:25.904510 17344 sgd_solver.cpp:166] Iteration 20100, lr = 0.0035\nI0818 04:45:42.806506 17344 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:47:03.202958 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4208\nI0818 04:47:03.203229 17344 solver.cpp:404]     Test net output #1: loss = 4.42278 (* 1 = 4.42278 loss)\nI0818 04:47:04.511929 17344 solver.cpp:228] Iteration 20200, loss = 0.14503\nI0818 04:47:04.511971 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:47:04.511987 17344 solver.cpp:244]     Train net output #1: loss = 0.14503 (* 1 = 0.14503 loss)\nI0818 04:47:04.604547 17344 sgd_solver.cpp:166] Iteration 20200, lr = 0.0035\nI0818 04:49:21.413071 17344 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 04:50:41.811285 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2542\nI0818 04:50:41.811563 17344 
solver.cpp:404]     Test net output #1: loss = 8.53027 (* 1 = 8.53027 loss)\nI0818 04:50:43.120474 17344 solver.cpp:228] Iteration 20300, loss = 0.0783834\nI0818 04:50:43.120517 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:50:43.120532 17344 solver.cpp:244]     Train net output #1: loss = 0.0783834 (* 1 = 0.0783834 loss)\nI0818 04:50:43.214450 17344 sgd_solver.cpp:166] Iteration 20300, lr = 0.0035\nI0818 04:53:00.264683 17344 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0818 04:54:20.673038 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75444\nI0818 04:54:20.673326 17344 solver.cpp:404]     Test net output #1: loss = 1.09997 (* 1 = 1.09997 loss)\nI0818 04:54:21.982234 17344 solver.cpp:228] Iteration 20400, loss = 0.0742037\nI0818 04:54:21.982280 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:54:21.982303 17344 solver.cpp:244]     Train net output #1: loss = 0.0742037 (* 1 = 0.0742037 loss)\nI0818 04:54:22.076572 17344 sgd_solver.cpp:166] Iteration 20400, lr = 0.0035\nI0818 04:56:38.851275 17344 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 04:57:59.262770 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58448\nI0818 04:57:59.263064 17344 solver.cpp:404]     Test net output #1: loss = 2.84422 (* 1 = 2.84422 loss)\nI0818 04:58:00.572515 17344 solver.cpp:228] Iteration 20500, loss = 0.0621426\nI0818 04:58:00.572559 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:58:00.572583 17344 solver.cpp:244]     Train net output #1: loss = 0.0621426 (* 1 = 0.0621426 loss)\nI0818 04:58:00.664700 17344 sgd_solver.cpp:166] Iteration 20500, lr = 0.0035\nI0818 05:00:17.636517 17344 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 05:01:38.035421 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73004\nI0818 05:01:38.035735 17344 solver.cpp:404]     Test net output #1: loss = 1.47932 (* 1 = 1.47932 loss)\nI0818 05:01:39.344938 17344 solver.cpp:228] 
Iteration 20600, loss = 0.0318823\nI0818 05:01:39.344980 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:01:39.345005 17344 solver.cpp:244]     Train net output #1: loss = 0.0318823 (* 1 = 0.0318823 loss)\nI0818 05:01:39.440796 17344 sgd_solver.cpp:166] Iteration 20600, lr = 0.0035\nI0818 05:03:56.307514 17344 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 05:05:16.706626 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68\nI0818 05:05:16.706929 17344 solver.cpp:404]     Test net output #1: loss = 1.53232 (* 1 = 1.53232 loss)\nI0818 05:05:18.018417 17344 solver.cpp:228] Iteration 20700, loss = 0.125984\nI0818 05:05:18.018461 17344 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 05:05:18.018484 17344 solver.cpp:244]     Train net output #1: loss = 0.125984 (* 1 = 0.125984 loss)\nI0818 05:05:18.104811 17344 sgd_solver.cpp:166] Iteration 20700, lr = 0.0035\nI0818 05:07:34.919203 17344 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 05:08:55.321298 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76308\nI0818 05:08:55.321593 17344 solver.cpp:404]     Test net output #1: loss = 1.09732 (* 1 = 1.09732 loss)\nI0818 05:08:56.631404 17344 solver.cpp:228] Iteration 20800, loss = 0.0748233\nI0818 05:08:56.631448 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:08:56.631471 17344 solver.cpp:244]     Train net output #1: loss = 0.0748234 (* 1 = 0.0748234 loss)\nI0818 05:08:56.720528 17344 sgd_solver.cpp:166] Iteration 20800, lr = 0.0035\nI0818 05:11:13.555740 17344 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 05:12:33.976594 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75484\nI0818 05:12:33.976897 17344 solver.cpp:404]     Test net output #1: loss = 0.97357 (* 1 = 0.97357 loss)\nI0818 05:12:35.286623 17344 solver.cpp:228] Iteration 20900, loss = 0.0455234\nI0818 05:12:35.286669 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 
05:12:35.286700 17344 solver.cpp:244]     Train net output #1: loss = 0.0455235 (* 1 = 0.0455235 loss)\nI0818 05:12:35.378516 17344 sgd_solver.cpp:166] Iteration 20900, lr = 0.0035\nI0818 05:14:52.334182 17344 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 05:16:12.734431 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69472\nI0818 05:16:12.734732 17344 solver.cpp:404]     Test net output #1: loss = 1.48266 (* 1 = 1.48266 loss)\nI0818 05:16:14.043316 17344 solver.cpp:228] Iteration 21000, loss = 0.0612824\nI0818 05:16:14.043361 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:16:14.043386 17344 solver.cpp:244]     Train net output #1: loss = 0.0612825 (* 1 = 0.0612825 loss)\nI0818 05:16:14.137537 17344 sgd_solver.cpp:166] Iteration 21000, lr = 0.0035\nI0818 05:18:30.999336 17344 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:19:51.395074 17344 solver.cpp:404]     Test net output #0: accuracy = 0.22936\nI0818 05:19:51.395370 17344 solver.cpp:404]     Test net output #1: loss = 14.0914 (* 1 = 14.0914 loss)\nI0818 05:19:52.705878 17344 solver.cpp:228] Iteration 21100, loss = 0.0443099\nI0818 05:19:52.705924 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:19:52.705948 17344 solver.cpp:244]     Train net output #1: loss = 0.04431 (* 1 = 0.04431 loss)\nI0818 05:19:52.802527 17344 sgd_solver.cpp:166] Iteration 21100, lr = 0.0035\nI0818 05:22:09.664227 17344 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:23:30.072865 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68064\nI0818 05:23:30.073155 17344 solver.cpp:404]     Test net output #1: loss = 1.67839 (* 1 = 1.67839 loss)\nI0818 05:23:31.383314 17344 solver.cpp:228] Iteration 21200, loss = 0.0594425\nI0818 05:23:31.383358 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:23:31.383383 17344 solver.cpp:244]     Train net output #1: loss = 0.0594426 (* 1 = 0.0594426 loss)\nI0818 05:23:31.479650 
17344 sgd_solver.cpp:166] Iteration 21200, lr = 0.0035\nI0818 05:25:48.499790 17344 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:27:08.908587 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77456\nI0818 05:27:08.908885 17344 solver.cpp:404]     Test net output #1: loss = 0.929891 (* 1 = 0.929891 loss)\nI0818 05:27:10.218566 17344 solver.cpp:228] Iteration 21300, loss = 0.0263979\nI0818 05:27:10.218613 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:27:10.218637 17344 solver.cpp:244]     Train net output #1: loss = 0.026398 (* 1 = 0.026398 loss)\nI0818 05:27:10.313549 17344 sgd_solver.cpp:166] Iteration 21300, lr = 0.0035\nI0818 05:29:27.107998 17344 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 05:30:47.519655 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80564\nI0818 05:30:47.519961 17344 solver.cpp:404]     Test net output #1: loss = 0.776798 (* 1 = 0.776798 loss)\nI0818 05:30:48.829387 17344 solver.cpp:228] Iteration 21400, loss = 0.028315\nI0818 05:30:48.829433 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:30:48.829457 17344 solver.cpp:244]     Train net output #1: loss = 0.028315 (* 1 = 0.028315 loss)\nI0818 05:30:48.921871 17344 sgd_solver.cpp:166] Iteration 21400, lr = 0.0035\nI0818 05:33:05.790117 17344 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:34:26.208873 17344 solver.cpp:404]     Test net output #0: accuracy = 0.632\nI0818 05:34:26.209139 17344 solver.cpp:404]     Test net output #1: loss = 2.08851 (* 1 = 2.08851 loss)\nI0818 05:34:27.517853 17344 solver.cpp:228] Iteration 21500, loss = 0.0726912\nI0818 05:34:27.517894 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:34:27.517917 17344 solver.cpp:244]     Train net output #1: loss = 0.0726913 (* 1 = 0.0726913 loss)\nI0818 05:34:27.611374 17344 sgd_solver.cpp:166] Iteration 21500, lr = 0.0035\nI0818 05:36:44.561698 17344 solver.cpp:337] Iteration 21600, Testing 
net (#0)\nI0818 05:38:04.981775 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI0818 05:38:04.982051 17344 solver.cpp:404]     Test net output #1: loss = 0.793438 (* 1 = 0.793438 loss)\nI0818 05:38:06.292100 17344 solver.cpp:228] Iteration 21600, loss = 0.18151\nI0818 05:38:06.292143 17344 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 05:38:06.292166 17344 solver.cpp:244]     Train net output #1: loss = 0.18151 (* 1 = 0.18151 loss)\nI0818 05:38:06.382036 17344 sgd_solver.cpp:166] Iteration 21600, lr = 0.0035\nI0818 05:40:23.258952 17344 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:41:43.676182 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73828\nI0818 05:41:43.676476 17344 solver.cpp:404]     Test net output #1: loss = 1.09365 (* 1 = 1.09365 loss)\nI0818 05:41:44.985946 17344 solver.cpp:228] Iteration 21700, loss = 0.0345097\nI0818 05:41:44.985991 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:41:44.986016 17344 solver.cpp:244]     Train net output #1: loss = 0.0345098 (* 1 = 0.0345098 loss)\nI0818 05:41:45.074064 17344 sgd_solver.cpp:166] Iteration 21700, lr = 0.0035\nI0818 05:44:02.022037 17344 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:45:22.439975 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73136\nI0818 05:45:22.440258 17344 solver.cpp:404]     Test net output #1: loss = 1.24132 (* 1 = 1.24132 loss)\nI0818 05:45:23.750113 17344 solver.cpp:228] Iteration 21800, loss = 0.0850248\nI0818 05:45:23.750156 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:45:23.750180 17344 solver.cpp:244]     Train net output #1: loss = 0.0850249 (* 1 = 0.0850249 loss)\nI0818 05:45:23.839910 17344 sgd_solver.cpp:166] Iteration 21800, lr = 0.0035\nI0818 05:47:40.776083 17344 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 05:49:01.199276 17344 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0818 05:49:01.199543 17344 
solver.cpp:404]     Test net output #1: loss = 1.32594 (* 1 = 1.32594 loss)\nI0818 05:49:02.508992 17344 solver.cpp:228] Iteration 21900, loss = 0.0826285\nI0818 05:49:02.509033 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:49:02.509057 17344 solver.cpp:244]     Train net output #1: loss = 0.0826286 (* 1 = 0.0826286 loss)\nI0818 05:49:02.595194 17344 sgd_solver.cpp:166] Iteration 21900, lr = 0.0035\nI0818 05:51:19.517850 17344 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0818 05:52:40.404155 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI0818 05:52:40.404429 17344 solver.cpp:404]     Test net output #1: loss = 0.898591 (* 1 = 0.898591 loss)\nI0818 05:52:41.716938 17344 solver.cpp:228] Iteration 22000, loss = 0.0767278\nI0818 05:52:41.717000 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:52:41.717018 17344 solver.cpp:244]     Train net output #1: loss = 0.0767278 (* 1 = 0.0767278 loss)\nI0818 05:52:41.810822 17344 sgd_solver.cpp:166] Iteration 22000, lr = 0.0035\nI0818 05:54:58.953654 17344 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 05:56:20.380559 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75112\nI0818 05:56:20.380815 17344 solver.cpp:404]     Test net output #1: loss = 1.03138 (* 1 = 1.03138 loss)\nI0818 05:56:21.690428 17344 solver.cpp:228] Iteration 22100, loss = 0.0658927\nI0818 05:56:21.690490 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:56:21.690508 17344 solver.cpp:244]     Train net output #1: loss = 0.0658927 (* 1 = 0.0658927 loss)\nI0818 05:56:21.789746 17344 sgd_solver.cpp:166] Iteration 22100, lr = 0.0035\nI0818 05:58:38.958122 17344 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 06:00:00.382021 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61092\nI0818 06:00:00.382305 17344 solver.cpp:404]     Test net output #1: loss = 2.04843 (* 1 = 2.04843 loss)\nI0818 06:00:01.692953 17344 
solver.cpp:228] Iteration 22200, loss = 0.0223036\nI0818 06:00:01.693006 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:00:01.693023 17344 solver.cpp:244]     Train net output #1: loss = 0.0223036 (* 1 = 0.0223036 loss)\nI0818 06:00:01.787408 17344 sgd_solver.cpp:166] Iteration 22200, lr = 0.0035\nI0818 06:02:18.954370 17344 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 06:03:40.352107 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72052\nI0818 06:03:40.352342 17344 solver.cpp:404]     Test net output #1: loss = 1.36305 (* 1 = 1.36305 loss)\nI0818 06:03:41.661957 17344 solver.cpp:228] Iteration 22300, loss = 0.0663837\nI0818 06:03:41.662016 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:03:41.662034 17344 solver.cpp:244]     Train net output #1: loss = 0.0663838 (* 1 = 0.0663838 loss)\nI0818 06:03:41.752897 17344 sgd_solver.cpp:166] Iteration 22300, lr = 0.0035\nI0818 06:05:58.816265 17344 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 06:07:20.220705 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77132\nI0818 06:07:20.220940 17344 solver.cpp:404]     Test net output #1: loss = 1.07374 (* 1 = 1.07374 loss)\nI0818 06:07:21.530372 17344 solver.cpp:228] Iteration 22400, loss = 0.0632703\nI0818 06:07:21.530431 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:07:21.530449 17344 solver.cpp:244]     Train net output #1: loss = 0.0632704 (* 1 = 0.0632704 loss)\nI0818 06:07:21.624191 17344 sgd_solver.cpp:166] Iteration 22400, lr = 0.0035\nI0818 06:09:38.720057 17344 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 06:11:00.109431 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7408\nI0818 06:11:00.109671 17344 solver.cpp:404]     Test net output #1: loss = 1.2533 (* 1 = 1.2533 loss)\nI0818 06:11:01.419225 17344 solver.cpp:228] Iteration 22500, loss = 0.0353342\nI0818 06:11:01.419286 17344 solver.cpp:244]     Train net output #0: 
accuracy = 0.992\nI0818 06:11:01.419304 17344 solver.cpp:244]     Train net output #1: loss = 0.0353343 (* 1 = 0.0353343 loss)\nI0818 06:11:01.513994 17344 sgd_solver.cpp:166] Iteration 22500, lr = 0.0035\nI0818 06:13:18.480427 17344 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 06:14:39.820904 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76544\nI0818 06:14:39.821223 17344 solver.cpp:404]     Test net output #1: loss = 1.05028 (* 1 = 1.05028 loss)\nI0818 06:14:41.130486 17344 solver.cpp:228] Iteration 22600, loss = 0.0230428\nI0818 06:14:41.130545 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:14:41.130563 17344 solver.cpp:244]     Train net output #1: loss = 0.0230429 (* 1 = 0.0230429 loss)\nI0818 06:14:41.226948 17344 sgd_solver.cpp:166] Iteration 22600, lr = 0.0035\nI0818 06:16:58.187685 17344 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:18:19.553508 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8\nI0818 06:18:19.553755 17344 solver.cpp:404]     Test net output #1: loss = 0.861182 (* 1 = 0.861182 loss)\nI0818 06:18:20.863034 17344 solver.cpp:228] Iteration 22700, loss = 0.059741\nI0818 06:18:20.863093 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:18:20.863111 17344 solver.cpp:244]     Train net output #1: loss = 0.0597412 (* 1 = 0.0597412 loss)\nI0818 06:18:20.960263 17344 sgd_solver.cpp:166] Iteration 22700, lr = 0.0035\nI0818 06:20:38.164919 17344 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:21:59.518821 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75768\nI0818 06:21:59.519052 17344 solver.cpp:404]     Test net output #1: loss = 1.09827 (* 1 = 1.09827 loss)\nI0818 06:22:00.829957 17344 solver.cpp:228] Iteration 22800, loss = 0.0433538\nI0818 06:22:00.830016 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:22:00.830034 17344 solver.cpp:244]     Train net output #1: loss = 0.0433539 (* 1 = 0.0433539 
loss)\nI0818 06:22:00.921928 17344 sgd_solver.cpp:166] Iteration 22800, lr = 0.0035\nI0818 06:24:18.551995 17344 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:25:39.896800 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67584\nI0818 06:25:39.897038 17344 solver.cpp:404]     Test net output #1: loss = 1.90761 (* 1 = 1.90761 loss)\nI0818 06:25:41.206547 17344 solver.cpp:228] Iteration 22900, loss = 0.114737\nI0818 06:25:41.206607 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 06:25:41.206624 17344 solver.cpp:244]     Train net output #1: loss = 0.114737 (* 1 = 0.114737 loss)\nI0818 06:25:41.301858 17344 sgd_solver.cpp:166] Iteration 22900, lr = 0.0035\nI0818 06:27:59.037392 17344 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 06:29:20.382495 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76248\nI0818 06:29:20.382725 17344 solver.cpp:404]     Test net output #1: loss = 1.29808 (* 1 = 1.29808 loss)\nI0818 06:29:21.692155 17344 solver.cpp:228] Iteration 23000, loss = 0.0768599\nI0818 06:29:21.692216 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:29:21.692234 17344 solver.cpp:244]     Train net output #1: loss = 0.0768599 (* 1 = 0.0768599 loss)\nI0818 06:29:21.788183 17344 sgd_solver.cpp:166] Iteration 23000, lr = 0.0035\nI0818 06:31:39.448809 17344 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:33:00.791684 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74288\nI0818 06:33:00.791960 17344 solver.cpp:404]     Test net output #1: loss = 1.49591 (* 1 = 1.49591 loss)\nI0818 06:33:02.101598 17344 solver.cpp:228] Iteration 23100, loss = 0.117761\nI0818 06:33:02.101655 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:33:02.101672 17344 solver.cpp:244]     Train net output #1: loss = 0.117761 (* 1 = 0.117761 loss)\nI0818 06:33:02.197703 17344 sgd_solver.cpp:166] Iteration 23100, lr = 0.0035\nI0818 06:35:19.906286 17344 
solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:36:41.269294 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30828\nI0818 06:36:41.269520 17344 solver.cpp:404]     Test net output #1: loss = 5.6228 (* 1 = 5.6228 loss)\nI0818 06:36:42.579443 17344 solver.cpp:228] Iteration 23200, loss = 0.0575752\nI0818 06:36:42.579502 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:36:42.579519 17344 solver.cpp:244]     Train net output #1: loss = 0.0575753 (* 1 = 0.0575753 loss)\nI0818 06:36:42.669855 17344 sgd_solver.cpp:166] Iteration 23200, lr = 0.0035\nI0818 06:39:00.397085 17344 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:40:21.764119 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78504\nI0818 06:40:21.764387 17344 solver.cpp:404]     Test net output #1: loss = 0.978406 (* 1 = 0.978406 loss)\nI0818 06:40:23.075405 17344 solver.cpp:228] Iteration 23300, loss = 0.0938122\nI0818 06:40:23.075462 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 06:40:23.075480 17344 solver.cpp:244]     Train net output #1: loss = 0.0938123 (* 1 = 0.0938123 loss)\nI0818 06:40:23.170812 17344 sgd_solver.cpp:166] Iteration 23300, lr = 0.0035\nI0818 06:42:40.799459 17344 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:44:01.802436 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7348\nI0818 06:44:01.802650 17344 solver.cpp:404]     Test net output #1: loss = 1.2669 (* 1 = 1.2669 loss)\nI0818 06:44:03.115643 17344 solver.cpp:228] Iteration 23400, loss = 0.0660327\nI0818 06:44:03.115701 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:44:03.115717 17344 solver.cpp:244]     Train net output #1: loss = 0.0660328 (* 1 = 0.0660328 loss)\nI0818 06:44:03.205822 17344 sgd_solver.cpp:166] Iteration 23400, lr = 0.0035\nI0818 06:46:20.995127 17344 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 06:47:41.970788 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.74844\nI0818 06:47:41.971052 17344 solver.cpp:404]     Test net output #1: loss = 1.11196 (* 1 = 1.11196 loss)\nI0818 06:47:43.284129 17344 solver.cpp:228] Iteration 23500, loss = 0.0512543\nI0818 06:47:43.284186 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:47:43.284204 17344 solver.cpp:244]     Train net output #1: loss = 0.0512543 (* 1 = 0.0512543 loss)\nI0818 06:47:43.376325 17344 sgd_solver.cpp:166] Iteration 23500, lr = 0.0035\nI0818 06:50:01.045421 17344 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0818 06:51:22.246527 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66532\nI0818 06:51:22.246819 17344 solver.cpp:404]     Test net output #1: loss = 1.73155 (* 1 = 1.73155 loss)\nI0818 06:51:23.560297 17344 solver.cpp:228] Iteration 23600, loss = 0.0606036\nI0818 06:51:23.560359 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:51:23.560382 17344 solver.cpp:244]     Train net output #1: loss = 0.0606037 (* 1 = 0.0606037 loss)\nI0818 06:51:23.653944 17344 sgd_solver.cpp:166] Iteration 23600, lr = 0.0035\nI0818 06:53:41.366233 17344 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 06:55:02.771720 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68344\nI0818 06:55:02.771970 17344 solver.cpp:404]     Test net output #1: loss = 1.63487 (* 1 = 1.63487 loss)\nI0818 06:55:04.084926 17344 solver.cpp:228] Iteration 23700, loss = 0.0458977\nI0818 06:55:04.084983 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:55:04.085001 17344 solver.cpp:244]     Train net output #1: loss = 0.0458978 (* 1 = 0.0458978 loss)\nI0818 06:55:04.180220 17344 sgd_solver.cpp:166] Iteration 23700, lr = 0.0035\nI0818 06:57:21.653626 17344 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 06:58:43.051637 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73096\nI0818 06:58:43.051892 17344 solver.cpp:404]     Test net output #1: loss = 1.34843 (* 1 = 1.34843 
loss)\nI0818 06:58:44.365209 17344 solver.cpp:228] Iteration 23800, loss = 0.062667\nI0818 06:58:44.365265 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:58:44.365283 17344 solver.cpp:244]     Train net output #1: loss = 0.0626671 (* 1 = 0.0626671 loss)\nI0818 06:58:44.456666 17344 sgd_solver.cpp:166] Iteration 23800, lr = 0.0035\nI0818 07:01:02.014037 17344 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 07:02:23.412328 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7092\nI0818 07:02:23.412616 17344 solver.cpp:404]     Test net output #1: loss = 1.56371 (* 1 = 1.56371 loss)\nI0818 07:02:24.724483 17344 solver.cpp:228] Iteration 23900, loss = 0.0340182\nI0818 07:02:24.724539 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:02:24.724556 17344 solver.cpp:244]     Train net output #1: loss = 0.0340182 (* 1 = 0.0340182 loss)\nI0818 07:02:24.820292 17344 sgd_solver.cpp:166] Iteration 23900, lr = 0.0035\nI0818 07:04:42.431313 17344 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 07:06:03.524991 17344 solver.cpp:404]     Test net output #0: accuracy = 0.694\nI0818 07:06:03.525244 17344 solver.cpp:404]     Test net output #1: loss = 1.74068 (* 1 = 1.74068 loss)\nI0818 07:06:04.837478 17344 solver.cpp:228] Iteration 24000, loss = 0.028064\nI0818 07:06:04.837533 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:06:04.837551 17344 solver.cpp:244]     Train net output #1: loss = 0.028064 (* 1 = 0.028064 loss)\nI0818 07:06:04.926908 17344 sgd_solver.cpp:166] Iteration 24000, lr = 0.0035\nI0818 07:08:22.498641 17344 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 07:09:43.906399 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56092\nI0818 07:09:43.906649 17344 solver.cpp:404]     Test net output #1: loss = 2.81344 (* 1 = 2.81344 loss)\nI0818 07:09:45.219163 17344 solver.cpp:228] Iteration 24100, loss = 0.0430354\nI0818 07:09:45.219218 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:09:45.219235 17344 solver.cpp:244]     Train net output #1: loss = 0.0430355 (* 1 = 0.0430355 loss)\nI0818 07:09:45.310667 17344 sgd_solver.cpp:166] Iteration 24100, lr = 0.0035\nI0818 07:12:02.932200 17344 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 07:13:24.331790 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7746\nI0818 07:13:24.332013 17344 solver.cpp:404]     Test net output #1: loss = 1.16664 (* 1 = 1.16664 loss)\nI0818 07:13:25.643599 17344 solver.cpp:228] Iteration 24200, loss = 0.0370249\nI0818 07:13:25.643653 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:13:25.643671 17344 solver.cpp:244]     Train net output #1: loss = 0.0370249 (* 1 = 0.0370249 loss)\nI0818 07:13:25.736518 17344 sgd_solver.cpp:166] Iteration 24200, lr = 0.0035\nI0818 07:15:43.383702 17344 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 07:17:04.782965 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56836\nI0818 07:17:04.783210 17344 solver.cpp:404]     Test net output #1: loss = 2.55007 (* 1 = 2.55007 loss)\nI0818 07:17:06.094708 17344 solver.cpp:228] Iteration 24300, loss = 0.0194444\nI0818 07:17:06.094764 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:17:06.094782 17344 solver.cpp:244]     Train net output #1: loss = 0.0194445 (* 1 = 0.0194445 loss)\nI0818 07:17:06.191610 17344 sgd_solver.cpp:166] Iteration 24300, lr = 0.0035\nI0818 07:19:23.922216 17344 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:20:44.740166 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63952\nI0818 07:20:44.740408 17344 solver.cpp:404]     Test net output #1: loss = 2.14742 (* 1 = 2.14742 loss)\nI0818 07:20:46.051973 17344 solver.cpp:228] Iteration 24400, loss = 0.0540439\nI0818 07:20:46.052027 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:20:46.052045 17344 solver.cpp:244]     Train net 
output #1: loss = 0.0540439 (* 1 = 0.0540439 loss)\nI0818 07:20:46.145746 17344 sgd_solver.cpp:166] Iteration 24400, lr = 0.0035\nI0818 07:23:03.788138 17344 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:24:24.685600 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67296\nI0818 07:24:24.685812 17344 solver.cpp:404]     Test net output #1: loss = 1.63804 (* 1 = 1.63804 loss)\nI0818 07:24:25.995723 17344 solver.cpp:228] Iteration 24500, loss = 0.0440582\nI0818 07:24:25.995775 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:24:25.995792 17344 solver.cpp:244]     Train net output #1: loss = 0.0440581 (* 1 = 0.0440581 loss)\nI0818 07:24:26.091861 17344 sgd_solver.cpp:166] Iteration 24500, lr = 0.0035\nI0818 07:26:43.738775 17344 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 07:28:04.733218 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56052\nI0818 07:28:04.733449 17344 solver.cpp:404]     Test net output #1: loss = 3.08452 (* 1 = 3.08452 loss)\nI0818 07:28:06.045243 17344 solver.cpp:228] Iteration 24600, loss = 0.0361317\nI0818 07:28:06.045300 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:28:06.045317 17344 solver.cpp:244]     Train net output #1: loss = 0.0361317 (* 1 = 0.0361317 loss)\nI0818 07:28:06.139480 17344 sgd_solver.cpp:166] Iteration 24600, lr = 0.0035\nI0818 07:30:23.804016 17344 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:31:44.779906 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73852\nI0818 07:31:44.780166 17344 solver.cpp:404]     Test net output #1: loss = 1.2787 (* 1 = 1.2787 loss)\nI0818 07:31:46.092351 17344 solver.cpp:228] Iteration 24700, loss = 0.0761095\nI0818 07:31:46.092404 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:31:46.092422 17344 solver.cpp:244]     Train net output #1: loss = 0.0761095 (* 1 = 0.0761095 loss)\nI0818 07:31:46.187830 17344 sgd_solver.cpp:166] Iteration 24700, lr = 
0.0035\nI0818 07:34:03.903676 17344 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:35:24.757920 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34872\nI0818 07:35:24.758152 17344 solver.cpp:404]     Test net output #1: loss = 10.122 (* 1 = 10.122 loss)\nI0818 07:35:26.069972 17344 solver.cpp:228] Iteration 24800, loss = 0.0627837\nI0818 07:35:26.070026 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:35:26.070042 17344 solver.cpp:244]     Train net output #1: loss = 0.0627837 (* 1 = 0.0627837 loss)\nI0818 07:35:26.164705 17344 sgd_solver.cpp:166] Iteration 24800, lr = 0.0035\nI0818 07:37:43.958258 17344 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:39:04.778813 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77512\nI0818 07:39:04.779047 17344 solver.cpp:404]     Test net output #1: loss = 1.07346 (* 1 = 1.07346 loss)\nI0818 07:39:06.088462 17344 solver.cpp:228] Iteration 24900, loss = 0.0376526\nI0818 07:39:06.088515 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:39:06.088532 17344 solver.cpp:244]     Train net output #1: loss = 0.0376526 (* 1 = 0.0376526 loss)\nI0818 07:39:06.186826 17344 sgd_solver.cpp:166] Iteration 24900, lr = 0.0035\nI0818 07:41:23.312523 17344 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:42:44.433614 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7252\nI0818 07:42:44.433905 17344 solver.cpp:404]     Test net output #1: loss = 1.37517 (* 1 = 1.37517 loss)\nI0818 07:42:45.744060 17344 solver.cpp:228] Iteration 25000, loss = 0.0836266\nI0818 07:42:45.744114 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 07:42:45.744132 17344 solver.cpp:244]     Train net output #1: loss = 0.0836266 (* 1 = 0.0836266 loss)\nI0818 07:42:45.839414 17344 sgd_solver.cpp:166] Iteration 25000, lr = 0.0035\nI0818 07:45:02.934113 17344 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 07:46:24.327241 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.65692\nI0818 07:46:24.327472 17344 solver.cpp:404]     Test net output #1: loss = 1.86147 (* 1 = 1.86147 loss)\nI0818 07:46:25.636423 17344 solver.cpp:228] Iteration 25100, loss = 0.0402846\nI0818 07:46:25.636476 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:46:25.636493 17344 solver.cpp:244]     Train net output #1: loss = 0.0402846 (* 1 = 0.0402846 loss)\nI0818 07:46:25.733541 17344 sgd_solver.cpp:166] Iteration 25100, lr = 0.0035\nI0818 07:48:42.825206 17344 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 07:50:04.226932 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62856\nI0818 07:50:04.227169 17344 solver.cpp:404]     Test net output #1: loss = 2.41456 (* 1 = 2.41456 loss)\nI0818 07:50:05.536939 17344 solver.cpp:228] Iteration 25200, loss = 0.034819\nI0818 07:50:05.536996 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:50:05.537014 17344 solver.cpp:244]     Train net output #1: loss = 0.034819 (* 1 = 0.034819 loss)\nI0818 07:50:05.634804 17344 sgd_solver.cpp:166] Iteration 25200, lr = 0.0035\nI0818 07:52:22.680377 17344 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 07:53:43.741360 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61364\nI0818 07:53:43.741605 17344 solver.cpp:404]     Test net output #1: loss = 2.62711 (* 1 = 2.62711 loss)\nI0818 07:53:45.051417 17344 solver.cpp:228] Iteration 25300, loss = 0.0570471\nI0818 07:53:45.051471 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:53:45.051488 17344 solver.cpp:244]     Train net output #1: loss = 0.0570471 (* 1 = 0.0570471 loss)\nI0818 07:53:45.140624 17344 sgd_solver.cpp:166] Iteration 25300, lr = 0.0035\nI0818 07:56:02.119043 17344 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 07:57:23.514819 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71992\nI0818 07:57:23.515080 17344 solver.cpp:404]     Test net output 
#1: loss = 1.41634 (* 1 = 1.41634 loss)\nI0818 07:57:24.824276 17344 solver.cpp:228] Iteration 25400, loss = 0.0252541\nI0818 07:57:24.824331 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:57:24.824347 17344 solver.cpp:244]     Train net output #1: loss = 0.0252541 (* 1 = 0.0252541 loss)\nI0818 07:57:24.919461 17344 sgd_solver.cpp:166] Iteration 25400, lr = 0.0035\nI0818 07:59:41.960698 17344 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 08:01:03.357007 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10588\nI0818 08:01:03.357275 17344 solver.cpp:404]     Test net output #1: loss = 30.4052 (* 1 = 30.4052 loss)\nI0818 08:01:04.667095 17344 solver.cpp:228] Iteration 25500, loss = 0.0450026\nI0818 08:01:04.667155 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:01:04.667171 17344 solver.cpp:244]     Train net output #1: loss = 0.0450026 (* 1 = 0.0450026 loss)\nI0818 08:01:04.765139 17344 sgd_solver.cpp:166] Iteration 25500, lr = 0.0035\nI0818 08:03:21.821799 17344 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 08:04:42.699213 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6612\nI0818 08:04:42.699421 17344 solver.cpp:404]     Test net output #1: loss = 2.12719 (* 1 = 2.12719 loss)\nI0818 08:04:44.008805 17344 solver.cpp:228] Iteration 25600, loss = 0.0619771\nI0818 08:04:44.008862 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:04:44.008879 17344 solver.cpp:244]     Train net output #1: loss = 0.0619771 (* 1 = 0.0619771 loss)\nI0818 08:04:44.106145 17344 sgd_solver.cpp:166] Iteration 25600, lr = 0.0035\nI0818 08:07:01.268106 17344 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 08:08:22.121737 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66156\nI0818 08:08:22.121990 17344 solver.cpp:404]     Test net output #1: loss = 2.3111 (* 1 = 2.3111 loss)\nI0818 08:08:23.431210 17344 solver.cpp:228] Iteration 25700, loss = 0.0768102\nI0818 
08:08:23.431267 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:08:23.431283 17344 solver.cpp:244]     Train net output #1: loss = 0.0768103 (* 1 = 0.0768103 loss)\nI0818 08:08:23.527416 17344 sgd_solver.cpp:166] Iteration 25700, lr = 0.0035\nI0818 08:10:40.527667 17344 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 08:12:01.670020 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75784\nI0818 08:12:01.670271 17344 solver.cpp:404]     Test net output #1: loss = 1.13058 (* 1 = 1.13058 loss)\nI0818 08:12:02.979655 17344 solver.cpp:228] Iteration 25800, loss = 0.0486672\nI0818 08:12:02.979709 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:12:02.979725 17344 solver.cpp:244]     Train net output #1: loss = 0.0486672 (* 1 = 0.0486672 loss)\nI0818 08:12:03.070729 17344 sgd_solver.cpp:166] Iteration 25800, lr = 0.0035\nI0818 08:14:20.200583 17344 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 08:15:41.385553 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74484\nI0818 08:15:41.385800 17344 solver.cpp:404]     Test net output #1: loss = 1.1986 (* 1 = 1.1986 loss)\nI0818 08:15:42.695538 17344 solver.cpp:228] Iteration 25900, loss = 0.0448313\nI0818 08:15:42.695595 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:15:42.695611 17344 solver.cpp:244]     Train net output #1: loss = 0.0448313 (* 1 = 0.0448313 loss)\nI0818 08:15:42.787564 17344 sgd_solver.cpp:166] Iteration 25900, lr = 0.0035\nI0818 08:17:59.961153 17344 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:19:21.346035 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68444\nI0818 08:19:21.346284 17344 solver.cpp:404]     Test net output #1: loss = 1.76839 (* 1 = 1.76839 loss)\nI0818 08:19:22.655814 17344 solver.cpp:228] Iteration 26000, loss = 0.0925141\nI0818 08:19:22.655870 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:19:22.655892 17344 
solver.cpp:244]     Train net output #1: loss = 0.092514 (* 1 = 0.092514 loss)\nI0818 08:19:22.744561 17344 sgd_solver.cpp:166] Iteration 26000, lr = 0.0035\nI0818 08:21:39.800592 17344 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:23:01.188139 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0818 08:23:01.188396 17344 solver.cpp:404]     Test net output #1: loss = 1.62719 (* 1 = 1.62719 loss)\nI0818 08:23:02.497922 17344 solver.cpp:228] Iteration 26100, loss = 0.0467732\nI0818 08:23:02.497977 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:23:02.497994 17344 solver.cpp:244]     Train net output #1: loss = 0.0467731 (* 1 = 0.0467731 loss)\nI0818 08:23:02.591619 17344 sgd_solver.cpp:166] Iteration 26100, lr = 0.0035\nI0818 08:25:19.646013 17344 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 08:26:41.057904 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63356\nI0818 08:26:41.058163 17344 solver.cpp:404]     Test net output #1: loss = 2.2159 (* 1 = 2.2159 loss)\nI0818 08:26:42.369524 17344 solver.cpp:228] Iteration 26200, loss = 0.111666\nI0818 08:26:42.369585 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 08:26:42.369611 17344 solver.cpp:244]     Train net output #1: loss = 0.111666 (* 1 = 0.111666 loss)\nI0818 08:26:42.462703 17344 sgd_solver.cpp:166] Iteration 26200, lr = 0.0035\nI0818 08:28:59.987126 17344 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:30:21.377586 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73188\nI0818 08:30:21.377856 17344 solver.cpp:404]     Test net output #1: loss = 1.41693 (* 1 = 1.41693 loss)\nI0818 08:30:22.687295 17344 solver.cpp:228] Iteration 26300, loss = 0.0719721\nI0818 08:30:22.687348 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:30:22.687366 17344 solver.cpp:244]     Train net output #1: loss = 0.0719721 (* 1 = 0.0719721 loss)\nI0818 08:30:22.785002 17344 
sgd_solver.cpp:166] Iteration 26300, lr = 0.0035\nI0818 08:32:40.461256 17344 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:34:01.650344 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66056\nI0818 08:34:01.650593 17344 solver.cpp:404]     Test net output #1: loss = 1.9968 (* 1 = 1.9968 loss)\nI0818 08:34:02.959944 17344 solver.cpp:228] Iteration 26400, loss = 0.074631\nI0818 08:34:02.959997 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:34:02.960016 17344 solver.cpp:244]     Train net output #1: loss = 0.074631 (* 1 = 0.074631 loss)\nI0818 08:34:03.057760 17344 sgd_solver.cpp:166] Iteration 26400, lr = 0.0035\nI0818 08:36:20.748615 17344 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:37:42.116006 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7162\nI0818 08:37:42.116261 17344 solver.cpp:404]     Test net output #1: loss = 1.5887 (* 1 = 1.5887 loss)\nI0818 08:37:43.425819 17344 solver.cpp:228] Iteration 26500, loss = 0.0960952\nI0818 08:37:43.425874 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:37:43.425899 17344 solver.cpp:244]     Train net output #1: loss = 0.0960952 (* 1 = 0.0960952 loss)\nI0818 08:37:43.522414 17344 sgd_solver.cpp:166] Iteration 26500, lr = 0.0035\nI0818 08:40:01.274235 17344 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:41:22.667873 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74748\nI0818 08:41:22.668128 17344 solver.cpp:404]     Test net output #1: loss = 1.15304 (* 1 = 1.15304 loss)\nI0818 08:41:23.978003 17344 solver.cpp:228] Iteration 26600, loss = 0.0192487\nI0818 08:41:23.978056 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:41:23.978072 17344 solver.cpp:244]     Train net output #1: loss = 0.0192487 (* 1 = 0.0192487 loss)\nI0818 08:41:24.069826 17344 sgd_solver.cpp:166] Iteration 26600, lr = 0.0035\nI0818 08:43:41.780977 17344 solver.cpp:337] Iteration 26700, Testing net 
(#0)\nI0818 08:45:03.103516 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71588\nI0818 08:45:03.103749 17344 solver.cpp:404]     Test net output #1: loss = 1.37584 (* 1 = 1.37584 loss)\nI0818 08:45:04.413091 17344 solver.cpp:228] Iteration 26700, loss = 0.113008\nI0818 08:45:04.413147 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 08:45:04.413164 17344 solver.cpp:244]     Train net output #1: loss = 0.113008 (* 1 = 0.113008 loss)\nI0818 08:45:04.510581 17344 sgd_solver.cpp:166] Iteration 26700, lr = 0.0035\nI0818 08:47:22.180835 17344 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 08:48:43.185768 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6988\nI0818 08:48:43.185998 17344 solver.cpp:404]     Test net output #1: loss = 1.62216 (* 1 = 1.62216 loss)\nI0818 08:48:44.495421 17344 solver.cpp:228] Iteration 26800, loss = 0.0189532\nI0818 08:48:44.495476 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:48:44.495493 17344 solver.cpp:244]     Train net output #1: loss = 0.0189532 (* 1 = 0.0189532 loss)\nI0818 08:48:44.592106 17344 sgd_solver.cpp:166] Iteration 26800, lr = 0.0035\nI0818 08:51:02.288494 17344 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 08:52:23.681042 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70452\nI0818 08:52:23.681324 17344 solver.cpp:404]     Test net output #1: loss = 1.52715 (* 1 = 1.52715 loss)\nI0818 08:52:24.990573 17344 solver.cpp:228] Iteration 26900, loss = 0.0704744\nI0818 08:52:24.990628 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:52:24.990645 17344 solver.cpp:244]     Train net output #1: loss = 0.0704744 (* 1 = 0.0704744 loss)\nI0818 08:52:25.082779 17344 sgd_solver.cpp:166] Iteration 26900, lr = 0.0035\nI0818 08:54:42.725152 17344 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 08:56:04.116008 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68076\nI0818 08:56:04.116263 17344 
solver.cpp:404]     Test net output #1: loss = 1.76034 (* 1 = 1.76034 loss)\nI0818 08:56:05.425340 17344 solver.cpp:228] Iteration 27000, loss = 0.0619367\nI0818 08:56:05.425397 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:56:05.425415 17344 solver.cpp:244]     Train net output #1: loss = 0.0619367 (* 1 = 0.0619367 loss)\nI0818 08:56:05.515946 17344 sgd_solver.cpp:166] Iteration 27000, lr = 0.0035\nI0818 08:58:23.217386 17344 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 08:59:44.622153 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75436\nI0818 08:59:44.622406 17344 solver.cpp:404]     Test net output #1: loss = 1.1206 (* 1 = 1.1206 loss)\nI0818 08:59:45.931517 17344 solver.cpp:228] Iteration 27100, loss = 0.0529385\nI0818 08:59:45.931571 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:59:45.931589 17344 solver.cpp:244]     Train net output #1: loss = 0.0529385 (* 1 = 0.0529385 loss)\nI0818 08:59:46.028812 17344 sgd_solver.cpp:166] Iteration 27100, lr = 0.0035\nI0818 09:02:03.651047 17344 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 09:03:25.055523 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79996\nI0818 09:03:25.055768 17344 solver.cpp:404]     Test net output #1: loss = 0.882441 (* 1 = 0.882441 loss)\nI0818 09:03:26.365279 17344 solver.cpp:228] Iteration 27200, loss = 0.0811951\nI0818 09:03:26.365339 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:03:26.365356 17344 solver.cpp:244]     Train net output #1: loss = 0.0811951 (* 1 = 0.0811951 loss)\nI0818 09:03:26.461190 17344 sgd_solver.cpp:166] Iteration 27200, lr = 0.0035\nI0818 09:05:44.136337 17344 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 09:07:05.505405 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0818 09:07:05.505661 17344 solver.cpp:404]     Test net output #1: loss = 1.38038 (* 1 = 1.38038 loss)\nI0818 09:07:06.814973 17344 solver.cpp:228] 
Iteration 27300, loss = 0.0666723\nI0818 09:07:06.815032 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:07:06.815050 17344 solver.cpp:244]     Train net output #1: loss = 0.0666723 (* 1 = 0.0666723 loss)\nI0818 09:07:06.909847 17344 sgd_solver.cpp:166] Iteration 27300, lr = 0.0035\nI0818 09:09:24.584975 17344 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 09:10:45.955329 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72452\nI0818 09:10:45.955602 17344 solver.cpp:404]     Test net output #1: loss = 1.38817 (* 1 = 1.38817 loss)\nI0818 09:10:47.264789 17344 solver.cpp:228] Iteration 27400, loss = 0.058769\nI0818 09:10:47.264849 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:10:47.264866 17344 solver.cpp:244]     Train net output #1: loss = 0.0587689 (* 1 = 0.0587689 loss)\nI0818 09:10:47.358460 17344 sgd_solver.cpp:166] Iteration 27400, lr = 0.0035\nI0818 09:13:05.038179 17344 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 09:14:26.434571 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7624\nI0818 09:14:26.434798 17344 solver.cpp:404]     Test net output #1: loss = 1.11744 (* 1 = 1.11744 loss)\nI0818 09:14:27.744546 17344 solver.cpp:228] Iteration 27500, loss = 0.0777604\nI0818 09:14:27.744606 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:14:27.744623 17344 solver.cpp:244]     Train net output #1: loss = 0.0777603 (* 1 = 0.0777603 loss)\nI0818 09:14:27.841985 17344 sgd_solver.cpp:166] Iteration 27500, lr = 0.0035\nI0818 09:16:45.434900 17344 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:18:06.449004 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61388\nI0818 09:18:06.449255 17344 solver.cpp:404]     Test net output #1: loss = 2.55739 (* 1 = 2.55739 loss)\nI0818 09:18:07.759500 17344 solver.cpp:228] Iteration 27600, loss = 0.0762973\nI0818 09:18:07.759560 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI0818 09:18:07.759578 17344 solver.cpp:244]     Train net output #1: loss = 0.0762972 (* 1 = 0.0762972 loss)\nI0818 09:18:07.850735 17344 sgd_solver.cpp:166] Iteration 27600, lr = 0.0035\nI0818 09:20:25.504308 17344 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:21:46.912289 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70848\nI0818 09:21:46.912540 17344 solver.cpp:404]     Test net output #1: loss = 1.40734 (* 1 = 1.40734 loss)\nI0818 09:21:48.222648 17344 solver.cpp:228] Iteration 27700, loss = 0.0860479\nI0818 09:21:48.222709 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:21:48.222728 17344 solver.cpp:244]     Train net output #1: loss = 0.0860479 (* 1 = 0.0860479 loss)\nI0818 09:21:48.312135 17344 sgd_solver.cpp:166] Iteration 27700, lr = 0.0035\nI0818 09:24:06.121042 17344 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 09:25:27.178344 17344 solver.cpp:404]     Test net output #0: accuracy = 0.54404\nI0818 09:25:27.178573 17344 solver.cpp:404]     Test net output #1: loss = 2.8932 (* 1 = 2.8932 loss)\nI0818 09:25:28.489212 17344 solver.cpp:228] Iteration 27800, loss = 0.024601\nI0818 09:25:28.489274 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:25:28.489291 17344 solver.cpp:244]     Train net output #1: loss = 0.0246009 (* 1 = 0.0246009 loss)\nI0818 09:25:28.582286 17344 sgd_solver.cpp:166] Iteration 27800, lr = 0.0035\nI0818 09:27:46.158529 17344 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:29:07.469152 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61648\nI0818 09:29:07.469403 17344 solver.cpp:404]     Test net output #1: loss = 2.48584 (* 1 = 2.48584 loss)\nI0818 09:29:08.780048 17344 solver.cpp:228] Iteration 27900, loss = 0.0616711\nI0818 09:29:08.780109 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:29:08.780125 17344 solver.cpp:244]     Train net output #1: loss = 0.061671 (* 1 = 0.061671 loss)\nI0818 
09:29:08.877743 17344 sgd_solver.cpp:166] Iteration 27900, lr = 0.0035\nI0818 09:31:26.583942 17344 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:32:47.646957 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68228\nI0818 09:32:47.647231 17344 solver.cpp:404]     Test net output #1: loss = 1.76065 (* 1 = 1.76065 loss)\nI0818 09:32:48.957816 17344 solver.cpp:228] Iteration 28000, loss = 0.0228428\nI0818 09:32:48.957877 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:32:48.957898 17344 solver.cpp:244]     Train net output #1: loss = 0.0228427 (* 1 = 0.0228427 loss)\nI0818 09:32:49.049662 17344 sgd_solver.cpp:166] Iteration 28000, lr = 0.0035\nI0818 09:35:06.711848 17344 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:36:28.126066 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44056\nI0818 09:36:28.126322 17344 solver.cpp:404]     Test net output #1: loss = 5.0403 (* 1 = 5.0403 loss)\nI0818 09:36:29.436807 17344 solver.cpp:228] Iteration 28100, loss = 0.040676\nI0818 09:36:29.436866 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:36:29.436889 17344 solver.cpp:244]     Train net output #1: loss = 0.0406759 (* 1 = 0.0406759 loss)\nI0818 09:36:29.533036 17344 sgd_solver.cpp:166] Iteration 28100, lr = 0.0035\nI0818 09:38:47.142652 17344 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:40:08.551378 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7282\nI0818 09:40:08.551645 17344 solver.cpp:404]     Test net output #1: loss = 1.11328 (* 1 = 1.11328 loss)\nI0818 09:40:09.862715 17344 solver.cpp:228] Iteration 28200, loss = 0.0350038\nI0818 09:40:09.862776 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:40:09.862793 17344 solver.cpp:244]     Train net output #1: loss = 0.0350037 (* 1 = 0.0350037 loss)\nI0818 09:40:09.964167 17344 sgd_solver.cpp:166] Iteration 28200, lr = 0.0035\nI0818 09:42:27.747593 17344 solver.cpp:337] Iteration 
28300, Testing net (#0)\nI0818 09:43:49.148545 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70992\nI0818 09:43:49.148802 17344 solver.cpp:404]     Test net output #1: loss = 1.35353 (* 1 = 1.35353 loss)\nI0818 09:43:50.459677 17344 solver.cpp:228] Iteration 28300, loss = 0.0576086\nI0818 09:43:50.459738 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:43:50.459755 17344 solver.cpp:244]     Train net output #1: loss = 0.0576085 (* 1 = 0.0576085 loss)\nI0818 09:43:50.558094 17344 sgd_solver.cpp:166] Iteration 28300, lr = 0.0035\nI0818 09:46:08.220099 17344 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 09:47:29.607983 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56416\nI0818 09:47:29.608234 17344 solver.cpp:404]     Test net output #1: loss = 3.06661 (* 1 = 3.06661 loss)\nI0818 09:47:30.919278 17344 solver.cpp:228] Iteration 28400, loss = 0.112798\nI0818 09:47:30.919339 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 09:47:30.919358 17344 solver.cpp:244]     Train net output #1: loss = 0.112798 (* 1 = 0.112798 loss)\nI0818 09:47:31.014734 17344 sgd_solver.cpp:166] Iteration 28400, lr = 0.0035\nI0818 09:49:48.610792 17344 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 09:51:10.001736 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70012\nI0818 09:51:10.001986 17344 solver.cpp:404]     Test net output #1: loss = 1.53178 (* 1 = 1.53178 loss)\nI0818 09:51:11.312677 17344 solver.cpp:228] Iteration 28500, loss = 0.0787548\nI0818 09:51:11.312738 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:51:11.312757 17344 solver.cpp:244]     Train net output #1: loss = 0.0787547 (* 1 = 0.0787547 loss)\nI0818 09:51:11.411981 17344 sgd_solver.cpp:166] Iteration 28500, lr = 0.0035\nI0818 09:53:29.019924 17344 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 09:54:50.428563 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57264\nI0818 
09:54:50.428813 17344 solver.cpp:404]     Test net output #1: loss = 2.69635 (* 1 = 2.69635 loss)\nI0818 09:54:51.737978 17344 solver.cpp:228] Iteration 28600, loss = 0.0499343\nI0818 09:54:51.738041 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:54:51.738059 17344 solver.cpp:244]     Train net output #1: loss = 0.0499342 (* 1 = 0.0499342 loss)\nI0818 09:54:51.835649 17344 sgd_solver.cpp:166] Iteration 28600, lr = 0.0035\nI0818 09:57:08.921798 17344 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 09:58:30.316125 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18616\nI0818 09:58:30.316367 17344 solver.cpp:404]     Test net output #1: loss = 10.5978 (* 1 = 10.5978 loss)\nI0818 09:58:31.627216 17344 solver.cpp:228] Iteration 28700, loss = 0.0196565\nI0818 09:58:31.627276 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:58:31.627295 17344 solver.cpp:244]     Train net output #1: loss = 0.0196565 (* 1 = 0.0196565 loss)\nI0818 09:58:31.720367 17344 sgd_solver.cpp:166] Iteration 28700, lr = 0.0035\nI0818 10:00:48.716337 17344 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 10:02:09.824440 17344 solver.cpp:404]     Test net output #0: accuracy = 0.52304\nI0818 10:02:09.824689 17344 solver.cpp:404]     Test net output #1: loss = 2.96481 (* 1 = 2.96481 loss)\nI0818 10:02:11.135645 17344 solver.cpp:228] Iteration 28800, loss = 0.0251216\nI0818 10:02:11.135705 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:02:11.135723 17344 solver.cpp:244]     Train net output #1: loss = 0.0251215 (* 1 = 0.0251215 loss)\nI0818 10:02:11.231930 17344 sgd_solver.cpp:166] Iteration 28800, lr = 0.0035\nI0818 10:04:28.209218 17344 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 10:05:49.463760 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58108\nI0818 10:05:49.464009 17344 solver.cpp:404]     Test net output #1: loss = 2.20846 (* 1 = 2.20846 loss)\nI0818 10:05:50.774533 
17344 solver.cpp:228] Iteration 28900, loss = 0.0918808\nI0818 10:05:50.774593 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:05:50.774610 17344 solver.cpp:244]     Train net output #1: loss = 0.0918807 (* 1 = 0.0918807 loss)\nI0818 10:05:50.866957 17344 sgd_solver.cpp:166] Iteration 28900, lr = 0.0035\nI0818 10:08:07.920569 17344 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 10:09:29.309506 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66524\nI0818 10:09:29.309736 17344 solver.cpp:404]     Test net output #1: loss = 1.8457 (* 1 = 1.8457 loss)\nI0818 10:09:30.619865 17344 solver.cpp:228] Iteration 29000, loss = 0.0851289\nI0818 10:09:30.619930 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:09:30.619948 17344 solver.cpp:244]     Train net output #1: loss = 0.0851289 (* 1 = 0.0851289 loss)\nI0818 10:09:30.716336 17344 sgd_solver.cpp:166] Iteration 29000, lr = 0.0035\nI0818 10:11:47.785993 17344 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 10:13:09.154736 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67904\nI0818 10:13:09.155005 17344 solver.cpp:404]     Test net output #1: loss = 1.78338 (* 1 = 1.78338 loss)\nI0818 10:13:10.466043 17344 solver.cpp:228] Iteration 29100, loss = 0.0281653\nI0818 10:13:10.466104 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:13:10.466121 17344 solver.cpp:244]     Train net output #1: loss = 0.0281652 (* 1 = 0.0281652 loss)\nI0818 10:13:10.556849 17344 sgd_solver.cpp:166] Iteration 29100, lr = 0.0035\nI0818 10:15:27.631884 17344 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:16:49.039604 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59528\nI0818 10:16:49.039850 17344 solver.cpp:404]     Test net output #1: loss = 2.46137 (* 1 = 2.46137 loss)\nI0818 10:16:50.349542 17344 solver.cpp:228] Iteration 29200, loss = 0.104851\nI0818 10:16:50.349606 17344 solver.cpp:244]     Train net output 
#0: accuracy = 0.976\nI0818 10:16:50.349632 17344 solver.cpp:244]     Train net output #1: loss = 0.104851 (* 1 = 0.104851 loss)\nI0818 10:16:50.441786 17344 sgd_solver.cpp:166] Iteration 29200, lr = 0.0035\nI0818 10:19:07.568387 17344 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:20:28.977993 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71572\nI0818 10:20:28.978267 17344 solver.cpp:404]     Test net output #1: loss = 1.783 (* 1 = 1.783 loss)\nI0818 10:20:30.288858 17344 solver.cpp:228] Iteration 29300, loss = 0.065446\nI0818 10:20:30.288921 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:20:30.288946 17344 solver.cpp:244]     Train net output #1: loss = 0.0654459 (* 1 = 0.0654459 loss)\nI0818 10:20:30.379320 17344 sgd_solver.cpp:166] Iteration 29300, lr = 0.0035\nI0818 10:22:47.509379 17344 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:24:08.919642 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67408\nI0818 10:24:08.919903 17344 solver.cpp:404]     Test net output #1: loss = 1.94426 (* 1 = 1.94426 loss)\nI0818 10:24:10.230805 17344 solver.cpp:228] Iteration 29400, loss = 0.0994219\nI0818 10:24:10.230871 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:24:10.230896 17344 solver.cpp:244]     Train net output #1: loss = 0.0994218 (* 1 = 0.0994218 loss)\nI0818 10:24:10.319001 17344 sgd_solver.cpp:166] Iteration 29400, lr = 0.0035\nI0818 10:26:27.440701 17344 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:27:48.856787 17344 solver.cpp:404]     Test net output #0: accuracy = 0.785\nI0818 10:27:48.857097 17344 solver.cpp:404]     Test net output #1: loss = 0.936607 (* 1 = 0.936607 loss)\nI0818 10:27:50.167973 17344 solver.cpp:228] Iteration 29500, loss = 0.0414905\nI0818 10:27:50.168037 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:27:50.168063 17344 solver.cpp:244]     Train net output #1: loss = 0.0414904 (* 1 = 0.0414904 
loss)\nI0818 10:27:50.263223 17344 sgd_solver.cpp:166] Iteration 29500, lr = 0.0035\nI0818 10:30:07.289974 17344 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:31:28.692502 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61572\nI0818 10:31:28.692766 17344 solver.cpp:404]     Test net output #1: loss = 2.41933 (* 1 = 2.41933 loss)\nI0818 10:31:30.003494 17344 solver.cpp:228] Iteration 29600, loss = 0.0698557\nI0818 10:31:30.003556 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 10:31:30.003583 17344 solver.cpp:244]     Train net output #1: loss = 0.0698556 (* 1 = 0.0698556 loss)\nI0818 10:31:30.094475 17344 sgd_solver.cpp:166] Iteration 29600, lr = 0.0035\nI0818 10:33:46.768345 17344 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:35:07.135314 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66528\nI0818 10:35:07.135602 17344 solver.cpp:404]     Test net output #1: loss = 1.92869 (* 1 = 1.92869 loss)\nI0818 10:35:08.441565 17344 solver.cpp:228] Iteration 29700, loss = 0.0398763\nI0818 10:35:08.441606 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:35:08.441622 17344 solver.cpp:244]     Train net output #1: loss = 0.0398762 (* 1 = 0.0398762 loss)\nI0818 10:35:08.533887 17344 sgd_solver.cpp:166] Iteration 29700, lr = 0.0035\nI0818 10:37:25.029713 17344 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 10:38:45.396744 17344 solver.cpp:404]     Test net output #0: accuracy = 0.1936\nI0818 10:38:45.397045 17344 solver.cpp:404]     Test net output #1: loss = 9.10782 (* 1 = 9.10782 loss)\nI0818 10:38:46.701954 17344 solver.cpp:228] Iteration 29800, loss = 0.0577163\nI0818 10:38:46.701994 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:38:46.702014 17344 solver.cpp:244]     Train net output #1: loss = 0.0577162 (* 1 = 0.0577162 loss)\nI0818 10:38:46.798708 17344 sgd_solver.cpp:166] Iteration 29800, lr = 0.0035\nI0818 10:41:03.422232 17344 
solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:42:23.792896 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62728\nI0818 10:42:23.793211 17344 solver.cpp:404]     Test net output #1: loss = 2.50077 (* 1 = 2.50077 loss)\nI0818 10:42:25.098954 17344 solver.cpp:228] Iteration 29900, loss = 0.0492689\nI0818 10:42:25.098995 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:42:25.099016 17344 solver.cpp:244]     Train net output #1: loss = 0.0492688 (* 1 = 0.0492688 loss)\nI0818 10:42:25.191215 17344 sgd_solver.cpp:166] Iteration 29900, lr = 0.0035\nI0818 10:44:41.789299 17344 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 10:46:02.164345 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58364\nI0818 10:46:02.164633 17344 solver.cpp:404]     Test net output #1: loss = 2.47889 (* 1 = 2.47889 loss)\nI0818 10:46:03.470294 17344 solver.cpp:228] Iteration 30000, loss = 0.0924617\nI0818 10:46:03.470335 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:46:03.470350 17344 solver.cpp:244]     Train net output #1: loss = 0.0924616 (* 1 = 0.0924616 loss)\nI0818 10:46:03.567919 17344 sgd_solver.cpp:166] Iteration 30000, lr = 0.0035\nI0818 10:48:20.745414 17344 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 10:49:41.029783 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69344\nI0818 10:49:41.030083 17344 solver.cpp:404]     Test net output #1: loss = 1.6959 (* 1 = 1.6959 loss)\nI0818 10:49:42.335464 17344 solver.cpp:228] Iteration 30100, loss = 0.0627382\nI0818 10:49:42.335503 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:49:42.335518 17344 solver.cpp:244]     Train net output #1: loss = 0.0627381 (* 1 = 0.0627381 loss)\nI0818 10:49:42.428768 17344 sgd_solver.cpp:166] Iteration 30100, lr = 0.0035\nI0818 10:51:59.536841 17344 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 10:53:19.814977 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.61464\nI0818 10:53:19.815268 17344 solver.cpp:404]     Test net output #1: loss = 2.48333 (* 1 = 2.48333 loss)\nI0818 10:53:21.120522 17344 solver.cpp:228] Iteration 30200, loss = 0.0473235\nI0818 10:53:21.120563 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:53:21.120578 17344 solver.cpp:244]     Train net output #1: loss = 0.0473234 (* 1 = 0.0473234 loss)\nI0818 10:53:21.212543 17344 sgd_solver.cpp:166] Iteration 30200, lr = 0.0035\nI0818 10:55:38.441361 17344 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 10:56:58.720913 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6472\nI0818 10:56:58.721153 17344 solver.cpp:404]     Test net output #1: loss = 2.58762 (* 1 = 2.58762 loss)\nI0818 10:57:00.027433 17344 solver.cpp:228] Iteration 30300, loss = 0.0960592\nI0818 10:57:00.027477 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:57:00.027493 17344 solver.cpp:244]     Train net output #1: loss = 0.0960591 (* 1 = 0.0960591 loss)\nI0818 10:57:00.120769 17344 sgd_solver.cpp:166] Iteration 30300, lr = 0.0035\nI0818 10:59:17.282569 17344 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 11:00:37.567454 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71928\nI0818 11:00:37.567704 17344 solver.cpp:404]     Test net output #1: loss = 1.4161 (* 1 = 1.4161 loss)\nI0818 11:00:38.873456 17344 solver.cpp:228] Iteration 30400, loss = 0.0434971\nI0818 11:00:38.873499 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:00:38.873515 17344 solver.cpp:244]     Train net output #1: loss = 0.043497 (* 1 = 0.043497 loss)\nI0818 11:00:38.969743 17344 sgd_solver.cpp:166] Iteration 30400, lr = 0.0035\nI0818 11:02:56.143378 17344 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 11:04:16.438884 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12248\nI0818 11:04:16.439165 17344 solver.cpp:404]     Test net output #1: loss = 19.7323 (* 1 = 19.7323 
loss)\nI0818 11:04:17.744750 17344 solver.cpp:228] Iteration 30500, loss = 0.106959\nI0818 11:04:17.744789 17344 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 11:04:17.744805 17344 solver.cpp:244]     Train net output #1: loss = 0.106959 (* 1 = 0.106959 loss)\nI0818 11:04:17.834843 17344 sgd_solver.cpp:166] Iteration 30500, lr = 0.0035\nI0818 11:06:34.996042 17344 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 11:07:55.262209 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62784\nI0818 11:07:55.262447 17344 solver.cpp:404]     Test net output #1: loss = 2.42647 (* 1 = 2.42647 loss)\nI0818 11:07:56.567579 17344 solver.cpp:228] Iteration 30600, loss = 0.0485625\nI0818 11:07:56.567622 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:07:56.567637 17344 solver.cpp:244]     Train net output #1: loss = 0.0485625 (* 1 = 0.0485625 loss)\nI0818 11:07:56.662523 17344 sgd_solver.cpp:166] Iteration 30600, lr = 0.0035\nI0818 11:10:13.827476 17344 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 11:11:34.120086 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74412\nI0818 11:11:34.120340 17344 solver.cpp:404]     Test net output #1: loss = 1.40017 (* 1 = 1.40017 loss)\nI0818 11:11:35.425781 17344 solver.cpp:228] Iteration 30700, loss = 0.0375994\nI0818 11:11:35.425822 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:11:35.425838 17344 solver.cpp:244]     Train net output #1: loss = 0.0375993 (* 1 = 0.0375993 loss)\nI0818 11:11:35.524297 17344 sgd_solver.cpp:166] Iteration 30700, lr = 0.0035\nI0818 11:13:52.700522 17344 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 11:15:12.990835 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27352\nI0818 11:15:12.991113 17344 solver.cpp:404]     Test net output #1: loss = 12.5158 (* 1 = 12.5158 loss)\nI0818 11:15:14.295995 17344 solver.cpp:228] Iteration 30800, loss = 0.0182732\nI0818 11:15:14.296041 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:15:14.296056 17344 solver.cpp:244]     Train net output #1: loss = 0.0182732 (* 1 = 0.0182732 loss)\nI0818 11:15:14.391069 17344 sgd_solver.cpp:166] Iteration 30800, lr = 0.0035\nI0818 11:17:31.678783 17344 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:18:51.958694 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70844\nI0818 11:18:51.958979 17344 solver.cpp:404]     Test net output #1: loss = 1.62987 (* 1 = 1.62987 loss)\nI0818 11:18:53.263938 17344 solver.cpp:228] Iteration 30900, loss = 0.0555664\nI0818 11:18:53.263981 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:18:53.263996 17344 solver.cpp:244]     Train net output #1: loss = 0.0555663 (* 1 = 0.0555663 loss)\nI0818 11:18:53.361294 17344 sgd_solver.cpp:166] Iteration 30900, lr = 0.0035\nI0818 11:21:10.546038 17344 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:22:30.823065 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74128\nI0818 11:22:30.823336 17344 solver.cpp:404]     Test net output #1: loss = 1.50444 (* 1 = 1.50444 loss)\nI0818 11:22:32.129040 17344 solver.cpp:228] Iteration 31000, loss = 0.00619106\nI0818 11:22:32.129081 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:22:32.129096 17344 solver.cpp:244]     Train net output #1: loss = 0.00619097 (* 1 = 0.00619097 loss)\nI0818 11:22:32.220204 17344 sgd_solver.cpp:166] Iteration 31000, lr = 0.0035\nI0818 11:24:49.453706 17344 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:26:10.151799 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71608\nI0818 11:26:10.152112 17344 solver.cpp:404]     Test net output #1: loss = 1.43922 (* 1 = 1.43922 loss)\nI0818 11:26:11.462831 17344 solver.cpp:228] Iteration 31100, loss = 0.047237\nI0818 11:26:11.462877 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:26:11.462893 17344 solver.cpp:244]     Train net output #1: 
loss = 0.0472369 (* 1 = 0.0472369 loss)\nI0818 11:26:11.554785 17344 sgd_solver.cpp:166] Iteration 31100, lr = 0.0035\nI0818 11:28:28.761487 17344 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:29:49.131649 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77752\nI0818 11:29:49.131894 17344 solver.cpp:404]     Test net output #1: loss = 0.989473 (* 1 = 0.989473 loss)\nI0818 11:29:50.437141 17344 solver.cpp:228] Iteration 31200, loss = 0.0386505\nI0818 11:29:50.437181 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:29:50.437196 17344 solver.cpp:244]     Train net output #1: loss = 0.0386504 (* 1 = 0.0386504 loss)\nI0818 11:29:50.532801 17344 sgd_solver.cpp:166] Iteration 31200, lr = 0.0035\nI0818 11:32:07.651839 17344 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:33:28.019136 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62544\nI0818 11:33:28.019395 17344 solver.cpp:404]     Test net output #1: loss = 3.22338 (* 1 = 3.22338 loss)\nI0818 11:33:29.324873 17344 solver.cpp:228] Iteration 31300, loss = 0.0908168\nI0818 11:33:29.324913 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:33:29.324928 17344 solver.cpp:244]     Train net output #1: loss = 0.0908168 (* 1 = 0.0908168 loss)\nI0818 11:33:29.422040 17344 sgd_solver.cpp:166] Iteration 31300, lr = 0.0035\nI0818 11:35:46.633069 17344 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:37:07.003034 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71892\nI0818 11:37:07.003307 17344 solver.cpp:404]     Test net output #1: loss = 1.69954 (* 1 = 1.69954 loss)\nI0818 11:37:08.308923 17344 solver.cpp:228] Iteration 31400, loss = 0.0936383\nI0818 11:37:08.308962 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:37:08.308979 17344 solver.cpp:244]     Train net output #1: loss = 0.0936382 (* 1 = 0.0936382 loss)\nI0818 11:37:08.407364 17344 sgd_solver.cpp:166] Iteration 31400, lr = 
0.0035\nI0818 11:39:25.813833 17344 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:40:46.186344 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77352\nI0818 11:40:46.186615 17344 solver.cpp:404]     Test net output #1: loss = 0.951188 (* 1 = 0.951188 loss)\nI0818 11:40:47.494666 17344 solver.cpp:228] Iteration 31500, loss = 0.057934\nI0818 11:40:47.494706 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:40:47.494722 17344 solver.cpp:244]     Train net output #1: loss = 0.057934 (* 1 = 0.057934 loss)\nI0818 11:40:47.587795 17344 sgd_solver.cpp:166] Iteration 31500, lr = 0.0035\nI0818 11:43:05.029299 17344 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 11:44:25.399605 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66012\nI0818 11:44:25.399870 17344 solver.cpp:404]     Test net output #1: loss = 1.84674 (* 1 = 1.84674 loss)\nI0818 11:44:26.707219 17344 solver.cpp:228] Iteration 31600, loss = 0.0285729\nI0818 11:44:26.707259 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:44:26.707274 17344 solver.cpp:244]     Train net output #1: loss = 0.0285729 (* 1 = 0.0285729 loss)\nI0818 11:44:26.805626 17344 sgd_solver.cpp:166] Iteration 31600, lr = 0.0035\nI0818 11:46:44.261817 17344 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 11:48:04.612169 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72692\nI0818 11:48:04.612404 17344 solver.cpp:404]     Test net output #1: loss = 1.22009 (* 1 = 1.22009 loss)\nI0818 11:48:05.921262 17344 solver.cpp:228] Iteration 31700, loss = 0.0483001\nI0818 11:48:05.921300 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:48:05.921315 17344 solver.cpp:244]     Train net output #1: loss = 0.0483001 (* 1 = 0.0483001 loss)\nI0818 11:48:06.017150 17344 sgd_solver.cpp:166] Iteration 31700, lr = 0.0035\nI0818 11:50:23.436522 17344 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 11:51:43.803827 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.60104\nI0818 11:51:43.804080 17344 solver.cpp:404]     Test net output #1: loss = 1.95133 (* 1 = 1.95133 loss)\nI0818 11:51:45.112596 17344 solver.cpp:228] Iteration 31800, loss = 0.0518211\nI0818 11:51:45.112637 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:51:45.112651 17344 solver.cpp:244]     Train net output #1: loss = 0.0518211 (* 1 = 0.0518211 loss)\nI0818 11:51:45.205529 17344 sgd_solver.cpp:166] Iteration 31800, lr = 0.0035\nI0818 11:54:02.563513 17344 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 11:55:22.928625 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80568\nI0818 11:55:22.928901 17344 solver.cpp:404]     Test net output #1: loss = 0.773307 (* 1 = 0.773307 loss)\nI0818 11:55:24.237254 17344 solver.cpp:228] Iteration 31900, loss = 0.0392508\nI0818 11:55:24.237293 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:55:24.237308 17344 solver.cpp:244]     Train net output #1: loss = 0.0392507 (* 1 = 0.0392507 loss)\nI0818 11:55:24.333218 17344 sgd_solver.cpp:166] Iteration 31900, lr = 0.0035\nI0818 11:57:41.663375 17344 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 11:59:02.027269 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76412\nI0818 11:59:02.027544 17344 solver.cpp:404]     Test net output #1: loss = 1.10576 (* 1 = 1.10576 loss)\nI0818 11:59:03.335358 17344 solver.cpp:228] Iteration 32000, loss = 0.0461539\nI0818 11:59:03.335398 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:59:03.335414 17344 solver.cpp:244]     Train net output #1: loss = 0.0461538 (* 1 = 0.0461538 loss)\nI0818 11:59:03.430786 17344 sgd_solver.cpp:166] Iteration 32000, lr = 0.0035\nI0818 12:01:20.828502 17344 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 12:02:41.185493 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68412\nI0818 12:02:41.185766 17344 solver.cpp:404]     Test net 
output #1: loss = 1.55888 (* 1 = 1.55888 loss)\nI0818 12:02:42.494295 17344 solver.cpp:228] Iteration 32100, loss = 0.0805096\nI0818 12:02:42.494334 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:02:42.494349 17344 solver.cpp:244]     Train net output #1: loss = 0.0805095 (* 1 = 0.0805095 loss)\nI0818 12:02:42.583153 17344 sgd_solver.cpp:166] Iteration 32100, lr = 0.0035\nI0818 12:04:59.993803 17344 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 12:06:20.361490 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60244\nI0818 12:06:20.361774 17344 solver.cpp:404]     Test net output #1: loss = 2.92676 (* 1 = 2.92676 loss)\nI0818 12:06:21.670665 17344 solver.cpp:228] Iteration 32200, loss = 0.017725\nI0818 12:06:21.670702 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:06:21.670718 17344 solver.cpp:244]     Train net output #1: loss = 0.017725 (* 1 = 0.017725 loss)\nI0818 12:06:21.765847 17344 sgd_solver.cpp:166] Iteration 32200, lr = 0.0035\nI0818 12:08:39.247584 17344 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 12:09:59.625731 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66512\nI0818 12:09:59.626006 17344 solver.cpp:404]     Test net output #1: loss = 1.65545 (* 1 = 1.65545 loss)\nI0818 12:10:00.933943 17344 solver.cpp:228] Iteration 32300, loss = 0.0642261\nI0818 12:10:00.933986 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:10:00.934010 17344 solver.cpp:244]     Train net output #1: loss = 0.0642261 (* 1 = 0.0642261 loss)\nI0818 12:10:01.027945 17344 sgd_solver.cpp:166] Iteration 32300, lr = 0.0035\nI0818 12:12:18.389114 17344 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 12:13:38.758806 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70372\nI0818 12:13:38.759047 17344 solver.cpp:404]     Test net output #1: loss = 1.56049 (* 1 = 1.56049 loss)\nI0818 12:13:40.067294 17344 solver.cpp:228] Iteration 32400, loss = 
0.0498677\nI0818 12:13:40.067337 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:13:40.067361 17344 solver.cpp:244]     Train net output #1: loss = 0.0498676 (* 1 = 0.0498676 loss)\nI0818 12:13:40.155958 17344 sgd_solver.cpp:166] Iteration 32400, lr = 0.0035\nI0818 12:15:57.548411 17344 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:17:17.927619 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60016\nI0818 12:17:17.927889 17344 solver.cpp:404]     Test net output #1: loss = 2.80202 (* 1 = 2.80202 loss)\nI0818 12:17:19.236315 17344 solver.cpp:228] Iteration 32500, loss = 0.0939004\nI0818 12:17:19.236363 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:17:19.236387 17344 solver.cpp:244]     Train net output #1: loss = 0.0939004 (* 1 = 0.0939004 loss)\nI0818 12:17:19.330694 17344 sgd_solver.cpp:166] Iteration 32500, lr = 0.0035\nI0818 12:19:36.801023 17344 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 12:20:57.174149 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8038\nI0818 12:20:57.174422 17344 solver.cpp:404]     Test net output #1: loss = 0.886657 (* 1 = 0.886657 loss)\nI0818 12:20:58.482764 17344 solver.cpp:228] Iteration 32600, loss = 0.0884615\nI0818 12:20:58.482810 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:20:58.482833 17344 solver.cpp:244]     Train net output #1: loss = 0.0884615 (* 1 = 0.0884615 loss)\nI0818 12:20:58.580214 17344 sgd_solver.cpp:166] Iteration 32600, lr = 0.0035\nI0818 12:23:15.969041 17344 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 12:24:36.322131 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62308\nI0818 12:24:36.322410 17344 solver.cpp:404]     Test net output #1: loss = 2.12921 (* 1 = 2.12921 loss)\nI0818 12:24:37.631024 17344 solver.cpp:228] Iteration 32700, loss = 0.023137\nI0818 12:24:37.631068 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:24:37.631084 17344 
solver.cpp:244]     Train net output #1: loss = 0.023137 (* 1 = 0.023137 loss)\nI0818 12:24:37.722759 17344 sgd_solver.cpp:166] Iteration 32700, lr = 0.0035\nI0818 12:26:55.318261 17344 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:28:15.681942 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74168\nI0818 12:28:15.682227 17344 solver.cpp:404]     Test net output #1: loss = 1.35041 (* 1 = 1.35041 loss)\nI0818 12:28:16.990646 17344 solver.cpp:228] Iteration 32800, loss = 0.0717323\nI0818 12:28:16.990692 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:28:16.990708 17344 solver.cpp:244]     Train net output #1: loss = 0.0717322 (* 1 = 0.0717322 loss)\nI0818 12:28:17.086410 17344 sgd_solver.cpp:166] Iteration 32800, lr = 0.0035\nI0818 12:30:34.518242 17344 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:31:54.886404 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73676\nI0818 12:31:54.886656 17344 solver.cpp:404]     Test net output #1: loss = 1.41104 (* 1 = 1.41104 loss)\nI0818 12:31:56.195785 17344 solver.cpp:228] Iteration 32900, loss = 0.0116736\nI0818 12:31:56.195829 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:31:56.195844 17344 solver.cpp:244]     Train net output #1: loss = 0.0116736 (* 1 = 0.0116736 loss)\nI0818 12:31:56.293800 17344 sgd_solver.cpp:166] Iteration 32900, lr = 0.0035\nI0818 12:34:13.641306 17344 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:35:33.995044 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6896\nI0818 12:35:33.995288 17344 solver.cpp:404]     Test net output #1: loss = 1.56008 (* 1 = 1.56008 loss)\nI0818 12:35:35.303589 17344 solver.cpp:228] Iteration 33000, loss = 0.0428633\nI0818 12:35:35.303632 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:35:35.303647 17344 solver.cpp:244]     Train net output #1: loss = 0.0428633 (* 1 = 0.0428633 loss)\nI0818 12:35:35.401597 17344 
sgd_solver.cpp:166] Iteration 33000, lr = 0.0035\nI0818 12:37:52.749480 17344 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:39:13.103941 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60656\nI0818 12:39:13.104223 17344 solver.cpp:404]     Test net output #1: loss = 2.36908 (* 1 = 2.36908 loss)\nI0818 12:39:14.413065 17344 solver.cpp:228] Iteration 33100, loss = 0.0469682\nI0818 12:39:14.413108 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:39:14.413125 17344 solver.cpp:244]     Train net output #1: loss = 0.0469682 (* 1 = 0.0469682 loss)\nI0818 12:39:14.508255 17344 sgd_solver.cpp:166] Iteration 33100, lr = 0.0035\nI0818 12:41:32.073477 17344 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 12:42:52.429241 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7116\nI0818 12:42:52.429519 17344 solver.cpp:404]     Test net output #1: loss = 1.40458 (* 1 = 1.40458 loss)\nI0818 12:42:53.737713 17344 solver.cpp:228] Iteration 33200, loss = 0.0389523\nI0818 12:42:53.737761 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:42:53.737778 17344 solver.cpp:244]     Train net output #1: loss = 0.0389523 (* 1 = 0.0389523 loss)\nI0818 12:42:53.826002 17344 sgd_solver.cpp:166] Iteration 33200, lr = 0.0035\nI0818 12:45:11.261016 17344 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 12:46:31.604682 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63692\nI0818 12:46:31.604969 17344 solver.cpp:404]     Test net output #1: loss = 1.97564 (* 1 = 1.97564 loss)\nI0818 12:46:32.913221 17344 solver.cpp:228] Iteration 33300, loss = 0.0424741\nI0818 12:46:32.913265 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:46:32.913281 17344 solver.cpp:244]     Train net output #1: loss = 0.042474 (* 1 = 0.042474 loss)\nI0818 12:46:33.012341 17344 sgd_solver.cpp:166] Iteration 33300, lr = 0.0035\nI0818 12:48:50.481819 17344 solver.cpp:337] Iteration 33400, Testing net 
(#0)\nI0818 12:50:10.855379 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58072\nI0818 12:50:10.855659 17344 solver.cpp:404]     Test net output #1: loss = 2.76766 (* 1 = 2.76766 loss)\nI0818 12:50:12.162537 17344 solver.cpp:228] Iteration 33400, loss = 0.0329471\nI0818 12:50:12.162580 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:50:12.162596 17344 solver.cpp:244]     Train net output #1: loss = 0.0329471 (* 1 = 0.0329471 loss)\nI0818 12:50:12.261131 17344 sgd_solver.cpp:166] Iteration 33400, lr = 0.0035\nI0818 12:52:29.134770 17344 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 12:53:49.480268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50436\nI0818 12:53:49.480525 17344 solver.cpp:404]     Test net output #1: loss = 3.96424 (* 1 = 3.96424 loss)\nI0818 12:53:50.785956 17344 solver.cpp:228] Iteration 33500, loss = 0.0447069\nI0818 12:53:50.786000 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:53:50.786016 17344 solver.cpp:244]     Train net output #1: loss = 0.0447069 (* 1 = 0.0447069 loss)\nI0818 12:53:50.881610 17344 sgd_solver.cpp:166] Iteration 33500, lr = 0.0035\nI0818 12:56:07.799907 17344 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 12:57:28.139772 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29644\nI0818 12:57:28.140049 17344 solver.cpp:404]     Test net output #1: loss = 7.05515 (* 1 = 7.05515 loss)\nI0818 12:57:29.445866 17344 solver.cpp:228] Iteration 33600, loss = 0.0542068\nI0818 12:57:29.445910 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:57:29.445927 17344 solver.cpp:244]     Train net output #1: loss = 0.0542068 (* 1 = 0.0542068 loss)\nI0818 12:57:29.543969 17344 sgd_solver.cpp:166] Iteration 33600, lr = 0.0035\nI0818 12:59:46.423867 17344 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 13:01:06.777844 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67212\nI0818 13:01:06.778122 17344 
solver.cpp:404]     Test net output #1: loss = 1.66714 (* 1 = 1.66714 loss)\nI0818 13:01:08.083014 17344 solver.cpp:228] Iteration 33700, loss = 0.050895\nI0818 13:01:08.083056 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:01:08.083072 17344 solver.cpp:244]     Train net output #1: loss = 0.050895 (* 1 = 0.050895 loss)\nI0818 13:01:08.176338 17344 sgd_solver.cpp:166] Iteration 33700, lr = 0.0035\nI0818 13:03:25.153368 17344 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 13:04:45.508687 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81744\nI0818 13:04:45.508971 17344 solver.cpp:404]     Test net output #1: loss = 0.882038 (* 1 = 0.882038 loss)\nI0818 13:04:46.814023 17344 solver.cpp:228] Iteration 33800, loss = 0.0919492\nI0818 13:04:46.814065 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:04:46.814081 17344 solver.cpp:244]     Train net output #1: loss = 0.0919492 (* 1 = 0.0919492 loss)\nI0818 13:04:46.912546 17344 sgd_solver.cpp:166] Iteration 33800, lr = 0.0035\nI0818 13:07:03.617677 17344 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 13:08:23.862624 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66716\nI0818 13:08:23.862900 17344 solver.cpp:404]     Test net output #1: loss = 1.93447 (* 1 = 1.93447 loss)\nI0818 13:08:25.167480 17344 solver.cpp:228] Iteration 33900, loss = 0.0525425\nI0818 13:08:25.167524 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:08:25.167541 17344 solver.cpp:244]     Train net output #1: loss = 0.0525425 (* 1 = 0.0525425 loss)\nI0818 13:08:25.263757 17344 sgd_solver.cpp:166] Iteration 33900, lr = 0.0035\nI0818 13:10:42.192031 17344 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 13:12:02.456900 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66636\nI0818 13:12:02.457175 17344 solver.cpp:404]     Test net output #1: loss = 2.02302 (* 1 = 2.02302 loss)\nI0818 13:12:03.763084 17344 solver.cpp:228] 
Iteration 34000, loss = 0.0365278\nI0818 13:12:03.763125 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:12:03.763141 17344 solver.cpp:244]     Train net output #1: loss = 0.0365278 (* 1 = 0.0365278 loss)\nI0818 13:12:03.858534 17344 sgd_solver.cpp:166] Iteration 34000, lr = 0.0035\nI0818 13:14:20.776064 17344 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:15:41.029889 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71584\nI0818 13:15:41.030122 17344 solver.cpp:404]     Test net output #1: loss = 1.5701 (* 1 = 1.5701 loss)\nI0818 13:15:42.335299 17344 solver.cpp:228] Iteration 34100, loss = 0.0170661\nI0818 13:15:42.335343 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:15:42.335364 17344 solver.cpp:244]     Train net output #1: loss = 0.0170661 (* 1 = 0.0170661 loss)\nI0818 13:15:42.427481 17344 sgd_solver.cpp:166] Iteration 34100, lr = 0.0035\nI0818 13:17:59.296105 17344 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 13:19:19.563110 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72068\nI0818 13:19:19.563391 17344 solver.cpp:404]     Test net output #1: loss = 1.53986 (* 1 = 1.53986 loss)\nI0818 13:19:20.868486 17344 solver.cpp:228] Iteration 34200, loss = 0.0363724\nI0818 13:19:20.868531 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:19:20.868547 17344 solver.cpp:244]     Train net output #1: loss = 0.0363724 (* 1 = 0.0363724 loss)\nI0818 13:19:20.962585 17344 sgd_solver.cpp:166] Iteration 34200, lr = 0.0035\nI0818 13:21:37.846377 17344 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:22:58.119825 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6884\nI0818 13:22:58.120103 17344 solver.cpp:404]     Test net output #1: loss = 1.60507 (* 1 = 1.60507 loss)\nI0818 13:22:59.425097 17344 solver.cpp:228] Iteration 34300, loss = 0.0813282\nI0818 13:22:59.425142 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 
13:22:59.425158 17344 solver.cpp:244]     Train net output #1: loss = 0.0813282 (* 1 = 0.0813282 loss)\nI0818 13:22:59.519421 17344 sgd_solver.cpp:166] Iteration 34300, lr = 0.0035\nI0818 13:25:16.219612 17344 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:26:36.489992 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73608\nI0818 13:26:36.490257 17344 solver.cpp:404]     Test net output #1: loss = 1.35072 (* 1 = 1.35072 loss)\nI0818 13:26:37.795066 17344 solver.cpp:228] Iteration 34400, loss = 0.0245617\nI0818 13:26:37.795110 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:26:37.795127 17344 solver.cpp:244]     Train net output #1: loss = 0.0245617 (* 1 = 0.0245617 loss)\nI0818 13:26:37.892379 17344 sgd_solver.cpp:166] Iteration 34400, lr = 0.0035\nI0818 13:28:54.824280 17344 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:30:15.077664 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69988\nI0818 13:30:15.077970 17344 solver.cpp:404]     Test net output #1: loss = 1.4724 (* 1 = 1.4724 loss)\nI0818 13:30:16.382930 17344 solver.cpp:228] Iteration 34500, loss = 0.0429524\nI0818 13:30:16.382972 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:30:16.382987 17344 solver.cpp:244]     Train net output #1: loss = 0.0429524 (* 1 = 0.0429524 loss)\nI0818 13:30:16.480891 17344 sgd_solver.cpp:166] Iteration 34500, lr = 0.0035\nI0818 13:32:33.514649 17344 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:33:53.789268 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76984\nI0818 13:33:53.789567 17344 solver.cpp:404]     Test net output #1: loss = 0.931517 (* 1 = 0.931517 loss)\nI0818 13:33:55.095366 17344 solver.cpp:228] Iteration 34600, loss = 0.0783834\nI0818 13:33:55.095408 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:33:55.095425 17344 solver.cpp:244]     Train net output #1: loss = 0.0783834 (* 1 = 0.0783834 loss)\nI0818 
13:33:55.187455 17344 sgd_solver.cpp:166] Iteration 34600, lr = 0.0035\nI0818 13:36:12.085397 17344 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:37:32.350421 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65772\nI0818 13:37:32.350723 17344 solver.cpp:404]     Test net output #1: loss = 1.85028 (* 1 = 1.85028 loss)\nI0818 13:37:33.655949 17344 solver.cpp:228] Iteration 34700, loss = 0.102295\nI0818 13:37:33.655993 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 13:37:33.656009 17344 solver.cpp:244]     Train net output #1: loss = 0.102295 (* 1 = 0.102295 loss)\nI0818 13:37:33.750715 17344 sgd_solver.cpp:166] Iteration 34700, lr = 0.0035\nI0818 13:39:50.561921 17344 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 13:41:10.927572 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76092\nI0818 13:41:10.927853 17344 solver.cpp:404]     Test net output #1: loss = 1.13551 (* 1 = 1.13551 loss)\nI0818 13:41:12.232945 17344 solver.cpp:228] Iteration 34800, loss = 0.0416173\nI0818 13:41:12.232990 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:41:12.233006 17344 solver.cpp:244]     Train net output #1: loss = 0.0416173 (* 1 = 0.0416173 loss)\nI0818 13:41:12.327090 17344 sgd_solver.cpp:166] Iteration 34800, lr = 0.0035\nI0818 13:43:29.195871 17344 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 13:44:49.566184 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67884\nI0818 13:44:49.566484 17344 solver.cpp:404]     Test net output #1: loss = 1.55679 (* 1 = 1.55679 loss)\nI0818 13:44:50.872077 17344 solver.cpp:228] Iteration 34900, loss = 0.0327925\nI0818 13:44:50.872123 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:44:50.872138 17344 solver.cpp:244]     Train net output #1: loss = 0.0327924 (* 1 = 0.0327924 loss)\nI0818 13:44:50.969854 17344 sgd_solver.cpp:166] Iteration 34900, lr = 0.0035\nI0818 13:47:07.826187 17344 solver.cpp:337] 
Iteration 35000, Testing net (#0)\nI0818 13:48:28.193547 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6916\nI0818 13:48:28.193848 17344 solver.cpp:404]     Test net output #1: loss = 1.7996 (* 1 = 1.7996 loss)\nI0818 13:48:29.498989 17344 solver.cpp:228] Iteration 35000, loss = 0.0434959\nI0818 13:48:29.499033 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:48:29.499050 17344 solver.cpp:244]     Train net output #1: loss = 0.0434959 (* 1 = 0.0434959 loss)\nI0818 13:48:29.593816 17344 sgd_solver.cpp:166] Iteration 35000, lr = 0.0035\nI0818 13:50:46.476325 17344 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 13:52:06.842387 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75004\nI0818 13:52:06.842664 17344 solver.cpp:404]     Test net output #1: loss = 1.32398 (* 1 = 1.32398 loss)\nI0818 13:52:08.147773 17344 solver.cpp:228] Iteration 35100, loss = 0.0353472\nI0818 13:52:08.147819 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:52:08.147835 17344 solver.cpp:244]     Train net output #1: loss = 0.0353472 (* 1 = 0.0353472 loss)\nI0818 13:52:08.243589 17344 sgd_solver.cpp:166] Iteration 35100, lr = 0.0035\nI0818 13:54:25.074311 17344 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 13:55:45.434839 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7222\nI0818 13:55:45.435137 17344 solver.cpp:404]     Test net output #1: loss = 1.44299 (* 1 = 1.44299 loss)\nI0818 13:55:46.740595 17344 solver.cpp:228] Iteration 35200, loss = 0.0475519\nI0818 13:55:46.740638 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:55:46.740654 17344 solver.cpp:244]     Train net output #1: loss = 0.0475519 (* 1 = 0.0475519 loss)\nI0818 13:55:46.828158 17344 sgd_solver.cpp:166] Iteration 35200, lr = 0.0035\nI0818 13:58:03.731533 17344 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 13:59:24.097218 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.67856\nI0818 13:59:24.097522 17344 solver.cpp:404]     Test net output #1: loss = 1.60448 (* 1 = 1.60448 loss)\nI0818 13:59:25.402463 17344 solver.cpp:228] Iteration 35300, loss = 0.0739262\nI0818 13:59:25.402505 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:59:25.402523 17344 solver.cpp:244]     Train net output #1: loss = 0.0739261 (* 1 = 0.0739261 loss)\nI0818 13:59:25.496644 17344 sgd_solver.cpp:166] Iteration 35300, lr = 0.0035\nI0818 14:01:42.203872 17344 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 14:03:02.570560 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7762\nI0818 14:03:02.570865 17344 solver.cpp:404]     Test net output #1: loss = 0.967214 (* 1 = 0.967214 loss)\nI0818 14:03:03.875921 17344 solver.cpp:228] Iteration 35400, loss = 0.0382709\nI0818 14:03:03.875964 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:03:03.875980 17344 solver.cpp:244]     Train net output #1: loss = 0.0382709 (* 1 = 0.0382709 loss)\nI0818 14:03:03.965260 17344 sgd_solver.cpp:166] Iteration 35400, lr = 0.0035\nI0818 14:05:20.799670 17344 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 14:06:41.163733 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56128\nI0818 14:06:41.164039 17344 solver.cpp:404]     Test net output #1: loss = 2.81574 (* 1 = 2.81574 loss)\nI0818 14:06:42.469259 17344 solver.cpp:228] Iteration 35500, loss = 0.0668433\nI0818 14:06:42.469303 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:06:42.469319 17344 solver.cpp:244]     Train net output #1: loss = 0.0668433 (* 1 = 0.0668433 loss)\nI0818 14:06:42.557838 17344 sgd_solver.cpp:166] Iteration 35500, lr = 0.0035\nI0818 14:08:59.278795 17344 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 14:10:19.648711 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58924\nI0818 14:10:19.649013 17344 solver.cpp:404]     Test net output #1: loss = 2.64482 (* 1 = 2.64482 loss)\nI0818 
14:10:20.954011 17344 solver.cpp:228] Iteration 35600, loss = 0.0222075\nI0818 14:10:20.954053 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:10:20.954069 17344 solver.cpp:244]     Train net output #1: loss = 0.0222075 (* 1 = 0.0222075 loss)\nI0818 14:10:21.049367 17344 sgd_solver.cpp:166] Iteration 35600, lr = 0.0035\nI0818 14:12:37.713610 17344 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 14:13:58.074405 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58852\nI0818 14:13:58.074709 17344 solver.cpp:404]     Test net output #1: loss = 2.72579 (* 1 = 2.72579 loss)\nI0818 14:13:59.380368 17344 solver.cpp:228] Iteration 35700, loss = 0.0709653\nI0818 14:13:59.380411 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:13:59.380429 17344 solver.cpp:244]     Train net output #1: loss = 0.0709652 (* 1 = 0.0709652 loss)\nI0818 14:13:59.475208 17344 sgd_solver.cpp:166] Iteration 35700, lr = 0.0035\nI0818 14:16:16.299845 17344 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 14:17:36.652585 17344 solver.cpp:404]     Test net output #0: accuracy = 0.15412\nI0818 14:17:36.652885 17344 solver.cpp:404]     Test net output #1: loss = 21.0838 (* 1 = 21.0838 loss)\nI0818 14:17:37.957991 17344 solver.cpp:228] Iteration 35800, loss = 0.0422378\nI0818 14:17:37.958034 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:17:37.958050 17344 solver.cpp:244]     Train net output #1: loss = 0.0422377 (* 1 = 0.0422377 loss)\nI0818 14:17:38.052302 17344 sgd_solver.cpp:166] Iteration 35800, lr = 0.0035\nI0818 14:19:54.839284 17344 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:21:15.194700 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71524\nI0818 14:21:15.194994 17344 solver.cpp:404]     Test net output #1: loss = 1.6698 (* 1 = 1.6698 loss)\nI0818 14:21:16.500077 17344 solver.cpp:228] Iteration 35900, loss = 0.06345\nI0818 14:21:16.500118 17344 solver.cpp:244]     Train 
net output #0: accuracy = 0.968\nI0818 14:21:16.500133 17344 solver.cpp:244]     Train net output #1: loss = 0.0634499 (* 1 = 0.0634499 loss)\nI0818 14:21:16.592983 17344 sgd_solver.cpp:166] Iteration 35900, lr = 0.0035\nI0818 14:23:33.235216 17344 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:24:53.592624 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6438\nI0818 14:24:53.592931 17344 solver.cpp:404]     Test net output #1: loss = 2.43097 (* 1 = 2.43097 loss)\nI0818 14:24:54.898914 17344 solver.cpp:228] Iteration 36000, loss = 0.107162\nI0818 14:24:54.898955 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:24:54.898970 17344 solver.cpp:244]     Train net output #1: loss = 0.107162 (* 1 = 0.107162 loss)\nI0818 14:24:54.992971 17344 sgd_solver.cpp:166] Iteration 36000, lr = 0.0035\nI0818 14:27:11.873312 17344 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:28:32.287083 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72912\nI0818 14:28:32.287384 17344 solver.cpp:404]     Test net output #1: loss = 1.30945 (* 1 = 1.30945 loss)\nI0818 14:28:33.592348 17344 solver.cpp:228] Iteration 36100, loss = 0.022252\nI0818 14:28:33.592384 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:28:33.592399 17344 solver.cpp:244]     Train net output #1: loss = 0.0222519 (* 1 = 0.0222519 loss)\nI0818 14:28:33.689801 17344 sgd_solver.cpp:166] Iteration 36100, lr = 0.0035\nI0818 14:30:50.535257 17344 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:32:10.943863 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7466\nI0818 14:32:10.944131 17344 solver.cpp:404]     Test net output #1: loss = 1.35985 (* 1 = 1.35985 loss)\nI0818 14:32:12.249135 17344 solver.cpp:228] Iteration 36200, loss = 0.140886\nI0818 14:32:12.249171 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 14:32:12.249186 17344 solver.cpp:244]     Train net output #1: loss = 0.140886 (* 1 = 0.140886 
loss)\nI0818 14:32:12.345820 17344 sgd_solver.cpp:166] Iteration 36200, lr = 0.0035\nI0818 14:34:29.158448 17344 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 14:37:55.733436 17344 solver.cpp:404]     Test net output #0: accuracy = 0.16964\nI0818 14:37:55.735008 17344 solver.cpp:404]     Test net output #1: loss = 5.01517 (* 1 = 5.01517 loss)\nI0818 14:37:57.049566 17344 solver.cpp:228] Iteration 36300, loss = 0.0662923\nI0818 14:37:57.049643 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:37:57.049659 17344 solver.cpp:244]     Train net output #1: loss = 0.0662922 (* 1 = 0.0662922 loss)\nI0818 14:37:57.128698 17344 sgd_solver.cpp:166] Iteration 36300, lr = 0.0035\nI0818 14:40:14.039165 17344 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 14:41:34.487509 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74732\nI0818 14:41:34.487732 17344 solver.cpp:404]     Test net output #1: loss = 1.23279 (* 1 = 1.23279 loss)\nI0818 14:41:35.794145 17344 solver.cpp:228] Iteration 36400, loss = 0.0536193\nI0818 14:41:35.794184 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:41:35.794199 17344 solver.cpp:244]     Train net output #1: loss = 0.0536192 (* 1 = 0.0536192 loss)\nI0818 14:41:35.885267 17344 sgd_solver.cpp:166] Iteration 36400, lr = 0.0035\nI0818 14:44:06.145933 17344 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 14:45:26.595345 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62688\nI0818 14:45:26.595659 17344 solver.cpp:404]     Test net output #1: loss = 2.50753 (* 1 = 2.50753 loss)\nI0818 14:45:27.903223 17344 solver.cpp:228] Iteration 36500, loss = 0.0357574\nI0818 14:45:27.903264 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:45:27.903280 17344 solver.cpp:244]     Train net output #1: loss = 0.0357573 (* 1 = 0.0357573 loss)\nI0818 14:45:27.995461 17344 sgd_solver.cpp:166] Iteration 36500, lr = 0.0035\nI0818 14:47:44.400527 17344 
solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 14:49:05.113993 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65488\nI0818 14:49:05.114276 17344 solver.cpp:404]     Test net output #1: loss = 2.22663 (* 1 = 2.22663 loss)\nI0818 14:49:06.420203 17344 solver.cpp:228] Iteration 36600, loss = 0.0457233\nI0818 14:49:06.420248 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:49:06.420264 17344 solver.cpp:244]     Train net output #1: loss = 0.0457232 (* 1 = 0.0457232 loss)\nI0818 14:49:06.519968 17344 sgd_solver.cpp:166] Iteration 36600, lr = 0.0035\nI0818 14:51:22.946266 17344 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 14:52:43.302980 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53948\nI0818 14:52:43.303261 17344 solver.cpp:404]     Test net output #1: loss = 2.92869 (* 1 = 2.92869 loss)\nI0818 14:52:44.609078 17344 solver.cpp:228] Iteration 36700, loss = 0.0772496\nI0818 14:52:44.609120 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:52:44.609136 17344 solver.cpp:244]     Train net output #1: loss = 0.0772495 (* 1 = 0.0772495 loss)\nI0818 14:52:44.703567 17344 sgd_solver.cpp:166] Iteration 36700, lr = 0.0035\nI0818 14:55:01.212174 17344 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 14:56:21.573449 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28072\nI0818 14:56:21.573712 17344 solver.cpp:404]     Test net output #1: loss = 11.4686 (* 1 = 11.4686 loss)\nI0818 14:56:22.879364 17344 solver.cpp:228] Iteration 36800, loss = 0.110267\nI0818 14:56:22.879405 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:56:22.879421 17344 solver.cpp:244]     Train net output #1: loss = 0.110267 (* 1 = 0.110267 loss)\nI0818 14:56:22.979015 17344 sgd_solver.cpp:166] Iteration 36800, lr = 0.0035\nI0818 14:58:39.419936 17344 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 14:59:59.771322 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.70268\nI0818 14:59:59.771581 17344 solver.cpp:404]     Test net output #1: loss = 1.51992 (* 1 = 1.51992 loss)\nI0818 15:00:01.077468 17344 solver.cpp:228] Iteration 36900, loss = 0.024038\nI0818 15:00:01.077510 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:00:01.077527 17344 solver.cpp:244]     Train net output #1: loss = 0.0240379 (* 1 = 0.0240379 loss)\nI0818 15:00:01.170498 17344 sgd_solver.cpp:166] Iteration 36900, lr = 0.0035\nI0818 15:02:17.673626 17344 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 15:03:38.020143 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61192\nI0818 15:03:38.020406 17344 solver.cpp:404]     Test net output #1: loss = 2.65323 (* 1 = 2.65323 loss)\nI0818 15:03:39.326094 17344 solver.cpp:228] Iteration 37000, loss = 0.0582392\nI0818 15:03:39.326136 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:03:39.326153 17344 solver.cpp:244]     Train net output #1: loss = 0.0582391 (* 1 = 0.0582391 loss)\nI0818 15:03:39.419332 17344 sgd_solver.cpp:166] Iteration 37000, lr = 0.0035\nI0818 15:05:56.171468 17344 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 15:07:16.520651 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53744\nI0818 15:07:16.520941 17344 solver.cpp:404]     Test net output #1: loss = 3.60054 (* 1 = 3.60054 loss)\nI0818 15:07:17.825860 17344 solver.cpp:228] Iteration 37100, loss = 0.0620445\nI0818 15:07:17.825906 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:07:17.825922 17344 solver.cpp:244]     Train net output #1: loss = 0.0620444 (* 1 = 0.0620444 loss)\nI0818 15:07:17.921501 17344 sgd_solver.cpp:166] Iteration 37100, lr = 0.0035\nI0818 15:09:34.788112 17344 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 15:10:55.144187 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5916\nI0818 15:10:55.144464 17344 solver.cpp:404]     Test net output #1: loss = 3.13953 (* 1 = 3.13953 
loss)\nI0818 15:10:56.449491 17344 solver.cpp:228] Iteration 37200, loss = 0.0324549\nI0818 15:10:56.449534 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:10:56.449553 17344 solver.cpp:244]     Train net output #1: loss = 0.0324549 (* 1 = 0.0324549 loss)\nI0818 15:10:56.542114 17344 sgd_solver.cpp:166] Iteration 37200, lr = 0.0035\nI0818 15:13:13.420514 17344 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:14:33.764591 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55708\nI0818 15:14:33.764870 17344 solver.cpp:404]     Test net output #1: loss = 3.47879 (* 1 = 3.47879 loss)\nI0818 15:14:35.069911 17344 solver.cpp:228] Iteration 37300, loss = 0.0767268\nI0818 15:14:35.069955 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:14:35.069972 17344 solver.cpp:244]     Train net output #1: loss = 0.0767268 (* 1 = 0.0767268 loss)\nI0818 15:14:35.169040 17344 sgd_solver.cpp:166] Iteration 37300, lr = 0.0035\nI0818 15:16:51.952072 17344 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 15:18:12.307229 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71032\nI0818 15:18:12.307484 17344 solver.cpp:404]     Test net output #1: loss = 1.71194 (* 1 = 1.71194 loss)\nI0818 15:18:13.612403 17344 solver.cpp:228] Iteration 37400, loss = 0.0802979\nI0818 15:18:13.612447 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:18:13.612464 17344 solver.cpp:244]     Train net output #1: loss = 0.0802979 (* 1 = 0.0802979 loss)\nI0818 15:18:13.712380 17344 sgd_solver.cpp:166] Iteration 37400, lr = 0.0035\nI0818 15:20:30.513139 17344 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:21:50.868674 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69468\nI0818 15:21:50.868904 17344 solver.cpp:404]     Test net output #1: loss = 1.62585 (* 1 = 1.62585 loss)\nI0818 15:21:52.173418 17344 solver.cpp:228] Iteration 37500, loss = 0.0963559\nI0818 15:21:52.173460 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:21:52.173475 17344 solver.cpp:244]     Train net output #1: loss = 0.0963559 (* 1 = 0.0963559 loss)\nI0818 15:21:52.273924 17344 sgd_solver.cpp:166] Iteration 37500, lr = 0.0035\nI0818 15:24:09.043659 17344 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:25:29.301272 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71524\nI0818 15:25:29.301553 17344 solver.cpp:404]     Test net output #1: loss = 1.38436 (* 1 = 1.38436 loss)\nI0818 15:25:30.606621 17344 solver.cpp:228] Iteration 37600, loss = 0.109907\nI0818 15:25:30.606665 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:25:30.606681 17344 solver.cpp:244]     Train net output #1: loss = 0.109907 (* 1 = 0.109907 loss)\nI0818 15:25:30.706609 17344 sgd_solver.cpp:166] Iteration 37600, lr = 0.0035\nI0818 15:27:47.342180 17344 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:29:07.606429 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68464\nI0818 15:29:07.606709 17344 solver.cpp:404]     Test net output #1: loss = 1.71785 (* 1 = 1.71785 loss)\nI0818 15:29:08.912048 17344 solver.cpp:228] Iteration 37700, loss = 0.0793789\nI0818 15:29:08.912092 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:29:08.912108 17344 solver.cpp:244]     Train net output #1: loss = 0.0793788 (* 1 = 0.0793788 loss)\nI0818 15:29:09.009661 17344 sgd_solver.cpp:166] Iteration 37700, lr = 0.0035\nI0818 15:31:25.772244 17344 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:32:47.153187 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7256\nI0818 15:32:47.153542 17344 solver.cpp:404]     Test net output #1: loss = 1.46516 (* 1 = 1.46516 loss)\nI0818 15:32:48.463253 17344 solver.cpp:228] Iteration 37800, loss = 0.102486\nI0818 15:32:48.463312 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:32:48.463330 17344 solver.cpp:244]     Train net output 
#1: loss = 0.102486 (* 1 = 0.102486 loss)\nI0818 15:32:48.559303 17344 sgd_solver.cpp:166] Iteration 37800, lr = 0.0035\nI0818 15:35:06.252110 17344 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:36:27.673207 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42832\nI0818 15:36:27.673558 17344 solver.cpp:404]     Test net output #1: loss = 7.00863 (* 1 = 7.00863 loss)\nI0818 15:36:28.984098 17344 solver.cpp:228] Iteration 37900, loss = 0.0370332\nI0818 15:36:28.984151 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:36:28.984169 17344 solver.cpp:244]     Train net output #1: loss = 0.0370331 (* 1 = 0.0370331 loss)\nI0818 15:36:29.075326 17344 sgd_solver.cpp:166] Iteration 37900, lr = 0.0035\nI0818 15:38:46.612958 17344 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 15:40:08.036324 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71784\nI0818 15:40:08.036638 17344 solver.cpp:404]     Test net output #1: loss = 1.39457 (* 1 = 1.39457 loss)\nI0818 15:40:09.345726 17344 solver.cpp:228] Iteration 38000, loss = 0.0296179\nI0818 15:40:09.345767 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:40:09.345782 17344 solver.cpp:244]     Train net output #1: loss = 0.0296178 (* 1 = 0.0296178 loss)\nI0818 15:40:09.443692 17344 sgd_solver.cpp:166] Iteration 38000, lr = 0.0035\nI0818 15:42:26.953773 17344 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 15:43:47.235080 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61844\nI0818 15:43:47.235321 17344 solver.cpp:404]     Test net output #1: loss = 2.41411 (* 1 = 2.41411 loss)\nI0818 15:43:48.541657 17344 solver.cpp:228] Iteration 38100, loss = 0.0259841\nI0818 15:43:48.541704 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:43:48.541729 17344 solver.cpp:244]     Train net output #1: loss = 0.025984 (* 1 = 0.025984 loss)\nI0818 15:43:48.635174 17344 sgd_solver.cpp:166] Iteration 38100, lr = 
0.0035\nI0818 15:46:06.143913 17344 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 15:47:26.432813 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59444\nI0818 15:47:26.433094 17344 solver.cpp:404]     Test net output #1: loss = 2.83 (* 1 = 2.83 loss)\nI0818 15:47:27.739691 17344 solver.cpp:228] Iteration 38200, loss = 0.114439\nI0818 15:47:27.739740 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:47:27.739764 17344 solver.cpp:244]     Train net output #1: loss = 0.114439 (* 1 = 0.114439 loss)\nI0818 15:47:27.836907 17344 sgd_solver.cpp:166] Iteration 38200, lr = 0.0035\nI0818 15:49:45.271219 17344 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 15:51:05.542769 17344 solver.cpp:404]     Test net output #0: accuracy = 0.677\nI0818 15:51:05.543015 17344 solver.cpp:404]     Test net output #1: loss = 2.02424 (* 1 = 2.02424 loss)\nI0818 15:51:06.848848 17344 solver.cpp:228] Iteration 38300, loss = 0.0210495\nI0818 15:51:06.848897 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:51:06.848922 17344 solver.cpp:244]     Train net output #1: loss = 0.0210494 (* 1 = 0.0210494 loss)\nI0818 15:51:06.944311 17344 sgd_solver.cpp:166] Iteration 38300, lr = 0.0035\nI0818 15:53:24.361357 17344 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 15:54:44.612594 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74024\nI0818 15:54:44.612840 17344 solver.cpp:404]     Test net output #1: loss = 1.47599 (* 1 = 1.47599 loss)\nI0818 15:54:45.919075 17344 solver.cpp:228] Iteration 38400, loss = 0.0977313\nI0818 15:54:45.919123 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:54:45.919147 17344 solver.cpp:244]     Train net output #1: loss = 0.0977312 (* 1 = 0.0977312 loss)\nI0818 15:54:46.014161 17344 sgd_solver.cpp:166] Iteration 38400, lr = 0.0035\nI0818 15:57:03.353740 17344 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 15:58:23.686280 17344 solver.cpp:404]    
 Test net output #0: accuracy = 0.8464\nI0818 15:58:23.686511 17344 solver.cpp:404]     Test net output #1: loss = 0.631188 (* 1 = 0.631188 loss)\nI0818 15:58:24.992012 17344 solver.cpp:228] Iteration 38500, loss = 0.0370696\nI0818 15:58:24.992058 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:58:24.992081 17344 solver.cpp:244]     Train net output #1: loss = 0.0370695 (* 1 = 0.0370695 loss)\nI0818 15:58:25.094175 17344 sgd_solver.cpp:166] Iteration 38500, lr = 0.0035\nI0818 16:00:42.549080 17344 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 16:02:02.922785 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79124\nI0818 16:02:02.923075 17344 solver.cpp:404]     Test net output #1: loss = 0.974481 (* 1 = 0.974481 loss)\nI0818 16:02:04.229727 17344 solver.cpp:228] Iteration 38600, loss = 0.0481379\nI0818 16:02:04.229773 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:02:04.229797 17344 solver.cpp:244]     Train net output #1: loss = 0.0481378 (* 1 = 0.0481378 loss)\nI0818 16:02:04.327642 17344 sgd_solver.cpp:166] Iteration 38600, lr = 0.0035\nI0818 16:04:21.977911 17344 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 16:05:43.387158 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65436\nI0818 16:05:43.387490 17344 solver.cpp:404]     Test net output #1: loss = 2.07897 (* 1 = 2.07897 loss)\nI0818 16:05:44.698937 17344 solver.cpp:228] Iteration 38700, loss = 0.00880068\nI0818 16:05:44.698995 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:05:44.699013 17344 solver.cpp:244]     Train net output #1: loss = 0.0088006 (* 1 = 0.0088006 loss)\nI0818 16:05:44.790699 17344 sgd_solver.cpp:166] Iteration 38700, lr = 0.0035\nI0818 16:08:02.357898 17344 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 16:09:23.766568 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61024\nI0818 16:09:23.766902 17344 solver.cpp:404]     Test net output #1: loss = 
2.51998 (* 1 = 2.51998 loss)\nI0818 16:09:25.079139 17344 solver.cpp:228] Iteration 38800, loss = 0.0683021\nI0818 16:09:25.079183 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:09:25.079200 17344 solver.cpp:244]     Train net output #1: loss = 0.068302 (* 1 = 0.068302 loss)\nI0818 16:09:25.177675 17344 sgd_solver.cpp:166] Iteration 38800, lr = 0.0035\nI0818 16:11:42.837600 17344 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:13:04.246829 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61524\nI0818 16:13:04.247268 17344 solver.cpp:404]     Test net output #1: loss = 2.27572 (* 1 = 2.27572 loss)\nI0818 16:13:05.558869 17344 solver.cpp:228] Iteration 38900, loss = 0.0291777\nI0818 16:13:05.558928 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:13:05.558946 17344 solver.cpp:244]     Train net output #1: loss = 0.0291777 (* 1 = 0.0291777 loss)\nI0818 16:13:05.646268 17344 sgd_solver.cpp:166] Iteration 38900, lr = 0.0035\nI0818 16:15:23.224877 17344 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:16:44.641659 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67756\nI0818 16:16:44.641991 17344 solver.cpp:404]     Test net output #1: loss = 1.93529 (* 1 = 1.93529 loss)\nI0818 16:16:45.954977 17344 solver.cpp:228] Iteration 39000, loss = 0.0908171\nI0818 16:16:45.955020 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:16:45.955035 17344 solver.cpp:244]     Train net output #1: loss = 0.090817 (* 1 = 0.090817 loss)\nI0818 16:16:46.047194 17344 sgd_solver.cpp:166] Iteration 39000, lr = 0.0035\nI0818 16:19:03.644904 17344 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:20:25.064975 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57524\nI0818 16:20:25.065292 17344 solver.cpp:404]     Test net output #1: loss = 3.00207 (* 1 = 3.00207 loss)\nI0818 16:20:26.378101 17344 solver.cpp:228] Iteration 39100, loss = 0.0984987\nI0818 
16:20:26.378144 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:20:26.378159 17344 solver.cpp:244]     Train net output #1: loss = 0.0984986 (* 1 = 0.0984986 loss)\nI0818 16:20:26.468860 17344 sgd_solver.cpp:166] Iteration 39100, lr = 0.0035\nI0818 16:22:44.117700 17344 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:24:05.538257 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65544\nI0818 16:24:05.538585 17344 solver.cpp:404]     Test net output #1: loss = 2.29395 (* 1 = 2.29395 loss)\nI0818 16:24:06.850756 17344 solver.cpp:228] Iteration 39200, loss = 0.108603\nI0818 16:24:06.850800 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:24:06.850816 17344 solver.cpp:244]     Train net output #1: loss = 0.108603 (* 1 = 0.108603 loss)\nI0818 16:24:06.937211 17344 sgd_solver.cpp:166] Iteration 39200, lr = 0.0035\nI0818 16:26:24.479234 17344 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:27:45.896432 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79544\nI0818 16:27:45.896796 17344 solver.cpp:404]     Test net output #1: loss = 0.936888 (* 1 = 0.936888 loss)\nI0818 16:27:47.209481 17344 solver.cpp:228] Iteration 39300, loss = 0.0684085\nI0818 16:27:47.209524 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:27:47.209540 17344 solver.cpp:244]     Train net output #1: loss = 0.0684084 (* 1 = 0.0684084 loss)\nI0818 16:27:47.300513 17344 sgd_solver.cpp:166] Iteration 39300, lr = 0.0035\nI0818 16:30:04.823637 17344 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:31:26.240409 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75528\nI0818 16:31:26.240763 17344 solver.cpp:404]     Test net output #1: loss = 1.14087 (* 1 = 1.14087 loss)\nI0818 16:31:27.553982 17344 solver.cpp:228] Iteration 39400, loss = 0.0498753\nI0818 16:31:27.554039 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:31:27.554057 17344 
solver.cpp:244]     Train net output #1: loss = 0.0498752 (* 1 = 0.0498752 loss)\nI0818 16:31:27.638448 17344 sgd_solver.cpp:166] Iteration 39400, lr = 0.0035\nI0818 16:33:45.190043 17344 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:35:06.612062 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76112\nI0818 16:35:06.612416 17344 solver.cpp:404]     Test net output #1: loss = 1.20787 (* 1 = 1.20787 loss)\nI0818 16:35:07.924449 17344 solver.cpp:228] Iteration 39500, loss = 0.0326954\nI0818 16:35:07.924491 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:35:07.924507 17344 solver.cpp:244]     Train net output #1: loss = 0.0326953 (* 1 = 0.0326953 loss)\nI0818 16:35:08.013144 17344 sgd_solver.cpp:166] Iteration 39500, lr = 0.0035\nI0818 16:37:25.620895 17344 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 16:38:47.025956 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76284\nI0818 16:38:47.026290 17344 solver.cpp:404]     Test net output #1: loss = 1.12512 (* 1 = 1.12512 loss)\nI0818 16:38:48.337703 17344 solver.cpp:228] Iteration 39600, loss = 0.0240517\nI0818 16:38:48.337744 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:38:48.337760 17344 solver.cpp:244]     Train net output #1: loss = 0.0240515 (* 1 = 0.0240515 loss)\nI0818 16:38:48.428376 17344 sgd_solver.cpp:166] Iteration 39600, lr = 0.0035\nI0818 16:41:06.057924 17344 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 16:42:27.455977 17344 solver.cpp:404]     Test net output #0: accuracy = 0.52208\nI0818 16:42:27.456328 17344 solver.cpp:404]     Test net output #1: loss = 3.04837 (* 1 = 3.04837 loss)\nI0818 16:42:28.767940 17344 solver.cpp:228] Iteration 39700, loss = 0.0234638\nI0818 16:42:28.767978 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:42:28.767994 17344 solver.cpp:244]     Train net output #1: loss = 0.0234637 (* 1 = 0.0234637 loss)\nI0818 16:42:28.859336 17344 
sgd_solver.cpp:166] Iteration 39700, lr = 0.0035\nI0818 16:44:46.443974 17344 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 16:46:07.848363 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56692\nI0818 16:46:07.848696 17344 solver.cpp:404]     Test net output #1: loss = 2.961 (* 1 = 2.961 loss)\nI0818 16:46:09.161386 17344 solver.cpp:228] Iteration 39800, loss = 0.0478387\nI0818 16:46:09.161425 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:46:09.161442 17344 solver.cpp:244]     Train net output #1: loss = 0.0478386 (* 1 = 0.0478386 loss)\nI0818 16:46:09.259707 17344 sgd_solver.cpp:166] Iteration 39800, lr = 0.0035\nI0818 16:48:26.871182 17344 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 16:49:48.280246 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57756\nI0818 16:49:48.280596 17344 solver.cpp:404]     Test net output #1: loss = 2.76202 (* 1 = 2.76202 loss)\nI0818 16:49:49.593539 17344 solver.cpp:228] Iteration 39900, loss = 0.0385841\nI0818 16:49:49.593582 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:49:49.593598 17344 solver.cpp:244]     Train net output #1: loss = 0.0385839 (* 1 = 0.0385839 loss)\nI0818 16:49:49.686079 17344 sgd_solver.cpp:166] Iteration 39900, lr = 0.0035\nI0818 16:52:07.234738 17344 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 16:53:28.637796 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69428\nI0818 16:53:28.638157 17344 solver.cpp:404]     Test net output #1: loss = 1.44916 (* 1 = 1.44916 loss)\nI0818 16:53:29.950994 17344 solver.cpp:228] Iteration 40000, loss = 0.0240592\nI0818 16:53:29.951035 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:53:29.951052 17344 solver.cpp:244]     Train net output #1: loss = 0.0240591 (* 1 = 0.0240591 loss)\nI0818 16:53:30.038662 17344 sgd_solver.cpp:166] Iteration 40000, lr = 0.0035\nI0818 16:55:47.578840 17344 solver.cpp:337] Iteration 40100, Testing net 
(#0)\nI0818 16:57:08.981602 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57532\nI0818 16:57:08.981935 17344 solver.cpp:404]     Test net output #1: loss = 2.78759 (* 1 = 2.78759 loss)\nI0818 16:57:10.293550 17344 solver.cpp:228] Iteration 40100, loss = 0.0390437\nI0818 16:57:10.293601 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:57:10.293618 17344 solver.cpp:244]     Train net output #1: loss = 0.0390435 (* 1 = 0.0390435 loss)\nI0818 16:57:10.388656 17344 sgd_solver.cpp:166] Iteration 40100, lr = 0.0035\nI0818 16:59:27.977532 17344 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 17:00:49.376332 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71604\nI0818 17:00:49.376648 17344 solver.cpp:404]     Test net output #1: loss = 1.51773 (* 1 = 1.51773 loss)\nI0818 17:00:50.688623 17344 solver.cpp:228] Iteration 40200, loss = 0.0332226\nI0818 17:00:50.688683 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:00:50.688702 17344 solver.cpp:244]     Train net output #1: loss = 0.0332224 (* 1 = 0.0332224 loss)\nI0818 17:00:50.783804 17344 sgd_solver.cpp:166] Iteration 40200, lr = 0.0035\nI0818 17:03:08.412751 17344 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 17:04:29.817234 17344 solver.cpp:404]     Test net output #0: accuracy = 0.53884\nI0818 17:04:29.817590 17344 solver.cpp:404]     Test net output #1: loss = 2.97796 (* 1 = 2.97796 loss)\nI0818 17:04:31.128881 17344 solver.cpp:228] Iteration 40300, loss = 0.0521429\nI0818 17:04:31.128934 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:04:31.128952 17344 solver.cpp:244]     Train net output #1: loss = 0.0521427 (* 1 = 0.0521427 loss)\nI0818 17:04:31.220757 17344 sgd_solver.cpp:166] Iteration 40300, lr = 0.0035\nI0818 17:06:48.809370 17344 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 17:08:10.223238 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41016\nI0818 17:08:10.223592 17344 
solver.cpp:404]     Test net output #1: loss = 6.64853 (* 1 = 6.64853 loss)\nI0818 17:08:11.535784 17344 solver.cpp:228] Iteration 40400, loss = 0.0745062\nI0818 17:08:11.535840 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:08:11.535856 17344 solver.cpp:244]     Train net output #1: loss = 0.074506 (* 1 = 0.074506 loss)\nI0818 17:08:11.636395 17344 sgd_solver.cpp:166] Iteration 40400, lr = 0.0035\nI0818 17:10:29.177366 17344 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 17:11:50.664512 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76664\nI0818 17:11:50.664870 17344 solver.cpp:404]     Test net output #1: loss = 1.2455 (* 1 = 1.2455 loss)\nI0818 17:11:51.977917 17344 solver.cpp:228] Iteration 40500, loss = 0.0336771\nI0818 17:11:51.977958 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:11:51.977974 17344 solver.cpp:244]     Train net output #1: loss = 0.0336769 (* 1 = 0.0336769 loss)\nI0818 17:11:52.072340 17344 sgd_solver.cpp:166] Iteration 40500, lr = 0.0035\nI0818 17:14:09.569962 17344 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:15:31.139174 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69352\nI0818 17:15:31.139505 17344 solver.cpp:404]     Test net output #1: loss = 1.88855 (* 1 = 1.88855 loss)\nI0818 17:15:32.452435 17344 solver.cpp:228] Iteration 40600, loss = 0.0458721\nI0818 17:15:32.452477 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:15:32.452492 17344 solver.cpp:244]     Train net output #1: loss = 0.0458719 (* 1 = 0.0458719 loss)\nI0818 17:15:32.542932 17344 sgd_solver.cpp:166] Iteration 40600, lr = 0.0035\nI0818 17:17:50.107961 17344 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:19:11.645689 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69896\nI0818 17:19:11.646021 17344 solver.cpp:404]     Test net output #1: loss = 1.96318 (* 1 = 1.96318 loss)\nI0818 17:19:12.958539 17344 solver.cpp:228] 
Iteration 40700, loss = 0.0253109\nI0818 17:19:12.958578 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:19:12.958593 17344 solver.cpp:244]     Train net output #1: loss = 0.0253107 (* 1 = 0.0253107 loss)\nI0818 17:19:13.052645 17344 sgd_solver.cpp:166] Iteration 40700, lr = 0.0035\nI0818 17:21:30.604851 17344 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:22:52.061096 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58656\nI0818 17:22:52.061431 17344 solver.cpp:404]     Test net output #1: loss = 2.96003 (* 1 = 2.96003 loss)\nI0818 17:22:53.373674 17344 solver.cpp:228] Iteration 40800, loss = 0.0414398\nI0818 17:22:53.373734 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:22:53.373752 17344 solver.cpp:244]     Train net output #1: loss = 0.0414396 (* 1 = 0.0414396 loss)\nI0818 17:22:53.472530 17344 sgd_solver.cpp:166] Iteration 40800, lr = 0.0035\nI0818 17:25:11.075678 17344 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:26:32.497397 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65296\nI0818 17:26:32.497746 17344 solver.cpp:404]     Test net output #1: loss = 2.32797 (* 1 = 2.32797 loss)\nI0818 17:26:33.812837 17344 solver.cpp:228] Iteration 40900, loss = 0.0231615\nI0818 17:26:33.812878 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:26:33.812893 17344 solver.cpp:244]     Train net output #1: loss = 0.0231613 (* 1 = 0.0231613 loss)\nI0818 17:26:33.902380 17344 sgd_solver.cpp:166] Iteration 40900, lr = 0.0035\nI0818 17:28:51.545246 17344 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:30:13.114172 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38748\nI0818 17:30:13.114495 17344 solver.cpp:404]     Test net output #1: loss = 5.5554 (* 1 = 5.5554 loss)\nI0818 17:30:14.428380 17344 solver.cpp:228] Iteration 41000, loss = 0.11459\nI0818 17:30:14.428424 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0818 17:30:14.428449 17344 solver.cpp:244]     Train net output #1: loss = 0.11459 (* 1 = 0.11459 loss)\nI0818 17:30:14.518602 17344 sgd_solver.cpp:166] Iteration 41000, lr = 0.0035\nI0818 17:32:32.062700 17344 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:33:53.651101 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56172\nI0818 17:33:53.651443 17344 solver.cpp:404]     Test net output #1: loss = 2.82081 (* 1 = 2.82081 loss)\nI0818 17:33:54.964572 17344 solver.cpp:228] Iteration 41100, loss = 0.0441672\nI0818 17:33:54.964632 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:33:54.964658 17344 solver.cpp:244]     Train net output #1: loss = 0.0441671 (* 1 = 0.0441671 loss)\nI0818 17:33:55.053706 17344 sgd_solver.cpp:166] Iteration 41100, lr = 0.0035\nI0818 17:36:12.687326 17344 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 17:37:34.255336 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58496\nI0818 17:37:34.255693 17344 solver.cpp:404]     Test net output #1: loss = 2.991 (* 1 = 2.991 loss)\nI0818 17:37:35.566323 17344 solver.cpp:228] Iteration 41200, loss = 0.0811262\nI0818 17:37:35.566386 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:37:35.566412 17344 solver.cpp:244]     Train net output #1: loss = 0.081126 (* 1 = 0.081126 loss)\nI0818 17:37:35.654072 17344 sgd_solver.cpp:166] Iteration 41200, lr = 0.0035\nI0818 17:39:53.250157 17344 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 17:41:14.851678 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76184\nI0818 17:41:14.852027 17344 solver.cpp:404]     Test net output #1: loss = 1.10386 (* 1 = 1.10386 loss)\nI0818 17:41:16.162925 17344 solver.cpp:228] Iteration 41300, loss = 0.0214375\nI0818 17:41:16.162986 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:41:16.163012 17344 solver.cpp:244]     Train net output #1: loss = 0.0214373 (* 1 = 0.0214373 loss)\nI0818 
17:41:16.252717 17344 sgd_solver.cpp:166] Iteration 41300, lr = 0.0035\nI0818 17:43:33.799083 17344 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 17:44:55.243078 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37864\nI0818 17:44:55.243402 17344 solver.cpp:404]     Test net output #1: loss = 5.21938 (* 1 = 5.21938 loss)\nI0818 17:44:56.554095 17344 solver.cpp:228] Iteration 41400, loss = 0.0459492\nI0818 17:44:56.554142 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:44:56.554167 17344 solver.cpp:244]     Train net output #1: loss = 0.045949 (* 1 = 0.045949 loss)\nI0818 17:44:56.641255 17344 sgd_solver.cpp:166] Iteration 41400, lr = 0.0035\nI0818 17:47:14.188478 17344 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 17:48:35.636580 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56908\nI0818 17:48:35.636926 17344 solver.cpp:404]     Test net output #1: loss = 2.60612 (* 1 = 2.60612 loss)\nI0818 17:48:36.948110 17344 solver.cpp:228] Iteration 41500, loss = 0.017042\nI0818 17:48:36.948173 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:48:36.948197 17344 solver.cpp:244]     Train net output #1: loss = 0.0170418 (* 1 = 0.0170418 loss)\nI0818 17:48:37.035321 17344 sgd_solver.cpp:166] Iteration 41500, lr = 0.0035\nI0818 17:50:54.538974 17344 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 17:52:15.973423 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50252\nI0818 17:52:15.973744 17344 solver.cpp:404]     Test net output #1: loss = 4.13694 (* 1 = 4.13694 loss)\nI0818 17:52:17.284077 17344 solver.cpp:228] Iteration 41600, loss = 0.033862\nI0818 17:52:17.284124 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:52:17.284147 17344 solver.cpp:244]     Train net output #1: loss = 0.0338618 (* 1 = 0.0338618 loss)\nI0818 17:52:17.372859 17344 sgd_solver.cpp:166] Iteration 41600, lr = 0.0035\nI0818 17:54:34.891369 17344 solver.cpp:337] Iteration 
41700, Testing net (#0)\nI0818 17:55:56.337153 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44148\nI0818 17:55:56.337496 17344 solver.cpp:404]     Test net output #1: loss = 4.94643 (* 1 = 4.94643 loss)\nI0818 17:55:57.647759 17344 solver.cpp:228] Iteration 41700, loss = 0.0297932\nI0818 17:55:57.647805 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:55:57.647828 17344 solver.cpp:244]     Train net output #1: loss = 0.029793 (* 1 = 0.029793 loss)\nI0818 17:55:57.738695 17344 sgd_solver.cpp:166] Iteration 41700, lr = 0.0035\nI0818 17:58:15.330438 17344 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 17:59:36.778345 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62492\nI0818 17:59:36.778662 17344 solver.cpp:404]     Test net output #1: loss = 2.04036 (* 1 = 2.04036 loss)\nI0818 17:59:38.089208 17344 solver.cpp:228] Iteration 41800, loss = 0.0547448\nI0818 17:59:38.089254 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:59:38.089278 17344 solver.cpp:244]     Train net output #1: loss = 0.0547446 (* 1 = 0.0547446 loss)\nI0818 17:59:38.177742 17344 sgd_solver.cpp:166] Iteration 41800, lr = 0.0035\nI0818 18:01:55.774214 17344 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 18:03:17.212173 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64972\nI0818 18:03:17.212527 17344 solver.cpp:404]     Test net output #1: loss = 1.89311 (* 1 = 1.89311 loss)\nI0818 18:03:18.523553 17344 solver.cpp:228] Iteration 41900, loss = 0.0244569\nI0818 18:03:18.523600 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:03:18.523623 17344 solver.cpp:244]     Train net output #1: loss = 0.0244567 (* 1 = 0.0244567 loss)\nI0818 18:03:18.615902 17344 sgd_solver.cpp:166] Iteration 41900, lr = 0.0035\nI0818 18:05:36.174644 17344 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 18:06:57.622357 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4178\nI0818 
18:06:57.622675 17344 solver.cpp:404]     Test net output #1: loss = 5.02113 (* 1 = 5.02113 loss)\nI0818 18:06:58.933413 17344 solver.cpp:228] Iteration 42000, loss = 0.146809\nI0818 18:06:58.933460 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:06:58.933483 17344 solver.cpp:244]     Train net output #1: loss = 0.146809 (* 1 = 0.146809 loss)\nI0818 18:06:59.022455 17344 sgd_solver.cpp:166] Iteration 42000, lr = 0.0035\nI0818 18:09:16.611028 17344 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 18:10:38.051820 17344 solver.cpp:404]     Test net output #0: accuracy = 0.18876\nI0818 18:10:38.052132 17344 solver.cpp:404]     Test net output #1: loss = 9.85619 (* 1 = 9.85619 loss)\nI0818 18:10:39.362546 17344 solver.cpp:228] Iteration 42100, loss = 0.0359599\nI0818 18:10:39.362594 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:10:39.362618 17344 solver.cpp:244]     Train net output #1: loss = 0.0359597 (* 1 = 0.0359597 loss)\nI0818 18:10:39.453261 17344 sgd_solver.cpp:166] Iteration 42100, lr = 0.0035\nI0818 18:12:57.012903 17344 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:14:18.466914 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60772\nI0818 18:14:18.467278 17344 solver.cpp:404]     Test net output #1: loss = 2.25766 (* 1 = 2.25766 loss)\nI0818 18:14:19.777595 17344 solver.cpp:228] Iteration 42200, loss = 0.0762137\nI0818 18:14:19.777657 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:14:19.777681 17344 solver.cpp:244]     Train net output #1: loss = 0.0762134 (* 1 = 0.0762134 loss)\nI0818 18:14:19.870331 17344 sgd_solver.cpp:166] Iteration 42200, lr = 0.0035\nI0818 18:16:37.335489 17344 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:17:58.791635 17344 solver.cpp:404]     Test net output #0: accuracy = 0.2794\nI0818 18:17:58.791949 17344 solver.cpp:404]     Test net output #1: loss = 8.33724 (* 1 = 8.33724 loss)\nI0818 18:18:00.102612 17344 
solver.cpp:228] Iteration 42300, loss = 0.0224483\nI0818 18:18:00.102677 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:18:00.102704 17344 solver.cpp:244]     Train net output #1: loss = 0.022448 (* 1 = 0.022448 loss)\nI0818 18:18:00.194598 17344 sgd_solver.cpp:166] Iteration 42300, lr = 0.0035\nI0818 18:20:17.753471 17344 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:21:39.193948 17344 solver.cpp:404]     Test net output #0: accuracy = 0.59124\nI0818 18:21:39.194283 17344 solver.cpp:404]     Test net output #1: loss = 1.99023 (* 1 = 1.99023 loss)\nI0818 18:21:40.505167 17344 solver.cpp:228] Iteration 42400, loss = 0.0302706\nI0818 18:21:40.505231 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:21:40.505256 17344 solver.cpp:244]     Train net output #1: loss = 0.0302704 (* 1 = 0.0302704 loss)\nI0818 18:21:40.591231 17344 sgd_solver.cpp:166] Iteration 42400, lr = 0.0035\nI0818 18:23:58.140413 17344 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:25:19.570637 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64104\nI0818 18:25:19.570952 17344 solver.cpp:404]     Test net output #1: loss = 2.04158 (* 1 = 2.04158 loss)\nI0818 18:25:20.881702 17344 solver.cpp:228] Iteration 42500, loss = 0.0492114\nI0818 18:25:20.881767 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:25:20.881795 17344 solver.cpp:244]     Train net output #1: loss = 0.0492111 (* 1 = 0.0492111 loss)\nI0818 18:25:20.970573 17344 sgd_solver.cpp:166] Iteration 42500, lr = 0.0035\nI0818 18:27:38.494431 17344 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:28:59.931056 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69672\nI0818 18:28:59.931376 17344 solver.cpp:404]     Test net output #1: loss = 1.64205 (* 1 = 1.64205 loss)\nI0818 18:29:01.240777 17344 solver.cpp:228] Iteration 42600, loss = 0.0202027\nI0818 18:29:01.240823 17344 solver.cpp:244]     Train net output #0: 
accuracy = 0.992\nI0818 18:29:01.240846 17344 solver.cpp:244]     Train net output #1: loss = 0.0202025 (* 1 = 0.0202025 loss)\nI0818 18:29:01.335846 17344 sgd_solver.cpp:166] Iteration 42600, lr = 0.0035\nI0818 18:31:18.898008 17344 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:32:40.339830 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30948\nI0818 18:32:40.340148 17344 solver.cpp:404]     Test net output #1: loss = 10.3037 (* 1 = 10.3037 loss)\nI0818 18:32:41.650872 17344 solver.cpp:228] Iteration 42700, loss = 0.0199041\nI0818 18:32:41.650920 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:32:41.650943 17344 solver.cpp:244]     Train net output #1: loss = 0.0199039 (* 1 = 0.0199039 loss)\nI0818 18:32:41.743013 17344 sgd_solver.cpp:166] Iteration 42700, lr = 0.0035\nI0818 18:34:59.434355 17344 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 18:36:20.874243 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37164\nI0818 18:36:20.874577 17344 solver.cpp:404]     Test net output #1: loss = 7.13107 (* 1 = 7.13107 loss)\nI0818 18:36:22.185127 17344 solver.cpp:228] Iteration 42800, loss = 0.0502366\nI0818 18:36:22.185189 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:36:22.185215 17344 solver.cpp:244]     Train net output #1: loss = 0.0502364 (* 1 = 0.0502364 loss)\nI0818 18:36:22.280452 17344 sgd_solver.cpp:166] Iteration 42800, lr = 0.0035\nI0818 18:38:39.800195 17344 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 18:40:01.284961 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41944\nI0818 18:40:01.285329 17344 solver.cpp:404]     Test net output #1: loss = 4.28586 (* 1 = 4.28586 loss)\nI0818 18:40:02.596014 17344 solver.cpp:228] Iteration 42900, loss = 0.0293188\nI0818 18:40:02.596071 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:40:02.596097 17344 solver.cpp:244]     Train net output #1: loss = 0.0293186 (* 1 = 0.0293186 
loss)\nI0818 18:40:02.687117 17344 sgd_solver.cpp:166] Iteration 42900, lr = 0.0035\nI0818 18:42:20.300437 17344 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 18:43:41.745030 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46216\nI0818 18:43:41.745376 17344 solver.cpp:404]     Test net output #1: loss = 4.75902 (* 1 = 4.75902 loss)\nI0818 18:43:43.056360 17344 solver.cpp:228] Iteration 43000, loss = 0.107078\nI0818 18:43:43.056422 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:43:43.056448 17344 solver.cpp:244]     Train net output #1: loss = 0.107078 (* 1 = 0.107078 loss)\nI0818 18:43:43.149178 17344 sgd_solver.cpp:166] Iteration 43000, lr = 0.0035\nI0818 18:46:00.690081 17344 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 18:47:22.135663 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34876\nI0818 18:47:22.136023 17344 solver.cpp:404]     Test net output #1: loss = 7.28479 (* 1 = 7.28479 loss)\nI0818 18:47:23.446358 17344 solver.cpp:228] Iteration 43100, loss = 0.109885\nI0818 18:47:23.446422 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:47:23.446447 17344 solver.cpp:244]     Train net output #1: loss = 0.109884 (* 1 = 0.109884 loss)\nI0818 18:47:23.536386 17344 sgd_solver.cpp:166] Iteration 43100, lr = 0.0035\nI0818 18:49:41.084041 17344 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 18:51:02.661463 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6936\nI0818 18:51:02.661795 17344 solver.cpp:404]     Test net output #1: loss = 1.6357 (* 1 = 1.6357 loss)\nI0818 18:51:03.972268 17344 solver.cpp:228] Iteration 43200, loss = 0.098882\nI0818 18:51:03.972316 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:51:03.972337 17344 solver.cpp:244]     Train net output #1: loss = 0.0988817 (* 1 = 0.0988817 loss)\nI0818 18:51:04.064508 17344 sgd_solver.cpp:166] Iteration 43200, lr = 0.0035\nI0818 18:53:21.686745 17344 solver.cpp:337] 
Iteration 43300, Testing net (#0)\nI0818 18:54:43.230924 17344 solver.cpp:404]     Test net output #0: accuracy = 0.10488\nI0818 18:54:43.231274 17344 solver.cpp:404]     Test net output #1: loss = 61.4799 (* 1 = 61.4799 loss)\nI0818 18:54:44.542635 17344 solver.cpp:228] Iteration 43300, loss = 0.0358106\nI0818 18:54:44.542682 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:54:44.542706 17344 solver.cpp:244]     Train net output #1: loss = 0.0358104 (* 1 = 0.0358104 loss)\nI0818 18:54:44.636883 17344 sgd_solver.cpp:166] Iteration 43300, lr = 0.0035\nI0818 18:57:01.970386 17344 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 18:58:23.437212 17344 solver.cpp:404]     Test net output #0: accuracy = 0.12916\nI0818 18:58:23.437573 17344 solver.cpp:404]     Test net output #1: loss = 9.36725 (* 1 = 9.36725 loss)\nI0818 18:58:24.748291 17344 solver.cpp:228] Iteration 43400, loss = 0.0252544\nI0818 18:58:24.748335 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:58:24.748358 17344 solver.cpp:244]     Train net output #1: loss = 0.0252541 (* 1 = 0.0252541 loss)\nI0818 18:58:24.839772 17344 sgd_solver.cpp:166] Iteration 43400, lr = 0.0035\nI0818 19:00:42.211047 17344 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 19:02:03.691433 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55472\nI0818 19:02:03.691751 17344 solver.cpp:404]     Test net output #1: loss = 2.86127 (* 1 = 2.86127 loss)\nI0818 19:02:05.002274 17344 solver.cpp:228] Iteration 43500, loss = 0.0835327\nI0818 19:02:05.002319 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:02:05.002343 17344 solver.cpp:244]     Train net output #1: loss = 0.0835324 (* 1 = 0.0835324 loss)\nI0818 19:02:05.093484 17344 sgd_solver.cpp:166] Iteration 43500, lr = 0.0035\nI0818 19:04:22.408185 17344 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 19:05:43.897074 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.59612\nI0818 19:05:43.897413 17344 solver.cpp:404]     Test net output #1: loss = 2.89375 (* 1 = 2.89375 loss)\nI0818 19:05:45.207729 17344 solver.cpp:228] Iteration 43600, loss = 0.0517261\nI0818 19:05:45.207788 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:05:45.207814 17344 solver.cpp:244]     Train net output #1: loss = 0.0517258 (* 1 = 0.0517258 loss)\nI0818 19:05:45.292577 17344 sgd_solver.cpp:166] Iteration 43600, lr = 0.0035\nI0818 19:08:02.640761 17344 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 19:09:24.201632 17344 solver.cpp:404]     Test net output #0: accuracy = 0.47608\nI0818 19:09:24.201992 17344 solver.cpp:404]     Test net output #1: loss = 3.4396 (* 1 = 3.4396 loss)\nI0818 19:09:25.512310 17344 solver.cpp:228] Iteration 43700, loss = 0.0465094\nI0818 19:09:25.512354 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:09:25.512377 17344 solver.cpp:244]     Train net output #1: loss = 0.0465091 (* 1 = 0.0465091 loss)\nI0818 19:09:25.600020 17344 sgd_solver.cpp:166] Iteration 43700, lr = 0.0035\nI0818 19:11:42.933717 17344 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:13:04.513811 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0818 19:13:04.514143 17344 solver.cpp:404]     Test net output #1: loss = 1.33823 (* 1 = 1.33823 loss)\nI0818 19:13:05.824441 17344 solver.cpp:228] Iteration 43800, loss = 0.0917059\nI0818 19:13:05.824488 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:13:05.824512 17344 solver.cpp:244]     Train net output #1: loss = 0.0917056 (* 1 = 0.0917056 loss)\nI0818 19:13:05.915220 17344 sgd_solver.cpp:166] Iteration 43800, lr = 0.0035\nI0818 19:15:23.203395 17344 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:16:44.786824 17344 solver.cpp:404]     Test net output #0: accuracy = 0.54848\nI0818 19:16:44.787165 17344 solver.cpp:404]     Test net output #1: loss = 2.82222 (* 1 = 2.82222 loss)\nI0818 
19:16:46.097863 17344 solver.cpp:228] Iteration 43900, loss = 0.106878\nI0818 19:16:46.097910 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:16:46.097934 17344 solver.cpp:244]     Train net output #1: loss = 0.106878 (* 1 = 0.106878 loss)\nI0818 19:16:46.189236 17344 sgd_solver.cpp:166] Iteration 43900, lr = 0.0035\nI0818 19:19:03.444093 17344 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:20:24.964177 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58972\nI0818 19:20:24.964524 17344 solver.cpp:404]     Test net output #1: loss = 2.98564 (* 1 = 2.98564 loss)\nI0818 19:20:26.274739 17344 solver.cpp:228] Iteration 44000, loss = 0.0560745\nI0818 19:20:26.274781 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:20:26.274806 17344 solver.cpp:244]     Train net output #1: loss = 0.0560743 (* 1 = 0.0560743 loss)\nI0818 19:20:26.365363 17344 sgd_solver.cpp:166] Iteration 44000, lr = 0.0035\nI0818 19:22:43.626961 17344 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:24:05.186029 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6342\nI0818 19:24:05.186405 17344 solver.cpp:404]     Test net output #1: loss = 2.51935 (* 1 = 2.51935 loss)\nI0818 19:24:06.496824 17344 solver.cpp:228] Iteration 44100, loss = 0.120879\nI0818 19:24:06.496877 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:24:06.496901 17344 solver.cpp:244]     Train net output #1: loss = 0.120878 (* 1 = 0.120878 loss)\nI0818 19:24:06.588891 17344 sgd_solver.cpp:166] Iteration 44100, lr = 0.0035\nI0818 19:26:23.901208 17344 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:27:45.470921 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56492\nI0818 19:27:45.471298 17344 solver.cpp:404]     Test net output #1: loss = 3.01633 (* 1 = 3.01633 loss)\nI0818 19:27:46.781846 17344 solver.cpp:228] Iteration 44200, loss = 0.0282878\nI0818 19:27:46.781899 17344 solver.cpp:244]     Train 
net output #0: accuracy = 0.992\nI0818 19:27:46.781924 17344 solver.cpp:244]     Train net output #1: loss = 0.0282876 (* 1 = 0.0282876 loss)\nI0818 19:27:46.871178 17344 sgd_solver.cpp:166] Iteration 44200, lr = 0.0035\nI0818 19:30:04.224449 17344 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:31:25.743074 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29312\nI0818 19:31:25.743412 17344 solver.cpp:404]     Test net output #1: loss = 8.29263 (* 1 = 8.29263 loss)\nI0818 19:31:27.053984 17344 solver.cpp:228] Iteration 44300, loss = 0.0401031\nI0818 19:31:27.054030 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:31:27.054059 17344 solver.cpp:244]     Train net output #1: loss = 0.0401028 (* 1 = 0.0401028 loss)\nI0818 19:31:27.143920 17344 sgd_solver.cpp:166] Iteration 44300, lr = 0.0035\nI0818 19:33:44.461621 17344 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 19:35:06.033473 17344 solver.cpp:404]     Test net output #0: accuracy = 0.24612\nI0818 19:35:06.033825 17344 solver.cpp:404]     Test net output #1: loss = 10.5569 (* 1 = 10.5569 loss)\nI0818 19:35:07.343682 17344 solver.cpp:228] Iteration 44400, loss = 0.0401605\nI0818 19:35:07.343729 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:35:07.343750 17344 solver.cpp:244]     Train net output #1: loss = 0.0401602 (* 1 = 0.0401602 loss)\nI0818 19:35:07.431818 17344 sgd_solver.cpp:166] Iteration 44400, lr = 0.0035\nI0818 19:37:24.711839 17344 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 19:38:46.276262 17344 solver.cpp:404]     Test net output #0: accuracy = 0.43996\nI0818 19:38:46.276625 17344 solver.cpp:404]     Test net output #1: loss = 5.49486 (* 1 = 5.49486 loss)\nI0818 19:38:47.587265 17344 solver.cpp:228] Iteration 44500, loss = 0.0694099\nI0818 19:38:47.587321 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:38:47.587344 17344 solver.cpp:244]     Train net output #1: loss = 0.0694096 (* 
1 = 0.0694096 loss)\nI0818 19:38:47.682155 17344 sgd_solver.cpp:166] Iteration 44500, lr = 0.0035\nI0818 19:41:05.033143 17344 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 19:42:26.576457 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58804\nI0818 19:42:26.576812 17344 solver.cpp:404]     Test net output #1: loss = 2.62871 (* 1 = 2.62871 loss)\nI0818 19:42:27.887848 17344 solver.cpp:228] Iteration 44600, loss = 0.0973968\nI0818 19:42:27.887894 17344 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:42:27.887918 17344 solver.cpp:244]     Train net output #1: loss = 0.0973966 (* 1 = 0.0973966 loss)\nI0818 19:42:27.982148 17344 sgd_solver.cpp:166] Iteration 44600, lr = 0.0035\nI0818 19:44:45.321029 17344 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 19:46:06.858974 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5662\nI0818 19:46:06.859340 17344 solver.cpp:404]     Test net output #1: loss = 2.79603 (* 1 = 2.79603 loss)\nI0818 19:46:08.169487 17344 solver.cpp:228] Iteration 44700, loss = 0.0410117\nI0818 19:46:08.169534 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:46:08.169559 17344 solver.cpp:244]     Train net output #1: loss = 0.0410115 (* 1 = 0.0410115 loss)\nI0818 19:46:08.259171 17344 sgd_solver.cpp:166] Iteration 44700, lr = 0.0035\nI0818 19:48:25.607812 17344 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 19:49:47.197185 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7158\nI0818 19:49:47.197526 17344 solver.cpp:404]     Test net output #1: loss = 1.26725 (* 1 = 1.26725 loss)\nI0818 19:49:48.508455 17344 solver.cpp:228] Iteration 44800, loss = 0.0381974\nI0818 19:49:48.508519 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:49:48.508546 17344 solver.cpp:244]     Train net output #1: loss = 0.0381972 (* 1 = 0.0381972 loss)\nI0818 19:49:48.595496 17344 sgd_solver.cpp:166] Iteration 44800, lr = 0.0035\nI0818 19:52:05.931187 
17344 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 19:53:27.393080 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6944\nI0818 19:53:27.393451 17344 solver.cpp:404]     Test net output #1: loss = 1.57443 (* 1 = 1.57443 loss)\nI0818 19:53:28.703388 17344 solver.cpp:228] Iteration 44900, loss = 0.00985603\nI0818 19:53:28.703447 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:53:28.703471 17344 solver.cpp:244]     Train net output #1: loss = 0.00985577 (* 1 = 0.00985577 loss)\nI0818 19:53:28.793709 17344 sgd_solver.cpp:166] Iteration 44900, lr = 0.0035\nI0818 19:55:46.113971 17344 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 19:57:07.682018 17344 solver.cpp:404]     Test net output #0: accuracy = 0.623\nI0818 19:57:07.682392 17344 solver.cpp:404]     Test net output #1: loss = 2.49342 (* 1 = 2.49342 loss)\nI0818 19:57:08.993089 17344 solver.cpp:228] Iteration 45000, loss = 0.0667556\nI0818 19:57:08.993144 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:57:08.993167 17344 solver.cpp:244]     Train net output #1: loss = 0.0667554 (* 1 = 0.0667554 loss)\nI0818 19:57:09.079841 17344 sgd_solver.cpp:166] Iteration 45000, lr = 0.0035\nI0818 19:59:26.455623 17344 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 20:00:47.967685 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65068\nI0818 20:00:47.968014 17344 solver.cpp:404]     Test net output #1: loss = 2.63732 (* 1 = 2.63732 loss)\nI0818 20:00:49.277194 17344 solver.cpp:228] Iteration 45100, loss = 0.0463151\nI0818 20:00:49.277236 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:00:49.277252 17344 solver.cpp:244]     Train net output #1: loss = 0.0463148 (* 1 = 0.0463148 loss)\nI0818 20:00:49.364080 17344 sgd_solver.cpp:166] Iteration 45100, lr = 0.0035\nI0818 20:03:06.785012 17344 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 20:04:28.227326 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.21256\nI0818 20:04:28.227663 17344 solver.cpp:404]     Test net output #1: loss = 12.0649 (* 1 = 12.0649 loss)\nI0818 20:04:29.537598 17344 solver.cpp:228] Iteration 45200, loss = 0.0128398\nI0818 20:04:29.537638 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:04:29.537654 17344 solver.cpp:244]     Train net output #1: loss = 0.0128395 (* 1 = 0.0128395 loss)\nI0818 20:04:29.623412 17344 sgd_solver.cpp:166] Iteration 45200, lr = 0.0035\nI0818 20:06:46.965785 17344 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 20:08:08.396328 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI0818 20:08:08.396661 17344 solver.cpp:404]     Test net output #1: loss = 1.12412 (* 1 = 1.12412 loss)\nI0818 20:08:09.705759 17344 solver.cpp:228] Iteration 45300, loss = 0.0208988\nI0818 20:08:09.705811 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:08:09.705828 17344 solver.cpp:244]     Train net output #1: loss = 0.0208986 (* 1 = 0.0208986 loss)\nI0818 20:08:09.799688 17344 sgd_solver.cpp:166] Iteration 45300, lr = 0.0035\nI0818 20:10:27.115106 17344 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 20:11:48.552366 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77264\nI0818 20:11:48.552728 17344 solver.cpp:404]     Test net output #1: loss = 1.1227 (* 1 = 1.1227 loss)\nI0818 20:11:49.862476 17344 solver.cpp:228] Iteration 45400, loss = 0.0817152\nI0818 20:11:49.862516 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:11:49.862532 17344 solver.cpp:244]     Train net output #1: loss = 0.081715 (* 1 = 0.081715 loss)\nI0818 20:11:49.952949 17344 sgd_solver.cpp:166] Iteration 45400, lr = 0.0035\nI0818 20:14:07.200821 17344 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:15:28.624910 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57828\nI0818 20:15:28.625272 17344 solver.cpp:404]     Test net output #1: loss = 2.78794 (* 1 = 2.78794 loss)\nI0818 
20:15:29.934335 17344 solver.cpp:228] Iteration 45500, loss = 0.0518503\nI0818 20:15:29.934376 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:15:29.934392 17344 solver.cpp:244]     Train net output #1: loss = 0.0518502 (* 1 = 0.0518502 loss)\nI0818 20:15:30.025496 17344 sgd_solver.cpp:166] Iteration 45500, lr = 0.0035\nI0818 20:17:47.345052 17344 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:19:08.784752 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73964\nI0818 20:19:08.785096 17344 solver.cpp:404]     Test net output #1: loss = 1.30767 (* 1 = 1.30767 loss)\nI0818 20:19:10.094949 17344 solver.cpp:228] Iteration 45600, loss = 0.0267271\nI0818 20:19:10.095000 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:19:10.095017 17344 solver.cpp:244]     Train net output #1: loss = 0.0267269 (* 1 = 0.0267269 loss)\nI0818 20:19:10.178902 17344 sgd_solver.cpp:166] Iteration 45600, lr = 0.0035\nI0818 20:21:27.407640 17344 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:22:48.827927 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67756\nI0818 20:22:48.828279 17344 solver.cpp:404]     Test net output #1: loss = 1.5418 (* 1 = 1.5418 loss)\nI0818 20:22:50.138805 17344 solver.cpp:228] Iteration 45700, loss = 0.0427926\nI0818 20:22:50.138850 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:22:50.138867 17344 solver.cpp:244]     Train net output #1: loss = 0.0427924 (* 1 = 0.0427924 loss)\nI0818 20:22:50.225184 17344 sgd_solver.cpp:166] Iteration 45700, lr = 0.0035\nI0818 20:25:07.343013 17344 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:26:27.629741 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0818 20:26:27.630039 17344 solver.cpp:404]     Test net output #1: loss = 1.36866 (* 1 = 1.36866 loss)\nI0818 20:26:28.934550 17344 solver.cpp:228] Iteration 45800, loss = 0.034285\nI0818 20:26:28.934592 17344 solver.cpp:244]     
Train net output #0: accuracy = 0.984\nI0818 20:26:28.934608 17344 solver.cpp:244]     Train net output #1: loss = 0.0342848 (* 1 = 0.0342848 loss)\nI0818 20:26:29.033773 17344 sgd_solver.cpp:166] Iteration 45800, lr = 0.0035\nI0818 20:28:46.119042 17344 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:30:06.392349 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71044\nI0818 20:30:06.392611 17344 solver.cpp:404]     Test net output #1: loss = 1.39763 (* 1 = 1.39763 loss)\nI0818 20:30:07.698004 17344 solver.cpp:228] Iteration 45900, loss = 0.0170195\nI0818 20:30:07.698048 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:30:07.698065 17344 solver.cpp:244]     Train net output #1: loss = 0.0170193 (* 1 = 0.0170193 loss)\nI0818 20:30:07.793443 17344 sgd_solver.cpp:166] Iteration 45900, lr = 0.0035\nI0818 20:32:24.899102 17344 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 20:33:45.276890 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69716\nI0818 20:33:45.277144 17344 solver.cpp:404]     Test net output #1: loss = 1.54224 (* 1 = 1.54224 loss)\nI0818 20:33:46.582120 17344 solver.cpp:228] Iteration 46000, loss = 0.0452964\nI0818 20:33:46.582166 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:33:46.582182 17344 solver.cpp:244]     Train net output #1: loss = 0.0452962 (* 1 = 0.0452962 loss)\nI0818 20:33:46.680012 17344 sgd_solver.cpp:166] Iteration 46000, lr = 0.0035\nI0818 20:36:03.704494 17344 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 20:37:24.082412 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57112\nI0818 20:37:24.082684 17344 solver.cpp:404]     Test net output #1: loss = 3.00268 (* 1 = 3.00268 loss)\nI0818 20:37:25.388519 17344 solver.cpp:228] Iteration 46100, loss = 0.0561425\nI0818 20:37:25.388562 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:37:25.388578 17344 solver.cpp:244]     Train net output #1: loss = 0.0561423 
(* 1 = 0.0561423 loss)\nI0818 20:37:25.483052 17344 sgd_solver.cpp:166] Iteration 46100, lr = 0.0035\nI0818 20:39:42.615736 17344 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 20:41:02.992220 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7034\nI0818 20:41:02.992516 17344 solver.cpp:404]     Test net output #1: loss = 1.72757 (* 1 = 1.72757 loss)\nI0818 20:41:04.298115 17344 solver.cpp:228] Iteration 46200, loss = 0.027449\nI0818 20:41:04.298158 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:41:04.298176 17344 solver.cpp:244]     Train net output #1: loss = 0.0274488 (* 1 = 0.0274488 loss)\nI0818 20:41:04.393579 17344 sgd_solver.cpp:166] Iteration 46200, lr = 0.0035\nI0818 20:43:21.539676 17344 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 20:44:41.920011 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60348\nI0818 20:44:41.920290 17344 solver.cpp:404]     Test net output #1: loss = 2.77512 (* 1 = 2.77512 loss)\nI0818 20:44:43.226429 17344 solver.cpp:228] Iteration 46300, loss = 0.0331657\nI0818 20:44:43.226474 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:44:43.226490 17344 solver.cpp:244]     Train net output #1: loss = 0.0331656 (* 1 = 0.0331656 loss)\nI0818 20:44:43.320957 17344 sgd_solver.cpp:166] Iteration 46300, lr = 0.0035\nI0818 20:47:00.369695 17344 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 20:48:20.743983 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56916\nI0818 20:48:20.744282 17344 solver.cpp:404]     Test net output #1: loss = 2.82231 (* 1 = 2.82231 loss)\nI0818 20:48:22.049690 17344 solver.cpp:228] Iteration 46400, loss = 0.0708822\nI0818 20:48:22.049736 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:48:22.049754 17344 solver.cpp:244]     Train net output #1: loss = 0.0708821 (* 1 = 0.0708821 loss)\nI0818 20:48:22.140966 17344 sgd_solver.cpp:166] Iteration 46400, lr = 0.0035\nI0818 20:50:39.278533 
17344 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 20:51:59.670394 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44308\nI0818 20:51:59.670645 17344 solver.cpp:404]     Test net output #1: loss = 5.07247 (* 1 = 5.07247 loss)\nI0818 20:52:00.976267 17344 solver.cpp:228] Iteration 46500, loss = 0.0604085\nI0818 20:52:00.976313 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:52:00.976330 17344 solver.cpp:244]     Train net output #1: loss = 0.0604083 (* 1 = 0.0604083 loss)\nI0818 20:52:01.067948 17344 sgd_solver.cpp:166] Iteration 46500, lr = 0.0035\nI0818 20:54:18.275562 17344 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 20:55:38.650565 17344 solver.cpp:404]     Test net output #0: accuracy = 0.54876\nI0818 20:55:38.650825 17344 solver.cpp:404]     Test net output #1: loss = 2.90437 (* 1 = 2.90437 loss)\nI0818 20:55:39.955812 17344 solver.cpp:228] Iteration 46600, loss = 0.0231643\nI0818 20:55:39.955858 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:55:39.955875 17344 solver.cpp:244]     Train net output #1: loss = 0.0231641 (* 1 = 0.0231641 loss)\nI0818 20:55:40.049135 17344 sgd_solver.cpp:166] Iteration 46600, lr = 0.0035\nI0818 20:57:57.297183 17344 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 20:59:17.679714 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57564\nI0818 20:59:17.679955 17344 solver.cpp:404]     Test net output #1: loss = 3.35682 (* 1 = 3.35682 loss)\nI0818 20:59:18.985064 17344 solver.cpp:228] Iteration 46700, loss = 0.0436584\nI0818 20:59:18.985108 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:59:18.985124 17344 solver.cpp:244]     Train net output #1: loss = 0.0436582 (* 1 = 0.0436582 loss)\nI0818 20:59:19.080605 17344 sgd_solver.cpp:166] Iteration 46700, lr = 0.0035\nI0818 21:01:36.177588 17344 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 21:02:56.560694 17344 solver.cpp:404]     Test net output 
#0: accuracy = 0.5816\nI0818 21:02:56.560994 17344 solver.cpp:404]     Test net output #1: loss = 3.19685 (* 1 = 3.19685 loss)\nI0818 21:02:57.866892 17344 solver.cpp:228] Iteration 46800, loss = 0.0975164\nI0818 21:02:57.866938 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:02:57.866955 17344 solver.cpp:244]     Train net output #1: loss = 0.0975163 (* 1 = 0.0975163 loss)\nI0818 21:02:57.957103 17344 sgd_solver.cpp:166] Iteration 46800, lr = 0.0035\nI0818 21:05:15.194036 17344 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 21:06:35.572178 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72128\nI0818 21:06:35.572468 17344 solver.cpp:404]     Test net output #1: loss = 1.59617 (* 1 = 1.59617 loss)\nI0818 21:06:36.878382 17344 solver.cpp:228] Iteration 46900, loss = 0.0264206\nI0818 21:06:36.878428 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:06:36.878445 17344 solver.cpp:244]     Train net output #1: loss = 0.0264204 (* 1 = 0.0264204 loss)\nI0818 21:06:36.971451 17344 sgd_solver.cpp:166] Iteration 46900, lr = 0.0035\nI0818 21:08:54.192232 17344 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 21:10:14.574082 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72284\nI0818 21:10:14.574334 17344 solver.cpp:404]     Test net output #1: loss = 1.35344 (* 1 = 1.35344 loss)\nI0818 21:10:15.880059 17344 solver.cpp:228] Iteration 47000, loss = 0.0368243\nI0818 21:10:15.880103 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:10:15.880120 17344 solver.cpp:244]     Train net output #1: loss = 0.0368242 (* 1 = 0.0368242 loss)\nI0818 21:10:15.978194 17344 sgd_solver.cpp:166] Iteration 47000, lr = 0.0035\nI0818 21:12:33.204435 17344 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:13:53.599190 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51088\nI0818 21:13:53.599480 17344 solver.cpp:404]     Test net output #1: loss = 3.83695 (* 1 = 3.83695 
loss)\nI0818 21:13:54.905325 17344 solver.cpp:228] Iteration 47100, loss = 0.0399897\nI0818 21:13:54.905369 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:13:54.905385 17344 solver.cpp:244]     Train net output #1: loss = 0.0399896 (* 1 = 0.0399896 loss)\nI0818 21:13:54.995877 17344 sgd_solver.cpp:166] Iteration 47100, lr = 0.0035\nI0818 21:16:12.247050 17344 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:17:32.636598 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61976\nI0818 21:17:32.636862 17344 solver.cpp:404]     Test net output #1: loss = 2.39269 (* 1 = 2.39269 loss)\nI0818 21:17:33.942942 17344 solver.cpp:228] Iteration 47200, loss = 0.0432799\nI0818 21:17:33.942991 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:17:33.943006 17344 solver.cpp:244]     Train net output #1: loss = 0.0432797 (* 1 = 0.0432797 loss)\nI0818 21:17:34.036363 17344 sgd_solver.cpp:166] Iteration 47200, lr = 0.0035\nI0818 21:19:51.240744 17344 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:21:11.627545 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73196\nI0818 21:21:11.627840 17344 solver.cpp:404]     Test net output #1: loss = 1.24509 (* 1 = 1.24509 loss)\nI0818 21:21:12.933706 17344 solver.cpp:228] Iteration 47300, loss = 0.0428907\nI0818 21:21:12.933749 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:21:12.933765 17344 solver.cpp:244]     Train net output #1: loss = 0.0428906 (* 1 = 0.0428906 loss)\nI0818 21:21:13.026469 17344 sgd_solver.cpp:166] Iteration 47300, lr = 0.0035\nI0818 21:23:30.194072 17344 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:24:50.583395 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57808\nI0818 21:24:50.583739 17344 solver.cpp:404]     Test net output #1: loss = 2.68496 (* 1 = 2.68496 loss)\nI0818 21:24:51.889747 17344 solver.cpp:228] Iteration 47400, loss = 0.0335014\nI0818 21:24:51.889791 17344 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:24:51.889807 17344 solver.cpp:244]     Train net output #1: loss = 0.0335012 (* 1 = 0.0335012 loss)\nI0818 21:24:51.989735 17344 sgd_solver.cpp:166] Iteration 47400, lr = 0.0035\nI0818 21:27:09.212808 17344 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:28:29.599046 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55312\nI0818 21:28:29.599285 17344 solver.cpp:404]     Test net output #1: loss = 3.75529 (* 1 = 3.75529 loss)\nI0818 21:28:30.904887 17344 solver.cpp:228] Iteration 47500, loss = 0.0601824\nI0818 21:28:30.904935 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:28:30.904959 17344 solver.cpp:244]     Train net output #1: loss = 0.0601822 (* 1 = 0.0601822 loss)\nI0818 21:28:30.995237 17344 sgd_solver.cpp:166] Iteration 47500, lr = 0.0035\nI0818 21:30:48.167265 17344 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:32:08.550616 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67452\nI0818 21:32:08.550880 17344 solver.cpp:404]     Test net output #1: loss = 1.76155 (* 1 = 1.76155 loss)\nI0818 21:32:09.856664 17344 solver.cpp:228] Iteration 47600, loss = 0.0366557\nI0818 21:32:09.856716 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:32:09.856741 17344 solver.cpp:244]     Train net output #1: loss = 0.0366555 (* 1 = 0.0366555 loss)\nI0818 21:32:09.956027 17344 sgd_solver.cpp:166] Iteration 47600, lr = 0.0035\nI0818 21:34:27.052896 17344 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 21:35:47.429489 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60144\nI0818 21:35:47.429759 17344 solver.cpp:404]     Test net output #1: loss = 2.95842 (* 1 = 2.95842 loss)\nI0818 21:35:48.735445 17344 solver.cpp:228] Iteration 47700, loss = 0.00998462\nI0818 21:35:48.735493 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:35:48.735517 17344 solver.cpp:244]     Train net output 
#1: loss = 0.00998451 (* 1 = 0.00998451 loss)\nI0818 21:35:48.839556 17344 sgd_solver.cpp:166] Iteration 47700, lr = 0.0035\nI0818 21:38:06.090529 17344 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 21:39:26.472188 17344 solver.cpp:404]     Test net output #0: accuracy = 0.405\nI0818 21:39:26.472457 17344 solver.cpp:404]     Test net output #1: loss = 5.5425 (* 1 = 5.5425 loss)\nI0818 21:39:27.778414 17344 solver.cpp:228] Iteration 47800, loss = 0.0542416\nI0818 21:39:27.778462 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:39:27.778486 17344 solver.cpp:244]     Train net output #1: loss = 0.0542415 (* 1 = 0.0542415 loss)\nI0818 21:39:27.869521 17344 sgd_solver.cpp:166] Iteration 47800, lr = 0.0035\nI0818 21:41:45.014850 17344 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 21:43:05.394415 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30272\nI0818 21:43:05.394717 17344 solver.cpp:404]     Test net output #1: loss = 8.29608 (* 1 = 8.29608 loss)\nI0818 21:43:06.700994 17344 solver.cpp:228] Iteration 47900, loss = 0.0789833\nI0818 21:43:06.701200 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:43:06.701231 17344 solver.cpp:244]     Train net output #1: loss = 0.0789832 (* 1 = 0.0789832 loss)\nI0818 21:43:06.792021 17344 sgd_solver.cpp:166] Iteration 47900, lr = 0.0035\nI0818 21:45:23.938370 17344 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 21:46:44.314918 17344 solver.cpp:404]     Test net output #0: accuracy = 0.26552\nI0818 21:46:44.315214 17344 solver.cpp:404]     Test net output #1: loss = 9.42752 (* 1 = 9.42752 loss)\nI0818 21:46:45.620837 17344 solver.cpp:228] Iteration 48000, loss = 0.0873923\nI0818 21:46:45.620885 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:46:45.620908 17344 solver.cpp:244]     Train net output #1: loss = 0.0873922 (* 1 = 0.0873922 loss)\nI0818 21:46:45.720154 17344 sgd_solver.cpp:166] Iteration 48000, lr = 
0.0035\nI0818 21:49:02.853469 17344 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 21:50:23.232097 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64652\nI0818 21:50:23.232424 17344 solver.cpp:404]     Test net output #1: loss = 2.03743 (* 1 = 2.03743 loss)\nI0818 21:50:24.538630 17344 solver.cpp:228] Iteration 48100, loss = 0.0276228\nI0818 21:50:24.538684 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:50:24.538710 17344 solver.cpp:244]     Train net output #1: loss = 0.0276226 (* 1 = 0.0276226 loss)\nI0818 21:50:24.635710 17344 sgd_solver.cpp:166] Iteration 48100, lr = 0.0035\nI0818 21:52:41.767673 17344 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 21:54:02.162215 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67408\nI0818 21:54:02.162536 17344 solver.cpp:404]     Test net output #1: loss = 1.76486 (* 1 = 1.76486 loss)\nI0818 21:54:03.469099 17344 solver.cpp:228] Iteration 48200, loss = 0.049878\nI0818 21:54:03.469147 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:54:03.469171 17344 solver.cpp:244]     Train net output #1: loss = 0.0498779 (* 1 = 0.0498779 loss)\nI0818 21:54:03.563915 17344 sgd_solver.cpp:166] Iteration 48200, lr = 0.0035\nI0818 21:56:20.710786 17344 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 21:57:41.094007 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69268\nI0818 21:57:41.094323 17344 solver.cpp:404]     Test net output #1: loss = 1.418 (* 1 = 1.418 loss)\nI0818 21:57:42.399704 17344 solver.cpp:228] Iteration 48300, loss = 0.0600379\nI0818 21:57:42.399749 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:57:42.399770 17344 solver.cpp:244]     Train net output #1: loss = 0.0600378 (* 1 = 0.0600378 loss)\nI0818 21:57:42.499883 17344 sgd_solver.cpp:166] Iteration 48300, lr = 0.0035\nI0818 21:59:59.673573 17344 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 22:01:20.052280 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.68548\nI0818 22:01:20.052597 17344 solver.cpp:404]     Test net output #1: loss = 1.79755 (* 1 = 1.79755 loss)\nI0818 22:01:21.358321 17344 solver.cpp:228] Iteration 48400, loss = 0.0205439\nI0818 22:01:21.358367 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:01:21.358384 17344 solver.cpp:244]     Train net output #1: loss = 0.0205438 (* 1 = 0.0205438 loss)\nI0818 22:01:21.452772 17344 sgd_solver.cpp:166] Iteration 48400, lr = 0.0035\nI0818 22:03:38.550412 17344 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 22:04:58.925272 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62548\nI0818 22:04:58.925590 17344 solver.cpp:404]     Test net output #1: loss = 2.64835 (* 1 = 2.64835 loss)\nI0818 22:05:00.231150 17344 solver.cpp:228] Iteration 48500, loss = 0.0289911\nI0818 22:05:00.231195 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:05:00.231212 17344 solver.cpp:244]     Train net output #1: loss = 0.0289909 (* 1 = 0.0289909 loss)\nI0818 22:05:00.329533 17344 sgd_solver.cpp:166] Iteration 48500, lr = 0.0035\nI0818 22:07:17.484123 17344 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 22:08:37.861943 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64764\nI0818 22:08:37.862260 17344 solver.cpp:404]     Test net output #1: loss = 2.07262 (* 1 = 2.07262 loss)\nI0818 22:08:39.167994 17344 solver.cpp:228] Iteration 48600, loss = 0.0306157\nI0818 22:08:39.168040 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:08:39.168056 17344 solver.cpp:244]     Train net output #1: loss = 0.0306156 (* 1 = 0.0306156 loss)\nI0818 22:08:39.264849 17344 sgd_solver.cpp:166] Iteration 48600, lr = 0.0035\nI0818 22:10:56.375371 17344 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:12:16.762435 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60972\nI0818 22:12:16.762742 17344 solver.cpp:404]     Test net output 
#1: loss = 2.58391 (* 1 = 2.58391 loss)\nI0818 22:12:18.068398 17344 solver.cpp:228] Iteration 48700, loss = 0.0379385\nI0818 22:12:18.068444 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:12:18.068460 17344 solver.cpp:244]     Train net output #1: loss = 0.0379383 (* 1 = 0.0379383 loss)\nI0818 22:12:18.160287 17344 sgd_solver.cpp:166] Iteration 48700, lr = 0.0035\nI0818 22:14:35.433125 17344 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 22:15:55.735489 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63564\nI0818 22:15:55.735810 17344 solver.cpp:404]     Test net output #1: loss = 2.26992 (* 1 = 2.26992 loss)\nI0818 22:15:57.041590 17344 solver.cpp:228] Iteration 48800, loss = 0.040399\nI0818 22:15:57.041636 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:15:57.041653 17344 solver.cpp:244]     Train net output #1: loss = 0.0403988 (* 1 = 0.0403988 loss)\nI0818 22:15:57.140346 17344 sgd_solver.cpp:166] Iteration 48800, lr = 0.0035\nI0818 22:18:14.313969 17344 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:19:34.605413 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67324\nI0818 22:19:34.605739 17344 solver.cpp:404]     Test net output #1: loss = 1.71975 (* 1 = 1.71975 loss)\nI0818 22:19:35.910862 17344 solver.cpp:228] Iteration 48900, loss = 0.0156552\nI0818 22:19:35.910904 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:19:35.910920 17344 solver.cpp:244]     Train net output #1: loss = 0.015655 (* 1 = 0.015655 loss)\nI0818 22:19:36.010923 17344 sgd_solver.cpp:166] Iteration 48900, lr = 0.0035\nI0818 22:21:53.182482 17344 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:23:13.463220 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5908\nI0818 22:23:13.463544 17344 solver.cpp:404]     Test net output #1: loss = 2.38713 (* 1 = 2.38713 loss)\nI0818 22:23:14.768321 17344 solver.cpp:228] Iteration 49000, loss = 0.0343435\nI0818 
22:23:14.768360 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:23:14.768378 17344 solver.cpp:244]     Train net output #1: loss = 0.0343434 (* 1 = 0.0343434 loss)\nI0818 22:23:14.860509 17344 sgd_solver.cpp:166] Iteration 49000, lr = 0.0035\nI0818 22:25:32.089195 17344 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 22:26:52.384021 17344 solver.cpp:404]     Test net output #0: accuracy = 0.70856\nI0818 22:26:52.384317 17344 solver.cpp:404]     Test net output #1: loss = 1.37026 (* 1 = 1.37026 loss)\nI0818 22:26:53.689630 17344 solver.cpp:228] Iteration 49100, loss = 0.112475\nI0818 22:26:53.689671 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:26:53.689687 17344 solver.cpp:244]     Train net output #1: loss = 0.112475 (* 1 = 0.112475 loss)\nI0818 22:26:53.782747 17344 sgd_solver.cpp:166] Iteration 49100, lr = 0.0035\nI0818 22:29:11.024571 17344 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:30:31.323444 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62292\nI0818 22:30:31.323752 17344 solver.cpp:404]     Test net output #1: loss = 2.14061 (* 1 = 2.14061 loss)\nI0818 22:30:32.629050 17344 solver.cpp:228] Iteration 49200, loss = 0.028564\nI0818 22:30:32.629091 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:30:32.629107 17344 solver.cpp:244]     Train net output #1: loss = 0.0285638 (* 1 = 0.0285638 loss)\nI0818 22:30:32.722067 17344 sgd_solver.cpp:166] Iteration 49200, lr = 0.0035\nI0818 22:32:49.856509 17344 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 22:34:10.134009 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67648\nI0818 22:34:10.134328 17344 solver.cpp:404]     Test net output #1: loss = 2.07244 (* 1 = 2.07244 loss)\nI0818 22:34:11.438956 17344 solver.cpp:228] Iteration 49300, loss = 0.0412621\nI0818 22:34:11.438998 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:34:11.439015 17344 solver.cpp:244]  
   Train net output #1: loss = 0.0412619 (* 1 = 0.0412619 loss)\nI0818 22:34:11.536803 17344 sgd_solver.cpp:166] Iteration 49300, lr = 0.0035\nI0818 22:36:28.760001 17344 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 22:37:49.056552 17344 solver.cpp:404]     Test net output #0: accuracy = 0.75652\nI0818 22:37:49.056874 17344 solver.cpp:404]     Test net output #1: loss = 1.21173 (* 1 = 1.21173 loss)\nI0818 22:37:50.362375 17344 solver.cpp:228] Iteration 49400, loss = 0.0254071\nI0818 22:37:50.362417 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:37:50.362433 17344 solver.cpp:244]     Train net output #1: loss = 0.0254069 (* 1 = 0.0254069 loss)\nI0818 22:37:50.457103 17344 sgd_solver.cpp:166] Iteration 49400, lr = 0.0035\nI0818 22:40:07.686517 17344 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 22:41:27.970407 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81156\nI0818 22:41:27.970727 17344 solver.cpp:404]     Test net output #1: loss = 0.853095 (* 1 = 0.853095 loss)\nI0818 22:41:29.276041 17344 solver.cpp:228] Iteration 49500, loss = 0.115706\nI0818 22:41:29.276080 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:41:29.276098 17344 solver.cpp:244]     Train net output #1: loss = 0.115705 (* 1 = 0.115705 loss)\nI0818 22:41:29.373682 17344 sgd_solver.cpp:166] Iteration 49500, lr = 0.0035\nI0818 22:43:46.552125 17344 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 22:45:06.844769 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76796\nI0818 22:45:06.845083 17344 solver.cpp:404]     Test net output #1: loss = 1.20367 (* 1 = 1.20367 loss)\nI0818 22:45:08.150460 17344 solver.cpp:228] Iteration 49600, loss = 0.0210903\nI0818 22:45:08.150501 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:45:08.150517 17344 solver.cpp:244]     Train net output #1: loss = 0.0210901 (* 1 = 0.0210901 loss)\nI0818 22:45:08.249900 17344 sgd_solver.cpp:166] Iteration 
49600, lr = 0.0035\nI0818 22:47:25.447043 17344 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 22:48:45.828637 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7742\nI0818 22:48:45.828969 17344 solver.cpp:404]     Test net output #1: loss = 1.07731 (* 1 = 1.07731 loss)\nI0818 22:48:47.134053 17344 solver.cpp:228] Iteration 49700, loss = 0.0770148\nI0818 22:48:47.134095 17344 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:48:47.134111 17344 solver.cpp:244]     Train net output #1: loss = 0.0770146 (* 1 = 0.0770146 loss)\nI0818 22:48:47.232862 17344 sgd_solver.cpp:166] Iteration 49700, lr = 0.0035\nI0818 22:51:04.293969 17344 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 22:52:24.679991 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69312\nI0818 22:52:24.680289 17344 solver.cpp:404]     Test net output #1: loss = 1.89169 (* 1 = 1.89169 loss)\nI0818 22:52:25.985332 17344 solver.cpp:228] Iteration 49800, loss = 0.024132\nI0818 22:52:25.985373 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:52:25.985389 17344 solver.cpp:244]     Train net output #1: loss = 0.0241318 (* 1 = 0.0241318 loss)\nI0818 22:52:26.083248 17344 sgd_solver.cpp:166] Iteration 49800, lr = 0.0035\nI0818 22:54:43.228665 17344 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 22:56:03.606717 17344 solver.cpp:404]     Test net output #0: accuracy = 0.69112\nI0818 22:56:03.607035 17344 solver.cpp:404]     Test net output #1: loss = 1.82062 (* 1 = 1.82062 loss)\nI0818 22:56:04.912467 17344 solver.cpp:228] Iteration 49900, loss = 0.0523917\nI0818 22:56:04.912506 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:56:04.912523 17344 solver.cpp:244]     Train net output #1: loss = 0.0523915 (* 1 = 0.0523915 loss)\nI0818 22:56:05.009562 17344 sgd_solver.cpp:166] Iteration 49900, lr = 0.0035\nI0818 22:58:22.248697 17344 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 22:59:42.633523 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.52308\nI0818 22:59:42.633839 17344 solver.cpp:404]     Test net output #1: loss = 3.06391 (* 1 = 3.06391 loss)\nI0818 22:59:43.938740 17344 solver.cpp:228] Iteration 50000, loss = 0.0406704\nI0818 22:59:43.938787 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:59:43.938805 17344 solver.cpp:244]     Train net output #1: loss = 0.0406702 (* 1 = 0.0406702 loss)\nI0818 22:59:44.035154 17344 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 22:59:44.035176 17344 sgd_solver.cpp:166] Iteration 50000, lr = 0.00035\nI0818 23:02:01.255672 17344 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 23:03:21.639631 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72132\nI0818 23:03:21.639955 17344 solver.cpp:404]     Test net output #1: loss = 1.50383 (* 1 = 1.50383 loss)\nI0818 23:03:22.945201 17344 solver.cpp:228] Iteration 50100, loss = 0.0027458\nI0818 23:03:22.945245 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:03:22.945260 17344 solver.cpp:244]     Train net output #1: loss = 0.0027456 (* 1 = 0.0027456 loss)\nI0818 23:03:23.039971 17344 sgd_solver.cpp:166] Iteration 50100, lr = 0.00035\nI0818 23:05:40.298791 17344 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 23:07:00.674988 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76632\nI0818 23:07:00.675323 17344 solver.cpp:404]     Test net output #1: loss = 1.24949 (* 1 = 1.24949 loss)\nI0818 23:07:01.980460 17344 solver.cpp:228] Iteration 50200, loss = 0.00335562\nI0818 23:07:01.980502 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:07:01.980518 17344 solver.cpp:244]     Train net output #1: loss = 0.00335541 (* 1 = 0.00335541 loss)\nI0818 23:07:02.079926 17344 sgd_solver.cpp:166] Iteration 50200, lr = 0.00035\nI0818 23:09:19.187517 17344 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:10:39.557888 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.79196\nI0818 23:10:39.558182 17344 solver.cpp:404]     Test net output #1: loss = 1.08739 (* 1 = 1.08739 loss)\nI0818 23:10:40.863423 17344 solver.cpp:228] Iteration 50300, loss = 0.00347884\nI0818 23:10:40.863467 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:10:40.863483 17344 solver.cpp:244]     Train net output #1: loss = 0.00347863 (* 1 = 0.00347863 loss)\nI0818 23:10:40.959601 17344 sgd_solver.cpp:166] Iteration 50300, lr = 0.00035\nI0818 23:12:58.110344 17344 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:14:18.486006 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8122\nI0818 23:14:18.486321 17344 solver.cpp:404]     Test net output #1: loss = 0.963387 (* 1 = 0.963387 loss)\nI0818 23:14:19.791482 17344 solver.cpp:228] Iteration 50400, loss = 0.00190899\nI0818 23:14:19.791525 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:14:19.791541 17344 solver.cpp:244]     Train net output #1: loss = 0.00190878 (* 1 = 0.00190878 loss)\nI0818 23:14:19.890316 17344 sgd_solver.cpp:166] Iteration 50400, lr = 0.00035\nI0818 23:16:37.043670 17344 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:17:57.413938 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82684\nI0818 23:17:57.414245 17344 solver.cpp:404]     Test net output #1: loss = 0.85699 (* 1 = 0.85699 loss)\nI0818 23:17:58.719504 17344 solver.cpp:228] Iteration 50500, loss = 0.00150183\nI0818 23:17:58.719547 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:17:58.719563 17344 solver.cpp:244]     Train net output #1: loss = 0.00150162 (* 1 = 0.00150162 loss)\nI0818 23:17:58.818711 17344 sgd_solver.cpp:166] Iteration 50500, lr = 0.00035\nI0818 23:20:15.942219 17344 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:21:36.315649 17344 solver.cpp:404]     Test net output #0: accuracy = 0.84028\nI0818 23:21:36.315984 17344 solver.cpp:404]     Test net output #1: loss = 0.793226 (* 1 
= 0.793226 loss)\nI0818 23:21:37.620698 17344 solver.cpp:228] Iteration 50600, loss = 0.00136512\nI0818 23:21:37.620741 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:21:37.620757 17344 solver.cpp:244]     Train net output #1: loss = 0.00136491 (* 1 = 0.00136491 loss)\nI0818 23:21:37.721158 17344 sgd_solver.cpp:166] Iteration 50600, lr = 0.00035\nI0818 23:23:54.846472 17344 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0818 23:25:15.222951 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85204\nI0818 23:25:15.223268 17344 solver.cpp:404]     Test net output #1: loss = 0.715153 (* 1 = 0.715153 loss)\nI0818 23:25:16.528115 17344 solver.cpp:228] Iteration 50700, loss = 0.000819097\nI0818 23:25:16.528157 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:25:16.528174 17344 solver.cpp:244]     Train net output #1: loss = 0.000818887 (* 1 = 0.000818887 loss)\nI0818 23:25:16.623191 17344 sgd_solver.cpp:166] Iteration 50700, lr = 0.00035\nI0818 23:27:33.726220 17344 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:28:54.107391 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86068\nI0818 23:28:54.107699 17344 solver.cpp:404]     Test net output #1: loss = 0.680952 (* 1 = 0.680952 loss)\nI0818 23:28:55.412669 17344 solver.cpp:228] Iteration 50800, loss = 0.00107297\nI0818 23:28:55.412713 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:28:55.412729 17344 solver.cpp:244]     Train net output #1: loss = 0.00107276 (* 1 = 0.00107276 loss)\nI0818 23:28:55.504984 17344 sgd_solver.cpp:166] Iteration 50800, lr = 0.00035\nI0818 23:31:12.597432 17344 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 23:32:32.973335 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86752\nI0818 23:32:32.973644 17344 solver.cpp:404]     Test net output #1: loss = 0.645532 (* 1 = 0.645532 loss)\nI0818 23:32:34.278708 17344 solver.cpp:228] Iteration 50900, loss = 0.000679529\nI0818 
23:32:34.278753 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:32:34.278774 17344 solver.cpp:244]     Train net output #1: loss = 0.000679319 (* 1 = 0.000679319 loss)\nI0818 23:32:34.373050 17344 sgd_solver.cpp:166] Iteration 50900, lr = 0.00035\nI0818 23:34:51.585175 17344 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 23:36:11.966048 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87144\nI0818 23:36:11.966367 17344 solver.cpp:404]     Test net output #1: loss = 0.632771 (* 1 = 0.632771 loss)\nI0818 23:36:13.271245 17344 solver.cpp:228] Iteration 51000, loss = 0.000961983\nI0818 23:36:13.271288 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:36:13.271303 17344 solver.cpp:244]     Train net output #1: loss = 0.000961773 (* 1 = 0.000961773 loss)\nI0818 23:36:13.372339 17344 sgd_solver.cpp:166] Iteration 51000, lr = 0.00035\nI0818 23:38:30.421041 17344 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 23:39:50.796520 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87624\nI0818 23:39:50.796842 17344 solver.cpp:404]     Test net output #1: loss = 0.595371 (* 1 = 0.595371 loss)\nI0818 23:39:52.101742 17344 solver.cpp:228] Iteration 51100, loss = 0.000876105\nI0818 23:39:52.101788 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:39:52.101804 17344 solver.cpp:244]     Train net output #1: loss = 0.000875895 (* 1 = 0.000875895 loss)\nI0818 23:39:52.198189 17344 sgd_solver.cpp:166] Iteration 51100, lr = 0.00035\nI0818 23:42:09.363530 17344 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 23:43:29.739670 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87792\nI0818 23:43:29.739979 17344 solver.cpp:404]     Test net output #1: loss = 0.61233 (* 1 = 0.61233 loss)\nI0818 23:43:31.044642 17344 solver.cpp:228] Iteration 51200, loss = 0.000590739\nI0818 23:43:31.044683 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:43:31.044699 17344 
solver.cpp:244]     Train net output #1: loss = 0.000590528 (* 1 = 0.000590528 loss)\nI0818 23:43:31.138447 17344 sgd_solver.cpp:166] Iteration 51200, lr = 0.00035\nI0818 23:45:48.263998 17344 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 23:47:08.639839 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0818 23:47:08.640162 17344 solver.cpp:404]     Test net output #1: loss = 0.573087 (* 1 = 0.573087 loss)\nI0818 23:47:09.945113 17344 solver.cpp:228] Iteration 51300, loss = 0.000440756\nI0818 23:47:09.945155 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:47:09.945170 17344 solver.cpp:244]     Train net output #1: loss = 0.000440546 (* 1 = 0.000440546 loss)\nI0818 23:47:10.042424 17344 sgd_solver.cpp:166] Iteration 51300, lr = 0.00035\nI0818 23:49:27.133916 17344 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0818 23:50:47.512187 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0818 23:50:47.512490 17344 solver.cpp:404]     Test net output #1: loss = 0.567073 (* 1 = 0.567073 loss)\nI0818 23:50:48.817679 17344 solver.cpp:228] Iteration 51400, loss = 0.000731665\nI0818 23:50:48.817725 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:50:48.817741 17344 solver.cpp:244]     Train net output #1: loss = 0.000731455 (* 1 = 0.000731455 loss)\nI0818 23:50:48.916193 17344 sgd_solver.cpp:166] Iteration 51400, lr = 0.00035\nI0818 23:53:06.152529 17344 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0818 23:54:26.523514 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88196\nI0818 23:54:26.523846 17344 solver.cpp:404]     Test net output #1: loss = 0.565128 (* 1 = 0.565128 loss)\nI0818 23:54:27.828938 17344 solver.cpp:228] Iteration 51500, loss = 0.00057775\nI0818 23:54:27.828982 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:54:27.828999 17344 solver.cpp:244]     Train net output #1: loss = 0.000577539 (* 1 = 0.000577539 loss)\nI0818 23:54:27.919247 
17344 sgd_solver.cpp:166] Iteration 51500, lr = 0.00035\nI0818 23:56:45.304581 17344 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0818 23:58:05.682641 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0818 23:58:05.682965 17344 solver.cpp:404]     Test net output #1: loss = 0.570326 (* 1 = 0.570326 loss)\nI0818 23:58:06.987836 17344 solver.cpp:228] Iteration 51600, loss = 0.000758026\nI0818 23:58:06.987881 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:58:06.987896 17344 solver.cpp:244]     Train net output #1: loss = 0.000757815 (* 1 = 0.000757815 loss)\nI0818 23:58:07.085793 17344 sgd_solver.cpp:166] Iteration 51600, lr = 0.00035\nI0819 00:00:24.223332 17344 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:01:44.591410 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0819 00:01:44.591737 17344 solver.cpp:404]     Test net output #1: loss = 0.568173 (* 1 = 0.568173 loss)\nI0819 00:01:45.896559 17344 solver.cpp:228] Iteration 51700, loss = 0.000552292\nI0819 00:01:45.896603 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:01:45.896620 17344 solver.cpp:244]     Train net output #1: loss = 0.000552082 (* 1 = 0.000552082 loss)\nI0819 00:01:45.996330 17344 sgd_solver.cpp:166] Iteration 51700, lr = 0.00035\nI0819 00:04:03.189182 17344 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:05:23.559710 17344 solver.cpp:404]     Test net output #0: accuracy = 0.877841\nI0819 00:05:23.560041 17344 solver.cpp:404]     Test net output #1: loss = 0.575387 (* 1 = 0.575387 loss)\nI0819 00:05:24.865067 17344 solver.cpp:228] Iteration 51800, loss = 0.000650209\nI0819 00:05:24.865108 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:05:24.865124 17344 solver.cpp:244]     Train net output #1: loss = 0.000649998 (* 1 = 0.000649998 loss)\nI0819 00:05:24.959576 17344 sgd_solver.cpp:166] Iteration 51800, lr = 0.00035\nI0819 00:07:42.116495 17344 solver.cpp:337] 
Iteration 51900, Testing net (#0)\nI0819 00:09:02.482646 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87816\nI0819 00:09:02.482985 17344 solver.cpp:404]     Test net output #1: loss = 0.566697 (* 1 = 0.566697 loss)\nI0819 00:09:03.788141 17344 solver.cpp:228] Iteration 51900, loss = 0.000494833\nI0819 00:09:03.788187 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:09:03.788203 17344 solver.cpp:244]     Train net output #1: loss = 0.000494622 (* 1 = 0.000494622 loss)\nI0819 00:09:03.883831 17344 sgd_solver.cpp:166] Iteration 51900, lr = 0.00035\nI0819 00:11:21.156020 17344 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:12:41.529317 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8768\nI0819 00:12:41.529644 17344 solver.cpp:404]     Test net output #1: loss = 0.567942 (* 1 = 0.567942 loss)\nI0819 00:12:42.834619 17344 solver.cpp:228] Iteration 52000, loss = 0.000297641\nI0819 00:12:42.834666 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:12:42.834682 17344 solver.cpp:244]     Train net output #1: loss = 0.000297431 (* 1 = 0.000297431 loss)\nI0819 00:12:42.929579 17344 sgd_solver.cpp:166] Iteration 52000, lr = 0.00035\nI0819 00:15:00.092345 17344 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:16:20.471875 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87268\nI0819 00:16:20.472204 17344 solver.cpp:404]     Test net output #1: loss = 0.583554 (* 1 = 0.583554 loss)\nI0819 00:16:21.777037 17344 solver.cpp:228] Iteration 52100, loss = 0.000380462\nI0819 00:16:21.777079 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:16:21.777094 17344 solver.cpp:244]     Train net output #1: loss = 0.000380251 (* 1 = 0.000380251 loss)\nI0819 00:16:21.872838 17344 sgd_solver.cpp:166] Iteration 52100, lr = 0.00035\nI0819 00:18:38.988121 17344 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:19:59.367959 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.86652\nI0819 00:19:59.368257 17344 solver.cpp:404]     Test net output #1: loss = 0.622357 (* 1 = 0.622357 loss)\nI0819 00:20:00.673193 17344 solver.cpp:228] Iteration 52200, loss = 0.000280513\nI0819 00:20:00.673238 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:20:00.673254 17344 solver.cpp:244]     Train net output #1: loss = 0.000280302 (* 1 = 0.000280302 loss)\nI0819 00:20:00.769177 17344 sgd_solver.cpp:166] Iteration 52200, lr = 0.00035\nI0819 00:22:17.910284 17344 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:23:38.276376 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0819 00:23:38.276726 17344 solver.cpp:404]     Test net output #1: loss = 0.606382 (* 1 = 0.606382 loss)\nI0819 00:23:39.581243 17344 solver.cpp:228] Iteration 52300, loss = 0.000312514\nI0819 00:23:39.581284 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:23:39.581300 17344 solver.cpp:244]     Train net output #1: loss = 0.000312304 (* 1 = 0.000312304 loss)\nI0819 00:23:39.676086 17344 sgd_solver.cpp:166] Iteration 52300, lr = 0.00035\nI0819 00:25:56.763253 17344 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 00:27:17.150609 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85656\nI0819 00:27:17.150928 17344 solver.cpp:404]     Test net output #1: loss = 0.660471 (* 1 = 0.660471 loss)\nI0819 00:27:18.456372 17344 solver.cpp:228] Iteration 52400, loss = 0.00032954\nI0819 00:27:18.456411 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:27:18.456428 17344 solver.cpp:244]     Train net output #1: loss = 0.000329329 (* 1 = 0.000329329 loss)\nI0819 00:27:18.556010 17344 sgd_solver.cpp:166] Iteration 52400, lr = 0.00035\nI0819 00:29:35.612936 17344 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 00:30:55.904573 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85596\nI0819 00:30:55.904887 17344 solver.cpp:404]     Test net output #1: loss = 0.649593 (* 
1 = 0.649593 loss)\nI0819 00:30:57.210153 17344 solver.cpp:228] Iteration 52500, loss = 0.00027791\nI0819 00:30:57.210196 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:30:57.210211 17344 solver.cpp:244]     Train net output #1: loss = 0.000277699 (* 1 = 0.000277699 loss)\nI0819 00:30:57.308105 17344 sgd_solver.cpp:166] Iteration 52500, lr = 0.00035\nI0819 00:33:14.440506 17344 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 00:34:34.728904 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8422\nI0819 00:34:34.729235 17344 solver.cpp:404]     Test net output #1: loss = 0.732571 (* 1 = 0.732571 loss)\nI0819 00:34:36.034083 17344 solver.cpp:228] Iteration 52600, loss = 0.000169254\nI0819 00:34:36.034126 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:34:36.034142 17344 solver.cpp:244]     Train net output #1: loss = 0.000169043 (* 1 = 0.000169043 loss)\nI0819 00:34:36.133744 17344 sgd_solver.cpp:166] Iteration 52600, lr = 0.00035\nI0819 00:36:53.311794 17344 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 00:38:13.594059 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83216\nI0819 00:38:13.594362 17344 solver.cpp:404]     Test net output #1: loss = 0.78258 (* 1 = 0.78258 loss)\nI0819 00:38:14.899544 17344 solver.cpp:228] Iteration 52700, loss = 0.000244136\nI0819 00:38:14.899586 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:38:14.899601 17344 solver.cpp:244]     Train net output #1: loss = 0.000243925 (* 1 = 0.000243925 loss)\nI0819 00:38:14.996103 17344 sgd_solver.cpp:166] Iteration 52700, lr = 0.00035\nI0819 00:40:32.199257 17344 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 00:41:52.477166 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82412\nI0819 00:41:52.477483 17344 solver.cpp:404]     Test net output #1: loss = 0.822496 (* 1 = 0.822496 loss)\nI0819 00:41:53.782697 17344 solver.cpp:228] Iteration 52800, loss = 0.000260323\nI0819 
00:41:53.782740 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:41:53.782757 17344 solver.cpp:244]     Train net output #1: loss = 0.000260112 (* 1 = 0.000260112 loss)\nI0819 00:41:53.879060 17344 sgd_solver.cpp:166] Iteration 52800, lr = 0.00035\nI0819 00:44:11.093899 17344 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 00:45:31.387799 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80264\nI0819 00:45:31.388097 17344 solver.cpp:404]     Test net output #1: loss = 0.94288 (* 1 = 0.94288 loss)\nI0819 00:45:32.692994 17344 solver.cpp:228] Iteration 52900, loss = 0.000320892\nI0819 00:45:32.693048 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:45:32.693065 17344 solver.cpp:244]     Train net output #1: loss = 0.000320681 (* 1 = 0.000320681 loss)\nI0819 00:45:32.790822 17344 sgd_solver.cpp:166] Iteration 52900, lr = 0.00035\nI0819 00:47:49.720501 17344 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 00:49:10.031512 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79528\nI0819 00:49:10.031792 17344 solver.cpp:404]     Test net output #1: loss = 0.950218 (* 1 = 0.950218 loss)\nI0819 00:49:11.337317 17344 solver.cpp:228] Iteration 53000, loss = 0.000335995\nI0819 00:49:11.337371 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:49:11.337397 17344 solver.cpp:244]     Train net output #1: loss = 0.000335784 (* 1 = 0.000335784 loss)\nI0819 00:49:11.430485 17344 sgd_solver.cpp:166] Iteration 53000, lr = 0.00035\nI0819 00:51:28.496731 17344 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 00:52:48.823467 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78236\nI0819 00:52:48.823796 17344 solver.cpp:404]     Test net output #1: loss = 0.983682 (* 1 = 0.983682 loss)\nI0819 00:52:50.132519 17344 solver.cpp:228] Iteration 53100, loss = 0.000329446\nI0819 00:52:50.132560 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:52:50.132575 17344 
solver.cpp:244]     Train net output #1: loss = 0.000329235 (* 1 = 0.000329235 loss)\nI0819 00:52:50.229954 17344 sgd_solver.cpp:166] Iteration 53100, lr = 0.00035\nI0819 00:55:07.376935 17344 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 00:56:27.667162 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76644\nI0819 00:56:27.667488 17344 solver.cpp:404]     Test net output #1: loss = 1.04815 (* 1 = 1.04815 loss)\nI0819 00:56:28.972690 17344 solver.cpp:228] Iteration 53200, loss = 0.00018583\nI0819 00:56:28.972734 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:56:28.972750 17344 solver.cpp:244]     Train net output #1: loss = 0.000185619 (* 1 = 0.000185619 loss)\nI0819 00:56:29.075690 17344 sgd_solver.cpp:166] Iteration 53200, lr = 0.00035\nI0819 00:58:46.334815 17344 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 01:00:06.633656 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71636\nI0819 01:00:06.633960 17344 solver.cpp:404]     Test net output #1: loss = 1.30759 (* 1 = 1.30759 loss)\nI0819 01:00:07.939182 17344 solver.cpp:228] Iteration 53300, loss = 0.000153708\nI0819 01:00:07.939223 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:00:07.939239 17344 solver.cpp:244]     Train net output #1: loss = 0.000153497 (* 1 = 0.000153497 loss)\nI0819 01:00:08.043110 17344 sgd_solver.cpp:166] Iteration 53300, lr = 0.00035\nI0819 01:02:25.238597 17344 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:03:45.503032 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71256\nI0819 01:03:45.503352 17344 solver.cpp:404]     Test net output #1: loss = 1.25697 (* 1 = 1.25697 loss)\nI0819 01:03:46.808475 17344 solver.cpp:228] Iteration 53400, loss = 0.000196705\nI0819 01:03:46.808518 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:03:46.808534 17344 solver.cpp:244]     Train net output #1: loss = 0.000196494 (* 1 = 0.000196494 loss)\nI0819 01:03:46.913590 17344 
sgd_solver.cpp:166] Iteration 53400, lr = 0.00035\nI0819 01:06:04.174770 17344 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:07:24.546735 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67892\nI0819 01:07:24.547058 17344 solver.cpp:404]     Test net output #1: loss = 1.36834 (* 1 = 1.36834 loss)\nI0819 01:07:25.851904 17344 solver.cpp:228] Iteration 53500, loss = 0.00026126\nI0819 01:07:25.851950 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:07:25.851968 17344 solver.cpp:244]     Train net output #1: loss = 0.000261049 (* 1 = 0.000261049 loss)\nI0819 01:07:25.951005 17344 sgd_solver.cpp:166] Iteration 53500, lr = 0.00035\nI0819 01:09:43.101265 17344 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:11:03.484578 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63684\nI0819 01:11:03.484899 17344 solver.cpp:404]     Test net output #1: loss = 1.55348 (* 1 = 1.55348 loss)\nI0819 01:11:04.790590 17344 solver.cpp:228] Iteration 53600, loss = 0.000190089\nI0819 01:11:04.790633 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:11:04.790648 17344 solver.cpp:244]     Train net output #1: loss = 0.000189877 (* 1 = 0.000189877 loss)\nI0819 01:11:04.891921 17344 sgd_solver.cpp:166] Iteration 53600, lr = 0.00035\nI0819 01:13:22.093323 17344 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:14:42.480183 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5812\nI0819 01:14:42.480517 17344 solver.cpp:404]     Test net output #1: loss = 1.82692 (* 1 = 1.82692 loss)\nI0819 01:14:43.785569 17344 solver.cpp:228] Iteration 53700, loss = 0.000228509\nI0819 01:14:43.785611 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:14:43.785627 17344 solver.cpp:244]     Train net output #1: loss = 0.000228298 (* 1 = 0.000228298 loss)\nI0819 01:14:43.883479 17344 sgd_solver.cpp:166] Iteration 53700, lr = 0.00035\nI0819 01:17:01.138347 17344 solver.cpp:337] Iteration 53800, 
Testing net (#0)\nI0819 01:18:21.527426 17344 solver.cpp:404]     Test net output #0: accuracy = 0.54168\nI0819 01:18:21.527761 17344 solver.cpp:404]     Test net output #1: loss = 1.91835 (* 1 = 1.91835 loss)\nI0819 01:18:22.832427 17344 solver.cpp:228] Iteration 53800, loss = 0.000244143\nI0819 01:18:22.832468 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:18:22.832484 17344 solver.cpp:244]     Train net output #1: loss = 0.000243932 (* 1 = 0.000243932 loss)\nI0819 01:18:22.931381 17344 sgd_solver.cpp:166] Iteration 53800, lr = 0.00035\nI0819 01:20:40.205101 17344 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:22:00.591526 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49188\nI0819 01:22:00.591856 17344 solver.cpp:404]     Test net output #1: loss = 2.0685 (* 1 = 2.0685 loss)\nI0819 01:22:01.897291 17344 solver.cpp:228] Iteration 53900, loss = 0.000134765\nI0819 01:22:01.897341 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:22:01.897359 17344 solver.cpp:244]     Train net output #1: loss = 0.000134554 (* 1 = 0.000134554 loss)\nI0819 01:22:01.991006 17344 sgd_solver.cpp:166] Iteration 53900, lr = 0.00035\nI0819 01:24:19.370314 17344 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:25:39.757915 17344 solver.cpp:404]     Test net output #0: accuracy = 0.41156\nI0819 01:25:39.758244 17344 solver.cpp:404]     Test net output #1: loss = 2.52003 (* 1 = 2.52003 loss)\nI0819 01:25:41.063591 17344 solver.cpp:228] Iteration 54000, loss = 0.000131114\nI0819 01:25:41.063633 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:25:41.063648 17344 solver.cpp:244]     Train net output #1: loss = 0.000130903 (* 1 = 0.000130903 loss)\nI0819 01:25:41.159579 17344 sgd_solver.cpp:166] Iteration 54000, lr = 0.00035\nI0819 01:27:58.421761 17344 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:29:18.809653 17344 solver.cpp:404]     Test net output #0: accuracy = 0.33684\nI0819 
01:29:18.809960 17344 solver.cpp:404]     Test net output #1: loss = 2.92311 (* 1 = 2.92311 loss)\nI0819 01:29:20.115694 17344 solver.cpp:228] Iteration 54100, loss = 0.000174519\nI0819 01:29:20.115743 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:29:20.115759 17344 solver.cpp:244]     Train net output #1: loss = 0.000174308 (* 1 = 0.000174308 loss)\nI0819 01:29:20.212985 17344 sgd_solver.cpp:166] Iteration 54100, lr = 0.00035\nI0819 01:31:37.467483 17344 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:32:57.853250 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32808\nI0819 01:32:57.853557 17344 solver.cpp:404]     Test net output #1: loss = 2.61568 (* 1 = 2.61568 loss)\nI0819 01:32:59.158999 17344 solver.cpp:228] Iteration 54200, loss = 0.000224216\nI0819 01:32:59.159044 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:32:59.159060 17344 solver.cpp:244]     Train net output #1: loss = 0.000224005 (* 1 = 0.000224005 loss)\nI0819 01:32:59.264376 17344 sgd_solver.cpp:166] Iteration 54200, lr = 0.00035\nI0819 01:35:16.526828 17344 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 01:36:36.905385 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35148\nI0819 01:36:36.905706 17344 solver.cpp:404]     Test net output #1: loss = 2.1861 (* 1 = 2.1861 loss)\nI0819 01:36:38.210809 17344 solver.cpp:228] Iteration 54300, loss = 0.000287435\nI0819 01:36:38.210853 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:36:38.210870 17344 solver.cpp:244]     Train net output #1: loss = 0.000287224 (* 1 = 0.000287224 loss)\nI0819 01:36:38.315209 17344 sgd_solver.cpp:166] Iteration 54300, lr = 0.00035\nI0819 01:38:55.464308 17344 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:40:15.836226 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38504\nI0819 01:40:15.836525 17344 solver.cpp:404]     Test net output #1: loss = 2.00114 (* 1 = 2.00114 loss)\nI0819 
01:40:17.141357 17344 solver.cpp:228] Iteration 54400, loss = 0.040099\nI0819 01:40:17.141400 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:40:17.141417 17344 solver.cpp:244]     Train net output #1: loss = 0.0400988 (* 1 = 0.0400988 loss)\nI0819 01:40:17.240818 17344 sgd_solver.cpp:166] Iteration 54400, lr = 0.00035\nI0819 01:42:34.453263 17344 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:43:54.843801 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44628\nI0819 01:43:54.844123 17344 solver.cpp:404]     Test net output #1: loss = 1.69765 (* 1 = 1.69765 loss)\nI0819 01:43:56.149054 17344 solver.cpp:228] Iteration 54500, loss = 0.00266784\nI0819 01:43:56.149098 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:43:56.149114 17344 solver.cpp:244]     Train net output #1: loss = 0.00266763 (* 1 = 0.00266763 loss)\nI0819 01:43:56.243640 17344 sgd_solver.cpp:166] Iteration 54500, lr = 0.00035\nI0819 01:46:13.468055 17344 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 01:47:33.839166 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4454\nI0819 01:47:33.839491 17344 solver.cpp:404]     Test net output #1: loss = 1.72307 (* 1 = 1.72307 loss)\nI0819 01:47:35.144691 17344 solver.cpp:228] Iteration 54600, loss = 0.0017804\nI0819 01:47:35.144742 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:47:35.144758 17344 solver.cpp:244]     Train net output #1: loss = 0.00178019 (* 1 = 0.00178019 loss)\nI0819 01:47:35.240317 17344 sgd_solver.cpp:166] Iteration 54600, lr = 0.00035\nI0819 01:49:52.445878 17344 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 01:51:12.820798 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46416\nI0819 01:51:12.821125 17344 solver.cpp:404]     Test net output #1: loss = 1.74292 (* 1 = 1.74292 loss)\nI0819 01:51:14.126375 17344 solver.cpp:228] Iteration 54700, loss = 0.000315893\nI0819 01:51:14.126420 17344 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0819 01:51:14.126436 17344 solver.cpp:244]     Train net output #1: loss = 0.000315685 (* 1 = 0.000315685 loss)\nI0819 01:51:14.224279 17344 sgd_solver.cpp:166] Iteration 54700, lr = 0.00035\nI0819 01:53:31.409252 17344 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 01:54:51.788367 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49164\nI0819 01:54:51.788689 17344 solver.cpp:404]     Test net output #1: loss = 1.68249 (* 1 = 1.68249 loss)\nI0819 01:54:53.093564 17344 solver.cpp:228] Iteration 54800, loss = 0.000409401\nI0819 01:54:53.093610 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:54:53.093626 17344 solver.cpp:244]     Train net output #1: loss = 0.000409194 (* 1 = 0.000409194 loss)\nI0819 01:54:53.196071 17344 sgd_solver.cpp:166] Iteration 54800, lr = 0.00035\nI0819 01:57:10.437997 17344 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 01:58:30.812989 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5652\nI0819 01:58:30.813302 17344 solver.cpp:404]     Test net output #1: loss = 1.41978 (* 1 = 1.41978 loss)\nI0819 01:58:32.117947 17344 solver.cpp:228] Iteration 54900, loss = 0.000253046\nI0819 01:58:32.117992 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:58:32.118008 17344 solver.cpp:244]     Train net output #1: loss = 0.000252839 (* 1 = 0.000252839 loss)\nI0819 01:58:32.216989 17344 sgd_solver.cpp:166] Iteration 54900, lr = 0.00035\nI0819 02:00:49.361321 17344 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 02:02:09.738330 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5852\nI0819 02:02:09.738649 17344 solver.cpp:404]     Test net output #1: loss = 1.38332 (* 1 = 1.38332 loss)\nI0819 02:02:11.043658 17344 solver.cpp:228] Iteration 55000, loss = 0.000214797\nI0819 02:02:11.043704 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:02:11.043723 17344 solver.cpp:244]     Train net output #1: loss = 
0.00021459 (* 1 = 0.00021459 loss)\nI0819 02:02:11.143172 17344 sgd_solver.cpp:166] Iteration 55000, lr = 0.00035\nI0819 02:04:28.455492 17344 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 02:05:48.837030 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6148\nI0819 02:05:48.837357 17344 solver.cpp:404]     Test net output #1: loss = 1.29941 (* 1 = 1.29941 loss)\nI0819 02:05:50.142114 17344 solver.cpp:228] Iteration 55100, loss = 0.000183816\nI0819 02:05:50.142159 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:05:50.142175 17344 solver.cpp:244]     Train net output #1: loss = 0.000183609 (* 1 = 0.000183609 loss)\nI0819 02:05:50.237689 17344 sgd_solver.cpp:166] Iteration 55100, lr = 0.00035\nI0819 02:08:07.437355 17344 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:09:27.811300 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63224\nI0819 02:09:27.811627 17344 solver.cpp:404]     Test net output #1: loss = 1.26227 (* 1 = 1.26227 loss)\nI0819 02:09:29.116636 17344 solver.cpp:228] Iteration 55200, loss = 0.00021005\nI0819 02:09:29.116680 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:09:29.116696 17344 solver.cpp:244]     Train net output #1: loss = 0.000209843 (* 1 = 0.000209843 loss)\nI0819 02:09:29.210871 17344 sgd_solver.cpp:166] Iteration 55200, lr = 0.00035\nI0819 02:11:46.381772 17344 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:13:06.762930 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65068\nI0819 02:13:06.763257 17344 solver.cpp:404]     Test net output #1: loss = 1.22894 (* 1 = 1.22894 loss)\nI0819 02:13:08.068246 17344 solver.cpp:228] Iteration 55300, loss = 0.000203165\nI0819 02:13:08.068294 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:13:08.068310 17344 solver.cpp:244]     Train net output #1: loss = 0.000202958 (* 1 = 0.000202958 loss)\nI0819 02:13:08.163643 17344 sgd_solver.cpp:166] Iteration 55300, lr = 
0.00035\nI0819 02:15:25.466994 17344 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:16:45.836318 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65868\nI0819 02:16:45.836633 17344 solver.cpp:404]     Test net output #1: loss = 1.21823 (* 1 = 1.21823 loss)\nI0819 02:16:47.141288 17344 solver.cpp:228] Iteration 55400, loss = 0.000188232\nI0819 02:16:47.141330 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:16:47.141346 17344 solver.cpp:244]     Train net output #1: loss = 0.000188025 (* 1 = 0.000188025 loss)\nI0819 02:16:47.234689 17344 sgd_solver.cpp:166] Iteration 55400, lr = 0.00035\nI0819 02:19:04.355818 17344 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 02:20:24.722014 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66928\nI0819 02:20:24.722316 17344 solver.cpp:404]     Test net output #1: loss = 1.20225 (* 1 = 1.20225 loss)\nI0819 02:20:26.026926 17344 solver.cpp:228] Iteration 55500, loss = 0.000177568\nI0819 02:20:26.026968 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:20:26.026984 17344 solver.cpp:244]     Train net output #1: loss = 0.000177361 (* 1 = 0.000177361 loss)\nI0819 02:20:26.121445 17344 sgd_solver.cpp:166] Iteration 55500, lr = 0.00035\nI0819 02:22:43.314018 17344 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:24:03.687266 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67648\nI0819 02:24:03.687583 17344 solver.cpp:404]     Test net output #1: loss = 1.19052 (* 1 = 1.19052 loss)\nI0819 02:24:04.992552 17344 solver.cpp:228] Iteration 55600, loss = 0.000166573\nI0819 02:24:04.992594 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:24:04.992611 17344 solver.cpp:244]     Train net output #1: loss = 0.000166366 (* 1 = 0.000166366 loss)\nI0819 02:24:05.084461 17344 sgd_solver.cpp:166] Iteration 55600, lr = 0.00035\nI0819 02:26:22.360698 17344 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 02:27:42.733119 
17344 solver.cpp:404]     Test net output #0: accuracy = 0.6756\nI0819 02:27:42.733444 17344 solver.cpp:404]     Test net output #1: loss = 1.22747 (* 1 = 1.22747 loss)\nI0819 02:27:44.038493 17344 solver.cpp:228] Iteration 55700, loss = 0.000129183\nI0819 02:27:44.038537 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:27:44.038553 17344 solver.cpp:244]     Train net output #1: loss = 0.000128976 (* 1 = 0.000128976 loss)\nI0819 02:27:44.133169 17344 sgd_solver.cpp:166] Iteration 55700, lr = 0.00035\nI0819 02:30:01.273475 17344 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 02:31:21.651757 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67756\nI0819 02:31:21.652081 17344 solver.cpp:404]     Test net output #1: loss = 1.2295 (* 1 = 1.2295 loss)\nI0819 02:31:22.957136 17344 solver.cpp:228] Iteration 55800, loss = 0.000180423\nI0819 02:31:22.957178 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:31:22.957195 17344 solver.cpp:244]     Train net output #1: loss = 0.000180216 (* 1 = 0.000180216 loss)\nI0819 02:31:23.050560 17344 sgd_solver.cpp:166] Iteration 55800, lr = 0.00035\nI0819 02:33:40.173373 17344 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 02:35:00.546322 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66688\nI0819 02:35:00.546651 17344 solver.cpp:404]     Test net output #1: loss = 1.29837 (* 1 = 1.29837 loss)\nI0819 02:35:01.851196 17344 solver.cpp:228] Iteration 55900, loss = 0.000133692\nI0819 02:35:01.851235 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:35:01.851251 17344 solver.cpp:244]     Train net output #1: loss = 0.000133485 (* 1 = 0.000133485 loss)\nI0819 02:35:01.949225 17344 sgd_solver.cpp:166] Iteration 55900, lr = 0.00035\nI0819 02:37:19.067312 17344 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 02:38:39.441516 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65468\nI0819 02:38:39.441892 17344 solver.cpp:404]     
Test net output #1: loss = 1.36721 (* 1 = 1.36721 loss)\nI0819 02:38:40.746640 17344 solver.cpp:228] Iteration 56000, loss = 0.000135895\nI0819 02:38:40.746681 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:38:40.746697 17344 solver.cpp:244]     Train net output #1: loss = 0.000135688 (* 1 = 0.000135688 loss)\nI0819 02:38:40.843170 17344 sgd_solver.cpp:166] Iteration 56000, lr = 0.00035\nI0819 02:40:58.068765 17344 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 02:42:18.448774 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64352\nI0819 02:42:18.449098 17344 solver.cpp:404]     Test net output #1: loss = 1.43683 (* 1 = 1.43683 loss)\nI0819 02:42:19.754272 17344 solver.cpp:228] Iteration 56100, loss = 0.000144808\nI0819 02:42:19.754317 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:42:19.754333 17344 solver.cpp:244]     Train net output #1: loss = 0.000144601 (* 1 = 0.000144601 loss)\nI0819 02:42:19.845089 17344 sgd_solver.cpp:166] Iteration 56100, lr = 0.00035\nI0819 02:44:36.919399 17344 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 02:45:57.292834 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6272\nI0819 02:45:57.293162 17344 solver.cpp:404]     Test net output #1: loss = 1.51343 (* 1 = 1.51343 loss)\nI0819 02:45:58.598191 17344 solver.cpp:228] Iteration 56200, loss = 0.0001716\nI0819 02:45:58.598232 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:45:58.598246 17344 solver.cpp:244]     Train net output #1: loss = 0.000171393 (* 1 = 0.000171393 loss)\nI0819 02:45:58.696240 17344 sgd_solver.cpp:166] Iteration 56200, lr = 0.00035\nI0819 02:48:15.864534 17344 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 02:49:36.136977 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61364\nI0819 02:49:36.137300 17344 solver.cpp:404]     Test net output #1: loss = 1.59046 (* 1 = 1.59046 loss)\nI0819 02:49:37.441741 17344 solver.cpp:228] Iteration 
56300, loss = 0.000162197\nI0819 02:49:37.441787 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:49:37.441802 17344 solver.cpp:244]     Train net output #1: loss = 0.00016199 (* 1 = 0.00016199 loss)\nI0819 02:49:37.538566 17344 sgd_solver.cpp:166] Iteration 56300, lr = 0.00035\nI0819 02:51:54.733410 17344 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 02:53:15.003970 17344 solver.cpp:404]     Test net output #0: accuracy = 0.60252\nI0819 02:53:15.004287 17344 solver.cpp:404]     Test net output #1: loss = 1.60034 (* 1 = 1.60034 loss)\nI0819 02:53:16.308940 17344 solver.cpp:228] Iteration 56400, loss = 0.000163154\nI0819 02:53:16.308986 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:53:16.309002 17344 solver.cpp:244]     Train net output #1: loss = 0.000162947 (* 1 = 0.000162947 loss)\nI0819 02:53:16.400979 17344 sgd_solver.cpp:166] Iteration 56400, lr = 0.00035\nI0819 02:55:33.575109 17344 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 02:56:53.855137 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58332\nI0819 02:56:53.855448 17344 solver.cpp:404]     Test net output #1: loss = 1.66864 (* 1 = 1.66864 loss)\nI0819 02:56:55.159970 17344 solver.cpp:228] Iteration 56500, loss = 0.000181485\nI0819 02:56:55.160015 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:56:55.160032 17344 solver.cpp:244]     Train net output #1: loss = 0.000181278 (* 1 = 0.000181278 loss)\nI0819 02:56:55.257153 17344 sgd_solver.cpp:166] Iteration 56500, lr = 0.00035\nI0819 02:59:12.401707 17344 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 03:00:32.695101 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57488\nI0819 03:00:32.695415 17344 solver.cpp:404]     Test net output #1: loss = 1.62735 (* 1 = 1.62735 loss)\nI0819 03:00:34.000622 17344 solver.cpp:228] Iteration 56600, loss = 0.000193888\nI0819 03:00:34.000666 17344 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0819 03:00:34.000682 17344 solver.cpp:244]     Train net output #1: loss = 0.000193681 (* 1 = 0.000193681 loss)\nI0819 03:00:34.096550 17344 sgd_solver.cpp:166] Iteration 56600, lr = 0.00035\nI0819 03:02:51.333734 17344 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 03:04:11.599622 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5334\nI0819 03:04:11.599959 17344 solver.cpp:404]     Test net output #1: loss = 1.84773 (* 1 = 1.84773 loss)\nI0819 03:04:12.904806 17344 solver.cpp:228] Iteration 56700, loss = 0.000196015\nI0819 03:04:12.904850 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:04:12.904866 17344 solver.cpp:244]     Train net output #1: loss = 0.000195808 (* 1 = 0.000195808 loss)\nI0819 03:04:13.000785 17344 sgd_solver.cpp:166] Iteration 56700, lr = 0.00035\nI0819 03:06:30.353190 17344 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 03:07:50.642665 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50952\nI0819 03:07:50.643023 17344 solver.cpp:404]     Test net output #1: loss = 1.87224 (* 1 = 1.87224 loss)\nI0819 03:07:51.947741 17344 solver.cpp:228] Iteration 56800, loss = 0.000160412\nI0819 03:07:51.947785 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:07:51.947801 17344 solver.cpp:244]     Train net output #1: loss = 0.000160205 (* 1 = 0.000160205 loss)\nI0819 03:07:52.046746 17344 sgd_solver.cpp:166] Iteration 56800, lr = 0.00035\nI0819 03:10:09.166520 17344 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:11:29.463634 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46872\nI0819 03:11:29.463976 17344 solver.cpp:404]     Test net output #1: loss = 2.0443 (* 1 = 2.0443 loss)\nI0819 03:11:30.768966 17344 solver.cpp:228] Iteration 56900, loss = 0.000135286\nI0819 03:11:30.769009 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:11:30.769026 17344 solver.cpp:244]     Train net output #1: loss = 0.000135079 (* 1 = 0.000135079 
loss)\nI0819 03:11:30.862352 17344 sgd_solver.cpp:166] Iteration 56900, lr = 0.00035\nI0819 03:13:48.120244 17344 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:15:08.416465 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42852\nI0819 03:15:08.416797 17344 solver.cpp:404]     Test net output #1: loss = 2.2282 (* 1 = 2.2282 loss)\nI0819 03:15:09.721681 17344 solver.cpp:228] Iteration 57000, loss = 0.000176193\nI0819 03:15:09.721731 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:15:09.721748 17344 solver.cpp:244]     Train net output #1: loss = 0.000175986 (* 1 = 0.000175986 loss)\nI0819 03:15:09.821377 17344 sgd_solver.cpp:166] Iteration 57000, lr = 0.00035\nI0819 03:17:26.973482 17344 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 03:18:47.268815 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37336\nI0819 03:18:47.269145 17344 solver.cpp:404]     Test net output #1: loss = 2.53799 (* 1 = 2.53799 loss)\nI0819 03:18:48.574985 17344 solver.cpp:228] Iteration 57100, loss = 0.000154007\nI0819 03:18:48.575031 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:18:48.575047 17344 solver.cpp:244]     Train net output #1: loss = 0.0001538 (* 1 = 0.0001538 loss)\nI0819 03:18:48.669088 17344 sgd_solver.cpp:166] Iteration 57100, lr = 0.00035\nI0819 03:21:05.916167 17344 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:22:26.296057 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36344\nI0819 03:22:26.296329 17344 solver.cpp:404]     Test net output #1: loss = 2.41556 (* 1 = 2.41556 loss)\nI0819 03:22:27.602174 17344 solver.cpp:228] Iteration 57200, loss = 0.000142864\nI0819 03:22:27.602217 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:22:27.602234 17344 solver.cpp:244]     Train net output #1: loss = 0.000142657 (* 1 = 0.000142657 loss)\nI0819 03:22:27.705229 17344 sgd_solver.cpp:166] Iteration 57200, lr = 0.00035\nI0819 03:24:44.844310 17344 
solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 03:26:05.213698 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32172\nI0819 03:26:05.214013 17344 solver.cpp:404]     Test net output #1: loss = 2.61647 (* 1 = 2.61647 loss)\nI0819 03:26:06.518823 17344 solver.cpp:228] Iteration 57300, loss = 0.000209735\nI0819 03:26:06.518869 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:26:06.518885 17344 solver.cpp:244]     Train net output #1: loss = 0.000209528 (* 1 = 0.000209528 loss)\nI0819 03:26:06.614051 17344 sgd_solver.cpp:166] Iteration 57300, lr = 0.00035\nI0819 03:28:23.732049 17344 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 03:29:44.101117 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28004\nI0819 03:29:44.101425 17344 solver.cpp:404]     Test net output #1: loss = 2.90528 (* 1 = 2.90528 loss)\nI0819 03:29:45.406108 17344 solver.cpp:228] Iteration 57400, loss = 0.00017913\nI0819 03:29:45.406153 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:29:45.406170 17344 solver.cpp:244]     Train net output #1: loss = 0.000178923 (* 1 = 0.000178923 loss)\nI0819 03:29:45.501803 17344 sgd_solver.cpp:166] Iteration 57400, lr = 0.00035\nI0819 03:32:02.600354 17344 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 03:33:22.973763 17344 solver.cpp:404]     Test net output #0: accuracy = 0.25248\nI0819 03:33:22.974078 17344 solver.cpp:404]     Test net output #1: loss = 3.07575 (* 1 = 3.07575 loss)\nI0819 03:33:24.278920 17344 solver.cpp:228] Iteration 57500, loss = 0.000152456\nI0819 03:33:24.278964 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:33:24.278981 17344 solver.cpp:244]     Train net output #1: loss = 0.000152249 (* 1 = 0.000152249 loss)\nI0819 03:33:24.376106 17344 sgd_solver.cpp:166] Iteration 57500, lr = 0.00035\nI0819 03:35:41.489706 17344 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 03:37:01.869204 17344 solver.cpp:404]     Test net 
output #0: accuracy = 0.30916\nI0819 03:37:01.869506 17344 solver.cpp:404]     Test net output #1: loss = 2.82925 (* 1 = 2.82925 loss)\nI0819 03:37:03.174531 17344 solver.cpp:228] Iteration 57600, loss = 0.0852431\nI0819 03:37:03.174574 17344 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:37:03.174592 17344 solver.cpp:244]     Train net output #1: loss = 0.0852429 (* 1 = 0.0852429 loss)\nI0819 03:37:03.267606 17344 sgd_solver.cpp:166] Iteration 57600, lr = 0.00035\nI0819 03:39:20.377565 17344 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 03:40:40.757771 17344 solver.cpp:404]     Test net output #0: accuracy = 0.35648\nI0819 03:40:40.758090 17344 solver.cpp:404]     Test net output #1: loss = 2.49295 (* 1 = 2.49295 loss)\nI0819 03:40:42.063680 17344 solver.cpp:228] Iteration 57700, loss = 0.0360064\nI0819 03:40:42.063730 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:40:42.063748 17344 solver.cpp:244]     Train net output #1: loss = 0.0360062 (* 1 = 0.0360062 loss)\nI0819 03:40:42.161217 17344 sgd_solver.cpp:166] Iteration 57700, lr = 0.00035\nI0819 03:42:59.284751 17344 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 03:44:19.662009 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37944\nI0819 03:44:19.662335 17344 solver.cpp:404]     Test net output #1: loss = 2.4944 (* 1 = 2.4944 loss)\nI0819 03:44:20.968145 17344 solver.cpp:228] Iteration 57800, loss = 0.0096662\nI0819 03:44:20.968190 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:44:20.968207 17344 solver.cpp:244]     Train net output #1: loss = 0.00966598 (* 1 = 0.00966598 loss)\nI0819 03:44:21.060256 17344 sgd_solver.cpp:166] Iteration 57800, lr = 0.00035\nI0819 03:46:38.198858 17344 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 03:47:58.581861 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31296\nI0819 03:47:58.582175 17344 solver.cpp:404]     Test net output #1: loss = 2.99734 (* 1 
= 2.99734 loss)\nI0819 03:47:59.888228 17344 solver.cpp:228] Iteration 57900, loss = 0.00254794\nI0819 03:47:59.888273 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:47:59.888290 17344 solver.cpp:244]     Train net output #1: loss = 0.00254773 (* 1 = 0.00254773 loss)\nI0819 03:47:59.978714 17344 sgd_solver.cpp:166] Iteration 57900, lr = 0.00035\nI0819 03:50:17.046172 17344 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 03:51:37.416200 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44184\nI0819 03:51:37.416509 17344 solver.cpp:404]     Test net output #1: loss = 2.28571 (* 1 = 2.28571 loss)\nI0819 03:51:38.722800 17344 solver.cpp:228] Iteration 58000, loss = 0.00276962\nI0819 03:51:38.722846 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:51:38.722863 17344 solver.cpp:244]     Train net output #1: loss = 0.00276941 (* 1 = 0.00276941 loss)\nI0819 03:51:38.817366 17344 sgd_solver.cpp:166] Iteration 58000, lr = 0.00035\nI0819 03:53:55.977581 17344 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 03:55:16.345715 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50456\nI0819 03:55:16.346053 17344 solver.cpp:404]     Test net output #1: loss = 1.90941 (* 1 = 1.90941 loss)\nI0819 03:55:17.651715 17344 solver.cpp:228] Iteration 58100, loss = 0.000935294\nI0819 03:55:17.651767 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:55:17.651783 17344 solver.cpp:244]     Train net output #1: loss = 0.000935087 (* 1 = 0.000935087 loss)\nI0819 03:55:17.745419 17344 sgd_solver.cpp:166] Iteration 58100, lr = 0.00035\nI0819 03:57:34.883579 17344 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 03:58:55.264741 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5488\nI0819 03:58:55.265071 17344 solver.cpp:404]     Test net output #1: loss = 1.74365 (* 1 = 1.74365 loss)\nI0819 03:58:56.570804 17344 solver.cpp:228] Iteration 58200, loss = 0.000462412\nI0819 03:58:56.570849 
17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:58:56.570866 17344 solver.cpp:244]     Train net output #1: loss = 0.000462204 (* 1 = 0.000462204 loss)\nI0819 03:58:56.666642 17344 sgd_solver.cpp:166] Iteration 58200, lr = 0.00035\nI0819 04:01:13.840967 17344 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 04:02:34.232074 17344 solver.cpp:404]     Test net output #0: accuracy = 0.61984\nI0819 04:02:34.232394 17344 solver.cpp:404]     Test net output #1: loss = 1.4979 (* 1 = 1.4979 loss)\nI0819 04:02:35.539010 17344 solver.cpp:228] Iteration 58300, loss = 0.000432206\nI0819 04:02:35.539057 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:02:35.539072 17344 solver.cpp:244]     Train net output #1: loss = 0.000431998 (* 1 = 0.000431998 loss)\nI0819 04:02:35.632505 17344 sgd_solver.cpp:166] Iteration 58300, lr = 0.00035\nI0819 04:04:52.554769 17344 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 04:06:12.940779 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64912\nI0819 04:06:12.941084 17344 solver.cpp:404]     Test net output #1: loss = 1.38825 (* 1 = 1.38825 loss)\nI0819 04:06:14.246846 17344 solver.cpp:228] Iteration 58400, loss = 0.000276776\nI0819 04:06:14.246892 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:06:14.246908 17344 solver.cpp:244]     Train net output #1: loss = 0.000276568 (* 1 = 0.000276568 loss)\nI0819 04:06:14.342154 17344 sgd_solver.cpp:166] Iteration 58400, lr = 0.00035\nI0819 04:08:31.184962 17344 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:09:51.571218 17344 solver.cpp:404]     Test net output #0: accuracy = 0.685\nI0819 04:09:51.571522 17344 solver.cpp:404]     Test net output #1: loss = 1.23989 (* 1 = 1.23989 loss)\nI0819 04:09:52.877552 17344 solver.cpp:228] Iteration 58500, loss = 0.000364882\nI0819 04:09:52.877599 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:09:52.877614 17344 solver.cpp:244]     Train 
net output #1: loss = 0.000364675 (* 1 = 0.000364675 loss)\nI0819 04:09:52.972414 17344 sgd_solver.cpp:166] Iteration 58500, lr = 0.00035\nI0819 04:12:09.713215 17344 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:13:30.087236 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71972\nI0819 04:13:30.087545 17344 solver.cpp:404]     Test net output #1: loss = 1.10646 (* 1 = 1.10646 loss)\nI0819 04:13:31.393724 17344 solver.cpp:228] Iteration 58600, loss = 0.000299722\nI0819 04:13:31.393769 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:13:31.393784 17344 solver.cpp:244]     Train net output #1: loss = 0.000299514 (* 1 = 0.000299514 loss)\nI0819 04:13:31.490896 17344 sgd_solver.cpp:166] Iteration 58600, lr = 0.00035\nI0819 04:15:48.358521 17344 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 04:17:08.751356 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74544\nI0819 04:17:08.751637 17344 solver.cpp:404]     Test net output #1: loss = 1.00952 (* 1 = 1.00952 loss)\nI0819 04:17:10.056818 17344 solver.cpp:228] Iteration 58700, loss = 0.000357009\nI0819 04:17:10.056861 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:17:10.056877 17344 solver.cpp:244]     Train net output #1: loss = 0.000356801 (* 1 = 0.000356801 loss)\nI0819 04:17:10.149672 17344 sgd_solver.cpp:166] Iteration 58700, lr = 0.00035\nI0819 04:19:26.982087 17344 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:20:47.368365 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI0819 04:20:47.368670 17344 solver.cpp:404]     Test net output #1: loss = 0.938237 (* 1 = 0.938237 loss)\nI0819 04:20:48.673193 17344 solver.cpp:228] Iteration 58800, loss = 0.00023031\nI0819 04:20:48.673238 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:20:48.673254 17344 solver.cpp:244]     Train net output #1: loss = 0.000230103 (* 1 = 0.000230103 loss)\nI0819 04:20:48.769328 17344 sgd_solver.cpp:166] 
Iteration 58800, lr = 0.00035\nI0819 04:23:05.480548 17344 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 04:24:25.870175 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77736\nI0819 04:24:25.870456 17344 solver.cpp:404]     Test net output #1: loss = 0.886478 (* 1 = 0.886478 loss)\nI0819 04:24:27.176455 17344 solver.cpp:228] Iteration 58900, loss = 0.000241755\nI0819 04:24:27.176501 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:24:27.176517 17344 solver.cpp:244]     Train net output #1: loss = 0.000241548 (* 1 = 0.000241548 loss)\nI0819 04:24:27.268642 17344 sgd_solver.cpp:166] Iteration 58900, lr = 0.00035\nI0819 04:26:44.173655 17344 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 04:28:04.558965 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78948\nI0819 04:28:04.559273 17344 solver.cpp:404]     Test net output #1: loss = 0.837232 (* 1 = 0.837232 loss)\nI0819 04:28:05.864380 17344 solver.cpp:228] Iteration 59000, loss = 0.000197448\nI0819 04:28:05.864424 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:28:05.864440 17344 solver.cpp:244]     Train net output #1: loss = 0.000197241 (* 1 = 0.000197241 loss)\nI0819 04:28:05.959537 17344 sgd_solver.cpp:166] Iteration 59000, lr = 0.00035\nI0819 04:30:22.845973 17344 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 04:31:43.233692 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI0819 04:31:43.234014 17344 solver.cpp:404]     Test net output #1: loss = 0.793415 (* 1 = 0.793415 loss)\nI0819 04:31:44.539047 17344 solver.cpp:228] Iteration 59100, loss = 0.000215078\nI0819 04:31:44.539090 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:31:44.539106 17344 solver.cpp:244]     Train net output #1: loss = 0.000214871 (* 1 = 0.000214871 loss)\nI0819 04:31:44.632663 17344 sgd_solver.cpp:166] Iteration 59100, lr = 0.00035\nI0819 04:34:01.614841 17344 solver.cpp:337] Iteration 59200, Testing net 
(#0)\nI0819 04:35:21.996557 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81188\nI0819 04:35:21.996860 17344 solver.cpp:404]     Test net output #1: loss = 0.755583 (* 1 = 0.755583 loss)\nI0819 04:35:23.303272 17344 solver.cpp:228] Iteration 59200, loss = 0.000276375\nI0819 04:35:23.303316 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:35:23.303333 17344 solver.cpp:244]     Train net output #1: loss = 0.000276167 (* 1 = 0.000276167 loss)\nI0819 04:35:23.400552 17344 sgd_solver.cpp:166] Iteration 59200, lr = 0.00035\nI0819 04:37:40.221838 17344 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 04:39:01.654670 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82136\nI0819 04:39:01.655035 17344 solver.cpp:404]     Test net output #1: loss = 0.718992 (* 1 = 0.718992 loss)\nI0819 04:39:02.965735 17344 solver.cpp:228] Iteration 59300, loss = 0.000194282\nI0819 04:39:02.965778 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:39:02.965795 17344 solver.cpp:244]     Train net output #1: loss = 0.000194075 (* 1 = 0.000194075 loss)\nI0819 04:39:03.058784 17344 sgd_solver.cpp:166] Iteration 59300, lr = 0.00035\nI0819 04:41:20.099047 17344 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 04:42:41.504112 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8228\nI0819 04:42:41.504485 17344 solver.cpp:404]     Test net output #1: loss = 0.711104 (* 1 = 0.711104 loss)\nI0819 04:42:42.813804 17344 solver.cpp:228] Iteration 59400, loss = 0.000199404\nI0819 04:42:42.813863 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:42:42.813881 17344 solver.cpp:244]     Train net output #1: loss = 0.000199197 (* 1 = 0.000199197 loss)\nI0819 04:42:42.909664 17344 sgd_solver.cpp:166] Iteration 59400, lr = 0.00035\nI0819 04:45:00.041110 17344 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 04:46:21.444705 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82552\nI0819 
04:46:21.445086 17344 solver.cpp:404]     Test net output #1: loss = 0.706426 (* 1 = 0.706426 loss)\nI0819 04:46:22.756777 17344 solver.cpp:228] Iteration 59500, loss = 0.00020586\nI0819 04:46:22.756835 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:46:22.756853 17344 solver.cpp:244]     Train net output #1: loss = 0.000205653 (* 1 = 0.000205653 loss)\nI0819 04:46:22.846534 17344 sgd_solver.cpp:166] Iteration 59500, lr = 0.00035\nI0819 04:48:39.853396 17344 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 04:50:01.263118 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8298\nI0819 04:50:01.263455 17344 solver.cpp:404]     Test net output #1: loss = 0.688488 (* 1 = 0.688488 loss)\nI0819 04:50:02.574535 17344 solver.cpp:228] Iteration 59600, loss = 0.000183454\nI0819 04:50:02.574594 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:50:02.574611 17344 solver.cpp:244]     Train net output #1: loss = 0.000183247 (* 1 = 0.000183247 loss)\nI0819 04:50:02.669574 17344 sgd_solver.cpp:166] Iteration 59600, lr = 0.00035\nI0819 04:52:19.794165 17344 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 04:53:41.201504 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82784\nI0819 04:53:41.201880 17344 solver.cpp:404]     Test net output #1: loss = 0.702831 (* 1 = 0.702831 loss)\nI0819 04:53:42.513196 17344 solver.cpp:228] Iteration 59700, loss = 0.000170212\nI0819 04:53:42.513257 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:53:42.513274 17344 solver.cpp:244]     Train net output #1: loss = 0.000170004 (* 1 = 0.000170004 loss)\nI0819 04:53:42.608769 17344 sgd_solver.cpp:166] Iteration 59700, lr = 0.00035\nI0819 04:55:59.679131 17344 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 04:57:21.082299 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82792\nI0819 04:57:21.082680 17344 solver.cpp:404]     Test net output #1: loss = 0.692824 (* 1 = 0.692824 loss)\nI0819 
04:57:22.394528 17344 solver.cpp:228] Iteration 59800, loss = 0.000200363\nI0819 04:57:22.394587 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:57:22.394606 17344 solver.cpp:244]     Train net output #1: loss = 0.000200156 (* 1 = 0.000200156 loss)\nI0819 04:57:22.490851 17344 sgd_solver.cpp:166] Iteration 59800, lr = 0.00035\nI0819 04:59:39.643198 17344 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 05:01:01.056844 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82864\nI0819 05:01:01.057216 17344 solver.cpp:404]     Test net output #1: loss = 0.698028 (* 1 = 0.698028 loss)\nI0819 05:01:02.369055 17344 solver.cpp:228] Iteration 59900, loss = 0.000181626\nI0819 05:01:02.369108 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:01:02.369127 17344 solver.cpp:244]     Train net output #1: loss = 0.000181419 (* 1 = 0.000181419 loss)\nI0819 05:01:02.460850 17344 sgd_solver.cpp:166] Iteration 59900, lr = 0.00035\nI0819 05:03:19.708638 17344 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 05:04:41.141258 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82732\nI0819 05:04:41.141639 17344 solver.cpp:404]     Test net output #1: loss = 0.69221 (* 1 = 0.69221 loss)\nI0819 05:04:42.454486 17344 solver.cpp:228] Iteration 60000, loss = 0.000194092\nI0819 05:04:42.454547 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:04:42.454566 17344 solver.cpp:244]     Train net output #1: loss = 0.000193885 (* 1 = 0.000193885 loss)\nI0819 05:04:42.543795 17344 sgd_solver.cpp:166] Iteration 60000, lr = 0.00035\nI0819 05:06:59.568073 17344 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:08:21.001793 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82092\nI0819 05:08:21.002179 17344 solver.cpp:404]     Test net output #1: loss = 0.72089 (* 1 = 0.72089 loss)\nI0819 05:08:22.315251 17344 solver.cpp:228] Iteration 60100, loss = 0.000175454\nI0819 05:08:22.315311 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:08:22.315330 17344 solver.cpp:244]     Train net output #1: loss = 0.000175247 (* 1 = 0.000175247 loss)\nI0819 05:08:22.406546 17344 sgd_solver.cpp:166] Iteration 60100, lr = 0.00035\nI0819 05:10:39.410851 17344 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:12:00.834974 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81268\nI0819 05:12:00.835347 17344 solver.cpp:404]     Test net output #1: loss = 0.736118 (* 1 = 0.736118 loss)\nI0819 05:12:02.148370 17344 solver.cpp:228] Iteration 60200, loss = 0.000170266\nI0819 05:12:02.148423 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:12:02.148440 17344 solver.cpp:244]     Train net output #1: loss = 0.000170058 (* 1 = 0.000170058 loss)\nI0819 05:12:02.243546 17344 sgd_solver.cpp:166] Iteration 60200, lr = 0.00035\nI0819 05:14:19.359488 17344 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 05:15:40.714823 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8212\nI0819 05:15:40.715082 17344 solver.cpp:404]     Test net output #1: loss = 0.716617 (* 1 = 0.716617 loss)\nI0819 05:15:42.027884 17344 solver.cpp:228] Iteration 60300, loss = 0.000200683\nI0819 05:15:42.027947 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:15:42.027966 17344 solver.cpp:244]     Train net output #1: loss = 0.000200475 (* 1 = 0.000200475 loss)\nI0819 05:15:42.116423 17344 sgd_solver.cpp:166] Iteration 60300, lr = 0.00035\nI0819 05:17:59.184340 17344 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:19:20.081194 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80896\nI0819 05:19:20.081473 17344 solver.cpp:404]     Test net output #1: loss = 0.754713 (* 1 = 0.754713 loss)\nI0819 05:19:21.394161 17344 solver.cpp:228] Iteration 60400, loss = 0.000178456\nI0819 05:19:21.394222 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:19:21.394239 17344 solver.cpp:244]     
Train net output #1: loss = 0.000178249 (* 1 = 0.000178249 loss)\nI0819 05:19:21.490417 17344 sgd_solver.cpp:166] Iteration 60400, lr = 0.00035\nI0819 05:21:38.544896 17344 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 05:22:59.843266 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79476\nI0819 05:22:59.843564 17344 solver.cpp:404]     Test net output #1: loss = 0.801385 (* 1 = 0.801385 loss)\nI0819 05:23:01.155807 17344 solver.cpp:228] Iteration 60500, loss = 0.000139025\nI0819 05:23:01.155865 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:23:01.155884 17344 solver.cpp:244]     Train net output #1: loss = 0.000138817 (* 1 = 0.000138817 loss)\nI0819 05:23:01.250030 17344 sgd_solver.cpp:166] Iteration 60500, lr = 0.00035\nI0819 05:25:18.454262 17344 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 05:26:39.812440 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78144\nI0819 05:26:39.812706 17344 solver.cpp:404]     Test net output #1: loss = 0.848595 (* 1 = 0.848595 loss)\nI0819 05:26:41.125387 17344 solver.cpp:228] Iteration 60600, loss = 0.000168668\nI0819 05:26:41.125447 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:26:41.125464 17344 solver.cpp:244]     Train net output #1: loss = 0.000168461 (* 1 = 0.000168461 loss)\nI0819 05:26:41.221751 17344 sgd_solver.cpp:166] Iteration 60600, lr = 0.00035\nI0819 05:28:58.851547 17344 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 05:30:20.231570 17344 solver.cpp:404]     Test net output #0: accuracy = 0.78656\nI0819 05:30:20.231891 17344 solver.cpp:404]     Test net output #1: loss = 0.819112 (* 1 = 0.819112 loss)\nI0819 05:30:21.546460 17344 solver.cpp:228] Iteration 60700, loss = 0.000191581\nI0819 05:30:21.546519 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:30:21.546536 17344 solver.cpp:244]     Train net output #1: loss = 0.000191374 (* 1 = 0.000191374 loss)\nI0819 05:30:21.636431 17344 
sgd_solver.cpp:166] Iteration 60700, lr = 0.00035\nI0819 05:32:39.259414 17344 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 05:34:00.657613 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI0819 05:34:00.657899 17344 solver.cpp:404]     Test net output #1: loss = 0.899474 (* 1 = 0.899474 loss)\nI0819 05:34:01.976354 17344 solver.cpp:228] Iteration 60800, loss = 0.000185372\nI0819 05:34:01.976410 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:34:01.976428 17344 solver.cpp:244]     Train net output #1: loss = 0.000185164 (* 1 = 0.000185164 loss)\nI0819 05:34:02.060523 17344 sgd_solver.cpp:166] Iteration 60800, lr = 0.00035\nI0819 05:36:20.485538 17344 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 05:37:41.904269 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73824\nI0819 05:37:41.904541 17344 solver.cpp:404]     Test net output #1: loss = 0.990067 (* 1 = 0.990067 loss)\nI0819 05:37:43.215580 17344 solver.cpp:228] Iteration 60900, loss = 0.000191767\nI0819 05:37:43.215625 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:37:43.215641 17344 solver.cpp:244]     Train net output #1: loss = 0.00019156 (* 1 = 0.00019156 loss)\nI0819 05:37:43.306768 17344 sgd_solver.cpp:166] Iteration 60900, lr = 0.00035\nI0819 05:40:00.959297 17344 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 05:41:22.325537 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72532\nI0819 05:41:22.325839 17344 solver.cpp:404]     Test net output #1: loss = 1.03205 (* 1 = 1.03205 loss)\nI0819 05:41:23.637485 17344 solver.cpp:228] Iteration 61000, loss = 0.000179387\nI0819 05:41:23.637529 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:41:23.637545 17344 solver.cpp:244]     Train net output #1: loss = 0.00017918 (* 1 = 0.00017918 loss)\nI0819 05:41:23.727794 17344 sgd_solver.cpp:166] Iteration 61000, lr = 0.00035\nI0819 05:43:41.309865 17344 solver.cpp:337] Iteration 61100, 
Testing net (#0)\nI0819 05:45:02.710934 17344 solver.cpp:404]     Test net output #0: accuracy = 0.68876\nI0819 05:45:02.711215 17344 solver.cpp:404]     Test net output #1: loss = 1.15522 (* 1 = 1.15522 loss)\nI0819 05:45:04.022809 17344 solver.cpp:228] Iteration 61100, loss = 0.000156643\nI0819 05:45:04.022867 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:45:04.022886 17344 solver.cpp:244]     Train net output #1: loss = 0.000156436 (* 1 = 0.000156436 loss)\nI0819 05:45:04.115521 17344 sgd_solver.cpp:166] Iteration 61100, lr = 0.00035\nI0819 05:47:21.790951 17344 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 05:48:43.162389 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6578\nI0819 05:48:43.162668 17344 solver.cpp:404]     Test net output #1: loss = 1.25698 (* 1 = 1.25698 loss)\nI0819 05:48:44.474910 17344 solver.cpp:228] Iteration 61200, loss = 0.000156365\nI0819 05:48:44.474972 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:48:44.474988 17344 solver.cpp:244]     Train net output #1: loss = 0.000156157 (* 1 = 0.000156157 loss)\nI0819 05:48:44.564404 17344 sgd_solver.cpp:166] Iteration 61200, lr = 0.00035\nI0819 05:51:02.111557 17344 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 05:52:23.487890 17344 solver.cpp:404]     Test net output #0: accuracy = 0.56656\nI0819 05:52:23.488200 17344 solver.cpp:404]     Test net output #1: loss = 1.61979 (* 1 = 1.61979 loss)\nI0819 05:52:24.800858 17344 solver.cpp:228] Iteration 61300, loss = 0.000217049\nI0819 05:52:24.800916 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:52:24.800935 17344 solver.cpp:244]     Train net output #1: loss = 0.000216841 (* 1 = 0.000216841 loss)\nI0819 05:52:24.892982 17344 sgd_solver.cpp:166] Iteration 61300, lr = 0.00035\nI0819 05:54:42.399868 17344 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 05:56:03.783893 17344 solver.cpp:404]     Test net output #0: accuracy = 0.44824\nI0819 
05:56:03.784180 17344 solver.cpp:404]     Test net output #1: loss = 2.20354 (* 1 = 2.20354 loss)\nI0819 05:56:05.097535 17344 solver.cpp:228] Iteration 61400, loss = 0.0248693\nI0819 05:56:05.097594 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:56:05.097611 17344 solver.cpp:244]     Train net output #1: loss = 0.0248691 (* 1 = 0.0248691 loss)\nI0819 05:56:05.188999 17344 sgd_solver.cpp:166] Iteration 61400, lr = 0.00035\nI0819 05:58:22.896162 17344 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 05:59:44.263443 17344 solver.cpp:404]     Test net output #0: accuracy = 0.47724\nI0819 05:59:44.263711 17344 solver.cpp:404]     Test net output #1: loss = 2.97566 (* 1 = 2.97566 loss)\nI0819 05:59:45.576313 17344 solver.cpp:228] Iteration 61500, loss = 0.0807196\nI0819 05:59:45.576376 17344 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:59:45.576395 17344 solver.cpp:244]     Train net output #1: loss = 0.0807194 (* 1 = 0.0807194 loss)\nI0819 05:59:45.670059 17344 sgd_solver.cpp:166] Iteration 61500, lr = 0.00035\nI0819 06:02:03.326681 17344 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 06:03:24.685858 17344 solver.cpp:404]     Test net output #0: accuracy = 0.45168\nI0819 06:03:24.686106 17344 solver.cpp:404]     Test net output #1: loss = 3.37157 (* 1 = 3.37157 loss)\nI0819 06:03:25.998898 17344 solver.cpp:228] Iteration 61600, loss = 0.0242272\nI0819 06:03:25.998960 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 06:03:25.998978 17344 solver.cpp:244]     Train net output #1: loss = 0.024227 (* 1 = 0.024227 loss)\nI0819 06:03:26.093144 17344 sgd_solver.cpp:166] Iteration 61600, lr = 0.00035\nI0819 06:05:43.647567 17344 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:07:04.891803 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50692\nI0819 06:07:04.892065 17344 solver.cpp:404]     Test net output #1: loss = 2.51036 (* 1 = 2.51036 loss)\nI0819 06:07:06.205237 
17344 solver.cpp:228] Iteration 61700, loss = 0.00552918\nI0819 06:07:06.205301 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:07:06.205318 17344 solver.cpp:244]     Train net output #1: loss = 0.00552899 (* 1 = 0.00552899 loss)\nI0819 06:07:06.299685 17344 sgd_solver.cpp:166] Iteration 61700, lr = 0.00035\nI0819 06:09:24.063469 17344 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:10:45.444242 17344 solver.cpp:404]     Test net output #0: accuracy = 0.55164\nI0819 06:10:45.444543 17344 solver.cpp:404]     Test net output #1: loss = 2.27958 (* 1 = 2.27958 loss)\nI0819 06:10:46.757107 17344 solver.cpp:228] Iteration 61800, loss = 0.0050899\nI0819 06:10:46.757169 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:10:46.757185 17344 solver.cpp:244]     Train net output #1: loss = 0.00508972 (* 1 = 0.00508972 loss)\nI0819 06:10:46.845598 17344 sgd_solver.cpp:166] Iteration 61800, lr = 0.00035\nI0819 06:13:04.405679 17344 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 06:14:25.789911 17344 solver.cpp:404]     Test net output #0: accuracy = 0.629\nI0819 06:14:25.790196 17344 solver.cpp:404]     Test net output #1: loss = 1.69278 (* 1 = 1.69278 loss)\nI0819 06:14:27.102855 17344 solver.cpp:228] Iteration 61900, loss = 0.00182706\nI0819 06:14:27.102918 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:14:27.102936 17344 solver.cpp:244]     Train net output #1: loss = 0.00182688 (* 1 = 0.00182688 loss)\nI0819 06:14:27.195564 17344 sgd_solver.cpp:166] Iteration 61900, lr = 0.00035\nI0819 06:16:44.850360 17344 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:18:06.253504 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63228\nI0819 06:18:06.253865 17344 solver.cpp:404]     Test net output #1: loss = 1.72149 (* 1 = 1.72149 loss)\nI0819 06:18:07.565768 17344 solver.cpp:228] Iteration 62000, loss = 0.00242454\nI0819 06:18:07.565829 17344 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0819 06:18:07.565847 17344 solver.cpp:244]     Train net output #1: loss = 0.00242437 (* 1 = 0.00242437 loss)\nI0819 06:18:07.656965 17344 sgd_solver.cpp:166] Iteration 62000, lr = 0.00035\nI0819 06:20:25.205380 17344 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 06:21:46.679363 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62388\nI0819 06:21:46.679735 17344 solver.cpp:404]     Test net output #1: loss = 1.81117 (* 1 = 1.81117 loss)\nI0819 06:21:47.992617 17344 solver.cpp:228] Iteration 62100, loss = 0.00104909\nI0819 06:21:47.992676 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:21:47.992696 17344 solver.cpp:244]     Train net output #1: loss = 0.00104891 (* 1 = 0.00104891 loss)\nI0819 06:21:48.087900 17344 sgd_solver.cpp:166] Iteration 62100, lr = 0.00035\nI0819 06:24:05.690188 17344 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 06:25:27.230312 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65052\nI0819 06:25:27.230696 17344 solver.cpp:404]     Test net output #1: loss = 1.6094 (* 1 = 1.6094 loss)\nI0819 06:25:28.542870 17344 solver.cpp:228] Iteration 62200, loss = 0.000337895\nI0819 06:25:28.542930 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:25:28.542948 17344 solver.cpp:244]     Train net output #1: loss = 0.000337716 (* 1 = 0.000337716 loss)\nI0819 06:25:28.636865 17344 sgd_solver.cpp:166] Iteration 62200, lr = 0.00035\nI0819 06:27:46.244892 17344 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 06:29:07.795900 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67616\nI0819 06:29:07.796303 17344 solver.cpp:404]     Test net output #1: loss = 1.44705 (* 1 = 1.44705 loss)\nI0819 06:29:09.108330 17344 solver.cpp:228] Iteration 62300, loss = 0.000380944\nI0819 06:29:09.108392 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:29:09.108410 17344 solver.cpp:244]     Train net output #1: loss = 0.000380765 (* 1 = 
0.000380765 loss)\nI0819 06:29:09.202834 17344 sgd_solver.cpp:166] Iteration 62300, lr = 0.00035\nI0819 06:31:26.839953 17344 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 06:32:48.400776 17344 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI0819 06:32:48.401161 17344 solver.cpp:404]     Test net output #1: loss = 1.29406 (* 1 = 1.29406 loss)\nI0819 06:32:49.714231 17344 solver.cpp:228] Iteration 62400, loss = 0.000386833\nI0819 06:32:49.714290 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:32:49.714308 17344 solver.cpp:244]     Train net output #1: loss = 0.000386654 (* 1 = 0.000386654 loss)\nI0819 06:32:49.805028 17344 sgd_solver.cpp:166] Iteration 62400, lr = 0.00035\nI0819 06:35:07.482084 17344 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 06:36:28.966450 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74268\nI0819 06:36:28.966833 17344 solver.cpp:404]     Test net output #1: loss = 1.17874 (* 1 = 1.17874 loss)\nI0819 06:36:30.279664 17344 solver.cpp:228] Iteration 62500, loss = 0.00034233\nI0819 06:36:30.279724 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:36:30.279743 17344 solver.cpp:244]     Train net output #1: loss = 0.000342151 (* 1 = 0.000342151 loss)\nI0819 06:36:30.379001 17344 sgd_solver.cpp:166] Iteration 62500, lr = 0.00035\nI0819 06:38:47.989147 17344 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 06:40:09.412699 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77364\nI0819 06:40:09.413049 17344 solver.cpp:404]     Test net output #1: loss = 1.07519 (* 1 = 1.07519 loss)\nI0819 06:40:10.725234 17344 solver.cpp:228] Iteration 62600, loss = 0.000260665\nI0819 06:40:10.725294 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:40:10.725313 17344 solver.cpp:244]     Train net output #1: loss = 0.000260486 (* 1 = 0.000260486 loss)\nI0819 06:40:10.817234 17344 sgd_solver.cpp:166] Iteration 62600, lr = 0.00035\nI0819 
06:42:28.450220 17344 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 06:43:49.875212 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79676\nI0819 06:43:49.875571 17344 solver.cpp:404]     Test net output #1: loss = 0.99443 (* 1 = 0.99443 loss)\nI0819 06:43:51.188823 17344 solver.cpp:228] Iteration 62700, loss = 0.000226149\nI0819 06:43:51.188885 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:43:51.188904 17344 solver.cpp:244]     Train net output #1: loss = 0.000225969 (* 1 = 0.000225969 loss)\nI0819 06:43:51.282683 17344 sgd_solver.cpp:166] Iteration 62700, lr = 0.00035\nI0819 06:46:08.983031 17344 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 06:47:30.442414 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81088\nI0819 06:47:30.442787 17344 solver.cpp:404]     Test net output #1: loss = 0.922869 (* 1 = 0.922869 loss)\nI0819 06:47:31.755318 17344 solver.cpp:228] Iteration 62800, loss = 0.0002472\nI0819 06:47:31.755378 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:47:31.755395 17344 solver.cpp:244]     Train net output #1: loss = 0.000247021 (* 1 = 0.000247021 loss)\nI0819 06:47:31.842978 17344 sgd_solver.cpp:166] Iteration 62800, lr = 0.00035\nI0819 06:49:49.597868 17344 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 06:51:11.014714 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80976\nI0819 06:51:11.015087 17344 solver.cpp:404]     Test net output #1: loss = 1.02256 (* 1 = 1.02256 loss)\nI0819 06:51:12.326596 17344 solver.cpp:228] Iteration 62900, loss = 0.000191671\nI0819 06:51:12.326654 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:51:12.326673 17344 solver.cpp:244]     Train net output #1: loss = 0.000191492 (* 1 = 0.000191492 loss)\nI0819 06:51:12.414886 17344 sgd_solver.cpp:166] Iteration 62900, lr = 0.00035\nI0819 06:53:30.007766 17344 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 06:54:51.455148 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.82916\nI0819 06:54:51.455497 17344 solver.cpp:404]     Test net output #1: loss = 0.85935 (* 1 = 0.85935 loss)\nI0819 06:54:52.766711 17344 solver.cpp:228] Iteration 63000, loss = 0.000188036\nI0819 06:54:52.766769 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:54:52.766788 17344 solver.cpp:244]     Train net output #1: loss = 0.000187857 (* 1 = 0.000187857 loss)\nI0819 06:54:52.855108 17344 sgd_solver.cpp:166] Iteration 63000, lr = 0.00035\nI0819 06:57:10.518791 17344 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 06:58:31.942703 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83876\nI0819 06:58:31.943087 17344 solver.cpp:404]     Test net output #1: loss = 0.800175 (* 1 = 0.800175 loss)\nI0819 06:58:33.256285 17344 solver.cpp:228] Iteration 63100, loss = 0.000202818\nI0819 06:58:33.256342 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:58:33.256361 17344 solver.cpp:244]     Train net output #1: loss = 0.000202639 (* 1 = 0.000202639 loss)\nI0819 06:58:33.346035 17344 sgd_solver.cpp:166] Iteration 63100, lr = 0.00035\nI0819 07:00:50.967445 17344 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 07:02:12.380062 17344 solver.cpp:404]     Test net output #0: accuracy = 0.84764\nI0819 07:02:12.380415 17344 solver.cpp:404]     Test net output #1: loss = 0.750773 (* 1 = 0.750773 loss)\nI0819 07:02:13.693156 17344 solver.cpp:228] Iteration 63200, loss = 0.00017117\nI0819 07:02:13.693214 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:02:13.693233 17344 solver.cpp:244]     Train net output #1: loss = 0.000170991 (* 1 = 0.000170991 loss)\nI0819 07:02:13.784719 17344 sgd_solver.cpp:166] Iteration 63200, lr = 0.00035\nI0819 07:04:31.345387 17344 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 07:05:52.767527 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8518\nI0819 07:05:52.767912 17344 solver.cpp:404]     
Test net output #1: loss = 0.727134 (* 1 = 0.727134 loss)\nI0819 07:05:54.080257 17344 solver.cpp:228] Iteration 63300, loss = 0.000183177\nI0819 07:05:54.080317 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:05:54.080335 17344 solver.cpp:244]     Train net output #1: loss = 0.000182998 (* 1 = 0.000182998 loss)\nI0819 07:05:54.172341 17344 sgd_solver.cpp:166] Iteration 63300, lr = 0.00035\nI0819 07:08:11.814415 17344 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:09:33.229782 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85292\nI0819 07:09:33.230060 17344 solver.cpp:404]     Test net output #1: loss = 0.721752 (* 1 = 0.721752 loss)\nI0819 07:09:34.542474 17344 solver.cpp:228] Iteration 63400, loss = 0.00019726\nI0819 07:09:34.542529 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:09:34.542547 17344 solver.cpp:244]     Train net output #1: loss = 0.000197081 (* 1 = 0.000197081 loss)\nI0819 07:09:34.631657 17344 sgd_solver.cpp:166] Iteration 63400, lr = 0.00035\nI0819 07:11:52.561367 17344 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 07:13:13.846460 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85972\nI0819 07:13:13.846802 17344 solver.cpp:404]     Test net output #1: loss = 0.688993 (* 1 = 0.688993 loss)\nI0819 07:13:15.159476 17344 solver.cpp:228] Iteration 63500, loss = 0.000199084\nI0819 07:13:15.159533 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:13:15.159550 17344 solver.cpp:244]     Train net output #1: loss = 0.000198905 (* 1 = 0.000198905 loss)\nI0819 07:13:15.247298 17344 sgd_solver.cpp:166] Iteration 63500, lr = 0.00035\nI0819 07:15:32.812846 17344 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:16:54.209215 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85996\nI0819 07:16:54.209508 17344 solver.cpp:404]     Test net output #1: loss = 0.682244 (* 1 = 0.682244 loss)\nI0819 07:16:55.522269 17344 solver.cpp:228] 
Iteration 63600, loss = 0.000181309\nI0819 07:16:55.522325 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:16:55.522343 17344 solver.cpp:244]     Train net output #1: loss = 0.00018113 (* 1 = 0.00018113 loss)\nI0819 07:16:55.615345 17344 sgd_solver.cpp:166] Iteration 63600, lr = 0.00035\nI0819 07:19:13.308485 17344 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 07:20:34.301618 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86332\nI0819 07:20:34.301946 17344 solver.cpp:404]     Test net output #1: loss = 0.661267 (* 1 = 0.661267 loss)\nI0819 07:20:35.613922 17344 solver.cpp:228] Iteration 63700, loss = 0.000180424\nI0819 07:20:35.613983 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:20:35.614008 17344 solver.cpp:244]     Train net output #1: loss = 0.000180245 (* 1 = 0.000180245 loss)\nI0819 07:20:35.707274 17344 sgd_solver.cpp:166] Iteration 63700, lr = 0.00035\nI0819 07:22:53.393641 17344 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 07:24:13.805935 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86124\nI0819 07:24:13.806248 17344 solver.cpp:404]     Test net output #1: loss = 0.672417 (* 1 = 0.672417 loss)\nI0819 07:24:15.116524 17344 solver.cpp:228] Iteration 63800, loss = 0.000169789\nI0819 07:24:15.116570 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:24:15.116587 17344 solver.cpp:244]     Train net output #1: loss = 0.00016961 (* 1 = 0.00016961 loss)\nI0819 07:24:15.202728 17344 sgd_solver.cpp:166] Iteration 63800, lr = 0.00035\nI0819 07:26:32.204195 17344 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 07:27:52.697741 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85788\nI0819 07:27:52.698132 17344 solver.cpp:404]     Test net output #1: loss = 0.668397 (* 1 = 0.668397 loss)\nI0819 07:27:54.010772 17344 solver.cpp:228] Iteration 63900, loss = 0.000156561\nI0819 07:27:54.010838 17344 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 07:27:54.010855 17344 solver.cpp:244]     Train net output #1: loss = 0.000156382 (* 1 = 0.000156382 loss)\nI0819 07:27:54.102191 17344 sgd_solver.cpp:166] Iteration 63900, lr = 0.00035\nI0819 07:30:11.715728 17344 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 07:31:33.132231 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8562\nI0819 07:31:33.132530 17344 solver.cpp:404]     Test net output #1: loss = 0.672371 (* 1 = 0.672371 loss)\nI0819 07:31:34.445338 17344 solver.cpp:228] Iteration 64000, loss = 0.000204497\nI0819 07:31:34.445397 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:31:34.445415 17344 solver.cpp:244]     Train net output #1: loss = 0.000204318 (* 1 = 0.000204318 loss)\nI0819 07:31:34.536576 17344 sgd_solver.cpp:166] Iteration 64000, lr = 0.00035\nI0819 07:33:52.076359 17344 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 07:35:13.504075 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85376\nI0819 07:35:13.504339 17344 solver.cpp:404]     Test net output #1: loss = 0.681238 (* 1 = 0.681238 loss)\nI0819 07:35:14.817406 17344 solver.cpp:228] Iteration 64100, loss = 0.000203253\nI0819 07:35:14.817471 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:35:14.817498 17344 solver.cpp:244]     Train net output #1: loss = 0.000203074 (* 1 = 0.000203074 loss)\nI0819 07:35:14.906154 17344 sgd_solver.cpp:166] Iteration 64100, lr = 0.00035\nI0819 07:37:32.538353 17344 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 07:38:53.975245 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8494\nI0819 07:38:53.975533 17344 solver.cpp:404]     Test net output #1: loss = 0.68832 (* 1 = 0.68832 loss)\nI0819 07:38:55.286928 17344 solver.cpp:228] Iteration 64200, loss = 0.000173549\nI0819 07:38:55.286983 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:38:55.287001 17344 solver.cpp:244]     Train net output #1: loss = 0.000173369 (* 1 = 
0.000173369 loss)\nI0819 07:38:55.383460 17344 sgd_solver.cpp:166] Iteration 64200, lr = 0.00035\nI0819 07:41:12.966153 17344 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 07:42:34.372254 17344 solver.cpp:404]     Test net output #0: accuracy = 0.84612\nI0819 07:42:34.372577 17344 solver.cpp:404]     Test net output #1: loss = 0.697027 (* 1 = 0.697027 loss)\nI0819 07:42:35.684823 17344 solver.cpp:228] Iteration 64300, loss = 0.000181893\nI0819 07:42:35.684880 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:42:35.684898 17344 solver.cpp:244]     Train net output #1: loss = 0.000181714 (* 1 = 0.000181714 loss)\nI0819 07:42:35.774454 17344 sgd_solver.cpp:166] Iteration 64300, lr = 0.00035\nI0819 07:44:53.378093 17344 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 07:46:14.802618 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83768\nI0819 07:46:14.802892 17344 solver.cpp:404]     Test net output #1: loss = 0.728566 (* 1 = 0.728566 loss)\nI0819 07:46:16.115020 17344 solver.cpp:228] Iteration 64400, loss = 0.000141347\nI0819 07:46:16.115075 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:46:16.115093 17344 solver.cpp:244]     Train net output #1: loss = 0.000141168 (* 1 = 0.000141168 loss)\nI0819 07:46:16.202055 17344 sgd_solver.cpp:166] Iteration 64400, lr = 0.00035\nI0819 07:48:33.740506 17344 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 07:49:54.004581 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8336\nI0819 07:49:54.004880 17344 solver.cpp:404]     Test net output #1: loss = 0.738328 (* 1 = 0.738328 loss)\nI0819 07:49:55.313009 17344 solver.cpp:228] Iteration 64500, loss = 0.000163258\nI0819 07:49:55.313053 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:49:55.313069 17344 solver.cpp:244]     Train net output #1: loss = 0.000163079 (* 1 = 0.000163079 loss)\nI0819 07:49:55.403744 17344 sgd_solver.cpp:166] Iteration 64500, lr = 0.00035\nI0819 
07:52:12.599859 17344 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 07:53:32.880632 17344 solver.cpp:404]     Test net output #0: accuracy = 0.81632\nI0819 07:53:32.880971 17344 solver.cpp:404]     Test net output #1: loss = 0.802165 (* 1 = 0.802165 loss)\nI0819 07:53:34.188998 17344 solver.cpp:228] Iteration 64600, loss = 0.000170218\nI0819 07:53:34.189041 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:53:34.189057 17344 solver.cpp:244]     Train net output #1: loss = 0.000170039 (* 1 = 0.000170039 loss)\nI0819 07:53:34.280774 17344 sgd_solver.cpp:166] Iteration 64600, lr = 0.00035\nI0819 07:55:51.376140 17344 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 07:57:11.740551 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80504\nI0819 07:57:11.740866 17344 solver.cpp:404]     Test net output #1: loss = 0.838725 (* 1 = 0.838725 loss)\nI0819 07:57:13.049305 17344 solver.cpp:228] Iteration 64700, loss = 0.000194767\nI0819 07:57:13.049350 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:57:13.049365 17344 solver.cpp:244]     Train net output #1: loss = 0.000194588 (* 1 = 0.000194588 loss)\nI0819 07:57:13.146528 17344 sgd_solver.cpp:166] Iteration 64700, lr = 0.00035\nI0819 07:59:30.286588 17344 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 08:00:50.659276 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79088\nI0819 08:00:50.659613 17344 solver.cpp:404]     Test net output #1: loss = 0.864707 (* 1 = 0.864707 loss)\nI0819 08:00:51.967675 17344 solver.cpp:228] Iteration 64800, loss = 0.000179438\nI0819 08:00:51.967717 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:00:51.967733 17344 solver.cpp:244]     Train net output #1: loss = 0.000179259 (* 1 = 0.000179259 loss)\nI0819 08:00:52.060195 17344 sgd_solver.cpp:166] Iteration 64800, lr = 0.00035\nI0819 08:03:09.229691 17344 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 08:04:29.592278 17344 
solver.cpp:404]     Test net output #0: accuracy = 0.76496\nI0819 08:04:29.592609 17344 solver.cpp:404]     Test net output #1: loss = 0.948067 (* 1 = 0.948067 loss)\nI0819 08:04:30.900312 17344 solver.cpp:228] Iteration 64900, loss = 0.000170672\nI0819 08:04:30.900353 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:04:30.900369 17344 solver.cpp:244]     Train net output #1: loss = 0.000170493 (* 1 = 0.000170493 loss)\nI0819 08:04:30.997642 17344 sgd_solver.cpp:166] Iteration 64900, lr = 0.00035\nI0819 08:06:48.029445 17344 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:08:08.397871 17344 solver.cpp:404]     Test net output #0: accuracy = 0.73076\nI0819 08:08:08.398216 17344 solver.cpp:404]     Test net output #1: loss = 1.07814 (* 1 = 1.07814 loss)\nI0819 08:08:09.706246 17344 solver.cpp:228] Iteration 65000, loss = 0.000180446\nI0819 08:08:09.706288 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:08:09.706305 17344 solver.cpp:244]     Train net output #1: loss = 0.000180266 (* 1 = 0.000180266 loss)\nI0819 08:08:09.803042 17344 sgd_solver.cpp:166] Iteration 65000, lr = 0.00035\nI0819 08:10:26.932945 17344 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 08:11:47.300909 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64928\nI0819 08:11:47.301249 17344 solver.cpp:404]     Test net output #1: loss = 1.31506 (* 1 = 1.31506 loss)\nI0819 08:11:48.609557 17344 solver.cpp:228] Iteration 65100, loss = 0.000185937\nI0819 08:11:48.609596 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:11:48.609612 17344 solver.cpp:244]     Train net output #1: loss = 0.000185757 (* 1 = 0.000185757 loss)\nI0819 08:11:48.707846 17344 sgd_solver.cpp:166] Iteration 65100, lr = 0.00035\nI0819 08:14:05.763526 17344 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:15:26.130499 17344 solver.cpp:404]     Test net output #0: accuracy = 0.57484\nI0819 08:15:26.130843 17344 solver.cpp:404]     
Test net output #1: loss = 1.56943 (* 1 = 1.56943 loss)\nI0819 08:15:27.438973 17344 solver.cpp:228] Iteration 65200, loss = 0.000166351\nI0819 08:15:27.439016 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:15:27.439033 17344 solver.cpp:244]     Train net output #1: loss = 0.000166171 (* 1 = 0.000166171 loss)\nI0819 08:15:27.533011 17344 sgd_solver.cpp:166] Iteration 65200, lr = 0.00035\nI0819 08:17:44.715541 17344 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 08:19:05.078671 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49592\nI0819 08:19:05.079017 17344 solver.cpp:404]     Test net output #1: loss = 1.84949 (* 1 = 1.84949 loss)\nI0819 08:19:06.383738 17344 solver.cpp:228] Iteration 65300, loss = 0.000170418\nI0819 08:19:06.383785 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:19:06.383802 17344 solver.cpp:244]     Train net output #1: loss = 0.000170238 (* 1 = 0.000170238 loss)\nI0819 08:19:06.484226 17344 sgd_solver.cpp:166] Iteration 65300, lr = 0.00035\nI0819 08:21:23.678037 17344 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 08:22:44.072724 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42356\nI0819 08:22:44.073071 17344 solver.cpp:404]     Test net output #1: loss = 2.25292 (* 1 = 2.25292 loss)\nI0819 08:22:45.381649 17344 solver.cpp:228] Iteration 65400, loss = 0.000170953\nI0819 08:22:45.381692 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:22:45.381708 17344 solver.cpp:244]     Train net output #1: loss = 0.000170774 (* 1 = 0.000170774 loss)\nI0819 08:22:45.473800 17344 sgd_solver.cpp:166] Iteration 65400, lr = 0.00035\nI0819 08:25:02.743505 17344 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 08:26:23.099488 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37144\nI0819 08:26:23.099827 17344 solver.cpp:404]     Test net output #1: loss = 2.68608 (* 1 = 2.68608 loss)\nI0819 08:26:24.408924 17344 solver.cpp:228] Iteration 
65500, loss = 0.000181284\nI0819 08:26:24.408968 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:26:24.408985 17344 solver.cpp:244]     Train net output #1: loss = 0.000181104 (* 1 = 0.000181104 loss)\nI0819 08:26:24.508777 17344 sgd_solver.cpp:166] Iteration 65500, lr = 0.00035\nI0819 08:28:41.619524 17344 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 08:30:01.984110 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31584\nI0819 08:30:01.984416 17344 solver.cpp:404]     Test net output #1: loss = 3.36775 (* 1 = 3.36775 loss)\nI0819 08:30:03.292361 17344 solver.cpp:228] Iteration 65600, loss = 0.000189494\nI0819 08:30:03.292407 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:30:03.292423 17344 solver.cpp:244]     Train net output #1: loss = 0.000189315 (* 1 = 0.000189315 loss)\nI0819 08:30:03.390130 17344 sgd_solver.cpp:166] Iteration 65600, lr = 0.00035\nI0819 08:32:20.662590 17344 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 08:33:41.063438 17344 solver.cpp:404]     Test net output #0: accuracy = 0.27508\nI0819 08:33:41.063784 17344 solver.cpp:404]     Test net output #1: loss = 3.82902 (* 1 = 3.82902 loss)\nI0819 08:33:42.373172 17344 solver.cpp:228] Iteration 65700, loss = 0.000188859\nI0819 08:33:42.373220 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:33:42.373244 17344 solver.cpp:244]     Train net output #1: loss = 0.00018868 (* 1 = 0.00018868 loss)\nI0819 08:33:42.471518 17344 sgd_solver.cpp:166] Iteration 65700, lr = 0.00035\nI0819 08:35:59.633462 17344 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 08:37:20.044209 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28948\nI0819 08:37:20.044556 17344 solver.cpp:404]     Test net output #1: loss = 6.01582 (* 1 = 6.01582 loss)\nI0819 08:37:21.353523 17344 solver.cpp:228] Iteration 65800, loss = 0.257413\nI0819 08:37:21.353569 17344 solver.cpp:244]     Train net output #0: accuracy = 
0.92\nI0819 08:37:21.353592 17344 solver.cpp:244]     Train net output #1: loss = 0.257413 (* 1 = 0.257413 loss)\nI0819 08:37:21.446221 17344 sgd_solver.cpp:166] Iteration 65800, lr = 0.00035\nI0819 08:39:38.605911 17344 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 08:40:59.030571 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37404\nI0819 08:40:59.030917 17344 solver.cpp:404]     Test net output #1: loss = 2.86622 (* 1 = 2.86622 loss)\nI0819 08:41:00.339841 17344 solver.cpp:228] Iteration 65900, loss = 0.0378501\nI0819 08:41:00.339890 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 08:41:00.339915 17344 solver.cpp:244]     Train net output #1: loss = 0.0378499 (* 1 = 0.0378499 loss)\nI0819 08:41:00.427587 17344 sgd_solver.cpp:166] Iteration 65900, lr = 0.00035\nI0819 08:43:17.560835 17344 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 08:44:37.920369 17344 solver.cpp:404]     Test net output #0: accuracy = 0.295\nI0819 08:44:37.920693 17344 solver.cpp:404]     Test net output #1: loss = 3.4608 (* 1 = 3.4608 loss)\nI0819 08:44:39.228947 17344 solver.cpp:228] Iteration 66000, loss = 0.0433299\nI0819 08:44:39.228994 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:44:39.229010 17344 solver.cpp:244]     Train net output #1: loss = 0.0433297 (* 1 = 0.0433297 loss)\nI0819 08:44:39.318428 17344 sgd_solver.cpp:166] Iteration 66000, lr = 0.00035\nI0819 08:46:56.495096 17344 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 08:48:17.119069 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32396\nI0819 08:48:17.119444 17344 solver.cpp:404]     Test net output #1: loss = 3.14682 (* 1 = 3.14682 loss)\nI0819 08:48:18.432371 17344 solver.cpp:228] Iteration 66100, loss = 0.0117695\nI0819 08:48:18.432435 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:48:18.432452 17344 solver.cpp:244]     Train net output #1: loss = 0.0117693 (* 1 = 0.0117693 loss)\nI0819 
08:48:18.524243 17344 sgd_solver.cpp:166] Iteration 66100, lr = 0.00035\nI0819 08:50:35.762387 17344 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 08:51:57.183936 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29944\nI0819 08:51:57.184300 17344 solver.cpp:404]     Test net output #1: loss = 3.25523 (* 1 = 3.25523 loss)\nI0819 08:51:58.496736 17344 solver.cpp:228] Iteration 66200, loss = 0.00582793\nI0819 08:51:58.496789 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:51:58.496812 17344 solver.cpp:244]     Train net output #1: loss = 0.0058277 (* 1 = 0.0058277 loss)\nI0819 08:51:58.593730 17344 sgd_solver.cpp:166] Iteration 66200, lr = 0.00035\nI0819 08:54:16.009697 17344 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 08:55:37.445166 17344 solver.cpp:404]     Test net output #0: accuracy = 0.32276\nI0819 08:55:37.445531 17344 solver.cpp:404]     Test net output #1: loss = 3.53434 (* 1 = 3.53434 loss)\nI0819 08:55:38.757625 17344 solver.cpp:228] Iteration 66300, loss = 0.00559098\nI0819 08:55:38.757688 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:55:38.757705 17344 solver.cpp:244]     Train net output #1: loss = 0.00559075 (* 1 = 0.00559075 loss)\nI0819 08:55:38.850617 17344 sgd_solver.cpp:166] Iteration 66300, lr = 0.00035\nI0819 08:57:56.170105 17344 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 08:59:17.591253 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30224\nI0819 08:59:17.591641 17344 solver.cpp:404]     Test net output #1: loss = 3.72866 (* 1 = 3.72866 loss)\nI0819 08:59:18.903317 17344 solver.cpp:228] Iteration 66400, loss = 0.00205569\nI0819 08:59:18.903378 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:59:18.903396 17344 solver.cpp:244]     Train net output #1: loss = 0.00205545 (* 1 = 0.00205545 loss)\nI0819 08:59:18.991541 17344 sgd_solver.cpp:166] Iteration 66400, lr = 0.00035\nI0819 09:01:36.334509 17344 solver.cpp:337] 
Iteration 66500, Testing net (#0)\nI0819 09:02:57.740178 17344 solver.cpp:404]     Test net output #0: accuracy = 0.252\nI0819 09:02:57.740536 17344 solver.cpp:404]     Test net output #1: loss = 3.59112 (* 1 = 3.59112 loss)\nI0819 09:02:59.052546 17344 solver.cpp:228] Iteration 66500, loss = 0.00134142\nI0819 09:02:59.052604 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:02:59.052621 17344 solver.cpp:244]     Train net output #1: loss = 0.00134118 (* 1 = 0.00134118 loss)\nI0819 09:02:59.147670 17344 sgd_solver.cpp:166] Iteration 66500, lr = 0.00035\nI0819 09:05:16.386287 17344 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:06:37.795665 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29092\nI0819 09:06:37.796058 17344 solver.cpp:404]     Test net output #1: loss = 3.40094 (* 1 = 3.40094 loss)\nI0819 09:06:39.114601 17344 solver.cpp:228] Iteration 66600, loss = 0.00942414\nI0819 09:06:39.114667 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:06:39.114693 17344 solver.cpp:244]     Train net output #1: loss = 0.00942391 (* 1 = 0.00942391 loss)\nI0819 09:06:39.204342 17344 sgd_solver.cpp:166] Iteration 66600, lr = 0.00035\nI0819 09:08:55.921241 17344 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 09:10:16.285802 17344 solver.cpp:404]     Test net output #0: accuracy = 0.23636\nI0819 09:10:16.286108 17344 solver.cpp:404]     Test net output #1: loss = 3.85729 (* 1 = 3.85729 loss)\nI0819 09:10:17.594864 17344 solver.cpp:228] Iteration 66700, loss = 0.00984459\nI0819 09:10:17.594907 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:10:17.594923 17344 solver.cpp:244]     Train net output #1: loss = 0.00984435 (* 1 = 0.00984435 loss)\nI0819 09:10:17.686913 17344 sgd_solver.cpp:166] Iteration 66700, lr = 0.00035\nI0819 09:12:34.230329 17344 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:13:54.597860 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.32368\nI0819 09:13:54.598172 17344 solver.cpp:404]     Test net output #1: loss = 3.7335 (* 1 = 3.7335 loss)\nI0819 09:13:55.906273 17344 solver.cpp:228] Iteration 66800, loss = 0.00183427\nI0819 09:13:55.906317 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:13:55.906334 17344 solver.cpp:244]     Train net output #1: loss = 0.00183404 (* 1 = 0.00183404 loss)\nI0819 09:13:56.004184 17344 sgd_solver.cpp:166] Iteration 66800, lr = 0.00035\nI0819 09:16:12.562552 17344 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 09:17:32.934468 17344 solver.cpp:404]     Test net output #0: accuracy = 0.40088\nI0819 09:17:32.934798 17344 solver.cpp:404]     Test net output #1: loss = 2.96472 (* 1 = 2.96472 loss)\nI0819 09:17:34.243312 17344 solver.cpp:228] Iteration 66900, loss = 0.0015882\nI0819 09:17:34.243357 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:17:34.243374 17344 solver.cpp:244]     Train net output #1: loss = 0.00158796 (* 1 = 0.00158796 loss)\nI0819 09:17:34.338608 17344 sgd_solver.cpp:166] Iteration 66900, lr = 0.00035\nI0819 09:19:51.002964 17344 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 09:21:11.373154 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4268\nI0819 09:21:11.373494 17344 solver.cpp:404]     Test net output #1: loss = 2.49028 (* 1 = 2.49028 loss)\nI0819 09:21:12.682090 17344 solver.cpp:228] Iteration 67000, loss = 0.0089747\nI0819 09:21:12.682135 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:21:12.682153 17344 solver.cpp:244]     Train net output #1: loss = 0.00897446 (* 1 = 0.00897446 loss)\nI0819 09:21:12.771847 17344 sgd_solver.cpp:166] Iteration 67000, lr = 0.00035\nI0819 09:23:29.275928 17344 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 09:24:49.640118 17344 solver.cpp:404]     Test net output #0: accuracy = 0.5058\nI0819 09:24:49.640439 17344 solver.cpp:404]     Test net output #1: loss = 2.53752 (* 1 = 2.53752 loss)\nI0819 
09:24:50.948750 17344 solver.cpp:228] Iteration 67100, loss = 0.0332301\nI0819 09:24:50.948796 17344 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:24:50.948812 17344 solver.cpp:244]     Train net output #1: loss = 0.0332299 (* 1 = 0.0332299 loss)\nI0819 09:24:51.046653 17344 sgd_solver.cpp:166] Iteration 67100, lr = 0.00035\nI0819 09:27:07.581992 17344 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 09:28:27.951666 17344 solver.cpp:404]     Test net output #0: accuracy = 0.63248\nI0819 09:28:27.952016 17344 solver.cpp:404]     Test net output #1: loss = 1.9448 (* 1 = 1.9448 loss)\nI0819 09:28:29.260665 17344 solver.cpp:228] Iteration 67200, loss = 0.0167369\nI0819 09:28:29.260711 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:28:29.260733 17344 solver.cpp:244]     Train net output #1: loss = 0.0167367 (* 1 = 0.0167367 loss)\nI0819 09:28:29.358299 17344 sgd_solver.cpp:166] Iteration 67200, lr = 0.00035\nI0819 09:30:45.957192 17344 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 09:32:06.331789 17344 solver.cpp:404]     Test net output #0: accuracy = 0.52304\nI0819 09:32:06.332114 17344 solver.cpp:404]     Test net output #1: loss = 2.28521 (* 1 = 2.28521 loss)\nI0819 09:32:07.640763 17344 solver.cpp:228] Iteration 67300, loss = 0.00534514\nI0819 09:32:07.640808 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:32:07.640825 17344 solver.cpp:244]     Train net output #1: loss = 0.00534489 (* 1 = 0.00534489 loss)\nI0819 09:32:07.742481 17344 sgd_solver.cpp:166] Iteration 67300, lr = 0.00035\nI0819 09:34:24.304096 17344 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 09:35:44.678984 17344 solver.cpp:404]     Test net output #0: accuracy = 0.3474\nI0819 09:35:44.679321 17344 solver.cpp:404]     Test net output #1: loss = 3.34275 (* 1 = 3.34275 loss)\nI0819 09:35:45.987892 17344 solver.cpp:228] Iteration 67400, loss = 0.000922146\nI0819 09:35:45.987937 17344 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0819 09:35:45.987953 17344 solver.cpp:244]     Train net output #1: loss = 0.000921899 (* 1 = 0.000921899 loss)\nI0819 09:35:46.081856 17344 sgd_solver.cpp:166] Iteration 67400, lr = 0.00035\nI0819 09:38:02.650439 17344 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 09:39:22.928110 17344 solver.cpp:404]     Test net output #0: accuracy = 0.37704\nI0819 09:39:22.928452 17344 solver.cpp:404]     Test net output #1: loss = 2.97242 (* 1 = 2.97242 loss)\nI0819 09:39:24.236901 17344 solver.cpp:228] Iteration 67500, loss = 0.00143627\nI0819 09:39:24.236944 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:39:24.236960 17344 solver.cpp:244]     Train net output #1: loss = 0.00143602 (* 1 = 0.00143602 loss)\nI0819 09:39:24.332715 17344 sgd_solver.cpp:166] Iteration 67500, lr = 0.00035\nI0819 09:41:40.906283 17344 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 09:43:01.179186 17344 solver.cpp:404]     Test net output #0: accuracy = 0.28296\nI0819 09:43:01.179508 17344 solver.cpp:404]     Test net output #1: loss = 3.64793 (* 1 = 3.64793 loss)\nI0819 09:43:02.488060 17344 solver.cpp:228] Iteration 67600, loss = 0.0017903\nI0819 09:43:02.488103 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:43:02.488121 17344 solver.cpp:244]     Train net output #1: loss = 0.00179005 (* 1 = 0.00179005 loss)\nI0819 09:43:02.583573 17344 sgd_solver.cpp:166] Iteration 67600, lr = 0.00035\nI0819 09:45:19.268589 17344 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 09:46:39.554538 17344 solver.cpp:404]     Test net output #0: accuracy = 0.30952\nI0819 09:46:39.554839 17344 solver.cpp:404]     Test net output #1: loss = 3.3872 (* 1 = 3.3872 loss)\nI0819 09:46:40.863886 17344 solver.cpp:228] Iteration 67700, loss = 0.0182138\nI0819 09:46:40.863931 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:46:40.863948 17344 solver.cpp:244]     Train net output #1: loss = 
0.0182136 (* 1 = 0.0182136 loss)\nI0819 09:46:40.956013 17344 sgd_solver.cpp:166] Iteration 67700, lr = 0.00035\nI0819 09:48:57.744829 17344 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 09:50:18.034397 17344 solver.cpp:404]     Test net output #0: accuracy = 0.31608\nI0819 09:50:18.034724 17344 solver.cpp:404]     Test net output #1: loss = 3.25586 (* 1 = 3.25586 loss)\nI0819 09:50:19.343569 17344 solver.cpp:228] Iteration 67800, loss = 0.000585364\nI0819 09:50:19.343611 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:50:19.343628 17344 solver.cpp:244]     Train net output #1: loss = 0.000585117 (* 1 = 0.000585117 loss)\nI0819 09:50:19.441332 17344 sgd_solver.cpp:166] Iteration 67800, lr = 0.00035\nI0819 09:52:36.237126 17344 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 09:53:56.515288 17344 solver.cpp:404]     Test net output #0: accuracy = 0.49236\nI0819 09:53:56.515586 17344 solver.cpp:404]     Test net output #1: loss = 2.68313 (* 1 = 2.68313 loss)\nI0819 09:53:57.824196 17344 solver.cpp:228] Iteration 67900, loss = 0.00783965\nI0819 09:53:57.824239 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:53:57.824255 17344 solver.cpp:244]     Train net output #1: loss = 0.0078394 (* 1 = 0.0078394 loss)\nI0819 09:53:57.925258 17344 sgd_solver.cpp:166] Iteration 67900, lr = 0.00035\nI0819 09:56:14.862203 17344 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 09:57:35.149642 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46704\nI0819 09:57:35.149940 17344 solver.cpp:404]     Test net output #1: loss = 2.90322 (* 1 = 2.90322 loss)\nI0819 09:57:36.457964 17344 solver.cpp:228] Iteration 68000, loss = 0.0210935\nI0819 09:57:36.458009 17344 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:57:36.458026 17344 solver.cpp:244]     Train net output #1: loss = 0.0210933 (* 1 = 0.0210933 loss)\nI0819 09:57:36.549218 17344 sgd_solver.cpp:166] Iteration 68000, lr = 0.00035\nI0819 
09:59:53.293213 17344 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 10:01:13.557901 17344 solver.cpp:404]     Test net output #0: accuracy = 0.38768\nI0819 10:01:13.558233 17344 solver.cpp:404]     Test net output #1: loss = 3.64949 (* 1 = 3.64949 loss)\nI0819 10:01:14.866926 17344 solver.cpp:228] Iteration 68100, loss = 0.00155878\nI0819 10:01:14.866971 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:01:14.866987 17344 solver.cpp:244]     Train net output #1: loss = 0.00155853 (* 1 = 0.00155853 loss)\nI0819 10:01:14.958597 17344 sgd_solver.cpp:166] Iteration 68100, lr = 0.00035\nI0819 10:03:31.963582 17344 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:04:52.230188 17344 solver.cpp:404]     Test net output #0: accuracy = 0.34944\nI0819 10:04:52.230511 17344 solver.cpp:404]     Test net output #1: loss = 4.42017 (* 1 = 4.42017 loss)\nI0819 10:04:53.539647 17344 solver.cpp:228] Iteration 68200, loss = 0.00234306\nI0819 10:04:53.539692 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:04:53.539710 17344 solver.cpp:244]     Train net output #1: loss = 0.00234281 (* 1 = 0.00234281 loss)\nI0819 10:04:53.633568 17344 sgd_solver.cpp:166] Iteration 68200, lr = 0.00035\nI0819 10:07:10.497939 17344 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 10:08:30.760596 17344 solver.cpp:404]     Test net output #0: accuracy = 0.29092\nI0819 10:08:30.760910 17344 solver.cpp:404]     Test net output #1: loss = 4.45489 (* 1 = 4.45489 loss)\nI0819 10:08:32.069466 17344 solver.cpp:228] Iteration 68300, loss = 0.00387074\nI0819 10:08:32.069511 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:08:32.069527 17344 solver.cpp:244]     Train net output #1: loss = 0.00387049 (* 1 = 0.00387049 loss)\nI0819 10:08:32.167150 17344 sgd_solver.cpp:166] Iteration 68300, lr = 0.00035\nI0819 10:10:48.901121 17344 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:12:09.269453 17344 solver.cpp:404]     
Test net output #0: accuracy = 0.38956\nI0819 10:12:09.269784 17344 solver.cpp:404]     Test net output #1: loss = 3.65207 (* 1 = 3.65207 loss)\nI0819 10:12:10.579092 17344 solver.cpp:228] Iteration 68400, loss = 0.000692656\nI0819 10:12:10.579138 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:12:10.579154 17344 solver.cpp:244]     Train net output #1: loss = 0.000692402 (* 1 = 0.000692402 loss)\nI0819 10:12:10.671624 17344 sgd_solver.cpp:166] Iteration 68400, lr = 0.00035\nI0819 10:14:27.555445 17344 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 10:15:47.924867 17344 solver.cpp:404]     Test net output #0: accuracy = 0.42324\nI0819 10:15:47.925199 17344 solver.cpp:404]     Test net output #1: loss = 3.56666 (* 1 = 3.56666 loss)\nI0819 10:15:49.234411 17344 solver.cpp:228] Iteration 68500, loss = 0.000367428\nI0819 10:15:49.234457 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:15:49.234473 17344 solver.cpp:244]     Train net output #1: loss = 0.000367174 (* 1 = 0.000367174 loss)\nI0819 10:15:49.328042 17344 sgd_solver.cpp:166] Iteration 68500, lr = 0.00035\nI0819 10:18:06.139216 17344 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 10:19:26.500442 17344 solver.cpp:404]     Test net output #0: accuracy = 0.46672\nI0819 10:19:26.500754 17344 solver.cpp:404]     Test net output #1: loss = 3.41534 (* 1 = 3.41534 loss)\nI0819 10:19:27.809408 17344 solver.cpp:228] Iteration 68600, loss = 0.00280156\nI0819 10:19:27.809454 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:19:27.809471 17344 solver.cpp:244]     Train net output #1: loss = 0.0028013 (* 1 = 0.0028013 loss)\nI0819 10:19:27.905477 17344 sgd_solver.cpp:166] Iteration 68600, lr = 0.00035\nI0819 10:21:44.771010 17344 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 10:23:05.119791 17344 solver.cpp:404]     Test net output #0: accuracy = 0.48568\nI0819 10:23:05.120116 17344 solver.cpp:404]     Test net output #1: loss = 
3.11723 (* 1 = 3.11723 loss)\nI0819 10:23:06.428498 17344 solver.cpp:228] Iteration 68700, loss = 0.00153342\nI0819 10:23:06.428541 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:23:06.428558 17344 solver.cpp:244]     Train net output #1: loss = 0.00153317 (* 1 = 0.00153317 loss)\nI0819 10:23:06.521047 17344 sgd_solver.cpp:166] Iteration 68700, lr = 0.00035\nI0819 10:25:23.333394 17344 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 10:26:43.687036 17344 solver.cpp:404]     Test net output #0: accuracy = 0.4538\nI0819 10:26:43.687378 17344 solver.cpp:404]     Test net output #1: loss = 3.11834 (* 1 = 3.11834 loss)\nI0819 10:26:44.997059 17344 solver.cpp:228] Iteration 68800, loss = 0.00179867\nI0819 10:26:44.997103 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:26:44.997119 17344 solver.cpp:244]     Train net output #1: loss = 0.00179841 (* 1 = 0.00179841 loss)\nI0819 10:26:45.088448 17344 sgd_solver.cpp:166] Iteration 68800, lr = 0.00035\nI0819 10:29:01.980224 17344 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 10:30:22.345299 17344 solver.cpp:404]     Test net output #0: accuracy = 0.33944\nI0819 10:30:22.345639 17344 solver.cpp:404]     Test net output #1: loss = 5.56063 (* 1 = 5.56063 loss)\nI0819 10:30:23.654824 17344 solver.cpp:228] Iteration 68900, loss = 0.00256201\nI0819 10:30:23.654871 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:30:23.654887 17344 solver.cpp:244]     Train net output #1: loss = 0.00256175 (* 1 = 0.00256175 loss)\nI0819 10:30:23.750118 17344 sgd_solver.cpp:166] Iteration 68900, lr = 0.00035\nI0819 10:32:40.495990 17344 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 10:34:00.855118 17344 solver.cpp:404]     Test net output #0: accuracy = 0.36744\nI0819 10:34:00.855464 17344 solver.cpp:404]     Test net output #1: loss = 5.39369 (* 1 = 5.39369 loss)\nI0819 10:34:02.163977 17344 solver.cpp:228] Iteration 69000, loss = 0.00166698\nI0819 
10:34:02.164019 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:34:02.164036 17344 solver.cpp:244]     Train net output #1: loss = 0.00166672 (* 1 = 0.00166672 loss)\nI0819 10:34:02.253072 17344 sgd_solver.cpp:166] Iteration 69000, lr = 0.00035\nI0819 10:36:19.069409 17344 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 10:37:39.432808 17344 solver.cpp:404]     Test net output #0: accuracy = 0.47568\nI0819 10:37:39.433135 17344 solver.cpp:404]     Test net output #1: loss = 2.7849 (* 1 = 2.7849 loss)\nI0819 10:37:40.741472 17344 solver.cpp:228] Iteration 69100, loss = 0.000981961\nI0819 10:37:40.741516 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:37:40.741533 17344 solver.cpp:244]     Train net output #1: loss = 0.000981697 (* 1 = 0.000981697 loss)\nI0819 10:37:40.832314 17344 sgd_solver.cpp:166] Iteration 69100, lr = 0.00035\nI0819 10:39:57.539165 17344 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 10:41:17.896601 17344 solver.cpp:404]     Test net output #0: accuracy = 0.43348\nI0819 10:41:17.896944 17344 solver.cpp:404]     Test net output #1: loss = 3.50427 (* 1 = 3.50427 loss)\nI0819 10:41:19.205174 17344 solver.cpp:228] Iteration 69200, loss = 0.00168585\nI0819 10:41:19.205220 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:41:19.205240 17344 solver.cpp:244]     Train net output #1: loss = 0.00168558 (* 1 = 0.00168558 loss)\nI0819 10:41:19.299135 17344 sgd_solver.cpp:166] Iteration 69200, lr = 0.00035\nI0819 10:43:35.968771 17344 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 10:44:56.332401 17344 solver.cpp:404]     Test net output #0: accuracy = 0.51628\nI0819 10:44:56.332725 17344 solver.cpp:404]     Test net output #1: loss = 2.66989 (* 1 = 2.66989 loss)\nI0819 10:44:57.641293 17344 solver.cpp:228] Iteration 69300, loss = 0.00175382\nI0819 10:44:57.641338 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:44:57.641355 17344 solver.cpp:244] 
    Train net output #1: loss = 0.00175356 (* 1 = 0.00175356 loss)\nI0819 10:44:57.738658 17344 sgd_solver.cpp:166] Iteration 69300, lr = 0.00035\nI0819 10:47:14.492424 17344 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 10:48:34.858245 17344 solver.cpp:404]     Test net output #0: accuracy = 0.58764\nI0819 10:48:34.858562 17344 solver.cpp:404]     Test net output #1: loss = 1.9926 (* 1 = 1.9926 loss)\nI0819 10:48:36.166975 17344 solver.cpp:228] Iteration 69400, loss = 0.000201835\nI0819 10:48:36.167019 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:48:36.167037 17344 solver.cpp:244]     Train net output #1: loss = 0.000201574 (* 1 = 0.000201574 loss)\nI0819 10:48:36.258499 17344 sgd_solver.cpp:166] Iteration 69400, lr = 0.00035\nI0819 10:50:53.057528 17344 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 10:52:13.429384 17344 solver.cpp:404]     Test net output #0: accuracy = 0.66588\nI0819 10:52:13.429733 17344 solver.cpp:404]     Test net output #1: loss = 1.57266 (* 1 = 1.57266 loss)\nI0819 10:52:14.739007 17344 solver.cpp:228] Iteration 69500, loss = 0.000183513\nI0819 10:52:14.739053 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:52:14.739068 17344 solver.cpp:244]     Train net output #1: loss = 0.000183252 (* 1 = 0.000183252 loss)\nI0819 10:52:14.825601 17344 sgd_solver.cpp:166] Iteration 69500, lr = 0.00035\nI0819 10:54:31.569985 17344 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 10:55:51.929687 17344 solver.cpp:404]     Test net output #0: accuracy = 0.72408\nI0819 10:55:51.930021 17344 solver.cpp:404]     Test net output #1: loss = 1.30903 (* 1 = 1.30903 loss)\nI0819 10:55:53.238574 17344 solver.cpp:228] Iteration 69600, loss = 0.000221095\nI0819 10:55:53.238617 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:55:53.238633 17344 solver.cpp:244]     Train net output #1: loss = 0.000220834 (* 1 = 0.000220834 loss)\nI0819 10:55:53.329857 17344 
sgd_solver.cpp:166] Iteration 69600, lr = 0.00035\nI0819 10:58:10.091264 17344 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 10:59:30.457545 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77012\nI0819 10:59:30.457849 17344 solver.cpp:404]     Test net output #1: loss = 1.0926 (* 1 = 1.0926 loss)\nI0819 10:59:31.766364 17344 solver.cpp:228] Iteration 69700, loss = 0.000153377\nI0819 10:59:31.766408 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:59:31.766425 17344 solver.cpp:244]     Train net output #1: loss = 0.000153115 (* 1 = 0.000153115 loss)\nI0819 10:59:31.857333 17344 sgd_solver.cpp:166] Iteration 69700, lr = 0.00035\nI0819 11:01:48.527367 17344 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 11:03:08.890866 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0819 11:03:08.891189 17344 solver.cpp:404]     Test net output #1: loss = 0.97132 (* 1 = 0.97132 loss)\nI0819 11:03:10.200899 17344 solver.cpp:228] Iteration 69800, loss = 0.000199457\nI0819 11:03:10.200942 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:03:10.200958 17344 solver.cpp:244]     Train net output #1: loss = 0.000199195 (* 1 = 0.000199195 loss)\nI0819 11:03:10.295466 17344 sgd_solver.cpp:166] Iteration 69800, lr = 0.00035\nI0819 11:05:27.011309 17344 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 11:06:47.381201 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83112\nI0819 11:06:47.381526 17344 solver.cpp:404]     Test net output #1: loss = 0.849227 (* 1 = 0.849227 loss)\nI0819 11:06:48.689091 17344 solver.cpp:228] Iteration 69900, loss = 0.000155999\nI0819 11:06:48.689136 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:06:48.689152 17344 solver.cpp:244]     Train net output #1: loss = 0.000155738 (* 1 = 0.000155738 loss)\nI0819 11:06:48.781559 17344 sgd_solver.cpp:166] Iteration 69900, lr = 0.00035\nI0819 11:09:05.470101 17344 solver.cpp:337] Iteration 70000, 
Testing net (#0)\nI0819 11:10:25.829195 17344 solver.cpp:404]     Test net output #0: accuracy = 0.84472\nI0819 11:10:25.829493 17344 solver.cpp:404]     Test net output #1: loss = 0.783297 (* 1 = 0.783297 loss)\nI0819 11:10:27.138382 17344 solver.cpp:228] Iteration 70000, loss = 0.000184105\nI0819 11:10:27.138428 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:10:27.138443 17344 solver.cpp:244]     Train net output #1: loss = 0.000183844 (* 1 = 0.000183844 loss)\nI0819 11:10:27.231799 17344 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:10:27.231820 17344 sgd_solver.cpp:166] Iteration 70000, lr = 3.5e-05\nI0819 11:12:43.975185 17344 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:14:04.341773 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86392\nI0819 11:14:04.342093 17344 solver.cpp:404]     Test net output #1: loss = 0.697584 (* 1 = 0.697584 loss)\nI0819 11:14:05.650303 17344 solver.cpp:228] Iteration 70100, loss = 0.000212951\nI0819 11:14:05.650348 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:14:05.650364 17344 solver.cpp:244]     Train net output #1: loss = 0.000212689 (* 1 = 0.000212689 loss)\nI0819 11:14:05.750061 17344 sgd_solver.cpp:166] Iteration 70100, lr = 3.5e-05\nI0819 11:16:22.477460 17344 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 11:17:42.846660 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0819 11:17:42.846956 17344 solver.cpp:404]     Test net output #1: loss = 0.662029 (* 1 = 0.662029 loss)\nI0819 11:17:44.155843 17344 solver.cpp:228] Iteration 70200, loss = 0.000131331\nI0819 11:17:44.155889 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:17:44.155905 17344 solver.cpp:244]     Train net output #1: loss = 0.00013107 (* 1 = 0.00013107 loss)\nI0819 11:17:44.255419 17344 sgd_solver.cpp:166] Iteration 70200, lr = 3.5e-05\nI0819 11:20:00.933537 17344 solver.cpp:337] Iteration 70300, Testing net 
(#0)\nI0819 11:21:21.304481 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88156\nI0819 11:21:21.304786 17344 solver.cpp:404]     Test net output #1: loss = 0.615515 (* 1 = 0.615515 loss)\nI0819 11:21:22.614504 17344 solver.cpp:228] Iteration 70300, loss = 0.00022233\nI0819 11:21:22.614550 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:21:22.614567 17344 solver.cpp:244]     Train net output #1: loss = 0.000222068 (* 1 = 0.000222068 loss)\nI0819 11:21:22.708122 17344 sgd_solver.cpp:166] Iteration 70300, lr = 3.5e-05\nI0819 11:23:39.688092 17344 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 11:25:01.108686 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88344\nI0819 11:25:01.109045 17344 solver.cpp:404]     Test net output #1: loss = 0.603953 (* 1 = 0.603953 loss)\nI0819 11:25:02.421979 17344 solver.cpp:228] Iteration 70400, loss = 0.000151326\nI0819 11:25:02.422022 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:25:02.422039 17344 solver.cpp:244]     Train net output #1: loss = 0.000151065 (* 1 = 0.000151065 loss)\nI0819 11:25:02.511662 17344 sgd_solver.cpp:166] Iteration 70400, lr = 3.5e-05\nI0819 11:27:19.427801 17344 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 11:28:40.760475 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89064\nI0819 11:28:40.760861 17344 solver.cpp:404]     Test net output #1: loss = 0.57446 (* 1 = 0.57446 loss)\nI0819 11:28:42.074446 17344 solver.cpp:228] Iteration 70500, loss = 0.000154163\nI0819 11:28:42.074504 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:28:42.074529 17344 solver.cpp:244]     Train net output #1: loss = 0.000153901 (* 1 = 0.000153901 loss)\nI0819 11:28:42.165977 17344 sgd_solver.cpp:166] Iteration 70500, lr = 3.5e-05\nI0819 11:30:59.109266 17344 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 11:32:20.545573 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89004\nI0819 
11:32:20.545936 17344 solver.cpp:404]     Test net output #1: loss = 0.573248 (* 1 = 0.573248 loss)\nI0819 11:32:21.858630 17344 solver.cpp:228] Iteration 70600, loss = 0.000149263\nI0819 11:32:21.858692 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:32:21.858716 17344 solver.cpp:244]     Train net output #1: loss = 0.000149001 (* 1 = 0.000149001 loss)\nI0819 11:32:21.949240 17344 sgd_solver.cpp:166] Iteration 70600, lr = 3.5e-05\nI0819 11:34:38.985611 17344 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 11:36:00.405047 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89532\nI0819 11:36:00.405438 17344 solver.cpp:404]     Test net output #1: loss = 0.551942 (* 1 = 0.551942 loss)\nI0819 11:36:01.724632 17344 solver.cpp:228] Iteration 70700, loss = 0.000208879\nI0819 11:36:01.724684 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:36:01.724701 17344 solver.cpp:244]     Train net output #1: loss = 0.000208618 (* 1 = 0.000208618 loss)\nI0819 11:36:01.804154 17344 sgd_solver.cpp:166] Iteration 70700, lr = 3.5e-05\nI0819 11:38:18.796433 17344 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0819 11:39:40.193228 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89488\nI0819 11:39:40.193615 17344 solver.cpp:404]     Test net output #1: loss = 0.555716 (* 1 = 0.555716 loss)\nI0819 11:39:41.506492 17344 solver.cpp:228] Iteration 70800, loss = 0.000146135\nI0819 11:39:41.506551 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:39:41.506569 17344 solver.cpp:244]     Train net output #1: loss = 0.000145874 (* 1 = 0.000145874 loss)\nI0819 11:39:41.601904 17344 sgd_solver.cpp:166] Iteration 70800, lr = 3.5e-05\nI0819 11:41:58.644762 17344 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 11:43:20.044570 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89944\nI0819 11:43:20.044960 17344 solver.cpp:404]     Test net output #1: loss = 0.539308 (* 1 = 0.539308 loss)\nI0819 
11:43:21.353821 17344 solver.cpp:228] Iteration 70900, loss = 0.000225536\nI0819 11:43:21.353868 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:43:21.353891 17344 solver.cpp:244]     Train net output #1: loss = 0.000225274 (* 1 = 0.000225274 loss)\nI0819 11:43:21.444046 17344 sgd_solver.cpp:166] Iteration 70900, lr = 3.5e-05\nI0819 11:45:38.502197 17344 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 11:46:59.899092 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8978\nI0819 11:46:59.899466 17344 solver.cpp:404]     Test net output #1: loss = 0.545198 (* 1 = 0.545198 loss)\nI0819 11:47:01.210420 17344 solver.cpp:228] Iteration 71000, loss = 0.000188564\nI0819 11:47:01.210479 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:47:01.210497 17344 solver.cpp:244]     Train net output #1: loss = 0.000188303 (* 1 = 0.000188303 loss)\nI0819 11:47:01.301265 17344 sgd_solver.cpp:166] Iteration 71000, lr = 3.5e-05\nI0819 11:49:18.345216 17344 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 11:50:39.732270 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90152\nI0819 11:50:39.732650 17344 solver.cpp:404]     Test net output #1: loss = 0.531381 (* 1 = 0.531381 loss)\nI0819 11:50:41.042081 17344 solver.cpp:228] Iteration 71100, loss = 0.000128051\nI0819 11:50:41.042125 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:50:41.042141 17344 solver.cpp:244]     Train net output #1: loss = 0.00012779 (* 1 = 0.00012779 loss)\nI0819 11:50:41.132640 17344 sgd_solver.cpp:166] Iteration 71100, lr = 3.5e-05\nI0819 11:52:58.268667 17344 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 11:54:19.683837 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90028\nI0819 11:54:19.684211 17344 solver.cpp:404]     Test net output #1: loss = 0.5384 (* 1 = 0.5384 loss)\nI0819 11:54:20.993202 17344 solver.cpp:228] Iteration 71200, loss = 0.000196811\nI0819 11:54:20.993261 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:54:20.993279 17344 solver.cpp:244]     Train net output #1: loss = 0.00019655 (* 1 = 0.00019655 loss)\nI0819 11:54:21.085305 17344 sgd_solver.cpp:166] Iteration 71200, lr = 3.5e-05\nI0819 11:56:38.021677 17344 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 11:57:59.422751 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 11:57:59.423157 17344 solver.cpp:404]     Test net output #1: loss = 0.526791 (* 1 = 0.526791 loss)\nI0819 11:58:00.732341 17344 solver.cpp:228] Iteration 71300, loss = 0.000178761\nI0819 11:58:00.732399 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:58:00.732417 17344 solver.cpp:244]     Train net output #1: loss = 0.0001785 (* 1 = 0.0001785 loss)\nI0819 11:58:00.824918 17344 sgd_solver.cpp:166] Iteration 71300, lr = 3.5e-05\nI0819 12:00:17.903049 17344 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 12:01:39.310494 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9016\nI0819 12:01:39.310897 17344 solver.cpp:404]     Test net output #1: loss = 0.53407 (* 1 = 0.53407 loss)\nI0819 12:01:40.619982 17344 solver.cpp:228] Iteration 71400, loss = 0.000140303\nI0819 12:01:40.620023 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:01:40.620039 17344 solver.cpp:244]     Train net output #1: loss = 0.000140042 (* 1 = 0.000140042 loss)\nI0819 12:01:40.717207 17344 sgd_solver.cpp:166] Iteration 71400, lr = 3.5e-05\nI0819 12:03:57.753381 17344 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:05:19.170163 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90436\nI0819 12:05:19.170542 17344 solver.cpp:404]     Test net output #1: loss = 0.523793 (* 1 = 0.523793 loss)\nI0819 12:05:20.479524 17344 solver.cpp:228] Iteration 71500, loss = 0.000170629\nI0819 12:05:20.479584 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:05:20.479601 17344 solver.cpp:244]     Train net 
output #1: loss = 0.000170368 (* 1 = 0.000170368 loss)\nI0819 12:05:20.570647 17344 sgd_solver.cpp:166] Iteration 71500, lr = 3.5e-05\nI0819 12:07:37.602509 17344 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:08:59.011739 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90192\nI0819 12:08:59.012115 17344 solver.cpp:404]     Test net output #1: loss = 0.530719 (* 1 = 0.530719 loss)\nI0819 12:09:00.321326 17344 solver.cpp:228] Iteration 71600, loss = 0.000140954\nI0819 12:09:00.321383 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:09:00.321400 17344 solver.cpp:244]     Train net output #1: loss = 0.000140693 (* 1 = 0.000140693 loss)\nI0819 12:09:00.415155 17344 sgd_solver.cpp:166] Iteration 71600, lr = 3.5e-05\nI0819 12:11:17.485900 17344 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:12:38.882779 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90432\nI0819 12:12:38.883160 17344 solver.cpp:404]     Test net output #1: loss = 0.5214 (* 1 = 0.5214 loss)\nI0819 12:12:40.192374 17344 solver.cpp:228] Iteration 71700, loss = 0.000163483\nI0819 12:12:40.192414 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:12:40.192430 17344 solver.cpp:244]     Train net output #1: loss = 0.000163222 (* 1 = 0.000163222 loss)\nI0819 12:12:40.285635 17344 sgd_solver.cpp:166] Iteration 71700, lr = 3.5e-05\nI0819 12:14:57.252990 17344 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 12:16:18.656898 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 12:16:18.657274 17344 solver.cpp:404]     Test net output #1: loss = 0.528336 (* 1 = 0.528336 loss)\nI0819 12:16:19.966600 17344 solver.cpp:228] Iteration 71800, loss = 0.000154238\nI0819 12:16:19.966641 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:16:19.966657 17344 solver.cpp:244]     Train net output #1: loss = 0.000153977 (* 1 = 0.000153977 loss)\nI0819 12:16:20.057867 17344 sgd_solver.cpp:166] 
Iteration 71800, lr = 3.5e-05\nI0819 12:18:36.966423 17344 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 12:19:58.375160 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0819 12:19:58.375550 17344 solver.cpp:404]     Test net output #1: loss = 0.519484 (* 1 = 0.519484 loss)\nI0819 12:19:59.684147 17344 solver.cpp:228] Iteration 71900, loss = 0.00015245\nI0819 12:19:59.684190 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:19:59.684206 17344 solver.cpp:244]     Train net output #1: loss = 0.000152188 (* 1 = 0.000152188 loss)\nI0819 12:19:59.776376 17344 sgd_solver.cpp:166] Iteration 71900, lr = 3.5e-05\nI0819 12:22:16.662117 17344 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 12:23:38.070014 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 12:23:38.070385 17344 solver.cpp:404]     Test net output #1: loss = 0.52673 (* 1 = 0.52673 loss)\nI0819 12:23:39.379994 17344 solver.cpp:228] Iteration 72000, loss = 0.000166777\nI0819 12:23:39.380048 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:23:39.380064 17344 solver.cpp:244]     Train net output #1: loss = 0.000166516 (* 1 = 0.000166516 loss)\nI0819 12:23:39.472663 17344 sgd_solver.cpp:166] Iteration 72000, lr = 3.5e-05\nI0819 12:25:56.884028 17344 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 12:27:18.286540 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90496\nI0819 12:27:18.286916 17344 solver.cpp:404]     Test net output #1: loss = 0.517871 (* 1 = 0.517871 loss)\nI0819 12:27:19.596132 17344 solver.cpp:228] Iteration 72100, loss = 0.00018671\nI0819 12:27:19.596182 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:27:19.596199 17344 solver.cpp:244]     Train net output #1: loss = 0.000186449 (* 1 = 0.000186449 loss)\nI0819 12:27:19.690902 17344 sgd_solver.cpp:166] Iteration 72100, lr = 3.5e-05\nI0819 12:29:37.063210 17344 solver.cpp:337] Iteration 72200, Testing net 
(#0)\nI0819 12:30:58.465075 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 12:30:58.465432 17344 solver.cpp:404]     Test net output #1: loss = 0.525224 (* 1 = 0.525224 loss)\nI0819 12:30:59.774685 17344 solver.cpp:228] Iteration 72200, loss = 0.000211451\nI0819 12:30:59.774724 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:30:59.774739 17344 solver.cpp:244]     Train net output #1: loss = 0.00021119 (* 1 = 0.00021119 loss)\nI0819 12:30:59.867552 17344 sgd_solver.cpp:166] Iteration 72200, lr = 3.5e-05\nI0819 12:33:17.186408 17344 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 12:34:38.583250 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90484\nI0819 12:34:38.583657 17344 solver.cpp:404]     Test net output #1: loss = 0.516545 (* 1 = 0.516545 loss)\nI0819 12:34:39.892621 17344 solver.cpp:228] Iteration 72300, loss = 0.000130326\nI0819 12:34:39.892659 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:34:39.892675 17344 solver.cpp:244]     Train net output #1: loss = 0.000130065 (* 1 = 0.000130065 loss)\nI0819 12:34:39.985924 17344 sgd_solver.cpp:166] Iteration 72300, lr = 3.5e-05\nI0819 12:36:57.245923 17344 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 12:38:18.648530 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9022\nI0819 12:38:18.648907 17344 solver.cpp:404]     Test net output #1: loss = 0.52387 (* 1 = 0.52387 loss)\nI0819 12:38:19.957875 17344 solver.cpp:228] Iteration 72400, loss = 0.000202536\nI0819 12:38:19.957921 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:38:19.957937 17344 solver.cpp:244]     Train net output #1: loss = 0.000202275 (* 1 = 0.000202275 loss)\nI0819 12:38:20.050621 17344 sgd_solver.cpp:166] Iteration 72400, lr = 3.5e-05\nI0819 12:40:37.309880 17344 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 12:41:58.709246 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0819 
12:41:58.709645 17344 solver.cpp:404]     Test net output #1: loss = 0.514664 (* 1 = 0.514664 loss)\nI0819 12:42:00.018474 17344 solver.cpp:228] Iteration 72500, loss = 0.000189041\nI0819 12:42:00.018517 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:42:00.018532 17344 solver.cpp:244]     Train net output #1: loss = 0.000188779 (* 1 = 0.000188779 loss)\nI0819 12:42:00.111608 17344 sgd_solver.cpp:166] Iteration 72500, lr = 3.5e-05\nI0819 12:44:17.382658 17344 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 12:45:38.798759 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90228\nI0819 12:45:38.799118 17344 solver.cpp:404]     Test net output #1: loss = 0.521707 (* 1 = 0.521707 loss)\nI0819 12:45:40.109596 17344 solver.cpp:228] Iteration 72600, loss = 0.000179938\nI0819 12:45:40.109639 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:45:40.109655 17344 solver.cpp:244]     Train net output #1: loss = 0.000179677 (* 1 = 0.000179677 loss)\nI0819 12:45:40.199769 17344 sgd_solver.cpp:166] Iteration 72600, lr = 3.5e-05\nI0819 12:47:57.527758 17344 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 12:49:18.947224 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9052\nI0819 12:49:18.947638 17344 solver.cpp:404]     Test net output #1: loss = 0.512736 (* 1 = 0.512736 loss)\nI0819 12:49:20.257866 17344 solver.cpp:228] Iteration 72700, loss = 0.00014819\nI0819 12:49:20.257927 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:49:20.257944 17344 solver.cpp:244]     Train net output #1: loss = 0.000147929 (* 1 = 0.000147929 loss)\nI0819 12:49:20.345149 17344 sgd_solver.cpp:166] Iteration 72700, lr = 3.5e-05\nI0819 12:51:37.675031 17344 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 12:52:59.082487 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90268\nI0819 12:52:59.082877 17344 solver.cpp:404]     Test net output #1: loss = 0.519678 (* 1 = 0.519678 loss)\nI0819 
12:53:00.393283 17344 solver.cpp:228] Iteration 72800, loss = 0.000142148\nI0819 12:53:00.393343 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:53:00.393359 17344 solver.cpp:244]     Train net output #1: loss = 0.000141887 (* 1 = 0.000141887 loss)\nI0819 12:53:00.484256 17344 sgd_solver.cpp:166] Iteration 72800, lr = 3.5e-05\nI0819 12:55:17.796779 17344 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 12:56:39.209146 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9052\nI0819 12:56:39.209519 17344 solver.cpp:404]     Test net output #1: loss = 0.510708 (* 1 = 0.510708 loss)\nI0819 12:56:40.519871 17344 solver.cpp:228] Iteration 72900, loss = 0.000164117\nI0819 12:56:40.519932 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:56:40.519948 17344 solver.cpp:244]     Train net output #1: loss = 0.000163856 (* 1 = 0.000163856 loss)\nI0819 12:56:40.608611 17344 sgd_solver.cpp:166] Iteration 72900, lr = 3.5e-05\nI0819 12:58:57.865197 17344 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 13:00:19.278162 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90312\nI0819 13:00:19.278549 17344 solver.cpp:404]     Test net output #1: loss = 0.517249 (* 1 = 0.517249 loss)\nI0819 13:00:20.589551 17344 solver.cpp:228] Iteration 73000, loss = 0.000204715\nI0819 13:00:20.589615 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:00:20.589639 17344 solver.cpp:244]     Train net output #1: loss = 0.000204453 (* 1 = 0.000204453 loss)\nI0819 13:00:20.674753 17344 sgd_solver.cpp:166] Iteration 73000, lr = 3.5e-05\nI0819 13:02:37.944315 17344 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:03:59.114425 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9062\nI0819 13:03:59.114804 17344 solver.cpp:404]     Test net output #1: loss = 0.507841 (* 1 = 0.507841 loss)\nI0819 13:04:00.424000 17344 solver.cpp:228] Iteration 73100, loss = 0.000175243\nI0819 13:04:00.424042 17344 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:04:00.424059 17344 solver.cpp:244]     Train net output #1: loss = 0.000174981 (* 1 = 0.000174981 loss)\nI0819 13:04:00.524984 17344 sgd_solver.cpp:166] Iteration 73100, lr = 3.5e-05\nI0819 13:06:17.859858 17344 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:07:38.896267 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 13:07:38.896587 17344 solver.cpp:404]     Test net output #1: loss = 0.514636 (* 1 = 0.514636 loss)\nI0819 13:07:40.207234 17344 solver.cpp:228] Iteration 73200, loss = 0.000160239\nI0819 13:07:40.207278 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:07:40.207294 17344 solver.cpp:244]     Train net output #1: loss = 0.000159978 (* 1 = 0.000159978 loss)\nI0819 13:07:40.300796 17344 sgd_solver.cpp:166] Iteration 73200, lr = 3.5e-05\nI0819 13:09:57.661362 17344 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:11:19.021473 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90604\nI0819 13:11:19.021829 17344 solver.cpp:404]     Test net output #1: loss = 0.505499 (* 1 = 0.505499 loss)\nI0819 13:11:20.330938 17344 solver.cpp:228] Iteration 73300, loss = 0.000236195\nI0819 13:11:20.331001 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:11:20.331018 17344 solver.cpp:244]     Train net output #1: loss = 0.000235934 (* 1 = 0.000235934 loss)\nI0819 13:11:20.422417 17344 sgd_solver.cpp:166] Iteration 73300, lr = 3.5e-05\nI0819 13:13:37.744863 17344 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 13:14:58.926861 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 13:14:58.927239 17344 solver.cpp:404]     Test net output #1: loss = 0.512063 (* 1 = 0.512063 loss)\nI0819 13:15:00.237650 17344 solver.cpp:228] Iteration 73400, loss = 0.000199381\nI0819 13:15:00.237704 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:15:00.237721 17344 solver.cpp:244]     
Train net output #1: loss = 0.00019912 (* 1 = 0.00019912 loss)\nI0819 13:15:00.327134 17344 sgd_solver.cpp:166] Iteration 73400, lr = 3.5e-05\nI0819 13:17:17.656767 17344 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 13:18:39.043681 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90648\nI0819 13:18:39.044087 17344 solver.cpp:404]     Test net output #1: loss = 0.502246 (* 1 = 0.502246 loss)\nI0819 13:18:40.354081 17344 solver.cpp:228] Iteration 73500, loss = 0.000159247\nI0819 13:18:40.354141 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:18:40.354161 17344 solver.cpp:244]     Train net output #1: loss = 0.000158986 (* 1 = 0.000158986 loss)\nI0819 13:18:40.439774 17344 sgd_solver.cpp:166] Iteration 73500, lr = 3.5e-05\nI0819 13:20:57.840689 17344 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 13:22:19.232280 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90384\nI0819 13:22:19.232673 17344 solver.cpp:404]     Test net output #1: loss = 0.508564 (* 1 = 0.508564 loss)\nI0819 13:22:20.542140 17344 solver.cpp:228] Iteration 73600, loss = 0.000155761\nI0819 13:22:20.542186 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:22:20.542201 17344 solver.cpp:244]     Train net output #1: loss = 0.0001555 (* 1 = 0.0001555 loss)\nI0819 13:22:20.633831 17344 sgd_solver.cpp:166] Iteration 73600, lr = 3.5e-05\nI0819 13:24:38.089416 17344 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 13:25:59.567667 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90644\nI0819 13:25:59.568078 17344 solver.cpp:404]     Test net output #1: loss = 0.498453 (* 1 = 0.498453 loss)\nI0819 13:26:00.877357 17344 solver.cpp:228] Iteration 73700, loss = 0.000155836\nI0819 13:26:00.877418 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:26:00.877437 17344 solver.cpp:244]     Train net output #1: loss = 0.000155575 (* 1 = 0.000155575 loss)\nI0819 13:26:00.965456 17344 
sgd_solver.cpp:166] Iteration 73700, lr = 3.5e-05\nI0819 13:28:18.440248 17344 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 13:29:39.811260 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90364\nI0819 13:29:39.811594 17344 solver.cpp:404]     Test net output #1: loss = 0.505766 (* 1 = 0.505766 loss)\nI0819 13:29:41.120654 17344 solver.cpp:228] Iteration 73800, loss = 0.000171459\nI0819 13:29:41.120718 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:29:41.120743 17344 solver.cpp:244]     Train net output #1: loss = 0.000171198 (* 1 = 0.000171198 loss)\nI0819 13:29:41.211197 17344 sgd_solver.cpp:166] Iteration 73800, lr = 3.5e-05\nI0819 13:31:58.481966 17344 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 13:33:19.854429 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90672\nI0819 13:33:19.854825 17344 solver.cpp:404]     Test net output #1: loss = 0.495676 (* 1 = 0.495676 loss)\nI0819 13:33:21.165046 17344 solver.cpp:228] Iteration 73900, loss = 0.000177429\nI0819 13:33:21.165114 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:33:21.165140 17344 solver.cpp:244]     Train net output #1: loss = 0.000177167 (* 1 = 0.000177167 loss)\nI0819 13:33:21.256268 17344 sgd_solver.cpp:166] Iteration 73900, lr = 3.5e-05\nI0819 13:35:38.610276 17344 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 13:37:00.003715 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 13:37:00.004099 17344 solver.cpp:404]     Test net output #1: loss = 0.502166 (* 1 = 0.502166 loss)\nI0819 13:37:01.313401 17344 solver.cpp:228] Iteration 74000, loss = 0.00017116\nI0819 13:37:01.313462 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:37:01.313486 17344 solver.cpp:244]     Train net output #1: loss = 0.000170899 (* 1 = 0.000170899 loss)\nI0819 13:37:01.406515 17344 sgd_solver.cpp:166] Iteration 74000, lr = 3.5e-05\nI0819 13:39:18.805218 17344 solver.cpp:337] Iteration 
74100, Testing net (#0)\nI0819 13:40:40.199290 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90664\nI0819 13:40:40.199674 17344 solver.cpp:404]     Test net output #1: loss = 0.491723 (* 1 = 0.491723 loss)\nI0819 13:40:41.508682 17344 solver.cpp:228] Iteration 74100, loss = 0.000207477\nI0819 13:40:41.508744 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:40:41.508769 17344 solver.cpp:244]     Train net output #1: loss = 0.000207216 (* 1 = 0.000207216 loss)\nI0819 13:40:41.598642 17344 sgd_solver.cpp:166] Iteration 74100, lr = 3.5e-05\nI0819 13:42:58.911491 17344 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 13:44:20.350790 17344 solver.cpp:404]     Test net output #0: accuracy = 0.903641\nI0819 13:44:20.351166 17344 solver.cpp:404]     Test net output #1: loss = 0.498477 (* 1 = 0.498477 loss)\nI0819 13:44:21.664798 17344 solver.cpp:228] Iteration 74200, loss = 0.000173756\nI0819 13:44:21.664842 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:44:21.664865 17344 solver.cpp:244]     Train net output #1: loss = 0.000173495 (* 1 = 0.000173495 loss)\nI0819 13:44:21.757984 17344 sgd_solver.cpp:166] Iteration 74200, lr = 3.5e-05\nI0819 13:46:39.049404 17344 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 13:48:00.487967 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90724\nI0819 13:48:00.488312 17344 solver.cpp:404]     Test net output #1: loss = 0.488727 (* 1 = 0.488727 loss)\nI0819 13:48:01.798310 17344 solver.cpp:228] Iteration 74300, loss = 0.000171161\nI0819 13:48:01.798352 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:48:01.798368 17344 solver.cpp:244]     Train net output #1: loss = 0.0001709 (* 1 = 0.0001709 loss)\nI0819 13:48:01.891337 17344 sgd_solver.cpp:166] Iteration 74300, lr = 3.5e-05\nI0819 13:50:19.296500 17344 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 13:51:40.700548 17344 solver.cpp:404]     Test net output #0: accuracy = 
0.90384\nI0819 13:51:40.700934 17344 solver.cpp:404]     Test net output #1: loss = 0.495358 (* 1 = 0.495358 loss)\nI0819 13:51:42.011019 17344 solver.cpp:228] Iteration 74400, loss = 0.000197032\nI0819 13:51:42.011061 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:51:42.011077 17344 solver.cpp:244]     Train net output #1: loss = 0.000196771 (* 1 = 0.000196771 loss)\nI0819 13:51:42.095712 17344 sgd_solver.cpp:166] Iteration 74400, lr = 3.5e-05\nI0819 13:53:59.376989 17344 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 13:55:20.780879 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90708\nI0819 13:55:20.781230 17344 solver.cpp:404]     Test net output #1: loss = 0.485159 (* 1 = 0.485159 loss)\nI0819 13:55:22.090626 17344 solver.cpp:228] Iteration 74500, loss = 0.000212252\nI0819 13:55:22.090684 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:55:22.090701 17344 solver.cpp:244]     Train net output #1: loss = 0.000211991 (* 1 = 0.000211991 loss)\nI0819 13:55:22.178336 17344 sgd_solver.cpp:166] Iteration 74500, lr = 3.5e-05\nI0819 13:57:39.516034 17344 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 13:59:00.756871 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 13:59:00.757253 17344 solver.cpp:404]     Test net output #1: loss = 0.492348 (* 1 = 0.492348 loss)\nI0819 13:59:02.066828 17344 solver.cpp:228] Iteration 74600, loss = 0.000194785\nI0819 13:59:02.066884 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:59:02.066901 17344 solver.cpp:244]     Train net output #1: loss = 0.000194523 (* 1 = 0.000194523 loss)\nI0819 13:59:02.158948 17344 sgd_solver.cpp:166] Iteration 74600, lr = 3.5e-05\nI0819 14:01:19.485882 17344 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 14:02:40.846961 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90696\nI0819 14:02:40.847342 17344 solver.cpp:404]     Test net output #1: loss = 0.481872 (* 1 = 
0.481872 loss)\nI0819 14:02:42.157133 17344 solver.cpp:228] Iteration 74700, loss = 0.000216172\nI0819 14:02:42.157191 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:02:42.157209 17344 solver.cpp:244]     Train net output #1: loss = 0.00021591 (* 1 = 0.00021591 loss)\nI0819 14:02:42.248363 17344 sgd_solver.cpp:166] Iteration 74700, lr = 3.5e-05\nI0819 14:04:59.517293 17344 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:06:20.592428 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90344\nI0819 14:06:20.592810 17344 solver.cpp:404]     Test net output #1: loss = 0.488258 (* 1 = 0.488258 loss)\nI0819 14:06:21.901923 17344 solver.cpp:228] Iteration 74800, loss = 0.000204674\nI0819 14:06:21.901978 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:06:21.901995 17344 solver.cpp:244]     Train net output #1: loss = 0.000204412 (* 1 = 0.000204412 loss)\nI0819 14:06:21.994424 17344 sgd_solver.cpp:166] Iteration 74800, lr = 3.5e-05\nI0819 14:08:39.281998 17344 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 14:10:00.307807 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90672\nI0819 14:10:00.308154 17344 solver.cpp:404]     Test net output #1: loss = 0.47715 (* 1 = 0.47715 loss)\nI0819 14:10:01.617566 17344 solver.cpp:228] Iteration 74900, loss = 0.000174768\nI0819 14:10:01.617622 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:10:01.617640 17344 solver.cpp:244]     Train net output #1: loss = 0.000174507 (* 1 = 0.000174507 loss)\nI0819 14:10:01.711141 17344 sgd_solver.cpp:166] Iteration 74900, lr = 3.5e-05\nI0819 14:12:19.037245 17344 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 14:13:40.377955 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 14:13:40.378331 17344 solver.cpp:404]     Test net output #1: loss = 0.485376 (* 1 = 0.485376 loss)\nI0819 14:13:41.688485 17344 solver.cpp:228] Iteration 75000, loss = 0.000186565\nI0819 
14:13:41.688540 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:13:41.688557 17344 solver.cpp:244]     Train net output #1: loss = 0.000186304 (* 1 = 0.000186304 loss)\nI0819 14:13:41.780778 17344 sgd_solver.cpp:166] Iteration 75000, lr = 3.5e-05\nI0819 14:15:59.029191 17344 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 14:17:20.449733 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90592\nI0819 14:17:20.450119 17344 solver.cpp:404]     Test net output #1: loss = 0.474179 (* 1 = 0.474179 loss)\nI0819 14:17:21.760128 17344 solver.cpp:228] Iteration 75100, loss = 0.000157057\nI0819 14:17:21.760179 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:17:21.760197 17344 solver.cpp:244]     Train net output #1: loss = 0.000156795 (* 1 = 0.000156795 loss)\nI0819 14:17:21.851433 17344 sgd_solver.cpp:166] Iteration 75100, lr = 3.5e-05\nI0819 14:19:39.168397 17344 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 14:21:00.576040 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90312\nI0819 14:21:00.576438 17344 solver.cpp:404]     Test net output #1: loss = 0.481131 (* 1 = 0.481131 loss)\nI0819 14:21:01.884915 17344 solver.cpp:228] Iteration 75200, loss = 0.000189472\nI0819 14:21:01.884968 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:21:01.884985 17344 solver.cpp:244]     Train net output #1: loss = 0.000189211 (* 1 = 0.000189211 loss)\nI0819 14:21:01.978683 17344 sgd_solver.cpp:166] Iteration 75200, lr = 3.5e-05\nI0819 14:23:19.281836 17344 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 14:24:40.696157 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90464\nI0819 14:24:40.696560 17344 solver.cpp:404]     Test net output #1: loss = 0.471698 (* 1 = 0.471698 loss)\nI0819 14:24:42.005578 17344 solver.cpp:228] Iteration 75300, loss = 0.000166463\nI0819 14:24:42.005632 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:24:42.005650 17344 
solver.cpp:244]     Train net output #1: loss = 0.000166202 (* 1 = 0.000166202 loss)\nI0819 14:24:42.099463 17344 sgd_solver.cpp:166] Iteration 75300, lr = 3.5e-05\nI0819 14:26:59.422588 17344 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 14:28:20.849895 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90188\nI0819 14:28:20.850239 17344 solver.cpp:404]     Test net output #1: loss = 0.479475 (* 1 = 0.479475 loss)\nI0819 14:28:22.160281 17344 solver.cpp:228] Iteration 75400, loss = 0.000168124\nI0819 14:28:22.160337 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:28:22.160354 17344 solver.cpp:244]     Train net output #1: loss = 0.000167863 (* 1 = 0.000167863 loss)\nI0819 14:28:22.253020 17344 sgd_solver.cpp:166] Iteration 75400, lr = 3.5e-05\nI0819 14:30:39.532269 17344 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 14:32:00.966182 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90356\nI0819 14:32:00.966589 17344 solver.cpp:404]     Test net output #1: loss = 0.470346 (* 1 = 0.470346 loss)\nI0819 14:32:02.276023 17344 solver.cpp:228] Iteration 75500, loss = 0.000149763\nI0819 14:32:02.276077 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:32:02.276094 17344 solver.cpp:244]     Train net output #1: loss = 0.000149502 (* 1 = 0.000149502 loss)\nI0819 14:32:02.368253 17344 sgd_solver.cpp:166] Iteration 75500, lr = 3.5e-05\nI0819 14:34:19.655726 17344 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 14:35:41.091115 17344 solver.cpp:404]     Test net output #0: accuracy = 0.9006\nI0819 14:35:41.091516 17344 solver.cpp:404]     Test net output #1: loss = 0.479373 (* 1 = 0.479373 loss)\nI0819 14:35:42.400475 17344 solver.cpp:228] Iteration 75600, loss = 0.000197383\nI0819 14:35:42.400532 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:35:42.400548 17344 solver.cpp:244]     Train net output #1: loss = 0.000197122 (* 1 = 0.000197122 loss)\nI0819 14:35:42.496940 
17344 sgd_solver.cpp:166] Iteration 75600, lr = 3.5e-05\nI0819 14:37:59.878119 17344 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 14:39:21.313901 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90204\nI0819 14:39:21.314293 17344 solver.cpp:404]     Test net output #1: loss = 0.469605 (* 1 = 0.469605 loss)\nI0819 14:39:22.625113 17344 solver.cpp:228] Iteration 75700, loss = 0.000204255\nI0819 14:39:22.625171 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:39:22.625190 17344 solver.cpp:244]     Train net output #1: loss = 0.000203994 (* 1 = 0.000203994 loss)\nI0819 14:39:22.711756 17344 sgd_solver.cpp:166] Iteration 75700, lr = 3.5e-05\nI0819 14:41:40.003516 17344 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 14:43:01.421882 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89952\nI0819 14:43:01.422269 17344 solver.cpp:404]     Test net output #1: loss = 0.477845 (* 1 = 0.477845 loss)\nI0819 14:43:02.733119 17344 solver.cpp:228] Iteration 75800, loss = 0.000176269\nI0819 14:43:02.733175 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:43:02.733199 17344 solver.cpp:244]     Train net output #1: loss = 0.000176007 (* 1 = 0.000176007 loss)\nI0819 14:43:02.826712 17344 sgd_solver.cpp:166] Iteration 75800, lr = 3.5e-05\nI0819 14:45:20.197579 17344 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 14:46:41.627053 17344 solver.cpp:404]     Test net output #0: accuracy = 0.90196\nI0819 14:46:41.627463 17344 solver.cpp:404]     Test net output #1: loss = 0.469414 (* 1 = 0.469414 loss)\nI0819 14:46:42.938150 17344 solver.cpp:228] Iteration 75900, loss = 0.000193809\nI0819 14:46:42.938215 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:46:42.938241 17344 solver.cpp:244]     Train net output #1: loss = 0.000193548 (* 1 = 0.000193548 loss)\nI0819 14:46:43.036646 17344 sgd_solver.cpp:166] Iteration 75900, lr = 3.5e-05\nI0819 14:49:00.405597 17344 solver.cpp:337] 
Iteration 76000, Testing net (#0)\nI0819 14:50:21.832213 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89812\nI0819 14:50:21.832612 17344 solver.cpp:404]     Test net output #1: loss = 0.476895 (* 1 = 0.476895 loss)\nI0819 14:50:23.143124 17344 solver.cpp:228] Iteration 76000, loss = 0.00022894\nI0819 14:50:23.143185 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:50:23.143210 17344 solver.cpp:244]     Train net output #1: loss = 0.000228679 (* 1 = 0.000228679 loss)\nI0819 14:50:23.238276 17344 sgd_solver.cpp:166] Iteration 76000, lr = 3.5e-05\nI0819 14:52:40.525362 17344 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 14:54:01.923672 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89888\nI0819 14:54:01.924072 17344 solver.cpp:404]     Test net output #1: loss = 0.47134 (* 1 = 0.47134 loss)\nI0819 14:54:03.232961 17344 solver.cpp:228] Iteration 76100, loss = 0.000200678\nI0819 14:54:03.233021 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:54:03.233038 17344 solver.cpp:244]     Train net output #1: loss = 0.000200417 (* 1 = 0.000200417 loss)\nI0819 14:54:03.328403 17344 sgd_solver.cpp:166] Iteration 76100, lr = 3.5e-05\nI0819 14:56:20.708022 17344 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 14:57:42.119170 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8948\nI0819 14:57:42.119550 17344 solver.cpp:404]     Test net output #1: loss = 0.480159 (* 1 = 0.480159 loss)\nI0819 14:57:43.428455 17344 solver.cpp:228] Iteration 76200, loss = 0.000187403\nI0819 14:57:43.428515 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:57:43.428534 17344 solver.cpp:244]     Train net output #1: loss = 0.000187142 (* 1 = 0.000187142 loss)\nI0819 14:57:43.519672 17344 sgd_solver.cpp:166] Iteration 76200, lr = 3.5e-05\nI0819 15:00:00.812268 17344 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 15:01:22.221397 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.89572\nI0819 15:01:22.221786 17344 solver.cpp:404]     Test net output #1: loss = 0.475728 (* 1 = 0.475728 loss)\nI0819 15:01:23.531625 17344 solver.cpp:228] Iteration 76300, loss = 0.000173455\nI0819 15:01:23.531684 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:01:23.531702 17344 solver.cpp:244]     Train net output #1: loss = 0.000173194 (* 1 = 0.000173194 loss)\nI0819 15:01:23.629169 17344 sgd_solver.cpp:166] Iteration 76300, lr = 3.5e-05\nI0819 15:03:40.989491 17344 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:05:02.398730 17344 solver.cpp:404]     Test net output #0: accuracy = 0.89176\nI0819 15:05:02.399108 17344 solver.cpp:404]     Test net output #1: loss = 0.484669 (* 1 = 0.484669 loss)\nI0819 15:05:03.708611 17344 solver.cpp:228] Iteration 76400, loss = 0.000185055\nI0819 15:05:03.708670 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:05:03.708688 17344 solver.cpp:244]     Train net output #1: loss = 0.000184794 (* 1 = 0.000184794 loss)\nI0819 15:05:03.801638 17344 sgd_solver.cpp:166] Iteration 76400, lr = 3.5e-05\nI0819 15:07:21.103538 17344 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:08:42.521575 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8926\nI0819 15:08:42.521951 17344 solver.cpp:404]     Test net output #1: loss = 0.481892 (* 1 = 0.481892 loss)\nI0819 15:08:43.831337 17344 solver.cpp:228] Iteration 76500, loss = 0.000180401\nI0819 15:08:43.831398 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:08:43.831416 17344 solver.cpp:244]     Train net output #1: loss = 0.00018014 (* 1 = 0.00018014 loss)\nI0819 15:08:43.918463 17344 sgd_solver.cpp:166] Iteration 76500, lr = 3.5e-05\nI0819 15:11:01.279778 17344 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 15:12:22.677989 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88932\nI0819 15:12:22.678375 17344 solver.cpp:404]     Test net output #1: loss = 0.495181 (* 1 
= 0.495181 loss)\nI0819 15:12:23.987226 17344 solver.cpp:228] Iteration 76600, loss = 0.000201583\nI0819 15:12:23.987285 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:12:23.987303 17344 solver.cpp:244]     Train net output #1: loss = 0.000201322 (* 1 = 0.000201322 loss)\nI0819 15:12:24.083874 17344 sgd_solver.cpp:166] Iteration 76600, lr = 3.5e-05\nI0819 15:14:41.428408 17344 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 15:16:02.844333 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88856\nI0819 15:16:02.844712 17344 solver.cpp:404]     Test net output #1: loss = 0.494716 (* 1 = 0.494716 loss)\nI0819 15:16:04.153985 17344 solver.cpp:228] Iteration 76700, loss = 0.00019863\nI0819 15:16:04.154042 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:16:04.154060 17344 solver.cpp:244]     Train net output #1: loss = 0.000198369 (* 1 = 0.000198369 loss)\nI0819 15:16:04.261162 17344 sgd_solver.cpp:166] Iteration 76700, lr = 3.5e-05\nI0819 15:18:21.600332 17344 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 15:19:43.013455 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88372\nI0819 15:19:43.013829 17344 solver.cpp:404]     Test net output #1: loss = 0.509805 (* 1 = 0.509805 loss)\nI0819 15:19:44.323247 17344 solver.cpp:228] Iteration 76800, loss = 0.000233101\nI0819 15:19:44.323307 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:19:44.323324 17344 solver.cpp:244]     Train net output #1: loss = 0.00023284 (* 1 = 0.00023284 loss)\nI0819 15:19:44.418323 17344 sgd_solver.cpp:166] Iteration 76800, lr = 3.5e-05\nI0819 15:22:01.705750 17344 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 15:23:23.082711 17344 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0819 15:23:23.083075 17344 solver.cpp:404]     Test net output #1: loss = 0.503758 (* 1 = 0.503758 loss)\nI0819 15:23:24.392385 17344 solver.cpp:228] Iteration 76900, loss = 0.000196285\nI0819 
15:23:24.392446 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:23:24.392464 17344 solver.cpp:244]     Train net output #1: loss = 0.000196023 (* 1 = 0.000196023 loss)\nI0819 15:23:24.485134 17344 sgd_solver.cpp:166] Iteration 76900, lr = 3.5e-05\nI0819 15:25:41.806932 17344 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 15:27:03.184712 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 15:27:03.184989 17344 solver.cpp:404]     Test net output #1: loss = 0.518384 (* 1 = 0.518384 loss)\nI0819 15:27:04.494215 17344 solver.cpp:228] Iteration 77000, loss = 0.000227052\nI0819 15:27:04.494276 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:27:04.494293 17344 solver.cpp:244]     Train net output #1: loss = 0.00022679 (* 1 = 0.00022679 loss)\nI0819 15:27:04.586802 17344 sgd_solver.cpp:166] Iteration 77000, lr = 3.5e-05\nI0819 15:29:21.837621 17344 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 15:30:43.233772 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0819 15:30:43.234078 17344 solver.cpp:404]     Test net output #1: loss = 0.521531 (* 1 = 0.521531 loss)\nI0819 15:30:44.543169 17344 solver.cpp:228] Iteration 77100, loss = 0.000185036\nI0819 15:30:44.543231 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:30:44.543249 17344 solver.cpp:244]     Train net output #1: loss = 0.000184775 (* 1 = 0.000184775 loss)\nI0819 15:30:44.638463 17344 sgd_solver.cpp:166] Iteration 77100, lr = 3.5e-05\nI0819 15:33:02.054239 17344 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 15:34:23.441598 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87192\nI0819 15:34:23.441897 17344 solver.cpp:404]     Test net output #1: loss = 0.53772 (* 1 = 0.53772 loss)\nI0819 15:34:24.750892 17344 solver.cpp:228] Iteration 77200, loss = 0.000208686\nI0819 15:34:24.750946 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:34:24.750962 17344 
solver.cpp:244]     Train net output #1: loss = 0.000208425 (* 1 = 0.000208425 loss)\nI0819 15:34:24.846179 17344 sgd_solver.cpp:166] Iteration 77200, lr = 3.5e-05\nI0819 15:36:42.156270 17344 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 15:38:03.538839 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86948\nI0819 15:38:03.539141 17344 solver.cpp:404]     Test net output #1: loss = 0.550741 (* 1 = 0.550741 loss)\nI0819 15:38:04.848501 17344 solver.cpp:228] Iteration 77300, loss = 0.000185755\nI0819 15:38:04.848556 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:38:04.848573 17344 solver.cpp:244]     Train net output #1: loss = 0.000185494 (* 1 = 0.000185494 loss)\nI0819 15:38:04.944074 17344 sgd_solver.cpp:166] Iteration 77300, lr = 3.5e-05\nI0819 15:40:22.277304 17344 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 15:41:43.510622 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86324\nI0819 15:41:43.510938 17344 solver.cpp:404]     Test net output #1: loss = 0.576453 (* 1 = 0.576453 loss)\nI0819 15:41:44.819799 17344 solver.cpp:228] Iteration 77400, loss = 0.000215399\nI0819 15:41:44.819859 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:41:44.819878 17344 solver.cpp:244]     Train net output #1: loss = 0.000215138 (* 1 = 0.000215138 loss)\nI0819 15:41:44.912370 17344 sgd_solver.cpp:166] Iteration 77400, lr = 3.5e-05\nI0819 15:44:02.233430 17344 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 15:45:23.616129 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8598\nI0819 15:45:23.616405 17344 solver.cpp:404]     Test net output #1: loss = 0.583335 (* 1 = 0.583335 loss)\nI0819 15:45:24.926414 17344 solver.cpp:228] Iteration 77500, loss = 0.000189891\nI0819 15:45:24.926470 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:45:24.926486 17344 solver.cpp:244]     Train net output #1: loss = 0.00018963 (* 1 = 0.00018963 loss)\nI0819 15:45:25.024435 
17344 sgd_solver.cpp:166] Iteration 77500, lr = 3.5e-05\nI0819 15:47:42.469580 17344 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 15:49:03.864482 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI0819 15:49:03.864760 17344 solver.cpp:404]     Test net output #1: loss = 0.612937 (* 1 = 0.612937 loss)\nI0819 15:49:05.173671 17344 solver.cpp:228] Iteration 77600, loss = 0.00019003\nI0819 15:49:05.173728 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:49:05.173746 17344 solver.cpp:244]     Train net output #1: loss = 0.000189768 (* 1 = 0.000189768 loss)\nI0819 15:49:05.269093 17344 sgd_solver.cpp:166] Iteration 77600, lr = 3.5e-05\nI0819 15:51:22.610283 17344 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 15:52:43.982477 17344 solver.cpp:404]     Test net output #0: accuracy = 0.84404\nI0819 15:52:43.982836 17344 solver.cpp:404]     Test net output #1: loss = 0.632071 (* 1 = 0.632071 loss)\nI0819 15:52:45.293172 17344 solver.cpp:228] Iteration 77700, loss = 0.000202427\nI0819 15:52:45.293229 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:52:45.293248 17344 solver.cpp:244]     Train net output #1: loss = 0.000202166 (* 1 = 0.000202166 loss)\nI0819 15:52:45.383841 17344 sgd_solver.cpp:166] Iteration 77700, lr = 3.5e-05\nI0819 15:55:02.687266 17344 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 15:56:24.047948 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83184\nI0819 15:56:24.048260 17344 solver.cpp:404]     Test net output #1: loss = 0.672316 (* 1 = 0.672316 loss)\nI0819 15:56:25.358309 17344 solver.cpp:228] Iteration 77800, loss = 0.000193142\nI0819 15:56:25.358366 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:56:25.358383 17344 solver.cpp:244]     Train net output #1: loss = 0.000192881 (* 1 = 0.000192881 loss)\nI0819 15:56:25.449853 17344 sgd_solver.cpp:166] Iteration 77800, lr = 3.5e-05\nI0819 15:58:42.250716 17344 solver.cpp:337] 
Iteration 77900, Testing net (#0)\nI0819 16:00:03.447541 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82272\nI0819 16:00:03.447854 17344 solver.cpp:404]     Test net output #1: loss = 0.705793 (* 1 = 0.705793 loss)\nI0819 16:00:04.758440 17344 solver.cpp:228] Iteration 77900, loss = 0.000205159\nI0819 16:00:04.758496 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:00:04.758513 17344 solver.cpp:244]     Train net output #1: loss = 0.000204898 (* 1 = 0.000204898 loss)\nI0819 16:00:04.853039 17344 sgd_solver.cpp:166] Iteration 77900, lr = 3.5e-05\nI0819 16:02:21.665805 17344 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:03:43.038439 17344 solver.cpp:404]     Test net output #0: accuracy = 0.811\nI0819 16:03:43.038758 17344 solver.cpp:404]     Test net output #1: loss = 0.738609 (* 1 = 0.738609 loss)\nI0819 16:03:44.347800 17344 solver.cpp:228] Iteration 78000, loss = 0.000205862\nI0819 16:03:44.347860 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:03:44.347879 17344 solver.cpp:244]     Train net output #1: loss = 0.000205601 (* 1 = 0.000205601 loss)\nI0819 16:03:44.444881 17344 sgd_solver.cpp:166] Iteration 78000, lr = 3.5e-05\nI0819 16:06:01.214784 17344 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:07:22.472333 17344 solver.cpp:404]     Test net output #0: accuracy = 0.80012\nI0819 16:07:22.472637 17344 solver.cpp:404]     Test net output #1: loss = 0.775824 (* 1 = 0.775824 loss)\nI0819 16:07:23.782611 17344 solver.cpp:228] Iteration 78100, loss = 0.000193357\nI0819 16:07:23.782670 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:07:23.782688 17344 solver.cpp:244]     Train net output #1: loss = 0.000193096 (* 1 = 0.000193096 loss)\nI0819 16:07:23.877946 17344 sgd_solver.cpp:166] Iteration 78100, lr = 3.5e-05\nI0819 16:09:40.693922 17344 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 16:11:02.031167 17344 solver.cpp:404]     Test net output #0: 
accuracy = 0.78448\nI0819 16:11:02.031491 17344 solver.cpp:404]     Test net output #1: loss = 0.833954 (* 1 = 0.833954 loss)\nI0819 16:11:03.340286 17344 solver.cpp:228] Iteration 78200, loss = 0.000195466\nI0819 16:11:03.340344 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:11:03.340363 17344 solver.cpp:244]     Train net output #1: loss = 0.000195205 (* 1 = 0.000195205 loss)\nI0819 16:11:03.433637 17344 sgd_solver.cpp:166] Iteration 78200, lr = 3.5e-05\nI0819 16:13:20.546262 17344 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 16:14:41.808658 17344 solver.cpp:404]     Test net output #0: accuracy = 0.76588\nI0819 16:14:41.808936 17344 solver.cpp:404]     Test net output #1: loss = 0.89254 (* 1 = 0.89254 loss)\nI0819 16:14:43.118062 17344 solver.cpp:228] Iteration 78300, loss = 0.000197825\nI0819 16:14:43.118121 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:14:43.118139 17344 solver.cpp:244]     Train net output #1: loss = 0.000197564 (* 1 = 0.000197564 loss)\nI0819 16:14:43.210682 17344 sgd_solver.cpp:166] Iteration 78300, lr = 3.5e-05\nI0819 16:17:00.147734 17344 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 16:18:21.562893 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74156\nI0819 16:18:21.563191 17344 solver.cpp:404]     Test net output #1: loss = 0.965411 (* 1 = 0.965411 loss)\nI0819 16:18:22.872689 17344 solver.cpp:228] Iteration 78400, loss = 0.000197491\nI0819 16:18:22.872745 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:18:22.872762 17344 solver.cpp:244]     Train net output #1: loss = 0.00019723 (* 1 = 0.00019723 loss)\nI0819 16:18:22.966312 17344 sgd_solver.cpp:166] Iteration 78400, lr = 3.5e-05\nI0819 16:20:39.933830 17344 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 16:22:01.332515 17344 solver.cpp:404]     Test net output #0: accuracy = 0.7162\nI0819 16:22:01.332804 17344 solver.cpp:404]     Test net output #1: loss = 1.0431 (* 1 = 
1.0431 loss)\nI0819 16:22:02.643760 17344 solver.cpp:228] Iteration 78500, loss = 0.000190336\nI0819 16:22:02.643816 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:22:02.643834 17344 solver.cpp:244]     Train net output #1: loss = 0.000190075 (* 1 = 0.000190075 loss)\nI0819 16:22:02.740598 17344 sgd_solver.cpp:166] Iteration 78500, lr = 3.5e-05\nI0819 16:24:19.798774 17344 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 16:25:41.204120 17344 solver.cpp:404]     Test net output #0: accuracy = 0.67236\nI0819 16:25:41.204391 17344 solver.cpp:404]     Test net output #1: loss = 1.18139 (* 1 = 1.18139 loss)\nI0819 16:25:42.513375 17344 solver.cpp:228] Iteration 78600, loss = 0.000186028\nI0819 16:25:42.513437 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:25:42.513454 17344 solver.cpp:244]     Train net output #1: loss = 0.000185766 (* 1 = 0.000185766 loss)\nI0819 16:25:42.605207 17344 sgd_solver.cpp:166] Iteration 78600, lr = 3.5e-05\nI0819 16:27:59.583523 17344 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 16:29:21.004984 17344 solver.cpp:404]     Test net output #0: accuracy = 0.64364\nI0819 16:29:21.005287 17344 solver.cpp:404]     Test net output #1: loss = 1.29074 (* 1 = 1.29074 loss)\nI0819 16:29:22.314155 17344 solver.cpp:228] Iteration 78700, loss = 0.000196424\nI0819 16:29:22.314218 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:29:22.314235 17344 solver.cpp:244]     Train net output #1: loss = 0.000196162 (* 1 = 0.000196162 loss)\nI0819 16:29:22.411147 17344 sgd_solver.cpp:166] Iteration 78700, lr = 3.5e-05\nI0819 16:31:39.377796 17344 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 16:33:00.779219 17344 solver.cpp:404]     Test net output #0: accuracy = 0.62456\nI0819 16:33:00.779582 17344 solver.cpp:404]     Test net output #1: loss = 1.34472 (* 1 = 1.34472 loss)\nI0819 16:33:02.089851 17344 solver.cpp:228] Iteration 78800, loss = 0.00019518\nI0819 
16:33:02.089913 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:33:02.089931 17344 solver.cpp:244]     Train net output #1: loss = 0.000194919 (* 1 = 0.000194919 loss)\nI0819 16:33:02.187911 17344 sgd_solver.cpp:166] Iteration 78800, lr = 3.5e-05\nI0819 16:35:19.170053 17344 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 16:36:40.496526 17344 solver.cpp:404]     Test net output #0: accuracy = 0.50436\nI0819 16:36:40.496785 17344 solver.cpp:404]     Test net output #1: loss = 2.26218 (* 1 = 2.26218 loss)\nI0819 16:36:41.807271 17344 solver.cpp:228] Iteration 78900, loss = 0.00286764\nI0819 16:36:41.807332 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:36:41.807353 17344 solver.cpp:244]     Train net output #1: loss = 0.00286738 (* 1 = 0.00286738 loss)\nI0819 16:36:41.911051 17344 sgd_solver.cpp:166] Iteration 78900, lr = 3.5e-05\nI0819 16:38:58.918159 17344 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 16:40:20.175704 17344 solver.cpp:404]     Test net output #0: accuracy = 0.65612\nI0819 16:40:20.175977 17344 solver.cpp:404]     Test net output #1: loss = 1.48244 (* 1 = 1.48244 loss)\nI0819 16:40:21.486860 17344 solver.cpp:228] Iteration 79000, loss = 0.000589089\nI0819 16:40:21.486919 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:40:21.486938 17344 solver.cpp:244]     Train net output #1: loss = 0.000588827 (* 1 = 0.000588827 loss)\nI0819 16:40:21.576491 17344 sgd_solver.cpp:166] Iteration 79000, lr = 3.5e-05\nI0819 16:42:38.740082 17344 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 16:43:59.914650 17344 solver.cpp:404]     Test net output #0: accuracy = 0.6974\nI0819 16:43:59.914976 17344 solver.cpp:404]     Test net output #1: loss = 1.27567 (* 1 = 1.27567 loss)\nI0819 16:44:01.224380 17344 solver.cpp:228] Iteration 79100, loss = 0.000339679\nI0819 16:44:01.224442 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:44:01.224460 17344 
solver.cpp:244]     Train net output #1: loss = 0.000339418 (* 1 = 0.000339418 loss)\nI0819 16:44:01.315702 17344 sgd_solver.cpp:166] Iteration 79100, lr = 3.5e-05\nI0819 16:46:18.423503 17344 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 16:47:39.385327 17344 solver.cpp:404]     Test net output #0: accuracy = 0.74216\nI0819 16:47:39.385632 17344 solver.cpp:404]     Test net output #1: loss = 1.12065 (* 1 = 1.12065 loss)\nI0819 16:47:40.695686 17344 solver.cpp:228] Iteration 79200, loss = 0.000353178\nI0819 16:47:40.695747 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:47:40.695765 17344 solver.cpp:244]     Train net output #1: loss = 0.000352917 (* 1 = 0.000352917 loss)\nI0819 16:47:40.785548 17344 sgd_solver.cpp:166] Iteration 79200, lr = 3.5e-05\nI0819 16:49:57.719810 17344 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 16:51:18.775954 17344 solver.cpp:404]     Test net output #0: accuracy = 0.77504\nI0819 16:51:18.776278 17344 solver.cpp:404]     Test net output #1: loss = 0.991264 (* 1 = 0.991264 loss)\nI0819 16:51:20.086160 17344 solver.cpp:228] Iteration 79300, loss = 0.000303649\nI0819 16:51:20.086225 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:51:20.086242 17344 solver.cpp:244]     Train net output #1: loss = 0.000303389 (* 1 = 0.000303389 loss)\nI0819 16:51:20.177001 17344 sgd_solver.cpp:166] Iteration 79300, lr = 3.5e-05\nI0819 16:53:37.143355 17344 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 16:54:58.256484 17344 solver.cpp:404]     Test net output #0: accuracy = 0.79632\nI0819 16:54:58.256757 17344 solver.cpp:404]     Test net output #1: loss = 0.91453 (* 1 = 0.91453 loss)\nI0819 16:54:59.566483 17344 solver.cpp:228] Iteration 79400, loss = 0.000303688\nI0819 16:54:59.566543 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:54:59.566561 17344 solver.cpp:244]     Train net output #1: loss = 0.000303428 (* 1 = 0.000303428 loss)\nI0819 16:54:59.661713 
17344 sgd_solver.cpp:166] Iteration 79400, lr = 3.5e-05\nI0819 16:57:16.794765 17344 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 16:58:37.940609 17344 solver.cpp:404]     Test net output #0: accuracy = 0.82488\nI0819 16:58:37.940915 17344 solver.cpp:404]     Test net output #1: loss = 0.820244 (* 1 = 0.820244 loss)\nI0819 16:58:39.249907 17344 solver.cpp:228] Iteration 79500, loss = 0.000294829\nI0819 16:58:39.249970 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:58:39.249989 17344 solver.cpp:244]     Train net output #1: loss = 0.000294568 (* 1 = 0.000294568 loss)\nI0819 16:58:39.346956 17344 sgd_solver.cpp:166] Iteration 79500, lr = 3.5e-05\nI0819 17:00:56.396937 17344 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 17:02:17.446208 17344 solver.cpp:404]     Test net output #0: accuracy = 0.83848\nI0819 17:02:17.446553 17344 solver.cpp:404]     Test net output #1: loss = 0.768891 (* 1 = 0.768891 loss)\nI0819 17:02:18.755050 17344 solver.cpp:228] Iteration 79600, loss = 0.000269143\nI0819 17:02:18.755112 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:02:18.755131 17344 solver.cpp:244]     Train net output #1: loss = 0.000268883 (* 1 = 0.000268883 loss)\nI0819 17:02:18.848551 17344 sgd_solver.cpp:166] Iteration 79600, lr = 3.5e-05\nI0819 17:04:35.844094 17344 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:05:56.881253 17344 solver.cpp:404]     Test net output #0: accuracy = 0.85128\nI0819 17:05:56.881553 17344 solver.cpp:404]     Test net output #1: loss = 0.729628 (* 1 = 0.729628 loss)\nI0819 17:05:58.192322 17344 solver.cpp:228] Iteration 79700, loss = 0.000250767\nI0819 17:05:58.192384 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:05:58.192401 17344 solver.cpp:244]     Train net output #1: loss = 0.000250506 (* 1 = 0.000250506 loss)\nI0819 17:05:58.282578 17344 sgd_solver.cpp:166] Iteration 79700, lr = 3.5e-05\nI0819 17:08:15.089409 17344 solver.cpp:337] 
Iteration 79800, Testing net (#0)\nI0819 17:09:36.241293 17344 solver.cpp:404]     Test net output #0: accuracy = 0.8604\nI0819 17:09:36.241603 17344 solver.cpp:404]     Test net output #1: loss = 0.695762 (* 1 = 0.695762 loss)\nI0819 17:09:37.550915 17344 solver.cpp:228] Iteration 79800, loss = 0.000262486\nI0819 17:09:37.550977 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:09:37.550995 17344 solver.cpp:244]     Train net output #1: loss = 0.000262226 (* 1 = 0.000262226 loss)\nI0819 17:09:37.648624 17344 sgd_solver.cpp:166] Iteration 79800, lr = 3.5e-05\nI0819 17:11:54.463774 17344 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 17:13:15.859621 17344 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 17:13:15.859966 17344 solver.cpp:404]     Test net output #1: loss = 0.67016 (* 1 = 0.67016 loss)\nI0819 17:13:17.168941 17344 solver.cpp:228] Iteration 79900, loss = 0.000261903\nI0819 17:13:17.169000 17344 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:13:17.169018 17344 solver.cpp:244]     Train net output #1: loss = 0.000261643 (* 1 = 0.000261643 loss)\nI0819 17:13:17.261603 17344 sgd_solver.cpp:166] Iteration 79900, lr = 3.5e-05\nI0819 17:15:34.047078 17344 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35AdamFig9_iter_80000.caffemodel\nI0819 17:15:34.476797 17344 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35AdamFig9_iter_80000.solverstate\nI0819 17:15:34.942519 17344 solver.cpp:317] Iteration 80000, loss = 0.000240354\nI0819 17:15:34.942581 17344 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 17:16:56.169163 17344 solver.cpp:404]     Test net output #0: accuracy = 0.87148\nI0819 17:16:56.169510 17344 solver.cpp:404]     Test net output #1: loss = 0.652749 (* 1 = 0.652749 loss)\nI0819 17:16:56.169523 17344 solver.cpp:322] Optimization Done.\nI0819 17:17:01.586102 17344 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Cifar100Fig8",
    "content": "I0817 16:12:04.542970 17389 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:12:04.545158 17389 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:12:04.546682 17389 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:12:04.547899 17389 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:12:04.549111 17389 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:12:04.550343 17389 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:12:04.551874 17389 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:12:04.553102 17389 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:12:04.554334 17389 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:12:04.971352 17389 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Cifar100Fig8\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0817 16:12:04.974308 17389 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:12:04.990897 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:04.990972 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:04.992022 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:12:04.992077 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:12:04.992103 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:12:04.992123 17389 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:12:04.992142 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:12:04.992161 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:12:04.992178 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:12:04.992197 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:12:04.992216 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:12:04.992234 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:12:04.992254 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:12:04.992267 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:12:04.992286 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:12:04.992305 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:12:04.992324 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:12:04.992342 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:12:04.992359 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:12:04.992377 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:12:04.992394 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:12:04.992413 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:12:04.992446 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:12:04.992465 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:12:04.992491 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:12:04.992519 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:12:04.992537 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:12:04.992552 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:12:04.992569 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:12:04.992585 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:12:04.992602 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:12:04.992619 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:12:04.992638 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:12:04.992687 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:12:04.992707 17389 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:12:04.992725 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:12:04.992744 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:12:04.992763 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:12:04.992782 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:12:04.992799 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:12:04.992816 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:12:04.992833 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:12:04.992857 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:12:04.992874 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:12:04.992890 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:12:04.992908 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:12:04.992928 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:12:04.992944 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:12:04.992964 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:12:04.992979 17389 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:12:04.992998 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:12:04.993012 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:12:04.993029 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:12:04.993058 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:12:04.993078 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:12:04.993098 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:12:04.993115 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:12:04.993130 17389 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:12:04.994889 17389 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.9\nI0817 16:12:04.996956 17389 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:12:04.998152 17389 net.cpp:100] Creating Layer dataLayer\nI0817 16:12:04.998225 17389 net.cpp:408] dataLayer -> data_top\nI0817 16:12:04.998421 17389 net.cpp:408] dataLayer -> label\nI0817 16:12:04.998574 17389 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:12:05.033730 17394 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_train_lmdb\nI0817 16:12:05.051635 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:05.058689 17389 net.cpp:150] Setting up dataLayer\nI0817 16:12:05.058750 17389 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:12:05.058763 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.058768 17389 net.cpp:165] Memory required for data: 1536500\nI0817 16:12:05.058784 17389 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:12:05.058797 17389 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:12:05.058805 17389 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:12:05.058825 17389 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:12:05.058838 17389 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:12:05.058921 17389 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:12:05.058934 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.058941 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.058946 17389 net.cpp:165] Memory required for data: 1537500\nI0817 16:12:05.058953 17389 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:12:05.059011 17389 net.cpp:100] Creating Layer pre_conv\nI0817 16:12:05.059023 17389 net.cpp:434] pre_conv <- data_top\nI0817 16:12:05.059036 17389 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:12:05.060837 17389 net.cpp:150] Setting up pre_conv\nI0817 16:12:05.060858 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.060863 17389 net.cpp:165] Memory required for data: 9729500\nI0817 16:12:05.060928 17389 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:12:05.060997 17389 net.cpp:100] Creating Layer pre_bn\nI0817 16:12:05.061008 17389 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:12:05.061018 17389 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:12:05.061532 17396 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:05.061568 17389 net.cpp:150] Setting up pre_bn\nI0817 16:12:05.061584 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.061589 17389 net.cpp:165] Memory required for data: 17921500\nI0817 16:12:05.061607 17389 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:05.061663 17389 net.cpp:100] Creating Layer pre_scale\nI0817 16:12:05.061673 17389 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:12:05.061682 17389 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:12:05.061851 17389 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:05.062104 17389 net.cpp:150] Setting up pre_scale\nI0817 16:12:05.062120 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.062125 17389 net.cpp:165] Memory required for data: 26113500\nI0817 16:12:05.062135 17389 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:12:05.062178 17389 net.cpp:100] Creating Layer pre_relu\nI0817 16:12:05.062187 17389 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:12:05.062198 17389 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:12:05.062209 17389 net.cpp:150] Setting up pre_relu\nI0817 16:12:05.062217 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.062222 17389 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:12:05.062227 17389 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:12:05.062235 17389 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:12:05.062243 17389 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:12:05.062252 17389 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:12:05.062260 17389 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:12:05.062309 17389 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:12:05.062321 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.062328 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.062333 17389 net.cpp:165] Memory required for data: 50689500\nI0817 16:12:05.062338 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:12:05.062350 17389 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:12:05.062356 17389 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:12:05.062368 17389 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:12:05.062683 17389 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:12:05.062700 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.062705 17389 net.cpp:165] Memory required for data: 58881500\nI0817 16:12:05.062716 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:12:05.062732 17389 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:12:05.062739 17389 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:12:05.062750 17389 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:12:05.062986 17389 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:12:05.063000 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.063005 17389 net.cpp:165] Memory required for data: 67073500\nI0817 16:12:05.063016 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:05.063025 17389 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:12:05.063031 17389 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:12:05.063040 17389 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.063092 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:05.063230 17389 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:12:05.063243 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.063248 17389 net.cpp:165] Memory required for data: 75265500\nI0817 16:12:05.063257 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:12:05.063273 17389 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:12:05.063279 17389 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:12:05.063292 17389 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.063302 17389 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:12:05.063308 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.063313 17389 net.cpp:165] Memory required for data: 83457500\nI0817 16:12:05.063318 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:12:05.063333 17389 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:12:05.063338 17389 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:12:05.063346 17389 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:12:05.063676 17389 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:12:05.063691 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.063697 17389 net.cpp:165] Memory required for data: 91649500\nI0817 16:12:05.063706 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:12:05.063716 17389 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:12:05.063721 17389 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:12:05.063733 17389 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:12:05.063968 17389 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:12:05.063984 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.063989 17389 net.cpp:165] Memory required for data: 99841500\nI0817 16:12:05.064003 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:05.064013 17389 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:12:05.064018 17389 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:12:05.064026 17389 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.064082 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:05.064221 17389 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:12:05.064234 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064239 17389 net.cpp:165] Memory required for data: 108033500\nI0817 16:12:05.064249 17389 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:12:05.064301 17389 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:12:05.064313 17389 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:12:05.064321 17389 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:12:05.064329 17389 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:12:05.064402 17389 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:12:05.064416 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064421 17389 net.cpp:165] Memory required for data: 116225500\nI0817 16:12:05.064427 17389 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:12:05.064436 17389 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:12:05.064441 17389 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:12:05.064450 17389 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.064458 17389 net.cpp:150] Setting up L1_b1_relu\nI0817 16:12:05.064466 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064471 17389 net.cpp:165] Memory required for data: 124417500\nI0817 16:12:05.064476 17389 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.064484 17389 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.064489 17389 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:12:05.064517 17389 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:05.064529 17389 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:05.064573 17389 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.064582 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064589 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064600 17389 net.cpp:165] Memory required for data: 140801500\nI0817 16:12:05.064606 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:12:05.064620 17389 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:12:05.064627 17389 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:05.064636 17389 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:12:05.064946 17389 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:12:05.064960 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.064965 17389 net.cpp:165] Memory required for data: 148993500\nI0817 16:12:05.064975 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:12:05.064987 17389 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:12:05.064995 17389 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:12:05.065004 17389 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:12:05.065243 17389 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:12:05.065258 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.065263 17389 net.cpp:165] Memory required for data: 157185500\nI0817 16:12:05.065273 17389 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:05.065281 17389 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:12:05.065286 17389 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:12:05.065297 17389 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.065349 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:05.065488 17389 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:12:05.065508 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.065513 17389 net.cpp:165] Memory required for data: 165377500\nI0817 16:12:05.065522 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:12:05.065531 17389 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:12:05.065536 17389 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:12:05.065544 17389 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.065553 17389 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:12:05.065560 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.065564 17389 net.cpp:165] Memory required for data: 173569500\nI0817 16:12:05.065569 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:12:05.065583 17389 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:12:05.065589 17389 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:12:05.065600 17389 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:12:05.065903 17389 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:12:05.065917 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.065922 17389 net.cpp:165] Memory required for data: 181761500\nI0817 16:12:05.065932 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:12:05.065943 17389 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:12:05.065948 17389 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:12:05.065961 17389 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:12:05.066195 17389 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:12:05.066210 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.066215 17389 net.cpp:165] Memory required for data: 189953500\nI0817 16:12:05.066229 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:05.066241 17389 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:12:05.066248 17389 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:12:05.066258 17389 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.066311 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:05.066449 17389 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:12:05.066462 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.066468 17389 net.cpp:165] Memory required for data: 198145500\nI0817 16:12:05.066476 17389 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:12:05.066501 17389 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:12:05.066509 17389 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:12:05.066516 17389 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:05.066524 17389 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:12:05.066560 17389 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:12:05.066570 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.066576 17389 net.cpp:165] Memory required for data: 206337500\nI0817 16:12:05.066581 17389 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:12:05.066587 17389 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:12:05.066593 17389 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:12:05.066603 17389 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:12:05.066612 17389 net.cpp:150] Setting up L1_b2_relu\nI0817 16:12:05.066619 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:12:05.066623 17389 net.cpp:165] Memory required for data: 214529500\nI0817 16:12:05.066628 17389 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.066635 17389 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.066642 17389 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:12:05.066648 17389 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:05.066658 17389 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:05.066700 17389 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.066712 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.066720 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.066725 17389 net.cpp:165] Memory required for data: 230913500\nI0817 16:12:05.066730 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:12:05.066740 17389 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:12:05.066745 17389 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:05.066757 17389 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:12:05.067059 17389 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:12:05.067073 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.067078 17389 net.cpp:165] Memory required for data: 239105500\nI0817 16:12:05.067087 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:12:05.067096 17389 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:12:05.067102 17389 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:12:05.067113 17389 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:12:05.067350 17389 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:12:05.067363 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:12:05.067368 17389 net.cpp:165] Memory required for data: 247297500\nI0817 16:12:05.067379 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:05.067390 17389 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:12:05.067396 17389 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:12:05.067404 17389 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.067456 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:05.067602 17389 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:12:05.067615 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.067620 17389 net.cpp:165] Memory required for data: 255489500\nI0817 16:12:05.067629 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:12:05.067641 17389 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:12:05.067646 17389 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:12:05.067654 17389 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.067663 17389 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:12:05.067677 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.067682 17389 net.cpp:165] Memory required for data: 263681500\nI0817 16:12:05.067687 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:12:05.067701 17389 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:12:05.067706 17389 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:12:05.067718 17389 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:12:05.068024 17389 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:12:05.068038 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068043 17389 net.cpp:165] Memory required for data: 271873500\nI0817 16:12:05.068053 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:12:05.068066 17389 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:12:05.068073 17389 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:12:05.068081 17389 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:12:05.068315 17389 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:12:05.068330 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068336 17389 net.cpp:165] Memory required for data: 280065500\nI0817 16:12:05.068346 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:05.068354 17389 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:12:05.068361 17389 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:12:05.068368 17389 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.068420 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:05.068564 17389 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:12:05.068578 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068583 17389 net.cpp:165] Memory required for data: 288257500\nI0817 16:12:05.068593 17389 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:12:05.068604 17389 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:12:05.068611 17389 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:12:05.068619 17389 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:05.068650 17389 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:12:05.068687 17389 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:12:05.068699 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068704 17389 net.cpp:165] Memory required for data: 296449500\nI0817 16:12:05.068712 17389 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:12:05.068723 17389 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:12:05.068729 17389 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:12:05.068737 17389 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.068745 17389 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:12:05.068753 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068758 17389 net.cpp:165] Memory required for data: 304641500\nI0817 16:12:05.068763 17389 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.068769 17389 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.068774 17389 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:12:05.068783 17389 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:05.068791 17389 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:05.068837 17389 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.068850 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068856 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.068861 17389 net.cpp:165] Memory required for data: 321025500\nI0817 16:12:05.068866 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:12:05.068879 17389 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:12:05.068886 17389 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:05.068902 17389 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:12:05.069211 17389 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:12:05.069226 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.069231 17389 net.cpp:165] Memory required for data: 329217500\nI0817 16:12:05.069239 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:12:05.069250 17389 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:12:05.069257 17389 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:12:05.069265 17389 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:12:05.069514 17389 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:12:05.069530 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.069536 17389 net.cpp:165] Memory required for data: 337409500\nI0817 16:12:05.069546 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:05.069555 17389 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:12:05.069561 17389 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:12:05.069569 17389 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.069622 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:05.069766 17389 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:12:05.069778 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.069783 17389 net.cpp:165] Memory required for data: 345601500\nI0817 16:12:05.069792 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:12:05.069802 17389 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:12:05.069808 17389 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:12:05.069818 17389 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.069828 17389 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:12:05.069835 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.069840 17389 net.cpp:165] Memory required for data: 353793500\nI0817 16:12:05.069845 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:12:05.069856 17389 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:12:05.069862 17389 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:12:05.069874 17389 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:12:05.070178 17389 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:12:05.070191 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070196 17389 net.cpp:165] Memory required for data: 361985500\nI0817 16:12:05.070206 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:12:05.070215 17389 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:12:05.070221 17389 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:12:05.070232 17389 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:12:05.070472 17389 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:12:05.070488 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070493 17389 net.cpp:165] Memory required for data: 370177500\nI0817 16:12:05.070510 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:05.070519 17389 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:12:05.070525 17389 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:12:05.070533 17389 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.070588 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:05.070726 17389 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:12:05.070739 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070744 17389 net.cpp:165] Memory required for data: 378369500\nI0817 16:12:05.070753 17389 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:12:05.070762 17389 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:12:05.070768 17389 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:12:05.070775 17389 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:05.070786 17389 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:12:05.070825 17389 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:12:05.070834 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070839 17389 net.cpp:165] Memory required for data: 386561500\nI0817 16:12:05.070844 17389 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:12:05.070855 17389 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:12:05.070861 17389 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:12:05.070868 17389 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:12:05.070878 17389 net.cpp:150] Setting up L1_b4_relu\nI0817 16:12:05.070884 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070889 17389 net.cpp:165] Memory required for data: 394753500\nI0817 16:12:05.070894 17389 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.070900 17389 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.070906 17389 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:12:05.070914 17389 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:05.070922 17389 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:05.070967 17389 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.070978 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070986 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.070991 17389 net.cpp:165] Memory required for data: 411137500\nI0817 16:12:05.070996 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:12:05.071009 17389 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:12:05.071015 17389 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:05.071024 17389 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:12:05.071332 17389 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:12:05.071346 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.071352 17389 net.cpp:165] Memory required for data: 419329500\nI0817 16:12:05.071375 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:12:05.071388 17389 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:12:05.071394 17389 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:12:05.071405 17389 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:12:05.071655 17389 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:12:05.071668 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.071673 17389 net.cpp:165] Memory required for data: 427521500\nI0817 16:12:05.071683 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:05.071692 17389 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:12:05.071698 17389 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:12:05.071707 17389 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.071760 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:05.071899 17389 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:12:05.071913 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.071918 17389 net.cpp:165] Memory required for data: 435713500\nI0817 16:12:05.071926 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:12:05.071933 17389 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:12:05.071939 17389 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:12:05.071949 17389 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.071959 17389 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:12:05.071966 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.071971 17389 net.cpp:165] Memory required for data: 443905500\nI0817 16:12:05.071975 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:12:05.071986 17389 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:12:05.072000 17389 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:12:05.072011 17389 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:12:05.072319 17389 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:12:05.072332 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.072337 17389 net.cpp:165] Memory required for data: 452097500\nI0817 16:12:05.072346 17389 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:12:05.072355 17389 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:12:05.072361 17389 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:12:05.072372 17389 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:12:05.072616 17389 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:12:05.072633 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.072638 17389 net.cpp:165] Memory required for data: 460289500\nI0817 16:12:05.072649 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:05.072657 17389 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:12:05.072664 17389 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:12:05.072671 17389 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.072724 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:05.072872 17389 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:12:05.072885 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.072891 17389 net.cpp:165] Memory required for data: 468481500\nI0817 16:12:05.072906 17389 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:12:05.072918 17389 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:12:05.072924 17389 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:12:05.072932 17389 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:05.072939 17389 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:12:05.072973 17389 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:12:05.072983 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.072988 17389 net.cpp:165] Memory required for data: 476673500\nI0817 16:12:05.072994 17389 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:12:05.073001 17389 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:12:05.073007 17389 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:12:05.073019 17389 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.073029 17389 net.cpp:150] Setting up L1_b5_relu\nI0817 16:12:05.073036 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.073040 17389 net.cpp:165] Memory required for data: 484865500\nI0817 16:12:05.073045 17389 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.073052 17389 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.073057 17389 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:12:05.073065 17389 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:05.073074 17389 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:05.073119 17389 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.073132 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.073138 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.073143 17389 net.cpp:165] Memory required for data: 501249500\nI0817 16:12:05.073148 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:12:05.073161 17389 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:12:05.073168 17389 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:05.073177 17389 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:12:05.073482 17389 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:12:05.073503 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.073508 17389 net.cpp:165] Memory required for data: 509441500\nI0817 16:12:05.073523 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:12:05.073535 17389 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:12:05.073542 17389 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:12:05.073550 17389 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:12:05.073796 17389 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:12:05.073812 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.073817 17389 net.cpp:165] Memory required for data: 517633500\nI0817 16:12:05.073827 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:05.073837 17389 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:12:05.073843 17389 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:12:05.073850 17389 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.073902 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:05.074043 17389 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:12:05.074055 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.074060 17389 net.cpp:165] Memory required for data: 525825500\nI0817 16:12:05.074069 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:12:05.074080 17389 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:12:05.074086 17389 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:12:05.074093 17389 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.074106 17389 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:12:05.074113 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.074117 17389 net.cpp:165] Memory required for data: 534017500\nI0817 16:12:05.074122 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:12:05.074133 17389 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:12:05.074138 17389 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:12:05.074151 17389 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:12:05.074460 17389 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:12:05.074473 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.074478 17389 
net.cpp:165] Memory required for data: 542209500\nI0817 16:12:05.074487 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:12:05.074502 17389 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:12:05.074509 17389 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:12:05.074522 17389 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:12:05.074764 17389 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:12:05.074776 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.074781 17389 net.cpp:165] Memory required for data: 550401500\nI0817 16:12:05.074792 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:05.074803 17389 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:12:05.074810 17389 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:12:05.074817 17389 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.074869 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:05.075009 17389 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:12:05.075022 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075027 17389 net.cpp:165] Memory required for data: 558593500\nI0817 16:12:05.075037 17389 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:12:05.075054 17389 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:12:05.075062 17389 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:12:05.075068 17389 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:05.075076 17389 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:12:05.075110 17389 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:12:05.075122 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075127 17389 net.cpp:165] Memory required for data: 566785500\nI0817 16:12:05.075134 17389 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:12:05.075148 17389 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:12:05.075155 17389 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:12:05.075162 17389 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.075171 17389 net.cpp:150] Setting up L1_b6_relu\nI0817 16:12:05.075178 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075183 17389 net.cpp:165] Memory required for data: 574977500\nI0817 16:12:05.075187 17389 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.075196 17389 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.075201 17389 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:12:05.075211 17389 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:05.075220 17389 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:05.075264 17389 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.075275 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075283 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075287 17389 net.cpp:165] Memory required for data: 591361500\nI0817 16:12:05.075292 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:12:05.075306 17389 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:12:05.075312 17389 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:05.075321 17389 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:12:05.075644 17389 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:12:05.075659 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075664 17389 net.cpp:165] Memory required for data: 599553500\nI0817 16:12:05.075672 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:12:05.075683 17389 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:12:05.075690 17389 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:12:05.075700 17389 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:12:05.075939 17389 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:12:05.075953 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.075958 17389 net.cpp:165] Memory required for data: 607745500\nI0817 16:12:05.075968 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:05.075975 17389 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:12:05.075983 17389 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:12:05.075989 17389 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.076043 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:05.076185 17389 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:12:05.076198 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.076203 17389 net.cpp:165] Memory required for data: 615937500\nI0817 16:12:05.076212 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:12:05.076223 17389 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:12:05.076230 17389 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:12:05.076237 17389 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.076246 17389 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:12:05.076253 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.076258 17389 net.cpp:165] Memory required for data: 624129500\nI0817 16:12:05.076262 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:12:05.076275 17389 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:12:05.076282 17389 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:12:05.076294 17389 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:12:05.076613 17389 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:12:05.076627 17389 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.076632 17389 net.cpp:165] Memory required for data: 632321500\nI0817 16:12:05.076648 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:12:05.076668 17389 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:12:05.076674 17389 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:12:05.076683 17389 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:12:05.076921 17389 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:12:05.076934 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.076939 17389 net.cpp:165] Memory required for data: 640513500\nI0817 16:12:05.076949 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:05.076958 17389 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:12:05.076964 17389 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:12:05.076972 17389 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.077028 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:05.077168 17389 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:12:05.077180 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077186 17389 net.cpp:165] Memory required for data: 648705500\nI0817 16:12:05.077194 17389 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:12:05.077203 17389 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:12:05.077209 17389 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:12:05.077216 17389 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:05.077226 17389 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:12:05.077257 17389 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:12:05.077266 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077271 17389 net.cpp:165] Memory required for data: 656897500\nI0817 16:12:05.077276 17389 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:12:05.077286 17389 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:12:05.077292 17389 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:12:05.077299 17389 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.077308 17389 net.cpp:150] Setting up L1_b7_relu\nI0817 16:12:05.077316 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077320 17389 net.cpp:165] Memory required for data: 665089500\nI0817 16:12:05.077324 17389 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.077332 17389 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.077337 17389 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:12:05.077344 17389 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:05.077353 17389 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:05.077399 17389 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.077410 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077416 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077421 17389 net.cpp:165] Memory required for data: 681473500\nI0817 16:12:05.077426 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:12:05.077442 17389 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:12:05.077450 17389 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:05.077458 17389 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:12:05.077780 17389 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:12:05.077795 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.077800 17389 net.cpp:165] Memory required for data: 689665500\nI0817 16:12:05.077808 17389 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:12:05.077822 17389 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:12:05.077828 17389 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:12:05.077839 17389 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:12:05.078091 17389 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:12:05.078105 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.078110 17389 net.cpp:165] Memory required for data: 697857500\nI0817 16:12:05.078120 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:05.078130 17389 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:12:05.078135 17389 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:12:05.078142 17389 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.078198 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:05.078338 17389 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:12:05.078351 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.078356 17389 net.cpp:165] Memory required for data: 706049500\nI0817 16:12:05.078366 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:12:05.078373 17389 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:12:05.078379 17389 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:12:05.078392 17389 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.078400 17389 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:12:05.078408 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.078413 17389 net.cpp:165] Memory required for data: 714241500\nI0817 16:12:05.078418 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:12:05.078430 17389 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:12:05.078436 17389 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:12:05.078447 17389 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:12:05.078775 17389 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:12:05.078789 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.078794 17389 net.cpp:165] Memory required for data: 722433500\nI0817 16:12:05.078802 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:12:05.078814 17389 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:12:05.078821 17389 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:12:05.078830 17389 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:12:05.079078 17389 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:12:05.079092 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079097 17389 net.cpp:165] Memory required for data: 730625500\nI0817 16:12:05.079107 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:05.079115 17389 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:12:05.079121 17389 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:12:05.079129 17389 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.079182 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:05.079322 17389 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:12:05.079335 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079340 17389 net.cpp:165] Memory required for data: 738817500\nI0817 16:12:05.079349 17389 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:12:05.079357 17389 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:12:05.079365 17389 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:12:05.079370 17389 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:05.079381 17389 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:12:05.079412 17389 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:12:05.079421 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079426 17389 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:12:05.079430 17389 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:12:05.079442 17389 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:12:05.079447 17389 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:12:05.079454 17389 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.079463 17389 net.cpp:150] Setting up L1_b8_relu\nI0817 16:12:05.079478 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079483 17389 net.cpp:165] Memory required for data: 755201500\nI0817 16:12:05.079488 17389 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.079499 17389 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.079506 17389 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:12:05.079514 17389 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:05.079524 17389 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:05.079571 17389 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.079582 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079589 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079593 17389 net.cpp:165] Memory required for data: 771585500\nI0817 16:12:05.079598 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:12:05.079612 17389 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:12:05.079618 17389 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:05.079627 17389 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:12:05.079949 17389 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:12:05.079964 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.079969 17389 net.cpp:165] Memory required for data: 
779777500\nI0817 16:12:05.079977 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:12:05.079989 17389 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:12:05.079996 17389 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:12:05.080004 17389 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:12:05.080247 17389 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:12:05.080260 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.080265 17389 net.cpp:165] Memory required for data: 787969500\nI0817 16:12:05.080276 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:05.080287 17389 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:12:05.080293 17389 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:12:05.080302 17389 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.080356 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:05.080510 17389 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:12:05.080524 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.080529 17389 net.cpp:165] Memory required for data: 796161500\nI0817 16:12:05.080538 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:12:05.080546 17389 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:12:05.080552 17389 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:12:05.080562 17389 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.080572 17389 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:12:05.080579 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.080585 17389 net.cpp:165] Memory required for data: 804353500\nI0817 16:12:05.080588 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:12:05.080602 17389 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:12:05.080608 17389 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:12:05.080617 17389 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:12:05.080935 17389 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:12:05.080950 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.080955 17389 net.cpp:165] Memory required for data: 812545500\nI0817 16:12:05.080963 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:12:05.080974 17389 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:12:05.080981 17389 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:12:05.080989 17389 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:12:05.081236 17389 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:12:05.081250 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.081255 17389 net.cpp:165] Memory required for data: 820737500\nI0817 16:12:05.081285 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:05.081297 17389 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:12:05.081305 17389 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:12:05.081312 17389 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.081367 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:05.081511 17389 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:12:05.081526 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.081531 17389 net.cpp:165] Memory required for data: 828929500\nI0817 16:12:05.081539 17389 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:12:05.081548 17389 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:12:05.081554 17389 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:12:05.081562 17389 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:05.081569 17389 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:12:05.081604 17389 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:12:05.081614 17389 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:12:05.081619 17389 net.cpp:165] Memory required for data: 837121500\nI0817 16:12:05.081624 17389 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:12:05.081630 17389 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:12:05.081636 17389 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:12:05.081646 17389 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.081655 17389 net.cpp:150] Setting up L1_b9_relu\nI0817 16:12:05.081662 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.081667 17389 net.cpp:165] Memory required for data: 845313500\nI0817 16:12:05.081672 17389 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.081679 17389 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.081684 17389 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:12:05.081696 17389 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:05.081707 17389 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:05.081748 17389 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.081759 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.081766 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.081770 17389 net.cpp:165] Memory required for data: 861697500\nI0817 16:12:05.081776 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:12:05.081789 17389 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:12:05.081796 17389 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:05.081805 17389 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:12:05.082129 17389 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:12:05.082144 17389 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:12:05.082149 17389 net.cpp:165] Memory required for data: 863745500\nI0817 16:12:05.082156 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:12:05.082165 17389 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:12:05.082175 17389 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:12:05.082183 17389 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:12:05.082417 17389 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:12:05.082430 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.082435 17389 net.cpp:165] Memory required for data: 865793500\nI0817 16:12:05.082446 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:05.082454 17389 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:12:05.082468 17389 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:12:05.082476 17389 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.082548 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:05.082696 17389 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:12:05.082711 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.082717 17389 net.cpp:165] Memory required for data: 867841500\nI0817 16:12:05.082726 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:12:05.082733 17389 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:12:05.082739 17389 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:12:05.082747 17389 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.082756 17389 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:12:05.082763 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.082768 17389 net.cpp:165] Memory required for data: 869889500\nI0817 16:12:05.082773 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:12:05.082787 17389 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:12:05.082792 17389 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:12:05.082803 17389 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:12:05.083120 17389 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:12:05.083134 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083139 17389 net.cpp:165] Memory required for data: 871937500\nI0817 16:12:05.083148 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:12:05.083159 17389 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:12:05.083166 17389 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:12:05.083181 17389 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:12:05.083426 17389 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:12:05.083439 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083444 17389 net.cpp:165] Memory required for data: 873985500\nI0817 16:12:05.083454 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:05.083463 17389 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:12:05.083469 17389 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:12:05.083477 17389 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.083542 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:05.083684 17389 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:12:05.083698 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083703 17389 net.cpp:165] Memory required for data: 876033500\nI0817 16:12:05.083711 17389 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:12:05.083725 17389 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:12:05.083732 17389 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:05.083741 17389 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:12:05.083823 17389 net.cpp:150] Setting up L2_b1_pool\nI0817 16:12:05.083838 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083843 17389 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:12:05.083848 17389 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:12:05.083861 17389 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:12:05.083868 17389 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:12:05.083875 17389 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:12:05.083884 17389 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:12:05.083915 17389 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:12:05.083925 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083930 17389 net.cpp:165] Memory required for data: 880129500\nI0817 16:12:05.083935 17389 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:12:05.083945 17389 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:12:05.083951 17389 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:12:05.083966 17389 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.083976 17389 net.cpp:150] Setting up L2_b1_relu\nI0817 16:12:05.083983 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.083987 17389 net.cpp:165] Memory required for data: 882177500\nI0817 16:12:05.083992 17389 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:12:05.084038 17389 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:12:05.084051 17389 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:12:05.086385 17389 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:12:05.086405 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.086411 17389 net.cpp:165] Memory required for data: 884225500\nI0817 16:12:05.086417 17389 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:12:05.086427 17389 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:12:05.086434 17389 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:12:05.086441 17389 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:12:05.086452 17389 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:12:05.086534 17389 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:12:05.086549 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.086555 17389 net.cpp:165] Memory required for data: 888321500\nI0817 16:12:05.086560 17389 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.086572 17389 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.086578 17389 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:12:05.086586 17389 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:05.086597 17389 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:05.086647 17389 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.086659 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.086666 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.086671 17389 net.cpp:165] Memory required for data: 896513500\nI0817 16:12:05.086676 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:12:05.086690 17389 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:12:05.086697 17389 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:05.086709 17389 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:12:05.088158 17389 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:12:05.088176 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.088181 17389 net.cpp:165] Memory required for data: 900609500\nI0817 16:12:05.088191 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:12:05.088203 17389 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:12:05.088210 17389 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:12:05.088218 17389 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:12:05.088459 17389 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:12:05.088472 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.088479 17389 net.cpp:165] Memory required for data: 904705500\nI0817 16:12:05.088488 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:05.088503 17389 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:12:05.088510 17389 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:12:05.088518 17389 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.088577 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:05.088722 17389 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:12:05.088737 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.088742 17389 net.cpp:165] Memory required for data: 908801500\nI0817 16:12:05.088752 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:12:05.088760 17389 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:12:05.088766 17389 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:12:05.088781 17389 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.088793 17389 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:12:05.088799 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.088804 17389 net.cpp:165] Memory required for data: 912897500\nI0817 16:12:05.088809 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:12:05.088825 17389 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:12:05.088832 17389 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:12:05.088843 17389 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:12:05.089303 17389 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:12:05.089318 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.089323 17389 net.cpp:165] Memory required for data: 916993500\nI0817 16:12:05.089331 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:12:05.089344 17389 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:12:05.089349 17389 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:12:05.089360 17389 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:12:05.089613 17389 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:12:05.089627 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.089632 17389 net.cpp:165] Memory required for data: 921089500\nI0817 16:12:05.089643 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:05.089653 17389 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:12:05.089658 17389 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:12:05.089666 17389 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.089723 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:05.089865 17389 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:12:05.089877 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.089882 17389 net.cpp:165] Memory required for data: 925185500\nI0817 16:12:05.089891 17389 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:12:05.089903 17389 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:12:05.089910 17389 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:12:05.089917 17389 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:05.089926 17389 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:12:05.089952 17389 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:12:05.089962 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.089965 17389 net.cpp:165] Memory required for data: 929281500\nI0817 16:12:05.089972 17389 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:12:05.089982 17389 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:12:05.089987 17389 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:12:05.089998 17389 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:12:05.090008 17389 net.cpp:150] Setting up L2_b2_relu\nI0817 16:12:05.090014 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.090019 17389 net.cpp:165] Memory required for data: 933377500\nI0817 16:12:05.090023 17389 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.090030 17389 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.090035 17389 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:12:05.090044 17389 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:05.090054 17389 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:05.090100 17389 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.090111 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.090117 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.090122 17389 net.cpp:165] Memory required for data: 941569500\nI0817 16:12:05.090127 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:12:05.090147 17389 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:12:05.090154 17389 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:05.090164 17389 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:12:05.090636 17389 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:12:05.090651 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.090656 17389 net.cpp:165] Memory required for data: 945665500\nI0817 16:12:05.090664 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:12:05.090675 17389 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:12:05.090682 17389 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:12:05.090693 17389 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:12:05.090934 17389 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:12:05.090947 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.090952 17389 net.cpp:165] Memory required for data: 949761500\nI0817 16:12:05.090963 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:05.090971 17389 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:12:05.090978 17389 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:12:05.090986 17389 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.091042 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:05.091188 17389 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:12:05.091204 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.091209 17389 net.cpp:165] Memory required for data: 953857500\nI0817 16:12:05.091218 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:12:05.091226 17389 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:12:05.091233 17389 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:12:05.091239 17389 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.091249 17389 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:12:05.091256 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.091260 17389 net.cpp:165] Memory required for data: 957953500\nI0817 16:12:05.091265 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:12:05.091279 17389 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:12:05.091285 17389 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:12:05.091296 17389 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:12:05.091763 17389 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:12:05.091778 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.091784 17389 net.cpp:165] Memory required for data: 962049500\nI0817 16:12:05.091792 17389 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:12:05.091804 17389 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:12:05.091810 17389 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:12:05.091821 17389 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:12:05.092067 17389 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:12:05.092079 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092085 17389 net.cpp:165] Memory required for data: 966145500\nI0817 16:12:05.092095 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:05.092104 17389 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:12:05.092110 17389 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:12:05.092118 17389 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.092175 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:05.092319 17389 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:12:05.092332 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092337 17389 net.cpp:165] Memory required for data: 970241500\nI0817 16:12:05.092345 17389 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:12:05.092357 17389 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:12:05.092365 17389 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:12:05.092377 17389 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:05.092386 17389 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:12:05.092413 17389 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:12:05.092422 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092427 17389 net.cpp:165] Memory required for data: 974337500\nI0817 16:12:05.092432 17389 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:12:05.092453 17389 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:12:05.092458 17389 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:12:05.092465 17389 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.092475 17389 net.cpp:150] Setting up L2_b3_relu\nI0817 16:12:05.092483 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092488 17389 net.cpp:165] Memory required for data: 978433500\nI0817 16:12:05.092492 17389 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.092506 17389 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.092512 17389 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:12:05.092519 17389 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:05.092528 17389 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:05.092579 17389 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.092592 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092597 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.092602 17389 net.cpp:165] Memory required for data: 986625500\nI0817 16:12:05.092607 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:12:05.092618 17389 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:12:05.092625 17389 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:05.092638 17389 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:12:05.093137 17389 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:12:05.093150 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.093156 17389 net.cpp:165] Memory required for data: 990721500\nI0817 16:12:05.093164 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:12:05.093176 17389 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:12:05.093183 17389 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:12:05.093191 17389 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:12:05.093436 17389 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:12:05.093451 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.093456 17389 net.cpp:165] Memory required for data: 994817500\nI0817 16:12:05.093466 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:05.093475 17389 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:12:05.093482 17389 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:12:05.093489 17389 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.093550 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:05.093699 17389 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:12:05.093711 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.093716 17389 net.cpp:165] Memory required for data: 998913500\nI0817 16:12:05.093725 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:12:05.093734 17389 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:12:05.093739 17389 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:12:05.093750 17389 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.093760 17389 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:12:05.093766 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.093771 17389 net.cpp:165] Memory required for data: 1003009500\nI0817 16:12:05.093776 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:12:05.093794 17389 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:12:05.093801 17389 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:12:05.093811 17389 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:12:05.094264 17389 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:12:05.094277 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.094282 17389 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:12:05.094291 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:12:05.094300 17389 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:12:05.094306 17389 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:12:05.094318 17389 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:12:05.094573 17389 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:12:05.094588 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.094593 17389 net.cpp:165] Memory required for data: 1011201500\nI0817 16:12:05.094602 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:05.094614 17389 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:12:05.094620 17389 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:12:05.094629 17389 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.094682 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:05.094831 17389 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:12:05.094844 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.094851 17389 net.cpp:165] Memory required for data: 1015297500\nI0817 16:12:05.094859 17389 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:12:05.094871 17389 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:12:05.094877 17389 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:12:05.094884 17389 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:05.094895 17389 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:12:05.094923 17389 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:12:05.094931 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.094936 17389 net.cpp:165] Memory required for data: 1019393500\nI0817 16:12:05.094941 17389 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:12:05.094949 17389 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:12:05.094954 17389 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:12:05.094965 17389 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:12:05.094974 17389 net.cpp:150] Setting up L2_b4_relu\nI0817 16:12:05.094981 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.094986 17389 net.cpp:165] Memory required for data: 1023489500\nI0817 16:12:05.094990 17389 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.094997 17389 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.095002 17389 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:12:05.095010 17389 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:05.095021 17389 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:05.095067 17389 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.095078 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.095085 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.095090 17389 net.cpp:165] Memory required for data: 1031681500\nI0817 16:12:05.095095 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:12:05.095106 17389 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:12:05.095113 17389 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:05.095124 17389 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:12:05.095588 17389 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:12:05.095609 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.095614 17389 net.cpp:165] Memory required for data: 1035777500\nI0817 16:12:05.095623 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:12:05.095635 17389 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:12:05.095641 17389 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:12:05.095650 17389 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:12:05.095899 17389 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:12:05.095912 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.095917 17389 net.cpp:165] Memory required for data: 1039873500\nI0817 16:12:05.095927 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:05.095938 17389 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:12:05.095945 17389 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:12:05.095952 17389 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.096006 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:05.096158 17389 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:12:05.096170 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.096175 17389 net.cpp:165] Memory required for data: 1043969500\nI0817 16:12:05.096184 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:12:05.096195 17389 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:12:05.096201 17389 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:12:05.096211 17389 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.096221 17389 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:12:05.096228 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.096232 17389 net.cpp:165] Memory required for data: 1048065500\nI0817 16:12:05.096237 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:12:05.096248 17389 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:12:05.096253 17389 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:12:05.096266 17389 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:12:05.096730 17389 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:12:05.096745 17389 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.096750 17389 net.cpp:165] Memory required for data: 1052161500\nI0817 16:12:05.096758 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:12:05.096767 17389 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:12:05.096773 17389 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:12:05.096787 17389 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:12:05.097033 17389 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:12:05.097045 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097050 17389 net.cpp:165] Memory required for data: 1056257500\nI0817 16:12:05.097060 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:05.097071 17389 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:12:05.097079 17389 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:12:05.097085 17389 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.097141 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:05.097286 17389 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:12:05.097299 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097304 17389 net.cpp:165] Memory required for data: 1060353500\nI0817 16:12:05.097313 17389 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:12:05.097324 17389 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:12:05.097332 17389 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:12:05.097338 17389 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:05.097347 17389 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:12:05.097375 17389 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:12:05.097385 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097396 17389 net.cpp:165] Memory required for data: 1064449500\nI0817 16:12:05.097401 17389 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:12:05.097409 17389 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:12:05.097415 17389 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:12:05.097424 17389 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.097434 17389 net.cpp:150] Setting up L2_b5_relu\nI0817 16:12:05.097441 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097446 17389 net.cpp:165] Memory required for data: 1068545500\nI0817 16:12:05.097451 17389 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.097458 17389 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.097463 17389 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:12:05.097471 17389 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:05.097481 17389 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:05.097534 17389 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.097546 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097553 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.097558 17389 net.cpp:165] Memory required for data: 1076737500\nI0817 16:12:05.097563 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:12:05.097574 17389 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:12:05.097581 17389 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:05.097594 17389 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:12:05.098060 17389 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:12:05.098074 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.098079 17389 net.cpp:165] Memory required for data: 1080833500\nI0817 16:12:05.098088 17389 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:12:05.098096 17389 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:12:05.098104 17389 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:12:05.098114 17389 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:12:05.098364 17389 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:12:05.098376 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.098381 17389 net.cpp:165] Memory required for data: 1084929500\nI0817 16:12:05.098392 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:05.098403 17389 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:12:05.098409 17389 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:12:05.098417 17389 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.098471 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:05.098628 17389 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:12:05.098640 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.098645 17389 net.cpp:165] Memory required for data: 1089025500\nI0817 16:12:05.098654 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:12:05.098665 17389 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:12:05.098672 17389 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:12:05.098680 17389 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.098690 17389 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:12:05.098696 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.098701 17389 net.cpp:165] Memory required for data: 1093121500\nI0817 16:12:05.098706 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:12:05.098721 17389 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:12:05.098726 17389 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:12:05.098739 17389 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:12:05.099195 17389 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:12:05.099215 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.099221 17389 net.cpp:165] Memory required for data: 1097217500\nI0817 16:12:05.099230 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:12:05.099238 17389 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:12:05.099246 17389 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:12:05.099256 17389 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:12:05.099506 17389 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:12:05.099520 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.099525 17389 net.cpp:165] Memory required for data: 1101313500\nI0817 16:12:05.099535 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:05.099550 17389 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:12:05.099556 17389 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:12:05.099565 17389 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.099619 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:05.099766 17389 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:12:05.099778 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.099783 17389 net.cpp:165] Memory required for data: 1105409500\nI0817 16:12:05.099792 17389 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:12:05.099804 17389 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:12:05.099812 17389 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:12:05.099818 17389 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:05.099827 17389 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:12:05.099855 17389 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:12:05.099864 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.099869 17389 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:12:05.099874 17389 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:12:05.099882 17389 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:12:05.099887 17389 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:12:05.099895 17389 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.099907 17389 net.cpp:150] Setting up L2_b6_relu\nI0817 16:12:05.099915 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.099920 17389 net.cpp:165] Memory required for data: 1113601500\nI0817 16:12:05.099923 17389 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.099931 17389 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.099936 17389 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:12:05.099943 17389 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:05.099953 17389 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:05.100002 17389 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.100013 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.100019 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.100024 17389 net.cpp:165] Memory required for data: 1121793500\nI0817 16:12:05.100029 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:12:05.100040 17389 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:12:05.100046 17389 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:05.100059 17389 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:12:05.100528 17389 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:12:05.100543 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.100548 17389 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:12:05.100555 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:12:05.100564 17389 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:12:05.100577 17389 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:12:05.100591 17389 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:12:05.100844 17389 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:12:05.100857 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.100862 17389 net.cpp:165] Memory required for data: 1129985500\nI0817 16:12:05.100872 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:05.100884 17389 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:12:05.100890 17389 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:12:05.100898 17389 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.100953 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:05.101099 17389 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:12:05.101112 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.101117 17389 net.cpp:165] Memory required for data: 1134081500\nI0817 16:12:05.101126 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:12:05.101140 17389 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:12:05.101146 17389 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:12:05.101153 17389 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.101162 17389 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:12:05.101171 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.101174 17389 net.cpp:165] Memory required for data: 1138177500\nI0817 16:12:05.101179 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:12:05.101192 17389 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:12:05.101199 17389 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:12:05.101210 17389 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:12:05.101678 17389 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:12:05.101692 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.101697 17389 net.cpp:165] Memory required for data: 1142273500\nI0817 16:12:05.101706 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:12:05.101716 17389 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:12:05.101722 17389 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:12:05.101733 17389 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:12:05.101982 17389 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:12:05.101994 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.102000 17389 net.cpp:165] Memory required for data: 1146369500\nI0817 16:12:05.102010 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:05.102023 17389 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:12:05.102030 17389 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:12:05.102037 17389 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.102089 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:05.102236 17389 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:12:05.102247 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.102252 17389 net.cpp:165] Memory required for data: 1150465500\nI0817 16:12:05.102262 17389 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:12:05.102270 17389 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:12:05.102277 17389 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:12:05.102288 17389 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:05.102298 17389 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:12:05.102324 17389 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:12:05.102336 17389 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:05.102340 17389 net.cpp:165] Memory required for data: 1154561500\nI0817 16:12:05.102346 17389 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:12:05.102354 17389 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:12:05.102360 17389 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:12:05.102373 17389 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.102383 17389 net.cpp:150] Setting up L2_b7_relu\nI0817 16:12:05.102391 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.102394 17389 net.cpp:165] Memory required for data: 1158657500\nI0817 16:12:05.102399 17389 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.102409 17389 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.102416 17389 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:12:05.102422 17389 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:05.102432 17389 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:05.102479 17389 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.102491 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.102504 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.102509 17389 net.cpp:165] Memory required for data: 1166849500\nI0817 16:12:05.102514 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:12:05.102525 17389 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:12:05.102532 17389 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:05.102545 17389 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:12:05.103018 17389 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:12:05.103031 17389 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:05.103036 17389 net.cpp:165] Memory required for data: 1170945500\nI0817 16:12:05.103045 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:12:05.103055 17389 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:12:05.103060 17389 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:12:05.103071 17389 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:12:05.103323 17389 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:12:05.103335 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.103340 17389 net.cpp:165] Memory required for data: 1175041500\nI0817 16:12:05.103351 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:05.103363 17389 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:12:05.103368 17389 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:12:05.103376 17389 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.103431 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:05.103591 17389 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:12:05.103605 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.103610 17389 net.cpp:165] Memory required for data: 1179137500\nI0817 16:12:05.103618 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:12:05.103629 17389 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:12:05.103636 17389 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:12:05.103643 17389 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.103652 17389 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:12:05.103659 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.103664 17389 net.cpp:165] Memory required for data: 1183233500\nI0817 16:12:05.103669 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:12:05.103683 17389 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:12:05.103689 17389 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:12:05.103700 17389 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:12:05.104163 17389 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:12:05.104176 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.104182 17389 net.cpp:165] Memory required for data: 1187329500\nI0817 16:12:05.104190 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:12:05.104199 17389 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:12:05.104214 17389 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:12:05.104223 17389 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:12:05.104476 17389 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:12:05.104490 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.104501 17389 net.cpp:165] Memory required for data: 1191425500\nI0817 16:12:05.104511 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:05.104519 17389 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:12:05.104526 17389 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:12:05.104537 17389 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.104594 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:05.104743 17389 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:12:05.104756 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.104761 17389 net.cpp:165] Memory required for data: 1195521500\nI0817 16:12:05.104770 17389 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:12:05.104779 17389 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:12:05.104785 17389 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:12:05.104792 17389 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:05.104804 17389 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:12:05.104831 17389 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:12:05.104843 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.104848 17389 net.cpp:165] Memory required for data: 1199617500\nI0817 16:12:05.104853 17389 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:12:05.104861 17389 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:12:05.104866 17389 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:12:05.104873 17389 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.104883 17389 net.cpp:150] Setting up L2_b8_relu\nI0817 16:12:05.104890 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.104894 17389 net.cpp:165] Memory required for data: 1203713500\nI0817 16:12:05.104899 17389 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.104909 17389 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.104914 17389 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:12:05.104923 17389 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:05.104944 17389 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:05.104991 17389 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.105007 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.105015 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.105020 17389 net.cpp:165] Memory required for data: 1211905500\nI0817 16:12:05.105024 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:12:05.105038 17389 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:12:05.105044 17389 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:05.105053 17389 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:12:05.105530 17389 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:12:05.105543 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.105548 17389 net.cpp:165] Memory required for data: 1216001500\nI0817 16:12:05.105557 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:12:05.105569 17389 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:12:05.105576 17389 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:12:05.105584 17389 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:12:05.105834 17389 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:12:05.105846 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.105859 17389 net.cpp:165] Memory required for data: 1220097500\nI0817 16:12:05.105870 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:05.105877 17389 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:12:05.105883 17389 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:12:05.105895 17389 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.105952 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:05.106101 17389 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:12:05.106114 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.106118 17389 net.cpp:165] Memory required for data: 1224193500\nI0817 16:12:05.106127 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:12:05.106135 17389 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:12:05.106142 17389 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:12:05.106153 17389 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.106161 17389 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:12:05.106168 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.106173 17389 net.cpp:165] Memory required for data: 1228289500\nI0817 16:12:05.106178 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:12:05.106191 17389 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:12:05.106197 17389 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:12:05.106206 17389 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:12:05.106688 17389 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:12:05.106703 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.106708 17389 net.cpp:165] Memory required for data: 1232385500\nI0817 16:12:05.106716 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:12:05.106727 17389 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:12:05.106734 17389 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:12:05.106744 17389 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:12:05.106992 17389 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:12:05.107004 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107009 17389 net.cpp:165] Memory required for data: 1236481500\nI0817 16:12:05.107053 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:05.107065 17389 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:12:05.107072 17389 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:12:05.107080 17389 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.107139 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:05.107286 17389 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:12:05.107300 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107305 17389 net.cpp:165] Memory required for data: 1240577500\nI0817 16:12:05.107313 17389 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:12:05.107326 17389 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:12:05.107333 17389 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:12:05.107341 17389 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:05.107352 17389 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:12:05.107378 17389 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:12:05.107388 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107393 17389 net.cpp:165] Memory required for data: 1244673500\nI0817 16:12:05.107398 17389 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:12:05.107405 17389 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:12:05.107410 17389 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:12:05.107421 17389 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.107430 17389 net.cpp:150] Setting up L2_b9_relu\nI0817 16:12:05.107437 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107442 17389 net.cpp:165] Memory required for data: 1248769500\nI0817 16:12:05.107453 17389 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.107465 17389 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.107470 17389 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:12:05.107477 17389 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:05.107487 17389 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:05.107542 17389 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.107555 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107563 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.107566 17389 net.cpp:165] Memory required for data: 1256961500\nI0817 16:12:05.107571 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:12:05.107583 17389 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:12:05.107589 17389 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:05.107601 17389 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:12:05.108072 17389 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:12:05.108088 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.108093 17389 net.cpp:165] Memory required for data: 1257985500\nI0817 16:12:05.108101 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:12:05.108113 17389 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:12:05.108119 17389 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:12:05.108127 17389 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:12:05.108392 17389 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:12:05.108404 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.108410 17389 net.cpp:165] Memory required for data: 1259009500\nI0817 16:12:05.108420 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:05.108429 17389 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:12:05.108435 17389 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:12:05.108443 17389 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.108507 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:05.108664 17389 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:12:05.108675 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.108680 17389 net.cpp:165] Memory required for data: 1260033500\nI0817 16:12:05.108690 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:12:05.108697 17389 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:12:05.108705 17389 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:12:05.108711 17389 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.108721 17389 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:12:05.108727 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.108732 17389 net.cpp:165] Memory required for data: 1261057500\nI0817 16:12:05.108737 17389 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:12:05.108750 17389 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:12:05.108757 17389 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:12:05.108768 17389 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:12:05.109243 17389 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:12:05.109257 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.109262 17389 net.cpp:165] Memory required for data: 1262081500\nI0817 16:12:05.109272 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:12:05.109283 17389 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:12:05.109289 17389 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:12:05.109297 17389 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:12:05.109561 17389 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:12:05.109575 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.109586 17389 net.cpp:165] Memory required for data: 1263105500\nI0817 16:12:05.109597 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:05.109611 17389 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:12:05.109617 17389 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:12:05.109625 17389 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.109684 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:05.109848 17389 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:12:05.109861 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.109866 17389 net.cpp:165] Memory required for data: 1264129500\nI0817 16:12:05.109875 17389 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:12:05.109887 17389 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:12:05.109894 17389 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:05.109905 17389 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:12:05.109938 17389 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:12:05.109947 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.109951 17389 net.cpp:165] Memory required for data: 1265153500\nI0817 16:12:05.109957 17389 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:12:05.109968 17389 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:12:05.109975 17389 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:12:05.109982 17389 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:12:05.109989 17389 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:12:05.110020 17389 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:12:05.110029 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.110034 17389 net.cpp:165] Memory required for data: 1266177500\nI0817 16:12:05.110039 17389 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:12:05.110046 17389 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:12:05.110052 17389 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:12:05.110062 17389 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.110072 17389 net.cpp:150] Setting up L3_b1_relu\nI0817 16:12:05.110080 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.110083 17389 net.cpp:165] Memory required for data: 1267201500\nI0817 16:12:05.110088 17389 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:12:05.110098 17389 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:12:05.110105 17389 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:12:05.111335 17389 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:12:05.111353 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.111358 17389 net.cpp:165] Memory required for data: 1268225500\nI0817 16:12:05.111364 17389 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:12:05.111377 17389 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:12:05.111383 17389 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:12:05.111392 17389 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:12:05.111399 17389 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:12:05.111443 17389 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:12:05.111454 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.111459 17389 net.cpp:165] Memory required for data: 1270273500\nI0817 16:12:05.111464 17389 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.111472 17389 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.111479 17389 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:12:05.111490 17389 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:05.111505 17389 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:05.111557 17389 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.111573 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.111582 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.111593 17389 net.cpp:165] Memory required for data: 1274369500\nI0817 16:12:05.111599 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:12:05.111611 17389 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:12:05.111618 17389 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:05.111626 17389 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:12:05.113641 17389 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:12:05.113657 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.113663 17389 net.cpp:165] Memory required for data: 1276417500\nI0817 16:12:05.113672 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:12:05.113685 17389 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:12:05.113692 17389 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:12:05.113704 17389 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:12:05.113965 17389 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:12:05.113979 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.113984 17389 net.cpp:165] Memory required for data: 1278465500\nI0817 16:12:05.113994 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:05.114003 17389 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:12:05.114011 17389 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:12:05.114022 17389 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.114079 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:05.114235 17389 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:12:05.114248 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.114253 17389 net.cpp:165] Memory required for data: 1280513500\nI0817 16:12:05.114262 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:12:05.114270 17389 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:12:05.114276 17389 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:12:05.114286 17389 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.114296 17389 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:12:05.114303 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.114308 17389 net.cpp:165] Memory required for data: 1282561500\nI0817 16:12:05.114313 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:12:05.114327 17389 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:12:05.114333 17389 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:12:05.114342 17389 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:12:05.115371 17389 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:12:05.115386 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.115391 17389 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:12:05.115401 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:12:05.115412 17389 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:12:05.115419 17389 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:12:05.115427 17389 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:12:05.115715 17389 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:12:05.115728 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.115733 17389 net.cpp:165] Memory required for data: 1286657500\nI0817 16:12:05.115744 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:05.115756 17389 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:12:05.115762 17389 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:12:05.115770 17389 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.115830 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:05.115993 17389 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:12:05.116004 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.116010 17389 net.cpp:165] Memory required for data: 1288705500\nI0817 16:12:05.116019 17389 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:12:05.116036 17389 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:12:05.116044 17389 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:12:05.116051 17389 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:05.116062 17389 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:12:05.116096 17389 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:12:05.116106 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.116111 17389 net.cpp:165] Memory required for data: 1290753500\nI0817 16:12:05.116116 17389 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:12:05.116127 17389 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:12:05.116134 17389 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:12:05.116142 17389 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:12:05.116152 17389 net.cpp:150] Setting up L3_b2_relu\nI0817 16:12:05.116158 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.116163 17389 net.cpp:165] Memory required for data: 1292801500\nI0817 16:12:05.116168 17389 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.116175 17389 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.116180 17389 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:12:05.116189 17389 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:05.116197 17389 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:05.116247 17389 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.116259 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.116266 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.116271 17389 net.cpp:165] Memory required for data: 1296897500\nI0817 16:12:05.116276 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:12:05.116291 17389 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:12:05.116297 17389 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:05.116307 17389 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:12:05.117331 17389 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:12:05.117347 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.117352 17389 net.cpp:165] Memory required for data: 1298945500\nI0817 16:12:05.117360 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:12:05.117372 17389 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:12:05.117379 17389 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:12:05.117390 17389 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:12:05.117660 17389 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:12:05.117672 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.117677 17389 net.cpp:165] Memory required for data: 1300993500\nI0817 16:12:05.117687 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:05.117697 17389 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:12:05.117702 17389 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:12:05.117714 17389 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.117772 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:05.117933 17389 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:12:05.117944 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.117949 17389 net.cpp:165] Memory required for data: 1303041500\nI0817 16:12:05.117959 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:12:05.117966 17389 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:12:05.117972 17389 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:12:05.117983 17389 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.117993 17389 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:12:05.118000 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.118011 17389 net.cpp:165] Memory required for data: 1305089500\nI0817 16:12:05.118017 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:12:05.118031 17389 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:12:05.118036 17389 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:12:05.118046 17389 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:12:05.119150 17389 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:12:05.119168 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.119173 17389 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:12:05.119182 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:12:05.119194 17389 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:12:05.119201 17389 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:12:05.119210 17389 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:12:05.119484 17389 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:12:05.119503 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.119509 17389 net.cpp:165] Memory required for data: 1309185500\nI0817 16:12:05.119519 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:05.119531 17389 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:12:05.119539 17389 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:12:05.119549 17389 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.119608 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:05.119771 17389 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:12:05.119783 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.119788 17389 net.cpp:165] Memory required for data: 1311233500\nI0817 16:12:05.119797 17389 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:12:05.119807 17389 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:12:05.119813 17389 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:12:05.119820 17389 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:05.119832 17389 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:12:05.119865 17389 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:12:05.119876 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.119881 17389 net.cpp:165] Memory required for data: 1313281500\nI0817 16:12:05.119886 17389 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:12:05.119897 17389 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:12:05.119904 17389 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:12:05.119910 17389 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.119920 17389 net.cpp:150] Setting up L3_b3_relu\nI0817 16:12:05.119927 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.119931 17389 net.cpp:165] Memory required for data: 1315329500\nI0817 16:12:05.119936 17389 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.119943 17389 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.119949 17389 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:12:05.119956 17389 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:05.119966 17389 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:05.120018 17389 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.120030 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.120038 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.120041 17389 net.cpp:165] Memory required for data: 1319425500\nI0817 16:12:05.120046 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:12:05.120060 17389 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:12:05.120067 17389 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:05.120076 17389 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:12:05.121106 17389 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:12:05.121121 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.121126 17389 net.cpp:165] Memory required for data: 1321473500\nI0817 16:12:05.121135 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:12:05.121147 17389 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:12:05.121155 17389 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:12:05.121165 17389 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:12:05.121436 17389 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:12:05.121449 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.121454 17389 net.cpp:165] Memory required for data: 1323521500\nI0817 16:12:05.121464 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:05.121474 17389 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:12:05.121479 17389 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:12:05.121490 17389 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.121556 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:05.121717 17389 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:12:05.121731 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.121736 17389 net.cpp:165] Memory required for data: 1325569500\nI0817 16:12:05.121745 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:12:05.121753 17389 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:12:05.121759 17389 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:12:05.121769 17389 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.121779 17389 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:12:05.121786 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.121791 17389 net.cpp:165] Memory required for data: 1327617500\nI0817 16:12:05.121796 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:12:05.121809 17389 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:12:05.121816 17389 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:12:05.121824 17389 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:12:05.122859 17389 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:12:05.122875 17389 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:12:05.122880 17389 net.cpp:165] Memory required for data: 1329665500\nI0817 16:12:05.122889 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:12:05.122898 17389 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:12:05.122905 17389 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:12:05.122916 17389 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:12:05.123191 17389 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:12:05.123204 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123209 17389 net.cpp:165] Memory required for data: 1331713500\nI0817 16:12:05.123219 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:05.123230 17389 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:12:05.123237 17389 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:12:05.123245 17389 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.123304 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:05.123469 17389 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:12:05.123482 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123487 17389 net.cpp:165] Memory required for data: 1333761500\nI0817 16:12:05.123502 17389 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:12:05.123512 17389 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:12:05.123518 17389 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:12:05.123525 17389 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:05.123538 17389 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:12:05.123571 17389 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:12:05.123587 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123594 17389 net.cpp:165] Memory required for data: 1335809500\nI0817 16:12:05.123598 17389 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:12:05.123610 17389 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:12:05.123616 17389 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:12:05.123623 17389 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:12:05.123632 17389 net.cpp:150] Setting up L3_b4_relu\nI0817 16:12:05.123641 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123644 17389 net.cpp:165] Memory required for data: 1337857500\nI0817 16:12:05.123649 17389 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.123656 17389 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.123662 17389 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:12:05.123669 17389 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:05.123679 17389 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:05.123729 17389 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.123741 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123749 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.123754 17389 net.cpp:165] Memory required for data: 1341953500\nI0817 16:12:05.123759 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:12:05.123772 17389 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:12:05.123778 17389 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:05.123788 17389 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:12:05.124814 17389 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:12:05.124830 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.124835 17389 net.cpp:165] Memory required for data: 1344001500\nI0817 16:12:05.124842 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:12:05.124856 17389 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:12:05.124864 17389 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:12:05.124876 17389 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:12:05.126428 17389 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:12:05.126446 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.126451 17389 net.cpp:165] Memory required for data: 1346049500\nI0817 16:12:05.126462 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:05.126476 17389 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:12:05.126482 17389 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:12:05.126493 17389 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.126564 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:05.126724 17389 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:12:05.126737 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.126742 17389 net.cpp:165] Memory required for data: 1348097500\nI0817 16:12:05.126751 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:12:05.126760 17389 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:12:05.126765 17389 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:12:05.126776 17389 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.126786 17389 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:12:05.126793 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.126798 17389 net.cpp:165] Memory required for data: 1350145500\nI0817 16:12:05.126803 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:12:05.126817 17389 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:12:05.126824 17389 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:12:05.126833 17389 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:12:05.128844 17389 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:12:05.128861 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.128866 17389 net.cpp:165] Memory required for data: 1352193500\nI0817 16:12:05.128875 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:12:05.128888 17389 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:12:05.128895 17389 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:12:05.128904 17389 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:12:05.129165 17389 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:12:05.129179 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129184 17389 net.cpp:165] Memory required for data: 1354241500\nI0817 16:12:05.129194 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:05.129205 17389 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:12:05.129212 17389 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:12:05.129223 17389 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.129281 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:05.129437 17389 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:12:05.129451 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129456 17389 net.cpp:165] Memory required for data: 1356289500\nI0817 16:12:05.129464 17389 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:12:05.129473 17389 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:12:05.129480 17389 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:12:05.129487 17389 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:05.129504 17389 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:12:05.129539 17389 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:12:05.129550 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129555 17389 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:12:05.129561 17389 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:12:05.129572 17389 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:12:05.129578 17389 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:12:05.129585 17389 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.129595 17389 net.cpp:150] Setting up L3_b5_relu\nI0817 16:12:05.129602 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129607 17389 net.cpp:165] Memory required for data: 1360385500\nI0817 16:12:05.129611 17389 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.129618 17389 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.129624 17389 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:12:05.129631 17389 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:05.129642 17389 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:05.129689 17389 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.129701 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129707 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.129712 17389 net.cpp:165] Memory required for data: 1364481500\nI0817 16:12:05.129717 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:12:05.129731 17389 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:12:05.129739 17389 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:05.129747 17389 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:12:05.130756 17389 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:12:05.130771 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.130776 17389 net.cpp:165] Memory required for data: 1366529500\nI0817 16:12:05.130784 
17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:12:05.130798 17389 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:12:05.130812 17389 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:12:05.130825 17389 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:12:05.131078 17389 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:12:05.131091 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.131096 17389 net.cpp:165] Memory required for data: 1368577500\nI0817 16:12:05.131106 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:05.131115 17389 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:12:05.131121 17389 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:12:05.131134 17389 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.131192 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:05.131345 17389 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:12:05.131358 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.131363 17389 net.cpp:165] Memory required for data: 1370625500\nI0817 16:12:05.131372 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:12:05.131381 17389 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:12:05.131386 17389 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:12:05.131397 17389 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.131407 17389 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:12:05.131413 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.131418 17389 net.cpp:165] Memory required for data: 1372673500\nI0817 16:12:05.131423 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:12:05.131436 17389 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:12:05.131443 17389 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:12:05.131453 17389 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:12:05.132459 17389 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:12:05.132474 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.132479 17389 net.cpp:165] Memory required for data: 1374721500\nI0817 16:12:05.132488 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:12:05.132503 17389 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:12:05.132509 17389 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:12:05.132524 17389 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:12:05.132786 17389 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:12:05.132802 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.132807 17389 net.cpp:165] Memory required for data: 1376769500\nI0817 16:12:05.132817 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:05.132827 17389 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:12:05.132833 17389 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:12:05.132840 17389 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.132897 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:05.133050 17389 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:12:05.133064 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.133069 17389 net.cpp:165] Memory required for data: 1378817500\nI0817 16:12:05.133077 17389 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:12:05.133086 17389 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:12:05.133096 17389 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:12:05.133103 17389 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:05.133111 17389 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:12:05.133147 17389 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:12:05.133158 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.133163 
17389 net.cpp:165] Memory required for data: 1380865500\nI0817 16:12:05.133168 17389 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:12:05.133177 17389 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:12:05.133183 17389 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:12:05.133199 17389 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.133209 17389 net.cpp:150] Setting up L3_b6_relu\nI0817 16:12:05.133216 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.133221 17389 net.cpp:165] Memory required for data: 1382913500\nI0817 16:12:05.133225 17389 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.133232 17389 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.133239 17389 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:12:05.133245 17389 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:05.133255 17389 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:05.133304 17389 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.133316 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.133322 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.133327 17389 net.cpp:165] Memory required for data: 1387009500\nI0817 16:12:05.133332 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:12:05.133345 17389 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:12:05.133352 17389 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:05.133361 17389 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:12:05.134378 17389 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:12:05.134393 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.134398 17389 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:12:05.134407 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:12:05.134416 17389 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:12:05.134423 17389 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:12:05.134434 17389 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:12:05.134697 17389 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:12:05.134711 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.134716 17389 net.cpp:165] Memory required for data: 1391105500\nI0817 16:12:05.134727 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:05.134735 17389 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:12:05.134742 17389 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:12:05.134752 17389 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.134810 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:05.134968 17389 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:12:05.134981 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.134986 17389 net.cpp:165] Memory required for data: 1393153500\nI0817 16:12:05.134995 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:12:05.135030 17389 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:12:05.135040 17389 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:12:05.135046 17389 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.135057 17389 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:12:05.135064 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.135069 17389 net.cpp:165] Memory required for data: 1395201500\nI0817 16:12:05.135074 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:12:05.135085 17389 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:12:05.135092 17389 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:12:05.135100 
17389 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:12:05.136126 17389 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:12:05.136140 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.136145 17389 net.cpp:165] Memory required for data: 1397249500\nI0817 16:12:05.136154 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:12:05.136167 17389 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:12:05.136181 17389 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:12:05.136193 17389 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:12:05.136459 17389 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:12:05.136472 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.136477 17389 net.cpp:165] Memory required for data: 1399297500\nI0817 16:12:05.136487 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:05.136504 17389 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:12:05.136515 17389 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:12:05.136533 17389 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.136601 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:05.136760 17389 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:12:05.136773 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.136778 17389 net.cpp:165] Memory required for data: 1401345500\nI0817 16:12:05.136787 17389 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:12:05.136800 17389 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:12:05.136806 17389 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:12:05.136813 17389 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:05.136821 17389 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:12:05.136859 17389 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:12:05.136871 17389 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:12:05.136876 17389 net.cpp:165] Memory required for data: 1403393500\nI0817 16:12:05.136881 17389 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:12:05.136889 17389 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:12:05.136895 17389 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:12:05.136905 17389 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.136915 17389 net.cpp:150] Setting up L3_b7_relu\nI0817 16:12:05.136922 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.136927 17389 net.cpp:165] Memory required for data: 1405441500\nI0817 16:12:05.136931 17389 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.136939 17389 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.136945 17389 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:12:05.136951 17389 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:05.136960 17389 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:05.137009 17389 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.137022 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.137027 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.137032 17389 net.cpp:165] Memory required for data: 1409537500\nI0817 16:12:05.137037 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:12:05.137048 17389 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:12:05.137055 17389 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:05.137068 17389 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:12:05.138097 17389 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:12:05.138111 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:12:05.138116 17389 net.cpp:165] Memory required for data: 1411585500\nI0817 16:12:05.138125 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:12:05.138134 17389 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:12:05.138141 17389 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:12:05.138152 17389 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:12:05.138423 17389 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:12:05.138437 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.138442 17389 net.cpp:165] Memory required for data: 1413633500\nI0817 16:12:05.138459 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:05.138468 17389 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:12:05.138474 17389 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:12:05.138483 17389 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.138700 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:05.138859 17389 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:12:05.138875 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.138881 17389 net.cpp:165] Memory required for data: 1415681500\nI0817 16:12:05.138890 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:12:05.138898 17389 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:12:05.138906 17389 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:12:05.138912 17389 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.138922 17389 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:12:05.138929 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.138934 17389 net.cpp:165] Memory required for data: 1417729500\nI0817 16:12:05.138939 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:12:05.138953 17389 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:12:05.138959 17389 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:12:05.138968 17389 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:12:05.139988 17389 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:12:05.140003 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140008 17389 net.cpp:165] Memory required for data: 1419777500\nI0817 16:12:05.140017 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:12:05.140029 17389 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:12:05.140036 17389 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:12:05.140048 17389 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:12:05.140306 17389 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:12:05.140319 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140326 17389 net.cpp:165] Memory required for data: 1421825500\nI0817 16:12:05.140336 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:05.140343 17389 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:12:05.140350 17389 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:12:05.140362 17389 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.140419 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:05.140585 17389 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:12:05.140599 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140604 17389 net.cpp:165] Memory required for data: 1423873500\nI0817 16:12:05.140614 17389 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:12:05.140625 17389 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:12:05.140631 17389 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:12:05.140640 17389 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:05.140647 17389 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:12:05.140686 17389 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:12:05.140698 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140703 17389 net.cpp:165] Memory required for data: 1425921500\nI0817 16:12:05.140709 17389 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:12:05.140717 17389 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:12:05.140722 17389 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:12:05.140733 17389 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.140743 17389 net.cpp:150] Setting up L3_b8_relu\nI0817 16:12:05.140749 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140754 17389 net.cpp:165] Memory required for data: 1427969500\nI0817 16:12:05.140758 17389 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.140774 17389 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.140779 17389 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:12:05.140786 17389 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:05.140796 17389 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:05.140846 17389 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.140857 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140864 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.140869 17389 net.cpp:165] Memory required for data: 1432065500\nI0817 16:12:05.140874 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:12:05.140885 17389 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:12:05.140892 17389 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:05.140903 17389 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:12:05.143128 17389 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:12:05.143146 
17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.143151 17389 net.cpp:165] Memory required for data: 1434113500\nI0817 16:12:05.143160 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:12:05.143173 17389 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:12:05.143180 17389 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:12:05.143189 17389 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:12:05.143455 17389 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:12:05.143468 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.143473 17389 net.cpp:165] Memory required for data: 1436161500\nI0817 16:12:05.143483 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:05.143501 17389 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:12:05.143508 17389 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:12:05.143517 17389 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.143579 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:05.143740 17389 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:12:05.143754 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.143759 17389 net.cpp:165] Memory required for data: 1438209500\nI0817 16:12:05.143767 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:12:05.143779 17389 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:12:05.143785 17389 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:12:05.143795 17389 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.143805 17389 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:12:05.143812 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.143817 17389 net.cpp:165] Memory required for data: 1440257500\nI0817 16:12:05.143822 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:12:05.143833 17389 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:12:05.143839 17389 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:12:05.143851 17389 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:12:05.144871 17389 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:12:05.144886 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.144891 17389 net.cpp:165] Memory required for data: 1442305500\nI0817 16:12:05.144901 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:12:05.144913 17389 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:12:05.144919 17389 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:12:05.144928 17389 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:12:05.145195 17389 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:12:05.145208 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.145213 17389 net.cpp:165] Memory required for data: 1444353500\nI0817 16:12:05.145232 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:05.145241 17389 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:12:05.145247 17389 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:12:05.145256 17389 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.145316 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:05.145469 17389 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:12:05.145485 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.145490 17389 net.cpp:165] Memory required for data: 1446401500\nI0817 16:12:05.145504 17389 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:12:05.145514 17389 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:12:05.145521 17389 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:12:05.145550 17389 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:05.145558 17389 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:12:05.145604 17389 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:12:05.145617 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.145622 17389 net.cpp:165] Memory required for data: 1448449500\nI0817 16:12:05.145627 17389 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:12:05.145634 17389 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:12:05.145640 17389 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:12:05.145648 17389 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.145658 17389 net.cpp:150] Setting up L3_b9_relu\nI0817 16:12:05.145664 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.145669 17389 net.cpp:165] Memory required for data: 1450497500\nI0817 16:12:05.145674 17389 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:12:05.145685 17389 net.cpp:100] Creating Layer post_pool\nI0817 16:12:05.145691 17389 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:12:05.145699 17389 net.cpp:408] post_pool -> post_pool\nI0817 16:12:05.145733 17389 net.cpp:150] Setting up post_pool\nI0817 16:12:05.145745 17389 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:12:05.145750 17389 net.cpp:165] Memory required for data: 1450529500\nI0817 16:12:05.145756 17389 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:12:05.145845 17389 net.cpp:100] Creating Layer post_FC\nI0817 16:12:05.145858 17389 net.cpp:434] post_FC <- post_pool\nI0817 16:12:05.145874 17389 net.cpp:408] post_FC -> post_FC_top\nI0817 16:12:05.146175 17389 net.cpp:150] Setting up post_FC\nI0817 16:12:05.146191 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.146196 17389 net.cpp:165] Memory required for data: 1450579500\nI0817 16:12:05.146205 17389 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:12:05.146219 17389 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:12:05.146225 17389 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:12:05.146234 17389 
net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:12:05.146247 17389 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:12:05.146297 17389 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:12:05.146309 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.146315 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.146320 17389 net.cpp:165] Memory required for data: 1450679500\nI0817 16:12:05.146325 17389 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:12:05.146368 17389 net.cpp:100] Creating Layer accuracy\nI0817 16:12:05.146379 17389 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:12:05.146387 17389 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:12:05.146395 17389 net.cpp:408] accuracy -> accuracy\nI0817 16:12:05.146436 17389 net.cpp:150] Setting up accuracy\nI0817 16:12:05.146450 17389 net.cpp:157] Top shape: (1)\nI0817 16:12:05.146455 17389 net.cpp:165] Memory required for data: 1450679504\nI0817 16:12:05.146467 17389 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:05.146479 17389 net.cpp:100] Creating Layer loss\nI0817 16:12:05.146486 17389 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:12:05.146493 17389 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:12:05.146508 17389 net.cpp:408] loss -> loss\nI0817 16:12:05.146555 17389 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:05.146723 17389 net.cpp:150] Setting up loss\nI0817 16:12:05.146740 17389 net.cpp:157] Top shape: (1)\nI0817 16:12:05.146745 17389 net.cpp:160]     with loss weight 1\nI0817 16:12:05.146819 17389 net.cpp:165] Memory required for data: 1450679508\nI0817 16:12:05.146828 17389 net.cpp:226] loss needs backward computation.\nI0817 16:12:05.146834 17389 net.cpp:228] accuracy does not need backward computation.\nI0817 16:12:05.146842 17389 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:12:05.146847 
17389 net.cpp:226] post_FC needs backward computation.\nI0817 16:12:05.146852 17389 net.cpp:226] post_pool needs backward computation.\nI0817 16:12:05.146857 17389 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:12:05.146860 17389 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.146867 17389 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.146870 17389 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.146875 17389 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.146880 17389 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.146885 17389 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.146889 17389 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.146894 17389 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.146899 17389 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:12:05.146904 17389 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:12:05.146909 17389 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.146914 17389 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.146919 17389 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.146924 17389 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.146929 17389 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.146934 17389 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:12:05.146939 17389 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.146944 17389 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:12:05.146950 17389 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:12:05.146955 17389 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:12:05.146960 
17389 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0817 16:12:05.146965 17389 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.146970 17389 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.146975 17389 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.146980 17389 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.146984 17389 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.146988 17389 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.146993 17389 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.146998 17389 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:12:05.147004 17389 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:12:05.147008 17389 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.147014 17389 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.147019 17389 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.147024 17389 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.147037 17389 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.147042 17389 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.147047 17389 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.147053 17389 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.147058 17389 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:12:05.147063 17389 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:12:05.147068 17389 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:12:05.147073 17389 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.147078 17389 net.cpp:226] L3_b5_cbr2_bn needs backward 
computation.\nI0817 16:12:05.147083 17389 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.147088 17389 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.147092 17389 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.147097 17389 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.147102 17389 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.147107 17389 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:12:05.147112 17389 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:12:05.147117 17389 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.147123 17389 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.147132 17389 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.147137 17389 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.147142 17389 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.147147 17389 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.147152 17389 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.147157 17389 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.147163 17389 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:12:05.147168 17389 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:12:05.147173 17389 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:12:05.147179 17389 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.147184 17389 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.147189 17389 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.147194 17389 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.147199 17389 net.cpp:226] 
L3_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.147204 17389 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.147209 17389 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.147214 17389 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:12:05.147219 17389 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:12:05.147225 17389 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.147230 17389 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.147235 17389 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.147240 17389 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.147246 17389 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.147250 17389 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.147255 17389 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.147261 17389 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.147266 17389 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:12:05.147271 17389 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:12:05.147277 17389 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:12:05.147287 17389 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:12:05.147294 17389 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.147300 17389 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:12:05.147305 17389 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.147310 17389 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:12:05.147315 17389 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.147320 17389 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.147325 17389 
net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:12:05.147330 17389 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.147336 17389 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.147341 17389 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:12:05.147346 17389 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:12:05.147351 17389 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.147356 17389 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.147361 17389 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.147367 17389 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.147372 17389 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.147377 17389 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.147382 17389 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.147387 17389 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.147392 17389 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:12:05.147398 17389 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:12:05.147403 17389 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.147408 17389 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.147413 17389 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.147419 17389 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.147424 17389 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.147429 17389 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:12:05.147434 17389 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.147439 17389 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 
16:12:05.147445 17389 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:12:05.147450 17389 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:12:05.147455 17389 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:12:05.147461 17389 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.147466 17389 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.147471 17389 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.147480 17389 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.147485 17389 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.147490 17389 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.147501 17389 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.147508 17389 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:12:05.147514 17389 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:12:05.147519 17389 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.147526 17389 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.147531 17389 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.147536 17389 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.147541 17389 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.147547 17389 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.147557 17389 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.147562 17389 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.147568 17389 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:12:05.147573 17389 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:12:05.147578 17389 net.cpp:226] 
L2_b5_sum_eltwise needs backward computation.\nI0817 16:12:05.147584 17389 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.147589 17389 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:12:05.147595 17389 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.147600 17389 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.147605 17389 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.147610 17389 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.147615 17389 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.147621 17389 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:12:05.147626 17389 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:12:05.147631 17389 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.147637 17389 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.147642 17389 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.147647 17389 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.147653 17389 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.147658 17389 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.147663 17389 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.147670 17389 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.147675 17389 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:12:05.147680 17389 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:12:05.147686 17389 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:12:05.147691 17389 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.147696 17389 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.147702 
17389 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.147707 17389 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.147712 17389 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.147717 17389 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.147722 17389 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.147728 17389 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:12:05.147733 17389 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:12:05.147739 17389 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.147744 17389 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.147750 17389 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.147755 17389 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.147760 17389 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.147766 17389 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.147771 17389 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.147776 17389 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.147783 17389 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:12:05.147788 17389 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:12:05.147794 17389 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:12:05.147799 17389 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:12:05.147804 17389 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.147815 17389 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:12:05.147821 17389 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.147826 17389 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 
16:12:05.147832 17389 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.147837 17389 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.147842 17389 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:12:05.147847 17389 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.147853 17389 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.147858 17389 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:12:05.147864 17389 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:12:05.147869 17389 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.147876 17389 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.147881 17389 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.147886 17389 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.147891 17389 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.147897 17389 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.147902 17389 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.147907 17389 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.147912 17389 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:12:05.147917 17389 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:12:05.147922 17389 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.147928 17389 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.147934 17389 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.147939 17389 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.147945 17389 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.147950 17389 net.cpp:226] L1_b8_cbr1_scale needs backward 
computation.\nI0817 16:12:05.147956 17389 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.147961 17389 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:12:05.147967 17389 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:12:05.147972 17389 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:12:05.147979 17389 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:12:05.147984 17389 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.147989 17389 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.147995 17389 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.148000 17389 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.148005 17389 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.148011 17389 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.148016 17389 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.148022 17389 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:12:05.148027 17389 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:12:05.148033 17389 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.148039 17389 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.148044 17389 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.148051 17389 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.148056 17389 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.148061 17389 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.148067 17389 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.148077 17389 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.148083 17389 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:12:05.148089 17389 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:12:05.148094 17389 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:12:05.148102 17389 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.148106 17389 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:12:05.148113 17389 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.148118 17389 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.148123 17389 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.148128 17389 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.148133 17389 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.148139 17389 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:12:05.148144 17389 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:12:05.148150 17389 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.148159 17389 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.148164 17389 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.148170 17389 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.148176 17389 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.148181 17389 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.148186 17389 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.148192 17389 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.148197 17389 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:12:05.148203 17389 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:12:05.148210 17389 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:12:05.148216 17389 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.148221 17389 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.148226 17389 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.148231 17389 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.148237 17389 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.148242 17389 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.148248 17389 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.148254 17389 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:12:05.148259 17389 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:12:05.148264 17389 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.148270 17389 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.148277 17389 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.148282 17389 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.148288 17389 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.148293 17389 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.148298 17389 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.148303 17389 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.148308 17389 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:12:05.148314 17389 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:12:05.148319 17389 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.148326 17389 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.148331 17389 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:12:05.148337 17389 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.148347 17389 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.148353 17389 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:12:05.148360 17389 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.148365 17389 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.148370 17389 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:12:05.148375 17389 net.cpp:226] pre_relu needs backward computation.\nI0817 16:12:05.148380 17389 net.cpp:226] pre_scale needs backward computation.\nI0817 16:12:05.148386 17389 net.cpp:226] pre_bn needs backward computation.\nI0817 16:12:05.148391 17389 net.cpp:226] pre_conv needs backward computation.\nI0817 16:12:05.148397 17389 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:12:05.148403 17389 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:12:05.148407 17389 net.cpp:270] This network produces output accuracy\nI0817 16:12:05.148414 17389 net.cpp:270] This network produces output loss\nI0817 16:12:05.148785 17389 net.cpp:283] Network initialization done.\nI0817 16:12:05.158213 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:05.158252 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:05.158305 17389 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:12:05.158689 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:12:05.158707 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:12:05.158718 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:12:05.158727 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:12:05.158737 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:12:05.158746 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:12:05.158756 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:12:05.158764 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:12:05.158773 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:12:05.158782 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:12:05.158792 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:12:05.158799 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:12:05.158809 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:12:05.158818 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:12:05.158826 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:12:05.158835 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:12:05.158844 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:12:05.158852 17389 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:12:05.158861 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:12:05.158880 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:12:05.158890 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:12:05.158898 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:12:05.158910 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:12:05.158920 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:12:05.158928 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:12:05.158936 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:12:05.158946 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:12:05.158954 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:12:05.158962 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:12:05.158972 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:12:05.158980 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:12:05.158989 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:12:05.158998 17389 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:12:05.159006 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:12:05.159015 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:12:05.159024 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:12:05.159032 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:12:05.159041 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:12:05.159049 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:12:05.159059 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:12:05.159070 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:12:05.159078 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:12:05.159086 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:12:05.159095 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:12:05.159103 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:12:05.159112 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:12:05.159121 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:12:05.159129 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:12:05.159138 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:12:05.159154 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:12:05.159163 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:12:05.159173 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:12:05.159181 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:12:05.159189 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:12:05.159199 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:12:05.159207 17389 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:12:05.160857 17389 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 
16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term\nI0817 16:12:05.162461 17389 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:12:05.162708 17389 net.cpp:100] Creating Layer dataLayer\nI0817 16:12:05.162729 17389 net.cpp:408] dataLayer -> data_top\nI0817 16:12:05.162744 17389 net.cpp:408] dataLayer -> label\nI0817 16:12:05.162756 17389 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:12:05.196063 17397 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_test_lmdb\nI0817 16:12:05.196324 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:05.204152 17389 net.cpp:150] Setting up dataLayer\nI0817 16:12:05.204174 17389 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:12:05.204182 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.204187 17389 net.cpp:165] Memory required for data: 1536500\nI0817 16:12:05.204195 17389 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:12:05.204205 17389 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:12:05.204211 17389 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:12:05.204246 17389 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:12:05.204262 17389 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:12:05.204362 17389 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:12:05.204375 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.204385 17389 net.cpp:157] Top shape: 125 (125)\nI0817 16:12:05.204390 17389 net.cpp:165] Memory required for data: 1537500\nI0817 16:12:05.204396 17389 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:12:05.204411 17389 net.cpp:100] Creating Layer pre_conv\nI0817 16:12:05.204418 17389 net.cpp:434] pre_conv <- data_top\nI0817 16:12:05.204433 17389 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:12:05.204929 17389 net.cpp:150] Setting up pre_conv\nI0817 16:12:05.204953 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.204959 17389 net.cpp:165] Memory required for data: 9729500\nI0817 16:12:05.204973 17389 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:12:05.204985 17389 net.cpp:100] Creating Layer pre_bn\nI0817 16:12:05.204993 17389 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:12:05.205008 17389 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:12:05.205441 17389 net.cpp:150] Setting up pre_bn\nI0817 16:12:05.205459 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.205464 17389 net.cpp:165] Memory required for data: 17921500\nI0817 16:12:05.205484 17389 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:05.205502 17389 net.cpp:100] Creating Layer pre_scale\nI0817 16:12:05.205508 17389 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:12:05.205518 17389 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:12:05.205585 17389 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:12:05.205760 17389 net.cpp:150] Setting up pre_scale\nI0817 16:12:05.205775 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.205780 17389 net.cpp:165] Memory required for data: 26113500\nI0817 16:12:05.205793 17389 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:12:05.205806 17389 net.cpp:100] Creating Layer pre_relu\nI0817 16:12:05.205811 17389 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:12:05.205818 17389 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:12:05.205829 17389 net.cpp:150] Setting up pre_relu\nI0817 16:12:05.205838 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.205842 17389 net.cpp:165] Memory required for data: 34305500\nI0817 16:12:05.205847 
17389 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:12:05.205868 17389 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:12:05.205874 17389 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:12:05.205883 17389 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:12:05.205893 17389 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:12:05.205950 17389 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:12:05.205962 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.205970 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.205974 17389 net.cpp:165] Memory required for data: 50689500\nI0817 16:12:05.205979 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:12:05.205996 17389 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:12:05.206003 17389 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:12:05.206012 17389 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:12:05.206406 17389 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:12:05.206421 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.206426 17389 net.cpp:165] Memory required for data: 58881500\nI0817 16:12:05.206439 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:12:05.206455 17389 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:12:05.206463 17389 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:12:05.206475 17389 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:12:05.206789 17389 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:12:05.206802 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.206807 17389 net.cpp:165] Memory required for data: 67073500\nI0817 16:12:05.206821 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:05.206833 17389 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:12:05.206840 17389 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:12:05.206847 17389 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.206987 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:12:05.207324 17389 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:12:05.207340 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.207353 17389 net.cpp:165] Memory required for data: 75265500\nI0817 16:12:05.207366 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:12:05.207379 17389 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:12:05.207386 17389 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:12:05.207394 17389 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.207406 17389 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:12:05.207414 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.207418 17389 net.cpp:165] Memory required for data: 83457500\nI0817 16:12:05.207423 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:12:05.207442 17389 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:12:05.207451 17389 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:12:05.207464 17389 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:12:05.208391 17389 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:12:05.208407 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.208412 17389 net.cpp:165] Memory required for data: 91649500\nI0817 16:12:05.208422 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:12:05.208434 17389 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:12:05.208441 17389 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:12:05.208449 17389 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:12:05.208727 17389 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:12:05.208746 17389 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:12:05.208752 17389 net.cpp:165] Memory required for data: 99841500\nI0817 16:12:05.208766 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:05.208775 17389 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:12:05.208781 17389 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:12:05.208789 17389 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.208849 17389 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:12:05.209002 17389 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:12:05.209015 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209020 17389 net.cpp:165] Memory required for data: 108033500\nI0817 16:12:05.209029 17389 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:12:05.209041 17389 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:12:05.209048 17389 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:12:05.209054 17389 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:12:05.209064 17389 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:12:05.209100 17389 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:12:05.209111 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209115 17389 net.cpp:165] Memory required for data: 116225500\nI0817 16:12:05.209120 17389 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:12:05.209128 17389 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:12:05.209133 17389 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:12:05.209141 17389 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.209149 17389 net.cpp:150] Setting up L1_b1_relu\nI0817 16:12:05.209157 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209161 17389 net.cpp:165] Memory required for data: 124417500\nI0817 16:12:05.209167 17389 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.209175 17389 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.209180 17389 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:12:05.209190 17389 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:05.209199 17389 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:05.209245 17389 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:12:05.209264 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209270 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209275 17389 net.cpp:165] Memory required for data: 140801500\nI0817 16:12:05.209280 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:12:05.209295 17389 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:12:05.209301 17389 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:12:05.209311 17389 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:12:05.209672 17389 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:12:05.209688 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.209693 17389 net.cpp:165] Memory required for data: 148993500\nI0817 16:12:05.209702 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:12:05.209714 17389 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:12:05.209720 17389 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:12:05.209729 17389 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:12:05.210198 17389 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:12:05.210213 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.210218 17389 net.cpp:165] Memory required for data: 157185500\nI0817 16:12:05.210229 17389 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:12:05.210240 17389 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:12:05.210247 17389 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:12:05.210254 17389 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.210312 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:12:05.210472 17389 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:12:05.210485 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.210490 17389 net.cpp:165] Memory required for data: 165377500\nI0817 16:12:05.210505 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:12:05.210513 17389 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:12:05.210520 17389 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:12:05.210532 17389 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.210542 17389 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:12:05.210549 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.210554 17389 net.cpp:165] Memory required for data: 173569500\nI0817 16:12:05.210559 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:12:05.210572 17389 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:12:05.210578 17389 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:12:05.210587 17389 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:12:05.210943 17389 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:12:05.210958 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.210963 17389 net.cpp:165] Memory required for data: 181761500\nI0817 16:12:05.210973 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:12:05.210981 17389 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:12:05.210988 17389 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:12:05.210999 17389 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:12:05.211266 17389 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:12:05.211280 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211285 17389 net.cpp:165] Memory required for data: 189953500\nI0817 16:12:05.211303 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:05.211313 17389 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:12:05.211318 17389 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:12:05.211328 17389 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.211387 17389 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:12:05.211551 17389 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:12:05.211565 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211577 17389 net.cpp:165] Memory required for data: 198145500\nI0817 16:12:05.211586 17389 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:12:05.211598 17389 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:12:05.211604 17389 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:12:05.211612 17389 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:12:05.211619 17389 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:12:05.211657 17389 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:12:05.211666 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211671 17389 net.cpp:165] Memory required for data: 206337500\nI0817 16:12:05.211676 17389 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:12:05.211683 17389 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:12:05.211689 17389 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:12:05.211699 17389 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:12:05.211709 17389 net.cpp:150] Setting up L1_b2_relu\nI0817 16:12:05.211716 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211720 17389 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:12:05.211725 17389 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.211731 17389 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.211736 17389 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:12:05.211745 17389 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:05.211753 17389 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:05.211802 17389 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:12:05.211812 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211819 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.211824 17389 net.cpp:165] Memory required for data: 230913500\nI0817 16:12:05.211829 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:12:05.211839 17389 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:12:05.211845 17389 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:12:05.211858 17389 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:12:05.212206 17389 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:12:05.212220 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.212225 17389 net.cpp:165] Memory required for data: 239105500\nI0817 16:12:05.212234 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:12:05.212244 17389 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:12:05.212249 17389 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:12:05.212265 17389 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:12:05.212543 17389 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:12:05.212560 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.212565 17389 net.cpp:165] Memory required for data: 
247297500\nI0817 16:12:05.212575 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:05.212584 17389 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:12:05.212590 17389 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:12:05.212599 17389 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.212671 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:12:05.212833 17389 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:12:05.212846 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.212852 17389 net.cpp:165] Memory required for data: 255489500\nI0817 16:12:05.212860 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:12:05.212872 17389 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:12:05.212878 17389 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:12:05.212887 17389 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.212903 17389 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:12:05.212910 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.212915 17389 net.cpp:165] Memory required for data: 263681500\nI0817 16:12:05.212920 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:12:05.212934 17389 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:12:05.212940 17389 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:12:05.212951 17389 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:12:05.213351 17389 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:12:05.213366 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.213371 17389 net.cpp:165] Memory required for data: 271873500\nI0817 16:12:05.213380 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:12:05.213395 17389 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:12:05.213403 17389 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:12:05.213412 17389 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:12:05.213693 17389 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:12:05.213711 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.213716 17389 net.cpp:165] Memory required for data: 280065500\nI0817 16:12:05.213727 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:05.213735 17389 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:12:05.213742 17389 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:12:05.213749 17389 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.213809 17389 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:12:05.213968 17389 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:12:05.213980 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.213985 17389 net.cpp:165] Memory required for data: 288257500\nI0817 16:12:05.213994 17389 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:12:05.214009 17389 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:12:05.214015 17389 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:12:05.214022 17389 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:12:05.214032 17389 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:12:05.214066 17389 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:12:05.214077 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.214082 17389 net.cpp:165] Memory required for data: 296449500\nI0817 16:12:05.214087 17389 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:12:05.214107 17389 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:12:05.214112 17389 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:12:05.214119 17389 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.214128 17389 net.cpp:150] Setting up L1_b3_relu\nI0817 16:12:05.214135 17389 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:12:05.214140 17389 net.cpp:165] Memory required for data: 304641500\nI0817 16:12:05.214145 17389 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.214151 17389 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.214157 17389 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:12:05.214164 17389 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:05.214174 17389 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:05.214222 17389 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:12:05.214239 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.214246 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.214251 17389 net.cpp:165] Memory required for data: 321025500\nI0817 16:12:05.214256 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:12:05.214275 17389 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:12:05.214282 17389 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:12:05.214292 17389 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:12:05.214653 17389 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:12:05.214668 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.214673 17389 net.cpp:165] Memory required for data: 329217500\nI0817 16:12:05.214682 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:12:05.214694 17389 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:12:05.214700 17389 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:12:05.214709 17389 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:12:05.214993 17389 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:12:05.215005 17389 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:12:05.215013 17389 net.cpp:165] Memory required for data: 337409500\nI0817 16:12:05.215025 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:05.215034 17389 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:12:05.215040 17389 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:12:05.215049 17389 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.215111 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:12:05.215297 17389 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:12:05.215311 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.215315 17389 net.cpp:165] Memory required for data: 345601500\nI0817 16:12:05.215324 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:12:05.215335 17389 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:12:05.215342 17389 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:12:05.215353 17389 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.215363 17389 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:12:05.215370 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.215374 17389 net.cpp:165] Memory required for data: 353793500\nI0817 16:12:05.215379 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:12:05.215400 17389 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:12:05.215409 17389 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:12:05.215420 17389 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:12:05.215816 17389 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:12:05.215831 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.215837 17389 net.cpp:165] Memory required for data: 361985500\nI0817 16:12:05.215847 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:12:05.215855 17389 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:12:05.215864 17389 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:12:05.215876 17389 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:12:05.216189 17389 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:12:05.216207 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216212 17389 net.cpp:165] Memory required for data: 370177500\nI0817 16:12:05.216228 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:05.216238 17389 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:12:05.216243 17389 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:12:05.216251 17389 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.216315 17389 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:12:05.216487 17389 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:12:05.216509 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216514 17389 net.cpp:165] Memory required for data: 378369500\nI0817 16:12:05.216523 17389 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:12:05.216538 17389 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:12:05.216547 17389 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:12:05.216562 17389 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:12:05.216583 17389 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:12:05.216625 17389 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:12:05.216635 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216640 17389 net.cpp:165] Memory required for data: 386561500\nI0817 16:12:05.216645 17389 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:12:05.216656 17389 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:12:05.216663 17389 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:12:05.216670 17389 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:12:05.216680 17389 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:12:05.216687 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216694 17389 net.cpp:165] Memory required for data: 394753500\nI0817 16:12:05.216699 17389 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.216706 17389 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.216712 17389 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:12:05.216720 17389 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:05.216729 17389 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:05.216786 17389 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:12:05.216797 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216804 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.216809 17389 net.cpp:165] Memory required for data: 411137500\nI0817 16:12:05.216814 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:12:05.216826 17389 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:12:05.216833 17389 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:12:05.216842 17389 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:12:05.217236 17389 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:12:05.217252 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.217258 17389 net.cpp:165] Memory required for data: 419329500\nI0817 16:12:05.217291 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:12:05.217308 17389 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:12:05.217315 17389 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:12:05.217324 17389 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:12:05.217650 17389 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:12:05.217665 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.217672 17389 net.cpp:165] Memory required for data: 427521500\nI0817 16:12:05.217684 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:05.217696 17389 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:12:05.217703 17389 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:12:05.217711 17389 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.217777 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:12:05.217954 17389 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:12:05.217968 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.217973 17389 net.cpp:165] Memory required for data: 435713500\nI0817 16:12:05.217983 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:12:05.217993 17389 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:12:05.217999 17389 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:12:05.218008 17389 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.218019 17389 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:12:05.218027 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.218032 17389 net.cpp:165] Memory required for data: 443905500\nI0817 16:12:05.218036 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:12:05.218061 17389 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:12:05.218068 17389 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:12:05.218080 17389 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:12:05.218602 17389 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:12:05.218619 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.218626 17389 net.cpp:165] Memory required for data: 452097500\nI0817 16:12:05.218634 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:12:05.218647 17389 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:12:05.218654 17389 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:12:05.218662 17389 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:12:05.218977 17389 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:12:05.218991 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.218997 17389 net.cpp:165] Memory required for data: 460289500\nI0817 16:12:05.219007 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:05.219022 17389 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:12:05.219029 17389 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:12:05.219038 17389 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.219112 17389 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:12:05.219298 17389 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:12:05.219312 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.219317 17389 net.cpp:165] Memory required for data: 468481500\nI0817 16:12:05.219326 17389 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:12:05.219336 17389 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:12:05.219341 17389 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:12:05.219348 17389 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:12:05.219362 17389 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:12:05.219405 17389 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:12:05.219418 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.219424 17389 net.cpp:165] Memory required for data: 476673500\nI0817 16:12:05.219429 17389 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:12:05.219435 17389 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:12:05.219441 17389 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:12:05.219449 17389 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.219472 17389 net.cpp:150] Setting up L1_b5_relu\nI0817 16:12:05.219482 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.219487 17389 net.cpp:165] Memory required for data: 484865500\nI0817 16:12:05.219493 17389 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.219509 17389 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.219516 17389 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:12:05.219524 17389 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:05.219535 17389 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:05.219593 17389 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:12:05.219605 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.219612 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.219619 17389 net.cpp:165] Memory required for data: 501249500\nI0817 16:12:05.219625 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:12:05.219636 17389 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:12:05.219645 17389 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:12:05.219658 17389 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:12:05.220084 17389 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:12:05.220108 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.220114 17389 net.cpp:165] Memory required for data: 509441500\nI0817 16:12:05.220124 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:12:05.220132 17389 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:12:05.220139 17389 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:12:05.220149 17389 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:12:05.220468 17389 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:12:05.220484 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.220489 17389 net.cpp:165] Memory required for data: 517633500\nI0817 16:12:05.220506 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:05.220521 17389 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:12:05.220530 17389 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:12:05.220537 17389 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.220607 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:12:05.220835 17389 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:12:05.220850 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.220855 17389 net.cpp:165] Memory required for data: 525825500\nI0817 16:12:05.220865 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:12:05.220875 17389 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:12:05.220882 17389 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:12:05.220893 17389 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.220903 17389 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:12:05.220911 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.220916 17389 net.cpp:165] Memory required for data: 534017500\nI0817 16:12:05.220921 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:12:05.220933 17389 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:12:05.220939 17389 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:12:05.220952 17389 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:12:05.221354 17389 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:12:05.221369 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.221375 17389 net.cpp:165] Memory required for data: 542209500\nI0817 16:12:05.221384 17389 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:12:05.221400 17389 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:12:05.221406 17389 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:12:05.221415 17389 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:12:05.221735 17389 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:12:05.221748 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.221753 17389 net.cpp:165] Memory required for data: 550401500\nI0817 16:12:05.221765 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:05.221773 17389 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:12:05.221779 17389 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:12:05.221793 17389 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.221861 17389 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:12:05.222036 17389 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:12:05.222057 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222064 17389 net.cpp:165] Memory required for data: 558593500\nI0817 16:12:05.222074 17389 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:12:05.222091 17389 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:12:05.222100 17389 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:12:05.222108 17389 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:12:05.222116 17389 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:12:05.222162 17389 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:12:05.222175 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222182 17389 net.cpp:165] Memory required for data: 566785500\nI0817 16:12:05.222194 17389 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:12:05.222201 17389 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:12:05.222208 17389 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:12:05.222218 17389 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.222228 17389 net.cpp:150] Setting up L1_b6_relu\nI0817 16:12:05.222234 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222239 17389 net.cpp:165] Memory required for data: 574977500\nI0817 16:12:05.222244 17389 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.222254 17389 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.222260 17389 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:12:05.222267 17389 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:05.222277 17389 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:05.222337 17389 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:12:05.222350 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222357 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222364 17389 net.cpp:165] Memory required for data: 591361500\nI0817 16:12:05.222370 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:12:05.222381 17389 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:12:05.222388 17389 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:12:05.222404 17389 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:12:05.222823 17389 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:12:05.222838 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.222843 17389 net.cpp:165] Memory required for data: 599553500\nI0817 16:12:05.222856 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:12:05.222864 17389 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:12:05.222872 17389 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:12:05.222896 17389 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:12:05.223227 17389 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:12:05.223242 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.223246 17389 net.cpp:165] Memory required for data: 607745500\nI0817 16:12:05.223260 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:05.223273 17389 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:12:05.223279 17389 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:12:05.223287 17389 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.223356 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:12:05.223554 17389 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:12:05.223570 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.223577 17389 net.cpp:165] Memory required for data: 615937500\nI0817 16:12:05.223585 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:12:05.223593 17389 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:12:05.223599 17389 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:12:05.223609 17389 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.223620 17389 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:12:05.223628 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.223634 17389 net.cpp:165] Memory required for data: 624129500\nI0817 16:12:05.223640 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:12:05.223654 17389 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:12:05.223659 17389 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:12:05.223673 17389 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:12:05.224081 17389 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:12:05.224102 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.224108 17389 
net.cpp:165] Memory required for data: 632321500\nI0817 16:12:05.224117 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:12:05.224126 17389 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:12:05.224133 17389 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:12:05.224140 17389 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:12:05.224419 17389 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:12:05.224433 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.224438 17389 net.cpp:165] Memory required for data: 640513500\nI0817 16:12:05.224448 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:05.224460 17389 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:12:05.224467 17389 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:12:05.224474 17389 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.224544 17389 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:12:05.224711 17389 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:12:05.224725 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.224730 17389 net.cpp:165] Memory required for data: 648705500\nI0817 16:12:05.224738 17389 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:12:05.224747 17389 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:12:05.224753 17389 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:12:05.224761 17389 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:12:05.224936 17389 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:12:05.224992 17389 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:12:05.225005 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225010 17389 net.cpp:165] Memory required for data: 656897500\nI0817 16:12:05.225015 17389 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:12:05.225023 17389 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:12:05.225030 17389 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:12:05.225037 17389 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.225049 17389 net.cpp:150] Setting up L1_b7_relu\nI0817 16:12:05.225057 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225061 17389 net.cpp:165] Memory required for data: 665089500\nI0817 16:12:05.225066 17389 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.225073 17389 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.225078 17389 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:12:05.225086 17389 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:05.225095 17389 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:05.225147 17389 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:12:05.225159 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225167 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225170 17389 net.cpp:165] Memory required for data: 681473500\nI0817 16:12:05.225175 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:12:05.225186 17389 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:12:05.225193 17389 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:12:05.225205 17389 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:12:05.225572 17389 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:12:05.225587 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225592 17389 net.cpp:165] Memory required for data: 689665500\nI0817 16:12:05.225601 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:12:05.225610 17389 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:12:05.225617 17389 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:12:05.225636 17389 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:12:05.225915 17389 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:12:05.225929 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.225934 17389 net.cpp:165] Memory required for data: 697857500\nI0817 16:12:05.225944 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:05.225955 17389 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:12:05.225962 17389 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:12:05.225970 17389 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.226028 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:12:05.226191 17389 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:12:05.226204 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.226209 17389 net.cpp:165] Memory required for data: 706049500\nI0817 16:12:05.226218 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:12:05.226227 17389 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:12:05.226233 17389 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:12:05.226243 17389 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.226253 17389 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:12:05.226260 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.226265 17389 net.cpp:165] Memory required for data: 714241500\nI0817 16:12:05.226270 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:12:05.226284 17389 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:12:05.226289 17389 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:12:05.226299 17389 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:12:05.226668 17389 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:12:05.226683 17389 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.226688 17389 net.cpp:165] Memory required for data: 722433500\nI0817 16:12:05.226697 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:12:05.226708 17389 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:12:05.226716 17389 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:12:05.226723 17389 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:12:05.227006 17389 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:12:05.227020 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227025 17389 net.cpp:165] Memory required for data: 730625500\nI0817 16:12:05.227035 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:05.227043 17389 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:12:05.227051 17389 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:12:05.227061 17389 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.227120 17389 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:12:05.227283 17389 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:12:05.227296 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227301 17389 net.cpp:165] Memory required for data: 738817500\nI0817 16:12:05.227310 17389 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:12:05.227319 17389 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:12:05.227325 17389 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:12:05.227332 17389 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:12:05.227344 17389 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:12:05.227377 17389 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:12:05.227392 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227397 17389 net.cpp:165] Memory required for data: 747009500\nI0817 16:12:05.227403 17389 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:12:05.227411 17389 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:12:05.227416 17389 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:12:05.227423 17389 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.227439 17389 net.cpp:150] Setting up L1_b8_relu\nI0817 16:12:05.227447 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227452 17389 net.cpp:165] Memory required for data: 755201500\nI0817 16:12:05.227457 17389 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.227465 17389 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.227471 17389 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:12:05.227478 17389 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:05.227488 17389 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:05.227545 17389 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:12:05.227557 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227565 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227569 17389 net.cpp:165] Memory required for data: 771585500\nI0817 16:12:05.227574 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:12:05.227586 17389 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:12:05.227591 17389 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:12:05.227603 17389 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:12:05.227972 17389 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:12:05.227985 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.227990 17389 net.cpp:165] Memory required for data: 779777500\nI0817 16:12:05.227999 17389 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:12:05.228011 17389 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:12:05.228018 17389 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:12:05.228026 17389 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:12:05.228308 17389 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:12:05.228322 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.228327 17389 net.cpp:165] Memory required for data: 787969500\nI0817 16:12:05.228338 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:05.228345 17389 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:12:05.228351 17389 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:12:05.228359 17389 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.228420 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:12:05.228591 17389 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:12:05.228605 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.228610 17389 net.cpp:165] Memory required for data: 796161500\nI0817 16:12:05.228619 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:12:05.228627 17389 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:12:05.228633 17389 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:12:05.228646 17389 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.228654 17389 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:12:05.228662 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.228667 17389 net.cpp:165] Memory required for data: 804353500\nI0817 16:12:05.228672 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:12:05.228685 17389 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:12:05.228691 17389 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:12:05.228699 17389 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:12:05.229059 17389 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:12:05.229074 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229079 17389 net.cpp:165] Memory required for data: 812545500\nI0817 16:12:05.229087 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:12:05.229095 17389 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:12:05.229102 17389 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:12:05.229120 17389 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:12:05.229401 17389 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:12:05.229418 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229423 17389 net.cpp:165] Memory required for data: 820737500\nI0817 16:12:05.229455 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:05.229466 17389 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:12:05.229472 17389 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:12:05.229480 17389 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.229549 17389 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:12:05.229714 17389 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:12:05.229727 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229732 17389 net.cpp:165] Memory required for data: 828929500\nI0817 16:12:05.229743 17389 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:12:05.229750 17389 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:12:05.229758 17389 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:12:05.229764 17389 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:12:05.229774 17389 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:12:05.229809 17389 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:12:05.229820 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229825 17389 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:12:05.229831 17389 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:12:05.229838 17389 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:12:05.229845 17389 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:12:05.229856 17389 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.229866 17389 net.cpp:150] Setting up L1_b9_relu\nI0817 16:12:05.229872 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229876 17389 net.cpp:165] Memory required for data: 845313500\nI0817 16:12:05.229882 17389 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.229887 17389 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.229893 17389 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:12:05.229903 17389 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:05.229913 17389 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:05.229964 17389 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:12:05.229975 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229982 17389 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:12:05.229987 17389 net.cpp:165] Memory required for data: 861697500\nI0817 16:12:05.229992 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:12:05.230002 17389 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:12:05.230010 17389 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:12:05.230020 17389 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:12:05.230382 17389 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:12:05.230396 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.230401 17389 net.cpp:165] Memory required for data: 
863745500\nI0817 16:12:05.230410 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:12:05.230419 17389 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:12:05.230425 17389 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:12:05.230437 17389 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:12:05.230713 17389 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:12:05.230727 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.230732 17389 net.cpp:165] Memory required for data: 865793500\nI0817 16:12:05.230749 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:05.230763 17389 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:12:05.230770 17389 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:12:05.230778 17389 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.230839 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:12:05.231000 17389 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:12:05.231014 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.231019 17389 net.cpp:165] Memory required for data: 867841500\nI0817 16:12:05.231027 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:12:05.231039 17389 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:12:05.231045 17389 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:12:05.231052 17389 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.231061 17389 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:12:05.231068 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.231073 17389 net.cpp:165] Memory required for data: 869889500\nI0817 16:12:05.231078 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:12:05.231091 17389 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:12:05.231097 17389 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:12:05.231108 17389 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:12:05.231465 17389 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:12:05.231479 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.231484 17389 net.cpp:165] Memory required for data: 871937500\nI0817 16:12:05.231492 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:12:05.231514 17389 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:12:05.231523 17389 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:12:05.231534 17389 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:12:05.231806 17389 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:12:05.231819 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.231824 17389 net.cpp:165] Memory required for data: 873985500\nI0817 16:12:05.231835 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:05.231847 17389 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:12:05.231853 17389 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:12:05.231861 17389 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.231920 17389 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:12:05.232138 17389 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:12:05.232157 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.232165 17389 net.cpp:165] Memory required for data: 876033500\nI0817 16:12:05.232182 17389 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:12:05.232198 17389 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:12:05.232208 17389 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:12:05.232221 17389 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:12:05.232266 17389 net.cpp:150] Setting up L2_b1_pool\nI0817 16:12:05.232288 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.232295 17389 net.cpp:165] Memory required for data: 878081500\nI0817 16:12:05.232300 17389 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:12:05.232309 17389 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:12:05.232316 17389 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:12:05.232322 17389 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:12:05.232331 17389 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:12:05.232370 17389 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:12:05.232383 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.232388 17389 net.cpp:165] Memory required for data: 880129500\nI0817 16:12:05.232393 17389 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:12:05.232400 17389 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:12:05.232414 17389 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:12:05.232421 17389 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.232431 17389 net.cpp:150] Setting up L2_b1_relu\nI0817 16:12:05.232439 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.232442 17389 net.cpp:165] Memory required for data: 882177500\nI0817 16:12:05.232447 17389 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:12:05.232460 17389 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:12:05.232467 17389 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:12:05.234745 17389 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:12:05.234763 17389 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:12:05.234768 17389 net.cpp:165] Memory required for data: 884225500\nI0817 16:12:05.234774 17389 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:12:05.234784 17389 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:12:05.234791 17389 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:12:05.234798 17389 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:12:05.234807 17389 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:12:05.234853 17389 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:12:05.234865 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.234870 17389 net.cpp:165] Memory required for data: 888321500\nI0817 16:12:05.234875 17389 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.234886 17389 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.234892 17389 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:12:05.234900 17389 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:05.234910 17389 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:05.234962 17389 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:12:05.234974 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.234982 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.234987 17389 net.cpp:165] Memory required for data: 896513500\nI0817 16:12:05.234992 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:12:05.235008 17389 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:12:05.235014 17389 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:12:05.235023 17389 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:12:05.235549 17389 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:12:05.235564 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.235570 17389 net.cpp:165] Memory required for data: 900609500\nI0817 16:12:05.235579 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:12:05.235589 17389 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:12:05.235599 17389 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:12:05.235608 17389 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:12:05.235880 17389 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:12:05.235893 17389 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:12:05.235899 17389 net.cpp:165] Memory required for data: 904705500\nI0817 16:12:05.235909 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:05.235918 17389 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:12:05.235924 17389 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:12:05.235932 17389 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.235996 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:12:05.236155 17389 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:12:05.236169 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.236176 17389 net.cpp:165] Memory required for data: 908801500\nI0817 16:12:05.236184 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:12:05.236191 17389 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:12:05.236207 17389 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:12:05.236214 17389 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.236224 17389 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:12:05.236232 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.236235 17389 net.cpp:165] Memory required for data: 912897500\nI0817 16:12:05.236240 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:12:05.236254 17389 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:12:05.236260 17389 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:12:05.236271 17389 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:12:05.236779 17389 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:12:05.236793 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.236799 17389 net.cpp:165] Memory required for data: 916993500\nI0817 16:12:05.236809 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:12:05.236820 17389 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:12:05.236827 
17389 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:12:05.236838 17389 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:12:05.237105 17389 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:12:05.237118 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237123 17389 net.cpp:165] Memory required for data: 921089500\nI0817 16:12:05.237133 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:05.237143 17389 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:12:05.237149 17389 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:12:05.237156 17389 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.237218 17389 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:12:05.237399 17389 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:12:05.237413 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237419 17389 net.cpp:165] Memory required for data: 925185500\nI0817 16:12:05.237428 17389 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:12:05.237439 17389 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:12:05.237447 17389 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:12:05.237454 17389 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:12:05.237462 17389 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:12:05.237493 17389 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:12:05.237507 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237512 17389 net.cpp:165] Memory required for data: 929281500\nI0817 16:12:05.237517 17389 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:12:05.237529 17389 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:12:05.237535 17389 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:12:05.237542 17389 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:12:05.237552 17389 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:12:05.237560 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237563 17389 net.cpp:165] Memory required for data: 933377500\nI0817 16:12:05.237568 17389 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.237576 17389 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.237581 17389 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:12:05.237588 17389 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:05.237597 17389 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:05.237650 17389 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:12:05.237661 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237669 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.237673 17389 net.cpp:165] Memory required for data: 941569500\nI0817 16:12:05.237685 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:12:05.237699 17389 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:12:05.237706 17389 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:12:05.237715 17389 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:12:05.238215 17389 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:12:05.238229 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.238234 17389 net.cpp:165] Memory required for data: 945665500\nI0817 16:12:05.238243 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:12:05.238255 17389 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:12:05.238261 17389 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:12:05.238272 17389 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:12:05.238546 17389 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:12:05.238560 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.238565 17389 net.cpp:165] Memory required for data: 949761500\nI0817 16:12:05.238575 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:05.238584 17389 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:12:05.238590 17389 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:12:05.238598 17389 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.238659 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:12:05.238816 17389 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:12:05.238834 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.238839 17389 net.cpp:165] Memory required for data: 953857500\nI0817 16:12:05.238848 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:12:05.238857 17389 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:12:05.238863 17389 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:12:05.238870 17389 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.238879 17389 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:12:05.238886 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.238891 17389 net.cpp:165] Memory required for data: 957953500\nI0817 16:12:05.238895 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:12:05.238909 17389 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:12:05.238915 17389 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:12:05.238927 17389 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:12:05.239421 17389 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:12:05.239435 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.239440 17389 net.cpp:165] Memory required for data: 962049500\nI0817 16:12:05.239449 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:12:05.239460 17389 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:12:05.239467 17389 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:12:05.239478 17389 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:12:05.239780 17389 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:12:05.239796 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.239801 17389 net.cpp:165] Memory required for data: 966145500\nI0817 16:12:05.239812 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:05.239821 17389 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:12:05.239828 17389 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:12:05.239835 17389 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.239897 17389 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:12:05.240054 17389 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:12:05.240067 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240072 17389 net.cpp:165] Memory required for data: 970241500\nI0817 16:12:05.240082 17389 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:12:05.240092 17389 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:12:05.240106 17389 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:12:05.240113 17389 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:12:05.240121 17389 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:12:05.240150 17389 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:12:05.240161 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240166 17389 net.cpp:165] Memory required for data: 974337500\nI0817 16:12:05.240171 17389 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:12:05.240192 17389 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:12:05.240200 17389 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:12:05.240206 17389 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.240216 17389 net.cpp:150] Setting up L2_b3_relu\nI0817 16:12:05.240223 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240227 17389 net.cpp:165] Memory required for data: 978433500\nI0817 16:12:05.240233 17389 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.240239 17389 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.240245 17389 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:12:05.240252 17389 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:05.240262 17389 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:05.240314 17389 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:12:05.240326 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240334 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240337 17389 net.cpp:165] Memory required for data: 986625500\nI0817 16:12:05.240342 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:12:05.240353 17389 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:12:05.240360 17389 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:12:05.240372 17389 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:12:05.240882 17389 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:12:05.240896 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.240902 17389 net.cpp:165] Memory required for data: 990721500\nI0817 16:12:05.240911 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:12:05.240922 17389 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:12:05.240929 17389 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:12:05.240937 17389 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:12:05.241210 17389 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:12:05.241226 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.241231 17389 net.cpp:165] Memory required for data: 994817500\nI0817 16:12:05.241241 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:05.241250 17389 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:12:05.241256 17389 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:12:05.241264 17389 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.241323 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:12:05.241487 17389 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:12:05.241506 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.241511 17389 net.cpp:165] Memory required for data: 998913500\nI0817 16:12:05.241520 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:12:05.241528 17389 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:12:05.241535 17389 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:12:05.241545 17389 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.241555 17389 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:12:05.241562 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.241575 17389 net.cpp:165] Memory required for data: 1003009500\nI0817 16:12:05.241580 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:12:05.241592 17389 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:12:05.241598 17389 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:12:05.241610 17389 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:12:05.242105 17389 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:12:05.242120 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242125 17389 net.cpp:165] Memory required for data: 1007105500\nI0817 16:12:05.242132 17389 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:12:05.242141 17389 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:12:05.242148 17389 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:12:05.242159 17389 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:12:05.242426 17389 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:12:05.242439 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242444 17389 net.cpp:165] Memory required for data: 1011201500\nI0817 16:12:05.242455 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:05.242470 17389 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:12:05.242477 17389 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:12:05.242486 17389 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.242550 17389 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:12:05.242712 17389 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:12:05.242725 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242730 17389 net.cpp:165] Memory required for data: 1015297500\nI0817 16:12:05.242739 17389 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:12:05.242751 17389 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:12:05.242758 17389 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:12:05.242765 17389 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:12:05.242774 17389 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:12:05.242804 17389 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:12:05.242813 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242818 17389 net.cpp:165] Memory required for data: 1019393500\nI0817 16:12:05.242823 17389 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:12:05.242830 17389 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:12:05.242836 17389 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:12:05.242846 17389 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:12:05.242856 17389 net.cpp:150] Setting up L2_b4_relu\nI0817 16:12:05.242863 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242868 17389 net.cpp:165] Memory required for data: 1023489500\nI0817 16:12:05.242872 17389 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.242879 17389 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.242885 17389 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:12:05.242892 17389 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:05.242902 17389 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:05.242951 17389 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:12:05.242964 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242970 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.242975 17389 net.cpp:165] Memory required for data: 1031681500\nI0817 16:12:05.242980 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:12:05.242991 17389 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:12:05.242997 17389 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:12:05.243010 17389 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:12:05.243520 17389 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:12:05.243533 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.243538 17389 net.cpp:165] Memory required for data: 1035777500\nI0817 16:12:05.243547 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:12:05.243556 17389 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:12:05.243563 17389 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:12:05.243574 17389 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:12:05.243844 17389 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:12:05.243856 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.243861 17389 net.cpp:165] Memory required for data: 1039873500\nI0817 16:12:05.243871 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:05.243883 17389 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:12:05.243890 17389 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:12:05.243897 17389 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.243955 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:12:05.244117 17389 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:12:05.244130 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.244135 17389 net.cpp:165] Memory required for data: 1043969500\nI0817 16:12:05.244144 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:12:05.244158 17389 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:12:05.244163 17389 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:12:05.244171 17389 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.244184 17389 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:12:05.244190 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.244194 17389 net.cpp:165] Memory required for data: 1048065500\nI0817 16:12:05.244199 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:12:05.244210 17389 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:12:05.244215 17389 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:12:05.244226 17389 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:12:05.244720 17389 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:12:05.244735 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.244740 17389 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:12:05.244748 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:12:05.244758 17389 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:12:05.244765 17389 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:12:05.244776 17389 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:12:05.245045 17389 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:12:05.245059 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245064 17389 net.cpp:165] Memory required for data: 1056257500\nI0817 16:12:05.245074 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:05.245085 17389 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:12:05.245092 17389 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:12:05.245100 17389 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.245158 17389 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:12:05.245316 17389 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:12:05.245333 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245338 17389 net.cpp:165] Memory required for data: 1060353500\nI0817 16:12:05.245347 17389 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:12:05.245358 17389 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:12:05.245365 17389 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:12:05.245373 17389 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:12:05.245380 17389 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:12:05.245411 17389 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:12:05.245427 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245432 17389 net.cpp:165] Memory required for data: 1064449500\nI0817 16:12:05.245438 17389 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:12:05.245445 17389 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:12:05.245451 17389 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:12:05.245461 17389 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.245471 17389 net.cpp:150] Setting up L2_b5_relu\nI0817 16:12:05.245477 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245482 17389 net.cpp:165] Memory required for data: 1068545500\nI0817 16:12:05.245487 17389 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.245501 17389 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.245507 17389 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:12:05.245514 17389 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:05.245553 17389 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:05.245620 17389 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:12:05.245633 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245640 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.245645 17389 net.cpp:165] Memory required for data: 1076737500\nI0817 16:12:05.245649 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:12:05.245661 17389 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:12:05.245667 17389 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:12:05.245679 17389 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:12:05.247269 17389 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:12:05.247288 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.247293 17389 net.cpp:165] Memory required for data: 1080833500\nI0817 16:12:05.247303 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:12:05.247316 17389 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:12:05.247323 17389 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:12:05.247333 17389 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:12:05.247614 17389 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:12:05.247628 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.247633 17389 net.cpp:165] Memory required for data: 1084929500\nI0817 16:12:05.247644 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:05.247653 17389 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:12:05.247659 17389 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:12:05.247670 17389 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.247731 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:12:05.247895 17389 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:12:05.247908 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.247913 17389 net.cpp:165] Memory required for data: 1089025500\nI0817 16:12:05.247922 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:12:05.247931 17389 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:12:05.247937 17389 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:12:05.247946 17389 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.247954 17389 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:12:05.247961 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.247967 17389 net.cpp:165] Memory required for data: 1093121500\nI0817 16:12:05.247972 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:12:05.247985 17389 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:12:05.247992 17389 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:12:05.248011 17389 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:12:05.248507 17389 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:12:05.248522 17389 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.248528 17389 net.cpp:165] Memory required for data: 1097217500\nI0817 16:12:05.248536 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:12:05.248548 17389 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:12:05.248555 17389 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:12:05.248566 17389 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:12:05.248841 17389 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:12:05.248853 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.248859 17389 net.cpp:165] Memory required for data: 1101313500\nI0817 16:12:05.248869 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:05.248878 17389 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:12:05.248884 17389 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:12:05.248891 17389 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.248953 17389 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:12:05.249114 17389 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:12:05.249130 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249135 17389 net.cpp:165] Memory required for data: 1105409500\nI0817 16:12:05.249143 17389 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:12:05.249152 17389 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:12:05.249158 17389 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:12:05.249166 17389 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:12:05.249173 17389 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:12:05.249205 17389 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:12:05.249215 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249219 17389 net.cpp:165] Memory required for data: 1109505500\nI0817 16:12:05.249224 17389 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:12:05.249233 17389 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:12:05.249238 17389 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:12:05.249248 17389 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.249258 17389 net.cpp:150] Setting up L2_b6_relu\nI0817 16:12:05.249264 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249269 17389 net.cpp:165] Memory required for data: 1113601500\nI0817 16:12:05.249274 17389 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.249281 17389 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.249286 17389 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:12:05.249296 17389 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:05.249306 17389 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:05.249354 17389 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:12:05.249366 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249373 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249377 17389 net.cpp:165] Memory required for data: 1121793500\nI0817 16:12:05.249382 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:12:05.249397 17389 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:12:05.249403 17389 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:12:05.249413 17389 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:12:05.249928 17389 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:12:05.249943 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.249948 17389 net.cpp:165] Memory required for data: 1125889500\nI0817 16:12:05.249958 17389 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:12:05.249976 17389 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:12:05.249984 17389 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:12:05.249992 17389 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:12:05.250262 17389 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:12:05.250274 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.250279 17389 net.cpp:165] Memory required for data: 1129985500\nI0817 16:12:05.250290 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:05.250298 17389 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:12:05.250305 17389 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:12:05.250313 17389 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.250377 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:12:05.250545 17389 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:12:05.250560 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.250566 17389 net.cpp:165] Memory required for data: 1134081500\nI0817 16:12:05.250576 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:12:05.250583 17389 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:12:05.250589 17389 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:12:05.250597 17389 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.250607 17389 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:12:05.250613 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.250617 17389 net.cpp:165] Memory required for data: 1138177500\nI0817 16:12:05.250622 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:12:05.250635 17389 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:12:05.250643 17389 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:12:05.250653 17389 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:12:05.251173 17389 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:12:05.251188 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.251194 17389 net.cpp:165] Memory required for data: 1142273500\nI0817 16:12:05.251202 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:12:05.251214 17389 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:12:05.251220 17389 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:12:05.251231 17389 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:12:05.251513 17389 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:12:05.251528 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.251533 17389 net.cpp:165] Memory required for data: 1146369500\nI0817 16:12:05.251543 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:05.251551 17389 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:12:05.251559 17389 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:12:05.251566 17389 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.251628 17389 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:12:05.251785 17389 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:12:05.251797 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.251802 17389 net.cpp:165] Memory required for data: 1150465500\nI0817 16:12:05.251811 17389 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:12:05.251822 17389 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:12:05.251829 17389 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:12:05.251837 17389 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:12:05.251844 17389 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:12:05.251873 17389 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:12:05.251881 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.251886 17389 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:12:05.251891 17389 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:12:05.251902 17389 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:12:05.251915 17389 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:12:05.251922 17389 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.251932 17389 net.cpp:150] Setting up L2_b7_relu\nI0817 16:12:05.251940 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.251945 17389 net.cpp:165] Memory required for data: 1158657500\nI0817 16:12:05.251948 17389 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.251955 17389 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.251960 17389 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:12:05.251968 17389 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:05.251977 17389 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:05.252030 17389 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:12:05.252043 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.252049 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.252054 17389 net.cpp:165] Memory required for data: 1166849500\nI0817 16:12:05.252059 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:12:05.252076 17389 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:12:05.252082 17389 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:12:05.252092 17389 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:12:05.252593 17389 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:12:05.252607 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.252612 17389 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:12:05.252621 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:12:05.252634 17389 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:12:05.252640 17389 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:12:05.252650 17389 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:12:05.252920 17389 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:12:05.252933 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.252938 17389 net.cpp:165] Memory required for data: 1175041500\nI0817 16:12:05.252948 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:05.252956 17389 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:12:05.252964 17389 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:12:05.252970 17389 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.253033 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:12:05.253190 17389 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:12:05.253206 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.253211 17389 net.cpp:165] Memory required for data: 1179137500\nI0817 16:12:05.253219 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:12:05.253227 17389 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:12:05.253233 17389 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:12:05.253242 17389 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.253250 17389 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:12:05.253257 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.253262 17389 net.cpp:165] Memory required for data: 1183233500\nI0817 16:12:05.253267 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:12:05.253280 17389 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:12:05.253286 17389 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:12:05.253298 17389 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:12:05.253798 17389 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:12:05.253813 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.253818 17389 net.cpp:165] Memory required for data: 1187329500\nI0817 16:12:05.253834 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:12:05.253845 17389 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:12:05.253852 17389 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:12:05.253865 17389 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:12:05.254142 17389 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:12:05.254154 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.254159 17389 net.cpp:165] Memory required for data: 1191425500\nI0817 16:12:05.254169 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:05.254178 17389 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:12:05.254184 17389 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:12:05.254192 17389 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.254254 17389 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:12:05.254413 17389 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:12:05.254426 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.254431 17389 net.cpp:165] Memory required for data: 1195521500\nI0817 16:12:05.254439 17389 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:12:05.254452 17389 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:12:05.254459 17389 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:12:05.254467 17389 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:12:05.254473 17389 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:12:05.254508 17389 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:12:05.254520 17389 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:05.254525 17389 net.cpp:165] Memory required for data: 1199617500\nI0817 16:12:05.254530 17389 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:12:05.254541 17389 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:12:05.254547 17389 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:12:05.254555 17389 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.254565 17389 net.cpp:150] Setting up L2_b8_relu\nI0817 16:12:05.254571 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.254576 17389 net.cpp:165] Memory required for data: 1203713500\nI0817 16:12:05.254581 17389 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.254588 17389 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.254593 17389 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:12:05.254601 17389 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:05.254623 17389 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:05.254681 17389 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:12:05.254693 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.254700 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.254705 17389 net.cpp:165] Memory required for data: 1211905500\nI0817 16:12:05.254710 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:12:05.254724 17389 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:12:05.254730 17389 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:12:05.254742 17389 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:12:05.255236 17389 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:12:05.255250 17389 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:12:05.255255 17389 net.cpp:165] Memory required for data: 1216001500\nI0817 16:12:05.255264 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:12:05.255276 17389 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:12:05.255283 17389 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:12:05.255293 17389 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:12:05.255578 17389 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:12:05.255599 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.255604 17389 net.cpp:165] Memory required for data: 1220097500\nI0817 16:12:05.255614 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:05.255622 17389 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:12:05.255628 17389 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:12:05.255636 17389 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.255703 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:12:05.255867 17389 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:12:05.255879 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.255884 17389 net.cpp:165] Memory required for data: 1224193500\nI0817 16:12:05.255893 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:12:05.255901 17389 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:12:05.255908 17389 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:12:05.255918 17389 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.255928 17389 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:12:05.255935 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.255939 17389 net.cpp:165] Memory required for data: 1228289500\nI0817 16:12:05.255944 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:12:05.255957 17389 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:12:05.255964 17389 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:12:05.255972 17389 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:12:05.257455 17389 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:12:05.257472 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.257477 17389 net.cpp:165] Memory required for data: 1232385500\nI0817 16:12:05.257488 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:12:05.257503 17389 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:12:05.257513 17389 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:12:05.257522 17389 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:12:05.257788 17389 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:12:05.257802 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.257807 17389 net.cpp:165] Memory required for data: 1236481500\nI0817 16:12:05.257858 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:05.257869 17389 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:12:05.257876 17389 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:12:05.257884 17389 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.257947 17389 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:12:05.258105 17389 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:12:05.258116 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.258121 17389 net.cpp:165] Memory required for data: 1240577500\nI0817 16:12:05.258131 17389 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:12:05.258142 17389 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:12:05.258149 17389 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:12:05.258157 17389 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:12:05.258167 17389 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:12:05.258196 17389 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:12:05.258205 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.258210 17389 net.cpp:165] Memory required for data: 1244673500\nI0817 16:12:05.258215 17389 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:12:05.258224 17389 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:12:05.258229 17389 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:12:05.258239 17389 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.258249 17389 net.cpp:150] Setting up L2_b9_relu\nI0817 16:12:05.258256 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.258268 17389 net.cpp:165] Memory required for data: 1248769500\nI0817 16:12:05.258273 17389 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.258281 17389 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.258286 17389 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:12:05.258297 17389 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:05.258306 17389 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:05.258358 17389 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:12:05.258370 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.258378 17389 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:12:05.258383 17389 net.cpp:165] Memory required for data: 1256961500\nI0817 16:12:05.258388 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:12:05.258399 17389 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:12:05.258404 17389 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:12:05.258416 17389 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:12:05.258922 17389 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:12:05.258937 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.258942 17389 net.cpp:165] Memory required for data: 1257985500\nI0817 16:12:05.258951 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:12:05.258963 17389 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:12:05.258970 17389 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:12:05.258980 17389 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:12:05.259254 17389 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:12:05.259268 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.259272 17389 net.cpp:165] Memory required for data: 1259009500\nI0817 16:12:05.259284 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:05.259291 17389 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:12:05.259299 17389 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:12:05.259305 17389 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.259366 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:12:05.259536 17389 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:12:05.259554 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.259560 17389 net.cpp:165] Memory required for data: 1260033500\nI0817 16:12:05.259569 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:12:05.259577 17389 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:12:05.259583 17389 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:12:05.259591 17389 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:12:05.259601 17389 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:12:05.259608 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.259613 17389 net.cpp:165] Memory required for data: 1261057500\nI0817 16:12:05.259618 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:12:05.259631 17389 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:12:05.259637 17389 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:12:05.259646 17389 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:12:05.260140 17389 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:12:05.260155 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.260160 17389 net.cpp:165] Memory required for data: 1262081500\nI0817 16:12:05.260169 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:12:05.260181 17389 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:12:05.260188 17389 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:12:05.260196 17389 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:12:05.260468 17389 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:12:05.260488 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.260494 17389 net.cpp:165] Memory required for data: 1263105500\nI0817 16:12:05.260510 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:05.260522 17389 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:12:05.260529 17389 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:12:05.260536 17389 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:12:05.260596 17389 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:12:05.260763 17389 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:12:05.260777 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.260782 17389 net.cpp:165] Memory required for data: 1264129500\nI0817 16:12:05.260792 17389 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:12:05.260803 17389 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:12:05.260810 17389 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:12:05.260819 17389 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:12:05.260856 17389 net.cpp:150] Setting up L3_b1_pool\nI0817 16:12:05.260866 17389 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:12:05.260871 17389 net.cpp:165] Memory required for data: 1265153500\nI0817 16:12:05.260876 17389 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:12:05.260888 17389 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:12:05.260895 17389 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:12:05.260901 17389 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:12:05.260910 17389 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:12:05.260943 17389 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:12:05.260954 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.260959 17389 net.cpp:165] Memory required for data: 1266177500\nI0817 16:12:05.260964 17389 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:12:05.260972 17389 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:12:05.260977 17389 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:12:05.260989 17389 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:12:05.260999 17389 net.cpp:150] Setting up L3_b1_relu\nI0817 16:12:05.261006 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.261011 17389 net.cpp:165] Memory required for data: 1267201500\nI0817 16:12:05.261015 17389 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:12:05.261025 17389 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:12:05.261034 17389 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:12:05.262266 17389 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:12:05.262284 17389 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:12:05.262290 17389 net.cpp:165] Memory required for data: 1268225500\nI0817 16:12:05.262295 17389 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:12:05.262305 17389 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:12:05.262311 17389 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:12:05.262321 17389 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:12:05.262331 17389 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:12:05.262377 17389 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:12:05.262388 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.262393 17389 net.cpp:165] Memory required for data: 1270273500\nI0817 16:12:05.262398 17389 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.262406 17389 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.262413 17389 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:12:05.262423 17389 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:05.262434 17389 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:05.262488 17389 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:12:05.262516 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.262524 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.262528 17389 net.cpp:165] Memory required for data: 1274369500\nI0817 16:12:05.262534 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:12:05.262545 17389 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:12:05.262552 17389 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:12:05.262562 17389 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:12:05.263615 17389 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:12:05.263630 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.263635 17389 net.cpp:165] Memory required for data: 1276417500\nI0817 16:12:05.263644 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:12:05.263656 17389 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:12:05.263664 17389 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:12:05.263671 17389 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:12:05.263947 17389 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:12:05.263960 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.263965 17389 net.cpp:165] Memory required for data: 1278465500\nI0817 16:12:05.263975 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:05.263988 17389 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:12:05.263994 17389 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:12:05.264003 17389 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.264065 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:12:05.264228 17389 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:12:05.264241 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.264246 17389 net.cpp:165] Memory required for data: 1280513500\nI0817 16:12:05.264256 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:12:05.264266 17389 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:12:05.264272 17389 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:12:05.264282 17389 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:12:05.264292 17389 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:12:05.264299 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.264304 17389 net.cpp:165] Memory required for data: 1282561500\nI0817 16:12:05.264309 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:12:05.264319 17389 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:12:05.264325 17389 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:12:05.264338 17389 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:12:05.265385 17389 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:12:05.265400 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.265406 17389 net.cpp:165] Memory required for data: 1284609500\nI0817 16:12:05.265415 17389 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:12:05.265427 17389 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:12:05.265434 17389 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:12:05.265442 17389 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:12:05.265722 17389 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:12:05.265736 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.265741 17389 net.cpp:165] Memory required for data: 1286657500\nI0817 16:12:05.265751 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:05.265760 17389 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:12:05.265766 17389 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:12:05.265774 17389 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:12:05.265836 17389 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:12:05.265997 17389 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:12:05.266013 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.266018 17389 net.cpp:165] Memory required for data: 1288705500\nI0817 16:12:05.266034 17389 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:12:05.266044 17389 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:12:05.266052 17389 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:12:05.266058 17389 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:12:05.266067 17389 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:12:05.266103 17389 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:12:05.266113 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.266118 17389 net.cpp:165] Memory required for data: 1290753500\nI0817 16:12:05.266122 17389 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:12:05.266130 17389 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:12:05.266136 17389 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:12:05.266144 17389 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:12:05.266152 17389 net.cpp:150] Setting up L3_b2_relu\nI0817 16:12:05.266160 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.266165 17389 net.cpp:165] Memory required for data: 1292801500\nI0817 16:12:05.266168 17389 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.266178 17389 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.266185 17389 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:12:05.266192 17389 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:05.266202 17389 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:05.266250 17389 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:12:05.266265 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.266273 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.266278 17389 net.cpp:165] Memory required for data: 1296897500\nI0817 16:12:05.266283 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:12:05.266294 17389 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:12:05.266299 17389 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:12:05.266309 17389 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:12:05.267351 17389 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:12:05.267366 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.267371 17389 net.cpp:165] Memory required for data: 1298945500\nI0817 16:12:05.267380 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:12:05.267392 17389 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:12:05.267400 17389 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:12:05.267408 17389 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:12:05.267705 17389 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:12:05.267720 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.267725 17389 net.cpp:165] Memory required for data: 1300993500\nI0817 16:12:05.267736 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:05.267748 17389 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:12:05.267755 17389 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:12:05.267765 17389 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.267827 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:12:05.267992 17389 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:12:05.268004 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.268009 17389 net.cpp:165] Memory required for data: 1303041500\nI0817 16:12:05.268019 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:12:05.268028 17389 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:12:05.268033 17389 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:12:05.268044 17389 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:12:05.268060 17389 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:12:05.268069 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.268074 17389 net.cpp:165] Memory required for data: 1305089500\nI0817 16:12:05.268077 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:12:05.268090 17389 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:12:05.268095 17389 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:12:05.268106 17389 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:12:05.269170 17389 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:12:05.269186 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.269191 17389 net.cpp:165] Memory required for data: 1307137500\nI0817 16:12:05.269201 17389 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:12:05.269212 17389 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:12:05.269219 17389 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:12:05.269227 17389 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:12:05.269502 17389 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:12:05.269516 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.269521 17389 net.cpp:165] Memory required for data: 1309185500\nI0817 16:12:05.269531 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:05.269541 17389 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:12:05.269547 17389 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:12:05.269556 17389 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:12:05.269620 17389 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:12:05.269783 17389 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:12:05.269796 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.269801 17389 net.cpp:165] Memory required for data: 1311233500\nI0817 16:12:05.269810 17389 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:12:05.269819 17389 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:12:05.269826 17389 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:12:05.269834 17389 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:12:05.269845 17389 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:12:05.269878 17389 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:12:05.269892 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.269898 17389 net.cpp:165] Memory required for data: 1313281500\nI0817 16:12:05.269903 17389 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:12:05.269912 17389 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:12:05.269917 17389 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:12:05.269924 17389 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:12:05.269933 17389 net.cpp:150] Setting up L3_b3_relu\nI0817 16:12:05.269940 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.269945 17389 net.cpp:165] Memory required for data: 1315329500\nI0817 16:12:05.269950 17389 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.269959 17389 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.269965 17389 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:12:05.269973 17389 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:05.269982 17389 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:05.270028 17389 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:12:05.270043 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.270051 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.270056 17389 net.cpp:165] Memory required for data: 1319425500\nI0817 16:12:05.270061 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:12:05.270071 17389 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:12:05.270084 17389 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:12:05.270094 17389 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:12:05.271147 17389 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:12:05.271164 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.271169 17389 net.cpp:165] Memory required for data: 1321473500\nI0817 16:12:05.271178 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:12:05.271188 17389 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:12:05.271194 17389 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:12:05.271203 17389 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:12:05.271473 17389 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:12:05.271486 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.271492 17389 net.cpp:165] Memory required for data: 1323521500\nI0817 16:12:05.271507 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:05.271519 17389 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:12:05.271526 17389 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:12:05.271536 17389 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.271595 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:12:05.271757 17389 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:12:05.271770 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.271775 17389 net.cpp:165] Memory required for data: 1325569500\nI0817 16:12:05.271783 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:12:05.271792 17389 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:12:05.271798 17389 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:12:05.271808 17389 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:12:05.271818 17389 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:12:05.271826 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.271831 17389 net.cpp:165] Memory required for data: 1327617500\nI0817 16:12:05.271836 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:12:05.271849 17389 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:12:05.271855 17389 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:12:05.271864 17389 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:12:05.273967 17389 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:12:05.273984 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.273990 17389 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:12:05.273999 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:12:05.274013 17389 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:12:05.274019 17389 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:12:05.274029 17389 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:12:05.274302 17389 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:12:05.274314 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274319 17389 net.cpp:165] Memory required for data: 1331713500\nI0817 16:12:05.274329 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:05.274341 17389 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:12:05.274348 17389 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:12:05.274359 17389 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:12:05.274418 17389 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:12:05.274595 17389 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:12:05.274608 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274613 17389 net.cpp:165] Memory required for data: 1333761500\nI0817 16:12:05.274623 17389 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:12:05.274632 17389 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:12:05.274639 17389 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:12:05.274646 17389 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:12:05.274665 17389 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:12:05.274700 17389 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:12:05.274710 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274715 17389 net.cpp:165] Memory required for data: 1335809500\nI0817 16:12:05.274720 17389 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:12:05.274730 17389 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:12:05.274737 17389 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:12:05.274744 17389 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:12:05.274755 17389 net.cpp:150] Setting up L3_b4_relu\nI0817 16:12:05.274761 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274766 17389 net.cpp:165] Memory required for data: 1337857500\nI0817 16:12:05.274771 17389 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.274778 17389 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.274783 17389 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:12:05.274791 17389 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:05.274801 17389 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:05.274852 17389 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:12:05.274864 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274871 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.274876 17389 net.cpp:165] Memory required for data: 1341953500\nI0817 16:12:05.274881 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:12:05.274895 17389 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:12:05.274902 17389 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:12:05.274911 17389 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:12:05.275944 17389 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:12:05.275959 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.275964 17389 net.cpp:165] Memory required for data: 1344001500\nI0817 16:12:05.275974 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:12:05.275985 17389 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:12:05.275992 17389 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:12:05.276005 17389 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:12:05.276275 17389 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:12:05.276288 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.276293 17389 net.cpp:165] Memory required for data: 1346049500\nI0817 16:12:05.276304 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:05.276312 17389 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:12:05.276319 17389 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:12:05.276329 17389 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.276389 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:12:05.276558 17389 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:12:05.276572 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.276577 17389 net.cpp:165] Memory required for data: 1348097500\nI0817 16:12:05.276587 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:12:05.276594 17389 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:12:05.276602 17389 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:12:05.276612 17389 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:12:05.276623 17389 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:12:05.276629 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.276634 17389 net.cpp:165] Memory required for data: 1350145500\nI0817 16:12:05.276639 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:12:05.276652 17389 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:12:05.276665 17389 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:12:05.276674 17389 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:12:05.277699 17389 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:12:05.277716 17389 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:12:05.277721 17389 net.cpp:165] Memory required for data: 1352193500\nI0817 16:12:05.277730 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:12:05.277740 17389 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:12:05.277746 17389 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:12:05.277760 17389 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:12:05.278033 17389 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:12:05.278046 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278053 17389 net.cpp:165] Memory required for data: 1354241500\nI0817 16:12:05.278062 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:05.278074 17389 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:12:05.278080 17389 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:12:05.278089 17389 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:12:05.278147 17389 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:12:05.278308 17389 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:12:05.278321 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278326 17389 net.cpp:165] Memory required for data: 1356289500\nI0817 16:12:05.278336 17389 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:12:05.278344 17389 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:12:05.278350 17389 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:12:05.278358 17389 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:12:05.278369 17389 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:12:05.278403 17389 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:12:05.278414 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278419 17389 net.cpp:165] Memory required for data: 1358337500\nI0817 16:12:05.278424 17389 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:12:05.278434 17389 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:12:05.278441 17389 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:12:05.278448 17389 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:12:05.278457 17389 net.cpp:150] Setting up L3_b5_relu\nI0817 16:12:05.278465 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278470 17389 net.cpp:165] Memory required for data: 1360385500\nI0817 16:12:05.278475 17389 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.278481 17389 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.278486 17389 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:12:05.278493 17389 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:05.278511 17389 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:05.278561 17389 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:12:05.278573 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278580 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.278584 17389 net.cpp:165] Memory required for data: 1364481500\nI0817 16:12:05.278589 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:12:05.278604 17389 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:12:05.278610 17389 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:12:05.278620 17389 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:12:05.279649 17389 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:12:05.279664 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.279670 17389 net.cpp:165] Memory required for data: 1366529500\nI0817 16:12:05.279685 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:12:05.279697 
17389 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:12:05.279705 17389 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:12:05.279716 17389 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:12:05.279985 17389 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:12:05.279999 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.280004 17389 net.cpp:165] Memory required for data: 1368577500\nI0817 16:12:05.280014 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:05.280022 17389 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:12:05.280028 17389 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:12:05.280040 17389 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.280100 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:12:05.280269 17389 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:12:05.280283 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.280288 17389 net.cpp:165] Memory required for data: 1370625500\nI0817 16:12:05.280297 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:12:05.280305 17389 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:12:05.280311 17389 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:12:05.280321 17389 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:12:05.280333 17389 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:12:05.280339 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.280344 17389 net.cpp:165] Memory required for data: 1372673500\nI0817 16:12:05.280349 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:12:05.280362 17389 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:12:05.280369 17389 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:12:05.280380 17389 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:12:05.281412 17389 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:12:05.281426 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.281431 17389 net.cpp:165] Memory required for data: 1374721500\nI0817 16:12:05.281440 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:12:05.281450 17389 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:12:05.281456 17389 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:12:05.281467 17389 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:12:05.281747 17389 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:12:05.281764 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.281769 17389 net.cpp:165] Memory required for data: 1376769500\nI0817 16:12:05.281780 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:05.281787 17389 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:12:05.281795 17389 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:12:05.281801 17389 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:12:05.281860 17389 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:12:05.282021 17389 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:12:05.282034 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.282039 17389 net.cpp:165] Memory required for data: 1378817500\nI0817 16:12:05.282048 17389 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:12:05.282060 17389 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:12:05.282068 17389 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:12:05.282074 17389 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:12:05.282083 17389 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:12:05.282119 17389 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:12:05.282130 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.282135 17389 net.cpp:165] Memory required for data: 1380865500\nI0817 16:12:05.282140 17389 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:12:05.282155 17389 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:12:05.282161 17389 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:12:05.282168 17389 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:12:05.282178 17389 net.cpp:150] Setting up L3_b6_relu\nI0817 16:12:05.282186 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.282189 17389 net.cpp:165] Memory required for data: 1382913500\nI0817 16:12:05.282194 17389 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.282202 17389 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.282207 17389 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:12:05.282217 17389 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:05.282227 17389 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:05.282275 17389 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:12:05.282286 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.282294 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.282299 17389 net.cpp:165] Memory required for data: 1387009500\nI0817 16:12:05.282304 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:12:05.282321 17389 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:12:05.282328 17389 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:12:05.282337 17389 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:12:05.283371 17389 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:12:05.283386 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.283391 17389 net.cpp:165] Memory required for data: 1389057500\nI0817 16:12:05.283401 17389 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:12:05.283411 17389 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:12:05.283418 17389 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:12:05.283427 17389 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:12:05.283704 17389 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:12:05.283718 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.283723 17389 net.cpp:165] Memory required for data: 1391105500\nI0817 16:12:05.283733 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:05.283742 17389 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:12:05.283748 17389 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:12:05.283759 17389 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.283820 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:12:05.283980 17389 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:12:05.283993 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.283998 17389 net.cpp:165] Memory required for data: 1393153500\nI0817 16:12:05.284008 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:12:05.284044 17389 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:12:05.284054 17389 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:12:05.284062 17389 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:12:05.284072 17389 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:12:05.284080 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.284085 17389 net.cpp:165] Memory required for data: 1395201500\nI0817 16:12:05.284090 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:12:05.284101 17389 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:12:05.284107 17389 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:12:05.284116 17389 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:12:05.285147 17389 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:12:05.285162 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.285167 17389 net.cpp:165] Memory required for data: 1397249500\nI0817 16:12:05.285182 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:12:05.285197 17389 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:12:05.285204 17389 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:12:05.285214 17389 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:12:05.285490 17389 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:12:05.285508 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.285513 17389 net.cpp:165] Memory required for data: 1399297500\nI0817 16:12:05.285524 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:05.285532 17389 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:12:05.285539 17389 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:12:05.285550 17389 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:12:05.285610 17389 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:12:05.285774 17389 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:12:05.285787 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.285792 17389 net.cpp:165] Memory required for data: 1401345500\nI0817 16:12:05.285802 17389 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:12:05.285814 17389 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:12:05.285820 17389 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:12:05.285828 17389 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:12:05.285836 17389 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:12:05.285872 17389 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:12:05.285884 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.285889 17389 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:12:05.285894 17389 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:12:05.285902 17389 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:12:05.285908 17389 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:12:05.285918 17389 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:12:05.285928 17389 net.cpp:150] Setting up L3_b7_relu\nI0817 16:12:05.285935 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.285940 17389 net.cpp:165] Memory required for data: 1405441500\nI0817 16:12:05.285944 17389 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.285951 17389 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.285957 17389 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:12:05.285964 17389 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:05.285974 17389 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:05.286025 17389 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:12:05.286036 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.286043 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.286047 17389 net.cpp:165] Memory required for data: 1409537500\nI0817 16:12:05.286052 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:12:05.286063 17389 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:12:05.286069 17389 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:12:05.286082 17389 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:12:05.288091 17389 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:12:05.288108 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.288113 17389 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:12:05.288123 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:12:05.288136 17389 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:12:05.288142 17389 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:12:05.288151 17389 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:12:05.288427 17389 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:12:05.288449 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.288453 17389 net.cpp:165] Memory required for data: 1413633500\nI0817 16:12:05.288465 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:05.288475 17389 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:12:05.288482 17389 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:12:05.288491 17389 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.288560 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:12:05.288727 17389 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:12:05.288739 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.288744 17389 net.cpp:165] Memory required for data: 1415681500\nI0817 16:12:05.288753 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:12:05.288764 17389 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:12:05.288771 17389 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:12:05.288779 17389 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:12:05.288789 17389 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:12:05.288799 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.288802 17389 net.cpp:165] Memory required for data: 1417729500\nI0817 16:12:05.288807 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:12:05.288818 17389 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:12:05.288825 17389 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:12:05.288836 17389 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:12:05.289875 17389 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:12:05.289891 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.289896 17389 net.cpp:165] Memory required for data: 1419777500\nI0817 16:12:05.289903 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:12:05.289913 17389 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:12:05.289921 17389 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:12:05.289932 17389 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:12:05.290210 17389 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:12:05.290223 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.290228 17389 net.cpp:165] Memory required for data: 1421825500\nI0817 16:12:05.290238 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:05.290247 17389 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:12:05.290253 17389 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:12:05.290261 17389 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:12:05.290325 17389 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:12:05.290484 17389 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:12:05.290505 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.290511 17389 net.cpp:165] Memory required for data: 1423873500\nI0817 16:12:05.290520 17389 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:12:05.290530 17389 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:12:05.290536 17389 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:12:05.290544 17389 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:12:05.290551 17389 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:12:05.290590 17389 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:12:05.290601 17389 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:12:05.290606 17389 net.cpp:165] Memory required for data: 1425921500\nI0817 16:12:05.290611 17389 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:12:05.290619 17389 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:12:05.290626 17389 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:12:05.290632 17389 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:12:05.290642 17389 net.cpp:150] Setting up L3_b8_relu\nI0817 16:12:05.290648 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.290660 17389 net.cpp:165] Memory required for data: 1427969500\nI0817 16:12:05.290665 17389 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.290673 17389 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.290678 17389 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:12:05.290688 17389 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:05.290699 17389 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:05.290746 17389 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:12:05.290761 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.290769 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.290773 17389 net.cpp:165] Memory required for data: 1432065500\nI0817 16:12:05.290778 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:12:05.290791 17389 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:12:05.290796 17389 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:12:05.290805 17389 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:12:05.291841 17389 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:12:05.291856 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:12:05.291860 17389 net.cpp:165] Memory required for data: 1434113500\nI0817 16:12:05.291869 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:12:05.291882 17389 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:12:05.291888 17389 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:12:05.291896 17389 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:12:05.292168 17389 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:12:05.292181 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.292186 17389 net.cpp:165] Memory required for data: 1436161500\nI0817 16:12:05.292197 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:05.292208 17389 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:12:05.292214 17389 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:12:05.292222 17389 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.292289 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:12:05.292450 17389 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:12:05.292464 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.292469 17389 net.cpp:165] Memory required for data: 1438209500\nI0817 16:12:05.292477 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:12:05.292491 17389 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:12:05.292503 17389 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:12:05.292515 17389 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:12:05.292526 17389 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:12:05.292532 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.292537 17389 net.cpp:165] Memory required for data: 1440257500\nI0817 16:12:05.292542 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:12:05.292553 17389 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:12:05.292559 17389 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:12:05.292570 17389 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:12:05.293635 17389 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:12:05.293651 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.293656 17389 net.cpp:165] Memory required for data: 1442305500\nI0817 16:12:05.293665 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:12:05.293678 17389 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:12:05.293684 17389 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:12:05.293694 17389 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:12:05.293968 17389 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:12:05.293982 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.293993 17389 net.cpp:165] Memory required for data: 1444353500\nI0817 16:12:05.294004 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:05.294013 17389 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:12:05.294019 17389 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:12:05.294028 17389 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:12:05.294090 17389 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:12:05.294250 17389 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:12:05.294267 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.294272 17389 net.cpp:165] Memory required for data: 1446401500\nI0817 16:12:05.294282 17389 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:12:05.294291 17389 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:12:05.294297 17389 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:12:05.294304 17389 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:12:05.294312 17389 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:12:05.294348 17389 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:12:05.294360 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.294365 17389 net.cpp:165] Memory required for data: 1448449500\nI0817 16:12:05.294370 17389 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:12:05.294378 17389 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:12:05.294384 17389 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:12:05.294391 17389 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:12:05.294400 17389 net.cpp:150] Setting up L3_b9_relu\nI0817 16:12:05.294407 17389 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:12:05.294412 17389 net.cpp:165] Memory required for data: 1450497500\nI0817 16:12:05.294416 17389 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:12:05.294427 17389 net.cpp:100] Creating Layer post_pool\nI0817 16:12:05.294433 17389 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:12:05.294441 17389 net.cpp:408] post_pool -> post_pool\nI0817 16:12:05.294476 17389 net.cpp:150] Setting up post_pool\nI0817 16:12:05.294488 17389 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:12:05.294493 17389 net.cpp:165] Memory required for data: 1450529500\nI0817 16:12:05.294504 17389 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:12:05.294517 17389 net.cpp:100] Creating Layer post_FC\nI0817 16:12:05.294523 17389 net.cpp:434] post_FC <- post_pool\nI0817 16:12:05.294534 17389 net.cpp:408] post_FC -> post_FC_top\nI0817 16:12:05.294754 17389 net.cpp:150] Setting up post_FC\nI0817 16:12:05.294766 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.294771 17389 net.cpp:165] Memory required for data: 1450579500\nI0817 16:12:05.294780 17389 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:12:05.294791 17389 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:12:05.294797 17389 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:12:05.294808 17389 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:12:05.294818 17389 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:12:05.294872 17389 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:12:05.294883 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.294890 17389 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:12:05.294895 17389 net.cpp:165] Memory required for data: 1450679500\nI0817 16:12:05.294900 17389 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:12:05.294909 17389 net.cpp:100] Creating Layer accuracy\nI0817 16:12:05.294914 17389 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:12:05.294921 17389 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:12:05.294929 17389 net.cpp:408] accuracy -> accuracy\nI0817 16:12:05.294940 17389 net.cpp:150] Setting up accuracy\nI0817 16:12:05.294947 17389 net.cpp:157] Top shape: (1)\nI0817 16:12:05.294960 17389 net.cpp:165] Memory required for data: 1450679504\nI0817 16:12:05.294965 17389 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:05.294977 17389 net.cpp:100] Creating Layer loss\nI0817 16:12:05.294983 17389 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:12:05.294991 17389 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:12:05.294998 17389 net.cpp:408] loss -> loss\nI0817 16:12:05.295011 17389 layer_factory.hpp:77] Creating layer loss\nI0817 16:12:05.295143 17389 net.cpp:150] Setting up loss\nI0817 16:12:05.295158 17389 net.cpp:157] Top shape: (1)\nI0817 16:12:05.295164 17389 net.cpp:160]     with loss weight 1\nI0817 16:12:05.295181 17389 net.cpp:165] Memory required for data: 1450679508\nI0817 16:12:05.295186 17389 net.cpp:226] loss needs backward computation.\nI0817 16:12:05.295193 17389 net.cpp:228] accuracy does not need backward computation.\nI0817 16:12:05.295199 17389 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:12:05.295204 17389 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:12:05.295209 17389 net.cpp:226] post_pool needs backward computation.\nI0817 16:12:05.295214 17389 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:12:05.295218 17389 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.295224 17389 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.295229 17389 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.295234 17389 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.295239 17389 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.295244 17389 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.295248 17389 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.295253 17389 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.295258 17389 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:12:05.295264 17389 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:12:05.295269 17389 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.295274 17389 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.295279 17389 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.295284 17389 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.295289 17389 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.295295 17389 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:12:05.295298 17389 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.295303 17389 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:12:05.295308 17389 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:12:05.295313 17389 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:12:05.295318 17389 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:12:05.295325 17389 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.295330 17389 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.295334 17389 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.295339 17389 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.295343 17389 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.295348 17389 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.295353 17389 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.295358 17389 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:12:05.295363 17389 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:12:05.295368 17389 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.295374 17389 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.295379 17389 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.295390 17389 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.295397 17389 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.295402 17389 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.295405 17389 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.295411 17389 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.295416 17389 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:12:05.295421 17389 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:12:05.295426 17389 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:12:05.295431 17389 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.295436 17389 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:12:05.295441 17389 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.295447 17389 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.295452 17389 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.295457 17389 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.295462 17389 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.295467 17389 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:12:05.295475 17389 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:12:05.295481 17389 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.295486 17389 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.295491 17389 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.295503 17389 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.295508 17389 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.295513 17389 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.295519 17389 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.295524 17389 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.295529 17389 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:12:05.295536 17389 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:12:05.295541 17389 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:12:05.295545 17389 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.295550 17389 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.295555 17389 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.295560 17389 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.295567 17389 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.295572 
17389 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.295577 17389 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.295581 17389 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:12:05.295588 17389 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:12:05.295593 17389 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.295598 17389 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.295603 17389 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.295608 17389 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.295614 17389 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.295619 17389 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.295624 17389 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.295629 17389 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.295634 17389 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:12:05.295639 17389 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:12:05.295652 17389 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:12:05.295657 17389 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:12:05.295662 17389 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.295670 17389 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:12:05.295675 17389 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.295680 17389 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:12:05.295684 17389 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.295691 17389 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.295696 17389 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:12:05.295701 17389 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.295706 17389 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.295711 17389 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:12:05.295717 17389 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:12:05.295722 17389 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.295727 17389 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.295732 17389 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.295737 17389 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.295742 17389 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.295747 17389 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.295753 17389 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.295758 17389 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.295763 17389 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:12:05.295769 17389 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:12:05.295774 17389 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.295780 17389 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.295785 17389 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.295791 17389 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.295796 17389 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.295801 17389 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:12:05.295806 17389 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.295812 17389 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:12:05.295817 17389 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:12:05.295824 17389 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:12:05.295828 17389 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:12:05.295838 17389 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.295845 17389 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.295850 17389 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.295855 17389 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.295861 17389 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.295866 17389 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.295871 17389 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.295876 17389 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:12:05.295882 17389 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:12:05.295887 17389 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.295893 17389 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.295898 17389 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.295903 17389 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.295913 17389 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.295919 17389 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.295924 17389 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.295930 17389 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.295935 17389 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:12:05.295941 17389 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:12:05.295946 17389 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:12:05.295953 17389 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.295958 17389 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:12:05.295963 17389 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.295969 17389 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.295974 17389 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.295979 17389 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.295984 17389 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.295989 17389 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:12:05.295994 17389 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:12:05.296000 17389 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.296006 17389 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.296011 17389 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.296016 17389 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.296022 17389 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.296027 17389 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.296033 17389 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.296038 17389 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.296044 17389 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:12:05.296049 17389 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:12:05.296056 17389 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:12:05.296061 17389 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.296066 17389 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.296072 17389 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.296077 17389 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.296082 17389 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.296087 17389 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.296093 17389 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.296098 17389 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:12:05.296104 17389 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:12:05.296110 17389 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.296116 17389 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.296121 17389 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.296126 17389 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.296133 17389 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.296138 17389 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.296142 17389 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.296149 17389 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.296154 17389 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:12:05.296159 17389 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:12:05.296165 17389 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:12:05.296175 17389 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:12:05.296181 17389 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.296187 17389 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:12:05.296193 17389 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.296198 17389 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:12:05.296205 17389 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.296210 17389 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.296214 17389 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:12:05.296221 17389 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.296226 17389 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.296231 17389 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:12:05.296236 17389 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:12:05.296241 17389 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:12:05.296248 17389 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:12:05.296253 17389 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:12:05.296258 17389 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:12:05.296264 17389 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:12:05.296269 17389 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:12:05.296274 17389 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:12:05.296280 17389 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:12:05.296285 17389 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:12:05.296291 17389 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:12:05.296296 17389 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:12:05.296303 17389 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:12:05.296308 17389 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:12:05.296314 17389 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:12:05.296319 17389 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:12:05.296324 17389 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:12:05.296329 17389 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:12:05.296335 17389 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:12:05.296341 17389 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:12:05.296346 17389 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:12:05.296352 17389 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:12:05.296358 17389 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:12:05.296363 17389 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:12:05.296370 17389 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:12:05.296375 17389 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:12:05.296380 17389 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:12:05.296386 17389 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:12:05.296391 17389 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:12:05.296396 17389 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:12:05.296402 17389 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:12:05.296408 17389 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:12:05.296414 17389 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:12:05.296419 17389 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:12:05.296425 17389 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:12:05.296430 17389 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:12:05.296437 17389 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:12:05.296445 17389 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:12:05.296452 17389 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:12:05.296458 17389 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:12:05.296463 17389 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:12:05.296468 17389 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:12:05.296475 17389 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:12:05.296480 17389 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:12:05.296486 17389 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:12:05.296492 17389 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:12:05.296504 17389 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:12:05.296509 17389 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:12:05.296515 17389 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:12:05.296525 17389 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:12:05.296530 17389 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:12:05.296536 17389 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:12:05.296542 17389 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:12:05.296548 17389 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:12:05.296553 17389 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:12:05.296560 17389 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:12:05.296566 17389 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:12:05.296571 17389 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:12:05.296576 17389 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:12:05.296582 17389 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:12:05.296588 17389 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:12:05.296593 17389 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:12:05.296600 17389 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:12:05.296607 17389 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:12:05.296612 17389 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:12:05.296617 17389 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:12:05.296623 17389 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:12:05.296628 17389 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:12:05.296634 17389 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:12:05.296640 17389 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:12:05.296645 17389 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:12:05.296651 17389 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:12:05.296658 17389 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:12:05.296663 17389 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:12:05.296669 17389 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:12:05.296674 17389 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:12:05.296680 17389 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:12:05.296685 17389 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:12:05.296691 17389 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:12:05.296697 17389 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:12:05.296703 17389 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:12:05.296708 17389 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:12:05.296715 17389 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:12:05.296720 17389 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:12:05.296733 17389 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:12:05.296739 17389 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:12:05.296746 17389 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:12:05.296751 17389 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:12:05.296756 17389 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:12:05.296762 17389 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:12:05.296768 17389 net.cpp:226] pre_relu needs backward computation.\nI0817 16:12:05.296773 17389 net.cpp:226] pre_scale needs backward computation.\nI0817 16:12:05.296778 17389 net.cpp:226] pre_bn needs backward computation.\nI0817 16:12:05.296784 17389 net.cpp:226] pre_conv needs backward computation.\nI0817 16:12:05.296792 17389 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:12:05.296797 17389 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:12:05.296802 17389 net.cpp:270] This network produces output accuracy\nI0817 16:12:05.296808 17389 net.cpp:270] This network produces output loss\nI0817 16:12:05.297135 17389 net.cpp:283] Network initialization done.\nI0817 16:12:05.298123 17389 solver.cpp:60] Solver scaffolding done.\nI0817 16:12:05.521611 17389 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:12:05.881561 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:05.881623 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:05.888593 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:06.122622 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:06.122707 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:06.157598 17389 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:12:06.157681 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:06.597317 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:06.597394 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:06.605129 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:06.846953 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:06.847091 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:06.898422 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:06.898556 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:07.409073 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:07.409149 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:07.417909 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:07.685014 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:07.685181 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:07.757283 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:07.757452 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:07.842108 17389 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:12:08.319456 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:08.319526 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:12:08.329145 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:08.621110 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:08.621268 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:08.711936 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:08.712091 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:09.346900 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:09.346956 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:09.357379 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:09.667503 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:09.667686 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:09.779884 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:09.780061 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:10.481914 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:10.481964 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:10.493132 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:10.833395 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:10.833608 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:10.965350 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:10.965559 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:12:11.734303 17389 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:12:11.734359 17389 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:12:11.746484 17389 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:12:11.829638 17417 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:11.909303 17408 blocking_queue.cpp:50] Waiting for data\nI0817 16:12:12.201844 17389 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:12:12.202121 17389 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:12:12.351305 17389 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:12:12.351567 17389 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:12:12.520368 17389 parallel.cpp:425] Starting Optimization\nI0817 16:12:12.521652 17389 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:12:12.521668 17389 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:12:12.525707 17389 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:13:33.662637 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01008\nI0817 16:13:33.662915 17389 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:13:37.683641 17389 solver.cpp:228] Iteration 0, loss = 7.50831\nI0817 16:13:37.683683 17389 solver.cpp:244]     Train net output #0: accuracy = 0.008\nI0817 16:13:37.683707 17389 solver.cpp:244]     Train net output #1: loss = 7.50831 (* 1 = 7.50831 loss)\nI0817 16:13:37.705570 17389 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:15:55.776700 17389 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:17:16.475668 17389 solver.cpp:404]     Test net output #0: accuracy = 0.0174\nI0817 16:17:16.475920 17389 solver.cpp:404]     Test net output #1: loss = 5.11742 (* 1 = 5.11742 loss)\nI0817 16:17:17.782204 17389 
solver.cpp:228] Iteration 100, loss = 4.49584\nI0817 16:17:17.782250 17389 solver.cpp:244]     Train net output #0: accuracy = 0.024\nI0817 16:17:17.782276 17389 solver.cpp:244]     Train net output #1: loss = 4.49584 (* 1 = 4.49584 loss)\nI0817 16:17:17.876252 17389 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:19:35.756603 17389 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:20:56.364441 17389 solver.cpp:404]     Test net output #0: accuracy = 0.0208\nI0817 16:20:56.364673 17389 solver.cpp:404]     Test net output #1: loss = 4.58737 (* 1 = 4.58737 loss)\nI0817 16:20:57.670411 17389 solver.cpp:228] Iteration 200, loss = 4.37399\nI0817 16:20:57.670454 17389 solver.cpp:244]     Train net output #0: accuracy = 0.056\nI0817 16:20:57.670470 17389 solver.cpp:244]     Train net output #1: loss = 4.37399 (* 1 = 4.37399 loss)\nI0817 16:20:57.768946 17389 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:23:15.774849 17389 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:24:36.375341 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01464\nI0817 16:24:36.375573 17389 solver.cpp:404]     Test net output #1: loss = 4.55675 (* 1 = 4.55675 loss)\nI0817 16:24:37.680487 17389 solver.cpp:228] Iteration 300, loss = 4.10971\nI0817 16:24:37.680529 17389 solver.cpp:244]     Train net output #0: accuracy = 0.056\nI0817 16:24:37.680546 17389 solver.cpp:244]     Train net output #1: loss = 4.10971 (* 1 = 4.10971 loss)\nI0817 16:24:37.781458 17389 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:26:55.784099 17389 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:28:16.384999 17389 solver.cpp:404]     Test net output #0: accuracy = 0.0236\nI0817 16:28:16.385227 17389 solver.cpp:404]     Test net output #1: loss = 4.71505 (* 1 = 4.71505 loss)\nI0817 16:28:17.690732 17389 solver.cpp:228] Iteration 400, loss = 3.76533\nI0817 16:28:17.690773 17389 solver.cpp:244]     Train net output #0: accuracy = 0.104\nI0817 16:28:17.690790 17389 
solver.cpp:244]     Train net output #1: loss = 3.76533 (* 1 = 3.76533 loss)\nI0817 16:28:17.786463 17389 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:30:35.765229 17389 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:31:56.364781 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01584\nI0817 16:31:56.365015 17389 solver.cpp:404]     Test net output #1: loss = 5.40832 (* 1 = 5.40832 loss)\nI0817 16:31:57.670871 17389 solver.cpp:228] Iteration 500, loss = 3.36344\nI0817 16:31:57.670913 17389 solver.cpp:244]     Train net output #0: accuracy = 0.184\nI0817 16:31:57.670930 17389 solver.cpp:244]     Train net output #1: loss = 3.36344 (* 1 = 3.36344 loss)\nI0817 16:31:57.769003 17389 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 16:34:15.787906 17389 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:35:36.390323 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01328\nI0817 16:35:36.390571 17389 solver.cpp:404]     Test net output #1: loss = 6.37108 (* 1 = 6.37108 loss)\nI0817 16:35:37.697036 17389 solver.cpp:228] Iteration 600, loss = 3.05674\nI0817 16:35:37.697078 17389 solver.cpp:244]     Train net output #0: accuracy = 0.248\nI0817 16:35:37.697095 17389 solver.cpp:244]     Train net output #1: loss = 3.05674 (* 1 = 3.05674 loss)\nI0817 16:35:37.794015 17389 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:37:55.795436 17389 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:39:16.400178 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01968\nI0817 16:39:16.400405 17389 solver.cpp:404]     Test net output #1: loss = 5.36808 (* 1 = 5.36808 loss)\nI0817 16:39:17.706804 17389 solver.cpp:228] Iteration 700, loss = 2.73918\nI0817 16:39:17.706846 17389 solver.cpp:244]     Train net output #0: accuracy = 0.296\nI0817 16:39:17.706862 17389 solver.cpp:244]     Train net output #1: loss = 2.73918 (* 1 = 2.73918 loss)\nI0817 16:39:17.803426 17389 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 
16:41:35.761533 17389 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:42:56.355957 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01384\nI0817 16:42:56.356209 17389 solver.cpp:404]     Test net output #1: loss = 5.8808 (* 1 = 5.8808 loss)\nI0817 16:42:57.663069 17389 solver.cpp:228] Iteration 800, loss = 2.67953\nI0817 16:42:57.663110 17389 solver.cpp:244]     Train net output #0: accuracy = 0.328\nI0817 16:42:57.663126 17389 solver.cpp:244]     Train net output #1: loss = 2.67953 (* 1 = 2.67953 loss)\nI0817 16:42:57.759519 17389 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 16:45:15.774258 17389 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:46:36.377095 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01548\nI0817 16:46:36.377341 17389 solver.cpp:404]     Test net output #1: loss = 5.93219 (* 1 = 5.93219 loss)\nI0817 16:46:37.684022 17389 solver.cpp:228] Iteration 900, loss = 2.39115\nI0817 16:46:37.684064 17389 solver.cpp:244]     Train net output #0: accuracy = 0.32\nI0817 16:46:37.684082 17389 solver.cpp:244]     Train net output #1: loss = 2.39115 (* 1 = 2.39115 loss)\nI0817 16:46:37.777937 17389 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 16:48:55.876639 17389 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:50:16.477902 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01528\nI0817 16:50:16.478130 17389 solver.cpp:404]     Test net output #1: loss = 6.30307 (* 1 = 6.30307 loss)\nI0817 16:50:17.785171 17389 solver.cpp:228] Iteration 1000, loss = 2.13017\nI0817 16:50:17.785213 17389 solver.cpp:244]     Train net output #0: accuracy = 0.432\nI0817 16:50:17.785229 17389 solver.cpp:244]     Train net output #1: loss = 2.13017 (* 1 = 2.13017 loss)\nI0817 16:50:17.877233 17389 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 16:52:35.858897 17389 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:53:56.458472 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.0152\nI0817 16:53:56.458729 17389 solver.cpp:404]     Test net output #1: loss = 6.33402 (* 1 = 6.33402 loss)\nI0817 16:53:57.765321 17389 solver.cpp:228] Iteration 1100, loss = 2.15043\nI0817 16:53:57.765360 17389 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI0817 16:53:57.765377 17389 solver.cpp:244]     Train net output #1: loss = 2.15043 (* 1 = 2.15043 loss)\nI0817 16:53:57.862074 17389 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 16:56:15.879751 17389 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:57:36.477772 17389 solver.cpp:404]     Test net output #0: accuracy = 0.01672\nI0817 16:57:36.478003 17389 solver.cpp:404]     Test net output #1: loss = 6.07717 (* 1 = 6.07717 loss)\nI0817 16:57:37.784411 17389 solver.cpp:228] Iteration 1200, loss = 1.89671\nI0817 16:57:37.784453 17389 solver.cpp:244]     Train net output #0: accuracy = 0.472\nI0817 16:57:37.784469 17389 solver.cpp:244]     Train net output #1: loss = 1.89671 (* 1 = 1.89671 loss)\nI0817 16:57:37.876600 17389 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 16:59:55.905884 17389 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:01:16.602932 17389 solver.cpp:404]     Test net output #0: accuracy = 0.027\nI0817 17:01:16.603184 17389 solver.cpp:404]     Test net output #1: loss = 6.01425 (* 1 = 6.01425 loss)\nI0817 17:01:17.908900 17389 solver.cpp:228] Iteration 1300, loss = 1.82427\nI0817 17:01:17.908944 17389 solver.cpp:244]     Train net output #0: accuracy = 0.488\nI0817 17:01:17.908962 17389 solver.cpp:244]     Train net output #1: loss = 1.82427 (* 1 = 1.82427 loss)\nI0817 17:01:18.000248 17389 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 17:03:36.008049 17389 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:04:56.711817 17389 solver.cpp:404]     Test net output #0: accuracy = 0.02732\nI0817 17:04:56.712047 17389 solver.cpp:404]     Test net output #1: loss = 5.89007 (* 1 = 5.89007 loss)\nI0817 17:04:58.017781 17389 solver.cpp:228] 
Iteration 1400, loss = 1.59863\nI0817 17:04:58.017824 17389 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0817 17:04:58.017841 17389 solver.cpp:244]     Train net output #1: loss = 1.59863 (* 1 = 1.59863 loss)\nI0817 17:04:58.116650 17389 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 17:07:16.109199 17389 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:08:36.809818 17389 solver.cpp:404]     Test net output #0: accuracy = 0.03808\nI0817 17:08:36.810061 17389 solver.cpp:404]     Test net output #1: loss = 5.40935 (* 1 = 5.40935 loss)\nI0817 17:08:38.116973 17389 solver.cpp:228] Iteration 1500, loss = 1.57191\nI0817 17:08:38.117017 17389 solver.cpp:244]     Train net output #0: accuracy = 0.512\nI0817 17:08:38.117033 17389 solver.cpp:244]     Train net output #1: loss = 1.57191 (* 1 = 1.57191 loss)\nI0817 17:08:38.213498 17389 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 17:10:56.243485 17389 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:12:16.893396 17389 solver.cpp:404]     Test net output #0: accuracy = 0.02596\nI0817 17:12:16.893651 17389 solver.cpp:404]     Test net output #1: loss = 6.49456 (* 1 = 6.49456 loss)\nI0817 17:12:18.199338 17389 solver.cpp:228] Iteration 1600, loss = 1.49425\nI0817 17:12:18.199383 17389 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0817 17:12:18.199399 17389 solver.cpp:244]     Train net output #1: loss = 1.49425 (* 1 = 1.49425 loss)\nI0817 17:12:18.298209 17389 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 17:14:36.286450 17389 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:15:57.009739 17389 solver.cpp:404]     Test net output #0: accuracy = 0.04072\nI0817 17:15:57.009987 17389 solver.cpp:404]     Test net output #1: loss = 5.88059 (* 1 = 5.88059 loss)\nI0817 17:15:58.315902 17389 solver.cpp:228] Iteration 1700, loss = 1.44995\nI0817 17:15:58.315949 17389 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0817 17:15:58.315973 17389 
solver.cpp:244]     Train net output #1: loss = 1.44995 (* 1 = 1.44995 loss)\nI0817 17:15:58.416784 17389 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 17:18:16.509363 17389 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:19:37.205662 17389 solver.cpp:404]     Test net output #0: accuracy = 0.07012\nI0817 17:19:37.205909 17389 solver.cpp:404]     Test net output #1: loss = 5.10975 (* 1 = 5.10975 loss)\nI0817 17:19:38.511291 17389 solver.cpp:228] Iteration 1800, loss = 1.30621\nI0817 17:19:38.511337 17389 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0817 17:19:38.511361 17389 solver.cpp:244]     Train net output #1: loss = 1.30621 (* 1 = 1.30621 loss)\nI0817 17:19:38.606223 17389 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 17:21:56.703483 17389 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:23:17.343729 17389 solver.cpp:404]     Test net output #0: accuracy = 0.051\nI0817 17:23:17.343992 17389 solver.cpp:404]     Test net output #1: loss = 5.77071 (* 1 = 5.77071 loss)\nI0817 17:23:18.650110 17389 solver.cpp:228] Iteration 1900, loss = 1.26445\nI0817 17:23:18.650156 17389 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0817 17:23:18.650179 17389 solver.cpp:244]     Train net output #1: loss = 1.26445 (* 1 = 1.26445 loss)\nI0817 17:23:18.746249 17389 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 17:25:36.750352 17389 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:26:57.361205 17389 solver.cpp:404]     Test net output #0: accuracy = 0.06896\nI0817 17:26:57.361438 17389 solver.cpp:404]     Test net output #1: loss = 5.49546 (* 1 = 5.49546 loss)\nI0817 17:26:58.669929 17389 solver.cpp:228] Iteration 2000, loss = 1.27832\nI0817 17:26:58.669977 17389 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0817 17:26:58.670002 17389 solver.cpp:244]     Train net output #1: loss = 1.27832 (* 1 = 1.27832 loss)\nI0817 17:26:58.762584 17389 sgd_solver.cpp:166] Iteration 2000, lr = 
0.35\nI0817 17:29:16.780205 17389 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:30:37.492694 17389 solver.cpp:404]     Test net output #0: accuracy = 0.1156\nI0817 17:30:37.492928 17389 solver.cpp:404]     Test net output #1: loss = 4.61358 (* 1 = 4.61358 loss)\nI0817 17:30:38.802338 17389 solver.cpp:228] Iteration 2100, loss = 1.28371\nI0817 17:30:38.802387 17389 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI0817 17:30:38.802412 17389 solver.cpp:244]     Train net output #1: loss = 1.28371 (* 1 = 1.28371 loss)\nI0817 17:30:38.890293 17389 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 17:32:56.889230 17389 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:34:17.538151 17389 solver.cpp:404]     Test net output #0: accuracy = 0.12524\nI0817 17:34:17.538380 17389 solver.cpp:404]     Test net output #1: loss = 4.71326 (* 1 = 4.71326 loss)\nI0817 17:34:18.844671 17389 solver.cpp:228] Iteration 2200, loss = 1.06937\nI0817 17:34:18.844715 17389 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 17:34:18.844733 17389 solver.cpp:244]     Train net output #1: loss = 1.06937 (* 1 = 1.06937 loss)\nI0817 17:34:18.937877 17389 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 17:36:36.956086 17389 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:37:57.609194 17389 solver.cpp:404]     Test net output #0: accuracy = 0.12212\nI0817 17:37:57.609457 17389 solver.cpp:404]     Test net output #1: loss = 5.02833 (* 1 = 5.02833 loss)\nI0817 17:37:58.915823 17389 solver.cpp:228] Iteration 2300, loss = 1.02137\nI0817 17:37:58.915865 17389 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0817 17:37:58.915882 17389 solver.cpp:244]     Train net output #1: loss = 1.02137 (* 1 = 1.02137 loss)\nI0817 17:37:59.012361 17389 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 17:40:17.147280 17389 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:41:37.786535 17389 solver.cpp:404]     Test net output #0: 
accuracy = 0.12716\nI0817 17:41:37.786774 17389 solver.cpp:404]     Test net output #1: loss = 4.88589 (* 1 = 4.88589 loss)\nI0817 17:41:39.093379 17389 solver.cpp:228] Iteration 2400, loss = 0.938286\nI0817 17:41:39.093421 17389 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 17:41:39.093438 17389 solver.cpp:244]     Train net output #1: loss = 0.938286 (* 1 = 0.938286 loss)\nI0817 17:41:39.191583 17389 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 17:43:57.277290 17389 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:45:17.976239 17389 solver.cpp:404]     Test net output #0: accuracy = 0.16496\nI0817 17:45:17.976490 17389 solver.cpp:404]     Test net output #1: loss = 4.64544 (* 1 = 4.64544 loss)\nI0817 17:45:19.283390 17389 solver.cpp:228] Iteration 2500, loss = 1.00353\nI0817 17:45:19.283432 17389 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0817 17:45:19.283449 17389 solver.cpp:244]     Train net output #1: loss = 1.00353 (* 1 = 1.00353 loss)\nI0817 17:45:19.374032 17389 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 17:47:37.391170 17389 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:48:58.043505 17389 solver.cpp:404]     Test net output #0: accuracy = 0.14472\nI0817 17:48:58.043743 17389 solver.cpp:404]     Test net output #1: loss = 5.71591 (* 1 = 5.71591 loss)\nI0817 17:48:59.349230 17389 solver.cpp:228] Iteration 2600, loss = 1.01837\nI0817 17:48:59.349272 17389 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 17:48:59.349289 17389 solver.cpp:244]     Train net output #1: loss = 1.01837 (* 1 = 1.01837 loss)\nI0817 17:48:59.441164 17389 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 17:51:17.477391 17389 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:52:38.124598 17389 solver.cpp:404]     Test net output #0: accuracy = 0.17768\nI0817 17:52:38.124857 17389 solver.cpp:404]     Test net output #1: loss = 5.49392 (* 1 = 5.49392 loss)\nI0817 17:52:39.431033 17389 
solver.cpp:228] Iteration 2700, loss = 1.01584\nI0817 17:52:39.431077 17389 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 17:52:39.431093 17389 solver.cpp:244]     Train net output #1: loss = 1.01584 (* 1 = 1.01584 loss)\nI0817 17:52:39.522614 17389 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 17:54:57.693521 17389 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:56:18.299834 17389 solver.cpp:404]     Test net output #0: accuracy = 0.13808\nI0817 17:56:18.300086 17389 solver.cpp:404]     Test net output #1: loss = 6.3273 (* 1 = 6.3273 loss)\nI0817 17:56:19.606541 17389 solver.cpp:228] Iteration 2800, loss = 0.965305\nI0817 17:56:19.606588 17389 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 17:56:19.606613 17389 solver.cpp:244]     Train net output #1: loss = 0.965305 (* 1 = 0.965305 loss)\nI0817 17:56:19.704861 17389 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 17:58:38.043107 17389 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:59:58.679409 17389 solver.cpp:404]     Test net output #0: accuracy = 0.15484\nI0817 17:59:58.679652 17389 solver.cpp:404]     Test net output #1: loss = 5.96544 (* 1 = 5.96544 loss)\nI0817 17:59:59.985586 17389 solver.cpp:228] Iteration 2900, loss = 0.939784\nI0817 17:59:59.985630 17389 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 17:59:59.985654 17389 solver.cpp:244]     Train net output #1: loss = 0.939784 (* 1 = 0.939784 loss)\nI0817 18:00:00.089929 17389 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 18:02:18.335455 17389 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:03:39.037083 17389 solver.cpp:404]     Test net output #0: accuracy = 0.155\nI0817 18:03:39.037340 17389 solver.cpp:404]     Test net output #1: loss = 5.75513 (* 1 = 5.75513 loss)\nI0817 18:03:40.344063 17389 solver.cpp:228] Iteration 3000, loss = 0.893428\nI0817 18:03:40.344110 17389 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 
18:03:40.344133 17389 solver.cpp:244]     Train net output #1: loss = 0.893428 (* 1 = 0.893428 loss)\nI0817 18:03:40.436692 17389 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 18:05:58.631095 17389 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:07:19.247020 17389 solver.cpp:404]     Test net output #0: accuracy = 0.15204\nI0817 18:07:19.247277 17389 solver.cpp:404]     Test net output #1: loss = 5.69217 (* 1 = 5.69217 loss)\nI0817 18:07:20.553128 17389 solver.cpp:228] Iteration 3100, loss = 0.838302\nI0817 18:07:20.553174 17389 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 18:07:20.553196 17389 solver.cpp:244]     Train net output #1: loss = 0.838302 (* 1 = 0.838302 loss)\nI0817 18:07:20.647370 17389 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 18:09:38.883733 17389 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:10:59.510350 17389 solver.cpp:404]     Test net output #0: accuracy = 0.14264\nI0817 18:10:59.510597 17389 solver.cpp:404]     Test net output #1: loss = 6.3039 (* 1 = 6.3039 loss)\nI0817 18:11:00.816016 17389 solver.cpp:228] Iteration 3200, loss = 0.76406\nI0817 18:11:00.816061 17389 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 18:11:00.816085 17389 solver.cpp:244]     Train net output #1: loss = 0.76406 (* 1 = 0.76406 loss)\nI0817 18:11:00.909737 17389 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 18:13:19.119904 17389 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:14:39.769003 17389 solver.cpp:404]     Test net output #0: accuracy = 0.166\nI0817 18:14:39.769259 17389 solver.cpp:404]     Test net output #1: loss = 6.12494 (* 1 = 6.12494 loss)\nI0817 18:14:41.075469 17389 solver.cpp:228] Iteration 3300, loss = 0.830797\nI0817 18:14:41.075513 17389 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 18:14:41.075537 17389 solver.cpp:244]     Train net output #1: loss = 0.830797 (* 1 = 0.830797 loss)\nI0817 18:14:41.167434 17389 sgd_solver.cpp:166] 
Iteration 3300, lr = 0.35\nI0817 18:16:59.347910 17389 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:18:20.033577 17389 solver.cpp:404]     Test net output #0: accuracy = 0.18624\nI0817 18:18:20.033838 17389 solver.cpp:404]     Test net output #1: loss = 6.10172 (* 1 = 6.10172 loss)\nI0817 18:18:21.340819 17389 solver.cpp:228] Iteration 3400, loss = 0.642088\nI0817 18:18:21.340863 17389 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 18:18:21.340888 17389 solver.cpp:244]     Train net output #1: loss = 0.642088 (* 1 = 0.642088 loss)\nI0817 18:18:21.432199 17389 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 18:20:39.695245 17389 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:22:00.402345 17389 solver.cpp:404]     Test net output #0: accuracy = 0.17984\nI0817 18:22:00.402590 17389 solver.cpp:404]     Test net output #1: loss = 6.45975 (* 1 = 6.45975 loss)\nI0817 18:22:01.710336 17389 solver.cpp:228] Iteration 3500, loss = 0.823426\nI0817 18:22:01.710382 17389 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 18:22:01.710405 17389 solver.cpp:244]     Train net output #1: loss = 0.823426 (* 1 = 0.823426 loss)\nI0817 18:22:01.804627 17389 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 18:24:20.150900 17389 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:25:40.890223 17389 solver.cpp:404]     Test net output #0: accuracy = 0.1448\nI0817 18:25:40.890486 17389 solver.cpp:404]     Test net output #1: loss = 8.12522 (* 1 = 8.12522 loss)\nI0817 18:25:42.196295 17389 solver.cpp:228] Iteration 3600, loss = 0.948223\nI0817 18:25:42.196341 17389 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 18:25:42.196364 17389 solver.cpp:244]     Train net output #1: loss = 0.948223 (* 1 = 0.948223 loss)\nI0817 18:25:42.290194 17389 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 18:28:00.522786 17389 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:29:21.298291 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.158\nI0817 18:29:21.298548 17389 solver.cpp:404]     Test net output #1: loss = 6.68601 (* 1 = 6.68601 loss)\nI0817 18:29:22.605520 17389 solver.cpp:228] Iteration 3700, loss = 0.743047\nI0817 18:29:22.605568 17389 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 18:29:22.605592 17389 solver.cpp:244]     Train net output #1: loss = 0.743047 (* 1 = 0.743047 loss)\nI0817 18:29:22.697194 17389 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 18:31:41.021019 17389 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:33:01.754703 17389 solver.cpp:404]     Test net output #0: accuracy = 0.16352\nI0817 18:33:01.754953 17389 solver.cpp:404]     Test net output #1: loss = 7.61649 (* 1 = 7.61649 loss)\nI0817 18:33:03.061036 17389 solver.cpp:228] Iteration 3800, loss = 0.789466\nI0817 18:33:03.061084 17389 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 18:33:03.061107 17389 solver.cpp:244]     Train net output #1: loss = 0.789466 (* 1 = 0.789466 loss)\nI0817 18:33:03.153704 17389 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 18:35:21.426404 17389 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:36:42.156983 17389 solver.cpp:404]     Test net output #0: accuracy = 0.19764\nI0817 18:36:42.157232 17389 solver.cpp:404]     Test net output #1: loss = 6.14012 (* 1 = 6.14012 loss)\nI0817 18:36:43.463950 17389 solver.cpp:228] Iteration 3900, loss = 0.654821\nI0817 18:36:43.463996 17389 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 18:36:43.464021 17389 solver.cpp:244]     Train net output #1: loss = 0.654821 (* 1 = 0.654821 loss)\nI0817 18:36:43.558353 17389 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 18:39:01.826700 17389 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:40:22.527066 17389 solver.cpp:404]     Test net output #0: accuracy = 0.21992\nI0817 18:40:22.527289 17389 solver.cpp:404]     Test net output #1: loss = 6.39243 (* 1 
= 6.39243 loss)\nI0817 18:40:23.833878 17389 solver.cpp:228] Iteration 4000, loss = 0.677491\nI0817 18:40:23.833923 17389 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 18:40:23.833948 17389 solver.cpp:244]     Train net output #1: loss = 0.677491 (* 1 = 0.677491 loss)\nI0817 18:40:23.927399 17389 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 18:42:42.193085 17389 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:44:02.852672 17389 solver.cpp:404]     Test net output #0: accuracy = 0.23856\nI0817 18:44:02.852913 17389 solver.cpp:404]     Test net output #1: loss = 5.76734 (* 1 = 5.76734 loss)\nI0817 18:44:04.162091 17389 solver.cpp:228] Iteration 4100, loss = 0.755966\nI0817 18:44:04.162135 17389 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 18:44:04.162158 17389 solver.cpp:244]     Train net output #1: loss = 0.755966 (* 1 = 0.755966 loss)\nI0817 18:44:04.258848 17389 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 18:46:22.546025 17389 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:47:43.252405 17389 solver.cpp:404]     Test net output #0: accuracy = 0.27424\nI0817 18:47:43.252652 17389 solver.cpp:404]     Test net output #1: loss = 5.07853 (* 1 = 5.07853 loss)\nI0817 18:47:44.559316 17389 solver.cpp:228] Iteration 4200, loss = 0.657524\nI0817 18:47:44.559362 17389 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 18:47:44.559386 17389 solver.cpp:244]     Train net output #1: loss = 0.657524 (* 1 = 0.657524 loss)\nI0817 18:47:44.652492 17389 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 18:50:02.880508 17389 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:51:23.616035 17389 solver.cpp:404]     Test net output #0: accuracy = 0.2566\nI0817 18:51:23.616273 17389 solver.cpp:404]     Test net output #1: loss = 5.04419 (* 1 = 5.04419 loss)\nI0817 18:51:24.922421 17389 solver.cpp:228] Iteration 4300, loss = 0.501968\nI0817 18:51:24.922466 17389 solver.cpp:244]     Train 
net output #0: accuracy = 0.872\nI0817 18:51:24.922490 17389 solver.cpp:244]     Train net output #1: loss = 0.501968 (* 1 = 0.501968 loss)\nI0817 18:51:25.017796 17389 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 18:53:43.261562 17389 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:55:03.979243 17389 solver.cpp:404]     Test net output #0: accuracy = 0.288\nI0817 18:55:03.979476 17389 solver.cpp:404]     Test net output #1: loss = 5.16918 (* 1 = 5.16918 loss)\nI0817 18:55:05.286769 17389 solver.cpp:228] Iteration 4400, loss = 0.495756\nI0817 18:55:05.286815 17389 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 18:55:05.286839 17389 solver.cpp:244]     Train net output #1: loss = 0.495756 (* 1 = 0.495756 loss)\nI0817 18:55:05.384793 17389 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 18:57:23.652405 17389 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:58:44.375679 17389 solver.cpp:404]     Test net output #0: accuracy = 0.311\nI0817 18:58:44.375929 17389 solver.cpp:404]     Test net output #1: loss = 5.12166 (* 1 = 5.12166 loss)\nI0817 18:58:45.683292 17389 solver.cpp:228] Iteration 4500, loss = 0.557344\nI0817 18:58:45.683336 17389 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 18:58:45.683360 17389 solver.cpp:244]     Train net output #1: loss = 0.557344 (* 1 = 0.557344 loss)\nI0817 18:58:45.775506 17389 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 19:01:04.021562 17389 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:02:24.640856 17389 solver.cpp:404]     Test net output #0: accuracy = 0.334\nI0817 19:02:24.641095 17389 solver.cpp:404]     Test net output #1: loss = 3.99835 (* 1 = 3.99835 loss)\nI0817 19:02:25.948467 17389 solver.cpp:228] Iteration 4600, loss = 0.433122\nI0817 19:02:25.948508 17389 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:02:25.948523 17389 solver.cpp:244]     Train net output #1: loss = 0.433122 (* 1 = 0.433122 loss)\nI0817 
19:02:26.043848 17389 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 19:04:44.512367 17389 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:06:05.126495 17389 solver.cpp:404]     Test net output #0: accuracy = 0.33252\nI0817 19:06:05.126750 17389 solver.cpp:404]     Test net output #1: loss = 4.19749 (* 1 = 4.19749 loss)\nI0817 19:06:06.432790 17389 solver.cpp:228] Iteration 4700, loss = 0.619488\nI0817 19:06:06.432831 17389 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 19:06:06.432845 17389 solver.cpp:244]     Train net output #1: loss = 0.619488 (* 1 = 0.619488 loss)\nI0817 19:06:06.532325 17389 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 19:08:24.815428 17389 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:09:45.435756 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34956\nI0817 19:09:45.436105 17389 solver.cpp:404]     Test net output #1: loss = 3.99437 (* 1 = 3.99437 loss)\nI0817 19:09:46.742069 17389 solver.cpp:228] Iteration 4800, loss = 0.519757\nI0817 19:09:46.742110 17389 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 19:09:46.742125 17389 solver.cpp:244]     Train net output #1: loss = 0.519757 (* 1 = 0.519757 loss)\nI0817 19:09:46.839349 17389 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 19:12:05.157873 17389 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:13:25.768779 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34892\nI0817 19:13:25.769047 17389 solver.cpp:404]     Test net output #1: loss = 3.95676 (* 1 = 3.95676 loss)\nI0817 19:13:27.075224 17389 solver.cpp:228] Iteration 4900, loss = 0.42604\nI0817 19:13:27.075264 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 19:13:27.075280 17389 solver.cpp:244]     Train net output #1: loss = 0.42604 (* 1 = 0.42604 loss)\nI0817 19:13:27.169318 17389 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 19:15:45.617231 17389 solver.cpp:337] Iteration 5000, Testing net 
(#0)\nI0817 19:17:06.230854 17389 solver.cpp:404]     Test net output #0: accuracy = 0.33692\nI0817 19:17:06.231084 17389 solver.cpp:404]     Test net output #1: loss = 4.65924 (* 1 = 4.65924 loss)\nI0817 19:17:07.537178 17389 solver.cpp:228] Iteration 5000, loss = 0.490201\nI0817 19:17:07.537219 17389 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:17:07.537233 17389 solver.cpp:244]     Train net output #1: loss = 0.490201 (* 1 = 0.490201 loss)\nI0817 19:17:07.637037 17389 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 19:19:25.950984 17389 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:20:46.558516 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3142\nI0817 19:20:46.558789 17389 solver.cpp:404]     Test net output #1: loss = 4.96701 (* 1 = 4.96701 loss)\nI0817 19:20:47.864585 17389 solver.cpp:228] Iteration 5100, loss = 0.553375\nI0817 19:20:47.864624 17389 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 19:20:47.864640 17389 solver.cpp:244]     Train net output #1: loss = 0.553374 (* 1 = 0.553374 loss)\nI0817 19:20:47.959499 17389 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 19:23:06.201687 17389 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:24:26.805349 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35944\nI0817 19:24:26.805608 17389 solver.cpp:404]     Test net output #1: loss = 4.00719 (* 1 = 4.00719 loss)\nI0817 19:24:28.111773 17389 solver.cpp:228] Iteration 5200, loss = 0.423486\nI0817 19:24:28.111812 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 19:24:28.111829 17389 solver.cpp:244]     Train net output #1: loss = 0.423486 (* 1 = 0.423486 loss)\nI0817 19:24:28.206650 17389 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 19:26:46.567883 17389 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:28:07.176975 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37212\nI0817 19:28:07.177213 17389 solver.cpp:404]     Test 
net output #1: loss = 3.81218 (* 1 = 3.81218 loss)\nI0817 19:28:08.483243 17389 solver.cpp:228] Iteration 5300, loss = 0.506615\nI0817 19:28:08.483281 17389 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:28:08.483297 17389 solver.cpp:244]     Train net output #1: loss = 0.506615 (* 1 = 0.506615 loss)\nI0817 19:28:08.581048 17389 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 19:30:26.805392 17389 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:31:47.419253 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3698\nI0817 19:31:47.419492 17389 solver.cpp:404]     Test net output #1: loss = 4.06308 (* 1 = 4.06308 loss)\nI0817 19:31:48.726107 17389 solver.cpp:228] Iteration 5400, loss = 0.521006\nI0817 19:31:48.726147 17389 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:31:48.726164 17389 solver.cpp:244]     Train net output #1: loss = 0.521006 (* 1 = 0.521006 loss)\nI0817 19:31:48.821390 17389 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 19:34:07.037026 17389 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:35:27.648515 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34172\nI0817 19:35:27.648759 17389 solver.cpp:404]     Test net output #1: loss = 4.24047 (* 1 = 4.24047 loss)\nI0817 19:35:28.954306 17389 solver.cpp:228] Iteration 5500, loss = 0.359894\nI0817 19:35:28.954346 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:35:28.954362 17389 solver.cpp:244]     Train net output #1: loss = 0.359894 (* 1 = 0.359894 loss)\nI0817 19:35:29.051576 17389 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 19:37:47.353976 17389 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:39:07.964334 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36\nI0817 19:39:07.964586 17389 solver.cpp:404]     Test net output #1: loss = 4.25401 (* 1 = 4.25401 loss)\nI0817 19:39:09.270980 17389 solver.cpp:228] Iteration 5600, loss = 0.582402\nI0817 19:39:09.271020 
17389 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 19:39:09.271036 17389 solver.cpp:244]     Train net output #1: loss = 0.582402 (* 1 = 0.582402 loss)\nI0817 19:39:09.363804 17389 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 19:41:27.607429 17389 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:42:48.225757 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36532\nI0817 19:42:48.226007 17389 solver.cpp:404]     Test net output #1: loss = 3.97135 (* 1 = 3.97135 loss)\nI0817 19:42:49.531874 17389 solver.cpp:228] Iteration 5700, loss = 0.351405\nI0817 19:42:49.531915 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 19:42:49.531931 17389 solver.cpp:244]     Train net output #1: loss = 0.351405 (* 1 = 0.351405 loss)\nI0817 19:42:49.624243 17389 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 19:45:07.978595 17389 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:46:28.589038 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35684\nI0817 19:46:28.589278 17389 solver.cpp:404]     Test net output #1: loss = 4.20888 (* 1 = 4.20888 loss)\nI0817 19:46:29.894945 17389 solver.cpp:228] Iteration 5800, loss = 0.373031\nI0817 19:46:29.894987 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 19:46:29.895004 17389 solver.cpp:244]     Train net output #1: loss = 0.373031 (* 1 = 0.373031 loss)\nI0817 19:46:29.989689 17389 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 19:48:48.322144 17389 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:50:08.937247 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3668\nI0817 19:50:08.937486 17389 solver.cpp:404]     Test net output #1: loss = 3.88953 (* 1 = 3.88953 loss)\nI0817 19:50:10.243793 17389 solver.cpp:228] Iteration 5900, loss = 0.391097\nI0817 19:50:10.243836 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 19:50:10.243852 17389 solver.cpp:244]     Train net output #1: loss = 
0.391097 (* 1 = 0.391097 loss)\nI0817 19:50:10.342041 17389 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 19:52:28.778681 17389 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:53:49.394817 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34196\nI0817 19:53:49.395056 17389 solver.cpp:404]     Test net output #1: loss = 4.07993 (* 1 = 4.07993 loss)\nI0817 19:53:50.701683 17389 solver.cpp:228] Iteration 6000, loss = 0.304464\nI0817 19:53:50.701725 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:53:50.701740 17389 solver.cpp:244]     Train net output #1: loss = 0.304464 (* 1 = 0.304464 loss)\nI0817 19:53:50.797530 17389 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 19:56:09.094591 17389 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:57:29.705128 17389 solver.cpp:404]     Test net output #0: accuracy = 0.30644\nI0817 19:57:29.705374 17389 solver.cpp:404]     Test net output #1: loss = 5.03431 (* 1 = 5.03431 loss)\nI0817 19:57:31.012091 17389 solver.cpp:228] Iteration 6100, loss = 0.548423\nI0817 19:57:31.012133 17389 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 19:57:31.012151 17389 solver.cpp:244]     Train net output #1: loss = 0.548423 (* 1 = 0.548423 loss)\nI0817 19:57:31.104264 17389 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 19:59:49.374943 17389 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:01:09.987723 17389 solver.cpp:404]     Test net output #0: accuracy = 0.33376\nI0817 20:01:09.987972 17389 solver.cpp:404]     Test net output #1: loss = 4.05696 (* 1 = 4.05696 loss)\nI0817 20:01:11.294782 17389 solver.cpp:228] Iteration 6200, loss = 0.390916\nI0817 20:01:11.294824 17389 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 20:01:11.294842 17389 solver.cpp:244]     Train net output #1: loss = 0.390916 (* 1 = 0.390916 loss)\nI0817 20:01:11.394876 17389 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0817 20:03:29.708784 17389 
solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:04:50.442036 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39764\nI0817 20:04:50.442282 17389 solver.cpp:404]     Test net output #1: loss = 3.42624 (* 1 = 3.42624 loss)\nI0817 20:04:51.748364 17389 solver.cpp:228] Iteration 6300, loss = 0.28446\nI0817 20:04:51.748410 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 20:04:51.748435 17389 solver.cpp:244]     Train net output #1: loss = 0.28446 (* 1 = 0.28446 loss)\nI0817 20:04:51.846674 17389 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 20:07:10.241545 17389 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:08:30.886472 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36272\nI0817 20:08:30.886734 17389 solver.cpp:404]     Test net output #1: loss = 3.81591 (* 1 = 3.81591 loss)\nI0817 20:08:32.193725 17389 solver.cpp:228] Iteration 6400, loss = 0.286801\nI0817 20:08:32.193776 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 20:08:32.193801 17389 solver.cpp:244]     Train net output #1: loss = 0.286801 (* 1 = 0.286801 loss)\nI0817 20:08:32.288318 17389 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 20:10:50.701184 17389 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:12:11.337394 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36172\nI0817 20:12:11.337631 17389 solver.cpp:404]     Test net output #1: loss = 3.96987 (* 1 = 3.96987 loss)\nI0817 20:12:12.644418 17389 solver.cpp:228] Iteration 6500, loss = 0.338069\nI0817 20:12:12.644464 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 20:12:12.644487 17389 solver.cpp:244]     Train net output #1: loss = 0.338069 (* 1 = 0.338069 loss)\nI0817 20:12:12.739689 17389 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 20:14:31.109468 17389 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:15:51.734314 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39252\nI0817 
20:15:51.734537 17389 solver.cpp:404]     Test net output #1: loss = 3.44381 (* 1 = 3.44381 loss)\nI0817 20:15:53.041638 17389 solver.cpp:228] Iteration 6600, loss = 0.331067\nI0817 20:15:53.041687 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 20:15:53.041710 17389 solver.cpp:244]     Train net output #1: loss = 0.331067 (* 1 = 0.331067 loss)\nI0817 20:15:53.136605 17389 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 20:18:11.438657 17389 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:19:32.063443 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3618\nI0817 20:19:32.063684 17389 solver.cpp:404]     Test net output #1: loss = 3.87321 (* 1 = 3.87321 loss)\nI0817 20:19:33.370872 17389 solver.cpp:228] Iteration 6700, loss = 0.397076\nI0817 20:19:33.370919 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 20:19:33.370944 17389 solver.cpp:244]     Train net output #1: loss = 0.397076 (* 1 = 0.397076 loss)\nI0817 20:19:33.466035 17389 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 20:21:51.957592 17389 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:23:12.587142 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36636\nI0817 20:23:12.587378 17389 solver.cpp:404]     Test net output #1: loss = 3.87499 (* 1 = 3.87499 loss)\nI0817 20:23:13.896337 17389 solver.cpp:228] Iteration 6800, loss = 0.386344\nI0817 20:23:13.896381 17389 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 20:23:13.896397 17389 solver.cpp:244]     Train net output #1: loss = 0.386343 (* 1 = 0.386343 loss)\nI0817 20:23:13.991616 17389 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 20:25:32.405095 17389 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:26:53.021592 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37768\nI0817 20:26:53.021808 17389 solver.cpp:404]     Test net output #1: loss = 3.80858 (* 1 = 3.80858 loss)\nI0817 20:26:54.328158 17389 solver.cpp:228] 
Iteration 6900, loss = 0.354915\nI0817 20:26:54.328202 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 20:26:54.328219 17389 solver.cpp:244]     Train net output #1: loss = 0.354915 (* 1 = 0.354915 loss)\nI0817 20:26:54.422909 17389 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 20:29:12.759531 17389 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:30:33.366602 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39372\nI0817 20:30:33.366825 17389 solver.cpp:404]     Test net output #1: loss = 3.50486 (* 1 = 3.50486 loss)\nI0817 20:30:34.674373 17389 solver.cpp:228] Iteration 7000, loss = 0.368366\nI0817 20:30:34.674418 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 20:30:34.674435 17389 solver.cpp:244]     Train net output #1: loss = 0.368366 (* 1 = 0.368366 loss)\nI0817 20:30:34.769906 17389 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 20:32:53.016512 17389 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:34:13.626787 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36684\nI0817 20:34:13.627012 17389 solver.cpp:404]     Test net output #1: loss = 4.16308 (* 1 = 4.16308 loss)\nI0817 20:34:14.933815 17389 solver.cpp:228] Iteration 7100, loss = 0.336773\nI0817 20:34:14.933859 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:34:14.933876 17389 solver.cpp:244]     Train net output #1: loss = 0.336773 (* 1 = 0.336773 loss)\nI0817 20:34:15.028472 17389 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 20:36:33.249028 17389 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:37:53.856539 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42528\nI0817 20:37:53.856791 17389 solver.cpp:404]     Test net output #1: loss = 3.42116 (* 1 = 3.42116 loss)\nI0817 20:37:55.163486 17389 solver.cpp:228] Iteration 7200, loss = 0.392856\nI0817 20:37:55.163529 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:37:55.163547 
17389 solver.cpp:244]     Train net output #1: loss = 0.392855 (* 1 = 0.392855 loss)\nI0817 20:37:55.260763 17389 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 20:40:13.462146 17389 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:41:34.075047 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42292\nI0817 20:41:34.075281 17389 solver.cpp:404]     Test net output #1: loss = 3.42302 (* 1 = 3.42302 loss)\nI0817 20:41:35.381623 17389 solver.cpp:228] Iteration 7300, loss = 0.339722\nI0817 20:41:35.381667 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:41:35.381685 17389 solver.cpp:244]     Train net output #1: loss = 0.339722 (* 1 = 0.339722 loss)\nI0817 20:41:35.480715 17389 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 20:43:53.819173 17389 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:45:14.434931 17389 solver.cpp:404]     Test net output #0: accuracy = 0.414\nI0817 20:45:14.435153 17389 solver.cpp:404]     Test net output #1: loss = 3.44154 (* 1 = 3.44154 loss)\nI0817 20:45:15.742396 17389 solver.cpp:228] Iteration 7400, loss = 0.458984\nI0817 20:45:15.742440 17389 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:45:15.742455 17389 solver.cpp:244]     Train net output #1: loss = 0.458984 (* 1 = 0.458984 loss)\nI0817 20:45:15.835058 17389 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 20:47:34.218529 17389 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:48:54.837523 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39236\nI0817 20:48:54.837752 17389 solver.cpp:404]     Test net output #1: loss = 3.92442 (* 1 = 3.92442 loss)\nI0817 20:48:56.145251 17389 solver.cpp:228] Iteration 7500, loss = 0.409336\nI0817 20:48:56.145295 17389 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 20:48:56.145311 17389 solver.cpp:244]     Train net output #1: loss = 0.409336 (* 1 = 0.409336 loss)\nI0817 20:48:56.238430 17389 sgd_solver.cpp:166] Iteration 
7500, lr = 0.35\nI0817 20:51:14.635099 17389 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:52:35.254469 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39956\nI0817 20:52:35.254698 17389 solver.cpp:404]     Test net output #1: loss = 3.57559 (* 1 = 3.57559 loss)\nI0817 20:52:36.561445 17389 solver.cpp:228] Iteration 7600, loss = 0.476868\nI0817 20:52:36.561496 17389 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 20:52:36.561520 17389 solver.cpp:244]     Train net output #1: loss = 0.476867 (* 1 = 0.476867 loss)\nI0817 20:52:36.657379 17389 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 20:54:54.991461 17389 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:56:15.604826 17389 solver.cpp:404]     Test net output #0: accuracy = 0.422\nI0817 20:56:15.605029 17389 solver.cpp:404]     Test net output #1: loss = 3.39237 (* 1 = 3.39237 loss)\nI0817 20:56:16.911900 17389 solver.cpp:228] Iteration 7700, loss = 0.348353\nI0817 20:56:16.911943 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 20:56:16.911958 17389 solver.cpp:244]     Train net output #1: loss = 0.348353 (* 1 = 0.348353 loss)\nI0817 20:56:17.014001 17389 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 20:58:35.388286 17389 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:59:56.002837 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41696\nI0817 20:59:56.003082 17389 solver.cpp:404]     Test net output #1: loss = 3.58216 (* 1 = 3.58216 loss)\nI0817 20:59:57.310040 17389 solver.cpp:228] Iteration 7800, loss = 0.249654\nI0817 20:59:57.310081 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 20:59:57.310097 17389 solver.cpp:244]     Train net output #1: loss = 0.249654 (* 1 = 0.249654 loss)\nI0817 20:59:57.411442 17389 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 21:02:15.770440 17389 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:03:36.386904 17389 solver.cpp:404]     Test 
net output #0: accuracy = 0.40992\nI0817 21:03:36.387137 17389 solver.cpp:404]     Test net output #1: loss = 3.62882 (* 1 = 3.62882 loss)\nI0817 21:03:37.693171 17389 solver.cpp:228] Iteration 7900, loss = 0.360713\nI0817 21:03:37.693215 17389 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 21:03:37.693238 17389 solver.cpp:244]     Train net output #1: loss = 0.360713 (* 1 = 0.360713 loss)\nI0817 21:03:37.788004 17389 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 21:05:56.099663 17389 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:07:16.722465 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41268\nI0817 21:07:16.722697 17389 solver.cpp:404]     Test net output #1: loss = 3.3112 (* 1 = 3.3112 loss)\nI0817 21:07:18.029106 17389 solver.cpp:228] Iteration 8000, loss = 0.314956\nI0817 21:07:18.029150 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:07:18.029172 17389 solver.cpp:244]     Train net output #1: loss = 0.314956 (* 1 = 0.314956 loss)\nI0817 21:07:18.120229 17389 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 21:09:36.380329 17389 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:10:57.006422 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39992\nI0817 21:10:57.006644 17389 solver.cpp:404]     Test net output #1: loss = 3.66737 (* 1 = 3.66737 loss)\nI0817 21:10:58.312544 17389 solver.cpp:228] Iteration 8100, loss = 0.30934\nI0817 21:10:58.312588 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:10:58.312613 17389 solver.cpp:244]     Train net output #1: loss = 0.30934 (* 1 = 0.30934 loss)\nI0817 21:10:58.415128 17389 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 21:13:16.654798 17389 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:14:37.275704 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38808\nI0817 21:14:37.275925 17389 solver.cpp:404]     Test net output #1: loss = 4.0472 (* 1 = 4.0472 loss)\nI0817 
21:14:38.582149 17389 solver.cpp:228] Iteration 8200, loss = 0.30209\nI0817 21:14:38.582191 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:14:38.582216 17389 solver.cpp:244]     Train net output #1: loss = 0.30209 (* 1 = 0.30209 loss)\nI0817 21:14:38.683053 17389 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 21:16:57.107774 17389 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:18:17.722856 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38512\nI0817 21:18:17.723103 17389 solver.cpp:404]     Test net output #1: loss = 4.05097 (* 1 = 4.05097 loss)\nI0817 21:18:19.030004 17389 solver.cpp:228] Iteration 8300, loss = 0.310297\nI0817 21:18:19.030047 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:18:19.030071 17389 solver.cpp:244]     Train net output #1: loss = 0.310297 (* 1 = 0.310297 loss)\nI0817 21:18:19.124044 17389 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 21:20:37.428166 17389 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:21:58.039821 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41648\nI0817 21:21:58.040047 17389 solver.cpp:404]     Test net output #1: loss = 3.51554 (* 1 = 3.51554 loss)\nI0817 21:21:59.346935 17389 solver.cpp:228] Iteration 8400, loss = 0.333631\nI0817 21:21:59.346979 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:21:59.347003 17389 solver.cpp:244]     Train net output #1: loss = 0.33363 (* 1 = 0.33363 loss)\nI0817 21:21:59.438905 17389 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 21:24:17.781144 17389 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:25:38.383586 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40064\nI0817 21:25:38.383822 17389 solver.cpp:404]     Test net output #1: loss = 3.83734 (* 1 = 3.83734 loss)\nI0817 21:25:39.690486 17389 solver.cpp:228] Iteration 8500, loss = 0.328441\nI0817 21:25:39.690528 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.888\nI0817 21:25:39.690553 17389 solver.cpp:244]     Train net output #1: loss = 0.328441 (* 1 = 0.328441 loss)\nI0817 21:25:39.785686 17389 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 21:27:58.072461 17389 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:29:18.694015 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39992\nI0817 21:29:18.694224 17389 solver.cpp:404]     Test net output #1: loss = 3.71064 (* 1 = 3.71064 loss)\nI0817 21:29:20.000727 17389 solver.cpp:228] Iteration 8600, loss = 0.266951\nI0817 21:29:20.000771 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 21:29:20.000787 17389 solver.cpp:244]     Train net output #1: loss = 0.266951 (* 1 = 0.266951 loss)\nI0817 21:29:20.092463 17389 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 21:31:38.442690 17389 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:32:59.060137 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37524\nI0817 21:32:59.060369 17389 solver.cpp:404]     Test net output #1: loss = 3.91849 (* 1 = 3.91849 loss)\nI0817 21:33:00.366503 17389 solver.cpp:228] Iteration 8700, loss = 0.342563\nI0817 21:33:00.366542 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 21:33:00.366562 17389 solver.cpp:244]     Train net output #1: loss = 0.342563 (* 1 = 0.342563 loss)\nI0817 21:33:00.462190 17389 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 21:35:18.778120 17389 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:36:39.387840 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3626\nI0817 21:36:39.388065 17389 solver.cpp:404]     Test net output #1: loss = 4.09347 (* 1 = 4.09347 loss)\nI0817 21:36:40.693711 17389 solver.cpp:228] Iteration 8800, loss = 0.218112\nI0817 21:36:40.693753 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 21:36:40.693768 17389 solver.cpp:244]     Train net output #1: loss = 0.218112 (* 1 = 0.218112 loss)\nI0817 21:36:40.794147 17389 
sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 21:38:59.029085 17389 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:40:19.639128 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40648\nI0817 21:40:19.639328 17389 solver.cpp:404]     Test net output #1: loss = 3.81476 (* 1 = 3.81476 loss)\nI0817 21:40:20.945130 17389 solver.cpp:228] Iteration 8900, loss = 0.302404\nI0817 21:40:20.945173 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 21:40:20.945188 17389 solver.cpp:244]     Train net output #1: loss = 0.302403 (* 1 = 0.302403 loss)\nI0817 21:40:21.047582 17389 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 21:42:39.398563 17389 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:44:00.013900 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39388\nI0817 21:44:00.014125 17389 solver.cpp:404]     Test net output #1: loss = 3.89755 (* 1 = 3.89755 loss)\nI0817 21:44:01.320312 17389 solver.cpp:228] Iteration 9000, loss = 0.367653\nI0817 21:44:01.320350 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 21:44:01.320366 17389 solver.cpp:244]     Train net output #1: loss = 0.367653 (* 1 = 0.367653 loss)\nI0817 21:44:01.411028 17389 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 21:46:19.624274 17389 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:47:40.236037 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40536\nI0817 21:47:40.236258 17389 solver.cpp:404]     Test net output #1: loss = 3.69524 (* 1 = 3.69524 loss)\nI0817 21:47:41.542071 17389 solver.cpp:228] Iteration 9100, loss = 0.330076\nI0817 21:47:41.542112 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 21:47:41.542129 17389 solver.cpp:244]     Train net output #1: loss = 0.330076 (* 1 = 0.330076 loss)\nI0817 21:47:41.635982 17389 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 21:49:59.907469 17389 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 
21:51:20.513996 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3822\nI0817 21:51:20.514222 17389 solver.cpp:404]     Test net output #1: loss = 3.8292 (* 1 = 3.8292 loss)\nI0817 21:51:21.820081 17389 solver.cpp:228] Iteration 9200, loss = 0.406926\nI0817 21:51:21.820124 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 21:51:21.820140 17389 solver.cpp:244]     Train net output #1: loss = 0.406926 (* 1 = 0.406926 loss)\nI0817 21:51:21.912385 17389 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 21:53:40.143028 17389 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:55:00.755697 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3028\nI0817 21:55:00.755892 17389 solver.cpp:404]     Test net output #1: loss = 5.36705 (* 1 = 5.36705 loss)\nI0817 21:55:02.062172 17389 solver.cpp:228] Iteration 9300, loss = 0.283918\nI0817 21:55:02.062216 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 21:55:02.062232 17389 solver.cpp:244]     Train net output #1: loss = 0.283917 (* 1 = 0.283917 loss)\nI0817 21:55:02.154952 17389 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 21:57:20.394920 17389 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:58:41.002166 17389 solver.cpp:404]     Test net output #0: accuracy = 0.31272\nI0817 21:58:41.002388 17389 solver.cpp:404]     Test net output #1: loss = 5.42343 (* 1 = 5.42343 loss)\nI0817 21:58:42.308357 17389 solver.cpp:228] Iteration 9400, loss = 0.50372\nI0817 21:58:42.308399 17389 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 21:58:42.308415 17389 solver.cpp:244]     Train net output #1: loss = 0.50372 (* 1 = 0.50372 loss)\nI0817 21:58:42.401506 17389 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 22:01:00.650713 17389 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:02:21.255923 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34788\nI0817 22:02:21.256153 17389 solver.cpp:404]     Test net output #1: 
loss = 4.37089 (* 1 = 4.37089 loss)\nI0817 22:02:22.561983 17389 solver.cpp:228] Iteration 9500, loss = 0.25999\nI0817 22:02:22.562026 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:02:22.562042 17389 solver.cpp:244]     Train net output #1: loss = 0.25999 (* 1 = 0.25999 loss)\nI0817 22:02:22.654505 17389 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 22:04:40.898857 17389 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:06:01.511121 17389 solver.cpp:404]     Test net output #0: accuracy = 0.32072\nI0817 22:06:01.511339 17389 solver.cpp:404]     Test net output #1: loss = 4.66998 (* 1 = 4.66998 loss)\nI0817 22:06:02.818197 17389 solver.cpp:228] Iteration 9600, loss = 0.246708\nI0817 22:06:02.818240 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 22:06:02.818256 17389 solver.cpp:244]     Train net output #1: loss = 0.246708 (* 1 = 0.246708 loss)\nI0817 22:06:02.911382 17389 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 22:08:21.336232 17389 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:09:41.946259 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37608\nI0817 22:09:41.946493 17389 solver.cpp:404]     Test net output #1: loss = 4.0531 (* 1 = 4.0531 loss)\nI0817 22:09:43.252825 17389 solver.cpp:228] Iteration 9700, loss = 0.278946\nI0817 22:09:43.252867 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 22:09:43.252883 17389 solver.cpp:244]     Train net output #1: loss = 0.278946 (* 1 = 0.278946 loss)\nI0817 22:09:43.344091 17389 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 22:12:01.613907 17389 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:13:22.226372 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35528\nI0817 22:13:22.226605 17389 solver.cpp:404]     Test net output #1: loss = 4.23838 (* 1 = 4.23838 loss)\nI0817 22:13:23.532667 17389 solver.cpp:228] Iteration 9800, loss = 0.284663\nI0817 22:13:23.532711 17389 
solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 22:13:23.532727 17389 solver.cpp:244]     Train net output #1: loss = 0.284663 (* 1 = 0.284663 loss)\nI0817 22:13:23.630614 17389 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 22:15:41.863798 17389 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:17:02.466465 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3496\nI0817 22:17:02.466692 17389 solver.cpp:404]     Test net output #1: loss = 4.28951 (* 1 = 4.28951 loss)\nI0817 22:17:03.772918 17389 solver.cpp:228] Iteration 9900, loss = 0.389314\nI0817 22:17:03.772961 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 22:17:03.772977 17389 solver.cpp:244]     Train net output #1: loss = 0.389314 (* 1 = 0.389314 loss)\nI0817 22:17:03.872061 17389 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 22:19:22.127195 17389 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:20:42.735146 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3396\nI0817 22:20:42.735364 17389 solver.cpp:404]     Test net output #1: loss = 4.38317 (* 1 = 4.38317 loss)\nI0817 22:20:44.041110 17389 solver.cpp:228] Iteration 10000, loss = 0.349974\nI0817 22:20:44.041153 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 22:20:44.041169 17389 solver.cpp:244]     Train net output #1: loss = 0.349974 (* 1 = 0.349974 loss)\nI0817 22:20:44.139442 17389 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 22:23:02.426527 17389 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:24:23.033298 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3436\nI0817 22:24:23.033524 17389 solver.cpp:404]     Test net output #1: loss = 4.41262 (* 1 = 4.41262 loss)\nI0817 22:24:24.340150 17389 solver.cpp:228] Iteration 10100, loss = 0.312286\nI0817 22:24:24.340193 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:24:24.340210 17389 solver.cpp:244]     Train net output #1: loss = 0.312286 
(* 1 = 0.312286 loss)\nI0817 22:24:24.433594 17389 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 22:26:42.672219 17389 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:28:03.290385 17389 solver.cpp:404]     Test net output #0: accuracy = 0.28124\nI0817 22:28:03.290608 17389 solver.cpp:404]     Test net output #1: loss = 5.52314 (* 1 = 5.52314 loss)\nI0817 22:28:04.596396 17389 solver.cpp:228] Iteration 10200, loss = 0.151175\nI0817 22:28:04.596438 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:28:04.596454 17389 solver.cpp:244]     Train net output #1: loss = 0.151175 (* 1 = 0.151175 loss)\nI0817 22:28:04.690299 17389 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 22:30:22.960760 17389 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:31:43.571909 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35356\nI0817 22:31:43.572146 17389 solver.cpp:404]     Test net output #1: loss = 3.91902 (* 1 = 3.91902 loss)\nI0817 22:31:44.878041 17389 solver.cpp:228] Iteration 10300, loss = 0.369955\nI0817 22:31:44.878082 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:31:44.878098 17389 solver.cpp:244]     Train net output #1: loss = 0.369955 (* 1 = 0.369955 loss)\nI0817 22:31:44.973723 17389 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 22:34:03.395452 17389 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:35:24.004899 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36576\nI0817 22:35:24.005116 17389 solver.cpp:404]     Test net output #1: loss = 3.80149 (* 1 = 3.80149 loss)\nI0817 22:35:25.311914 17389 solver.cpp:228] Iteration 10400, loss = 0.388553\nI0817 22:35:25.311956 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 22:35:25.311974 17389 solver.cpp:244]     Train net output #1: loss = 0.388553 (* 1 = 0.388553 loss)\nI0817 22:35:25.404434 17389 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0817 22:37:43.677798 17389 
solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:39:04.292254 17389 solver.cpp:404]     Test net output #0: accuracy = 0.33044\nI0817 22:39:04.292482 17389 solver.cpp:404]     Test net output #1: loss = 4.66293 (* 1 = 4.66293 loss)\nI0817 22:39:05.599455 17389 solver.cpp:228] Iteration 10500, loss = 0.286253\nI0817 22:39:05.599496 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 22:39:05.599514 17389 solver.cpp:244]     Train net output #1: loss = 0.286253 (* 1 = 0.286253 loss)\nI0817 22:39:05.700388 17389 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0817 22:41:24.013099 17389 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 22:42:44.629101 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34416\nI0817 22:42:44.629359 17389 solver.cpp:404]     Test net output #1: loss = 4.39184 (* 1 = 4.39184 loss)\nI0817 22:42:45.936012 17389 solver.cpp:228] Iteration 10600, loss = 0.232765\nI0817 22:42:45.936053 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 22:42:45.936069 17389 solver.cpp:244]     Train net output #1: loss = 0.232765 (* 1 = 0.232765 loss)\nI0817 22:42:46.029402 17389 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 22:45:04.289572 17389 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 22:46:24.912346 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34244\nI0817 22:46:24.912598 17389 solver.cpp:404]     Test net output #1: loss = 4.39369 (* 1 = 4.39369 loss)\nI0817 22:46:26.219161 17389 solver.cpp:228] Iteration 10700, loss = 0.235107\nI0817 22:46:26.219203 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 22:46:26.219220 17389 solver.cpp:244]     Train net output #1: loss = 0.235107 (* 1 = 0.235107 loss)\nI0817 22:46:26.312517 17389 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0817 22:48:44.548470 17389 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 22:50:05.163213 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.35172\nI0817 22:50:05.163449 17389 solver.cpp:404]     Test net output #1: loss = 4.14018 (* 1 = 4.14018 loss)\nI0817 22:50:06.469422 17389 solver.cpp:228] Iteration 10800, loss = 0.325529\nI0817 22:50:06.469465 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 22:50:06.469481 17389 solver.cpp:244]     Train net output #1: loss = 0.325529 (* 1 = 0.325529 loss)\nI0817 22:50:06.562873 17389 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 22:52:24.824061 17389 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 22:53:45.433249 17389 solver.cpp:404]     Test net output #0: accuracy = 0.32264\nI0817 22:53:45.433495 17389 solver.cpp:404]     Test net output #1: loss = 4.85883 (* 1 = 4.85883 loss)\nI0817 22:53:46.739424 17389 solver.cpp:228] Iteration 10900, loss = 0.23096\nI0817 22:53:46.739466 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 22:53:46.739485 17389 solver.cpp:244]     Train net output #1: loss = 0.23096 (* 1 = 0.23096 loss)\nI0817 22:53:46.840292 17389 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 22:56:05.231364 17389 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 22:57:25.845291 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34496\nI0817 22:57:25.845523 17389 solver.cpp:404]     Test net output #1: loss = 4.46803 (* 1 = 4.46803 loss)\nI0817 22:57:27.153187 17389 solver.cpp:228] Iteration 11000, loss = 0.318005\nI0817 22:57:27.153228 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 22:57:27.153244 17389 solver.cpp:244]     Train net output #1: loss = 0.318005 (* 1 = 0.318005 loss)\nI0817 22:57:27.244761 17389 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 22:59:45.508859 17389 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 23:01:06.120998 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3156\nI0817 23:01:06.121243 17389 solver.cpp:404]     Test net output #1: loss = 4.52726 (* 1 = 4.52726 loss)\nI0817 23:01:07.428529 17389 
solver.cpp:228] Iteration 11100, loss = 0.315781\nI0817 23:01:07.428577 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 23:01:07.428593 17389 solver.cpp:244]     Train net output #1: loss = 0.315781 (* 1 = 0.315781 loss)\nI0817 23:01:07.529361 17389 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 23:03:26.057929 17389 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:04:47.513759 17389 solver.cpp:404]     Test net output #0: accuracy = 0.33444\nI0817 23:04:47.513993 17389 solver.cpp:404]     Test net output #1: loss = 4.50432 (* 1 = 4.50432 loss)\nI0817 23:04:48.825670 17389 solver.cpp:228] Iteration 11200, loss = 0.258833\nI0817 23:04:48.825729 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 23:04:48.825747 17389 solver.cpp:244]     Train net output #1: loss = 0.258832 (* 1 = 0.258832 loss)\nI0817 23:04:48.914733 17389 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 23:07:07.678526 17389 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:08:28.822648 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38\nI0817 23:08:28.822859 17389 solver.cpp:404]     Test net output #1: loss = 3.97548 (* 1 = 3.97548 loss)\nI0817 23:08:30.133131 17389 solver.cpp:228] Iteration 11300, loss = 0.254416\nI0817 23:08:30.133188 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 23:08:30.133206 17389 solver.cpp:244]     Train net output #1: loss = 0.254416 (* 1 = 0.254416 loss)\nI0817 23:08:30.228754 17389 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 23:10:48.949759 17389 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:12:10.372989 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34872\nI0817 23:12:10.373284 17389 solver.cpp:404]     Test net output #1: loss = 4.19274 (* 1 = 4.19274 loss)\nI0817 23:12:11.682670 17389 solver.cpp:228] Iteration 11400, loss = 0.176158\nI0817 23:12:11.682734 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.952\nI0817 23:12:11.682751 17389 solver.cpp:244]     Train net output #1: loss = 0.176158 (* 1 = 0.176158 loss)\nI0817 23:12:11.775605 17389 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 23:14:30.609374 17389 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:15:52.107724 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34236\nI0817 23:15:52.108013 17389 solver.cpp:404]     Test net output #1: loss = 4.43726 (* 1 = 4.43726 loss)\nI0817 23:15:53.419042 17389 solver.cpp:228] Iteration 11500, loss = 0.382764\nI0817 23:15:53.419100 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 23:15:53.419117 17389 solver.cpp:244]     Train net output #1: loss = 0.382763 (* 1 = 0.382763 loss)\nI0817 23:15:53.510541 17389 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 23:18:12.314720 17389 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:19:33.795925 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34268\nI0817 23:19:33.796236 17389 solver.cpp:404]     Test net output #1: loss = 4.19587 (* 1 = 4.19587 loss)\nI0817 23:19:35.105463 17389 solver.cpp:228] Iteration 11600, loss = 0.323479\nI0817 23:19:35.105520 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 23:19:35.105536 17389 solver.cpp:244]     Train net output #1: loss = 0.323479 (* 1 = 0.323479 loss)\nI0817 23:19:35.201915 17389 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 23:21:54.023756 17389 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:23:15.522680 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35748\nI0817 23:23:15.522990 17389 solver.cpp:404]     Test net output #1: loss = 4.01277 (* 1 = 4.01277 loss)\nI0817 23:23:16.832789 17389 solver.cpp:228] Iteration 11700, loss = 0.318202\nI0817 23:23:16.832830 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 23:23:16.832846 17389 solver.cpp:244]     Train net output #1: loss = 0.318202 (* 1 = 0.318202 loss)\nI0817 23:23:16.929405 
17389 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 23:25:35.670658 17389 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:26:57.155887 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36048\nI0817 23:26:57.156194 17389 solver.cpp:404]     Test net output #1: loss = 4.31023 (* 1 = 4.31023 loss)\nI0817 23:26:58.466204 17389 solver.cpp:228] Iteration 11800, loss = 0.32639\nI0817 23:26:58.466243 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 23:26:58.466260 17389 solver.cpp:244]     Train net output #1: loss = 0.32639 (* 1 = 0.32639 loss)\nI0817 23:26:58.559269 17389 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 23:29:17.290383 17389 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:30:38.770179 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35808\nI0817 23:30:38.770484 17389 solver.cpp:404]     Test net output #1: loss = 4.30957 (* 1 = 4.30957 loss)\nI0817 23:30:40.080370 17389 solver.cpp:228] Iteration 11900, loss = 0.312995\nI0817 23:30:40.080425 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 23:30:40.080442 17389 solver.cpp:244]     Train net output #1: loss = 0.312995 (* 1 = 0.312995 loss)\nI0817 23:30:40.171156 17389 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 23:32:58.941021 17389 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:34:20.459457 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3736\nI0817 23:34:20.459770 17389 solver.cpp:404]     Test net output #1: loss = 4.11138 (* 1 = 4.11138 loss)\nI0817 23:34:21.769531 17389 solver.cpp:228] Iteration 12000, loss = 0.184923\nI0817 23:34:21.769589 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 23:34:21.769606 17389 solver.cpp:244]     Train net output #1: loss = 0.184923 (* 1 = 0.184923 loss)\nI0817 23:34:21.863067 17389 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0817 23:36:40.598315 17389 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 
23:38:01.999724 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34804\nI0817 23:38:01.999953 17389 solver.cpp:404]     Test net output #1: loss = 4.29047 (* 1 = 4.29047 loss)\nI0817 23:38:03.309775 17389 solver.cpp:228] Iteration 12100, loss = 0.208313\nI0817 23:38:03.309833 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 23:38:03.309849 17389 solver.cpp:244]     Train net output #1: loss = 0.208313 (* 1 = 0.208313 loss)\nI0817 23:38:03.408649 17389 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0817 23:40:22.188503 17389 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:41:43.577563 17389 solver.cpp:404]     Test net output #0: accuracy = 0.334\nI0817 23:41:43.577792 17389 solver.cpp:404]     Test net output #1: loss = 4.50403 (* 1 = 4.50403 loss)\nI0817 23:41:44.887622 17389 solver.cpp:228] Iteration 12200, loss = 0.321968\nI0817 23:41:44.887675 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 23:41:44.887691 17389 solver.cpp:244]     Train net output #1: loss = 0.321968 (* 1 = 0.321968 loss)\nI0817 23:41:44.977910 17389 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0817 23:44:03.756252 17389 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0817 23:45:25.087473 17389 solver.cpp:404]     Test net output #0: accuracy = 0.31752\nI0817 23:45:25.087724 17389 solver.cpp:404]     Test net output #1: loss = 4.76785 (* 1 = 4.76785 loss)\nI0817 23:45:26.399032 17389 solver.cpp:228] Iteration 12300, loss = 0.183513\nI0817 23:45:26.399092 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 23:45:26.399109 17389 solver.cpp:244]     Train net output #1: loss = 0.183513 (* 1 = 0.183513 loss)\nI0817 23:45:26.492270 17389 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0817 23:47:45.260453 17389 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0817 23:49:06.534446 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38644\nI0817 23:49:06.534690 17389 solver.cpp:404]     Test net 
output #1: loss = 3.89561 (* 1 = 3.89561 loss)\nI0817 23:49:07.845407 17389 solver.cpp:228] Iteration 12400, loss = 0.286814\nI0817 23:49:07.845464 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 23:49:07.845482 17389 solver.cpp:244]     Train net output #1: loss = 0.286814 (* 1 = 0.286814 loss)\nI0817 23:49:07.939818 17389 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0817 23:51:26.651206 17389 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0817 23:52:47.998122 17389 solver.cpp:404]     Test net output #0: accuracy = 0.32088\nI0817 23:52:47.998404 17389 solver.cpp:404]     Test net output #1: loss = 4.86505 (* 1 = 4.86505 loss)\nI0817 23:52:49.309187 17389 solver.cpp:228] Iteration 12500, loss = 0.213135\nI0817 23:52:49.309243 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 23:52:49.309260 17389 solver.cpp:244]     Train net output #1: loss = 0.213135 (* 1 = 0.213135 loss)\nI0817 23:52:49.401098 17389 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0817 23:55:08.152509 17389 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0817 23:56:29.545805 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36844\nI0817 23:56:29.546048 17389 solver.cpp:404]     Test net output #1: loss = 4.14193 (* 1 = 4.14193 loss)\nI0817 23:56:30.856739 17389 solver.cpp:228] Iteration 12600, loss = 0.286186\nI0817 23:56:30.856779 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 23:56:30.856796 17389 solver.cpp:244]     Train net output #1: loss = 0.286185 (* 1 = 0.286185 loss)\nI0817 23:56:30.951678 17389 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0817 23:58:49.875471 17389 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:00:11.295469 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3508\nI0818 00:00:11.295723 17389 solver.cpp:404]     Test net output #1: loss = 4.51136 (* 1 = 4.51136 loss)\nI0818 00:00:12.606910 17389 solver.cpp:228] Iteration 12700, loss = 0.324332\nI0818 
00:00:12.606958 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 00:00:12.606973 17389 solver.cpp:244]     Train net output #1: loss = 0.324332 (* 1 = 0.324332 loss)\nI0818 00:00:12.700212 17389 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0818 00:02:31.516582 17389 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:03:52.994047 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34584\nI0818 00:03:52.994289 17389 solver.cpp:404]     Test net output #1: loss = 4.29542 (* 1 = 4.29542 loss)\nI0818 00:03:54.305078 17389 solver.cpp:228] Iteration 12800, loss = 0.273153\nI0818 00:03:54.305136 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 00:03:54.305155 17389 solver.cpp:244]     Train net output #1: loss = 0.273153 (* 1 = 0.273153 loss)\nI0818 00:03:54.403022 17389 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0818 00:06:13.370515 17389 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:07:34.826670 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36284\nI0818 00:07:34.826925 17389 solver.cpp:404]     Test net output #1: loss = 4.37924 (* 1 = 4.37924 loss)\nI0818 00:07:36.138176 17389 solver.cpp:228] Iteration 12900, loss = 0.24985\nI0818 00:07:36.138234 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 00:07:36.138252 17389 solver.cpp:244]     Train net output #1: loss = 0.24985 (* 1 = 0.24985 loss)\nI0818 00:07:36.229377 17389 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0818 00:09:55.050530 17389 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:11:16.177664 17389 solver.cpp:404]     Test net output #0: accuracy = 0.35624\nI0818 00:11:16.177906 17389 solver.cpp:404]     Test net output #1: loss = 4.70185 (* 1 = 4.70185 loss)\nI0818 00:11:17.488473 17389 solver.cpp:228] Iteration 13000, loss = 0.181158\nI0818 00:11:17.488517 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 00:11:17.488533 17389 solver.cpp:244]     Train net 
output #1: loss = 0.181158 (* 1 = 0.181158 loss)\nI0818 00:11:17.586086 17389 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0818 00:13:36.434788 17389 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:14:57.753077 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34372\nI0818 00:14:57.753315 17389 solver.cpp:404]     Test net output #1: loss = 4.75336 (* 1 = 4.75336 loss)\nI0818 00:14:59.064256 17389 solver.cpp:228] Iteration 13100, loss = 0.218767\nI0818 00:14:59.064313 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 00:14:59.064332 17389 solver.cpp:244]     Train net output #1: loss = 0.218767 (* 1 = 0.218767 loss)\nI0818 00:14:59.163099 17389 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0818 00:17:17.973000 17389 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:18:39.291239 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39176\nI0818 00:18:39.291466 17389 solver.cpp:404]     Test net output #1: loss = 3.9845 (* 1 = 3.9845 loss)\nI0818 00:18:40.601502 17389 solver.cpp:228] Iteration 13200, loss = 0.280039\nI0818 00:18:40.601562 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 00:18:40.601580 17389 solver.cpp:244]     Train net output #1: loss = 0.280039 (* 1 = 0.280039 loss)\nI0818 00:18:40.693578 17389 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0818 00:20:59.573304 17389 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:22:20.724256 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34032\nI0818 00:22:20.724510 17389 solver.cpp:404]     Test net output #1: loss = 4.80654 (* 1 = 4.80654 loss)\nI0818 00:22:22.034148 17389 solver.cpp:228] Iteration 13300, loss = 0.267056\nI0818 00:22:22.034204 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 00:22:22.034221 17389 solver.cpp:244]     Train net output #1: loss = 0.267055 (* 1 = 0.267055 loss)\nI0818 00:22:22.124657 17389 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0818 
00:24:40.670506 17389 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:26:01.780338 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38816\nI0818 00:26:01.780583 17389 solver.cpp:404]     Test net output #1: loss = 4.06214 (* 1 = 4.06214 loss)\nI0818 00:26:03.090513 17389 solver.cpp:228] Iteration 13400, loss = 0.300094\nI0818 00:26:03.090565 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 00:26:03.090581 17389 solver.cpp:244]     Train net output #1: loss = 0.300094 (* 1 = 0.300094 loss)\nI0818 00:26:03.179178 17389 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0818 00:28:21.703739 17389 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:29:43.160851 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40524\nI0818 00:29:43.161085 17389 solver.cpp:404]     Test net output #1: loss = 3.74299 (* 1 = 3.74299 loss)\nI0818 00:29:44.470445 17389 solver.cpp:228] Iteration 13500, loss = 0.350552\nI0818 00:29:44.470499 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 00:29:44.470517 17389 solver.cpp:244]     Train net output #1: loss = 0.350552 (* 1 = 0.350552 loss)\nI0818 00:29:44.561918 17389 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0818 00:32:03.135051 17389 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:33:24.577726 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39584\nI0818 00:33:24.577956 17389 solver.cpp:404]     Test net output #1: loss = 3.99314 (* 1 = 3.99314 loss)\nI0818 00:33:25.887869 17389 solver.cpp:228] Iteration 13600, loss = 0.315349\nI0818 00:33:25.887923 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 00:33:25.887940 17389 solver.cpp:244]     Train net output #1: loss = 0.315349 (* 1 = 0.315349 loss)\nI0818 00:33:25.980478 17389 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0818 00:35:44.737768 17389 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:37:06.168778 17389 solver.cpp:404]     Test net output 
#0: accuracy = 0.3988\nI0818 00:37:06.169018 17389 solver.cpp:404]     Test net output #1: loss = 3.94217 (* 1 = 3.94217 loss)\nI0818 00:37:07.478806 17389 solver.cpp:228] Iteration 13700, loss = 0.203772\nI0818 00:37:07.478855 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:37:07.478871 17389 solver.cpp:244]     Train net output #1: loss = 0.203772 (* 1 = 0.203772 loss)\nI0818 00:37:07.570319 17389 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0818 00:39:26.267745 17389 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:40:47.723615 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38072\nI0818 00:40:47.723888 17389 solver.cpp:404]     Test net output #1: loss = 4.39986 (* 1 = 4.39986 loss)\nI0818 00:40:49.033167 17389 solver.cpp:228] Iteration 13800, loss = 0.302783\nI0818 00:40:49.033219 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 00:40:49.033236 17389 solver.cpp:244]     Train net output #1: loss = 0.302783 (* 1 = 0.302783 loss)\nI0818 00:40:49.125139 17389 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0818 00:43:07.682813 17389 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 00:44:28.563999 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39116\nI0818 00:44:28.564232 17389 solver.cpp:404]     Test net output #1: loss = 4.01807 (* 1 = 4.01807 loss)\nI0818 00:44:29.873905 17389 solver.cpp:228] Iteration 13900, loss = 0.237011\nI0818 00:44:29.873961 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 00:44:29.873980 17389 solver.cpp:244]     Train net output #1: loss = 0.237011 (* 1 = 0.237011 loss)\nI0818 00:44:29.972947 17389 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0818 00:46:48.441012 17389 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 00:48:09.357667 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39592\nI0818 00:48:09.357928 17389 solver.cpp:404]     Test net output #1: loss = 3.94094 (* 1 = 3.94094 loss)\nI0818 
00:48:10.668946 17389 solver.cpp:228] Iteration 14000, loss = 0.294736\nI0818 00:48:10.669008 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 00:48:10.669033 17389 solver.cpp:244]     Train net output #1: loss = 0.294736 (* 1 = 0.294736 loss)\nI0818 00:48:10.761521 17389 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 00:50:29.054271 17389 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 00:51:49.699414 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41188\nI0818 00:51:49.699672 17389 solver.cpp:404]     Test net output #1: loss = 3.91201 (* 1 = 3.91201 loss)\nI0818 00:51:51.009289 17389 solver.cpp:228] Iteration 14100, loss = 0.180578\nI0818 00:51:51.009327 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:51:51.009344 17389 solver.cpp:244]     Train net output #1: loss = 0.180578 (* 1 = 0.180578 loss)\nI0818 00:51:51.097240 17389 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0818 00:54:09.290799 17389 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 00:55:29.907466 17389 solver.cpp:404]     Test net output #0: accuracy = 0.413\nI0818 00:55:29.907701 17389 solver.cpp:404]     Test net output #1: loss = 3.87628 (* 1 = 3.87628 loss)\nI0818 00:55:31.217123 17389 solver.cpp:228] Iteration 14200, loss = 0.240934\nI0818 00:55:31.217165 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 00:55:31.217181 17389 solver.cpp:244]     Train net output #1: loss = 0.240934 (* 1 = 0.240934 loss)\nI0818 00:55:31.309962 17389 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0818 00:57:49.595037 17389 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 00:59:10.211743 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38824\nI0818 00:59:10.211994 17389 solver.cpp:404]     Test net output #1: loss = 4.06868 (* 1 = 4.06868 loss)\nI0818 00:59:11.521170 17389 solver.cpp:228] Iteration 14300, loss = 0.192265\nI0818 00:59:11.521211 17389 solver.cpp:244]     Train net output 
#0: accuracy = 0.936\nI0818 00:59:11.521227 17389 solver.cpp:244]     Train net output #1: loss = 0.192265 (* 1 = 0.192265 loss)\nI0818 00:59:11.611585 17389 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0818 01:01:29.953200 17389 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:02:50.573221 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3872\nI0818 01:02:50.573549 17389 solver.cpp:404]     Test net output #1: loss = 4.2375 (* 1 = 4.2375 loss)\nI0818 01:02:51.882701 17389 solver.cpp:228] Iteration 14400, loss = 0.246599\nI0818 01:02:51.882743 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 01:02:51.882760 17389 solver.cpp:244]     Train net output #1: loss = 0.246599 (* 1 = 0.246599 loss)\nI0818 01:02:51.973428 17389 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0818 01:05:10.278831 17389 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:06:30.905463 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3794\nI0818 01:06:30.905760 17389 solver.cpp:404]     Test net output #1: loss = 4.03796 (* 1 = 4.03796 loss)\nI0818 01:06:32.214758 17389 solver.cpp:228] Iteration 14500, loss = 0.302834\nI0818 01:06:32.214802 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 01:06:32.214820 17389 solver.cpp:244]     Train net output #1: loss = 0.302834 (* 1 = 0.302834 loss)\nI0818 01:06:32.312484 17389 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0818 01:08:50.685511 17389 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:10:11.304635 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41948\nI0818 01:10:11.304901 17389 solver.cpp:404]     Test net output #1: loss = 3.66846 (* 1 = 3.66846 loss)\nI0818 01:10:12.613498 17389 solver.cpp:228] Iteration 14600, loss = 0.264082\nI0818 01:10:12.613546 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 01:10:12.613562 17389 solver.cpp:244]     Train net output #1: loss = 0.264081 (* 1 = 0.264081 loss)\nI0818 
01:10:12.711228 17389 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0818 01:12:31.015221 17389 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:13:51.631964 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37832\nI0818 01:13:51.632210 17389 solver.cpp:404]     Test net output #1: loss = 4.22455 (* 1 = 4.22455 loss)\nI0818 01:13:52.939743 17389 solver.cpp:228] Iteration 14700, loss = 0.257445\nI0818 01:13:52.939786 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 01:13:52.939803 17389 solver.cpp:244]     Train net output #1: loss = 0.257444 (* 1 = 0.257444 loss)\nI0818 01:13:53.035066 17389 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0818 01:16:11.329396 17389 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:17:31.949097 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37932\nI0818 01:17:31.949352 17389 solver.cpp:404]     Test net output #1: loss = 4.37656 (* 1 = 4.37656 loss)\nI0818 01:17:33.256083 17389 solver.cpp:228] Iteration 14800, loss = 0.304808\nI0818 01:17:33.256129 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 01:17:33.256145 17389 solver.cpp:244]     Train net output #1: loss = 0.304808 (* 1 = 0.304808 loss)\nI0818 01:17:33.347873 17389 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0818 01:19:51.653942 17389 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:21:12.273079 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38676\nI0818 01:21:12.273345 17389 solver.cpp:404]     Test net output #1: loss = 4.21896 (* 1 = 4.21896 loss)\nI0818 01:21:13.579565 17389 solver.cpp:228] Iteration 14900, loss = 0.217594\nI0818 01:21:13.579608 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 01:21:13.579624 17389 solver.cpp:244]     Train net output #1: loss = 0.217594 (* 1 = 0.217594 loss)\nI0818 01:21:13.678910 17389 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0818 01:23:31.996201 17389 solver.cpp:337] Iteration 15000, 
Testing net (#0)\nI0818 01:24:52.612836 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39216\nI0818 01:24:52.613095 17389 solver.cpp:404]     Test net output #1: loss = 3.74545 (* 1 = 3.74545 loss)\nI0818 01:24:53.919384 17389 solver.cpp:228] Iteration 15000, loss = 0.422624\nI0818 01:24:53.919428 17389 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 01:24:53.919445 17389 solver.cpp:244]     Train net output #1: loss = 0.422624 (* 1 = 0.422624 loss)\nI0818 01:24:54.013753 17389 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0818 01:27:12.293440 17389 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:28:32.913859 17389 solver.cpp:404]     Test net output #0: accuracy = 0.377\nI0818 01:28:32.914109 17389 solver.cpp:404]     Test net output #1: loss = 4.29443 (* 1 = 4.29443 loss)\nI0818 01:28:34.220732 17389 solver.cpp:228] Iteration 15100, loss = 0.364899\nI0818 01:28:34.220777 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 01:28:34.220793 17389 solver.cpp:244]     Train net output #1: loss = 0.364899 (* 1 = 0.364899 loss)\nI0818 01:28:34.311736 17389 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0818 01:30:52.584698 17389 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:32:13.214864 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38336\nI0818 01:32:13.215121 17389 solver.cpp:404]     Test net output #1: loss = 4.50169 (* 1 = 4.50169 loss)\nI0818 01:32:14.522545 17389 solver.cpp:228] Iteration 15200, loss = 0.223901\nI0818 01:32:14.522586 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 01:32:14.522603 17389 solver.cpp:244]     Train net output #1: loss = 0.223901 (* 1 = 0.223901 loss)\nI0818 01:32:14.616008 17389 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0818 01:34:32.880934 17389 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:35:53.505719 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4182\nI0818 01:35:53.505961 17389 
solver.cpp:404]     Test net output #1: loss = 3.83245 (* 1 = 3.83245 loss)\nI0818 01:35:54.812214 17389 solver.cpp:228] Iteration 15300, loss = 0.285804\nI0818 01:35:54.812256 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 01:35:54.812273 17389 solver.cpp:244]     Train net output #1: loss = 0.285804 (* 1 = 0.285804 loss)\nI0818 01:35:54.910815 17389 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0818 01:38:13.295138 17389 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:39:33.920606 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39688\nI0818 01:39:33.920872 17389 solver.cpp:404]     Test net output #1: loss = 4.11111 (* 1 = 4.11111 loss)\nI0818 01:39:35.226675 17389 solver.cpp:228] Iteration 15400, loss = 0.242027\nI0818 01:39:35.226716 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 01:39:35.226732 17389 solver.cpp:244]     Train net output #1: loss = 0.242027 (* 1 = 0.242027 loss)\nI0818 01:39:35.326340 17389 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0818 01:41:53.540180 17389 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 01:43:14.176060 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41816\nI0818 01:43:14.176326 17389 solver.cpp:404]     Test net output #1: loss = 3.7083 (* 1 = 3.7083 loss)\nI0818 01:43:15.483359 17389 solver.cpp:228] Iteration 15500, loss = 0.21836\nI0818 01:43:15.483402 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 01:43:15.483427 17389 solver.cpp:244]     Train net output #1: loss = 0.21836 (* 1 = 0.21836 loss)\nI0818 01:43:15.580720 17389 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0818 01:45:33.866904 17389 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 01:46:54.504454 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3878\nI0818 01:46:54.504721 17389 solver.cpp:404]     Test net output #1: loss = 4.17334 (* 1 = 4.17334 loss)\nI0818 01:46:55.811964 17389 solver.cpp:228] Iteration 15600, loss 
= 0.252094\nI0818 01:46:55.812008 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 01:46:55.812031 17389 solver.cpp:244]     Train net output #1: loss = 0.252094 (* 1 = 0.252094 loss)\nI0818 01:46:55.902822 17389 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0818 01:49:14.137116 17389 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 01:50:34.770529 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42136\nI0818 01:50:34.770803 17389 solver.cpp:404]     Test net output #1: loss = 3.89396 (* 1 = 3.89396 loss)\nI0818 01:50:36.078300 17389 solver.cpp:228] Iteration 15700, loss = 0.261672\nI0818 01:50:36.078344 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 01:50:36.078368 17389 solver.cpp:244]     Train net output #1: loss = 0.261672 (* 1 = 0.261672 loss)\nI0818 01:50:36.179241 17389 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0818 01:52:54.579337 17389 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 01:54:15.215270 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42816\nI0818 01:54:15.215528 17389 solver.cpp:404]     Test net output #1: loss = 3.45496 (* 1 = 3.45496 loss)\nI0818 01:54:16.522572 17389 solver.cpp:228] Iteration 15800, loss = 0.452918\nI0818 01:54:16.522615 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 01:54:16.522639 17389 solver.cpp:244]     Train net output #1: loss = 0.452917 (* 1 = 0.452917 loss)\nI0818 01:54:16.620460 17389 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0818 01:56:34.906466 17389 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 01:57:55.541074 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40988\nI0818 01:57:55.541347 17389 solver.cpp:404]     Test net output #1: loss = 3.78579 (* 1 = 3.78579 loss)\nI0818 01:57:56.847852 17389 solver.cpp:228] Iteration 15900, loss = 0.243876\nI0818 01:57:56.847896 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 01:57:56.847919 17389 
solver.cpp:244]     Train net output #1: loss = 0.243876 (* 1 = 0.243876 loss)\nI0818 01:57:56.940713 17389 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0818 02:00:15.225162 17389 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:01:35.850154 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38776\nI0818 02:01:35.850414 17389 solver.cpp:404]     Test net output #1: loss = 4.02983 (* 1 = 4.02983 loss)\nI0818 02:01:37.157390 17389 solver.cpp:228] Iteration 16000, loss = 0.20368\nI0818 02:01:37.157435 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 02:01:37.157459 17389 solver.cpp:244]     Train net output #1: loss = 0.20368 (* 1 = 0.20368 loss)\nI0818 02:01:37.256750 17389 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0818 02:03:55.491859 17389 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:05:16.142683 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41504\nI0818 02:05:16.142961 17389 solver.cpp:404]     Test net output #1: loss = 3.98221 (* 1 = 3.98221 loss)\nI0818 02:05:17.449811 17389 solver.cpp:228] Iteration 16100, loss = 0.286627\nI0818 02:05:17.449856 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 02:05:17.449879 17389 solver.cpp:244]     Train net output #1: loss = 0.286627 (* 1 = 0.286627 loss)\nI0818 02:05:17.545589 17389 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0818 02:07:35.958530 17389 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:08:56.581178 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39756\nI0818 02:08:56.581452 17389 solver.cpp:404]     Test net output #1: loss = 4.37105 (* 1 = 4.37105 loss)\nI0818 02:08:57.887212 17389 solver.cpp:228] Iteration 16200, loss = 0.241619\nI0818 02:08:57.887256 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 02:08:57.887281 17389 solver.cpp:244]     Train net output #1: loss = 0.241619 (* 1 = 0.241619 loss)\nI0818 02:08:57.981446 17389 sgd_solver.cpp:166] Iteration 
16200, lr = 0.35\nI0818 02:11:16.253968 17389 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:12:36.959596 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39628\nI0818 02:12:36.959877 17389 solver.cpp:404]     Test net output #1: loss = 4.45076 (* 1 = 4.45076 loss)\nI0818 02:12:38.266384 17389 solver.cpp:228] Iteration 16300, loss = 0.181374\nI0818 02:12:38.266432 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 02:12:38.266454 17389 solver.cpp:244]     Train net output #1: loss = 0.181374 (* 1 = 0.181374 loss)\nI0818 02:12:38.361850 17389 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0818 02:14:56.626269 17389 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:16:17.300724 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40132\nI0818 02:16:17.301007 17389 solver.cpp:404]     Test net output #1: loss = 4.3678 (* 1 = 4.3678 loss)\nI0818 02:16:18.607851 17389 solver.cpp:228] Iteration 16400, loss = 0.332582\nI0818 02:16:18.607897 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 02:16:18.607921 17389 solver.cpp:244]     Train net output #1: loss = 0.332582 (* 1 = 0.332582 loss)\nI0818 02:16:18.701165 17389 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0818 02:18:36.987450 17389 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:19:57.610496 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36424\nI0818 02:19:57.610775 17389 solver.cpp:404]     Test net output #1: loss = 4.96402 (* 1 = 4.96402 loss)\nI0818 02:19:58.917794 17389 solver.cpp:228] Iteration 16500, loss = 0.265352\nI0818 02:19:58.917840 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 02:19:58.917863 17389 solver.cpp:244]     Train net output #1: loss = 0.265352 (* 1 = 0.265352 loss)\nI0818 02:19:59.017248 17389 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0818 02:22:17.342969 17389 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:23:38.040729 17389 solver.cpp:404] 
    Test net output #0: accuracy = 0.38312\nI0818 02:23:38.041007 17389 solver.cpp:404]     Test net output #1: loss = 4.25543 (* 1 = 4.25543 loss)\nI0818 02:23:39.347448 17389 solver.cpp:228] Iteration 16600, loss = 0.220556\nI0818 02:23:39.347494 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 02:23:39.347519 17389 solver.cpp:244]     Train net output #1: loss = 0.220556 (* 1 = 0.220556 loss)\nI0818 02:23:39.440897 17389 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0818 02:25:57.813450 17389 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:27:18.433195 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38968\nI0818 02:27:18.433467 17389 solver.cpp:404]     Test net output #1: loss = 4.12968 (* 1 = 4.12968 loss)\nI0818 02:27:19.739981 17389 solver.cpp:228] Iteration 16700, loss = 0.19206\nI0818 02:27:19.740028 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 02:27:19.740052 17389 solver.cpp:244]     Train net output #1: loss = 0.19206 (* 1 = 0.19206 loss)\nI0818 02:27:19.834074 17389 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0818 02:29:38.258285 17389 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:30:58.873448 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41168\nI0818 02:30:58.873714 17389 solver.cpp:404]     Test net output #1: loss = 3.85915 (* 1 = 3.85915 loss)\nI0818 02:31:00.180064 17389 solver.cpp:228] Iteration 16800, loss = 0.347933\nI0818 02:31:00.180110 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 02:31:00.180135 17389 solver.cpp:244]     Train net output #1: loss = 0.347933 (* 1 = 0.347933 loss)\nI0818 02:31:00.271937 17389 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0818 02:33:18.642334 17389 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:34:39.344102 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34496\nI0818 02:34:39.344363 17389 solver.cpp:404]     Test net output #1: loss = 4.93636 (* 1 = 
4.93636 loss)\nI0818 02:34:40.651736 17389 solver.cpp:228] Iteration 16900, loss = 0.393494\nI0818 02:34:40.651788 17389 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 02:34:40.651813 17389 solver.cpp:244]     Train net output #1: loss = 0.393494 (* 1 = 0.393494 loss)\nI0818 02:34:40.749251 17389 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0818 02:36:59.267084 17389 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:38:19.960431 17389 solver.cpp:404]     Test net output #0: accuracy = 0.375\nI0818 02:38:19.960707 17389 solver.cpp:404]     Test net output #1: loss = 4.78638 (* 1 = 4.78638 loss)\nI0818 02:38:21.267716 17389 solver.cpp:228] Iteration 17000, loss = 0.308825\nI0818 02:38:21.267762 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 02:38:21.267792 17389 solver.cpp:244]     Train net output #1: loss = 0.308825 (* 1 = 0.308825 loss)\nI0818 02:38:21.363075 17389 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0818 02:40:39.751329 17389 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 02:42:00.428367 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39404\nI0818 02:42:00.428625 17389 solver.cpp:404]     Test net output #1: loss = 4.19884 (* 1 = 4.19884 loss)\nI0818 02:42:01.739385 17389 solver.cpp:228] Iteration 17100, loss = 0.183652\nI0818 02:42:01.739429 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 02:42:01.739454 17389 solver.cpp:244]     Train net output #1: loss = 0.183651 (* 1 = 0.183651 loss)\nI0818 02:42:01.829041 17389 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0818 02:44:20.184206 17389 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 02:45:40.796254 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4134\nI0818 02:45:40.796528 17389 solver.cpp:404]     Test net output #1: loss = 3.92391 (* 1 = 3.92391 loss)\nI0818 02:45:42.107255 17389 solver.cpp:228] Iteration 17200, loss = 0.23375\nI0818 02:45:42.107303 17389 solver.cpp:244]     
Train net output #0: accuracy = 0.92\nI0818 02:45:42.107328 17389 solver.cpp:244]     Train net output #1: loss = 0.233749 (* 1 = 0.233749 loss)\nI0818 02:45:42.197391 17389 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0818 02:48:00.595374 17389 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 02:49:21.204525 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38016\nI0818 02:49:21.204790 17389 solver.cpp:404]     Test net output #1: loss = 4.68044 (* 1 = 4.68044 loss)\nI0818 02:49:22.514397 17389 solver.cpp:228] Iteration 17300, loss = 0.389659\nI0818 02:49:22.514444 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 02:49:22.514470 17389 solver.cpp:244]     Train net output #1: loss = 0.389659 (* 1 = 0.389659 loss)\nI0818 02:49:22.608038 17389 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0818 02:51:40.952723 17389 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 02:53:01.616219 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39536\nI0818 02:53:01.616492 17389 solver.cpp:404]     Test net output #1: loss = 4.41714 (* 1 = 4.41714 loss)\nI0818 02:53:02.926839 17389 solver.cpp:228] Iteration 17400, loss = 0.304233\nI0818 02:53:02.926885 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 02:53:02.926910 17389 solver.cpp:244]     Train net output #1: loss = 0.304233 (* 1 = 0.304233 loss)\nI0818 02:53:03.021062 17389 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0818 02:55:21.401556 17389 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 02:56:42.082051 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3794\nI0818 02:56:42.082301 17389 solver.cpp:404]     Test net output #1: loss = 4.47174 (* 1 = 4.47174 loss)\nI0818 02:56:43.392484 17389 solver.cpp:228] Iteration 17500, loss = 0.29082\nI0818 02:56:43.392531 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 02:56:43.392555 17389 solver.cpp:244]     Train net output #1: loss = 0.29082 (* 1 = 0.29082 
loss)\nI0818 02:56:43.483954 17389 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0818 02:59:01.875306 17389 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:00:22.495026 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38668\nI0818 03:00:22.495295 17389 solver.cpp:404]     Test net output #1: loss = 4.53475 (* 1 = 4.53475 loss)\nI0818 03:00:23.806092 17389 solver.cpp:228] Iteration 17600, loss = 0.130417\nI0818 03:00:23.806138 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:00:23.806161 17389 solver.cpp:244]     Train net output #1: loss = 0.130417 (* 1 = 0.130417 loss)\nI0818 03:00:23.895947 17389 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0818 03:02:42.292356 17389 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:04:02.901993 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42016\nI0818 03:04:02.902242 17389 solver.cpp:404]     Test net output #1: loss = 3.70241 (* 1 = 3.70241 loss)\nI0818 03:04:04.211799 17389 solver.cpp:228] Iteration 17700, loss = 0.247642\nI0818 03:04:04.211843 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 03:04:04.211859 17389 solver.cpp:244]     Train net output #1: loss = 0.247642 (* 1 = 0.247642 loss)\nI0818 03:04:04.305171 17389 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0818 03:06:22.906764 17389 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:07:43.524354 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42032\nI0818 03:07:43.524607 17389 solver.cpp:404]     Test net output #1: loss = 3.76209 (* 1 = 3.76209 loss)\nI0818 03:07:44.834169 17389 solver.cpp:228] Iteration 17800, loss = 0.245694\nI0818 03:07:44.834213 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 03:07:44.834228 17389 solver.cpp:244]     Train net output #1: loss = 0.245694 (* 1 = 0.245694 loss)\nI0818 03:07:44.923270 17389 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0818 03:10:03.285720 17389 solver.cpp:337] Iteration 
17900, Testing net (#0)\nI0818 03:11:23.907933 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39744\nI0818 03:11:23.908179 17389 solver.cpp:404]     Test net output #1: loss = 3.81157 (* 1 = 3.81157 loss)\nI0818 03:11:25.217706 17389 solver.cpp:228] Iteration 17900, loss = 0.247538\nI0818 03:11:25.217749 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 03:11:25.217767 17389 solver.cpp:244]     Train net output #1: loss = 0.247537 (* 1 = 0.247537 loss)\nI0818 03:11:25.308691 17389 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0818 03:13:43.648283 17389 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:15:04.263478 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4156\nI0818 03:15:04.263731 17389 solver.cpp:404]     Test net output #1: loss = 3.86577 (* 1 = 3.86577 loss)\nI0818 03:15:05.571995 17389 solver.cpp:228] Iteration 18000, loss = 0.248668\nI0818 03:15:05.572037 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 03:15:05.572053 17389 solver.cpp:244]     Train net output #1: loss = 0.248668 (* 1 = 0.248668 loss)\nI0818 03:15:05.670356 17389 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0818 03:17:23.932237 17389 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:18:44.542543 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4288\nI0818 03:18:44.542807 17389 solver.cpp:404]     Test net output #1: loss = 3.5441 (* 1 = 3.5441 loss)\nI0818 03:18:45.852434 17389 solver.cpp:228] Iteration 18100, loss = 0.166919\nI0818 03:18:45.852478 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:18:45.852494 17389 solver.cpp:244]     Train net output #1: loss = 0.166918 (* 1 = 0.166918 loss)\nI0818 03:18:45.948400 17389 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0818 03:21:04.417513 17389 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:22:25.032281 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42236\nI0818 03:22:25.032546 17389 
solver.cpp:404]     Test net output #1: loss = 3.80917 (* 1 = 3.80917 loss)\nI0818 03:22:26.341688 17389 solver.cpp:228] Iteration 18200, loss = 0.156146\nI0818 03:22:26.341732 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 03:22:26.341749 17389 solver.cpp:244]     Train net output #1: loss = 0.156146 (* 1 = 0.156146 loss)\nI0818 03:22:26.436647 17389 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0818 03:24:44.838615 17389 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:26:05.456406 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43004\nI0818 03:26:05.456657 17389 solver.cpp:404]     Test net output #1: loss = 3.74568 (* 1 = 3.74568 loss)\nI0818 03:26:06.765473 17389 solver.cpp:228] Iteration 18300, loss = 0.222276\nI0818 03:26:06.765516 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 03:26:06.765532 17389 solver.cpp:244]     Train net output #1: loss = 0.222276 (* 1 = 0.222276 loss)\nI0818 03:26:06.857236 17389 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0818 03:28:25.136188 17389 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:29:45.743924 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38672\nI0818 03:29:45.744180 17389 solver.cpp:404]     Test net output #1: loss = 4.24512 (* 1 = 4.24512 loss)\nI0818 03:29:47.054383 17389 solver.cpp:228] Iteration 18400, loss = 0.216194\nI0818 03:29:47.054427 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 03:29:47.054443 17389 solver.cpp:244]     Train net output #1: loss = 0.216194 (* 1 = 0.216194 loss)\nI0818 03:29:47.143075 17389 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0818 03:32:05.464711 17389 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:33:26.084511 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41772\nI0818 03:33:26.084784 17389 solver.cpp:404]     Test net output #1: loss = 3.91088 (* 1 = 3.91088 loss)\nI0818 03:33:27.394613 17389 solver.cpp:228] Iteration 18500, 
loss = 0.300636\nI0818 03:33:27.394656 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 03:33:27.394672 17389 solver.cpp:244]     Train net output #1: loss = 0.300636 (* 1 = 0.300636 loss)\nI0818 03:33:27.486281 17389 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0818 03:35:45.851873 17389 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:37:06.458798 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40272\nI0818 03:37:06.459074 17389 solver.cpp:404]     Test net output #1: loss = 4.08072 (* 1 = 4.08072 loss)\nI0818 03:37:07.769201 17389 solver.cpp:228] Iteration 18600, loss = 0.134326\nI0818 03:37:07.769243 17389 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:37:07.769259 17389 solver.cpp:244]     Train net output #1: loss = 0.134326 (* 1 = 0.134326 loss)\nI0818 03:37:07.862651 17389 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0818 03:39:26.266609 17389 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:40:46.896486 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41728\nI0818 03:40:46.896744 17389 solver.cpp:404]     Test net output #1: loss = 3.99696 (* 1 = 3.99696 loss)\nI0818 03:40:48.206755 17389 solver.cpp:228] Iteration 18700, loss = 0.331505\nI0818 03:40:48.206799 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 03:40:48.206815 17389 solver.cpp:244]     Train net output #1: loss = 0.331504 (* 1 = 0.331504 loss)\nI0818 03:40:48.300712 17389 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0818 03:43:06.828559 17389 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 03:44:27.453655 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42176\nI0818 03:44:27.453904 17389 solver.cpp:404]     Test net output #1: loss = 3.88166 (* 1 = 3.88166 loss)\nI0818 03:44:28.764653 17389 solver.cpp:228] Iteration 18800, loss = 0.266347\nI0818 03:44:28.764696 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 03:44:28.764714 17389 
solver.cpp:244]     Train net output #1: loss = 0.266347 (* 1 = 0.266347 loss)\nI0818 03:44:28.857435 17389 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0818 03:46:47.203490 17389 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 03:48:07.831339 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39648\nI0818 03:48:07.831614 17389 solver.cpp:404]     Test net output #1: loss = 4.03603 (* 1 = 4.03603 loss)\nI0818 03:48:09.141189 17389 solver.cpp:228] Iteration 18900, loss = 0.238578\nI0818 03:48:09.141232 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 03:48:09.141248 17389 solver.cpp:244]     Train net output #1: loss = 0.238578 (* 1 = 0.238578 loss)\nI0818 03:48:09.238698 17389 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0818 03:50:27.605934 17389 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 03:51:48.232489 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40064\nI0818 03:51:48.232745 17389 solver.cpp:404]     Test net output #1: loss = 4.01812 (* 1 = 4.01812 loss)\nI0818 03:51:49.543280 17389 solver.cpp:228] Iteration 19000, loss = 0.140887\nI0818 03:51:49.543324 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:51:49.543340 17389 solver.cpp:244]     Train net output #1: loss = 0.140887 (* 1 = 0.140887 loss)\nI0818 03:51:49.641000 17389 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0818 03:54:08.027650 17389 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 03:55:28.645862 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41124\nI0818 03:55:28.646132 17389 solver.cpp:404]     Test net output #1: loss = 4.03432 (* 1 = 4.03432 loss)\nI0818 03:55:29.954618 17389 solver.cpp:228] Iteration 19100, loss = 0.243866\nI0818 03:55:29.954663 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 03:55:29.954679 17389 solver.cpp:244]     Train net output #1: loss = 0.243865 (* 1 = 0.243865 loss)\nI0818 03:55:30.051733 17389 sgd_solver.cpp:166] Iteration 
19100, lr = 0.35\nI0818 03:57:48.394666 17389 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 03:59:09.021597 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40632\nI0818 03:59:09.021857 17389 solver.cpp:404]     Test net output #1: loss = 3.8872 (* 1 = 3.8872 loss)\nI0818 03:59:10.331740 17389 solver.cpp:228] Iteration 19200, loss = 0.267343\nI0818 03:59:10.331784 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 03:59:10.331799 17389 solver.cpp:244]     Train net output #1: loss = 0.267343 (* 1 = 0.267343 loss)\nI0818 03:59:10.423090 17389 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0818 04:01:28.958286 17389 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:02:49.575865 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37992\nI0818 04:02:49.576124 17389 solver.cpp:404]     Test net output #1: loss = 3.99261 (* 1 = 3.99261 loss)\nI0818 04:02:50.885913 17389 solver.cpp:228] Iteration 19300, loss = 0.288334\nI0818 04:02:50.885957 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 04:02:50.885974 17389 solver.cpp:244]     Train net output #1: loss = 0.288333 (* 1 = 0.288333 loss)\nI0818 04:02:50.977105 17389 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0818 04:05:09.360241 17389 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:06:29.980943 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4422\nI0818 04:06:29.981211 17389 solver.cpp:404]     Test net output #1: loss = 3.39687 (* 1 = 3.39687 loss)\nI0818 04:06:31.291175 17389 solver.cpp:228] Iteration 19400, loss = 0.20213\nI0818 04:06:31.291218 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 04:06:31.291232 17389 solver.cpp:244]     Train net output #1: loss = 0.20213 (* 1 = 0.20213 loss)\nI0818 04:06:31.381409 17389 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0818 04:08:49.750787 17389 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:10:10.374516 17389 solver.cpp:404]   
  Test net output #0: accuracy = 0.38624\nI0818 04:10:10.374799 17389 solver.cpp:404]     Test net output #1: loss = 4.62437 (* 1 = 4.62437 loss)\nI0818 04:10:11.684828 17389 solver.cpp:228] Iteration 19500, loss = 0.246035\nI0818 04:10:11.684869 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 04:10:11.684885 17389 solver.cpp:244]     Train net output #1: loss = 0.246035 (* 1 = 0.246035 loss)\nI0818 04:10:11.776685 17389 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0818 04:12:30.143539 17389 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:13:50.762945 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43356\nI0818 04:13:50.763216 17389 solver.cpp:404]     Test net output #1: loss = 3.60776 (* 1 = 3.60776 loss)\nI0818 04:13:52.073819 17389 solver.cpp:228] Iteration 19600, loss = 0.348803\nI0818 04:13:52.073861 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 04:13:52.073879 17389 solver.cpp:244]     Train net output #1: loss = 0.348803 (* 1 = 0.348803 loss)\nI0818 04:13:52.167737 17389 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0818 04:16:10.540866 17389 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:17:31.163952 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42152\nI0818 04:17:31.164221 17389 solver.cpp:404]     Test net output #1: loss = 3.98572 (* 1 = 3.98572 loss)\nI0818 04:17:32.474218 17389 solver.cpp:228] Iteration 19700, loss = 0.209203\nI0818 04:17:32.474261 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 04:17:32.474277 17389 solver.cpp:244]     Train net output #1: loss = 0.209202 (* 1 = 0.209202 loss)\nI0818 04:17:32.567838 17389 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0818 04:19:50.923094 17389 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:21:11.540557 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41232\nI0818 04:21:11.540830 17389 solver.cpp:404]     Test net output #1: loss = 4.07739 (* 1 = 
4.07739 loss)\nI0818 04:21:12.850855 17389 solver.cpp:228] Iteration 19800, loss = 0.339504\nI0818 04:21:12.850896 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 04:21:12.850913 17389 solver.cpp:244]     Train net output #1: loss = 0.339503 (* 1 = 0.339503 loss)\nI0818 04:21:12.947917 17389 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0818 04:23:31.489629 17389 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:24:52.105453 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3796\nI0818 04:24:52.105732 17389 solver.cpp:404]     Test net output #1: loss = 4.19733 (* 1 = 4.19733 loss)\nI0818 04:24:53.415247 17389 solver.cpp:228] Iteration 19900, loss = 0.285722\nI0818 04:24:53.415287 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 04:24:53.415303 17389 solver.cpp:244]     Train net output #1: loss = 0.285722 (* 1 = 0.285722 loss)\nI0818 04:24:53.514819 17389 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0818 04:27:11.851891 17389 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:28:32.462379 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40768\nI0818 04:28:32.462635 17389 solver.cpp:404]     Test net output #1: loss = 3.61676 (* 1 = 3.61676 loss)\nI0818 04:28:33.772317 17389 solver.cpp:228] Iteration 20000, loss = 0.171681\nI0818 04:28:33.772357 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 04:28:33.772373 17389 solver.cpp:244]     Train net output #1: loss = 0.17168 (* 1 = 0.17168 loss)\nI0818 04:28:33.862493 17389 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0818 04:30:52.264917 17389 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:32:12.889768 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42628\nI0818 04:32:12.890048 17389 solver.cpp:404]     Test net output #1: loss = 3.66043 (* 1 = 3.66043 loss)\nI0818 04:32:14.200335 17389 solver.cpp:228] Iteration 20100, loss = 0.203805\nI0818 04:32:14.200378 17389 solver.cpp:244]    
 Train net output #0: accuracy = 0.944\nI0818 04:32:14.200393 17389 solver.cpp:244]     Train net output #1: loss = 0.203805 (* 1 = 0.203805 loss)\nI0818 04:32:14.287454 17389 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0818 04:34:32.637817 17389 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:35:53.266207 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43476\nI0818 04:35:53.266458 17389 solver.cpp:404]     Test net output #1: loss = 3.63431 (* 1 = 3.63431 loss)\nI0818 04:35:54.576218 17389 solver.cpp:228] Iteration 20200, loss = 0.213803\nI0818 04:35:54.576261 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 04:35:54.576277 17389 solver.cpp:244]     Train net output #1: loss = 0.213803 (* 1 = 0.213803 loss)\nI0818 04:35:54.667554 17389 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0818 04:38:13.173501 17389 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 04:39:33.798243 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42832\nI0818 04:39:33.798496 17389 solver.cpp:404]     Test net output #1: loss = 3.65571 (* 1 = 3.65571 loss)\nI0818 04:39:35.106775 17389 solver.cpp:228] Iteration 20300, loss = 0.300255\nI0818 04:39:35.106817 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 04:39:35.106832 17389 solver.cpp:244]     Train net output #1: loss = 0.300255 (* 1 = 0.300255 loss)\nI0818 04:39:35.202500 17389 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0818 04:41:53.667359 17389 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0818 04:43:14.289034 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43344\nI0818 04:43:14.289288 17389 solver.cpp:404]     Test net output #1: loss = 3.68083 (* 1 = 3.68083 loss)\nI0818 04:43:15.595655 17389 solver.cpp:228] Iteration 20400, loss = 0.232014\nI0818 04:43:15.595698 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 04:43:15.595715 17389 solver.cpp:244]     Train net output #1: loss = 0.232013 (* 1 = 
0.232013 loss)\nI0818 04:43:15.688022 17389 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0818 04:45:34.100649 17389 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 04:46:54.721601 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3864\nI0818 04:46:54.721853 17389 solver.cpp:404]     Test net output #1: loss = 4.35814 (* 1 = 4.35814 loss)\nI0818 04:46:56.028148 17389 solver.cpp:228] Iteration 20500, loss = 0.154482\nI0818 04:46:56.028190 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 04:46:56.028208 17389 solver.cpp:244]     Train net output #1: loss = 0.154482 (* 1 = 0.154482 loss)\nI0818 04:46:56.131314 17389 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0818 04:49:14.504529 17389 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 04:50:35.130244 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40796\nI0818 04:50:35.130504 17389 solver.cpp:404]     Test net output #1: loss = 3.78366 (* 1 = 3.78366 loss)\nI0818 04:50:36.436779 17389 solver.cpp:228] Iteration 20600, loss = 0.293468\nI0818 04:50:36.436822 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 04:50:36.436839 17389 solver.cpp:244]     Train net output #1: loss = 0.293468 (* 1 = 0.293468 loss)\nI0818 04:50:36.528702 17389 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0818 04:52:54.873095 17389 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 04:54:15.498227 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39408\nI0818 04:54:15.498481 17389 solver.cpp:404]     Test net output #1: loss = 4.21078 (* 1 = 4.21078 loss)\nI0818 04:54:16.805212 17389 solver.cpp:228] Iteration 20700, loss = 0.298928\nI0818 04:54:16.805255 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 04:54:16.805272 17389 solver.cpp:244]     Train net output #1: loss = 0.298928 (* 1 = 0.298928 loss)\nI0818 04:54:16.900398 17389 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0818 04:56:35.370653 17389 solver.cpp:337] 
Iteration 20800, Testing net (#0)\nI0818 04:57:55.994290 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41036\nI0818 04:57:55.994547 17389 solver.cpp:404]     Test net output #1: loss = 4.07047 (* 1 = 4.07047 loss)\nI0818 04:57:57.301118 17389 solver.cpp:228] Iteration 20800, loss = 0.313537\nI0818 04:57:57.301162 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 04:57:57.301178 17389 solver.cpp:244]     Train net output #1: loss = 0.313537 (* 1 = 0.313537 loss)\nI0818 04:57:57.403904 17389 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0818 05:00:15.825398 17389 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 05:01:36.447923 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3866\nI0818 05:01:36.448182 17389 solver.cpp:404]     Test net output #1: loss = 4.45497 (* 1 = 4.45497 loss)\nI0818 05:01:37.755895 17389 solver.cpp:228] Iteration 20900, loss = 0.222038\nI0818 05:01:37.755939 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 05:01:37.755954 17389 solver.cpp:244]     Train net output #1: loss = 0.222038 (* 1 = 0.222038 loss)\nI0818 05:01:37.853528 17389 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0818 05:03:56.235424 17389 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 05:05:16.855559 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39468\nI0818 05:05:16.855821 17389 solver.cpp:404]     Test net output #1: loss = 4.32658 (* 1 = 4.32658 loss)\nI0818 05:05:18.162770 17389 solver.cpp:228] Iteration 21000, loss = 0.304424\nI0818 05:05:18.162811 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 05:05:18.162827 17389 solver.cpp:244]     Train net output #1: loss = 0.304424 (* 1 = 0.304424 loss)\nI0818 05:05:18.261497 17389 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0818 05:07:36.687630 17389 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:08:57.304396 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40732\nI0818 
05:08:57.304651 17389 solver.cpp:404]     Test net output #1: loss = 3.85393 (* 1 = 3.85393 loss)\nI0818 05:08:58.610589 17389 solver.cpp:228] Iteration 21100, loss = 0.221106\nI0818 05:08:58.610630 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 05:08:58.610646 17389 solver.cpp:244]     Train net output #1: loss = 0.221106 (* 1 = 0.221106 loss)\nI0818 05:08:58.706984 17389 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0818 05:11:17.103309 17389 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:12:37.724716 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38804\nI0818 05:12:37.725004 17389 solver.cpp:404]     Test net output #1: loss = 4.23267 (* 1 = 4.23267 loss)\nI0818 05:12:39.031404 17389 solver.cpp:228] Iteration 21200, loss = 0.215516\nI0818 05:12:39.031448 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 05:12:39.031463 17389 solver.cpp:244]     Train net output #1: loss = 0.215516 (* 1 = 0.215516 loss)\nI0818 05:12:39.127585 17389 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0818 05:14:57.548812 17389 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:16:18.175377 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37872\nI0818 05:16:18.175631 17389 solver.cpp:404]     Test net output #1: loss = 4.37445 (* 1 = 4.37445 loss)\nI0818 05:16:19.482450 17389 solver.cpp:228] Iteration 21300, loss = 0.191882\nI0818 05:16:19.482492 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 05:16:19.482508 17389 solver.cpp:244]     Train net output #1: loss = 0.191882 (* 1 = 0.191882 loss)\nI0818 05:16:19.576392 17389 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0818 05:18:37.961848 17389 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 05:19:58.590260 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4062\nI0818 05:19:58.590512 17389 solver.cpp:404]     Test net output #1: loss = 3.68688 (* 1 = 3.68688 loss)\nI0818 05:19:59.896813 17389 
solver.cpp:228] Iteration 21400, loss = 0.231728\nI0818 05:19:59.896857 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 05:19:59.896872 17389 solver.cpp:244]     Train net output #1: loss = 0.231727 (* 1 = 0.231727 loss)\nI0818 05:19:59.990100 17389 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0818 05:22:18.354164 17389 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:23:38.969532 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40832\nI0818 05:23:38.969828 17389 solver.cpp:404]     Test net output #1: loss = 3.82459 (* 1 = 3.82459 loss)\nI0818 05:23:40.278754 17389 solver.cpp:228] Iteration 21500, loss = 0.177997\nI0818 05:23:40.278816 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 05:23:40.278841 17389 solver.cpp:244]     Train net output #1: loss = 0.177997 (* 1 = 0.177997 loss)\nI0818 05:23:40.371531 17389 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0818 05:25:58.938619 17389 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 05:27:20.023067 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41456\nI0818 05:27:20.023329 17389 solver.cpp:404]     Test net output #1: loss = 3.85076 (* 1 = 3.85076 loss)\nI0818 05:27:21.333729 17389 solver.cpp:228] Iteration 21600, loss = 0.216939\nI0818 05:27:21.333788 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 05:27:21.333806 17389 solver.cpp:244]     Train net output #1: loss = 0.216938 (* 1 = 0.216938 loss)\nI0818 05:27:21.427455 17389 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0818 05:29:39.958137 17389 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:31:01.406942 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41512\nI0818 05:31:01.407204 17389 solver.cpp:404]     Test net output #1: loss = 3.72434 (* 1 = 3.72434 loss)\nI0818 05:31:02.717542 17389 solver.cpp:228] Iteration 21700, loss = 0.24088\nI0818 05:31:02.717586 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.92\nI0818 05:31:02.717608 17389 solver.cpp:244]     Train net output #1: loss = 0.24088 (* 1 = 0.24088 loss)\nI0818 05:31:02.812705 17389 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0818 05:33:21.426520 17389 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:34:42.874761 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38264\nI0818 05:34:42.875020 17389 solver.cpp:404]     Test net output #1: loss = 4.24524 (* 1 = 4.24524 loss)\nI0818 05:34:44.186290 17389 solver.cpp:228] Iteration 21800, loss = 0.266613\nI0818 05:34:44.186352 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 05:34:44.186378 17389 solver.cpp:244]     Train net output #1: loss = 0.266613 (* 1 = 0.266613 loss)\nI0818 05:34:44.277160 17389 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0818 05:37:02.848175 17389 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 05:38:24.297211 17389 solver.cpp:404]     Test net output #0: accuracy = 0.34112\nI0818 05:38:24.297451 17389 solver.cpp:404]     Test net output #1: loss = 5.11199 (* 1 = 5.11199 loss)\nI0818 05:38:25.609133 17389 solver.cpp:228] Iteration 21900, loss = 0.384066\nI0818 05:38:25.609179 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 05:38:25.609202 17389 solver.cpp:244]     Train net output #1: loss = 0.384065 (* 1 = 0.384065 loss)\nI0818 05:38:25.697921 17389 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0818 05:40:44.229075 17389 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0818 05:42:05.513228 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44252\nI0818 05:42:05.513473 17389 solver.cpp:404]     Test net output #1: loss = 3.70404 (* 1 = 3.70404 loss)\nI0818 05:42:06.825109 17389 solver.cpp:228] Iteration 22000, loss = 0.171194\nI0818 05:42:06.825166 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 05:42:06.825183 17389 solver.cpp:244]     Train net output #1: loss = 0.171194 (* 1 = 0.171194 loss)\nI0818 05:42:06.916419 
17389 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0818 05:44:25.455724 17389 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 05:45:46.569502 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38164\nI0818 05:45:46.569738 17389 solver.cpp:404]     Test net output #1: loss = 4.14046 (* 1 = 4.14046 loss)\nI0818 05:45:47.881449 17389 solver.cpp:228] Iteration 22100, loss = 0.204469\nI0818 05:45:47.881508 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 05:45:47.881526 17389 solver.cpp:244]     Train net output #1: loss = 0.204469 (* 1 = 0.204469 loss)\nI0818 05:45:47.977620 17389 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0818 05:48:06.605556 17389 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 05:49:27.772264 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39696\nI0818 05:49:27.772493 17389 solver.cpp:404]     Test net output #1: loss = 4.13337 (* 1 = 4.13337 loss)\nI0818 05:49:29.084025 17389 solver.cpp:228] Iteration 22200, loss = 0.252082\nI0818 05:49:29.084082 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 05:49:29.084101 17389 solver.cpp:244]     Train net output #1: loss = 0.252082 (* 1 = 0.252082 loss)\nI0818 05:49:29.172919 17389 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0818 05:51:47.789474 17389 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 05:53:08.976615 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40356\nI0818 05:53:08.976879 17389 solver.cpp:404]     Test net output #1: loss = 4.13764 (* 1 = 4.13764 loss)\nI0818 05:53:10.288337 17389 solver.cpp:228] Iteration 22300, loss = 0.189298\nI0818 05:53:10.288396 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 05:53:10.288414 17389 solver.cpp:244]     Train net output #1: loss = 0.189298 (* 1 = 0.189298 loss)\nI0818 05:53:10.378327 17389 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0818 05:55:28.922272 17389 solver.cpp:337] Iteration 22400, Testing net 
(#0)\nI0818 05:56:49.981376 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38624\nI0818 05:56:49.981596 17389 solver.cpp:404]     Test net output #1: loss = 4.20399 (* 1 = 4.20399 loss)\nI0818 05:56:51.292738 17389 solver.cpp:228] Iteration 22400, loss = 0.287152\nI0818 05:56:51.292796 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 05:56:51.292814 17389 solver.cpp:244]     Train net output #1: loss = 0.287152 (* 1 = 0.287152 loss)\nI0818 05:56:51.387871 17389 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0818 05:59:09.946410 17389 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 06:00:31.100389 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42436\nI0818 06:00:31.100607 17389 solver.cpp:404]     Test net output #1: loss = 3.56367 (* 1 = 3.56367 loss)\nI0818 06:00:32.411967 17389 solver.cpp:228] Iteration 22500, loss = 0.320225\nI0818 06:00:32.412025 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 06:00:32.412044 17389 solver.cpp:244]     Train net output #1: loss = 0.320225 (* 1 = 0.320225 loss)\nI0818 06:00:32.504690 17389 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0818 06:02:51.032402 17389 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 06:04:12.218896 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4214\nI0818 06:04:12.219154 17389 solver.cpp:404]     Test net output #1: loss = 3.81649 (* 1 = 3.81649 loss)\nI0818 06:04:13.528930 17389 solver.cpp:228] Iteration 22600, loss = 0.233053\nI0818 06:04:13.528986 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 06:04:13.529003 17389 solver.cpp:244]     Train net output #1: loss = 0.233053 (* 1 = 0.233053 loss)\nI0818 06:04:13.623369 17389 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0818 06:06:32.245612 17389 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:07:53.307101 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40736\nI0818 06:07:53.307334 17389 solver.cpp:404] 
    Test net output #1: loss = 3.90216 (* 1 = 3.90216 loss)\nI0818 06:07:54.617769 17389 solver.cpp:228] Iteration 22700, loss = 0.209984\nI0818 06:07:54.617827 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 06:07:54.617846 17389 solver.cpp:244]     Train net output #1: loss = 0.209983 (* 1 = 0.209983 loss)\nI0818 06:07:54.707859 17389 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0818 06:10:13.197311 17389 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:11:34.359104 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4196\nI0818 06:11:34.359411 17389 solver.cpp:404]     Test net output #1: loss = 3.70414 (* 1 = 3.70414 loss)\nI0818 06:11:35.669890 17389 solver.cpp:228] Iteration 22800, loss = 0.24179\nI0818 06:11:35.669931 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 06:11:35.669946 17389 solver.cpp:244]     Train net output #1: loss = 0.24179 (* 1 = 0.24179 loss)\nI0818 06:11:35.763280 17389 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0818 06:13:54.338948 17389 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:15:15.847827 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42816\nI0818 06:15:15.848165 17389 solver.cpp:404]     Test net output #1: loss = 3.66457 (* 1 = 3.66457 loss)\nI0818 06:15:17.158043 17389 solver.cpp:228] Iteration 22900, loss = 0.167922\nI0818 06:15:17.158099 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 06:15:17.158116 17389 solver.cpp:244]     Train net output #1: loss = 0.167922 (* 1 = 0.167922 loss)\nI0818 06:15:17.247795 17389 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0818 06:17:35.738785 17389 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 06:18:57.250697 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42356\nI0818 06:18:57.251029 17389 solver.cpp:404]     Test net output #1: loss = 3.79037 (* 1 = 3.79037 loss)\nI0818 06:18:58.560587 17389 solver.cpp:228] Iteration 23000, loss = 
0.241809\nI0818 06:18:58.560628 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 06:18:58.560643 17389 solver.cpp:244]     Train net output #1: loss = 0.241808 (* 1 = 0.241808 loss)\nI0818 06:18:58.651823 17389 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0818 06:21:17.238306 17389 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:22:38.768736 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41232\nI0818 06:22:38.769050 17389 solver.cpp:404]     Test net output #1: loss = 3.81259 (* 1 = 3.81259 loss)\nI0818 06:22:40.078980 17389 solver.cpp:228] Iteration 23100, loss = 0.224827\nI0818 06:22:40.079036 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 06:22:40.079054 17389 solver.cpp:244]     Train net output #1: loss = 0.224827 (* 1 = 0.224827 loss)\nI0818 06:22:40.174681 17389 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0818 06:24:58.721174 17389 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:26:20.251652 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42456\nI0818 06:26:20.251962 17389 solver.cpp:404]     Test net output #1: loss = 3.55044 (* 1 = 3.55044 loss)\nI0818 06:26:21.562386 17389 solver.cpp:228] Iteration 23200, loss = 0.273345\nI0818 06:26:21.562429 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 06:26:21.562445 17389 solver.cpp:244]     Train net output #1: loss = 0.273345 (* 1 = 0.273345 loss)\nI0818 06:26:21.651605 17389 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0818 06:28:40.215523 17389 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:30:01.746861 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37928\nI0818 06:30:01.747145 17389 solver.cpp:404]     Test net output #1: loss = 4.24875 (* 1 = 4.24875 loss)\nI0818 06:30:03.057466 17389 solver.cpp:228] Iteration 23300, loss = 0.259938\nI0818 06:30:03.057507 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 06:30:03.057523 17389 
solver.cpp:244]     Train net output #1: loss = 0.259938 (* 1 = 0.259938 loss)\nI0818 06:30:03.153172 17389 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0818 06:32:21.827405 17389 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:33:43.347185 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39892\nI0818 06:33:43.347508 17389 solver.cpp:404]     Test net output #1: loss = 4.03854 (* 1 = 4.03854 loss)\nI0818 06:33:44.657563 17389 solver.cpp:228] Iteration 23400, loss = 0.246333\nI0818 06:33:44.657620 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 06:33:44.657639 17389 solver.cpp:244]     Train net output #1: loss = 0.246333 (* 1 = 0.246333 loss)\nI0818 06:33:44.754005 17389 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0818 06:36:03.283831 17389 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 06:37:24.807678 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41408\nI0818 06:37:24.808009 17389 solver.cpp:404]     Test net output #1: loss = 3.76833 (* 1 = 3.76833 loss)\nI0818 06:37:26.118121 17389 solver.cpp:228] Iteration 23500, loss = 0.200125\nI0818 06:37:26.118178 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 06:37:26.118196 17389 solver.cpp:244]     Train net output #1: loss = 0.200124 (* 1 = 0.200124 loss)\nI0818 06:37:26.209820 17389 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0818 06:39:44.755014 17389 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0818 06:41:06.281246 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42696\nI0818 06:41:06.281546 17389 solver.cpp:404]     Test net output #1: loss = 3.68794 (* 1 = 3.68794 loss)\nI0818 06:41:07.592144 17389 solver.cpp:228] Iteration 23600, loss = 0.225878\nI0818 06:41:07.592202 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 06:41:07.592221 17389 solver.cpp:244]     Train net output #1: loss = 0.225877 (* 1 = 0.225877 loss)\nI0818 06:41:07.679432 17389 sgd_solver.cpp:166] 
Iteration 23600, lr = 0.35\nI0818 06:43:26.225793 17389 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 06:44:47.753522 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38216\nI0818 06:44:47.753837 17389 solver.cpp:404]     Test net output #1: loss = 4.49898 (* 1 = 4.49898 loss)\nI0818 06:44:49.064405 17389 solver.cpp:228] Iteration 23700, loss = 0.194177\nI0818 06:44:49.064461 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 06:44:49.064479 17389 solver.cpp:244]     Train net output #1: loss = 0.194177 (* 1 = 0.194177 loss)\nI0818 06:44:49.159091 17389 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0818 06:47:07.739411 17389 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 06:48:29.267498 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4172\nI0818 06:48:29.267804 17389 solver.cpp:404]     Test net output #1: loss = 3.93004 (* 1 = 3.93004 loss)\nI0818 06:48:30.577937 17389 solver.cpp:228] Iteration 23800, loss = 0.229737\nI0818 06:48:30.577996 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 06:48:30.578013 17389 solver.cpp:244]     Train net output #1: loss = 0.229737 (* 1 = 0.229737 loss)\nI0818 06:48:30.667037 17389 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0818 06:50:49.240226 17389 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 06:52:10.760861 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40104\nI0818 06:52:10.761147 17389 solver.cpp:404]     Test net output #1: loss = 4.04293 (* 1 = 4.04293 loss)\nI0818 06:52:12.071269 17389 solver.cpp:228] Iteration 23900, loss = 0.340302\nI0818 06:52:12.071326 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 06:52:12.071344 17389 solver.cpp:244]     Train net output #1: loss = 0.340301 (* 1 = 0.340301 loss)\nI0818 06:52:12.167488 17389 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0818 06:54:30.770540 17389 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 06:55:52.282960 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.4062\nI0818 06:55:52.283269 17389 solver.cpp:404]     Test net output #1: loss = 3.81535 (* 1 = 3.81535 loss)\nI0818 06:55:53.593678 17389 solver.cpp:228] Iteration 24000, loss = 0.222543\nI0818 06:55:53.593740 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 06:55:53.593758 17389 solver.cpp:244]     Train net output #1: loss = 0.222543 (* 1 = 0.222543 loss)\nI0818 06:55:53.684741 17389 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0818 06:58:12.267341 17389 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 06:59:33.739910 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38292\nI0818 06:59:33.740237 17389 solver.cpp:404]     Test net output #1: loss = 4.37538 (* 1 = 4.37538 loss)\nI0818 06:59:35.050889 17389 solver.cpp:228] Iteration 24100, loss = 0.188538\nI0818 06:59:35.050932 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 06:59:35.050948 17389 solver.cpp:244]     Train net output #1: loss = 0.188538 (* 1 = 0.188538 loss)\nI0818 06:59:35.139081 17389 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0818 07:01:53.716634 17389 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 07:03:15.178225 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36316\nI0818 07:03:15.178562 17389 solver.cpp:404]     Test net output #1: loss = 4.33306 (* 1 = 4.33306 loss)\nI0818 07:03:16.488077 17389 solver.cpp:228] Iteration 24200, loss = 0.225913\nI0818 07:03:16.488137 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 07:03:16.488157 17389 solver.cpp:244]     Train net output #1: loss = 0.225913 (* 1 = 0.225913 loss)\nI0818 07:03:16.582520 17389 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0818 07:05:35.093502 17389 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 07:06:56.574388 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41412\nI0818 07:06:56.574712 17389 solver.cpp:404]     Test net output #1: loss = 
3.97638 (* 1 = 3.97638 loss)\nI0818 07:06:57.884891 17389 solver.cpp:228] Iteration 24300, loss = 0.284347\nI0818 07:06:57.884948 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 07:06:57.884965 17389 solver.cpp:244]     Train net output #1: loss = 0.284346 (* 1 = 0.284346 loss)\nI0818 07:06:57.974154 17389 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0818 07:09:16.644093 17389 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:10:38.136850 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4006\nI0818 07:10:38.137168 17389 solver.cpp:404]     Test net output #1: loss = 4.03294 (* 1 = 4.03294 loss)\nI0818 07:10:39.447078 17389 solver.cpp:228] Iteration 24400, loss = 0.232022\nI0818 07:10:39.447130 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 07:10:39.447147 17389 solver.cpp:244]     Train net output #1: loss = 0.232022 (* 1 = 0.232022 loss)\nI0818 07:10:39.541468 17389 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0818 07:12:58.353895 17389 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:14:19.832695 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39824\nI0818 07:14:19.833026 17389 solver.cpp:404]     Test net output #1: loss = 3.90182 (* 1 = 3.90182 loss)\nI0818 07:14:21.143075 17389 solver.cpp:228] Iteration 24500, loss = 0.231328\nI0818 07:14:21.143127 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 07:14:21.143144 17389 solver.cpp:244]     Train net output #1: loss = 0.231328 (* 1 = 0.231328 loss)\nI0818 07:14:21.242343 17389 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0818 07:16:40.055768 17389 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 07:18:01.555411 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42996\nI0818 07:18:01.555727 17389 solver.cpp:404]     Test net output #1: loss = 3.73973 (* 1 = 3.73973 loss)\nI0818 07:18:02.865681 17389 solver.cpp:228] Iteration 24600, loss = 0.164806\nI0818 07:18:02.865736 17389 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 07:18:02.865753 17389 solver.cpp:244]     Train net output #1: loss = 0.164806 (* 1 = 0.164806 loss)\nI0818 07:18:02.956512 17389 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0818 07:20:21.741256 17389 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:21:43.225437 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40244\nI0818 07:21:43.225767 17389 solver.cpp:404]     Test net output #1: loss = 4.09579 (* 1 = 4.09579 loss)\nI0818 07:21:44.536016 17389 solver.cpp:228] Iteration 24700, loss = 0.206153\nI0818 07:21:44.536070 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 07:21:44.536087 17389 solver.cpp:244]     Train net output #1: loss = 0.206153 (* 1 = 0.206153 loss)\nI0818 07:21:44.637102 17389 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0818 07:24:03.655211 17389 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:25:25.150460 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37288\nI0818 07:25:25.150775 17389 solver.cpp:404]     Test net output #1: loss = 4.73617 (* 1 = 4.73617 loss)\nI0818 07:25:26.460858 17389 solver.cpp:228] Iteration 24800, loss = 0.237946\nI0818 07:25:26.460912 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 07:25:26.460929 17389 solver.cpp:244]     Train net output #1: loss = 0.237946 (* 1 = 0.237946 loss)\nI0818 07:25:26.561440 17389 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0818 07:27:45.519829 17389 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:29:07.016856 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41192\nI0818 07:29:07.017191 17389 solver.cpp:404]     Test net output #1: loss = 3.85876 (* 1 = 3.85876 loss)\nI0818 07:29:08.327666 17389 solver.cpp:228] Iteration 24900, loss = 0.257124\nI0818 07:29:08.327702 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 07:29:08.327723 17389 solver.cpp:244]     Train net output #1: loss = 
0.257123 (* 1 = 0.257123 loss)\nI0818 07:29:08.419672 17389 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0818 07:31:27.275312 17389 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:32:48.784207 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42656\nI0818 07:32:48.784544 17389 solver.cpp:404]     Test net output #1: loss = 3.9774 (* 1 = 3.9774 loss)\nI0818 07:32:50.094354 17389 solver.cpp:228] Iteration 25000, loss = 0.28231\nI0818 07:32:50.094408 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 07:32:50.094425 17389 solver.cpp:244]     Train net output #1: loss = 0.28231 (* 1 = 0.28231 loss)\nI0818 07:32:50.186288 17389 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0818 07:35:09.047629 17389 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 07:36:30.546448 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3736\nI0818 07:36:30.546773 17389 solver.cpp:404]     Test net output #1: loss = 4.50619 (* 1 = 4.50619 loss)\nI0818 07:36:31.856601 17389 solver.cpp:228] Iteration 25100, loss = 0.206651\nI0818 07:36:31.856653 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 07:36:31.856670 17389 solver.cpp:244]     Train net output #1: loss = 0.206651 (* 1 = 0.206651 loss)\nI0818 07:36:31.955682 17389 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0818 07:38:50.821255 17389 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 07:40:12.328717 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43188\nI0818 07:40:12.329031 17389 solver.cpp:404]     Test net output #1: loss = 3.79284 (* 1 = 3.79284 loss)\nI0818 07:40:13.639168 17389 solver.cpp:228] Iteration 25200, loss = 0.205448\nI0818 07:40:13.639220 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 07:40:13.639235 17389 solver.cpp:244]     Train net output #1: loss = 0.205448 (* 1 = 0.205448 loss)\nI0818 07:40:13.732167 17389 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0818 07:42:32.703590 17389 
solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 07:43:54.189716 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3654\nI0818 07:43:54.190045 17389 solver.cpp:404]     Test net output #1: loss = 4.87909 (* 1 = 4.87909 loss)\nI0818 07:43:55.500211 17389 solver.cpp:228] Iteration 25300, loss = 0.241648\nI0818 07:43:55.500267 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 07:43:55.500283 17389 solver.cpp:244]     Train net output #1: loss = 0.241648 (* 1 = 0.241648 loss)\nI0818 07:43:55.593551 17389 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0818 07:46:14.468552 17389 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 07:47:35.911778 17389 solver.cpp:404]     Test net output #0: accuracy = 0.407\nI0818 07:47:35.912019 17389 solver.cpp:404]     Test net output #1: loss = 3.91688 (* 1 = 3.91688 loss)\nI0818 07:47:37.223085 17389 solver.cpp:228] Iteration 25400, loss = 0.230148\nI0818 07:47:37.223141 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 07:47:37.223158 17389 solver.cpp:244]     Train net output #1: loss = 0.230148 (* 1 = 0.230148 loss)\nI0818 07:47:37.318470 17389 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0818 07:49:56.096226 17389 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 07:51:17.596256 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41096\nI0818 07:51:17.596571 17389 solver.cpp:404]     Test net output #1: loss = 3.92813 (* 1 = 3.92813 loss)\nI0818 07:51:18.906847 17389 solver.cpp:228] Iteration 25500, loss = 0.324072\nI0818 07:51:18.906903 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 07:51:18.906920 17389 solver.cpp:244]     Train net output #1: loss = 0.324072 (* 1 = 0.324072 loss)\nI0818 07:51:18.996897 17389 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0818 07:53:37.399133 17389 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 07:54:58.026923 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.40336\nI0818 07:54:58.027204 17389 solver.cpp:404]     Test net output #1: loss = 4.35296 (* 1 = 4.35296 loss)\nI0818 07:54:59.334650 17389 solver.cpp:228] Iteration 25600, loss = 0.189572\nI0818 07:54:59.334695 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 07:54:59.334712 17389 solver.cpp:244]     Train net output #1: loss = 0.189572 (* 1 = 0.189572 loss)\nI0818 07:54:59.425592 17389 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0818 07:57:17.840975 17389 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 07:58:38.464841 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41648\nI0818 07:58:38.465096 17389 solver.cpp:404]     Test net output #1: loss = 4.21913 (* 1 = 4.21913 loss)\nI0818 07:58:39.772213 17389 solver.cpp:228] Iteration 25700, loss = 0.26349\nI0818 07:58:39.772258 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 07:58:39.772274 17389 solver.cpp:244]     Train net output #1: loss = 0.26349 (* 1 = 0.26349 loss)\nI0818 07:58:39.864264 17389 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0818 08:00:58.382258 17389 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 08:02:19.012737 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41672\nI0818 08:02:19.013015 17389 solver.cpp:404]     Test net output #1: loss = 4.1061 (* 1 = 4.1061 loss)\nI0818 08:02:20.319217 17389 solver.cpp:228] Iteration 25800, loss = 0.165796\nI0818 08:02:20.319260 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 08:02:20.319278 17389 solver.cpp:244]     Train net output #1: loss = 0.165796 (* 1 = 0.165796 loss)\nI0818 08:02:20.414758 17389 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0818 08:04:38.935951 17389 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 08:05:59.556424 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38808\nI0818 08:05:59.556697 17389 solver.cpp:404]     Test net output #1: loss = 4.93079 (* 1 = 4.93079 loss)\nI0818 08:06:00.864362 17389 
solver.cpp:228] Iteration 25900, loss = 0.22313\nI0818 08:06:00.864405 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 08:06:00.864423 17389 solver.cpp:244]     Train net output #1: loss = 0.22313 (* 1 = 0.22313 loss)\nI0818 08:06:00.958560 17389 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0818 08:08:19.413030 17389 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:09:40.038842 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4182\nI0818 08:09:40.039129 17389 solver.cpp:404]     Test net output #1: loss = 4.1064 (* 1 = 4.1064 loss)\nI0818 08:09:41.346374 17389 solver.cpp:228] Iteration 26000, loss = 0.154356\nI0818 08:09:41.346418 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 08:09:41.346434 17389 solver.cpp:244]     Train net output #1: loss = 0.154356 (* 1 = 0.154356 loss)\nI0818 08:09:41.447413 17389 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0818 08:11:59.917068 17389 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:13:20.550065 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39336\nI0818 08:13:20.550334 17389 solver.cpp:404]     Test net output #1: loss = 4.34176 (* 1 = 4.34176 loss)\nI0818 08:13:21.857890 17389 solver.cpp:228] Iteration 26100, loss = 0.233614\nI0818 08:13:21.857935 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 08:13:21.857952 17389 solver.cpp:244]     Train net output #1: loss = 0.233614 (* 1 = 0.233614 loss)\nI0818 08:13:21.952409 17389 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0818 08:15:40.432543 17389 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 08:17:01.054854 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38816\nI0818 08:17:01.055125 17389 solver.cpp:404]     Test net output #1: loss = 4.35963 (* 1 = 4.35963 loss)\nI0818 08:17:02.361377 17389 solver.cpp:228] Iteration 26200, loss = 0.279741\nI0818 08:17:02.361418 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 
08:17:02.361435 17389 solver.cpp:244]     Train net output #1: loss = 0.279741 (* 1 = 0.279741 loss)\nI0818 08:17:02.460418 17389 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0818 08:19:21.005327 17389 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:20:41.632918 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4216\nI0818 08:20:41.633201 17389 solver.cpp:404]     Test net output #1: loss = 3.87196 (* 1 = 3.87196 loss)\nI0818 08:20:42.940657 17389 solver.cpp:228] Iteration 26300, loss = 0.236961\nI0818 08:20:42.940701 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 08:20:42.940716 17389 solver.cpp:244]     Train net output #1: loss = 0.236961 (* 1 = 0.236961 loss)\nI0818 08:20:43.039228 17389 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0818 08:23:01.670378 17389 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:24:22.291872 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43904\nI0818 08:24:22.292145 17389 solver.cpp:404]     Test net output #1: loss = 3.53275 (* 1 = 3.53275 loss)\nI0818 08:24:23.599566 17389 solver.cpp:228] Iteration 26400, loss = 0.203038\nI0818 08:24:23.599609 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 08:24:23.599625 17389 solver.cpp:244]     Train net output #1: loss = 0.203038 (* 1 = 0.203038 loss)\nI0818 08:24:23.692364 17389 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0818 08:26:42.185745 17389 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:28:02.812696 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42396\nI0818 08:28:02.812970 17389 solver.cpp:404]     Test net output #1: loss = 3.803 (* 1 = 3.803 loss)\nI0818 08:28:04.120674 17389 solver.cpp:228] Iteration 26500, loss = 0.242329\nI0818 08:28:04.120718 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 08:28:04.120733 17389 solver.cpp:244]     Train net output #1: loss = 0.242329 (* 1 = 0.242329 loss)\nI0818 08:28:04.216706 17389 
sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0818 08:30:22.808054 17389 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:31:43.429143 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44424\nI0818 08:31:43.429417 17389 solver.cpp:404]     Test net output #1: loss = 3.52356 (* 1 = 3.52356 loss)\nI0818 08:31:44.736120 17389 solver.cpp:228] Iteration 26600, loss = 0.136121\nI0818 08:31:44.736160 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 08:31:44.736177 17389 solver.cpp:244]     Train net output #1: loss = 0.13612 (* 1 = 0.13612 loss)\nI0818 08:31:44.831620 17389 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0818 08:34:03.457288 17389 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 08:35:24.075817 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38172\nI0818 08:35:24.076091 17389 solver.cpp:404]     Test net output #1: loss = 4.42095 (* 1 = 4.42095 loss)\nI0818 08:35:25.382292 17389 solver.cpp:228] Iteration 26700, loss = 0.194632\nI0818 08:35:25.382336 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 08:35:25.382352 17389 solver.cpp:244]     Train net output #1: loss = 0.194632 (* 1 = 0.194632 loss)\nI0818 08:35:25.477205 17389 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0818 08:37:44.060482 17389 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 08:39:04.685950 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43872\nI0818 08:39:04.686211 17389 solver.cpp:404]     Test net output #1: loss = 3.57378 (* 1 = 3.57378 loss)\nI0818 08:39:05.994144 17389 solver.cpp:228] Iteration 26800, loss = 0.201774\nI0818 08:39:05.994189 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 08:39:05.994205 17389 solver.cpp:244]     Train net output #1: loss = 0.201773 (* 1 = 0.201773 loss)\nI0818 08:39:06.085809 17389 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0818 08:41:24.557564 17389 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 
08:42:45.184075 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4178\nI0818 08:42:45.184345 17389 solver.cpp:404]     Test net output #1: loss = 3.65803 (* 1 = 3.65803 loss)\nI0818 08:42:46.490942 17389 solver.cpp:228] Iteration 26900, loss = 0.214174\nI0818 08:42:46.490984 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 08:42:46.491001 17389 solver.cpp:244]     Train net output #1: loss = 0.214174 (* 1 = 0.214174 loss)\nI0818 08:42:46.588214 17389 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0818 08:45:04.992036 17389 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 08:46:25.609910 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39832\nI0818 08:46:25.610173 17389 solver.cpp:404]     Test net output #1: loss = 4.41264 (* 1 = 4.41264 loss)\nI0818 08:46:26.916565 17389 solver.cpp:228] Iteration 27000, loss = 0.251888\nI0818 08:46:26.916610 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 08:46:26.916625 17389 solver.cpp:244]     Train net output #1: loss = 0.251888 (* 1 = 0.251888 loss)\nI0818 08:46:27.014542 17389 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0818 08:48:45.450484 17389 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 08:50:06.076654 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4034\nI0818 08:50:06.076934 17389 solver.cpp:404]     Test net output #1: loss = 3.92508 (* 1 = 3.92508 loss)\nI0818 08:50:07.383227 17389 solver.cpp:228] Iteration 27100, loss = 0.146163\nI0818 08:50:07.383271 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:50:07.383287 17389 solver.cpp:244]     Train net output #1: loss = 0.146162 (* 1 = 0.146162 loss)\nI0818 08:50:07.476430 17389 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0818 08:52:25.913496 17389 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 08:53:46.538492 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43168\nI0818 08:53:46.538769 17389 solver.cpp:404]     Test net 
output #1: loss = 3.64334 (* 1 = 3.64334 loss)\nI0818 08:53:47.844966 17389 solver.cpp:228] Iteration 27200, loss = 0.237334\nI0818 08:53:47.845006 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 08:53:47.845022 17389 solver.cpp:244]     Train net output #1: loss = 0.237334 (* 1 = 0.237334 loss)\nI0818 08:53:47.945598 17389 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0818 08:56:06.349768 17389 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 08:57:26.968819 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45848\nI0818 08:57:26.969085 17389 solver.cpp:404]     Test net output #1: loss = 3.28357 (* 1 = 3.28357 loss)\nI0818 08:57:28.275341 17389 solver.cpp:228] Iteration 27300, loss = 0.183027\nI0818 08:57:28.275380 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 08:57:28.275396 17389 solver.cpp:244]     Train net output #1: loss = 0.183027 (* 1 = 0.183027 loss)\nI0818 08:57:28.368115 17389 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0818 08:59:46.745121 17389 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 09:01:07.363487 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42504\nI0818 09:01:07.363746 17389 solver.cpp:404]     Test net output #1: loss = 3.93623 (* 1 = 3.93623 loss)\nI0818 09:01:08.670007 17389 solver.cpp:228] Iteration 27400, loss = 0.180925\nI0818 09:01:08.670047 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 09:01:08.670063 17389 solver.cpp:244]     Train net output #1: loss = 0.180925 (* 1 = 0.180925 loss)\nI0818 09:01:08.765218 17389 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0818 09:03:27.179342 17389 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 09:04:47.814515 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39656\nI0818 09:04:47.814785 17389 solver.cpp:404]     Test net output #1: loss = 4.13933 (* 1 = 4.13933 loss)\nI0818 09:04:49.121834 17389 solver.cpp:228] Iteration 27500, loss = 0.196711\nI0818 
09:04:49.121877 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 09:04:49.121901 17389 solver.cpp:244]     Train net output #1: loss = 0.19671 (* 1 = 0.19671 loss)\nI0818 09:04:49.215304 17389 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0818 09:07:07.721017 17389 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:08:28.349526 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44988\nI0818 09:08:28.349835 17389 solver.cpp:404]     Test net output #1: loss = 3.38619 (* 1 = 3.38619 loss)\nI0818 09:08:29.657054 17389 solver.cpp:228] Iteration 27600, loss = 0.18122\nI0818 09:08:29.657097 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 09:08:29.657120 17389 solver.cpp:244]     Train net output #1: loss = 0.181219 (* 1 = 0.181219 loss)\nI0818 09:08:29.751447 17389 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0818 09:10:48.171505 17389 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:12:08.879436 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40604\nI0818 09:12:08.879753 17389 solver.cpp:404]     Test net output #1: loss = 4.13848 (* 1 = 4.13848 loss)\nI0818 09:12:10.189577 17389 solver.cpp:228] Iteration 27700, loss = 0.17976\nI0818 09:12:10.189625 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 09:12:10.189642 17389 solver.cpp:244]     Train net output #1: loss = 0.17976 (* 1 = 0.17976 loss)\nI0818 09:12:10.285415 17389 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0818 09:14:28.697389 17389 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 09:15:49.322433 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39272\nI0818 09:15:49.322703 17389 solver.cpp:404]     Test net output #1: loss = 4.7975 (* 1 = 4.7975 loss)\nI0818 09:15:50.629799 17389 solver.cpp:228] Iteration 27800, loss = 0.161507\nI0818 09:15:50.629843 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 09:15:50.629859 17389 solver.cpp:244]     Train net output 
#1: loss = 0.161506 (* 1 = 0.161506 loss)\nI0818 09:15:50.725399 17389 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0818 09:18:09.137842 17389 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:19:29.763877 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43888\nI0818 09:19:29.764106 17389 solver.cpp:404]     Test net output #1: loss = 3.85699 (* 1 = 3.85699 loss)\nI0818 09:19:31.071774 17389 solver.cpp:228] Iteration 27900, loss = 0.339624\nI0818 09:19:31.071818 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 09:19:31.071835 17389 solver.cpp:244]     Train net output #1: loss = 0.339623 (* 1 = 0.339623 loss)\nI0818 09:19:31.167088 17389 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0818 09:21:49.818182 17389 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:23:10.439532 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40356\nI0818 09:23:10.439764 17389 solver.cpp:404]     Test net output #1: loss = 4.21424 (* 1 = 4.21424 loss)\nI0818 09:23:11.747272 17389 solver.cpp:228] Iteration 28000, loss = 0.193506\nI0818 09:23:11.747316 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 09:23:11.747333 17389 solver.cpp:244]     Train net output #1: loss = 0.193505 (* 1 = 0.193505 loss)\nI0818 09:23:11.845582 17389 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0818 09:25:30.349200 17389 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:26:50.973987 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4268\nI0818 09:26:50.974215 17389 solver.cpp:404]     Test net output #1: loss = 3.92402 (* 1 = 3.92402 loss)\nI0818 09:26:52.281898 17389 solver.cpp:228] Iteration 28100, loss = 0.13394\nI0818 09:26:52.281941 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 09:26:52.281957 17389 solver.cpp:244]     Train net output #1: loss = 0.13394 (* 1 = 0.13394 loss)\nI0818 09:26:52.378585 17389 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0818 09:29:10.886134 
17389 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:30:31.508216 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40884\nI0818 09:30:31.508463 17389 solver.cpp:404]     Test net output #1: loss = 4.45673 (* 1 = 4.45673 loss)\nI0818 09:30:32.815991 17389 solver.cpp:228] Iteration 28200, loss = 0.192753\nI0818 09:30:32.816033 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 09:30:32.816049 17389 solver.cpp:244]     Train net output #1: loss = 0.192753 (* 1 = 0.192753 loss)\nI0818 09:30:32.912706 17389 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0818 09:32:51.374804 17389 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 09:34:11.981034 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43992\nI0818 09:34:11.981276 17389 solver.cpp:404]     Test net output #1: loss = 3.72083 (* 1 = 3.72083 loss)\nI0818 09:34:13.287726 17389 solver.cpp:228] Iteration 28300, loss = 0.24073\nI0818 09:34:13.287768 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 09:34:13.287791 17389 solver.cpp:244]     Train net output #1: loss = 0.24073 (* 1 = 0.24073 loss)\nI0818 09:34:13.381592 17389 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0818 09:36:31.760026 17389 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 09:37:52.372289 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42416\nI0818 09:37:52.372529 17389 solver.cpp:404]     Test net output #1: loss = 4.10857 (* 1 = 4.10857 loss)\nI0818 09:37:53.678922 17389 solver.cpp:228] Iteration 28400, loss = 0.238629\nI0818 09:37:53.678964 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 09:37:53.678988 17389 solver.cpp:244]     Train net output #1: loss = 0.238628 (* 1 = 0.238628 loss)\nI0818 09:37:53.772958 17389 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0818 09:40:12.191846 17389 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 09:41:32.795979 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.42848\nI0818 09:41:32.796213 17389 solver.cpp:404]     Test net output #1: loss = 4.00021 (* 1 = 4.00021 loss)\nI0818 09:41:34.102365 17389 solver.cpp:228] Iteration 28500, loss = 0.245711\nI0818 09:41:34.102408 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 09:41:34.102432 17389 solver.cpp:244]     Train net output #1: loss = 0.245711 (* 1 = 0.245711 loss)\nI0818 09:41:34.199251 17389 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0818 09:43:52.765471 17389 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 09:45:13.370239 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43272\nI0818 09:45:13.370491 17389 solver.cpp:404]     Test net output #1: loss = 3.84113 (* 1 = 3.84113 loss)\nI0818 09:45:14.677546 17389 solver.cpp:228] Iteration 28600, loss = 0.237644\nI0818 09:45:14.677587 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 09:45:14.677611 17389 solver.cpp:244]     Train net output #1: loss = 0.237644 (* 1 = 0.237644 loss)\nI0818 09:45:14.774081 17389 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0818 09:47:33.327236 17389 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 09:48:53.941740 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43816\nI0818 09:48:53.941972 17389 solver.cpp:404]     Test net output #1: loss = 3.67615 (* 1 = 3.67615 loss)\nI0818 09:48:55.248556 17389 solver.cpp:228] Iteration 28700, loss = 0.187355\nI0818 09:48:55.248601 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 09:48:55.248625 17389 solver.cpp:244]     Train net output #1: loss = 0.187355 (* 1 = 0.187355 loss)\nI0818 09:48:55.340131 17389 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0818 09:51:13.722350 17389 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 09:52:34.338672 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39064\nI0818 09:52:34.338943 17389 solver.cpp:404]     Test net output #1: loss = 4.13445 (* 1 = 4.13445 loss)\nI0818 09:52:35.645736 
17389 solver.cpp:228] Iteration 28800, loss = 0.277232\nI0818 09:52:35.645783 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 09:52:35.645807 17389 solver.cpp:244]     Train net output #1: loss = 0.277231 (* 1 = 0.277231 loss)\nI0818 09:52:35.732337 17389 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0818 09:54:54.214264 17389 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 09:56:14.840298 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40888\nI0818 09:56:14.840575 17389 solver.cpp:404]     Test net output #1: loss = 4.36853 (* 1 = 4.36853 loss)\nI0818 09:56:16.146853 17389 solver.cpp:228] Iteration 28900, loss = 0.225578\nI0818 09:56:16.146899 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 09:56:16.146922 17389 solver.cpp:244]     Train net output #1: loss = 0.225578 (* 1 = 0.225578 loss)\nI0818 09:56:16.232442 17389 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0818 09:58:34.622189 17389 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 09:59:55.235183 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44196\nI0818 09:59:55.235452 17389 solver.cpp:404]     Test net output #1: loss = 3.77102 (* 1 = 3.77102 loss)\nI0818 09:59:56.541412 17389 solver.cpp:228] Iteration 29000, loss = 0.268023\nI0818 09:59:56.541460 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 09:59:56.541482 17389 solver.cpp:244]     Train net output #1: loss = 0.268022 (* 1 = 0.268022 loss)\nI0818 09:59:56.628413 17389 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0818 10:02:15.001735 17389 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 10:03:35.615401 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45488\nI0818 10:03:35.615665 17389 solver.cpp:404]     Test net output #1: loss = 3.40913 (* 1 = 3.40913 loss)\nI0818 10:03:36.922775 17389 solver.cpp:228] Iteration 29100, loss = 0.174619\nI0818 10:03:36.922822 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.952\nI0818 10:03:36.922847 17389 solver.cpp:244]     Train net output #1: loss = 0.174619 (* 1 = 0.174619 loss)\nI0818 10:03:37.010164 17389 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0818 10:05:55.442065 17389 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:07:16.053694 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44764\nI0818 10:07:16.053925 17389 solver.cpp:404]     Test net output #1: loss = 3.42999 (* 1 = 3.42999 loss)\nI0818 10:07:17.361846 17389 solver.cpp:228] Iteration 29200, loss = 0.209824\nI0818 10:07:17.361893 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:07:17.361917 17389 solver.cpp:244]     Train net output #1: loss = 0.209824 (* 1 = 0.209824 loss)\nI0818 10:07:17.447914 17389 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0818 10:09:35.819761 17389 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:10:56.430971 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41392\nI0818 10:10:56.431217 17389 solver.cpp:404]     Test net output #1: loss = 4.05702 (* 1 = 4.05702 loss)\nI0818 10:10:57.738564 17389 solver.cpp:228] Iteration 29300, loss = 0.253879\nI0818 10:10:57.738608 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 10:10:57.738631 17389 solver.cpp:244]     Train net output #1: loss = 0.253879 (* 1 = 0.253879 loss)\nI0818 10:10:57.821456 17389 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0818 10:13:16.086622 17389 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:14:36.704344 17389 solver.cpp:404]     Test net output #0: accuracy = 0.441\nI0818 10:14:36.704581 17389 solver.cpp:404]     Test net output #1: loss = 3.5334 (* 1 = 3.5334 loss)\nI0818 10:14:38.011396 17389 solver.cpp:228] Iteration 29400, loss = 0.230728\nI0818 10:14:38.011440 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 10:14:38.011464 17389 solver.cpp:244]     Train net output #1: loss = 0.230728 (* 1 = 0.230728 loss)\nI0818 10:14:38.104060 
17389 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0818 10:16:56.168655 17389 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:18:16.786651 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43972\nI0818 10:18:16.786893 17389 solver.cpp:404]     Test net output #1: loss = 3.40687 (* 1 = 3.40687 loss)\nI0818 10:18:18.094557 17389 solver.cpp:228] Iteration 29500, loss = 0.200785\nI0818 10:18:18.094602 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 10:18:18.094625 17389 solver.cpp:244]     Train net output #1: loss = 0.200784 (* 1 = 0.200784 loss)\nI0818 10:18:18.177817 17389 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0818 10:20:36.274840 17389 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:21:56.897259 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42496\nI0818 10:21:56.897506 17389 solver.cpp:404]     Test net output #1: loss = 4.03983 (* 1 = 4.03983 loss)\nI0818 10:21:58.205508 17389 solver.cpp:228] Iteration 29600, loss = 0.165768\nI0818 10:21:58.205559 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:21:58.205584 17389 solver.cpp:244]     Train net output #1: loss = 0.165768 (* 1 = 0.165768 loss)\nI0818 10:21:58.287760 17389 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0818 10:24:16.416852 17389 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:25:37.034474 17389 solver.cpp:404]     Test net output #0: accuracy = 0.393\nI0818 10:25:37.034723 17389 solver.cpp:404]     Test net output #1: loss = 4.42961 (* 1 = 4.42961 loss)\nI0818 10:25:38.341267 17389 solver.cpp:228] Iteration 29700, loss = 0.243723\nI0818 10:25:38.341312 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 10:25:38.341336 17389 solver.cpp:244]     Train net output #1: loss = 0.243723 (* 1 = 0.243723 loss)\nI0818 10:25:38.421048 17389 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0818 10:27:56.590884 17389 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 
10:29:17.201274 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41708\nI0818 10:29:17.201527 17389 solver.cpp:404]     Test net output #1: loss = 3.97979 (* 1 = 3.97979 loss)\nI0818 10:29:18.508401 17389 solver.cpp:228] Iteration 29800, loss = 0.270731\nI0818 10:29:18.508447 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 10:29:18.508471 17389 solver.cpp:244]     Train net output #1: loss = 0.270731 (* 1 = 0.270731 loss)\nI0818 10:29:18.594233 17389 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0818 10:31:36.731012 17389 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:32:57.351965 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46012\nI0818 10:32:57.352223 17389 solver.cpp:404]     Test net output #1: loss = 3.3638 (* 1 = 3.3638 loss)\nI0818 10:32:58.659536 17389 solver.cpp:228] Iteration 29900, loss = 0.24904\nI0818 10:32:58.659580 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 10:32:58.659603 17389 solver.cpp:244]     Train net output #1: loss = 0.24904 (* 1 = 0.24904 loss)\nI0818 10:32:58.740093 17389 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0818 10:35:16.858222 17389 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 10:36:37.465608 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44688\nI0818 10:36:37.465833 17389 solver.cpp:404]     Test net output #1: loss = 3.55993 (* 1 = 3.55993 loss)\nI0818 10:36:38.772446 17389 solver.cpp:228] Iteration 30000, loss = 0.209551\nI0818 10:36:38.772491 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:36:38.772516 17389 solver.cpp:244]     Train net output #1: loss = 0.20955 (* 1 = 0.20955 loss)\nI0818 10:36:38.852625 17389 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0818 10:38:57.003065 17389 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 10:40:17.616327 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41796\nI0818 10:40:17.616577 17389 solver.cpp:404]     Test net output 
#1: loss = 3.95181 (* 1 = 3.95181 loss)\nI0818 10:40:18.923156 17389 solver.cpp:228] Iteration 30100, loss = 0.170496\nI0818 10:40:18.923202 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 10:40:18.923226 17389 solver.cpp:244]     Train net output #1: loss = 0.170496 (* 1 = 0.170496 loss)\nI0818 10:40:19.005993 17389 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0818 10:42:37.161156 17389 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 10:43:57.783608 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43968\nI0818 10:43:57.783859 17389 solver.cpp:404]     Test net output #1: loss = 3.82906 (* 1 = 3.82906 loss)\nI0818 10:43:59.090281 17389 solver.cpp:228] Iteration 30200, loss = 0.230997\nI0818 10:43:59.090324 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 10:43:59.090348 17389 solver.cpp:244]     Train net output #1: loss = 0.230997 (* 1 = 0.230997 loss)\nI0818 10:43:59.190320 17389 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0818 10:46:17.363194 17389 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 10:47:37.978571 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45708\nI0818 10:47:37.978803 17389 solver.cpp:404]     Test net output #1: loss = 3.44143 (* 1 = 3.44143 loss)\nI0818 10:47:39.286128 17389 solver.cpp:228] Iteration 30300, loss = 0.187396\nI0818 10:47:39.286173 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 10:47:39.286197 17389 solver.cpp:244]     Train net output #1: loss = 0.187396 (* 1 = 0.187396 loss)\nI0818 10:47:39.365999 17389 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0818 10:49:57.596055 17389 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 10:51:18.201570 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40564\nI0818 10:51:18.201805 17389 solver.cpp:404]     Test net output #1: loss = 4.19725 (* 1 = 4.19725 loss)\nI0818 10:51:19.508836 17389 solver.cpp:228] Iteration 30400, loss = 0.162132\nI0818 
10:51:19.508880 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 10:51:19.508903 17389 solver.cpp:244]     Train net output #1: loss = 0.162132 (* 1 = 0.162132 loss)\nI0818 10:51:19.591565 17389 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0818 10:53:37.741833 17389 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 10:54:58.357064 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4702\nI0818 10:54:58.357311 17389 solver.cpp:404]     Test net output #1: loss = 3.27935 (* 1 = 3.27935 loss)\nI0818 10:54:59.664875 17389 solver.cpp:228] Iteration 30500, loss = 0.232845\nI0818 10:54:59.664918 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 10:54:59.664942 17389 solver.cpp:244]     Train net output #1: loss = 0.232844 (* 1 = 0.232844 loss)\nI0818 10:54:59.748227 17389 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0818 10:57:17.899134 17389 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 10:58:38.516736 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40164\nI0818 10:58:38.517035 17389 solver.cpp:404]     Test net output #1: loss = 4.50664 (* 1 = 4.50664 loss)\nI0818 10:58:39.824470 17389 solver.cpp:228] Iteration 30600, loss = 0.174684\nI0818 10:58:39.824519 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 10:58:39.824544 17389 solver.cpp:244]     Train net output #1: loss = 0.174684 (* 1 = 0.174684 loss)\nI0818 10:58:39.913581 17389 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0818 11:00:58.223690 17389 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 11:02:18.832854 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45192\nI0818 11:02:18.833112 17389 solver.cpp:404]     Test net output #1: loss = 3.48309 (* 1 = 3.48309 loss)\nI0818 11:02:20.140089 17389 solver.cpp:228] Iteration 30700, loss = 0.191998\nI0818 11:02:20.140132 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 11:02:20.140156 17389 solver.cpp:244]     Train 
net output #1: loss = 0.191997 (* 1 = 0.191997 loss)\nI0818 11:02:20.227252 17389 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0818 11:04:38.400106 17389 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 11:05:59.002974 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41076\nI0818 11:05:59.003222 17389 solver.cpp:404]     Test net output #1: loss = 3.97098 (* 1 = 3.97098 loss)\nI0818 11:06:00.310525 17389 solver.cpp:228] Iteration 30800, loss = 0.217135\nI0818 11:06:00.310570 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 11:06:00.310595 17389 solver.cpp:244]     Train net output #1: loss = 0.217135 (* 1 = 0.217135 loss)\nI0818 11:06:00.397279 17389 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0818 11:08:18.494993 17389 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:09:39.105840 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44672\nI0818 11:09:39.106068 17389 solver.cpp:404]     Test net output #1: loss = 3.60411 (* 1 = 3.60411 loss)\nI0818 11:09:40.413377 17389 solver.cpp:228] Iteration 30900, loss = 0.281612\nI0818 11:09:40.413420 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 11:09:40.413444 17389 solver.cpp:244]     Train net output #1: loss = 0.281612 (* 1 = 0.281612 loss)\nI0818 11:09:40.498678 17389 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0818 11:11:58.584030 17389 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:13:19.198761 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45136\nI0818 11:13:19.199017 17389 solver.cpp:404]     Test net output #1: loss = 3.57604 (* 1 = 3.57604 loss)\nI0818 11:13:20.506752 17389 solver.cpp:228] Iteration 31000, loss = 0.210037\nI0818 11:13:20.506796 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 11:13:20.506820 17389 solver.cpp:244]     Train net output #1: loss = 0.210036 (* 1 = 0.210036 loss)\nI0818 11:13:20.594796 17389 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0818 
11:15:38.717465 17389 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:16:59.314371 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42484\nI0818 11:16:59.314621 17389 solver.cpp:404]     Test net output #1: loss = 3.69273 (* 1 = 3.69273 loss)\nI0818 11:17:00.621186 17389 solver.cpp:228] Iteration 31100, loss = 0.247371\nI0818 11:17:00.621230 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 11:17:00.621253 17389 solver.cpp:244]     Train net output #1: loss = 0.247371 (* 1 = 0.247371 loss)\nI0818 11:17:00.709450 17389 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0818 11:19:18.824831 17389 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:20:39.436322 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41988\nI0818 11:20:39.436581 17389 solver.cpp:404]     Test net output #1: loss = 3.84864 (* 1 = 3.84864 loss)\nI0818 11:20:40.743712 17389 solver.cpp:228] Iteration 31200, loss = 0.104182\nI0818 11:20:40.743758 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 11:20:40.743782 17389 solver.cpp:244]     Train net output #1: loss = 0.104182 (* 1 = 0.104182 loss)\nI0818 11:20:40.831537 17389 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0818 11:22:58.936864 17389 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:24:19.543900 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45448\nI0818 11:24:19.544150 17389 solver.cpp:404]     Test net output #1: loss = 3.46685 (* 1 = 3.46685 loss)\nI0818 11:24:20.850283 17389 solver.cpp:228] Iteration 31300, loss = 0.181001\nI0818 11:24:20.850329 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 11:24:20.850353 17389 solver.cpp:244]     Train net output #1: loss = 0.181 (* 1 = 0.181 loss)\nI0818 11:24:20.936167 17389 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0818 11:26:39.190043 17389 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:27:59.807163 17389 solver.cpp:404]     Test net output #0: 
accuracy = 0.4182\nI0818 11:27:59.807405 17389 solver.cpp:404]     Test net output #1: loss = 3.91736 (* 1 = 3.91736 loss)\nI0818 11:28:01.114074 17389 solver.cpp:228] Iteration 31400, loss = 0.334498\nI0818 11:28:01.114114 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 11:28:01.114128 17389 solver.cpp:244]     Train net output #1: loss = 0.334498 (* 1 = 0.334498 loss)\nI0818 11:28:01.199540 17389 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0818 11:30:19.371256 17389 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:31:39.989184 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45052\nI0818 11:31:39.989408 17389 solver.cpp:404]     Test net output #1: loss = 3.5637 (* 1 = 3.5637 loss)\nI0818 11:31:41.296115 17389 solver.cpp:228] Iteration 31500, loss = 0.134299\nI0818 11:31:41.296156 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 11:31:41.296174 17389 solver.cpp:244]     Train net output #1: loss = 0.134299 (* 1 = 0.134299 loss)\nI0818 11:31:41.381647 17389 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0818 11:33:59.561185 17389 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 11:35:20.170733 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44844\nI0818 11:35:20.170938 17389 solver.cpp:404]     Test net output #1: loss = 3.68209 (* 1 = 3.68209 loss)\nI0818 11:35:21.477625 17389 solver.cpp:228] Iteration 31600, loss = 0.194561\nI0818 11:35:21.477668 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 11:35:21.477684 17389 solver.cpp:244]     Train net output #1: loss = 0.194561 (* 1 = 0.194561 loss)\nI0818 11:35:21.569865 17389 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0818 11:37:39.783717 17389 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 11:39:00.395112 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43256\nI0818 11:39:00.395356 17389 solver.cpp:404]     Test net output #1: loss = 3.79235 (* 1 = 3.79235 loss)\nI0818 
11:39:01.702293 17389 solver.cpp:228] Iteration 31700, loss = 0.155786\nI0818 11:39:01.702333 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 11:39:01.702349 17389 solver.cpp:244]     Train net output #1: loss = 0.155786 (* 1 = 0.155786 loss)\nI0818 11:39:01.789867 17389 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0818 11:41:19.971482 17389 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 11:42:40.593495 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42192\nI0818 11:42:40.593772 17389 solver.cpp:404]     Test net output #1: loss = 3.83745 (* 1 = 3.83745 loss)\nI0818 11:42:41.900372 17389 solver.cpp:228] Iteration 31800, loss = 0.167821\nI0818 11:42:41.900414 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 11:42:41.900430 17389 solver.cpp:244]     Train net output #1: loss = 0.167821 (* 1 = 0.167821 loss)\nI0818 11:42:41.981510 17389 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0818 11:45:00.070237 17389 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 11:46:20.688416 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39004\nI0818 11:46:20.688660 17389 solver.cpp:404]     Test net output #1: loss = 4.56969 (* 1 = 4.56969 loss)\nI0818 11:46:21.995527 17389 solver.cpp:228] Iteration 31900, loss = 0.204019\nI0818 11:46:21.995570 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:46:21.995589 17389 solver.cpp:244]     Train net output #1: loss = 0.204018 (* 1 = 0.204018 loss)\nI0818 11:46:22.093230 17389 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0818 11:48:40.245302 17389 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 11:50:00.868552 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45676\nI0818 11:50:00.868820 17389 solver.cpp:404]     Test net output #1: loss = 3.55308 (* 1 = 3.55308 loss)\nI0818 11:50:02.175143 17389 solver.cpp:228] Iteration 32000, loss = 0.2059\nI0818 11:50:02.175184 17389 solver.cpp:244]     Train net output 
#0: accuracy = 0.904\nI0818 11:50:02.175200 17389 solver.cpp:244]     Train net output #1: loss = 0.205899 (* 1 = 0.205899 loss)\nI0818 11:50:02.268784 17389 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0818 11:52:20.660750 17389 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 11:53:41.299649 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45684\nI0818 11:53:41.299873 17389 solver.cpp:404]     Test net output #1: loss = 3.29357 (* 1 = 3.29357 loss)\nI0818 11:53:42.606060 17389 solver.cpp:228] Iteration 32100, loss = 0.147646\nI0818 11:53:42.606103 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 11:53:42.606119 17389 solver.cpp:244]     Train net output #1: loss = 0.147646 (* 1 = 0.147646 loss)\nI0818 11:53:42.690142 17389 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0818 11:56:01.096227 17389 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 11:57:21.721114 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42192\nI0818 11:57:21.721341 17389 solver.cpp:404]     Test net output #1: loss = 3.77285 (* 1 = 3.77285 loss)\nI0818 11:57:23.027722 17389 solver.cpp:228] Iteration 32200, loss = 0.26942\nI0818 11:57:23.027766 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 11:57:23.027782 17389 solver.cpp:244]     Train net output #1: loss = 0.26942 (* 1 = 0.26942 loss)\nI0818 11:57:23.111948 17389 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0818 11:59:41.501374 17389 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 12:01:02.129863 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44028\nI0818 12:01:02.130137 17389 solver.cpp:404]     Test net output #1: loss = 3.57989 (* 1 = 3.57989 loss)\nI0818 12:01:03.436466 17389 solver.cpp:228] Iteration 32300, loss = 0.144195\nI0818 12:01:03.436513 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 12:01:03.436542 17389 solver.cpp:244]     Train net output #1: loss = 0.144195 (* 1 = 0.144195 loss)\nI0818 
12:01:03.521605 17389 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0818 12:03:22.066654 17389 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 12:04:42.883275 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43472\nI0818 12:04:42.883543 17389 solver.cpp:404]     Test net output #1: loss = 3.60018 (* 1 = 3.60018 loss)\nI0818 12:04:44.191807 17389 solver.cpp:228] Iteration 32400, loss = 0.242939\nI0818 12:04:44.191869 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 12:04:44.191886 17389 solver.cpp:244]     Train net output #1: loss = 0.242939 (* 1 = 0.242939 loss)\nI0818 12:04:44.275126 17389 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0818 12:07:02.871528 17389 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:08:24.388924 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44412\nI0818 12:08:24.389251 17389 solver.cpp:404]     Test net output #1: loss = 3.61321 (* 1 = 3.61321 loss)\nI0818 12:08:25.700639 17389 solver.cpp:228] Iteration 32500, loss = 0.245401\nI0818 12:08:25.700701 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 12:08:25.700721 17389 solver.cpp:244]     Train net output #1: loss = 0.2454 (* 1 = 0.2454 loss)\nI0818 12:08:25.782213 17389 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0818 12:10:44.377730 17389 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 12:12:05.444643 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40012\nI0818 12:12:05.444910 17389 solver.cpp:404]     Test net output #1: loss = 4.26744 (* 1 = 4.26744 loss)\nI0818 12:12:06.752051 17389 solver.cpp:228] Iteration 32600, loss = 0.237228\nI0818 12:12:06.752095 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 12:12:06.752112 17389 solver.cpp:244]     Train net output #1: loss = 0.237228 (* 1 = 0.237228 loss)\nI0818 12:12:06.833578 17389 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0818 12:14:25.409381 17389 solver.cpp:337] Iteration 32700, Testing 
net (#0)\nI0818 12:15:46.557390 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38948\nI0818 12:15:46.557698 17389 solver.cpp:404]     Test net output #1: loss = 4.36048 (* 1 = 4.36048 loss)\nI0818 12:15:47.867146 17389 solver.cpp:228] Iteration 32700, loss = 0.192987\nI0818 12:15:47.867202 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 12:15:47.867219 17389 solver.cpp:244]     Train net output #1: loss = 0.192987 (* 1 = 0.192987 loss)\nI0818 12:15:47.950203 17389 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0818 12:18:06.506167 17389 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:19:28.008607 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43724\nI0818 12:19:28.008945 17389 solver.cpp:404]     Test net output #1: loss = 3.56252 (* 1 = 3.56252 loss)\nI0818 12:19:29.318996 17389 solver.cpp:228] Iteration 32800, loss = 0.232681\nI0818 12:19:29.319052 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 12:19:29.319070 17389 solver.cpp:244]     Train net output #1: loss = 0.232681 (* 1 = 0.232681 loss)\nI0818 12:19:29.400698 17389 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0818 12:21:48.032734 17389 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:23:09.525722 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45472\nI0818 12:23:09.526046 17389 solver.cpp:404]     Test net output #1: loss = 3.42085 (* 1 = 3.42085 loss)\nI0818 12:23:10.836403 17389 solver.cpp:228] Iteration 32900, loss = 0.192567\nI0818 12:23:10.836457 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 12:23:10.836474 17389 solver.cpp:244]     Train net output #1: loss = 0.192567 (* 1 = 0.192567 loss)\nI0818 12:23:10.923254 17389 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0818 12:25:29.613957 17389 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:26:51.116098 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42452\nI0818 12:26:51.116431 17389 
solver.cpp:404]     Test net output #1: loss = 3.59671 (* 1 = 3.59671 loss)\nI0818 12:26:52.426918 17389 solver.cpp:228] Iteration 33000, loss = 0.18731\nI0818 12:26:52.426972 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 12:26:52.426990 17389 solver.cpp:244]     Train net output #1: loss = 0.18731 (* 1 = 0.18731 loss)\nI0818 12:26:52.512476 17389 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0818 12:29:11.149081 17389 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:30:32.663235 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41696\nI0818 12:30:32.663563 17389 solver.cpp:404]     Test net output #1: loss = 3.98772 (* 1 = 3.98772 loss)\nI0818 12:30:33.974282 17389 solver.cpp:228] Iteration 33100, loss = 0.133472\nI0818 12:30:33.974334 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 12:30:33.974350 17389 solver.cpp:244]     Train net output #1: loss = 0.133472 (* 1 = 0.133472 loss)\nI0818 12:30:34.060173 17389 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0818 12:32:52.689036 17389 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 12:34:14.191594 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43272\nI0818 12:34:14.191921 17389 solver.cpp:404]     Test net output #1: loss = 4.12004 (* 1 = 4.12004 loss)\nI0818 12:34:15.502537 17389 solver.cpp:228] Iteration 33200, loss = 0.273276\nI0818 12:34:15.502590 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 12:34:15.502607 17389 solver.cpp:244]     Train net output #1: loss = 0.273276 (* 1 = 0.273276 loss)\nI0818 12:34:15.584882 17389 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0818 12:36:34.289779 17389 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 12:37:55.795680 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45448\nI0818 12:37:55.796001 17389 solver.cpp:404]     Test net output #1: loss = 3.43292 (* 1 = 3.43292 loss)\nI0818 12:37:57.106153 17389 solver.cpp:228] Iteration 33300, 
loss = 0.211611\nI0818 12:37:57.106205 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 12:37:57.106221 17389 solver.cpp:244]     Train net output #1: loss = 0.211611 (* 1 = 0.211611 loss)\nI0818 12:37:57.188532 17389 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0818 12:40:15.952328 17389 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 12:41:37.442998 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44684\nI0818 12:41:37.443339 17389 solver.cpp:404]     Test net output #1: loss = 3.60644 (* 1 = 3.60644 loss)\nI0818 12:41:38.754442 17389 solver.cpp:228] Iteration 33400, loss = 0.189659\nI0818 12:41:38.754498 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 12:41:38.754515 17389 solver.cpp:244]     Train net output #1: loss = 0.189659 (* 1 = 0.189659 loss)\nI0818 12:41:38.838027 17389 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0818 12:43:57.630156 17389 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 12:45:19.117432 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44092\nI0818 12:45:19.117769 17389 solver.cpp:404]     Test net output #1: loss = 3.67284 (* 1 = 3.67284 loss)\nI0818 12:45:20.429034 17389 solver.cpp:228] Iteration 33500, loss = 0.237652\nI0818 12:45:20.429090 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 12:45:20.429107 17389 solver.cpp:244]     Train net output #1: loss = 0.237652 (* 1 = 0.237652 loss)\nI0818 12:45:20.517366 17389 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0818 12:47:39.254251 17389 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 12:49:00.731456 17389 solver.cpp:404]     Test net output #0: accuracy = 0.37044\nI0818 12:49:00.731797 17389 solver.cpp:404]     Test net output #1: loss = 4.63025 (* 1 = 4.63025 loss)\nI0818 12:49:02.041960 17389 solver.cpp:228] Iteration 33600, loss = 0.177414\nI0818 12:49:02.042013 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 12:49:02.042031 17389 
solver.cpp:244]     Train net output #1: loss = 0.177414 (* 1 = 0.177414 loss)\nI0818 12:49:02.132899 17389 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0818 12:51:20.864204 17389 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 12:52:42.371692 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40784\nI0818 12:52:42.372030 17389 solver.cpp:404]     Test net output #1: loss = 3.97407 (* 1 = 3.97407 loss)\nI0818 12:52:43.681890 17389 solver.cpp:228] Iteration 33700, loss = 0.296924\nI0818 12:52:43.681949 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 12:52:43.681967 17389 solver.cpp:244]     Train net output #1: loss = 0.296924 (* 1 = 0.296924 loss)\nI0818 12:52:43.765851 17389 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0818 12:55:02.471263 17389 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 12:56:23.938854 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38992\nI0818 12:56:23.939172 17389 solver.cpp:404]     Test net output #1: loss = 4.43986 (* 1 = 4.43986 loss)\nI0818 12:56:25.251173 17389 solver.cpp:228] Iteration 33800, loss = 0.164696\nI0818 12:56:25.251225 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 12:56:25.251242 17389 solver.cpp:244]     Train net output #1: loss = 0.164696 (* 1 = 0.164696 loss)\nI0818 12:56:25.336827 17389 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0818 12:58:44.009433 17389 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 13:00:05.512800 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40856\nI0818 13:00:05.513100 17389 solver.cpp:404]     Test net output #1: loss = 4.31035 (* 1 = 4.31035 loss)\nI0818 13:00:06.824564 17389 solver.cpp:228] Iteration 33900, loss = 0.216819\nI0818 13:00:06.824622 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 13:00:06.824642 17389 solver.cpp:244]     Train net output #1: loss = 0.216819 (* 1 = 0.216819 loss)\nI0818 13:00:06.908108 17389 sgd_solver.cpp:166] Iteration 
33900, lr = 0.35\nI0818 13:02:25.635330 17389 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 13:03:47.157410 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41036\nI0818 13:03:47.157706 17389 solver.cpp:404]     Test net output #1: loss = 4.11175 (* 1 = 4.11175 loss)\nI0818 13:03:48.469451 17389 solver.cpp:228] Iteration 34000, loss = 0.189702\nI0818 13:03:48.469503 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 13:03:48.469519 17389 solver.cpp:244]     Train net output #1: loss = 0.189701 (* 1 = 0.189701 loss)\nI0818 13:03:48.551800 17389 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0818 13:06:07.234055 17389 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:07:28.744035 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44296\nI0818 13:07:28.744361 17389 solver.cpp:404]     Test net output #1: loss = 3.88352 (* 1 = 3.88352 loss)\nI0818 13:07:30.056362 17389 solver.cpp:228] Iteration 34100, loss = 0.207429\nI0818 13:07:30.056416 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 13:07:30.056432 17389 solver.cpp:244]     Train net output #1: loss = 0.207429 (* 1 = 0.207429 loss)\nI0818 13:07:30.137316 17389 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0818 13:09:48.763051 17389 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 13:11:10.273692 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4364\nI0818 13:11:10.274024 17389 solver.cpp:404]     Test net output #1: loss = 3.83268 (* 1 = 3.83268 loss)\nI0818 13:11:11.585445 17389 solver.cpp:228] Iteration 34200, loss = 0.217789\nI0818 13:11:11.585503 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 13:11:11.585521 17389 solver.cpp:244]     Train net output #1: loss = 0.217789 (* 1 = 0.217789 loss)\nI0818 13:11:11.673131 17389 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0818 13:13:30.129745 17389 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:14:51.657220 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.39212\nI0818 13:14:51.657551 17389 solver.cpp:404]     Test net output #1: loss = 4.269 (* 1 = 4.269 loss)\nI0818 13:14:52.969204 17389 solver.cpp:228] Iteration 34300, loss = 0.270901\nI0818 13:14:52.969264 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 13:14:52.969281 17389 solver.cpp:244]     Train net output #1: loss = 0.270901 (* 1 = 0.270901 loss)\nI0818 13:14:53.048888 17389 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0818 13:17:11.574410 17389 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:18:33.078660 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41396\nI0818 13:18:33.079004 17389 solver.cpp:404]     Test net output #1: loss = 3.90419 (* 1 = 3.90419 loss)\nI0818 13:18:34.389363 17389 solver.cpp:228] Iteration 34400, loss = 0.225608\nI0818 13:18:34.389420 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 13:18:34.389439 17389 solver.cpp:244]     Train net output #1: loss = 0.225608 (* 1 = 0.225608 loss)\nI0818 13:18:34.482409 17389 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0818 13:20:52.861351 17389 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:22:14.399528 17389 solver.cpp:404]     Test net output #0: accuracy = 0.445\nI0818 13:22:14.399840 17389 solver.cpp:404]     Test net output #1: loss = 3.6123 (* 1 = 3.6123 loss)\nI0818 13:22:15.710517 17389 solver.cpp:228] Iteration 34500, loss = 0.157771\nI0818 13:22:15.710577 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 13:22:15.710593 17389 solver.cpp:244]     Train net output #1: loss = 0.157771 (* 1 = 0.157771 loss)\nI0818 13:22:15.800850 17389 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0818 13:24:34.233950 17389 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:25:55.743211 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45844\nI0818 13:25:55.743551 17389 solver.cpp:404]     Test net output #1: loss = 3.33976 (* 
1 = 3.33976 loss)\nI0818 13:25:57.054859 17389 solver.cpp:228] Iteration 34600, loss = 0.10396\nI0818 13:25:57.054919 17389 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:25:57.054935 17389 solver.cpp:244]     Train net output #1: loss = 0.103959 (* 1 = 0.103959 loss)\nI0818 13:25:57.141142 17389 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0818 13:28:15.576325 17389 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:29:37.076328 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42028\nI0818 13:29:37.076653 17389 solver.cpp:404]     Test net output #1: loss = 3.9011 (* 1 = 3.9011 loss)\nI0818 13:29:38.387300 17389 solver.cpp:228] Iteration 34700, loss = 0.234189\nI0818 13:29:38.387356 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 13:29:38.387374 17389 solver.cpp:244]     Train net output #1: loss = 0.234189 (* 1 = 0.234189 loss)\nI0818 13:29:38.479383 17389 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0818 13:31:56.939601 17389 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 13:33:18.460865 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43284\nI0818 13:33:18.461194 17389 solver.cpp:404]     Test net output #1: loss = 3.6911 (* 1 = 3.6911 loss)\nI0818 13:33:19.771837 17389 solver.cpp:228] Iteration 34800, loss = 0.180359\nI0818 13:33:19.771896 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 13:33:19.771915 17389 solver.cpp:244]     Train net output #1: loss = 0.180359 (* 1 = 0.180359 loss)\nI0818 13:33:19.864576 17389 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0818 13:35:38.202247 17389 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 13:36:59.736968 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44084\nI0818 13:36:59.737316 17389 solver.cpp:404]     Test net output #1: loss = 3.83084 (* 1 = 3.83084 loss)\nI0818 13:37:01.048174 17389 solver.cpp:228] Iteration 34900, loss = 0.157772\nI0818 13:37:01.048233 17389 solver.cpp:244]  
   Train net output #0: accuracy = 0.952\nI0818 13:37:01.048250 17389 solver.cpp:244]     Train net output #1: loss = 0.157772 (* 1 = 0.157772 loss)\nI0818 13:37:01.137394 17389 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0818 13:39:19.439201 17389 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 13:40:40.980271 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43464\nI0818 13:40:40.980579 17389 solver.cpp:404]     Test net output #1: loss = 3.69603 (* 1 = 3.69603 loss)\nI0818 13:40:42.290856 17389 solver.cpp:228] Iteration 35000, loss = 0.200758\nI0818 13:40:42.290916 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 13:40:42.290935 17389 solver.cpp:244]     Train net output #1: loss = 0.200758 (* 1 = 0.200758 loss)\nI0818 13:40:42.381286 17389 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0818 13:43:00.670311 17389 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 13:44:22.211814 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42408\nI0818 13:44:22.212138 17389 solver.cpp:404]     Test net output #1: loss = 3.93597 (* 1 = 3.93597 loss)\nI0818 13:44:23.522428 17389 solver.cpp:228] Iteration 35100, loss = 0.209194\nI0818 13:44:23.522485 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 13:44:23.522505 17389 solver.cpp:244]     Train net output #1: loss = 0.209194 (* 1 = 0.209194 loss)\nI0818 13:44:23.613852 17389 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0818 13:46:41.914716 17389 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 13:48:03.437162 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42364\nI0818 13:48:03.437476 17389 solver.cpp:404]     Test net output #1: loss = 3.98944 (* 1 = 3.98944 loss)\nI0818 13:48:04.748065 17389 solver.cpp:228] Iteration 35200, loss = 0.210905\nI0818 13:48:04.748117 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 13:48:04.748134 17389 solver.cpp:244]     Train net output #1: loss = 0.210905 (* 1 = 
0.210905 loss)\nI0818 13:48:04.843118 17389 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0818 13:50:23.190484 17389 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 13:51:44.721329 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41896\nI0818 13:51:44.721679 17389 solver.cpp:404]     Test net output #1: loss = 3.88049 (* 1 = 3.88049 loss)\nI0818 13:51:46.031967 17389 solver.cpp:228] Iteration 35300, loss = 0.146548\nI0818 13:51:46.032018 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 13:51:46.032037 17389 solver.cpp:244]     Train net output #1: loss = 0.146548 (* 1 = 0.146548 loss)\nI0818 13:51:46.123121 17389 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0818 13:54:04.414281 17389 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 13:55:25.929702 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39604\nI0818 13:55:25.930034 17389 solver.cpp:404]     Test net output #1: loss = 4.4239 (* 1 = 4.4239 loss)\nI0818 13:55:27.240962 17389 solver.cpp:228] Iteration 35400, loss = 0.172784\nI0818 13:55:27.241015 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 13:55:27.241034 17389 solver.cpp:244]     Train net output #1: loss = 0.172784 (* 1 = 0.172784 loss)\nI0818 13:55:27.334802 17389 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0818 13:57:45.705163 17389 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 13:59:07.226541 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44804\nI0818 13:59:07.226891 17389 solver.cpp:404]     Test net output #1: loss = 3.56119 (* 1 = 3.56119 loss)\nI0818 13:59:08.538602 17389 solver.cpp:228] Iteration 35500, loss = 0.23787\nI0818 13:59:08.538657 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 13:59:08.538679 17389 solver.cpp:244]     Train net output #1: loss = 0.237869 (* 1 = 0.237869 loss)\nI0818 13:59:08.627715 17389 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0818 14:01:26.934088 17389 solver.cpp:337] 
Iteration 35600, Testing net (#0)\nI0818 14:02:48.436372 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45076\nI0818 14:02:48.436718 17389 solver.cpp:404]     Test net output #1: loss = 3.37783 (* 1 = 3.37783 loss)\nI0818 14:02:49.747767 17389 solver.cpp:228] Iteration 35600, loss = 0.189175\nI0818 14:02:49.747825 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 14:02:49.747843 17389 solver.cpp:244]     Train net output #1: loss = 0.189175 (* 1 = 0.189175 loss)\nI0818 14:02:49.840724 17389 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0818 14:05:08.177645 17389 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 14:06:29.689416 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41784\nI0818 14:06:29.689751 17389 solver.cpp:404]     Test net output #1: loss = 3.87122 (* 1 = 3.87122 loss)\nI0818 14:06:31.000542 17389 solver.cpp:228] Iteration 35700, loss = 0.306966\nI0818 14:06:31.000602 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:06:31.000619 17389 solver.cpp:244]     Train net output #1: loss = 0.306966 (* 1 = 0.306966 loss)\nI0818 14:06:31.095690 17389 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0818 14:08:49.469635 17389 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 14:10:10.979876 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40244\nI0818 14:10:10.980195 17389 solver.cpp:404]     Test net output #1: loss = 4.32155 (* 1 = 4.32155 loss)\nI0818 14:10:12.291278 17389 solver.cpp:228] Iteration 35800, loss = 0.166644\nI0818 14:10:12.291332 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 14:10:12.291348 17389 solver.cpp:244]     Train net output #1: loss = 0.166644 (* 1 = 0.166644 loss)\nI0818 14:10:12.381669 17389 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0818 14:12:30.718828 17389 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:13:52.232770 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39444\nI0818 
14:13:52.233279 17389 solver.cpp:404]     Test net output #1: loss = 4.35977 (* 1 = 4.35977 loss)\nI0818 14:13:53.544338 17389 solver.cpp:228] Iteration 35900, loss = 0.239454\nI0818 14:13:53.544396 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 14:13:53.544414 17389 solver.cpp:244]     Train net output #1: loss = 0.239454 (* 1 = 0.239454 loss)\nI0818 14:13:53.635612 17389 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0818 14:16:11.960887 17389 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:17:33.486533 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44292\nI0818 14:17:33.486860 17389 solver.cpp:404]     Test net output #1: loss = 3.60495 (* 1 = 3.60495 loss)\nI0818 14:17:34.798566 17389 solver.cpp:228] Iteration 36000, loss = 0.143459\nI0818 14:17:34.798619 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 14:17:34.798637 17389 solver.cpp:244]     Train net output #1: loss = 0.143459 (* 1 = 0.143459 loss)\nI0818 14:17:34.888305 17389 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0818 14:19:53.242707 17389 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:21:14.763846 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39596\nI0818 14:21:14.764165 17389 solver.cpp:404]     Test net output #1: loss = 4.27205 (* 1 = 4.27205 loss)\nI0818 14:21:16.075335 17389 solver.cpp:228] Iteration 36100, loss = 0.321946\nI0818 14:21:16.075390 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 14:21:16.075407 17389 solver.cpp:244]     Train net output #1: loss = 0.321946 (* 1 = 0.321946 loss)\nI0818 14:21:16.163024 17389 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0818 14:23:34.581848 17389 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:24:56.111619 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44556\nI0818 14:24:56.111917 17389 solver.cpp:404]     Test net output #1: loss = 3.60023 (* 1 = 3.60023 loss)\nI0818 14:24:57.422520 17389 
solver.cpp:228] Iteration 36200, loss = 0.23073\nI0818 14:24:57.422575 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:24:57.422592 17389 solver.cpp:244]     Train net output #1: loss = 0.230729 (* 1 = 0.230729 loss)\nI0818 14:24:57.513943 17389 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0818 14:27:15.910501 17389 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 14:28:37.443302 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45412\nI0818 14:28:37.443645 17389 solver.cpp:404]     Test net output #1: loss = 3.74125 (* 1 = 3.74125 loss)\nI0818 14:28:38.755128 17389 solver.cpp:228] Iteration 36300, loss = 0.232484\nI0818 14:28:38.755184 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 14:28:38.755201 17389 solver.cpp:244]     Train net output #1: loss = 0.232483 (* 1 = 0.232483 loss)\nI0818 14:28:38.845959 17389 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0818 14:30:57.359827 17389 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 14:32:18.888301 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42648\nI0818 14:32:18.888641 17389 solver.cpp:404]     Test net output #1: loss = 4.04529 (* 1 = 4.04529 loss)\nI0818 14:32:20.199007 17389 solver.cpp:228] Iteration 36400, loss = 0.20755\nI0818 14:32:20.199064 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 14:32:20.199080 17389 solver.cpp:244]     Train net output #1: loss = 0.20755 (* 1 = 0.20755 loss)\nI0818 14:32:20.287199 17389 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 14:34:38.663445 17389 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 14:39:25.994343 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43956\nI0818 14:39:25.995591 17389 solver.cpp:404]     Test net output #1: loss = 3.71477 (* 1 = 3.71477 loss)\nI0818 14:39:27.315129 17389 solver.cpp:228] Iteration 36500, loss = 0.207764\nI0818 14:39:27.315209 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.928\nI0818 14:39:27.315227 17389 solver.cpp:244]     Train net output #1: loss = 0.207764 (* 1 = 0.207764 loss)\nI0818 14:39:27.389843 17389 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 14:41:45.799643 17389 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 14:43:07.334524 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44268\nI0818 14:43:07.334849 17389 solver.cpp:404]     Test net output #1: loss = 3.52742 (* 1 = 3.52742 loss)\nI0818 14:43:08.645784 17389 solver.cpp:228] Iteration 36600, loss = 0.250501\nI0818 14:43:08.645838 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 14:43:08.645853 17389 solver.cpp:244]     Train net output #1: loss = 0.2505 (* 1 = 0.2505 loss)\nI0818 14:43:08.743068 17389 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 14:45:27.833555 17389 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 14:46:49.356740 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44712\nI0818 14:46:49.357134 17389 solver.cpp:404]     Test net output #1: loss = 3.59987 (* 1 = 3.59987 loss)\nI0818 14:46:50.667188 17389 solver.cpp:228] Iteration 36700, loss = 0.116255\nI0818 14:46:50.667242 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 14:46:50.667258 17389 solver.cpp:244]     Train net output #1: loss = 0.116255 (* 1 = 0.116255 loss)\nI0818 14:46:50.757279 17389 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 14:49:09.174887 17389 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 14:50:30.688792 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42272\nI0818 14:50:30.689115 17389 solver.cpp:404]     Test net output #1: loss = 4.04447 (* 1 = 4.04447 loss)\nI0818 14:50:31.998869 17389 solver.cpp:228] Iteration 36800, loss = 0.239495\nI0818 14:50:31.998922 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 14:50:31.998939 17389 solver.cpp:244]     Train net output #1: loss = 0.239495 (* 1 = 0.239495 loss)\nI0818 14:50:32.092999 
17389 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 14:52:50.498628 17389 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 14:54:12.021353 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43016\nI0818 14:54:12.021682 17389 solver.cpp:404]     Test net output #1: loss = 3.61038 (* 1 = 3.61038 loss)\nI0818 14:54:13.331162 17389 solver.cpp:228] Iteration 36900, loss = 0.212132\nI0818 14:54:13.331210 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:54:13.331228 17389 solver.cpp:244]     Train net output #1: loss = 0.212132 (* 1 = 0.212132 loss)\nI0818 14:54:13.420941 17389 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 14:56:31.882057 17389 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 14:57:53.411180 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44424\nI0818 14:57:53.411501 17389 solver.cpp:404]     Test net output #1: loss = 3.63374 (* 1 = 3.63374 loss)\nI0818 14:57:54.722551 17389 solver.cpp:228] Iteration 37000, loss = 0.180257\nI0818 14:57:54.722606 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 14:57:54.722623 17389 solver.cpp:244]     Train net output #1: loss = 0.180257 (* 1 = 0.180257 loss)\nI0818 14:57:54.816978 17389 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 15:00:13.437094 17389 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 15:01:34.963574 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43888\nI0818 15:01:34.963887 17389 solver.cpp:404]     Test net output #1: loss = 3.65175 (* 1 = 3.65175 loss)\nI0818 15:01:36.273334 17389 solver.cpp:228] Iteration 37100, loss = 0.127642\nI0818 15:01:36.273387 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:01:36.273403 17389 solver.cpp:244]     Train net output #1: loss = 0.127641 (* 1 = 0.127641 loss)\nI0818 15:01:36.367808 17389 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 15:03:55.069145 17389 solver.cpp:337] Iteration 37200, Testing net 
(#0)\nI0818 15:05:16.598013 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39824\nI0818 15:05:16.598327 17389 solver.cpp:404]     Test net output #1: loss = 4.23176 (* 1 = 4.23176 loss)\nI0818 15:05:17.908587 17389 solver.cpp:228] Iteration 37200, loss = 0.213628\nI0818 15:05:17.908639 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:05:17.908656 17389 solver.cpp:244]     Train net output #1: loss = 0.213628 (* 1 = 0.213628 loss)\nI0818 15:05:18.000419 17389 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 15:07:36.716218 17389 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:08:58.251335 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4078\nI0818 15:08:58.251646 17389 solver.cpp:404]     Test net output #1: loss = 4.15229 (* 1 = 4.15229 loss)\nI0818 15:08:59.562726 17389 solver.cpp:228] Iteration 37300, loss = 0.161187\nI0818 15:08:59.562782 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:08:59.562798 17389 solver.cpp:244]     Train net output #1: loss = 0.161187 (* 1 = 0.161187 loss)\nI0818 15:08:59.663358 17389 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 15:11:18.497373 17389 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 15:12:40.034803 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43368\nI0818 15:12:40.035130 17389 solver.cpp:404]     Test net output #1: loss = 3.81127 (* 1 = 3.81127 loss)\nI0818 15:12:41.346350 17389 solver.cpp:228] Iteration 37400, loss = 0.313527\nI0818 15:12:41.346401 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 15:12:41.346418 17389 solver.cpp:244]     Train net output #1: loss = 0.313527 (* 1 = 0.313527 loss)\nI0818 15:12:41.442591 17389 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 15:15:00.148116 17389 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:16:21.688320 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42516\nI0818 15:16:21.688657 17389 solver.cpp:404] 
    Test net output #1: loss = 3.84652 (* 1 = 3.84652 loss)\nI0818 15:16:23.000000 17389 solver.cpp:228] Iteration 37500, loss = 0.202778\nI0818 15:16:23.000056 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:16:23.000079 17389 solver.cpp:244]     Train net output #1: loss = 0.202778 (* 1 = 0.202778 loss)\nI0818 15:16:23.091982 17389 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 15:18:41.795879 17389 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:20:03.321979 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4064\nI0818 15:20:03.322284 17389 solver.cpp:404]     Test net output #1: loss = 4.00379 (* 1 = 4.00379 loss)\nI0818 15:20:04.633141 17389 solver.cpp:228] Iteration 37600, loss = 0.17237\nI0818 15:20:04.633190 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:20:04.633208 17389 solver.cpp:244]     Train net output #1: loss = 0.172369 (* 1 = 0.172369 loss)\nI0818 15:20:04.721812 17389 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 15:22:23.415024 17389 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:23:44.939373 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39756\nI0818 15:23:44.939721 17389 solver.cpp:404]     Test net output #1: loss = 3.83469 (* 1 = 3.83469 loss)\nI0818 15:23:46.250174 17389 solver.cpp:228] Iteration 37700, loss = 0.206919\nI0818 15:23:46.250226 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:23:46.250243 17389 solver.cpp:244]     Train net output #1: loss = 0.206919 (* 1 = 0.206919 loss)\nI0818 15:23:46.349170 17389 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 15:26:05.029536 17389 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:27:26.545051 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44648\nI0818 15:27:26.545373 17389 solver.cpp:404]     Test net output #1: loss = 3.40996 (* 1 = 3.40996 loss)\nI0818 15:27:27.855834 17389 solver.cpp:228] Iteration 37800, loss = 
0.138463\nI0818 15:27:27.855887 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:27:27.855906 17389 solver.cpp:244]     Train net output #1: loss = 0.138463 (* 1 = 0.138463 loss)\nI0818 15:27:27.951933 17389 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 15:29:46.641079 17389 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:31:08.170555 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45624\nI0818 15:31:08.170889 17389 solver.cpp:404]     Test net output #1: loss = 3.33218 (* 1 = 3.33218 loss)\nI0818 15:31:09.482240 17389 solver.cpp:228] Iteration 37900, loss = 0.208214\nI0818 15:31:09.482292 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:31:09.482309 17389 solver.cpp:244]     Train net output #1: loss = 0.208214 (* 1 = 0.208214 loss)\nI0818 15:31:09.578251 17389 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 15:33:28.277621 17389 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 15:34:49.815847 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43072\nI0818 15:34:49.816159 17389 solver.cpp:404]     Test net output #1: loss = 3.57772 (* 1 = 3.57772 loss)\nI0818 15:34:51.127512 17389 solver.cpp:228] Iteration 38000, loss = 0.210446\nI0818 15:34:51.127574 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:34:51.127599 17389 solver.cpp:244]     Train net output #1: loss = 0.210446 (* 1 = 0.210446 loss)\nI0818 15:34:51.219700 17389 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 15:37:09.644603 17389 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 15:38:31.166393 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41836\nI0818 15:38:31.166731 17389 solver.cpp:404]     Test net output #1: loss = 3.82782 (* 1 = 3.82782 loss)\nI0818 15:38:32.476814 17389 solver.cpp:228] Iteration 38100, loss = 0.17672\nI0818 15:38:32.476861 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:38:32.476878 17389 
solver.cpp:244]     Train net output #1: loss = 0.17672 (* 1 = 0.17672 loss)\nI0818 15:38:32.573371 17389 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 15:40:50.949904 17389 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 15:42:12.450711 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43232\nI0818 15:42:12.451032 17389 solver.cpp:404]     Test net output #1: loss = 3.74309 (* 1 = 3.74309 loss)\nI0818 15:42:13.762290 17389 solver.cpp:228] Iteration 38200, loss = 0.145604\nI0818 15:42:13.762339 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:42:13.762356 17389 solver.cpp:244]     Train net output #1: loss = 0.145604 (* 1 = 0.145604 loss)\nI0818 15:42:13.853579 17389 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 15:44:32.135895 17389 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 15:45:53.638105 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43888\nI0818 15:45:53.638423 17389 solver.cpp:404]     Test net output #1: loss = 3.65197 (* 1 = 3.65197 loss)\nI0818 15:45:54.947930 17389 solver.cpp:228] Iteration 38300, loss = 0.239538\nI0818 15:45:54.947978 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 15:45:54.947995 17389 solver.cpp:244]     Train net output #1: loss = 0.239538 (* 1 = 0.239538 loss)\nI0818 15:45:55.036566 17389 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 15:48:13.378060 17389 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 15:49:34.912891 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42864\nI0818 15:49:34.913223 17389 solver.cpp:404]     Test net output #1: loss = 3.39293 (* 1 = 3.39293 loss)\nI0818 15:49:36.223754 17389 solver.cpp:228] Iteration 38400, loss = 0.251789\nI0818 15:49:36.223801 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 15:49:36.223819 17389 solver.cpp:244]     Train net output #1: loss = 0.251788 (* 1 = 0.251788 loss)\nI0818 15:49:36.318073 17389 sgd_solver.cpp:166] Iteration 
38400, lr = 0.35\nI0818 15:51:54.626065 17389 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 15:53:16.165895 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39704\nI0818 15:53:16.166254 17389 solver.cpp:404]     Test net output #1: loss = 4.22113 (* 1 = 4.22113 loss)\nI0818 15:53:17.476081 17389 solver.cpp:228] Iteration 38500, loss = 0.142134\nI0818 15:53:17.476130 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:53:17.476146 17389 solver.cpp:244]     Train net output #1: loss = 0.142134 (* 1 = 0.142134 loss)\nI0818 15:53:17.572530 17389 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 15:55:35.991428 17389 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 15:56:57.540666 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3548\nI0818 15:56:57.540999 17389 solver.cpp:404]     Test net output #1: loss = 4.91746 (* 1 = 4.91746 loss)\nI0818 15:56:58.851131 17389 solver.cpp:228] Iteration 38600, loss = 0.176173\nI0818 15:56:58.851174 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:56:58.851191 17389 solver.cpp:244]     Train net output #1: loss = 0.176173 (* 1 = 0.176173 loss)\nI0818 15:56:58.939503 17389 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 15:59:17.320904 17389 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 16:00:38.860620 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44892\nI0818 16:00:38.860965 17389 solver.cpp:404]     Test net output #1: loss = 3.45146 (* 1 = 3.45146 loss)\nI0818 16:00:40.171408 17389 solver.cpp:228] Iteration 38700, loss = 0.234189\nI0818 16:00:40.171458 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 16:00:40.171475 17389 solver.cpp:244]     Train net output #1: loss = 0.234189 (* 1 = 0.234189 loss)\nI0818 16:00:40.263577 17389 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 16:02:58.587378 17389 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 16:04:20.101682 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.42712\nI0818 16:04:20.102008 17389 solver.cpp:404]     Test net output #1: loss = 3.75342 (* 1 = 3.75342 loss)\nI0818 16:04:21.412766 17389 solver.cpp:228] Iteration 38800, loss = 0.0784305\nI0818 16:04:21.412818 17389 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:04:21.412835 17389 solver.cpp:244]     Train net output #1: loss = 0.0784304 (* 1 = 0.0784304 loss)\nI0818 16:04:21.506682 17389 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 16:06:39.841703 17389 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:08:01.360721 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36908\nI0818 16:08:01.361047 17389 solver.cpp:404]     Test net output #1: loss = 4.46434 (* 1 = 4.46434 loss)\nI0818 16:08:02.670639 17389 solver.cpp:228] Iteration 38900, loss = 0.118571\nI0818 16:08:02.670691 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:08:02.670708 17389 solver.cpp:244]     Train net output #1: loss = 0.118571 (* 1 = 0.118571 loss)\nI0818 16:08:02.763037 17389 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 16:10:21.050462 17389 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:11:42.540424 17389 solver.cpp:404]     Test net output #0: accuracy = 0.403\nI0818 16:11:42.540756 17389 solver.cpp:404]     Test net output #1: loss = 4.16802 (* 1 = 4.16802 loss)\nI0818 16:11:43.850762 17389 solver.cpp:228] Iteration 39000, loss = 0.216344\nI0818 16:11:43.850813 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 16:11:43.850831 17389 solver.cpp:244]     Train net output #1: loss = 0.216344 (* 1 = 0.216344 loss)\nI0818 16:11:43.936913 17389 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 16:14:02.207803 17389 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:15:23.730312 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42028\nI0818 16:15:23.730654 17389 solver.cpp:404]     Test net output #1: loss = 
4.02377 (* 1 = 4.02377 loss)\nI0818 16:15:25.041750 17389 solver.cpp:228] Iteration 39100, loss = 0.160787\nI0818 16:15:25.041801 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:15:25.041817 17389 solver.cpp:244]     Train net output #1: loss = 0.160787 (* 1 = 0.160787 loss)\nI0818 16:15:25.131167 17389 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 16:17:43.449594 17389 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:19:04.967716 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42484\nI0818 16:19:04.968035 17389 solver.cpp:404]     Test net output #1: loss = 3.7062 (* 1 = 3.7062 loss)\nI0818 16:19:06.278942 17389 solver.cpp:228] Iteration 39200, loss = 0.172834\nI0818 16:19:06.278993 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 16:19:06.279009 17389 solver.cpp:244]     Train net output #1: loss = 0.172834 (* 1 = 0.172834 loss)\nI0818 16:19:06.370177 17389 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 16:21:24.800305 17389 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:22:46.303948 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4218\nI0818 16:22:46.304258 17389 solver.cpp:404]     Test net output #1: loss = 3.74597 (* 1 = 3.74597 loss)\nI0818 16:22:47.614290 17389 solver.cpp:228] Iteration 39300, loss = 0.329952\nI0818 16:22:47.614344 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 16:22:47.614362 17389 solver.cpp:244]     Train net output #1: loss = 0.329951 (* 1 = 0.329951 loss)\nI0818 16:22:47.702896 17389 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 16:25:06.029007 17389 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:26:27.530055 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43196\nI0818 16:26:27.530344 17389 solver.cpp:404]     Test net output #1: loss = 3.75991 (* 1 = 3.75991 loss)\nI0818 16:26:28.841756 17389 solver.cpp:228] Iteration 39400, loss = 0.21074\nI0818 16:26:28.841804 17389 
solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 16:26:28.841822 17389 solver.cpp:244]     Train net output #1: loss = 0.21074 (* 1 = 0.21074 loss)\nI0818 16:26:28.927392 17389 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 16:28:47.244740 17389 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:30:08.733618 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43608\nI0818 16:30:08.733963 17389 solver.cpp:404]     Test net output #1: loss = 3.52428 (* 1 = 3.52428 loss)\nI0818 16:30:10.045320 17389 solver.cpp:228] Iteration 39500, loss = 0.252803\nI0818 16:30:10.045368 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 16:30:10.045385 17389 solver.cpp:244]     Train net output #1: loss = 0.252803 (* 1 = 0.252803 loss)\nI0818 16:30:10.133262 17389 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 16:32:28.434679 17389 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 16:33:49.934803 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43736\nI0818 16:33:49.935156 17389 solver.cpp:404]     Test net output #1: loss = 3.85906 (* 1 = 3.85906 loss)\nI0818 16:33:51.246522 17389 solver.cpp:228] Iteration 39600, loss = 0.176616\nI0818 16:33:51.246574 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:33:51.246592 17389 solver.cpp:244]     Train net output #1: loss = 0.176616 (* 1 = 0.176616 loss)\nI0818 16:33:51.340970 17389 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 16:36:09.738466 17389 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 16:37:31.179256 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39608\nI0818 16:37:31.179592 17389 solver.cpp:404]     Test net output #1: loss = 4.56036 (* 1 = 4.56036 loss)\nI0818 16:37:32.490814 17389 solver.cpp:228] Iteration 39700, loss = 0.168583\nI0818 16:37:32.490869 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:37:32.490885 17389 solver.cpp:244]     Train net output #1: loss = 
0.168583 (* 1 = 0.168583 loss)\nI0818 16:37:32.586275 17389 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 16:39:50.978554 17389 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 16:41:12.446554 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39476\nI0818 16:41:12.446880 17389 solver.cpp:404]     Test net output #1: loss = 4.23502 (* 1 = 4.23502 loss)\nI0818 16:41:13.757941 17389 solver.cpp:228] Iteration 39800, loss = 0.272363\nI0818 16:41:13.757994 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 16:41:13.758013 17389 solver.cpp:244]     Train net output #1: loss = 0.272363 (* 1 = 0.272363 loss)\nI0818 16:41:13.850114 17389 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 16:43:32.148923 17389 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 16:44:53.619839 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45296\nI0818 16:44:53.620154 17389 solver.cpp:404]     Test net output #1: loss = 3.54552 (* 1 = 3.54552 loss)\nI0818 16:44:54.930594 17389 solver.cpp:228] Iteration 39900, loss = 0.163596\nI0818 16:44:54.930645 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:44:54.930667 17389 solver.cpp:244]     Train net output #1: loss = 0.163596 (* 1 = 0.163596 loss)\nI0818 16:44:55.019615 17389 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 16:47:13.294001 17389 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 16:48:34.751554 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40464\nI0818 16:48:34.751878 17389 solver.cpp:404]     Test net output #1: loss = 4.05211 (* 1 = 4.05211 loss)\nI0818 16:48:36.062297 17389 solver.cpp:228] Iteration 40000, loss = 0.239554\nI0818 16:48:36.062351 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:48:36.062368 17389 solver.cpp:244]     Train net output #1: loss = 0.239554 (* 1 = 0.239554 loss)\nI0818 16:48:36.155977 17389 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 16:50:54.474122 17389 
solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 16:52:15.926689 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43564\nI0818 16:52:15.927014 17389 solver.cpp:404]     Test net output #1: loss = 3.59932 (* 1 = 3.59932 loss)\nI0818 16:52:17.237252 17389 solver.cpp:228] Iteration 40100, loss = 0.21997\nI0818 16:52:17.237304 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:52:17.237321 17389 solver.cpp:244]     Train net output #1: loss = 0.21997 (* 1 = 0.21997 loss)\nI0818 16:52:17.327405 17389 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0818 16:54:35.615556 17389 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 16:55:57.054390 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4202\nI0818 16:55:57.054740 17389 solver.cpp:404]     Test net output #1: loss = 4.10218 (* 1 = 4.10218 loss)\nI0818 16:55:58.364017 17389 solver.cpp:228] Iteration 40200, loss = 0.184866\nI0818 16:55:58.364068 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:55:58.364085 17389 solver.cpp:244]     Train net output #1: loss = 0.184866 (* 1 = 0.184866 loss)\nI0818 16:55:58.456264 17389 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 16:58:16.832789 17389 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 16:59:38.293524 17389 solver.cpp:404]     Test net output #0: accuracy = 0.38244\nI0818 16:59:38.293866 17389 solver.cpp:404]     Test net output #1: loss = 4.68014 (* 1 = 4.68014 loss)\nI0818 16:59:39.603433 17389 solver.cpp:228] Iteration 40300, loss = 0.256159\nI0818 16:59:39.603487 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 16:59:39.603503 17389 solver.cpp:244]     Train net output #1: loss = 0.256159 (* 1 = 0.256159 loss)\nI0818 16:59:39.695178 17389 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 17:01:58.087071 17389 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 17:03:19.549276 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.43324\nI0818 17:03:19.549610 17389 solver.cpp:404]     Test net output #1: loss = 3.86949 (* 1 = 3.86949 loss)\nI0818 17:03:20.860378 17389 solver.cpp:228] Iteration 40400, loss = 0.13561\nI0818 17:03:20.860431 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:03:20.860448 17389 solver.cpp:244]     Train net output #1: loss = 0.13561 (* 1 = 0.13561 loss)\nI0818 17:03:20.951242 17389 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 17:05:39.341523 17389 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 17:07:00.806850 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41824\nI0818 17:07:00.807179 17389 solver.cpp:404]     Test net output #1: loss = 4.19509 (* 1 = 4.19509 loss)\nI0818 17:07:02.116827 17389 solver.cpp:228] Iteration 40500, loss = 0.309796\nI0818 17:07:02.116878 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 17:07:02.116896 17389 solver.cpp:244]     Train net output #1: loss = 0.309796 (* 1 = 0.309796 loss)\nI0818 17:07:02.208078 17389 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 17:09:20.572932 17389 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:10:42.049253 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45676\nI0818 17:10:42.049557 17389 solver.cpp:404]     Test net output #1: loss = 3.33085 (* 1 = 3.33085 loss)\nI0818 17:10:43.358873 17389 solver.cpp:228] Iteration 40600, loss = 0.222516\nI0818 17:10:43.358927 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:10:43.358943 17389 solver.cpp:244]     Train net output #1: loss = 0.222516 (* 1 = 0.222516 loss)\nI0818 17:10:43.451838 17389 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 17:13:01.879076 17389 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:14:23.358781 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46384\nI0818 17:14:23.359097 17389 solver.cpp:404]     Test net output #1: loss = 3.47011 (* 1 = 3.47011 loss)\nI0818 17:14:24.669765 
17389 solver.cpp:228] Iteration 40700, loss = 0.216539\nI0818 17:14:24.669819 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 17:14:24.669836 17389 solver.cpp:244]     Train net output #1: loss = 0.216539 (* 1 = 0.216539 loss)\nI0818 17:14:24.758052 17389 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 17:16:43.140882 17389 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:18:04.622339 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39916\nI0818 17:18:04.622699 17389 solver.cpp:404]     Test net output #1: loss = 4.32933 (* 1 = 4.32933 loss)\nI0818 17:18:05.933213 17389 solver.cpp:228] Iteration 40800, loss = 0.208638\nI0818 17:18:05.933267 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:18:05.933285 17389 solver.cpp:244]     Train net output #1: loss = 0.208638 (* 1 = 0.208638 loss)\nI0818 17:18:06.024158 17389 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 17:20:24.328229 17389 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:21:45.875844 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43028\nI0818 17:21:45.876161 17389 solver.cpp:404]     Test net output #1: loss = 3.95598 (* 1 = 3.95598 loss)\nI0818 17:21:47.186812 17389 solver.cpp:228] Iteration 40900, loss = 0.151887\nI0818 17:21:47.186866 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:21:47.186883 17389 solver.cpp:244]     Train net output #1: loss = 0.151887 (* 1 = 0.151887 loss)\nI0818 17:21:47.276728 17389 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 17:24:05.809973 17389 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:25:27.293030 17389 solver.cpp:404]     Test net output #0: accuracy = 0.432\nI0818 17:25:27.293344 17389 solver.cpp:404]     Test net output #1: loss = 3.78428 (* 1 = 3.78428 loss)\nI0818 17:25:28.602617 17389 solver.cpp:228] Iteration 41000, loss = 0.201651\nI0818 17:25:28.602672 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.936\nI0818 17:25:28.602689 17389 solver.cpp:244]     Train net output #1: loss = 0.201651 (* 1 = 0.201651 loss)\nI0818 17:25:28.694576 17389 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 17:27:47.005542 17389 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:29:08.488245 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42996\nI0818 17:29:08.488579 17389 solver.cpp:404]     Test net output #1: loss = 3.78909 (* 1 = 3.78909 loss)\nI0818 17:29:09.799342 17389 solver.cpp:228] Iteration 41100, loss = 0.164392\nI0818 17:29:09.799397 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:29:09.799414 17389 solver.cpp:244]     Train net output #1: loss = 0.164392 (* 1 = 0.164392 loss)\nI0818 17:29:09.892606 17389 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 17:31:28.198797 17389 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 17:32:49.685555 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42276\nI0818 17:32:49.685887 17389 solver.cpp:404]     Test net output #1: loss = 3.88129 (* 1 = 3.88129 loss)\nI0818 17:32:50.997048 17389 solver.cpp:228] Iteration 41200, loss = 0.358534\nI0818 17:32:50.997102 17389 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 17:32:50.997120 17389 solver.cpp:244]     Train net output #1: loss = 0.358534 (* 1 = 0.358534 loss)\nI0818 17:32:51.083547 17389 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0818 17:35:09.566138 17389 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 17:36:31.082141 17389 solver.cpp:404]     Test net output #0: accuracy = 0.417\nI0818 17:36:31.082460 17389 solver.cpp:404]     Test net output #1: loss = 4.27024 (* 1 = 4.27024 loss)\nI0818 17:36:32.392670 17389 solver.cpp:228] Iteration 41300, loss = 0.212591\nI0818 17:36:32.392721 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:36:32.392738 17389 solver.cpp:244]     Train net output #1: loss = 0.212591 (* 1 = 0.212591 loss)\nI0818 17:36:32.479818 
17389 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 17:38:50.832201 17389 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 17:40:12.377698 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42512\nI0818 17:40:12.378008 17389 solver.cpp:404]     Test net output #1: loss = 3.87923 (* 1 = 3.87923 loss)\nI0818 17:40:13.689074 17389 solver.cpp:228] Iteration 41400, loss = 0.189891\nI0818 17:40:13.689129 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:40:13.689146 17389 solver.cpp:244]     Train net output #1: loss = 0.18989 (* 1 = 0.18989 loss)\nI0818 17:40:13.774938 17389 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 17:42:32.162313 17389 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 17:43:53.534365 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41552\nI0818 17:43:53.534644 17389 solver.cpp:404]     Test net output #1: loss = 4.19059 (* 1 = 4.19059 loss)\nI0818 17:43:54.845628 17389 solver.cpp:228] Iteration 41500, loss = 0.185048\nI0818 17:43:54.845685 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 17:43:54.845702 17389 solver.cpp:244]     Train net output #1: loss = 0.185048 (* 1 = 0.185048 loss)\nI0818 17:43:54.936151 17389 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 17:46:13.338193 17389 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 17:47:34.811126 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4392\nI0818 17:47:34.811393 17389 solver.cpp:404]     Test net output #1: loss = 3.52431 (* 1 = 3.52431 loss)\nI0818 17:47:36.122647 17389 solver.cpp:228] Iteration 41600, loss = 0.18116\nI0818 17:47:36.122705 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:47:36.122722 17389 solver.cpp:244]     Train net output #1: loss = 0.18116 (* 1 = 0.18116 loss)\nI0818 17:47:36.210559 17389 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 17:49:54.603603 17389 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 
17:51:16.084285 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44728\nI0818 17:51:16.084581 17389 solver.cpp:404]     Test net output #1: loss = 3.7108 (* 1 = 3.7108 loss)\nI0818 17:51:17.395364 17389 solver.cpp:228] Iteration 41700, loss = 0.309667\nI0818 17:51:17.395418 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 17:51:17.395434 17389 solver.cpp:244]     Train net output #1: loss = 0.309667 (* 1 = 0.309667 loss)\nI0818 17:51:17.483260 17389 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 17:53:35.860438 17389 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 17:54:57.332779 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4256\nI0818 17:54:57.333040 17389 solver.cpp:404]     Test net output #1: loss = 4.0379 (* 1 = 4.0379 loss)\nI0818 17:54:58.644192 17389 solver.cpp:228] Iteration 41800, loss = 0.226138\nI0818 17:54:58.644243 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:54:58.644261 17389 solver.cpp:244]     Train net output #1: loss = 0.226138 (* 1 = 0.226138 loss)\nI0818 17:54:58.731734 17389 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 17:57:17.142956 17389 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 17:58:38.342710 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42324\nI0818 17:58:38.342968 17389 solver.cpp:404]     Test net output #1: loss = 3.80814 (* 1 = 3.80814 loss)\nI0818 17:58:39.652936 17389 solver.cpp:228] Iteration 41900, loss = 0.272561\nI0818 17:58:39.652990 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:58:39.653008 17389 solver.cpp:244]     Train net output #1: loss = 0.272561 (* 1 = 0.272561 loss)\nI0818 17:58:39.749075 17389 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 18:00:58.155103 17389 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 18:02:19.599243 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45456\nI0818 18:02:19.599544 17389 solver.cpp:404]     Test net 
output #1: loss = 3.68044 (* 1 = 3.68044 loss)\nI0818 18:02:20.908998 17389 solver.cpp:228] Iteration 42000, loss = 0.174627\nI0818 18:02:20.909054 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:02:20.909070 17389 solver.cpp:244]     Train net output #1: loss = 0.174626 (* 1 = 0.174626 loss)\nI0818 18:02:21.003509 17389 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 18:04:39.483232 17389 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 18:06:01.000768 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46096\nI0818 18:06:01.001075 17389 solver.cpp:404]     Test net output #1: loss = 3.11888 (* 1 = 3.11888 loss)\nI0818 18:06:02.310636 17389 solver.cpp:228] Iteration 42100, loss = 0.199022\nI0818 18:06:02.310695 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 18:06:02.310714 17389 solver.cpp:244]     Train net output #1: loss = 0.199022 (* 1 = 0.199022 loss)\nI0818 18:06:02.405189 17389 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 18:08:20.899991 17389 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:09:42.376996 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44016\nI0818 18:09:42.377250 17389 solver.cpp:404]     Test net output #1: loss = 3.86214 (* 1 = 3.86214 loss)\nI0818 18:09:43.687309 17389 solver.cpp:228] Iteration 42200, loss = 0.226091\nI0818 18:09:43.687363 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:09:43.687381 17389 solver.cpp:244]     Train net output #1: loss = 0.226091 (* 1 = 0.226091 loss)\nI0818 18:09:43.780827 17389 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 18:12:02.154193 17389 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:13:23.641127 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43564\nI0818 18:13:23.641403 17389 solver.cpp:404]     Test net output #1: loss = 4.04486 (* 1 = 4.04486 loss)\nI0818 18:13:24.951839 17389 solver.cpp:228] Iteration 42300, loss = 0.0926837\nI0818 
18:13:24.951894 17389 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:13:24.951910 17389 solver.cpp:244]     Train net output #1: loss = 0.0926835 (* 1 = 0.0926835 loss)\nI0818 18:13:25.049085 17389 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 18:15:43.578177 17389 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:17:04.951567 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44572\nI0818 18:17:04.951838 17389 solver.cpp:404]     Test net output #1: loss = 3.55289 (* 1 = 3.55289 loss)\nI0818 18:17:06.262343 17389 solver.cpp:228] Iteration 42400, loss = 0.15257\nI0818 18:17:06.262398 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 18:17:06.262414 17389 solver.cpp:244]     Train net output #1: loss = 0.152569 (* 1 = 0.152569 loss)\nI0818 18:17:06.355360 17389 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 18:19:24.854558 17389 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:20:46.274870 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46876\nI0818 18:20:46.275144 17389 solver.cpp:404]     Test net output #1: loss = 3.30299 (* 1 = 3.30299 loss)\nI0818 18:20:47.586524 17389 solver.cpp:228] Iteration 42500, loss = 0.145916\nI0818 18:20:47.586578 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:20:47.586594 17389 solver.cpp:244]     Train net output #1: loss = 0.145916 (* 1 = 0.145916 loss)\nI0818 18:20:47.672718 17389 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 18:23:06.017124 17389 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:24:27.323626 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43084\nI0818 18:24:27.323906 17389 solver.cpp:404]     Test net output #1: loss = 3.82626 (* 1 = 3.82626 loss)\nI0818 18:24:28.634213 17389 solver.cpp:228] Iteration 42600, loss = 0.242619\nI0818 18:24:28.634260 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:24:28.634279 17389 solver.cpp:244]     Train 
net output #1: loss = 0.242619 (* 1 = 0.242619 loss)\nI0818 18:24:28.722439 17389 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 18:26:47.062556 17389 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:28:08.403431 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45084\nI0818 18:28:08.403700 17389 solver.cpp:404]     Test net output #1: loss = 3.48149 (* 1 = 3.48149 loss)\nI0818 18:28:09.714768 17389 solver.cpp:228] Iteration 42700, loss = 0.237313\nI0818 18:28:09.714818 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:28:09.714834 17389 solver.cpp:244]     Train net output #1: loss = 0.237313 (* 1 = 0.237313 loss)\nI0818 18:28:09.803043 17389 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 18:30:28.121265 17389 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 18:31:49.357012 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43612\nI0818 18:31:49.357300 17389 solver.cpp:404]     Test net output #1: loss = 3.85544 (* 1 = 3.85544 loss)\nI0818 18:31:50.668220 17389 solver.cpp:228] Iteration 42800, loss = 0.234562\nI0818 18:31:50.668272 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 18:31:50.668288 17389 solver.cpp:244]     Train net output #1: loss = 0.234562 (* 1 = 0.234562 loss)\nI0818 18:31:50.753937 17389 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 18:34:09.098987 17389 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 18:35:30.281210 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43012\nI0818 18:35:30.281467 17389 solver.cpp:404]     Test net output #1: loss = 3.98974 (* 1 = 3.98974 loss)\nI0818 18:35:31.591716 17389 solver.cpp:228] Iteration 42900, loss = 0.163354\nI0818 18:35:31.591766 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:35:31.591784 17389 solver.cpp:244]     Train net output #1: loss = 0.163354 (* 1 = 0.163354 loss)\nI0818 18:35:31.677346 17389 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 
18:37:49.999192 17389 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 18:39:11.405820 17389 solver.cpp:404]     Test net output #0: accuracy = 0.39208\nI0818 18:39:11.406077 17389 solver.cpp:404]     Test net output #1: loss = 4.24003 (* 1 = 4.24003 loss)\nI0818 18:39:12.715991 17389 solver.cpp:228] Iteration 43000, loss = 0.224253\nI0818 18:39:12.716040 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:39:12.716056 17389 solver.cpp:244]     Train net output #1: loss = 0.224253 (* 1 = 0.224253 loss)\nI0818 18:39:12.807924 17389 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 18:41:31.161360 17389 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 18:42:52.541375 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43356\nI0818 18:42:52.541674 17389 solver.cpp:404]     Test net output #1: loss = 3.66931 (* 1 = 3.66931 loss)\nI0818 18:42:53.851213 17389 solver.cpp:228] Iteration 43100, loss = 0.199401\nI0818 18:42:53.851264 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:42:53.851281 17389 solver.cpp:244]     Train net output #1: loss = 0.199401 (* 1 = 0.199401 loss)\nI0818 18:42:53.942834 17389 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 18:45:12.300374 17389 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 18:46:33.575258 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41912\nI0818 18:46:33.575485 17389 solver.cpp:404]     Test net output #1: loss = 4.40211 (* 1 = 4.40211 loss)\nI0818 18:46:34.885443 17389 solver.cpp:228] Iteration 43200, loss = 0.218792\nI0818 18:46:34.885493 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:46:34.885511 17389 solver.cpp:244]     Train net output #1: loss = 0.218792 (* 1 = 0.218792 loss)\nI0818 18:46:34.980022 17389 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 18:48:53.384985 17389 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 18:50:14.421538 17389 solver.cpp:404]     Test net output 
#0: accuracy = 0.47576\nI0818 18:50:14.421844 17389 solver.cpp:404]     Test net output #1: loss = 3.47081 (* 1 = 3.47081 loss)\nI0818 18:50:15.732558 17389 solver.cpp:228] Iteration 43300, loss = 0.185821\nI0818 18:50:15.732610 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:50:15.732627 17389 solver.cpp:244]     Train net output #1: loss = 0.18582 (* 1 = 0.18582 loss)\nI0818 18:50:15.821601 17389 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 18:52:34.191280 17389 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 18:53:55.292436 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4124\nI0818 18:53:55.292695 17389 solver.cpp:404]     Test net output #1: loss = 4.36218 (* 1 = 4.36218 loss)\nI0818 18:53:56.602632 17389 solver.cpp:228] Iteration 43400, loss = 0.264977\nI0818 18:53:56.602680 17389 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 18:53:56.602702 17389 solver.cpp:244]     Train net output #1: loss = 0.264977 (* 1 = 0.264977 loss)\nI0818 18:53:56.695837 17389 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 18:56:15.060956 17389 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 18:57:36.506340 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43972\nI0818 18:57:36.506583 17389 solver.cpp:404]     Test net output #1: loss = 3.98448 (* 1 = 3.98448 loss)\nI0818 18:57:37.816023 17389 solver.cpp:228] Iteration 43500, loss = 0.204338\nI0818 18:57:37.816077 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 18:57:37.816094 17389 solver.cpp:244]     Train net output #1: loss = 0.204337 (* 1 = 0.204337 loss)\nI0818 18:57:37.911679 17389 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 18:59:56.352826 17389 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 19:01:17.798161 17389 solver.cpp:404]     Test net output #0: accuracy = 0.47044\nI0818 19:01:17.798466 17389 solver.cpp:404]     Test net output #1: loss = 3.4122 (* 1 = 3.4122 loss)\nI0818 
19:01:19.110654 17389 solver.cpp:228] Iteration 43600, loss = 0.148562\nI0818 19:01:19.110711 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:01:19.110735 17389 solver.cpp:244]     Train net output #1: loss = 0.148561 (* 1 = 0.148561 loss)\nI0818 19:01:19.200202 17389 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 19:03:37.588243 17389 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 19:04:58.921974 17389 solver.cpp:404]     Test net output #0: accuracy = 0.48048\nI0818 19:04:58.922230 17389 solver.cpp:404]     Test net output #1: loss = 2.95931 (* 1 = 2.95931 loss)\nI0818 19:05:00.234490 17389 solver.cpp:228] Iteration 43700, loss = 0.13269\nI0818 19:05:00.234550 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:05:00.234576 17389 solver.cpp:244]     Train net output #1: loss = 0.13269 (* 1 = 0.13269 loss)\nI0818 19:05:00.321202 17389 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 19:07:18.769368 17389 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:08:40.078785 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4234\nI0818 19:08:40.079058 17389 solver.cpp:404]     Test net output #1: loss = 3.92221 (* 1 = 3.92221 loss)\nI0818 19:08:41.390105 17389 solver.cpp:228] Iteration 43800, loss = 0.156246\nI0818 19:08:41.390162 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:08:41.390187 17389 solver.cpp:244]     Train net output #1: loss = 0.156245 (* 1 = 0.156245 loss)\nI0818 19:08:41.484987 17389 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 19:10:59.866477 17389 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:12:20.851485 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44976\nI0818 19:12:20.851786 17389 solver.cpp:404]     Test net output #1: loss = 3.65306 (* 1 = 3.65306 loss)\nI0818 19:12:22.161098 17389 solver.cpp:228] Iteration 43900, loss = 0.22325\nI0818 19:12:22.161151 17389 solver.cpp:244]     Train net output #0: 
accuracy = 0.912\nI0818 19:12:22.161168 17389 solver.cpp:244]     Train net output #1: loss = 0.22325 (* 1 = 0.22325 loss)\nI0818 19:12:22.257784 17389 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 19:14:40.657268 17389 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:16:02.030478 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45468\nI0818 19:16:02.030727 17389 solver.cpp:404]     Test net output #1: loss = 3.67862 (* 1 = 3.67862 loss)\nI0818 19:16:03.340503 17389 solver.cpp:228] Iteration 44000, loss = 0.142728\nI0818 19:16:03.340556 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:16:03.340574 17389 solver.cpp:244]     Train net output #1: loss = 0.142728 (* 1 = 0.142728 loss)\nI0818 19:16:03.429222 17389 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 19:18:21.884866 17389 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:19:43.334594 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4732\nI0818 19:19:43.334868 17389 solver.cpp:404]     Test net output #1: loss = 3.11338 (* 1 = 3.11338 loss)\nI0818 19:19:44.645270 17389 solver.cpp:228] Iteration 44100, loss = 0.245256\nI0818 19:19:44.645323 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:19:44.645339 17389 solver.cpp:244]     Train net output #1: loss = 0.245256 (* 1 = 0.245256 loss)\nI0818 19:19:44.742764 17389 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 19:22:03.255995 17389 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:23:24.468583 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4514\nI0818 19:23:24.468875 17389 solver.cpp:404]     Test net output #1: loss = 3.62294 (* 1 = 3.62294 loss)\nI0818 19:23:25.778591 17389 solver.cpp:228] Iteration 44200, loss = 0.169513\nI0818 19:23:25.778642 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:23:25.778658 17389 solver.cpp:244]     Train net output #1: loss = 0.169512 (* 1 = 0.169512 loss)\nI0818 
19:23:25.869313 17389 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 19:25:44.152715 17389 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:27:05.424638 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44628\nI0818 19:27:05.424882 17389 solver.cpp:404]     Test net output #1: loss = 3.55511 (* 1 = 3.55511 loss)\nI0818 19:27:06.734205 17389 solver.cpp:228] Iteration 44300, loss = 0.182206\nI0818 19:27:06.734253 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:27:06.734271 17389 solver.cpp:244]     Train net output #1: loss = 0.182205 (* 1 = 0.182205 loss)\nI0818 19:27:06.822916 17389 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 19:29:25.164325 17389 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 19:30:46.375850 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4482\nI0818 19:30:46.376116 17389 solver.cpp:404]     Test net output #1: loss = 3.62854 (* 1 = 3.62854 loss)\nI0818 19:30:47.685942 17389 solver.cpp:228] Iteration 44400, loss = 0.194818\nI0818 19:30:47.685979 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:30:47.685995 17389 solver.cpp:244]     Train net output #1: loss = 0.194818 (* 1 = 0.194818 loss)\nI0818 19:30:47.773221 17389 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 19:33:06.123157 17389 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 19:34:27.466598 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45388\nI0818 19:34:27.466874 17389 solver.cpp:404]     Test net output #1: loss = 3.60532 (* 1 = 3.60532 loss)\nI0818 19:34:28.777113 17389 solver.cpp:228] Iteration 44500, loss = 0.181393\nI0818 19:34:28.777161 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:34:28.777179 17389 solver.cpp:244]     Train net output #1: loss = 0.181393 (* 1 = 0.181393 loss)\nI0818 19:34:28.865090 17389 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 19:36:47.276983 17389 solver.cpp:337] Iteration 44600, 
Testing net (#0)\nI0818 19:38:08.504739 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46912\nI0818 19:38:08.504989 17389 solver.cpp:404]     Test net output #1: loss = 3.07117 (* 1 = 3.07117 loss)\nI0818 19:38:09.815021 17389 solver.cpp:228] Iteration 44600, loss = 0.232902\nI0818 19:38:09.815073 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 19:38:09.815088 17389 solver.cpp:244]     Train net output #1: loss = 0.232901 (* 1 = 0.232901 loss)\nI0818 19:38:09.905515 17389 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 19:40:28.554298 17389 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 19:41:49.819128 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41732\nI0818 19:41:49.819418 17389 solver.cpp:404]     Test net output #1: loss = 3.84963 (* 1 = 3.84963 loss)\nI0818 19:41:51.129586 17389 solver.cpp:228] Iteration 44700, loss = 0.17288\nI0818 19:41:51.129638 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:41:51.129655 17389 solver.cpp:244]     Train net output #1: loss = 0.172879 (* 1 = 0.172879 loss)\nI0818 19:41:51.219681 17389 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 19:44:09.867441 17389 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 19:45:31.081141 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4352\nI0818 19:45:31.081394 17389 solver.cpp:404]     Test net output #1: loss = 3.95589 (* 1 = 3.95589 loss)\nI0818 19:45:32.391238 17389 solver.cpp:228] Iteration 44800, loss = 0.217295\nI0818 19:45:32.391289 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 19:45:32.391305 17389 solver.cpp:244]     Train net output #1: loss = 0.217294 (* 1 = 0.217294 loss)\nI0818 19:45:32.480854 17389 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 19:47:51.123617 17389 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 19:49:12.260332 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43364\nI0818 19:49:12.260599 17389 
solver.cpp:404]     Test net output #1: loss = 4.12915 (* 1 = 4.12915 loss)\nI0818 19:49:13.570578 17389 solver.cpp:228] Iteration 44900, loss = 0.151311\nI0818 19:49:13.570628 17389 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:49:13.570646 17389 solver.cpp:244]     Train net output #1: loss = 0.151311 (* 1 = 0.151311 loss)\nI0818 19:49:13.663769 17389 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 19:51:32.250264 17389 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 19:52:53.437744 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44852\nI0818 19:52:53.438041 17389 solver.cpp:404]     Test net output #1: loss = 3.44516 (* 1 = 3.44516 loss)\nI0818 19:52:54.747274 17389 solver.cpp:228] Iteration 45000, loss = 0.0952325\nI0818 19:52:54.747311 17389 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:52:54.747326 17389 solver.cpp:244]     Train net output #1: loss = 0.0952323 (* 1 = 0.0952323 loss)\nI0818 19:52:54.882102 17389 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 19:55:13.483739 17389 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 19:56:34.907840 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46788\nI0818 19:56:34.908082 17389 solver.cpp:404]     Test net output #1: loss = 3.24052 (* 1 = 3.24052 loss)\nI0818 19:56:36.217658 17389 solver.cpp:228] Iteration 45100, loss = 0.16624\nI0818 19:56:36.217708 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 19:56:36.217725 17389 solver.cpp:244]     Train net output #1: loss = 0.16624 (* 1 = 0.16624 loss)\nI0818 19:56:36.315160 17389 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 19:58:54.923015 17389 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 20:00:16.382267 17389 solver.cpp:404]     Test net output #0: accuracy = 0.449\nI0818 20:00:16.382562 17389 solver.cpp:404]     Test net output #1: loss = 3.48883 (* 1 = 3.48883 loss)\nI0818 20:00:17.696509 17389 solver.cpp:228] Iteration 45200, 
loss = 0.328593\nI0818 20:00:17.696557 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 20:00:17.696574 17389 solver.cpp:244]     Train net output #1: loss = 0.328593 (* 1 = 0.328593 loss)\nI0818 20:00:17.782997 17389 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 20:02:36.360570 17389 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 20:03:57.795349 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44856\nI0818 20:03:57.795672 17389 solver.cpp:404]     Test net output #1: loss = 3.41341 (* 1 = 3.41341 loss)\nI0818 20:03:59.105227 17389 solver.cpp:228] Iteration 45300, loss = 0.101537\nI0818 20:03:59.105279 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:03:59.105296 17389 solver.cpp:244]     Train net output #1: loss = 0.101537 (* 1 = 0.101537 loss)\nI0818 20:03:59.196611 17389 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 20:06:17.822105 17389 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 20:07:38.809031 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46332\nI0818 20:07:38.809257 17389 solver.cpp:404]     Test net output #1: loss = 3.48968 (* 1 = 3.48968 loss)\nI0818 20:07:40.119267 17389 solver.cpp:228] Iteration 45400, loss = 0.203179\nI0818 20:07:40.119318 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:07:40.119336 17389 solver.cpp:244]     Train net output #1: loss = 0.203179 (* 1 = 0.203179 loss)\nI0818 20:07:40.215132 17389 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 20:09:58.834867 17389 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:11:19.938940 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4366\nI0818 20:11:19.939184 17389 solver.cpp:404]     Test net output #1: loss = 3.93023 (* 1 = 3.93023 loss)\nI0818 20:11:21.249838 17389 solver.cpp:228] Iteration 45500, loss = 0.187579\nI0818 20:11:21.249877 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 20:11:21.249892 17389 
solver.cpp:244]     Train net output #1: loss = 0.187579 (* 1 = 0.187579 loss)\nI0818 20:11:21.344477 17389 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 20:13:39.833976 17389 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:15:00.899343 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4404\nI0818 20:15:00.899603 17389 solver.cpp:404]     Test net output #1: loss = 3.85328 (* 1 = 3.85328 loss)\nI0818 20:15:02.210285 17389 solver.cpp:228] Iteration 45600, loss = 0.135686\nI0818 20:15:02.210335 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:15:02.210352 17389 solver.cpp:244]     Train net output #1: loss = 0.135685 (* 1 = 0.135685 loss)\nI0818 20:15:02.301913 17389 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 20:17:20.876400 17389 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:18:41.868695 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42956\nI0818 20:18:41.868934 17389 solver.cpp:404]     Test net output #1: loss = 4.02943 (* 1 = 4.02943 loss)\nI0818 20:18:43.179249 17389 solver.cpp:228] Iteration 45700, loss = 0.238888\nI0818 20:18:43.179301 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:18:43.179318 17389 solver.cpp:244]     Train net output #1: loss = 0.238888 (* 1 = 0.238888 loss)\nI0818 20:18:43.267268 17389 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 20:21:01.832010 17389 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:22:22.767532 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42396\nI0818 20:22:22.767856 17389 solver.cpp:404]     Test net output #1: loss = 4.13153 (* 1 = 4.13153 loss)\nI0818 20:22:24.078955 17389 solver.cpp:228] Iteration 45800, loss = 0.149032\nI0818 20:22:24.079007 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:22:24.079032 17389 solver.cpp:244]     Train net output #1: loss = 0.149032 (* 1 = 0.149032 loss)\nI0818 20:22:24.168442 17389 sgd_solver.cpp:166] Iteration 
45800, lr = 0.35\nI0818 20:24:42.707842 17389 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:26:04.145407 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4744\nI0818 20:26:04.145674 17389 solver.cpp:404]     Test net output #1: loss = 3.27804 (* 1 = 3.27804 loss)\nI0818 20:26:05.455818 17389 solver.cpp:228] Iteration 45900, loss = 0.180375\nI0818 20:26:05.455871 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:26:05.455888 17389 solver.cpp:244]     Train net output #1: loss = 0.180375 (* 1 = 0.180375 loss)\nI0818 20:26:05.547916 17389 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 20:28:24.005511 17389 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 20:29:45.383185 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46048\nI0818 20:29:45.383456 17389 solver.cpp:404]     Test net output #1: loss = 3.39607 (* 1 = 3.39607 loss)\nI0818 20:29:46.693361 17389 solver.cpp:228] Iteration 46000, loss = 0.275144\nI0818 20:29:46.693413 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 20:29:46.693429 17389 solver.cpp:244]     Train net output #1: loss = 0.275144 (* 1 = 0.275144 loss)\nI0818 20:29:46.786653 17389 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 20:32:05.259665 17389 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 20:33:26.343143 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45936\nI0818 20:33:26.343400 17389 solver.cpp:404]     Test net output #1: loss = 3.39551 (* 1 = 3.39551 loss)\nI0818 20:33:27.653754 17389 solver.cpp:228] Iteration 46100, loss = 0.122736\nI0818 20:33:27.653807 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:33:27.653825 17389 solver.cpp:244]     Train net output #1: loss = 0.122736 (* 1 = 0.122736 loss)\nI0818 20:33:27.744177 17389 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 20:35:46.262925 17389 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 20:37:07.659869 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.44328\nI0818 20:37:07.660130 17389 solver.cpp:404]     Test net output #1: loss = 3.80341 (* 1 = 3.80341 loss)\nI0818 20:37:08.970624 17389 solver.cpp:228] Iteration 46200, loss = 0.190842\nI0818 20:37:08.970664 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:37:08.970680 17389 solver.cpp:244]     Train net output #1: loss = 0.190842 (* 1 = 0.190842 loss)\nI0818 20:37:09.067234 17389 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 20:39:27.585001 17389 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 20:40:48.689680 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44328\nI0818 20:40:48.689937 17389 solver.cpp:404]     Test net output #1: loss = 3.75675 (* 1 = 3.75675 loss)\nI0818 20:40:50.000056 17389 solver.cpp:228] Iteration 46300, loss = 0.220266\nI0818 20:40:50.000109 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:40:50.000126 17389 solver.cpp:244]     Train net output #1: loss = 0.220266 (* 1 = 0.220266 loss)\nI0818 20:40:50.090045 17389 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 20:43:08.684132 17389 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 20:44:29.820986 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4262\nI0818 20:44:29.821215 17389 solver.cpp:404]     Test net output #1: loss = 3.91752 (* 1 = 3.91752 loss)\nI0818 20:44:31.130791 17389 solver.cpp:228] Iteration 46400, loss = 0.160744\nI0818 20:44:31.130841 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:44:31.130859 17389 solver.cpp:244]     Train net output #1: loss = 0.160743 (* 1 = 0.160743 loss)\nI0818 20:44:31.220731 17389 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 20:46:49.742842 17389 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 20:48:10.966588 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46856\nI0818 20:48:10.966845 17389 solver.cpp:404]     Test net output #1: loss = 
3.28518 (* 1 = 3.28518 loss)\nI0818 20:48:12.277020 17389 solver.cpp:228] Iteration 46500, loss = 0.206311\nI0818 20:48:12.277072 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 20:48:12.277089 17389 solver.cpp:244]     Train net output #1: loss = 0.206311 (* 1 = 0.206311 loss)\nI0818 20:48:12.370892 17389 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 20:50:30.893751 17389 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 20:51:52.331104 17389 solver.cpp:404]     Test net output #0: accuracy = 0.36956\nI0818 20:51:52.331382 17389 solver.cpp:404]     Test net output #1: loss = 4.79705 (* 1 = 4.79705 loss)\nI0818 20:51:53.641376 17389 solver.cpp:228] Iteration 46600, loss = 0.314493\nI0818 20:51:53.641424 17389 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 20:51:53.641441 17389 solver.cpp:244]     Train net output #1: loss = 0.314493 (* 1 = 0.314493 loss)\nI0818 20:51:53.729950 17389 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 20:54:12.251384 17389 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 20:55:33.463239 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42352\nI0818 20:55:33.463500 17389 solver.cpp:404]     Test net output #1: loss = 3.84993 (* 1 = 3.84993 loss)\nI0818 20:55:34.773308 17389 solver.cpp:228] Iteration 46700, loss = 0.192336\nI0818 20:55:34.773357 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:55:34.773375 17389 solver.cpp:244]     Train net output #1: loss = 0.192336 (* 1 = 0.192336 loss)\nI0818 20:55:34.869817 17389 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 20:57:53.429509 17389 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 20:59:14.838346 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44072\nI0818 20:59:14.838605 17389 solver.cpp:404]     Test net output #1: loss = 3.57348 (* 1 = 3.57348 loss)\nI0818 20:59:16.148952 17389 solver.cpp:228] Iteration 46800, loss = 0.177977\nI0818 20:59:16.148998 17389 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:59:16.149015 17389 solver.cpp:244]     Train net output #1: loss = 0.177976 (* 1 = 0.177976 loss)\nI0818 20:59:16.235898 17389 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0818 21:01:34.839406 17389 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 21:02:56.139283 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4282\nI0818 21:02:56.139569 17389 solver.cpp:404]     Test net output #1: loss = 3.68623 (* 1 = 3.68623 loss)\nI0818 21:02:57.449455 17389 solver.cpp:228] Iteration 46900, loss = 0.143068\nI0818 21:02:57.449504 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:02:57.449520 17389 solver.cpp:244]     Train net output #1: loss = 0.143068 (* 1 = 0.143068 loss)\nI0818 21:02:57.545770 17389 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 21:05:16.150530 17389 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 21:06:37.517421 17389 solver.cpp:404]     Test net output #0: accuracy = 0.435\nI0818 21:06:37.517700 17389 solver.cpp:404]     Test net output #1: loss = 3.5982 (* 1 = 3.5982 loss)\nI0818 21:06:38.827502 17389 solver.cpp:228] Iteration 47000, loss = 0.172401\nI0818 21:06:38.827550 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:06:38.827569 17389 solver.cpp:244]     Train net output #1: loss = 0.172401 (* 1 = 0.172401 loss)\nI0818 21:06:38.926654 17389 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 21:08:57.529417 17389 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:10:18.983116 17389 solver.cpp:404]     Test net output #0: accuracy = 0.3866\nI0818 21:10:18.983383 17389 solver.cpp:404]     Test net output #1: loss = 4.39434 (* 1 = 4.39434 loss)\nI0818 21:10:20.293467 17389 solver.cpp:228] Iteration 47100, loss = 0.145023\nI0818 21:10:20.293516 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:10:20.293534 17389 solver.cpp:244]     Train net output #1: loss = 
0.145023 (* 1 = 0.145023 loss)\nI0818 21:10:20.387398 17389 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 21:12:38.988909 17389 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:14:00.467290 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44816\nI0818 21:14:00.467571 17389 solver.cpp:404]     Test net output #1: loss = 3.6709 (* 1 = 3.6709 loss)\nI0818 21:14:01.777617 17389 solver.cpp:228] Iteration 47200, loss = 0.128881\nI0818 21:14:01.777665 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:14:01.777681 17389 solver.cpp:244]     Train net output #1: loss = 0.128881 (* 1 = 0.128881 loss)\nI0818 21:14:01.867172 17389 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 21:16:20.504868 17389 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:17:41.780905 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44632\nI0818 21:17:41.781173 17389 solver.cpp:404]     Test net output #1: loss = 3.54226 (* 1 = 3.54226 loss)\nI0818 21:17:43.091203 17389 solver.cpp:228] Iteration 47300, loss = 0.125553\nI0818 21:17:43.091241 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:17:43.091258 17389 solver.cpp:244]     Train net output #1: loss = 0.125553 (* 1 = 0.125553 loss)\nI0818 21:17:43.189280 17389 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 21:20:01.708467 17389 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:21:22.998896 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43588\nI0818 21:21:22.999168 17389 solver.cpp:404]     Test net output #1: loss = 3.63525 (* 1 = 3.63525 loss)\nI0818 21:21:24.308902 17389 solver.cpp:228] Iteration 47400, loss = 0.0942357\nI0818 21:21:24.308948 17389 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:21:24.308966 17389 solver.cpp:244]     Train net output #1: loss = 0.0942356 (* 1 = 0.0942356 loss)\nI0818 21:21:24.399185 17389 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 21:23:42.999383 17389 
solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:25:04.296669 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41116\nI0818 21:25:04.296952 17389 solver.cpp:404]     Test net output #1: loss = 3.98879 (* 1 = 3.98879 loss)\nI0818 21:25:05.607511 17389 solver.cpp:228] Iteration 47500, loss = 0.169469\nI0818 21:25:05.607559 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:25:05.607575 17389 solver.cpp:244]     Train net output #1: loss = 0.169469 (* 1 = 0.169469 loss)\nI0818 21:25:05.704071 17389 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 21:27:24.329182 17389 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:28:45.815968 17389 solver.cpp:404]     Test net output #0: accuracy = 0.43796\nI0818 21:28:45.816226 17389 solver.cpp:404]     Test net output #1: loss = 3.58594 (* 1 = 3.58594 loss)\nI0818 21:28:47.126375 17389 solver.cpp:228] Iteration 47600, loss = 0.243437\nI0818 21:28:47.126422 17389 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 21:28:47.126438 17389 solver.cpp:244]     Train net output #1: loss = 0.243437 (* 1 = 0.243437 loss)\nI0818 21:28:47.218497 17389 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 21:31:05.818821 17389 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 21:32:27.293323 17389 solver.cpp:404]     Test net output #0: accuracy = 0.48908\nI0818 21:32:27.293623 17389 solver.cpp:404]     Test net output #1: loss = 3.02915 (* 1 = 3.02915 loss)\nI0818 21:32:28.603992 17389 solver.cpp:228] Iteration 47700, loss = 0.234336\nI0818 21:32:28.604040 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 21:32:28.604058 17389 solver.cpp:244]     Train net output #1: loss = 0.234336 (* 1 = 0.234336 loss)\nI0818 21:32:28.697746 17389 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 21:34:47.217751 17389 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 21:36:08.653435 17389 solver.cpp:404]     Test net output #0: accuracy = 
0.43404\nI0818 21:36:08.653700 17389 solver.cpp:404]     Test net output #1: loss = 3.47432 (* 1 = 3.47432 loss)\nI0818 21:36:09.966313 17389 solver.cpp:228] Iteration 47800, loss = 0.247657\nI0818 21:36:09.966359 17389 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 21:36:09.966375 17389 solver.cpp:244]     Train net output #1: loss = 0.247657 (* 1 = 0.247657 loss)\nI0818 21:36:10.053830 17389 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 21:38:28.551786 17389 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 21:39:50.027848 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46152\nI0818 21:39:50.028134 17389 solver.cpp:404]     Test net output #1: loss = 3.57271 (* 1 = 3.57271 loss)\nI0818 21:39:51.340111 17389 solver.cpp:228] Iteration 47900, loss = 0.204871\nI0818 21:39:51.340158 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:39:51.340174 17389 solver.cpp:244]     Train net output #1: loss = 0.204871 (* 1 = 0.204871 loss)\nI0818 21:39:51.431255 17389 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 21:42:10.114382 17389 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 21:43:31.263615 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46104\nI0818 21:43:31.263890 17389 solver.cpp:404]     Test net output #1: loss = 3.46503 (* 1 = 3.46503 loss)\nI0818 21:43:32.575870 17389 solver.cpp:228] Iteration 48000, loss = 0.219194\nI0818 21:43:32.575925 17389 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 21:43:32.575942 17389 solver.cpp:244]     Train net output #1: loss = 0.219194 (* 1 = 0.219194 loss)\nI0818 21:43:32.666252 17389 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 21:45:51.184820 17389 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 21:47:12.648052 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44112\nI0818 21:47:12.648301 17389 solver.cpp:404]     Test net output #1: loss = 3.87875 (* 1 = 3.87875 loss)\nI0818 21:47:13.958240 
17389 solver.cpp:228] Iteration 48100, loss = 0.0835568\nI0818 21:47:13.958293 17389 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:47:13.958312 17389 solver.cpp:244]     Train net output #1: loss = 0.0835567 (* 1 = 0.0835567 loss)\nI0818 21:47:14.051326 17389 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 21:49:32.562692 17389 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 21:50:53.938423 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44512\nI0818 21:50:53.938710 17389 solver.cpp:404]     Test net output #1: loss = 3.81053 (* 1 = 3.81053 loss)\nI0818 21:50:55.249135 17389 solver.cpp:228] Iteration 48200, loss = 0.184876\nI0818 21:50:55.249186 17389 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:50:55.249202 17389 solver.cpp:244]     Train net output #1: loss = 0.184876 (* 1 = 0.184876 loss)\nI0818 21:50:55.341403 17389 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 21:53:13.859329 17389 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 21:54:35.148852 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42364\nI0818 21:54:35.149101 17389 solver.cpp:404]     Test net output #1: loss = 4.15931 (* 1 = 4.15931 loss)\nI0818 21:54:36.460278 17389 solver.cpp:228] Iteration 48300, loss = 0.164602\nI0818 21:54:36.460332 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:54:36.460350 17389 solver.cpp:244]     Train net output #1: loss = 0.164601 (* 1 = 0.164601 loss)\nI0818 21:54:36.548341 17389 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 21:56:55.049723 17389 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 21:58:16.150399 17389 solver.cpp:404]     Test net output #0: accuracy = 0.47636\nI0818 21:58:16.150682 17389 solver.cpp:404]     Test net output #1: loss = 3.1725 (* 1 = 3.1725 loss)\nI0818 21:58:17.461334 17389 solver.cpp:228] Iteration 48400, loss = 0.126898\nI0818 21:58:17.461387 17389 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI0818 21:58:17.461405 17389 solver.cpp:244]     Train net output #1: loss = 0.126898 (* 1 = 0.126898 loss)\nI0818 21:58:17.552140 17389 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 22:00:36.036370 17389 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 22:01:57.238569 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44568\nI0818 22:01:57.238870 17389 solver.cpp:404]     Test net output #1: loss = 3.7789 (* 1 = 3.7789 loss)\nI0818 22:01:58.549721 17389 solver.cpp:228] Iteration 48500, loss = 0.242914\nI0818 22:01:58.549772 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:01:58.549789 17389 solver.cpp:244]     Train net output #1: loss = 0.242914 (* 1 = 0.242914 loss)\nI0818 22:01:58.639974 17389 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 22:04:17.139456 17389 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 22:05:38.551710 17389 solver.cpp:404]     Test net output #0: accuracy = 0.40284\nI0818 22:05:38.551982 17389 solver.cpp:404]     Test net output #1: loss = 4.14593 (* 1 = 4.14593 loss)\nI0818 22:05:39.863440 17389 solver.cpp:228] Iteration 48600, loss = 0.199213\nI0818 22:05:39.863479 17389 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:05:39.863495 17389 solver.cpp:244]     Train net output #1: loss = 0.199213 (* 1 = 0.199213 loss)\nI0818 22:05:39.951370 17389 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 22:07:58.454481 17389 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:09:19.841465 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45588\nI0818 22:09:19.841724 17389 solver.cpp:404]     Test net output #1: loss = 3.55611 (* 1 = 3.55611 loss)\nI0818 22:09:21.152262 17389 solver.cpp:228] Iteration 48700, loss = 0.108641\nI0818 22:09:21.152302 17389 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:09:21.152319 17389 solver.cpp:244]     Train net output #1: loss = 0.108641 (* 1 = 0.108641 loss)\nI0818 22:09:21.246532 
17389 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 22:11:39.756381 17389 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 22:13:01.053997 17389 solver.cpp:404]     Test net output #0: accuracy = 0.46136\nI0818 22:13:01.054297 17389 solver.cpp:404]     Test net output #1: loss = 3.40318 (* 1 = 3.40318 loss)\nI0818 22:13:02.365224 17389 solver.cpp:228] Iteration 48800, loss = 0.153186\nI0818 22:13:02.365275 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:13:02.365293 17389 solver.cpp:244]     Train net output #1: loss = 0.153186 (* 1 = 0.153186 loss)\nI0818 22:13:02.454663 17389 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 22:15:21.109630 17389 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:16:42.336942 17389 solver.cpp:404]     Test net output #0: accuracy = 0.47196\nI0818 22:16:42.337219 17389 solver.cpp:404]     Test net output #1: loss = 3.14093 (* 1 = 3.14093 loss)\nI0818 22:16:43.646664 17389 solver.cpp:228] Iteration 48900, loss = 0.161785\nI0818 22:16:43.646720 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:16:43.646736 17389 solver.cpp:244]     Train net output #1: loss = 0.161785 (* 1 = 0.161785 loss)\nI0818 22:16:43.741293 17389 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 22:19:02.179786 17389 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:20:23.384219 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41956\nI0818 22:20:23.384500 17389 solver.cpp:404]     Test net output #1: loss = 4.13187 (* 1 = 4.13187 loss)\nI0818 22:20:24.694273 17389 solver.cpp:228] Iteration 49000, loss = 0.205019\nI0818 22:20:24.694325 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 22:20:24.694342 17389 solver.cpp:244]     Train net output #1: loss = 0.205019 (* 1 = 0.205019 loss)\nI0818 22:20:24.787426 17389 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 22:22:43.291167 17389 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 
22:24:04.654132 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44584\nI0818 22:24:04.654386 17389 solver.cpp:404]     Test net output #1: loss = 3.60267 (* 1 = 3.60267 loss)\nI0818 22:24:05.963960 17389 solver.cpp:228] Iteration 49100, loss = 0.17821\nI0818 22:24:05.964009 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:24:05.964025 17389 solver.cpp:244]     Train net output #1: loss = 0.178209 (* 1 = 0.178209 loss)\nI0818 22:24:06.061465 17389 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 22:26:24.687850 17389 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:27:46.175359 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44664\nI0818 22:27:46.175612 17389 solver.cpp:404]     Test net output #1: loss = 3.37504 (* 1 = 3.37504 loss)\nI0818 22:27:47.485707 17389 solver.cpp:228] Iteration 49200, loss = 0.136523\nI0818 22:27:47.485746 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:27:47.485762 17389 solver.cpp:244]     Train net output #1: loss = 0.136522 (* 1 = 0.136522 loss)\nI0818 22:27:47.574669 17389 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0818 22:30:06.146844 17389 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 22:31:27.633718 17389 solver.cpp:404]     Test net output #0: accuracy = 0.436\nI0818 22:31:27.633991 17389 solver.cpp:404]     Test net output #1: loss = 3.6847 (* 1 = 3.6847 loss)\nI0818 22:31:28.943740 17389 solver.cpp:228] Iteration 49300, loss = 0.147915\nI0818 22:31:28.943794 17389 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:31:28.943810 17389 solver.cpp:244]     Train net output #1: loss = 0.147915 (* 1 = 0.147915 loss)\nI0818 22:31:29.039353 17389 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 22:33:47.510516 17389 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 22:35:08.976608 17389 solver.cpp:404]     Test net output #0: accuracy = 0.44792\nI0818 22:35:08.976881 17389 solver.cpp:404]     Test net 
output #1: loss = 3.60093 (* 1 = 3.60093 loss)\nI0818 22:35:10.286245 17389 solver.cpp:228] Iteration 49400, loss = 0.171539\nI0818 22:35:10.286298 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:35:10.286315 17389 solver.cpp:244]     Train net output #1: loss = 0.171539 (* 1 = 0.171539 loss)\nI0818 22:35:10.377055 17389 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 22:37:28.873740 17389 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 22:38:49.990130 17389 solver.cpp:404]     Test net output #0: accuracy = 0.45684\nI0818 22:38:49.990406 17389 solver.cpp:404]     Test net output #1: loss = 3.44703 (* 1 = 3.44703 loss)\nI0818 22:38:51.300309 17389 solver.cpp:228] Iteration 49500, loss = 0.168619\nI0818 22:38:51.300362 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:38:51.300379 17389 solver.cpp:244]     Train net output #1: loss = 0.168618 (* 1 = 0.168618 loss)\nI0818 22:38:51.395977 17389 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 22:41:09.878955 17389 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 22:42:31.040449 17389 solver.cpp:404]     Test net output #0: accuracy = 0.42064\nI0818 22:42:31.040760 17389 solver.cpp:404]     Test net output #1: loss = 3.90892 (* 1 = 3.90892 loss)\nI0818 22:42:32.351775 17389 solver.cpp:228] Iteration 49600, loss = 0.155736\nI0818 22:42:32.351828 17389 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:42:32.351845 17389 solver.cpp:244]     Train net output #1: loss = 0.155736 (* 1 = 0.155736 loss)\nI0818 22:42:32.444113 17389 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 22:44:51.111218 17389 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 22:46:12.593147 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4266\nI0818 22:46:12.593379 17389 solver.cpp:404]     Test net output #1: loss = 3.5447 (* 1 = 3.5447 loss)\nI0818 22:46:13.904024 17389 solver.cpp:228] Iteration 49700, loss = 0.116959\nI0818 
22:46:13.904064 17389 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:46:13.904080 17389 solver.cpp:244]     Train net output #1: loss = 0.116959 (* 1 = 0.116959 loss)\nI0818 22:46:13.991168 17389 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 22:48:32.478034 17389 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 22:49:53.963050 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41636\nI0818 22:49:53.963306 17389 solver.cpp:404]     Test net output #1: loss = 3.8785 (* 1 = 3.8785 loss)\nI0818 22:49:55.274178 17389 solver.cpp:228] Iteration 49800, loss = 0.152949\nI0818 22:49:55.274232 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:49:55.274250 17389 solver.cpp:244]     Train net output #1: loss = 0.152948 (* 1 = 0.152948 loss)\nI0818 22:49:55.367063 17389 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 22:52:14.181957 17389 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 22:53:35.500208 17389 solver.cpp:404]     Test net output #0: accuracy = 0.4178\nI0818 22:53:35.500470 17389 solver.cpp:404]     Test net output #1: loss = 3.97244 (* 1 = 3.97244 loss)\nI0818 22:53:36.811453 17389 solver.cpp:228] Iteration 49900, loss = 0.228132\nI0818 22:53:36.811508 17389 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 22:53:36.811525 17389 solver.cpp:244]     Train net output #1: loss = 0.228131 (* 1 = 0.228131 loss)\nI0818 22:53:36.909113 17389 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 22:55:55.932070 17389 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 22:57:17.389734 17389 solver.cpp:404]     Test net output #0: accuracy = 0.41632\nI0818 22:57:17.389992 17389 solver.cpp:404]     Test net output #1: loss = 3.72304 (* 1 = 3.72304 loss)\nI0818 22:57:18.700954 17389 solver.cpp:228] Iteration 50000, loss = 0.171885\nI0818 22:57:18.700991 17389 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:57:18.701007 17389 solver.cpp:244]     Train net 
output #1: loss = 0.171885 (* 1 = 0.171885 loss)\nI0818 22:57:18.801856 17389 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 22:57:18.801880 17389 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 22:59:37.721909 17389 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 23:00:59.196681 17389 solver.cpp:404]     Test net output #0: accuracy = 0.53688\nI0818 23:00:59.196954 17389 solver.cpp:404]     Test net output #1: loss = 2.542 (* 1 = 2.542 loss)\nI0818 23:01:00.507853 17389 solver.cpp:228] Iteration 50100, loss = 0.0369269\nI0818 23:01:00.507894 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:01:00.507908 17389 solver.cpp:244]     Train net output #1: loss = 0.0369267 (* 1 = 0.0369267 loss)\nI0818 23:01:00.602250 17389 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 23:03:19.480170 17389 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 23:04:40.605084 17389 solver.cpp:404]     Test net output #0: accuracy = 0.55284\nI0818 23:04:40.605314 17389 solver.cpp:404]     Test net output #1: loss = 2.40631 (* 1 = 2.40631 loss)\nI0818 23:04:41.916734 17389 solver.cpp:228] Iteration 50200, loss = 0.0204081\nI0818 23:04:41.916771 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:04:41.916788 17389 solver.cpp:244]     Train net output #1: loss = 0.0204079 (* 1 = 0.0204079 loss)\nI0818 23:04:42.011107 17389 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 23:07:00.896375 17389 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:08:21.888514 17389 solver.cpp:404]     Test net output #0: accuracy = 0.56104\nI0818 23:08:21.888789 17389 solver.cpp:404]     Test net output #1: loss = 2.3365 (* 1 = 2.3365 loss)\nI0818 23:08:23.199290 17389 solver.cpp:228] Iteration 50300, loss = 0.0124395\nI0818 23:08:23.199342 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:08:23.199359 17389 solver.cpp:244]     Train net output #1: loss = 0.0124393 (* 1 = 0.0124393 
loss)\nI0818 23:08:23.293751 17389 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 23:10:42.203763 17389 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:12:03.345818 17389 solver.cpp:404]     Test net output #0: accuracy = 0.56212\nI0818 23:12:03.346096 17389 solver.cpp:404]     Test net output #1: loss = 2.33373 (* 1 = 2.33373 loss)\nI0818 23:12:04.656963 17389 solver.cpp:228] Iteration 50400, loss = 0.00879799\nI0818 23:12:04.657002 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:12:04.657018 17389 solver.cpp:244]     Train net output #1: loss = 0.00879775 (* 1 = 0.00879775 loss)\nI0818 23:12:04.749207 17389 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 23:14:23.673996 17389 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:15:45.196661 17389 solver.cpp:404]     Test net output #0: accuracy = 0.56896\nI0818 23:15:45.196928 17389 solver.cpp:404]     Test net output #1: loss = 2.28812 (* 1 = 2.28812 loss)\nI0818 23:15:46.506729 17389 solver.cpp:228] Iteration 50500, loss = 0.00756357\nI0818 23:15:46.506783 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:15:46.506799 17389 solver.cpp:244]     Train net output #1: loss = 0.00756333 (* 1 = 0.00756333 loss)\nI0818 23:15:46.603338 17389 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 23:18:05.623446 17389 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:19:27.166322 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5638\nI0818 23:19:27.166589 17389 solver.cpp:404]     Test net output #1: loss = 2.31928 (* 1 = 2.31928 loss)\nI0818 23:19:28.477706 17389 solver.cpp:228] Iteration 50600, loss = 0.0073094\nI0818 23:19:28.477763 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:19:28.477789 17389 solver.cpp:244]     Train net output #1: loss = 0.00730915 (* 1 = 0.00730915 loss)\nI0818 23:19:28.568792 17389 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 23:21:47.474400 17389 solver.cpp:337] 
Iteration 50700, Testing net (#0)\nI0818 23:23:09.000018 17389 solver.cpp:404]     Test net output #0: accuracy = 0.56848\nI0818 23:23:09.000331 17389 solver.cpp:404]     Test net output #1: loss = 2.28216 (* 1 = 2.28216 loss)\nI0818 23:23:10.311012 17389 solver.cpp:228] Iteration 50700, loss = 0.00563033\nI0818 23:23:10.311055 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:23:10.311079 17389 solver.cpp:244]     Train net output #1: loss = 0.00563009 (* 1 = 0.00563009 loss)\nI0818 23:23:10.412106 17389 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 23:25:29.292423 17389 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:26:50.814203 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5594\nI0818 23:26:50.814479 17389 solver.cpp:404]     Test net output #1: loss = 2.32563 (* 1 = 2.32563 loss)\nI0818 23:26:52.125233 17389 solver.cpp:228] Iteration 50800, loss = 0.00631608\nI0818 23:26:52.125288 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:26:52.125306 17389 solver.cpp:244]     Train net output #1: loss = 0.00631584 (* 1 = 0.00631584 loss)\nI0818 23:26:52.220306 17389 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0818 23:29:11.090850 17389 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 23:30:32.206316 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5626\nI0818 23:30:32.206595 17389 solver.cpp:404]     Test net output #1: loss = 2.30433 (* 1 = 2.30433 loss)\nI0818 23:30:33.516820 17389 solver.cpp:228] Iteration 50900, loss = 0.00572197\nI0818 23:30:33.516861 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:30:33.516877 17389 solver.cpp:244]     Train net output #1: loss = 0.00572172 (* 1 = 0.00572172 loss)\nI0818 23:30:33.614178 17389 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0818 23:32:52.521116 17389 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 23:34:13.751775 17389 solver.cpp:404]     Test net output #0: accuracy = 0.55908\nI0818 
23:34:13.752012 17389 solver.cpp:404]     Test net output #1: loss = 2.34558 (* 1 = 2.34558 loss)\nI0818 23:34:15.062649 17389 solver.cpp:228] Iteration 51000, loss = 0.00576698\nI0818 23:34:15.062710 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:34:15.062726 17389 solver.cpp:244]     Train net output #1: loss = 0.00576673 (* 1 = 0.00576673 loss)\nI0818 23:34:15.162380 17389 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0818 23:36:34.012332 17389 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 23:37:55.248870 17389 solver.cpp:404]     Test net output #0: accuracy = 0.55744\nI0818 23:37:55.249195 17389 solver.cpp:404]     Test net output #1: loss = 2.32516 (* 1 = 2.32516 loss)\nI0818 23:37:56.560297 17389 solver.cpp:228] Iteration 51100, loss = 0.00520822\nI0818 23:37:56.560338 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:37:56.560353 17389 solver.cpp:244]     Train net output #1: loss = 0.00520798 (* 1 = 0.00520798 loss)\nI0818 23:37:56.652743 17389 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0818 23:40:15.498466 17389 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 23:41:37.012645 17389 solver.cpp:404]     Test net output #0: accuracy = 0.55352\nI0818 23:41:37.012961 17389 solver.cpp:404]     Test net output #1: loss = 2.38084 (* 1 = 2.38084 loss)\nI0818 23:41:38.322657 17389 solver.cpp:228] Iteration 51200, loss = 0.00497325\nI0818 23:41:38.322713 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:41:38.322731 17389 solver.cpp:244]     Train net output #1: loss = 0.004973 (* 1 = 0.004973 loss)\nI0818 23:41:38.416254 17389 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0818 23:43:57.304621 17389 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 23:45:18.804110 17389 solver.cpp:404]     Test net output #0: accuracy = 0.55444\nI0818 23:45:18.804433 17389 solver.cpp:404]     Test net output #1: loss = 2.35699 (* 1 = 2.35699 loss)\nI0818 23:45:20.114481 17389 
solver.cpp:228] Iteration 51300, loss = 0.00534719\nI0818 23:45:20.114523 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:45:20.114539 17389 solver.cpp:244]     Train net output #1: loss = 0.00534695 (* 1 = 0.00534695 loss)\nI0818 23:45:20.205634 17389 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0818 23:47:39.027353 17389 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0818 23:49:00.534066 17389 solver.cpp:404]     Test net output #0: accuracy = 0.54872\nI0818 23:49:00.534420 17389 solver.cpp:404]     Test net output #1: loss = 2.40551 (* 1 = 2.40551 loss)\nI0818 23:49:01.845719 17389 solver.cpp:228] Iteration 51400, loss = 0.00389908\nI0818 23:49:01.845770 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:49:01.845788 17389 solver.cpp:244]     Train net output #1: loss = 0.00389884 (* 1 = 0.00389884 loss)\nI0818 23:49:01.936163 17389 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0818 23:51:20.777938 17389 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0818 23:52:42.285630 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5486\nI0818 23:52:42.285980 17389 solver.cpp:404]     Test net output #1: loss = 2.38756 (* 1 = 2.38756 loss)\nI0818 23:52:43.596570 17389 solver.cpp:228] Iteration 51500, loss = 0.00517084\nI0818 23:52:43.596621 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:52:43.596635 17389 solver.cpp:244]     Train net output #1: loss = 0.0051706 (* 1 = 0.0051706 loss)\nI0818 23:52:43.688555 17389 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0818 23:55:02.553380 17389 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0818 23:56:24.062180 17389 solver.cpp:404]     Test net output #0: accuracy = 0.54572\nI0818 23:56:24.062531 17389 solver.cpp:404]     Test net output #1: loss = 2.42588 (* 1 = 2.42588 loss)\nI0818 23:56:25.372006 17389 solver.cpp:228] Iteration 51600, loss = 0.0044263\nI0818 23:56:25.372056 17389 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0818 23:56:25.372072 17389 solver.cpp:244]     Train net output #1: loss = 0.00442606 (* 1 = 0.00442606 loss)\nI0818 23:56:25.465993 17389 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0818 23:58:44.292208 17389 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:00:05.807112 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5464\nI0819 00:00:05.807422 17389 solver.cpp:404]     Test net output #1: loss = 2.40612 (* 1 = 2.40612 loss)\nI0819 00:00:07.117110 17389 solver.cpp:228] Iteration 51700, loss = 0.00512402\nI0819 00:00:07.117161 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:00:07.117178 17389 solver.cpp:244]     Train net output #1: loss = 0.00512378 (* 1 = 0.00512378 loss)\nI0819 00:00:07.210031 17389 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0819 00:02:26.009788 17389 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:03:47.521917 17389 solver.cpp:404]     Test net output #0: accuracy = 0.53948\nI0819 00:03:47.522253 17389 solver.cpp:404]     Test net output #1: loss = 2.46181 (* 1 = 2.46181 loss)\nI0819 00:03:48.832316 17389 solver.cpp:228] Iteration 51800, loss = 0.00431922\nI0819 00:03:48.832366 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:03:48.832383 17389 solver.cpp:244]     Train net output #1: loss = 0.00431897 (* 1 = 0.00431897 loss)\nI0819 00:03:48.927531 17389 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0819 00:06:07.773536 17389 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 00:07:29.295372 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5408\nI0819 00:07:29.295704 17389 solver.cpp:404]     Test net output #1: loss = 2.43517 (* 1 = 2.43517 loss)\nI0819 00:07:30.605986 17389 solver.cpp:228] Iteration 51900, loss = 0.00352658\nI0819 00:07:30.606037 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:07:30.606053 17389 solver.cpp:244]     Train net output #1: loss = 0.00352634 (* 1 = 0.00352634 loss)\nI0819 
00:07:30.701951 17389 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0819 00:09:49.729609 17389 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:11:11.260942 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5366\nI0819 00:11:11.261260 17389 solver.cpp:404]     Test net output #1: loss = 2.48168 (* 1 = 2.48168 loss)\nI0819 00:11:12.573727 17389 solver.cpp:228] Iteration 52000, loss = 0.00459537\nI0819 00:11:12.573767 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:11:12.573782 17389 solver.cpp:244]     Train net output #1: loss = 0.00459513 (* 1 = 0.00459513 loss)\nI0819 00:11:12.663872 17389 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0819 00:13:31.646039 17389 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:14:53.185891 17389 solver.cpp:404]     Test net output #0: accuracy = 0.53828\nI0819 00:14:53.186220 17389 solver.cpp:404]     Test net output #1: loss = 2.45232 (* 1 = 2.45232 loss)\nI0819 00:14:54.499797 17389 solver.cpp:228] Iteration 52100, loss = 0.00453657\nI0819 00:14:54.499835 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:14:54.499851 17389 solver.cpp:244]     Train net output #1: loss = 0.00453633 (* 1 = 0.00453633 loss)\nI0819 00:14:54.586804 17389 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0819 00:17:13.391759 17389 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:18:34.926373 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5326\nI0819 00:18:34.926718 17389 solver.cpp:404]     Test net output #1: loss = 2.49476 (* 1 = 2.49476 loss)\nI0819 00:18:36.240527 17389 solver.cpp:228] Iteration 52200, loss = 0.00403246\nI0819 00:18:36.240566 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:18:36.240581 17389 solver.cpp:244]     Train net output #1: loss = 0.00403222 (* 1 = 0.00403222 loss)\nI0819 00:18:36.327811 17389 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0819 00:20:55.113909 17389 solver.cpp:337] Iteration 
52300, Testing net (#0)\nI0819 00:22:16.656906 17389 solver.cpp:404]     Test net output #0: accuracy = 0.53404\nI0819 00:22:16.657248 17389 solver.cpp:404]     Test net output #1: loss = 2.46606 (* 1 = 2.46606 loss)\nI0819 00:22:17.969727 17389 solver.cpp:228] Iteration 52300, loss = 0.00468611\nI0819 00:22:17.969777 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:22:17.969794 17389 solver.cpp:244]     Train net output #1: loss = 0.00468587 (* 1 = 0.00468587 loss)\nI0819 00:22:18.061004 17389 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0819 00:24:36.929983 17389 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 00:25:58.470044 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5308\nI0819 00:25:58.470373 17389 solver.cpp:404]     Test net output #1: loss = 2.51994 (* 1 = 2.51994 loss)\nI0819 00:25:59.782968 17389 solver.cpp:228] Iteration 52400, loss = 0.00344326\nI0819 00:25:59.783020 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:25:59.783036 17389 solver.cpp:244]     Train net output #1: loss = 0.00344301 (* 1 = 0.00344301 loss)\nI0819 00:25:59.880112 17389 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0819 00:28:18.787145 17389 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 00:29:40.315176 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5308\nI0819 00:29:40.315502 17389 solver.cpp:404]     Test net output #1: loss = 2.48904 (* 1 = 2.48904 loss)\nI0819 00:29:41.628309 17389 solver.cpp:228] Iteration 52500, loss = 0.00474488\nI0819 00:29:41.628362 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:29:41.628379 17389 solver.cpp:244]     Train net output #1: loss = 0.00474463 (* 1 = 0.00474463 loss)\nI0819 00:29:41.717931 17389 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0819 00:32:00.575192 17389 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 00:33:22.062582 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52752\nI0819 
00:33:22.062943 17389 solver.cpp:404]     Test net output #1: loss = 2.52716 (* 1 = 2.52716 loss)\nI0819 00:33:23.375135 17389 solver.cpp:228] Iteration 52600, loss = 0.00425947\nI0819 00:33:23.375175 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:33:23.375190 17389 solver.cpp:244]     Train net output #1: loss = 0.00425922 (* 1 = 0.00425922 loss)\nI0819 00:33:23.467252 17389 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0819 00:35:42.359645 17389 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 00:37:03.837033 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52928\nI0819 00:37:03.837303 17389 solver.cpp:404]     Test net output #1: loss = 2.48746 (* 1 = 2.48746 loss)\nI0819 00:37:05.151311 17389 solver.cpp:228] Iteration 52700, loss = 0.00349315\nI0819 00:37:05.151350 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:37:05.151365 17389 solver.cpp:244]     Train net output #1: loss = 0.0034929 (* 1 = 0.0034929 loss)\nI0819 00:37:05.240708 17389 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0819 00:39:24.124733 17389 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 00:40:45.637853 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52536\nI0819 00:40:45.638159 17389 solver.cpp:404]     Test net output #1: loss = 2.53088 (* 1 = 2.53088 loss)\nI0819 00:40:46.950713 17389 solver.cpp:228] Iteration 52800, loss = 0.00405246\nI0819 00:40:46.950752 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:40:46.950769 17389 solver.cpp:244]     Train net output #1: loss = 0.00405222 (* 1 = 0.00405222 loss)\nI0819 00:40:47.040063 17389 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0819 00:43:05.752357 17389 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 00:44:26.535223 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52792\nI0819 00:44:26.535514 17389 solver.cpp:404]     Test net output #1: loss = 2.49039 (* 1 = 2.49039 loss)\nI0819 00:44:27.848603 17389 
solver.cpp:228] Iteration 52900, loss = 0.00446387\nI0819 00:44:27.848659 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:44:27.848677 17389 solver.cpp:244]     Train net output #1: loss = 0.00446362 (* 1 = 0.00446362 loss)\nI0819 00:44:27.938349 17389 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0819 00:46:46.743595 17389 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 00:48:07.628978 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5238\nI0819 00:48:07.629277 17389 solver.cpp:404]     Test net output #1: loss = 2.5309 (* 1 = 2.5309 loss)\nI0819 00:48:08.943336 17389 solver.cpp:228] Iteration 53000, loss = 0.00443859\nI0819 00:48:08.943389 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:48:08.943408 17389 solver.cpp:244]     Train net output #1: loss = 0.00443835 (* 1 = 0.00443835 loss)\nI0819 00:48:09.032294 17389 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0819 00:50:27.532975 17389 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 00:51:48.421051 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52576\nI0819 00:51:48.421329 17389 solver.cpp:404]     Test net output #1: loss = 2.49819 (* 1 = 2.49819 loss)\nI0819 00:51:49.734983 17389 solver.cpp:228] Iteration 53100, loss = 0.00365679\nI0819 00:51:49.735033 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:51:49.735049 17389 solver.cpp:244]     Train net output #1: loss = 0.00365655 (* 1 = 0.00365655 loss)\nI0819 00:51:49.826967 17389 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0819 00:54:08.357203 17389 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 00:55:29.860282 17389 solver.cpp:404]     Test net output #0: accuracy = 0.524\nI0819 00:55:29.860559 17389 solver.cpp:404]     Test net output #1: loss = 2.52968 (* 1 = 2.52968 loss)\nI0819 00:55:31.172976 17389 solver.cpp:228] Iteration 53200, loss = 0.0034703\nI0819 00:55:31.173024 17389 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0819 00:55:31.173040 17389 solver.cpp:244]     Train net output #1: loss = 0.00347006 (* 1 = 0.00347006 loss)\nI0819 00:55:31.265174 17389 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0819 00:57:49.789022 17389 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 00:59:11.273545 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52548\nI0819 00:59:11.273810 17389 solver.cpp:404]     Test net output #1: loss = 2.49224 (* 1 = 2.49224 loss)\nI0819 00:59:12.587543 17389 solver.cpp:228] Iteration 53300, loss = 0.00378351\nI0819 00:59:12.587589 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:59:12.587606 17389 solver.cpp:244]     Train net output #1: loss = 0.00378326 (* 1 = 0.00378326 loss)\nI0819 00:59:12.679812 17389 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0819 01:01:31.294235 17389 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:02:52.723199 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52296\nI0819 01:02:52.723517 17389 solver.cpp:404]     Test net output #1: loss = 2.53741 (* 1 = 2.53741 loss)\nI0819 01:02:54.036770 17389 solver.cpp:228] Iteration 53400, loss = 0.00418801\nI0819 01:02:54.036824 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:02:54.036841 17389 solver.cpp:244]     Train net output #1: loss = 0.00418776 (* 1 = 0.00418776 loss)\nI0819 01:02:54.126235 17389 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0819 01:05:12.662236 17389 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:06:33.844040 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5252\nI0819 01:06:33.844285 17389 solver.cpp:404]     Test net output #1: loss = 2.5008 (* 1 = 2.5008 loss)\nI0819 01:06:35.157539 17389 solver.cpp:228] Iteration 53500, loss = 0.00412187\nI0819 01:06:35.157588 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:06:35.157603 17389 solver.cpp:244]     Train net output #1: loss = 0.00412163 (* 1 = 0.00412163 loss)\nI0819 
01:06:35.244122 17389 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0819 01:08:53.725528 17389 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:10:14.844097 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52184\nI0819 01:10:14.844400 17389 solver.cpp:404]     Test net output #1: loss = 2.53528 (* 1 = 2.53528 loss)\nI0819 01:10:16.157799 17389 solver.cpp:228] Iteration 53600, loss = 0.00411075\nI0819 01:10:16.157848 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:10:16.157866 17389 solver.cpp:244]     Train net output #1: loss = 0.0041105 (* 1 = 0.0041105 loss)\nI0819 01:10:16.249279 17389 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0819 01:12:34.772128 17389 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:13:55.996940 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52468\nI0819 01:13:55.997210 17389 solver.cpp:404]     Test net output #1: loss = 2.4902 (* 1 = 2.4902 loss)\nI0819 01:13:57.310441 17389 solver.cpp:228] Iteration 53700, loss = 0.00404443\nI0819 01:13:57.310492 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:13:57.310508 17389 solver.cpp:244]     Train net output #1: loss = 0.00404419 (* 1 = 0.00404419 loss)\nI0819 01:13:57.399121 17389 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0819 01:16:16.056571 17389 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:17:37.364300 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52132\nI0819 01:17:37.364547 17389 solver.cpp:404]     Test net output #1: loss = 2.52937 (* 1 = 2.52937 loss)\nI0819 01:17:38.676105 17389 solver.cpp:228] Iteration 53800, loss = 0.00329767\nI0819 01:17:38.676156 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:17:38.676174 17389 solver.cpp:244]     Train net output #1: loss = 0.00329743 (* 1 = 0.00329743 loss)\nI0819 01:17:38.771311 17389 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0819 01:19:57.327838 17389 solver.cpp:337] Iteration 53900, 
Testing net (#0)\nI0819 01:21:18.323192 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52324\nI0819 01:21:18.323485 17389 solver.cpp:404]     Test net output #1: loss = 2.49331 (* 1 = 2.49331 loss)\nI0819 01:21:19.636106 17389 solver.cpp:228] Iteration 53900, loss = 0.00349515\nI0819 01:21:19.636162 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:21:19.636178 17389 solver.cpp:244]     Train net output #1: loss = 0.0034949 (* 1 = 0.0034949 loss)\nI0819 01:21:19.727169 17389 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0819 01:23:38.228705 17389 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:24:59.576759 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52016\nI0819 01:24:59.577039 17389 solver.cpp:404]     Test net output #1: loss = 2.53069 (* 1 = 2.53069 loss)\nI0819 01:25:00.889874 17389 solver.cpp:228] Iteration 54000, loss = 0.00428121\nI0819 01:25:00.889928 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:25:00.889945 17389 solver.cpp:244]     Train net output #1: loss = 0.00428096 (* 1 = 0.00428096 loss)\nI0819 01:25:00.982019 17389 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0819 01:27:19.457360 17389 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:28:40.637296 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52252\nI0819 01:28:40.637542 17389 solver.cpp:404]     Test net output #1: loss = 2.49224 (* 1 = 2.49224 loss)\nI0819 01:28:41.950439 17389 solver.cpp:228] Iteration 54100, loss = 0.00319546\nI0819 01:28:41.950494 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:28:41.950510 17389 solver.cpp:244]     Train net output #1: loss = 0.00319521 (* 1 = 0.00319521 loss)\nI0819 01:28:42.043442 17389 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0819 01:31:00.580158 17389 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:32:21.902391 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5208\nI0819 01:32:21.902710 
17389 solver.cpp:404]     Test net output #1: loss = 2.52864 (* 1 = 2.52864 loss)\nI0819 01:32:23.216105 17389 solver.cpp:228] Iteration 54200, loss = 0.00370354\nI0819 01:32:23.216161 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:32:23.216177 17389 solver.cpp:244]     Train net output #1: loss = 0.00370329 (* 1 = 0.00370329 loss)\nI0819 01:32:23.307778 17389 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0819 01:34:41.828423 17389 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 01:36:03.290869 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52208\nI0819 01:36:03.291147 17389 solver.cpp:404]     Test net output #1: loss = 2.48861 (* 1 = 2.48861 loss)\nI0819 01:36:04.604180 17389 solver.cpp:228] Iteration 54300, loss = 0.00319605\nI0819 01:36:04.604230 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:36:04.604248 17389 solver.cpp:244]     Train net output #1: loss = 0.00319581 (* 1 = 0.00319581 loss)\nI0819 01:36:04.689913 17389 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0819 01:38:23.272058 17389 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:39:44.732064 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52168\nI0819 01:39:44.732319 17389 solver.cpp:404]     Test net output #1: loss = 2.51499 (* 1 = 2.51499 loss)\nI0819 01:39:46.044337 17389 solver.cpp:228] Iteration 54400, loss = 0.00408224\nI0819 01:39:46.044385 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:39:46.044402 17389 solver.cpp:244]     Train net output #1: loss = 0.004082 (* 1 = 0.004082 loss)\nI0819 01:39:46.132540 17389 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0819 01:42:04.736599 17389 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:43:26.185905 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52308\nI0819 01:43:26.186166 17389 solver.cpp:404]     Test net output #1: loss = 2.47732 (* 1 = 2.47732 loss)\nI0819 01:43:27.499326 17389 solver.cpp:228] 
Iteration 54500, loss = 0.00349002\nI0819 01:43:27.499375 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:43:27.499392 17389 solver.cpp:244]     Train net output #1: loss = 0.00348978 (* 1 = 0.00348978 loss)\nI0819 01:43:27.588641 17389 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0819 01:45:46.094696 17389 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 01:47:07.558616 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51996\nI0819 01:47:07.558897 17389 solver.cpp:404]     Test net output #1: loss = 2.51313 (* 1 = 2.51313 loss)\nI0819 01:47:08.872454 17389 solver.cpp:228] Iteration 54600, loss = 0.00396578\nI0819 01:47:08.872503 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:47:08.872520 17389 solver.cpp:244]     Train net output #1: loss = 0.00396554 (* 1 = 0.00396554 loss)\nI0819 01:47:08.959442 17389 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0819 01:49:27.599869 17389 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 01:50:48.846734 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52104\nI0819 01:50:48.846979 17389 solver.cpp:404]     Test net output #1: loss = 2.48292 (* 1 = 2.48292 loss)\nI0819 01:50:50.160567 17389 solver.cpp:228] Iteration 54700, loss = 0.00420308\nI0819 01:50:50.160616 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:50:50.160632 17389 solver.cpp:244]     Train net output #1: loss = 0.00420283 (* 1 = 0.00420283 loss)\nI0819 01:50:50.254014 17389 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0819 01:53:08.918470 17389 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 01:54:30.407196 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51896\nI0819 01:54:30.407483 17389 solver.cpp:404]     Test net output #1: loss = 2.51248 (* 1 = 2.51248 loss)\nI0819 01:54:31.721557 17389 solver.cpp:228] Iteration 54800, loss = 0.00388728\nI0819 01:54:31.721609 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
01:54:31.721633 17389 solver.cpp:244]     Train net output #1: loss = 0.00388703 (* 1 = 0.00388703 loss)\nI0819 01:54:31.806656 17389 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0819 01:56:50.416853 17389 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 01:58:11.895844 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52196\nI0819 01:58:11.896189 17389 solver.cpp:404]     Test net output #1: loss = 2.47247 (* 1 = 2.47247 loss)\nI0819 01:58:13.210170 17389 solver.cpp:228] Iteration 54900, loss = 0.00408407\nI0819 01:58:13.210223 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:58:13.210247 17389 solver.cpp:244]     Train net output #1: loss = 0.00408382 (* 1 = 0.00408382 loss)\nI0819 01:58:13.299739 17389 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0819 02:00:31.886348 17389 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 02:01:53.032991 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52036\nI0819 02:01:53.033315 17389 solver.cpp:404]     Test net output #1: loss = 2.50476 (* 1 = 2.50476 loss)\nI0819 02:01:54.347211 17389 solver.cpp:228] Iteration 55000, loss = 0.00372896\nI0819 02:01:54.347263 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:01:54.347288 17389 solver.cpp:244]     Train net output #1: loss = 0.00372871 (* 1 = 0.00372871 loss)\nI0819 02:01:54.436044 17389 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0819 02:04:13.025043 17389 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 02:05:34.401763 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52104\nI0819 02:05:34.402122 17389 solver.cpp:404]     Test net output #1: loss = 2.47234 (* 1 = 2.47234 loss)\nI0819 02:05:35.715615 17389 solver.cpp:228] Iteration 55100, loss = 0.00372205\nI0819 02:05:35.715670 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:05:35.715694 17389 solver.cpp:244]     Train net output #1: loss = 0.00372181 (* 1 = 0.00372181 loss)\nI0819 02:05:35.806511 
17389 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0819 02:07:54.297554 17389 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:09:15.750452 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51824\nI0819 02:09:15.750717 17389 solver.cpp:404]     Test net output #1: loss = 2.50452 (* 1 = 2.50452 loss)\nI0819 02:09:17.064596 17389 solver.cpp:228] Iteration 55200, loss = 0.00387098\nI0819 02:09:17.064649 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:09:17.064673 17389 solver.cpp:244]     Train net output #1: loss = 0.00387074 (* 1 = 0.00387074 loss)\nI0819 02:09:17.149240 17389 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0819 02:11:35.716372 17389 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:12:57.215754 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52136\nI0819 02:12:57.216058 17389 solver.cpp:404]     Test net output #1: loss = 2.46587 (* 1 = 2.46587 loss)\nI0819 02:12:58.528833 17389 solver.cpp:228] Iteration 55300, loss = 0.00392263\nI0819 02:12:58.528888 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:12:58.528914 17389 solver.cpp:244]     Train net output #1: loss = 0.00392239 (* 1 = 0.00392239 loss)\nI0819 02:12:58.673445 17389 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0819 02:15:17.286062 17389 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:16:38.563544 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51904\nI0819 02:16:38.563793 17389 solver.cpp:404]     Test net output #1: loss = 2.49746 (* 1 = 2.49746 loss)\nI0819 02:16:39.877355 17389 solver.cpp:228] Iteration 55400, loss = 0.00465135\nI0819 02:16:39.877406 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:16:39.877430 17389 solver.cpp:244]     Train net output #1: loss = 0.0046511 (* 1 = 0.0046511 loss)\nI0819 02:16:39.964582 17389 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0819 02:18:58.535173 17389 solver.cpp:337] Iteration 55500, Testing net 
(#0)\nI0819 02:20:19.762836 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52044\nI0819 02:20:19.763136 17389 solver.cpp:404]     Test net output #1: loss = 2.46502 (* 1 = 2.46502 loss)\nI0819 02:20:21.077210 17389 solver.cpp:228] Iteration 55500, loss = 0.00385632\nI0819 02:20:21.077265 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:20:21.077287 17389 solver.cpp:244]     Train net output #1: loss = 0.00385608 (* 1 = 0.00385608 loss)\nI0819 02:20:21.167382 17389 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0819 02:22:39.739431 17389 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:24:01.191751 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51936\nI0819 02:24:01.192083 17389 solver.cpp:404]     Test net output #1: loss = 2.48833 (* 1 = 2.48833 loss)\nI0819 02:24:02.503262 17389 solver.cpp:228] Iteration 55600, loss = 0.00358282\nI0819 02:24:02.503312 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:24:02.503336 17389 solver.cpp:244]     Train net output #1: loss = 0.00358257 (* 1 = 0.00358257 loss)\nI0819 02:24:02.593858 17389 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0819 02:26:21.076942 17389 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 02:27:42.579962 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5218\nI0819 02:27:42.580308 17389 solver.cpp:404]     Test net output #1: loss = 2.45223 (* 1 = 2.45223 loss)\nI0819 02:27:43.890411 17389 solver.cpp:228] Iteration 55700, loss = 0.00366393\nI0819 02:27:43.890466 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:27:43.890492 17389 solver.cpp:244]     Train net output #1: loss = 0.00366369 (* 1 = 0.00366369 loss)\nI0819 02:27:43.984589 17389 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0819 02:30:02.570786 17389 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 02:31:24.105866 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51984\nI0819 02:31:24.106207 17389 
solver.cpp:404]     Test net output #1: loss = 2.48405 (* 1 = 2.48405 loss)\nI0819 02:31:25.417204 17389 solver.cpp:228] Iteration 55800, loss = 0.00349465\nI0819 02:31:25.417255 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:31:25.417279 17389 solver.cpp:244]     Train net output #1: loss = 0.00349441 (* 1 = 0.00349441 loss)\nI0819 02:31:25.506832 17389 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0819 02:33:44.107116 17389 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 02:35:05.728132 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5212\nI0819 02:35:05.728492 17389 solver.cpp:404]     Test net output #1: loss = 2.45346 (* 1 = 2.45346 loss)\nI0819 02:35:07.039468 17389 solver.cpp:228] Iteration 55900, loss = 0.00442038\nI0819 02:35:07.039516 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:35:07.039541 17389 solver.cpp:244]     Train net output #1: loss = 0.00442013 (* 1 = 0.00442013 loss)\nI0819 02:35:07.136193 17389 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0819 02:37:25.684847 17389 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 02:38:47.352576 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51916\nI0819 02:38:47.352916 17389 solver.cpp:404]     Test net output #1: loss = 2.48175 (* 1 = 2.48175 loss)\nI0819 02:38:48.664160 17389 solver.cpp:228] Iteration 56000, loss = 0.00435637\nI0819 02:38:48.664213 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:38:48.664237 17389 solver.cpp:244]     Train net output #1: loss = 0.00435612 (* 1 = 0.00435612 loss)\nI0819 02:38:48.755893 17389 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0819 02:41:07.297876 17389 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 02:42:28.873028 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52224\nI0819 02:42:28.873375 17389 solver.cpp:404]     Test net output #1: loss = 2.44331 (* 1 = 2.44331 loss)\nI0819 02:42:30.183254 17389 solver.cpp:228] 
Iteration 56100, loss = 0.00396904\nI0819 02:42:30.183305 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:42:30.183321 17389 solver.cpp:244]     Train net output #1: loss = 0.00396879 (* 1 = 0.00396879 loss)\nI0819 02:42:30.279801 17389 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0819 02:44:48.902751 17389 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 02:46:10.410670 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51836\nI0819 02:46:10.411031 17389 solver.cpp:404]     Test net output #1: loss = 2.47484 (* 1 = 2.47484 loss)\nI0819 02:46:11.721168 17389 solver.cpp:228] Iteration 56200, loss = 0.00410138\nI0819 02:46:11.721220 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:46:11.721237 17389 solver.cpp:244]     Train net output #1: loss = 0.00410114 (* 1 = 0.00410114 loss)\nI0819 02:46:11.810302 17389 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0819 02:48:30.437975 17389 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 02:49:51.948518 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52184\nI0819 02:49:51.948861 17389 solver.cpp:404]     Test net output #1: loss = 2.43913 (* 1 = 2.43913 loss)\nI0819 02:49:53.258720 17389 solver.cpp:228] Iteration 56300, loss = 0.0045068\nI0819 02:49:53.258769 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:49:53.258785 17389 solver.cpp:244]     Train net output #1: loss = 0.00450655 (* 1 = 0.00450655 loss)\nI0819 02:49:53.349169 17389 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0819 02:52:11.919596 17389 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 02:53:33.447790 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5192\nI0819 02:53:33.448127 17389 solver.cpp:404]     Test net output #1: loss = 2.46349 (* 1 = 2.46349 loss)\nI0819 02:53:34.759147 17389 solver.cpp:228] Iteration 56400, loss = 0.00395377\nI0819 02:53:34.759199 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
02:53:34.759215 17389 solver.cpp:244]     Train net output #1: loss = 0.00395353 (* 1 = 0.00395353 loss)\nI0819 02:53:34.853560 17389 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0819 02:55:53.455107 17389 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 02:57:14.982122 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51956\nI0819 02:57:14.982460 17389 solver.cpp:404]     Test net output #1: loss = 2.43743 (* 1 = 2.43743 loss)\nI0819 02:57:16.293282 17389 solver.cpp:228] Iteration 56500, loss = 0.00374627\nI0819 02:57:16.293331 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:57:16.293347 17389 solver.cpp:244]     Train net output #1: loss = 0.00374603 (* 1 = 0.00374603 loss)\nI0819 02:57:16.383357 17389 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0819 02:59:35.016166 17389 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 03:00:56.540300 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51732\nI0819 03:00:56.540654 17389 solver.cpp:404]     Test net output #1: loss = 2.46613 (* 1 = 2.46613 loss)\nI0819 03:00:57.851294 17389 solver.cpp:228] Iteration 56600, loss = 0.00395394\nI0819 03:00:57.851341 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:00:57.851357 17389 solver.cpp:244]     Train net output #1: loss = 0.0039537 (* 1 = 0.0039537 loss)\nI0819 03:00:57.939975 17389 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0819 03:03:16.546298 17389 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 03:04:38.064652 17389 solver.cpp:404]     Test net output #0: accuracy = 0.521\nI0819 03:04:38.064990 17389 solver.cpp:404]     Test net output #1: loss = 2.43353 (* 1 = 2.43353 loss)\nI0819 03:04:39.374893 17389 solver.cpp:228] Iteration 56700, loss = 0.00443573\nI0819 03:04:39.374943 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:04:39.374959 17389 solver.cpp:244]     Train net output #1: loss = 0.00443548 (* 1 = 0.00443548 loss)\nI0819 03:04:39.469904 
17389 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0819 03:06:58.076200 17389 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 03:08:19.599308 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51868\nI0819 03:08:19.599671 17389 solver.cpp:404]     Test net output #1: loss = 2.46038 (* 1 = 2.46038 loss)\nI0819 03:08:20.910486 17389 solver.cpp:228] Iteration 56800, loss = 0.0043528\nI0819 03:08:20.910539 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:08:20.910557 17389 solver.cpp:244]     Train net output #1: loss = 0.00435256 (* 1 = 0.00435256 loss)\nI0819 03:08:20.997328 17389 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0819 03:10:39.506283 17389 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:12:01.030648 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52072\nI0819 03:12:01.031011 17389 solver.cpp:404]     Test net output #1: loss = 2.42918 (* 1 = 2.42918 loss)\nI0819 03:12:02.342236 17389 solver.cpp:228] Iteration 56900, loss = 0.00424223\nI0819 03:12:02.342278 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:12:02.342295 17389 solver.cpp:244]     Train net output #1: loss = 0.00424198 (* 1 = 0.00424198 loss)\nI0819 03:12:02.430083 17389 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0819 03:14:20.875946 17389 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:15:42.395282 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5192\nI0819 03:15:42.395619 17389 solver.cpp:404]     Test net output #1: loss = 2.44952 (* 1 = 2.44952 loss)\nI0819 03:15:43.706660 17389 solver.cpp:228] Iteration 57000, loss = 0.00411822\nI0819 03:15:43.706712 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:15:43.706729 17389 solver.cpp:244]     Train net output #1: loss = 0.00411797 (* 1 = 0.00411797 loss)\nI0819 03:15:43.796942 17389 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0819 03:18:02.249714 17389 solver.cpp:337] Iteration 57100, Testing net 
(#0)\nI0819 03:19:23.763440 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52072\nI0819 03:19:23.763773 17389 solver.cpp:404]     Test net output #1: loss = 2.42602 (* 1 = 2.42602 loss)\nI0819 03:19:25.074558 17389 solver.cpp:228] Iteration 57100, loss = 0.00404454\nI0819 03:19:25.074607 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:19:25.074625 17389 solver.cpp:244]     Train net output #1: loss = 0.0040443 (* 1 = 0.0040443 loss)\nI0819 03:19:25.166704 17389 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0819 03:21:43.687242 17389 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:23:05.186642 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51764\nI0819 03:23:05.186988 17389 solver.cpp:404]     Test net output #1: loss = 2.45224 (* 1 = 2.45224 loss)\nI0819 03:23:06.498333 17389 solver.cpp:228] Iteration 57200, loss = 0.0046797\nI0819 03:23:06.498383 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:23:06.498399 17389 solver.cpp:244]     Train net output #1: loss = 0.00467945 (* 1 = 0.00467945 loss)\nI0819 03:23:06.594210 17389 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0819 03:25:25.053951 17389 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 03:26:46.548185 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52056\nI0819 03:26:46.548519 17389 solver.cpp:404]     Test net output #1: loss = 2.41894 (* 1 = 2.41894 loss)\nI0819 03:26:47.861119 17389 solver.cpp:228] Iteration 57300, loss = 0.00491148\nI0819 03:26:47.861166 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:26:47.861183 17389 solver.cpp:244]     Train net output #1: loss = 0.00491124 (* 1 = 0.00491124 loss)\nI0819 03:26:47.947223 17389 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0819 03:29:06.433110 17389 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 03:30:27.932730 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51736\nI0819 03:30:27.933075 17389 
solver.cpp:404]     Test net output #1: loss = 2.44959 (* 1 = 2.44959 loss)\nI0819 03:30:29.245831 17389 solver.cpp:228] Iteration 57400, loss = 0.00407774\nI0819 03:30:29.245874 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:30:29.245890 17389 solver.cpp:244]     Train net output #1: loss = 0.00407749 (* 1 = 0.00407749 loss)\nI0819 03:30:29.336028 17389 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0819 03:32:47.886909 17389 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 03:34:09.387780 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52068\nI0819 03:34:09.388113 17389 solver.cpp:404]     Test net output #1: loss = 2.42028 (* 1 = 2.42028 loss)\nI0819 03:34:10.700502 17389 solver.cpp:228] Iteration 57500, loss = 0.00432249\nI0819 03:34:10.700551 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:34:10.700565 17389 solver.cpp:244]     Train net output #1: loss = 0.00432224 (* 1 = 0.00432224 loss)\nI0819 03:34:10.785684 17389 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0819 03:36:29.248327 17389 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 03:37:50.733547 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51796\nI0819 03:37:50.733894 17389 solver.cpp:404]     Test net output #1: loss = 2.44924 (* 1 = 2.44924 loss)\nI0819 03:37:52.045100 17389 solver.cpp:228] Iteration 57600, loss = 0.00358473\nI0819 03:37:52.045153 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:37:52.045169 17389 solver.cpp:244]     Train net output #1: loss = 0.00358449 (* 1 = 0.00358449 loss)\nI0819 03:37:52.137943 17389 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0819 03:40:10.600033 17389 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 03:41:32.080874 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52068\nI0819 03:41:32.081210 17389 solver.cpp:404]     Test net output #1: loss = 2.41273 (* 1 = 2.41273 loss)\nI0819 03:41:33.393159 17389 solver.cpp:228] 
Iteration 57700, loss = 0.00401022\nI0819 03:41:33.393206 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:41:33.393224 17389 solver.cpp:244]     Train net output #1: loss = 0.00400998 (* 1 = 0.00400998 loss)\nI0819 03:41:33.479351 17389 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0819 03:43:51.904639 17389 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 03:45:13.386396 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51768\nI0819 03:45:13.386762 17389 solver.cpp:404]     Test net output #1: loss = 2.43982 (* 1 = 2.43982 loss)\nI0819 03:45:14.699017 17389 solver.cpp:228] Iteration 57800, loss = 0.00488874\nI0819 03:45:14.699062 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:45:14.699079 17389 solver.cpp:244]     Train net output #1: loss = 0.00488849 (* 1 = 0.00488849 loss)\nI0819 03:45:14.792417 17389 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0819 03:47:33.267526 17389 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 03:48:54.751421 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52012\nI0819 03:48:54.751790 17389 solver.cpp:404]     Test net output #1: loss = 2.40601 (* 1 = 2.40601 loss)\nI0819 03:48:56.063657 17389 solver.cpp:228] Iteration 57900, loss = 0.00449581\nI0819 03:48:56.063705 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:48:56.063722 17389 solver.cpp:244]     Train net output #1: loss = 0.00449557 (* 1 = 0.00449557 loss)\nI0819 03:48:56.150918 17389 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0819 03:51:14.653319 17389 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 03:52:36.123070 17389 solver.cpp:404]     Test net output #0: accuracy = 0.519\nI0819 03:52:36.123419 17389 solver.cpp:404]     Test net output #1: loss = 2.42881 (* 1 = 2.42881 loss)\nI0819 03:52:37.435418 17389 solver.cpp:228] Iteration 58000, loss = 0.00454997\nI0819 03:52:37.435470 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
03:52:37.435487 17389 solver.cpp:244]     Train net output #1: loss = 0.00454973 (* 1 = 0.00454973 loss)\nI0819 03:52:37.524997 17389 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0819 03:54:56.132414 17389 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 03:56:17.627310 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52096\nI0819 03:56:17.627640 17389 solver.cpp:404]     Test net output #1: loss = 2.39667 (* 1 = 2.39667 loss)\nI0819 03:56:18.942312 17389 solver.cpp:228] Iteration 58100, loss = 0.00435317\nI0819 03:56:18.942364 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:56:18.942380 17389 solver.cpp:244]     Train net output #1: loss = 0.00435292 (* 1 = 0.00435292 loss)\nI0819 03:56:19.034716 17389 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0819 03:58:37.486171 17389 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 03:59:58.976858 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51872\nI0819 03:59:58.977200 17389 solver.cpp:404]     Test net output #1: loss = 2.42381 (* 1 = 2.42381 loss)\nI0819 04:00:00.289453 17389 solver.cpp:228] Iteration 58200, loss = 0.00500952\nI0819 04:00:00.289506 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:00:00.289522 17389 solver.cpp:244]     Train net output #1: loss = 0.00500928 (* 1 = 0.00500928 loss)\nI0819 04:00:00.377182 17389 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0819 04:02:18.889047 17389 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 04:03:40.373924 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52108\nI0819 04:03:40.374259 17389 solver.cpp:404]     Test net output #1: loss = 2.39295 (* 1 = 2.39295 loss)\nI0819 04:03:41.686316 17389 solver.cpp:228] Iteration 58300, loss = 0.00426516\nI0819 04:03:41.686370 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:03:41.686388 17389 solver.cpp:244]     Train net output #1: loss = 0.00426492 (* 1 = 0.00426492 loss)\nI0819 04:03:41.773111 
17389 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0819 04:06:00.251359 17389 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 04:07:21.732604 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51872\nI0819 04:07:21.732962 17389 solver.cpp:404]     Test net output #1: loss = 2.41555 (* 1 = 2.41555 loss)\nI0819 04:07:23.045187 17389 solver.cpp:228] Iteration 58400, loss = 0.00438485\nI0819 04:07:23.045234 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:07:23.045251 17389 solver.cpp:244]     Train net output #1: loss = 0.00438461 (* 1 = 0.00438461 loss)\nI0819 04:07:23.134352 17389 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0819 04:09:41.592419 17389 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:11:03.079869 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52112\nI0819 04:11:03.080193 17389 solver.cpp:404]     Test net output #1: loss = 2.38751 (* 1 = 2.38751 loss)\nI0819 04:11:04.392222 17389 solver.cpp:228] Iteration 58500, loss = 0.00412262\nI0819 04:11:04.392276 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:11:04.392292 17389 solver.cpp:244]     Train net output #1: loss = 0.00412237 (* 1 = 0.00412237 loss)\nI0819 04:11:04.488683 17389 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0819 04:13:22.921277 17389 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:14:44.399407 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5182\nI0819 04:14:44.399750 17389 solver.cpp:404]     Test net output #1: loss = 2.4153 (* 1 = 2.4153 loss)\nI0819 04:14:45.711436 17389 solver.cpp:228] Iteration 58600, loss = 0.00405583\nI0819 04:14:45.711486 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:14:45.711503 17389 solver.cpp:244]     Train net output #1: loss = 0.00405559 (* 1 = 0.00405559 loss)\nI0819 04:14:45.802202 17389 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0819 04:17:04.290902 17389 solver.cpp:337] Iteration 58700, Testing net 
(#0)\nI0819 04:18:25.803891 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52088\nI0819 04:18:25.804225 17389 solver.cpp:404]     Test net output #1: loss = 2.38639 (* 1 = 2.38639 loss)\nI0819 04:18:27.117483 17389 solver.cpp:228] Iteration 58700, loss = 0.00419658\nI0819 04:18:27.117538 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:18:27.117563 17389 solver.cpp:244]     Train net output #1: loss = 0.00419634 (* 1 = 0.00419634 loss)\nI0819 04:18:27.206619 17389 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0819 04:20:45.638824 17389 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:22:07.118388 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5186\nI0819 04:22:07.118696 17389 solver.cpp:404]     Test net output #1: loss = 2.42124 (* 1 = 2.42124 loss)\nI0819 04:22:08.432150 17389 solver.cpp:228] Iteration 58800, loss = 0.00424892\nI0819 04:22:08.432201 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:22:08.432225 17389 solver.cpp:244]     Train net output #1: loss = 0.00424868 (* 1 = 0.00424868 loss)\nI0819 04:22:08.517940 17389 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0819 04:24:27.013933 17389 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 04:25:48.490468 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51932\nI0819 04:25:48.490811 17389 solver.cpp:404]     Test net output #1: loss = 2.38837 (* 1 = 2.38837 loss)\nI0819 04:25:49.804939 17389 solver.cpp:228] Iteration 58900, loss = 0.00394145\nI0819 04:25:49.804996 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:25:49.805019 17389 solver.cpp:244]     Train net output #1: loss = 0.00394121 (* 1 = 0.00394121 loss)\nI0819 04:25:49.893761 17389 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0819 04:28:08.390501 17389 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 04:29:29.870844 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51784\nI0819 04:29:29.871175 17389 
solver.cpp:404]     Test net output #1: loss = 2.41534 (* 1 = 2.41534 loss)\nI0819 04:29:31.184716 17389 solver.cpp:228] Iteration 59000, loss = 0.00444853\nI0819 04:29:31.184772 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:29:31.184798 17389 solver.cpp:244]     Train net output #1: loss = 0.00444829 (* 1 = 0.00444829 loss)\nI0819 04:29:31.275338 17389 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0819 04:31:49.862284 17389 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 04:33:11.322628 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52116\nI0819 04:33:11.322983 17389 solver.cpp:404]     Test net output #1: loss = 2.38229 (* 1 = 2.38229 loss)\nI0819 04:33:12.636133 17389 solver.cpp:228] Iteration 59100, loss = 0.00432218\nI0819 04:33:12.636188 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:33:12.636214 17389 solver.cpp:244]     Train net output #1: loss = 0.00432194 (* 1 = 0.00432194 loss)\nI0819 04:33:12.723863 17389 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0819 04:35:31.276415 17389 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 04:36:52.754974 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51836\nI0819 04:36:52.755326 17389 solver.cpp:404]     Test net output #1: loss = 2.40898 (* 1 = 2.40898 loss)\nI0819 04:36:54.068615 17389 solver.cpp:228] Iteration 59200, loss = 0.00461976\nI0819 04:36:54.068673 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:36:54.068698 17389 solver.cpp:244]     Train net output #1: loss = 0.00461952 (* 1 = 0.00461952 loss)\nI0819 04:36:54.157141 17389 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0819 04:39:12.686013 17389 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 04:40:34.173841 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5202\nI0819 04:40:34.174177 17389 solver.cpp:404]     Test net output #1: loss = 2.37935 (* 1 = 2.37935 loss)\nI0819 04:40:35.487901 17389 solver.cpp:228] 
Iteration 59300, loss = 0.00396376\nI0819 04:40:35.487956 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:40:35.487979 17389 solver.cpp:244]     Train net output #1: loss = 0.00396352 (* 1 = 0.00396352 loss)\nI0819 04:40:35.575846 17389 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0819 04:42:54.196498 17389 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 04:44:15.698482 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51716\nI0819 04:44:15.698801 17389 solver.cpp:404]     Test net output #1: loss = 2.4104 (* 1 = 2.4104 loss)\nI0819 04:44:17.012104 17389 solver.cpp:228] Iteration 59400, loss = 0.0045463\nI0819 04:44:17.012161 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:44:17.012184 17389 solver.cpp:244]     Train net output #1: loss = 0.00454605 (* 1 = 0.00454605 loss)\nI0819 04:44:17.103749 17389 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0819 04:46:35.662058 17389 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 04:47:57.170208 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51956\nI0819 04:47:57.170528 17389 solver.cpp:404]     Test net output #1: loss = 2.37831 (* 1 = 2.37831 loss)\nI0819 04:47:58.483134 17389 solver.cpp:228] Iteration 59500, loss = 0.00438146\nI0819 04:47:58.483187 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:47:58.483211 17389 solver.cpp:244]     Train net output #1: loss = 0.00438122 (* 1 = 0.00438122 loss)\nI0819 04:47:58.572654 17389 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0819 04:50:17.152745 17389 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 04:51:38.654999 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51776\nI0819 04:51:38.655333 17389 solver.cpp:404]     Test net output #1: loss = 2.40068 (* 1 = 2.40068 loss)\nI0819 04:51:39.966986 17389 solver.cpp:228] Iteration 59600, loss = 0.00479724\nI0819 04:51:39.967039 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
04:51:39.967063 17389 solver.cpp:244]     Train net output #1: loss = 0.004797 (* 1 = 0.004797 loss)\nI0819 04:51:40.052923 17389 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0819 04:53:58.496371 17389 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 04:55:19.973495 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52064\nI0819 04:55:19.973814 17389 solver.cpp:404]     Test net output #1: loss = 2.37437 (* 1 = 2.37437 loss)\nI0819 04:55:21.286654 17389 solver.cpp:228] Iteration 59700, loss = 0.00431297\nI0819 04:55:21.286713 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:55:21.286736 17389 solver.cpp:244]     Train net output #1: loss = 0.00431272 (* 1 = 0.00431272 loss)\nI0819 04:55:21.378614 17389 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0819 04:57:39.868993 17389 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 04:59:01.368505 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51808\nI0819 04:59:01.368835 17389 solver.cpp:404]     Test net output #1: loss = 2.39483 (* 1 = 2.39483 loss)\nI0819 04:59:02.682936 17389 solver.cpp:228] Iteration 59800, loss = 0.00432936\nI0819 04:59:02.682989 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:59:02.683014 17389 solver.cpp:244]     Train net output #1: loss = 0.00432911 (* 1 = 0.00432911 loss)\nI0819 04:59:02.774740 17389 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0819 05:01:21.234519 17389 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 05:02:42.850852 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52\nI0819 05:02:42.851212 17389 solver.cpp:404]     Test net output #1: loss = 2.37318 (* 1 = 2.37318 loss)\nI0819 05:02:44.164924 17389 solver.cpp:228] Iteration 59900, loss = 0.00455217\nI0819 05:02:44.164981 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:02:44.165005 17389 solver.cpp:244]     Train net output #1: loss = 0.00455193 (* 1 = 0.00455193 loss)\nI0819 05:02:44.251726 17389 
sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0819 05:05:02.733537 17389 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 05:06:24.357821 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51828\nI0819 05:06:24.358166 17389 solver.cpp:404]     Test net output #1: loss = 2.39925 (* 1 = 2.39925 loss)\nI0819 05:06:25.674121 17389 solver.cpp:228] Iteration 60000, loss = 0.00461159\nI0819 05:06:25.674172 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:06:25.674197 17389 solver.cpp:244]     Train net output #1: loss = 0.00461135 (* 1 = 0.00461135 loss)\nI0819 05:06:25.762418 17389 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0819 05:08:44.217286 17389 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:10:05.817533 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5214\nI0819 05:10:05.817864 17389 solver.cpp:404]     Test net output #1: loss = 2.3655 (* 1 = 2.3655 loss)\nI0819 05:10:07.131958 17389 solver.cpp:228] Iteration 60100, loss = 0.00430777\nI0819 05:10:07.132009 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:10:07.132033 17389 solver.cpp:244]     Train net output #1: loss = 0.00430752 (* 1 = 0.00430752 loss)\nI0819 05:10:07.219426 17389 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0819 05:12:25.705150 17389 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:13:47.295555 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51856\nI0819 05:13:47.295910 17389 solver.cpp:404]     Test net output #1: loss = 2.39392 (* 1 = 2.39392 loss)\nI0819 05:13:48.609441 17389 solver.cpp:228] Iteration 60200, loss = 0.0044677\nI0819 05:13:48.609491 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:13:48.609515 17389 solver.cpp:244]     Train net output #1: loss = 0.00446745 (* 1 = 0.00446745 loss)\nI0819 05:13:48.694331 17389 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0819 05:16:07.216836 17389 solver.cpp:337] Iteration 60300, Testing net 
(#0)\nI0819 05:17:28.735574 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52112\nI0819 05:17:28.735914 17389 solver.cpp:404]     Test net output #1: loss = 2.36353 (* 1 = 2.36353 loss)\nI0819 05:17:30.049600 17389 solver.cpp:228] Iteration 60300, loss = 0.00448992\nI0819 05:17:30.049654 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:17:30.049677 17389 solver.cpp:244]     Train net output #1: loss = 0.00448967 (* 1 = 0.00448967 loss)\nI0819 05:17:30.143954 17389 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0819 05:19:48.664001 17389 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:21:10.158453 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5188\nI0819 05:21:10.158787 17389 solver.cpp:404]     Test net output #1: loss = 2.39243 (* 1 = 2.39243 loss)\nI0819 05:21:11.472084 17389 solver.cpp:228] Iteration 60400, loss = 0.00413996\nI0819 05:21:11.472136 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:21:11.472160 17389 solver.cpp:244]     Train net output #1: loss = 0.00413971 (* 1 = 0.00413971 loss)\nI0819 05:21:11.565505 17389 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0819 05:23:30.062983 17389 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 05:24:51.549368 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51908\nI0819 05:24:51.549661 17389 solver.cpp:404]     Test net output #1: loss = 2.36512 (* 1 = 2.36512 loss)\nI0819 05:24:52.863425 17389 solver.cpp:228] Iteration 60500, loss = 0.00454065\nI0819 05:24:52.863479 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:24:52.863504 17389 solver.cpp:244]     Train net output #1: loss = 0.0045404 (* 1 = 0.0045404 loss)\nI0819 05:24:52.955363 17389 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0819 05:27:11.385494 17389 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 05:28:32.935763 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51616\nI0819 05:28:32.936056 17389 
solver.cpp:404]     Test net output #1: loss = 2.39768 (* 1 = 2.39768 loss)\nI0819 05:28:34.250288 17389 solver.cpp:228] Iteration 60600, loss = 0.00510348\nI0819 05:28:34.250345 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:28:34.250370 17389 solver.cpp:244]     Train net output #1: loss = 0.00510323 (* 1 = 0.00510323 loss)\nI0819 05:28:34.336724 17389 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0819 05:30:52.843281 17389 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 05:32:14.346197 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51992\nI0819 05:32:14.346527 17389 solver.cpp:404]     Test net output #1: loss = 2.36304 (* 1 = 2.36304 loss)\nI0819 05:32:15.660292 17389 solver.cpp:228] Iteration 60700, loss = 0.00446385\nI0819 05:32:15.660351 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:32:15.660374 17389 solver.cpp:244]     Train net output #1: loss = 0.0044636 (* 1 = 0.0044636 loss)\nI0819 05:32:15.744825 17389 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0819 05:34:34.239228 17389 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 05:35:55.773416 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51568\nI0819 05:35:55.773680 17389 solver.cpp:404]     Test net output #1: loss = 2.39638 (* 1 = 2.39638 loss)\nI0819 05:35:57.088076 17389 solver.cpp:228] Iteration 60800, loss = 0.00504294\nI0819 05:35:57.088132 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:35:57.088156 17389 solver.cpp:244]     Train net output #1: loss = 0.00504269 (* 1 = 0.00504269 loss)\nI0819 05:35:57.172505 17389 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0819 05:38:15.758992 17389 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 05:39:37.250761 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51888\nI0819 05:39:37.251036 17389 solver.cpp:404]     Test net output #1: loss = 2.36333 (* 1 = 2.36333 loss)\nI0819 05:39:38.564653 17389 solver.cpp:228] 
Iteration 60900, loss = 0.00492204\nI0819 05:39:38.564710 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:39:38.564733 17389 solver.cpp:244]     Train net output #1: loss = 0.00492179 (* 1 = 0.00492179 loss)\nI0819 05:39:38.656559 17389 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0819 05:41:57.210402 17389 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 05:43:18.718624 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51728\nI0819 05:43:18.718978 17389 solver.cpp:404]     Test net output #1: loss = 2.38385 (* 1 = 2.38385 loss)\nI0819 05:43:20.032629 17389 solver.cpp:228] Iteration 61000, loss = 0.00432722\nI0819 05:43:20.032688 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:43:20.032713 17389 solver.cpp:244]     Train net output #1: loss = 0.00432698 (* 1 = 0.00432698 loss)\nI0819 05:43:20.122236 17389 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0819 05:45:38.629642 17389 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 05:47:00.150058 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51912\nI0819 05:47:00.150328 17389 solver.cpp:404]     Test net output #1: loss = 2.35768 (* 1 = 2.35768 loss)\nI0819 05:47:01.463469 17389 solver.cpp:228] Iteration 61100, loss = 0.00465972\nI0819 05:47:01.463523 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:47:01.463549 17389 solver.cpp:244]     Train net output #1: loss = 0.00465948 (* 1 = 0.00465948 loss)\nI0819 05:47:01.552570 17389 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0819 05:49:20.045089 17389 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 05:50:41.572068 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5148\nI0819 05:50:41.572352 17389 solver.cpp:404]     Test net output #1: loss = 2.39492 (* 1 = 2.39492 loss)\nI0819 05:50:42.885582 17389 solver.cpp:228] Iteration 61200, loss = 0.00442971\nI0819 05:50:42.885640 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
05:50:42.885665 17389 solver.cpp:244]     Train net output #1: loss = 0.00442947 (* 1 = 0.00442947 loss)\nI0819 05:50:42.975303 17389 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0819 05:53:01.615021 17389 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 05:54:23.111311 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5188\nI0819 05:54:23.111606 17389 solver.cpp:404]     Test net output #1: loss = 2.35603 (* 1 = 2.35603 loss)\nI0819 05:54:24.425686 17389 solver.cpp:228] Iteration 61300, loss = 0.00468056\nI0819 05:54:24.425740 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:54:24.425765 17389 solver.cpp:244]     Train net output #1: loss = 0.00468032 (* 1 = 0.00468032 loss)\nI0819 05:54:24.515789 17389 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0819 05:56:43.020900 17389 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 05:58:04.535473 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51616\nI0819 05:58:04.535754 17389 solver.cpp:404]     Test net output #1: loss = 2.385 (* 1 = 2.385 loss)\nI0819 05:58:05.849854 17389 solver.cpp:228] Iteration 61400, loss = 0.0044892\nI0819 05:58:05.849911 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:58:05.849936 17389 solver.cpp:244]     Train net output #1: loss = 0.00448896 (* 1 = 0.00448896 loss)\nI0819 05:58:05.938674 17389 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0819 06:00:24.404697 17389 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 06:01:45.940431 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51856\nI0819 06:01:45.940723 17389 solver.cpp:404]     Test net output #1: loss = 2.3559 (* 1 = 2.3559 loss)\nI0819 06:01:47.255440 17389 solver.cpp:228] Iteration 61500, loss = 0.0050031\nI0819 06:01:47.255494 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:01:47.255518 17389 solver.cpp:244]     Train net output #1: loss = 0.00500286 (* 1 = 0.00500286 loss)\nI0819 06:01:47.345842 17389 
sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0819 06:04:05.847661 17389 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 06:05:27.380640 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51688\nI0819 06:05:27.380944 17389 solver.cpp:404]     Test net output #1: loss = 2.37859 (* 1 = 2.37859 loss)\nI0819 06:05:28.693367 17389 solver.cpp:228] Iteration 61600, loss = 0.00426008\nI0819 06:05:28.693420 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:05:28.693445 17389 solver.cpp:244]     Train net output #1: loss = 0.00425984 (* 1 = 0.00425984 loss)\nI0819 06:05:28.784672 17389 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0819 06:07:47.299882 17389 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:09:08.816853 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51804\nI0819 06:09:08.817158 17389 solver.cpp:404]     Test net output #1: loss = 2.35254 (* 1 = 2.35254 loss)\nI0819 06:09:10.130201 17389 solver.cpp:228] Iteration 61700, loss = 0.00399943\nI0819 06:09:10.130261 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:09:10.130285 17389 solver.cpp:244]     Train net output #1: loss = 0.00399919 (* 1 = 0.00399919 loss)\nI0819 06:09:10.218996 17389 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0819 06:11:28.703343 17389 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:12:50.218367 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51652\nI0819 06:12:50.218731 17389 solver.cpp:404]     Test net output #1: loss = 2.37787 (* 1 = 2.37787 loss)\nI0819 06:12:51.533112 17389 solver.cpp:228] Iteration 61800, loss = 0.00430234\nI0819 06:12:51.533169 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:12:51.533193 17389 solver.cpp:244]     Train net output #1: loss = 0.0043021 (* 1 = 0.0043021 loss)\nI0819 06:12:51.622223 17389 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0819 06:15:10.085685 17389 solver.cpp:337] Iteration 61900, Testing net 
(#0)\nI0819 06:16:31.585258 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52028\nI0819 06:16:31.585525 17389 solver.cpp:404]     Test net output #1: loss = 2.35214 (* 1 = 2.35214 loss)\nI0819 06:16:32.899263 17389 solver.cpp:228] Iteration 61900, loss = 0.0045519\nI0819 06:16:32.899322 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:16:32.899345 17389 solver.cpp:244]     Train net output #1: loss = 0.00455166 (* 1 = 0.00455166 loss)\nI0819 06:16:32.985354 17389 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0819 06:18:51.473212 17389 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:20:12.982270 17389 solver.cpp:404]     Test net output #0: accuracy = 0.517\nI0819 06:20:12.982558 17389 solver.cpp:404]     Test net output #1: loss = 2.37808 (* 1 = 2.37808 loss)\nI0819 06:20:14.296592 17389 solver.cpp:228] Iteration 62000, loss = 0.0043725\nI0819 06:20:14.296645 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:20:14.296669 17389 solver.cpp:244]     Train net output #1: loss = 0.00437225 (* 1 = 0.00437225 loss)\nI0819 06:20:14.382172 17389 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0819 06:22:32.851527 17389 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 06:23:54.355710 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51904\nI0819 06:23:54.356022 17389 solver.cpp:404]     Test net output #1: loss = 2.35522 (* 1 = 2.35522 loss)\nI0819 06:23:55.669348 17389 solver.cpp:228] Iteration 62100, loss = 0.00427198\nI0819 06:23:55.669407 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:23:55.669431 17389 solver.cpp:244]     Train net output #1: loss = 0.00427173 (* 1 = 0.00427173 loss)\nI0819 06:23:55.755959 17389 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0819 06:26:14.254241 17389 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 06:27:35.687798 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51644\nI0819 06:27:35.688067 17389 
solver.cpp:404]     Test net output #1: loss = 2.37928 (* 1 = 2.37928 loss)\nI0819 06:27:37.002269 17389 solver.cpp:228] Iteration 62200, loss = 0.00442035\nI0819 06:27:37.002322 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:27:37.002346 17389 solver.cpp:244]     Train net output #1: loss = 0.00442011 (* 1 = 0.00442011 loss)\nI0819 06:27:37.085727 17389 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0819 06:29:55.546771 17389 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 06:31:17.040206 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5182\nI0819 06:31:17.040542 17389 solver.cpp:404]     Test net output #1: loss = 2.35526 (* 1 = 2.35526 loss)\nI0819 06:31:18.354228 17389 solver.cpp:228] Iteration 62300, loss = 0.00448064\nI0819 06:31:18.354285 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:31:18.354310 17389 solver.cpp:244]     Train net output #1: loss = 0.0044804 (* 1 = 0.0044804 loss)\nI0819 06:31:18.441577 17389 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0819 06:33:36.905148 17389 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 06:34:58.386155 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51556\nI0819 06:34:58.386422 17389 solver.cpp:404]     Test net output #1: loss = 2.37807 (* 1 = 2.37807 loss)\nI0819 06:34:59.700093 17389 solver.cpp:228] Iteration 62400, loss = 0.00469475\nI0819 06:34:59.700146 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:34:59.700171 17389 solver.cpp:244]     Train net output #1: loss = 0.00469451 (* 1 = 0.00469451 loss)\nI0819 06:34:59.793404 17389 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0819 06:37:18.283200 17389 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 06:38:39.815443 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51904\nI0819 06:38:39.815771 17389 solver.cpp:404]     Test net output #1: loss = 2.34953 (* 1 = 2.34953 loss)\nI0819 06:38:41.129489 17389 solver.cpp:228] Iteration 
62500, loss = 0.00428404\nI0819 06:38:41.129549 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:38:41.129572 17389 solver.cpp:244]     Train net output #1: loss = 0.00428379 (* 1 = 0.00428379 loss)\nI0819 06:38:41.221952 17389 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0819 06:40:59.791196 17389 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 06:42:21.334071 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51576\nI0819 06:42:21.334425 17389 solver.cpp:404]     Test net output #1: loss = 2.37345 (* 1 = 2.37345 loss)\nI0819 06:42:22.648360 17389 solver.cpp:228] Iteration 62600, loss = 0.00487203\nI0819 06:42:22.648417 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:42:22.648442 17389 solver.cpp:244]     Train net output #1: loss = 0.00487178 (* 1 = 0.00487178 loss)\nI0819 06:42:22.734131 17389 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0819 06:44:41.323165 17389 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 06:46:02.881734 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51888\nI0819 06:46:02.882087 17389 solver.cpp:404]     Test net output #1: loss = 2.35033 (* 1 = 2.35033 loss)\nI0819 06:46:04.196188 17389 solver.cpp:228] Iteration 62700, loss = 0.0046661\nI0819 06:46:04.196241 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:46:04.196265 17389 solver.cpp:244]     Train net output #1: loss = 0.00466586 (* 1 = 0.00466586 loss)\nI0819 06:46:04.282454 17389 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0819 06:48:22.792446 17389 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 06:49:44.307652 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51664\nI0819 06:49:44.308006 17389 solver.cpp:404]     Test net output #1: loss = 2.37512 (* 1 = 2.37512 loss)\nI0819 06:49:45.621790 17389 solver.cpp:228] Iteration 62800, loss = 0.00384789\nI0819 06:49:45.621843 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
06:49:45.621868 17389 solver.cpp:244]     Train net output #1: loss = 0.00384764 (* 1 = 0.00384764 loss)\nI0819 06:49:45.707726 17389 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0819 06:52:04.232369 17389 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 06:53:25.726296 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51872\nI0819 06:53:25.726647 17389 solver.cpp:404]     Test net output #1: loss = 2.35025 (* 1 = 2.35025 loss)\nI0819 06:53:27.040002 17389 solver.cpp:228] Iteration 62900, loss = 0.00487195\nI0819 06:53:27.040045 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:53:27.040067 17389 solver.cpp:244]     Train net output #1: loss = 0.0048717 (* 1 = 0.0048717 loss)\nI0819 06:53:27.121755 17389 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0819 06:55:45.589584 17389 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 06:57:07.079123 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51644\nI0819 06:57:07.079468 17389 solver.cpp:404]     Test net output #1: loss = 2.37555 (* 1 = 2.37555 loss)\nI0819 06:57:08.393317 17389 solver.cpp:228] Iteration 63000, loss = 0.00431339\nI0819 06:57:08.393369 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:57:08.393393 17389 solver.cpp:244]     Train net output #1: loss = 0.00431315 (* 1 = 0.00431315 loss)\nI0819 06:57:08.485647 17389 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0819 06:59:27.023502 17389 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 07:00:48.520992 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51792\nI0819 07:00:48.521325 17389 solver.cpp:404]     Test net output #1: loss = 2.3445 (* 1 = 2.3445 loss)\nI0819 07:00:49.835446 17389 solver.cpp:228] Iteration 63100, loss = 0.00412411\nI0819 07:00:49.835500 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:00:49.835523 17389 solver.cpp:244]     Train net output #1: loss = 0.00412386 (* 1 = 0.00412386 loss)\nI0819 07:00:49.925843 
17389 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0819 07:03:08.434295 17389 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 07:04:29.925195 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5164\nI0819 07:04:29.925519 17389 solver.cpp:404]     Test net output #1: loss = 2.37503 (* 1 = 2.37503 loss)\nI0819 07:04:31.238195 17389 solver.cpp:228] Iteration 63200, loss = 0.00461168\nI0819 07:04:31.238252 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:04:31.238276 17389 solver.cpp:244]     Train net output #1: loss = 0.00461144 (* 1 = 0.00461144 loss)\nI0819 07:04:31.333097 17389 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0819 07:06:49.984207 17389 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 07:08:11.501058 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51684\nI0819 07:08:11.501405 17389 solver.cpp:404]     Test net output #1: loss = 2.35196 (* 1 = 2.35196 loss)\nI0819 07:08:12.814013 17389 solver.cpp:228] Iteration 63300, loss = 0.00441138\nI0819 07:08:12.814069 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:08:12.814093 17389 solver.cpp:244]     Train net output #1: loss = 0.00441114 (* 1 = 0.00441114 loss)\nI0819 07:08:12.908603 17389 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0819 07:10:31.401269 17389 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:11:52.985410 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5154\nI0819 07:11:52.985788 17389 solver.cpp:404]     Test net output #1: loss = 2.37573 (* 1 = 2.37573 loss)\nI0819 07:11:54.298085 17389 solver.cpp:228] Iteration 63400, loss = 0.00451465\nI0819 07:11:54.298143 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:11:54.298167 17389 solver.cpp:244]     Train net output #1: loss = 0.00451441 (* 1 = 0.00451441 loss)\nI0819 07:11:54.388749 17389 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0819 07:14:12.899068 17389 solver.cpp:337] Iteration 63500, Testing net 
(#0)\nI0819 07:15:34.538877 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51668\nI0819 07:15:34.539217 17389 solver.cpp:404]     Test net output #1: loss = 2.36003 (* 1 = 2.36003 loss)\nI0819 07:15:35.851819 17389 solver.cpp:228] Iteration 63500, loss = 0.0037761\nI0819 07:15:35.851882 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:15:35.851907 17389 solver.cpp:244]     Train net output #1: loss = 0.00377586 (* 1 = 0.00377586 loss)\nI0819 07:15:35.938506 17389 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0819 07:17:54.451978 17389 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:19:16.048842 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5152\nI0819 07:19:16.049221 17389 solver.cpp:404]     Test net output #1: loss = 2.3819 (* 1 = 2.3819 loss)\nI0819 07:19:17.361837 17389 solver.cpp:228] Iteration 63600, loss = 0.00445703\nI0819 07:19:17.361898 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:19:17.361923 17389 solver.cpp:244]     Train net output #1: loss = 0.00445679 (* 1 = 0.00445679 loss)\nI0819 07:19:17.448421 17389 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0819 07:21:36.035717 17389 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 07:22:57.625733 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51744\nI0819 07:22:57.626075 17389 solver.cpp:404]     Test net output #1: loss = 2.35178 (* 1 = 2.35178 loss)\nI0819 07:22:58.939267 17389 solver.cpp:228] Iteration 63700, loss = 0.00467358\nI0819 07:22:58.939323 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:22:58.939348 17389 solver.cpp:244]     Train net output #1: loss = 0.00467333 (* 1 = 0.00467333 loss)\nI0819 07:22:59.026131 17389 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0819 07:25:17.540627 17389 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 07:26:39.126618 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51448\nI0819 07:26:39.126994 17389 
solver.cpp:404]     Test net output #1: loss = 2.38174 (* 1 = 2.38174 loss)\nI0819 07:26:40.440495 17389 solver.cpp:228] Iteration 63800, loss = 0.00470666\nI0819 07:26:40.440537 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:26:40.440560 17389 solver.cpp:244]     Train net output #1: loss = 0.00470641 (* 1 = 0.00470641 loss)\nI0819 07:26:40.530534 17389 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0819 07:28:59.103806 17389 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 07:30:20.658939 17389 solver.cpp:404]     Test net output #0: accuracy = 0.518\nI0819 07:30:20.659301 17389 solver.cpp:404]     Test net output #1: loss = 2.34556 (* 1 = 2.34556 loss)\nI0819 07:30:21.972955 17389 solver.cpp:228] Iteration 63900, loss = 0.00382068\nI0819 07:30:21.973013 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:30:21.973037 17389 solver.cpp:244]     Train net output #1: loss = 0.00382043 (* 1 = 0.00382043 loss)\nI0819 07:30:22.055421 17389 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0819 07:32:40.551553 17389 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 07:34:02.161312 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5168\nI0819 07:34:02.161674 17389 solver.cpp:404]     Test net output #1: loss = 2.37578 (* 1 = 2.37578 loss)\nI0819 07:34:03.474906 17389 solver.cpp:228] Iteration 64000, loss = 0.00448111\nI0819 07:34:03.474951 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:34:03.474972 17389 solver.cpp:244]     Train net output #1: loss = 0.00448087 (* 1 = 0.00448087 loss)\nI0819 07:34:03.565783 17389 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0819 07:36:22.172250 17389 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 07:37:43.730340 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51868\nI0819 07:37:43.730680 17389 solver.cpp:404]     Test net output #1: loss = 2.35393 (* 1 = 2.35393 loss)\nI0819 07:37:45.043108 17389 solver.cpp:228] Iteration 
64100, loss = 0.00445451\nI0819 07:37:45.043164 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:37:45.043180 17389 solver.cpp:244]     Train net output #1: loss = 0.00445427 (* 1 = 0.00445427 loss)\nI0819 07:37:45.125013 17389 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0819 07:40:03.653259 17389 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 07:41:25.166672 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51544\nI0819 07:41:25.167039 17389 solver.cpp:404]     Test net output #1: loss = 2.37287 (* 1 = 2.37287 loss)\nI0819 07:41:26.479241 17389 solver.cpp:228] Iteration 64200, loss = 0.00436126\nI0819 07:41:26.479293 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:41:26.479310 17389 solver.cpp:244]     Train net output #1: loss = 0.00436102 (* 1 = 0.00436102 loss)\nI0819 07:41:26.573246 17389 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0819 07:43:45.151533 17389 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 07:45:06.663550 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51716\nI0819 07:45:06.663928 17389 solver.cpp:404]     Test net output #1: loss = 2.35114 (* 1 = 2.35114 loss)\nI0819 07:45:07.976156 17389 solver.cpp:228] Iteration 64300, loss = 0.00477843\nI0819 07:45:07.976207 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:45:07.976224 17389 solver.cpp:244]     Train net output #1: loss = 0.00477818 (* 1 = 0.00477818 loss)\nI0819 07:45:08.064698 17389 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0819 07:47:26.608597 17389 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 07:48:48.123641 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51492\nI0819 07:48:48.123994 17389 solver.cpp:404]     Test net output #1: loss = 2.37836 (* 1 = 2.37836 loss)\nI0819 07:48:49.436419 17389 solver.cpp:228] Iteration 64400, loss = 0.00455245\nI0819 07:48:49.436473 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
07:48:49.436491 17389 solver.cpp:244]     Train net output #1: loss = 0.0045522 (* 1 = 0.0045522 loss)\nI0819 07:48:49.515636 17389 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0819 07:51:08.048239 17389 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 07:52:29.562783 17389 solver.cpp:404]     Test net output #0: accuracy = 0.516\nI0819 07:52:29.563114 17389 solver.cpp:404]     Test net output #1: loss = 2.35366 (* 1 = 2.35366 loss)\nI0819 07:52:30.874866 17389 solver.cpp:228] Iteration 64500, loss = 0.00465693\nI0819 07:52:30.874917 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:52:30.874935 17389 solver.cpp:244]     Train net output #1: loss = 0.00465669 (* 1 = 0.00465669 loss)\nI0819 07:52:30.960225 17389 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0819 07:54:49.455456 17389 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 07:56:10.964674 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51516\nI0819 07:56:10.965026 17389 solver.cpp:404]     Test net output #1: loss = 2.37744 (* 1 = 2.37744 loss)\nI0819 07:56:12.278491 17389 solver.cpp:228] Iteration 64600, loss = 0.00410046\nI0819 07:56:12.278540 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:56:12.278558 17389 solver.cpp:244]     Train net output #1: loss = 0.00410022 (* 1 = 0.00410022 loss)\nI0819 07:56:12.354862 17389 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0819 07:58:30.929513 17389 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 07:59:52.426441 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51596\nI0819 07:59:52.426803 17389 solver.cpp:404]     Test net output #1: loss = 2.36276 (* 1 = 2.36276 loss)\nI0819 07:59:53.740110 17389 solver.cpp:228] Iteration 64700, loss = 0.00440167\nI0819 07:59:53.740164 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:59:53.740181 17389 solver.cpp:244]     Train net output #1: loss = 0.00440143 (* 1 = 0.00440143 loss)\nI0819 07:59:53.831917 
17389 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0819 08:02:12.457458 17389 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 08:03:33.957265 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51416\nI0819 08:03:33.957623 17389 solver.cpp:404]     Test net output #1: loss = 2.38604 (* 1 = 2.38604 loss)\nI0819 08:03:35.270786 17389 solver.cpp:228] Iteration 64800, loss = 0.00383548\nI0819 08:03:35.270839 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:03:35.270856 17389 solver.cpp:244]     Train net output #1: loss = 0.00383524 (* 1 = 0.00383524 loss)\nI0819 08:03:35.358965 17389 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0819 08:05:53.920748 17389 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 08:07:15.415244 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51628\nI0819 08:07:15.415621 17389 solver.cpp:404]     Test net output #1: loss = 2.35847 (* 1 = 2.35847 loss)\nI0819 08:07:16.728935 17389 solver.cpp:228] Iteration 64900, loss = 0.00413725\nI0819 08:07:16.728983 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:07:16.729001 17389 solver.cpp:244]     Train net output #1: loss = 0.004137 (* 1 = 0.004137 loss)\nI0819 08:07:16.815593 17389 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0819 08:09:35.502027 17389 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:10:56.998667 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51544\nI0819 08:10:56.999029 17389 solver.cpp:404]     Test net output #1: loss = 2.37426 (* 1 = 2.37426 loss)\nI0819 08:10:58.313004 17389 solver.cpp:228] Iteration 65000, loss = 0.00462407\nI0819 08:10:58.313051 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:10:58.313066 17389 solver.cpp:244]     Train net output #1: loss = 0.00462383 (* 1 = 0.00462383 loss)\nI0819 08:10:58.396920 17389 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0819 08:13:16.962441 17389 solver.cpp:337] Iteration 65100, Testing net 
(#0)\nI0819 08:14:38.502481 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51684\nI0819 08:14:38.502828 17389 solver.cpp:404]     Test net output #1: loss = 2.35705 (* 1 = 2.35705 loss)\nI0819 08:14:39.814924 17389 solver.cpp:228] Iteration 65100, loss = 0.0042138\nI0819 08:14:39.814980 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:14:39.814996 17389 solver.cpp:244]     Train net output #1: loss = 0.00421355 (* 1 = 0.00421355 loss)\nI0819 08:14:39.907348 17389 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0819 08:16:58.522727 17389 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:18:20.021184 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51324\nI0819 08:18:20.021554 17389 solver.cpp:404]     Test net output #1: loss = 2.38445 (* 1 = 2.38445 loss)\nI0819 08:18:21.333289 17389 solver.cpp:228] Iteration 65200, loss = 0.00420371\nI0819 08:18:21.333343 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:18:21.333359 17389 solver.cpp:244]     Train net output #1: loss = 0.00420346 (* 1 = 0.00420346 loss)\nI0819 08:18:21.418426 17389 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0819 08:20:40.003715 17389 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 08:22:01.503530 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51604\nI0819 08:22:01.503868 17389 solver.cpp:404]     Test net output #1: loss = 2.35735 (* 1 = 2.35735 loss)\nI0819 08:22:02.816195 17389 solver.cpp:228] Iteration 65300, loss = 0.00436578\nI0819 08:22:02.816242 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:22:02.816259 17389 solver.cpp:244]     Train net output #1: loss = 0.00436553 (* 1 = 0.00436553 loss)\nI0819 08:22:02.899992 17389 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0819 08:24:21.508596 17389 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 08:25:43.007462 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5134\nI0819 08:25:43.007833 17389 
solver.cpp:404]     Test net output #1: loss = 2.37742 (* 1 = 2.37742 loss)\nI0819 08:25:44.319963 17389 solver.cpp:228] Iteration 65400, loss = 0.00466209\nI0819 08:25:44.320017 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:25:44.320034 17389 solver.cpp:244]     Train net output #1: loss = 0.00466185 (* 1 = 0.00466185 loss)\nI0819 08:25:44.403822 17389 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0819 08:28:02.996309 17389 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 08:29:24.497489 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5178\nI0819 08:29:24.497867 17389 solver.cpp:404]     Test net output #1: loss = 2.35022 (* 1 = 2.35022 loss)\nI0819 08:29:25.810217 17389 solver.cpp:228] Iteration 65500, loss = 0.00446148\nI0819 08:29:25.810267 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:29:25.810282 17389 solver.cpp:244]     Train net output #1: loss = 0.00446124 (* 1 = 0.00446124 loss)\nI0819 08:29:25.890137 17389 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0819 08:31:44.438633 17389 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 08:33:05.935353 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51444\nI0819 08:33:05.935726 17389 solver.cpp:404]     Test net output #1: loss = 2.37328 (* 1 = 2.37328 loss)\nI0819 08:33:07.248014 17389 solver.cpp:228] Iteration 65600, loss = 0.00423185\nI0819 08:33:07.248059 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:33:07.248077 17389 solver.cpp:244]     Train net output #1: loss = 0.0042316 (* 1 = 0.0042316 loss)\nI0819 08:33:07.339119 17389 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0819 08:35:25.927605 17389 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 08:36:47.442338 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51604\nI0819 08:36:47.442715 17389 solver.cpp:404]     Test net output #1: loss = 2.35634 (* 1 = 2.35634 loss)\nI0819 08:36:48.755673 17389 solver.cpp:228] Iteration 
65700, loss = 0.00454579\nI0819 08:36:48.755717 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:36:48.755733 17389 solver.cpp:244]     Train net output #1: loss = 0.00454555 (* 1 = 0.00454555 loss)\nI0819 08:36:48.838583 17389 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0819 08:39:07.449019 17389 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 08:40:28.969071 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51492\nI0819 08:40:28.969422 17389 solver.cpp:404]     Test net output #1: loss = 2.37465 (* 1 = 2.37465 loss)\nI0819 08:40:30.281880 17389 solver.cpp:228] Iteration 65800, loss = 0.00409945\nI0819 08:40:30.281929 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:40:30.281947 17389 solver.cpp:244]     Train net output #1: loss = 0.00409921 (* 1 = 0.00409921 loss)\nI0819 08:40:30.364312 17389 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0819 08:42:48.969692 17389 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 08:44:10.091465 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51588\nI0819 08:44:10.091848 17389 solver.cpp:404]     Test net output #1: loss = 2.35164 (* 1 = 2.35164 loss)\nI0819 08:44:11.403483 17389 solver.cpp:228] Iteration 65900, loss = 0.00428303\nI0819 08:44:11.403532 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:44:11.403549 17389 solver.cpp:244]     Train net output #1: loss = 0.00428278 (* 1 = 0.00428278 loss)\nI0819 08:44:11.492332 17389 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0819 08:46:29.982025 17389 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 08:47:51.077939 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5146\nI0819 08:47:51.078248 17389 solver.cpp:404]     Test net output #1: loss = 2.3754 (* 1 = 2.3754 loss)\nI0819 08:47:52.390578 17389 solver.cpp:228] Iteration 66000, loss = 0.00405602\nI0819 08:47:52.390627 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:47:52.390645 
17389 solver.cpp:244]     Train net output #1: loss = 0.00405578 (* 1 = 0.00405578 loss)\nI0819 08:47:52.484989 17389 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0819 08:50:11.084807 17389 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 08:51:32.454689 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51544\nI0819 08:51:32.455034 17389 solver.cpp:404]     Test net output #1: loss = 2.3636 (* 1 = 2.3636 loss)\nI0819 08:51:33.768326 17389 solver.cpp:228] Iteration 66100, loss = 0.00433765\nI0819 08:51:33.768376 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:51:33.768393 17389 solver.cpp:244]     Train net output #1: loss = 0.0043374 (* 1 = 0.0043374 loss)\nI0819 08:51:33.854033 17389 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0819 08:53:52.361063 17389 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 08:55:13.840626 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51308\nI0819 08:55:13.840967 17389 solver.cpp:404]     Test net output #1: loss = 2.38312 (* 1 = 2.38312 loss)\nI0819 08:55:15.154198 17389 solver.cpp:228] Iteration 66200, loss = 0.00465929\nI0819 08:55:15.154247 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:55:15.154263 17389 solver.cpp:244]     Train net output #1: loss = 0.00465905 (* 1 = 0.00465905 loss)\nI0819 08:55:15.240640 17389 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0819 08:57:33.849040 17389 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 08:58:55.368824 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5166\nI0819 08:58:55.369164 17389 solver.cpp:404]     Test net output #1: loss = 2.35726 (* 1 = 2.35726 loss)\nI0819 08:58:56.682859 17389 solver.cpp:228] Iteration 66300, loss = 0.00424863\nI0819 08:58:56.682911 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:58:56.682929 17389 solver.cpp:244]     Train net output #1: loss = 0.00424838 (* 1 = 0.00424838 loss)\nI0819 08:58:56.774019 17389 
sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0819 09:01:15.414798 17389 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 09:02:36.926996 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51448\nI0819 09:02:36.927353 17389 solver.cpp:404]     Test net output #1: loss = 2.37878 (* 1 = 2.37878 loss)\nI0819 09:02:38.241034 17389 solver.cpp:228] Iteration 66400, loss = 0.0044147\nI0819 09:02:38.241080 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:02:38.241096 17389 solver.cpp:244]     Train net output #1: loss = 0.00441446 (* 1 = 0.00441446 loss)\nI0819 09:02:38.331269 17389 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0819 09:04:56.913303 17389 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 09:06:18.439051 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51596\nI0819 09:06:18.439390 17389 solver.cpp:404]     Test net output #1: loss = 2.3579 (* 1 = 2.3579 loss)\nI0819 09:06:19.752975 17389 solver.cpp:228] Iteration 66500, loss = 0.0039719\nI0819 09:06:19.753029 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:06:19.753046 17389 solver.cpp:244]     Train net output #1: loss = 0.00397166 (* 1 = 0.00397166 loss)\nI0819 09:06:19.837574 17389 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0819 09:08:38.335361 17389 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:09:59.853237 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51408\nI0819 09:09:59.853590 17389 solver.cpp:404]     Test net output #1: loss = 2.38125 (* 1 = 2.38125 loss)\nI0819 09:10:01.167035 17389 solver.cpp:228] Iteration 66600, loss = 0.00436024\nI0819 09:10:01.167083 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:10:01.167099 17389 solver.cpp:244]     Train net output #1: loss = 0.00436 (* 1 = 0.00436 loss)\nI0819 09:10:01.253206 17389 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0819 09:12:19.755947 17389 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 
09:13:41.276769 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51524\nI0819 09:13:41.277130 17389 solver.cpp:404]     Test net output #1: loss = 2.36209 (* 1 = 2.36209 loss)\nI0819 09:13:42.588649 17389 solver.cpp:228] Iteration 66700, loss = 0.00451378\nI0819 09:13:42.588706 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:13:42.588723 17389 solver.cpp:244]     Train net output #1: loss = 0.00451354 (* 1 = 0.00451354 loss)\nI0819 09:13:42.673106 17389 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0819 09:16:01.222504 17389 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:17:22.743963 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51144\nI0819 09:17:22.744325 17389 solver.cpp:404]     Test net output #1: loss = 2.39116 (* 1 = 2.39116 loss)\nI0819 09:17:24.055583 17389 solver.cpp:228] Iteration 66800, loss = 0.0042575\nI0819 09:17:24.055639 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:17:24.055655 17389 solver.cpp:244]     Train net output #1: loss = 0.00425725 (* 1 = 0.00425725 loss)\nI0819 09:17:24.154356 17389 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0819 09:19:42.763622 17389 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 09:21:04.286178 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5162\nI0819 09:21:04.286517 17389 solver.cpp:404]     Test net output #1: loss = 2.36193 (* 1 = 2.36193 loss)\nI0819 09:21:05.596293 17389 solver.cpp:228] Iteration 66900, loss = 0.00409746\nI0819 09:21:05.596349 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:21:05.596365 17389 solver.cpp:244]     Train net output #1: loss = 0.00409722 (* 1 = 0.00409722 loss)\nI0819 09:21:05.686131 17389 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0819 09:23:24.304342 17389 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 09:24:45.826390 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51252\nI0819 09:24:45.826757 17389 solver.cpp:404]     
Test net output #1: loss = 2.38675 (* 1 = 2.38675 loss)\nI0819 09:24:47.137563 17389 solver.cpp:228] Iteration 67000, loss = 0.00449751\nI0819 09:24:47.137617 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:24:47.137635 17389 solver.cpp:244]     Train net output #1: loss = 0.00449726 (* 1 = 0.00449726 loss)\nI0819 09:24:47.228281 17389 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0819 09:27:05.786916 17389 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 09:28:27.309067 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51732\nI0819 09:28:27.309417 17389 solver.cpp:404]     Test net output #1: loss = 2.3538 (* 1 = 2.3538 loss)\nI0819 09:28:28.619145 17389 solver.cpp:228] Iteration 67100, loss = 0.00415752\nI0819 09:28:28.619199 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:28:28.619216 17389 solver.cpp:244]     Train net output #1: loss = 0.00415727 (* 1 = 0.00415727 loss)\nI0819 09:28:28.717864 17389 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0819 09:30:47.402707 17389 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 09:32:08.911556 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51352\nI0819 09:32:08.911891 17389 solver.cpp:404]     Test net output #1: loss = 2.38164 (* 1 = 2.38164 loss)\nI0819 09:32:10.221833 17389 solver.cpp:228] Iteration 67200, loss = 0.00455939\nI0819 09:32:10.221887 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:32:10.221905 17389 solver.cpp:244]     Train net output #1: loss = 0.00455914 (* 1 = 0.00455914 loss)\nI0819 09:32:10.307493 17389 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0819 09:34:28.831768 17389 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 09:35:50.342499 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5168\nI0819 09:35:50.342867 17389 solver.cpp:404]     Test net output #1: loss = 2.35265 (* 1 = 2.35265 loss)\nI0819 09:35:51.653975 17389 solver.cpp:228] Iteration 67300, loss = 
0.00397618\nI0819 09:35:51.654028 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:35:51.654047 17389 solver.cpp:244]     Train net output #1: loss = 0.00397593 (* 1 = 0.00397593 loss)\nI0819 09:35:51.750284 17389 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0819 09:38:10.566898 17389 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 09:39:32.084380 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51464\nI0819 09:39:32.084736 17389 solver.cpp:404]     Test net output #1: loss = 2.3776 (* 1 = 2.3776 loss)\nI0819 09:39:33.394951 17389 solver.cpp:228] Iteration 67400, loss = 0.00424324\nI0819 09:39:33.395004 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:39:33.395022 17389 solver.cpp:244]     Train net output #1: loss = 0.004243 (* 1 = 0.004243 loss)\nI0819 09:39:33.491449 17389 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0819 09:41:52.099951 17389 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 09:43:13.617167 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51624\nI0819 09:43:13.617542 17389 solver.cpp:404]     Test net output #1: loss = 2.36118 (* 1 = 2.36118 loss)\nI0819 09:43:14.929145 17389 solver.cpp:228] Iteration 67500, loss = 0.00436573\nI0819 09:43:14.929200 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:43:14.929217 17389 solver.cpp:244]     Train net output #1: loss = 0.00436548 (* 1 = 0.00436548 loss)\nI0819 09:43:15.014901 17389 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0819 09:45:33.640584 17389 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 09:46:55.166352 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51416\nI0819 09:46:55.166705 17389 solver.cpp:404]     Test net output #1: loss = 2.3843 (* 1 = 2.3843 loss)\nI0819 09:46:56.478169 17389 solver.cpp:228] Iteration 67600, loss = 0.00391008\nI0819 09:46:56.478224 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:46:56.478242 17389 
solver.cpp:244]     Train net output #1: loss = 0.00390983 (* 1 = 0.00390983 loss)\nI0819 09:46:56.565749 17389 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0819 09:49:15.151176 17389 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 09:50:36.681344 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5156\nI0819 09:50:36.681730 17389 solver.cpp:404]     Test net output #1: loss = 2.3635 (* 1 = 2.3635 loss)\nI0819 09:50:37.991559 17389 solver.cpp:228] Iteration 67700, loss = 0.00431394\nI0819 09:50:37.991612 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:50:37.991629 17389 solver.cpp:244]     Train net output #1: loss = 0.0043137 (* 1 = 0.0043137 loss)\nI0819 09:50:38.083155 17389 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0819 09:52:56.690392 17389 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 09:54:18.217118 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51412\nI0819 09:54:18.217481 17389 solver.cpp:404]     Test net output #1: loss = 2.38364 (* 1 = 2.38364 loss)\nI0819 09:54:19.527619 17389 solver.cpp:228] Iteration 67800, loss = 0.00396143\nI0819 09:54:19.527673 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:54:19.527691 17389 solver.cpp:244]     Train net output #1: loss = 0.00396119 (* 1 = 0.00396119 loss)\nI0819 09:54:19.616072 17389 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0819 09:56:38.191890 17389 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 09:57:59.707756 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5158\nI0819 09:57:59.708062 17389 solver.cpp:404]     Test net output #1: loss = 2.36512 (* 1 = 2.36512 loss)\nI0819 09:58:01.017518 17389 solver.cpp:228] Iteration 67900, loss = 0.00411301\nI0819 09:58:01.017566 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:58:01.017583 17389 solver.cpp:244]     Train net output #1: loss = 0.00411277 (* 1 = 0.00411277 loss)\nI0819 09:58:01.116493 17389 sgd_solver.cpp:166] 
Iteration 67900, lr = 0.035\nI0819 10:00:19.710105 17389 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 10:01:41.217697 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51508\nI0819 10:01:41.218061 17389 solver.cpp:404]     Test net output #1: loss = 2.37076 (* 1 = 2.37076 loss)\nI0819 10:01:42.529181 17389 solver.cpp:228] Iteration 68000, loss = 0.00441386\nI0819 10:01:42.529223 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:01:42.529240 17389 solver.cpp:244]     Train net output #1: loss = 0.00441361 (* 1 = 0.00441361 loss)\nI0819 10:01:42.609060 17389 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0819 10:04:01.135190 17389 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 10:05:22.723284 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51644\nI0819 10:05:22.723646 17389 solver.cpp:404]     Test net output #1: loss = 2.36016 (* 1 = 2.36016 loss)\nI0819 10:05:24.033890 17389 solver.cpp:228] Iteration 68100, loss = 0.00405449\nI0819 10:05:24.033943 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:05:24.033960 17389 solver.cpp:244]     Train net output #1: loss = 0.00405425 (* 1 = 0.00405425 loss)\nI0819 10:05:24.117728 17389 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0819 10:07:42.696526 17389 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:09:04.205026 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51496\nI0819 10:09:04.205374 17389 solver.cpp:404]     Test net output #1: loss = 2.38274 (* 1 = 2.38274 loss)\nI0819 10:09:05.515105 17389 solver.cpp:228] Iteration 68200, loss = 0.00425525\nI0819 10:09:05.515156 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:09:05.515172 17389 solver.cpp:244]     Train net output #1: loss = 0.00425501 (* 1 = 0.00425501 loss)\nI0819 10:09:05.605767 17389 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0819 10:11:24.215852 17389 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 
10:12:45.732849 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51652\nI0819 10:12:45.733207 17389 solver.cpp:404]     Test net output #1: loss = 2.35148 (* 1 = 2.35148 loss)\nI0819 10:12:47.043550 17389 solver.cpp:228] Iteration 68300, loss = 0.00408211\nI0819 10:12:47.043599 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:12:47.043617 17389 solver.cpp:244]     Train net output #1: loss = 0.00408187 (* 1 = 0.00408187 loss)\nI0819 10:12:47.136358 17389 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0819 10:15:05.776422 17389 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:16:27.316731 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5132\nI0819 10:16:27.317080 17389 solver.cpp:404]     Test net output #1: loss = 2.38336 (* 1 = 2.38336 loss)\nI0819 10:16:28.627540 17389 solver.cpp:228] Iteration 68400, loss = 0.00396205\nI0819 10:16:28.627588 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:16:28.627605 17389 solver.cpp:244]     Train net output #1: loss = 0.0039618 (* 1 = 0.0039618 loss)\nI0819 10:16:28.718317 17389 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0819 10:18:47.264621 17389 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 10:20:08.806215 17389 solver.cpp:404]     Test net output #0: accuracy = 0.515\nI0819 10:20:08.806571 17389 solver.cpp:404]     Test net output #1: loss = 2.36579 (* 1 = 2.36579 loss)\nI0819 10:20:10.116531 17389 solver.cpp:228] Iteration 68500, loss = 0.00420642\nI0819 10:20:10.116580 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:20:10.116597 17389 solver.cpp:244]     Train net output #1: loss = 0.00420617 (* 1 = 0.00420617 loss)\nI0819 10:20:10.201973 17389 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0819 10:22:28.732401 17389 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 10:23:50.243526 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51388\nI0819 10:23:50.243875 17389 solver.cpp:404]     
Test net output #1: loss = 2.37588 (* 1 = 2.37588 loss)\nI0819 10:23:51.553936 17389 solver.cpp:228] Iteration 68600, loss = 0.00383171\nI0819 10:23:51.553984 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:23:51.554002 17389 solver.cpp:244]     Train net output #1: loss = 0.00383147 (* 1 = 0.00383147 loss)\nI0819 10:23:51.637325 17389 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0819 10:26:10.213567 17389 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 10:27:31.727602 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51632\nI0819 10:27:31.727953 17389 solver.cpp:404]     Test net output #1: loss = 2.36041 (* 1 = 2.36041 loss)\nI0819 10:27:33.037888 17389 solver.cpp:228] Iteration 68700, loss = 0.00385087\nI0819 10:27:33.037935 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:27:33.037952 17389 solver.cpp:244]     Train net output #1: loss = 0.00385063 (* 1 = 0.00385063 loss)\nI0819 10:27:33.120887 17389 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0819 10:29:51.665983 17389 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 10:31:13.183102 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51288\nI0819 10:31:13.183467 17389 solver.cpp:404]     Test net output #1: loss = 2.38637 (* 1 = 2.38637 loss)\nI0819 10:31:14.493558 17389 solver.cpp:228] Iteration 68800, loss = 0.00418421\nI0819 10:31:14.493608 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:31:14.493624 17389 solver.cpp:244]     Train net output #1: loss = 0.00418397 (* 1 = 0.00418397 loss)\nI0819 10:31:14.589601 17389 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0819 10:33:33.201311 17389 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 10:34:54.706957 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51564\nI0819 10:34:54.707319 17389 solver.cpp:404]     Test net output #1: loss = 2.36786 (* 1 = 2.36786 loss)\nI0819 10:34:56.017050 17389 solver.cpp:228] Iteration 68900, loss = 
0.00403568\nI0819 10:34:56.017102 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:34:56.017118 17389 solver.cpp:244]     Train net output #1: loss = 0.00403543 (* 1 = 0.00403543 loss)\nI0819 10:34:56.108063 17389 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0819 10:37:14.732321 17389 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 10:38:36.242852 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51244\nI0819 10:38:36.243192 17389 solver.cpp:404]     Test net output #1: loss = 2.38652 (* 1 = 2.38652 loss)\nI0819 10:38:37.553119 17389 solver.cpp:228] Iteration 69000, loss = 0.00375226\nI0819 10:38:37.553169 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:38:37.553186 17389 solver.cpp:244]     Train net output #1: loss = 0.00375201 (* 1 = 0.00375201 loss)\nI0819 10:38:37.643322 17389 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0819 10:40:56.236659 17389 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 10:42:17.742729 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51432\nI0819 10:42:17.743093 17389 solver.cpp:404]     Test net output #1: loss = 2.36941 (* 1 = 2.36941 loss)\nI0819 10:42:19.053371 17389 solver.cpp:228] Iteration 69100, loss = 0.00399403\nI0819 10:42:19.053421 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:42:19.053437 17389 solver.cpp:244]     Train net output #1: loss = 0.00399379 (* 1 = 0.00399379 loss)\nI0819 10:42:19.149734 17389 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0819 10:44:37.899427 17389 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 10:45:59.409711 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51228\nI0819 10:45:59.410078 17389 solver.cpp:404]     Test net output #1: loss = 2.39131 (* 1 = 2.39131 loss)\nI0819 10:46:00.719530 17389 solver.cpp:228] Iteration 69200, loss = 0.00426317\nI0819 10:46:00.719583 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:46:00.719600 17389 
solver.cpp:244]     Train net output #1: loss = 0.00426293 (* 1 = 0.00426293 loss)\nI0819 10:46:00.852128 17389 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0819 10:48:19.513054 17389 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 10:49:41.014091 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51364\nI0819 10:49:41.014459 17389 solver.cpp:404]     Test net output #1: loss = 2.37502 (* 1 = 2.37502 loss)\nI0819 10:49:42.324697 17389 solver.cpp:228] Iteration 69300, loss = 0.00382192\nI0819 10:49:42.324757 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:49:42.324775 17389 solver.cpp:244]     Train net output #1: loss = 0.00382168 (* 1 = 0.00382168 loss)\nI0819 10:49:42.417927 17389 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0819 10:52:01.056429 17389 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 10:53:22.569941 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51252\nI0819 10:53:22.570267 17389 solver.cpp:404]     Test net output #1: loss = 2.39645 (* 1 = 2.39645 loss)\nI0819 10:53:23.880383 17389 solver.cpp:228] Iteration 69400, loss = 0.00395427\nI0819 10:53:23.880434 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:53:23.880451 17389 solver.cpp:244]     Train net output #1: loss = 0.00395403 (* 1 = 0.00395403 loss)\nI0819 10:53:23.966259 17389 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0819 10:55:42.560253 17389 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 10:57:04.099453 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5136\nI0819 10:57:04.099817 17389 solver.cpp:404]     Test net output #1: loss = 2.36604 (* 1 = 2.36604 loss)\nI0819 10:57:05.410178 17389 solver.cpp:228] Iteration 69500, loss = 0.0045863\nI0819 10:57:05.410233 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:57:05.410249 17389 solver.cpp:244]     Train net output #1: loss = 0.00458605 (* 1 = 0.00458605 loss)\nI0819 10:57:05.499337 17389 
sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0819 10:59:24.297094 17389 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 11:00:45.829371 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5132\nI0819 11:00:45.829735 17389 solver.cpp:404]     Test net output #1: loss = 2.38803 (* 1 = 2.38803 loss)\nI0819 11:00:47.140269 17389 solver.cpp:228] Iteration 69600, loss = 0.00393007\nI0819 11:00:47.140321 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:00:47.140338 17389 solver.cpp:244]     Train net output #1: loss = 0.00392982 (* 1 = 0.00392982 loss)\nI0819 11:00:47.228842 17389 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0819 11:03:05.823843 17389 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 11:04:27.353672 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51336\nI0819 11:04:27.354022 17389 solver.cpp:404]     Test net output #1: loss = 2.3756 (* 1 = 2.3756 loss)\nI0819 11:04:28.664631 17389 solver.cpp:228] Iteration 69700, loss = 0.00430506\nI0819 11:04:28.664682 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:04:28.664700 17389 solver.cpp:244]     Train net output #1: loss = 0.00430482 (* 1 = 0.00430482 loss)\nI0819 11:04:28.755551 17389 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0819 11:06:47.257437 17389 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 11:08:08.793800 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5122\nI0819 11:08:08.794172 17389 solver.cpp:404]     Test net output #1: loss = 2.39441 (* 1 = 2.39441 loss)\nI0819 11:08:10.104496 17389 solver.cpp:228] Iteration 69800, loss = 0.00363469\nI0819 11:08:10.104549 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:08:10.104565 17389 solver.cpp:244]     Train net output #1: loss = 0.00363445 (* 1 = 0.00363445 loss)\nI0819 11:08:10.195020 17389 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0819 11:10:28.730888 17389 solver.cpp:337] Iteration 69900, Testing net 
(#0)\nI0819 11:11:50.270473 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5136\nI0819 11:11:50.270814 17389 solver.cpp:404]     Test net output #1: loss = 2.37702 (* 1 = 2.37702 loss)\nI0819 11:11:51.582526 17389 solver.cpp:228] Iteration 69900, loss = 0.00377393\nI0819 11:11:51.582576 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:11:51.582593 17389 solver.cpp:244]     Train net output #1: loss = 0.00377369 (* 1 = 0.00377369 loss)\nI0819 11:11:51.677557 17389 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0819 11:14:10.169848 17389 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 11:15:31.707947 17389 solver.cpp:404]     Test net output #0: accuracy = 0.51044\nI0819 11:15:31.708318 17389 solver.cpp:404]     Test net output #1: loss = 2.40269 (* 1 = 2.40269 loss)\nI0819 11:15:33.019753 17389 solver.cpp:228] Iteration 70000, loss = 0.00399444\nI0819 11:15:33.019801 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:15:33.019819 17389 solver.cpp:244]     Train net output #1: loss = 0.00399419 (* 1 = 0.00399419 loss)\nI0819 11:15:33.108081 17389 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:15:33.108106 17389 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0819 11:17:51.557564 17389 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:19:13.093309 17389 solver.cpp:404]     Test net output #0: accuracy = 0.52652\nI0819 11:19:13.093684 17389 solver.cpp:404]     Test net output #1: loss = 2.29243 (* 1 = 2.29243 loss)\nI0819 11:19:14.404991 17389 solver.cpp:228] Iteration 70100, loss = 0.00404415\nI0819 11:19:14.405045 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:19:14.405061 17389 solver.cpp:244]     Train net output #1: loss = 0.0040439 (* 1 = 0.0040439 loss)\nI0819 11:19:14.486524 17389 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0819 11:21:33.028597 17389 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 11:22:54.565444 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.53688\nI0819 11:22:54.565829 17389 solver.cpp:404]     Test net output #1: loss = 2.23209 (* 1 = 2.23209 loss)\nI0819 11:22:55.877637 17389 solver.cpp:228] Iteration 70200, loss = 0.0042075\nI0819 11:22:55.877687 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:22:55.877704 17389 solver.cpp:244]     Train net output #1: loss = 0.00420726 (* 1 = 0.00420726 loss)\nI0819 11:22:55.971559 17389 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0819 11:25:14.467290 17389 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 11:26:36.011234 17389 solver.cpp:404]     Test net output #0: accuracy = 0.54848\nI0819 11:26:36.011564 17389 solver.cpp:404]     Test net output #1: loss = 2.14584 (* 1 = 2.14584 loss)\nI0819 11:26:37.323237 17389 solver.cpp:228] Iteration 70300, loss = 0.00372572\nI0819 11:26:37.323285 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:26:37.323302 17389 solver.cpp:244]     Train net output #1: loss = 0.00372547 (* 1 = 0.00372547 loss)\nI0819 11:26:37.417515 17389 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0819 11:28:55.902832 17389 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 11:30:17.447047 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5532\nI0819 11:30:17.447381 17389 solver.cpp:404]     Test net output #1: loss = 2.11436 (* 1 = 2.11436 loss)\nI0819 11:30:18.759115 17389 solver.cpp:228] Iteration 70400, loss = 0.0039734\nI0819 11:30:18.759162 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:30:18.759179 17389 solver.cpp:244]     Train net output #1: loss = 0.00397316 (* 1 = 0.00397316 loss)\nI0819 11:30:18.850076 17389 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0819 11:32:37.375077 17389 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 11:33:58.914542 17389 solver.cpp:404]     Test net output #0: accuracy = 0.562\nI0819 11:33:58.914908 17389 solver.cpp:404]     Test net output #1: 
loss = 2.05153 (* 1 = 2.05153 loss)\nI0819 11:34:00.226331 17389 solver.cpp:228] Iteration 70500, loss = 0.00407592\nI0819 11:34:00.226380 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:34:00.226397 17389 solver.cpp:244]     Train net output #1: loss = 0.00407568 (* 1 = 0.00407568 loss)\nI0819 11:34:00.314357 17389 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0819 11:36:18.740563 17389 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 11:37:39.383291 17389 solver.cpp:404]     Test net output #0: accuracy = 0.56636\nI0819 11:37:39.383597 17389 solver.cpp:404]     Test net output #1: loss = 2.03503 (* 1 = 2.03503 loss)\nI0819 11:37:40.690239 17389 solver.cpp:228] Iteration 70600, loss = 0.00390105\nI0819 11:37:40.690286 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:37:40.690304 17389 solver.cpp:244]     Train net output #1: loss = 0.00390081 (* 1 = 0.00390081 loss)\nI0819 11:37:40.789587 17389 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0819 11:39:59.254400 17389 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 11:41:20.796857 17389 solver.cpp:404]     Test net output #0: accuracy = 0.57376\nI0819 11:41:20.797215 17389 solver.cpp:404]     Test net output #1: loss = 1.98598 (* 1 = 1.98598 loss)\nI0819 11:41:22.108634 17389 solver.cpp:228] Iteration 70700, loss = 0.00393831\nI0819 11:41:22.108690 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:41:22.108707 17389 solver.cpp:244]     Train net output #1: loss = 0.00393807 (* 1 = 0.00393807 loss)\nI0819 11:41:22.194875 17389 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0819 11:43:40.936977 17389 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0819 11:45:02.477064 17389 solver.cpp:404]     Test net output #0: accuracy = 0.57536\nI0819 11:45:02.477416 17389 solver.cpp:404]     Test net output #1: loss = 1.98173 (* 1 = 1.98173 loss)\nI0819 11:45:03.788617 17389 solver.cpp:228] Iteration 70800, loss = 0.00403126\nI0819 
11:45:03.788676 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:45:03.788693 17389 solver.cpp:244]     Train net output #1: loss = 0.00403102 (* 1 = 0.00403102 loss)\nI0819 11:45:03.881917 17389 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0819 11:47:22.789171 17389 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 11:48:44.322274 17389 solver.cpp:404]     Test net output #0: accuracy = 0.58116\nI0819 11:48:44.322640 17389 solver.cpp:404]     Test net output #1: loss = 1.9424 (* 1 = 1.9424 loss)\nI0819 11:48:45.633554 17389 solver.cpp:228] Iteration 70900, loss = 0.00381933\nI0819 11:48:45.633608 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:48:45.633625 17389 solver.cpp:244]     Train net output #1: loss = 0.00381909 (* 1 = 0.00381909 loss)\nI0819 11:48:45.730147 17389 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0819 11:51:04.524420 17389 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 11:52:25.148257 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5816\nI0819 11:52:25.148605 17389 solver.cpp:404]     Test net output #1: loss = 1.94416 (* 1 = 1.94416 loss)\nI0819 11:52:26.456007 17389 solver.cpp:228] Iteration 71000, loss = 0.00388862\nI0819 11:52:26.456058 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:52:26.456074 17389 solver.cpp:244]     Train net output #1: loss = 0.00388838 (* 1 = 0.00388838 loss)\nI0819 11:52:26.551990 17389 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0819 11:54:44.853826 17389 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 11:56:05.466917 17389 solver.cpp:404]     Test net output #0: accuracy = 0.58584\nI0819 11:56:05.467242 17389 solver.cpp:404]     Test net output #1: loss = 1.91243 (* 1 = 1.91243 loss)\nI0819 11:56:06.773406 17389 solver.cpp:228] Iteration 71100, loss = 0.00376547\nI0819 11:56:06.773453 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:56:06.773470 17389 solver.cpp:244]     
Train net output #1: loss = 0.00376522 (* 1 = 0.00376522 loss)\nI0819 11:56:06.873615 17389 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0819 11:58:25.153671 17389 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 11:59:45.776057 17389 solver.cpp:404]     Test net output #0: accuracy = 0.58472\nI0819 11:59:45.776348 17389 solver.cpp:404]     Test net output #1: loss = 1.91901 (* 1 = 1.91901 loss)\nI0819 11:59:47.082303 17389 solver.cpp:228] Iteration 71200, loss = 0.00367548\nI0819 11:59:47.082350 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:59:47.082367 17389 solver.cpp:244]     Train net output #1: loss = 0.00367524 (* 1 = 0.00367524 loss)\nI0819 11:59:47.178911 17389 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0819 12:02:05.603626 17389 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 12:03:26.228894 17389 solver.cpp:404]     Test net output #0: accuracy = 0.58932\nI0819 12:03:26.229158 17389 solver.cpp:404]     Test net output #1: loss = 1.89302 (* 1 = 1.89302 loss)\nI0819 12:03:27.536216 17389 solver.cpp:228] Iteration 71300, loss = 0.00378522\nI0819 12:03:27.536262 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:03:27.536279 17389 solver.cpp:244]     Train net output #1: loss = 0.00378497 (* 1 = 0.00378497 loss)\nI0819 12:03:27.627352 17389 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0819 12:05:45.901373 17389 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 12:07:06.532886 17389 solver.cpp:404]     Test net output #0: accuracy = 0.58896\nI0819 12:07:06.533182 17389 solver.cpp:404]     Test net output #1: loss = 1.90105 (* 1 = 1.90105 loss)\nI0819 12:07:07.839825 17389 solver.cpp:228] Iteration 71400, loss = 0.004241\nI0819 12:07:07.839871 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:07:07.839889 17389 solver.cpp:244]     Train net output #1: loss = 0.00424075 (* 1 = 0.00424075 loss)\nI0819 12:07:07.931951 17389 sgd_solver.cpp:166] Iteration 
71400, lr = 0.0035\nI0819 12:09:26.236763 17389 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:10:46.862812 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59208\nI0819 12:10:46.863147 17389 solver.cpp:404]     Test net output #1: loss = 1.87969 (* 1 = 1.87969 loss)\nI0819 12:10:48.169740 17389 solver.cpp:228] Iteration 71500, loss = 0.00367514\nI0819 12:10:48.169786 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:10:48.169803 17389 solver.cpp:244]     Train net output #1: loss = 0.0036749 (* 1 = 0.0036749 loss)\nI0819 12:10:48.268252 17389 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0819 12:13:06.637486 17389 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:14:27.263866 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59012\nI0819 12:14:27.264165 17389 solver.cpp:404]     Test net output #1: loss = 1.89063 (* 1 = 1.89063 loss)\nI0819 12:14:28.570468 17389 solver.cpp:228] Iteration 71600, loss = 0.00378666\nI0819 12:14:28.570514 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:14:28.570531 17389 solver.cpp:244]     Train net output #1: loss = 0.00378642 (* 1 = 0.00378642 loss)\nI0819 12:14:28.673720 17389 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0819 12:16:47.270869 17389 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:18:07.886525 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59504\nI0819 12:18:07.886826 17389 solver.cpp:404]     Test net output #1: loss = 1.87194 (* 1 = 1.87194 loss)\nI0819 12:18:09.192944 17389 solver.cpp:228] Iteration 71700, loss = 0.00378568\nI0819 12:18:09.192989 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:18:09.193006 17389 solver.cpp:244]     Train net output #1: loss = 0.00378543 (* 1 = 0.00378543 loss)\nI0819 12:18:09.295317 17389 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0819 12:20:27.869580 17389 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 12:21:48.493522 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.59276\nI0819 12:21:48.493813 17389 solver.cpp:404]     Test net output #1: loss = 1.88389 (* 1 = 1.88389 loss)\nI0819 12:21:49.799939 17389 solver.cpp:228] Iteration 71800, loss = 0.00386437\nI0819 12:21:49.799988 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:21:49.800005 17389 solver.cpp:244]     Train net output #1: loss = 0.00386413 (* 1 = 0.00386413 loss)\nI0819 12:21:49.899432 17389 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0819 12:24:08.555160 17389 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 12:25:29.178004 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5952\nI0819 12:25:29.178266 17389 solver.cpp:404]     Test net output #1: loss = 1.86638 (* 1 = 1.86638 loss)\nI0819 12:25:30.485126 17389 solver.cpp:228] Iteration 71900, loss = 0.00387094\nI0819 12:25:30.485173 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:25:30.485190 17389 solver.cpp:244]     Train net output #1: loss = 0.00387069 (* 1 = 0.00387069 loss)\nI0819 12:25:30.581630 17389 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0819 12:27:49.179651 17389 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 12:29:09.805820 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59296\nI0819 12:29:09.806090 17389 solver.cpp:404]     Test net output #1: loss = 1.87954 (* 1 = 1.87954 loss)\nI0819 12:29:11.112598 17389 solver.cpp:228] Iteration 72000, loss = 0.0042028\nI0819 12:29:11.112648 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:29:11.112664 17389 solver.cpp:244]     Train net output #1: loss = 0.00420255 (* 1 = 0.00420255 loss)\nI0819 12:29:11.212453 17389 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0819 12:31:29.813639 17389 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 12:32:50.454445 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59644\nI0819 12:32:50.454785 17389 solver.cpp:404]     Test net output 
#1: loss = 1.86449 (* 1 = 1.86449 loss)\nI0819 12:32:51.761584 17389 solver.cpp:228] Iteration 72100, loss = 0.00389747\nI0819 12:32:51.761631 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:32:51.761648 17389 solver.cpp:244]     Train net output #1: loss = 0.00389723 (* 1 = 0.00389723 loss)\nI0819 12:32:51.858645 17389 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0819 12:35:10.596904 17389 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 12:36:31.229199 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59416\nI0819 12:36:31.229511 17389 solver.cpp:404]     Test net output #1: loss = 1.8772 (* 1 = 1.8772 loss)\nI0819 12:36:32.536083 17389 solver.cpp:228] Iteration 72200, loss = 0.00384258\nI0819 12:36:32.536129 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:36:32.536145 17389 solver.cpp:244]     Train net output #1: loss = 0.00384234 (* 1 = 0.00384234 loss)\nI0819 12:36:32.630532 17389 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0819 12:38:51.234535 17389 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 12:40:11.871629 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59724\nI0819 12:40:11.871942 17389 solver.cpp:404]     Test net output #1: loss = 1.86231 (* 1 = 1.86231 loss)\nI0819 12:40:13.177935 17389 solver.cpp:228] Iteration 72300, loss = 0.00372576\nI0819 12:40:13.177984 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:40:13.178000 17389 solver.cpp:244]     Train net output #1: loss = 0.00372552 (* 1 = 0.00372552 loss)\nI0819 12:40:13.278580 17389 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0819 12:42:31.838798 17389 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 12:43:52.471272 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59484\nI0819 12:43:52.471544 17389 solver.cpp:404]     Test net output #1: loss = 1.87588 (* 1 = 1.87588 loss)\nI0819 12:43:53.778069 17389 solver.cpp:228] Iteration 72400, loss = 0.0038865\nI0819 
12:43:53.778115 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:43:53.778131 17389 solver.cpp:244]     Train net output #1: loss = 0.00388625 (* 1 = 0.00388625 loss)\nI0819 12:43:53.880470 17389 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0819 12:46:12.745627 17389 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 12:47:33.381386 17389 solver.cpp:404]     Test net output #0: accuracy = 0.598\nI0819 12:47:33.381649 17389 solver.cpp:404]     Test net output #1: loss = 1.86144 (* 1 = 1.86144 loss)\nI0819 12:47:34.688139 17389 solver.cpp:228] Iteration 72500, loss = 0.00430055\nI0819 12:47:34.688187 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:47:34.688204 17389 solver.cpp:244]     Train net output #1: loss = 0.00430031 (* 1 = 0.00430031 loss)\nI0819 12:47:34.790704 17389 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0819 12:49:53.604729 17389 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 12:51:14.238317 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59524\nI0819 12:51:14.238629 17389 solver.cpp:404]     Test net output #1: loss = 1.87421 (* 1 = 1.87421 loss)\nI0819 12:51:15.545292 17389 solver.cpp:228] Iteration 72600, loss = 0.00389074\nI0819 12:51:15.545341 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:51:15.545357 17389 solver.cpp:244]     Train net output #1: loss = 0.00389049 (* 1 = 0.00389049 loss)\nI0819 12:51:15.639699 17389 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0819 12:53:34.358937 17389 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 12:54:54.994722 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59756\nI0819 12:54:54.995090 17389 solver.cpp:404]     Test net output #1: loss = 1.86089 (* 1 = 1.86089 loss)\nI0819 12:54:56.301620 17389 solver.cpp:228] Iteration 72700, loss = 0.00359554\nI0819 12:54:56.301669 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:54:56.301686 17389 solver.cpp:244]     
Train net output #1: loss = 0.0035953 (* 1 = 0.0035953 loss)\nI0819 12:54:56.397408 17389 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0819 12:57:15.161005 17389 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 12:58:35.801399 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59392\nI0819 12:58:35.801723 17389 solver.cpp:404]     Test net output #1: loss = 1.87368 (* 1 = 1.87368 loss)\nI0819 12:58:37.108912 17389 solver.cpp:228] Iteration 72800, loss = 0.0041007\nI0819 12:58:37.108961 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:58:37.108978 17389 solver.cpp:244]     Train net output #1: loss = 0.00410046 (* 1 = 0.00410046 loss)\nI0819 12:58:37.209892 17389 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0819 13:00:55.837724 17389 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 13:02:16.487583 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59692\nI0819 13:02:16.487871 17389 solver.cpp:404]     Test net output #1: loss = 1.86065 (* 1 = 1.86065 loss)\nI0819 13:02:17.794657 17389 solver.cpp:228] Iteration 72900, loss = 0.00371598\nI0819 13:02:17.794708 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:02:17.794724 17389 solver.cpp:244]     Train net output #1: loss = 0.00371573 (* 1 = 0.00371573 loss)\nI0819 13:02:17.891523 17389 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0819 13:04:36.672475 17389 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 13:05:57.303094 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59356\nI0819 13:05:57.303361 17389 solver.cpp:404]     Test net output #1: loss = 1.87346 (* 1 = 1.87346 loss)\nI0819 13:05:58.610317 17389 solver.cpp:228] Iteration 73000, loss = 0.00381168\nI0819 13:05:58.610366 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:05:58.610383 17389 solver.cpp:244]     Train net output #1: loss = 0.00381143 (* 1 = 0.00381143 loss)\nI0819 13:05:58.713917 17389 sgd_solver.cpp:166] Iteration 
73000, lr = 0.0035\nI0819 13:08:17.441195 17389 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:09:38.079193 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59644\nI0819 13:09:38.079463 17389 solver.cpp:404]     Test net output #1: loss = 1.86108 (* 1 = 1.86108 loss)\nI0819 13:09:39.386615 17389 solver.cpp:228] Iteration 73100, loss = 0.00398358\nI0819 13:09:39.386665 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:09:39.386682 17389 solver.cpp:244]     Train net output #1: loss = 0.00398334 (* 1 = 0.00398334 loss)\nI0819 13:09:39.486120 17389 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0819 13:11:58.159816 17389 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:13:18.772663 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59372\nI0819 13:13:18.772975 17389 solver.cpp:404]     Test net output #1: loss = 1.87377 (* 1 = 1.87377 loss)\nI0819 13:13:20.080607 17389 solver.cpp:228] Iteration 73200, loss = 0.00426108\nI0819 13:13:20.080657 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:13:20.080673 17389 solver.cpp:244]     Train net output #1: loss = 0.00426084 (* 1 = 0.00426084 loss)\nI0819 13:13:20.182485 17389 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0819 13:15:38.853576 17389 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:16:59.480723 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59664\nI0819 13:16:59.481051 17389 solver.cpp:404]     Test net output #1: loss = 1.86118 (* 1 = 1.86118 loss)\nI0819 13:17:00.787508 17389 solver.cpp:228] Iteration 73300, loss = 0.00399664\nI0819 13:17:00.787561 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:17:00.787580 17389 solver.cpp:244]     Train net output #1: loss = 0.0039964 (* 1 = 0.0039964 loss)\nI0819 13:17:00.884369 17389 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0819 13:19:19.702946 17389 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 13:20:40.323087 17389 
solver.cpp:404]     Test net output #0: accuracy = 0.5936\nI0819 13:20:40.323401 17389 solver.cpp:404]     Test net output #1: loss = 1.87424 (* 1 = 1.87424 loss)\nI0819 13:20:41.630723 17389 solver.cpp:228] Iteration 73400, loss = 0.00384442\nI0819 13:20:41.630772 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:20:41.630789 17389 solver.cpp:244]     Train net output #1: loss = 0.00384418 (* 1 = 0.00384418 loss)\nI0819 13:20:41.728837 17389 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0819 13:23:00.397956 17389 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 13:24:21.026789 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59664\nI0819 13:24:21.027088 17389 solver.cpp:404]     Test net output #1: loss = 1.86135 (* 1 = 1.86135 loss)\nI0819 13:24:22.334569 17389 solver.cpp:228] Iteration 73500, loss = 0.00395204\nI0819 13:24:22.334619 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:24:22.334636 17389 solver.cpp:244]     Train net output #1: loss = 0.00395179 (* 1 = 0.00395179 loss)\nI0819 13:24:22.427976 17389 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0819 13:26:40.987529 17389 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 13:28:01.614229 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59296\nI0819 13:28:01.614485 17389 solver.cpp:404]     Test net output #1: loss = 1.87378 (* 1 = 1.87378 loss)\nI0819 13:28:02.921627 17389 solver.cpp:228] Iteration 73600, loss = 0.00377266\nI0819 13:28:02.921677 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:28:02.921694 17389 solver.cpp:244]     Train net output #1: loss = 0.00377242 (* 1 = 0.00377242 loss)\nI0819 13:28:03.017465 17389 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0819 13:30:21.711854 17389 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 13:31:42.326069 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59628\nI0819 13:31:42.326355 17389 solver.cpp:404]     Test net output 
#1: loss = 1.86148 (* 1 = 1.86148 loss)\nI0819 13:31:43.633380 17389 solver.cpp:228] Iteration 73700, loss = 0.00405621\nI0819 13:31:43.633431 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:31:43.633448 17389 solver.cpp:244]     Train net output #1: loss = 0.00405596 (* 1 = 0.00405596 loss)\nI0819 13:31:43.729001 17389 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0819 13:34:02.453733 17389 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 13:35:23.077594 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59372\nI0819 13:35:23.077893 17389 solver.cpp:404]     Test net output #1: loss = 1.87426 (* 1 = 1.87426 loss)\nI0819 13:35:24.384307 17389 solver.cpp:228] Iteration 73800, loss = 0.00380697\nI0819 13:35:24.384357 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:35:24.384374 17389 solver.cpp:244]     Train net output #1: loss = 0.00380672 (* 1 = 0.00380672 loss)\nI0819 13:35:24.480600 17389 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0819 13:37:43.173377 17389 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 13:39:03.798360 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59684\nI0819 13:39:03.798666 17389 solver.cpp:404]     Test net output #1: loss = 1.86176 (* 1 = 1.86176 loss)\nI0819 13:39:05.105984 17389 solver.cpp:228] Iteration 73900, loss = 0.0040827\nI0819 13:39:05.106031 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:39:05.106050 17389 solver.cpp:244]     Train net output #1: loss = 0.00408245 (* 1 = 0.00408245 loss)\nI0819 13:39:05.191875 17389 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0819 13:41:23.861796 17389 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 13:42:44.488329 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59436\nI0819 13:42:44.488651 17389 solver.cpp:404]     Test net output #1: loss = 1.87446 (* 1 = 1.87446 loss)\nI0819 13:42:45.795239 17389 solver.cpp:228] Iteration 74000, loss = 
0.00383272\nI0819 13:42:45.795289 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:42:45.795306 17389 solver.cpp:244]     Train net output #1: loss = 0.00383248 (* 1 = 0.00383248 loss)\nI0819 13:42:45.895133 17389 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0819 13:45:04.550478 17389 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 13:46:25.161847 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5972\nI0819 13:46:25.162117 17389 solver.cpp:404]     Test net output #1: loss = 1.8619 (* 1 = 1.8619 loss)\nI0819 13:46:26.469327 17389 solver.cpp:228] Iteration 74100, loss = 0.00407579\nI0819 13:46:26.469378 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:46:26.469395 17389 solver.cpp:244]     Train net output #1: loss = 0.00407555 (* 1 = 0.00407555 loss)\nI0819 13:46:26.568475 17389 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0819 13:48:45.193145 17389 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 13:50:05.810431 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59348\nI0819 13:50:05.810726 17389 solver.cpp:404]     Test net output #1: loss = 1.87392 (* 1 = 1.87392 loss)\nI0819 13:50:07.118383 17389 solver.cpp:228] Iteration 74200, loss = 0.00390212\nI0819 13:50:07.118433 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:50:07.118450 17389 solver.cpp:244]     Train net output #1: loss = 0.00390187 (* 1 = 0.00390187 loss)\nI0819 13:50:07.214146 17389 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0819 13:52:25.780151 17389 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 13:53:46.411013 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59692\nI0819 13:53:46.411281 17389 solver.cpp:404]     Test net output #1: loss = 1.86126 (* 1 = 1.86126 loss)\nI0819 13:53:47.718529 17389 solver.cpp:228] Iteration 74300, loss = 0.00411939\nI0819 13:53:47.718582 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:53:47.718600 17389 
solver.cpp:244]     Train net output #1: loss = 0.00411915 (* 1 = 0.00411915 loss)\nI0819 13:53:47.814293 17389 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0819 13:56:06.256454 17389 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 13:57:26.893625 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59348\nI0819 13:57:26.893923 17389 solver.cpp:404]     Test net output #1: loss = 1.87392 (* 1 = 1.87392 loss)\nI0819 13:57:28.200320 17389 solver.cpp:228] Iteration 74400, loss = 0.00446141\nI0819 13:57:28.200367 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:57:28.200384 17389 solver.cpp:244]     Train net output #1: loss = 0.00446116 (* 1 = 0.00446116 loss)\nI0819 13:57:28.303586 17389 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0819 13:59:46.850989 17389 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 14:01:07.478832 17389 solver.cpp:404]     Test net output #0: accuracy = 0.596\nI0819 14:01:07.479152 17389 solver.cpp:404]     Test net output #1: loss = 1.86174 (* 1 = 1.86174 loss)\nI0819 14:01:08.785892 17389 solver.cpp:228] Iteration 74500, loss = 0.00368384\nI0819 14:01:08.785940 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:01:08.785957 17389 solver.cpp:244]     Train net output #1: loss = 0.0036836 (* 1 = 0.0036836 loss)\nI0819 14:01:08.886032 17389 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0819 14:03:27.299756 17389 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 14:04:47.935333 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59356\nI0819 14:04:47.935636 17389 solver.cpp:404]     Test net output #1: loss = 1.87459 (* 1 = 1.87459 loss)\nI0819 14:04:49.243607 17389 solver.cpp:228] Iteration 74600, loss = 0.00386694\nI0819 14:04:49.243656 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:04:49.243674 17389 solver.cpp:244]     Train net output #1: loss = 0.00386669 (* 1 = 0.00386669 loss)\nI0819 14:04:49.341717 17389 
sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0819 14:07:07.804543 17389 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 14:08:28.429775 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59672\nI0819 14:08:28.430099 17389 solver.cpp:404]     Test net output #1: loss = 1.86202 (* 1 = 1.86202 loss)\nI0819 14:08:29.736663 17389 solver.cpp:228] Iteration 74700, loss = 0.00403561\nI0819 14:08:29.736712 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:08:29.736729 17389 solver.cpp:244]     Train net output #1: loss = 0.00403537 (* 1 = 0.00403537 loss)\nI0819 14:08:29.832587 17389 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0819 14:10:48.386764 17389 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:12:09.015008 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59324\nI0819 14:12:09.015300 17389 solver.cpp:404]     Test net output #1: loss = 1.87474 (* 1 = 1.87474 loss)\nI0819 14:12:10.322787 17389 solver.cpp:228] Iteration 74800, loss = 0.00401656\nI0819 14:12:10.322836 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:12:10.322854 17389 solver.cpp:244]     Train net output #1: loss = 0.00401631 (* 1 = 0.00401631 loss)\nI0819 14:12:10.417012 17389 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0819 14:14:28.906873 17389 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 14:15:49.542289 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59596\nI0819 14:15:49.542642 17389 solver.cpp:404]     Test net output #1: loss = 1.86239 (* 1 = 1.86239 loss)\nI0819 14:15:50.849529 17389 solver.cpp:228] Iteration 74900, loss = 0.00414513\nI0819 14:15:50.849578 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:15:50.849596 17389 solver.cpp:244]     Train net output #1: loss = 0.00414489 (* 1 = 0.00414489 loss)\nI0819 14:15:50.949317 17389 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0819 14:18:09.502468 17389 solver.cpp:337] Iteration 75000, Testing net 
(#0)\nI0819 14:19:30.138591 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59304\nI0819 14:19:30.138895 17389 solver.cpp:404]     Test net output #1: loss = 1.87471 (* 1 = 1.87471 loss)\nI0819 14:19:31.445202 17389 solver.cpp:228] Iteration 75000, loss = 0.00394886\nI0819 14:19:31.445248 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:19:31.445266 17389 solver.cpp:244]     Train net output #1: loss = 0.00394862 (* 1 = 0.00394862 loss)\nI0819 14:19:31.542932 17389 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0819 14:21:50.173389 17389 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 14:23:10.807725 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59568\nI0819 14:23:10.808058 17389 solver.cpp:404]     Test net output #1: loss = 1.86249 (* 1 = 1.86249 loss)\nI0819 14:23:12.114935 17389 solver.cpp:228] Iteration 75100, loss = 0.0041669\nI0819 14:23:12.114981 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:23:12.114998 17389 solver.cpp:244]     Train net output #1: loss = 0.00416665 (* 1 = 0.00416665 loss)\nI0819 14:23:12.217172 17389 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0819 14:25:30.706893 17389 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 14:26:51.346482 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59328\nI0819 14:26:51.346832 17389 solver.cpp:404]     Test net output #1: loss = 1.87497 (* 1 = 1.87497 loss)\nI0819 14:26:52.653244 17389 solver.cpp:228] Iteration 75200, loss = 0.00379785\nI0819 14:26:52.653291 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:26:52.653309 17389 solver.cpp:244]     Train net output #1: loss = 0.00379761 (* 1 = 0.00379761 loss)\nI0819 14:26:52.746767 17389 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0819 14:29:11.205560 17389 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 14:30:31.848549 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59624\nI0819 14:30:31.848865 17389 
solver.cpp:404]     Test net output #1: loss = 1.86261 (* 1 = 1.86261 loss)\nI0819 14:30:33.155656 17389 solver.cpp:228] Iteration 75300, loss = 0.00365846\nI0819 14:30:33.155712 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:30:33.155738 17389 solver.cpp:244]     Train net output #1: loss = 0.00365821 (* 1 = 0.00365821 loss)\nI0819 14:30:33.251423 17389 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0819 14:32:51.670662 17389 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 14:34:12.310982 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59336\nI0819 14:34:12.311262 17389 solver.cpp:404]     Test net output #1: loss = 1.87536 (* 1 = 1.87536 loss)\nI0819 14:34:13.619391 17389 solver.cpp:228] Iteration 75400, loss = 0.0041554\nI0819 14:34:13.619438 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:34:13.619455 17389 solver.cpp:244]     Train net output #1: loss = 0.00415515 (* 1 = 0.00415515 loss)\nI0819 14:34:13.714439 17389 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0819 14:36:32.294857 17389 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 14:37:52.932793 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59616\nI0819 14:37:52.933073 17389 solver.cpp:404]     Test net output #1: loss = 1.86306 (* 1 = 1.86306 loss)\nI0819 14:37:54.240775 17389 solver.cpp:228] Iteration 75500, loss = 0.00385532\nI0819 14:37:54.240823 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:37:54.240839 17389 solver.cpp:244]     Train net output #1: loss = 0.00385507 (* 1 = 0.00385507 loss)\nI0819 14:37:54.335027 17389 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0819 14:40:12.872548 17389 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 14:41:33.493197 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59352\nI0819 14:41:33.493508 17389 solver.cpp:404]     Test net output #1: loss = 1.87558 (* 1 = 1.87558 loss)\nI0819 14:41:34.800034 17389 solver.cpp:228] 
Iteration 75600, loss = 0.00380776\nI0819 14:41:34.800079 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:41:34.800096 17389 solver.cpp:244]     Train net output #1: loss = 0.00380752 (* 1 = 0.00380752 loss)\nI0819 14:41:34.899140 17389 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0819 14:43:53.531083 17389 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 14:45:14.153682 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59552\nI0819 14:45:14.153996 17389 solver.cpp:404]     Test net output #1: loss = 1.86312 (* 1 = 1.86312 loss)\nI0819 14:45:15.460299 17389 solver.cpp:228] Iteration 75700, loss = 0.00356901\nI0819 14:45:15.460347 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:45:15.460364 17389 solver.cpp:244]     Train net output #1: loss = 0.00356877 (* 1 = 0.00356877 loss)\nI0819 14:45:15.554441 17389 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0819 14:47:34.090718 17389 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 14:48:54.714915 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59272\nI0819 14:48:54.715214 17389 solver.cpp:404]     Test net output #1: loss = 1.87549 (* 1 = 1.87549 loss)\nI0819 14:48:56.021570 17389 solver.cpp:228] Iteration 75800, loss = 0.00422674\nI0819 14:48:56.021620 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:48:56.021636 17389 solver.cpp:244]     Train net output #1: loss = 0.0042265 (* 1 = 0.0042265 loss)\nI0819 14:48:56.115442 17389 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0819 14:51:14.544674 17389 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 14:52:35.156260 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59532\nI0819 14:52:35.156594 17389 solver.cpp:404]     Test net output #1: loss = 1.86328 (* 1 = 1.86328 loss)\nI0819 14:52:36.462805 17389 solver.cpp:228] Iteration 75900, loss = 0.00358772\nI0819 14:52:36.462853 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
14:52:36.462870 17389 solver.cpp:244]     Train net output #1: loss = 0.00358748 (* 1 = 0.00358748 loss)\nI0819 14:52:36.566201 17389 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0819 14:54:55.074209 17389 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 14:56:15.682082 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59312\nI0819 14:56:15.682368 17389 solver.cpp:404]     Test net output #1: loss = 1.87546 (* 1 = 1.87546 loss)\nI0819 14:56:16.988757 17389 solver.cpp:228] Iteration 76000, loss = 0.00395941\nI0819 14:56:16.988801 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:56:16.988818 17389 solver.cpp:244]     Train net output #1: loss = 0.00395916 (* 1 = 0.00395916 loss)\nI0819 14:56:17.084839 17389 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0819 14:58:35.607156 17389 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 14:59:56.219933 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5956\nI0819 14:59:56.220239 17389 solver.cpp:404]     Test net output #1: loss = 1.86313 (* 1 = 1.86313 loss)\nI0819 14:59:57.526451 17389 solver.cpp:228] Iteration 76100, loss = 0.00400022\nI0819 14:59:57.526496 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:59:57.526513 17389 solver.cpp:244]     Train net output #1: loss = 0.00399998 (* 1 = 0.00399998 loss)\nI0819 14:59:57.626005 17389 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0819 15:02:16.083019 17389 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 15:03:36.708783 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59288\nI0819 15:03:36.709082 17389 solver.cpp:404]     Test net output #1: loss = 1.87545 (* 1 = 1.87545 loss)\nI0819 15:03:38.015571 17389 solver.cpp:228] Iteration 76200, loss = 0.00426024\nI0819 15:03:38.015619 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:03:38.015635 17389 solver.cpp:244]     Train net output #1: loss = 0.00425999 (* 1 = 0.00425999 loss)\nI0819 
15:03:38.109393 17389 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0819 15:05:56.633426 17389 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 15:07:17.266906 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59516\nI0819 15:07:17.267205 17389 solver.cpp:404]     Test net output #1: loss = 1.86343 (* 1 = 1.86343 loss)\nI0819 15:07:18.574445 17389 solver.cpp:228] Iteration 76300, loss = 0.00428477\nI0819 15:07:18.574491 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:07:18.574507 17389 solver.cpp:244]     Train net output #1: loss = 0.00428453 (* 1 = 0.00428453 loss)\nI0819 15:07:18.668218 17389 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0819 15:09:37.137389 17389 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:10:57.765524 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59332\nI0819 15:10:57.765856 17389 solver.cpp:404]     Test net output #1: loss = 1.87582 (* 1 = 1.87582 loss)\nI0819 15:10:59.072356 17389 solver.cpp:228] Iteration 76400, loss = 0.00407796\nI0819 15:10:59.072402 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:10:59.072417 17389 solver.cpp:244]     Train net output #1: loss = 0.00407771 (* 1 = 0.00407771 loss)\nI0819 15:10:59.171619 17389 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0819 15:13:17.701555 17389 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:14:38.329885 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59588\nI0819 15:14:38.330188 17389 solver.cpp:404]     Test net output #1: loss = 1.86333 (* 1 = 1.86333 loss)\nI0819 15:14:39.636816 17389 solver.cpp:228] Iteration 76500, loss = 0.00369774\nI0819 15:14:39.636860 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:14:39.636878 17389 solver.cpp:244]     Train net output #1: loss = 0.0036975 (* 1 = 0.0036975 loss)\nI0819 15:14:39.720157 17389 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0819 15:16:58.117460 17389 solver.cpp:337] Iteration 
76600, Testing net (#0)\nI0819 15:18:18.742969 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59336\nI0819 15:18:18.743239 17389 solver.cpp:404]     Test net output #1: loss = 1.87543 (* 1 = 1.87543 loss)\nI0819 15:18:20.049275 17389 solver.cpp:228] Iteration 76600, loss = 0.00418363\nI0819 15:18:20.049322 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:18:20.049340 17389 solver.cpp:244]     Train net output #1: loss = 0.00418338 (* 1 = 0.00418338 loss)\nI0819 15:18:20.150104 17389 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0819 15:20:38.565999 17389 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 15:21:59.182690 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59592\nI0819 15:21:59.182970 17389 solver.cpp:404]     Test net output #1: loss = 1.86295 (* 1 = 1.86295 loss)\nI0819 15:22:00.489807 17389 solver.cpp:228] Iteration 76700, loss = 0.00392636\nI0819 15:22:00.489853 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:22:00.489869 17389 solver.cpp:244]     Train net output #1: loss = 0.00392612 (* 1 = 0.00392612 loss)\nI0819 15:22:00.586089 17389 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0819 15:24:19.024776 17389 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 15:25:39.635233 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59348\nI0819 15:25:39.635603 17389 solver.cpp:404]     Test net output #1: loss = 1.87574 (* 1 = 1.87574 loss)\nI0819 15:25:40.942013 17389 solver.cpp:228] Iteration 76800, loss = 0.00400485\nI0819 15:25:40.942060 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:25:40.942076 17389 solver.cpp:244]     Train net output #1: loss = 0.0040046 (* 1 = 0.0040046 loss)\nI0819 15:25:41.037081 17389 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0819 15:27:59.484287 17389 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 15:29:20.100404 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59572\nI0819 
15:29:20.100740 17389 solver.cpp:404]     Test net output #1: loss = 1.8635 (* 1 = 1.8635 loss)\nI0819 15:29:21.407543 17389 solver.cpp:228] Iteration 76900, loss = 0.00388146\nI0819 15:29:21.407589 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:29:21.407606 17389 solver.cpp:244]     Train net output #1: loss = 0.00388121 (* 1 = 0.00388121 loss)\nI0819 15:29:21.509382 17389 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0819 15:31:39.975411 17389 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 15:33:00.593127 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5928\nI0819 15:33:00.593472 17389 solver.cpp:404]     Test net output #1: loss = 1.87587 (* 1 = 1.87587 loss)\nI0819 15:33:01.900110 17389 solver.cpp:228] Iteration 77000, loss = 0.00405637\nI0819 15:33:01.900154 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:33:01.900172 17389 solver.cpp:244]     Train net output #1: loss = 0.00405612 (* 1 = 0.00405612 loss)\nI0819 15:33:01.997670 17389 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0819 15:35:20.382699 17389 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 15:36:41.016432 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59612\nI0819 15:36:41.016716 17389 solver.cpp:404]     Test net output #1: loss = 1.86351 (* 1 = 1.86351 loss)\nI0819 15:36:42.323011 17389 solver.cpp:228] Iteration 77100, loss = 0.00371565\nI0819 15:36:42.323060 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:36:42.323076 17389 solver.cpp:244]     Train net output #1: loss = 0.0037154 (* 1 = 0.0037154 loss)\nI0819 15:36:42.418545 17389 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0819 15:39:00.921737 17389 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 15:40:21.569552 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5934\nI0819 15:40:21.569857 17389 solver.cpp:404]     Test net output #1: loss = 1.87594 (* 1 = 1.87594 loss)\nI0819 15:40:22.876906 17389 
solver.cpp:228] Iteration 77200, loss = 0.00394161\nI0819 15:40:22.876955 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:40:22.876971 17389 solver.cpp:244]     Train net output #1: loss = 0.00394137 (* 1 = 0.00394137 loss)\nI0819 15:40:22.978399 17389 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0819 15:42:41.606529 17389 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 15:44:02.259070 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59576\nI0819 15:44:02.259361 17389 solver.cpp:404]     Test net output #1: loss = 1.86369 (* 1 = 1.86369 loss)\nI0819 15:44:03.566318 17389 solver.cpp:228] Iteration 77300, loss = 0.00383224\nI0819 15:44:03.566365 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:44:03.566382 17389 solver.cpp:244]     Train net output #1: loss = 0.003832 (* 1 = 0.003832 loss)\nI0819 15:44:03.663308 17389 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0819 15:46:22.170508 17389 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 15:47:42.794610 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59348\nI0819 15:47:42.794890 17389 solver.cpp:404]     Test net output #1: loss = 1.87614 (* 1 = 1.87614 loss)\nI0819 15:47:44.101698 17389 solver.cpp:228] Iteration 77400, loss = 0.00383393\nI0819 15:47:44.101745 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:47:44.101763 17389 solver.cpp:244]     Train net output #1: loss = 0.00383369 (* 1 = 0.00383369 loss)\nI0819 15:47:44.196225 17389 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0819 15:50:02.669630 17389 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 15:51:23.275315 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59556\nI0819 15:51:23.275604 17389 solver.cpp:404]     Test net output #1: loss = 1.86367 (* 1 = 1.86367 loss)\nI0819 15:51:24.581701 17389 solver.cpp:228] Iteration 77500, loss = 0.00381894\nI0819 15:51:24.581746 17389 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 15:51:24.581763 17389 solver.cpp:244]     Train net output #1: loss = 0.0038187 (* 1 = 0.0038187 loss)\nI0819 15:51:24.683833 17389 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0819 15:53:43.162919 17389 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 15:55:03.770524 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59344\nI0819 15:55:03.770841 17389 solver.cpp:404]     Test net output #1: loss = 1.87565 (* 1 = 1.87565 loss)\nI0819 15:55:05.077258 17389 solver.cpp:228] Iteration 77600, loss = 0.00398508\nI0819 15:55:05.077301 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:55:05.077317 17389 solver.cpp:244]     Train net output #1: loss = 0.00398483 (* 1 = 0.00398483 loss)\nI0819 15:55:05.175621 17389 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0819 15:57:23.644078 17389 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 15:58:44.259542 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5958\nI0819 15:58:44.259872 17389 solver.cpp:404]     Test net output #1: loss = 1.86393 (* 1 = 1.86393 loss)\nI0819 15:58:45.566613 17389 solver.cpp:228] Iteration 77700, loss = 0.00383125\nI0819 15:58:45.566658 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:58:45.566673 17389 solver.cpp:244]     Train net output #1: loss = 0.003831 (* 1 = 0.003831 loss)\nI0819 15:58:45.663058 17389 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0819 16:01:04.224058 17389 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 16:02:24.847091 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59356\nI0819 16:02:24.847434 17389 solver.cpp:404]     Test net output #1: loss = 1.87618 (* 1 = 1.87618 loss)\nI0819 16:02:26.154461 17389 solver.cpp:228] Iteration 77800, loss = 0.00405101\nI0819 16:02:26.154510 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:02:26.154526 17389 solver.cpp:244]     Train net output #1: loss = 0.00405076 (* 1 = 0.00405076 loss)\nI0819 
16:02:26.250841 17389 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0819 16:04:44.776337 17389 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 16:06:05.383807 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59584\nI0819 16:06:05.384114 17389 solver.cpp:404]     Test net output #1: loss = 1.864 (* 1 = 1.864 loss)\nI0819 16:06:06.690544 17389 solver.cpp:228] Iteration 77900, loss = 0.00419078\nI0819 16:06:06.690593 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:06:06.690609 17389 solver.cpp:244]     Train net output #1: loss = 0.00419053 (* 1 = 0.00419053 loss)\nI0819 16:06:06.786260 17389 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0819 16:08:25.223812 17389 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:09:45.836323 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59372\nI0819 16:09:45.836637 17389 solver.cpp:404]     Test net output #1: loss = 1.8766 (* 1 = 1.8766 loss)\nI0819 16:09:47.143353 17389 solver.cpp:228] Iteration 78000, loss = 0.00389174\nI0819 16:09:47.143401 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:09:47.143419 17389 solver.cpp:244]     Train net output #1: loss = 0.00389149 (* 1 = 0.00389149 loss)\nI0819 16:09:47.245338 17389 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0819 16:12:05.713578 17389 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:13:26.326189 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59616\nI0819 16:13:26.326481 17389 solver.cpp:404]     Test net output #1: loss = 1.86445 (* 1 = 1.86445 loss)\nI0819 16:13:27.632694 17389 solver.cpp:228] Iteration 78100, loss = 0.00403391\nI0819 16:13:27.632741 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:13:27.632757 17389 solver.cpp:244]     Train net output #1: loss = 0.00403367 (* 1 = 0.00403367 loss)\nI0819 16:13:27.727931 17389 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0819 16:15:46.241160 17389 solver.cpp:337] Iteration 
78200, Testing net (#0)\nI0819 16:17:06.852918 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59324\nI0819 16:17:06.853248 17389 solver.cpp:404]     Test net output #1: loss = 1.87664 (* 1 = 1.87664 loss)\nI0819 16:17:08.160765 17389 solver.cpp:228] Iteration 78200, loss = 0.00427102\nI0819 16:17:08.160814 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:17:08.160831 17389 solver.cpp:244]     Train net output #1: loss = 0.00427077 (* 1 = 0.00427077 loss)\nI0819 16:17:08.256340 17389 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0819 16:19:26.759826 17389 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 16:20:47.376135 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5954\nI0819 16:20:47.376477 17389 solver.cpp:404]     Test net output #1: loss = 1.8643 (* 1 = 1.8643 loss)\nI0819 16:20:48.684005 17389 solver.cpp:228] Iteration 78300, loss = 0.00394964\nI0819 16:20:48.684051 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:20:48.684067 17389 solver.cpp:244]     Train net output #1: loss = 0.00394939 (* 1 = 0.00394939 loss)\nI0819 16:20:48.776571 17389 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0819 16:23:07.267691 17389 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 16:24:27.884138 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59296\nI0819 16:24:27.884431 17389 solver.cpp:404]     Test net output #1: loss = 1.87696 (* 1 = 1.87696 loss)\nI0819 16:24:29.190943 17389 solver.cpp:228] Iteration 78400, loss = 0.00391226\nI0819 16:24:29.190990 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:24:29.191007 17389 solver.cpp:244]     Train net output #1: loss = 0.00391202 (* 1 = 0.00391202 loss)\nI0819 16:24:29.287735 17389 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0819 16:26:47.738087 17389 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 16:28:08.389693 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59568\nI0819 
16:28:08.389971 17389 solver.cpp:404]     Test net output #1: loss = 1.86465 (* 1 = 1.86465 loss)\nI0819 16:28:09.696866 17389 solver.cpp:228] Iteration 78500, loss = 0.00389634\nI0819 16:28:09.696913 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:28:09.696930 17389 solver.cpp:244]     Train net output #1: loss = 0.0038961 (* 1 = 0.0038961 loss)\nI0819 16:28:09.791553 17389 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0819 16:30:28.239500 17389 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 16:31:48.890884 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5934\nI0819 16:31:48.891175 17389 solver.cpp:404]     Test net output #1: loss = 1.87674 (* 1 = 1.87674 loss)\nI0819 16:31:50.198496 17389 solver.cpp:228] Iteration 78600, loss = 0.00384405\nI0819 16:31:50.198545 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:31:50.198562 17389 solver.cpp:244]     Train net output #1: loss = 0.0038438 (* 1 = 0.0038438 loss)\nI0819 16:31:50.296720 17389 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0819 16:34:08.769090 17389 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 16:35:29.415658 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59544\nI0819 16:35:29.415990 17389 solver.cpp:404]     Test net output #1: loss = 1.86449 (* 1 = 1.86449 loss)\nI0819 16:35:30.723924 17389 solver.cpp:228] Iteration 78700, loss = 0.00377352\nI0819 16:35:30.723973 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:35:30.723991 17389 solver.cpp:244]     Train net output #1: loss = 0.00377328 (* 1 = 0.00377328 loss)\nI0819 16:35:30.818495 17389 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0819 16:37:48.971762 17389 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 16:39:09.616361 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59344\nI0819 16:39:09.616677 17389 solver.cpp:404]     Test net output #1: loss = 1.87724 (* 1 = 1.87724 loss)\nI0819 16:39:10.923982 17389 
solver.cpp:228] Iteration 78800, loss = 0.00425634\nI0819 16:39:10.924029 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:39:10.924046 17389 solver.cpp:244]     Train net output #1: loss = 0.00425609 (* 1 = 0.00425609 loss)\nI0819 16:39:11.016347 17389 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0819 16:41:29.262277 17389 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 16:42:49.911254 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59544\nI0819 16:42:49.911604 17389 solver.cpp:404]     Test net output #1: loss = 1.86494 (* 1 = 1.86494 loss)\nI0819 16:42:51.218920 17389 solver.cpp:228] Iteration 78900, loss = 0.00389732\nI0819 16:42:51.218968 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:42:51.218984 17389 solver.cpp:244]     Train net output #1: loss = 0.00389707 (* 1 = 0.00389707 loss)\nI0819 16:42:51.317701 17389 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0819 16:45:09.427351 17389 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 16:46:30.073669 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59344\nI0819 16:46:30.073962 17389 solver.cpp:404]     Test net output #1: loss = 1.87721 (* 1 = 1.87721 loss)\nI0819 16:46:31.381884 17389 solver.cpp:228] Iteration 79000, loss = 0.00409383\nI0819 16:46:31.381934 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:46:31.381952 17389 solver.cpp:244]     Train net output #1: loss = 0.00409358 (* 1 = 0.00409358 loss)\nI0819 16:46:31.473583 17389 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0819 16:48:49.613443 17389 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 16:50:10.252820 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5954\nI0819 16:50:10.253134 17389 solver.cpp:404]     Test net output #1: loss = 1.86552 (* 1 = 1.86552 loss)\nI0819 16:50:11.560752 17389 solver.cpp:228] Iteration 79100, loss = 0.00406138\nI0819 16:50:11.560801 17389 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 16:50:11.560818 17389 solver.cpp:244]     Train net output #1: loss = 0.00406114 (* 1 = 0.00406114 loss)\nI0819 16:50:11.650632 17389 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0819 16:52:29.850952 17389 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 16:53:50.492076 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59292\nI0819 16:53:50.492373 17389 solver.cpp:404]     Test net output #1: loss = 1.87787 (* 1 = 1.87787 loss)\nI0819 16:53:51.799504 17389 solver.cpp:228] Iteration 79200, loss = 0.00377756\nI0819 16:53:51.799552 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:53:51.799569 17389 solver.cpp:244]     Train net output #1: loss = 0.00377732 (* 1 = 0.00377732 loss)\nI0819 16:53:51.891191 17389 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0819 16:56:10.244652 17389 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 16:57:30.882989 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59588\nI0819 16:57:30.883301 17389 solver.cpp:404]     Test net output #1: loss = 1.86533 (* 1 = 1.86533 loss)\nI0819 16:57:32.190955 17389 solver.cpp:228] Iteration 79300, loss = 0.00379694\nI0819 16:57:32.191004 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:57:32.191020 17389 solver.cpp:244]     Train net output #1: loss = 0.0037967 (* 1 = 0.0037967 loss)\nI0819 16:57:32.282778 17389 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0819 16:59:50.455273 17389 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 17:01:11.105572 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5936\nI0819 17:01:11.105921 17389 solver.cpp:404]     Test net output #1: loss = 1.87766 (* 1 = 1.87766 loss)\nI0819 17:01:12.414507 17389 solver.cpp:228] Iteration 79400, loss = 0.00370238\nI0819 17:01:12.414558 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:01:12.414583 17389 solver.cpp:244]     Train net output #1: loss = 0.00370213 (* 1 = 0.00370213 
loss)\nI0819 17:01:12.507908 17389 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0819 17:03:30.633226 17389 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 17:04:51.281177 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5954\nI0819 17:04:51.281491 17389 solver.cpp:404]     Test net output #1: loss = 1.86529 (* 1 = 1.86529 loss)\nI0819 17:04:52.587550 17389 solver.cpp:228] Iteration 79500, loss = 0.0040213\nI0819 17:04:52.587600 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:04:52.587625 17389 solver.cpp:244]     Train net output #1: loss = 0.00402106 (* 1 = 0.00402106 loss)\nI0819 17:04:52.673956 17389 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0819 17:07:10.761178 17389 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 17:08:31.406260 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59312\nI0819 17:08:31.406545 17389 solver.cpp:404]     Test net output #1: loss = 1.87779 (* 1 = 1.87779 loss)\nI0819 17:08:32.714134 17389 solver.cpp:228] Iteration 79600, loss = 0.0043792\nI0819 17:08:32.714186 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:08:32.714211 17389 solver.cpp:244]     Train net output #1: loss = 0.00437896 (* 1 = 0.00437896 loss)\nI0819 17:08:32.809357 17389 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0819 17:10:50.926013 17389 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:12:11.575476 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59516\nI0819 17:12:11.575839 17389 solver.cpp:404]     Test net output #1: loss = 1.86545 (* 1 = 1.86545 loss)\nI0819 17:12:12.883026 17389 solver.cpp:228] Iteration 79700, loss = 0.00414103\nI0819 17:12:12.883080 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:12:12.883105 17389 solver.cpp:244]     Train net output #1: loss = 0.00414079 (* 1 = 0.00414079 loss)\nI0819 17:12:12.966279 17389 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0819 17:14:31.095288 17389 
solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 17:15:51.737802 17389 solver.cpp:404]     Test net output #0: accuracy = 0.5926\nI0819 17:15:51.738108 17389 solver.cpp:404]     Test net output #1: loss = 1.87802 (* 1 = 1.87802 loss)\nI0819 17:15:53.046032 17389 solver.cpp:228] Iteration 79800, loss = 0.00376542\nI0819 17:15:53.046083 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:15:53.046108 17389 solver.cpp:244]     Train net output #1: loss = 0.00376518 (* 1 = 0.00376518 loss)\nI0819 17:15:53.131732 17389 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0819 17:18:11.253962 17389 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 17:19:31.895007 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59472\nI0819 17:19:31.895289 17389 solver.cpp:404]     Test net output #1: loss = 1.86584 (* 1 = 1.86584 loss)\nI0819 17:19:33.202461 17389 solver.cpp:228] Iteration 79900, loss = 0.00388769\nI0819 17:19:33.202517 17389 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:19:33.202543 17389 solver.cpp:244]     Train net output #1: loss = 0.00388744 (* 1 = 0.00388744 loss)\nI0819 17:19:33.296965 17389 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0819 17:21:51.477908 17389 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Cifar100Fig8_iter_80000.caffemodel\nI0819 17:21:51.741317 17389 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Cifar100Fig8_iter_80000.solverstate\nI0819 17:21:52.184322 17389 solver.cpp:317] Iteration 80000, loss = 0.00385732\nI0819 17:21:52.184366 17389 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 17:23:12.827477 17389 solver.cpp:404]     Test net output #0: accuracy = 0.59272\nI0819 17:23:12.827817 17389 solver.cpp:404]     Test net output #1: loss = 1.87791 (* 1 = 1.87791 loss)\nI0819 17:23:12.827836 17389 solver.cpp:322] Optimization Done.\nI0819 17:23:18.205013 17389 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Fig1a",
    "content": "I0817 16:02:21.068913 17829 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:02:21.071547 17829 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:02:21.072764 17829 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:02:21.073981 17829 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:02:21.075188 17829 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:02:21.076417 17829 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:02:21.077648 17829 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:02:21.078877 17829 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:02:21.080121 17829 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:02:21.499383 17829 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Fig1a\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0817 16:02:21.503585 17829 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:02:21.521294 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:21.521369 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:21.522433 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:02:21.522492 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:02:21.522514 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:02:21.522534 17829 net.cpp:322] 
The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:02:21.522554 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:02:21.522572 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:02:21.522598 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:02:21.522619 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:02:21.522639 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:02:21.522657 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:02:21.522676 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:02:21.522693 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:02:21.522713 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:02:21.522732 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:02:21.522752 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:02:21.522769 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:02:21.522788 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:02:21.522805 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0817 16:02:21.522825 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:02:21.522845 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:02:21.522876 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:02:21.522894 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:02:21.522919 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:02:21.522938 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:02:21.522955 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:02:21.522971 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:02:21.523169 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:02:21.523190 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:02:21.523208 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:02:21.523226 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:02:21.523247 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:02:21.523265 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:02:21.523284 17829 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:02:21.523300 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:02:21.523320 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:02:21.523339 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:02:21.523358 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:02:21.523376 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:02:21.523393 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:02:21.523411 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:02:21.523437 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:02:21.523454 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:02:21.523473 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:02:21.523491 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:02:21.523511 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:02:21.523530 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:02:21.523547 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:02:21.523563 17829 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:02:21.523591 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:02:21.523608 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:02:21.523627 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:02:21.523658 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:02:21.523679 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:02:21.523699 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:02:21.523718 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:02:21.523735 17829 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:02:21.525491 17829 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\nI0817 16:02:21.527554 17829 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:02:21.528759 17829 net.cpp:100] Creating Layer dataLayer\nI0817 16:02:21.528836 17829 net.cpp:408] dataLayer -> data_top\nI0817 16:02:21.529023 17829 net.cpp:408] dataLayer -> label\nI0817 16:02:21.529141 17829 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:02:21.534973 17834 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:02:21.556870 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:21.564519 17829 net.cpp:150] Setting up dataLayer\nI0817 16:02:21.564602 17829 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:02:21.564623 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.564635 17829 net.cpp:165] Memory required for data: 1536500\nI0817 16:02:21.564658 17829 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:02:21.564680 17829 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:02:21.564695 17829 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:02:21.564723 17829 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:02:21.564749 17829 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:02:21.564854 17829 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:02:21.564874 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.564888 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.564896 17829 net.cpp:165] Memory required for data: 1537500\nI0817 16:02:21.564908 17829 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:02:21.564987 17829 net.cpp:100] Creating Layer pre_conv\nI0817 16:02:21.565002 17829 net.cpp:434] pre_conv <- data_top\nI0817 16:02:21.565023 17829 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:02:21.566862 17829 net.cpp:150] Setting up pre_conv\nI0817 16:02:21.566887 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.566900 17829 net.cpp:165] Memory required for data: 9729500\nI0817 16:02:21.566992 17829 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:02:21.567097 17829 net.cpp:100] Creating Layer pre_bn\nI0817 16:02:21.567113 17829 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:02:21.567129 17829 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:02:21.567180 17835 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:21.567514 17829 net.cpp:150] Setting up pre_bn\nI0817 16:02:21.567538 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.567548 17829 net.cpp:165] Memory required for data: 17921500\nI0817 16:02:21.567586 17829 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:21.567649 17829 net.cpp:100] Creating Layer pre_scale\nI0817 16:02:21.567665 17829 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:02:21.567682 17829 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:02:21.567888 17829 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:21.568174 17829 net.cpp:150] Setting up pre_scale\nI0817 16:02:21.568194 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.568203 17829 net.cpp:165] Memory required for data: 26113500\nI0817 16:02:21.568224 17829 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:02:21.568289 17829 net.cpp:100] Creating Layer pre_relu\nI0817 16:02:21.568306 17829 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:02:21.568321 17829 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:02:21.568341 17829 net.cpp:150] Setting up pre_relu\nI0817 16:02:21.568356 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.568367 17829 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:02:21.568377 17829 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:02:21.568392 17829 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:02:21.568403 17829 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:02:21.568421 17829 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:02:21.568444 17829 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:02:21.568516 17829 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:02:21.568534 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.568547 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.568557 17829 net.cpp:165] Memory required for data: 50689500\nI0817 16:02:21.568568 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:02:21.568600 17829 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:02:21.568615 17829 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:02:21.568634 17829 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:02:21.569001 17829 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:02:21.569022 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.569031 17829 net.cpp:165] Memory required for data: 58881500\nI0817 16:02:21.569053 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:02:21.569079 17829 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:02:21.569092 17829 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:02:21.569108 17829 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:02:21.569381 17829 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:02:21.569401 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.569411 17829 net.cpp:165] Memory required for data: 67073500\nI0817 16:02:21.569432 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:21.569453 17829 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:02:21.569465 17829 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:02:21.569483 17829 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.569567 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:21.569748 17829 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:02:21.569773 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.569783 17829 net.cpp:165] Memory required for data: 75265500\nI0817 16:02:21.569802 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:02:21.569826 17829 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:02:21.569838 17829 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:02:21.569856 17829 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.569875 17829 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:02:21.569891 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.569900 17829 net.cpp:165] Memory required for data: 83457500\nI0817 16:02:21.569911 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:02:21.569938 17829 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:02:21.569950 17829 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:02:21.569968 17829 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:02:21.570307 17829 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:02:21.570325 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.570335 17829 net.cpp:165] Memory required for data: 91649500\nI0817 16:02:21.570353 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:02:21.570375 17829 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:02:21.570387 17829 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:02:21.570402 17829 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:02:21.570674 17829 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:02:21.570693 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.570703 17829 net.cpp:165] Memory required for data: 99841500\nI0817 16:02:21.570731 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:21.570755 17829 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:02:21.570767 17829 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:02:21.570787 17829 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.570879 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:21.571053 17829 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:02:21.571071 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.571080 17829 net.cpp:165] Memory required for data: 108033500\nI0817 16:02:21.571099 17829 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:02:21.571166 17829 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:02:21.571180 17829 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:02:21.571198 17829 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:02:21.571215 17829 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:02:21.571311 17829 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:02:21.571331 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.571342 17829 net.cpp:165] Memory required for data: 116225500\nI0817 16:02:21.571352 17829 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:02:21.571367 17829 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:02:21.571378 17829 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:02:21.571398 17829 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.571416 17829 net.cpp:150] Setting up L1_b1_relu\nI0817 16:02:21.571432 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.571441 17829 net.cpp:165] Memory required for data: 124417500\nI0817 16:02:21.571451 17829 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.571468 17829 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.571480 17829 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:02:21.571493 17829 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:21.571512 17829 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:21.571597 17829 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.571619 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.571633 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.571652 17829 net.cpp:165] Memory required for data: 140801500\nI0817 16:02:21.571663 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:02:21.571684 17829 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:02:21.571697 17829 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:21.571724 17829 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:02:21.572083 17829 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:02:21.572103 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.572113 17829 net.cpp:165] Memory required for data: 148993500\nI0817 16:02:21.572131 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:02:21.572149 17829 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:02:21.572160 17829 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:02:21.572181 17829 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:02:21.572453 17829 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:02:21.572477 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.572489 17829 net.cpp:165] Memory required for data: 157185500\nI0817 16:02:21.572510 17829 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:02:21.572527 17829 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:02:21.572540 17829 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:02:21.572554 17829 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.572643 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:02:21.572821 17829 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:02:21.572840 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.572850 17829 net.cpp:165] Memory required for data: 165377500\nI0817 16:02:21.572868 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:02:21.572885 17829 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:02:21.572896 17829 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:02:21.572914 17829 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.572933 17829 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:02:21.572948 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.572958 17829 net.cpp:165] Memory required for data: 173569500\nI0817 16:02:21.572968 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:02:21.572988 17829 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:02:21.573000 17829 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:02:21.573021 17829 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:02:21.573364 17829 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:02:21.573384 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.573395 17829 net.cpp:165] Memory required for data: 181761500\nI0817 16:02:21.573413 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:02:21.573431 17829 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:02:21.573441 17829 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:02:21.573462 17829 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:02:21.573742 17829 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:02:21.573762 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.573771 17829 net.cpp:165] Memory required for data: 189953500\nI0817 16:02:21.573804 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:21.573822 17829 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:02:21.573834 17829 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:02:21.573853 17829 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.573945 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:21.574117 17829 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:02:21.574136 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.574146 17829 net.cpp:165] Memory required for data: 198145500\nI0817 16:02:21.574164 17829 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:02:21.574193 17829 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:02:21.574206 17829 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:02:21.574219 17829 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:21.574235 17829 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:02:21.574292 17829 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:02:21.574313 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.574322 17829 net.cpp:165] Memory required for data: 206337500\nI0817 16:02:21.574333 17829 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:02:21.574347 17829 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:02:21.574359 17829 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:02:21.574373 17829 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:02:21.574391 17829 net.cpp:150] Setting up L1_b2_relu\nI0817 16:02:21.574407 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:02:21.574416 17829 net.cpp:165] Memory required for data: 214529500\nI0817 16:02:21.574427 17829 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.574441 17829 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.574452 17829 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:02:21.574471 17829 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:21.574491 17829 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:21.574566 17829 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.574601 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.574617 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.574627 17829 net.cpp:165] Memory required for data: 230913500\nI0817 16:02:21.574636 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:02:21.574662 17829 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:02:21.574676 17829 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:21.574692 17829 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:02:21.575039 17829 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:02:21.575059 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.575068 17829 net.cpp:165] Memory required for data: 239105500\nI0817 16:02:21.575086 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:02:21.575103 17829 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:02:21.575114 17829 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:02:21.575134 17829 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:02:21.575402 17829 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:02:21.575422 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:02:21.575431 17829 net.cpp:165] Memory required for data: 247297500\nI0817 16:02:21.575453 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:21.575470 17829 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:02:21.575481 17829 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:02:21.575500 17829 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.575589 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:21.575768 17829 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:02:21.575791 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.575803 17829 net.cpp:165] Memory required for data: 255489500\nI0817 16:02:21.575820 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:02:21.575835 17829 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:02:21.575846 17829 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:02:21.575860 17829 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.575878 17829 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:02:21.575901 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.575911 17829 net.cpp:165] Memory required for data: 263681500\nI0817 16:02:21.575922 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:02:21.575947 17829 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:02:21.575959 17829 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:02:21.575980 17829 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:02:21.576323 17829 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:02:21.576341 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.576351 17829 net.cpp:165] Memory required for data: 271873500\nI0817 16:02:21.576370 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:02:21.576400 17829 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:02:21.576412 17829 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:02:21.576429 17829 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:02:21.576717 17829 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:02:21.576736 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.576745 17829 net.cpp:165] Memory required for data: 280065500\nI0817 16:02:21.576766 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:21.576783 17829 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:02:21.576794 17829 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:02:21.576815 17829 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.576900 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:21.577069 17829 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:02:21.577090 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.577100 17829 net.cpp:165] Memory required for data: 288257500\nI0817 16:02:21.577118 17829 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:02:21.577134 17829 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:02:21.577147 17829 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:02:21.577159 17829 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:21.577174 17829 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:02:21.577230 17829 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:02:21.577250 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.577260 17829 net.cpp:165] Memory required for data: 296449500\nI0817 16:02:21.577270 17829 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:02:21.577288 17829 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:02:21.577301 17829 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:02:21.577313 17829 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.577332 17829 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:02:21.577347 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.577358 17829 net.cpp:165] Memory required for data: 304641500\nI0817 16:02:21.577368 17829 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.577385 17829 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.577397 17829 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:02:21.577411 17829 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:21.577430 17829 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:21.577502 17829 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.577524 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.577539 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.577549 17829 net.cpp:165] Memory required for data: 321025500\nI0817 16:02:21.577559 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:02:21.577585 17829 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:02:21.577599 17829 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:21.577626 17829 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:02:21.577998 17829 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:02:21.578018 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.578027 17829 net.cpp:165] Memory required for data: 329217500\nI0817 16:02:21.578045 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:02:21.578068 17829 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:02:21.578078 17829 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:02:21.578095 17829 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:02:21.578369 17829 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:02:21.578387 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.578397 17829 net.cpp:165] Memory required for data: 337409500\nI0817 16:02:21.578418 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:21.578435 17829 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:02:21.578446 17829 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:02:21.578466 17829 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.578552 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:21.578735 17829 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:02:21.578754 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.578763 17829 net.cpp:165] Memory required for data: 345601500\nI0817 16:02:21.578783 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:02:21.578797 17829 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:02:21.578809 17829 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:02:21.578824 17829 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.578846 17829 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:02:21.578861 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.578871 17829 net.cpp:165] Memory required for data: 353793500\nI0817 16:02:21.578881 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:02:21.578902 17829 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:02:21.578919 17829 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:02:21.578936 17829 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:02:21.579293 17829 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:02:21.579313 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.579322 17829 net.cpp:165] Memory required for data: 361985500\nI0817 16:02:21.579339 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:02:21.579361 17829 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:02:21.579373 17829 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:02:21.579393 17829 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:02:21.579674 17829 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:02:21.579694 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.579704 17829 net.cpp:165] Memory required for data: 370177500\nI0817 16:02:21.579725 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:21.579751 17829 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:02:21.579763 17829 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:02:21.579779 17829 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.579874 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:21.580051 17829 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:02:21.580070 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.580080 17829 net.cpp:165] Memory required for data: 378369500\nI0817 16:02:21.580098 17829 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:02:21.580116 17829 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:02:21.580127 17829 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:02:21.580139 17829 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:21.580159 17829 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:02:21.580221 17829 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:02:21.580246 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.580256 17829 net.cpp:165] Memory required for data: 386561500\nI0817 16:02:21.580267 17829 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:02:21.580281 17829 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:02:21.580292 17829 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:02:21.580307 17829 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:02:21.580325 17829 net.cpp:150] Setting up L1_b4_relu\nI0817 16:02:21.580341 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.580350 17829 net.cpp:165] Memory required for data: 394753500\nI0817 16:02:21.580361 17829 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.580380 17829 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.580392 17829 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:02:21.580407 17829 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:21.580426 17829 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:21.580508 17829 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.580528 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.580540 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.580549 17829 net.cpp:165] Memory required for data: 411137500\nI0817 16:02:21.580560 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:02:21.580588 17829 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:02:21.580602 17829 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:21.580626 17829 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:02:21.580981 17829 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:02:21.580999 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.581008 17829 net.cpp:165] Memory required for data: 419329500\nI0817 16:02:21.581043 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:02:21.581065 17829 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:02:21.581077 17829 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:02:21.581099 17829 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:02:21.581382 17829 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:02:21.581400 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.581410 17829 net.cpp:165] Memory required for data: 427521500\nI0817 16:02:21.581432 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:21.581454 17829 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:02:21.581465 17829 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:02:21.581480 17829 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.581562 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:21.581746 17829 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:02:21.581765 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.581775 17829 net.cpp:165] Memory required for data: 435713500\nI0817 16:02:21.581794 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:02:21.581809 17829 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:02:21.581820 17829 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:02:21.581840 17829 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.581859 17829 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:02:21.581873 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.581884 17829 net.cpp:165] Memory required for data: 443905500\nI0817 16:02:21.581893 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:02:21.581918 17829 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:02:21.581930 17829 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:02:21.581957 17829 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:02:21.582330 17829 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:02:21.582350 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.582360 17829 net.cpp:165] Memory required for data: 452097500\nI0817 16:02:21.582378 17829 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:02:21.582401 17829 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:02:21.582412 17829 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:02:21.582428 17829 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:02:21.582702 17829 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:02:21.582721 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.582731 17829 net.cpp:165] Memory required for data: 460289500\nI0817 16:02:21.582751 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:21.582768 17829 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:02:21.582779 17829 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:02:21.582799 17829 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.582885 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:21.583055 17829 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:02:21.583081 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.583091 17829 net.cpp:165] Memory required for data: 468481500\nI0817 16:02:21.583111 17829 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:02:21.583127 17829 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:02:21.583138 17829 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:02:21.583153 17829 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:21.583168 17829 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:02:21.583225 17829 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:02:21.583243 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.583252 17829 net.cpp:165] Memory required for data: 476673500\nI0817 16:02:21.583263 17829 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:02:21.583282 17829 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:02:21.583294 17829 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:02:21.583307 17829 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.583325 17829 net.cpp:150] Setting up L1_b5_relu\nI0817 16:02:21.583341 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.583350 17829 net.cpp:165] Memory required for data: 484865500\nI0817 16:02:21.583360 17829 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.583379 17829 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.583390 17829 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:02:21.583405 17829 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:21.583425 17829 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:21.583501 17829 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.583526 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.583542 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.583554 17829 net.cpp:165] Memory required for data: 501249500\nI0817 16:02:21.583564 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:02:21.583590 17829 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:02:21.583603 17829 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:21.583621 17829 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:02:21.583978 17829 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:02:21.583997 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.584007 17829 net.cpp:165] Memory required for data: 509441500\nI0817 16:02:21.584034 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:02:21.584056 17829 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:02:21.584069 17829 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:02:21.584086 17829 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:02:21.584367 17829 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:02:21.584386 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.584396 17829 net.cpp:165] Memory required for data: 517633500\nI0817 16:02:21.584417 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:21.584434 17829 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:02:21.584446 17829 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:02:21.584466 17829 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.584553 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:21.584739 17829 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:02:21.584758 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.584767 17829 net.cpp:165] Memory required for data: 525825500\nI0817 16:02:21.584786 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:02:21.584802 17829 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:02:21.584813 17829 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:02:21.584827 17829 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.584846 17829 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:02:21.584861 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.584870 17829 net.cpp:165] Memory required for data: 534017500\nI0817 16:02:21.584880 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:02:21.584904 17829 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:02:21.584918 17829 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:02:21.584939 17829 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:02:21.585300 17829 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:02:21.585320 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.585330 17829 
net.cpp:165] Memory required for data: 542209500\nI0817 16:02:21.585347 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:02:21.585369 17829 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:02:21.585381 17829 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:02:21.585402 17829 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:02:21.585680 17829 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:02:21.585700 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.585710 17829 net.cpp:165] Memory required for data: 550401500\nI0817 16:02:21.585731 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:21.585748 17829 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:02:21.585759 17829 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:02:21.585779 17829 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.585870 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:21.586045 17829 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:02:21.586068 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.586079 17829 net.cpp:165] Memory required for data: 558593500\nI0817 16:02:21.586097 17829 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:02:21.586125 17829 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:02:21.586138 17829 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:02:21.586153 17829 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:21.586166 17829 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:02:21.586225 17829 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:02:21.586243 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.586253 17829 net.cpp:165] Memory required for data: 566785500\nI0817 16:02:21.586264 17829 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:02:21.586292 17829 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:02:21.586303 17829 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:02:21.586323 17829 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.586343 17829 net.cpp:150] Setting up L1_b6_relu\nI0817 16:02:21.586359 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.586369 17829 net.cpp:165] Memory required for data: 574977500\nI0817 16:02:21.586378 17829 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.586392 17829 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.586403 17829 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:02:21.586417 17829 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:21.586437 17829 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:21.586519 17829 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.586540 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.586554 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.586565 17829 net.cpp:165] Memory required for data: 591361500\nI0817 16:02:21.586575 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:02:21.586601 17829 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:02:21.586616 17829 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:21.586637 17829 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:02:21.586995 17829 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:02:21.587014 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.587023 17829 net.cpp:165] Memory required for data: 599553500\nI0817 16:02:21.587041 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:02:21.587059 17829 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:02:21.587069 17829 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:02:21.587093 17829 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:02:21.587369 17829 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:02:21.587388 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.587397 17829 net.cpp:165] Memory required for data: 607745500\nI0817 16:02:21.587419 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:21.587441 17829 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:02:21.587453 17829 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:02:21.587468 17829 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.587553 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:21.587733 17829 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:02:21.587751 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.587761 17829 net.cpp:165] Memory required for data: 615937500\nI0817 16:02:21.587779 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:02:21.587805 17829 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:02:21.587817 17829 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:02:21.587832 17829 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.587851 17829 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:02:21.587867 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.587877 17829 net.cpp:165] Memory required for data: 624129500\nI0817 16:02:21.587887 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:02:21.587913 17829 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:02:21.587926 17829 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:02:21.587947 17829 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:02:21.588299 17829 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:02:21.588318 17829 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.588328 17829 net.cpp:165] Memory required for data: 632321500\nI0817 16:02:21.588357 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:02:21.588376 17829 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:02:21.588387 17829 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:02:21.588403 17829 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:02:21.588698 17829 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:02:21.588717 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.588727 17829 net.cpp:165] Memory required for data: 640513500\nI0817 16:02:21.588748 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:21.588769 17829 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:02:21.588783 17829 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:02:21.588798 17829 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.588887 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:21.589063 17829 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:02:21.589082 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.589092 17829 net.cpp:165] Memory required for data: 648705500\nI0817 16:02:21.589110 17829 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:02:21.589128 17829 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:02:21.589138 17829 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:02:21.589151 17829 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:21.589174 17829 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:02:21.589233 17829 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:02:21.589251 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.589262 17829 net.cpp:165] Memory required for data: 656897500\nI0817 16:02:21.589272 17829 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:02:21.589287 17829 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:02:21.589299 17829 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:02:21.589318 17829 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.589336 17829 net.cpp:150] Setting up L1_b7_relu\nI0817 16:02:21.589351 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.589360 17829 net.cpp:165] Memory required for data: 665089500\nI0817 16:02:21.589371 17829 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.589385 17829 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.589396 17829 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:02:21.589411 17829 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:21.589431 17829 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:21.589510 17829 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.589531 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.589545 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.589555 17829 net.cpp:165] Memory required for data: 681473500\nI0817 16:02:21.589565 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:02:21.589591 17829 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:02:21.589607 17829 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:21.589628 17829 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:02:21.589987 17829 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:02:21.590005 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.590015 17829 net.cpp:165] Memory required for data: 689665500\nI0817 16:02:21.590034 17829 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:02:21.590050 17829 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:02:21.590062 17829 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:02:21.590085 17829 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:02:21.590401 17829 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:02:21.590421 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.590431 17829 net.cpp:165] Memory required for data: 697857500\nI0817 16:02:21.590452 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:21.590473 17829 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:02:21.590486 17829 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:02:21.590502 17829 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.590590 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:21.590772 17829 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:02:21.590792 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.590801 17829 net.cpp:165] Memory required for data: 706049500\nI0817 16:02:21.590821 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:02:21.590837 17829 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:02:21.590847 17829 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:02:21.590867 17829 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.590886 17829 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:02:21.590901 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.590912 17829 net.cpp:165] Memory required for data: 714241500\nI0817 16:02:21.590922 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:02:21.590946 17829 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:02:21.590960 17829 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:02:21.590981 17829 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:02:21.591337 17829 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:02:21.591357 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.591367 17829 net.cpp:165] Memory required for data: 722433500\nI0817 16:02:21.591383 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:02:21.591400 17829 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:02:21.591413 17829 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:02:21.591428 17829 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:02:21.591722 17829 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:02:21.591740 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.591750 17829 net.cpp:165] Memory required for data: 730625500\nI0817 16:02:21.591773 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:21.591794 17829 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:02:21.591807 17829 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:02:21.591823 17829 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.591909 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:21.592088 17829 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:02:21.592108 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.592118 17829 net.cpp:165] Memory required for data: 738817500\nI0817 16:02:21.592135 17829 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:02:21.592152 17829 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:02:21.592164 17829 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:02:21.592177 17829 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:21.592197 17829 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:02:21.592252 17829 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:02:21.592274 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.592285 17829 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:02:21.592295 17829 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:02:21.592309 17829 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:02:21.592321 17829 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:02:21.592335 17829 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.592353 17829 net.cpp:150] Setting up L1_b8_relu\nI0817 16:02:21.592378 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.592388 17829 net.cpp:165] Memory required for data: 755201500\nI0817 16:02:21.592398 17829 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.592417 17829 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.592428 17829 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:02:21.592444 17829 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:21.592464 17829 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:21.592548 17829 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.592567 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.592587 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.592598 17829 net.cpp:165] Memory required for data: 771585500\nI0817 16:02:21.592609 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:02:21.592628 17829 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:02:21.592641 17829 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:21.592664 17829 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:02:21.593031 17829 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:02:21.593052 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.593062 17829 net.cpp:165] Memory required for data: 
779777500\nI0817 16:02:21.593080 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:02:21.593102 17829 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:02:21.593114 17829 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:02:21.593132 17829 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:02:21.593415 17829 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:02:21.593433 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.593443 17829 net.cpp:165] Memory required for data: 787969500\nI0817 16:02:21.593466 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:21.593482 17829 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:02:21.593492 17829 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:02:21.593508 17829 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.593602 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:21.593786 17829 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:02:21.593806 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.593816 17829 net.cpp:165] Memory required for data: 796161500\nI0817 16:02:21.593833 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:02:21.593849 17829 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:02:21.593860 17829 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:02:21.593880 17829 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.593899 17829 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:02:21.593914 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.593924 17829 net.cpp:165] Memory required for data: 804353500\nI0817 16:02:21.593935 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:02:21.593963 17829 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:02:21.593977 17829 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:02:21.593993 17829 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:02:21.594352 17829 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:02:21.594372 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.594382 17829 net.cpp:165] Memory required for data: 812545500\nI0817 16:02:21.594399 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:02:21.594416 17829 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:02:21.594427 17829 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:02:21.594449 17829 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:02:21.594750 17829 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:02:21.594774 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.594784 17829 net.cpp:165] Memory required for data: 820737500\nI0817 16:02:21.594836 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:21.594856 17829 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:02:21.594868 17829 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:02:21.594883 17829 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.594977 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:21.595155 17829 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:02:21.595173 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.595183 17829 net.cpp:165] Memory required for data: 828929500\nI0817 16:02:21.595201 17829 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:02:21.595218 17829 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:02:21.595229 17829 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:02:21.595247 17829 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:21.595264 17829 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:02:21.595316 17829 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:02:21.595335 17829 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:02:21.595345 17829 net.cpp:165] Memory required for data: 837121500\nI0817 16:02:21.595355 17829 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:02:21.595369 17829 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:02:21.595381 17829 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:02:21.595399 17829 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.595418 17829 net.cpp:150] Setting up L1_b9_relu\nI0817 16:02:21.595434 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.595444 17829 net.cpp:165] Memory required for data: 845313500\nI0817 16:02:21.595453 17829 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.595468 17829 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.595479 17829 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:02:21.595500 17829 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:21.595522 17829 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:21.595605 17829 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.595624 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.595638 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.595648 17829 net.cpp:165] Memory required for data: 861697500\nI0817 16:02:21.595659 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:02:21.595679 17829 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:02:21.595691 17829 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:21.595713 17829 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:02:21.596083 17829 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:02:21.596101 17829 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:02:21.596112 17829 net.cpp:165] Memory required for data: 863745500\nI0817 16:02:21.596129 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:02:21.596146 17829 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:02:21.596158 17829 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:02:21.596179 17829 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:02:21.596457 17829 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:02:21.596475 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.596485 17829 net.cpp:165] Memory required for data: 865793500\nI0817 16:02:21.596508 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:21.596534 17829 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:02:21.596555 17829 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:02:21.596573 17829 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.596669 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:21.596848 17829 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:02:21.596868 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.596877 17829 net.cpp:165] Memory required for data: 867841500\nI0817 16:02:21.596896 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:02:21.596916 17829 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:02:21.596928 17829 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:02:21.596943 17829 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.596962 17829 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:02:21.596982 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.596993 17829 net.cpp:165] Memory required for data: 869889500\nI0817 16:02:21.597003 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:02:21.597023 17829 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:02:21.597036 17829 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:02:21.597059 17829 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:02:21.597416 17829 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:02:21.597435 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.597445 17829 net.cpp:165] Memory required for data: 871937500\nI0817 16:02:21.597462 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:02:21.597479 17829 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:02:21.597491 17829 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:02:21.597512 17829 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:02:21.597798 17829 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:02:21.597818 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.597827 17829 net.cpp:165] Memory required for data: 873985500\nI0817 16:02:21.597849 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:21.597872 17829 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:02:21.597883 17829 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:02:21.597899 17829 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.597985 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:21.598167 17829 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:02:21.598186 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.598196 17829 net.cpp:165] Memory required for data: 876033500\nI0817 16:02:21.598214 17829 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:02:21.598238 17829 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:02:21.598251 17829 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:21.598268 17829 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:02:21.598371 17829 net.cpp:150] Setting up L2_b1_pool\nI0817 16:02:21.598390 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.598399 17829 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:02:21.598410 17829 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:02:21.598426 17829 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:02:21.598438 17829 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:02:21.598450 17829 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:02:21.598470 17829 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:02:21.598527 17829 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:02:21.598547 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.598557 17829 net.cpp:165] Memory required for data: 880129500\nI0817 16:02:21.598568 17829 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:02:21.598589 17829 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:02:21.598603 17829 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:02:21.598626 17829 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.598645 17829 net.cpp:150] Setting up L2_b1_relu\nI0817 16:02:21.598659 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.598670 17829 net.cpp:165] Memory required for data: 882177500\nI0817 16:02:21.598680 17829 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:02:21.598749 17829 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:02:21.598772 17829 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:02:21.601148 17829 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:02:21.601171 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.601181 17829 net.cpp:165] Memory required for data: 884225500\nI0817 16:02:21.601192 17829 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:02:21.601210 17829 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:02:21.601223 17829 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:02:21.601236 17829 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:02:21.601258 17829 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:02:21.601352 17829 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:02:21.601377 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.601388 17829 net.cpp:165] Memory required for data: 888321500\nI0817 16:02:21.601398 17829 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.601413 17829 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.601424 17829 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:02:21.601439 17829 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:21.601459 17829 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:21.601549 17829 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.601572 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.601593 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.601603 17829 net.cpp:165] Memory required for data: 896513500\nI0817 16:02:21.601613 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:02:21.601639 17829 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:02:21.601651 17829 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:21.601670 17829 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:02:21.603166 17829 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:02:21.603188 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.603197 17829 net.cpp:165] Memory required for data: 900609500\nI0817 16:02:21.603215 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:02:21.603238 17829 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:02:21.603250 17829 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:02:21.603267 17829 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:02:21.603554 17829 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:02:21.603574 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.603590 17829 net.cpp:165] Memory required for data: 904705500\nI0817 16:02:21.603612 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:21.603633 17829 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:02:21.603646 17829 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:02:21.603662 17829 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.603757 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:21.603945 17829 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:02:21.603963 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.603972 17829 net.cpp:165] Memory required for data: 908801500\nI0817 16:02:21.603991 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:02:21.604012 17829 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:02:21.604023 17829 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:02:21.604043 17829 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.604071 17829 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:02:21.604087 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.604099 17829 net.cpp:165] Memory required for data: 912897500\nI0817 16:02:21.604110 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:02:21.604131 17829 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:02:21.604143 17829 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:02:21.604164 17829 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:02:21.604665 17829 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:02:21.604684 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.604694 17829 net.cpp:165] Memory required for data: 916993500\nI0817 16:02:21.604712 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:02:21.604730 17829 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:02:21.604742 17829 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:02:21.604763 17829 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:02:21.605046 17829 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:02:21.605065 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605075 17829 net.cpp:165] Memory required for data: 921089500\nI0817 16:02:21.605098 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:21.605118 17829 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:02:21.605131 17829 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:02:21.605147 17829 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.605233 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:21.605417 17829 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:02:21.605437 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605445 17829 net.cpp:165] Memory required for data: 925185500\nI0817 16:02:21.605464 17829 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:02:21.605486 17829 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:02:21.605499 17829 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:02:21.605512 17829 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:21.605527 17829 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:02:21.605585 17829 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:02:21.605605 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605615 17829 net.cpp:165] Memory required for data: 929281500\nI0817 16:02:21.605624 17829 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:02:21.605640 17829 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:02:21.605651 17829 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:02:21.605670 17829 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:02:21.605690 17829 net.cpp:150] Setting up L2_b2_relu\nI0817 16:02:21.605705 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605713 17829 net.cpp:165] Memory required for data: 933377500\nI0817 16:02:21.605725 17829 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.605738 17829 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.605748 17829 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:02:21.605763 17829 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:21.605783 17829 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:21.605870 17829 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.605890 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605903 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.605913 17829 net.cpp:165] Memory required for data: 941569500\nI0817 16:02:21.605924 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:02:21.605954 17829 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:02:21.605968 17829 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:21.605991 17829 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:02:21.606487 17829 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:02:21.606505 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.606515 17829 net.cpp:165] Memory required for data: 945665500\nI0817 16:02:21.606533 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:02:21.606549 17829 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:02:21.606561 17829 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:02:21.606587 17829 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:02:21.606866 17829 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:02:21.606885 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.606895 17829 net.cpp:165] Memory required for data: 949761500\nI0817 16:02:21.606917 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:21.606938 17829 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:02:21.606951 17829 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:02:21.606967 17829 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.607053 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:21.607234 17829 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:02:21.607252 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.607261 17829 net.cpp:165] Memory required for data: 953857500\nI0817 16:02:21.607280 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:02:21.607300 17829 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:02:21.607312 17829 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:02:21.607326 17829 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.607347 17829 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:02:21.607360 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.607370 17829 net.cpp:165] Memory required for data: 957953500\nI0817 16:02:21.607380 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:02:21.607406 17829 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:02:21.607419 17829 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:02:21.607445 17829 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:02:21.607955 17829 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:02:21.607975 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.607985 17829 net.cpp:165] Memory required for data: 962049500\nI0817 16:02:21.608001 17829 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:02:21.608019 17829 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:02:21.608031 17829 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:02:21.608052 17829 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:02:21.608335 17829 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:02:21.608353 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.608363 17829 net.cpp:165] Memory required for data: 966145500\nI0817 16:02:21.608384 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:21.608405 17829 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:02:21.608418 17829 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:02:21.608434 17829 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.608522 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:21.608711 17829 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:02:21.608729 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.608739 17829 net.cpp:165] Memory required for data: 970241500\nI0817 16:02:21.608758 17829 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:02:21.608779 17829 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:02:21.608793 17829 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:02:21.608814 17829 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:21.608831 17829 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:02:21.608887 17829 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:02:21.608906 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.608916 17829 net.cpp:165] Memory required for data: 974337500\nI0817 16:02:21.608927 17829 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:02:21.608959 17829 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:02:21.608973 17829 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:02:21.608988 17829 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.609006 17829 net.cpp:150] Setting up L2_b3_relu\nI0817 16:02:21.609021 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.609030 17829 net.cpp:165] Memory required for data: 978433500\nI0817 16:02:21.609041 17829 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.609061 17829 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.609072 17829 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:02:21.609087 17829 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:21.609107 17829 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:21.609189 17829 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.609213 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.609230 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.609239 17829 net.cpp:165] Memory required for data: 986625500\nI0817 16:02:21.609251 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:02:21.609271 17829 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:02:21.609283 17829 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:21.609302 17829 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:02:21.609817 17829 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:02:21.609836 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.609845 17829 net.cpp:165] Memory required for data: 990721500\nI0817 16:02:21.609863 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:02:21.609885 17829 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:02:21.609899 17829 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:02:21.609915 17829 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:02:21.610199 17829 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:02:21.610219 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.610226 17829 net.cpp:165] Memory required for data: 994817500\nI0817 16:02:21.610249 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:21.610265 17829 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:02:21.610276 17829 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:02:21.610296 17829 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.610388 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:21.610574 17829 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:02:21.610599 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.610610 17829 net.cpp:165] Memory required for data: 998913500\nI0817 16:02:21.610628 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:02:21.610643 17829 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:02:21.610656 17829 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:02:21.610674 17829 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.610694 17829 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:02:21.610709 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.610718 17829 net.cpp:165] Memory required for data: 1003009500\nI0817 16:02:21.610728 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:02:21.610760 17829 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:02:21.610774 17829 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:02:21.610793 17829 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:02:21.611289 17829 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:02:21.611310 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.611318 17829 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:02:21.611336 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:02:21.611359 17829 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:02:21.611371 17829 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:02:21.611388 17829 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:02:21.611677 17829 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:02:21.611696 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.611706 17829 net.cpp:165] Memory required for data: 1011201500\nI0817 16:02:21.611728 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:21.611744 17829 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:02:21.611757 17829 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:02:21.611771 17829 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.611863 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:21.612046 17829 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:02:21.612068 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.612078 17829 net.cpp:165] Memory required for data: 1015297500\nI0817 16:02:21.612097 17829 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:02:21.612114 17829 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:02:21.612126 17829 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:02:21.612140 17829 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:21.612161 17829 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:02:21.612210 17829 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:02:21.612227 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.612237 17829 net.cpp:165] Memory required for data: 1019393500\nI0817 16:02:21.612248 17829 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:02:21.612262 17829 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:02:21.612273 17829 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:02:21.612293 17829 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:02:21.612311 17829 net.cpp:150] Setting up L2_b4_relu\nI0817 16:02:21.612326 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.612336 17829 net.cpp:165] Memory required for data: 1023489500\nI0817 16:02:21.612346 17829 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.612361 17829 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.612373 17829 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:02:21.612392 17829 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:21.612413 17829 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:21.612488 17829 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.612507 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.612520 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.612529 17829 net.cpp:165] Memory required for data: 1031681500\nI0817 16:02:21.612540 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:02:21.612563 17829 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:02:21.612576 17829 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:21.612604 17829 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:02:21.613131 17829 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:02:21.613157 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.613168 17829 net.cpp:165] Memory required for data: 1035777500\nI0817 16:02:21.613185 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:02:21.613207 17829 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:02:21.613219 17829 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:02:21.613237 17829 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:02:21.613523 17829 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:02:21.613541 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.613551 17829 net.cpp:165] Memory required for data: 1039873500\nI0817 16:02:21.613572 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:21.613596 17829 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:02:21.613608 17829 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:02:21.613629 17829 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.613725 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:21.613911 17829 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:02:21.613929 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.613940 17829 net.cpp:165] Memory required for data: 1043969500\nI0817 16:02:21.613957 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:02:21.613973 17829 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:02:21.613983 17829 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:02:21.614002 17829 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.614023 17829 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:02:21.614038 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.614048 17829 net.cpp:165] Memory required for data: 1048065500\nI0817 16:02:21.614058 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:02:21.614084 17829 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:02:21.614096 17829 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:02:21.614114 17829 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:02:21.614634 17829 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:02:21.614655 17829 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.614663 17829 net.cpp:165] Memory required for data: 1052161500\nI0817 16:02:21.614681 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:02:21.614698 17829 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:02:21.614709 17829 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:02:21.614730 17829 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:02:21.615007 17829 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:02:21.615025 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615036 17829 net.cpp:165] Memory required for data: 1056257500\nI0817 16:02:21.615057 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:21.615074 17829 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:02:21.615087 17829 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:02:21.615101 17829 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.615190 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:21.615370 17829 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:02:21.615392 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615403 17829 net.cpp:165] Memory required for data: 1060353500\nI0817 16:02:21.615422 17829 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:02:21.615439 17829 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:02:21.615452 17829 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:02:21.615464 17829 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:21.615480 17829 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:02:21.615532 17829 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:02:21.615550 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615568 17829 net.cpp:165] Memory required for data: 1064449500\nI0817 16:02:21.615586 17829 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:02:21.615602 17829 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:02:21.615613 17829 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:02:21.615633 17829 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.615653 17829 net.cpp:150] Setting up L2_b5_relu\nI0817 16:02:21.615669 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615679 17829 net.cpp:165] Memory required for data: 1068545500\nI0817 16:02:21.615689 17829 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.615702 17829 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.615713 17829 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:02:21.615731 17829 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:21.615752 17829 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:21.615828 17829 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.615846 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615859 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.615869 17829 net.cpp:165] Memory required for data: 1076737500\nI0817 16:02:21.615880 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:02:21.615905 17829 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:02:21.615918 17829 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:21.615937 17829 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:02:21.616452 17829 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:02:21.616472 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.616480 17829 net.cpp:165] Memory required for data: 1080833500\nI0817 16:02:21.616498 17829 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:02:21.616520 17829 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:02:21.616533 17829 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:02:21.616549 17829 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:02:21.616837 17829 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:02:21.616856 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.616865 17829 net.cpp:165] Memory required for data: 1084929500\nI0817 16:02:21.616888 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:21.616904 17829 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:02:21.616915 17829 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:02:21.616935 17829 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.617023 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:21.617204 17829 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:02:21.617224 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.617233 17829 net.cpp:165] Memory required for data: 1089025500\nI0817 16:02:21.617252 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:02:21.617267 17829 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:02:21.617280 17829 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:02:21.617293 17829 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.617312 17829 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:02:21.617326 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.617336 17829 net.cpp:165] Memory required for data: 1093121500\nI0817 16:02:21.617347 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:02:21.617372 17829 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:02:21.617385 17829 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:02:21.617408 17829 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:02:21.617933 17829 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:02:21.617960 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.617970 17829 net.cpp:165] Memory required for data: 1097217500\nI0817 16:02:21.617990 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:02:21.618011 17829 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:02:21.618024 17829 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:02:21.618044 17829 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:02:21.618319 17829 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:02:21.618337 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.618346 17829 net.cpp:165] Memory required for data: 1101313500\nI0817 16:02:21.618368 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:21.618386 17829 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:02:21.618397 17829 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:02:21.618412 17829 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.618504 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:21.618691 17829 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:02:21.618710 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.618721 17829 net.cpp:165] Memory required for data: 1105409500\nI0817 16:02:21.618739 17829 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:02:21.618764 17829 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:02:21.618777 17829 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:02:21.618791 17829 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:21.618808 17829 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:02:21.618860 17829 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:02:21.618878 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.618888 17829 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:02:21.618898 17829 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:02:21.618913 17829 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:02:21.618926 17829 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:02:21.618944 17829 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.618964 17829 net.cpp:150] Setting up L2_b6_relu\nI0817 16:02:21.618978 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.618988 17829 net.cpp:165] Memory required for data: 1113601500\nI0817 16:02:21.618999 17829 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.619014 17829 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.619024 17829 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:02:21.619045 17829 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:21.619065 17829 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:21.619145 17829 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.619165 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.619179 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.619189 17829 net.cpp:165] Memory required for data: 1121793500\nI0817 16:02:21.619199 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:02:21.619225 17829 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:02:21.619238 17829 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:21.619257 17829 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:02:21.619778 17829 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:02:21.619799 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.619808 17829 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:02:21.619827 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:02:21.619844 17829 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:02:21.619871 17829 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:02:21.619890 17829 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:02:21.620177 17829 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:02:21.620196 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.620205 17829 net.cpp:165] Memory required for data: 1129985500\nI0817 16:02:21.620227 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:21.620245 17829 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:02:21.620256 17829 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:02:21.620271 17829 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.620367 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:21.620548 17829 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:02:21.620573 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.620589 17829 net.cpp:165] Memory required for data: 1134081500\nI0817 16:02:21.620609 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:02:21.620625 17829 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:02:21.620637 17829 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:02:21.620652 17829 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.620671 17829 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:02:21.620685 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.620695 17829 net.cpp:165] Memory required for data: 1138177500\nI0817 16:02:21.620704 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:02:21.620733 17829 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:02:21.620745 17829 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:02:21.620767 17829 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:02:21.621268 17829 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:02:21.621286 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.621295 17829 net.cpp:165] Memory required for data: 1142273500\nI0817 16:02:21.621314 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:02:21.621341 17829 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:02:21.621354 17829 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:02:21.621376 17829 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:02:21.621671 17829 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:02:21.621690 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.621701 17829 net.cpp:165] Memory required for data: 1146369500\nI0817 16:02:21.621722 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:21.621739 17829 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:02:21.621752 17829 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:02:21.621765 17829 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.621858 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:21.622040 17829 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:02:21.622058 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.622067 17829 net.cpp:165] Memory required for data: 1150465500\nI0817 16:02:21.622086 17829 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:02:21.622107 17829 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:02:21.622119 17829 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:02:21.622133 17829 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:21.622149 17829 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:02:21.622196 17829 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:02:21.622215 17829 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:21.622225 17829 net.cpp:165] Memory required for data: 1154561500\nI0817 16:02:21.622236 17829 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:02:21.622254 17829 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:02:21.622267 17829 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:02:21.622290 17829 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.622310 17829 net.cpp:150] Setting up L2_b7_relu\nI0817 16:02:21.622328 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.622337 17829 net.cpp:165] Memory required for data: 1158657500\nI0817 16:02:21.622347 17829 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.622361 17829 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.622373 17829 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:02:21.622388 17829 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:21.622409 17829 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:21.622496 17829 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.622515 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.622529 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.622539 17829 net.cpp:165] Memory required for data: 1166849500\nI0817 16:02:21.622550 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:02:21.622573 17829 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:02:21.622594 17829 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:21.622614 17829 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:02:21.623307 17829 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:02:21.623327 17829 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:21.623338 17829 net.cpp:165] Memory required for data: 1170945500\nI0817 16:02:21.623356 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:02:21.623378 17829 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:02:21.623390 17829 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:02:21.623411 17829 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:02:21.623709 17829 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:02:21.623728 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.623739 17829 net.cpp:165] Memory required for data: 1175041500\nI0817 16:02:21.623760 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:21.623776 17829 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:02:21.623788 17829 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:02:21.623803 17829 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.623896 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:21.624083 17829 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:02:21.624102 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.624112 17829 net.cpp:165] Memory required for data: 1179137500\nI0817 16:02:21.624130 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:02:21.624150 17829 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:02:21.624162 17829 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:02:21.624177 17829 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.624197 17829 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:02:21.624210 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.624222 17829 net.cpp:165] Memory required for data: 1183233500\nI0817 16:02:21.624231 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:02:21.624255 17829 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:02:21.624269 17829 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:02:21.624290 17829 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:02:21.624814 17829 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:02:21.624833 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.624843 17829 net.cpp:165] Memory required for data: 1187329500\nI0817 16:02:21.624862 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:02:21.624883 17829 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:02:21.624907 17829 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:02:21.624928 17829 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:02:21.625223 17829 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:02:21.625242 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.625252 17829 net.cpp:165] Memory required for data: 1191425500\nI0817 16:02:21.625273 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:21.625289 17829 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:02:21.625301 17829 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:02:21.625316 17829 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.625412 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:21.625604 17829 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:02:21.625623 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.625633 17829 net.cpp:165] Memory required for data: 1195521500\nI0817 16:02:21.625651 17829 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:02:21.625671 17829 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:02:21.625685 17829 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:02:21.625699 17829 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:21.625715 17829 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:02:21.625763 17829 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:02:21.625782 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.625792 17829 net.cpp:165] Memory required for data: 1199617500\nI0817 16:02:21.625803 17829 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:02:21.625828 17829 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:02:21.625840 17829 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:02:21.625854 17829 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.625874 17829 net.cpp:150] Setting up L2_b8_relu\nI0817 16:02:21.625888 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.625897 17829 net.cpp:165] Memory required for data: 1203713500\nI0817 16:02:21.625908 17829 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.625922 17829 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.625933 17829 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:02:21.625948 17829 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:21.625986 17829 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:21.626073 17829 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.626092 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.626106 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.626116 17829 net.cpp:165] Memory required for data: 1211905500\nI0817 16:02:21.626126 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:02:21.626150 17829 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:02:21.626164 17829 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:21.626183 17829 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:02:21.626713 17829 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:02:21.626734 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.626742 17829 net.cpp:165] Memory required for data: 1216001500\nI0817 16:02:21.626761 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:02:21.626783 17829 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:02:21.626796 17829 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:02:21.626813 17829 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:02:21.627102 17829 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:02:21.627125 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.627144 17829 net.cpp:165] Memory required for data: 1220097500\nI0817 16:02:21.627168 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:21.627185 17829 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:02:21.627197 17829 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:02:21.627213 17829 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.627307 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:21.627496 17829 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:02:21.627516 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.627524 17829 net.cpp:165] Memory required for data: 1224193500\nI0817 16:02:21.627543 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:02:21.627559 17829 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:02:21.627570 17829 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:02:21.627596 17829 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.627617 17829 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:02:21.627634 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.627642 17829 net.cpp:165] Memory required for data: 1228289500\nI0817 16:02:21.627653 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:02:21.627678 17829 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:02:21.627692 17829 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:02:21.627712 17829 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:02:21.628212 17829 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:02:21.628232 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.628242 17829 net.cpp:165] Memory required for data: 1232385500\nI0817 16:02:21.628259 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:02:21.628280 17829 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:02:21.628293 17829 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:02:21.628309 17829 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:02:21.628612 17829 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:02:21.628631 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.628641 17829 net.cpp:165] Memory required for data: 1236481500\nI0817 16:02:21.628710 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:21.628731 17829 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:02:21.628748 17829 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:02:21.628764 17829 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.628859 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:21.629045 17829 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:02:21.629065 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.629073 17829 net.cpp:165] Memory required for data: 1240577500\nI0817 16:02:21.629091 17829 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:02:21.629108 17829 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:02:21.629120 17829 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:02:21.629133 17829 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:21.629153 17829 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:02:21.629202 17829 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:02:21.629220 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.629230 17829 net.cpp:165] Memory required for data: 1244673500\nI0817 16:02:21.629241 17829 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:02:21.629259 17829 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:02:21.629272 17829 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:02:21.629287 17829 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.629304 17829 net.cpp:150] Setting up L2_b9_relu\nI0817 16:02:21.629320 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.629329 17829 net.cpp:165] Memory required for data: 1248769500\nI0817 16:02:21.629346 17829 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.629365 17829 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.629377 17829 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:02:21.629391 17829 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:21.629412 17829 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:21.629496 17829 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.629523 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.629536 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.629546 17829 net.cpp:165] Memory required for data: 1256961500\nI0817 16:02:21.629557 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:02:21.629592 17829 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:02:21.629607 17829 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:21.629627 17829 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:02:21.630153 17829 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:02:21.630173 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.630182 17829 net.cpp:165] Memory required for data: 1257985500\nI0817 16:02:21.630200 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:02:21.630223 17829 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:02:21.630235 17829 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:02:21.630252 17829 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:02:21.630551 17829 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:02:21.630570 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.630585 17829 net.cpp:165] Memory required for data: 1259009500\nI0817 16:02:21.630609 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:21.630630 17829 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:02:21.630642 17829 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:02:21.630658 17829 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.630759 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:21.630952 17829 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:02:21.630972 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.630981 17829 net.cpp:165] Memory required for data: 1260033500\nI0817 16:02:21.631000 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:02:21.631016 17829 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:02:21.631028 17829 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:02:21.631048 17829 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.631068 17829 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:02:21.631083 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.631093 17829 net.cpp:165] Memory required for data: 1261057500\nI0817 16:02:21.631104 17829 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:02:21.631124 17829 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:02:21.631136 17829 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:02:21.631158 17829 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:02:21.631686 17829 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:02:21.631705 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.631716 17829 net.cpp:165] Memory required for data: 1262081500\nI0817 16:02:21.631733 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:02:21.631754 17829 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:02:21.631767 17829 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:02:21.631783 17829 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:02:21.632086 17829 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:02:21.632104 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.632122 17829 net.cpp:165] Memory required for data: 1263105500\nI0817 16:02:21.632143 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:21.632160 17829 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:02:21.632174 17829 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:02:21.632187 17829 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.632285 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:21.632488 17829 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:02:21.632511 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.632521 17829 net.cpp:165] Memory required for data: 1264129500\nI0817 16:02:21.632540 17829 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:02:21.632557 17829 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:02:21.632570 17829 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:21.632593 17829 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:02:21.632655 17829 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:02:21.632674 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.632685 17829 net.cpp:165] Memory required for data: 1265153500\nI0817 16:02:21.632695 17829 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:02:21.632715 17829 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:02:21.632728 17829 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:02:21.632741 17829 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:02:21.632756 17829 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:02:21.632818 17829 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:02:21.632836 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.632848 17829 net.cpp:165] Memory required for data: 1266177500\nI0817 16:02:21.632858 17829 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:02:21.632872 17829 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:02:21.632884 17829 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:02:21.632899 17829 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.632916 17829 net.cpp:150] Setting up L3_b1_relu\nI0817 16:02:21.632931 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.632941 17829 net.cpp:165] Memory required for data: 1267201500\nI0817 16:02:21.632951 17829 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:02:21.632968 17829 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:02:21.632987 17829 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:02:21.634270 17829 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:02:21.634294 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.634308 17829 net.cpp:165] Memory required for data: 1268225500\nI0817 16:02:21.634318 17829 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:02:21.634335 17829 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:02:21.634347 17829 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:02:21.634361 17829 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:02:21.634377 17829 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:02:21.634449 17829 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:02:21.634470 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.634480 17829 net.cpp:165] Memory required for data: 1270273500\nI0817 16:02:21.634490 17829 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.634505 17829 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.634516 17829 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:02:21.634536 17829 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:21.634557 17829 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:21.634655 17829 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.634675 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.634687 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.634706 17829 net.cpp:165] Memory required for data: 1274369500\nI0817 16:02:21.634718 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:02:21.634742 17829 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:02:21.634757 17829 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:21.634776 17829 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:02:21.636808 17829 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:02:21.636837 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.636849 17829 net.cpp:165] Memory required for data: 1276417500\nI0817 16:02:21.636868 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:02:21.636886 17829 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:02:21.636898 17829 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:02:21.636920 17829 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:02:21.637235 17829 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:02:21.637254 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.637264 17829 net.cpp:165] Memory required for data: 1278465500\nI0817 16:02:21.637284 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:21.637305 17829 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:02:21.637320 17829 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:02:21.637336 17829 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.637428 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:21.637625 17829 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:02:21.637645 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.637655 17829 net.cpp:165] Memory required for data: 1280513500\nI0817 16:02:21.637673 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:02:21.637688 17829 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:02:21.637701 17829 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:02:21.637718 17829 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.637739 17829 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:02:21.637753 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.637763 17829 net.cpp:165] Memory required for data: 1282561500\nI0817 16:02:21.637773 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:02:21.637799 17829 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:02:21.637812 17829 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:02:21.637830 17829 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:02:21.638896 17829 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:02:21.638916 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.638926 17829 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:02:21.638952 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:02:21.638973 17829 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:02:21.638988 17829 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:02:21.639004 17829 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:02:21.639315 17829 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:02:21.639333 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.639343 17829 net.cpp:165] Memory required for data: 1286657500\nI0817 16:02:21.639364 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:21.639381 17829 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:02:21.639394 17829 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:02:21.639410 17829 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.639508 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:21.639708 17829 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:02:21.639727 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.639736 17829 net.cpp:165] Memory required for data: 1288705500\nI0817 16:02:21.639755 17829 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:02:21.639783 17829 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:02:21.639796 17829 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:02:21.639811 17829 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:21.639832 17829 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:02:21.639890 17829 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:02:21.639917 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.639928 17829 net.cpp:165] Memory required for data: 1290753500\nI0817 16:02:21.639940 17829 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:02:21.639952 17829 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:02:21.639964 17829 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:02:21.639979 17829 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:02:21.639998 17829 net.cpp:150] Setting up L3_b2_relu\nI0817 16:02:21.640013 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.640023 17829 net.cpp:165] Memory required for data: 1292801500\nI0817 16:02:21.640033 17829 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.640051 17829 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.640064 17829 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:02:21.640079 17829 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:21.640100 17829 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:21.640187 17829 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.640204 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.640216 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.640226 17829 net.cpp:165] Memory required for data: 1296897500\nI0817 16:02:21.640236 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:02:21.640257 17829 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:02:21.640270 17829 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:21.640295 17829 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:02:21.641361 17829 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:02:21.641381 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.641391 17829 net.cpp:165] Memory required for data: 1298945500\nI0817 16:02:21.641410 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:02:21.641427 17829 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:02:21.641439 17829 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:02:21.641460 17829 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:02:21.641762 17829 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:02:21.641786 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.641798 17829 net.cpp:165] Memory required for data: 1300993500\nI0817 16:02:21.641819 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:21.641835 17829 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:02:21.641849 17829 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:02:21.641865 17829 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.641964 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:21.642155 17829 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:02:21.642174 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.642184 17829 net.cpp:165] Memory required for data: 1303041500\nI0817 16:02:21.642202 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:02:21.642218 17829 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:02:21.642230 17829 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:02:21.642253 17829 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.642274 17829 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:02:21.642289 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.642308 17829 net.cpp:165] Memory required for data: 1305089500\nI0817 16:02:21.642319 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:02:21.642344 17829 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:02:21.642359 17829 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:02:21.642377 17829 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:02:21.643445 17829 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:02:21.643465 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.643476 17829 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:02:21.643493 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:02:21.643515 17829 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:02:21.643527 17829 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:02:21.643550 17829 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:02:21.643849 17829 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:02:21.643868 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.643877 17829 net.cpp:165] Memory required for data: 1309185500\nI0817 16:02:21.643898 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:21.643915 17829 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:02:21.643928 17829 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:02:21.643947 17829 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.644039 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:21.644232 17829 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:02:21.644249 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.644258 17829 net.cpp:165] Memory required for data: 1311233500\nI0817 16:02:21.644278 17829 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:02:21.644294 17829 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:02:21.644306 17829 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:02:21.644320 17829 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:21.644343 17829 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:02:21.644407 17829 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:02:21.644425 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.644435 17829 net.cpp:165] Memory required for data: 1313281500\nI0817 16:02:21.644445 17829 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:02:21.644459 17829 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:02:21.644471 17829 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:02:21.644485 17829 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.644510 17829 net.cpp:150] Setting up L3_b3_relu\nI0817 16:02:21.644525 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.644534 17829 net.cpp:165] Memory required for data: 1315329500\nI0817 16:02:21.644546 17829 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.644559 17829 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.644570 17829 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:02:21.644593 17829 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:21.644614 17829 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:21.644701 17829 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.644719 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.644732 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.644742 17829 net.cpp:165] Memory required for data: 1319425500\nI0817 16:02:21.644752 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:02:21.644773 17829 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:02:21.644785 17829 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:21.644809 17829 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:02:21.645898 17829 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:02:21.645918 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.645928 17829 net.cpp:165] Memory required for data: 1321473500\nI0817 16:02:21.645946 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:02:21.645964 17829 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:02:21.645975 17829 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:02:21.645998 17829 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:02:21.646301 17829 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:02:21.646323 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.646333 17829 net.cpp:165] Memory required for data: 1323521500\nI0817 16:02:21.646356 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:21.646373 17829 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:02:21.646384 17829 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:02:21.646400 17829 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.646493 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:21.646692 17829 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:02:21.646711 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.646721 17829 net.cpp:165] Memory required for data: 1325569500\nI0817 16:02:21.646739 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:02:21.646759 17829 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:02:21.646772 17829 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:02:21.646787 17829 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.646806 17829 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:02:21.646821 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.646831 17829 net.cpp:165] Memory required for data: 1327617500\nI0817 16:02:21.646842 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:02:21.646865 17829 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:02:21.646878 17829 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:02:21.646896 17829 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:02:21.647964 17829 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:02:21.647984 17829 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:02:21.647994 17829 net.cpp:165] Memory required for data: 1329665500\nI0817 16:02:21.648012 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:02:21.648035 17829 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:02:21.648047 17829 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:02:21.648068 17829 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:02:21.648366 17829 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:02:21.648386 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.648394 17829 net.cpp:165] Memory required for data: 1331713500\nI0817 16:02:21.648416 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:21.648432 17829 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:02:21.648444 17829 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:02:21.648464 17829 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.648561 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:21.648766 17829 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:02:21.648784 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.648793 17829 net.cpp:165] Memory required for data: 1333761500\nI0817 16:02:21.648813 17829 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:02:21.648830 17829 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:02:21.648843 17829 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:02:21.648855 17829 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:21.648876 17829 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:02:21.648939 17829 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:02:21.648967 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.648977 17829 net.cpp:165] Memory required for data: 1335809500\nI0817 16:02:21.648988 17829 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:02:21.649000 17829 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:02:21.649013 17829 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:02:21.649032 17829 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:02:21.649052 17829 net.cpp:150] Setting up L3_b4_relu\nI0817 16:02:21.649067 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.649077 17829 net.cpp:165] Memory required for data: 1337857500\nI0817 16:02:21.649087 17829 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.649102 17829 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.649113 17829 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:02:21.649127 17829 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:21.649147 17829 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:21.649230 17829 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.649248 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.649262 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.649271 17829 net.cpp:165] Memory required for data: 1341953500\nI0817 16:02:21.649281 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:02:21.649302 17829 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:02:21.649314 17829 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:21.649338 17829 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:02:21.650414 17829 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:02:21.650434 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.650444 17829 net.cpp:165] Memory required for data: 1344001500\nI0817 16:02:21.650461 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:02:21.650478 17829 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:02:21.650491 17829 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:02:21.650511 17829 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:02:21.651821 17829 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:02:21.651844 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.651852 17829 net.cpp:165] Memory required for data: 1346049500\nI0817 16:02:21.651875 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:21.651892 17829 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:02:21.651904 17829 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:02:21.651924 17829 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.652017 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:21.652212 17829 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:02:21.652231 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.652241 17829 net.cpp:165] Memory required for data: 1348097500\nI0817 16:02:21.652259 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:02:21.652276 17829 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:02:21.652288 17829 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:02:21.652307 17829 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.652328 17829 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:02:21.652343 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.652353 17829 net.cpp:165] Memory required for data: 1350145500\nI0817 16:02:21.652364 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:02:21.652390 17829 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:02:21.652402 17829 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:02:21.652420 17829 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:02:21.654502 17829 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:02:21.654525 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.654536 17829 net.cpp:165] Memory required for data: 1352193500\nI0817 16:02:21.654553 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:02:21.654577 17829 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:02:21.654597 17829 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:02:21.654619 17829 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:02:21.654932 17829 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:02:21.654953 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.654963 17829 net.cpp:165] Memory required for data: 1354241500\nI0817 16:02:21.654984 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:21.655002 17829 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:02:21.655015 17829 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:02:21.655035 17829 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.655125 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:21.655315 17829 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:02:21.655334 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.655344 17829 net.cpp:165] Memory required for data: 1356289500\nI0817 16:02:21.655362 17829 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:02:21.655380 17829 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:02:21.655392 17829 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:02:21.655405 17829 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:21.655426 17829 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:02:21.655486 17829 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:02:21.655504 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.655514 17829 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:02:21.655524 17829 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:02:21.655539 17829 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:02:21.655551 17829 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:02:21.655570 17829 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.655597 17829 net.cpp:150] Setting up L3_b5_relu\nI0817 16:02:21.655613 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.655623 17829 net.cpp:165] Memory required for data: 1360385500\nI0817 16:02:21.655633 17829 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.655647 17829 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.655659 17829 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:02:21.655676 17829 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:21.655696 17829 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:21.655781 17829 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.655800 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.655814 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.655824 17829 net.cpp:165] Memory required for data: 1364481500\nI0817 16:02:21.655835 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:02:21.655854 17829 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:02:21.655867 17829 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:21.655891 17829 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:02:21.656961 17829 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:02:21.656982 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.656991 17829 net.cpp:165] Memory required for data: 1366529500\nI0817 16:02:21.657011 
17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:02:21.657027 17829 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:02:21.657047 17829 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:02:21.657071 17829 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:02:21.657373 17829 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:02:21.657397 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.657407 17829 net.cpp:165] Memory required for data: 1368577500\nI0817 16:02:21.657429 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:21.657446 17829 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:02:21.657459 17829 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:02:21.657474 17829 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.657565 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:21.657765 17829 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:02:21.657785 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.657794 17829 net.cpp:165] Memory required for data: 1370625500\nI0817 16:02:21.657814 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:02:21.657833 17829 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:02:21.657846 17829 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:02:21.657861 17829 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.657881 17829 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:02:21.657894 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.657905 17829 net.cpp:165] Memory required for data: 1372673500\nI0817 16:02:21.657914 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:02:21.657939 17829 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:02:21.657953 17829 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:02:21.657970 17829 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:02:21.659024 17829 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:02:21.659044 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.659054 17829 net.cpp:165] Memory required for data: 1374721500\nI0817 16:02:21.659072 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:02:21.659093 17829 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:02:21.659106 17829 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:02:21.659126 17829 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:02:21.659418 17829 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:02:21.659437 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.659447 17829 net.cpp:165] Memory required for data: 1376769500\nI0817 16:02:21.659468 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:21.659485 17829 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:02:21.659497 17829 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:02:21.659517 17829 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.659612 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:21.659803 17829 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:02:21.659822 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.659832 17829 net.cpp:165] Memory required for data: 1378817500\nI0817 16:02:21.659850 17829 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:02:21.659868 17829 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:02:21.659879 17829 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:02:21.659893 17829 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:21.659914 17829 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:02:21.659973 17829 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:02:21.659991 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.660001 
17829 net.cpp:165] Memory required for data: 1380865500\nI0817 16:02:21.660012 17829 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:02:21.660027 17829 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:02:21.660038 17829 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:02:21.660063 17829 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.660084 17829 net.cpp:150] Setting up L3_b6_relu\nI0817 16:02:21.660101 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.660110 17829 net.cpp:165] Memory required for data: 1382913500\nI0817 16:02:21.660120 17829 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.660133 17829 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.660145 17829 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:02:21.660161 17829 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:21.660179 17829 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:21.660264 17829 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.660282 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.660295 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.660305 17829 net.cpp:165] Memory required for data: 1387009500\nI0817 16:02:21.660315 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:02:21.660334 17829 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:02:21.660347 17829 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:21.660372 17829 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:02:21.661428 17829 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:02:21.661448 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.661458 17829 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:02:21.661476 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:02:21.661494 17829 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:02:21.661506 17829 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:02:21.661528 17829 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:02:21.661833 17829 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:02:21.661851 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.661860 17829 net.cpp:165] Memory required for data: 1391105500\nI0817 16:02:21.661882 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:21.661898 17829 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:02:21.661911 17829 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:02:21.661926 17829 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.662019 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:21.662210 17829 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:02:21.662233 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.662243 17829 net.cpp:165] Memory required for data: 1393153500\nI0817 16:02:21.662262 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:02:21.662310 17829 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:02:21.662328 17829 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:02:21.662343 17829 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.662364 17829 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:02:21.662379 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.662389 17829 net.cpp:165] Memory required for data: 1395201500\nI0817 16:02:21.662400 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:02:21.662422 17829 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:02:21.662434 17829 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:02:21.662457 
17829 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:02:21.663525 17829 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:02:21.663545 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.663555 17829 net.cpp:165] Memory required for data: 1397249500\nI0817 16:02:21.663573 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:02:21.663597 17829 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:02:21.663619 17829 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:02:21.663640 17829 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:02:21.663952 17829 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:02:21.663975 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.663986 17829 net.cpp:165] Memory required for data: 1399297500\nI0817 16:02:21.664008 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:21.664026 17829 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:02:21.664037 17829 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:02:21.664053 17829 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.664144 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:21.664341 17829 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:02:21.664361 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.664369 17829 net.cpp:165] Memory required for data: 1401345500\nI0817 16:02:21.664387 17829 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:02:21.664408 17829 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:02:21.664422 17829 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:02:21.664434 17829 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:21.664451 17829 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:02:21.664512 17829 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:02:21.664531 17829 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:02:21.664541 17829 net.cpp:165] Memory required for data: 1403393500\nI0817 16:02:21.664551 17829 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:02:21.664566 17829 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:02:21.664583 17829 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:02:21.664599 17829 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.664618 17829 net.cpp:150] Setting up L3_b7_relu\nI0817 16:02:21.664633 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.664643 17829 net.cpp:165] Memory required for data: 1405441500\nI0817 16:02:21.664652 17829 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.664666 17829 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.664677 17829 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:02:21.664696 17829 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:21.664718 17829 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:21.664798 17829 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.664815 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.664829 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.664839 17829 net.cpp:165] Memory required for data: 1409537500\nI0817 16:02:21.664849 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:02:21.664875 17829 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:02:21.664888 17829 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:21.664907 17829 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:02:21.665982 17829 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:02:21.666003 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:02:21.666013 17829 net.cpp:165] Memory required for data: 1411585500\nI0817 16:02:21.666030 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:02:21.666051 17829 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:02:21.666064 17829 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:02:21.666081 17829 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:02:21.666383 17829 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:02:21.666401 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.666411 17829 net.cpp:165] Memory required for data: 1413633500\nI0817 16:02:21.666442 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:21.666463 17829 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:02:21.666477 17829 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:02:21.666493 17829 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.666597 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:21.666795 17829 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:02:21.666815 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.666824 17829 net.cpp:165] Memory required for data: 1415681500\nI0817 16:02:21.666843 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:02:21.666862 17829 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:02:21.666875 17829 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:02:21.666890 17829 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.666910 17829 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:02:21.666925 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.666935 17829 net.cpp:165] Memory required for data: 1417729500\nI0817 16:02:21.666945 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:02:21.666970 17829 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:02:21.666983 17829 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:02:21.667006 17829 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:02:21.668066 17829 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:02:21.668087 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.668095 17829 net.cpp:165] Memory required for data: 1419777500\nI0817 16:02:21.668113 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:02:21.668131 17829 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:02:21.668143 17829 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:02:21.668169 17829 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:02:21.668467 17829 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:02:21.668488 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.668496 17829 net.cpp:165] Memory required for data: 1421825500\nI0817 16:02:21.668517 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:21.668534 17829 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:02:21.668546 17829 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:02:21.668561 17829 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.668659 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:21.668851 17829 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:02:21.668870 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.668880 17829 net.cpp:165] Memory required for data: 1423873500\nI0817 16:02:21.668900 17829 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:02:21.668920 17829 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:02:21.668933 17829 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:02:21.668946 17829 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:21.668962 17829 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:02:21.669021 17829 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:02:21.669040 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.669050 17829 net.cpp:165] Memory required for data: 1425921500\nI0817 16:02:21.669060 17829 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:02:21.669075 17829 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:02:21.669085 17829 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:02:21.669100 17829 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.669118 17829 net.cpp:150] Setting up L3_b8_relu\nI0817 16:02:21.669133 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.669143 17829 net.cpp:165] Memory required for data: 1427969500\nI0817 16:02:21.669154 17829 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.669178 17829 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.669190 17829 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:02:21.669209 17829 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:21.669231 17829 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:21.669317 17829 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.669337 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.669349 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.669359 17829 net.cpp:165] Memory required for data: 1432065500\nI0817 16:02:21.669370 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:02:21.669395 17829 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:02:21.669409 17829 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:21.669427 17829 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:02:21.671491 17829 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:02:21.671514 
17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.671525 17829 net.cpp:165] Memory required for data: 1434113500\nI0817 16:02:21.671542 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:02:21.671564 17829 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:02:21.671583 17829 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:02:21.671602 17829 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:02:21.671900 17829 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:02:21.671921 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.671929 17829 net.cpp:165] Memory required for data: 1436161500\nI0817 16:02:21.671952 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:21.671968 17829 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:02:21.671980 17829 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:02:21.671995 17829 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.672089 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:21.672288 17829 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:02:21.672307 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.672317 17829 net.cpp:165] Memory required for data: 1438209500\nI0817 16:02:21.672334 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:02:21.672351 17829 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:02:21.672363 17829 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:02:21.672377 17829 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.672401 17829 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:02:21.672416 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.672426 17829 net.cpp:165] Memory required for data: 1440257500\nI0817 16:02:21.672437 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:02:21.672458 17829 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:02:21.672475 17829 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:02:21.672493 17829 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:02:21.673560 17829 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:02:21.673586 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.673598 17829 net.cpp:165] Memory required for data: 1442305500\nI0817 16:02:21.673616 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:02:21.673638 17829 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:02:21.673651 17829 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:02:21.673668 17829 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:02:21.673967 17829 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:02:21.673985 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.673995 17829 net.cpp:165] Memory required for data: 1444353500\nI0817 16:02:21.674026 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:21.674047 17829 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:02:21.674060 17829 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:02:21.674077 17829 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.674177 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:21.674367 17829 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:02:21.674387 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.674396 17829 net.cpp:165] Memory required for data: 1446401500\nI0817 16:02:21.674414 17829 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:02:21.674437 17829 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:02:21.674450 17829 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:02:21.674464 17829 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:21.674484 17829 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:02:21.674540 17829 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:02:21.674558 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.674568 17829 net.cpp:165] Memory required for data: 1448449500\nI0817 16:02:21.674585 17829 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:02:21.674609 17829 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:02:21.674623 17829 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:02:21.674638 17829 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.674657 17829 net.cpp:150] Setting up L3_b9_relu\nI0817 16:02:21.674671 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.674681 17829 net.cpp:165] Memory required for data: 1450497500\nI0817 16:02:21.674691 17829 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:02:21.674706 17829 net.cpp:100] Creating Layer post_pool\nI0817 16:02:21.674718 17829 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:02:21.674733 17829 net.cpp:408] post_pool -> post_pool\nI0817 16:02:21.674791 17829 net.cpp:150] Setting up post_pool\nI0817 16:02:21.674814 17829 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:02:21.674824 17829 net.cpp:165] Memory required for data: 1450529500\nI0817 16:02:21.674835 17829 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:02:21.674942 17829 net.cpp:100] Creating Layer post_FC\nI0817 16:02:21.674957 17829 net.cpp:434] post_FC <- post_pool\nI0817 16:02:21.674974 17829 net.cpp:408] post_FC -> post_FC_top\nI0817 16:02:21.675251 17829 net.cpp:150] Setting up post_FC\nI0817 16:02:21.675271 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.675282 17829 net.cpp:165] Memory required for data: 1450534500\nI0817 16:02:21.675299 17829 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:02:21.675315 17829 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:02:21.675328 17829 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:02:21.675343 17829 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:02:21.675367 17829 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:02:21.675449 17829 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:02:21.675467 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.675482 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.675493 17829 net.cpp:165] Memory required for data: 1450544500\nI0817 16:02:21.675503 17829 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:02:21.675565 17829 net.cpp:100] Creating Layer accuracy\nI0817 16:02:21.675590 17829 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:02:21.675604 17829 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:02:21.675621 17829 net.cpp:408] accuracy -> accuracy\nI0817 16:02:21.675684 17829 net.cpp:150] Setting up accuracy\nI0817 16:02:21.675703 17829 net.cpp:157] Top shape: (1)\nI0817 16:02:21.675712 17829 net.cpp:165] Memory required for data: 1450544504\nI0817 16:02:21.675722 17829 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:21.675747 17829 net.cpp:100] Creating Layer loss\nI0817 16:02:21.675760 17829 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:02:21.675772 17829 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:02:21.675788 17829 net.cpp:408] loss -> loss\nI0817 16:02:21.675858 17829 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:21.676045 17829 net.cpp:150] Setting up loss\nI0817 16:02:21.676064 17829 net.cpp:157] Top shape: (1)\nI0817 16:02:21.676074 17829 net.cpp:160]     with loss weight 1\nI0817 16:02:21.676174 17829 net.cpp:165] Memory required for data: 1450544508\nI0817 16:02:21.676188 17829 net.cpp:226] loss needs backward computation.\nI0817 16:02:21.676200 17829 net.cpp:228] accuracy does not need backward computation.\nI0817 16:02:21.676211 17829 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:02:21.676221 17829 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:02:21.676231 17829 net.cpp:226] post_pool needs backward computation.\nI0817 16:02:21.676241 17829 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:02:21.676252 17829 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.676262 17829 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.676271 17829 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.676281 17829 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.676292 17829 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.676302 17829 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.676311 17829 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.676322 17829 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.676332 17829 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:02:21.676343 17829 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:02:21.676353 17829 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.676363 17829 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.676374 17829 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.676385 17829 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.676395 17829 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.676405 17829 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:02:21.676415 17829 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.676425 17829 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.676436 17829 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:02:21.676447 17829 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:02:21.676457 17829 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:02:21.676468 17829 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.676478 17829 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.676488 17829 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.676499 17829 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.676509 17829 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.676518 17829 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.676528 17829 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.676539 17829 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:02:21.676549 17829 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:02:21.676559 17829 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.676570 17829 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.676589 17829 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.676601 17829 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.676625 17829 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.676636 17829 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.676646 17829 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.676656 17829 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.676667 17829 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:02:21.676679 17829 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:02:21.676689 17829 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:02:21.676702 17829 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.676712 17829 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.676723 
17829 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.676733 17829 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.676743 17829 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.676754 17829 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.676764 17829 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.676775 17829 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:02:21.676787 17829 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:02:21.676796 17829 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.676807 17829 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.676818 17829 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.676829 17829 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.676847 17829 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.676861 17829 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.676870 17829 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.676882 17829 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.676892 17829 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:02:21.676903 17829 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:02:21.676913 17829 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:02:21.676925 17829 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.676935 17829 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.676947 17829 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.676956 17829 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.676966 17829 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:02:21.676977 17829 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.676988 17829 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.676998 17829 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:02:21.677009 17829 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:02:21.677021 17829 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.677031 17829 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.677040 17829 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.677052 17829 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.677063 17829 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.677074 17829 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.677084 17829 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.677095 17829 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.677108 17829 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:02:21.677117 17829 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:02:21.677129 17829 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:02:21.677148 17829 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:02:21.677160 17829 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.677172 17829 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:02:21.677183 17829 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.677194 17829 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.677206 17829 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.677217 17829 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.677227 17829 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:02:21.677237 17829 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.677248 17829 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.677259 17829 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:02:21.677269 17829 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:02:21.677280 17829 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.677291 17829 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.677302 17829 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.677314 17829 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.677325 17829 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.677335 17829 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.677345 17829 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.677356 17829 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.677367 17829 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:02:21.677379 17829 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:02:21.677389 17829 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.677400 17829 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.677417 17829 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.677429 17829 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.677440 17829 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.677451 17829 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:02:21.677462 17829 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.677474 17829 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.677484 17829 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:02:21.677495 17829 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:02:21.677506 17829 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:02:21.677517 17829 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.677528 17829 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.677539 17829 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.677549 17829 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.677561 17829 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.677572 17829 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.677589 17829 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.677601 17829 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:02:21.677613 17829 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:02:21.677623 17829 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.677634 17829 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.677645 17829 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.677657 17829 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.677669 17829 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.677678 17829 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.677696 17829 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.677708 17829 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.677719 17829 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:02:21.677731 17829 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:02:21.677742 17829 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:02:21.677754 17829 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.677765 17829 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.677776 17829 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.677786 17829 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.677798 17829 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.677808 17829 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.677819 17829 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.677830 17829 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:02:21.677841 17829 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:02:21.677852 17829 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.677865 17829 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.677875 17829 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.677887 17829 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.677898 17829 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.677911 17829 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.677922 17829 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.677933 17829 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.677943 17829 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:02:21.677955 17829 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:02:21.677966 17829 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:02:21.677978 17829 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.677989 17829 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.678001 17829 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.678012 17829 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.678022 17829 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:02:21.678033 17829 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.678045 17829 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.678057 17829 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:02:21.678069 17829 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:02:21.678081 17829 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.678093 17829 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.678103 17829 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.678114 17829 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.678127 17829 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.678138 17829 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.678148 17829 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.678158 17829 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.678170 17829 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:02:21.678181 17829 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:02:21.678200 17829 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:02:21.678211 17829 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:02:21.678221 17829 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.678241 17829 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:02:21.678254 17829 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.678266 17829 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.678277 17829 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.678289 17829 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.678300 17829 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:02:21.678311 17829 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.678323 17829 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.678333 17829 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:02:21.678345 17829 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:02:21.678356 17829 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.678369 17829 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.678380 17829 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.678391 17829 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.678402 17829 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.678413 17829 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.678423 17829 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.678436 17829 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.678447 17829 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:02:21.678459 17829 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:02:21.678469 17829 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.678483 17829 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.678491 17829 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.678503 17829 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.678515 17829 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.678527 17829 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:02:21.678537 17829 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.678548 17829 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.678561 17829 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:02:21.678572 17829 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:02:21.678592 17829 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:02:21.678604 17829 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.678616 17829 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.678627 17829 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.678638 17829 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.678649 17829 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.678659 17829 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.678673 17829 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.678683 17829 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:02:21.678694 17829 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:02:21.678705 17829 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.678719 17829 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.678730 17829 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.678741 17829 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.678753 17829 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.678766 17829 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.678776 17829 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.678795 17829 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.678807 17829 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:02:21.678819 17829 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:02:21.678830 17829 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:02:21.678843 17829 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.678854 17829 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.678866 17829 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.678877 17829 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.678887 17829 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.678899 17829 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.678910 17829 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.678920 17829 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:02:21.678932 17829 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:02:21.678944 17829 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.678956 17829 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.678967 17829 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.678979 17829 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.678990 17829 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.679002 17829 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.679013 17829 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.679024 17829 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.679036 17829 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:02:21.679047 17829 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:02:21.679059 17829 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:02:21.679071 17829 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.679082 17829 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.679095 17829 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.679106 17829 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.679116 17829 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:02:21.679127 17829 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.679141 17829 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.679152 17829 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:02:21.679163 17829 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:02:21.679174 17829 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.679186 17829 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.679198 17829 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.679208 17829 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.679221 17829 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.679231 17829 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.679242 17829 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.679253 17829 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.679265 17829 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:02:21.679275 17829 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:02:21.679286 17829 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.679299 17829 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.679311 17829 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.679322 17829 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.679342 17829 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.679354 17829 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:02:21.679394 17829 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.679430 17829 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.679440 17829 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:02:21.679446 17829 net.cpp:226] pre_relu needs backward computation.\nI0817 16:02:21.679451 17829 net.cpp:226] pre_scale needs backward computation.\nI0817 16:02:21.679457 17829 net.cpp:226] pre_bn needs backward computation.\nI0817 16:02:21.679462 17829 net.cpp:226] pre_conv needs backward computation.\nI0817 16:02:21.679471 17829 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:02:21.679476 17829 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:02:21.679481 17829 net.cpp:270] This network produces output accuracy\nI0817 16:02:21.679489 17829 net.cpp:270] This network produces output loss\nI0817 16:02:21.679916 17829 net.cpp:283] Network initialization done.\nI0817 16:02:21.689224 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:21.689265 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:21.689326 17829 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:02:21.689725 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:02:21.689744 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:02:21.689754 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:02:21.689764 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:02:21.689774 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:02:21.689782 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:02:21.689791 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:02:21.689800 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:02:21.689810 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:02:21.689818 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:02:21.689827 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:02:21.689836 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:02:21.689844 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:02:21.689853 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:02:21.689862 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:02:21.689872 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:02:21.689880 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:02:21.689888 17829 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:02:21.689898 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:02:21.689918 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:02:21.689929 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:02:21.689936 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:02:21.689949 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:02:21.689959 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:02:21.689967 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:02:21.689975 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:02:21.689983 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:02:21.689991 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:02:21.690001 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:02:21.690009 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:02:21.690018 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:02:21.690027 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:02:21.690037 17829 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:02:21.690043 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:02:21.690052 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:02:21.690062 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:02:21.690070 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:02:21.690078 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:02:21.690088 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:02:21.690095 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:02:21.690107 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:02:21.690115 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:02:21.690124 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:02:21.690132 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:02:21.690141 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:02:21.690150 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:02:21.690158 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:02:21.690166 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:02:21.690176 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:02:21.690191 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:02:21.690199 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:02:21.690208 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:02:21.690217 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:02:21.690225 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:02:21.690234 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:02:21.690243 17829 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:02:21.691902 17829 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0817 16:02:21.693521 17829 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:02:21.693754 17829 net.cpp:100] Creating Layer dataLayer\nI0817 16:02:21.693775 17829 net.cpp:408] dataLayer -> data_top\nI0817 16:02:21.693792 17829 net.cpp:408] dataLayer -> label\nI0817 16:02:21.693804 17829 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:02:21.699954 17836 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:02:21.700248 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:21.707597 17829 net.cpp:150] Setting up dataLayer\nI0817 16:02:21.707621 17829 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:02:21.707629 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.707634 17829 net.cpp:165] Memory required for data: 1536500\nI0817 16:02:21.707641 17829 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:02:21.707657 17829 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:02:21.707664 17829 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:02:21.707672 17829 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:02:21.707687 17829 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:02:21.707803 17829 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:02:21.707816 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.707825 17829 net.cpp:157] Top shape: 125 (125)\nI0817 16:02:21.707830 17829 net.cpp:165] Memory required for data: 1537500\nI0817 16:02:21.707836 17829 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:02:21.707854 17829 net.cpp:100] Creating Layer pre_conv\nI0817 16:02:21.707864 17829 net.cpp:434] pre_conv <- data_top\nI0817 16:02:21.707880 17829 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:02:21.708341 17829 net.cpp:150] Setting up pre_conv\nI0817 16:02:21.708369 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.708374 17829 net.cpp:165] Memory required for data: 9729500\nI0817 16:02:21.708394 17829 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:02:21.708410 17829 net.cpp:100] Creating Layer pre_bn\nI0817 16:02:21.708429 17829 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:02:21.708438 17829 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:02:21.708762 17829 net.cpp:150] Setting up pre_bn\nI0817 16:02:21.708777 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.708782 17829 net.cpp:165] Memory required for data: 17921500\nI0817 16:02:21.708801 17829 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:21.708811 17829 net.cpp:100] Creating Layer pre_scale\nI0817 16:02:21.708817 17829 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:02:21.708828 17829 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:02:21.708894 17829 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:02:21.709077 17829 net.cpp:150] Setting up pre_scale\nI0817 16:02:21.709090 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.709096 17829 net.cpp:165] Memory required for data: 26113500\nI0817 16:02:21.709105 17829 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:02:21.709116 17829 net.cpp:100] Creating Layer pre_relu\nI0817 16:02:21.709122 17829 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:02:21.709132 17829 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:02:21.709142 17829 net.cpp:150] Setting up pre_relu\nI0817 16:02:21.709151 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.709157 17829 net.cpp:165] Memory required for data: 34305500\nI0817 16:02:21.709161 
17829 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:02:21.709168 17829 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:02:21.709173 17829 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:02:21.709184 17829 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:02:21.709194 17829 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:02:21.709259 17829 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:02:21.709272 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.709278 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.709285 17829 net.cpp:165] Memory required for data: 50689500\nI0817 16:02:21.709291 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:02:21.709302 17829 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:02:21.709308 17829 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:02:21.709321 17829 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:02:21.709725 17829 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:02:21.709743 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.709748 17829 net.cpp:165] Memory required for data: 58881500\nI0817 16:02:21.709760 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:02:21.709791 17829 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:02:21.709802 17829 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:02:21.709811 17829 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:02:21.710352 17829 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:02:21.710367 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.710376 17829 net.cpp:165] Memory required for data: 67073500\nI0817 16:02:21.710387 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:21.710399 17829 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:02:21.710407 17829 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:02:21.710422 17829 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.710486 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:02:21.710685 17829 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:02:21.710700 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.710705 17829 net.cpp:165] Memory required for data: 75265500\nI0817 16:02:21.710726 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:02:21.710737 17829 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:02:21.710743 17829 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:02:21.710750 17829 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.710760 17829 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:02:21.710767 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.710772 17829 net.cpp:165] Memory required for data: 83457500\nI0817 16:02:21.710777 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:02:21.710794 17829 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:02:21.710800 17829 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:02:21.710813 17829 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:02:21.711208 17829 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:02:21.711222 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.711230 17829 net.cpp:165] Memory required for data: 91649500\nI0817 16:02:21.711239 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:02:21.711249 17829 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:02:21.711254 17829 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:02:21.711266 17829 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:02:21.711575 17829 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:02:21.711592 17829 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:02:21.711597 17829 net.cpp:165] Memory required for data: 99841500\nI0817 16:02:21.711616 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:21.711627 17829 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:02:21.711632 17829 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:02:21.711640 17829 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.711702 17829 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:02:21.711897 17829 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:02:21.711912 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.711917 17829 net.cpp:165] Memory required for data: 108033500\nI0817 16:02:21.711926 17829 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:02:21.711943 17829 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:02:21.711951 17829 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:02:21.711957 17829 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:02:21.711966 17829 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:02:21.712010 17829 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:02:21.712023 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.712028 17829 net.cpp:165] Memory required for data: 116225500\nI0817 16:02:21.712033 17829 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:02:21.712039 17829 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:02:21.712045 17829 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:02:21.712055 17829 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.712066 17829 net.cpp:150] Setting up L1_b1_relu\nI0817 16:02:21.712074 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.712080 17829 net.cpp:165] Memory required for data: 124417500\nI0817 16:02:21.712083 17829 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.712092 17829 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.712098 17829 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:02:21.712110 17829 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:21.712119 17829 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:21.712177 17829 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:02:21.712198 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.712205 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.712210 17829 net.cpp:165] Memory required for data: 140801500\nI0817 16:02:21.712216 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:02:21.712232 17829 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:02:21.712239 17829 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:02:21.712255 17829 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:02:21.712656 17829 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:02:21.712671 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.712677 17829 net.cpp:165] Memory required for data: 148993500\nI0817 16:02:21.712687 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:02:21.712695 17829 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:02:21.712700 17829 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:02:21.712738 17829 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:02:21.713246 17829 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:02:21.713265 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.713270 17829 net.cpp:165] Memory required for data: 157185500\nI0817 16:02:21.713281 17829 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:02:21.713294 17829 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:02:21.713299 17829 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:02:21.713306 17829 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.713369 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:02:21.713562 17829 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:02:21.713575 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.713582 17829 net.cpp:165] Memory required for data: 165377500\nI0817 16:02:21.713593 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:02:21.713603 17829 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:02:21.713609 17829 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:02:21.713619 17829 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.713634 17829 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:02:21.713640 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.713645 17829 net.cpp:165] Memory required for data: 173569500\nI0817 16:02:21.713649 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:02:21.713660 17829 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:02:21.713665 17829 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:02:21.713677 17829 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:02:21.714071 17829 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:02:21.714085 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.714090 17829 net.cpp:165] Memory required for data: 181761500\nI0817 16:02:21.714102 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:02:21.714112 17829 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:02:21.714118 17829 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:02:21.714129 17829 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:02:21.714455 17829 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:02:21.714469 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.714475 17829 net.cpp:165] Memory required for data: 189953500\nI0817 16:02:21.714498 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:21.714507 17829 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:02:21.714514 17829 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:02:21.714520 17829 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.714591 17829 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:02:21.714785 17829 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:02:21.714802 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.714808 17829 net.cpp:165] Memory required for data: 198145500\nI0817 16:02:21.714825 17829 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:02:21.714834 17829 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:02:21.714844 17829 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:02:21.714850 17829 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:02:21.714862 17829 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:02:21.714901 17829 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:02:21.714912 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.714918 17829 net.cpp:165] Memory required for data: 206337500\nI0817 16:02:21.714923 17829 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:02:21.714934 17829 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:02:21.714939 17829 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:02:21.714947 17829 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:02:21.714956 17829 net.cpp:150] Setting up L1_b2_relu\nI0817 16:02:21.714962 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.714967 17829 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:02:21.714974 17829 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.714982 17829 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.714987 17829 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:02:21.714994 17829 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:21.715004 17829 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:21.715062 17829 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:02:21.715075 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.715081 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.715086 17829 net.cpp:165] Memory required for data: 230913500\nI0817 16:02:21.715091 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:02:21.715108 17829 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:02:21.715116 17829 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:02:21.715126 17829 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:02:21.715533 17829 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:02:21.715548 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.715553 17829 net.cpp:165] Memory required for data: 239105500\nI0817 16:02:21.715564 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:02:21.715576 17829 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:02:21.715582 17829 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:02:21.715595 17829 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:02:21.716089 17829 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:02:21.716104 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.716111 17829 net.cpp:165] Memory required for data: 
247297500\nI0817 16:02:21.716122 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:21.716132 17829 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:02:21.716138 17829 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:02:21.716150 17829 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.716226 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:02:21.716410 17829 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:02:21.716435 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.716440 17829 net.cpp:165] Memory required for data: 255489500\nI0817 16:02:21.716449 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:02:21.716459 17829 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:02:21.716466 17829 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:02:21.716478 17829 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.716495 17829 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:02:21.716506 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.716511 17829 net.cpp:165] Memory required for data: 263681500\nI0817 16:02:21.716516 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:02:21.716527 17829 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:02:21.716534 17829 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:02:21.716547 17829 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:02:21.716948 17829 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:02:21.716964 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.716969 17829 net.cpp:165] Memory required for data: 271873500\nI0817 16:02:21.716979 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:02:21.716995 17829 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:02:21.717005 17829 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:02:21.717013 17829 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:02:21.717339 17829 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:02:21.717355 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.717362 17829 net.cpp:165] Memory required for data: 280065500\nI0817 16:02:21.717373 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:21.717381 17829 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:02:21.717387 17829 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:02:21.717398 17829 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.717473 17829 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:02:21.717653 17829 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:02:21.717669 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.717675 17829 net.cpp:165] Memory required for data: 288257500\nI0817 16:02:21.717684 17829 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:02:21.717692 17829 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:02:21.717699 17829 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:02:21.717705 17829 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:02:21.717715 17829 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:02:21.717752 17829 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:02:21.717764 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.717770 17829 net.cpp:165] Memory required for data: 296449500\nI0817 16:02:21.717775 17829 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:02:21.717788 17829 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:02:21.717794 17829 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:02:21.717802 17829 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.717810 17829 net.cpp:150] Setting up L1_b3_relu\nI0817 16:02:21.717818 17829 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:02:21.717823 17829 net.cpp:165] Memory required for data: 304641500\nI0817 16:02:21.717826 17829 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.717833 17829 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.717839 17829 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:02:21.717845 17829 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:21.717854 17829 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:21.717905 17829 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:02:21.717916 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.717923 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.717927 17829 net.cpp:165] Memory required for data: 321025500\nI0817 16:02:21.717933 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:02:21.717947 17829 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:02:21.717960 17829 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:02:21.717969 17829 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:02:21.718318 17829 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:02:21.718333 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.718338 17829 net.cpp:165] Memory required for data: 329217500\nI0817 16:02:21.718348 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:02:21.718358 17829 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:02:21.718364 17829 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:02:21.718372 17829 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:02:21.718652 17829 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:02:21.718664 17829 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:02:21.718669 17829 net.cpp:165] Memory required for data: 337409500\nI0817 16:02:21.718679 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:21.718688 17829 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:02:21.718693 17829 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:02:21.718701 17829 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.718760 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:02:21.718919 17829 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:02:21.718931 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.718936 17829 net.cpp:165] Memory required for data: 345601500\nI0817 16:02:21.718945 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:02:21.718953 17829 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:02:21.718960 17829 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:02:21.718969 17829 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.718979 17829 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:02:21.718986 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.718991 17829 net.cpp:165] Memory required for data: 353793500\nI0817 16:02:21.718994 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:02:21.719005 17829 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:02:21.719010 17829 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:02:21.719022 17829 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:02:21.719388 17829 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:02:21.719403 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.719408 17829 net.cpp:165] Memory required for data: 361985500\nI0817 16:02:21.719421 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:02:21.719431 17829 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:02:21.719437 17829 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:02:21.719449 17829 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:02:21.719723 17829 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:02:21.719738 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.719743 17829 net.cpp:165] Memory required for data: 370177500\nI0817 16:02:21.719753 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:21.719761 17829 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:02:21.719766 17829 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:02:21.719774 17829 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.719833 17829 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:02:21.719987 17829 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:02:21.720000 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720005 17829 net.cpp:165] Memory required for data: 378369500\nI0817 16:02:21.720013 17829 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:02:21.720022 17829 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:02:21.720028 17829 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:02:21.720034 17829 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:02:21.720052 17829 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:02:21.720086 17829 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:02:21.720095 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720100 17829 net.cpp:165] Memory required for data: 386561500\nI0817 16:02:21.720105 17829 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:02:21.720116 17829 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:02:21.720122 17829 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:02:21.720129 17829 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:02:21.720137 17829 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:02:21.720144 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720149 17829 net.cpp:165] Memory required for data: 394753500\nI0817 16:02:21.720154 17829 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.720160 17829 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.720165 17829 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:02:21.720172 17829 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:21.720181 17829 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:21.720229 17829 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:02:21.720240 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720247 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720252 17829 net.cpp:165] Memory required for data: 411137500\nI0817 16:02:21.720257 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:02:21.720270 17829 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:02:21.720276 17829 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:02:21.720285 17829 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:02:21.720649 17829 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:02:21.720664 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.720669 17829 net.cpp:165] Memory required for data: 419329500\nI0817 16:02:21.720691 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:02:21.720703 17829 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:02:21.720710 17829 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:02:21.720721 17829 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:02:21.720991 17829 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:02:21.721004 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.721009 17829 net.cpp:165] Memory required for data: 427521500\nI0817 16:02:21.721019 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:21.721027 17829 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:02:21.721034 17829 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:02:21.721041 17829 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.721124 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:02:21.721287 17829 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:02:21.721299 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.721304 17829 net.cpp:165] Memory required for data: 435713500\nI0817 16:02:21.721313 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:02:21.721321 17829 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:02:21.721328 17829 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:02:21.721338 17829 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.721349 17829 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:02:21.721355 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.721359 17829 net.cpp:165] Memory required for data: 443905500\nI0817 16:02:21.721364 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:02:21.721385 17829 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:02:21.721390 17829 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:02:21.721400 17829 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:02:21.721753 17829 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:02:21.721768 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.721773 17829 net.cpp:165] Memory required for data: 452097500\nI0817 16:02:21.721781 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:02:21.721793 17829 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:02:21.721801 17829 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:02:21.721808 17829 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:02:21.722082 17829 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:02:21.722098 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722105 17829 net.cpp:165] Memory required for data: 460289500\nI0817 16:02:21.722115 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:21.722122 17829 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:02:21.722128 17829 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:02:21.722136 17829 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.722193 17829 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:02:21.722350 17829 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:02:21.722363 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722369 17829 net.cpp:165] Memory required for data: 468481500\nI0817 16:02:21.722378 17829 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:02:21.722389 17829 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:02:21.722395 17829 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:02:21.722403 17829 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:02:21.722417 17829 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:02:21.722452 17829 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:02:21.722461 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722466 17829 net.cpp:165] Memory required for data: 476673500\nI0817 16:02:21.722471 17829 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:02:21.722483 17829 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:02:21.722489 17829 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:02:21.722496 17829 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.722506 17829 net.cpp:150] Setting up L1_b5_relu\nI0817 16:02:21.722512 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722517 17829 net.cpp:165] Memory required for data: 484865500\nI0817 16:02:21.722522 17829 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.722528 17829 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.722534 17829 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:02:21.722542 17829 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:21.722550 17829 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:21.722602 17829 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:02:21.722615 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722621 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.722626 17829 net.cpp:165] Memory required for data: 501249500\nI0817 16:02:21.722631 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:02:21.722645 17829 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:02:21.722651 17829 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:02:21.722659 17829 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:02:21.723031 17829 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:02:21.723045 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.723057 17829 net.cpp:165] Memory required for data: 509441500\nI0817 16:02:21.723067 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:02:21.723078 17829 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:02:21.723084 17829 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:02:21.723093 17829 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:02:21.723371 17829 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:02:21.723384 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.723390 17829 net.cpp:165] Memory required for data: 517633500\nI0817 16:02:21.723400 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:21.723408 17829 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:02:21.723420 17829 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:02:21.723428 17829 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.723489 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:02:21.723649 17829 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:02:21.723662 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.723667 17829 net.cpp:165] Memory required for data: 525825500\nI0817 16:02:21.723676 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:02:21.723683 17829 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:02:21.723690 17829 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:02:21.723700 17829 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.723709 17829 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:02:21.723716 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.723721 17829 net.cpp:165] Memory required for data: 534017500\nI0817 16:02:21.723726 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:02:21.723736 17829 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:02:21.723740 17829 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:02:21.723752 17829 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:02:21.724100 17829 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:02:21.724114 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.724119 17829 net.cpp:165] Memory required for data: 542209500\nI0817 16:02:21.724128 17829 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:02:21.724136 17829 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:02:21.724143 17829 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:02:21.724153 17829 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:02:21.724436 17829 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:02:21.724452 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.724458 17829 net.cpp:165] Memory required for data: 550401500\nI0817 16:02:21.724468 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:21.724476 17829 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:02:21.724483 17829 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:02:21.724489 17829 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.724547 17829 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:02:21.724721 17829 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:02:21.724736 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.724741 17829 net.cpp:165] Memory required for data: 558593500\nI0817 16:02:21.724750 17829 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:02:21.724771 17829 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:02:21.724778 17829 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:02:21.724786 17829 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:02:21.724793 17829 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:02:21.724833 17829 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:02:21.724843 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.724848 17829 net.cpp:165] Memory required for data: 566785500\nI0817 16:02:21.724859 17829 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:02:21.724867 17829 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:02:21.724874 17829 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:02:21.724880 17829 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.724889 17829 net.cpp:150] Setting up L1_b6_relu\nI0817 16:02:21.724896 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.724900 17829 net.cpp:165] Memory required for data: 574977500\nI0817 16:02:21.724905 17829 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.724912 17829 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.724917 17829 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:02:21.724927 17829 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:21.724937 17829 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:21.724984 17829 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:02:21.724997 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.725003 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.725008 17829 net.cpp:165] Memory required for data: 591361500\nI0817 16:02:21.725013 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:02:21.725026 17829 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:02:21.725033 17829 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:02:21.725041 17829 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:02:21.725396 17829 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:02:21.725415 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.725421 17829 net.cpp:165] Memory required for data: 599553500\nI0817 16:02:21.725430 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:02:21.725442 17829 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:02:21.725448 17829 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:02:21.725458 17829 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:02:21.725735 17829 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:02:21.725749 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.725754 17829 net.cpp:165] Memory required for data: 607745500\nI0817 16:02:21.725764 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:21.725772 17829 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:02:21.725777 17829 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:02:21.725788 17829 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.725847 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:02:21.726011 17829 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:02:21.726027 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.726032 17829 net.cpp:165] Memory required for data: 615937500\nI0817 16:02:21.726040 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:02:21.726048 17829 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:02:21.726054 17829 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:02:21.726061 17829 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.726070 17829 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:02:21.726078 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.726083 17829 net.cpp:165] Memory required for data: 624129500\nI0817 16:02:21.726086 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:02:21.726099 17829 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:02:21.726105 17829 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:02:21.726116 17829 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:02:21.726482 17829 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:02:21.726496 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.726507 17829 
net.cpp:165] Memory required for data: 632321500\nI0817 16:02:21.726517 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:02:21.726560 17829 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:02:21.726570 17829 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:02:21.726583 17829 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:02:21.726857 17829 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:02:21.726871 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.726876 17829 net.cpp:165] Memory required for data: 640513500\nI0817 16:02:21.726886 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:21.726894 17829 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:02:21.726899 17829 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:02:21.726907 17829 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.726969 17829 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:02:21.727128 17829 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:02:21.727141 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727146 17829 net.cpp:165] Memory required for data: 648705500\nI0817 16:02:21.727155 17829 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:02:21.727167 17829 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:02:21.727174 17829 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:02:21.727180 17829 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:02:21.727187 17829 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:02:21.727224 17829 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:02:21.727236 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727241 17829 net.cpp:165] Memory required for data: 656897500\nI0817 16:02:21.727246 17829 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:02:21.727253 17829 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:02:21.727259 17829 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:02:21.727269 17829 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.727278 17829 net.cpp:150] Setting up L1_b7_relu\nI0817 16:02:21.727285 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727289 17829 net.cpp:165] Memory required for data: 665089500\nI0817 16:02:21.727294 17829 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.727300 17829 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.727305 17829 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:02:21.727315 17829 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:21.727325 17829 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:21.727373 17829 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:02:21.727385 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727391 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727396 17829 net.cpp:165] Memory required for data: 681473500\nI0817 16:02:21.727401 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:02:21.727419 17829 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:02:21.727427 17829 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:02:21.727437 17829 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:02:21.727798 17829 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:02:21.727813 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.727818 17829 net.cpp:165] Memory required for data: 689665500\nI0817 16:02:21.727826 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:02:21.727838 17829 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:02:21.727843 17829 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:02:21.727861 17829 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:02:21.728140 17829 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:02:21.728153 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.728158 17829 net.cpp:165] Memory required for data: 697857500\nI0817 16:02:21.728168 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:21.728178 17829 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:02:21.728183 17829 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:02:21.728190 17829 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.728250 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:02:21.728418 17829 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:02:21.728432 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.728437 17829 net.cpp:165] Memory required for data: 706049500\nI0817 16:02:21.728446 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:02:21.728457 17829 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:02:21.728463 17829 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:02:21.728471 17829 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.728480 17829 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:02:21.728487 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.728492 17829 net.cpp:165] Memory required for data: 714241500\nI0817 16:02:21.728497 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:02:21.728509 17829 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:02:21.728515 17829 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:02:21.728526 17829 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:02:21.728883 17829 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:02:21.728896 17829 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.728901 17829 net.cpp:165] Memory required for data: 722433500\nI0817 16:02:21.728910 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:02:21.728922 17829 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:02:21.728929 17829 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:02:21.728937 17829 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:02:21.729218 17829 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:02:21.729233 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729238 17829 net.cpp:165] Memory required for data: 730625500\nI0817 16:02:21.729248 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:21.729255 17829 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:02:21.729261 17829 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:02:21.729269 17829 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.729329 17829 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:02:21.729497 17829 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:02:21.729511 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729516 17829 net.cpp:165] Memory required for data: 738817500\nI0817 16:02:21.729526 17829 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:02:21.729534 17829 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:02:21.729540 17829 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:02:21.729547 17829 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:02:21.729557 17829 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:02:21.729593 17829 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:02:21.729604 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729609 17829 net.cpp:165] Memory required for data: 747009500\nI0817 16:02:21.729614 17829 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:02:21.729624 17829 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:02:21.729629 17829 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:02:21.729636 17829 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.729652 17829 net.cpp:150] Setting up L1_b8_relu\nI0817 16:02:21.729660 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729665 17829 net.cpp:165] Memory required for data: 755201500\nI0817 16:02:21.729668 17829 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.729676 17829 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.729681 17829 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:02:21.729687 17829 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:21.729697 17829 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:21.729748 17829 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:02:21.729759 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729766 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.729770 17829 net.cpp:165] Memory required for data: 771585500\nI0817 16:02:21.729775 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:02:21.729789 17829 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:02:21.729795 17829 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:02:21.729804 17829 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:02:21.730165 17829 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:02:21.730182 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.730187 17829 net.cpp:165] Memory required for data: 779777500\nI0817 16:02:21.730196 17829 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:02:21.730206 17829 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:02:21.730211 17829 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:02:21.730219 17829 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:02:21.730509 17829 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:02:21.730522 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.730526 17829 net.cpp:165] Memory required for data: 787969500\nI0817 16:02:21.730537 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:21.730548 17829 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:02:21.730554 17829 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:02:21.730562 17829 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.730623 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:02:21.730788 17829 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:02:21.730801 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.730806 17829 net.cpp:165] Memory required for data: 796161500\nI0817 16:02:21.730815 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:02:21.730823 17829 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:02:21.730829 17829 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:02:21.730839 17829 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.730849 17829 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:02:21.730856 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.730861 17829 net.cpp:165] Memory required for data: 804353500\nI0817 16:02:21.730865 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:02:21.730880 17829 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:02:21.730885 17829 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:02:21.730893 17829 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:02:21.731243 17829 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:02:21.731257 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.731262 17829 net.cpp:165] Memory required for data: 812545500\nI0817 16:02:21.731271 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:02:21.731282 17829 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:02:21.731288 17829 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:02:21.731303 17829 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:02:21.731585 17829 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:02:21.731600 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.731604 17829 net.cpp:165] Memory required for data: 820737500\nI0817 16:02:21.731637 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:21.731647 17829 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:02:21.731654 17829 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:02:21.731662 17829 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.731722 17829 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:02:21.731880 17829 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:02:21.731894 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.731899 17829 net.cpp:165] Memory required for data: 828929500\nI0817 16:02:21.731906 17829 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:02:21.731915 17829 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:02:21.731921 17829 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:02:21.731928 17829 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:02:21.731935 17829 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:02:21.731972 17829 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:02:21.731982 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.731987 17829 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:02:21.731992 17829 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:02:21.731999 17829 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:02:21.732005 17829 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:02:21.732014 17829 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.732024 17829 net.cpp:150] Setting up L1_b9_relu\nI0817 16:02:21.732031 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.732035 17829 net.cpp:165] Memory required for data: 845313500\nI0817 16:02:21.732040 17829 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.732048 17829 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.732053 17829 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:02:21.732061 17829 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:21.732071 17829 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:21.732118 17829 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:02:21.732130 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.732136 17829 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:02:21.732141 17829 net.cpp:165] Memory required for data: 861697500\nI0817 16:02:21.732146 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:02:21.732159 17829 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:02:21.732165 17829 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:02:21.732174 17829 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:02:21.732549 17829 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:02:21.732563 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.732569 17829 net.cpp:165] Memory required for data: 
863745500\nI0817 16:02:21.732578 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:02:21.732589 17829 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:02:21.732595 17829 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:02:21.732604 17829 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:02:21.732872 17829 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:02:21.732884 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.732889 17829 net.cpp:165] Memory required for data: 865793500\nI0817 16:02:21.732899 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:21.732914 17829 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:02:21.732921 17829 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:02:21.732931 17829 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.732991 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:02:21.733150 17829 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:02:21.733163 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.733168 17829 net.cpp:165] Memory required for data: 867841500\nI0817 16:02:21.733177 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:02:21.733184 17829 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:02:21.733191 17829 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:02:21.733197 17829 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.733207 17829 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:02:21.733213 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.733218 17829 net.cpp:165] Memory required for data: 869889500\nI0817 16:02:21.733223 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:02:21.733237 17829 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:02:21.733242 17829 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:02:21.733253 17829 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:02:21.733614 17829 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:02:21.733629 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.733634 17829 net.cpp:165] Memory required for data: 871937500\nI0817 16:02:21.733642 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:02:21.733654 17829 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:02:21.733661 17829 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:02:21.733671 17829 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:02:21.733937 17829 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:02:21.733950 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.733955 17829 net.cpp:165] Memory required for data: 873985500\nI0817 16:02:21.733965 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:21.733973 17829 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:02:21.733980 17829 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:02:21.733988 17829 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.734048 17829 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:02:21.734206 17829 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:02:21.734222 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.734227 17829 net.cpp:165] Memory required for data: 876033500\nI0817 16:02:21.734236 17829 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:02:21.734246 17829 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:02:21.734251 17829 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:02:21.734259 17829 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:02:21.734292 17829 net.cpp:150] Setting up L2_b1_pool\nI0817 16:02:21.734302 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.734308 17829 net.cpp:165] Memory required for data: 878081500\nI0817 16:02:21.734313 17829 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:02:21.734320 17829 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:02:21.734326 17829 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:02:21.734333 17829 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:02:21.734344 17829 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:02:21.734376 17829 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:02:21.734387 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.734392 17829 net.cpp:165] Memory required for data: 880129500\nI0817 16:02:21.734397 17829 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:02:21.734407 17829 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:02:21.734426 17829 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:02:21.734433 17829 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.734443 17829 net.cpp:150] Setting up L2_b1_relu\nI0817 16:02:21.734450 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.734454 17829 net.cpp:165] Memory required for data: 882177500\nI0817 16:02:21.734459 17829 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:02:21.734468 17829 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:02:21.734479 17829 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:02:21.736704 17829 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:02:21.736723 17829 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:02:21.736728 17829 net.cpp:165] Memory required for data: 884225500\nI0817 16:02:21.736734 17829 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:02:21.736743 17829 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:02:21.736750 17829 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:02:21.736757 17829 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:02:21.736768 17829 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:02:21.736811 17829 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:02:21.736826 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.736831 17829 net.cpp:165] Memory required for data: 888321500\nI0817 16:02:21.736837 17829 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.736845 17829 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.736850 17829 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:02:21.736860 17829 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:21.736871 17829 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:21.736920 17829 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:02:21.736932 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.736937 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.736943 17829 net.cpp:165] Memory required for data: 896513500\nI0817 16:02:21.736948 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:02:21.736961 17829 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:02:21.736968 17829 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:02:21.736979 17829 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:02:21.737491 17829 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:02:21.737506 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.737511 17829 net.cpp:165] Memory required for data: 900609500\nI0817 16:02:21.737520 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:02:21.737532 17829 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:02:21.737538 17829 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:02:21.737547 17829 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:02:21.737821 17829 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:02:21.737834 17829 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:02:21.737839 17829 net.cpp:165] Memory required for data: 904705500\nI0817 16:02:21.737849 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:21.737860 17829 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:02:21.737867 17829 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:02:21.737874 17829 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.737933 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:02:21.738091 17829 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:02:21.738104 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.738109 17829 net.cpp:165] Memory required for data: 908801500\nI0817 16:02:21.738117 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:02:21.738128 17829 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:02:21.738142 17829 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:02:21.738152 17829 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.738162 17829 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:02:21.738169 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.738174 17829 net.cpp:165] Memory required for data: 912897500\nI0817 16:02:21.738179 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:02:21.738189 17829 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:02:21.738195 17829 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:02:21.738206 17829 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:02:21.738713 17829 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:02:21.738726 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.738731 17829 net.cpp:165] Memory required for data: 916993500\nI0817 16:02:21.738740 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:02:21.738749 17829 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:02:21.738755 
17829 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:02:21.738766 17829 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:02:21.739032 17829 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:02:21.739044 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739049 17829 net.cpp:165] Memory required for data: 921089500\nI0817 16:02:21.739059 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:21.739071 17829 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:02:21.739078 17829 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:02:21.739085 17829 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.739142 17829 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:02:21.739303 17829 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:02:21.739316 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739321 17829 net.cpp:165] Memory required for data: 925185500\nI0817 16:02:21.739329 17829 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:02:21.739341 17829 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:02:21.739348 17829 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:02:21.739357 17829 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:02:21.739364 17829 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:02:21.739395 17829 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:02:21.739404 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739409 17829 net.cpp:165] Memory required for data: 929281500\nI0817 16:02:21.739420 17829 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:02:21.739428 17829 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:02:21.739434 17829 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:02:21.739444 17829 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:02:21.739454 17829 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:02:21.739460 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739465 17829 net.cpp:165] Memory required for data: 933377500\nI0817 16:02:21.739470 17829 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.739476 17829 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.739481 17829 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:02:21.739488 17829 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:21.739498 17829 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:21.739547 17829 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:02:21.739559 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739567 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.739570 17829 net.cpp:165] Memory required for data: 941569500\nI0817 16:02:21.739583 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:02:21.739593 17829 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:02:21.739599 17829 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:02:21.739611 17829 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:02:21.740108 17829 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:02:21.740123 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.740128 17829 net.cpp:165] Memory required for data: 945665500\nI0817 16:02:21.740136 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:02:21.740145 17829 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:02:21.740151 17829 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:02:21.740162 17829 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:02:21.740437 17829 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:02:21.740450 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.740455 17829 net.cpp:165] Memory required for data: 949761500\nI0817 16:02:21.740465 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:21.740476 17829 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:02:21.740483 17829 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:02:21.740491 17829 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.740548 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:02:21.740710 17829 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:02:21.740722 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.740727 17829 net.cpp:165] Memory required for data: 953857500\nI0817 16:02:21.740736 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:02:21.740747 17829 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:02:21.740753 17829 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:02:21.740761 17829 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.740770 17829 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:02:21.740777 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.740782 17829 net.cpp:165] Memory required for data: 957953500\nI0817 16:02:21.740787 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:02:21.740799 17829 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:02:21.740805 17829 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:02:21.740818 17829 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:02:21.741309 17829 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:02:21.741323 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.741328 17829 net.cpp:165] Memory required for data: 962049500\nI0817 16:02:21.741338 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:02:21.741346 17829 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:02:21.741353 17829 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:02:21.741366 17829 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:02:21.741643 17829 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:02:21.741657 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.741662 17829 net.cpp:165] Memory required for data: 966145500\nI0817 16:02:21.741673 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:21.741684 17829 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:02:21.741690 17829 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:02:21.741698 17829 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.741756 17829 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:02:21.741914 17829 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:02:21.741926 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.741931 17829 net.cpp:165] Memory required for data: 970241500\nI0817 16:02:21.741940 17829 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:02:21.741951 17829 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:02:21.741964 17829 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:02:21.741971 17829 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:02:21.741979 17829 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:02:21.742012 17829 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:02:21.742023 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.742027 17829 net.cpp:165] Memory required for data: 974337500\nI0817 16:02:21.742033 17829 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:02:21.742053 17829 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:02:21.742059 17829 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:02:21.742067 17829 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.742076 17829 net.cpp:150] Setting up L2_b3_relu\nI0817 16:02:21.742084 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.742087 17829 net.cpp:165] Memory required for data: 978433500\nI0817 16:02:21.742092 17829 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.742102 17829 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.742107 17829 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:02:21.742115 17829 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:21.742125 17829 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:21.742173 17829 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:02:21.742188 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.742195 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.742200 17829 net.cpp:165] Memory required for data: 986625500\nI0817 16:02:21.742205 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:02:21.742215 17829 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:02:21.742223 17829 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:02:21.742230 17829 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:02:21.742734 17829 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:02:21.742748 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.742754 17829 net.cpp:165] Memory required for data: 990721500\nI0817 16:02:21.742763 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:02:21.742774 17829 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:02:21.742781 17829 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:02:21.742789 17829 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:02:21.743063 17829 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:02:21.743077 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.743083 17829 net.cpp:165] Memory required for data: 994817500\nI0817 16:02:21.743093 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:21.743100 17829 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:02:21.743106 17829 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:02:21.743116 17829 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.743177 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:02:21.743336 17829 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:02:21.743350 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.743355 17829 net.cpp:165] Memory required for data: 998913500\nI0817 16:02:21.743363 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:02:21.743371 17829 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:02:21.743377 17829 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:02:21.743389 17829 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.743399 17829 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:02:21.743407 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.743415 17829 net.cpp:165] Memory required for data: 1003009500\nI0817 16:02:21.743428 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:02:21.743443 17829 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:02:21.743448 17829 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:02:21.743458 17829 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:02:21.743949 17829 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:02:21.743963 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.743968 17829 net.cpp:165] Memory required for data: 1007105500\nI0817 16:02:21.743976 17829 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:02:21.743988 17829 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:02:21.743994 17829 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:02:21.744004 17829 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:02:21.744267 17829 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:02:21.744280 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744285 17829 net.cpp:165] Memory required for data: 1011201500\nI0817 16:02:21.744295 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:21.744304 17829 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:02:21.744310 17829 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:02:21.744318 17829 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.744379 17829 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:02:21.744546 17829 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:02:21.744561 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744566 17829 net.cpp:165] Memory required for data: 1015297500\nI0817 16:02:21.744575 17829 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:02:21.744585 17829 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:02:21.744590 17829 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:02:21.744597 17829 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:02:21.744607 17829 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:02:21.744635 17829 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:02:21.744644 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744649 17829 net.cpp:165] Memory required for data: 1019393500\nI0817 16:02:21.744654 17829 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:02:21.744662 17829 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:02:21.744668 17829 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:02:21.744681 17829 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:02:21.744690 17829 net.cpp:150] Setting up L2_b4_relu\nI0817 16:02:21.744698 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744702 17829 net.cpp:165] Memory required for data: 1023489500\nI0817 16:02:21.744706 17829 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.744714 17829 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.744719 17829 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:02:21.744729 17829 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:21.744738 17829 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:21.744786 17829 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:02:21.744797 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744803 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.744808 17829 net.cpp:165] Memory required for data: 1031681500\nI0817 16:02:21.744813 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:02:21.744827 17829 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:02:21.744833 17829 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:02:21.744843 17829 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:02:21.745348 17829 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:02:21.745362 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.745368 17829 net.cpp:165] Memory required for data: 1035777500\nI0817 16:02:21.745378 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:02:21.745389 17829 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:02:21.745395 17829 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:02:21.745404 17829 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:02:21.745676 17829 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:02:21.745690 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.745695 17829 net.cpp:165] Memory required for data: 1039873500\nI0817 16:02:21.745705 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:21.745714 17829 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:02:21.745720 17829 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:02:21.745730 17829 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.745790 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:02:21.745950 17829 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:02:21.745964 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.745968 17829 net.cpp:165] Memory required for data: 1043969500\nI0817 16:02:21.745976 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:02:21.745985 17829 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:02:21.745990 17829 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:02:21.746001 17829 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.746011 17829 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:02:21.746017 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.746021 17829 net.cpp:165] Memory required for data: 1048065500\nI0817 16:02:21.746026 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:02:21.746039 17829 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:02:21.746045 17829 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:02:21.746054 17829 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:02:21.746551 17829 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:02:21.746564 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.746569 17829 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:02:21.746578 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:02:21.746587 17829 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:02:21.746593 17829 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:02:21.746604 17829 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:02:21.746873 17829 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:02:21.746887 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.746891 17829 net.cpp:165] Memory required for data: 1056257500\nI0817 16:02:21.746902 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:21.746911 17829 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:02:21.746917 17829 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:02:21.746924 17829 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.746984 17829 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:02:21.747139 17829 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:02:21.747154 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747160 17829 net.cpp:165] Memory required for data: 1060353500\nI0817 16:02:21.747169 17829 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:02:21.747177 17829 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:02:21.747184 17829 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:02:21.747190 17829 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:02:21.747198 17829 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:02:21.747228 17829 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:02:21.747243 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747248 17829 net.cpp:165] Memory required for data: 1064449500\nI0817 16:02:21.747254 17829 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:02:21.747262 17829 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:02:21.747267 17829 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:02:21.747277 17829 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.747287 17829 net.cpp:150] Setting up L2_b5_relu\nI0817 16:02:21.747293 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747298 17829 net.cpp:165] Memory required for data: 1068545500\nI0817 16:02:21.747303 17829 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.747309 17829 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.747314 17829 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:02:21.747324 17829 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:21.747334 17829 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:21.747381 17829 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:02:21.747393 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747400 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747404 17829 net.cpp:165] Memory required for data: 1076737500\nI0817 16:02:21.747409 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:02:21.747428 17829 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:02:21.747436 17829 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:02:21.747445 17829 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:02:21.747943 17829 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:02:21.747957 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.747962 17829 net.cpp:165] Memory required for data: 1080833500\nI0817 16:02:21.747972 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:02:21.747982 17829 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:02:21.747989 17829 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:02:21.747997 17829 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:02:21.748257 17829 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:02:21.748270 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.748275 17829 net.cpp:165] Memory required for data: 1084929500\nI0817 16:02:21.748286 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:21.748294 17829 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:02:21.748301 17829 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:02:21.748311 17829 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.748373 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:02:21.748536 17829 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:02:21.748550 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.748555 17829 net.cpp:165] Memory required for data: 1089025500\nI0817 16:02:21.748564 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:02:21.748571 17829 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:02:21.748577 17829 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:02:21.748584 17829 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.748594 17829 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:02:21.748601 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.748605 17829 net.cpp:165] Memory required for data: 1093121500\nI0817 16:02:21.748610 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:02:21.748623 17829 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:02:21.748630 17829 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:02:21.748641 17829 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:02:21.749140 17829 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:02:21.749155 17829 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749159 17829 net.cpp:165] Memory required for data: 1097217500\nI0817 16:02:21.749167 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:02:21.749179 17829 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:02:21.749186 17829 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:02:21.749197 17829 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:02:21.749469 17829 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:02:21.749481 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749486 17829 net.cpp:165] Memory required for data: 1101313500\nI0817 16:02:21.749496 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:21.749505 17829 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:02:21.749511 17829 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:02:21.749518 17829 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.749578 17829 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:02:21.749732 17829 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:02:21.749745 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749750 17829 net.cpp:165] Memory required for data: 1105409500\nI0817 16:02:21.749759 17829 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:02:21.749770 17829 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:02:21.749778 17829 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:02:21.749783 17829 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:02:21.749791 17829 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:02:21.749821 17829 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:02:21.749831 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749835 17829 net.cpp:165] Memory required for data: 1109505500\nI0817 16:02:21.749841 17829 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:02:21.749848 17829 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:02:21.749855 17829 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:02:21.749864 17829 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.749874 17829 net.cpp:150] Setting up L2_b6_relu\nI0817 16:02:21.749881 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749886 17829 net.cpp:165] Memory required for data: 1113601500\nI0817 16:02:21.749891 17829 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.749897 17829 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.749902 17829 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:02:21.749912 17829 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:21.749922 17829 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:21.749969 17829 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:02:21.749979 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749986 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.749991 17829 net.cpp:165] Memory required for data: 1121793500\nI0817 16:02:21.749995 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:02:21.750012 17829 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:02:21.750020 17829 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:02:21.750028 17829 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:02:21.751502 17829 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:02:21.751519 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.751524 17829 net.cpp:165] Memory required for data: 1125889500\nI0817 16:02:21.751533 17829 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:02:21.751554 17829 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:02:21.751560 17829 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:02:21.751569 17829 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:02:21.751838 17829 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:02:21.751852 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.751857 17829 net.cpp:165] Memory required for data: 1129985500\nI0817 16:02:21.751868 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:21.751878 17829 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:02:21.751885 17829 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:02:21.751893 17829 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.751953 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:02:21.752113 17829 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:02:21.752126 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.752131 17829 net.cpp:165] Memory required for data: 1134081500\nI0817 16:02:21.752141 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:02:21.752152 17829 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:02:21.752158 17829 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:02:21.752171 17829 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.752182 17829 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:02:21.752188 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.752192 17829 net.cpp:165] Memory required for data: 1138177500\nI0817 16:02:21.752197 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:02:21.752208 17829 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:02:21.752214 17829 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:02:21.752225 17829 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:02:21.752717 17829 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:02:21.752732 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.752737 17829 net.cpp:165] Memory required for data: 1142273500\nI0817 16:02:21.752745 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:02:21.752754 17829 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:02:21.752761 17829 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:02:21.752773 17829 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:02:21.753039 17829 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:02:21.753052 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753057 17829 net.cpp:165] Memory required for data: 1146369500\nI0817 16:02:21.753067 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:21.753078 17829 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:02:21.753085 17829 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:02:21.753093 17829 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.753149 17829 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:02:21.753304 17829 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:02:21.753316 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753321 17829 net.cpp:165] Memory required for data: 1150465500\nI0817 16:02:21.753330 17829 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:02:21.753341 17829 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:02:21.753348 17829 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:02:21.753355 17829 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:02:21.753363 17829 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:02:21.753394 17829 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:02:21.753403 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753408 17829 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:02:21.753419 17829 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:02:21.753427 17829 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:02:21.753439 17829 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:02:21.753450 17829 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.753460 17829 net.cpp:150] Setting up L2_b7_relu\nI0817 16:02:21.753468 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753473 17829 net.cpp:165] Memory required for data: 1158657500\nI0817 16:02:21.753478 17829 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.753484 17829 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.753489 17829 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:02:21.753496 17829 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:21.753505 17829 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:21.753558 17829 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:02:21.753571 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753577 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.753582 17829 net.cpp:165] Memory required for data: 1166849500\nI0817 16:02:21.753587 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:02:21.753597 17829 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:02:21.753604 17829 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:02:21.753615 17829 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:02:21.754103 17829 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:02:21.754117 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.754122 17829 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:02:21.754130 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:02:21.754139 17829 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:02:21.754146 17829 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:02:21.754156 17829 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:02:21.754436 17829 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:02:21.754451 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.754456 17829 net.cpp:165] Memory required for data: 1175041500\nI0817 16:02:21.754465 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:21.754477 17829 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:02:21.754483 17829 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:02:21.754490 17829 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.754550 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:02:21.754706 17829 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:02:21.754719 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.754724 17829 net.cpp:165] Memory required for data: 1179137500\nI0817 16:02:21.754734 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:02:21.754745 17829 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:02:21.754750 17829 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:02:21.754757 17829 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.754766 17829 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:02:21.754776 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.754781 17829 net.cpp:165] Memory required for data: 1183233500\nI0817 16:02:21.754786 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:02:21.754797 17829 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:02:21.754803 17829 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:02:21.754814 17829 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:02:21.755297 17829 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:02:21.755311 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.755316 17829 net.cpp:165] Memory required for data: 1187329500\nI0817 16:02:21.755324 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:02:21.755340 17829 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:02:21.755347 17829 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:02:21.755357 17829 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:02:21.755637 17829 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:02:21.755650 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.755656 17829 net.cpp:165] Memory required for data: 1191425500\nI0817 16:02:21.755666 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:21.755677 17829 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:02:21.755683 17829 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:02:21.755690 17829 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.755748 17829 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:02:21.755913 17829 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:02:21.755925 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.755930 17829 net.cpp:165] Memory required for data: 1195521500\nI0817 16:02:21.755940 17829 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:02:21.755951 17829 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:02:21.755959 17829 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:02:21.755964 17829 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:02:21.755972 17829 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:02:21.756006 17829 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:02:21.756014 17829 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:21.756019 17829 net.cpp:165] Memory required for data: 1199617500\nI0817 16:02:21.756024 17829 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:02:21.756031 17829 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:02:21.756037 17829 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:02:21.756047 17829 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.756057 17829 net.cpp:150] Setting up L2_b8_relu\nI0817 16:02:21.756063 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.756068 17829 net.cpp:165] Memory required for data: 1203713500\nI0817 16:02:21.756072 17829 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.756079 17829 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.756084 17829 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:02:21.756091 17829 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:21.756114 17829 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:21.756167 17829 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:02:21.756182 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.756189 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.756194 17829 net.cpp:165] Memory required for data: 1211905500\nI0817 16:02:21.756199 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:02:21.756211 17829 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:02:21.756217 17829 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:02:21.756228 17829 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:02:21.756731 17829 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:02:21.756747 17829 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:02:21.756752 17829 net.cpp:165] Memory required for data: 1216001500\nI0817 16:02:21.756760 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:02:21.756769 17829 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:02:21.756775 17829 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:02:21.756786 17829 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:02:21.757062 17829 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:02:21.757082 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.757087 17829 net.cpp:165] Memory required for data: 1220097500\nI0817 16:02:21.757097 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:21.757108 17829 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:02:21.757115 17829 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:02:21.757123 17829 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.757181 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:02:21.757341 17829 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:02:21.757354 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.757359 17829 net.cpp:165] Memory required for data: 1224193500\nI0817 16:02:21.757369 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:02:21.757377 17829 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:02:21.757383 17829 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:02:21.757393 17829 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.757403 17829 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:02:21.757410 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.757419 17829 net.cpp:165] Memory required for data: 1228289500\nI0817 16:02:21.757424 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:02:21.757438 17829 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:02:21.757444 17829 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:02:21.757458 17829 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:02:21.758940 17829 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:02:21.758958 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.758963 17829 net.cpp:165] Memory required for data: 1232385500\nI0817 16:02:21.758972 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:02:21.758985 17829 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:02:21.758992 17829 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:02:21.759001 17829 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:02:21.759268 17829 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:02:21.759280 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759284 17829 net.cpp:165] Memory required for data: 1236481500\nI0817 16:02:21.759333 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:21.759346 17829 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:02:21.759356 17829 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:02:21.759363 17829 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.759428 17829 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:02:21.759587 17829 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:02:21.759599 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759604 17829 net.cpp:165] Memory required for data: 1240577500\nI0817 16:02:21.759613 17829 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:02:21.759623 17829 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:02:21.759629 17829 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:02:21.759635 17829 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:02:21.759646 17829 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:02:21.759675 17829 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:02:21.759683 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759688 17829 net.cpp:165] Memory required for data: 1244673500\nI0817 16:02:21.759693 17829 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:02:21.759706 17829 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:02:21.759712 17829 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:02:21.759719 17829 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.759729 17829 net.cpp:150] Setting up L2_b9_relu\nI0817 16:02:21.759737 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759748 17829 net.cpp:165] Memory required for data: 1248769500\nI0817 16:02:21.759753 17829 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.759762 17829 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.759766 17829 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:02:21.759776 17829 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:21.759786 17829 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:21.759835 17829 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:02:21.759848 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759855 17829 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:02:21.759860 17829 net.cpp:165] Memory required for data: 1256961500\nI0817 16:02:21.759865 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:02:21.759876 17829 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:02:21.759882 17829 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:02:21.759891 17829 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:02:21.760388 17829 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:02:21.760402 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.760408 17829 net.cpp:165] Memory required for data: 1257985500\nI0817 16:02:21.760422 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:02:21.760434 17829 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:02:21.760442 17829 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:02:21.760449 17829 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:02:21.760727 17829 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:02:21.760740 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.760746 17829 net.cpp:165] Memory required for data: 1259009500\nI0817 16:02:21.760756 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:21.760767 17829 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:02:21.760774 17829 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:02:21.760782 17829 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.760841 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:02:21.761006 17829 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:02:21.761019 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.761024 17829 net.cpp:165] Memory required for data: 1260033500\nI0817 16:02:21.761032 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:02:21.761044 17829 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:02:21.761050 17829 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:02:21.761060 17829 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:02:21.761070 17829 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:02:21.761076 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.761081 17829 net.cpp:165] Memory required for data: 1261057500\nI0817 16:02:21.761086 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:02:21.761096 17829 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:02:21.761102 17829 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:02:21.761113 17829 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:02:21.761612 17829 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:02:21.761627 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.761632 17829 net.cpp:165] Memory required for data: 1262081500\nI0817 16:02:21.761641 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:02:21.761652 17829 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:02:21.761659 17829 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:02:21.761667 17829 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:02:21.761950 17829 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:02:21.761970 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.761975 17829 net.cpp:165] Memory required for data: 1263105500\nI0817 16:02:21.761984 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:21.761993 17829 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:02:21.761999 17829 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:02:21.762007 17829 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:02:21.762066 17829 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:02:21.762231 17829 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:02:21.762246 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.762251 17829 net.cpp:165] Memory required for data: 1264129500\nI0817 16:02:21.762260 17829 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:02:21.762270 17829 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:02:21.762276 17829 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:02:21.762285 17829 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:02:21.762323 17829 net.cpp:150] Setting up L3_b1_pool\nI0817 16:02:21.762333 17829 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:02:21.762338 17829 net.cpp:165] Memory required for data: 1265153500\nI0817 16:02:21.762343 17829 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:02:21.762351 17829 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:02:21.762357 17829 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:02:21.762364 17829 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:02:21.762372 17829 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:02:21.762408 17829 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:02:21.762424 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.762429 17829 net.cpp:165] Memory required for data: 1266177500\nI0817 16:02:21.762434 17829 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:02:21.762442 17829 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:02:21.762449 17829 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:02:21.762455 17829 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:02:21.762465 17829 net.cpp:150] Setting up L3_b1_relu\nI0817 16:02:21.762471 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.762476 17829 net.cpp:165] Memory required for data: 1267201500\nI0817 16:02:21.762481 17829 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:02:21.762497 17829 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:02:21.762509 17829 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:02:21.763753 17829 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:02:21.763772 17829 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:02:21.763777 17829 net.cpp:165] Memory required for data: 1268225500\nI0817 16:02:21.763782 17829 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:02:21.763794 17829 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:02:21.763801 17829 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:02:21.763808 17829 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:02:21.763815 17829 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:02:21.763861 17829 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:02:21.763873 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.763878 17829 net.cpp:165] Memory required for data: 1270273500\nI0817 16:02:21.763883 17829 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.763890 17829 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.763896 17829 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:02:21.763907 17829 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:21.763917 17829 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:21.763978 17829 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:02:21.763989 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.764003 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.764008 17829 net.cpp:165] Memory required for data: 1274369500\nI0817 16:02:21.764014 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:02:21.764029 17829 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:02:21.764035 17829 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:02:21.764045 17829 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:02:21.765099 17829 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:02:21.765113 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.765118 17829 net.cpp:165] Memory required for data: 1276417500\nI0817 16:02:21.765127 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:02:21.765141 17829 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:02:21.765146 17829 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:02:21.765158 17829 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:02:21.765437 17829 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:02:21.765450 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.765455 17829 net.cpp:165] Memory required for data: 1278465500\nI0817 16:02:21.765466 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:21.765475 17829 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:02:21.765481 17829 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:02:21.765489 17829 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.765552 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:02:21.765712 17829 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:02:21.765725 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.765730 17829 net.cpp:165] Memory required for data: 1280513500\nI0817 16:02:21.765739 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:02:21.765746 17829 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:02:21.765753 17829 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:02:21.765763 17829 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:02:21.765774 17829 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:02:21.765779 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.765784 17829 net.cpp:165] Memory required for data: 1282561500\nI0817 16:02:21.765789 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:02:21.765803 17829 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:02:21.765810 17829 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:02:21.765817 17829 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:02:21.766866 17829 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:02:21.766881 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.766886 17829 net.cpp:165] Memory required for data: 1284609500\nI0817 16:02:21.766896 17829 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:02:21.766907 17829 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:02:21.766913 17829 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:02:21.766922 17829 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:02:21.767199 17829 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:02:21.767212 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767217 17829 net.cpp:165] Memory required for data: 1286657500\nI0817 16:02:21.767227 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:21.767238 17829 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:02:21.767246 17829 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:02:21.767253 17829 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:02:21.767314 17829 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:02:21.767482 17829 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:02:21.767503 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767508 17829 net.cpp:165] Memory required for data: 1288705500\nI0817 16:02:21.767524 17829 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:02:21.767536 17829 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:02:21.767544 17829 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:02:21.767551 17829 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:02:21.767561 17829 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:02:21.767596 17829 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:02:21.767604 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767608 17829 net.cpp:165] Memory required for data: 1290753500\nI0817 16:02:21.767613 17829 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:02:21.767624 17829 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:02:21.767630 17829 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:02:21.767637 17829 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:02:21.767647 17829 net.cpp:150] Setting up L3_b2_relu\nI0817 16:02:21.767653 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767657 17829 net.cpp:165] Memory required for data: 1292801500\nI0817 16:02:21.767663 17829 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.767669 17829 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.767674 17829 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:02:21.767683 17829 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:21.767691 17829 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:21.767740 17829 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:02:21.767752 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767758 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.767762 17829 net.cpp:165] Memory required for data: 1296897500\nI0817 16:02:21.767767 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:02:21.767781 17829 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:02:21.767788 17829 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:02:21.767797 17829 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:02:21.768841 17829 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:02:21.768857 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.768862 17829 net.cpp:165] Memory required for data: 1298945500\nI0817 16:02:21.768870 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:02:21.768882 17829 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:02:21.768889 17829 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:02:21.768900 17829 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:02:21.769168 17829 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:02:21.769181 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.769186 17829 net.cpp:165] Memory required for data: 1300993500\nI0817 16:02:21.769196 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:21.769204 17829 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:02:21.769210 17829 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:02:21.769220 17829 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.769279 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:02:21.769449 17829 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:02:21.769464 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.769469 17829 net.cpp:165] Memory required for data: 1303041500\nI0817 16:02:21.769477 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:02:21.769485 17829 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:02:21.769491 17829 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:02:21.769501 17829 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:02:21.769511 17829 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:02:21.769525 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.769529 17829 net.cpp:165] Memory required for data: 1305089500\nI0817 16:02:21.769534 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:02:21.769549 17829 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:02:21.769556 17829 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:02:21.769564 17829 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:02:21.770614 17829 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:02:21.770629 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.770634 17829 net.cpp:165] Memory required for data: 1307137500\nI0817 16:02:21.770642 17829 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:02:21.770654 17829 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:02:21.770661 17829 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:02:21.770669 17829 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:02:21.770941 17829 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:02:21.770954 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.770959 17829 net.cpp:165] Memory required for data: 1309185500\nI0817 16:02:21.770969 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:21.770982 17829 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:02:21.770987 17829 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:02:21.770999 17829 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:02:21.771057 17829 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:02:21.771217 17829 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:02:21.771229 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.771234 17829 net.cpp:165] Memory required for data: 1311233500\nI0817 16:02:21.771244 17829 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:02:21.771252 17829 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:02:21.771258 17829 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:02:21.771265 17829 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:02:21.771276 17829 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:02:21.771309 17829 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:02:21.771318 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.771323 17829 net.cpp:165] Memory required for data: 1313281500\nI0817 16:02:21.771328 17829 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:02:21.771339 17829 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:02:21.771345 17829 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:02:21.771353 17829 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:02:21.771361 17829 net.cpp:150] Setting up L3_b3_relu\nI0817 16:02:21.771368 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.771373 17829 net.cpp:165] Memory required for data: 1315329500\nI0817 16:02:21.771378 17829 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.771385 17829 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.771391 17829 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:02:21.771399 17829 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:21.771409 17829 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:21.771464 17829 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:02:21.771476 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.771483 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.771488 17829 net.cpp:165] Memory required for data: 1319425500\nI0817 16:02:21.771493 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:02:21.771509 17829 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:02:21.771517 17829 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:02:21.771533 17829 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:02:21.772579 17829 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:02:21.772594 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.772599 17829 net.cpp:165] Memory required for data: 1321473500\nI0817 16:02:21.772608 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:02:21.772620 17829 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:02:21.772626 17829 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:02:21.772637 17829 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:02:21.772904 17829 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:02:21.772917 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.772922 17829 net.cpp:165] Memory required for data: 1323521500\nI0817 16:02:21.772933 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:21.772940 17829 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:02:21.772948 17829 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:02:21.772958 17829 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.773015 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:02:21.773177 17829 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:02:21.773191 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.773195 17829 net.cpp:165] Memory required for data: 1325569500\nI0817 16:02:21.773205 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:02:21.773212 17829 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:02:21.773218 17829 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:02:21.773228 17829 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:02:21.773238 17829 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:02:21.773246 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.773250 17829 net.cpp:165] Memory required for data: 1327617500\nI0817 16:02:21.773254 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:02:21.773268 17829 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:02:21.773275 17829 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:02:21.773283 17829 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:02:21.775310 17829 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:02:21.775327 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.775332 17829 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:02:21.775341 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:02:21.775354 17829 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:02:21.775362 17829 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:02:21.775373 17829 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:02:21.775651 17829 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:02:21.775665 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.775669 17829 net.cpp:165] Memory required for data: 1331713500\nI0817 16:02:21.775679 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:21.775689 17829 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:02:21.775696 17829 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:02:21.775707 17829 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:02:21.775765 17829 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:02:21.775928 17829 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:02:21.775940 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.775945 17829 net.cpp:165] Memory required for data: 1333761500\nI0817 16:02:21.775954 17829 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:02:21.775964 17829 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:02:21.775969 17829 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:02:21.775976 17829 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:02:21.775987 17829 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:02:21.776032 17829 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:02:21.776042 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.776047 17829 net.cpp:165] Memory required for data: 1335809500\nI0817 16:02:21.776052 17829 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:02:21.776060 17829 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:02:21.776067 17829 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:02:21.776077 17829 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:02:21.776087 17829 net.cpp:150] Setting up L3_b4_relu\nI0817 16:02:21.776093 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.776098 17829 net.cpp:165] Memory required for data: 1337857500\nI0817 16:02:21.776103 17829 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.776109 17829 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.776114 17829 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:02:21.776123 17829 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:21.776132 17829 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:21.776181 17829 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:02:21.776193 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.776199 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.776204 17829 net.cpp:165] Memory required for data: 1341953500\nI0817 16:02:21.776208 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:02:21.776219 17829 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:02:21.776226 17829 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:02:21.776237 17829 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:02:21.777274 17829 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:02:21.777289 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.777294 17829 net.cpp:165] Memory required for data: 1344001500\nI0817 16:02:21.777303 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:02:21.777312 17829 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:02:21.777319 17829 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:02:21.777331 17829 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:02:21.777616 17829 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:02:21.777633 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.777638 17829 net.cpp:165] Memory required for data: 1346049500\nI0817 16:02:21.777648 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:21.777657 17829 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:02:21.777664 17829 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:02:21.777671 17829 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.777732 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:02:21.777890 17829 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:02:21.777902 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.777907 17829 net.cpp:165] Memory required for data: 1348097500\nI0817 16:02:21.777916 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:02:21.777927 17829 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:02:21.777935 17829 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:02:21.777941 17829 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:02:21.777951 17829 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:02:21.777957 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.777962 17829 net.cpp:165] Memory required for data: 1350145500\nI0817 16:02:21.777966 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:02:21.777981 17829 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:02:21.777987 17829 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:02:21.778002 17829 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:02:21.779034 17829 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:02:21.779049 17829 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:21.779054 17829 net.cpp:165] Memory required for data: 1352193500\nI0817 16:02:21.779063 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:02:21.779075 17829 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:02:21.779081 17829 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:02:21.779093 17829 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:02:21.779357 17829 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:02:21.779371 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779376 17829 net.cpp:165] Memory required for data: 1354241500\nI0817 16:02:21.779386 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:21.779393 17829 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:02:21.779400 17829 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:02:21.779410 17829 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:02:21.779475 17829 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:02:21.779640 17829 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:02:21.779654 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779659 17829 net.cpp:165] Memory required for data: 1356289500\nI0817 16:02:21.779667 17829 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:02:21.779676 17829 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:02:21.779683 17829 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:02:21.779692 17829 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:02:21.779701 17829 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:02:21.779737 17829 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:02:21.779749 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779754 17829 net.cpp:165] Memory required for data: 1358337500\nI0817 16:02:21.779759 17829 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:02:21.779767 17829 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:02:21.779772 17829 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:02:21.779783 17829 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:02:21.779791 17829 net.cpp:150] Setting up L3_b5_relu\nI0817 16:02:21.779798 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779803 17829 net.cpp:165] Memory required for data: 1360385500\nI0817 16:02:21.779808 17829 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.779814 17829 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.779820 17829 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:02:21.779827 17829 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:21.779836 17829 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:21.779886 17829 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:02:21.779897 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779904 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.779908 17829 net.cpp:165] Memory required for data: 1364481500\nI0817 16:02:21.779913 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:02:21.779924 17829 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:02:21.779932 17829 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:02:21.779942 17829 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:02:21.780977 17829 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:02:21.780992 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.780997 17829 net.cpp:165] Memory required for data: 1366529500\nI0817 16:02:21.781011 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:02:21.781021 
17829 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:02:21.781028 17829 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:02:21.781039 17829 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:02:21.781311 17829 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:02:21.781324 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.781329 17829 net.cpp:165] Memory required for data: 1368577500\nI0817 16:02:21.781339 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:21.781347 17829 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:02:21.781353 17829 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:02:21.781361 17829 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.781427 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:02:21.781589 17829 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:02:21.781605 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.781610 17829 net.cpp:165] Memory required for data: 1370625500\nI0817 16:02:21.781620 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:02:21.781627 17829 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:02:21.781633 17829 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:02:21.781641 17829 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:02:21.781649 17829 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:02:21.781656 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.781661 17829 net.cpp:165] Memory required for data: 1372673500\nI0817 16:02:21.781666 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:02:21.781679 17829 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:02:21.781685 17829 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:02:21.781694 17829 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:02:21.782724 17829 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:02:21.782739 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.782744 17829 net.cpp:165] Memory required for data: 1374721500\nI0817 16:02:21.782753 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:02:21.782768 17829 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:02:21.782774 17829 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:02:21.782785 17829 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:02:21.783054 17829 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:02:21.783067 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783072 17829 net.cpp:165] Memory required for data: 1376769500\nI0817 16:02:21.783082 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:21.783092 17829 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:02:21.783097 17829 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:02:21.783108 17829 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:02:21.783166 17829 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:02:21.783326 17829 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:02:21.783339 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783344 17829 net.cpp:165] Memory required for data: 1378817500\nI0817 16:02:21.783354 17829 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:02:21.783365 17829 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:02:21.783372 17829 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:02:21.783380 17829 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:02:21.783387 17829 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:02:21.783429 17829 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:02:21.783442 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783447 17829 net.cpp:165] Memory required for data: 1380865500\nI0817 16:02:21.783452 17829 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:02:21.783459 17829 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:02:21.783473 17829 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:02:21.783483 17829 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:02:21.783493 17829 net.cpp:150] Setting up L3_b6_relu\nI0817 16:02:21.783499 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783504 17829 net.cpp:165] Memory required for data: 1382913500\nI0817 16:02:21.783509 17829 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.783515 17829 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.783521 17829 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:02:21.783529 17829 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:21.783538 17829 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:21.783591 17829 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:02:21.783603 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783609 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.783614 17829 net.cpp:165] Memory required for data: 1387009500\nI0817 16:02:21.783618 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:02:21.783629 17829 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:02:21.783637 17829 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:02:21.783648 17829 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:02:21.784682 17829 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:02:21.784696 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.784701 17829 net.cpp:165] Memory required for data: 1389057500\nI0817 16:02:21.784710 17829 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:02:21.784723 17829 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:02:21.784729 17829 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:02:21.784737 17829 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:02:21.785013 17829 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:02:21.785027 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.785030 17829 net.cpp:165] Memory required for data: 1391105500\nI0817 16:02:21.785040 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:21.785049 17829 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:02:21.785056 17829 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:02:21.785063 17829 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.785123 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:02:21.785281 17829 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:02:21.785297 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.785302 17829 net.cpp:165] Memory required for data: 1393153500\nI0817 16:02:21.785311 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:02:21.785346 17829 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:02:21.785354 17829 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:02:21.785363 17829 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:02:21.785373 17829 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:02:21.785380 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.785384 17829 net.cpp:165] Memory required for data: 1395201500\nI0817 16:02:21.785390 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:02:21.785406 17829 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:02:21.785418 17829 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:02:21.785429 17829 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:02:21.786471 17829 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:02:21.786485 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.786490 17829 net.cpp:165] Memory required for data: 1397249500\nI0817 16:02:21.786499 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:02:21.786515 17829 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:02:21.786522 17829 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:02:21.786533 17829 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:02:21.786808 17829 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:02:21.786820 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.786825 17829 net.cpp:165] Memory required for data: 1399297500\nI0817 16:02:21.786836 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:21.786844 17829 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:02:21.786850 17829 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:02:21.786859 17829 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:02:21.786923 17829 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:02:21.787084 17829 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:02:21.787099 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.787104 17829 net.cpp:165] Memory required for data: 1401345500\nI0817 16:02:21.787113 17829 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:02:21.787122 17829 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:02:21.787128 17829 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:02:21.787135 17829 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:02:21.787142 17829 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:02:21.787179 17829 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:02:21.787190 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.787195 17829 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:02:21.787199 17829 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:02:21.787207 17829 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:02:21.787214 17829 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:02:21.787220 17829 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:02:21.787230 17829 net.cpp:150] Setting up L3_b7_relu\nI0817 16:02:21.787236 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.787240 17829 net.cpp:165] Memory required for data: 1405441500\nI0817 16:02:21.787245 17829 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.787251 17829 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.787257 17829 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:02:21.787267 17829 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:21.787276 17829 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:21.787323 17829 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:02:21.787334 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.787341 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.787346 17829 net.cpp:165] Memory required for data: 1409537500\nI0817 16:02:21.787351 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:02:21.787365 17829 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:02:21.787372 17829 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:02:21.787381 17829 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:02:21.789424 17829 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:02:21.789441 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.789448 17829 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:02:21.789456 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:02:21.789469 17829 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:02:21.789476 17829 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:02:21.789485 17829 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:02:21.789759 17829 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:02:21.789783 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.789788 17829 net.cpp:165] Memory required for data: 1413633500\nI0817 16:02:21.789798 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:21.789808 17829 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:02:21.789813 17829 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:02:21.789821 17829 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.789885 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:02:21.790048 17829 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:02:21.790061 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.790066 17829 net.cpp:165] Memory required for data: 1415681500\nI0817 16:02:21.790076 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:02:21.790083 17829 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:02:21.790089 17829 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:02:21.790100 17829 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:02:21.790109 17829 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:02:21.790117 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.790122 17829 net.cpp:165] Memory required for data: 1417729500\nI0817 16:02:21.790125 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:02:21.790139 17829 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:02:21.790145 17829 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:02:21.790154 17829 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:02:21.791188 17829 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:02:21.791203 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.791208 17829 net.cpp:165] Memory required for data: 1419777500\nI0817 16:02:21.791218 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:02:21.791229 17829 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:02:21.791236 17829 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:02:21.791244 17829 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:02:21.791528 17829 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:02:21.791543 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.791548 17829 net.cpp:165] Memory required for data: 1421825500\nI0817 16:02:21.791558 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:21.791568 17829 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:02:21.791575 17829 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:02:21.791582 17829 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:02:21.791645 17829 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:02:21.791808 17829 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:02:21.791821 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.791826 17829 net.cpp:165] Memory required for data: 1423873500\nI0817 16:02:21.791836 17829 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:02:21.791848 17829 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:02:21.791856 17829 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:02:21.791862 17829 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:02:21.791877 17829 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:02:21.791910 17829 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:02:21.791921 17829 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:02:21.791926 17829 net.cpp:165] Memory required for data: 1425921500\nI0817 16:02:21.791930 17829 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:02:21.791944 17829 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:02:21.791949 17829 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:02:21.791957 17829 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:02:21.791966 17829 net.cpp:150] Setting up L3_b8_relu\nI0817 16:02:21.791973 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.791977 17829 net.cpp:165] Memory required for data: 1427969500\nI0817 16:02:21.791990 17829 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.791997 17829 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.792002 17829 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:02:21.792009 17829 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:21.792019 17829 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:21.792069 17829 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:02:21.792080 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.792088 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.792091 17829 net.cpp:165] Memory required for data: 1432065500\nI0817 16:02:21.792096 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:02:21.792110 17829 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:02:21.792117 17829 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:02:21.792126 17829 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:02:21.793157 17829 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:02:21.793172 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:02:21.793177 17829 net.cpp:165] Memory required for data: 1434113500\nI0817 16:02:21.793186 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:02:21.793198 17829 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:02:21.793205 17829 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:02:21.793216 17829 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:02:21.793496 17829 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:02:21.793510 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.793515 17829 net.cpp:165] Memory required for data: 1436161500\nI0817 16:02:21.793525 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:21.793534 17829 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:02:21.793540 17829 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:02:21.793548 17829 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.793612 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:02:21.793774 17829 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:02:21.793787 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.793792 17829 net.cpp:165] Memory required for data: 1438209500\nI0817 16:02:21.793800 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:02:21.793808 17829 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:02:21.793815 17829 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:02:21.793825 17829 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:02:21.793835 17829 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:02:21.793841 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.793846 17829 net.cpp:165] Memory required for data: 1440257500\nI0817 16:02:21.793850 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:02:21.793864 17829 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:02:21.793870 17829 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:02:21.793879 17829 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:02:21.794915 17829 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:02:21.794929 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.794934 17829 net.cpp:165] Memory required for data: 1442305500\nI0817 16:02:21.794944 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:02:21.794962 17829 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:02:21.794970 17829 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:02:21.794977 17829 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:02:21.795254 17829 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:02:21.795267 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.795279 17829 net.cpp:165] Memory required for data: 1444353500\nI0817 16:02:21.795289 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:21.795301 17829 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:02:21.795307 17829 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:02:21.795315 17829 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:02:21.795377 17829 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:02:21.795545 17829 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:02:21.795559 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.795564 17829 net.cpp:165] Memory required for data: 1446401500\nI0817 16:02:21.795572 17829 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:02:21.795585 17829 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:02:21.795591 17829 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:02:21.795598 17829 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:02:21.795608 17829 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:02:21.795642 17829 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:02:21.795653 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.795658 17829 net.cpp:165] Memory required for data: 1448449500\nI0817 16:02:21.795665 17829 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:02:21.795675 17829 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:02:21.795681 17829 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:02:21.795688 17829 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:02:21.795698 17829 net.cpp:150] Setting up L3_b9_relu\nI0817 16:02:21.795704 17829 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:02:21.795709 17829 net.cpp:165] Memory required for data: 1450497500\nI0817 16:02:21.795713 17829 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:02:21.795722 17829 net.cpp:100] Creating Layer post_pool\nI0817 16:02:21.795727 17829 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:02:21.795735 17829 net.cpp:408] post_pool -> post_pool\nI0817 16:02:21.795773 17829 net.cpp:150] Setting up post_pool\nI0817 16:02:21.795785 17829 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:02:21.795789 17829 net.cpp:165] Memory required for data: 1450529500\nI0817 16:02:21.795794 17829 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:02:21.795805 17829 net.cpp:100] Creating Layer post_FC\nI0817 16:02:21.795811 17829 net.cpp:434] post_FC <- post_pool\nI0817 16:02:21.795820 17829 net.cpp:408] post_FC -> post_FC_top\nI0817 16:02:21.795982 17829 net.cpp:150] Setting up post_FC\nI0817 16:02:21.795996 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.796000 17829 net.cpp:165] Memory required for data: 1450534500\nI0817 16:02:21.796010 17829 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:02:21.796017 17829 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:02:21.796023 17829 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:02:21.796034 17829 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:02:21.796044 17829 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:02:21.796092 17829 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:02:21.796103 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.796109 17829 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:02:21.796113 17829 net.cpp:165] Memory required for data: 1450544500\nI0817 16:02:21.796118 17829 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:02:21.796129 17829 net.cpp:100] Creating Layer accuracy\nI0817 16:02:21.796135 17829 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:02:21.796142 17829 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:02:21.796150 17829 net.cpp:408] accuracy -> accuracy\nI0817 16:02:21.796162 17829 net.cpp:150] Setting up accuracy\nI0817 16:02:21.796169 17829 net.cpp:157] Top shape: (1)\nI0817 16:02:21.796180 17829 net.cpp:165] Memory required for data: 1450544504\nI0817 16:02:21.796185 17829 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:21.796193 17829 net.cpp:100] Creating Layer loss\nI0817 16:02:21.796198 17829 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:02:21.796205 17829 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:02:21.796212 17829 net.cpp:408] loss -> loss\nI0817 16:02:21.796224 17829 layer_factory.hpp:77] Creating layer loss\nI0817 16:02:21.796350 17829 net.cpp:150] Setting up loss\nI0817 16:02:21.796362 17829 net.cpp:157] Top shape: (1)\nI0817 16:02:21.796367 17829 net.cpp:160]     with loss weight 1\nI0817 16:02:21.796383 17829 net.cpp:165] Memory required for data: 1450544508\nI0817 16:02:21.796389 17829 net.cpp:226] loss needs backward computation.\nI0817 16:02:21.796396 17829 net.cpp:228] accuracy does not need backward computation.\nI0817 16:02:21.796401 17829 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:02:21.796406 17829 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:02:21.796416 17829 net.cpp:226] post_pool needs backward computation.\nI0817 16:02:21.796422 17829 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:02:21.796427 17829 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.796432 17829 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.796437 17829 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.796442 17829 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.796447 17829 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.796452 17829 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.796456 17829 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.796461 17829 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.796466 17829 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:02:21.796471 17829 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:02:21.796476 17829 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.796483 17829 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.796486 17829 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.796491 17829 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.796496 17829 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.796501 17829 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:02:21.796506 17829 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.796511 17829 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.796516 17829 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:02:21.796521 17829 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:02:21.796526 17829 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:02:21.796531 17829 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.796536 17829 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.796541 17829 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.796546 17829 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.796551 17829 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.796556 17829 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.796561 17829 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.796566 17829 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:02:21.796571 17829 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:02:21.796576 17829 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.796581 17829 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.796586 17829 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.796598 17829 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.796603 17829 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.796609 17829 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.796613 17829 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.796619 17829 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.796624 17829 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:02:21.796629 17829 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:02:21.796634 17829 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:02:21.796640 17829 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.796645 17829 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.796650 17829 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.796655 17829 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.796660 17829 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.796665 17829 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.796674 17829 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.796679 17829 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:02:21.796684 17829 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:02:21.796689 17829 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.796695 17829 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.796700 17829 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.796705 17829 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.796710 17829 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.796715 17829 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.796720 17829 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.796725 17829 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.796731 17829 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:02:21.796736 17829 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:02:21.796741 17829 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:02:21.796747 17829 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.796752 17829 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.796757 17829 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.796762 17829 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.796767 17829 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:02:21.796772 
17829 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.796777 17829 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.796782 17829 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:02:21.796787 17829 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:02:21.796792 17829 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.796798 17829 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.796803 17829 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.796809 17829 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.796814 17829 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.796818 17829 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.796823 17829 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.796828 17829 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.796834 17829 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:02:21.796839 17829 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:02:21.796850 17829 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:02:21.796856 17829 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:02:21.796861 17829 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.796867 17829 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:02:21.796872 17829 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.796877 17829 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.796882 17829 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.796887 17829 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.796892 17829 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:02:21.796897 17829 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.796902 17829 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.796907 17829 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:02:21.796913 17829 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:02:21.796918 17829 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.796923 17829 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.796932 17829 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.796938 17829 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.796943 17829 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.796949 17829 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.796954 17829 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.796959 17829 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.796965 17829 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:02:21.796970 17829 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:02:21.796975 17829 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.796982 17829 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.796986 17829 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.796991 17829 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.796996 17829 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.797001 17829 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:02:21.797006 17829 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.797011 17829 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.797018 17829 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:02:21.797022 17829 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:02:21.797027 17829 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:02:21.797034 17829 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.797039 17829 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.797044 17829 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.797049 17829 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.797055 17829 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.797060 17829 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.797065 17829 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.797070 17829 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:02:21.797075 17829 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:02:21.797080 17829 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.797086 17829 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.797091 17829 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.797096 17829 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.797101 17829 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.797114 17829 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.797121 17829 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.797127 17829 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.797132 17829 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:02:21.797137 17829 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:02:21.797142 17829 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:02:21.797148 17829 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.797153 17829 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.797158 17829 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.797163 17829 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.797168 17829 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.797173 17829 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.797179 17829 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.797184 17829 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:02:21.797190 17829 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:02:21.797195 17829 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.797201 17829 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.797206 17829 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.797212 17829 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.797217 17829 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.797222 17829 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.797228 17829 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.797233 17829 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.797240 17829 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:02:21.797245 17829 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:02:21.797250 17829 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:02:21.797256 17829 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.797261 17829 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.797267 17829 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.797272 17829 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.797277 17829 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:02:21.797283 17829 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.797288 17829 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.797293 17829 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:02:21.797299 17829 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:02:21.797304 17829 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.797314 17829 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.797320 17829 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.797327 17829 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.797332 17829 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.797338 17829 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.797343 17829 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.797348 17829 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.797353 17829 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:02:21.797359 17829 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:02:21.797366 17829 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:02:21.797371 17829 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:02:21.797381 17829 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.797389 17829 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:02:21.797394 17829 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.797399 17829 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.797405 17829 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.797415 17829 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.797422 17829 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:02:21.797428 17829 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.797433 17829 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.797441 17829 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:02:21.797446 17829 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:02:21.797451 17829 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:02:21.797457 17829 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:02:21.797463 17829 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:02:21.797468 17829 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:02:21.797474 17829 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:02:21.797479 17829 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:02:21.797484 17829 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:02:21.797490 17829 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:02:21.797495 17829 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:02:21.797502 17829 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:02:21.797508 17829 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:02:21.797513 17829 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:02:21.797519 17829 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:02:21.797524 17829 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:02:21.797530 17829 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:02:21.797535 17829 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:02:21.797541 17829 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:02:21.797546 17829 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:02:21.797552 17829 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:02:21.797559 17829 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:02:21.797564 17829 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:02:21.797569 17829 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:02:21.797575 17829 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:02:21.797580 17829 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:02:21.797586 17829 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:02:21.797591 17829 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:02:21.797597 17829 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:02:21.797602 17829 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:02:21.797608 17829 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:02:21.797614 17829 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:02:21.797619 17829 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:02:21.797626 17829 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:02:21.797631 17829 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:02:21.797637 17829 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:02:21.797642 17829 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:02:21.797648 17829 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:02:21.797658 17829 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:02:21.797664 17829 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:02:21.797672 17829 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:02:21.797677 17829 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:02:21.797683 17829 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:02:21.797688 17829 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:02:21.797694 17829 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:02:21.797699 17829 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:02:21.797705 17829 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:02:21.797710 17829 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:02:21.797716 17829 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:02:21.797721 17829 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:02:21.797727 17829 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:02:21.797732 17829 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:02:21.797739 17829 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:02:21.797744 17829 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:02:21.797750 17829 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:02:21.797755 17829 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:02:21.797761 17829 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:02:21.797766 17829 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:02:21.797772 17829 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:02:21.797777 17829 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:02:21.797783 17829 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:02:21.797790 17829 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:02:21.797794 17829 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:02:21.797801 17829 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:02:21.797806 17829 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:02:21.797811 17829 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:02:21.797817 17829 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:02:21.797822 17829 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:02:21.797828 17829 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:02:21.797834 17829 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:02:21.797839 17829 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:02:21.797845 17829 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:02:21.797852 17829 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:02:21.797857 17829 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:02:21.797863 17829 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:02:21.797868 17829 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:02:21.797873 17829 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:02:21.797879 17829 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:02:21.797884 17829 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:02:21.797890 17829 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:02:21.797896 17829 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:02:21.797901 17829 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:02:21.797907 17829 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:02:21.797914 17829 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:02:21.797919 17829 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:02:21.797930 17829 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:02:21.797937 17829 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:02:21.797942 17829 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:02:21.797948 17829 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:02:21.797955 17829 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:02:21.797960 17829 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:02:21.797966 17829 net.cpp:226] pre_relu needs backward computation.\nI0817 16:02:21.797971 17829 net.cpp:226] pre_scale needs backward computation.\nI0817 16:02:21.797976 17829 net.cpp:226] pre_bn needs backward computation.\nI0817 16:02:21.797981 17829 net.cpp:226] pre_conv needs backward computation.\nI0817 16:02:21.797987 17829 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:02:21.797994 17829 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:02:21.797998 17829 net.cpp:270] This network produces output accuracy\nI0817 16:02:21.798005 17829 net.cpp:270] This network produces output loss\nI0817 16:02:21.798336 17829 net.cpp:283] Network initialization done.\nI0817 16:02:21.799324 17829 solver.cpp:60] Solver scaffolding done.\nI0817 16:02:22.023690 17829 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:02:22.387996 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:22.388077 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:22.394901 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:22.614218 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:22.614333 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:22.648936 17829 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:02:22.649045 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:23.102917 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:23.102994 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:23.110908 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:23.357570 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:23.357786 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:23.409548 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:23.409683 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:23.925034 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:23.925087 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:23.934058 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:24.200091 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:24.200224 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:24.271354 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:24.271486 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:24.355260 17829 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:02:24.841578 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:24.841631 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:02:24.851366 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:25.145584 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:25.145740 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:25.236116 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:25.236268 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:25.874912 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:25.874966 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:25.885078 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:26.200963 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:26.201143 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:26.312214 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:26.312399 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:27.023716 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:27.023794 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:27.035305 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:27.377117 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:27.377370 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:27.509719 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:27.509956 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:02:28.300889 17829 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:02:28.300945 17829 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:02:28.312692 17829 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:02:28.362449 17856 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:28.416518 17847 blocking_queue.cpp:50] Waiting for data\nI0817 16:02:28.749238 17829 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:02:28.749482 17829 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:02:28.899540 17829 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:02:28.899768 17829 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:02:29.069835 17829 parallel.cpp:425] Starting Optimization\nI0817 16:02:29.071594 17829 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:02:29.071611 17829 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:02:29.076089 17829 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:03:49.251165 17829 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:03:49.251451 17829 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:03:53.255863 17829 solver.cpp:228] Iteration 0, loss = 4.85892\nI0817 16:03:53.255919 17829 solver.cpp:244]     Train net output #0: accuracy = 0.104\nI0817 16:03:53.255937 17829 solver.cpp:244]     Train net output #1: loss = 4.85892 (* 1 = 4.85892 loss)\nI0817 16:03:53.256150 17829 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:06:12.172926 17829 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:07:32.494432 17829 solver.cpp:404]     Test net output #0: accuracy = 0.17988\nI0817 16:07:32.494680 17829 solver.cpp:404]     Test net output #1: loss = 2.17815 (* 1 = 2.17815 loss)\nI0817 16:07:33.796882 17829 
solver.cpp:228] Iteration 100, loss = 1.97224\nI0817 16:07:33.796928 17829 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI0817 16:07:33.796946 17829 solver.cpp:244]     Train net output #1: loss = 1.97224 (* 1 = 1.97224 loss)\nI0817 16:07:33.906165 17829 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:09:52.714588 17829 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:11:13.036372 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2892\nI0817 16:11:13.036629 17829 solver.cpp:404]     Test net output #1: loss = 2.01701 (* 1 = 2.01701 loss)\nI0817 16:11:14.340068 17829 solver.cpp:228] Iteration 200, loss = 1.61922\nI0817 16:11:14.340114 17829 solver.cpp:244]     Train net output #0: accuracy = 0.392\nI0817 16:11:14.340131 17829 solver.cpp:244]     Train net output #1: loss = 1.61922 (* 1 = 1.61922 loss)\nI0817 16:11:14.440620 17829 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:13:33.339319 17829 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:14:53.647728 17829 solver.cpp:404]     Test net output #0: accuracy = 0.31208\nI0817 16:14:53.647994 17829 solver.cpp:404]     Test net output #1: loss = 2.01424 (* 1 = 2.01424 loss)\nI0817 16:14:54.951272 17829 solver.cpp:228] Iteration 300, loss = 1.41787\nI0817 16:14:54.951316 17829 solver.cpp:244]     Train net output #0: accuracy = 0.408\nI0817 16:14:54.951333 17829 solver.cpp:244]     Train net output #1: loss = 1.41787 (* 1 = 1.41787 loss)\nI0817 16:14:55.059572 17829 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:17:13.897930 17829 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:18:34.190176 17829 solver.cpp:404]     Test net output #0: accuracy = 0.30048\nI0817 16:18:34.190444 17829 solver.cpp:404]     Test net output #1: loss = 2.20388 (* 1 = 2.20388 loss)\nI0817 16:18:35.492902 17829 solver.cpp:228] Iteration 400, loss = 1.22727\nI0817 16:18:35.492946 17829 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0817 16:18:35.492962 17829 
solver.cpp:244]     Train net output #1: loss = 1.22727 (* 1 = 1.22727 loss)\nI0817 16:18:35.597072 17829 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:20:54.545042 17829 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:22:14.846256 17829 solver.cpp:404]     Test net output #0: accuracy = 0.28748\nI0817 16:22:14.846519 17829 solver.cpp:404]     Test net output #1: loss = 2.3712 (* 1 = 2.3712 loss)\nI0817 16:22:16.148259 17829 solver.cpp:228] Iteration 500, loss = 1.12893\nI0817 16:22:16.148303 17829 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0817 16:22:16.148320 17829 solver.cpp:244]     Train net output #1: loss = 1.12893 (* 1 = 1.12893 loss)\nI0817 16:22:16.256861 17829 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 16:24:35.048164 17829 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:25:55.338109 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2836\nI0817 16:25:55.338387 17829 solver.cpp:404]     Test net output #1: loss = 2.37535 (* 1 = 2.37535 loss)\nI0817 16:25:56.640856 17829 solver.cpp:228] Iteration 600, loss = 1.1023\nI0817 16:25:56.640902 17829 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0817 16:25:56.640926 17829 solver.cpp:244]     Train net output #1: loss = 1.1023 (* 1 = 1.1023 loss)\nI0817 16:25:56.746098 17829 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:28:15.494271 17829 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:29:35.833281 17829 solver.cpp:404]     Test net output #0: accuracy = 0.27428\nI0817 16:29:35.833545 17829 solver.cpp:404]     Test net output #1: loss = 2.30069 (* 1 = 2.30069 loss)\nI0817 16:29:37.136664 17829 solver.cpp:228] Iteration 700, loss = 1.0211\nI0817 16:29:37.136714 17829 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0817 16:29:37.136739 17829 solver.cpp:244]     Train net output #1: loss = 1.0211 (* 1 = 1.0211 loss)\nI0817 16:29:37.238250 17829 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 16:31:56.053848 
17829 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:33:16.388265 17829 solver.cpp:404]     Test net output #0: accuracy = 0.18596\nI0817 16:33:16.388519 17829 solver.cpp:404]     Test net output #1: loss = 3.12555 (* 1 = 3.12555 loss)\nI0817 16:33:17.691151 17829 solver.cpp:228] Iteration 800, loss = 0.828328\nI0817 16:33:17.691200 17829 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 16:33:17.691224 17829 solver.cpp:244]     Train net output #1: loss = 0.828327 (* 1 = 0.828327 loss)\nI0817 16:33:17.795881 17829 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 16:35:36.711650 17829 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:36:57.041482 17829 solver.cpp:404]     Test net output #0: accuracy = 0.179\nI0817 16:36:57.041762 17829 solver.cpp:404]     Test net output #1: loss = 3.3101 (* 1 = 3.3101 loss)\nI0817 16:36:58.344203 17829 solver.cpp:228] Iteration 900, loss = 0.783061\nI0817 16:36:58.344250 17829 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 16:36:58.344275 17829 solver.cpp:244]     Train net output #1: loss = 0.78306 (* 1 = 0.78306 loss)\nI0817 16:36:58.451681 17829 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 16:39:17.222349 17829 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:40:37.554857 17829 solver.cpp:404]     Test net output #0: accuracy = 0.19192\nI0817 16:40:37.555126 17829 solver.cpp:404]     Test net output #1: loss = 3.21605 (* 1 = 3.21605 loss)\nI0817 16:40:38.858029 17829 solver.cpp:228] Iteration 1000, loss = 0.738796\nI0817 16:40:38.858078 17829 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 16:40:38.858103 17829 solver.cpp:244]     Train net output #1: loss = 0.738796 (* 1 = 0.738796 loss)\nI0817 16:40:38.962047 17829 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 16:42:57.932775 17829 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:44:18.263689 17829 solver.cpp:404]     Test net output #0: accuracy = 0.11936\nI0817 
16:44:18.263952 17829 solver.cpp:404]     Test net output #1: loss = 4.64284 (* 1 = 4.64284 loss)\nI0817 16:44:19.566365 17829 solver.cpp:228] Iteration 1100, loss = 0.768617\nI0817 16:44:19.566416 17829 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 16:44:19.566432 17829 solver.cpp:244]     Train net output #1: loss = 0.768617 (* 1 = 0.768617 loss)\nI0817 16:44:19.672957 17829 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 16:46:38.384878 17829 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:47:58.684130 17829 solver.cpp:404]     Test net output #0: accuracy = 0.1194\nI0817 16:47:58.684437 17829 solver.cpp:404]     Test net output #1: loss = 4.34838 (* 1 = 4.34838 loss)\nI0817 16:47:59.987627 17829 solver.cpp:228] Iteration 1200, loss = 0.724313\nI0817 16:47:59.987673 17829 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 16:47:59.987689 17829 solver.cpp:244]     Train net output #1: loss = 0.724312 (* 1 = 0.724312 loss)\nI0817 16:48:00.095199 17829 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 16:50:18.912303 17829 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:51:39.218552 17829 solver.cpp:404]     Test net output #0: accuracy = 0.13872\nI0817 16:51:39.218813 17829 solver.cpp:404]     Test net output #1: loss = 4.06466 (* 1 = 4.06466 loss)\nI0817 16:51:40.522251 17829 solver.cpp:228] Iteration 1300, loss = 0.685095\nI0817 16:51:40.522298 17829 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 16:51:40.522315 17829 solver.cpp:244]     Train net output #1: loss = 0.685095 (* 1 = 0.685095 loss)\nI0817 16:51:40.625152 17829 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 16:53:59.633318 17829 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:55:19.935943 17829 solver.cpp:404]     Test net output #0: accuracy = 0.12436\nI0817 16:55:19.936206 17829 solver.cpp:404]     Test net output #1: loss = 4.41228 (* 1 = 4.41228 loss)\nI0817 16:55:21.239915 17829 solver.cpp:228] 
Iteration 1400, loss = 0.6603\nI0817 16:55:21.239962 17829 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 16:55:21.239979 17829 solver.cpp:244]     Train net output #1: loss = 0.6603 (* 1 = 0.6603 loss)\nI0817 16:55:21.343576 17829 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 16:57:40.162389 17829 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 16:59:00.479436 17829 solver.cpp:404]     Test net output #0: accuracy = 0.16584\nI0817 16:59:00.479696 17829 solver.cpp:404]     Test net output #1: loss = 4.09862 (* 1 = 4.09862 loss)\nI0817 16:59:01.782810 17829 solver.cpp:228] Iteration 1500, loss = 0.686651\nI0817 16:59:01.782855 17829 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 16:59:01.782871 17829 solver.cpp:244]     Train net output #1: loss = 0.686651 (* 1 = 0.686651 loss)\nI0817 16:59:01.888854 17829 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 17:01:20.792505 17829 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:02:41.096349 17829 solver.cpp:404]     Test net output #0: accuracy = 0.14432\nI0817 17:02:41.096619 17829 solver.cpp:404]     Test net output #1: loss = 5.31804 (* 1 = 5.31804 loss)\nI0817 17:02:42.399700 17829 solver.cpp:228] Iteration 1600, loss = 0.710255\nI0817 17:02:42.399747 17829 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 17:02:42.399763 17829 solver.cpp:244]     Train net output #1: loss = 0.710254 (* 1 = 0.710254 loss)\nI0817 17:02:42.505110 17829 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 17:05:01.410907 17829 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:06:21.745723 17829 solver.cpp:404]     Test net output #0: accuracy = 0.13468\nI0817 17:06:21.745987 17829 solver.cpp:404]     Test net output #1: loss = 5.8177 (* 1 = 5.8177 loss)\nI0817 17:06:23.049648 17829 solver.cpp:228] Iteration 1700, loss = 0.590526\nI0817 17:06:23.049693 17829 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:06:23.049710 17829 
solver.cpp:244]     Train net output #1: loss = 0.590525 (* 1 = 0.590525 loss)\nI0817 17:06:23.150008 17829 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 17:08:41.957795 17829 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:10:02.261333 17829 solver.cpp:404]     Test net output #0: accuracy = 0.16548\nI0817 17:10:02.261543 17829 solver.cpp:404]     Test net output #1: loss = 4.49939 (* 1 = 4.49939 loss)\nI0817 17:10:03.564563 17829 solver.cpp:228] Iteration 1800, loss = 0.553729\nI0817 17:10:03.564610 17829 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 17:10:03.564626 17829 solver.cpp:244]     Train net output #1: loss = 0.553728 (* 1 = 0.553728 loss)\nI0817 17:10:03.671319 17829 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 17:12:22.370821 17829 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:13:42.681138 17829 solver.cpp:404]     Test net output #0: accuracy = 0.18108\nI0817 17:13:42.681372 17829 solver.cpp:404]     Test net output #1: loss = 4.54788 (* 1 = 4.54788 loss)\nI0817 17:13:43.983239 17829 solver.cpp:228] Iteration 1900, loss = 0.570778\nI0817 17:13:43.983288 17829 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 17:13:43.983304 17829 solver.cpp:244]     Train net output #1: loss = 0.570777 (* 1 = 0.570777 loss)\nI0817 17:13:44.090422 17829 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 17:16:02.897274 17829 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:17:23.201506 17829 solver.cpp:404]     Test net output #0: accuracy = 0.17056\nI0817 17:17:23.201747 17829 solver.cpp:404]     Test net output #1: loss = 6.56118 (* 1 = 6.56118 loss)\nI0817 17:17:24.503751 17829 solver.cpp:228] Iteration 2000, loss = 0.610467\nI0817 17:17:24.503798 17829 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 17:17:24.503813 17829 solver.cpp:244]     Train net output #1: loss = 0.610467 (* 1 = 0.610467 loss)\nI0817 17:17:24.612097 17829 sgd_solver.cpp:166] Iteration 2000, 
lr = 0.35\nI0817 17:19:43.274220 17829 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:21:03.569365 17829 solver.cpp:404]     Test net output #0: accuracy = 0.17844\nI0817 17:21:03.569593 17829 solver.cpp:404]     Test net output #1: loss = 4.86971 (* 1 = 4.86971 loss)\nI0817 17:21:04.872655 17829 solver.cpp:228] Iteration 2100, loss = 0.503304\nI0817 17:21:04.872702 17829 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 17:21:04.872717 17829 solver.cpp:244]     Train net output #1: loss = 0.503303 (* 1 = 0.503303 loss)\nI0817 17:21:04.973956 17829 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 17:23:23.583714 17829 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:24:43.920605 17829 solver.cpp:404]     Test net output #0: accuracy = 0.21972\nI0817 17:24:43.920819 17829 solver.cpp:404]     Test net output #1: loss = 3.46564 (* 1 = 3.46564 loss)\nI0817 17:24:45.223302 17829 solver.cpp:228] Iteration 2200, loss = 0.384352\nI0817 17:24:45.223345 17829 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 17:24:45.223362 17829 solver.cpp:244]     Train net output #1: loss = 0.384352 (* 1 = 0.384352 loss)\nI0817 17:24:45.329524 17829 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 17:27:04.118862 17829 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:28:25.389024 17829 solver.cpp:404]     Test net output #0: accuracy = 0.21684\nI0817 17:28:25.389313 17829 solver.cpp:404]     Test net output #1: loss = 3.93457 (* 1 = 3.93457 loss)\nI0817 17:28:26.695720 17829 solver.cpp:228] Iteration 2300, loss = 0.520618\nI0817 17:28:26.695775 17829 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 17:28:26.695792 17829 solver.cpp:244]     Train net output #1: loss = 0.520617 (* 1 = 0.520617 loss)\nI0817 17:28:26.796078 17829 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 17:30:45.765592 17829 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:32:07.022914 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.19824\nI0817 17:32:07.023197 17829 solver.cpp:404]     Test net output #1: loss = 4.25558 (* 1 = 4.25558 loss)\nI0817 17:32:08.329087 17829 solver.cpp:228] Iteration 2400, loss = 0.544071\nI0817 17:32:08.329138 17829 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 17:32:08.329154 17829 solver.cpp:244]     Train net output #1: loss = 0.544071 (* 1 = 0.544071 loss)\nI0817 17:32:08.431242 17829 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 17:34:27.602025 17829 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:35:48.863371 17829 solver.cpp:404]     Test net output #0: accuracy = 0.20516\nI0817 17:35:48.863687 17829 solver.cpp:404]     Test net output #1: loss = 4.58226 (* 1 = 4.58226 loss)\nI0817 17:35:50.169286 17829 solver.cpp:228] Iteration 2500, loss = 0.438629\nI0817 17:35:50.169337 17829 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 17:35:50.169354 17829 solver.cpp:244]     Train net output #1: loss = 0.438629 (* 1 = 0.438629 loss)\nI0817 17:35:50.273447 17829 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 17:38:09.384534 17829 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:39:30.650888 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2262\nI0817 17:39:30.651193 17829 solver.cpp:404]     Test net output #1: loss = 4.55332 (* 1 = 4.55332 loss)\nI0817 17:39:31.956630 17829 solver.cpp:228] Iteration 2600, loss = 0.360602\nI0817 17:39:31.956679 17829 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:39:31.956696 17829 solver.cpp:244]     Train net output #1: loss = 0.360601 (* 1 = 0.360601 loss)\nI0817 17:39:32.058377 17829 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 17:41:50.830492 17829 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:43:12.066849 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2396\nI0817 17:43:12.067144 17829 solver.cpp:404]     Test net output #1: loss = 4.03254 (* 1 = 4.03254 loss)\nI0817 
17:43:13.372979 17829 solver.cpp:228] Iteration 2700, loss = 0.455178\nI0817 17:43:13.373035 17829 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 17:43:13.373052 17829 solver.cpp:244]     Train net output #1: loss = 0.455177 (* 1 = 0.455177 loss)\nI0817 17:43:13.477712 17829 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 17:45:32.473275 17829 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:46:53.718894 17829 solver.cpp:404]     Test net output #0: accuracy = 0.23296\nI0817 17:46:53.719202 17829 solver.cpp:404]     Test net output #1: loss = 3.72893 (* 1 = 3.72893 loss)\nI0817 17:46:55.025255 17829 solver.cpp:228] Iteration 2800, loss = 0.311163\nI0817 17:46:55.025306 17829 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 17:46:55.025322 17829 solver.cpp:244]     Train net output #1: loss = 0.311162 (* 1 = 0.311162 loss)\nI0817 17:46:55.125195 17829 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 17:49:13.957660 17829 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:50:35.215018 17829 solver.cpp:404]     Test net output #0: accuracy = 0.26672\nI0817 17:50:35.215320 17829 solver.cpp:404]     Test net output #1: loss = 4.43039 (* 1 = 4.43039 loss)\nI0817 17:50:36.521419 17829 solver.cpp:228] Iteration 2900, loss = 0.402183\nI0817 17:50:36.521471 17829 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 17:50:36.521487 17829 solver.cpp:244]     Train net output #1: loss = 0.402182 (* 1 = 0.402182 loss)\nI0817 17:50:36.626085 17829 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 17:52:55.625213 17829 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:54:16.869134 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2368\nI0817 17:54:16.869446 17829 solver.cpp:404]     Test net output #1: loss = 3.81319 (* 1 = 3.81319 loss)\nI0817 17:54:18.175084 17829 solver.cpp:228] Iteration 3000, loss = 0.310433\nI0817 17:54:18.175133 17829 solver.cpp:244]     Train net output #0: 
accuracy = 0.88\nI0817 17:54:18.175150 17829 solver.cpp:244]     Train net output #1: loss = 0.310432 (* 1 = 0.310432 loss)\nI0817 17:54:18.275535 17829 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 17:56:37.128937 17829 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 17:57:58.360484 17829 solver.cpp:404]     Test net output #0: accuracy = 0.32256\nI0817 17:57:58.360802 17829 solver.cpp:404]     Test net output #1: loss = 3.23464 (* 1 = 3.23464 loss)\nI0817 17:57:59.667102 17829 solver.cpp:228] Iteration 3100, loss = 0.386089\nI0817 17:57:59.667161 17829 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 17:57:59.667179 17829 solver.cpp:244]     Train net output #1: loss = 0.386088 (* 1 = 0.386088 loss)\nI0817 17:57:59.773514 17829 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 18:00:18.593542 17829 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:01:39.699535 17829 solver.cpp:404]     Test net output #0: accuracy = 0.29096\nI0817 18:01:39.699836 17829 solver.cpp:404]     Test net output #1: loss = 3.18062 (* 1 = 3.18062 loss)\nI0817 18:01:41.006085 17829 solver.cpp:228] Iteration 3200, loss = 0.33261\nI0817 18:01:41.006142 17829 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:01:41.006160 17829 solver.cpp:244]     Train net output #1: loss = 0.332609 (* 1 = 0.332609 loss)\nI0817 18:01:41.108397 17829 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 18:04:00.109339 17829 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:05:21.210780 17829 solver.cpp:404]     Test net output #0: accuracy = 0.27284\nI0817 18:05:21.211055 17829 solver.cpp:404]     Test net output #1: loss = 3.34695 (* 1 = 3.34695 loss)\nI0817 18:05:22.518520 17829 solver.cpp:228] Iteration 3300, loss = 0.287016\nI0817 18:05:22.518580 17829 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:05:22.518599 17829 solver.cpp:244]     Train net output #1: loss = 0.287016 (* 1 = 0.287016 loss)\nI0817 18:05:22.622122 
17829 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0817 18:07:41.609232 17829 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:09:02.764767 17829 solver.cpp:404]     Test net output #0: accuracy = 0.34104\nI0817 18:09:02.765041 17829 solver.cpp:404]     Test net output #1: loss = 2.64227 (* 1 = 2.64227 loss)\nI0817 18:09:04.071626 17829 solver.cpp:228] Iteration 3400, loss = 0.282263\nI0817 18:09:04.071686 17829 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:09:04.071703 17829 solver.cpp:244]     Train net output #1: loss = 0.282263 (* 1 = 0.282263 loss)\nI0817 18:09:04.170274 17829 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 18:11:23.022768 17829 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:12:44.290052 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4098\nI0817 18:12:44.290346 17829 solver.cpp:404]     Test net output #1: loss = 2.53543 (* 1 = 2.53543 loss)\nI0817 18:12:45.597975 17829 solver.cpp:228] Iteration 3500, loss = 0.291189\nI0817 18:12:45.598036 17829 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:12:45.598053 17829 solver.cpp:244]     Train net output #1: loss = 0.291189 (* 1 = 0.291189 loss)\nI0817 18:12:45.697713 17829 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 18:15:04.749238 17829 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:16:25.871155 17829 solver.cpp:404]     Test net output #0: accuracy = 0.253\nI0817 18:16:25.871454 17829 solver.cpp:404]     Test net output #1: loss = 4.21519 (* 1 = 4.21519 loss)\nI0817 18:16:27.178603 17829 solver.cpp:228] Iteration 3600, loss = 0.288687\nI0817 18:16:27.178663 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:16:27.178679 17829 solver.cpp:244]     Train net output #1: loss = 0.288687 (* 1 = 0.288687 loss)\nI0817 18:16:27.279462 17829 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 18:18:46.196323 17829 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 
18:20:07.233121 17829 solver.cpp:404]     Test net output #0: accuracy = 0.25076\nI0817 18:20:07.233397 17829 solver.cpp:404]     Test net output #1: loss = 4.39358 (* 1 = 4.39358 loss)\nI0817 18:20:08.540675 17829 solver.cpp:228] Iteration 3700, loss = 0.234198\nI0817 18:20:08.540735 17829 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:20:08.540751 17829 solver.cpp:244]     Train net output #1: loss = 0.234198 (* 1 = 0.234198 loss)\nI0817 18:20:08.645457 17829 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 18:22:27.655781 17829 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:23:48.833485 17829 solver.cpp:404]     Test net output #0: accuracy = 0.40468\nI0817 18:23:48.833770 17829 solver.cpp:404]     Test net output #1: loss = 2.53834 (* 1 = 2.53834 loss)\nI0817 18:23:50.140853 17829 solver.cpp:228] Iteration 3800, loss = 0.247531\nI0817 18:23:50.140913 17829 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:23:50.140930 17829 solver.cpp:244]     Train net output #1: loss = 0.247531 (* 1 = 0.247531 loss)\nI0817 18:23:50.246510 17829 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 18:26:09.400683 17829 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:27:30.678725 17829 solver.cpp:404]     Test net output #0: accuracy = 0.40256\nI0817 18:27:30.679025 17829 solver.cpp:404]     Test net output #1: loss = 2.55906 (* 1 = 2.55906 loss)\nI0817 18:27:31.985057 17829 solver.cpp:228] Iteration 3900, loss = 0.246956\nI0817 18:27:31.985116 17829 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:27:31.985132 17829 solver.cpp:244]     Train net output #1: loss = 0.246955 (* 1 = 0.246955 loss)\nI0817 18:27:32.085628 17829 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 18:29:51.233408 17829 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:31:12.491616 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2868\nI0817 18:31:12.491900 17829 solver.cpp:404]     Test net output 
#1: loss = 3.624 (* 1 = 3.624 loss)\nI0817 18:31:13.799489 17829 solver.cpp:228] Iteration 4000, loss = 0.28238\nI0817 18:31:13.799556 17829 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 18:31:13.799573 17829 solver.cpp:244]     Train net output #1: loss = 0.28238 (* 1 = 0.28238 loss)\nI0817 18:31:13.895370 17829 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 18:33:33.028338 17829 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:34:54.105453 17829 solver.cpp:404]     Test net output #0: accuracy = 0.2922\nI0817 18:34:54.105753 17829 solver.cpp:404]     Test net output #1: loss = 3.81061 (* 1 = 3.81061 loss)\nI0817 18:34:55.412780 17829 solver.cpp:228] Iteration 4100, loss = 0.195051\nI0817 18:34:55.412837 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:34:55.412854 17829 solver.cpp:244]     Train net output #1: loss = 0.195051 (* 1 = 0.195051 loss)\nI0817 18:34:55.517843 17829 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 18:37:14.531589 17829 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:38:35.566946 17829 solver.cpp:404]     Test net output #0: accuracy = 0.29828\nI0817 18:38:35.567261 17829 solver.cpp:404]     Test net output #1: loss = 3.92089 (* 1 = 3.92089 loss)\nI0817 18:38:36.873270 17829 solver.cpp:228] Iteration 4200, loss = 0.327325\nI0817 18:38:36.873325 17829 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:38:36.873342 17829 solver.cpp:244]     Train net output #1: loss = 0.327325 (* 1 = 0.327325 loss)\nI0817 18:38:36.976596 17829 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 18:40:56.061172 17829 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:42:17.337460 17829 solver.cpp:404]     Test net output #0: accuracy = 0.30896\nI0817 18:42:17.337739 17829 solver.cpp:404]     Test net output #1: loss = 3.83464 (* 1 = 3.83464 loss)\nI0817 18:42:18.643805 17829 solver.cpp:228] Iteration 4300, loss = 0.25933\nI0817 18:42:18.643862 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 18:42:18.643878 17829 solver.cpp:244]     Train net output #1: loss = 0.25933 (* 1 = 0.25933 loss)\nI0817 18:42:18.746762 17829 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 18:44:37.854423 17829 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:45:59.117553 17829 solver.cpp:404]     Test net output #0: accuracy = 0.20396\nI0817 18:45:59.117866 17829 solver.cpp:404]     Test net output #1: loss = 6.25857 (* 1 = 6.25857 loss)\nI0817 18:46:00.424238 17829 solver.cpp:228] Iteration 4400, loss = 0.188377\nI0817 18:46:00.424295 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:46:00.424312 17829 solver.cpp:244]     Train net output #1: loss = 0.188376 (* 1 = 0.188376 loss)\nI0817 18:46:00.525128 17829 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 18:48:19.433159 17829 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 18:49:40.688513 17829 solver.cpp:404]     Test net output #0: accuracy = 0.27508\nI0817 18:49:40.688824 17829 solver.cpp:404]     Test net output #1: loss = 4.30259 (* 1 = 4.30259 loss)\nI0817 18:49:41.994494 17829 solver.cpp:228] Iteration 4500, loss = 0.141978\nI0817 18:49:41.994550 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:49:41.994571 17829 solver.cpp:244]     Train net output #1: loss = 0.141977 (* 1 = 0.141977 loss)\nI0817 18:49:42.096902 17829 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 18:52:01.048513 17829 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 18:53:22.294972 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4138\nI0817 18:53:22.295253 17829 solver.cpp:404]     Test net output #1: loss = 2.7612 (* 1 = 2.7612 loss)\nI0817 18:53:23.601258 17829 solver.cpp:228] Iteration 4600, loss = 0.211122\nI0817 18:53:23.601317 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:53:23.601336 17829 solver.cpp:244]     Train net output #1: loss = 0.211122 (* 1 = 
0.211122 loss)\nI0817 18:53:23.702378 17829 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 18:55:42.809516 17829 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 18:57:04.062511 17829 solver.cpp:404]     Test net output #0: accuracy = 0.46228\nI0817 18:57:04.062804 17829 solver.cpp:404]     Test net output #1: loss = 2.20074 (* 1 = 2.20074 loss)\nI0817 18:57:05.368455 17829 solver.cpp:228] Iteration 4700, loss = 0.145331\nI0817 18:57:05.368516 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:57:05.368533 17829 solver.cpp:244]     Train net output #1: loss = 0.145331 (* 1 = 0.145331 loss)\nI0817 18:57:05.470348 17829 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 18:59:24.589697 17829 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:00:45.861487 17829 solver.cpp:404]     Test net output #0: accuracy = 0.51068\nI0817 19:00:45.861788 17829 solver.cpp:404]     Test net output #1: loss = 2.01589 (* 1 = 2.01589 loss)\nI0817 19:00:47.167702 17829 solver.cpp:228] Iteration 4800, loss = 0.13939\nI0817 19:00:47.167762 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:00:47.167780 17829 solver.cpp:244]     Train net output #1: loss = 0.139389 (* 1 = 0.139389 loss)\nI0817 19:00:47.266780 17829 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 19:03:06.373719 17829 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:04:27.642962 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4436\nI0817 19:04:27.643272 17829 solver.cpp:404]     Test net output #1: loss = 2.72939 (* 1 = 2.72939 loss)\nI0817 19:04:28.948979 17829 solver.cpp:228] Iteration 4900, loss = 0.174463\nI0817 19:04:28.949040 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:04:28.949056 17829 solver.cpp:244]     Train net output #1: loss = 0.174463 (* 1 = 0.174463 loss)\nI0817 19:04:29.053514 17829 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 19:06:48.177067 17829 solver.cpp:337] Iteration 
5000, Testing net (#0)\nI0817 19:08:09.456938 17829 solver.cpp:404]     Test net output #0: accuracy = 0.47336\nI0817 19:08:09.457231 17829 solver.cpp:404]     Test net output #1: loss = 2.39655 (* 1 = 2.39655 loss)\nI0817 19:08:10.762712 17829 solver.cpp:228] Iteration 5000, loss = 0.191642\nI0817 19:08:10.762771 17829 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:08:10.762789 17829 solver.cpp:244]     Train net output #1: loss = 0.191642 (* 1 = 0.191642 loss)\nI0817 19:08:10.865775 17829 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 19:10:29.984596 17829 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:11:51.254825 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4576\nI0817 19:11:51.255117 17829 solver.cpp:404]     Test net output #1: loss = 2.69505 (* 1 = 2.69505 loss)\nI0817 19:11:52.561110 17829 solver.cpp:228] Iteration 5100, loss = 0.202275\nI0817 19:11:52.561170 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:11:52.561188 17829 solver.cpp:244]     Train net output #1: loss = 0.202275 (* 1 = 0.202275 loss)\nI0817 19:11:52.662958 17829 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 19:14:11.794456 17829 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:15:33.044937 17829 solver.cpp:404]     Test net output #0: accuracy = 0.44108\nI0817 19:15:33.045248 17829 solver.cpp:404]     Test net output #1: loss = 2.60695 (* 1 = 2.60695 loss)\nI0817 19:15:34.351323 17829 solver.cpp:228] Iteration 5200, loss = 0.147571\nI0817 19:15:34.351383 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:15:34.351402 17829 solver.cpp:244]     Train net output #1: loss = 0.14757 (* 1 = 0.14757 loss)\nI0817 19:15:34.450193 17829 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 19:17:53.564096 17829 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:19:14.806689 17829 solver.cpp:404]     Test net output #0: accuracy = 0.45112\nI0817 19:19:14.806982 17829 
solver.cpp:404]     Test net output #1: loss = 2.89827 (* 1 = 2.89827 loss)\nI0817 19:19:16.112602 17829 solver.cpp:228] Iteration 5300, loss = 0.231103\nI0817 19:19:16.112661 17829 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 19:19:16.112679 17829 solver.cpp:244]     Train net output #1: loss = 0.231102 (* 1 = 0.231102 loss)\nI0817 19:19:16.211751 17829 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 19:21:35.347357 17829 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:22:56.629827 17829 solver.cpp:404]     Test net output #0: accuracy = 0.39768\nI0817 19:22:56.630128 17829 solver.cpp:404]     Test net output #1: loss = 3.31027 (* 1 = 3.31027 loss)\nI0817 19:22:57.936161 17829 solver.cpp:228] Iteration 5400, loss = 0.195012\nI0817 19:22:57.936218 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:22:57.936235 17829 solver.cpp:244]     Train net output #1: loss = 0.195011 (* 1 = 0.195011 loss)\nI0817 19:22:58.040364 17829 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 19:25:17.162618 17829 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:26:38.418869 17829 solver.cpp:404]     Test net output #0: accuracy = 0.45288\nI0817 19:26:38.419162 17829 solver.cpp:404]     Test net output #1: loss = 2.36376 (* 1 = 2.36376 loss)\nI0817 19:26:39.725023 17829 solver.cpp:228] Iteration 5500, loss = 0.123496\nI0817 19:26:39.725081 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:26:39.725100 17829 solver.cpp:244]     Train net output #1: loss = 0.123496 (* 1 = 0.123496 loss)\nI0817 19:26:39.827430 17829 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 19:28:58.837106 17829 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:30:20.100493 17829 solver.cpp:404]     Test net output #0: accuracy = 0.49496\nI0817 19:30:20.100786 17829 solver.cpp:404]     Test net output #1: loss = 2.07668 (* 1 = 2.07668 loss)\nI0817 19:30:21.409513 17829 solver.cpp:228] Iteration 5600, loss = 
0.207177\nI0817 19:30:21.409579 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:30:21.409597 17829 solver.cpp:244]     Train net output #1: loss = 0.207177 (* 1 = 0.207177 loss)\nI0817 19:30:21.511469 17829 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 19:32:40.376847 17829 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:34:01.635203 17829 solver.cpp:404]     Test net output #0: accuracy = 0.40196\nI0817 19:34:01.635491 17829 solver.cpp:404]     Test net output #1: loss = 2.63775 (* 1 = 2.63775 loss)\nI0817 19:34:02.941021 17829 solver.cpp:228] Iteration 5700, loss = 0.0890469\nI0817 19:34:02.941074 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:34:02.941092 17829 solver.cpp:244]     Train net output #1: loss = 0.0890465 (* 1 = 0.0890465 loss)\nI0817 19:34:03.042129 17829 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 19:36:22.042484 17829 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:37:42.427569 17829 solver.cpp:404]     Test net output #0: accuracy = 0.45668\nI0817 19:37:42.427832 17829 solver.cpp:404]     Test net output #1: loss = 2.33817 (* 1 = 2.33817 loss)\nI0817 19:37:43.731381 17829 solver.cpp:228] Iteration 5800, loss = 0.16518\nI0817 19:37:43.731431 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:37:43.731456 17829 solver.cpp:244]     Train net output #1: loss = 0.16518 (* 1 = 0.16518 loss)\nI0817 19:37:43.831779 17829 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 19:40:02.682065 17829 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:41:23.018460 17829 solver.cpp:404]     Test net output #0: accuracy = 0.48076\nI0817 19:41:23.018708 17829 solver.cpp:404]     Test net output #1: loss = 2.29472 (* 1 = 2.29472 loss)\nI0817 19:41:24.321962 17829 solver.cpp:228] Iteration 5900, loss = 0.125002\nI0817 19:41:24.322011 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:41:24.322034 17829 solver.cpp:244]     
Train net output #1: loss = 0.125002 (* 1 = 0.125002 loss)\nI0817 19:41:24.423391 17829 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 19:43:43.419152 17829 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:45:03.762709 17829 solver.cpp:404]     Test net output #0: accuracy = 0.48408\nI0817 19:45:03.762902 17829 solver.cpp:404]     Test net output #1: loss = 2.17864 (* 1 = 2.17864 loss)\nI0817 19:45:05.066279 17829 solver.cpp:228] Iteration 6000, loss = 0.187193\nI0817 19:45:05.066329 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 19:45:05.066354 17829 solver.cpp:244]     Train net output #1: loss = 0.187192 (* 1 = 0.187192 loss)\nI0817 19:45:05.168038 17829 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 19:47:23.915539 17829 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:48:44.249594 17829 solver.cpp:404]     Test net output #0: accuracy = 0.44496\nI0817 19:48:44.249830 17829 solver.cpp:404]     Test net output #1: loss = 2.53625 (* 1 = 2.53625 loss)\nI0817 19:48:45.553392 17829 solver.cpp:228] Iteration 6100, loss = 0.13288\nI0817 19:48:45.553442 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:48:45.553467 17829 solver.cpp:244]     Train net output #1: loss = 0.132879 (* 1 = 0.132879 loss)\nI0817 19:48:45.659375 17829 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 19:51:04.388273 17829 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 19:52:24.710218 17829 solver.cpp:404]     Test net output #0: accuracy = 0.55172\nI0817 19:52:24.710463 17829 solver.cpp:404]     Test net output #1: loss = 1.70754 (* 1 = 1.70754 loss)\nI0817 19:52:26.014152 17829 solver.cpp:228] Iteration 6200, loss = 0.197965\nI0817 19:52:26.014201 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:52:26.014225 17829 solver.cpp:244]     Train net output #1: loss = 0.197965 (* 1 = 0.197965 loss)\nI0817 19:52:26.116611 17829 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0817 
19:54:44.756361 17829 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 19:56:05.102015 17829 solver.cpp:404]     Test net output #0: accuracy = 0.51092\nI0817 19:56:05.102257 17829 solver.cpp:404]     Test net output #1: loss = 2.07995 (* 1 = 2.07995 loss)\nI0817 19:56:06.405740 17829 solver.cpp:228] Iteration 6300, loss = 0.112665\nI0817 19:56:06.405789 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:56:06.405813 17829 solver.cpp:244]     Train net output #1: loss = 0.112665 (* 1 = 0.112665 loss)\nI0817 19:56:06.510185 17829 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 19:58:25.282163 17829 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 19:59:45.614266 17829 solver.cpp:404]     Test net output #0: accuracy = 0.44808\nI0817 19:59:45.614518 17829 solver.cpp:404]     Test net output #1: loss = 2.89917 (* 1 = 2.89917 loss)\nI0817 19:59:46.918124 17829 solver.cpp:228] Iteration 6400, loss = 0.227913\nI0817 19:59:46.918174 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 19:59:46.918197 17829 solver.cpp:244]     Train net output #1: loss = 0.227913 (* 1 = 0.227913 loss)\nI0817 19:59:47.020385 17829 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 20:02:05.766201 17829 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:03:26.110183 17829 solver.cpp:404]     Test net output #0: accuracy = 0.36072\nI0817 20:03:26.110443 17829 solver.cpp:404]     Test net output #1: loss = 3.555 (* 1 = 3.555 loss)\nI0817 20:03:27.413848 17829 solver.cpp:228] Iteration 6500, loss = 0.133899\nI0817 20:03:27.413897 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:03:27.413921 17829 solver.cpp:244]     Train net output #1: loss = 0.133899 (* 1 = 0.133899 loss)\nI0817 20:03:27.519877 17829 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 20:05:46.257732 17829 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:07:06.600628 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.48568\nI0817 20:07:06.600875 17829 solver.cpp:404]     Test net output #1: loss = 2.25731 (* 1 = 2.25731 loss)\nI0817 20:07:07.904201 17829 solver.cpp:228] Iteration 6600, loss = 0.155553\nI0817 20:07:07.904247 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:07:07.904271 17829 solver.cpp:244]     Train net output #1: loss = 0.155553 (* 1 = 0.155553 loss)\nI0817 20:07:08.009325 17829 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 20:09:26.916009 17829 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:10:47.258182 17829 solver.cpp:404]     Test net output #0: accuracy = 0.48436\nI0817 20:10:47.258430 17829 solver.cpp:404]     Test net output #1: loss = 2.3766 (* 1 = 2.3766 loss)\nI0817 20:10:48.561791 17829 solver.cpp:228] Iteration 6700, loss = 0.131478\nI0817 20:10:48.561837 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:10:48.561861 17829 solver.cpp:244]     Train net output #1: loss = 0.131478 (* 1 = 0.131478 loss)\nI0817 20:10:48.663712 17829 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 20:13:07.450047 17829 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:14:27.782517 17829 solver.cpp:404]     Test net output #0: accuracy = 0.55052\nI0817 20:14:27.782750 17829 solver.cpp:404]     Test net output #1: loss = 1.79896 (* 1 = 1.79896 loss)\nI0817 20:14:29.085186 17829 solver.cpp:228] Iteration 6800, loss = 0.0702596\nI0817 20:14:29.085230 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:14:29.085247 17829 solver.cpp:244]     Train net output #1: loss = 0.0702592 (* 1 = 0.0702592 loss)\nI0817 20:14:29.191659 17829 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 20:16:47.941891 17829 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:18:08.278585 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4706\nI0817 20:18:08.278836 17829 solver.cpp:404]     Test net output #1: loss = 2.491 (* 1 = 2.491 loss)\nI0817 20:18:09.581635 17829 
solver.cpp:228] Iteration 6900, loss = 0.133386\nI0817 20:18:09.581681 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:18:09.581696 17829 solver.cpp:244]     Train net output #1: loss = 0.133386 (* 1 = 0.133386 loss)\nI0817 20:18:09.683825 17829 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 20:20:28.380069 17829 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:21:48.704532 17829 solver.cpp:404]     Test net output #0: accuracy = 0.37996\nI0817 20:21:48.704771 17829 solver.cpp:404]     Test net output #1: loss = 4.18651 (* 1 = 4.18651 loss)\nI0817 20:21:50.007491 17829 solver.cpp:228] Iteration 7000, loss = 0.200024\nI0817 20:21:50.007534 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:21:50.007550 17829 solver.cpp:244]     Train net output #1: loss = 0.200023 (* 1 = 0.200023 loss)\nI0817 20:21:50.109676 17829 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 20:24:08.957276 17829 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:25:29.286573 17829 solver.cpp:404]     Test net output #0: accuracy = 0.28416\nI0817 20:25:29.286806 17829 solver.cpp:404]     Test net output #1: loss = 5.42733 (* 1 = 5.42733 loss)\nI0817 20:25:30.589890 17829 solver.cpp:228] Iteration 7100, loss = 0.143506\nI0817 20:25:30.589933 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:25:30.589949 17829 solver.cpp:244]     Train net output #1: loss = 0.143506 (* 1 = 0.143506 loss)\nI0817 20:25:30.692065 17829 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 20:27:49.469362 17829 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:29:09.758486 17829 solver.cpp:404]     Test net output #0: accuracy = 0.3926\nI0817 20:29:09.758715 17829 solver.cpp:404]     Test net output #1: loss = 4.31105 (* 1 = 4.31105 loss)\nI0817 20:29:11.061136 17829 solver.cpp:228] Iteration 7200, loss = 0.0888969\nI0817 20:29:11.061179 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 
20:29:11.061197 17829 solver.cpp:244]     Train net output #1: loss = 0.0888965 (* 1 = 0.0888965 loss)\nI0817 20:29:11.167057 17829 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 20:31:30.066262 17829 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:32:50.358346 17829 solver.cpp:404]     Test net output #0: accuracy = 0.47816\nI0817 20:32:50.358559 17829 solver.cpp:404]     Test net output #1: loss = 2.6072 (* 1 = 2.6072 loss)\nI0817 20:32:51.661209 17829 solver.cpp:228] Iteration 7300, loss = 0.123842\nI0817 20:32:51.661253 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:32:51.661269 17829 solver.cpp:244]     Train net output #1: loss = 0.123841 (* 1 = 0.123841 loss)\nI0817 20:32:51.767704 17829 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 20:35:10.642135 17829 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:36:30.968649 17829 solver.cpp:404]     Test net output #0: accuracy = 0.31216\nI0817 20:36:30.968849 17829 solver.cpp:404]     Test net output #1: loss = 5.47527 (* 1 = 5.47527 loss)\nI0817 20:36:32.271576 17829 solver.cpp:228] Iteration 7400, loss = 0.133476\nI0817 20:36:32.271620 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:36:32.271636 17829 solver.cpp:244]     Train net output #1: loss = 0.133475 (* 1 = 0.133475 loss)\nI0817 20:36:32.376389 17829 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 20:38:51.406673 17829 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:40:11.705055 17829 solver.cpp:404]     Test net output #0: accuracy = 0.36364\nI0817 20:40:11.705267 17829 solver.cpp:404]     Test net output #1: loss = 4.06078 (* 1 = 4.06078 loss)\nI0817 20:40:13.008162 17829 solver.cpp:228] Iteration 7500, loss = 0.0488054\nI0817 20:40:13.008205 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:40:13.008222 17829 solver.cpp:244]     Train net output #1: loss = 0.048805 (* 1 = 0.048805 loss)\nI0817 20:40:13.114336 17829 
sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0817 20:42:31.932863 17829 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:43:52.252702 17829 solver.cpp:404]     Test net output #0: accuracy = 0.43552\nI0817 20:43:52.252935 17829 solver.cpp:404]     Test net output #1: loss = 2.96374 (* 1 = 2.96374 loss)\nI0817 20:43:53.557386 17829 solver.cpp:228] Iteration 7600, loss = 0.151797\nI0817 20:43:53.557431 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:43:53.557446 17829 solver.cpp:244]     Train net output #1: loss = 0.151797 (* 1 = 0.151797 loss)\nI0817 20:43:53.657735 17829 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 20:46:12.478718 17829 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:47:32.784147 17829 solver.cpp:404]     Test net output #0: accuracy = 0.37932\nI0817 20:47:32.784379 17829 solver.cpp:404]     Test net output #1: loss = 4.22284 (* 1 = 4.22284 loss)\nI0817 20:47:34.086971 17829 solver.cpp:228] Iteration 7700, loss = 0.149845\nI0817 20:47:34.087011 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:47:34.087028 17829 solver.cpp:244]     Train net output #1: loss = 0.149844 (* 1 = 0.149844 loss)\nI0817 20:47:34.193349 17829 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 20:49:53.015130 17829 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 20:51:13.337755 17829 solver.cpp:404]     Test net output #0: accuracy = 0.41364\nI0817 20:51:13.338007 17829 solver.cpp:404]     Test net output #1: loss = 3.17999 (* 1 = 3.17999 loss)\nI0817 20:51:14.640741 17829 solver.cpp:228] Iteration 7800, loss = 0.0876843\nI0817 20:51:14.640786 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:51:14.640802 17829 solver.cpp:244]     Train net output #1: loss = 0.0876839 (* 1 = 0.0876839 loss)\nI0817 20:51:14.746758 17829 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 20:53:33.554571 17829 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 
20:54:53.853375 17829 solver.cpp:404]     Test net output #0: accuracy = 0.3012\nI0817 20:54:53.853593 17829 solver.cpp:404]     Test net output #1: loss = 4.5447 (* 1 = 4.5447 loss)\nI0817 20:54:55.155514 17829 solver.cpp:228] Iteration 7900, loss = 0.155869\nI0817 20:54:55.155555 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 20:54:55.155571 17829 solver.cpp:244]     Train net output #1: loss = 0.155869 (* 1 = 0.155869 loss)\nI0817 20:54:55.260615 17829 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 20:57:14.480877 17829 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 20:58:34.784085 17829 solver.cpp:404]     Test net output #0: accuracy = 0.40124\nI0817 20:58:34.784330 17829 solver.cpp:404]     Test net output #1: loss = 3.7607 (* 1 = 3.7607 loss)\nI0817 20:58:36.086377 17829 solver.cpp:228] Iteration 8000, loss = 0.131957\nI0817 20:58:36.086423 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:58:36.086441 17829 solver.cpp:244]     Train net output #1: loss = 0.131956 (* 1 = 0.131956 loss)\nI0817 20:58:36.194058 17829 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 21:00:55.463539 17829 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:02:15.944093 17829 solver.cpp:404]     Test net output #0: accuracy = 0.45156\nI0817 21:02:15.944397 17829 solver.cpp:404]     Test net output #1: loss = 3.04975 (* 1 = 3.04975 loss)\nI0817 21:02:17.250164 17829 solver.cpp:228] Iteration 8100, loss = 0.105027\nI0817 21:02:17.250211 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:02:17.250229 17829 solver.cpp:244]     Train net output #1: loss = 0.105026 (* 1 = 0.105026 loss)\nI0817 21:02:17.357812 17829 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 21:04:36.466267 17829 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:05:56.781848 17829 solver.cpp:404]     Test net output #0: accuracy = 0.46372\nI0817 21:05:56.782105 17829 solver.cpp:404]     Test net output #1: 
loss = 2.72108 (* 1 = 2.72108 loss)\nI0817 21:05:58.084636 17829 solver.cpp:228] Iteration 8200, loss = 0.165026\nI0817 21:05:58.084677 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 21:05:58.084693 17829 solver.cpp:244]     Train net output #1: loss = 0.165026 (* 1 = 0.165026 loss)\nI0817 21:05:58.193598 17829 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 21:08:17.217664 17829 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:09:37.548199 17829 solver.cpp:404]     Test net output #0: accuracy = 0.50092\nI0817 21:09:37.548454 17829 solver.cpp:404]     Test net output #1: loss = 2.38109 (* 1 = 2.38109 loss)\nI0817 21:09:38.850698 17829 solver.cpp:228] Iteration 8300, loss = 0.0971934\nI0817 21:09:38.850740 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:09:38.850756 17829 solver.cpp:244]     Train net output #1: loss = 0.097193 (* 1 = 0.097193 loss)\nI0817 21:09:38.957316 17829 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 21:11:58.262320 17829 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:13:18.626981 17829 solver.cpp:404]     Test net output #0: accuracy = 0.44484\nI0817 21:13:18.627243 17829 solver.cpp:404]     Test net output #1: loss = 2.54557 (* 1 = 2.54557 loss)\nI0817 21:13:19.930781 17829 solver.cpp:228] Iteration 8400, loss = 0.215975\nI0817 21:13:19.930826 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 21:13:19.930850 17829 solver.cpp:244]     Train net output #1: loss = 0.215975 (* 1 = 0.215975 loss)\nI0817 21:13:20.035459 17829 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 21:15:39.166805 17829 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:16:59.484247 17829 solver.cpp:404]     Test net output #0: accuracy = 0.47812\nI0817 21:16:59.484474 17829 solver.cpp:404]     Test net output #1: loss = 2.55323 (* 1 = 2.55323 loss)\nI0817 21:17:00.787641 17829 solver.cpp:228] Iteration 8500, loss = 0.153753\nI0817 21:17:00.787684 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:17:00.787708 17829 solver.cpp:244]     Train net output #1: loss = 0.153753 (* 1 = 0.153753 loss)\nI0817 21:17:00.897065 17829 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 21:19:20.024651 17829 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:20:41.281954 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4178\nI0817 21:20:41.282265 17829 solver.cpp:404]     Test net output #1: loss = 3.26674 (* 1 = 3.26674 loss)\nI0817 21:20:42.588785 17829 solver.cpp:228] Iteration 8600, loss = 0.165569\nI0817 21:20:42.588845 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:20:42.588861 17829 solver.cpp:244]     Train net output #1: loss = 0.165569 (* 1 = 0.165569 loss)\nI0817 21:20:42.691562 17829 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 21:23:01.862010 17829 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:24:23.130898 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4678\nI0817 21:24:23.131216 17829 solver.cpp:404]     Test net output #1: loss = 2.76674 (* 1 = 2.76674 loss)\nI0817 21:24:24.438401 17829 solver.cpp:228] Iteration 8700, loss = 0.178685\nI0817 21:24:24.438462 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:24:24.438479 17829 solver.cpp:244]     Train net output #1: loss = 0.178685 (* 1 = 0.178685 loss)\nI0817 21:24:24.545754 17829 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 21:26:43.850404 17829 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:28:05.106998 17829 solver.cpp:404]     Test net output #0: accuracy = 0.49384\nI0817 21:28:05.107265 17829 solver.cpp:404]     Test net output #1: loss = 2.59751 (* 1 = 2.59751 loss)\nI0817 21:28:06.414429 17829 solver.cpp:228] Iteration 8800, loss = 0.105401\nI0817 21:28:06.414494 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:28:06.414511 17829 solver.cpp:244]     Train net output #1: loss = 0.1054 (* 1 = 
0.1054 loss)\nI0817 21:28:06.517590 17829 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 21:30:25.755113 17829 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:31:47.011629 17829 solver.cpp:404]     Test net output #0: accuracy = 0.51552\nI0817 21:31:47.011955 17829 solver.cpp:404]     Test net output #1: loss = 2.24616 (* 1 = 2.24616 loss)\nI0817 21:31:48.319993 17829 solver.cpp:228] Iteration 8900, loss = 0.157675\nI0817 21:31:48.320053 17829 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 21:31:48.320070 17829 solver.cpp:244]     Train net output #1: loss = 0.157675 (* 1 = 0.157675 loss)\nI0817 21:31:48.425544 17829 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 21:34:07.913365 17829 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:35:29.181998 17829 solver.cpp:404]     Test net output #0: accuracy = 0.52792\nI0817 21:35:29.182312 17829 solver.cpp:404]     Test net output #1: loss = 2.18507 (* 1 = 2.18507 loss)\nI0817 21:35:30.490141 17829 solver.cpp:228] Iteration 9000, loss = 0.166081\nI0817 21:35:30.490200 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:35:30.490217 17829 solver.cpp:244]     Train net output #1: loss = 0.16608 (* 1 = 0.16608 loss)\nI0817 21:35:30.592792 17829 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 21:37:49.813283 17829 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:39:11.094775 17829 solver.cpp:404]     Test net output #0: accuracy = 0.50112\nI0817 21:39:11.095067 17829 solver.cpp:404]     Test net output #1: loss = 2.54887 (* 1 = 2.54887 loss)\nI0817 21:39:12.401283 17829 solver.cpp:228] Iteration 9100, loss = 0.0816541\nI0817 21:39:12.401345 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:39:12.401363 17829 solver.cpp:244]     Train net output #1: loss = 0.0816537 (* 1 = 0.0816537 loss)\nI0817 21:39:12.505178 17829 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 21:41:31.703161 17829 solver.cpp:337] Iteration 
9200, Testing net (#0)\nI0817 21:42:52.973901 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5404\nI0817 21:42:52.974210 17829 solver.cpp:404]     Test net output #1: loss = 2.26713 (* 1 = 2.26713 loss)\nI0817 21:42:54.280285 17829 solver.cpp:228] Iteration 9200, loss = 0.0855425\nI0817 21:42:54.280347 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:42:54.280364 17829 solver.cpp:244]     Train net output #1: loss = 0.0855421 (* 1 = 0.0855421 loss)\nI0817 21:42:54.386520 17829 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 21:45:13.650979 17829 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:46:34.926549 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5484\nI0817 21:46:34.926870 17829 solver.cpp:404]     Test net output #1: loss = 1.98915 (* 1 = 1.98915 loss)\nI0817 21:46:36.234388 17829 solver.cpp:228] Iteration 9300, loss = 0.0629079\nI0817 21:46:36.234449 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:46:36.234467 17829 solver.cpp:244]     Train net output #1: loss = 0.0629075 (* 1 = 0.0629075 loss)\nI0817 21:46:36.340147 17829 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 21:48:55.681761 17829 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 21:50:16.957245 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5356\nI0817 21:50:16.957563 17829 solver.cpp:404]     Test net output #1: loss = 1.91193 (* 1 = 1.91193 loss)\nI0817 21:50:18.264587 17829 solver.cpp:228] Iteration 9400, loss = 0.153964\nI0817 21:50:18.264647 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:50:18.264664 17829 solver.cpp:244]     Train net output #1: loss = 0.153964 (* 1 = 0.153964 loss)\nI0817 21:50:18.370575 17829 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 21:52:37.537076 17829 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 21:53:58.812444 17829 solver.cpp:404]     Test net output #0: accuracy = 0.47824\nI0817 21:53:58.812774 17829 
solver.cpp:404]     Test net output #1: loss = 3.05299 (* 1 = 3.05299 loss)\nI0817 21:54:00.118793 17829 solver.cpp:228] Iteration 9500, loss = 0.103696\nI0817 21:54:00.118854 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:54:00.118872 17829 solver.cpp:244]     Train net output #1: loss = 0.103695 (* 1 = 0.103695 loss)\nI0817 21:54:00.221997 17829 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 21:56:19.234205 17829 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 21:57:40.514026 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5184\nI0817 21:57:40.514322 17829 solver.cpp:404]     Test net output #1: loss = 2.75662 (* 1 = 2.75662 loss)\nI0817 21:57:41.821663 17829 solver.cpp:228] Iteration 9600, loss = 0.0870178\nI0817 21:57:41.821724 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:57:41.821743 17829 solver.cpp:244]     Train net output #1: loss = 0.0870173 (* 1 = 0.0870173 loss)\nI0817 21:57:41.921421 17829 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 22:00:01.004411 17829 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:01:22.256191 17829 solver.cpp:404]     Test net output #0: accuracy = 0.58576\nI0817 22:01:22.256507 17829 solver.cpp:404]     Test net output #1: loss = 2.07262 (* 1 = 2.07262 loss)\nI0817 22:01:23.564227 17829 solver.cpp:228] Iteration 9700, loss = 0.134917\nI0817 22:01:23.564290 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 22:01:23.564306 17829 solver.cpp:244]     Train net output #1: loss = 0.134916 (* 1 = 0.134916 loss)\nI0817 22:01:23.669865 17829 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 22:03:42.844918 17829 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:05:04.105923 17829 solver.cpp:404]     Test net output #0: accuracy = 0.55164\nI0817 22:05:04.106215 17829 solver.cpp:404]     Test net output #1: loss = 2.58813 (* 1 = 2.58813 loss)\nI0817 22:05:05.414057 17829 solver.cpp:228] Iteration 9800, loss = 
0.0739598\nI0817 22:05:05.414119 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:05:05.414137 17829 solver.cpp:244]     Train net output #1: loss = 0.0739594 (* 1 = 0.0739594 loss)\nI0817 22:05:05.515868 17829 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 22:07:24.773178 17829 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:08:46.034355 17829 solver.cpp:404]     Test net output #0: accuracy = 0.52176\nI0817 22:08:46.034670 17829 solver.cpp:404]     Test net output #1: loss = 2.46358 (* 1 = 2.46358 loss)\nI0817 22:08:47.342383 17829 solver.cpp:228] Iteration 9900, loss = 0.140947\nI0817 22:08:47.342444 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:08:47.342461 17829 solver.cpp:244]     Train net output #1: loss = 0.140947 (* 1 = 0.140947 loss)\nI0817 22:08:47.444185 17829 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 22:11:06.633437 17829 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:12:27.885195 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66628\nI0817 22:12:27.885493 17829 solver.cpp:404]     Test net output #1: loss = 1.26663 (* 1 = 1.26663 loss)\nI0817 22:12:29.191751 17829 solver.cpp:228] Iteration 10000, loss = 0.127858\nI0817 22:12:29.191812 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 22:12:29.191829 17829 solver.cpp:244]     Train net output #1: loss = 0.127858 (* 1 = 0.127858 loss)\nI0817 22:12:29.297797 17829 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 22:14:48.659407 17829 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:16:09.898768 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65772\nI0817 22:16:09.899009 17829 solver.cpp:404]     Test net output #1: loss = 1.44009 (* 1 = 1.44009 loss)\nI0817 22:16:11.205283 17829 solver.cpp:228] Iteration 10100, loss = 0.0982864\nI0817 22:16:11.205345 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 22:16:11.205363 17829 
solver.cpp:244]     Train net output #1: loss = 0.098286 (* 1 = 0.098286 loss)\nI0817 22:16:11.307914 17829 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 22:18:30.585044 17829 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:19:51.827548 17829 solver.cpp:404]     Test net output #0: accuracy = 0.589\nI0817 22:19:51.827821 17829 solver.cpp:404]     Test net output #1: loss = 1.83678 (* 1 = 1.83678 loss)\nI0817 22:19:53.134382 17829 solver.cpp:228] Iteration 10200, loss = 0.192267\nI0817 22:19:53.134443 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 22:19:53.134460 17829 solver.cpp:244]     Train net output #1: loss = 0.192267 (* 1 = 0.192267 loss)\nI0817 22:19:53.236490 17829 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 22:22:12.365207 17829 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:23:33.601856 17829 solver.cpp:404]     Test net output #0: accuracy = 0.59044\nI0817 22:23:33.602088 17829 solver.cpp:404]     Test net output #1: loss = 1.92658 (* 1 = 1.92658 loss)\nI0817 22:23:34.909068 17829 solver.cpp:228] Iteration 10300, loss = 0.150306\nI0817 22:23:34.909131 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:23:34.909148 17829 solver.cpp:244]     Train net output #1: loss = 0.150306 (* 1 = 0.150306 loss)\nI0817 22:23:35.016620 17829 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 22:25:54.104766 17829 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:27:15.304754 17829 solver.cpp:404]     Test net output #0: accuracy = 0.52924\nI0817 22:27:15.305053 17829 solver.cpp:404]     Test net output #1: loss = 2.76872 (* 1 = 2.76872 loss)\nI0817 22:27:16.612790 17829 solver.cpp:228] Iteration 10400, loss = 0.092532\nI0817 22:27:16.612854 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:27:16.612871 17829 solver.cpp:244]     Train net output #1: loss = 0.0925315 (* 1 = 0.0925315 loss)\nI0817 22:27:16.715682 17829 sgd_solver.cpp:166] 
Iteration 10400, lr = 0.35\nI0817 22:29:35.838513 17829 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:30:57.059839 17829 solver.cpp:404]     Test net output #0: accuracy = 0.56148\nI0817 22:30:57.060143 17829 solver.cpp:404]     Test net output #1: loss = 1.96991 (* 1 = 1.96991 loss)\nI0817 22:30:58.367638 17829 solver.cpp:228] Iteration 10500, loss = 0.161851\nI0817 22:30:58.367696 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 22:30:58.367712 17829 solver.cpp:244]     Train net output #1: loss = 0.16185 (* 1 = 0.16185 loss)\nI0817 22:30:58.471956 17829 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0817 22:33:17.733310 17829 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 22:34:39.009832 17829 solver.cpp:404]     Test net output #0: accuracy = 0.59188\nI0817 22:34:39.010131 17829 solver.cpp:404]     Test net output #1: loss = 1.94961 (* 1 = 1.94961 loss)\nI0817 22:34:40.318326 17829 solver.cpp:228] Iteration 10600, loss = 0.114073\nI0817 22:34:40.318387 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:34:40.318404 17829 solver.cpp:244]     Train net output #1: loss = 0.114072 (* 1 = 0.114072 loss)\nI0817 22:34:40.420156 17829 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 22:36:59.546052 17829 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 22:38:20.795476 17829 solver.cpp:404]     Test net output #0: accuracy = 0.60732\nI0817 22:38:20.795780 17829 solver.cpp:404]     Test net output #1: loss = 1.86015 (* 1 = 1.86015 loss)\nI0817 22:38:22.103142 17829 solver.cpp:228] Iteration 10700, loss = 0.0787454\nI0817 22:38:22.103201 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:38:22.103219 17829 solver.cpp:244]     Train net output #1: loss = 0.0787449 (* 1 = 0.0787449 loss)\nI0817 22:38:22.204028 17829 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0817 22:40:41.453757 17829 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 22:42:02.678331 17829 
solver.cpp:404]     Test net output #0: accuracy = 0.54704\nI0817 22:42:02.678568 17829 solver.cpp:404]     Test net output #1: loss = 1.8624 (* 1 = 1.8624 loss)\nI0817 22:42:03.986186 17829 solver.cpp:228] Iteration 10800, loss = 0.100702\nI0817 22:42:03.986239 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:42:03.986256 17829 solver.cpp:244]     Train net output #1: loss = 0.100701 (* 1 = 0.100701 loss)\nI0817 22:42:04.090709 17829 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 22:44:23.490001 17829 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 22:45:44.759394 17829 solver.cpp:404]     Test net output #0: accuracy = 0.53096\nI0817 22:45:44.759732 17829 solver.cpp:404]     Test net output #1: loss = 2.11019 (* 1 = 2.11019 loss)\nI0817 22:45:46.066915 17829 solver.cpp:228] Iteration 10900, loss = 0.0718342\nI0817 22:45:46.066972 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:45:46.066989 17829 solver.cpp:244]     Train net output #1: loss = 0.0718337 (* 1 = 0.0718337 loss)\nI0817 22:45:46.171684 17829 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 22:48:05.554023 17829 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 22:49:26.821369 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5408\nI0817 22:49:26.821696 17829 solver.cpp:404]     Test net output #1: loss = 2.3663 (* 1 = 2.3663 loss)\nI0817 22:49:28.129583 17829 solver.cpp:228] Iteration 11000, loss = 0.0503204\nI0817 22:49:28.129640 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:49:28.129657 17829 solver.cpp:244]     Train net output #1: loss = 0.0503199 (* 1 = 0.0503199 loss)\nI0817 22:49:28.235147 17829 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 22:51:47.594947 17829 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 22:53:08.865196 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6276\nI0817 22:53:08.865514 17829 solver.cpp:404]     Test net output #1: loss = 
1.86448 (* 1 = 1.86448 loss)\nI0817 22:53:10.172029 17829 solver.cpp:228] Iteration 11100, loss = 0.0392486\nI0817 22:53:10.172088 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:53:10.172107 17829 solver.cpp:244]     Train net output #1: loss = 0.0392482 (* 1 = 0.0392482 loss)\nI0817 22:53:10.271567 17829 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 22:55:29.377337 17829 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 22:56:50.647796 17829 solver.cpp:404]     Test net output #0: accuracy = 0.61712\nI0817 22:56:50.648092 17829 solver.cpp:404]     Test net output #1: loss = 1.96489 (* 1 = 1.96489 loss)\nI0817 22:56:51.955478 17829 solver.cpp:228] Iteration 11200, loss = 0.0889674\nI0817 22:56:51.955545 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:56:51.955564 17829 solver.cpp:244]     Train net output #1: loss = 0.0889669 (* 1 = 0.0889669 loss)\nI0817 22:56:52.055649 17829 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 22:59:11.109195 17829 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:00:32.382935 17829 solver.cpp:404]     Test net output #0: accuracy = 0.62904\nI0817 23:00:32.383263 17829 solver.cpp:404]     Test net output #1: loss = 1.64222 (* 1 = 1.64222 loss)\nI0817 23:00:33.690454 17829 solver.cpp:228] Iteration 11300, loss = 0.0824781\nI0817 23:00:33.690521 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:00:33.690537 17829 solver.cpp:244]     Train net output #1: loss = 0.0824776 (* 1 = 0.0824776 loss)\nI0817 23:00:33.789062 17829 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 23:02:52.998554 17829 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:04:14.263960 17829 solver.cpp:404]     Test net output #0: accuracy = 0.62652\nI0817 23:04:14.264284 17829 solver.cpp:404]     Test net output #1: loss = 1.74848 (* 1 = 1.74848 loss)\nI0817 23:04:15.570508 17829 solver.cpp:228] Iteration 11400, loss = 0.0761916\nI0817 
23:04:15.570574 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:04:15.570591 17829 solver.cpp:244]     Train net output #1: loss = 0.0761911 (* 1 = 0.0761911 loss)\nI0817 23:04:15.672020 17829 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 23:06:34.918277 17829 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:07:56.185264 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63332\nI0817 23:07:56.185582 17829 solver.cpp:404]     Test net output #1: loss = 1.56241 (* 1 = 1.56241 loss)\nI0817 23:07:57.491909 17829 solver.cpp:228] Iteration 11500, loss = 0.0888201\nI0817 23:07:57.491971 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:07:57.491987 17829 solver.cpp:244]     Train net output #1: loss = 0.0888196 (* 1 = 0.0888196 loss)\nI0817 23:07:57.593250 17829 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 23:10:16.700803 17829 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:11:37.965672 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63508\nI0817 23:11:37.965973 17829 solver.cpp:404]     Test net output #1: loss = 1.52562 (* 1 = 1.52562 loss)\nI0817 23:11:39.272938 17829 solver.cpp:228] Iteration 11600, loss = 0.0646362\nI0817 23:11:39.272996 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:11:39.273015 17829 solver.cpp:244]     Train net output #1: loss = 0.0646357 (* 1 = 0.0646357 loss)\nI0817 23:11:39.374959 17829 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 23:13:58.524351 17829 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:15:19.801319 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67\nI0817 23:15:19.801643 17829 solver.cpp:404]     Test net output #1: loss = 1.41339 (* 1 = 1.41339 loss)\nI0817 23:15:21.108567 17829 solver.cpp:228] Iteration 11700, loss = 0.108245\nI0817 23:15:21.108629 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 23:15:21.108645 17829 solver.cpp:244]     
Train net output #1: loss = 0.108245 (* 1 = 0.108245 loss)\nI0817 23:15:21.215673 17829 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 23:17:40.678761 17829 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:19:01.919308 17829 solver.cpp:404]     Test net output #0: accuracy = 0.56552\nI0817 23:19:01.919585 17829 solver.cpp:404]     Test net output #1: loss = 1.97754 (* 1 = 1.97754 loss)\nI0817 23:19:03.226804 17829 solver.cpp:228] Iteration 11800, loss = 0.0768161\nI0817 23:19:03.226867 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:19:03.226884 17829 solver.cpp:244]     Train net output #1: loss = 0.0768156 (* 1 = 0.0768156 loss)\nI0817 23:19:03.332062 17829 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 23:21:22.699371 17829 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:22:43.953593 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66588\nI0817 23:22:43.953887 17829 solver.cpp:404]     Test net output #1: loss = 1.38753 (* 1 = 1.38753 loss)\nI0817 23:22:45.261461 17829 solver.cpp:228] Iteration 11900, loss = 0.139012\nI0817 23:22:45.261525 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:22:45.261544 17829 solver.cpp:244]     Train net output #1: loss = 0.139012 (* 1 = 0.139012 loss)\nI0817 23:22:45.369928 17829 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 23:25:04.906596 17829 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:26:26.158349 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67748\nI0817 23:26:26.158686 17829 solver.cpp:404]     Test net output #1: loss = 1.32754 (* 1 = 1.32754 loss)\nI0817 23:26:27.466033 17829 solver.cpp:228] Iteration 12000, loss = 0.0547813\nI0817 23:26:27.466095 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:26:27.466114 17829 solver.cpp:244]     Train net output #1: loss = 0.0547809 (* 1 = 0.0547809 loss)\nI0817 23:26:27.566726 17829 sgd_solver.cpp:166] Iteration 12000, lr = 
0.35\nI0817 23:28:46.782486 17829 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 23:30:08.029040 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65816\nI0817 23:30:08.029341 17829 solver.cpp:404]     Test net output #1: loss = 1.54023 (* 1 = 1.54023 loss)\nI0817 23:30:09.336421 17829 solver.cpp:228] Iteration 12100, loss = 0.135948\nI0817 23:30:09.336483 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 23:30:09.336500 17829 solver.cpp:244]     Train net output #1: loss = 0.135948 (* 1 = 0.135948 loss)\nI0817 23:30:09.440798 17829 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0817 23:32:28.567559 17829 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:33:49.831465 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68288\nI0817 23:33:49.831801 17829 solver.cpp:404]     Test net output #1: loss = 1.39735 (* 1 = 1.39735 loss)\nI0817 23:33:51.139711 17829 solver.cpp:228] Iteration 12200, loss = 0.0513539\nI0817 23:33:51.139773 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:33:51.139791 17829 solver.cpp:244]     Train net output #1: loss = 0.0513534 (* 1 = 0.0513534 loss)\nI0817 23:33:51.243544 17829 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0817 23:36:10.509433 17829 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0817 23:37:31.774518 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6366\nI0817 23:37:31.774845 17829 solver.cpp:404]     Test net output #1: loss = 1.60055 (* 1 = 1.60055 loss)\nI0817 23:37:33.080898 17829 solver.cpp:228] Iteration 12300, loss = 0.103967\nI0817 23:37:33.080960 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:37:33.080977 17829 solver.cpp:244]     Train net output #1: loss = 0.103966 (* 1 = 0.103966 loss)\nI0817 23:37:33.185349 17829 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0817 23:39:52.390413 17829 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0817 23:41:13.618587 17829 solver.cpp:404]     
Test net output #0: accuracy = 0.66096\nI0817 23:41:13.618863 17829 solver.cpp:404]     Test net output #1: loss = 1.59908 (* 1 = 1.59908 loss)\nI0817 23:41:14.925654 17829 solver.cpp:228] Iteration 12400, loss = 0.142507\nI0817 23:41:14.925719 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:41:14.925736 17829 solver.cpp:244]     Train net output #1: loss = 0.142506 (* 1 = 0.142506 loss)\nI0817 23:41:15.031978 17829 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0817 23:43:34.252172 17829 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0817 23:44:55.483448 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66668\nI0817 23:44:55.483710 17829 solver.cpp:404]     Test net output #1: loss = 1.46851 (* 1 = 1.46851 loss)\nI0817 23:44:56.790365 17829 solver.cpp:228] Iteration 12500, loss = 0.0854017\nI0817 23:44:56.790428 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:44:56.790446 17829 solver.cpp:244]     Train net output #1: loss = 0.0854013 (* 1 = 0.0854013 loss)\nI0817 23:44:56.895269 17829 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0817 23:47:16.083261 17829 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0817 23:48:37.280275 17829 solver.cpp:404]     Test net output #0: accuracy = 0.64844\nI0817 23:48:37.280591 17829 solver.cpp:404]     Test net output #1: loss = 1.66711 (* 1 = 1.66711 loss)\nI0817 23:48:38.588565 17829 solver.cpp:228] Iteration 12600, loss = 0.0464312\nI0817 23:48:38.588629 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:48:38.588646 17829 solver.cpp:244]     Train net output #1: loss = 0.0464308 (* 1 = 0.0464308 loss)\nI0817 23:48:38.690033 17829 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0817 23:50:58.185343 17829 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0817 23:52:19.392804 17829 solver.cpp:404]     Test net output #0: accuracy = 0.60672\nI0817 23:52:19.393108 17829 solver.cpp:404]     Test net output #1: loss = 2.06606 (* 1 = 
2.06606 loss)\nI0817 23:52:20.700778 17829 solver.cpp:228] Iteration 12700, loss = 0.147218\nI0817 23:52:20.700839 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:52:20.700857 17829 solver.cpp:244]     Train net output #1: loss = 0.147217 (* 1 = 0.147217 loss)\nI0817 23:52:20.799253 17829 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0817 23:54:40.026538 17829 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0817 23:56:01.286286 17829 solver.cpp:404]     Test net output #0: accuracy = 0.64576\nI0817 23:56:01.286587 17829 solver.cpp:404]     Test net output #1: loss = 1.46872 (* 1 = 1.46872 loss)\nI0817 23:56:02.594566 17829 solver.cpp:228] Iteration 12800, loss = 0.0485153\nI0817 23:56:02.594621 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:56:02.594640 17829 solver.cpp:244]     Train net output #1: loss = 0.0485149 (* 1 = 0.0485149 loss)\nI0817 23:56:02.695019 17829 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0817 23:58:21.847059 17829 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0817 23:59:43.098702 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70864\nI0817 23:59:43.099026 17829 solver.cpp:404]     Test net output #1: loss = 1.17899 (* 1 = 1.17899 loss)\nI0817 23:59:44.405706 17829 solver.cpp:228] Iteration 12900, loss = 0.176973\nI0817 23:59:44.405763 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 23:59:44.405779 17829 solver.cpp:244]     Train net output #1: loss = 0.176972 (* 1 = 0.176972 loss)\nI0817 23:59:44.506566 17829 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0818 00:02:02.959802 17829 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:03:24.240525 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63664\nI0818 00:03:24.240816 17829 solver.cpp:404]     Test net output #1: loss = 1.68646 (* 1 = 1.68646 loss)\nI0818 00:03:25.548604 17829 solver.cpp:228] Iteration 13000, loss = 0.141708\nI0818 00:03:25.548660 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 00:03:25.548676 17829 solver.cpp:244]     Train net output #1: loss = 0.141707 (* 1 = 0.141707 loss)\nI0818 00:03:25.646934 17829 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0818 00:05:44.124570 17829 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:07:05.374171 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69108\nI0818 00:07:05.374487 17829 solver.cpp:404]     Test net output #1: loss = 1.29619 (* 1 = 1.29619 loss)\nI0818 00:07:06.682042 17829 solver.cpp:228] Iteration 13100, loss = 0.112416\nI0818 00:07:06.682099 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:07:06.682116 17829 solver.cpp:244]     Train net output #1: loss = 0.112416 (* 1 = 0.112416 loss)\nI0818 00:07:06.776913 17829 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0818 00:09:25.218094 17829 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:10:46.463877 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65372\nI0818 00:10:46.464169 17829 solver.cpp:404]     Test net output #1: loss = 1.56954 (* 1 = 1.56954 loss)\nI0818 00:10:47.771849 17829 solver.cpp:228] Iteration 13200, loss = 0.0744585\nI0818 00:10:47.771904 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:10:47.771921 17829 solver.cpp:244]     Train net output #1: loss = 0.0744581 (* 1 = 0.0744581 loss)\nI0818 00:10:47.865475 17829 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0818 00:13:06.326061 17829 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:14:27.589612 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5516\nI0818 00:14:27.589933 17829 solver.cpp:404]     Test net output #1: loss = 2.9193 (* 1 = 2.9193 loss)\nI0818 00:14:28.897970 17829 solver.cpp:228] Iteration 13300, loss = 0.099182\nI0818 00:14:28.898026 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:14:28.898044 17829 solver.cpp:244]     Train net output #1: loss = 
0.0991817 (* 1 = 0.0991817 loss)\nI0818 00:14:28.993769 17829 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0818 00:16:47.428431 17829 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:18:08.692446 17829 solver.cpp:404]     Test net output #0: accuracy = 0.52352\nI0818 00:18:08.692777 17829 solver.cpp:404]     Test net output #1: loss = 3.23294 (* 1 = 3.23294 loss)\nI0818 00:18:09.999488 17829 solver.cpp:228] Iteration 13400, loss = 0.0959374\nI0818 00:18:09.999547 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:18:09.999565 17829 solver.cpp:244]     Train net output #1: loss = 0.095937 (* 1 = 0.095937 loss)\nI0818 00:18:10.094214 17829 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0818 00:20:28.537344 17829 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:21:49.800057 17829 solver.cpp:404]     Test net output #0: accuracy = 0.61724\nI0818 00:21:49.800356 17829 solver.cpp:404]     Test net output #1: loss = 1.88787 (* 1 = 1.88787 loss)\nI0818 00:21:51.106932 17829 solver.cpp:228] Iteration 13500, loss = 0.0731479\nI0818 00:21:51.106989 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:21:51.107007 17829 solver.cpp:244]     Train net output #1: loss = 0.0731475 (* 1 = 0.0731475 loss)\nI0818 00:21:51.210168 17829 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0818 00:24:09.713645 17829 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:25:30.977147 17829 solver.cpp:404]     Test net output #0: accuracy = 0.58944\nI0818 00:25:30.977475 17829 solver.cpp:404]     Test net output #1: loss = 1.98745 (* 1 = 1.98745 loss)\nI0818 00:25:32.285576 17829 solver.cpp:228] Iteration 13600, loss = 0.0246335\nI0818 00:25:32.285635 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:25:32.285652 17829 solver.cpp:244]     Train net output #1: loss = 0.0246331 (* 1 = 0.0246331 loss)\nI0818 00:25:32.380867 17829 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0818 
00:27:50.868729 17829 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:29:12.137739 17829 solver.cpp:404]     Test net output #0: accuracy = 0.59268\nI0818 00:29:12.138037 17829 solver.cpp:404]     Test net output #1: loss = 2.3083 (* 1 = 2.3083 loss)\nI0818 00:29:13.444228 17829 solver.cpp:228] Iteration 13700, loss = 0.108582\nI0818 00:29:13.444279 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:29:13.444296 17829 solver.cpp:244]     Train net output #1: loss = 0.108582 (* 1 = 0.108582 loss)\nI0818 00:29:13.547773 17829 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0818 00:31:32.039791 17829 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:32:53.304859 17829 solver.cpp:404]     Test net output #0: accuracy = 0.702\nI0818 00:32:53.305176 17829 solver.cpp:404]     Test net output #1: loss = 1.3171 (* 1 = 1.3171 loss)\nI0818 00:32:54.611769 17829 solver.cpp:228] Iteration 13800, loss = 0.0619374\nI0818 00:32:54.611824 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:32:54.611841 17829 solver.cpp:244]     Train net output #1: loss = 0.0619371 (* 1 = 0.0619371 loss)\nI0818 00:32:54.710199 17829 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0818 00:35:13.238174 17829 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 00:36:34.370944 17829 solver.cpp:404]     Test net output #0: accuracy = 0.62484\nI0818 00:36:34.371245 17829 solver.cpp:404]     Test net output #1: loss = 1.88267 (* 1 = 1.88267 loss)\nI0818 00:36:35.678264 17829 solver.cpp:228] Iteration 13900, loss = 0.0798879\nI0818 00:36:35.678323 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:36:35.678340 17829 solver.cpp:244]     Train net output #1: loss = 0.0798875 (* 1 = 0.0798875 loss)\nI0818 00:36:35.781532 17829 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0818 00:38:54.319099 17829 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 00:40:15.258466 17829 solver.cpp:404]     Test net output 
#0: accuracy = 0.70264\nI0818 00:40:15.258780 17829 solver.cpp:404]     Test net output #1: loss = 1.25082 (* 1 = 1.25082 loss)\nI0818 00:40:16.565526 17829 solver.cpp:228] Iteration 14000, loss = 0.123582\nI0818 00:40:16.565582 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:40:16.565599 17829 solver.cpp:244]     Train net output #1: loss = 0.123582 (* 1 = 0.123582 loss)\nI0818 00:40:16.660692 17829 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 00:42:35.167320 17829 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 00:43:55.884690 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7\nI0818 00:43:55.884943 17829 solver.cpp:404]     Test net output #1: loss = 1.18205 (* 1 = 1.18205 loss)\nI0818 00:43:57.191289 17829 solver.cpp:228] Iteration 14100, loss = 0.10791\nI0818 00:43:57.191349 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:43:57.191366 17829 solver.cpp:244]     Train net output #1: loss = 0.10791 (* 1 = 0.10791 loss)\nI0818 00:43:57.285116 17829 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0818 00:46:15.771646 17829 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 00:47:36.333626 17829 solver.cpp:404]     Test net output #0: accuracy = 0.54488\nI0818 00:47:36.333894 17829 solver.cpp:404]     Test net output #1: loss = 2.89638 (* 1 = 2.89638 loss)\nI0818 00:47:37.640611 17829 solver.cpp:228] Iteration 14200, loss = 0.0688706\nI0818 00:47:37.640669 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:47:37.640687 17829 solver.cpp:244]     Train net output #1: loss = 0.0688703 (* 1 = 0.0688703 loss)\nI0818 00:47:37.742664 17829 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0818 00:49:56.319547 17829 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 00:51:16.931612 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67484\nI0818 00:51:16.931927 17829 solver.cpp:404]     Test net output #1: loss = 1.45952 (* 1 = 1.45952 loss)\nI0818 
00:51:18.238060 17829 solver.cpp:228] Iteration 14300, loss = 0.0408217\nI0818 00:51:18.238117 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:51:18.238135 17829 solver.cpp:244]     Train net output #1: loss = 0.0408213 (* 1 = 0.0408213 loss)\nI0818 00:51:18.339648 17829 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0818 00:53:36.821436 17829 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 00:54:58.096834 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65644\nI0818 00:54:58.097153 17829 solver.cpp:404]     Test net output #1: loss = 1.58613 (* 1 = 1.58613 loss)\nI0818 00:54:59.405372 17829 solver.cpp:228] Iteration 14400, loss = 0.111611\nI0818 00:54:59.405432 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:54:59.405449 17829 solver.cpp:244]     Train net output #1: loss = 0.11161 (* 1 = 0.11161 loss)\nI0818 00:54:59.502843 17829 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0818 00:57:18.040140 17829 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 00:58:39.282279 17829 solver.cpp:404]     Test net output #0: accuracy = 0.5902\nI0818 00:58:39.282590 17829 solver.cpp:404]     Test net output #1: loss = 2.28118 (* 1 = 2.28118 loss)\nI0818 00:58:40.588412 17829 solver.cpp:228] Iteration 14500, loss = 0.0575773\nI0818 00:58:40.588467 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:58:40.588485 17829 solver.cpp:244]     Train net output #1: loss = 0.0575769 (* 1 = 0.0575769 loss)\nI0818 00:58:40.691467 17829 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0818 01:00:59.254891 17829 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:02:20.302039 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63716\nI0818 01:02:20.302347 17829 solver.cpp:404]     Test net output #1: loss = 1.82575 (* 1 = 1.82575 loss)\nI0818 01:02:21.608932 17829 solver.cpp:228] Iteration 14600, loss = 0.0828514\nI0818 01:02:21.608989 17829 solver.cpp:244]     Train net 
output #0: accuracy = 0.968\nI0818 01:02:21.609005 17829 solver.cpp:244]     Train net output #1: loss = 0.082851 (* 1 = 0.082851 loss)\nI0818 01:02:21.704720 17829 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0818 01:04:40.171196 17829 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:06:01.419502 17829 solver.cpp:404]     Test net output #0: accuracy = 0.64668\nI0818 01:06:01.419807 17829 solver.cpp:404]     Test net output #1: loss = 2.10185 (* 1 = 2.10185 loss)\nI0818 01:06:02.725985 17829 solver.cpp:228] Iteration 14700, loss = 0.109298\nI0818 01:06:02.726030 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:06:02.726047 17829 solver.cpp:244]     Train net output #1: loss = 0.109298 (* 1 = 0.109298 loss)\nI0818 01:06:02.818403 17829 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0818 01:08:21.317195 17829 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:09:42.587396 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68024\nI0818 01:09:42.587714 17829 solver.cpp:404]     Test net output #1: loss = 1.55003 (* 1 = 1.55003 loss)\nI0818 01:09:43.897246 17829 solver.cpp:228] Iteration 14800, loss = 0.0720578\nI0818 01:09:43.897302 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:09:43.897320 17829 solver.cpp:244]     Train net output #1: loss = 0.0720574 (* 1 = 0.0720574 loss)\nI0818 01:09:43.994666 17829 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0818 01:12:02.627621 17829 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:13:23.706941 17829 solver.cpp:404]     Test net output #0: accuracy = 0.62984\nI0818 01:13:23.707263 17829 solver.cpp:404]     Test net output #1: loss = 2.19736 (* 1 = 2.19736 loss)\nI0818 01:13:25.016556 17829 solver.cpp:228] Iteration 14900, loss = 0.0804854\nI0818 01:13:25.016613 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:13:25.016631 17829 solver.cpp:244]     Train net output #1: loss = 0.0804851 (* 1 = 0.0804851 
loss)\nI0818 01:13:25.110713 17829 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0818 01:15:43.528069 17829 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:17:04.691341 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6834\nI0818 01:17:04.691655 17829 solver.cpp:404]     Test net output #1: loss = 1.41557 (* 1 = 1.41557 loss)\nI0818 01:17:06.000443 17829 solver.cpp:228] Iteration 15000, loss = 0.0625123\nI0818 01:17:06.000496 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:17:06.000514 17829 solver.cpp:244]     Train net output #1: loss = 0.062512 (* 1 = 0.062512 loss)\nI0818 01:17:06.100818 17829 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0818 01:19:24.710407 17829 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:20:45.785239 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69156\nI0818 01:20:45.785531 17829 solver.cpp:404]     Test net output #1: loss = 1.33599 (* 1 = 1.33599 loss)\nI0818 01:20:47.094862 17829 solver.cpp:228] Iteration 15100, loss = 0.0710944\nI0818 01:20:47.094919 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:20:47.094936 17829 solver.cpp:244]     Train net output #1: loss = 0.0710941 (* 1 = 0.0710941 loss)\nI0818 01:20:47.193845 17829 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0818 01:23:05.763931 17829 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:24:26.760598 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7004\nI0818 01:24:26.760888 17829 solver.cpp:404]     Test net output #1: loss = 1.4682 (* 1 = 1.4682 loss)\nI0818 01:24:28.069865 17829 solver.cpp:228] Iteration 15200, loss = 0.0772258\nI0818 01:24:28.069923 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:24:28.069941 17829 solver.cpp:244]     Train net output #1: loss = 0.0772254 (* 1 = 0.0772254 loss)\nI0818 01:24:28.170722 17829 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0818 01:26:46.568450 17829 solver.cpp:337] 
Iteration 15300, Testing net (#0)\nI0818 01:28:07.778247 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72188\nI0818 01:28:07.778542 17829 solver.cpp:404]     Test net output #1: loss = 1.31329 (* 1 = 1.31329 loss)\nI0818 01:28:09.088275 17829 solver.cpp:228] Iteration 15300, loss = 0.0546173\nI0818 01:28:09.088330 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:28:09.088347 17829 solver.cpp:244]     Train net output #1: loss = 0.0546169 (* 1 = 0.0546169 loss)\nI0818 01:28:09.182432 17829 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0818 01:30:27.567317 17829 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:31:48.783179 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70308\nI0818 01:31:48.783486 17829 solver.cpp:404]     Test net output #1: loss = 1.23938 (* 1 = 1.23938 loss)\nI0818 01:31:50.092496 17829 solver.cpp:228] Iteration 15400, loss = 0.0863986\nI0818 01:31:50.092556 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:31:50.092572 17829 solver.cpp:244]     Train net output #1: loss = 0.0863982 (* 1 = 0.0863982 loss)\nI0818 01:31:50.192268 17829 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0818 01:34:08.723742 17829 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 01:35:29.942701 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70852\nI0818 01:35:29.942991 17829 solver.cpp:404]     Test net output #1: loss = 1.23759 (* 1 = 1.23759 loss)\nI0818 01:35:31.252398 17829 solver.cpp:228] Iteration 15500, loss = 0.0258162\nI0818 01:35:31.252452 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:35:31.252470 17829 solver.cpp:244]     Train net output #1: loss = 0.0258159 (* 1 = 0.0258159 loss)\nI0818 01:35:31.346985 17829 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0818 01:37:49.773339 17829 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 01:39:10.940387 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.67132\nI0818 01:39:10.940681 17829 solver.cpp:404]     Test net output #1: loss = 1.6867 (* 1 = 1.6867 loss)\nI0818 01:39:12.250185 17829 solver.cpp:228] Iteration 15600, loss = 0.0624947\nI0818 01:39:12.250237 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:39:12.250255 17829 solver.cpp:244]     Train net output #1: loss = 0.0624944 (* 1 = 0.0624944 loss)\nI0818 01:39:12.347229 17829 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0818 01:41:30.889009 17829 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 01:42:52.071175 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69208\nI0818 01:42:52.071471 17829 solver.cpp:404]     Test net output #1: loss = 1.46882 (* 1 = 1.46882 loss)\nI0818 01:42:53.381242 17829 solver.cpp:228] Iteration 15700, loss = 0.132939\nI0818 01:42:53.381297 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 01:42:53.381314 17829 solver.cpp:244]     Train net output #1: loss = 0.132938 (* 1 = 0.132938 loss)\nI0818 01:42:53.477715 17829 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0818 01:45:12.023636 17829 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 01:46:32.918777 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68824\nI0818 01:46:32.919095 17829 solver.cpp:404]     Test net output #1: loss = 1.56938 (* 1 = 1.56938 loss)\nI0818 01:46:34.227918 17829 solver.cpp:228] Iteration 15800, loss = 0.118136\nI0818 01:46:34.227980 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:46:34.227998 17829 solver.cpp:244]     Train net output #1: loss = 0.118136 (* 1 = 0.118136 loss)\nI0818 01:46:34.327728 17829 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0818 01:48:52.744866 17829 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 01:50:14.019619 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66248\nI0818 01:50:14.019933 17829 solver.cpp:404]     Test net output #1: loss = 1.66813 (* 1 = 1.66813 loss)\nI0818 01:50:15.329351 
17829 solver.cpp:228] Iteration 15900, loss = 0.0254671\nI0818 01:50:15.329407 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:50:15.329426 17829 solver.cpp:244]     Train net output #1: loss = 0.0254668 (* 1 = 0.0254668 loss)\nI0818 01:50:15.422217 17829 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0818 01:52:33.872853 17829 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 01:53:55.161312 17829 solver.cpp:404]     Test net output #0: accuracy = 0.61928\nI0818 01:53:55.161618 17829 solver.cpp:404]     Test net output #1: loss = 2.0657 (* 1 = 2.0657 loss)\nI0818 01:53:56.471261 17829 solver.cpp:228] Iteration 16000, loss = 0.0659493\nI0818 01:53:56.471320 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:53:56.471339 17829 solver.cpp:244]     Train net output #1: loss = 0.065949 (* 1 = 0.065949 loss)\nI0818 01:53:56.570835 17829 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0818 01:56:15.116006 17829 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 01:57:36.404037 17829 solver.cpp:404]     Test net output #0: accuracy = 0.4228\nI0818 01:57:36.404345 17829 solver.cpp:404]     Test net output #1: loss = 3.60207 (* 1 = 3.60207 loss)\nI0818 01:57:37.713752 17829 solver.cpp:228] Iteration 16100, loss = 0.192615\nI0818 01:57:37.713809 17829 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 01:57:37.713826 17829 solver.cpp:244]     Train net output #1: loss = 0.192614 (* 1 = 0.192614 loss)\nI0818 01:57:37.811777 17829 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0818 01:59:56.230912 17829 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:01:17.511390 17829 solver.cpp:404]     Test net output #0: accuracy = 0.61812\nI0818 02:01:17.511726 17829 solver.cpp:404]     Test net output #1: loss = 2.02093 (* 1 = 2.02093 loss)\nI0818 02:01:18.821683 17829 solver.cpp:228] Iteration 16200, loss = 0.120629\nI0818 02:01:18.821737 17829 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0818 02:01:18.821753 17829 solver.cpp:244]     Train net output #1: loss = 0.120629 (* 1 = 0.120629 loss)\nI0818 02:01:18.918170 17829 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0818 02:03:37.353147 17829 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:04:58.628446 17829 solver.cpp:404]     Test net output #0: accuracy = 0.59868\nI0818 02:04:58.628782 17829 solver.cpp:404]     Test net output #1: loss = 2.27147 (* 1 = 2.27147 loss)\nI0818 02:04:59.938235 17829 solver.cpp:228] Iteration 16300, loss = 0.172284\nI0818 02:04:59.938287 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 02:04:59.938304 17829 solver.cpp:244]     Train net output #1: loss = 0.172284 (* 1 = 0.172284 loss)\nI0818 02:05:00.034230 17829 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0818 02:07:18.464277 17829 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:08:39.735440 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66428\nI0818 02:08:39.735781 17829 solver.cpp:404]     Test net output #1: loss = 1.51141 (* 1 = 1.51141 loss)\nI0818 02:08:41.044975 17829 solver.cpp:228] Iteration 16400, loss = 0.0933695\nI0818 02:08:41.045028 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:08:41.045045 17829 solver.cpp:244]     Train net output #1: loss = 0.0933692 (* 1 = 0.0933692 loss)\nI0818 02:08:41.139482 17829 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0818 02:10:59.777397 17829 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:12:21.019589 17829 solver.cpp:404]     Test net output #0: accuracy = 0.62\nI0818 02:12:21.019883 17829 solver.cpp:404]     Test net output #1: loss = 2.32305 (* 1 = 2.32305 loss)\nI0818 02:12:22.329555 17829 solver.cpp:228] Iteration 16500, loss = 0.210209\nI0818 02:12:22.329604 17829 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 02:12:22.329622 17829 solver.cpp:244]     Train net output #1: loss = 0.210209 (* 1 = 0.210209 loss)\nI0818 02:12:22.422729 
17829 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0818 02:14:40.886278 17829 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:16:02.145026 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70284\nI0818 02:16:02.145331 17829 solver.cpp:404]     Test net output #1: loss = 1.46803 (* 1 = 1.46803 loss)\nI0818 02:16:03.454391 17829 solver.cpp:228] Iteration 16600, loss = 0.155225\nI0818 02:16:03.454445 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 02:16:03.454463 17829 solver.cpp:244]     Train net output #1: loss = 0.155225 (* 1 = 0.155225 loss)\nI0818 02:16:03.553995 17829 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0818 02:18:22.137311 17829 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:19:43.394028 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71592\nI0818 02:19:43.394357 17829 solver.cpp:404]     Test net output #1: loss = 1.38952 (* 1 = 1.38952 loss)\nI0818 02:19:44.703570 17829 solver.cpp:228] Iteration 16700, loss = 0.0648943\nI0818 02:19:44.703626 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:19:44.703642 17829 solver.cpp:244]     Train net output #1: loss = 0.064894 (* 1 = 0.064894 loss)\nI0818 02:19:44.799229 17829 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0818 02:22:03.249663 17829 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:23:24.508170 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7064\nI0818 02:23:24.508488 17829 solver.cpp:404]     Test net output #1: loss = 1.45439 (* 1 = 1.45439 loss)\nI0818 02:23:25.819084 17829 solver.cpp:228] Iteration 16800, loss = 0.0985215\nI0818 02:23:25.819141 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 02:23:25.819157 17829 solver.cpp:244]     Train net output #1: loss = 0.0985212 (* 1 = 0.0985212 loss)\nI0818 02:23:25.913954 17829 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0818 02:25:44.426657 17829 solver.cpp:337] Iteration 16900, Testing net 
(#0)\nI0818 02:27:05.688634 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70556\nI0818 02:27:05.688961 17829 solver.cpp:404]     Test net output #1: loss = 1.39367 (* 1 = 1.39367 loss)\nI0818 02:27:06.999203 17829 solver.cpp:228] Iteration 16900, loss = 0.0511572\nI0818 02:27:06.999260 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:27:06.999277 17829 solver.cpp:244]     Train net output #1: loss = 0.0511569 (* 1 = 0.0511569 loss)\nI0818 02:27:07.099720 17829 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0818 02:29:25.653512 17829 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:30:46.922354 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70364\nI0818 02:30:46.922685 17829 solver.cpp:404]     Test net output #1: loss = 1.37934 (* 1 = 1.37934 loss)\nI0818 02:30:48.233855 17829 solver.cpp:228] Iteration 17000, loss = 0.0610127\nI0818 02:30:48.233911 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:30:48.233928 17829 solver.cpp:244]     Train net output #1: loss = 0.0610124 (* 1 = 0.0610124 loss)\nI0818 02:30:48.330008 17829 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0818 02:33:06.775625 17829 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 02:34:27.863183 17829 solver.cpp:404]     Test net output #0: accuracy = 0.53492\nI0818 02:34:27.863451 17829 solver.cpp:404]     Test net output #1: loss = 3.13731 (* 1 = 3.13731 loss)\nI0818 02:34:29.173837 17829 solver.cpp:228] Iteration 17100, loss = 0.0217282\nI0818 02:34:29.173897 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:34:29.173913 17829 solver.cpp:244]     Train net output #1: loss = 0.021728 (* 1 = 0.021728 loss)\nI0818 02:34:29.271013 17829 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0818 02:36:47.722326 17829 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 02:38:08.591060 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70964\nI0818 02:38:08.591302 17829 
solver.cpp:404]     Test net output #1: loss = 1.61812 (* 1 = 1.61812 loss)\nI0818 02:38:09.900774 17829 solver.cpp:228] Iteration 17200, loss = 0.0453736\nI0818 02:38:09.900835 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:38:09.900852 17829 solver.cpp:244]     Train net output #1: loss = 0.0453734 (* 1 = 0.0453734 loss)\nI0818 02:38:09.998946 17829 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0818 02:40:28.465145 17829 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 02:41:49.449847 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63236\nI0818 02:41:49.450091 17829 solver.cpp:404]     Test net output #1: loss = 2.3484 (* 1 = 2.3484 loss)\nI0818 02:41:50.759166 17829 solver.cpp:228] Iteration 17300, loss = 0.123227\nI0818 02:41:50.759225 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 02:41:50.759241 17829 solver.cpp:244]     Train net output #1: loss = 0.123227 (* 1 = 0.123227 loss)\nI0818 02:41:50.853543 17829 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0818 02:44:09.312619 17829 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 02:45:30.511893 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7242\nI0818 02:45:30.512151 17829 solver.cpp:404]     Test net output #1: loss = 1.42316 (* 1 = 1.42316 loss)\nI0818 02:45:31.822943 17829 solver.cpp:228] Iteration 17400, loss = 0.0412785\nI0818 02:45:31.823004 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:45:31.823020 17829 solver.cpp:244]     Train net output #1: loss = 0.0412783 (* 1 = 0.0412783 loss)\nI0818 02:45:31.916893 17829 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0818 02:47:50.383255 17829 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 02:49:11.617707 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65404\nI0818 02:49:11.617957 17829 solver.cpp:404]     Test net output #1: loss = 2.03629 (* 1 = 2.03629 loss)\nI0818 02:49:12.929070 17829 solver.cpp:228] Iteration 
17500, loss = 0.0634083\nI0818 02:49:12.929126 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:49:12.929142 17829 solver.cpp:244]     Train net output #1: loss = 0.0634081 (* 1 = 0.0634081 loss)\nI0818 02:49:13.022359 17829 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0818 02:51:31.434586 17829 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 02:52:52.648227 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73088\nI0818 02:52:52.648535 17829 solver.cpp:404]     Test net output #1: loss = 1.30747 (* 1 = 1.30747 loss)\nI0818 02:52:53.959550 17829 solver.cpp:228] Iteration 17600, loss = 0.0777847\nI0818 02:52:53.959607 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:52:53.959625 17829 solver.cpp:244]     Train net output #1: loss = 0.0777845 (* 1 = 0.0777845 loss)\nI0818 02:52:54.056983 17829 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0818 02:55:12.495749 17829 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 02:56:33.738097 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67368\nI0818 02:56:33.738363 17829 solver.cpp:404]     Test net output #1: loss = 1.82247 (* 1 = 1.82247 loss)\nI0818 02:56:35.049239 17829 solver.cpp:228] Iteration 17700, loss = 0.0750997\nI0818 02:56:35.049298 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:56:35.049314 17829 solver.cpp:244]     Train net output #1: loss = 0.0750996 (* 1 = 0.0750996 loss)\nI0818 02:56:35.146834 17829 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0818 02:58:53.612133 17829 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:00:14.840366 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70052\nI0818 03:00:14.840622 17829 solver.cpp:404]     Test net output #1: loss = 1.62316 (* 1 = 1.62316 loss)\nI0818 03:00:16.150221 17829 solver.cpp:228] Iteration 17800, loss = 0.143965\nI0818 03:00:16.150277 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 
03:00:16.150295 17829 solver.cpp:244]     Train net output #1: loss = 0.143965 (* 1 = 0.143965 loss)\nI0818 03:00:16.248111 17829 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0818 03:02:34.691361 17829 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:03:55.672531 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0818 03:03:55.672788 17829 solver.cpp:404]     Test net output #1: loss = 1.50247 (* 1 = 1.50247 loss)\nI0818 03:03:56.981919 17829 solver.cpp:228] Iteration 17900, loss = 0.077329\nI0818 03:03:56.981976 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:03:56.981992 17829 solver.cpp:244]     Train net output #1: loss = 0.0773289 (* 1 = 0.0773289 loss)\nI0818 03:03:57.075593 17829 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0818 03:06:15.513842 17829 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:07:36.701351 17829 solver.cpp:404]     Test net output #0: accuracy = 0.57032\nI0818 03:07:36.701594 17829 solver.cpp:404]     Test net output #1: loss = 2.66101 (* 1 = 2.66101 loss)\nI0818 03:07:38.010877 17829 solver.cpp:228] Iteration 18000, loss = 0.0470882\nI0818 03:07:38.010932 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:07:38.010951 17829 solver.cpp:244]     Train net output #1: loss = 0.047088 (* 1 = 0.047088 loss)\nI0818 03:07:38.107074 17829 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0818 03:09:56.535467 17829 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:11:17.728849 17829 solver.cpp:404]     Test net output #0: accuracy = 0.745\nI0818 03:11:17.729110 17829 solver.cpp:404]     Test net output #1: loss = 1.14198 (* 1 = 1.14198 loss)\nI0818 03:11:19.038774 17829 solver.cpp:228] Iteration 18100, loss = 0.124427\nI0818 03:11:19.038830 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:11:19.038847 17829 solver.cpp:244]     Train net output #1: loss = 0.124427 (* 1 = 0.124427 loss)\nI0818 03:11:19.137756 17829 
sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0818 03:13:37.697875 17829 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:14:58.903764 17829 solver.cpp:404]     Test net output #0: accuracy = 0.66944\nI0818 03:14:58.904049 17829 solver.cpp:404]     Test net output #1: loss = 1.52775 (* 1 = 1.52775 loss)\nI0818 03:15:00.214495 17829 solver.cpp:228] Iteration 18200, loss = 0.130306\nI0818 03:15:00.214558 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 03:15:00.214576 17829 solver.cpp:244]     Train net output #1: loss = 0.130306 (* 1 = 0.130306 loss)\nI0818 03:15:00.306197 17829 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0818 03:17:18.739486 17829 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:18:40.017642 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72144\nI0818 03:18:40.017880 17829 solver.cpp:404]     Test net output #1: loss = 1.35352 (* 1 = 1.35352 loss)\nI0818 03:18:41.328301 17829 solver.cpp:228] Iteration 18300, loss = 0.0869123\nI0818 03:18:41.328359 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:18:41.328377 17829 solver.cpp:244]     Train net output #1: loss = 0.0869122 (* 1 = 0.0869122 loss)\nI0818 03:18:41.423387 17829 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0818 03:20:59.919977 17829 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:22:21.042441 17829 solver.cpp:404]     Test net output #0: accuracy = 0.64072\nI0818 03:22:21.042695 17829 solver.cpp:404]     Test net output #1: loss = 1.72675 (* 1 = 1.72675 loss)\nI0818 03:22:22.352210 17829 solver.cpp:228] Iteration 18400, loss = 0.154438\nI0818 03:22:22.352269 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:22:22.352286 17829 solver.cpp:244]     Train net output #1: loss = 0.154438 (* 1 = 0.154438 loss)\nI0818 03:22:22.449574 17829 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0818 03:24:40.888320 17829 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 
03:26:01.896538 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74176\nI0818 03:26:01.896767 17829 solver.cpp:404]     Test net output #1: loss = 1.23848 (* 1 = 1.23848 loss)\nI0818 03:26:03.206972 17829 solver.cpp:228] Iteration 18500, loss = 0.0609903\nI0818 03:26:03.207031 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:26:03.207047 17829 solver.cpp:244]     Train net output #1: loss = 0.0609901 (* 1 = 0.0609901 loss)\nI0818 03:26:03.299295 17829 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0818 03:28:21.815084 17829 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:29:42.613932 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71088\nI0818 03:29:42.614197 17829 solver.cpp:404]     Test net output #1: loss = 1.44679 (* 1 = 1.44679 loss)\nI0818 03:29:43.924952 17829 solver.cpp:228] Iteration 18600, loss = 0.0455625\nI0818 03:29:43.925011 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:29:43.925029 17829 solver.cpp:244]     Train net output #1: loss = 0.0455623 (* 1 = 0.0455623 loss)\nI0818 03:29:44.019547 17829 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0818 03:32:02.423722 17829 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:33:23.518198 17829 solver.cpp:404]     Test net output #0: accuracy = 0.60736\nI0818 03:33:23.518504 17829 solver.cpp:404]     Test net output #1: loss = 2.42438 (* 1 = 2.42438 loss)\nI0818 03:33:24.828037 17829 solver.cpp:228] Iteration 18700, loss = 0.123472\nI0818 03:33:24.828099 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:33:24.828116 17829 solver.cpp:244]     Train net output #1: loss = 0.123472 (* 1 = 0.123472 loss)\nI0818 03:33:24.925048 17829 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0818 03:35:43.371822 17829 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 03:37:04.370808 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6748\nI0818 03:37:04.371055 17829 solver.cpp:404]     
Test net output #1: loss = 1.68703 (* 1 = 1.68703 loss)\nI0818 03:37:05.681557 17829 solver.cpp:228] Iteration 18800, loss = 0.0813895\nI0818 03:37:05.681619 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:37:05.681638 17829 solver.cpp:244]     Train net output #1: loss = 0.0813894 (* 1 = 0.0813894 loss)\nI0818 03:37:05.769698 17829 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0818 03:39:24.193207 17829 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 03:40:45.407471 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72216\nI0818 03:40:45.407781 17829 solver.cpp:404]     Test net output #1: loss = 1.27303 (* 1 = 1.27303 loss)\nI0818 03:40:46.717430 17829 solver.cpp:228] Iteration 18900, loss = 0.0606649\nI0818 03:40:46.717494 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:40:46.717511 17829 solver.cpp:244]     Train net output #1: loss = 0.0606647 (* 1 = 0.0606647 loss)\nI0818 03:40:46.812381 17829 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0818 03:43:05.257242 17829 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 03:44:26.435048 17829 solver.cpp:404]     Test net output #0: accuracy = 0.63836\nI0818 03:44:26.435294 17829 solver.cpp:404]     Test net output #1: loss = 2.05848 (* 1 = 2.05848 loss)\nI0818 03:44:27.745491 17829 solver.cpp:228] Iteration 19000, loss = 0.0723335\nI0818 03:44:27.745556 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:44:27.745574 17829 solver.cpp:244]     Train net output #1: loss = 0.0723334 (* 1 = 0.0723334 loss)\nI0818 03:44:27.844113 17829 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0818 03:46:46.313968 17829 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 03:48:07.481634 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70916\nI0818 03:48:07.481878 17829 solver.cpp:404]     Test net output #1: loss = 1.39669 (* 1 = 1.39669 loss)\nI0818 03:48:08.793432 17829 solver.cpp:228] Iteration 19100, loss = 
0.0644321\nI0818 03:48:08.793493 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:48:08.793510 17829 solver.cpp:244]     Train net output #1: loss = 0.064432 (* 1 = 0.064432 loss)\nI0818 03:48:08.893189 17829 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0818 03:50:27.366152 17829 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 03:51:48.539741 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74496\nI0818 03:51:48.540014 17829 solver.cpp:404]     Test net output #1: loss = 1.20451 (* 1 = 1.20451 loss)\nI0818 03:51:49.850911 17829 solver.cpp:228] Iteration 19200, loss = 0.0373713\nI0818 03:51:49.850970 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:51:49.850987 17829 solver.cpp:244]     Train net output #1: loss = 0.0373712 (* 1 = 0.0373712 loss)\nI0818 03:51:49.942984 17829 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0818 03:54:08.440147 17829 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 03:55:29.633483 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67916\nI0818 03:55:29.633728 17829 solver.cpp:404]     Test net output #1: loss = 1.81224 (* 1 = 1.81224 loss)\nI0818 03:55:30.944511 17829 solver.cpp:228] Iteration 19300, loss = 0.0527786\nI0818 03:55:30.944577 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 03:55:30.944595 17829 solver.cpp:244]     Train net output #1: loss = 0.0527785 (* 1 = 0.0527785 loss)\nI0818 03:55:31.036356 17829 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0818 03:57:49.578570 17829 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 03:59:10.750475 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70172\nI0818 03:59:10.750741 17829 solver.cpp:404]     Test net output #1: loss = 1.57376 (* 1 = 1.57376 loss)\nI0818 03:59:12.061792 17829 solver.cpp:228] Iteration 19400, loss = 0.0412118\nI0818 03:59:12.061851 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:59:12.061869 17829 
solver.cpp:244]     Train net output #1: loss = 0.0412118 (* 1 = 0.0412118 loss)\nI0818 03:59:12.158957 17829 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0818 04:01:30.606077 17829 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:02:51.764725 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75816\nI0818 04:02:51.765030 17829 solver.cpp:404]     Test net output #1: loss = 1.04385 (* 1 = 1.04385 loss)\nI0818 04:02:53.075798 17829 solver.cpp:228] Iteration 19500, loss = 0.0894989\nI0818 04:02:53.075855 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:02:53.075872 17829 solver.cpp:244]     Train net output #1: loss = 0.0894988 (* 1 = 0.0894988 loss)\nI0818 04:02:53.171985 17829 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0818 04:05:11.773597 17829 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:06:32.988171 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7298\nI0818 04:06:32.988435 17829 solver.cpp:404]     Test net output #1: loss = 1.33317 (* 1 = 1.33317 loss)\nI0818 04:06:34.298421 17829 solver.cpp:228] Iteration 19600, loss = 0.0623946\nI0818 04:06:34.298481 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:06:34.298498 17829 solver.cpp:244]     Train net output #1: loss = 0.0623945 (* 1 = 0.0623945 loss)\nI0818 04:06:34.398422 17829 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0818 04:08:52.901446 17829 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:10:14.098606 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69152\nI0818 04:10:14.098866 17829 solver.cpp:404]     Test net output #1: loss = 1.80796 (* 1 = 1.80796 loss)\nI0818 04:10:15.408462 17829 solver.cpp:228] Iteration 19700, loss = 0.056269\nI0818 04:10:15.408524 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:10:15.408542 17829 solver.cpp:244]     Train net output #1: loss = 0.0562689 (* 1 = 0.0562689 loss)\nI0818 04:10:15.506886 17829 sgd_solver.cpp:166] 
Iteration 19700, lr = 0.35\nI0818 04:12:34.090085 17829 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:13:55.267181 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76408\nI0818 04:13:55.267457 17829 solver.cpp:404]     Test net output #1: loss = 0.976289 (* 1 = 0.976289 loss)\nI0818 04:13:56.576998 17829 solver.cpp:228] Iteration 19800, loss = 0.0259838\nI0818 04:13:56.577056 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:13:56.577074 17829 solver.cpp:244]     Train net output #1: loss = 0.0259836 (* 1 = 0.0259836 loss)\nI0818 04:13:56.673389 17829 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0818 04:16:15.221958 17829 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:17:36.405766 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74976\nI0818 04:17:36.406039 17829 solver.cpp:404]     Test net output #1: loss = 1.1287 (* 1 = 1.1287 loss)\nI0818 04:17:37.715318 17829 solver.cpp:228] Iteration 19900, loss = 0.0564491\nI0818 04:17:37.715369 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:17:37.715385 17829 solver.cpp:244]     Train net output #1: loss = 0.056449 (* 1 = 0.056449 loss)\nI0818 04:17:37.807931 17829 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0818 04:19:56.274271 17829 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:21:17.446669 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69908\nI0818 04:21:17.446951 17829 solver.cpp:404]     Test net output #1: loss = 1.52537 (* 1 = 1.52537 loss)\nI0818 04:21:18.756956 17829 solver.cpp:228] Iteration 20000, loss = 0.0254645\nI0818 04:21:18.757007 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:21:18.757025 17829 solver.cpp:244]     Train net output #1: loss = 0.0254644 (* 1 = 0.0254644 loss)\nI0818 04:21:18.850898 17829 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0818 04:23:37.370312 17829 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:24:58.558075 
17829 solver.cpp:404]     Test net output #0: accuracy = 0.6784\nI0818 04:24:58.558357 17829 solver.cpp:404]     Test net output #1: loss = 1.79668 (* 1 = 1.79668 loss)\nI0818 04:24:59.868266 17829 solver.cpp:228] Iteration 20100, loss = 0.0864014\nI0818 04:24:59.868315 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:24:59.868332 17829 solver.cpp:244]     Train net output #1: loss = 0.0864013 (* 1 = 0.0864013 loss)\nI0818 04:24:59.960971 17829 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0818 04:27:18.504379 17829 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:28:39.685192 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6958\nI0818 04:28:39.685431 17829 solver.cpp:404]     Test net output #1: loss = 1.51322 (* 1 = 1.51322 loss)\nI0818 04:28:40.994676 17829 solver.cpp:228] Iteration 20200, loss = 0.0618213\nI0818 04:28:40.994731 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:28:40.994748 17829 solver.cpp:244]     Train net output #1: loss = 0.0618212 (* 1 = 0.0618212 loss)\nI0818 04:28:41.095295 17829 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0818 04:30:59.619446 17829 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 04:32:20.824765 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67032\nI0818 04:32:20.825039 17829 solver.cpp:404]     Test net output #1: loss = 1.8037 (* 1 = 1.8037 loss)\nI0818 04:32:22.133476 17829 solver.cpp:228] Iteration 20300, loss = 0.0752037\nI0818 04:32:22.133539 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:32:22.133558 17829 solver.cpp:244]     Train net output #1: loss = 0.0752036 (* 1 = 0.0752036 loss)\nI0818 04:32:22.235213 17829 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0818 04:34:40.815764 17829 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0818 04:36:02.004758 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78612\nI0818 04:36:02.005017 17829 solver.cpp:404]     Test net output 
#1: loss = 0.89425 (* 1 = 0.89425 loss)\nI0818 04:36:03.314155 17829 solver.cpp:228] Iteration 20400, loss = 0.021765\nI0818 04:36:03.314213 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:36:03.314231 17829 solver.cpp:244]     Train net output #1: loss = 0.0217649 (* 1 = 0.0217649 loss)\nI0818 04:36:03.408956 17829 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0818 04:38:21.982875 17829 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 04:39:43.166494 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71644\nI0818 04:39:43.166745 17829 solver.cpp:404]     Test net output #1: loss = 1.27844 (* 1 = 1.27844 loss)\nI0818 04:39:44.476546 17829 solver.cpp:228] Iteration 20500, loss = 0.0533867\nI0818 04:39:44.476603 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:39:44.476621 17829 solver.cpp:244]     Train net output #1: loss = 0.0533866 (* 1 = 0.0533866 loss)\nI0818 04:39:44.575889 17829 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0818 04:42:03.206130 17829 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 04:43:24.408241 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68476\nI0818 04:43:24.408510 17829 solver.cpp:404]     Test net output #1: loss = 1.65819 (* 1 = 1.65819 loss)\nI0818 04:43:25.718513 17829 solver.cpp:228] Iteration 20600, loss = 0.069736\nI0818 04:43:25.718574 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:43:25.718591 17829 solver.cpp:244]     Train net output #1: loss = 0.069736 (* 1 = 0.069736 loss)\nI0818 04:43:25.809242 17829 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0818 04:45:44.274819 17829 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 04:47:05.480890 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67472\nI0818 04:47:05.481195 17829 solver.cpp:404]     Test net output #1: loss = 1.85525 (* 1 = 1.85525 loss)\nI0818 04:47:06.791851 17829 solver.cpp:228] Iteration 20700, loss = 0.0894865\nI0818 
04:47:06.791909 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:47:06.791926 17829 solver.cpp:244]     Train net output #1: loss = 0.0894864 (* 1 = 0.0894864 loss)\nI0818 04:47:06.883921 17829 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0818 04:49:25.286644 17829 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 04:50:46.475452 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76448\nI0818 04:50:46.475708 17829 solver.cpp:404]     Test net output #1: loss = 1.05182 (* 1 = 1.05182 loss)\nI0818 04:50:47.786130 17829 solver.cpp:228] Iteration 20800, loss = 0.125693\nI0818 04:50:47.786191 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:50:47.786208 17829 solver.cpp:244]     Train net output #1: loss = 0.125693 (* 1 = 0.125693 loss)\nI0818 04:50:47.885129 17829 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0818 04:53:06.284366 17829 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 04:54:27.487854 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65356\nI0818 04:54:27.488090 17829 solver.cpp:404]     Test net output #1: loss = 1.84264 (* 1 = 1.84264 loss)\nI0818 04:54:28.798404 17829 solver.cpp:228] Iteration 20900, loss = 0.0456205\nI0818 04:54:28.798461 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:54:28.798480 17829 solver.cpp:244]     Train net output #1: loss = 0.0456204 (* 1 = 0.0456204 loss)\nI0818 04:54:28.894655 17829 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0818 04:56:47.362053 17829 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 04:58:08.546314 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73444\nI0818 04:58:08.546571 17829 solver.cpp:404]     Test net output #1: loss = 1.18906 (* 1 = 1.18906 loss)\nI0818 04:58:09.857686 17829 solver.cpp:228] Iteration 21000, loss = 0.173768\nI0818 04:58:09.857741 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 04:58:09.857758 17829 solver.cpp:244]     
Train net output #1: loss = 0.173768 (* 1 = 0.173768 loss)\nI0818 04:58:09.956001 17829 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0818 05:00:28.460171 17829 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:01:49.646280 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0818 05:01:49.646569 17829 solver.cpp:404]     Test net output #1: loss = 1.05381 (* 1 = 1.05381 loss)\nI0818 05:01:50.957815 17829 solver.cpp:228] Iteration 21100, loss = 0.0827914\nI0818 05:01:50.957875 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:01:50.957892 17829 solver.cpp:244]     Train net output #1: loss = 0.0827913 (* 1 = 0.0827913 loss)\nI0818 05:01:51.056131 17829 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0818 05:04:09.483944 17829 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:05:30.667471 17829 solver.cpp:404]     Test net output #0: accuracy = 0.65284\nI0818 05:05:30.667721 17829 solver.cpp:404]     Test net output #1: loss = 1.95958 (* 1 = 1.95958 loss)\nI0818 05:05:31.976950 17829 solver.cpp:228] Iteration 21200, loss = 0.147424\nI0818 05:05:31.977008 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 05:05:31.977025 17829 solver.cpp:244]     Train net output #1: loss = 0.147424 (* 1 = 0.147424 loss)\nI0818 05:05:32.070525 17829 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0818 05:07:50.526898 17829 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:09:11.714468 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7428\nI0818 05:09:11.714748 17829 solver.cpp:404]     Test net output #1: loss = 1.19565 (* 1 = 1.19565 loss)\nI0818 05:09:13.024924 17829 solver.cpp:228] Iteration 21300, loss = 0.0780262\nI0818 05:09:13.024981 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:09:13.024998 17829 solver.cpp:244]     Train net output #1: loss = 0.0780262 (* 1 = 0.0780262 loss)\nI0818 05:09:13.122951 17829 sgd_solver.cpp:166] Iteration 21300, lr = 
0.35\nI0818 05:11:31.549721 17829 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 05:12:52.745411 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76424\nI0818 05:12:52.745705 17829 solver.cpp:404]     Test net output #1: loss = 1.13003 (* 1 = 1.13003 loss)\nI0818 05:12:54.056138 17829 solver.cpp:228] Iteration 21400, loss = 0.0372622\nI0818 05:12:54.056200 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:12:54.056217 17829 solver.cpp:244]     Train net output #1: loss = 0.0372622 (* 1 = 0.0372622 loss)\nI0818 05:12:54.147655 17829 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0818 05:15:12.634472 17829 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:16:33.900625 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77736\nI0818 05:16:33.900856 17829 solver.cpp:404]     Test net output #1: loss = 0.926763 (* 1 = 0.926763 loss)\nI0818 05:16:35.211657 17829 solver.cpp:228] Iteration 21500, loss = 0.0603963\nI0818 05:16:35.211715 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:16:35.211733 17829 solver.cpp:244]     Train net output #1: loss = 0.0603963 (* 1 = 0.0603963 loss)\nI0818 05:16:35.308027 17829 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0818 05:18:53.775221 17829 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 05:20:15.074430 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75064\nI0818 05:20:15.074725 17829 solver.cpp:404]     Test net output #1: loss = 1.20825 (* 1 = 1.20825 loss)\nI0818 05:20:16.384778 17829 solver.cpp:228] Iteration 21600, loss = 0.0875876\nI0818 05:20:16.384838 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:20:16.384862 17829 solver.cpp:244]     Train net output #1: loss = 0.0875875 (* 1 = 0.0875875 loss)\nI0818 05:20:16.475697 17829 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0818 05:22:34.929000 17829 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:23:56.189543 17829 
solver.cpp:404]     Test net output #0: accuracy = 0.77252\nI0818 05:23:56.189828 17829 solver.cpp:404]     Test net output #1: loss = 0.942582 (* 1 = 0.942582 loss)\nI0818 05:23:57.500722 17829 solver.cpp:228] Iteration 21700, loss = 0.0636736\nI0818 05:23:57.500784 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:23:57.500802 17829 solver.cpp:244]     Train net output #1: loss = 0.0636736 (* 1 = 0.0636736 loss)\nI0818 05:23:57.598394 17829 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0818 05:26:16.065726 17829 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:27:37.323546 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69428\nI0818 05:27:37.323853 17829 solver.cpp:404]     Test net output #1: loss = 1.7105 (* 1 = 1.7105 loss)\nI0818 05:27:38.634557 17829 solver.cpp:228] Iteration 21800, loss = 0.0994884\nI0818 05:27:38.634619 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:27:38.634636 17829 solver.cpp:244]     Train net output #1: loss = 0.0994883 (* 1 = 0.0994883 loss)\nI0818 05:27:38.730121 17829 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0818 05:29:57.178459 17829 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 05:31:18.427177 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75616\nI0818 05:31:18.427521 17829 solver.cpp:404]     Test net output #1: loss = 1.12064 (* 1 = 1.12064 loss)\nI0818 05:31:19.737856 17829 solver.cpp:228] Iteration 21900, loss = 0.0455426\nI0818 05:31:19.737915 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:31:19.737932 17829 solver.cpp:244]     Train net output #1: loss = 0.0455426 (* 1 = 0.0455426 loss)\nI0818 05:31:19.833719 17829 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0818 05:33:38.314354 17829 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0818 05:34:59.618693 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0818 05:34:59.618999 17829 solver.cpp:404]     Test net output #1: 
loss = 1.43176 (* 1 = 1.43176 loss)\nI0818 05:35:00.929510 17829 solver.cpp:228] Iteration 22000, loss = 0.0818054\nI0818 05:35:00.929571 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:35:00.929590 17829 solver.cpp:244]     Train net output #1: loss = 0.0818054 (* 1 = 0.0818054 loss)\nI0818 05:35:01.027580 17829 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0818 05:37:19.507539 17829 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 05:38:40.819130 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75416\nI0818 05:38:40.819463 17829 solver.cpp:404]     Test net output #1: loss = 1.03601 (* 1 = 1.03601 loss)\nI0818 05:38:42.130131 17829 solver.cpp:228] Iteration 22100, loss = 0.135419\nI0818 05:38:42.130190 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:38:42.130208 17829 solver.cpp:244]     Train net output #1: loss = 0.135419 (* 1 = 0.135419 loss)\nI0818 05:38:42.220597 17829 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0818 05:41:00.699650 17829 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 05:42:21.993973 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69844\nI0818 05:42:21.994271 17829 solver.cpp:404]     Test net output #1: loss = 1.42723 (* 1 = 1.42723 loss)\nI0818 05:42:23.304986 17829 solver.cpp:228] Iteration 22200, loss = 0.065676\nI0818 05:42:23.305047 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:42:23.305065 17829 solver.cpp:244]     Train net output #1: loss = 0.065676 (* 1 = 0.065676 loss)\nI0818 05:42:23.402828 17829 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0818 05:44:41.998787 17829 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 05:46:03.282752 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72776\nI0818 05:46:03.283056 17829 solver.cpp:404]     Test net output #1: loss = 1.19826 (* 1 = 1.19826 loss)\nI0818 05:46:04.593260 17829 solver.cpp:228] Iteration 22300, loss = 0.10016\nI0818 
05:46:04.593319 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:46:04.593339 17829 solver.cpp:244]     Train net output #1: loss = 0.10016 (* 1 = 0.10016 loss)\nI0818 05:46:04.689033 17829 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0818 05:48:23.180436 17829 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 05:49:44.413163 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7662\nI0818 05:49:44.413424 17829 solver.cpp:404]     Test net output #1: loss = 1.21149 (* 1 = 1.21149 loss)\nI0818 05:49:45.722739 17829 solver.cpp:228] Iteration 22400, loss = 0.079674\nI0818 05:49:45.722795 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 05:49:45.722811 17829 solver.cpp:244]     Train net output #1: loss = 0.0796739 (* 1 = 0.0796739 loss)\nI0818 05:49:45.816867 17829 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0818 05:52:04.283509 17829 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 05:53:25.516611 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75208\nI0818 05:53:25.516878 17829 solver.cpp:404]     Test net output #1: loss = 1.10212 (* 1 = 1.10212 loss)\nI0818 05:53:26.825878 17829 solver.cpp:228] Iteration 22500, loss = 0.147485\nI0818 05:53:26.825932 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 05:53:26.825949 17829 solver.cpp:244]     Train net output #1: loss = 0.147485 (* 1 = 0.147485 loss)\nI0818 05:53:26.920992 17829 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0818 05:55:45.368508 17829 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 05:57:06.567757 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69908\nI0818 05:57:06.568037 17829 solver.cpp:404]     Test net output #1: loss = 1.56733 (* 1 = 1.56733 loss)\nI0818 05:57:07.877751 17829 solver.cpp:228] Iteration 22600, loss = 0.0409504\nI0818 05:57:07.877806 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:57:07.877822 17829 solver.cpp:244]     Train 
net output #1: loss = 0.0409504 (* 1 = 0.0409504 loss)\nI0818 05:57:07.975210 17829 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0818 05:59:26.446432 17829 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:00:47.666028 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75172\nI0818 06:00:47.666275 17829 solver.cpp:404]     Test net output #1: loss = 1.14766 (* 1 = 1.14766 loss)\nI0818 06:00:48.977165 17829 solver.cpp:228] Iteration 22700, loss = 0.0413673\nI0818 06:00:48.977226 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:00:48.977244 17829 solver.cpp:244]     Train net output #1: loss = 0.0413672 (* 1 = 0.0413672 loss)\nI0818 06:00:49.073303 17829 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0818 06:03:07.541590 17829 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:04:28.788154 17829 solver.cpp:404]     Test net output #0: accuracy = 0.60344\nI0818 06:04:28.788393 17829 solver.cpp:404]     Test net output #1: loss = 2.38156 (* 1 = 2.38156 loss)\nI0818 06:04:30.099145 17829 solver.cpp:228] Iteration 22800, loss = 0.0357094\nI0818 06:04:30.099208 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:04:30.099226 17829 solver.cpp:244]     Train net output #1: loss = 0.0357094 (* 1 = 0.0357094 loss)\nI0818 06:04:30.196133 17829 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0818 06:06:48.668082 17829 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:08:09.902918 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74928\nI0818 06:08:09.903184 17829 solver.cpp:404]     Test net output #1: loss = 1.22031 (* 1 = 1.22031 loss)\nI0818 06:08:11.214583 17829 solver.cpp:228] Iteration 22900, loss = 0.0245618\nI0818 06:08:11.214646 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:08:11.214663 17829 solver.cpp:244]     Train net output #1: loss = 0.0245618 (* 1 = 0.0245618 loss)\nI0818 06:08:11.310128 17829 sgd_solver.cpp:166] Iteration 22900, lr = 
0.35\nI0818 06:10:29.775285 17829 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 06:11:50.971303 17829 solver.cpp:404]     Test net output #0: accuracy = 0.786\nI0818 06:11:50.971556 17829 solver.cpp:404]     Test net output #1: loss = 0.936659 (* 1 = 0.936659 loss)\nI0818 06:11:52.281937 17829 solver.cpp:228] Iteration 23000, loss = 0.110655\nI0818 06:11:52.281999 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 06:11:52.282017 17829 solver.cpp:244]     Train net output #1: loss = 0.110655 (* 1 = 0.110655 loss)\nI0818 06:11:52.379797 17829 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0818 06:14:10.841300 17829 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:15:32.077632 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69876\nI0818 06:15:32.077873 17829 solver.cpp:404]     Test net output #1: loss = 1.51376 (* 1 = 1.51376 loss)\nI0818 06:15:33.388087 17829 solver.cpp:228] Iteration 23100, loss = 0.067193\nI0818 06:15:33.388149 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:15:33.388167 17829 solver.cpp:244]     Train net output #1: loss = 0.0671929 (* 1 = 0.0671929 loss)\nI0818 06:15:33.481153 17829 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0818 06:17:51.933384 17829 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:19:13.170541 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7294\nI0818 06:19:13.170780 17829 solver.cpp:404]     Test net output #1: loss = 1.22995 (* 1 = 1.22995 loss)\nI0818 06:19:14.481001 17829 solver.cpp:228] Iteration 23200, loss = 0.0472213\nI0818 06:19:14.481062 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:19:14.481081 17829 solver.cpp:244]     Train net output #1: loss = 0.0472213 (* 1 = 0.0472213 loss)\nI0818 06:19:14.574478 17829 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0818 06:21:33.039539 17829 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:22:54.270548 17829 solver.cpp:404]     
Test net output #0: accuracy = 0.7448\nI0818 06:22:54.270845 17829 solver.cpp:404]     Test net output #1: loss = 1.40461 (* 1 = 1.40461 loss)\nI0818 06:22:55.582077 17829 solver.cpp:228] Iteration 23300, loss = 0.0697169\nI0818 06:22:55.582123 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 06:22:55.582140 17829 solver.cpp:244]     Train net output #1: loss = 0.0697168 (* 1 = 0.0697168 loss)\nI0818 06:22:55.677390 17829 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0818 06:25:14.156438 17829 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:26:35.366879 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75832\nI0818 06:26:35.367113 17829 solver.cpp:404]     Test net output #1: loss = 1.22091 (* 1 = 1.22091 loss)\nI0818 06:26:36.676906 17829 solver.cpp:228] Iteration 23400, loss = 0.0445764\nI0818 06:26:36.676970 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:26:36.676987 17829 solver.cpp:244]     Train net output #1: loss = 0.0445763 (* 1 = 0.0445763 loss)\nI0818 06:26:36.775005 17829 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0818 06:28:55.227097 17829 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 06:30:16.434593 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7978\nI0818 06:30:16.434869 17829 solver.cpp:404]     Test net output #1: loss = 0.934001 (* 1 = 0.934001 loss)\nI0818 06:30:17.745158 17829 solver.cpp:228] Iteration 23500, loss = 0.108131\nI0818 06:30:17.745220 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:30:17.745239 17829 solver.cpp:244]     Train net output #1: loss = 0.108131 (* 1 = 0.108131 loss)\nI0818 06:30:17.844202 17829 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0818 06:32:36.202649 17829 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0818 06:33:57.395086 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76948\nI0818 06:33:57.395354 17829 solver.cpp:404]     Test net output #1: loss = 1.03923 (* 1 = 
1.03923 loss)\nI0818 06:33:58.706487 17829 solver.cpp:228] Iteration 23600, loss = 0.0244959\nI0818 06:33:58.706550 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:33:58.706568 17829 solver.cpp:244]     Train net output #1: loss = 0.0244958 (* 1 = 0.0244958 loss)\nI0818 06:33:58.803854 17829 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0818 06:36:17.241379 17829 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 06:37:38.225174 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75468\nI0818 06:37:38.225430 17829 solver.cpp:404]     Test net output #1: loss = 1.09653 (* 1 = 1.09653 loss)\nI0818 06:37:39.534855 17829 solver.cpp:228] Iteration 23700, loss = 0.0700798\nI0818 06:37:39.534919 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 06:37:39.534936 17829 solver.cpp:244]     Train net output #1: loss = 0.0700796 (* 1 = 0.0700796 loss)\nI0818 06:37:39.630398 17829 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0818 06:39:58.094825 17829 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 06:41:19.044104 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71344\nI0818 06:41:19.044342 17829 solver.cpp:404]     Test net output #1: loss = 1.37302 (* 1 = 1.37302 loss)\nI0818 06:41:20.353782 17829 solver.cpp:228] Iteration 23800, loss = 0.0503507\nI0818 06:41:20.353842 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:41:20.353860 17829 solver.cpp:244]     Train net output #1: loss = 0.0503505 (* 1 = 0.0503505 loss)\nI0818 06:41:20.443924 17829 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0818 06:43:38.886076 17829 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 06:44:59.926862 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78608\nI0818 06:44:59.927111 17829 solver.cpp:404]     Test net output #1: loss = 0.997793 (* 1 = 0.997793 loss)\nI0818 06:45:01.237418 17829 solver.cpp:228] Iteration 23900, loss = 0.00909411\nI0818 06:45:01.237483 17829 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:45:01.237501 17829 solver.cpp:244]     Train net output #1: loss = 0.00909396 (* 1 = 0.00909396 loss)\nI0818 06:45:01.332262 17829 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0818 06:47:19.713354 17829 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 06:48:40.866680 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67936\nI0818 06:48:40.866919 17829 solver.cpp:404]     Test net output #1: loss = 1.68294 (* 1 = 1.68294 loss)\nI0818 06:48:42.176627 17829 solver.cpp:228] Iteration 24000, loss = 0.0384671\nI0818 06:48:42.176687 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:48:42.176705 17829 solver.cpp:244]     Train net output #1: loss = 0.0384669 (* 1 = 0.0384669 loss)\nI0818 06:48:42.271160 17829 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0818 06:51:00.723933 17829 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 06:52:21.895654 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73408\nI0818 06:52:21.895948 17829 solver.cpp:404]     Test net output #1: loss = 1.30572 (* 1 = 1.30572 loss)\nI0818 06:52:23.206979 17829 solver.cpp:228] Iteration 24100, loss = 0.0785332\nI0818 06:52:23.207042 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 06:52:23.207060 17829 solver.cpp:244]     Train net output #1: loss = 0.078533 (* 1 = 0.078533 loss)\nI0818 06:52:23.302665 17829 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0818 06:54:41.831573 17829 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 06:56:03.087450 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79044\nI0818 06:56:03.087704 17829 solver.cpp:404]     Test net output #1: loss = 0.908039 (* 1 = 0.908039 loss)\nI0818 06:56:04.397886 17829 solver.cpp:228] Iteration 24200, loss = 0.0603193\nI0818 06:56:04.397948 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:56:04.397965 17829 solver.cpp:244]     Train net output #1: 
loss = 0.0603192 (* 1 = 0.0603192 loss)\nI0818 06:56:04.492285 17829 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0818 06:58:22.939095 17829 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 06:59:44.182812 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74556\nI0818 06:59:44.183080 17829 solver.cpp:404]     Test net output #1: loss = 1.21948 (* 1 = 1.21948 loss)\nI0818 06:59:45.492717 17829 solver.cpp:228] Iteration 24300, loss = 0.0514777\nI0818 06:59:45.492779 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 06:59:45.492796 17829 solver.cpp:244]     Train net output #1: loss = 0.0514776 (* 1 = 0.0514776 loss)\nI0818 06:59:45.585783 17829 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0818 07:02:04.100644 17829 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:03:25.342031 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76708\nI0818 07:03:25.342295 17829 solver.cpp:404]     Test net output #1: loss = 1.07445 (* 1 = 1.07445 loss)\nI0818 07:03:26.653846 17829 solver.cpp:228] Iteration 24400, loss = 0.154694\nI0818 07:03:26.653908 17829 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 07:03:26.653926 17829 solver.cpp:244]     Train net output #1: loss = 0.154694 (* 1 = 0.154694 loss)\nI0818 07:03:26.750178 17829 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0818 07:05:45.180083 17829 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:07:06.383417 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI0818 07:07:06.383685 17829 solver.cpp:404]     Test net output #1: loss = 1.04063 (* 1 = 1.04063 loss)\nI0818 07:07:07.694432 17829 solver.cpp:228] Iteration 24500, loss = 0.0750388\nI0818 07:07:07.694496 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:07:07.694514 17829 solver.cpp:244]     Train net output #1: loss = 0.0750387 (* 1 = 0.0750387 loss)\nI0818 07:07:07.785758 17829 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0818 
07:09:26.339869 17829 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 07:10:47.573282 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78324\nI0818 07:10:47.573544 17829 solver.cpp:404]     Test net output #1: loss = 0.992608 (* 1 = 0.992608 loss)\nI0818 07:10:48.884256 17829 solver.cpp:228] Iteration 24600, loss = 0.0298374\nI0818 07:10:48.884315 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:10:48.884332 17829 solver.cpp:244]     Train net output #1: loss = 0.0298373 (* 1 = 0.0298373 loss)\nI0818 07:10:48.980497 17829 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0818 07:13:07.599583 17829 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:14:28.792804 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0818 07:14:28.793052 17829 solver.cpp:404]     Test net output #1: loss = 1.00751 (* 1 = 1.00751 loss)\nI0818 07:14:30.103021 17829 solver.cpp:228] Iteration 24700, loss = 0.0939556\nI0818 07:14:30.103080 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 07:14:30.103098 17829 solver.cpp:244]     Train net output #1: loss = 0.0939555 (* 1 = 0.0939555 loss)\nI0818 07:14:30.201285 17829 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0818 07:16:48.652019 17829 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:18:09.675804 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7394\nI0818 07:18:09.676081 17829 solver.cpp:404]     Test net output #1: loss = 1.34659 (* 1 = 1.34659 loss)\nI0818 07:18:10.985811 17829 solver.cpp:228] Iteration 24800, loss = 0.0496077\nI0818 07:18:10.985873 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:18:10.985890 17829 solver.cpp:244]     Train net output #1: loss = 0.0496076 (* 1 = 0.0496076 loss)\nI0818 07:18:11.086916 17829 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0818 07:20:29.529500 17829 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:21:50.697273 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.75868\nI0818 07:21:50.697515 17829 solver.cpp:404]     Test net output #1: loss = 1.12491 (* 1 = 1.12491 loss)\nI0818 07:21:52.007102 17829 solver.cpp:228] Iteration 24900, loss = 0.0383522\nI0818 07:21:52.007158 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:21:52.007175 17829 solver.cpp:244]     Train net output #1: loss = 0.0383521 (* 1 = 0.0383521 loss)\nI0818 07:21:52.102653 17829 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0818 07:24:10.517388 17829 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:25:31.601819 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78056\nI0818 07:25:31.602056 17829 solver.cpp:404]     Test net output #1: loss = 1.00155 (* 1 = 1.00155 loss)\nI0818 07:25:32.911284 17829 solver.cpp:228] Iteration 25000, loss = 0.0695819\nI0818 07:25:32.911343 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:25:32.911360 17829 solver.cpp:244]     Train net output #1: loss = 0.0695817 (* 1 = 0.0695817 loss)\nI0818 07:25:33.008327 17829 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0818 07:27:51.493005 17829 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 07:29:12.523252 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73668\nI0818 07:29:12.523540 17829 solver.cpp:404]     Test net output #1: loss = 1.44338 (* 1 = 1.44338 loss)\nI0818 07:29:13.834456 17829 solver.cpp:228] Iteration 25100, loss = 0.102031\nI0818 07:29:13.834522 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 07:29:13.834538 17829 solver.cpp:244]     Train net output #1: loss = 0.102031 (* 1 = 0.102031 loss)\nI0818 07:29:13.931277 17829 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0818 07:31:32.450865 17829 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 07:32:53.602771 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75556\nI0818 07:32:53.603071 17829 solver.cpp:404]     Test net output #1: loss = 1.01622 (* 1 = 
1.01622 loss)\nI0818 07:32:54.912995 17829 solver.cpp:228] Iteration 25200, loss = 0.0876234\nI0818 07:32:54.913058 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 07:32:54.913074 17829 solver.cpp:244]     Train net output #1: loss = 0.0876233 (* 1 = 0.0876233 loss)\nI0818 07:32:55.010609 17829 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0818 07:35:13.534157 17829 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 07:36:34.717468 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77932\nI0818 07:36:34.717751 17829 solver.cpp:404]     Test net output #1: loss = 1.01855 (* 1 = 1.01855 loss)\nI0818 07:36:36.026712 17829 solver.cpp:228] Iteration 25300, loss = 0.0257461\nI0818 07:36:36.026772 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:36:36.026788 17829 solver.cpp:244]     Train net output #1: loss = 0.025746 (* 1 = 0.025746 loss)\nI0818 07:36:36.123926 17829 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0818 07:38:54.731011 17829 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 07:40:15.967586 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69308\nI0818 07:40:15.967870 17829 solver.cpp:404]     Test net output #1: loss = 1.725 (* 1 = 1.725 loss)\nI0818 07:40:17.278156 17829 solver.cpp:228] Iteration 25400, loss = 0.0511155\nI0818 07:40:17.278215 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:40:17.278234 17829 solver.cpp:244]     Train net output #1: loss = 0.0511154 (* 1 = 0.0511154 loss)\nI0818 07:40:17.372783 17829 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0818 07:42:35.896396 17829 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 07:43:57.123929 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72312\nI0818 07:43:57.124204 17829 solver.cpp:404]     Test net output #1: loss = 1.26115 (* 1 = 1.26115 loss)\nI0818 07:43:58.435406 17829 solver.cpp:228] Iteration 25500, loss = 0.0504243\nI0818 07:43:58.435467 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:43:58.435490 17829 solver.cpp:244]     Train net output #1: loss = 0.0504242 (* 1 = 0.0504242 loss)\nI0818 07:43:58.525549 17829 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0818 07:46:17.087066 17829 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 07:47:38.314554 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7874\nI0818 07:47:38.314801 17829 solver.cpp:404]     Test net output #1: loss = 0.986211 (* 1 = 0.986211 loss)\nI0818 07:47:39.624496 17829 solver.cpp:228] Iteration 25600, loss = 0.119958\nI0818 07:47:39.624558 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 07:47:39.624577 17829 solver.cpp:244]     Train net output #1: loss = 0.119957 (* 1 = 0.119957 loss)\nI0818 07:47:39.721036 17829 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0818 07:49:58.300184 17829 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 07:51:19.557339 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71392\nI0818 07:51:19.557626 17829 solver.cpp:404]     Test net output #1: loss = 1.33177 (* 1 = 1.33177 loss)\nI0818 07:51:20.867378 17829 solver.cpp:228] Iteration 25700, loss = 0.0560136\nI0818 07:51:20.867442 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:51:20.867461 17829 solver.cpp:244]     Train net output #1: loss = 0.0560135 (* 1 = 0.0560135 loss)\nI0818 07:51:20.968072 17829 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0818 07:53:39.506433 17829 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 07:55:00.778401 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71356\nI0818 07:55:00.778681 17829 solver.cpp:404]     Test net output #1: loss = 1.49511 (* 1 = 1.49511 loss)\nI0818 07:55:02.089902 17829 solver.cpp:228] Iteration 25800, loss = 0.0757833\nI0818 07:55:02.089963 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:55:02.089982 17829 solver.cpp:244]     Train net output #1: 
loss = 0.0757831 (* 1 = 0.0757831 loss)\nI0818 07:55:02.185247 17829 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0818 07:57:20.760504 17829 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 07:58:42.029470 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76388\nI0818 07:58:42.029721 17829 solver.cpp:404]     Test net output #1: loss = 1.32871 (* 1 = 1.32871 loss)\nI0818 07:58:43.339459 17829 solver.cpp:228] Iteration 25900, loss = 0.0439086\nI0818 07:58:43.339525 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:58:43.339543 17829 solver.cpp:244]     Train net output #1: loss = 0.0439084 (* 1 = 0.0439084 loss)\nI0818 07:58:43.436363 17829 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0818 08:01:02.006692 17829 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:02:23.274006 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70848\nI0818 08:02:23.274307 17829 solver.cpp:404]     Test net output #1: loss = 1.50063 (* 1 = 1.50063 loss)\nI0818 08:02:24.583462 17829 solver.cpp:228] Iteration 26000, loss = 0.104306\nI0818 08:02:24.583530 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:02:24.583547 17829 solver.cpp:244]     Train net output #1: loss = 0.104306 (* 1 = 0.104306 loss)\nI0818 08:02:24.682129 17829 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0818 08:04:43.132161 17829 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:06:04.343834 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77132\nI0818 08:06:04.344094 17829 solver.cpp:404]     Test net output #1: loss = 1.17571 (* 1 = 1.17571 loss)\nI0818 08:06:05.654953 17829 solver.cpp:228] Iteration 26100, loss = 0.0463193\nI0818 08:06:05.655015 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:06:05.655033 17829 solver.cpp:244]     Train net output #1: loss = 0.0463191 (* 1 = 0.0463191 loss)\nI0818 08:06:05.754145 17829 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0818 
08:08:24.312757 17829 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 08:09:45.509003 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78856\nI0818 08:09:45.509280 17829 solver.cpp:404]     Test net output #1: loss = 0.947157 (* 1 = 0.947157 loss)\nI0818 08:09:46.818657 17829 solver.cpp:228] Iteration 26200, loss = 0.0744787\nI0818 08:09:46.818716 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:09:46.818732 17829 solver.cpp:244]     Train net output #1: loss = 0.0744785 (* 1 = 0.0744785 loss)\nI0818 08:09:46.913909 17829 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0818 08:12:05.463364 17829 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:13:26.695592 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7586\nI0818 08:13:26.695843 17829 solver.cpp:404]     Test net output #1: loss = 1.07943 (* 1 = 1.07943 loss)\nI0818 08:13:28.005831 17829 solver.cpp:228] Iteration 26300, loss = 0.0694119\nI0818 08:13:28.005888 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:13:28.005906 17829 solver.cpp:244]     Train net output #1: loss = 0.0694117 (* 1 = 0.0694117 loss)\nI0818 08:13:28.103538 17829 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0818 08:15:46.640588 17829 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:17:07.858834 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81884\nI0818 08:17:07.859092 17829 solver.cpp:404]     Test net output #1: loss = 0.692987 (* 1 = 0.692987 loss)\nI0818 08:17:09.168505 17829 solver.cpp:228] Iteration 26400, loss = 0.0484912\nI0818 08:17:09.168568 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:17:09.168586 17829 solver.cpp:244]     Train net output #1: loss = 0.048491 (* 1 = 0.048491 loss)\nI0818 08:17:09.262800 17829 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0818 08:19:27.962625 17829 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:20:49.155915 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.78748\nI0818 08:20:49.156219 17829 solver.cpp:404]     Test net output #1: loss = 0.90129 (* 1 = 0.90129 loss)\nI0818 08:20:50.465615 17829 solver.cpp:228] Iteration 26500, loss = 0.0386467\nI0818 08:20:50.465674 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:20:50.465692 17829 solver.cpp:244]     Train net output #1: loss = 0.0386465 (* 1 = 0.0386465 loss)\nI0818 08:20:50.559548 17829 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0818 08:23:09.187386 17829 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:24:30.395812 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74796\nI0818 08:24:30.396086 17829 solver.cpp:404]     Test net output #1: loss = 1.15434 (* 1 = 1.15434 loss)\nI0818 08:24:31.705703 17829 solver.cpp:228] Iteration 26600, loss = 0.0146547\nI0818 08:24:31.705760 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:24:31.705776 17829 solver.cpp:244]     Train net output #1: loss = 0.0146546 (* 1 = 0.0146546 loss)\nI0818 08:24:31.798213 17829 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0818 08:26:50.307770 17829 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 08:28:11.308132 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75376\nI0818 08:28:11.308375 17829 solver.cpp:404]     Test net output #1: loss = 1.10096 (* 1 = 1.10096 loss)\nI0818 08:28:12.618175 17829 solver.cpp:228] Iteration 26700, loss = 0.119176\nI0818 08:28:12.618230 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 08:28:12.618247 17829 solver.cpp:244]     Train net output #1: loss = 0.119176 (* 1 = 0.119176 loss)\nI0818 08:28:12.710415 17829 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0818 08:30:31.224261 17829 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 08:31:52.447739 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7752\nI0818 08:31:52.447994 17829 solver.cpp:404]     Test net output #1: loss = 0.997487 (* 1 = 0.997487 
loss)\nI0818 08:31:53.757779 17829 solver.cpp:228] Iteration 26800, loss = 0.0586671\nI0818 08:31:53.757838 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:31:53.757854 17829 solver.cpp:244]     Train net output #1: loss = 0.0586669 (* 1 = 0.0586669 loss)\nI0818 08:31:53.855386 17829 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0818 08:34:12.447952 17829 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 08:35:33.695950 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75172\nI0818 08:35:33.696207 17829 solver.cpp:404]     Test net output #1: loss = 1.22512 (* 1 = 1.22512 loss)\nI0818 08:35:35.006014 17829 solver.cpp:228] Iteration 26900, loss = 0.0541167\nI0818 08:35:35.006070 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:35:35.006088 17829 solver.cpp:244]     Train net output #1: loss = 0.0541165 (* 1 = 0.0541165 loss)\nI0818 08:35:35.099262 17829 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0818 08:37:53.792659 17829 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 08:39:15.054430 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6366\nI0818 08:39:15.054697 17829 solver.cpp:404]     Test net output #1: loss = 1.9578 (* 1 = 1.9578 loss)\nI0818 08:39:16.363765 17829 solver.cpp:228] Iteration 27000, loss = 0.033521\nI0818 08:39:16.363821 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:39:16.363838 17829 solver.cpp:244]     Train net output #1: loss = 0.0335208 (* 1 = 0.0335208 loss)\nI0818 08:39:16.461745 17829 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0818 08:41:35.120931 17829 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 08:42:56.315845 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74612\nI0818 08:42:56.316153 17829 solver.cpp:404]     Test net output #1: loss = 1.14834 (* 1 = 1.14834 loss)\nI0818 08:42:57.625138 17829 solver.cpp:228] Iteration 27100, loss = 0.0407493\nI0818 08:42:57.625196 17829 solver.cpp:244]   
  Train net output #0: accuracy = 0.976\nI0818 08:42:57.625214 17829 solver.cpp:244]     Train net output #1: loss = 0.0407491 (* 1 = 0.0407491 loss)\nI0818 08:42:57.723222 17829 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0818 08:45:16.286686 17829 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 08:46:37.410997 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78364\nI0818 08:46:37.411232 17829 solver.cpp:404]     Test net output #1: loss = 0.904376 (* 1 = 0.904376 loss)\nI0818 08:46:38.721148 17829 solver.cpp:228] Iteration 27200, loss = 0.0942983\nI0818 08:46:38.721205 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:46:38.721222 17829 solver.cpp:244]     Train net output #1: loss = 0.0942981 (* 1 = 0.0942981 loss)\nI0818 08:46:38.814776 17829 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0818 08:48:57.339875 17829 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 08:50:18.063035 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73924\nI0818 08:50:18.063283 17829 solver.cpp:404]     Test net output #1: loss = 1.20996 (* 1 = 1.20996 loss)\nI0818 08:50:19.372862 17829 solver.cpp:228] Iteration 27300, loss = 0.0940472\nI0818 08:50:19.372923 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 08:50:19.372941 17829 solver.cpp:244]     Train net output #1: loss = 0.094047 (* 1 = 0.094047 loss)\nI0818 08:50:19.466640 17829 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0818 08:52:37.999289 17829 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 08:53:58.957877 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI0818 08:53:58.958156 17829 solver.cpp:404]     Test net output #1: loss = 0.734861 (* 1 = 0.734861 loss)\nI0818 08:54:00.267705 17829 solver.cpp:228] Iteration 27400, loss = 0.0367849\nI0818 08:54:00.267766 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:54:00.267784 17829 solver.cpp:244]     Train net output #1: loss = 0.0367846 
(* 1 = 0.0367846 loss)\nI0818 08:54:00.357379 17829 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0818 08:56:18.914350 17829 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 08:57:40.127676 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82468\nI0818 08:57:40.127918 17829 solver.cpp:404]     Test net output #1: loss = 0.735632 (* 1 = 0.735632 loss)\nI0818 08:57:41.436970 17829 solver.cpp:228] Iteration 27500, loss = 0.0508671\nI0818 08:57:41.437029 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:57:41.437047 17829 solver.cpp:244]     Train net output #1: loss = 0.0508668 (* 1 = 0.0508668 loss)\nI0818 08:57:41.536201 17829 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0818 09:00:00.242707 17829 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:01:21.233803 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75632\nI0818 09:01:21.234094 17829 solver.cpp:404]     Test net output #1: loss = 1.09126 (* 1 = 1.09126 loss)\nI0818 09:01:22.543702 17829 solver.cpp:228] Iteration 27600, loss = 0.0720873\nI0818 09:01:22.543762 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:01:22.543781 17829 solver.cpp:244]     Train net output #1: loss = 0.072087 (* 1 = 0.072087 loss)\nI0818 09:01:22.641949 17829 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0818 09:03:41.269881 17829 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:05:02.496721 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77032\nI0818 09:05:02.496981 17829 solver.cpp:404]     Test net output #1: loss = 1.14314 (* 1 = 1.14314 loss)\nI0818 09:05:03.806617 17829 solver.cpp:228] Iteration 27700, loss = 0.0301902\nI0818 09:05:03.806677 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:05:03.806694 17829 solver.cpp:244]     Train net output #1: loss = 0.0301899 (* 1 = 0.0301899 loss)\nI0818 09:05:03.903070 17829 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0818 09:07:22.472611 17829 
solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 09:08:43.367331 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69976\nI0818 09:08:43.367585 17829 solver.cpp:404]     Test net output #1: loss = 1.5555 (* 1 = 1.5555 loss)\nI0818 09:08:44.676465 17829 solver.cpp:228] Iteration 27800, loss = 0.0432246\nI0818 09:08:44.676522 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:08:44.676540 17829 solver.cpp:244]     Train net output #1: loss = 0.0432243 (* 1 = 0.0432243 loss)\nI0818 09:08:44.774471 17829 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0818 09:11:03.409411 17829 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:12:24.391258 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75316\nI0818 09:12:24.391531 17829 solver.cpp:404]     Test net output #1: loss = 1.03021 (* 1 = 1.03021 loss)\nI0818 09:12:25.701048 17829 solver.cpp:228] Iteration 27900, loss = 0.0345949\nI0818 09:12:25.701107 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:12:25.701123 17829 solver.cpp:244]     Train net output #1: loss = 0.0345946 (* 1 = 0.0345946 loss)\nI0818 09:12:25.794229 17829 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0818 09:14:44.320426 17829 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:16:05.088158 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77668\nI0818 09:16:05.088395 17829 solver.cpp:404]     Test net output #1: loss = 1.18217 (* 1 = 1.18217 loss)\nI0818 09:16:06.397886 17829 solver.cpp:228] Iteration 28000, loss = 0.0406545\nI0818 09:16:06.397928 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:16:06.397944 17829 solver.cpp:244]     Train net output #1: loss = 0.0406542 (* 1 = 0.0406542 loss)\nI0818 09:16:06.493259 17829 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0818 09:18:25.010145 17829 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:19:46.156179 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.76376\nI0818 09:19:46.156466 17829 solver.cpp:404]     Test net output #1: loss = 1.13655 (* 1 = 1.13655 loss)\nI0818 09:19:47.465734 17829 solver.cpp:228] Iteration 28100, loss = 0.0431939\nI0818 09:19:47.465791 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:19:47.465809 17829 solver.cpp:244]     Train net output #1: loss = 0.0431936 (* 1 = 0.0431936 loss)\nI0818 09:19:47.564250 17829 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0818 09:22:06.270942 17829 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:23:27.131628 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76972\nI0818 09:23:27.131865 17829 solver.cpp:404]     Test net output #1: loss = 1.10468 (* 1 = 1.10468 loss)\nI0818 09:23:28.441015 17829 solver.cpp:228] Iteration 28200, loss = 0.0462833\nI0818 09:23:28.441072 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:23:28.441089 17829 solver.cpp:244]     Train net output #1: loss = 0.046283 (* 1 = 0.046283 loss)\nI0818 09:23:28.534798 17829 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0818 09:25:46.995384 17829 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 09:27:07.751979 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7216\nI0818 09:27:07.752224 17829 solver.cpp:404]     Test net output #1: loss = 1.50544 (* 1 = 1.50544 loss)\nI0818 09:27:09.061858 17829 solver.cpp:228] Iteration 28300, loss = 0.0362289\nI0818 09:27:09.061914 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:27:09.061931 17829 solver.cpp:244]     Train net output #1: loss = 0.0362286 (* 1 = 0.0362286 loss)\nI0818 09:27:09.151865 17829 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0818 09:29:27.495981 17829 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 09:30:48.308312 17829 solver.cpp:404]     Test net output #0: accuracy = 0.67136\nI0818 09:30:48.308612 17829 solver.cpp:404]     Test net output #1: loss = 1.96455 (* 1 = 1.96455 loss)\nI0818 
09:30:49.618492 17829 solver.cpp:228] Iteration 28400, loss = 0.0646429\nI0818 09:30:49.618551 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:30:49.618571 17829 solver.cpp:244]     Train net output #1: loss = 0.0646425 (* 1 = 0.0646425 loss)\nI0818 09:30:49.713080 17829 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0818 09:33:08.151651 17829 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 09:34:29.374712 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73068\nI0818 09:34:29.375021 17829 solver.cpp:404]     Test net output #1: loss = 1.52523 (* 1 = 1.52523 loss)\nI0818 09:34:30.685083 17829 solver.cpp:228] Iteration 28500, loss = 0.0173195\nI0818 09:34:30.685139 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:34:30.685155 17829 solver.cpp:244]     Train net output #1: loss = 0.0173192 (* 1 = 0.0173192 loss)\nI0818 09:34:30.780005 17829 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0818 09:36:49.191169 17829 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 09:38:10.438894 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70004\nI0818 09:38:10.439229 17829 solver.cpp:404]     Test net output #1: loss = 1.45525 (* 1 = 1.45525 loss)\nI0818 09:38:11.748344 17829 solver.cpp:228] Iteration 28600, loss = 0.0777785\nI0818 09:38:11.748397 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:38:11.748414 17829 solver.cpp:244]     Train net output #1: loss = 0.0777782 (* 1 = 0.0777782 loss)\nI0818 09:38:11.848619 17829 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0818 09:40:30.278620 17829 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 09:41:51.541342 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68268\nI0818 09:41:51.541669 17829 solver.cpp:404]     Test net output #1: loss = 1.85985 (* 1 = 1.85985 loss)\nI0818 09:41:52.851074 17829 solver.cpp:228] Iteration 28700, loss = 0.0705822\nI0818 09:41:52.851130 17829 solver.cpp:244]     Train net 
output #0: accuracy = 0.976\nI0818 09:41:52.851148 17829 solver.cpp:244]     Train net output #1: loss = 0.0705819 (* 1 = 0.0705819 loss)\nI0818 09:41:52.949425 17829 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0818 09:44:11.490674 17829 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 09:45:32.781775 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79684\nI0818 09:45:32.782110 17829 solver.cpp:404]     Test net output #1: loss = 0.814916 (* 1 = 0.814916 loss)\nI0818 09:45:34.091632 17829 solver.cpp:228] Iteration 28800, loss = 0.0690809\nI0818 09:45:34.091683 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:45:34.091699 17829 solver.cpp:244]     Train net output #1: loss = 0.0690806 (* 1 = 0.0690806 loss)\nI0818 09:45:34.190032 17829 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0818 09:47:52.586304 17829 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 09:49:13.856021 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6778\nI0818 09:49:13.856334 17829 solver.cpp:404]     Test net output #1: loss = 1.61276 (* 1 = 1.61276 loss)\nI0818 09:49:15.166510 17829 solver.cpp:228] Iteration 28900, loss = 0.0739607\nI0818 09:49:15.166548 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:49:15.166569 17829 solver.cpp:244]     Train net output #1: loss = 0.0739604 (* 1 = 0.0739604 loss)\nI0818 09:49:15.265758 17829 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0818 09:51:33.718410 17829 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 09:52:54.980777 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78904\nI0818 09:52:54.981086 17829 solver.cpp:404]     Test net output #1: loss = 0.94941 (* 1 = 0.94941 loss)\nI0818 09:52:56.290912 17829 solver.cpp:228] Iteration 29000, loss = 0.0590513\nI0818 09:52:56.290966 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:52:56.290983 17829 solver.cpp:244]     Train net output #1: loss = 0.059051 (* 1 = 
0.059051 loss)\nI0818 09:52:56.388391 17829 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0818 09:55:14.888324 17829 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 09:56:36.153687 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74732\nI0818 09:56:36.154000 17829 solver.cpp:404]     Test net output #1: loss = 1.26022 (* 1 = 1.26022 loss)\nI0818 09:56:37.463678 17829 solver.cpp:228] Iteration 29100, loss = 0.0444365\nI0818 09:56:37.463731 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:56:37.463747 17829 solver.cpp:244]     Train net output #1: loss = 0.0444363 (* 1 = 0.0444363 loss)\nI0818 09:56:37.559656 17829 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0818 09:58:56.144968 17829 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:00:17.401458 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79212\nI0818 10:00:17.401788 17829 solver.cpp:404]     Test net output #1: loss = 0.89792 (* 1 = 0.89792 loss)\nI0818 10:00:18.711622 17829 solver.cpp:228] Iteration 29200, loss = 0.0723958\nI0818 10:00:18.711674 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:00:18.711693 17829 solver.cpp:244]     Train net output #1: loss = 0.0723955 (* 1 = 0.0723955 loss)\nI0818 10:00:18.807667 17829 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0818 10:02:37.274184 17829 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:03:58.533001 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70092\nI0818 10:03:58.533336 17829 solver.cpp:404]     Test net output #1: loss = 1.51327 (* 1 = 1.51327 loss)\nI0818 10:03:59.842741 17829 solver.cpp:228] Iteration 29300, loss = 0.0777186\nI0818 10:03:59.842792 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:03:59.842808 17829 solver.cpp:244]     Train net output #1: loss = 0.0777184 (* 1 = 0.0777184 loss)\nI0818 10:03:59.936815 17829 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0818 10:06:18.422011 17829 
solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:07:39.716527 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76436\nI0818 10:07:39.716850 17829 solver.cpp:404]     Test net output #1: loss = 1.12024 (* 1 = 1.12024 loss)\nI0818 10:07:41.026226 17829 solver.cpp:228] Iteration 29400, loss = 0.0559815\nI0818 10:07:41.026281 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:07:41.026298 17829 solver.cpp:244]     Train net output #1: loss = 0.0559813 (* 1 = 0.0559813 loss)\nI0818 10:07:41.163753 17829 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0818 10:09:59.612380 17829 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:11:20.891451 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70808\nI0818 10:11:20.891794 17829 solver.cpp:404]     Test net output #1: loss = 1.81473 (* 1 = 1.81473 loss)\nI0818 10:11:22.201128 17829 solver.cpp:228] Iteration 29500, loss = 0.0688247\nI0818 10:11:22.201182 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:11:22.201200 17829 solver.cpp:244]     Train net output #1: loss = 0.0688246 (* 1 = 0.0688246 loss)\nI0818 10:11:22.295179 17829 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0818 10:13:40.695788 17829 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:15:02.004513 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7476\nI0818 10:15:02.004856 17829 solver.cpp:404]     Test net output #1: loss = 1.2416 (* 1 = 1.2416 loss)\nI0818 10:15:03.314554 17829 solver.cpp:228] Iteration 29600, loss = 0.0694744\nI0818 10:15:03.314615 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:15:03.314632 17829 solver.cpp:244]     Train net output #1: loss = 0.0694742 (* 1 = 0.0694742 loss)\nI0818 10:15:03.408357 17829 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0818 10:17:21.861204 17829 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:18:43.162765 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.71628\nI0818 10:18:43.163105 17829 solver.cpp:404]     Test net output #1: loss = 1.58622 (* 1 = 1.58622 loss)\nI0818 10:18:44.472657 17829 solver.cpp:228] Iteration 29700, loss = 0.0851607\nI0818 10:18:44.472714 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:18:44.472731 17829 solver.cpp:244]     Train net output #1: loss = 0.0851606 (* 1 = 0.0851606 loss)\nI0818 10:18:44.571574 17829 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0818 10:21:03.105479 17829 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 10:22:24.406363 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74528\nI0818 10:22:24.406692 17829 solver.cpp:404]     Test net output #1: loss = 1.48039 (* 1 = 1.48039 loss)\nI0818 10:22:25.716131 17829 solver.cpp:228] Iteration 29800, loss = 0.017099\nI0818 10:22:25.716187 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:22:25.716203 17829 solver.cpp:244]     Train net output #1: loss = 0.0170989 (* 1 = 0.0170989 loss)\nI0818 10:22:25.813474 17829 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0818 10:24:44.232501 17829 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:26:05.524160 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76276\nI0818 10:26:05.524502 17829 solver.cpp:404]     Test net output #1: loss = 1.04174 (* 1 = 1.04174 loss)\nI0818 10:26:06.834090 17829 solver.cpp:228] Iteration 29900, loss = 0.11576\nI0818 10:26:06.834146 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:26:06.834162 17829 solver.cpp:244]     Train net output #1: loss = 0.11576 (* 1 = 0.11576 loss)\nI0818 10:26:06.929944 17829 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0818 10:28:25.352160 17829 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 10:29:46.674165 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75204\nI0818 10:29:46.674516 17829 solver.cpp:404]     Test net output #1: loss = 1.14782 (* 1 = 1.14782 loss)\nI0818 
10:29:47.984354 17829 solver.cpp:228] Iteration 30000, loss = 0.0591199\nI0818 10:29:47.984407 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:29:47.984424 17829 solver.cpp:244]     Train net output #1: loss = 0.0591197 (* 1 = 0.0591197 loss)\nI0818 10:29:48.081048 17829 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0818 10:32:06.647593 17829 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 10:33:27.949167 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77404\nI0818 10:33:27.949523 17829 solver.cpp:404]     Test net output #1: loss = 0.913409 (* 1 = 0.913409 loss)\nI0818 10:33:29.258993 17829 solver.cpp:228] Iteration 30100, loss = 0.0550925\nI0818 10:33:29.259052 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:33:29.259069 17829 solver.cpp:244]     Train net output #1: loss = 0.0550924 (* 1 = 0.0550924 loss)\nI0818 10:33:29.355033 17829 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0818 10:35:47.910632 17829 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 10:37:09.211587 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79456\nI0818 10:37:09.211904 17829 solver.cpp:404]     Test net output #1: loss = 0.829252 (* 1 = 0.829252 loss)\nI0818 10:37:10.521301 17829 solver.cpp:228] Iteration 30200, loss = 0.0673175\nI0818 10:37:10.521361 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:37:10.521379 17829 solver.cpp:244]     Train net output #1: loss = 0.0673173 (* 1 = 0.0673173 loss)\nI0818 10:37:10.617470 17829 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0818 10:39:29.209102 17829 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 10:40:50.509413 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73632\nI0818 10:40:50.509749 17829 solver.cpp:404]     Test net output #1: loss = 1.40353 (* 1 = 1.40353 loss)\nI0818 10:40:51.819658 17829 solver.cpp:228] Iteration 30300, loss = 0.0690222\nI0818 10:40:51.819713 17829 solver.cpp:244]     
Train net output #0: accuracy = 0.976\nI0818 10:40:51.819730 17829 solver.cpp:244]     Train net output #1: loss = 0.069022 (* 1 = 0.069022 loss)\nI0818 10:40:51.911820 17829 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0818 10:43:10.325906 17829 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 10:44:31.605846 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73936\nI0818 10:44:31.606180 17829 solver.cpp:404]     Test net output #1: loss = 1.38289 (* 1 = 1.38289 loss)\nI0818 10:44:32.915621 17829 solver.cpp:228] Iteration 30400, loss = 0.0467888\nI0818 10:44:32.915678 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:44:32.915694 17829 solver.cpp:244]     Train net output #1: loss = 0.0467886 (* 1 = 0.0467886 loss)\nI0818 10:44:33.007107 17829 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0818 10:46:51.593750 17829 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 10:48:12.873713 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82152\nI0818 10:48:12.874027 17829 solver.cpp:404]     Test net output #1: loss = 0.750502 (* 1 = 0.750502 loss)\nI0818 10:48:14.183907 17829 solver.cpp:228] Iteration 30500, loss = 0.0608992\nI0818 10:48:14.183966 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:48:14.183984 17829 solver.cpp:244]     Train net output #1: loss = 0.060899 (* 1 = 0.060899 loss)\nI0818 10:48:14.282189 17829 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0818 10:50:32.680253 17829 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 10:51:53.965355 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79076\nI0818 10:51:53.965682 17829 solver.cpp:404]     Test net output #1: loss = 0.956138 (* 1 = 0.956138 loss)\nI0818 10:51:55.275586 17829 solver.cpp:228] Iteration 30600, loss = 0.0299257\nI0818 10:51:55.275645 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:51:55.275660 17829 solver.cpp:244]     Train net output #1: loss = 0.0299255 (* 
1 = 0.0299255 loss)\nI0818 10:51:55.373716 17829 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0818 10:54:13.805866 17829 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 10:55:35.108942 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76384\nI0818 10:55:35.109272 17829 solver.cpp:404]     Test net output #1: loss = 1.03576 (* 1 = 1.03576 loss)\nI0818 10:55:36.418315 17829 solver.cpp:228] Iteration 30700, loss = 0.0346499\nI0818 10:55:36.418370 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:55:36.418386 17829 solver.cpp:244]     Train net output #1: loss = 0.0346497 (* 1 = 0.0346497 loss)\nI0818 10:55:36.514775 17829 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0818 10:57:54.949898 17829 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 10:59:16.242346 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77856\nI0818 10:59:16.242686 17829 solver.cpp:404]     Test net output #1: loss = 0.934711 (* 1 = 0.934711 loss)\nI0818 10:59:17.552528 17829 solver.cpp:228] Iteration 30800, loss = 0.0328369\nI0818 10:59:17.552587 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:59:17.552603 17829 solver.cpp:244]     Train net output #1: loss = 0.0328367 (* 1 = 0.0328367 loss)\nI0818 10:59:17.644389 17829 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0818 11:01:36.043221 17829 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:02:57.322783 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76776\nI0818 11:02:57.323114 17829 solver.cpp:404]     Test net output #1: loss = 1.0206 (* 1 = 1.0206 loss)\nI0818 11:02:58.632815 17829 solver.cpp:228] Iteration 30900, loss = 0.0217118\nI0818 11:02:58.632876 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:02:58.632894 17829 solver.cpp:244]     Train net output #1: loss = 0.0217116 (* 1 = 0.0217116 loss)\nI0818 11:02:58.729101 17829 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0818 11:05:17.169209 17829 
solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:06:38.445946 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68984\nI0818 11:06:38.446286 17829 solver.cpp:404]     Test net output #1: loss = 1.6017 (* 1 = 1.6017 loss)\nI0818 11:06:39.755897 17829 solver.cpp:228] Iteration 31000, loss = 0.0965474\nI0818 11:06:39.755959 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 11:06:39.755975 17829 solver.cpp:244]     Train net output #1: loss = 0.0965472 (* 1 = 0.0965472 loss)\nI0818 11:06:39.849355 17829 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0818 11:08:58.362630 17829 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:10:19.610476 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76544\nI0818 11:10:19.610806 17829 solver.cpp:404]     Test net output #1: loss = 1.0464 (* 1 = 1.0464 loss)\nI0818 11:10:20.920138 17829 solver.cpp:228] Iteration 31100, loss = 0.0657618\nI0818 11:10:20.920199 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:10:20.920217 17829 solver.cpp:244]     Train net output #1: loss = 0.0657616 (* 1 = 0.0657616 loss)\nI0818 11:10:21.018856 17829 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0818 11:12:39.561632 17829 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:14:00.820225 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78164\nI0818 11:14:00.820570 17829 solver.cpp:404]     Test net output #1: loss = 1.14222 (* 1 = 1.14222 loss)\nI0818 11:14:02.130403 17829 solver.cpp:228] Iteration 31200, loss = 0.0140119\nI0818 11:14:02.130458 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:14:02.130476 17829 solver.cpp:244]     Train net output #1: loss = 0.0140117 (* 1 = 0.0140117 loss)\nI0818 11:14:02.229001 17829 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0818 11:16:20.695068 17829 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:17:41.976641 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.7166\nI0818 11:17:41.976984 17829 solver.cpp:404]     Test net output #1: loss = 1.39491 (* 1 = 1.39491 loss)\nI0818 11:17:43.287062 17829 solver.cpp:228] Iteration 31300, loss = 0.127988\nI0818 11:17:43.287122 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:17:43.287139 17829 solver.cpp:244]     Train net output #1: loss = 0.127988 (* 1 = 0.127988 loss)\nI0818 11:17:43.386060 17829 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0818 11:20:01.987052 17829 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:21:23.281621 17829 solver.cpp:404]     Test net output #0: accuracy = 0.651\nI0818 11:21:23.281936 17829 solver.cpp:404]     Test net output #1: loss = 2.09567 (* 1 = 2.09567 loss)\nI0818 11:21:24.591655 17829 solver.cpp:228] Iteration 31400, loss = 0.0235898\nI0818 11:21:24.591717 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:21:24.591732 17829 solver.cpp:244]     Train net output #1: loss = 0.0235897 (* 1 = 0.0235897 loss)\nI0818 11:21:24.686944 17829 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0818 11:23:43.139861 17829 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:25:04.443928 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72928\nI0818 11:25:04.444252 17829 solver.cpp:404]     Test net output #1: loss = 1.42803 (* 1 = 1.42803 loss)\nI0818 11:25:05.753710 17829 solver.cpp:228] Iteration 31500, loss = 0.0191535\nI0818 11:25:05.753769 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:25:05.753787 17829 solver.cpp:244]     Train net output #1: loss = 0.0191533 (* 1 = 0.0191533 loss)\nI0818 11:25:05.852006 17829 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0818 11:27:24.359508 17829 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 11:28:45.630004 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78012\nI0818 11:28:45.630321 17829 solver.cpp:404]     Test net output #1: loss = 1.07977 (* 1 = 1.07977 loss)\nI0818 11:28:46.939790 
17829 solver.cpp:228] Iteration 31600, loss = 0.0331565\nI0818 11:28:46.939851 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:28:46.939868 17829 solver.cpp:244]     Train net output #1: loss = 0.0331564 (* 1 = 0.0331564 loss)\nI0818 11:28:47.032462 17829 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0818 11:31:05.552748 17829 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 11:32:26.805649 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80928\nI0818 11:32:26.805965 17829 solver.cpp:404]     Test net output #1: loss = 0.856507 (* 1 = 0.856507 loss)\nI0818 11:32:28.115927 17829 solver.cpp:228] Iteration 31700, loss = 0.0391412\nI0818 11:32:28.115985 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:32:28.116003 17829 solver.cpp:244]     Train net output #1: loss = 0.039141 (* 1 = 0.039141 loss)\nI0818 11:32:28.213477 17829 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0818 11:34:46.713392 17829 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 11:36:07.979518 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78184\nI0818 11:36:07.979867 17829 solver.cpp:404]     Test net output #1: loss = 0.957583 (* 1 = 0.957583 loss)\nI0818 11:36:09.288995 17829 solver.cpp:228] Iteration 31800, loss = 0.0611672\nI0818 11:36:09.289055 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 11:36:09.289073 17829 solver.cpp:244]     Train net output #1: loss = 0.0611671 (* 1 = 0.0611671 loss)\nI0818 11:36:09.388248 17829 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0818 11:38:28.053807 17829 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 11:39:49.310497 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7658\nI0818 11:39:49.310819 17829 solver.cpp:404]     Test net output #1: loss = 0.886245 (* 1 = 0.886245 loss)\nI0818 11:39:50.620578 17829 solver.cpp:228] Iteration 31900, loss = 0.0535591\nI0818 11:39:50.620640 17829 solver.cpp:244]     Train net output 
#0: accuracy = 0.976\nI0818 11:39:50.620657 17829 solver.cpp:244]     Train net output #1: loss = 0.053559 (* 1 = 0.053559 loss)\nI0818 11:39:50.720098 17829 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0818 11:42:09.723503 17829 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 11:43:30.996081 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77148\nI0818 11:43:30.996402 17829 solver.cpp:404]     Test net output #1: loss = 0.955302 (* 1 = 0.955302 loss)\nI0818 11:43:32.306097 17829 solver.cpp:228] Iteration 32000, loss = 0.13289\nI0818 11:43:32.306159 17829 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 11:43:32.306176 17829 solver.cpp:244]     Train net output #1: loss = 0.132889 (* 1 = 0.132889 loss)\nI0818 11:43:32.407840 17829 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0818 11:45:51.623211 17829 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 11:47:12.880879 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73256\nI0818 11:47:12.881199 17829 solver.cpp:404]     Test net output #1: loss = 1.44616 (* 1 = 1.44616 loss)\nI0818 11:47:14.192597 17829 solver.cpp:228] Iteration 32100, loss = 0.0512725\nI0818 11:47:14.192658 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:47:14.192677 17829 solver.cpp:244]     Train net output #1: loss = 0.0512724 (* 1 = 0.0512724 loss)\nI0818 11:47:14.291538 17829 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0818 11:49:33.411743 17829 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 11:50:54.695688 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7802\nI0818 11:50:54.696018 17829 solver.cpp:404]     Test net output #1: loss = 0.930922 (* 1 = 0.930922 loss)\nI0818 11:50:56.005671 17829 solver.cpp:228] Iteration 32200, loss = 0.0261014\nI0818 11:50:56.005731 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:50:56.005749 17829 solver.cpp:244]     Train net output #1: loss = 0.0261012 (* 1 = 0.0261012 
loss)\nI0818 11:50:56.108224 17829 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0818 11:53:15.328686 17829 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 11:54:36.601208 17829 solver.cpp:404]     Test net output #0: accuracy = 0.652\nI0818 11:54:36.601523 17829 solver.cpp:404]     Test net output #1: loss = 1.53291 (* 1 = 1.53291 loss)\nI0818 11:54:37.911072 17829 solver.cpp:228] Iteration 32300, loss = 0.0447562\nI0818 11:54:37.911131 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:54:37.911149 17829 solver.cpp:244]     Train net output #1: loss = 0.0447561 (* 1 = 0.0447561 loss)\nI0818 11:54:38.013763 17829 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0818 11:56:57.122066 17829 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 11:58:17.468610 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73488\nI0818 11:58:17.468863 17829 solver.cpp:404]     Test net output #1: loss = 1.31733 (* 1 = 1.31733 loss)\nI0818 11:58:18.775843 17829 solver.cpp:228] Iteration 32400, loss = 0.0227952\nI0818 11:58:18.775884 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:58:18.775899 17829 solver.cpp:244]     Train net output #1: loss = 0.0227951 (* 1 = 0.0227951 loss)\nI0818 11:58:18.877185 17829 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0818 12:00:38.005316 17829 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:01:58.346034 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73148\nI0818 12:01:58.346288 17829 solver.cpp:404]     Test net output #1: loss = 1.24625 (* 1 = 1.24625 loss)\nI0818 12:01:59.653694 17829 solver.cpp:228] Iteration 32500, loss = 0.0696435\nI0818 12:01:59.653738 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:01:59.653753 17829 solver.cpp:244]     Train net output #1: loss = 0.0696433 (* 1 = 0.0696433 loss)\nI0818 12:01:59.758998 17829 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0818 12:04:19.069800 17829 solver.cpp:337] 
Iteration 32600, Testing net (#0)\nI0818 12:05:39.378003 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76736\nI0818 12:05:39.378255 17829 solver.cpp:404]     Test net output #1: loss = 1.18031 (* 1 = 1.18031 loss)\nI0818 12:05:40.684447 17829 solver.cpp:228] Iteration 32600, loss = 0.0616508\nI0818 12:05:40.684490 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:05:40.684506 17829 solver.cpp:244]     Train net output #1: loss = 0.0616506 (* 1 = 0.0616506 loss)\nI0818 12:05:40.793639 17829 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0818 12:08:00.051857 17829 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 12:09:20.365502 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77992\nI0818 12:09:20.365761 17829 solver.cpp:404]     Test net output #1: loss = 1.03161 (* 1 = 1.03161 loss)\nI0818 12:09:21.672454 17829 solver.cpp:228] Iteration 32700, loss = 0.010891\nI0818 12:09:21.672498 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:09:21.672514 17829 solver.cpp:244]     Train net output #1: loss = 0.0108908 (* 1 = 0.0108908 loss)\nI0818 12:09:21.776757 17829 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0818 12:11:40.814786 17829 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:13:01.154942 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71236\nI0818 12:13:01.155218 17829 solver.cpp:404]     Test net output #1: loss = 1.31857 (* 1 = 1.31857 loss)\nI0818 12:13:02.462282 17829 solver.cpp:228] Iteration 32800, loss = 0.0829513\nI0818 12:13:02.462323 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:13:02.462339 17829 solver.cpp:244]     Train net output #1: loss = 0.0829511 (* 1 = 0.0829511 loss)\nI0818 12:13:02.564899 17829 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0818 12:15:21.484108 17829 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:16:41.815137 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73532\nI0818 
12:16:41.815404 17829 solver.cpp:404]     Test net output #1: loss = 1.12083 (* 1 = 1.12083 loss)\nI0818 12:16:43.119081 17829 solver.cpp:228] Iteration 32900, loss = 0.0396941\nI0818 12:16:43.119124 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:16:43.119140 17829 solver.cpp:244]     Train net output #1: loss = 0.0396939 (* 1 = 0.0396939 loss)\nI0818 12:16:43.232782 17829 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0818 12:19:02.238385 17829 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:20:22.586920 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75672\nI0818 12:20:22.587201 17829 solver.cpp:404]     Test net output #1: loss = 1.08981 (* 1 = 1.08981 loss)\nI0818 12:20:23.890449 17829 solver.cpp:228] Iteration 33000, loss = 0.0791022\nI0818 12:20:23.890489 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:20:23.890506 17829 solver.cpp:244]     Train net output #1: loss = 0.0791021 (* 1 = 0.0791021 loss)\nI0818 12:20:24.003811 17829 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0818 12:22:43.184643 17829 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:24:03.470306 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68364\nI0818 12:24:03.470593 17829 solver.cpp:404]     Test net output #1: loss = 1.93187 (* 1 = 1.93187 loss)\nI0818 12:24:04.773653 17829 solver.cpp:228] Iteration 33100, loss = 0.0327519\nI0818 12:24:04.773694 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:24:04.773710 17829 solver.cpp:244]     Train net output #1: loss = 0.0327518 (* 1 = 0.0327518 loss)\nI0818 12:24:04.879561 17829 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0818 12:26:23.770179 17829 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 12:27:44.059332 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72676\nI0818 12:27:44.059562 17829 solver.cpp:404]     Test net output #1: loss = 1.38298 (* 1 = 1.38298 loss)\nI0818 12:27:45.362767 17829 
solver.cpp:228] Iteration 33200, loss = 0.0547165\nI0818 12:27:45.362812 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:27:45.362826 17829 solver.cpp:244]     Train net output #1: loss = 0.0547163 (* 1 = 0.0547163 loss)\nI0818 12:27:45.469207 17829 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0818 12:30:04.471983 17829 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 12:31:24.759587 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77164\nI0818 12:31:24.759811 17829 solver.cpp:404]     Test net output #1: loss = 1.13208 (* 1 = 1.13208 loss)\nI0818 12:31:26.062899 17829 solver.cpp:228] Iteration 33300, loss = 0.0237743\nI0818 12:31:26.062942 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:31:26.062957 17829 solver.cpp:244]     Train net output #1: loss = 0.0237742 (* 1 = 0.0237742 loss)\nI0818 12:31:26.175864 17829 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0818 12:33:45.225652 17829 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 12:35:05.511524 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7572\nI0818 12:35:05.511793 17829 solver.cpp:404]     Test net output #1: loss = 0.968756 (* 1 = 0.968756 loss)\nI0818 12:35:06.815126 17829 solver.cpp:228] Iteration 33400, loss = 0.0338396\nI0818 12:35:06.815167 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:35:06.815182 17829 solver.cpp:244]     Train net output #1: loss = 0.0338395 (* 1 = 0.0338395 loss)\nI0818 12:35:06.923879 17829 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0818 12:37:26.022976 17829 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 12:38:46.332924 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7722\nI0818 12:38:46.333185 17829 solver.cpp:404]     Test net output #1: loss = 0.973937 (* 1 = 0.973937 loss)\nI0818 12:38:47.635726 17829 solver.cpp:228] Iteration 33500, loss = 0.0429128\nI0818 12:38:47.635766 17829 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0818 12:38:47.635782 17829 solver.cpp:244]     Train net output #1: loss = 0.0429126 (* 1 = 0.0429126 loss)\nI0818 12:38:47.743530 17829 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0818 12:41:06.975860 17829 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 12:42:27.305167 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74332\nI0818 12:42:27.305449 17829 solver.cpp:404]     Test net output #1: loss = 1.26538 (* 1 = 1.26538 loss)\nI0818 12:42:28.608736 17829 solver.cpp:228] Iteration 33600, loss = 0.00725997\nI0818 12:42:28.608777 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:42:28.608791 17829 solver.cpp:244]     Train net output #1: loss = 0.00725985 (* 1 = 0.00725985 loss)\nI0818 12:42:28.715446 17829 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0818 12:44:47.822686 17829 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 12:46:08.126909 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80056\nI0818 12:46:08.127193 17829 solver.cpp:404]     Test net output #1: loss = 0.848703 (* 1 = 0.848703 loss)\nI0818 12:46:09.430119 17829 solver.cpp:228] Iteration 33700, loss = 0.0219675\nI0818 12:46:09.430161 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:46:09.430177 17829 solver.cpp:244]     Train net output #1: loss = 0.0219674 (* 1 = 0.0219674 loss)\nI0818 12:46:09.539593 17829 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0818 12:48:28.690524 17829 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 12:49:49.003708 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76008\nI0818 12:49:49.003926 17829 solver.cpp:404]     Test net output #1: loss = 1.17647 (* 1 = 1.17647 loss)\nI0818 12:49:50.306763 17829 solver.cpp:228] Iteration 33800, loss = 0.0369127\nI0818 12:49:50.306805 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:49:50.306821 17829 solver.cpp:244]     Train net output #1: loss = 0.0369126 (* 1 = 0.0369126 
loss)\nI0818 12:49:50.411628 17829 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0818 12:52:09.421859 17829 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 12:53:29.720444 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75112\nI0818 12:53:29.720654 17829 solver.cpp:404]     Test net output #1: loss = 1.36038 (* 1 = 1.36038 loss)\nI0818 12:53:31.024588 17829 solver.cpp:228] Iteration 33900, loss = 0.0712792\nI0818 12:53:31.024631 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:53:31.024647 17829 solver.cpp:244]     Train net output #1: loss = 0.0712791 (* 1 = 0.0712791 loss)\nI0818 12:53:31.130517 17829 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0818 12:55:50.262789 17829 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 12:57:10.597406 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80052\nI0818 12:57:10.597679 17829 solver.cpp:404]     Test net output #1: loss = 0.913953 (* 1 = 0.913953 loss)\nI0818 12:57:11.901549 17829 solver.cpp:228] Iteration 34000, loss = 0.0176246\nI0818 12:57:11.901599 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:57:11.901623 17829 solver.cpp:244]     Train net output #1: loss = 0.0176245 (* 1 = 0.0176245 loss)\nI0818 12:57:12.008375 17829 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0818 12:59:31.098486 17829 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:00:51.442101 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71064\nI0818 13:00:51.442394 17829 solver.cpp:404]     Test net output #1: loss = 1.40368 (* 1 = 1.40368 loss)\nI0818 13:00:52.746448 17829 solver.cpp:228] Iteration 34100, loss = 0.0152821\nI0818 13:00:52.746492 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:00:52.746516 17829 solver.cpp:244]     Train net output #1: loss = 0.015282 (* 1 = 0.015282 loss)\nI0818 13:00:52.849217 17829 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0818 13:03:11.949314 17829 solver.cpp:337] 
Iteration 34200, Testing net (#0)\nI0818 13:04:32.293027 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI0818 13:04:32.293295 17829 solver.cpp:404]     Test net output #1: loss = 0.843599 (* 1 = 0.843599 loss)\nI0818 13:04:33.601343 17829 solver.cpp:228] Iteration 34200, loss = 0.0414639\nI0818 13:04:33.601390 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:04:33.601415 17829 solver.cpp:244]     Train net output #1: loss = 0.0414638 (* 1 = 0.0414638 loss)\nI0818 13:04:33.703873 17829 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0818 13:06:52.701138 17829 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:08:13.021600 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77064\nI0818 13:08:13.021849 17829 solver.cpp:404]     Test net output #1: loss = 0.962976 (* 1 = 0.962976 loss)\nI0818 13:08:14.329244 17829 solver.cpp:228] Iteration 34300, loss = 0.021007\nI0818 13:08:14.329291 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:08:14.329316 17829 solver.cpp:244]     Train net output #1: loss = 0.0210069 (* 1 = 0.0210069 loss)\nI0818 13:08:14.430241 17829 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0818 13:10:33.415419 17829 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:11:53.766966 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80664\nI0818 13:11:53.767190 17829 solver.cpp:404]     Test net output #1: loss = 0.771932 (* 1 = 0.771932 loss)\nI0818 13:11:55.074757 17829 solver.cpp:228] Iteration 34400, loss = 0.0205238\nI0818 13:11:55.074803 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:11:55.074829 17829 solver.cpp:244]     Train net output #1: loss = 0.0205237 (* 1 = 0.0205237 loss)\nI0818 13:11:55.175606 17829 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0818 13:14:14.169308 17829 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:15:34.492805 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.75528\nI0818 13:15:34.493070 17829 solver.cpp:404]     Test net output #1: loss = 1.09 (* 1 = 1.09 loss)\nI0818 13:15:35.801161 17829 solver.cpp:228] Iteration 34500, loss = 0.00693499\nI0818 13:15:35.801206 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:15:35.801229 17829 solver.cpp:244]     Train net output #1: loss = 0.00693487 (* 1 = 0.00693487 loss)\nI0818 13:15:35.903630 17829 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0818 13:17:54.920537 17829 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:19:15.270153 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75624\nI0818 13:19:15.270439 17829 solver.cpp:404]     Test net output #1: loss = 1.0176 (* 1 = 1.0176 loss)\nI0818 13:19:16.578305 17829 solver.cpp:228] Iteration 34600, loss = 0.0400193\nI0818 13:19:16.578354 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:19:16.578378 17829 solver.cpp:244]     Train net output #1: loss = 0.0400192 (* 1 = 0.0400192 loss)\nI0818 13:19:16.679625 17829 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0818 13:21:35.726259 17829 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:22:56.080904 17829 solver.cpp:404]     Test net output #0: accuracy = 0.737\nI0818 13:22:56.081202 17829 solver.cpp:404]     Test net output #1: loss = 1.29815 (* 1 = 1.29815 loss)\nI0818 13:22:57.388831 17829 solver.cpp:228] Iteration 34700, loss = 0.0654983\nI0818 13:22:57.388880 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:22:57.388903 17829 solver.cpp:244]     Train net output #1: loss = 0.0654981 (* 1 = 0.0654981 loss)\nI0818 13:22:57.489992 17829 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0818 13:25:16.426131 17829 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 13:26:36.781563 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78396\nI0818 13:26:36.781852 17829 solver.cpp:404]     Test net output #1: loss = 0.858571 (* 1 = 0.858571 loss)\nI0818 13:26:38.090087 
17829 solver.cpp:228] Iteration 34800, loss = 0.0770141\nI0818 13:26:38.090134 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:26:38.090158 17829 solver.cpp:244]     Train net output #1: loss = 0.077014 (* 1 = 0.077014 loss)\nI0818 13:26:38.187665 17829 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0818 13:28:56.915249 17829 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 13:30:17.285441 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77996\nI0818 13:30:17.285759 17829 solver.cpp:404]     Test net output #1: loss = 1.00203 (* 1 = 1.00203 loss)\nI0818 13:30:18.593883 17829 solver.cpp:228] Iteration 34900, loss = 0.0503766\nI0818 13:30:18.593932 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:30:18.593956 17829 solver.cpp:244]     Train net output #1: loss = 0.0503765 (* 1 = 0.0503765 loss)\nI0818 13:30:18.693518 17829 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0818 13:32:37.522426 17829 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 13:33:57.868077 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81344\nI0818 13:33:57.868343 17829 solver.cpp:404]     Test net output #1: loss = 0.804258 (* 1 = 0.804258 loss)\nI0818 13:33:59.175617 17829 solver.cpp:228] Iteration 35000, loss = 0.0424039\nI0818 13:33:59.175668 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:33:59.175691 17829 solver.cpp:244]     Train net output #1: loss = 0.0424037 (* 1 = 0.0424037 loss)\nI0818 13:33:59.277287 17829 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0818 13:36:18.224510 17829 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 13:37:38.569064 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78376\nI0818 13:37:38.569391 17829 solver.cpp:404]     Test net output #1: loss = 1.05311 (* 1 = 1.05311 loss)\nI0818 13:37:39.877652 17829 solver.cpp:228] Iteration 35100, loss = 0.0389964\nI0818 13:37:39.877698 17829 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0818 13:37:39.877724 17829 solver.cpp:244]     Train net output #1: loss = 0.0389963 (* 1 = 0.0389963 loss)\nI0818 13:37:39.979794 17829 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0818 13:39:58.687484 17829 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 13:41:19.012607 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80916\nI0818 13:41:19.012930 17829 solver.cpp:404]     Test net output #1: loss = 0.782131 (* 1 = 0.782131 loss)\nI0818 13:41:20.321059 17829 solver.cpp:228] Iteration 35200, loss = 0.0687261\nI0818 13:41:20.321108 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:41:20.321132 17829 solver.cpp:244]     Train net output #1: loss = 0.068726 (* 1 = 0.068726 loss)\nI0818 13:41:20.422579 17829 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0818 13:43:39.405575 17829 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 13:44:59.751538 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78364\nI0818 13:44:59.751863 17829 solver.cpp:404]     Test net output #1: loss = 1.00463 (* 1 = 1.00463 loss)\nI0818 13:45:01.059917 17829 solver.cpp:228] Iteration 35300, loss = 0.0781326\nI0818 13:45:01.059967 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:45:01.059990 17829 solver.cpp:244]     Train net output #1: loss = 0.0781324 (* 1 = 0.0781324 loss)\nI0818 13:45:01.162997 17829 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0818 13:47:20.253700 17829 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 13:48:40.609454 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73948\nI0818 13:48:40.609762 17829 solver.cpp:404]     Test net output #1: loss = 1.21917 (* 1 = 1.21917 loss)\nI0818 13:48:41.916894 17829 solver.cpp:228] Iteration 35400, loss = 0.0133662\nI0818 13:48:41.916942 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:48:41.916965 17829 solver.cpp:244]     Train net output #1: loss = 0.013366 (* 1 = 0.013366 loss)\nI0818 
13:48:42.016717 17829 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0818 13:51:00.886129 17829 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 13:52:21.236991 17829 solver.cpp:404]     Test net output #0: accuracy = 0.779\nI0818 13:52:21.237298 17829 solver.cpp:404]     Test net output #1: loss = 0.966187 (* 1 = 0.966187 loss)\nI0818 13:52:22.544389 17829 solver.cpp:228] Iteration 35500, loss = 0.0635913\nI0818 13:52:22.544436 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:52:22.544461 17829 solver.cpp:244]     Train net output #1: loss = 0.0635912 (* 1 = 0.0635912 loss)\nI0818 13:52:22.647820 17829 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0818 13:54:41.502590 17829 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 13:56:01.852975 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77344\nI0818 13:56:01.853267 17829 solver.cpp:404]     Test net output #1: loss = 0.951169 (* 1 = 0.951169 loss)\nI0818 13:56:03.160212 17829 solver.cpp:228] Iteration 35600, loss = 0.135405\nI0818 13:56:03.160256 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:56:03.160274 17829 solver.cpp:244]     Train net output #1: loss = 0.135405 (* 1 = 0.135405 loss)\nI0818 13:56:03.262580 17829 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0818 13:58:22.284487 17829 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 13:59:42.636956 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80396\nI0818 13:59:42.637266 17829 solver.cpp:404]     Test net output #1: loss = 0.839232 (* 1 = 0.839232 loss)\nI0818 13:59:43.943711 17829 solver.cpp:228] Iteration 35700, loss = 0.0191057\nI0818 13:59:43.943758 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:59:43.943773 17829 solver.cpp:244]     Train net output #1: loss = 0.0191055 (* 1 = 0.0191055 loss)\nI0818 13:59:44.043948 17829 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0818 14:02:02.871721 17829 solver.cpp:337] Iteration 
35800, Testing net (#0)\nI0818 14:03:23.217229 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72288\nI0818 14:03:23.217519 17829 solver.cpp:404]     Test net output #1: loss = 1.38422 (* 1 = 1.38422 loss)\nI0818 14:03:24.523989 17829 solver.cpp:228] Iteration 35800, loss = 0.0369657\nI0818 14:03:24.524034 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:03:24.524050 17829 solver.cpp:244]     Train net output #1: loss = 0.0369655 (* 1 = 0.0369655 loss)\nI0818 14:03:24.625003 17829 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0818 14:05:43.443326 17829 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:07:03.791541 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73164\nI0818 14:07:03.791829 17829 solver.cpp:404]     Test net output #1: loss = 1.37096 (* 1 = 1.37096 loss)\nI0818 14:07:05.099431 17829 solver.cpp:228] Iteration 35900, loss = 0.0358492\nI0818 14:07:05.099483 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:07:05.099499 17829 solver.cpp:244]     Train net output #1: loss = 0.035849 (* 1 = 0.035849 loss)\nI0818 14:07:05.200371 17829 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0818 14:09:24.161859 17829 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:10:44.505159 17829 solver.cpp:404]     Test net output #0: accuracy = 0.6914\nI0818 14:10:44.505455 17829 solver.cpp:404]     Test net output #1: loss = 1.72279 (* 1 = 1.72279 loss)\nI0818 14:10:45.813012 17829 solver.cpp:228] Iteration 36000, loss = 0.0211765\nI0818 14:10:45.813057 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:10:45.813073 17829 solver.cpp:244]     Train net output #1: loss = 0.0211764 (* 1 = 0.0211764 loss)\nI0818 14:10:45.911247 17829 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0818 14:13:04.996713 17829 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:14:25.373399 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7302\nI0818 
14:14:25.373703 17829 solver.cpp:404]     Test net output #1: loss = 1.28385 (* 1 = 1.28385 loss)\nI0818 14:14:26.681849 17829 solver.cpp:228] Iteration 36100, loss = 0.100312\nI0818 14:14:26.681900 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:14:26.681923 17829 solver.cpp:244]     Train net output #1: loss = 0.100312 (* 1 = 0.100312 loss)\nI0818 14:14:26.780627 17829 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0818 14:16:45.648154 17829 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:18:05.986132 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79696\nI0818 14:18:05.986462 17829 solver.cpp:404]     Test net output #1: loss = 0.86524 (* 1 = 0.86524 loss)\nI0818 14:18:07.292940 17829 solver.cpp:228] Iteration 36200, loss = 0.039813\nI0818 14:18:07.292989 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:18:07.293014 17829 solver.cpp:244]     Train net output #1: loss = 0.0398129 (* 1 = 0.0398129 loss)\nI0818 14:18:07.393100 17829 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0818 14:20:26.466797 17829 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 14:21:46.906831 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78852\nI0818 14:21:46.907145 17829 solver.cpp:404]     Test net output #1: loss = 0.915272 (* 1 = 0.915272 loss)\nI0818 14:21:48.214504 17829 solver.cpp:228] Iteration 36300, loss = 0.0340031\nI0818 14:21:48.214547 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:21:48.214563 17829 solver.cpp:244]     Train net output #1: loss = 0.0340029 (* 1 = 0.0340029 loss)\nI0818 14:21:48.312919 17829 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0818 14:24:07.246489 17829 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 14:25:27.594434 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI0818 14:25:27.594724 17829 solver.cpp:404]     Test net output #1: loss = 0.904242 (* 1 = 0.904242 loss)\nI0818 14:25:28.902755 17829 
solver.cpp:228] Iteration 36400, loss = 0.112025\nI0818 14:25:28.902801 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:25:28.902815 17829 solver.cpp:244]     Train net output #1: loss = 0.112025 (* 1 = 0.112025 loss)\nI0818 14:25:29.003511 17829 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 14:27:47.957551 17829 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 14:29:08.349644 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78948\nI0818 14:29:08.349959 17829 solver.cpp:404]     Test net output #1: loss = 0.864022 (* 1 = 0.864022 loss)\nI0818 14:29:09.658015 17829 solver.cpp:228] Iteration 36500, loss = 0.023074\nI0818 14:29:09.658058 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:29:09.658074 17829 solver.cpp:244]     Train net output #1: loss = 0.0230739 (* 1 = 0.0230739 loss)\nI0818 14:29:09.758787 17829 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 14:31:28.657021 17829 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 14:32:49.049619 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI0818 14:32:49.049943 17829 solver.cpp:404]     Test net output #1: loss = 0.976307 (* 1 = 0.976307 loss)\nI0818 14:32:50.357450 17829 solver.cpp:228] Iteration 36600, loss = 0.0680153\nI0818 14:32:50.357492 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 14:32:50.357508 17829 solver.cpp:244]     Train net output #1: loss = 0.0680152 (* 1 = 0.0680152 loss)\nI0818 14:32:50.460489 17829 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 14:37:44.376016 17829 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 14:39:04.176240 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76568\nI0818 14:39:04.178217 17829 solver.cpp:404]     Test net output #1: loss = 1.06269 (* 1 = 1.06269 loss)\nI0818 14:39:05.472556 17829 solver.cpp:228] Iteration 36700, loss = 0.0825058\nI0818 14:39:05.472605 17829 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0818 14:39:05.472620 17829 solver.cpp:244]     Train net output #1: loss = 0.0825056 (* 1 = 0.0825056 loss)\nI0818 14:39:05.585119 17829 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 14:41:24.530418 17829 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 14:42:44.816246 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77988\nI0818 14:42:44.816558 17829 solver.cpp:404]     Test net output #1: loss = 1.06036 (* 1 = 1.06036 loss)\nI0818 14:42:46.124399 17829 solver.cpp:228] Iteration 36800, loss = 0.0238337\nI0818 14:42:46.124450 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:42:46.124466 17829 solver.cpp:244]     Train net output #1: loss = 0.0238336 (* 1 = 0.0238336 loss)\nI0818 14:42:46.226351 17829 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 14:45:26.825708 17829 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 14:46:47.123193 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79268\nI0818 14:46:47.123569 17829 solver.cpp:404]     Test net output #1: loss = 0.994252 (* 1 = 0.994252 loss)\nI0818 14:46:48.430446 17829 solver.cpp:228] Iteration 36900, loss = 0.0172748\nI0818 14:46:48.430491 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:46:48.430506 17829 solver.cpp:244]     Train net output #1: loss = 0.0172747 (* 1 = 0.0172747 loss)\nI0818 14:46:48.531965 17829 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 14:49:07.598251 17829 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 14:50:27.915071 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7694\nI0818 14:50:27.915383 17829 solver.cpp:404]     Test net output #1: loss = 1.05061 (* 1 = 1.05061 loss)\nI0818 14:50:29.221658 17829 solver.cpp:228] Iteration 37000, loss = 0.0900735\nI0818 14:50:29.221704 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:50:29.221720 17829 solver.cpp:244]     Train net output #1: loss = 0.0900734 (* 1 = 0.0900734 
loss)\nI0818 14:50:29.324066 17829 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 14:52:48.230010 17829 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 14:54:08.521520 17829 solver.cpp:404]     Test net output #0: accuracy = 0.761\nI0818 14:54:08.521828 17829 solver.cpp:404]     Test net output #1: loss = 1.14286 (* 1 = 1.14286 loss)\nI0818 14:54:09.828629 17829 solver.cpp:228] Iteration 37100, loss = 0.0353203\nI0818 14:54:09.828675 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:54:09.828691 17829 solver.cpp:244]     Train net output #1: loss = 0.0353202 (* 1 = 0.0353202 loss)\nI0818 14:54:09.933930 17829 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 14:56:28.767947 17829 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 14:57:49.054802 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78852\nI0818 14:57:49.055114 17829 solver.cpp:404]     Test net output #1: loss = 0.954395 (* 1 = 0.954395 loss)\nI0818 14:57:50.361800 17829 solver.cpp:228] Iteration 37200, loss = 0.0185751\nI0818 14:57:50.361847 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:57:50.361863 17829 solver.cpp:244]     Train net output #1: loss = 0.0185749 (* 1 = 0.0185749 loss)\nI0818 14:57:50.460364 17829 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 15:00:09.359525 17829 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:01:29.659200 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79548\nI0818 15:01:29.659495 17829 solver.cpp:404]     Test net output #1: loss = 0.932858 (* 1 = 0.932858 loss)\nI0818 15:01:30.966053 17829 solver.cpp:228] Iteration 37300, loss = 0.0133529\nI0818 15:01:30.966099 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:01:30.966115 17829 solver.cpp:244]     Train net output #1: loss = 0.0133528 (* 1 = 0.0133528 loss)\nI0818 15:01:31.067957 17829 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 15:03:50.105047 17829 solver.cpp:337] 
Iteration 37400, Testing net (#0)\nI0818 15:05:10.420862 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74468\nI0818 15:05:10.421145 17829 solver.cpp:404]     Test net output #1: loss = 1.24061 (* 1 = 1.24061 loss)\nI0818 15:05:11.728016 17829 solver.cpp:228] Iteration 37400, loss = 0.0695382\nI0818 15:05:11.728061 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:05:11.728078 17829 solver.cpp:244]     Train net output #1: loss = 0.0695381 (* 1 = 0.0695381 loss)\nI0818 15:05:11.829813 17829 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 15:07:30.813109 17829 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:08:51.123067 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74636\nI0818 15:08:51.123356 17829 solver.cpp:404]     Test net output #1: loss = 1.15428 (* 1 = 1.15428 loss)\nI0818 15:08:52.428747 17829 solver.cpp:228] Iteration 37500, loss = 0.0405322\nI0818 15:08:52.428789 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:08:52.428805 17829 solver.cpp:244]     Train net output #1: loss = 0.0405321 (* 1 = 0.0405321 loss)\nI0818 15:08:52.535744 17829 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 15:11:11.449667 17829 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:12:31.751106 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78356\nI0818 15:12:31.751427 17829 solver.cpp:404]     Test net output #1: loss = 1.04906 (* 1 = 1.04906 loss)\nI0818 15:12:33.057070 17829 solver.cpp:228] Iteration 37600, loss = 0.0190007\nI0818 15:12:33.057112 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:12:33.057129 17829 solver.cpp:244]     Train net output #1: loss = 0.0190006 (* 1 = 0.0190006 loss)\nI0818 15:12:33.161258 17829 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 15:14:51.907285 17829 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:16:12.212527 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.80544\nI0818 15:16:12.212841 17829 solver.cpp:404]     Test net output #1: loss = 0.807568 (* 1 = 0.807568 loss)\nI0818 15:16:13.517993 17829 solver.cpp:228] Iteration 37700, loss = 0.0273939\nI0818 15:16:13.518038 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:16:13.518054 17829 solver.cpp:244]     Train net output #1: loss = 0.0273937 (* 1 = 0.0273937 loss)\nI0818 15:16:13.623229 17829 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 15:18:32.418445 17829 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:19:52.737505 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77028\nI0818 15:19:52.737823 17829 solver.cpp:404]     Test net output #1: loss = 0.867885 (* 1 = 0.867885 loss)\nI0818 15:19:54.043638 17829 solver.cpp:228] Iteration 37800, loss = 0.0607333\nI0818 15:19:54.043682 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:19:54.043699 17829 solver.cpp:244]     Train net output #1: loss = 0.0607332 (* 1 = 0.0607332 loss)\nI0818 15:19:54.143004 17829 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 15:22:12.961711 17829 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:23:33.293525 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80084\nI0818 15:23:33.293839 17829 solver.cpp:404]     Test net output #1: loss = 0.792042 (* 1 = 0.792042 loss)\nI0818 15:23:34.599150 17829 solver.cpp:228] Iteration 37900, loss = 0.0226993\nI0818 15:23:34.599194 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:23:34.599210 17829 solver.cpp:244]     Train net output #1: loss = 0.0226992 (* 1 = 0.0226992 loss)\nI0818 15:23:34.700662 17829 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 15:25:53.715757 17829 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 15:27:14.042701 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI0818 15:27:14.043010 17829 solver.cpp:404]     Test net output #1: loss = 0.837135 (* 1 = 0.837135 loss)\nI0818 
15:27:15.348332 17829 solver.cpp:228] Iteration 38000, loss = 0.0568379\nI0818 15:27:15.348376 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:27:15.348390 17829 solver.cpp:244]     Train net output #1: loss = 0.0568378 (* 1 = 0.0568378 loss)\nI0818 15:27:15.449630 17829 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 15:29:34.560077 17829 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 15:30:54.857427 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77168\nI0818 15:30:54.857718 17829 solver.cpp:404]     Test net output #1: loss = 0.993759 (* 1 = 0.993759 loss)\nI0818 15:30:56.162487 17829 solver.cpp:228] Iteration 38100, loss = 0.0808415\nI0818 15:30:56.162531 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:30:56.162547 17829 solver.cpp:244]     Train net output #1: loss = 0.0808414 (* 1 = 0.0808414 loss)\nI0818 15:30:56.270082 17829 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 15:33:15.284382 17829 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 15:34:35.620070 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7606\nI0818 15:34:35.620384 17829 solver.cpp:404]     Test net output #1: loss = 1.20041 (* 1 = 1.20041 loss)\nI0818 15:34:36.925554 17829 solver.cpp:228] Iteration 38200, loss = 0.0506756\nI0818 15:34:36.925597 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:34:36.925613 17829 solver.cpp:244]     Train net output #1: loss = 0.0506756 (* 1 = 0.0506756 loss)\nI0818 15:34:37.032248 17829 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 15:36:55.969164 17829 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 15:38:16.292527 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75044\nI0818 15:38:16.292840 17829 solver.cpp:404]     Test net output #1: loss = 1.28846 (* 1 = 1.28846 loss)\nI0818 15:38:17.598618 17829 solver.cpp:228] Iteration 38300, loss = 0.0340822\nI0818 15:38:17.598664 17829 solver.cpp:244]     Train 
net output #0: accuracy = 0.992\nI0818 15:38:17.598680 17829 solver.cpp:244]     Train net output #1: loss = 0.0340822 (* 1 = 0.0340822 loss)\nI0818 15:38:17.698976 17829 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 15:40:36.501874 17829 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 15:41:56.803206 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77948\nI0818 15:41:56.803522 17829 solver.cpp:404]     Test net output #1: loss = 0.978505 (* 1 = 0.978505 loss)\nI0818 15:41:58.108654 17829 solver.cpp:228] Iteration 38400, loss = 0.0225244\nI0818 15:41:58.108700 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:41:58.108716 17829 solver.cpp:244]     Train net output #1: loss = 0.0225244 (* 1 = 0.0225244 loss)\nI0818 15:41:58.212715 17829 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0818 15:44:17.054370 17829 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 15:45:37.378558 17829 solver.cpp:404]     Test net output #0: accuracy = 0.83192\nI0818 15:45:37.378867 17829 solver.cpp:404]     Test net output #1: loss = 0.654708 (* 1 = 0.654708 loss)\nI0818 15:45:38.685425 17829 solver.cpp:228] Iteration 38500, loss = 0.0270923\nI0818 15:45:38.685470 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:45:38.685487 17829 solver.cpp:244]     Train net output #1: loss = 0.0270923 (* 1 = 0.0270923 loss)\nI0818 15:45:38.787662 17829 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 15:47:57.575734 17829 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 15:49:17.896817 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80624\nI0818 15:49:17.897130 17829 solver.cpp:404]     Test net output #1: loss = 0.891639 (* 1 = 0.891639 loss)\nI0818 15:49:19.202858 17829 solver.cpp:228] Iteration 38600, loss = 0.0625674\nI0818 15:49:19.202904 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:49:19.202920 17829 solver.cpp:244]     Train net output #1: loss = 0.0625674 (* 
1 = 0.0625674 loss)\nI0818 15:49:19.302989 17829 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 15:51:38.100203 17829 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 15:52:58.410696 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80532\nI0818 15:52:58.410989 17829 solver.cpp:404]     Test net output #1: loss = 0.762307 (* 1 = 0.762307 loss)\nI0818 15:52:59.717802 17829 solver.cpp:228] Iteration 38700, loss = 0.058078\nI0818 15:52:59.717849 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:52:59.717864 17829 solver.cpp:244]     Train net output #1: loss = 0.058078 (* 1 = 0.058078 loss)\nI0818 15:52:59.819977 17829 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 15:55:18.677620 17829 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 15:56:38.988013 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79156\nI0818 15:56:38.988338 17829 solver.cpp:404]     Test net output #1: loss = 0.981671 (* 1 = 0.981671 loss)\nI0818 15:56:40.295641 17829 solver.cpp:228] Iteration 38800, loss = 0.055834\nI0818 15:56:40.295686 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:56:40.295702 17829 solver.cpp:244]     Train net output #1: loss = 0.0558339 (* 1 = 0.0558339 loss)\nI0818 15:56:40.396950 17829 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 15:58:59.229357 17829 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:00:19.523932 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76008\nI0818 16:00:19.524246 17829 solver.cpp:404]     Test net output #1: loss = 1.19122 (* 1 = 1.19122 loss)\nI0818 16:00:20.830674 17829 solver.cpp:228] Iteration 38900, loss = 0.0586889\nI0818 16:00:20.830721 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:00:20.830737 17829 solver.cpp:244]     Train net output #1: loss = 0.0586889 (* 1 = 0.0586889 loss)\nI0818 16:00:20.930521 17829 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 16:02:39.755547 17829 
solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:04:00.075680 17829 solver.cpp:404]     Test net output #0: accuracy = 0.777039\nI0818 16:04:00.075997 17829 solver.cpp:404]     Test net output #1: loss = 1.01427 (* 1 = 1.01427 loss)\nI0818 16:04:01.382809 17829 solver.cpp:228] Iteration 39000, loss = 0.0286839\nI0818 16:04:01.382854 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:04:01.382869 17829 solver.cpp:244]     Train net output #1: loss = 0.0286838 (* 1 = 0.0286838 loss)\nI0818 16:04:01.484144 17829 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 16:06:20.233006 17829 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:07:40.569672 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI0818 16:07:40.569965 17829 solver.cpp:404]     Test net output #1: loss = 0.927771 (* 1 = 0.927771 loss)\nI0818 16:07:41.876545 17829 solver.cpp:228] Iteration 39100, loss = 0.0540845\nI0818 16:07:41.876593 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:07:41.876610 17829 solver.cpp:244]     Train net output #1: loss = 0.0540844 (* 1 = 0.0540844 loss)\nI0818 16:07:41.980726 17829 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 16:10:00.843973 17829 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:11:21.186810 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7952\nI0818 16:11:21.187129 17829 solver.cpp:404]     Test net output #1: loss = 0.912019 (* 1 = 0.912019 loss)\nI0818 16:11:22.493273 17829 solver.cpp:228] Iteration 39200, loss = 0.0629719\nI0818 16:11:22.493320 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:11:22.493336 17829 solver.cpp:244]     Train net output #1: loss = 0.0629719 (* 1 = 0.0629719 loss)\nI0818 16:11:22.597441 17829 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 16:13:41.673110 17829 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:15:02.016110 17829 solver.cpp:404]     Test net output #0: 
accuracy = 0.78072\nI0818 16:15:02.016405 17829 solver.cpp:404]     Test net output #1: loss = 0.950279 (* 1 = 0.950279 loss)\nI0818 16:15:03.322063 17829 solver.cpp:228] Iteration 39300, loss = 0.0408416\nI0818 16:15:03.322110 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:15:03.322126 17829 solver.cpp:244]     Train net output #1: loss = 0.0408416 (* 1 = 0.0408416 loss)\nI0818 16:15:03.424041 17829 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 16:17:21.430003 17829 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:18:41.760785 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75528\nI0818 16:18:41.761111 17829 solver.cpp:404]     Test net output #1: loss = 1.18184 (* 1 = 1.18184 loss)\nI0818 16:18:43.067260 17829 solver.cpp:228] Iteration 39400, loss = 0.0926433\nI0818 16:18:43.067308 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:18:43.067324 17829 solver.cpp:244]     Train net output #1: loss = 0.0926431 (* 1 = 0.0926431 loss)\nI0818 16:18:43.159214 17829 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 16:21:00.964395 17829 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:22:21.511672 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7818\nI0818 16:22:21.512001 17829 solver.cpp:404]     Test net output #1: loss = 1.06287 (* 1 = 1.06287 loss)\nI0818 16:22:22.821467 17829 solver.cpp:228] Iteration 39500, loss = 0.0330906\nI0818 16:22:22.821530 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:22:22.821557 17829 solver.cpp:244]     Train net output #1: loss = 0.0330905 (* 1 = 0.0330905 loss)\nI0818 16:22:22.909903 17829 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 16:24:40.952100 17829 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 16:26:02.163115 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79496\nI0818 16:26:02.163408 17829 solver.cpp:404]     Test net output #1: loss = 0.992722 (* 1 = 0.992722 
loss)\nI0818 16:26:03.472113 17829 solver.cpp:228] Iteration 39600, loss = 0.0204366\nI0818 16:26:03.472172 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:26:03.472190 17829 solver.cpp:244]     Train net output #1: loss = 0.0204365 (* 1 = 0.0204365 loss)\nI0818 16:26:03.566287 17829 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 16:28:21.560690 17829 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 16:29:42.776677 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7316\nI0818 16:29:42.776965 17829 solver.cpp:404]     Test net output #1: loss = 1.29467 (* 1 = 1.29467 loss)\nI0818 16:29:44.085978 17829 solver.cpp:228] Iteration 39700, loss = 0.0253038\nI0818 16:29:44.086037 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:29:44.086055 17829 solver.cpp:244]     Train net output #1: loss = 0.0253036 (* 1 = 0.0253036 loss)\nI0818 16:29:44.179087 17829 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 16:32:02.229254 17829 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 16:33:23.417515 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80464\nI0818 16:33:23.417827 17829 solver.cpp:404]     Test net output #1: loss = 0.869769 (* 1 = 0.869769 loss)\nI0818 16:33:24.725435 17829 solver.cpp:228] Iteration 39800, loss = 0.0251452\nI0818 16:33:24.725494 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:33:24.725512 17829 solver.cpp:244]     Train net output #1: loss = 0.0251451 (* 1 = 0.0251451 loss)\nI0818 16:33:24.821663 17829 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 16:35:43.147817 17829 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 16:37:04.347504 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78728\nI0818 16:37:04.347805 17829 solver.cpp:404]     Test net output #1: loss = 0.830147 (* 1 = 0.830147 loss)\nI0818 16:37:05.653738 17829 solver.cpp:228] Iteration 39900, loss = 0.0431735\nI0818 16:37:05.653796 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:37:05.653813 17829 solver.cpp:244]     Train net output #1: loss = 0.0431734 (* 1 = 0.0431734 loss)\nI0818 16:37:05.750481 17829 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 16:39:24.206758 17829 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 16:40:45.383951 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78328\nI0818 16:40:45.384248 17829 solver.cpp:404]     Test net output #1: loss = 0.854794 (* 1 = 0.854794 loss)\nI0818 16:40:46.689589 17829 solver.cpp:228] Iteration 40000, loss = 0.014941\nI0818 16:40:46.689647 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:40:46.689664 17829 solver.cpp:244]     Train net output #1: loss = 0.0149409 (* 1 = 0.0149409 loss)\nI0818 16:40:46.790793 17829 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 16:43:05.208442 17829 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 16:44:26.324898 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI0818 16:44:26.325186 17829 solver.cpp:404]     Test net output #1: loss = 1.0806 (* 1 = 1.0806 loss)\nI0818 16:44:27.631506 17829 solver.cpp:228] Iteration 40100, loss = 0.0389007\nI0818 16:44:27.631572 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:44:27.631588 17829 solver.cpp:244]     Train net output #1: loss = 0.0389007 (* 1 = 0.0389007 loss)\nI0818 16:44:27.731210 17829 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0818 16:46:46.215265 17829 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 16:48:07.278900 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73228\nI0818 16:48:07.279194 17829 solver.cpp:404]     Test net output #1: loss = 1.34337 (* 1 = 1.34337 loss)\nI0818 16:48:08.585016 17829 solver.cpp:228] Iteration 40200, loss = 0.0436594\nI0818 16:48:08.585074 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:48:08.585091 17829 solver.cpp:244]     Train net output #1: loss 
= 0.0436594 (* 1 = 0.0436594 loss)\nI0818 16:48:08.682323 17829 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 16:50:27.153903 17829 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 16:51:48.345207 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78144\nI0818 16:51:48.345528 17829 solver.cpp:404]     Test net output #1: loss = 0.987384 (* 1 = 0.987384 loss)\nI0818 16:51:49.651427 17829 solver.cpp:228] Iteration 40300, loss = 0.00985277\nI0818 16:51:49.651485 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:51:49.651504 17829 solver.cpp:244]     Train net output #1: loss = 0.00985274 (* 1 = 0.00985274 loss)\nI0818 16:51:49.750617 17829 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 16:54:08.293555 17829 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 16:55:29.520014 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77484\nI0818 16:55:29.520303 17829 solver.cpp:404]     Test net output #1: loss = 1.18591 (* 1 = 1.18591 loss)\nI0818 16:55:30.826325 17829 solver.cpp:228] Iteration 40400, loss = 0.0157524\nI0818 16:55:30.826383 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:55:30.826400 17829 solver.cpp:244]     Train net output #1: loss = 0.0157524 (* 1 = 0.0157524 loss)\nI0818 16:55:30.930769 17829 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 16:57:49.464097 17829 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 16:59:10.709475 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82932\nI0818 16:59:10.709755 17829 solver.cpp:404]     Test net output #1: loss = 0.751067 (* 1 = 0.751067 loss)\nI0818 16:59:12.015563 17829 solver.cpp:228] Iteration 40500, loss = 0.0363887\nI0818 16:59:12.015620 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:59:12.015637 17829 solver.cpp:244]     Train net output #1: loss = 0.0363887 (* 1 = 0.0363887 loss)\nI0818 16:59:12.112442 17829 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 
17:01:30.605029 17829 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:02:51.858901 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7348\nI0818 17:02:51.859231 17829 solver.cpp:404]     Test net output #1: loss = 1.35928 (* 1 = 1.35928 loss)\nI0818 17:02:53.165429 17829 solver.cpp:228] Iteration 40600, loss = 0.0887886\nI0818 17:02:53.165488 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:02:53.165504 17829 solver.cpp:244]     Train net output #1: loss = 0.0887886 (* 1 = 0.0887886 loss)\nI0818 17:02:53.266703 17829 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 17:05:11.859262 17829 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:06:32.870445 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76208\nI0818 17:06:32.870744 17829 solver.cpp:404]     Test net output #1: loss = 1.13025 (* 1 = 1.13025 loss)\nI0818 17:06:34.176306 17829 solver.cpp:228] Iteration 40700, loss = 0.0357895\nI0818 17:06:34.176362 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:06:34.176378 17829 solver.cpp:244]     Train net output #1: loss = 0.0357895 (* 1 = 0.0357895 loss)\nI0818 17:06:34.274073 17829 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 17:08:52.949739 17829 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:10:13.894512 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8086\nI0818 17:10:13.894851 17829 solver.cpp:404]     Test net output #1: loss = 0.906935 (* 1 = 0.906935 loss)\nI0818 17:10:15.201117 17829 solver.cpp:228] Iteration 40800, loss = 0.0473707\nI0818 17:10:15.201176 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:10:15.201192 17829 solver.cpp:244]     Train net output #1: loss = 0.0473706 (* 1 = 0.0473706 loss)\nI0818 17:10:15.301254 17829 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 17:12:33.854856 17829 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:13:55.113536 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.7604\nI0818 17:13:55.113857 17829 solver.cpp:404]     Test net output #1: loss = 1.10088 (* 1 = 1.10088 loss)\nI0818 17:13:56.420029 17829 solver.cpp:228] Iteration 40900, loss = 0.0325118\nI0818 17:13:56.420087 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:13:56.420104 17829 solver.cpp:244]     Train net output #1: loss = 0.0325117 (* 1 = 0.0325117 loss)\nI0818 17:13:56.521373 17829 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 17:16:14.982574 17829 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:17:35.854586 17829 solver.cpp:404]     Test net output #0: accuracy = 0.71988\nI0818 17:17:35.854931 17829 solver.cpp:404]     Test net output #1: loss = 1.50688 (* 1 = 1.50688 loss)\nI0818 17:17:37.160212 17829 solver.cpp:228] Iteration 41000, loss = 0.0271966\nI0818 17:17:37.160264 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:17:37.160280 17829 solver.cpp:244]     Train net output #1: loss = 0.0271965 (* 1 = 0.0271965 loss)\nI0818 17:17:37.263633 17829 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 17:19:55.813730 17829 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:21:17.093073 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74652\nI0818 17:21:17.093420 17829 solver.cpp:404]     Test net output #1: loss = 1.14069 (* 1 = 1.14069 loss)\nI0818 17:21:18.399013 17829 solver.cpp:228] Iteration 41100, loss = 0.107015\nI0818 17:21:18.399066 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:21:18.399083 17829 solver.cpp:244]     Train net output #1: loss = 0.107015 (* 1 = 0.107015 loss)\nI0818 17:21:18.496618 17829 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 17:23:37.094897 17829 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 17:24:58.171844 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80212\nI0818 17:24:58.172183 17829 solver.cpp:404]     Test net output #1: loss = 0.890748 (* 1 = 
0.890748 loss)\nI0818 17:24:59.477882 17829 solver.cpp:228] Iteration 41200, loss = 0.0314558\nI0818 17:24:59.477944 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:24:59.477962 17829 solver.cpp:244]     Train net output #1: loss = 0.0314558 (* 1 = 0.0314558 loss)\nI0818 17:24:59.576314 17829 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0818 17:27:18.187494 17829 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 17:28:39.381315 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69128\nI0818 17:28:39.381639 17829 solver.cpp:404]     Test net output #1: loss = 1.52167 (* 1 = 1.52167 loss)\nI0818 17:28:40.687604 17829 solver.cpp:228] Iteration 41300, loss = 0.02162\nI0818 17:28:40.687662 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:28:40.687680 17829 solver.cpp:244]     Train net output #1: loss = 0.02162 (* 1 = 0.02162 loss)\nI0818 17:28:40.786161 17829 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 17:30:59.231313 17829 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 17:32:20.469941 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7724\nI0818 17:32:20.470263 17829 solver.cpp:404]     Test net output #1: loss = 1.06124 (* 1 = 1.06124 loss)\nI0818 17:32:21.775609 17829 solver.cpp:228] Iteration 41400, loss = 0.017409\nI0818 17:32:21.775662 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:32:21.775679 17829 solver.cpp:244]     Train net output #1: loss = 0.017409 (* 1 = 0.017409 loss)\nI0818 17:32:21.875516 17829 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 17:34:40.323549 17829 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 17:36:01.487162 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80808\nI0818 17:36:01.487524 17829 solver.cpp:404]     Test net output #1: loss = 0.783773 (* 1 = 0.783773 loss)\nI0818 17:36:02.793200 17829 solver.cpp:228] Iteration 41500, loss = 0.0614903\nI0818 17:36:02.793254 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:36:02.793272 17829 solver.cpp:244]     Train net output #1: loss = 0.0614903 (* 1 = 0.0614903 loss)\nI0818 17:36:02.897382 17829 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 17:38:21.429066 17829 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 17:39:42.660884 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80572\nI0818 17:39:42.661208 17829 solver.cpp:404]     Test net output #1: loss = 0.831978 (* 1 = 0.831978 loss)\nI0818 17:39:43.966518 17829 solver.cpp:228] Iteration 41600, loss = 0.0548093\nI0818 17:39:43.966583 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:39:43.966599 17829 solver.cpp:244]     Train net output #1: loss = 0.0548093 (* 1 = 0.0548093 loss)\nI0818 17:39:44.064951 17829 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 17:42:02.498929 17829 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 17:43:23.716125 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69088\nI0818 17:43:23.716473 17829 solver.cpp:404]     Test net output #1: loss = 1.61006 (* 1 = 1.61006 loss)\nI0818 17:43:25.021865 17829 solver.cpp:228] Iteration 41700, loss = 0.0367417\nI0818 17:43:25.021924 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:43:25.021940 17829 solver.cpp:244]     Train net output #1: loss = 0.0367417 (* 1 = 0.0367417 loss)\nI0818 17:43:25.122467 17829 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 17:45:43.578382 17829 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 17:47:04.807723 17829 solver.cpp:404]     Test net output #0: accuracy = 0.73836\nI0818 17:47:04.808048 17829 solver.cpp:404]     Test net output #1: loss = 1.39888 (* 1 = 1.39888 loss)\nI0818 17:47:06.113850 17829 solver.cpp:228] Iteration 41800, loss = 0.0658379\nI0818 17:47:06.113903 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:47:06.113919 17829 solver.cpp:244]     Train net output 
#1: loss = 0.0658378 (* 1 = 0.0658378 loss)\nI0818 17:47:06.213345 17829 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 17:49:24.744858 17829 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 17:50:45.941416 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78664\nI0818 17:50:45.941727 17829 solver.cpp:404]     Test net output #1: loss = 1.07311 (* 1 = 1.07311 loss)\nI0818 17:50:47.247179 17829 solver.cpp:228] Iteration 41900, loss = 0.0152926\nI0818 17:50:47.247228 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:50:47.247246 17829 solver.cpp:244]     Train net output #1: loss = 0.0152925 (* 1 = 0.0152925 loss)\nI0818 17:50:47.343857 17829 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 17:53:05.893589 17829 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 17:54:27.053252 17829 solver.cpp:404]     Test net output #0: accuracy = 0.742\nI0818 17:54:27.053545 17829 solver.cpp:404]     Test net output #1: loss = 1.30575 (* 1 = 1.30575 loss)\nI0818 17:54:28.358984 17829 solver.cpp:228] Iteration 42000, loss = 0.0378721\nI0818 17:54:28.359038 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:54:28.359055 17829 solver.cpp:244]     Train net output #1: loss = 0.037872 (* 1 = 0.037872 loss)\nI0818 17:54:28.454427 17829 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 17:56:46.967766 17829 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 17:58:08.119678 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74168\nI0818 17:58:08.119962 17829 solver.cpp:404]     Test net output #1: loss = 1.29811 (* 1 = 1.29811 loss)\nI0818 17:58:09.425807 17829 solver.cpp:228] Iteration 42100, loss = 0.0596464\nI0818 17:58:09.425863 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:58:09.425879 17829 solver.cpp:244]     Train net output #1: loss = 0.0596463 (* 1 = 0.0596463 loss)\nI0818 17:58:09.524092 17829 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 
18:00:28.109720 17829 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:01:49.329522 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8246\nI0818 18:01:49.329849 17829 solver.cpp:404]     Test net output #1: loss = 0.813476 (* 1 = 0.813476 loss)\nI0818 18:01:50.635176 17829 solver.cpp:228] Iteration 42200, loss = 0.0751638\nI0818 18:01:50.635232 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:01:50.635248 17829 solver.cpp:244]     Train net output #1: loss = 0.0751638 (* 1 = 0.0751638 loss)\nI0818 18:01:50.737478 17829 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 18:04:09.253770 17829 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:05:30.443049 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80976\nI0818 18:05:30.443321 17829 solver.cpp:404]     Test net output #1: loss = 0.85 (* 1 = 0.85 loss)\nI0818 18:05:31.748782 17829 solver.cpp:228] Iteration 42300, loss = 0.082929\nI0818 18:05:31.748836 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:05:31.748853 17829 solver.cpp:244]     Train net output #1: loss = 0.082929 (* 1 = 0.082929 loss)\nI0818 18:05:31.848232 17829 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 18:07:50.344782 17829 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:09:11.562064 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80796\nI0818 18:09:11.562369 17829 solver.cpp:404]     Test net output #1: loss = 0.836189 (* 1 = 0.836189 loss)\nI0818 18:09:12.868080 17829 solver.cpp:228] Iteration 42400, loss = 0.034141\nI0818 18:09:12.868134 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:09:12.868150 17829 solver.cpp:244]     Train net output #1: loss = 0.0341409 (* 1 = 0.0341409 loss)\nI0818 18:09:12.967620 17829 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 18:11:31.560431 17829 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:12:52.759482 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.81932\nI0818 18:12:52.759837 17829 solver.cpp:404]     Test net output #1: loss = 0.746489 (* 1 = 0.746489 loss)\nI0818 18:12:54.066045 17829 solver.cpp:228] Iteration 42500, loss = 0.0252202\nI0818 18:12:54.066105 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:12:54.066123 17829 solver.cpp:244]     Train net output #1: loss = 0.0252202 (* 1 = 0.0252202 loss)\nI0818 18:12:54.167857 17829 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 18:15:12.712523 17829 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:16:33.926429 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80716\nI0818 18:16:33.926777 17829 solver.cpp:404]     Test net output #1: loss = 0.777592 (* 1 = 0.777592 loss)\nI0818 18:16:35.232592 17829 solver.cpp:228] Iteration 42600, loss = 0.0103562\nI0818 18:16:35.232650 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:16:35.232668 17829 solver.cpp:244]     Train net output #1: loss = 0.0103561 (* 1 = 0.0103561 loss)\nI0818 18:16:35.328434 17829 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 18:18:53.917577 17829 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:20:15.169469 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79416\nI0818 18:20:15.169803 17829 solver.cpp:404]     Test net output #1: loss = 0.874038 (* 1 = 0.874038 loss)\nI0818 18:20:16.476634 17829 solver.cpp:228] Iteration 42700, loss = 0.0437205\nI0818 18:20:16.476696 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:20:16.476713 17829 solver.cpp:244]     Train net output #1: loss = 0.0437205 (* 1 = 0.0437205 loss)\nI0818 18:20:16.573829 17829 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 18:22:35.010802 17829 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 18:23:56.204262 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69288\nI0818 18:23:56.204565 17829 solver.cpp:404]     Test net output #1: loss = 1.41605 (* 1 = 
1.41605 loss)\nI0818 18:23:57.511943 17829 solver.cpp:228] Iteration 42800, loss = 0.0434659\nI0818 18:23:57.512006 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:23:57.512024 17829 solver.cpp:244]     Train net output #1: loss = 0.0434658 (* 1 = 0.0434658 loss)\nI0818 18:23:57.611150 17829 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 18:26:16.231390 17829 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 18:27:37.450539 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77772\nI0818 18:27:37.450827 17829 solver.cpp:404]     Test net output #1: loss = 0.985042 (* 1 = 0.985042 loss)\nI0818 18:27:38.756705 17829 solver.cpp:228] Iteration 42900, loss = 0.0294879\nI0818 18:27:38.756770 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:27:38.756789 17829 solver.cpp:244]     Train net output #1: loss = 0.0294878 (* 1 = 0.0294878 loss)\nI0818 18:27:38.855895 17829 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 18:29:57.448758 17829 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 18:31:18.676252 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80792\nI0818 18:31:18.676587 17829 solver.cpp:404]     Test net output #1: loss = 0.860523 (* 1 = 0.860523 loss)\nI0818 18:31:19.983775 17829 solver.cpp:228] Iteration 43000, loss = 0.0144037\nI0818 18:31:19.983841 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:31:19.983865 17829 solver.cpp:244]     Train net output #1: loss = 0.0144036 (* 1 = 0.0144036 loss)\nI0818 18:31:20.083966 17829 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 18:33:38.583745 17829 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 18:34:59.821208 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0818 18:34:59.821521 17829 solver.cpp:404]     Test net output #1: loss = 0.836812 (* 1 = 0.836812 loss)\nI0818 18:35:01.128165 17829 solver.cpp:228] Iteration 43100, loss = 0.0445938\nI0818 18:35:01.128226 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:35:01.128252 17829 solver.cpp:244]     Train net output #1: loss = 0.0445937 (* 1 = 0.0445937 loss)\nI0818 18:35:01.225539 17829 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 18:37:19.806147 17829 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 18:38:41.037138 17829 solver.cpp:404]     Test net output #0: accuracy = 0.789\nI0818 18:38:41.037437 17829 solver.cpp:404]     Test net output #1: loss = 0.927748 (* 1 = 0.927748 loss)\nI0818 18:38:42.343006 17829 solver.cpp:228] Iteration 43200, loss = 0.0533202\nI0818 18:38:42.343070 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:38:42.343093 17829 solver.cpp:244]     Train net output #1: loss = 0.0533201 (* 1 = 0.0533201 loss)\nI0818 18:38:42.443523 17829 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 18:41:01.020390 17829 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 18:42:22.211313 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75732\nI0818 18:42:22.211814 17829 solver.cpp:404]     Test net output #1: loss = 1.24651 (* 1 = 1.24651 loss)\nI0818 18:42:23.518399 17829 solver.cpp:228] Iteration 43300, loss = 0.0476481\nI0818 18:42:23.518458 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:42:23.518483 17829 solver.cpp:244]     Train net output #1: loss = 0.047648 (* 1 = 0.047648 loss)\nI0818 18:42:23.616036 17829 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 18:44:42.135064 17829 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 18:46:03.325016 17829 solver.cpp:404]     Test net output #0: accuracy = 0.759\nI0818 18:46:03.325330 17829 solver.cpp:404]     Test net output #1: loss = 1.13429 (* 1 = 1.13429 loss)\nI0818 18:46:04.632050 17829 solver.cpp:228] Iteration 43400, loss = 0.0210963\nI0818 18:46:04.632108 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:46:04.632133 17829 solver.cpp:244]     Train net output #1: loss = 
0.0210962 (* 1 = 0.0210962 loss)\nI0818 18:46:04.733970 17829 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 18:48:23.137817 17829 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 18:49:44.302101 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82392\nI0818 18:49:44.302417 17829 solver.cpp:404]     Test net output #1: loss = 0.748443 (* 1 = 0.748443 loss)\nI0818 18:49:45.609201 17829 solver.cpp:228] Iteration 43500, loss = 0.0403734\nI0818 18:49:45.609259 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:49:45.609284 17829 solver.cpp:244]     Train net output #1: loss = 0.0403733 (* 1 = 0.0403733 loss)\nI0818 18:49:45.708595 17829 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 18:52:04.156901 17829 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 18:53:25.334760 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74984\nI0818 18:53:25.335069 17829 solver.cpp:404]     Test net output #1: loss = 1.10066 (* 1 = 1.10066 loss)\nI0818 18:53:26.641683 17829 solver.cpp:228] Iteration 43600, loss = 0.0232072\nI0818 18:53:26.641749 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:53:26.641774 17829 solver.cpp:244]     Train net output #1: loss = 0.0232071 (* 1 = 0.0232071 loss)\nI0818 18:53:26.743949 17829 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 18:55:45.179075 17829 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 18:57:06.342891 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI0818 18:57:06.343181 17829 solver.cpp:404]     Test net output #1: loss = 0.865693 (* 1 = 0.865693 loss)\nI0818 18:57:07.648313 17829 solver.cpp:228] Iteration 43700, loss = 0.070194\nI0818 18:57:07.648370 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:57:07.648394 17829 solver.cpp:244]     Train net output #1: loss = 0.0701939 (* 1 = 0.0701939 loss)\nI0818 18:57:07.746042 17829 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 
18:59:26.251037 17829 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:00:47.432613 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80132\nI0818 19:00:47.432932 17829 solver.cpp:404]     Test net output #1: loss = 0.852823 (* 1 = 0.852823 loss)\nI0818 19:00:48.738145 17829 solver.cpp:228] Iteration 43800, loss = 0.0447091\nI0818 19:00:48.738204 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:00:48.738229 17829 solver.cpp:244]     Train net output #1: loss = 0.044709 (* 1 = 0.044709 loss)\nI0818 19:00:48.835949 17829 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 19:03:07.391432 17829 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:04:28.567247 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69084\nI0818 19:04:28.567543 17829 solver.cpp:404]     Test net output #1: loss = 1.65092 (* 1 = 1.65092 loss)\nI0818 19:04:29.873280 17829 solver.cpp:228] Iteration 43900, loss = 0.0344937\nI0818 19:04:29.873334 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:04:29.873358 17829 solver.cpp:244]     Train net output #1: loss = 0.0344936 (* 1 = 0.0344936 loss)\nI0818 19:04:29.975889 17829 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 19:06:48.568181 17829 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:08:09.768694 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0818 19:08:09.768980 17829 solver.cpp:404]     Test net output #1: loss = 0.912832 (* 1 = 0.912832 loss)\nI0818 19:08:11.075600 17829 solver.cpp:228] Iteration 44000, loss = 0.0339981\nI0818 19:08:11.075659 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:08:11.075682 17829 solver.cpp:244]     Train net output #1: loss = 0.033998 (* 1 = 0.033998 loss)\nI0818 19:08:11.176537 17829 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 19:10:29.752934 17829 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:11:50.931690 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.77132\nI0818 19:11:50.932003 17829 solver.cpp:404]     Test net output #1: loss = 1.06465 (* 1 = 1.06465 loss)\nI0818 19:11:52.237156 17829 solver.cpp:228] Iteration 44100, loss = 0.0639115\nI0818 19:11:52.237221 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:11:52.237246 17829 solver.cpp:244]     Train net output #1: loss = 0.0639114 (* 1 = 0.0639114 loss)\nI0818 19:11:52.340943 17829 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 19:14:10.957309 17829 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:15:32.166575 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80948\nI0818 19:15:32.166869 17829 solver.cpp:404]     Test net output #1: loss = 0.905898 (* 1 = 0.905898 loss)\nI0818 19:15:33.473047 17829 solver.cpp:228] Iteration 44200, loss = 0.0678139\nI0818 19:15:33.473112 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:15:33.473137 17829 solver.cpp:244]     Train net output #1: loss = 0.0678137 (* 1 = 0.0678137 loss)\nI0818 19:15:33.569160 17829 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 19:17:52.046982 17829 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:19:13.258411 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74172\nI0818 19:19:13.258723 17829 solver.cpp:404]     Test net output #1: loss = 1.31667 (* 1 = 1.31667 loss)\nI0818 19:19:14.565166 17829 solver.cpp:228] Iteration 44300, loss = 0.0690725\nI0818 19:19:14.565232 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:19:14.565258 17829 solver.cpp:244]     Train net output #1: loss = 0.0690724 (* 1 = 0.0690724 loss)\nI0818 19:19:14.664386 17829 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 19:21:33.160709 17829 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 19:22:54.229009 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7996\nI0818 19:22:54.229344 17829 solver.cpp:404]     Test net output #1: loss = 0.855608 (* 1 = 
0.855608 loss)\nI0818 19:22:55.535735 17829 solver.cpp:228] Iteration 44400, loss = 0.0284569\nI0818 19:22:55.535800 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:22:55.535825 17829 solver.cpp:244]     Train net output #1: loss = 0.0284568 (* 1 = 0.0284568 loss)\nI0818 19:22:55.637315 17829 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 19:25:14.196564 17829 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 19:26:35.453672 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75728\nI0818 19:26:35.453961 17829 solver.cpp:404]     Test net output #1: loss = 1.26776 (* 1 = 1.26776 loss)\nI0818 19:26:36.760151 17829 solver.cpp:228] Iteration 44500, loss = 0.0234484\nI0818 19:26:36.760216 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:26:36.760241 17829 solver.cpp:244]     Train net output #1: loss = 0.0234483 (* 1 = 0.0234483 loss)\nI0818 19:26:36.856158 17829 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 19:28:55.426687 17829 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 19:30:16.651654 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69908\nI0818 19:30:16.651981 17829 solver.cpp:404]     Test net output #1: loss = 1.49508 (* 1 = 1.49508 loss)\nI0818 19:30:17.958832 17829 solver.cpp:228] Iteration 44600, loss = 0.0303091\nI0818 19:30:17.958899 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:30:17.958923 17829 solver.cpp:244]     Train net output #1: loss = 0.030309 (* 1 = 0.030309 loss)\nI0818 19:30:18.057801 17829 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 19:32:36.641393 17829 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 19:33:57.860028 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76668\nI0818 19:33:57.860357 17829 solver.cpp:404]     Test net output #1: loss = 1.09062 (* 1 = 1.09062 loss)\nI0818 19:33:59.166136 17829 solver.cpp:228] Iteration 44700, loss = 0.0243803\nI0818 19:33:59.166198 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:33:59.166224 17829 solver.cpp:244]     Train net output #1: loss = 0.0243802 (* 1 = 0.0243802 loss)\nI0818 19:33:59.269599 17829 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 19:36:17.838230 17829 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 19:37:38.992481 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8126\nI0818 19:37:38.992792 17829 solver.cpp:404]     Test net output #1: loss = 0.859733 (* 1 = 0.859733 loss)\nI0818 19:37:40.299926 17829 solver.cpp:228] Iteration 44800, loss = 0.0426522\nI0818 19:37:40.299990 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:37:40.300014 17829 solver.cpp:244]     Train net output #1: loss = 0.0426521 (* 1 = 0.0426521 loss)\nI0818 19:37:40.398597 17829 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 19:39:58.950448 17829 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 19:41:20.033895 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81352\nI0818 19:41:20.034201 17829 solver.cpp:404]     Test net output #1: loss = 0.842882 (* 1 = 0.842882 loss)\nI0818 19:41:21.340116 17829 solver.cpp:228] Iteration 44900, loss = 0.0467407\nI0818 19:41:21.340176 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:41:21.340200 17829 solver.cpp:244]     Train net output #1: loss = 0.0467406 (* 1 = 0.0467406 loss)\nI0818 19:41:21.440763 17829 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 19:43:40.098203 17829 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 19:45:00.885077 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80956\nI0818 19:45:00.885385 17829 solver.cpp:404]     Test net output #1: loss = 0.886677 (* 1 = 0.886677 loss)\nI0818 19:45:02.190783 17829 solver.cpp:228] Iteration 45000, loss = 0.102746\nI0818 19:45:02.190843 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:45:02.190870 17829 solver.cpp:244]     Train net output 
#1: loss = 0.102746 (* 1 = 0.102746 loss)\nI0818 19:45:02.290508 17829 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 19:47:20.881729 17829 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 19:48:41.855211 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77452\nI0818 19:48:41.855542 17829 solver.cpp:404]     Test net output #1: loss = 1.13871 (* 1 = 1.13871 loss)\nI0818 19:48:43.161257 17829 solver.cpp:228] Iteration 45100, loss = 0.0494946\nI0818 19:48:43.161321 17829 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:48:43.161346 17829 solver.cpp:244]     Train net output #1: loss = 0.0494946 (* 1 = 0.0494946 loss)\nI0818 19:48:43.263567 17829 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 19:51:01.746968 17829 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 19:52:22.842229 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78276\nI0818 19:52:22.842535 17829 solver.cpp:404]     Test net output #1: loss = 0.948063 (* 1 = 0.948063 loss)\nI0818 19:52:24.149348 17829 solver.cpp:228] Iteration 45200, loss = 0.0771946\nI0818 19:52:24.149412 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:52:24.149437 17829 solver.cpp:244]     Train net output #1: loss = 0.0771945 (* 1 = 0.0771945 loss)\nI0818 19:52:24.251703 17829 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 19:54:42.827867 17829 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 19:56:03.893646 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81172\nI0818 19:56:03.893954 17829 solver.cpp:404]     Test net output #1: loss = 0.864139 (* 1 = 0.864139 loss)\nI0818 19:56:05.200256 17829 solver.cpp:228] Iteration 45300, loss = 0.00775722\nI0818 19:56:05.200323 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:56:05.200347 17829 solver.cpp:244]     Train net output #1: loss = 0.00775718 (* 1 = 0.00775718 loss)\nI0818 19:56:05.298363 17829 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 
19:58:23.997323 17829 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 19:59:44.788425 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81864\nI0818 19:59:44.788755 17829 solver.cpp:404]     Test net output #1: loss = 0.830848 (* 1 = 0.830848 loss)\nI0818 19:59:46.094920 17829 solver.cpp:228] Iteration 45400, loss = 0.0659378\nI0818 19:59:46.094985 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:59:46.095010 17829 solver.cpp:244]     Train net output #1: loss = 0.0659378 (* 1 = 0.0659378 loss)\nI0818 19:59:46.190409 17829 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 20:02:04.795003 17829 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:03:25.691311 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75732\nI0818 20:03:25.691655 17829 solver.cpp:404]     Test net output #1: loss = 1.23798 (* 1 = 1.23798 loss)\nI0818 20:03:26.999500 17829 solver.cpp:228] Iteration 45500, loss = 0.0298357\nI0818 20:03:26.999565 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:03:26.999589 17829 solver.cpp:244]     Train net output #1: loss = 0.0298357 (* 1 = 0.0298357 loss)\nI0818 20:03:27.095715 17829 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 20:05:45.676172 17829 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:07:06.912525 17829 solver.cpp:404]     Test net output #0: accuracy = 0.68692\nI0818 20:07:06.912868 17829 solver.cpp:404]     Test net output #1: loss = 1.58294 (* 1 = 1.58294 loss)\nI0818 20:07:08.219533 17829 solver.cpp:228] Iteration 45600, loss = 0.0517586\nI0818 20:07:08.219595 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:07:08.219620 17829 solver.cpp:244]     Train net output #1: loss = 0.0517586 (* 1 = 0.0517586 loss)\nI0818 20:07:08.317369 17829 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 20:09:26.904247 17829 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:10:48.098922 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.76588\nI0818 20:10:48.099247 17829 solver.cpp:404]     Test net output #1: loss = 1.02606 (* 1 = 1.02606 loss)\nI0818 20:10:49.404973 17829 solver.cpp:228] Iteration 45700, loss = 0.0616105\nI0818 20:10:49.405038 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:10:49.405062 17829 solver.cpp:244]     Train net output #1: loss = 0.0616104 (* 1 = 0.0616104 loss)\nI0818 20:10:49.506646 17829 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 20:13:08.055055 17829 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:14:28.894433 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8032\nI0818 20:14:28.894773 17829 solver.cpp:404]     Test net output #1: loss = 0.867853 (* 1 = 0.867853 loss)\nI0818 20:14:30.200294 17829 solver.cpp:228] Iteration 45800, loss = 0.0349402\nI0818 20:14:30.200353 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:14:30.200377 17829 solver.cpp:244]     Train net output #1: loss = 0.0349401 (* 1 = 0.0349401 loss)\nI0818 20:14:30.299674 17829 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0818 20:16:48.899752 17829 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:18:09.999622 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75524\nI0818 20:18:09.999979 17829 solver.cpp:404]     Test net output #1: loss = 1.25105 (* 1 = 1.25105 loss)\nI0818 20:18:11.305855 17829 solver.cpp:228] Iteration 45900, loss = 0.0404582\nI0818 20:18:11.305917 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:18:11.305941 17829 solver.cpp:244]     Train net output #1: loss = 0.0404581 (* 1 = 0.0404581 loss)\nI0818 20:18:11.404286 17829 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 20:20:30.019287 17829 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 20:21:51.172988 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7876\nI0818 20:21:51.173321 17829 solver.cpp:404]     Test net output #1: loss = 0.929331 (* 1 = 
0.929331 loss)\nI0818 20:21:52.478559 17829 solver.cpp:228] Iteration 46000, loss = 0.0198369\nI0818 20:21:52.478624 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:21:52.478648 17829 solver.cpp:244]     Train net output #1: loss = 0.0198368 (* 1 = 0.0198368 loss)\nI0818 20:21:52.581650 17829 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 20:24:11.118248 17829 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 20:25:32.160445 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78\nI0818 20:25:32.160826 17829 solver.cpp:404]     Test net output #1: loss = 1.11657 (* 1 = 1.11657 loss)\nI0818 20:25:33.467911 17829 solver.cpp:228] Iteration 46100, loss = 0.0503683\nI0818 20:25:33.467975 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:25:33.467999 17829 solver.cpp:244]     Train net output #1: loss = 0.0503682 (* 1 = 0.0503682 loss)\nI0818 20:25:33.564633 17829 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 20:27:52.102361 17829 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 20:29:13.224064 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82676\nI0818 20:29:13.224416 17829 solver.cpp:404]     Test net output #1: loss = 0.790019 (* 1 = 0.790019 loss)\nI0818 20:29:14.530700 17829 solver.cpp:228] Iteration 46200, loss = 0.0638764\nI0818 20:29:14.530764 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:29:14.530789 17829 solver.cpp:244]     Train net output #1: loss = 0.0638764 (* 1 = 0.0638764 loss)\nI0818 20:29:14.632015 17829 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 20:31:33.217658 17829 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 20:32:54.262864 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82884\nI0818 20:32:54.263217 17829 solver.cpp:404]     Test net output #1: loss = 0.746703 (* 1 = 0.746703 loss)\nI0818 20:32:55.569353 17829 solver.cpp:228] Iteration 46300, loss = 0.0499591\nI0818 20:32:55.569417 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:32:55.569442 17829 solver.cpp:244]     Train net output #1: loss = 0.049959 (* 1 = 0.049959 loss)\nI0818 20:32:55.664028 17829 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 20:35:14.174177 17829 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 20:36:35.412447 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79136\nI0818 20:36:35.412817 17829 solver.cpp:404]     Test net output #1: loss = 0.873462 (* 1 = 0.873462 loss)\nI0818 20:36:36.718358 17829 solver.cpp:228] Iteration 46400, loss = 0.0262146\nI0818 20:36:36.718422 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:36:36.718447 17829 solver.cpp:244]     Train net output #1: loss = 0.0262145 (* 1 = 0.0262145 loss)\nI0818 20:36:36.822149 17829 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 20:38:55.475018 17829 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 20:40:16.711680 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78348\nI0818 20:40:16.712009 17829 solver.cpp:404]     Test net output #1: loss = 1.07935 (* 1 = 1.07935 loss)\nI0818 20:40:18.019009 17829 solver.cpp:228] Iteration 46500, loss = 0.0153363\nI0818 20:40:18.019068 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:40:18.019093 17829 solver.cpp:244]     Train net output #1: loss = 0.0153361 (* 1 = 0.0153361 loss)\nI0818 20:40:18.116847 17829 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 20:42:36.676631 17829 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 20:43:57.940505 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79028\nI0818 20:43:57.940856 17829 solver.cpp:404]     Test net output #1: loss = 0.885664 (* 1 = 0.885664 loss)\nI0818 20:43:59.247567 17829 solver.cpp:228] Iteration 46600, loss = 0.0136849\nI0818 20:43:59.247629 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:43:59.247654 17829 solver.cpp:244]     Train net output #1: 
loss = 0.0136848 (* 1 = 0.0136848 loss)\nI0818 20:43:59.349130 17829 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 20:46:17.815506 17829 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 20:47:39.092427 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76692\nI0818 20:47:39.092790 17829 solver.cpp:404]     Test net output #1: loss = 1.19321 (* 1 = 1.19321 loss)\nI0818 20:47:40.398524 17829 solver.cpp:228] Iteration 46700, loss = 0.0365303\nI0818 20:47:40.398586 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:47:40.398608 17829 solver.cpp:244]     Train net output #1: loss = 0.0365302 (* 1 = 0.0365302 loss)\nI0818 20:47:40.497480 17829 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 20:49:59.106384 17829 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 20:51:20.367794 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75496\nI0818 20:51:20.368151 17829 solver.cpp:404]     Test net output #1: loss = 1.12452 (* 1 = 1.12452 loss)\nI0818 20:51:21.674871 17829 solver.cpp:228] Iteration 46800, loss = 0.0123842\nI0818 20:51:21.674933 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:51:21.674959 17829 solver.cpp:244]     Train net output #1: loss = 0.0123841 (* 1 = 0.0123841 loss)\nI0818 20:51:21.770813 17829 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0818 20:53:40.373594 17829 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 20:55:01.632968 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81532\nI0818 20:55:01.633311 17829 solver.cpp:404]     Test net output #1: loss = 0.796704 (* 1 = 0.796704 loss)\nI0818 20:55:02.938870 17829 solver.cpp:228] Iteration 46900, loss = 0.0493955\nI0818 20:55:02.938930 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:55:02.938954 17829 solver.cpp:244]     Train net output #1: loss = 0.0493955 (* 1 = 0.0493955 loss)\nI0818 20:55:03.036247 17829 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 
20:57:21.717260 17829 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 20:58:42.986770 17829 solver.cpp:404]     Test net output #0: accuracy = 0.75216\nI0818 20:58:42.987131 17829 solver.cpp:404]     Test net output #1: loss = 1.08887 (* 1 = 1.08887 loss)\nI0818 20:58:44.293088 17829 solver.cpp:228] Iteration 47000, loss = 0.0980605\nI0818 20:58:44.293149 17829 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:58:44.293174 17829 solver.cpp:244]     Train net output #1: loss = 0.0980604 (* 1 = 0.0980604 loss)\nI0818 20:58:44.393633 17829 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 21:01:02.942459 17829 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:02:24.204169 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77496\nI0818 21:02:24.204527 17829 solver.cpp:404]     Test net output #1: loss = 1.04354 (* 1 = 1.04354 loss)\nI0818 21:02:25.510115 17829 solver.cpp:228] Iteration 47100, loss = 0.0101093\nI0818 21:02:25.510176 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:02:25.510201 17829 solver.cpp:244]     Train net output #1: loss = 0.0101092 (* 1 = 0.0101092 loss)\nI0818 21:02:25.614969 17829 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 21:04:44.180178 17829 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:06:05.432374 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78636\nI0818 21:06:05.432725 17829 solver.cpp:404]     Test net output #1: loss = 0.983137 (* 1 = 0.983137 loss)\nI0818 21:06:06.738092 17829 solver.cpp:228] Iteration 47200, loss = 0.0152452\nI0818 21:06:06.738153 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:06:06.738178 17829 solver.cpp:244]     Train net output #1: loss = 0.0152452 (* 1 = 0.0152452 loss)\nI0818 21:06:06.840456 17829 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 21:08:25.393435 17829 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:09:46.656688 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.74708\nI0818 21:09:46.657032 17829 solver.cpp:404]     Test net output #1: loss = 1.25998 (* 1 = 1.25998 loss)\nI0818 21:09:47.962810 17829 solver.cpp:228] Iteration 47300, loss = 0.0397174\nI0818 21:09:47.962867 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:09:47.962890 17829 solver.cpp:244]     Train net output #1: loss = 0.0397173 (* 1 = 0.0397173 loss)\nI0818 21:09:48.061537 17829 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 21:12:06.558693 17829 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:13:27.823145 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72808\nI0818 21:13:27.823493 17829 solver.cpp:404]     Test net output #1: loss = 1.38551 (* 1 = 1.38551 loss)\nI0818 21:13:29.129488 17829 solver.cpp:228] Iteration 47400, loss = 0.049306\nI0818 21:13:29.129544 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:13:29.129570 17829 solver.cpp:244]     Train net output #1: loss = 0.0493059 (* 1 = 0.0493059 loss)\nI0818 21:13:29.230487 17829 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 21:15:47.877398 17829 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:17:09.134691 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74964\nI0818 21:17:09.135056 17829 solver.cpp:404]     Test net output #1: loss = 1.23958 (* 1 = 1.23958 loss)\nI0818 21:17:10.441901 17829 solver.cpp:228] Iteration 47500, loss = 0.0507712\nI0818 21:17:10.441965 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:17:10.441990 17829 solver.cpp:244]     Train net output #1: loss = 0.0507711 (* 1 = 0.0507711 loss)\nI0818 21:17:10.536304 17829 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 21:19:29.151146 17829 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:20:50.413993 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78048\nI0818 21:20:50.414348 17829 solver.cpp:404]     Test net output #1: loss = 0.983381 (* 1 = 0.983381 
loss)\nI0818 21:20:51.721307 17829 solver.cpp:228] Iteration 47600, loss = 0.10938\nI0818 21:20:51.721372 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:20:51.721398 17829 solver.cpp:244]     Train net output #1: loss = 0.10938 (* 1 = 0.10938 loss)\nI0818 21:20:51.818873 17829 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 21:23:10.422920 17829 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 21:24:31.687494 17829 solver.cpp:404]     Test net output #0: accuracy = 0.7662\nI0818 21:24:31.687834 17829 solver.cpp:404]     Test net output #1: loss = 1.09311 (* 1 = 1.09311 loss)\nI0818 21:24:32.993835 17829 solver.cpp:228] Iteration 47700, loss = 0.0214484\nI0818 21:24:32.993897 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:24:32.993923 17829 solver.cpp:244]     Train net output #1: loss = 0.0214483 (* 1 = 0.0214483 loss)\nI0818 21:24:33.090932 17829 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 21:26:51.673514 17829 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 21:28:12.942807 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8264\nI0818 21:28:12.943151 17829 solver.cpp:404]     Test net output #1: loss = 0.793118 (* 1 = 0.793118 loss)\nI0818 21:28:14.250566 17829 solver.cpp:228] Iteration 47800, loss = 0.0375376\nI0818 21:28:14.250629 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:28:14.250654 17829 solver.cpp:244]     Train net output #1: loss = 0.0375375 (* 1 = 0.0375375 loss)\nI0818 21:28:14.348583 17829 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 21:30:32.946574 17829 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 21:31:54.213503 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8284\nI0818 21:31:54.213868 17829 solver.cpp:404]     Test net output #1: loss = 0.722284 (* 1 = 0.722284 loss)\nI0818 21:31:55.520442 17829 solver.cpp:228] Iteration 47900, loss = 0.0143628\nI0818 21:31:55.520511 17829 solver.cpp:244]    
 Train net output #0: accuracy = 1\nI0818 21:31:55.520536 17829 solver.cpp:244]     Train net output #1: loss = 0.0143626 (* 1 = 0.0143626 loss)\nI0818 21:31:55.618436 17829 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 21:34:14.104671 17829 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 21:35:35.400147 17829 solver.cpp:404]     Test net output #0: accuracy = 0.70228\nI0818 21:35:35.400496 17829 solver.cpp:404]     Test net output #1: loss = 1.48103 (* 1 = 1.48103 loss)\nI0818 21:35:36.707379 17829 solver.cpp:228] Iteration 48000, loss = 0.0162807\nI0818 21:35:36.707444 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:35:36.707469 17829 solver.cpp:244]     Train net output #1: loss = 0.0162805 (* 1 = 0.0162805 loss)\nI0818 21:35:36.809377 17829 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 21:37:55.337888 17829 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 21:39:16.612987 17829 solver.cpp:404]     Test net output #0: accuracy = 0.81404\nI0818 21:39:16.613343 17829 solver.cpp:404]     Test net output #1: loss = 0.864084 (* 1 = 0.864084 loss)\nI0818 21:39:17.918709 17829 solver.cpp:228] Iteration 48100, loss = 0.0286687\nI0818 21:39:17.918764 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:39:17.918789 17829 solver.cpp:244]     Train net output #1: loss = 0.0286686 (* 1 = 0.0286686 loss)\nI0818 21:39:18.016086 17829 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 21:41:36.592674 17829 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 21:42:57.859341 17829 solver.cpp:404]     Test net output #0: accuracy = 0.78632\nI0818 21:42:57.859706 17829 solver.cpp:404]     Test net output #1: loss = 0.971902 (* 1 = 0.971902 loss)\nI0818 21:42:59.165421 17829 solver.cpp:228] Iteration 48200, loss = 0.0604309\nI0818 21:42:59.165482 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:42:59.165508 17829 solver.cpp:244]     Train net output #1: loss = 0.0604307 
(* 1 = 0.0604307 loss)\nI0818 21:42:59.262784 17829 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 21:45:17.784035 17829 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 21:46:39.053097 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77564\nI0818 21:46:39.053459 17829 solver.cpp:404]     Test net output #1: loss = 1.10157 (* 1 = 1.10157 loss)\nI0818 21:46:40.361634 17829 solver.cpp:228] Iteration 48300, loss = 0.0581735\nI0818 21:46:40.361690 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:46:40.361714 17829 solver.cpp:244]     Train net output #1: loss = 0.0581733 (* 1 = 0.0581733 loss)\nI0818 21:46:40.457103 17829 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 21:48:58.559734 17829 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 21:50:19.836380 17829 solver.cpp:404]     Test net output #0: accuracy = 0.824\nI0818 21:50:19.836719 17829 solver.cpp:404]     Test net output #1: loss = 0.84928 (* 1 = 0.84928 loss)\nI0818 21:50:21.145768 17829 solver.cpp:228] Iteration 48400, loss = 0.0455593\nI0818 21:50:21.145812 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:50:21.145834 17829 solver.cpp:244]     Train net output #1: loss = 0.0455591 (* 1 = 0.0455591 loss)\nI0818 21:50:21.237334 17829 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 21:52:39.268781 17829 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 21:54:00.559212 17829 solver.cpp:404]     Test net output #0: accuracy = 0.69596\nI0818 21:54:00.559562 17829 solver.cpp:404]     Test net output #1: loss = 1.58942 (* 1 = 1.58942 loss)\nI0818 21:54:01.869415 17829 solver.cpp:228] Iteration 48500, loss = 0.0835892\nI0818 21:54:01.869469 17829 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:54:01.869499 17829 solver.cpp:244]     Train net output #1: loss = 0.083589 (* 1 = 0.083589 loss)\nI0818 21:54:01.962590 17829 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 21:56:20.083670 17829 
solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 21:57:41.377172 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74672\nI0818 21:57:41.377516 17829 solver.cpp:404]     Test net output #1: loss = 1.42024 (* 1 = 1.42024 loss)\nI0818 21:57:42.686192 17829 solver.cpp:228] Iteration 48600, loss = 0.0783131\nI0818 21:57:42.686249 17829 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:57:42.686273 17829 solver.cpp:244]     Train net output #1: loss = 0.078313 (* 1 = 0.078313 loss)\nI0818 21:57:42.777613 17829 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 22:00:00.851840 17829 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:01:22.129087 17829 solver.cpp:404]     Test net output #0: accuracy = 0.77368\nI0818 22:01:22.129448 17829 solver.cpp:404]     Test net output #1: loss = 0.995011 (* 1 = 0.995011 loss)\nI0818 22:01:23.439028 17829 solver.cpp:228] Iteration 48700, loss = 0.0220105\nI0818 22:01:23.439074 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:01:23.439097 17829 solver.cpp:244]     Train net output #1: loss = 0.0220103 (* 1 = 0.0220103 loss)\nI0818 22:01:23.527448 17829 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 22:03:41.542430 17829 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 22:05:02.826246 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0818 22:05:02.826591 17829 solver.cpp:404]     Test net output #1: loss = 0.828228 (* 1 = 0.828228 loss)\nI0818 22:05:04.134747 17829 solver.cpp:228] Iteration 48800, loss = 0.0332488\nI0818 22:05:04.134805 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:05:04.134830 17829 solver.cpp:244]     Train net output #1: loss = 0.0332486 (* 1 = 0.0332486 loss)\nI0818 22:05:04.228809 17829 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 22:07:22.307869 17829 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:08:43.564329 17829 solver.cpp:404]     Test net output #0: 
accuracy = 0.7796\nI0818 22:08:43.564677 17829 solver.cpp:404]     Test net output #1: loss = 1.03609 (* 1 = 1.03609 loss)\nI0818 22:08:44.874143 17829 solver.cpp:228] Iteration 48900, loss = 0.0459056\nI0818 22:08:44.874200 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:08:44.874225 17829 solver.cpp:244]     Train net output #1: loss = 0.0459055 (* 1 = 0.0459055 loss)\nI0818 22:08:44.966998 17829 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 22:11:03.024245 17829 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:12:24.276717 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80816\nI0818 22:12:24.277068 17829 solver.cpp:404]     Test net output #1: loss = 0.858437 (* 1 = 0.858437 loss)\nI0818 22:12:25.585541 17829 solver.cpp:228] Iteration 49000, loss = 0.0477137\nI0818 22:12:25.585597 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:12:25.585620 17829 solver.cpp:244]     Train net output #1: loss = 0.0477136 (* 1 = 0.0477136 loss)\nI0818 22:12:25.683085 17829 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 22:14:43.796249 17829 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 22:16:05.051496 17829 solver.cpp:404]     Test net output #0: accuracy = 0.784\nI0818 22:16:05.051862 17829 solver.cpp:404]     Test net output #1: loss = 0.931108 (* 1 = 0.931108 loss)\nI0818 22:16:06.360699 17829 solver.cpp:228] Iteration 49100, loss = 0.0572506\nI0818 22:16:06.360755 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:16:06.360780 17829 solver.cpp:244]     Train net output #1: loss = 0.0572504 (* 1 = 0.0572504 loss)\nI0818 22:16:06.457114 17829 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 22:18:24.475329 17829 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:19:45.748522 17829 solver.cpp:404]     Test net output #0: accuracy = 0.76208\nI0818 22:19:45.748863 17829 solver.cpp:404]     Test net output #1: loss = 1.21297 (* 1 = 1.21297 
loss)\nI0818 22:19:47.057634 17829 solver.cpp:228] Iteration 49200, loss = 0.0216056\nI0818 22:19:47.057694 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:19:47.057718 17829 solver.cpp:244]     Train net output #1: loss = 0.0216055 (* 1 = 0.0216055 loss)\nI0818 22:19:47.147318 17829 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0818 22:22:05.183745 17829 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 22:23:26.453929 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8298\nI0818 22:23:26.454290 17829 solver.cpp:404]     Test net output #1: loss = 0.737582 (* 1 = 0.737582 loss)\nI0818 22:23:27.768560 17829 solver.cpp:228] Iteration 49300, loss = 0.0368939\nI0818 22:23:27.768613 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:23:27.768631 17829 solver.cpp:244]     Train net output #1: loss = 0.0368938 (* 1 = 0.0368938 loss)\nI0818 22:23:27.857888 17829 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 22:25:45.899554 17829 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 22:27:07.203259 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79508\nI0818 22:27:07.203624 17829 solver.cpp:404]     Test net output #1: loss = 0.994212 (* 1 = 0.994212 loss)\nI0818 22:27:08.513758 17829 solver.cpp:228] Iteration 49400, loss = 0.0425389\nI0818 22:27:08.513819 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:27:08.513837 17829 solver.cpp:244]     Train net output #1: loss = 0.0425388 (* 1 = 0.0425388 loss)\nI0818 22:27:08.603816 17829 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 22:29:26.595068 17829 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 22:30:47.860697 17829 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0818 22:30:47.861047 17829 solver.cpp:404]     Test net output #1: loss = 0.946122 (* 1 = 0.946122 loss)\nI0818 22:30:49.169790 17829 solver.cpp:228] Iteration 49500, loss = 0.0528689\nI0818 22:30:49.169849 17829 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:30:49.169867 17829 solver.cpp:244]     Train net output #1: loss = 0.0528688 (* 1 = 0.0528688 loss)\nI0818 22:30:49.266762 17829 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 22:33:07.257431 17829 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 22:34:28.534557 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79332\nI0818 22:34:28.534915 17829 solver.cpp:404]     Test net output #1: loss = 1.02861 (* 1 = 1.02861 loss)\nI0818 22:34:29.843204 17829 solver.cpp:228] Iteration 49600, loss = 0.0157914\nI0818 22:34:29.843266 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:34:29.843282 17829 solver.cpp:244]     Train net output #1: loss = 0.0157912 (* 1 = 0.0157912 loss)\nI0818 22:34:29.935643 17829 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 22:36:48.018704 17829 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 22:38:09.286834 17829 solver.cpp:404]     Test net output #0: accuracy = 0.74032\nI0818 22:38:09.287171 17829 solver.cpp:404]     Test net output #1: loss = 1.28385 (* 1 = 1.28385 loss)\nI0818 22:38:10.595897 17829 solver.cpp:228] Iteration 49700, loss = 0.0208877\nI0818 22:38:10.595958 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:38:10.595975 17829 solver.cpp:244]     Train net output #1: loss = 0.0208875 (* 1 = 0.0208875 loss)\nI0818 22:38:10.689163 17829 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 22:40:28.826957 17829 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 22:41:50.088469 17829 solver.cpp:404]     Test net output #0: accuracy = 0.82788\nI0818 22:41:50.088835 17829 solver.cpp:404]     Test net output #1: loss = 0.733889 (* 1 = 0.733889 loss)\nI0818 22:41:51.399055 17829 solver.cpp:228] Iteration 49800, loss = 0.0312447\nI0818 22:41:51.399114 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:41:51.399132 17829 solver.cpp:244]     Train net output #1: 
loss = 0.0312445 (* 1 = 0.0312445 loss)\nI0818 22:41:51.487025 17829 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 22:44:09.513087 17829 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 22:45:30.790609 17829 solver.cpp:404]     Test net output #0: accuracy = 0.79784\nI0818 22:45:30.790952 17829 solver.cpp:404]     Test net output #1: loss = 0.925804 (* 1 = 0.925804 loss)\nI0818 22:45:32.099802 17829 solver.cpp:228] Iteration 49900, loss = 0.0349834\nI0818 22:45:32.099864 17829 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:45:32.099882 17829 solver.cpp:244]     Train net output #1: loss = 0.0349833 (* 1 = 0.0349833 loss)\nI0818 22:45:32.194306 17829 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 22:47:50.249141 17829 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 22:49:11.529147 17829 solver.cpp:404]     Test net output #0: accuracy = 0.72376\nI0818 22:49:11.529531 17829 solver.cpp:404]     Test net output #1: loss = 1.45113 (* 1 = 1.45113 loss)\nI0818 22:49:12.839646 17829 solver.cpp:228] Iteration 50000, loss = 0.0109145\nI0818 22:49:12.839707 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:49:12.839723 17829 solver.cpp:244]     Train net output #1: loss = 0.0109143 (* 1 = 0.0109143 loss)\nI0818 22:49:12.932898 17829 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 22:49:12.932924 17829 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 22:51:31.045910 17829 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 22:52:52.300647 17829 solver.cpp:404]     Test net output #0: accuracy = 0.84064\nI0818 22:52:52.300976 17829 solver.cpp:404]     Test net output #1: loss = 0.689055 (* 1 = 0.689055 loss)\nI0818 22:52:53.611382 17829 solver.cpp:228] Iteration 50100, loss = 0.0112248\nI0818 22:52:53.611439 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:52:53.611454 17829 solver.cpp:244]     Train net output #1: loss = 0.0112246 (* 1 = 0.0112246 
loss)\nI0818 22:52:53.705637 17829 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 22:55:11.837406 17829 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 22:56:33.101851 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8616\nI0818 22:56:33.102200 17829 solver.cpp:404]     Test net output #1: loss = 0.576767 (* 1 = 0.576767 loss)\nI0818 22:56:34.412390 17829 solver.cpp:228] Iteration 50200, loss = 0.0135657\nI0818 22:56:34.412451 17829 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:56:34.412467 17829 solver.cpp:244]     Train net output #1: loss = 0.0135656 (* 1 = 0.0135656 loss)\nI0818 22:56:34.502532 17829 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 22:58:52.550082 17829 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:00:13.796159 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87248\nI0818 23:00:13.796509 17829 solver.cpp:404]     Test net output #1: loss = 0.520459 (* 1 = 0.520459 loss)\nI0818 23:00:15.105559 17829 solver.cpp:228] Iteration 50300, loss = 0.00234537\nI0818 23:00:15.105620 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:00:15.105636 17829 solver.cpp:244]     Train net output #1: loss = 0.0023452 (* 1 = 0.0023452 loss)\nI0818 23:00:15.201040 17829 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 23:02:33.429337 17829 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:03:54.694150 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0818 23:03:54.694521 17829 solver.cpp:404]     Test net output #1: loss = 0.514334 (* 1 = 0.514334 loss)\nI0818 23:03:56.003015 17829 solver.cpp:228] Iteration 50400, loss = 0.00111549\nI0818 23:03:56.003072 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:03:56.003089 17829 solver.cpp:244]     Train net output #1: loss = 0.00111531 (* 1 = 0.00111531 loss)\nI0818 23:03:56.100860 17829 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 23:06:14.332322 17829 
solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:07:35.595788 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87872\nI0818 23:07:35.596132 17829 solver.cpp:404]     Test net output #1: loss = 0.499108 (* 1 = 0.499108 loss)\nI0818 23:07:36.905880 17829 solver.cpp:228] Iteration 50500, loss = 0.00105966\nI0818 23:07:36.905941 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:07:36.905959 17829 solver.cpp:244]     Train net output #1: loss = 0.00105949 (* 1 = 0.00105949 loss)\nI0818 23:07:36.991605 17829 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 23:09:55.055140 17829 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:11:16.303771 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0818 23:11:16.304123 17829 solver.cpp:404]     Test net output #1: loss = 0.495433 (* 1 = 0.495433 loss)\nI0818 23:11:17.614383 17829 solver.cpp:228] Iteration 50600, loss = 0.00107736\nI0818 23:11:17.614444 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:11:17.614461 17829 solver.cpp:244]     Train net output #1: loss = 0.00107718 (* 1 = 0.00107718 loss)\nI0818 23:11:17.704888 17829 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 23:13:35.787591 17829 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0818 23:14:56.337378 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0818 23:14:56.337723 17829 solver.cpp:404]     Test net output #1: loss = 0.487103 (* 1 = 0.487103 loss)\nI0818 23:14:57.646201 17829 solver.cpp:228] Iteration 50700, loss = 0.00110797\nI0818 23:14:57.646265 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:14:57.646283 17829 solver.cpp:244]     Train net output #1: loss = 0.0011078 (* 1 = 0.0011078 loss)\nI0818 23:14:57.741886 17829 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 23:17:15.826635 17829 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:18:37.105132 17829 solver.cpp:404]     Test net output #0: 
accuracy = 0.88188\nI0818 23:18:37.105476 17829 solver.cpp:404]     Test net output #1: loss = 0.490691 (* 1 = 0.490691 loss)\nI0818 23:18:38.415127 17829 solver.cpp:228] Iteration 50800, loss = 0.0011329\nI0818 23:18:38.415189 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:18:38.415207 17829 solver.cpp:244]     Train net output #1: loss = 0.00113272 (* 1 = 0.00113272 loss)\nI0818 23:18:38.507711 17829 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0818 23:20:56.540693 17829 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 23:22:17.793515 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0818 23:22:17.793875 17829 solver.cpp:404]     Test net output #1: loss = 0.488495 (* 1 = 0.488495 loss)\nI0818 23:22:19.102140 17829 solver.cpp:228] Iteration 50900, loss = 0.000917022\nI0818 23:22:19.102200 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:22:19.102217 17829 solver.cpp:244]     Train net output #1: loss = 0.000916847 (* 1 = 0.000916847 loss)\nI0818 23:22:19.190170 17829 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0818 23:24:37.344023 17829 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 23:25:58.596927 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0818 23:25:58.597272 17829 solver.cpp:404]     Test net output #1: loss = 0.492737 (* 1 = 0.492737 loss)\nI0818 23:25:59.904970 17829 solver.cpp:228] Iteration 51000, loss = 0.00065576\nI0818 23:25:59.905030 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:25:59.905048 17829 solver.cpp:244]     Train net output #1: loss = 0.000655584 (* 1 = 0.000655584 loss)\nI0818 23:26:00.009330 17829 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0818 23:28:18.536455 17829 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 23:29:39.797451 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0818 23:29:39.797823 17829 solver.cpp:404]     Test net output #1: loss = 0.492069 (* 1 = 
0.492069 loss)\nI0818 23:29:41.106489 17829 solver.cpp:228] Iteration 51100, loss = 0.000682695\nI0818 23:29:41.106554 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:29:41.106570 17829 solver.cpp:244]     Train net output #1: loss = 0.00068252 (* 1 = 0.00068252 loss)\nI0818 23:29:41.201035 17829 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0818 23:31:59.633296 17829 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 23:33:20.895898 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87876\nI0818 23:33:20.896229 17829 solver.cpp:404]     Test net output #1: loss = 0.496234 (* 1 = 0.496234 loss)\nI0818 23:33:22.204371 17829 solver.cpp:228] Iteration 51200, loss = 0.00056212\nI0818 23:33:22.204430 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:33:22.204447 17829 solver.cpp:244]     Train net output #1: loss = 0.000561944 (* 1 = 0.000561944 loss)\nI0818 23:33:22.296929 17829 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0818 23:35:40.858832 17829 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 23:37:02.108343 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8768\nI0818 23:37:02.108712 17829 solver.cpp:404]     Test net output #1: loss = 0.499574 (* 1 = 0.499574 loss)\nI0818 23:37:03.417146 17829 solver.cpp:228] Iteration 51300, loss = 0.000625278\nI0818 23:37:03.417207 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:37:03.417224 17829 solver.cpp:244]     Train net output #1: loss = 0.000625102 (* 1 = 0.000625102 loss)\nI0818 23:37:03.512025 17829 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0818 23:39:22.015352 17829 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0818 23:40:43.268345 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87684\nI0818 23:40:43.268720 17829 solver.cpp:404]     Test net output #1: loss = 0.503155 (* 1 = 0.503155 loss)\nI0818 23:40:44.578012 17829 solver.cpp:228] Iteration 51400, loss = 0.000424015\nI0818 
23:40:44.578073 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:40:44.578090 17829 solver.cpp:244]     Train net output #1: loss = 0.000423839 (* 1 = 0.000423839 loss)\nI0818 23:40:44.670774 17829 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0818 23:43:03.167047 17829 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0818 23:44:24.417191 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87432\nI0818 23:44:24.417568 17829 solver.cpp:404]     Test net output #1: loss = 0.505136 (* 1 = 0.505136 loss)\nI0818 23:44:25.726291 17829 solver.cpp:228] Iteration 51500, loss = 0.000530626\nI0818 23:44:25.726351 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:44:25.726367 17829 solver.cpp:244]     Train net output #1: loss = 0.000530451 (* 1 = 0.000530451 loss)\nI0818 23:44:25.827035 17829 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0818 23:46:44.462666 17829 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0818 23:48:05.701617 17829 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0818 23:48:05.701977 17829 solver.cpp:404]     Test net output #1: loss = 0.506247 (* 1 = 0.506247 loss)\nI0818 23:48:07.010668 17829 solver.cpp:228] Iteration 51600, loss = 0.000599609\nI0818 23:48:07.010730 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:48:07.010746 17829 solver.cpp:244]     Train net output #1: loss = 0.000599433 (* 1 = 0.000599433 loss)\nI0818 23:48:07.109480 17829 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0818 23:50:25.659996 17829 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0818 23:51:46.928871 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0818 23:51:46.929237 17829 solver.cpp:404]     Test net output #1: loss = 0.507835 (* 1 = 0.507835 loss)\nI0818 23:51:48.237289 17829 solver.cpp:228] Iteration 51700, loss = 0.00047696\nI0818 23:51:48.237347 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:51:48.237365 17829 
solver.cpp:244]     Train net output #1: loss = 0.000476784 (* 1 = 0.000476784 loss)\nI0818 23:51:48.336486 17829 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0818 23:54:06.844177 17829 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0818 23:55:28.131181 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0818 23:55:28.131554 17829 solver.cpp:404]     Test net output #1: loss = 0.510678 (* 1 = 0.510678 loss)\nI0818 23:55:29.439774 17829 solver.cpp:228] Iteration 51800, loss = 0.000468562\nI0818 23:55:29.439836 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:55:29.439852 17829 solver.cpp:244]     Train net output #1: loss = 0.000468387 (* 1 = 0.000468387 loss)\nI0818 23:55:29.533449 17829 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0818 23:57:48.004134 17829 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0818 23:59:09.283649 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87088\nI0818 23:59:09.284019 17829 solver.cpp:404]     Test net output #1: loss = 0.51367 (* 1 = 0.51367 loss)\nI0818 23:59:10.592701 17829 solver.cpp:228] Iteration 51900, loss = 0.000584611\nI0818 23:59:10.592761 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:59:10.592777 17829 solver.cpp:244]     Train net output #1: loss = 0.000584435 (* 1 = 0.000584435 loss)\nI0818 23:59:10.692519 17829 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0819 00:01:29.195225 17829 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:02:50.460820 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87188\nI0819 00:02:50.461190 17829 solver.cpp:404]     Test net output #1: loss = 0.513155 (* 1 = 0.513155 loss)\nI0819 00:02:51.768972 17829 solver.cpp:228] Iteration 52000, loss = 0.000520279\nI0819 00:02:51.769032 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:02:51.769048 17829 solver.cpp:244]     Train net output #1: loss = 0.000520103 (* 1 = 0.000520103 loss)\nI0819 00:02:51.864729 17829 
sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0819 00:05:10.455000 17829 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:06:31.691793 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86984\nI0819 00:06:31.692121 17829 solver.cpp:404]     Test net output #1: loss = 0.516725 (* 1 = 0.516725 loss)\nI0819 00:06:33.000735 17829 solver.cpp:228] Iteration 52100, loss = 0.000386891\nI0819 00:06:33.000797 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:06:33.000813 17829 solver.cpp:244]     Train net output #1: loss = 0.000386715 (* 1 = 0.000386715 loss)\nI0819 00:06:33.096597 17829 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0819 00:08:51.557545 17829 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:10:12.824030 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8692\nI0819 00:10:12.824359 17829 solver.cpp:404]     Test net output #1: loss = 0.517137 (* 1 = 0.517137 loss)\nI0819 00:10:14.132129 17829 solver.cpp:228] Iteration 52200, loss = 0.00042359\nI0819 00:10:14.132190 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:10:14.132208 17829 solver.cpp:244]     Train net output #1: loss = 0.000423414 (* 1 = 0.000423414 loss)\nI0819 00:10:14.233815 17829 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0819 00:12:32.771953 17829 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:13:54.043015 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86752\nI0819 00:13:54.043390 17829 solver.cpp:404]     Test net output #1: loss = 0.520566 (* 1 = 0.520566 loss)\nI0819 00:13:55.352241 17829 solver.cpp:228] Iteration 52300, loss = 0.000557934\nI0819 00:13:55.352303 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:13:55.352320 17829 solver.cpp:244]     Train net output #1: loss = 0.000557759 (* 1 = 0.000557759 loss)\nI0819 00:13:55.444392 17829 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0819 00:16:13.899516 17829 solver.cpp:337] Iteration 52400, 
Testing net (#0)\nI0819 00:17:35.151283 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86768\nI0819 00:17:35.151619 17829 solver.cpp:404]     Test net output #1: loss = 0.520861 (* 1 = 0.520861 loss)\nI0819 00:17:36.456682 17829 solver.cpp:228] Iteration 52400, loss = 0.000363462\nI0819 00:17:36.456738 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:17:36.456755 17829 solver.cpp:244]     Train net output #1: loss = 0.000363286 (* 1 = 0.000363286 loss)\nI0819 00:17:36.557267 17829 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0819 00:19:55.102150 17829 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 00:21:16.357858 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 00:21:16.358222 17829 solver.cpp:404]     Test net output #1: loss = 0.52331 (* 1 = 0.52331 loss)\nI0819 00:21:17.664238 17829 solver.cpp:228] Iteration 52500, loss = 0.000429426\nI0819 00:21:17.664296 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:21:17.664314 17829 solver.cpp:244]     Train net output #1: loss = 0.00042925 (* 1 = 0.00042925 loss)\nI0819 00:21:17.770597 17829 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0819 00:23:36.413233 17829 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 00:24:57.647858 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86688\nI0819 00:24:57.648190 17829 solver.cpp:404]     Test net output #1: loss = 0.521959 (* 1 = 0.521959 loss)\nI0819 00:24:58.953716 17829 solver.cpp:228] Iteration 52600, loss = 0.000529149\nI0819 00:24:58.953771 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:24:58.953788 17829 solver.cpp:244]     Train net output #1: loss = 0.000528973 (* 1 = 0.000528973 loss)\nI0819 00:24:59.055534 17829 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0819 00:27:17.750325 17829 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 00:28:38.999790 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 
00:28:39.000144 17829 solver.cpp:404]     Test net output #1: loss = 0.522747 (* 1 = 0.522747 loss)\nI0819 00:28:40.306020 17829 solver.cpp:228] Iteration 52700, loss = 0.000393996\nI0819 00:28:40.306077 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:28:40.306092 17829 solver.cpp:244]     Train net output #1: loss = 0.00039382 (* 1 = 0.00039382 loss)\nI0819 00:28:40.408061 17829 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0819 00:30:58.863029 17829 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 00:32:20.072856 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86688\nI0819 00:32:20.073215 17829 solver.cpp:404]     Test net output #1: loss = 0.522862 (* 1 = 0.522862 loss)\nI0819 00:32:21.379230 17829 solver.cpp:228] Iteration 52800, loss = 0.000312557\nI0819 00:32:21.379289 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:32:21.379305 17829 solver.cpp:244]     Train net output #1: loss = 0.000312381 (* 1 = 0.000312381 loss)\nI0819 00:32:21.481708 17829 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0819 00:34:40.030333 17829 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 00:36:01.277576 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86484\nI0819 00:36:01.277928 17829 solver.cpp:404]     Test net output #1: loss = 0.526341 (* 1 = 0.526341 loss)\nI0819 00:36:02.583189 17829 solver.cpp:228] Iteration 52900, loss = 0.000309159\nI0819 00:36:02.583250 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:36:02.583266 17829 solver.cpp:244]     Train net output #1: loss = 0.000308983 (* 1 = 0.000308983 loss)\nI0819 00:36:02.682010 17829 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0819 00:38:21.218122 17829 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 00:39:42.454322 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86648\nI0819 00:39:42.454682 17829 solver.cpp:404]     Test net output #1: loss = 0.52198 (* 1 = 0.52198 loss)\nI0819 
00:39:43.760440 17829 solver.cpp:228] Iteration 53000, loss = 0.000548755\nI0819 00:39:43.760502 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:39:43.760519 17829 solver.cpp:244]     Train net output #1: loss = 0.000548579 (* 1 = 0.000548579 loss)\nI0819 00:39:43.860024 17829 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0819 00:42:02.335978 17829 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 00:43:22.991267 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86408\nI0819 00:43:22.991600 17829 solver.cpp:404]     Test net output #1: loss = 0.526062 (* 1 = 0.526062 loss)\nI0819 00:43:24.297241 17829 solver.cpp:228] Iteration 53100, loss = 0.000426996\nI0819 00:43:24.297302 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:43:24.297319 17829 solver.cpp:244]     Train net output #1: loss = 0.00042682 (* 1 = 0.00042682 loss)\nI0819 00:43:24.401918 17829 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0819 00:45:42.846992 17829 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 00:47:03.396584 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86484\nI0819 00:47:03.396884 17829 solver.cpp:404]     Test net output #1: loss = 0.523588 (* 1 = 0.523588 loss)\nI0819 00:47:04.702391 17829 solver.cpp:228] Iteration 53200, loss = 0.000386156\nI0819 00:47:04.702453 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:47:04.702471 17829 solver.cpp:244]     Train net output #1: loss = 0.00038598 (* 1 = 0.00038598 loss)\nI0819 00:47:04.796641 17829 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0819 00:49:23.133723 17829 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 00:50:43.736608 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8636\nI0819 00:50:43.736915 17829 solver.cpp:404]     Test net output #1: loss = 0.525145 (* 1 = 0.525145 loss)\nI0819 00:50:45.042480 17829 solver.cpp:228] Iteration 53300, loss = 0.000431307\nI0819 00:50:45.042538 17829 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:50:45.042556 17829 solver.cpp:244]     Train net output #1: loss = 0.000431131 (* 1 = 0.000431131 loss)\nI0819 00:50:45.143851 17829 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0819 00:53:03.516119 17829 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 00:54:24.628697 17829 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0819 00:54:24.629050 17829 solver.cpp:404]     Test net output #1: loss = 0.525041 (* 1 = 0.525041 loss)\nI0819 00:54:25.934934 17829 solver.cpp:228] Iteration 53400, loss = 0.000488133\nI0819 00:54:25.934993 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:54:25.935009 17829 solver.cpp:244]     Train net output #1: loss = 0.000487958 (* 1 = 0.000487958 loss)\nI0819 00:54:26.032099 17829 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0819 00:56:44.044344 17829 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 00:58:05.280562 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86376\nI0819 00:58:05.280927 17829 solver.cpp:404]     Test net output #1: loss = 0.525802 (* 1 = 0.525802 loss)\nI0819 00:58:06.586581 17829 solver.cpp:228] Iteration 53500, loss = 0.000417884\nI0819 00:58:06.586642 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:58:06.586660 17829 solver.cpp:244]     Train net output #1: loss = 0.000417708 (* 1 = 0.000417708 loss)\nI0819 00:58:06.679301 17829 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0819 01:00:24.725324 17829 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:01:45.946058 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0819 01:01:45.946420 17829 solver.cpp:404]     Test net output #1: loss = 0.524836 (* 1 = 0.524836 loss)\nI0819 01:01:47.253692 17829 solver.cpp:228] Iteration 53600, loss = 0.000386928\nI0819 01:01:47.253756 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:01:47.253773 17829 solver.cpp:244]     Train net 
output #1: loss = 0.000386752 (* 1 = 0.000386752 loss)\nI0819 01:01:47.350294 17829 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0819 01:04:05.360643 17829 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:05:26.612574 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86284\nI0819 01:05:26.612908 17829 solver.cpp:404]     Test net output #1: loss = 0.527848 (* 1 = 0.527848 loss)\nI0819 01:05:27.920373 17829 solver.cpp:228] Iteration 53700, loss = 0.000395919\nI0819 01:05:27.920433 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:05:27.920451 17829 solver.cpp:244]     Train net output #1: loss = 0.000395743 (* 1 = 0.000395743 loss)\nI0819 01:05:28.010771 17829 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0819 01:07:46.062746 17829 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:09:07.298388 17829 solver.cpp:404]     Test net output #0: accuracy = 0.864\nI0819 01:09:07.298753 17829 solver.cpp:404]     Test net output #1: loss = 0.52381 (* 1 = 0.52381 loss)\nI0819 01:09:08.605984 17829 solver.cpp:228] Iteration 53800, loss = 0.000447802\nI0819 01:09:08.606045 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:09:08.606061 17829 solver.cpp:244]     Train net output #1: loss = 0.000447626 (* 1 = 0.000447626 loss)\nI0819 01:09:08.700783 17829 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0819 01:11:26.719929 17829 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:12:47.959311 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8622\nI0819 01:12:47.959678 17829 solver.cpp:404]     Test net output #1: loss = 0.526702 (* 1 = 0.526702 loss)\nI0819 01:12:49.266799 17829 solver.cpp:228] Iteration 53900, loss = 0.000508892\nI0819 01:12:49.266860 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:12:49.266877 17829 solver.cpp:244]     Train net output #1: loss = 0.000508716 (* 1 = 0.000508716 loss)\nI0819 01:12:49.358086 17829 sgd_solver.cpp:166] Iteration 
53900, lr = 0.035\nI0819 01:15:07.377946 17829 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:16:28.617147 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8632\nI0819 01:16:28.617498 17829 solver.cpp:404]     Test net output #1: loss = 0.52499 (* 1 = 0.52499 loss)\nI0819 01:16:29.925218 17829 solver.cpp:228] Iteration 54000, loss = 0.000391664\nI0819 01:16:29.925279 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:16:29.925297 17829 solver.cpp:244]     Train net output #1: loss = 0.000391488 (* 1 = 0.000391488 loss)\nI0819 01:16:30.018381 17829 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0819 01:18:48.034926 17829 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:20:09.430330 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86212\nI0819 01:20:09.430661 17829 solver.cpp:404]     Test net output #1: loss = 0.526226 (* 1 = 0.526226 loss)\nI0819 01:20:10.737951 17829 solver.cpp:228] Iteration 54100, loss = 0.000410102\nI0819 01:20:10.738013 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:20:10.738039 17829 solver.cpp:244]     Train net output #1: loss = 0.000409927 (* 1 = 0.000409927 loss)\nI0819 01:20:10.827878 17829 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0819 01:22:28.870931 17829 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:23:50.198804 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86348\nI0819 01:23:50.199180 17829 solver.cpp:404]     Test net output #1: loss = 0.524487 (* 1 = 0.524487 loss)\nI0819 01:23:51.506194 17829 solver.cpp:228] Iteration 54200, loss = 0.000408527\nI0819 01:23:51.506255 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:23:51.506279 17829 solver.cpp:244]     Train net output #1: loss = 0.000408352 (* 1 = 0.000408352 loss)\nI0819 01:23:51.596639 17829 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0819 01:26:09.643914 17829 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 
01:27:31.024680 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8622\nI0819 01:27:31.025050 17829 solver.cpp:404]     Test net output #1: loss = 0.525558 (* 1 = 0.525558 loss)\nI0819 01:27:32.332301 17829 solver.cpp:228] Iteration 54300, loss = 0.000325294\nI0819 01:27:32.332361 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:27:32.332386 17829 solver.cpp:244]     Train net output #1: loss = 0.000325118 (* 1 = 0.000325118 loss)\nI0819 01:27:32.427764 17829 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0819 01:29:50.504739 17829 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:31:11.926609 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86304\nI0819 01:31:11.926980 17829 solver.cpp:404]     Test net output #1: loss = 0.523401 (* 1 = 0.523401 loss)\nI0819 01:31:13.234045 17829 solver.cpp:228] Iteration 54400, loss = 0.000467864\nI0819 01:31:13.234108 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:31:13.234133 17829 solver.cpp:244]     Train net output #1: loss = 0.000467689 (* 1 = 0.000467689 loss)\nI0819 01:31:13.330785 17829 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0819 01:33:31.344619 17829 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:34:52.599823 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86232\nI0819 01:34:52.600177 17829 solver.cpp:404]     Test net output #1: loss = 0.525355 (* 1 = 0.525355 loss)\nI0819 01:34:53.906322 17829 solver.cpp:228] Iteration 54500, loss = 0.000376914\nI0819 01:34:53.906384 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:34:53.906409 17829 solver.cpp:244]     Train net output #1: loss = 0.000376738 (* 1 = 0.000376738 loss)\nI0819 01:34:53.998055 17829 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0819 01:37:12.002661 17829 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 01:38:33.375073 17829 solver.cpp:404]     Test net output #0: accuracy = 0.863\nI0819 01:38:33.375464 17829 
solver.cpp:404]     Test net output #1: loss = 0.522451 (* 1 = 0.522451 loss)\nI0819 01:38:34.682792 17829 solver.cpp:228] Iteration 54600, loss = 0.000418623\nI0819 01:38:34.682850 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:38:34.682875 17829 solver.cpp:244]     Train net output #1: loss = 0.000418447 (* 1 = 0.000418447 loss)\nI0819 01:38:34.776317 17829 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0819 01:40:52.897935 17829 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 01:42:14.190162 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86136\nI0819 01:42:14.190534 17829 solver.cpp:404]     Test net output #1: loss = 0.524457 (* 1 = 0.524457 loss)\nI0819 01:42:15.497663 17829 solver.cpp:228] Iteration 54700, loss = 0.000360861\nI0819 01:42:15.497720 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:42:15.497752 17829 solver.cpp:244]     Train net output #1: loss = 0.000360685 (* 1 = 0.000360685 loss)\nI0819 01:42:15.595186 17829 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0819 01:44:33.727679 17829 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 01:45:55.095736 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0819 01:45:55.096096 17829 solver.cpp:404]     Test net output #1: loss = 0.521175 (* 1 = 0.521175 loss)\nI0819 01:45:56.401758 17829 solver.cpp:228] Iteration 54800, loss = 0.000257596\nI0819 01:45:56.401816 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:45:56.401840 17829 solver.cpp:244]     Train net output #1: loss = 0.000257421 (* 1 = 0.000257421 loss)\nI0819 01:45:56.500497 17829 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0819 01:48:14.624315 17829 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 01:49:35.938230 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86208\nI0819 01:49:35.938580 17829 solver.cpp:404]     Test net output #1: loss = 0.523357 (* 1 = 0.523357 loss)\nI0819 01:49:37.244554 17829 
solver.cpp:228] Iteration 54900, loss = 0.000399044\nI0819 01:49:37.244612 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:49:37.244638 17829 solver.cpp:244]     Train net output #1: loss = 0.000398868 (* 1 = 0.000398868 loss)\nI0819 01:49:37.333811 17829 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0819 01:51:55.404942 17829 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 01:53:16.807797 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86304\nI0819 01:53:16.808158 17829 solver.cpp:404]     Test net output #1: loss = 0.521538 (* 1 = 0.521538 loss)\nI0819 01:53:18.114364 17829 solver.cpp:228] Iteration 55000, loss = 0.000341208\nI0819 01:53:18.114419 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:53:18.114444 17829 solver.cpp:244]     Train net output #1: loss = 0.000341032 (* 1 = 0.000341032 loss)\nI0819 01:53:18.209404 17829 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0819 01:55:36.248461 17829 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 01:56:57.675750 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86208\nI0819 01:56:57.676101 17829 solver.cpp:404]     Test net output #1: loss = 0.522681 (* 1 = 0.522681 loss)\nI0819 01:56:58.982445 17829 solver.cpp:228] Iteration 55100, loss = 0.000493375\nI0819 01:56:58.982501 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:56:58.982527 17829 solver.cpp:244]     Train net output #1: loss = 0.000493199 (* 1 = 0.000493199 loss)\nI0819 01:56:59.083079 17829 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0819 01:59:17.280236 17829 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:00:38.612718 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0819 02:00:38.613061 17829 solver.cpp:404]     Test net output #1: loss = 0.51901 (* 1 = 0.51901 loss)\nI0819 02:00:39.918938 17829 solver.cpp:228] Iteration 55200, loss = 0.000414597\nI0819 02:00:39.918993 17829 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 02:00:39.919018 17829 solver.cpp:244]     Train net output #1: loss = 0.000414421 (* 1 = 0.000414421 loss)\nI0819 02:00:40.019531 17829 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0819 02:02:58.182099 17829 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:04:19.566315 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8622\nI0819 02:04:19.566694 17829 solver.cpp:404]     Test net output #1: loss = 0.52089 (* 1 = 0.52089 loss)\nI0819 02:04:20.873853 17829 solver.cpp:228] Iteration 55300, loss = 0.000509863\nI0819 02:04:20.873908 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:04:20.873932 17829 solver.cpp:244]     Train net output #1: loss = 0.000509688 (* 1 = 0.000509688 loss)\nI0819 02:04:20.970474 17829 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0819 02:06:39.070520 17829 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:08:00.437741 17829 solver.cpp:404]     Test net output #0: accuracy = 0.863\nI0819 02:08:00.438115 17829 solver.cpp:404]     Test net output #1: loss = 0.518378 (* 1 = 0.518378 loss)\nI0819 02:08:01.744077 17829 solver.cpp:228] Iteration 55400, loss = 0.0003713\nI0819 02:08:01.744130 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:08:01.744154 17829 solver.cpp:244]     Train net output #1: loss = 0.000371124 (* 1 = 0.000371124 loss)\nI0819 02:08:01.840437 17829 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0819 02:10:19.933660 17829 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 02:11:41.312017 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86268\nI0819 02:11:41.312352 17829 solver.cpp:404]     Test net output #1: loss = 0.519784 (* 1 = 0.519784 loss)\nI0819 02:11:42.619485 17829 solver.cpp:228] Iteration 55500, loss = 0.000378068\nI0819 02:11:42.619545 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:11:42.619570 17829 solver.cpp:244]     Train net output #1: loss = 0.000377893 (* 1 = 
0.000377893 loss)\nI0819 02:11:42.712085 17829 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0819 02:14:00.883394 17829 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:15:22.265648 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86368\nI0819 02:15:22.266005 17829 solver.cpp:404]     Test net output #1: loss = 0.5173 (* 1 = 0.5173 loss)\nI0819 02:15:23.573134 17829 solver.cpp:228] Iteration 55600, loss = 0.000398419\nI0819 02:15:23.573191 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:15:23.573216 17829 solver.cpp:244]     Train net output #1: loss = 0.000398244 (* 1 = 0.000398244 loss)\nI0819 02:15:23.667006 17829 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0819 02:17:41.803475 17829 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 02:19:03.246201 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86268\nI0819 02:19:03.246569 17829 solver.cpp:404]     Test net output #1: loss = 0.517732 (* 1 = 0.517732 loss)\nI0819 02:19:04.553580 17829 solver.cpp:228] Iteration 55700, loss = 0.000462446\nI0819 02:19:04.553623 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:19:04.553647 17829 solver.cpp:244]     Train net output #1: loss = 0.00046227 (* 1 = 0.00046227 loss)\nI0819 02:19:04.654155 17829 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0819 02:21:22.796751 17829 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 02:22:44.132973 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 02:22:44.133337 17829 solver.cpp:404]     Test net output #1: loss = 0.516469 (* 1 = 0.516469 loss)\nI0819 02:22:45.440361 17829 solver.cpp:228] Iteration 55800, loss = 0.000454357\nI0819 02:22:45.440423 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:22:45.440448 17829 solver.cpp:244]     Train net output #1: loss = 0.000454181 (* 1 = 0.000454181 loss)\nI0819 02:22:45.531085 17829 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0819 02:25:03.659095 
17829 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 02:26:25.028615 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86244\nI0819 02:26:25.028995 17829 solver.cpp:404]     Test net output #1: loss = 0.517979 (* 1 = 0.517979 loss)\nI0819 02:26:26.334849 17829 solver.cpp:228] Iteration 55900, loss = 0.000416209\nI0819 02:26:26.334904 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:26:26.334929 17829 solver.cpp:244]     Train net output #1: loss = 0.000416033 (* 1 = 0.000416033 loss)\nI0819 02:26:26.428087 17829 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0819 02:28:44.578619 17829 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 02:30:05.983506 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 02:30:05.983855 17829 solver.cpp:404]     Test net output #1: loss = 0.514984 (* 1 = 0.514984 loss)\nI0819 02:30:07.290797 17829 solver.cpp:228] Iteration 56000, loss = 0.000476402\nI0819 02:30:07.290858 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:30:07.290881 17829 solver.cpp:244]     Train net output #1: loss = 0.000476226 (* 1 = 0.000476226 loss)\nI0819 02:30:07.382408 17829 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0819 02:32:25.545143 17829 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 02:33:46.894556 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86244\nI0819 02:33:46.894935 17829 solver.cpp:404]     Test net output #1: loss = 0.517607 (* 1 = 0.517607 loss)\nI0819 02:33:48.201563 17829 solver.cpp:228] Iteration 56100, loss = 0.000430404\nI0819 02:33:48.201620 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:33:48.201647 17829 solver.cpp:244]     Train net output #1: loss = 0.000430228 (* 1 = 0.000430228 loss)\nI0819 02:33:48.292647 17829 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0819 02:36:06.389688 17829 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 02:37:27.799074 17829 solver.cpp:404]     Test 
net output #0: accuracy = 0.86376\nI0819 02:37:27.799435 17829 solver.cpp:404]     Test net output #1: loss = 0.513923 (* 1 = 0.513923 loss)\nI0819 02:37:29.106042 17829 solver.cpp:228] Iteration 56200, loss = 0.000447254\nI0819 02:37:29.106097 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:37:29.106123 17829 solver.cpp:244]     Train net output #1: loss = 0.000447079 (* 1 = 0.000447079 loss)\nI0819 02:37:29.198947 17829 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0819 02:39:47.319641 17829 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 02:41:08.714383 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86268\nI0819 02:41:08.714730 17829 solver.cpp:404]     Test net output #1: loss = 0.516827 (* 1 = 0.516827 loss)\nI0819 02:41:10.020052 17829 solver.cpp:228] Iteration 56300, loss = 0.000346161\nI0819 02:41:10.020107 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:41:10.020130 17829 solver.cpp:244]     Train net output #1: loss = 0.000345986 (* 1 = 0.000345986 loss)\nI0819 02:41:10.114290 17829 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0819 02:43:28.191558 17829 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 02:44:49.555127 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86372\nI0819 02:44:49.555476 17829 solver.cpp:404]     Test net output #1: loss = 0.514283 (* 1 = 0.514283 loss)\nI0819 02:44:50.862269 17829 solver.cpp:228] Iteration 56400, loss = 0.000355078\nI0819 02:44:50.862326 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:44:50.862350 17829 solver.cpp:244]     Train net output #1: loss = 0.000354902 (* 1 = 0.000354902 loss)\nI0819 02:44:50.959900 17829 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0819 02:47:09.085958 17829 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 02:48:30.431274 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86256\nI0819 02:48:30.431627 17829 solver.cpp:404]     Test net output #1: loss = 
0.515922 (* 1 = 0.515922 loss)\nI0819 02:48:31.738410 17829 solver.cpp:228] Iteration 56500, loss = 0.000412338\nI0819 02:48:31.738466 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:48:31.738492 17829 solver.cpp:244]     Train net output #1: loss = 0.000412162 (* 1 = 0.000412162 loss)\nI0819 02:48:31.837097 17829 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0819 02:50:49.945700 17829 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 02:52:11.319012 17829 solver.cpp:404]     Test net output #0: accuracy = 0.864\nI0819 02:52:11.319375 17829 solver.cpp:404]     Test net output #1: loss = 0.513163 (* 1 = 0.513163 loss)\nI0819 02:52:12.626564 17829 solver.cpp:228] Iteration 56600, loss = 0.000395846\nI0819 02:52:12.626627 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:52:12.626652 17829 solver.cpp:244]     Train net output #1: loss = 0.00039567 (* 1 = 0.00039567 loss)\nI0819 02:52:12.719652 17829 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0819 02:54:30.866621 17829 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 02:55:52.147824 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86308\nI0819 02:55:52.148177 17829 solver.cpp:404]     Test net output #1: loss = 0.514709 (* 1 = 0.514709 loss)\nI0819 02:55:53.455981 17829 solver.cpp:228] Iteration 56700, loss = 0.000456242\nI0819 02:55:53.456041 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:55:53.456059 17829 solver.cpp:244]     Train net output #1: loss = 0.000456067 (* 1 = 0.000456067 loss)\nI0819 02:55:53.555449 17829 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0819 02:58:11.577247 17829 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 02:59:32.829246 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0819 02:59:32.829601 17829 solver.cpp:404]     Test net output #1: loss = 0.51177 (* 1 = 0.51177 loss)\nI0819 02:59:34.135099 17829 solver.cpp:228] Iteration 56800, loss = 0.000559153\nI0819 
02:59:34.135159 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:59:34.135177 17829 solver.cpp:244]     Train net output #1: loss = 0.000558977 (* 1 = 0.000558977 loss)\nI0819 02:59:34.230201 17829 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0819 03:01:52.282579 17829 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:03:13.518082 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86236\nI0819 03:03:13.518445 17829 solver.cpp:404]     Test net output #1: loss = 0.514614 (* 1 = 0.514614 loss)\nI0819 03:03:14.824142 17829 solver.cpp:228] Iteration 56900, loss = 0.00051471\nI0819 03:03:14.824201 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:03:14.824219 17829 solver.cpp:244]     Train net output #1: loss = 0.000514535 (* 1 = 0.000514535 loss)\nI0819 03:03:14.924819 17829 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0819 03:05:33.777698 17829 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:06:55.033372 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86392\nI0819 03:06:55.033730 17829 solver.cpp:404]     Test net output #1: loss = 0.511087 (* 1 = 0.511087 loss)\nI0819 03:06:56.339540 17829 solver.cpp:228] Iteration 57000, loss = 0.000510089\nI0819 03:06:56.339596 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:06:56.339612 17829 solver.cpp:244]     Train net output #1: loss = 0.000509913 (* 1 = 0.000509913 loss)\nI0819 03:06:56.438953 17829 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0819 03:09:15.560035 17829 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 03:10:36.781307 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86308\nI0819 03:10:36.781682 17829 solver.cpp:404]     Test net output #1: loss = 0.513458 (* 1 = 0.513458 loss)\nI0819 03:10:38.087214 17829 solver.cpp:228] Iteration 57100, loss = 0.00043349\nI0819 03:10:38.087270 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:10:38.087287 17829 
solver.cpp:244]     Train net output #1: loss = 0.000433314 (* 1 = 0.000433314 loss)\nI0819 03:10:38.188061 17829 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0819 03:12:57.353147 17829 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:14:18.598774 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86412\nI0819 03:14:18.599123 17829 solver.cpp:404]     Test net output #1: loss = 0.510174 (* 1 = 0.510174 loss)\nI0819 03:14:19.905143 17829 solver.cpp:228] Iteration 57200, loss = 0.000428368\nI0819 03:14:19.905200 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:14:19.905215 17829 solver.cpp:244]     Train net output #1: loss = 0.000428192 (* 1 = 0.000428192 loss)\nI0819 03:14:20.010777 17829 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0819 03:16:39.357168 17829 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 03:18:00.605801 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8626\nI0819 03:18:00.606173 17829 solver.cpp:404]     Test net output #1: loss = 0.511522 (* 1 = 0.511522 loss)\nI0819 03:18:01.911856 17829 solver.cpp:228] Iteration 57300, loss = 0.000337479\nI0819 03:18:01.911911 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:18:01.911929 17829 solver.cpp:244]     Train net output #1: loss = 0.000337303 (* 1 = 0.000337303 loss)\nI0819 03:18:02.013121 17829 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0819 03:20:21.096362 17829 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 03:21:42.302049 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86416\nI0819 03:21:42.302417 17829 solver.cpp:404]     Test net output #1: loss = 0.50874 (* 1 = 0.50874 loss)\nI0819 03:21:43.607919 17829 solver.cpp:228] Iteration 57400, loss = 0.000539074\nI0819 03:21:43.607976 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:21:43.607993 17829 solver.cpp:244]     Train net output #1: loss = 0.000538898 (* 1 = 0.000538898 loss)\nI0819 03:21:43.715015 17829 
sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0819 03:24:03.019753 17829 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 03:25:24.226783 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0819 03:25:24.227129 17829 solver.cpp:404]     Test net output #1: loss = 0.509303 (* 1 = 0.509303 loss)\nI0819 03:25:25.534006 17829 solver.cpp:228] Iteration 57500, loss = 0.000501514\nI0819 03:25:25.534067 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:25:25.534083 17829 solver.cpp:244]     Train net output #1: loss = 0.000501338 (* 1 = 0.000501338 loss)\nI0819 03:25:25.638857 17829 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0819 03:27:44.673760 17829 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 03:29:05.832489 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86428\nI0819 03:29:05.832795 17829 solver.cpp:404]     Test net output #1: loss = 0.507613 (* 1 = 0.507613 loss)\nI0819 03:29:07.138470 17829 solver.cpp:228] Iteration 57600, loss = 0.000360979\nI0819 03:29:07.138528 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:29:07.138545 17829 solver.cpp:244]     Train net output #1: loss = 0.000360803 (* 1 = 0.000360803 loss)\nI0819 03:29:07.242789 17829 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0819 03:31:26.388444 17829 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 03:32:47.429498 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86304\nI0819 03:32:47.429828 17829 solver.cpp:404]     Test net output #1: loss = 0.510536 (* 1 = 0.510536 loss)\nI0819 03:32:48.735728 17829 solver.cpp:228] Iteration 57700, loss = 0.000404038\nI0819 03:32:48.735780 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:32:48.735797 17829 solver.cpp:244]     Train net output #1: loss = 0.000403862 (* 1 = 0.000403862 loss)\nI0819 03:32:48.841339 17829 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0819 03:35:07.902604 17829 solver.cpp:337] Iteration 57800, 
Testing net (#0)\nI0819 03:36:29.125708 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86448\nI0819 03:36:29.126000 17829 solver.cpp:404]     Test net output #1: loss = 0.506827 (* 1 = 0.506827 loss)\nI0819 03:36:30.431646 17829 solver.cpp:228] Iteration 57800, loss = 0.000364759\nI0819 03:36:30.431701 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:36:30.431718 17829 solver.cpp:244]     Train net output #1: loss = 0.000364583 (* 1 = 0.000364583 loss)\nI0819 03:36:30.530664 17829 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0819 03:38:49.588951 17829 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 03:40:10.805318 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0819 03:40:10.805621 17829 solver.cpp:404]     Test net output #1: loss = 0.508562 (* 1 = 0.508562 loss)\nI0819 03:40:12.111405 17829 solver.cpp:228] Iteration 57900, loss = 0.000411621\nI0819 03:40:12.111457 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:40:12.111474 17829 solver.cpp:244]     Train net output #1: loss = 0.000411445 (* 1 = 0.000411445 loss)\nI0819 03:40:12.214777 17829 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0819 03:42:31.295699 17829 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 03:43:52.435850 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86476\nI0819 03:43:52.436195 17829 solver.cpp:404]     Test net output #1: loss = 0.505421 (* 1 = 0.505421 loss)\nI0819 03:43:53.745285 17829 solver.cpp:228] Iteration 58000, loss = 0.000461484\nI0819 03:43:53.745342 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:43:53.745362 17829 solver.cpp:244]     Train net output #1: loss = 0.000461308 (* 1 = 0.000461308 loss)\nI0819 03:43:53.844233 17829 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0819 03:46:12.981793 17829 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 03:47:33.941087 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86408\nI0819 
03:47:33.941354 17829 solver.cpp:404]     Test net output #1: loss = 0.508412 (* 1 = 0.508412 loss)\nI0819 03:47:35.249151 17829 solver.cpp:228] Iteration 58100, loss = 0.000359044\nI0819 03:47:35.249210 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:47:35.249228 17829 solver.cpp:244]     Train net output #1: loss = 0.000358868 (* 1 = 0.000358868 loss)\nI0819 03:47:35.347905 17829 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0819 03:49:54.480177 17829 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 03:51:15.623292 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86496\nI0819 03:51:15.623620 17829 solver.cpp:404]     Test net output #1: loss = 0.506158 (* 1 = 0.506158 loss)\nI0819 03:51:16.932493 17829 solver.cpp:228] Iteration 58200, loss = 0.000384477\nI0819 03:51:16.932554 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:51:16.932570 17829 solver.cpp:244]     Train net output #1: loss = 0.000384302 (* 1 = 0.000384302 loss)\nI0819 03:51:17.032928 17829 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0819 03:53:35.835665 17829 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 03:54:57.038063 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86436\nI0819 03:54:57.038344 17829 solver.cpp:404]     Test net output #1: loss = 0.508339 (* 1 = 0.508339 loss)\nI0819 03:54:58.345949 17829 solver.cpp:228] Iteration 58300, loss = 0.000481467\nI0819 03:54:58.346005 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:54:58.346022 17829 solver.cpp:244]     Train net output #1: loss = 0.000481291 (* 1 = 0.000481291 loss)\nI0819 03:54:58.447535 17829 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0819 03:57:17.507380 17829 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 03:58:38.596715 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86576\nI0819 03:58:38.597030 17829 solver.cpp:404]     Test net output #1: loss = 0.505241 (* 1 = 0.505241 loss)\nI0819 
03:58:39.906296 17829 solver.cpp:228] Iteration 58400, loss = 0.000413288\nI0819 03:58:39.906349 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:58:39.906366 17829 solver.cpp:244]     Train net output #1: loss = 0.000413112 (* 1 = 0.000413112 loss)\nI0819 03:58:40.008327 17829 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0819 04:00:59.138046 17829 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:02:20.137432 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0819 04:02:20.137769 17829 solver.cpp:404]     Test net output #1: loss = 0.506058 (* 1 = 0.506058 loss)\nI0819 04:02:21.446485 17829 solver.cpp:228] Iteration 58500, loss = 0.000448243\nI0819 04:02:21.446547 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:02:21.446568 17829 solver.cpp:244]     Train net output #1: loss = 0.000448068 (* 1 = 0.000448068 loss)\nI0819 04:02:21.542623 17829 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0819 04:04:40.725992 17829 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:06:01.934773 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86528\nI0819 04:06:01.935104 17829 solver.cpp:404]     Test net output #1: loss = 0.503393 (* 1 = 0.503393 loss)\nI0819 04:06:03.243824 17829 solver.cpp:228] Iteration 58600, loss = 0.00042653\nI0819 04:06:03.243886 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:06:03.243903 17829 solver.cpp:244]     Train net output #1: loss = 0.000426355 (* 1 = 0.000426355 loss)\nI0819 04:06:03.346773 17829 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0819 04:08:22.402838 17829 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 04:09:43.639886 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 04:09:43.640223 17829 solver.cpp:404]     Test net output #1: loss = 0.505958 (* 1 = 0.505958 loss)\nI0819 04:09:44.948525 17829 solver.cpp:228] Iteration 58700, loss = 0.0005061\nI0819 04:09:44.948591 17829 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:09:44.948608 17829 solver.cpp:244]     Train net output #1: loss = 0.000505924 (* 1 = 0.000505924 loss)\nI0819 04:09:45.044739 17829 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0819 04:12:04.003366 17829 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:13:24.962324 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86508\nI0819 04:13:24.962637 17829 solver.cpp:404]     Test net output #1: loss = 0.503734 (* 1 = 0.503734 loss)\nI0819 04:13:26.271225 17829 solver.cpp:228] Iteration 58800, loss = 0.000357995\nI0819 04:13:26.271286 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:13:26.271302 17829 solver.cpp:244]     Train net output #1: loss = 0.00035782 (* 1 = 0.00035782 loss)\nI0819 04:13:26.373443 17829 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0819 04:15:45.483324 17829 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 04:17:06.301977 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 04:17:06.302251 17829 solver.cpp:404]     Test net output #1: loss = 0.507566 (* 1 = 0.507566 loss)\nI0819 04:17:07.611786 17829 solver.cpp:228] Iteration 58900, loss = 0.000399809\nI0819 04:17:07.611847 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:17:07.611865 17829 solver.cpp:244]     Train net output #1: loss = 0.000399634 (* 1 = 0.000399634 loss)\nI0819 04:17:07.708063 17829 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0819 04:19:26.893642 17829 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 04:20:47.815104 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86484\nI0819 04:20:47.815369 17829 solver.cpp:404]     Test net output #1: loss = 0.50423 (* 1 = 0.50423 loss)\nI0819 04:20:49.123872 17829 solver.cpp:228] Iteration 59000, loss = 0.000403143\nI0819 04:20:49.123932 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:20:49.123950 17829 solver.cpp:244]     Train net 
output #1: loss = 0.000402968 (* 1 = 0.000402968 loss)\nI0819 04:20:49.223567 17829 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0819 04:23:08.349215 17829 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 04:24:29.270915 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8642\nI0819 04:24:29.271210 17829 solver.cpp:404]     Test net output #1: loss = 0.50516 (* 1 = 0.50516 loss)\nI0819 04:24:30.579550 17829 solver.cpp:228] Iteration 59100, loss = 0.000409478\nI0819 04:24:30.579614 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:24:30.579632 17829 solver.cpp:244]     Train net output #1: loss = 0.000409303 (* 1 = 0.000409303 loss)\nI0819 04:24:30.680397 17829 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0819 04:26:49.781358 17829 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 04:28:10.556188 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8658\nI0819 04:28:10.556496 17829 solver.cpp:404]     Test net output #1: loss = 0.502314 (* 1 = 0.502314 loss)\nI0819 04:28:11.865226 17829 solver.cpp:228] Iteration 59200, loss = 0.000402444\nI0819 04:28:11.865285 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:28:11.865303 17829 solver.cpp:244]     Train net output #1: loss = 0.000402268 (* 1 = 0.000402268 loss)\nI0819 04:28:11.962594 17829 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0819 04:30:31.084136 17829 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 04:31:51.976286 17829 solver.cpp:404]     Test net output #0: accuracy = 0.865001\nI0819 04:31:51.976577 17829 solver.cpp:404]     Test net output #1: loss = 0.503637 (* 1 = 0.503637 loss)\nI0819 04:31:53.284996 17829 solver.cpp:228] Iteration 59300, loss = 0.000414112\nI0819 04:31:53.285056 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:31:53.285073 17829 solver.cpp:244]     Train net output #1: loss = 0.000413936 (* 1 = 0.000413936 loss)\nI0819 04:31:53.383388 17829 sgd_solver.cpp:166] Iteration 
59300, lr = 0.035\nI0819 04:34:12.504930 17829 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 04:35:33.441506 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8658\nI0819 04:35:33.441788 17829 solver.cpp:404]     Test net output #1: loss = 0.502031 (* 1 = 0.502031 loss)\nI0819 04:35:34.749985 17829 solver.cpp:228] Iteration 59400, loss = 0.000408063\nI0819 04:35:34.750043 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:35:34.750061 17829 solver.cpp:244]     Train net output #1: loss = 0.000407887 (* 1 = 0.000407887 loss)\nI0819 04:35:34.854434 17829 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0819 04:37:53.899467 17829 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 04:39:14.601119 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8646\nI0819 04:39:14.601395 17829 solver.cpp:404]     Test net output #1: loss = 0.503895 (* 1 = 0.503895 loss)\nI0819 04:39:15.910466 17829 solver.cpp:228] Iteration 59500, loss = 0.000486765\nI0819 04:39:15.910526 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:39:15.910544 17829 solver.cpp:244]     Train net output #1: loss = 0.000486589 (* 1 = 0.000486589 loss)\nI0819 04:39:16.008554 17829 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0819 04:41:35.027890 17829 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 04:42:56.087677 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 04:42:56.088014 17829 solver.cpp:404]     Test net output #1: loss = 0.501657 (* 1 = 0.501657 loss)\nI0819 04:42:57.396921 17829 solver.cpp:228] Iteration 59600, loss = 0.000466024\nI0819 04:42:57.396978 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:42:57.396996 17829 solver.cpp:244]     Train net output #1: loss = 0.000465849 (* 1 = 0.000465849 loss)\nI0819 04:42:57.495229 17829 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0819 04:45:16.623237 17829 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 
04:46:37.808401 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86496\nI0819 04:46:37.808681 17829 solver.cpp:404]     Test net output #1: loss = 0.502455 (* 1 = 0.502455 loss)\nI0819 04:46:39.117352 17829 solver.cpp:228] Iteration 59700, loss = 0.000438938\nI0819 04:46:39.117410 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:46:39.117427 17829 solver.cpp:244]     Train net output #1: loss = 0.000438762 (* 1 = 0.000438762 loss)\nI0819 04:46:39.214694 17829 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0819 04:48:58.347142 17829 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 04:50:19.344146 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8658\nI0819 04:50:19.344489 17829 solver.cpp:404]     Test net output #1: loss = 0.500399 (* 1 = 0.500399 loss)\nI0819 04:50:20.652667 17829 solver.cpp:228] Iteration 59800, loss = 0.000406664\nI0819 04:50:20.652725 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:50:20.652742 17829 solver.cpp:244]     Train net output #1: loss = 0.000406488 (* 1 = 0.000406488 loss)\nI0819 04:50:20.752532 17829 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0819 04:52:39.863487 17829 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 04:54:01.119160 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0819 04:54:01.119484 17829 solver.cpp:404]     Test net output #1: loss = 0.502571 (* 1 = 0.502571 loss)\nI0819 04:54:02.428364 17829 solver.cpp:228] Iteration 59900, loss = 0.000454898\nI0819 04:54:02.428419 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:54:02.428436 17829 solver.cpp:244]     Train net output #1: loss = 0.000454723 (* 1 = 0.000454723 loss)\nI0819 04:54:02.526635 17829 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0819 04:56:21.667651 17829 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 04:57:42.836866 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 04:57:42.837151 17829 
solver.cpp:404]     Test net output #1: loss = 0.49967 (* 1 = 0.49967 loss)\nI0819 04:57:44.146203 17829 solver.cpp:228] Iteration 60000, loss = 0.000566923\nI0819 04:57:44.146261 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:57:44.146277 17829 solver.cpp:244]     Train net output #1: loss = 0.000566747 (* 1 = 0.000566747 loss)\nI0819 04:57:44.247639 17829 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0819 05:00:03.397136 17829 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:01:24.566496 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0819 05:01:24.566818 17829 solver.cpp:404]     Test net output #1: loss = 0.500312 (* 1 = 0.500312 loss)\nI0819 05:01:25.875578 17829 solver.cpp:228] Iteration 60100, loss = 0.000396626\nI0819 05:01:25.875636 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:01:25.875653 17829 solver.cpp:244]     Train net output #1: loss = 0.00039645 (* 1 = 0.00039645 loss)\nI0819 05:01:25.979579 17829 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0819 05:03:45.096633 17829 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:05:06.261785 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86584\nI0819 05:05:06.262084 17829 solver.cpp:404]     Test net output #1: loss = 0.499482 (* 1 = 0.499482 loss)\nI0819 05:05:07.570083 17829 solver.cpp:228] Iteration 60200, loss = 0.000458708\nI0819 05:05:07.570143 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:05:07.570158 17829 solver.cpp:244]     Train net output #1: loss = 0.000458533 (* 1 = 0.000458533 loss)\nI0819 05:05:07.668548 17829 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0819 05:07:26.780781 17829 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 05:08:47.971765 17829 solver.cpp:404]     Test net output #0: accuracy = 0.864881\nI0819 05:08:47.972066 17829 solver.cpp:404]     Test net output #1: loss = 0.501162 (* 1 = 0.501162 loss)\nI0819 05:08:49.279928 17829 
solver.cpp:228] Iteration 60300, loss = 0.000506906\nI0819 05:08:49.279986 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:08:49.280001 17829 solver.cpp:244]     Train net output #1: loss = 0.00050673 (* 1 = 0.00050673 loss)\nI0819 05:08:49.380954 17829 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0819 05:11:08.490361 17829 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:12:29.546203 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8654\nI0819 05:12:29.546653 17829 solver.cpp:404]     Test net output #1: loss = 0.500553 (* 1 = 0.500553 loss)\nI0819 05:12:30.855959 17829 solver.cpp:228] Iteration 60400, loss = 0.000395131\nI0819 05:12:30.856020 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:12:30.856037 17829 solver.cpp:244]     Train net output #1: loss = 0.000394955 (* 1 = 0.000394955 loss)\nI0819 05:12:30.956331 17829 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0819 05:14:50.098386 17829 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 05:16:10.964148 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0819 05:16:10.964421 17829 solver.cpp:404]     Test net output #1: loss = 0.501022 (* 1 = 0.501022 loss)\nI0819 05:16:12.272605 17829 solver.cpp:228] Iteration 60500, loss = 0.00049918\nI0819 05:16:12.272665 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:16:12.272682 17829 solver.cpp:244]     Train net output #1: loss = 0.000499004 (* 1 = 0.000499004 loss)\nI0819 05:16:12.374924 17829 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0819 05:18:31.507218 17829 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 05:19:52.421706 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86616\nI0819 05:19:52.421988 17829 solver.cpp:404]     Test net output #1: loss = 0.498669 (* 1 = 0.498669 loss)\nI0819 05:19:53.730746 17829 solver.cpp:228] Iteration 60600, loss = 0.000511153\nI0819 05:19:53.730804 17829 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 05:19:53.730820 17829 solver.cpp:244]     Train net output #1: loss = 0.000510978 (* 1 = 0.000510978 loss)\nI0819 05:19:53.830665 17829 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0819 05:22:12.936394 17829 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 05:23:33.742455 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0819 05:23:33.742743 17829 solver.cpp:404]     Test net output #1: loss = 0.500913 (* 1 = 0.500913 loss)\nI0819 05:23:35.051070 17829 solver.cpp:228] Iteration 60700, loss = 0.000400263\nI0819 05:23:35.051131 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:23:35.051147 17829 solver.cpp:244]     Train net output #1: loss = 0.000400087 (* 1 = 0.000400087 loss)\nI0819 05:23:35.151046 17829 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0819 05:25:54.319022 17829 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 05:27:15.500875 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86568\nI0819 05:27:15.501204 17829 solver.cpp:404]     Test net output #1: loss = 0.498858 (* 1 = 0.498858 loss)\nI0819 05:27:16.809914 17829 solver.cpp:228] Iteration 60800, loss = 0.000456169\nI0819 05:27:16.809973 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:27:16.809989 17829 solver.cpp:244]     Train net output #1: loss = 0.000455993 (* 1 = 0.000455993 loss)\nI0819 05:27:16.905546 17829 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0819 05:29:36.020740 17829 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 05:30:57.192438 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86552\nI0819 05:30:57.192824 17829 solver.cpp:404]     Test net output #1: loss = 0.498189 (* 1 = 0.498189 loss)\nI0819 05:30:58.500753 17829 solver.cpp:228] Iteration 60900, loss = 0.000445683\nI0819 05:30:58.500811 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:30:58.500829 17829 solver.cpp:244]     Train net output #1: loss = 0.000445507 
(* 1 = 0.000445507 loss)\nI0819 05:30:58.601872 17829 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0819 05:33:17.942507 17829 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 05:34:39.130177 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 05:34:39.130462 17829 solver.cpp:404]     Test net output #1: loss = 0.495647 (* 1 = 0.495647 loss)\nI0819 05:34:40.439031 17829 solver.cpp:228] Iteration 61000, loss = 0.000492744\nI0819 05:34:40.439090 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:34:40.439106 17829 solver.cpp:244]     Train net output #1: loss = 0.000492568 (* 1 = 0.000492568 loss)\nI0819 05:34:40.538345 17829 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0819 05:36:59.659636 17829 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 05:38:20.881433 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86544\nI0819 05:38:20.881719 17829 solver.cpp:404]     Test net output #1: loss = 0.497968 (* 1 = 0.497968 loss)\nI0819 05:38:22.190366 17829 solver.cpp:228] Iteration 61100, loss = 0.00042188\nI0819 05:38:22.190425 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:38:22.190443 17829 solver.cpp:244]     Train net output #1: loss = 0.000421704 (* 1 = 0.000421704 loss)\nI0819 05:38:22.289579 17829 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0819 05:40:41.353590 17829 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 05:42:02.555752 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86636\nI0819 05:42:02.556041 17829 solver.cpp:404]     Test net output #1: loss = 0.497796 (* 1 = 0.497796 loss)\nI0819 05:42:03.866030 17829 solver.cpp:228] Iteration 61200, loss = 0.000432491\nI0819 05:42:03.866084 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:42:03.866101 17829 solver.cpp:244]     Train net output #1: loss = 0.000432316 (* 1 = 0.000432316 loss)\nI0819 05:42:03.966830 17829 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0819 
05:44:23.087292 17829 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 05:45:44.076486 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8652\nI0819 05:45:44.076808 17829 solver.cpp:404]     Test net output #1: loss = 0.49917 (* 1 = 0.49917 loss)\nI0819 05:45:45.385952 17829 solver.cpp:228] Iteration 61300, loss = 0.000483532\nI0819 05:45:45.386013 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:45:45.386030 17829 solver.cpp:244]     Train net output #1: loss = 0.000483357 (* 1 = 0.000483357 loss)\nI0819 05:45:45.484468 17829 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0819 05:48:04.598336 17829 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 05:49:25.650563 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86588\nI0819 05:49:25.650845 17829 solver.cpp:404]     Test net output #1: loss = 0.498177 (* 1 = 0.498177 loss)\nI0819 05:49:26.960539 17829 solver.cpp:228] Iteration 61400, loss = 0.000392686\nI0819 05:49:26.960600 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:49:26.960618 17829 solver.cpp:244]     Train net output #1: loss = 0.00039251 (* 1 = 0.00039251 loss)\nI0819 05:49:27.058807 17829 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0819 05:51:46.280558 17829 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 05:53:07.100419 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86532\nI0819 05:53:07.100759 17829 solver.cpp:404]     Test net output #1: loss = 0.497997 (* 1 = 0.497997 loss)\nI0819 05:53:08.410663 17829 solver.cpp:228] Iteration 61500, loss = 0.000438677\nI0819 05:53:08.410722 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:53:08.410740 17829 solver.cpp:244]     Train net output #1: loss = 0.000438502 (* 1 = 0.000438502 loss)\nI0819 05:53:08.505297 17829 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0819 05:55:27.609498 17829 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 05:56:48.403322 17829 solver.cpp:404]  
   Test net output #0: accuracy = 0.86644\nI0819 05:56:48.403646 17829 solver.cpp:404]     Test net output #1: loss = 0.497766 (* 1 = 0.497766 loss)\nI0819 05:56:49.712092 17829 solver.cpp:228] Iteration 61600, loss = 0.000472076\nI0819 05:56:49.712153 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:56:49.712170 17829 solver.cpp:244]     Train net output #1: loss = 0.000471901 (* 1 = 0.000471901 loss)\nI0819 05:56:49.812825 17829 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0819 05:59:08.933179 17829 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:00:29.937700 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 06:00:29.938063 17829 solver.cpp:404]     Test net output #1: loss = 0.497611 (* 1 = 0.497611 loss)\nI0819 06:00:31.247859 17829 solver.cpp:228] Iteration 61700, loss = 0.000497102\nI0819 06:00:31.247920 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:00:31.247936 17829 solver.cpp:244]     Train net output #1: loss = 0.000496926 (* 1 = 0.000496926 loss)\nI0819 06:00:31.350620 17829 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0819 06:02:50.525902 17829 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:04:11.647534 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0819 06:04:11.647907 17829 solver.cpp:404]     Test net output #1: loss = 0.496698 (* 1 = 0.496698 loss)\nI0819 06:04:12.956814 17829 solver.cpp:228] Iteration 61800, loss = 0.000460513\nI0819 06:04:12.956868 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:04:12.956885 17829 solver.cpp:244]     Train net output #1: loss = 0.000460337 (* 1 = 0.000460337 loss)\nI0819 06:04:13.056777 17829 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0819 06:06:32.055009 17829 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 06:07:53.300953 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 06:07:53.301329 17829 solver.cpp:404]     Test net output #1: 
loss = 0.498633 (* 1 = 0.498633 loss)\nI0819 06:07:54.609410 17829 solver.cpp:228] Iteration 61900, loss = 0.000532155\nI0819 06:07:54.609468 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:07:54.609490 17829 solver.cpp:244]     Train net output #1: loss = 0.000531979 (* 1 = 0.000531979 loss)\nI0819 06:07:54.707667 17829 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0819 06:10:13.930527 17829 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:11:35.191917 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 06:11:35.192292 17829 solver.cpp:404]     Test net output #1: loss = 0.496527 (* 1 = 0.496527 loss)\nI0819 06:11:36.500970 17829 solver.cpp:228] Iteration 62000, loss = 0.000521943\nI0819 06:11:36.501030 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:11:36.501047 17829 solver.cpp:244]     Train net output #1: loss = 0.000521768 (* 1 = 0.000521768 loss)\nI0819 06:11:36.601827 17829 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0819 06:13:55.608810 17829 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 06:15:16.866058 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86596\nI0819 06:15:16.866427 17829 solver.cpp:404]     Test net output #1: loss = 0.497715 (* 1 = 0.497715 loss)\nI0819 06:15:18.175262 17829 solver.cpp:228] Iteration 62100, loss = 0.000451344\nI0819 06:15:18.175317 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:15:18.175333 17829 solver.cpp:244]     Train net output #1: loss = 0.000451169 (* 1 = 0.000451169 loss)\nI0819 06:15:18.270860 17829 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0819 06:17:37.214462 17829 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 06:18:58.477067 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86708\nI0819 06:18:58.477445 17829 solver.cpp:404]     Test net output #1: loss = 0.49698 (* 1 = 0.49698 loss)\nI0819 06:18:59.786226 17829 solver.cpp:228] Iteration 62200, loss = 
0.000469744\nI0819 06:18:59.786281 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:18:59.786298 17829 solver.cpp:244]     Train net output #1: loss = 0.000469568 (* 1 = 0.000469568 loss)\nI0819 06:18:59.883363 17829 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0819 06:21:18.901963 17829 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 06:22:40.155166 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86592\nI0819 06:22:40.155535 17829 solver.cpp:404]     Test net output #1: loss = 0.498323 (* 1 = 0.498323 loss)\nI0819 06:22:41.465188 17829 solver.cpp:228] Iteration 62300, loss = 0.000386087\nI0819 06:22:41.465245 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:22:41.465262 17829 solver.cpp:244]     Train net output #1: loss = 0.000385911 (* 1 = 0.000385911 loss)\nI0819 06:22:41.566488 17829 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0819 06:25:00.785590 17829 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 06:26:22.035907 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 06:26:22.036284 17829 solver.cpp:404]     Test net output #1: loss = 0.497013 (* 1 = 0.497013 loss)\nI0819 06:26:23.345510 17829 solver.cpp:228] Iteration 62400, loss = 0.000407564\nI0819 06:26:23.345567 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:26:23.345584 17829 solver.cpp:244]     Train net output #1: loss = 0.000407389 (* 1 = 0.000407389 loss)\nI0819 06:26:23.448323 17829 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0819 06:28:42.644249 17829 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 06:30:03.919219 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86628\nI0819 06:30:03.919579 17829 solver.cpp:404]     Test net output #1: loss = 0.498101 (* 1 = 0.498101 loss)\nI0819 06:30:05.227953 17829 solver.cpp:228] Iteration 62500, loss = 0.000413391\nI0819 06:30:05.228010 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
06:30:05.228027 17829 solver.cpp:244]     Train net output #1: loss = 0.000413216 (* 1 = 0.000413216 loss)\nI0819 06:30:05.325186 17829 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0819 06:32:24.341130 17829 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 06:33:45.612853 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86772\nI0819 06:33:45.613235 17829 solver.cpp:404]     Test net output #1: loss = 0.495835 (* 1 = 0.495835 loss)\nI0819 06:33:46.921598 17829 solver.cpp:228] Iteration 62600, loss = 0.000498528\nI0819 06:33:46.921660 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:33:46.921677 17829 solver.cpp:244]     Train net output #1: loss = 0.000498353 (* 1 = 0.000498353 loss)\nI0819 06:33:47.024495 17829 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0819 06:36:06.121772 17829 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 06:37:27.389760 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8676\nI0819 06:37:27.390115 17829 solver.cpp:404]     Test net output #1: loss = 0.494599 (* 1 = 0.494599 loss)\nI0819 06:37:28.699265 17829 solver.cpp:228] Iteration 62700, loss = 0.000463472\nI0819 06:37:28.699324 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:37:28.699342 17829 solver.cpp:244]     Train net output #1: loss = 0.000463297 (* 1 = 0.000463297 loss)\nI0819 06:37:28.799784 17829 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0819 06:39:47.926703 17829 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 06:41:09.223723 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0819 06:41:09.224072 17829 solver.cpp:404]     Test net output #1: loss = 0.494003 (* 1 = 0.494003 loss)\nI0819 06:41:10.533855 17829 solver.cpp:228] Iteration 62800, loss = 0.000508916\nI0819 06:41:10.533916 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:41:10.533933 17829 solver.cpp:244]     Train net output #1: loss = 0.000508741 (* 1 = 0.000508741 loss)\nI0819 
06:41:10.630344 17829 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0819 06:43:29.807564 17829 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 06:44:51.093828 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86692\nI0819 06:44:51.094180 17829 solver.cpp:404]     Test net output #1: loss = 0.494542 (* 1 = 0.494542 loss)\nI0819 06:44:52.404233 17829 solver.cpp:228] Iteration 62900, loss = 0.000529027\nI0819 06:44:52.404294 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:44:52.404312 17829 solver.cpp:244]     Train net output #1: loss = 0.000528851 (* 1 = 0.000528851 loss)\nI0819 06:44:52.503698 17829 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0819 06:47:11.711843 17829 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 06:48:33.006036 17829 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 06:48:33.006425 17829 solver.cpp:404]     Test net output #1: loss = 0.494629 (* 1 = 0.494629 loss)\nI0819 06:48:34.314404 17829 solver.cpp:228] Iteration 63000, loss = 0.00044797\nI0819 06:48:34.314465 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:48:34.314486 17829 solver.cpp:244]     Train net output #1: loss = 0.000447794 (* 1 = 0.000447794 loss)\nI0819 06:48:34.416147 17829 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0819 06:50:53.575043 17829 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 06:52:14.852053 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 06:52:14.852422 17829 solver.cpp:404]     Test net output #1: loss = 0.495744 (* 1 = 0.495744 loss)\nI0819 06:52:16.162286 17829 solver.cpp:228] Iteration 63100, loss = 0.000474221\nI0819 06:52:16.162348 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:52:16.162364 17829 solver.cpp:244]     Train net output #1: loss = 0.000474046 (* 1 = 0.000474046 loss)\nI0819 06:52:16.264461 17829 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0819 06:54:35.382786 17829 solver.cpp:337] 
Iteration 63200, Testing net (#0)\nI0819 06:55:56.661031 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86768\nI0819 06:55:56.661384 17829 solver.cpp:404]     Test net output #1: loss = 0.494836 (* 1 = 0.494836 loss)\nI0819 06:55:57.969544 17829 solver.cpp:228] Iteration 63200, loss = 0.000461399\nI0819 06:55:57.969605 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:55:57.969622 17829 solver.cpp:244]     Train net output #1: loss = 0.000461224 (* 1 = 0.000461224 loss)\nI0819 06:55:58.069365 17829 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0819 06:58:17.154736 17829 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 06:59:38.427448 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 06:59:38.427832 17829 solver.cpp:404]     Test net output #1: loss = 0.494898 (* 1 = 0.494898 loss)\nI0819 06:59:39.736456 17829 solver.cpp:228] Iteration 63300, loss = 0.000469932\nI0819 06:59:39.736521 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:59:39.736538 17829 solver.cpp:244]     Train net output #1: loss = 0.000469756 (* 1 = 0.000469756 loss)\nI0819 06:59:39.835544 17829 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0819 07:01:58.906982 17829 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:03:20.177461 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86768\nI0819 07:03:20.177814 17829 solver.cpp:404]     Test net output #1: loss = 0.494136 (* 1 = 0.494136 loss)\nI0819 07:03:21.488416 17829 solver.cpp:228] Iteration 63400, loss = 0.000448599\nI0819 07:03:21.488477 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:03:21.488494 17829 solver.cpp:244]     Train net output #1: loss = 0.000448423 (* 1 = 0.000448423 loss)\nI0819 07:03:21.588173 17829 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0819 07:05:40.715422 17829 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 07:07:02.002147 17829 solver.cpp:404]     Test net output #0: accuracy 
= 0.86672\nI0819 07:07:02.002498 17829 solver.cpp:404]     Test net output #1: loss = 0.496109 (* 1 = 0.496109 loss)\nI0819 07:07:03.312225 17829 solver.cpp:228] Iteration 63500, loss = 0.000470922\nI0819 07:07:03.312288 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:07:03.312305 17829 solver.cpp:244]     Train net output #1: loss = 0.000470746 (* 1 = 0.000470746 loss)\nI0819 07:07:03.412036 17829 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0819 07:09:22.546896 17829 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:10:43.824462 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86712\nI0819 07:10:43.824813 17829 solver.cpp:404]     Test net output #1: loss = 0.494487 (* 1 = 0.494487 loss)\nI0819 07:10:45.134096 17829 solver.cpp:228] Iteration 63600, loss = 0.000470244\nI0819 07:10:45.134160 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:10:45.134176 17829 solver.cpp:244]     Train net output #1: loss = 0.000470068 (* 1 = 0.000470068 loss)\nI0819 07:10:45.237926 17829 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0819 07:13:04.385406 17829 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 07:14:25.639308 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86668\nI0819 07:14:25.639686 17829 solver.cpp:404]     Test net output #1: loss = 0.495401 (* 1 = 0.495401 loss)\nI0819 07:14:26.948328 17829 solver.cpp:228] Iteration 63700, loss = 0.000447584\nI0819 07:14:26.948390 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:14:26.948407 17829 solver.cpp:244]     Train net output #1: loss = 0.000447408 (* 1 = 0.000447408 loss)\nI0819 07:14:27.051148 17829 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0819 07:16:46.233595 17829 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 07:18:07.497258 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0819 07:18:07.497642 17829 solver.cpp:404]     Test net output #1: loss = 0.494042 (* 1 = 0.494042 
loss)\nI0819 07:18:08.807917 17829 solver.cpp:228] Iteration 63800, loss = 0.000405288\nI0819 07:18:08.807977 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:18:08.807994 17829 solver.cpp:244]     Train net output #1: loss = 0.000405112 (* 1 = 0.000405112 loss)\nI0819 07:18:08.904544 17829 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0819 07:20:28.102947 17829 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 07:21:49.363165 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86644\nI0819 07:21:49.363520 17829 solver.cpp:404]     Test net output #1: loss = 0.495691 (* 1 = 0.495691 loss)\nI0819 07:21:50.672642 17829 solver.cpp:228] Iteration 63900, loss = 0.000528598\nI0819 07:21:50.672701 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:21:50.672719 17829 solver.cpp:244]     Train net output #1: loss = 0.000528422 (* 1 = 0.000528422 loss)\nI0819 07:21:50.773092 17829 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0819 07:24:09.975523 17829 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 07:25:31.229683 17829 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 07:25:31.230060 17829 solver.cpp:404]     Test net output #1: loss = 0.492983 (* 1 = 0.492983 loss)\nI0819 07:25:32.539090 17829 solver.cpp:228] Iteration 64000, loss = 0.000382098\nI0819 07:25:32.539153 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:25:32.539170 17829 solver.cpp:244]     Train net output #1: loss = 0.000381923 (* 1 = 0.000381923 loss)\nI0819 07:25:32.642843 17829 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0819 07:27:51.865527 17829 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 07:29:13.116734 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8662\nI0819 07:29:13.117108 17829 solver.cpp:404]     Test net output #1: loss = 0.496686 (* 1 = 0.496686 loss)\nI0819 07:29:14.424829 17829 solver.cpp:228] Iteration 64100, loss = 0.000491214\nI0819 07:29:14.424890 17829 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:29:14.424908 17829 solver.cpp:244]     Train net output #1: loss = 0.000491039 (* 1 = 0.000491039 loss)\nI0819 07:29:14.520787 17829 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0819 07:31:33.673903 17829 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 07:32:54.923346 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86756\nI0819 07:32:54.923719 17829 solver.cpp:404]     Test net output #1: loss = 0.495947 (* 1 = 0.495947 loss)\nI0819 07:32:56.229745 17829 solver.cpp:228] Iteration 64200, loss = 0.000377922\nI0819 07:32:56.229807 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:32:56.229825 17829 solver.cpp:244]     Train net output #1: loss = 0.000377746 (* 1 = 0.000377746 loss)\nI0819 07:32:56.334442 17829 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0819 07:35:15.412518 17829 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 07:36:36.687378 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0819 07:36:36.687741 17829 solver.cpp:404]     Test net output #1: loss = 0.496504 (* 1 = 0.496504 loss)\nI0819 07:36:37.993636 17829 solver.cpp:228] Iteration 64300, loss = 0.00036369\nI0819 07:36:37.993697 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:36:37.993716 17829 solver.cpp:244]     Train net output #1: loss = 0.000363515 (* 1 = 0.000363515 loss)\nI0819 07:36:38.099280 17829 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0819 07:38:57.410876 17829 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 07:40:18.653615 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 07:40:18.653975 17829 solver.cpp:404]     Test net output #1: loss = 0.495313 (* 1 = 0.495313 loss)\nI0819 07:40:19.959661 17829 solver.cpp:228] Iteration 64400, loss = 0.000486125\nI0819 07:40:19.959722 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:40:19.959739 17829 solver.cpp:244]     Train net 
output #1: loss = 0.000485949 (* 1 = 0.000485949 loss)\nI0819 07:40:20.062122 17829 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0819 07:42:39.277104 17829 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 07:44:00.537163 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86612\nI0819 07:44:00.537503 17829 solver.cpp:404]     Test net output #1: loss = 0.496234 (* 1 = 0.496234 loss)\nI0819 07:44:01.843822 17829 solver.cpp:228] Iteration 64500, loss = 0.000439114\nI0819 07:44:01.843879 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:44:01.843897 17829 solver.cpp:244]     Train net output #1: loss = 0.000438938 (* 1 = 0.000438938 loss)\nI0819 07:44:01.946421 17829 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0819 07:46:21.078488 17829 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 07:47:42.307620 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0819 07:47:42.307972 17829 solver.cpp:404]     Test net output #1: loss = 0.494523 (* 1 = 0.494523 loss)\nI0819 07:47:43.613945 17829 solver.cpp:228] Iteration 64600, loss = 0.000394912\nI0819 07:47:43.614006 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:47:43.614024 17829 solver.cpp:244]     Train net output #1: loss = 0.000394737 (* 1 = 0.000394737 loss)\nI0819 07:47:43.716356 17829 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0819 07:50:02.873148 17829 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 07:51:24.118854 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8658\nI0819 07:51:24.119232 17829 solver.cpp:404]     Test net output #1: loss = 0.496673 (* 1 = 0.496673 loss)\nI0819 07:51:25.426252 17829 solver.cpp:228] Iteration 64700, loss = 0.000433611\nI0819 07:51:25.426313 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:51:25.426331 17829 solver.cpp:244]     Train net output #1: loss = 0.000433435 (* 1 = 0.000433435 loss)\nI0819 07:51:25.525573 17829 sgd_solver.cpp:166] Iteration 
64700, lr = 0.035\nI0819 07:53:44.693387 17829 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 07:55:05.943316 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86776\nI0819 07:55:05.943711 17829 solver.cpp:404]     Test net output #1: loss = 0.494431 (* 1 = 0.494431 loss)\nI0819 07:55:07.250242 17829 solver.cpp:228] Iteration 64800, loss = 0.000484421\nI0819 07:55:07.250304 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:55:07.250321 17829 solver.cpp:244]     Train net output #1: loss = 0.000484246 (* 1 = 0.000484246 loss)\nI0819 07:55:07.356016 17829 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0819 07:57:26.467665 17829 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 07:58:47.710777 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0819 07:58:47.711133 17829 solver.cpp:404]     Test net output #1: loss = 0.495835 (* 1 = 0.495835 loss)\nI0819 07:58:49.017841 17829 solver.cpp:228] Iteration 64900, loss = 0.000435713\nI0819 07:58:49.017901 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:58:49.017918 17829 solver.cpp:244]     Train net output #1: loss = 0.000435537 (* 1 = 0.000435537 loss)\nI0819 07:58:49.122972 17829 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0819 08:01:08.406603 17829 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:02:29.643586 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 08:02:29.643959 17829 solver.cpp:404]     Test net output #1: loss = 0.495172 (* 1 = 0.495172 loss)\nI0819 08:02:30.951045 17829 solver.cpp:228] Iteration 65000, loss = 0.000525623\nI0819 08:02:30.951104 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:02:30.951122 17829 solver.cpp:244]     Train net output #1: loss = 0.000525447 (* 1 = 0.000525447 loss)\nI0819 08:02:31.055188 17829 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0819 08:04:50.272276 17829 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 
08:06:11.510820 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86616\nI0819 08:06:11.511198 17829 solver.cpp:404]     Test net output #1: loss = 0.495328 (* 1 = 0.495328 loss)\nI0819 08:06:12.818373 17829 solver.cpp:228] Iteration 65100, loss = 0.000493508\nI0819 08:06:12.818434 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:06:12.818452 17829 solver.cpp:244]     Train net output #1: loss = 0.000493332 (* 1 = 0.000493332 loss)\nI0819 08:06:12.920917 17829 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0819 08:08:32.041009 17829 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:09:53.256165 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 08:09:53.256567 17829 solver.cpp:404]     Test net output #1: loss = 0.493996 (* 1 = 0.493996 loss)\nI0819 08:09:54.563218 17829 solver.cpp:228] Iteration 65200, loss = 0.000507007\nI0819 08:09:54.563279 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:09:54.563297 17829 solver.cpp:244]     Train net output #1: loss = 0.000506831 (* 1 = 0.000506831 loss)\nI0819 08:09:54.663563 17829 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0819 08:12:13.863458 17829 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 08:13:34.184770 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86648\nI0819 08:13:34.185101 17829 solver.cpp:404]     Test net output #1: loss = 0.49574 (* 1 = 0.49574 loss)\nI0819 08:13:35.488461 17829 solver.cpp:228] Iteration 65300, loss = 0.000450077\nI0819 08:13:35.488512 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:13:35.488538 17829 solver.cpp:244]     Train net output #1: loss = 0.000449901 (* 1 = 0.000449901 loss)\nI0819 08:13:35.593529 17829 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0819 08:15:54.637004 17829 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 08:17:14.952556 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86776\nI0819 08:17:14.952870 17829 
solver.cpp:404]     Test net output #1: loss = 0.495487 (* 1 = 0.495487 loss)\nI0819 08:17:16.255158 17829 solver.cpp:228] Iteration 65400, loss = 0.000417474\nI0819 08:17:16.255208 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:17:16.255233 17829 solver.cpp:244]     Train net output #1: loss = 0.000417299 (* 1 = 0.000417299 loss)\nI0819 08:17:16.361464 17829 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0819 08:19:35.416504 17829 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 08:20:55.751510 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86632\nI0819 08:20:55.751833 17829 solver.cpp:404]     Test net output #1: loss = 0.496946 (* 1 = 0.496946 loss)\nI0819 08:20:57.054888 17829 solver.cpp:228] Iteration 65500, loss = 0.000454443\nI0819 08:20:57.054932 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:20:57.054949 17829 solver.cpp:244]     Train net output #1: loss = 0.000454268 (* 1 = 0.000454268 loss)\nI0819 08:20:57.168788 17829 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0819 08:23:16.483963 17829 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 08:24:36.782593 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86788\nI0819 08:24:36.782945 17829 solver.cpp:404]     Test net output #1: loss = 0.495798 (* 1 = 0.495798 loss)\nI0819 08:24:38.086207 17829 solver.cpp:228] Iteration 65600, loss = 0.000509213\nI0819 08:24:38.086249 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:24:38.086266 17829 solver.cpp:244]     Train net output #1: loss = 0.000509037 (* 1 = 0.000509037 loss)\nI0819 08:24:38.193773 17829 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0819 08:26:57.503609 17829 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 08:28:17.881669 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86652\nI0819 08:28:17.881999 17829 solver.cpp:404]     Test net output #1: loss = 0.496931 (* 1 = 0.496931 loss)\nI0819 08:28:19.185508 17829 
solver.cpp:228] Iteration 65700, loss = 0.000460505\nI0819 08:28:19.185555 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:28:19.185570 17829 solver.cpp:244]     Train net output #1: loss = 0.000460329 (* 1 = 0.000460329 loss)\nI0819 08:28:19.290980 17829 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0819 08:30:38.402665 17829 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 08:31:58.709697 17829 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 08:31:58.710024 17829 solver.cpp:404]     Test net output #1: loss = 0.495415 (* 1 = 0.495415 loss)\nI0819 08:32:00.013062 17829 solver.cpp:228] Iteration 65800, loss = 0.000418522\nI0819 08:32:00.013109 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:32:00.013125 17829 solver.cpp:244]     Train net output #1: loss = 0.000418346 (* 1 = 0.000418346 loss)\nI0819 08:32:00.121213 17829 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0819 08:34:19.313204 17829 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 08:35:39.611273 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0819 08:35:39.611623 17829 solver.cpp:404]     Test net output #1: loss = 0.497421 (* 1 = 0.497421 loss)\nI0819 08:35:40.914186 17829 solver.cpp:228] Iteration 65900, loss = 0.000461352\nI0819 08:35:40.914232 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:35:40.914247 17829 solver.cpp:244]     Train net output #1: loss = 0.000461177 (* 1 = 0.000461177 loss)\nI0819 08:35:41.024202 17829 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0819 08:38:00.141472 17829 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 08:39:20.474750 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0819 08:39:20.475088 17829 solver.cpp:404]     Test net output #1: loss = 0.495852 (* 1 = 0.495852 loss)\nI0819 08:39:21.780362 17829 solver.cpp:228] Iteration 66000, loss = 0.000426039\nI0819 08:39:21.780414 17829 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 08:39:21.780429 17829 solver.cpp:244]     Train net output #1: loss = 0.000425863 (* 1 = 0.000425863 loss)\nI0819 08:39:21.877709 17829 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0819 08:41:40.167927 17829 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 08:43:00.477316 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0819 08:43:00.477675 17829 solver.cpp:404]     Test net output #1: loss = 0.496488 (* 1 = 0.496488 loss)\nI0819 08:43:01.783711 17829 solver.cpp:228] Iteration 66100, loss = 0.00048218\nI0819 08:43:01.783759 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:43:01.783776 17829 solver.cpp:244]     Train net output #1: loss = 0.000482004 (* 1 = 0.000482004 loss)\nI0819 08:43:01.883546 17829 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0819 08:45:20.252384 17829 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 08:46:40.570863 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86868\nI0819 08:46:40.571188 17829 solver.cpp:404]     Test net output #1: loss = 0.496053 (* 1 = 0.496053 loss)\nI0819 08:46:41.878425 17829 solver.cpp:228] Iteration 66200, loss = 0.000507418\nI0819 08:46:41.878473 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:46:41.878489 17829 solver.cpp:244]     Train net output #1: loss = 0.000507242 (* 1 = 0.000507242 loss)\nI0819 08:46:41.976204 17829 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0819 08:49:00.308984 17829 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 08:50:20.620579 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 08:50:20.620931 17829 solver.cpp:404]     Test net output #1: loss = 0.497848 (* 1 = 0.497848 loss)\nI0819 08:50:21.927158 17829 solver.cpp:228] Iteration 66300, loss = 0.000465193\nI0819 08:50:21.927204 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:50:21.927222 17829 solver.cpp:244]     Train net output #1: loss = 0.000465018 
(* 1 = 0.000465018 loss)\nI0819 08:50:22.028998 17829 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0819 08:52:40.408819 17829 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 08:54:00.733916 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86828\nI0819 08:54:00.734244 17829 solver.cpp:404]     Test net output #1: loss = 0.497257 (* 1 = 0.497257 loss)\nI0819 08:54:02.040537 17829 solver.cpp:228] Iteration 66400, loss = 0.000439924\nI0819 08:54:02.040583 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:54:02.040601 17829 solver.cpp:244]     Train net output #1: loss = 0.000439748 (* 1 = 0.000439748 loss)\nI0819 08:54:02.140036 17829 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0819 08:56:20.470088 17829 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 08:57:40.765557 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86652\nI0819 08:57:40.765918 17829 solver.cpp:404]     Test net output #1: loss = 0.498677 (* 1 = 0.498677 loss)\nI0819 08:57:42.072536 17829 solver.cpp:228] Iteration 66500, loss = 0.000483067\nI0819 08:57:42.072582 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:57:42.072598 17829 solver.cpp:244]     Train net output #1: loss = 0.000482891 (* 1 = 0.000482891 loss)\nI0819 08:57:42.173578 17829 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0819 09:00:00.463837 17829 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:01:20.769304 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86872\nI0819 09:01:20.769671 17829 solver.cpp:404]     Test net output #1: loss = 0.497012 (* 1 = 0.497012 loss)\nI0819 09:01:22.075422 17829 solver.cpp:228] Iteration 66600, loss = 0.000393787\nI0819 09:01:22.075465 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:01:22.075481 17829 solver.cpp:244]     Train net output #1: loss = 0.000393612 (* 1 = 0.000393612 loss)\nI0819 09:01:22.173943 17829 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0819 
09:03:40.594640 17829 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 09:05:00.898564 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0819 09:05:00.898922 17829 solver.cpp:404]     Test net output #1: loss = 0.498797 (* 1 = 0.498797 loss)\nI0819 09:05:02.205651 17829 solver.cpp:228] Iteration 66700, loss = 0.000500324\nI0819 09:05:02.205694 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:05:02.205710 17829 solver.cpp:244]     Train net output #1: loss = 0.000500148 (* 1 = 0.000500148 loss)\nI0819 09:05:02.307363 17829 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0819 09:07:20.652068 17829 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:08:40.934885 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86908\nI0819 09:08:40.935243 17829 solver.cpp:404]     Test net output #1: loss = 0.496997 (* 1 = 0.496997 loss)\nI0819 09:08:42.240346 17829 solver.cpp:228] Iteration 66800, loss = 0.000442649\nI0819 09:08:42.240389 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:08:42.240404 17829 solver.cpp:244]     Train net output #1: loss = 0.000442473 (* 1 = 0.000442473 loss)\nI0819 09:08:42.340869 17829 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0819 09:11:00.739622 17829 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 09:12:21.033509 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86656\nI0819 09:12:21.033862 17829 solver.cpp:404]     Test net output #1: loss = 0.500119 (* 1 = 0.500119 loss)\nI0819 09:12:22.339869 17829 solver.cpp:228] Iteration 66900, loss = 0.000453282\nI0819 09:12:22.339912 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:12:22.339928 17829 solver.cpp:244]     Train net output #1: loss = 0.000453106 (* 1 = 0.000453106 loss)\nI0819 09:12:22.439864 17829 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0819 09:14:40.971305 17829 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 09:16:01.256561 17829 
solver.cpp:404]     Test net output #0: accuracy = 0.86844\nI0819 09:16:01.256889 17829 solver.cpp:404]     Test net output #1: loss = 0.497397 (* 1 = 0.497397 loss)\nI0819 09:16:02.562372 17829 solver.cpp:228] Iteration 67000, loss = 0.000502258\nI0819 09:16:02.562419 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:16:02.562435 17829 solver.cpp:244]     Train net output #1: loss = 0.000502083 (* 1 = 0.000502083 loss)\nI0819 09:16:02.664499 17829 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0819 09:18:21.467638 17829 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 09:19:41.789727 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 09:19:41.790067 17829 solver.cpp:404]     Test net output #1: loss = 0.497893 (* 1 = 0.497893 loss)\nI0819 09:19:43.096757 17829 solver.cpp:228] Iteration 67100, loss = 0.000404866\nI0819 09:19:43.096799 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:19:43.096815 17829 solver.cpp:244]     Train net output #1: loss = 0.00040469 (* 1 = 0.00040469 loss)\nI0819 09:19:43.203637 17829 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0819 09:22:02.477175 17829 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 09:23:22.790248 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86896\nI0819 09:23:22.790578 17829 solver.cpp:404]     Test net output #1: loss = 0.495281 (* 1 = 0.495281 loss)\nI0819 09:23:24.096171 17829 solver.cpp:228] Iteration 67200, loss = 0.000481997\nI0819 09:23:24.096215 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:23:24.096231 17829 solver.cpp:244]     Train net output #1: loss = 0.000481821 (* 1 = 0.000481821 loss)\nI0819 09:23:24.203665 17829 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0819 09:25:43.539098 17829 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 09:27:03.864218 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 09:27:03.864570 17829 solver.cpp:404]     Test 
net output #1: loss = 0.497583 (* 1 = 0.497583 loss)\nI0819 09:27:05.170461 17829 solver.cpp:228] Iteration 67300, loss = 0.000439252\nI0819 09:27:05.170503 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:27:05.170519 17829 solver.cpp:244]     Train net output #1: loss = 0.000439076 (* 1 = 0.000439076 loss)\nI0819 09:27:05.277549 17829 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0819 09:29:24.549633 17829 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 09:30:44.882444 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8686\nI0819 09:30:44.882802 17829 solver.cpp:404]     Test net output #1: loss = 0.4968 (* 1 = 0.4968 loss)\nI0819 09:30:46.188699 17829 solver.cpp:228] Iteration 67400, loss = 0.000423998\nI0819 09:30:46.188741 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:30:46.188757 17829 solver.cpp:244]     Train net output #1: loss = 0.000423822 (* 1 = 0.000423822 loss)\nI0819 09:30:46.291658 17829 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0819 09:33:05.530650 17829 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 09:34:25.822577 17829 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 09:34:25.822909 17829 solver.cpp:404]     Test net output #1: loss = 0.497724 (* 1 = 0.497724 loss)\nI0819 09:34:27.129163 17829 solver.cpp:228] Iteration 67500, loss = 0.000466275\nI0819 09:34:27.129206 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:34:27.129221 17829 solver.cpp:244]     Train net output #1: loss = 0.0004661 (* 1 = 0.0004661 loss)\nI0819 09:34:27.231330 17829 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0819 09:36:46.468219 17829 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 09:38:06.802664 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86828\nI0819 09:38:06.803020 17829 solver.cpp:404]     Test net output #1: loss = 0.496754 (* 1 = 0.496754 loss)\nI0819 09:38:08.108311 17829 solver.cpp:228] Iteration 67600, loss = 
0.000466783\nI0819 09:38:08.108355 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:38:08.108371 17829 solver.cpp:244]     Train net output #1: loss = 0.000466607 (* 1 = 0.000466607 loss)\nI0819 09:38:08.215886 17829 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0819 09:40:27.483402 17829 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 09:41:47.802955 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 09:41:47.803306 17829 solver.cpp:404]     Test net output #1: loss = 0.498321 (* 1 = 0.498321 loss)\nI0819 09:41:49.111212 17829 solver.cpp:228] Iteration 67700, loss = 0.000508503\nI0819 09:41:49.111274 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:41:49.111290 17829 solver.cpp:244]     Train net output #1: loss = 0.000508328 (* 1 = 0.000508328 loss)\nI0819 09:41:49.206295 17829 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0819 09:44:07.723675 17829 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 09:45:28.879791 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86836\nI0819 09:45:28.880074 17829 solver.cpp:404]     Test net output #1: loss = 0.498181 (* 1 = 0.498181 loss)\nI0819 09:45:30.188133 17829 solver.cpp:228] Iteration 67800, loss = 0.00047042\nI0819 09:45:30.188194 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:45:30.188211 17829 solver.cpp:244]     Train net output #1: loss = 0.000470244 (* 1 = 0.000470244 loss)\nI0819 09:45:30.282279 17829 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0819 09:47:48.836755 17829 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 09:49:09.880506 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86728\nI0819 09:49:09.880781 17829 solver.cpp:404]     Test net output #1: loss = 0.498714 (* 1 = 0.498714 loss)\nI0819 09:49:11.189393 17829 solver.cpp:228] Iteration 67900, loss = 0.000416425\nI0819 09:49:11.189455 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
09:49:11.189472 17829 solver.cpp:244]     Train net output #1: loss = 0.000416249 (* 1 = 0.000416249 loss)\nI0819 09:49:11.289073 17829 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0819 09:51:29.815552 17829 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 09:52:50.965672 17829 solver.cpp:404]     Test net output #0: accuracy = 0.869\nI0819 09:52:50.966033 17829 solver.cpp:404]     Test net output #1: loss = 0.497174 (* 1 = 0.497174 loss)\nI0819 09:52:52.274104 17829 solver.cpp:228] Iteration 68000, loss = 0.00043583\nI0819 09:52:52.274165 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:52:52.274183 17829 solver.cpp:244]     Train net output #1: loss = 0.000435654 (* 1 = 0.000435654 loss)\nI0819 09:52:52.374868 17829 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0819 09:55:10.972807 17829 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 09:56:32.140956 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0819 09:56:32.141243 17829 solver.cpp:404]     Test net output #1: loss = 0.499039 (* 1 = 0.499039 loss)\nI0819 09:56:33.450255 17829 solver.cpp:228] Iteration 68100, loss = 0.000484558\nI0819 09:56:33.450314 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:56:33.450330 17829 solver.cpp:244]     Train net output #1: loss = 0.000484382 (* 1 = 0.000484382 loss)\nI0819 09:56:33.543809 17829 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0819 09:58:52.099135 17829 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:00:13.273371 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8688\nI0819 10:00:13.273707 17829 solver.cpp:404]     Test net output #1: loss = 0.497937 (* 1 = 0.497937 loss)\nI0819 10:00:14.581934 17829 solver.cpp:228] Iteration 68200, loss = 0.000423039\nI0819 10:00:14.581993 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:00:14.582011 17829 solver.cpp:244]     Train net output #1: loss = 0.000422863 (* 1 = 0.000422863 loss)\nI0819 
10:00:14.676226 17829 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0819 10:02:33.139484 17829 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 10:03:54.259718 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86772\nI0819 10:03:54.260006 17829 solver.cpp:404]     Test net output #1: loss = 0.501137 (* 1 = 0.501137 loss)\nI0819 10:03:55.568208 17829 solver.cpp:228] Iteration 68300, loss = 0.000422169\nI0819 10:03:55.568267 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:03:55.568285 17829 solver.cpp:244]     Train net output #1: loss = 0.000421993 (* 1 = 0.000421993 loss)\nI0819 10:03:55.666415 17829 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0819 10:06:14.158563 17829 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:07:35.323163 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86852\nI0819 10:07:35.323473 17829 solver.cpp:404]     Test net output #1: loss = 0.499092 (* 1 = 0.499092 loss)\nI0819 10:07:36.632216 17829 solver.cpp:228] Iteration 68400, loss = 0.000501502\nI0819 10:07:36.632275 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:07:36.632293 17829 solver.cpp:244]     Train net output #1: loss = 0.000501326 (* 1 = 0.000501326 loss)\nI0819 10:07:36.723999 17829 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0819 10:09:55.169060 17829 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 10:11:16.338944 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 10:11:16.339282 17829 solver.cpp:404]     Test net output #1: loss = 0.502248 (* 1 = 0.502248 loss)\nI0819 10:11:17.648939 17829 solver.cpp:228] Iteration 68500, loss = 0.00050897\nI0819 10:11:17.648998 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:11:17.649015 17829 solver.cpp:244]     Train net output #1: loss = 0.000508794 (* 1 = 0.000508794 loss)\nI0819 10:11:17.743454 17829 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0819 10:13:36.199604 17829 
solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 10:14:57.024324 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86868\nI0819 10:14:57.024627 17829 solver.cpp:404]     Test net output #1: loss = 0.499785 (* 1 = 0.499785 loss)\nI0819 10:14:58.332841 17829 solver.cpp:228] Iteration 68600, loss = 0.000492508\nI0819 10:14:58.332901 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:14:58.332917 17829 solver.cpp:244]     Train net output #1: loss = 0.000492332 (* 1 = 0.000492332 loss)\nI0819 10:14:58.433678 17829 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0819 10:17:16.961755 17829 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 10:18:37.987690 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86816\nI0819 10:18:37.987982 17829 solver.cpp:404]     Test net output #1: loss = 0.501083 (* 1 = 0.501083 loss)\nI0819 10:18:39.297592 17829 solver.cpp:228] Iteration 68700, loss = 0.00040526\nI0819 10:18:39.297652 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:18:39.297669 17829 solver.cpp:244]     Train net output #1: loss = 0.000405084 (* 1 = 0.000405084 loss)\nI0819 10:18:39.394806 17829 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0819 10:20:57.909098 17829 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 10:22:18.998762 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86912\nI0819 10:22:18.999086 17829 solver.cpp:404]     Test net output #1: loss = 0.498634 (* 1 = 0.498634 loss)\nI0819 10:22:20.309578 17829 solver.cpp:228] Iteration 68800, loss = 0.000416579\nI0819 10:22:20.309638 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:22:20.309654 17829 solver.cpp:244]     Train net output #1: loss = 0.000416403 (* 1 = 0.000416403 loss)\nI0819 10:22:20.404908 17829 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0819 10:24:38.909200 17829 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 10:26:00.003613 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.86792\nI0819 10:26:00.003918 17829 solver.cpp:404]     Test net output #1: loss = 0.501602 (* 1 = 0.501602 loss)\nI0819 10:26:01.313477 17829 solver.cpp:228] Iteration 68900, loss = 0.000394654\nI0819 10:26:01.313539 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:26:01.313557 17829 solver.cpp:244]     Train net output #1: loss = 0.000394478 (* 1 = 0.000394478 loss)\nI0819 10:26:01.409415 17829 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0819 10:28:19.990046 17829 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 10:29:41.232969 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8692\nI0819 10:29:41.233264 17829 solver.cpp:404]     Test net output #1: loss = 0.498822 (* 1 = 0.498822 loss)\nI0819 10:29:42.543674 17829 solver.cpp:228] Iteration 69000, loss = 0.000452693\nI0819 10:29:42.543736 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:29:42.543753 17829 solver.cpp:244]     Train net output #1: loss = 0.000452517 (* 1 = 0.000452517 loss)\nI0819 10:29:42.642268 17829 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0819 10:32:01.335342 17829 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 10:33:22.499719 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86836\nI0819 10:33:22.500092 17829 solver.cpp:404]     Test net output #1: loss = 0.502154 (* 1 = 0.502154 loss)\nI0819 10:33:23.810117 17829 solver.cpp:228] Iteration 69100, loss = 0.000483208\nI0819 10:33:23.810180 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:33:23.810197 17829 solver.cpp:244]     Train net output #1: loss = 0.000483032 (* 1 = 0.000483032 loss)\nI0819 10:33:23.907125 17829 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0819 10:35:42.305928 17829 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 10:37:03.575214 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86996\nI0819 10:37:03.575579 17829 solver.cpp:404]     Test net output #1: loss = 
0.499701 (* 1 = 0.499701 loss)\nI0819 10:37:04.885221 17829 solver.cpp:228] Iteration 69200, loss = 0.000431643\nI0819 10:37:04.885285 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:37:04.885310 17829 solver.cpp:244]     Train net output #1: loss = 0.000431467 (* 1 = 0.000431467 loss)\nI0819 10:37:04.982578 17829 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0819 10:39:23.389219 17829 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 10:40:44.614168 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86852\nI0819 10:40:44.614526 17829 solver.cpp:404]     Test net output #1: loss = 0.502574 (* 1 = 0.502574 loss)\nI0819 10:40:45.923949 17829 solver.cpp:228] Iteration 69300, loss = 0.000439053\nI0819 10:40:45.924015 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:40:45.924038 17829 solver.cpp:244]     Train net output #1: loss = 0.000438877 (* 1 = 0.000438877 loss)\nI0819 10:40:46.018034 17829 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0819 10:43:04.514197 17829 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 10:44:25.767788 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8696\nI0819 10:44:25.768167 17829 solver.cpp:404]     Test net output #1: loss = 0.502028 (* 1 = 0.502028 loss)\nI0819 10:44:27.078071 17829 solver.cpp:228] Iteration 69400, loss = 0.000445538\nI0819 10:44:27.078138 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:44:27.078164 17829 solver.cpp:244]     Train net output #1: loss = 0.000445362 (* 1 = 0.000445362 loss)\nI0819 10:44:27.175849 17829 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0819 10:46:45.607997 17829 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 10:48:06.810592 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0819 10:48:06.810987 17829 solver.cpp:404]     Test net output #1: loss = 0.504413 (* 1 = 0.504413 loss)\nI0819 10:48:08.119794 17829 solver.cpp:228] Iteration 69500, loss = 
0.000454048\nI0819 10:48:08.119859 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:48:08.119884 17829 solver.cpp:244]     Train net output #1: loss = 0.000453872 (* 1 = 0.000453872 loss)\nI0819 10:48:08.214812 17829 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0819 10:50:26.758545 17829 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 10:51:47.991668 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86948\nI0819 10:51:47.992056 17829 solver.cpp:404]     Test net output #1: loss = 0.501511 (* 1 = 0.501511 loss)\nI0819 10:51:49.300470 17829 solver.cpp:228] Iteration 69600, loss = 0.000431443\nI0819 10:51:49.300534 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:51:49.300557 17829 solver.cpp:244]     Train net output #1: loss = 0.000431267 (* 1 = 0.000431267 loss)\nI0819 10:51:49.399574 17829 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0819 10:54:08.002044 17829 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 10:55:29.259393 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0819 10:55:29.259752 17829 solver.cpp:404]     Test net output #1: loss = 0.503482 (* 1 = 0.503482 loss)\nI0819 10:55:30.570088 17829 solver.cpp:228] Iteration 69700, loss = 0.000470234\nI0819 10:55:30.570153 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:55:30.570178 17829 solver.cpp:244]     Train net output #1: loss = 0.000470058 (* 1 = 0.000470058 loss)\nI0819 10:55:30.664813 17829 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0819 10:57:49.154640 17829 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 10:59:10.423573 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8692\nI0819 10:59:10.423930 17829 solver.cpp:404]     Test net output #1: loss = 0.503025 (* 1 = 0.503025 loss)\nI0819 10:59:11.734179 17829 solver.cpp:228] Iteration 69800, loss = 0.000383647\nI0819 10:59:11.734243 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
10:59:11.734268 17829 solver.cpp:244]     Train net output #1: loss = 0.000383472 (* 1 = 0.000383472 loss)\nI0819 10:59:11.826913 17829 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0819 11:01:30.273552 17829 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 11:02:51.401837 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0819 11:02:51.402185 17829 solver.cpp:404]     Test net output #1: loss = 0.506246 (* 1 = 0.506246 loss)\nI0819 11:02:52.717990 17829 solver.cpp:228] Iteration 69900, loss = 0.000441796\nI0819 11:02:52.718055 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:02:52.718080 17829 solver.cpp:244]     Train net output #1: loss = 0.00044162 (* 1 = 0.00044162 loss)\nI0819 11:02:52.811926 17829 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0819 11:05:11.356375 17829 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 11:06:32.499058 17829 solver.cpp:404]     Test net output #0: accuracy = 0.86996\nI0819 11:06:32.499409 17829 solver.cpp:404]     Test net output #1: loss = 0.502804 (* 1 = 0.502804 loss)\nI0819 11:06:33.818306 17829 solver.cpp:228] Iteration 70000, loss = 0.00048379\nI0819 11:06:33.818377 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:06:33.818409 17829 solver.cpp:244]     Train net output #1: loss = 0.000483614 (* 1 = 0.000483614 loss)\nI0819 11:06:33.906697 17829 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:06:33.906723 17829 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0819 11:08:52.436784 17829 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:10:13.447896 17829 solver.cpp:404]     Test net output #0: accuracy = 0.87396\nI0819 11:10:13.448230 17829 solver.cpp:404]     Test net output #1: loss = 0.485843 (* 1 = 0.485843 loss)\nI0819 11:10:14.757968 17829 solver.cpp:228] Iteration 70100, loss = 0.00038469\nI0819 11:10:14.758033 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:10:14.758057 17829 
solver.cpp:244]     Train net output #1: loss = 0.000384514 (* 1 = 0.000384514 loss)\nI0819 11:10:14.854007 17829 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0819 11:12:33.351680 17829 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 11:13:54.431377 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 11:13:54.431748 17829 solver.cpp:404]     Test net output #1: loss = 0.466028 (* 1 = 0.466028 loss)\nI0819 11:13:55.741307 17829 solver.cpp:228] Iteration 70200, loss = 0.000413838\nI0819 11:13:55.741369 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:13:55.741392 17829 solver.cpp:244]     Train net output #1: loss = 0.000413662 (* 1 = 0.000413662 loss)\nI0819 11:13:55.835599 17829 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0819 11:16:14.321355 17829 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 11:17:35.647610 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0819 11:17:35.648008 17829 solver.cpp:404]     Test net output #1: loss = 0.45314 (* 1 = 0.45314 loss)\nI0819 11:17:36.957409 17829 solver.cpp:228] Iteration 70300, loss = 0.000450327\nI0819 11:17:36.957465 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:17:36.957484 17829 solver.cpp:244]     Train net output #1: loss = 0.000450151 (* 1 = 0.000450151 loss)\nI0819 11:17:37.052373 17829 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0819 11:19:55.529072 17829 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 11:21:16.847522 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0819 11:21:16.847890 17829 solver.cpp:404]     Test net output #1: loss = 0.439577 (* 1 = 0.439577 loss)\nI0819 11:21:18.155855 17829 solver.cpp:228] Iteration 70400, loss = 0.000421687\nI0819 11:21:18.155915 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:21:18.155931 17829 solver.cpp:244]     Train net output #1: loss = 0.000421511 (* 1 = 0.000421511 loss)\nI0819 11:21:18.254549 
17829 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0819 11:23:36.768491 17829 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 11:24:58.169440 17829 solver.cpp:404]     Test net output #0: accuracy = 0.88892\nI0819 11:24:58.169847 17829 solver.cpp:404]     Test net output #1: loss = 0.430698 (* 1 = 0.430698 loss)\nI0819 11:24:59.479840 17829 solver.cpp:228] Iteration 70500, loss = 0.000409988\nI0819 11:24:59.479902 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:24:59.479919 17829 solver.cpp:244]     Train net output #1: loss = 0.000409812 (* 1 = 0.000409812 loss)\nI0819 11:24:59.578085 17829 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0819 11:27:18.144721 17829 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 11:28:39.441648 17829 solver.cpp:404]     Test net output #0: accuracy = 0.89196\nI0819 11:28:39.442054 17829 solver.cpp:404]     Test net output #1: loss = 0.421573 (* 1 = 0.421573 loss)\nI0819 11:28:40.751562 17829 solver.cpp:228] Iteration 70600, loss = 0.000404478\nI0819 11:28:40.751619 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:28:40.751644 17829 solver.cpp:244]     Train net output #1: loss = 0.000404302 (* 1 = 0.000404302 loss)\nI0819 11:28:40.848459 17829 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0819 11:30:59.442749 17829 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 11:32:20.726652 17829 solver.cpp:404]     Test net output #0: accuracy = 0.89376\nI0819 11:32:20.727039 17829 solver.cpp:404]     Test net output #1: loss = 0.414951 (* 1 = 0.414951 loss)\nI0819 11:32:22.035656 17829 solver.cpp:228] Iteration 70700, loss = 0.000500713\nI0819 11:32:22.035713 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:32:22.035744 17829 solver.cpp:244]     Train net output #1: loss = 0.000500538 (* 1 = 0.000500538 loss)\nI0819 11:32:22.128638 17829 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0819 11:34:40.588968 17829 solver.cpp:337] Iteration 
70800, Testing net (#0)\nI0819 11:36:01.917273 17829 solver.cpp:404]     Test net output #0: accuracy = 0.89592\nI0819 11:36:01.917655 17829 solver.cpp:404]     Test net output #1: loss = 0.409482 (* 1 = 0.409482 loss)\nI0819 11:36:03.227916 17829 solver.cpp:228] Iteration 70800, loss = 0.000440951\nI0819 11:36:03.227974 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:36:03.227998 17829 solver.cpp:244]     Train net output #1: loss = 0.000440776 (* 1 = 0.000440776 loss)\nI0819 11:36:03.324481 17829 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0819 11:38:21.792248 17829 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 11:39:43.214282 17829 solver.cpp:404]     Test net output #0: accuracy = 0.8976\nI0819 11:39:43.214709 17829 solver.cpp:404]     Test net output #1: loss = 0.404084 (* 1 = 0.404084 loss)\nI0819 11:39:44.524438 17829 solver.cpp:228] Iteration 70900, loss = 0.000388402\nI0819 11:39:44.524498 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:39:44.524523 17829 solver.cpp:244]     Train net output #1: loss = 0.000388226 (* 1 = 0.000388226 loss)\nI0819 11:39:44.618222 17829 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0819 11:42:03.015405 17829 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 11:43:23.572196 17829 solver.cpp:404]     Test net output #0: accuracy = 0.89948\nI0819 11:43:23.572538 17829 solver.cpp:404]     Test net output #1: loss = 0.401198 (* 1 = 0.401198 loss)\nI0819 11:43:24.879839 17829 solver.cpp:228] Iteration 71000, loss = 0.000391858\nI0819 11:43:24.879886 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:43:24.879904 17829 solver.cpp:244]     Train net output #1: loss = 0.000391683 (* 1 = 0.000391683 loss)\nI0819 11:43:24.980768 17829 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0819 11:45:44.100540 17829 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 11:47:04.421021 17829 solver.cpp:404]     Test net output #0: accuracy = 
0.9002\nI0819 11:47:04.421315 17829 solver.cpp:404]     Test net output #1: loss = 0.396767 (* 1 = 0.396767 loss)\nI0819 11:47:05.727793 17829 solver.cpp:228] Iteration 71100, loss = 0.000371502\nI0819 11:47:05.727840 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:47:05.727855 17829 solver.cpp:244]     Train net output #1: loss = 0.000371326 (* 1 = 0.000371326 loss)\nI0819 11:47:05.840314 17829 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0819 11:49:24.836150 17829 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 11:50:45.143959 17829 solver.cpp:404]     Test net output #0: accuracy = 0.901001\nI0819 11:50:45.144265 17829 solver.cpp:404]     Test net output #1: loss = 0.39552 (* 1 = 0.39552 loss)\nI0819 11:50:46.450649 17829 solver.cpp:228] Iteration 71200, loss = 0.000431948\nI0819 11:50:46.450700 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:50:46.450716 17829 solver.cpp:244]     Train net output #1: loss = 0.000431772 (* 1 = 0.000431772 loss)\nI0819 11:50:46.555179 17829 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0819 11:53:05.568889 17829 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 11:54:25.895820 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90196\nI0819 11:54:25.896154 17829 solver.cpp:404]     Test net output #1: loss = 0.391722 (* 1 = 0.391722 loss)\nI0819 11:54:27.203111 17829 solver.cpp:228] Iteration 71300, loss = 0.000460485\nI0819 11:54:27.203155 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:54:27.203171 17829 solver.cpp:244]     Train net output #1: loss = 0.000460309 (* 1 = 0.000460309 loss)\nI0819 11:54:27.306691 17829 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0819 11:56:46.458215 17829 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 11:58:06.781924 17829 solver.cpp:404]     Test net output #0: accuracy = 0.902201\nI0819 11:58:06.782225 17829 solver.cpp:404]     Test net output #1: loss = 0.39146 (* 1 = 0.39146 
loss)\nI0819 11:58:08.089637 17829 solver.cpp:228] Iteration 71400, loss = 0.000438034\nI0819 11:58:08.089689 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:58:08.089705 17829 solver.cpp:244]     Train net output #1: loss = 0.000437858 (* 1 = 0.000437858 loss)\nI0819 11:58:08.195467 17829 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0819 12:00:27.400647 17829 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:01:47.731125 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90284\nI0819 12:01:47.731436 17829 solver.cpp:404]     Test net output #1: loss = 0.388495 (* 1 = 0.388495 loss)\nI0819 12:01:49.037848 17829 solver.cpp:228] Iteration 71500, loss = 0.000453213\nI0819 12:01:49.037894 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:01:49.037910 17829 solver.cpp:244]     Train net output #1: loss = 0.000453037 (* 1 = 0.000453037 loss)\nI0819 12:01:49.143399 17829 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0819 12:04:08.348562 17829 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:05:28.674235 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90224\nI0819 12:05:28.674527 17829 solver.cpp:404]     Test net output #1: loss = 0.389076 (* 1 = 0.389076 loss)\nI0819 12:05:29.981725 17829 solver.cpp:228] Iteration 71600, loss = 0.000421578\nI0819 12:05:29.981773 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:05:29.981789 17829 solver.cpp:244]     Train net output #1: loss = 0.000421402 (* 1 = 0.000421402 loss)\nI0819 12:05:30.089529 17829 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0819 12:07:49.358840 17829 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:09:09.697329 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90276\nI0819 12:09:09.697624 17829 solver.cpp:404]     Test net output #1: loss = 0.386396 (* 1 = 0.386396 loss)\nI0819 12:09:11.003803 17829 solver.cpp:228] Iteration 71700, loss = 0.000475035\nI0819 12:09:11.003846 
17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:09:11.003862 17829 solver.cpp:244]     Train net output #1: loss = 0.000474859 (* 1 = 0.000474859 loss)\nI0819 12:09:11.112432 17829 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0819 12:11:30.289096 17829 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 12:12:50.590489 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90256\nI0819 12:12:50.590819 17829 solver.cpp:404]     Test net output #1: loss = 0.387178 (* 1 = 0.387178 loss)\nI0819 12:12:51.896255 17829 solver.cpp:228] Iteration 71800, loss = 0.000415628\nI0819 12:12:51.896297 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:12:51.896313 17829 solver.cpp:244]     Train net output #1: loss = 0.000415453 (* 1 = 0.000415453 loss)\nI0819 12:12:52.005480 17829 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0819 12:15:11.240566 17829 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 12:16:31.580322 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 12:16:31.580652 17829 solver.cpp:404]     Test net output #1: loss = 0.384788 (* 1 = 0.384788 loss)\nI0819 12:16:32.886587 17829 solver.cpp:228] Iteration 71900, loss = 0.000415637\nI0819 12:16:32.886631 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:16:32.886648 17829 solver.cpp:244]     Train net output #1: loss = 0.000415461 (* 1 = 0.000415461 loss)\nI0819 12:16:32.993316 17829 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0819 12:18:52.266556 17829 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 12:20:12.612164 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 12:20:12.612505 17829 solver.cpp:404]     Test net output #1: loss = 0.385911 (* 1 = 0.385911 loss)\nI0819 12:20:13.918989 17829 solver.cpp:228] Iteration 72000, loss = 0.000408526\nI0819 12:20:13.919037 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:20:13.919062 17829 solver.cpp:244]     
Train net output #1: loss = 0.00040835 (* 1 = 0.00040835 loss)\nI0819 12:20:14.028905 17829 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0819 12:22:33.186730 17829 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 12:23:53.510294 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90392\nI0819 12:23:53.510643 17829 solver.cpp:404]     Test net output #1: loss = 0.383752 (* 1 = 0.383752 loss)\nI0819 12:23:54.816623 17829 solver.cpp:228] Iteration 72100, loss = 0.00041924\nI0819 12:23:54.816673 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:23:54.816696 17829 solver.cpp:244]     Train net output #1: loss = 0.000419064 (* 1 = 0.000419064 loss)\nI0819 12:23:54.924880 17829 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0819 12:26:14.034354 17829 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 12:27:34.361877 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90348\nI0819 12:27:34.362181 17829 solver.cpp:404]     Test net output #1: loss = 0.385124 (* 1 = 0.385124 loss)\nI0819 12:27:35.668328 17829 solver.cpp:228] Iteration 72200, loss = 0.000444745\nI0819 12:27:35.668375 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:27:35.668400 17829 solver.cpp:244]     Train net output #1: loss = 0.00044457 (* 1 = 0.00044457 loss)\nI0819 12:27:35.769263 17829 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0819 12:29:54.877413 17829 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 12:31:15.188237 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 12:31:15.188539 17829 solver.cpp:404]     Test net output #1: loss = 0.38328 (* 1 = 0.38328 loss)\nI0819 12:31:16.494601 17829 solver.cpp:228] Iteration 72300, loss = 0.000369591\nI0819 12:31:16.494645 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:31:16.494669 17829 solver.cpp:244]     Train net output #1: loss = 0.000369415 (* 1 = 0.000369415 loss)\nI0819 12:31:16.601469 17829 sgd_solver.cpp:166] 
Iteration 72300, lr = 0.0035\nI0819 12:33:35.713660 17829 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 12:34:56.041642 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 12:34:56.041980 17829 solver.cpp:404]     Test net output #1: loss = 0.384814 (* 1 = 0.384814 loss)\nI0819 12:34:57.348553 17829 solver.cpp:228] Iteration 72400, loss = 0.000440666\nI0819 12:34:57.348601 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:34:57.348626 17829 solver.cpp:244]     Train net output #1: loss = 0.00044049 (* 1 = 0.00044049 loss)\nI0819 12:34:57.449604 17829 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0819 12:37:16.562376 17829 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 12:38:36.897560 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 12:38:36.897863 17829 solver.cpp:404]     Test net output #1: loss = 0.382985 (* 1 = 0.382985 loss)\nI0819 12:38:38.204879 17829 solver.cpp:228] Iteration 72500, loss = 0.00043441\nI0819 12:38:38.204923 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:38:38.204947 17829 solver.cpp:244]     Train net output #1: loss = 0.000434234 (* 1 = 0.000434234 loss)\nI0819 12:38:38.308729 17829 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0819 12:40:57.452849 17829 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 12:42:17.790902 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90284\nI0819 12:42:17.791209 17829 solver.cpp:404]     Test net output #1: loss = 0.384427 (* 1 = 0.384427 loss)\nI0819 12:42:19.096791 17829 solver.cpp:228] Iteration 72600, loss = 0.000432243\nI0819 12:42:19.096838 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:42:19.096854 17829 solver.cpp:244]     Train net output #1: loss = 0.000432067 (* 1 = 0.000432067 loss)\nI0819 12:42:19.204639 17829 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0819 12:44:38.160727 17829 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 
12:45:58.485658 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 12:45:58.485992 17829 solver.cpp:404]     Test net output #1: loss = 0.382764 (* 1 = 0.382764 loss)\nI0819 12:45:59.791064 17829 solver.cpp:228] Iteration 72700, loss = 0.000460241\nI0819 12:45:59.791107 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:45:59.791123 17829 solver.cpp:244]     Train net output #1: loss = 0.000460065 (* 1 = 0.000460065 loss)\nI0819 12:45:59.893728 17829 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0819 12:48:18.951853 17829 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 12:49:39.275984 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 12:49:39.276324 17829 solver.cpp:404]     Test net output #1: loss = 0.38429 (* 1 = 0.38429 loss)\nI0819 12:49:40.581683 17829 solver.cpp:228] Iteration 72800, loss = 0.000427676\nI0819 12:49:40.581727 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:49:40.581743 17829 solver.cpp:244]     Train net output #1: loss = 0.0004275 (* 1 = 0.0004275 loss)\nI0819 12:49:40.692001 17829 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0819 12:51:59.943655 17829 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 12:53:20.272516 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90332\nI0819 12:53:20.272873 17829 solver.cpp:404]     Test net output #1: loss = 0.382691 (* 1 = 0.382691 loss)\nI0819 12:53:21.578922 17829 solver.cpp:228] Iteration 72900, loss = 0.000367442\nI0819 12:53:21.578963 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:53:21.578979 17829 solver.cpp:244]     Train net output #1: loss = 0.000367267 (* 1 = 0.000367267 loss)\nI0819 12:53:21.683869 17829 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0819 12:55:40.880455 17829 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 12:57:01.193904 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90332\nI0819 12:57:01.194269 17829 
solver.cpp:404]     Test net output #1: loss = 0.384275 (* 1 = 0.384275 loss)\nI0819 12:57:02.500720 17829 solver.cpp:228] Iteration 73000, loss = 0.00047174\nI0819 12:57:02.500759 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:57:02.500777 17829 solver.cpp:244]     Train net output #1: loss = 0.000471564 (* 1 = 0.000471564 loss)\nI0819 12:57:02.608669 17829 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0819 12:59:22.001821 17829 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:00:42.348130 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0819 13:00:42.348495 17829 solver.cpp:404]     Test net output #1: loss = 0.38268 (* 1 = 0.38268 loss)\nI0819 13:00:43.654530 17829 solver.cpp:228] Iteration 73100, loss = 0.000423095\nI0819 13:00:43.654572 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:00:43.654587 17829 solver.cpp:244]     Train net output #1: loss = 0.000422919 (* 1 = 0.000422919 loss)\nI0819 13:00:43.759287 17829 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0819 13:03:02.886310 17829 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:04:23.212394 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0819 13:04:23.212757 17829 solver.cpp:404]     Test net output #1: loss = 0.384177 (* 1 = 0.384177 loss)\nI0819 13:04:24.518620 17829 solver.cpp:228] Iteration 73200, loss = 0.000356413\nI0819 13:04:24.518662 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:04:24.518678 17829 solver.cpp:244]     Train net output #1: loss = 0.000356237 (* 1 = 0.000356237 loss)\nI0819 13:04:24.627255 17829 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0819 13:06:43.771510 17829 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:08:04.067134 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 13:08:04.067499 17829 solver.cpp:404]     Test net output #1: loss = 0.382591 (* 1 = 0.382591 loss)\nI0819 13:08:05.372875 17829 
solver.cpp:228] Iteration 73300, loss = 0.00042619\nI0819 13:08:05.372918 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:08:05.372936 17829 solver.cpp:244]     Train net output #1: loss = 0.000426014 (* 1 = 0.000426014 loss)\nI0819 13:08:05.482071 17829 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0819 13:10:24.577116 17829 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 13:11:44.865967 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 13:11:44.866327 17829 solver.cpp:404]     Test net output #1: loss = 0.384269 (* 1 = 0.384269 loss)\nI0819 13:11:46.171871 17829 solver.cpp:228] Iteration 73400, loss = 0.000457139\nI0819 13:11:46.171914 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:11:46.171931 17829 solver.cpp:244]     Train net output #1: loss = 0.000456964 (* 1 = 0.000456964 loss)\nI0819 13:11:46.282670 17829 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0819 13:14:05.515517 17829 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 13:15:25.847754 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 13:15:25.848125 17829 solver.cpp:404]     Test net output #1: loss = 0.382649 (* 1 = 0.382649 loss)\nI0819 13:15:27.154747 17829 solver.cpp:228] Iteration 73500, loss = 0.000444807\nI0819 13:15:27.154790 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:15:27.154805 17829 solver.cpp:244]     Train net output #1: loss = 0.000444631 (* 1 = 0.000444631 loss)\nI0819 13:15:27.259516 17829 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0819 13:17:46.402532 17829 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 13:19:06.737212 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 13:19:06.737573 17829 solver.cpp:404]     Test net output #1: loss = 0.384276 (* 1 = 0.384276 loss)\nI0819 13:19:08.043066 17829 solver.cpp:228] Iteration 73600, loss = 0.00046946\nI0819 13:19:08.043109 17829 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 13:19:08.043125 17829 solver.cpp:244]     Train net output #1: loss = 0.000469285 (* 1 = 0.000469285 loss)\nI0819 13:19:08.150452 17829 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0819 13:21:27.281975 17829 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 13:22:47.586529 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 13:22:47.586890 17829 solver.cpp:404]     Test net output #1: loss = 0.38267 (* 1 = 0.38267 loss)\nI0819 13:22:48.892922 17829 solver.cpp:228] Iteration 73700, loss = 0.000440882\nI0819 13:22:48.892966 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:22:48.892983 17829 solver.cpp:244]     Train net output #1: loss = 0.000440706 (* 1 = 0.000440706 loss)\nI0819 13:22:48.994976 17829 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0819 13:25:08.147178 17829 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 13:26:29.044697 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 13:26:29.045056 17829 solver.cpp:404]     Test net output #1: loss = 0.384313 (* 1 = 0.384313 loss)\nI0819 13:26:30.355695 17829 solver.cpp:228] Iteration 73800, loss = 0.000411799\nI0819 13:26:30.355759 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:26:30.355777 17829 solver.cpp:244]     Train net output #1: loss = 0.000411623 (* 1 = 0.000411623 loss)\nI0819 13:26:30.459656 17829 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0819 13:28:49.604151 17829 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 13:30:09.936223 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 13:30:09.936592 17829 solver.cpp:404]     Test net output #1: loss = 0.382763 (* 1 = 0.382763 loss)\nI0819 13:30:11.242249 17829 solver.cpp:228] Iteration 73900, loss = 0.000442029\nI0819 13:30:11.242293 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:30:11.242311 17829 solver.cpp:244]     Train net output #1: loss = 0.000441854 
(* 1 = 0.000441854 loss)\nI0819 13:30:11.347378 17829 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0819 13:32:30.598057 17829 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 13:33:50.914718 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 13:33:50.915077 17829 solver.cpp:404]     Test net output #1: loss = 0.3844 (* 1 = 0.3844 loss)\nI0819 13:33:52.222091 17829 solver.cpp:228] Iteration 74000, loss = 0.000486586\nI0819 13:33:52.222136 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:33:52.222153 17829 solver.cpp:244]     Train net output #1: loss = 0.00048641 (* 1 = 0.00048641 loss)\nI0819 13:33:52.328246 17829 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0819 13:36:11.451258 17829 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 13:37:31.741971 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0819 13:37:31.742341 17829 solver.cpp:404]     Test net output #1: loss = 0.382815 (* 1 = 0.382815 loss)\nI0819 13:37:33.048792 17829 solver.cpp:228] Iteration 74100, loss = 0.000485593\nI0819 13:37:33.048840 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:37:33.048856 17829 solver.cpp:244]     Train net output #1: loss = 0.000485418 (* 1 = 0.000485418 loss)\nI0819 13:37:33.151310 17829 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0819 13:39:52.403574 17829 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 13:41:12.727671 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 13:41:12.728039 17829 solver.cpp:404]     Test net output #1: loss = 0.384443 (* 1 = 0.384443 loss)\nI0819 13:41:14.033885 17829 solver.cpp:228] Iteration 74200, loss = 0.000463261\nI0819 13:41:14.033931 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:41:14.033946 17829 solver.cpp:244]     Train net output #1: loss = 0.000463086 (* 1 = 0.000463086 loss)\nI0819 13:41:14.145378 17829 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0819 
13:43:33.179338 17829 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 13:44:53.472813 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90348\nI0819 13:44:53.473175 17829 solver.cpp:404]     Test net output #1: loss = 0.382895 (* 1 = 0.382895 loss)\nI0819 13:44:54.778712 17829 solver.cpp:228] Iteration 74300, loss = 0.000402657\nI0819 13:44:54.778758 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:44:54.778774 17829 solver.cpp:244]     Train net output #1: loss = 0.000402481 (* 1 = 0.000402481 loss)\nI0819 13:44:54.883174 17829 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0819 13:47:13.978147 17829 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 13:48:34.275362 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 13:48:34.275739 17829 solver.cpp:404]     Test net output #1: loss = 0.384528 (* 1 = 0.384528 loss)\nI0819 13:48:35.582020 17829 solver.cpp:228] Iteration 74400, loss = 0.00047429\nI0819 13:48:35.582067 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:48:35.582083 17829 solver.cpp:244]     Train net output #1: loss = 0.000474114 (* 1 = 0.000474114 loss)\nI0819 13:48:35.690620 17829 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0819 13:50:54.710686 17829 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 13:52:15.028950 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 13:52:15.029302 17829 solver.cpp:404]     Test net output #1: loss = 0.38295 (* 1 = 0.38295 loss)\nI0819 13:52:16.335408 17829 solver.cpp:228] Iteration 74500, loss = 0.000415135\nI0819 13:52:16.335455 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:52:16.335472 17829 solver.cpp:244]     Train net output #1: loss = 0.000414959 (* 1 = 0.000414959 loss)\nI0819 13:52:16.445016 17829 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0819 13:54:35.502074 17829 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 13:55:55.834489 17829 
solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 13:55:55.834854 17829 solver.cpp:404]     Test net output #1: loss = 0.384596 (* 1 = 0.384596 loss)\nI0819 13:55:57.140449 17829 solver.cpp:228] Iteration 74600, loss = 0.00039202\nI0819 13:55:57.140496 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:55:57.140511 17829 solver.cpp:244]     Train net output #1: loss = 0.000391844 (* 1 = 0.000391844 loss)\nI0819 13:55:57.246754 17829 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0819 13:58:16.259290 17829 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 13:59:36.568364 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 13:59:36.568706 17829 solver.cpp:404]     Test net output #1: loss = 0.383168 (* 1 = 0.383168 loss)\nI0819 13:59:37.874593 17829 solver.cpp:228] Iteration 74700, loss = 0.000435969\nI0819 13:59:37.874639 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:59:37.874655 17829 solver.cpp:244]     Train net output #1: loss = 0.000435793 (* 1 = 0.000435793 loss)\nI0819 13:59:37.983549 17829 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0819 14:01:57.090570 17829 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:03:18.080015 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 14:03:18.080359 17829 solver.cpp:404]     Test net output #1: loss = 0.38481 (* 1 = 0.38481 loss)\nI0819 14:03:19.389652 17829 solver.cpp:228] Iteration 74800, loss = 0.000400451\nI0819 14:03:19.389714 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:03:19.389730 17829 solver.cpp:244]     Train net output #1: loss = 0.000400276 (* 1 = 0.000400276 loss)\nI0819 14:03:19.485669 17829 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0819 14:05:38.200809 17829 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 14:06:59.326184 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 14:06:59.326496 17829 solver.cpp:404]     Test 
net output #1: loss = 0.383345 (* 1 = 0.383345 loss)\nI0819 14:07:00.635025 17829 solver.cpp:228] Iteration 74900, loss = 0.000467747\nI0819 14:07:00.635084 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:07:00.635102 17829 solver.cpp:244]     Train net output #1: loss = 0.000467571 (* 1 = 0.000467571 loss)\nI0819 14:07:00.730474 17829 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0819 14:09:19.433989 17829 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 14:10:40.459231 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90324\nI0819 14:10:40.459544 17829 solver.cpp:404]     Test net output #1: loss = 0.384928 (* 1 = 0.384928 loss)\nI0819 14:10:41.768210 17829 solver.cpp:228] Iteration 75000, loss = 0.000401047\nI0819 14:10:41.768267 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:10:41.768283 17829 solver.cpp:244]     Train net output #1: loss = 0.000400871 (* 1 = 0.000400871 loss)\nI0819 14:10:41.868934 17829 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0819 14:13:00.513087 17829 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 14:14:21.702966 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 14:14:21.703274 17829 solver.cpp:404]     Test net output #1: loss = 0.383457 (* 1 = 0.383457 loss)\nI0819 14:14:23.011734 17829 solver.cpp:228] Iteration 75100, loss = 0.000449639\nI0819 14:14:23.011792 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:14:23.011809 17829 solver.cpp:244]     Train net output #1: loss = 0.000449464 (* 1 = 0.000449464 loss)\nI0819 14:14:23.106638 17829 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0819 14:16:41.674616 17829 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 14:18:02.849200 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 14:18:02.849531 17829 solver.cpp:404]     Test net output #1: loss = 0.38498 (* 1 = 0.38498 loss)\nI0819 14:18:04.157927 17829 solver.cpp:228] Iteration 
75200, loss = 0.000444792\nI0819 14:18:04.157979 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:18:04.157996 17829 solver.cpp:244]     Train net output #1: loss = 0.000444617 (* 1 = 0.000444617 loss)\nI0819 14:18:04.258353 17829 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0819 14:20:22.766206 17829 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 14:21:43.938431 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 14:21:43.938756 17829 solver.cpp:404]     Test net output #1: loss = 0.38348 (* 1 = 0.38348 loss)\nI0819 14:21:45.247413 17829 solver.cpp:228] Iteration 75300, loss = 0.00050678\nI0819 14:21:45.247469 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:21:45.247488 17829 solver.cpp:244]     Train net output #1: loss = 0.000506604 (* 1 = 0.000506604 loss)\nI0819 14:21:45.348851 17829 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0819 14:24:03.908962 17829 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 14:25:25.094514 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 14:25:25.094820 17829 solver.cpp:404]     Test net output #1: loss = 0.385123 (* 1 = 0.385123 loss)\nI0819 14:25:26.403816 17829 solver.cpp:228] Iteration 75400, loss = 0.000399778\nI0819 14:25:26.403873 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:25:26.403890 17829 solver.cpp:244]     Train net output #1: loss = 0.000399602 (* 1 = 0.000399602 loss)\nI0819 14:25:26.500769 17829 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0819 14:27:45.073599 17829 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 14:29:05.880080 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 14:29:05.880409 17829 solver.cpp:404]     Test net output #1: loss = 0.383599 (* 1 = 0.383599 loss)\nI0819 14:29:07.188449 17829 solver.cpp:228] Iteration 75500, loss = 0.000367831\nI0819 14:29:07.188505 17829 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0819 14:29:07.188521 17829 solver.cpp:244]     Train net output #1: loss = 0.000367655 (* 1 = 0.000367655 loss)\nI0819 14:29:07.283526 17829 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0819 14:31:25.831404 17829 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 14:32:46.704222 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 14:32:46.704552 17829 solver.cpp:404]     Test net output #1: loss = 0.385176 (* 1 = 0.385176 loss)\nI0819 14:32:48.012729 17829 solver.cpp:228] Iteration 75600, loss = 0.000439571\nI0819 14:32:48.012785 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:32:48.012801 17829 solver.cpp:244]     Train net output #1: loss = 0.000439395 (* 1 = 0.000439395 loss)\nI0819 14:32:48.112905 17829 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0819 14:35:06.724951 17829 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 14:36:27.564730 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 14:36:27.565022 17829 solver.cpp:404]     Test net output #1: loss = 0.383673 (* 1 = 0.383673 loss)\nI0819 14:36:28.873234 17829 solver.cpp:228] Iteration 75700, loss = 0.000393722\nI0819 14:36:28.873291 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:36:28.873308 17829 solver.cpp:244]     Train net output #1: loss = 0.000393547 (* 1 = 0.000393547 loss)\nI0819 14:36:28.970361 17829 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0819 14:38:47.555974 17829 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 14:40:08.395978 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 14:40:08.396306 17829 solver.cpp:404]     Test net output #1: loss = 0.385209 (* 1 = 0.385209 loss)\nI0819 14:40:09.704797 17829 solver.cpp:228] Iteration 75800, loss = 0.000420577\nI0819 14:40:09.704852 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:40:09.704869 17829 solver.cpp:244]     Train net output #1: loss = 0.000420402 (* 1 = 0.000420402 
loss)\nI0819 14:40:09.805939 17829 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0819 14:42:28.371551 17829 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 14:43:49.224413 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90296\nI0819 14:43:49.224731 17829 solver.cpp:404]     Test net output #1: loss = 0.383683 (* 1 = 0.383683 loss)\nI0819 14:43:50.533617 17829 solver.cpp:228] Iteration 75900, loss = 0.000408395\nI0819 14:43:50.533675 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:43:50.533692 17829 solver.cpp:244]     Train net output #1: loss = 0.00040822 (* 1 = 0.00040822 loss)\nI0819 14:43:50.629878 17829 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0819 14:46:09.243816 17829 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 14:47:30.059216 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 14:47:30.059489 17829 solver.cpp:404]     Test net output #1: loss = 0.385259 (* 1 = 0.385259 loss)\nI0819 14:47:31.368237 17829 solver.cpp:228] Iteration 76000, loss = 0.000429986\nI0819 14:47:31.368295 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:47:31.368314 17829 solver.cpp:244]     Train net output #1: loss = 0.00042981 (* 1 = 0.00042981 loss)\nI0819 14:47:31.468623 17829 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0819 14:49:49.988914 17829 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 14:51:10.844207 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 14:51:10.844513 17829 solver.cpp:404]     Test net output #1: loss = 0.383776 (* 1 = 0.383776 loss)\nI0819 14:51:12.153014 17829 solver.cpp:228] Iteration 76100, loss = 0.000470626\nI0819 14:51:12.153074 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:51:12.153090 17829 solver.cpp:244]     Train net output #1: loss = 0.00047045 (* 1 = 0.00047045 loss)\nI0819 14:51:12.252447 17829 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0819 14:53:30.844862 17829 
solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 14:54:52.046847 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 14:54:52.047158 17829 solver.cpp:404]     Test net output #1: loss = 0.385364 (* 1 = 0.385364 loss)\nI0819 14:54:53.356390 17829 solver.cpp:228] Iteration 76200, loss = 0.000429267\nI0819 14:54:53.356446 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:54:53.356463 17829 solver.cpp:244]     Train net output #1: loss = 0.000429092 (* 1 = 0.000429092 loss)\nI0819 14:54:53.455039 17829 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0819 14:57:12.073647 17829 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 14:58:33.289168 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90276\nI0819 14:58:33.289490 17829 solver.cpp:404]     Test net output #1: loss = 0.38383 (* 1 = 0.38383 loss)\nI0819 14:58:34.598258 17829 solver.cpp:228] Iteration 76300, loss = 0.00047262\nI0819 14:58:34.598315 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:58:34.598331 17829 solver.cpp:244]     Train net output #1: loss = 0.000472444 (* 1 = 0.000472444 loss)\nI0819 14:58:34.692267 17829 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0819 15:00:53.354790 17829 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:02:14.554965 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 15:02:14.555352 17829 solver.cpp:404]     Test net output #1: loss = 0.385373 (* 1 = 0.385373 loss)\nI0819 15:02:15.865017 17829 solver.cpp:228] Iteration 76400, loss = 0.000414465\nI0819 15:02:15.865073 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:02:15.865090 17829 solver.cpp:244]     Train net output #1: loss = 0.000414289 (* 1 = 0.000414289 loss)\nI0819 15:02:15.957532 17829 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0819 15:04:34.536845 17829 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:05:55.753234 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.90268\nI0819 15:05:55.753530 17829 solver.cpp:404]     Test net output #1: loss = 0.383828 (* 1 = 0.383828 loss)\nI0819 15:05:57.061552 17829 solver.cpp:228] Iteration 76500, loss = 0.000397253\nI0819 15:05:57.061611 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:05:57.061630 17829 solver.cpp:244]     Train net output #1: loss = 0.000397077 (* 1 = 0.000397077 loss)\nI0819 15:05:57.160182 17829 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0819 15:08:15.783159 17829 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 15:09:37.019930 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 15:09:37.020231 17829 solver.cpp:404]     Test net output #1: loss = 0.385493 (* 1 = 0.385493 loss)\nI0819 15:09:38.329668 17829 solver.cpp:228] Iteration 76600, loss = 0.000453262\nI0819 15:09:38.329728 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:09:38.329744 17829 solver.cpp:244]     Train net output #1: loss = 0.000453086 (* 1 = 0.000453086 loss)\nI0819 15:09:38.427064 17829 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0819 15:11:56.932543 17829 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 15:13:18.151749 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90276\nI0819 15:13:18.152124 17829 solver.cpp:404]     Test net output #1: loss = 0.383988 (* 1 = 0.383988 loss)\nI0819 15:13:19.460597 17829 solver.cpp:228] Iteration 76700, loss = 0.000445355\nI0819 15:13:19.460659 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:13:19.460676 17829 solver.cpp:244]     Train net output #1: loss = 0.000445179 (* 1 = 0.000445179 loss)\nI0819 15:13:19.553405 17829 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0819 15:15:38.008850 17829 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 15:16:59.223171 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 15:16:59.223469 17829 solver.cpp:404]     Test net output #1: loss = 
0.385624 (* 1 = 0.385624 loss)\nI0819 15:17:00.531858 17829 solver.cpp:228] Iteration 76800, loss = 0.000409669\nI0819 15:17:00.531918 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:17:00.531934 17829 solver.cpp:244]     Train net output #1: loss = 0.000409493 (* 1 = 0.000409493 loss)\nI0819 15:17:00.626230 17829 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0819 15:19:19.106851 17829 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 15:20:40.308970 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 15:20:40.309273 17829 solver.cpp:404]     Test net output #1: loss = 0.384066 (* 1 = 0.384066 loss)\nI0819 15:20:41.617239 17829 solver.cpp:228] Iteration 76900, loss = 0.000463797\nI0819 15:20:41.617303 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:20:41.617322 17829 solver.cpp:244]     Train net output #1: loss = 0.000463621 (* 1 = 0.000463621 loss)\nI0819 15:20:41.710285 17829 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0819 15:23:00.163301 17829 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 15:24:21.331403 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90316\nI0819 15:24:21.331723 17829 solver.cpp:404]     Test net output #1: loss = 0.385628 (* 1 = 0.385628 loss)\nI0819 15:24:22.639950 17829 solver.cpp:228] Iteration 77000, loss = 0.000459707\nI0819 15:24:22.640010 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:24:22.640027 17829 solver.cpp:244]     Train net output #1: loss = 0.000459531 (* 1 = 0.000459531 loss)\nI0819 15:24:22.739588 17829 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0819 15:26:41.339125 17829 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 15:28:02.528023 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90264\nI0819 15:28:02.528331 17829 solver.cpp:404]     Test net output #1: loss = 0.384111 (* 1 = 0.384111 loss)\nI0819 15:28:03.836998 17829 solver.cpp:228] Iteration 77100, loss = 
0.000387646\nI0819 15:28:03.837056 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:28:03.837071 17829 solver.cpp:244]     Train net output #1: loss = 0.000387471 (* 1 = 0.000387471 loss)\nI0819 15:28:03.931402 17829 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0819 15:30:22.380007 17829 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 15:31:43.597786 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90276\nI0819 15:31:43.598107 17829 solver.cpp:404]     Test net output #1: loss = 0.385686 (* 1 = 0.385686 loss)\nI0819 15:31:44.907469 17829 solver.cpp:228] Iteration 77200, loss = 0.000433133\nI0819 15:31:44.907526 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:31:44.907546 17829 solver.cpp:244]     Train net output #1: loss = 0.000432957 (* 1 = 0.000432957 loss)\nI0819 15:31:45.001574 17829 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0819 15:34:03.480387 17829 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 15:35:24.705868 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90264\nI0819 15:35:24.706161 17829 solver.cpp:404]     Test net output #1: loss = 0.384117 (* 1 = 0.384117 loss)\nI0819 15:35:26.015609 17829 solver.cpp:228] Iteration 77300, loss = 0.000411933\nI0819 15:35:26.015666 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:35:26.015682 17829 solver.cpp:244]     Train net output #1: loss = 0.000411757 (* 1 = 0.000411757 loss)\nI0819 15:35:26.108121 17829 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0819 15:37:44.671957 17829 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 15:39:05.849184 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 15:39:05.849531 17829 solver.cpp:404]     Test net output #1: loss = 0.385674 (* 1 = 0.385674 loss)\nI0819 15:39:07.157922 17829 solver.cpp:228] Iteration 77400, loss = 0.000467234\nI0819 15:39:07.157979 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
15:39:07.157994 17829 solver.cpp:244]     Train net output #1: loss = 0.000467058 (* 1 = 0.000467058 loss)\nI0819 15:39:07.258785 17829 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0819 15:41:25.984666 17829 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 15:42:47.191195 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 15:42:47.191552 17829 solver.cpp:404]     Test net output #1: loss = 0.384194 (* 1 = 0.384194 loss)\nI0819 15:42:48.500051 17829 solver.cpp:228] Iteration 77500, loss = 0.000440415\nI0819 15:42:48.500105 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:42:48.500123 17829 solver.cpp:244]     Train net output #1: loss = 0.000440239 (* 1 = 0.000440239 loss)\nI0819 15:42:48.597153 17829 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0819 15:45:07.246497 17829 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 15:46:28.519453 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 15:46:28.519779 17829 solver.cpp:404]     Test net output #1: loss = 0.38575 (* 1 = 0.38575 loss)\nI0819 15:46:29.828407 17829 solver.cpp:228] Iteration 77600, loss = 0.000520299\nI0819 15:46:29.828462 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:46:29.828480 17829 solver.cpp:244]     Train net output #1: loss = 0.000520123 (* 1 = 0.000520123 loss)\nI0819 15:46:29.927610 17829 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0819 15:48:48.495442 17829 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 15:50:09.764878 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 15:50:09.765209 17829 solver.cpp:404]     Test net output #1: loss = 0.384276 (* 1 = 0.384276 loss)\nI0819 15:50:11.074076 17829 solver.cpp:228] Iteration 77700, loss = 0.000397352\nI0819 15:50:11.074133 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:50:11.074149 17829 solver.cpp:244]     Train net output #1: loss = 0.000397176 (* 1 = 0.000397176 loss)\nI0819 
15:50:11.167155 17829 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0819 15:52:29.721452 17829 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 15:53:50.966919 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90312\nI0819 15:53:50.967211 17829 solver.cpp:404]     Test net output #1: loss = 0.385816 (* 1 = 0.385816 loss)\nI0819 15:53:52.276268 17829 solver.cpp:228] Iteration 77800, loss = 0.000457525\nI0819 15:53:52.276324 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:53:52.276341 17829 solver.cpp:244]     Train net output #1: loss = 0.00045735 (* 1 = 0.00045735 loss)\nI0819 15:53:52.372128 17829 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0819 15:56:10.856861 17829 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 15:57:31.937873 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 15:57:31.938159 17829 solver.cpp:404]     Test net output #1: loss = 0.384363 (* 1 = 0.384363 loss)\nI0819 15:57:33.246981 17829 solver.cpp:228] Iteration 77900, loss = 0.000427046\nI0819 15:57:33.247040 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:57:33.247056 17829 solver.cpp:244]     Train net output #1: loss = 0.00042687 (* 1 = 0.00042687 loss)\nI0819 15:57:33.340507 17829 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0819 15:59:51.930784 17829 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:01:12.928619 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 16:01:12.928947 17829 solver.cpp:404]     Test net output #1: loss = 0.386006 (* 1 = 0.386006 loss)\nI0819 16:01:14.237990 17829 solver.cpp:228] Iteration 78000, loss = 0.000397484\nI0819 16:01:14.238045 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:01:14.238061 17829 solver.cpp:244]     Train net output #1: loss = 0.000397308 (* 1 = 0.000397308 loss)\nI0819 16:01:14.336720 17829 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0819 16:03:32.879362 17829 
solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:04:53.982964 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90256\nI0819 16:04:53.983269 17829 solver.cpp:404]     Test net output #1: loss = 0.384416 (* 1 = 0.384416 loss)\nI0819 16:04:55.291571 17829 solver.cpp:228] Iteration 78100, loss = 0.000408314\nI0819 16:04:55.291626 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:04:55.291642 17829 solver.cpp:244]     Train net output #1: loss = 0.000408138 (* 1 = 0.000408138 loss)\nI0819 16:04:55.392344 17829 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0819 16:07:13.921660 17829 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 16:08:35.008828 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90284\nI0819 16:08:35.009131 17829 solver.cpp:404]     Test net output #1: loss = 0.385967 (* 1 = 0.385967 loss)\nI0819 16:08:36.317788 17829 solver.cpp:228] Iteration 78200, loss = 0.000349659\nI0819 16:08:36.317842 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:08:36.317859 17829 solver.cpp:244]     Train net output #1: loss = 0.000349484 (* 1 = 0.000349484 loss)\nI0819 16:08:36.411933 17829 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0819 16:10:55.032687 17829 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 16:12:16.218257 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 16:12:16.218602 17829 solver.cpp:404]     Test net output #1: loss = 0.384398 (* 1 = 0.384398 loss)\nI0819 16:12:17.527792 17829 solver.cpp:228] Iteration 78300, loss = 0.000484539\nI0819 16:12:17.527850 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:12:17.527865 17829 solver.cpp:244]     Train net output #1: loss = 0.000484363 (* 1 = 0.000484363 loss)\nI0819 16:12:17.624899 17829 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0819 16:14:36.562153 17829 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 16:15:57.429605 17829 solver.cpp:404]     Test net 
output #0: accuracy = 0.90308\nI0819 16:15:57.430021 17829 solver.cpp:404]     Test net output #1: loss = 0.385936 (* 1 = 0.385936 loss)\nI0819 16:15:58.739186 17829 solver.cpp:228] Iteration 78400, loss = 0.000434312\nI0819 16:15:58.739234 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:15:58.739251 17829 solver.cpp:244]     Train net output #1: loss = 0.000434136 (* 1 = 0.000434136 loss)\nI0819 16:15:58.842278 17829 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0819 16:18:17.957010 17829 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 16:19:38.271208 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 16:19:38.271586 17829 solver.cpp:404]     Test net output #1: loss = 0.384471 (* 1 = 0.384471 loss)\nI0819 16:19:39.577576 17829 solver.cpp:228] Iteration 78500, loss = 0.000425632\nI0819 16:19:39.577622 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:19:39.577638 17829 solver.cpp:244]     Train net output #1: loss = 0.000425456 (* 1 = 0.000425456 loss)\nI0819 16:19:39.683398 17829 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0819 16:21:58.621572 17829 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 16:23:18.955353 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 16:23:18.955720 17829 solver.cpp:404]     Test net output #1: loss = 0.386083 (* 1 = 0.386083 loss)\nI0819 16:23:20.262011 17829 solver.cpp:228] Iteration 78600, loss = 0.000396189\nI0819 16:23:20.262058 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:23:20.262073 17829 solver.cpp:244]     Train net output #1: loss = 0.000396013 (* 1 = 0.000396013 loss)\nI0819 16:23:20.363348 17829 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0819 16:25:38.803674 17829 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 16:26:59.112542 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 16:26:59.112890 17829 solver.cpp:404]     Test net output #1: loss = 
0.38466 (* 1 = 0.38466 loss)\nI0819 16:27:00.420300 17829 solver.cpp:228] Iteration 78700, loss = 0.000439697\nI0819 16:27:00.420346 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:27:00.420362 17829 solver.cpp:244]     Train net output #1: loss = 0.000439521 (* 1 = 0.000439521 loss)\nI0819 16:27:00.515544 17829 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0819 16:29:18.895301 17829 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 16:30:39.193747 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 16:30:39.194099 17829 solver.cpp:404]     Test net output #1: loss = 0.386172 (* 1 = 0.386172 loss)\nI0819 16:30:40.500535 17829 solver.cpp:228] Iteration 78800, loss = 0.000473243\nI0819 16:30:40.500581 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:30:40.500596 17829 solver.cpp:244]     Train net output #1: loss = 0.000473067 (* 1 = 0.000473067 loss)\nI0819 16:30:40.596720 17829 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0819 16:32:58.972357 17829 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 16:34:19.272049 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90292\nI0819 16:34:19.272416 17829 solver.cpp:404]     Test net output #1: loss = 0.384623 (* 1 = 0.384623 loss)\nI0819 16:34:20.578094 17829 solver.cpp:228] Iteration 78900, loss = 0.00044779\nI0819 16:34:20.578140 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:34:20.578156 17829 solver.cpp:244]     Train net output #1: loss = 0.000447614 (* 1 = 0.000447614 loss)\nI0819 16:34:20.677176 17829 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0819 16:36:39.065610 17829 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 16:37:59.371496 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90316\nI0819 16:37:59.371868 17829 solver.cpp:404]     Test net output #1: loss = 0.386186 (* 1 = 0.386186 loss)\nI0819 16:38:00.677675 17829 solver.cpp:228] Iteration 79000, loss = 
0.000431974\nI0819 16:38:00.677721 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:38:00.677736 17829 solver.cpp:244]     Train net output #1: loss = 0.000431798 (* 1 = 0.000431798 loss)\nI0819 16:38:00.773767 17829 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0819 16:40:19.115886 17829 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 16:41:39.428550 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 16:41:39.428926 17829 solver.cpp:404]     Test net output #1: loss = 0.384669 (* 1 = 0.384669 loss)\nI0819 16:41:40.736166 17829 solver.cpp:228] Iteration 79100, loss = 0.00045796\nI0819 16:41:40.736210 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:41:40.736227 17829 solver.cpp:244]     Train net output #1: loss = 0.000457785 (* 1 = 0.000457785 loss)\nI0819 16:41:40.836385 17829 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0819 16:43:59.219449 17829 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 16:45:19.522529 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 16:45:19.522897 17829 solver.cpp:404]     Test net output #1: loss = 0.386244 (* 1 = 0.386244 loss)\nI0819 16:45:20.829453 17829 solver.cpp:228] Iteration 79200, loss = 0.000489718\nI0819 16:45:20.829500 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:45:20.829516 17829 solver.cpp:244]     Train net output #1: loss = 0.000489543 (* 1 = 0.000489543 loss)\nI0819 16:45:20.925611 17829 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0819 16:47:39.213979 17829 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 16:48:59.536955 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 16:48:59.537302 17829 solver.cpp:404]     Test net output #1: loss = 0.384823 (* 1 = 0.384823 loss)\nI0819 16:49:00.842854 17829 solver.cpp:228] Iteration 79300, loss = 0.000439899\nI0819 16:49:00.842900 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
16:49:00.842916 17829 solver.cpp:244]     Train net output #1: loss = 0.000439723 (* 1 = 0.000439723 loss)\nI0819 16:49:00.943830 17829 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0819 16:51:19.251070 17829 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 16:52:39.569689 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0819 16:52:39.569999 17829 solver.cpp:404]     Test net output #1: loss = 0.386377 (* 1 = 0.386377 loss)\nI0819 16:52:40.876343 17829 solver.cpp:228] Iteration 79400, loss = 0.00048283\nI0819 16:52:40.876389 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:52:40.876405 17829 solver.cpp:244]     Train net output #1: loss = 0.000482654 (* 1 = 0.000482654 loss)\nI0819 16:52:40.983424 17829 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0819 16:54:59.353978 17829 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 16:56:19.678884 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0819 16:56:19.679174 17829 solver.cpp:404]     Test net output #1: loss = 0.384896 (* 1 = 0.384896 loss)\nI0819 16:56:20.984304 17829 solver.cpp:228] Iteration 79500, loss = 0.000415429\nI0819 16:56:20.984351 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:56:20.984367 17829 solver.cpp:244]     Train net output #1: loss = 0.000415253 (* 1 = 0.000415253 loss)\nI0819 16:56:21.086853 17829 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0819 16:58:39.402266 17829 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 16:59:59.710146 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90324\nI0819 16:59:59.710443 17829 solver.cpp:404]     Test net output #1: loss = 0.386415 (* 1 = 0.386415 loss)\nI0819 17:00:01.016563 17829 solver.cpp:228] Iteration 79600, loss = 0.000421943\nI0819 17:00:01.016611 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:00:01.016628 17829 solver.cpp:244]     Train net output #1: loss = 0.000421767 (* 1 = 0.000421767 
loss)\nI0819 17:00:01.122300 17829 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0819 17:02:19.407374 17829 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:03:39.720206 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9028\nI0819 17:03:39.720502 17829 solver.cpp:404]     Test net output #1: loss = 0.385001 (* 1 = 0.385001 loss)\nI0819 17:03:41.027349 17829 solver.cpp:228] Iteration 79700, loss = 0.000458865\nI0819 17:03:41.027393 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:03:41.027410 17829 solver.cpp:244]     Train net output #1: loss = 0.000458689 (* 1 = 0.000458689 loss)\nI0819 17:03:41.128252 17829 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0819 17:05:59.414636 17829 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 17:07:19.712056 17829 solver.cpp:404]     Test net output #0: accuracy = 0.90324\nI0819 17:07:19.712350 17829 solver.cpp:404]     Test net output #1: loss = 0.386511 (* 1 = 0.386511 loss)\nI0819 17:07:21.017668 17829 solver.cpp:228] Iteration 79800, loss = 0.000444946\nI0819 17:07:21.017714 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:07:21.017730 17829 solver.cpp:244]     Train net output #1: loss = 0.00044477 (* 1 = 0.00044477 loss)\nI0819 17:07:21.125447 17829 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0819 17:09:39.502955 17829 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 17:10:59.795351 17829 solver.cpp:404]     Test net output #0: accuracy = 0.903\nI0819 17:10:59.795670 17829 solver.cpp:404]     Test net output #1: loss = 0.384998 (* 1 = 0.384998 loss)\nI0819 17:11:01.100914 17829 solver.cpp:228] Iteration 79900, loss = 0.000406493\nI0819 17:11:01.100960 17829 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:11:01.100975 17829 solver.cpp:244]     Train net output #1: loss = 0.000406317 (* 1 = 0.000406317 loss)\nI0819 17:11:01.207397 17829 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0819 17:13:19.571498 17829 
solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Fig1a_iter_80000.caffemodel\nI0819 17:13:19.851524 17829 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Fig1a_iter_80000.solverstate\nI0819 17:13:20.292737 17829 solver.cpp:317] Iteration 80000, loss = 0.0004631\nI0819 17:13:20.292775 17829 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 17:14:40.585319 17829 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 17:14:40.585680 17829 solver.cpp:404]     Test net output #1: loss = 0.386595 (* 1 = 0.386595 loss)\nI0819 17:14:40.585692 17829 solver.cpp:322] Optimization Done.\nI0819 17:14:45.952630 17829 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35NestFig9",
    "content": "I0817 16:28:01.063012 17538 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:28:01.065336 17538 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:28:01.066763 17538 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:28:01.067975 17538 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:28:01.069188 17538 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:28:01.070420 17538 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:28:01.071646 17538 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:28:01.072872 17538 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:28:01.074100 17538 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:28:01.490356 17538 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35NestFig9\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\ntype: \"Nesterov\"\nI0817 16:28:01.494405 17538 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:28:01.511265 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:01.511338 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:01.512414 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:28:01.512470 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:28:01.512490 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 
16:28:01.512508 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:28:01.512528 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:28:01.512544 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:28:01.512562 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:28:01.512580 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:28:01.512600 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:28:01.512617 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:28:01.512636 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:28:01.512652 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:28:01.512672 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:28:01.512691 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:28:01.512711 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:28:01.512729 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:28:01.512747 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:28:01.512764 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L1_b8_cbr2_bn\nI0817 16:28:01.512784 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:28:01.512801 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:28:01.512836 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:28:01.512856 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:28:01.512879 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:28:01.512899 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:28:01.512919 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:28:01.512933 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:28:01.512953 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:28:01.512967 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:28:01.512984 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:28:01.513003 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:28:01.513022 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:28:01.513041 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:28:01.513059 17538 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:28:01.513077 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:28:01.513094 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:28:01.513123 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:28:01.513144 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:28:01.513161 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:28:01.513180 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:28:01.513198 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:28:01.513223 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:28:01.513240 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:28:01.513257 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:28:01.513274 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:28:01.513293 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:28:01.513311 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:28:01.513330 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 
16:28:01.513346 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:28:01.513366 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:28:01.513383 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:28:01.513401 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:28:01.513428 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:28:01.513449 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:28:01.513468 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:28:01.513485 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:28:01.513502 17538 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:28:01.515251 17538 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n 
   bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n 
   lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\nI0817 16:28:01.517328 17538 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:28:01.518541 17538 net.cpp:100] Creating Layer dataLayer\nI0817 16:28:01.518615 17538 net.cpp:408] dataLayer -> data_top\nI0817 16:28:01.518800 17538 net.cpp:408] dataLayer -> label\nI0817 16:28:01.518916 17538 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:28:01.528430 17543 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:28:01.550786 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:01.557972 17538 net.cpp:150] Setting up dataLayer\nI0817 16:28:01.558033 17538 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:28:01.558045 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.558051 17538 net.cpp:165] Memory required for data: 1536500\nI0817 16:28:01.558068 17538 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:28:01.558082 17538 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:28:01.558090 17538 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:28:01.558122 17538 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:28:01.558141 17538 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:28:01.558220 17538 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:28:01.558233 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.558239 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.558244 17538 net.cpp:165] Memory required for data: 1537500\nI0817 16:28:01.558250 17538 layer_factory.hpp:77] Creating layer pre_conv\nI0817 
16:28:01.558312 17538 net.cpp:100] Creating Layer pre_conv\nI0817 16:28:01.558324 17538 net.cpp:434] pre_conv <- data_top\nI0817 16:28:01.558337 17538 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:28:01.560050 17538 net.cpp:150] Setting up pre_conv\nI0817 16:28:01.560079 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.560086 17538 net.cpp:165] Memory required for data: 9729500\nI0817 16:28:01.560153 17538 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:28:01.560232 17538 net.cpp:100] Creating Layer pre_bn\nI0817 16:28:01.560245 17538 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:28:01.560257 17538 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:28:01.560586 17544 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:01.560712 17538 net.cpp:150] Setting up pre_bn\nI0817 16:28:01.560731 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.560737 17538 net.cpp:165] Memory required for data: 17921500\nI0817 16:28:01.560755 17538 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:01.560803 17538 net.cpp:100] Creating Layer pre_scale\nI0817 16:28:01.560813 17538 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:28:01.560822 17538 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:28:01.560988 17538 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:01.561246 17538 net.cpp:150] Setting up pre_scale\nI0817 16:28:01.561262 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.561269 17538 net.cpp:165] Memory required for data: 26113500\nI0817 16:28:01.561280 17538 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:28:01.561326 17538 net.cpp:100] Creating Layer pre_relu\nI0817 16:28:01.561334 17538 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:28:01.561343 17538 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:28:01.561353 17538 net.cpp:150] Setting up pre_relu\nI0817 16:28:01.561362 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.561367 17538 net.cpp:165] Memory 
required for data: 34305500\nI0817 16:28:01.561372 17538 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:28:01.561383 17538 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:28:01.561388 17538 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:28:01.561395 17538 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:28:01.561405 17538 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:28:01.561453 17538 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:28:01.561465 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.561472 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.561477 17538 net.cpp:165] Memory required for data: 50689500\nI0817 16:28:01.561483 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:28:01.561497 17538 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:28:01.561504 17538 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:28:01.561513 17538 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:28:01.561815 17538 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:28:01.561830 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.561836 17538 net.cpp:165] Memory required for data: 58881500\nI0817 16:28:01.561856 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:28:01.561870 17538 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:28:01.561877 17538 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:28:01.561888 17538 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:28:01.562125 17538 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:28:01.562139 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.562144 17538 net.cpp:165] Memory required for data: 67073500\nI0817 16:28:01.562155 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:01.562165 17538 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:28:01.562170 17538 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:28:01.562177 17538 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.562230 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:01.562368 17538 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:28:01.562381 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.562386 17538 net.cpp:165] Memory required for data: 75265500\nI0817 16:28:01.562397 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:28:01.562415 17538 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:28:01.562422 17538 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:28:01.562428 17538 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.562438 17538 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:28:01.562445 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.562450 17538 net.cpp:165] Memory required for data: 83457500\nI0817 16:28:01.562455 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:28:01.562469 17538 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:28:01.562475 17538 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:28:01.562489 17538 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:28:01.562790 17538 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:28:01.562804 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.562810 17538 net.cpp:165] Memory required for data: 91649500\nI0817 16:28:01.562819 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:28:01.562831 17538 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:28:01.562837 17538 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:28:01.562846 17538 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:28:01.563076 17538 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 
16:28:01.563089 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563094 17538 net.cpp:165] Memory required for data: 99841500\nI0817 16:28:01.563114 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:01.563124 17538 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:28:01.563129 17538 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:28:01.563141 17538 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.563196 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:01.563334 17538 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:28:01.563349 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563354 17538 net.cpp:165] Memory required for data: 108033500\nI0817 16:28:01.563362 17538 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:28:01.563415 17538 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:28:01.563426 17538 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:28:01.563433 17538 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:28:01.563441 17538 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:28:01.563514 17538 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:28:01.563529 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563534 17538 net.cpp:165] Memory required for data: 116225500\nI0817 16:28:01.563545 17538 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:28:01.563555 17538 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:28:01.563560 17538 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:28:01.563566 17538 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.563576 17538 net.cpp:150] Setting up L1_b1_relu\nI0817 16:28:01.563583 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563588 17538 net.cpp:165] Memory required for data: 124417500\nI0817 16:28:01.563593 17538 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.563602 17538 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.563607 17538 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:28:01.563617 17538 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:01.563627 17538 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:01.563668 17538 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.563683 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563697 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.563704 17538 net.cpp:165] Memory required for data: 140801500\nI0817 16:28:01.563709 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:28:01.563720 17538 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:28:01.563726 17538 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:01.563735 17538 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:28:01.564043 17538 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:28:01.564059 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.564064 17538 net.cpp:165] Memory required for data: 148993500\nI0817 16:28:01.564072 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:28:01.564085 17538 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:28:01.564092 17538 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:28:01.564100 17538 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:28:01.564350 17538 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:28:01.564365 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.564370 17538 net.cpp:165] Memory required for data: 157185500\nI0817 16:28:01.564381 17538 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:01.564393 17538 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:28:01.564399 17538 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:28:01.564407 17538 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.564461 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:01.564599 17538 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:28:01.564612 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.564617 17538 net.cpp:165] Memory required for data: 165377500\nI0817 16:28:01.564626 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:28:01.564635 17538 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:28:01.564640 17538 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:28:01.564651 17538 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.564661 17538 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:28:01.564668 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.564673 17538 net.cpp:165] Memory required for data: 173569500\nI0817 16:28:01.564677 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:28:01.564692 17538 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:28:01.564697 17538 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:28:01.564705 17538 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:28:01.565009 17538 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:28:01.565023 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565028 17538 net.cpp:165] Memory required for data: 181761500\nI0817 16:28:01.565037 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:28:01.565049 17538 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:28:01.565057 17538 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:28:01.565063 17538 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0817 16:28:01.565304 17538 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:28:01.565318 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565323 17538 net.cpp:165] Memory required for data: 189953500\nI0817 16:28:01.565342 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:01.565351 17538 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:28:01.565361 17538 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:28:01.565368 17538 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.565420 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:01.565558 17538 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:28:01.565572 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565577 17538 net.cpp:165] Memory required for data: 198145500\nI0817 16:28:01.565585 17538 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:28:01.565604 17538 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:28:01.565611 17538 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:28:01.565618 17538 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:01.565629 17538 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:28:01.565660 17538 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:28:01.565671 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565676 17538 net.cpp:165] Memory required for data: 206337500\nI0817 16:28:01.565682 17538 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:28:01.565693 17538 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:28:01.565699 17538 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:28:01.565706 17538 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:28:01.565716 17538 net.cpp:150] Setting up L1_b2_relu\nI0817 16:28:01.565722 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:28:01.565727 17538 net.cpp:165] Memory required for data: 214529500\nI0817 16:28:01.565732 17538 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.565738 17538 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.565743 17538 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:28:01.565752 17538 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:01.565759 17538 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:01.565804 17538 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.565815 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565822 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.565826 17538 net.cpp:165] Memory required for data: 230913500\nI0817 16:28:01.565832 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:28:01.565846 17538 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:28:01.565852 17538 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:01.565861 17538 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:28:01.566201 17538 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:28:01.566216 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.566222 17538 net.cpp:165] Memory required for data: 239105500\nI0817 16:28:01.566231 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:28:01.566244 17538 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:28:01.566251 17538 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:28:01.566258 17538 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:28:01.566498 17538 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:28:01.566514 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:28:01.566519 17538 net.cpp:165] Memory required for data: 247297500\nI0817 16:28:01.566529 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:01.566539 17538 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:28:01.566545 17538 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:28:01.566551 17538 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.566603 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:01.566741 17538 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:28:01.566754 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.566759 17538 net.cpp:165] Memory required for data: 255489500\nI0817 16:28:01.566768 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:28:01.566779 17538 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:28:01.566786 17538 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:28:01.566795 17538 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.566805 17538 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:28:01.566819 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.566824 17538 net.cpp:165] Memory required for data: 263681500\nI0817 16:28:01.566829 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:28:01.566840 17538 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:28:01.566846 17538 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:28:01.566857 17538 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:28:01.567178 17538 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:28:01.567193 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567198 17538 net.cpp:165] Memory required for data: 271873500\nI0817 16:28:01.567206 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:28:01.567221 17538 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:28:01.567229 17538 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0817 16:28:01.567236 17538 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:28:01.567471 17538 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:28:01.567486 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567490 17538 net.cpp:165] Memory required for data: 280065500\nI0817 16:28:01.567502 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:01.567509 17538 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:28:01.567515 17538 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:28:01.567523 17538 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.567577 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:01.567718 17538 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:28:01.567731 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567736 17538 net.cpp:165] Memory required for data: 288257500\nI0817 16:28:01.567745 17538 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:28:01.567754 17538 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:28:01.567760 17538 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:28:01.567767 17538 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:01.567777 17538 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:28:01.567808 17538 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:28:01.567817 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567822 17538 net.cpp:165] Memory required for data: 296449500\nI0817 16:28:01.567827 17538 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:28:01.567838 17538 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:28:01.567844 17538 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:28:01.567852 17538 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.567859 17538 net.cpp:150] Setting up L1_b3_relu\nI0817 
16:28:01.567867 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567872 17538 net.cpp:165] Memory required for data: 304641500\nI0817 16:28:01.567876 17538 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.567883 17538 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.567888 17538 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:28:01.567895 17538 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:01.567905 17538 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:01.567950 17538 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.567962 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567968 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.567973 17538 net.cpp:165] Memory required for data: 321025500\nI0817 16:28:01.567978 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:28:01.567992 17538 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:28:01.567998 17538 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:01.568014 17538 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:28:01.568327 17538 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:28:01.568342 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.568347 17538 net.cpp:165] Memory required for data: 329217500\nI0817 16:28:01.568356 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:28:01.568368 17538 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:28:01.568374 17538 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:28:01.568383 17538 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:28:01.568622 17538 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 
16:28:01.568636 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.568641 17538 net.cpp:165] Memory required for data: 337409500\nI0817 16:28:01.568651 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:01.568660 17538 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:28:01.568666 17538 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:28:01.568673 17538 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.568729 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:01.568869 17538 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:28:01.568883 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.568888 17538 net.cpp:165] Memory required for data: 345601500\nI0817 16:28:01.568897 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:28:01.568905 17538 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:28:01.568912 17538 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:28:01.568922 17538 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.568931 17538 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:28:01.568938 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.568943 17538 net.cpp:165] Memory required for data: 353793500\nI0817 16:28:01.568948 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:28:01.568961 17538 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:28:01.568969 17538 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:28:01.568976 17538 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:28:01.569290 17538 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:28:01.569305 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.569310 17538 net.cpp:165] Memory required for data: 361985500\nI0817 16:28:01.569319 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:28:01.569329 17538 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0817 16:28:01.569334 17538 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:28:01.569345 17538 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:28:01.569592 17538 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:28:01.569607 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.569612 17538 net.cpp:165] Memory required for data: 370177500\nI0817 16:28:01.569622 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:01.569631 17538 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:28:01.569638 17538 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:28:01.569644 17538 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.569700 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:01.569834 17538 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:28:01.569847 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.569852 17538 net.cpp:165] Memory required for data: 378369500\nI0817 16:28:01.569861 17538 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:28:01.569874 17538 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:28:01.569880 17538 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:28:01.569887 17538 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:01.569895 17538 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:28:01.569936 17538 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:28:01.569947 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.569952 17538 net.cpp:165] Memory required for data: 386561500\nI0817 16:28:01.569957 17538 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:28:01.569965 17538 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:28:01.569972 17538 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:28:01.569981 17538 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0817 16:28:01.569991 17538 net.cpp:150] Setting up L1_b4_relu\nI0817 16:28:01.569999 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.570003 17538 net.cpp:165] Memory required for data: 394753500\nI0817 16:28:01.570009 17538 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.570016 17538 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.570021 17538 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:28:01.570031 17538 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:01.570041 17538 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:01.570083 17538 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.570094 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.570107 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.570113 17538 net.cpp:165] Memory required for data: 411137500\nI0817 16:28:01.570118 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:28:01.570133 17538 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:28:01.570139 17538 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:01.570148 17538 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:28:01.570461 17538 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:28:01.570475 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.570480 17538 net.cpp:165] Memory required for data: 419329500\nI0817 16:28:01.570504 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:28:01.570514 17538 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:28:01.570520 17538 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:28:01.570531 17538 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0817 16:28:01.570772 17538 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0817 16:28:01.570785 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.570791 17538 net.cpp:165] Memory required for data: 427521500\nI0817 16:28:01.570801 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:01.570811 17538 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:28:01.570816 17538 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:28:01.570823 17538 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.570878 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:01.571017 17538 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:28:01.571030 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.571035 17538 net.cpp:165] Memory required for data: 435713500\nI0817 16:28:01.571044 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:28:01.571053 17538 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:28:01.571058 17538 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:28:01.571069 17538 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.571079 17538 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:28:01.571085 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.571090 17538 net.cpp:165] Memory required for data: 443905500\nI0817 16:28:01.571095 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:28:01.571115 17538 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:28:01.571130 17538 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:28:01.571138 17538 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:28:01.571457 17538 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:28:01.571471 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.571476 17538 net.cpp:165] Memory required for data: 452097500\nI0817 16:28:01.571485 17538 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:28:01.571498 17538 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:28:01.571504 17538 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:28:01.571513 17538 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:28:01.571749 17538 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:28:01.571765 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.571770 17538 net.cpp:165] Memory required for data: 460289500\nI0817 16:28:01.571780 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:01.571790 17538 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:28:01.571796 17538 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:28:01.571804 17538 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.571854 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:01.571993 17538 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:28:01.572006 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572012 17538 net.cpp:165] Memory required for data: 468481500\nI0817 16:28:01.572021 17538 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:28:01.572033 17538 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:28:01.572039 17538 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:28:01.572046 17538 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:01.572059 17538 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:28:01.572089 17538 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:28:01.572106 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572113 17538 net.cpp:165] Memory required for data: 476673500\nI0817 16:28:01.572118 17538 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:28:01.572129 17538 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:28:01.572135 17538 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0817 16:28:01.572142 17538 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.572151 17538 net.cpp:150] Setting up L1_b5_relu\nI0817 16:28:01.572160 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572163 17538 net.cpp:165] Memory required for data: 484865500\nI0817 16:28:01.572170 17538 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.572176 17538 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.572181 17538 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:28:01.572190 17538 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:01.572198 17538 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:01.572244 17538 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.572257 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572263 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572268 17538 net.cpp:165] Memory required for data: 501249500\nI0817 16:28:01.572273 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:28:01.572288 17538 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:28:01.572293 17538 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:01.572302 17538 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:28:01.572605 17538 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:28:01.572620 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572625 17538 net.cpp:165] Memory required for data: 509441500\nI0817 16:28:01.572641 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:28:01.572654 17538 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:28:01.572660 17538 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0817 16:28:01.572669 17538 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0817 16:28:01.572909 17538 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:28:01.572923 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.572928 17538 net.cpp:165] Memory required for data: 517633500\nI0817 16:28:01.572938 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:01.572947 17538 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:28:01.572953 17538 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:28:01.572960 17538 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.573015 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:01.573165 17538 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:28:01.573179 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.573185 17538 net.cpp:165] Memory required for data: 525825500\nI0817 16:28:01.573194 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:28:01.573202 17538 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:28:01.573209 17538 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:28:01.573218 17538 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.573230 17538 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:28:01.573237 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.573241 17538 net.cpp:165] Memory required for data: 534017500\nI0817 16:28:01.573246 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:28:01.573261 17538 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:28:01.573266 17538 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:28:01.573276 17538 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:28:01.573588 17538 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:28:01.573602 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.573607 17538 
net.cpp:165] Memory required for data: 542209500\nI0817 16:28:01.573616 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:28:01.573626 17538 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:28:01.573632 17538 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:28:01.573643 17538 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:28:01.573884 17538 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:28:01.573899 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.573905 17538 net.cpp:165] Memory required for data: 550401500\nI0817 16:28:01.573915 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:01.573925 17538 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:28:01.573930 17538 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:28:01.573937 17538 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.573989 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:01.574136 17538 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:28:01.574148 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574154 17538 net.cpp:165] Memory required for data: 558593500\nI0817 16:28:01.574163 17538 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:28:01.574182 17538 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:28:01.574188 17538 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:28:01.574196 17538 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:01.574204 17538 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:28:01.574239 17538 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:28:01.574251 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574255 17538 net.cpp:165] Memory required for data: 566785500\nI0817 16:28:01.574261 17538 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:28:01.574278 17538 net.cpp:100] Creating 
Layer L1_b6_relu\nI0817 16:28:01.574285 17538 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0817 16:28:01.574291 17538 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.574301 17538 net.cpp:150] Setting up L1_b6_relu\nI0817 16:28:01.574308 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574313 17538 net.cpp:165] Memory required for data: 574977500\nI0817 16:28:01.574318 17538 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.574326 17538 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.574331 17538 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:28:01.574340 17538 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:01.574350 17538 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:01.574393 17538 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.574404 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574410 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574415 17538 net.cpp:165] Memory required for data: 591361500\nI0817 16:28:01.574420 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:28:01.574435 17538 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:28:01.574442 17538 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:01.574451 17538 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:28:01.574762 17538 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:28:01.574777 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.574782 17538 net.cpp:165] Memory required for data: 599553500\nI0817 16:28:01.574791 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:28:01.574800 17538 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0817 16:28:01.574810 17538 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0817 16:28:01.574817 17538 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:28:01.575057 17538 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:28:01.575069 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.575075 17538 net.cpp:165] Memory required for data: 607745500\nI0817 16:28:01.575085 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:01.575094 17538 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:28:01.575100 17538 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:28:01.575119 17538 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.575175 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:01.575312 17538 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:28:01.575328 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.575333 17538 net.cpp:165] Memory required for data: 615937500\nI0817 16:28:01.575343 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:28:01.575351 17538 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:28:01.575357 17538 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:28:01.575364 17538 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.575373 17538 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:28:01.575381 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.575386 17538 net.cpp:165] Memory required for data: 624129500\nI0817 16:28:01.575390 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:28:01.575404 17538 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:28:01.575412 17538 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:28:01.575423 17538 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:28:01.575732 17538 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:28:01.575747 17538 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.575752 17538 net.cpp:165] Memory required for data: 632321500\nI0817 16:28:01.575768 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:28:01.575780 17538 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:28:01.575788 17538 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:28:01.575798 17538 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:28:01.576033 17538 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:28:01.576046 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576051 17538 net.cpp:165] Memory required for data: 640513500\nI0817 16:28:01.576061 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:01.576071 17538 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:28:01.576076 17538 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:28:01.576083 17538 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.576146 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:01.576287 17538 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:28:01.576299 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576304 17538 net.cpp:165] Memory required for data: 648705500\nI0817 16:28:01.576313 17538 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:28:01.576325 17538 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:28:01.576333 17538 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:28:01.576339 17538 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:01.576347 17538 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:28:01.576380 17538 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:28:01.576392 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576397 17538 net.cpp:165] Memory required for data: 656897500\nI0817 16:28:01.576402 17538 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0817 16:28:01.576411 17538 net.cpp:100] Creating Layer L1_b7_relu\nI0817 16:28:01.576416 17538 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:28:01.576426 17538 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.576436 17538 net.cpp:150] Setting up L1_b7_relu\nI0817 16:28:01.576443 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576447 17538 net.cpp:165] Memory required for data: 665089500\nI0817 16:28:01.576452 17538 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.576459 17538 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.576465 17538 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:28:01.576474 17538 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:01.576484 17538 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:01.576525 17538 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.576536 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576544 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576548 17538 net.cpp:165] Memory required for data: 681473500\nI0817 16:28:01.576555 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:28:01.576571 17538 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:28:01.576578 17538 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:01.576587 17538 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:28:01.576902 17538 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:28:01.576917 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.576922 17538 net.cpp:165] Memory required for data: 689665500\nI0817 16:28:01.576931 17538 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0817 16:28:01.576946 17538 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0817 16:28:01.576951 17538 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:28:01.576962 17538 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:28:01.577219 17538 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:28:01.577256 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.577261 17538 net.cpp:165] Memory required for data: 697857500\nI0817 16:28:01.577272 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:01.577281 17538 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:28:01.577287 17538 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:28:01.577298 17538 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.577353 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:01.577493 17538 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:28:01.577507 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.577512 17538 net.cpp:165] Memory required for data: 706049500\nI0817 16:28:01.577520 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:28:01.577533 17538 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:28:01.577539 17538 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:28:01.577548 17538 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.577556 17538 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:28:01.577564 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.577569 17538 net.cpp:165] Memory required for data: 714241500\nI0817 16:28:01.577574 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:28:01.577587 17538 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:28:01.577594 17538 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:28:01.577605 17538 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:28:01.577919 17538 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:28:01.577932 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.577937 17538 net.cpp:165] Memory required for data: 722433500\nI0817 16:28:01.577946 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:28:01.577958 17538 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:28:01.577965 17538 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:28:01.577975 17538 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:28:01.578230 17538 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:28:01.578245 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578250 17538 net.cpp:165] Memory required for data: 730625500\nI0817 16:28:01.578260 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:01.578269 17538 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:28:01.578275 17538 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:28:01.578284 17538 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.578339 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:01.578481 17538 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:28:01.578495 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578500 17538 net.cpp:165] Memory required for data: 738817500\nI0817 16:28:01.578510 17538 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:28:01.578521 17538 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:28:01.578527 17538 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:28:01.578534 17538 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:01.578542 17538 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:28:01.578577 17538 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:28:01.578588 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578593 17538 net.cpp:165] Memory required for 
data: 747009500\nI0817 16:28:01.578598 17538 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0817 16:28:01.578606 17538 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:28:01.578611 17538 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:28:01.578621 17538 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.578631 17538 net.cpp:150] Setting up L1_b8_relu\nI0817 16:28:01.578645 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578650 17538 net.cpp:165] Memory required for data: 755201500\nI0817 16:28:01.578655 17538 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.578663 17538 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.578668 17538 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:28:01.578678 17538 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:01.578689 17538 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:01.578732 17538 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.578744 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578752 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.578755 17538 net.cpp:165] Memory required for data: 771585500\nI0817 16:28:01.578761 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:28:01.578775 17538 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:28:01.578781 17538 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:01.578790 17538 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:28:01.579121 17538 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:28:01.579139 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.579144 17538 net.cpp:165] Memory required for data: 
779777500\nI0817 16:28:01.579154 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0817 16:28:01.579162 17538 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:28:01.579169 17538 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:28:01.579180 17538 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:28:01.579422 17538 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:28:01.579435 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.579440 17538 net.cpp:165] Memory required for data: 787969500\nI0817 16:28:01.579452 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:01.579463 17538 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:28:01.579469 17538 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:28:01.579478 17538 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.579530 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:01.579679 17538 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:28:01.579692 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.579697 17538 net.cpp:165] Memory required for data: 796161500\nI0817 16:28:01.579706 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:28:01.579715 17538 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:28:01.579720 17538 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:28:01.579730 17538 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.579741 17538 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:28:01.579748 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.579753 17538 net.cpp:165] Memory required for data: 804353500\nI0817 16:28:01.579758 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:28:01.579772 17538 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:28:01.579778 17538 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:28:01.579787 17538 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:28:01.580111 17538 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:28:01.580126 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580130 17538 net.cpp:165] Memory required for data: 812545500\nI0817 16:28:01.580139 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:28:01.580152 17538 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:28:01.580158 17538 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:28:01.580166 17538 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:28:01.580418 17538 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:28:01.580432 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580438 17538 net.cpp:165] Memory required for data: 820737500\nI0817 16:28:01.580469 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:01.580482 17538 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:28:01.580488 17538 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:28:01.580498 17538 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.580550 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:01.580688 17538 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:28:01.580701 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580706 17538 net.cpp:165] Memory required for data: 828929500\nI0817 16:28:01.580716 17538 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:28:01.580725 17538 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:28:01.580731 17538 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:28:01.580739 17538 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:01.580746 17538 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:28:01.580780 17538 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:28:01.580790 17538 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:01.580796 17538 net.cpp:165] Memory required for data: 837121500\nI0817 16:28:01.580801 17538 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:28:01.580808 17538 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:28:01.580813 17538 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:28:01.580823 17538 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.580832 17538 net.cpp:150] Setting up L1_b9_relu\nI0817 16:28:01.580840 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580844 17538 net.cpp:165] Memory required for data: 845313500\nI0817 16:28:01.580849 17538 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.580857 17538 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.580862 17538 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:28:01.580873 17538 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:01.580883 17538 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:01.580925 17538 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.580937 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580945 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.580950 17538 net.cpp:165] Memory required for data: 861697500\nI0817 16:28:01.580955 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:28:01.580971 17538 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:28:01.580978 17538 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:01.580988 17538 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:28:01.581313 17538 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:28:01.581328 17538 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0817 16:28:01.581333 17538 net.cpp:165] Memory required for data: 863745500\nI0817 16:28:01.581342 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:28:01.581354 17538 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:28:01.581362 17538 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:28:01.581369 17538 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:28:01.581606 17538 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:28:01.581619 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.581624 17538 net.cpp:165] Memory required for data: 865793500\nI0817 16:28:01.581635 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:01.581650 17538 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:28:01.581657 17538 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:28:01.581670 17538 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.581723 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:01.581864 17538 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:28:01.581877 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.581882 17538 net.cpp:165] Memory required for data: 867841500\nI0817 16:28:01.581892 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:28:01.581900 17538 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:28:01.581907 17538 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:28:01.581917 17538 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.581926 17538 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:28:01.581933 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.581938 17538 net.cpp:165] Memory required for data: 869889500\nI0817 16:28:01.581943 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:28:01.581957 17538 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:28:01.581964 17538 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:28:01.581972 17538 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:28:01.582300 17538 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:28:01.582314 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.582320 17538 net.cpp:165] Memory required for data: 871937500\nI0817 16:28:01.582329 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:28:01.582339 17538 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:28:01.582350 17538 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:28:01.582357 17538 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:28:01.582600 17538 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:28:01.582613 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.582618 17538 net.cpp:165] Memory required for data: 873985500\nI0817 16:28:01.582629 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:01.582638 17538 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:28:01.582644 17538 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:28:01.582651 17538 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.582707 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:01.582849 17538 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:28:01.582864 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.582870 17538 net.cpp:165] Memory required for data: 876033500\nI0817 16:28:01.582878 17538 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:28:01.582888 17538 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:28:01.582895 17538 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:01.582902 17538 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:28:01.582991 17538 net.cpp:150] Setting up L2_b1_pool\nI0817 16:28:01.583008 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.583012 17538 net.cpp:165] Memory 
required for data: 878081500\nI0817 16:28:01.583019 17538 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:28:01.583027 17538 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:28:01.583034 17538 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:28:01.583043 17538 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:28:01.583052 17538 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:28:01.583084 17538 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:28:01.583093 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.583098 17538 net.cpp:165] Memory required for data: 880129500\nI0817 16:28:01.583111 17538 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:28:01.583122 17538 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:28:01.583128 17538 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:28:01.583143 17538 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.583153 17538 net.cpp:150] Setting up L2_b1_relu\nI0817 16:28:01.583160 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.583165 17538 net.cpp:165] Memory required for data: 882177500\nI0817 16:28:01.583170 17538 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:28:01.583218 17538 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:28:01.583235 17538 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:28:01.585566 17538 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:28:01.585585 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.585590 17538 net.cpp:165] Memory required for data: 884225500\nI0817 16:28:01.585597 17538 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:28:01.585608 17538 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:28:01.585614 17538 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:28:01.585621 17538 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:28:01.585633 17538 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 
16:28:01.585711 17538 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:28:01.585729 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.585736 17538 net.cpp:165] Memory required for data: 888321500\nI0817 16:28:01.585741 17538 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.585749 17538 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.585755 17538 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:28:01.585767 17538 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:01.585777 17538 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:01.585825 17538 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.585836 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.585844 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.585849 17538 net.cpp:165] Memory required for data: 896513500\nI0817 16:28:01.585853 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:28:01.585868 17538 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:28:01.585875 17538 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:01.585887 17538 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:28:01.587337 17538 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:28:01.587354 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.587360 17538 net.cpp:165] Memory required for data: 900609500\nI0817 16:28:01.587369 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:28:01.587383 17538 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:28:01.587390 17538 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:28:01.587399 17538 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:28:01.587648 17538 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 
16:28:01.587662 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.587667 17538 net.cpp:165] Memory required for data: 904705500\nI0817 16:28:01.587678 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:01.587687 17538 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:28:01.587693 17538 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:28:01.587704 17538 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.587760 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:01.587909 17538 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:28:01.587923 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.587927 17538 net.cpp:165] Memory required for data: 908801500\nI0817 16:28:01.587936 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:28:01.587945 17538 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:28:01.587951 17538 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:28:01.587970 17538 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.587980 17538 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:28:01.587987 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.587992 17538 net.cpp:165] Memory required for data: 912897500\nI0817 16:28:01.587997 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:28:01.588012 17538 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:28:01.588018 17538 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:28:01.588027 17538 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:28:01.588495 17538 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:28:01.588510 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.588516 17538 net.cpp:165] Memory required for data: 916993500\nI0817 16:28:01.588526 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:28:01.588538 17538 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0817 16:28:01.588546 17538 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:28:01.588553 17538 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:28:01.588801 17538 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:28:01.588814 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.588819 17538 net.cpp:165] Memory required for data: 921089500\nI0817 16:28:01.588830 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:01.588840 17538 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:28:01.588845 17538 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:28:01.588853 17538 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.588909 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:01.589053 17538 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:28:01.589069 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589074 17538 net.cpp:165] Memory required for data: 925185500\nI0817 16:28:01.589084 17538 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:28:01.589093 17538 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:28:01.589099 17538 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:28:01.589113 17538 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:01.589121 17538 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:28:01.589152 17538 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:28:01.589162 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589167 17538 net.cpp:165] Memory required for data: 929281500\nI0817 16:28:01.589172 17538 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:28:01.589180 17538 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:28:01.589186 17538 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:28:01.589196 17538 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0817 16:28:01.589206 17538 net.cpp:150] Setting up L2_b2_relu\nI0817 16:28:01.589213 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589218 17538 net.cpp:165] Memory required for data: 933377500\nI0817 16:28:01.589223 17538 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.589231 17538 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.589236 17538 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:28:01.589246 17538 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:01.589256 17538 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:01.589298 17538 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.589310 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589316 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589321 17538 net.cpp:165] Memory required for data: 941569500\nI0817 16:28:01.589326 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:28:01.589349 17538 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:28:01.589356 17538 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:01.589365 17538 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:28:01.589833 17538 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:28:01.589848 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.589854 17538 net.cpp:165] Memory required for data: 945665500\nI0817 16:28:01.589862 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:28:01.589875 17538 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:28:01.589881 17538 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:28:01.589890 17538 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0817 16:28:01.590140 17538 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:28:01.590154 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.590159 17538 net.cpp:165] Memory required for data: 949761500\nI0817 16:28:01.590170 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:01.590178 17538 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:28:01.590184 17538 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:28:01.590195 17538 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.590250 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:01.590401 17538 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:28:01.590415 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.590420 17538 net.cpp:165] Memory required for data: 953857500\nI0817 16:28:01.590430 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:28:01.590437 17538 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:28:01.590445 17538 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:28:01.590451 17538 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.590463 17538 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:28:01.590471 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.590476 17538 net.cpp:165] Memory required for data: 957953500\nI0817 16:28:01.590481 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:28:01.590492 17538 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:28:01.590502 17538 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:28:01.590510 17538 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:28:01.590993 17538 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:28:01.591008 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591014 17538 net.cpp:165] Memory required for data: 962049500\nI0817 16:28:01.591023 17538 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:28:01.591035 17538 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:28:01.591042 17538 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:28:01.591053 17538 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:28:01.591305 17538 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:28:01.591320 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591325 17538 net.cpp:165] Memory required for data: 966145500\nI0817 16:28:01.591336 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:01.591344 17538 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:28:01.591351 17538 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:28:01.591357 17538 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.591415 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:01.591562 17538 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:28:01.591578 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591583 17538 net.cpp:165] Memory required for data: 970241500\nI0817 16:28:01.591593 17538 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:28:01.591603 17538 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:28:01.591609 17538 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:28:01.591622 17538 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:01.591630 17538 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:28:01.591661 17538 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:28:01.591672 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591677 17538 net.cpp:165] Memory required for data: 974337500\nI0817 16:28:01.591682 17538 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:28:01.591706 17538 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:28:01.591712 17538 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0817 16:28:01.591719 17538 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.591729 17538 net.cpp:150] Setting up L2_b3_relu\nI0817 16:28:01.591737 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591742 17538 net.cpp:165] Memory required for data: 978433500\nI0817 16:28:01.591747 17538 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.591754 17538 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.591759 17538 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:28:01.591768 17538 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:01.591776 17538 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:01.591825 17538 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.591836 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591842 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.591847 17538 net.cpp:165] Memory required for data: 986625500\nI0817 16:28:01.591853 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:28:01.591867 17538 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:28:01.591874 17538 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:01.591888 17538 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:28:01.592353 17538 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:28:01.592368 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.592373 17538 net.cpp:165] Memory required for data: 990721500\nI0817 16:28:01.592382 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:28:01.592396 17538 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:28:01.592402 17538 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0817 16:28:01.592412 17538 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0817 16:28:01.592658 17538 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:28:01.592671 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.592676 17538 net.cpp:165] Memory required for data: 994817500\nI0817 16:28:01.592687 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:01.592695 17538 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:28:01.592702 17538 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:28:01.592710 17538 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.592766 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:01.592909 17538 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:28:01.592922 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.592928 17538 net.cpp:165] Memory required for data: 998913500\nI0817 16:28:01.592937 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:28:01.592945 17538 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:28:01.592952 17538 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:28:01.592962 17538 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.592972 17538 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:28:01.592978 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.592983 17538 net.cpp:165] Memory required for data: 1003009500\nI0817 16:28:01.592988 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:28:01.593009 17538 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:28:01.593016 17538 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:28:01.593025 17538 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:28:01.593492 17538 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:28:01.593508 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.593513 17538 
net.cpp:165] Memory required for data: 1007105500\nI0817 16:28:01.593521 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:28:01.593536 17538 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:28:01.593544 17538 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:28:01.593552 17538 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:28:01.593798 17538 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:28:01.593814 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.593819 17538 net.cpp:165] Memory required for data: 1011201500\nI0817 16:28:01.593830 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:01.593839 17538 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:28:01.593845 17538 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:28:01.593852 17538 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.593907 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:01.594055 17538 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:28:01.594069 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594074 17538 net.cpp:165] Memory required for data: 1015297500\nI0817 16:28:01.594084 17538 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:28:01.594092 17538 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:28:01.594099 17538 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:28:01.594111 17538 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:01.594123 17538 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:28:01.594151 17538 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:28:01.594161 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594166 17538 net.cpp:165] Memory required for data: 1019393500\nI0817 16:28:01.594171 17538 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:28:01.594182 17538 net.cpp:100] Creating 
Layer L2_b4_relu\nI0817 16:28:01.594187 17538 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0817 16:28:01.594194 17538 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:28:01.594203 17538 net.cpp:150] Setting up L2_b4_relu\nI0817 16:28:01.594211 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594215 17538 net.cpp:165] Memory required for data: 1023489500\nI0817 16:28:01.594221 17538 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.594228 17538 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.594233 17538 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:28:01.594240 17538 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:01.594250 17538 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:01.594296 17538 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.594308 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594314 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594319 17538 net.cpp:165] Memory required for data: 1031681500\nI0817 16:28:01.594324 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:28:01.594338 17538 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:28:01.594346 17538 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:01.594354 17538 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:28:01.594810 17538 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:28:01.594830 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.594836 17538 net.cpp:165] Memory required for data: 1035777500\nI0817 16:28:01.594846 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:28:01.594857 17538 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0817 16:28:01.594863 17538 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0817 16:28:01.594872 17538 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:28:01.595127 17538 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:28:01.595144 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.595149 17538 net.cpp:165] Memory required for data: 1039873500\nI0817 16:28:01.595160 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:01.595168 17538 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:28:01.595175 17538 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:28:01.595182 17538 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.595237 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:01.595391 17538 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:28:01.595404 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.595409 17538 net.cpp:165] Memory required for data: 1043969500\nI0817 16:28:01.595419 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:28:01.595427 17538 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:28:01.595433 17538 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:28:01.595443 17538 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.595453 17538 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:28:01.595460 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.595465 17538 net.cpp:165] Memory required for data: 1048065500\nI0817 16:28:01.595470 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:28:01.595484 17538 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:28:01.595489 17538 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:28:01.595499 17538 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:28:01.595955 17538 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:28:01.595969 17538 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.595974 17538 net.cpp:165] Memory required for data: 1052161500\nI0817 16:28:01.595983 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:28:01.595995 17538 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:28:01.596002 17538 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:28:01.596010 17538 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:28:01.596261 17538 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:28:01.596276 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596280 17538 net.cpp:165] Memory required for data: 1056257500\nI0817 16:28:01.596290 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:01.596302 17538 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:28:01.596308 17538 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:28:01.596316 17538 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.596370 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:01.596516 17538 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:28:01.596529 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596535 17538 net.cpp:165] Memory required for data: 1060353500\nI0817 16:28:01.596544 17538 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:28:01.596556 17538 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:28:01.596563 17538 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:28:01.596570 17538 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:01.596580 17538 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:28:01.596606 17538 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:28:01.596616 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596627 17538 net.cpp:165] Memory required for data: 1064449500\nI0817 16:28:01.596633 17538 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0817 16:28:01.596640 17538 net.cpp:100] Creating Layer L2_b5_relu\nI0817 16:28:01.596647 17538 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:28:01.596657 17538 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.596666 17538 net.cpp:150] Setting up L2_b5_relu\nI0817 16:28:01.596673 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596678 17538 net.cpp:165] Memory required for data: 1068545500\nI0817 16:28:01.596683 17538 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.596690 17538 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.596695 17538 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:28:01.596702 17538 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:01.596712 17538 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:01.596760 17538 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.596771 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596778 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.596783 17538 net.cpp:165] Memory required for data: 1076737500\nI0817 16:28:01.596788 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:28:01.596799 17538 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:28:01.596806 17538 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:01.596817 17538 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:28:01.597295 17538 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:28:01.597309 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.597316 17538 net.cpp:165] Memory required for data: 1080833500\nI0817 16:28:01.597324 17538 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0817 16:28:01.597337 17538 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0817 16:28:01.597343 17538 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:28:01.597352 17538 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:28:01.597604 17538 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:28:01.597620 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.597625 17538 net.cpp:165] Memory required for data: 1084929500\nI0817 16:28:01.597636 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:01.597645 17538 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:28:01.597651 17538 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:28:01.597659 17538 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.597713 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:01.597862 17538 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:28:01.597875 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.597880 17538 net.cpp:165] Memory required for data: 1089025500\nI0817 16:28:01.597889 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:28:01.597898 17538 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:28:01.597903 17538 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:28:01.597913 17538 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.597923 17538 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:28:01.597930 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.597935 17538 net.cpp:165] Memory required for data: 1093121500\nI0817 16:28:01.597940 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:28:01.597951 17538 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:28:01.597957 17538 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:28:01.597970 17538 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:28:01.598431 17538 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:28:01.598453 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.598457 17538 net.cpp:165] Memory required for data: 1097217500\nI0817 16:28:01.598466 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:28:01.598476 17538 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:28:01.598484 17538 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:28:01.598495 17538 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:28:01.598742 17538 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:28:01.598755 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.598760 17538 net.cpp:165] Memory required for data: 1101313500\nI0817 16:28:01.598772 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:01.598783 17538 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:28:01.598789 17538 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:28:01.598796 17538 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.598851 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:01.599023 17538 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:28:01.599037 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599042 17538 net.cpp:165] Memory required for data: 1105409500\nI0817 16:28:01.599051 17538 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:28:01.599063 17538 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:28:01.599071 17538 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:28:01.599077 17538 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:01.599088 17538 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:28:01.599123 17538 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:28:01.599133 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599138 17538 net.cpp:165] Memory required for 
data: 1109505500\nI0817 16:28:01.599143 17538 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0817 16:28:01.599151 17538 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:28:01.599158 17538 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:28:01.599175 17538 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.599185 17538 net.cpp:150] Setting up L2_b6_relu\nI0817 16:28:01.599192 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599197 17538 net.cpp:165] Memory required for data: 1113601500\nI0817 16:28:01.599202 17538 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.599210 17538 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.599215 17538 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:28:01.599221 17538 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:01.599231 17538 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:01.599277 17538 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.599289 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599297 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599301 17538 net.cpp:165] Memory required for data: 1121793500\nI0817 16:28:01.599306 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:28:01.599318 17538 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:28:01.599324 17538 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:01.599336 17538 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:28:01.599800 17538 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:28:01.599815 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.599820 17538 net.cpp:165] Memory required for data: 
1125889500\nI0817 16:28:01.599829 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0817 16:28:01.599841 17538 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:28:01.599855 17538 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:28:01.599864 17538 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:28:01.600124 17538 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:28:01.600138 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.600143 17538 net.cpp:165] Memory required for data: 1129985500\nI0817 16:28:01.600153 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:01.600165 17538 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:28:01.600172 17538 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:28:01.600179 17538 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.600234 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:01.600386 17538 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:28:01.600399 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.600404 17538 net.cpp:165] Memory required for data: 1134081500\nI0817 16:28:01.600414 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:28:01.600426 17538 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:28:01.600432 17538 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:28:01.600442 17538 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.600452 17538 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:28:01.600459 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.600464 17538 net.cpp:165] Memory required for data: 1138177500\nI0817 16:28:01.600469 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:28:01.600481 17538 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:28:01.600486 17538 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:28:01.600497 17538 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:28:01.600960 17538 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:28:01.600975 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.600980 17538 net.cpp:165] Memory required for data: 1142273500\nI0817 16:28:01.600988 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:28:01.600997 17538 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:28:01.601003 17538 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:28:01.601014 17538 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:28:01.601270 17538 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:28:01.601284 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.601290 17538 net.cpp:165] Memory required for data: 1146369500\nI0817 16:28:01.601300 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:01.601313 17538 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:28:01.601320 17538 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:28:01.601327 17538 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.601382 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:01.601532 17538 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:28:01.601546 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.601552 17538 net.cpp:165] Memory required for data: 1150465500\nI0817 16:28:01.601560 17538 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:28:01.601572 17538 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:28:01.601578 17538 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:28:01.601585 17538 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:01.601593 17538 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:28:01.601622 17538 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:28:01.601632 17538 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:01.601637 17538 net.cpp:165] Memory required for data: 1154561500\nI0817 16:28:01.601642 17538 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:28:01.601650 17538 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:28:01.601656 17538 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:28:01.601673 17538 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.601683 17538 net.cpp:150] Setting up L2_b7_relu\nI0817 16:28:01.601691 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.601696 17538 net.cpp:165] Memory required for data: 1158657500\nI0817 16:28:01.601701 17538 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.601708 17538 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.601713 17538 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:28:01.601721 17538 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:01.601729 17538 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:01.601778 17538 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.601789 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.601796 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.601801 17538 net.cpp:165] Memory required for data: 1166849500\nI0817 16:28:01.601806 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:28:01.601817 17538 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:28:01.601824 17538 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:01.601836 17538 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:28:01.602309 17538 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:28:01.602324 17538 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:01.602329 17538 net.cpp:165] Memory required for data: 1170945500\nI0817 16:28:01.602339 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:28:01.602349 17538 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:28:01.602355 17538 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:28:01.602368 17538 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:28:01.602620 17538 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:28:01.602634 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.602639 17538 net.cpp:165] Memory required for data: 1175041500\nI0817 16:28:01.602650 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:01.602661 17538 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:28:01.602669 17538 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:28:01.602675 17538 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.602730 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:01.602882 17538 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:28:01.602895 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.602900 17538 net.cpp:165] Memory required for data: 1179137500\nI0817 16:28:01.602910 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:28:01.602921 17538 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:28:01.602927 17538 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:28:01.602934 17538 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.602944 17538 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:28:01.602952 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.602957 17538 net.cpp:165] Memory required for data: 1183233500\nI0817 16:28:01.602960 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:28:01.602974 17538 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:28:01.602980 17538 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:28:01.602991 17538 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:28:01.603461 17538 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:28:01.603476 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.603480 17538 net.cpp:165] Memory required for data: 1187329500\nI0817 16:28:01.603489 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:28:01.603498 17538 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:28:01.603513 17538 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:28:01.603528 17538 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:28:01.603782 17538 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:28:01.603796 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.603801 17538 net.cpp:165] Memory required for data: 1191425500\nI0817 16:28:01.603812 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:01.603823 17538 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:28:01.603830 17538 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:28:01.603837 17538 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.603895 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:01.604046 17538 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:28:01.604059 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604064 17538 net.cpp:165] Memory required for data: 1195521500\nI0817 16:28:01.604074 17538 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:28:01.604085 17538 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:28:01.604092 17538 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:28:01.604099 17538 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:01.604115 17538 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:28:01.604146 17538 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0817 16:28:01.604156 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604161 17538 net.cpp:165] Memory required for data: 1199617500\nI0817 16:28:01.604166 17538 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:28:01.604173 17538 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:28:01.604179 17538 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:28:01.604187 17538 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.604198 17538 net.cpp:150] Setting up L2_b8_relu\nI0817 16:28:01.604205 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604210 17538 net.cpp:165] Memory required for data: 1203713500\nI0817 16:28:01.604215 17538 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.604223 17538 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.604228 17538 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:28:01.604235 17538 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:01.604257 17538 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:01.604308 17538 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.604323 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604331 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604336 17538 net.cpp:165] Memory required for data: 1211905500\nI0817 16:28:01.604341 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:28:01.604353 17538 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:28:01.604359 17538 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:01.604372 17538 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:28:01.604838 17538 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0817 16:28:01.604852 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.604857 17538 net.cpp:165] Memory required for data: 1216001500\nI0817 16:28:01.604867 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:28:01.604876 17538 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:28:01.604882 17538 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:28:01.604893 17538 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:28:01.605151 17538 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:28:01.605165 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.605177 17538 net.cpp:165] Memory required for data: 1220097500\nI0817 16:28:01.605188 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:01.605201 17538 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:28:01.605207 17538 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:28:01.605214 17538 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.605270 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:01.605422 17538 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:28:01.605435 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.605442 17538 net.cpp:165] Memory required for data: 1224193500\nI0817 16:28:01.605450 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:28:01.605458 17538 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:28:01.605465 17538 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:28:01.605476 17538 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.605487 17538 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:28:01.605494 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.605499 17538 net.cpp:165] Memory required for data: 1228289500\nI0817 16:28:01.605504 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:28:01.605518 17538 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:28:01.605525 17538 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:28:01.605535 17538 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:28:01.605998 17538 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:28:01.606011 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606016 17538 net.cpp:165] Memory required for data: 1232385500\nI0817 16:28:01.606025 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:28:01.606034 17538 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:28:01.606040 17538 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:28:01.606048 17538 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:28:01.606313 17538 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:28:01.606328 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606333 17538 net.cpp:165] Memory required for data: 1236481500\nI0817 16:28:01.606377 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:01.606392 17538 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:28:01.606400 17538 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:28:01.606407 17538 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.606462 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:01.606616 17538 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:28:01.606629 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606634 17538 net.cpp:165] Memory required for data: 1240577500\nI0817 16:28:01.606644 17538 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:28:01.606653 17538 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:28:01.606659 17538 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:28:01.606667 17538 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:01.606678 17538 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0817 16:28:01.606705 17538 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0817 16:28:01.606714 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606719 17538 net.cpp:165] Memory required for data: 1244673500\nI0817 16:28:01.606725 17538 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:28:01.606735 17538 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:28:01.606742 17538 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:28:01.606750 17538 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.606758 17538 net.cpp:150] Setting up L2_b9_relu\nI0817 16:28:01.606765 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606770 17538 net.cpp:165] Memory required for data: 1248769500\nI0817 16:28:01.606782 17538 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.606793 17538 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.606799 17538 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:28:01.606806 17538 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:01.606817 17538 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:01.606865 17538 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.606878 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606884 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.606889 17538 net.cpp:165] Memory required for data: 1256961500\nI0817 16:28:01.606894 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:28:01.606909 17538 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:28:01.606915 17538 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:01.606925 17538 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0817 16:28:01.607411 17538 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:28:01.607426 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.607432 17538 net.cpp:165] Memory required for data: 1257985500\nI0817 16:28:01.607441 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:28:01.607453 17538 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:28:01.607460 17538 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:28:01.607470 17538 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:28:01.607733 17538 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:28:01.607745 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.607751 17538 net.cpp:165] Memory required for data: 1259009500\nI0817 16:28:01.607761 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:01.607770 17538 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:28:01.607776 17538 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:28:01.607787 17538 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.607843 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:01.607997 17538 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:28:01.608011 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.608016 17538 net.cpp:165] Memory required for data: 1260033500\nI0817 16:28:01.608024 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:28:01.608033 17538 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:28:01.608039 17538 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:28:01.608049 17538 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.608059 17538 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:28:01.608067 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.608072 17538 net.cpp:165] Memory required for data: 1261057500\nI0817 16:28:01.608078 17538 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0817 16:28:01.608091 17538 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:28:01.608098 17538 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:28:01.608114 17538 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:28:01.608613 17538 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:28:01.608628 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.608633 17538 net.cpp:165] Memory required for data: 1262081500\nI0817 16:28:01.608642 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:28:01.608654 17538 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:28:01.608661 17538 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:28:01.608669 17538 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:28:01.608932 17538 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:28:01.608945 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.608960 17538 net.cpp:165] Memory required for data: 1263105500\nI0817 16:28:01.608971 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:01.608983 17538 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:28:01.608989 17538 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:28:01.608997 17538 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.609056 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:01.609226 17538 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:28:01.609241 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.609246 17538 net.cpp:165] Memory required for data: 1264129500\nI0817 16:28:01.609254 17538 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:28:01.609263 17538 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:28:01.609271 17538 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:01.609282 17538 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:28:01.609316 17538 net.cpp:150] 
Setting up L3_b1_pool\nI0817 16:28:01.609326 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.609331 17538 net.cpp:165] Memory required for data: 1265153500\nI0817 16:28:01.609336 17538 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:28:01.609349 17538 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:28:01.609354 17538 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:28:01.609361 17538 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:28:01.609369 17538 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:28:01.609400 17538 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:28:01.609411 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.609416 17538 net.cpp:165] Memory required for data: 1266177500\nI0817 16:28:01.609421 17538 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:28:01.609431 17538 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:28:01.609436 17538 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:28:01.609443 17538 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.609453 17538 net.cpp:150] Setting up L3_b1_relu\nI0817 16:28:01.609460 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.609465 17538 net.cpp:165] Memory required for data: 1267201500\nI0817 16:28:01.609470 17538 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:28:01.609479 17538 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:28:01.609486 17538 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:28:01.610811 17538 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:28:01.610832 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.610838 17538 net.cpp:165] Memory required for data: 1268225500\nI0817 16:28:01.610844 17538 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:28:01.610854 17538 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:28:01.610860 17538 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 
16:28:01.610868 17538 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:28:01.610877 17538 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:28:01.610920 17538 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:28:01.610932 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.610937 17538 net.cpp:165] Memory required for data: 1270273500\nI0817 16:28:01.610944 17538 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.610954 17538 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.610960 17538 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:28:01.610968 17538 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:01.610978 17538 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:01.611029 17538 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.611042 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.611048 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.611060 17538 net.cpp:165] Memory required for data: 1274369500\nI0817 16:28:01.611066 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:28:01.611078 17538 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:28:01.611085 17538 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:01.611098 17538 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:28:01.613098 17538 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:28:01.613121 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.613126 17538 net.cpp:165] Memory required for data: 1276417500\nI0817 16:28:01.613137 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:28:01.613150 17538 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:28:01.613157 17538 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 
16:28:01.613168 17538 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:28:01.613433 17538 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:28:01.613447 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.613453 17538 net.cpp:165] Memory required for data: 1278465500\nI0817 16:28:01.613463 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:01.613472 17538 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:28:01.613479 17538 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:28:01.613490 17538 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.613548 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:01.613704 17538 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:28:01.613718 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.613723 17538 net.cpp:165] Memory required for data: 1280513500\nI0817 16:28:01.613732 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:28:01.613740 17538 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:28:01.613747 17538 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:28:01.613757 17538 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.613767 17538 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:28:01.613775 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.613780 17538 net.cpp:165] Memory required for data: 1282561500\nI0817 16:28:01.613785 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:28:01.613800 17538 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:28:01.613806 17538 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:28:01.613816 17538 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:28:01.614847 17538 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:28:01.614862 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.614867 17538 net.cpp:165] Memory required for data: 
1284609500\nI0817 16:28:01.614877 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:28:01.614887 17538 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:28:01.614893 17538 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:28:01.614905 17538 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:28:01.615182 17538 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:28:01.615198 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615205 17538 net.cpp:165] Memory required for data: 1286657500\nI0817 16:28:01.615216 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:01.615224 17538 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:28:01.615231 17538 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:28:01.615238 17538 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.615295 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:01.615450 17538 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:28:01.615463 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615468 17538 net.cpp:165] Memory required for data: 1288705500\nI0817 16:28:01.615478 17538 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:28:01.615495 17538 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:28:01.615505 17538 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:28:01.615512 17538 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:01.615520 17538 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:28:01.615557 17538 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:28:01.615567 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615572 17538 net.cpp:165] Memory required for data: 1290753500\nI0817 16:28:01.615577 17538 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:28:01.615586 17538 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:28:01.615592 17538 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:28:01.615598 17538 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:28:01.615607 17538 net.cpp:150] Setting up L3_b2_relu\nI0817 16:28:01.615614 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615619 17538 net.cpp:165] Memory required for data: 1292801500\nI0817 16:28:01.615624 17538 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.615631 17538 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.615638 17538 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:28:01.615644 17538 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:01.615654 17538 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:01.615702 17538 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.615715 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615721 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.615725 17538 net.cpp:165] Memory required for data: 1296897500\nI0817 16:28:01.615731 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:28:01.615747 17538 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:28:01.615754 17538 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:01.615763 17538 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:28:01.616786 17538 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:28:01.616802 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.616807 17538 net.cpp:165] Memory required for data: 1298945500\nI0817 16:28:01.616816 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:28:01.616825 17538 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:28:01.616832 17538 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:28:01.616843 17538 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:28:01.617106 17538 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:28:01.617120 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.617125 17538 net.cpp:165] Memory required for data: 1300993500\nI0817 16:28:01.617136 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:01.617144 17538 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:28:01.617151 17538 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:28:01.617161 17538 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.617223 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:01.617379 17538 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:28:01.617393 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.617398 17538 net.cpp:165] Memory required for data: 1303041500\nI0817 16:28:01.617406 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:28:01.617419 17538 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:28:01.617425 17538 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:28:01.617432 17538 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.617442 17538 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:28:01.617449 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.617460 17538 net.cpp:165] Memory required for data: 1305089500\nI0817 16:28:01.617466 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:28:01.617480 17538 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:28:01.617486 17538 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:28:01.617497 17538 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:28:01.618533 17538 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:28:01.618551 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.618556 17538 
net.cpp:165] Memory required for data: 1307137500\nI0817 16:28:01.618564 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:28:01.618573 17538 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:28:01.618580 17538 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:28:01.618592 17538 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:28:01.618860 17538 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:28:01.618875 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.618880 17538 net.cpp:165] Memory required for data: 1309185500\nI0817 16:28:01.618891 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:01.618901 17538 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:28:01.618907 17538 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:28:01.618914 17538 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.618970 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:01.619133 17538 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:28:01.619148 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.619153 17538 net.cpp:165] Memory required for data: 1311233500\nI0817 16:28:01.619163 17538 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:28:01.619175 17538 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:28:01.619182 17538 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:28:01.619189 17538 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:01.619197 17538 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:28:01.619233 17538 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:28:01.619246 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.619251 17538 net.cpp:165] Memory required for data: 1313281500\nI0817 16:28:01.619256 17538 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:28:01.619263 17538 net.cpp:100] Creating Layer 
L3_b3_relu\nI0817 16:28:01.619269 17538 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:28:01.619277 17538 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.619287 17538 net.cpp:150] Setting up L3_b3_relu\nI0817 16:28:01.619293 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.619298 17538 net.cpp:165] Memory required for data: 1315329500\nI0817 16:28:01.619302 17538 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.619310 17538 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.619315 17538 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:28:01.619328 17538 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:01.619338 17538 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:01.619385 17538 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.619396 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.619403 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.619408 17538 net.cpp:165] Memory required for data: 1319425500\nI0817 16:28:01.619413 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:28:01.619427 17538 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:28:01.619434 17538 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:01.619451 17538 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:28:01.620472 17538 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:28:01.620488 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.620493 17538 net.cpp:165] Memory required for data: 1321473500\nI0817 16:28:01.620502 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:28:01.620515 17538 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0817 16:28:01.620522 17538 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0817 16:28:01.620530 17538 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:28:01.620797 17538 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:28:01.620811 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.620816 17538 net.cpp:165] Memory required for data: 1323521500\nI0817 16:28:01.620826 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:01.620838 17538 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:28:01.620846 17538 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:28:01.620853 17538 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.620913 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:01.621068 17538 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:28:01.621081 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.621086 17538 net.cpp:165] Memory required for data: 1325569500\nI0817 16:28:01.621096 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:28:01.621112 17538 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:28:01.621120 17538 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:28:01.621127 17538 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.621137 17538 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:28:01.621145 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.621150 17538 net.cpp:165] Memory required for data: 1327617500\nI0817 16:28:01.621155 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:28:01.621170 17538 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:28:01.621176 17538 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:28:01.621187 17538 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:28:01.622236 17538 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:28:01.622251 17538 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0817 16:28:01.622256 17538 net.cpp:165] Memory required for data: 1329665500\nI0817 16:28:01.622265 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:28:01.622275 17538 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:28:01.622282 17538 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:28:01.622292 17538 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:28:01.622565 17538 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:28:01.622582 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.622587 17538 net.cpp:165] Memory required for data: 1331713500\nI0817 16:28:01.622598 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:01.622607 17538 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:28:01.622613 17538 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:28:01.622622 17538 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.622678 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:01.622841 17538 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:28:01.622854 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.622859 17538 net.cpp:165] Memory required for data: 1333761500\nI0817 16:28:01.622869 17538 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:28:01.622880 17538 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:28:01.622887 17538 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:28:01.622895 17538 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:01.622902 17538 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:28:01.622939 17538 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:28:01.622956 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.622961 17538 net.cpp:165] Memory required for data: 1335809500\nI0817 16:28:01.622967 17538 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0817 16:28:01.622974 17538 net.cpp:100] Creating Layer L3_b4_relu\nI0817 16:28:01.622980 17538 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:28:01.622987 17538 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:28:01.622997 17538 net.cpp:150] Setting up L3_b4_relu\nI0817 16:28:01.623004 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.623008 17538 net.cpp:165] Memory required for data: 1337857500\nI0817 16:28:01.623013 17538 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.623020 17538 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.623026 17538 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:28:01.623036 17538 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:01.623046 17538 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:01.623092 17538 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.623111 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.623117 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.623122 17538 net.cpp:165] Memory required for data: 1341953500\nI0817 16:28:01.623128 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:28:01.623150 17538 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:28:01.623157 17538 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:01.623167 17538 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:28:01.624197 17538 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:28:01.624212 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.624217 17538 net.cpp:165] Memory required for data: 1344001500\nI0817 16:28:01.624227 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 
16:28:01.624238 17538 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 16:28:01.624245 17538 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:28:01.624253 17538 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:28:01.625502 17538 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:28:01.625522 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.625529 17538 net.cpp:165] Memory required for data: 1346049500\nI0817 16:28:01.625541 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:01.625553 17538 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:28:01.625561 17538 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:28:01.625569 17538 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.625629 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:01.625787 17538 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:28:01.625800 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.625805 17538 net.cpp:165] Memory required for data: 1348097500\nI0817 16:28:01.625815 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:28:01.625826 17538 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:28:01.625833 17538 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:28:01.625839 17538 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.625849 17538 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:28:01.625856 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.625861 17538 net.cpp:165] Memory required for data: 1350145500\nI0817 16:28:01.625866 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:28:01.625880 17538 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:28:01.625887 17538 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:28:01.625896 17538 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:28:01.627905 17538 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0817 16:28:01.627925 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.627931 17538 net.cpp:165] Memory required for data: 1352193500\nI0817 16:28:01.627940 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:28:01.627950 17538 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:28:01.627957 17538 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:28:01.627972 17538 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:28:01.628239 17538 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:28:01.628257 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628262 17538 net.cpp:165] Memory required for data: 1354241500\nI0817 16:28:01.628273 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:01.628283 17538 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:28:01.628289 17538 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:28:01.628298 17538 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.628355 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:01.628504 17538 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:28:01.628517 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628523 17538 net.cpp:165] Memory required for data: 1356289500\nI0817 16:28:01.628532 17538 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:28:01.628545 17538 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:28:01.628551 17538 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:28:01.628558 17538 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:01.628566 17538 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:28:01.628602 17538 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:28:01.628613 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628618 17538 net.cpp:165] Memory required for data: 1358337500\nI0817 
16:28:01.628623 17538 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 16:28:01.628631 17538 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:28:01.628636 17538 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:28:01.628644 17538 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.628654 17538 net.cpp:150] Setting up L3_b5_relu\nI0817 16:28:01.628660 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628665 17538 net.cpp:165] Memory required for data: 1360385500\nI0817 16:28:01.628670 17538 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.628677 17538 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.628682 17538 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:28:01.628693 17538 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:01.628703 17538 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:01.628748 17538 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.628759 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628767 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.628772 17538 net.cpp:165] Memory required for data: 1364481500\nI0817 16:28:01.628777 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:28:01.628792 17538 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:28:01.628798 17538 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:01.628808 17538 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:28:01.629825 17538 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:28:01.629842 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.629847 17538 net.cpp:165] Memory required for data: 1366529500\nI0817 16:28:01.629856 
17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:28:01.629876 17538 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:28:01.629884 17538 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:28:01.629892 17538 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:28:01.630156 17538 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:28:01.630170 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.630175 17538 net.cpp:165] Memory required for data: 1368577500\nI0817 16:28:01.630187 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:01.630198 17538 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:28:01.630204 17538 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:28:01.630213 17538 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.630273 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:01.630429 17538 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:28:01.630442 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.630448 17538 net.cpp:165] Memory required for data: 1370625500\nI0817 16:28:01.630457 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:28:01.630468 17538 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:28:01.630475 17538 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:28:01.630482 17538 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.630493 17538 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:28:01.630501 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.630506 17538 net.cpp:165] Memory required for data: 1372673500\nI0817 16:28:01.630511 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:28:01.630524 17538 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:28:01.630532 17538 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:28:01.630542 17538 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0817 16:28:01.631549 17538 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 16:28:01.631564 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.631569 17538 net.cpp:165] Memory required for data: 1374721500\nI0817 16:28:01.631578 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:28:01.631588 17538 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:28:01.631594 17538 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:28:01.631605 17538 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:28:01.631866 17538 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:28:01.631882 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.631887 17538 net.cpp:165] Memory required for data: 1376769500\nI0817 16:28:01.631898 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:01.631907 17538 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:28:01.631913 17538 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:28:01.631922 17538 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.631978 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:01.632138 17538 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:28:01.632153 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.632158 17538 net.cpp:165] Memory required for data: 1378817500\nI0817 16:28:01.632166 17538 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:28:01.632179 17538 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:28:01.632185 17538 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:28:01.632192 17538 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:01.632200 17538 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:28:01.632239 17538 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:28:01.632252 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.632256 
17538 net.cpp:165] Memory required for data: 1380865500\nI0817 16:28:01.632262 17538 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:28:01.632269 17538 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:28:01.632275 17538 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:28:01.632289 17538 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.632300 17538 net.cpp:150] Setting up L3_b6_relu\nI0817 16:28:01.632308 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.632313 17538 net.cpp:165] Memory required for data: 1382913500\nI0817 16:28:01.632318 17538 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.632324 17538 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.632330 17538 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:28:01.632340 17538 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:01.632350 17538 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:01.632396 17538 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.632407 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.632414 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.632419 17538 net.cpp:165] Memory required for data: 1387009500\nI0817 16:28:01.632424 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:28:01.632439 17538 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:28:01.632447 17538 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:01.632455 17538 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:28:01.633476 17538 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:28:01.633491 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.633496 17538 net.cpp:165] 
Memory required for data: 1389057500\nI0817 16:28:01.633505 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:28:01.633517 17538 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:28:01.633524 17538 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:28:01.633533 17538 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:28:01.633792 17538 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:28:01.633806 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.633811 17538 net.cpp:165] Memory required for data: 1391105500\nI0817 16:28:01.633821 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:01.633833 17538 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:28:01.633841 17538 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:28:01.633847 17538 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.633906 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:01.634060 17538 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:28:01.634073 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.634078 17538 net.cpp:165] Memory required for data: 1393153500\nI0817 16:28:01.634088 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:28:01.634126 17538 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:28:01.634137 17538 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:28:01.634145 17538 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.634155 17538 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:28:01.634163 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.634168 17538 net.cpp:165] Memory required for data: 1395201500\nI0817 16:28:01.634174 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:28:01.634186 17538 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:28:01.634191 17538 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:28:01.634201 
17538 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 16:28:01.635227 17538 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:28:01.635242 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.635248 17538 net.cpp:165] Memory required for data: 1397249500\nI0817 16:28:01.635257 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:28:01.635269 17538 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:28:01.635283 17538 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:28:01.635293 17538 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:28:01.635553 17538 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:28:01.635566 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.635571 17538 net.cpp:165] Memory required for data: 1399297500\nI0817 16:28:01.635582 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:01.635594 17538 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:28:01.635601 17538 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:28:01.635608 17538 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.635668 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:01.635823 17538 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:28:01.635836 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.635841 17538 net.cpp:165] Memory required for data: 1401345500\nI0817 16:28:01.635851 17538 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:28:01.635864 17538 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:28:01.635871 17538 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:28:01.635879 17538 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:01.635886 17538 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:28:01.635922 17538 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:28:01.635934 17538 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0817 16:28:01.635939 17538 net.cpp:165] Memory required for data: 1403393500\nI0817 16:28:01.635944 17538 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:28:01.635952 17538 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:28:01.635958 17538 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:28:01.635968 17538 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.635978 17538 net.cpp:150] Setting up L3_b7_relu\nI0817 16:28:01.635985 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.635990 17538 net.cpp:165] Memory required for data: 1405441500\nI0817 16:28:01.635995 17538 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.636003 17538 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.636008 17538 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:28:01.636015 17538 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:01.636024 17538 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:01.636073 17538 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.636085 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.636092 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.636097 17538 net.cpp:165] Memory required for data: 1409537500\nI0817 16:28:01.636107 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:28:01.636123 17538 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:28:01.636131 17538 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:01.636139 17538 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:28:01.637152 17538 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:28:01.637167 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:28:01.637172 17538 net.cpp:165] Memory required for data: 1411585500\nI0817 16:28:01.637181 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:28:01.637193 17538 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:28:01.637200 17538 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:28:01.637208 17538 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:28:01.637477 17538 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:28:01.637490 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.637496 17538 net.cpp:165] Memory required for data: 1413633500\nI0817 16:28:01.637513 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:01.637523 17538 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:28:01.637529 17538 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:28:01.637537 17538 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.637598 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:01.637753 17538 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:28:01.637766 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.637771 17538 net.cpp:165] Memory required for data: 1415681500\nI0817 16:28:01.637781 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:28:01.637789 17538 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:28:01.637795 17538 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:28:01.637802 17538 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.637815 17538 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:28:01.637823 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.637827 17538 net.cpp:165] Memory required for data: 1417729500\nI0817 16:28:01.637832 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:28:01.637845 17538 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:28:01.637850 17538 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0817 16:28:01.637861 17538 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:28:01.638880 17538 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:28:01.638895 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.638900 17538 net.cpp:165] Memory required for data: 1419777500\nI0817 16:28:01.638909 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:28:01.638921 17538 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:28:01.638928 17538 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:28:01.638937 17538 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:28:01.639204 17538 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:28:01.639219 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639223 17538 net.cpp:165] Memory required for data: 1421825500\nI0817 16:28:01.639235 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:01.639246 17538 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:28:01.639252 17538 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:28:01.639261 17538 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.639319 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:01.639477 17538 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:28:01.639492 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639497 17538 net.cpp:165] Memory required for data: 1423873500\nI0817 16:28:01.639505 17538 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:28:01.639518 17538 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:28:01.639524 17538 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:28:01.639531 17538 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:01.639541 17538 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:28:01.639575 17538 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 
16:28:01.639586 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639591 17538 net.cpp:165] Memory required for data: 1425921500\nI0817 16:28:01.639596 17538 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:28:01.639607 17538 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:28:01.639613 17538 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:28:01.639621 17538 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.639631 17538 net.cpp:150] Setting up L3_b8_relu\nI0817 16:28:01.639637 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639642 17538 net.cpp:165] Memory required for data: 1427969500\nI0817 16:28:01.639647 17538 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.639662 17538 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.639667 17538 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:28:01.639675 17538 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:01.639685 17538 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:01.639734 17538 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.639746 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639753 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.639758 17538 net.cpp:165] Memory required for data: 1432065500\nI0817 16:28:01.639763 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:28:01.639778 17538 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:28:01.639786 17538 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:01.639794 17538 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:28:01.641892 17538 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:28:01.641911 
17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.641918 17538 net.cpp:165] Memory required for data: 1434113500\nI0817 16:28:01.641928 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:28:01.641942 17538 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:28:01.641949 17538 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:28:01.641958 17538 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:28:01.642254 17538 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:28:01.642271 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.642276 17538 net.cpp:165] Memory required for data: 1436161500\nI0817 16:28:01.642287 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:01.642298 17538 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:28:01.642305 17538 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:28:01.642316 17538 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.642383 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:01.642575 17538 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:28:01.642590 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.642596 17538 net.cpp:165] Memory required for data: 1438209500\nI0817 16:28:01.642606 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:28:01.642628 17538 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:28:01.642638 17538 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:28:01.642650 17538 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.642662 17538 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:28:01.642669 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.642674 17538 net.cpp:165] Memory required for data: 1440257500\nI0817 16:28:01.642679 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:28:01.642693 17538 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 
16:28:01.642704 17538 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0817 16:28:01.642712 17538 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:28:01.643841 17538 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:28:01.643857 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.643877 17538 net.cpp:165] Memory required for data: 1442305500\nI0817 16:28:01.643888 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:28:01.643901 17538 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:28:01.643909 17538 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:28:01.643918 17538 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:28:01.644245 17538 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:28:01.644260 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.644265 17538 net.cpp:165] Memory required for data: 1444353500\nI0817 16:28:01.644292 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:01.644304 17538 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:28:01.644309 17538 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:28:01.644317 17538 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.644379 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:01.644533 17538 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:28:01.644547 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.644552 17538 net.cpp:165] Memory required for data: 1446401500\nI0817 16:28:01.644562 17538 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:28:01.644572 17538 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:28:01.644577 17538 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:28:01.644584 17538 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:01.644595 17538 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:28:01.644629 17538 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 16:28:01.644644 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.644649 17538 net.cpp:165] Memory required for data: 1448449500\nI0817 16:28:01.644654 17538 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:28:01.644664 17538 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:28:01.644668 17538 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:28:01.644676 17538 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.644685 17538 net.cpp:150] Setting up L3_b9_relu\nI0817 16:28:01.644692 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.644697 17538 net.cpp:165] Memory required for data: 1450497500\nI0817 16:28:01.644702 17538 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:28:01.644713 17538 net.cpp:100] Creating Layer post_pool\nI0817 16:28:01.644719 17538 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:28:01.644727 17538 net.cpp:408] post_pool -> post_pool\nI0817 16:28:01.644762 17538 net.cpp:150] Setting up post_pool\nI0817 16:28:01.644770 17538 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:28:01.644775 17538 net.cpp:165] Memory required for data: 1450529500\nI0817 16:28:01.644780 17538 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:28:01.644865 17538 net.cpp:100] Creating Layer post_FC\nI0817 16:28:01.644879 17538 net.cpp:434] post_FC <- post_pool\nI0817 16:28:01.644894 17538 net.cpp:408] post_FC -> post_FC_top\nI0817 16:28:01.645148 17538 net.cpp:150] Setting up post_FC\nI0817 16:28:01.645164 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.645170 17538 net.cpp:165] Memory required for data: 1450534500\nI0817 16:28:01.645179 17538 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:28:01.645189 17538 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:28:01.645195 17538 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:28:01.645205 17538 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:28:01.645216 17538 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:28:01.645267 17538 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:28:01.645278 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.645285 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.645290 17538 net.cpp:165] Memory required for data: 1450544500\nI0817 16:28:01.645295 17538 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:28:01.645339 17538 net.cpp:100] Creating Layer accuracy\nI0817 16:28:01.645349 17538 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:28:01.645357 17538 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:28:01.645365 17538 net.cpp:408] accuracy -> accuracy\nI0817 16:28:01.645409 17538 net.cpp:150] Setting up accuracy\nI0817 16:28:01.645422 17538 net.cpp:157] Top shape: (1)\nI0817 16:28:01.645427 17538 net.cpp:165] Memory required for data: 1450544504\nI0817 16:28:01.645440 17538 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:01.645452 17538 net.cpp:100] Creating Layer loss\nI0817 16:28:01.645459 17538 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:28:01.645467 17538 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:28:01.645474 17538 net.cpp:408] loss -> loss\nI0817 16:28:01.645527 17538 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:01.645686 17538 net.cpp:150] Setting up loss\nI0817 16:28:01.645701 17538 net.cpp:157] Top shape: (1)\nI0817 16:28:01.645706 17538 net.cpp:160]     with loss weight 1\nI0817 16:28:01.645782 17538 net.cpp:165] Memory required for data: 1450544508\nI0817 16:28:01.645792 17538 net.cpp:226] loss needs backward computation.\nI0817 16:28:01.645797 17538 net.cpp:228] accuracy does not need backward computation.\nI0817 16:28:01.645804 17538 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:28:01.645809 17538 net.cpp:226] 
post_FC needs backward computation.\nI0817 16:28:01.645814 17538 net.cpp:226] post_pool needs backward computation.\nI0817 16:28:01.645819 17538 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:28:01.645824 17538 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.645830 17538 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.645835 17538 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.645840 17538 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.645845 17538 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.645850 17538 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.645855 17538 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.645860 17538 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.645865 17538 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:28:01.645871 17538 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:28:01.645876 17538 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.645881 17538 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.645886 17538 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.645891 17538 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.645897 17538 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.645901 17538 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:28:01.645906 17538 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.645912 17538 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.645917 17538 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:28:01.645922 17538 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:28:01.645927 17538 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0817 16:28:01.645933 17538 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.645938 17538 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.645943 17538 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.645948 17538 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.645953 17538 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.645957 17538 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.645963 17538 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.645968 17538 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:28:01.645973 17538 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:28:01.645978 17538 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.645984 17538 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.645989 17538 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.645994 17538 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.646010 17538 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.646016 17538 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.646021 17538 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.646026 17538 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.646033 17538 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:28:01.646037 17538 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:28:01.646042 17538 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:28:01.646049 17538 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.646054 17538 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.646059 
17538 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.646064 17538 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.646069 17538 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.646073 17538 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.646080 17538 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.646085 17538 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:28:01.646090 17538 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:28:01.646095 17538 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.646100 17538 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.646126 17538 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.646132 17538 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.646137 17538 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.646142 17538 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.646147 17538 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.646153 17538 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.646158 17538 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:28:01.646163 17538 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:28:01.646168 17538 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:28:01.646174 17538 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.646180 17538 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.646185 17538 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.646190 17538 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.646195 17538 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0817 16:28:01.646200 17538 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.646205 17538 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.646210 17538 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:28:01.646216 17538 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:28:01.646221 17538 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.646227 17538 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.646232 17538 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.646237 17538 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.646242 17538 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.646248 17538 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.646253 17538 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.646258 17538 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.646263 17538 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:28:01.646270 17538 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:28:01.646275 17538 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:28:01.646286 17538 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:28:01.646291 17538 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.646298 17538 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:28:01.646303 17538 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.646308 17538 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.646314 17538 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.646319 17538 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.646324 17538 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0817 16:28:01.646329 17538 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.646334 17538 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.646340 17538 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:28:01.646345 17538 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:28:01.646350 17538 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.646356 17538 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.646361 17538 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.646368 17538 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.646373 17538 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.646378 17538 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.646383 17538 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.646389 17538 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.646396 17538 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:28:01.646404 17538 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:28:01.646409 17538 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.646414 17538 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.646420 17538 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.646425 17538 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.646430 17538 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.646436 17538 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:28:01.646441 17538 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.646446 17538 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.646452 17538 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:28:01.646457 17538 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:28:01.646463 17538 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:28:01.646469 17538 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.646474 17538 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.646479 17538 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.646484 17538 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.646491 17538 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.646495 17538 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.646500 17538 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.646507 17538 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:28:01.646512 17538 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:28:01.646517 17538 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.646522 17538 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.646528 17538 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.646533 17538 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.646538 17538 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.646543 17538 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.646553 17538 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.646559 17538 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.646564 17538 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:28:01.646570 17538 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:28:01.646575 17538 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:28:01.646584 17538 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.646589 17538 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.646595 17538 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.646600 17538 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.646605 17538 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.646611 17538 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.646616 17538 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.646622 17538 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:28:01.646627 17538 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:28:01.646633 17538 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.646639 17538 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.646644 17538 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.646649 17538 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.646656 17538 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.646661 17538 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.646667 17538 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.646672 17538 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.646677 17538 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:28:01.646683 17538 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:28:01.646688 17538 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:28:01.646694 17538 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.646700 17538 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.646705 17538 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.646711 17538 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.646716 17538 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:28:01.646723 17538 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.646728 17538 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.646733 17538 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:28:01.646739 17538 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:28:01.646744 17538 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.646750 17538 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.646756 17538 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.646761 17538 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.646767 17538 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.646772 17538 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.646777 17538 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.646783 17538 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.646790 17538 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:28:01.646795 17538 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:28:01.646801 17538 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:28:01.646806 17538 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:28:01.646811 17538 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.646823 17538 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:28:01.646829 17538 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.646834 17538 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.646841 17538 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.646847 17538 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.646852 17538 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:28:01.646857 17538 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.646863 17538 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.646869 17538 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:28:01.646874 17538 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:28:01.646880 17538 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.646886 17538 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.646893 17538 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.646898 17538 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.646903 17538 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.646908 17538 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.646914 17538 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.646919 17538 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.646924 17538 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:28:01.646930 17538 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:28:01.646935 17538 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.646941 17538 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.646947 17538 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.646953 17538 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.646958 17538 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.646965 17538 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:28:01.646970 17538 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.646975 17538 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.646981 17538 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:28:01.646987 17538 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:28:01.646993 17538 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:28:01.646999 17538 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.647004 17538 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.647011 17538 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.647017 17538 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.647022 17538 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.647027 17538 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.647032 17538 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.647038 17538 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:28:01.647044 17538 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:28:01.647050 17538 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.647056 17538 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.647061 17538 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.647068 17538 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.647076 17538 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.647083 17538 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.647089 17538 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.647099 17538 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.647114 17538 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:28:01.647119 17538 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:28:01.647125 17538 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:28:01.647131 17538 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.647137 17538 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.647143 17538 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.647150 17538 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.647155 17538 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.647161 17538 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.647166 17538 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.647171 17538 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:28:01.647177 17538 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:28:01.647183 17538 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.647189 17538 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.647195 17538 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.647202 17538 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.647207 17538 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.647212 17538 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.647217 17538 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.647223 17538 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.647229 17538 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:28:01.647235 17538 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:28:01.647241 17538 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:28:01.647248 17538 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.647253 17538 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.647264 17538 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.647269 17538 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.647274 17538 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:28:01.647280 17538 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.647286 17538 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.647292 17538 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:28:01.647298 17538 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:28:01.647303 17538 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.647310 17538 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.647316 17538 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.647321 17538 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.647327 17538 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.647332 17538 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.647338 17538 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.647344 17538 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.647351 17538 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:28:01.647356 17538 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:28:01.647361 17538 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.647368 17538 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.647373 17538 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.647379 17538 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.647390 17538 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.647397 17538 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:28:01.647403 17538 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.647408 17538 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.647414 17538 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:28:01.647420 17538 net.cpp:226] pre_relu needs backward computation.\nI0817 16:28:01.647425 17538 net.cpp:226] pre_scale needs backward computation.\nI0817 16:28:01.647430 17538 net.cpp:226] pre_bn needs backward computation.\nI0817 16:28:01.647436 17538 net.cpp:226] pre_conv needs backward computation.\nI0817 16:28:01.647442 17538 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:28:01.647449 17538 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:28:01.647454 17538 net.cpp:270] This network produces output accuracy\nI0817 16:28:01.647460 17538 net.cpp:270] This network produces output loss\nI0817 16:28:01.647825 17538 net.cpp:283] Network initialization done.\nI0817 16:28:01.657086 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:01.657131 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:01.657181 17538 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:28:01.657557 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:28:01.657575 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:28:01.657585 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:28:01.657595 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:28:01.657605 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:28:01.657613 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:28:01.657624 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:28:01.657632 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:28:01.657641 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:28:01.657650 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:28:01.657660 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:28:01.657668 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:28:01.657678 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:28:01.657686 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:28:01.657696 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:28:01.657704 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:28:01.657713 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:28:01.657722 17538 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:28:01.657732 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:28:01.657749 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:28:01.657759 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:28:01.657768 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:28:01.657780 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:28:01.657790 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:28:01.657799 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:28:01.657807 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:28:01.657816 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:28:01.657825 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:28:01.657833 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:28:01.657842 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:28:01.657851 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:28:01.657860 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:28:01.657869 17538 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:28:01.657877 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:28:01.657886 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:28:01.657894 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:28:01.657903 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:28:01.657912 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:28:01.657922 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:28:01.657929 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:28:01.657941 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:28:01.657950 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:28:01.657958 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:28:01.657968 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:28:01.657976 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:28:01.657984 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:28:01.657994 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:28:01.658001 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:28:01.658011 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:28:01.658027 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:28:01.658037 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:28:01.658046 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:28:01.658054 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:28:01.658063 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:28:01.658072 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:28:01.658080 17538 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:28:01.659765 17538 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0817 16:28:01.661386 17538 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:28:01.661595 17538 net.cpp:100] Creating Layer dataLayer\nI0817 16:28:01.661617 17538 net.cpp:408] dataLayer -> data_top\nI0817 16:28:01.661633 17538 net.cpp:408] dataLayer -> label\nI0817 16:28:01.661644 17538 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:28:01.672415 17545 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:28:01.672669 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:01.679985 17538 net.cpp:150] Setting up dataLayer\nI0817 16:28:01.680021 17538 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:28:01.680032 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.680037 17538 net.cpp:165] Memory required for data: 1536500\nI0817 16:28:01.680043 17538 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:28:01.680054 17538 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:28:01.680060 17538 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:28:01.680089 17538 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:28:01.680110 17538 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:28:01.680250 17538 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:28:01.680276 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.680285 17538 net.cpp:157] Top shape: 125 (125)\nI0817 16:28:01.680290 17538 net.cpp:165] Memory required for data: 1537500\nI0817 16:28:01.680296 17538 layer_factory.hpp:77] Creating layer 
pre_conv\nI0817 16:28:01.680312 17538 net.cpp:100] Creating Layer pre_conv\nI0817 16:28:01.680320 17538 net.cpp:434] pre_conv <- data_top\nI0817 16:28:01.680331 17538 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:28:01.680754 17538 net.cpp:150] Setting up pre_conv\nI0817 16:28:01.680783 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.680789 17538 net.cpp:165] Memory required for data: 9729500\nI0817 16:28:01.680805 17538 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:28:01.680819 17538 net.cpp:100] Creating Layer pre_bn\nI0817 16:28:01.680826 17538 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:28:01.680835 17538 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:28:01.681186 17538 net.cpp:150] Setting up pre_bn\nI0817 16:28:01.681205 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.681210 17538 net.cpp:165] Memory required for data: 17921500\nI0817 16:28:01.681227 17538 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:01.681237 17538 net.cpp:100] Creating Layer pre_scale\nI0817 16:28:01.681243 17538 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:28:01.681254 17538 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:28:01.681318 17538 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:28:01.681502 17538 net.cpp:150] Setting up pre_scale\nI0817 16:28:01.681520 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.681526 17538 net.cpp:165] Memory required for data: 26113500\nI0817 16:28:01.681536 17538 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:28:01.681545 17538 net.cpp:100] Creating Layer pre_relu\nI0817 16:28:01.681550 17538 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:28:01.681589 17538 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:28:01.681602 17538 net.cpp:150] Setting up pre_relu\nI0817 16:28:01.681609 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.681614 17538 net.cpp:165] Memory required for data: 34305500\nI0817 16:28:01.681622 
17538 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:28:01.681630 17538 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:28:01.681635 17538 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:28:01.681659 17538 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:28:01.681673 17538 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:28:01.681722 17538 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:28:01.681737 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.681746 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.681749 17538 net.cpp:165] Memory required for data: 50689500\nI0817 16:28:01.681756 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:28:01.681778 17538 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:28:01.681787 17538 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:28:01.681797 17538 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:28:01.682240 17538 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:28:01.682256 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.682262 17538 net.cpp:165] Memory required for data: 58881500\nI0817 16:28:01.682274 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:28:01.682288 17538 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:28:01.682296 17538 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:28:01.682307 17538 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:28:01.682884 17538 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:28:01.682900 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.682905 17538 net.cpp:165] Memory required for data: 67073500\nI0817 16:28:01.682919 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:01.682930 17538 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 
16:28:01.682937 17538 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:28:01.682948 17538 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.683018 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:28:01.683208 17538 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:28:01.683224 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.683238 17538 net.cpp:165] Memory required for data: 75265500\nI0817 16:28:01.683248 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:28:01.683256 17538 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:28:01.683265 17538 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:28:01.683274 17538 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.683284 17538 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:28:01.683290 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.683296 17538 net.cpp:165] Memory required for data: 83457500\nI0817 16:28:01.683300 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:28:01.683316 17538 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:28:01.683322 17538 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:28:01.683336 17538 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:28:01.683734 17538 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:28:01.683751 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.683758 17538 net.cpp:165] Memory required for data: 91649500\nI0817 16:28:01.683766 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:28:01.683784 17538 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:28:01.683792 17538 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:28:01.683804 17538 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:28:01.684125 17538 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:28:01.684142 17538 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:01.684147 17538 net.cpp:165] Memory required for data: 99841500\nI0817 16:28:01.684165 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:01.684178 17538 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:28:01.684185 17538 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:28:01.684193 17538 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.684260 17538 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:28:01.684463 17538 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:28:01.684482 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.684489 17538 net.cpp:165] Memory required for data: 108033500\nI0817 16:28:01.684499 17538 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:28:01.684507 17538 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:28:01.684514 17538 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:28:01.684523 17538 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:28:01.684531 17538 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:28:01.684574 17538 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:28:01.684587 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.684593 17538 net.cpp:165] Memory required for data: 116225500\nI0817 16:28:01.684598 17538 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:28:01.684609 17538 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:28:01.684617 17538 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:28:01.684623 17538 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.684633 17538 net.cpp:150] Setting up L1_b1_relu\nI0817 16:28:01.684639 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.684644 17538 net.cpp:165] Memory required for data: 124417500\nI0817 16:28:01.684653 17538 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.684664 17538 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.684670 17538 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:28:01.684679 17538 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:01.684690 17538 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:01.684747 17538 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:28:01.684767 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.684773 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.684778 17538 net.cpp:165] Memory required for data: 140801500\nI0817 16:28:01.684787 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:28:01.684798 17538 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:28:01.684804 17538 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:28:01.684821 17538 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:28:01.685263 17538 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:28:01.685279 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.685284 17538 net.cpp:165] Memory required for data: 148993500\nI0817 16:28:01.685298 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:28:01.685308 17538 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:28:01.685314 17538 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:28:01.685322 17538 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:28:01.685642 17538 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:28:01.685655 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.685660 17538 net.cpp:165] Memory required for data: 157185500\nI0817 16:28:01.685674 17538 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:28:01.685686 17538 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:28:01.685693 17538 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:28:01.685704 17538 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.685786 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:28:01.686175 17538 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:28:01.686193 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.686199 17538 net.cpp:165] Memory required for data: 165377500\nI0817 16:28:01.686209 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:28:01.686219 17538 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:28:01.686226 17538 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:28:01.686238 17538 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.686250 17538 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:28:01.686259 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.686264 17538 net.cpp:165] Memory required for data: 173569500\nI0817 16:28:01.686269 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:28:01.686285 17538 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:28:01.686290 17538 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:28:01.686300 17538 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:28:01.686693 17538 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:28:01.686712 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.686717 17538 net.cpp:165] Memory required for data: 181761500\nI0817 16:28:01.686725 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:28:01.686743 17538 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:28:01.686750 17538 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:28:01.686758 17538 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:28:01.687086 17538 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:28:01.687108 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687116 17538 net.cpp:165] Memory required for data: 189953500\nI0817 16:28:01.687134 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:01.687149 17538 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:28:01.687156 17538 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:28:01.687167 17538 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.687234 17538 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:28:01.687420 17538 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:28:01.687436 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687450 17538 net.cpp:165] Memory required for data: 198145500\nI0817 16:28:01.687460 17538 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:28:01.687472 17538 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:28:01.687479 17538 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:28:01.687486 17538 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:28:01.687496 17538 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:28:01.687541 17538 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:28:01.687554 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687559 17538 net.cpp:165] Memory required for data: 206337500\nI0817 16:28:01.687567 17538 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:28:01.687577 17538 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:28:01.687582 17538 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:28:01.687592 17538 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:28:01.687602 17538 net.cpp:150] Setting up L1_b2_relu\nI0817 16:28:01.687609 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687614 17538 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:28:01.687619 17538 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.687628 17538 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.687634 17538 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:28:01.687643 17538 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:01.687652 17538 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:01.687710 17538 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:28:01.687721 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687726 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.687734 17538 net.cpp:165] Memory required for data: 230913500\nI0817 16:28:01.687741 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:28:01.687750 17538 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:28:01.687757 17538 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:28:01.687769 17538 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:28:01.688177 17538 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:28:01.688194 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.688200 17538 net.cpp:165] Memory required for data: 239105500\nI0817 16:28:01.688210 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:28:01.688220 17538 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:28:01.688226 17538 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:28:01.688239 17538 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:28:01.688549 17538 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:28:01.688562 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.688570 17538 net.cpp:165] Memory required for data: 
247297500\nI0817 16:28:01.688582 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:01.688594 17538 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:28:01.688601 17538 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:28:01.688612 17538 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.688674 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:28:01.688858 17538 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:28:01.688871 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.688876 17538 net.cpp:165] Memory required for data: 255489500\nI0817 16:28:01.688889 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:28:01.688900 17538 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:28:01.688907 17538 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:28:01.688915 17538 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.688930 17538 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:28:01.688941 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.688946 17538 net.cpp:165] Memory required for data: 263681500\nI0817 16:28:01.688951 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:28:01.688966 17538 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:28:01.688976 17538 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:28:01.688987 17538 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:28:01.689579 17538 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:28:01.689594 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.689600 17538 net.cpp:165] Memory required for data: 271873500\nI0817 16:28:01.689612 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:28:01.689628 17538 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:28:01.689635 17538 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:28:01.689646 17538 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:28:01.689977 17538 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:28:01.689993 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.689998 17538 net.cpp:165] Memory required for data: 280065500\nI0817 16:28:01.690011 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:01.690024 17538 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:28:01.690032 17538 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:28:01.690039 17538 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.690119 17538 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:28:01.690304 17538 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:28:01.690316 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.690322 17538 net.cpp:165] Memory required for data: 288257500\nI0817 16:28:01.690331 17538 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:28:01.690346 17538 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:28:01.690353 17538 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:28:01.690361 17538 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:28:01.690368 17538 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:28:01.690409 17538 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:28:01.690421 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.690428 17538 net.cpp:165] Memory required for data: 296449500\nI0817 16:28:01.690433 17538 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:28:01.690440 17538 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:28:01.690446 17538 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:28:01.690457 17538 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.690467 17538 net.cpp:150] Setting up L1_b3_relu\nI0817 16:28:01.690476 17538 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:28:01.690482 17538 net.cpp:165] Memory required for data: 304641500\nI0817 16:28:01.690487 17538 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.690495 17538 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.690500 17538 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:28:01.690506 17538 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:01.690520 17538 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:01.690577 17538 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:28:01.690592 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.690598 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.690603 17538 net.cpp:165] Memory required for data: 321025500\nI0817 16:28:01.690609 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:28:01.690627 17538 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:28:01.690634 17538 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:28:01.690651 17538 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:28:01.691009 17538 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:28:01.691023 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.691028 17538 net.cpp:165] Memory required for data: 329217500\nI0817 16:28:01.691037 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:28:01.691046 17538 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:28:01.691052 17538 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:28:01.691061 17538 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:28:01.691339 17538 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:28:01.691354 17538 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:28:01.691359 17538 net.cpp:165] Memory required for data: 337409500\nI0817 16:28:01.691370 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:01.691381 17538 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:28:01.691387 17538 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:28:01.691395 17538 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.691455 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:28:01.691637 17538 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:28:01.691653 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.691658 17538 net.cpp:165] Memory required for data: 345601500\nI0817 16:28:01.691668 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:28:01.691675 17538 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:28:01.691681 17538 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:28:01.691691 17538 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.691702 17538 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:28:01.691709 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.691715 17538 net.cpp:165] Memory required for data: 353793500\nI0817 16:28:01.691720 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:28:01.691732 17538 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:28:01.691740 17538 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:28:01.691747 17538 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:28:01.692127 17538 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:28:01.692143 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692148 17538 net.cpp:165] Memory required for data: 361985500\nI0817 16:28:01.692157 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:28:01.692170 17538 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:28:01.692176 17538 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:28:01.692184 17538 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:28:01.692456 17538 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:28:01.692471 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692476 17538 net.cpp:165] Memory required for data: 370177500\nI0817 16:28:01.692486 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:01.692497 17538 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:28:01.692503 17538 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:28:01.692513 17538 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.692570 17538 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:28:01.692724 17538 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:28:01.692739 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692744 17538 net.cpp:165] Memory required for data: 378369500\nI0817 16:28:01.692752 17538 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:28:01.692764 17538 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:28:01.692771 17538 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:28:01.692777 17538 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:28:01.692792 17538 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:28:01.692831 17538 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:28:01.692842 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692847 17538 net.cpp:165] Memory required for data: 386561500\nI0817 16:28:01.692852 17538 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:28:01.692858 17538 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:28:01.692864 17538 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:28:01.692874 17538 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:28:01.692884 17538 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:28:01.692891 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692896 17538 net.cpp:165] Memory required for data: 394753500\nI0817 16:28:01.692901 17538 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.692909 17538 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.692914 17538 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:28:01.692920 17538 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:01.692930 17538 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:01.692978 17538 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:28:01.692991 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.692997 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.693002 17538 net.cpp:165] Memory required for data: 411137500\nI0817 16:28:01.693007 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:28:01.693018 17538 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:28:01.693024 17538 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:28:01.693037 17538 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:28:01.693397 17538 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:28:01.693413 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.693418 17538 net.cpp:165] Memory required for data: 419329500\nI0817 16:28:01.693469 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:28:01.693482 17538 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:28:01.693490 17538 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:28:01.693500 17538 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:28:01.693774 17538 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:28:01.693787 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.693794 17538 net.cpp:165] Memory required for data: 427521500\nI0817 16:28:01.693804 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:01.693815 17538 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:28:01.693821 17538 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:28:01.693830 17538 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.693887 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:28:01.694047 17538 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:28:01.694061 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.694066 17538 net.cpp:165] Memory required for data: 435713500\nI0817 16:28:01.694075 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:28:01.694083 17538 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:28:01.694089 17538 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:28:01.694099 17538 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.694118 17538 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:28:01.694125 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.694130 17538 net.cpp:165] Memory required for data: 443905500\nI0817 16:28:01.694135 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:28:01.694156 17538 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:28:01.694164 17538 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:28:01.694175 17538 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:28:01.694522 17538 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:28:01.694537 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.694542 17538 net.cpp:165] Memory required for data: 452097500\nI0817 16:28:01.694551 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:28:01.694561 17538 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:28:01.694566 17538 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:28:01.694574 17538 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:28:01.694850 17538 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:28:01.694864 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.694869 17538 net.cpp:165] Memory required for data: 460289500\nI0817 16:28:01.694880 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:01.694893 17538 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:28:01.694900 17538 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:28:01.694907 17538 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.694968 17538 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:28:01.695139 17538 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:28:01.695153 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695158 17538 net.cpp:165] Memory required for data: 468481500\nI0817 16:28:01.695168 17538 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:28:01.695176 17538 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:28:01.695183 17538 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:28:01.695189 17538 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:28:01.695200 17538 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:28:01.695235 17538 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:28:01.695250 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695255 17538 net.cpp:165] Memory required for data: 476673500\nI0817 16:28:01.695261 17538 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:28:01.695267 17538 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:28:01.695273 17538 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:28:01.695279 17538 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.695289 17538 net.cpp:150] Setting up L1_b5_relu\nI0817 16:28:01.695297 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695300 17538 net.cpp:165] Memory required for data: 484865500\nI0817 16:28:01.695305 17538 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.695315 17538 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.695320 17538 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:28:01.695328 17538 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:01.695338 17538 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:01.695387 17538 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:28:01.695399 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695406 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695411 17538 net.cpp:165] Memory required for data: 501249500\nI0817 16:28:01.695416 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:28:01.695426 17538 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:28:01.695433 17538 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:28:01.695446 17538 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:28:01.695803 17538 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:28:01.695824 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.695830 17538 net.cpp:165] Memory required for data: 509441500\nI0817 16:28:01.695839 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:28:01.695848 17538 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:28:01.695854 17538 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:28:01.695863 17538 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:28:01.696147 17538 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:28:01.696162 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.696167 17538 net.cpp:165] Memory required for data: 517633500\nI0817 16:28:01.696178 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:01.696192 17538 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:28:01.696197 17538 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:28:01.696205 17538 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.696266 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:28:01.696430 17538 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:28:01.696444 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.696450 17538 net.cpp:165] Memory required for data: 525825500\nI0817 16:28:01.696460 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:28:01.696467 17538 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:28:01.696473 17538 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:28:01.696483 17538 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.696493 17538 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:28:01.696501 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.696506 17538 net.cpp:165] Memory required for data: 534017500\nI0817 16:28:01.696511 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:28:01.696524 17538 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:28:01.696530 17538 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:28:01.696538 17538 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:28:01.696885 17538 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:28:01.696900 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.696905 17538 net.cpp:165] Memory required for data: 542209500\nI0817 16:28:01.696914 17538 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:28:01.696926 17538 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:28:01.696933 17538 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:28:01.696940 17538 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:28:01.697235 17538 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:28:01.697250 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697255 17538 net.cpp:165] Memory required for data: 550401500\nI0817 16:28:01.697266 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:01.697274 17538 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:28:01.697281 17538 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:28:01.697291 17538 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.697351 17538 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:28:01.697512 17538 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:28:01.697526 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697532 17538 net.cpp:165] Memory required for data: 558593500\nI0817 16:28:01.697541 17538 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:28:01.697559 17538 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:28:01.697566 17538 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:28:01.697573 17538 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:28:01.697584 17538 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:28:01.697619 17538 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:28:01.697629 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697634 17538 net.cpp:165] Memory required for data: 566785500\nI0817 16:28:01.697648 17538 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:28:01.697659 17538 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:28:01.697664 17538 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:28:01.697672 17538 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.697681 17538 net.cpp:150] Setting up L1_b6_relu\nI0817 16:28:01.697690 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697695 17538 net.cpp:165] Memory required for data: 574977500\nI0817 16:28:01.697698 17538 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.697705 17538 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.697710 17538 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:28:01.697717 17538 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:01.697726 17538 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:01.697778 17538 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:28:01.697790 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697798 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.697803 17538 net.cpp:165] Memory required for data: 591361500\nI0817 16:28:01.697808 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:28:01.697821 17538 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:28:01.697827 17538 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:28:01.697836 17538 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:28:01.698204 17538 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:28:01.698218 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.698225 17538 net.cpp:165] Memory required for data: 599553500\nI0817 16:28:01.698232 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:28:01.698247 17538 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:28:01.698253 17538 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:28:01.698262 17538 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:28:01.698539 17538 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:28:01.698556 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.698561 17538 net.cpp:165] Memory required for data: 607745500\nI0817 16:28:01.698572 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:01.698580 17538 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:28:01.698586 17538 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:28:01.698595 17538 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.698652 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:28:01.698813 17538 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:28:01.698827 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.698832 17538 net.cpp:165] Memory required for data: 615937500\nI0817 16:28:01.698842 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:28:01.698853 17538 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:28:01.698858 17538 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:28:01.698868 17538 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.698902 17538 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:28:01.698910 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.698915 17538 net.cpp:165] Memory required for data: 624129500\nI0817 16:28:01.698920 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:28:01.698931 17538 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:28:01.698936 17538 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:28:01.698948 17538 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:28:01.699333 17538 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:28:01.699357 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.699362 17538 
net.cpp:165] Memory required for data: 632321500\nI0817 16:28:01.699371 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:28:01.699380 17538 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:28:01.699388 17538 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:28:01.699398 17538 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:28:01.699674 17538 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:28:01.699689 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.699694 17538 net.cpp:165] Memory required for data: 640513500\nI0817 16:28:01.699704 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:01.699717 17538 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:28:01.699723 17538 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:28:01.699730 17538 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.699791 17538 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:28:01.699954 17538 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:28:01.699968 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.699973 17538 net.cpp:165] Memory required for data: 648705500\nI0817 16:28:01.699982 17538 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:28:01.699995 17538 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:28:01.700001 17538 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:28:01.700008 17538 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:28:01.700016 17538 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:28:01.700052 17538 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:28:01.700064 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700069 17538 net.cpp:165] Memory required for data: 656897500\nI0817 16:28:01.700075 17538 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:28:01.700083 17538 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:28:01.700088 17538 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:28:01.700098 17538 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.700117 17538 net.cpp:150] Setting up L1_b7_relu\nI0817 16:28:01.700124 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700129 17538 net.cpp:165] Memory required for data: 665089500\nI0817 16:28:01.700134 17538 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.700141 17538 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.700147 17538 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:28:01.700155 17538 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:01.700165 17538 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:01.700217 17538 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:28:01.700228 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700235 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700240 17538 net.cpp:165] Memory required for data: 681473500\nI0817 16:28:01.700245 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:28:01.700256 17538 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:28:01.700263 17538 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:28:01.700275 17538 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:28:01.700629 17538 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:28:01.700644 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700649 17538 net.cpp:165] Memory required for data: 689665500\nI0817 16:28:01.700657 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:28:01.700666 17538 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:28:01.700672 17538 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:28:01.700690 17538 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:28:01.700971 17538 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:28:01.700987 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.700994 17538 net.cpp:165] Memory required for data: 697857500\nI0817 16:28:01.701004 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:01.701012 17538 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:28:01.701019 17538 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:28:01.701025 17538 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.701083 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:28:01.701254 17538 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:28:01.701268 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.701273 17538 net.cpp:165] Memory required for data: 706049500\nI0817 16:28:01.701283 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:28:01.701294 17538 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:28:01.701300 17538 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:28:01.701308 17538 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.701318 17538 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:28:01.701325 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.701329 17538 net.cpp:165] Memory required for data: 714241500\nI0817 16:28:01.701334 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:28:01.701351 17538 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:28:01.701357 17538 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:28:01.701370 17538 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:28:01.701725 17538 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:28:01.701740 17538 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.701745 17538 net.cpp:165] Memory required for data: 722433500\nI0817 16:28:01.701755 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:28:01.701762 17538 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:28:01.701769 17538 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:28:01.701781 17538 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:28:01.702055 17538 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:28:01.702069 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702075 17538 net.cpp:165] Memory required for data: 730625500\nI0817 16:28:01.702085 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:01.702095 17538 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:28:01.702107 17538 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:28:01.702116 17538 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.702175 17538 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:28:01.702337 17538 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:28:01.702350 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702355 17538 net.cpp:165] Memory required for data: 738817500\nI0817 16:28:01.702364 17538 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:28:01.702376 17538 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:28:01.702383 17538 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:28:01.702390 17538 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:28:01.702397 17538 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:28:01.702435 17538 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:28:01.702447 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702452 17538 net.cpp:165] Memory required for data: 747009500\nI0817 16:28:01.702457 17538 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:28:01.702466 17538 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:28:01.702471 17538 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:28:01.702481 17538 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.702497 17538 net.cpp:150] Setting up L1_b8_relu\nI0817 16:28:01.702505 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702510 17538 net.cpp:165] Memory required for data: 755201500\nI0817 16:28:01.702515 17538 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.702522 17538 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.702527 17538 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:28:01.702534 17538 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:01.702544 17538 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:01.702595 17538 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:28:01.702608 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702615 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.702620 17538 net.cpp:165] Memory required for data: 771585500\nI0817 16:28:01.702625 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:28:01.702636 17538 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:28:01.702642 17538 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:28:01.702654 17538 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:28:01.703016 17538 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:28:01.703032 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.703037 17538 net.cpp:165] Memory required for data: 779777500\nI0817 16:28:01.703044 17538 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:28:01.703058 17538 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:28:01.703065 17538 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:28:01.703078 17538 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:28:01.703364 17538 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:28:01.703379 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.703384 17538 net.cpp:165] Memory required for data: 787969500\nI0817 16:28:01.703394 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:01.703403 17538 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:28:01.703409 17538 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:28:01.703418 17538 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.703478 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:28:01.703639 17538 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:28:01.703654 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.703658 17538 net.cpp:165] Memory required for data: 796161500\nI0817 16:28:01.703667 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:28:01.703678 17538 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:28:01.703685 17538 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:28:01.703692 17538 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.703702 17538 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:28:01.703709 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.703714 17538 net.cpp:165] Memory required for data: 804353500\nI0817 16:28:01.703719 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:28:01.703734 17538 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:28:01.703742 17538 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:28:01.703752 17538 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:28:01.704113 17538 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:28:01.704128 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.704133 17538 net.cpp:165] Memory required for data: 812545500\nI0817 16:28:01.704143 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:28:01.704154 17538 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:28:01.704160 17538 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:28:01.704175 17538 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:28:01.704460 17538 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:28:01.704473 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.704479 17538 net.cpp:165] Memory required for data: 820737500\nI0817 16:28:01.704514 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:01.704525 17538 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:28:01.704531 17538 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:28:01.704542 17538 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.704599 17538 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:28:01.704761 17538 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:28:01.704774 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.704779 17538 net.cpp:165] Memory required for data: 828929500\nI0817 16:28:01.704789 17538 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:28:01.704802 17538 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:28:01.704807 17538 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:28:01.704814 17538 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:28:01.704823 17538 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:28:01.704856 17538 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:28:01.704869 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.704874 17538 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:28:01.704879 17538 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:28:01.704886 17538 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:28:01.704892 17538 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:28:01.704902 17538 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.704912 17538 net.cpp:150] Setting up L1_b9_relu\nI0817 16:28:01.704921 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.704924 17538 net.cpp:165] Memory required for data: 845313500\nI0817 16:28:01.704929 17538 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.704939 17538 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.704946 17538 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:28:01.704952 17538 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:01.704962 17538 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:01.705013 17538 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:28:01.705024 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.705031 17538 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:28:01.705036 17538 net.cpp:165] Memory required for data: 861697500\nI0817 16:28:01.705041 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:28:01.705055 17538 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:28:01.705061 17538 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:28:01.705070 17538 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:28:01.705432 17538 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:28:01.705447 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.705453 17538 net.cpp:165] Memory required for data: 
863745500\nI0817 16:28:01.705461 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:28:01.705473 17538 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:28:01.705480 17538 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:28:01.705488 17538 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:28:01.705759 17538 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:28:01.705775 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.705780 17538 net.cpp:165] Memory required for data: 865793500\nI0817 16:28:01.705797 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:01.705806 17538 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:28:01.705813 17538 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:28:01.705821 17538 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.705879 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:28:01.706051 17538 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:28:01.706064 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.706069 17538 net.cpp:165] Memory required for data: 867841500\nI0817 16:28:01.706079 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:28:01.706086 17538 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:28:01.706094 17538 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:28:01.706112 17538 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.706125 17538 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:28:01.706132 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.706136 17538 net.cpp:165] Memory required for data: 869889500\nI0817 16:28:01.706142 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:28:01.706156 17538 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:28:01.706161 17538 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:28:01.706171 17538 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:28:01.706527 17538 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:28:01.706542 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.706547 17538 net.cpp:165] Memory required for data: 871937500\nI0817 16:28:01.706557 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:28:01.706569 17538 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:28:01.706576 17538 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:28:01.706585 17538 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:28:01.706856 17538 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:28:01.706871 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.706876 17538 net.cpp:165] Memory required for data: 873985500\nI0817 16:28:01.706885 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:01.706897 17538 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:28:01.706903 17538 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:28:01.706912 17538 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.706971 17538 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:28:01.707139 17538 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:28:01.707154 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.707159 17538 net.cpp:165] Memory required for data: 876033500\nI0817 16:28:01.707167 17538 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:28:01.707180 17538 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:28:01.707186 17538 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:28:01.707197 17538 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:28:01.707227 17538 net.cpp:150] Setting up L2_b1_pool\nI0817 16:28:01.707237 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.707242 17538 net.cpp:165] Memory required for data: 878081500\nI0817 16:28:01.707247 17538 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:28:01.707255 17538 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:28:01.707262 17538 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:28:01.707271 17538 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:28:01.707279 17538 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:28:01.707312 17538 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:28:01.707322 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.707326 17538 net.cpp:165] Memory required for data: 880129500\nI0817 16:28:01.707332 17538 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:28:01.707340 17538 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:28:01.707352 17538 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:28:01.707365 17538 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.707375 17538 net.cpp:150] Setting up L2_b1_relu\nI0817 16:28:01.707381 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.707386 17538 net.cpp:165] Memory required for data: 882177500\nI0817 16:28:01.707391 17538 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:28:01.707401 17538 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:28:01.707407 17538 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:28:01.709691 17538 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:28:01.709709 17538 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:28:01.709714 17538 net.cpp:165] Memory required for data: 884225500\nI0817 16:28:01.709720 17538 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:28:01.709733 17538 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:28:01.709739 17538 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:28:01.709748 17538 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:28:01.709755 17538 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:28:01.709805 17538 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:28:01.709818 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.709823 17538 net.cpp:165] Memory required for data: 888321500\nI0817 16:28:01.709828 17538 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.709836 17538 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.709841 17538 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:28:01.709852 17538 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:01.709862 17538 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:01.709913 17538 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:28:01.709928 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.709936 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.709940 17538 net.cpp:165] Memory required for data: 896513500\nI0817 16:28:01.709946 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:28:01.709957 17538 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:28:01.709964 17538 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:28:01.709974 17538 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:28:01.710484 17538 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:28:01.710499 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.710503 17538 net.cpp:165] Memory required for data: 900609500\nI0817 16:28:01.710513 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:28:01.710525 17538 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:28:01.710532 17538 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:28:01.710541 17538 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:28:01.710817 17538 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:28:01.710830 17538 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:28:01.710836 17538 net.cpp:165] Memory required for data: 904705500\nI0817 16:28:01.710846 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:01.710855 17538 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:28:01.710862 17538 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:28:01.710872 17538 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.710933 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:28:01.711093 17538 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:28:01.711112 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.711117 17538 net.cpp:165] Memory required for data: 908801500\nI0817 16:28:01.711127 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:28:01.711134 17538 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:28:01.711149 17538 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:28:01.711160 17538 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.711170 17538 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:28:01.711177 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.711182 17538 net.cpp:165] Memory required for data: 912897500\nI0817 16:28:01.711187 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:28:01.711201 17538 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:28:01.711207 17538 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:28:01.711216 17538 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:28:01.711714 17538 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:28:01.711727 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.711732 17538 net.cpp:165] Memory required for data: 916993500\nI0817 16:28:01.711741 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:28:01.711755 17538 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:28:01.711760 
17538 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:28:01.711769 17538 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:28:01.712033 17538 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:28:01.712046 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712052 17538 net.cpp:165] Memory required for data: 921089500\nI0817 16:28:01.712062 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:01.712071 17538 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:28:01.712077 17538 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:28:01.712087 17538 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.712154 17538 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:28:01.712313 17538 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:28:01.712327 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712332 17538 net.cpp:165] Memory required for data: 925185500\nI0817 16:28:01.712342 17538 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:28:01.712350 17538 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:28:01.712357 17538 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:28:01.712364 17538 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:28:01.712374 17538 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:28:01.712404 17538 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:28:01.712414 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712419 17538 net.cpp:165] Memory required for data: 929281500\nI0817 16:28:01.712424 17538 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:28:01.712431 17538 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:28:01.712436 17538 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:28:01.712446 17538 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:28:01.712456 17538 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:28:01.712463 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712468 17538 net.cpp:165] Memory required for data: 933377500\nI0817 16:28:01.712473 17538 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.712481 17538 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.712486 17538 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:28:01.712496 17538 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:01.712505 17538 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:01.712553 17538 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:28:01.712564 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712571 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.712576 17538 net.cpp:165] Memory required for data: 941569500\nI0817 16:28:01.712589 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:28:01.712602 17538 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:28:01.712610 17538 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:28:01.712620 17538 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:28:01.713129 17538 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:28:01.713143 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.713150 17538 net.cpp:165] Memory required for data: 945665500\nI0817 16:28:01.713158 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:28:01.713169 17538 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:28:01.713176 17538 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:28:01.713184 17538 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:28:01.713457 17538 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:28:01.713471 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.713476 17538 net.cpp:165] Memory required for data: 949761500\nI0817 16:28:01.713486 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:01.713495 17538 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:28:01.713501 17538 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:28:01.713511 17538 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.713569 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:28:01.713734 17538 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:28:01.713747 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.713752 17538 net.cpp:165] Memory required for data: 953857500\nI0817 16:28:01.713762 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:28:01.713769 17538 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:28:01.713775 17538 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:28:01.713785 17538 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.713795 17538 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:28:01.713804 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.713807 17538 net.cpp:165] Memory required for data: 957953500\nI0817 16:28:01.713812 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:28:01.713826 17538 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:28:01.713832 17538 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:28:01.713841 17538 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:28:01.714347 17538 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:28:01.714362 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.714368 17538 net.cpp:165] Memory required for data: 962049500\nI0817 16:28:01.714377 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:28:01.714385 17538 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:28:01.714395 17538 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:28:01.714403 17538 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:28:01.714671 17538 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:28:01.714684 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.714689 17538 net.cpp:165] Memory required for data: 966145500\nI0817 16:28:01.714700 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:01.714709 17538 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:28:01.714715 17538 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:28:01.714721 17538 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.714782 17538 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:28:01.714943 17538 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:28:01.714959 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.714964 17538 net.cpp:165] Memory required for data: 970241500\nI0817 16:28:01.714973 17538 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:28:01.714982 17538 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:28:01.714996 17538 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:28:01.715003 17538 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:28:01.715011 17538 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:28:01.715044 17538 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:28:01.715056 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.715061 17538 net.cpp:165] Memory required for data: 974337500\nI0817 16:28:01.715066 17538 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:28:01.715088 17538 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:28:01.715095 17538 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:28:01.715108 17538 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.715119 17538 net.cpp:150] Setting up L2_b3_relu\nI0817 16:28:01.715126 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.715131 17538 net.cpp:165] Memory required for data: 978433500\nI0817 16:28:01.715137 17538 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.715143 17538 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.715149 17538 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:28:01.715157 17538 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:01.715167 17538 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:01.715219 17538 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:28:01.715231 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.715239 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.715243 17538 net.cpp:165] Memory required for data: 986625500\nI0817 16:28:01.715248 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:28:01.715262 17538 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:28:01.715270 17538 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:28:01.715283 17538 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:28:01.715777 17538 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:28:01.715791 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.715796 17538 net.cpp:165] Memory required for data: 990721500\nI0817 16:28:01.715806 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:28:01.715817 17538 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:28:01.715824 17538 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:28:01.715837 17538 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:28:01.716116 17538 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:28:01.716130 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.716135 17538 net.cpp:165] Memory required for data: 994817500\nI0817 16:28:01.716146 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:01.716154 17538 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:28:01.716161 17538 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:28:01.716168 17538 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.716231 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:28:01.716389 17538 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:28:01.716403 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.716408 17538 net.cpp:165] Memory required for data: 998913500\nI0817 16:28:01.716418 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:28:01.716429 17538 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:28:01.716436 17538 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:28:01.716444 17538 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.716454 17538 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:28:01.716460 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.716473 17538 net.cpp:165] Memory required for data: 1003009500\nI0817 16:28:01.716478 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:28:01.716492 17538 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:28:01.716498 17538 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:28:01.716507 17538 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:28:01.717000 17538 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:28:01.717015 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717020 17538 net.cpp:165] Memory required for data: 1007105500\nI0817 16:28:01.717030 17538 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:28:01.717043 17538 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:28:01.717051 17538 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:28:01.717058 17538 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:28:01.717337 17538 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:28:01.717355 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717360 17538 net.cpp:165] Memory required for data: 1011201500\nI0817 16:28:01.717371 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:01.717380 17538 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:28:01.717386 17538 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:28:01.717393 17538 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.717452 17538 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:28:01.717615 17538 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:28:01.717629 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717634 17538 net.cpp:165] Memory required for data: 1015297500\nI0817 16:28:01.717643 17538 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:28:01.717653 17538 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:28:01.717658 17538 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:28:01.717665 17538 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:28:01.717676 17538 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:28:01.717705 17538 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:28:01.717715 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717718 17538 net.cpp:165] Memory required for data: 1019393500\nI0817 16:28:01.717723 17538 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:28:01.717734 17538 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:28:01.717741 17538 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:28:01.717747 17538 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:28:01.717757 17538 net.cpp:150] Setting up L2_b4_relu\nI0817 16:28:01.717764 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717768 17538 net.cpp:165] Memory required for data: 1023489500\nI0817 16:28:01.717773 17538 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.717780 17538 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.717787 17538 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:28:01.717793 17538 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:01.717803 17538 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:01.717854 17538 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:28:01.717866 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717874 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.717878 17538 net.cpp:165] Memory required for data: 1031681500\nI0817 16:28:01.717883 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:28:01.717897 17538 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:28:01.717903 17538 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:28:01.717913 17538 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:28:01.718423 17538 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:28:01.718438 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.718443 17538 net.cpp:165] Memory required for data: 1035777500\nI0817 16:28:01.718452 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:28:01.718466 17538 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:28:01.718472 17538 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:28:01.718482 17538 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:28:01.718755 17538 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:28:01.718770 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.718775 17538 net.cpp:165] Memory required for data: 1039873500\nI0817 16:28:01.718786 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:01.718794 17538 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:28:01.718801 17538 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:28:01.718808 17538 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.718868 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:28:01.719036 17538 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:28:01.719049 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.719054 17538 net.cpp:165] Memory required for data: 1043969500\nI0817 16:28:01.719063 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:28:01.719071 17538 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:28:01.719077 17538 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:28:01.719087 17538 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.719097 17538 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:28:01.719111 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.719116 17538 net.cpp:165] Memory required for data: 1048065500\nI0817 16:28:01.719122 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:28:01.719135 17538 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:28:01.719142 17538 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:28:01.719151 17538 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:28:01.719645 17538 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:28:01.719660 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.719665 17538 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:28:01.719673 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:28:01.719686 17538 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:28:01.719692 17538 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:28:01.719700 17538 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:28:01.719972 17538 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:28:01.719985 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.719990 17538 net.cpp:165] Memory required for data: 1056257500\nI0817 16:28:01.720001 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:01.720012 17538 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:28:01.720019 17538 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:28:01.720026 17538 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.720084 17538 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:28:01.720250 17538 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:28:01.720264 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.720269 17538 net.cpp:165] Memory required for data: 1060353500\nI0817 16:28:01.720278 17538 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:28:01.720290 17538 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:28:01.720297 17538 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:28:01.720304 17538 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:28:01.720314 17538 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:28:01.720343 17538 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:28:01.720360 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.720366 17538 net.cpp:165] Memory required for data: 1064449500\nI0817 16:28:01.720371 17538 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:28:01.720378 17538 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:28:01.720384 17538 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:28:01.720394 17538 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.720405 17538 net.cpp:150] Setting up L2_b5_relu\nI0817 16:28:01.720412 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.720417 17538 net.cpp:165] Memory required for data: 1068545500\nI0817 16:28:01.720422 17538 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.720429 17538 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.720434 17538 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:28:01.720441 17538 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:01.720451 17538 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:01.720504 17538 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:28:01.720515 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.720522 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.720527 17538 net.cpp:165] Memory required for data: 1076737500\nI0817 16:28:01.720532 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:28:01.720546 17538 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:28:01.720552 17538 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:28:01.720562 17538 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:28:01.721060 17538 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:28:01.721076 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.721081 17538 net.cpp:165] Memory required for data: 1080833500\nI0817 16:28:01.721089 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:28:01.721107 17538 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:28:01.721113 17538 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:28:01.721122 17538 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:28:01.721393 17538 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:28:01.721410 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.721415 17538 net.cpp:165] Memory required for data: 1084929500\nI0817 16:28:01.721426 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:01.721433 17538 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:28:01.721441 17538 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:28:01.721447 17538 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.721506 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:28:01.721669 17538 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:28:01.721683 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.721688 17538 net.cpp:165] Memory required for data: 1089025500\nI0817 16:28:01.721696 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:28:01.721704 17538 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:28:01.721711 17538 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:28:01.721721 17538 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.721731 17538 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:28:01.721740 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.721743 17538 net.cpp:165] Memory required for data: 1093121500\nI0817 16:28:01.721748 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:28:01.721761 17538 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:28:01.721768 17538 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:28:01.721783 17538 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:28:01.722280 17538 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:28:01.722295 17538 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.722301 17538 net.cpp:165] Memory required for data: 1097217500\nI0817 16:28:01.722309 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:28:01.722318 17538 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:28:01.722324 17538 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:28:01.722335 17538 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:28:01.722610 17538 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:28:01.722625 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.722630 17538 net.cpp:165] Memory required for data: 1101313500\nI0817 16:28:01.722640 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:01.722651 17538 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:28:01.722658 17538 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:28:01.722666 17538 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.722724 17538 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:28:01.722882 17538 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:28:01.722895 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.722900 17538 net.cpp:165] Memory required for data: 1105409500\nI0817 16:28:01.722910 17538 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:28:01.722921 17538 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:28:01.722929 17538 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:28:01.722935 17538 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:28:01.722946 17538 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:28:01.722975 17538 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:28:01.722985 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.722990 17538 net.cpp:165] Memory required for data: 1109505500\nI0817 16:28:01.722995 17538 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:28:01.723002 17538 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:28:01.723007 17538 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:28:01.723017 17538 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.723027 17538 net.cpp:150] Setting up L2_b6_relu\nI0817 16:28:01.723036 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.723039 17538 net.cpp:165] Memory required for data: 1113601500\nI0817 16:28:01.723044 17538 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.723052 17538 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.723057 17538 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:28:01.723064 17538 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:01.723073 17538 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:01.723132 17538 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:28:01.723146 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.723153 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.723157 17538 net.cpp:165] Memory required for data: 1121793500\nI0817 16:28:01.723163 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:28:01.723175 17538 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:28:01.723181 17538 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:28:01.723192 17538 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:28:01.724689 17538 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:28:01.724706 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.724711 17538 net.cpp:165] Memory required for data: 1125889500\nI0817 16:28:01.724721 17538 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:28:01.724741 17538 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:28:01.724750 17538 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:28:01.724757 17538 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:28:01.725033 17538 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:28:01.725047 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.725052 17538 net.cpp:165] Memory required for data: 1129985500\nI0817 16:28:01.725064 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:01.725072 17538 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:28:01.725078 17538 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:28:01.725088 17538 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.725157 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:28:01.725317 17538 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:28:01.725332 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.725337 17538 net.cpp:165] Memory required for data: 1134081500\nI0817 16:28:01.725345 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:28:01.725353 17538 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:28:01.725360 17538 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:28:01.725370 17538 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.725380 17538 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:28:01.725388 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.725392 17538 net.cpp:165] Memory required for data: 1138177500\nI0817 16:28:01.725397 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:28:01.725412 17538 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:28:01.725419 17538 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:28:01.725427 17538 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:28:01.725915 17538 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:28:01.725929 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.725934 17538 net.cpp:165] Memory required for data: 1142273500\nI0817 16:28:01.725944 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:28:01.725956 17538 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:28:01.725962 17538 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:28:01.725970 17538 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:28:01.726244 17538 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:28:01.726258 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726263 17538 net.cpp:165] Memory required for data: 1146369500\nI0817 16:28:01.726274 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:01.726282 17538 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:28:01.726289 17538 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:28:01.726300 17538 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.726357 17538 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:28:01.726517 17538 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:28:01.726531 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726536 17538 net.cpp:165] Memory required for data: 1150465500\nI0817 16:28:01.726546 17538 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:28:01.726554 17538 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:28:01.726560 17538 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:28:01.726567 17538 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:28:01.726578 17538 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:28:01.726608 17538 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:28:01.726617 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726621 17538 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:28:01.726627 17538 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:28:01.726640 17538 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:28:01.726653 17538 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:28:01.726661 17538 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.726671 17538 net.cpp:150] Setting up L2_b7_relu\nI0817 16:28:01.726678 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726683 17538 net.cpp:165] Memory required for data: 1158657500\nI0817 16:28:01.726687 17538 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.726696 17538 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.726701 17538 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:28:01.726711 17538 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:01.726722 17538 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:01.726769 17538 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:28:01.726781 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726788 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.726794 17538 net.cpp:165] Memory required for data: 1166849500\nI0817 16:28:01.726799 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:28:01.726814 17538 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:28:01.726819 17538 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:28:01.726830 17538 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:28:01.727325 17538 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:28:01.727340 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.727345 17538 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:28:01.727355 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:28:01.727366 17538 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:28:01.727373 17538 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:28:01.727381 17538 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:28:01.727653 17538 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:28:01.727668 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.727672 17538 net.cpp:165] Memory required for data: 1175041500\nI0817 16:28:01.727682 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:01.727691 17538 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:28:01.727697 17538 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:28:01.727707 17538 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.727766 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:28:01.727926 17538 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:28:01.727939 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.727944 17538 net.cpp:165] Memory required for data: 1179137500\nI0817 16:28:01.727953 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:28:01.727962 17538 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:28:01.727968 17538 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:28:01.727978 17538 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.727988 17538 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:28:01.727995 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.728000 17538 net.cpp:165] Memory required for data: 1183233500\nI0817 16:28:01.728005 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:28:01.728019 17538 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:28:01.728025 17538 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:28:01.728034 17538 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:28:01.728530 17538 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:28:01.728545 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.728550 17538 net.cpp:165] Memory required for data: 1187329500\nI0817 16:28:01.728566 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:28:01.728581 17538 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:28:01.728588 17538 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:28:01.728596 17538 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:28:01.728871 17538 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:28:01.728885 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.728890 17538 net.cpp:165] Memory required for data: 1191425500\nI0817 16:28:01.728901 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:01.728910 17538 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:28:01.728916 17538 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:28:01.728924 17538 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.728987 17538 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:28:01.729152 17538 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:28:01.729169 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.729174 17538 net.cpp:165] Memory required for data: 1195521500\nI0817 16:28:01.729184 17538 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:28:01.729193 17538 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:28:01.729199 17538 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:28:01.729207 17538 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:28:01.729214 17538 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:28:01.729246 17538 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:28:01.729259 17538 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:01.729264 17538 net.cpp:165] Memory required for data: 1199617500\nI0817 16:28:01.729269 17538 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:28:01.729277 17538 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:28:01.729284 17538 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:28:01.729293 17538 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.729303 17538 net.cpp:150] Setting up L2_b8_relu\nI0817 16:28:01.729310 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.729315 17538 net.cpp:165] Memory required for data: 1203713500\nI0817 16:28:01.729321 17538 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.729327 17538 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.729332 17538 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:28:01.729343 17538 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:01.729367 17538 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:01.729418 17538 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:28:01.729431 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.729439 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.729442 17538 net.cpp:165] Memory required for data: 1211905500\nI0817 16:28:01.729449 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:28:01.729463 17538 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:28:01.729470 17538 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:28:01.729482 17538 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:28:01.729979 17538 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:28:01.729992 17538 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:28:01.729997 17538 net.cpp:165] Memory required for data: 1216001500\nI0817 16:28:01.730006 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:28:01.730015 17538 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:28:01.730022 17538 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:28:01.730033 17538 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:28:01.730319 17538 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:28:01.730341 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.730347 17538 net.cpp:165] Memory required for data: 1220097500\nI0817 16:28:01.730358 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:01.730367 17538 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:28:01.730373 17538 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:28:01.730381 17538 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.730443 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:28:01.730604 17538 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:28:01.730620 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.730625 17538 net.cpp:165] Memory required for data: 1224193500\nI0817 16:28:01.730635 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:28:01.730643 17538 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:28:01.730649 17538 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:28:01.730656 17538 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.730666 17538 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:28:01.730674 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.730677 17538 net.cpp:165] Memory required for data: 1228289500\nI0817 16:28:01.730682 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:28:01.730697 17538 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:28:01.730703 17538 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:28:01.730715 17538 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:28:01.732198 17538 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:28:01.732216 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.732221 17538 net.cpp:165] Memory required for data: 1232385500\nI0817 16:28:01.732231 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:28:01.732244 17538 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:28:01.732252 17538 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:28:01.732260 17538 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:28:01.732528 17538 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:28:01.732542 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.732547 17538 net.cpp:165] Memory required for data: 1236481500\nI0817 16:28:01.732597 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:01.732614 17538 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:28:01.732620 17538 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:28:01.732627 17538 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.732687 17538 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:28:01.732847 17538 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:28:01.732861 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.732867 17538 net.cpp:165] Memory required for data: 1240577500\nI0817 16:28:01.732875 17538 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:28:01.732884 17538 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:28:01.732892 17538 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:28:01.732898 17538 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:28:01.732909 17538 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:28:01.732937 17538 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:28:01.732946 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.732951 17538 net.cpp:165] Memory required for data: 1244673500\nI0817 16:28:01.732956 17538 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:28:01.732967 17538 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:28:01.732975 17538 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:28:01.732981 17538 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.732991 17538 net.cpp:150] Setting up L2_b9_relu\nI0817 16:28:01.732998 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.733011 17538 net.cpp:165] Memory required for data: 1248769500\nI0817 16:28:01.733016 17538 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.733026 17538 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.733032 17538 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:28:01.733041 17538 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:01.733050 17538 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:01.733108 17538 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:28:01.733121 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.733129 17538 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:28:01.733134 17538 net.cpp:165] Memory required for data: 1256961500\nI0817 16:28:01.733139 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:28:01.733153 17538 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:28:01.733160 17538 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:28:01.733170 17538 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:28:01.733669 17538 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:28:01.733683 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.733688 17538 net.cpp:165] Memory required for data: 1257985500\nI0817 16:28:01.733697 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:28:01.733710 17538 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:28:01.733716 17538 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:28:01.733727 17538 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:28:01.734001 17538 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:28:01.734015 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.734020 17538 net.cpp:165] Memory required for data: 1259009500\nI0817 16:28:01.734031 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:01.734040 17538 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:28:01.734046 17538 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:28:01.734057 17538 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.734122 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:28:01.734294 17538 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:28:01.734308 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.734313 17538 net.cpp:165] Memory required for data: 1260033500\nI0817 16:28:01.734323 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:28:01.734330 17538 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:28:01.734336 17538 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:28:01.734346 17538 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:28:01.734357 17538 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:28:01.734364 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.734369 17538 net.cpp:165] Memory required for data: 1261057500\nI0817 16:28:01.734375 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:28:01.734388 17538 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:28:01.734395 17538 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:28:01.734403 17538 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:28:01.734891 17538 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:28:01.734905 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.734910 17538 net.cpp:165] Memory required for data: 1262081500\nI0817 16:28:01.734920 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:28:01.734932 17538 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:28:01.734939 17538 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:28:01.734947 17538 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:28:01.735227 17538 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:28:01.735249 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.735255 17538 net.cpp:165] Memory required for data: 1263105500\nI0817 16:28:01.735265 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:01.735277 17538 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:28:01.735285 17538 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:28:01.735291 17538 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:28:01.735352 17538 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:28:01.735520 17538 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:28:01.735533 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.735538 17538 net.cpp:165] Memory required for data: 1264129500\nI0817 16:28:01.735548 17538 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:28:01.735558 17538 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:28:01.735563 17538 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:28:01.735574 17538 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:28:01.735610 17538 net.cpp:150] Setting up L3_b1_pool\nI0817 16:28:01.735618 17538 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:28:01.735623 17538 net.cpp:165] Memory required for data: 1265153500\nI0817 16:28:01.735630 17538 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:28:01.735642 17538 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:28:01.735649 17538 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:28:01.735656 17538 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:28:01.735664 17538 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:28:01.735697 17538 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:28:01.735705 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.735710 17538 net.cpp:165] Memory required for data: 1266177500\nI0817 16:28:01.735715 17538 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:28:01.735726 17538 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:28:01.735733 17538 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:28:01.735739 17538 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:28:01.735749 17538 net.cpp:150] Setting up L3_b1_relu\nI0817 16:28:01.735756 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.735760 17538 net.cpp:165] Memory required for data: 1267201500\nI0817 16:28:01.735765 17538 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:28:01.735774 17538 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:28:01.735782 17538 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:28:01.737026 17538 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:28:01.737047 17538 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:28:01.737053 17538 net.cpp:165] Memory required for data: 1268225500\nI0817 16:28:01.737059 17538 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:28:01.737068 17538 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:28:01.737076 17538 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:28:01.737082 17538 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:28:01.737093 17538 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:28:01.737143 17538 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:28:01.737154 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.737159 17538 net.cpp:165] Memory required for data: 1270273500\nI0817 16:28:01.737165 17538 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.737177 17538 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.737184 17538 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:28:01.737191 17538 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:01.737201 17538 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:01.737258 17538 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:28:01.737278 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.737287 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.737290 17538 net.cpp:165] Memory required for data: 1274369500\nI0817 16:28:01.737296 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:28:01.737308 17538 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:28:01.737314 17538 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:28:01.737327 17538 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:28:01.738379 17538 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:28:01.738394 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.738399 17538 net.cpp:165] Memory required for data: 1276417500\nI0817 16:28:01.738409 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:28:01.738420 17538 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:28:01.738425 17538 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:28:01.738437 17538 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:28:01.738711 17538 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:28:01.738728 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.738734 17538 net.cpp:165] Memory required for data: 1278465500\nI0817 16:28:01.738744 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:01.738752 17538 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:28:01.738759 17538 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:28:01.738767 17538 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.738826 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:28:01.738986 17538 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:28:01.738999 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.739004 17538 net.cpp:165] Memory required for data: 1280513500\nI0817 16:28:01.739013 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:28:01.739022 17538 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:28:01.739034 17538 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:28:01.739042 17538 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:28:01.739051 17538 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:28:01.739059 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.739064 17538 net.cpp:165] Memory required for data: 1282561500\nI0817 16:28:01.739069 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:28:01.739084 17538 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:28:01.739090 17538 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:28:01.739099 17538 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:28:01.740152 17538 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:28:01.740167 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.740173 17538 net.cpp:165] Memory required for data: 1284609500\nI0817 16:28:01.740182 17538 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:28:01.740195 17538 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:28:01.740201 17538 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:28:01.740212 17538 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:28:01.740483 17538 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:28:01.740497 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.740502 17538 net.cpp:165] Memory required for data: 1286657500\nI0817 16:28:01.740514 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:01.740522 17538 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:28:01.740528 17538 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:28:01.740540 17538 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:28:01.740599 17538 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:28:01.740761 17538 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:28:01.740774 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.740779 17538 net.cpp:165] Memory required for data: 1288705500\nI0817 16:28:01.740797 17538 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:28:01.740805 17538 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:28:01.740813 17538 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:28:01.740819 17538 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:28:01.740830 17538 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:28:01.740869 17538 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:28:01.740878 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.740882 17538 net.cpp:165] Memory required for data: 1290753500\nI0817 16:28:01.740888 17538 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:28:01.740896 17538 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:28:01.740902 17538 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:28:01.740909 17538 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:28:01.740921 17538 net.cpp:150] Setting up L3_b2_relu\nI0817 16:28:01.740929 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.740934 17538 net.cpp:165] Memory required for data: 1292801500\nI0817 16:28:01.740939 17538 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.740947 17538 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.740952 17538 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:28:01.740959 17538 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:01.740969 17538 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:01.741019 17538 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:28:01.741031 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.741039 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.741042 17538 net.cpp:165] Memory required for data: 1296897500\nI0817 16:28:01.741048 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:28:01.741060 17538 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:28:01.741066 17538 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:28:01.741078 17538 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:28:01.742128 17538 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:28:01.742143 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.742148 17538 net.cpp:165] Memory required for data: 1298945500\nI0817 16:28:01.742158 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:28:01.742167 17538 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:28:01.742174 17538 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:28:01.742188 17538 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:28:01.742539 17538 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:28:01.742558 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.742563 17538 net.cpp:165] Memory required for data: 1300993500\nI0817 16:28:01.742575 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:01.742584 17538 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:28:01.742591 17538 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:28:01.742599 17538 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.742660 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:28:01.742820 17538 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:28:01.742835 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.742839 17538 net.cpp:165] Memory required for data: 1303041500\nI0817 16:28:01.742848 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:28:01.742859 17538 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:28:01.742866 17538 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:28:01.742873 17538 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:28:01.742890 17538 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:28:01.742898 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.742903 17538 net.cpp:165] Memory required for data: 1305089500\nI0817 16:28:01.742908 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:28:01.742924 17538 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:28:01.742930 17538 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:28:01.742939 17538 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:28:01.743994 17538 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:28:01.744010 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744015 17538 net.cpp:165] Memory required for data: 1307137500\nI0817 16:28:01.744025 17538 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:28:01.744037 17538 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:28:01.744045 17538 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:28:01.744055 17538 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:28:01.744334 17538 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:28:01.744349 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744354 17538 net.cpp:165] Memory required for data: 1309185500\nI0817 16:28:01.744364 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:01.744374 17538 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:28:01.744380 17538 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:28:01.744390 17538 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:28:01.744455 17538 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:28:01.744614 17538 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:28:01.744627 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744632 17538 net.cpp:165] Memory required for data: 1311233500\nI0817 16:28:01.744642 17538 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:28:01.744652 17538 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:28:01.744657 17538 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:28:01.744664 17538 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:28:01.744675 17538 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:28:01.744712 17538 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:28:01.744724 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744729 17538 net.cpp:165] Memory required for data: 1313281500\nI0817 16:28:01.744735 17538 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:28:01.744743 17538 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:28:01.744750 17538 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:28:01.744760 17538 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:28:01.744770 17538 net.cpp:150] Setting up L3_b3_relu\nI0817 16:28:01.744776 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744781 17538 net.cpp:165] Memory required for data: 1315329500\nI0817 16:28:01.744786 17538 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.744794 17538 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.744799 17538 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:28:01.744807 17538 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:01.744817 17538 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:01.744868 17538 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:28:01.744879 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744886 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.744891 17538 net.cpp:165] Memory required for data: 1319425500\nI0817 16:28:01.744896 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:28:01.744907 17538 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:28:01.744920 17538 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:28:01.744935 17538 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:28:01.745985 17538 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:28:01.746001 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.746006 17538 net.cpp:165] Memory required for data: 1321473500\nI0817 16:28:01.746014 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:28:01.746024 17538 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:28:01.746031 17538 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:28:01.746042 17538 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:28:01.746325 17538 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:28:01.746338 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.746343 17538 net.cpp:165] Memory required for data: 1323521500\nI0817 16:28:01.746354 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:01.746362 17538 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:28:01.746369 17538 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:28:01.746376 17538 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.746439 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:28:01.746600 17538 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:28:01.746613 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.746618 17538 net.cpp:165] Memory required for data: 1325569500\nI0817 16:28:01.746628 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:28:01.746641 17538 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:28:01.746649 17538 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:28:01.746655 17538 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:28:01.746665 17538 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:28:01.746673 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.746677 17538 net.cpp:165] Memory required for data: 1327617500\nI0817 16:28:01.746682 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:28:01.746696 17538 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:28:01.746703 17538 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:28:01.746711 17538 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:28:01.748728 17538 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:28:01.748746 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.748752 17538 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:28:01.748762 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:28:01.748772 17538 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:28:01.748780 17538 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:28:01.748791 17538 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:28:01.749066 17538 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:28:01.749083 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749089 17538 net.cpp:165] Memory required for data: 1331713500\nI0817 16:28:01.749100 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:01.749115 17538 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:28:01.749122 17538 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:28:01.749130 17538 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:28:01.749191 17538 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:28:01.749353 17538 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:28:01.749366 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749372 17538 net.cpp:165] Memory required for data: 1333761500\nI0817 16:28:01.749382 17538 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:28:01.749393 17538 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:28:01.749399 17538 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:28:01.749406 17538 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:28:01.749425 17538 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:28:01.749464 17538 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:28:01.749475 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749480 17538 net.cpp:165] Memory required for data: 1335809500\nI0817 16:28:01.749486 17538 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:28:01.749493 17538 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:28:01.749500 17538 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:28:01.749507 17538 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:28:01.749517 17538 net.cpp:150] Setting up L3_b4_relu\nI0817 16:28:01.749524 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749529 17538 net.cpp:165] Memory required for data: 1337857500\nI0817 16:28:01.749533 17538 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.749541 17538 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.749547 17538 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:28:01.749557 17538 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:01.749567 17538 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:01.749614 17538 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:28:01.749626 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749634 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.749639 17538 net.cpp:165] Memory required for data: 1341953500\nI0817 16:28:01.749644 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:28:01.749658 17538 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:28:01.749665 17538 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:28:01.749675 17538 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:28:01.750708 17538 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:28:01.750725 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.750730 17538 net.cpp:165] Memory required for data: 1344001500\nI0817 16:28:01.750738 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:28:01.750751 17538 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:28:01.750758 17538 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:28:01.750766 17538 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:28:01.751040 17538 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:28:01.751054 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.751060 17538 net.cpp:165] Memory required for data: 1346049500\nI0817 16:28:01.751070 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:01.751082 17538 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:28:01.751090 17538 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:28:01.751096 17538 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.751165 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:28:01.751329 17538 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:28:01.751343 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.751348 17538 net.cpp:165] Memory required for data: 1348097500\nI0817 16:28:01.751358 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:28:01.751368 17538 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:28:01.751375 17538 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:28:01.751382 17538 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:28:01.751392 17538 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:28:01.751400 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.751405 17538 net.cpp:165] Memory required for data: 1350145500\nI0817 16:28:01.751410 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:28:01.751423 17538 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:28:01.751436 17538 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:28:01.751448 17538 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:28:01.752476 17538 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:28:01.752491 17538 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:28:01.752496 17538 net.cpp:165] Memory required for data: 1352193500\nI0817 16:28:01.752506 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:28:01.752516 17538 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:28:01.752521 17538 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:28:01.752535 17538 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:28:01.752806 17538 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:28:01.752820 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.752825 17538 net.cpp:165] Memory required for data: 1354241500\nI0817 16:28:01.752835 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:01.752845 17538 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:28:01.752851 17538 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:28:01.752858 17538 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:28:01.752919 17538 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:28:01.753080 17538 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:28:01.753096 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.753108 17538 net.cpp:165] Memory required for data: 1356289500\nI0817 16:28:01.753118 17538 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:28:01.753127 17538 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:28:01.753134 17538 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:28:01.753141 17538 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:28:01.753149 17538 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:28:01.753187 17538 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:28:01.753199 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.753204 17538 net.cpp:165] Memory required for data: 1358337500\nI0817 16:28:01.753209 17538 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:28:01.753217 17538 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:28:01.753223 17538 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:28:01.753232 17538 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:28:01.753240 17538 net.cpp:150] Setting up L3_b5_relu\nI0817 16:28:01.753247 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.753252 17538 net.cpp:165] Memory required for data: 1360385500\nI0817 16:28:01.753257 17538 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.753265 17538 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.753270 17538 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:28:01.753280 17538 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:01.753291 17538 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:01.753336 17538 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:28:01.753348 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.753355 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.753360 17538 net.cpp:165] Memory required for data: 1364481500\nI0817 16:28:01.753365 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:28:01.753379 17538 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:28:01.753386 17538 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:28:01.753396 17538 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:28:01.754426 17538 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:28:01.754441 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.754446 17538 net.cpp:165] Memory required for data: 1366529500\nI0817 16:28:01.754463 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:28:01.754477 
17538 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:28:01.754483 17538 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:28:01.754492 17538 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:28:01.754768 17538 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:28:01.754781 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.754787 17538 net.cpp:165] Memory required for data: 1368577500\nI0817 16:28:01.754797 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:01.754809 17538 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:28:01.754817 17538 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:28:01.754824 17538 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.754885 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:28:01.755050 17538 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:28:01.755064 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.755069 17538 net.cpp:165] Memory required for data: 1370625500\nI0817 16:28:01.755079 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:28:01.755089 17538 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:28:01.755096 17538 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:28:01.755111 17538 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:28:01.755122 17538 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:28:01.755131 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.755136 17538 net.cpp:165] Memory required for data: 1372673500\nI0817 16:28:01.755141 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:28:01.755151 17538 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:28:01.755157 17538 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:28:01.755168 17538 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:28:01.756201 17538 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:28:01.756216 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.756220 17538 net.cpp:165] Memory required for data: 1374721500\nI0817 16:28:01.756235 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:28:01.756252 17538 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:28:01.756263 17538 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:28:01.756279 17538 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:28:01.756623 17538 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:28:01.756639 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.756644 17538 net.cpp:165] Memory required for data: 1376769500\nI0817 16:28:01.756654 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:01.756664 17538 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:28:01.756670 17538 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:28:01.756678 17538 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:28:01.756742 17538 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:28:01.756901 17538 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:28:01.756916 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.756922 17538 net.cpp:165] Memory required for data: 1378817500\nI0817 16:28:01.756932 17538 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:28:01.756942 17538 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:28:01.756947 17538 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:28:01.756955 17538 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:28:01.756963 17538 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:28:01.757004 17538 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:28:01.757016 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.757021 17538 net.cpp:165] Memory required for data: 1380865500\nI0817 16:28:01.757027 17538 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:28:01.757042 17538 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:28:01.757050 17538 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:28:01.757056 17538 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:28:01.757066 17538 net.cpp:150] Setting up L3_b6_relu\nI0817 16:28:01.757073 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.757078 17538 net.cpp:165] Memory required for data: 1382913500\nI0817 16:28:01.757083 17538 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.757093 17538 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.757099 17538 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:28:01.757114 17538 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:01.757125 17538 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:01.757175 17538 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:28:01.757192 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.757200 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.757205 17538 net.cpp:165] Memory required for data: 1387009500\nI0817 16:28:01.757210 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:28:01.757222 17538 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:28:01.757228 17538 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:28:01.757237 17538 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:28:01.758270 17538 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:28:01.758285 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.758289 17538 net.cpp:165] Memory required for data: 1389057500\nI0817 16:28:01.758299 17538 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:28:01.758311 17538 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:28:01.758318 17538 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:28:01.758327 17538 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:28:01.758600 17538 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:28:01.758613 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.758618 17538 net.cpp:165] Memory required for data: 1391105500\nI0817 16:28:01.758630 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:01.758641 17538 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:28:01.758648 17538 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:28:01.758656 17538 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.758718 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:28:01.758883 17538 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:28:01.758898 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.758903 17538 net.cpp:165] Memory required for data: 1393153500\nI0817 16:28:01.758911 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:28:01.758949 17538 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:28:01.758957 17538 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:28:01.758965 17538 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:28:01.758976 17538 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:28:01.758983 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.758988 17538 net.cpp:165] Memory required for data: 1395201500\nI0817 16:28:01.758993 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:28:01.759006 17538 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:28:01.759011 17538 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:28:01.759019 17538 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:28:01.760048 17538 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:28:01.760063 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760069 17538 net.cpp:165] Memory required for data: 1397249500\nI0817 16:28:01.760088 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:28:01.760108 17538 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:28:01.760116 17538 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:28:01.760124 17538 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:28:01.760401 17538 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:28:01.760414 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760419 17538 net.cpp:165] Memory required for data: 1399297500\nI0817 16:28:01.760430 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:01.760442 17538 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:28:01.760448 17538 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:28:01.760457 17538 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:28:01.760519 17538 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:28:01.760679 17538 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:28:01.760691 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760696 17538 net.cpp:165] Memory required for data: 1401345500\nI0817 16:28:01.760706 17538 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:28:01.760718 17538 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:28:01.760725 17538 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:28:01.760732 17538 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:28:01.760743 17538 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:28:01.760777 17538 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:28:01.760788 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760793 17538 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:28:01.760798 17538 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:28:01.760809 17538 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:28:01.760815 17538 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:28:01.760823 17538 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:28:01.760833 17538 net.cpp:150] Setting up L3_b7_relu\nI0817 16:28:01.760839 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760844 17538 net.cpp:165] Memory required for data: 1405441500\nI0817 16:28:01.760848 17538 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.760856 17538 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.760862 17538 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:28:01.760869 17538 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:01.760879 17538 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:01.760928 17538 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:28:01.760941 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760947 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.760952 17538 net.cpp:165] Memory required for data: 1409537500\nI0817 16:28:01.760957 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:28:01.760972 17538 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:28:01.760979 17538 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:28:01.760988 17538 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:28:01.763212 17538 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:28:01.763233 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.763239 17538 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:28:01.763249 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:28:01.763259 17538 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:28:01.763267 17538 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:28:01.763278 17538 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:28:01.763556 17538 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:28:01.763576 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.763582 17538 net.cpp:165] Memory required for data: 1413633500\nI0817 16:28:01.763593 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:01.763605 17538 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:28:01.763612 17538 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:28:01.763620 17538 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.763681 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:28:01.763846 17538 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:28:01.763859 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.763864 17538 net.cpp:165] Memory required for data: 1415681500\nI0817 16:28:01.763875 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:28:01.763882 17538 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:28:01.763888 17538 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:28:01.763900 17538 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:28:01.763909 17538 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:28:01.763916 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.763921 17538 net.cpp:165] Memory required for data: 1417729500\nI0817 16:28:01.763926 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:28:01.763941 17538 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:28:01.763947 17538 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:28:01.763955 17538 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:28:01.764984 17538 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:28:01.764999 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765004 17538 net.cpp:165] Memory required for data: 1419777500\nI0817 16:28:01.765014 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:28:01.765027 17538 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:28:01.765034 17538 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:28:01.765043 17538 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:28:01.765328 17538 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:28:01.765342 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765347 17538 net.cpp:165] Memory required for data: 1421825500\nI0817 16:28:01.765358 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:01.765367 17538 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:28:01.765373 17538 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:28:01.765381 17538 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:28:01.765444 17538 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:28:01.765606 17538 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:28:01.765619 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765625 17538 net.cpp:165] Memory required for data: 1423873500\nI0817 16:28:01.765633 17538 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:28:01.765643 17538 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:28:01.765650 17538 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:28:01.765656 17538 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:28:01.765667 17538 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:28:01.765702 17538 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:28:01.765717 17538 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:28:01.765722 17538 net.cpp:165] Memory required for data: 1425921500\nI0817 16:28:01.765728 17538 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:28:01.765735 17538 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:28:01.765741 17538 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:28:01.765748 17538 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:28:01.765758 17538 net.cpp:150] Setting up L3_b8_relu\nI0817 16:28:01.765765 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765777 17538 net.cpp:165] Memory required for data: 1427969500\nI0817 16:28:01.765784 17538 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.765794 17538 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.765799 17538 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:28:01.765806 17538 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:01.765816 17538 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:01.765868 17538 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:28:01.765882 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765887 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.765892 17538 net.cpp:165] Memory required for data: 1432065500\nI0817 16:28:01.765898 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:28:01.765909 17538 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:28:01.765915 17538 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:28:01.765928 17538 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:28:01.766983 17538 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:28:01.766999 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:28:01.767004 17538 net.cpp:165] Memory required for data: 1434113500\nI0817 16:28:01.767014 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:28:01.767024 17538 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:28:01.767030 17538 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:28:01.767041 17538 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:28:01.767325 17538 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:28:01.767344 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.767349 17538 net.cpp:165] Memory required for data: 1436161500\nI0817 16:28:01.767360 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:01.767369 17538 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:28:01.767376 17538 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:28:01.767385 17538 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.767446 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:28:01.767611 17538 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:28:01.767626 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.767630 17538 net.cpp:165] Memory required for data: 1438209500\nI0817 16:28:01.767639 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:28:01.767647 17538 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:28:01.767654 17538 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:28:01.767664 17538 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:28:01.767675 17538 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:28:01.767683 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.767686 17538 net.cpp:165] Memory required for data: 1440257500\nI0817 16:28:01.767691 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:28:01.767705 17538 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:28:01.767712 17538 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:28:01.767721 17538 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:28:01.768752 17538 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:28:01.768769 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.768774 17538 net.cpp:165] Memory required for data: 1442305500\nI0817 16:28:01.768782 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:28:01.768797 17538 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:28:01.768805 17538 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:28:01.768817 17538 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:28:01.769086 17538 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:28:01.769100 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.769119 17538 net.cpp:165] Memory required for data: 1444353500\nI0817 16:28:01.769129 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:01.769138 17538 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:28:01.769145 17538 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:28:01.769156 17538 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:28:01.769217 17538 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:28:01.769381 17538 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:28:01.769394 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.769399 17538 net.cpp:165] Memory required for data: 1446401500\nI0817 16:28:01.769409 17538 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:28:01.769418 17538 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:28:01.769424 17538 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:28:01.769433 17538 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:28:01.769444 17538 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:28:01.769482 17538 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:28:01.769495 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.769500 17538 net.cpp:165] Memory required for data: 1448449500\nI0817 16:28:01.769505 17538 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:28:01.769513 17538 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:28:01.769520 17538 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:28:01.769526 17538 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:28:01.769538 17538 net.cpp:150] Setting up L3_b9_relu\nI0817 16:28:01.769546 17538 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:28:01.769551 17538 net.cpp:165] Memory required for data: 1450497500\nI0817 16:28:01.769556 17538 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:28:01.769565 17538 net.cpp:100] Creating Layer post_pool\nI0817 16:28:01.769572 17538 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:28:01.769578 17538 net.cpp:408] post_pool -> post_pool\nI0817 16:28:01.769614 17538 net.cpp:150] Setting up post_pool\nI0817 16:28:01.769623 17538 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:28:01.769629 17538 net.cpp:165] Memory required for data: 1450529500\nI0817 16:28:01.769634 17538 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:28:01.769649 17538 net.cpp:100] Creating Layer post_FC\nI0817 16:28:01.769654 17538 net.cpp:434] post_FC <- post_pool\nI0817 16:28:01.769662 17538 net.cpp:408] post_FC -> post_FC_top\nI0817 16:28:01.769829 17538 net.cpp:150] Setting up post_FC\nI0817 16:28:01.769843 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.769850 17538 net.cpp:165] Memory required for data: 1450534500\nI0817 16:28:01.769858 17538 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:28:01.769866 17538 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:28:01.769872 17538 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:28:01.769883 17538 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:28:01.769893 17538 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:28:01.769944 17538 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:28:01.769956 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.769963 17538 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:28:01.769968 17538 net.cpp:165] Memory required for data: 1450544500\nI0817 16:28:01.769973 17538 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:28:01.769980 17538 net.cpp:100] Creating Layer accuracy\nI0817 16:28:01.769986 17538 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:28:01.769994 17538 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:28:01.770004 17538 net.cpp:408] accuracy -> accuracy\nI0817 16:28:01.770016 17538 net.cpp:150] Setting up accuracy\nI0817 16:28:01.770025 17538 net.cpp:157] Top shape: (1)\nI0817 16:28:01.770035 17538 net.cpp:165] Memory required for data: 1450544504\nI0817 16:28:01.770041 17538 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:01.770050 17538 net.cpp:100] Creating Layer loss\nI0817 16:28:01.770054 17538 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:28:01.770061 17538 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:28:01.770068 17538 net.cpp:408] loss -> loss\nI0817 16:28:01.770081 17538 layer_factory.hpp:77] Creating layer loss\nI0817 16:28:01.770210 17538 net.cpp:150] Setting up loss\nI0817 16:28:01.770225 17538 net.cpp:157] Top shape: (1)\nI0817 16:28:01.770229 17538 net.cpp:160]     with loss weight 1\nI0817 16:28:01.770247 17538 net.cpp:165] Memory required for data: 1450544508\nI0817 16:28:01.770253 17538 net.cpp:226] loss needs backward computation.\nI0817 16:28:01.770259 17538 net.cpp:228] accuracy does not need backward computation.\nI0817 16:28:01.770265 17538 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:28:01.770272 17538 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:28:01.770277 17538 net.cpp:226] post_pool needs backward computation.\nI0817 16:28:01.770282 17538 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:28:01.770285 17538 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.770292 17538 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.770297 17538 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.770301 17538 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.770306 17538 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.770311 17538 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.770315 17538 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.770320 17538 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.770325 17538 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:28:01.770331 17538 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:28:01.770336 17538 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.770341 17538 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.770346 17538 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.770351 17538 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.770356 17538 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.770361 17538 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:28:01.770366 17538 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.770371 17538 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.770376 17538 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:28:01.770382 17538 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:28:01.770387 17538 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:28:01.770392 17538 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.770401 17538 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.770406 17538 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.770411 17538 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.770416 17538 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.770421 17538 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.770426 17538 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.770431 17538 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:28:01.770437 17538 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:28:01.770442 17538 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.770447 17538 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.770452 17538 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.770463 17538 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.770469 17538 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.770474 17538 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.770479 17538 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.770485 17538 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.770490 17538 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:28:01.770495 17538 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:28:01.770500 17538 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:28:01.770505 17538 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.770511 17538 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.770516 17538 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.770521 17538 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.770526 17538 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.770531 17538 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.770536 17538 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.770542 17538 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:28:01.770547 17538 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:28:01.770552 17538 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.770558 17538 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.770563 17538 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.770570 17538 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.770575 17538 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.770579 17538 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.770584 17538 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.770589 17538 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.770594 17538 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:28:01.770599 17538 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:28:01.770604 17538 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:28:01.770611 17538 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.770615 17538 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.770620 17538 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.770625 17538 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.770630 17538 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:28:01.770637 
17538 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.770642 17538 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.770647 17538 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:28:01.770653 17538 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:28:01.770658 17538 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.770663 17538 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.770668 17538 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.770673 17538 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.770678 17538 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.770684 17538 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.770689 17538 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.770694 17538 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.770699 17538 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:28:01.770705 17538 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:28:01.770716 17538 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:28:01.770721 17538 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:28:01.770727 17538 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.770733 17538 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:28:01.770738 17538 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.770743 17538 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.770750 17538 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.770754 17538 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.770759 17538 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:28:01.770764 17538 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.770772 17538 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.770778 17538 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:28:01.770783 17538 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:28:01.770789 17538 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.770795 17538 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.770800 17538 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.770805 17538 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.770812 17538 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.770817 17538 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.770822 17538 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.770826 17538 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.770833 17538 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:28:01.770838 17538 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:28:01.770843 17538 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.770849 17538 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.770854 17538 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.770859 17538 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.770864 17538 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.770870 17538 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:28:01.770875 17538 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.770880 17538 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.770886 17538 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:28:01.770891 17538 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:28:01.770896 17538 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:28:01.770903 17538 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.770908 17538 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.770913 17538 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.770920 17538 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.770925 17538 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.770929 17538 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.770936 17538 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.770941 17538 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:28:01.770946 17538 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:28:01.770951 17538 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.770957 17538 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.770963 17538 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.770968 17538 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.770978 17538 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.770984 17538 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.770989 17538 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.770995 17538 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.771001 17538 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:28:01.771006 17538 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:28:01.771011 17538 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:28:01.771018 17538 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.771023 17538 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.771028 17538 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.771034 17538 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.771039 17538 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.771044 17538 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.771049 17538 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.771055 17538 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:28:01.771064 17538 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:28:01.771070 17538 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.771075 17538 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.771080 17538 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.771086 17538 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.771092 17538 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.771097 17538 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.771108 17538 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.771114 17538 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.771121 17538 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:28:01.771126 17538 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:28:01.771132 17538 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:28:01.771138 17538 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.771143 17538 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.771149 17538 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.771154 17538 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.771160 17538 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:28:01.771165 17538 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.771170 17538 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.771176 17538 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:28:01.771183 17538 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:28:01.771188 17538 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.771193 17538 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.771199 17538 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.771204 17538 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.771210 17538 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.771216 17538 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.771221 17538 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.771227 17538 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.771232 17538 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:28:01.771239 17538 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:28:01.771245 17538 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:28:01.771255 17538 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:28:01.771261 17538 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.771267 17538 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:28:01.771273 17538 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.771280 17538 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.771284 17538 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.771291 17538 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.771296 17538 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:28:01.771301 17538 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.771307 17538 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.771312 17538 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:28:01.771318 17538 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:28:01.771324 17538 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:28:01.771330 17538 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:28:01.771335 17538 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:28:01.771342 17538 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:28:01.771347 17538 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:28:01.771353 17538 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:28:01.771358 17538 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:28:01.771363 17538 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:28:01.771369 17538 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:28:01.771375 17538 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:28:01.771380 17538 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:28:01.771387 17538 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:28:01.771392 17538 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:28:01.771399 17538 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:28:01.771404 17538 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:28:01.771409 17538 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:28:01.771414 17538 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:28:01.771420 17538 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:28:01.771426 17538 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:28:01.771432 17538 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:28:01.771437 17538 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:28:01.771445 17538 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:28:01.771450 17538 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:28:01.771459 17538 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:28:01.771466 17538 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:28:01.771472 17538 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:28:01.771477 17538 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:28:01.771483 17538 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:28:01.771489 17538 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:28:01.771495 17538 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:28:01.771500 17538 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:28:01.771507 17538 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:28:01.771512 17538 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:28:01.771518 17538 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:28:01.771524 17538 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:28:01.771530 17538 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:28:01.771540 17538 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:28:01.771546 17538 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:28:01.771553 17538 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:28:01.771559 17538 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:28:01.771564 17538 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:28:01.771570 17538 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:28:01.771575 17538 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:28:01.771581 17538 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:28:01.771587 17538 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:28:01.771594 17538 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:28:01.771598 17538 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:28:01.771605 17538 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:28:01.771610 17538 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:28:01.771616 17538 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:28:01.771621 17538 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:28:01.771628 17538 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:28:01.771634 17538 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:28:01.771641 17538 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:28:01.771646 17538 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:28:01.771651 17538 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:28:01.771656 17538 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:28:01.771662 17538 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:28:01.771668 17538 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:28:01.771674 17538 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:28:01.771679 17538 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:28:01.771687 17538 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:28:01.771692 17538 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:28:01.771697 17538 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:28:01.771703 17538 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:28:01.771708 17538 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:28:01.771714 17538 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:28:01.771720 17538 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:28:01.771725 17538 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:28:01.771731 17538 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:28:01.771737 17538 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:28:01.771744 17538 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:28:01.771749 17538 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:28:01.771755 17538 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:28:01.771761 17538 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:28:01.771766 17538 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:28:01.771772 17538 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:28:01.771778 17538 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:28:01.771785 17538 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:28:01.771790 17538 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:28:01.771796 17538 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:28:01.771802 17538 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:28:01.771808 17538 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:28:01.771821 17538 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:28:01.771826 17538 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:28:01.771832 17538 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:28:01.771838 17538 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:28:01.771844 17538 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:28:01.771850 17538 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:28:01.771857 17538 net.cpp:226] pre_relu needs backward computation.\nI0817 16:28:01.771862 17538 net.cpp:226] pre_scale needs backward computation.\nI0817 16:28:01.771867 17538 net.cpp:226] pre_bn needs backward computation.\nI0817 16:28:01.771872 17538 net.cpp:226] pre_conv needs backward computation.\nI0817 16:28:01.771878 17538 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:28:01.771885 17538 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:28:01.771890 17538 net.cpp:270] This network produces output accuracy\nI0817 16:28:01.771896 17538 net.cpp:270] This network produces output loss\nI0817 16:28:01.772231 17538 net.cpp:283] Network initialization done.\nI0817 16:28:01.773233 17538 solver.cpp:60] Solver scaffolding done.\nI0817 16:28:01.995707 17538 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:28:02.347564 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:02.347617 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:02.354784 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:02.582250 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:02.582337 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:02.616351 17538 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:28:02.616436 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:03.061100 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:03.061170 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:03.069414 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:03.313372 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:03.313519 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:03.365386 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:03.365520 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:03.880116 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:03.880182 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:03.888886 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:04.154458 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:04.154587 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:04.225916 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:04.226043 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:04.309484 17538 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:28:04.787720 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:04.787801 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:28:04.797175 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:05.082239 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:05.082451 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:05.174733 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:05.174916 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:05.831085 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:05.831167 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:05.841529 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:06.159224 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:06.159451 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:06.271425 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:06.271638 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:06.984575 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:06.984652 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:06.995949 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:07.337985 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:07.338234 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:07.469977 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:07.470216 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:28:08.246632 17538 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:28:08.246711 17538 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:28:08.259810 17538 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:28:08.298969 17565 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:08.343991 17562 blocking_queue.cpp:50] Waiting for data\nI0817 16:28:08.688441 17538 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:28:08.688731 17538 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:28:08.839380 17538 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:28:08.839645 17538 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:28:09.009224 17538 parallel.cpp:425] Starting Optimization\nI0817 16:28:09.010438 17538 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:28:09.010529 17538 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:28:09.014863 17538 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:29:31.938338 17538 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:29:31.938668 17538 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:29:35.966042 17538 solver.cpp:228] Iteration 0, loss = 3.06302\nI0817 16:29:35.966094 17538 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0817 16:29:35.966112 17538 solver.cpp:244]     Train net output #1: loss = 3.06302 (* 1 = 3.06302 loss)\nI0817 16:29:35.992027 17538 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:31:53.348592 17538 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:33:14.966532 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12792\nI0817 16:33:14.966800 17538 solver.cpp:404]     Test net output #1: loss = 2.87885 (* 1 = 2.87885 loss)\nI0817 16:33:16.273218 17538 
solver.cpp:228] Iteration 100, loss = 2.29194\nI0817 16:33:16.273267 17538 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:33:16.273285 17538 solver.cpp:244]     Train net output #1: loss = 2.29194 (* 1 = 2.29194 loss)\nI0817 16:33:16.381240 17538 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:35:33.466647 17538 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:36:55.672942 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1572\nI0817 16:36:55.673243 17538 solver.cpp:404]     Test net output #1: loss = 2.32965 (* 1 = 2.32965 loss)\nI0817 16:36:56.993741 17538 solver.cpp:228] Iteration 200, loss = 2.11523\nI0817 16:36:56.993787 17538 solver.cpp:244]     Train net output #0: accuracy = 0.216\nI0817 16:36:56.993803 17538 solver.cpp:244]     Train net output #1: loss = 2.11523 (* 1 = 2.11523 loss)\nI0817 16:36:57.082586 17538 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:39:14.161350 17538 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:40:36.363811 17538 solver.cpp:404]     Test net output #0: accuracy = 0.14948\nI0817 16:40:36.364100 17538 solver.cpp:404]     Test net output #1: loss = 6.18769 (* 1 = 6.18769 loss)\nI0817 16:40:37.685155 17538 solver.cpp:228] Iteration 300, loss = 1.69988\nI0817 16:40:37.685199 17538 solver.cpp:244]     Train net output #0: accuracy = 0.368\nI0817 16:40:37.685214 17538 solver.cpp:244]     Train net output #1: loss = 1.69988 (* 1 = 1.69988 loss)\nI0817 16:40:37.770328 17538 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:42:54.784152 17538 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:44:17.003789 17538 solver.cpp:404]     Test net output #0: accuracy = 0.10264\nI0817 16:44:17.004091 17538 solver.cpp:404]     Test net output #1: loss = 9.09125 (* 1 = 9.09125 loss)\nI0817 16:44:18.324538 17538 solver.cpp:228] Iteration 400, loss = 1.44281\nI0817 16:44:18.324581 17538 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI0817 16:44:18.324597 17538 
solver.cpp:244]     Train net output #1: loss = 1.44281 (* 1 = 1.44281 loss)\nI0817 16:44:18.415007 17538 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:46:35.492156 17538 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:47:57.780259 17538 solver.cpp:404]     Test net output #0: accuracy = 0.14888\nI0817 16:47:57.780567 17538 solver.cpp:404]     Test net output #1: loss = 3.88144 (* 1 = 3.88144 loss)\nI0817 16:47:59.102468 17538 solver.cpp:228] Iteration 500, loss = 1.17776\nI0817 16:47:59.102511 17538 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0817 16:47:59.102535 17538 solver.cpp:244]     Train net output #1: loss = 1.17776 (* 1 = 1.17776 loss)\nI0817 16:47:59.189344 17538 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 16:50:16.334708 17538 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:51:38.726172 17538 solver.cpp:404]     Test net output #0: accuracy = 0.2032\nI0817 16:51:38.726487 17538 solver.cpp:404]     Test net output #1: loss = 2.89679 (* 1 = 2.89679 loss)\nI0817 16:51:40.050019 17538 solver.cpp:228] Iteration 600, loss = 1.03706\nI0817 16:51:40.050060 17538 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 16:51:40.050084 17538 solver.cpp:244]     Train net output #1: loss = 1.03706 (* 1 = 1.03706 loss)\nI0817 16:51:40.133381 17538 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:53:57.384979 17538 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:55:19.763411 17538 solver.cpp:404]     Test net output #0: accuracy = 0.20148\nI0817 16:55:19.763695 17538 solver.cpp:404]     Test net output #1: loss = 2.99352 (* 1 = 2.99352 loss)\nI0817 16:55:21.084978 17538 solver.cpp:228] Iteration 700, loss = 0.834065\nI0817 16:55:21.085016 17538 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 16:55:21.085041 17538 solver.cpp:244]     Train net output #1: loss = 0.834065 (* 1 = 0.834065 loss)\nI0817 16:55:21.173682 17538 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 
16:57:38.390900 17538 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:59:00.750932 17538 solver.cpp:404]     Test net output #0: accuracy = 0.17856\nI0817 16:59:00.751245 17538 solver.cpp:404]     Test net output #1: loss = 3.52643 (* 1 = 3.52643 loss)\nI0817 16:59:02.073341 17538 solver.cpp:228] Iteration 800, loss = 0.687869\nI0817 16:59:02.073379 17538 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 16:59:02.073403 17538 solver.cpp:244]     Train net output #1: loss = 0.687869 (* 1 = 0.687869 loss)\nI0817 16:59:02.155550 17538 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 17:01:19.356495 17538 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 17:02:41.597029 17538 solver.cpp:404]     Test net output #0: accuracy = 0.16516\nI0817 17:02:41.597306 17538 solver.cpp:404]     Test net output #1: loss = 4.04935 (* 1 = 4.04935 loss)\nI0817 17:02:42.918694 17538 solver.cpp:228] Iteration 900, loss = 0.554795\nI0817 17:02:42.918741 17538 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 17:02:42.918766 17538 solver.cpp:244]     Train net output #1: loss = 0.554795 (* 1 = 0.554795 loss)\nI0817 17:02:43.008549 17538 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 17:05:00.238410 17538 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 17:06:22.617838 17538 solver.cpp:404]     Test net output #0: accuracy = 0.16808\nI0817 17:06:22.618124 17538 solver.cpp:404]     Test net output #1: loss = 4.01335 (* 1 = 4.01335 loss)\nI0817 17:06:23.939782 17538 solver.cpp:228] Iteration 1000, loss = 0.539904\nI0817 17:06:23.939826 17538 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 17:06:23.939851 17538 solver.cpp:244]     Train net output #1: loss = 0.539904 (* 1 = 0.539904 loss)\nI0817 17:06:24.021255 17538 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 17:08:41.252749 17538 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 17:10:03.504487 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.14152\nI0817 17:10:03.504842 17538 solver.cpp:404]     Test net output #1: loss = 4.51624 (* 1 = 4.51624 loss)\nI0817 17:10:04.825791 17538 solver.cpp:228] Iteration 1100, loss = 0.488\nI0817 17:10:04.825834 17538 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 17:10:04.825850 17538 solver.cpp:244]     Train net output #1: loss = 0.488 (* 1 = 0.488 loss)\nI0817 17:10:04.910907 17538 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 17:12:22.073149 17538 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 17:13:44.261682 17538 solver.cpp:404]     Test net output #0: accuracy = 0.10964\nI0817 17:13:44.261983 17538 solver.cpp:404]     Test net output #1: loss = 5.31146 (* 1 = 5.31146 loss)\nI0817 17:13:45.582764 17538 solver.cpp:228] Iteration 1200, loss = 0.436406\nI0817 17:13:45.582805 17538 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:13:45.582821 17538 solver.cpp:244]     Train net output #1: loss = 0.436406 (* 1 = 0.436406 loss)\nI0817 17:13:45.671663 17538 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 17:16:02.931715 17538 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:17:25.123957 17538 solver.cpp:404]     Test net output #0: accuracy = 0.14116\nI0817 17:17:25.124271 17538 solver.cpp:404]     Test net output #1: loss = 4.96019 (* 1 = 4.96019 loss)\nI0817 17:17:26.445866 17538 solver.cpp:228] Iteration 1300, loss = 0.424377\nI0817 17:17:26.445907 17538 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:17:26.445924 17538 solver.cpp:244]     Train net output #1: loss = 0.424377 (* 1 = 0.424377 loss)\nI0817 17:17:26.535045 17538 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 17:19:43.692886 17538 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:21:05.885335 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12808\nI0817 17:21:05.885604 17538 solver.cpp:404]     Test net output #1: loss = 5.42969 (* 1 = 5.42969 loss)\nI0817 17:21:07.206815 17538 
solver.cpp:228] Iteration 1400, loss = 0.448521\nI0817 17:21:07.206858 17538 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 17:21:07.206874 17538 solver.cpp:244]     Train net output #1: loss = 0.448521 (* 1 = 0.448521 loss)\nI0817 17:21:07.296304 17538 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 17:23:24.399420 17538 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:24:46.590636 17538 solver.cpp:404]     Test net output #0: accuracy = 0.114\nI0817 17:24:46.590919 17538 solver.cpp:404]     Test net output #1: loss = 5.62252 (* 1 = 5.62252 loss)\nI0817 17:24:47.911852 17538 solver.cpp:228] Iteration 1500, loss = 0.38463\nI0817 17:24:47.911896 17538 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0817 17:24:47.911912 17538 solver.cpp:244]     Train net output #1: loss = 0.38463 (* 1 = 0.38463 loss)\nI0817 17:24:47.998672 17538 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 17:27:05.091100 17538 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:28:27.297935 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12108\nI0817 17:28:27.298245 17538 solver.cpp:404]     Test net output #1: loss = 5.50828 (* 1 = 5.50828 loss)\nI0817 17:28:28.619345 17538 solver.cpp:228] Iteration 1600, loss = 0.307689\nI0817 17:28:28.619390 17538 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:28:28.619406 17538 solver.cpp:244]     Train net output #1: loss = 0.307689 (* 1 = 0.307689 loss)\nI0817 17:28:28.708432 17538 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 17:30:45.796120 17538 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:32:07.987031 17538 solver.cpp:404]     Test net output #0: accuracy = 0.10676\nI0817 17:32:07.987315 17538 solver.cpp:404]     Test net output #1: loss = 7.04554 (* 1 = 7.04554 loss)\nI0817 17:32:09.309558 17538 solver.cpp:228] Iteration 1700, loss = 0.337676\nI0817 17:32:09.309598 17538 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 
17:32:09.309615 17538 solver.cpp:244]     Train net output #1: loss = 0.337676 (* 1 = 0.337676 loss)\nI0817 17:32:09.399575 17538 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 17:34:26.436586 17538 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:35:48.648298 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12216\nI0817 17:35:48.648581 17538 solver.cpp:404]     Test net output #1: loss = 5.39325 (* 1 = 5.39325 loss)\nI0817 17:35:49.969972 17538 solver.cpp:228] Iteration 1800, loss = 0.373931\nI0817 17:35:49.970012 17538 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:35:49.970028 17538 solver.cpp:244]     Train net output #1: loss = 0.373931 (* 1 = 0.373931 loss)\nI0817 17:35:50.060223 17538 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 17:38:07.196557 17538 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:39:29.381913 17538 solver.cpp:404]     Test net output #0: accuracy = 0.11228\nI0817 17:39:29.382211 17538 solver.cpp:404]     Test net output #1: loss = 6.08652 (* 1 = 6.08652 loss)\nI0817 17:39:30.703272 17538 solver.cpp:228] Iteration 1900, loss = 0.342793\nI0817 17:39:30.703315 17538 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 17:39:30.703330 17538 solver.cpp:244]     Train net output #1: loss = 0.342793 (* 1 = 0.342793 loss)\nI0817 17:39:30.789069 17538 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 17:41:47.830380 17538 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:43:09.865273 17538 solver.cpp:404]     Test net output #0: accuracy = 0.11348\nI0817 17:43:09.865561 17538 solver.cpp:404]     Test net output #1: loss = 6.4229 (* 1 = 6.4229 loss)\nI0817 17:43:11.186480 17538 solver.cpp:228] Iteration 2000, loss = 0.260187\nI0817 17:43:11.186520 17538 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:43:11.186537 17538 solver.cpp:244]     Train net output #1: loss = 0.260187 (* 1 = 0.260187 loss)\nI0817 17:43:11.269973 17538 
sgd_solver.cpp:166] Iteration 2000, lr = 0.35\nI0817 17:45:28.366993 17538 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:46:50.546442 17538 solver.cpp:404]     Test net output #0: accuracy = 0.126\nI0817 17:46:50.546665 17538 solver.cpp:404]     Test net output #1: loss = 5.53225 (* 1 = 5.53225 loss)\nI0817 17:46:51.868155 17538 solver.cpp:228] Iteration 2100, loss = 0.274176\nI0817 17:46:51.868198 17538 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 17:46:51.868218 17538 solver.cpp:244]     Train net output #1: loss = 0.274176 (* 1 = 0.274176 loss)\nI0817 17:46:51.955339 17538 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 17:49:09.112467 17538 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:50:31.284229 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1238\nI0817 17:50:31.284471 17538 solver.cpp:404]     Test net output #1: loss = 5.52607 (* 1 = 5.52607 loss)\nI0817 17:50:32.605535 17538 solver.cpp:228] Iteration 2200, loss = 0.216817\nI0817 17:50:32.605574 17538 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:50:32.605589 17538 solver.cpp:244]     Train net output #1: loss = 0.216817 (* 1 = 0.216817 loss)\nI0817 17:50:32.692740 17538 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 17:52:49.904218 17538 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:54:12.056695 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1326\nI0817 17:54:12.056941 17538 solver.cpp:404]     Test net output #1: loss = 5.45533 (* 1 = 5.45533 loss)\nI0817 17:54:13.378831 17538 solver.cpp:228] Iteration 2300, loss = 0.270605\nI0817 17:54:13.378870 17538 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:54:13.378886 17538 solver.cpp:244]     Train net output #1: loss = 0.270605 (* 1 = 0.270605 loss)\nI0817 17:54:13.466342 17538 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 17:56:30.709163 17538 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:57:52.852545 
17538 solver.cpp:404]     Test net output #0: accuracy = 0.12732\nI0817 17:57:52.852787 17538 solver.cpp:404]     Test net output #1: loss = 6.16063 (* 1 = 6.16063 loss)\nI0817 17:57:54.173167 17538 solver.cpp:228] Iteration 2400, loss = 0.234033\nI0817 17:57:54.173207 17538 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:57:54.173223 17538 solver.cpp:244]     Train net output #1: loss = 0.234033 (* 1 = 0.234033 loss)\nI0817 17:57:54.260710 17538 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 18:00:11.643564 17538 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 18:01:33.770915 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12456\nI0817 18:01:33.771175 17538 solver.cpp:404]     Test net output #1: loss = 7.05799 (* 1 = 7.05799 loss)\nI0817 18:01:35.093235 17538 solver.cpp:228] Iteration 2500, loss = 0.191202\nI0817 18:01:35.093282 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:01:35.093299 17538 solver.cpp:244]     Train net output #1: loss = 0.191202 (* 1 = 0.191202 loss)\nI0817 18:01:35.179633 17538 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 18:03:52.523955 17538 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 18:05:14.638943 17538 solver.cpp:404]     Test net output #0: accuracy = 0.12932\nI0817 18:05:14.639179 17538 solver.cpp:404]     Test net output #1: loss = 5.82963 (* 1 = 5.82963 loss)\nI0817 18:05:15.960088 17538 solver.cpp:228] Iteration 2600, loss = 0.193777\nI0817 18:05:15.960129 17538 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:05:15.960145 17538 solver.cpp:244]     Train net output #1: loss = 0.193777 (* 1 = 0.193777 loss)\nI0817 18:05:16.053755 17538 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 18:07:33.500147 17538 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 18:08:55.594741 17538 solver.cpp:404]     Test net output #0: accuracy = 0.14324\nI0817 18:08:55.594990 17538 solver.cpp:404]     Test net output #1: loss = 
5.19766 (* 1 = 5.19766 loss)\nI0817 18:08:56.916553 17538 solver.cpp:228] Iteration 2700, loss = 0.239267\nI0817 18:08:56.916594 17538 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:08:56.916610 17538 solver.cpp:244]     Train net output #1: loss = 0.239267 (* 1 = 0.239267 loss)\nI0817 18:08:57.004518 17538 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 18:11:14.376559 17538 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 18:12:36.478955 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1124\nI0817 18:12:36.479226 17538 solver.cpp:404]     Test net output #1: loss = 7.31251 (* 1 = 7.31251 loss)\nI0817 18:12:37.800246 17538 solver.cpp:228] Iteration 2800, loss = 0.191064\nI0817 18:12:37.800289 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:12:37.800305 17538 solver.cpp:244]     Train net output #1: loss = 0.191064 (* 1 = 0.191064 loss)\nI0817 18:12:37.890780 17538 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 18:14:55.275514 17538 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:16:17.400208 17538 solver.cpp:404]     Test net output #0: accuracy = 0.14152\nI0817 18:16:17.400467 17538 solver.cpp:404]     Test net output #1: loss = 6.17173 (* 1 = 6.17173 loss)\nI0817 18:16:18.721123 17538 solver.cpp:228] Iteration 2900, loss = 0.209957\nI0817 18:16:18.721163 17538 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 18:16:18.721179 17538 solver.cpp:244]     Train net output #1: loss = 0.209957 (* 1 = 0.209957 loss)\nI0817 18:16:18.811488 17538 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 18:18:36.217042 17538 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:19:58.331274 17538 solver.cpp:404]     Test net output #0: accuracy = 0.13572\nI0817 18:19:58.331526 17538 solver.cpp:404]     Test net output #1: loss = 7.47082 (* 1 = 7.47082 loss)\nI0817 18:19:59.652397 17538 solver.cpp:228] Iteration 3000, loss = 0.17263\nI0817 18:19:59.652438 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:19:59.652454 17538 solver.cpp:244]     Train net output #1: loss = 0.17263 (* 1 = 0.17263 loss)\nI0817 18:19:59.741780 17538 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 18:22:17.082612 17538 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:23:39.202137 17538 solver.cpp:404]     Test net output #0: accuracy = 0.137\nI0817 18:23:39.202379 17538 solver.cpp:404]     Test net output #1: loss = 7.05193 (* 1 = 7.05193 loss)\nI0817 18:23:40.523811 17538 solver.cpp:228] Iteration 3100, loss = 0.251833\nI0817 18:23:40.523851 17538 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 18:23:40.523867 17538 solver.cpp:244]     Train net output #1: loss = 0.251833 (* 1 = 0.251833 loss)\nI0817 18:23:40.616308 17538 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 18:25:57.962735 17538 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:27:20.048192 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1438\nI0817 18:27:20.048422 17538 solver.cpp:404]     Test net output #1: loss = 6.28683 (* 1 = 6.28683 loss)\nI0817 18:27:21.369954 17538 solver.cpp:228] Iteration 3200, loss = 0.124728\nI0817 18:27:21.369997 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:27:21.370014 17538 solver.cpp:244]     Train net output #1: loss = 0.124728 (* 1 = 0.124728 loss)\nI0817 18:27:21.455278 17538 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 18:29:38.834733 17538 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:31:00.925246 17538 solver.cpp:404]     Test net output #0: accuracy = 0.1788\nI0817 18:31:00.925493 17538 solver.cpp:404]     Test net output #1: loss = 5.14367 (* 1 = 5.14367 loss)\nI0817 18:31:02.246544 17538 solver.cpp:228] Iteration 3300, loss = 0.168747\nI0817 18:31:02.246584 17538 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 18:31:02.246600 17538 solver.cpp:244]     Train net output #1: loss = 0.168747 (* 1 = 
0.168747 loss)\nI0817 18:31:02.334535 17538 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0817 18:33:19.710584 17538 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:34:41.419790 17538 solver.cpp:404]     Test net output #0: accuracy = 0.20248\nI0817 18:34:41.420007 17538 solver.cpp:404]     Test net output #1: loss = 4.74968 (* 1 = 4.74968 loss)\nI0817 18:34:42.741641 17538 solver.cpp:228] Iteration 3400, loss = 0.136477\nI0817 18:34:42.741681 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:34:42.741698 17538 solver.cpp:244]     Train net output #1: loss = 0.136477 (* 1 = 0.136477 loss)\nI0817 18:34:42.834102 17538 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 18:37:00.200825 17538 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:38:22.150110 17538 solver.cpp:404]     Test net output #0: accuracy = 0.2422\nI0817 18:38:22.150331 17538 solver.cpp:404]     Test net output #1: loss = 4.45704 (* 1 = 4.45704 loss)\nI0817 18:38:23.472245 17538 solver.cpp:228] Iteration 3500, loss = 0.21159\nI0817 18:38:23.472291 17538 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 18:38:23.472307 17538 solver.cpp:244]     Train net output #1: loss = 0.21159 (* 1 = 0.21159 loss)\nI0817 18:38:23.564522 17538 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 18:40:40.879638 17538 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:42:02.962786 17538 solver.cpp:404]     Test net output #0: accuracy = 0.24588\nI0817 18:42:02.963052 17538 solver.cpp:404]     Test net output #1: loss = 3.99886 (* 1 = 3.99886 loss)\nI0817 18:42:04.283381 17538 solver.cpp:228] Iteration 3600, loss = 0.087153\nI0817 18:42:04.283423 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:42:04.283439 17538 solver.cpp:244]     Train net output #1: loss = 0.087153 (* 1 = 0.087153 loss)\nI0817 18:42:04.373620 17538 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 18:44:21.708698 17538 solver.cpp:337] Iteration 
3700, Testing net (#0)\nI0817 18:45:43.826099 17538 solver.cpp:404]     Test net output #0: accuracy = 0.20136\nI0817 18:45:43.826320 17538 solver.cpp:404]     Test net output #1: loss = 4.85736 (* 1 = 4.85736 loss)\nI0817 18:45:45.146309 17538 solver.cpp:228] Iteration 3700, loss = 0.10695\nI0817 18:45:45.146351 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:45:45.146368 17538 solver.cpp:244]     Train net output #1: loss = 0.10695 (* 1 = 0.10695 loss)\nI0817 18:45:45.234040 17538 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 18:48:02.676095 17538 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:49:24.758647 17538 solver.cpp:404]     Test net output #0: accuracy = 0.21592\nI0817 18:49:24.758867 17538 solver.cpp:404]     Test net output #1: loss = 4.81316 (* 1 = 4.81316 loss)\nI0817 18:49:26.080930 17538 solver.cpp:228] Iteration 3800, loss = 0.105716\nI0817 18:49:26.080974 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:49:26.080991 17538 solver.cpp:244]     Train net output #1: loss = 0.105716 (* 1 = 0.105716 loss)\nI0817 18:49:26.172873 17538 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 18:51:43.500219 17538 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:53:05.602738 17538 solver.cpp:404]     Test net output #0: accuracy = 0.17136\nI0817 18:53:05.603020 17538 solver.cpp:404]     Test net output #1: loss = 5.38619 (* 1 = 5.38619 loss)\nI0817 18:53:06.924322 17538 solver.cpp:228] Iteration 3900, loss = 0.100259\nI0817 18:53:06.924365 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:53:06.924381 17538 solver.cpp:244]     Train net output #1: loss = 0.100259 (* 1 = 0.100259 loss)\nI0817 18:53:07.011598 17538 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 18:55:24.385824 17538 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:56:46.516083 17538 solver.cpp:404]     Test net output #0: accuracy = 0.24612\nI0817 18:56:46.516324 17538 
solver.cpp:404]     Test net output #1: loss = 4.3309 (* 1 = 4.3309 loss)\nI0817 18:56:47.838465 17538 solver.cpp:228] Iteration 4000, loss = 0.125448\nI0817 18:56:47.838510 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:56:47.838526 17538 solver.cpp:244]     Train net output #1: loss = 0.125448 (* 1 = 0.125448 loss)\nI0817 18:56:47.927232 17538 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 18:59:05.379065 17538 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 19:00:27.507325 17538 solver.cpp:404]     Test net output #0: accuracy = 0.30524\nI0817 19:00:27.507552 17538 solver.cpp:404]     Test net output #1: loss = 3.43381 (* 1 = 3.43381 loss)\nI0817 19:00:28.829639 17538 solver.cpp:228] Iteration 4100, loss = 0.150964\nI0817 19:00:28.829684 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:00:28.829699 17538 solver.cpp:244]     Train net output #1: loss = 0.150963 (* 1 = 0.150963 loss)\nI0817 19:00:28.922525 17538 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 19:02:46.218227 17538 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 19:04:08.308483 17538 solver.cpp:404]     Test net output #0: accuracy = 0.24508\nI0817 19:04:08.308719 17538 solver.cpp:404]     Test net output #1: loss = 4.20046 (* 1 = 4.20046 loss)\nI0817 19:04:09.630491 17538 solver.cpp:228] Iteration 4200, loss = 0.0692668\nI0817 19:04:09.630535 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:04:09.630553 17538 solver.cpp:244]     Train net output #1: loss = 0.0692667 (* 1 = 0.0692667 loss)\nI0817 19:04:09.720625 17538 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 19:06:27.080353 17538 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 19:07:48.915853 17538 solver.cpp:404]     Test net output #0: accuracy = 0.20084\nI0817 19:07:48.916057 17538 solver.cpp:404]     Test net output #1: loss = 5.58079 (* 1 = 5.58079 loss)\nI0817 19:07:50.237962 17538 solver.cpp:228] Iteration 4300, loss = 
0.117014\nI0817 19:07:50.238005 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:07:50.238023 17538 solver.cpp:244]     Train net output #1: loss = 0.117014 (* 1 = 0.117014 loss)\nI0817 19:07:50.330598 17538 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 19:10:07.829480 17538 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 19:11:29.493964 17538 solver.cpp:404]     Test net output #0: accuracy = 0.24888\nI0817 19:11:29.494194 17538 solver.cpp:404]     Test net output #1: loss = 4.14038 (* 1 = 4.14038 loss)\nI0817 19:11:30.815850 17538 solver.cpp:228] Iteration 4400, loss = 0.0921674\nI0817 19:11:30.815896 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:11:30.815912 17538 solver.cpp:244]     Train net output #1: loss = 0.0921674 (* 1 = 0.0921674 loss)\nI0817 19:11:30.906394 17538 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 19:13:48.393786 17538 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:15:10.493973 17538 solver.cpp:404]     Test net output #0: accuracy = 0.17316\nI0817 19:15:10.494197 17538 solver.cpp:404]     Test net output #1: loss = 5.77184 (* 1 = 5.77184 loss)\nI0817 19:15:11.816157 17538 solver.cpp:228] Iteration 4500, loss = 0.0696025\nI0817 19:15:11.816205 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:15:11.816229 17538 solver.cpp:244]     Train net output #1: loss = 0.0696024 (* 1 = 0.0696024 loss)\nI0817 19:15:11.903659 17538 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 19:17:29.333242 17538 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:18:51.467587 17538 solver.cpp:404]     Test net output #0: accuracy = 0.31492\nI0817 19:18:51.467839 17538 solver.cpp:404]     Test net output #1: loss = 3.16503 (* 1 = 3.16503 loss)\nI0817 19:18:52.788957 17538 solver.cpp:228] Iteration 4600, loss = 0.140666\nI0817 19:18:52.789001 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:18:52.789023 17538 solver.cpp:244] 
    Train net output #1: loss = 0.140666 (* 1 = 0.140666 loss)\nI0817 19:18:52.883504 17538 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 19:21:10.288602 17538 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:22:32.414222 17538 solver.cpp:404]     Test net output #0: accuracy = 0.2902\nI0817 19:22:32.414546 17538 solver.cpp:404]     Test net output #1: loss = 3.59039 (* 1 = 3.59039 loss)\nI0817 19:22:33.737004 17538 solver.cpp:228] Iteration 4700, loss = 0.132674\nI0817 19:22:33.737056 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:22:33.737079 17538 solver.cpp:244]     Train net output #1: loss = 0.132674 (* 1 = 0.132674 loss)\nI0817 19:22:33.828277 17538 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 19:24:51.249541 17538 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:26:13.188007 17538 solver.cpp:404]     Test net output #0: accuracy = 0.26596\nI0817 19:26:13.188251 17538 solver.cpp:404]     Test net output #1: loss = 3.82181 (* 1 = 3.82181 loss)\nI0817 19:26:14.510512 17538 solver.cpp:228] Iteration 4800, loss = 0.101229\nI0817 19:26:14.510566 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:26:14.510584 17538 solver.cpp:244]     Train net output #1: loss = 0.101229 (* 1 = 0.101229 loss)\nI0817 19:26:14.596334 17538 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 19:28:31.928357 17538 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:29:54.028738 17538 solver.cpp:404]     Test net output #0: accuracy = 0.2128\nI0817 19:29:54.028965 17538 solver.cpp:404]     Test net output #1: loss = 4.87242 (* 1 = 4.87242 loss)\nI0817 19:29:55.351305 17538 solver.cpp:228] Iteration 4900, loss = 0.0952675\nI0817 19:29:55.351347 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:29:55.351363 17538 solver.cpp:244]     Train net output #1: loss = 0.0952674 (* 1 = 0.0952674 loss)\nI0817 19:29:55.444953 17538 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 
19:32:12.833498 17538 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 19:33:34.943802 17538 solver.cpp:404]     Test net output #0: accuracy = 0.27748\nI0817 19:33:34.944078 17538 solver.cpp:404]     Test net output #1: loss = 3.89173 (* 1 = 3.89173 loss)\nI0817 19:33:36.265812 17538 solver.cpp:228] Iteration 5000, loss = 0.0691052\nI0817 19:33:36.265851 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:33:36.265867 17538 solver.cpp:244]     Train net output #1: loss = 0.0691052 (* 1 = 0.0691052 loss)\nI0817 19:33:36.350927 17538 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 19:35:53.798403 17538 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:37:15.916170 17538 solver.cpp:404]     Test net output #0: accuracy = 0.3952\nI0817 19:37:15.916435 17538 solver.cpp:404]     Test net output #1: loss = 2.69085 (* 1 = 2.69085 loss)\nI0817 19:37:17.238050 17538 solver.cpp:228] Iteration 5100, loss = 0.0813509\nI0817 19:37:17.238093 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:37:17.238111 17538 solver.cpp:244]     Train net output #1: loss = 0.0813508 (* 1 = 0.0813508 loss)\nI0817 19:37:17.332204 17538 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 19:39:34.742785 17538 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:40:56.830278 17538 solver.cpp:404]     Test net output #0: accuracy = 0.37948\nI0817 19:40:56.830579 17538 solver.cpp:404]     Test net output #1: loss = 3.21088 (* 1 = 3.21088 loss)\nI0817 19:40:58.155450 17538 solver.cpp:228] Iteration 5200, loss = 0.0791134\nI0817 19:40:58.155493 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:40:58.155508 17538 solver.cpp:244]     Train net output #1: loss = 0.0791133 (* 1 = 0.0791133 loss)\nI0817 19:40:58.246804 17538 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 19:43:15.740381 17538 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:44:37.861250 17538 solver.cpp:404]     Test net output 
#0: accuracy = 0.41656\nI0817 19:44:37.861541 17538 solver.cpp:404]     Test net output #1: loss = 2.5343 (* 1 = 2.5343 loss)\nI0817 19:44:39.186466 17538 solver.cpp:228] Iteration 5300, loss = 0.0878735\nI0817 19:44:39.186506 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:44:39.186522 17538 solver.cpp:244]     Train net output #1: loss = 0.0878734 (* 1 = 0.0878734 loss)\nI0817 19:44:39.269068 17538 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 19:46:56.706135 17538 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:48:18.866611 17538 solver.cpp:404]     Test net output #0: accuracy = 0.39092\nI0817 19:48:18.866876 17538 solver.cpp:404]     Test net output #1: loss = 2.85772 (* 1 = 2.85772 loss)\nI0817 19:48:20.191025 17538 solver.cpp:228] Iteration 5400, loss = 0.109622\nI0817 19:48:20.191066 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:48:20.191081 17538 solver.cpp:244]     Train net output #1: loss = 0.109622 (* 1 = 0.109622 loss)\nI0817 19:48:20.282703 17538 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 19:50:37.758862 17538 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:51:59.899731 17538 solver.cpp:404]     Test net output #0: accuracy = 0.4118\nI0817 19:51:59.900018 17538 solver.cpp:404]     Test net output #1: loss = 2.379 (* 1 = 2.379 loss)\nI0817 19:52:01.223980 17538 solver.cpp:228] Iteration 5500, loss = 0.0569534\nI0817 19:52:01.224021 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:52:01.224036 17538 solver.cpp:244]     Train net output #1: loss = 0.0569534 (* 1 = 0.0569534 loss)\nI0817 19:52:01.306303 17538 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 19:54:18.745538 17538 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:55:40.862077 17538 solver.cpp:404]     Test net output #0: accuracy = 0.39364\nI0817 19:55:40.862352 17538 solver.cpp:404]     Test net output #1: loss = 3.05365 (* 1 = 3.05365 loss)\nI0817 
19:55:42.185631 17538 solver.cpp:228] Iteration 5600, loss = 0.142061\nI0817 19:55:42.185670 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:55:42.185685 17538 solver.cpp:244]     Train net output #1: loss = 0.142061 (* 1 = 0.142061 loss)\nI0817 19:55:42.269824 17538 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 19:57:59.680559 17538 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:59:21.805047 17538 solver.cpp:404]     Test net output #0: accuracy = 0.45888\nI0817 19:59:21.805322 17538 solver.cpp:404]     Test net output #1: loss = 2.45052 (* 1 = 2.45052 loss)\nI0817 19:59:23.129542 17538 solver.cpp:228] Iteration 5700, loss = 0.0927099\nI0817 19:59:23.129592 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:59:23.129607 17538 solver.cpp:244]     Train net output #1: loss = 0.0927099 (* 1 = 0.0927099 loss)\nI0817 19:59:23.220507 17538 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 20:01:40.603801 17538 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 20:03:02.611081 17538 solver.cpp:404]     Test net output #0: accuracy = 0.45336\nI0817 20:03:02.611367 17538 solver.cpp:404]     Test net output #1: loss = 2.60451 (* 1 = 2.60451 loss)\nI0817 20:03:03.935237 17538 solver.cpp:228] Iteration 5800, loss = 0.208745\nI0817 20:03:03.935276 17538 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:03:03.935292 17538 solver.cpp:244]     Train net output #1: loss = 0.208745 (* 1 = 0.208745 loss)\nI0817 20:03:04.026525 17538 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 20:05:21.443575 17538 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 20:06:43.598233 17538 solver.cpp:404]     Test net output #0: accuracy = 0.29776\nI0817 20:06:43.598548 17538 solver.cpp:404]     Test net output #1: loss = 4.17903 (* 1 = 4.17903 loss)\nI0817 20:06:44.921810 17538 solver.cpp:228] Iteration 5900, loss = 0.0553511\nI0817 20:06:44.921850 17538 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0817 20:06:44.921865 17538 solver.cpp:244]     Train net output #1: loss = 0.0553512 (* 1 = 0.0553512 loss)\nI0817 20:06:45.007427 17538 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 20:09:02.353029 17538 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 20:10:24.512552 17538 solver.cpp:404]     Test net output #0: accuracy = 0.39144\nI0817 20:10:24.512848 17538 solver.cpp:404]     Test net output #1: loss = 2.9501 (* 1 = 2.9501 loss)\nI0817 20:10:25.836863 17538 solver.cpp:228] Iteration 6000, loss = 0.117371\nI0817 20:10:25.836905 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:10:25.836920 17538 solver.cpp:244]     Train net output #1: loss = 0.117371 (* 1 = 0.117371 loss)\nI0817 20:10:25.920821 17538 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 20:12:43.010776 17538 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 20:14:05.130762 17538 solver.cpp:404]     Test net output #0: accuracy = 0.29404\nI0817 20:14:05.131041 17538 solver.cpp:404]     Test net output #1: loss = 4.16305 (* 1 = 4.16305 loss)\nI0817 20:14:06.454371 17538 solver.cpp:228] Iteration 6100, loss = 0.0573092\nI0817 20:14:06.454411 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:14:06.454427 17538 solver.cpp:244]     Train net output #1: loss = 0.0573092 (* 1 = 0.0573092 loss)\nI0817 20:14:06.543339 17538 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 20:16:23.733103 17538 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:17:45.876374 17538 solver.cpp:404]     Test net output #0: accuracy = 0.57496\nI0817 20:17:45.876685 17538 solver.cpp:404]     Test net output #1: loss = 1.89064 (* 1 = 1.89064 loss)\nI0817 20:17:47.200134 17538 solver.cpp:228] Iteration 6200, loss = 0.0352105\nI0817 20:17:47.200176 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:17:47.200191 17538 solver.cpp:244]     Train net output #1: loss = 0.0352105 (* 1 = 0.0352105 loss)\nI0817 
20:17:47.285459 17538 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0817 20:20:04.489078 17538 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:21:26.619945 17538 solver.cpp:404]     Test net output #0: accuracy = 0.45092\nI0817 20:21:26.620254 17538 solver.cpp:404]     Test net output #1: loss = 2.50611 (* 1 = 2.50611 loss)\nI0817 20:21:27.944288 17538 solver.cpp:228] Iteration 6300, loss = 0.0702664\nI0817 20:21:27.944336 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:21:27.944352 17538 solver.cpp:244]     Train net output #1: loss = 0.0702665 (* 1 = 0.0702665 loss)\nI0817 20:21:28.026898 17538 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 20:23:45.132560 17538 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:25:07.280614 17538 solver.cpp:404]     Test net output #0: accuracy = 0.37904\nI0817 20:25:07.280918 17538 solver.cpp:404]     Test net output #1: loss = 3.55106 (* 1 = 3.55106 loss)\nI0817 20:25:08.603521 17538 solver.cpp:228] Iteration 6400, loss = 0.114766\nI0817 20:25:08.603565 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:25:08.603581 17538 solver.cpp:244]     Train net output #1: loss = 0.114766 (* 1 = 0.114766 loss)\nI0817 20:25:08.692098 17538 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 20:27:25.776216 17538 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:28:47.929340 17538 solver.cpp:404]     Test net output #0: accuracy = 0.45344\nI0817 20:28:47.929623 17538 solver.cpp:404]     Test net output #1: loss = 2.72765 (* 1 = 2.72765 loss)\nI0817 20:28:49.252346 17538 solver.cpp:228] Iteration 6500, loss = 0.0580529\nI0817 20:28:49.252400 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:28:49.252416 17538 solver.cpp:244]     Train net output #1: loss = 0.0580529 (* 1 = 0.0580529 loss)\nI0817 20:28:49.335203 17538 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 20:31:06.463160 17538 solver.cpp:337] Iteration 6600, Testing net 
(#0)\nI0817 20:32:28.632171 17538 solver.cpp:404]     Test net output #0: accuracy = 0.5762\nI0817 20:32:28.632483 17538 solver.cpp:404]     Test net output #1: loss = 1.82777 (* 1 = 1.82777 loss)\nI0817 20:32:29.954604 17538 solver.cpp:228] Iteration 6600, loss = 0.113338\nI0817 20:32:29.954646 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:32:29.954661 17538 solver.cpp:244]     Train net output #1: loss = 0.113338 (* 1 = 0.113338 loss)\nI0817 20:32:30.041661 17538 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 20:34:47.126188 17538 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:36:09.257354 17538 solver.cpp:404]     Test net output #0: accuracy = 0.41584\nI0817 20:36:09.257658 17538 solver.cpp:404]     Test net output #1: loss = 2.87106 (* 1 = 2.87106 loss)\nI0817 20:36:10.579838 17538 solver.cpp:228] Iteration 6700, loss = 0.081907\nI0817 20:36:10.579891 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:36:10.579908 17538 solver.cpp:244]     Train net output #1: loss = 0.0819071 (* 1 = 0.0819071 loss)\nI0817 20:36:10.663632 17538 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 20:38:27.854517 17538 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:39:50.016144 17538 solver.cpp:404]     Test net output #0: accuracy = 0.57972\nI0817 20:39:50.016433 17538 solver.cpp:404]     Test net output #1: loss = 1.69479 (* 1 = 1.69479 loss)\nI0817 20:39:51.338893 17538 solver.cpp:228] Iteration 6800, loss = 0.0895766\nI0817 20:39:51.338945 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:39:51.338963 17538 solver.cpp:244]     Train net output #1: loss = 0.0895766 (* 1 = 0.0895766 loss)\nI0817 20:39:51.420419 17538 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 20:42:08.518857 17538 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:43:30.683820 17538 solver.cpp:404]     Test net output #0: accuracy = 0.47188\nI0817 20:43:30.684106 17538 solver.cpp:404]     
Test net output #1: loss = 2.91599 (* 1 = 2.91599 loss)\nI0817 20:43:32.005874 17538 solver.cpp:228] Iteration 6900, loss = 0.0694996\nI0817 20:43:32.005918 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:43:32.005934 17538 solver.cpp:244]     Train net output #1: loss = 0.0694997 (* 1 = 0.0694997 loss)\nI0817 20:43:32.094846 17538 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 20:45:49.296084 17538 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:47:11.446835 17538 solver.cpp:404]     Test net output #0: accuracy = 0.53836\nI0817 20:47:11.447141 17538 solver.cpp:404]     Test net output #1: loss = 2.1813 (* 1 = 2.1813 loss)\nI0817 20:47:12.769073 17538 solver.cpp:228] Iteration 7000, loss = 0.0560689\nI0817 20:47:12.769127 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:47:12.769145 17538 solver.cpp:244]     Train net output #1: loss = 0.056069 (* 1 = 0.056069 loss)\nI0817 20:47:12.855435 17538 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 20:49:29.877209 17538 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:50:52.028571 17538 solver.cpp:404]     Test net output #0: accuracy = 0.43688\nI0817 20:50:52.028880 17538 solver.cpp:404]     Test net output #1: loss = 2.78135 (* 1 = 2.78135 loss)\nI0817 20:50:53.351636 17538 solver.cpp:228] Iteration 7100, loss = 0.0629584\nI0817 20:50:53.351681 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:50:53.351697 17538 solver.cpp:244]     Train net output #1: loss = 0.0629584 (* 1 = 0.0629584 loss)\nI0817 20:50:53.437899 17538 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 20:53:10.513366 17538 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:54:32.672432 17538 solver.cpp:404]     Test net output #0: accuracy = 0.47476\nI0817 20:54:32.672745 17538 solver.cpp:404]     Test net output #1: loss = 2.49216 (* 1 = 2.49216 loss)\nI0817 20:54:33.995162 17538 solver.cpp:228] Iteration 7200, loss = 0.0593545\nI0817 
20:54:33.995203 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:54:33.995219 17538 solver.cpp:244]     Train net output #1: loss = 0.0593545 (* 1 = 0.0593545 loss)\nI0817 20:54:34.083097 17538 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 20:56:51.236929 17538 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:58:13.332620 17538 solver.cpp:404]     Test net output #0: accuracy = 0.54052\nI0817 20:58:13.332839 17538 solver.cpp:404]     Test net output #1: loss = 2.0583 (* 1 = 2.0583 loss)\nI0817 20:58:14.654253 17538 solver.cpp:228] Iteration 7300, loss = 0.0464729\nI0817 20:58:14.654299 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:58:14.654314 17538 solver.cpp:244]     Train net output #1: loss = 0.046473 (* 1 = 0.046473 loss)\nI0817 20:58:14.742043 17538 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 21:00:31.931876 17538 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 21:01:53.627198 17538 solver.cpp:404]     Test net output #0: accuracy = 0.59816\nI0817 21:01:53.627490 17538 solver.cpp:404]     Test net output #1: loss = 1.64438 (* 1 = 1.64438 loss)\nI0817 21:01:54.948412 17538 solver.cpp:228] Iteration 7400, loss = 0.0548433\nI0817 21:01:54.948457 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:01:54.948472 17538 solver.cpp:244]     Train net output #1: loss = 0.0548433 (* 1 = 0.0548433 loss)\nI0817 21:01:55.037850 17538 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 21:04:12.032944 17538 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 21:05:34.008147 17538 solver.cpp:404]     Test net output #0: accuracy = 0.63904\nI0817 21:05:34.008378 17538 solver.cpp:404]     Test net output #1: loss = 1.45229 (* 1 = 1.45229 loss)\nI0817 21:05:35.330235 17538 solver.cpp:228] Iteration 7500, loss = 0.0787292\nI0817 21:05:35.330279 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:05:35.330299 17538 solver.cpp:244]     Train net 
output #1: loss = 0.0787293 (* 1 = 0.0787293 loss)\nI0817 21:05:35.417101 17538 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0817 21:07:52.137866 17538 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 21:09:14.064219 17538 solver.cpp:404]     Test net output #0: accuracy = 0.54508\nI0817 21:09:14.064463 17538 solver.cpp:404]     Test net output #1: loss = 2.2043 (* 1 = 2.2043 loss)\nI0817 21:09:15.386909 17538 solver.cpp:228] Iteration 7600, loss = 0.0359149\nI0817 21:09:15.386956 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:09:15.386979 17538 solver.cpp:244]     Train net output #1: loss = 0.035915 (* 1 = 0.035915 loss)\nI0817 21:09:15.474578 17538 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 21:11:32.200474 17538 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 21:12:54.121042 17538 solver.cpp:404]     Test net output #0: accuracy = 0.56088\nI0817 21:12:54.121331 17538 solver.cpp:404]     Test net output #1: loss = 2.27138 (* 1 = 2.27138 loss)\nI0817 21:12:55.443922 17538 solver.cpp:228] Iteration 7700, loss = 0.0667818\nI0817 21:12:55.443969 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:12:55.443992 17538 solver.cpp:244]     Train net output #1: loss = 0.0667819 (* 1 = 0.0667819 loss)\nI0817 21:12:55.524754 17538 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 21:15:12.357977 17538 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:16:34.081892 17538 solver.cpp:404]     Test net output #0: accuracy = 0.56352\nI0817 21:16:34.082099 17538 solver.cpp:404]     Test net output #1: loss = 1.97218 (* 1 = 1.97218 loss)\nI0817 21:16:35.403838 17538 solver.cpp:228] Iteration 7800, loss = 0.0424434\nI0817 21:16:35.403887 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:16:35.403911 17538 solver.cpp:244]     Train net output #1: loss = 0.0424434 (* 1 = 0.0424434 loss)\nI0817 21:16:35.484596 17538 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 
21:18:52.217495 17538 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 21:20:13.941119 17538 solver.cpp:404]     Test net output #0: accuracy = 0.5324\nI0817 21:20:13.941337 17538 solver.cpp:404]     Test net output #1: loss = 2.30602 (* 1 = 2.30602 loss)\nI0817 21:20:15.263520 17538 solver.cpp:228] Iteration 7900, loss = 0.130713\nI0817 21:20:15.263568 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 21:20:15.263592 17538 solver.cpp:244]     Train net output #1: loss = 0.130713 (* 1 = 0.130713 loss)\nI0817 21:20:15.344648 17538 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 21:22:32.075492 17538 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:23:53.908093 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69772\nI0817 21:23:53.908318 17538 solver.cpp:404]     Test net output #1: loss = 1.1691 (* 1 = 1.1691 loss)\nI0817 21:23:55.229977 17538 solver.cpp:228] Iteration 8000, loss = 0.0705388\nI0817 21:23:55.230026 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:23:55.230049 17538 solver.cpp:244]     Train net output #1: loss = 0.0705389 (* 1 = 0.0705389 loss)\nI0817 21:23:55.310775 17538 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 21:26:12.103366 17538 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:27:33.845922 17538 solver.cpp:404]     Test net output #0: accuracy = 0.63616\nI0817 21:27:33.846137 17538 solver.cpp:404]     Test net output #1: loss = 1.5778 (* 1 = 1.5778 loss)\nI0817 21:27:35.167179 17538 solver.cpp:228] Iteration 8100, loss = 0.0408457\nI0817 21:27:35.167225 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:27:35.167250 17538 solver.cpp:244]     Train net output #1: loss = 0.0408457 (* 1 = 0.0408457 loss)\nI0817 21:27:35.263185 17538 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 21:29:51.971222 17538 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:31:13.801610 17538 solver.cpp:404]     Test net output #0: 
accuracy = 0.558\nI0817 21:31:13.801842 17538 solver.cpp:404]     Test net output #1: loss = 2.26473 (* 1 = 2.26473 loss)\nI0817 21:31:15.122993 17538 solver.cpp:228] Iteration 8200, loss = 0.0824565\nI0817 21:31:15.123039 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:31:15.123062 17538 solver.cpp:244]     Train net output #1: loss = 0.0824565 (* 1 = 0.0824565 loss)\nI0817 21:31:15.210845 17538 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 21:33:31.955396 17538 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:34:53.606744 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6002\nI0817 21:34:53.606967 17538 solver.cpp:404]     Test net output #1: loss = 1.85579 (* 1 = 1.85579 loss)\nI0817 21:34:54.929558 17538 solver.cpp:228] Iteration 8300, loss = 0.0380418\nI0817 21:34:54.929607 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:34:54.929632 17538 solver.cpp:244]     Train net output #1: loss = 0.0380418 (* 1 = 0.0380418 loss)\nI0817 21:34:55.013948 17538 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 21:37:11.774297 17538 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:38:33.708742 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6812\nI0817 21:38:33.708969 17538 solver.cpp:404]     Test net output #1: loss = 1.36765 (* 1 = 1.36765 loss)\nI0817 21:38:35.030995 17538 solver.cpp:228] Iteration 8400, loss = 0.0470199\nI0817 21:38:35.031041 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:38:35.031064 17538 solver.cpp:244]     Train net output #1: loss = 0.0470199 (* 1 = 0.0470199 loss)\nI0817 21:38:35.113667 17538 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 21:40:51.843298 17538 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:42:13.858573 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6782\nI0817 21:42:13.858839 17538 solver.cpp:404]     Test net output #1: loss = 1.3181 (* 1 = 1.3181 loss)\nI0817 
21:42:15.181304 17538 solver.cpp:228] Iteration 8500, loss = 0.0505058\nI0817 21:42:15.181352 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:42:15.181376 17538 solver.cpp:244]     Train net output #1: loss = 0.0505058 (* 1 = 0.0505058 loss)\nI0817 21:42:15.268101 17538 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 21:44:31.977489 17538 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:45:54.068967 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69172\nI0817 21:45:54.069218 17538 solver.cpp:404]     Test net output #1: loss = 1.30325 (* 1 = 1.30325 loss)\nI0817 21:45:55.390348 17538 solver.cpp:228] Iteration 8600, loss = 0.154327\nI0817 21:45:55.390391 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:45:55.390414 17538 solver.cpp:244]     Train net output #1: loss = 0.154327 (* 1 = 0.154327 loss)\nI0817 21:45:55.470996 17538 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 21:48:12.201344 17538 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:49:33.962721 17538 solver.cpp:404]     Test net output #0: accuracy = 0.58524\nI0817 21:49:33.962961 17538 solver.cpp:404]     Test net output #1: loss = 1.9888 (* 1 = 1.9888 loss)\nI0817 21:49:35.284658 17538 solver.cpp:228] Iteration 8700, loss = 0.0670138\nI0817 21:49:35.284700 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:49:35.284724 17538 solver.cpp:244]     Train net output #1: loss = 0.0670137 (* 1 = 0.0670137 loss)\nI0817 21:49:35.367815 17538 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 21:51:52.112634 17538 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:53:13.921216 17538 solver.cpp:404]     Test net output #0: accuracy = 0.60864\nI0817 21:53:13.921502 17538 solver.cpp:404]     Test net output #1: loss = 1.75641 (* 1 = 1.75641 loss)\nI0817 21:53:15.242990 17538 solver.cpp:228] Iteration 8800, loss = 0.0755038\nI0817 21:53:15.243033 17538 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0817 21:53:15.243057 17538 solver.cpp:244]     Train net output #1: loss = 0.0755038 (* 1 = 0.0755038 loss)\nI0817 21:53:15.326004 17538 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 21:55:32.097713 17538 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:56:53.929625 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64024\nI0817 21:56:53.929882 17538 solver.cpp:404]     Test net output #1: loss = 1.57257 (* 1 = 1.57257 loss)\nI0817 21:56:55.252712 17538 solver.cpp:228] Iteration 8900, loss = 0.144547\nI0817 21:56:55.252755 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:56:55.252779 17538 solver.cpp:244]     Train net output #1: loss = 0.144547 (* 1 = 0.144547 loss)\nI0817 21:56:55.337527 17538 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 21:59:12.046416 17538 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 22:00:33.867334 17538 solver.cpp:404]     Test net output #0: accuracy = 0.48208\nI0817 22:00:33.867580 17538 solver.cpp:404]     Test net output #1: loss = 2.65949 (* 1 = 2.65949 loss)\nI0817 22:00:35.189050 17538 solver.cpp:228] Iteration 9000, loss = 0.0660429\nI0817 22:00:35.189093 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:00:35.189116 17538 solver.cpp:244]     Train net output #1: loss = 0.0660429 (* 1 = 0.0660429 loss)\nI0817 22:00:35.275506 17538 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 22:02:51.996794 17538 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 22:04:13.648979 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7134\nI0817 22:04:13.649235 17538 solver.cpp:404]     Test net output #1: loss = 1.13559 (* 1 = 1.13559 loss)\nI0817 22:04:14.971045 17538 solver.cpp:228] Iteration 9100, loss = 0.0374421\nI0817 22:04:14.971091 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:04:14.971114 17538 solver.cpp:244]     Train net output #1: loss = 0.0374421 (* 1 = 0.0374421 loss)\nI0817 
22:04:15.053112 17538 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 22:06:31.886348 17538 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 22:07:53.533586 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70676\nI0817 22:07:53.533813 17538 solver.cpp:404]     Test net output #1: loss = 1.19993 (* 1 = 1.19993 loss)\nI0817 22:07:54.854992 17538 solver.cpp:228] Iteration 9200, loss = 0.103506\nI0817 22:07:54.855036 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:07:54.855059 17538 solver.cpp:244]     Train net output #1: loss = 0.103506 (* 1 = 0.103506 loss)\nI0817 22:07:54.939919 17538 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 22:10:11.727833 17538 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 22:11:33.722885 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64244\nI0817 22:11:33.723117 17538 solver.cpp:404]     Test net output #1: loss = 1.88209 (* 1 = 1.88209 loss)\nI0817 22:11:35.044315 17538 solver.cpp:228] Iteration 9300, loss = 0.0437669\nI0817 22:11:35.044359 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:11:35.044384 17538 solver.cpp:244]     Train net output #1: loss = 0.0437669 (* 1 = 0.0437669 loss)\nI0817 22:11:35.133553 17538 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 22:13:51.959403 17538 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:15:14.048305 17538 solver.cpp:404]     Test net output #0: accuracy = 0.62744\nI0817 22:15:14.048559 17538 solver.cpp:404]     Test net output #1: loss = 1.72996 (* 1 = 1.72996 loss)\nI0817 22:15:15.371315 17538 solver.cpp:228] Iteration 9400, loss = 0.0251229\nI0817 22:15:15.371359 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:15:15.371382 17538 solver.cpp:244]     Train net output #1: loss = 0.0251229 (* 1 = 0.0251229 loss)\nI0817 22:15:15.454505 17538 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 22:17:32.253178 17538 solver.cpp:337] Iteration 9500, Testing 
net (#0)\nI0817 22:18:54.317193 17538 solver.cpp:404]     Test net output #0: accuracy = 0.66416\nI0817 22:18:54.317409 17538 solver.cpp:404]     Test net output #1: loss = 1.56155 (* 1 = 1.56155 loss)\nI0817 22:18:55.640122 17538 solver.cpp:228] Iteration 9500, loss = 0.0698417\nI0817 22:18:55.640163 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:18:55.640188 17538 solver.cpp:244]     Train net output #1: loss = 0.0698417 (* 1 = 0.0698417 loss)\nI0817 22:18:55.720352 17538 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 22:21:12.555474 17538 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:22:34.602632 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70712\nI0817 22:22:34.602891 17538 solver.cpp:404]     Test net output #1: loss = 1.40643 (* 1 = 1.40643 loss)\nI0817 22:22:35.924522 17538 solver.cpp:228] Iteration 9600, loss = 0.0428968\nI0817 22:22:35.924567 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:22:35.924592 17538 solver.cpp:244]     Train net output #1: loss = 0.0428967 (* 1 = 0.0428967 loss)\nI0817 22:22:36.013792 17538 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 22:24:52.835253 17538 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:26:14.974303 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64372\nI0817 22:26:14.974560 17538 solver.cpp:404]     Test net output #1: loss = 1.72613 (* 1 = 1.72613 loss)\nI0817 22:26:16.296880 17538 solver.cpp:228] Iteration 9700, loss = 0.0881652\nI0817 22:26:16.296923 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:26:16.296947 17538 solver.cpp:244]     Train net output #1: loss = 0.0881652 (* 1 = 0.0881652 loss)\nI0817 22:26:16.375748 17538 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 22:28:33.126118 17538 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:29:55.216576 17538 solver.cpp:404]     Test net output #0: accuracy = 0.667\nI0817 22:29:55.216810 17538 
solver.cpp:404]     Test net output #1: loss = 1.29372 (* 1 = 1.29372 loss)\nI0817 22:29:56.538085 17538 solver.cpp:228] Iteration 9800, loss = 0.0422047\nI0817 22:29:56.538131 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:29:56.538157 17538 solver.cpp:244]     Train net output #1: loss = 0.0422047 (* 1 = 0.0422047 loss)\nI0817 22:29:56.618381 17538 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 22:32:13.369333 17538 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:33:35.505173 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6406\nI0817 22:33:35.505470 17538 solver.cpp:404]     Test net output #1: loss = 1.81619 (* 1 = 1.81619 loss)\nI0817 22:33:36.826879 17538 solver.cpp:228] Iteration 9900, loss = 0.0711687\nI0817 22:33:36.826925 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:33:36.826948 17538 solver.cpp:244]     Train net output #1: loss = 0.0711686 (* 1 = 0.0711686 loss)\nI0817 22:33:36.914824 17538 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 22:35:53.629634 17538 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:37:15.493065 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6178\nI0817 22:37:15.493345 17538 solver.cpp:404]     Test net output #1: loss = 1.89754 (* 1 = 1.89754 loss)\nI0817 22:37:16.816020 17538 solver.cpp:228] Iteration 10000, loss = 0.0333779\nI0817 22:37:16.816064 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:37:16.816087 17538 solver.cpp:244]     Train net output #1: loss = 0.0333778 (* 1 = 0.0333778 loss)\nI0817 22:37:16.899703 17538 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 22:39:33.645527 17538 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:40:55.669756 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64132\nI0817 22:40:55.670056 17538 solver.cpp:404]     Test net output #1: loss = 2.03322 (* 1 = 2.03322 loss)\nI0817 22:40:56.992380 17538 solver.cpp:228] Iteration 
10100, loss = 0.0668678\nI0817 22:40:56.992422 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:40:56.992446 17538 solver.cpp:244]     Train net output #1: loss = 0.0668678 (* 1 = 0.0668678 loss)\nI0817 22:40:57.071396 17538 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 22:43:13.813568 17538 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:44:36.087261 17538 solver.cpp:404]     Test net output #0: accuracy = 0.51172\nI0817 22:44:36.087550 17538 solver.cpp:404]     Test net output #1: loss = 3.16089 (* 1 = 3.16089 loss)\nI0817 22:44:37.409889 17538 solver.cpp:228] Iteration 10200, loss = 0.0462672\nI0817 22:44:37.409934 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:44:37.409956 17538 solver.cpp:244]     Train net output #1: loss = 0.0462672 (* 1 = 0.0462672 loss)\nI0817 22:44:37.491936 17538 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 22:46:54.337579 17538 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:48:16.634829 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6144\nI0817 22:48:16.635141 17538 solver.cpp:404]     Test net output #1: loss = 2.05368 (* 1 = 2.05368 loss)\nI0817 22:48:17.957427 17538 solver.cpp:228] Iteration 10300, loss = 0.151708\nI0817 22:48:17.957464 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 22:48:17.957494 17538 solver.cpp:244]     Train net output #1: loss = 0.151708 (* 1 = 0.151708 loss)\nI0817 22:48:18.046774 17538 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 22:50:35.129880 17538 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:51:57.328186 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70748\nI0817 22:51:57.328487 17538 solver.cpp:404]     Test net output #1: loss = 1.22855 (* 1 = 1.22855 loss)\nI0817 22:51:58.649734 17538 solver.cpp:228] Iteration 10400, loss = 0.177784\nI0817 22:51:58.649775 17538 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 
22:51:58.649791 17538 solver.cpp:244]     Train net output #1: loss = 0.177784 (* 1 = 0.177784 loss)\nI0817 22:51:58.740533 17538 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0817 22:54:15.846305 17538 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:55:38.015444 17538 solver.cpp:404]     Test net output #0: accuracy = 0.66304\nI0817 22:55:38.015727 17538 solver.cpp:404]     Test net output #1: loss = 1.57732 (* 1 = 1.57732 loss)\nI0817 22:55:39.336684 17538 solver.cpp:228] Iteration 10500, loss = 0.0840114\nI0817 22:55:39.336733 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:55:39.336760 17538 solver.cpp:244]     Train net output #1: loss = 0.0840114 (* 1 = 0.0840114 loss)\nI0817 22:55:39.424235 17538 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0817 22:57:56.532734 17538 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 22:59:18.701941 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6654\nI0817 22:59:18.702249 17538 solver.cpp:404]     Test net output #1: loss = 1.65072 (* 1 = 1.65072 loss)\nI0817 22:59:20.023387 17538 solver.cpp:228] Iteration 10600, loss = 0.0510655\nI0817 22:59:20.023428 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:59:20.023444 17538 solver.cpp:244]     Train net output #1: loss = 0.0510656 (* 1 = 0.0510656 loss)\nI0817 22:59:20.114079 17538 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 23:01:37.178807 17538 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 23:02:59.355245 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7486\nI0817 23:02:59.355550 17538 solver.cpp:404]     Test net output #1: loss = 0.999119 (* 1 = 0.999119 loss)\nI0817 23:03:00.677917 17538 solver.cpp:228] Iteration 10700, loss = 0.0220904\nI0817 23:03:00.677963 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:03:00.677978 17538 solver.cpp:244]     Train net output #1: loss = 0.0220904 (* 1 = 0.0220904 loss)\nI0817 23:03:00.761693 17538 
sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0817 23:05:17.854571 17538 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 23:06:40.036013 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71996\nI0817 23:06:40.036339 17538 solver.cpp:404]     Test net output #1: loss = 1.27616 (* 1 = 1.27616 loss)\nI0817 23:06:41.357605 17538 solver.cpp:228] Iteration 10800, loss = 0.0422388\nI0817 23:06:41.357650 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:06:41.357667 17538 solver.cpp:244]     Train net output #1: loss = 0.0422387 (* 1 = 0.0422387 loss)\nI0817 23:06:41.447561 17538 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 23:08:58.615700 17538 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 23:10:20.771337 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71844\nI0817 23:10:20.771616 17538 solver.cpp:404]     Test net output #1: loss = 1.28761 (* 1 = 1.28761 loss)\nI0817 23:10:22.092583 17538 solver.cpp:228] Iteration 10900, loss = 0.106973\nI0817 23:10:22.092628 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:10:22.092643 17538 solver.cpp:244]     Train net output #1: loss = 0.106973 (* 1 = 0.106973 loss)\nI0817 23:10:22.190394 17538 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 23:12:39.244539 17538 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 23:14:01.503947 17538 solver.cpp:404]     Test net output #0: accuracy = 0.684\nI0817 23:14:01.504232 17538 solver.cpp:404]     Test net output #1: loss = 1.53968 (* 1 = 1.53968 loss)\nI0817 23:14:02.827436 17538 solver.cpp:228] Iteration 11000, loss = 0.152309\nI0817 23:14:02.827486 17538 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 23:14:02.827509 17538 solver.cpp:244]     Train net output #1: loss = 0.152309 (* 1 = 0.152309 loss)\nI0817 23:14:02.915618 17538 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 23:16:20.015141 17538 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 
23:17:42.310505 17538 solver.cpp:404]     Test net output #0: accuracy = 0.52212\nI0817 23:17:42.310807 17538 solver.cpp:404]     Test net output #1: loss = 2.89792 (* 1 = 2.89792 loss)\nI0817 23:17:43.632496 17538 solver.cpp:228] Iteration 11100, loss = 0.0965048\nI0817 23:17:43.632544 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:17:43.632567 17538 solver.cpp:244]     Train net output #1: loss = 0.0965048 (* 1 = 0.0965048 loss)\nI0817 23:17:43.720278 17538 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 23:20:00.864056 17538 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:21:23.095278 17538 solver.cpp:404]     Test net output #0: accuracy = 0.67352\nI0817 23:21:23.095577 17538 solver.cpp:404]     Test net output #1: loss = 1.44233 (* 1 = 1.44233 loss)\nI0817 23:21:24.417948 17538 solver.cpp:228] Iteration 11200, loss = 0.0501416\nI0817 23:21:24.417992 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:21:24.418015 17538 solver.cpp:244]     Train net output #1: loss = 0.0501416 (* 1 = 0.0501416 loss)\nI0817 23:21:24.501991 17538 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 23:23:41.625258 17538 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:25:03.887753 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71696\nI0817 23:25:03.888063 17538 solver.cpp:404]     Test net output #1: loss = 1.38727 (* 1 = 1.38727 loss)\nI0817 23:25:05.209810 17538 solver.cpp:228] Iteration 11300, loss = 0.0836329\nI0817 23:25:05.209856 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:25:05.209878 17538 solver.cpp:244]     Train net output #1: loss = 0.0836329 (* 1 = 0.0836329 loss)\nI0817 23:25:05.296778 17538 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 23:27:22.358369 17538 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:28:44.637203 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74376\nI0817 23:28:44.637501 17538 solver.cpp:404]    
 Test net output #1: loss = 1.14362 (* 1 = 1.14362 loss)\nI0817 23:28:45.960549 17538 solver.cpp:228] Iteration 11400, loss = 0.0429102\nI0817 23:28:45.960594 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:28:45.960618 17538 solver.cpp:244]     Train net output #1: loss = 0.0429102 (* 1 = 0.0429102 loss)\nI0817 23:28:46.049777 17538 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 23:31:03.098845 17538 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:32:25.414561 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71444\nI0817 23:32:25.414862 17538 solver.cpp:404]     Test net output #1: loss = 1.29486 (* 1 = 1.29486 loss)\nI0817 23:32:26.738020 17538 solver.cpp:228] Iteration 11500, loss = 0.0940014\nI0817 23:32:26.738066 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:32:26.738090 17538 solver.cpp:244]     Train net output #1: loss = 0.0940014 (* 1 = 0.0940014 loss)\nI0817 23:32:26.826975 17538 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 23:34:44.012498 17538 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:36:06.297916 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7096\nI0817 23:36:06.298229 17538 solver.cpp:404]     Test net output #1: loss = 1.38388 (* 1 = 1.38388 loss)\nI0817 23:36:07.627946 17538 solver.cpp:228] Iteration 11600, loss = 0.0434806\nI0817 23:36:07.627993 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:36:07.628016 17538 solver.cpp:244]     Train net output #1: loss = 0.0434806 (* 1 = 0.0434806 loss)\nI0817 23:36:07.710021 17538 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 23:38:24.813331 17538 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:39:47.121737 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73132\nI0817 23:39:47.122054 17538 solver.cpp:404]     Test net output #1: loss = 1.22922 (* 1 = 1.22922 loss)\nI0817 23:39:48.444227 17538 solver.cpp:228] Iteration 11700, loss = 
0.0795866\nI0817 23:39:48.444274 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:39:48.444298 17538 solver.cpp:244]     Train net output #1: loss = 0.0795866 (* 1 = 0.0795866 loss)\nI0817 23:39:48.529526 17538 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 23:42:05.628424 17538 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:43:27.925961 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71288\nI0817 23:43:27.926288 17538 solver.cpp:404]     Test net output #1: loss = 1.24768 (* 1 = 1.24768 loss)\nI0817 23:43:29.248670 17538 solver.cpp:228] Iteration 11800, loss = 0.0192963\nI0817 23:43:29.248713 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:43:29.248736 17538 solver.cpp:244]     Train net output #1: loss = 0.0192964 (* 1 = 0.0192964 loss)\nI0817 23:43:29.331310 17538 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 23:45:46.385197 17538 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:47:08.715740 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73056\nI0817 23:47:08.716037 17538 solver.cpp:404]     Test net output #1: loss = 1.27737 (* 1 = 1.27737 loss)\nI0817 23:47:10.037266 17538 solver.cpp:228] Iteration 11900, loss = 0.0515351\nI0817 23:47:10.037310 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:47:10.037333 17538 solver.cpp:244]     Train net output #1: loss = 0.0515351 (* 1 = 0.0515351 loss)\nI0817 23:47:10.129796 17538 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 23:49:27.323945 17538 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:50:49.632503 17538 solver.cpp:404]     Test net output #0: accuracy = 0.61516\nI0817 23:50:49.632812 17538 solver.cpp:404]     Test net output #1: loss = 1.95598 (* 1 = 1.95598 loss)\nI0817 23:50:50.954623 17538 solver.cpp:228] Iteration 12000, loss = 0.0616282\nI0817 23:50:50.954658 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:50:50.954682 17538 
solver.cpp:244]     Train net output #1: loss = 0.0616282 (* 1 = 0.0616282 loss)\nI0817 23:50:51.045871 17538 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0817 23:53:08.154342 17538 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 23:54:30.371053 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6856\nI0817 23:54:30.371366 17538 solver.cpp:404]     Test net output #1: loss = 1.4936 (* 1 = 1.4936 loss)\nI0817 23:54:31.694190 17538 solver.cpp:228] Iteration 12100, loss = 0.0472344\nI0817 23:54:31.694229 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:54:31.694254 17538 solver.cpp:244]     Train net output #1: loss = 0.0472344 (* 1 = 0.0472344 loss)\nI0817 23:54:31.785920 17538 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0817 23:56:48.838148 17538 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:58:11.032439 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68492\nI0817 23:58:11.032766 17538 solver.cpp:404]     Test net output #1: loss = 1.50526 (* 1 = 1.50526 loss)\nI0817 23:58:12.354810 17538 solver.cpp:228] Iteration 12200, loss = 0.0460562\nI0817 23:58:12.354851 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:58:12.354873 17538 solver.cpp:244]     Train net output #1: loss = 0.0460562 (* 1 = 0.0460562 loss)\nI0817 23:58:12.444270 17538 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0818 00:00:29.650609 17538 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 00:01:51.959702 17538 solver.cpp:404]     Test net output #0: accuracy = 0.702441\nI0818 00:01:51.960013 17538 solver.cpp:404]     Test net output #1: loss = 1.41897 (* 1 = 1.41897 loss)\nI0818 00:01:53.281440 17538 solver.cpp:228] Iteration 12300, loss = 0.0869048\nI0818 00:01:53.281486 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:01:53.281509 17538 solver.cpp:244]     Train net output #1: loss = 0.0869048 (* 1 = 0.0869048 loss)\nI0818 00:01:53.368445 17538 sgd_solver.cpp:166] 
Iteration 12300, lr = 0.35\nI0818 00:04:10.528239 17538 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 00:05:32.772040 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64676\nI0818 00:05:32.772354 17538 solver.cpp:404]     Test net output #1: loss = 1.87605 (* 1 = 1.87605 loss)\nI0818 00:05:34.093686 17538 solver.cpp:228] Iteration 12400, loss = 0.115978\nI0818 00:05:34.093730 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:05:34.093755 17538 solver.cpp:244]     Train net output #1: loss = 0.115978 (* 1 = 0.115978 loss)\nI0818 00:05:34.180891 17538 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0818 00:07:51.363392 17538 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 00:09:13.530382 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74368\nI0818 00:09:13.530666 17538 solver.cpp:404]     Test net output #1: loss = 1.10048 (* 1 = 1.10048 loss)\nI0818 00:09:14.852061 17538 solver.cpp:228] Iteration 12500, loss = 0.0889096\nI0818 00:09:14.852103 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:09:14.852119 17538 solver.cpp:244]     Train net output #1: loss = 0.0889096 (* 1 = 0.0889096 loss)\nI0818 00:09:14.937165 17538 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0818 00:11:32.122124 17538 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 00:12:54.272567 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7344\nI0818 00:12:54.272845 17538 solver.cpp:404]     Test net output #1: loss = 1.1298 (* 1 = 1.1298 loss)\nI0818 00:12:55.594094 17538 solver.cpp:228] Iteration 12600, loss = 0.129098\nI0818 00:12:55.594137 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:12:55.594152 17538 solver.cpp:244]     Train net output #1: loss = 0.129098 (* 1 = 0.129098 loss)\nI0818 00:12:55.677239 17538 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0818 00:15:12.754281 17538 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:16:34.891839 17538 
solver.cpp:404]     Test net output #0: accuracy = 0.70412\nI0818 00:16:34.892146 17538 solver.cpp:404]     Test net output #1: loss = 1.3948 (* 1 = 1.3948 loss)\nI0818 00:16:36.213594 17538 solver.cpp:228] Iteration 12700, loss = 0.0974472\nI0818 00:16:36.213637 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:16:36.213654 17538 solver.cpp:244]     Train net output #1: loss = 0.0974472 (* 1 = 0.0974472 loss)\nI0818 00:16:36.298910 17538 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0818 00:18:53.432181 17538 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:20:15.575143 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72988\nI0818 00:20:15.575451 17538 solver.cpp:404]     Test net output #1: loss = 1.25833 (* 1 = 1.25833 loss)\nI0818 00:20:16.897505 17538 solver.cpp:228] Iteration 12800, loss = 0.0720728\nI0818 00:20:16.897543 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:20:16.897559 17538 solver.cpp:244]     Train net output #1: loss = 0.0720728 (* 1 = 0.0720728 loss)\nI0818 00:20:16.983292 17538 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0818 00:22:34.127341 17538 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:23:56.281317 17538 solver.cpp:404]     Test net output #0: accuracy = 0.716\nI0818 00:23:56.281632 17538 solver.cpp:404]     Test net output #1: loss = 1.36608 (* 1 = 1.36608 loss)\nI0818 00:23:57.603796 17538 solver.cpp:228] Iteration 12900, loss = 0.0383107\nI0818 00:23:57.603834 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:23:57.603850 17538 solver.cpp:244]     Train net output #1: loss = 0.0383107 (* 1 = 0.0383107 loss)\nI0818 00:23:57.687783 17538 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0818 00:26:14.836352 17538 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:27:37.017391 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70248\nI0818 00:27:37.017712 17538 solver.cpp:404]     Test net output #1: loss = 
1.37363 (* 1 = 1.37363 loss)\nI0818 00:27:38.339653 17538 solver.cpp:228] Iteration 13000, loss = 0.0726974\nI0818 00:27:38.339692 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:27:38.339709 17538 solver.cpp:244]     Train net output #1: loss = 0.0726974 (* 1 = 0.0726974 loss)\nI0818 00:27:38.426249 17538 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0818 00:29:55.534559 17538 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:31:17.709410 17538 solver.cpp:404]     Test net output #0: accuracy = 0.702\nI0818 00:31:17.709717 17538 solver.cpp:404]     Test net output #1: loss = 1.4353 (* 1 = 1.4353 loss)\nI0818 00:31:19.031261 17538 solver.cpp:228] Iteration 13100, loss = 0.0953269\nI0818 00:31:19.031301 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:31:19.031321 17538 solver.cpp:244]     Train net output #1: loss = 0.0953269 (* 1 = 0.0953269 loss)\nI0818 00:31:19.122825 17538 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0818 00:33:36.214679 17538 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:34:58.380090 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71936\nI0818 00:34:58.380404 17538 solver.cpp:404]     Test net output #1: loss = 1.35121 (* 1 = 1.35121 loss)\nI0818 00:34:59.701938 17538 solver.cpp:228] Iteration 13200, loss = 0.0418971\nI0818 00:34:59.701980 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:34:59.701997 17538 solver.cpp:244]     Train net output #1: loss = 0.0418972 (* 1 = 0.0418972 loss)\nI0818 00:34:59.788398 17538 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0818 00:37:16.925824 17538 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:38:39.120309 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73904\nI0818 00:38:39.120604 17538 solver.cpp:404]     Test net output #1: loss = 1.14753 (* 1 = 1.14753 loss)\nI0818 00:38:40.443114 17538 solver.cpp:228] Iteration 13300, loss = 0.0441636\nI0818 00:38:40.443156 
17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:38:40.443181 17538 solver.cpp:244]     Train net output #1: loss = 0.0441636 (* 1 = 0.0441636 loss)\nI0818 00:38:40.531057 17538 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0818 00:40:57.726209 17538 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:42:19.782353 17538 solver.cpp:404]     Test net output #0: accuracy = 0.5964\nI0818 00:42:19.782624 17538 solver.cpp:404]     Test net output #1: loss = 2.61259 (* 1 = 2.61259 loss)\nI0818 00:42:21.105193 17538 solver.cpp:228] Iteration 13400, loss = 0.158648\nI0818 00:42:21.105239 17538 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 00:42:21.105263 17538 solver.cpp:244]     Train net output #1: loss = 0.158648 (* 1 = 0.158648 loss)\nI0818 00:42:21.187928 17538 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0818 00:44:38.311354 17538 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:45:59.774726 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75664\nI0818 00:45:59.774997 17538 solver.cpp:404]     Test net output #1: loss = 1.03824 (* 1 = 1.03824 loss)\nI0818 00:46:01.097311 17538 solver.cpp:228] Iteration 13500, loss = 0.0959605\nI0818 00:46:01.097355 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:46:01.097379 17538 solver.cpp:244]     Train net output #1: loss = 0.0959605 (* 1 = 0.0959605 loss)\nI0818 00:46:01.187399 17538 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0818 00:48:18.185457 17538 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:49:39.296149 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77432\nI0818 00:49:39.296429 17538 solver.cpp:404]     Test net output #1: loss = 0.933247 (* 1 = 0.933247 loss)\nI0818 00:49:40.615010 17538 solver.cpp:228] Iteration 13600, loss = 0.115344\nI0818 00:49:40.615046 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:49:40.615061 17538 solver.cpp:244]     Train net output 
#1: loss = 0.115344 (* 1 = 0.115344 loss)\nI0818 00:49:40.710613 17538 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0818 00:51:57.617012 17538 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:53:18.821856 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7134\nI0818 00:53:18.822099 17538 solver.cpp:404]     Test net output #1: loss = 1.35208 (* 1 = 1.35208 loss)\nI0818 00:53:20.141774 17538 solver.cpp:228] Iteration 13700, loss = 0.0361101\nI0818 00:53:20.141808 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:53:20.141824 17538 solver.cpp:244]     Train net output #1: loss = 0.0361101 (* 1 = 0.0361101 loss)\nI0818 00:53:20.226627 17538 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0818 00:55:36.839088 17538 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:56:58.053515 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7484\nI0818 00:56:58.053741 17538 solver.cpp:404]     Test net output #1: loss = 1.13754 (* 1 = 1.13754 loss)\nI0818 00:56:59.372488 17538 solver.cpp:228] Iteration 13800, loss = 0.122751\nI0818 00:56:59.372521 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 00:56:59.372535 17538 solver.cpp:244]     Train net output #1: loss = 0.122751 (* 1 = 0.122751 loss)\nI0818 00:56:59.459806 17538 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0818 00:59:16.054210 17538 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 01:00:37.273658 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68476\nI0818 01:00:37.273910 17538 solver.cpp:404]     Test net output #1: loss = 1.67401 (* 1 = 1.67401 loss)\nI0818 01:00:38.592658 17538 solver.cpp:228] Iteration 13900, loss = 0.0318539\nI0818 01:00:38.592700 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:00:38.592716 17538 solver.cpp:244]     Train net output #1: loss = 0.0318539 (* 1 = 0.0318539 loss)\nI0818 01:00:38.681761 17538 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0818 
01:02:55.285276 17538 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 01:04:16.496932 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74208\nI0818 01:04:16.497198 17538 solver.cpp:404]     Test net output #1: loss = 1.17493 (* 1 = 1.17493 loss)\nI0818 01:04:17.816690 17538 solver.cpp:228] Iteration 14000, loss = 0.032759\nI0818 01:04:17.816723 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:04:17.816737 17538 solver.cpp:244]     Train net output #1: loss = 0.032759 (* 1 = 0.032759 loss)\nI0818 01:04:17.914551 17538 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 01:06:34.466753 17538 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 01:07:55.662638 17538 solver.cpp:404]     Test net output #0: accuracy = 0.63568\nI0818 01:07:55.662894 17538 solver.cpp:404]     Test net output #1: loss = 1.89662 (* 1 = 1.89662 loss)\nI0818 01:07:56.981608 17538 solver.cpp:228] Iteration 14100, loss = 0.131188\nI0818 01:07:56.981653 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:07:56.981668 17538 solver.cpp:244]     Train net output #1: loss = 0.131188 (* 1 = 0.131188 loss)\nI0818 01:07:57.066474 17538 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0818 01:10:13.585652 17538 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 01:11:34.804438 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75252\nI0818 01:11:34.804692 17538 solver.cpp:404]     Test net output #1: loss = 1.16851 (* 1 = 1.16851 loss)\nI0818 01:11:36.123929 17538 solver.cpp:228] Iteration 14200, loss = 0.0325622\nI0818 01:11:36.123962 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:11:36.123977 17538 solver.cpp:244]     Train net output #1: loss = 0.0325622 (* 1 = 0.0325622 loss)\nI0818 01:11:36.215894 17538 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0818 01:13:52.859606 17538 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 01:15:14.093789 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.7386\nI0818 01:15:14.094019 17538 solver.cpp:404]     Test net output #1: loss = 1.18017 (* 1 = 1.18017 loss)\nI0818 01:15:15.413832 17538 solver.cpp:228] Iteration 14300, loss = 0.0480892\nI0818 01:15:15.413867 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:15:15.413882 17538 solver.cpp:244]     Train net output #1: loss = 0.0480892 (* 1 = 0.0480892 loss)\nI0818 01:15:15.502261 17538 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0818 01:17:32.063508 17538 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:18:53.278164 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73992\nI0818 01:18:53.278401 17538 solver.cpp:404]     Test net output #1: loss = 1.1623 (* 1 = 1.1623 loss)\nI0818 01:18:54.596992 17538 solver.cpp:228] Iteration 14400, loss = 0.0641192\nI0818 01:18:54.597026 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:18:54.597041 17538 solver.cpp:244]     Train net output #1: loss = 0.0641192 (* 1 = 0.0641192 loss)\nI0818 01:18:54.690264 17538 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0818 01:21:11.302284 17538 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:22:32.502068 17538 solver.cpp:404]     Test net output #0: accuracy = 0.62752\nI0818 01:22:32.502329 17538 solver.cpp:404]     Test net output #1: loss = 2.4905 (* 1 = 2.4905 loss)\nI0818 01:22:33.821303 17538 solver.cpp:228] Iteration 14500, loss = 0.0335861\nI0818 01:22:33.821337 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:22:33.821352 17538 solver.cpp:244]     Train net output #1: loss = 0.0335861 (* 1 = 0.0335861 loss)\nI0818 01:22:33.902864 17538 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0818 01:24:50.526571 17538 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:26:11.753129 17538 solver.cpp:404]     Test net output #0: accuracy = 0.5912\nI0818 01:26:11.753374 17538 solver.cpp:404]     Test net output #1: loss = 2.60682 (* 1 = 2.60682 
loss)\nI0818 01:26:13.073071 17538 solver.cpp:228] Iteration 14600, loss = 0.0452294\nI0818 01:26:13.073107 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:26:13.073130 17538 solver.cpp:244]     Train net output #1: loss = 0.0452294 (* 1 = 0.0452294 loss)\nI0818 01:26:13.159783 17538 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0818 01:28:29.765826 17538 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:29:50.999563 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71592\nI0818 01:29:50.999830 17538 solver.cpp:404]     Test net output #1: loss = 1.4128 (* 1 = 1.4128 loss)\nI0818 01:29:52.319960 17538 solver.cpp:228] Iteration 14700, loss = 0.0337426\nI0818 01:29:52.319996 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:29:52.320019 17538 solver.cpp:244]     Train net output #1: loss = 0.0337426 (* 1 = 0.0337426 loss)\nI0818 01:29:52.405603 17538 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0818 01:32:09.082571 17538 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:33:31.282958 17538 solver.cpp:404]     Test net output #0: accuracy = 0.65592\nI0818 01:33:31.283269 17538 solver.cpp:404]     Test net output #1: loss = 1.91083 (* 1 = 1.91083 loss)\nI0818 01:33:32.605856 17538 solver.cpp:228] Iteration 14800, loss = 0.0499369\nI0818 01:33:32.605909 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:33:32.605926 17538 solver.cpp:244]     Train net output #1: loss = 0.049937 (* 1 = 0.049937 loss)\nI0818 01:33:32.683234 17538 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0818 01:35:49.540606 17538 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:37:11.738638 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72916\nI0818 01:37:11.738941 17538 solver.cpp:404]     Test net output #1: loss = 1.34426 (* 1 = 1.34426 loss)\nI0818 01:37:13.060667 17538 solver.cpp:228] Iteration 14900, loss = 0.0464309\nI0818 01:37:13.060720 17538 solver.cpp:244]   
  Train net output #0: accuracy = 0.984\nI0818 01:37:13.060739 17538 solver.cpp:244]     Train net output #1: loss = 0.0464309 (* 1 = 0.0464309 loss)\nI0818 01:37:13.151690 17538 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0818 01:39:29.975450 17538 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:40:52.154413 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75584\nI0818 01:40:52.154700 17538 solver.cpp:404]     Test net output #1: loss = 1.06364 (* 1 = 1.06364 loss)\nI0818 01:40:53.476476 17538 solver.cpp:228] Iteration 15000, loss = 0.0763157\nI0818 01:40:53.476529 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:40:53.476547 17538 solver.cpp:244]     Train net output #1: loss = 0.0763157 (* 1 = 0.0763157 loss)\nI0818 01:40:53.561470 17538 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0818 01:43:10.295183 17538 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:44:32.477648 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7266\nI0818 01:44:32.477941 17538 solver.cpp:404]     Test net output #1: loss = 1.28552 (* 1 = 1.28552 loss)\nI0818 01:44:33.800595 17538 solver.cpp:228] Iteration 15100, loss = 0.0315449\nI0818 01:44:33.800647 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:44:33.800664 17538 solver.cpp:244]     Train net output #1: loss = 0.0315449 (* 1 = 0.0315449 loss)\nI0818 01:44:33.881146 17538 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0818 01:46:50.624435 17538 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:48:12.802459 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72332\nI0818 01:48:12.802752 17538 solver.cpp:404]     Test net output #1: loss = 1.33049 (* 1 = 1.33049 loss)\nI0818 01:48:14.124886 17538 solver.cpp:228] Iteration 15200, loss = 0.0594505\nI0818 01:48:14.124939 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:48:14.124956 17538 solver.cpp:244]     Train net output #1: loss = 0.0594505 
(* 1 = 0.0594505 loss)\nI0818 01:48:14.209882 17538 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0818 01:50:30.986518 17538 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:51:53.183923 17538 solver.cpp:404]     Test net output #0: accuracy = 0.67852\nI0818 01:51:53.184237 17538 solver.cpp:404]     Test net output #1: loss = 1.58156 (* 1 = 1.58156 loss)\nI0818 01:51:54.507472 17538 solver.cpp:228] Iteration 15300, loss = 0.0488931\nI0818 01:51:54.507526 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:51:54.507544 17538 solver.cpp:244]     Train net output #1: loss = 0.0488932 (* 1 = 0.0488932 loss)\nI0818 01:51:54.583884 17538 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0818 01:54:11.361450 17538 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:55:33.545553 17538 solver.cpp:404]     Test net output #0: accuracy = 0.62744\nI0818 01:55:33.545851 17538 solver.cpp:404]     Test net output #1: loss = 2.01121 (* 1 = 2.01121 loss)\nI0818 01:55:34.868345 17538 solver.cpp:228] Iteration 15400, loss = 0.0715142\nI0818 01:55:34.868389 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:55:34.868405 17538 solver.cpp:244]     Train net output #1: loss = 0.0715142 (* 1 = 0.0715142 loss)\nI0818 01:55:34.949618 17538 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0818 01:57:51.758394 17538 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 01:59:13.939245 17538 solver.cpp:404]     Test net output #0: accuracy = 0.63752\nI0818 01:59:13.939579 17538 solver.cpp:404]     Test net output #1: loss = 1.88328 (* 1 = 1.88328 loss)\nI0818 01:59:15.262725 17538 solver.cpp:228] Iteration 15500, loss = 0.0518886\nI0818 01:59:15.262779 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:59:15.262796 17538 solver.cpp:244]     Train net output #1: loss = 0.0518886 (* 1 = 0.0518886 loss)\nI0818 01:59:15.344576 17538 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0818 02:01:32.259565 17538 
solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 02:02:54.439082 17538 solver.cpp:404]     Test net output #0: accuracy = 0.754\nI0818 02:02:54.439389 17538 solver.cpp:404]     Test net output #1: loss = 1.08257 (* 1 = 1.08257 loss)\nI0818 02:02:55.761766 17538 solver.cpp:228] Iteration 15600, loss = 0.0735143\nI0818 02:02:55.761818 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:02:55.761837 17538 solver.cpp:244]     Train net output #1: loss = 0.0735143 (* 1 = 0.0735143 loss)\nI0818 02:02:55.856201 17538 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0818 02:05:12.650192 17538 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 02:06:34.866169 17538 solver.cpp:404]     Test net output #0: accuracy = 0.65056\nI0818 02:06:34.866495 17538 solver.cpp:404]     Test net output #1: loss = 1.88306 (* 1 = 1.88306 loss)\nI0818 02:06:36.189502 17538 solver.cpp:228] Iteration 15700, loss = 0.0370692\nI0818 02:06:36.189558 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:06:36.189582 17538 solver.cpp:244]     Train net output #1: loss = 0.0370692 (* 1 = 0.0370692 loss)\nI0818 02:06:36.279240 17538 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0818 02:08:53.062892 17538 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 02:10:15.276635 17538 solver.cpp:404]     Test net output #0: accuracy = 0.757\nI0818 02:10:15.276923 17538 solver.cpp:404]     Test net output #1: loss = 1.0688 (* 1 = 1.0688 loss)\nI0818 02:10:16.599725 17538 solver.cpp:228] Iteration 15800, loss = 0.0552305\nI0818 02:10:16.599778 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:10:16.599803 17538 solver.cpp:244]     Train net output #1: loss = 0.0552305 (* 1 = 0.0552305 loss)\nI0818 02:10:16.685992 17538 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0818 02:12:33.566233 17538 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 02:13:55.835300 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.74292\nI0818 02:13:55.835626 17538 solver.cpp:404]     Test net output #1: loss = 1.10726 (* 1 = 1.10726 loss)\nI0818 02:13:57.162839 17538 solver.cpp:228] Iteration 15900, loss = 0.0941277\nI0818 02:13:57.162901 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:13:57.162935 17538 solver.cpp:244]     Train net output #1: loss = 0.0941277 (* 1 = 0.0941277 loss)\nI0818 02:13:57.246840 17538 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0818 02:16:14.159451 17538 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:17:36.332389 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6982\nI0818 02:17:36.332713 17538 solver.cpp:404]     Test net output #1: loss = 1.55313 (* 1 = 1.55313 loss)\nI0818 02:17:37.654163 17538 solver.cpp:228] Iteration 16000, loss = 0.0299549\nI0818 02:17:37.654218 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:17:37.654243 17538 solver.cpp:244]     Train net output #1: loss = 0.0299549 (* 1 = 0.0299549 loss)\nI0818 02:17:37.741341 17538 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0818 02:19:54.626858 17538 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:21:16.809216 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74104\nI0818 02:21:16.809525 17538 solver.cpp:404]     Test net output #1: loss = 1.44338 (* 1 = 1.44338 loss)\nI0818 02:21:18.131453 17538 solver.cpp:228] Iteration 16100, loss = 0.0969123\nI0818 02:21:18.131505 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:21:18.131530 17538 solver.cpp:244]     Train net output #1: loss = 0.0969124 (* 1 = 0.0969124 loss)\nI0818 02:21:18.212889 17538 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0818 02:23:35.013458 17538 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:24:57.210664 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74468\nI0818 02:24:57.210958 17538 solver.cpp:404]     Test net output #1: loss = 1.19051 (* 1 = 1.19051 loss)\nI0818 
02:24:58.532706 17538 solver.cpp:228] Iteration 16200, loss = 0.0593301\nI0818 02:24:58.532758 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:24:58.532783 17538 solver.cpp:244]     Train net output #1: loss = 0.0593302 (* 1 = 0.0593302 loss)\nI0818 02:24:58.614614 17538 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0818 02:27:15.479975 17538 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:28:37.676226 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68956\nI0818 02:28:37.676558 17538 solver.cpp:404]     Test net output #1: loss = 1.76924 (* 1 = 1.76924 loss)\nI0818 02:28:38.998419 17538 solver.cpp:228] Iteration 16300, loss = 0.143276\nI0818 02:28:38.998461 17538 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 02:28:38.998484 17538 solver.cpp:244]     Train net output #1: loss = 0.143276 (* 1 = 0.143276 loss)\nI0818 02:28:39.085317 17538 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0818 02:30:55.870944 17538 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:32:18.109346 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74796\nI0818 02:32:18.109637 17538 solver.cpp:404]     Test net output #1: loss = 1.17169 (* 1 = 1.17169 loss)\nI0818 02:32:19.431074 17538 solver.cpp:228] Iteration 16400, loss = 0.0900082\nI0818 02:32:19.431128 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:32:19.431145 17538 solver.cpp:244]     Train net output #1: loss = 0.0900082 (* 1 = 0.0900082 loss)\nI0818 02:32:19.520181 17538 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0818 02:34:36.359571 17538 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:35:58.531471 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73976\nI0818 02:35:58.531764 17538 solver.cpp:404]     Test net output #1: loss = 1.20735 (* 1 = 1.20735 loss)\nI0818 02:35:59.855952 17538 solver.cpp:228] Iteration 16500, loss = 0.0208622\nI0818 02:35:59.855995 17538 solver.cpp:244]     Train net 
output #0: accuracy = 0.992\nI0818 02:35:59.856012 17538 solver.cpp:244]     Train net output #1: loss = 0.0208623 (* 1 = 0.0208623 loss)\nI0818 02:35:59.947294 17538 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0818 02:38:17.179603 17538 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 02:39:39.364410 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72972\nI0818 02:39:39.364701 17538 solver.cpp:404]     Test net output #1: loss = 1.22642 (* 1 = 1.22642 loss)\nI0818 02:39:40.689362 17538 solver.cpp:228] Iteration 16600, loss = 0.0424811\nI0818 02:39:40.689402 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:39:40.689419 17538 solver.cpp:244]     Train net output #1: loss = 0.0424811 (* 1 = 0.0424811 loss)\nI0818 02:39:40.774343 17538 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0818 02:41:58.015957 17538 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:43:20.177502 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77956\nI0818 02:43:20.177793 17538 solver.cpp:404]     Test net output #1: loss = 0.963828 (* 1 = 0.963828 loss)\nI0818 02:43:21.501519 17538 solver.cpp:228] Iteration 16700, loss = 0.0251939\nI0818 02:43:21.501560 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:43:21.501577 17538 solver.cpp:244]     Train net output #1: loss = 0.0251938 (* 1 = 0.0251938 loss)\nI0818 02:43:21.582586 17538 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0818 02:45:38.532299 17538 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:47:00.727880 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72652\nI0818 02:47:00.728199 17538 solver.cpp:404]     Test net output #1: loss = 1.40118 (* 1 = 1.40118 loss)\nI0818 02:47:02.052572 17538 solver.cpp:228] Iteration 16800, loss = 0.0731408\nI0818 02:47:02.052611 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:47:02.052628 17538 solver.cpp:244]     Train net output #1: loss = 0.0731408 (* 1 = 
0.0731408 loss)\nI0818 02:47:02.131716 17538 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0818 02:49:19.054721 17538 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:50:41.217140 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72136\nI0818 02:50:41.217449 17538 solver.cpp:404]     Test net output #1: loss = 1.40825 (* 1 = 1.40825 loss)\nI0818 02:50:42.541558 17538 solver.cpp:228] Iteration 16900, loss = 0.0451914\nI0818 02:50:42.541600 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:50:42.541616 17538 solver.cpp:244]     Train net output #1: loss = 0.0451914 (* 1 = 0.0451914 loss)\nI0818 02:50:42.626085 17538 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0818 02:52:59.611062 17538 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:54:21.783397 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77164\nI0818 02:54:21.783685 17538 solver.cpp:404]     Test net output #1: loss = 1.02735 (* 1 = 1.02735 loss)\nI0818 02:54:23.108703 17538 solver.cpp:228] Iteration 17000, loss = 0.0451177\nI0818 02:54:23.108745 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:54:23.108762 17538 solver.cpp:244]     Train net output #1: loss = 0.0451177 (* 1 = 0.0451177 loss)\nI0818 02:54:23.187420 17538 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0818 02:56:40.146682 17538 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 02:58:02.310349 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76624\nI0818 02:58:02.310664 17538 solver.cpp:404]     Test net output #1: loss = 0.98924 (* 1 = 0.98924 loss)\nI0818 02:58:03.635422 17538 solver.cpp:228] Iteration 17100, loss = 0.0286958\nI0818 02:58:03.635464 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:58:03.635480 17538 solver.cpp:244]     Train net output #1: loss = 0.0286958 (* 1 = 0.0286958 loss)\nI0818 02:58:03.716964 17538 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0818 03:00:20.646029 17538 
solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 03:01:42.812923 17538 solver.cpp:404]     Test net output #0: accuracy = 0.54916\nI0818 03:01:42.813235 17538 solver.cpp:404]     Test net output #1: loss = 2.8694 (* 1 = 2.8694 loss)\nI0818 03:01:44.137825 17538 solver.cpp:228] Iteration 17200, loss = 0.0819589\nI0818 03:01:44.137866 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:01:44.137883 17538 solver.cpp:244]     Train net output #1: loss = 0.0819589 (* 1 = 0.0819589 loss)\nI0818 03:01:44.216621 17538 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0818 03:04:01.151696 17538 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 03:05:23.339059 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72928\nI0818 03:05:23.339370 17538 solver.cpp:404]     Test net output #1: loss = 1.41495 (* 1 = 1.41495 loss)\nI0818 03:05:24.665359 17538 solver.cpp:228] Iteration 17300, loss = 0.0688375\nI0818 03:05:24.665406 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:05:24.665431 17538 solver.cpp:244]     Train net output #1: loss = 0.0688375 (* 1 = 0.0688375 loss)\nI0818 03:05:24.750588 17538 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0818 03:07:41.803851 17538 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 03:09:03.996881 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77372\nI0818 03:09:03.997192 17538 solver.cpp:404]     Test net output #1: loss = 0.956558 (* 1 = 0.956558 loss)\nI0818 03:09:05.323058 17538 solver.cpp:228] Iteration 17400, loss = 0.100962\nI0818 03:09:05.323106 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 03:09:05.323128 17538 solver.cpp:244]     Train net output #1: loss = 0.100962 (* 1 = 0.100962 loss)\nI0818 03:09:05.403542 17538 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0818 03:11:22.423924 17538 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 03:12:44.583166 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.76932\nI0818 03:12:44.583483 17538 solver.cpp:404]     Test net output #1: loss = 1.00816 (* 1 = 1.00816 loss)\nI0818 03:12:45.908426 17538 solver.cpp:228] Iteration 17500, loss = 0.0956199\nI0818 03:12:45.908468 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 03:12:45.908485 17538 solver.cpp:244]     Train net output #1: loss = 0.09562 (* 1 = 0.09562 loss)\nI0818 03:12:45.985716 17538 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0818 03:15:02.955982 17538 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:16:25.133937 17538 solver.cpp:404]     Test net output #0: accuracy = 0.65304\nI0818 03:16:25.134258 17538 solver.cpp:404]     Test net output #1: loss = 1.74809 (* 1 = 1.74809 loss)\nI0818 03:16:26.459933 17538 solver.cpp:228] Iteration 17600, loss = 0.0412628\nI0818 03:16:26.459976 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:16:26.459993 17538 solver.cpp:244]     Train net output #1: loss = 0.0412628 (* 1 = 0.0412628 loss)\nI0818 03:16:26.538563 17538 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0818 03:18:43.566686 17538 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:20:05.761587 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70144\nI0818 03:20:05.761888 17538 solver.cpp:404]     Test net output #1: loss = 1.50901 (* 1 = 1.50901 loss)\nI0818 03:20:07.087926 17538 solver.cpp:228] Iteration 17700, loss = 0.0851324\nI0818 03:20:07.087971 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 03:20:07.087987 17538 solver.cpp:244]     Train net output #1: loss = 0.0851324 (* 1 = 0.0851324 loss)\nI0818 03:20:07.166230 17538 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0818 03:22:24.104696 17538 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:23:46.302721 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70484\nI0818 03:23:46.303017 17538 solver.cpp:404]     Test net output #1: loss = 1.46371 (* 1 = 1.46371 loss)\nI0818 
03:23:47.625958 17538 solver.cpp:228] Iteration 17800, loss = 0.0664528\nI0818 03:23:47.626014 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:23:47.626039 17538 solver.cpp:244]     Train net output #1: loss = 0.0664529 (* 1 = 0.0664529 loss)\nI0818 03:23:47.712254 17538 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0818 03:26:04.565678 17538 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:27:26.786217 17538 solver.cpp:404]     Test net output #0: accuracy = 0.62744\nI0818 03:27:26.786511 17538 solver.cpp:404]     Test net output #1: loss = 1.91578 (* 1 = 1.91578 loss)\nI0818 03:27:28.109479 17538 solver.cpp:228] Iteration 17900, loss = 0.064659\nI0818 03:27:28.109537 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:27:28.109555 17538 solver.cpp:244]     Train net output #1: loss = 0.064659 (* 1 = 0.064659 loss)\nI0818 03:27:28.190222 17538 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0818 03:29:44.927273 17538 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:31:07.110988 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6782\nI0818 03:31:07.111299 17538 solver.cpp:404]     Test net output #1: loss = 1.73873 (* 1 = 1.73873 loss)\nI0818 03:31:08.432760 17538 solver.cpp:228] Iteration 18000, loss = 0.0846638\nI0818 03:31:08.432814 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 03:31:08.432832 17538 solver.cpp:244]     Train net output #1: loss = 0.0846638 (* 1 = 0.0846638 loss)\nI0818 03:31:08.520931 17538 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0818 03:33:25.287770 17538 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:34:47.494426 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72512\nI0818 03:34:47.494724 17538 solver.cpp:404]     Test net output #1: loss = 1.28126 (* 1 = 1.28126 loss)\nI0818 03:34:48.816848 17538 solver.cpp:228] Iteration 18100, loss = 0.0725802\nI0818 03:34:48.816902 17538 solver.cpp:244]     Train net 
output #0: accuracy = 0.976\nI0818 03:34:48.816920 17538 solver.cpp:244]     Train net output #1: loss = 0.0725802 (* 1 = 0.0725802 loss)\nI0818 03:34:48.905422 17538 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0818 03:37:05.674154 17538 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:38:27.911929 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69684\nI0818 03:38:27.912238 17538 solver.cpp:404]     Test net output #1: loss = 1.57214 (* 1 = 1.57214 loss)\nI0818 03:38:29.234192 17538 solver.cpp:228] Iteration 18200, loss = 0.0251075\nI0818 03:38:29.234246 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:38:29.234269 17538 solver.cpp:244]     Train net output #1: loss = 0.0251076 (* 1 = 0.0251076 loss)\nI0818 03:38:29.315243 17538 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0818 03:40:46.096451 17538 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:42:08.275749 17538 solver.cpp:404]     Test net output #0: accuracy = 0.66788\nI0818 03:42:08.276012 17538 solver.cpp:404]     Test net output #1: loss = 1.77171 (* 1 = 1.77171 loss)\nI0818 03:42:09.599797 17538 solver.cpp:228] Iteration 18300, loss = 0.0697436\nI0818 03:42:09.599849 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:42:09.599867 17538 solver.cpp:244]     Train net output #1: loss = 0.0697436 (* 1 = 0.0697436 loss)\nI0818 03:42:09.686938 17538 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0818 03:44:26.858283 17538 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:45:49.084945 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72556\nI0818 03:45:49.085268 17538 solver.cpp:404]     Test net output #1: loss = 1.45023 (* 1 = 1.45023 loss)\nI0818 03:45:50.411188 17538 solver.cpp:228] Iteration 18400, loss = 0.029678\nI0818 03:45:50.411232 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 03:45:50.411248 17538 solver.cpp:244]     Train net output #1: loss = 0.029678 (* 1 = 
0.029678 loss)\nI0818 03:45:50.492431 17538 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0818 03:48:07.743580 17538 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:49:29.940090 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7526\nI0818 03:49:29.940405 17538 solver.cpp:404]     Test net output #1: loss = 1.11644 (* 1 = 1.11644 loss)\nI0818 03:49:31.265864 17538 solver.cpp:228] Iteration 18500, loss = 0.117208\nI0818 03:49:31.265908 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:49:31.265924 17538 solver.cpp:244]     Train net output #1: loss = 0.117208 (* 1 = 0.117208 loss)\nI0818 03:49:31.356791 17538 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0818 03:51:48.645321 17538 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:53:10.901800 17538 solver.cpp:404]     Test net output #0: accuracy = 0.722\nI0818 03:53:10.902110 17538 solver.cpp:404]     Test net output #1: loss = 1.48655 (* 1 = 1.48655 loss)\nI0818 03:53:12.227283 17538 solver.cpp:228] Iteration 18600, loss = 0.0686322\nI0818 03:53:12.227326 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 03:53:12.227342 17538 solver.cpp:244]     Train net output #1: loss = 0.0686323 (* 1 = 0.0686323 loss)\nI0818 03:53:12.310585 17538 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0818 03:55:29.514314 17538 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:56:51.696142 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73648\nI0818 03:56:51.696450 17538 solver.cpp:404]     Test net output #1: loss = 1.23101 (* 1 = 1.23101 loss)\nI0818 03:56:53.021217 17538 solver.cpp:228] Iteration 18700, loss = 0.123027\nI0818 03:56:53.021260 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 03:56:53.021277 17538 solver.cpp:244]     Train net output #1: loss = 0.123027 (* 1 = 0.123027 loss)\nI0818 03:56:53.101249 17538 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0818 03:59:10.260994 17538 solver.cpp:337] 
Iteration 18800, Testing net (#0)\nI0818 04:00:32.440740 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78156\nI0818 04:00:32.441047 17538 solver.cpp:404]     Test net output #1: loss = 1.04002 (* 1 = 1.04002 loss)\nI0818 04:00:33.766530 17538 solver.cpp:228] Iteration 18800, loss = 0.0599552\nI0818 04:00:33.766572 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:00:33.766589 17538 solver.cpp:244]     Train net output #1: loss = 0.0599552 (* 1 = 0.0599552 loss)\nI0818 04:00:33.843163 17538 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0818 04:02:50.983685 17538 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 04:04:13.152825 17538 solver.cpp:404]     Test net output #0: accuracy = 0.57924\nI0818 04:04:13.153106 17538 solver.cpp:404]     Test net output #1: loss = 2.3041 (* 1 = 2.3041 loss)\nI0818 04:04:14.479008 17538 solver.cpp:228] Iteration 18900, loss = 0.0731139\nI0818 04:04:14.479053 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 04:04:14.479068 17538 solver.cpp:244]     Train net output #1: loss = 0.0731139 (* 1 = 0.0731139 loss)\nI0818 04:04:14.559165 17538 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0818 04:06:31.693627 17538 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 04:07:53.860826 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68164\nI0818 04:07:53.861116 17538 solver.cpp:404]     Test net output #1: loss = 1.87214 (* 1 = 1.87214 loss)\nI0818 04:07:55.186955 17538 solver.cpp:228] Iteration 19000, loss = 0.0963062\nI0818 04:07:55.186998 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:07:55.187014 17538 solver.cpp:244]     Train net output #1: loss = 0.0963063 (* 1 = 0.0963063 loss)\nI0818 04:07:55.272058 17538 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0818 04:10:12.450567 17538 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 04:11:34.633489 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68412\nI0818 
04:11:34.633908 17538 solver.cpp:404]     Test net output #1: loss = 1.9048 (* 1 = 1.9048 loss)\nI0818 04:11:35.959512 17538 solver.cpp:228] Iteration 19100, loss = 0.0712898\nI0818 04:11:35.959558 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:11:35.959575 17538 solver.cpp:244]     Train net output #1: loss = 0.0712898 (* 1 = 0.0712898 loss)\nI0818 04:11:36.043980 17538 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0818 04:13:53.196722 17538 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 04:15:15.383229 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70192\nI0818 04:15:15.383491 17538 solver.cpp:404]     Test net output #1: loss = 1.63695 (* 1 = 1.63695 loss)\nI0818 04:15:16.708421 17538 solver.cpp:228] Iteration 19200, loss = 0.0590124\nI0818 04:15:16.708463 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:15:16.708480 17538 solver.cpp:244]     Train net output #1: loss = 0.0590125 (* 1 = 0.0590125 loss)\nI0818 04:15:16.795642 17538 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0818 04:17:33.982193 17538 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:18:56.133848 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74032\nI0818 04:18:56.134132 17538 solver.cpp:404]     Test net output #1: loss = 1.37393 (* 1 = 1.37393 loss)\nI0818 04:18:57.460460 17538 solver.cpp:228] Iteration 19300, loss = 0.0395061\nI0818 04:18:57.460503 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:18:57.460525 17538 solver.cpp:244]     Train net output #1: loss = 0.0395061 (* 1 = 0.0395061 loss)\nI0818 04:18:57.544564 17538 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0818 04:21:14.755049 17538 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:22:36.914675 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75436\nI0818 04:22:36.914988 17538 solver.cpp:404]     Test net output #1: loss = 1.11682 (* 1 = 1.11682 loss)\nI0818 04:22:38.240262 17538 
solver.cpp:228] Iteration 19400, loss = 0.0830068\nI0818 04:22:38.240308 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 04:22:38.240324 17538 solver.cpp:244]     Train net output #1: loss = 0.0830069 (* 1 = 0.0830069 loss)\nI0818 04:22:38.323904 17538 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0818 04:24:55.416967 17538 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 04:26:17.590490 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69596\nI0818 04:26:17.590807 17538 solver.cpp:404]     Test net output #1: loss = 1.33137 (* 1 = 1.33137 loss)\nI0818 04:26:18.915585 17538 solver.cpp:228] Iteration 19500, loss = 0.0317052\nI0818 04:26:18.915628 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:26:18.915644 17538 solver.cpp:244]     Train net output #1: loss = 0.0317052 (* 1 = 0.0317052 loss)\nI0818 04:26:19.002846 17538 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0818 04:28:36.210001 17538 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:29:58.375522 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79696\nI0818 04:29:58.375838 17538 solver.cpp:404]     Test net output #1: loss = 0.900903 (* 1 = 0.900903 loss)\nI0818 04:29:59.700811 17538 solver.cpp:228] Iteration 19600, loss = 0.025559\nI0818 04:29:59.700865 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:29:59.700882 17538 solver.cpp:244]     Train net output #1: loss = 0.025559 (* 1 = 0.025559 loss)\nI0818 04:29:59.782747 17538 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0818 04:32:17.043081 17538 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:33:39.209391 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75476\nI0818 04:33:39.209677 17538 solver.cpp:404]     Test net output #1: loss = 1.17798 (* 1 = 1.17798 loss)\nI0818 04:33:40.534314 17538 solver.cpp:228] Iteration 19700, loss = 0.0716369\nI0818 04:33:40.534359 17538 solver.cpp:244]     Train net output #0: accuracy 
= 0.984\nI0818 04:33:40.534376 17538 solver.cpp:244]     Train net output #1: loss = 0.0716369 (* 1 = 0.0716369 loss)\nI0818 04:33:40.617697 17538 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0818 04:35:57.863884 17538 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:37:20.048710 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72608\nI0818 04:37:20.049016 17538 solver.cpp:404]     Test net output #1: loss = 1.40567 (* 1 = 1.40567 loss)\nI0818 04:37:21.374287 17538 solver.cpp:228] Iteration 19800, loss = 0.166089\nI0818 04:37:21.374330 17538 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 04:37:21.374346 17538 solver.cpp:244]     Train net output #1: loss = 0.166089 (* 1 = 0.166089 loss)\nI0818 04:37:21.459439 17538 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0818 04:39:38.663118 17538 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:41:00.832114 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7352\nI0818 04:41:00.832424 17538 solver.cpp:404]     Test net output #1: loss = 1.25641 (* 1 = 1.25641 loss)\nI0818 04:41:02.158618 17538 solver.cpp:228] Iteration 19900, loss = 0.0402713\nI0818 04:41:02.158659 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 04:41:02.158675 17538 solver.cpp:244]     Train net output #1: loss = 0.0402713 (* 1 = 0.0402713 loss)\nI0818 04:41:02.244120 17538 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0818 04:43:19.577206 17538 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:44:41.769811 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78236\nI0818 04:44:41.770090 17538 solver.cpp:404]     Test net output #1: loss = 1.00215 (* 1 = 1.00215 loss)\nI0818 04:44:43.094352 17538 solver.cpp:228] Iteration 20000, loss = 0.0403729\nI0818 04:44:43.094394 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:44:43.094413 17538 solver.cpp:244]     Train net output #1: loss = 0.0403729 (* 1 = 0.0403729 loss)\nI0818 
04:44:43.178494 17538 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0818 04:47:00.472811 17538 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 04:48:22.639084 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0818 04:48:22.639364 17538 solver.cpp:404]     Test net output #1: loss = 1.60158 (* 1 = 1.60158 loss)\nI0818 04:48:23.964298 17538 solver.cpp:228] Iteration 20100, loss = 0.0423897\nI0818 04:48:23.964342 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:48:23.964359 17538 solver.cpp:244]     Train net output #1: loss = 0.0423897 (* 1 = 0.0423897 loss)\nI0818 04:48:24.047912 17538 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0818 04:50:41.282853 17538 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 04:52:03.438906 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7668\nI0818 04:52:03.439188 17538 solver.cpp:404]     Test net output #1: loss = 1.24645 (* 1 = 1.24645 loss)\nI0818 04:52:04.763880 17538 solver.cpp:228] Iteration 20200, loss = 0.0829838\nI0818 04:52:04.763922 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 04:52:04.763939 17538 solver.cpp:244]     Train net output #1: loss = 0.0829838 (* 1 = 0.0829838 loss)\nI0818 04:52:04.845022 17538 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0818 04:54:22.068753 17538 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 04:55:44.236410 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7614\nI0818 04:55:44.236721 17538 solver.cpp:404]     Test net output #1: loss = 1.11943 (* 1 = 1.11943 loss)\nI0818 04:55:45.562583 17538 solver.cpp:228] Iteration 20300, loss = 0.144027\nI0818 04:55:45.562628 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:55:45.562644 17538 solver.cpp:244]     Train net output #1: loss = 0.144027 (* 1 = 0.144027 loss)\nI0818 04:55:45.651278 17538 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0818 04:58:02.831373 17538 solver.cpp:337] Iteration 20400, 
Testing net (#0)\nI0818 04:59:25.008460 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68348\nI0818 04:59:25.008761 17538 solver.cpp:404]     Test net output #1: loss = 1.58055 (* 1 = 1.58055 loss)\nI0818 04:59:26.333735 17538 solver.cpp:228] Iteration 20400, loss = 0.113308\nI0818 04:59:26.333780 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 04:59:26.333796 17538 solver.cpp:244]     Train net output #1: loss = 0.113308 (* 1 = 0.113308 loss)\nI0818 04:59:26.426481 17538 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0818 05:01:43.690599 17538 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 05:03:05.847391 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77424\nI0818 05:03:05.847703 17538 solver.cpp:404]     Test net output #1: loss = 1.073 (* 1 = 1.073 loss)\nI0818 05:03:07.173599 17538 solver.cpp:228] Iteration 20500, loss = 0.0353233\nI0818 05:03:07.173640 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:03:07.173656 17538 solver.cpp:244]     Train net output #1: loss = 0.0353233 (* 1 = 0.0353233 loss)\nI0818 05:03:07.261355 17538 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0818 05:05:24.580981 17538 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 05:06:46.736131 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78736\nI0818 05:06:46.736443 17538 solver.cpp:404]     Test net output #1: loss = 0.948761 (* 1 = 0.948761 loss)\nI0818 05:06:48.062223 17538 solver.cpp:228] Iteration 20600, loss = 0.0171697\nI0818 05:06:48.062265 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:06:48.062281 17538 solver.cpp:244]     Train net output #1: loss = 0.0171697 (* 1 = 0.0171697 loss)\nI0818 05:06:48.144501 17538 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0818 05:09:05.406597 17538 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 05:10:27.582938 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7712\nI0818 05:10:27.583247 17538 
solver.cpp:404]     Test net output #1: loss = 1.14388 (* 1 = 1.14388 loss)\nI0818 05:10:28.910620 17538 solver.cpp:228] Iteration 20700, loss = 0.0319873\nI0818 05:10:28.910665 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:10:28.910682 17538 solver.cpp:244]     Train net output #1: loss = 0.0319873 (* 1 = 0.0319873 loss)\nI0818 05:10:28.994618 17538 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0818 05:12:46.247886 17538 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 05:14:08.428486 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74588\nI0818 05:14:08.428784 17538 solver.cpp:404]     Test net output #1: loss = 1.37521 (* 1 = 1.37521 loss)\nI0818 05:14:09.759197 17538 solver.cpp:228] Iteration 20800, loss = 0.0711957\nI0818 05:14:09.759240 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:14:09.759258 17538 solver.cpp:244]     Train net output #1: loss = 0.0711958 (* 1 = 0.0711958 loss)\nI0818 05:14:09.834082 17538 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0818 05:16:27.054226 17538 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 05:17:48.277292 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7082\nI0818 05:17:48.277549 17538 solver.cpp:404]     Test net output #1: loss = 1.48727 (* 1 = 1.48727 loss)\nI0818 05:17:49.600311 17538 solver.cpp:228] Iteration 20900, loss = 0.0370945\nI0818 05:17:49.600349 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:17:49.600378 17538 solver.cpp:244]     Train net output #1: loss = 0.0370946 (* 1 = 0.0370946 loss)\nI0818 05:17:49.683183 17538 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0818 05:20:06.339479 17538 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 05:21:27.562688 17538 solver.cpp:404]     Test net output #0: accuracy = 0.67448\nI0818 05:21:27.562932 17538 solver.cpp:404]     Test net output #1: loss = 1.54871 (* 1 = 1.54871 loss)\nI0818 05:21:28.886312 17538 solver.cpp:228] 
Iteration 21000, loss = 0.0939563\nI0818 05:21:28.886349 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 05:21:28.886379 17538 solver.cpp:244]     Train net output #1: loss = 0.0939564 (* 1 = 0.0939564 loss)\nI0818 05:21:28.969491 17538 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0818 05:23:45.758018 17538 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 05:25:07.014103 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76888\nI0818 05:25:07.014379 17538 solver.cpp:404]     Test net output #1: loss = 1.10031 (* 1 = 1.10031 loss)\nI0818 05:25:08.337879 17538 solver.cpp:228] Iteration 21100, loss = 0.0622494\nI0818 05:25:08.337918 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:25:08.337941 17538 solver.cpp:244]     Train net output #1: loss = 0.0622495 (* 1 = 0.0622495 loss)\nI0818 05:25:08.420039 17538 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0818 05:27:25.099280 17538 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 05:28:46.347285 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74592\nI0818 05:28:46.347540 17538 solver.cpp:404]     Test net output #1: loss = 1.28617 (* 1 = 1.28617 loss)\nI0818 05:28:47.670738 17538 solver.cpp:228] Iteration 21200, loss = 0.0302144\nI0818 05:28:47.670773 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:28:47.670797 17538 solver.cpp:244]     Train net output #1: loss = 0.0302144 (* 1 = 0.0302144 loss)\nI0818 05:28:47.751657 17538 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0818 05:31:04.372819 17538 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 05:32:25.609834 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76312\nI0818 05:32:25.610097 17538 solver.cpp:404]     Test net output #1: loss = 1.07485 (* 1 = 1.07485 loss)\nI0818 05:32:26.933645 17538 solver.cpp:228] Iteration 21300, loss = 0.0615282\nI0818 05:32:26.933681 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 
05:32:26.933704 17538 solver.cpp:244]     Train net output #1: loss = 0.0615282 (* 1 = 0.0615282 loss)\nI0818 05:32:27.016031 17538 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0818 05:34:43.626256 17538 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 05:36:04.850679 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7068\nI0818 05:36:04.850951 17538 solver.cpp:404]     Test net output #1: loss = 1.55646 (* 1 = 1.55646 loss)\nI0818 05:36:06.174727 17538 solver.cpp:228] Iteration 21400, loss = 0.029308\nI0818 05:36:06.174762 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:36:06.174787 17538 solver.cpp:244]     Train net output #1: loss = 0.0293081 (* 1 = 0.0293081 loss)\nI0818 05:36:06.259538 17538 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0818 05:38:22.962430 17538 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 05:39:44.192942 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70392\nI0818 05:39:44.193199 17538 solver.cpp:404]     Test net output #1: loss = 1.45143 (* 1 = 1.45143 loss)\nI0818 05:39:45.516798 17538 solver.cpp:228] Iteration 21500, loss = 0.0390202\nI0818 05:39:45.516834 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 05:39:45.516858 17538 solver.cpp:244]     Train net output #1: loss = 0.0390202 (* 1 = 0.0390202 loss)\nI0818 05:39:45.595461 17538 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0818 05:42:02.275964 17538 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 05:43:23.517401 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76712\nI0818 05:43:23.517658 17538 solver.cpp:404]     Test net output #1: loss = 1.13164 (* 1 = 1.13164 loss)\nI0818 05:43:24.840467 17538 solver.cpp:228] Iteration 21600, loss = 0.0199076\nI0818 05:43:24.840502 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:43:24.840525 17538 solver.cpp:244]     Train net output #1: loss = 0.0199077 (* 1 = 0.0199077 loss)\nI0818 05:43:24.926437 17538 
sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0818 05:45:41.576956 17538 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 05:47:02.808993 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78096\nI0818 05:47:02.809284 17538 solver.cpp:404]     Test net output #1: loss = 0.973933 (* 1 = 0.973933 loss)\nI0818 05:47:04.131978 17538 solver.cpp:228] Iteration 21700, loss = 0.0561235\nI0818 05:47:04.132014 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:47:04.132035 17538 solver.cpp:244]     Train net output #1: loss = 0.0561236 (* 1 = 0.0561236 loss)\nI0818 05:47:04.212604 17538 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0818 05:49:20.906487 17538 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 05:50:42.134701 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72388\nI0818 05:50:42.134992 17538 solver.cpp:404]     Test net output #1: loss = 1.41928 (* 1 = 1.41928 loss)\nI0818 05:50:43.458087 17538 solver.cpp:228] Iteration 21800, loss = 0.0277345\nI0818 05:50:43.458127 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:50:43.458150 17538 solver.cpp:244]     Train net output #1: loss = 0.0277346 (* 1 = 0.0277346 loss)\nI0818 05:50:43.542215 17538 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0818 05:53:00.257978 17538 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 05:54:21.487406 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7262\nI0818 05:54:21.487674 17538 solver.cpp:404]     Test net output #1: loss = 1.35756 (* 1 = 1.35756 loss)\nI0818 05:54:22.810143 17538 solver.cpp:228] Iteration 21900, loss = 0.0400196\nI0818 05:54:22.810178 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:54:22.810201 17538 solver.cpp:244]     Train net output #1: loss = 0.0400197 (* 1 = 0.0400197 loss)\nI0818 05:54:22.887790 17538 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0818 05:56:39.564260 17538 solver.cpp:337] Iteration 22000, Testing net 
(#0)\nI0818 05:58:00.801201 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7422\nI0818 05:58:00.801496 17538 solver.cpp:404]     Test net output #1: loss = 1.21182 (* 1 = 1.21182 loss)\nI0818 05:58:02.124655 17538 solver.cpp:228] Iteration 22000, loss = 0.0358433\nI0818 05:58:02.124688 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 05:58:02.124711 17538 solver.cpp:244]     Train net output #1: loss = 0.0358434 (* 1 = 0.0358434 loss)\nI0818 05:58:02.211984 17538 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0818 06:00:18.848633 17538 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 06:01:40.081588 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73636\nI0818 06:01:40.081876 17538 solver.cpp:404]     Test net output #1: loss = 1.31764 (* 1 = 1.31764 loss)\nI0818 06:01:41.404405 17538 solver.cpp:228] Iteration 22100, loss = 0.103796\nI0818 06:01:41.404438 17538 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 06:01:41.404460 17538 solver.cpp:244]     Train net output #1: loss = 0.103797 (* 1 = 0.103797 loss)\nI0818 06:01:41.490394 17538 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0818 06:03:58.217962 17538 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 06:05:19.445401 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74848\nI0818 06:05:19.445688 17538 solver.cpp:404]     Test net output #1: loss = 1.30911 (* 1 = 1.30911 loss)\nI0818 06:05:20.768225 17538 solver.cpp:228] Iteration 22200, loss = 0.0832983\nI0818 06:05:20.768257 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:05:20.768273 17538 solver.cpp:244]     Train net output #1: loss = 0.0832984 (* 1 = 0.0832984 loss)\nI0818 06:05:20.855280 17538 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0818 06:07:37.572836 17538 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 06:08:58.808473 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7894\nI0818 06:08:58.808763 17538 
solver.cpp:404]     Test net output #1: loss = 0.907464 (* 1 = 0.907464 loss)\nI0818 06:09:00.131144 17538 solver.cpp:228] Iteration 22300, loss = 0.0285548\nI0818 06:09:00.131178 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:09:00.131193 17538 solver.cpp:244]     Train net output #1: loss = 0.0285549 (* 1 = 0.0285549 loss)\nI0818 06:09:00.209764 17538 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0818 06:11:16.842139 17538 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 06:12:38.086395 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72888\nI0818 06:12:38.086697 17538 solver.cpp:404]     Test net output #1: loss = 1.25285 (* 1 = 1.25285 loss)\nI0818 06:12:39.409855 17538 solver.cpp:228] Iteration 22400, loss = 0.0413186\nI0818 06:12:39.409888 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:12:39.409904 17538 solver.cpp:244]     Train net output #1: loss = 0.0413187 (* 1 = 0.0413187 loss)\nI0818 06:12:39.491360 17538 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0818 06:14:56.102268 17538 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 06:16:17.297052 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76808\nI0818 06:16:17.297391 17538 solver.cpp:404]     Test net output #1: loss = 1.07229 (* 1 = 1.07229 loss)\nI0818 06:16:18.619712 17538 solver.cpp:228] Iteration 22500, loss = 0.0204384\nI0818 06:16:18.619747 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:16:18.619762 17538 solver.cpp:244]     Train net output #1: loss = 0.0204385 (* 1 = 0.0204385 loss)\nI0818 06:16:18.698451 17538 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0818 06:18:35.375303 17538 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 06:19:56.408710 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7468\nI0818 06:19:56.408995 17538 solver.cpp:404]     Test net output #1: loss = 1.22819 (* 1 = 1.22819 loss)\nI0818 06:19:57.731204 17538 solver.cpp:228] 
Iteration 22600, loss = 0.0426174\nI0818 06:19:57.731240 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:19:57.731256 17538 solver.cpp:244]     Train net output #1: loss = 0.0426175 (* 1 = 0.0426175 loss)\nI0818 06:19:57.812443 17538 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0818 06:22:14.466936 17538 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 06:23:35.505239 17538 solver.cpp:404]     Test net output #0: accuracy = 0.764\nI0818 06:23:35.505563 17538 solver.cpp:404]     Test net output #1: loss = 1.02355 (* 1 = 1.02355 loss)\nI0818 06:23:36.828408 17538 solver.cpp:228] Iteration 22700, loss = 0.0263356\nI0818 06:23:36.828444 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:23:36.828459 17538 solver.cpp:244]     Train net output #1: loss = 0.0263357 (* 1 = 0.0263357 loss)\nI0818 06:23:36.906854 17538 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0818 06:25:53.525373 17538 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 06:27:14.584105 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75956\nI0818 06:27:14.584373 17538 solver.cpp:404]     Test net output #1: loss = 1.20077 (* 1 = 1.20077 loss)\nI0818 06:27:15.907297 17538 solver.cpp:228] Iteration 22800, loss = 0.0165741\nI0818 06:27:15.907331 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:27:15.907347 17538 solver.cpp:244]     Train net output #1: loss = 0.0165742 (* 1 = 0.0165742 loss)\nI0818 06:27:15.995476 17538 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0818 06:29:32.581912 17538 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 06:30:53.646528 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72848\nI0818 06:30:53.646791 17538 solver.cpp:404]     Test net output #1: loss = 1.44131 (* 1 = 1.44131 loss)\nI0818 06:30:54.968859 17538 solver.cpp:228] Iteration 22900, loss = 0.0191478\nI0818 06:30:54.968894 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 
06:30:54.968909 17538 solver.cpp:244]     Train net output #1: loss = 0.0191479 (* 1 = 0.0191479 loss)\nI0818 06:30:55.051436 17538 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0818 06:33:12.067181 17538 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 06:34:33.118772 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78208\nI0818 06:34:33.119035 17538 solver.cpp:404]     Test net output #1: loss = 0.977091 (* 1 = 0.977091 loss)\nI0818 06:34:34.441500 17538 solver.cpp:228] Iteration 23000, loss = 0.0887106\nI0818 06:34:34.441534 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:34:34.441550 17538 solver.cpp:244]     Train net output #1: loss = 0.0887107 (* 1 = 0.0887107 loss)\nI0818 06:34:34.530146 17538 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0818 06:36:51.687789 17538 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 06:38:12.741549 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7146\nI0818 06:38:12.741838 17538 solver.cpp:404]     Test net output #1: loss = 1.4501 (* 1 = 1.4501 loss)\nI0818 06:38:14.064584 17538 solver.cpp:228] Iteration 23100, loss = 0.130097\nI0818 06:38:14.064617 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:38:14.064632 17538 solver.cpp:244]     Train net output #1: loss = 0.130097 (* 1 = 0.130097 loss)\nI0818 06:38:14.148632 17538 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0818 06:40:31.216497 17538 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 06:41:52.262504 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7642\nI0818 06:41:52.262770 17538 solver.cpp:404]     Test net output #1: loss = 1.09495 (* 1 = 1.09495 loss)\nI0818 06:41:53.585122 17538 solver.cpp:228] Iteration 23200, loss = 0.0359778\nI0818 06:41:53.585155 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 06:41:53.585171 17538 solver.cpp:244]     Train net output #1: loss = 0.0359779 (* 1 = 0.0359779 loss)\nI0818 06:41:53.673571 17538 
sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0818 06:44:10.764282 17538 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 06:45:31.796778 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78172\nI0818 06:45:31.797066 17538 solver.cpp:404]     Test net output #1: loss = 0.987324 (* 1 = 0.987324 loss)\nI0818 06:45:33.118990 17538 solver.cpp:228] Iteration 23300, loss = 0.0779792\nI0818 06:45:33.119022 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 06:45:33.119038 17538 solver.cpp:244]     Train net output #1: loss = 0.0779793 (* 1 = 0.0779793 loss)\nI0818 06:45:33.211087 17538 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0818 06:47:50.278439 17538 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 06:49:11.338486 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76528\nI0818 06:49:11.338784 17538 solver.cpp:404]     Test net output #1: loss = 1.02734 (* 1 = 1.02734 loss)\nI0818 06:49:12.661016 17538 solver.cpp:228] Iteration 23400, loss = 0.0592534\nI0818 06:49:12.661054 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 06:49:12.661077 17538 solver.cpp:244]     Train net output #1: loss = 0.0592535 (* 1 = 0.0592535 loss)\nI0818 06:49:12.744041 17538 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0818 06:51:29.676925 17538 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 06:52:50.928786 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76124\nI0818 06:52:50.929091 17538 solver.cpp:404]     Test net output #1: loss = 1.12251 (* 1 = 1.12251 loss)\nI0818 06:52:52.251286 17538 solver.cpp:228] Iteration 23500, loss = 0.0732617\nI0818 06:52:52.251322 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:52:52.251345 17538 solver.cpp:244]     Train net output #1: loss = 0.0732619 (* 1 = 0.0732619 loss)\nI0818 06:52:52.333607 17538 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0818 06:55:09.257594 17538 solver.cpp:337] Iteration 23600, Testing net 
(#0)\nI0818 06:56:30.506155 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76188\nI0818 06:56:30.506445 17538 solver.cpp:404]     Test net output #1: loss = 1.27802 (* 1 = 1.27802 loss)\nI0818 06:56:31.828846 17538 solver.cpp:228] Iteration 23600, loss = 0.0623639\nI0818 06:56:31.828881 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 06:56:31.828903 17538 solver.cpp:244]     Train net output #1: loss = 0.062364 (* 1 = 0.062364 loss)\nI0818 06:56:31.907873 17538 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0818 06:58:48.506443 17538 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 07:00:09.705757 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI0818 07:00:09.706018 17538 solver.cpp:404]     Test net output #1: loss = 1.05163 (* 1 = 1.05163 loss)\nI0818 07:00:11.027911 17538 solver.cpp:228] Iteration 23700, loss = 0.0334594\nI0818 07:00:11.027942 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:00:11.027958 17538 solver.cpp:244]     Train net output #1: loss = 0.0334596 (* 1 = 0.0334596 loss)\nI0818 07:00:11.110522 17538 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0818 07:02:27.753521 17538 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 07:03:48.965354 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76624\nI0818 07:03:48.965636 17538 solver.cpp:404]     Test net output #1: loss = 1.0672 (* 1 = 1.0672 loss)\nI0818 07:03:50.287961 17538 solver.cpp:228] Iteration 23800, loss = 0.0630131\nI0818 07:03:50.287992 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:03:50.288007 17538 solver.cpp:244]     Train net output #1: loss = 0.0630133 (* 1 = 0.0630133 loss)\nI0818 07:03:50.367801 17538 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0818 07:06:06.985754 17538 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 07:07:28.174520 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7062\nI0818 07:07:28.174783 17538 
solver.cpp:404]     Test net output #1: loss = 1.36603 (* 1 = 1.36603 loss)\nI0818 07:07:29.496847 17538 solver.cpp:228] Iteration 23900, loss = 0.0576182\nI0818 07:07:29.496878 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:07:29.496893 17538 solver.cpp:244]     Train net output #1: loss = 0.0576183 (* 1 = 0.0576183 loss)\nI0818 07:07:29.580859 17538 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0818 07:09:46.258740 17538 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 07:11:07.462985 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75764\nI0818 07:11:07.463264 17538 solver.cpp:404]     Test net output #1: loss = 1.04233 (* 1 = 1.04233 loss)\nI0818 07:11:08.785147 17538 solver.cpp:228] Iteration 24000, loss = 0.0344583\nI0818 07:11:08.785183 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 07:11:08.785198 17538 solver.cpp:244]     Train net output #1: loss = 0.0344584 (* 1 = 0.0344584 loss)\nI0818 07:11:08.869704 17538 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0818 07:13:25.545301 17538 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 07:14:47.570755 17538 solver.cpp:404]     Test net output #0: accuracy = 0.677\nI0818 07:14:47.570993 17538 solver.cpp:404]     Test net output #1: loss = 1.58399 (* 1 = 1.58399 loss)\nI0818 07:14:48.896469 17538 solver.cpp:228] Iteration 24100, loss = 0.0521954\nI0818 07:14:48.896513 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:14:48.896529 17538 solver.cpp:244]     Train net output #1: loss = 0.0521955 (* 1 = 0.0521955 loss)\nI0818 07:14:48.976495 17538 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0818 07:17:06.163645 17538 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 07:18:27.624891 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7084\nI0818 07:18:27.625166 17538 solver.cpp:404]     Test net output #1: loss = 1.59399 (* 1 = 1.59399 loss)\nI0818 07:18:28.948263 17538 solver.cpp:228] Iteration 
24200, loss = 0.0454412\nI0818 07:18:28.948297 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:18:28.948312 17538 solver.cpp:244]     Train net output #1: loss = 0.0454413 (* 1 = 0.0454413 loss)\nI0818 07:18:29.028436 17538 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0818 07:20:46.009706 17538 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 07:22:07.229956 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72396\nI0818 07:22:07.230232 17538 solver.cpp:404]     Test net output #1: loss = 1.53184 (* 1 = 1.53184 loss)\nI0818 07:22:08.551827 17538 solver.cpp:228] Iteration 24300, loss = 0.045176\nI0818 07:22:08.551858 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:22:08.551874 17538 solver.cpp:244]     Train net output #1: loss = 0.0451761 (* 1 = 0.0451761 loss)\nI0818 07:22:08.633813 17538 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0818 07:24:25.757905 17538 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 07:25:47.768669 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76132\nI0818 07:25:47.768903 17538 solver.cpp:404]     Test net output #1: loss = 1.12089 (* 1 = 1.12089 loss)\nI0818 07:25:49.093935 17538 solver.cpp:228] Iteration 24400, loss = 0.0359134\nI0818 07:25:49.093979 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:25:49.093996 17538 solver.cpp:244]     Train net output #1: loss = 0.0359135 (* 1 = 0.0359135 loss)\nI0818 07:25:49.176223 17538 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0818 07:28:06.374531 17538 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 07:29:28.557574 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77356\nI0818 07:29:28.557832 17538 solver.cpp:404]     Test net output #1: loss = 1.05854 (* 1 = 1.05854 loss)\nI0818 07:29:29.883450 17538 solver.cpp:228] Iteration 24500, loss = 0.0342847\nI0818 07:29:29.883493 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 
07:29:29.883509 17538 solver.cpp:244]     Train net output #1: loss = 0.0342848 (* 1 = 0.0342848 loss)\nI0818 07:29:29.962782 17538 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0818 07:31:47.093991 17538 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 07:33:09.247642 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76128\nI0818 07:33:09.247947 17538 solver.cpp:404]     Test net output #1: loss = 1.08796 (* 1 = 1.08796 loss)\nI0818 07:33:10.572950 17538 solver.cpp:228] Iteration 24600, loss = 0.0333178\nI0818 07:33:10.572993 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:33:10.573007 17538 solver.cpp:244]     Train net output #1: loss = 0.0333179 (* 1 = 0.0333179 loss)\nI0818 07:33:10.655485 17538 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0818 07:35:27.787261 17538 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 07:36:49.948608 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78796\nI0818 07:36:49.948832 17538 solver.cpp:404]     Test net output #1: loss = 1.01305 (* 1 = 1.01305 loss)\nI0818 07:36:51.275004 17538 solver.cpp:228] Iteration 24700, loss = 0.0254727\nI0818 07:36:51.275049 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:36:51.275064 17538 solver.cpp:244]     Train net output #1: loss = 0.0254728 (* 1 = 0.0254728 loss)\nI0818 07:36:51.359264 17538 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0818 07:39:08.484167 17538 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 07:40:30.595150 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78044\nI0818 07:40:30.595428 17538 solver.cpp:404]     Test net output #1: loss = 0.966572 (* 1 = 0.966572 loss)\nI0818 07:40:31.920939 17538 solver.cpp:228] Iteration 24800, loss = 0.0539159\nI0818 07:40:31.920984 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 07:40:31.921000 17538 solver.cpp:244]     Train net output #1: loss = 0.0539159 (* 1 = 0.0539159 loss)\nI0818 07:40:32.002812 
17538 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0818 07:42:49.155987 17538 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 07:44:11.197149 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75496\nI0818 07:44:11.197407 17538 solver.cpp:404]     Test net output #1: loss = 1.32375 (* 1 = 1.32375 loss)\nI0818 07:44:12.521849 17538 solver.cpp:228] Iteration 24900, loss = 0.0393416\nI0818 07:44:12.521893 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 07:44:12.521909 17538 solver.cpp:244]     Train net output #1: loss = 0.0393417 (* 1 = 0.0393417 loss)\nI0818 07:44:12.605628 17538 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0818 07:46:29.706527 17538 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 07:47:50.951194 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7002\nI0818 07:47:50.951498 17538 solver.cpp:404]     Test net output #1: loss = 1.67526 (* 1 = 1.67526 loss)\nI0818 07:47:52.274307 17538 solver.cpp:228] Iteration 25000, loss = 0.0477409\nI0818 07:47:52.274340 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 07:47:52.274356 17538 solver.cpp:244]     Train net output #1: loss = 0.047741 (* 1 = 0.047741 loss)\nI0818 07:47:52.357920 17538 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0818 07:50:09.413097 17538 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 07:51:30.620168 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7398\nI0818 07:51:30.620460 17538 solver.cpp:404]     Test net output #1: loss = 1.49742 (* 1 = 1.49742 loss)\nI0818 07:51:31.943228 17538 solver.cpp:228] Iteration 25100, loss = 0.0727985\nI0818 07:51:31.943264 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 07:51:31.943279 17538 solver.cpp:244]     Train net output #1: loss = 0.0727986 (* 1 = 0.0727986 loss)\nI0818 07:51:32.030838 17538 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0818 07:53:49.093204 17538 solver.cpp:337] Iteration 25200, Testing net 
(#0)\nI0818 07:55:11.225814 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74584\nI0818 07:55:11.226058 17538 solver.cpp:404]     Test net output #1: loss = 1.32644 (* 1 = 1.32644 loss)\nI0818 07:55:12.550995 17538 solver.cpp:228] Iteration 25200, loss = 0.066724\nI0818 07:55:12.551039 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:55:12.551055 17538 solver.cpp:244]     Train net output #1: loss = 0.066724 (* 1 = 0.066724 loss)\nI0818 07:55:12.638725 17538 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0818 07:57:29.836280 17538 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 07:58:52.023082 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76528\nI0818 07:58:52.023334 17538 solver.cpp:404]     Test net output #1: loss = 1.21819 (* 1 = 1.21819 loss)\nI0818 07:58:53.347836 17538 solver.cpp:228] Iteration 25300, loss = 0.0328169\nI0818 07:58:53.347877 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 07:58:53.347893 17538 solver.cpp:244]     Train net output #1: loss = 0.032817 (* 1 = 0.032817 loss)\nI0818 07:58:53.431800 17538 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0818 08:01:10.596876 17538 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 08:02:32.702251 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80928\nI0818 08:02:32.702534 17538 solver.cpp:404]     Test net output #1: loss = 0.904209 (* 1 = 0.904209 loss)\nI0818 08:02:34.027705 17538 solver.cpp:228] Iteration 25400, loss = 0.0529175\nI0818 08:02:34.027748 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:02:34.027765 17538 solver.cpp:244]     Train net output #1: loss = 0.0529176 (* 1 = 0.0529176 loss)\nI0818 08:02:34.116052 17538 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0818 08:04:51.240063 17538 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 08:06:13.371788 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76772\nI0818 08:06:13.372048 17538 
solver.cpp:404]     Test net output #1: loss = 0.986056 (* 1 = 0.986056 loss)\nI0818 08:06:14.697113 17538 solver.cpp:228] Iteration 25500, loss = 0.0734842\nI0818 08:06:14.697156 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:06:14.697172 17538 solver.cpp:244]     Train net output #1: loss = 0.0734842 (* 1 = 0.0734842 loss)\nI0818 08:06:14.780712 17538 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0818 08:08:31.978190 17538 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 08:09:54.132623 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76004\nI0818 08:09:54.132886 17538 solver.cpp:404]     Test net output #1: loss = 1.17911 (* 1 = 1.17911 loss)\nI0818 08:09:55.458279 17538 solver.cpp:228] Iteration 25600, loss = 0.0600689\nI0818 08:09:55.458323 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:09:55.458339 17538 solver.cpp:244]     Train net output #1: loss = 0.0600689 (* 1 = 0.0600689 loss)\nI0818 08:09:55.538182 17538 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0818 08:12:12.744628 17538 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 08:13:34.859755 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72388\nI0818 08:13:34.860029 17538 solver.cpp:404]     Test net output #1: loss = 1.3969 (* 1 = 1.3969 loss)\nI0818 08:13:36.185694 17538 solver.cpp:228] Iteration 25700, loss = 0.096624\nI0818 08:13:36.185736 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 08:13:36.185752 17538 solver.cpp:244]     Train net output #1: loss = 0.0966241 (* 1 = 0.0966241 loss)\nI0818 08:13:36.266335 17538 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0818 08:15:53.442211 17538 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 08:17:15.565573 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73156\nI0818 08:17:15.565825 17538 solver.cpp:404]     Test net output #1: loss = 1.40701 (* 1 = 1.40701 loss)\nI0818 08:17:16.890281 17538 solver.cpp:228] 
Iteration 25800, loss = 0.100972\nI0818 08:17:16.890326 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 08:17:16.890352 17538 solver.cpp:244]     Train net output #1: loss = 0.100972 (* 1 = 0.100972 loss)\nI0818 08:17:16.978430 17538 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0818 08:19:34.119952 17538 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 08:20:56.233492 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73304\nI0818 08:20:56.233780 17538 solver.cpp:404]     Test net output #1: loss = 1.46907 (* 1 = 1.46907 loss)\nI0818 08:20:57.559198 17538 solver.cpp:228] Iteration 25900, loss = 0.0629525\nI0818 08:20:57.559248 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:20:57.559272 17538 solver.cpp:244]     Train net output #1: loss = 0.0629525 (* 1 = 0.0629525 loss)\nI0818 08:20:57.637367 17538 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0818 08:23:14.840981 17538 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 08:24:36.839148 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76016\nI0818 08:24:36.839401 17538 solver.cpp:404]     Test net output #1: loss = 1.01182 (* 1 = 1.01182 loss)\nI0818 08:24:38.164463 17538 solver.cpp:228] Iteration 26000, loss = 0.0744874\nI0818 08:24:38.164508 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:24:38.164532 17538 solver.cpp:244]     Train net output #1: loss = 0.0744874 (* 1 = 0.0744874 loss)\nI0818 08:24:38.244951 17538 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0818 08:26:55.408957 17538 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 08:28:17.238979 17538 solver.cpp:404]     Test net output #0: accuracy = 0.64144\nI0818 08:28:17.239240 17538 solver.cpp:404]     Test net output #1: loss = 2.31096 (* 1 = 2.31096 loss)\nI0818 08:28:18.564873 17538 solver.cpp:228] Iteration 26100, loss = 0.0362684\nI0818 08:28:18.564921 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 
08:28:18.564944 17538 solver.cpp:244]     Train net output #1: loss = 0.0362684 (* 1 = 0.0362684 loss)\nI0818 08:28:18.644117 17538 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0818 08:30:35.797991 17538 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 08:31:56.999451 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76012\nI0818 08:31:56.999748 17538 solver.cpp:404]     Test net output #1: loss = 1.04551 (* 1 = 1.04551 loss)\nI0818 08:31:58.321928 17538 solver.cpp:228] Iteration 26200, loss = 0.0678558\nI0818 08:31:58.321961 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:31:58.321977 17538 solver.cpp:244]     Train net output #1: loss = 0.0678559 (* 1 = 0.0678559 loss)\nI0818 08:31:58.406190 17538 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0818 08:34:15.379227 17538 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 08:35:36.426007 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77664\nI0818 08:35:36.426281 17538 solver.cpp:404]     Test net output #1: loss = 0.927487 (* 1 = 0.927487 loss)\nI0818 08:35:37.749008 17538 solver.cpp:228] Iteration 26300, loss = 0.0287915\nI0818 08:35:37.749043 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:35:37.749059 17538 solver.cpp:244]     Train net output #1: loss = 0.0287916 (* 1 = 0.0287916 loss)\nI0818 08:35:37.839576 17538 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0818 08:37:54.756381 17538 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 08:39:15.815230 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7626\nI0818 08:39:15.815503 17538 solver.cpp:404]     Test net output #1: loss = 1.23688 (* 1 = 1.23688 loss)\nI0818 08:39:17.137568 17538 solver.cpp:228] Iteration 26400, loss = 0.141007\nI0818 08:39:17.137603 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 08:39:17.137617 17538 solver.cpp:244]     Train net output #1: loss = 0.141007 (* 1 = 0.141007 loss)\nI0818 08:39:17.217877 
17538 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0818 08:41:34.214797 17538 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 08:42:56.245518 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75992\nI0818 08:42:56.245826 17538 solver.cpp:404]     Test net output #1: loss = 1.29572 (* 1 = 1.29572 loss)\nI0818 08:42:57.571928 17538 solver.cpp:228] Iteration 26500, loss = 0.0262916\nI0818 08:42:57.571972 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 08:42:57.571988 17538 solver.cpp:244]     Train net output #1: loss = 0.0262916 (* 1 = 0.0262916 loss)\nI0818 08:42:57.659032 17538 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0818 08:45:14.837713 17538 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 08:46:37.013540 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74156\nI0818 08:46:37.013804 17538 solver.cpp:404]     Test net output #1: loss = 1.36286 (* 1 = 1.36286 loss)\nI0818 08:46:38.340505 17538 solver.cpp:228] Iteration 26600, loss = 0.0225239\nI0818 08:46:38.340550 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:46:38.340567 17538 solver.cpp:244]     Train net output #1: loss = 0.0225239 (* 1 = 0.0225239 loss)\nI0818 08:46:38.425678 17538 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0818 08:48:55.544196 17538 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 08:50:17.693344 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78624\nI0818 08:50:17.693593 17538 solver.cpp:404]     Test net output #1: loss = 0.949733 (* 1 = 0.949733 loss)\nI0818 08:50:19.019366 17538 solver.cpp:228] Iteration 26700, loss = 0.0184562\nI0818 08:50:19.019412 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:50:19.019428 17538 solver.cpp:244]     Train net output #1: loss = 0.0184562 (* 1 = 0.0184562 loss)\nI0818 08:50:19.098893 17538 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0818 08:52:36.240464 17538 solver.cpp:337] Iteration 26800, Testing net 
(#0)\nI0818 08:53:58.412962 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73484\nI0818 08:53:58.413278 17538 solver.cpp:404]     Test net output #1: loss = 1.47501 (* 1 = 1.47501 loss)\nI0818 08:53:59.739156 17538 solver.cpp:228] Iteration 26800, loss = 0.11097\nI0818 08:53:59.739199 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 08:53:59.739215 17538 solver.cpp:244]     Train net output #1: loss = 0.11097 (* 1 = 0.11097 loss)\nI0818 08:53:59.823778 17538 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0818 08:56:17.114697 17538 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 08:57:39.047340 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75232\nI0818 08:57:39.047616 17538 solver.cpp:404]     Test net output #1: loss = 1.37779 (* 1 = 1.37779 loss)\nI0818 08:57:40.373930 17538 solver.cpp:228] Iteration 26900, loss = 0.0784552\nI0818 08:57:40.373975 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 08:57:40.373991 17538 solver.cpp:244]     Train net output #1: loss = 0.0784553 (* 1 = 0.0784553 loss)\nI0818 08:57:40.454251 17538 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0818 08:59:57.624961 17538 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 09:01:19.745738 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80096\nI0818 09:01:19.746042 17538 solver.cpp:404]     Test net output #1: loss = 0.918085 (* 1 = 0.918085 loss)\nI0818 09:01:21.071398 17538 solver.cpp:228] Iteration 27000, loss = 0.0643535\nI0818 09:01:21.071441 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:01:21.071457 17538 solver.cpp:244]     Train net output #1: loss = 0.0643536 (* 1 = 0.0643536 loss)\nI0818 09:01:21.157817 17538 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0818 09:03:38.394234 17538 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 09:05:00.571455 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78056\nI0818 09:05:00.571768 17538 
solver.cpp:404]     Test net output #1: loss = 1.12386 (* 1 = 1.12386 loss)\nI0818 09:05:01.897673 17538 solver.cpp:228] Iteration 27100, loss = 0.0610252\nI0818 09:05:01.897716 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:05:01.897732 17538 solver.cpp:244]     Train net output #1: loss = 0.0610253 (* 1 = 0.0610253 loss)\nI0818 09:05:01.983217 17538 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0818 09:07:19.142895 17538 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 09:08:41.321451 17538 solver.cpp:404]     Test net output #0: accuracy = 0.68544\nI0818 09:08:41.321776 17538 solver.cpp:404]     Test net output #1: loss = 1.93231 (* 1 = 1.93231 loss)\nI0818 09:08:42.647265 17538 solver.cpp:228] Iteration 27200, loss = 0.0372876\nI0818 09:08:42.647308 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:08:42.647325 17538 solver.cpp:244]     Train net output #1: loss = 0.0372877 (* 1 = 0.0372877 loss)\nI0818 09:08:42.725267 17538 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0818 09:10:59.776731 17538 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 09:12:21.047281 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7644\nI0818 09:12:21.047565 17538 solver.cpp:404]     Test net output #1: loss = 1.09746 (* 1 = 1.09746 loss)\nI0818 09:12:22.371642 17538 solver.cpp:228] Iteration 27300, loss = 0.024082\nI0818 09:12:22.371690 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:12:22.371704 17538 solver.cpp:244]     Train net output #1: loss = 0.024082 (* 1 = 0.024082 loss)\nI0818 09:12:22.457219 17538 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0818 09:14:39.461321 17538 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 09:16:01.620579 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7752\nI0818 09:16:01.620848 17538 solver.cpp:404]     Test net output #1: loss = 0.951863 (* 1 = 0.951863 loss)\nI0818 09:16:02.946029 17538 solver.cpp:228] Iteration 
27400, loss = 0.0167974\nI0818 09:16:02.946070 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:16:02.946094 17538 solver.cpp:244]     Train net output #1: loss = 0.0167975 (* 1 = 0.0167975 loss)\nI0818 09:16:03.025346 17538 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0818 09:18:19.999102 17538 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 09:19:42.205821 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7746\nI0818 09:19:42.206130 17538 solver.cpp:404]     Test net output #1: loss = 1.0886 (* 1 = 1.0886 loss)\nI0818 09:19:43.530714 17538 solver.cpp:228] Iteration 27500, loss = 0.0212499\nI0818 09:19:43.530756 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:19:43.530771 17538 solver.cpp:244]     Train net output #1: loss = 0.02125 (* 1 = 0.02125 loss)\nI0818 09:19:43.618940 17538 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0818 09:22:00.605398 17538 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 09:23:22.794162 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77288\nI0818 09:23:22.794498 17538 solver.cpp:404]     Test net output #1: loss = 1.04314 (* 1 = 1.04314 loss)\nI0818 09:23:24.120836 17538 solver.cpp:228] Iteration 27600, loss = 0.055493\nI0818 09:23:24.120877 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:23:24.120893 17538 solver.cpp:244]     Train net output #1: loss = 0.055493 (* 1 = 0.055493 loss)\nI0818 09:23:24.198745 17538 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0818 09:25:41.074694 17538 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 09:27:02.312856 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78992\nI0818 09:27:02.313307 17538 solver.cpp:404]     Test net output #1: loss = 1.03766 (* 1 = 1.03766 loss)\nI0818 09:27:03.636461 17538 solver.cpp:228] Iteration 27700, loss = 0.0642995\nI0818 09:27:03.636494 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:27:03.636509 17538 
solver.cpp:244]     Train net output #1: loss = 0.0642995 (* 1 = 0.0642995 loss)\nI0818 09:27:03.723206 17538 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0818 09:29:20.313038 17538 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 09:30:41.557628 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79556\nI0818 09:30:41.557911 17538 solver.cpp:404]     Test net output #1: loss = 0.857642 (* 1 = 0.857642 loss)\nI0818 09:30:42.880853 17538 solver.cpp:228] Iteration 27800, loss = 0.0264634\nI0818 09:30:42.880887 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:30:42.880903 17538 solver.cpp:244]     Train net output #1: loss = 0.0264634 (* 1 = 0.0264634 loss)\nI0818 09:30:42.965618 17538 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0818 09:32:59.537822 17538 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 09:34:20.784729 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75788\nI0818 09:34:20.785022 17538 solver.cpp:404]     Test net output #1: loss = 1.16024 (* 1 = 1.16024 loss)\nI0818 09:34:22.107331 17538 solver.cpp:228] Iteration 27900, loss = 0.0210748\nI0818 09:34:22.107363 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:34:22.107379 17538 solver.cpp:244]     Train net output #1: loss = 0.0210748 (* 1 = 0.0210748 loss)\nI0818 09:34:22.193374 17538 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0818 09:36:38.920979 17538 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 09:38:00.172623 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73984\nI0818 09:38:00.172921 17538 solver.cpp:404]     Test net output #1: loss = 1.35446 (* 1 = 1.35446 loss)\nI0818 09:38:01.496031 17538 solver.cpp:228] Iteration 28000, loss = 0.10384\nI0818 09:38:01.496064 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:38:01.496079 17538 solver.cpp:244]     Train net output #1: loss = 0.10384 (* 1 = 0.10384 loss)\nI0818 09:38:01.577123 17538 sgd_solver.cpp:166] 
Iteration 28000, lr = 0.35\nI0818 09:40:18.179486 17538 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 09:41:39.440528 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI0818 09:41:39.440829 17538 solver.cpp:404]     Test net output #1: loss = 0.902332 (* 1 = 0.902332 loss)\nI0818 09:41:40.766211 17538 solver.cpp:228] Iteration 28100, loss = 0.0149494\nI0818 09:41:40.766250 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:41:40.766273 17538 solver.cpp:244]     Train net output #1: loss = 0.0149494 (* 1 = 0.0149494 loss)\nI0818 09:41:40.849303 17538 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0818 09:43:57.470295 17538 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 09:45:18.695076 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7086\nI0818 09:45:18.695367 17538 solver.cpp:404]     Test net output #1: loss = 1.82053 (* 1 = 1.82053 loss)\nI0818 09:45:20.018788 17538 solver.cpp:228] Iteration 28200, loss = 0.00996492\nI0818 09:45:20.018829 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:45:20.018846 17538 solver.cpp:244]     Train net output #1: loss = 0.00996494 (* 1 = 0.00996494 loss)\nI0818 09:45:20.104084 17538 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0818 09:47:36.739128 17538 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 09:48:57.954401 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72128\nI0818 09:48:57.954669 17538 solver.cpp:404]     Test net output #1: loss = 1.45969 (* 1 = 1.45969 loss)\nI0818 09:48:59.276875 17538 solver.cpp:228] Iteration 28300, loss = 0.0497362\nI0818 09:48:59.276917 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 09:48:59.276934 17538 solver.cpp:244]     Train net output #1: loss = 0.0497362 (* 1 = 0.0497362 loss)\nI0818 09:48:59.361778 17538 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0818 09:51:15.933002 17538 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 09:52:37.167839 
17538 solver.cpp:404]     Test net output #0: accuracy = 0.79128\nI0818 09:52:37.168148 17538 solver.cpp:404]     Test net output #1: loss = 0.974624 (* 1 = 0.974624 loss)\nI0818 09:52:38.490839 17538 solver.cpp:228] Iteration 28400, loss = 0.0420199\nI0818 09:52:38.490885 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 09:52:38.490900 17538 solver.cpp:244]     Train net output #1: loss = 0.0420199 (* 1 = 0.0420199 loss)\nI0818 09:52:38.577339 17538 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0818 09:54:55.160845 17538 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 09:56:16.393800 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78896\nI0818 09:56:16.394079 17538 solver.cpp:404]     Test net output #1: loss = 0.910355 (* 1 = 0.910355 loss)\nI0818 09:56:17.716891 17538 solver.cpp:228] Iteration 28500, loss = 0.129851\nI0818 09:56:17.716931 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 09:56:17.716948 17538 solver.cpp:244]     Train net output #1: loss = 0.129851 (* 1 = 0.129851 loss)\nI0818 09:56:17.797649 17538 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0818 09:58:34.378684 17538 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 09:59:55.632715 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74148\nI0818 09:59:55.632984 17538 solver.cpp:404]     Test net output #1: loss = 1.26342 (* 1 = 1.26342 loss)\nI0818 09:59:56.955150 17538 solver.cpp:228] Iteration 28600, loss = 0.0371889\nI0818 09:59:56.955193 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 09:59:56.955209 17538 solver.cpp:244]     Train net output #1: loss = 0.0371889 (* 1 = 0.0371889 loss)\nI0818 09:59:57.036495 17538 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0818 10:02:13.635905 17538 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 10:03:34.879910 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75804\nI0818 10:03:34.880198 17538 solver.cpp:404]     Test net 
output #1: loss = 1.19061 (* 1 = 1.19061 loss)\nI0818 10:03:36.202888 17538 solver.cpp:228] Iteration 28700, loss = 0.0320321\nI0818 10:03:36.202927 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:03:36.202944 17538 solver.cpp:244]     Train net output #1: loss = 0.0320321 (* 1 = 0.0320321 loss)\nI0818 10:03:36.284704 17538 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0818 10:05:52.932304 17538 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 10:07:14.172292 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI0818 10:07:14.172608 17538 solver.cpp:404]     Test net output #1: loss = 0.742 (* 1 = 0.742 loss)\nI0818 10:07:15.495087 17538 solver.cpp:228] Iteration 28800, loss = 0.0205337\nI0818 10:07:15.495128 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:07:15.495144 17538 solver.cpp:244]     Train net output #1: loss = 0.0205337 (* 1 = 0.0205337 loss)\nI0818 10:07:15.578995 17538 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0818 10:09:32.235496 17538 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 10:10:53.443832 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77328\nI0818 10:10:53.444128 17538 solver.cpp:404]     Test net output #1: loss = 1.06504 (* 1 = 1.06504 loss)\nI0818 10:10:54.767406 17538 solver.cpp:228] Iteration 28900, loss = 0.03023\nI0818 10:10:54.767438 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:10:54.767453 17538 solver.cpp:244]     Train net output #1: loss = 0.03023 (* 1 = 0.03023 loss)\nI0818 10:10:54.845954 17538 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0818 10:13:11.537003 17538 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 10:14:32.778511 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77088\nI0818 10:14:32.778805 17538 solver.cpp:404]     Test net output #1: loss = 1.02358 (* 1 = 1.02358 loss)\nI0818 10:14:34.101867 17538 solver.cpp:228] Iteration 29000, loss = 0.0289068\nI0818 
10:14:34.101899 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:14:34.101914 17538 solver.cpp:244]     Train net output #1: loss = 0.0289068 (* 1 = 0.0289068 loss)\nI0818 10:14:34.185587 17538 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0818 10:16:50.869743 17538 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 10:18:12.118487 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7402\nI0818 10:18:12.118784 17538 solver.cpp:404]     Test net output #1: loss = 1.30441 (* 1 = 1.30441 loss)\nI0818 10:18:13.441038 17538 solver.cpp:228] Iteration 29100, loss = 0.0591856\nI0818 10:18:13.441071 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:18:13.441087 17538 solver.cpp:244]     Train net output #1: loss = 0.0591857 (* 1 = 0.0591857 loss)\nI0818 10:18:13.523214 17538 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0818 10:20:30.127092 17538 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 10:21:51.349814 17538 solver.cpp:404]     Test net output #0: accuracy = 0.63284\nI0818 10:21:51.350114 17538 solver.cpp:404]     Test net output #1: loss = 2.27389 (* 1 = 2.27389 loss)\nI0818 10:21:52.672310 17538 solver.cpp:228] Iteration 29200, loss = 0.0861882\nI0818 10:21:52.672343 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 10:21:52.672358 17538 solver.cpp:244]     Train net output #1: loss = 0.0861882 (* 1 = 0.0861882 loss)\nI0818 10:21:52.752703 17538 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0818 10:24:09.377507 17538 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 10:25:30.601351 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7128\nI0818 10:25:30.601655 17538 solver.cpp:404]     Test net output #1: loss = 1.4929 (* 1 = 1.4929 loss)\nI0818 10:25:31.923660 17538 solver.cpp:228] Iteration 29300, loss = 0.028013\nI0818 10:25:31.923693 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:25:31.923708 17538 solver.cpp:244]     
Train net output #1: loss = 0.028013 (* 1 = 0.028013 loss)\nI0818 10:25:32.004817 17538 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0818 10:27:48.627718 17538 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 10:29:09.872756 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7674\nI0818 10:29:09.873042 17538 solver.cpp:404]     Test net output #1: loss = 1.0677 (* 1 = 1.0677 loss)\nI0818 10:29:11.196388 17538 solver.cpp:228] Iteration 29400, loss = 0.0193393\nI0818 10:29:11.196424 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:29:11.196446 17538 solver.cpp:244]     Train net output #1: loss = 0.0193393 (* 1 = 0.0193393 loss)\nI0818 10:29:11.282137 17538 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0818 10:31:27.921514 17538 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 10:32:49.194154 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78888\nI0818 10:32:49.194468 17538 solver.cpp:404]     Test net output #1: loss = 0.889573 (* 1 = 0.889573 loss)\nI0818 10:32:50.518465 17538 solver.cpp:228] Iteration 29500, loss = 0.0300086\nI0818 10:32:50.518501 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:32:50.518525 17538 solver.cpp:244]     Train net output #1: loss = 0.0300086 (* 1 = 0.0300086 loss)\nI0818 10:32:50.598279 17538 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0818 10:35:07.192806 17538 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 10:36:28.435127 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77552\nI0818 10:36:28.435432 17538 solver.cpp:404]     Test net output #1: loss = 1.1359 (* 1 = 1.1359 loss)\nI0818 10:36:29.759063 17538 solver.cpp:228] Iteration 29600, loss = 0.043754\nI0818 10:36:29.759097 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:36:29.759114 17538 solver.cpp:244]     Train net output #1: loss = 0.043754 (* 1 = 0.043754 loss)\nI0818 10:36:29.837725 17538 sgd_solver.cpp:166] Iteration 29600, lr = 
0.35\nI0818 10:38:46.418145 17538 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 10:40:07.683413 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76084\nI0818 10:40:07.683724 17538 solver.cpp:404]     Test net output #1: loss = 1.2187 (* 1 = 1.2187 loss)\nI0818 10:40:09.005880 17538 solver.cpp:228] Iteration 29700, loss = 0.146829\nI0818 10:40:09.005918 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 10:40:09.005939 17538 solver.cpp:244]     Train net output #1: loss = 0.146829 (* 1 = 0.146829 loss)\nI0818 10:40:09.092507 17538 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0818 10:42:25.714680 17538 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 10:43:46.990417 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74648\nI0818 10:43:46.990718 17538 solver.cpp:404]     Test net output #1: loss = 1.44484 (* 1 = 1.44484 loss)\nI0818 10:43:48.314379 17538 solver.cpp:228] Iteration 29800, loss = 0.0250657\nI0818 10:43:48.314419 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:43:48.314435 17538 solver.cpp:244]     Train net output #1: loss = 0.0250657 (* 1 = 0.0250657 loss)\nI0818 10:43:48.398612 17538 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0818 10:46:05.036607 17538 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 10:47:26.284621 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8006\nI0818 10:47:26.284941 17538 solver.cpp:404]     Test net output #1: loss = 0.917974 (* 1 = 0.917974 loss)\nI0818 10:47:27.608433 17538 solver.cpp:228] Iteration 29900, loss = 0.0334813\nI0818 10:47:27.608466 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 10:47:27.608482 17538 solver.cpp:244]     Train net output #1: loss = 0.0334812 (* 1 = 0.0334812 loss)\nI0818 10:47:27.691962 17538 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0818 10:49:44.324301 17538 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 10:51:05.428650 17538 solver.cpp:404]     
Test net output #0: accuracy = 0.77068\nI0818 10:51:05.428944 17538 solver.cpp:404]     Test net output #1: loss = 1.08262 (* 1 = 1.08262 loss)\nI0818 10:51:06.752452 17538 solver.cpp:228] Iteration 30000, loss = 0.100023\nI0818 10:51:06.752487 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 10:51:06.752502 17538 solver.cpp:244]     Train net output #1: loss = 0.100023 (* 1 = 0.100023 loss)\nI0818 10:51:06.832128 17538 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0818 10:53:23.474267 17538 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0818 10:54:44.613509 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75384\nI0818 10:54:44.613806 17538 solver.cpp:404]     Test net output #1: loss = 1.30116 (* 1 = 1.30116 loss)\nI0818 10:54:45.937609 17538 solver.cpp:228] Iteration 30100, loss = 0.0240683\nI0818 10:54:45.937645 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 10:54:45.937661 17538 solver.cpp:244]     Train net output #1: loss = 0.0240682 (* 1 = 0.0240682 loss)\nI0818 10:54:46.021881 17538 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0818 10:57:02.695860 17538 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 10:58:23.788534 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72516\nI0818 10:58:23.788835 17538 solver.cpp:404]     Test net output #1: loss = 1.58631 (* 1 = 1.58631 loss)\nI0818 10:58:25.112738 17538 solver.cpp:228] Iteration 30200, loss = 0.0157673\nI0818 10:58:25.112782 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 10:58:25.112799 17538 solver.cpp:244]     Train net output #1: loss = 0.0157672 (* 1 = 0.0157672 loss)\nI0818 10:58:25.189657 17538 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0818 11:00:41.781697 17538 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 11:02:02.865032 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75288\nI0818 11:02:02.865301 17538 solver.cpp:404]     Test net output #1: loss = 1.39612 (* 1 = 
1.39612 loss)\nI0818 11:02:04.188735 17538 solver.cpp:228] Iteration 30300, loss = 0.0530203\nI0818 11:02:04.188781 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:02:04.188797 17538 solver.cpp:244]     Train net output #1: loss = 0.0530202 (* 1 = 0.0530202 loss)\nI0818 11:02:04.265512 17538 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0818 11:04:20.866549 17538 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 11:05:41.929417 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78888\nI0818 11:05:41.929721 17538 solver.cpp:404]     Test net output #1: loss = 1.05424 (* 1 = 1.05424 loss)\nI0818 11:05:43.253152 17538 solver.cpp:228] Iteration 30400, loss = 0.0686467\nI0818 11:05:43.253187 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 11:05:43.253203 17538 solver.cpp:244]     Train net output #1: loss = 0.0686466 (* 1 = 0.0686466 loss)\nI0818 11:05:43.329568 17538 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0818 11:07:59.928016 17538 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 11:09:21.009196 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76188\nI0818 11:09:21.009488 17538 solver.cpp:404]     Test net output #1: loss = 1.17284 (* 1 = 1.17284 loss)\nI0818 11:09:22.331610 17538 solver.cpp:228] Iteration 30500, loss = 0.0125613\nI0818 11:09:22.331656 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 11:09:22.331672 17538 solver.cpp:244]     Train net output #1: loss = 0.0125612 (* 1 = 0.0125612 loss)\nI0818 11:09:22.414575 17538 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0818 11:11:39.178575 17538 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 11:13:00.347899 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74412\nI0818 11:13:00.348201 17538 solver.cpp:404]     Test net output #1: loss = 1.3297 (* 1 = 1.3297 loss)\nI0818 11:13:01.671464 17538 solver.cpp:228] Iteration 30600, loss = 0.0909775\nI0818 11:13:01.671509 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:13:01.671525 17538 solver.cpp:244]     Train net output #1: loss = 0.0909774 (* 1 = 0.0909774 loss)\nI0818 11:13:01.763859 17538 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0818 11:15:18.669528 17538 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 11:16:39.750448 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8038\nI0818 11:16:39.750753 17538 solver.cpp:404]     Test net output #1: loss = 0.820749 (* 1 = 0.820749 loss)\nI0818 11:16:41.073495 17538 solver.cpp:228] Iteration 30700, loss = 0.0848015\nI0818 11:16:41.073541 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:16:41.073557 17538 solver.cpp:244]     Train net output #1: loss = 0.0848014 (* 1 = 0.0848014 loss)\nI0818 11:16:41.163323 17538 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0818 11:18:58.137038 17538 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 11:20:19.221779 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69764\nI0818 11:20:19.222091 17538 solver.cpp:404]     Test net output #1: loss = 1.7583 (* 1 = 1.7583 loss)\nI0818 11:20:20.545389 17538 solver.cpp:228] Iteration 30800, loss = 0.0562358\nI0818 11:20:20.545439 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:20:20.545454 17538 solver.cpp:244]     Train net output #1: loss = 0.0562357 (* 1 = 0.0562357 loss)\nI0818 11:20:20.629374 17538 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0818 11:22:37.537298 17538 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 11:23:58.679268 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71564\nI0818 11:23:58.679566 17538 solver.cpp:404]     Test net output #1: loss = 1.4732 (* 1 = 1.4732 loss)\nI0818 11:24:00.002151 17538 solver.cpp:228] Iteration 30900, loss = 0.104061\nI0818 11:24:00.002197 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:24:00.002214 17538 solver.cpp:244]     Train net output #1: loss 
= 0.104061 (* 1 = 0.104061 loss)\nI0818 11:24:00.083640 17538 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0818 11:26:16.948940 17538 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 11:27:38.201539 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81672\nI0818 11:27:38.201822 17538 solver.cpp:404]     Test net output #1: loss = 0.816777 (* 1 = 0.816777 loss)\nI0818 11:27:39.525895 17538 solver.cpp:228] Iteration 31000, loss = 0.0235034\nI0818 11:27:39.525944 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:27:39.525969 17538 solver.cpp:244]     Train net output #1: loss = 0.0235033 (* 1 = 0.0235033 loss)\nI0818 11:27:39.612679 17538 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0818 11:29:56.614513 17538 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 11:31:17.863085 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79196\nI0818 11:31:17.863378 17538 solver.cpp:404]     Test net output #1: loss = 1.01945 (* 1 = 1.01945 loss)\nI0818 11:31:19.185741 17538 solver.cpp:228] Iteration 31100, loss = 0.0517988\nI0818 11:31:19.185791 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:31:19.185814 17538 solver.cpp:244]     Train net output #1: loss = 0.0517987 (* 1 = 0.0517987 loss)\nI0818 11:31:19.278000 17538 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0818 11:33:36.326758 17538 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 11:34:57.595708 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81608\nI0818 11:34:57.595991 17538 solver.cpp:404]     Test net output #1: loss = 0.819006 (* 1 = 0.819006 loss)\nI0818 11:34:58.919291 17538 solver.cpp:228] Iteration 31200, loss = 0.0692891\nI0818 11:34:58.919339 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:34:58.919369 17538 solver.cpp:244]     Train net output #1: loss = 0.069289 (* 1 = 0.069289 loss)\nI0818 11:34:58.999053 17538 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0818 
11:37:15.858196 17538 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 11:38:37.173931 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81316\nI0818 11:38:37.174325 17538 solver.cpp:404]     Test net output #1: loss = 0.944567 (* 1 = 0.944567 loss)\nI0818 11:38:38.497747 17538 solver.cpp:228] Iteration 31300, loss = 0.040277\nI0818 11:38:38.497794 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:38:38.497819 17538 solver.cpp:244]     Train net output #1: loss = 0.0402768 (* 1 = 0.0402768 loss)\nI0818 11:38:38.579782 17538 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0818 11:40:55.564110 17538 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 11:42:16.858896 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72192\nI0818 11:42:16.859199 17538 solver.cpp:404]     Test net output #1: loss = 1.58147 (* 1 = 1.58147 loss)\nI0818 11:42:18.182355 17538 solver.cpp:228] Iteration 31400, loss = 0.0615305\nI0818 11:42:18.182404 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 11:42:18.182427 17538 solver.cpp:244]     Train net output #1: loss = 0.0615304 (* 1 = 0.0615304 loss)\nI0818 11:42:18.272878 17538 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0818 11:44:35.317975 17538 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 11:45:56.610002 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76008\nI0818 11:45:56.610282 17538 solver.cpp:404]     Test net output #1: loss = 1.15496 (* 1 = 1.15496 loss)\nI0818 11:45:57.933071 17538 solver.cpp:228] Iteration 31500, loss = 0.0302912\nI0818 11:45:57.933120 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:45:57.933145 17538 solver.cpp:244]     Train net output #1: loss = 0.0302911 (* 1 = 0.0302911 loss)\nI0818 11:45:58.019965 17538 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0818 11:48:15.029402 17538 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 11:49:36.293272 17538 solver.cpp:404]     Test 
net output #0: accuracy = 0.76128\nI0818 11:49:36.293583 17538 solver.cpp:404]     Test net output #1: loss = 1.16367 (* 1 = 1.16367 loss)\nI0818 11:49:37.616971 17538 solver.cpp:228] Iteration 31600, loss = 0.0339622\nI0818 11:49:37.617017 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 11:49:37.617043 17538 solver.cpp:244]     Train net output #1: loss = 0.0339622 (* 1 = 0.0339622 loss)\nI0818 11:49:37.706970 17538 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0818 11:51:54.703879 17538 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0818 11:53:15.958024 17538 solver.cpp:404]     Test net output #0: accuracy = 0.798\nI0818 11:53:15.958323 17538 solver.cpp:404]     Test net output #1: loss = 0.971904 (* 1 = 0.971904 loss)\nI0818 11:53:17.279646 17538 solver.cpp:228] Iteration 31700, loss = 0.0368562\nI0818 11:53:17.279685 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 11:53:17.279708 17538 solver.cpp:244]     Train net output #1: loss = 0.0368562 (* 1 = 0.0368562 loss)\nI0818 11:53:17.365330 17538 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0818 11:55:34.419504 17538 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 11:56:55.655762 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69436\nI0818 11:56:55.656046 17538 solver.cpp:404]     Test net output #1: loss = 1.44931 (* 1 = 1.44931 loss)\nI0818 11:56:56.975545 17538 solver.cpp:228] Iteration 31800, loss = 0.0972752\nI0818 11:56:56.975580 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 11:56:56.975595 17538 solver.cpp:244]     Train net output #1: loss = 0.0972751 (* 1 = 0.0972751 loss)\nI0818 11:56:57.064549 17538 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0818 11:59:13.970356 17538 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 12:00:35.205785 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78652\nI0818 12:00:35.206087 17538 solver.cpp:404]     Test net output #1: loss = 1.02924 (* 1 = 
1.02924 loss)\nI0818 12:00:36.526397 17538 solver.cpp:228] Iteration 31900, loss = 0.0445149\nI0818 12:00:36.526432 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 12:00:36.526448 17538 solver.cpp:244]     Train net output #1: loss = 0.0445148 (* 1 = 0.0445148 loss)\nI0818 12:00:36.615924 17538 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0818 12:02:53.555711 17538 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 12:04:14.761831 17538 solver.cpp:404]     Test net output #0: accuracy = 0.6824\nI0818 12:04:14.762118 17538 solver.cpp:404]     Test net output #1: loss = 1.57279 (* 1 = 1.57279 loss)\nI0818 12:04:16.083009 17538 solver.cpp:228] Iteration 32000, loss = 0.00571729\nI0818 12:04:16.083043 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:04:16.083058 17538 solver.cpp:244]     Train net output #1: loss = 0.0057172 (* 1 = 0.0057172 loss)\nI0818 12:04:16.174232 17538 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0818 12:06:33.159106 17538 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 12:07:54.385848 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7758\nI0818 12:07:54.386153 17538 solver.cpp:404]     Test net output #1: loss = 1.15759 (* 1 = 1.15759 loss)\nI0818 12:07:55.705926 17538 solver.cpp:228] Iteration 32100, loss = 0.0490469\nI0818 12:07:55.705961 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:07:55.705976 17538 solver.cpp:244]     Train net output #1: loss = 0.0490468 (* 1 = 0.0490468 loss)\nI0818 12:07:55.795497 17538 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0818 12:10:12.769908 17538 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 12:11:34.000253 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77104\nI0818 12:11:34.000535 17538 solver.cpp:404]     Test net output #1: loss = 1.14663 (* 1 = 1.14663 loss)\nI0818 12:11:35.320297 17538 solver.cpp:228] Iteration 32200, loss = 0.0448931\nI0818 12:11:35.320332 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:11:35.320346 17538 solver.cpp:244]     Train net output #1: loss = 0.044893 (* 1 = 0.044893 loss)\nI0818 12:11:35.407143 17538 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0818 12:13:52.393046 17538 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 12:15:13.632031 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7282\nI0818 12:15:13.632341 17538 solver.cpp:404]     Test net output #1: loss = 1.68533 (* 1 = 1.68533 loss)\nI0818 12:15:14.952045 17538 solver.cpp:228] Iteration 32300, loss = 0.0482599\nI0818 12:15:14.952080 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:15:14.952095 17538 solver.cpp:244]     Train net output #1: loss = 0.0482598 (* 1 = 0.0482598 loss)\nI0818 12:15:15.045181 17538 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0818 12:17:31.921633 17538 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 12:18:53.163348 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7806\nI0818 12:18:53.163605 17538 solver.cpp:404]     Test net output #1: loss = 1.10902 (* 1 = 1.10902 loss)\nI0818 12:18:54.482738 17538 solver.cpp:228] Iteration 32400, loss = 0.0348743\nI0818 12:18:54.482774 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:18:54.482787 17538 solver.cpp:244]     Train net output #1: loss = 0.0348742 (* 1 = 0.0348742 loss)\nI0818 12:18:54.571091 17538 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0818 12:21:11.774531 17538 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 12:22:33.513689 17538 solver.cpp:404]     Test net output #0: accuracy = 0.69348\nI0818 12:22:33.513968 17538 solver.cpp:404]     Test net output #1: loss = 2.13324 (* 1 = 2.13324 loss)\nI0818 12:22:34.835850 17538 solver.cpp:228] Iteration 32500, loss = 0.0431577\nI0818 12:22:34.835889 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 12:22:34.835906 17538 solver.cpp:244]     Train net output #1: loss = 
0.0431576 (* 1 = 0.0431576 loss)\nI0818 12:22:34.926496 17538 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0818 12:24:52.074658 17538 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 12:26:13.812157 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74088\nI0818 12:26:13.812408 17538 solver.cpp:404]     Test net output #1: loss = 1.46822 (* 1 = 1.46822 loss)\nI0818 12:26:15.134732 17538 solver.cpp:228] Iteration 32600, loss = 0.0570478\nI0818 12:26:15.134773 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:26:15.134788 17538 solver.cpp:244]     Train net output #1: loss = 0.0570477 (* 1 = 0.0570477 loss)\nI0818 12:26:15.223533 17538 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0818 12:28:32.331377 17538 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 12:29:54.434635 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78272\nI0818 12:29:54.434891 17538 solver.cpp:404]     Test net output #1: loss = 0.969532 (* 1 = 0.969532 loss)\nI0818 12:29:55.756769 17538 solver.cpp:228] Iteration 32700, loss = 0.0217655\nI0818 12:29:55.756809 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:29:55.756825 17538 solver.cpp:244]     Train net output #1: loss = 0.0217654 (* 1 = 0.0217654 loss)\nI0818 12:29:55.845356 17538 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0818 12:32:13.001484 17538 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 12:33:35.119866 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70012\nI0818 12:33:35.120198 17538 solver.cpp:404]     Test net output #1: loss = 2.03327 (* 1 = 2.03327 loss)\nI0818 12:33:36.442391 17538 solver.cpp:228] Iteration 32800, loss = 0.037559\nI0818 12:33:36.442430 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:33:36.442446 17538 solver.cpp:244]     Train net output #1: loss = 0.0375589 (* 1 = 0.0375589 loss)\nI0818 12:33:36.533839 17538 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0818 
12:35:53.718343 17538 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 12:37:15.816485 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74232\nI0818 12:37:15.816789 17538 solver.cpp:404]     Test net output #1: loss = 1.23398 (* 1 = 1.23398 loss)\nI0818 12:37:17.138558 17538 solver.cpp:228] Iteration 32900, loss = 0.102922\nI0818 12:37:17.138600 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:37:17.138615 17538 solver.cpp:244]     Train net output #1: loss = 0.102921 (* 1 = 0.102921 loss)\nI0818 12:37:17.222156 17538 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0818 12:39:34.311158 17538 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 12:40:56.268616 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78872\nI0818 12:40:56.268941 17538 solver.cpp:404]     Test net output #1: loss = 0.974558 (* 1 = 0.974558 loss)\nI0818 12:40:57.590405 17538 solver.cpp:228] Iteration 33000, loss = 0.0693677\nI0818 12:40:57.590445 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 12:40:57.590461 17538 solver.cpp:244]     Train net output #1: loss = 0.0693676 (* 1 = 0.0693676 loss)\nI0818 12:40:57.681329 17538 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0818 12:43:14.794991 17538 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 12:44:36.701313 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72924\nI0818 12:44:36.701624 17538 solver.cpp:404]     Test net output #1: loss = 1.61335 (* 1 = 1.61335 loss)\nI0818 12:44:38.023514 17538 solver.cpp:228] Iteration 33100, loss = 0.0188807\nI0818 12:44:38.023555 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:44:38.023569 17538 solver.cpp:244]     Train net output #1: loss = 0.0188806 (* 1 = 0.0188806 loss)\nI0818 12:44:38.114200 17538 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0818 12:46:55.310292 17538 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 12:48:17.226254 17538 solver.cpp:404]     Test 
net output #0: accuracy = 0.72772\nI0818 12:48:17.226579 17538 solver.cpp:404]     Test net output #1: loss = 1.30718 (* 1 = 1.30718 loss)\nI0818 12:48:18.548357 17538 solver.cpp:228] Iteration 33200, loss = 0.0202046\nI0818 12:48:18.548398 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 12:48:18.548414 17538 solver.cpp:244]     Train net output #1: loss = 0.0202045 (* 1 = 0.0202045 loss)\nI0818 12:48:18.631408 17538 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0818 12:50:35.821769 17538 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0818 12:51:57.709817 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73336\nI0818 12:51:57.710144 17538 solver.cpp:404]     Test net output #1: loss = 1.30554 (* 1 = 1.30554 loss)\nI0818 12:51:59.031970 17538 solver.cpp:228] Iteration 33300, loss = 0.0875644\nI0818 12:51:59.032009 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 12:51:59.032025 17538 solver.cpp:244]     Train net output #1: loss = 0.0875643 (* 1 = 0.0875643 loss)\nI0818 12:51:59.116390 17538 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0818 12:54:16.332759 17538 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 12:55:38.053334 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78492\nI0818 12:55:38.053619 17538 solver.cpp:404]     Test net output #1: loss = 1.05201 (* 1 = 1.05201 loss)\nI0818 12:55:39.375391 17538 solver.cpp:228] Iteration 33400, loss = 0.0310423\nI0818 12:55:39.375433 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:55:39.375450 17538 solver.cpp:244]     Train net output #1: loss = 0.0310422 (* 1 = 0.0310422 loss)\nI0818 12:55:39.464632 17538 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0818 12:57:56.626615 17538 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 12:59:18.775743 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8166\nI0818 12:59:18.776072 17538 solver.cpp:404]     Test net output #1: loss = 0.81294 (* 1 = 0.81294 
loss)\nI0818 12:59:20.098002 17538 solver.cpp:228] Iteration 33500, loss = 0.0132763\nI0818 12:59:20.098042 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 12:59:20.098059 17538 solver.cpp:244]     Train net output #1: loss = 0.0132762 (* 1 = 0.0132762 loss)\nI0818 12:59:20.187346 17538 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0818 13:01:37.309209 17538 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 13:02:59.195828 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80496\nI0818 13:02:59.196157 17538 solver.cpp:404]     Test net output #1: loss = 0.97338 (* 1 = 0.97338 loss)\nI0818 13:03:00.518018 17538 solver.cpp:228] Iteration 33600, loss = 0.017199\nI0818 13:03:00.518059 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:03:00.518074 17538 solver.cpp:244]     Train net output #1: loss = 0.0171989 (* 1 = 0.0171989 loss)\nI0818 13:03:00.613636 17538 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0818 13:05:17.755913 17538 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 13:06:39.458712 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73388\nI0818 13:06:39.459022 17538 solver.cpp:404]     Test net output #1: loss = 1.4166 (* 1 = 1.4166 loss)\nI0818 13:06:40.780853 17538 solver.cpp:228] Iteration 33700, loss = 0.0569445\nI0818 13:06:40.780895 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:06:40.780911 17538 solver.cpp:244]     Train net output #1: loss = 0.0569444 (* 1 = 0.0569444 loss)\nI0818 13:06:40.868783 17538 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0818 13:08:57.944380 17538 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 13:10:20.094241 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80844\nI0818 13:10:20.094569 17538 solver.cpp:404]     Test net output #1: loss = 0.849185 (* 1 = 0.849185 loss)\nI0818 13:10:21.416651 17538 solver.cpp:228] Iteration 33800, loss = 0.039976\nI0818 13:10:21.416692 17538 solver.cpp:244]     
Train net output #0: accuracy = 0.984\nI0818 13:10:21.416708 17538 solver.cpp:244]     Train net output #1: loss = 0.0399759 (* 1 = 0.0399759 loss)\nI0818 13:10:21.501128 17538 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0818 13:12:38.642015 17538 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 13:14:00.664577 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79968\nI0818 13:14:00.664901 17538 solver.cpp:404]     Test net output #1: loss = 0.869666 (* 1 = 0.869666 loss)\nI0818 13:14:01.986618 17538 solver.cpp:228] Iteration 33900, loss = 0.0645242\nI0818 13:14:01.986659 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:14:01.986675 17538 solver.cpp:244]     Train net output #1: loss = 0.0645241 (* 1 = 0.0645241 loss)\nI0818 13:14:02.071740 17538 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0818 13:16:19.258177 17538 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 13:17:41.052942 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74916\nI0818 13:17:41.053220 17538 solver.cpp:404]     Test net output #1: loss = 1.22919 (* 1 = 1.22919 loss)\nI0818 13:17:42.374423 17538 solver.cpp:228] Iteration 34000, loss = 0.0550411\nI0818 13:17:42.374466 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:17:42.374482 17538 solver.cpp:244]     Train net output #1: loss = 0.055041 (* 1 = 0.055041 loss)\nI0818 13:17:42.467990 17538 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0818 13:19:59.668583 17538 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 13:21:21.828284 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0818 13:21:21.828629 17538 solver.cpp:404]     Test net output #1: loss = 0.965903 (* 1 = 0.965903 loss)\nI0818 13:21:23.150372 17538 solver.cpp:228] Iteration 34100, loss = 0.0605675\nI0818 13:21:23.150410 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:21:23.150426 17538 solver.cpp:244]     Train net output #1: loss = 0.0605674 
(* 1 = 0.0605674 loss)\nI0818 13:21:23.297935 17538 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0818 13:23:40.492928 17538 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 13:25:02.686266 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72888\nI0818 13:25:02.686606 17538 solver.cpp:404]     Test net output #1: loss = 1.50256 (* 1 = 1.50256 loss)\nI0818 13:25:04.008384 17538 solver.cpp:228] Iteration 34200, loss = 0.00522867\nI0818 13:25:04.008422 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:25:04.008438 17538 solver.cpp:244]     Train net output #1: loss = 0.00522853 (* 1 = 0.00522853 loss)\nI0818 13:25:04.094861 17538 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0818 13:27:21.223472 17538 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 13:28:43.400774 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7248\nI0818 13:28:43.401113 17538 solver.cpp:404]     Test net output #1: loss = 1.33792 (* 1 = 1.33792 loss)\nI0818 13:28:44.723016 17538 solver.cpp:228] Iteration 34300, loss = 0.0697608\nI0818 13:28:44.723053 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 13:28:44.723069 17538 solver.cpp:244]     Train net output #1: loss = 0.0697606 (* 1 = 0.0697606 loss)\nI0818 13:28:44.811992 17538 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0818 13:31:02.038637 17538 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 13:32:24.204130 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78768\nI0818 13:32:24.204463 17538 solver.cpp:404]     Test net output #1: loss = 0.882763 (* 1 = 0.882763 loss)\nI0818 13:32:25.526180 17538 solver.cpp:228] Iteration 34400, loss = 0.0278602\nI0818 13:32:25.526217 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:32:25.526233 17538 solver.cpp:244]     Train net output #1: loss = 0.0278601 (* 1 = 0.0278601 loss)\nI0818 13:32:25.611539 17538 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0818 13:34:42.769515 17538 
solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 13:36:04.925964 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79296\nI0818 13:36:04.926276 17538 solver.cpp:404]     Test net output #1: loss = 0.844403 (* 1 = 0.844403 loss)\nI0818 13:36:06.247617 17538 solver.cpp:228] Iteration 34500, loss = 0.0417101\nI0818 13:36:06.247656 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:36:06.247673 17538 solver.cpp:244]     Train net output #1: loss = 0.04171 (* 1 = 0.04171 loss)\nI0818 13:36:06.337071 17538 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0818 13:38:23.599884 17538 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 13:39:45.777731 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75668\nI0818 13:39:45.778069 17538 solver.cpp:404]     Test net output #1: loss = 1.23282 (* 1 = 1.23282 loss)\nI0818 13:39:47.100157 17538 solver.cpp:228] Iteration 34600, loss = 0.0212597\nI0818 13:39:47.100193 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 13:39:47.100208 17538 solver.cpp:244]     Train net output #1: loss = 0.0212595 (* 1 = 0.0212595 loss)\nI0818 13:39:47.182353 17538 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0818 13:42:04.334913 17538 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 13:43:26.518025 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75884\nI0818 13:43:26.518347 17538 solver.cpp:404]     Test net output #1: loss = 1.17273 (* 1 = 1.17273 loss)\nI0818 13:43:27.840363 17538 solver.cpp:228] Iteration 34700, loss = 0.0504023\nI0818 13:43:27.840405 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:43:27.840422 17538 solver.cpp:244]     Train net output #1: loss = 0.0504022 (* 1 = 0.0504022 loss)\nI0818 13:43:27.924039 17538 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0818 13:45:45.087740 17538 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 13:47:07.248836 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.7666\nI0818 13:47:07.249176 17538 solver.cpp:404]     Test net output #1: loss = 1.02914 (* 1 = 1.02914 loss)\nI0818 13:47:08.570399 17538 solver.cpp:228] Iteration 34800, loss = 0.0362441\nI0818 13:47:08.570441 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:47:08.570456 17538 solver.cpp:244]     Train net output #1: loss = 0.0362439 (* 1 = 0.0362439 loss)\nI0818 13:47:08.653359 17538 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0818 13:49:25.782601 17538 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0818 13:50:47.945339 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76716\nI0818 13:50:47.945670 17538 solver.cpp:404]     Test net output #1: loss = 1.10416 (* 1 = 1.10416 loss)\nI0818 13:50:49.267637 17538 solver.cpp:228] Iteration 34900, loss = 0.0755354\nI0818 13:50:49.267680 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 13:50:49.267695 17538 solver.cpp:244]     Train net output #1: loss = 0.0755352 (* 1 = 0.0755352 loss)\nI0818 13:50:49.353596 17538 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0818 13:53:06.437705 17538 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 13:54:28.587764 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7906\nI0818 13:54:28.588081 17538 solver.cpp:404]     Test net output #1: loss = 1.05074 (* 1 = 1.05074 loss)\nI0818 13:54:29.909894 17538 solver.cpp:228] Iteration 35000, loss = 0.0416254\nI0818 13:54:29.909932 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 13:54:29.909950 17538 solver.cpp:244]     Train net output #1: loss = 0.0416253 (* 1 = 0.0416253 loss)\nI0818 13:54:29.994659 17538 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0818 13:56:47.172961 17538 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 13:58:09.313802 17538 solver.cpp:404]     Test net output #0: accuracy = 0.801\nI0818 13:58:09.314138 17538 solver.cpp:404]     Test net output #1: loss = 0.918414 (* 1 = 0.918414 loss)\nI0818 
13:58:10.636014 17538 solver.cpp:228] Iteration 35100, loss = 0.0371861\nI0818 13:58:10.636051 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 13:58:10.636067 17538 solver.cpp:244]     Train net output #1: loss = 0.037186 (* 1 = 0.037186 loss)\nI0818 13:58:10.722726 17538 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0818 14:00:27.957094 17538 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 14:01:50.115272 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79912\nI0818 14:01:50.115589 17538 solver.cpp:404]     Test net output #1: loss = 0.902003 (* 1 = 0.902003 loss)\nI0818 14:01:51.436967 17538 solver.cpp:228] Iteration 35200, loss = 0.0486115\nI0818 14:01:51.437007 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:01:51.437023 17538 solver.cpp:244]     Train net output #1: loss = 0.0486114 (* 1 = 0.0486114 loss)\nI0818 14:01:51.521466 17538 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0818 14:04:08.612988 17538 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 14:05:30.756206 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74748\nI0818 14:05:30.756592 17538 solver.cpp:404]     Test net output #1: loss = 1.24662 (* 1 = 1.24662 loss)\nI0818 14:05:32.078312 17538 solver.cpp:228] Iteration 35300, loss = 0.115429\nI0818 14:05:32.078353 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 14:05:32.078369 17538 solver.cpp:244]     Train net output #1: loss = 0.115429 (* 1 = 0.115429 loss)\nI0818 14:05:32.166546 17538 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0818 14:07:49.288208 17538 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 14:09:11.440721 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76284\nI0818 14:09:11.441059 17538 solver.cpp:404]     Test net output #1: loss = 1.28356 (* 1 = 1.28356 loss)\nI0818 14:09:12.762236 17538 solver.cpp:228] Iteration 35400, loss = 0.0179211\nI0818 14:09:12.762274 17538 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0818 14:09:12.762290 17538 solver.cpp:244]     Train net output #1: loss = 0.017921 (* 1 = 0.017921 loss)\nI0818 14:09:12.845752 17538 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0818 14:11:29.986021 17538 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 14:12:52.151762 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77048\nI0818 14:12:52.152089 17538 solver.cpp:404]     Test net output #1: loss = 1.20202 (* 1 = 1.20202 loss)\nI0818 14:12:53.473763 17538 solver.cpp:228] Iteration 35500, loss = 0.0302868\nI0818 14:12:53.473800 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:12:53.473817 17538 solver.cpp:244]     Train net output #1: loss = 0.0302867 (* 1 = 0.0302867 loss)\nI0818 14:12:53.563771 17538 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0818 14:15:10.626303 17538 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 14:16:32.798820 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77968\nI0818 14:16:32.799139 17538 solver.cpp:404]     Test net output #1: loss = 1.1136 (* 1 = 1.1136 loss)\nI0818 14:16:34.120338 17538 solver.cpp:228] Iteration 35600, loss = 0.0280244\nI0818 14:16:34.120376 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:16:34.120393 17538 solver.cpp:244]     Train net output #1: loss = 0.0280243 (* 1 = 0.0280243 loss)\nI0818 14:16:34.213291 17538 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0818 14:18:51.330564 17538 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 14:20:13.471102 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75272\nI0818 14:20:13.471421 17538 solver.cpp:404]     Test net output #1: loss = 1.21649 (* 1 = 1.21649 loss)\nI0818 14:20:14.793349 17538 solver.cpp:228] Iteration 35700, loss = 0.0186258\nI0818 14:20:14.793385 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:20:14.793398 17538 solver.cpp:244]     Train net output #1: loss = 0.0186257 (* 1 = 0.0186257 
loss)\nI0818 14:20:14.879175 17538 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0818 14:22:31.998618 17538 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 14:23:54.156011 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79688\nI0818 14:23:54.156330 17538 solver.cpp:404]     Test net output #1: loss = 0.877783 (* 1 = 0.877783 loss)\nI0818 14:23:55.478171 17538 solver.cpp:228] Iteration 35800, loss = 0.0540506\nI0818 14:23:55.478205 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:23:55.478220 17538 solver.cpp:244]     Train net output #1: loss = 0.0540504 (* 1 = 0.0540504 loss)\nI0818 14:23:55.562189 17538 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0818 14:26:12.635741 17538 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 14:27:34.793133 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77608\nI0818 14:27:34.793478 17538 solver.cpp:404]     Test net output #1: loss = 1.09252 (* 1 = 1.09252 loss)\nI0818 14:27:36.115510 17538 solver.cpp:228] Iteration 35900, loss = 0.0648167\nI0818 14:27:36.115548 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 14:27:36.115562 17538 solver.cpp:244]     Train net output #1: loss = 0.0648166 (* 1 = 0.0648166 loss)\nI0818 14:27:36.203752 17538 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0818 14:29:53.323923 17538 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 14:31:15.497118 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73712\nI0818 14:31:15.497455 17538 solver.cpp:404]     Test net output #1: loss = 1.28609 (* 1 = 1.28609 loss)\nI0818 14:31:16.819708 17538 solver.cpp:228] Iteration 36000, loss = 0.0613259\nI0818 14:31:16.819743 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:31:16.819758 17538 solver.cpp:244]     Train net output #1: loss = 0.0613257 (* 1 = 0.0613257 loss)\nI0818 14:31:16.906101 17538 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0818 14:33:34.068140 17538 
solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 14:34:56.240180 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80536\nI0818 14:34:56.240526 17538 solver.cpp:404]     Test net output #1: loss = 0.867383 (* 1 = 0.867383 loss)\nI0818 14:34:57.562227 17538 solver.cpp:228] Iteration 36100, loss = 0.0192793\nI0818 14:34:57.562264 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:34:57.562278 17538 solver.cpp:244]     Train net output #1: loss = 0.0192791 (* 1 = 0.0192791 loss)\nI0818 14:34:57.649762 17538 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0818 14:40:38.629546 17538 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 14:42:00.984077 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78468\nI0818 14:42:00.985937 17538 solver.cpp:404]     Test net output #1: loss = 1.01304 (* 1 = 1.01304 loss)\nI0818 14:42:02.310175 17538 solver.cpp:228] Iteration 36200, loss = 0.0446474\nI0818 14:42:02.310215 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 14:42:02.310246 17538 solver.cpp:244]     Train net output #1: loss = 0.0446473 (* 1 = 0.0446473 loss)\nI0818 14:42:02.398088 17538 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0818 14:44:31.620962 17538 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 14:45:53.772646 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76508\nI0818 14:45:53.773035 17538 solver.cpp:404]     Test net output #1: loss = 1.1266 (* 1 = 1.1266 loss)\nI0818 14:45:55.096927 17538 solver.cpp:228] Iteration 36300, loss = 0.0671466\nI0818 14:45:55.096969 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 14:45:55.096985 17538 solver.cpp:244]     Train net output #1: loss = 0.0671464 (* 1 = 0.0671464 loss)\nI0818 14:45:55.186296 17538 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0818 14:48:12.283041 17538 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 14:49:34.479233 17538 solver.cpp:404]     Test net output #0: accuracy 
= 0.76312\nI0818 14:49:34.479559 17538 solver.cpp:404]     Test net output #1: loss = 1.09993 (* 1 = 1.09993 loss)\nI0818 14:49:35.802817 17538 solver.cpp:228] Iteration 36400, loss = 0.0282645\nI0818 14:49:35.802858 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 14:49:35.802875 17538 solver.cpp:244]     Train net output #1: loss = 0.0282643 (* 1 = 0.0282643 loss)\nI0818 14:49:35.891397 17538 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 14:51:53.046437 17538 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 14:53:15.151295 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77104\nI0818 14:53:15.151588 17538 solver.cpp:404]     Test net output #1: loss = 1.12024 (* 1 = 1.12024 loss)\nI0818 14:53:16.475349 17538 solver.cpp:228] Iteration 36500, loss = 0.0590876\nI0818 14:53:16.475389 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 14:53:16.475405 17538 solver.cpp:244]     Train net output #1: loss = 0.0590874 (* 1 = 0.0590874 loss)\nI0818 14:53:16.560333 17538 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 14:55:33.756248 17538 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 14:56:55.889112 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74244\nI0818 14:56:55.889359 17538 solver.cpp:404]     Test net output #1: loss = 1.29609 (* 1 = 1.29609 loss)\nI0818 14:56:57.213160 17538 solver.cpp:228] Iteration 36600, loss = 0.011813\nI0818 14:56:57.213199 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 14:56:57.213215 17538 solver.cpp:244]     Train net output #1: loss = 0.0118129 (* 1 = 0.0118129 loss)\nI0818 14:56:57.300320 17538 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 14:59:14.511509 17538 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 15:00:36.722784 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI0818 15:00:36.723059 17538 solver.cpp:404]     Test net output #1: loss = 1.55433 (* 1 = 1.55433 loss)\nI0818 
15:00:38.050923 17538 solver.cpp:228] Iteration 36700, loss = 0.0532697\nI0818 15:00:38.050966 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:00:38.050990 17538 solver.cpp:244]     Train net output #1: loss = 0.0532696 (* 1 = 0.0532696 loss)\nI0818 15:00:38.127013 17538 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 15:02:55.212410 17538 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 15:04:17.115617 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81284\nI0818 15:04:17.115876 17538 solver.cpp:404]     Test net output #1: loss = 0.867984 (* 1 = 0.867984 loss)\nI0818 15:04:18.441009 17538 solver.cpp:228] Iteration 36800, loss = 0.0468106\nI0818 15:04:18.441051 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:04:18.441073 17538 solver.cpp:244]     Train net output #1: loss = 0.0468104 (* 1 = 0.0468104 loss)\nI0818 15:04:18.523572 17538 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 15:06:35.753453 17538 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 15:07:57.954169 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81148\nI0818 15:07:57.954463 17538 solver.cpp:404]     Test net output #1: loss = 0.786101 (* 1 = 0.786101 loss)\nI0818 15:07:59.279549 17538 solver.cpp:228] Iteration 36900, loss = 0.0179176\nI0818 15:07:59.279589 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:07:59.279613 17538 solver.cpp:244]     Train net output #1: loss = 0.0179175 (* 1 = 0.0179175 loss)\nI0818 15:07:59.365543 17538 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 15:10:16.492130 17538 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 15:11:38.664065 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73068\nI0818 15:11:38.664331 17538 solver.cpp:404]     Test net output #1: loss = 1.64732 (* 1 = 1.64732 loss)\nI0818 15:11:39.988171 17538 solver.cpp:228] Iteration 37000, loss = 0.066987\nI0818 15:11:39.988214 17538 solver.cpp:244]     Train 
net output #0: accuracy = 0.976\nI0818 15:11:39.988242 17538 solver.cpp:244]     Train net output #1: loss = 0.0669869 (* 1 = 0.0669869 loss)\nI0818 15:11:40.070477 17538 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 15:13:57.152864 17538 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 15:15:19.345580 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74676\nI0818 15:15:19.345836 17538 solver.cpp:404]     Test net output #1: loss = 1.20403 (* 1 = 1.20403 loss)\nI0818 15:15:20.670250 17538 solver.cpp:228] Iteration 37100, loss = 0.110308\nI0818 15:15:20.670292 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:15:20.670315 17538 solver.cpp:244]     Train net output #1: loss = 0.110308 (* 1 = 0.110308 loss)\nI0818 15:15:20.760666 17538 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 15:17:37.946055 17538 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 15:18:59.886849 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78688\nI0818 15:18:59.887105 17538 solver.cpp:404]     Test net output #1: loss = 1.03808 (* 1 = 1.03808 loss)\nI0818 15:19:01.212307 17538 solver.cpp:228] Iteration 37200, loss = 0.0262379\nI0818 15:19:01.212347 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:19:01.212370 17538 solver.cpp:244]     Train net output #1: loss = 0.0262377 (* 1 = 0.0262377 loss)\nI0818 15:19:01.300770 17538 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 15:21:18.467037 17538 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 15:22:39.920708 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75436\nI0818 15:22:39.921025 17538 solver.cpp:404]     Test net output #1: loss = 1.23002 (* 1 = 1.23002 loss)\nI0818 15:22:41.242261 17538 solver.cpp:228] Iteration 37300, loss = 0.0468522\nI0818 15:22:41.242301 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:22:41.242316 17538 solver.cpp:244]     Train net output #1: loss = 0.046852 (* 1 = 
0.046852 loss)\nI0818 15:22:41.332733 17538 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 15:24:58.360122 17538 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 15:26:20.180860 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81268\nI0818 15:26:20.181102 17538 solver.cpp:404]     Test net output #1: loss = 0.764984 (* 1 = 0.764984 loss)\nI0818 15:26:21.505236 17538 solver.cpp:228] Iteration 37400, loss = 0.0756818\nI0818 15:26:21.505277 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:26:21.505301 17538 solver.cpp:244]     Train net output #1: loss = 0.0756817 (* 1 = 0.0756817 loss)\nI0818 15:26:21.601112 17538 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 15:28:38.724530 17538 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 15:30:00.956141 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0818 15:30:00.956418 17538 solver.cpp:404]     Test net output #1: loss = 1.15299 (* 1 = 1.15299 loss)\nI0818 15:30:02.280303 17538 solver.cpp:228] Iteration 37500, loss = 0.0133145\nI0818 15:30:02.280342 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:30:02.280365 17538 solver.cpp:244]     Train net output #1: loss = 0.0133143 (* 1 = 0.0133143 loss)\nI0818 15:30:02.367605 17538 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 15:32:19.449919 17538 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 15:33:41.658078 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77104\nI0818 15:33:41.658335 17538 solver.cpp:404]     Test net output #1: loss = 1.13501 (* 1 = 1.13501 loss)\nI0818 15:33:42.983266 17538 solver.cpp:228] Iteration 37600, loss = 0.0314847\nI0818 15:33:42.983307 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:33:42.983331 17538 solver.cpp:244]     Train net output #1: loss = 0.0314845 (* 1 = 0.0314845 loss)\nI0818 15:33:43.070497 17538 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 15:36:00.177345 17538 
solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 15:37:22.404161 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80016\nI0818 15:37:22.404449 17538 solver.cpp:404]     Test net output #1: loss = 0.961809 (* 1 = 0.961809 loss)\nI0818 15:37:23.729195 17538 solver.cpp:228] Iteration 37700, loss = 0.0327551\nI0818 15:37:23.729244 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:37:23.729269 17538 solver.cpp:244]     Train net output #1: loss = 0.032755 (* 1 = 0.032755 loss)\nI0818 15:37:23.820583 17538 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 15:39:40.948627 17538 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 15:41:03.216379 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7878\nI0818 15:41:03.216660 17538 solver.cpp:404]     Test net output #1: loss = 1.00867 (* 1 = 1.00867 loss)\nI0818 15:41:04.541643 17538 solver.cpp:228] Iteration 37800, loss = 0.0181262\nI0818 15:41:04.541685 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:41:04.541709 17538 solver.cpp:244]     Train net output #1: loss = 0.0181261 (* 1 = 0.0181261 loss)\nI0818 15:41:04.626348 17538 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 15:43:21.710469 17538 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 15:44:43.935396 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78256\nI0818 15:44:43.935672 17538 solver.cpp:404]     Test net output #1: loss = 1.10234 (* 1 = 1.10234 loss)\nI0818 15:44:45.259604 17538 solver.cpp:228] Iteration 37900, loss = 0.0297698\nI0818 15:44:45.259645 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:44:45.259668 17538 solver.cpp:244]     Train net output #1: loss = 0.0297696 (* 1 = 0.0297696 loss)\nI0818 15:44:45.342640 17538 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 15:47:02.539026 17538 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 15:48:24.764547 17538 solver.cpp:404]     Test net output #0: accuracy 
= 0.80156\nI0818 15:48:24.764804 17538 solver.cpp:404]     Test net output #1: loss = 0.833553 (* 1 = 0.833553 loss)\nI0818 15:48:26.089232 17538 solver.cpp:228] Iteration 38000, loss = 0.0686233\nI0818 15:48:26.089275 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:48:26.089299 17538 solver.cpp:244]     Train net output #1: loss = 0.0686231 (* 1 = 0.0686231 loss)\nI0818 15:48:26.171442 17538 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 15:50:43.279208 17538 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 15:52:05.367643 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75276\nI0818 15:52:05.367944 17538 solver.cpp:404]     Test net output #1: loss = 1.4405 (* 1 = 1.4405 loss)\nI0818 15:52:06.692847 17538 solver.cpp:228] Iteration 38100, loss = 0.0279557\nI0818 15:52:06.692889 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:52:06.692912 17538 solver.cpp:244]     Train net output #1: loss = 0.0279555 (* 1 = 0.0279555 loss)\nI0818 15:52:06.777655 17538 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 15:54:23.999514 17538 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 15:55:46.066339 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71656\nI0818 15:55:46.066612 17538 solver.cpp:404]     Test net output #1: loss = 1.36941 (* 1 = 1.36941 loss)\nI0818 15:55:47.391453 17538 solver.cpp:228] Iteration 38200, loss = 0.0318599\nI0818 15:55:47.391496 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:55:47.391520 17538 solver.cpp:244]     Train net output #1: loss = 0.0318597 (* 1 = 0.0318597 loss)\nI0818 15:55:47.475287 17538 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 15:58:04.582737 17538 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 15:59:26.475594 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82384\nI0818 15:59:26.475831 17538 solver.cpp:404]     Test net output #1: loss = 0.796563 (* 1 = 0.796563 loss)\nI0818 
15:59:27.801048 17538 solver.cpp:228] Iteration 38300, loss = 0.0414996\nI0818 15:59:27.801091 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:59:27.801116 17538 solver.cpp:244]     Train net output #1: loss = 0.0414995 (* 1 = 0.0414995 loss)\nI0818 15:59:27.886267 17538 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 16:01:45.042788 17538 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 16:03:07.172437 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74832\nI0818 16:03:07.172708 17538 solver.cpp:404]     Test net output #1: loss = 1.23886 (* 1 = 1.23886 loss)\nI0818 16:03:08.497436 17538 solver.cpp:228] Iteration 38400, loss = 0.0355863\nI0818 16:03:08.497479 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:03:08.497506 17538 solver.cpp:244]     Train net output #1: loss = 0.0355861 (* 1 = 0.0355861 loss)\nI0818 16:03:08.576357 17538 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0818 16:05:25.772105 17538 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 16:06:47.997917 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77168\nI0818 16:06:47.998198 17538 solver.cpp:404]     Test net output #1: loss = 1.14364 (* 1 = 1.14364 loss)\nI0818 16:06:49.322998 17538 solver.cpp:228] Iteration 38500, loss = 0.0229937\nI0818 16:06:49.323038 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:06:49.323061 17538 solver.cpp:244]     Train net output #1: loss = 0.0229936 (* 1 = 0.0229936 loss)\nI0818 16:06:49.408681 17538 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 16:09:06.597741 17538 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 16:10:28.668844 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77844\nI0818 16:10:28.669126 17538 solver.cpp:404]     Test net output #1: loss = 1.06729 (* 1 = 1.06729 loss)\nI0818 16:10:29.993866 17538 solver.cpp:228] Iteration 38600, loss = 0.0417827\nI0818 16:10:29.993907 17538 solver.cpp:244]     Train 
net output #0: accuracy = 0.984\nI0818 16:10:29.993930 17538 solver.cpp:244]     Train net output #1: loss = 0.0417826 (* 1 = 0.0417826 loss)\nI0818 16:10:30.077294 17538 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 16:12:47.226663 17538 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 16:14:09.409238 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78164\nI0818 16:14:09.409490 17538 solver.cpp:404]     Test net output #1: loss = 1.03303 (* 1 = 1.03303 loss)\nI0818 16:14:10.733839 17538 solver.cpp:228] Iteration 38700, loss = 0.0284873\nI0818 16:14:10.733881 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:14:10.733906 17538 solver.cpp:244]     Train net output #1: loss = 0.0284871 (* 1 = 0.0284871 loss)\nI0818 16:14:10.816218 17538 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 16:16:27.892467 17538 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 16:17:49.963376 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82924\nI0818 16:17:49.963629 17538 solver.cpp:404]     Test net output #1: loss = 0.708 (* 1 = 0.708 loss)\nI0818 16:17:51.288940 17538 solver.cpp:228] Iteration 38800, loss = 0.0374855\nI0818 16:17:51.288981 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:17:51.289005 17538 solver.cpp:244]     Train net output #1: loss = 0.0374853 (* 1 = 0.0374853 loss)\nI0818 16:17:51.368290 17538 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 16:20:08.448560 17538 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 16:21:30.665267 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72684\nI0818 16:21:30.665570 17538 solver.cpp:404]     Test net output #1: loss = 1.33377 (* 1 = 1.33377 loss)\nI0818 16:21:31.990849 17538 solver.cpp:228] Iteration 38900, loss = 0.0816281\nI0818 16:21:31.990890 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:21:31.990912 17538 solver.cpp:244]     Train net output #1: loss = 0.0816279 (* 1 = 
0.0816279 loss)\nI0818 16:21:32.071557 17538 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 16:23:49.162148 17538 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 16:25:11.310672 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78724\nI0818 16:25:11.310936 17538 solver.cpp:404]     Test net output #1: loss = 1.00564 (* 1 = 1.00564 loss)\nI0818 16:25:12.634469 17538 solver.cpp:228] Iteration 39000, loss = 0.064575\nI0818 16:25:12.634505 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:25:12.634522 17538 solver.cpp:244]     Train net output #1: loss = 0.0645748 (* 1 = 0.0645748 loss)\nI0818 16:25:12.719516 17538 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 16:27:29.871363 17538 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 16:28:52.001092 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78012\nI0818 16:28:52.001355 17538 solver.cpp:404]     Test net output #1: loss = 1.01803 (* 1 = 1.01803 loss)\nI0818 16:28:53.325192 17538 solver.cpp:228] Iteration 39100, loss = 0.0494995\nI0818 16:28:53.325227 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:28:53.325243 17538 solver.cpp:244]     Train net output #1: loss = 0.0494993 (* 1 = 0.0494993 loss)\nI0818 16:28:53.407485 17538 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 16:31:10.507500 17538 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 16:32:32.666118 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82796\nI0818 16:32:32.666406 17538 solver.cpp:404]     Test net output #1: loss = 0.797233 (* 1 = 0.797233 loss)\nI0818 16:32:33.989958 17538 solver.cpp:228] Iteration 39200, loss = 0.0441559\nI0818 16:32:33.989990 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:32:33.990006 17538 solver.cpp:244]     Train net output #1: loss = 0.0441557 (* 1 = 0.0441557 loss)\nI0818 16:32:34.075791 17538 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 16:34:51.081097 17538 
solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 16:36:13.283335 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0818 16:36:13.283598 17538 solver.cpp:404]     Test net output #1: loss = 0.882382 (* 1 = 0.882382 loss)\nI0818 16:36:14.607075 17538 solver.cpp:228] Iteration 39300, loss = 0.0812557\nI0818 16:36:14.607113 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:36:14.607131 17538 solver.cpp:244]     Train net output #1: loss = 0.0812555 (* 1 = 0.0812555 loss)\nI0818 16:36:14.691987 17538 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 16:38:31.756773 17538 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 16:39:53.905596 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7284\nI0818 16:39:53.905853 17538 solver.cpp:404]     Test net output #1: loss = 1.49256 (* 1 = 1.49256 loss)\nI0818 16:39:55.229409 17538 solver.cpp:228] Iteration 39400, loss = 0.0164388\nI0818 16:39:55.229449 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:39:55.229465 17538 solver.cpp:244]     Train net output #1: loss = 0.0164386 (* 1 = 0.0164386 loss)\nI0818 16:39:55.311240 17538 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 16:42:12.432042 17538 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 16:43:34.214223 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73184\nI0818 16:43:34.214503 17538 solver.cpp:404]     Test net output #1: loss = 1.37095 (* 1 = 1.37095 loss)\nI0818 16:43:35.538194 17538 solver.cpp:228] Iteration 39500, loss = 0.0194622\nI0818 16:43:35.538233 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:43:35.538249 17538 solver.cpp:244]     Train net output #1: loss = 0.019462 (* 1 = 0.019462 loss)\nI0818 16:43:35.626682 17538 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 16:45:52.734025 17538 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 16:47:14.679862 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.80892\nI0818 16:47:14.680112 17538 solver.cpp:404]     Test net output #1: loss = 0.80365 (* 1 = 0.80365 loss)\nI0818 16:47:16.003756 17538 solver.cpp:228] Iteration 39600, loss = 0.0935817\nI0818 16:47:16.003794 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:47:16.003810 17538 solver.cpp:244]     Train net output #1: loss = 0.0935815 (* 1 = 0.0935815 loss)\nI0818 16:47:16.090430 17538 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 16:49:33.209458 17538 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 16:50:55.135697 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81184\nI0818 16:50:55.136006 17538 solver.cpp:404]     Test net output #1: loss = 0.757112 (* 1 = 0.757112 loss)\nI0818 16:50:56.459506 17538 solver.cpp:228] Iteration 39700, loss = 0.0128568\nI0818 16:50:56.459544 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:50:56.459560 17538 solver.cpp:244]     Train net output #1: loss = 0.0128566 (* 1 = 0.0128566 loss)\nI0818 16:50:56.552304 17538 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 16:53:13.638463 17538 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 16:54:35.812919 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI0818 16:54:35.813163 17538 solver.cpp:404]     Test net output #1: loss = 0.901074 (* 1 = 0.901074 loss)\nI0818 16:54:37.137071 17538 solver.cpp:228] Iteration 39800, loss = 0.0317897\nI0818 16:54:37.137112 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:54:37.137128 17538 solver.cpp:244]     Train net output #1: loss = 0.0317895 (* 1 = 0.0317895 loss)\nI0818 16:54:37.219641 17538 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 16:56:54.330305 17538 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 16:58:16.494906 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81876\nI0818 16:58:16.495146 17538 solver.cpp:404]     Test net output #1: loss = 0.885844 (* 1 = 0.885844 loss)\nI0818 
16:58:17.819187 17538 solver.cpp:228] Iteration 39900, loss = 0.055314\nI0818 16:58:17.819226 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:58:17.819243 17538 solver.cpp:244]     Train net output #1: loss = 0.0553138 (* 1 = 0.0553138 loss)\nI0818 16:58:17.909238 17538 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 17:00:35.081100 17538 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 17:01:56.874712 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76712\nI0818 17:01:56.874986 17538 solver.cpp:404]     Test net output #1: loss = 1.10259 (* 1 = 1.10259 loss)\nI0818 17:01:58.198629 17538 solver.cpp:228] Iteration 40000, loss = 0.0590518\nI0818 17:01:58.198669 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:01:58.198685 17538 solver.cpp:244]     Train net output #1: loss = 0.0590517 (* 1 = 0.0590517 loss)\nI0818 17:01:58.287354 17538 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 17:04:15.368886 17538 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 17:05:37.536829 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7764\nI0818 17:05:37.537101 17538 solver.cpp:404]     Test net output #1: loss = 0.913625 (* 1 = 0.913625 loss)\nI0818 17:05:38.861269 17538 solver.cpp:228] Iteration 40100, loss = 0.0819629\nI0818 17:05:38.861315 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:05:38.861330 17538 solver.cpp:244]     Train net output #1: loss = 0.0819627 (* 1 = 0.0819627 loss)\nI0818 17:05:38.945741 17538 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0818 17:07:56.035823 17538 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 17:09:18.285370 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82592\nI0818 17:09:18.285655 17538 solver.cpp:404]     Test net output #1: loss = 0.73256 (* 1 = 0.73256 loss)\nI0818 17:09:19.610998 17538 solver.cpp:228] Iteration 40200, loss = 0.0390119\nI0818 17:09:19.611043 17538 solver.cpp:244]     Train 
net output #0: accuracy = 0.976\nI0818 17:09:19.611066 17538 solver.cpp:244]     Train net output #1: loss = 0.0390117 (* 1 = 0.0390117 loss)\nI0818 17:09:19.692827 17538 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 17:11:36.796535 17538 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 17:12:59.013006 17538 solver.cpp:404]     Test net output #0: accuracy = 0.72216\nI0818 17:12:59.013303 17538 solver.cpp:404]     Test net output #1: loss = 1.53889 (* 1 = 1.53889 loss)\nI0818 17:13:00.338385 17538 solver.cpp:228] Iteration 40300, loss = 0.016512\nI0818 17:13:00.338429 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:13:00.338452 17538 solver.cpp:244]     Train net output #1: loss = 0.0165118 (* 1 = 0.0165118 loss)\nI0818 17:13:00.421430 17538 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 17:15:17.569146 17538 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 17:16:39.711329 17538 solver.cpp:404]     Test net output #0: accuracy = 0.819\nI0818 17:16:39.711606 17538 solver.cpp:404]     Test net output #1: loss = 0.716648 (* 1 = 0.716648 loss)\nI0818 17:16:41.036267 17538 solver.cpp:228] Iteration 40400, loss = 0.0307029\nI0818 17:16:41.036310 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:16:41.036334 17538 solver.cpp:244]     Train net output #1: loss = 0.0307027 (* 1 = 0.0307027 loss)\nI0818 17:16:41.121872 17538 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 17:18:58.167304 17538 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 17:20:20.425798 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8106\nI0818 17:20:20.426126 17538 solver.cpp:404]     Test net output #1: loss = 0.826242 (* 1 = 0.826242 loss)\nI0818 17:20:21.751310 17538 solver.cpp:228] Iteration 40500, loss = 0.0311114\nI0818 17:20:21.751354 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:20:21.751379 17538 solver.cpp:244]     Train net output #1: loss = 0.0311112 (* 1 = 
0.0311112 loss)\nI0818 17:20:21.838209 17538 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 17:22:39.017204 17538 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 17:24:01.294088 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82904\nI0818 17:24:01.294404 17538 solver.cpp:404]     Test net output #1: loss = 0.80347 (* 1 = 0.80347 loss)\nI0818 17:24:02.619374 17538 solver.cpp:228] Iteration 40600, loss = 0.0447423\nI0818 17:24:02.619417 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:24:02.619441 17538 solver.cpp:244]     Train net output #1: loss = 0.0447421 (* 1 = 0.0447421 loss)\nI0818 17:24:02.699095 17538 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 17:26:19.829679 17538 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 17:27:42.029070 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77144\nI0818 17:27:42.029323 17538 solver.cpp:404]     Test net output #1: loss = 1.07256 (* 1 = 1.07256 loss)\nI0818 17:27:43.354518 17538 solver.cpp:228] Iteration 40700, loss = 0.0656715\nI0818 17:27:43.354562 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:27:43.354585 17538 solver.cpp:244]     Train net output #1: loss = 0.0656713 (* 1 = 0.0656713 loss)\nI0818 17:27:43.436316 17538 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 17:30:00.651473 17538 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 17:31:22.879374 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77064\nI0818 17:31:22.879699 17538 solver.cpp:404]     Test net output #1: loss = 1.12056 (* 1 = 1.12056 loss)\nI0818 17:31:24.204612 17538 solver.cpp:228] Iteration 40800, loss = 0.0487752\nI0818 17:31:24.204656 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:31:24.204679 17538 solver.cpp:244]     Train net output #1: loss = 0.048775 (* 1 = 0.048775 loss)\nI0818 17:31:24.291321 17538 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 17:33:41.448673 17538 
solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 17:35:03.688906 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7994\nI0818 17:35:03.689185 17538 solver.cpp:404]     Test net output #1: loss = 0.926559 (* 1 = 0.926559 loss)\nI0818 17:35:05.013460 17538 solver.cpp:228] Iteration 40900, loss = 0.0515057\nI0818 17:35:05.013509 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:35:05.013533 17538 solver.cpp:244]     Train net output #1: loss = 0.0515055 (* 1 = 0.0515055 loss)\nI0818 17:35:05.098291 17538 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 17:37:22.253602 17538 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 17:38:44.501072 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81588\nI0818 17:38:44.501325 17538 solver.cpp:404]     Test net output #1: loss = 0.800172 (* 1 = 0.800172 loss)\nI0818 17:38:45.826196 17538 solver.cpp:228] Iteration 41000, loss = 0.037875\nI0818 17:38:45.826241 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:38:45.826267 17538 solver.cpp:244]     Train net output #1: loss = 0.0378748 (* 1 = 0.0378748 loss)\nI0818 17:38:45.903537 17538 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 17:41:03.122882 17538 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 17:42:25.346880 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79132\nI0818 17:42:25.347172 17538 solver.cpp:404]     Test net output #1: loss = 0.952588 (* 1 = 0.952588 loss)\nI0818 17:42:26.672968 17538 solver.cpp:228] Iteration 41100, loss = 0.0399279\nI0818 17:42:26.673012 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:42:26.673036 17538 solver.cpp:244]     Train net output #1: loss = 0.0399277 (* 1 = 0.0399277 loss)\nI0818 17:42:26.757865 17538 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 17:44:43.972966 17538 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 17:46:06.214447 17538 solver.cpp:404]     Test net output #0: 
accuracy = 0.73524\nI0818 17:46:06.214702 17538 solver.cpp:404]     Test net output #1: loss = 1.29971 (* 1 = 1.29971 loss)\nI0818 17:46:07.539878 17538 solver.cpp:228] Iteration 41200, loss = 0.0239866\nI0818 17:46:07.539922 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:46:07.539947 17538 solver.cpp:244]     Train net output #1: loss = 0.0239865 (* 1 = 0.0239865 loss)\nI0818 17:46:07.627602 17538 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0818 17:48:24.833734 17538 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 17:49:47.070952 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0818 17:49:47.071246 17538 solver.cpp:404]     Test net output #1: loss = 0.770105 (* 1 = 0.770105 loss)\nI0818 17:49:48.394748 17538 solver.cpp:228] Iteration 41300, loss = 0.115086\nI0818 17:49:48.394793 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:49:48.394817 17538 solver.cpp:244]     Train net output #1: loss = 0.115086 (* 1 = 0.115086 loss)\nI0818 17:49:48.480852 17538 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 17:52:05.793366 17538 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 17:53:28.022819 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8262\nI0818 17:53:28.023128 17538 solver.cpp:404]     Test net output #1: loss = 0.680021 (* 1 = 0.680021 loss)\nI0818 17:53:29.347666 17538 solver.cpp:228] Iteration 41400, loss = 0.0606792\nI0818 17:53:29.347710 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:53:29.347733 17538 solver.cpp:244]     Train net output #1: loss = 0.060679 (* 1 = 0.060679 loss)\nI0818 17:53:29.431121 17538 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 17:55:46.624536 17538 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 17:57:08.846153 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81244\nI0818 17:57:08.846434 17538 solver.cpp:404]     Test net output #1: loss = 0.812628 (* 1 = 0.812628 
loss)\nI0818 17:57:10.171056 17538 solver.cpp:228] Iteration 41500, loss = 0.0311945\nI0818 17:57:10.171100 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:57:10.171124 17538 solver.cpp:244]     Train net output #1: loss = 0.0311943 (* 1 = 0.0311943 loss)\nI0818 17:57:10.260044 17538 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 17:59:27.485692 17538 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 18:00:49.681972 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8028\nI0818 18:00:49.682296 17538 solver.cpp:404]     Test net output #1: loss = 0.840197 (* 1 = 0.840197 loss)\nI0818 18:00:51.006219 17538 solver.cpp:228] Iteration 41600, loss = 0.0162526\nI0818 18:00:51.006264 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:00:51.006289 17538 solver.cpp:244]     Train net output #1: loss = 0.0162525 (* 1 = 0.0162525 loss)\nI0818 18:00:51.088273 17538 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 18:03:08.221362 17538 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 18:04:30.431335 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79324\nI0818 18:04:30.431617 17538 solver.cpp:404]     Test net output #1: loss = 0.948411 (* 1 = 0.948411 loss)\nI0818 18:04:31.756618 17538 solver.cpp:228] Iteration 41700, loss = 0.0679619\nI0818 18:04:31.756661 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:04:31.756685 17538 solver.cpp:244]     Train net output #1: loss = 0.0679617 (* 1 = 0.0679617 loss)\nI0818 18:04:31.838654 17538 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 18:06:49.038066 17538 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 18:08:11.242061 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77512\nI0818 18:08:11.242336 17538 solver.cpp:404]     Test net output #1: loss = 1.02059 (* 1 = 1.02059 loss)\nI0818 18:08:12.566370 17538 solver.cpp:228] Iteration 41800, loss = 0.0163261\nI0818 18:08:12.566408 17538 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:08:12.566424 17538 solver.cpp:244]     Train net output #1: loss = 0.0163259 (* 1 = 0.0163259 loss)\nI0818 18:08:12.655364 17538 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 18:10:29.938961 17538 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 18:11:52.155925 17538 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0818 18:11:52.156234 17538 solver.cpp:404]     Test net output #1: loss = 0.931179 (* 1 = 0.931179 loss)\nI0818 18:11:53.480126 17538 solver.cpp:228] Iteration 41900, loss = 0.0511602\nI0818 18:11:53.480166 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:11:53.480183 17538 solver.cpp:244]     Train net output #1: loss = 0.05116 (* 1 = 0.05116 loss)\nI0818 18:11:53.561115 17538 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 18:14:10.671643 17538 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 18:15:32.863234 17538 solver.cpp:404]     Test net output #0: accuracy = 0.84756\nI0818 18:15:32.863514 17538 solver.cpp:404]     Test net output #1: loss = 0.682709 (* 1 = 0.682709 loss)\nI0818 18:15:34.187114 17538 solver.cpp:228] Iteration 42000, loss = 0.0238205\nI0818 18:15:34.187165 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:15:34.187182 17538 solver.cpp:244]     Train net output #1: loss = 0.0238203 (* 1 = 0.0238203 loss)\nI0818 18:15:34.274930 17538 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 18:17:51.452384 17538 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 18:19:13.645587 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77732\nI0818 18:19:13.645880 17538 solver.cpp:404]     Test net output #1: loss = 1.14947 (* 1 = 1.14947 loss)\nI0818 18:19:14.969545 17538 solver.cpp:228] Iteration 42100, loss = 0.0296363\nI0818 18:19:14.969583 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:19:14.969599 17538 solver.cpp:244]     Train net output #1: loss 
= 0.0296362 (* 1 = 0.0296362 loss)\nI0818 18:19:15.060801 17538 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 18:21:32.202191 17538 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 18:22:54.410759 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81212\nI0818 18:22:54.411080 17538 solver.cpp:404]     Test net output #1: loss = 0.863841 (* 1 = 0.863841 loss)\nI0818 18:22:55.734937 17538 solver.cpp:228] Iteration 42200, loss = 0.0408723\nI0818 18:22:55.734977 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:22:55.734993 17538 solver.cpp:244]     Train net output #1: loss = 0.0408721 (* 1 = 0.0408721 loss)\nI0818 18:22:55.819288 17538 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 18:25:12.988018 17538 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 18:26:35.225320 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI0818 18:26:35.225580 17538 solver.cpp:404]     Test net output #1: loss = 0.987249 (* 1 = 0.987249 loss)\nI0818 18:26:36.549451 17538 solver.cpp:228] Iteration 42300, loss = 0.0608049\nI0818 18:26:36.549500 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:26:36.549518 17538 solver.cpp:244]     Train net output #1: loss = 0.0608047 (* 1 = 0.0608047 loss)\nI0818 18:26:36.628983 17538 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 18:28:53.805702 17538 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 18:30:16.052846 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74932\nI0818 18:30:16.053135 17538 solver.cpp:404]     Test net output #1: loss = 1.42195 (* 1 = 1.42195 loss)\nI0818 18:30:17.377146 17538 solver.cpp:228] Iteration 42400, loss = 0.028864\nI0818 18:30:17.377185 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:30:17.377202 17538 solver.cpp:244]     Train net output #1: loss = 0.0288638 (* 1 = 0.0288638 loss)\nI0818 18:30:17.463531 17538 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 
18:32:34.716097 17538 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 18:33:56.911849 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77784\nI0818 18:33:56.912137 17538 solver.cpp:404]     Test net output #1: loss = 1.01005 (* 1 = 1.01005 loss)\nI0818 18:33:58.235329 17538 solver.cpp:228] Iteration 42500, loss = 0.0298084\nI0818 18:33:58.235379 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:33:58.235396 17538 solver.cpp:244]     Train net output #1: loss = 0.0298082 (* 1 = 0.0298082 loss)\nI0818 18:33:58.317837 17538 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 18:36:15.460291 17538 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 18:37:37.716778 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80436\nI0818 18:37:37.717056 17538 solver.cpp:404]     Test net output #1: loss = 0.974541 (* 1 = 0.974541 loss)\nI0818 18:37:39.040755 17538 solver.cpp:228] Iteration 42600, loss = 0.0230715\nI0818 18:37:39.040796 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:37:39.040812 17538 solver.cpp:244]     Train net output #1: loss = 0.0230713 (* 1 = 0.0230713 loss)\nI0818 18:37:39.126904 17538 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 18:39:56.414278 17538 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 18:41:18.664846 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82052\nI0818 18:41:18.665163 17538 solver.cpp:404]     Test net output #1: loss = 0.766117 (* 1 = 0.766117 loss)\nI0818 18:41:19.989265 17538 solver.cpp:228] Iteration 42700, loss = 0.0479251\nI0818 18:41:19.989305 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:41:19.989326 17538 solver.cpp:244]     Train net output #1: loss = 0.047925 (* 1 = 0.047925 loss)\nI0818 18:41:20.084599 17538 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 18:43:37.301503 17538 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 18:44:59.493778 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.80572\nI0818 18:44:59.494052 17538 solver.cpp:404]     Test net output #1: loss = 0.874691 (* 1 = 0.874691 loss)\nI0818 18:45:00.817726 17538 solver.cpp:228] Iteration 42800, loss = 0.0623702\nI0818 18:45:00.817777 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:45:00.817795 17538 solver.cpp:244]     Train net output #1: loss = 0.0623701 (* 1 = 0.0623701 loss)\nI0818 18:45:00.900050 17538 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 18:47:18.103313 17538 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 18:48:40.311367 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82888\nI0818 18:48:40.311643 17538 solver.cpp:404]     Test net output #1: loss = 0.70258 (* 1 = 0.70258 loss)\nI0818 18:48:41.635160 17538 solver.cpp:228] Iteration 42900, loss = 0.0123369\nI0818 18:48:41.635200 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:48:41.635215 17538 solver.cpp:244]     Train net output #1: loss = 0.0123368 (* 1 = 0.0123368 loss)\nI0818 18:48:41.722110 17538 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 18:50:58.892338 17538 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 18:52:20.861951 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75796\nI0818 18:52:20.862248 17538 solver.cpp:404]     Test net output #1: loss = 1.05124 (* 1 = 1.05124 loss)\nI0818 18:52:22.185945 17538 solver.cpp:228] Iteration 43000, loss = 0.106998\nI0818 18:52:22.185995 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:52:22.186012 17538 solver.cpp:244]     Train net output #1: loss = 0.106998 (* 1 = 0.106998 loss)\nI0818 18:52:22.266688 17538 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 18:54:39.469034 17538 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 18:56:01.708791 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0818 18:56:01.709040 17538 solver.cpp:404]     Test net output #1: loss = 1.18799 (* 1 = 1.18799 
loss)\nI0818 18:56:03.033345 17538 solver.cpp:228] Iteration 43100, loss = 0.0279635\nI0818 18:56:03.033394 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:56:03.033411 17538 solver.cpp:244]     Train net output #1: loss = 0.0279634 (* 1 = 0.0279634 loss)\nI0818 18:56:03.117640 17538 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 18:58:20.276360 17538 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 18:59:42.521431 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0818 18:59:42.521695 17538 solver.cpp:404]     Test net output #1: loss = 1.2303 (* 1 = 1.2303 loss)\nI0818 18:59:43.846282 17538 solver.cpp:228] Iteration 43200, loss = 0.0283238\nI0818 18:59:43.846323 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:59:43.846339 17538 solver.cpp:244]     Train net output #1: loss = 0.0283237 (* 1 = 0.0283237 loss)\nI0818 18:59:43.926556 17538 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 19:02:01.056715 17538 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 19:03:23.279397 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7682\nI0818 19:03:23.279716 17538 solver.cpp:404]     Test net output #1: loss = 1.23273 (* 1 = 1.23273 loss)\nI0818 19:03:24.603883 17538 solver.cpp:228] Iteration 43300, loss = 0.0677444\nI0818 19:03:24.603922 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:03:24.603937 17538 solver.cpp:244]     Train net output #1: loss = 0.0677442 (* 1 = 0.0677442 loss)\nI0818 19:03:24.692620 17538 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 19:05:41.741547 17538 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 19:07:03.054522 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77968\nI0818 19:07:03.054837 17538 solver.cpp:404]     Test net output #1: loss = 0.977311 (* 1 = 0.977311 loss)\nI0818 19:07:04.379351 17538 solver.cpp:228] Iteration 43400, loss = 0.0595018\nI0818 19:07:04.379386 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:07:04.379401 17538 solver.cpp:244]     Train net output #1: loss = 0.0595017 (* 1 = 0.0595017 loss)\nI0818 19:07:04.467495 17538 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 19:09:21.433015 17538 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 19:10:42.741502 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7328\nI0818 19:10:42.741825 17538 solver.cpp:404]     Test net output #1: loss = 1.26885 (* 1 = 1.26885 loss)\nI0818 19:10:44.065831 17538 solver.cpp:228] Iteration 43500, loss = 0.0534175\nI0818 19:10:44.065865 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:10:44.065881 17538 solver.cpp:244]     Train net output #1: loss = 0.0534174 (* 1 = 0.0534174 loss)\nI0818 19:10:44.148317 17538 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 19:13:01.177247 17538 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 19:14:22.461896 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79596\nI0818 19:14:22.462226 17538 solver.cpp:404]     Test net output #1: loss = 0.869247 (* 1 = 0.869247 loss)\nI0818 19:14:23.787334 17538 solver.cpp:228] Iteration 43600, loss = 0.057455\nI0818 19:14:23.787367 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:14:23.787384 17538 solver.cpp:244]     Train net output #1: loss = 0.0574549 (* 1 = 0.0574549 loss)\nI0818 19:14:23.870770 17538 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 19:16:40.819974 17538 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 19:18:02.136543 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78064\nI0818 19:18:02.136865 17538 solver.cpp:404]     Test net output #1: loss = 1.1376 (* 1 = 1.1376 loss)\nI0818 19:18:03.462087 17538 solver.cpp:228] Iteration 43700, loss = 0.0191688\nI0818 19:18:03.462121 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:18:03.462136 17538 solver.cpp:244]     Train net output #1: 
loss = 0.0191687 (* 1 = 0.0191687 loss)\nI0818 19:18:03.549952 17538 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 19:20:20.554494 17538 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 19:21:41.873808 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75268\nI0818 19:21:41.874132 17538 solver.cpp:404]     Test net output #1: loss = 1.26419 (* 1 = 1.26419 loss)\nI0818 19:21:43.199383 17538 solver.cpp:228] Iteration 43800, loss = 0.0407363\nI0818 19:21:43.199422 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:21:43.199439 17538 solver.cpp:244]     Train net output #1: loss = 0.0407362 (* 1 = 0.0407362 loss)\nI0818 19:21:43.285974 17538 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 19:24:00.258383 17538 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 19:25:21.605271 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80132\nI0818 19:25:21.605602 17538 solver.cpp:404]     Test net output #1: loss = 0.957224 (* 1 = 0.957224 loss)\nI0818 19:25:22.930362 17538 solver.cpp:228] Iteration 43900, loss = 0.0394195\nI0818 19:25:22.930397 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:25:22.930413 17538 solver.cpp:244]     Train net output #1: loss = 0.0394194 (* 1 = 0.0394194 loss)\nI0818 19:25:23.025483 17538 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 19:27:40.034775 17538 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 19:29:01.371959 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7612\nI0818 19:29:01.372272 17538 solver.cpp:404]     Test net output #1: loss = 1.33332 (* 1 = 1.33332 loss)\nI0818 19:29:02.697414 17538 solver.cpp:228] Iteration 44000, loss = 0.0290998\nI0818 19:29:02.697451 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:29:02.697474 17538 solver.cpp:244]     Train net output #1: loss = 0.0290997 (* 1 = 0.0290997 loss)\nI0818 19:29:02.784035 17538 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 
19:31:19.738960 17538 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 19:32:41.101560 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82692\nI0818 19:32:41.101908 17538 solver.cpp:404]     Test net output #1: loss = 0.78058 (* 1 = 0.78058 loss)\nI0818 19:32:42.426388 17538 solver.cpp:228] Iteration 44100, loss = 0.0182621\nI0818 19:32:42.426427 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:32:42.426450 17538 solver.cpp:244]     Train net output #1: loss = 0.0182619 (* 1 = 0.0182619 loss)\nI0818 19:32:42.515647 17538 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 19:34:59.508509 17538 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 19:36:20.855435 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77836\nI0818 19:36:20.855738 17538 solver.cpp:404]     Test net output #1: loss = 0.984969 (* 1 = 0.984969 loss)\nI0818 19:36:22.180472 17538 solver.cpp:228] Iteration 44200, loss = 0.0379306\nI0818 19:36:22.180512 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:36:22.180536 17538 solver.cpp:244]     Train net output #1: loss = 0.0379304 (* 1 = 0.0379304 loss)\nI0818 19:36:22.268149 17538 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 19:38:39.199113 17538 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 19:40:00.545183 17538 solver.cpp:404]     Test net output #0: accuracy = 0.83188\nI0818 19:40:00.545491 17538 solver.cpp:404]     Test net output #1: loss = 0.70152 (* 1 = 0.70152 loss)\nI0818 19:40:01.870673 17538 solver.cpp:228] Iteration 44300, loss = 0.0373751\nI0818 19:40:01.870712 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:40:01.870733 17538 solver.cpp:244]     Train net output #1: loss = 0.0373749 (* 1 = 0.0373749 loss)\nI0818 19:40:01.950171 17538 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 19:42:18.983019 17538 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 19:43:40.346524 17538 solver.cpp:404]     Test 
net output #0: accuracy = 0.76644\nI0818 19:43:40.346899 17538 solver.cpp:404]     Test net output #1: loss = 1.17693 (* 1 = 1.17693 loss)\nI0818 19:43:41.671325 17538 solver.cpp:228] Iteration 44400, loss = 0.0387981\nI0818 19:43:41.671363 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:43:41.671386 17538 solver.cpp:244]     Train net output #1: loss = 0.038798 (* 1 = 0.038798 loss)\nI0818 19:43:41.759577 17538 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 19:45:58.785950 17538 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 19:47:20.138957 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74128\nI0818 19:47:20.139272 17538 solver.cpp:404]     Test net output #1: loss = 1.34666 (* 1 = 1.34666 loss)\nI0818 19:47:21.464735 17538 solver.cpp:228] Iteration 44500, loss = 0.0625888\nI0818 19:47:21.464772 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:47:21.464795 17538 solver.cpp:244]     Train net output #1: loss = 0.0625887 (* 1 = 0.0625887 loss)\nI0818 19:47:21.553863 17538 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 19:49:38.706470 17538 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 19:51:00.018355 17538 solver.cpp:404]     Test net output #0: accuracy = 0.757\nI0818 19:51:00.018710 17538 solver.cpp:404]     Test net output #1: loss = 1.2816 (* 1 = 1.2816 loss)\nI0818 19:51:01.342551 17538 solver.cpp:228] Iteration 44600, loss = 0.0459768\nI0818 19:51:01.342591 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:51:01.342613 17538 solver.cpp:244]     Train net output #1: loss = 0.0459766 (* 1 = 0.0459766 loss)\nI0818 19:51:01.423712 17538 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 19:53:18.439483 17538 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 19:54:39.845192 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80736\nI0818 19:54:39.845520 17538 solver.cpp:404]     Test net output #1: loss = 0.883539 (* 1 = 
0.883539 loss)\nI0818 19:54:41.169600 17538 solver.cpp:228] Iteration 44700, loss = 0.0930926\nI0818 19:54:41.169633 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:54:41.169649 17538 solver.cpp:244]     Train net output #1: loss = 0.0930925 (* 1 = 0.0930925 loss)\nI0818 19:54:41.255681 17538 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 19:56:58.137389 17538 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 19:58:19.487179 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77712\nI0818 19:58:19.487476 17538 solver.cpp:404]     Test net output #1: loss = 1.10073 (* 1 = 1.10073 loss)\nI0818 19:58:20.811391 17538 solver.cpp:228] Iteration 44800, loss = 0.0296469\nI0818 19:58:20.811429 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:58:20.811445 17538 solver.cpp:244]     Train net output #1: loss = 0.0296467 (* 1 = 0.0296467 loss)\nI0818 19:58:20.889839 17538 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 20:00:37.622107 17538 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 20:01:58.790374 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74292\nI0818 20:01:58.790674 17538 solver.cpp:404]     Test net output #1: loss = 1.27486 (* 1 = 1.27486 loss)\nI0818 20:02:00.114387 17538 solver.cpp:228] Iteration 44900, loss = 0.013633\nI0818 20:02:00.114423 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:02:00.114439 17538 solver.cpp:244]     Train net output #1: loss = 0.0136329 (* 1 = 0.0136329 loss)\nI0818 20:02:00.200083 17538 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 20:04:16.956502 17538 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 20:05:38.158746 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8216\nI0818 20:05:38.159072 17538 solver.cpp:404]     Test net output #1: loss = 0.758288 (* 1 = 0.758288 loss)\nI0818 20:05:39.483451 17538 solver.cpp:228] Iteration 45000, loss = 0.0226282\nI0818 20:05:39.483489 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:05:39.483505 17538 solver.cpp:244]     Train net output #1: loss = 0.0226281 (* 1 = 0.0226281 loss)\nI0818 20:05:39.562485 17538 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 20:07:56.310524 17538 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 20:09:17.496947 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7842\nI0818 20:09:17.497270 17538 solver.cpp:404]     Test net output #1: loss = 0.928087 (* 1 = 0.928087 loss)\nI0818 20:09:18.821519 17538 solver.cpp:228] Iteration 45100, loss = 0.0343601\nI0818 20:09:18.821555 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:09:18.821570 17538 solver.cpp:244]     Train net output #1: loss = 0.03436 (* 1 = 0.03436 loss)\nI0818 20:09:18.907650 17538 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 20:11:35.601141 17538 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 20:12:56.783293 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77676\nI0818 20:12:56.783624 17538 solver.cpp:404]     Test net output #1: loss = 1.07548 (* 1 = 1.07548 loss)\nI0818 20:12:58.108003 17538 solver.cpp:228] Iteration 45200, loss = 0.117151\nI0818 20:12:58.108042 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:12:58.108057 17538 solver.cpp:244]     Train net output #1: loss = 0.117151 (* 1 = 0.117151 loss)\nI0818 20:12:58.194499 17538 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 20:15:14.967665 17538 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 20:16:36.157168 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80796\nI0818 20:16:36.157505 17538 solver.cpp:404]     Test net output #1: loss = 0.899075 (* 1 = 0.899075 loss)\nI0818 20:16:37.482758 17538 solver.cpp:228] Iteration 45300, loss = 0.0235135\nI0818 20:16:37.482795 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:16:37.482820 17538 solver.cpp:244]     Train net output #1: 
loss = 0.0235133 (* 1 = 0.0235133 loss)\nI0818 20:16:37.564705 17538 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 20:18:54.351029 17538 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 20:20:15.528023 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78768\nI0818 20:20:15.528362 17538 solver.cpp:404]     Test net output #1: loss = 1.06565 (* 1 = 1.06565 loss)\nI0818 20:20:16.853688 17538 solver.cpp:228] Iteration 45400, loss = 0.0265147\nI0818 20:20:16.853727 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:20:16.853751 17538 solver.cpp:244]     Train net output #1: loss = 0.0265145 (* 1 = 0.0265145 loss)\nI0818 20:20:16.938843 17538 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 20:22:33.674132 17538 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 20:23:54.848837 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79844\nI0818 20:23:54.849180 17538 solver.cpp:404]     Test net output #1: loss = 1.01666 (* 1 = 1.01666 loss)\nI0818 20:23:56.174906 17538 solver.cpp:228] Iteration 45500, loss = 0.017386\nI0818 20:23:56.174944 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:23:56.174968 17538 solver.cpp:244]     Train net output #1: loss = 0.0173858 (* 1 = 0.0173858 loss)\nI0818 20:23:56.254045 17538 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 20:26:12.960525 17538 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 20:27:34.141180 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82592\nI0818 20:27:34.141518 17538 solver.cpp:404]     Test net output #1: loss = 0.795771 (* 1 = 0.795771 loss)\nI0818 20:27:35.466511 17538 solver.cpp:228] Iteration 45600, loss = 0.0678998\nI0818 20:27:35.466552 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:27:35.466574 17538 solver.cpp:244]     Train net output #1: loss = 0.0678996 (* 1 = 0.0678996 loss)\nI0818 20:27:35.543212 17538 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 
20:29:52.284657 17538 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 20:31:13.457594 17538 solver.cpp:404]     Test net output #0: accuracy = 0.76676\nI0818 20:31:13.457926 17538 solver.cpp:404]     Test net output #1: loss = 1.21712 (* 1 = 1.21712 loss)\nI0818 20:31:14.783082 17538 solver.cpp:228] Iteration 45700, loss = 0.0242769\nI0818 20:31:14.783120 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:31:14.783143 17538 solver.cpp:244]     Train net output #1: loss = 0.0242767 (* 1 = 0.0242767 loss)\nI0818 20:31:14.864459 17538 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 20:33:31.609555 17538 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 20:34:52.949534 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7558\nI0818 20:34:52.949869 17538 solver.cpp:404]     Test net output #1: loss = 1.27687 (* 1 = 1.27687 loss)\nI0818 20:34:54.275183 17538 solver.cpp:228] Iteration 45800, loss = 0.0332887\nI0818 20:34:54.275221 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:34:54.275244 17538 solver.cpp:244]     Train net output #1: loss = 0.0332885 (* 1 = 0.0332885 loss)\nI0818 20:34:54.353571 17538 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0818 20:37:11.016379 17538 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 20:38:32.332111 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7904\nI0818 20:38:32.332454 17538 solver.cpp:404]     Test net output #1: loss = 1.07771 (* 1 = 1.07771 loss)\nI0818 20:38:33.656991 17538 solver.cpp:228] Iteration 45900, loss = 0.0150612\nI0818 20:38:33.657029 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:38:33.657052 17538 solver.cpp:244]     Train net output #1: loss = 0.015061 (* 1 = 0.015061 loss)\nI0818 20:38:33.741497 17538 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 20:40:50.446060 17538 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 20:42:11.788357 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.76776\nI0818 20:42:11.788697 17538 solver.cpp:404]     Test net output #1: loss = 1.12271 (* 1 = 1.12271 loss)\nI0818 20:42:13.114051 17538 solver.cpp:228] Iteration 46000, loss = 0.0364396\nI0818 20:42:13.114089 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:42:13.114112 17538 solver.cpp:244]     Train net output #1: loss = 0.0364394 (* 1 = 0.0364394 loss)\nI0818 20:42:13.197852 17538 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 20:44:29.960628 17538 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 20:45:51.334911 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8042\nI0818 20:45:51.335244 17538 solver.cpp:404]     Test net output #1: loss = 1.01257 (* 1 = 1.01257 loss)\nI0818 20:45:52.659375 17538 solver.cpp:228] Iteration 46100, loss = 0.0430232\nI0818 20:45:52.659420 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:45:52.659442 17538 solver.cpp:244]     Train net output #1: loss = 0.043023 (* 1 = 0.043023 loss)\nI0818 20:45:52.747282 17538 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 20:48:09.540422 17538 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 20:49:30.946740 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71872\nI0818 20:49:30.947070 17538 solver.cpp:404]     Test net output #1: loss = 1.55747 (* 1 = 1.55747 loss)\nI0818 20:49:32.271976 17538 solver.cpp:228] Iteration 46200, loss = 0.101342\nI0818 20:49:32.272014 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:49:32.272037 17538 solver.cpp:244]     Train net output #1: loss = 0.101342 (* 1 = 0.101342 loss)\nI0818 20:49:32.354882 17538 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 20:51:49.134297 17538 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 20:53:10.502244 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77948\nI0818 20:53:10.502584 17538 solver.cpp:404]     Test net output #1: loss = 1.1855 (* 1 = 1.1855 
loss)\nI0818 20:53:11.828349 17538 solver.cpp:228] Iteration 46300, loss = 0.0108807\nI0818 20:53:11.828387 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:53:11.828416 17538 solver.cpp:244]     Train net output #1: loss = 0.0108806 (* 1 = 0.0108806 loss)\nI0818 20:53:11.953821 17538 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 20:55:28.722283 17538 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 20:56:50.072372 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7908\nI0818 20:56:50.072710 17538 solver.cpp:404]     Test net output #1: loss = 0.993277 (* 1 = 0.993277 loss)\nI0818 20:56:51.398012 17538 solver.cpp:228] Iteration 46400, loss = 0.081706\nI0818 20:56:51.398048 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:56:51.398066 17538 solver.cpp:244]     Train net output #1: loss = 0.0817058 (* 1 = 0.0817058 loss)\nI0818 20:56:51.479701 17538 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 20:59:08.541302 17538 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 21:00:30.812238 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79176\nI0818 21:00:30.812564 17538 solver.cpp:404]     Test net output #1: loss = 1.05174 (* 1 = 1.05174 loss)\nI0818 21:00:32.145489 17538 solver.cpp:228] Iteration 46500, loss = 0.0426818\nI0818 21:00:32.145545 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:00:32.145568 17538 solver.cpp:244]     Train net output #1: loss = 0.0426817 (* 1 = 0.0426817 loss)\nI0818 21:00:32.221243 17538 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 21:02:49.439523 17538 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 21:04:11.673270 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75232\nI0818 21:04:11.673560 17538 solver.cpp:404]     Test net output #1: loss = 1.34857 (* 1 = 1.34857 loss)\nI0818 21:04:13.001416 17538 solver.cpp:228] Iteration 46600, loss = 0.0371367\nI0818 21:04:13.001462 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:04:13.001490 17538 solver.cpp:244]     Train net output #1: loss = 0.0371366 (* 1 = 0.0371366 loss)\nI0818 21:04:13.080849 17538 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 21:06:30.492977 17538 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 21:07:52.742682 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75856\nI0818 21:07:52.742944 17538 solver.cpp:404]     Test net output #1: loss = 1.09739 (* 1 = 1.09739 loss)\nI0818 21:07:54.071874 17538 solver.cpp:228] Iteration 46700, loss = 0.0228281\nI0818 21:07:54.071918 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:07:54.071941 17538 solver.cpp:244]     Train net output #1: loss = 0.022828 (* 1 = 0.022828 loss)\nI0818 21:07:54.154203 17538 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 21:10:11.623200 17538 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 21:11:33.854830 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0818 21:11:33.855114 17538 solver.cpp:404]     Test net output #1: loss = 0.69052 (* 1 = 0.69052 loss)\nI0818 21:11:35.183557 17538 solver.cpp:228] Iteration 46800, loss = 0.00598108\nI0818 21:11:35.183600 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:11:35.183624 17538 solver.cpp:244]     Train net output #1: loss = 0.00598092 (* 1 = 0.00598092 loss)\nI0818 21:11:35.266957 17538 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0818 21:13:52.806601 17538 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 21:15:14.208145 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81064\nI0818 21:15:14.208446 17538 solver.cpp:404]     Test net output #1: loss = 0.851607 (* 1 = 0.851607 loss)\nI0818 21:15:15.533288 17538 solver.cpp:228] Iteration 46900, loss = 0.0325079\nI0818 21:15:15.533325 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:15:15.533340 17538 solver.cpp:244]     Train net output #1: 
loss = 0.0325077 (* 1 = 0.0325077 loss)\nI0818 21:15:15.621489 17538 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 21:17:33.115105 17538 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 21:18:55.390703 17538 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0818 21:18:55.390983 17538 solver.cpp:404]     Test net output #1: loss = 0.982336 (* 1 = 0.982336 loss)\nI0818 21:18:56.719405 17538 solver.cpp:228] Iteration 47000, loss = 0.020895\nI0818 21:18:56.719449 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:18:56.719472 17538 solver.cpp:244]     Train net output #1: loss = 0.0208949 (* 1 = 0.0208949 loss)\nI0818 21:18:56.809140 17538 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 21:21:14.284585 17538 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 21:22:36.535092 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7958\nI0818 21:22:36.535434 17538 solver.cpp:404]     Test net output #1: loss = 0.875927 (* 1 = 0.875927 loss)\nI0818 21:22:37.864445 17538 solver.cpp:228] Iteration 47100, loss = 0.0289097\nI0818 21:22:37.864495 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:22:37.864518 17538 solver.cpp:244]     Train net output #1: loss = 0.0289096 (* 1 = 0.0289096 loss)\nI0818 21:22:37.957082 17538 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 21:24:55.443755 17538 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 21:26:17.573410 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75652\nI0818 21:26:17.573731 17538 solver.cpp:404]     Test net output #1: loss = 1.18879 (* 1 = 1.18879 loss)\nI0818 21:26:18.902109 17538 solver.cpp:228] Iteration 47200, loss = 0.0215309\nI0818 21:26:18.902153 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:26:18.902176 17538 solver.cpp:244]     Train net output #1: loss = 0.0215307 (* 1 = 0.0215307 loss)\nI0818 21:26:18.987216 17538 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 
21:28:36.484982 17538 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 21:29:58.408255 17538 solver.cpp:404]     Test net output #0: accuracy = 0.73376\nI0818 21:29:58.408550 17538 solver.cpp:404]     Test net output #1: loss = 1.40172 (* 1 = 1.40172 loss)\nI0818 21:29:59.736614 17538 solver.cpp:228] Iteration 47300, loss = 0.0429783\nI0818 21:29:59.736660 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:29:59.736682 17538 solver.cpp:244]     Train net output #1: loss = 0.0429782 (* 1 = 0.0429782 loss)\nI0818 21:29:59.819412 17538 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 21:32:17.233192 17538 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 21:33:39.289572 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77372\nI0818 21:33:39.289867 17538 solver.cpp:404]     Test net output #1: loss = 0.989952 (* 1 = 0.989952 loss)\nI0818 21:33:40.618386 17538 solver.cpp:228] Iteration 47400, loss = 0.0271138\nI0818 21:33:40.618432 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:33:40.618458 17538 solver.cpp:244]     Train net output #1: loss = 0.0271137 (* 1 = 0.0271137 loss)\nI0818 21:33:40.705057 17538 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 21:35:58.232360 17538 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 21:37:20.437568 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81712\nI0818 21:37:20.437885 17538 solver.cpp:404]     Test net output #1: loss = 0.876659 (* 1 = 0.876659 loss)\nI0818 21:37:21.765691 17538 solver.cpp:228] Iteration 47500, loss = 0.0781352\nI0818 21:37:21.765735 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:37:21.765759 17538 solver.cpp:244]     Train net output #1: loss = 0.078135 (* 1 = 0.078135 loss)\nI0818 21:37:21.852916 17538 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 21:39:39.334170 17538 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 21:41:01.334077 17538 solver.cpp:404]     Test 
net output #0: accuracy = 0.73452\nI0818 21:41:01.334426 17538 solver.cpp:404]     Test net output #1: loss = 1.34986 (* 1 = 1.34986 loss)\nI0818 21:41:02.659734 17538 solver.cpp:228] Iteration 47600, loss = 0.0478694\nI0818 21:41:02.659783 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:41:02.659808 17538 solver.cpp:244]     Train net output #1: loss = 0.0478693 (* 1 = 0.0478693 loss)\nI0818 21:41:02.746579 17538 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 21:43:19.915705 17538 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 21:44:41.775682 17538 solver.cpp:404]     Test net output #0: accuracy = 0.70724\nI0818 21:44:41.776018 17538 solver.cpp:404]     Test net output #1: loss = 1.62896 (* 1 = 1.62896 loss)\nI0818 21:44:43.100888 17538 solver.cpp:228] Iteration 47700, loss = 0.0184483\nI0818 21:44:43.100930 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:44:43.100955 17538 solver.cpp:244]     Train net output #1: loss = 0.0184481 (* 1 = 0.0184481 loss)\nI0818 21:44:43.186285 17538 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 21:47:00.487473 17538 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 21:48:22.523751 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8046\nI0818 21:48:22.524070 17538 solver.cpp:404]     Test net output #1: loss = 0.941339 (* 1 = 0.941339 loss)\nI0818 21:48:23.852149 17538 solver.cpp:228] Iteration 47800, loss = 0.0378683\nI0818 21:48:23.852190 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:48:23.852216 17538 solver.cpp:244]     Train net output #1: loss = 0.0378682 (* 1 = 0.0378682 loss)\nI0818 21:48:23.936185 17538 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 21:50:41.438356 17538 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 21:52:03.713783 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82112\nI0818 21:52:03.714098 17538 solver.cpp:404]     Test net output #1: loss = 0.803196 (* 1 = 
0.803196 loss)\nI0818 21:52:05.042538 17538 solver.cpp:228] Iteration 47900, loss = 0.0312799\nI0818 21:52:05.042582 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:52:05.042604 17538 solver.cpp:244]     Train net output #1: loss = 0.0312798 (* 1 = 0.0312798 loss)\nI0818 21:52:05.131732 17538 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 21:54:22.635151 17538 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 21:55:44.926595 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81808\nI0818 21:55:44.926942 17538 solver.cpp:404]     Test net output #1: loss = 0.85308 (* 1 = 0.85308 loss)\nI0818 21:55:46.254436 17538 solver.cpp:228] Iteration 48000, loss = 0.065661\nI0818 21:55:46.254484 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:55:46.254508 17538 solver.cpp:244]     Train net output #1: loss = 0.0656609 (* 1 = 0.0656609 loss)\nI0818 21:55:46.334439 17538 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 21:58:03.769568 17538 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 21:59:26.000888 17538 solver.cpp:404]     Test net output #0: accuracy = 0.71364\nI0818 21:59:26.001237 17538 solver.cpp:404]     Test net output #1: loss = 1.55483 (* 1 = 1.55483 loss)\nI0818 21:59:27.329344 17538 solver.cpp:228] Iteration 48100, loss = 0.0181911\nI0818 21:59:27.329391 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 21:59:27.329408 17538 solver.cpp:244]     Train net output #1: loss = 0.018191 (* 1 = 0.018191 loss)\nI0818 21:59:27.407660 17538 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 22:01:44.820771 17538 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 22:03:06.122824 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80204\nI0818 22:03:06.123136 17538 solver.cpp:404]     Test net output #1: loss = 0.848317 (* 1 = 0.848317 loss)\nI0818 22:03:07.446951 17538 solver.cpp:228] Iteration 48200, loss = 0.012337\nI0818 22:03:07.446987 17538 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:03:07.447002 17538 solver.cpp:244]     Train net output #1: loss = 0.0123369 (* 1 = 0.0123369 loss)\nI0818 22:03:07.535956 17538 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 22:05:24.671131 17538 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 22:06:45.990460 17538 solver.cpp:404]     Test net output #0: accuracy = 0.81264\nI0818 22:06:45.990696 17538 solver.cpp:404]     Test net output #1: loss = 0.83614 (* 1 = 0.83614 loss)\nI0818 22:06:47.314323 17538 solver.cpp:228] Iteration 48300, loss = 0.099788\nI0818 22:06:47.314360 17538 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:06:47.314376 17538 solver.cpp:244]     Train net output #1: loss = 0.0997878 (* 1 = 0.0997878 loss)\nI0818 22:06:47.393594 17538 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 22:09:04.375692 17538 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 22:10:25.685511 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78676\nI0818 22:10:25.685822 17538 solver.cpp:404]     Test net output #1: loss = 1.00781 (* 1 = 1.00781 loss)\nI0818 22:10:27.007483 17538 solver.cpp:228] Iteration 48400, loss = 0.0531672\nI0818 22:10:27.007519 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:10:27.007534 17538 solver.cpp:244]     Train net output #1: loss = 0.0531671 (* 1 = 0.0531671 loss)\nI0818 22:10:27.096616 17538 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 22:12:44.075177 17538 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 22:14:05.394788 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80776\nI0818 22:14:05.395089 17538 solver.cpp:404]     Test net output #1: loss = 0.907644 (* 1 = 0.907644 loss)\nI0818 22:14:06.716470 17538 solver.cpp:228] Iteration 48500, loss = 0.0510124\nI0818 22:14:06.716508 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:14:06.716523 17538 solver.cpp:244]     Train net output #1: 
loss = 0.0510122 (* 1 = 0.0510122 loss)\nI0818 22:14:06.802999 17538 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 22:16:23.808444 17538 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 22:17:44.965644 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7766\nI0818 22:17:44.965937 17538 solver.cpp:404]     Test net output #1: loss = 1.04137 (* 1 = 1.04137 loss)\nI0818 22:17:46.287178 17538 solver.cpp:228] Iteration 48600, loss = 0.052559\nI0818 22:17:46.287214 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:17:46.287230 17538 solver.cpp:244]     Train net output #1: loss = 0.0525589 (* 1 = 0.0525589 loss)\nI0818 22:17:46.374071 17538 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 22:20:03.363205 17538 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 22:21:24.536545 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7702\nI0818 22:21:24.536844 17538 solver.cpp:404]     Test net output #1: loss = 1.0944 (* 1 = 1.0944 loss)\nI0818 22:21:25.858008 17538 solver.cpp:228] Iteration 48700, loss = 0.042894\nI0818 22:21:25.858044 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:21:25.858059 17538 solver.cpp:244]     Train net output #1: loss = 0.0428939 (* 1 = 0.0428939 loss)\nI0818 22:21:25.945078 17538 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 22:23:42.937455 17538 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 22:25:04.092190 17538 solver.cpp:404]     Test net output #0: accuracy = 0.818\nI0818 22:25:04.092428 17538 solver.cpp:404]     Test net output #1: loss = 0.844429 (* 1 = 0.844429 loss)\nI0818 22:25:05.412926 17538 solver.cpp:228] Iteration 48800, loss = 0.0393534\nI0818 22:25:05.412966 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:25:05.412981 17538 solver.cpp:244]     Train net output #1: loss = 0.0393533 (* 1 = 0.0393533 loss)\nI0818 22:25:05.504071 17538 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 
22:27:22.544409 17538 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 22:28:43.725723 17538 solver.cpp:404]     Test net output #0: accuracy = 0.7668\nI0818 22:28:43.725981 17538 solver.cpp:404]     Test net output #1: loss = 1.14558 (* 1 = 1.14558 loss)\nI0818 22:28:45.046708 17538 solver.cpp:228] Iteration 48900, loss = 0.0193975\nI0818 22:28:45.046747 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:28:45.046763 17538 solver.cpp:244]     Train net output #1: loss = 0.0193974 (* 1 = 0.0193974 loss)\nI0818 22:28:45.134457 17538 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 22:31:02.184830 17538 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 22:32:23.370370 17538 solver.cpp:404]     Test net output #0: accuracy = 0.82696\nI0818 22:32:23.370683 17538 solver.cpp:404]     Test net output #1: loss = 0.725696 (* 1 = 0.725696 loss)\nI0818 22:32:24.691288 17538 solver.cpp:228] Iteration 49000, loss = 0.0364994\nI0818 22:32:24.691328 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:32:24.691342 17538 solver.cpp:244]     Train net output #1: loss = 0.0364992 (* 1 = 0.0364992 loss)\nI0818 22:32:24.782364 17538 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 22:34:41.692503 17538 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 22:36:02.887128 17538 solver.cpp:404]     Test net output #0: accuracy = 0.77616\nI0818 22:36:02.887397 17538 solver.cpp:404]     Test net output #1: loss = 0.991557 (* 1 = 0.991557 loss)\nI0818 22:36:04.209429 17538 solver.cpp:228] Iteration 49100, loss = 0.0424186\nI0818 22:36:04.209468 17538 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:36:04.209484 17538 solver.cpp:244]     Train net output #1: loss = 0.0424184 (* 1 = 0.0424184 loss)\nI0818 22:36:04.297209 17538 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 22:38:21.216198 17538 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 22:39:42.403930 17538 solver.cpp:404]     
Test net output #0: accuracy = 0.82224\nI0818 22:39:42.404192 17538 solver.cpp:404]     Test net output #1: loss = 0.753585 (* 1 = 0.753585 loss)\nI0818 22:39:43.725148 17538 solver.cpp:228] Iteration 49200, loss = 0.0700958\nI0818 22:39:43.725186 17538 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:39:43.725203 17538 solver.cpp:244]     Train net output #1: loss = 0.0700956 (* 1 = 0.0700956 loss)\nI0818 22:39:43.813688 17538 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0818 22:42:00.816957 17538 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 22:43:21.993151 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8024\nI0818 22:43:21.993449 17538 solver.cpp:404]     Test net output #1: loss = 1.01555 (* 1 = 1.01555 loss)\nI0818 22:43:23.314757 17538 solver.cpp:228] Iteration 49300, loss = 0.0340008\nI0818 22:43:23.314795 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:43:23.314810 17538 solver.cpp:244]     Train net output #1: loss = 0.0340007 (* 1 = 0.0340007 loss)\nI0818 22:43:23.406184 17538 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 22:45:40.341931 17538 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 22:47:01.519737 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79612\nI0818 22:47:01.519973 17538 solver.cpp:404]     Test net output #1: loss = 0.824082 (* 1 = 0.824082 loss)\nI0818 22:47:02.840916 17538 solver.cpp:228] Iteration 49400, loss = 0.0111399\nI0818 22:47:02.840953 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:47:02.840968 17538 solver.cpp:244]     Train net output #1: loss = 0.0111397 (* 1 = 0.0111397 loss)\nI0818 22:47:02.935629 17538 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 22:49:19.883764 17538 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 22:50:41.169118 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74856\nI0818 22:50:41.169445 17538 solver.cpp:404]     Test net output #1: loss = 1.18847 (* 1 
= 1.18847 loss)\nI0818 22:50:42.490362 17538 solver.cpp:228] Iteration 49500, loss = 0.071994\nI0818 22:50:42.490401 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:50:42.490416 17538 solver.cpp:244]     Train net output #1: loss = 0.0719938 (* 1 = 0.0719938 loss)\nI0818 22:50:42.575222 17538 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 22:52:59.593564 17538 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 22:54:21.921175 17538 solver.cpp:404]     Test net output #0: accuracy = 0.74276\nI0818 22:54:21.921515 17538 solver.cpp:404]     Test net output #1: loss = 1.34937 (* 1 = 1.34937 loss)\nI0818 22:54:23.246398 17538 solver.cpp:228] Iteration 49600, loss = 0.0298091\nI0818 22:54:23.246438 17538 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:54:23.246480 17538 solver.cpp:244]     Train net output #1: loss = 0.0298089 (* 1 = 0.0298089 loss)\nI0818 22:54:23.327594 17538 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 22:56:40.496666 17538 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 22:58:02.779425 17538 solver.cpp:404]     Test net output #0: accuracy = 0.80648\nI0818 22:58:02.779773 17538 solver.cpp:404]     Test net output #1: loss = 0.934385 (* 1 = 0.934385 loss)\nI0818 22:58:04.105981 17538 solver.cpp:228] Iteration 49700, loss = 0.0186165\nI0818 22:58:04.106030 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:58:04.106047 17538 solver.cpp:244]     Train net output #1: loss = 0.0186164 (* 1 = 0.0186164 loss)\nI0818 22:58:04.187693 17538 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 23:00:21.303231 17538 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 23:01:43.631206 17538 solver.cpp:404]     Test net output #0: accuracy = 0.75548\nI0818 23:01:43.631531 17538 solver.cpp:404]     Test net output #1: loss = 1.26728 (* 1 = 1.26728 loss)\nI0818 23:01:44.957353 17538 solver.cpp:228] Iteration 49800, loss = 0.0921712\nI0818 23:01:44.957392 17538 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:01:44.957407 17538 solver.cpp:244]     Train net output #1: loss = 0.0921711 (* 1 = 0.0921711 loss)\nI0818 23:01:45.046618 17538 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 23:04:02.212415 17538 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 23:05:24.494707 17538 solver.cpp:404]     Test net output #0: accuracy = 0.78424\nI0818 23:05:24.495065 17538 solver.cpp:404]     Test net output #1: loss = 1.02019 (* 1 = 1.02019 loss)\nI0818 23:05:25.819494 17538 solver.cpp:228] Iteration 49900, loss = 0.0831162\nI0818 23:05:25.819542 17538 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:05:25.819561 17538 solver.cpp:244]     Train net output #1: loss = 0.083116 (* 1 = 0.083116 loss)\nI0818 23:05:25.901101 17538 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 23:07:43.041173 17538 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 23:09:05.336676 17538 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI0818 23:09:05.337030 17538 solver.cpp:404]     Test net output #1: loss = 0.959015 (* 1 = 0.959015 loss)\nI0818 23:09:06.661386 17538 solver.cpp:228] Iteration 50000, loss = 0.0124353\nI0818 23:09:06.661434 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:09:06.661451 17538 solver.cpp:244]     Train net output #1: loss = 0.0124351 (* 1 = 0.0124351 loss)\nI0818 23:09:06.745707 17538 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 23:09:06.745731 17538 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 23:11:23.809144 17538 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 23:12:46.098368 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8262\nI0818 23:12:46.098718 17538 solver.cpp:404]     Test net output #1: loss = 0.815173 (* 1 = 0.815173 loss)\nI0818 23:12:47.422639 17538 solver.cpp:228] Iteration 50100, loss = 0.00232079\nI0818 23:12:47.422683 17538 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0818 23:12:47.422700 17538 solver.cpp:244]     Train net output #1: loss = 0.00232061 (* 1 = 0.00232061 loss)\nI0818 23:12:47.511626 17538 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 23:15:04.722539 17538 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 23:16:27.012459 17538 solver.cpp:404]     Test net output #0: accuracy = 0.84224\nI0818 23:16:27.012809 17538 solver.cpp:404]     Test net output #1: loss = 0.728965 (* 1 = 0.728965 loss)\nI0818 23:16:28.336668 17538 solver.cpp:228] Iteration 50200, loss = 0.00071434\nI0818 23:16:28.336714 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:16:28.336730 17538 solver.cpp:244]     Train net output #1: loss = 0.000714158 (* 1 = 0.000714158 loss)\nI0818 23:16:28.421021 17538 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 23:18:45.504298 17538 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 23:20:07.779068 17538 solver.cpp:404]     Test net output #0: accuracy = 0.84416\nI0818 23:20:07.779398 17538 solver.cpp:404]     Test net output #1: loss = 0.703071 (* 1 = 0.703071 loss)\nI0818 23:20:09.103147 17538 solver.cpp:228] Iteration 50300, loss = 0.00105835\nI0818 23:20:09.103193 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:20:09.103209 17538 solver.cpp:244]     Train net output #1: loss = 0.00105816 (* 1 = 0.00105816 loss)\nI0818 23:20:09.196094 17538 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 23:22:26.373420 17538 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 23:23:48.598309 17538 solver.cpp:404]     Test net output #0: accuracy = 0.852\nI0818 23:23:48.598666 17538 solver.cpp:404]     Test net output #1: loss = 0.67587 (* 1 = 0.67587 loss)\nI0818 23:23:49.922143 17538 solver.cpp:228] Iteration 50400, loss = 0.00126425\nI0818 23:23:49.922189 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:23:49.922206 17538 solver.cpp:244]     Train net output #1: loss = 0.00126407 (* 1 = 
0.00126407 loss)\nI0818 23:23:50.005581 17538 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 23:26:07.118930 17538 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 23:27:29.375010 17538 solver.cpp:404]     Test net output #0: accuracy = 0.85672\nI0818 23:27:29.375370 17538 solver.cpp:404]     Test net output #1: loss = 0.653701 (* 1 = 0.653701 loss)\nI0818 23:27:30.699376 17538 solver.cpp:228] Iteration 50500, loss = 0.0011953\nI0818 23:27:30.699422 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:27:30.699440 17538 solver.cpp:244]     Train net output #1: loss = 0.00119511 (* 1 = 0.00119511 loss)\nI0818 23:27:30.780968 17538 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 23:29:47.946214 17538 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0818 23:31:10.212225 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86016\nI0818 23:31:10.212574 17538 solver.cpp:404]     Test net output #1: loss = 0.646098 (* 1 = 0.646098 loss)\nI0818 23:31:11.536870 17538 solver.cpp:228] Iteration 50600, loss = 0.000369198\nI0818 23:31:11.536906 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:31:11.536921 17538 solver.cpp:244]     Train net output #1: loss = 0.000369017 (* 1 = 0.000369017 loss)\nI0818 23:31:11.623306 17538 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 23:33:28.741245 17538 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0818 23:34:50.990031 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8612\nI0818 23:34:50.990391 17538 solver.cpp:404]     Test net output #1: loss = 0.630107 (* 1 = 0.630107 loss)\nI0818 23:34:52.314307 17538 solver.cpp:228] Iteration 50700, loss = 0.00068226\nI0818 23:34:52.314357 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:34:52.314373 17538 solver.cpp:244]     Train net output #1: loss = 0.000682078 (* 1 = 0.000682078 loss)\nI0818 23:34:52.407394 17538 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 23:37:09.673630 
17538 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 23:38:31.940523 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86228\nI0818 23:38:31.940816 17538 solver.cpp:404]     Test net output #1: loss = 0.629629 (* 1 = 0.629629 loss)\nI0818 23:38:33.264927 17538 solver.cpp:228] Iteration 50800, loss = 0.00058548\nI0818 23:38:33.264973 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:38:33.264988 17538 solver.cpp:244]     Train net output #1: loss = 0.000585299 (* 1 = 0.000585299 loss)\nI0818 23:38:33.347806 17538 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0818 23:40:50.506760 17538 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 23:42:12.783670 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86324\nI0818 23:42:12.783979 17538 solver.cpp:404]     Test net output #1: loss = 0.617145 (* 1 = 0.617145 loss)\nI0818 23:42:14.108448 17538 solver.cpp:228] Iteration 50900, loss = 0.000470701\nI0818 23:42:14.108494 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:42:14.108511 17538 solver.cpp:244]     Train net output #1: loss = 0.00047052 (* 1 = 0.00047052 loss)\nI0818 23:42:14.192093 17538 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0818 23:44:31.376081 17538 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 23:45:53.648406 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86524\nI0818 23:45:53.648676 17538 solver.cpp:404]     Test net output #1: loss = 0.617165 (* 1 = 0.617165 loss)\nI0818 23:45:54.972823 17538 solver.cpp:228] Iteration 51000, loss = 0.000485229\nI0818 23:45:54.972872 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:45:54.972889 17538 solver.cpp:244]     Train net output #1: loss = 0.000485047 (* 1 = 0.000485047 loss)\nI0818 23:45:55.058179 17538 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0818 23:48:12.238656 17538 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 23:49:34.490089 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.86684\nI0818 23:49:34.490357 17538 solver.cpp:404]     Test net output #1: loss = 0.605995 (* 1 = 0.605995 loss)\nI0818 23:49:35.814126 17538 solver.cpp:228] Iteration 51100, loss = 0.000651997\nI0818 23:49:35.814174 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:49:35.814193 17538 solver.cpp:244]     Train net output #1: loss = 0.000651815 (* 1 = 0.000651815 loss)\nI0818 23:49:35.904141 17538 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0818 23:51:53.244668 17538 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 23:53:15.496997 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86508\nI0818 23:53:15.497350 17538 solver.cpp:404]     Test net output #1: loss = 0.614136 (* 1 = 0.614136 loss)\nI0818 23:53:16.821594 17538 solver.cpp:228] Iteration 51200, loss = 0.000539325\nI0818 23:53:16.821645 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:53:16.821662 17538 solver.cpp:244]     Train net output #1: loss = 0.000539144 (* 1 = 0.000539144 loss)\nI0818 23:53:16.903374 17538 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0818 23:55:34.089040 17538 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 23:56:56.346513 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86712\nI0818 23:56:56.346798 17538 solver.cpp:404]     Test net output #1: loss = 0.600528 (* 1 = 0.600528 loss)\nI0818 23:56:57.670441 17538 solver.cpp:228] Iteration 51300, loss = 0.000533438\nI0818 23:56:57.670491 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:56:57.670508 17538 solver.cpp:244]     Train net output #1: loss = 0.000533256 (* 1 = 0.000533256 loss)\nI0818 23:56:57.756216 17538 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0818 23:59:14.922585 17538 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 00:00:37.135831 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 00:00:37.136137 17538 solver.cpp:404]     Test net output #1: loss = 
0.607458 (* 1 = 0.607458 loss)\nI0819 00:00:38.460459 17538 solver.cpp:228] Iteration 51400, loss = 0.000552864\nI0819 00:00:38.460505 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:00:38.460522 17538 solver.cpp:244]     Train net output #1: loss = 0.000552682 (* 1 = 0.000552682 loss)\nI0819 00:00:38.544800 17538 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0819 00:02:55.802669 17538 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 00:04:18.024611 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86832\nI0819 00:04:18.024899 17538 solver.cpp:404]     Test net output #1: loss = 0.595541 (* 1 = 0.595541 loss)\nI0819 00:04:19.348773 17538 solver.cpp:228] Iteration 51500, loss = 0.000496906\nI0819 00:04:19.348822 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:04:19.348839 17538 solver.cpp:244]     Train net output #1: loss = 0.000496725 (* 1 = 0.000496725 loss)\nI0819 00:04:19.434718 17538 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0819 00:06:36.642735 17538 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 00:07:58.886889 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0819 00:07:58.887166 17538 solver.cpp:404]     Test net output #1: loss = 0.601018 (* 1 = 0.601018 loss)\nI0819 00:08:00.210701 17538 solver.cpp:228] Iteration 51600, loss = 0.000365042\nI0819 00:08:00.210750 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:08:00.210767 17538 solver.cpp:244]     Train net output #1: loss = 0.000364861 (* 1 = 0.000364861 loss)\nI0819 00:08:00.290395 17538 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0819 00:10:17.539772 17538 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:11:39.510852 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86848\nI0819 00:11:39.511126 17538 solver.cpp:404]     Test net output #1: loss = 0.593911 (* 1 = 0.593911 loss)\nI0819 00:11:40.835201 17538 solver.cpp:228] Iteration 51700, loss = 
0.000270227\nI0819 00:11:40.835252 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:11:40.835268 17538 solver.cpp:244]     Train net output #1: loss = 0.000270046 (* 1 = 0.000270046 loss)\nI0819 00:11:40.921860 17538 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0819 00:13:58.221318 17538 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:15:20.242434 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0819 00:15:20.242727 17538 solver.cpp:404]     Test net output #1: loss = 0.599787 (* 1 = 0.599787 loss)\nI0819 00:15:21.567535 17538 solver.cpp:228] Iteration 51800, loss = 0.000303493\nI0819 00:15:21.567584 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:15:21.567601 17538 solver.cpp:244]     Train net output #1: loss = 0.000303312 (* 1 = 0.000303312 loss)\nI0819 00:15:21.654973 17538 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0819 00:17:38.779112 17538 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 00:19:00.880254 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86784\nI0819 00:19:00.880547 17538 solver.cpp:404]     Test net output #1: loss = 0.591159 (* 1 = 0.591159 loss)\nI0819 00:19:02.204208 17538 solver.cpp:228] Iteration 51900, loss = 0.000348794\nI0819 00:19:02.204254 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:19:02.204272 17538 solver.cpp:244]     Train net output #1: loss = 0.000348613 (* 1 = 0.000348613 loss)\nI0819 00:19:02.288913 17538 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0819 00:21:19.592294 17538 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:22:41.734143 17538 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 00:22:41.734455 17538 solver.cpp:404]     Test net output #1: loss = 0.59866 (* 1 = 0.59866 loss)\nI0819 00:22:43.058269 17538 solver.cpp:228] Iteration 52000, loss = 0.000518524\nI0819 00:22:43.058306 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:22:43.058321 
17538 solver.cpp:244]     Train net output #1: loss = 0.000518343 (* 1 = 0.000518343 loss)\nI0819 00:22:43.147306 17538 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0819 00:25:00.363912 17538 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:26:22.536288 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86844\nI0819 00:26:22.536577 17538 solver.cpp:404]     Test net output #1: loss = 0.58838 (* 1 = 0.58838 loss)\nI0819 00:26:23.859688 17538 solver.cpp:228] Iteration 52100, loss = 0.000299814\nI0819 00:26:23.859736 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:26:23.859753 17538 solver.cpp:244]     Train net output #1: loss = 0.000299633 (* 1 = 0.000299633 loss)\nI0819 00:26:23.946386 17538 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0819 00:28:41.198845 17538 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:30:03.362315 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 00:30:03.362578 17538 solver.cpp:404]     Test net output #1: loss = 0.594172 (* 1 = 0.594172 loss)\nI0819 00:30:04.686687 17538 solver.cpp:228] Iteration 52200, loss = 0.000356363\nI0819 00:30:04.686739 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:30:04.686756 17538 solver.cpp:244]     Train net output #1: loss = 0.000356182 (* 1 = 0.000356182 loss)\nI0819 00:30:04.769289 17538 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0819 00:32:21.955771 17538 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:33:44.190107 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86712\nI0819 00:33:44.190373 17538 solver.cpp:404]     Test net output #1: loss = 0.587382 (* 1 = 0.587382 loss)\nI0819 00:33:45.514448 17538 solver.cpp:228] Iteration 52300, loss = 0.000231297\nI0819 00:33:45.514499 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:33:45.514516 17538 solver.cpp:244]     Train net output #1: loss = 0.000231116 (* 1 = 0.000231116 loss)\nI0819 00:33:45.601533 
17538 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0819 00:36:02.895485 17538 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 00:37:24.935977 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0819 00:37:24.936240 17538 solver.cpp:404]     Test net output #1: loss = 0.592676 (* 1 = 0.592676 loss)\nI0819 00:37:26.260865 17538 solver.cpp:228] Iteration 52400, loss = 0.000444921\nI0819 00:37:26.260906 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:37:26.260923 17538 solver.cpp:244]     Train net output #1: loss = 0.00044474 (* 1 = 0.00044474 loss)\nI0819 00:37:26.349128 17538 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0819 00:39:43.535866 17538 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 00:41:05.672607 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 00:41:05.672907 17538 solver.cpp:404]     Test net output #1: loss = 0.586218 (* 1 = 0.586218 loss)\nI0819 00:41:06.996470 17538 solver.cpp:228] Iteration 52500, loss = 0.000426861\nI0819 00:41:06.996511 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:41:06.996527 17538 solver.cpp:244]     Train net output #1: loss = 0.000426679 (* 1 = 0.000426679 loss)\nI0819 00:41:07.086086 17538 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0819 00:43:24.149049 17538 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 00:44:45.796062 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86536\nI0819 00:44:45.796403 17538 solver.cpp:404]     Test net output #1: loss = 0.590827 (* 1 = 0.590827 loss)\nI0819 00:44:47.123531 17538 solver.cpp:228] Iteration 52600, loss = 0.00050957\nI0819 00:44:47.123571 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:44:47.123587 17538 solver.cpp:244]     Train net output #1: loss = 0.000509389 (* 1 = 0.000509389 loss)\nI0819 00:44:47.203279 17538 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0819 00:47:04.362109 17538 solver.cpp:337] Iteration 52700, 
Testing net (#0)\nI0819 00:48:26.002662 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86696\nI0819 00:48:26.002975 17538 solver.cpp:404]     Test net output #1: loss = 0.584028 (* 1 = 0.584028 loss)\nI0819 00:48:27.331207 17538 solver.cpp:228] Iteration 52700, loss = 0.000276792\nI0819 00:48:27.331248 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:48:27.331264 17538 solver.cpp:244]     Train net output #1: loss = 0.000276611 (* 1 = 0.000276611 loss)\nI0819 00:48:27.413432 17538 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0819 00:50:44.566246 17538 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 00:52:06.262292 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86528\nI0819 00:52:06.262607 17538 solver.cpp:404]     Test net output #1: loss = 0.590657 (* 1 = 0.590657 loss)\nI0819 00:52:07.590520 17538 solver.cpp:228] Iteration 52800, loss = 0.000384425\nI0819 00:52:07.590559 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:52:07.590574 17538 solver.cpp:244]     Train net output #1: loss = 0.000384244 (* 1 = 0.000384244 loss)\nI0819 00:52:07.675526 17538 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0819 00:54:24.922968 17538 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 00:55:47.070652 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86668\nI0819 00:55:47.070937 17538 solver.cpp:404]     Test net output #1: loss = 0.583991 (* 1 = 0.583991 loss)\nI0819 00:55:48.397517 17538 solver.cpp:228] Iteration 52900, loss = 0.000422178\nI0819 00:55:48.397557 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:55:48.397572 17538 solver.cpp:244]     Train net output #1: loss = 0.000421997 (* 1 = 0.000421997 loss)\nI0819 00:55:48.481462 17538 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0819 00:58:05.664957 17538 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 00:59:27.581269 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86556\nI0819 
00:59:27.581563 17538 solver.cpp:404]     Test net output #1: loss = 0.58787 (* 1 = 0.58787 loss)\nI0819 00:59:28.908277 17538 solver.cpp:228] Iteration 53000, loss = 0.000351425\nI0819 00:59:28.908318 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:59:28.908339 17538 solver.cpp:244]     Train net output #1: loss = 0.000351244 (* 1 = 0.000351244 loss)\nI0819 00:59:28.996443 17538 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0819 01:01:46.205147 17538 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 01:03:08.083909 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86612\nI0819 01:03:08.084205 17538 solver.cpp:404]     Test net output #1: loss = 0.581876 (* 1 = 0.581876 loss)\nI0819 01:03:09.411125 17538 solver.cpp:228] Iteration 53100, loss = 0.000455848\nI0819 01:03:09.411165 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:03:09.411180 17538 solver.cpp:244]     Train net output #1: loss = 0.000455667 (* 1 = 0.000455667 loss)\nI0819 01:03:09.487152 17538 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0819 01:05:26.669208 17538 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 01:06:48.922822 17538 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0819 01:06:48.923110 17538 solver.cpp:404]     Test net output #1: loss = 0.584493 (* 1 = 0.584493 loss)\nI0819 01:06:50.250036 17538 solver.cpp:228] Iteration 53200, loss = 0.000476325\nI0819 01:06:50.250075 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:06:50.250092 17538 solver.cpp:244]     Train net output #1: loss = 0.000476143 (* 1 = 0.000476143 loss)\nI0819 01:06:50.335649 17538 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0819 01:09:07.630679 17538 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 01:10:29.538480 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0819 01:10:29.538776 17538 solver.cpp:404]     Test net output #1: loss = 0.579202 (* 1 = 0.579202 loss)\nI0819 
01:10:30.865880 17538 solver.cpp:228] Iteration 53300, loss = 0.000302419\nI0819 01:10:30.865919 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:10:30.865934 17538 solver.cpp:244]     Train net output #1: loss = 0.000302238 (* 1 = 0.000302238 loss)\nI0819 01:10:30.946347 17538 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0819 01:12:48.131299 17538 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:14:10.382628 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86596\nI0819 01:14:10.382894 17538 solver.cpp:404]     Test net output #1: loss = 0.580381 (* 1 = 0.580381 loss)\nI0819 01:14:11.709493 17538 solver.cpp:228] Iteration 53400, loss = 0.000339044\nI0819 01:14:11.709533 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:14:11.709549 17538 solver.cpp:244]     Train net output #1: loss = 0.000338863 (* 1 = 0.000338863 loss)\nI0819 01:14:11.788883 17538 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0819 01:16:28.996265 17538 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:17:50.920790 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 01:17:50.921042 17538 solver.cpp:404]     Test net output #1: loss = 0.576243 (* 1 = 0.576243 loss)\nI0819 01:17:52.248083 17538 solver.cpp:228] Iteration 53500, loss = 0.000430007\nI0819 01:17:52.248124 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:17:52.248139 17538 solver.cpp:244]     Train net output #1: loss = 0.000429825 (* 1 = 0.000429825 loss)\nI0819 01:17:52.328270 17538 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0819 01:20:09.567528 17538 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:21:31.808722 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86604\nI0819 01:21:31.808989 17538 solver.cpp:404]     Test net output #1: loss = 0.579368 (* 1 = 0.579368 loss)\nI0819 01:21:33.135660 17538 solver.cpp:228] Iteration 53600, loss = 0.000482528\nI0819 01:21:33.135699 17538 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:21:33.135715 17538 solver.cpp:244]     Train net output #1: loss = 0.000482346 (* 1 = 0.000482346 loss)\nI0819 01:21:33.218552 17538 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0819 01:23:50.426862 17538 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:25:12.683518 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0819 01:25:12.683806 17538 solver.cpp:404]     Test net output #1: loss = 0.57537 (* 1 = 0.57537 loss)\nI0819 01:25:14.010931 17538 solver.cpp:228] Iteration 53700, loss = 0.000404582\nI0819 01:25:14.010968 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:25:14.010984 17538 solver.cpp:244]     Train net output #1: loss = 0.000404401 (* 1 = 0.000404401 loss)\nI0819 01:25:14.095090 17538 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0819 01:27:31.256065 17538 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:28:53.518414 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 01:28:53.518676 17538 solver.cpp:404]     Test net output #1: loss = 0.577321 (* 1 = 0.577321 loss)\nI0819 01:28:54.846302 17538 solver.cpp:228] Iteration 53800, loss = 0.000387956\nI0819 01:28:54.846345 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:28:54.846362 17538 solver.cpp:244]     Train net output #1: loss = 0.000387775 (* 1 = 0.000387775 loss)\nI0819 01:28:54.925189 17538 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0819 01:31:12.162274 17538 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:32:34.407480 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8668\nI0819 01:32:34.407796 17538 solver.cpp:404]     Test net output #1: loss = 0.572946 (* 1 = 0.572946 loss)\nI0819 01:32:35.734812 17538 solver.cpp:228] Iteration 53900, loss = 0.000299089\nI0819 01:32:35.734853 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:32:35.734868 17538 solver.cpp:244]     Train net 
output #1: loss = 0.000298907 (* 1 = 0.000298907 loss)\nI0819 01:32:35.816124 17538 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0819 01:34:53.016911 17538 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:36:15.185369 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86596\nI0819 01:36:15.185622 17538 solver.cpp:404]     Test net output #1: loss = 0.57702 (* 1 = 0.57702 loss)\nI0819 01:36:16.512923 17538 solver.cpp:228] Iteration 54000, loss = 0.000274718\nI0819 01:36:16.512962 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:36:16.512979 17538 solver.cpp:244]     Train net output #1: loss = 0.000274537 (* 1 = 0.000274537 loss)\nI0819 01:36:16.591183 17538 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0819 01:38:33.806995 17538 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:39:55.719868 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86692\nI0819 01:39:55.720157 17538 solver.cpp:404]     Test net output #1: loss = 0.572254 (* 1 = 0.572254 loss)\nI0819 01:39:57.047148 17538 solver.cpp:228] Iteration 54100, loss = 0.000334261\nI0819 01:39:57.047188 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:39:57.047204 17538 solver.cpp:244]     Train net output #1: loss = 0.00033408 (* 1 = 0.00033408 loss)\nI0819 01:39:57.130184 17538 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0819 01:42:14.286110 17538 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:43:36.326742 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 01:43:36.326988 17538 solver.cpp:404]     Test net output #1: loss = 0.575628 (* 1 = 0.575628 loss)\nI0819 01:43:37.654101 17538 solver.cpp:228] Iteration 54200, loss = 0.000321508\nI0819 01:43:37.654140 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:43:37.654156 17538 solver.cpp:244]     Train net output #1: loss = 0.000321327 (* 1 = 0.000321327 loss)\nI0819 01:43:37.735152 17538 sgd_solver.cpp:166] Iteration 
54200, lr = 0.035\nI0819 01:45:55.248170 17538 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 01:47:17.391126 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 01:47:17.391417 17538 solver.cpp:404]     Test net output #1: loss = 0.571737 (* 1 = 0.571737 loss)\nI0819 01:47:18.718657 17538 solver.cpp:228] Iteration 54300, loss = 0.000419099\nI0819 01:47:18.718698 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:47:18.718714 17538 solver.cpp:244]     Train net output #1: loss = 0.000418918 (* 1 = 0.000418918 loss)\nI0819 01:47:18.804585 17538 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0819 01:49:36.315502 17538 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:50:58.543669 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 01:50:58.544016 17538 solver.cpp:404]     Test net output #1: loss = 0.572739 (* 1 = 0.572739 loss)\nI0819 01:50:59.871428 17538 solver.cpp:228] Iteration 54400, loss = 0.000494242\nI0819 01:50:59.871469 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:50:59.871485 17538 solver.cpp:244]     Train net output #1: loss = 0.000494061 (* 1 = 0.000494061 loss)\nI0819 01:50:59.953505 17538 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0819 01:53:17.562593 17538 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:54:39.754920 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 01:54:39.755215 17538 solver.cpp:404]     Test net output #1: loss = 0.568256 (* 1 = 0.568256 loss)\nI0819 01:54:41.082270 17538 solver.cpp:228] Iteration 54500, loss = 0.000280901\nI0819 01:54:41.082311 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:54:41.082327 17538 solver.cpp:244]     Train net output #1: loss = 0.000280719 (* 1 = 0.000280719 loss)\nI0819 01:54:41.169212 17538 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0819 01:56:58.555212 17538 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 
01:58:20.768246 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0819 01:58:20.768539 17538 solver.cpp:404]     Test net output #1: loss = 0.571649 (* 1 = 0.571649 loss)\nI0819 01:58:22.097187 17538 solver.cpp:228] Iteration 54600, loss = 0.0004191\nI0819 01:58:22.097226 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:58:22.097244 17538 solver.cpp:244]     Train net output #1: loss = 0.000418918 (* 1 = 0.000418918 loss)\nI0819 01:58:22.182265 17538 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0819 02:00:39.739753 17538 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 02:02:01.960712 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8672\nI0819 02:02:01.960979 17538 solver.cpp:404]     Test net output #1: loss = 0.568065 (* 1 = 0.568065 loss)\nI0819 02:02:03.289494 17538 solver.cpp:228] Iteration 54700, loss = 0.000365286\nI0819 02:02:03.289530 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:02:03.289546 17538 solver.cpp:244]     Train net output #1: loss = 0.000365105 (* 1 = 0.000365105 loss)\nI0819 02:02:03.376201 17538 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0819 02:04:20.928413 17538 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 02:05:43.129581 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0819 02:05:43.129843 17538 solver.cpp:404]     Test net output #1: loss = 0.570092 (* 1 = 0.570092 loss)\nI0819 02:05:44.458264 17538 solver.cpp:228] Iteration 54800, loss = 0.000429103\nI0819 02:05:44.458304 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:05:44.458320 17538 solver.cpp:244]     Train net output #1: loss = 0.000428921 (* 1 = 0.000428921 loss)\nI0819 02:05:44.546857 17538 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0819 02:08:02.170073 17538 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 02:09:24.369902 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86776\nI0819 02:09:24.370192 17538 
solver.cpp:404]     Test net output #1: loss = 0.56338 (* 1 = 0.56338 loss)\nI0819 02:09:25.698981 17538 solver.cpp:228] Iteration 54900, loss = 0.000267851\nI0819 02:09:25.699019 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:09:25.699035 17538 solver.cpp:244]     Train net output #1: loss = 0.00026767 (* 1 = 0.00026767 loss)\nI0819 02:09:25.776630 17538 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0819 02:11:43.329655 17538 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 02:13:05.536046 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 02:13:05.536368 17538 solver.cpp:404]     Test net output #1: loss = 0.566759 (* 1 = 0.566759 loss)\nI0819 02:13:06.864476 17538 solver.cpp:228] Iteration 55000, loss = 0.000342555\nI0819 02:13:06.864516 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:13:06.864531 17538 solver.cpp:244]     Train net output #1: loss = 0.000342373 (* 1 = 0.000342373 loss)\nI0819 02:13:06.949661 17538 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0819 02:15:24.423789 17538 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 02:16:46.620751 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 02:16:46.621042 17538 solver.cpp:404]     Test net output #1: loss = 0.563034 (* 1 = 0.563034 loss)\nI0819 02:16:47.949273 17538 solver.cpp:228] Iteration 55100, loss = 0.00027137\nI0819 02:16:47.949313 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:16:47.949328 17538 solver.cpp:244]     Train net output #1: loss = 0.000271189 (* 1 = 0.000271189 loss)\nI0819 02:16:48.041419 17538 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0819 02:19:05.638161 17538 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 02:20:27.842272 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0819 02:20:27.842552 17538 solver.cpp:404]     Test net output #1: loss = 0.56678 (* 1 = 0.56678 loss)\nI0819 02:20:29.171438 17538 
solver.cpp:228] Iteration 55200, loss = 0.00042773\nI0819 02:20:29.171488 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:20:29.171511 17538 solver.cpp:244]     Train net output #1: loss = 0.000427549 (* 1 = 0.000427549 loss)\nI0819 02:20:29.250511 17538 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0819 02:22:46.775899 17538 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 02:24:08.794365 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0819 02:24:08.794638 17538 solver.cpp:404]     Test net output #1: loss = 0.561932 (* 1 = 0.561932 loss)\nI0819 02:24:10.123258 17538 solver.cpp:228] Iteration 55300, loss = 0.000431179\nI0819 02:24:10.123302 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:24:10.123325 17538 solver.cpp:244]     Train net output #1: loss = 0.000430997 (* 1 = 0.000430997 loss)\nI0819 02:24:10.210634 17538 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0819 02:26:27.770103 17538 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 02:27:49.827405 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86756\nI0819 02:27:49.827700 17538 solver.cpp:404]     Test net output #1: loss = 0.565678 (* 1 = 0.565678 loss)\nI0819 02:27:51.161432 17538 solver.cpp:228] Iteration 55400, loss = 0.000417807\nI0819 02:27:51.161471 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:27:51.161487 17538 solver.cpp:244]     Train net output #1: loss = 0.000417625 (* 1 = 0.000417625 loss)\nI0819 02:27:51.236210 17538 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0819 02:30:08.784749 17538 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 02:31:30.607333 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86756\nI0819 02:31:30.607592 17538 solver.cpp:404]     Test net output #1: loss = 0.562196 (* 1 = 0.562196 loss)\nI0819 02:31:31.934831 17538 solver.cpp:228] Iteration 55500, loss = 0.000392666\nI0819 02:31:31.934871 17538 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 02:31:31.934887 17538 solver.cpp:244]     Train net output #1: loss = 0.000392485 (* 1 = 0.000392485 loss)\nI0819 02:31:32.014520 17538 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0819 02:33:49.474766 17538 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 02:35:11.741216 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86696\nI0819 02:35:11.741489 17538 solver.cpp:404]     Test net output #1: loss = 0.563729 (* 1 = 0.563729 loss)\nI0819 02:35:13.069890 17538 solver.cpp:228] Iteration 55600, loss = 0.000335214\nI0819 02:35:13.069928 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:35:13.069944 17538 solver.cpp:244]     Train net output #1: loss = 0.000335032 (* 1 = 0.000335032 loss)\nI0819 02:35:13.153789 17538 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0819 02:37:30.563181 17538 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 02:38:52.818522 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86752\nI0819 02:38:52.818785 17538 solver.cpp:404]     Test net output #1: loss = 0.560987 (* 1 = 0.560987 loss)\nI0819 02:38:54.145292 17538 solver.cpp:228] Iteration 55700, loss = 0.00030282\nI0819 02:38:54.145334 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:38:54.145349 17538 solver.cpp:244]     Train net output #1: loss = 0.000302639 (* 1 = 0.000302639 loss)\nI0819 02:38:54.232484 17538 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0819 02:41:11.701329 17538 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 02:42:33.937934 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86688\nI0819 02:42:33.938266 17538 solver.cpp:404]     Test net output #1: loss = 0.562596 (* 1 = 0.562596 loss)\nI0819 02:42:35.265218 17538 solver.cpp:228] Iteration 55800, loss = 0.000417231\nI0819 02:42:35.265259 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:42:35.265280 17538 solver.cpp:244]     Train net output #1: loss = 0.000417049 
(* 1 = 0.000417049 loss)\nI0819 02:42:35.350384 17538 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0819 02:44:52.770982 17538 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 02:46:14.999564 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0819 02:46:14.999861 17538 solver.cpp:404]     Test net output #1: loss = 0.55753 (* 1 = 0.55753 loss)\nI0819 02:46:16.326697 17538 solver.cpp:228] Iteration 55900, loss = 0.000345405\nI0819 02:46:16.326737 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:46:16.326753 17538 solver.cpp:244]     Train net output #1: loss = 0.000345224 (* 1 = 0.000345224 loss)\nI0819 02:46:16.413214 17538 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0819 02:48:33.900871 17538 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 02:49:56.081205 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86696\nI0819 02:49:56.081513 17538 solver.cpp:404]     Test net output #1: loss = 0.558439 (* 1 = 0.558439 loss)\nI0819 02:49:57.409108 17538 solver.cpp:228] Iteration 56000, loss = 0.000304417\nI0819 02:49:57.409150 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:49:57.409166 17538 solver.cpp:244]     Train net output #1: loss = 0.000304236 (* 1 = 0.000304236 loss)\nI0819 02:49:57.494212 17538 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0819 02:52:14.925163 17538 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 02:53:37.000594 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86836\nI0819 02:53:37.000847 17538 solver.cpp:404]     Test net output #1: loss = 0.557989 (* 1 = 0.557989 loss)\nI0819 02:53:38.328178 17538 solver.cpp:228] Iteration 56100, loss = 0.000348696\nI0819 02:53:38.328220 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:53:38.328235 17538 solver.cpp:244]     Train net output #1: loss = 0.000348514 (* 1 = 0.000348514 loss)\nI0819 02:53:38.415925 17538 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0819 
02:55:55.938638 17538 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 02:57:17.858381 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 02:57:17.858672 17538 solver.cpp:404]     Test net output #1: loss = 0.559747 (* 1 = 0.559747 loss)\nI0819 02:57:19.185201 17538 solver.cpp:228] Iteration 56200, loss = 0.000395887\nI0819 02:57:19.185241 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:57:19.185257 17538 solver.cpp:244]     Train net output #1: loss = 0.000395705 (* 1 = 0.000395705 loss)\nI0819 02:57:19.267369 17538 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0819 02:59:36.696393 17538 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 03:00:58.592120 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8684\nI0819 03:00:58.592376 17538 solver.cpp:404]     Test net output #1: loss = 0.554909 (* 1 = 0.554909 loss)\nI0819 03:00:59.919556 17538 solver.cpp:228] Iteration 56300, loss = 0.000442816\nI0819 03:00:59.919596 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:00:59.919613 17538 solver.cpp:244]     Train net output #1: loss = 0.000442635 (* 1 = 0.000442635 loss)\nI0819 03:01:00.001386 17538 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0819 03:03:17.445225 17538 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 03:04:39.317816 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 03:04:39.318078 17538 solver.cpp:404]     Test net output #1: loss = 0.559715 (* 1 = 0.559715 loss)\nI0819 03:04:40.645957 17538 solver.cpp:228] Iteration 56400, loss = 0.000411491\nI0819 03:04:40.645998 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:04:40.646013 17538 solver.cpp:244]     Train net output #1: loss = 0.000411309 (* 1 = 0.000411309 loss)\nI0819 03:04:40.729132 17538 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0819 03:06:58.197152 17538 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 03:08:19.926405 17538 
solver.cpp:404]     Test net output #0: accuracy = 0.86908\nI0819 03:08:19.926666 17538 solver.cpp:404]     Test net output #1: loss = 0.553413 (* 1 = 0.553413 loss)\nI0819 03:08:21.254575 17538 solver.cpp:228] Iteration 56500, loss = 0.000380232\nI0819 03:08:21.254616 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:08:21.254631 17538 solver.cpp:244]     Train net output #1: loss = 0.000380051 (* 1 = 0.000380051 loss)\nI0819 03:08:21.335909 17538 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0819 03:10:38.807209 17538 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 03:12:00.787225 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86776\nI0819 03:12:00.787485 17538 solver.cpp:404]     Test net output #1: loss = 0.558511 (* 1 = 0.558511 loss)\nI0819 03:12:02.114114 17538 solver.cpp:228] Iteration 56600, loss = 0.000369161\nI0819 03:12:02.114151 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:12:02.114167 17538 solver.cpp:244]     Train net output #1: loss = 0.000368979 (* 1 = 0.000368979 loss)\nI0819 03:12:02.201021 17538 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0819 03:14:19.619660 17538 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0819 03:15:41.483652 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86872\nI0819 03:15:41.483922 17538 solver.cpp:404]     Test net output #1: loss = 0.553673 (* 1 = 0.553673 loss)\nI0819 03:15:42.810153 17538 solver.cpp:228] Iteration 56700, loss = 0.000351535\nI0819 03:15:42.810194 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:15:42.810209 17538 solver.cpp:244]     Train net output #1: loss = 0.000351354 (* 1 = 0.000351354 loss)\nI0819 03:15:42.899315 17538 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0819 03:18:00.367132 17538 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 03:19:22.152889 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86756\nI0819 03:19:22.153141 17538 solver.cpp:404]     Test 
net output #1: loss = 0.557343 (* 1 = 0.557343 loss)\nI0819 03:19:23.479543 17538 solver.cpp:228] Iteration 56800, loss = 0.000346067\nI0819 03:19:23.479583 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:19:23.479598 17538 solver.cpp:244]     Train net output #1: loss = 0.000345885 (* 1 = 0.000345885 loss)\nI0819 03:19:23.567368 17538 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0819 03:21:40.935561 17538 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 03:23:02.730474 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86816\nI0819 03:23:02.730808 17538 solver.cpp:404]     Test net output #1: loss = 0.553863 (* 1 = 0.553863 loss)\nI0819 03:23:04.058912 17538 solver.cpp:228] Iteration 56900, loss = 0.000302766\nI0819 03:23:04.058951 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:23:04.058969 17538 solver.cpp:244]     Train net output #1: loss = 0.000302585 (* 1 = 0.000302585 loss)\nI0819 03:23:04.142076 17538 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0819 03:25:21.589467 17538 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 03:26:43.479027 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86728\nI0819 03:26:43.479354 17538 solver.cpp:404]     Test net output #1: loss = 0.556198 (* 1 = 0.556198 loss)\nI0819 03:26:44.807186 17538 solver.cpp:228] Iteration 57000, loss = 0.00036001\nI0819 03:26:44.807226 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:26:44.807242 17538 solver.cpp:244]     Train net output #1: loss = 0.000359829 (* 1 = 0.000359829 loss)\nI0819 03:26:44.890496 17538 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0819 03:29:02.411778 17538 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 03:30:24.400557 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86832\nI0819 03:30:24.400873 17538 solver.cpp:404]     Test net output #1: loss = 0.555316 (* 1 = 0.555316 loss)\nI0819 03:30:25.728890 17538 solver.cpp:228] Iteration 
57100, loss = 0.00038597\nI0819 03:30:25.728931 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:30:25.728947 17538 solver.cpp:244]     Train net output #1: loss = 0.000385788 (* 1 = 0.000385788 loss)\nI0819 03:30:25.809648 17538 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0819 03:32:43.233660 17538 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 03:34:05.494629 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86752\nI0819 03:34:05.494968 17538 solver.cpp:404]     Test net output #1: loss = 0.555994 (* 1 = 0.555994 loss)\nI0819 03:34:06.822626 17538 solver.cpp:228] Iteration 57200, loss = 0.000352802\nI0819 03:34:06.822667 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:34:06.822684 17538 solver.cpp:244]     Train net output #1: loss = 0.000352621 (* 1 = 0.000352621 loss)\nI0819 03:34:06.902212 17538 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0819 03:36:24.292959 17538 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 03:37:46.156536 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86808\nI0819 03:37:46.156785 17538 solver.cpp:404]     Test net output #1: loss = 0.553247 (* 1 = 0.553247 loss)\nI0819 03:37:47.484643 17538 solver.cpp:228] Iteration 57300, loss = 0.000298657\nI0819 03:37:47.484686 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:37:47.484702 17538 solver.cpp:244]     Train net output #1: loss = 0.000298475 (* 1 = 0.000298475 loss)\nI0819 03:37:47.564142 17538 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0819 03:40:05.012295 17538 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 03:41:26.808001 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86764\nI0819 03:41:26.808264 17538 solver.cpp:404]     Test net output #1: loss = 0.554579 (* 1 = 0.554579 loss)\nI0819 03:41:28.135576 17538 solver.cpp:228] Iteration 57400, loss = 0.000326353\nI0819 03:41:28.135617 17538 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0819 03:41:28.135633 17538 solver.cpp:244]     Train net output #1: loss = 0.000326171 (* 1 = 0.000326171 loss)\nI0819 03:41:28.221989 17538 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0819 03:43:45.660158 17538 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 03:45:07.520704 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8686\nI0819 03:45:07.520967 17538 solver.cpp:404]     Test net output #1: loss = 0.551514 (* 1 = 0.551514 loss)\nI0819 03:45:08.847076 17538 solver.cpp:228] Iteration 57500, loss = 0.000340804\nI0819 03:45:08.847116 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:45:08.847133 17538 solver.cpp:244]     Train net output #1: loss = 0.000340622 (* 1 = 0.000340622 loss)\nI0819 03:45:08.927289 17538 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0819 03:47:26.370857 17538 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 03:48:48.179798 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86772\nI0819 03:48:48.180088 17538 solver.cpp:404]     Test net output #1: loss = 0.552183 (* 1 = 0.552183 loss)\nI0819 03:48:49.508074 17538 solver.cpp:228] Iteration 57600, loss = 0.000316082\nI0819 03:48:49.508116 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:48:49.508132 17538 solver.cpp:244]     Train net output #1: loss = 0.0003159 (* 1 = 0.0003159 loss)\nI0819 03:48:49.592736 17538 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0819 03:51:07.130072 17538 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 03:52:29.073714 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86816\nI0819 03:52:29.073978 17538 solver.cpp:404]     Test net output #1: loss = 0.551267 (* 1 = 0.551267 loss)\nI0819 03:52:30.400740 17538 solver.cpp:228] Iteration 57700, loss = 0.000370074\nI0819 03:52:30.400782 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:52:30.400799 17538 solver.cpp:244]     Train net output #1: loss = 0.000369892 (* 1 = 0.000369892 
loss)\nI0819 03:52:30.488209 17538 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0819 03:54:47.902513 17538 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 03:56:10.061223 17538 solver.cpp:404]     Test net output #0: accuracy = 0.867921\nI0819 03:56:10.061530 17538 solver.cpp:404]     Test net output #1: loss = 0.551529 (* 1 = 0.551529 loss)\nI0819 03:56:11.388350 17538 solver.cpp:228] Iteration 57800, loss = 0.000468228\nI0819 03:56:11.388386 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:56:11.388402 17538 solver.cpp:244]     Train net output #1: loss = 0.000468046 (* 1 = 0.000468046 loss)\nI0819 03:56:11.472468 17538 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0819 03:58:28.948014 17538 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 03:59:51.124644 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86824\nI0819 03:59:51.124933 17538 solver.cpp:404]     Test net output #1: loss = 0.549352 (* 1 = 0.549352 loss)\nI0819 03:59:52.452031 17538 solver.cpp:228] Iteration 57900, loss = 0.000271173\nI0819 03:59:52.452070 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:59:52.452086 17538 solver.cpp:244]     Train net output #1: loss = 0.000270992 (* 1 = 0.000270992 loss)\nI0819 03:59:52.535342 17538 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0819 04:02:09.970748 17538 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 04:03:31.991093 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86764\nI0819 04:03:31.991402 17538 solver.cpp:404]     Test net output #1: loss = 0.552004 (* 1 = 0.552004 loss)\nI0819 04:03:33.317889 17538 solver.cpp:228] Iteration 58000, loss = 0.000314042\nI0819 04:03:33.317927 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:03:33.317944 17538 solver.cpp:244]     Train net output #1: loss = 0.00031386 (* 1 = 0.00031386 loss)\nI0819 04:03:33.404839 17538 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0819 04:05:50.913110 17538 
solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 04:07:13.135560 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86796\nI0819 04:07:13.135851 17538 solver.cpp:404]     Test net output #1: loss = 0.548815 (* 1 = 0.548815 loss)\nI0819 04:07:14.462234 17538 solver.cpp:228] Iteration 58100, loss = 0.000249957\nI0819 04:07:14.462272 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:07:14.462288 17538 solver.cpp:244]     Train net output #1: loss = 0.000249775 (* 1 = 0.000249775 loss)\nI0819 04:07:14.544095 17538 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0819 04:09:32.002032 17538 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 04:10:54.252234 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 04:10:54.252573 17538 solver.cpp:404]     Test net output #1: loss = 0.55136 (* 1 = 0.55136 loss)\nI0819 04:10:55.579907 17538 solver.cpp:228] Iteration 58200, loss = 0.000356438\nI0819 04:10:55.579946 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:10:55.579962 17538 solver.cpp:244]     Train net output #1: loss = 0.000356257 (* 1 = 0.000356257 loss)\nI0819 04:10:55.660157 17538 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0819 04:13:13.045795 17538 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0819 04:14:35.299104 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86824\nI0819 04:14:35.299378 17538 solver.cpp:404]     Test net output #1: loss = 0.547847 (* 1 = 0.547847 loss)\nI0819 04:14:36.626649 17538 solver.cpp:228] Iteration 58300, loss = 0.000313602\nI0819 04:14:36.626688 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:14:36.626703 17538 solver.cpp:244]     Train net output #1: loss = 0.00031342 (* 1 = 0.00031342 loss)\nI0819 04:14:36.708853 17538 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0819 04:16:54.079725 17538 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 04:18:16.299348 17538 solver.cpp:404]     Test net output 
#0: accuracy = 0.86672\nI0819 04:18:16.299628 17538 solver.cpp:404]     Test net output #1: loss = 0.55225 (* 1 = 0.55225 loss)\nI0819 04:18:17.626152 17538 solver.cpp:228] Iteration 58400, loss = 0.00040011\nI0819 04:18:17.626190 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:18:17.626206 17538 solver.cpp:244]     Train net output #1: loss = 0.000399929 (* 1 = 0.000399929 loss)\nI0819 04:18:17.715253 17538 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0819 04:20:35.211122 17538 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 04:21:57.305979 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0819 04:21:57.306290 17538 solver.cpp:404]     Test net output #1: loss = 0.549863 (* 1 = 0.549863 loss)\nI0819 04:21:58.633020 17538 solver.cpp:228] Iteration 58500, loss = 0.00042901\nI0819 04:21:58.633059 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:21:58.633074 17538 solver.cpp:244]     Train net output #1: loss = 0.000428829 (* 1 = 0.000428829 loss)\nI0819 04:21:58.715811 17538 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0819 04:24:16.259783 17538 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 04:25:38.219789 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 04:25:38.220047 17538 solver.cpp:404]     Test net output #1: loss = 0.548511 (* 1 = 0.548511 loss)\nI0819 04:25:39.546421 17538 solver.cpp:228] Iteration 58600, loss = 0.000429672\nI0819 04:25:39.546459 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:25:39.546475 17538 solver.cpp:244]     Train net output #1: loss = 0.000429491 (* 1 = 0.000429491 loss)\nI0819 04:25:39.628180 17538 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0819 04:27:57.070248 17538 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 04:29:18.998705 17538 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 04:29:18.998977 17538 solver.cpp:404]     Test net output #1: loss = 0.546264 (* 1 = 
0.546264 loss)\nI0819 04:29:20.325754 17538 solver.cpp:228] Iteration 58700, loss = 0.00036217\nI0819 04:29:20.325791 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:29:20.325808 17538 solver.cpp:244]     Train net output #1: loss = 0.000361988 (* 1 = 0.000361988 loss)\nI0819 04:29:20.409467 17538 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0819 04:31:38.047621 17538 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 04:33:00.273052 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86668\nI0819 04:33:00.273398 17538 solver.cpp:404]     Test net output #1: loss = 0.549755 (* 1 = 0.549755 loss)\nI0819 04:33:01.599700 17538 solver.cpp:228] Iteration 58800, loss = 0.000343495\nI0819 04:33:01.599738 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:33:01.599753 17538 solver.cpp:244]     Train net output #1: loss = 0.000343313 (* 1 = 0.000343313 loss)\nI0819 04:33:01.686728 17538 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0819 04:35:19.316841 17538 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 04:36:41.555253 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86708\nI0819 04:36:41.555562 17538 solver.cpp:404]     Test net output #1: loss = 0.547808 (* 1 = 0.547808 loss)\nI0819 04:36:42.882889 17538 solver.cpp:228] Iteration 58900, loss = 0.000379643\nI0819 04:36:42.882925 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:36:42.882942 17538 solver.cpp:244]     Train net output #1: loss = 0.000379462 (* 1 = 0.000379462 loss)\nI0819 04:36:42.970476 17538 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0819 04:39:00.576190 17538 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 04:40:22.768363 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 04:40:22.768669 17538 solver.cpp:404]     Test net output #1: loss = 0.548308 (* 1 = 0.548308 loss)\nI0819 04:40:24.095435 17538 solver.cpp:228] Iteration 59000, loss = 0.000390499\nI0819 
04:40:24.095474 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:40:24.095489 17538 solver.cpp:244]     Train net output #1: loss = 0.000390317 (* 1 = 0.000390317 loss)\nI0819 04:40:24.178874 17538 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0819 04:42:41.833232 17538 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 04:44:04.048183 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86728\nI0819 04:44:04.048456 17538 solver.cpp:404]     Test net output #1: loss = 0.547674 (* 1 = 0.547674 loss)\nI0819 04:44:05.374645 17538 solver.cpp:228] Iteration 59100, loss = 0.000342503\nI0819 04:44:05.374682 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:44:05.374698 17538 solver.cpp:244]     Train net output #1: loss = 0.000342321 (* 1 = 0.000342321 loss)\nI0819 04:44:05.455317 17538 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0819 04:46:22.998489 17538 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 04:47:45.201628 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 04:47:45.201910 17538 solver.cpp:404]     Test net output #1: loss = 0.550756 (* 1 = 0.550756 loss)\nI0819 04:47:46.528321 17538 solver.cpp:228] Iteration 59200, loss = 0.000350086\nI0819 04:47:46.528358 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:47:46.528374 17538 solver.cpp:244]     Train net output #1: loss = 0.000349905 (* 1 = 0.000349905 loss)\nI0819 04:47:46.615278 17538 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0819 04:50:04.205004 17538 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 04:51:26.417498 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 04:51:26.417806 17538 solver.cpp:404]     Test net output #1: loss = 0.548077 (* 1 = 0.548077 loss)\nI0819 04:51:27.744657 17538 solver.cpp:228] Iteration 59300, loss = 0.000381855\nI0819 04:51:27.744695 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:51:27.744711 17538 
solver.cpp:244]     Train net output #1: loss = 0.000381674 (* 1 = 0.000381674 loss)\nI0819 04:51:27.832197 17538 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0819 04:53:45.365041 17538 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 04:55:07.625881 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86692\nI0819 04:55:07.626148 17538 solver.cpp:404]     Test net output #1: loss = 0.549558 (* 1 = 0.549558 loss)\nI0819 04:55:08.953029 17538 solver.cpp:228] Iteration 59400, loss = 0.000328788\nI0819 04:55:08.953068 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:55:08.953084 17538 solver.cpp:244]     Train net output #1: loss = 0.000328606 (* 1 = 0.000328606 loss)\nI0819 04:55:09.033318 17538 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0819 04:57:26.614609 17538 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 04:58:48.855286 17538 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 04:58:48.855588 17538 solver.cpp:404]     Test net output #1: loss = 0.545881 (* 1 = 0.545881 loss)\nI0819 04:58:50.182286 17538 solver.cpp:228] Iteration 59500, loss = 0.000348995\nI0819 04:58:50.182323 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:58:50.182343 17538 solver.cpp:244]     Train net output #1: loss = 0.000348813 (* 1 = 0.000348813 loss)\nI0819 04:58:50.265959 17538 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0819 05:01:07.805222 17538 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 05:02:29.994310 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 05:02:29.994660 17538 solver.cpp:404]     Test net output #1: loss = 0.548753 (* 1 = 0.548753 loss)\nI0819 05:02:31.322460 17538 solver.cpp:228] Iteration 59600, loss = 0.000439897\nI0819 05:02:31.322501 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:02:31.322522 17538 solver.cpp:244]     Train net output #1: loss = 0.000439716 (* 1 = 0.000439716 loss)\nI0819 05:02:31.403472 17538 
sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0819 05:04:48.905300 17538 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 05:06:11.151729 17538 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 05:06:11.152043 17538 solver.cpp:404]     Test net output #1: loss = 0.545839 (* 1 = 0.545839 loss)\nI0819 05:06:12.481480 17538 solver.cpp:228] Iteration 59700, loss = 0.000372589\nI0819 05:06:12.481521 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:06:12.481537 17538 solver.cpp:244]     Train net output #1: loss = 0.000372407 (* 1 = 0.000372407 loss)\nI0819 05:06:12.563818 17538 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0819 05:08:30.080973 17538 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 05:09:51.927543 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86664\nI0819 05:09:51.927856 17538 solver.cpp:404]     Test net output #1: loss = 0.549404 (* 1 = 0.549404 loss)\nI0819 05:09:53.255070 17538 solver.cpp:228] Iteration 59800, loss = 0.000367265\nI0819 05:09:53.255107 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:09:53.255123 17538 solver.cpp:244]     Train net output #1: loss = 0.000367084 (* 1 = 0.000367084 loss)\nI0819 05:09:53.340076 17538 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0819 05:12:10.996556 17538 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 05:13:32.800256 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86784\nI0819 05:13:32.800622 17538 solver.cpp:404]     Test net output #1: loss = 0.547233 (* 1 = 0.547233 loss)\nI0819 05:13:34.131624 17538 solver.cpp:228] Iteration 59900, loss = 0.000363882\nI0819 05:13:34.131671 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:13:34.131695 17538 solver.cpp:244]     Train net output #1: loss = 0.0003637 (* 1 = 0.0003637 loss)\nI0819 05:13:34.215044 17538 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0819 05:15:51.678469 17538 solver.cpp:337] Iteration 60000, Testing 
net (#0)\nI0819 05:17:13.483258 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86708\nI0819 05:17:13.483527 17538 solver.cpp:404]     Test net output #1: loss = 0.548616 (* 1 = 0.548616 loss)\nI0819 05:17:14.812201 17538 solver.cpp:228] Iteration 60000, loss = 0.000298921\nI0819 05:17:14.812242 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:17:14.812264 17538 solver.cpp:244]     Train net output #1: loss = 0.00029874 (* 1 = 0.00029874 loss)\nI0819 05:17:14.893245 17538 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0819 05:19:32.466511 17538 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 05:20:54.520247 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86832\nI0819 05:20:54.520575 17538 solver.cpp:404]     Test net output #1: loss = 0.546263 (* 1 = 0.546263 loss)\nI0819 05:20:55.850134 17538 solver.cpp:228] Iteration 60100, loss = 0.000340567\nI0819 05:20:55.850178 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:20:55.850195 17538 solver.cpp:244]     Train net output #1: loss = 0.000340385 (* 1 = 0.000340385 loss)\nI0819 05:20:55.932812 17538 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0819 05:23:13.416543 17538 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 05:24:35.583847 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 05:24:35.584136 17538 solver.cpp:404]     Test net output #1: loss = 0.547272 (* 1 = 0.547272 loss)\nI0819 05:24:36.910720 17538 solver.cpp:228] Iteration 60200, loss = 0.000334642\nI0819 05:24:36.910759 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:24:36.910776 17538 solver.cpp:244]     Train net output #1: loss = 0.000334461 (* 1 = 0.000334461 loss)\nI0819 05:24:37.006294 17538 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0819 05:26:54.534127 17538 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 05:28:16.695680 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86788\nI0819 
05:28:16.695946 17538 solver.cpp:404]     Test net output #1: loss = 0.546814 (* 1 = 0.546814 loss)\nI0819 05:28:18.023138 17538 solver.cpp:228] Iteration 60300, loss = 0.000311059\nI0819 05:28:18.023180 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:28:18.023195 17538 solver.cpp:244]     Train net output #1: loss = 0.000310877 (* 1 = 0.000310877 loss)\nI0819 05:28:18.110528 17538 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0819 05:30:35.401787 17538 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 05:31:57.634977 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86672\nI0819 05:31:57.635242 17538 solver.cpp:404]     Test net output #1: loss = 0.550396 (* 1 = 0.550396 loss)\nI0819 05:31:58.962383 17538 solver.cpp:228] Iteration 60400, loss = 0.000433285\nI0819 05:31:58.962427 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:31:58.962450 17538 solver.cpp:244]     Train net output #1: loss = 0.000433104 (* 1 = 0.000433104 loss)\nI0819 05:31:59.043938 17538 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0819 05:34:16.290779 17538 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 05:35:38.547332 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8684\nI0819 05:35:38.547629 17538 solver.cpp:404]     Test net output #1: loss = 0.546336 (* 1 = 0.546336 loss)\nI0819 05:35:39.874796 17538 solver.cpp:228] Iteration 60500, loss = 0.000355394\nI0819 05:35:39.874840 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:35:39.874864 17538 solver.cpp:244]     Train net output #1: loss = 0.000355213 (* 1 = 0.000355213 loss)\nI0819 05:35:39.952787 17538 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0819 05:37:57.177047 17538 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 05:39:19.147176 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0819 05:39:19.147438 17538 solver.cpp:404]     Test net output #1: loss = 0.549185 (* 1 = 0.549185 loss)\nI0819 
05:39:20.475270 17538 solver.cpp:228] Iteration 60600, loss = 0.000389684\nI0819 05:39:20.475313 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:39:20.475335 17538 solver.cpp:244]     Train net output #1: loss = 0.000389502 (* 1 = 0.000389502 loss)\nI0819 05:39:20.561982 17538 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0819 05:41:37.744984 17538 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 05:42:59.884377 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86836\nI0819 05:42:59.884711 17538 solver.cpp:404]     Test net output #1: loss = 0.545498 (* 1 = 0.545498 loss)\nI0819 05:43:01.211975 17538 solver.cpp:228] Iteration 60700, loss = 0.000417379\nI0819 05:43:01.212015 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:43:01.212038 17538 solver.cpp:244]     Train net output #1: loss = 0.000417198 (* 1 = 0.000417198 loss)\nI0819 05:43:01.288475 17538 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0819 05:45:18.420289 17538 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 05:46:40.632632 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0819 05:46:40.632925 17538 solver.cpp:404]     Test net output #1: loss = 0.549299 (* 1 = 0.549299 loss)\nI0819 05:46:41.959635 17538 solver.cpp:228] Iteration 60800, loss = 0.000388235\nI0819 05:46:41.959678 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:46:41.959702 17538 solver.cpp:244]     Train net output #1: loss = 0.000388053 (* 1 = 0.000388053 loss)\nI0819 05:46:42.034788 17538 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0819 05:48:59.296129 17538 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 05:50:21.484035 17538 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 05:50:21.484330 17538 solver.cpp:404]     Test net output #1: loss = 0.547496 (* 1 = 0.547496 loss)\nI0819 05:50:22.811065 17538 solver.cpp:228] Iteration 60900, loss = 0.000301674\nI0819 05:50:22.811106 17538 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:50:22.811128 17538 solver.cpp:244]     Train net output #1: loss = 0.000301493 (* 1 = 0.000301493 loss)\nI0819 05:50:22.891044 17538 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0819 05:52:40.196444 17538 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 05:54:02.393496 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86768\nI0819 05:54:02.393807 17538 solver.cpp:404]     Test net output #1: loss = 0.547407 (* 1 = 0.547407 loss)\nI0819 05:54:03.720685 17538 solver.cpp:228] Iteration 61000, loss = 0.000394629\nI0819 05:54:03.720724 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:54:03.720746 17538 solver.cpp:244]     Train net output #1: loss = 0.000394448 (* 1 = 0.000394448 loss)\nI0819 05:54:03.801254 17538 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0819 05:56:20.967902 17538 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 05:57:43.189157 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86828\nI0819 05:57:43.189452 17538 solver.cpp:404]     Test net output #1: loss = 0.546624 (* 1 = 0.546624 loss)\nI0819 05:57:44.516990 17538 solver.cpp:228] Iteration 61100, loss = 0.000371497\nI0819 05:57:44.517030 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:57:44.517052 17538 solver.cpp:244]     Train net output #1: loss = 0.000371316 (* 1 = 0.000371316 loss)\nI0819 05:57:44.596879 17538 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0819 06:00:01.772656 17538 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 06:01:23.956025 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0819 06:01:23.956370 17538 solver.cpp:404]     Test net output #1: loss = 0.547718 (* 1 = 0.547718 loss)\nI0819 06:01:25.283007 17538 solver.cpp:228] Iteration 61200, loss = 0.000328476\nI0819 06:01:25.283049 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:01:25.283072 17538 solver.cpp:244]     Train net 
output #1: loss = 0.000328295 (* 1 = 0.000328295 loss)\nI0819 06:01:25.361263 17538 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0819 06:03:42.526298 17538 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 06:05:04.755152 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86908\nI0819 06:05:04.755462 17538 solver.cpp:404]     Test net output #1: loss = 0.546855 (* 1 = 0.546855 loss)\nI0819 06:05:06.082365 17538 solver.cpp:228] Iteration 61300, loss = 0.000311456\nI0819 06:05:06.082407 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:05:06.082429 17538 solver.cpp:244]     Train net output #1: loss = 0.000311275 (* 1 = 0.000311275 loss)\nI0819 06:05:06.161715 17538 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0819 06:07:23.344295 17538 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 06:08:45.567540 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0819 06:08:45.567843 17538 solver.cpp:404]     Test net output #1: loss = 0.548943 (* 1 = 0.548943 loss)\nI0819 06:08:46.895023 17538 solver.cpp:228] Iteration 61400, loss = 0.000310015\nI0819 06:08:46.895066 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:08:46.895090 17538 solver.cpp:244]     Train net output #1: loss = 0.000309834 (* 1 = 0.000309834 loss)\nI0819 06:08:46.976361 17538 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0819 06:11:04.113057 17538 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 06:12:26.305028 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86856\nI0819 06:12:26.305358 17538 solver.cpp:404]     Test net output #1: loss = 0.547457 (* 1 = 0.547457 loss)\nI0819 06:12:27.633424 17538 solver.cpp:228] Iteration 61500, loss = 0.000296671\nI0819 06:12:27.633466 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:12:27.633488 17538 solver.cpp:244]     Train net output #1: loss = 0.000296489 (* 1 = 0.000296489 loss)\nI0819 06:12:27.716707 17538 sgd_solver.cpp:166] Iteration 
61500, lr = 0.035\nI0819 06:14:44.890125 17538 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 06:16:07.102099 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0819 06:16:07.102406 17538 solver.cpp:404]     Test net output #1: loss = 0.550921 (* 1 = 0.550921 loss)\nI0819 06:16:08.430166 17538 solver.cpp:228] Iteration 61600, loss = 0.000264489\nI0819 06:16:08.430208 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:16:08.430232 17538 solver.cpp:244]     Train net output #1: loss = 0.000264307 (* 1 = 0.000264307 loss)\nI0819 06:16:08.508481 17538 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0819 06:18:25.659826 17538 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 06:19:47.896960 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86856\nI0819 06:19:47.897243 17538 solver.cpp:404]     Test net output #1: loss = 0.549358 (* 1 = 0.549358 loss)\nI0819 06:19:49.224763 17538 solver.cpp:228] Iteration 61700, loss = 0.000342267\nI0819 06:19:49.224805 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:19:49.224828 17538 solver.cpp:244]     Train net output #1: loss = 0.000342086 (* 1 = 0.000342086 loss)\nI0819 06:19:49.300107 17538 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0819 06:22:06.444789 17538 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 06:23:28.581140 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8676\nI0819 06:23:28.581451 17538 solver.cpp:404]     Test net output #1: loss = 0.549567 (* 1 = 0.549567 loss)\nI0819 06:23:29.907826 17538 solver.cpp:228] Iteration 61800, loss = 0.00032896\nI0819 06:23:29.907869 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:23:29.907892 17538 solver.cpp:244]     Train net output #1: loss = 0.000328778 (* 1 = 0.000328778 loss)\nI0819 06:23:29.989943 17538 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0819 06:25:47.214623 17538 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 
06:27:09.074568 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86872\nI0819 06:27:09.074877 17538 solver.cpp:404]     Test net output #1: loss = 0.548055 (* 1 = 0.548055 loss)\nI0819 06:27:10.402354 17538 solver.cpp:228] Iteration 61900, loss = 0.000367459\nI0819 06:27:10.402396 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:27:10.402420 17538 solver.cpp:244]     Train net output #1: loss = 0.000367277 (* 1 = 0.000367277 loss)\nI0819 06:27:10.487422 17538 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0819 06:29:27.788285 17538 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 06:30:49.958690 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 06:30:49.959017 17538 solver.cpp:404]     Test net output #1: loss = 0.552042 (* 1 = 0.552042 loss)\nI0819 06:30:51.285305 17538 solver.cpp:228] Iteration 62000, loss = 0.000354897\nI0819 06:30:51.285346 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:30:51.285368 17538 solver.cpp:244]     Train net output #1: loss = 0.000354716 (* 1 = 0.000354716 loss)\nI0819 06:30:51.363730 17538 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0819 06:33:08.703109 17538 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 06:34:30.660089 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0819 06:34:30.660393 17538 solver.cpp:404]     Test net output #1: loss = 0.548468 (* 1 = 0.548468 loss)\nI0819 06:34:31.984592 17538 solver.cpp:228] Iteration 62100, loss = 0.000393627\nI0819 06:34:31.984637 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:34:31.984660 17538 solver.cpp:244]     Train net output #1: loss = 0.000393446 (* 1 = 0.000393446 loss)\nI0819 06:34:32.073825 17538 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0819 06:36:49.345265 17538 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 06:38:11.166738 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0819 06:38:11.167037 17538 
solver.cpp:404]     Test net output #1: loss = 0.551984 (* 1 = 0.551984 loss)\nI0819 06:38:12.491804 17538 solver.cpp:228] Iteration 62200, loss = 0.000340197\nI0819 06:38:12.491844 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:38:12.491868 17538 solver.cpp:244]     Train net output #1: loss = 0.000340015 (* 1 = 0.000340015 loss)\nI0819 06:38:12.571576 17538 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0819 06:40:29.761768 17538 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 06:41:51.765712 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86864\nI0819 06:41:51.766021 17538 solver.cpp:404]     Test net output #1: loss = 0.549874 (* 1 = 0.549874 loss)\nI0819 06:41:53.091073 17538 solver.cpp:228] Iteration 62300, loss = 0.000330012\nI0819 06:41:53.091115 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:41:53.091138 17538 solver.cpp:244]     Train net output #1: loss = 0.000329831 (* 1 = 0.000329831 loss)\nI0819 06:41:53.174780 17538 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0819 06:44:10.420635 17538 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 06:45:32.312341 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86752\nI0819 06:45:32.312602 17538 solver.cpp:404]     Test net output #1: loss = 0.551685 (* 1 = 0.551685 loss)\nI0819 06:45:33.636485 17538 solver.cpp:228] Iteration 62400, loss = 0.000334228\nI0819 06:45:33.636525 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:45:33.636540 17538 solver.cpp:244]     Train net output #1: loss = 0.000334046 (* 1 = 0.000334046 loss)\nI0819 06:45:33.718108 17538 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0819 06:47:50.854462 17538 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 06:49:12.927099 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0819 06:49:12.927384 17538 solver.cpp:404]     Test net output #1: loss = 0.55029 (* 1 = 0.55029 loss)\nI0819 06:49:14.252388 17538 
solver.cpp:228] Iteration 62500, loss = 0.000371535\nI0819 06:49:14.252424 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:49:14.252440 17538 solver.cpp:244]     Train net output #1: loss = 0.000371353 (* 1 = 0.000371353 loss)\nI0819 06:49:14.331331 17538 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0819 06:51:31.603160 17538 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 06:52:53.600572 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 06:52:53.600921 17538 solver.cpp:404]     Test net output #1: loss = 0.550993 (* 1 = 0.550993 loss)\nI0819 06:52:54.924989 17538 solver.cpp:228] Iteration 62600, loss = 0.000319098\nI0819 06:52:54.925026 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:52:54.925042 17538 solver.cpp:244]     Train net output #1: loss = 0.000318917 (* 1 = 0.000318917 loss)\nI0819 06:52:55.008986 17538 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0819 06:55:12.287576 17538 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 06:56:34.193264 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86796\nI0819 06:56:34.193534 17538 solver.cpp:404]     Test net output #1: loss = 0.551222 (* 1 = 0.551222 loss)\nI0819 06:56:35.519304 17538 solver.cpp:228] Iteration 62700, loss = 0.000308098\nI0819 06:56:35.519343 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:56:35.519361 17538 solver.cpp:244]     Train net output #1: loss = 0.000307917 (* 1 = 0.000307917 loss)\nI0819 06:56:35.599337 17538 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0819 06:58:52.790537 17538 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 07:00:14.680696 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 07:00:14.680964 17538 solver.cpp:404]     Test net output #1: loss = 0.551278 (* 1 = 0.551278 loss)\nI0819 07:00:16.005910 17538 solver.cpp:228] Iteration 62800, loss = 0.000270195\nI0819 07:00:16.005952 17538 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 07:00:16.005969 17538 solver.cpp:244]     Train net output #1: loss = 0.000270013 (* 1 = 0.000270013 loss)\nI0819 07:00:16.094501 17538 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0819 07:02:33.428820 17538 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 07:03:55.251241 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86844\nI0819 07:03:55.251526 17538 solver.cpp:404]     Test net output #1: loss = 0.548756 (* 1 = 0.548756 loss)\nI0819 07:03:56.577397 17538 solver.cpp:228] Iteration 62900, loss = 0.000313706\nI0819 07:03:56.577438 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:03:56.577455 17538 solver.cpp:244]     Train net output #1: loss = 0.000313524 (* 1 = 0.000313524 loss)\nI0819 07:03:56.656675 17538 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0819 07:06:13.877617 17538 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 07:07:35.965517 17538 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 07:07:35.965817 17538 solver.cpp:404]     Test net output #1: loss = 0.551515 (* 1 = 0.551515 loss)\nI0819 07:07:37.290122 17538 solver.cpp:228] Iteration 63000, loss = 0.000406231\nI0819 07:07:37.290163 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:07:37.290180 17538 solver.cpp:244]     Train net output #1: loss = 0.00040605 (* 1 = 0.00040605 loss)\nI0819 07:07:37.370422 17538 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0819 07:09:54.612648 17538 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 07:11:16.841843 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86852\nI0819 07:11:16.842190 17538 solver.cpp:404]     Test net output #1: loss = 0.550156 (* 1 = 0.550156 loss)\nI0819 07:11:18.167393 17538 solver.cpp:228] Iteration 63100, loss = 0.000323835\nI0819 07:11:18.167435 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:11:18.167453 17538 solver.cpp:244]     Train net output #1: loss = 0.000323653 (* 
1 = 0.000323653 loss)\nI0819 07:11:18.249238 17538 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0819 07:13:35.489065 17538 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 07:14:57.751056 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0819 07:14:57.751349 17538 solver.cpp:404]     Test net output #1: loss = 0.551098 (* 1 = 0.551098 loss)\nI0819 07:14:59.075235 17538 solver.cpp:228] Iteration 63200, loss = 0.000317868\nI0819 07:14:59.075278 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:14:59.075294 17538 solver.cpp:244]     Train net output #1: loss = 0.000317687 (* 1 = 0.000317687 loss)\nI0819 07:14:59.162307 17538 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0819 07:17:16.493141 17538 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 07:18:38.715926 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86832\nI0819 07:18:38.716208 17538 solver.cpp:404]     Test net output #1: loss = 0.552344 (* 1 = 0.552344 loss)\nI0819 07:18:40.041319 17538 solver.cpp:228] Iteration 63300, loss = 0.000326947\nI0819 07:18:40.041360 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:18:40.041376 17538 solver.cpp:244]     Train net output #1: loss = 0.000326765 (* 1 = 0.000326765 loss)\nI0819 07:18:40.130250 17538 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0819 07:20:57.411028 17538 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 07:22:19.566779 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8672\nI0819 07:22:19.567107 17538 solver.cpp:404]     Test net output #1: loss = 0.554647 (* 1 = 0.554647 loss)\nI0819 07:22:20.891789 17538 solver.cpp:228] Iteration 63400, loss = 0.000325197\nI0819 07:22:20.891830 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:22:20.891846 17538 solver.cpp:244]     Train net output #1: loss = 0.000325015 (* 1 = 0.000325015 loss)\nI0819 07:22:20.977957 17538 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0819 
07:24:38.079190 17538 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 07:26:00.297616 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86852\nI0819 07:26:00.297888 17538 solver.cpp:404]     Test net output #1: loss = 0.551829 (* 1 = 0.551829 loss)\nI0819 07:26:01.622612 17538 solver.cpp:228] Iteration 63500, loss = 0.000365955\nI0819 07:26:01.622655 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:26:01.622673 17538 solver.cpp:244]     Train net output #1: loss = 0.000365774 (* 1 = 0.000365774 loss)\nI0819 07:26:01.714684 17538 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0819 07:28:18.846469 17538 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 07:29:41.090863 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86764\nI0819 07:29:41.091135 17538 solver.cpp:404]     Test net output #1: loss = 0.554678 (* 1 = 0.554678 loss)\nI0819 07:29:42.417089 17538 solver.cpp:228] Iteration 63600, loss = 0.0003218\nI0819 07:29:42.417130 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:29:42.417146 17538 solver.cpp:244]     Train net output #1: loss = 0.000321619 (* 1 = 0.000321619 loss)\nI0819 07:29:42.499646 17538 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0819 07:31:59.632481 17538 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 07:33:21.838356 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86816\nI0819 07:33:21.838712 17538 solver.cpp:404]     Test net output #1: loss = 0.552538 (* 1 = 0.552538 loss)\nI0819 07:33:23.162880 17538 solver.cpp:228] Iteration 63700, loss = 0.000397303\nI0819 07:33:23.162921 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:33:23.162937 17538 solver.cpp:244]     Train net output #1: loss = 0.000397121 (* 1 = 0.000397121 loss)\nI0819 07:33:23.252066 17538 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0819 07:35:40.537127 17538 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 07:37:02.696734 17538 
solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0819 07:37:02.697008 17538 solver.cpp:404]     Test net output #1: loss = 0.555778 (* 1 = 0.555778 loss)\nI0819 07:37:04.021455 17538 solver.cpp:228] Iteration 63800, loss = 0.000366448\nI0819 07:37:04.021494 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:37:04.021510 17538 solver.cpp:244]     Train net output #1: loss = 0.000366267 (* 1 = 0.000366267 loss)\nI0819 07:37:04.103586 17538 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0819 07:39:21.385149 17538 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 07:40:43.469813 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0819 07:40:43.470154 17538 solver.cpp:404]     Test net output #1: loss = 0.553171 (* 1 = 0.553171 loss)\nI0819 07:40:44.795286 17538 solver.cpp:228] Iteration 63900, loss = 0.000301459\nI0819 07:40:44.795327 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:40:44.795343 17538 solver.cpp:244]     Train net output #1: loss = 0.000301277 (* 1 = 0.000301277 loss)\nI0819 07:40:44.877552 17538 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0819 07:43:02.116950 17538 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 07:44:24.187237 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86692\nI0819 07:44:24.187496 17538 solver.cpp:404]     Test net output #1: loss = 0.555103 (* 1 = 0.555103 loss)\nI0819 07:44:25.511808 17538 solver.cpp:228] Iteration 64000, loss = 0.000333993\nI0819 07:44:25.511849 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:44:25.511867 17538 solver.cpp:244]     Train net output #1: loss = 0.000333812 (* 1 = 0.000333812 loss)\nI0819 07:44:25.599756 17538 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0819 07:46:42.858666 17538 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 07:48:04.733371 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0819 07:48:04.733661 17538 solver.cpp:404]     Test 
net output #1: loss = 0.553332 (* 1 = 0.553332 loss)\nI0819 07:48:06.058605 17538 solver.cpp:228] Iteration 64100, loss = 0.000326193\nI0819 07:48:06.058646 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:48:06.058663 17538 solver.cpp:244]     Train net output #1: loss = 0.000326012 (* 1 = 0.000326012 loss)\nI0819 07:48:06.144094 17538 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0819 07:50:23.266216 17538 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 07:51:45.135763 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0819 07:51:45.136057 17538 solver.cpp:404]     Test net output #1: loss = 0.557879 (* 1 = 0.557879 loss)\nI0819 07:51:46.461196 17538 solver.cpp:228] Iteration 64200, loss = 0.000305385\nI0819 07:51:46.461238 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:51:46.461254 17538 solver.cpp:244]     Train net output #1: loss = 0.000305204 (* 1 = 0.000305204 loss)\nI0819 07:51:46.556752 17538 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0819 07:54:03.866328 17538 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 07:55:25.895803 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0819 07:55:25.896076 17538 solver.cpp:404]     Test net output #1: loss = 0.555796 (* 1 = 0.555796 loss)\nI0819 07:55:27.221134 17538 solver.cpp:228] Iteration 64300, loss = 0.000299422\nI0819 07:55:27.221174 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:55:27.221191 17538 solver.cpp:244]     Train net output #1: loss = 0.000299241 (* 1 = 0.000299241 loss)\nI0819 07:55:27.308221 17538 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0819 07:57:44.559178 17538 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 07:59:06.638840 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8668\nI0819 07:59:06.639112 17538 solver.cpp:404]     Test net output #1: loss = 0.556497 (* 1 = 0.556497 loss)\nI0819 07:59:07.963003 17538 solver.cpp:228] Iteration 
64400, loss = 0.000335554\nI0819 07:59:07.963044 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:59:07.963062 17538 solver.cpp:244]     Train net output #1: loss = 0.000335372 (* 1 = 0.000335372 loss)\nI0819 07:59:08.053056 17538 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0819 08:01:25.316311 17538 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 08:02:47.566313 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0819 08:02:47.566632 17538 solver.cpp:404]     Test net output #1: loss = 0.555656 (* 1 = 0.555656 loss)\nI0819 08:02:48.891894 17538 solver.cpp:228] Iteration 64500, loss = 0.000315814\nI0819 08:02:48.891934 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:02:48.891950 17538 solver.cpp:244]     Train net output #1: loss = 0.000315632 (* 1 = 0.000315632 loss)\nI0819 08:02:48.980219 17538 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0819 08:05:06.211902 17538 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 08:06:28.420603 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0819 08:06:28.420876 17538 solver.cpp:404]     Test net output #1: loss = 0.557779 (* 1 = 0.557779 loss)\nI0819 08:06:29.745533 17538 solver.cpp:228] Iteration 64600, loss = 0.000337954\nI0819 08:06:29.745575 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:06:29.745591 17538 solver.cpp:244]     Train net output #1: loss = 0.000337773 (* 1 = 0.000337773 loss)\nI0819 08:06:29.826113 17538 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0819 08:08:47.096550 17538 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 08:10:09.322739 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86716\nI0819 08:10:09.323019 17538 solver.cpp:404]     Test net output #1: loss = 0.558663 (* 1 = 0.558663 loss)\nI0819 08:10:10.648605 17538 solver.cpp:228] Iteration 64700, loss = 0.000325316\nI0819 08:10:10.648646 17538 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0819 08:10:10.648663 17538 solver.cpp:244]     Train net output #1: loss = 0.000325135 (* 1 = 0.000325135 loss)\nI0819 08:10:10.729873 17538 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0819 08:12:27.998533 17538 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 08:13:50.235020 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86648\nI0819 08:13:50.235359 17538 solver.cpp:404]     Test net output #1: loss = 0.558694 (* 1 = 0.558694 loss)\nI0819 08:13:51.560781 17538 solver.cpp:228] Iteration 64800, loss = 0.000339796\nI0819 08:13:51.560822 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:13:51.560838 17538 solver.cpp:244]     Train net output #1: loss = 0.000339614 (* 1 = 0.000339614 loss)\nI0819 08:13:51.645035 17538 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0819 08:16:08.991147 17538 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 08:17:31.210094 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 08:17:31.210397 17538 solver.cpp:404]     Test net output #1: loss = 0.557858 (* 1 = 0.557858 loss)\nI0819 08:17:32.534540 17538 solver.cpp:228] Iteration 64900, loss = 0.000359752\nI0819 08:17:32.534581 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:17:32.534597 17538 solver.cpp:244]     Train net output #1: loss = 0.00035957 (* 1 = 0.00035957 loss)\nI0819 08:17:32.611711 17538 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0819 08:19:49.791394 17538 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 08:21:12.020365 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86636\nI0819 08:21:12.020709 17538 solver.cpp:404]     Test net output #1: loss = 0.558598 (* 1 = 0.558598 loss)\nI0819 08:21:13.344794 17538 solver.cpp:228] Iteration 65000, loss = 0.000334198\nI0819 08:21:13.344831 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:21:13.344846 17538 solver.cpp:244]     Train net output #1: loss = 0.000334016 (* 1 = 0.000334016 
loss)\nI0819 08:21:13.434551 17538 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0819 08:23:30.645337 17538 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 08:24:52.871232 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86744\nI0819 08:24:52.871544 17538 solver.cpp:404]     Test net output #1: loss = 0.55705 (* 1 = 0.55705 loss)\nI0819 08:24:54.195088 17538 solver.cpp:228] Iteration 65100, loss = 0.000338009\nI0819 08:24:54.195123 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:24:54.195139 17538 solver.cpp:244]     Train net output #1: loss = 0.000337828 (* 1 = 0.000337828 loss)\nI0819 08:24:54.272539 17538 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0819 08:27:11.079910 17538 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 08:28:33.300418 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86656\nI0819 08:28:33.300700 17538 solver.cpp:404]     Test net output #1: loss = 0.561065 (* 1 = 0.561065 loss)\nI0819 08:28:34.624133 17538 solver.cpp:228] Iteration 65200, loss = 0.000323059\nI0819 08:28:34.624169 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:28:34.624186 17538 solver.cpp:244]     Train net output #1: loss = 0.000322877 (* 1 = 0.000322877 loss)\nI0819 08:28:34.702586 17538 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0819 08:30:51.504539 17538 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 08:32:13.729707 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86692\nI0819 08:32:13.730048 17538 solver.cpp:404]     Test net output #1: loss = 0.559715 (* 1 = 0.559715 loss)\nI0819 08:32:15.054275 17538 solver.cpp:228] Iteration 65300, loss = 0.000313258\nI0819 08:32:15.054312 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:32:15.054328 17538 solver.cpp:244]     Train net output #1: loss = 0.000313077 (* 1 = 0.000313077 loss)\nI0819 08:32:15.133548 17538 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0819 08:34:31.972983 17538 
solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 08:35:54.198179 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86588\nI0819 08:35:54.198484 17538 solver.cpp:404]     Test net output #1: loss = 0.562091 (* 1 = 0.562091 loss)\nI0819 08:35:55.522413 17538 solver.cpp:228] Iteration 65400, loss = 0.000340171\nI0819 08:35:55.522450 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:35:55.522466 17538 solver.cpp:244]     Train net output #1: loss = 0.000339989 (* 1 = 0.000339989 loss)\nI0819 08:35:55.605006 17538 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0819 08:38:12.406666 17538 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 08:39:34.628520 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 08:39:34.628808 17538 solver.cpp:404]     Test net output #1: loss = 0.560827 (* 1 = 0.560827 loss)\nI0819 08:39:35.952411 17538 solver.cpp:228] Iteration 65500, loss = 0.000338254\nI0819 08:39:35.952447 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:39:35.952462 17538 solver.cpp:244]     Train net output #1: loss = 0.000338072 (* 1 = 0.000338072 loss)\nI0819 08:39:36.045838 17538 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0819 08:41:52.890473 17538 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 08:43:14.701341 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0819 08:43:14.701650 17538 solver.cpp:404]     Test net output #1: loss = 0.563686 (* 1 = 0.563686 loss)\nI0819 08:43:16.028059 17538 solver.cpp:228] Iteration 65600, loss = 0.000291702\nI0819 08:43:16.028100 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:43:16.028116 17538 solver.cpp:244]     Train net output #1: loss = 0.00029152 (* 1 = 0.00029152 loss)\nI0819 08:43:16.111245 17538 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0819 08:45:33.130620 17538 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 08:46:54.761276 17538 solver.cpp:404]     Test net output 
#0: accuracy = 0.8666\nI0819 08:46:54.761572 17538 solver.cpp:404]     Test net output #1: loss = 0.56178 (* 1 = 0.56178 loss)\nI0819 08:46:56.088642 17538 solver.cpp:228] Iteration 65700, loss = 0.000344029\nI0819 08:46:56.088680 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:46:56.088696 17538 solver.cpp:244]     Train net output #1: loss = 0.000343847 (* 1 = 0.000343847 loss)\nI0819 08:46:56.166818 17538 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0819 08:49:13.400979 17538 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 08:50:35.217191 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86584\nI0819 08:50:35.217501 17538 solver.cpp:404]     Test net output #1: loss = 0.563741 (* 1 = 0.563741 loss)\nI0819 08:50:36.544395 17538 solver.cpp:228] Iteration 65800, loss = 0.000321289\nI0819 08:50:36.544435 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:50:36.544450 17538 solver.cpp:244]     Train net output #1: loss = 0.000321108 (* 1 = 0.000321108 loss)\nI0819 08:50:36.625759 17538 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0819 08:52:54.112434 17538 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 08:54:16.327080 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86704\nI0819 08:54:16.327389 17538 solver.cpp:404]     Test net output #1: loss = 0.561603 (* 1 = 0.561603 loss)\nI0819 08:54:17.655163 17538 solver.cpp:228] Iteration 65900, loss = 0.000313773\nI0819 08:54:17.655202 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:54:17.655218 17538 solver.cpp:244]     Train net output #1: loss = 0.000313591 (* 1 = 0.000313591 loss)\nI0819 08:54:17.742341 17538 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0819 08:56:35.258870 17538 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 08:57:57.172792 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86532\nI0819 08:57:57.173149 17538 solver.cpp:404]     Test net output #1: loss = 0.564431 (* 1 = 
0.564431 loss)\nI0819 08:57:58.500555 17538 solver.cpp:228] Iteration 66000, loss = 0.00032632\nI0819 08:57:58.500594 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 08:57:58.500609 17538 solver.cpp:244]     Train net output #1: loss = 0.000326138 (* 1 = 0.000326138 loss)\nI0819 08:57:58.579298 17538 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0819 09:00:16.136193 17538 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 09:01:38.270515 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86644\nI0819 09:01:38.270887 17538 solver.cpp:404]     Test net output #1: loss = 0.565195 (* 1 = 0.565195 loss)\nI0819 09:01:39.597961 17538 solver.cpp:228] Iteration 66100, loss = 0.000351473\nI0819 09:01:39.597998 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:01:39.598014 17538 solver.cpp:244]     Train net output #1: loss = 0.000351292 (* 1 = 0.000351292 loss)\nI0819 09:01:39.678625 17538 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0819 09:03:57.230818 17538 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 09:05:19.503765 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86536\nI0819 09:05:19.504137 17538 solver.cpp:404]     Test net output #1: loss = 0.566325 (* 1 = 0.566325 loss)\nI0819 09:05:20.830705 17538 solver.cpp:228] Iteration 66200, loss = 0.000323412\nI0819 09:05:20.830740 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:05:20.830755 17538 solver.cpp:244]     Train net output #1: loss = 0.00032323 (* 1 = 0.00032323 loss)\nI0819 09:05:20.915616 17538 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0819 09:07:38.529494 17538 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 09:09:00.809952 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0819 09:09:00.810323 17538 solver.cpp:404]     Test net output #1: loss = 0.564818 (* 1 = 0.564818 loss)\nI0819 09:09:02.137497 17538 solver.cpp:228] Iteration 66300, loss = 0.00031492\nI0819 
09:09:02.137534 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:09:02.137550 17538 solver.cpp:244]     Train net output #1: loss = 0.000314739 (* 1 = 0.000314739 loss)\nI0819 09:09:02.218278 17538 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0819 09:11:19.767403 17538 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 09:12:42.022948 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0819 09:12:42.023325 17538 solver.cpp:404]     Test net output #1: loss = 0.566944 (* 1 = 0.566944 loss)\nI0819 09:12:43.350035 17538 solver.cpp:228] Iteration 66400, loss = 0.000315055\nI0819 09:12:43.350071 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:12:43.350088 17538 solver.cpp:244]     Train net output #1: loss = 0.000314873 (* 1 = 0.000314873 loss)\nI0819 09:12:43.434617 17538 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0819 09:15:01.057631 17538 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 09:16:23.328688 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86624\nI0819 09:16:23.329041 17538 solver.cpp:404]     Test net output #1: loss = 0.567351 (* 1 = 0.567351 loss)\nI0819 09:16:24.655741 17538 solver.cpp:228] Iteration 66500, loss = 0.000248719\nI0819 09:16:24.655779 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:16:24.655796 17538 solver.cpp:244]     Train net output #1: loss = 0.000248538 (* 1 = 0.000248538 loss)\nI0819 09:16:24.737802 17538 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0819 09:18:42.238940 17538 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 09:20:04.501961 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0819 09:20:04.502311 17538 solver.cpp:404]     Test net output #1: loss = 0.569006 (* 1 = 0.569006 loss)\nI0819 09:20:05.828760 17538 solver.cpp:228] Iteration 66600, loss = 0.000376078\nI0819 09:20:05.828800 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:20:05.828816 17538 
solver.cpp:244]     Train net output #1: loss = 0.000375896 (* 1 = 0.000375896 loss)\nI0819 09:20:05.912714 17538 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0819 09:22:23.288632 17538 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 09:23:45.539713 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8654\nI0819 09:23:45.540083 17538 solver.cpp:404]     Test net output #1: loss = 0.568788 (* 1 = 0.568788 loss)\nI0819 09:23:46.866314 17538 solver.cpp:228] Iteration 66700, loss = 0.000318956\nI0819 09:23:46.866358 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:23:46.866374 17538 solver.cpp:244]     Train net output #1: loss = 0.000318775 (* 1 = 0.000318775 loss)\nI0819 09:23:46.952988 17538 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0819 09:26:04.344235 17538 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 09:27:26.620807 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86464\nI0819 09:27:26.621177 17538 solver.cpp:404]     Test net output #1: loss = 0.57002 (* 1 = 0.57002 loss)\nI0819 09:27:27.948335 17538 solver.cpp:228] Iteration 66800, loss = 0.000317661\nI0819 09:27:27.948374 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:27:27.948390 17538 solver.cpp:244]     Train net output #1: loss = 0.000317479 (* 1 = 0.000317479 loss)\nI0819 09:27:28.037050 17538 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0819 09:29:45.508365 17538 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 09:31:07.756640 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86532\nI0819 09:31:07.756985 17538 solver.cpp:404]     Test net output #1: loss = 0.569704 (* 1 = 0.569704 loss)\nI0819 09:31:09.083763 17538 solver.cpp:228] Iteration 66900, loss = 0.000306934\nI0819 09:31:09.083799 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:31:09.083817 17538 solver.cpp:244]     Train net output #1: loss = 0.000306753 (* 1 = 0.000306753 loss)\nI0819 09:31:09.167366 17538 
sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0819 09:33:26.714242 17538 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 09:34:48.950875 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86416\nI0819 09:34:48.951236 17538 solver.cpp:404]     Test net output #1: loss = 0.570219 (* 1 = 0.570219 loss)\nI0819 09:34:50.277902 17538 solver.cpp:228] Iteration 67000, loss = 0.000282102\nI0819 09:34:50.277941 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:34:50.277957 17538 solver.cpp:244]     Train net output #1: loss = 0.00028192 (* 1 = 0.00028192 loss)\nI0819 09:34:50.363617 17538 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0819 09:37:07.758723 17538 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 09:38:29.893141 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86524\nI0819 09:38:29.893484 17538 solver.cpp:404]     Test net output #1: loss = 0.567408 (* 1 = 0.567408 loss)\nI0819 09:38:31.221525 17538 solver.cpp:228] Iteration 67100, loss = 0.00033467\nI0819 09:38:31.221567 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:38:31.221583 17538 solver.cpp:244]     Train net output #1: loss = 0.000334488 (* 1 = 0.000334488 loss)\nI0819 09:38:31.306076 17538 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0819 09:40:48.756916 17538 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 09:42:10.625107 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86448\nI0819 09:42:10.625414 17538 solver.cpp:404]     Test net output #1: loss = 0.570877 (* 1 = 0.570877 loss)\nI0819 09:42:11.952790 17538 solver.cpp:228] Iteration 67200, loss = 0.00035358\nI0819 09:42:11.952831 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:42:11.952847 17538 solver.cpp:244]     Train net output #1: loss = 0.000353399 (* 1 = 0.000353399 loss)\nI0819 09:42:12.040261 17538 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0819 09:44:29.482177 17538 solver.cpp:337] Iteration 67300, 
Testing net (#0)\nI0819 09:45:51.666465 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86468\nI0819 09:45:51.666816 17538 solver.cpp:404]     Test net output #1: loss = 0.571273 (* 1 = 0.571273 loss)\nI0819 09:45:52.993412 17538 solver.cpp:228] Iteration 67300, loss = 0.000298462\nI0819 09:45:52.993451 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:45:52.993468 17538 solver.cpp:244]     Train net output #1: loss = 0.000298281 (* 1 = 0.000298281 loss)\nI0819 09:45:53.075521 17538 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0819 09:48:10.524847 17538 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 09:49:32.687589 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86412\nI0819 09:49:32.687922 17538 solver.cpp:404]     Test net output #1: loss = 0.574977 (* 1 = 0.574977 loss)\nI0819 09:49:34.014468 17538 solver.cpp:228] Iteration 67400, loss = 0.000341713\nI0819 09:49:34.014508 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:49:34.014523 17538 solver.cpp:244]     Train net output #1: loss = 0.000341531 (* 1 = 0.000341531 loss)\nI0819 09:49:34.098445 17538 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0819 09:51:51.603230 17538 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 09:53:13.408316 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86512\nI0819 09:53:13.408679 17538 solver.cpp:404]     Test net output #1: loss = 0.573651 (* 1 = 0.573651 loss)\nI0819 09:53:14.734577 17538 solver.cpp:228] Iteration 67500, loss = 0.000312242\nI0819 09:53:14.734617 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:53:14.734633 17538 solver.cpp:244]     Train net output #1: loss = 0.00031206 (* 1 = 0.00031206 loss)\nI0819 09:53:14.820863 17538 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0819 09:55:32.333515 17538 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 09:56:54.068207 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0819 
09:56:54.068753 17538 solver.cpp:404]     Test net output #1: loss = 0.573875 (* 1 = 0.573875 loss)\nI0819 09:56:55.395491 17538 solver.cpp:228] Iteration 67600, loss = 0.000314026\nI0819 09:56:55.395530 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 09:56:55.395546 17538 solver.cpp:244]     Train net output #1: loss = 0.000313844 (* 1 = 0.000313844 loss)\nI0819 09:56:55.484087 17538 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0819 09:59:12.907528 17538 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 10:00:34.862057 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86508\nI0819 10:00:34.862409 17538 solver.cpp:404]     Test net output #1: loss = 0.57224 (* 1 = 0.57224 loss)\nI0819 10:00:36.188935 17538 solver.cpp:228] Iteration 67700, loss = 0.00031083\nI0819 10:00:36.188971 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:00:36.188988 17538 solver.cpp:244]     Train net output #1: loss = 0.000310648 (* 1 = 0.000310648 loss)\nI0819 10:00:36.270932 17538 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0819 10:02:53.474098 17538 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 10:04:15.741482 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86408\nI0819 10:04:15.741863 17538 solver.cpp:404]     Test net output #1: loss = 0.573736 (* 1 = 0.573736 loss)\nI0819 10:04:17.068083 17538 solver.cpp:228] Iteration 67800, loss = 0.000323598\nI0819 10:04:17.068119 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:04:17.068133 17538 solver.cpp:244]     Train net output #1: loss = 0.000323417 (* 1 = 0.000323417 loss)\nI0819 10:04:17.146309 17538 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0819 10:06:34.393311 17538 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 10:07:56.659657 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0819 10:07:56.660029 17538 solver.cpp:404]     Test net output #1: loss = 0.574969 (* 1 = 0.574969 loss)\nI0819 
10:07:57.986986 17538 solver.cpp:228] Iteration 67900, loss = 0.000313397\nI0819 10:07:57.987021 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:07:57.987036 17538 solver.cpp:244]     Train net output #1: loss = 0.000313215 (* 1 = 0.000313215 loss)\nI0819 10:07:58.067797 17538 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0819 10:10:15.305513 17538 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 10:11:37.556924 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 10:11:37.557312 17538 solver.cpp:404]     Test net output #1: loss = 0.575853 (* 1 = 0.575853 loss)\nI0819 10:11:38.883932 17538 solver.cpp:228] Iteration 68000, loss = 0.000276688\nI0819 10:11:38.883971 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:11:38.883987 17538 solver.cpp:244]     Train net output #1: loss = 0.000276506 (* 1 = 0.000276506 loss)\nI0819 10:11:38.964042 17538 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0819 10:13:56.179774 17538 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 10:15:18.457844 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0819 10:15:18.458191 17538 solver.cpp:404]     Test net output #1: loss = 0.575846 (* 1 = 0.575846 loss)\nI0819 10:15:19.785382 17538 solver.cpp:228] Iteration 68100, loss = 0.000299268\nI0819 10:15:19.785424 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:15:19.785439 17538 solver.cpp:244]     Train net output #1: loss = 0.000299087 (* 1 = 0.000299087 loss)\nI0819 10:15:19.868132 17538 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0819 10:17:37.142882 17538 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 10:18:59.426108 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0819 10:18:59.426507 17538 solver.cpp:404]     Test net output #1: loss = 0.579397 (* 1 = 0.579397 loss)\nI0819 10:19:00.753016 17538 solver.cpp:228] Iteration 68200, loss = 0.000337929\nI0819 10:19:00.753056 17538 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:19:00.753072 17538 solver.cpp:244]     Train net output #1: loss = 0.000337748 (* 1 = 0.000337748 loss)\nI0819 10:19:00.833274 17538 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0819 10:21:18.139679 17538 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 10:22:40.422348 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86424\nI0819 10:22:40.422710 17538 solver.cpp:404]     Test net output #1: loss = 0.57862 (* 1 = 0.57862 loss)\nI0819 10:22:41.750221 17538 solver.cpp:228] Iteration 68300, loss = 0.000287388\nI0819 10:22:41.750262 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:22:41.750277 17538 solver.cpp:244]     Train net output #1: loss = 0.000287206 (* 1 = 0.000287206 loss)\nI0819 10:22:41.834202 17538 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0819 10:24:59.199627 17538 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 10:26:21.486143 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 10:26:21.486510 17538 solver.cpp:404]     Test net output #1: loss = 0.580306 (* 1 = 0.580306 loss)\nI0819 10:26:22.813266 17538 solver.cpp:228] Iteration 68400, loss = 0.000298\nI0819 10:26:22.813308 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:26:22.813325 17538 solver.cpp:244]     Train net output #1: loss = 0.000297818 (* 1 = 0.000297818 loss)\nI0819 10:26:22.898744 17538 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0819 10:28:40.255830 17538 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 10:30:02.528431 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86388\nI0819 10:30:02.528789 17538 solver.cpp:404]     Test net output #1: loss = 0.580261 (* 1 = 0.580261 loss)\nI0819 10:30:03.856114 17538 solver.cpp:228] Iteration 68500, loss = 0.000304266\nI0819 10:30:03.856151 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:30:03.856166 17538 solver.cpp:244]     Train net 
output #1: loss = 0.000304085 (* 1 = 0.000304085 loss)\nI0819 10:30:03.935760 17538 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0819 10:32:21.138460 17538 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 10:33:43.416494 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86372\nI0819 10:33:43.416846 17538 solver.cpp:404]     Test net output #1: loss = 0.581594 (* 1 = 0.581594 loss)\nI0819 10:33:44.744700 17538 solver.cpp:228] Iteration 68600, loss = 0.000299768\nI0819 10:33:44.744740 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:33:44.744755 17538 solver.cpp:244]     Train net output #1: loss = 0.000299586 (* 1 = 0.000299586 loss)\nI0819 10:33:44.821346 17538 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0819 10:36:02.053242 17538 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 10:37:24.335186 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0819 10:37:24.335536 17538 solver.cpp:404]     Test net output #1: loss = 0.58077 (* 1 = 0.58077 loss)\nI0819 10:37:25.662603 17538 solver.cpp:228] Iteration 68700, loss = 0.000296229\nI0819 10:37:25.662642 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:37:25.662658 17538 solver.cpp:244]     Train net output #1: loss = 0.000296047 (* 1 = 0.000296047 loss)\nI0819 10:37:25.741677 17538 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0819 10:39:42.940078 17538 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 10:41:05.213316 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86316\nI0819 10:41:05.213666 17538 solver.cpp:404]     Test net output #1: loss = 0.584535 (* 1 = 0.584535 loss)\nI0819 10:41:06.540758 17538 solver.cpp:228] Iteration 68800, loss = 0.000305201\nI0819 10:41:06.540797 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:41:06.540812 17538 solver.cpp:244]     Train net output #1: loss = 0.000305019 (* 1 = 0.000305019 loss)\nI0819 10:41:06.617185 17538 sgd_solver.cpp:166] Iteration 
68800, lr = 0.035\nI0819 10:43:23.855403 17538 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 10:44:46.150180 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86452\nI0819 10:44:46.150564 17538 solver.cpp:404]     Test net output #1: loss = 0.581592 (* 1 = 0.581592 loss)\nI0819 10:44:47.477862 17538 solver.cpp:228] Iteration 68900, loss = 0.000272627\nI0819 10:44:47.477900 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:44:47.477916 17538 solver.cpp:244]     Train net output #1: loss = 0.000272445 (* 1 = 0.000272445 loss)\nI0819 10:44:47.556201 17538 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0819 10:47:04.793431 17538 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 10:48:27.064021 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86308\nI0819 10:48:27.064384 17538 solver.cpp:404]     Test net output #1: loss = 0.58695 (* 1 = 0.58695 loss)\nI0819 10:48:28.391512 17538 solver.cpp:228] Iteration 69000, loss = 0.000337511\nI0819 10:48:28.391548 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:48:28.391562 17538 solver.cpp:244]     Train net output #1: loss = 0.000337329 (* 1 = 0.000337329 loss)\nI0819 10:48:28.470619 17538 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0819 10:50:45.720487 17538 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 10:52:07.987591 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86412\nI0819 10:52:07.987933 17538 solver.cpp:404]     Test net output #1: loss = 0.583689 (* 1 = 0.583689 loss)\nI0819 10:52:09.314342 17538 solver.cpp:228] Iteration 69100, loss = 0.000255216\nI0819 10:52:09.314376 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:52:09.314391 17538 solver.cpp:244]     Train net output #1: loss = 0.000255035 (* 1 = 0.000255035 loss)\nI0819 10:52:09.397198 17538 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0819 10:54:26.690932 17538 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 
10:55:48.981832 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86296\nI0819 10:55:48.982206 17538 solver.cpp:404]     Test net output #1: loss = 0.588749 (* 1 = 0.588749 loss)\nI0819 10:55:50.309006 17538 solver.cpp:228] Iteration 69200, loss = 0.000286613\nI0819 10:55:50.309041 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:55:50.309057 17538 solver.cpp:244]     Train net output #1: loss = 0.000286432 (* 1 = 0.000286432 loss)\nI0819 10:55:50.391810 17538 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0819 10:58:07.630693 17538 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 10:59:29.925675 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86364\nI0819 10:59:29.926028 17538 solver.cpp:404]     Test net output #1: loss = 0.587103 (* 1 = 0.587103 loss)\nI0819 10:59:31.252737 17538 solver.cpp:228] Iteration 69300, loss = 0.000303571\nI0819 10:59:31.252770 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:59:31.252786 17538 solver.cpp:244]     Train net output #1: loss = 0.000303389 (* 1 = 0.000303389 loss)\nI0819 10:59:31.335182 17538 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0819 11:01:48.626646 17538 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 11:03:10.915436 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0819 11:03:10.915804 17538 solver.cpp:404]     Test net output #1: loss = 0.58676 (* 1 = 0.58676 loss)\nI0819 11:03:12.244284 17538 solver.cpp:228] Iteration 69400, loss = 0.000293269\nI0819 11:03:12.244320 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:03:12.244335 17538 solver.cpp:244]     Train net output #1: loss = 0.000293088 (* 1 = 0.000293088 loss)\nI0819 11:03:12.327956 17538 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0819 11:05:29.600106 17538 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 11:06:51.855605 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86388\nI0819 11:06:51.855971 17538 
solver.cpp:404]     Test net output #1: loss = 0.588419 (* 1 = 0.588419 loss)\nI0819 11:06:53.182054 17538 solver.cpp:228] Iteration 69500, loss = 0.000306583\nI0819 11:06:53.182087 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:06:53.182102 17538 solver.cpp:244]     Train net output #1: loss = 0.000306402 (* 1 = 0.000306402 loss)\nI0819 11:06:53.269939 17538 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0819 11:09:10.598930 17538 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 11:10:32.864948 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86312\nI0819 11:10:32.865329 17538 solver.cpp:404]     Test net output #1: loss = 0.592177 (* 1 = 0.592177 loss)\nI0819 11:10:34.192668 17538 solver.cpp:228] Iteration 69600, loss = 0.000292051\nI0819 11:10:34.192703 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:10:34.192719 17538 solver.cpp:244]     Train net output #1: loss = 0.00029187 (* 1 = 0.00029187 loss)\nI0819 11:10:34.276563 17538 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0819 11:12:51.556428 17538 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 11:14:13.834985 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0819 11:14:13.835332 17538 solver.cpp:404]     Test net output #1: loss = 0.591376 (* 1 = 0.591376 loss)\nI0819 11:14:15.162351 17538 solver.cpp:228] Iteration 69700, loss = 0.000252562\nI0819 11:14:15.162386 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:14:15.162402 17538 solver.cpp:244]     Train net output #1: loss = 0.000252381 (* 1 = 0.000252381 loss)\nI0819 11:14:15.241793 17538 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0819 11:16:32.594512 17538 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 11:17:54.871192 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0819 11:17:54.871575 17538 solver.cpp:404]     Test net output #1: loss = 0.591085 (* 1 = 0.591085 loss)\nI0819 11:17:56.198233 17538 
solver.cpp:228] Iteration 69800, loss = 0.000298379\nI0819 11:17:56.198271 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:17:56.198285 17538 solver.cpp:244]     Train net output #1: loss = 0.000298198 (* 1 = 0.000298198 loss)\nI0819 11:17:56.280037 17538 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0819 11:20:13.467922 17538 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 11:21:35.757714 17538 solver.cpp:404]     Test net output #0: accuracy = 0.864\nI0819 11:21:35.758087 17538 solver.cpp:404]     Test net output #1: loss = 0.593448 (* 1 = 0.593448 loss)\nI0819 11:21:37.084939 17538 solver.cpp:228] Iteration 69900, loss = 0.000285742\nI0819 11:21:37.084975 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:21:37.084991 17538 solver.cpp:244]     Train net output #1: loss = 0.00028556 (* 1 = 0.00028556 loss)\nI0819 11:21:37.167213 17538 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0819 11:23:54.419479 17538 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 11:25:16.696597 17538 solver.cpp:404]     Test net output #0: accuracy = 0.86296\nI0819 11:25:16.696967 17538 solver.cpp:404]     Test net output #1: loss = 0.594898 (* 1 = 0.594898 loss)\nI0819 11:25:18.025563 17538 solver.cpp:228] Iteration 70000, loss = 0.000302584\nI0819 11:25:18.025604 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:25:18.025620 17538 solver.cpp:244]     Train net output #1: loss = 0.000302403 (* 1 = 0.000302403 loss)\nI0819 11:25:18.105670 17538 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0819 11:25:18.105695 17538 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0819 11:27:35.424237 17538 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 11:28:57.712831 17538 solver.cpp:404]     Test net output #0: accuracy = 0.87048\nI0819 11:28:57.713214 17538 solver.cpp:404]     Test net output #1: loss = 0.570115 (* 1 = 0.570115 loss)\nI0819 11:28:59.041168 17538 solver.cpp:228] 
Iteration 70100, loss = 0.000308189\nI0819 11:28:59.041205 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:28:59.041221 17538 solver.cpp:244]     Train net output #1: loss = 0.000308007 (* 1 = 0.000308007 loss)\nI0819 11:28:59.118958 17538 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0819 11:31:16.347478 17538 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 11:32:38.622197 17538 solver.cpp:404]     Test net output #0: accuracy = 0.87588\nI0819 11:32:38.622577 17538 solver.cpp:404]     Test net output #1: loss = 0.548893 (* 1 = 0.548893 loss)\nI0819 11:32:39.950562 17538 solver.cpp:228] Iteration 70200, loss = 0.0002788\nI0819 11:32:39.950603 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:32:39.950619 17538 solver.cpp:244]     Train net output #1: loss = 0.000278618 (* 1 = 0.000278618 loss)\nI0819 11:32:40.026851 17538 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0819 11:34:57.256491 17538 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 11:36:19.530261 17538 solver.cpp:404]     Test net output #0: accuracy = 0.8798\nI0819 11:36:19.530649 17538 solver.cpp:404]     Test net output #1: loss = 0.530166 (* 1 = 0.530166 loss)\nI0819 11:36:20.858189 17538 solver.cpp:228] Iteration 70300, loss = 0.000288187\nI0819 11:36:20.858227 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:36:20.858242 17538 solver.cpp:244]     Train net output #1: loss = 0.000288006 (* 1 = 0.000288006 loss)\nI0819 11:36:20.935504 17538 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0819 11:38:38.121904 17538 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 11:40:00.407943 17538 solver.cpp:404]     Test net output #0: accuracy = 0.88388\nI0819 11:40:00.408318 17538 solver.cpp:404]     Test net output #1: loss = 0.515459 (* 1 = 0.515459 loss)\nI0819 11:40:01.736552 17538 solver.cpp:228] Iteration 70400, loss = 0.000295174\nI0819 11:40:01.736593 17538 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 11:40:01.736609 17538 solver.cpp:244]     Train net output #1: loss = 0.000294993 (* 1 = 0.000294993 loss)\nI0819 11:40:01.818904 17538 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0819 11:42:19.007827 17538 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 11:43:41.293866 17538 solver.cpp:404]     Test net output #0: accuracy = 0.88612\nI0819 11:43:41.294251 17538 solver.cpp:404]     Test net output #1: loss = 0.503396 (* 1 = 0.503396 loss)\nI0819 11:43:42.622190 17538 solver.cpp:228] Iteration 70500, loss = 0.000267711\nI0819 11:43:42.622227 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:43:42.622243 17538 solver.cpp:244]     Train net output #1: loss = 0.000267529 (* 1 = 0.000267529 loss)\nI0819 11:43:42.703667 17538 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0819 11:45:59.952913 17538 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 11:47:22.228837 17538 solver.cpp:404]     Test net output #0: accuracy = 0.88968\nI0819 11:47:22.229193 17538 solver.cpp:404]     Test net output #1: loss = 0.492861 (* 1 = 0.492861 loss)\nI0819 11:47:23.557159 17538 solver.cpp:228] Iteration 70600, loss = 0.000271325\nI0819 11:47:23.557195 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:47:23.557210 17538 solver.cpp:244]     Train net output #1: loss = 0.000271143 (* 1 = 0.000271143 loss)\nI0819 11:47:23.639595 17538 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0819 11:49:40.879535 17538 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 11:51:03.180656 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89236\nI0819 11:51:03.181018 17538 solver.cpp:404]     Test net output #1: loss = 0.48467 (* 1 = 0.48467 loss)\nI0819 11:51:04.508321 17538 solver.cpp:228] Iteration 70700, loss = 0.000268884\nI0819 11:51:04.508358 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:51:04.508373 17538 solver.cpp:244]     Train net output #1: loss = 0.000268703 (* 1 = 
0.000268703 loss)\nI0819 11:51:04.595667 17538 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0819 11:53:21.783773 17538 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0819 11:54:44.052003 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89444\nI0819 11:54:44.052382 17538 solver.cpp:404]     Test net output #1: loss = 0.47749 (* 1 = 0.47749 loss)\nI0819 11:54:45.380537 17538 solver.cpp:228] Iteration 70800, loss = 0.000279175\nI0819 11:54:45.380573 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:54:45.380594 17538 solver.cpp:244]     Train net output #1: loss = 0.000278993 (* 1 = 0.000278993 loss)\nI0819 11:54:45.463459 17538 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0819 11:57:02.695991 17538 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 11:58:24.980182 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89572\nI0819 11:58:24.980556 17538 solver.cpp:404]     Test net output #1: loss = 0.472652 (* 1 = 0.472652 loss)\nI0819 11:58:26.309037 17538 solver.cpp:228] Iteration 70900, loss = 0.000270643\nI0819 11:58:26.309074 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 11:58:26.309089 17538 solver.cpp:244]     Train net output #1: loss = 0.000270461 (* 1 = 0.000270461 loss)\nI0819 11:58:26.394371 17538 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0819 12:00:43.591863 17538 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 12:02:05.878671 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89744\nI0819 12:02:05.879042 17538 solver.cpp:404]     Test net output #1: loss = 0.467588 (* 1 = 0.467588 loss)\nI0819 12:02:07.207141 17538 solver.cpp:228] Iteration 71000, loss = 0.000276833\nI0819 12:02:07.207176 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:02:07.207192 17538 solver.cpp:244]     Train net output #1: loss = 0.000276652 (* 1 = 0.000276652 loss)\nI0819 12:02:07.287155 17538 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0819 
12:04:24.503340 17538 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 12:05:46.818624 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89764\nI0819 12:05:46.818996 17538 solver.cpp:404]     Test net output #1: loss = 0.465172 (* 1 = 0.465172 loss)\nI0819 12:05:48.148535 17538 solver.cpp:228] Iteration 71100, loss = 0.000300562\nI0819 12:05:48.148577 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:05:48.148602 17538 solver.cpp:244]     Train net output #1: loss = 0.00030038 (* 1 = 0.00030038 loss)\nI0819 12:05:48.233090 17538 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0819 12:08:05.539584 17538 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 12:09:27.814481 17538 solver.cpp:404]     Test net output #0: accuracy = 0.899\nI0819 12:09:27.814833 17538 solver.cpp:404]     Test net output #1: loss = 0.461347 (* 1 = 0.461347 loss)\nI0819 12:09:29.141824 17538 solver.cpp:228] Iteration 71200, loss = 0.000302869\nI0819 12:09:29.141862 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:09:29.141878 17538 solver.cpp:244]     Train net output #1: loss = 0.000302687 (* 1 = 0.000302687 loss)\nI0819 12:09:29.226258 17538 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0819 12:11:46.499902 17538 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 12:13:08.800652 17538 solver.cpp:404]     Test net output #0: accuracy = 0.89968\nI0819 12:13:08.800994 17538 solver.cpp:404]     Test net output #1: loss = 0.460018 (* 1 = 0.460018 loss)\nI0819 12:13:10.127521 17538 solver.cpp:228] Iteration 71300, loss = 0.000277258\nI0819 12:13:10.127559 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:13:10.127575 17538 solver.cpp:244]     Train net output #1: loss = 0.000277076 (* 1 = 0.000277076 loss)\nI0819 12:13:10.206768 17538 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0819 12:15:27.435730 17538 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 12:16:49.726056 17538 
solver.cpp:404]     Test net output #0: accuracy = 0.9012\nI0819 12:16:49.726439 17538 solver.cpp:404]     Test net output #1: loss = 0.456809 (* 1 = 0.456809 loss)\nI0819 12:16:51.052623 17538 solver.cpp:228] Iteration 71400, loss = 0.000287213\nI0819 12:16:51.052659 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:16:51.052673 17538 solver.cpp:244]     Train net output #1: loss = 0.000287032 (* 1 = 0.000287032 loss)\nI0819 12:16:51.131063 17538 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0819 12:19:08.441546 17538 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 12:20:30.725075 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90116\nI0819 12:20:30.725442 17538 solver.cpp:404]     Test net output #1: loss = 0.456401 (* 1 = 0.456401 loss)\nI0819 12:20:32.052356 17538 solver.cpp:228] Iteration 71500, loss = 0.000296305\nI0819 12:20:32.052390 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:20:32.052405 17538 solver.cpp:244]     Train net output #1: loss = 0.000296123 (* 1 = 0.000296123 loss)\nI0819 12:20:32.136327 17538 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0819 12:22:49.411046 17538 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 12:24:11.695762 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9016\nI0819 12:24:11.696140 17538 solver.cpp:404]     Test net output #1: loss = 0.453982 (* 1 = 0.453982 loss)\nI0819 12:24:13.022567 17538 solver.cpp:228] Iteration 71600, loss = 0.000249747\nI0819 12:24:13.022598 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:24:13.022614 17538 solver.cpp:244]     Train net output #1: loss = 0.000249566 (* 1 = 0.000249566 loss)\nI0819 12:24:13.103747 17538 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0819 12:26:30.326902 17538 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 12:27:52.606607 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90184\nI0819 12:27:52.606982 17538 solver.cpp:404]     
Test net output #1: loss = 0.453937 (* 1 = 0.453937 loss)\nI0819 12:27:53.933840 17538 solver.cpp:228] Iteration 71700, loss = 0.000272758\nI0819 12:27:53.933873 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:27:53.933889 17538 solver.cpp:244]     Train net output #1: loss = 0.000272577 (* 1 = 0.000272577 loss)\nI0819 12:27:54.015205 17538 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0819 12:30:11.237217 17538 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 12:31:33.507339 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90256\nI0819 12:31:33.507724 17538 solver.cpp:404]     Test net output #1: loss = 0.452152 (* 1 = 0.452152 loss)\nI0819 12:31:34.834475 17538 solver.cpp:228] Iteration 71800, loss = 0.000293864\nI0819 12:31:34.834508 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:31:34.834523 17538 solver.cpp:244]     Train net output #1: loss = 0.000293683 (* 1 = 0.000293683 loss)\nI0819 12:31:34.918540 17538 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0819 12:33:52.169606 17538 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 12:35:14.464069 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90332\nI0819 12:35:14.464468 17538 solver.cpp:404]     Test net output #1: loss = 0.452444 (* 1 = 0.452444 loss)\nI0819 12:35:15.791213 17538 solver.cpp:228] Iteration 71900, loss = 0.000289953\nI0819 12:35:15.791250 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:35:15.791265 17538 solver.cpp:244]     Train net output #1: loss = 0.000289772 (* 1 = 0.000289772 loss)\nI0819 12:35:15.872848 17538 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0819 12:37:33.110987 17538 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 12:38:55.407829 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90428\nI0819 12:38:55.408181 17538 solver.cpp:404]     Test net output #1: loss = 0.450986 (* 1 = 0.450986 loss)\nI0819 12:38:56.735250 17538 solver.cpp:228] 
Iteration 72000, loss = 0.000294119\nI0819 12:38:56.735283 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:38:56.735299 17538 solver.cpp:244]     Train net output #1: loss = 0.000293937 (* 1 = 0.000293937 loss)\nI0819 12:38:56.812808 17538 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0819 12:41:14.058909 17538 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 12:42:36.346050 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0819 12:42:36.346426 17538 solver.cpp:404]     Test net output #1: loss = 0.451372 (* 1 = 0.451372 loss)\nI0819 12:42:37.672986 17538 solver.cpp:228] Iteration 72100, loss = 0.000325362\nI0819 12:42:37.673020 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:42:37.673036 17538 solver.cpp:244]     Train net output #1: loss = 0.00032518 (* 1 = 0.00032518 loss)\nI0819 12:42:37.757232 17538 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0819 12:44:54.990352 17538 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 12:46:17.264058 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90452\nI0819 12:46:17.264430 17538 solver.cpp:404]     Test net output #1: loss = 0.450353 (* 1 = 0.450353 loss)\nI0819 12:46:18.591295 17538 solver.cpp:228] Iteration 72200, loss = 0.000298912\nI0819 12:46:18.591328 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:46:18.591348 17538 solver.cpp:244]     Train net output #1: loss = 0.000298731 (* 1 = 0.000298731 loss)\nI0819 12:46:18.671671 17538 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0819 12:48:35.910647 17538 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 12:49:58.178007 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9048\nI0819 12:49:58.178387 17538 solver.cpp:404]     Test net output #1: loss = 0.450698 (* 1 = 0.450698 loss)\nI0819 12:49:59.504665 17538 solver.cpp:228] Iteration 72300, loss = 0.000265492\nI0819 12:49:59.504698 17538 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 12:49:59.504714 17538 solver.cpp:244]     Train net output #1: loss = 0.00026531 (* 1 = 0.00026531 loss)\nI0819 12:49:59.581699 17538 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0819 12:52:16.770915 17538 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0819 12:53:39.028170 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90436\nI0819 12:53:39.028561 17538 solver.cpp:404]     Test net output #1: loss = 0.449897 (* 1 = 0.449897 loss)\nI0819 12:53:40.355067 17538 solver.cpp:228] Iteration 72400, loss = 0.000281222\nI0819 12:53:40.355103 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:53:40.355118 17538 solver.cpp:244]     Train net output #1: loss = 0.00028104 (* 1 = 0.00028104 loss)\nI0819 12:53:40.436472 17538 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0819 12:55:57.663564 17538 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 12:57:19.916800 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0819 12:57:19.917158 17538 solver.cpp:404]     Test net output #1: loss = 0.450298 (* 1 = 0.450298 loss)\nI0819 12:57:21.243999 17538 solver.cpp:228] Iteration 72500, loss = 0.000304282\nI0819 12:57:21.244033 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 12:57:21.244050 17538 solver.cpp:244]     Train net output #1: loss = 0.0003041 (* 1 = 0.0003041 loss)\nI0819 12:57:21.326027 17538 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0819 12:59:38.572826 17538 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 13:01:00.841815 17538 solver.cpp:404]     Test net output #0: accuracy = 0.904\nI0819 13:01:00.842169 17538 solver.cpp:404]     Test net output #1: loss = 0.44975 (* 1 = 0.44975 loss)\nI0819 13:01:02.168843 17538 solver.cpp:228] Iteration 72600, loss = 0.000319814\nI0819 13:01:02.168877 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:01:02.168892 17538 solver.cpp:244]     Train net output #1: loss = 0.000319632 (* 1 = 0.000319632 
loss)\nI0819 13:01:02.250682 17538 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0819 13:03:19.513643 17538 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 13:04:41.790637 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90396\nI0819 13:04:41.790995 17538 solver.cpp:404]     Test net output #1: loss = 0.450168 (* 1 = 0.450168 loss)\nI0819 13:04:43.118182 17538 solver.cpp:228] Iteration 72700, loss = 0.000294955\nI0819 13:04:43.118218 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:04:43.118234 17538 solver.cpp:244]     Train net output #1: loss = 0.000294773 (* 1 = 0.000294773 loss)\nI0819 13:04:43.197816 17538 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0819 13:07:00.470396 17538 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 13:08:22.295603 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9038\nI0819 13:08:22.295893 17538 solver.cpp:404]     Test net output #1: loss = 0.449818 (* 1 = 0.449818 loss)\nI0819 13:08:23.622457 17538 solver.cpp:228] Iteration 72800, loss = 0.000300158\nI0819 13:08:23.622493 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:08:23.622509 17538 solver.cpp:244]     Train net output #1: loss = 0.000299976 (* 1 = 0.000299976 loss)\nI0819 13:08:23.708961 17538 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0819 13:10:41.009354 17538 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 13:12:02.837158 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90404\nI0819 13:12:02.837445 17538 solver.cpp:404]     Test net output #1: loss = 0.450063 (* 1 = 0.450063 loss)\nI0819 13:12:04.164105 17538 solver.cpp:228] Iteration 72900, loss = 0.000311579\nI0819 13:12:04.164142 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:12:04.164158 17538 solver.cpp:244]     Train net output #1: loss = 0.000311398 (* 1 = 0.000311398 loss)\nI0819 13:12:04.241245 17538 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0819 13:14:21.514348 
17538 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 13:15:43.318787 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90384\nI0819 13:15:43.319084 17538 solver.cpp:404]     Test net output #1: loss = 0.449788 (* 1 = 0.449788 loss)\nI0819 13:15:44.646127 17538 solver.cpp:228] Iteration 73000, loss = 0.000277431\nI0819 13:15:44.646165 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:15:44.646181 17538 solver.cpp:244]     Train net output #1: loss = 0.00027725 (* 1 = 0.00027725 loss)\nI0819 13:15:44.729282 17538 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0819 13:18:02.033422 17538 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 13:19:24.265002 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90404\nI0819 13:19:24.265328 17538 solver.cpp:404]     Test net output #1: loss = 0.449949 (* 1 = 0.449949 loss)\nI0819 13:19:25.592213 17538 solver.cpp:228] Iteration 73100, loss = 0.000292603\nI0819 13:19:25.592250 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:19:25.592267 17538 solver.cpp:244]     Train net output #1: loss = 0.000292421 (* 1 = 0.000292421 loss)\nI0819 13:19:25.673933 17538 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0819 13:21:42.850817 17538 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 13:23:05.091289 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 13:23:05.091646 17538 solver.cpp:404]     Test net output #1: loss = 0.44977 (* 1 = 0.44977 loss)\nI0819 13:23:06.419328 17538 solver.cpp:228] Iteration 73200, loss = 0.0003166\nI0819 13:23:06.419369 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:23:06.419384 17538 solver.cpp:244]     Train net output #1: loss = 0.000316419 (* 1 = 0.000316419 loss)\nI0819 13:23:06.496639 17538 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0819 13:25:23.680037 17538 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 13:26:45.927937 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.90424\nI0819 13:26:45.928223 17538 solver.cpp:404]     Test net output #1: loss = 0.449818 (* 1 = 0.449818 loss)\nI0819 13:26:47.255076 17538 solver.cpp:228] Iteration 73300, loss = 0.000276224\nI0819 13:26:47.255115 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:26:47.255133 17538 solver.cpp:244]     Train net output #1: loss = 0.000276042 (* 1 = 0.000276042 loss)\nI0819 13:26:47.337020 17538 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0819 13:29:04.568397 17538 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 13:30:26.535449 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90392\nI0819 13:30:26.535733 17538 solver.cpp:404]     Test net output #1: loss = 0.449762 (* 1 = 0.449762 loss)\nI0819 13:30:27.863092 17538 solver.cpp:228] Iteration 73400, loss = 0.000269567\nI0819 13:30:27.863133 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:30:27.863148 17538 solver.cpp:244]     Train net output #1: loss = 0.000269385 (* 1 = 0.000269385 loss)\nI0819 13:30:27.944769 17538 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0819 13:32:45.111987 17538 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 13:34:07.331302 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0819 13:34:07.331615 17538 solver.cpp:404]     Test net output #1: loss = 0.449903 (* 1 = 0.449903 loss)\nI0819 13:34:08.659076 17538 solver.cpp:228] Iteration 73500, loss = 0.000269275\nI0819 13:34:08.659113 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:34:08.659128 17538 solver.cpp:244]     Train net output #1: loss = 0.000269093 (* 1 = 0.000269093 loss)\nI0819 13:34:08.737293 17538 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0819 13:36:25.896817 17538 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 13:37:48.085204 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 13:37:48.085525 17538 solver.cpp:404]     Test net output #1: loss = 
0.449884 (* 1 = 0.449884 loss)\nI0819 13:37:49.412689 17538 solver.cpp:228] Iteration 73600, loss = 0.000280029\nI0819 13:37:49.412729 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:37:49.412744 17538 solver.cpp:244]     Train net output #1: loss = 0.000279847 (* 1 = 0.000279847 loss)\nI0819 13:37:49.489892 17538 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0819 13:40:06.752941 17538 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 13:41:28.863661 17538 solver.cpp:404]     Test net output #0: accuracy = 0.904\nI0819 13:41:28.863999 17538 solver.cpp:404]     Test net output #1: loss = 0.449881 (* 1 = 0.449881 loss)\nI0819 13:41:30.190801 17538 solver.cpp:228] Iteration 73700, loss = 0.000310162\nI0819 13:41:30.190840 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:41:30.190856 17538 solver.cpp:244]     Train net output #1: loss = 0.00030998 (* 1 = 0.00030998 loss)\nI0819 13:41:30.277971 17538 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0819 13:43:47.606104 17538 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 13:45:09.693437 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 13:45:09.693809 17538 solver.cpp:404]     Test net output #1: loss = 0.450035 (* 1 = 0.450035 loss)\nI0819 13:45:11.020454 17538 solver.cpp:228] Iteration 73800, loss = 0.00028823\nI0819 13:45:11.020493 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:45:11.020508 17538 solver.cpp:244]     Train net output #1: loss = 0.000288049 (* 1 = 0.000288049 loss)\nI0819 13:45:11.101510 17538 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0819 13:47:28.270537 17538 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 13:48:50.128080 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9038\nI0819 13:48:50.128415 17538 solver.cpp:404]     Test net output #1: loss = 0.450031 (* 1 = 0.450031 loss)\nI0819 13:48:51.454900 17538 solver.cpp:228] Iteration 73900, loss = 
0.000294693\nI0819 13:48:51.454939 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:48:51.454954 17538 solver.cpp:244]     Train net output #1: loss = 0.000294511 (* 1 = 0.000294511 loss)\nI0819 13:48:51.537353 17538 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0819 13:51:08.741829 17538 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0819 13:52:30.993520 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 13:52:30.993904 17538 solver.cpp:404]     Test net output #1: loss = 0.450179 (* 1 = 0.450179 loss)\nI0819 13:52:32.320129 17538 solver.cpp:228] Iteration 74000, loss = 0.000281485\nI0819 13:52:32.320170 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:52:32.320186 17538 solver.cpp:244]     Train net output #1: loss = 0.000281303 (* 1 = 0.000281303 loss)\nI0819 13:52:32.405562 17538 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0819 13:54:49.655771 17538 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 13:56:11.831964 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90392\nI0819 13:56:11.832336 17538 solver.cpp:404]     Test net output #1: loss = 0.450104 (* 1 = 0.450104 loss)\nI0819 13:56:13.159844 17538 solver.cpp:228] Iteration 74100, loss = 0.00026091\nI0819 13:56:13.159883 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 13:56:13.159898 17538 solver.cpp:244]     Train net output #1: loss = 0.000260729 (* 1 = 0.000260729 loss)\nI0819 13:56:13.242624 17538 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0819 13:58:30.554399 17538 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 13:59:52.876196 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 13:59:52.876582 17538 solver.cpp:404]     Test net output #1: loss = 0.450318 (* 1 = 0.450318 loss)\nI0819 13:59:54.204128 17538 solver.cpp:228] Iteration 74200, loss = 0.000268678\nI0819 13:59:54.204165 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 
13:59:54.204180 17538 solver.cpp:244]     Train net output #1: loss = 0.000268496 (* 1 = 0.000268496 loss)\nI0819 13:59:54.285956 17538 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0819 14:02:11.631539 17538 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 14:03:33.937209 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90388\nI0819 14:03:33.937561 17538 solver.cpp:404]     Test net output #1: loss = 0.450237 (* 1 = 0.450237 loss)\nI0819 14:03:35.264468 17538 solver.cpp:228] Iteration 74300, loss = 0.000280944\nI0819 14:03:35.264508 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:03:35.264524 17538 solver.cpp:244]     Train net output #1: loss = 0.000280763 (* 1 = 0.000280763 loss)\nI0819 14:03:35.347658 17538 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0819 14:05:52.655481 17538 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 14:07:14.959810 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 14:07:14.960201 17538 solver.cpp:404]     Test net output #1: loss = 0.450533 (* 1 = 0.450533 loss)\nI0819 14:07:16.287446 17538 solver.cpp:228] Iteration 74400, loss = 0.000279666\nI0819 14:07:16.287485 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:07:16.287502 17538 solver.cpp:244]     Train net output #1: loss = 0.000279484 (* 1 = 0.000279484 loss)\nI0819 14:07:16.366145 17538 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0819 14:09:33.690371 17538 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 14:10:55.947988 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90388\nI0819 14:10:55.948331 17538 solver.cpp:404]     Test net output #1: loss = 0.450422 (* 1 = 0.450422 loss)\nI0819 14:10:57.276242 17538 solver.cpp:228] Iteration 74500, loss = 0.000278085\nI0819 14:10:57.276281 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:10:57.276298 17538 solver.cpp:244]     Train net output #1: loss = 0.000277903 (* 1 = 0.000277903 
loss)\nI0819 14:10:57.356871 17538 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0819 14:13:14.087946 17538 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 14:14:35.279747 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9038\nI0819 14:14:35.280045 17538 solver.cpp:404]     Test net output #1: loss = 0.450531 (* 1 = 0.450531 loss)\nI0819 14:14:36.604089 17538 solver.cpp:228] Iteration 74600, loss = 0.000273265\nI0819 14:14:36.604126 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:14:36.604141 17538 solver.cpp:244]     Train net output #1: loss = 0.000273084 (* 1 = 0.000273084 loss)\nI0819 14:14:36.680586 17538 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0819 14:16:53.383272 17538 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 14:18:14.586030 17538 solver.cpp:404]     Test net output #0: accuracy = 0.904\nI0819 14:18:14.586313 17538 solver.cpp:404]     Test net output #1: loss = 0.450405 (* 1 = 0.450405 loss)\nI0819 14:18:15.910559 17538 solver.cpp:228] Iteration 74700, loss = 0.000247924\nI0819 14:18:15.910598 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:18:15.910611 17538 solver.cpp:244]     Train net output #1: loss = 0.000247742 (* 1 = 0.000247742 loss)\nI0819 14:18:15.992358 17538 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0819 14:20:32.679638 17538 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 14:21:53.876262 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 14:21:53.876593 17538 solver.cpp:404]     Test net output #1: loss = 0.450618 (* 1 = 0.450618 loss)\nI0819 14:21:55.201310 17538 solver.cpp:228] Iteration 74800, loss = 0.000300667\nI0819 14:21:55.201347 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:21:55.201362 17538 solver.cpp:244]     Train net output #1: loss = 0.000300486 (* 1 = 0.000300486 loss)\nI0819 14:21:55.279747 17538 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0819 14:24:11.976511 17538 
solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 14:25:33.173302 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90396\nI0819 14:25:33.173588 17538 solver.cpp:404]     Test net output #1: loss = 0.450422 (* 1 = 0.450422 loss)\nI0819 14:25:34.497385 17538 solver.cpp:228] Iteration 74900, loss = 0.000287329\nI0819 14:25:34.497422 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:25:34.497437 17538 solver.cpp:244]     Train net output #1: loss = 0.000287147 (* 1 = 0.000287147 loss)\nI0819 14:25:34.574447 17538 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0819 14:27:51.297801 17538 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 14:29:12.487565 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90372\nI0819 14:29:12.487862 17538 solver.cpp:404]     Test net output #1: loss = 0.450732 (* 1 = 0.450732 loss)\nI0819 14:29:13.813287 17538 solver.cpp:228] Iteration 75000, loss = 0.000298713\nI0819 14:29:13.813324 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:29:13.813339 17538 solver.cpp:244]     Train net output #1: loss = 0.000298532 (* 1 = 0.000298532 loss)\nI0819 14:29:13.896396 17538 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0819 14:31:30.636546 17538 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 14:32:51.801811 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 14:32:51.802158 17538 solver.cpp:404]     Test net output #1: loss = 0.450638 (* 1 = 0.450638 loss)\nI0819 14:32:53.126019 17538 solver.cpp:228] Iteration 75100, loss = 0.000317306\nI0819 14:32:53.126054 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:32:53.126068 17538 solver.cpp:244]     Train net output #1: loss = 0.000317125 (* 1 = 0.000317125 loss)\nI0819 14:32:53.208025 17538 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0819 14:35:09.991235 17538 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 14:36:31.179852 17538 solver.cpp:404]     Test net 
output #0: accuracy = 0.90368\nI0819 14:36:31.180167 17538 solver.cpp:404]     Test net output #1: loss = 0.450891 (* 1 = 0.450891 loss)\nI0819 14:36:32.504878 17538 solver.cpp:228] Iteration 75200, loss = 0.000298454\nI0819 14:36:32.504915 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:36:32.504930 17538 solver.cpp:244]     Train net output #1: loss = 0.000298273 (* 1 = 0.000298273 loss)\nI0819 14:36:32.586896 17538 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0819 14:38:49.413767 17538 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 14:40:10.754339 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 14:40:10.754700 17538 solver.cpp:404]     Test net output #1: loss = 0.45078 (* 1 = 0.45078 loss)\nI0819 14:40:12.081423 17538 solver.cpp:228] Iteration 75300, loss = 0.000270319\nI0819 14:40:12.081462 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:40:12.081480 17538 solver.cpp:244]     Train net output #1: loss = 0.000270137 (* 1 = 0.000270137 loss)\nI0819 14:40:12.169247 17538 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0819 14:42:29.372022 17538 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 14:43:51.653065 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 14:43:51.653448 17538 solver.cpp:404]     Test net output #1: loss = 0.451182 (* 1 = 0.451182 loss)\nI0819 14:43:52.980803 17538 solver.cpp:228] Iteration 75400, loss = 0.000292636\nI0819 14:43:52.980839 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:43:52.980855 17538 solver.cpp:244]     Train net output #1: loss = 0.000292454 (* 1 = 0.000292454 loss)\nI0819 14:43:53.066021 17538 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0819 14:46:10.240617 17538 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 14:47:32.505956 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90356\nI0819 14:47:32.506345 17538 solver.cpp:404]     Test net output #1: loss = 0.45103 
(* 1 = 0.45103 loss)\nI0819 14:47:33.833487 17538 solver.cpp:228] Iteration 75500, loss = 0.000279724\nI0819 14:47:33.833524 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:47:33.833540 17538 solver.cpp:244]     Train net output #1: loss = 0.000279542 (* 1 = 0.000279542 loss)\nI0819 14:47:33.914654 17538 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0819 14:49:51.184296 17538 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0819 14:51:13.472229 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90344\nI0819 14:51:13.472601 17538 solver.cpp:404]     Test net output #1: loss = 0.451354 (* 1 = 0.451354 loss)\nI0819 14:51:14.799343 17538 solver.cpp:228] Iteration 75600, loss = 0.00027913\nI0819 14:51:14.799383 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:51:14.799399 17538 solver.cpp:244]     Train net output #1: loss = 0.000278948 (* 1 = 0.000278948 loss)\nI0819 14:51:14.886643 17538 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0819 14:53:32.134685 17538 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 14:54:54.427732 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90384\nI0819 14:54:54.428086 17538 solver.cpp:404]     Test net output #1: loss = 0.451161 (* 1 = 0.451161 loss)\nI0819 14:54:55.755419 17538 solver.cpp:228] Iteration 75700, loss = 0.000268842\nI0819 14:54:55.755457 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:54:55.755473 17538 solver.cpp:244]     Train net output #1: loss = 0.00026866 (* 1 = 0.00026866 loss)\nI0819 14:54:55.837317 17538 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0819 14:57:12.986047 17538 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 14:58:35.282572 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 14:58:35.282951 17538 solver.cpp:404]     Test net output #1: loss = 0.451481 (* 1 = 0.451481 loss)\nI0819 14:58:36.609804 17538 solver.cpp:228] Iteration 75800, loss = 0.000310152\nI0819 
14:58:36.609843 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:58:36.609858 17538 solver.cpp:244]     Train net output #1: loss = 0.00030997 (* 1 = 0.00030997 loss)\nI0819 14:58:36.695451 17538 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0819 15:00:54.030346 17538 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 15:02:16.306936 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 15:02:16.307298 17538 solver.cpp:404]     Test net output #1: loss = 0.45127 (* 1 = 0.45127 loss)\nI0819 15:02:17.634358 17538 solver.cpp:228] Iteration 75900, loss = 0.000295125\nI0819 15:02:17.634397 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:02:17.634413 17538 solver.cpp:244]     Train net output #1: loss = 0.000294944 (* 1 = 0.000294944 loss)\nI0819 15:02:17.722218 17538 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0819 15:04:34.923409 17538 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 15:05:57.213137 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90364\nI0819 15:05:57.213526 17538 solver.cpp:404]     Test net output #1: loss = 0.451583 (* 1 = 0.451583 loss)\nI0819 15:05:58.540585 17538 solver.cpp:228] Iteration 76000, loss = 0.00025947\nI0819 15:05:58.540622 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:05:58.540638 17538 solver.cpp:244]     Train net output #1: loss = 0.000259288 (* 1 = 0.000259288 loss)\nI0819 15:05:58.624827 17538 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0819 15:08:15.865247 17538 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 15:09:38.155663 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 15:09:38.156023 17538 solver.cpp:404]     Test net output #1: loss = 0.451471 (* 1 = 0.451471 loss)\nI0819 15:09:39.483017 17538 solver.cpp:228] Iteration 76100, loss = 0.000283748\nI0819 15:09:39.483057 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:09:39.483072 17538 
solver.cpp:244]     Train net output #1: loss = 0.000283566 (* 1 = 0.000283566 loss)\nI0819 15:09:39.564141 17538 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0819 15:11:56.714133 17538 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 15:13:18.995618 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 15:13:18.995990 17538 solver.cpp:404]     Test net output #1: loss = 0.451705 (* 1 = 0.451705 loss)\nI0819 15:13:20.323252 17538 solver.cpp:228] Iteration 76200, loss = 0.000279037\nI0819 15:13:20.323293 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:13:20.323312 17538 solver.cpp:244]     Train net output #1: loss = 0.000278855 (* 1 = 0.000278855 loss)\nI0819 15:13:20.407892 17538 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0819 15:15:37.612474 17538 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 15:16:59.909417 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90376\nI0819 15:16:59.909786 17538 solver.cpp:404]     Test net output #1: loss = 0.451476 (* 1 = 0.451476 loss)\nI0819 15:17:01.237116 17538 solver.cpp:228] Iteration 76300, loss = 0.00028428\nI0819 15:17:01.237155 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:17:01.237170 17538 solver.cpp:244]     Train net output #1: loss = 0.000284099 (* 1 = 0.000284099 loss)\nI0819 15:17:01.319944 17538 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0819 15:19:18.558710 17538 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 15:20:40.848585 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 15:20:40.848958 17538 solver.cpp:404]     Test net output #1: loss = 0.451814 (* 1 = 0.451814 loss)\nI0819 15:20:42.176383 17538 solver.cpp:228] Iteration 76400, loss = 0.000313994\nI0819 15:20:42.176419 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:20:42.176435 17538 solver.cpp:244]     Train net output #1: loss = 0.000313812 (* 1 = 0.000313812 loss)\nI0819 15:20:42.261078 
17538 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0819 15:22:59.587186 17538 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 15:24:21.898900 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 15:24:21.899282 17538 solver.cpp:404]     Test net output #1: loss = 0.451611 (* 1 = 0.451611 loss)\nI0819 15:24:23.226635 17538 solver.cpp:228] Iteration 76500, loss = 0.000301361\nI0819 15:24:23.226670 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:24:23.226686 17538 solver.cpp:244]     Train net output #1: loss = 0.00030118 (* 1 = 0.00030118 loss)\nI0819 15:24:23.311430 17538 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0819 15:26:40.561331 17538 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 15:28:02.842485 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90364\nI0819 15:28:02.842864 17538 solver.cpp:404]     Test net output #1: loss = 0.451924 (* 1 = 0.451924 loss)\nI0819 15:28:04.169549 17538 solver.cpp:228] Iteration 76600, loss = 0.000265575\nI0819 15:28:04.169584 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:28:04.169600 17538 solver.cpp:244]     Train net output #1: loss = 0.000265393 (* 1 = 0.000265393 loss)\nI0819 15:28:04.254652 17538 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0819 15:30:21.485903 17538 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 15:31:43.762897 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 15:31:43.763278 17538 solver.cpp:404]     Test net output #1: loss = 0.451736 (* 1 = 0.451736 loss)\nI0819 15:31:45.090234 17538 solver.cpp:228] Iteration 76700, loss = 0.000295778\nI0819 15:31:45.090270 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:31:45.090284 17538 solver.cpp:244]     Train net output #1: loss = 0.000295596 (* 1 = 0.000295596 loss)\nI0819 15:31:45.174795 17538 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0819 15:34:02.445192 17538 solver.cpp:337] Iteration 
76800, Testing net (#0)\nI0819 15:35:24.738482 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90348\nI0819 15:35:24.738862 17538 solver.cpp:404]     Test net output #1: loss = 0.452068 (* 1 = 0.452068 loss)\nI0819 15:35:26.066603 17538 solver.cpp:228] Iteration 76800, loss = 0.000319435\nI0819 15:35:26.066640 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:35:26.066655 17538 solver.cpp:244]     Train net output #1: loss = 0.000319254 (* 1 = 0.000319254 loss)\nI0819 15:35:26.145386 17538 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0819 15:37:43.367941 17538 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 15:39:05.652716 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 15:39:05.653100 17538 solver.cpp:404]     Test net output #1: loss = 0.451829 (* 1 = 0.451829 loss)\nI0819 15:39:06.981745 17538 solver.cpp:228] Iteration 76900, loss = 0.000269865\nI0819 15:39:06.981781 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:39:06.981797 17538 solver.cpp:244]     Train net output #1: loss = 0.000269684 (* 1 = 0.000269684 loss)\nI0819 15:39:07.059214 17538 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0819 15:41:24.240408 17538 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 15:42:46.518657 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90348\nI0819 15:42:46.519027 17538 solver.cpp:404]     Test net output #1: loss = 0.45225 (* 1 = 0.45225 loss)\nI0819 15:42:47.847338 17538 solver.cpp:228] Iteration 77000, loss = 0.000279037\nI0819 15:42:47.847378 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:42:47.847393 17538 solver.cpp:244]     Train net output #1: loss = 0.000278856 (* 1 = 0.000278856 loss)\nI0819 15:42:47.928798 17538 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0819 15:45:05.222373 17538 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 15:46:27.476305 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.9038\nI0819 15:46:27.476699 17538 solver.cpp:404]     Test net output #1: loss = 0.451925 (* 1 = 0.451925 loss)\nI0819 15:46:28.804126 17538 solver.cpp:228] Iteration 77100, loss = 0.000317283\nI0819 15:46:28.804162 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:46:28.804177 17538 solver.cpp:244]     Train net output #1: loss = 0.000317101 (* 1 = 0.000317101 loss)\nI0819 15:46:28.883569 17538 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0819 15:48:46.109658 17538 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0819 15:50:08.438730 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 15:50:08.439077 17538 solver.cpp:404]     Test net output #1: loss = 0.452256 (* 1 = 0.452256 loss)\nI0819 15:50:09.770004 17538 solver.cpp:228] Iteration 77200, loss = 0.000281078\nI0819 15:50:09.770045 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:50:09.770059 17538 solver.cpp:244]     Train net output #1: loss = 0.000280896 (* 1 = 0.000280896 loss)\nI0819 15:50:09.841753 17538 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0819 15:52:27.055794 17538 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 15:53:49.390861 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9038\nI0819 15:53:49.391240 17538 solver.cpp:404]     Test net output #1: loss = 0.452037 (* 1 = 0.452037 loss)\nI0819 15:53:50.718736 17538 solver.cpp:228] Iteration 77300, loss = 0.000287342\nI0819 15:53:50.718778 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:53:50.718793 17538 solver.cpp:244]     Train net output #1: loss = 0.00028716 (* 1 = 0.00028716 loss)\nI0819 15:53:50.803100 17538 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0819 15:56:08.066794 17538 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 15:57:30.414289 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0819 15:57:30.414656 17538 solver.cpp:404]     Test net output #1: loss = 0.452381 (* 1 = 0.452381 
loss)\nI0819 15:57:31.742820 17538 solver.cpp:228] Iteration 77400, loss = 0.000280309\nI0819 15:57:31.742861 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:57:31.742875 17538 solver.cpp:244]     Train net output #1: loss = 0.000280127 (* 1 = 0.000280127 loss)\nI0819 15:57:31.820346 17538 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0819 15:59:49.064429 17538 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 16:01:11.366142 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90372\nI0819 16:01:11.366528 17538 solver.cpp:404]     Test net output #1: loss = 0.452199 (* 1 = 0.452199 loss)\nI0819 16:01:12.694764 17538 solver.cpp:228] Iteration 77500, loss = 0.00028435\nI0819 16:01:12.694803 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:01:12.694818 17538 solver.cpp:244]     Train net output #1: loss = 0.000284168 (* 1 = 0.000284168 loss)\nI0819 16:01:12.780120 17538 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0819 16:03:30.066514 17538 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 16:04:52.356267 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 16:04:52.356631 17538 solver.cpp:404]     Test net output #1: loss = 0.452635 (* 1 = 0.452635 loss)\nI0819 16:04:53.684239 17538 solver.cpp:228] Iteration 77600, loss = 0.000268011\nI0819 16:04:53.684278 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:04:53.684296 17538 solver.cpp:244]     Train net output #1: loss = 0.000267829 (* 1 = 0.000267829 loss)\nI0819 16:04:53.764928 17538 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0819 16:07:10.992807 17538 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 16:08:33.259665 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 16:08:33.260052 17538 solver.cpp:404]     Test net output #1: loss = 0.452365 (* 1 = 0.452365 loss)\nI0819 16:08:34.587002 17538 solver.cpp:228] Iteration 77700, loss = 0.000277073\nI0819 16:08:34.587038 
17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:08:34.587054 17538 solver.cpp:244]     Train net output #1: loss = 0.000276891 (* 1 = 0.000276891 loss)\nI0819 16:08:34.673111 17538 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0819 16:10:51.873277 17538 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 16:12:14.159592 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 16:12:14.159940 17538 solver.cpp:404]     Test net output #1: loss = 0.45273 (* 1 = 0.45273 loss)\nI0819 16:12:15.486709 17538 solver.cpp:228] Iteration 77800, loss = 0.000279006\nI0819 16:12:15.486747 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:12:15.486762 17538 solver.cpp:244]     Train net output #1: loss = 0.000278825 (* 1 = 0.000278825 loss)\nI0819 16:12:15.572849 17538 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0819 16:14:32.786475 17538 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 16:15:55.071590 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 16:15:55.071967 17538 solver.cpp:404]     Test net output #1: loss = 0.452498 (* 1 = 0.452498 loss)\nI0819 16:15:56.398671 17538 solver.cpp:228] Iteration 77900, loss = 0.000307024\nI0819 16:15:56.398708 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:15:56.398725 17538 solver.cpp:244]     Train net output #1: loss = 0.000306842 (* 1 = 0.000306842 loss)\nI0819 16:15:56.482918 17538 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0819 16:18:13.648308 17538 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 16:19:35.932620 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90316\nI0819 16:19:35.933096 17538 solver.cpp:404]     Test net output #1: loss = 0.452856 (* 1 = 0.452856 loss)\nI0819 16:19:37.260144 17538 solver.cpp:228] Iteration 78000, loss = 0.000260724\nI0819 16:19:37.260182 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:19:37.260197 17538 solver.cpp:244]     
Train net output #1: loss = 0.000260542 (* 1 = 0.000260542 loss)\nI0819 16:19:37.337960 17538 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0819 16:21:54.517437 17538 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 16:23:16.796254 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 16:23:16.796635 17538 solver.cpp:404]     Test net output #1: loss = 0.452677 (* 1 = 0.452677 loss)\nI0819 16:23:18.123958 17538 solver.cpp:228] Iteration 78100, loss = 0.000283889\nI0819 16:23:18.123996 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:23:18.124011 17538 solver.cpp:244]     Train net output #1: loss = 0.000283708 (* 1 = 0.000283708 loss)\nI0819 16:23:18.203821 17538 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0819 16:25:35.438731 17538 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 16:26:57.714200 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 16:26:57.714566 17538 solver.cpp:404]     Test net output #1: loss = 0.453068 (* 1 = 0.453068 loss)\nI0819 16:26:59.041508 17538 solver.cpp:228] Iteration 78200, loss = 0.000280188\nI0819 16:26:59.041541 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:26:59.041558 17538 solver.cpp:244]     Train net output #1: loss = 0.000280006 (* 1 = 0.000280006 loss)\nI0819 16:26:59.121193 17538 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0819 16:29:16.351198 17538 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 16:30:38.623505 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 16:30:38.623860 17538 solver.cpp:404]     Test net output #1: loss = 0.452786 (* 1 = 0.452786 loss)\nI0819 16:30:39.950438 17538 solver.cpp:228] Iteration 78300, loss = 0.000290378\nI0819 16:30:39.950475 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:30:39.950490 17538 solver.cpp:244]     Train net output #1: loss = 0.000290196 (* 1 = 0.000290196 loss)\nI0819 16:30:40.033614 17538 
sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0819 16:32:57.341812 17538 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 16:34:19.606163 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 16:34:19.606550 17538 solver.cpp:404]     Test net output #1: loss = 0.453141 (* 1 = 0.453141 loss)\nI0819 16:34:20.933265 17538 solver.cpp:228] Iteration 78400, loss = 0.000295837\nI0819 16:34:20.933312 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:34:20.933329 17538 solver.cpp:244]     Train net output #1: loss = 0.000295655 (* 1 = 0.000295655 loss)\nI0819 16:34:21.012204 17538 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0819 16:36:38.335019 17538 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 16:38:00.638614 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 16:38:00.638978 17538 solver.cpp:404]     Test net output #1: loss = 0.452897 (* 1 = 0.452897 loss)\nI0819 16:38:01.965858 17538 solver.cpp:228] Iteration 78500, loss = 0.000281393\nI0819 16:38:01.965895 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:38:01.965910 17538 solver.cpp:244]     Train net output #1: loss = 0.000281211 (* 1 = 0.000281211 loss)\nI0819 16:38:02.045445 17538 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0819 16:40:19.299322 17538 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 16:41:41.589354 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9032\nI0819 16:41:41.589745 17538 solver.cpp:404]     Test net output #1: loss = 0.453336 (* 1 = 0.453336 loss)\nI0819 16:41:42.917125 17538 solver.cpp:228] Iteration 78600, loss = 0.000290478\nI0819 16:41:42.917160 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:41:42.917176 17538 solver.cpp:244]     Train net output #1: loss = 0.000290297 (* 1 = 0.000290297 loss)\nI0819 16:41:42.995184 17538 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0819 16:44:00.316584 17538 solver.cpp:337] Iteration 78700, 
Testing net (#0)\nI0819 16:45:22.608713 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90344\nI0819 16:45:22.609097 17538 solver.cpp:404]     Test net output #1: loss = 0.453048 (* 1 = 0.453048 loss)\nI0819 16:45:23.936235 17538 solver.cpp:228] Iteration 78700, loss = 0.000275313\nI0819 16:45:23.936271 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:45:23.936286 17538 solver.cpp:244]     Train net output #1: loss = 0.000275132 (* 1 = 0.000275132 loss)\nI0819 16:45:24.017827 17538 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0819 16:47:41.305430 17538 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 16:49:03.394870 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90304\nI0819 16:49:03.395242 17538 solver.cpp:404]     Test net output #1: loss = 0.453478 (* 1 = 0.453478 loss)\nI0819 16:49:04.724618 17538 solver.cpp:228] Iteration 78800, loss = 0.000271027\nI0819 16:49:04.724661 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:49:04.724678 17538 solver.cpp:244]     Train net output #1: loss = 0.000270845 (* 1 = 0.000270845 loss)\nI0819 16:49:04.801386 17538 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0819 16:51:22.077858 17538 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 16:52:43.917579 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0819 16:52:43.917917 17538 solver.cpp:404]     Test net output #1: loss = 0.453129 (* 1 = 0.453129 loss)\nI0819 16:52:45.245976 17538 solver.cpp:228] Iteration 78900, loss = 0.000296401\nI0819 16:52:45.246016 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:52:45.246032 17538 solver.cpp:244]     Train net output #1: loss = 0.000296219 (* 1 = 0.000296219 loss)\nI0819 16:52:45.328604 17538 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0819 16:55:02.562458 17538 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 16:56:24.395905 17538 solver.cpp:404]     Test net output #0: accuracy = 
0.90292\nI0819 16:56:24.396229 17538 solver.cpp:404]     Test net output #1: loss = 0.453507 (* 1 = 0.453507 loss)\nI0819 16:56:25.724380 17538 solver.cpp:228] Iteration 79000, loss = 0.000270353\nI0819 16:56:25.724421 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 16:56:25.724436 17538 solver.cpp:244]     Train net output #1: loss = 0.000270171 (* 1 = 0.000270171 loss)\nI0819 16:56:25.808701 17538 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0819 16:58:43.055711 17538 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 17:00:04.818840 17538 solver.cpp:404]     Test net output #0: accuracy = 0.9034\nI0819 17:00:04.819173 17538 solver.cpp:404]     Test net output #1: loss = 0.453171 (* 1 = 0.453171 loss)\nI0819 17:00:06.147066 17538 solver.cpp:228] Iteration 79100, loss = 0.000274134\nI0819 17:00:06.147107 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:00:06.147122 17538 solver.cpp:244]     Train net output #1: loss = 0.000273952 (* 1 = 0.000273952 loss)\nI0819 17:00:06.228340 17538 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0819 17:02:23.480392 17538 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 17:03:45.770241 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0819 17:03:45.770607 17538 solver.cpp:404]     Test net output #1: loss = 0.453529 (* 1 = 0.453529 loss)\nI0819 17:03:47.098654 17538 solver.cpp:228] Iteration 79200, loss = 0.000282543\nI0819 17:03:47.098693 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:03:47.098711 17538 solver.cpp:244]     Train net output #1: loss = 0.000282361 (* 1 = 0.000282361 loss)\nI0819 17:03:47.182680 17538 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0819 17:06:04.408877 17538 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 17:07:26.697502 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90364\nI0819 17:07:26.697899 17538 solver.cpp:404]     Test net output #1: loss = 0.453307 (* 1 = 0.453307 
loss)\nI0819 17:07:28.026306 17538 solver.cpp:228] Iteration 79300, loss = 0.000250544\nI0819 17:07:28.026347 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:07:28.026362 17538 solver.cpp:244]     Train net output #1: loss = 0.000250362 (* 1 = 0.000250362 loss)\nI0819 17:07:28.108372 17538 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0819 17:09:45.338914 17538 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 17:11:07.681078 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90312\nI0819 17:11:07.681442 17538 solver.cpp:404]     Test net output #1: loss = 0.453766 (* 1 = 0.453766 loss)\nI0819 17:11:09.009732 17538 solver.cpp:228] Iteration 79400, loss = 0.000295651\nI0819 17:11:09.009773 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:11:09.009788 17538 solver.cpp:244]     Train net output #1: loss = 0.000295469 (* 1 = 0.000295469 loss)\nI0819 17:11:09.086678 17538 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0819 17:13:26.385761 17538 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 17:14:48.698684 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 17:14:48.699080 17538 solver.cpp:404]     Test net output #1: loss = 0.453433 (* 1 = 0.453433 loss)\nI0819 17:14:50.026412 17538 solver.cpp:228] Iteration 79500, loss = 0.000270583\nI0819 17:14:50.026453 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:14:50.026468 17538 solver.cpp:244]     Train net output #1: loss = 0.000270401 (* 1 = 0.000270401 loss)\nI0819 17:14:50.107910 17538 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0819 17:17:07.332835 17538 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 17:18:29.622221 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90328\nI0819 17:18:29.622586 17538 solver.cpp:404]     Test net output #1: loss = 0.453851 (* 1 = 0.453851 loss)\nI0819 17:18:30.949010 17538 solver.cpp:228] Iteration 79600, loss = 0.000284222\nI0819 17:18:30.949051 
17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:18:30.949066 17538 solver.cpp:244]     Train net output #1: loss = 0.000284041 (* 1 = 0.000284041 loss)\nI0819 17:18:31.031870 17538 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0819 17:20:48.363678 17538 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 17:22:10.639370 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0819 17:22:10.639732 17538 solver.cpp:404]     Test net output #1: loss = 0.453493 (* 1 = 0.453493 loss)\nI0819 17:22:11.966680 17538 solver.cpp:228] Iteration 79700, loss = 0.000283884\nI0819 17:22:11.966718 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:22:11.966734 17538 solver.cpp:244]     Train net output #1: loss = 0.000283702 (* 1 = 0.000283702 loss)\nI0819 17:22:12.058593 17538 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0819 17:24:29.457945 17538 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 17:25:51.755022 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90316\nI0819 17:25:51.755388 17538 solver.cpp:404]     Test net output #1: loss = 0.453833 (* 1 = 0.453833 loss)\nI0819 17:25:53.081881 17538 solver.cpp:228] Iteration 79800, loss = 0.000257664\nI0819 17:25:53.081919 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:25:53.081935 17538 solver.cpp:244]     Train net output #1: loss = 0.000257483 (* 1 = 0.000257483 loss)\nI0819 17:25:53.169633 17538 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0819 17:28:10.435101 17538 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 17:29:32.732906 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90368\nI0819 17:29:32.733276 17538 solver.cpp:404]     Test net output #1: loss = 0.453514 (* 1 = 0.453514 loss)\nI0819 17:29:34.060628 17538 solver.cpp:228] Iteration 79900, loss = 0.000292312\nI0819 17:29:34.060667 17538 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 17:29:34.060683 17538 solver.cpp:244]     
Train net output #1: loss = 0.00029213 (* 1 = 0.00029213 loss)\nI0819 17:29:34.144675 17538 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0819 17:31:51.484453 17538 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35NestFig9_iter_80000.caffemodel\nI0819 17:31:51.750512 17538 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35NestFig9_iter_80000.solverstate\nI0819 17:31:52.199751 17538 solver.cpp:317] Iteration 80000, loss = 0.000255569\nI0819 17:31:52.199801 17538 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 17:33:14.500488 17538 solver.cpp:404]     Test net output #0: accuracy = 0.90332\nI0819 17:33:14.500843 17538 solver.cpp:404]     Test net output #1: loss = 0.453885 (* 1 = 0.453885 loss)\nI0819 17:33:14.500855 17538 solver.cpp:322] Optimization Done.\nI0819 17:33:19.880928 17538 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Res110Fig6b",
    "content": "I0821 08:59:08.195036 32502 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:59:08.197556 32502 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:59:08.198750 32502 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:59:08.199939 32502 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:59:08.201122 32502 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:59:08.202322 32502 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:59:08.203521 32502 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:59:08.204723 32502 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:59:08.205955 32502 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:59:08.624310 32502 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Res110Fig6b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0821 08:59:08.629238 32502 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:59:08.654345 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:08.654451 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:08.656095 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:59:08.656157 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:59:08.656177 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:59:08.656196 32502 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:59:08.656215 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:59:08.656234 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:59:08.656252 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:59:08.656270 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:59:08.656289 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:59:08.656306 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:59:08.656327 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:59:08.656342 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:59:08.656361 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:59:08.656380 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:59:08.656399 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:59:08.656417 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:59:08.656435 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:59:08.656453 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:59:08.656473 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:59:08.656491 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:59:08.656524 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr1_bn\nI0821 08:59:08.656543 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr2_bn\nI0821 08:59:08.656560 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr1_bn\nI0821 08:59:08.656579 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr2_bn\nI0821 08:59:08.656597 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr1_bn\nI0821 08:59:08.656615 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr2_bn\nI0821 08:59:08.656633 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr1_bn\nI0821 08:59:08.656649 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr2_bn\nI0821 08:59:08.656667 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr1_bn\nI0821 08:59:08.656685 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr2_bn\nI0821 08:59:08.656705 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr1_bn\nI0821 08:59:08.656721 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr2_bn\nI0821 08:59:08.656741 32502 net.cpp:322] The NetState phase (0) 
differed from the phase (1) specified by a rule in layer L1_b16_cbr1_bn\nI0821 08:59:08.656757 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b16_cbr2_bn\nI0821 08:59:08.656776 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr1_bn\nI0821 08:59:08.656795 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr2_bn\nI0821 08:59:08.656814 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr1_bn\nI0821 08:59:08.656832 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr2_bn\nI0821 08:59:08.656850 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:59:08.656867 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:59:08.656893 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:59:08.656908 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:59:08.656926 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:59:08.656944 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:59:08.656963 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:59:08.656982 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:59:08.657001 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 
08:59:08.657024 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:59:08.657044 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:59:08.657061 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:59:08.657078 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:59:08.657105 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:59:08.657126 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:59:08.657145 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:59:08.657165 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:59:08.657179 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:59:08.657198 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr1_bn\nI0821 08:59:08.657217 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr2_bn\nI0821 08:59:08.657236 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr1_bn\nI0821 08:59:08.657253 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr2_bn\nI0821 08:59:08.657271 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b12_cbr1_bn\nI0821 08:59:08.657287 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) 
specified by a rule in layer L2_b12_cbr2_bn\nI0821 08:59:08.657306 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr1_bn\nI0821 08:59:08.657325 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr2_bn\nI0821 08:59:08.657343 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr1_bn\nI0821 08:59:08.657361 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr2_bn\nI0821 08:59:08.657377 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr1_bn\nI0821 08:59:08.657397 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr2_bn\nI0821 08:59:08.657415 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr1_bn\nI0821 08:59:08.657433 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr2_bn\nI0821 08:59:08.657452 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr1_bn\nI0821 08:59:08.657469 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr2_bn\nI0821 08:59:08.657485 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr1_bn\nI0821 08:59:08.657503 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr2_bn\nI0821 08:59:08.657523 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:59:08.657541 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:59:08.657565 32502 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:59:08.657585 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:59:08.657604 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:59:08.657621 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:59:08.657639 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:59:08.657670 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:59:08.657690 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:59:08.657707 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:59:08.657727 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:59:08.657743 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:59:08.657760 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:59:08.657779 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:59:08.657799 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:59:08.657817 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:59:08.657836 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L3_b9_cbr1_bn\nI0821 08:59:08.657851 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:59:08.657871 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr1_bn\nI0821 08:59:08.657887 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr2_bn\nI0821 08:59:08.657905 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr1_bn\nI0821 08:59:08.657923 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr2_bn\nI0821 08:59:08.657941 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr1_bn\nI0821 08:59:08.657959 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr2_bn\nI0821 08:59:08.657977 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr1_bn\nI0821 08:59:08.657994 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr2_bn\nI0821 08:59:08.658012 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr1_bn\nI0821 08:59:08.658051 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr2_bn\nI0821 08:59:08.658069 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr1_bn\nI0821 08:59:08.658087 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr2_bn\nI0821 08:59:08.658104 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b16_cbr1_bn\nI0821 08:59:08.658123 32502 net.cpp:322] The NetState phase (0) 
differed from the phase (1) specified by a rule in layer L3_b16_cbr2_bn\nI0821 08:59:08.658141 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr1_bn\nI0821 08:59:08.658159 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr2_bn\nI0821 08:59:08.658179 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr1_bn\nI0821 08:59:08.658195 32502 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr2_bn\nI0821 08:59:08.661522 32502 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n 
 type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: 
\"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: 
\"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    
pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: 
\"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: 
\"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include 
{\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer 
{\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: 
\"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: 
\"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  
}\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n   \nI0821 08:59:08.665316 32502 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:59:08.667717 32502 net.cpp:100] Creating Layer dataLayer\nI0821 08:59:08.667793 32502 net.cpp:408] dataLayer -> data_top\nI0821 08:59:08.668005 32502 net.cpp:408] 
dataLayer -> label\nI0821 08:59:08.668138 32502 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:59:08.678056 32507 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0821 08:59:08.724216 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:08.730196 32502 net.cpp:150] Setting up dataLayer\nI0821 08:59:08.730265 32502 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0821 08:59:08.730279 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.730284 32502 net.cpp:165] Memory required for data: 1229200\nI0821 08:59:08.730301 32502 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:59:08.730319 32502 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:59:08.730326 32502 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:59:08.730345 32502 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:59:08.730361 32502 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:59:08.730433 32502 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:59:08.730446 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.730453 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.730458 32502 net.cpp:165] Memory required for data: 1230000\nI0821 08:59:08.730463 32502 layer_factory.hpp:77] Creating layer pre_conv\nI0821 08:59:08.730537 32502 net.cpp:100] Creating Layer pre_conv\nI0821 08:59:08.730551 32502 net.cpp:434] pre_conv <- data_top\nI0821 08:59:08.730563 32502 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:59:08.732444 32502 net.cpp:150] Setting up pre_conv\nI0821 08:59:08.732463 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.732470 32502 net.cpp:165] Memory required for data: 7783600\nI0821 08:59:08.732529 32502 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:59:08.732605 32502 net.cpp:100] Creating Layer pre_bn\nI0821 08:59:08.732619 32502 net.cpp:434] pre_bn <- pre_conv_top\nI0821 
08:59:08.732627 32502 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:59:08.732753 32509 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:08.732972 32502 net.cpp:150] Setting up pre_bn\nI0821 08:59:08.732992 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.732998 32502 net.cpp:165] Memory required for data: 14337200\nI0821 08:59:08.733016 32502 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:08.733067 32502 net.cpp:100] Creating Layer pre_scale\nI0821 08:59:08.733078 32502 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:59:08.733086 32502 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:59:08.733265 32502 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:08.735188 32502 net.cpp:150] Setting up pre_scale\nI0821 08:59:08.735205 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.735210 32502 net.cpp:165] Memory required for data: 20890800\nI0821 08:59:08.735222 32502 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:59:08.735271 32502 net.cpp:100] Creating Layer pre_relu\nI0821 08:59:08.735283 32502 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:59:08.735296 32502 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:59:08.735306 32502 net.cpp:150] Setting up pre_relu\nI0821 08:59:08.735314 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.735319 32502 net.cpp:165] Memory required for data: 27444400\nI0821 08:59:08.735324 32502 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:59:08.735332 32502 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:59:08.735337 32502 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:59:08.735347 32502 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:59:08.735357 32502 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:59:08.735401 32502 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:59:08.735412 32502 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.735419 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.735424 32502 net.cpp:165] Memory required for data: 40551600\nI0821 08:59:08.735430 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:59:08.735443 32502 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:59:08.735450 32502 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:59:08.735458 32502 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:59:08.735779 32502 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:59:08.735793 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.735800 32502 net.cpp:165] Memory required for data: 47105200\nI0821 08:59:08.735815 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:59:08.735828 32502 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:59:08.735834 32502 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:59:08.735843 32502 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:59:08.736065 32502 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:59:08.736078 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.736083 32502 net.cpp:165] Memory required for data: 53658800\nI0821 08:59:08.736094 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:08.736104 32502 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:59:08.736109 32502 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:59:08.736120 32502 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.736171 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:08.736306 32502 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:59:08.736320 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.736325 32502 net.cpp:165] Memory required for data: 60212400\nI0821 08:59:08.736335 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 
08:59:08.736342 32502 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:59:08.736348 32502 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:59:08.736356 32502 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.736367 32502 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:59:08.736374 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.736379 32502 net.cpp:165] Memory required for data: 66766000\nI0821 08:59:08.736384 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:59:08.736395 32502 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:59:08.736403 32502 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:59:08.736412 32502 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:59:08.736891 32502 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:59:08.736914 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.736920 32502 net.cpp:165] Memory required for data: 73319600\nI0821 08:59:08.736930 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:59:08.736943 32502 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:59:08.736948 32502 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:59:08.736959 32502 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:59:08.737186 32502 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:59:08.737200 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737205 32502 net.cpp:165] Memory required for data: 79873200\nI0821 08:59:08.737220 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:08.737231 32502 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:59:08.737237 32502 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:59:08.737246 32502 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:59:08.737301 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:08.737435 32502 net.cpp:150] Setting up 
L1_b1_cbr2_scale\nI0821 08:59:08.737448 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737453 32502 net.cpp:165] Memory required for data: 86426800\nI0821 08:59:08.737462 32502 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:59:08.737519 32502 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:59:08.737531 32502 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:59:08.737540 32502 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:59:08.737550 32502 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:59:08.737627 32502 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:59:08.737645 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737650 32502 net.cpp:165] Memory required for data: 92980400\nI0821 08:59:08.737656 32502 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:59:08.737665 32502 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:59:08.737670 32502 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:59:08.737679 32502 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:59:08.737689 32502 net.cpp:150] Setting up L1_b1_relu\nI0821 08:59:08.737695 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737700 32502 net.cpp:165] Memory required for data: 99534000\nI0821 08:59:08.737704 32502 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.737716 32502 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.737722 32502 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:59:08.737730 32502 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:08.737740 32502 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:08.737789 32502 net.cpp:150] Setting up 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.737802 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737808 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.737813 32502 net.cpp:165] Memory required for data: 112641200\nI0821 08:59:08.737818 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:59:08.737830 32502 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:59:08.737836 32502 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:08.737848 32502 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:59:08.738142 32502 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:59:08.738157 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.738162 32502 net.cpp:165] Memory required for data: 119194800\nI0821 08:59:08.738170 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:59:08.738180 32502 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:59:08.738186 32502 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:59:08.738209 32502 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:59:08.738445 32502 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:59:08.738461 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.738466 32502 net.cpp:165] Memory required for data: 125748400\nI0821 08:59:08.738477 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:08.738487 32502 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:59:08.738492 32502 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:59:08.738499 32502 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.738551 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:08.738687 32502 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:59:08.738699 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.738704 32502 net.cpp:165] Memory required for data: 
132302000\nI0821 08:59:08.738714 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:59:08.738725 32502 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:59:08.738730 32502 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:59:08.738739 32502 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.738754 32502 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:59:08.738764 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.738768 32502 net.cpp:165] Memory required for data: 138855600\nI0821 08:59:08.738772 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:59:08.738786 32502 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:59:08.738792 32502 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:59:08.738802 32502 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:59:08.739094 32502 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:59:08.739109 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739114 32502 net.cpp:165] Memory required for data: 145409200\nI0821 08:59:08.739122 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:59:08.739131 32502 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:59:08.739136 32502 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:59:08.739147 32502 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:59:08.739377 32502 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:59:08.739390 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739395 32502 net.cpp:165] Memory required for data: 151962800\nI0821 08:59:08.739413 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:08.739423 32502 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:59:08.739429 32502 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:59:08.739436 32502 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:59:08.739491 32502 
layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:08.739630 32502 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:59:08.739644 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739648 32502 net.cpp:165] Memory required for data: 158516400\nI0821 08:59:08.739657 32502 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:59:08.739670 32502 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:59:08.739675 32502 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:59:08.739682 32502 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:08.739691 32502 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:59:08.739723 32502 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:59:08.739732 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739737 32502 net.cpp:165] Memory required for data: 165070000\nI0821 08:59:08.739748 32502 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:59:08.739758 32502 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:59:08.739763 32502 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:59:08.739771 32502 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:59:08.739789 32502 net.cpp:150] Setting up L1_b2_relu\nI0821 08:59:08.739795 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739800 32502 net.cpp:165] Memory required for data: 171623600\nI0821 08:59:08.739805 32502 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.739812 32502 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.739817 32502 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:59:08.739827 32502 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:08.739837 32502 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:08.739878 32502 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.739889 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739897 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.739900 32502 net.cpp:165] Memory required for data: 184730800\nI0821 08:59:08.739905 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:59:08.739919 32502 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:59:08.739925 32502 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:08.739934 32502 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:59:08.740233 32502 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:59:08.740247 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.740252 32502 net.cpp:165] Memory required for data: 191284400\nI0821 08:59:08.740262 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:59:08.740272 32502 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:59:08.740278 32502 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:59:08.740289 32502 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:59:08.740514 32502 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:59:08.740527 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.740532 32502 net.cpp:165] Memory required for data: 197838000\nI0821 08:59:08.740542 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:08.740551 32502 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:59:08.740557 32502 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:59:08.740564 32502 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.740618 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:08.740761 32502 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:59:08.740774 32502 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0821 08:59:08.740779 32502 net.cpp:165] Memory required for data: 204391600\nI0821 08:59:08.740788 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:59:08.740799 32502 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:59:08.740805 32502 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:59:08.740813 32502 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.740823 32502 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:59:08.740829 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.740833 32502 net.cpp:165] Memory required for data: 210945200\nI0821 08:59:08.740839 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:59:08.740851 32502 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:59:08.740857 32502 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:59:08.740869 32502 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:59:08.741165 32502 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:59:08.741179 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741184 32502 net.cpp:165] Memory required for data: 217498800\nI0821 08:59:08.741194 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:59:08.741210 32502 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:59:08.741224 32502 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:59:08.741235 32502 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:59:08.741462 32502 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:59:08.741477 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741482 32502 net.cpp:165] Memory required for data: 224052400\nI0821 08:59:08.741492 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:08.741500 32502 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:59:08.741506 32502 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:59:08.741513 32502 net.cpp:395] 
L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:59:08.741569 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:08.741700 32502 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:59:08.741714 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741719 32502 net.cpp:165] Memory required for data: 230606000\nI0821 08:59:08.741727 32502 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:59:08.741739 32502 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:59:08.741758 32502 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:59:08.741766 32502 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:08.741775 32502 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:59:08.741807 32502 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:59:08.741819 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741824 32502 net.cpp:165] Memory required for data: 237159600\nI0821 08:59:08.741829 32502 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:59:08.741837 32502 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:59:08.741842 32502 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:59:08.741852 32502 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:59:08.741861 32502 net.cpp:150] Setting up L1_b3_relu\nI0821 08:59:08.741868 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741873 32502 net.cpp:165] Memory required for data: 243713200\nI0821 08:59:08.741878 32502 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.741885 32502 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.741890 32502 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:59:08.741900 32502 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 
08:59:08.741910 32502 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:08.741951 32502 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.741962 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741969 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.741974 32502 net.cpp:165] Memory required for data: 256820400\nI0821 08:59:08.741978 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:59:08.741993 32502 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:59:08.741999 32502 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:08.742008 32502 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:59:08.742306 32502 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:59:08.742321 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.742326 32502 net.cpp:165] Memory required for data: 263374000\nI0821 08:59:08.742334 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:59:08.742347 32502 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:59:08.742352 32502 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:59:08.742363 32502 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:59:08.742594 32502 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:59:08.742607 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.742612 32502 net.cpp:165] Memory required for data: 269927600\nI0821 08:59:08.742630 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:08.742640 32502 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:59:08.742645 32502 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:59:08.742655 32502 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.742709 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:08.742852 32502 net.cpp:150] Setting 
up L1_b4_cbr1_scale\nI0821 08:59:08.742869 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.742874 32502 net.cpp:165] Memory required for data: 276481200\nI0821 08:59:08.742883 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:59:08.742892 32502 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:59:08.742897 32502 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:59:08.742904 32502 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.742913 32502 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:59:08.742920 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.742924 32502 net.cpp:165] Memory required for data: 283034800\nI0821 08:59:08.742929 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:59:08.742943 32502 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:59:08.742949 32502 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:59:08.742959 32502 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:59:08.743265 32502 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:59:08.743279 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.743285 32502 net.cpp:165] Memory required for data: 289588400\nI0821 08:59:08.743294 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:59:08.743305 32502 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:59:08.743312 32502 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:59:08.743322 32502 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:59:08.743556 32502 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:59:08.743569 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.743574 32502 net.cpp:165] Memory required for data: 296142000\nI0821 08:59:08.743584 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:08.743593 32502 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:59:08.743599 32502 net.cpp:434] 
L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:59:08.743609 32502 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:59:08.743661 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:08.743800 32502 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:59:08.743816 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.743821 32502 net.cpp:165] Memory required for data: 302695600\nI0821 08:59:08.743831 32502 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:59:08.743840 32502 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:59:08.743846 32502 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:59:08.743852 32502 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:08.743863 32502 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:59:08.743893 32502 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:59:08.743903 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.743907 32502 net.cpp:165] Memory required for data: 309249200\nI0821 08:59:08.743912 32502 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:59:08.743927 32502 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:59:08.743932 32502 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:59:08.743939 32502 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:59:08.743948 32502 net.cpp:150] Setting up L1_b4_relu\nI0821 08:59:08.743955 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.743960 32502 net.cpp:165] Memory required for data: 315802800\nI0821 08:59:08.743964 32502 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.743983 32502 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.743988 32502 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:59:08.743996 32502 net.cpp:408] 
L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:08.744006 32502 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:08.744047 32502 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.744061 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.744068 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.744073 32502 net.cpp:165] Memory required for data: 328910000\nI0821 08:59:08.744078 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:59:08.744089 32502 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:59:08.744096 32502 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:08.744104 32502 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:59:08.744406 32502 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:59:08.744421 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.744426 32502 net.cpp:165] Memory required for data: 335463600\nI0821 08:59:08.744446 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:59:08.744459 32502 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:59:08.744465 32502 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:59:08.744473 32502 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:59:08.744706 32502 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:59:08.744719 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.744724 32502 net.cpp:165] Memory required for data: 342017200\nI0821 08:59:08.744735 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:08.744748 32502 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:59:08.744755 32502 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:59:08.744768 32502 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.744822 32502 
layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:08.744957 32502 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:59:08.744972 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.744977 32502 net.cpp:165] Memory required for data: 348570800\nI0821 08:59:08.744987 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:59:08.744995 32502 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:59:08.745000 32502 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:59:08.745008 32502 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.745018 32502 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:59:08.745024 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.745028 32502 net.cpp:165] Memory required for data: 355124400\nI0821 08:59:08.745033 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:59:08.745048 32502 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:59:08.745052 32502 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:59:08.745064 32502 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:59:08.745368 32502 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:59:08.745383 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.745388 32502 net.cpp:165] Memory required for data: 361678000\nI0821 08:59:08.745395 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:59:08.745409 32502 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:59:08.745415 32502 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:59:08.745429 32502 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:59:08.745654 32502 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:59:08.745667 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.745672 32502 net.cpp:165] Memory required for data: 368231600\nI0821 08:59:08.745689 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:08.745699 
32502 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:59:08.745705 32502 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:59:08.745712 32502 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:59:08.745774 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:08.745908 32502 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:59:08.745921 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.745926 32502 net.cpp:165] Memory required for data: 374785200\nI0821 08:59:08.745935 32502 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:59:08.745949 32502 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:59:08.745955 32502 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:59:08.745962 32502 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:08.745970 32502 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:59:08.746002 32502 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:59:08.746012 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746017 32502 net.cpp:165] Memory required for data: 381338800\nI0821 08:59:08.746022 32502 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:59:08.746031 32502 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:59:08.746035 32502 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:59:08.746045 32502 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:59:08.746054 32502 net.cpp:150] Setting up L1_b5_relu\nI0821 08:59:08.746062 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746067 32502 net.cpp:165] Memory required for data: 387892400\nI0821 08:59:08.746070 32502 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.746078 32502 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.746083 32502 net.cpp:434] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:59:08.746093 32502 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:08.746101 32502 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:08.746142 32502 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.746155 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746160 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746165 32502 net.cpp:165] Memory required for data: 400999600\nI0821 08:59:08.746170 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:59:08.746183 32502 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:59:08.746191 32502 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:08.746198 32502 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:59:08.746502 32502 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:59:08.746516 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746521 32502 net.cpp:165] Memory required for data: 407553200\nI0821 08:59:08.746531 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:59:08.746541 32502 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:59:08.746548 32502 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:59:08.746558 32502 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:59:08.746795 32502 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:59:08.746809 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.746814 32502 net.cpp:165] Memory required for data: 414106800\nI0821 08:59:08.746825 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:08.746834 32502 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:59:08.746840 32502 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:59:08.746853 
32502 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.746914 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:08.747051 32502 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:59:08.747066 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.747071 32502 net.cpp:165] Memory required for data: 420660400\nI0821 08:59:08.747081 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:59:08.747088 32502 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:59:08.747094 32502 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:59:08.747102 32502 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.747112 32502 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:59:08.747118 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.747123 32502 net.cpp:165] Memory required for data: 427214000\nI0821 08:59:08.747128 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:59:08.747141 32502 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:59:08.747148 32502 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:59:08.747159 32502 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:59:08.747467 32502 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:59:08.747481 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.747486 32502 net.cpp:165] Memory required for data: 433767600\nI0821 08:59:08.747495 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:59:08.747508 32502 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:59:08.747514 32502 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:59:08.747524 32502 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:59:08.747762 32502 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:59:08.747776 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.747781 32502 net.cpp:165] Memory required for data: 440321200\nI0821 
08:59:08.747792 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:08.747800 32502 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:59:08.747807 32502 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:59:08.747813 32502 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:59:08.747869 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:08.748008 32502 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:59:08.748021 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748026 32502 net.cpp:165] Memory required for data: 446874800\nI0821 08:59:08.748035 32502 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:59:08.748051 32502 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:59:08.748057 32502 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:59:08.748065 32502 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:08.748076 32502 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:59:08.748107 32502 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:59:08.748122 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748127 32502 net.cpp:165] Memory required for data: 453428400\nI0821 08:59:08.748132 32502 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:59:08.748141 32502 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:59:08.748147 32502 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:59:08.748153 32502 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:59:08.748162 32502 net.cpp:150] Setting up L1_b6_relu\nI0821 08:59:08.748169 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748173 32502 net.cpp:165] Memory required for data: 459982000\nI0821 08:59:08.748178 32502 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.748191 32502 net.cpp:100] Creating Layer 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.748196 32502 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:59:08.748204 32502 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:08.748222 32502 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:08.748267 32502 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.748281 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748287 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748291 32502 net.cpp:165] Memory required for data: 473089200\nI0821 08:59:08.748296 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:59:08.748308 32502 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:59:08.748313 32502 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:08.748325 32502 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:59:08.748634 32502 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:59:08.748648 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748653 32502 net.cpp:165] Memory required for data: 479642800\nI0821 08:59:08.748662 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:59:08.748672 32502 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:59:08.748677 32502 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:59:08.748685 32502 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:59:08.748927 32502 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:59:08.748941 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.748945 32502 net.cpp:165] Memory required for data: 486196400\nI0821 08:59:08.748956 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:08.748968 32502 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 
08:59:08.748975 32502 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:59:08.748982 32502 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.749037 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:08.749173 32502 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:59:08.749186 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.749191 32502 net.cpp:165] Memory required for data: 492750000\nI0821 08:59:08.749200 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:59:08.749208 32502 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:59:08.749214 32502 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:59:08.749224 32502 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.749234 32502 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:59:08.749241 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.749245 32502 net.cpp:165] Memory required for data: 499303600\nI0821 08:59:08.749250 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:59:08.749265 32502 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:59:08.749271 32502 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:59:08.749279 32502 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:59:08.749581 32502 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:59:08.749595 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.749600 32502 net.cpp:165] Memory required for data: 505857200\nI0821 08:59:08.749608 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:59:08.749620 32502 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:59:08.749626 32502 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:59:08.749634 32502 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:59:08.749881 32502 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:59:08.749896 32502 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:08.749900 32502 net.cpp:165] Memory required for data: 512410800\nI0821 08:59:08.749912 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:08.749919 32502 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:59:08.749925 32502 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:59:08.749936 32502 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:59:08.749997 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:08.750138 32502 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:59:08.750151 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750156 32502 net.cpp:165] Memory required for data: 518964400\nI0821 08:59:08.750165 32502 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:59:08.750174 32502 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:59:08.750180 32502 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:59:08.750187 32502 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:08.750198 32502 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:59:08.750229 32502 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:59:08.750241 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750247 32502 net.cpp:165] Memory required for data: 525518000\nI0821 08:59:08.750252 32502 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:59:08.750259 32502 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:59:08.750264 32502 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:59:08.750272 32502 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:59:08.750280 32502 net.cpp:150] Setting up L1_b7_relu\nI0821 08:59:08.750288 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750293 32502 net.cpp:165] Memory required for data: 532071600\nI0821 08:59:08.750298 32502 layer_factory.hpp:77] Creating layer 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.750308 32502 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.750313 32502 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:59:08.750319 32502 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:08.750329 32502 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:08.750373 32502 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.750385 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750391 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750396 32502 net.cpp:165] Memory required for data: 545178800\nI0821 08:59:08.750401 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:59:08.750411 32502 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:59:08.750417 32502 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:08.750429 32502 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:59:08.750741 32502 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:59:08.750761 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.750766 32502 net.cpp:165] Memory required for data: 551732400\nI0821 08:59:08.750774 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:59:08.750783 32502 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:59:08.750789 32502 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:59:08.750797 32502 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:59:08.751037 32502 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:59:08.751050 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.751055 32502 net.cpp:165] Memory required for data: 558286000\nI0821 08:59:08.751066 32502 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_scale\nI0821 08:59:08.751077 32502 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:59:08.751083 32502 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:59:08.751091 32502 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.751152 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:08.751294 32502 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:59:08.751307 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.751312 32502 net.cpp:165] Memory required for data: 564839600\nI0821 08:59:08.751328 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:59:08.751336 32502 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:59:08.751343 32502 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:59:08.751353 32502 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.751363 32502 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:59:08.751369 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.751374 32502 net.cpp:165] Memory required for data: 571393200\nI0821 08:59:08.751379 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:59:08.751392 32502 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:59:08.751399 32502 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:59:08.751406 32502 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:59:08.751718 32502 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:59:08.751732 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.751737 32502 net.cpp:165] Memory required for data: 577946800\nI0821 08:59:08.751751 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:59:08.751765 32502 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:59:08.751771 32502 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:59:08.751780 32502 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:59:08.752019 32502 
net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:59:08.752032 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752038 32502 net.cpp:165] Memory required for data: 584500400\nI0821 08:59:08.752048 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:08.752058 32502 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:59:08.752063 32502 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:59:08.752074 32502 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:59:08.752128 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:08.752265 32502 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:59:08.752280 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752285 32502 net.cpp:165] Memory required for data: 591054000\nI0821 08:59:08.752295 32502 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:59:08.752303 32502 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:59:08.752310 32502 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:59:08.752316 32502 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:08.752327 32502 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:59:08.752358 32502 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:59:08.752368 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752372 32502 net.cpp:165] Memory required for data: 597607600\nI0821 08:59:08.752377 32502 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:59:08.752388 32502 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:59:08.752394 32502 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:59:08.752401 32502 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:59:08.752410 32502 net.cpp:150] Setting up L1_b8_relu\nI0821 08:59:08.752418 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752421 32502 net.cpp:165] Memory required for 
data: 604161200\nI0821 08:59:08.752426 32502 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.752436 32502 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.752441 32502 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:59:08.752449 32502 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:08.752459 32502 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:08.752501 32502 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.752512 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752529 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752534 32502 net.cpp:165] Memory required for data: 617268400\nI0821 08:59:08.752539 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:59:08.752550 32502 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:59:08.752557 32502 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:08.752565 32502 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:59:08.752895 32502 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:59:08.752910 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.752915 32502 net.cpp:165] Memory required for data: 623822000\nI0821 08:59:08.752923 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:59:08.752935 32502 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:59:08.752943 32502 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:59:08.752950 32502 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:59:08.753192 32502 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:59:08.753208 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.753213 32502 net.cpp:165] Memory required for data: 
630375600\nI0821 08:59:08.753224 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:08.753233 32502 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:59:08.753238 32502 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:59:08.753247 32502 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.753299 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:08.753446 32502 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:59:08.753459 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.753464 32502 net.cpp:165] Memory required for data: 636929200\nI0821 08:59:08.753473 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:59:08.753484 32502 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:59:08.753490 32502 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:59:08.753500 32502 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.753510 32502 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:59:08.753517 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.753522 32502 net.cpp:165] Memory required for data: 643482800\nI0821 08:59:08.753526 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:59:08.753537 32502 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:59:08.753543 32502 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:59:08.753553 32502 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:59:08.753878 32502 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:59:08.753892 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.753897 32502 net.cpp:165] Memory required for data: 650036400\nI0821 08:59:08.753907 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:59:08.753916 32502 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:59:08.753922 32502 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:59:08.753933 32502 
net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:59:08.754174 32502 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:59:08.754187 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.754192 32502 net.cpp:165] Memory required for data: 656590000\nI0821 08:59:08.754221 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:08.754233 32502 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:59:08.754240 32502 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:59:08.754246 32502 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:59:08.754302 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:08.754441 32502 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:59:08.754454 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.754459 32502 net.cpp:165] Memory required for data: 663143600\nI0821 08:59:08.754475 32502 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:59:08.754485 32502 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:59:08.754492 32502 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:59:08.754498 32502 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:08.754508 32502 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:59:08.754540 32502 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:59:08.754554 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.754559 32502 net.cpp:165] Memory required for data: 669697200\nI0821 08:59:08.754565 32502 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:59:08.754572 32502 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:59:08.754578 32502 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:59:08.754585 32502 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:59:08.754595 32502 net.cpp:150] Setting up L1_b9_relu\nI0821 08:59:08.754601 32502 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0821 08:59:08.754606 32502 net.cpp:165] Memory required for data: 676250800\nI0821 08:59:08.754611 32502 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.754621 32502 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.754626 32502 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:59:08.754634 32502 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:08.754647 32502 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:08.754693 32502 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.754705 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.754712 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.754716 32502 net.cpp:165] Memory required for data: 689358000\nI0821 08:59:08.754721 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0821 08:59:08.754732 32502 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0821 08:59:08.754739 32502 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:08.754755 32502 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0821 08:59:08.755071 32502 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0821 08:59:08.755085 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.755091 32502 net.cpp:165] Memory required for data: 695911600\nI0821 08:59:08.755100 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0821 08:59:08.755108 32502 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0821 08:59:08.755115 32502 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0821 08:59:08.755122 32502 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0821 08:59:08.755362 32502 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0821 08:59:08.755374 32502 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0821 08:59:08.755379 32502 net.cpp:165] Memory required for data: 702465200\nI0821 08:59:08.755389 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:08.755401 32502 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0821 08:59:08.755408 32502 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0821 08:59:08.755415 32502 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.755476 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:08.755615 32502 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0821 08:59:08.755628 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.755633 32502 net.cpp:165] Memory required for data: 709018800\nI0821 08:59:08.755642 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0821 08:59:08.755650 32502 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0821 08:59:08.755656 32502 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0821 08:59:08.755667 32502 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.755683 32502 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0821 08:59:08.755690 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.755695 32502 net.cpp:165] Memory required for data: 715572400\nI0821 08:59:08.755700 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0821 08:59:08.755713 32502 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0821 08:59:08.755719 32502 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0821 08:59:08.755728 32502 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0821 08:59:08.756043 32502 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0821 08:59:08.756058 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756063 32502 net.cpp:165] Memory required for data: 722126000\nI0821 08:59:08.756072 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0821 08:59:08.756084 32502 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0821 
08:59:08.756090 32502 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0821 08:59:08.756098 32502 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0821 08:59:08.756343 32502 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0821 08:59:08.756356 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756361 32502 net.cpp:165] Memory required for data: 728679600\nI0821 08:59:08.756371 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:08.756381 32502 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0821 08:59:08.756386 32502 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0821 08:59:08.756397 32502 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0821 08:59:08.756450 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:08.756593 32502 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0821 08:59:08.756606 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756611 32502 net.cpp:165] Memory required for data: 735233200\nI0821 08:59:08.756620 32502 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0821 08:59:08.756630 32502 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0821 08:59:08.756635 32502 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0821 08:59:08.756644 32502 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:08.756654 32502 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0821 08:59:08.756685 32502 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0821 08:59:08.756700 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756705 32502 net.cpp:165] Memory required for data: 741786800\nI0821 08:59:08.756709 32502 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0821 08:59:08.756717 32502 net.cpp:100] Creating Layer L1_b10_relu\nI0821 08:59:08.756722 32502 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0821 08:59:08.756729 32502 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top 
(in-place)\nI0821 08:59:08.756738 32502 net.cpp:150] Setting up L1_b10_relu\nI0821 08:59:08.756752 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756757 32502 net.cpp:165] Memory required for data: 748340400\nI0821 08:59:08.756762 32502 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.756772 32502 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.756778 32502 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0821 08:59:08.756784 32502 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:08.756794 32502 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:08.756840 32502 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.756852 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756860 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.756865 32502 net.cpp:165] Memory required for data: 761447600\nI0821 08:59:08.756870 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0821 08:59:08.756889 32502 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0821 08:59:08.756896 32502 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:08.756907 32502 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0821 08:59:08.757226 32502 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0821 08:59:08.757241 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.757246 32502 net.cpp:165] Memory required for data: 768001200\nI0821 08:59:08.757254 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0821 08:59:08.757264 32502 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0821 08:59:08.757270 32502 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0821 08:59:08.757278 32502 net.cpp:408] 
L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0821 08:59:08.757522 32502 net.cpp:150] Setting up L1_b11_cbr1_bn\nI0821 08:59:08.757535 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.757540 32502 net.cpp:165] Memory required for data: 774554800\nI0821 08:59:08.757551 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:08.757563 32502 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0821 08:59:08.757570 32502 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0821 08:59:08.757577 32502 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.757633 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:08.757782 32502 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0821 08:59:08.757797 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.757802 32502 net.cpp:165] Memory required for data: 781108400\nI0821 08:59:08.757810 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0821 08:59:08.757818 32502 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0821 08:59:08.757824 32502 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0821 08:59:08.757834 32502 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.757844 32502 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0821 08:59:08.757851 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.757856 32502 net.cpp:165] Memory required for data: 787662000\nI0821 08:59:08.757861 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0821 08:59:08.757874 32502 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0821 08:59:08.757880 32502 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0821 08:59:08.757889 32502 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0821 08:59:08.758208 32502 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0821 08:59:08.758221 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.758226 32502 net.cpp:165] Memory required for data: 
794215600\nI0821 08:59:08.758235 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0821 08:59:08.758247 32502 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0821 08:59:08.758253 32502 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0821 08:59:08.758262 32502 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0821 08:59:08.758508 32502 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0821 08:59:08.758522 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.758527 32502 net.cpp:165] Memory required for data: 800769200\nI0821 08:59:08.758536 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:08.758545 32502 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0821 08:59:08.758551 32502 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0821 08:59:08.758561 32502 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0821 08:59:08.758616 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:08.758761 32502 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0821 08:59:08.758780 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.758785 32502 net.cpp:165] Memory required for data: 807322800\nI0821 08:59:08.758793 32502 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0821 08:59:08.758802 32502 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0821 08:59:08.758808 32502 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0821 08:59:08.758822 32502 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:08.758833 32502 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0821 08:59:08.758865 32502 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0821 08:59:08.758874 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.758879 32502 net.cpp:165] Memory required for data: 813876400\nI0821 08:59:08.758884 32502 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0821 08:59:08.758895 32502 net.cpp:100] Creating Layer 
L1_b11_relu\nI0821 08:59:08.758901 32502 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0821 08:59:08.758908 32502 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top (in-place)\nI0821 08:59:08.758918 32502 net.cpp:150] Setting up L1_b11_relu\nI0821 08:59:08.758924 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.758929 32502 net.cpp:165] Memory required for data: 820430000\nI0821 08:59:08.758934 32502 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.758944 32502 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.758949 32502 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0821 08:59:08.758956 32502 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:08.758965 32502 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:08.759008 32502 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.759022 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.759029 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.759034 32502 net.cpp:165] Memory required for data: 833537200\nI0821 08:59:08.759039 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0821 08:59:08.759050 32502 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0821 08:59:08.759057 32502 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:08.759064 32502 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0821 08:59:08.759382 32502 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0821 08:59:08.759395 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.759400 32502 net.cpp:165] Memory required for data: 840090800\nI0821 08:59:08.759409 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0821 08:59:08.759421 32502 
net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0821 08:59:08.759428 32502 net.cpp:434] L1_b12_cbr1_bn <- L1_b12_cbr1_conv_top\nI0821 08:59:08.759435 32502 net.cpp:408] L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0821 08:59:08.759681 32502 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0821 08:59:08.759694 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.759699 32502 net.cpp:165] Memory required for data: 846644400\nI0821 08:59:08.759709 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:08.759718 32502 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0821 08:59:08.759724 32502 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0821 08:59:08.759735 32502 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.759796 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:08.760009 32502 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0821 08:59:08.760030 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.760038 32502 net.cpp:165] Memory required for data: 853198000\nI0821 08:59:08.760056 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0821 08:59:08.760071 32502 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0821 08:59:08.760082 32502 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0821 08:59:08.760098 32502 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.760112 32502 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0821 08:59:08.760119 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.760131 32502 net.cpp:165] Memory required for data: 859751600\nI0821 08:59:08.760136 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0821 08:59:08.760151 32502 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0821 08:59:08.760157 32502 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0821 08:59:08.760166 32502 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0821 08:59:08.760493 32502 net.cpp:150] Setting up 
L1_b12_cbr2_conv\nI0821 08:59:08.760507 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.760512 32502 net.cpp:165] Memory required for data: 866305200\nI0821 08:59:08.760522 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0821 08:59:08.760531 32502 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0821 08:59:08.760537 32502 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0821 08:59:08.760548 32502 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0821 08:59:08.760794 32502 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0821 08:59:08.760808 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.760813 32502 net.cpp:165] Memory required for data: 872858800\nI0821 08:59:08.760823 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:08.760833 32502 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0821 08:59:08.760838 32502 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0821 08:59:08.760848 32502 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0821 08:59:08.760903 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:08.761044 32502 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0821 08:59:08.761060 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761065 32502 net.cpp:165] Memory required for data: 879412400\nI0821 08:59:08.761075 32502 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0821 08:59:08.761083 32502 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0821 08:59:08.761090 32502 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0821 08:59:08.761096 32502 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:08.761104 32502 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0821 08:59:08.761140 32502 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0821 08:59:08.761150 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761154 32502 net.cpp:165] Memory required for 
data: 885966000\nI0821 08:59:08.761160 32502 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0821 08:59:08.761170 32502 net.cpp:100] Creating Layer L1_b12_relu\nI0821 08:59:08.761176 32502 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0821 08:59:08.761183 32502 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0821 08:59:08.761193 32502 net.cpp:150] Setting up L1_b12_relu\nI0821 08:59:08.761198 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761204 32502 net.cpp:165] Memory required for data: 892519600\nI0821 08:59:08.761209 32502 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.761219 32502 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.761224 32502 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0821 08:59:08.761231 32502 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:08.761241 32502 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:08.761284 32502 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.761299 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761307 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761310 32502 net.cpp:165] Memory required for data: 905626800\nI0821 08:59:08.761315 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0821 08:59:08.761327 32502 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0821 08:59:08.761333 32502 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:08.761349 32502 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0821 08:59:08.761675 32502 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0821 08:59:08.761689 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.761694 32502 net.cpp:165] 
Memory required for data: 912180400\nI0821 08:59:08.761703 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_bn\nI0821 08:59:08.761730 32502 net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0821 08:59:08.761736 32502 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0821 08:59:08.761751 32502 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0821 08:59:08.761993 32502 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0821 08:59:08.762006 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.762012 32502 net.cpp:165] Memory required for data: 918734000\nI0821 08:59:08.762022 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:08.762032 32502 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0821 08:59:08.762037 32502 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0821 08:59:08.762045 32502 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.762101 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:08.762244 32502 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0821 08:59:08.762257 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.762262 32502 net.cpp:165] Memory required for data: 925287600\nI0821 08:59:08.762272 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0821 08:59:08.762280 32502 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0821 08:59:08.762290 32502 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0821 08:59:08.762296 32502 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.762306 32502 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0821 08:59:08.762313 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.762317 32502 net.cpp:165] Memory required for data: 931841200\nI0821 08:59:08.762322 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0821 08:59:08.762336 32502 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0821 08:59:08.762342 32502 net.cpp:434] L1_b13_cbr2_conv <- 
L1_b13_cbr1_bn_top\nI0821 08:59:08.762356 32502 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0821 08:59:08.762673 32502 net.cpp:150] Setting up L1_b13_cbr2_conv\nI0821 08:59:08.762687 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.762692 32502 net.cpp:165] Memory required for data: 938394800\nI0821 08:59:08.762701 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0821 08:59:08.762713 32502 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0821 08:59:08.762720 32502 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0821 08:59:08.762728 32502 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0821 08:59:08.762984 32502 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0821 08:59:08.762997 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763002 32502 net.cpp:165] Memory required for data: 944948400\nI0821 08:59:08.763012 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:08.763021 32502 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0821 08:59:08.763027 32502 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0821 08:59:08.763036 32502 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0821 08:59:08.763093 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:08.763234 32502 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0821 08:59:08.763247 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763252 32502 net.cpp:165] Memory required for data: 951502000\nI0821 08:59:08.763260 32502 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0821 08:59:08.763270 32502 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0821 08:59:08.763276 32502 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0821 08:59:08.763283 32502 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:08.763300 32502 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0821 08:59:08.763332 32502 net.cpp:150] Setting up 
L1_b13_sum_eltwise\nI0821 08:59:08.763341 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763346 32502 net.cpp:165] Memory required for data: 958055600\nI0821 08:59:08.763351 32502 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0821 08:59:08.763362 32502 net.cpp:100] Creating Layer L1_b13_relu\nI0821 08:59:08.763368 32502 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0821 08:59:08.763375 32502 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0821 08:59:08.763384 32502 net.cpp:150] Setting up L1_b13_relu\nI0821 08:59:08.763391 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763396 32502 net.cpp:165] Memory required for data: 964609200\nI0821 08:59:08.763401 32502 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.763408 32502 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.763413 32502 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0821 08:59:08.763420 32502 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:08.763430 32502 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:08.763475 32502 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.763487 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763494 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763499 32502 net.cpp:165] Memory required for data: 977716400\nI0821 08:59:08.763504 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0821 08:59:08.763517 32502 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0821 08:59:08.763523 32502 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:08.763532 32502 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0821 08:59:08.763854 32502 
net.cpp:150] Setting up L1_b14_cbr1_conv\nI0821 08:59:08.763867 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.763872 32502 net.cpp:165] Memory required for data: 984270000\nI0821 08:59:08.763882 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0821 08:59:08.763895 32502 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0821 08:59:08.763901 32502 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0821 08:59:08.763911 32502 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0821 08:59:08.764154 32502 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0821 08:59:08.764168 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.764173 32502 net.cpp:165] Memory required for data: 990823600\nI0821 08:59:08.764183 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:08.764191 32502 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0821 08:59:08.764197 32502 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0821 08:59:08.764205 32502 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.764261 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:08.764403 32502 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0821 08:59:08.764416 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.764421 32502 net.cpp:165] Memory required for data: 997377200\nI0821 08:59:08.764430 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0821 08:59:08.764439 32502 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0821 08:59:08.764444 32502 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0821 08:59:08.764456 32502 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.764466 32502 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0821 08:59:08.764472 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.764477 32502 net.cpp:165] Memory required for data: 1003930800\nI0821 08:59:08.764482 32502 layer_factory.hpp:77] Creating layer 
L1_b14_cbr2_conv\nI0821 08:59:08.764503 32502 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0821 08:59:08.764508 32502 net.cpp:434] L1_b14_cbr2_conv <- L1_b14_cbr1_bn_top\nI0821 08:59:08.764518 32502 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0821 08:59:08.764844 32502 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0821 08:59:08.764858 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.764863 32502 net.cpp:165] Memory required for data: 1010484400\nI0821 08:59:08.764873 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0821 08:59:08.764884 32502 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0821 08:59:08.764890 32502 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0821 08:59:08.764899 32502 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0821 08:59:08.765144 32502 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0821 08:59:08.765161 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765166 32502 net.cpp:165] Memory required for data: 1017038000\nI0821 08:59:08.765177 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:08.765184 32502 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0821 08:59:08.765192 32502 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0821 08:59:08.765198 32502 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0821 08:59:08.765254 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:08.765398 32502 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0821 08:59:08.765411 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765416 32502 net.cpp:165] Memory required for data: 1023591600\nI0821 08:59:08.765425 32502 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0821 08:59:08.765437 32502 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0821 08:59:08.765444 32502 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0821 08:59:08.765450 32502 net.cpp:434] L1_b14_sum_eltwise <- 
L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:08.765460 32502 net.cpp:408] L1_b14_sum_eltwise -> L1_b14_sum_eltwise_top\nI0821 08:59:08.765492 32502 net.cpp:150] Setting up L1_b14_sum_eltwise\nI0821 08:59:08.765501 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765506 32502 net.cpp:165] Memory required for data: 1030145200\nI0821 08:59:08.765511 32502 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0821 08:59:08.765522 32502 net.cpp:100] Creating Layer L1_b14_relu\nI0821 08:59:08.765528 32502 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0821 08:59:08.765535 32502 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0821 08:59:08.765544 32502 net.cpp:150] Setting up L1_b14_relu\nI0821 08:59:08.765552 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765557 32502 net.cpp:165] Memory required for data: 1036698800\nI0821 08:59:08.765560 32502 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.765568 32502 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.765573 32502 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0821 08:59:08.765580 32502 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:08.765589 32502 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:08.765635 32502 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.765647 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765653 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.765658 32502 net.cpp:165] Memory required for data: 1049806000\nI0821 08:59:08.765663 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0821 08:59:08.765677 32502 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0821 08:59:08.765683 32502 
net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:08.765692 32502 net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0821 08:59:08.766021 32502 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0821 08:59:08.766042 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.766047 32502 net.cpp:165] Memory required for data: 1056359600\nI0821 08:59:08.766057 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0821 08:59:08.766068 32502 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0821 08:59:08.766075 32502 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0821 08:59:08.766083 32502 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0821 08:59:08.766330 32502 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0821 08:59:08.766343 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.766348 32502 net.cpp:165] Memory required for data: 1062913200\nI0821 08:59:08.766360 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:08.766367 32502 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0821 08:59:08.766373 32502 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0821 08:59:08.766381 32502 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.766438 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:08.766582 32502 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0821 08:59:08.766595 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.766600 32502 net.cpp:165] Memory required for data: 1069466800\nI0821 08:59:08.766609 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0821 08:59:08.766618 32502 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0821 08:59:08.766624 32502 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0821 08:59:08.766634 32502 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.766644 32502 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0821 08:59:08.766651 32502 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.766655 32502 net.cpp:165] Memory required for data: 1076020400\nI0821 08:59:08.766660 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_conv\nI0821 08:59:08.766671 32502 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0821 08:59:08.766677 32502 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0821 08:59:08.766688 32502 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0821 08:59:08.767041 32502 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0821 08:59:08.767056 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767061 32502 net.cpp:165] Memory required for data: 1082574000\nI0821 08:59:08.767071 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0821 08:59:08.767079 32502 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0821 08:59:08.767086 32502 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0821 08:59:08.767097 32502 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0821 08:59:08.767343 32502 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0821 08:59:08.767360 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767365 32502 net.cpp:165] Memory required for data: 1089127600\nI0821 08:59:08.767376 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:08.767385 32502 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0821 08:59:08.767391 32502 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0821 08:59:08.767398 32502 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0821 08:59:08.767452 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:08.767598 32502 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0821 08:59:08.767611 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767616 32502 net.cpp:165] Memory required for data: 1095681200\nI0821 08:59:08.767626 32502 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0821 08:59:08.767637 32502 net.cpp:100] Creating Layer 
L1_b15_sum_eltwise\nI0821 08:59:08.767644 32502 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0821 08:59:08.767652 32502 net.cpp:434] L1_b15_sum_eltwise <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:08.767662 32502 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0821 08:59:08.767693 32502 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0821 08:59:08.767710 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767715 32502 net.cpp:165] Memory required for data: 1102234800\nI0821 08:59:08.767720 32502 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0821 08:59:08.767730 32502 net.cpp:100] Creating Layer L1_b15_relu\nI0821 08:59:08.767737 32502 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0821 08:59:08.767750 32502 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0821 08:59:08.767761 32502 net.cpp:150] Setting up L1_b15_relu\nI0821 08:59:08.767768 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767772 32502 net.cpp:165] Memory required for data: 1108788400\nI0821 08:59:08.767777 32502 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.767784 32502 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.767791 32502 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0821 08:59:08.767797 32502 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:08.767807 32502 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:08.767855 32502 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.767868 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767874 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.767879 32502 net.cpp:165] Memory required for data: 1121895600\nI0821 08:59:08.767884 
32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0821 08:59:08.767899 32502 net.cpp:100] Creating Layer L1_b16_cbr1_conv\nI0821 08:59:08.767904 32502 net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:08.767913 32502 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0821 08:59:08.768235 32502 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0821 08:59:08.768249 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.768254 32502 net.cpp:165] Memory required for data: 1128449200\nI0821 08:59:08.768263 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0821 08:59:08.768275 32502 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0821 08:59:08.768282 32502 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0821 08:59:08.768290 32502 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0821 08:59:08.768534 32502 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0821 08:59:08.768549 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.768555 32502 net.cpp:165] Memory required for data: 1135002800\nI0821 08:59:08.768565 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:08.768574 32502 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0821 08:59:08.768581 32502 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0821 08:59:08.768589 32502 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.768643 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:08.768795 32502 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0821 08:59:08.768810 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.768815 32502 net.cpp:165] Memory required for data: 1141556400\nI0821 08:59:08.768823 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0821 08:59:08.768836 32502 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0821 08:59:08.768843 32502 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0821 08:59:08.768853 32502 
net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.768863 32502 net.cpp:150] Setting up L1_b16_cbr1_relu\nI0821 08:59:08.768870 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.768874 32502 net.cpp:165] Memory required for data: 1148110000\nI0821 08:59:08.768879 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0821 08:59:08.768890 32502 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0821 08:59:08.768895 32502 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0821 08:59:08.768915 32502 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0821 08:59:08.769238 32502 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0821 08:59:08.769253 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.769258 32502 net.cpp:165] Memory required for data: 1154663600\nI0821 08:59:08.769265 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0821 08:59:08.769275 32502 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0821 08:59:08.769281 32502 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0821 08:59:08.769294 32502 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0821 08:59:08.769546 32502 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0821 08:59:08.769558 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.769563 32502 net.cpp:165] Memory required for data: 1161217200\nI0821 08:59:08.769574 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:08.769585 32502 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0821 08:59:08.769593 32502 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0821 08:59:08.769599 32502 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0821 08:59:08.769654 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:08.769809 32502 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0821 08:59:08.769821 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.769826 32502 net.cpp:165] Memory required for 
data: 1167770800\nI0821 08:59:08.769835 32502 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise\nI0821 08:59:08.769847 32502 net.cpp:100] Creating Layer L1_b16_sum_eltwise\nI0821 08:59:08.769855 32502 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0821 08:59:08.769861 32502 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:08.769870 32502 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0821 08:59:08.769904 32502 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0821 08:59:08.769913 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.769918 32502 net.cpp:165] Memory required for data: 1174324400\nI0821 08:59:08.769923 32502 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0821 08:59:08.769932 32502 net.cpp:100] Creating Layer L1_b16_relu\nI0821 08:59:08.769937 32502 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0821 08:59:08.769948 32502 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0821 08:59:08.769958 32502 net.cpp:150] Setting up L1_b16_relu\nI0821 08:59:08.769964 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.769969 32502 net.cpp:165] Memory required for data: 1180878000\nI0821 08:59:08.769973 32502 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.769980 32502 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.769985 32502 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0821 08:59:08.769994 32502 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:08.770002 32502 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:08.770050 32502 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.770061 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.770067 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.770072 32502 net.cpp:165] Memory required for data: 1193985200\nI0821 08:59:08.770077 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0821 08:59:08.770087 32502 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0821 08:59:08.770093 32502 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:08.770105 32502 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0821 08:59:08.770428 32502 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0821 08:59:08.770442 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.770447 32502 net.cpp:165] Memory required for data: 1200538800\nI0821 08:59:08.770463 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0821 08:59:08.770473 32502 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0821 08:59:08.770479 32502 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0821 08:59:08.770493 32502 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0821 08:59:08.770747 32502 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0821 08:59:08.770766 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.770771 32502 net.cpp:165] Memory required for data: 1207092400\nI0821 08:59:08.770781 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:08.770790 32502 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0821 08:59:08.770797 32502 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0821 08:59:08.770803 32502 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.770859 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:08.771006 32502 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0821 08:59:08.771019 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.771024 32502 net.cpp:165] Memory required for data: 1213646000\nI0821 08:59:08.771034 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0821 
08:59:08.771044 32502 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0821 08:59:08.771050 32502 net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0821 08:59:08.771057 32502 net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.771067 32502 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0821 08:59:08.771075 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.771078 32502 net.cpp:165] Memory required for data: 1220199600\nI0821 08:59:08.771083 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0821 08:59:08.771097 32502 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0821 08:59:08.771103 32502 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0821 08:59:08.771114 32502 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0821 08:59:08.771438 32502 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0821 08:59:08.771452 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.771457 32502 net.cpp:165] Memory required for data: 1226753200\nI0821 08:59:08.771466 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0821 08:59:08.771476 32502 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0821 08:59:08.771481 32502 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0821 08:59:08.771492 32502 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0821 08:59:08.771749 32502 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0821 08:59:08.771762 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.771767 32502 net.cpp:165] Memory required for data: 1233306800\nI0821 08:59:08.771777 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:08.771790 32502 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0821 08:59:08.771795 32502 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0821 08:59:08.771803 32502 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0821 08:59:08.771858 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:08.772008 32502 
net.cpp:150] Setting up L1_b17_cbr2_scale\nI0821 08:59:08.772022 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772027 32502 net.cpp:165] Memory required for data: 1239860400\nI0821 08:59:08.772035 32502 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0821 08:59:08.772048 32502 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0821 08:59:08.772055 32502 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0821 08:59:08.772063 32502 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:08.772070 32502 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0821 08:59:08.772105 32502 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0821 08:59:08.772115 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772119 32502 net.cpp:165] Memory required for data: 1246414000\nI0821 08:59:08.772131 32502 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0821 08:59:08.772140 32502 net.cpp:100] Creating Layer L1_b17_relu\nI0821 08:59:08.772145 32502 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0821 08:59:08.772156 32502 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0821 08:59:08.772166 32502 net.cpp:150] Setting up L1_b17_relu\nI0821 08:59:08.772173 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772177 32502 net.cpp:165] Memory required for data: 1252967600\nI0821 08:59:08.772182 32502 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.772189 32502 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.772194 32502 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0821 08:59:08.772202 32502 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:08.772212 32502 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 
08:59:08.772259 32502 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.772271 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772279 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772282 32502 net.cpp:165] Memory required for data: 1266074800\nI0821 08:59:08.772287 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0821 08:59:08.772298 32502 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0821 08:59:08.772305 32502 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:08.772316 32502 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0821 08:59:08.772635 32502 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0821 08:59:08.772650 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772655 32502 net.cpp:165] Memory required for data: 1272628400\nI0821 08:59:08.772663 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0821 08:59:08.772672 32502 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0821 08:59:08.772678 32502 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0821 08:59:08.772691 32502 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0821 08:59:08.772963 32502 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0821 08:59:08.772976 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.772982 32502 net.cpp:165] Memory required for data: 1279182000\nI0821 08:59:08.772992 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:08.773005 32502 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0821 08:59:08.773011 32502 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0821 08:59:08.773020 32502 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.773077 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:08.773221 32502 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0821 08:59:08.773234 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.773239 32502 net.cpp:165] Memory required for data: 1285735600\nI0821 08:59:08.773248 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0821 08:59:08.773257 32502 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0821 08:59:08.773263 32502 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0821 08:59:08.773273 32502 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.773283 32502 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0821 08:59:08.773289 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.773294 32502 net.cpp:165] Memory required for data: 1292289200\nI0821 08:59:08.773299 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0821 08:59:08.773315 32502 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0821 08:59:08.773321 32502 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0821 08:59:08.773332 32502 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0821 08:59:08.773655 32502 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0821 08:59:08.773676 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.773681 32502 net.cpp:165] Memory required for data: 1298842800\nI0821 08:59:08.773690 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0821 08:59:08.773699 32502 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0821 08:59:08.773706 32502 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0821 08:59:08.773715 32502 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0821 08:59:08.773968 32502 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0821 08:59:08.773983 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.773988 32502 net.cpp:165] Memory required for data: 1305396400\nI0821 08:59:08.774031 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:08.774049 32502 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0821 08:59:08.774056 32502 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0821 08:59:08.774065 32502 net.cpp:395] 
L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0821 08:59:08.774123 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:08.774266 32502 net.cpp:150] Setting up L1_b18_cbr2_scale\nI0821 08:59:08.774278 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.774283 32502 net.cpp:165] Memory required for data: 1311950000\nI0821 08:59:08.774293 32502 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0821 08:59:08.774302 32502 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0821 08:59:08.774308 32502 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0821 08:59:08.774315 32502 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:08.774327 32502 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0821 08:59:08.774358 32502 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0821 08:59:08.774368 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.774372 32502 net.cpp:165] Memory required for data: 1318503600\nI0821 08:59:08.774377 32502 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0821 08:59:08.774389 32502 net.cpp:100] Creating Layer L1_b18_relu\nI0821 08:59:08.774394 32502 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0821 08:59:08.774405 32502 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0821 08:59:08.774415 32502 net.cpp:150] Setting up L1_b18_relu\nI0821 08:59:08.774420 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.774425 32502 net.cpp:165] Memory required for data: 1325057200\nI0821 08:59:08.774430 32502 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:08.774436 32502 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:08.774441 32502 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0821 08:59:08.774449 32502 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> 
L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:08.774458 32502 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:08.774505 32502 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:08.774516 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.774523 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.774528 32502 net.cpp:165] Memory required for data: 1338164400\nI0821 08:59:08.774533 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:59:08.774546 32502 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:59:08.774552 32502 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:08.774562 32502 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:59:08.774896 32502 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:59:08.774910 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.774915 32502 net.cpp:165] Memory required for data: 1339802800\nI0821 08:59:08.774924 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:59:08.774945 32502 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:59:08.774951 32502 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:59:08.774962 32502 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:59:08.775207 32502 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:59:08.775220 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.775225 32502 net.cpp:165] Memory required for data: 1341441200\nI0821 08:59:08.775235 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:08.775244 32502 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:59:08.775250 32502 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:59:08.775259 32502 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.775316 32502 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_scale\nI0821 08:59:08.775463 32502 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:59:08.775480 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.775485 32502 net.cpp:165] Memory required for data: 1343079600\nI0821 08:59:08.775494 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:59:08.775502 32502 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:59:08.775508 32502 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:59:08.775516 32502 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.775526 32502 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:59:08.775532 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.775537 32502 net.cpp:165] Memory required for data: 1344718000\nI0821 08:59:08.775542 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:59:08.775555 32502 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:59:08.775562 32502 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:59:08.775573 32502 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:59:08.775904 32502 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:59:08.775918 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.775923 32502 net.cpp:165] Memory required for data: 1346356400\nI0821 08:59:08.775933 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:59:08.775944 32502 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:59:08.775951 32502 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:59:08.775962 32502 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:59:08.776209 32502 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:59:08.776223 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.776228 32502 net.cpp:165] Memory required for data: 1347994800\nI0821 08:59:08.776238 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:08.776247 32502 net.cpp:100] Creating Layer 
L2_b1_cbr2_scale\nI0821 08:59:08.776252 32502 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:59:08.776260 32502 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:59:08.776319 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:08.776466 32502 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:59:08.776479 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.776484 32502 net.cpp:165] Memory required for data: 1349633200\nI0821 08:59:08.776494 32502 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:59:08.776506 32502 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:59:08.776513 32502 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:08.776522 32502 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:59:08.776612 32502 net.cpp:150] Setting up L2_b1_pool\nI0821 08:59:08.776628 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.776633 32502 net.cpp:165] Memory required for data: 1351271600\nI0821 08:59:08.776638 32502 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:59:08.776652 32502 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:59:08.776659 32502 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:59:08.776665 32502 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:59:08.776680 32502 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:59:08.776715 32502 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:59:08.776726 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.776731 32502 net.cpp:165] Memory required for data: 1352910000\nI0821 08:59:08.776734 32502 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:59:08.776752 32502 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:59:08.776760 32502 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:59:08.776767 32502 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:59:08.776777 32502 
net.cpp:150] Setting up L2_b1_relu\nI0821 08:59:08.776783 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.776788 32502 net.cpp:165] Memory required for data: 1354548400\nI0821 08:59:08.776793 32502 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:59:08.776847 32502 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:59:08.776862 32502 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:59:08.779044 32502 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:59:08.779067 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:08.779072 32502 net.cpp:165] Memory required for data: 1356186800\nI0821 08:59:08.779078 32502 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:59:08.779089 32502 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:59:08.779096 32502 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:59:08.779103 32502 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:59:08.779111 32502 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:59:08.779196 32502 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:59:08.779212 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.779217 32502 net.cpp:165] Memory required for data: 1359463600\nI0821 08:59:08.779223 32502 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:08.779232 32502 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:08.779237 32502 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:59:08.779249 32502 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:08.779260 32502 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:08.779311 32502 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:08.779323 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.779330 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:08.779335 32502 net.cpp:165] Memory required for data: 1366017200\nI0821 08:59:08.779340 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:59:08.779353 32502 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:59:08.779361 32502 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:08.779372 32502 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:59:08.780850 32502 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:59:08.780867 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.780874 32502 net.cpp:165] Memory required for data: 1369294000\nI0821 08:59:08.780884 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:59:08.780894 32502 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:59:08.780900 32502 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:59:08.780911 32502 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:59:08.781167 32502 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:59:08.781182 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.781186 32502 net.cpp:165] Memory required for data: 1372570800\nI0821 08:59:08.781198 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:08.781206 32502 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:59:08.781213 32502 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:59:08.781221 32502 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.781291 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:08.781442 32502 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:59:08.781457 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.781463 32502 net.cpp:165] Memory required for data: 1375847600\nI0821 08:59:08.781472 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:59:08.781481 32502 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:59:08.781486 32502 net.cpp:434] 
L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:59:08.781494 32502 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.781503 32502 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:59:08.781510 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.781515 32502 net.cpp:165] Memory required for data: 1379124400\nI0821 08:59:08.781519 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:59:08.781533 32502 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:59:08.781539 32502 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:59:08.781551 32502 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:59:08.782025 32502 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:59:08.782040 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782045 32502 net.cpp:165] Memory required for data: 1382401200\nI0821 08:59:08.782054 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:59:08.782066 32502 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:59:08.782073 32502 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:59:08.782083 32502 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:59:08.782330 32502 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:59:08.782344 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782349 32502 net.cpp:165] Memory required for data: 1385678000\nI0821 08:59:08.782359 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:08.782367 32502 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:59:08.782373 32502 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:59:08.782382 32502 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:59:08.782440 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:08.782586 32502 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:59:08.782599 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782604 
32502 net.cpp:165] Memory required for data: 1388954800\nI0821 08:59:08.782613 32502 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:59:08.782629 32502 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:59:08.782634 32502 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:59:08.782642 32502 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:08.782649 32502 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:59:08.782676 32502 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:59:08.782686 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782691 32502 net.cpp:165] Memory required for data: 1392231600\nI0821 08:59:08.782696 32502 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:59:08.782707 32502 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:59:08.782713 32502 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:59:08.782721 32502 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:59:08.782729 32502 net.cpp:150] Setting up L2_b2_relu\nI0821 08:59:08.782737 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782742 32502 net.cpp:165] Memory required for data: 1395508400\nI0821 08:59:08.782752 32502 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:08.782760 32502 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:08.782765 32502 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:59:08.782773 32502 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:08.782790 32502 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:08.782840 32502 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:08.782850 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782856 
32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.782860 32502 net.cpp:165] Memory required for data: 1402062000\nI0821 08:59:08.782866 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:59:08.782879 32502 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:59:08.782886 32502 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:08.782894 32502 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:59:08.783360 32502 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:59:08.783375 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.783380 32502 net.cpp:165] Memory required for data: 1405338800\nI0821 08:59:08.783388 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:59:08.783401 32502 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:59:08.783407 32502 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:59:08.783417 32502 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:59:08.783661 32502 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:59:08.783674 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.783679 32502 net.cpp:165] Memory required for data: 1408615600\nI0821 08:59:08.783690 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:08.783699 32502 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:59:08.783704 32502 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:59:08.783712 32502 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.783778 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:08.783928 32502 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:59:08.783941 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.783946 32502 net.cpp:165] Memory required for data: 1411892400\nI0821 08:59:08.783956 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:59:08.783967 32502 net.cpp:100] Creating Layer 
L2_b3_cbr1_relu\nI0821 08:59:08.783972 32502 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:59:08.783980 32502 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.783989 32502 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:59:08.783996 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.784001 32502 net.cpp:165] Memory required for data: 1415169200\nI0821 08:59:08.784005 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:59:08.784019 32502 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:59:08.784024 32502 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:59:08.784035 32502 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:59:08.784504 32502 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:59:08.784518 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.784523 32502 net.cpp:165] Memory required for data: 1418446000\nI0821 08:59:08.784533 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:59:08.784544 32502 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:59:08.784550 32502 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:59:08.784564 32502 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:59:08.784819 32502 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:59:08.784837 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.784842 32502 net.cpp:165] Memory required for data: 1421722800\nI0821 08:59:08.784852 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:08.784860 32502 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:59:08.784868 32502 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:59:08.784874 32502 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:59:08.784941 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:08.785090 32502 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:59:08.785104 32502 net.cpp:157] 
Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785109 32502 net.cpp:165] Memory required for data: 1424999600\nI0821 08:59:08.785117 32502 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:59:08.785126 32502 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:59:08.785135 32502 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:59:08.785143 32502 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:08.785151 32502 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:59:08.785178 32502 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:59:08.785187 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785192 32502 net.cpp:165] Memory required for data: 1428276400\nI0821 08:59:08.785197 32502 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:59:08.785208 32502 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:59:08.785214 32502 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:59:08.785221 32502 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:59:08.785230 32502 net.cpp:150] Setting up L2_b3_relu\nI0821 08:59:08.785238 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785243 32502 net.cpp:165] Memory required for data: 1431553200\nI0821 08:59:08.785248 32502 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:08.785254 32502 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:08.785259 32502 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:59:08.785266 32502 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:08.785276 32502 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:08.785326 32502 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:08.785338 32502 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785346 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785349 32502 net.cpp:165] Memory required for data: 1438106800\nI0821 08:59:08.785354 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:59:08.785367 32502 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:59:08.785374 32502 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:08.785383 32502 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:59:08.785866 32502 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:59:08.785881 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.785886 32502 net.cpp:165] Memory required for data: 1441383600\nI0821 08:59:08.785894 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:59:08.785907 32502 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:59:08.785912 32502 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:59:08.785923 32502 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:59:08.786177 32502 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:59:08.786190 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.786195 32502 net.cpp:165] Memory required for data: 1444660400\nI0821 08:59:08.786206 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:08.786214 32502 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:59:08.786221 32502 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:59:08.786228 32502 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.786288 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:08.786437 32502 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:59:08.786449 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.786454 32502 net.cpp:165] Memory required for data: 1447937200\nI0821 08:59:08.786463 32502 layer_factory.hpp:77] Creating layer 
L2_b4_cbr1_relu\nI0821 08:59:08.786480 32502 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:59:08.786487 32502 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:59:08.786494 32502 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.786504 32502 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:59:08.786511 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.786516 32502 net.cpp:165] Memory required for data: 1451214000\nI0821 08:59:08.786521 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:59:08.786535 32502 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:59:08.786541 32502 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:59:08.786550 32502 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:59:08.787025 32502 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:59:08.787040 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787045 32502 net.cpp:165] Memory required for data: 1454490800\nI0821 08:59:08.787055 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:59:08.787066 32502 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:59:08.787072 32502 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:59:08.787081 32502 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:59:08.787335 32502 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:59:08.787351 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787358 32502 net.cpp:165] Memory required for data: 1457767600\nI0821 08:59:08.787367 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:08.787376 32502 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:59:08.787382 32502 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:59:08.787390 32502 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:59:08.787447 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:08.787602 32502 net.cpp:150] 
Setting up L2_b4_cbr2_scale\nI0821 08:59:08.787616 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787621 32502 net.cpp:165] Memory required for data: 1461044400\nI0821 08:59:08.787629 32502 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:59:08.787639 32502 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:59:08.787645 32502 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:59:08.787652 32502 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:08.787662 32502 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:59:08.787690 32502 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:59:08.787699 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787704 32502 net.cpp:165] Memory required for data: 1464321200\nI0821 08:59:08.787709 32502 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:59:08.787724 32502 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:59:08.787729 32502 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:59:08.787736 32502 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:59:08.787751 32502 net.cpp:150] Setting up L2_b4_relu\nI0821 08:59:08.787760 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787763 32502 net.cpp:165] Memory required for data: 1467598000\nI0821 08:59:08.787768 32502 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:08.787776 32502 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:08.787781 32502 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:59:08.787789 32502 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:08.787798 32502 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:08.787847 32502 net.cpp:150] Setting up 
L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:08.787858 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787873 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.787876 32502 net.cpp:165] Memory required for data: 1474151600\nI0821 08:59:08.787883 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:59:08.787896 32502 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:59:08.787904 32502 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:08.787912 32502 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:59:08.788386 32502 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:59:08.788400 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.788405 32502 net.cpp:165] Memory required for data: 1477428400\nI0821 08:59:08.788414 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:59:08.788426 32502 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:59:08.788434 32502 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:59:08.788444 32502 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:59:08.788697 32502 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:59:08.788713 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.788718 32502 net.cpp:165] Memory required for data: 1480705200\nI0821 08:59:08.788729 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:08.788738 32502 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:59:08.788749 32502 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:59:08.788758 32502 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.788817 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:08.788966 32502 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:59:08.788980 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.788985 32502 net.cpp:165] Memory required for data: 
1483982000\nI0821 08:59:08.788993 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:59:08.789001 32502 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:59:08.789007 32502 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:59:08.789017 32502 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.789027 32502 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:59:08.789034 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.789039 32502 net.cpp:165] Memory required for data: 1487258800\nI0821 08:59:08.789044 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:59:08.789057 32502 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:59:08.789063 32502 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:59:08.789072 32502 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:59:08.789546 32502 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:59:08.789561 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.789566 32502 net.cpp:165] Memory required for data: 1490535600\nI0821 08:59:08.789573 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:59:08.789585 32502 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:59:08.789592 32502 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:59:08.789600 32502 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:59:08.789863 32502 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:59:08.789880 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.789885 32502 net.cpp:165] Memory required for data: 1493812400\nI0821 08:59:08.789896 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:08.789904 32502 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:59:08.789911 32502 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:59:08.789917 32502 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:59:08.789974 32502 
layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:08.790130 32502 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:59:08.790143 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790148 32502 net.cpp:165] Memory required for data: 1497089200\nI0821 08:59:08.790158 32502 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:59:08.790174 32502 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:59:08.790180 32502 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:59:08.790187 32502 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:08.790199 32502 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:59:08.790226 32502 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:59:08.790237 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790242 32502 net.cpp:165] Memory required for data: 1500366000\nI0821 08:59:08.790247 32502 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:59:08.790256 32502 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:59:08.790261 32502 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:59:08.790271 32502 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:59:08.790282 32502 net.cpp:150] Setting up L2_b5_relu\nI0821 08:59:08.790288 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790292 32502 net.cpp:165] Memory required for data: 1503642800\nI0821 08:59:08.790297 32502 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:08.790304 32502 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:08.790309 32502 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:59:08.790318 32502 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:08.790326 32502 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:08.790375 32502 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:08.790387 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790393 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790398 32502 net.cpp:165] Memory required for data: 1510196400\nI0821 08:59:08.790403 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:59:08.790416 32502 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:59:08.790423 32502 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:08.790432 32502 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:59:08.790904 32502 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:59:08.790920 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.790925 32502 net.cpp:165] Memory required for data: 1513473200\nI0821 08:59:08.790933 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:59:08.790946 32502 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:59:08.790951 32502 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:59:08.790961 32502 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:59:08.791213 32502 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:59:08.791229 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.791234 32502 net.cpp:165] Memory required for data: 1516750000\nI0821 08:59:08.791244 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:08.791252 32502 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:59:08.791259 32502 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:59:08.791266 32502 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.791322 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:08.792475 32502 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:59:08.792492 32502 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0821 08:59:08.792498 32502 net.cpp:165] Memory required for data: 1520026800\nI0821 08:59:08.792508 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:59:08.792517 32502 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:59:08.792523 32502 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:59:08.792531 32502 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.792541 32502 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:59:08.792557 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.792562 32502 net.cpp:165] Memory required for data: 1523303600\nI0821 08:59:08.792567 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:59:08.792582 32502 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:59:08.792587 32502 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:59:08.792595 32502 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:59:08.793078 32502 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:59:08.793093 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793098 32502 net.cpp:165] Memory required for data: 1526580400\nI0821 08:59:08.793107 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:59:08.793119 32502 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:59:08.793125 32502 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:59:08.793136 32502 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:59:08.793383 32502 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:59:08.793396 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793401 32502 net.cpp:165] Memory required for data: 1529857200\nI0821 08:59:08.793412 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:08.793421 32502 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:59:08.793427 32502 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:59:08.793436 32502 net.cpp:395] 
L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:59:08.793493 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:08.793643 32502 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:59:08.793658 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793663 32502 net.cpp:165] Memory required for data: 1533134000\nI0821 08:59:08.793673 32502 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:59:08.793681 32502 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:59:08.793689 32502 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:59:08.793695 32502 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:08.793704 32502 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:59:08.793733 32502 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:59:08.793748 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793753 32502 net.cpp:165] Memory required for data: 1536410800\nI0821 08:59:08.793759 32502 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:59:08.793767 32502 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:59:08.793773 32502 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:59:08.793783 32502 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:59:08.793793 32502 net.cpp:150] Setting up L2_b6_relu\nI0821 08:59:08.793800 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793804 32502 net.cpp:165] Memory required for data: 1539687600\nI0821 08:59:08.793809 32502 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:08.793817 32502 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:08.793822 32502 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:59:08.793833 32502 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 
08:59:08.793843 32502 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:08.793889 32502 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:08.793900 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793906 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.793910 32502 net.cpp:165] Memory required for data: 1546241200\nI0821 08:59:08.793915 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:59:08.793931 32502 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:59:08.793936 32502 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:08.793952 32502 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:59:08.794422 32502 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:59:08.794436 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.794441 32502 net.cpp:165] Memory required for data: 1549518000\nI0821 08:59:08.794450 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:59:08.794462 32502 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:59:08.794468 32502 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:59:08.794477 32502 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:59:08.794721 32502 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:59:08.794734 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.794739 32502 net.cpp:165] Memory required for data: 1552794800\nI0821 08:59:08.794756 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:08.794765 32502 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:59:08.794771 32502 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:59:08.794780 32502 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.794838 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:08.794986 32502 net.cpp:150] Setting up 
L2_b7_cbr1_scale\nI0821 08:59:08.795001 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.795006 32502 net.cpp:165] Memory required for data: 1556071600\nI0821 08:59:08.795017 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:59:08.795024 32502 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:59:08.795030 32502 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:59:08.795037 32502 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.795047 32502 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:59:08.795054 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.795058 32502 net.cpp:165] Memory required for data: 1559348400\nI0821 08:59:08.795063 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:59:08.795078 32502 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:59:08.795083 32502 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:59:08.795094 32502 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:59:08.795557 32502 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:59:08.795570 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.795575 32502 net.cpp:165] Memory required for data: 1562625200\nI0821 08:59:08.795584 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:59:08.795596 32502 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:59:08.795603 32502 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:59:08.795614 32502 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:59:08.795868 32502 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:59:08.795882 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.795887 32502 net.cpp:165] Memory required for data: 1565902000\nI0821 08:59:08.795898 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:08.795936 32502 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:59:08.795945 32502 net.cpp:434] L2_b7_cbr2_scale 
<- L2_b7_cbr2_bn_top\nI0821 08:59:08.795953 32502 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:59:08.796012 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:08.796157 32502 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:59:08.796170 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796175 32502 net.cpp:165] Memory required for data: 1569178800\nI0821 08:59:08.796185 32502 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:59:08.796197 32502 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:59:08.796203 32502 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:59:08.796211 32502 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:08.796226 32502 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:59:08.796252 32502 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:59:08.796262 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796267 32502 net.cpp:165] Memory required for data: 1572455600\nI0821 08:59:08.796272 32502 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:59:08.796279 32502 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:59:08.796284 32502 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:59:08.796296 32502 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:59:08.796306 32502 net.cpp:150] Setting up L2_b7_relu\nI0821 08:59:08.796314 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796317 32502 net.cpp:165] Memory required for data: 1575732400\nI0821 08:59:08.796322 32502 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:08.796329 32502 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:08.796334 32502 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:59:08.796344 32502 net.cpp:408] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:08.796355 32502 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:08.796401 32502 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:08.796413 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796421 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796424 32502 net.cpp:165] Memory required for data: 1582286000\nI0821 08:59:08.796430 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:59:08.796442 32502 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:59:08.796447 32502 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:08.796459 32502 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:59:08.796931 32502 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:59:08.796947 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.796952 32502 net.cpp:165] Memory required for data: 1585562800\nI0821 08:59:08.796960 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:59:08.796969 32502 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:59:08.796977 32502 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:59:08.796988 32502 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:59:08.797245 32502 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:59:08.797257 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.797262 32502 net.cpp:165] Memory required for data: 1588839600\nI0821 08:59:08.797273 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:08.797284 32502 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:59:08.797291 32502 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:59:08.797299 32502 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.797356 32502 
layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:08.797503 32502 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:59:08.797516 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.797521 32502 net.cpp:165] Memory required for data: 1592116400\nI0821 08:59:08.797530 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:59:08.797541 32502 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:59:08.797549 32502 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:59:08.797555 32502 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.797564 32502 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:59:08.797571 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.797576 32502 net.cpp:165] Memory required for data: 1595393200\nI0821 08:59:08.797581 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:59:08.797595 32502 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:59:08.797607 32502 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:59:08.797619 32502 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:59:08.798087 32502 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:59:08.798101 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798107 32502 net.cpp:165] Memory required for data: 1598670000\nI0821 08:59:08.798115 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:59:08.798125 32502 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:59:08.798130 32502 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:59:08.798143 32502 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:59:08.798394 32502 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:59:08.798408 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798413 32502 net.cpp:165] Memory required for data: 1601946800\nI0821 08:59:08.798422 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:08.798434 
32502 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:59:08.798440 32502 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:59:08.798449 32502 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:59:08.798504 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:08.798660 32502 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:59:08.798674 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798679 32502 net.cpp:165] Memory required for data: 1605223600\nI0821 08:59:08.798688 32502 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:59:08.798697 32502 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:59:08.798703 32502 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:59:08.798713 32502 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:08.798722 32502 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:59:08.798754 32502 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:59:08.798768 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798774 32502 net.cpp:165] Memory required for data: 1608500400\nI0821 08:59:08.798779 32502 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:59:08.798786 32502 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:59:08.798792 32502 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:59:08.798799 32502 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:59:08.798808 32502 net.cpp:150] Setting up L2_b8_relu\nI0821 08:59:08.798815 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798820 32502 net.cpp:165] Memory required for data: 1611777200\nI0821 08:59:08.798825 32502 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:08.798835 32502 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:08.798840 32502 net.cpp:434] 
L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:59:08.798847 32502 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:08.798857 32502 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:08.798907 32502 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:08.798918 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798924 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.798929 32502 net.cpp:165] Memory required for data: 1618330800\nI0821 08:59:08.798934 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:59:08.798944 32502 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:59:08.798951 32502 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:08.798962 32502 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:59:08.799432 32502 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:59:08.799448 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.799453 32502 net.cpp:165] Memory required for data: 1621607600\nI0821 08:59:08.799468 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:59:08.799477 32502 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:59:08.799484 32502 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:59:08.799494 32502 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:59:08.799756 32502 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:59:08.799768 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.799773 32502 net.cpp:165] Memory required for data: 1624884400\nI0821 08:59:08.799784 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:08.799795 32502 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:59:08.799803 32502 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:59:08.799810 
32502 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.799866 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:08.800016 32502 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:59:08.800029 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.800034 32502 net.cpp:165] Memory required for data: 1628161200\nI0821 08:59:08.800043 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:59:08.800055 32502 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:59:08.800060 32502 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:59:08.800067 32502 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.800077 32502 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:59:08.800084 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.800088 32502 net.cpp:165] Memory required for data: 1631438000\nI0821 08:59:08.800093 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:59:08.800107 32502 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:59:08.800113 32502 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:59:08.800124 32502 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:59:08.800591 32502 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:59:08.800606 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.800611 32502 net.cpp:165] Memory required for data: 1634714800\nI0821 08:59:08.800619 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:59:08.800628 32502 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:59:08.800635 32502 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:59:08.800643 32502 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:59:08.800900 32502 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:59:08.800915 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.800920 32502 net.cpp:165] Memory required for data: 1637991600\nI0821 
08:59:08.800930 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:08.800940 32502 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:59:08.800945 32502 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:59:08.800956 32502 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:59:08.801013 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:08.801163 32502 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:59:08.801175 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801180 32502 net.cpp:165] Memory required for data: 1641268400\nI0821 08:59:08.801190 32502 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:59:08.801199 32502 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:59:08.801205 32502 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:59:08.801213 32502 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:08.801223 32502 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:59:08.801250 32502 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:59:08.801262 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801267 32502 net.cpp:165] Memory required for data: 1644545200\nI0821 08:59:08.801272 32502 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:59:08.801287 32502 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:59:08.801293 32502 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:59:08.801301 32502 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:59:08.801311 32502 net.cpp:150] Setting up L2_b9_relu\nI0821 08:59:08.801317 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801321 32502 net.cpp:165] Memory required for data: 1647822000\nI0821 08:59:08.801326 32502 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:08.801337 32502 net.cpp:100] Creating Layer 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:08.801342 32502 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:59:08.801349 32502 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:08.801359 32502 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:08.801409 32502 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:08.801420 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801426 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801431 32502 net.cpp:165] Memory required for data: 1654375600\nI0821 08:59:08.801436 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0821 08:59:08.801447 32502 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0821 08:59:08.801453 32502 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:08.801465 32502 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0821 08:59:08.801947 32502 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0821 08:59:08.801962 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.801967 32502 net.cpp:165] Memory required for data: 1657652400\nI0821 08:59:08.801976 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0821 08:59:08.801985 32502 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0821 08:59:08.801991 32502 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0821 08:59:08.802002 32502 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0821 08:59:08.802260 32502 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0821 08:59:08.802273 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.802279 32502 net.cpp:165] Memory required for data: 1660929200\nI0821 08:59:08.802289 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:08.802301 32502 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0821 
08:59:08.802307 32502 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0821 08:59:08.802315 32502 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.802371 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:08.802525 32502 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0821 08:59:08.802536 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.802541 32502 net.cpp:165] Memory required for data: 1664206000\nI0821 08:59:08.802551 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0821 08:59:08.802558 32502 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0821 08:59:08.802565 32502 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0821 08:59:08.802575 32502 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.802585 32502 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0821 08:59:08.802592 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.802597 32502 net.cpp:165] Memory required for data: 1667482800\nI0821 08:59:08.802601 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0821 08:59:08.802615 32502 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0821 08:59:08.802621 32502 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0821 08:59:08.802634 32502 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0821 08:59:08.803107 32502 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0821 08:59:08.803122 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803133 32502 net.cpp:165] Memory required for data: 1670759600\nI0821 08:59:08.803143 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0821 08:59:08.803153 32502 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0821 08:59:08.803158 32502 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0821 08:59:08.803166 32502 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0821 08:59:08.803417 32502 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0821 08:59:08.803431 32502 net.cpp:157] 
Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803436 32502 net.cpp:165] Memory required for data: 1674036400\nI0821 08:59:08.803445 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:08.803454 32502 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0821 08:59:08.803460 32502 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0821 08:59:08.803472 32502 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0821 08:59:08.803527 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:08.803684 32502 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0821 08:59:08.803697 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803702 32502 net.cpp:165] Memory required for data: 1677313200\nI0821 08:59:08.803711 32502 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0821 08:59:08.803720 32502 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0821 08:59:08.803726 32502 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0821 08:59:08.803735 32502 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:08.803750 32502 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0821 08:59:08.803779 32502 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0821 08:59:08.803789 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803793 32502 net.cpp:165] Memory required for data: 1680590000\nI0821 08:59:08.803799 32502 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0821 08:59:08.803810 32502 net.cpp:100] Creating Layer L2_b10_relu\nI0821 08:59:08.803817 32502 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0821 08:59:08.803823 32502 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0821 08:59:08.803833 32502 net.cpp:150] Setting up L2_b10_relu\nI0821 08:59:08.803839 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803843 32502 net.cpp:165] Memory required for data: 1683866800\nI0821 08:59:08.803848 32502 
layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:08.803858 32502 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:08.803864 32502 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0821 08:59:08.803871 32502 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:08.803880 32502 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:08.803926 32502 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:08.803941 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803947 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.803952 32502 net.cpp:165] Memory required for data: 1690420400\nI0821 08:59:08.803956 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0821 08:59:08.803967 32502 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0821 08:59:08.803973 32502 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:08.803982 32502 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0821 08:59:08.804452 32502 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0821 08:59:08.804466 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.804471 32502 net.cpp:165] Memory required for data: 1693697200\nI0821 08:59:08.804481 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0821 08:59:08.804492 32502 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0821 08:59:08.804499 32502 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0821 08:59:08.804514 32502 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0821 08:59:08.804778 32502 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0821 08:59:08.804792 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.804797 32502 net.cpp:165] Memory required for data: 1696974000\nI0821 
08:59:08.804808 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:08.804816 32502 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0821 08:59:08.804823 32502 net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0821 08:59:08.804833 32502 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.804890 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:08.805043 32502 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0821 08:59:08.805057 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.805061 32502 net.cpp:165] Memory required for data: 1700250800\nI0821 08:59:08.805070 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0821 08:59:08.805078 32502 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0821 08:59:08.805084 32502 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0821 08:59:08.805094 32502 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.805104 32502 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0821 08:59:08.805112 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.805116 32502 net.cpp:165] Memory required for data: 1703527600\nI0821 08:59:08.805121 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0821 08:59:08.805136 32502 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0821 08:59:08.805143 32502 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0821 08:59:08.805151 32502 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0821 08:59:08.805619 32502 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0821 08:59:08.805634 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.805639 32502 net.cpp:165] Memory required for data: 1706804400\nI0821 08:59:08.805647 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0821 08:59:08.805663 32502 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0821 08:59:08.805670 32502 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0821 08:59:08.805678 32502 
net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0821 08:59:08.805938 32502 net.cpp:150] Setting up L2_b11_cbr2_bn\nI0821 08:59:08.805951 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.805956 32502 net.cpp:165] Memory required for data: 1710081200\nI0821 08:59:08.805968 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:08.805976 32502 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0821 08:59:08.805982 32502 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0821 08:59:08.805992 32502 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0821 08:59:08.806051 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:08.806202 32502 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0821 08:59:08.806216 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.806221 32502 net.cpp:165] Memory required for data: 1713358000\nI0821 08:59:08.806229 32502 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0821 08:59:08.806238 32502 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0821 08:59:08.806244 32502 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0821 08:59:08.806252 32502 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:08.806262 32502 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0821 08:59:08.806289 32502 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0821 08:59:08.806298 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.806303 32502 net.cpp:165] Memory required for data: 1716634800\nI0821 08:59:08.806308 32502 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0821 08:59:08.806318 32502 net.cpp:100] Creating Layer L2_b11_relu\nI0821 08:59:08.806324 32502 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0821 08:59:08.806337 32502 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0821 08:59:08.806347 32502 net.cpp:150] Setting up L2_b11_relu\nI0821 08:59:08.806354 32502 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.806360 32502 net.cpp:165] Memory required for data: 1719911600\nI0821 08:59:08.806365 32502 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:08.806371 32502 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:08.806376 32502 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0821 08:59:08.806386 32502 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:08.806396 32502 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:08.806442 32502 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:08.806457 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.806463 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.806468 32502 net.cpp:165] Memory required for data: 1726465200\nI0821 08:59:08.806473 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0821 08:59:08.806484 32502 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0821 08:59:08.806490 32502 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:08.806499 32502 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0821 08:59:08.806984 32502 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0821 08:59:08.806999 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.807004 32502 net.cpp:165] Memory required for data: 1729742000\nI0821 08:59:08.807013 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0821 08:59:08.807027 32502 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0821 08:59:08.807034 32502 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0821 08:59:08.807042 32502 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0821 08:59:08.807299 32502 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0821 
08:59:08.807313 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.807318 32502 net.cpp:165] Memory required for data: 1733018800\nI0821 08:59:08.807328 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:08.807337 32502 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0821 08:59:08.807343 32502 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0821 08:59:08.807354 32502 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.807413 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:08.807567 32502 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0821 08:59:08.807580 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.807585 32502 net.cpp:165] Memory required for data: 1736295600\nI0821 08:59:08.807595 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0821 08:59:08.807602 32502 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0821 08:59:08.807608 32502 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0821 08:59:08.807618 32502 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.807628 32502 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0821 08:59:08.807636 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.807639 32502 net.cpp:165] Memory required for data: 1739572400\nI0821 08:59:08.807644 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0821 08:59:08.807658 32502 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0821 08:59:08.807664 32502 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0821 08:59:08.807673 32502 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0821 08:59:08.808152 32502 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0821 08:59:08.808167 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808172 32502 net.cpp:165] Memory required for data: 1742849200\nI0821 08:59:08.808182 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0821 08:59:08.808202 32502 
net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0821 08:59:08.808208 32502 net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0821 08:59:08.808218 32502 net.cpp:408] L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0821 08:59:08.808472 32502 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0821 08:59:08.808485 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808490 32502 net.cpp:165] Memory required for data: 1746126000\nI0821 08:59:08.808501 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:08.808511 32502 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0821 08:59:08.808516 32502 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0821 08:59:08.808523 32502 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0821 08:59:08.808583 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:08.808732 32502 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0821 08:59:08.808753 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808759 32502 net.cpp:165] Memory required for data: 1749402800\nI0821 08:59:08.808768 32502 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0821 08:59:08.808778 32502 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0821 08:59:08.808784 32502 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0821 08:59:08.808791 32502 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:08.808799 32502 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0821 08:59:08.808830 32502 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0821 08:59:08.808840 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808843 32502 net.cpp:165] Memory required for data: 1752679600\nI0821 08:59:08.808848 32502 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0821 08:59:08.808856 32502 net.cpp:100] Creating Layer L2_b12_relu\nI0821 08:59:08.808862 32502 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0821 08:59:08.808876 32502 
net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0821 08:59:08.808886 32502 net.cpp:150] Setting up L2_b12_relu\nI0821 08:59:08.808892 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808897 32502 net.cpp:165] Memory required for data: 1755956400\nI0821 08:59:08.808902 32502 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:08.808909 32502 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:08.808914 32502 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0821 08:59:08.808924 32502 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:08.808934 32502 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:08.808979 32502 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:08.808991 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.808997 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.809002 32502 net.cpp:165] Memory required for data: 1762510000\nI0821 08:59:08.809007 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0821 08:59:08.809021 32502 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0821 08:59:08.809027 32502 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:08.809037 32502 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0821 08:59:08.809514 32502 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0821 08:59:08.809528 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.809533 32502 net.cpp:165] Memory required for data: 1765786800\nI0821 08:59:08.809542 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0821 08:59:08.809554 32502 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0821 08:59:08.809561 32502 net.cpp:434] L2_b13_cbr1_bn <- 
L2_b13_cbr1_conv_top\nI0821 08:59:08.809569 32502 net.cpp:408] L2_b13_cbr1_bn -> L2_b13_cbr1_bn_top\nI0821 08:59:08.809908 32502 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0821 08:59:08.809937 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.809942 32502 net.cpp:165] Memory required for data: 1769063600\nI0821 08:59:08.809953 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:08.809963 32502 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0821 08:59:08.809969 32502 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0821 08:59:08.809980 32502 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.810042 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:08.810199 32502 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0821 08:59:08.810211 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.810216 32502 net.cpp:165] Memory required for data: 1772340400\nI0821 08:59:08.810226 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0821 08:59:08.810235 32502 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0821 08:59:08.810240 32502 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0821 08:59:08.810248 32502 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.810261 32502 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0821 08:59:08.810267 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.810272 32502 net.cpp:165] Memory required for data: 1775617200\nI0821 08:59:08.810277 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0821 08:59:08.810288 32502 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0821 08:59:08.810297 32502 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0821 08:59:08.810307 32502 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0821 08:59:08.810809 32502 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0821 08:59:08.810824 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:08.810829 32502 net.cpp:165] Memory required for data: 1778894000\nI0821 08:59:08.810838 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0821 08:59:08.810852 32502 net.cpp:100] Creating Layer L2_b13_cbr2_bn\nI0821 08:59:08.810858 32502 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0821 08:59:08.810868 32502 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0821 08:59:08.811131 32502 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0821 08:59:08.811144 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811149 32502 net.cpp:165] Memory required for data: 1782170800\nI0821 08:59:08.811161 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:08.811169 32502 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0821 08:59:08.811175 32502 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0821 08:59:08.811183 32502 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0821 08:59:08.811244 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:08.811398 32502 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0821 08:59:08.811414 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811419 32502 net.cpp:165] Memory required for data: 1785447600\nI0821 08:59:08.811427 32502 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0821 08:59:08.811436 32502 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0821 08:59:08.811444 32502 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0821 08:59:08.811450 32502 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:08.811458 32502 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0821 08:59:08.811489 32502 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0821 08:59:08.811498 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811503 32502 net.cpp:165] Memory required for data: 1788724400\nI0821 08:59:08.811508 32502 layer_factory.hpp:77] Creating layer 
L2_b13_relu\nI0821 08:59:08.811517 32502 net.cpp:100] Creating Layer L2_b13_relu\nI0821 08:59:08.811522 32502 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0821 08:59:08.811532 32502 net.cpp:395] L2_b13_relu -> L2_b13_sum_eltwise_top (in-place)\nI0821 08:59:08.811542 32502 net.cpp:150] Setting up L2_b13_relu\nI0821 08:59:08.811556 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811561 32502 net.cpp:165] Memory required for data: 1792001200\nI0821 08:59:08.811566 32502 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:08.811573 32502 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:08.811579 32502 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0821 08:59:08.811589 32502 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:08.811599 32502 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:08.811646 32502 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:08.811657 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811664 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.811669 32502 net.cpp:165] Memory required for data: 1798554800\nI0821 08:59:08.811674 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0821 08:59:08.811688 32502 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0821 08:59:08.811694 32502 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:08.811704 32502 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0821 08:59:08.812192 32502 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0821 08:59:08.812207 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.812212 32502 net.cpp:165] Memory required for data: 1801831600\nI0821 08:59:08.812222 32502 
layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0821 08:59:08.812233 32502 net.cpp:100] Creating Layer L2_b14_cbr1_bn\nI0821 08:59:08.812240 32502 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0821 08:59:08.812248 32502 net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0821 08:59:08.812501 32502 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0821 08:59:08.812515 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.812520 32502 net.cpp:165] Memory required for data: 1805108400\nI0821 08:59:08.812530 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:08.812538 32502 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0821 08:59:08.812544 32502 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0821 08:59:08.812551 32502 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.812614 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:08.812774 32502 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0821 08:59:08.812790 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.812795 32502 net.cpp:165] Memory required for data: 1808385200\nI0821 08:59:08.812804 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0821 08:59:08.812813 32502 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0821 08:59:08.812819 32502 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0821 08:59:08.812826 32502 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.812836 32502 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0821 08:59:08.812842 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.812847 32502 net.cpp:165] Memory required for data: 1811662000\nI0821 08:59:08.812852 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0821 08:59:08.812866 32502 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0821 08:59:08.812872 32502 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0821 08:59:08.812883 32502 net.cpp:408] L2_b14_cbr2_conv 
-> L2_b14_cbr2_conv_top\nI0821 08:59:08.813359 32502 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0821 08:59:08.813374 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.813379 32502 net.cpp:165] Memory required for data: 1814938800\nI0821 08:59:08.813387 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0821 08:59:08.813400 32502 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0821 08:59:08.813405 32502 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0821 08:59:08.813423 32502 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0821 08:59:08.813680 32502 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0821 08:59:08.813694 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.813699 32502 net.cpp:165] Memory required for data: 1818215600\nI0821 08:59:08.813709 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:08.813717 32502 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0821 08:59:08.813724 32502 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0821 08:59:08.813731 32502 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0821 08:59:08.813798 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:08.813947 32502 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0821 08:59:08.813961 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.813966 32502 net.cpp:165] Memory required for data: 1821492400\nI0821 08:59:08.813976 32502 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0821 08:59:08.813987 32502 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0821 08:59:08.813993 32502 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0821 08:59:08.814000 32502 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:08.814008 32502 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0821 08:59:08.814035 32502 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0821 08:59:08.814044 32502 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0821 08:59:08.814049 32502 net.cpp:165] Memory required for data: 1824769200\nI0821 08:59:08.814054 32502 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0821 08:59:08.814064 32502 net.cpp:100] Creating Layer L2_b14_relu\nI0821 08:59:08.814070 32502 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0821 08:59:08.814080 32502 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0821 08:59:08.814090 32502 net.cpp:150] Setting up L2_b14_relu\nI0821 08:59:08.814096 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.814101 32502 net.cpp:165] Memory required for data: 1828046000\nI0821 08:59:08.814106 32502 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:08.814112 32502 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:08.814118 32502 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0821 08:59:08.814126 32502 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:08.814136 32502 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:08.814187 32502 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:08.814198 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.814204 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.814209 32502 net.cpp:165] Memory required for data: 1834599600\nI0821 08:59:08.814214 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0821 08:59:08.814230 32502 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0821 08:59:08.814237 32502 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:08.814246 32502 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0821 08:59:08.814730 32502 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0821 08:59:08.814749 32502 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.814755 32502 net.cpp:165] Memory required for data: 1837876400\nI0821 08:59:08.814764 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0821 08:59:08.814777 32502 net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0821 08:59:08.814784 32502 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0821 08:59:08.814795 32502 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0821 08:59:08.815052 32502 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0821 08:59:08.815064 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.815069 32502 net.cpp:165] Memory required for data: 1841153200\nI0821 08:59:08.815086 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:08.815095 32502 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0821 08:59:08.815101 32502 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0821 08:59:08.815109 32502 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.815171 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:08.815323 32502 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0821 08:59:08.815340 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.815345 32502 net.cpp:165] Memory required for data: 1844430000\nI0821 08:59:08.815353 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0821 08:59:08.815361 32502 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0821 08:59:08.815367 32502 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0821 08:59:08.815374 32502 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.815384 32502 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0821 08:59:08.815392 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.815397 32502 net.cpp:165] Memory required for data: 1847706800\nI0821 08:59:08.815400 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0821 08:59:08.815415 32502 net.cpp:100] Creating Layer 
L2_b15_cbr2_conv\nI0821 08:59:08.815421 32502 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0821 08:59:08.815433 32502 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0821 08:59:08.815922 32502 net.cpp:150] Setting up L2_b15_cbr2_conv\nI0821 08:59:08.815937 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.815942 32502 net.cpp:165] Memory required for data: 1850983600\nI0821 08:59:08.815950 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0821 08:59:08.815963 32502 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0821 08:59:08.815969 32502 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0821 08:59:08.815981 32502 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0821 08:59:08.816246 32502 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0821 08:59:08.816259 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816264 32502 net.cpp:165] Memory required for data: 1854260400\nI0821 08:59:08.816274 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:08.816284 32502 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0821 08:59:08.816290 32502 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0821 08:59:08.816298 32502 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0821 08:59:08.816359 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:08.816509 32502 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0821 08:59:08.816520 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816525 32502 net.cpp:165] Memory required for data: 1857537200\nI0821 08:59:08.816534 32502 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0821 08:59:08.816548 32502 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0821 08:59:08.816555 32502 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0821 08:59:08.816561 32502 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:08.816570 32502 net.cpp:408] L2_b15_sum_eltwise -> 
L2_b15_sum_eltwise_top\nI0821 08:59:08.816596 32502 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0821 08:59:08.816606 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816610 32502 net.cpp:165] Memory required for data: 1860814000\nI0821 08:59:08.816615 32502 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0821 08:59:08.816627 32502 net.cpp:100] Creating Layer L2_b15_relu\nI0821 08:59:08.816632 32502 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0821 08:59:08.816639 32502 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0821 08:59:08.816648 32502 net.cpp:150] Setting up L2_b15_relu\nI0821 08:59:08.816655 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816660 32502 net.cpp:165] Memory required for data: 1864090800\nI0821 08:59:08.816671 32502 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:08.816679 32502 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:08.816684 32502 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0821 08:59:08.816692 32502 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:08.816701 32502 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:08.816757 32502 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:08.816771 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816777 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.816781 32502 net.cpp:165] Memory required for data: 1870644400\nI0821 08:59:08.816787 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0821 08:59:08.816802 32502 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0821 08:59:08.816807 32502 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:08.816817 32502 
net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0821 08:59:08.818605 32502 net.cpp:150] Setting up L2_b16_cbr1_conv\nI0821 08:59:08.818624 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.818629 32502 net.cpp:165] Memory required for data: 1873921200\nI0821 08:59:08.818639 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0821 08:59:08.818648 32502 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0821 08:59:08.818655 32502 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0821 08:59:08.818667 32502 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0821 08:59:08.818940 32502 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0821 08:59:08.818954 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.818959 32502 net.cpp:165] Memory required for data: 1877198000\nI0821 08:59:08.818970 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:08.818984 32502 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0821 08:59:08.818989 32502 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0821 08:59:08.818997 32502 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.819057 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:08.819214 32502 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0821 08:59:08.819227 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.819232 32502 net.cpp:165] Memory required for data: 1880474800\nI0821 08:59:08.819242 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0821 08:59:08.819253 32502 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0821 08:59:08.819259 32502 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0821 08:59:08.819267 32502 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.819277 32502 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0821 08:59:08.819283 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.819288 32502 net.cpp:165] Memory required for data: 
1883751600\nI0821 08:59:08.819293 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0821 08:59:08.819308 32502 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0821 08:59:08.819314 32502 net.cpp:434] L2_b16_cbr2_conv <- L2_b16_cbr1_bn_top\nI0821 08:59:08.819324 32502 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0821 08:59:08.819803 32502 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0821 08:59:08.819818 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.819823 32502 net.cpp:165] Memory required for data: 1887028400\nI0821 08:59:08.819833 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0821 08:59:08.819842 32502 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0821 08:59:08.819849 32502 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0821 08:59:08.819859 32502 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0821 08:59:08.820118 32502 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0821 08:59:08.820138 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820143 32502 net.cpp:165] Memory required for data: 1890305200\nI0821 08:59:08.820154 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:08.820166 32502 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0821 08:59:08.820173 32502 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0821 08:59:08.820180 32502 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0821 08:59:08.820236 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:08.820390 32502 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0821 08:59:08.820403 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820408 32502 net.cpp:165] Memory required for data: 1893582000\nI0821 08:59:08.820417 32502 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0821 08:59:08.820427 32502 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0821 08:59:08.820433 32502 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0821 
08:59:08.820443 32502 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:08.820451 32502 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0821 08:59:08.820479 32502 net.cpp:150] Setting up L2_b16_sum_eltwise\nI0821 08:59:08.820492 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820497 32502 net.cpp:165] Memory required for data: 1896858800\nI0821 08:59:08.820502 32502 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0821 08:59:08.820509 32502 net.cpp:100] Creating Layer L2_b16_relu\nI0821 08:59:08.820515 32502 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0821 08:59:08.820523 32502 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0821 08:59:08.820533 32502 net.cpp:150] Setting up L2_b16_relu\nI0821 08:59:08.820538 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820544 32502 net.cpp:165] Memory required for data: 1900135600\nI0821 08:59:08.820547 32502 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:08.820557 32502 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:08.820564 32502 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0821 08:59:08.820570 32502 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:08.820580 32502 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:08.820628 32502 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:08.820641 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820647 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.820652 32502 net.cpp:165] Memory required for data: 1906689200\nI0821 08:59:08.820657 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0821 08:59:08.820668 32502 net.cpp:100] Creating 
Layer L2_b17_cbr1_conv\nI0821 08:59:08.820674 32502 net.cpp:434] L2_b17_cbr1_conv <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:08.820688 32502 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0821 08:59:08.821173 32502 net.cpp:150] Setting up L2_b17_cbr1_conv\nI0821 08:59:08.821188 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.821193 32502 net.cpp:165] Memory required for data: 1909966000\nI0821 08:59:08.821202 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0821 08:59:08.821213 32502 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0821 08:59:08.821218 32502 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0821 08:59:08.821230 32502 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0821 08:59:08.821492 32502 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0821 08:59:08.821506 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.821511 32502 net.cpp:165] Memory required for data: 1913242800\nI0821 08:59:08.821521 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:08.821532 32502 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0821 08:59:08.821547 32502 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0821 08:59:08.821554 32502 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.821615 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:08.821775 32502 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0821 08:59:08.821789 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.821794 32502 net.cpp:165] Memory required for data: 1916519600\nI0821 08:59:08.821805 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0821 08:59:08.821815 32502 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0821 08:59:08.821822 32502 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0821 08:59:08.821830 32502 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.821840 32502 net.cpp:150] Setting up 
L2_b17_cbr1_relu\nI0821 08:59:08.821846 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.821851 32502 net.cpp:165] Memory required for data: 1919796400\nI0821 08:59:08.821856 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_conv\nI0821 08:59:08.821869 32502 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0821 08:59:08.821876 32502 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0821 08:59:08.821887 32502 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0821 08:59:08.822365 32502 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0821 08:59:08.822378 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.822383 32502 net.cpp:165] Memory required for data: 1923073200\nI0821 08:59:08.822392 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0821 08:59:08.822402 32502 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0821 08:59:08.822408 32502 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0821 08:59:08.822417 32502 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0821 08:59:08.822679 32502 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0821 08:59:08.822692 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.822697 32502 net.cpp:165] Memory required for data: 1926350000\nI0821 08:59:08.822707 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:08.822716 32502 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0821 08:59:08.822722 32502 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0821 08:59:08.822732 32502 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0821 08:59:08.822798 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:08.822957 32502 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0821 08:59:08.822969 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.822974 32502 net.cpp:165] Memory required for data: 1929626800\nI0821 08:59:08.822983 32502 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0821 
08:59:08.822993 32502 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0821 08:59:08.822999 32502 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0821 08:59:08.823006 32502 net.cpp:434] L2_b17_sum_eltwise <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:08.823017 32502 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0821 08:59:08.823045 32502 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0821 08:59:08.823057 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.823062 32502 net.cpp:165] Memory required for data: 1932903600\nI0821 08:59:08.823067 32502 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0821 08:59:08.823076 32502 net.cpp:100] Creating Layer L2_b17_relu\nI0821 08:59:08.823081 32502 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0821 08:59:08.823088 32502 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0821 08:59:08.823097 32502 net.cpp:150] Setting up L2_b17_relu\nI0821 08:59:08.823104 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.823108 32502 net.cpp:165] Memory required for data: 1936180400\nI0821 08:59:08.823113 32502 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:08.823124 32502 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:08.823137 32502 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0821 08:59:08.823145 32502 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:08.823155 32502 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:08.823202 32502 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:08.823217 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.823225 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.823230 32502 net.cpp:165] Memory 
required for data: 1942734000\nI0821 08:59:08.823235 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_conv\nI0821 08:59:08.823246 32502 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0821 08:59:08.823251 32502 net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:08.823261 32502 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0821 08:59:08.823781 32502 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0821 08:59:08.823798 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.823804 32502 net.cpp:165] Memory required for data: 1946010800\nI0821 08:59:08.823813 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0821 08:59:08.823825 32502 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0821 08:59:08.823832 32502 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0821 08:59:08.823840 32502 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0821 08:59:08.824102 32502 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0821 08:59:08.824115 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.824120 32502 net.cpp:165] Memory required for data: 1949287600\nI0821 08:59:08.824131 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:08.824139 32502 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0821 08:59:08.824146 32502 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0821 08:59:08.824156 32502 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.824215 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:08.824370 32502 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0821 08:59:08.824383 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.824388 32502 net.cpp:165] Memory required for data: 1952564400\nI0821 08:59:08.824396 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0821 08:59:08.824404 32502 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0821 08:59:08.824411 32502 net.cpp:434] L2_b18_cbr1_relu <- 
L2_b18_cbr1_bn_top\nI0821 08:59:08.824419 32502 net.cpp:395] L2_b18_cbr1_relu -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.824429 32502 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0821 08:59:08.824434 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.824440 32502 net.cpp:165] Memory required for data: 1955841200\nI0821 08:59:08.824445 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0821 08:59:08.824457 32502 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0821 08:59:08.824463 32502 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0821 08:59:08.824475 32502 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0821 08:59:08.824964 32502 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0821 08:59:08.824978 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.824983 32502 net.cpp:165] Memory required for data: 1959118000\nI0821 08:59:08.824992 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0821 08:59:08.825004 32502 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0821 08:59:08.825011 32502 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0821 08:59:08.825022 32502 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0821 08:59:08.825278 32502 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0821 08:59:08.825292 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.825297 32502 net.cpp:165] Memory required for data: 1962394800\nI0821 08:59:08.825307 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:08.825323 32502 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0821 08:59:08.825330 32502 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0821 08:59:08.825338 32502 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0821 08:59:08.825402 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:08.825562 32502 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0821 08:59:08.825577 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:08.825583 32502 net.cpp:165] Memory required for data: 1965671600\nI0821 08:59:08.825592 32502 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0821 08:59:08.825601 32502 net.cpp:100] Creating Layer L2_b18_sum_eltwise\nI0821 08:59:08.825608 32502 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0821 08:59:08.825614 32502 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:08.825623 32502 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0821 08:59:08.825654 32502 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0821 08:59:08.825662 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.825667 32502 net.cpp:165] Memory required for data: 1968948400\nI0821 08:59:08.825672 32502 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0821 08:59:08.825680 32502 net.cpp:100] Creating Layer L2_b18_relu\nI0821 08:59:08.825685 32502 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0821 08:59:08.825695 32502 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0821 08:59:08.825706 32502 net.cpp:150] Setting up L2_b18_relu\nI0821 08:59:08.825712 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.825716 32502 net.cpp:165] Memory required for data: 1972225200\nI0821 08:59:08.825721 32502 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:08.825728 32502 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:08.825733 32502 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0821 08:59:08.825749 32502 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:08.825762 32502 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:08.825809 32502 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:08.825821 32502 net.cpp:157] 
Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.825829 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:08.825832 32502 net.cpp:165] Memory required for data: 1978778800\nI0821 08:59:08.825837 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:59:08.825851 32502 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:59:08.825858 32502 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:08.825868 32502 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:59:08.826357 32502 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:59:08.826371 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.826376 32502 net.cpp:165] Memory required for data: 1979598000\nI0821 08:59:08.826453 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:59:08.826467 32502 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:59:08.826474 32502 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:59:08.826483 32502 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:59:08.826766 32502 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:59:08.826779 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.826786 32502 net.cpp:165] Memory required for data: 1980417200\nI0821 08:59:08.826797 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:08.826808 32502 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:59:08.826815 32502 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:59:08.826822 32502 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.826887 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:08.827054 32502 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:59:08.827067 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.827072 32502 net.cpp:165] Memory required for data: 1981236400\nI0821 08:59:08.827081 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 
08:59:08.827093 32502 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:59:08.827100 32502 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:59:08.827107 32502 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.827116 32502 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:59:08.827123 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.827128 32502 net.cpp:165] Memory required for data: 1982055600\nI0821 08:59:08.827132 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:59:08.827147 32502 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:59:08.827152 32502 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:59:08.827163 32502 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:59:08.827649 32502 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:59:08.827663 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.827668 32502 net.cpp:165] Memory required for data: 1982874800\nI0821 08:59:08.827677 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:59:08.827687 32502 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:59:08.827692 32502 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:59:08.827703 32502 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:59:08.827980 32502 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:59:08.827997 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.828002 32502 net.cpp:165] Memory required for data: 1983694000\nI0821 08:59:08.828013 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:08.828022 32502 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:59:08.828028 32502 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:59:08.828037 32502 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:59:08.828094 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:08.828253 32502 net.cpp:150] Setting up 
L3_b1_cbr2_scale\nI0821 08:59:08.828265 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.828270 32502 net.cpp:165] Memory required for data: 1984513200\nI0821 08:59:08.828279 32502 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:59:08.828291 32502 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:59:08.828299 32502 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:08.828307 32502 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:59:08.828346 32502 net.cpp:150] Setting up L3_b1_pool\nI0821 08:59:08.828358 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.828362 32502 net.cpp:165] Memory required for data: 1985332400\nI0821 08:59:08.828368 32502 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:59:08.828377 32502 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:59:08.828382 32502 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:59:08.828389 32502 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:59:08.828397 32502 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:59:08.828434 32502 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:59:08.828445 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.828450 32502 net.cpp:165] Memory required for data: 1986151600\nI0821 08:59:08.828455 32502 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:59:08.828464 32502 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:59:08.828469 32502 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:59:08.828476 32502 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:59:08.828485 32502 net.cpp:150] Setting up L3_b1_relu\nI0821 08:59:08.828492 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.828505 32502 net.cpp:165] Memory required for data: 1986970800\nI0821 08:59:08.828513 32502 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:59:08.828531 32502 net.cpp:100] Creating Layer 
L3_b1_zeros\nI0821 08:59:08.828548 32502 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:59:08.829772 32502 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:59:08.829790 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:08.829797 32502 net.cpp:165] Memory required for data: 1987790000\nI0821 08:59:08.829802 32502 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:59:08.829812 32502 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:59:08.829821 32502 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:59:08.829829 32502 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:59:08.829838 32502 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:59:08.829879 32502 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:59:08.829893 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.829898 32502 net.cpp:165] Memory required for data: 1989428400\nI0821 08:59:08.829905 32502 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:08.829912 32502 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:08.829917 32502 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:59:08.829928 32502 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:08.829938 32502 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:08.829993 32502 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:08.830006 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.830013 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.830018 32502 net.cpp:165] Memory required for data: 1992705200\nI0821 08:59:08.830024 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:59:08.830034 32502 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:59:08.830041 32502 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 
08:59:08.830054 32502 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:59:08.832074 32502 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:59:08.832092 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.832098 32502 net.cpp:165] Memory required for data: 1994343600\nI0821 08:59:08.832108 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:59:08.832120 32502 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:59:08.832128 32502 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:59:08.832136 32502 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:59:08.832409 32502 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:59:08.832423 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.832428 32502 net.cpp:165] Memory required for data: 1995982000\nI0821 08:59:08.832439 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:08.832451 32502 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:59:08.832458 32502 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:59:08.832466 32502 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.832533 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:08.832695 32502 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:59:08.832708 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.832713 32502 net.cpp:165] Memory required for data: 1997620400\nI0821 08:59:08.832722 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:59:08.832733 32502 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:59:08.832741 32502 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:59:08.832757 32502 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.832768 32502 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:59:08.832775 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.832787 32502 net.cpp:165] Memory required for data: 
1999258800\nI0821 08:59:08.832792 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:59:08.832805 32502 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:59:08.832811 32502 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:59:08.832823 32502 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:59:08.833861 32502 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:59:08.833876 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.833881 32502 net.cpp:165] Memory required for data: 2000897200\nI0821 08:59:08.833890 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:59:08.833902 32502 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:59:08.833909 32502 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:59:08.833917 32502 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:59:08.834193 32502 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:59:08.834208 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834213 32502 net.cpp:165] Memory required for data: 2002535600\nI0821 08:59:08.834223 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:08.834233 32502 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:59:08.834239 32502 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:59:08.834246 32502 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:59:08.834308 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:08.834467 32502 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:59:08.834483 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834488 32502 net.cpp:165] Memory required for data: 2004174000\nI0821 08:59:08.834497 32502 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:59:08.834506 32502 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:59:08.834512 32502 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:59:08.834520 32502 net.cpp:434] 
L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:08.834528 32502 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:59:08.834564 32502 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:59:08.834574 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834579 32502 net.cpp:165] Memory required for data: 2005812400\nI0821 08:59:08.834584 32502 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:59:08.834592 32502 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:59:08.834599 32502 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:59:08.834605 32502 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:59:08.834615 32502 net.cpp:150] Setting up L3_b2_relu\nI0821 08:59:08.834622 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834626 32502 net.cpp:165] Memory required for data: 2007450800\nI0821 08:59:08.834631 32502 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:08.834645 32502 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:08.834650 32502 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:59:08.834657 32502 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:08.834667 32502 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:08.834713 32502 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:08.834729 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834736 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.834741 32502 net.cpp:165] Memory required for data: 2010727600\nI0821 08:59:08.834753 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:59:08.834764 32502 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:59:08.834770 32502 net.cpp:434] L3_b3_cbr1_conv 
<- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:08.834780 32502 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:59:08.835824 32502 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:59:08.835839 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.835844 32502 net.cpp:165] Memory required for data: 2012366000\nI0821 08:59:08.835852 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:59:08.835865 32502 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:59:08.835871 32502 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:59:08.835880 32502 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:59:08.836154 32502 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:59:08.836168 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.836172 32502 net.cpp:165] Memory required for data: 2014004400\nI0821 08:59:08.836182 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:08.836194 32502 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:59:08.836201 32502 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:59:08.836213 32502 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.836272 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:08.836437 32502 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:59:08.836450 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.836455 32502 net.cpp:165] Memory required for data: 2015642800\nI0821 08:59:08.836464 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:59:08.836472 32502 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:59:08.836478 32502 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:59:08.836489 32502 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.836499 32502 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:59:08.836506 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.836510 
32502 net.cpp:165] Memory required for data: 2017281200\nI0821 08:59:08.836515 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:59:08.836526 32502 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:59:08.836531 32502 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:59:08.836542 32502 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:59:08.837759 32502 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:59:08.837774 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.837780 32502 net.cpp:165] Memory required for data: 2018919600\nI0821 08:59:08.837790 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:59:08.837802 32502 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:59:08.837808 32502 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:59:08.837817 32502 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:59:08.838093 32502 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:59:08.838105 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838110 32502 net.cpp:165] Memory required for data: 2020558000\nI0821 08:59:08.838121 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:08.838130 32502 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:59:08.838136 32502 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:59:08.838145 32502 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:59:08.838230 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:08.838394 32502 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:59:08.838407 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838413 32502 net.cpp:165] Memory required for data: 2022196400\nI0821 08:59:08.838423 32502 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:59:08.838431 32502 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:59:08.838438 32502 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 
08:59:08.838444 32502 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:08.838456 32502 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:59:08.838492 32502 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:59:08.838512 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838517 32502 net.cpp:165] Memory required for data: 2023834800\nI0821 08:59:08.838523 32502 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:59:08.838531 32502 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:59:08.838536 32502 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:59:08.838544 32502 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:59:08.838553 32502 net.cpp:150] Setting up L3_b3_relu\nI0821 08:59:08.838560 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838564 32502 net.cpp:165] Memory required for data: 2025473200\nI0821 08:59:08.838569 32502 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:08.838579 32502 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:08.838585 32502 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:59:08.838593 32502 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:08.838603 32502 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:08.838650 32502 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:08.838665 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838672 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.838676 32502 net.cpp:165] Memory required for data: 2028750000\nI0821 08:59:08.838681 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:59:08.838693 32502 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 
08:59:08.838699 32502 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:08.838708 32502 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:59:08.839751 32502 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:59:08.839769 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.839774 32502 net.cpp:165] Memory required for data: 2030388400\nI0821 08:59:08.839783 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:59:08.839793 32502 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:59:08.839800 32502 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:59:08.839808 32502 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:59:08.840082 32502 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:59:08.840095 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.840101 32502 net.cpp:165] Memory required for data: 2032026800\nI0821 08:59:08.840111 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:08.840122 32502 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:59:08.840128 32502 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:59:08.840142 32502 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.840205 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:08.840371 32502 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:59:08.840384 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.840389 32502 net.cpp:165] Memory required for data: 2033665200\nI0821 08:59:08.840399 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:59:08.840406 32502 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:59:08.840412 32502 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:59:08.840422 32502 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.840433 32502 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:59:08.840440 32502 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0821 08:59:08.840445 32502 net.cpp:165] Memory required for data: 2035303600\nI0821 08:59:08.840450 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:59:08.840462 32502 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:59:08.840469 32502 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:59:08.840477 32502 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:59:08.841531 32502 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:59:08.841547 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.841552 32502 net.cpp:165] Memory required for data: 2036942000\nI0821 08:59:08.841560 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:59:08.841572 32502 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:59:08.841579 32502 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:59:08.841588 32502 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:59:08.841868 32502 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:59:08.841881 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.841886 32502 net.cpp:165] Memory required for data: 2038580400\nI0821 08:59:08.841897 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:08.841907 32502 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:59:08.841913 32502 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:59:08.841920 32502 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:59:08.841984 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:08.842146 32502 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:59:08.842159 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.842165 32502 net.cpp:165] Memory required for data: 2040218800\nI0821 08:59:08.842175 32502 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:59:08.842183 32502 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:59:08.842190 32502 
net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:59:08.842196 32502 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:08.842207 32502 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:59:08.842242 32502 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:59:08.842254 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.842259 32502 net.cpp:165] Memory required for data: 2041857200\nI0821 08:59:08.842264 32502 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:59:08.842272 32502 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:59:08.842278 32502 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:59:08.842284 32502 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:59:08.842294 32502 net.cpp:150] Setting up L3_b4_relu\nI0821 08:59:08.842301 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.842305 32502 net.cpp:165] Memory required for data: 2043495600\nI0821 08:59:08.842310 32502 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:08.842322 32502 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:08.842329 32502 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:59:08.842335 32502 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:08.842345 32502 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:08.842394 32502 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:08.842406 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.842413 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.842418 32502 net.cpp:165] Memory required for data: 2046772400\nI0821 08:59:08.842422 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:59:08.842433 32502 
net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:59:08.842440 32502 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:08.842453 32502 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:59:08.843487 32502 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:59:08.843502 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.843508 32502 net.cpp:165] Memory required for data: 2048410800\nI0821 08:59:08.843516 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:59:08.843526 32502 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:59:08.843539 32502 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:59:08.843551 32502 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:59:08.843837 32502 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:59:08.843852 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.843858 32502 net.cpp:165] Memory required for data: 2050049200\nI0821 08:59:08.843868 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:08.843878 32502 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:59:08.843883 32502 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:59:08.843891 32502 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.843951 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:08.844110 32502 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:59:08.844123 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.844128 32502 net.cpp:165] Memory required for data: 2051687600\nI0821 08:59:08.844137 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:59:08.844146 32502 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:59:08.844152 32502 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:59:08.844162 32502 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.844172 32502 net.cpp:150] Setting up 
L3_b5_cbr1_relu\nI0821 08:59:08.844179 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.844184 32502 net.cpp:165] Memory required for data: 2053326000\nI0821 08:59:08.844188 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:59:08.844203 32502 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:59:08.844209 32502 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:59:08.844218 32502 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:59:08.846246 32502 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:59:08.846266 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.846271 32502 net.cpp:165] Memory required for data: 2054964400\nI0821 08:59:08.846282 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:59:08.846292 32502 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:59:08.846299 32502 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:59:08.846310 32502 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:59:08.846586 32502 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:59:08.846599 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.846604 32502 net.cpp:165] Memory required for data: 2056602800\nI0821 08:59:08.846616 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:08.846626 32502 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:59:08.846633 32502 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:59:08.846642 32502 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:59:08.846704 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:08.846877 32502 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:59:08.846891 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.846896 32502 net.cpp:165] Memory required for data: 2058241200\nI0821 08:59:08.846905 32502 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:59:08.846915 32502 net.cpp:100] Creating 
Layer L3_b5_sum_eltwise\nI0821 08:59:08.846921 32502 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:59:08.846930 32502 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:08.846940 32502 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:59:08.846976 32502 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:59:08.846985 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.846989 32502 net.cpp:165] Memory required for data: 2059879600\nI0821 08:59:08.846994 32502 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:59:08.847005 32502 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:59:08.847012 32502 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:59:08.847026 32502 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:59:08.847038 32502 net.cpp:150] Setting up L3_b5_relu\nI0821 08:59:08.847044 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.847048 32502 net.cpp:165] Memory required for data: 2061518000\nI0821 08:59:08.847054 32502 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:08.847060 32502 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:08.847065 32502 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:59:08.847074 32502 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:08.847082 32502 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:08.847133 32502 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:08.847146 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.847152 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.847157 32502 net.cpp:165] Memory required for data: 2064794800\nI0821 08:59:08.847162 32502 layer_factory.hpp:77] Creating 
layer L3_b6_cbr1_conv\nI0821 08:59:08.847175 32502 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:59:08.847182 32502 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:08.847192 32502 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:59:08.848223 32502 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:59:08.848238 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.848243 32502 net.cpp:165] Memory required for data: 2066433200\nI0821 08:59:08.848260 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:59:08.848273 32502 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:59:08.848279 32502 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:59:08.848289 32502 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:59:08.848562 32502 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:59:08.848574 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.848579 32502 net.cpp:165] Memory required for data: 2068071600\nI0821 08:59:08.848589 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:08.848598 32502 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:59:08.848604 32502 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:59:08.848615 32502 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.848676 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:08.848846 32502 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:59:08.848860 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.848865 32502 net.cpp:165] Memory required for data: 2069710000\nI0821 08:59:08.848875 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:59:08.848882 32502 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:59:08.848888 32502 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:59:08.848898 32502 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 
08:59:08.848908 32502 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:59:08.848915 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.848920 32502 net.cpp:165] Memory required for data: 2071348400\nI0821 08:59:08.848925 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:59:08.848938 32502 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:59:08.848944 32502 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:59:08.848955 32502 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:59:08.849992 32502 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:59:08.850006 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850013 32502 net.cpp:165] Memory required for data: 2072986800\nI0821 08:59:08.850021 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:59:08.850030 32502 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:59:08.850044 32502 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:59:08.850057 32502 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:59:08.850340 32502 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:59:08.850356 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850361 32502 net.cpp:165] Memory required for data: 2074625200\nI0821 08:59:08.850373 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:08.850380 32502 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:59:08.850388 32502 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:59:08.850395 32502 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:59:08.850455 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:08.850617 32502 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:59:08.850630 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850636 32502 net.cpp:165] Memory required for data: 2076263600\nI0821 08:59:08.850644 32502 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 
08:59:08.850656 32502 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:59:08.850663 32502 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:59:08.850670 32502 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:08.850678 32502 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:59:08.850715 32502 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:59:08.850728 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850733 32502 net.cpp:165] Memory required for data: 2077902000\nI0821 08:59:08.850738 32502 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:59:08.850750 32502 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:59:08.850757 32502 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:59:08.850765 32502 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:59:08.850775 32502 net.cpp:150] Setting up L3_b6_relu\nI0821 08:59:08.850781 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850786 32502 net.cpp:165] Memory required for data: 2079540400\nI0821 08:59:08.850790 32502 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:08.850798 32502 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:08.850803 32502 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:59:08.850814 32502 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:08.850824 32502 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:08.850872 32502 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:08.850883 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850889 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.850894 32502 net.cpp:165] Memory required for data: 2082817200\nI0821 
08:59:08.850899 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:59:08.850914 32502 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:59:08.850920 32502 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:08.850930 32502 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:59:08.851961 32502 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:59:08.851977 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.851981 32502 net.cpp:165] Memory required for data: 2084455600\nI0821 08:59:08.851990 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:59:08.852000 32502 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:59:08.852010 32502 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:59:08.852018 32502 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:59:08.852290 32502 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:59:08.852303 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.852308 32502 net.cpp:165] Memory required for data: 2086094000\nI0821 08:59:08.852326 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:08.852335 32502 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:59:08.852341 32502 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:59:08.852352 32502 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.852416 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:08.852586 32502 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:59:08.852598 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.852604 32502 net.cpp:165] Memory required for data: 2087732400\nI0821 08:59:08.852613 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:59:08.852624 32502 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:59:08.852630 32502 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:59:08.852638 32502 net.cpp:395] 
L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.852648 32502 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:59:08.852654 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.852658 32502 net.cpp:165] Memory required for data: 2089370800\nI0821 08:59:08.852663 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:59:08.852677 32502 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:59:08.852684 32502 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:59:08.852694 32502 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:59:08.853734 32502 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:59:08.853754 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.853760 32502 net.cpp:165] Memory required for data: 2091009200\nI0821 08:59:08.853768 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:59:08.853777 32502 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:59:08.853785 32502 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:59:08.853796 32502 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:59:08.854076 32502 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:59:08.854091 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854097 32502 net.cpp:165] Memory required for data: 2092647600\nI0821 08:59:08.854107 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:08.854116 32502 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:59:08.854122 32502 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:59:08.854130 32502 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:59:08.854192 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:08.854357 32502 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:59:08.854369 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854374 32502 net.cpp:165] Memory required for data: 2094286000\nI0821 08:59:08.854383 32502 
layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:59:08.854395 32502 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:59:08.854401 32502 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:59:08.854408 32502 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:08.854416 32502 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:59:08.854454 32502 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:59:08.854467 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854471 32502 net.cpp:165] Memory required for data: 2095924400\nI0821 08:59:08.854476 32502 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:59:08.854485 32502 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:59:08.854490 32502 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:59:08.854497 32502 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:59:08.854507 32502 net.cpp:150] Setting up L3_b7_relu\nI0821 08:59:08.854514 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854518 32502 net.cpp:165] Memory required for data: 2097562800\nI0821 08:59:08.854523 32502 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:08.854537 32502 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:08.854543 32502 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:59:08.854553 32502 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:08.854564 32502 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:08.854614 32502 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:08.854624 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854631 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.854635 32502 
net.cpp:165] Memory required for data: 2100839600\nI0821 08:59:08.854641 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:59:08.854656 32502 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:59:08.854663 32502 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:08.854672 32502 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:59:08.855715 32502 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:59:08.855731 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.855736 32502 net.cpp:165] Memory required for data: 2102478000\nI0821 08:59:08.855751 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:59:08.855763 32502 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:59:08.855770 32502 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:59:08.855778 32502 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:59:08.856055 32502 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:59:08.856067 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.856072 32502 net.cpp:165] Memory required for data: 2104116400\nI0821 08:59:08.856083 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:08.856096 32502 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:59:08.856101 32502 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:59:08.856109 32502 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.856173 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:08.856335 32502 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:59:08.856348 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.856353 32502 net.cpp:165] Memory required for data: 2105754800\nI0821 08:59:08.856362 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:59:08.856374 32502 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:59:08.856380 32502 net.cpp:434] L3_b8_cbr1_relu <- 
L3_b8_cbr1_bn_top\nI0821 08:59:08.856387 32502 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.856397 32502 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:59:08.856403 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.856408 32502 net.cpp:165] Memory required for data: 2107393200\nI0821 08:59:08.856413 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:59:08.856427 32502 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:59:08.856433 32502 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:59:08.856444 32502 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:59:08.857482 32502 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:59:08.857497 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.857502 32502 net.cpp:165] Memory required for data: 2109031600\nI0821 08:59:08.857511 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:59:08.857522 32502 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:59:08.857527 32502 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:59:08.857539 32502 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:59:08.857818 32502 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:59:08.857838 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.857843 32502 net.cpp:165] Memory required for data: 2110670000\nI0821 08:59:08.857861 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:08.857869 32502 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:59:08.857877 32502 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:59:08.857884 32502 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:59:08.857944 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:08.858116 32502 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:59:08.858130 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.858135 32502 net.cpp:165] Memory 
required for data: 2112308400\nI0821 08:59:08.858144 32502 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:59:08.858156 32502 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:59:08.858163 32502 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:59:08.858170 32502 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:08.858178 32502 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:59:08.858216 32502 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:59:08.858227 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.858232 32502 net.cpp:165] Memory required for data: 2113946800\nI0821 08:59:08.858237 32502 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:59:08.858245 32502 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:59:08.858250 32502 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:59:08.858258 32502 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:59:08.858268 32502 net.cpp:150] Setting up L3_b8_relu\nI0821 08:59:08.858274 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.858278 32502 net.cpp:165] Memory required for data: 2115585200\nI0821 08:59:08.858283 32502 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:08.858290 32502 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:08.858295 32502 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:59:08.858305 32502 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:08.858316 32502 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:08.858363 32502 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:08.858374 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.858381 32502 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0821 08:59:08.858386 32502 net.cpp:165] Memory required for data: 2118862000\nI0821 08:59:08.858392 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:59:08.858405 32502 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:59:08.858412 32502 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:08.858420 32502 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:59:08.860530 32502 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:59:08.860548 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.860554 32502 net.cpp:165] Memory required for data: 2120500400\nI0821 08:59:08.860564 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:59:08.860577 32502 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:59:08.860584 32502 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:59:08.860594 32502 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:59:08.860885 32502 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:59:08.860899 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.860904 32502 net.cpp:165] Memory required for data: 2122138800\nI0821 08:59:08.860915 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:08.860924 32502 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:59:08.860930 32502 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:59:08.860939 32502 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.861004 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:08.861183 32502 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:59:08.861196 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.861202 32502 net.cpp:165] Memory required for data: 2123777200\nI0821 08:59:08.861212 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:59:08.861219 32502 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 
08:59:08.861227 32502 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:59:08.861233 32502 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.861243 32502 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:59:08.861250 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.861254 32502 net.cpp:165] Memory required for data: 2125415600\nI0821 08:59:08.861259 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:59:08.861274 32502 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:59:08.861280 32502 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:59:08.861291 32502 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:59:08.862339 32502 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:59:08.862354 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.862360 32502 net.cpp:165] Memory required for data: 2127054000\nI0821 08:59:08.862368 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:59:08.862381 32502 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:59:08.862388 32502 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:59:08.862396 32502 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:59:08.862670 32502 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:59:08.862684 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.862689 32502 net.cpp:165] Memory required for data: 2128692400\nI0821 08:59:08.862699 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:08.862711 32502 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:59:08.862718 32502 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:59:08.862726 32502 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:59:08.862797 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:08.862963 32502 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:59:08.862977 32502 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:08.862982 32502 net.cpp:165] Memory required for data: 2130330800\nI0821 08:59:08.862990 32502 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:59:08.863003 32502 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:59:08.863009 32502 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:59:08.863016 32502 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:08.863028 32502 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:59:08.863064 32502 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:59:08.863075 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.863080 32502 net.cpp:165] Memory required for data: 2131969200\nI0821 08:59:08.863085 32502 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:59:08.863093 32502 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:59:08.863099 32502 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:59:08.863111 32502 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:59:08.863121 32502 net.cpp:150] Setting up L3_b9_relu\nI0821 08:59:08.863128 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.863133 32502 net.cpp:165] Memory required for data: 2133607600\nI0821 08:59:08.863137 32502 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:08.863144 32502 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:08.863150 32502 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0821 08:59:08.863157 32502 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:08.863168 32502 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:08.863227 32502 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:08.863240 32502 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:08.863246 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.863250 32502 net.cpp:165] Memory required for data: 2136884400\nI0821 08:59:08.863255 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_conv\nI0821 08:59:08.863270 32502 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0821 08:59:08.863276 32502 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:08.863286 32502 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0821 08:59:08.864318 32502 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0821 08:59:08.864333 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.864339 32502 net.cpp:165] Memory required for data: 2138522800\nI0821 08:59:08.864347 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0821 08:59:08.864361 32502 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0821 08:59:08.864367 32502 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0821 08:59:08.864375 32502 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0821 08:59:08.865660 32502 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0821 08:59:08.865679 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.865684 32502 net.cpp:165] Memory required for data: 2140161200\nI0821 08:59:08.865695 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:08.865707 32502 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0821 08:59:08.865715 32502 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0821 08:59:08.865723 32502 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.865799 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:08.865970 32502 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0821 08:59:08.865984 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.865989 32502 net.cpp:165] Memory required for data: 2141799600\nI0821 08:59:08.865998 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0821 
08:59:08.866009 32502 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0821 08:59:08.866016 32502 net.cpp:434] L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0821 08:59:08.866024 32502 net.cpp:395] L3_b10_cbr1_relu -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.866034 32502 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0821 08:59:08.866040 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.866045 32502 net.cpp:165] Memory required for data: 2143438000\nI0821 08:59:08.866050 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0821 08:59:08.866065 32502 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0821 08:59:08.866070 32502 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0821 08:59:08.866081 32502 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0821 08:59:08.867149 32502 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0821 08:59:08.867164 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.867169 32502 net.cpp:165] Memory required for data: 2145076400\nI0821 08:59:08.867179 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0821 08:59:08.867189 32502 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0821 08:59:08.867197 32502 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0821 08:59:08.867208 32502 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0821 08:59:08.867480 32502 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0821 08:59:08.867496 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.867501 32502 net.cpp:165] Memory required for data: 2146714800\nI0821 08:59:08.867511 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:08.867521 32502 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0821 08:59:08.867527 32502 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0821 08:59:08.867534 32502 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0821 08:59:08.867594 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:08.867777 32502 net.cpp:150] 
Setting up L3_b10_cbr2_scale\nI0821 08:59:08.867791 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.867796 32502 net.cpp:165] Memory required for data: 2148353200\nI0821 08:59:08.867805 32502 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0821 08:59:08.867817 32502 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0821 08:59:08.867825 32502 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0821 08:59:08.867832 32502 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:08.867841 32502 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0821 08:59:08.867877 32502 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0821 08:59:08.867889 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.867894 32502 net.cpp:165] Memory required for data: 2149991600\nI0821 08:59:08.867899 32502 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0821 08:59:08.867908 32502 net.cpp:100] Creating Layer L3_b10_relu\nI0821 08:59:08.867913 32502 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0821 08:59:08.867920 32502 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0821 08:59:08.867930 32502 net.cpp:150] Setting up L3_b10_relu\nI0821 08:59:08.867936 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.867941 32502 net.cpp:165] Memory required for data: 2151630000\nI0821 08:59:08.867945 32502 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:08.867952 32502 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:08.867957 32502 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0821 08:59:08.867969 32502 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:08.867979 32502 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:08.868026 32502 
net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:08.868038 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.868044 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.868049 32502 net.cpp:165] Memory required for data: 2154906800\nI0821 08:59:08.868054 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0821 08:59:08.868068 32502 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0821 08:59:08.868075 32502 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:08.868084 32502 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0821 08:59:08.869128 32502 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0821 08:59:08.869143 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.869148 32502 net.cpp:165] Memory required for data: 2156545200\nI0821 08:59:08.869158 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0821 08:59:08.869170 32502 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0821 08:59:08.869177 32502 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0821 08:59:08.869185 32502 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0821 08:59:08.869462 32502 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0821 08:59:08.869477 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.869482 32502 net.cpp:165] Memory required for data: 2158183600\nI0821 08:59:08.869491 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:08.869503 32502 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0821 08:59:08.869509 32502 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0821 08:59:08.869518 32502 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.869580 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:08.869741 32502 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0821 08:59:08.869760 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.869765 32502 net.cpp:165] 
Memory required for data: 2159822000\nI0821 08:59:08.869781 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_relu\nI0821 08:59:08.869793 32502 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0821 08:59:08.869801 32502 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0821 08:59:08.869807 32502 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.869817 32502 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0821 08:59:08.869824 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.869828 32502 net.cpp:165] Memory required for data: 2161460400\nI0821 08:59:08.869833 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0821 08:59:08.869849 32502 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0821 08:59:08.869855 32502 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0821 08:59:08.869866 32502 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0821 08:59:08.870895 32502 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0821 08:59:08.870910 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.870916 32502 net.cpp:165] Memory required for data: 2163098800\nI0821 08:59:08.870924 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0821 08:59:08.870934 32502 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0821 08:59:08.870940 32502 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0821 08:59:08.870951 32502 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0821 08:59:08.871237 32502 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0821 08:59:08.871250 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871255 32502 net.cpp:165] Memory required for data: 2164737200\nI0821 08:59:08.871265 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:08.871273 32502 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0821 08:59:08.871280 32502 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0821 08:59:08.871287 32502 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top 
(in-place)\nI0821 08:59:08.871352 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:08.871515 32502 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0821 08:59:08.871531 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871536 32502 net.cpp:165] Memory required for data: 2166375600\nI0821 08:59:08.871546 32502 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0821 08:59:08.871556 32502 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0821 08:59:08.871562 32502 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0821 08:59:08.871568 32502 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:08.871577 32502 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0821 08:59:08.871613 32502 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0821 08:59:08.871625 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871630 32502 net.cpp:165] Memory required for data: 2168014000\nI0821 08:59:08.871635 32502 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0821 08:59:08.871644 32502 net.cpp:100] Creating Layer L3_b11_relu\nI0821 08:59:08.871650 32502 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0821 08:59:08.871657 32502 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0821 08:59:08.871666 32502 net.cpp:150] Setting up L3_b11_relu\nI0821 08:59:08.871673 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871677 32502 net.cpp:165] Memory required for data: 2169652400\nI0821 08:59:08.871682 32502 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:08.871690 32502 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:08.871695 32502 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0821 08:59:08.871704 32502 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:08.871716 32502 
net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:08.871769 32502 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:08.871788 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871795 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.871799 32502 net.cpp:165] Memory required for data: 2172929200\nI0821 08:59:08.871805 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0821 08:59:08.871820 32502 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0821 08:59:08.871827 32502 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:08.871836 32502 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0821 08:59:08.872877 32502 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0821 08:59:08.872892 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.872898 32502 net.cpp:165] Memory required for data: 2174567600\nI0821 08:59:08.872906 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0821 08:59:08.872920 32502 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0821 08:59:08.872927 32502 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0821 08:59:08.872936 32502 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0821 08:59:08.873209 32502 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0821 08:59:08.873222 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.873227 32502 net.cpp:165] Memory required for data: 2176206000\nI0821 08:59:08.873237 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:08.873250 32502 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0821 08:59:08.873255 32502 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0821 08:59:08.873263 32502 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.873325 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:08.873492 32502 net.cpp:150] Setting up 
L3_b12_cbr1_scale\nI0821 08:59:08.873505 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.873510 32502 net.cpp:165] Memory required for data: 2177844400\nI0821 08:59:08.873519 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0821 08:59:08.873530 32502 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0821 08:59:08.873538 32502 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0821 08:59:08.873548 32502 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.873558 32502 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0821 08:59:08.873564 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.873569 32502 net.cpp:165] Memory required for data: 2179482800\nI0821 08:59:08.873574 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0821 08:59:08.873584 32502 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0821 08:59:08.873590 32502 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0821 08:59:08.873601 32502 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0821 08:59:08.875629 32502 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0821 08:59:08.875648 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.875653 32502 net.cpp:165] Memory required for data: 2181121200\nI0821 08:59:08.875663 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0821 08:59:08.875675 32502 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0821 08:59:08.875682 32502 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0821 08:59:08.875691 32502 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0821 08:59:08.875977 32502 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0821 08:59:08.875991 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.875996 32502 net.cpp:165] Memory required for data: 2182759600\nI0821 08:59:08.876008 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:08.876019 32502 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0821 08:59:08.876026 32502 net.cpp:434] 
L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0821 08:59:08.876034 32502 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0821 08:59:08.876099 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:08.876267 32502 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0821 08:59:08.876281 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.876293 32502 net.cpp:165] Memory required for data: 2184398000\nI0821 08:59:08.876303 32502 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0821 08:59:08.876315 32502 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0821 08:59:08.876322 32502 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0821 08:59:08.876329 32502 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:08.876340 32502 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0821 08:59:08.876376 32502 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0821 08:59:08.876389 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.876392 32502 net.cpp:165] Memory required for data: 2186036400\nI0821 08:59:08.876397 32502 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0821 08:59:08.876410 32502 net.cpp:100] Creating Layer L3_b12_relu\nI0821 08:59:08.876416 32502 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0821 08:59:08.876423 32502 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0821 08:59:08.876433 32502 net.cpp:150] Setting up L3_b12_relu\nI0821 08:59:08.876441 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.876444 32502 net.cpp:165] Memory required for data: 2187674800\nI0821 08:59:08.876449 32502 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:08.876457 32502 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:08.876462 32502 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0821 08:59:08.876471 32502 
net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:08.876479 32502 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:08.876530 32502 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:08.876543 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.876549 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.876554 32502 net.cpp:165] Memory required for data: 2190951600\nI0821 08:59:08.876559 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0821 08:59:08.876574 32502 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0821 08:59:08.876579 32502 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:08.876590 32502 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0821 08:59:08.877625 32502 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0821 08:59:08.877641 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.877646 32502 net.cpp:165] Memory required for data: 2192590000\nI0821 08:59:08.877655 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0821 08:59:08.877667 32502 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0821 08:59:08.877674 32502 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0821 08:59:08.877687 32502 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0821 08:59:08.877969 32502 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0821 08:59:08.877981 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.877986 32502 net.cpp:165] Memory required for data: 2194228400\nI0821 08:59:08.877997 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:08.878006 32502 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0821 08:59:08.878012 32502 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0821 08:59:08.878021 32502 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top 
(in-place)\nI0821 08:59:08.878083 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:08.878248 32502 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0821 08:59:08.878262 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.878268 32502 net.cpp:165] Memory required for data: 2195866800\nI0821 08:59:08.878276 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0821 08:59:08.878284 32502 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0821 08:59:08.878291 32502 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0821 08:59:08.878309 32502 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.878320 32502 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0821 08:59:08.878329 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.878332 32502 net.cpp:165] Memory required for data: 2197505200\nI0821 08:59:08.878337 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0821 08:59:08.878351 32502 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0821 08:59:08.878357 32502 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0821 08:59:08.878366 32502 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0821 08:59:08.879406 32502 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0821 08:59:08.879421 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.879426 32502 net.cpp:165] Memory required for data: 2199143600\nI0821 08:59:08.879436 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0821 08:59:08.879447 32502 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0821 08:59:08.879454 32502 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0821 08:59:08.879462 32502 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0821 08:59:08.879736 32502 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0821 08:59:08.879755 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.879760 32502 net.cpp:165] Memory required for data: 2200782000\nI0821 08:59:08.879771 32502 layer_factory.hpp:77] 
Creating layer L3_b13_cbr2_scale\nI0821 08:59:08.879786 32502 net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0821 08:59:08.879791 32502 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0821 08:59:08.879799 32502 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0821 08:59:08.879863 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:08.880033 32502 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0821 08:59:08.880046 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.880051 32502 net.cpp:165] Memory required for data: 2202420400\nI0821 08:59:08.880061 32502 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0821 08:59:08.880074 32502 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0821 08:59:08.880080 32502 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0821 08:59:08.880087 32502 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:08.880098 32502 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0821 08:59:08.880133 32502 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0821 08:59:08.880146 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.880149 32502 net.cpp:165] Memory required for data: 2204058800\nI0821 08:59:08.880154 32502 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0821 08:59:08.880165 32502 net.cpp:100] Creating Layer L3_b13_relu\nI0821 08:59:08.880172 32502 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0821 08:59:08.880179 32502 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0821 08:59:08.880188 32502 net.cpp:150] Setting up L3_b13_relu\nI0821 08:59:08.880195 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.880199 32502 net.cpp:165] Memory required for data: 2205697200\nI0821 08:59:08.880204 32502 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:08.880211 32502 net.cpp:100] Creating Layer 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:08.880216 32502 net.cpp:434] L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0821 08:59:08.880224 32502 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:08.880234 32502 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:08.880285 32502 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:08.880297 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.880304 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.880308 32502 net.cpp:165] Memory required for data: 2208974000\nI0821 08:59:08.880321 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0821 08:59:08.880334 32502 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0821 08:59:08.880342 32502 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:08.880350 32502 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0821 08:59:08.881386 32502 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0821 08:59:08.881402 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.881407 32502 net.cpp:165] Memory required for data: 2210612400\nI0821 08:59:08.881415 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0821 08:59:08.881430 32502 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0821 08:59:08.881438 32502 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0821 08:59:08.881448 32502 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0821 08:59:08.881727 32502 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0821 08:59:08.881741 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.881750 32502 net.cpp:165] Memory required for data: 2212250800\nI0821 08:59:08.881762 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:08.881774 32502 net.cpp:100] Creating Layer 
L3_b14_cbr1_scale\nI0821 08:59:08.881780 32502 net.cpp:434] L3_b14_cbr1_scale <- L3_b14_cbr1_bn_top\nI0821 08:59:08.881795 32502 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.881858 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:08.882024 32502 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0821 08:59:08.882037 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.882042 32502 net.cpp:165] Memory required for data: 2213889200\nI0821 08:59:08.882051 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0821 08:59:08.882060 32502 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0821 08:59:08.882066 32502 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0821 08:59:08.882076 32502 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.882086 32502 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0821 08:59:08.882092 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.882097 32502 net.cpp:165] Memory required for data: 2215527600\nI0821 08:59:08.882102 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0821 08:59:08.882115 32502 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0821 08:59:08.882122 32502 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0821 08:59:08.882130 32502 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0821 08:59:08.883169 32502 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0821 08:59:08.883185 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.883190 32502 net.cpp:165] Memory required for data: 2217166000\nI0821 08:59:08.883198 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0821 08:59:08.883211 32502 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0821 08:59:08.883219 32502 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0821 08:59:08.883226 32502 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0821 08:59:08.883502 32502 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0821 08:59:08.883514 
32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.883519 32502 net.cpp:165] Memory required for data: 2218804400\nI0821 08:59:08.883529 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:08.883541 32502 net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0821 08:59:08.883548 32502 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0821 08:59:08.883558 32502 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0821 08:59:08.883620 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:08.883798 32502 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0821 08:59:08.883812 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.883817 32502 net.cpp:165] Memory required for data: 2220442800\nI0821 08:59:08.883826 32502 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0821 08:59:08.883836 32502 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0821 08:59:08.883848 32502 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0821 08:59:08.883857 32502 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:08.883867 32502 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0821 08:59:08.883903 32502 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0821 08:59:08.883914 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.883919 32502 net.cpp:165] Memory required for data: 2222081200\nI0821 08:59:08.883924 32502 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0821 08:59:08.883934 32502 net.cpp:100] Creating Layer L3_b14_relu\nI0821 08:59:08.883941 32502 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0821 08:59:08.883949 32502 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0821 08:59:08.883957 32502 net.cpp:150] Setting up L3_b14_relu\nI0821 08:59:08.883965 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.883970 32502 net.cpp:165] Memory required for data: 2223719600\nI0821 08:59:08.883973 32502 
layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:08.884045 32502 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:08.884055 32502 net.cpp:434] L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0821 08:59:08.884064 32502 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:08.884074 32502 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:08.884125 32502 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:08.884137 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.884143 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.884148 32502 net.cpp:165] Memory required for data: 2226996400\nI0821 08:59:08.884155 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0821 08:59:08.884169 32502 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0821 08:59:08.884176 32502 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:08.884186 32502 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0821 08:59:08.885226 32502 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0821 08:59:08.885241 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.885246 32502 net.cpp:165] Memory required for data: 2228634800\nI0821 08:59:08.885255 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0821 08:59:08.885267 32502 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0821 08:59:08.885274 32502 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0821 08:59:08.885285 32502 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0821 08:59:08.885560 32502 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0821 08:59:08.885572 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.885577 32502 net.cpp:165] Memory required for data: 2230273200\nI0821 
08:59:08.885587 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:08.885596 32502 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0821 08:59:08.885602 32502 net.cpp:434] L3_b15_cbr1_scale <- L3_b15_cbr1_bn_top\nI0821 08:59:08.885613 32502 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.885675 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:08.885846 32502 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0821 08:59:08.885860 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.885864 32502 net.cpp:165] Memory required for data: 2231911600\nI0821 08:59:08.885874 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0821 08:59:08.885882 32502 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0821 08:59:08.885890 32502 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0821 08:59:08.885900 32502 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.885910 32502 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0821 08:59:08.885916 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.885927 32502 net.cpp:165] Memory required for data: 2233550000\nI0821 08:59:08.885932 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0821 08:59:08.885947 32502 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0821 08:59:08.885953 32502 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0821 08:59:08.885962 32502 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0821 08:59:08.887008 32502 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0821 08:59:08.887027 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887032 32502 net.cpp:165] Memory required for data: 2235188400\nI0821 08:59:08.887042 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0821 08:59:08.887051 32502 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0821 08:59:08.887058 32502 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0821 08:59:08.887069 32502 
net.cpp:408] L3_b15_cbr2_bn -> L3_b15_cbr2_bn_top\nI0821 08:59:08.887341 32502 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0821 08:59:08.887354 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887359 32502 net.cpp:165] Memory required for data: 2236826800\nI0821 08:59:08.887370 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:08.887382 32502 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0821 08:59:08.887388 32502 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0821 08:59:08.887397 32502 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0821 08:59:08.887456 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:08.887622 32502 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0821 08:59:08.887636 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887641 32502 net.cpp:165] Memory required for data: 2238465200\nI0821 08:59:08.887650 32502 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0821 08:59:08.887660 32502 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0821 08:59:08.887666 32502 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0821 08:59:08.887673 32502 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:08.887684 32502 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0821 08:59:08.887719 32502 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0821 08:59:08.887728 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887733 32502 net.cpp:165] Memory required for data: 2240103600\nI0821 08:59:08.887738 32502 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0821 08:59:08.887755 32502 net.cpp:100] Creating Layer L3_b15_relu\nI0821 08:59:08.887763 32502 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0821 08:59:08.887770 32502 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0821 08:59:08.887780 32502 net.cpp:150] Setting up L3_b15_relu\nI0821 08:59:08.887787 32502 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887791 32502 net.cpp:165] Memory required for data: 2241742000\nI0821 08:59:08.887796 32502 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:08.887804 32502 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:08.887809 32502 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0821 08:59:08.887816 32502 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:08.887826 32502 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:08.887877 32502 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:08.887889 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887897 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.887900 32502 net.cpp:165] Memory required for data: 2245018800\nI0821 08:59:08.887907 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0821 08:59:08.887919 32502 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0821 08:59:08.887926 32502 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:08.887943 32502 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0821 08:59:08.889984 32502 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0821 08:59:08.890002 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.890007 32502 net.cpp:165] Memory required for data: 2246657200\nI0821 08:59:08.890017 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0821 08:59:08.890027 32502 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0821 08:59:08.890034 32502 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0821 08:59:08.890046 32502 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0821 08:59:08.890333 32502 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0821 08:59:08.890347 32502 
net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.890352 32502 net.cpp:165] Memory required for data: 2248295600\nI0821 08:59:08.890362 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:08.890372 32502 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0821 08:59:08.890377 32502 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0821 08:59:08.890385 32502 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.890452 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:08.890619 32502 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0821 08:59:08.890632 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.890637 32502 net.cpp:165] Memory required for data: 2249934000\nI0821 08:59:08.890646 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0821 08:59:08.890657 32502 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0821 08:59:08.890664 32502 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0821 08:59:08.890671 32502 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.890681 32502 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0821 08:59:08.890688 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.890693 32502 net.cpp:165] Memory required for data: 2251572400\nI0821 08:59:08.890697 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0821 08:59:08.890712 32502 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0821 08:59:08.890718 32502 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0821 08:59:08.890727 32502 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0821 08:59:08.891773 32502 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0821 08:59:08.891788 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.891793 32502 net.cpp:165] Memory required for data: 2253210800\nI0821 08:59:08.891803 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0821 08:59:08.891815 32502 net.cpp:100] Creating Layer 
L3_b16_cbr2_bn\nI0821 08:59:08.891822 32502 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0821 08:59:08.891834 32502 net.cpp:408] L3_b16_cbr2_bn -> L3_b16_cbr2_bn_top\nI0821 08:59:08.892117 32502 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0821 08:59:08.892130 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892135 32502 net.cpp:165] Memory required for data: 2254849200\nI0821 08:59:08.892145 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:08.892154 32502 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0821 08:59:08.892161 32502 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0821 08:59:08.892172 32502 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0821 08:59:08.892235 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:08.892400 32502 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0821 08:59:08.892412 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892416 32502 net.cpp:165] Memory required for data: 2256487600\nI0821 08:59:08.892426 32502 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0821 08:59:08.892437 32502 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0821 08:59:08.892444 32502 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0821 08:59:08.892452 32502 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:08.892467 32502 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0821 08:59:08.892508 32502 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0821 08:59:08.892518 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892523 32502 net.cpp:165] Memory required for data: 2258126000\nI0821 08:59:08.892527 32502 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0821 08:59:08.892535 32502 net.cpp:100] Creating Layer L3_b16_relu\nI0821 08:59:08.892541 32502 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0821 08:59:08.892551 32502 net.cpp:395] L3_b16_relu -> 
L3_b16_sum_eltwise_top (in-place)\nI0821 08:59:08.892561 32502 net.cpp:150] Setting up L3_b16_relu\nI0821 08:59:08.892568 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892573 32502 net.cpp:165] Memory required for data: 2259764400\nI0821 08:59:08.892578 32502 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:08.892585 32502 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:08.892590 32502 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0821 08:59:08.892597 32502 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:08.892607 32502 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:08.892658 32502 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:08.892670 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892678 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.892681 32502 net.cpp:165] Memory required for data: 2263041200\nI0821 08:59:08.892686 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0821 08:59:08.892699 32502 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0821 08:59:08.892704 32502 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:08.892716 32502 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0821 08:59:08.893760 32502 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0821 08:59:08.893775 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.893780 32502 net.cpp:165] Memory required for data: 2264679600\nI0821 08:59:08.893790 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0821 08:59:08.893800 32502 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0821 08:59:08.893806 32502 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0821 08:59:08.893817 32502 
net.cpp:408] L3_b17_cbr1_bn -> L3_b17_cbr1_bn_top\nI0821 08:59:08.894095 32502 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0821 08:59:08.894109 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.894114 32502 net.cpp:165] Memory required for data: 2266318000\nI0821 08:59:08.894124 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:08.894132 32502 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0821 08:59:08.894140 32502 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0821 08:59:08.894146 32502 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.894213 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:08.894371 32502 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0821 08:59:08.894387 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.894392 32502 net.cpp:165] Memory required for data: 2267956400\nI0821 08:59:08.894400 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0821 08:59:08.894409 32502 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0821 08:59:08.894415 32502 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0821 08:59:08.894423 32502 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.894433 32502 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0821 08:59:08.894439 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.894443 32502 net.cpp:165] Memory required for data: 2269594800\nI0821 08:59:08.894448 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0821 08:59:08.894469 32502 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0821 08:59:08.894476 32502 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0821 08:59:08.894485 32502 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0821 08:59:08.895520 32502 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0821 08:59:08.895536 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.895541 32502 net.cpp:165] Memory required for data: 
2271233200\nI0821 08:59:08.895550 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0821 08:59:08.895562 32502 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0821 08:59:08.895570 32502 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0821 08:59:08.895582 32502 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0821 08:59:08.895864 32502 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0821 08:59:08.895879 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.895884 32502 net.cpp:165] Memory required for data: 2272871600\nI0821 08:59:08.895894 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:08.895902 32502 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0821 08:59:08.895908 32502 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0821 08:59:08.895921 32502 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0821 08:59:08.895983 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:08.896147 32502 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0821 08:59:08.896159 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.896164 32502 net.cpp:165] Memory required for data: 2274510000\nI0821 08:59:08.896174 32502 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0821 08:59:08.896188 32502 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0821 08:59:08.896194 32502 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0821 08:59:08.896203 32502 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:08.896210 32502 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0821 08:59:08.896250 32502 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0821 08:59:08.896260 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.896265 32502 net.cpp:165] Memory required for data: 2276148400\nI0821 08:59:08.896270 32502 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0821 08:59:08.896278 32502 net.cpp:100] Creating Layer 
L3_b17_relu\nI0821 08:59:08.896284 32502 net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0821 08:59:08.896294 32502 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top (in-place)\nI0821 08:59:08.896304 32502 net.cpp:150] Setting up L3_b17_relu\nI0821 08:59:08.896311 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.896315 32502 net.cpp:165] Memory required for data: 2277786800\nI0821 08:59:08.896320 32502 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:08.896327 32502 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:08.896333 32502 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0821 08:59:08.896340 32502 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:08.896350 32502 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:08.896405 32502 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:08.896417 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.896425 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.896430 32502 net.cpp:165] Memory required for data: 2281063600\nI0821 08:59:08.896435 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0821 08:59:08.896445 32502 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0821 08:59:08.896451 32502 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:08.896463 32502 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0821 08:59:08.897501 32502 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0821 08:59:08.897523 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.897528 32502 net.cpp:165] Memory required for data: 2282702000\nI0821 08:59:08.897537 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0821 08:59:08.897549 32502 net.cpp:100] 
Creating Layer L3_b18_cbr1_bn\nI0821 08:59:08.897557 32502 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0821 08:59:08.897564 32502 net.cpp:408] L3_b18_cbr1_bn -> L3_b18_cbr1_bn_top\nI0821 08:59:08.897853 32502 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0821 08:59:08.897866 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.897872 32502 net.cpp:165] Memory required for data: 2284340400\nI0821 08:59:08.897882 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:08.897891 32502 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0821 08:59:08.897897 32502 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0821 08:59:08.897905 32502 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.897969 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:08.898126 32502 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0821 08:59:08.898142 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.898147 32502 net.cpp:165] Memory required for data: 2285978800\nI0821 08:59:08.898156 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0821 08:59:08.898164 32502 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0821 08:59:08.898171 32502 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0821 08:59:08.898178 32502 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.898188 32502 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0821 08:59:08.898195 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.898200 32502 net.cpp:165] Memory required for data: 2287617200\nI0821 08:59:08.898205 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0821 08:59:08.898218 32502 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0821 08:59:08.898224 32502 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0821 08:59:08.898236 32502 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0821 08:59:08.899276 32502 net.cpp:150] Setting up 
L3_b18_cbr2_conv\nI0821 08:59:08.899291 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.899296 32502 net.cpp:165] Memory required for data: 2289255600\nI0821 08:59:08.899304 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0821 08:59:08.899317 32502 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0821 08:59:08.899324 32502 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0821 08:59:08.899333 32502 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0821 08:59:08.899603 32502 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0821 08:59:08.899616 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.899621 32502 net.cpp:165] Memory required for data: 2290894000\nI0821 08:59:08.899632 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:08.899643 32502 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0821 08:59:08.899651 32502 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0821 08:59:08.899658 32502 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0821 08:59:08.899719 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:08.899886 32502 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0821 08:59:08.899900 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.899905 32502 net.cpp:165] Memory required for data: 2292532400\nI0821 08:59:08.899914 32502 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0821 08:59:08.899927 32502 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0821 08:59:08.899935 32502 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0821 08:59:08.899941 32502 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:08.899950 32502 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0821 08:59:08.899986 32502 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0821 08:59:08.899996 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.900007 32502 net.cpp:165] Memory required for data: 
2294170800\nI0821 08:59:08.900012 32502 layer_factory.hpp:77] Creating layer L3_b18_relu\nI0821 08:59:08.900020 32502 net.cpp:100] Creating Layer L3_b18_relu\nI0821 08:59:08.900027 32502 net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0821 08:59:08.900037 32502 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0821 08:59:08.900046 32502 net.cpp:150] Setting up L3_b18_relu\nI0821 08:59:08.900053 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:08.900058 32502 net.cpp:165] Memory required for data: 2295809200\nI0821 08:59:08.900063 32502 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:59:08.900071 32502 net.cpp:100] Creating Layer post_pool\nI0821 08:59:08.900077 32502 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0821 08:59:08.900086 32502 net.cpp:408] post_pool -> post_pool\nI0821 08:59:08.900121 32502 net.cpp:150] Setting up post_pool\nI0821 08:59:08.900136 32502 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0821 08:59:08.900142 32502 net.cpp:165] Memory required for data: 2295834800\nI0821 08:59:08.900147 32502 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:59:08.900255 32502 net.cpp:100] Creating Layer post_FC\nI0821 08:59:08.900269 32502 net.cpp:434] post_FC <- post_pool\nI0821 08:59:08.900279 32502 net.cpp:408] post_FC -> post_FC_top\nI0821 08:59:08.900568 32502 net.cpp:150] Setting up post_FC\nI0821 08:59:08.900586 32502 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:08.900593 32502 net.cpp:165] Memory required for data: 2295838800\nI0821 08:59:08.900602 32502 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:59:08.900610 32502 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:59:08.900616 32502 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:59:08.900625 32502 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:59:08.900636 32502 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 
08:59:08.900688 32502 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:59:08.900701 32502 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:08.900707 32502 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:08.900712 32502 net.cpp:165] Memory required for data: 2295846800\nI0821 08:59:08.900717 32502 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:59:08.900771 32502 net.cpp:100] Creating Layer accuracy\nI0821 08:59:08.900785 32502 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:59:08.900797 32502 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:59:08.900805 32502 net.cpp:408] accuracy -> accuracy\nI0821 08:59:08.900853 32502 net.cpp:150] Setting up accuracy\nI0821 08:59:08.900866 32502 net.cpp:157] Top shape: (1)\nI0821 08:59:08.900872 32502 net.cpp:165] Memory required for data: 2295846804\nI0821 08:59:08.900877 32502 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:08.900887 32502 net.cpp:100] Creating Layer loss\nI0821 08:59:08.900892 32502 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:59:08.900899 32502 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:59:08.900907 32502 net.cpp:408] loss -> loss\nI0821 08:59:08.901862 32502 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:08.902990 32502 net.cpp:150] Setting up loss\nI0821 08:59:08.903012 32502 net.cpp:157] Top shape: (1)\nI0821 08:59:08.903017 32502 net.cpp:160]     with loss weight 1\nI0821 08:59:08.903103 32502 net.cpp:165] Memory required for data: 2295846808\nI0821 08:59:08.903112 32502 net.cpp:226] loss needs backward computation.\nI0821 08:59:08.903118 32502 net.cpp:228] accuracy does not need backward computation.\nI0821 08:59:08.903125 32502 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:59:08.903131 32502 net.cpp:226] post_FC needs backward computation.\nI0821 08:59:08.903136 32502 net.cpp:226] post_pool needs backward computation.\nI0821 08:59:08.903141 32502 net.cpp:226] L3_b18_relu 
needs backward computation.\nI0821 08:59:08.903152 32502 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0821 08:59:08.903158 32502 net.cpp:226] L3_b18_cbr2_scale needs backward computation.\nI0821 08:59:08.903163 32502 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0821 08:59:08.903168 32502 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0821 08:59:08.903173 32502 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0821 08:59:08.903178 32502 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0821 08:59:08.903183 32502 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0821 08:59:08.903188 32502 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0821 08:59:08.903193 32502 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0821 08:59:08.903198 32502 net.cpp:226] L3_b17_relu needs backward computation.\nI0821 08:59:08.903203 32502 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0821 08:59:08.903209 32502 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0821 08:59:08.903214 32502 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0821 08:59:08.903219 32502 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0821 08:59:08.903224 32502 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0821 08:59:08.903229 32502 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0821 08:59:08.903234 32502 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0821 08:59:08.903239 32502 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0821 08:59:08.903244 32502 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0821 08:59:08.903249 32502 net.cpp:226] L3_b16_relu needs backward computation.\nI0821 08:59:08.903254 32502 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0821 08:59:08.903260 32502 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0821 
08:59:08.903265 32502 net.cpp:226] L3_b16_cbr2_bn needs backward computation.\nI0821 08:59:08.903270 32502 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0821 08:59:08.903275 32502 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0821 08:59:08.903280 32502 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0821 08:59:08.903285 32502 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0821 08:59:08.903290 32502 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0821 08:59:08.903295 32502 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0821 08:59:08.903301 32502 net.cpp:226] L3_b15_relu needs backward computation.\nI0821 08:59:08.903306 32502 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0821 08:59:08.903311 32502 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0821 08:59:08.903316 32502 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0821 08:59:08.903321 32502 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0821 08:59:08.903326 32502 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0821 08:59:08.903331 32502 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0821 08:59:08.903336 32502 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0821 08:59:08.903342 32502 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0821 08:59:08.903347 32502 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0821 08:59:08.903352 32502 net.cpp:226] L3_b14_relu needs backward computation.\nI0821 08:59:08.903357 32502 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0821 08:59:08.903362 32502 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0821 08:59:08.903367 32502 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0821 08:59:08.903373 32502 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0821 08:59:08.903378 32502 net.cpp:226] 
L3_b14_cbr1_relu needs backward computation.\nI0821 08:59:08.903383 32502 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0821 08:59:08.903388 32502 net.cpp:226] L3_b14_cbr1_bn needs backward computation.\nI0821 08:59:08.903398 32502 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0821 08:59:08.903404 32502 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0821 08:59:08.903409 32502 net.cpp:226] L3_b13_relu needs backward computation.\nI0821 08:59:08.903414 32502 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0821 08:59:08.903420 32502 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0821 08:59:08.903425 32502 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0821 08:59:08.903431 32502 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0821 08:59:08.903436 32502 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0821 08:59:08.903441 32502 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0821 08:59:08.903446 32502 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0821 08:59:08.903451 32502 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0821 08:59:08.903456 32502 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0821 08:59:08.903462 32502 net.cpp:226] L3_b12_relu needs backward computation.\nI0821 08:59:08.903467 32502 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0821 08:59:08.903472 32502 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0821 08:59:08.903478 32502 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0821 08:59:08.903483 32502 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0821 08:59:08.903491 32502 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0821 08:59:08.903497 32502 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0821 08:59:08.903502 32502 net.cpp:226] L3_b12_cbr1_bn needs backward 
computation.\nI0821 08:59:08.903507 32502 net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0821 08:59:08.903513 32502 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward computation.\nI0821 08:59:08.903518 32502 net.cpp:226] L3_b11_relu needs backward computation.\nI0821 08:59:08.903523 32502 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0821 08:59:08.903529 32502 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0821 08:59:08.903534 32502 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0821 08:59:08.903539 32502 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0821 08:59:08.903545 32502 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0821 08:59:08.903550 32502 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0821 08:59:08.903555 32502 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0821 08:59:08.903560 32502 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0821 08:59:08.903566 32502 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0821 08:59:08.903571 32502 net.cpp:226] L3_b10_relu needs backward computation.\nI0821 08:59:08.903576 32502 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0821 08:59:08.903583 32502 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0821 08:59:08.903587 32502 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0821 08:59:08.903592 32502 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0821 08:59:08.903597 32502 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0821 08:59:08.903602 32502 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0821 08:59:08.903607 32502 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0821 08:59:08.903614 32502 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0821 08:59:08.903619 32502 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0821 
08:59:08.903623 32502 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:59:08.903628 32502 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:59:08.903635 32502 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:59:08.903640 32502 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:59:08.903645 32502 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:59:08.903656 32502 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:59:08.903661 32502 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:59:08.903666 32502 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:59:08.903671 32502 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:59:08.903676 32502 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:59:08.903681 32502 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:59:08.903687 32502 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:59:08.903693 32502 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:59:08.903698 32502 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:59:08.903703 32502 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:59:08.903708 32502 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:59:08.903713 32502 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:59:08.903718 32502 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:59:08.903723 32502 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:59:08.903729 32502 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:59:08.903734 32502 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:59:08.903739 32502 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0821 08:59:08.903753 32502 net.cpp:226] L3_b7_cbr2_scale needs backward 
computation.\nI0821 08:59:08.903759 32502 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:59:08.903764 32502 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:59:08.903770 32502 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:59:08.903775 32502 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:59:08.903780 32502 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:59:08.903785 32502 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:59:08.903797 32502 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:59:08.903803 32502 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:59:08.903808 32502 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:59:08.903815 32502 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:59:08.903820 32502 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:59:08.903825 32502 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:59:08.903831 32502 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:59:08.903836 32502 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:59:08.903841 32502 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:59:08.903846 32502 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:59:08.903851 32502 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:59:08.903856 32502 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:59:08.903861 32502 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:59:08.903867 32502 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:59:08.903872 32502 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:59:08.903878 32502 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:59:08.903883 32502 net.cpp:226] 
L3_b5_cbr1_relu needs backward computation.\nI0821 08:59:08.903888 32502 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:59:08.903893 32502 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:59:08.903899 32502 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:59:08.903904 32502 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:59:08.903910 32502 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:59:08.903915 32502 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:59:08.903928 32502 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:59:08.903933 32502 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:59:08.903939 32502 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:59:08.903944 32502 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:59:08.903949 32502 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:59:08.903954 32502 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:59:08.903959 32502 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:59:08.903965 32502 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:59:08.903971 32502 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:59:08.903976 32502 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:59:08.903982 32502 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:59:08.903988 32502 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:59:08.903993 32502 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:59:08.904000 32502 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:59:08.904005 32502 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:59:08.904009 32502 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:59:08.904014 
32502 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:59:08.904021 32502 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:59:08.904026 32502 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:59:08.904031 32502 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:59:08.904037 32502 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:59:08.904042 32502 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:59:08.904047 32502 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:59:08.904053 32502 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:59:08.904058 32502 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:59:08.904063 32502 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:59:08.904068 32502 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:59:08.904075 32502 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:59:08.904080 32502 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:59:08.904088 32502 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:59:08.904091 32502 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:59:08.904098 32502 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:59:08.904103 32502 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:59:08.904109 32502 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:59:08.904114 32502 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:59:08.904120 32502 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:59:08.904126 32502 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:59:08.904131 32502 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 08:59:08.904136 32502 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 
08:59:08.904142 32502 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:59:08.904148 32502 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0821 08:59:08.904153 32502 net.cpp:226] L2_b18_relu needs backward computation.\nI0821 08:59:08.904160 32502 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0821 08:59:08.904168 32502 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0821 08:59:08.904175 32502 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0821 08:59:08.904180 32502 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0821 08:59:08.904186 32502 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0821 08:59:08.904196 32502 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0821 08:59:08.904201 32502 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0821 08:59:08.904207 32502 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0821 08:59:08.904213 32502 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0821 08:59:08.904219 32502 net.cpp:226] L2_b17_relu needs backward computation.\nI0821 08:59:08.904224 32502 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0821 08:59:08.904232 32502 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0821 08:59:08.904237 32502 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0821 08:59:08.904242 32502 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0821 08:59:08.904247 32502 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0821 08:59:08.904253 32502 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0821 08:59:08.904258 32502 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0821 08:59:08.904263 32502 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0821 08:59:08.904269 32502 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0821 08:59:08.904275 
32502 net.cpp:226] L2_b16_relu needs backward computation.\nI0821 08:59:08.904280 32502 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0821 08:59:08.904286 32502 net.cpp:226] L2_b16_cbr2_scale needs backward computation.\nI0821 08:59:08.904291 32502 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0821 08:59:08.904297 32502 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0821 08:59:08.904304 32502 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0821 08:59:08.904309 32502 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0821 08:59:08.904314 32502 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0821 08:59:08.904320 32502 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0821 08:59:08.904325 32502 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0821 08:59:08.904331 32502 net.cpp:226] L2_b15_relu needs backward computation.\nI0821 08:59:08.904336 32502 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0821 08:59:08.904343 32502 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0821 08:59:08.904348 32502 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0821 08:59:08.904355 32502 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0821 08:59:08.904359 32502 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0821 08:59:08.904366 32502 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0821 08:59:08.904371 32502 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0821 08:59:08.904376 32502 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0821 08:59:08.904382 32502 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0821 08:59:08.904388 32502 net.cpp:226] L2_b14_relu needs backward computation.\nI0821 08:59:08.904393 32502 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0821 08:59:08.904400 32502 net.cpp:226] L2_b14_cbr2_scale needs 
backward computation.\nI0821 08:59:08.904405 32502 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0821 08:59:08.904412 32502 net.cpp:226] L2_b14_cbr2_conv needs backward computation.\nI0821 08:59:08.904417 32502 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0821 08:59:08.904422 32502 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0821 08:59:08.904428 32502 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0821 08:59:08.904433 32502 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0821 08:59:08.904439 32502 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0821 08:59:08.904444 32502 net.cpp:226] L2_b13_relu needs backward computation.\nI0821 08:59:08.904450 32502 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0821 08:59:08.904464 32502 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0821 08:59:08.904469 32502 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0821 08:59:08.904476 32502 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0821 08:59:08.904481 32502 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0821 08:59:08.904487 32502 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0821 08:59:08.904492 32502 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0821 08:59:08.904498 32502 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0821 08:59:08.904505 32502 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0821 08:59:08.904510 32502 net.cpp:226] L2_b12_relu needs backward computation.\nI0821 08:59:08.904515 32502 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0821 08:59:08.904522 32502 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0821 08:59:08.904527 32502 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0821 08:59:08.904533 32502 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0821 08:59:08.904539 
32502 net.cpp:226] L2_b12_cbr1_relu needs backward computation.\nI0821 08:59:08.904544 32502 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0821 08:59:08.904551 32502 net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0821 08:59:08.904556 32502 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0821 08:59:08.904562 32502 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0821 08:59:08.904567 32502 net.cpp:226] L2_b11_relu needs backward computation.\nI0821 08:59:08.904573 32502 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0821 08:59:08.904579 32502 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0821 08:59:08.904585 32502 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0821 08:59:08.904592 32502 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0821 08:59:08.904597 32502 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0821 08:59:08.904603 32502 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0821 08:59:08.904608 32502 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0821 08:59:08.904613 32502 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0821 08:59:08.904619 32502 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0821 08:59:08.904625 32502 net.cpp:226] L2_b10_relu needs backward computation.\nI0821 08:59:08.904630 32502 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0821 08:59:08.904637 32502 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0821 08:59:08.904642 32502 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0821 08:59:08.904649 32502 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0821 08:59:08.904654 32502 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0821 08:59:08.904660 32502 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0821 08:59:08.904665 32502 net.cpp:226] L2_b10_cbr1_bn needs 
backward computation.\nI0821 08:59:08.904671 32502 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0821 08:59:08.904676 32502 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:59:08.904682 32502 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:59:08.904688 32502 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:59:08.904695 32502 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:59:08.904700 32502 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:59:08.904706 32502 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:59:08.904711 32502 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:59:08.904717 32502 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:59:08.904722 32502 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:59:08.904727 32502 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:59:08.904739 32502 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:59:08.904752 32502 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:59:08.904757 32502 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:59:08.904764 32502 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:59:08.904770 32502 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:59:08.904777 32502 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:59:08.904781 32502 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:59:08.904788 32502 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:59:08.904793 32502 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:59:08.904798 32502 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:59:08.904803 32502 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 
08:59:08.904809 32502 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:59:08.904815 32502 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:59:08.904822 32502 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:59:08.904827 32502 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:59:08.904834 32502 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:59:08.904839 32502 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:59:08.904844 32502 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:59:08.904850 32502 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:59:08.904855 32502 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:59:08.904861 32502 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:59:08.904867 32502 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:59:08.904873 32502 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:59:08.904881 32502 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:59:08.904886 32502 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:59:08.904894 32502 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:59:08.904901 32502 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:59:08.904906 32502 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:59:08.904912 32502 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:59:08.904918 32502 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:59:08.904924 32502 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:59:08.904929 32502 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:59:08.904935 32502 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0821 08:59:08.904942 32502 net.cpp:226] L2_b5_cbr2_scale needs backward 
computation.\nI0821 08:59:08.904947 32502 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:59:08.904953 32502 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:59:08.904960 32502 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:59:08.904965 32502 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:59:08.904971 32502 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:59:08.904978 32502 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:59:08.904983 32502 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:59:08.904989 32502 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:59:08.904995 32502 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:59:08.905001 32502 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:59:08.905007 32502 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:59:08.905014 32502 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:59:08.905019 32502 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:59:08.905030 32502 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:59:08.905036 32502 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:59:08.905041 32502 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:59:08.905048 32502 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:59:08.905055 32502 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:59:08.905061 32502 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:59:08.905066 32502 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:59:08.905072 32502 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:59:08.905078 32502 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0821 08:59:08.905084 32502 net.cpp:226] 
L2_b3_cbr1_relu needs backward computation.\nI0821 08:59:08.905089 32502 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:59:08.905095 32502 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:59:08.905102 32502 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:59:08.905107 32502 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:59:08.905113 32502 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:59:08.905119 32502 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:59:08.905127 32502 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:59:08.905133 32502 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:59:08.905138 32502 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:59:08.905143 32502 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:59:08.905149 32502 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:59:08.905154 32502 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:59:08.905160 32502 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:59:08.905166 32502 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:59:08.905172 32502 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:59:08.905179 32502 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:59:08.905184 32502 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:59:08.905190 32502 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:59:08.905197 32502 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:59:08.905203 32502 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:59:08.905210 32502 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:59:08.905215 32502 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:59:08.905220 32502 
net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:59:08.905226 32502 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:59:08.905232 32502 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:59:08.905238 32502 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:59:08.905243 32502 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0821 08:59:08.905249 32502 net.cpp:226] L1_b18_relu needs backward computation.\nI0821 08:59:08.905256 32502 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0821 08:59:08.905261 32502 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0821 08:59:08.905267 32502 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0821 08:59:08.905273 32502 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0821 08:59:08.905279 32502 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0821 08:59:08.905284 32502 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0821 08:59:08.905289 32502 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0821 08:59:08.905295 32502 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0821 08:59:08.905302 32502 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0821 08:59:08.905313 32502 net.cpp:226] L1_b17_relu needs backward computation.\nI0821 08:59:08.905318 32502 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0821 08:59:08.905324 32502 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0821 08:59:08.905330 32502 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0821 08:59:08.905336 32502 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0821 08:59:08.905342 32502 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0821 08:59:08.905347 32502 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0821 08:59:08.905354 32502 net.cpp:226] L1_b17_cbr1_bn needs backward 
computation.\nI0821 08:59:08.905359 32502 net.cpp:226] L1_b17_cbr1_conv needs backward computation.\nI0821 08:59:08.905365 32502 net.cpp:226] L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0821 08:59:08.905371 32502 net.cpp:226] L1_b16_relu needs backward computation.\nI0821 08:59:08.905376 32502 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0821 08:59:08.905383 32502 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0821 08:59:08.905388 32502 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0821 08:59:08.905395 32502 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0821 08:59:08.905400 32502 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0821 08:59:08.905405 32502 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0821 08:59:08.905411 32502 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0821 08:59:08.905417 32502 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0821 08:59:08.905422 32502 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0821 08:59:08.905428 32502 net.cpp:226] L1_b15_relu needs backward computation.\nI0821 08:59:08.905434 32502 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0821 08:59:08.905441 32502 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0821 08:59:08.905447 32502 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0821 08:59:08.905452 32502 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0821 08:59:08.905458 32502 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0821 08:59:08.905463 32502 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0821 08:59:08.905469 32502 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0821 08:59:08.905474 32502 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0821 08:59:08.905480 32502 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward 
computation.\nI0821 08:59:08.905486 32502 net.cpp:226] L1_b14_relu needs backward computation.\nI0821 08:59:08.905493 32502 net.cpp:226] L1_b14_sum_eltwise needs backward computation.\nI0821 08:59:08.905498 32502 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0821 08:59:08.905503 32502 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0821 08:59:08.905509 32502 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0821 08:59:08.905515 32502 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0821 08:59:08.905520 32502 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0821 08:59:08.905526 32502 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0821 08:59:08.905532 32502 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0821 08:59:08.905539 32502 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0821 08:59:08.905544 32502 net.cpp:226] L1_b13_relu needs backward computation.\nI0821 08:59:08.905550 32502 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0821 08:59:08.905556 32502 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0821 08:59:08.905562 32502 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0821 08:59:08.905568 32502 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0821 08:59:08.905573 32502 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0821 08:59:08.905580 32502 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0821 08:59:08.905589 32502 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0821 08:59:08.905596 32502 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0821 08:59:08.905601 32502 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0821 08:59:08.905611 32502 net.cpp:226] L1_b12_relu needs backward computation.\nI0821 08:59:08.905616 32502 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0821 08:59:08.905623 32502 
net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0821 08:59:08.905629 32502 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0821 08:59:08.905634 32502 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0821 08:59:08.905640 32502 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0821 08:59:08.905647 32502 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0821 08:59:08.905652 32502 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0821 08:59:08.905658 32502 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0821 08:59:08.905664 32502 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0821 08:59:08.905670 32502 net.cpp:226] L1_b11_relu needs backward computation.\nI0821 08:59:08.905675 32502 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0821 08:59:08.905683 32502 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0821 08:59:08.905689 32502 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0821 08:59:08.905694 32502 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0821 08:59:08.905699 32502 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0821 08:59:08.905705 32502 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0821 08:59:08.905710 32502 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0821 08:59:08.905716 32502 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0821 08:59:08.905722 32502 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0821 08:59:08.905728 32502 net.cpp:226] L1_b10_relu needs backward computation.\nI0821 08:59:08.905735 32502 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0821 08:59:08.905741 32502 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0821 08:59:08.905752 32502 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0821 08:59:08.905760 32502 net.cpp:226] L1_b10_cbr2_conv needs backward 
computation.\nI0821 08:59:08.905766 32502 net.cpp:226] L1_b10_cbr1_relu needs backward computation.\nI0821 08:59:08.905771 32502 net.cpp:226] L1_b10_cbr1_scale needs backward computation.\nI0821 08:59:08.905776 32502 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0821 08:59:08.905782 32502 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0821 08:59:08.905788 32502 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:59:08.905794 32502 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:59:08.905800 32502 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:59:08.905807 32502 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:59:08.905812 32502 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:59:08.905818 32502 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:59:08.905824 32502 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:59:08.905829 32502 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:59:08.905835 32502 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:59:08.905841 32502 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:59:08.905848 32502 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:59:08.905853 32502 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:59:08.905858 32502 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:59:08.905864 32502 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:59:08.905875 32502 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:59:08.905882 32502 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:59:08.905889 32502 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:59:08.905894 32502 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 08:59:08.905900 32502 net.cpp:226] 
L1_b8_cbr1_bn needs backward computation.\nI0821 08:59:08.905905 32502 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:59:08.905911 32502 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:59:08.905917 32502 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:59:08.905922 32502 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:59:08.905930 32502 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:59:08.905936 32502 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:59:08.905941 32502 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:59:08.905947 32502 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:59:08.905953 32502 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:59:08.905959 32502 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:59:08.905964 32502 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:59:08.905972 32502 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:59:08.905977 32502 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:59:08.905982 32502 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:59:08.905989 32502 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:59:08.905995 32502 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:59:08.906002 32502 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:59:08.906008 32502 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:59:08.906013 32502 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:59:08.906018 32502 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:59:08.906024 32502 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:59:08.906030 32502 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward 
computation.\nI0821 08:59:08.906036 32502 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:59:08.906042 32502 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:59:08.906049 32502 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:59:08.906054 32502 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:59:08.906061 32502 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:59:08.906067 32502 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:59:08.906072 32502 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:59:08.906078 32502 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:59:08.906085 32502 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:59:08.906090 32502 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:59:08.906096 32502 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:59:08.906102 32502 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:59:08.906108 32502 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:59:08.906114 32502 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:59:08.906121 32502 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:59:08.906126 32502 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:59:08.906131 32502 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:59:08.906137 32502 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:59:08.906143 32502 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:59:08.906149 32502 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:59:08.906160 32502 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:59:08.906167 32502 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0821 08:59:08.906173 32502 net.cpp:226] 
L1_b3_cbr2_scale needs backward computation.\nI0821 08:59:08.906178 32502 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:59:08.906185 32502 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:59:08.906191 32502 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:59:08.906196 32502 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:59:08.906203 32502 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:59:08.906208 32502 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:59:08.906214 32502 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:59:08.906220 32502 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:59:08.906225 32502 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:59:08.906232 32502 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:59:08.906239 32502 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:59:08.906244 32502 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:59:08.906250 32502 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:59:08.906256 32502 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:59:08.906261 32502 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:59:08.906267 32502 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:59:08.906273 32502 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:59:08.906280 32502 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:59:08.906286 32502 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:59:08.906292 32502 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:59:08.906298 32502 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:59:08.906304 32502 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0821 08:59:08.906309 
32502 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:59:08.906316 32502 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:59:08.906322 32502 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:59:08.906327 32502 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:59:08.906332 32502 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:59:08.906338 32502 net.cpp:226] pre_relu needs backward computation.\nI0821 08:59:08.906343 32502 net.cpp:226] pre_scale needs backward computation.\nI0821 08:59:08.906348 32502 net.cpp:226] pre_bn needs backward computation.\nI0821 08:59:08.906358 32502 net.cpp:226] pre_conv needs backward computation.\nI0821 08:59:08.906364 32502 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:59:08.906371 32502 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:59:08.906375 32502 net.cpp:270] This network produces output accuracy\nI0821 08:59:08.906383 32502 net.cpp:270] This network produces output loss\nI0821 08:59:08.907119 32502 net.cpp:283] Network initialization done.\nI0821 08:59:08.924100 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:08.924160 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:08.924216 32502 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:59:08.924942 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:59:08.924962 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:59:08.924973 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:59:08.924991 32502 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:59:08.925002 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:59:08.925011 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:59:08.925021 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:59:08.925030 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:59:08.925040 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:59:08.925050 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:59:08.925058 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:59:08.925066 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:59:08.925076 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:59:08.925086 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:59:08.925094 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:59:08.925103 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:59:08.925112 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:59:08.925122 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:59:08.925130 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:59:08.925139 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:59:08.925148 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr1_bn\nI0821 08:59:08.925158 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr2_bn\nI0821 08:59:08.925165 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr1_bn\nI0821 08:59:08.925174 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr2_bn\nI0821 08:59:08.925184 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr1_bn\nI0821 08:59:08.925192 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr2_bn\nI0821 08:59:08.925201 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr1_bn\nI0821 08:59:08.925209 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr2_bn\nI0821 08:59:08.925217 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr1_bn\nI0821 08:59:08.925226 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr2_bn\nI0821 08:59:08.925235 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr1_bn\nI0821 08:59:08.925245 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr2_bn\nI0821 08:59:08.925253 32502 net.cpp:322] The NetState phase (1) 
differed from the phase (0) specified by a rule in layer L1_b16_cbr1_bn\nI0821 08:59:08.925268 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b16_cbr2_bn\nI0821 08:59:08.925278 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr1_bn\nI0821 08:59:08.925287 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr2_bn\nI0821 08:59:08.925297 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr1_bn\nI0821 08:59:08.925304 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr2_bn\nI0821 08:59:08.925313 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:59:08.925323 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:59:08.925334 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:59:08.925343 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:59:08.925353 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:59:08.925360 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:59:08.925369 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:59:08.925379 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:59:08.925387 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 
08:59:08.925395 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:59:08.925403 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:59:08.925412 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:59:08.925420 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:59:08.925429 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:59:08.925438 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:59:08.925447 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:59:08.925457 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:59:08.925464 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:59:08.925473 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr1_bn\nI0821 08:59:08.925482 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr2_bn\nI0821 08:59:08.925492 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr1_bn\nI0821 08:59:08.925499 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr2_bn\nI0821 08:59:08.925508 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b12_cbr1_bn\nI0821 08:59:08.925518 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) 
specified by a rule in layer L2_b12_cbr2_bn\nI0821 08:59:08.925526 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr1_bn\nI0821 08:59:08.925535 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr2_bn\nI0821 08:59:08.925551 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr1_bn\nI0821 08:59:08.925560 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr2_bn\nI0821 08:59:08.925568 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr1_bn\nI0821 08:59:08.925576 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr2_bn\nI0821 08:59:08.925585 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr1_bn\nI0821 08:59:08.925593 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr2_bn\nI0821 08:59:08.925603 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr1_bn\nI0821 08:59:08.925611 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr2_bn\nI0821 08:59:08.925619 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr1_bn\nI0821 08:59:08.925627 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr2_bn\nI0821 08:59:08.925637 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:59:08.925645 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:59:08.925657 32502 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:59:08.925667 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:59:08.925675 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:59:08.925684 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:59:08.925693 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:59:08.925700 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:59:08.925709 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:59:08.925719 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:59:08.925727 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:59:08.925735 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:59:08.925750 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:59:08.925760 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:59:08.925770 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:59:08.925778 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:59:08.925787 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b9_cbr1_bn\nI0821 08:59:08.925796 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:59:08.925804 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr1_bn\nI0821 08:59:08.925812 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr2_bn\nI0821 08:59:08.925827 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr1_bn\nI0821 08:59:08.925837 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr2_bn\nI0821 08:59:08.925846 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr1_bn\nI0821 08:59:08.925854 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr2_bn\nI0821 08:59:08.925863 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr1_bn\nI0821 08:59:08.925871 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr2_bn\nI0821 08:59:08.925880 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr1_bn\nI0821 08:59:08.925889 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr2_bn\nI0821 08:59:08.925899 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr1_bn\nI0821 08:59:08.925906 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr2_bn\nI0821 08:59:08.925915 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b16_cbr1_bn\nI0821 08:59:08.925923 32502 net.cpp:322] The NetState phase (1) 
differed from the phase (0) specified by a rule in layer L3_b16_cbr2_bn\nI0821 08:59:08.925932 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr1_bn\nI0821 08:59:08.925941 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr2_bn\nI0821 08:59:08.925951 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr1_bn\nI0821 08:59:08.925958 32502 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr2_bn\nI0821 08:59:08.929137 32502 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: 
\"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  
}\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  
type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  
type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n    
  type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  
}\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n 
 bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: 
\"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: 
\"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n  
    type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 
1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  
top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: 
\"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: 
\"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_si\nI0821 08:59:08.932354 32502 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:59:08.932582 32502 net.cpp:100] Creating Layer dataLayer\nI0821 08:59:08.932601 32502 net.cpp:408] dataLayer -> data_top\nI0821 08:59:08.932621 32502 net.cpp:408] dataLayer -> 
label\nI0821 08:59:08.932633 32502 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:59:08.944706 32510 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:59:08.944965 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:08.951769 32502 net.cpp:150] Setting up dataLayer\nI0821 08:59:08.951792 32502 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0821 08:59:08.951800 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.951805 32502 net.cpp:165] Memory required for data: 1229200\nI0821 08:59:08.951812 32502 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:59:08.951827 32502 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:59:08.951851 32502 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:59:08.951863 32502 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:59:08.951875 32502 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:59:08.951948 32502 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:59:08.951959 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.951966 32502 net.cpp:157] Top shape: 100 (100)\nI0821 08:59:08.951970 32502 net.cpp:165] Memory required for data: 1230000\nI0821 08:59:08.951977 32502 layer_factory.hpp:77] Creating layer pre_conv\nI0821 08:59:08.951997 32502 net.cpp:100] Creating Layer pre_conv\nI0821 08:59:08.952003 32502 net.cpp:434] pre_conv <- data_top\nI0821 08:59:08.952015 32502 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:59:08.952581 32502 net.cpp:150] Setting up pre_conv\nI0821 08:59:08.952600 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.952605 32502 net.cpp:165] Memory required for data: 7783600\nI0821 08:59:08.952620 32502 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:59:08.952636 32502 net.cpp:100] Creating Layer pre_bn\nI0821 08:59:08.952643 32502 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:59:08.952651 32502 
net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:59:08.953037 32502 net.cpp:150] Setting up pre_bn\nI0821 08:59:08.953052 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.953058 32502 net.cpp:165] Memory required for data: 14337200\nI0821 08:59:08.953075 32502 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:08.953088 32502 net.cpp:100] Creating Layer pre_scale\nI0821 08:59:08.953094 32502 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:59:08.953102 32502 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:59:08.953222 32502 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:59:08.953413 32502 net.cpp:150] Setting up pre_scale\nI0821 08:59:08.953428 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.953434 32502 net.cpp:165] Memory required for data: 20890800\nI0821 08:59:08.953445 32502 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:59:08.953454 32502 net.cpp:100] Creating Layer pre_relu\nI0821 08:59:08.953459 32502 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:59:08.953470 32502 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:59:08.953483 32502 net.cpp:150] Setting up pre_relu\nI0821 08:59:08.953491 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.953495 32502 net.cpp:165] Memory required for data: 27444400\nI0821 08:59:08.953500 32502 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:59:08.953507 32502 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:59:08.953512 32502 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:59:08.953522 32502 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:59:08.953532 32502 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:59:08.953591 32502 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:59:08.953601 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.953610 32502 net.cpp:157] Top 
shape: 100 16 32 32 (1638400)\nI0821 08:59:08.953615 32502 net.cpp:165] Memory required for data: 40551600\nI0821 08:59:08.953620 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:59:08.953632 32502 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:59:08.953637 32502 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:59:08.953649 32502 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:59:08.954080 32502 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:59:08.954097 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.954104 32502 net.cpp:165] Memory required for data: 47105200\nI0821 08:59:08.954118 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:59:08.954129 32502 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:59:08.954134 32502 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:59:08.954146 32502 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:59:08.954587 32502 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:59:08.954605 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.954610 32502 net.cpp:165] Memory required for data: 53658800\nI0821 08:59:08.954624 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:08.954634 32502 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:59:08.954639 32502 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:59:08.954648 32502 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.954789 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:59:08.954982 32502 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:59:08.954996 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.955001 32502 net.cpp:165] Memory required for data: 60212400\nI0821 08:59:08.955011 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:59:08.955023 32502 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:59:08.955029 
32502 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:59:08.955036 32502 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:59:08.955049 32502 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:59:08.955056 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.955061 32502 net.cpp:165] Memory required for data: 66766000\nI0821 08:59:08.955066 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:59:08.955080 32502 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:59:08.955088 32502 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:59:08.955101 32502 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:59:08.955534 32502 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:59:08.955549 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.955554 32502 net.cpp:165] Memory required for data: 73319600\nI0821 08:59:08.955564 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:59:08.955574 32502 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:59:08.955582 32502 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:59:08.955595 32502 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:59:08.955932 32502 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:59:08.955950 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.955955 32502 net.cpp:165] Memory required for data: 79873200\nI0821 08:59:08.955977 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:08.955987 32502 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:59:08.955993 32502 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:59:08.956001 32502 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:59:08.956068 32502 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:59:08.956254 32502 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:59:08.956269 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.956274 32502 net.cpp:165] Memory required for data: 86426800\nI0821 08:59:08.956282 32502 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:59:08.956298 32502 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:59:08.956305 32502 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:59:08.956311 32502 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:59:08.956320 32502 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:59:08.956358 32502 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:59:08.956368 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.956380 32502 net.cpp:165] Memory required for data: 92980400\nI0821 08:59:08.956387 32502 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:59:08.956394 32502 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:59:08.956399 32502 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:59:08.956409 32502 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:59:08.956419 32502 net.cpp:150] Setting up L1_b1_relu\nI0821 08:59:08.956426 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.956430 32502 net.cpp:165] Memory required for data: 99534000\nI0821 08:59:08.956435 32502 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.956445 32502 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.956450 32502 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:59:08.956459 32502 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:08.956467 32502 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:08.956519 32502 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:59:08.956532 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.956537 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.956542 32502 net.cpp:165] Memory required for data: 112641200\nI0821 08:59:08.956547 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:59:08.956562 32502 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:59:08.956568 32502 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:59:08.956576 32502 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:59:08.956990 32502 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:59:08.957005 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.957010 32502 net.cpp:165] Memory required for data: 119194800\nI0821 08:59:08.957020 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:59:08.957031 32502 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:59:08.957037 32502 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:59:08.957046 32502 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:59:08.957514 32502 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:59:08.957528 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.957533 32502 net.cpp:165] Memory required for data: 125748400\nI0821 08:59:08.957545 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:08.957553 32502 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:59:08.957559 32502 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:59:08.957567 32502 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.957630 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:59:08.957803 32502 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:59:08.957816 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.957821 32502 net.cpp:165] Memory required for data: 132302000\nI0821 08:59:08.957831 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:59:08.957839 32502 net.cpp:100] 
Creating Layer L1_b2_cbr1_relu\nI0821 08:59:08.957845 32502 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:59:08.957855 32502 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:59:08.957865 32502 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:59:08.957872 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.957877 32502 net.cpp:165] Memory required for data: 138855600\nI0821 08:59:08.957882 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:59:08.957895 32502 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:59:08.957901 32502 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:59:08.957909 32502 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:59:08.958277 32502 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:59:08.958292 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.958297 32502 net.cpp:165] Memory required for data: 145409200\nI0821 08:59:08.958307 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:59:08.958322 32502 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:59:08.958328 32502 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:59:08.958336 32502 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:59:08.958622 32502 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:59:08.958639 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.958644 32502 net.cpp:165] Memory required for data: 151962800\nI0821 08:59:08.958660 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:08.958672 32502 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:59:08.958679 32502 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:59:08.958690 32502 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:59:08.958772 32502 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:59:08.958966 32502 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:59:08.958981 32502 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.958986 32502 net.cpp:165] Memory required for data: 158516400\nI0821 08:59:08.958998 32502 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:59:08.959012 32502 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:59:08.959018 32502 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:59:08.959025 32502 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:59:08.959035 32502 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:59:08.959079 32502 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:59:08.959089 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.959095 32502 net.cpp:165] Memory required for data: 165070000\nI0821 08:59:08.959100 32502 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:59:08.959110 32502 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:59:08.959116 32502 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:59:08.959123 32502 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:59:08.959141 32502 net.cpp:150] Setting up L1_b2_relu\nI0821 08:59:08.959151 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.959157 32502 net.cpp:165] Memory required for data: 171623600\nI0821 08:59:08.959162 32502 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.959169 32502 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.959175 32502 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:59:08.959185 32502 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:08.959198 32502 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:08.959250 32502 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:59:08.959260 
32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.959270 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.959275 32502 net.cpp:165] Memory required for data: 184730800\nI0821 08:59:08.959280 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:59:08.959295 32502 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:59:08.959301 32502 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:59:08.959314 32502 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:59:08.959763 32502 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:59:08.959779 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.959784 32502 net.cpp:165] Memory required for data: 191284400\nI0821 08:59:08.959794 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:59:08.959803 32502 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:59:08.959816 32502 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:59:08.959830 32502 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:59:08.960317 32502 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:59:08.960335 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.960340 32502 net.cpp:165] Memory required for data: 197838000\nI0821 08:59:08.960355 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:08.960364 32502 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:59:08.960371 32502 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:59:08.960384 32502 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.960456 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:59:08.960650 32502 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:59:08.960667 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.960674 32502 net.cpp:165] Memory required for data: 204391600\nI0821 08:59:08.960683 32502 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_relu\nI0821 08:59:08.960695 32502 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:59:08.960701 32502 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:59:08.960708 32502 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:59:08.960718 32502 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:59:08.960726 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.960731 32502 net.cpp:165] Memory required for data: 210945200\nI0821 08:59:08.960736 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:59:08.960758 32502 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:59:08.960770 32502 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:59:08.960783 32502 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:59:08.961196 32502 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:59:08.961213 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.961218 32502 net.cpp:165] Memory required for data: 217498800\nI0821 08:59:08.961230 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:59:08.961247 32502 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:59:08.961256 32502 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:59:08.961266 32502 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:59:08.961591 32502 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:59:08.961604 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.961609 32502 net.cpp:165] Memory required for data: 224052400\nI0821 08:59:08.961623 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:08.961632 32502 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:59:08.961638 32502 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:59:08.961647 32502 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:59:08.961720 32502 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:59:08.961920 32502 net.cpp:150] 
Setting up L1_b3_cbr2_scale\nI0821 08:59:08.961935 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.961940 32502 net.cpp:165] Memory required for data: 230606000\nI0821 08:59:08.961948 32502 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:59:08.961963 32502 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:59:08.961971 32502 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:59:08.961977 32502 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:59:08.961985 32502 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:59:08.962029 32502 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:59:08.962039 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.962045 32502 net.cpp:165] Memory required for data: 237159600\nI0821 08:59:08.962052 32502 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:59:08.962060 32502 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:59:08.962066 32502 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:59:08.962086 32502 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:59:08.962097 32502 net.cpp:150] Setting up L1_b3_relu\nI0821 08:59:08.962105 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.962110 32502 net.cpp:165] Memory required for data: 243713200\nI0821 08:59:08.962116 32502 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.962124 32502 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.962129 32502 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:59:08.962141 32502 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:08.962151 32502 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:08.962208 32502 net.cpp:150] Setting up 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:59:08.962218 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.962224 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.962229 32502 net.cpp:165] Memory required for data: 256820400\nI0821 08:59:08.962234 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:59:08.962249 32502 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:59:08.962255 32502 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:59:08.962266 32502 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:59:08.962693 32502 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:59:08.962708 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.962716 32502 net.cpp:165] Memory required for data: 263374000\nI0821 08:59:08.962726 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:59:08.962738 32502 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:59:08.962754 32502 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:59:08.962767 32502 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:59:08.963089 32502 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:59:08.963105 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.963110 32502 net.cpp:165] Memory required for data: 269927600\nI0821 08:59:08.963122 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:08.963134 32502 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:59:08.963140 32502 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:59:08.963151 32502 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.963222 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:59:08.963414 32502 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:59:08.963428 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.963433 32502 net.cpp:165] Memory required for data: 
276481200\nI0821 08:59:08.963443 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:59:08.963452 32502 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:59:08.963459 32502 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:59:08.963466 32502 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:59:08.963479 32502 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:59:08.963486 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.963491 32502 net.cpp:165] Memory required for data: 283034800\nI0821 08:59:08.963496 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:59:08.963510 32502 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:59:08.963521 32502 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:59:08.963532 32502 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:59:08.963939 32502 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:59:08.963953 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.963958 32502 net.cpp:165] Memory required for data: 289588400\nI0821 08:59:08.963968 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:59:08.963979 32502 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:59:08.963992 32502 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:59:08.964001 32502 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:59:08.964293 32502 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:59:08.964306 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964311 32502 net.cpp:165] Memory required for data: 296142000\nI0821 08:59:08.964321 32502 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:08.964330 32502 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:59:08.964336 32502 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:59:08.964347 32502 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:59:08.964407 32502 
layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:59:08.964593 32502 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:59:08.964612 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964617 32502 net.cpp:165] Memory required for data: 302695600\nI0821 08:59:08.964627 32502 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:59:08.964635 32502 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:59:08.964642 32502 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:59:08.964648 32502 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:59:08.964656 32502 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:59:08.964695 32502 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:59:08.964709 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964714 32502 net.cpp:165] Memory required for data: 309249200\nI0821 08:59:08.964718 32502 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:59:08.964725 32502 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:59:08.964735 32502 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:59:08.964748 32502 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:59:08.964759 32502 net.cpp:150] Setting up L1_b4_relu\nI0821 08:59:08.964766 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964771 32502 net.cpp:165] Memory required for data: 315802800\nI0821 08:59:08.964776 32502 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.964783 32502 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.964788 32502 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:59:08.964798 32502 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:08.964808 32502 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> 
L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:08.964859 32502 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:59:08.964870 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964877 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.964882 32502 net.cpp:165] Memory required for data: 328910000\nI0821 08:59:08.964887 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:59:08.964901 32502 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:59:08.964908 32502 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:59:08.964917 32502 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:59:08.965283 32502 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:59:08.965297 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.965302 32502 net.cpp:165] Memory required for data: 335463600\nI0821 08:59:08.965327 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:59:08.965337 32502 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:59:08.965343 32502 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:59:08.965354 32502 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:59:08.965633 32502 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:59:08.965647 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.965658 32502 net.cpp:165] Memory required for data: 342017200\nI0821 08:59:08.965669 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:08.965678 32502 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:59:08.965684 32502 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:59:08.965692 32502 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.965765 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:59:08.965931 32502 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:59:08.965945 32502 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0821 08:59:08.965950 32502 net.cpp:165] Memory required for data: 348570800\nI0821 08:59:08.965960 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:59:08.965967 32502 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:59:08.965973 32502 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:59:08.965983 32502 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:59:08.965993 32502 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:59:08.966001 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.966006 32502 net.cpp:165] Memory required for data: 355124400\nI0821 08:59:08.966009 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:59:08.966022 32502 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:59:08.966028 32502 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:59:08.966039 32502 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:59:08.966398 32502 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:59:08.966413 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.966418 32502 net.cpp:165] Memory required for data: 361678000\nI0821 08:59:08.966426 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:59:08.966454 32502 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:59:08.966462 32502 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:59:08.966471 32502 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:59:08.966768 32502 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:59:08.966784 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.966790 32502 net.cpp:165] Memory required for data: 368231600\nI0821 08:59:08.966800 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:08.966809 32502 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:59:08.966815 32502 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:59:08.966823 32502 net.cpp:395] 
L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:59:08.966886 32502 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:59:08.967080 32502 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:59:08.967095 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967100 32502 net.cpp:165] Memory required for data: 374785200\nI0821 08:59:08.967109 32502 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:59:08.967118 32502 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:59:08.967124 32502 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:59:08.967131 32502 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:59:08.967142 32502 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:59:08.967177 32502 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:59:08.967190 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967195 32502 net.cpp:165] Memory required for data: 381338800\nI0821 08:59:08.967200 32502 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:59:08.967211 32502 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:59:08.967217 32502 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:59:08.967224 32502 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:59:08.967234 32502 net.cpp:150] Setting up L1_b5_relu\nI0821 08:59:08.967242 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967245 32502 net.cpp:165] Memory required for data: 387892400\nI0821 08:59:08.967262 32502 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.967269 32502 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.967274 32502 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:59:08.967283 32502 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 
08:59:08.967291 32502 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:08.967344 32502 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:59:08.967356 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967362 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967367 32502 net.cpp:165] Memory required for data: 400999600\nI0821 08:59:08.967371 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:59:08.967386 32502 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:59:08.967391 32502 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:59:08.967401 32502 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:59:08.967772 32502 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:59:08.967787 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.967792 32502 net.cpp:165] Memory required for data: 407553200\nI0821 08:59:08.967800 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:59:08.967813 32502 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:59:08.967819 32502 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:59:08.967828 32502 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:59:08.968109 32502 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:59:08.968123 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.968128 32502 net.cpp:165] Memory required for data: 414106800\nI0821 08:59:08.968139 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:08.968147 32502 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:59:08.968153 32502 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:59:08.968161 32502 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.968225 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:59:08.968389 32502 net.cpp:150] Setting 
up L1_b6_cbr1_scale\nI0821 08:59:08.968403 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.968408 32502 net.cpp:165] Memory required for data: 420660400\nI0821 08:59:08.968418 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:59:08.968426 32502 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:59:08.968432 32502 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:59:08.968443 32502 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:59:08.968452 32502 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:59:08.968461 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.968464 32502 net.cpp:165] Memory required for data: 427214000\nI0821 08:59:08.968469 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:59:08.968483 32502 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:59:08.968488 32502 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:59:08.968497 32502 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:59:08.968861 32502 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:59:08.968876 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.968881 32502 net.cpp:165] Memory required for data: 433767600\nI0821 08:59:08.968890 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:59:08.968900 32502 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:59:08.968906 32502 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:59:08.968916 32502 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:59:08.969208 32502 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:59:08.969224 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969238 32502 net.cpp:165] Memory required for data: 440321200\nI0821 08:59:08.969247 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:08.969256 32502 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:59:08.969262 32502 net.cpp:434] 
L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:59:08.969270 32502 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:59:08.969331 32502 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:59:08.969497 32502 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:59:08.969511 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969516 32502 net.cpp:165] Memory required for data: 446874800\nI0821 08:59:08.969525 32502 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:59:08.969547 32502 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:59:08.969553 32502 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:59:08.969560 32502 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:59:08.969568 32502 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:59:08.969607 32502 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:59:08.969617 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969622 32502 net.cpp:165] Memory required for data: 453428400\nI0821 08:59:08.969627 32502 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:59:08.969635 32502 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:59:08.969641 32502 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:59:08.969647 32502 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:59:08.969657 32502 net.cpp:150] Setting up L1_b6_relu\nI0821 08:59:08.969665 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969668 32502 net.cpp:165] Memory required for data: 459982000\nI0821 08:59:08.969673 32502 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.969681 32502 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.969686 32502 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:59:08.969696 32502 net.cpp:408] 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:08.969705 32502 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:08.969761 32502 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:59:08.969774 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969781 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.969785 32502 net.cpp:165] Memory required for data: 473089200\nI0821 08:59:08.969790 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:59:08.969805 32502 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:59:08.969812 32502 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:59:08.969821 32502 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:59:08.970183 32502 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:59:08.970197 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.970202 32502 net.cpp:165] Memory required for data: 479642800\nI0821 08:59:08.970211 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:59:08.970223 32502 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:59:08.970230 32502 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:59:08.970238 32502 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:59:08.970544 32502 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:59:08.970559 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.970564 32502 net.cpp:165] Memory required for data: 486196400\nI0821 08:59:08.970576 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:08.970584 32502 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:59:08.970590 32502 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:59:08.970602 32502 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.970670 32502 
layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:59:08.970844 32502 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:59:08.970860 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.970865 32502 net.cpp:165] Memory required for data: 492750000\nI0821 08:59:08.970875 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:59:08.970883 32502 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:59:08.970890 32502 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:59:08.970896 32502 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:59:08.970906 32502 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:59:08.970913 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.970917 32502 net.cpp:165] Memory required for data: 499303600\nI0821 08:59:08.970922 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:59:08.970937 32502 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:59:08.970942 32502 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:59:08.970953 32502 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:59:08.971321 32502 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:59:08.971336 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.971341 32502 net.cpp:165] Memory required for data: 505857200\nI0821 08:59:08.971350 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:59:08.971362 32502 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:59:08.971369 32502 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:59:08.971379 32502 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:59:08.971658 32502 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:59:08.971671 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.971676 32502 net.cpp:165] Memory required for data: 512410800\nI0821 08:59:08.971688 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:08.971696 
32502 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:59:08.971702 32502 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:59:08.971710 32502 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:59:08.971781 32502 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:59:08.971946 32502 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:59:08.971961 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.971966 32502 net.cpp:165] Memory required for data: 518964400\nI0821 08:59:08.971974 32502 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:59:08.971987 32502 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:59:08.971993 32502 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:59:08.972000 32502 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:59:08.972007 32502 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:59:08.972046 32502 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:59:08.972057 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.972062 32502 net.cpp:165] Memory required for data: 525518000\nI0821 08:59:08.972069 32502 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:59:08.972075 32502 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:59:08.972081 32502 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:59:08.972091 32502 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:59:08.972101 32502 net.cpp:150] Setting up L1_b7_relu\nI0821 08:59:08.972108 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.972112 32502 net.cpp:165] Memory required for data: 532071600\nI0821 08:59:08.972117 32502 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.972124 32502 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.972129 32502 net.cpp:434] 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:59:08.972139 32502 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:08.972157 32502 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:08.972206 32502 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:59:08.972216 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.972223 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.972228 32502 net.cpp:165] Memory required for data: 545178800\nI0821 08:59:08.972232 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:59:08.972247 32502 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:59:08.972254 32502 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:59:08.972262 32502 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:59:08.972640 32502 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:59:08.972654 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.972659 32502 net.cpp:165] Memory required for data: 551732400\nI0821 08:59:08.972668 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:59:08.972681 32502 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:59:08.972687 32502 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:59:08.972698 32502 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:59:08.972996 32502 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:59:08.973011 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.973016 32502 net.cpp:165] Memory required for data: 558286000\nI0821 08:59:08.973026 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:08.973036 32502 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:59:08.973042 32502 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:59:08.973052 
32502 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.973114 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:59:08.973278 32502 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:59:08.973291 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.973296 32502 net.cpp:165] Memory required for data: 564839600\nI0821 08:59:08.973305 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:59:08.973316 32502 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:59:08.973323 32502 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:59:08.973330 32502 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:59:08.973340 32502 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:59:08.973347 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.973351 32502 net.cpp:165] Memory required for data: 571393200\nI0821 08:59:08.973356 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:59:08.973372 32502 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:59:08.973378 32502 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:59:08.973389 32502 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:59:08.973759 32502 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:59:08.973774 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.973779 32502 net.cpp:165] Memory required for data: 577946800\nI0821 08:59:08.973788 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:59:08.973801 32502 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:59:08.973808 32502 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:59:08.973819 32502 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:59:08.974103 32502 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:59:08.974117 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974123 32502 net.cpp:165] Memory required for data: 584500400\nI0821 
08:59:08.974133 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:08.974140 32502 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:59:08.974148 32502 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:59:08.974155 32502 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:59:08.974226 32502 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:59:08.974414 32502 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:59:08.974429 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974434 32502 net.cpp:165] Memory required for data: 591054000\nI0821 08:59:08.974444 32502 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:59:08.974457 32502 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:59:08.974463 32502 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:59:08.974470 32502 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:59:08.974478 32502 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:59:08.974519 32502 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:59:08.974530 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974535 32502 net.cpp:165] Memory required for data: 597607600\nI0821 08:59:08.974540 32502 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:59:08.974548 32502 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:59:08.974555 32502 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:59:08.974565 32502 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:59:08.974575 32502 net.cpp:150] Setting up L1_b8_relu\nI0821 08:59:08.974581 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974586 32502 net.cpp:165] Memory required for data: 604161200\nI0821 08:59:08.974591 32502 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.974597 32502 net.cpp:100] Creating Layer 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.974602 32502 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:59:08.974612 32502 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:08.974622 32502 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:08.974673 32502 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:59:08.974685 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974691 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.974696 32502 net.cpp:165] Memory required for data: 617268400\nI0821 08:59:08.974701 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:59:08.974715 32502 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:59:08.974722 32502 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:59:08.974731 32502 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:59:08.975111 32502 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:59:08.975128 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.975133 32502 net.cpp:165] Memory required for data: 623822000\nI0821 08:59:08.975142 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:59:08.975152 32502 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:59:08.975158 32502 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:59:08.975169 32502 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:59:08.975452 32502 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:59:08.975466 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.975471 32502 net.cpp:165] Memory required for data: 630375600\nI0821 08:59:08.975481 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:08.975492 32502 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 
08:59:08.975499 32502 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:59:08.975507 32502 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.975566 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:59:08.975731 32502 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:59:08.975750 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.975756 32502 net.cpp:165] Memory required for data: 636929200\nI0821 08:59:08.975774 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:59:08.975781 32502 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:59:08.975787 32502 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:59:08.975800 32502 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:59:08.975810 32502 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:59:08.975816 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.975821 32502 net.cpp:165] Memory required for data: 643482800\nI0821 08:59:08.975826 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:59:08.975841 32502 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:59:08.975847 32502 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:59:08.975854 32502 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:59:08.976215 32502 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:59:08.976229 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.976234 32502 net.cpp:165] Memory required for data: 650036400\nI0821 08:59:08.976243 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:59:08.976255 32502 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:59:08.976263 32502 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:59:08.976270 32502 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:59:08.976550 32502 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:59:08.976564 32502 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0821 08:59:08.976569 32502 net.cpp:165] Memory required for data: 656590000\nI0821 08:59:08.976601 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:08.976614 32502 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:59:08.976620 32502 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:59:08.976630 32502 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:59:08.976691 32502 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:59:08.976861 32502 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:59:08.976874 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.976879 32502 net.cpp:165] Memory required for data: 663143600\nI0821 08:59:08.976889 32502 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:59:08.976898 32502 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:59:08.976904 32502 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:59:08.976912 32502 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:59:08.976919 32502 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:59:08.976958 32502 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:59:08.976966 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.976971 32502 net.cpp:165] Memory required for data: 669697200\nI0821 08:59:08.976976 32502 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:59:08.976984 32502 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:59:08.976990 32502 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:59:08.976999 32502 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:59:08.977010 32502 net.cpp:150] Setting up L1_b9_relu\nI0821 08:59:08.977016 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.977020 32502 net.cpp:165] Memory required for data: 676250800\nI0821 08:59:08.977025 32502 layer_factory.hpp:77] Creating layer 
L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.977032 32502 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.977037 32502 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:59:08.977047 32502 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:08.977057 32502 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:08.977104 32502 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:59:08.977123 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.977130 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.977135 32502 net.cpp:165] Memory required for data: 689358000\nI0821 08:59:08.977140 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0821 08:59:08.977154 32502 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0821 08:59:08.977161 32502 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:59:08.977170 32502 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0821 08:59:08.977540 32502 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0821 08:59:08.977555 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.977560 32502 net.cpp:165] Memory required for data: 695911600\nI0821 08:59:08.977568 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0821 08:59:08.977577 32502 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0821 08:59:08.977586 32502 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0821 08:59:08.977596 32502 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0821 08:59:08.977876 32502 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0821 08:59:08.977890 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.977895 32502 net.cpp:165] Memory required for data: 702465200\nI0821 08:59:08.977906 32502 layer_factory.hpp:77] Creating layer 
L1_b10_cbr1_scale\nI0821 08:59:08.977915 32502 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0821 08:59:08.977921 32502 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0821 08:59:08.977931 32502 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.977993 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0821 08:59:08.978159 32502 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0821 08:59:08.978175 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.978180 32502 net.cpp:165] Memory required for data: 709018800\nI0821 08:59:08.978189 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0821 08:59:08.978197 32502 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0821 08:59:08.978202 32502 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0821 08:59:08.978210 32502 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0821 08:59:08.978219 32502 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0821 08:59:08.978226 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.978230 32502 net.cpp:165] Memory required for data: 715572400\nI0821 08:59:08.978235 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0821 08:59:08.978250 32502 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0821 08:59:08.978255 32502 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0821 08:59:08.978266 32502 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0821 08:59:08.978633 32502 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0821 08:59:08.978648 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.978652 32502 net.cpp:165] Memory required for data: 722126000\nI0821 08:59:08.978662 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0821 08:59:08.978674 32502 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0821 08:59:08.978680 32502 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0821 08:59:08.978691 32502 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0821 
08:59:08.978977 32502 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0821 08:59:08.978991 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.978996 32502 net.cpp:165] Memory required for data: 728679600\nI0821 08:59:08.979007 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:08.979015 32502 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0821 08:59:08.979022 32502 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0821 08:59:08.979029 32502 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0821 08:59:08.979091 32502 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0821 08:59:08.979250 32502 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0821 08:59:08.979264 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.979275 32502 net.cpp:165] Memory required for data: 735233200\nI0821 08:59:08.979285 32502 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0821 08:59:08.979297 32502 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0821 08:59:08.979303 32502 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0821 08:59:08.979311 32502 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:59:08.979320 32502 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0821 08:59:08.979357 32502 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0821 08:59:08.979369 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.979374 32502 net.cpp:165] Memory required for data: 741786800\nI0821 08:59:08.979379 32502 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0821 08:59:08.979387 32502 net.cpp:100] Creating Layer L1_b10_relu\nI0821 08:59:08.979393 32502 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0821 08:59:08.979403 32502 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top (in-place)\nI0821 08:59:08.979413 32502 net.cpp:150] Setting up L1_b10_relu\nI0821 08:59:08.979419 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.979423 32502 net.cpp:165] Memory required for data: 748340400\nI0821 08:59:08.979429 32502 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.979435 32502 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.979440 32502 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0821 08:59:08.979450 32502 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:08.979460 32502 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:08.979507 32502 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0821 08:59:08.979519 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.979526 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.979531 32502 net.cpp:165] Memory required for data: 761447600\nI0821 08:59:08.979535 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0821 08:59:08.979549 32502 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0821 08:59:08.979555 32502 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0821 08:59:08.979564 32502 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0821 08:59:08.979935 32502 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0821 08:59:08.979949 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.979954 32502 net.cpp:165] Memory required for data: 768001200\nI0821 08:59:08.979964 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0821 08:59:08.979981 32502 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0821 08:59:08.979987 32502 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0821 08:59:08.979998 32502 net.cpp:408] L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0821 08:59:08.980283 32502 net.cpp:150] Setting up L1_b11_cbr1_bn\nI0821 08:59:08.980295 32502 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0821 08:59:08.980300 32502 net.cpp:165] Memory required for data: 774554800\nI0821 08:59:08.980310 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:08.980320 32502 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0821 08:59:08.980326 32502 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0821 08:59:08.980336 32502 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.980397 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0821 08:59:08.980566 32502 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0821 08:59:08.980579 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.980584 32502 net.cpp:165] Memory required for data: 781108400\nI0821 08:59:08.980593 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0821 08:59:08.980604 32502 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0821 08:59:08.980610 32502 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0821 08:59:08.980624 32502 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0821 08:59:08.980635 32502 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0821 08:59:08.980643 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.980646 32502 net.cpp:165] Memory required for data: 787662000\nI0821 08:59:08.980651 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0821 08:59:08.980665 32502 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0821 08:59:08.980670 32502 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0821 08:59:08.980681 32502 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0821 08:59:08.981052 32502 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0821 08:59:08.981067 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981072 32502 net.cpp:165] Memory required for data: 794215600\nI0821 08:59:08.981081 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0821 08:59:08.981094 32502 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0821 
08:59:08.981101 32502 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0821 08:59:08.981112 32502 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0821 08:59:08.981391 32502 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0821 08:59:08.981405 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981410 32502 net.cpp:165] Memory required for data: 800769200\nI0821 08:59:08.981420 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:08.981428 32502 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0821 08:59:08.981434 32502 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0821 08:59:08.981442 32502 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0821 08:59:08.981505 32502 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0821 08:59:08.981667 32502 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0821 08:59:08.981680 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981685 32502 net.cpp:165] Memory required for data: 807322800\nI0821 08:59:08.981694 32502 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0821 08:59:08.981706 32502 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0821 08:59:08.981712 32502 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0821 08:59:08.981719 32502 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0821 08:59:08.981727 32502 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0821 08:59:08.981771 32502 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0821 08:59:08.981784 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981789 32502 net.cpp:165] Memory required for data: 813876400\nI0821 08:59:08.981794 32502 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0821 08:59:08.981802 32502 net.cpp:100] Creating Layer L1_b11_relu\nI0821 08:59:08.981807 32502 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0821 08:59:08.981817 32502 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top 
(in-place)\nI0821 08:59:08.981827 32502 net.cpp:150] Setting up L1_b11_relu\nI0821 08:59:08.981834 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981839 32502 net.cpp:165] Memory required for data: 820430000\nI0821 08:59:08.981844 32502 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.981850 32502 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.981856 32502 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0821 08:59:08.981868 32502 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:08.981878 32502 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:08.981927 32502 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0821 08:59:08.981940 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981945 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.981950 32502 net.cpp:165] Memory required for data: 833537200\nI0821 08:59:08.981962 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0821 08:59:08.981976 32502 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0821 08:59:08.981983 32502 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0821 08:59:08.981992 32502 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0821 08:59:08.982362 32502 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0821 08:59:08.982376 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.982381 32502 net.cpp:165] Memory required for data: 840090800\nI0821 08:59:08.982390 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0821 08:59:08.982403 32502 net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0821 08:59:08.982409 32502 net.cpp:434] L1_b12_cbr1_bn <- L1_b12_cbr1_conv_top\nI0821 08:59:08.982419 32502 net.cpp:408] 
L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0821 08:59:08.982702 32502 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0821 08:59:08.982717 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.982722 32502 net.cpp:165] Memory required for data: 846644400\nI0821 08:59:08.982731 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:08.982740 32502 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0821 08:59:08.982753 32502 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0821 08:59:08.982760 32502 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.982826 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0821 08:59:08.982990 32502 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0821 08:59:08.983003 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.983008 32502 net.cpp:165] Memory required for data: 853198000\nI0821 08:59:08.983017 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0821 08:59:08.983028 32502 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0821 08:59:08.983034 32502 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0821 08:59:08.983042 32502 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0821 08:59:08.983052 32502 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0821 08:59:08.983058 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.983062 32502 net.cpp:165] Memory required for data: 859751600\nI0821 08:59:08.983067 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0821 08:59:08.983080 32502 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0821 08:59:08.983086 32502 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0821 08:59:08.983098 32502 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0821 08:59:08.983458 32502 net.cpp:150] Setting up L1_b12_cbr2_conv\nI0821 08:59:08.983471 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.983476 32502 net.cpp:165] Memory required for data: 
866305200\nI0821 08:59:08.983485 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0821 08:59:08.983497 32502 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0821 08:59:08.983505 32502 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0821 08:59:08.983512 32502 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0821 08:59:08.983808 32502 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0821 08:59:08.983820 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.983826 32502 net.cpp:165] Memory required for data: 872858800\nI0821 08:59:08.983836 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:08.983845 32502 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0821 08:59:08.983851 32502 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0821 08:59:08.983860 32502 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0821 08:59:08.983922 32502 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0821 08:59:08.984091 32502 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0821 08:59:08.984104 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984109 32502 net.cpp:165] Memory required for data: 879412400\nI0821 08:59:08.984118 32502 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0821 08:59:08.984134 32502 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0821 08:59:08.984140 32502 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0821 08:59:08.984148 32502 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0821 08:59:08.984158 32502 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0821 08:59:08.984195 32502 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0821 08:59:08.984206 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984211 32502 net.cpp:165] Memory required for data: 885966000\nI0821 08:59:08.984217 32502 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0821 08:59:08.984227 32502 net.cpp:100] Creating Layer 
L1_b12_relu\nI0821 08:59:08.984235 32502 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0821 08:59:08.984241 32502 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0821 08:59:08.984251 32502 net.cpp:150] Setting up L1_b12_relu\nI0821 08:59:08.984257 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984261 32502 net.cpp:165] Memory required for data: 892519600\nI0821 08:59:08.984266 32502 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.984273 32502 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.984278 32502 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0821 08:59:08.984285 32502 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:08.984294 32502 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:08.984347 32502 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0821 08:59:08.984359 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984365 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984370 32502 net.cpp:165] Memory required for data: 905626800\nI0821 08:59:08.984375 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0821 08:59:08.984388 32502 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0821 08:59:08.984395 32502 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0821 08:59:08.984405 32502 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0821 08:59:08.984779 32502 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0821 08:59:08.984794 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.984799 32502 net.cpp:165] Memory required for data: 912180400\nI0821 08:59:08.984808 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_bn\nI0821 08:59:08.984833 32502 
net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0821 08:59:08.984839 32502 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0821 08:59:08.984848 32502 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0821 08:59:08.985136 32502 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0821 08:59:08.985152 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.985157 32502 net.cpp:165] Memory required for data: 918734000\nI0821 08:59:08.985168 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:08.985177 32502 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0821 08:59:08.985183 32502 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0821 08:59:08.985191 32502 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.985251 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0821 08:59:08.985419 32502 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0821 08:59:08.985432 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.985437 32502 net.cpp:165] Memory required for data: 925287600\nI0821 08:59:08.985446 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0821 08:59:08.985457 32502 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0821 08:59:08.985463 32502 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0821 08:59:08.985471 32502 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0821 08:59:08.985481 32502 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0821 08:59:08.985496 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.985502 32502 net.cpp:165] Memory required for data: 931841200\nI0821 08:59:08.985507 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0821 08:59:08.985517 32502 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0821 08:59:08.985523 32502 net.cpp:434] L1_b13_cbr2_conv <- L1_b13_cbr1_bn_top\nI0821 08:59:08.985534 32502 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0821 08:59:08.985910 32502 net.cpp:150] Setting up 
L1_b13_cbr2_conv\nI0821 08:59:08.985925 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.985930 32502 net.cpp:165] Memory required for data: 938394800\nI0821 08:59:08.985939 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0821 08:59:08.985947 32502 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0821 08:59:08.985954 32502 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0821 08:59:08.985965 32502 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0821 08:59:08.987267 32502 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0821 08:59:08.987284 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987289 32502 net.cpp:165] Memory required for data: 944948400\nI0821 08:59:08.987301 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:08.987310 32502 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0821 08:59:08.987318 32502 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0821 08:59:08.987329 32502 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0821 08:59:08.987392 32502 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0821 08:59:08.987558 32502 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0821 08:59:08.987574 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987579 32502 net.cpp:165] Memory required for data: 951502000\nI0821 08:59:08.987589 32502 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0821 08:59:08.987598 32502 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0821 08:59:08.987604 32502 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0821 08:59:08.987612 32502 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0821 08:59:08.987619 32502 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0821 08:59:08.987660 32502 net.cpp:150] Setting up L1_b13_sum_eltwise\nI0821 08:59:08.987673 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987678 32502 net.cpp:165] Memory required for 
data: 958055600\nI0821 08:59:08.987682 32502 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0821 08:59:08.987694 32502 net.cpp:100] Creating Layer L1_b13_relu\nI0821 08:59:08.987699 32502 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0821 08:59:08.987706 32502 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0821 08:59:08.987716 32502 net.cpp:150] Setting up L1_b13_relu\nI0821 08:59:08.987723 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987727 32502 net.cpp:165] Memory required for data: 964609200\nI0821 08:59:08.987732 32502 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.987741 32502 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.987754 32502 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0821 08:59:08.987762 32502 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:08.987772 32502 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:08.987823 32502 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0821 08:59:08.987838 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987845 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.987849 32502 net.cpp:165] Memory required for data: 977716400\nI0821 08:59:08.987854 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0821 08:59:08.987866 32502 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0821 08:59:08.987881 32502 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0821 08:59:08.987891 32502 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0821 08:59:08.988265 32502 net.cpp:150] Setting up L1_b14_cbr1_conv\nI0821 08:59:08.988279 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.988284 32502 net.cpp:165] 
Memory required for data: 984270000\nI0821 08:59:08.988293 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0821 08:59:08.988306 32502 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0821 08:59:08.988312 32502 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0821 08:59:08.988320 32502 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0821 08:59:08.988596 32502 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0821 08:59:08.988610 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.988615 32502 net.cpp:165] Memory required for data: 990823600\nI0821 08:59:08.988626 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:08.988634 32502 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0821 08:59:08.988641 32502 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0821 08:59:08.988651 32502 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.988714 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0821 08:59:08.988883 32502 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0821 08:59:08.988898 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.988903 32502 net.cpp:165] Memory required for data: 997377200\nI0821 08:59:08.988911 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0821 08:59:08.988919 32502 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0821 08:59:08.988925 32502 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0821 08:59:08.988934 32502 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0821 08:59:08.988942 32502 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0821 08:59:08.988950 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.988955 32502 net.cpp:165] Memory required for data: 1003930800\nI0821 08:59:08.988960 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_conv\nI0821 08:59:08.988973 32502 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0821 08:59:08.988979 32502 net.cpp:434] L1_b14_cbr2_conv <- 
L1_b14_cbr1_bn_top\nI0821 08:59:08.988991 32502 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0821 08:59:08.989354 32502 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0821 08:59:08.989368 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.989373 32502 net.cpp:165] Memory required for data: 1010484400\nI0821 08:59:08.989382 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0821 08:59:08.989395 32502 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0821 08:59:08.989401 32502 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0821 08:59:08.989416 32502 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0821 08:59:08.989698 32502 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0821 08:59:08.989711 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.989717 32502 net.cpp:165] Memory required for data: 1017038000\nI0821 08:59:08.989727 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:08.989735 32502 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0821 08:59:08.989748 32502 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0821 08:59:08.989759 32502 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0821 08:59:08.989821 32502 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0821 08:59:08.989979 32502 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0821 08:59:08.989995 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990000 32502 net.cpp:165] Memory required for data: 1023591600\nI0821 08:59:08.990008 32502 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0821 08:59:08.990017 32502 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0821 08:59:08.990025 32502 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0821 08:59:08.990037 32502 net.cpp:434] L1_b14_sum_eltwise <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0821 08:59:08.990046 32502 net.cpp:408] L1_b14_sum_eltwise -> L1_b14_sum_eltwise_top\nI0821 08:59:08.990085 32502 net.cpp:150] Setting up 
L1_b14_sum_eltwise\nI0821 08:59:08.990095 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990100 32502 net.cpp:165] Memory required for data: 1030145200\nI0821 08:59:08.990105 32502 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0821 08:59:08.990113 32502 net.cpp:100] Creating Layer L1_b14_relu\nI0821 08:59:08.990123 32502 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0821 08:59:08.990129 32502 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0821 08:59:08.990139 32502 net.cpp:150] Setting up L1_b14_relu\nI0821 08:59:08.990145 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990150 32502 net.cpp:165] Memory required for data: 1036698800\nI0821 08:59:08.990154 32502 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.990161 32502 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.990167 32502 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0821 08:59:08.990177 32502 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:08.990187 32502 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:08.990236 32502 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0821 08:59:08.990247 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990254 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990258 32502 net.cpp:165] Memory required for data: 1049806000\nI0821 08:59:08.990263 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0821 08:59:08.990279 32502 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0821 08:59:08.990286 32502 net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0821 08:59:08.990295 32502 net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0821 08:59:08.990655 
32502 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0821 08:59:08.990669 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.990674 32502 net.cpp:165] Memory required for data: 1056359600\nI0821 08:59:08.990684 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0821 08:59:08.990696 32502 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0821 08:59:08.990703 32502 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0821 08:59:08.990711 32502 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0821 08:59:08.990996 32502 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0821 08:59:08.991010 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.991015 32502 net.cpp:165] Memory required for data: 1062913200\nI0821 08:59:08.991026 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:08.991034 32502 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0821 08:59:08.991041 32502 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0821 08:59:08.991055 32502 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.991117 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0821 08:59:08.991272 32502 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0821 08:59:08.991288 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.991293 32502 net.cpp:165] Memory required for data: 1069466800\nI0821 08:59:08.991302 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0821 08:59:08.991312 32502 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0821 08:59:08.991317 32502 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0821 08:59:08.991324 32502 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0821 08:59:08.991334 32502 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0821 08:59:08.991341 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.991345 32502 net.cpp:165] Memory required for data: 1076020400\nI0821 08:59:08.991356 32502 layer_factory.hpp:77] Creating 
layer L1_b15_cbr2_conv\nI0821 08:59:08.991371 32502 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0821 08:59:08.991377 32502 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0821 08:59:08.991389 32502 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0821 08:59:08.991770 32502 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0821 08:59:08.991785 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.991789 32502 net.cpp:165] Memory required for data: 1082574000\nI0821 08:59:08.991798 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0821 08:59:08.991811 32502 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0821 08:59:08.991817 32502 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0821 08:59:08.991828 32502 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0821 08:59:08.992110 32502 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0821 08:59:08.992123 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.992128 32502 net.cpp:165] Memory required for data: 1089127600\nI0821 08:59:08.992139 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:08.992148 32502 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0821 08:59:08.992154 32502 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0821 08:59:08.992161 32502 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0821 08:59:08.992225 32502 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0821 08:59:08.993388 32502 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0821 08:59:08.993405 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.993412 32502 net.cpp:165] Memory required for data: 1095681200\nI0821 08:59:08.993422 32502 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0821 08:59:08.993430 32502 net.cpp:100] Creating Layer L1_b15_sum_eltwise\nI0821 08:59:08.993438 32502 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0821 08:59:08.993444 32502 net.cpp:434] L1_b15_sum_eltwise <- 
L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0821 08:59:08.993456 32502 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0821 08:59:08.993495 32502 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0821 08:59:08.993507 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.993512 32502 net.cpp:165] Memory required for data: 1102234800\nI0821 08:59:08.993517 32502 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0821 08:59:08.993525 32502 net.cpp:100] Creating Layer L1_b15_relu\nI0821 08:59:08.993531 32502 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0821 08:59:08.993538 32502 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0821 08:59:08.993551 32502 net.cpp:150] Setting up L1_b15_relu\nI0821 08:59:08.993558 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.993563 32502 net.cpp:165] Memory required for data: 1108788400\nI0821 08:59:08.993568 32502 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.993574 32502 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.993580 32502 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0821 08:59:08.993587 32502 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:08.993597 32502 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:08.993649 32502 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0821 08:59:08.993661 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.993667 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.993672 32502 net.cpp:165] Memory required for data: 1121895600\nI0821 08:59:08.993677 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0821 08:59:08.993688 32502 net.cpp:100] Creating Layer L1_b16_cbr1_conv\nI0821 08:59:08.993695 32502 
net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0821 08:59:08.993715 32502 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0821 08:59:08.994094 32502 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0821 08:59:08.994109 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.994114 32502 net.cpp:165] Memory required for data: 1128449200\nI0821 08:59:08.994123 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0821 08:59:08.994133 32502 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0821 08:59:08.994140 32502 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0821 08:59:08.994153 32502 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0821 08:59:08.994431 32502 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0821 08:59:08.994443 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.994448 32502 net.cpp:165] Memory required for data: 1135002800\nI0821 08:59:08.994459 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:08.994470 32502 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0821 08:59:08.994477 32502 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0821 08:59:08.994485 32502 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.994549 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0821 08:59:08.994710 32502 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0821 08:59:08.994724 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.994729 32502 net.cpp:165] Memory required for data: 1141556400\nI0821 08:59:08.994738 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0821 08:59:08.994752 32502 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0821 08:59:08.994760 32502 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0821 08:59:08.994773 32502 net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0821 08:59:08.994784 32502 net.cpp:150] Setting up L1_b16_cbr1_relu\nI0821 08:59:08.994791 32502 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.994796 32502 net.cpp:165] Memory required for data: 1148110000\nI0821 08:59:08.994801 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0821 08:59:08.994815 32502 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0821 08:59:08.994822 32502 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0821 08:59:08.994829 32502 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0821 08:59:08.995187 32502 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0821 08:59:08.995201 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.995206 32502 net.cpp:165] Memory required for data: 1154663600\nI0821 08:59:08.995215 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0821 08:59:08.995227 32502 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0821 08:59:08.995234 32502 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0821 08:59:08.995242 32502 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0821 08:59:08.995517 32502 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0821 08:59:08.995529 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.995534 32502 net.cpp:165] Memory required for data: 1161217200\nI0821 08:59:08.995545 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:08.995558 32502 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0821 08:59:08.995565 32502 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0821 08:59:08.995573 32502 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0821 08:59:08.995632 32502 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0821 08:59:08.995797 32502 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0821 08:59:08.995811 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.995816 32502 net.cpp:165] Memory required for data: 1167770800\nI0821 08:59:08.995826 32502 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise\nI0821 08:59:08.995834 32502 net.cpp:100] Creating Layer 
L1_b16_sum_eltwise\nI0821 08:59:08.995841 32502 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0821 08:59:08.995848 32502 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0821 08:59:08.995867 32502 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0821 08:59:08.995903 32502 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0821 08:59:08.995915 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.995920 32502 net.cpp:165] Memory required for data: 1174324400\nI0821 08:59:08.995925 32502 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0821 08:59:08.995934 32502 net.cpp:100] Creating Layer L1_b16_relu\nI0821 08:59:08.995939 32502 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0821 08:59:08.995946 32502 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0821 08:59:08.995955 32502 net.cpp:150] Setting up L1_b16_relu\nI0821 08:59:08.995962 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.995966 32502 net.cpp:165] Memory required for data: 1180878000\nI0821 08:59:08.995971 32502 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.995981 32502 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.995986 32502 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0821 08:59:08.995995 32502 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:08.996003 32502 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:08.996054 32502 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0821 08:59:08.996067 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.996073 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.996078 32502 net.cpp:165] Memory required for data: 1193985200\nI0821 08:59:08.996083 
32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0821 08:59:08.996094 32502 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0821 08:59:08.996100 32502 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0821 08:59:08.996112 32502 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0821 08:59:08.996474 32502 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0821 08:59:08.996489 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.996493 32502 net.cpp:165] Memory required for data: 1200538800\nI0821 08:59:08.996502 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0821 08:59:08.996511 32502 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0821 08:59:08.996518 32502 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0821 08:59:08.996526 32502 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0821 08:59:08.996817 32502 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0821 08:59:08.996830 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.996835 32502 net.cpp:165] Memory required for data: 1207092400\nI0821 08:59:08.996846 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:08.996858 32502 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0821 08:59:08.996865 32502 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0821 08:59:08.996872 32502 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.996937 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0821 08:59:08.997094 32502 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0821 08:59:08.997107 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.997112 32502 net.cpp:165] Memory required for data: 1213646000\nI0821 08:59:08.997123 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0821 08:59:08.997130 32502 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0821 08:59:08.997136 32502 net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0821 08:59:08.997146 32502 
net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0821 08:59:08.997156 32502 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0821 08:59:08.997164 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.997169 32502 net.cpp:165] Memory required for data: 1220199600\nI0821 08:59:08.997174 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0821 08:59:08.997195 32502 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0821 08:59:08.997200 32502 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0821 08:59:08.997210 32502 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0821 08:59:08.997570 32502 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0821 08:59:08.997584 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.997589 32502 net.cpp:165] Memory required for data: 1226753200\nI0821 08:59:08.997597 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0821 08:59:08.997609 32502 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0821 08:59:08.997617 32502 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0821 08:59:08.997624 32502 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0821 08:59:08.997907 32502 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0821 08:59:08.997921 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.997926 32502 net.cpp:165] Memory required for data: 1233306800\nI0821 08:59:08.997937 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:08.997946 32502 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0821 08:59:08.997952 32502 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0821 08:59:08.997963 32502 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0821 08:59:08.998024 32502 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0821 08:59:08.998188 32502 net.cpp:150] Setting up L1_b17_cbr2_scale\nI0821 08:59:08.998201 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.998206 32502 net.cpp:165] Memory required for 
data: 1239860400\nI0821 08:59:08.998215 32502 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0821 08:59:08.998224 32502 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0821 08:59:08.998231 32502 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0821 08:59:08.998239 32502 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0821 08:59:08.998248 32502 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0821 08:59:08.998283 32502 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0821 08:59:08.998296 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.998301 32502 net.cpp:165] Memory required for data: 1246414000\nI0821 08:59:08.998306 32502 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0821 08:59:08.998314 32502 net.cpp:100] Creating Layer L1_b17_relu\nI0821 08:59:08.998320 32502 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0821 08:59:08.998327 32502 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0821 08:59:08.998337 32502 net.cpp:150] Setting up L1_b17_relu\nI0821 08:59:08.998343 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.998348 32502 net.cpp:165] Memory required for data: 1252967600\nI0821 08:59:08.998353 32502 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.998363 32502 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.998368 32502 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0821 08:59:08.998376 32502 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:08.998386 32502 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:08.998435 32502 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0821 08:59:08.998448 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 
08:59:08.998456 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.998459 32502 net.cpp:165] Memory required for data: 1266074800\nI0821 08:59:08.998464 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0821 08:59:08.998476 32502 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0821 08:59:08.998482 32502 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0821 08:59:08.998492 32502 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0821 08:59:08.998863 32502 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0821 08:59:08.998884 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.998889 32502 net.cpp:165] Memory required for data: 1272628400\nI0821 08:59:08.998898 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0821 08:59:08.998910 32502 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0821 08:59:08.998917 32502 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0821 08:59:08.998925 32502 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0821 08:59:08.999227 32502 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0821 08:59:08.999243 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.999248 32502 net.cpp:165] Memory required for data: 1279182000\nI0821 08:59:08.999258 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:08.999269 32502 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0821 08:59:08.999274 32502 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0821 08:59:08.999285 32502 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.999346 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0821 08:59:08.999505 32502 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0821 08:59:08.999521 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.999526 32502 net.cpp:165] Memory required for data: 1285735600\nI0821 08:59:08.999534 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0821 
08:59:08.999542 32502 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0821 08:59:08.999549 32502 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0821 08:59:08.999557 32502 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0821 08:59:08.999565 32502 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0821 08:59:08.999572 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:08.999577 32502 net.cpp:165] Memory required for data: 1292289200\nI0821 08:59:08.999583 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0821 08:59:08.999595 32502 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0821 08:59:08.999601 32502 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0821 08:59:08.999613 32502 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0821 08:59:08.999985 32502 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0821 08:59:09.000000 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000006 32502 net.cpp:165] Memory required for data: 1298842800\nI0821 08:59:09.000015 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0821 08:59:09.000030 32502 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0821 08:59:09.000036 32502 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0821 08:59:09.000047 32502 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0821 08:59:09.000326 32502 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0821 08:59:09.000340 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000345 32502 net.cpp:165] Memory required for data: 1305396400\nI0821 08:59:09.000389 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:09.000401 32502 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0821 08:59:09.000408 32502 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0821 08:59:09.000416 32502 net.cpp:395] L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0821 08:59:09.000479 32502 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0821 08:59:09.000639 32502 
net.cpp:150] Setting up L1_b18_cbr2_scale\nI0821 08:59:09.000653 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000658 32502 net.cpp:165] Memory required for data: 1311950000\nI0821 08:59:09.000666 32502 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0821 08:59:09.000679 32502 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0821 08:59:09.000686 32502 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0821 08:59:09.000694 32502 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0821 08:59:09.000701 32502 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0821 08:59:09.000738 32502 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0821 08:59:09.000761 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000767 32502 net.cpp:165] Memory required for data: 1318503600\nI0821 08:59:09.000772 32502 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0821 08:59:09.000782 32502 net.cpp:100] Creating Layer L1_b18_relu\nI0821 08:59:09.000787 32502 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0821 08:59:09.000797 32502 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0821 08:59:09.000808 32502 net.cpp:150] Setting up L1_b18_relu\nI0821 08:59:09.000815 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000819 32502 net.cpp:165] Memory required for data: 1325057200\nI0821 08:59:09.000824 32502 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:09.000834 32502 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:09.000840 32502 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0821 08:59:09.000847 32502 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:09.000857 32502 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 
08:59:09.000908 32502 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0821 08:59:09.000921 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000927 32502 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0821 08:59:09.000932 32502 net.cpp:165] Memory required for data: 1338164400\nI0821 08:59:09.000937 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:59:09.000948 32502 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:59:09.000955 32502 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0821 08:59:09.000967 32502 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:59:09.001339 32502 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:59:09.001353 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.001358 32502 net.cpp:165] Memory required for data: 1339802800\nI0821 08:59:09.001368 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:59:09.001380 32502 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:59:09.001387 32502 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:59:09.001396 32502 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:59:09.001674 32502 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:59:09.001690 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.001695 32502 net.cpp:165] Memory required for data: 1341441200\nI0821 08:59:09.001706 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:09.001715 32502 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:59:09.001721 32502 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:59:09.001729 32502 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:09.001796 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:59:09.001963 32502 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:59:09.001976 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.001981 32502 
net.cpp:165] Memory required for data: 1343079600\nI0821 08:59:09.001991 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:59:09.001998 32502 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:59:09.002004 32502 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:59:09.002014 32502 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:59:09.002024 32502 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:59:09.002032 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.002037 32502 net.cpp:165] Memory required for data: 1344718000\nI0821 08:59:09.002041 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:59:09.002051 32502 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:59:09.002058 32502 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:59:09.002075 32502 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:59:09.002437 32502 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:59:09.002451 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.002456 32502 net.cpp:165] Memory required for data: 1346356400\nI0821 08:59:09.002465 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:59:09.002475 32502 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:59:09.002481 32502 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:59:09.002492 32502 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:59:09.002771 32502 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:59:09.002785 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.002790 32502 net.cpp:165] Memory required for data: 1347994800\nI0821 08:59:09.002800 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:09.002815 32502 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:59:09.002820 32502 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:59:09.002828 32502 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 
08:59:09.002888 32502 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:59:09.003052 32502 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:59:09.003064 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.003069 32502 net.cpp:165] Memory required for data: 1349633200\nI0821 08:59:09.003079 32502 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:59:09.003095 32502 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:59:09.003103 32502 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0821 08:59:09.003114 32502 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:59:09.003145 32502 net.cpp:150] Setting up L2_b1_pool\nI0821 08:59:09.003155 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.003159 32502 net.cpp:165] Memory required for data: 1351271600\nI0821 08:59:09.003165 32502 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:59:09.003173 32502 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:59:09.003180 32502 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:59:09.003185 32502 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:59:09.003196 32502 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:59:09.003231 32502 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:59:09.003240 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.003244 32502 net.cpp:165] Memory required for data: 1352910000\nI0821 08:59:09.003249 32502 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:59:09.003257 32502 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:59:09.003263 32502 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:59:09.003273 32502 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:59:09.003283 32502 net.cpp:150] Setting up L2_b1_relu\nI0821 08:59:09.003290 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.003294 32502 net.cpp:165] Memory required for data: 1354548400\nI0821 
08:59:09.003299 32502 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:59:09.003310 32502 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:59:09.003317 32502 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:59:09.005228 32502 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:59:09.005245 32502 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0821 08:59:09.005251 32502 net.cpp:165] Memory required for data: 1356186800\nI0821 08:59:09.005257 32502 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:59:09.005269 32502 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:59:09.005275 32502 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:59:09.005282 32502 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:59:09.005293 32502 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:59:09.005338 32502 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:59:09.005354 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.005359 32502 net.cpp:165] Memory required for data: 1359463600\nI0821 08:59:09.005373 32502 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:09.005380 32502 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:09.005386 32502 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:59:09.005394 32502 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:09.005406 32502 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:09.005462 32502 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:59:09.005475 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.005481 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.005486 32502 net.cpp:165] Memory required for data: 1366017200\nI0821 08:59:09.005491 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:59:09.005509 32502 net.cpp:100] 
Creating Layer L2_b2_cbr1_conv\nI0821 08:59:09.005517 32502 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:59:09.005527 32502 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:59:09.006052 32502 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:59:09.006067 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.006072 32502 net.cpp:165] Memory required for data: 1369294000\nI0821 08:59:09.006083 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:59:09.006094 32502 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:59:09.006101 32502 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:59:09.006110 32502 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:59:09.006381 32502 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:59:09.006393 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.006398 32502 net.cpp:165] Memory required for data: 1372570800\nI0821 08:59:09.006409 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:09.006418 32502 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:59:09.006424 32502 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:59:09.006435 32502 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:09.006497 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:59:09.006659 32502 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:59:09.006671 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.006676 32502 net.cpp:165] Memory required for data: 1375847600\nI0821 08:59:09.006686 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:59:09.006695 32502 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:59:09.006700 32502 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:59:09.006708 32502 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:59:09.006717 32502 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 
08:59:09.006724 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.006729 32502 net.cpp:165] Memory required for data: 1379124400\nI0821 08:59:09.006733 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:59:09.006757 32502 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:59:09.006763 32502 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:59:09.006775 32502 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:59:09.007275 32502 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:59:09.007290 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.007295 32502 net.cpp:165] Memory required for data: 1382401200\nI0821 08:59:09.007304 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:59:09.007316 32502 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:59:09.007323 32502 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:59:09.007334 32502 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:59:09.007602 32502 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:59:09.007616 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.007622 32502 net.cpp:165] Memory required for data: 1385678000\nI0821 08:59:09.007638 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:09.007648 32502 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:59:09.007654 32502 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:59:09.007663 32502 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:59:09.007725 32502 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:59:09.007892 32502 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:59:09.007910 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.007915 32502 net.cpp:165] Memory required for data: 1388954800\nI0821 08:59:09.007923 32502 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:59:09.007933 32502 net.cpp:100] Creating Layer 
L2_b2_sum_eltwise\nI0821 08:59:09.007941 32502 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:59:09.007947 32502 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:59:09.007956 32502 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:59:09.007987 32502 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:59:09.007997 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.008002 32502 net.cpp:165] Memory required for data: 1392231600\nI0821 08:59:09.008007 32502 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:59:09.008014 32502 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:59:09.008019 32502 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:59:09.008030 32502 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:59:09.008039 32502 net.cpp:150] Setting up L2_b2_relu\nI0821 08:59:09.008046 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.008051 32502 net.cpp:165] Memory required for data: 1395508400\nI0821 08:59:09.008056 32502 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:09.008062 32502 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:09.008069 32502 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:59:09.008078 32502 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:09.008088 32502 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:09.008136 32502 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:59:09.008147 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.008154 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.008158 32502 net.cpp:165] Memory required for data: 1402062000\nI0821 08:59:09.008164 32502 layer_factory.hpp:77] Creating layer 
L2_b3_cbr1_conv\nI0821 08:59:09.008178 32502 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:59:09.008184 32502 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:59:09.008194 32502 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:59:09.008693 32502 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:59:09.008708 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.008713 32502 net.cpp:165] Memory required for data: 1405338800\nI0821 08:59:09.008723 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:59:09.008736 32502 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:59:09.008749 32502 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:59:09.008759 32502 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:59:09.009026 32502 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:59:09.009039 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.009044 32502 net.cpp:165] Memory required for data: 1408615600\nI0821 08:59:09.009055 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:09.009063 32502 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:59:09.009069 32502 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:59:09.009078 32502 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:59:09.009148 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:59:09.009307 32502 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:59:09.009323 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.009328 32502 net.cpp:165] Memory required for data: 1411892400\nI0821 08:59:09.009337 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:59:09.009346 32502 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:59:09.009352 32502 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:59:09.009359 32502 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 
08:59:09.009369 32502 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:59:09.009377 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.009380 32502 net.cpp:165] Memory required for data: 1415169200\nI0821 08:59:09.009385 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:59:09.009399 32502 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:59:09.009405 32502 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:59:09.009416 32502 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:59:09.010012 32502 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:59:09.010030 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010035 32502 net.cpp:165] Memory required for data: 1418446000\nI0821 08:59:09.010044 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:59:09.010057 32502 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:59:09.010064 32502 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:59:09.010076 32502 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:59:09.010349 32502 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:59:09.010362 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010367 32502 net.cpp:165] Memory required for data: 1421722800\nI0821 08:59:09.010377 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:09.010387 32502 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:59:09.010393 32502 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:59:09.010401 32502 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:59:09.010466 32502 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:59:09.010627 32502 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:59:09.010640 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010645 32502 net.cpp:165] Memory required for data: 1424999600\nI0821 08:59:09.010654 32502 layer_factory.hpp:77] Creating layer 
L2_b3_sum_eltwise\nI0821 08:59:09.010666 32502 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:59:09.010673 32502 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:59:09.010680 32502 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:59:09.010687 32502 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:59:09.010715 32502 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:59:09.010725 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010730 32502 net.cpp:165] Memory required for data: 1428276400\nI0821 08:59:09.010735 32502 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:59:09.010752 32502 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:59:09.010761 32502 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:59:09.010767 32502 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:59:09.010797 32502 net.cpp:150] Setting up L2_b3_relu\nI0821 08:59:09.010805 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010809 32502 net.cpp:165] Memory required for data: 1431553200\nI0821 08:59:09.010814 32502 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:09.010823 32502 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:09.010828 32502 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:59:09.010835 32502 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:09.010854 32502 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:09.010908 32502 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:59:09.010921 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010927 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.010932 32502 net.cpp:165] Memory required for 
data: 1438106800\nI0821 08:59:09.010937 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:59:09.010952 32502 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:59:09.010958 32502 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:59:09.010968 32502 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:59:09.011471 32502 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:59:09.011484 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.011489 32502 net.cpp:165] Memory required for data: 1441383600\nI0821 08:59:09.011498 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:59:09.011512 32502 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:59:09.011518 32502 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:59:09.011528 32502 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:59:09.011806 32502 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:59:09.011819 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.011824 32502 net.cpp:165] Memory required for data: 1444660400\nI0821 08:59:09.011835 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:09.011844 32502 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:59:09.011850 32502 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:59:09.011858 32502 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:09.011920 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:59:09.012080 32502 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:59:09.012096 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.012102 32502 net.cpp:165] Memory required for data: 1447937200\nI0821 08:59:09.012111 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:59:09.012120 32502 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:59:09.012125 32502 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:59:09.012133 
32502 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:59:09.012142 32502 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:59:09.012150 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.012154 32502 net.cpp:165] Memory required for data: 1451214000\nI0821 08:59:09.012158 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:59:09.012173 32502 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:59:09.012179 32502 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:59:09.012192 32502 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:59:09.012691 32502 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:59:09.012704 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.012709 32502 net.cpp:165] Memory required for data: 1454490800\nI0821 08:59:09.012718 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:59:09.012730 32502 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:59:09.012737 32502 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:59:09.012754 32502 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:59:09.013025 32502 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:59:09.013037 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.013042 32502 net.cpp:165] Memory required for data: 1457767600\nI0821 08:59:09.013053 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:09.013062 32502 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:59:09.013068 32502 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:59:09.013077 32502 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:59:09.013149 32502 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:59:09.013306 32502 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:59:09.013319 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.013324 32502 net.cpp:165] Memory required for data: 1461044400\nI0821 
08:59:09.013334 32502 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:59:09.013345 32502 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:59:09.013352 32502 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:59:09.013360 32502 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:59:09.013367 32502 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:59:09.013394 32502 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:59:09.013403 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.013408 32502 net.cpp:165] Memory required for data: 1464321200\nI0821 08:59:09.013413 32502 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:59:09.013425 32502 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:59:09.013432 32502 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:59:09.013438 32502 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:59:09.013448 32502 net.cpp:150] Setting up L2_b4_relu\nI0821 08:59:09.013455 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.013459 32502 net.cpp:165] Memory required for data: 1467598000\nI0821 08:59:09.013464 32502 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:09.013471 32502 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:09.013478 32502 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:59:09.013484 32502 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:09.013494 32502 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:09.013545 32502 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:59:09.013556 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.013563 32502 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0821 08:59:09.013568 32502 net.cpp:165] Memory required for data: 1474151600\nI0821 08:59:09.013572 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:59:09.013586 32502 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:59:09.013593 32502 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:59:09.013602 32502 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:59:09.014107 32502 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:59:09.014122 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.014127 32502 net.cpp:165] Memory required for data: 1477428400\nI0821 08:59:09.014137 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:59:09.014150 32502 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:59:09.014158 32502 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:59:09.014168 32502 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:59:09.014442 32502 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:59:09.014456 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.014461 32502 net.cpp:165] Memory required for data: 1480705200\nI0821 08:59:09.014472 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:09.014480 32502 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:59:09.014487 32502 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:59:09.014494 32502 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:09.014559 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:59:09.014719 32502 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:59:09.014732 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.014737 32502 net.cpp:165] Memory required for data: 1483982000\nI0821 08:59:09.014760 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:59:09.014772 32502 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:59:09.014780 32502 
net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:59:09.014787 32502 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:59:09.014797 32502 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:59:09.014804 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.014808 32502 net.cpp:165] Memory required for data: 1487258800\nI0821 08:59:09.014813 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:59:09.014827 32502 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:59:09.014832 32502 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:59:09.014842 32502 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:59:09.015338 32502 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:59:09.015353 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.015358 32502 net.cpp:165] Memory required for data: 1490535600\nI0821 08:59:09.015367 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:59:09.015379 32502 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:59:09.015386 32502 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:59:09.015394 32502 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:59:09.015666 32502 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:59:09.015681 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.015686 32502 net.cpp:165] Memory required for data: 1493812400\nI0821 08:59:09.015697 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:09.015707 32502 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:59:09.015713 32502 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:59:09.015720 32502 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:59:09.015786 32502 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:59:09.015952 32502 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:59:09.015965 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 
08:59:09.015970 32502 net.cpp:165] Memory required for data: 1497089200\nI0821 08:59:09.015980 32502 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:59:09.015990 32502 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:59:09.015995 32502 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:59:09.016002 32502 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:59:09.016013 32502 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:59:09.016041 32502 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:59:09.016050 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.016055 32502 net.cpp:165] Memory required for data: 1500366000\nI0821 08:59:09.016060 32502 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:59:09.016072 32502 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:59:09.016077 32502 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:59:09.016084 32502 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:59:09.016094 32502 net.cpp:150] Setting up L2_b5_relu\nI0821 08:59:09.016100 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.016105 32502 net.cpp:165] Memory required for data: 1503642800\nI0821 08:59:09.016110 32502 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:09.016116 32502 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:09.016122 32502 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:59:09.016129 32502 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:09.016139 32502 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:09.016189 32502 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:59:09.016201 32502 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0821 08:59:09.016216 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.016221 32502 net.cpp:165] Memory required for data: 1510196400\nI0821 08:59:09.016225 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:59:09.016239 32502 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:59:09.016247 32502 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:59:09.016255 32502 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:59:09.016764 32502 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:59:09.016779 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.016784 32502 net.cpp:165] Memory required for data: 1513473200\nI0821 08:59:09.016793 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:59:09.016805 32502 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:59:09.016813 32502 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:59:09.016822 32502 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:59:09.017092 32502 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:59:09.017105 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.017110 32502 net.cpp:165] Memory required for data: 1516750000\nI0821 08:59:09.017122 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:09.017130 32502 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:59:09.017137 32502 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:59:09.017143 32502 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:09.017205 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:59:09.017362 32502 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:59:09.017375 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.017380 32502 net.cpp:165] Memory required for data: 1520026800\nI0821 08:59:09.017390 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:59:09.017398 32502 
net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:59:09.017405 32502 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:59:09.017415 32502 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:59:09.017424 32502 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:59:09.017431 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.017436 32502 net.cpp:165] Memory required for data: 1523303600\nI0821 08:59:09.017441 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:59:09.017454 32502 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:59:09.017460 32502 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:59:09.017469 32502 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:59:09.018002 32502 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:59:09.018018 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018023 32502 net.cpp:165] Memory required for data: 1526580400\nI0821 08:59:09.018031 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:59:09.018043 32502 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:59:09.018050 32502 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:59:09.018059 32502 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:59:09.018329 32502 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:59:09.018345 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018350 32502 net.cpp:165] Memory required for data: 1529857200\nI0821 08:59:09.018362 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:09.018369 32502 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:59:09.018375 32502 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:59:09.018383 32502 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:59:09.018443 32502 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:59:09.018609 32502 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 
08:59:09.018621 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018626 32502 net.cpp:165] Memory required for data: 1533134000\nI0821 08:59:09.018642 32502 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:59:09.018651 32502 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:59:09.018658 32502 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:59:09.018666 32502 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:59:09.018676 32502 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:59:09.018704 32502 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:59:09.018717 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018721 32502 net.cpp:165] Memory required for data: 1536410800\nI0821 08:59:09.018726 32502 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:59:09.018738 32502 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:59:09.018749 32502 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:59:09.018757 32502 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:59:09.018767 32502 net.cpp:150] Setting up L2_b6_relu\nI0821 08:59:09.018774 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018779 32502 net.cpp:165] Memory required for data: 1539687600\nI0821 08:59:09.018784 32502 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:09.018791 32502 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:09.018796 32502 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:59:09.018803 32502 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:09.018813 32502 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:09.018865 32502 net.cpp:150] Setting up 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:59:09.018877 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018883 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.018888 32502 net.cpp:165] Memory required for data: 1546241200\nI0821 08:59:09.018893 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:59:09.018908 32502 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:59:09.018915 32502 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:59:09.018924 32502 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:59:09.019425 32502 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:59:09.019439 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.019444 32502 net.cpp:165] Memory required for data: 1549518000\nI0821 08:59:09.019454 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:59:09.019465 32502 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:59:09.019472 32502 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:59:09.019480 32502 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:59:09.019763 32502 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:59:09.019779 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.019784 32502 net.cpp:165] Memory required for data: 1552794800\nI0821 08:59:09.019794 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:09.019804 32502 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:59:09.019809 32502 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:59:09.019817 32502 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:09.019877 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:59:09.020043 32502 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:59:09.020056 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.020061 32502 net.cpp:165] Memory required for data: 
1556071600\nI0821 08:59:09.020071 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:59:09.020078 32502 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:59:09.020086 32502 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:59:09.020095 32502 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:59:09.020112 32502 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:59:09.020119 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.020124 32502 net.cpp:165] Memory required for data: 1559348400\nI0821 08:59:09.020129 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:59:09.020143 32502 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:59:09.020148 32502 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:59:09.020157 32502 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:59:09.020649 32502 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:59:09.020663 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.020668 32502 net.cpp:165] Memory required for data: 1562625200\nI0821 08:59:09.020678 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:59:09.020689 32502 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:59:09.020696 32502 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:59:09.020704 32502 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:59:09.020983 32502 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:59:09.020998 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021003 32502 net.cpp:165] Memory required for data: 1565902000\nI0821 08:59:09.021013 32502 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:09.021049 32502 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:59:09.021059 32502 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:59:09.021067 32502 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:59:09.021131 32502 
layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:59:09.021292 32502 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:59:09.021306 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021311 32502 net.cpp:165] Memory required for data: 1569178800\nI0821 08:59:09.021320 32502 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:59:09.021329 32502 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:59:09.021335 32502 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:59:09.021342 32502 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:59:09.021353 32502 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:59:09.021383 32502 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:59:09.021391 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021396 32502 net.cpp:165] Memory required for data: 1572455600\nI0821 08:59:09.021401 32502 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:59:09.021409 32502 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:59:09.021419 32502 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:59:09.021425 32502 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:59:09.021435 32502 net.cpp:150] Setting up L2_b7_relu\nI0821 08:59:09.021442 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021446 32502 net.cpp:165] Memory required for data: 1575732400\nI0821 08:59:09.021451 32502 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:09.021458 32502 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:09.021463 32502 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:59:09.021476 32502 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:09.021486 32502 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:09.021536 32502 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:59:09.021548 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021554 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.021559 32502 net.cpp:165] Memory required for data: 1582286000\nI0821 08:59:09.021565 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:59:09.021580 32502 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:59:09.021594 32502 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:59:09.021602 32502 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:59:09.022114 32502 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:59:09.022128 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.022133 32502 net.cpp:165] Memory required for data: 1585562800\nI0821 08:59:09.022142 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:59:09.022155 32502 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:59:09.022161 32502 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:59:09.022169 32502 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:59:09.022440 32502 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:59:09.022454 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.022459 32502 net.cpp:165] Memory required for data: 1588839600\nI0821 08:59:09.022469 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:09.022478 32502 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:59:09.022485 32502 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:59:09.022495 32502 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:09.022555 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:59:09.022716 32502 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:59:09.022728 32502 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0821 08:59:09.022733 32502 net.cpp:165] Memory required for data: 1592116400\nI0821 08:59:09.022747 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:59:09.022756 32502 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:59:09.022763 32502 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:59:09.022773 32502 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:59:09.022784 32502 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:59:09.022790 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.022795 32502 net.cpp:165] Memory required for data: 1595393200\nI0821 08:59:09.022800 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:59:09.022814 32502 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:59:09.022820 32502 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:59:09.022828 32502 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:59:09.023334 32502 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:59:09.023350 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.023355 32502 net.cpp:165] Memory required for data: 1598670000\nI0821 08:59:09.023363 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:59:09.023375 32502 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:59:09.023382 32502 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:59:09.023391 32502 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:59:09.023659 32502 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:59:09.023674 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.023677 32502 net.cpp:165] Memory required for data: 1601946800\nI0821 08:59:09.023689 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:09.023697 32502 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:59:09.023703 32502 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:59:09.023711 32502 net.cpp:395] 
L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:59:09.023780 32502 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:59:09.023938 32502 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:59:09.023957 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.023962 32502 net.cpp:165] Memory required for data: 1605223600\nI0821 08:59:09.023970 32502 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:59:09.023979 32502 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:59:09.023985 32502 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:59:09.023993 32502 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:59:09.024008 32502 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:59:09.024039 32502 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:59:09.024049 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.024055 32502 net.cpp:165] Memory required for data: 1608500400\nI0821 08:59:09.024060 32502 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:59:09.024067 32502 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:59:09.024073 32502 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:59:09.024082 32502 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:59:09.024092 32502 net.cpp:150] Setting up L2_b8_relu\nI0821 08:59:09.024099 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.024103 32502 net.cpp:165] Memory required for data: 1611777200\nI0821 08:59:09.024108 32502 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:09.024116 32502 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:09.024121 32502 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:59:09.024130 32502 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 
08:59:09.024140 32502 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:09.024188 32502 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:59:09.024199 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.024205 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.024210 32502 net.cpp:165] Memory required for data: 1618330800\nI0821 08:59:09.024215 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:59:09.024229 32502 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:59:09.024235 32502 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:59:09.024245 32502 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:59:09.024752 32502 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:59:09.024767 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.024772 32502 net.cpp:165] Memory required for data: 1621607600\nI0821 08:59:09.024781 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:59:09.024793 32502 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:59:09.024799 32502 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:59:09.024808 32502 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:59:09.025081 32502 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:59:09.025094 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.025099 32502 net.cpp:165] Memory required for data: 1624884400\nI0821 08:59:09.025110 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:09.025118 32502 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:59:09.025125 32502 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:59:09.025135 32502 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:09.025197 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:59:09.025357 32502 net.cpp:150] Setting up 
L2_b9_cbr1_scale\nI0821 08:59:09.025372 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.025375 32502 net.cpp:165] Memory required for data: 1628161200\nI0821 08:59:09.025385 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:59:09.025393 32502 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:59:09.025399 32502 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:59:09.025406 32502 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:59:09.025416 32502 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:59:09.025424 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.025429 32502 net.cpp:165] Memory required for data: 1631438000\nI0821 08:59:09.025432 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:59:09.025446 32502 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:59:09.025460 32502 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:59:09.025471 32502 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:59:09.025979 32502 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:59:09.025993 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.025998 32502 net.cpp:165] Memory required for data: 1634714800\nI0821 08:59:09.026007 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:59:09.026020 32502 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:59:09.026026 32502 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:59:09.026037 32502 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:59:09.026301 32502 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:59:09.026314 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026319 32502 net.cpp:165] Memory required for data: 1637991600\nI0821 08:59:09.026330 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:09.026340 32502 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:59:09.026345 32502 net.cpp:434] L2_b9_cbr2_scale 
<- L2_b9_cbr2_bn_top\nI0821 08:59:09.026353 32502 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:59:09.026415 32502 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:59:09.026576 32502 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:59:09.026592 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026597 32502 net.cpp:165] Memory required for data: 1641268400\nI0821 08:59:09.026605 32502 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:59:09.026614 32502 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:59:09.026621 32502 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:59:09.026628 32502 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:59:09.026635 32502 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:59:09.026670 32502 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:59:09.026680 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026685 32502 net.cpp:165] Memory required for data: 1644545200\nI0821 08:59:09.026690 32502 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:59:09.026698 32502 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:59:09.026705 32502 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:59:09.026715 32502 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:59:09.026724 32502 net.cpp:150] Setting up L2_b9_relu\nI0821 08:59:09.026731 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026736 32502 net.cpp:165] Memory required for data: 1647822000\nI0821 08:59:09.026741 32502 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:09.026754 32502 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:09.026760 32502 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:59:09.026770 32502 net.cpp:408] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:09.026782 32502 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:09.026829 32502 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:59:09.026840 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026847 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.026852 32502 net.cpp:165] Memory required for data: 1654375600\nI0821 08:59:09.026857 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0821 08:59:09.026871 32502 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0821 08:59:09.026878 32502 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:59:09.026887 32502 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0821 08:59:09.027387 32502 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0821 08:59:09.027401 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.027412 32502 net.cpp:165] Memory required for data: 1657652400\nI0821 08:59:09.027422 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0821 08:59:09.027434 32502 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0821 08:59:09.027441 32502 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0821 08:59:09.027449 32502 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0821 08:59:09.027721 32502 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0821 08:59:09.027735 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.027740 32502 net.cpp:165] Memory required for data: 1660929200\nI0821 08:59:09.027757 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:09.027766 32502 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0821 08:59:09.027773 32502 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0821 08:59:09.027781 32502 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:09.027845 
32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0821 08:59:09.028005 32502 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0821 08:59:09.028022 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.028026 32502 net.cpp:165] Memory required for data: 1664206000\nI0821 08:59:09.028035 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0821 08:59:09.028043 32502 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0821 08:59:09.028050 32502 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0821 08:59:09.028057 32502 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0821 08:59:09.028067 32502 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0821 08:59:09.028074 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.028079 32502 net.cpp:165] Memory required for data: 1667482800\nI0821 08:59:09.028084 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0821 08:59:09.028097 32502 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0821 08:59:09.028103 32502 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0821 08:59:09.028115 32502 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0821 08:59:09.028614 32502 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0821 08:59:09.028627 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.028632 32502 net.cpp:165] Memory required for data: 1670759600\nI0821 08:59:09.028641 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0821 08:59:09.028653 32502 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0821 08:59:09.028661 32502 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0821 08:59:09.028674 32502 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0821 08:59:09.028975 32502 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0821 08:59:09.028990 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.028995 32502 net.cpp:165] Memory required for data: 1674036400\nI0821 08:59:09.029006 32502 layer_factory.hpp:77] Creating layer 
L2_b10_cbr2_scale\nI0821 08:59:09.029014 32502 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0821 08:59:09.029021 32502 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0821 08:59:09.029028 32502 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0821 08:59:09.029094 32502 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0821 08:59:09.029253 32502 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0821 08:59:09.029266 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.029271 32502 net.cpp:165] Memory required for data: 1677313200\nI0821 08:59:09.029280 32502 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0821 08:59:09.029292 32502 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0821 08:59:09.029299 32502 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0821 08:59:09.029306 32502 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:59:09.029314 32502 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0821 08:59:09.029342 32502 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0821 08:59:09.029351 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.029356 32502 net.cpp:165] Memory required for data: 1680590000\nI0821 08:59:09.029368 32502 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0821 08:59:09.029379 32502 net.cpp:100] Creating Layer L2_b10_relu\nI0821 08:59:09.029386 32502 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0821 08:59:09.029392 32502 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0821 08:59:09.029402 32502 net.cpp:150] Setting up L2_b10_relu\nI0821 08:59:09.029409 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.029413 32502 net.cpp:165] Memory required for data: 1683866800\nI0821 08:59:09.029418 32502 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:09.029425 32502 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 
08:59:09.029430 32502 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0821 08:59:09.029438 32502 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:09.029448 32502 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:09.029498 32502 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0821 08:59:09.029510 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.029516 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.029521 32502 net.cpp:165] Memory required for data: 1690420400\nI0821 08:59:09.029526 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0821 08:59:09.029541 32502 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0821 08:59:09.029547 32502 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0821 08:59:09.029556 32502 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0821 08:59:09.030068 32502 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0821 08:59:09.030083 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.030088 32502 net.cpp:165] Memory required for data: 1693697200\nI0821 08:59:09.030097 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0821 08:59:09.030109 32502 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0821 08:59:09.030117 32502 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0821 08:59:09.030128 32502 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0821 08:59:09.030396 32502 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0821 08:59:09.030409 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.030414 32502 net.cpp:165] Memory required for data: 1696974000\nI0821 08:59:09.030424 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:09.030433 32502 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0821 08:59:09.030439 32502 
net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0821 08:59:09.030447 32502 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:09.030514 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0821 08:59:09.030671 32502 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0821 08:59:09.030688 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.030692 32502 net.cpp:165] Memory required for data: 1700250800\nI0821 08:59:09.030701 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0821 08:59:09.030709 32502 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0821 08:59:09.030715 32502 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0821 08:59:09.030724 32502 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0821 08:59:09.030732 32502 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0821 08:59:09.030740 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.030750 32502 net.cpp:165] Memory required for data: 1703527600\nI0821 08:59:09.030755 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0821 08:59:09.030771 32502 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0821 08:59:09.030778 32502 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0821 08:59:09.030789 32502 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0821 08:59:09.031293 32502 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0821 08:59:09.031308 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.031313 32502 net.cpp:165] Memory required for data: 1706804400\nI0821 08:59:09.031322 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0821 08:59:09.031334 32502 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0821 08:59:09.031342 32502 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0821 08:59:09.031352 32502 net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0821 08:59:09.031625 32502 net.cpp:150] Setting up L2_b11_cbr2_bn\nI0821 08:59:09.031638 32502 net.cpp:157] Top shape: 100 32 16 
16 (819200)\nI0821 08:59:09.031643 32502 net.cpp:165] Memory required for data: 1710081200\nI0821 08:59:09.031654 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:09.031663 32502 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0821 08:59:09.031669 32502 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0821 08:59:09.031677 32502 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0821 08:59:09.031740 32502 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0821 08:59:09.031929 32502 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0821 08:59:09.031944 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.031949 32502 net.cpp:165] Memory required for data: 1713358000\nI0821 08:59:09.031958 32502 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0821 08:59:09.031970 32502 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0821 08:59:09.031976 32502 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0821 08:59:09.031983 32502 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0821 08:59:09.031991 32502 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0821 08:59:09.032021 32502 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0821 08:59:09.032030 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.032035 32502 net.cpp:165] Memory required for data: 1716634800\nI0821 08:59:09.032040 32502 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0821 08:59:09.032052 32502 net.cpp:100] Creating Layer L2_b11_relu\nI0821 08:59:09.032058 32502 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0821 08:59:09.032065 32502 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0821 08:59:09.032074 32502 net.cpp:150] Setting up L2_b11_relu\nI0821 08:59:09.032081 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.032086 32502 net.cpp:165] Memory required for data: 1719911600\nI0821 08:59:09.032090 32502 layer_factory.hpp:77] Creating 
layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:09.032097 32502 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:09.032102 32502 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0821 08:59:09.032110 32502 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:09.032119 32502 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:09.032174 32502 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0821 08:59:09.032186 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.032193 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.032197 32502 net.cpp:165] Memory required for data: 1726465200\nI0821 08:59:09.032202 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0821 08:59:09.032217 32502 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0821 08:59:09.032223 32502 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0821 08:59:09.032232 32502 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0821 08:59:09.033751 32502 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0821 08:59:09.033768 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.033773 32502 net.cpp:165] Memory required for data: 1729742000\nI0821 08:59:09.033783 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0821 08:59:09.033802 32502 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0821 08:59:09.033808 32502 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0821 08:59:09.033820 32502 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0821 08:59:09.034096 32502 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0821 08:59:09.034111 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.034116 32502 net.cpp:165] Memory required for data: 1733018800\nI0821 08:59:09.034126 32502 
layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:09.034138 32502 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0821 08:59:09.034145 32502 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0821 08:59:09.034153 32502 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:09.034214 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0821 08:59:09.034379 32502 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0821 08:59:09.034392 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.034397 32502 net.cpp:165] Memory required for data: 1736295600\nI0821 08:59:09.034406 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0821 08:59:09.034420 32502 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0821 08:59:09.034427 32502 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0821 08:59:09.034435 32502 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0821 08:59:09.034446 32502 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0821 08:59:09.034452 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.034456 32502 net.cpp:165] Memory required for data: 1739572400\nI0821 08:59:09.034461 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0821 08:59:09.034476 32502 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0821 08:59:09.034482 32502 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0821 08:59:09.034492 32502 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0821 08:59:09.034991 32502 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0821 08:59:09.035006 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035010 32502 net.cpp:165] Memory required for data: 1742849200\nI0821 08:59:09.035019 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0821 08:59:09.035029 32502 net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0821 08:59:09.035035 32502 net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0821 08:59:09.035049 32502 net.cpp:408] 
L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0821 08:59:09.035327 32502 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0821 08:59:09.035341 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035346 32502 net.cpp:165] Memory required for data: 1746126000\nI0821 08:59:09.035356 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:09.035368 32502 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0821 08:59:09.035374 32502 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0821 08:59:09.035382 32502 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0821 08:59:09.035444 32502 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0821 08:59:09.035605 32502 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0821 08:59:09.035619 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035624 32502 net.cpp:165] Memory required for data: 1749402800\nI0821 08:59:09.035634 32502 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0821 08:59:09.035645 32502 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0821 08:59:09.035652 32502 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0821 08:59:09.035660 32502 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0821 08:59:09.035667 32502 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0821 08:59:09.035698 32502 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0821 08:59:09.035708 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035712 32502 net.cpp:165] Memory required for data: 1752679600\nI0821 08:59:09.035717 32502 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0821 08:59:09.035725 32502 net.cpp:100] Creating Layer L2_b12_relu\nI0821 08:59:09.035738 32502 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0821 08:59:09.035753 32502 net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0821 08:59:09.035764 32502 net.cpp:150] Setting up L2_b12_relu\nI0821 08:59:09.035771 32502 net.cpp:157] Top 
shape: 100 32 16 16 (819200)\nI0821 08:59:09.035776 32502 net.cpp:165] Memory required for data: 1755956400\nI0821 08:59:09.035781 32502 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:09.035791 32502 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:09.035797 32502 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0821 08:59:09.035804 32502 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:09.035815 32502 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:09.035871 32502 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0821 08:59:09.035882 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035889 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.035893 32502 net.cpp:165] Memory required for data: 1762510000\nI0821 08:59:09.035898 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0821 08:59:09.035909 32502 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0821 08:59:09.035917 32502 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0821 08:59:09.035928 32502 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0821 08:59:09.036430 32502 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0821 08:59:09.036444 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.036449 32502 net.cpp:165] Memory required for data: 1765786800\nI0821 08:59:09.036458 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0821 08:59:09.036468 32502 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0821 08:59:09.036474 32502 net.cpp:434] L2_b13_cbr1_bn <- L2_b13_cbr1_conv_top\nI0821 08:59:09.036485 32502 net.cpp:408] L2_b13_cbr1_bn -> L2_b13_cbr1_bn_top\nI0821 08:59:09.036952 32502 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0821 08:59:09.036967 32502 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.036972 32502 net.cpp:165] Memory required for data: 1769063600\nI0821 08:59:09.036983 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:09.036995 32502 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0821 08:59:09.037003 32502 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0821 08:59:09.037010 32502 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:09.037075 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0821 08:59:09.037241 32502 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0821 08:59:09.037255 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.037259 32502 net.cpp:165] Memory required for data: 1772340400\nI0821 08:59:09.037268 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0821 08:59:09.037281 32502 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0821 08:59:09.037288 32502 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0821 08:59:09.037295 32502 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0821 08:59:09.037305 32502 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0821 08:59:09.037312 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.037317 32502 net.cpp:165] Memory required for data: 1775617200\nI0821 08:59:09.037322 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0821 08:59:09.037336 32502 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0821 08:59:09.037343 32502 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0821 08:59:09.037353 32502 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0821 08:59:09.037854 32502 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0821 08:59:09.037874 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.037879 32502 net.cpp:165] Memory required for data: 1778894000\nI0821 08:59:09.037895 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0821 08:59:09.037905 32502 net.cpp:100] Creating 
Layer L2_b13_cbr2_bn\nI0821 08:59:09.037912 32502 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0821 08:59:09.037920 32502 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0821 08:59:09.038197 32502 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0821 08:59:09.038209 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038214 32502 net.cpp:165] Memory required for data: 1782170800\nI0821 08:59:09.038225 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:09.038233 32502 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0821 08:59:09.038240 32502 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0821 08:59:09.038250 32502 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0821 08:59:09.038336 32502 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0821 08:59:09.038506 32502 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0821 08:59:09.038518 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038523 32502 net.cpp:165] Memory required for data: 1785447600\nI0821 08:59:09.038532 32502 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0821 08:59:09.038542 32502 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0821 08:59:09.038548 32502 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0821 08:59:09.038555 32502 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0821 08:59:09.038566 32502 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0821 08:59:09.038595 32502 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0821 08:59:09.038612 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038616 32502 net.cpp:165] Memory required for data: 1788724400\nI0821 08:59:09.038622 32502 layer_factory.hpp:77] Creating layer L2_b13_relu\nI0821 08:59:09.038630 32502 net.cpp:100] Creating Layer L2_b13_relu\nI0821 08:59:09.038636 32502 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0821 08:59:09.038643 32502 net.cpp:395] L2_b13_relu -> 
L2_b13_sum_eltwise_top (in-place)\nI0821 08:59:09.038652 32502 net.cpp:150] Setting up L2_b13_relu\nI0821 08:59:09.038660 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038664 32502 net.cpp:165] Memory required for data: 1792001200\nI0821 08:59:09.038668 32502 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:09.038681 32502 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:09.038686 32502 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0821 08:59:09.038694 32502 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:09.038704 32502 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:09.038763 32502 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0821 08:59:09.038776 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038784 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.038789 32502 net.cpp:165] Memory required for data: 1798554800\nI0821 08:59:09.038794 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0821 08:59:09.038805 32502 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0821 08:59:09.038811 32502 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0821 08:59:09.038823 32502 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0821 08:59:09.039324 32502 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0821 08:59:09.039338 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.039343 32502 net.cpp:165] Memory required for data: 1801831600\nI0821 08:59:09.039351 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0821 08:59:09.039361 32502 net.cpp:100] Creating Layer L2_b14_cbr1_bn\nI0821 08:59:09.039367 32502 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0821 08:59:09.039387 32502 
net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0821 08:59:09.039670 32502 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0821 08:59:09.039683 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.039688 32502 net.cpp:165] Memory required for data: 1805108400\nI0821 08:59:09.039700 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:09.039710 32502 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0821 08:59:09.039717 32502 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0821 08:59:09.039726 32502 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:09.039794 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0821 08:59:09.039961 32502 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0821 08:59:09.039974 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.039979 32502 net.cpp:165] Memory required for data: 1808385200\nI0821 08:59:09.039988 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0821 08:59:09.039996 32502 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0821 08:59:09.040004 32502 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0821 08:59:09.040014 32502 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0821 08:59:09.040024 32502 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0821 08:59:09.040030 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.040035 32502 net.cpp:165] Memory required for data: 1811662000\nI0821 08:59:09.040040 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0821 08:59:09.040053 32502 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0821 08:59:09.040060 32502 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0821 08:59:09.040074 32502 net.cpp:408] L2_b14_cbr2_conv -> L2_b14_cbr2_conv_top\nI0821 08:59:09.040563 32502 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0821 08:59:09.040577 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.040582 32502 net.cpp:165] Memory required for 
data: 1814938800\nI0821 08:59:09.040598 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0821 08:59:09.040607 32502 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0821 08:59:09.040614 32502 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0821 08:59:09.040622 32502 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0821 08:59:09.040908 32502 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0821 08:59:09.040922 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.040927 32502 net.cpp:165] Memory required for data: 1818215600\nI0821 08:59:09.040937 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:09.040947 32502 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0821 08:59:09.040953 32502 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0821 08:59:09.040964 32502 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0821 08:59:09.041025 32502 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0821 08:59:09.041190 32502 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0821 08:59:09.041203 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.041208 32502 net.cpp:165] Memory required for data: 1821492400\nI0821 08:59:09.041218 32502 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0821 08:59:09.041227 32502 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0821 08:59:09.041234 32502 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0821 08:59:09.041241 32502 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0821 08:59:09.041255 32502 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0821 08:59:09.041285 32502 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0821 08:59:09.041296 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.041299 32502 net.cpp:165] Memory required for data: 1824769200\nI0821 08:59:09.041306 32502 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0821 08:59:09.041316 32502 net.cpp:100] Creating Layer 
L2_b14_relu\nI0821 08:59:09.041322 32502 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0821 08:59:09.041329 32502 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0821 08:59:09.041345 32502 net.cpp:150] Setting up L2_b14_relu\nI0821 08:59:09.041353 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.041357 32502 net.cpp:165] Memory required for data: 1828046000\nI0821 08:59:09.041363 32502 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:09.041373 32502 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:09.041378 32502 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0821 08:59:09.041386 32502 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:09.041395 32502 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:09.041445 32502 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0821 08:59:09.041460 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.041467 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.041472 32502 net.cpp:165] Memory required for data: 1834599600\nI0821 08:59:09.041477 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0821 08:59:09.041488 32502 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0821 08:59:09.041494 32502 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0821 08:59:09.041504 32502 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0821 08:59:09.042013 32502 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0821 08:59:09.042028 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.042033 32502 net.cpp:165] Memory required for data: 1837876400\nI0821 08:59:09.042042 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0821 08:59:09.042054 32502 
net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0821 08:59:09.042062 32502 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0821 08:59:09.042069 32502 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0821 08:59:09.042351 32502 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0821 08:59:09.042363 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.042368 32502 net.cpp:165] Memory required for data: 1841153200\nI0821 08:59:09.042378 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:09.042387 32502 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0821 08:59:09.042393 32502 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0821 08:59:09.042404 32502 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:09.042464 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0821 08:59:09.042625 32502 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0821 08:59:09.042639 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.042644 32502 net.cpp:165] Memory required for data: 1844430000\nI0821 08:59:09.042652 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0821 08:59:09.042661 32502 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0821 08:59:09.042667 32502 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0821 08:59:09.042677 32502 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0821 08:59:09.042688 32502 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0821 08:59:09.042695 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.042699 32502 net.cpp:165] Memory required for data: 1847706800\nI0821 08:59:09.042704 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0821 08:59:09.042718 32502 net.cpp:100] Creating Layer L2_b15_cbr2_conv\nI0821 08:59:09.042724 32502 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0821 08:59:09.042732 32502 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0821 08:59:09.043232 32502 net.cpp:150] Setting up 
L2_b15_cbr2_conv\nI0821 08:59:09.043247 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.043252 32502 net.cpp:165] Memory required for data: 1850983600\nI0821 08:59:09.043262 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0821 08:59:09.043272 32502 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0821 08:59:09.043287 32502 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0821 08:59:09.043295 32502 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0821 08:59:09.043572 32502 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0821 08:59:09.043586 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.043591 32502 net.cpp:165] Memory required for data: 1854260400\nI0821 08:59:09.043601 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:09.043609 32502 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0821 08:59:09.043617 32502 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0821 08:59:09.043627 32502 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0821 08:59:09.043689 32502 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0821 08:59:09.043862 32502 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0821 08:59:09.043876 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.043881 32502 net.cpp:165] Memory required for data: 1857537200\nI0821 08:59:09.043891 32502 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0821 08:59:09.043900 32502 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0821 08:59:09.043906 32502 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0821 08:59:09.043913 32502 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0821 08:59:09.043925 32502 net.cpp:408] L2_b15_sum_eltwise -> L2_b15_sum_eltwise_top\nI0821 08:59:09.043954 32502 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0821 08:59:09.043963 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.043968 32502 net.cpp:165] Memory required for 
data: 1860814000\nI0821 08:59:09.043973 32502 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0821 08:59:09.043984 32502 net.cpp:100] Creating Layer L2_b15_relu\nI0821 08:59:09.043990 32502 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0821 08:59:09.043998 32502 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0821 08:59:09.044008 32502 net.cpp:150] Setting up L2_b15_relu\nI0821 08:59:09.044014 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.044018 32502 net.cpp:165] Memory required for data: 1864090800\nI0821 08:59:09.044023 32502 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:09.044034 32502 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:09.044039 32502 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0821 08:59:09.044046 32502 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:09.044056 32502 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:09.044106 32502 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0821 08:59:09.044121 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.044127 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.044131 32502 net.cpp:165] Memory required for data: 1870644400\nI0821 08:59:09.044137 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0821 08:59:09.044148 32502 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0821 08:59:09.044154 32502 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0821 08:59:09.044163 32502 net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0821 08:59:09.044667 32502 net.cpp:150] Setting up L2_b16_cbr1_conv\nI0821 08:59:09.044682 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.044687 32502 net.cpp:165] 
Memory required for data: 1873921200\nI0821 08:59:09.044695 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0821 08:59:09.044708 32502 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0821 08:59:09.044714 32502 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0821 08:59:09.044723 32502 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0821 08:59:09.045008 32502 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0821 08:59:09.045028 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.045033 32502 net.cpp:165] Memory required for data: 1877198000\nI0821 08:59:09.045044 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:09.045053 32502 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0821 08:59:09.045059 32502 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0821 08:59:09.045070 32502 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:09.045132 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0821 08:59:09.045297 32502 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0821 08:59:09.045310 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.045315 32502 net.cpp:165] Memory required for data: 1880474800\nI0821 08:59:09.045325 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0821 08:59:09.045332 32502 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0821 08:59:09.045339 32502 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0821 08:59:09.045349 32502 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0821 08:59:09.045359 32502 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0821 08:59:09.045367 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.045372 32502 net.cpp:165] Memory required for data: 1883751600\nI0821 08:59:09.045377 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0821 08:59:09.045390 32502 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0821 08:59:09.045397 32502 net.cpp:434] L2_b16_cbr2_conv <- 
L2_b16_cbr1_bn_top\nI0821 08:59:09.045405 32502 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0821 08:59:09.045914 32502 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0821 08:59:09.045929 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.045934 32502 net.cpp:165] Memory required for data: 1887028400\nI0821 08:59:09.045943 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0821 08:59:09.045955 32502 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0821 08:59:09.045963 32502 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0821 08:59:09.045971 32502 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0821 08:59:09.046244 32502 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0821 08:59:09.046257 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046262 32502 net.cpp:165] Memory required for data: 1890305200\nI0821 08:59:09.046272 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:09.046281 32502 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0821 08:59:09.046288 32502 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0821 08:59:09.046298 32502 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0821 08:59:09.046360 32502 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0821 08:59:09.046525 32502 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0821 08:59:09.046538 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046543 32502 net.cpp:165] Memory required for data: 1893582000\nI0821 08:59:09.046552 32502 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0821 08:59:09.046561 32502 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0821 08:59:09.046568 32502 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0821 08:59:09.046576 32502 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0821 08:59:09.046586 32502 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0821 08:59:09.046614 32502 net.cpp:150] Setting up 
L2_b16_sum_eltwise\nI0821 08:59:09.046624 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046628 32502 net.cpp:165] Memory required for data: 1896858800\nI0821 08:59:09.046633 32502 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0821 08:59:09.046641 32502 net.cpp:100] Creating Layer L2_b16_relu\nI0821 08:59:09.046648 32502 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0821 08:59:09.046658 32502 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0821 08:59:09.046667 32502 net.cpp:150] Setting up L2_b16_relu\nI0821 08:59:09.046674 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046686 32502 net.cpp:165] Memory required for data: 1900135600\nI0821 08:59:09.046691 32502 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:09.046700 32502 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:09.046705 32502 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0821 08:59:09.046715 32502 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:09.046725 32502 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:09.046782 32502 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0821 08:59:09.046793 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046800 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.046805 32502 net.cpp:165] Memory required for data: 1906689200\nI0821 08:59:09.046810 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0821 08:59:09.046824 32502 net.cpp:100] Creating Layer L2_b17_cbr1_conv\nI0821 08:59:09.046831 32502 net.cpp:434] L2_b17_cbr1_conv <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0821 08:59:09.046841 32502 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0821 08:59:09.047345 32502 
net.cpp:150] Setting up L2_b17_cbr1_conv\nI0821 08:59:09.047359 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.047364 32502 net.cpp:165] Memory required for data: 1909966000\nI0821 08:59:09.047374 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0821 08:59:09.047386 32502 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0821 08:59:09.047394 32502 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0821 08:59:09.047401 32502 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0821 08:59:09.047683 32502 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0821 08:59:09.047696 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.047701 32502 net.cpp:165] Memory required for data: 1913242800\nI0821 08:59:09.047713 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:09.047720 32502 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0821 08:59:09.047727 32502 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0821 08:59:09.047737 32502 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:09.047807 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0821 08:59:09.047974 32502 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0821 08:59:09.047988 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.047993 32502 net.cpp:165] Memory required for data: 1916519600\nI0821 08:59:09.048002 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0821 08:59:09.048010 32502 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0821 08:59:09.048017 32502 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0821 08:59:09.048027 32502 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0821 08:59:09.048038 32502 net.cpp:150] Setting up L2_b17_cbr1_relu\nI0821 08:59:09.048045 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.048049 32502 net.cpp:165] Memory required for data: 1919796400\nI0821 08:59:09.048054 32502 layer_factory.hpp:77] Creating layer 
L2_b17_cbr2_conv\nI0821 08:59:09.048070 32502 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0821 08:59:09.048076 32502 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0821 08:59:09.048085 32502 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0821 08:59:09.048580 32502 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0821 08:59:09.048594 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.048600 32502 net.cpp:165] Memory required for data: 1923073200\nI0821 08:59:09.048609 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0821 08:59:09.048619 32502 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0821 08:59:09.048629 32502 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0821 08:59:09.048636 32502 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0821 08:59:09.048923 32502 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0821 08:59:09.048936 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.048941 32502 net.cpp:165] Memory required for data: 1926350000\nI0821 08:59:09.048952 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:09.048961 32502 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0821 08:59:09.048967 32502 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0821 08:59:09.048975 32502 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0821 08:59:09.049043 32502 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0821 08:59:09.049207 32502 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0821 08:59:09.049223 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.049228 32502 net.cpp:165] Memory required for data: 1929626800\nI0821 08:59:09.049237 32502 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0821 08:59:09.049247 32502 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0821 08:59:09.049253 32502 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0821 08:59:09.049260 32502 net.cpp:434] L2_b17_sum_eltwise <- 
L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0821 08:59:09.049268 32502 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0821 08:59:09.049304 32502 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0821 08:59:09.049312 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.049317 32502 net.cpp:165] Memory required for data: 1932903600\nI0821 08:59:09.049322 32502 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0821 08:59:09.049330 32502 net.cpp:100] Creating Layer L2_b17_relu\nI0821 08:59:09.049336 32502 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0821 08:59:09.049346 32502 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0821 08:59:09.049356 32502 net.cpp:150] Setting up L2_b17_relu\nI0821 08:59:09.049363 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.049367 32502 net.cpp:165] Memory required for data: 1936180400\nI0821 08:59:09.049372 32502 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:09.049379 32502 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:09.049386 32502 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0821 08:59:09.049396 32502 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:09.049405 32502 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:09.049456 32502 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0821 08:59:09.049468 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.049474 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.049479 32502 net.cpp:165] Memory required for data: 1942734000\nI0821 08:59:09.049484 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_conv\nI0821 08:59:09.049499 32502 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0821 08:59:09.049505 32502 
net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0821 08:59:09.049515 32502 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0821 08:59:09.050056 32502 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0821 08:59:09.050074 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.050081 32502 net.cpp:165] Memory required for data: 1946010800\nI0821 08:59:09.050089 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0821 08:59:09.050101 32502 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0821 08:59:09.050108 32502 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0821 08:59:09.050117 32502 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0821 08:59:09.050393 32502 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0821 08:59:09.050407 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.050412 32502 net.cpp:165] Memory required for data: 1949287600\nI0821 08:59:09.050423 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:09.050441 32502 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0821 08:59:09.050447 32502 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0821 08:59:09.050456 32502 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:09.050519 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0821 08:59:09.050683 32502 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0821 08:59:09.050698 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.050703 32502 net.cpp:165] Memory required for data: 1952564400\nI0821 08:59:09.050711 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0821 08:59:09.050722 32502 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0821 08:59:09.050729 32502 net.cpp:434] L2_b18_cbr1_relu <- L2_b18_cbr1_bn_top\nI0821 08:59:09.050736 32502 net.cpp:395] L2_b18_cbr1_relu -> L2_b18_cbr1_bn_top (in-place)\nI0821 08:59:09.050752 32502 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0821 08:59:09.050760 32502 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.050765 32502 net.cpp:165] Memory required for data: 1955841200\nI0821 08:59:09.050770 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0821 08:59:09.050783 32502 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0821 08:59:09.050789 32502 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0821 08:59:09.050798 32502 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0821 08:59:09.051300 32502 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0821 08:59:09.051314 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.051318 32502 net.cpp:165] Memory required for data: 1959118000\nI0821 08:59:09.051327 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0821 08:59:09.051340 32502 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0821 08:59:09.051347 32502 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0821 08:59:09.051355 32502 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0821 08:59:09.051630 32502 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0821 08:59:09.051645 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.051651 32502 net.cpp:165] Memory required for data: 1962394800\nI0821 08:59:09.051661 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:09.051669 32502 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0821 08:59:09.051676 32502 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0821 08:59:09.051683 32502 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0821 08:59:09.051748 32502 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0821 08:59:09.051918 32502 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0821 08:59:09.051930 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.051935 32502 net.cpp:165] Memory required for data: 1965671600\nI0821 08:59:09.051945 32502 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0821 08:59:09.051954 32502 net.cpp:100] Creating Layer 
L2_b18_sum_eltwise\nI0821 08:59:09.051961 32502 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0821 08:59:09.051968 32502 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0821 08:59:09.051978 32502 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0821 08:59:09.052008 32502 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0821 08:59:09.052017 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.052022 32502 net.cpp:165] Memory required for data: 1968948400\nI0821 08:59:09.052027 32502 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0821 08:59:09.052038 32502 net.cpp:100] Creating Layer L2_b18_relu\nI0821 08:59:09.052045 32502 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0821 08:59:09.052052 32502 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0821 08:59:09.052062 32502 net.cpp:150] Setting up L2_b18_relu\nI0821 08:59:09.052068 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.052073 32502 net.cpp:165] Memory required for data: 1972225200\nI0821 08:59:09.052078 32502 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:09.052093 32502 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:09.052098 32502 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0821 08:59:09.052106 32502 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:09.052116 32502 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:09.052170 32502 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0821 08:59:09.052182 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.052189 32502 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0821 08:59:09.052194 32502 net.cpp:165] Memory required for data: 1978778800\nI0821 08:59:09.052199 
32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:59:09.052213 32502 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:59:09.052220 32502 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0821 08:59:09.052230 32502 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:59:09.052739 32502 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:59:09.052759 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.052764 32502 net.cpp:165] Memory required for data: 1979598000\nI0821 08:59:09.052840 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:59:09.052855 32502 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:59:09.052861 32502 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:59:09.052870 32502 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:59:09.053172 32502 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:59:09.053186 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.053191 32502 net.cpp:165] Memory required for data: 1980417200\nI0821 08:59:09.053202 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:09.053210 32502 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:59:09.053217 32502 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:59:09.053227 32502 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:09.053290 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:59:09.053464 32502 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:59:09.053478 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.053483 32502 net.cpp:165] Memory required for data: 1981236400\nI0821 08:59:09.053493 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:59:09.053500 32502 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:59:09.053506 32502 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:59:09.053517 32502 net.cpp:395] L3_b1_cbr1_relu -> 
L3_b1_cbr1_bn_top (in-place)\nI0821 08:59:09.053527 32502 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:59:09.053534 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.053539 32502 net.cpp:165] Memory required for data: 1982055600\nI0821 08:59:09.053544 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:59:09.053557 32502 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:59:09.053563 32502 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:59:09.053572 32502 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:59:09.055100 32502 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:59:09.055119 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.055124 32502 net.cpp:165] Memory required for data: 1982874800\nI0821 08:59:09.055133 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:59:09.055146 32502 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:59:09.055153 32502 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:59:09.055162 32502 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:59:09.055464 32502 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:59:09.055479 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.055483 32502 net.cpp:165] Memory required for data: 1983694000\nI0821 08:59:09.055501 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:09.055510 32502 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:59:09.055517 32502 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:59:09.055526 32502 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:59:09.055590 32502 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:59:09.055770 32502 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:59:09.055788 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.055793 32502 net.cpp:165] Memory required for data: 1984513200\nI0821 08:59:09.055802 32502 layer_factory.hpp:77] 
Creating layer L3_b1_pool\nI0821 08:59:09.055811 32502 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:59:09.055819 32502 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0821 08:59:09.055827 32502 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:59:09.055868 32502 net.cpp:150] Setting up L3_b1_pool\nI0821 08:59:09.055878 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.055883 32502 net.cpp:165] Memory required for data: 1985332400\nI0821 08:59:09.055888 32502 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:59:09.055898 32502 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:59:09.055904 32502 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:59:09.055912 32502 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:59:09.055918 32502 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:59:09.055958 32502 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:59:09.055968 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.055972 32502 net.cpp:165] Memory required for data: 1986151600\nI0821 08:59:09.055977 32502 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:59:09.055985 32502 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:59:09.055991 32502 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:59:09.055999 32502 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:59:09.056007 32502 net.cpp:150] Setting up L3_b1_relu\nI0821 08:59:09.056015 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 08:59:09.056020 32502 net.cpp:165] Memory required for data: 1986970800\nI0821 08:59:09.056023 32502 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:59:09.056032 32502 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:59:09.056043 32502 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:59:09.057242 32502 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:59:09.057261 32502 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0821 
08:59:09.057266 32502 net.cpp:165] Memory required for data: 1987790000\nI0821 08:59:09.057272 32502 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:59:09.057286 32502 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:59:09.057292 32502 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:59:09.057299 32502 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:59:09.057307 32502 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:59:09.057353 32502 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:59:09.057366 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.057371 32502 net.cpp:165] Memory required for data: 1989428400\nI0821 08:59:09.057376 32502 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:09.057384 32502 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:09.057390 32502 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:59:09.057401 32502 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:09.057411 32502 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:09.057471 32502 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:59:09.057483 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.057489 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.057502 32502 net.cpp:165] Memory required for data: 1992705200\nI0821 08:59:09.057507 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:59:09.057523 32502 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:59:09.057528 32502 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:59:09.057538 32502 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:59:09.058603 32502 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:59:09.058617 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:09.058622 32502 net.cpp:165] Memory required for data: 1994343600\nI0821 08:59:09.058631 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:59:09.058645 32502 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:59:09.058651 32502 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:59:09.058662 32502 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:59:09.058956 32502 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:59:09.058970 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.058975 32502 net.cpp:165] Memory required for data: 1995982000\nI0821 08:59:09.058986 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:09.058995 32502 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:59:09.059002 32502 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:59:09.059010 32502 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:09.059077 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:59:09.059245 32502 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:59:09.059258 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.059263 32502 net.cpp:165] Memory required for data: 1997620400\nI0821 08:59:09.059273 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:59:09.059281 32502 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:59:09.059288 32502 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:59:09.059298 32502 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:59:09.059307 32502 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:59:09.059314 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.059319 32502 net.cpp:165] Memory required for data: 1999258800\nI0821 08:59:09.059324 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:59:09.059339 32502 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:59:09.059345 32502 net.cpp:434] L3_b2_cbr2_conv <- 
L3_b2_cbr1_bn_top\nI0821 08:59:09.059353 32502 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:59:09.061473 32502 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:59:09.061492 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.061497 32502 net.cpp:165] Memory required for data: 2000897200\nI0821 08:59:09.061507 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:59:09.061522 32502 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:59:09.061528 32502 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:59:09.061537 32502 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:59:09.061831 32502 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:59:09.061846 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.061851 32502 net.cpp:165] Memory required for data: 2002535600\nI0821 08:59:09.061861 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:09.061872 32502 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:59:09.061877 32502 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:59:09.061885 32502 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:59:09.061952 32502 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:59:09.062122 32502 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:59:09.062136 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.062141 32502 net.cpp:165] Memory required for data: 2004174000\nI0821 08:59:09.062150 32502 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:59:09.062160 32502 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:59:09.062175 32502 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:59:09.062182 32502 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:59:09.062193 32502 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:59:09.062230 32502 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:59:09.062245 
32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.062252 32502 net.cpp:165] Memory required for data: 2005812400\nI0821 08:59:09.062256 32502 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:59:09.062263 32502 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:59:09.062270 32502 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:59:09.062278 32502 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:59:09.062286 32502 net.cpp:150] Setting up L3_b2_relu\nI0821 08:59:09.062294 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.062297 32502 net.cpp:165] Memory required for data: 2007450800\nI0821 08:59:09.062302 32502 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:09.062312 32502 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:09.062319 32502 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:59:09.062325 32502 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:09.062335 32502 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:09.062388 32502 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:59:09.062400 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.062407 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.062412 32502 net.cpp:165] Memory required for data: 2010727600\nI0821 08:59:09.062417 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:59:09.062427 32502 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:59:09.062434 32502 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:59:09.062446 32502 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:59:09.063499 32502 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:59:09.063514 32502 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.063519 32502 net.cpp:165] Memory required for data: 2012366000\nI0821 08:59:09.063529 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:59:09.063539 32502 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:59:09.063545 32502 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:59:09.063558 32502 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:59:09.063854 32502 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:59:09.063870 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.063875 32502 net.cpp:165] Memory required for data: 2014004400\nI0821 08:59:09.063886 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:09.063895 32502 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:59:09.063902 32502 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:59:09.063910 32502 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:09.063972 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:59:09.064142 32502 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:59:09.064155 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.064162 32502 net.cpp:165] Memory required for data: 2015642800\nI0821 08:59:09.064170 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:59:09.064178 32502 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:59:09.064185 32502 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:59:09.064195 32502 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:59:09.064205 32502 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:59:09.064213 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.064224 32502 net.cpp:165] Memory required for data: 2017281200\nI0821 08:59:09.064229 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:59:09.064244 32502 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:59:09.064249 32502 
net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:59:09.064258 32502 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:59:09.065307 32502 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:59:09.065322 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.065327 32502 net.cpp:165] Memory required for data: 2018919600\nI0821 08:59:09.065336 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:59:09.065349 32502 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:59:09.065356 32502 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:59:09.065366 32502 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:59:09.065651 32502 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:59:09.065665 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.065670 32502 net.cpp:165] Memory required for data: 2020558000\nI0821 08:59:09.065681 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:09.065690 32502 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:59:09.065696 32502 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:59:09.065707 32502 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:59:09.065778 32502 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:59:09.065955 32502 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:59:09.065968 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.065973 32502 net.cpp:165] Memory required for data: 2022196400\nI0821 08:59:09.065984 32502 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:59:09.065992 32502 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:59:09.065999 32502 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:59:09.066006 32502 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:59:09.066017 32502 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:59:09.066056 32502 net.cpp:150] Setting up 
L3_b3_sum_eltwise\nI0821 08:59:09.066068 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.066073 32502 net.cpp:165] Memory required for data: 2023834800\nI0821 08:59:09.066078 32502 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:59:09.066087 32502 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:59:09.066092 32502 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:59:09.066099 32502 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:59:09.066108 32502 net.cpp:150] Setting up L3_b3_relu\nI0821 08:59:09.066115 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.066120 32502 net.cpp:165] Memory required for data: 2025473200\nI0821 08:59:09.066124 32502 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:09.066134 32502 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:09.066140 32502 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:59:09.066148 32502 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:09.066157 32502 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:09.066210 32502 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:59:09.066222 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.066229 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.066233 32502 net.cpp:165] Memory required for data: 2028750000\nI0821 08:59:09.066238 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:59:09.066249 32502 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:59:09.066256 32502 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:59:09.066268 32502 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:59:09.067355 32502 net.cpp:150] Setting up 
L3_b4_cbr1_conv\nI0821 08:59:09.067370 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.067375 32502 net.cpp:165] Memory required for data: 2030388400\nI0821 08:59:09.067385 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:59:09.067394 32502 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:59:09.067401 32502 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:59:09.067412 32502 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:59:09.067701 32502 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:59:09.067718 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.067723 32502 net.cpp:165] Memory required for data: 2032026800\nI0821 08:59:09.067734 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:09.067742 32502 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:59:09.067973 32502 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:59:09.067982 32502 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:09.068049 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:59:09.068223 32502 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:59:09.068236 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.068241 32502 net.cpp:165] Memory required for data: 2033665200\nI0821 08:59:09.068251 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:59:09.068262 32502 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:59:09.068269 32502 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:59:09.068276 32502 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:59:09.068286 32502 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:59:09.068294 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.068297 32502 net.cpp:165] Memory required for data: 2035303600\nI0821 08:59:09.068302 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:59:09.068316 32502 net.cpp:100] 
Creating Layer L3_b4_cbr2_conv\nI0821 08:59:09.068322 32502 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:59:09.068331 32502 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:59:09.069389 32502 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:59:09.069404 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.069409 32502 net.cpp:165] Memory required for data: 2036942000\nI0821 08:59:09.069418 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:59:09.069433 32502 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:59:09.069440 32502 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:59:09.069452 32502 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:59:09.069739 32502 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:59:09.069758 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.069763 32502 net.cpp:165] Memory required for data: 2038580400\nI0821 08:59:09.069774 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:09.069782 32502 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:59:09.069789 32502 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:59:09.069800 32502 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:59:09.069866 32502 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:59:09.070036 32502 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:59:09.070050 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.070055 32502 net.cpp:165] Memory required for data: 2040218800\nI0821 08:59:09.070063 32502 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:59:09.070072 32502 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:59:09.070080 32502 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:59:09.070086 32502 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:59:09.070096 32502 net.cpp:408] L3_b4_sum_eltwise -> 
L3_b4_sum_eltwise_top\nI0821 08:59:09.070137 32502 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:59:09.070153 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.070158 32502 net.cpp:165] Memory required for data: 2041857200\nI0821 08:59:09.070163 32502 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:59:09.070171 32502 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:59:09.070178 32502 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:59:09.070188 32502 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:59:09.070197 32502 net.cpp:150] Setting up L3_b4_relu\nI0821 08:59:09.070204 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.070209 32502 net.cpp:165] Memory required for data: 2043495600\nI0821 08:59:09.070214 32502 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:09.070220 32502 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:09.070226 32502 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:59:09.070233 32502 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:09.070243 32502 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:59:09.070297 32502 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:59:09.070309 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.070317 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.070320 32502 net.cpp:165] Memory required for data: 2046772400\nI0821 08:59:09.070325 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:59:09.070336 32502 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:59:09.070343 32502 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:59:09.070354 32502 net.cpp:408] L3_b5_cbr1_conv -> 
L3_b5_cbr1_conv_top\nI0821 08:59:09.071414 32502 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:59:09.071429 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.071434 32502 net.cpp:165] Memory required for data: 2048410800\nI0821 08:59:09.071444 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:59:09.071454 32502 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:59:09.071460 32502 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:59:09.071471 32502 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:59:09.071766 32502 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:59:09.071784 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.071789 32502 net.cpp:165] Memory required for data: 2050049200\nI0821 08:59:09.071799 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:09.071810 32502 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:59:09.071815 32502 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:59:09.071823 32502 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:09.071890 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:59:09.072062 32502 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:59:09.072075 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.072080 32502 net.cpp:165] Memory required for data: 2051687600\nI0821 08:59:09.072089 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:59:09.072100 32502 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:59:09.072106 32502 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:59:09.072114 32502 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:59:09.072124 32502 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:59:09.072130 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.072135 32502 net.cpp:165] Memory required for data: 2053326000\nI0821 08:59:09.072139 32502 layer_factory.hpp:77] 
Creating layer L3_b5_cbr2_conv\nI0821 08:59:09.072154 32502 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:59:09.072160 32502 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:59:09.072168 32502 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:59:09.073223 32502 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:59:09.073238 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.073243 32502 net.cpp:165] Memory required for data: 2054964400\nI0821 08:59:09.073252 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:59:09.073264 32502 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:59:09.073271 32502 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:59:09.073284 32502 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:59:09.073566 32502 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:59:09.073580 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.073585 32502 net.cpp:165] Memory required for data: 2056602800\nI0821 08:59:09.073595 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:09.073603 32502 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:59:09.073611 32502 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:59:09.073621 32502 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:59:09.073685 32502 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:59:09.073864 32502 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:59:09.073879 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.073884 32502 net.cpp:165] Memory required for data: 2058241200\nI0821 08:59:09.073892 32502 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:59:09.073902 32502 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:59:09.073909 32502 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:59:09.073920 32502 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 
08:59:09.073928 32502 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:59:09.073967 32502 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:59:09.073981 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.073984 32502 net.cpp:165] Memory required for data: 2059879600\nI0821 08:59:09.073990 32502 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:59:09.073998 32502 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:59:09.074004 32502 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:59:09.074014 32502 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:59:09.074024 32502 net.cpp:150] Setting up L3_b5_relu\nI0821 08:59:09.074031 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.074035 32502 net.cpp:165] Memory required for data: 2061518000\nI0821 08:59:09.074040 32502 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:09.074048 32502 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:09.074053 32502 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:59:09.074060 32502 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:59:09.074069 32502 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:09.074121 32502 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:59:09.074133 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.074139 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.074144 32502 net.cpp:165] Memory required for data: 2064794800\nI0821 08:59:09.074149 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:59:09.074160 32502 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:59:09.074167 32502 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 
08:59:09.074178 32502 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:59:09.076233 32502 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:59:09.076251 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.076256 32502 net.cpp:165] Memory required for data: 2066433200\nI0821 08:59:09.076267 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:59:09.076279 32502 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:59:09.076294 32502 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:59:09.076303 32502 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:59:09.076589 32502 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:59:09.076603 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.076608 32502 net.cpp:165] Memory required for data: 2068071600\nI0821 08:59:09.076619 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:09.076632 32502 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:59:09.076638 32502 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:59:09.076647 32502 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:09.076715 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:59:09.076894 32502 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:59:09.076908 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.076913 32502 net.cpp:165] Memory required for data: 2069710000\nI0821 08:59:09.076923 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:59:09.076936 32502 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:59:09.076941 32502 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:59:09.076949 32502 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:59:09.076959 32502 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:59:09.076967 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.076972 32502 net.cpp:165] Memory required for data: 
2071348400\nI0821 08:59:09.076975 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:59:09.076989 32502 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:59:09.076997 32502 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:59:09.077008 32502 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:59:09.078058 32502 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:59:09.078073 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078078 32502 net.cpp:165] Memory required for data: 2072986800\nI0821 08:59:09.078086 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:59:09.078096 32502 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:59:09.078104 32502 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:59:09.078116 32502 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:59:09.078403 32502 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:59:09.078418 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078423 32502 net.cpp:165] Memory required for data: 2074625200\nI0821 08:59:09.078433 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:09.078441 32502 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:59:09.078449 32502 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:59:09.078455 32502 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:59:09.078521 32502 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:59:09.078691 32502 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:59:09.078704 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078709 32502 net.cpp:165] Memory required for data: 2076263600\nI0821 08:59:09.078719 32502 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:59:09.078732 32502 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:59:09.078737 32502 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:59:09.078750 32502 net.cpp:434] 
L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:59:09.078759 32502 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:59:09.078800 32502 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:59:09.078812 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078817 32502 net.cpp:165] Memory required for data: 2077902000\nI0821 08:59:09.078822 32502 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:59:09.078830 32502 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:59:09.078836 32502 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:59:09.078850 32502 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:59:09.078861 32502 net.cpp:150] Setting up L3_b6_relu\nI0821 08:59:09.078868 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078872 32502 net.cpp:165] Memory required for data: 2079540400\nI0821 08:59:09.078877 32502 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:09.078886 32502 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:09.078891 32502 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:59:09.078904 32502 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:09.078914 32502 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:09.078965 32502 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:59:09.078977 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078984 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.078989 32502 net.cpp:165] Memory required for data: 2082817200\nI0821 08:59:09.078994 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:59:09.079010 32502 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:59:09.079016 32502 net.cpp:434] 
L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:59:09.079026 32502 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:59:09.080072 32502 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:59:09.080087 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.080092 32502 net.cpp:165] Memory required for data: 2084455600\nI0821 08:59:09.080102 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:59:09.080114 32502 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:59:09.080121 32502 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:59:09.080129 32502 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:59:09.080418 32502 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:59:09.080431 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.080436 32502 net.cpp:165] Memory required for data: 2086094000\nI0821 08:59:09.080447 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:09.080458 32502 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:59:09.080466 32502 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:59:09.080473 32502 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:09.080541 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:59:09.080710 32502 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:59:09.080724 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.080729 32502 net.cpp:165] Memory required for data: 2087732400\nI0821 08:59:09.080737 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:59:09.080754 32502 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:59:09.080762 32502 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:59:09.080770 32502 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:59:09.080782 32502 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:59:09.080790 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 
08:59:09.080795 32502 net.cpp:165] Memory required for data: 2089370800\nI0821 08:59:09.080799 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:59:09.080811 32502 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:59:09.080816 32502 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:59:09.080827 32502 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:59:09.081877 32502 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:59:09.081892 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.081897 32502 net.cpp:165] Memory required for data: 2091009200\nI0821 08:59:09.081907 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:59:09.081915 32502 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:59:09.081929 32502 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:59:09.081943 32502 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:59:09.082238 32502 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:59:09.082252 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082257 32502 net.cpp:165] Memory required for data: 2092647600\nI0821 08:59:09.082268 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:09.082278 32502 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:59:09.082283 32502 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:59:09.082291 32502 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:59:09.082356 32502 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:59:09.082521 32502 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:59:09.082538 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082545 32502 net.cpp:165] Memory required for data: 2094286000\nI0821 08:59:09.082553 32502 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:59:09.082562 32502 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:59:09.082569 32502 net.cpp:434] L3_b7_sum_eltwise <- 
L3_b7_cbr2_bn_top\nI0821 08:59:09.082576 32502 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:59:09.082584 32502 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:59:09.082623 32502 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:59:09.082635 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082639 32502 net.cpp:165] Memory required for data: 2095924400\nI0821 08:59:09.082645 32502 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:59:09.082653 32502 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:59:09.082659 32502 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:59:09.082666 32502 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:59:09.082675 32502 net.cpp:150] Setting up L3_b7_relu\nI0821 08:59:09.082682 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082686 32502 net.cpp:165] Memory required for data: 2097562800\nI0821 08:59:09.082691 32502 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:09.082698 32502 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:09.082703 32502 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:59:09.082715 32502 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:09.082725 32502 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:09.082779 32502 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:59:09.082795 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082803 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.082808 32502 net.cpp:165] Memory required for data: 2100839600\nI0821 08:59:09.082813 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:59:09.082823 32502 net.cpp:100] Creating Layer 
L3_b8_cbr1_conv\nI0821 08:59:09.082830 32502 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:59:09.082839 32502 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:59:09.083884 32502 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:59:09.083899 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.083905 32502 net.cpp:165] Memory required for data: 2102478000\nI0821 08:59:09.083914 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:59:09.083926 32502 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:59:09.083933 32502 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:59:09.083941 32502 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:59:09.084229 32502 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:59:09.084242 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.084247 32502 net.cpp:165] Memory required for data: 2104116400\nI0821 08:59:09.084264 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:09.084277 32502 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:59:09.084283 32502 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:59:09.084291 32502 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:09.084362 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:59:09.084533 32502 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:59:09.084547 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.084552 32502 net.cpp:165] Memory required for data: 2105754800\nI0821 08:59:09.084560 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:59:09.084573 32502 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:59:09.084579 32502 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:59:09.084589 32502 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:59:09.084599 32502 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:59:09.084606 
32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.084611 32502 net.cpp:165] Memory required for data: 2107393200\nI0821 08:59:09.084616 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:59:09.084627 32502 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:59:09.084632 32502 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:59:09.084645 32502 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:59:09.085691 32502 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:59:09.085706 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.085711 32502 net.cpp:165] Memory required for data: 2109031600\nI0821 08:59:09.085721 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:59:09.085733 32502 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:59:09.085741 32502 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:59:09.085755 32502 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:59:09.086045 32502 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:59:09.086058 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086063 32502 net.cpp:165] Memory required for data: 2110670000\nI0821 08:59:09.086074 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:09.086082 32502 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:59:09.086089 32502 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:59:09.086097 32502 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:59:09.086163 32502 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:59:09.086330 32502 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:59:09.086346 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086351 32502 net.cpp:165] Memory required for data: 2112308400\nI0821 08:59:09.086361 32502 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:59:09.086370 32502 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 
08:59:09.086376 32502 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:59:09.086383 32502 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:59:09.086391 32502 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:59:09.086431 32502 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:59:09.086443 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086447 32502 net.cpp:165] Memory required for data: 2113946800\nI0821 08:59:09.086452 32502 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:59:09.086460 32502 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:59:09.086467 32502 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:59:09.086474 32502 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:59:09.086483 32502 net.cpp:150] Setting up L3_b8_relu\nI0821 08:59:09.086490 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086495 32502 net.cpp:165] Memory required for data: 2115585200\nI0821 08:59:09.086499 32502 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:09.086519 32502 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:09.086524 32502 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:59:09.086532 32502 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:09.086542 32502 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:09.086593 32502 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:59:09.086608 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086616 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.086619 32502 net.cpp:165] Memory required for data: 2118862000\nI0821 08:59:09.086624 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 
08:59:09.086637 32502 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:59:09.086642 32502 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:59:09.086652 32502 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:59:09.087705 32502 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:59:09.087720 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.087725 32502 net.cpp:165] Memory required for data: 2120500400\nI0821 08:59:09.087734 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:59:09.087751 32502 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:59:09.087759 32502 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:59:09.087769 32502 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:59:09.088058 32502 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:59:09.088073 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.088078 32502 net.cpp:165] Memory required for data: 2122138800\nI0821 08:59:09.088088 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:09.088100 32502 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:59:09.088107 32502 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:59:09.088117 32502 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:09.088181 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:59:09.088353 32502 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:59:09.088366 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.088371 32502 net.cpp:165] Memory required for data: 2123777200\nI0821 08:59:09.088380 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:59:09.088388 32502 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:59:09.088395 32502 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:59:09.088407 32502 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:59:09.088418 32502 net.cpp:150] 
Setting up L3_b9_cbr1_relu\nI0821 08:59:09.088424 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.088429 32502 net.cpp:165] Memory required for data: 2125415600\nI0821 08:59:09.088434 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:59:09.088445 32502 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:59:09.088451 32502 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:59:09.088462 32502 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:59:09.090498 32502 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:59:09.090517 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.090522 32502 net.cpp:165] Memory required for data: 2127054000\nI0821 08:59:09.090531 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:59:09.090543 32502 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:59:09.090551 32502 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:59:09.090559 32502 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:59:09.090860 32502 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:59:09.090875 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.090880 32502 net.cpp:165] Memory required for data: 2128692400\nI0821 08:59:09.090906 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:09.090919 32502 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:59:09.090926 32502 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:59:09.090934 32502 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:59:09.091001 32502 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:59:09.091176 32502 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:59:09.091189 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.091194 32502 net.cpp:165] Memory required for data: 2130330800\nI0821 08:59:09.091203 32502 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:59:09.091212 32502 net.cpp:100] 
Creating Layer L3_b9_sum_eltwise\nI0821 08:59:09.091219 32502 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:59:09.091226 32502 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:59:09.091238 32502 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:59:09.091274 32502 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:59:09.091286 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.091291 32502 net.cpp:165] Memory required for data: 2131969200\nI0821 08:59:09.091296 32502 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:59:09.091308 32502 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:59:09.091315 32502 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:59:09.091321 32502 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:59:09.091331 32502 net.cpp:150] Setting up L3_b9_relu\nI0821 08:59:09.091338 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.091343 32502 net.cpp:165] Memory required for data: 2133607600\nI0821 08:59:09.091347 32502 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:09.091354 32502 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:09.091361 32502 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0821 08:59:09.091368 32502 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:09.091377 32502 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:09.091429 32502 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0821 08:59:09.091441 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.091449 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.091454 32502 net.cpp:165] Memory required for data: 2136884400\nI0821 08:59:09.091459 32502 layer_factory.hpp:77] 
Creating layer L3_b10_cbr1_conv\nI0821 08:59:09.091471 32502 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0821 08:59:09.091478 32502 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0821 08:59:09.091488 32502 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0821 08:59:09.092533 32502 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0821 08:59:09.092548 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.092555 32502 net.cpp:165] Memory required for data: 2138522800\nI0821 08:59:09.092563 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0821 08:59:09.092576 32502 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0821 08:59:09.092582 32502 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0821 08:59:09.092597 32502 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0821 08:59:09.092891 32502 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0821 08:59:09.092905 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.092911 32502 net.cpp:165] Memory required for data: 2140161200\nI0821 08:59:09.092921 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:09.092931 32502 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0821 08:59:09.092936 32502 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0821 08:59:09.092947 32502 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:09.093019 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0821 08:59:09.093190 32502 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0821 08:59:09.093204 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.093209 32502 net.cpp:165] Memory required for data: 2141799600\nI0821 08:59:09.093224 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0821 08:59:09.093231 32502 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0821 08:59:09.093237 32502 net.cpp:434] L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0821 08:59:09.093247 32502 net.cpp:395] L3_b10_cbr1_relu -> 
L3_b10_cbr1_bn_top (in-place)\nI0821 08:59:09.093258 32502 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0821 08:59:09.093266 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.093269 32502 net.cpp:165] Memory required for data: 2143438000\nI0821 08:59:09.093274 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0821 08:59:09.093288 32502 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0821 08:59:09.093294 32502 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0821 08:59:09.093302 32502 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0821 08:59:09.094350 32502 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0821 08:59:09.094365 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.094370 32502 net.cpp:165] Memory required for data: 2145076400\nI0821 08:59:09.094379 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0821 08:59:09.094393 32502 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0821 08:59:09.094398 32502 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0821 08:59:09.094408 32502 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0821 08:59:09.094697 32502 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0821 08:59:09.094710 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.094715 32502 net.cpp:165] Memory required for data: 2146714800\nI0821 08:59:09.094727 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:09.094738 32502 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0821 08:59:09.094749 32502 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0821 08:59:09.094764 32502 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0821 08:59:09.094830 32502 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0821 08:59:09.095002 32502 net.cpp:150] Setting up L3_b10_cbr2_scale\nI0821 08:59:09.095016 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.095021 32502 net.cpp:165] Memory required for data: 2148353200\nI0821 08:59:09.095031 32502 
layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0821 08:59:09.095039 32502 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0821 08:59:09.095046 32502 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0821 08:59:09.095053 32502 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0821 08:59:09.095064 32502 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0821 08:59:09.095100 32502 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0821 08:59:09.095110 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.095115 32502 net.cpp:165] Memory required for data: 2149991600\nI0821 08:59:09.095120 32502 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0821 08:59:09.095131 32502 net.cpp:100] Creating Layer L3_b10_relu\nI0821 08:59:09.095139 32502 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0821 08:59:09.095145 32502 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0821 08:59:09.095155 32502 net.cpp:150] Setting up L3_b10_relu\nI0821 08:59:09.095161 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.095166 32502 net.cpp:165] Memory required for data: 2151630000\nI0821 08:59:09.095171 32502 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:09.095178 32502 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:09.095183 32502 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0821 08:59:09.095191 32502 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:09.095207 32502 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:09.095260 32502 net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0821 08:59:09.095273 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.095279 32502 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:09.095284 32502 net.cpp:165] Memory required for data: 2154906800\nI0821 08:59:09.095289 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0821 08:59:09.095304 32502 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0821 08:59:09.095310 32502 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0821 08:59:09.095319 32502 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0821 08:59:09.096372 32502 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0821 08:59:09.096387 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.096392 32502 net.cpp:165] Memory required for data: 2156545200\nI0821 08:59:09.096402 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0821 08:59:09.096415 32502 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0821 08:59:09.096421 32502 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0821 08:59:09.096432 32502 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0821 08:59:09.096716 32502 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0821 08:59:09.096729 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.096735 32502 net.cpp:165] Memory required for data: 2158183600\nI0821 08:59:09.096751 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:09.096760 32502 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0821 08:59:09.096767 32502 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0821 08:59:09.096778 32502 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:09.096843 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0821 08:59:09.097012 32502 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0821 08:59:09.097024 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.097029 32502 net.cpp:165] Memory required for data: 2159822000\nI0821 08:59:09.097039 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr1_relu\nI0821 08:59:09.097048 32502 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0821 
08:59:09.097054 32502 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0821 08:59:09.097065 32502 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0821 08:59:09.097075 32502 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0821 08:59:09.097082 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.097087 32502 net.cpp:165] Memory required for data: 2161460400\nI0821 08:59:09.097092 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0821 08:59:09.097106 32502 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0821 08:59:09.097112 32502 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0821 08:59:09.097121 32502 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0821 08:59:09.098167 32502 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0821 08:59:09.098186 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.098191 32502 net.cpp:165] Memory required for data: 2163098800\nI0821 08:59:09.098201 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0821 08:59:09.098209 32502 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0821 08:59:09.098215 32502 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0821 08:59:09.098228 32502 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0821 08:59:09.098517 32502 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0821 08:59:09.098531 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.098536 32502 net.cpp:165] Memory required for data: 2164737200\nI0821 08:59:09.098546 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:09.098557 32502 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0821 08:59:09.098564 32502 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0821 08:59:09.098579 32502 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top (in-place)\nI0821 08:59:09.098644 32502 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0821 08:59:09.098822 32502 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0821 08:59:09.098836 32502 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0821 08:59:09.098841 32502 net.cpp:165] Memory required for data: 2166375600\nI0821 08:59:09.098850 32502 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0821 08:59:09.098860 32502 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0821 08:59:09.098866 32502 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0821 08:59:09.098873 32502 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0821 08:59:09.098884 32502 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0821 08:59:09.098920 32502 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0821 08:59:09.098929 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.098934 32502 net.cpp:165] Memory required for data: 2168014000\nI0821 08:59:09.098939 32502 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0821 08:59:09.098950 32502 net.cpp:100] Creating Layer L3_b11_relu\nI0821 08:59:09.098958 32502 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0821 08:59:09.098963 32502 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0821 08:59:09.098973 32502 net.cpp:150] Setting up L3_b11_relu\nI0821 08:59:09.098980 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.098984 32502 net.cpp:165] Memory required for data: 2169652400\nI0821 08:59:09.098989 32502 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:09.098996 32502 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 08:59:09.099002 32502 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0821 08:59:09.099009 32502 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:09.099019 32502 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:09.099071 32502 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0821 
08:59:09.099084 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.099092 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.099095 32502 net.cpp:165] Memory required for data: 2172929200\nI0821 08:59:09.099100 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0821 08:59:09.099114 32502 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0821 08:59:09.099122 32502 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0821 08:59:09.099130 32502 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0821 08:59:09.100185 32502 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0821 08:59:09.100200 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.100205 32502 net.cpp:165] Memory required for data: 2174567600\nI0821 08:59:09.100214 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0821 08:59:09.100229 32502 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0821 08:59:09.100236 32502 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0821 08:59:09.100247 32502 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0821 08:59:09.100613 32502 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0821 08:59:09.100636 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.100644 32502 net.cpp:165] Memory required for data: 2176206000\nI0821 08:59:09.100664 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:09.100678 32502 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0821 08:59:09.100688 32502 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0821 08:59:09.100708 32502 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:09.100801 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0821 08:59:09.100980 32502 net.cpp:150] Setting up L3_b12_cbr1_scale\nI0821 08:59:09.100993 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.101006 32502 net.cpp:165] Memory required for data: 2177844400\nI0821 08:59:09.101016 32502 
layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0821 08:59:09.101027 32502 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0821 08:59:09.101032 32502 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0821 08:59:09.101040 32502 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0821 08:59:09.101050 32502 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0821 08:59:09.101058 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.101063 32502 net.cpp:165] Memory required for data: 2179482800\nI0821 08:59:09.101066 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0821 08:59:09.101081 32502 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0821 08:59:09.101088 32502 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0821 08:59:09.101099 32502 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0821 08:59:09.102149 32502 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0821 08:59:09.102164 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.102169 32502 net.cpp:165] Memory required for data: 2181121200\nI0821 08:59:09.102179 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0821 08:59:09.102188 32502 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0821 08:59:09.102195 32502 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0821 08:59:09.102206 32502 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0821 08:59:09.102491 32502 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0821 08:59:09.102510 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.102515 32502 net.cpp:165] Memory required for data: 2182759600\nI0821 08:59:09.102526 32502 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0821 08:59:09.102535 32502 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0821 08:59:09.102542 32502 net.cpp:434] L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0821 08:59:09.102550 32502 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0821 08:59:09.102612 32502 layer_factory.hpp:77] Creating layer 
L3_b12_cbr2_scale\nI0821 08:59:09.102792 32502 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0821 08:59:09.102807 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.102811 32502 net.cpp:165] Memory required for data: 2184398000\nI0821 08:59:09.102821 32502 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0821 08:59:09.102833 32502 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0821 08:59:09.102839 32502 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0821 08:59:09.102847 32502 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0821 08:59:09.102855 32502 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0821 08:59:09.102895 32502 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0821 08:59:09.102906 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.102911 32502 net.cpp:165] Memory required for data: 2186036400\nI0821 08:59:09.102916 32502 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0821 08:59:09.102924 32502 net.cpp:100] Creating Layer L3_b12_relu\nI0821 08:59:09.102931 32502 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0821 08:59:09.102937 32502 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0821 08:59:09.102947 32502 net.cpp:150] Setting up L3_b12_relu\nI0821 08:59:09.102954 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.102959 32502 net.cpp:165] Memory required for data: 2187674800\nI0821 08:59:09.102963 32502 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:09.102970 32502 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:09.102977 32502 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0821 08:59:09.102987 32502 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:09.102998 32502 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> 
L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:09.103055 32502 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0821 08:59:09.103068 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.103075 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.103080 32502 net.cpp:165] Memory required for data: 2190951600\nI0821 08:59:09.103085 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0821 08:59:09.103099 32502 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0821 08:59:09.103106 32502 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0821 08:59:09.103116 32502 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0821 08:59:09.105175 32502 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0821 08:59:09.105192 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.105198 32502 net.cpp:165] Memory required for data: 2192590000\nI0821 08:59:09.105207 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0821 08:59:09.105218 32502 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0821 08:59:09.105224 32502 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0821 08:59:09.105237 32502 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0821 08:59:09.105535 32502 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0821 08:59:09.105548 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.105553 32502 net.cpp:165] Memory required for data: 2194228400\nI0821 08:59:09.105564 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:09.105574 32502 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0821 08:59:09.105581 32502 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0821 08:59:09.105588 32502 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:09.105655 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0821 08:59:09.105835 32502 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0821 08:59:09.105852 32502 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.105857 32502 net.cpp:165] Memory required for data: 2195866800\nI0821 08:59:09.105867 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0821 08:59:09.105875 32502 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0821 08:59:09.105882 32502 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0821 08:59:09.105890 32502 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0821 08:59:09.105900 32502 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0821 08:59:09.105906 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.105911 32502 net.cpp:165] Memory required for data: 2197505200\nI0821 08:59:09.105916 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0821 08:59:09.105931 32502 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0821 08:59:09.105937 32502 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0821 08:59:09.105945 32502 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0821 08:59:09.106999 32502 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0821 08:59:09.107014 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107019 32502 net.cpp:165] Memory required for data: 2199143600\nI0821 08:59:09.107028 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0821 08:59:09.107040 32502 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0821 08:59:09.107048 32502 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0821 08:59:09.107059 32502 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0821 08:59:09.107347 32502 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0821 08:59:09.107362 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107367 32502 net.cpp:165] Memory required for data: 2200782000\nI0821 08:59:09.107376 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:09.107385 32502 net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0821 08:59:09.107391 32502 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0821 
08:59:09.107403 32502 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0821 08:59:09.107466 32502 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0821 08:59:09.107636 32502 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0821 08:59:09.107657 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107662 32502 net.cpp:165] Memory required for data: 2202420400\nI0821 08:59:09.107672 32502 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0821 08:59:09.107688 32502 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0821 08:59:09.107695 32502 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0821 08:59:09.107702 32502 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0821 08:59:09.107710 32502 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0821 08:59:09.107756 32502 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0821 08:59:09.107769 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107774 32502 net.cpp:165] Memory required for data: 2204058800\nI0821 08:59:09.107779 32502 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0821 08:59:09.107787 32502 net.cpp:100] Creating Layer L3_b13_relu\nI0821 08:59:09.107794 32502 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0821 08:59:09.107803 32502 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0821 08:59:09.107815 32502 net.cpp:150] Setting up L3_b13_relu\nI0821 08:59:09.107821 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107825 32502 net.cpp:165] Memory required for data: 2205697200\nI0821 08:59:09.107831 32502 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:09.107837 32502 net.cpp:100] Creating Layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:09.107842 32502 net.cpp:434] L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0821 08:59:09.107851 32502 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:09.107859 32502 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:09.107914 32502 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0821 08:59:09.107928 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107933 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.107939 32502 net.cpp:165] Memory required for data: 2208974000\nI0821 08:59:09.107944 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0821 08:59:09.107955 32502 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0821 08:59:09.107961 32502 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0821 08:59:09.107973 32502 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0821 08:59:09.109024 32502 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0821 08:59:09.109038 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.109043 32502 net.cpp:165] Memory required for data: 2210612400\nI0821 08:59:09.109052 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0821 08:59:09.109064 32502 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0821 08:59:09.109071 32502 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0821 08:59:09.109079 32502 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0821 08:59:09.109365 32502 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0821 08:59:09.109378 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.109383 32502 net.cpp:165] Memory required for data: 2212250800\nI0821 08:59:09.109395 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0821 08:59:09.109403 32502 net.cpp:100] Creating Layer L3_b14_cbr1_scale\nI0821 08:59:09.109410 32502 net.cpp:434] L3_b14_cbr1_scale <- L3_b14_cbr1_bn_top\nI0821 08:59:09.109417 32502 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:09.109486 32502 layer_factory.hpp:77] Creating 
layer L3_b14_cbr1_scale\nI0821 08:59:09.109655 32502 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0821 08:59:09.109671 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.109676 32502 net.cpp:165] Memory required for data: 2213889200\nI0821 08:59:09.109685 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0821 08:59:09.109694 32502 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0821 08:59:09.109707 32502 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0821 08:59:09.109715 32502 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0821 08:59:09.109725 32502 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0821 08:59:09.109732 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.109737 32502 net.cpp:165] Memory required for data: 2215527600\nI0821 08:59:09.109741 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0821 08:59:09.109762 32502 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0821 08:59:09.109768 32502 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0821 08:59:09.109781 32502 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0821 08:59:09.110865 32502 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0821 08:59:09.110882 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.110887 32502 net.cpp:165] Memory required for data: 2217166000\nI0821 08:59:09.110895 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0821 08:59:09.110909 32502 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0821 08:59:09.110916 32502 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0821 08:59:09.110925 32502 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0821 08:59:09.111212 32502 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0821 08:59:09.111227 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111232 32502 net.cpp:165] Memory required for data: 2218804400\nI0821 08:59:09.111243 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:09.111254 32502 
net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0821 08:59:09.111261 32502 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0821 08:59:09.111268 32502 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0821 08:59:09.111333 32502 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0821 08:59:09.111502 32502 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0821 08:59:09.111515 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111521 32502 net.cpp:165] Memory required for data: 2220442800\nI0821 08:59:09.111529 32502 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0821 08:59:09.111542 32502 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0821 08:59:09.111551 32502 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0821 08:59:09.111557 32502 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0821 08:59:09.111565 32502 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0821 08:59:09.111605 32502 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0821 08:59:09.111618 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111623 32502 net.cpp:165] Memory required for data: 2222081200\nI0821 08:59:09.111627 32502 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0821 08:59:09.111635 32502 net.cpp:100] Creating Layer L3_b14_relu\nI0821 08:59:09.111641 32502 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0821 08:59:09.111651 32502 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0821 08:59:09.111661 32502 net.cpp:150] Setting up L3_b14_relu\nI0821 08:59:09.111668 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111673 32502 net.cpp:165] Memory required for data: 2223719600\nI0821 08:59:09.111678 32502 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:09.111759 32502 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:09.111770 32502 net.cpp:434] 
L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0821 08:59:09.111779 32502 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:09.111789 32502 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:09.111845 32502 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0821 08:59:09.111856 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111863 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.111876 32502 net.cpp:165] Memory required for data: 2226996400\nI0821 08:59:09.111881 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0821 08:59:09.111896 32502 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0821 08:59:09.111903 32502 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0821 08:59:09.111913 32502 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0821 08:59:09.112975 32502 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0821 08:59:09.112990 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.112995 32502 net.cpp:165] Memory required for data: 2228634800\nI0821 08:59:09.113005 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0821 08:59:09.113018 32502 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0821 08:59:09.113024 32502 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0821 08:59:09.113032 32502 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0821 08:59:09.113329 32502 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0821 08:59:09.113343 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.113348 32502 net.cpp:165] Memory required for data: 2230273200\nI0821 08:59:09.113358 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:09.113368 32502 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0821 08:59:09.113374 32502 net.cpp:434] L3_b15_cbr1_scale <- 
L3_b15_cbr1_bn_top\nI0821 08:59:09.113382 32502 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:09.113450 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0821 08:59:09.113626 32502 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0821 08:59:09.113639 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.113644 32502 net.cpp:165] Memory required for data: 2231911600\nI0821 08:59:09.113653 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0821 08:59:09.113662 32502 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0821 08:59:09.113668 32502 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0821 08:59:09.113675 32502 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0821 08:59:09.113688 32502 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0821 08:59:09.113695 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.113700 32502 net.cpp:165] Memory required for data: 2233550000\nI0821 08:59:09.113704 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0821 08:59:09.113715 32502 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0821 08:59:09.113724 32502 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0821 08:59:09.113734 32502 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0821 08:59:09.115012 32502 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0821 08:59:09.115030 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115034 32502 net.cpp:165] Memory required for data: 2235188400\nI0821 08:59:09.115044 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0821 08:59:09.115056 32502 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0821 08:59:09.115063 32502 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0821 08:59:09.115072 32502 net.cpp:408] L3_b15_cbr2_bn -> L3_b15_cbr2_bn_top\nI0821 08:59:09.115367 32502 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0821 08:59:09.115381 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115386 32502 
net.cpp:165] Memory required for data: 2236826800\nI0821 08:59:09.115396 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:09.115408 32502 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0821 08:59:09.115414 32502 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0821 08:59:09.115423 32502 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0821 08:59:09.115492 32502 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0821 08:59:09.115666 32502 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0821 08:59:09.115679 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115684 32502 net.cpp:165] Memory required for data: 2238465200\nI0821 08:59:09.115694 32502 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0821 08:59:09.115713 32502 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0821 08:59:09.115721 32502 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0821 08:59:09.115728 32502 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0821 08:59:09.115739 32502 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0821 08:59:09.115783 32502 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0821 08:59:09.115794 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115798 32502 net.cpp:165] Memory required for data: 2240103600\nI0821 08:59:09.115804 32502 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0821 08:59:09.115815 32502 net.cpp:100] Creating Layer L3_b15_relu\nI0821 08:59:09.115823 32502 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0821 08:59:09.115829 32502 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0821 08:59:09.115839 32502 net.cpp:150] Setting up L3_b15_relu\nI0821 08:59:09.115846 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115850 32502 net.cpp:165] Memory required for data: 2241742000\nI0821 08:59:09.115855 32502 layer_factory.hpp:77] Creating layer 
L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:09.115862 32502 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:09.115867 32502 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0821 08:59:09.115875 32502 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:09.115885 32502 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:09.115937 32502 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0821 08:59:09.115949 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115957 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.115962 32502 net.cpp:165] Memory required for data: 2245018800\nI0821 08:59:09.115965 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0821 08:59:09.115980 32502 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0821 08:59:09.115988 32502 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0821 08:59:09.115996 32502 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0821 08:59:09.117055 32502 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0821 08:59:09.117070 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.117075 32502 net.cpp:165] Memory required for data: 2246657200\nI0821 08:59:09.117084 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0821 08:59:09.117099 32502 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0821 08:59:09.117105 32502 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0821 08:59:09.117116 32502 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0821 08:59:09.117408 32502 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0821 08:59:09.117420 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.117425 32502 net.cpp:165] Memory required for data: 2248295600\nI0821 08:59:09.117436 32502 layer_factory.hpp:77] 
Creating layer L3_b16_cbr1_scale\nI0821 08:59:09.117445 32502 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0821 08:59:09.117451 32502 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0821 08:59:09.117460 32502 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:09.117527 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0821 08:59:09.117729 32502 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0821 08:59:09.117750 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.117756 32502 net.cpp:165] Memory required for data: 2249934000\nI0821 08:59:09.117765 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0821 08:59:09.117774 32502 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0821 08:59:09.117781 32502 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0821 08:59:09.117791 32502 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0821 08:59:09.117810 32502 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0821 08:59:09.117816 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.117821 32502 net.cpp:165] Memory required for data: 2251572400\nI0821 08:59:09.117826 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0821 08:59:09.117841 32502 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0821 08:59:09.117846 32502 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0821 08:59:09.117856 32502 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0821 08:59:09.119912 32502 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0821 08:59:09.119930 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.119935 32502 net.cpp:165] Memory required for data: 2253210800\nI0821 08:59:09.119946 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0821 08:59:09.119958 32502 net.cpp:100] Creating Layer L3_b16_cbr2_bn\nI0821 08:59:09.119966 32502 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0821 08:59:09.119974 32502 net.cpp:408] L3_b16_cbr2_bn -> 
L3_b16_cbr2_bn_top\nI0821 08:59:09.120270 32502 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0821 08:59:09.120282 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.120288 32502 net.cpp:165] Memory required for data: 2254849200\nI0821 08:59:09.120298 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:09.120307 32502 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0821 08:59:09.120314 32502 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0821 08:59:09.120322 32502 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0821 08:59:09.120391 32502 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0821 08:59:09.120568 32502 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0821 08:59:09.120581 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.120586 32502 net.cpp:165] Memory required for data: 2256487600\nI0821 08:59:09.120596 32502 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0821 08:59:09.120605 32502 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0821 08:59:09.120612 32502 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0821 08:59:09.120620 32502 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0821 08:59:09.120631 32502 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0821 08:59:09.120668 32502 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0821 08:59:09.120683 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.120688 32502 net.cpp:165] Memory required for data: 2258126000\nI0821 08:59:09.120693 32502 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0821 08:59:09.120702 32502 net.cpp:100] Creating Layer L3_b16_relu\nI0821 08:59:09.120707 32502 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0821 08:59:09.120714 32502 net.cpp:395] L3_b16_relu -> L3_b16_sum_eltwise_top (in-place)\nI0821 08:59:09.120724 32502 net.cpp:150] Setting up L3_b16_relu\nI0821 08:59:09.120731 32502 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0821 08:59:09.120736 32502 net.cpp:165] Memory required for data: 2259764400\nI0821 08:59:09.120740 32502 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:09.120757 32502 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:09.120764 32502 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0821 08:59:09.120771 32502 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:09.120782 32502 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:09.120836 32502 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0821 08:59:09.120849 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.120856 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.120860 32502 net.cpp:165] Memory required for data: 2263041200\nI0821 08:59:09.120865 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0821 08:59:09.120884 32502 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0821 08:59:09.120892 32502 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0821 08:59:09.120908 32502 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0821 08:59:09.121959 32502 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0821 08:59:09.121974 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.121979 32502 net.cpp:165] Memory required for data: 2264679600\nI0821 08:59:09.121989 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0821 08:59:09.121999 32502 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0821 08:59:09.122005 32502 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0821 08:59:09.122018 32502 net.cpp:408] L3_b17_cbr1_bn -> L3_b17_cbr1_bn_top\nI0821 08:59:09.122315 32502 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0821 08:59:09.122331 32502 net.cpp:157] Top shape: 
100 64 8 8 (409600)\nI0821 08:59:09.122336 32502 net.cpp:165] Memory required for data: 2266318000\nI0821 08:59:09.122347 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:09.122355 32502 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0821 08:59:09.122362 32502 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0821 08:59:09.122370 32502 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:09.122433 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0821 08:59:09.122608 32502 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0821 08:59:09.122622 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.122627 32502 net.cpp:165] Memory required for data: 2267956400\nI0821 08:59:09.122637 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0821 08:59:09.122644 32502 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0821 08:59:09.122651 32502 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0821 08:59:09.122661 32502 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0821 08:59:09.122671 32502 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0821 08:59:09.122678 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.122684 32502 net.cpp:165] Memory required for data: 2269594800\nI0821 08:59:09.122687 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0821 08:59:09.122701 32502 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0821 08:59:09.122709 32502 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0821 08:59:09.122716 32502 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0821 08:59:09.123771 32502 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0821 08:59:09.123786 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.123791 32502 net.cpp:165] Memory required for data: 2271233200\nI0821 08:59:09.123800 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0821 08:59:09.123814 32502 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0821 
08:59:09.123822 32502 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0821 08:59:09.123833 32502 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0821 08:59:09.124126 32502 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0821 08:59:09.124140 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124145 32502 net.cpp:165] Memory required for data: 2272871600\nI0821 08:59:09.124155 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:09.124164 32502 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0821 08:59:09.124171 32502 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0821 08:59:09.124178 32502 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0821 08:59:09.124244 32502 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0821 08:59:09.124421 32502 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0821 08:59:09.124435 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124440 32502 net.cpp:165] Memory required for data: 2274510000\nI0821 08:59:09.124449 32502 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0821 08:59:09.124459 32502 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0821 08:59:09.124464 32502 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0821 08:59:09.124480 32502 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0821 08:59:09.124490 32502 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0821 08:59:09.124527 32502 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0821 08:59:09.124541 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124546 32502 net.cpp:165] Memory required for data: 2276148400\nI0821 08:59:09.124550 32502 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0821 08:59:09.124558 32502 net.cpp:100] Creating Layer L3_b17_relu\nI0821 08:59:09.124564 32502 net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0821 08:59:09.124572 32502 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top 
(in-place)\nI0821 08:59:09.124581 32502 net.cpp:150] Setting up L3_b17_relu\nI0821 08:59:09.124588 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124593 32502 net.cpp:165] Memory required for data: 2277786800\nI0821 08:59:09.124598 32502 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:09.124608 32502 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:09.124614 32502 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0821 08:59:09.124620 32502 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:09.124630 32502 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:09.124686 32502 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0821 08:59:09.124697 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124703 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.124708 32502 net.cpp:165] Memory required for data: 2281063600\nI0821 08:59:09.124713 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0821 08:59:09.124724 32502 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0821 08:59:09.124730 32502 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0821 08:59:09.124748 32502 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0821 08:59:09.125802 32502 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0821 08:59:09.125818 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.125823 32502 net.cpp:165] Memory required for data: 2282702000\nI0821 08:59:09.125831 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0821 08:59:09.125841 32502 net.cpp:100] Creating Layer L3_b18_cbr1_bn\nI0821 08:59:09.125849 32502 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0821 08:59:09.125859 32502 net.cpp:408] L3_b18_cbr1_bn -> 
L3_b18_cbr1_bn_top\nI0821 08:59:09.126148 32502 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0821 08:59:09.126163 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.126169 32502 net.cpp:165] Memory required for data: 2284340400\nI0821 08:59:09.126179 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:09.126188 32502 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0821 08:59:09.126194 32502 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0821 08:59:09.126202 32502 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:09.126266 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0821 08:59:09.126443 32502 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0821 08:59:09.126456 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.126461 32502 net.cpp:165] Memory required for data: 2285978800\nI0821 08:59:09.126471 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0821 08:59:09.126482 32502 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0821 08:59:09.126489 32502 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0821 08:59:09.126497 32502 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0821 08:59:09.126507 32502 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0821 08:59:09.126513 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.126518 32502 net.cpp:165] Memory required for data: 2287617200\nI0821 08:59:09.126529 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0821 08:59:09.126544 32502 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0821 08:59:09.126550 32502 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0821 08:59:09.126559 32502 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0821 08:59:09.127609 32502 net.cpp:150] Setting up L3_b18_cbr2_conv\nI0821 08:59:09.127624 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.127629 32502 net.cpp:165] Memory required for data: 2289255600\nI0821 08:59:09.127638 
32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0821 08:59:09.127650 32502 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0821 08:59:09.127657 32502 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0821 08:59:09.127668 32502 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0821 08:59:09.127966 32502 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0821 08:59:09.127981 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.127986 32502 net.cpp:165] Memory required for data: 2290894000\nI0821 08:59:09.127996 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:09.128005 32502 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0821 08:59:09.128012 32502 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0821 08:59:09.128023 32502 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0821 08:59:09.128089 32502 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0821 08:59:09.128262 32502 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0821 08:59:09.128274 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.128279 32502 net.cpp:165] Memory required for data: 2292532400\nI0821 08:59:09.128288 32502 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0821 08:59:09.128298 32502 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0821 08:59:09.128305 32502 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0821 08:59:09.128312 32502 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0821 08:59:09.128324 32502 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0821 08:59:09.128363 32502 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0821 08:59:09.128376 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.128381 32502 net.cpp:165] Memory required for data: 2294170800\nI0821 08:59:09.128386 32502 layer_factory.hpp:77] Creating layer L3_b18_relu\nI0821 08:59:09.128392 32502 net.cpp:100] Creating Layer L3_b18_relu\nI0821 08:59:09.128398 32502 
net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0821 08:59:09.128408 32502 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0821 08:59:09.128418 32502 net.cpp:150] Setting up L3_b18_relu\nI0821 08:59:09.128425 32502 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0821 08:59:09.128430 32502 net.cpp:165] Memory required for data: 2295809200\nI0821 08:59:09.128434 32502 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:59:09.128444 32502 net.cpp:100] Creating Layer post_pool\nI0821 08:59:09.128449 32502 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0821 08:59:09.128458 32502 net.cpp:408] post_pool -> post_pool\nI0821 08:59:09.128495 32502 net.cpp:150] Setting up post_pool\nI0821 08:59:09.128507 32502 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0821 08:59:09.128512 32502 net.cpp:165] Memory required for data: 2295834800\nI0821 08:59:09.128517 32502 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:59:09.128533 32502 net.cpp:100] Creating Layer post_FC\nI0821 08:59:09.128540 32502 net.cpp:434] post_FC <- post_pool\nI0821 08:59:09.128548 32502 net.cpp:408] post_FC -> post_FC_top\nI0821 08:59:09.128727 32502 net.cpp:150] Setting up post_FC\nI0821 08:59:09.128741 32502 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:09.128751 32502 net.cpp:165] Memory required for data: 2295838800\nI0821 08:59:09.128762 32502 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:59:09.128769 32502 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:59:09.128779 32502 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:59:09.128794 32502 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:59:09.128804 32502 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:59:09.128860 32502 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:59:09.128872 32502 net.cpp:157] Top shape: 100 10 (1000)\nI0821 08:59:09.128880 32502 net.cpp:157] Top 
shape: 100 10 (1000)\nI0821 08:59:09.128883 32502 net.cpp:165] Memory required for data: 2295846800\nI0821 08:59:09.128888 32502 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:59:09.128897 32502 net.cpp:100] Creating Layer accuracy\nI0821 08:59:09.128903 32502 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:59:09.128911 32502 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:59:09.128931 32502 net.cpp:408] accuracy -> accuracy\nI0821 08:59:09.128952 32502 net.cpp:150] Setting up accuracy\nI0821 08:59:09.128962 32502 net.cpp:157] Top shape: (1)\nI0821 08:59:09.128967 32502 net.cpp:165] Memory required for data: 2295846804\nI0821 08:59:09.128971 32502 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:09.128979 32502 net.cpp:100] Creating Layer loss\nI0821 08:59:09.128984 32502 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:59:09.128991 32502 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:59:09.128999 32502 net.cpp:408] loss -> loss\nI0821 08:59:09.129010 32502 layer_factory.hpp:77] Creating layer loss\nI0821 08:59:09.129138 32502 net.cpp:150] Setting up loss\nI0821 08:59:09.129150 32502 net.cpp:157] Top shape: (1)\nI0821 08:59:09.129156 32502 net.cpp:160]     with loss weight 1\nI0821 08:59:09.129173 32502 net.cpp:165] Memory required for data: 2295846808\nI0821 08:59:09.129179 32502 net.cpp:226] loss needs backward computation.\nI0821 08:59:09.129185 32502 net.cpp:228] accuracy does not need backward computation.\nI0821 08:59:09.129191 32502 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:59:09.129196 32502 net.cpp:226] post_FC needs backward computation.\nI0821 08:59:09.129201 32502 net.cpp:226] post_pool needs backward computation.\nI0821 08:59:09.129206 32502 net.cpp:226] L3_b18_relu needs backward computation.\nI0821 08:59:09.129211 32502 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0821 08:59:09.129217 32502 net.cpp:226] L3_b18_cbr2_scale needs 
backward computation.\nI0821 08:59:09.129222 32502 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0821 08:59:09.129227 32502 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0821 08:59:09.129232 32502 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0821 08:59:09.129237 32502 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0821 08:59:09.129242 32502 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0821 08:59:09.129247 32502 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0821 08:59:09.129252 32502 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0821 08:59:09.129257 32502 net.cpp:226] L3_b17_relu needs backward computation.\nI0821 08:59:09.129262 32502 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0821 08:59:09.129268 32502 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0821 08:59:09.129273 32502 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0821 08:59:09.129278 32502 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0821 08:59:09.129283 32502 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0821 08:59:09.129288 32502 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0821 08:59:09.129293 32502 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0821 08:59:09.129298 32502 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0821 08:59:09.129304 32502 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0821 08:59:09.129309 32502 net.cpp:226] L3_b16_relu needs backward computation.\nI0821 08:59:09.129317 32502 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0821 08:59:09.129329 32502 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0821 08:59:09.129335 32502 net.cpp:226] L3_b16_cbr2_bn needs backward computation.\nI0821 08:59:09.129340 32502 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0821 08:59:09.129345 
32502 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0821 08:59:09.129350 32502 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0821 08:59:09.129355 32502 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0821 08:59:09.129360 32502 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0821 08:59:09.129365 32502 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0821 08:59:09.129371 32502 net.cpp:226] L3_b15_relu needs backward computation.\nI0821 08:59:09.129376 32502 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0821 08:59:09.129381 32502 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0821 08:59:09.129386 32502 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0821 08:59:09.129392 32502 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0821 08:59:09.129397 32502 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0821 08:59:09.129402 32502 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0821 08:59:09.129406 32502 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0821 08:59:09.129412 32502 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0821 08:59:09.129417 32502 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0821 08:59:09.129422 32502 net.cpp:226] L3_b14_relu needs backward computation.\nI0821 08:59:09.129427 32502 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0821 08:59:09.129433 32502 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0821 08:59:09.129438 32502 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0821 08:59:09.129443 32502 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0821 08:59:09.129448 32502 net.cpp:226] L3_b14_cbr1_relu needs backward computation.\nI0821 08:59:09.129453 32502 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0821 08:59:09.129458 32502 net.cpp:226] L3_b14_cbr1_bn needs 
backward computation.\nI0821 08:59:09.129464 32502 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0821 08:59:09.129469 32502 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0821 08:59:09.129475 32502 net.cpp:226] L3_b13_relu needs backward computation.\nI0821 08:59:09.129480 32502 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0821 08:59:09.129485 32502 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0821 08:59:09.129490 32502 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0821 08:59:09.129496 32502 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0821 08:59:09.129501 32502 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0821 08:59:09.129506 32502 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0821 08:59:09.129511 32502 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0821 08:59:09.129516 32502 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0821 08:59:09.129521 32502 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0821 08:59:09.129526 32502 net.cpp:226] L3_b12_relu needs backward computation.\nI0821 08:59:09.129531 32502 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0821 08:59:09.129537 32502 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0821 08:59:09.129542 32502 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0821 08:59:09.129547 32502 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0821 08:59:09.129552 32502 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0821 08:59:09.129557 32502 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0821 08:59:09.129562 32502 net.cpp:226] L3_b12_cbr1_bn needs backward computation.\nI0821 08:59:09.129567 32502 net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0821 08:59:09.129577 32502 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward 
computation.\nI0821 08:59:09.129583 32502 net.cpp:226] L3_b11_relu needs backward computation.\nI0821 08:59:09.129588 32502 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0821 08:59:09.129595 32502 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0821 08:59:09.129601 32502 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0821 08:59:09.129606 32502 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0821 08:59:09.129611 32502 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0821 08:59:09.129616 32502 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0821 08:59:09.129621 32502 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0821 08:59:09.129626 32502 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0821 08:59:09.129631 32502 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0821 08:59:09.129637 32502 net.cpp:226] L3_b10_relu needs backward computation.\nI0821 08:59:09.129642 32502 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0821 08:59:09.129648 32502 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0821 08:59:09.129653 32502 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0821 08:59:09.129659 32502 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0821 08:59:09.129664 32502 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0821 08:59:09.129669 32502 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0821 08:59:09.129674 32502 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0821 08:59:09.129680 32502 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0821 08:59:09.129685 32502 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0821 08:59:09.129695 32502 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:59:09.129701 32502 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:59:09.129709 32502 
net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:59:09.129714 32502 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:59:09.129719 32502 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:59:09.129724 32502 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:59:09.129729 32502 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:59:09.129734 32502 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:59:09.129739 32502 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:59:09.129751 32502 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:59:09.129757 32502 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:59:09.129762 32502 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:59:09.129768 32502 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:59:09.129775 32502 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:59:09.129779 32502 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:59:09.129786 32502 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:59:09.129791 32502 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:59:09.129796 32502 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:59:09.129801 32502 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:59:09.129806 32502 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:59:09.129812 32502 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:59:09.129817 32502 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0821 08:59:09.129822 32502 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:59:09.129828 32502 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:59:09.129833 32502 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 
08:59:09.129838 32502 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:59:09.129849 32502 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:59:09.129855 32502 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:59:09.129860 32502 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:59:09.129866 32502 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:59:09.129871 32502 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:59:09.129878 32502 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:59:09.129884 32502 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:59:09.129889 32502 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:59:09.129894 32502 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:59:09.129899 32502 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:59:09.129904 32502 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:59:09.129909 32502 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:59:09.129914 32502 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:59:09.129920 32502 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:59:09.129925 32502 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:59:09.129930 32502 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:59:09.129936 32502 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:59:09.129941 32502 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:59:09.129947 32502 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:59:09.129952 32502 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:59:09.129957 32502 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:59:09.129963 32502 net.cpp:226] L3_b5_cbr1_bn needs backward 
computation.\nI0821 08:59:09.129968 32502 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:59:09.129974 32502 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:59:09.129979 32502 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:59:09.129986 32502 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:59:09.129992 32502 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:59:09.129999 32502 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:59:09.130005 32502 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:59:09.130012 32502 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:59:09.130017 32502 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:59:09.130022 32502 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:59:09.130028 32502 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:59:09.130033 32502 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:59:09.130038 32502 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:59:09.130043 32502 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:59:09.130049 32502 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:59:09.130055 32502 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:59:09.130061 32502 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:59:09.130066 32502 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:59:09.130072 32502 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:59:09.130077 32502 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:59:09.130082 32502 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:59:09.130089 32502 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:59:09.130095 32502 
net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:59:09.130100 32502 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:59:09.130106 32502 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:59:09.130116 32502 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:59:09.130122 32502 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:59:09.130128 32502 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:59:09.130133 32502 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:59:09.130138 32502 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:59:09.130144 32502 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:59:09.130151 32502 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:59:09.130156 32502 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:59:09.130162 32502 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:59:09.130167 32502 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:59:09.130173 32502 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:59:09.130179 32502 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:59:09.130185 32502 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:59:09.130190 32502 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:59:09.130197 32502 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:59:09.130201 32502 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:59:09.130208 32502 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 08:59:09.130213 32502 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:59:09.130218 32502 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:59:09.130223 32502 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0821 
08:59:09.130229 32502 net.cpp:226] L2_b18_relu needs backward computation.\nI0821 08:59:09.130235 32502 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0821 08:59:09.130241 32502 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0821 08:59:09.130246 32502 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0821 08:59:09.130252 32502 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0821 08:59:09.130257 32502 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0821 08:59:09.130264 32502 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0821 08:59:09.130269 32502 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0821 08:59:09.130275 32502 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0821 08:59:09.130280 32502 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0821 08:59:09.130285 32502 net.cpp:226] L2_b17_relu needs backward computation.\nI0821 08:59:09.130291 32502 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0821 08:59:09.130297 32502 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0821 08:59:09.130302 32502 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0821 08:59:09.130308 32502 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0821 08:59:09.130314 32502 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0821 08:59:09.130321 32502 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0821 08:59:09.130326 32502 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0821 08:59:09.130331 32502 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0821 08:59:09.130337 32502 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0821 08:59:09.130343 32502 net.cpp:226] L2_b16_relu needs backward computation.\nI0821 08:59:09.130348 32502 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0821 08:59:09.130355 32502 net.cpp:226] 
L2_b16_cbr2_scale needs backward computation.\nI0821 08:59:09.130360 32502 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0821 08:59:09.130367 32502 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0821 08:59:09.130373 32502 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0821 08:59:09.130378 32502 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0821 08:59:09.130388 32502 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0821 08:59:09.130396 32502 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0821 08:59:09.130403 32502 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0821 08:59:09.130409 32502 net.cpp:226] L2_b15_relu needs backward computation.\nI0821 08:59:09.130414 32502 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0821 08:59:09.130420 32502 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0821 08:59:09.130425 32502 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0821 08:59:09.130431 32502 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0821 08:59:09.130436 32502 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0821 08:59:09.130442 32502 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0821 08:59:09.130447 32502 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0821 08:59:09.130452 32502 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0821 08:59:09.130458 32502 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0821 08:59:09.130465 32502 net.cpp:226] L2_b14_relu needs backward computation.\nI0821 08:59:09.130470 32502 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0821 08:59:09.130476 32502 net.cpp:226] L2_b14_cbr2_scale needs backward computation.\nI0821 08:59:09.130481 32502 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0821 08:59:09.130486 32502 net.cpp:226] L2_b14_cbr2_conv needs backward 
computation.\nI0821 08:59:09.130492 32502 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0821 08:59:09.130497 32502 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0821 08:59:09.130503 32502 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0821 08:59:09.130509 32502 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0821 08:59:09.130514 32502 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0821 08:59:09.130520 32502 net.cpp:226] L2_b13_relu needs backward computation.\nI0821 08:59:09.130527 32502 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0821 08:59:09.130532 32502 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0821 08:59:09.130537 32502 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0821 08:59:09.130543 32502 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0821 08:59:09.130549 32502 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0821 08:59:09.130554 32502 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0821 08:59:09.130560 32502 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0821 08:59:09.130566 32502 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0821 08:59:09.130571 32502 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0821 08:59:09.130578 32502 net.cpp:226] L2_b12_relu needs backward computation.\nI0821 08:59:09.130584 32502 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0821 08:59:09.130589 32502 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0821 08:59:09.130595 32502 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0821 08:59:09.130601 32502 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0821 08:59:09.130606 32502 net.cpp:226] L2_b12_cbr1_relu needs backward computation.\nI0821 08:59:09.130612 32502 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0821 08:59:09.130619 32502 
net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0821 08:59:09.130625 32502 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0821 08:59:09.130630 32502 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0821 08:59:09.130636 32502 net.cpp:226] L2_b11_relu needs backward computation.\nI0821 08:59:09.130642 32502 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0821 08:59:09.130648 32502 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0821 08:59:09.130661 32502 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0821 08:59:09.130666 32502 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0821 08:59:09.130672 32502 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0821 08:59:09.130678 32502 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0821 08:59:09.130683 32502 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0821 08:59:09.130689 32502 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0821 08:59:09.130695 32502 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0821 08:59:09.130702 32502 net.cpp:226] L2_b10_relu needs backward computation.\nI0821 08:59:09.130707 32502 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0821 08:59:09.130713 32502 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0821 08:59:09.130719 32502 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0821 08:59:09.130725 32502 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0821 08:59:09.130730 32502 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0821 08:59:09.130736 32502 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0821 08:59:09.130748 32502 net.cpp:226] L2_b10_cbr1_bn needs backward computation.\nI0821 08:59:09.130755 32502 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0821 08:59:09.130760 32502 net.cpp:226] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:59:09.130767 32502 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:59:09.130774 32502 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:59:09.130779 32502 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:59:09.130785 32502 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:59:09.130791 32502 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:59:09.130796 32502 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:59:09.130802 32502 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:59:09.130807 32502 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:59:09.130813 32502 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:59:09.130820 32502 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:59:09.130825 32502 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:59:09.130831 32502 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:59:09.130837 32502 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:59:09.130843 32502 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:59:09.130849 32502 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:59:09.130856 32502 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:59:09.130861 32502 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:59:09.130867 32502 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:59:09.130873 32502 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:59:09.130879 32502 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:59:09.130885 32502 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:59:09.130892 32502 net.cpp:226] L2_b7_sum_eltwise needs backward 
computation.\nI0821 08:59:09.130897 32502 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:59:09.130903 32502 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:59:09.130909 32502 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:59:09.130915 32502 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:59:09.130920 32502 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:59:09.130926 32502 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:59:09.130933 32502 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:59:09.130939 32502 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:59:09.130950 32502 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:59:09.130957 32502 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:59:09.130964 32502 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:59:09.130970 32502 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:59:09.130975 32502 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:59:09.130981 32502 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:59:09.130987 32502 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:59:09.130993 32502 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:59:09.131000 32502 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:59:09.131006 32502 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:59:09.131011 32502 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:59:09.131016 32502 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0821 08:59:09.131023 32502 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:59:09.131029 32502 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:59:09.131036 32502 net.cpp:226] 
L2_b5_cbr2_conv needs backward computation.\nI0821 08:59:09.131042 32502 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:59:09.131047 32502 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:59:09.131052 32502 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:59:09.131058 32502 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:59:09.131064 32502 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:59:09.131070 32502 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:59:09.131077 32502 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:59:09.131083 32502 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:59:09.131088 32502 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:59:09.131094 32502 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:59:09.131100 32502 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:59:09.131105 32502 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:59:09.131111 32502 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:59:09.131116 32502 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:59:09.131125 32502 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:59:09.131132 32502 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:59:09.131139 32502 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:59:09.131145 32502 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:59:09.131150 32502 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:59:09.131156 32502 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0821 08:59:09.131162 32502 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:59:09.131167 32502 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:59:09.131173 
32502 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:59:09.131180 32502 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:59:09.131186 32502 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:59:09.131191 32502 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:59:09.131197 32502 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:59:09.131203 32502 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:59:09.131209 32502 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:59:09.131214 32502 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:59:09.131220 32502 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:59:09.131227 32502 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:59:09.131237 32502 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:59:09.131242 32502 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:59:09.131248 32502 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:59:09.131255 32502 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:59:09.131261 32502 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:59:09.131266 32502 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:59:09.131273 32502 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:59:09.131279 32502 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:59:09.131285 32502 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:59:09.131291 32502 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:59:09.131297 32502 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:59:09.131302 32502 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:59:09.131309 32502 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 
08:59:09.131314 32502 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:59:09.131320 32502 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:59:09.131326 32502 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0821 08:59:09.131331 32502 net.cpp:226] L1_b18_relu needs backward computation.\nI0821 08:59:09.131337 32502 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0821 08:59:09.131345 32502 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0821 08:59:09.131350 32502 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0821 08:59:09.131356 32502 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0821 08:59:09.131361 32502 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0821 08:59:09.131366 32502 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0821 08:59:09.131372 32502 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0821 08:59:09.131378 32502 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0821 08:59:09.131383 32502 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0821 08:59:09.131389 32502 net.cpp:226] L1_b17_relu needs backward computation.\nI0821 08:59:09.131395 32502 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0821 08:59:09.131402 32502 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0821 08:59:09.131407 32502 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0821 08:59:09.131413 32502 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0821 08:59:09.131419 32502 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0821 08:59:09.131424 32502 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0821 08:59:09.131430 32502 net.cpp:226] L1_b17_cbr1_bn needs backward computation.\nI0821 08:59:09.131436 32502 net.cpp:226] L1_b17_cbr1_conv needs backward computation.\nI0821 08:59:09.131441 32502 net.cpp:226] 
L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0821 08:59:09.131448 32502 net.cpp:226] L1_b16_relu needs backward computation.\nI0821 08:59:09.131453 32502 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0821 08:59:09.131460 32502 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0821 08:59:09.131465 32502 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0821 08:59:09.131471 32502 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0821 08:59:09.131477 32502 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0821 08:59:09.131484 32502 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0821 08:59:09.131489 32502 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0821 08:59:09.131494 32502 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0821 08:59:09.131500 32502 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0821 08:59:09.131505 32502 net.cpp:226] L1_b15_relu needs backward computation.\nI0821 08:59:09.131515 32502 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0821 08:59:09.131522 32502 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0821 08:59:09.131528 32502 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0821 08:59:09.131534 32502 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0821 08:59:09.131541 32502 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0821 08:59:09.131546 32502 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0821 08:59:09.131551 32502 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0821 08:59:09.131557 32502 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0821 08:59:09.131563 32502 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward computation.\nI0821 08:59:09.131569 32502 net.cpp:226] L1_b14_relu needs backward computation.\nI0821 08:59:09.131574 32502 net.cpp:226] L1_b14_sum_eltwise 
needs backward computation.\nI0821 08:59:09.131582 32502 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0821 08:59:09.131587 32502 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0821 08:59:09.131593 32502 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0821 08:59:09.131599 32502 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0821 08:59:09.131604 32502 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0821 08:59:09.131610 32502 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0821 08:59:09.131616 32502 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0821 08:59:09.131623 32502 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0821 08:59:09.131628 32502 net.cpp:226] L1_b13_relu needs backward computation.\nI0821 08:59:09.131633 32502 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0821 08:59:09.131640 32502 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0821 08:59:09.131645 32502 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0821 08:59:09.131651 32502 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0821 08:59:09.131657 32502 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0821 08:59:09.131662 32502 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0821 08:59:09.131669 32502 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0821 08:59:09.131673 32502 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0821 08:59:09.131680 32502 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0821 08:59:09.131685 32502 net.cpp:226] L1_b12_relu needs backward computation.\nI0821 08:59:09.131691 32502 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0821 08:59:09.131698 32502 net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0821 08:59:09.131703 32502 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0821 
08:59:09.131709 32502 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0821 08:59:09.131716 32502 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0821 08:59:09.131721 32502 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0821 08:59:09.131726 32502 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0821 08:59:09.131732 32502 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0821 08:59:09.131738 32502 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0821 08:59:09.131748 32502 net.cpp:226] L1_b11_relu needs backward computation.\nI0821 08:59:09.131755 32502 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0821 08:59:09.131762 32502 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0821 08:59:09.131768 32502 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0821 08:59:09.131774 32502 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0821 08:59:09.131780 32502 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0821 08:59:09.131786 32502 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0821 08:59:09.131791 32502 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0821 08:59:09.131803 32502 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0821 08:59:09.131809 32502 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0821 08:59:09.131815 32502 net.cpp:226] L1_b10_relu needs backward computation.\nI0821 08:59:09.131821 32502 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0821 08:59:09.131829 32502 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0821 08:59:09.131834 32502 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0821 08:59:09.131842 32502 net.cpp:226] L1_b10_cbr2_conv needs backward computation.\nI0821 08:59:09.131850 32502 net.cpp:226] L1_b10_cbr1_relu needs backward computation.\nI0821 08:59:09.131855 32502 net.cpp:226] 
L1_b10_cbr1_scale needs backward computation.\nI0821 08:59:09.131861 32502 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0821 08:59:09.131867 32502 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0821 08:59:09.131873 32502 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:59:09.131880 32502 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:59:09.131886 32502 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:59:09.131891 32502 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:59:09.131897 32502 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:59:09.131903 32502 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:59:09.131909 32502 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:59:09.131914 32502 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:59:09.131920 32502 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:59:09.131927 32502 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:59:09.131932 32502 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:59:09.131938 32502 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:59:09.131944 32502 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:59:09.131950 32502 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:59:09.131956 32502 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:59:09.131963 32502 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:59:09.131968 32502 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:59:09.131974 32502 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 08:59:09.131979 32502 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:59:09.131985 32502 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 
08:59:09.131991 32502 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:59:09.131999 32502 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:59:09.132004 32502 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:59:09.132011 32502 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:59:09.132017 32502 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:59:09.132024 32502 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:59:09.132030 32502 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:59:09.132035 32502 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:59:09.132041 32502 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:59:09.132046 32502 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:59:09.132052 32502 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:59:09.132060 32502 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:59:09.132064 32502 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:59:09.132072 32502 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:59:09.132077 32502 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:59:09.132088 32502 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:59:09.132095 32502 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:59:09.132100 32502 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:59:09.132107 32502 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:59:09.132113 32502 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:59:09.132119 32502 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:59:09.132125 32502 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:59:09.132131 32502 net.cpp:226] 
L1_b5_sum_eltwise needs backward computation.\nI0821 08:59:09.132138 32502 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:59:09.132143 32502 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:59:09.132149 32502 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:59:09.132156 32502 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:59:09.132161 32502 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:59:09.132167 32502 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:59:09.132174 32502 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:59:09.132179 32502 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:59:09.132186 32502 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:59:09.132191 32502 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:59:09.132199 32502 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:59:09.132205 32502 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:59:09.132210 32502 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:59:09.132216 32502 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:59:09.132222 32502 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:59:09.132228 32502 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:59:09.132235 32502 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:59:09.132241 32502 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:59:09.132246 32502 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:59:09.132252 32502 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0821 08:59:09.132258 32502 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:59:09.132264 32502 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:59:09.132270 
32502 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:59:09.132277 32502 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:59:09.132282 32502 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:59:09.132288 32502 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:59:09.132294 32502 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:59:09.132300 32502 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:59:09.132306 32502 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:59:09.132313 32502 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:59:09.132319 32502 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:59:09.132325 32502 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:59:09.132331 32502 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:59:09.132338 32502 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:59:09.132344 32502 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:59:09.132349 32502 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:59:09.132355 32502 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:59:09.132361 32502 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:59:09.132367 32502 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:59:09.132377 32502 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:59:09.132385 32502 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:59:09.132391 32502 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:59:09.132397 32502 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0821 08:59:09.132403 32502 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:59:09.132410 32502 net.cpp:226] L1_b1_cbr1_scale needs backward 
computation.\nI0821 08:59:09.132416 32502 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:59:09.132421 32502 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:59:09.132427 32502 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:59:09.132432 32502 net.cpp:226] pre_relu needs backward computation.\nI0821 08:59:09.132438 32502 net.cpp:226] pre_scale needs backward computation.\nI0821 08:59:09.132443 32502 net.cpp:226] pre_bn needs backward computation.\nI0821 08:59:09.132448 32502 net.cpp:226] pre_conv needs backward computation.\nI0821 08:59:09.132455 32502 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:59:09.132462 32502 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:59:09.132467 32502 net.cpp:270] This network produces output accuracy\nI0821 08:59:09.132473 32502 net.cpp:270] This network produces output loss\nI0821 08:59:09.133173 32502 net.cpp:283] Network initialization done.\nI0821 08:59:09.135306 32502 solver.cpp:60] Solver scaffolding done.\nI0821 08:59:09.382764 32502 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:59:09.836422 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:09.836524 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:09.845993 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:10.104111 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:10.104240 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:10.176182 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:10.176295 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:10.797322 32502 upgrade_proto.cpp:77] Attempting to upgrade 
batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:10.797412 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:10.807605 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:11.104168 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:11.104321 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:11.215199 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:11.215349 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:11.949962 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:11.950057 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:11.961040 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:12.307878 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:12.308070 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:12.456034 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:12.456205 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:12.631650 32502 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:59:13.309468 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:13.309579 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:13.322816 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:13.709596 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:13.709784 32502 net.cpp:143] Created 
top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:13.899786 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:13.899969 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:14.902716 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:14.902806 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:14.915868 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:15.345595 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:15.345842 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:15.580518 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:15.580754 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:16.697018 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:16.697108 32502 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:59:16.710986 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:17.196221 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:17.196491 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:17.475325 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:17.475590 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:18.722152 32502 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:59:18.722249 32502 upgrade_proto.cpp:80] Successfully 
upgraded batch norm layers using deprecated params.\nI0821 08:59:18.737102 32502 data_layer.cpp:41] output data size: 100,3,32,32\nI0821 08:59:18.803328 32530 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:18.872364 32527 blocking_queue.cpp:50] Waiting for data\nI0821 08:59:19.332955 32502 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:59:19.333250 32502 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0821 08:59:19.653581 32502 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:59:19.653883 32502 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0821 08:59:20.013006 32502 parallel.cpp:425] Starting Optimization\nI0821 08:59:20.014310 32502 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:59:20.014328 32502 solver.cpp:280] Learning Rate Policy: multistep\nI0821 08:59:20.021895 32502 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 09:01:27.885551 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1\nI0821 09:01:27.885908 32502 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 09:01:35.373129 32502 solver.cpp:228] Iteration 0, loss = 5.1526\nI0821 09:01:35.373173 32502 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI0821 09:01:35.373201 32502 solver.cpp:244]     Train net output #1: loss = 5.1526 (* 1 = 5.1526 loss)\nI0821 09:01:35.373421 32502 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0821 09:05:16.203169 32502 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 09:07:25.938457 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1223\nI0821 09:07:25.938832 32502 solver.cpp:404]     Test net output #1: loss = 3.23427 (* 1 = 3.23427 loss)\nI0821 09:07:28.029268 32502 solver.cpp:228] Iteration 100, loss = 2.30387\nI0821 09:07:28.029330 32502 solver.cpp:244]     Train net output #0: accuracy = 0.05\nI0821 09:07:28.029348 32502 solver.cpp:244]     Train net output #1: loss = 2.30386 (* 1 
= 2.30386 loss)\nI0821 09:07:28.165196 32502 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0821 09:11:08.588595 32502 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 09:13:18.285640 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1042\nI0821 09:13:18.286022 32502 solver.cpp:404]     Test net output #1: loss = 2.33381 (* 1 = 2.33381 loss)\nI0821 09:13:20.375762 32502 solver.cpp:228] Iteration 200, loss = 2.29249\nI0821 09:13:20.375825 32502 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI0821 09:13:20.375845 32502 solver.cpp:244]     Train net output #1: loss = 2.29249 (* 1 = 2.29249 loss)\nI0821 09:13:20.515419 32502 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0821 09:17:01.159049 32502 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 09:19:10.870071 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1036\nI0821 09:19:10.870456 32502 solver.cpp:404]     Test net output #1: loss = 2.33349 (* 1 = 2.33349 loss)\nI0821 09:19:12.959561 32502 solver.cpp:228] Iteration 300, loss = 2.28414\nI0821 09:19:12.959625 32502 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 09:19:12.959645 32502 solver.cpp:244]     Train net output #1: loss = 2.28414 (* 1 = 2.28414 loss)\nI0821 09:19:13.102519 32502 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0821 09:22:53.549715 32502 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 09:25:03.271351 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1004\nI0821 09:25:03.271737 32502 solver.cpp:404]     Test net output #1: loss = 2.31493 (* 1 = 2.31493 loss)\nI0821 09:25:05.361686 32502 solver.cpp:228] Iteration 400, loss = 2.31492\nI0821 09:25:05.361745 32502 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI0821 09:25:05.361766 32502 solver.cpp:244]     Train net output #1: loss = 2.31492 (* 1 = 2.31492 loss)\nI0821 09:25:05.492796 32502 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0821 09:28:45.942386 32502 solver.cpp:337] Iteration 500, Testing net 
(#0)\nI0821 09:30:55.667690 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1044\nI0821 09:30:55.668062 32502 solver.cpp:404]     Test net output #1: loss = 2.56747 (* 1 = 2.56747 loss)\nI0821 09:30:57.759454 32502 solver.cpp:228] Iteration 500, loss = 1.95271\nI0821 09:30:57.759518 32502 solver.cpp:244]     Train net output #0: accuracy = 0.27\nI0821 09:30:57.759537 32502 solver.cpp:244]     Train net output #1: loss = 1.95271 (* 1 = 1.95271 loss)\nI0821 09:30:57.890327 32502 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0821 09:34:38.530656 32502 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 09:36:48.230675 32502 solver.cpp:404]     Test net output #0: accuracy = 0.0944\nI0821 09:36:48.231010 32502 solver.cpp:404]     Test net output #1: loss = 3.00408 (* 1 = 3.00408 loss)\nI0821 09:36:50.321590 32502 solver.cpp:228] Iteration 600, loss = 1.89158\nI0821 09:36:50.321652 32502 solver.cpp:244]     Train net output #0: accuracy = 0.31\nI0821 09:36:50.321671 32502 solver.cpp:244]     Train net output #1: loss = 1.89158 (* 1 = 1.89158 loss)\nI0821 09:36:50.461133 32502 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0821 09:40:31.146519 32502 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 09:42:40.791637 32502 solver.cpp:404]     Test net output #0: accuracy = 0.0685\nI0821 09:42:40.792011 32502 solver.cpp:404]     Test net output #1: loss = 3.46641 (* 1 = 3.46641 loss)\nI0821 09:42:42.882362 32502 solver.cpp:228] Iteration 700, loss = 1.65433\nI0821 09:42:42.882426 32502 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI0821 09:42:42.882446 32502 solver.cpp:244]     Train net output #1: loss = 1.65433 (* 1 = 1.65433 loss)\nI0821 09:42:43.024543 32502 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0821 09:46:23.731823 32502 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 09:48:33.402477 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1011\nI0821 09:48:33.402858 32502 solver.cpp:404]     Test net output #1: loss = 
4.31089 (* 1 = 4.31089 loss)\nI0821 09:48:35.494129 32502 solver.cpp:228] Iteration 800, loss = 1.40025\nI0821 09:48:35.494191 32502 solver.cpp:244]     Train net output #0: accuracy = 0.47\nI0821 09:48:35.494210 32502 solver.cpp:244]     Train net output #1: loss = 1.40025 (* 1 = 1.40025 loss)\nI0821 09:48:35.630733 32502 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0821 09:52:16.456372 32502 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:54:26.234109 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1037\nI0821 09:54:26.234462 32502 solver.cpp:404]     Test net output #1: loss = 4.53235 (* 1 = 4.53235 loss)\nI0821 09:54:28.325292 32502 solver.cpp:228] Iteration 900, loss = 1.32597\nI0821 09:54:28.325357 32502 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI0821 09:54:28.325376 32502 solver.cpp:244]     Train net output #1: loss = 1.32597 (* 1 = 1.32597 loss)\nI0821 09:54:28.454576 32502 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0821 09:58:08.985066 32502 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 10:00:18.690528 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1187\nI0821 10:00:18.690902 32502 solver.cpp:404]     Test net output #1: loss = 3.91611 (* 1 = 3.91611 loss)\nI0821 10:00:20.781723 32502 solver.cpp:228] Iteration 1000, loss = 1.15169\nI0821 10:00:20.781787 32502 solver.cpp:244]     Train net output #0: accuracy = 0.58\nI0821 10:00:20.781807 32502 solver.cpp:244]     Train net output #1: loss = 1.15169 (* 1 = 1.15169 loss)\nI0821 10:00:20.916676 32502 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0821 10:04:01.788529 32502 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 10:06:11.501345 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1113\nI0821 10:06:11.501713 32502 solver.cpp:404]     Test net output #1: loss = 3.87201 (* 1 = 3.87201 loss)\nI0821 10:06:13.592277 32502 solver.cpp:228] Iteration 1100, loss = 1.28666\nI0821 10:06:13.592339 32502 solver.cpp:244]     Train net 
output #0: accuracy = 0.56\nI0821 10:06:13.592358 32502 solver.cpp:244]     Train net output #1: loss = 1.28666 (* 1 = 1.28666 loss)\nI0821 10:06:13.731287 32502 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0821 10:09:54.561851 32502 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 10:12:04.247130 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1257\nI0821 10:12:04.247478 32502 solver.cpp:404]     Test net output #1: loss = 3.67262 (* 1 = 3.67262 loss)\nI0821 10:12:06.338048 32502 solver.cpp:228] Iteration 1200, loss = 1.18205\nI0821 10:12:06.338111 32502 solver.cpp:244]     Train net output #0: accuracy = 0.63\nI0821 10:12:06.338129 32502 solver.cpp:244]     Train net output #1: loss = 1.18204 (* 1 = 1.18204 loss)\nI0821 10:12:06.476177 32502 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0821 10:15:47.140612 32502 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 10:17:56.815668 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1301\nI0821 10:17:56.816061 32502 solver.cpp:404]     Test net output #1: loss = 3.948 (* 1 = 3.948 loss)\nI0821 10:17:58.906962 32502 solver.cpp:228] Iteration 1300, loss = 0.98712\nI0821 10:17:58.907024 32502 solver.cpp:244]     Train net output #0: accuracy = 0.63\nI0821 10:17:58.907040 32502 solver.cpp:244]     Train net output #1: loss = 0.987117 (* 1 = 0.987117 loss)\nI0821 10:17:59.047894 32502 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0821 10:21:39.722833 32502 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 10:23:49.446770 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1017\nI0821 10:23:49.447131 32502 solver.cpp:404]     Test net output #1: loss = 4.38764 (* 1 = 4.38764 loss)\nI0821 10:23:51.537412 32502 solver.cpp:228] Iteration 1400, loss = 1.15549\nI0821 10:23:51.537475 32502 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0821 10:23:51.537493 32502 solver.cpp:244]     Train net output #1: loss = 1.15548 (* 1 = 1.15548 loss)\nI0821 10:23:51.674983 32502 
sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0821 10:27:32.402484 32502 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 10:29:42.113852 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1553\nI0821 10:29:42.114218 32502 solver.cpp:404]     Test net output #1: loss = 3.74225 (* 1 = 3.74225 loss)\nI0821 10:29:44.204982 32502 solver.cpp:228] Iteration 1500, loss = 0.888052\nI0821 10:29:44.205044 32502 solver.cpp:244]     Train net output #0: accuracy = 0.67\nI0821 10:29:44.205060 32502 solver.cpp:244]     Train net output #1: loss = 0.888049 (* 1 = 0.888049 loss)\nI0821 10:29:44.346029 32502 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0821 10:33:25.399224 32502 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 10:35:35.092298 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1276\nI0821 10:35:35.092684 32502 solver.cpp:404]     Test net output #1: loss = 4.40066 (* 1 = 4.40066 loss)\nI0821 10:35:37.183253 32502 solver.cpp:228] Iteration 1600, loss = 1.12232\nI0821 10:35:37.183313 32502 solver.cpp:244]     Train net output #0: accuracy = 0.53\nI0821 10:35:37.183332 32502 solver.cpp:244]     Train net output #1: loss = 1.12231 (* 1 = 1.12231 loss)\nI0821 10:35:37.315340 32502 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0821 10:39:18.120657 32502 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 10:41:27.827703 32502 solver.cpp:404]     Test net output #0: accuracy = 0.192\nI0821 10:41:27.828088 32502 solver.cpp:404]     Test net output #1: loss = 3.66457 (* 1 = 3.66457 loss)\nI0821 10:41:29.917760 32502 solver.cpp:228] Iteration 1700, loss = 0.933174\nI0821 10:41:29.917822 32502 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0821 10:41:29.917841 32502 solver.cpp:244]     Train net output #1: loss = 0.933171 (* 1 = 0.933171 loss)\nI0821 10:41:30.048027 32502 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0821 10:45:10.781636 32502 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 10:47:20.501929 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.1141\nI0821 10:47:20.502300 32502 solver.cpp:404]     Test net output #1: loss = 4.89425 (* 1 = 4.89425 loss)\nI0821 10:47:22.593231 32502 solver.cpp:228] Iteration 1800, loss = 0.860004\nI0821 10:47:22.593294 32502 solver.cpp:244]     Train net output #0: accuracy = 0.67\nI0821 10:47:22.593312 32502 solver.cpp:244]     Train net output #1: loss = 0.860002 (* 1 = 0.860002 loss)\nI0821 10:47:22.725090 32502 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0821 10:51:03.471302 32502 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 10:53:13.196225 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1592\nI0821 10:53:13.196575 32502 solver.cpp:404]     Test net output #1: loss = 4.14718 (* 1 = 4.14718 loss)\nI0821 10:53:15.288317 32502 solver.cpp:228] Iteration 1900, loss = 0.913057\nI0821 10:53:15.288383 32502 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0821 10:53:15.288408 32502 solver.cpp:244]     Train net output #1: loss = 0.913054 (* 1 = 0.913054 loss)\nI0821 10:53:15.423403 32502 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0821 10:56:56.146965 32502 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 10:59:05.819069 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1387\nI0821 10:59:05.819452 32502 solver.cpp:404]     Test net output #1: loss = 4.66987 (* 1 = 4.66987 loss)\nI0821 10:59:07.910830 32502 solver.cpp:228] Iteration 2000, loss = 0.714088\nI0821 10:59:07.910893 32502 solver.cpp:244]     Train net output #0: accuracy = 0.73\nI0821 10:59:07.910912 32502 solver.cpp:244]     Train net output #1: loss = 0.714086 (* 1 = 0.714086 loss)\nI0821 10:59:08.042321 32502 sgd_solver.cpp:166] Iteration 2000, lr = 0.35\nI0821 11:02:49.081615 32502 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 11:04:58.765949 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1124\nI0821 11:04:58.766314 32502 solver.cpp:404]     Test net output #1: loss = 4.47661 (* 1 = 
4.47661 loss)\nI0821 11:05:00.857899 32502 solver.cpp:228] Iteration 2100, loss = 0.828438\nI0821 11:05:00.857962 32502 solver.cpp:244]     Train net output #0: accuracy = 0.71\nI0821 11:05:00.857985 32502 solver.cpp:244]     Train net output #1: loss = 0.828436 (* 1 = 0.828436 loss)\nI0821 11:05:00.992429 32502 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0821 11:08:41.656867 32502 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 11:10:51.339053 32502 solver.cpp:404]     Test net output #0: accuracy = 0.162\nI0821 11:10:51.339403 32502 solver.cpp:404]     Test net output #1: loss = 4.31341 (* 1 = 4.31341 loss)\nI0821 11:10:53.430351 32502 solver.cpp:228] Iteration 2200, loss = 0.854463\nI0821 11:10:53.430414 32502 solver.cpp:244]     Train net output #0: accuracy = 0.71\nI0821 11:10:53.430433 32502 solver.cpp:244]     Train net output #1: loss = 0.85446 (* 1 = 0.85446 loss)\nI0821 11:10:53.563446 32502 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0821 11:14:34.107537 32502 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 11:16:43.727473 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1889\nI0821 11:16:43.727845 32502 solver.cpp:404]     Test net output #1: loss = 3.73873 (* 1 = 3.73873 loss)\nI0821 11:16:45.819207 32502 solver.cpp:228] Iteration 2300, loss = 0.693144\nI0821 11:16:45.819269 32502 solver.cpp:244]     Train net output #0: accuracy = 0.75\nI0821 11:16:45.819288 32502 solver.cpp:244]     Train net output #1: loss = 0.693141 (* 1 = 0.693141 loss)\nI0821 11:16:45.948827 32502 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0821 11:20:26.516990 32502 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 11:22:36.192564 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1576\nI0821 11:22:36.192942 32502 solver.cpp:404]     Test net output #1: loss = 4.09844 (* 1 = 4.09844 loss)\nI0821 11:22:38.284481 32502 solver.cpp:228] Iteration 2400, loss = 0.800451\nI0821 11:22:38.284543 32502 solver.cpp:244]     Train net 
output #0: accuracy = 0.75\nI0821 11:22:38.284561 32502 solver.cpp:244]     Train net output #1: loss = 0.800448 (* 1 = 0.800448 loss)\nI0821 11:22:38.419499 32502 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0821 11:26:19.008075 32502 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 11:28:28.701155 32502 solver.cpp:404]     Test net output #0: accuracy = 0.135\nI0821 11:28:28.701537 32502 solver.cpp:404]     Test net output #1: loss = 4.40889 (* 1 = 4.40889 loss)\nI0821 11:28:30.792546 32502 solver.cpp:228] Iteration 2500, loss = 0.710769\nI0821 11:28:30.792609 32502 solver.cpp:244]     Train net output #0: accuracy = 0.74\nI0821 11:28:30.792628 32502 solver.cpp:244]     Train net output #1: loss = 0.710766 (* 1 = 0.710766 loss)\nI0821 11:28:30.929726 32502 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0821 11:32:11.541002 32502 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 11:34:21.210235 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1478\nI0821 11:34:21.210608 32502 solver.cpp:404]     Test net output #1: loss = 4.27384 (* 1 = 4.27384 loss)\nI0821 11:34:23.302289 32502 solver.cpp:228] Iteration 2600, loss = 0.682585\nI0821 11:34:23.302352 32502 solver.cpp:244]     Train net output #0: accuracy = 0.75\nI0821 11:34:23.302371 32502 solver.cpp:244]     Train net output #1: loss = 0.682582 (* 1 = 0.682582 loss)\nI0821 11:34:23.440038 32502 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0821 11:38:04.123080 32502 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 11:40:13.786854 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1274\nI0821 11:40:13.787269 32502 solver.cpp:404]     Test net output #1: loss = 4.46111 (* 1 = 4.46111 loss)\nI0821 11:40:15.878859 32502 solver.cpp:228] Iteration 2700, loss = 0.681637\nI0821 11:40:15.878923 32502 solver.cpp:244]     Train net output #0: accuracy = 0.79\nI0821 11:40:15.878942 32502 solver.cpp:244]     Train net output #1: loss = 0.681634 (* 1 = 0.681634 loss)\nI0821 
11:40:16.015923 32502 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0821 11:43:56.926597 32502 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 11:46:06.580705 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1176\nI0821 11:46:06.581077 32502 solver.cpp:404]     Test net output #1: loss = 3.86531 (* 1 = 3.86531 loss)\nI0821 11:46:08.672787 32502 solver.cpp:228] Iteration 2800, loss = 0.597121\nI0821 11:46:08.672850 32502 solver.cpp:244]     Train net output #0: accuracy = 0.73\nI0821 11:46:08.672869 32502 solver.cpp:244]     Train net output #1: loss = 0.597118 (* 1 = 0.597118 loss)\nI0821 11:46:08.803447 32502 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0821 11:49:49.595198 32502 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 11:51:59.248466 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1524\nI0821 11:51:59.248818 32502 solver.cpp:404]     Test net output #1: loss = 3.8834 (* 1 = 3.8834 loss)\nI0821 11:52:01.339179 32502 solver.cpp:228] Iteration 2900, loss = 0.593712\nI0821 11:52:01.339242 32502 solver.cpp:244]     Train net output #0: accuracy = 0.82\nI0821 11:52:01.339262 32502 solver.cpp:244]     Train net output #1: loss = 0.593709 (* 1 = 0.593709 loss)\nI0821 11:52:01.472543 32502 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0821 11:55:42.196023 32502 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 11:57:51.870779 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1229\nI0821 11:57:51.871177 32502 solver.cpp:404]     Test net output #1: loss = 3.88293 (* 1 = 3.88293 loss)\nI0821 11:57:53.963307 32502 solver.cpp:228] Iteration 3000, loss = 0.579396\nI0821 11:57:53.963374 32502 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0821 11:57:53.963392 32502 solver.cpp:244]     Train net output #1: loss = 0.579393 (* 1 = 0.579393 loss)\nI0821 11:57:54.099827 32502 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0821 12:01:34.721882 32502 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 
12:03:44.400671 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1812\nI0821 12:03:44.401059 32502 solver.cpp:404]     Test net output #1: loss = 3.86365 (* 1 = 3.86365 loss)\nI0821 12:03:46.491848 32502 solver.cpp:228] Iteration 3100, loss = 0.597391\nI0821 12:03:46.491912 32502 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0821 12:03:46.491931 32502 solver.cpp:244]     Train net output #1: loss = 0.597388 (* 1 = 0.597388 loss)\nI0821 12:03:46.628891 32502 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0821 12:07:27.234025 32502 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 12:09:36.917423 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1607\nI0821 12:09:36.917805 32502 solver.cpp:404]     Test net output #1: loss = 3.38829 (* 1 = 3.38829 loss)\nI0821 12:09:39.009398 32502 solver.cpp:228] Iteration 3200, loss = 0.591842\nI0821 12:09:39.009462 32502 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0821 12:09:39.009480 32502 solver.cpp:244]     Train net output #1: loss = 0.591839 (* 1 = 0.591839 loss)\nI0821 12:09:39.145304 32502 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0821 12:13:19.756108 32502 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 12:15:29.433640 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1448\nI0821 12:15:29.434036 32502 solver.cpp:404]     Test net output #1: loss = 3.77581 (* 1 = 3.77581 loss)\nI0821 12:15:31.525570 32502 solver.cpp:228] Iteration 3300, loss = 0.69315\nI0821 12:15:31.525634 32502 solver.cpp:244]     Train net output #0: accuracy = 0.74\nI0821 12:15:31.525651 32502 solver.cpp:244]     Train net output #1: loss = 0.693147 (* 1 = 0.693147 loss)\nI0821 12:15:31.653439 32502 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0821 12:19:12.190698 32502 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 12:21:21.876039 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1857\nI0821 12:21:21.876396 32502 solver.cpp:404]     Test net output #1: 
loss = 3.61988 (* 1 = 3.61988 loss)\nI0821 12:21:23.968737 32502 solver.cpp:228] Iteration 3400, loss = 0.708273\nI0821 12:21:23.968801 32502 solver.cpp:244]     Train net output #0: accuracy = 0.74\nI0821 12:21:23.968819 32502 solver.cpp:244]     Train net output #1: loss = 0.70827 (* 1 = 0.70827 loss)\nI0821 12:21:24.107107 32502 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0821 12:25:04.770705 32502 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 12:27:14.466454 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1665\nI0821 12:27:14.466843 32502 solver.cpp:404]     Test net output #1: loss = 3.1978 (* 1 = 3.1978 loss)\nI0821 12:27:16.558490 32502 solver.cpp:228] Iteration 3500, loss = 0.543278\nI0821 12:27:16.558552 32502 solver.cpp:244]     Train net output #0: accuracy = 0.79\nI0821 12:27:16.558570 32502 solver.cpp:244]     Train net output #1: loss = 0.543275 (* 1 = 0.543275 loss)\nI0821 12:27:16.694437 32502 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0821 12:30:57.373018 32502 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 12:33:07.062669 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2042\nI0821 12:33:07.063047 32502 solver.cpp:404]     Test net output #1: loss = 3.38242 (* 1 = 3.38242 loss)\nI0821 12:33:09.154445 32502 solver.cpp:228] Iteration 3600, loss = 0.570432\nI0821 12:33:09.154508 32502 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0821 12:33:09.154526 32502 solver.cpp:244]     Train net output #1: loss = 0.57043 (* 1 = 0.57043 loss)\nI0821 12:33:09.287406 32502 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0821 12:36:50.070289 32502 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 12:38:59.812582 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2768\nI0821 12:38:59.812978 32502 solver.cpp:404]     Test net output #1: loss = 2.65168 (* 1 = 2.65168 loss)\nI0821 12:39:01.904795 32502 solver.cpp:228] Iteration 3700, loss = 0.563735\nI0821 12:39:01.904857 32502 solver.cpp:244] 
    Train net output #0: accuracy = 0.83\nI0821 12:39:01.904875 32502 solver.cpp:244]     Train net output #1: loss = 0.563732 (* 1 = 0.563732 loss)\nI0821 12:39:02.043061 32502 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0821 12:42:42.716871 32502 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 12:44:52.495296 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1837\nI0821 12:44:52.495678 32502 solver.cpp:404]     Test net output #1: loss = 3.5345 (* 1 = 3.5345 loss)\nI0821 12:44:54.586964 32502 solver.cpp:228] Iteration 3800, loss = 0.605419\nI0821 12:44:54.587033 32502 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 12:44:54.587049 32502 solver.cpp:244]     Train net output #1: loss = 0.605416 (* 1 = 0.605416 loss)\nI0821 12:44:54.719730 32502 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0821 12:48:35.408071 32502 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 12:50:45.166101 32502 solver.cpp:404]     Test net output #0: accuracy = 0.1908\nI0821 12:50:45.166476 32502 solver.cpp:404]     Test net output #1: loss = 3.79279 (* 1 = 3.79279 loss)\nI0821 12:50:47.257542 32502 solver.cpp:228] Iteration 3900, loss = 0.62214\nI0821 12:50:47.257604 32502 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0821 12:50:47.257622 32502 solver.cpp:244]     Train net output #1: loss = 0.622137 (* 1 = 0.622137 loss)\nI0821 12:50:47.389189 32502 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0821 12:54:27.946328 32502 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 12:56:37.705000 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2035\nI0821 12:56:37.705366 32502 solver.cpp:404]     Test net output #1: loss = 3.75407 (* 1 = 3.75407 loss)\nI0821 12:56:39.796492 32502 solver.cpp:228] Iteration 4000, loss = 0.436844\nI0821 12:56:39.796556 32502 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 12:56:39.796573 32502 solver.cpp:244]     Train net output #1: loss = 0.436841 (* 1 = 0.436841 loss)\nI0821 
12:56:39.932611 32502 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0821 13:00:20.423712 32502 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 13:02:30.172813 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2141\nI0821 13:02:30.173207 32502 solver.cpp:404]     Test net output #1: loss = 3.807 (* 1 = 3.807 loss)\nI0821 13:02:32.264304 32502 solver.cpp:228] Iteration 4100, loss = 0.387221\nI0821 13:02:32.264367 32502 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 13:02:32.264385 32502 solver.cpp:244]     Train net output #1: loss = 0.387219 (* 1 = 0.387219 loss)\nI0821 13:02:32.397763 32502 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0821 13:06:13.022400 32502 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 13:08:22.780102 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2362\nI0821 13:08:22.780462 32502 solver.cpp:404]     Test net output #1: loss = 3.2204 (* 1 = 3.2204 loss)\nI0821 13:08:24.872555 32502 solver.cpp:228] Iteration 4200, loss = 0.454956\nI0821 13:08:24.872618 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 13:08:24.872635 32502 solver.cpp:244]     Train net output #1: loss = 0.454954 (* 1 = 0.454954 loss)\nI0821 13:08:25.010025 32502 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0821 13:12:05.891417 32502 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 13:14:15.581292 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2283\nI0821 13:14:15.581682 32502 solver.cpp:404]     Test net output #1: loss = 4.16978 (* 1 = 4.16978 loss)\nI0821 13:14:17.673444 32502 solver.cpp:228] Iteration 4300, loss = 0.340856\nI0821 13:14:17.673506 32502 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 13:14:17.673524 32502 solver.cpp:244]     Train net output #1: loss = 0.340854 (* 1 = 0.340854 loss)\nI0821 13:14:17.806661 32502 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0821 13:17:58.552191 32502 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 
13:20:08.247819 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3269\nI0821 13:20:08.248175 32502 solver.cpp:404]     Test net output #1: loss = 2.84056 (* 1 = 2.84056 loss)\nI0821 13:20:10.340570 32502 solver.cpp:228] Iteration 4400, loss = 0.667615\nI0821 13:20:10.340633 32502 solver.cpp:244]     Train net output #0: accuracy = 0.75\nI0821 13:20:10.340652 32502 solver.cpp:244]     Train net output #1: loss = 0.667612 (* 1 = 0.667612 loss)\nI0821 13:20:10.471158 32502 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0821 13:23:50.803792 32502 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 13:26:00.509629 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3241\nI0821 13:26:00.509995 32502 solver.cpp:404]     Test net output #1: loss = 2.78599 (* 1 = 2.78599 loss)\nI0821 13:26:02.601399 32502 solver.cpp:228] Iteration 4500, loss = 0.465541\nI0821 13:26:02.601461 32502 solver.cpp:244]     Train net output #0: accuracy = 0.82\nI0821 13:26:02.601480 32502 solver.cpp:244]     Train net output #1: loss = 0.465538 (* 1 = 0.465538 loss)\nI0821 13:26:02.732494 32502 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0821 13:29:43.312785 32502 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 13:31:52.999001 32502 solver.cpp:404]     Test net output #0: accuracy = 0.25\nI0821 13:31:52.999398 32502 solver.cpp:404]     Test net output #1: loss = 4.11527 (* 1 = 4.11527 loss)\nI0821 13:31:55.090416 32502 solver.cpp:228] Iteration 4600, loss = 0.405168\nI0821 13:31:55.090476 32502 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 13:31:55.090494 32502 solver.cpp:244]     Train net output #1: loss = 0.405165 (* 1 = 0.405165 loss)\nI0821 13:31:55.227699 32502 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0821 13:35:36.191478 32502 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 13:37:45.895756 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3459\nI0821 13:37:45.896123 32502 solver.cpp:404]     Test net output #1: loss 
= 2.54934 (* 1 = 2.54934 loss)\nI0821 13:37:47.987706 32502 solver.cpp:228] Iteration 4700, loss = 0.242807\nI0821 13:37:47.987769 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:37:47.987787 32502 solver.cpp:244]     Train net output #1: loss = 0.242804 (* 1 = 0.242804 loss)\nI0821 13:37:48.121271 32502 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0821 13:41:29.105967 32502 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 13:43:38.795799 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3293\nI0821 13:43:38.796207 32502 solver.cpp:404]     Test net output #1: loss = 2.75827 (* 1 = 2.75827 loss)\nI0821 13:43:40.888001 32502 solver.cpp:228] Iteration 4800, loss = 0.417559\nI0821 13:43:40.888064 32502 solver.cpp:244]     Train net output #0: accuracy = 0.85\nI0821 13:43:40.888082 32502 solver.cpp:244]     Train net output #1: loss = 0.417556 (* 1 = 0.417556 loss)\nI0821 13:43:41.023267 32502 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0821 13:47:21.703547 32502 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 13:49:31.394227 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4151\nI0821 13:49:31.394587 32502 solver.cpp:404]     Test net output #1: loss = 2.0618 (* 1 = 2.0618 loss)\nI0821 13:49:33.486515 32502 solver.cpp:228] Iteration 4900, loss = 0.357507\nI0821 13:49:33.486578 32502 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 13:49:33.486598 32502 solver.cpp:244]     Train net output #1: loss = 0.357504 (* 1 = 0.357504 loss)\nI0821 13:49:33.624721 32502 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0821 13:53:14.418278 32502 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 13:55:24.098130 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2651\nI0821 13:55:24.098489 32502 solver.cpp:404]     Test net output #1: loss = 3.23159 (* 1 = 3.23159 loss)\nI0821 13:55:26.190227 32502 solver.cpp:228] Iteration 5000, loss = 0.319415\nI0821 13:55:26.190290 32502 solver.cpp:244]  
   Train net output #0: accuracy = 0.89\nI0821 13:55:26.190309 32502 solver.cpp:244]     Train net output #1: loss = 0.319413 (* 1 = 0.319413 loss)\nI0821 13:55:26.329007 32502 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0821 13:59:07.023694 32502 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 14:01:16.696811 32502 solver.cpp:404]     Test net output #0: accuracy = 0.2887\nI0821 14:01:16.697190 32502 solver.cpp:404]     Test net output #1: loss = 3.96283 (* 1 = 3.96283 loss)\nI0821 14:01:18.787695 32502 solver.cpp:228] Iteration 5100, loss = 0.426602\nI0821 14:01:18.787756 32502 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 14:01:18.787775 32502 solver.cpp:244]     Train net output #1: loss = 0.426599 (* 1 = 0.426599 loss)\nI0821 14:01:18.923193 32502 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0821 14:04:59.642768 32502 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 14:07:09.316367 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3482\nI0821 14:07:09.316751 32502 solver.cpp:404]     Test net output #1: loss = 3.05243 (* 1 = 3.05243 loss)\nI0821 14:07:11.408349 32502 solver.cpp:228] Iteration 5200, loss = 0.317335\nI0821 14:07:11.408412 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 14:07:11.408432 32502 solver.cpp:244]     Train net output #1: loss = 0.317332 (* 1 = 0.317332 loss)\nI0821 14:07:11.546332 32502 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0821 14:10:52.285625 32502 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 14:13:01.923872 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4608\nI0821 14:13:01.924228 32502 solver.cpp:404]     Test net output #1: loss = 2.06805 (* 1 = 2.06805 loss)\nI0821 14:13:04.016669 32502 solver.cpp:228] Iteration 5300, loss = 0.329163\nI0821 14:13:04.016731 32502 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 14:13:04.016748 32502 solver.cpp:244]     Train net output #1: loss = 0.32916 (* 1 = 0.32916 loss)\nI0821 
14:13:04.149788 32502 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0821 14:16:45.011426 32502 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 14:18:54.632580 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3979\nI0821 14:18:54.632961 32502 solver.cpp:404]     Test net output #1: loss = 2.21892 (* 1 = 2.21892 loss)\nI0821 14:18:56.724428 32502 solver.cpp:228] Iteration 5400, loss = 0.397532\nI0821 14:18:56.724490 32502 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 14:18:56.724509 32502 solver.cpp:244]     Train net output #1: loss = 0.397529 (* 1 = 0.397529 loss)\nI0821 14:18:56.863634 32502 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0821 14:22:36.689867 32502 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 14:24:44.645004 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3645\nI0821 14:24:44.645329 32502 solver.cpp:404]     Test net output #1: loss = 2.51676 (* 1 = 2.51676 loss)\nI0821 14:24:46.731971 32502 solver.cpp:228] Iteration 5500, loss = 0.308177\nI0821 14:24:46.732017 32502 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 14:24:46.732034 32502 solver.cpp:244]     Train net output #1: loss = 0.308174 (* 1 = 0.308174 loss)\nI0821 14:24:46.868645 32502 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0821 14:28:25.984897 32502 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 14:30:33.942880 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3719\nI0821 14:30:33.943186 32502 solver.cpp:404]     Test net output #1: loss = 2.91521 (* 1 = 2.91521 loss)\nI0821 14:30:36.029989 32502 solver.cpp:228] Iteration 5600, loss = 0.393483\nI0821 14:30:36.030036 32502 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 14:30:36.030053 32502 solver.cpp:244]     Train net output #1: loss = 0.39348 (* 1 = 0.39348 loss)\nI0821 14:30:36.163439 32502 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0821 14:34:15.134965 32502 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 
14:36:23.087668 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3284\nI0821 14:36:23.087993 32502 solver.cpp:404]     Test net output #1: loss = 3.15112 (* 1 = 3.15112 loss)\nI0821 14:36:25.175283 32502 solver.cpp:228] Iteration 5700, loss = 0.251093\nI0821 14:36:25.175329 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 14:36:25.175345 32502 solver.cpp:244]     Train net output #1: loss = 0.25109 (* 1 = 0.25109 loss)\nI0821 14:36:25.298562 32502 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0821 14:40:04.248070 32502 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 14:42:12.205709 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4884\nI0821 14:42:12.206022 32502 solver.cpp:404]     Test net output #1: loss = 1.97787 (* 1 = 1.97787 loss)\nI0821 14:42:14.294384 32502 solver.cpp:228] Iteration 5800, loss = 0.227984\nI0821 14:42:14.294430 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 14:42:14.294446 32502 solver.cpp:244]     Train net output #1: loss = 0.227981 (* 1 = 0.227981 loss)\nI0821 14:42:14.424558 32502 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0821 14:45:53.461395 32502 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 14:48:01.419594 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3592\nI0821 14:48:01.419919 32502 solver.cpp:404]     Test net output #1: loss = 3.13991 (* 1 = 3.13991 loss)\nI0821 14:48:03.507426 32502 solver.cpp:228] Iteration 5900, loss = 0.32616\nI0821 14:48:03.507473 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 14:48:03.507490 32502 solver.cpp:244]     Train net output #1: loss = 0.326157 (* 1 = 0.326157 loss)\nI0821 14:48:03.633244 32502 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0821 14:51:42.589982 32502 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 14:53:50.526777 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3671\nI0821 14:53:50.527096 32502 solver.cpp:404]     Test net output #1: loss 
= 3.12062 (* 1 = 3.12062 loss)\nI0821 14:53:52.613786 32502 solver.cpp:228] Iteration 6000, loss = 0.18301\nI0821 14:53:52.613837 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 14:53:52.613852 32502 solver.cpp:244]     Train net output #1: loss = 0.183007 (* 1 = 0.183007 loss)\nI0821 14:53:52.745033 32502 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0821 14:57:31.686820 32502 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 14:59:39.664587 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4613\nI0821 14:59:39.664909 32502 solver.cpp:404]     Test net output #1: loss = 2.46743 (* 1 = 2.46743 loss)\nI0821 14:59:41.753331 32502 solver.cpp:228] Iteration 6100, loss = 0.228796\nI0821 14:59:41.753381 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 14:59:41.753397 32502 solver.cpp:244]     Train net output #1: loss = 0.228793 (* 1 = 0.228793 loss)\nI0821 14:59:41.876066 32502 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0821 15:03:20.760207 32502 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 15:05:28.746276 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4143\nI0821 15:05:28.746603 32502 solver.cpp:404]     Test net output #1: loss = 2.92386 (* 1 = 2.92386 loss)\nI0821 15:05:30.832638 32502 solver.cpp:228] Iteration 6200, loss = 0.284975\nI0821 15:05:30.832686 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 15:05:30.832703 32502 solver.cpp:244]     Train net output #1: loss = 0.284972 (* 1 = 0.284972 loss)\nI0821 15:05:30.964072 32502 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0821 15:09:09.912415 32502 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 15:11:17.901259 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4382\nI0821 15:11:17.901583 32502 solver.cpp:404]     Test net output #1: loss = 2.39148 (* 1 = 2.39148 loss)\nI0821 15:11:19.988931 32502 solver.cpp:228] Iteration 6300, loss = 0.152641\nI0821 15:11:19.988979 32502 solver.cpp:244] 
    Train net output #0: accuracy = 0.93\nI0821 15:11:19.988996 32502 solver.cpp:244]     Train net output #1: loss = 0.152638 (* 1 = 0.152638 loss)\nI0821 15:11:20.119209 32502 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0821 15:14:59.102157 32502 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 15:17:07.084789 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4961\nI0821 15:17:07.085086 32502 solver.cpp:404]     Test net output #1: loss = 2.25047 (* 1 = 2.25047 loss)\nI0821 15:17:09.172832 32502 solver.cpp:228] Iteration 6400, loss = 0.311849\nI0821 15:17:09.172878 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 15:17:09.172894 32502 solver.cpp:244]     Train net output #1: loss = 0.311846 (* 1 = 0.311846 loss)\nI0821 15:17:09.304255 32502 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0821 15:20:48.218029 32502 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 15:22:56.203770 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3813\nI0821 15:22:56.204088 32502 solver.cpp:404]     Test net output #1: loss = 2.74725 (* 1 = 2.74725 loss)\nI0821 15:22:58.291683 32502 solver.cpp:228] Iteration 6500, loss = 0.419165\nI0821 15:22:58.291729 32502 solver.cpp:244]     Train net output #0: accuracy = 0.85\nI0821 15:22:58.291746 32502 solver.cpp:244]     Train net output #1: loss = 0.419162 (* 1 = 0.419162 loss)\nI0821 15:22:58.414319 32502 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0821 15:26:37.398769 32502 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 15:28:45.375826 32502 solver.cpp:404]     Test net output #0: accuracy = 0.508\nI0821 15:28:45.376137 32502 solver.cpp:404]     Test net output #1: loss = 1.89906 (* 1 = 1.89906 loss)\nI0821 15:28:47.463954 32502 solver.cpp:228] Iteration 6600, loss = 0.220545\nI0821 15:28:47.464001 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 15:28:47.464018 32502 solver.cpp:244]     Train net output #1: loss = 0.220542 (* 1 = 0.220542 
loss)\nI0821 15:28:47.591871 32502 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0821 15:32:26.567792 32502 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 15:34:34.549943 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4247\nI0821 15:34:34.550232 32502 solver.cpp:404]     Test net output #1: loss = 2.87567 (* 1 = 2.87567 loss)\nI0821 15:34:36.638705 32502 solver.cpp:228] Iteration 6700, loss = 0.247324\nI0821 15:34:36.638756 32502 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 15:34:36.638785 32502 solver.cpp:244]     Train net output #1: loss = 0.247321 (* 1 = 0.247321 loss)\nI0821 15:34:36.763188 32502 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0821 15:38:15.750391 32502 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 15:40:23.733479 32502 solver.cpp:404]     Test net output #0: accuracy = 0.3979\nI0821 15:40:23.733824 32502 solver.cpp:404]     Test net output #1: loss = 2.9949 (* 1 = 2.9949 loss)\nI0821 15:40:25.823426 32502 solver.cpp:228] Iteration 6800, loss = 0.220876\nI0821 15:40:25.823477 32502 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 15:40:25.823499 32502 solver.cpp:244]     Train net output #1: loss = 0.220873 (* 1 = 0.220873 loss)\nI0821 15:40:25.945037 32502 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0821 15:44:05.888815 32502 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 15:46:15.532534 32502 solver.cpp:404]     Test net output #0: accuracy = 0.435\nI0821 15:46:15.532909 32502 solver.cpp:404]     Test net output #1: loss = 2.63441 (* 1 = 2.63441 loss)\nI0821 15:46:17.624771 32502 solver.cpp:228] Iteration 6900, loss = 0.253787\nI0821 15:46:17.624835 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 15:46:17.624853 32502 solver.cpp:244]     Train net output #1: loss = 0.253784 (* 1 = 0.253784 loss)\nI0821 15:46:17.758504 32502 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0821 15:49:58.263257 32502 solver.cpp:337] Iteration 7000, Testing net 
(#0)\nI0821 15:52:07.881695 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4413\nI0821 15:52:07.882064 32502 solver.cpp:404]     Test net output #1: loss = 2.37864 (* 1 = 2.37864 loss)\nI0821 15:52:09.973176 32502 solver.cpp:228] Iteration 7000, loss = 0.226963\nI0821 15:52:09.973242 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 15:52:09.973258 32502 solver.cpp:244]     Train net output #1: loss = 0.22696 (* 1 = 0.22696 loss)\nI0821 15:52:10.109189 32502 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0821 15:55:50.466925 32502 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 15:58:00.110711 32502 solver.cpp:404]     Test net output #0: accuracy = 0.53\nI0821 15:58:00.111084 32502 solver.cpp:404]     Test net output #1: loss = 1.80705 (* 1 = 1.80705 loss)\nI0821 15:58:02.202728 32502 solver.cpp:228] Iteration 7100, loss = 0.12586\nI0821 15:58:02.202788 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 15:58:02.202805 32502 solver.cpp:244]     Train net output #1: loss = 0.125857 (* 1 = 0.125857 loss)\nI0821 15:58:02.337543 32502 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0821 16:01:42.897255 32502 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 16:03:52.527724 32502 solver.cpp:404]     Test net output #0: accuracy = 0.495\nI0821 16:03:52.528110 32502 solver.cpp:404]     Test net output #1: loss = 2.11638 (* 1 = 2.11638 loss)\nI0821 16:03:54.618041 32502 solver.cpp:228] Iteration 7200, loss = 0.122971\nI0821 16:03:54.618104 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 16:03:54.618122 32502 solver.cpp:244]     Train net output #1: loss = 0.122969 (* 1 = 0.122969 loss)\nI0821 16:03:54.751466 32502 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0821 16:07:35.555091 32502 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 16:09:45.178654 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4478\nI0821 16:09:45.179019 32502 solver.cpp:404]     Test net output 
#1: loss = 2.92495 (* 1 = 2.92495 loss)\nI0821 16:09:47.269554 32502 solver.cpp:228] Iteration 7300, loss = 0.187171\nI0821 16:09:47.269619 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 16:09:47.269639 32502 solver.cpp:244]     Train net output #1: loss = 0.187168 (* 1 = 0.187168 loss)\nI0821 16:09:47.406937 32502 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0821 16:13:28.482312 32502 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 16:15:38.149338 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4657\nI0821 16:15:38.149708 32502 solver.cpp:404]     Test net output #1: loss = 2.22126 (* 1 = 2.22126 loss)\nI0821 16:15:40.241302 32502 solver.cpp:228] Iteration 7400, loss = 0.275514\nI0821 16:15:40.241367 32502 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 16:15:40.241385 32502 solver.cpp:244]     Train net output #1: loss = 0.275511 (* 1 = 0.275511 loss)\nI0821 16:15:40.382383 32502 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0821 16:19:21.567862 32502 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 16:21:31.395470 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4531\nI0821 16:21:31.395854 32502 solver.cpp:404]     Test net output #1: loss = 2.47024 (* 1 = 2.47024 loss)\nI0821 16:21:33.487888 32502 solver.cpp:228] Iteration 7500, loss = 0.273727\nI0821 16:21:33.487957 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:21:33.487974 32502 solver.cpp:244]     Train net output #1: loss = 0.273725 (* 1 = 0.273725 loss)\nI0821 16:21:33.630900 32502 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0821 16:25:14.814756 32502 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 16:27:24.636186 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4398\nI0821 16:27:24.636549 32502 solver.cpp:404]     Test net output #1: loss = 3.07159 (* 1 = 3.07159 loss)\nI0821 16:27:26.728430 32502 solver.cpp:228] Iteration 7600, loss = 0.219237\nI0821 16:27:26.728492 32502 
solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:27:26.728510 32502 solver.cpp:244]     Train net output #1: loss = 0.219234 (* 1 = 0.219234 loss)\nI0821 16:27:26.873874 32502 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0821 16:31:08.172099 32502 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 16:33:18.000579 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4975\nI0821 16:33:18.000962 32502 solver.cpp:404]     Test net output #1: loss = 2.48733 (* 1 = 2.48733 loss)\nI0821 16:33:20.093130 32502 solver.cpp:228] Iteration 7700, loss = 0.23864\nI0821 16:33:20.093195 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 16:33:20.093214 32502 solver.cpp:244]     Train net output #1: loss = 0.238637 (* 1 = 0.238637 loss)\nI0821 16:33:20.230763 32502 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0821 16:37:01.511107 32502 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 16:39:11.316519 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4267\nI0821 16:39:11.316895 32502 solver.cpp:404]     Test net output #1: loss = 3.74096 (* 1 = 3.74096 loss)\nI0821 16:39:13.408330 32502 solver.cpp:228] Iteration 7800, loss = 0.166729\nI0821 16:39:13.408392 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 16:39:13.408411 32502 solver.cpp:244]     Train net output #1: loss = 0.166727 (* 1 = 0.166727 loss)\nI0821 16:39:13.547986 32502 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0821 16:42:54.652704 32502 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 16:45:02.660413 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4662\nI0821 16:45:02.660778 32502 solver.cpp:404]     Test net output #1: loss = 2.52708 (* 1 = 2.52708 loss)\nI0821 16:45:04.748833 32502 solver.cpp:228] Iteration 7900, loss = 0.209106\nI0821 16:45:04.748881 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 16:45:04.748898 32502 solver.cpp:244]     Train net output #1: loss = 0.209103 (* 1 = 
0.209103 loss)\nI0821 16:45:04.888862 32502 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0821 16:48:45.195279 32502 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 16:50:53.162257 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4038\nI0821 16:50:53.162592 32502 solver.cpp:404]     Test net output #1: loss = 3.04405 (* 1 = 3.04405 loss)\nI0821 16:50:55.249980 32502 solver.cpp:228] Iteration 8000, loss = 0.127369\nI0821 16:50:55.250031 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 16:50:55.250048 32502 solver.cpp:244]     Train net output #1: loss = 0.127366 (* 1 = 0.127366 loss)\nI0821 16:50:55.387137 32502 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0821 16:54:35.528878 32502 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 16:56:43.492808 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4503\nI0821 16:56:43.493142 32502 solver.cpp:404]     Test net output #1: loss = 2.66319 (* 1 = 2.66319 loss)\nI0821 16:56:45.579532 32502 solver.cpp:228] Iteration 8100, loss = 0.241622\nI0821 16:56:45.579578 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 16:56:45.579596 32502 solver.cpp:244]     Train net output #1: loss = 0.241619 (* 1 = 0.241619 loss)\nI0821 16:56:45.724401 32502 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0821 17:00:26.005079 32502 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 17:02:33.967861 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5384\nI0821 17:02:33.968178 32502 solver.cpp:404]     Test net output #1: loss = 1.96254 (* 1 = 1.96254 loss)\nI0821 17:02:36.056291 32502 solver.cpp:228] Iteration 8200, loss = 0.153141\nI0821 17:02:36.056339 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 17:02:36.056355 32502 solver.cpp:244]     Train net output #1: loss = 0.153138 (* 1 = 0.153138 loss)\nI0821 17:02:36.193821 32502 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0821 17:06:16.457676 32502 solver.cpp:337] Iteration 8300, 
Testing net (#0)\nI0821 17:08:24.414630 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4604\nI0821 17:08:24.414968 32502 solver.cpp:404]     Test net output #1: loss = 2.84195 (* 1 = 2.84195 loss)\nI0821 17:08:26.501525 32502 solver.cpp:228] Iteration 8300, loss = 0.16737\nI0821 17:08:26.501574 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:08:26.501590 32502 solver.cpp:244]     Train net output #1: loss = 0.167367 (* 1 = 0.167367 loss)\nI0821 17:08:26.645633 32502 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0821 17:12:06.890997 32502 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 17:14:14.884390 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5359\nI0821 17:14:14.884657 32502 solver.cpp:404]     Test net output #1: loss = 2.27199 (* 1 = 2.27199 loss)\nI0821 17:14:16.972293 32502 solver.cpp:228] Iteration 8400, loss = 0.183777\nI0821 17:14:16.972342 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 17:14:16.972357 32502 solver.cpp:244]     Train net output #1: loss = 0.183775 (* 1 = 0.183775 loss)\nI0821 17:14:17.106953 32502 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0821 17:17:57.449256 32502 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 17:20:05.444533 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6062\nI0821 17:20:05.444854 32502 solver.cpp:404]     Test net output #1: loss = 1.65711 (* 1 = 1.65711 loss)\nI0821 17:20:07.532536 32502 solver.cpp:228] Iteration 8500, loss = 0.26491\nI0821 17:20:07.532584 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 17:20:07.532601 32502 solver.cpp:244]     Train net output #1: loss = 0.264907 (* 1 = 0.264907 loss)\nI0821 17:20:07.666931 32502 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0821 17:23:48.144973 32502 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 17:25:56.140925 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5293\nI0821 17:25:56.141260 32502 solver.cpp:404]     
Test net output #1: loss = 1.987 (* 1 = 1.987 loss)\nI0821 17:25:58.228199 32502 solver.cpp:228] Iteration 8600, loss = 0.123367\nI0821 17:25:58.228245 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 17:25:58.228262 32502 solver.cpp:244]     Train net output #1: loss = 0.123364 (* 1 = 0.123364 loss)\nI0821 17:25:58.365420 32502 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0821 17:29:38.727465 32502 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 17:31:46.755918 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6\nI0821 17:31:46.756244 32502 solver.cpp:404]     Test net output #1: loss = 1.66355 (* 1 = 1.66355 loss)\nI0821 17:31:48.843376 32502 solver.cpp:228] Iteration 8700, loss = 0.119131\nI0821 17:31:48.843423 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 17:31:48.843439 32502 solver.cpp:244]     Train net output #1: loss = 0.119128 (* 1 = 0.119128 loss)\nI0821 17:31:48.986943 32502 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0821 17:35:29.382580 32502 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 17:37:37.406872 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4562\nI0821 17:37:37.407217 32502 solver.cpp:404]     Test net output #1: loss = 2.88761 (* 1 = 2.88761 loss)\nI0821 17:37:39.494675 32502 solver.cpp:228] Iteration 8800, loss = 0.110739\nI0821 17:37:39.494724 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 17:37:39.494740 32502 solver.cpp:244]     Train net output #1: loss = 0.110737 (* 1 = 0.110737 loss)\nI0821 17:37:39.630791 32502 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0821 17:41:19.940668 32502 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 17:43:28.006786 32502 solver.cpp:404]     Test net output #0: accuracy = 0.341\nI0821 17:43:28.007124 32502 solver.cpp:404]     Test net output #1: loss = 5.02536 (* 1 = 5.02536 loss)\nI0821 17:43:30.094070 32502 solver.cpp:228] Iteration 8900, loss = 0.242376\nI0821 17:43:30.094118 
32502 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 17:43:30.094133 32502 solver.cpp:244]     Train net output #1: loss = 0.242373 (* 1 = 0.242373 loss)\nI0821 17:43:30.236349 32502 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0821 17:47:10.508895 32502 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 17:49:18.524245 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5294\nI0821 17:49:18.524554 32502 solver.cpp:404]     Test net output #1: loss = 2.36888 (* 1 = 2.36888 loss)\nI0821 17:49:20.612440 32502 solver.cpp:228] Iteration 9000, loss = 0.313009\nI0821 17:49:20.612488 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 17:49:20.612504 32502 solver.cpp:244]     Train net output #1: loss = 0.313006 (* 1 = 0.313006 loss)\nI0821 17:49:20.752347 32502 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0821 17:53:00.935362 32502 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 17:55:09.012429 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5454\nI0821 17:55:09.012768 32502 solver.cpp:404]     Test net output #1: loss = 2.12815 (* 1 = 2.12815 loss)\nI0821 17:55:11.100260 32502 solver.cpp:228] Iteration 9100, loss = 0.0810645\nI0821 17:55:11.100311 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 17:55:11.100335 32502 solver.cpp:244]     Train net output #1: loss = 0.0810618 (* 1 = 0.0810618 loss)\nI0821 17:55:11.238579 32502 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0821 17:58:51.669145 32502 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 18:00:59.734979 32502 solver.cpp:404]     Test net output #0: accuracy = 0.489\nI0821 18:00:59.735348 32502 solver.cpp:404]     Test net output #1: loss = 2.25787 (* 1 = 2.25787 loss)\nI0821 18:01:01.823386 32502 solver.cpp:228] Iteration 9200, loss = 0.151539\nI0821 18:01:01.823431 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 18:01:01.823456 32502 solver.cpp:244]     Train net output #1: loss = 0.151537 
(* 1 = 0.151537 loss)\nI0821 18:01:01.953022 32502 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0821 18:04:42.047785 32502 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 18:06:50.114702 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4506\nI0821 18:06:50.115069 32502 solver.cpp:404]     Test net output #1: loss = 2.65096 (* 1 = 2.65096 loss)\nI0821 18:06:52.202564 32502 solver.cpp:228] Iteration 9300, loss = 0.182106\nI0821 18:06:52.202615 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 18:06:52.202639 32502 solver.cpp:244]     Train net output #1: loss = 0.182103 (* 1 = 0.182103 loss)\nI0821 18:06:52.340212 32502 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0821 18:10:32.690197 32502 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 18:12:40.732555 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4784\nI0821 18:12:40.732918 32502 solver.cpp:404]     Test net output #1: loss = 2.65708 (* 1 = 2.65708 loss)\nI0821 18:12:42.820590 32502 solver.cpp:228] Iteration 9400, loss = 0.162397\nI0821 18:12:42.820641 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 18:12:42.820664 32502 solver.cpp:244]     Train net output #1: loss = 0.162394 (* 1 = 0.162394 loss)\nI0821 18:12:42.954195 32502 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0821 18:16:23.282878 32502 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 18:18:31.301296 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5377\nI0821 18:18:31.301589 32502 solver.cpp:404]     Test net output #1: loss = 1.87073 (* 1 = 1.87073 loss)\nI0821 18:18:33.389403 32502 solver.cpp:228] Iteration 9500, loss = 0.149713\nI0821 18:18:33.389451 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 18:18:33.389467 32502 solver.cpp:244]     Train net output #1: loss = 0.14971 (* 1 = 0.14971 loss)\nI0821 18:18:33.531569 32502 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0821 18:22:13.714854 32502 solver.cpp:337] Iteration 
9600, Testing net (#0)\nI0821 18:24:21.725601 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5827\nI0821 18:24:21.725875 32502 solver.cpp:404]     Test net output #1: loss = 1.80964 (* 1 = 1.80964 loss)\nI0821 18:24:23.813191 32502 solver.cpp:228] Iteration 9600, loss = 0.202308\nI0821 18:24:23.813238 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 18:24:23.813254 32502 solver.cpp:244]     Train net output #1: loss = 0.202305 (* 1 = 0.202305 loss)\nI0821 18:24:23.949020 32502 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0821 18:28:04.195340 32502 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 18:30:12.209012 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5525\nI0821 18:30:12.209328 32502 solver.cpp:404]     Test net output #1: loss = 1.69113 (* 1 = 1.69113 loss)\nI0821 18:30:14.296631 32502 solver.cpp:228] Iteration 9700, loss = 0.137902\nI0821 18:30:14.296676 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 18:30:14.296694 32502 solver.cpp:244]     Train net output #1: loss = 0.137899 (* 1 = 0.137899 loss)\nI0821 18:30:14.437466 32502 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0821 18:33:54.851052 32502 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 18:36:02.903132 32502 solver.cpp:404]     Test net output #0: accuracy = 0.509\nI0821 18:36:02.903458 32502 solver.cpp:404]     Test net output #1: loss = 2.66877 (* 1 = 2.66877 loss)\nI0821 18:36:04.990758 32502 solver.cpp:228] Iteration 9800, loss = 0.157274\nI0821 18:36:04.990808 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 18:36:04.990833 32502 solver.cpp:244]     Train net output #1: loss = 0.157271 (* 1 = 0.157271 loss)\nI0821 18:36:05.137656 32502 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0821 18:39:45.987550 32502 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 18:41:54.028537 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4974\nI0821 18:41:54.028858 32502 
solver.cpp:404]     Test net output #1: loss = 2.06746 (* 1 = 2.06746 loss)\nI0821 18:41:56.117125 32502 solver.cpp:228] Iteration 9900, loss = 0.236489\nI0821 18:41:56.117175 32502 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 18:41:56.117199 32502 solver.cpp:244]     Train net output #1: loss = 0.236486 (* 1 = 0.236486 loss)\nI0821 18:41:56.257612 32502 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0821 18:45:37.250496 32502 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 18:47:45.303376 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6026\nI0821 18:47:45.303715 32502 solver.cpp:404]     Test net output #1: loss = 1.6782 (* 1 = 1.6782 loss)\nI0821 18:47:47.393295 32502 solver.cpp:228] Iteration 10000, loss = 0.0991451\nI0821 18:47:47.393348 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 18:47:47.393373 32502 solver.cpp:244]     Train net output #1: loss = 0.0991425 (* 1 = 0.0991425 loss)\nI0821 18:47:47.536278 32502 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0821 18:51:28.516324 32502 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0821 18:53:36.565454 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5209\nI0821 18:53:36.565789 32502 solver.cpp:404]     Test net output #1: loss = 2.17576 (* 1 = 2.17576 loss)\nI0821 18:53:38.654508 32502 solver.cpp:228] Iteration 10100, loss = 0.247093\nI0821 18:53:38.654559 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 18:53:38.654583 32502 solver.cpp:244]     Train net output #1: loss = 0.24709 (* 1 = 0.24709 loss)\nI0821 18:53:38.794420 32502 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0821 18:57:19.778237 32502 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0821 18:59:27.825642 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4466\nI0821 18:59:27.825990 32502 solver.cpp:404]     Test net output #1: loss = 2.83657 (* 1 = 2.83657 loss)\nI0821 18:59:29.914569 32502 solver.cpp:228] Iteration 10200, loss = 
0.209157\nI0821 18:59:29.914620 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 18:59:29.914644 32502 solver.cpp:244]     Train net output #1: loss = 0.209155 (* 1 = 0.209155 loss)\nI0821 18:59:30.058022 32502 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0821 19:03:10.877909 32502 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0821 19:05:18.911423 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5018\nI0821 19:05:18.911717 32502 solver.cpp:404]     Test net output #1: loss = 2.83057 (* 1 = 2.83057 loss)\nI0821 19:05:21.001087 32502 solver.cpp:228] Iteration 10300, loss = 0.0991272\nI0821 19:05:21.001139 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 19:05:21.001164 32502 solver.cpp:244]     Train net output #1: loss = 0.0991245 (* 1 = 0.0991245 loss)\nI0821 19:05:21.147718 32502 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0821 19:09:02.053061 32502 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0821 19:11:10.075682 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5316\nI0821 19:11:10.075990 32502 solver.cpp:404]     Test net output #1: loss = 2.19117 (* 1 = 2.19117 loss)\nI0821 19:11:12.164384 32502 solver.cpp:228] Iteration 10400, loss = 0.173412\nI0821 19:11:12.164436 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 19:11:12.164461 32502 solver.cpp:244]     Train net output #1: loss = 0.17341 (* 1 = 0.17341 loss)\nI0821 19:11:12.303419 32502 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0821 19:14:53.163666 32502 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0821 19:17:01.185068 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5133\nI0821 19:17:01.185412 32502 solver.cpp:404]     Test net output #1: loss = 2.63176 (* 1 = 2.63176 loss)\nI0821 19:17:03.273422 32502 solver.cpp:228] Iteration 10500, loss = 0.230224\nI0821 19:17:03.273473 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 19:17:03.273496 32502 solver.cpp:244]   
  Train net output #1: loss = 0.230222 (* 1 = 0.230222 loss)\nI0821 19:17:03.423347 32502 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0821 19:20:44.320498 32502 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0821 19:22:52.374039 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5507\nI0821 19:22:52.374385 32502 solver.cpp:404]     Test net output #1: loss = 2.1414 (* 1 = 2.1414 loss)\nI0821 19:22:54.462839 32502 solver.cpp:228] Iteration 10600, loss = 0.0790026\nI0821 19:22:54.462889 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 19:22:54.462916 32502 solver.cpp:244]     Train net output #1: loss = 0.0789999 (* 1 = 0.0789999 loss)\nI0821 19:22:54.612962 32502 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0821 19:26:35.555825 32502 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0821 19:28:45.289687 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5688\nI0821 19:28:45.290066 32502 solver.cpp:404]     Test net output #1: loss = 1.77563 (* 1 = 1.77563 loss)\nI0821 19:28:47.382565 32502 solver.cpp:228] Iteration 10700, loss = 0.129368\nI0821 19:28:47.382632 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 19:28:47.382657 32502 solver.cpp:244]     Train net output #1: loss = 0.129365 (* 1 = 0.129365 loss)\nI0821 19:28:47.525425 32502 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0821 19:32:28.880790 32502 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0821 19:34:38.619628 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4988\nI0821 19:34:38.620041 32502 solver.cpp:404]     Test net output #1: loss = 2.11378 (* 1 = 2.11378 loss)\nI0821 19:34:40.711812 32502 solver.cpp:228] Iteration 10800, loss = 0.276023\nI0821 19:34:40.711879 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 19:34:40.711905 32502 solver.cpp:244]     Train net output #1: loss = 0.27602 (* 1 = 0.27602 loss)\nI0821 19:34:40.849449 32502 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0821 
19:38:21.789130 32502 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0821 19:40:31.539487 32502 solver.cpp:404]     Test net output #0: accuracy = 0.629\nI0821 19:40:31.539875 32502 solver.cpp:404]     Test net output #1: loss = 1.53936 (* 1 = 1.53936 loss)\nI0821 19:40:33.631641 32502 solver.cpp:228] Iteration 10900, loss = 0.167744\nI0821 19:40:33.631708 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 19:40:33.631733 32502 solver.cpp:244]     Train net output #1: loss = 0.167742 (* 1 = 0.167742 loss)\nI0821 19:40:33.776178 32502 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0821 19:44:14.673547 32502 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0821 19:46:24.423399 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5876\nI0821 19:46:24.423797 32502 solver.cpp:404]     Test net output #1: loss = 1.54485 (* 1 = 1.54485 loss)\nI0821 19:46:26.515740 32502 solver.cpp:228] Iteration 11000, loss = 0.244997\nI0821 19:46:26.515807 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 19:46:26.515832 32502 solver.cpp:244]     Train net output #1: loss = 0.244995 (* 1 = 0.244995 loss)\nI0821 19:46:26.653074 32502 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0821 19:50:07.524278 32502 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0821 19:52:17.277685 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6188\nI0821 19:52:17.278080 32502 solver.cpp:404]     Test net output #1: loss = 1.43918 (* 1 = 1.43918 loss)\nI0821 19:52:19.372234 32502 solver.cpp:228] Iteration 11100, loss = 0.152478\nI0821 19:52:19.372301 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 19:52:19.372326 32502 solver.cpp:244]     Train net output #1: loss = 0.152475 (* 1 = 0.152475 loss)\nI0821 19:52:19.506484 32502 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0821 19:56:00.008862 32502 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0821 19:58:09.797348 32502 solver.cpp:404]     Test net output #0: 
accuracy = 0.6469\nI0821 19:58:09.797724 32502 solver.cpp:404]     Test net output #1: loss = 1.56791 (* 1 = 1.56791 loss)\nI0821 19:58:11.889421 32502 solver.cpp:228] Iteration 11200, loss = 0.169666\nI0821 19:58:11.889488 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 19:58:11.889513 32502 solver.cpp:244]     Train net output #1: loss = 0.169663 (* 1 = 0.169663 loss)\nI0821 19:58:12.020745 32502 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0821 20:01:52.765079 32502 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0821 20:04:02.489909 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5847\nI0821 20:04:02.490294 32502 solver.cpp:404]     Test net output #1: loss = 1.89772 (* 1 = 1.89772 loss)\nI0821 20:04:04.581975 32502 solver.cpp:228] Iteration 11300, loss = 0.122423\nI0821 20:04:04.582042 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 20:04:04.582067 32502 solver.cpp:244]     Train net output #1: loss = 0.122421 (* 1 = 0.122421 loss)\nI0821 20:04:04.715297 32502 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0821 20:07:45.686843 32502 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0821 20:09:55.363628 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5799\nI0821 20:09:55.364017 32502 solver.cpp:404]     Test net output #1: loss = 2.12951 (* 1 = 2.12951 loss)\nI0821 20:09:57.456333 32502 solver.cpp:228] Iteration 11400, loss = 0.207684\nI0821 20:09:57.456401 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 20:09:57.456426 32502 solver.cpp:244]     Train net output #1: loss = 0.207681 (* 1 = 0.207681 loss)\nI0821 20:09:57.580768 32502 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0821 20:13:38.151202 32502 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0821 20:15:47.839146 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5793\nI0821 20:15:47.839526 32502 solver.cpp:404]     Test net output #1: loss = 1.76506 (* 1 = 1.76506 loss)\nI0821 
20:15:49.931135 32502 solver.cpp:228] Iteration 11500, loss = 0.197045\nI0821 20:15:49.931206 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 20:15:49.931231 32502 solver.cpp:244]     Train net output #1: loss = 0.197042 (* 1 = 0.197042 loss)\nI0821 20:15:50.067824 32502 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0821 20:19:31.127148 32502 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0821 20:21:40.845546 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5738\nI0821 20:21:40.845935 32502 solver.cpp:404]     Test net output #1: loss = 2.16968 (* 1 = 2.16968 loss)\nI0821 20:21:42.937435 32502 solver.cpp:228] Iteration 11600, loss = 0.0662447\nI0821 20:21:42.937502 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 20:21:42.937530 32502 solver.cpp:244]     Train net output #1: loss = 0.0662421 (* 1 = 0.0662421 loss)\nI0821 20:21:43.073093 32502 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0821 20:25:23.964313 32502 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0821 20:27:33.700021 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5592\nI0821 20:27:33.700405 32502 solver.cpp:404]     Test net output #1: loss = 2.14778 (* 1 = 2.14778 loss)\nI0821 20:27:35.793447 32502 solver.cpp:228] Iteration 11700, loss = 0.124262\nI0821 20:27:35.793515 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 20:27:35.793540 32502 solver.cpp:244]     Train net output #1: loss = 0.124259 (* 1 = 0.124259 loss)\nI0821 20:27:35.928534 32502 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0821 20:31:16.588207 32502 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0821 20:33:26.281977 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5107\nI0821 20:33:26.282374 32502 solver.cpp:404]     Test net output #1: loss = 2.63588 (* 1 = 2.63588 loss)\nI0821 20:33:28.375427 32502 solver.cpp:228] Iteration 11800, loss = 0.15264\nI0821 20:33:28.375494 32502 solver.cpp:244]     Train net output #0: 
accuracy = 0.96\nI0821 20:33:28.375519 32502 solver.cpp:244]     Train net output #1: loss = 0.152637 (* 1 = 0.152637 loss)\nI0821 20:33:28.510900 32502 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0821 20:37:09.123222 32502 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0821 20:39:18.806481 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5759\nI0821 20:39:18.806875 32502 solver.cpp:404]     Test net output #1: loss = 2.08089 (* 1 = 2.08089 loss)\nI0821 20:39:20.901460 32502 solver.cpp:228] Iteration 11900, loss = 0.156596\nI0821 20:39:20.901527 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:39:20.901546 32502 solver.cpp:244]     Train net output #1: loss = 0.156593 (* 1 = 0.156593 loss)\nI0821 20:39:21.028883 32502 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0821 20:43:01.623864 32502 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0821 20:45:11.301785 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5909\nI0821 20:45:11.302186 32502 solver.cpp:404]     Test net output #1: loss = 1.85836 (* 1 = 1.85836 loss)\nI0821 20:45:13.399622 32502 solver.cpp:228] Iteration 12000, loss = 0.142285\nI0821 20:45:13.399688 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:45:13.399706 32502 solver.cpp:244]     Train net output #1: loss = 0.142282 (* 1 = 0.142282 loss)\nI0821 20:45:13.528730 32502 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0821 20:48:54.103205 32502 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0821 20:51:03.784741 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5727\nI0821 20:51:03.785145 32502 solver.cpp:404]     Test net output #1: loss = 1.85609 (* 1 = 1.85609 loss)\nI0821 20:51:05.877578 32502 solver.cpp:228] Iteration 12100, loss = 0.189038\nI0821 20:51:05.877640 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 20:51:05.877657 32502 solver.cpp:244]     Train net output #1: loss = 0.189036 (* 1 = 0.189036 loss)\nI0821 
20:51:06.015002 32502 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0821 20:54:46.653421 32502 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0821 20:56:56.327033 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5584\nI0821 20:56:56.327428 32502 solver.cpp:404]     Test net output #1: loss = 2.13903 (* 1 = 2.13903 loss)\nI0821 20:56:58.420383 32502 solver.cpp:228] Iteration 12200, loss = 0.162126\nI0821 20:56:58.420445 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 20:56:58.420464 32502 solver.cpp:244]     Train net output #1: loss = 0.162123 (* 1 = 0.162123 loss)\nI0821 20:56:58.554689 32502 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0821 21:00:39.402987 32502 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0821 21:02:49.059500 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6139\nI0821 21:02:49.059875 32502 solver.cpp:404]     Test net output #1: loss = 1.5552 (* 1 = 1.5552 loss)\nI0821 21:02:51.152966 32502 solver.cpp:228] Iteration 12300, loss = 0.0962831\nI0821 21:02:51.153030 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 21:02:51.153049 32502 solver.cpp:244]     Train net output #1: loss = 0.0962804 (* 1 = 0.0962804 loss)\nI0821 21:02:51.284962 32502 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0821 21:06:31.997795 32502 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0821 21:08:41.683307 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5298\nI0821 21:08:41.683681 32502 solver.cpp:404]     Test net output #1: loss = 2.46698 (* 1 = 2.46698 loss)\nI0821 21:08:43.776360 32502 solver.cpp:228] Iteration 12400, loss = 0.109695\nI0821 21:08:43.776425 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 21:08:43.776443 32502 solver.cpp:244]     Train net output #1: loss = 0.109693 (* 1 = 0.109693 loss)\nI0821 21:08:43.908463 32502 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0821 21:12:24.800225 32502 solver.cpp:337] Iteration 12500, Testing 
net (#0)\nI0821 21:14:34.468035 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5997\nI0821 21:14:34.468407 32502 solver.cpp:404]     Test net output #1: loss = 1.56635 (* 1 = 1.56635 loss)\nI0821 21:14:36.560470 32502 solver.cpp:228] Iteration 12500, loss = 0.240113\nI0821 21:14:36.560534 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 21:14:36.560551 32502 solver.cpp:244]     Train net output #1: loss = 0.24011 (* 1 = 0.24011 loss)\nI0821 21:14:36.697494 32502 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0821 21:18:17.570269 32502 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0821 21:20:27.249097 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6346\nI0821 21:20:27.249456 32502 solver.cpp:404]     Test net output #1: loss = 1.55643 (* 1 = 1.55643 loss)\nI0821 21:20:29.342213 32502 solver.cpp:228] Iteration 12600, loss = 0.0944214\nI0821 21:20:29.342278 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 21:20:29.342296 32502 solver.cpp:244]     Train net output #1: loss = 0.0944187 (* 1 = 0.0944187 loss)\nI0821 21:20:29.471555 32502 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0821 21:24:10.087493 32502 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0821 21:26:19.755580 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5216\nI0821 21:26:19.755959 32502 solver.cpp:404]     Test net output #1: loss = 2.46059 (* 1 = 2.46059 loss)\nI0821 21:26:21.847622 32502 solver.cpp:228] Iteration 12700, loss = 0.0794869\nI0821 21:26:21.847687 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 21:26:21.847705 32502 solver.cpp:244]     Train net output #1: loss = 0.0794842 (* 1 = 0.0794842 loss)\nI0821 21:26:21.981340 32502 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0821 21:30:02.703162 32502 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0821 21:32:12.395162 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4586\nI0821 21:32:12.395517 32502 
solver.cpp:404]     Test net output #1: loss = 2.82335 (* 1 = 2.82335 loss)\nI0821 21:32:14.488334 32502 solver.cpp:228] Iteration 12800, loss = 0.0683232\nI0821 21:32:14.488399 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 21:32:14.488416 32502 solver.cpp:244]     Train net output #1: loss = 0.0683205 (* 1 = 0.0683205 loss)\nI0821 21:32:14.625435 32502 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0821 21:35:55.557394 32502 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0821 21:38:05.396349 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5241\nI0821 21:38:05.396725 32502 solver.cpp:404]     Test net output #1: loss = 2.28815 (* 1 = 2.28815 loss)\nI0821 21:38:07.489060 32502 solver.cpp:228] Iteration 12900, loss = 0.231571\nI0821 21:38:07.489125 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 21:38:07.489142 32502 solver.cpp:244]     Train net output #1: loss = 0.231568 (* 1 = 0.231568 loss)\nI0821 21:38:07.621274 32502 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0821 21:41:48.549230 32502 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0821 21:43:58.390795 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6529\nI0821 21:43:58.391206 32502 solver.cpp:404]     Test net output #1: loss = 1.35505 (* 1 = 1.35505 loss)\nI0821 21:44:00.482939 32502 solver.cpp:228] Iteration 13000, loss = 0.186328\nI0821 21:44:00.483003 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 21:44:00.483021 32502 solver.cpp:244]     Train net output #1: loss = 0.186325 (* 1 = 0.186325 loss)\nI0821 21:44:00.617049 32502 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0821 21:47:41.390391 32502 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0821 21:49:51.138871 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6852\nI0821 21:49:51.139259 32502 solver.cpp:404]     Test net output #1: loss = 1.31055 (* 1 = 1.31055 loss)\nI0821 21:49:53.231876 32502 solver.cpp:228] Iteration 13100, 
loss = 0.216035\nI0821 21:49:53.231945 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 21:49:53.231964 32502 solver.cpp:244]     Train net output #1: loss = 0.216032 (* 1 = 0.216032 loss)\nI0821 21:49:53.363924 32502 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0821 21:53:34.147302 32502 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0821 21:55:43.897179 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6552\nI0821 21:55:43.897554 32502 solver.cpp:404]     Test net output #1: loss = 1.37287 (* 1 = 1.37287 loss)\nI0821 21:55:45.989511 32502 solver.cpp:228] Iteration 13200, loss = 0.0856257\nI0821 21:55:45.989576 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 21:55:45.989595 32502 solver.cpp:244]     Train net output #1: loss = 0.0856232 (* 1 = 0.0856232 loss)\nI0821 21:55:46.128815 32502 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0821 21:59:26.938122 32502 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0821 22:01:36.680708 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5955\nI0821 22:01:36.681100 32502 solver.cpp:404]     Test net output #1: loss = 2.08412 (* 1 = 2.08412 loss)\nI0821 22:01:38.772676 32502 solver.cpp:228] Iteration 13300, loss = 0.137856\nI0821 22:01:38.772738 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 22:01:38.772758 32502 solver.cpp:244]     Train net output #1: loss = 0.137854 (* 1 = 0.137854 loss)\nI0821 22:01:38.909250 32502 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0821 22:05:19.578567 32502 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0821 22:07:29.339489 32502 solver.cpp:404]     Test net output #0: accuracy = 0.563\nI0821 22:07:29.339882 32502 solver.cpp:404]     Test net output #1: loss = 2.40915 (* 1 = 2.40915 loss)\nI0821 22:07:31.431691 32502 solver.cpp:228] Iteration 13400, loss = 0.140462\nI0821 22:07:31.431754 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 22:07:31.431772 32502 
solver.cpp:244]     Train net output #1: loss = 0.14046 (* 1 = 0.14046 loss)\nI0821 22:07:31.571385 32502 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0821 22:11:12.441696 32502 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0821 22:13:22.202221 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5877\nI0821 22:13:22.202613 32502 solver.cpp:404]     Test net output #1: loss = 1.92898 (* 1 = 1.92898 loss)\nI0821 22:13:24.294214 32502 solver.cpp:228] Iteration 13500, loss = 0.0993558\nI0821 22:13:24.294275 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 22:13:24.294294 32502 solver.cpp:244]     Train net output #1: loss = 0.0993531 (* 1 = 0.0993531 loss)\nI0821 22:13:24.434595 32502 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0821 22:17:05.312916 32502 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0821 22:19:15.034694 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6173\nI0821 22:19:15.035101 32502 solver.cpp:404]     Test net output #1: loss = 1.62439 (* 1 = 1.62439 loss)\nI0821 22:19:17.127007 32502 solver.cpp:228] Iteration 13600, loss = 0.0775061\nI0821 22:19:17.127069 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 22:19:17.127086 32502 solver.cpp:244]     Train net output #1: loss = 0.0775034 (* 1 = 0.0775034 loss)\nI0821 22:19:17.257855 32502 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0821 22:22:57.995313 32502 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0821 22:25:07.703953 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6345\nI0821 22:25:07.704345 32502 solver.cpp:404]     Test net output #1: loss = 1.57419 (* 1 = 1.57419 loss)\nI0821 22:25:09.796855 32502 solver.cpp:228] Iteration 13700, loss = 0.138486\nI0821 22:25:09.796921 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 22:25:09.796941 32502 solver.cpp:244]     Train net output #1: loss = 0.138483 (* 1 = 0.138483 loss)\nI0821 22:25:09.929657 32502 sgd_solver.cpp:166] Iteration 
13700, lr = 0.35\nI0821 22:28:50.751623 32502 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0821 22:31:00.656827 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5234\nI0821 22:31:00.657461 32502 solver.cpp:404]     Test net output #1: loss = 2.42375 (* 1 = 2.42375 loss)\nI0821 22:31:02.750339 32502 solver.cpp:228] Iteration 13800, loss = 0.193619\nI0821 22:31:02.750401 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 22:31:02.750425 32502 solver.cpp:244]     Train net output #1: loss = 0.193616 (* 1 = 0.193616 loss)\nI0821 22:31:02.887315 32502 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0821 22:34:43.657165 32502 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0821 22:36:53.368381 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6616\nI0821 22:36:53.368759 32502 solver.cpp:404]     Test net output #1: loss = 1.52197 (* 1 = 1.52197 loss)\nI0821 22:36:55.461309 32502 solver.cpp:228] Iteration 13900, loss = 0.0438078\nI0821 22:36:55.461372 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:36:55.461390 32502 solver.cpp:244]     Train net output #1: loss = 0.0438052 (* 1 = 0.0438052 loss)\nI0821 22:36:55.602073 32502 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0821 22:40:36.289108 32502 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0821 22:42:45.980002 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4431\nI0821 22:42:45.980371 32502 solver.cpp:404]     Test net output #1: loss = 3.45093 (* 1 = 3.45093 loss)\nI0821 22:42:48.073927 32502 solver.cpp:228] Iteration 14000, loss = 0.136483\nI0821 22:42:48.073992 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 22:42:48.074009 32502 solver.cpp:244]     Train net output #1: loss = 0.136481 (* 1 = 0.136481 loss)\nI0821 22:42:48.211544 32502 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0821 22:46:28.977840 32502 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0821 22:48:38.665729 32502 solver.cpp:404]   
  Test net output #0: accuracy = 0.6788\nI0821 22:48:38.666133 32502 solver.cpp:404]     Test net output #1: loss = 1.42352 (* 1 = 1.42352 loss)\nI0821 22:48:40.759003 32502 solver.cpp:228] Iteration 14100, loss = 0.10269\nI0821 22:48:40.759068 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 22:48:40.759086 32502 solver.cpp:244]     Train net output #1: loss = 0.102687 (* 1 = 0.102687 loss)\nI0821 22:48:40.894316 32502 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0821 22:52:22.059379 32502 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0821 22:54:31.745365 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5898\nI0821 22:54:31.745743 32502 solver.cpp:404]     Test net output #1: loss = 1.69891 (* 1 = 1.69891 loss)\nI0821 22:54:33.838464 32502 solver.cpp:228] Iteration 14200, loss = 0.0663683\nI0821 22:54:33.838527 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 22:54:33.838546 32502 solver.cpp:244]     Train net output #1: loss = 0.0663657 (* 1 = 0.0663657 loss)\nI0821 22:54:33.975540 32502 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0821 22:58:15.165279 32502 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0821 23:00:24.847443 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6403\nI0821 23:00:24.847828 32502 solver.cpp:404]     Test net output #1: loss = 1.55403 (* 1 = 1.55403 loss)\nI0821 23:00:26.939559 32502 solver.cpp:228] Iteration 14300, loss = 0.18406\nI0821 23:00:26.939622 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 23:00:26.939640 32502 solver.cpp:244]     Train net output #1: loss = 0.184058 (* 1 = 0.184058 loss)\nI0821 23:00:27.077242 32502 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0821 23:04:08.291249 32502 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0821 23:06:17.982870 32502 solver.cpp:404]     Test net output #0: accuracy = 0.4316\nI0821 23:06:17.983253 32502 solver.cpp:404]     Test net output #1: loss = 3.14832 (* 1 = 3.14832 
loss)\nI0821 23:06:20.075968 32502 solver.cpp:228] Iteration 14400, loss = 0.0528893\nI0821 23:06:20.076032 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 23:06:20.076050 32502 solver.cpp:244]     Train net output #1: loss = 0.0528867 (* 1 = 0.0528867 loss)\nI0821 23:06:20.217684 32502 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0821 23:10:01.494860 32502 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0821 23:12:11.175359 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6231\nI0821 23:12:11.175729 32502 solver.cpp:404]     Test net output #1: loss = 1.60479 (* 1 = 1.60479 loss)\nI0821 23:12:13.269014 32502 solver.cpp:228] Iteration 14500, loss = 0.141003\nI0821 23:12:13.269078 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 23:12:13.269095 32502 solver.cpp:244]     Train net output #1: loss = 0.141 (* 1 = 0.141 loss)\nI0821 23:12:13.412329 32502 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0821 23:15:54.875033 32502 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0821 23:18:04.565021 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6439\nI0821 23:18:04.565392 32502 solver.cpp:404]     Test net output #1: loss = 1.42955 (* 1 = 1.42955 loss)\nI0821 23:18:06.658581 32502 solver.cpp:228] Iteration 14600, loss = 0.106754\nI0821 23:18:06.658644 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 23:18:06.658663 32502 solver.cpp:244]     Train net output #1: loss = 0.106752 (* 1 = 0.106752 loss)\nI0821 23:18:06.801952 32502 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0821 23:21:48.091325 32502 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0821 23:23:57.787958 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7043\nI0821 23:23:57.788365 32502 solver.cpp:404]     Test net output #1: loss = 1.28806 (* 1 = 1.28806 loss)\nI0821 23:23:59.881001 32502 solver.cpp:228] Iteration 14700, loss = 0.0923866\nI0821 23:23:59.881067 32502 solver.cpp:244]     Train net 
output #0: accuracy = 0.97\nI0821 23:23:59.881085 32502 solver.cpp:244]     Train net output #1: loss = 0.092384 (* 1 = 0.092384 loss)\nI0821 23:24:00.021721 32502 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0821 23:27:41.332628 32502 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0821 23:29:51.030114 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6465\nI0821 23:29:51.030499 32502 solver.cpp:404]     Test net output #1: loss = 1.55713 (* 1 = 1.55713 loss)\nI0821 23:29:53.123834 32502 solver.cpp:228] Iteration 14800, loss = 0.152937\nI0821 23:29:53.123898 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 23:29:53.123914 32502 solver.cpp:244]     Train net output #1: loss = 0.152934 (* 1 = 0.152934 loss)\nI0821 23:29:53.267884 32502 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0821 23:33:34.651758 32502 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0821 23:35:44.338655 32502 solver.cpp:404]     Test net output #0: accuracy = 0.706\nI0821 23:35:44.339054 32502 solver.cpp:404]     Test net output #1: loss = 1.22693 (* 1 = 1.22693 loss)\nI0821 23:35:46.431829 32502 solver.cpp:228] Iteration 14900, loss = 0.0865188\nI0821 23:35:46.431895 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 23:35:46.431913 32502 solver.cpp:244]     Train net output #1: loss = 0.0865162 (* 1 = 0.0865162 loss)\nI0821 23:35:46.569612 32502 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0821 23:39:27.719754 32502 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0821 23:41:37.409500 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7102\nI0821 23:41:37.409886 32502 solver.cpp:404]     Test net output #1: loss = 1.16082 (* 1 = 1.16082 loss)\nI0821 23:41:39.502199 32502 solver.cpp:228] Iteration 15000, loss = 0.165102\nI0821 23:41:39.502260 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 23:41:39.502279 32502 solver.cpp:244]     Train net output #1: loss = 0.1651 (* 1 = 0.1651 loss)\nI0821 
23:41:39.640314 32502 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0821 23:45:21.027598 32502 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0821 23:47:30.704031 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7208\nI0821 23:47:30.704421 32502 solver.cpp:404]     Test net output #1: loss = 1.0544 (* 1 = 1.0544 loss)\nI0821 23:47:32.797832 32502 solver.cpp:228] Iteration 15100, loss = 0.0815134\nI0821 23:47:32.797896 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 23:47:32.797915 32502 solver.cpp:244]     Train net output #1: loss = 0.0815108 (* 1 = 0.0815108 loss)\nI0821 23:47:32.938658 32502 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0821 23:51:14.227898 32502 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0821 23:53:23.916847 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6414\nI0821 23:53:23.917202 32502 solver.cpp:404]     Test net output #1: loss = 1.57942 (* 1 = 1.57942 loss)\nI0821 23:53:26.010649 32502 solver.cpp:228] Iteration 15200, loss = 0.103398\nI0821 23:53:26.010715 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 23:53:26.010733 32502 solver.cpp:244]     Train net output #1: loss = 0.103395 (* 1 = 0.103395 loss)\nI0821 23:53:26.149669 32502 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0821 23:57:07.476408 32502 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0821 23:59:17.119879 32502 solver.cpp:404]     Test net output #0: accuracy = 0.651\nI0821 23:59:17.120260 32502 solver.cpp:404]     Test net output #1: loss = 1.49398 (* 1 = 1.49398 loss)\nI0821 23:59:19.211014 32502 solver.cpp:228] Iteration 15300, loss = 0.113342\nI0821 23:59:19.211078 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 23:59:19.211097 32502 solver.cpp:244]     Train net output #1: loss = 0.113339 (* 1 = 0.113339 loss)\nI0821 23:59:19.352005 32502 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0822 00:03:00.694494 32502 solver.cpp:337] Iteration 15400, Testing net 
(#0)\nI0822 00:05:10.384647 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6854\nI0822 00:05:10.385023 32502 solver.cpp:404]     Test net output #1: loss = 1.42051 (* 1 = 1.42051 loss)\nI0822 00:05:12.476974 32502 solver.cpp:228] Iteration 15400, loss = 0.262093\nI0822 00:05:12.477037 32502 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0822 00:05:12.477056 32502 solver.cpp:244]     Train net output #1: loss = 0.26209 (* 1 = 0.26209 loss)\nI0822 00:05:12.613667 32502 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0822 00:08:54.128099 32502 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0822 00:11:03.818881 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6646\nI0822 00:11:03.819283 32502 solver.cpp:404]     Test net output #1: loss = 1.42878 (* 1 = 1.42878 loss)\nI0822 00:11:05.910465 32502 solver.cpp:228] Iteration 15500, loss = 0.0767869\nI0822 00:11:05.910526 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 00:11:05.910544 32502 solver.cpp:244]     Train net output #1: loss = 0.0767842 (* 1 = 0.0767842 loss)\nI0822 00:11:06.046875 32502 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0822 00:14:47.406816 32502 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0822 00:16:57.092200 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6181\nI0822 00:16:57.092612 32502 solver.cpp:404]     Test net output #1: loss = 2.00617 (* 1 = 2.00617 loss)\nI0822 00:16:59.183444 32502 solver.cpp:228] Iteration 15600, loss = 0.119922\nI0822 00:16:59.183507 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 00:16:59.183526 32502 solver.cpp:244]     Train net output #1: loss = 0.119919 (* 1 = 0.119919 loss)\nI0822 00:16:59.327636 32502 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0822 00:20:40.831248 32502 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0822 00:22:50.583971 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6572\nI0822 00:22:50.584347 32502 solver.cpp:404]     
Test net output #1: loss = 1.42138 (* 1 = 1.42138 loss)\nI0822 00:22:52.676482 32502 solver.cpp:228] Iteration 15700, loss = 0.091587\nI0822 00:22:52.676547 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 00:22:52.676564 32502 solver.cpp:244]     Train net output #1: loss = 0.0915843 (* 1 = 0.0915843 loss)\nI0822 00:22:52.819411 32502 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0822 00:26:34.027479 32502 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0822 00:28:43.761999 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6571\nI0822 00:28:43.762382 32502 solver.cpp:404]     Test net output #1: loss = 1.36484 (* 1 = 1.36484 loss)\nI0822 00:28:45.854437 32502 solver.cpp:228] Iteration 15800, loss = 0.0634104\nI0822 00:28:45.854501 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 00:28:45.854518 32502 solver.cpp:244]     Train net output #1: loss = 0.0634078 (* 1 = 0.0634078 loss)\nI0822 00:28:45.990481 32502 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0822 00:32:27.249709 32502 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0822 00:34:36.976318 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6393\nI0822 00:34:36.976699 32502 solver.cpp:404]     Test net output #1: loss = 1.67132 (* 1 = 1.67132 loss)\nI0822 00:34:39.069092 32502 solver.cpp:228] Iteration 15900, loss = 0.192731\nI0822 00:34:39.069155 32502 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0822 00:34:39.069172 32502 solver.cpp:244]     Train net output #1: loss = 0.192728 (* 1 = 0.192728 loss)\nI0822 00:34:39.207842 32502 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0822 00:38:20.740322 32502 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0822 00:40:30.427752 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6023\nI0822 00:40:30.428131 32502 solver.cpp:404]     Test net output #1: loss = 2.01943 (* 1 = 2.01943 loss)\nI0822 00:40:32.519659 32502 solver.cpp:228] Iteration 16000, loss = 
0.123098\nI0822 00:40:32.519722 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 00:40:32.519740 32502 solver.cpp:244]     Train net output #1: loss = 0.123095 (* 1 = 0.123095 loss)\nI0822 00:40:32.657827 32502 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0822 00:44:13.806118 32502 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0822 00:46:22.151412 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7225\nI0822 00:46:22.151754 32502 solver.cpp:404]     Test net output #1: loss = 1.1945 (* 1 = 1.1945 loss)\nI0822 00:46:24.241585 32502 solver.cpp:228] Iteration 16100, loss = 0.156274\nI0822 00:46:24.241648 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 00:46:24.241665 32502 solver.cpp:244]     Train net output #1: loss = 0.156271 (* 1 = 0.156271 loss)\nI0822 00:46:24.384294 32502 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0822 00:50:05.612119 32502 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0822 00:52:14.149780 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6607\nI0822 00:52:14.150135 32502 solver.cpp:404]     Test net output #1: loss = 1.5573 (* 1 = 1.5573 loss)\nI0822 00:52:16.237449 32502 solver.cpp:228] Iteration 16200, loss = 0.174135\nI0822 00:52:16.237509 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 00:52:16.237527 32502 solver.cpp:244]     Train net output #1: loss = 0.174132 (* 1 = 0.174132 loss)\nI0822 00:52:16.385839 32502 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0822 00:55:57.624328 32502 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0822 00:58:07.282836 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6445\nI0822 00:58:07.283241 32502 solver.cpp:404]     Test net output #1: loss = 1.72096 (* 1 = 1.72096 loss)\nI0822 00:58:09.370224 32502 solver.cpp:228] Iteration 16300, loss = 0.152346\nI0822 00:58:09.370286 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 00:58:09.370302 32502 solver.cpp:244]     
Train net output #1: loss = 0.152344 (* 1 = 0.152344 loss)\nI0822 00:58:09.512174 32502 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0822 01:01:50.763948 32502 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0822 01:04:00.418746 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6183\nI0822 01:04:00.419162 32502 solver.cpp:404]     Test net output #1: loss = 2.18236 (* 1 = 2.18236 loss)\nI0822 01:04:02.506475 32502 solver.cpp:228] Iteration 16400, loss = 0.12378\nI0822 01:04:02.506534 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 01:04:02.506552 32502 solver.cpp:244]     Train net output #1: loss = 0.123777 (* 1 = 0.123777 loss)\nI0822 01:04:02.652678 32502 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0822 01:07:43.912523 32502 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0822 01:09:53.561210 32502 solver.cpp:404]     Test net output #0: accuracy = 0.664\nI0822 01:09:53.561594 32502 solver.cpp:404]     Test net output #1: loss = 1.4083 (* 1 = 1.4083 loss)\nI0822 01:09:55.649029 32502 solver.cpp:228] Iteration 16500, loss = 0.176579\nI0822 01:09:55.649091 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 01:09:55.649109 32502 solver.cpp:244]     Train net output #1: loss = 0.176577 (* 1 = 0.176577 loss)\nI0822 01:09:55.800375 32502 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0822 01:13:37.176249 32502 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0822 01:15:46.811064 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7375\nI0822 01:15:46.811413 32502 solver.cpp:404]     Test net output #1: loss = 0.941992 (* 1 = 0.941992 loss)\nI0822 01:15:48.898782 32502 solver.cpp:228] Iteration 16600, loss = 0.117134\nI0822 01:15:48.898847 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 01:15:48.898865 32502 solver.cpp:244]     Train net output #1: loss = 0.117132 (* 1 = 0.117132 loss)\nI0822 01:15:49.042273 32502 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0822 
01:19:29.827806 32502 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0822 01:21:39.463986 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6998\nI0822 01:21:39.464360 32502 solver.cpp:404]     Test net output #1: loss = 1.34724 (* 1 = 1.34724 loss)\nI0822 01:21:41.551425 32502 solver.cpp:228] Iteration 16700, loss = 0.0989423\nI0822 01:21:41.551488 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 01:21:41.551506 32502 solver.cpp:244]     Train net output #1: loss = 0.0989397 (* 1 = 0.0989397 loss)\nI0822 01:21:41.693536 32502 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0822 01:25:22.214504 32502 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0822 01:27:31.841639 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6979\nI0822 01:27:31.842041 32502 solver.cpp:404]     Test net output #1: loss = 1.20253 (* 1 = 1.20253 loss)\nI0822 01:27:33.929574 32502 solver.cpp:228] Iteration 16800, loss = 0.108107\nI0822 01:27:33.929638 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 01:27:33.929656 32502 solver.cpp:244]     Train net output #1: loss = 0.108105 (* 1 = 0.108105 loss)\nI0822 01:27:34.069816 32502 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0822 01:31:14.586518 32502 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0822 01:33:24.211149 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7041\nI0822 01:33:24.211544 32502 solver.cpp:404]     Test net output #1: loss = 1.18279 (* 1 = 1.18279 loss)\nI0822 01:33:26.298888 32502 solver.cpp:228] Iteration 16900, loss = 0.0962466\nI0822 01:33:26.298956 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 01:33:26.298974 32502 solver.cpp:244]     Train net output #1: loss = 0.096244 (* 1 = 0.096244 loss)\nI0822 01:33:26.440337 32502 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0822 01:37:06.886708 32502 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0822 01:39:16.526473 32502 solver.cpp:404]     Test net output 
#0: accuracy = 0.6988\nI0822 01:39:16.526870 32502 solver.cpp:404]     Test net output #1: loss = 1.23747 (* 1 = 1.23747 loss)\nI0822 01:39:18.613749 32502 solver.cpp:228] Iteration 17000, loss = 0.0887034\nI0822 01:39:18.613811 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 01:39:18.613831 32502 solver.cpp:244]     Train net output #1: loss = 0.0887009 (* 1 = 0.0887009 loss)\nI0822 01:39:18.750032 32502 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0822 01:42:59.174115 32502 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0822 01:45:07.189786 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6295\nI0822 01:45:07.190109 32502 solver.cpp:404]     Test net output #1: loss = 1.54766 (* 1 = 1.54766 loss)\nI0822 01:45:09.273824 32502 solver.cpp:228] Iteration 17100, loss = 0.0774594\nI0822 01:45:09.273874 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 01:45:09.273890 32502 solver.cpp:244]     Train net output #1: loss = 0.0774569 (* 1 = 0.0774569 loss)\nI0822 01:45:09.413406 32502 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0822 01:48:49.655378 32502 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0822 01:50:57.676905 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6386\nI0822 01:50:57.677235 32502 solver.cpp:404]     Test net output #1: loss = 1.74152 (* 1 = 1.74152 loss)\nI0822 01:50:59.760188 32502 solver.cpp:228] Iteration 17200, loss = 0.274845\nI0822 01:50:59.760236 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 01:50:59.760253 32502 solver.cpp:244]     Train net output #1: loss = 0.274842 (* 1 = 0.274842 loss)\nI0822 01:50:59.900027 32502 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0822 01:54:39.966784 32502 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0822 01:56:47.970712 32502 solver.cpp:404]     Test net output #0: accuracy = 0.751\nI0822 01:56:47.971005 32502 solver.cpp:404]     Test net output #1: loss = 0.887293 (* 1 = 0.887293 loss)\nI0822 
01:56:50.053658 32502 solver.cpp:228] Iteration 17300, loss = 0.0654586\nI0822 01:56:50.053707 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 01:56:50.053725 32502 solver.cpp:244]     Train net output #1: loss = 0.065456 (* 1 = 0.065456 loss)\nI0822 01:56:50.190645 32502 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0822 02:00:30.312801 32502 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0822 02:02:38.354846 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6218\nI0822 02:02:38.355195 32502 solver.cpp:404]     Test net output #1: loss = 1.73706 (* 1 = 1.73706 loss)\nI0822 02:02:40.437616 32502 solver.cpp:228] Iteration 17400, loss = 0.0959679\nI0822 02:02:40.437664 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 02:02:40.437681 32502 solver.cpp:244]     Train net output #1: loss = 0.0959654 (* 1 = 0.0959654 loss)\nI0822 02:02:40.583348 32502 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0822 02:06:20.740680 32502 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0822 02:08:28.778604 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6749\nI0822 02:08:28.778894 32502 solver.cpp:404]     Test net output #1: loss = 1.23512 (* 1 = 1.23512 loss)\nI0822 02:08:30.861186 32502 solver.cpp:228] Iteration 17500, loss = 0.0656575\nI0822 02:08:30.861237 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 02:08:30.861253 32502 solver.cpp:244]     Train net output #1: loss = 0.0656549 (* 1 = 0.0656549 loss)\nI0822 02:08:31.003531 32502 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0822 02:12:11.210762 32502 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0822 02:14:19.257028 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6232\nI0822 02:14:19.257359 32502 solver.cpp:404]     Test net output #1: loss = 1.88185 (* 1 = 1.88185 loss)\nI0822 02:14:21.339577 32502 solver.cpp:228] Iteration 17600, loss = 0.113685\nI0822 02:14:21.339625 32502 solver.cpp:244]     Train net 
output #0: accuracy = 0.96\nI0822 02:14:21.339643 32502 solver.cpp:244]     Train net output #1: loss = 0.113683 (* 1 = 0.113683 loss)\nI0822 02:14:21.482910 32502 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0822 02:18:01.577219 32502 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0822 02:20:09.665812 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6928\nI0822 02:20:09.666134 32502 solver.cpp:404]     Test net output #1: loss = 1.3089 (* 1 = 1.3089 loss)\nI0822 02:20:11.748656 32502 solver.cpp:228] Iteration 17700, loss = 0.134399\nI0822 02:20:11.748705 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 02:20:11.748721 32502 solver.cpp:244]     Train net output #1: loss = 0.134396 (* 1 = 0.134396 loss)\nI0822 02:20:11.888906 32502 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0822 02:23:51.946576 32502 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0822 02:26:00.032392 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6413\nI0822 02:26:00.032690 32502 solver.cpp:404]     Test net output #1: loss = 1.76164 (* 1 = 1.76164 loss)\nI0822 02:26:02.116518 32502 solver.cpp:228] Iteration 17800, loss = 0.0996235\nI0822 02:26:02.116565 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 02:26:02.116582 32502 solver.cpp:244]     Train net output #1: loss = 0.0996209 (* 1 = 0.0996209 loss)\nI0822 02:26:02.255648 32502 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0822 02:29:42.454484 32502 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0822 02:31:50.537485 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6676\nI0822 02:31:50.537819 32502 solver.cpp:404]     Test net output #1: loss = 1.35712 (* 1 = 1.35712 loss)\nI0822 02:31:52.620955 32502 solver.cpp:228] Iteration 17900, loss = 0.0817919\nI0822 02:31:52.621006 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 02:31:52.621021 32502 solver.cpp:244]     Train net output #1: loss = 0.0817893 (* 1 = 0.0817893 
loss)\nI0822 02:31:52.762276 32502 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0822 02:35:32.898079 32502 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0822 02:37:40.970892 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7023\nI0822 02:37:40.971189 32502 solver.cpp:404]     Test net output #1: loss = 1.2049 (* 1 = 1.2049 loss)\nI0822 02:37:43.054162 32502 solver.cpp:228] Iteration 18000, loss = 0.102588\nI0822 02:37:43.054210 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 02:37:43.054227 32502 solver.cpp:244]     Train net output #1: loss = 0.102585 (* 1 = 0.102585 loss)\nI0822 02:37:43.191710 32502 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0822 02:41:23.320013 32502 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0822 02:43:31.398341 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5504\nI0822 02:43:31.398684 32502 solver.cpp:404]     Test net output #1: loss = 2.15268 (* 1 = 2.15268 loss)\nI0822 02:43:33.480988 32502 solver.cpp:228] Iteration 18100, loss = 0.112163\nI0822 02:43:33.481042 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 02:43:33.481068 32502 solver.cpp:244]     Train net output #1: loss = 0.11216 (* 1 = 0.11216 loss)\nI0822 02:43:33.624723 32502 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0822 02:47:13.737931 32502 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0822 02:49:21.816079 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6884\nI0822 02:49:21.816416 32502 solver.cpp:404]     Test net output #1: loss = 1.35906 (* 1 = 1.35906 loss)\nI0822 02:49:23.898860 32502 solver.cpp:228] Iteration 18200, loss = 0.169052\nI0822 02:49:23.898908 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 02:49:23.898926 32502 solver.cpp:244]     Train net output #1: loss = 0.169049 (* 1 = 0.169049 loss)\nI0822 02:49:24.042346 32502 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0822 02:53:04.253262 32502 solver.cpp:337] Iteration 18300, 
Testing net (#0)\nI0822 02:55:12.295266 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6467\nI0822 02:55:12.295610 32502 solver.cpp:404]     Test net output #1: loss = 1.68991 (* 1 = 1.68991 loss)\nI0822 02:55:14.378473 32502 solver.cpp:228] Iteration 18300, loss = 0.127418\nI0822 02:55:14.378520 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 02:55:14.378535 32502 solver.cpp:244]     Train net output #1: loss = 0.127415 (* 1 = 0.127415 loss)\nI0822 02:55:14.524834 32502 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0822 02:58:54.699545 32502 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0822 03:01:02.748832 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6717\nI0822 03:01:02.749130 32502 solver.cpp:404]     Test net output #1: loss = 1.7468 (* 1 = 1.7468 loss)\nI0822 03:01:04.831909 32502 solver.cpp:228] Iteration 18400, loss = 0.114027\nI0822 03:01:04.831956 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 03:01:04.831974 32502 solver.cpp:244]     Train net output #1: loss = 0.114025 (* 1 = 0.114025 loss)\nI0822 03:01:04.974467 32502 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0822 03:04:44.305577 32502 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0822 03:06:52.346912 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6635\nI0822 03:06:52.347252 32502 solver.cpp:404]     Test net output #1: loss = 1.30496 (* 1 = 1.30496 loss)\nI0822 03:06:54.429404 32502 solver.cpp:228] Iteration 18500, loss = 0.206107\nI0822 03:06:54.429451 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 03:06:54.429468 32502 solver.cpp:244]     Train net output #1: loss = 0.206105 (* 1 = 0.206105 loss)\nI0822 03:06:54.560861 32502 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0822 03:10:33.760530 32502 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0822 03:12:41.812608 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7105\nI0822 03:12:41.812943 32502 
solver.cpp:404]     Test net output #1: loss = 1.101 (* 1 = 1.101 loss)\nI0822 03:12:43.896288 32502 solver.cpp:228] Iteration 18600, loss = 0.0412694\nI0822 03:12:43.896335 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 03:12:43.896353 32502 solver.cpp:244]     Train net output #1: loss = 0.0412668 (* 1 = 0.0412668 loss)\nI0822 03:12:44.025589 32502 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0822 03:16:23.041127 32502 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0822 03:18:31.093562 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6742\nI0822 03:18:31.093876 32502 solver.cpp:404]     Test net output #1: loss = 1.41475 (* 1 = 1.41475 loss)\nI0822 03:18:33.175997 32502 solver.cpp:228] Iteration 18700, loss = 0.0800555\nI0822 03:18:33.176045 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 03:18:33.176062 32502 solver.cpp:244]     Train net output #1: loss = 0.0800529 (* 1 = 0.0800529 loss)\nI0822 03:18:33.313467 32502 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0822 03:22:12.415735 32502 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0822 03:24:20.452973 32502 solver.cpp:404]     Test net output #0: accuracy = 0.697\nI0822 03:24:20.453266 32502 solver.cpp:404]     Test net output #1: loss = 1.53799 (* 1 = 1.53799 loss)\nI0822 03:24:22.540916 32502 solver.cpp:228] Iteration 18800, loss = 0.0224191\nI0822 03:24:22.540963 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:24:22.540980 32502 solver.cpp:244]     Train net output #1: loss = 0.0224165 (* 1 = 0.0224165 loss)\nI0822 03:24:22.672374 32502 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0822 03:28:01.755895 32502 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0822 03:30:09.801772 32502 solver.cpp:404]     Test net output #0: accuracy = 0.676\nI0822 03:30:09.802110 32502 solver.cpp:404]     Test net output #1: loss = 1.32281 (* 1 = 1.32281 loss)\nI0822 03:30:11.890031 32502 solver.cpp:228] Iteration 18900, loss 
= 0.0795298\nI0822 03:30:11.890082 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 03:30:11.890106 32502 solver.cpp:244]     Train net output #1: loss = 0.0795272 (* 1 = 0.0795272 loss)\nI0822 03:30:12.020279 32502 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0822 03:33:51.041219 32502 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0822 03:35:59.098698 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6729\nI0822 03:35:59.099064 32502 solver.cpp:404]     Test net output #1: loss = 1.37591 (* 1 = 1.37591 loss)\nI0822 03:36:01.187245 32502 solver.cpp:228] Iteration 19000, loss = 0.140579\nI0822 03:36:01.187294 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 03:36:01.187317 32502 solver.cpp:244]     Train net output #1: loss = 0.140577 (* 1 = 0.140577 loss)\nI0822 03:36:01.308573 32502 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0822 03:39:40.262660 32502 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0822 03:41:48.320500 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7193\nI0822 03:41:48.320863 32502 solver.cpp:404]     Test net output #1: loss = 1.2144 (* 1 = 1.2144 loss)\nI0822 03:41:50.410039 32502 solver.cpp:228] Iteration 19100, loss = 0.0973948\nI0822 03:41:50.410094 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 03:41:50.410120 32502 solver.cpp:244]     Train net output #1: loss = 0.0973921 (* 1 = 0.0973921 loss)\nI0822 03:41:50.546813 32502 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0822 03:45:29.528393 32502 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0822 03:47:37.587071 32502 solver.cpp:404]     Test net output #0: accuracy = 0.685\nI0822 03:47:37.587407 32502 solver.cpp:404]     Test net output #1: loss = 1.50948 (* 1 = 1.50948 loss)\nI0822 03:47:39.675557 32502 solver.cpp:228] Iteration 19200, loss = 0.183008\nI0822 03:47:39.675607 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 03:47:39.675631 32502 
solver.cpp:244]     Train net output #1: loss = 0.183005 (* 1 = 0.183005 loss)\nI0822 03:47:39.801152 32502 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0822 03:51:18.940464 32502 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0822 03:53:26.990034 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6628\nI0822 03:53:26.990391 32502 solver.cpp:404]     Test net output #1: loss = 1.59465 (* 1 = 1.59465 loss)\nI0822 03:53:29.078992 32502 solver.cpp:228] Iteration 19300, loss = 0.0495093\nI0822 03:53:29.079043 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 03:53:29.079068 32502 solver.cpp:244]     Train net output #1: loss = 0.0495067 (* 1 = 0.0495067 loss)\nI0822 03:53:29.210490 32502 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0822 03:57:08.201627 32502 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0822 03:59:16.233180 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6936\nI0822 03:59:16.233526 32502 solver.cpp:404]     Test net output #1: loss = 1.5112 (* 1 = 1.5112 loss)\nI0822 03:59:18.322463 32502 solver.cpp:228] Iteration 19400, loss = 0.0811935\nI0822 03:59:18.322513 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 03:59:18.322538 32502 solver.cpp:244]     Train net output #1: loss = 0.0811908 (* 1 = 0.0811908 loss)\nI0822 03:59:18.451182 32502 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0822 04:02:57.431053 32502 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0822 04:05:05.466187 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7154\nI0822 04:05:05.466511 32502 solver.cpp:404]     Test net output #1: loss = 1.28034 (* 1 = 1.28034 loss)\nI0822 04:05:07.557289 32502 solver.cpp:228] Iteration 19500, loss = 0.0735555\nI0822 04:05:07.557338 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 04:05:07.557355 32502 solver.cpp:244]     Train net output #1: loss = 0.0735529 (* 1 = 0.0735529 loss)\nI0822 04:05:07.686213 32502 sgd_solver.cpp:166] 
Iteration 19500, lr = 0.35\nI0822 04:08:46.627882 32502 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0822 04:10:54.645367 32502 solver.cpp:404]     Test net output #0: accuracy = 0.685\nI0822 04:10:54.645714 32502 solver.cpp:404]     Test net output #1: loss = 1.4151 (* 1 = 1.4151 loss)\nI0822 04:10:56.733955 32502 solver.cpp:228] Iteration 19600, loss = 0.119528\nI0822 04:10:56.734004 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 04:10:56.734020 32502 solver.cpp:244]     Train net output #1: loss = 0.119526 (* 1 = 0.119526 loss)\nI0822 04:10:56.859882 32502 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0822 04:14:35.909286 32502 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0822 04:16:43.932500 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6678\nI0822 04:16:43.932873 32502 solver.cpp:404]     Test net output #1: loss = 1.68 (* 1 = 1.68 loss)\nI0822 04:16:46.020671 32502 solver.cpp:228] Iteration 19700, loss = 0.0516991\nI0822 04:16:46.020720 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 04:16:46.020737 32502 solver.cpp:244]     Train net output #1: loss = 0.0516966 (* 1 = 0.0516966 loss)\nI0822 04:16:46.144917 32502 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0822 04:20:25.218812 32502 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0822 04:22:33.233167 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7041\nI0822 04:22:33.233500 32502 solver.cpp:404]     Test net output #1: loss = 1.18765 (* 1 = 1.18765 loss)\nI0822 04:22:35.321494 32502 solver.cpp:228] Iteration 19800, loss = 0.072386\nI0822 04:22:35.321542 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 04:22:35.321558 32502 solver.cpp:244]     Train net output #1: loss = 0.0723834 (* 1 = 0.0723834 loss)\nI0822 04:22:35.449600 32502 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0822 04:26:14.521631 32502 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0822 04:28:22.545415 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.6857\nI0822 04:28:22.545755 32502 solver.cpp:404]     Test net output #1: loss = 1.39746 (* 1 = 1.39746 loss)\nI0822 04:28:24.634238 32502 solver.cpp:228] Iteration 19900, loss = 0.101918\nI0822 04:28:24.634286 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 04:28:24.634304 32502 solver.cpp:244]     Train net output #1: loss = 0.101915 (* 1 = 0.101915 loss)\nI0822 04:28:24.761880 32502 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0822 04:32:03.737373 32502 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0822 04:34:11.776331 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6307\nI0822 04:34:11.776607 32502 solver.cpp:404]     Test net output #1: loss = 1.69405 (* 1 = 1.69405 loss)\nI0822 04:34:13.865221 32502 solver.cpp:228] Iteration 20000, loss = 0.156605\nI0822 04:34:13.865268 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 04:34:13.865285 32502 solver.cpp:244]     Train net output #1: loss = 0.156602 (* 1 = 0.156602 loss)\nI0822 04:34:13.988873 32502 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0822 04:37:53.040375 32502 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0822 04:40:01.095525 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7528\nI0822 04:40:01.095877 32502 solver.cpp:404]     Test net output #1: loss = 0.927298 (* 1 = 0.927298 loss)\nI0822 04:40:03.184547 32502 solver.cpp:228] Iteration 20100, loss = 0.113031\nI0822 04:40:03.184592 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 04:40:03.184609 32502 solver.cpp:244]     Train net output #1: loss = 0.113029 (* 1 = 0.113029 loss)\nI0822 04:40:03.315981 32502 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0822 04:43:42.321626 32502 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0822 04:45:50.372066 32502 solver.cpp:404]     Test net output #0: accuracy = 0.711\nI0822 04:45:50.372414 32502 solver.cpp:404]     Test net output #1: loss = 1.42287 
(* 1 = 1.42287 loss)\nI0822 04:45:52.460711 32502 solver.cpp:228] Iteration 20200, loss = 0.161772\nI0822 04:45:52.460760 32502 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0822 04:45:52.460777 32502 solver.cpp:244]     Train net output #1: loss = 0.161769 (* 1 = 0.161769 loss)\nI0822 04:45:52.585911 32502 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0822 04:49:31.635411 32502 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0822 04:51:39.699173 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7168\nI0822 04:51:39.699517 32502 solver.cpp:404]     Test net output #1: loss = 1.18587 (* 1 = 1.18587 loss)\nI0822 04:51:41.786959 32502 solver.cpp:228] Iteration 20300, loss = 0.0770312\nI0822 04:51:41.787008 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 04:51:41.787024 32502 solver.cpp:244]     Train net output #1: loss = 0.0770285 (* 1 = 0.0770285 loss)\nI0822 04:51:41.915086 32502 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0822 04:55:20.922014 32502 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0822 04:57:28.957864 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6372\nI0822 04:57:28.958168 32502 solver.cpp:404]     Test net output #1: loss = 1.9242 (* 1 = 1.9242 loss)\nI0822 04:57:31.046010 32502 solver.cpp:228] Iteration 20400, loss = 0.137145\nI0822 04:57:31.046058 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 04:57:31.046074 32502 solver.cpp:244]     Train net output #1: loss = 0.137143 (* 1 = 0.137143 loss)\nI0822 04:57:31.170161 32502 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0822 05:01:10.142346 32502 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0822 05:03:18.187623 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6523\nI0822 05:03:18.187963 32502 solver.cpp:404]     Test net output #1: loss = 2.02169 (* 1 = 2.02169 loss)\nI0822 05:03:20.276162 32502 solver.cpp:228] Iteration 20500, loss = 0.112711\nI0822 05:03:20.276211 32502 
solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 05:03:20.276227 32502 solver.cpp:244]     Train net output #1: loss = 0.112709 (* 1 = 0.112709 loss)\nI0822 05:03:20.405592 32502 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0822 05:06:59.566707 32502 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0822 05:09:07.576459 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6984\nI0822 05:09:07.576817 32502 solver.cpp:404]     Test net output #1: loss = 1.44404 (* 1 = 1.44404 loss)\nI0822 05:09:09.665343 32502 solver.cpp:228] Iteration 20600, loss = 0.11202\nI0822 05:09:09.665391 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:09:09.665408 32502 solver.cpp:244]     Train net output #1: loss = 0.112018 (* 1 = 0.112018 loss)\nI0822 05:09:09.792815 32502 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0822 05:12:48.862818 32502 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0822 05:14:56.879894 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7472\nI0822 05:14:56.880247 32502 solver.cpp:404]     Test net output #1: loss = 1.0003 (* 1 = 1.0003 loss)\nI0822 05:14:58.967444 32502 solver.cpp:228] Iteration 20700, loss = 0.112876\nI0822 05:14:58.967491 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 05:14:58.967507 32502 solver.cpp:244]     Train net output #1: loss = 0.112874 (* 1 = 0.112874 loss)\nI0822 05:14:59.099107 32502 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0822 05:18:38.100816 32502 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0822 05:20:46.132277 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7219\nI0822 05:20:46.132616 32502 solver.cpp:404]     Test net output #1: loss = 1.26974 (* 1 = 1.26974 loss)\nI0822 05:20:48.220223 32502 solver.cpp:228] Iteration 20800, loss = 0.0142901\nI0822 05:20:48.220271 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:20:48.220288 32502 solver.cpp:244]     Train net output #1: loss = 0.0142876 (* 
1 = 0.0142876 loss)\nI0822 05:20:48.346325 32502 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0822 05:24:27.359210 32502 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0822 05:26:35.385119 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6442\nI0822 05:26:35.385479 32502 solver.cpp:404]     Test net output #1: loss = 1.76382 (* 1 = 1.76382 loss)\nI0822 05:26:37.472328 32502 solver.cpp:228] Iteration 20900, loss = 0.0302069\nI0822 05:26:37.472378 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:26:37.472393 32502 solver.cpp:244]     Train net output #1: loss = 0.0302044 (* 1 = 0.0302044 loss)\nI0822 05:26:37.605028 32502 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0822 05:30:16.632230 32502 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0822 05:32:24.661267 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6737\nI0822 05:32:24.661602 32502 solver.cpp:404]     Test net output #1: loss = 1.41164 (* 1 = 1.41164 loss)\nI0822 05:32:26.749604 32502 solver.cpp:228] Iteration 21000, loss = 0.115922\nI0822 05:32:26.749652 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:32:26.749670 32502 solver.cpp:244]     Train net output #1: loss = 0.115919 (* 1 = 0.115919 loss)\nI0822 05:32:26.878983 32502 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0822 05:36:05.936189 32502 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0822 05:38:13.955968 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7542\nI0822 05:38:13.956310 32502 solver.cpp:404]     Test net output #1: loss = 1.04813 (* 1 = 1.04813 loss)\nI0822 05:38:16.044451 32502 solver.cpp:228] Iteration 21100, loss = 0.0573275\nI0822 05:38:16.044499 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 05:38:16.044517 32502 solver.cpp:244]     Train net output #1: loss = 0.0573249 (* 1 = 0.0573249 loss)\nI0822 05:38:16.170845 32502 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0822 05:41:55.237835 32502 
solver.cpp:337] Iteration 21200, Testing net (#0)\nI0822 05:44:03.267455 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7273\nI0822 05:44:03.267805 32502 solver.cpp:404]     Test net output #1: loss = 1.26341 (* 1 = 1.26341 loss)\nI0822 05:44:05.356333 32502 solver.cpp:228] Iteration 21200, loss = 0.0537133\nI0822 05:44:05.356379 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 05:44:05.356397 32502 solver.cpp:244]     Train net output #1: loss = 0.0537108 (* 1 = 0.0537108 loss)\nI0822 05:44:05.480751 32502 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0822 05:47:44.571056 32502 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0822 05:49:52.595468 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7021\nI0822 05:49:52.595790 32502 solver.cpp:404]     Test net output #1: loss = 1.34101 (* 1 = 1.34101 loss)\nI0822 05:49:54.683202 32502 solver.cpp:228] Iteration 21300, loss = 0.0324894\nI0822 05:49:54.683249 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:49:54.683265 32502 solver.cpp:244]     Train net output #1: loss = 0.0324869 (* 1 = 0.0324869 loss)\nI0822 05:49:54.808537 32502 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0822 05:53:33.892336 32502 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0822 05:55:41.918123 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6801\nI0822 05:55:41.918478 32502 solver.cpp:404]     Test net output #1: loss = 1.7246 (* 1 = 1.7246 loss)\nI0822 05:55:44.008273 32502 solver.cpp:228] Iteration 21400, loss = 0.101088\nI0822 05:55:44.008322 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 05:55:44.008338 32502 solver.cpp:244]     Train net output #1: loss = 0.101085 (* 1 = 0.101085 loss)\nI0822 05:55:44.133702 32502 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0822 05:59:23.139245 32502 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0822 06:01:31.156819 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.7007\nI0822 06:01:31.157171 32502 solver.cpp:404]     Test net output #1: loss = 1.36375 (* 1 = 1.36375 loss)\nI0822 06:01:33.244201 32502 solver.cpp:228] Iteration 21500, loss = 0.108266\nI0822 06:01:33.244249 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 06:01:33.244266 32502 solver.cpp:244]     Train net output #1: loss = 0.108263 (* 1 = 0.108263 loss)\nI0822 06:01:33.370327 32502 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0822 06:05:12.490885 32502 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0822 06:07:20.475960 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7493\nI0822 06:07:20.476326 32502 solver.cpp:404]     Test net output #1: loss = 1.07719 (* 1 = 1.07719 loss)\nI0822 06:07:22.564327 32502 solver.cpp:228] Iteration 21600, loss = 0.0146902\nI0822 06:07:22.564375 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:07:22.564391 32502 solver.cpp:244]     Train net output #1: loss = 0.0146876 (* 1 = 0.0146876 loss)\nI0822 06:07:22.689549 32502 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0822 06:11:01.653867 32502 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0822 06:13:09.645910 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6689\nI0822 06:13:09.646278 32502 solver.cpp:404]     Test net output #1: loss = 1.79218 (* 1 = 1.79218 loss)\nI0822 06:13:11.734247 32502 solver.cpp:228] Iteration 21700, loss = 0.0514677\nI0822 06:13:11.734294 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 06:13:11.734310 32502 solver.cpp:244]     Train net output #1: loss = 0.0514651 (* 1 = 0.0514651 loss)\nI0822 06:13:11.861982 32502 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0822 06:16:50.962282 32502 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0822 06:18:58.953923 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6263\nI0822 06:18:58.954257 32502 solver.cpp:404]     Test net output #1: loss = 1.72723 (* 1 = 1.72723 loss)\nI0822 06:19:01.042112 
32502 solver.cpp:228] Iteration 21800, loss = 0.0636612\nI0822 06:19:01.042160 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 06:19:01.042176 32502 solver.cpp:244]     Train net output #1: loss = 0.0636586 (* 1 = 0.0636586 loss)\nI0822 06:19:01.171982 32502 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0822 06:22:40.156935 32502 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0822 06:24:48.142462 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6745\nI0822 06:24:48.142820 32502 solver.cpp:404]     Test net output #1: loss = 1.6341 (* 1 = 1.6341 loss)\nI0822 06:24:50.230108 32502 solver.cpp:228] Iteration 21900, loss = 0.0476688\nI0822 06:24:50.230156 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 06:24:50.230173 32502 solver.cpp:244]     Train net output #1: loss = 0.0476662 (* 1 = 0.0476662 loss)\nI0822 06:24:50.354758 32502 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0822 06:28:29.310070 32502 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0822 06:30:37.295842 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6645\nI0822 06:30:37.296200 32502 solver.cpp:404]     Test net output #1: loss = 1.46917 (* 1 = 1.46917 loss)\nI0822 06:30:39.384783 32502 solver.cpp:228] Iteration 22000, loss = 0.0929051\nI0822 06:30:39.384835 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 06:30:39.384852 32502 solver.cpp:244]     Train net output #1: loss = 0.0929025 (* 1 = 0.0929025 loss)\nI0822 06:30:39.510218 32502 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0822 06:34:18.486312 32502 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0822 06:36:27.302757 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6861\nI0822 06:36:27.303167 32502 solver.cpp:404]     Test net output #1: loss = 1.39613 (* 1 = 1.39613 loss)\nI0822 06:36:29.401329 32502 solver.cpp:228] Iteration 22100, loss = 0.0494513\nI0822 06:36:29.401379 32502 solver.cpp:244]     Train net output #0: accuracy 
= 0.99\nI0822 06:36:29.401397 32502 solver.cpp:244]     Train net output #1: loss = 0.0494486 (* 1 = 0.0494486 loss)\nI0822 06:36:29.519414 32502 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0822 06:40:08.426115 32502 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0822 06:42:16.410957 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6566\nI0822 06:42:16.411306 32502 solver.cpp:404]     Test net output #1: loss = 1.65793 (* 1 = 1.65793 loss)\nI0822 06:42:18.499402 32502 solver.cpp:228] Iteration 22200, loss = 0.108539\nI0822 06:42:18.499454 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 06:42:18.499476 32502 solver.cpp:244]     Train net output #1: loss = 0.108537 (* 1 = 0.108537 loss)\nI0822 06:42:18.623334 32502 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0822 06:45:58.362135 32502 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0822 06:48:07.932348 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7424\nI0822 06:48:07.932698 32502 solver.cpp:404]     Test net output #1: loss = 1.221 (* 1 = 1.221 loss)\nI0822 06:48:10.024111 32502 solver.cpp:228] Iteration 22300, loss = 0.013298\nI0822 06:48:10.024176 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:48:10.024194 32502 solver.cpp:244]     Train net output #1: loss = 0.0132953 (* 1 = 0.0132953 loss)\nI0822 06:48:10.158821 32502 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0822 06:51:50.650984 32502 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0822 06:53:59.643182 32502 solver.cpp:404]     Test net output #0: accuracy = 0.673\nI0822 06:53:59.643520 32502 solver.cpp:404]     Test net output #1: loss = 1.46838 (* 1 = 1.46838 loss)\nI0822 06:54:01.734777 32502 solver.cpp:228] Iteration 22400, loss = 0.0700517\nI0822 06:54:01.734843 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 06:54:01.734860 32502 solver.cpp:244]     Train net output #1: loss = 0.070049 (* 1 = 0.070049 loss)\nI0822 06:54:01.868408 32502 
sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0822 06:57:42.392055 32502 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0822 06:59:51.797991 32502 solver.cpp:404]     Test net output #0: accuracy = 0.756\nI0822 06:59:51.798318 32502 solver.cpp:404]     Test net output #1: loss = 1.00289 (* 1 = 1.00289 loss)\nI0822 06:59:53.890306 32502 solver.cpp:228] Iteration 22500, loss = 0.0376874\nI0822 06:59:53.890373 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 06:59:53.890393 32502 solver.cpp:244]     Train net output #1: loss = 0.0376846 (* 1 = 0.0376846 loss)\nI0822 06:59:54.027061 32502 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0822 07:03:34.577071 32502 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0822 07:05:43.903657 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7089\nI0822 07:05:43.904002 32502 solver.cpp:404]     Test net output #1: loss = 1.21659 (* 1 = 1.21659 loss)\nI0822 07:05:45.996351 32502 solver.cpp:228] Iteration 22600, loss = 0.0230629\nI0822 07:05:45.996417 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:05:45.996434 32502 solver.cpp:244]     Train net output #1: loss = 0.0230601 (* 1 = 0.0230601 loss)\nI0822 07:05:46.127132 32502 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0822 07:09:26.634997 32502 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0822 07:11:35.855927 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7076\nI0822 07:11:35.856252 32502 solver.cpp:404]     Test net output #1: loss = 1.20743 (* 1 = 1.20743 loss)\nI0822 07:11:37.948647 32502 solver.cpp:228] Iteration 22700, loss = 0.0734908\nI0822 07:11:37.948710 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 07:11:37.948727 32502 solver.cpp:244]     Train net output #1: loss = 0.0734881 (* 1 = 0.0734881 loss)\nI0822 07:11:38.084974 32502 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0822 07:15:18.618494 32502 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0822 
07:17:28.010920 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6784\nI0822 07:17:28.011272 32502 solver.cpp:404]     Test net output #1: loss = 1.49009 (* 1 = 1.49009 loss)\nI0822 07:17:30.104459 32502 solver.cpp:228] Iteration 22800, loss = 0.0615269\nI0822 07:17:30.104524 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 07:17:30.104542 32502 solver.cpp:244]     Train net output #1: loss = 0.0615242 (* 1 = 0.0615242 loss)\nI0822 07:17:30.233007 32502 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0822 07:21:10.742352 32502 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0822 07:23:20.218322 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6803\nI0822 07:23:20.218719 32502 solver.cpp:404]     Test net output #1: loss = 1.32308 (* 1 = 1.32308 loss)\nI0822 07:23:22.311934 32502 solver.cpp:228] Iteration 22900, loss = 0.0328933\nI0822 07:23:22.312003 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 07:23:22.312021 32502 solver.cpp:244]     Train net output #1: loss = 0.0328905 (* 1 = 0.0328905 loss)\nI0822 07:23:22.439110 32502 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0822 07:27:02.950877 32502 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0822 07:29:12.555272 32502 solver.cpp:404]     Test net output #0: accuracy = 0.771\nI0822 07:29:12.555651 32502 solver.cpp:404]     Test net output #1: loss = 0.978632 (* 1 = 0.978632 loss)\nI0822 07:29:14.648807 32502 solver.cpp:228] Iteration 23000, loss = 0.0720234\nI0822 07:29:14.648870 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 07:29:14.648886 32502 solver.cpp:244]     Train net output #1: loss = 0.0720206 (* 1 = 0.0720206 loss)\nI0822 07:29:14.776669 32502 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0822 07:32:55.263340 32502 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0822 07:35:04.700551 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7189\nI0822 07:35:04.700917 32502 solver.cpp:404]     
Test net output #1: loss = 1.26248 (* 1 = 1.26248 loss)\nI0822 07:35:06.794651 32502 solver.cpp:228] Iteration 23100, loss = 0.0820026\nI0822 07:35:06.794714 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 07:35:06.794731 32502 solver.cpp:244]     Train net output #1: loss = 0.0819999 (* 1 = 0.0819999 loss)\nI0822 07:35:06.925138 32502 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0822 07:38:47.376320 32502 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0822 07:40:57.023829 32502 solver.cpp:404]     Test net output #0: accuracy = 0.764\nI0822 07:40:57.024184 32502 solver.cpp:404]     Test net output #1: loss = 0.85446 (* 1 = 0.85446 loss)\nI0822 07:40:59.117089 32502 solver.cpp:228] Iteration 23200, loss = 0.0459827\nI0822 07:40:59.117154 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 07:40:59.117171 32502 solver.cpp:244]     Train net output #1: loss = 0.04598 (* 1 = 0.04598 loss)\nI0822 07:40:59.243865 32502 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0822 07:44:39.703748 32502 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0822 07:46:49.179123 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7747\nI0822 07:46:49.179494 32502 solver.cpp:404]     Test net output #1: loss = 0.88394 (* 1 = 0.88394 loss)\nI0822 07:46:51.272579 32502 solver.cpp:228] Iteration 23300, loss = 0.0537817\nI0822 07:46:51.272644 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 07:46:51.272660 32502 solver.cpp:244]     Train net output #1: loss = 0.053779 (* 1 = 0.053779 loss)\nI0822 07:46:51.405872 32502 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0822 07:50:31.916522 32502 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0822 07:52:41.212712 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6658\nI0822 07:52:41.213042 32502 solver.cpp:404]     Test net output #1: loss = 1.44181 (* 1 = 1.44181 loss)\nI0822 07:52:43.306504 32502 solver.cpp:228] Iteration 23400, loss = 
0.0600473\nI0822 07:52:43.306569 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 07:52:43.306586 32502 solver.cpp:244]     Train net output #1: loss = 0.0600445 (* 1 = 0.0600445 loss)\nI0822 07:52:43.434896 32502 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0822 07:56:23.977382 32502 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0822 07:58:33.419356 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7535\nI0822 07:58:33.419734 32502 solver.cpp:404]     Test net output #1: loss = 1.00794 (* 1 = 1.00794 loss)\nI0822 07:58:35.513028 32502 solver.cpp:228] Iteration 23500, loss = 0.0474384\nI0822 07:58:35.513092 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 07:58:35.513109 32502 solver.cpp:244]     Train net output #1: loss = 0.0474357 (* 1 = 0.0474357 loss)\nI0822 07:58:35.647874 32502 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0822 08:02:16.223899 32502 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0822 08:04:25.607591 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7207\nI0822 08:04:25.607987 32502 solver.cpp:404]     Test net output #1: loss = 1.36394 (* 1 = 1.36394 loss)\nI0822 08:04:27.700882 32502 solver.cpp:228] Iteration 23600, loss = 0.0615082\nI0822 08:04:27.700947 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:04:27.700968 32502 solver.cpp:244]     Train net output #1: loss = 0.0615055 (* 1 = 0.0615055 loss)\nI0822 08:04:27.828331 32502 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0822 08:08:08.299897 32502 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0822 08:10:17.920706 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7582\nI0822 08:10:17.921053 32502 solver.cpp:404]     Test net output #1: loss = 1.05499 (* 1 = 1.05499 loss)\nI0822 08:10:20.013936 32502 solver.cpp:228] Iteration 23700, loss = 0.0412387\nI0822 08:10:20.014006 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:10:20.014025 32502 
solver.cpp:244]     Train net output #1: loss = 0.041236 (* 1 = 0.041236 loss)\nI0822 08:10:20.149802 32502 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0822 08:14:00.648102 32502 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0822 08:16:10.232112 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5896\nI0822 08:16:10.232463 32502 solver.cpp:404]     Test net output #1: loss = 2.08845 (* 1 = 2.08845 loss)\nI0822 08:16:12.324522 32502 solver.cpp:228] Iteration 23800, loss = 0.04562\nI0822 08:16:12.324587 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:16:12.324604 32502 solver.cpp:244]     Train net output #1: loss = 0.0456173 (* 1 = 0.0456173 loss)\nI0822 08:16:12.454710 32502 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0822 08:19:53.538283 32502 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0822 08:22:03.119033 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7128\nI0822 08:22:03.119348 32502 solver.cpp:404]     Test net output #1: loss = 1.28032 (* 1 = 1.28032 loss)\nI0822 08:22:05.212669 32502 solver.cpp:228] Iteration 23900, loss = 0.100251\nI0822 08:22:05.212728 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 08:22:05.212745 32502 solver.cpp:244]     Train net output #1: loss = 0.100249 (* 1 = 0.100249 loss)\nI0822 08:22:05.358193 32502 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0822 08:25:46.719923 32502 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0822 08:27:56.313112 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7648\nI0822 08:27:56.313474 32502 solver.cpp:404]     Test net output #1: loss = 0.923127 (* 1 = 0.923127 loss)\nI0822 08:27:58.405421 32502 solver.cpp:228] Iteration 24000, loss = 0.0232869\nI0822 08:27:58.405483 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:27:58.405501 32502 solver.cpp:244]     Train net output #1: loss = 0.0232842 (* 1 = 0.0232842 loss)\nI0822 08:27:58.547611 32502 sgd_solver.cpp:166] Iteration 
24000, lr = 0.35\nI0822 08:31:39.741760 32502 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0822 08:33:49.262254 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7254\nI0822 08:33:49.262624 32502 solver.cpp:404]     Test net output #1: loss = 1.16541 (* 1 = 1.16541 loss)\nI0822 08:33:51.355020 32502 solver.cpp:228] Iteration 24100, loss = 0.0481029\nI0822 08:33:51.355084 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:33:51.355103 32502 solver.cpp:244]     Train net output #1: loss = 0.0481003 (* 1 = 0.0481003 loss)\nI0822 08:33:51.498373 32502 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0822 08:37:32.914526 32502 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0822 08:39:42.516131 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7015\nI0822 08:39:42.516495 32502 solver.cpp:404]     Test net output #1: loss = 1.36428 (* 1 = 1.36428 loss)\nI0822 08:39:44.609485 32502 solver.cpp:228] Iteration 24200, loss = 0.0390473\nI0822 08:39:44.609549 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:39:44.609566 32502 solver.cpp:244]     Train net output #1: loss = 0.0390446 (* 1 = 0.0390446 loss)\nI0822 08:39:44.753679 32502 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0822 08:43:25.985653 32502 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0822 08:45:35.413997 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7233\nI0822 08:45:35.414350 32502 solver.cpp:404]     Test net output #1: loss = 1.16471 (* 1 = 1.16471 loss)\nI0822 08:45:37.507540 32502 solver.cpp:228] Iteration 24300, loss = 0.0615922\nI0822 08:45:37.507606 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 08:45:37.507623 32502 solver.cpp:244]     Train net output #1: loss = 0.0615896 (* 1 = 0.0615896 loss)\nI0822 08:45:37.649991 32502 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0822 08:49:18.827070 32502 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0822 08:51:28.436144 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.72\nI0822 08:51:28.436520 32502 solver.cpp:404]     Test net output #1: loss = 1.28252 (* 1 = 1.28252 loss)\nI0822 08:51:30.530429 32502 solver.cpp:228] Iteration 24400, loss = 0.0272172\nI0822 08:51:30.530494 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:51:30.530513 32502 solver.cpp:244]     Train net output #1: loss = 0.0272145 (* 1 = 0.0272145 loss)\nI0822 08:51:30.672869 32502 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0822 08:55:11.964172 32502 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0822 08:57:21.570649 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7361\nI0822 08:57:21.571017 32502 solver.cpp:404]     Test net output #1: loss = 1.18913 (* 1 = 1.18913 loss)\nI0822 08:57:23.665673 32502 solver.cpp:228] Iteration 24500, loss = 0.0509537\nI0822 08:57:23.665738 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 08:57:23.665757 32502 solver.cpp:244]     Train net output #1: loss = 0.050951 (* 1 = 0.050951 loss)\nI0822 08:57:23.802269 32502 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0822 09:01:05.097484 32502 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0822 09:03:14.640755 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7647\nI0822 09:03:14.641139 32502 solver.cpp:404]     Test net output #1: loss = 0.939235 (* 1 = 0.939235 loss)\nI0822 09:03:16.733944 32502 solver.cpp:228] Iteration 24600, loss = 0.0791154\nI0822 09:03:16.734011 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 09:03:16.734028 32502 solver.cpp:244]     Train net output #1: loss = 0.0791127 (* 1 = 0.0791127 loss)\nI0822 09:03:16.870040 32502 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0822 09:06:58.230525 32502 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0822 09:09:07.884923 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7357\nI0822 09:09:07.885280 32502 solver.cpp:404]     Test net output #1: loss = 
1.19144 (* 1 = 1.19144 loss)\nI0822 09:09:09.976907 32502 solver.cpp:228] Iteration 24700, loss = 0.0352503\nI0822 09:09:09.976974 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 09:09:09.976992 32502 solver.cpp:244]     Train net output #1: loss = 0.0352476 (* 1 = 0.0352476 loss)\nI0822 09:09:10.128005 32502 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0822 09:12:51.659569 32502 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0822 09:15:01.302757 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6606\nI0822 09:15:01.303151 32502 solver.cpp:404]     Test net output #1: loss = 1.76429 (* 1 = 1.76429 loss)\nI0822 09:15:03.395596 32502 solver.cpp:228] Iteration 24800, loss = 0.0298549\nI0822 09:15:03.395656 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 09:15:03.395673 32502 solver.cpp:244]     Train net output #1: loss = 0.0298522 (* 1 = 0.0298522 loss)\nI0822 09:15:03.537385 32502 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0822 09:18:44.804168 32502 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0822 09:20:53.886242 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7401\nI0822 09:20:53.886566 32502 solver.cpp:404]     Test net output #1: loss = 1.22883 (* 1 = 1.22883 loss)\nI0822 09:20:55.979017 32502 solver.cpp:228] Iteration 24900, loss = 0.117207\nI0822 09:20:55.979081 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 09:20:55.979099 32502 solver.cpp:244]     Train net output #1: loss = 0.117204 (* 1 = 0.117204 loss)\nI0822 09:20:56.116111 32502 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0822 09:24:37.391795 32502 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0822 09:26:47.024425 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7538\nI0822 09:26:47.024811 32502 solver.cpp:404]     Test net output #1: loss = 0.998213 (* 1 = 0.998213 loss)\nI0822 09:26:49.117126 32502 solver.cpp:228] Iteration 25000, loss = 0.0463181\nI0822 09:26:49.117194 
32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 09:26:49.117219 32502 solver.cpp:244]     Train net output #1: loss = 0.0463154 (* 1 = 0.0463154 loss)\nI0822 09:26:49.260078 32502 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0822 09:30:30.523460 32502 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0822 09:32:40.080351 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7308\nI0822 09:32:40.080713 32502 solver.cpp:404]     Test net output #1: loss = 1.27469 (* 1 = 1.27469 loss)\nI0822 09:32:42.173126 32502 solver.cpp:228] Iteration 25100, loss = 0.0444236\nI0822 09:32:42.173194 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 09:32:42.173219 32502 solver.cpp:244]     Train net output #1: loss = 0.0444209 (* 1 = 0.0444209 loss)\nI0822 09:32:42.325743 32502 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0822 09:36:23.717236 32502 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0822 09:38:33.379989 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7399\nI0822 09:38:33.380383 32502 solver.cpp:404]     Test net output #1: loss = 1.21167 (* 1 = 1.21167 loss)\nI0822 09:38:35.473172 32502 solver.cpp:228] Iteration 25200, loss = 0.0448194\nI0822 09:38:35.473238 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 09:38:35.473264 32502 solver.cpp:244]     Train net output #1: loss = 0.0448167 (* 1 = 0.0448167 loss)\nI0822 09:38:35.610086 32502 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0822 09:42:16.849846 32502 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0822 09:44:26.481609 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7214\nI0822 09:44:26.481964 32502 solver.cpp:404]     Test net output #1: loss = 1.38074 (* 1 = 1.38074 loss)\nI0822 09:44:28.574008 32502 solver.cpp:228] Iteration 25300, loss = 0.0504735\nI0822 09:44:28.574075 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 09:44:28.574102 32502 solver.cpp:244]     Train net output #1: 
loss = 0.0504708 (* 1 = 0.0504708 loss)\nI0822 09:44:28.716231 32502 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0822 09:48:09.941699 32502 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0822 09:50:19.571749 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7317\nI0822 09:50:19.572134 32502 solver.cpp:404]     Test net output #1: loss = 1.15824 (* 1 = 1.15824 loss)\nI0822 09:50:21.664841 32502 solver.cpp:228] Iteration 25400, loss = 0.0932385\nI0822 09:50:21.664908 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 09:50:21.664932 32502 solver.cpp:244]     Train net output #1: loss = 0.0932359 (* 1 = 0.0932359 loss)\nI0822 09:50:21.802268 32502 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0822 09:54:03.129643 32502 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0822 09:56:12.385433 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6956\nI0822 09:56:12.385798 32502 solver.cpp:404]     Test net output #1: loss = 1.41086 (* 1 = 1.41086 loss)\nI0822 09:56:14.478433 32502 solver.cpp:228] Iteration 25500, loss = 0.0570256\nI0822 09:56:14.478497 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 09:56:14.478521 32502 solver.cpp:244]     Train net output #1: loss = 0.057023 (* 1 = 0.057023 loss)\nI0822 09:56:14.620102 32502 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0822 09:59:56.033459 32502 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0822 10:02:05.514856 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7575\nI0822 10:02:05.515239 32502 solver.cpp:404]     Test net output #1: loss = 1.04714 (* 1 = 1.04714 loss)\nI0822 10:02:07.608901 32502 solver.cpp:228] Iteration 25600, loss = 0.0417556\nI0822 10:02:07.608970 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 10:02:07.608989 32502 solver.cpp:244]     Train net output #1: loss = 0.041753 (* 1 = 0.041753 loss)\nI0822 10:02:07.736920 32502 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0822 
10:05:49.135922 32502 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0822 10:07:58.747745 32502 solver.cpp:404]     Test net output #0: accuracy = 0.745\nI0822 10:07:58.748124 32502 solver.cpp:404]     Test net output #1: loss = 1.0412 (* 1 = 1.0412 loss)\nI0822 10:08:00.840674 32502 solver.cpp:228] Iteration 25700, loss = 0.0562232\nI0822 10:08:00.840739 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 10:08:00.840756 32502 solver.cpp:244]     Train net output #1: loss = 0.0562206 (* 1 = 0.0562206 loss)\nI0822 10:08:00.981501 32502 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0822 10:11:42.104746 32502 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0822 10:13:51.759711 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7062\nI0822 10:13:51.760113 32502 solver.cpp:404]     Test net output #1: loss = 1.48413 (* 1 = 1.48413 loss)\nI0822 10:13:53.853327 32502 solver.cpp:228] Iteration 25800, loss = 0.0864782\nI0822 10:13:53.853390 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 10:13:53.853410 32502 solver.cpp:244]     Train net output #1: loss = 0.0864756 (* 1 = 0.0864756 loss)\nI0822 10:13:53.999184 32502 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0822 10:17:35.400343 32502 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0822 10:19:45.053520 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7157\nI0822 10:19:45.053866 32502 solver.cpp:404]     Test net output #1: loss = 1.31264 (* 1 = 1.31264 loss)\nI0822 10:19:47.147439 32502 solver.cpp:228] Iteration 25900, loss = 0.049099\nI0822 10:19:47.147502 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 10:19:47.147521 32502 solver.cpp:244]     Train net output #1: loss = 0.0490964 (* 1 = 0.0490964 loss)\nI0822 10:19:47.289131 32502 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0822 10:23:28.721235 32502 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0822 10:25:38.291327 32502 solver.cpp:404]     Test net output 
#0: accuracy = 0.7006\nI0822 10:25:38.291673 32502 solver.cpp:404]     Test net output #1: loss = 1.24446 (* 1 = 1.24446 loss)\nI0822 10:25:40.384848 32502 solver.cpp:228] Iteration 26000, loss = 0.0972369\nI0822 10:25:40.384912 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 10:25:40.384929 32502 solver.cpp:244]     Train net output #1: loss = 0.0972343 (* 1 = 0.0972343 loss)\nI0822 10:25:40.522837 32502 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0822 10:29:21.772234 32502 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0822 10:31:31.386596 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7641\nI0822 10:31:31.386992 32502 solver.cpp:404]     Test net output #1: loss = 0.929348 (* 1 = 0.929348 loss)\nI0822 10:31:33.481235 32502 solver.cpp:228] Iteration 26100, loss = 0.0734944\nI0822 10:31:33.481299 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 10:31:33.481317 32502 solver.cpp:244]     Train net output #1: loss = 0.0734918 (* 1 = 0.0734918 loss)\nI0822 10:31:33.625490 32502 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0822 10:35:15.012949 32502 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0822 10:37:24.662359 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7127\nI0822 10:37:24.662744 32502 solver.cpp:404]     Test net output #1: loss = 1.17464 (* 1 = 1.17464 loss)\nI0822 10:37:26.755548 32502 solver.cpp:228] Iteration 26200, loss = 0.0297518\nI0822 10:37:26.755610 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 10:37:26.755628 32502 solver.cpp:244]     Train net output #1: loss = 0.0297492 (* 1 = 0.0297492 loss)\nI0822 10:37:26.899009 32502 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0822 10:41:08.205509 32502 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0822 10:43:17.824738 32502 solver.cpp:404]     Test net output #0: accuracy = 0.747\nI0822 10:43:17.825129 32502 solver.cpp:404]     Test net output #1: loss = 0.963813 (* 1 = 0.963813 
loss)\nI0822 10:43:19.919337 32502 solver.cpp:228] Iteration 26300, loss = 0.0509319\nI0822 10:43:19.919402 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 10:43:19.919420 32502 solver.cpp:244]     Train net output #1: loss = 0.0509293 (* 1 = 0.0509293 loss)\nI0822 10:43:20.055397 32502 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0822 10:47:01.298612 32502 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0822 10:49:10.872866 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7952\nI0822 10:49:10.873255 32502 solver.cpp:404]     Test net output #1: loss = 0.776463 (* 1 = 0.776463 loss)\nI0822 10:49:12.966755 32502 solver.cpp:228] Iteration 26400, loss = 0.0504139\nI0822 10:49:12.966820 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 10:49:12.966838 32502 solver.cpp:244]     Train net output #1: loss = 0.0504113 (* 1 = 0.0504113 loss)\nI0822 10:49:13.108070 32502 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0822 10:52:54.370054 32502 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0822 10:55:03.996556 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7414\nI0822 10:55:03.996940 32502 solver.cpp:404]     Test net output #1: loss = 1.05957 (* 1 = 1.05957 loss)\nI0822 10:55:06.089233 32502 solver.cpp:228] Iteration 26500, loss = 0.0969407\nI0822 10:55:06.089296 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 10:55:06.089314 32502 solver.cpp:244]     Train net output #1: loss = 0.0969381 (* 1 = 0.0969381 loss)\nI0822 10:55:06.229951 32502 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0822 10:58:47.506484 32502 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0822 11:00:57.124933 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7682\nI0822 11:00:57.125306 32502 solver.cpp:404]     Test net output #1: loss = 1.02285 (* 1 = 1.02285 loss)\nI0822 11:00:59.217216 32502 solver.cpp:228] Iteration 26600, loss = 0.155326\nI0822 11:00:59.217279 32502 solver.cpp:244]    
 Train net output #0: accuracy = 0.96\nI0822 11:00:59.217296 32502 solver.cpp:244]     Train net output #1: loss = 0.155324 (* 1 = 0.155324 loss)\nI0822 11:00:59.362247 32502 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0822 11:04:40.703680 32502 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0822 11:06:49.827147 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7468\nI0822 11:06:49.827471 32502 solver.cpp:404]     Test net output #1: loss = 1.21825 (* 1 = 1.21825 loss)\nI0822 11:06:51.919122 32502 solver.cpp:228] Iteration 26700, loss = 0.0335653\nI0822 11:06:51.919188 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 11:06:51.919205 32502 solver.cpp:244]     Train net output #1: loss = 0.0335627 (* 1 = 0.0335627 loss)\nI0822 11:06:52.059689 32502 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0822 11:10:33.379539 32502 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0822 11:12:42.928254 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7076\nI0822 11:12:42.928619 32502 solver.cpp:404]     Test net output #1: loss = 1.45678 (* 1 = 1.45678 loss)\nI0822 11:12:45.020776 32502 solver.cpp:228] Iteration 26800, loss = 0.0341485\nI0822 11:12:45.020839 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 11:12:45.020858 32502 solver.cpp:244]     Train net output #1: loss = 0.0341459 (* 1 = 0.0341459 loss)\nI0822 11:12:45.160394 32502 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0822 11:16:26.419771 32502 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0822 11:18:35.993227 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7475\nI0822 11:18:35.993557 32502 solver.cpp:404]     Test net output #1: loss = 1.07529 (* 1 = 1.07529 loss)\nI0822 11:18:38.085906 32502 solver.cpp:228] Iteration 26900, loss = 0.0469174\nI0822 11:18:38.085969 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 11:18:38.085991 32502 solver.cpp:244]     Train net output #1: loss = 0.0469148 (* 1 = 
0.0469148 loss)\nI0822 11:18:38.223109 32502 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0822 11:22:19.362076 32502 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0822 11:24:28.830688 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7284\nI0822 11:24:28.831050 32502 solver.cpp:404]     Test net output #1: loss = 1.21074 (* 1 = 1.21074 loss)\nI0822 11:24:30.922855 32502 solver.cpp:228] Iteration 27000, loss = 0.107541\nI0822 11:24:30.922919 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 11:24:30.922937 32502 solver.cpp:244]     Train net output #1: loss = 0.107539 (* 1 = 0.107539 loss)\nI0822 11:24:31.062571 32502 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0822 11:28:12.315942 32502 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0822 11:30:21.818277 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7485\nI0822 11:30:21.818634 32502 solver.cpp:404]     Test net output #1: loss = 0.956774 (* 1 = 0.956774 loss)\nI0822 11:30:23.910756 32502 solver.cpp:228] Iteration 27100, loss = 0.103748\nI0822 11:30:23.910820 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 11:30:23.910840 32502 solver.cpp:244]     Train net output #1: loss = 0.103745 (* 1 = 0.103745 loss)\nI0822 11:30:24.052075 32502 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0822 11:34:05.285711 32502 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0822 11:36:14.564352 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7393\nI0822 11:36:14.564738 32502 solver.cpp:404]     Test net output #1: loss = 1.18179 (* 1 = 1.18179 loss)\nI0822 11:36:16.657676 32502 solver.cpp:228] Iteration 27200, loss = 0.074744\nI0822 11:36:16.657739 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 11:36:16.657757 32502 solver.cpp:244]     Train net output #1: loss = 0.0747413 (* 1 = 0.0747413 loss)\nI0822 11:36:16.797304 32502 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0822 11:39:57.990520 32502 solver.cpp:337] 
Iteration 27300, Testing net (#0)\nI0822 11:42:07.652657 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6997\nI0822 11:42:07.653046 32502 solver.cpp:404]     Test net output #1: loss = 1.47942 (* 1 = 1.47942 loss)\nI0822 11:42:09.744993 32502 solver.cpp:228] Iteration 27300, loss = 0.0314403\nI0822 11:42:09.745056 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 11:42:09.745074 32502 solver.cpp:244]     Train net output #1: loss = 0.0314377 (* 1 = 0.0314377 loss)\nI0822 11:42:09.888206 32502 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0822 11:45:51.162133 32502 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0822 11:48:00.783442 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6718\nI0822 11:48:00.783810 32502 solver.cpp:404]     Test net output #1: loss = 1.68487 (* 1 = 1.68487 loss)\nI0822 11:48:02.874994 32502 solver.cpp:228] Iteration 27400, loss = 0.0739051\nI0822 11:48:02.875056 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 11:48:02.875073 32502 solver.cpp:244]     Train net output #1: loss = 0.0739025 (* 1 = 0.0739025 loss)\nI0822 11:48:03.020457 32502 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0822 11:51:44.292501 32502 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0822 11:53:53.929205 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6686\nI0822 11:53:53.929599 32502 solver.cpp:404]     Test net output #1: loss = 1.54289 (* 1 = 1.54289 loss)\nI0822 11:53:56.023550 32502 solver.cpp:228] Iteration 27500, loss = 0.0532864\nI0822 11:53:56.023613 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 11:53:56.023633 32502 solver.cpp:244]     Train net output #1: loss = 0.0532838 (* 1 = 0.0532838 loss)\nI0822 11:53:56.159457 32502 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0822 11:57:37.248260 32502 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0822 11:59:46.883821 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7071\nI0822 
11:59:46.884227 32502 solver.cpp:404]     Test net output #1: loss = 1.39834 (* 1 = 1.39834 loss)\nI0822 11:59:48.977270 32502 solver.cpp:228] Iteration 27600, loss = 0.0411529\nI0822 11:59:48.977339 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 11:59:48.977363 32502 solver.cpp:244]     Train net output #1: loss = 0.0411502 (* 1 = 0.0411502 loss)\nI0822 11:59:49.113368 32502 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0822 12:03:29.672008 32502 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0822 12:05:39.277736 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6901\nI0822 12:05:39.278131 32502 solver.cpp:404]     Test net output #1: loss = 1.58083 (* 1 = 1.58083 loss)\nI0822 12:05:41.370620 32502 solver.cpp:228] Iteration 27700, loss = 0.0952369\nI0822 12:05:41.370685 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 12:05:41.370712 32502 solver.cpp:244]     Train net output #1: loss = 0.0952342 (* 1 = 0.0952342 loss)\nI0822 12:05:41.507333 32502 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0822 12:09:22.263607 32502 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0822 12:11:31.877372 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7218\nI0822 12:11:31.877737 32502 solver.cpp:404]     Test net output #1: loss = 1.27496 (* 1 = 1.27496 loss)\nI0822 12:11:33.969782 32502 solver.cpp:228] Iteration 27800, loss = 0.0394722\nI0822 12:11:33.969851 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 12:11:33.969877 32502 solver.cpp:244]     Train net output #1: loss = 0.0394696 (* 1 = 0.0394696 loss)\nI0822 12:11:34.102104 32502 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0822 12:15:14.741704 32502 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0822 12:17:24.356122 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7654\nI0822 12:17:24.356523 32502 solver.cpp:404]     Test net output #1: loss = 0.988442 (* 1 = 0.988442 loss)\nI0822 12:17:26.449645 32502 
solver.cpp:228] Iteration 27900, loss = 0.0358092\nI0822 12:17:26.449712 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 12:17:26.449738 32502 solver.cpp:244]     Train net output #1: loss = 0.0358065 (* 1 = 0.0358065 loss)\nI0822 12:17:26.583930 32502 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0822 12:21:07.312273 32502 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0822 12:23:16.915170 32502 solver.cpp:404]     Test net output #0: accuracy = 0.698\nI0822 12:23:16.915575 32502 solver.cpp:404]     Test net output #1: loss = 1.28446 (* 1 = 1.28446 loss)\nI0822 12:23:19.007647 32502 solver.cpp:228] Iteration 28000, loss = 0.084974\nI0822 12:23:19.007715 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 12:23:19.007740 32502 solver.cpp:244]     Train net output #1: loss = 0.0849713 (* 1 = 0.0849713 loss)\nI0822 12:23:19.138929 32502 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0822 12:26:59.688508 32502 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0822 12:29:09.300916 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7438\nI0822 12:29:09.301283 32502 solver.cpp:404]     Test net output #1: loss = 1.01099 (* 1 = 1.01099 loss)\nI0822 12:29:11.394160 32502 solver.cpp:228] Iteration 28100, loss = 0.0305614\nI0822 12:29:11.394228 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 12:29:11.394253 32502 solver.cpp:244]     Train net output #1: loss = 0.0305587 (* 1 = 0.0305587 loss)\nI0822 12:29:11.523277 32502 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0822 12:32:52.162956 32502 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0822 12:35:01.747287 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7945\nI0822 12:35:01.747644 32502 solver.cpp:404]     Test net output #1: loss = 0.762015 (* 1 = 0.762015 loss)\nI0822 12:35:03.840582 32502 solver.cpp:228] Iteration 28200, loss = 0.071491\nI0822 12:35:03.840652 32502 solver.cpp:244]     Train net output #0: accuracy = 
0.98\nI0822 12:35:03.840678 32502 solver.cpp:244]     Train net output #1: loss = 0.0714883 (* 1 = 0.0714883 loss)\nI0822 12:35:03.975312 32502 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0822 12:38:44.494045 32502 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0822 12:40:54.082243 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7381\nI0822 12:40:54.082639 32502 solver.cpp:404]     Test net output #1: loss = 1.31595 (* 1 = 1.31595 loss)\nI0822 12:40:56.175544 32502 solver.cpp:228] Iteration 28300, loss = 0.0417225\nI0822 12:40:56.175614 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 12:40:56.175642 32502 solver.cpp:244]     Train net output #1: loss = 0.0417198 (* 1 = 0.0417198 loss)\nI0822 12:40:56.311695 32502 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0822 12:44:36.960501 32502 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0822 12:46:46.562911 32502 solver.cpp:404]     Test net output #0: accuracy = 0.785\nI0822 12:46:46.563297 32502 solver.cpp:404]     Test net output #1: loss = 0.83033 (* 1 = 0.83033 loss)\nI0822 12:46:48.655524 32502 solver.cpp:228] Iteration 28400, loss = 0.17319\nI0822 12:46:48.655587 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 12:46:48.655606 32502 solver.cpp:244]     Train net output #1: loss = 0.173187 (* 1 = 0.173187 loss)\nI0822 12:46:48.787907 32502 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0822 12:50:29.607182 32502 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0822 12:52:39.093747 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7103\nI0822 12:52:39.094141 32502 solver.cpp:404]     Test net output #1: loss = 1.35174 (* 1 = 1.35174 loss)\nI0822 12:52:41.187309 32502 solver.cpp:228] Iteration 28500, loss = 0.0315817\nI0822 12:52:41.187374 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 12:52:41.187392 32502 solver.cpp:244]     Train net output #1: loss = 0.031579 (* 1 = 0.031579 loss)\nI0822 12:52:41.317116 
32502 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0822 12:56:21.965440 32502 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0822 12:58:31.004541 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6268\nI0822 12:58:31.004918 32502 solver.cpp:404]     Test net output #1: loss = 1.62615 (* 1 = 1.62615 loss)\nI0822 12:58:33.098139 32502 solver.cpp:228] Iteration 28600, loss = 0.0392632\nI0822 12:58:33.098204 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 12:58:33.098222 32502 solver.cpp:244]     Train net output #1: loss = 0.0392605 (* 1 = 0.0392605 loss)\nI0822 12:58:33.235411 32502 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0822 13:02:14.023139 32502 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0822 13:04:23.022039 32502 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI0822 13:04:23.022374 32502 solver.cpp:404]     Test net output #1: loss = 1.07064 (* 1 = 1.07064 loss)\nI0822 13:04:25.114799 32502 solver.cpp:228] Iteration 28700, loss = 0.0534873\nI0822 13:04:25.114861 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 13:04:25.114879 32502 solver.cpp:244]     Train net output #1: loss = 0.0534847 (* 1 = 0.0534847 loss)\nI0822 13:04:25.253562 32502 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0822 13:08:06.114735 32502 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0822 13:10:14.936959 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7974\nI0822 13:10:14.937290 32502 solver.cpp:404]     Test net output #1: loss = 0.911517 (* 1 = 0.911517 loss)\nI0822 13:10:17.030127 32502 solver.cpp:228] Iteration 28800, loss = 0.0253435\nI0822 13:10:17.030191 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 13:10:17.030210 32502 solver.cpp:244]     Train net output #1: loss = 0.0253409 (* 1 = 0.0253409 loss)\nI0822 13:10:17.168314 32502 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0822 13:13:57.831496 32502 solver.cpp:337] Iteration 28900, Testing net 
(#0)\nI0822 13:16:05.886265 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7659\nI0822 13:16:05.886610 32502 solver.cpp:404]     Test net output #1: loss = 0.934482 (* 1 = 0.934482 loss)\nI0822 13:16:07.974822 32502 solver.cpp:228] Iteration 28900, loss = 0.0300002\nI0822 13:16:07.974870 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:16:07.974885 32502 solver.cpp:244]     Train net output #1: loss = 0.0299976 (* 1 = 0.0299976 loss)\nI0822 13:16:08.112043 32502 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0822 13:19:48.534788 32502 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0822 13:21:56.516378 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6889\nI0822 13:21:56.516739 32502 solver.cpp:404]     Test net output #1: loss = 1.47563 (* 1 = 1.47563 loss)\nI0822 13:21:58.603543 32502 solver.cpp:228] Iteration 29000, loss = 0.0345476\nI0822 13:21:58.603591 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 13:21:58.603608 32502 solver.cpp:244]     Train net output #1: loss = 0.034545 (* 1 = 0.034545 loss)\nI0822 13:21:58.747189 32502 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0822 13:25:39.013914 32502 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0822 13:27:46.987699 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8069\nI0822 13:27:46.988064 32502 solver.cpp:404]     Test net output #1: loss = 0.771645 (* 1 = 0.771645 loss)\nI0822 13:27:49.075271 32502 solver.cpp:228] Iteration 29100, loss = 0.109863\nI0822 13:27:49.075320 32502 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0822 13:27:49.075335 32502 solver.cpp:244]     Train net output #1: loss = 0.10986 (* 1 = 0.10986 loss)\nI0822 13:27:49.209673 32502 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0822 13:31:29.473877 32502 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0822 13:33:37.439261 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7973\nI0822 13:33:37.439620 32502 solver.cpp:404]    
 Test net output #1: loss = 0.817782 (* 1 = 0.817782 loss)\nI0822 13:33:39.527004 32502 solver.cpp:228] Iteration 29200, loss = 0.0208847\nI0822 13:33:39.527051 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:33:39.527066 32502 solver.cpp:244]     Train net output #1: loss = 0.0208821 (* 1 = 0.0208821 loss)\nI0822 13:33:39.670692 32502 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0822 13:37:20.261209 32502 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0822 13:39:28.230366 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7907\nI0822 13:39:28.230715 32502 solver.cpp:404]     Test net output #1: loss = 0.735708 (* 1 = 0.735708 loss)\nI0822 13:39:30.317934 32502 solver.cpp:228] Iteration 29300, loss = 0.0299509\nI0822 13:39:30.317980 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 13:39:30.317996 32502 solver.cpp:244]     Train net output #1: loss = 0.0299483 (* 1 = 0.0299483 loss)\nI0822 13:39:30.455634 32502 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0822 13:43:10.929131 32502 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0822 13:45:18.926738 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7486\nI0822 13:45:18.927114 32502 solver.cpp:404]     Test net output #1: loss = 1.13919 (* 1 = 1.13919 loss)\nI0822 13:45:21.014482 32502 solver.cpp:228] Iteration 29400, loss = 0.056408\nI0822 13:45:21.014533 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 13:45:21.014549 32502 solver.cpp:244]     Train net output #1: loss = 0.0564054 (* 1 = 0.0564054 loss)\nI0822 13:45:21.151394 32502 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0822 13:49:01.746707 32502 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0822 13:51:09.749835 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6368\nI0822 13:51:09.750185 32502 solver.cpp:404]     Test net output #1: loss = 1.71218 (* 1 = 1.71218 loss)\nI0822 13:51:11.837740 32502 solver.cpp:228] Iteration 29500, loss = 
0.11693\nI0822 13:51:11.837788 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 13:51:11.837805 32502 solver.cpp:244]     Train net output #1: loss = 0.116927 (* 1 = 0.116927 loss)\nI0822 13:51:11.973079 32502 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0822 13:54:52.220546 32502 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0822 13:57:00.226867 32502 solver.cpp:404]     Test net output #0: accuracy = 0.729\nI0822 13:57:00.227229 32502 solver.cpp:404]     Test net output #1: loss = 1.13722 (* 1 = 1.13722 loss)\nI0822 13:57:02.314597 32502 solver.cpp:228] Iteration 29600, loss = 0.0165002\nI0822 13:57:02.314646 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:57:02.314661 32502 solver.cpp:244]     Train net output #1: loss = 0.0164976 (* 1 = 0.0164976 loss)\nI0822 13:57:02.456254 32502 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0822 14:00:42.930794 32502 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0822 14:02:50.930773 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7932\nI0822 14:02:50.931169 32502 solver.cpp:404]     Test net output #1: loss = 0.850793 (* 1 = 0.850793 loss)\nI0822 14:02:53.018515 32502 solver.cpp:228] Iteration 29700, loss = 0.0346952\nI0822 14:02:53.018563 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 14:02:53.018580 32502 solver.cpp:244]     Train net output #1: loss = 0.0346926 (* 1 = 0.0346926 loss)\nI0822 14:02:53.160367 32502 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0822 14:06:33.496006 32502 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0822 14:08:41.487229 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7234\nI0822 14:08:41.487602 32502 solver.cpp:404]     Test net output #1: loss = 1.1953 (* 1 = 1.1953 loss)\nI0822 14:08:43.574805 32502 solver.cpp:228] Iteration 29800, loss = 0.025036\nI0822 14:08:43.574857 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:08:43.574873 32502 solver.cpp:244]     
Train net output #1: loss = 0.0250334 (* 1 = 0.0250334 loss)\nI0822 14:08:43.715659 32502 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0822 14:12:24.157732 32502 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0822 14:14:32.134735 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7407\nI0822 14:14:32.135113 32502 solver.cpp:404]     Test net output #1: loss = 1.21587 (* 1 = 1.21587 loss)\nI0822 14:14:34.222993 32502 solver.cpp:228] Iteration 29900, loss = 0.033695\nI0822 14:14:34.223039 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 14:14:34.223057 32502 solver.cpp:244]     Train net output #1: loss = 0.0336925 (* 1 = 0.0336925 loss)\nI0822 14:14:34.358368 32502 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0822 14:18:14.761307 32502 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0822 14:20:22.729607 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7699\nI0822 14:20:22.729961 32502 solver.cpp:404]     Test net output #1: loss = 0.93613 (* 1 = 0.93613 loss)\nI0822 14:20:24.817307 32502 solver.cpp:228] Iteration 30000, loss = 0.0282832\nI0822 14:20:24.817353 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 14:20:24.817368 32502 solver.cpp:244]     Train net output #1: loss = 0.0282806 (* 1 = 0.0282806 loss)\nI0822 14:20:24.951282 32502 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0822 14:24:05.345548 32502 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0822 14:26:13.296026 32502 solver.cpp:404]     Test net output #0: accuracy = 0.779\nI0822 14:26:13.296399 32502 solver.cpp:404]     Test net output #1: loss = 0.967608 (* 1 = 0.967608 loss)\nI0822 14:26:15.383865 32502 solver.cpp:228] Iteration 30100, loss = 0.0296043\nI0822 14:26:15.383911 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 14:26:15.383927 32502 solver.cpp:244]     Train net output #1: loss = 0.0296017 (* 1 = 0.0296017 loss)\nI0822 14:26:15.528255 32502 sgd_solver.cpp:166] Iteration 30100, lr = 
0.35\nI0822 14:29:56.132323 32502 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0822 14:32:04.101449 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7861\nI0822 14:32:04.101794 32502 solver.cpp:404]     Test net output #1: loss = 0.906415 (* 1 = 0.906415 loss)\nI0822 14:32:06.189525 32502 solver.cpp:228] Iteration 30200, loss = 0.0245083\nI0822 14:32:06.189574 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:32:06.189589 32502 solver.cpp:244]     Train net output #1: loss = 0.0245058 (* 1 = 0.0245058 loss)\nI0822 14:32:06.326477 32502 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0822 14:35:46.734936 32502 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0822 14:37:54.698563 32502 solver.cpp:404]     Test net output #0: accuracy = 0.758\nI0822 14:37:54.698935 32502 solver.cpp:404]     Test net output #1: loss = 0.93177 (* 1 = 0.93177 loss)\nI0822 14:37:56.785934 32502 solver.cpp:228] Iteration 30300, loss = 0.0495238\nI0822 14:37:56.785984 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 14:37:56.785998 32502 solver.cpp:244]     Train net output #1: loss = 0.0495212 (* 1 = 0.0495212 loss)\nI0822 14:37:56.923590 32502 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0822 14:41:37.256296 32502 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0822 14:43:45.213789 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7801\nI0822 14:43:45.214159 32502 solver.cpp:404]     Test net output #1: loss = 1.04736 (* 1 = 1.04736 loss)\nI0822 14:43:47.301722 32502 solver.cpp:228] Iteration 30400, loss = 0.078279\nI0822 14:43:47.301769 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 14:43:47.301785 32502 solver.cpp:244]     Train net output #1: loss = 0.0782765 (* 1 = 0.0782765 loss)\nI0822 14:43:47.437865 32502 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0822 14:47:27.985363 32502 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0822 14:49:35.934504 32502 solver.cpp:404]     
Test net output #0: accuracy = 0.7456\nI0822 14:49:35.934880 32502 solver.cpp:404]     Test net output #1: loss = 0.993755 (* 1 = 0.993755 loss)\nI0822 14:49:38.023190 32502 solver.cpp:228] Iteration 30500, loss = 0.0729367\nI0822 14:49:38.023236 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 14:49:38.023252 32502 solver.cpp:244]     Train net output #1: loss = 0.0729341 (* 1 = 0.0729341 loss)\nI0822 14:49:38.157223 32502 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0822 14:53:18.601186 32502 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0822 14:55:26.544493 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7127\nI0822 14:55:26.544876 32502 solver.cpp:404]     Test net output #1: loss = 1.23531 (* 1 = 1.23531 loss)\nI0822 14:55:28.632550 32502 solver.cpp:228] Iteration 30600, loss = 0.107968\nI0822 14:55:28.632597 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 14:55:28.632613 32502 solver.cpp:244]     Train net output #1: loss = 0.107966 (* 1 = 0.107966 loss)\nI0822 14:55:28.773679 32502 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0822 14:59:09.337275 32502 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0822 15:01:17.280861 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6938\nI0822 15:01:17.281241 32502 solver.cpp:404]     Test net output #1: loss = 1.4041 (* 1 = 1.4041 loss)\nI0822 15:01:19.368450 32502 solver.cpp:228] Iteration 30700, loss = 0.0454635\nI0822 15:01:19.368499 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 15:01:19.368513 32502 solver.cpp:244]     Train net output #1: loss = 0.045461 (* 1 = 0.045461 loss)\nI0822 15:01:19.506849 32502 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0822 15:04:59.877225 32502 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0822 15:07:07.883013 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6905\nI0822 15:07:07.883384 32502 solver.cpp:404]     Test net output #1: loss = 1.48422 (* 1 = 1.48422 
loss)\nI0822 15:07:09.970851 32502 solver.cpp:228] Iteration 30800, loss = 0.0152609\nI0822 15:07:09.970899 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:07:09.970916 32502 solver.cpp:244]     Train net output #1: loss = 0.0152583 (* 1 = 0.0152583 loss)\nI0822 15:07:10.105594 32502 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0822 15:10:50.460577 32502 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0822 15:12:58.541262 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7194\nI0822 15:12:58.541657 32502 solver.cpp:404]     Test net output #1: loss = 1.08664 (* 1 = 1.08664 loss)\nI0822 15:13:00.629233 32502 solver.cpp:228] Iteration 30900, loss = 0.0204701\nI0822 15:13:00.629283 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 15:13:00.629299 32502 solver.cpp:244]     Train net output #1: loss = 0.0204676 (* 1 = 0.0204676 loss)\nI0822 15:13:00.768645 32502 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0822 15:16:41.167876 32502 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0822 15:18:49.186249 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7726\nI0822 15:18:49.186628 32502 solver.cpp:404]     Test net output #1: loss = 0.929528 (* 1 = 0.929528 loss)\nI0822 15:18:51.275216 32502 solver.cpp:228] Iteration 31000, loss = 0.100685\nI0822 15:18:51.275264 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 15:18:51.275280 32502 solver.cpp:244]     Train net output #1: loss = 0.100682 (* 1 = 0.100682 loss)\nI0822 15:18:51.414722 32502 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0822 15:22:32.142546 32502 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0822 15:24:40.150370 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7062\nI0822 15:24:40.150717 32502 solver.cpp:404]     Test net output #1: loss = 1.47278 (* 1 = 1.47278 loss)\nI0822 15:24:42.238729 32502 solver.cpp:228] Iteration 31100, loss = 0.0190134\nI0822 15:24:42.238780 32502 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0822 15:24:42.238796 32502 solver.cpp:244]     Train net output #1: loss = 0.0190108 (* 1 = 0.0190108 loss)\nI0822 15:24:42.384845 32502 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0822 15:28:22.685667 32502 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0822 15:30:30.698004 32502 solver.cpp:404]     Test net output #0: accuracy = 0.766\nI0822 15:30:30.698386 32502 solver.cpp:404]     Test net output #1: loss = 0.941657 (* 1 = 0.941657 loss)\nI0822 15:30:32.786180 32502 solver.cpp:228] Iteration 31200, loss = 0.0296723\nI0822 15:30:32.786227 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 15:30:32.786243 32502 solver.cpp:244]     Train net output #1: loss = 0.0296697 (* 1 = 0.0296697 loss)\nI0822 15:30:32.922765 32502 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0822 15:34:13.015889 32502 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0822 15:36:21.026051 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8003\nI0822 15:36:21.026437 32502 solver.cpp:404]     Test net output #1: loss = 0.748404 (* 1 = 0.748404 loss)\nI0822 15:36:23.114068 32502 solver.cpp:228] Iteration 31300, loss = 0.0826862\nI0822 15:36:23.114114 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 15:36:23.114131 32502 solver.cpp:244]     Train net output #1: loss = 0.0826837 (* 1 = 0.0826837 loss)\nI0822 15:36:23.257637 32502 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0822 15:40:03.795147 32502 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0822 15:42:11.816928 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7886\nI0822 15:42:11.817313 32502 solver.cpp:404]     Test net output #1: loss = 0.93826 (* 1 = 0.93826 loss)\nI0822 15:42:13.906437 32502 solver.cpp:228] Iteration 31400, loss = 0.0117268\nI0822 15:42:13.906486 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:42:13.906502 32502 solver.cpp:244]     Train net output #1: loss = 0.0117242 (* 1 = 
0.0117242 loss)\nI0822 15:42:14.041191 32502 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0822 15:45:54.308708 32502 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0822 15:48:02.332588 32502 solver.cpp:404]     Test net output #0: accuracy = 0.5747\nI0822 15:48:02.332975 32502 solver.cpp:404]     Test net output #1: loss = 2.90002 (* 1 = 2.90002 loss)\nI0822 15:48:04.421021 32502 solver.cpp:228] Iteration 31500, loss = 0.0425711\nI0822 15:48:04.421067 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 15:48:04.421083 32502 solver.cpp:244]     Train net output #1: loss = 0.0425685 (* 1 = 0.0425685 loss)\nI0822 15:48:04.559438 32502 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0822 15:51:45.003588 32502 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0822 15:53:53.057551 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7845\nI0822 15:53:53.057941 32502 solver.cpp:404]     Test net output #1: loss = 0.93781 (* 1 = 0.93781 loss)\nI0822 15:53:55.146100 32502 solver.cpp:228] Iteration 31600, loss = 0.0471334\nI0822 15:53:55.146145 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 15:53:55.146162 32502 solver.cpp:244]     Train net output #1: loss = 0.0471309 (* 1 = 0.0471309 loss)\nI0822 15:53:55.281551 32502 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0822 15:57:35.656293 32502 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0822 15:59:43.757133 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7796\nI0822 15:59:43.757508 32502 solver.cpp:404]     Test net output #1: loss = 0.914016 (* 1 = 0.914016 loss)\nI0822 15:59:45.846143 32502 solver.cpp:228] Iteration 31700, loss = 0.0724567\nI0822 15:59:45.846190 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 15:59:45.846207 32502 solver.cpp:244]     Train net output #1: loss = 0.0724542 (* 1 = 0.0724542 loss)\nI0822 15:59:45.983055 32502 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0822 16:03:26.310518 32502 
solver.cpp:337] Iteration 31800, Testing net (#0)\nI0822 16:05:34.402374 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7306\nI0822 16:05:34.402753 32502 solver.cpp:404]     Test net output #1: loss = 1.32671 (* 1 = 1.32671 loss)\nI0822 16:05:36.492725 32502 solver.cpp:228] Iteration 31800, loss = 0.0126697\nI0822 16:05:36.492776 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:05:36.492801 32502 solver.cpp:244]     Train net output #1: loss = 0.0126672 (* 1 = 0.0126672 loss)\nI0822 16:05:36.632323 32502 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0822 16:09:17.100661 32502 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0822 16:11:25.248400 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7209\nI0822 16:11:25.248781 32502 solver.cpp:404]     Test net output #1: loss = 1.30426 (* 1 = 1.30426 loss)\nI0822 16:11:27.332173 32502 solver.cpp:228] Iteration 31900, loss = 0.019207\nI0822 16:11:27.332224 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:11:27.332249 32502 solver.cpp:244]     Train net output #1: loss = 0.0192044 (* 1 = 0.0192044 loss)\nI0822 16:11:27.477414 32502 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0822 16:15:07.778692 32502 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0822 16:17:15.964262 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6981\nI0822 16:17:15.964668 32502 solver.cpp:404]     Test net output #1: loss = 1.31193 (* 1 = 1.31193 loss)\nI0822 16:17:18.048292 32502 solver.cpp:228] Iteration 32000, loss = 0.033676\nI0822 16:17:18.048344 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 16:17:18.048368 32502 solver.cpp:244]     Train net output #1: loss = 0.0336734 (* 1 = 0.0336734 loss)\nI0822 16:17:18.191756 32502 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0822 16:20:58.467316 32502 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0822 16:23:06.581117 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7\nI0822 
16:23:06.581497 32502 solver.cpp:404]     Test net output #1: loss = 1.32199 (* 1 = 1.32199 loss)\nI0822 16:23:08.664903 32502 solver.cpp:228] Iteration 32100, loss = 0.0691302\nI0822 16:23:08.664954 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 16:23:08.664979 32502 solver.cpp:244]     Train net output #1: loss = 0.0691277 (* 1 = 0.0691277 loss)\nI0822 16:23:08.813926 32502 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0822 16:26:49.249709 32502 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0822 16:28:57.428329 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7924\nI0822 16:28:57.428706 32502 solver.cpp:404]     Test net output #1: loss = 0.895467 (* 1 = 0.895467 loss)\nI0822 16:28:59.512753 32502 solver.cpp:228] Iteration 32200, loss = 0.0281053\nI0822 16:28:59.512804 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 16:28:59.512828 32502 solver.cpp:244]     Train net output #1: loss = 0.0281028 (* 1 = 0.0281028 loss)\nI0822 16:28:59.660306 32502 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0822 16:32:40.093994 32502 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0822 16:34:48.085261 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7373\nI0822 16:34:48.085649 32502 solver.cpp:404]     Test net output #1: loss = 1.21617 (* 1 = 1.21617 loss)\nI0822 16:34:50.168612 32502 solver.cpp:228] Iteration 32300, loss = 0.124209\nI0822 16:34:50.168664 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 16:34:50.168689 32502 solver.cpp:244]     Train net output #1: loss = 0.124206 (* 1 = 0.124206 loss)\nI0822 16:34:50.314424 32502 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0822 16:38:30.518934 32502 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0822 16:40:38.511121 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7728\nI0822 16:40:38.511505 32502 solver.cpp:404]     Test net output #1: loss = 0.957314 (* 1 = 0.957314 loss)\nI0822 16:40:40.595384 32502 
solver.cpp:228] Iteration 32400, loss = 0.0558037\nI0822 16:40:40.595438 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 16:40:40.595461 32502 solver.cpp:244]     Train net output #1: loss = 0.0558013 (* 1 = 0.0558013 loss)\nI0822 16:40:40.741253 32502 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0822 16:44:21.031481 32502 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0822 16:46:29.021654 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI0822 16:46:29.022011 32502 solver.cpp:404]     Test net output #1: loss = 0.809991 (* 1 = 0.809991 loss)\nI0822 16:46:31.104928 32502 solver.cpp:228] Iteration 32500, loss = 0.0650373\nI0822 16:46:31.104980 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 16:46:31.105005 32502 solver.cpp:244]     Train net output #1: loss = 0.0650348 (* 1 = 0.0650348 loss)\nI0822 16:46:31.251214 32502 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0822 16:50:11.631348 32502 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0822 16:52:19.661425 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8198\nI0822 16:52:19.661814 32502 solver.cpp:404]     Test net output #1: loss = 0.758229 (* 1 = 0.758229 loss)\nI0822 16:52:21.745846 32502 solver.cpp:228] Iteration 32600, loss = 0.0455017\nI0822 16:52:21.745898 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 16:52:21.745924 32502 solver.cpp:244]     Train net output #1: loss = 0.0454993 (* 1 = 0.0454993 loss)\nI0822 16:52:21.883023 32502 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0822 16:56:02.236111 32502 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0822 16:58:10.277564 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7538\nI0822 16:58:10.277945 32502 solver.cpp:404]     Test net output #1: loss = 1.01158 (* 1 = 1.01158 loss)\nI0822 16:58:12.361724 32502 solver.cpp:228] Iteration 32700, loss = 0.0145837\nI0822 16:58:12.361774 32502 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0822 16:58:12.361799 32502 solver.cpp:244]     Train net output #1: loss = 0.0145812 (* 1 = 0.0145812 loss)\nI0822 16:58:12.506764 32502 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0822 17:01:53.017946 32502 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0822 17:04:01.059170 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6646\nI0822 17:04:01.059553 32502 solver.cpp:404]     Test net output #1: loss = 1.63235 (* 1 = 1.63235 loss)\nI0822 17:04:03.147678 32502 solver.cpp:228] Iteration 32800, loss = 0.0246358\nI0822 17:04:03.147728 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 17:04:03.147753 32502 solver.cpp:244]     Train net output #1: loss = 0.0246333 (* 1 = 0.0246333 loss)\nI0822 17:04:03.279480 32502 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0822 17:07:43.478554 32502 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0822 17:09:51.520025 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7362\nI0822 17:09:51.520414 32502 solver.cpp:404]     Test net output #1: loss = 1.04785 (* 1 = 1.04785 loss)\nI0822 17:09:53.610013 32502 solver.cpp:228] Iteration 32900, loss = 0.0371764\nI0822 17:09:53.610069 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 17:09:53.610093 32502 solver.cpp:244]     Train net output #1: loss = 0.0371739 (* 1 = 0.0371739 loss)\nI0822 17:09:53.749218 32502 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0822 17:13:34.113310 32502 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0822 17:15:42.153087 32502 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0822 17:15:42.153463 32502 solver.cpp:404]     Test net output #1: loss = 1.28821 (* 1 = 1.28821 loss)\nI0822 17:15:44.241767 32502 solver.cpp:228] Iteration 33000, loss = 0.0378602\nI0822 17:15:44.241816 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 17:15:44.241842 32502 solver.cpp:244]     Train net output #1: loss = 0.0378577 (* 1 = 0.0378577 loss)\nI0822 
17:15:44.384902 32502 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0822 17:19:24.620543 32502 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0822 17:21:32.616206 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7348\nI0822 17:21:32.616590 32502 solver.cpp:404]     Test net output #1: loss = 1.20371 (* 1 = 1.20371 loss)\nI0822 17:21:34.704108 32502 solver.cpp:228] Iteration 33100, loss = 0.0483974\nI0822 17:21:34.704159 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 17:21:34.704183 32502 solver.cpp:244]     Train net output #1: loss = 0.0483949 (* 1 = 0.0483949 loss)\nI0822 17:21:34.841570 32502 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0822 17:25:15.117553 32502 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0822 17:27:23.113160 32502 solver.cpp:404]     Test net output #0: accuracy = 0.74\nI0822 17:27:23.113538 32502 solver.cpp:404]     Test net output #1: loss = 1.14145 (* 1 = 1.14145 loss)\nI0822 17:27:25.202059 32502 solver.cpp:228] Iteration 33200, loss = 0.0133371\nI0822 17:27:25.202113 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:27:25.202142 32502 solver.cpp:244]     Train net output #1: loss = 0.0133346 (* 1 = 0.0133346 loss)\nI0822 17:27:25.343176 32502 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0822 17:31:05.738181 32502 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0822 17:33:13.835290 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7225\nI0822 17:33:13.835690 32502 solver.cpp:404]     Test net output #1: loss = 1.28618 (* 1 = 1.28618 loss)\nI0822 17:33:15.924727 32502 solver.cpp:228] Iteration 33300, loss = 0.0286345\nI0822 17:33:15.924778 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:33:15.924803 32502 solver.cpp:244]     Train net output #1: loss = 0.028632 (* 1 = 0.028632 loss)\nI0822 17:33:16.062347 32502 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0822 17:36:56.498215 32502 solver.cpp:337] Iteration 33400, Testing net 
(#0)\nI0822 17:39:04.494308 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7401\nI0822 17:39:04.494691 32502 solver.cpp:404]     Test net output #1: loss = 1.32779 (* 1 = 1.32779 loss)\nI0822 17:39:06.583083 32502 solver.cpp:228] Iteration 33400, loss = 0.0568109\nI0822 17:39:06.583135 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 17:39:06.583160 32502 solver.cpp:244]     Train net output #1: loss = 0.0568085 (* 1 = 0.0568085 loss)\nI0822 17:39:06.723711 32502 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0822 17:42:47.077879 32502 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0822 17:44:55.086020 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7434\nI0822 17:44:55.086400 32502 solver.cpp:404]     Test net output #1: loss = 1.21574 (* 1 = 1.21574 loss)\nI0822 17:44:57.174993 32502 solver.cpp:228] Iteration 33500, loss = 0.0502774\nI0822 17:44:57.175048 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 17:44:57.175074 32502 solver.cpp:244]     Train net output #1: loss = 0.0502749 (* 1 = 0.0502749 loss)\nI0822 17:44:57.311848 32502 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0822 17:48:37.858700 32502 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0822 17:50:45.857640 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7311\nI0822 17:50:45.858029 32502 solver.cpp:404]     Test net output #1: loss = 1.24496 (* 1 = 1.24496 loss)\nI0822 17:50:47.945164 32502 solver.cpp:228] Iteration 33600, loss = 0.077958\nI0822 17:50:47.945215 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 17:50:47.945240 32502 solver.cpp:244]     Train net output #1: loss = 0.0779555 (* 1 = 0.0779555 loss)\nI0822 17:50:48.080375 32502 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0822 17:54:28.557751 32502 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0822 17:56:36.548550 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7637\nI0822 17:56:36.548933 32502 
solver.cpp:404]     Test net output #1: loss = 1.02565 (* 1 = 1.02565 loss)\nI0822 17:56:38.638093 32502 solver.cpp:228] Iteration 33700, loss = 0.0113102\nI0822 17:56:38.638145 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:56:38.638170 32502 solver.cpp:244]     Train net output #1: loss = 0.0113077 (* 1 = 0.0113077 loss)\nI0822 17:56:38.769800 32502 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0822 18:00:19.098770 32502 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0822 18:02:27.111677 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6986\nI0822 18:02:27.112064 32502 solver.cpp:404]     Test net output #1: loss = 1.45974 (* 1 = 1.45974 loss)\nI0822 18:02:29.201130 32502 solver.cpp:228] Iteration 33800, loss = 0.0341289\nI0822 18:02:29.201181 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 18:02:29.201205 32502 solver.cpp:244]     Train net output #1: loss = 0.0341265 (* 1 = 0.0341265 loss)\nI0822 18:02:29.343101 32502 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0822 18:06:09.701758 32502 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0822 18:08:17.812268 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7707\nI0822 18:08:17.812654 32502 solver.cpp:404]     Test net output #1: loss = 1.00428 (* 1 = 1.00428 loss)\nI0822 18:08:19.901206 32502 solver.cpp:228] Iteration 33900, loss = 0.101766\nI0822 18:08:19.901257 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 18:08:19.901281 32502 solver.cpp:244]     Train net output #1: loss = 0.101764 (* 1 = 0.101764 loss)\nI0822 18:08:20.038102 32502 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0822 18:12:00.308320 32502 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0822 18:14:08.443436 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7833\nI0822 18:14:08.443835 32502 solver.cpp:404]     Test net output #1: loss = 0.912164 (* 1 = 0.912164 loss)\nI0822 18:14:10.532585 32502 solver.cpp:228] Iteration 34000, 
loss = 0.0821121\nI0822 18:14:10.532634 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 18:14:10.532650 32502 solver.cpp:244]     Train net output #1: loss = 0.0821097 (* 1 = 0.0821097 loss)\nI0822 18:14:10.669111 32502 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0822 18:17:50.938017 32502 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0822 18:19:59.926316 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7738\nI0822 18:19:59.926725 32502 solver.cpp:404]     Test net output #1: loss = 1.13817 (* 1 = 1.13817 loss)\nI0822 18:20:02.020087 32502 solver.cpp:228] Iteration 34100, loss = 0.0344088\nI0822 18:20:02.020143 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 18:20:02.020161 32502 solver.cpp:244]     Train net output #1: loss = 0.0344064 (* 1 = 0.0344064 loss)\nI0822 18:20:02.155932 32502 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0822 18:23:42.531029 32502 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0822 18:25:50.650228 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7756\nI0822 18:25:50.650579 32502 solver.cpp:404]     Test net output #1: loss = 0.941861 (* 1 = 0.941861 loss)\nI0822 18:25:52.739001 32502 solver.cpp:228] Iteration 34200, loss = 0.128501\nI0822 18:25:52.739047 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 18:25:52.739063 32502 solver.cpp:244]     Train net output #1: loss = 0.128498 (* 1 = 0.128498 loss)\nI0822 18:25:52.878204 32502 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0822 18:29:33.113389 32502 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0822 18:31:41.127636 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7354\nI0822 18:31:41.128017 32502 solver.cpp:404]     Test net output #1: loss = 1.09718 (* 1 = 1.09718 loss)\nI0822 18:31:43.216130 32502 solver.cpp:228] Iteration 34300, loss = 0.0377317\nI0822 18:31:43.216176 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 18:31:43.216193 32502 
solver.cpp:244]     Train net output #1: loss = 0.0377293 (* 1 = 0.0377293 loss)\nI0822 18:31:43.358522 32502 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0822 18:35:23.811357 32502 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0822 18:37:31.825723 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8083\nI0822 18:37:31.826089 32502 solver.cpp:404]     Test net output #1: loss = 0.799926 (* 1 = 0.799926 loss)\nI0822 18:37:33.914077 32502 solver.cpp:228] Iteration 34400, loss = 0.0413662\nI0822 18:37:33.914120 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 18:37:33.914137 32502 solver.cpp:244]     Train net output #1: loss = 0.0413638 (* 1 = 0.0413638 loss)\nI0822 18:37:34.056016 32502 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0822 18:41:14.190690 32502 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0822 18:43:22.216053 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7578\nI0822 18:43:22.216421 32502 solver.cpp:404]     Test net output #1: loss = 1.09992 (* 1 = 1.09992 loss)\nI0822 18:43:24.304595 32502 solver.cpp:228] Iteration 34500, loss = 0.0236364\nI0822 18:43:24.304641 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 18:43:24.304658 32502 solver.cpp:244]     Train net output #1: loss = 0.0236341 (* 1 = 0.0236341 loss)\nI0822 18:43:24.444077 32502 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0822 18:47:04.902938 32502 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0822 18:49:12.892127 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8269\nI0822 18:49:12.892488 32502 solver.cpp:404]     Test net output #1: loss = 0.670364 (* 1 = 0.670364 loss)\nI0822 18:49:14.980701 32502 solver.cpp:228] Iteration 34600, loss = 0.120629\nI0822 18:49:14.980748 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 18:49:14.980764 32502 solver.cpp:244]     Train net output #1: loss = 0.120627 (* 1 = 0.120627 loss)\nI0822 18:49:15.120707 32502 sgd_solver.cpp:166] 
Iteration 34600, lr = 0.35\nI0822 18:52:55.318663 32502 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0822 18:55:03.303525 32502 solver.cpp:404]     Test net output #0: accuracy = 0.792\nI0822 18:55:03.303890 32502 solver.cpp:404]     Test net output #1: loss = 0.802601 (* 1 = 0.802601 loss)\nI0822 18:55:05.392072 32502 solver.cpp:228] Iteration 34700, loss = 0.0573499\nI0822 18:55:05.392119 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 18:55:05.392137 32502 solver.cpp:244]     Train net output #1: loss = 0.0573476 (* 1 = 0.0573476 loss)\nI0822 18:55:05.527854 32502 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0822 18:58:45.898007 32502 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0822 19:00:53.909034 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6998\nI0822 19:00:53.909399 32502 solver.cpp:404]     Test net output #1: loss = 1.486 (* 1 = 1.486 loss)\nI0822 19:00:55.998034 32502 solver.cpp:228] Iteration 34800, loss = 0.0762157\nI0822 19:00:55.998085 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 19:00:55.998109 32502 solver.cpp:244]     Train net output #1: loss = 0.0762133 (* 1 = 0.0762133 loss)\nI0822 19:00:56.139852 32502 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0822 19:04:36.565342 32502 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0822 19:06:44.560050 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7431\nI0822 19:06:44.560400 32502 solver.cpp:404]     Test net output #1: loss = 1.16054 (* 1 = 1.16054 loss)\nI0822 19:06:46.648794 32502 solver.cpp:228] Iteration 34900, loss = 0.0540795\nI0822 19:06:46.648845 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 19:06:46.648869 32502 solver.cpp:244]     Train net output #1: loss = 0.0540771 (* 1 = 0.0540771 loss)\nI0822 19:06:46.782316 32502 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0822 19:10:26.959074 32502 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0822 19:12:34.966703 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.748\nI0822 19:12:34.967121 32502 solver.cpp:404]     Test net output #1: loss = 1.14131 (* 1 = 1.14131 loss)\nI0822 19:12:37.050479 32502 solver.cpp:228] Iteration 35000, loss = 0.0437409\nI0822 19:12:37.050530 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 19:12:37.050554 32502 solver.cpp:244]     Train net output #1: loss = 0.0437386 (* 1 = 0.0437386 loss)\nI0822 19:12:37.196213 32502 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0822 19:16:17.530959 32502 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0822 19:18:25.590128 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7511\nI0822 19:18:25.590488 32502 solver.cpp:404]     Test net output #1: loss = 1.16999 (* 1 = 1.16999 loss)\nI0822 19:18:27.673589 32502 solver.cpp:228] Iteration 35100, loss = 0.0546505\nI0822 19:18:27.673638 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 19:18:27.673662 32502 solver.cpp:244]     Train net output #1: loss = 0.0546482 (* 1 = 0.0546482 loss)\nI0822 19:18:27.810642 32502 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0822 19:22:07.955214 32502 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0822 19:24:15.999321 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7232\nI0822 19:24:15.999685 32502 solver.cpp:404]     Test net output #1: loss = 1.31136 (* 1 = 1.31136 loss)\nI0822 19:24:18.083544 32502 solver.cpp:228] Iteration 35200, loss = 0.052033\nI0822 19:24:18.083597 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 19:24:18.083622 32502 solver.cpp:244]     Train net output #1: loss = 0.0520307 (* 1 = 0.0520307 loss)\nI0822 19:24:18.243414 32502 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0822 19:27:59.618918 32502 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0822 19:30:09.303333 32502 solver.cpp:404]     Test net output #0: accuracy = 0.669\nI0822 19:30:09.303736 32502 solver.cpp:404]     Test net output #1: loss = 
1.48879 (* 1 = 1.48879 loss)\nI0822 19:30:11.391659 32502 solver.cpp:228] Iteration 35300, loss = 0.0199434\nI0822 19:30:11.391722 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:30:11.391741 32502 solver.cpp:244]     Train net output #1: loss = 0.0199411 (* 1 = 0.0199411 loss)\nI0822 19:30:11.531853 32502 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0822 19:33:52.861032 32502 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0822 19:36:02.553297 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7412\nI0822 19:36:02.553699 32502 solver.cpp:404]     Test net output #1: loss = 1.20601 (* 1 = 1.20601 loss)\nI0822 19:36:04.641222 32502 solver.cpp:228] Iteration 35400, loss = 0.0884998\nI0822 19:36:04.641284 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 19:36:04.641304 32502 solver.cpp:244]     Train net output #1: loss = 0.0884975 (* 1 = 0.0884975 loss)\nI0822 19:36:04.791463 32502 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0822 19:39:46.282395 32502 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0822 19:41:55.984488 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6354\nI0822 19:41:55.984885 32502 solver.cpp:404]     Test net output #1: loss = 2.1789 (* 1 = 2.1789 loss)\nI0822 19:41:58.072607 32502 solver.cpp:228] Iteration 35500, loss = 0.0228251\nI0822 19:41:58.072670 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 19:41:58.072688 32502 solver.cpp:244]     Train net output #1: loss = 0.0228228 (* 1 = 0.0228228 loss)\nI0822 19:41:58.216888 32502 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0822 19:45:39.597513 32502 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0822 19:47:49.289857 32502 solver.cpp:404]     Test net output #0: accuracy = 0.701\nI0822 19:47:49.290302 32502 solver.cpp:404]     Test net output #1: loss = 1.48256 (* 1 = 1.48256 loss)\nI0822 19:47:51.377985 32502 solver.cpp:228] Iteration 35600, loss = 0.010368\nI0822 19:47:51.378053 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:47:51.378077 32502 solver.cpp:244]     Train net output #1: loss = 0.0103657 (* 1 = 0.0103657 loss)\nI0822 19:47:51.527688 32502 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0822 19:51:32.585067 32502 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0822 19:53:40.637295 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7358\nI0822 19:53:40.637677 32502 solver.cpp:404]     Test net output #1: loss = 1.29697 (* 1 = 1.29697 loss)\nI0822 19:53:42.727293 32502 solver.cpp:228] Iteration 35700, loss = 0.0139836\nI0822 19:53:42.727344 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:53:42.727368 32502 solver.cpp:244]     Train net output #1: loss = 0.0139813 (* 1 = 0.0139813 loss)\nI0822 19:53:42.871896 32502 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0822 19:57:23.809653 32502 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0822 19:59:31.856492 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7574\nI0822 19:59:31.856871 32502 solver.cpp:404]     Test net output #1: loss = 0.939068 (* 1 = 0.939068 loss)\nI0822 19:59:33.946774 32502 solver.cpp:228] Iteration 35800, loss = 0.0518744\nI0822 19:59:33.946825 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 19:59:33.946849 32502 solver.cpp:244]     Train net output #1: loss = 0.0518721 (* 1 = 0.0518721 loss)\nI0822 19:59:34.092994 32502 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0822 20:03:14.877372 32502 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0822 20:05:22.931303 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7327\nI0822 20:05:22.931673 32502 solver.cpp:404]     Test net output #1: loss = 1.04141 (* 1 = 1.04141 loss)\nI0822 20:05:25.020563 32502 solver.cpp:228] Iteration 35900, loss = 0.0466097\nI0822 20:05:25.020613 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 20:05:25.020638 32502 solver.cpp:244]     Train net output #1: loss = 
0.0466074 (* 1 = 0.0466074 loss)\nI0822 20:05:25.167446 32502 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0822 20:09:06.291544 32502 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0822 20:11:14.349898 32502 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI0822 20:11:14.350282 32502 solver.cpp:404]     Test net output #1: loss = 0.948234 (* 1 = 0.948234 loss)\nI0822 20:11:16.440171 32502 solver.cpp:228] Iteration 36000, loss = 0.0395783\nI0822 20:11:16.440222 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:11:16.440246 32502 solver.cpp:244]     Train net output #1: loss = 0.039576 (* 1 = 0.039576 loss)\nI0822 20:11:16.582170 32502 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0822 20:14:57.415751 32502 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0822 20:17:05.465399 32502 solver.cpp:404]     Test net output #0: accuracy = 0.757\nI0822 20:17:05.465777 32502 solver.cpp:404]     Test net output #1: loss = 1.11562 (* 1 = 1.11562 loss)\nI0822 20:17:07.555150 32502 solver.cpp:228] Iteration 36100, loss = 0.0385723\nI0822 20:17:07.555202 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 20:17:07.555227 32502 solver.cpp:244]     Train net output #1: loss = 0.03857 (* 1 = 0.03857 loss)\nI0822 20:17:07.702467 32502 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0822 20:20:48.524237 32502 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0822 20:22:56.582607 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7584\nI0822 20:22:56.582983 32502 solver.cpp:404]     Test net output #1: loss = 1.17004 (* 1 = 1.17004 loss)\nI0822 20:22:58.672078 32502 solver.cpp:228] Iteration 36200, loss = 0.0137313\nI0822 20:22:58.672128 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:22:58.672152 32502 solver.cpp:244]     Train net output #1: loss = 0.013729 (* 1 = 0.013729 loss)\nI0822 20:22:58.813851 32502 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0822 20:26:39.705757 32502 
solver.cpp:337] Iteration 36300, Testing net (#0)\nI0822 20:28:47.748212 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7927\nI0822 20:28:47.748541 32502 solver.cpp:404]     Test net output #1: loss = 0.868611 (* 1 = 0.868611 loss)\nI0822 20:28:49.837466 32502 solver.cpp:228] Iteration 36300, loss = 0.0333387\nI0822 20:28:49.837517 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 20:28:49.837541 32502 solver.cpp:244]     Train net output #1: loss = 0.0333364 (* 1 = 0.0333364 loss)\nI0822 20:28:49.980159 32502 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0822 20:32:30.721807 32502 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0822 20:34:38.795778 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8108\nI0822 20:34:38.796155 32502 solver.cpp:404]     Test net output #1: loss = 0.768416 (* 1 = 0.768416 loss)\nI0822 20:34:40.884677 32502 solver.cpp:228] Iteration 36400, loss = 0.0488199\nI0822 20:34:40.884727 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 20:34:40.884752 32502 solver.cpp:244]     Train net output #1: loss = 0.0488176 (* 1 = 0.0488176 loss)\nI0822 20:34:41.028934 32502 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0822 20:38:21.996153 32502 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0822 20:40:30.078553 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7234\nI0822 20:40:30.078910 32502 solver.cpp:404]     Test net output #1: loss = 1.14517 (* 1 = 1.14517 loss)\nI0822 20:40:32.167968 32502 solver.cpp:228] Iteration 36500, loss = 0.0652296\nI0822 20:40:32.168025 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 20:40:32.168050 32502 solver.cpp:244]     Train net output #1: loss = 0.0652272 (* 1 = 0.0652272 loss)\nI0822 20:40:32.313937 32502 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0822 20:44:13.108547 32502 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0822 20:46:21.182682 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.7823\nI0822 20:46:21.183070 32502 solver.cpp:404]     Test net output #1: loss = 0.958784 (* 1 = 0.958784 loss)\nI0822 20:46:23.272804 32502 solver.cpp:228] Iteration 36600, loss = 0.0613691\nI0822 20:46:23.272855 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 20:46:23.272881 32502 solver.cpp:244]     Train net output #1: loss = 0.0613667 (* 1 = 0.0613667 loss)\nI0822 20:46:23.412762 32502 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0822 20:50:04.306336 32502 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0822 20:52:12.344944 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7554\nI0822 20:52:12.345324 32502 solver.cpp:404]     Test net output #1: loss = 1.06011 (* 1 = 1.06011 loss)\nI0822 20:52:14.434202 32502 solver.cpp:228] Iteration 36700, loss = 0.0247213\nI0822 20:52:14.434252 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:52:14.434276 32502 solver.cpp:244]     Train net output #1: loss = 0.0247189 (* 1 = 0.0247189 loss)\nI0822 20:52:14.581475 32502 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0822 20:55:55.427258 32502 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0822 20:58:03.455215 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7007\nI0822 20:58:03.455585 32502 solver.cpp:404]     Test net output #1: loss = 1.40294 (* 1 = 1.40294 loss)\nI0822 20:58:05.544463 32502 solver.cpp:228] Iteration 36800, loss = 0.0441384\nI0822 20:58:05.544514 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 20:58:05.544538 32502 solver.cpp:244]     Train net output #1: loss = 0.044136 (* 1 = 0.044136 loss)\nI0822 20:58:05.690716 32502 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0822 21:01:46.868808 32502 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0822 21:03:56.537243 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7865\nI0822 21:03:56.537675 32502 solver.cpp:404]     Test net output #1: loss = 0.916927 (* 1 = 0.916927 loss)\nI0822 21:03:58.629566 
32502 solver.cpp:228] Iteration 36900, loss = 0.124101\nI0822 21:03:58.629631 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 21:03:58.629649 32502 solver.cpp:244]     Train net output #1: loss = 0.124098 (* 1 = 0.124098 loss)\nI0822 21:03:58.768122 32502 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0822 21:07:40.191331 32502 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0822 21:09:49.852883 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8016\nI0822 21:09:49.853294 32502 solver.cpp:404]     Test net output #1: loss = 0.863744 (* 1 = 0.863744 loss)\nI0822 21:09:51.945003 32502 solver.cpp:228] Iteration 37000, loss = 0.0500411\nI0822 21:09:51.945067 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 21:09:51.945086 32502 solver.cpp:244]     Train net output #1: loss = 0.0500388 (* 1 = 0.0500388 loss)\nI0822 21:09:52.087589 32502 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0822 21:13:33.434540 32502 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0822 21:15:43.088408 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8137\nI0822 21:15:43.088821 32502 solver.cpp:404]     Test net output #1: loss = 0.688451 (* 1 = 0.688451 loss)\nI0822 21:15:45.180951 32502 solver.cpp:228] Iteration 37100, loss = 0.0379225\nI0822 21:15:45.181015 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 21:15:45.181032 32502 solver.cpp:244]     Train net output #1: loss = 0.0379201 (* 1 = 0.0379201 loss)\nI0822 21:15:45.321086 32502 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0822 21:19:26.625938 32502 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0822 21:21:36.283717 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7553\nI0822 21:21:36.284163 32502 solver.cpp:404]     Test net output #1: loss = 1.06164 (* 1 = 1.06164 loss)\nI0822 21:21:38.377146 32502 solver.cpp:228] Iteration 37200, loss = 0.0128488\nI0822 21:21:38.377210 32502 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0822 21:21:38.377228 32502 solver.cpp:244]     Train net output #1: loss = 0.0128464 (* 1 = 0.0128464 loss)\nI0822 21:21:38.517369 32502 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0822 21:25:19.850080 32502 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0822 21:27:29.504241 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6832\nI0822 21:27:29.504657 32502 solver.cpp:404]     Test net output #1: loss = 1.49069 (* 1 = 1.49069 loss)\nI0822 21:27:31.597342 32502 solver.cpp:228] Iteration 37300, loss = 0.10664\nI0822 21:27:31.597406 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 21:27:31.597424 32502 solver.cpp:244]     Train net output #1: loss = 0.106637 (* 1 = 0.106637 loss)\nI0822 21:27:31.735847 32502 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0822 21:31:13.054090 32502 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0822 21:33:22.717308 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8007\nI0822 21:33:22.717712 32502 solver.cpp:404]     Test net output #1: loss = 0.841241 (* 1 = 0.841241 loss)\nI0822 21:33:24.811079 32502 solver.cpp:228] Iteration 37400, loss = 0.105673\nI0822 21:33:24.811141 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0822 21:33:24.811157 32502 solver.cpp:244]     Train net output #1: loss = 0.10567 (* 1 = 0.10567 loss)\nI0822 21:33:24.947201 32502 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0822 21:37:06.274540 32502 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0822 21:39:15.932822 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6871\nI0822 21:39:15.933238 32502 solver.cpp:404]     Test net output #1: loss = 1.47456 (* 1 = 1.47456 loss)\nI0822 21:39:18.025382 32502 solver.cpp:228] Iteration 37500, loss = 0.0550005\nI0822 21:39:18.025449 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 21:39:18.025465 32502 solver.cpp:244]     Train net output #1: loss = 0.0549981 (* 1 = 0.0549981 loss)\nI0822 
21:39:18.166141 32502 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0822 21:42:59.498242 32502 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0822 21:45:09.135447 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7126\nI0822 21:45:09.135861 32502 solver.cpp:404]     Test net output #1: loss = 1.26822 (* 1 = 1.26822 loss)\nI0822 21:45:11.229039 32502 solver.cpp:228] Iteration 37600, loss = 0.0550361\nI0822 21:45:11.229104 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 21:45:11.229122 32502 solver.cpp:244]     Train net output #1: loss = 0.0550337 (* 1 = 0.0550337 loss)\nI0822 21:45:11.365167 32502 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0822 21:48:52.611984 32502 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0822 21:51:02.261335 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8119\nI0822 21:51:02.261731 32502 solver.cpp:404]     Test net output #1: loss = 0.77737 (* 1 = 0.77737 loss)\nI0822 21:51:04.354311 32502 solver.cpp:228] Iteration 37700, loss = 0.0333058\nI0822 21:51:04.354375 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 21:51:04.354393 32502 solver.cpp:244]     Train net output #1: loss = 0.0333034 (* 1 = 0.0333034 loss)\nI0822 21:51:04.498877 32502 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0822 21:54:46.004627 32502 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0822 21:56:55.640333 32502 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0822 21:56:55.640738 32502 solver.cpp:404]     Test net output #1: loss = 0.881808 (* 1 = 0.881808 loss)\nI0822 21:56:57.733144 32502 solver.cpp:228] Iteration 37800, loss = 0.0547351\nI0822 21:56:57.733206 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 21:56:57.733224 32502 solver.cpp:244]     Train net output #1: loss = 0.0547327 (* 1 = 0.0547327 loss)\nI0822 21:56:57.875404 32502 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0822 22:00:39.079684 32502 solver.cpp:337] Iteration 37900, 
Testing net (#0)\nI0822 22:02:48.715950 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7861\nI0822 22:02:48.716369 32502 solver.cpp:404]     Test net output #1: loss = 1.0436 (* 1 = 1.0436 loss)\nI0822 22:02:50.809770 32502 solver.cpp:228] Iteration 37900, loss = 0.0913149\nI0822 22:02:50.809834 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 22:02:50.809851 32502 solver.cpp:244]     Train net output #1: loss = 0.0913125 (* 1 = 0.0913125 loss)\nI0822 22:02:50.950688 32502 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0822 22:06:32.424909 32502 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0822 22:08:42.072266 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8035\nI0822 22:08:42.072688 32502 solver.cpp:404]     Test net output #1: loss = 0.84745 (* 1 = 0.84745 loss)\nI0822 22:08:44.165701 32502 solver.cpp:228] Iteration 38000, loss = 0.028652\nI0822 22:08:44.165765 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 22:08:44.165783 32502 solver.cpp:244]     Train net output #1: loss = 0.0286496 (* 1 = 0.0286496 loss)\nI0822 22:08:44.305903 32502 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0822 22:12:25.543607 32502 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0822 22:14:35.184658 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8083\nI0822 22:14:35.185065 32502 solver.cpp:404]     Test net output #1: loss = 0.817919 (* 1 = 0.817919 loss)\nI0822 22:14:37.278306 32502 solver.cpp:228] Iteration 38100, loss = 0.054645\nI0822 22:14:37.278369 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 22:14:37.278388 32502 solver.cpp:244]     Train net output #1: loss = 0.0546426 (* 1 = 0.0546426 loss)\nI0822 22:14:37.418432 32502 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0822 22:18:18.744587 32502 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0822 22:20:28.393190 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7341\nI0822 22:20:28.393591 32502 
solver.cpp:404]     Test net output #1: loss = 1.24048 (* 1 = 1.24048 loss)\nI0822 22:20:30.486354 32502 solver.cpp:228] Iteration 38200, loss = 0.0940841\nI0822 22:20:30.486419 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 22:20:30.486438 32502 solver.cpp:244]     Train net output #1: loss = 0.0940817 (* 1 = 0.0940817 loss)\nI0822 22:20:30.621505 32502 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0822 22:24:11.933125 32502 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0822 22:26:21.574995 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8161\nI0822 22:26:21.575418 32502 solver.cpp:404]     Test net output #1: loss = 0.678007 (* 1 = 0.678007 loss)\nI0822 22:26:23.668097 32502 solver.cpp:228] Iteration 38300, loss = 0.0233302\nI0822 22:26:23.668161 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 22:26:23.668179 32502 solver.cpp:244]     Train net output #1: loss = 0.0233278 (* 1 = 0.0233278 loss)\nI0822 22:26:23.808349 32502 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0822 22:30:05.209298 32502 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0822 22:32:14.832417 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7764\nI0822 22:32:14.832837 32502 solver.cpp:404]     Test net output #1: loss = 0.973857 (* 1 = 0.973857 loss)\nI0822 22:32:16.925760 32502 solver.cpp:228] Iteration 38400, loss = 0.0205744\nI0822 22:32:16.925823 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 22:32:16.925843 32502 solver.cpp:244]     Train net output #1: loss = 0.0205719 (* 1 = 0.0205719 loss)\nI0822 22:32:17.069170 32502 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0822 22:35:58.576192 32502 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0822 22:38:08.241271 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0822 22:38:08.241711 32502 solver.cpp:404]     Test net output #1: loss = 0.73121 (* 1 = 0.73121 loss)\nI0822 22:38:10.333233 32502 solver.cpp:228] 
Iteration 38500, loss = 0.0278115\nI0822 22:38:10.333297 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 22:38:10.333315 32502 solver.cpp:244]     Train net output #1: loss = 0.027809 (* 1 = 0.027809 loss)\nI0822 22:38:10.477216 32502 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0822 22:41:51.831290 32502 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0822 22:44:01.444872 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7851\nI0822 22:44:01.445314 32502 solver.cpp:404]     Test net output #1: loss = 0.952317 (* 1 = 0.952317 loss)\nI0822 22:44:03.535935 32502 solver.cpp:228] Iteration 38600, loss = 0.132333\nI0822 22:44:03.536000 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 22:44:03.536017 32502 solver.cpp:244]     Train net output #1: loss = 0.132331 (* 1 = 0.132331 loss)\nI0822 22:44:03.673079 32502 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0822 22:47:45.105746 32502 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0822 22:49:54.796841 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6668\nI0822 22:49:54.797252 32502 solver.cpp:404]     Test net output #1: loss = 1.76995 (* 1 = 1.76995 loss)\nI0822 22:49:56.889344 32502 solver.cpp:228] Iteration 38700, loss = 0.0376454\nI0822 22:49:56.889407 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 22:49:56.889426 32502 solver.cpp:244]     Train net output #1: loss = 0.037643 (* 1 = 0.037643 loss)\nI0822 22:49:57.024657 32502 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0822 22:53:38.219154 32502 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0822 22:55:47.919518 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7043\nI0822 22:55:47.919960 32502 solver.cpp:404]     Test net output #1: loss = 1.33204 (* 1 = 1.33204 loss)\nI0822 22:55:50.011348 32502 solver.cpp:228] Iteration 38800, loss = 0.0185453\nI0822 22:55:50.011412 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
22:55:50.011430 32502 solver.cpp:244]     Train net output #1: loss = 0.0185428 (* 1 = 0.0185428 loss)\nI0822 22:55:50.150713 32502 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0822 22:59:31.561396 32502 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0822 23:01:41.256080 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7827\nI0822 23:01:41.256489 32502 solver.cpp:404]     Test net output #1: loss = 0.907007 (* 1 = 0.907007 loss)\nI0822 23:01:43.348307 32502 solver.cpp:228] Iteration 38900, loss = 0.0186229\nI0822 23:01:43.348371 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:01:43.348390 32502 solver.cpp:244]     Train net output #1: loss = 0.0186204 (* 1 = 0.0186204 loss)\nI0822 23:01:43.485110 32502 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0822 23:05:24.836181 32502 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0822 23:07:34.538383 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8219\nI0822 23:07:34.538789 32502 solver.cpp:404]     Test net output #1: loss = 0.699982 (* 1 = 0.699982 loss)\nI0822 23:07:36.629885 32502 solver.cpp:228] Iteration 39000, loss = 0.0303326\nI0822 23:07:36.629947 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 23:07:36.629966 32502 solver.cpp:244]     Train net output #1: loss = 0.0303302 (* 1 = 0.0303302 loss)\nI0822 23:07:36.774415 32502 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0822 23:11:18.178653 32502 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0822 23:13:27.869577 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7258\nI0822 23:13:27.869984 32502 solver.cpp:404]     Test net output #1: loss = 1.28022 (* 1 = 1.28022 loss)\nI0822 23:13:29.961119 32502 solver.cpp:228] Iteration 39100, loss = 0.09291\nI0822 23:13:29.961184 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0822 23:13:29.961202 32502 solver.cpp:244]     Train net output #1: loss = 0.0929076 (* 1 = 0.0929076 loss)\nI0822 23:13:30.097893 32502 
sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0822 23:17:11.464146 32502 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0822 23:19:21.168128 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7594\nI0822 23:19:21.168541 32502 solver.cpp:404]     Test net output #1: loss = 1.09297 (* 1 = 1.09297 loss)\nI0822 23:19:23.259927 32502 solver.cpp:228] Iteration 39200, loss = 0.0536661\nI0822 23:19:23.259997 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 23:19:23.260016 32502 solver.cpp:244]     Train net output #1: loss = 0.0536637 (* 1 = 0.0536637 loss)\nI0822 23:19:23.401090 32502 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0822 23:23:04.679222 32502 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0822 23:25:14.337131 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7334\nI0822 23:25:14.337574 32502 solver.cpp:404]     Test net output #1: loss = 1.37867 (* 1 = 1.37867 loss)\nI0822 23:25:16.429183 32502 solver.cpp:228] Iteration 39300, loss = 0.0172779\nI0822 23:25:16.429247 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:25:16.429265 32502 solver.cpp:244]     Train net output #1: loss = 0.0172755 (* 1 = 0.0172755 loss)\nI0822 23:25:16.572553 32502 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0822 23:28:57.817849 32502 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0822 23:31:07.469085 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6555\nI0822 23:31:07.469447 32502 solver.cpp:404]     Test net output #1: loss = 1.67873 (* 1 = 1.67873 loss)\nI0822 23:31:09.560890 32502 solver.cpp:228] Iteration 39400, loss = 0.0380373\nI0822 23:31:09.560956 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0822 23:31:09.560978 32502 solver.cpp:244]     Train net output #1: loss = 0.0380348 (* 1 = 0.0380348 loss)\nI0822 23:31:09.701669 32502 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0822 23:34:50.943575 32502 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0822 
23:37:00.606766 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6953\nI0822 23:37:00.607208 32502 solver.cpp:404]     Test net output #1: loss = 1.48108 (* 1 = 1.48108 loss)\nI0822 23:37:02.694667 32502 solver.cpp:228] Iteration 39500, loss = 0.0211432\nI0822 23:37:02.694730 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 23:37:02.694747 32502 solver.cpp:244]     Train net output #1: loss = 0.0211407 (* 1 = 0.0211407 loss)\nI0822 23:37:02.836341 32502 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0822 23:40:43.578321 32502 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0822 23:42:53.237244 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7378\nI0822 23:42:53.237658 32502 solver.cpp:404]     Test net output #1: loss = 1.28485 (* 1 = 1.28485 loss)\nI0822 23:42:55.324887 32502 solver.cpp:228] Iteration 39600, loss = 0.0205105\nI0822 23:42:55.324951 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 23:42:55.324970 32502 solver.cpp:244]     Train net output #1: loss = 0.0205081 (* 1 = 0.0205081 loss)\nI0822 23:42:55.461429 32502 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0822 23:46:36.233259 32502 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0822 23:48:45.906337 32502 solver.cpp:404]     Test net output #0: accuracy = 0.809\nI0822 23:48:45.906755 32502 solver.cpp:404]     Test net output #1: loss = 0.743403 (* 1 = 0.743403 loss)\nI0822 23:48:47.994812 32502 solver.cpp:228] Iteration 39700, loss = 0.0321422\nI0822 23:48:47.994876 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0822 23:48:47.994894 32502 solver.cpp:244]     Train net output #1: loss = 0.0321398 (* 1 = 0.0321398 loss)\nI0822 23:48:48.132550 32502 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0822 23:52:28.986507 32502 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0822 23:54:38.644325 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7903\nI0822 23:54:38.644745 32502 solver.cpp:404]     
Test net output #1: loss = 0.789688 (* 1 = 0.789688 loss)\nI0822 23:54:40.732148 32502 solver.cpp:228] Iteration 39800, loss = 0.0154092\nI0822 23:54:40.732213 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:54:40.732230 32502 solver.cpp:244]     Train net output #1: loss = 0.0154068 (* 1 = 0.0154068 loss)\nI0822 23:54:40.870033 32502 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0822 23:58:21.652639 32502 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0823 00:00:31.302739 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7754\nI0823 00:00:31.303158 32502 solver.cpp:404]     Test net output #1: loss = 1.01677 (* 1 = 1.01677 loss)\nI0823 00:00:33.391413 32502 solver.cpp:228] Iteration 39900, loss = 0.0277944\nI0823 00:00:33.391479 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 00:00:33.391499 32502 solver.cpp:244]     Train net output #1: loss = 0.027792 (* 1 = 0.027792 loss)\nI0823 00:00:33.531210 32502 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0823 00:04:14.068522 32502 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0823 00:06:23.723307 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7404\nI0823 00:06:23.723716 32502 solver.cpp:404]     Test net output #1: loss = 1.22305 (* 1 = 1.22305 loss)\nI0823 00:06:25.811923 32502 solver.cpp:228] Iteration 40000, loss = 0.047055\nI0823 00:06:25.811990 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 00:06:25.812011 32502 solver.cpp:244]     Train net output #1: loss = 0.0470526 (* 1 = 0.0470526 loss)\nI0823 00:06:25.948287 32502 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0823 00:10:06.606055 32502 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0823 00:12:16.242269 32502 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0823 00:12:16.242679 32502 solver.cpp:404]     Test net output #1: loss = 1.2122 (* 1 = 1.2122 loss)\nI0823 00:12:18.330080 32502 solver.cpp:228] Iteration 40100, loss = 
0.0302581\nI0823 00:12:18.330142 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 00:12:18.330162 32502 solver.cpp:244]     Train net output #1: loss = 0.0302557 (* 1 = 0.0302557 loss)\nI0823 00:12:18.468340 32502 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0823 00:15:59.090155 32502 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0823 00:18:08.734002 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7948\nI0823 00:18:08.734447 32502 solver.cpp:404]     Test net output #1: loss = 0.899846 (* 1 = 0.899846 loss)\nI0823 00:18:10.821857 32502 solver.cpp:228] Iteration 40200, loss = 0.0703279\nI0823 00:18:10.821920 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 00:18:10.821938 32502 solver.cpp:244]     Train net output #1: loss = 0.0703254 (* 1 = 0.0703254 loss)\nI0823 00:18:10.962288 32502 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0823 00:21:51.750998 32502 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0823 00:24:01.370868 32502 solver.cpp:404]     Test net output #0: accuracy = 0.752\nI0823 00:24:01.371300 32502 solver.cpp:404]     Test net output #1: loss = 1.0647 (* 1 = 1.0647 loss)\nI0823 00:24:03.458971 32502 solver.cpp:228] Iteration 40300, loss = 0.0446152\nI0823 00:24:03.459038 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 00:24:03.459055 32502 solver.cpp:244]     Train net output #1: loss = 0.0446127 (* 1 = 0.0446127 loss)\nI0823 00:24:03.609027 32502 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0823 00:27:44.385777 32502 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0823 00:29:54.035735 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7663\nI0823 00:29:54.036159 32502 solver.cpp:404]     Test net output #1: loss = 0.99478 (* 1 = 0.99478 loss)\nI0823 00:29:56.124193 32502 solver.cpp:228] Iteration 40400, loss = 0.0639273\nI0823 00:29:56.124255 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 00:29:56.124274 32502 
solver.cpp:244]     Train net output #1: loss = 0.0639248 (* 1 = 0.0639248 loss)\nI0823 00:29:56.259480 32502 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0823 00:33:37.175380 32502 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0823 00:35:46.831792 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7055\nI0823 00:35:46.832237 32502 solver.cpp:404]     Test net output #1: loss = 1.27098 (* 1 = 1.27098 loss)\nI0823 00:35:48.920074 32502 solver.cpp:228] Iteration 40500, loss = 0.051869\nI0823 00:35:48.920140 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 00:35:48.920158 32502 solver.cpp:244]     Train net output #1: loss = 0.0518665 (* 1 = 0.0518665 loss)\nI0823 00:35:49.058099 32502 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0823 00:39:29.720535 32502 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0823 00:41:39.375577 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7882\nI0823 00:41:39.376011 32502 solver.cpp:404]     Test net output #1: loss = 0.839343 (* 1 = 0.839343 loss)\nI0823 00:41:41.463696 32502 solver.cpp:228] Iteration 40600, loss = 0.0849384\nI0823 00:41:41.463760 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0823 00:41:41.463779 32502 solver.cpp:244]     Train net output #1: loss = 0.0849359 (* 1 = 0.0849359 loss)\nI0823 00:41:41.599856 32502 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0823 00:45:21.033159 32502 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0823 00:47:29.402230 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6278\nI0823 00:47:29.402604 32502 solver.cpp:404]     Test net output #1: loss = 2.06155 (* 1 = 2.06155 loss)\nI0823 00:47:31.491529 32502 solver.cpp:228] Iteration 40700, loss = 0.0394905\nI0823 00:47:31.491603 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 00:47:31.491622 32502 solver.cpp:244]     Train net output #1: loss = 0.039488 (* 1 = 0.039488 loss)\nI0823 00:47:31.614980 32502 sgd_solver.cpp:166] 
Iteration 40700, lr = 0.35\nI0823 00:51:11.430680 32502 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0823 00:53:20.066319 32502 solver.cpp:404]     Test net output #0: accuracy = 0.754\nI0823 00:53:20.066738 32502 solver.cpp:404]     Test net output #1: loss = 1.12349 (* 1 = 1.12349 loss)\nI0823 00:53:22.156108 32502 solver.cpp:228] Iteration 40800, loss = 0.0427523\nI0823 00:53:22.156173 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 00:53:22.156191 32502 solver.cpp:244]     Train net output #1: loss = 0.0427499 (* 1 = 0.0427499 loss)\nI0823 00:53:22.289155 32502 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0823 00:57:02.726567 32502 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0823 00:59:12.416036 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI0823 00:59:12.416473 32502 solver.cpp:404]     Test net output #1: loss = 0.731808 (* 1 = 0.731808 loss)\nI0823 00:59:14.504380 32502 solver.cpp:228] Iteration 40900, loss = 0.0593672\nI0823 00:59:14.504442 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 00:59:14.504462 32502 solver.cpp:244]     Train net output #1: loss = 0.0593647 (* 1 = 0.0593647 loss)\nI0823 00:59:14.640494 32502 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0823 01:02:55.228889 32502 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0823 01:05:04.987339 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7863\nI0823 01:05:04.987742 32502 solver.cpp:404]     Test net output #1: loss = 0.86184 (* 1 = 0.86184 loss)\nI0823 01:05:07.075067 32502 solver.cpp:228] Iteration 41000, loss = 0.0103452\nI0823 01:05:07.075130 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:05:07.075150 32502 solver.cpp:244]     Train net output #1: loss = 0.0103428 (* 1 = 0.0103428 loss)\nI0823 01:05:07.208837 32502 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0823 01:08:47.088711 32502 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0823 01:10:56.838313 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.8047\nI0823 01:10:56.838744 32502 solver.cpp:404]     Test net output #1: loss = 0.780666 (* 1 = 0.780666 loss)\nI0823 01:10:58.927098 32502 solver.cpp:228] Iteration 41100, loss = 0.0473084\nI0823 01:10:58.927162 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 01:10:58.927181 32502 solver.cpp:244]     Train net output #1: loss = 0.047306 (* 1 = 0.047306 loss)\nI0823 01:10:59.053819 32502 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0823 01:14:38.670925 32502 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0823 01:16:48.424715 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7477\nI0823 01:16:48.425163 32502 solver.cpp:404]     Test net output #1: loss = 1.08375 (* 1 = 1.08375 loss)\nI0823 01:16:50.513187 32502 solver.cpp:228] Iteration 41200, loss = 0.0898263\nI0823 01:16:50.513252 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 01:16:50.513270 32502 solver.cpp:244]     Train net output #1: loss = 0.0898238 (* 1 = 0.0898238 loss)\nI0823 01:16:50.638325 32502 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0823 01:20:30.198135 32502 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0823 01:22:39.936436 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7243\nI0823 01:22:39.936833 32502 solver.cpp:404]     Test net output #1: loss = 1.21124 (* 1 = 1.21124 loss)\nI0823 01:22:42.025164 32502 solver.cpp:228] Iteration 41300, loss = 0.0376559\nI0823 01:22:42.025229 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 01:22:42.025249 32502 solver.cpp:244]     Train net output #1: loss = 0.0376534 (* 1 = 0.0376534 loss)\nI0823 01:22:42.149899 32502 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0823 01:26:21.625205 32502 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0823 01:28:31.382860 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7827\nI0823 01:28:31.383275 32502 solver.cpp:404]     Test net output #1: loss = 
0.94696 (* 1 = 0.94696 loss)\nI0823 01:28:33.471479 32502 solver.cpp:228] Iteration 41400, loss = 0.00580436\nI0823 01:28:33.471541 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:28:33.471560 32502 solver.cpp:244]     Train net output #1: loss = 0.0058019 (* 1 = 0.0058019 loss)\nI0823 01:28:33.605581 32502 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0823 01:32:13.034243 32502 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0823 01:34:22.785948 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7606\nI0823 01:34:22.786360 32502 solver.cpp:404]     Test net output #1: loss = 1.0386 (* 1 = 1.0386 loss)\nI0823 01:34:24.874020 32502 solver.cpp:228] Iteration 41500, loss = 0.0474313\nI0823 01:34:24.874083 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 01:34:24.874101 32502 solver.cpp:244]     Train net output #1: loss = 0.0474288 (* 1 = 0.0474288 loss)\nI0823 01:34:25.001504 32502 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0823 01:38:04.383582 32502 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0823 01:40:14.083732 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7347\nI0823 01:40:14.084153 32502 solver.cpp:404]     Test net output #1: loss = 1.367 (* 1 = 1.367 loss)\nI0823 01:40:16.171799 32502 solver.cpp:228] Iteration 41600, loss = 0.0655293\nI0823 01:40:16.171862 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 01:40:16.171880 32502 solver.cpp:244]     Train net output #1: loss = 0.0655269 (* 1 = 0.0655269 loss)\nI0823 01:40:16.295212 32502 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0823 01:43:55.713937 32502 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0823 01:46:05.410471 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7904\nI0823 01:46:05.410909 32502 solver.cpp:404]     Test net output #1: loss = 0.971375 (* 1 = 0.971375 loss)\nI0823 01:46:07.498708 32502 solver.cpp:228] Iteration 41700, loss = 0.0109087\nI0823 01:46:07.498771 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:46:07.498790 32502 solver.cpp:244]     Train net output #1: loss = 0.0109063 (* 1 = 0.0109063 loss)\nI0823 01:46:07.621790 32502 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0823 01:49:47.097131 32502 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0823 01:51:56.796255 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7745\nI0823 01:51:56.796654 32502 solver.cpp:404]     Test net output #1: loss = 1.17356 (* 1 = 1.17356 loss)\nI0823 01:51:58.884330 32502 solver.cpp:228] Iteration 41800, loss = 0.00721833\nI0823 01:51:58.884393 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:51:58.884410 32502 solver.cpp:244]     Train net output #1: loss = 0.00721589 (* 1 = 0.00721589 loss)\nI0823 01:51:59.014509 32502 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0823 01:55:38.382781 32502 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0823 01:57:48.067498 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7781\nI0823 01:57:48.067893 32502 solver.cpp:404]     Test net output #1: loss = 0.940342 (* 1 = 0.940342 loss)\nI0823 01:57:50.155462 32502 solver.cpp:228] Iteration 41900, loss = 0.0455338\nI0823 01:57:50.155525 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 01:57:50.155542 32502 solver.cpp:244]     Train net output #1: loss = 0.0455313 (* 1 = 0.0455313 loss)\nI0823 01:57:50.282493 32502 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0823 02:01:29.661619 32502 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0823 02:03:39.339931 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7945\nI0823 02:03:39.340354 32502 solver.cpp:404]     Test net output #1: loss = 0.860372 (* 1 = 0.860372 loss)\nI0823 02:03:41.428568 32502 solver.cpp:228] Iteration 42000, loss = 0.0170544\nI0823 02:03:41.428633 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 02:03:41.428652 32502 solver.cpp:244]     Train net output #1: loss 
= 0.0170519 (* 1 = 0.0170519 loss)\nI0823 02:03:41.550175 32502 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0823 02:07:20.893645 32502 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0823 02:09:30.582336 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7486\nI0823 02:09:30.582770 32502 solver.cpp:404]     Test net output #1: loss = 1.0249 (* 1 = 1.0249 loss)\nI0823 02:09:32.670274 32502 solver.cpp:228] Iteration 42100, loss = 0.0246582\nI0823 02:09:32.670338 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:09:32.670356 32502 solver.cpp:244]     Train net output #1: loss = 0.0246557 (* 1 = 0.0246557 loss)\nI0823 02:09:32.800196 32502 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0823 02:13:12.308046 32502 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0823 02:15:21.988409 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7604\nI0823 02:15:21.988847 32502 solver.cpp:404]     Test net output #1: loss = 1.15951 (* 1 = 1.15951 loss)\nI0823 02:15:24.077010 32502 solver.cpp:228] Iteration 42200, loss = 0.0202593\nI0823 02:15:24.077071 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:15:24.077090 32502 solver.cpp:244]     Train net output #1: loss = 0.0202569 (* 1 = 0.0202569 loss)\nI0823 02:15:24.203788 32502 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0823 02:19:03.679708 32502 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0823 02:21:13.360252 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7574\nI0823 02:21:13.360671 32502 solver.cpp:404]     Test net output #1: loss = 1.04991 (* 1 = 1.04991 loss)\nI0823 02:21:15.448477 32502 solver.cpp:228] Iteration 42300, loss = 0.0101434\nI0823 02:21:15.448541 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:21:15.448559 32502 solver.cpp:244]     Train net output #1: loss = 0.010141 (* 1 = 0.010141 loss)\nI0823 02:21:15.574759 32502 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0823 02:24:55.016505 32502 
solver.cpp:337] Iteration 42400, Testing net (#0)\nI0823 02:27:04.711231 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8134\nI0823 02:27:04.711640 32502 solver.cpp:404]     Test net output #1: loss = 0.694491 (* 1 = 0.694491 loss)\nI0823 02:27:06.799901 32502 solver.cpp:228] Iteration 42400, loss = 0.056137\nI0823 02:27:06.799967 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 02:27:06.799988 32502 solver.cpp:244]     Train net output #1: loss = 0.0561346 (* 1 = 0.0561346 loss)\nI0823 02:27:06.926251 32502 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0823 02:30:46.293279 32502 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0823 02:32:55.981127 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8127\nI0823 02:32:55.981568 32502 solver.cpp:404]     Test net output #1: loss = 0.792058 (* 1 = 0.792058 loss)\nI0823 02:32:58.069339 32502 solver.cpp:228] Iteration 42500, loss = 0.0521147\nI0823 02:32:58.069404 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 02:32:58.069422 32502 solver.cpp:244]     Train net output #1: loss = 0.0521123 (* 1 = 0.0521123 loss)\nI0823 02:32:58.195384 32502 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0823 02:36:37.702021 32502 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0823 02:38:47.576023 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7888\nI0823 02:38:47.576438 32502 solver.cpp:404]     Test net output #1: loss = 0.909406 (* 1 = 0.909406 loss)\nI0823 02:38:49.664795 32502 solver.cpp:228] Iteration 42600, loss = 0.0110888\nI0823 02:38:49.664865 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:38:49.664896 32502 solver.cpp:244]     Train net output #1: loss = 0.0110864 (* 1 = 0.0110864 loss)\nI0823 02:38:49.796944 32502 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0823 02:42:29.308768 32502 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0823 02:44:39.042547 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.7965\nI0823 02:44:39.042994 32502 solver.cpp:404]     Test net output #1: loss = 0.801451 (* 1 = 0.801451 loss)\nI0823 02:44:41.131814 32502 solver.cpp:228] Iteration 42700, loss = 0.0302496\nI0823 02:44:41.131886 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 02:44:41.131913 32502 solver.cpp:244]     Train net output #1: loss = 0.0302472 (* 1 = 0.0302472 loss)\nI0823 02:44:41.255952 32502 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0823 02:48:20.809229 32502 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0823 02:50:30.513649 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7684\nI0823 02:50:30.514065 32502 solver.cpp:404]     Test net output #1: loss = 0.95551 (* 1 = 0.95551 loss)\nI0823 02:50:32.602615 32502 solver.cpp:228] Iteration 42800, loss = 0.0331092\nI0823 02:50:32.602679 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 02:50:32.602696 32502 solver.cpp:244]     Train net output #1: loss = 0.0331068 (* 1 = 0.0331068 loss)\nI0823 02:50:32.729905 32502 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0823 02:54:12.150897 32502 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0823 02:56:21.841863 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7696\nI0823 02:56:21.842288 32502 solver.cpp:404]     Test net output #1: loss = 1.00045 (* 1 = 1.00045 loss)\nI0823 02:56:23.930418 32502 solver.cpp:228] Iteration 42900, loss = 0.0442957\nI0823 02:56:23.930483 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 02:56:23.930502 32502 solver.cpp:244]     Train net output #1: loss = 0.0442933 (* 1 = 0.0442933 loss)\nI0823 02:56:24.062191 32502 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0823 03:00:03.495548 32502 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0823 03:02:13.168865 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6867\nI0823 03:02:13.169308 32502 solver.cpp:404]     Test net output #1: loss = 1.56689 (* 1 = 1.56689 loss)\nI0823 
03:02:15.258162 32502 solver.cpp:228] Iteration 43000, loss = 0.0940191\nI0823 03:02:15.258229 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0823 03:02:15.258247 32502 solver.cpp:244]     Train net output #1: loss = 0.0940167 (* 1 = 0.0940167 loss)\nI0823 03:02:15.381068 32502 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0823 03:05:54.844673 32502 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0823 03:08:04.531888 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7834\nI0823 03:08:04.532335 32502 solver.cpp:404]     Test net output #1: loss = 0.908629 (* 1 = 0.908629 loss)\nI0823 03:08:06.621094 32502 solver.cpp:228] Iteration 43100, loss = 0.073498\nI0823 03:08:06.621160 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 03:08:06.621177 32502 solver.cpp:244]     Train net output #1: loss = 0.0734956 (* 1 = 0.0734956 loss)\nI0823 03:08:06.743510 32502 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0823 03:11:46.184906 32502 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0823 03:13:55.863484 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8111\nI0823 03:13:55.863889 32502 solver.cpp:404]     Test net output #1: loss = 0.805014 (* 1 = 0.805014 loss)\nI0823 03:13:57.953599 32502 solver.cpp:228] Iteration 43200, loss = 0.0586575\nI0823 03:13:57.953663 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 03:13:57.953681 32502 solver.cpp:244]     Train net output #1: loss = 0.0586551 (* 1 = 0.0586551 loss)\nI0823 03:13:58.077284 32502 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0823 03:17:37.552273 32502 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0823 03:19:47.259438 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7955\nI0823 03:19:47.259874 32502 solver.cpp:404]     Test net output #1: loss = 0.841757 (* 1 = 0.841757 loss)\nI0823 03:19:49.348389 32502 solver.cpp:228] Iteration 43300, loss = 0.0909931\nI0823 03:19:49.348453 32502 solver.cpp:244]     Train 
net output #0: accuracy = 0.98\nI0823 03:19:49.348470 32502 solver.cpp:244]     Train net output #1: loss = 0.0909907 (* 1 = 0.0909907 loss)\nI0823 03:19:49.475437 32502 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0823 03:23:28.999743 32502 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0823 03:25:38.741614 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7741\nI0823 03:25:38.742032 32502 solver.cpp:404]     Test net output #1: loss = 1.01351 (* 1 = 1.01351 loss)\nI0823 03:25:40.830772 32502 solver.cpp:228] Iteration 43400, loss = 0.0334235\nI0823 03:25:40.830837 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 03:25:40.830855 32502 solver.cpp:244]     Train net output #1: loss = 0.0334211 (* 1 = 0.0334211 loss)\nI0823 03:25:40.956151 32502 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0823 03:29:20.497114 32502 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0823 03:31:30.241711 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8067\nI0823 03:31:30.242156 32502 solver.cpp:404]     Test net output #1: loss = 0.788833 (* 1 = 0.788833 loss)\nI0823 03:31:32.330951 32502 solver.cpp:228] Iteration 43500, loss = 0.00172089\nI0823 03:31:32.331015 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:31:32.331033 32502 solver.cpp:244]     Train net output #1: loss = 0.00171847 (* 1 = 0.00171847 loss)\nI0823 03:31:32.455020 32502 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0823 03:35:11.917031 32502 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0823 03:37:21.670632 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8241\nI0823 03:37:21.671077 32502 solver.cpp:404]     Test net output #1: loss = 0.742215 (* 1 = 0.742215 loss)\nI0823 03:37:23.760406 32502 solver.cpp:228] Iteration 43600, loss = 0.103989\nI0823 03:37:23.760470 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 03:37:23.760488 32502 solver.cpp:244]     Train net output #1: loss = 0.103987 (* 1 = 
0.103987 loss)\nI0823 03:37:23.890033 32502 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0823 03:41:03.308897 32502 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0823 03:43:13.045877 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8144\nI0823 03:43:13.046324 32502 solver.cpp:404]     Test net output #1: loss = 0.693272 (* 1 = 0.693272 loss)\nI0823 03:43:15.135606 32502 solver.cpp:228] Iteration 43700, loss = 0.0828936\nI0823 03:43:15.135669 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 03:43:15.135687 32502 solver.cpp:244]     Train net output #1: loss = 0.0828912 (* 1 = 0.0828912 loss)\nI0823 03:43:15.265557 32502 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0823 03:46:54.787173 32502 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0823 03:49:04.518448 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7404\nI0823 03:49:04.518868 32502 solver.cpp:404]     Test net output #1: loss = 1.23481 (* 1 = 1.23481 loss)\nI0823 03:49:06.608160 32502 solver.cpp:228] Iteration 43800, loss = 0.0349825\nI0823 03:49:06.608228 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 03:49:06.608248 32502 solver.cpp:244]     Train net output #1: loss = 0.0349801 (* 1 = 0.0349801 loss)\nI0823 03:49:06.730445 32502 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0823 03:52:46.204159 32502 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0823 03:54:55.911237 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7741\nI0823 03:54:55.911681 32502 solver.cpp:404]     Test net output #1: loss = 1.04772 (* 1 = 1.04772 loss)\nI0823 03:54:58.000455 32502 solver.cpp:228] Iteration 43900, loss = 0.0387016\nI0823 03:54:58.000519 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 03:54:58.000537 32502 solver.cpp:244]     Train net output #1: loss = 0.0386991 (* 1 = 0.0386991 loss)\nI0823 03:54:58.129252 32502 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0823 03:58:37.614513 32502 
solver.cpp:337] Iteration 44000, Testing net (#0)\nI0823 04:00:47.317924 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7418\nI0823 04:00:47.318338 32502 solver.cpp:404]     Test net output #1: loss = 1.14396 (* 1 = 1.14396 loss)\nI0823 04:00:49.407896 32502 solver.cpp:228] Iteration 44000, loss = 0.0427013\nI0823 04:00:49.407960 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 04:00:49.407979 32502 solver.cpp:244]     Train net output #1: loss = 0.0426989 (* 1 = 0.0426989 loss)\nI0823 04:00:49.532817 32502 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0823 04:04:28.941910 32502 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0823 04:06:38.560328 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7689\nI0823 04:06:38.560770 32502 solver.cpp:404]     Test net output #1: loss = 1.06126 (* 1 = 1.06126 loss)\nI0823 04:06:40.649318 32502 solver.cpp:228] Iteration 44100, loss = 0.0302506\nI0823 04:06:40.649382 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 04:06:40.649401 32502 solver.cpp:244]     Train net output #1: loss = 0.0302482 (* 1 = 0.0302482 loss)\nI0823 04:06:40.774170 32502 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0823 04:10:20.213706 32502 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0823 04:12:29.833026 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8127\nI0823 04:12:29.833462 32502 solver.cpp:404]     Test net output #1: loss = 0.754165 (* 1 = 0.754165 loss)\nI0823 04:12:31.921437 32502 solver.cpp:228] Iteration 44200, loss = 0.0154901\nI0823 04:12:31.921502 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:12:31.921519 32502 solver.cpp:244]     Train net output #1: loss = 0.0154877 (* 1 = 0.0154877 loss)\nI0823 04:12:32.050371 32502 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0823 04:16:11.541352 32502 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0823 04:18:21.168390 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.7683\nI0823 04:18:21.168836 32502 solver.cpp:404]     Test net output #1: loss = 1.15138 (* 1 = 1.15138 loss)\nI0823 04:18:23.257165 32502 solver.cpp:228] Iteration 44300, loss = 0.0477089\nI0823 04:18:23.257235 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 04:18:23.257253 32502 solver.cpp:244]     Train net output #1: loss = 0.0477065 (* 1 = 0.0477065 loss)\nI0823 04:18:23.381955 32502 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0823 04:22:02.864086 32502 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0823 04:24:12.481977 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7578\nI0823 04:24:12.482421 32502 solver.cpp:404]     Test net output #1: loss = 1.04747 (* 1 = 1.04747 loss)\nI0823 04:24:14.570878 32502 solver.cpp:228] Iteration 44400, loss = 0.0866038\nI0823 04:24:14.570940 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0823 04:24:14.570957 32502 solver.cpp:244]     Train net output #1: loss = 0.0866014 (* 1 = 0.0866014 loss)\nI0823 04:24:14.698092 32502 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0823 04:27:54.144245 32502 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0823 04:30:03.741758 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7339\nI0823 04:30:03.742172 32502 solver.cpp:404]     Test net output #1: loss = 1.14937 (* 1 = 1.14937 loss)\nI0823 04:30:05.830204 32502 solver.cpp:228] Iteration 44500, loss = 0.0382935\nI0823 04:30:05.830272 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 04:30:05.830289 32502 solver.cpp:244]     Train net output #1: loss = 0.0382911 (* 1 = 0.0382911 loss)\nI0823 04:30:05.952107 32502 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0823 04:33:45.297735 32502 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0823 04:35:54.901890 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0823 04:35:54.902336 32502 solver.cpp:404]     Test net output #1: loss = 1.08357 (* 1 = 1.08357 loss)\nI0823 
04:35:56.990468 32502 solver.cpp:228] Iteration 44600, loss = 0.0333113\nI0823 04:35:56.990533 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 04:35:56.990551 32502 solver.cpp:244]     Train net output #1: loss = 0.0333089 (* 1 = 0.0333089 loss)\nI0823 04:35:57.116808 32502 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0823 04:39:36.556020 32502 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0823 04:41:46.166657 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7678\nI0823 04:41:46.167068 32502 solver.cpp:404]     Test net output #1: loss = 0.897975 (* 1 = 0.897975 loss)\nI0823 04:41:48.254338 32502 solver.cpp:228] Iteration 44700, loss = 0.0347417\nI0823 04:41:48.254403 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 04:41:48.254420 32502 solver.cpp:244]     Train net output #1: loss = 0.0347393 (* 1 = 0.0347393 loss)\nI0823 04:41:48.381531 32502 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0823 04:45:27.895499 32502 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0823 04:47:37.499480 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6461\nI0823 04:47:37.499929 32502 solver.cpp:404]     Test net output #1: loss = 1.70718 (* 1 = 1.70718 loss)\nI0823 04:47:39.587028 32502 solver.cpp:228] Iteration 44800, loss = 0.0257614\nI0823 04:47:39.587091 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:47:39.587108 32502 solver.cpp:244]     Train net output #1: loss = 0.025759 (* 1 = 0.025759 loss)\nI0823 04:47:39.714639 32502 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0823 04:51:19.220541 32502 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0823 04:53:28.825444 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7294\nI0823 04:53:28.825850 32502 solver.cpp:404]     Test net output #1: loss = 1.1752 (* 1 = 1.1752 loss)\nI0823 04:53:30.914302 32502 solver.cpp:228] Iteration 44900, loss = 0.066861\nI0823 04:53:30.914368 32502 solver.cpp:244]     Train net output 
#0: accuracy = 0.98\nI0823 04:53:30.914384 32502 solver.cpp:244]     Train net output #1: loss = 0.0668587 (* 1 = 0.0668587 loss)\nI0823 04:53:31.043661 32502 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0823 04:57:10.450678 32502 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0823 04:59:20.052002 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7321\nI0823 04:59:20.052451 32502 solver.cpp:404]     Test net output #1: loss = 1.07969 (* 1 = 1.07969 loss)\nI0823 04:59:22.140234 32502 solver.cpp:228] Iteration 45000, loss = 0.0128979\nI0823 04:59:22.140298 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:59:22.140316 32502 solver.cpp:244]     Train net output #1: loss = 0.0128956 (* 1 = 0.0128956 loss)\nI0823 04:59:22.265756 32502 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0823 05:03:01.716508 32502 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0823 05:05:11.341907 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7953\nI0823 05:05:11.342327 32502 solver.cpp:404]     Test net output #1: loss = 0.788835 (* 1 = 0.788835 loss)\nI0823 05:05:13.430323 32502 solver.cpp:228] Iteration 45100, loss = 0.029021\nI0823 05:05:13.430388 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:05:13.430405 32502 solver.cpp:244]     Train net output #1: loss = 0.0290186 (* 1 = 0.0290186 loss)\nI0823 05:05:13.563313 32502 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0823 05:08:53.024945 32502 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0823 05:11:02.688864 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8124\nI0823 05:11:02.689275 32502 solver.cpp:404]     Test net output #1: loss = 0.749744 (* 1 = 0.749744 loss)\nI0823 05:11:04.777493 32502 solver.cpp:228] Iteration 45200, loss = 0.0351558\nI0823 05:11:04.777556 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 05:11:04.777575 32502 solver.cpp:244]     Train net output #1: loss = 0.0351534 (* 1 = 0.0351534 loss)\nI0823 
05:11:04.909726 32502 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0823 05:14:44.282944 32502 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0823 05:16:53.922458 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7452\nI0823 05:16:53.922915 32502 solver.cpp:404]     Test net output #1: loss = 1.2526 (* 1 = 1.2526 loss)\nI0823 05:16:56.009718 32502 solver.cpp:228] Iteration 45300, loss = 0.07917\nI0823 05:16:56.009783 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:16:56.009801 32502 solver.cpp:244]     Train net output #1: loss = 0.0791677 (* 1 = 0.0791677 loss)\nI0823 05:16:56.143184 32502 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0823 05:20:35.679733 32502 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0823 05:22:45.329329 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8323\nI0823 05:22:45.329751 32502 solver.cpp:404]     Test net output #1: loss = 0.630516 (* 1 = 0.630516 loss)\nI0823 05:22:47.417114 32502 solver.cpp:228] Iteration 45400, loss = 0.0809518\nI0823 05:22:47.417178 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:22:47.417196 32502 solver.cpp:244]     Train net output #1: loss = 0.0809494 (* 1 = 0.0809494 loss)\nI0823 05:22:47.544770 32502 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0823 05:26:26.942848 32502 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0823 05:28:36.603334 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7704\nI0823 05:28:36.603785 32502 solver.cpp:404]     Test net output #1: loss = 0.995047 (* 1 = 0.995047 loss)\nI0823 05:28:38.692828 32502 solver.cpp:228] Iteration 45500, loss = 0.0556367\nI0823 05:28:38.692899 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:28:38.692919 32502 solver.cpp:244]     Train net output #1: loss = 0.0556344 (* 1 = 0.0556344 loss)\nI0823 05:28:38.819674 32502 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0823 05:32:18.306550 32502 solver.cpp:337] Iteration 45600, 
Testing net (#0)\nI0823 05:34:27.960268 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7629\nI0823 05:34:27.960702 32502 solver.cpp:404]     Test net output #1: loss = 1.05738 (* 1 = 1.05738 loss)\nI0823 05:34:30.048790 32502 solver.cpp:228] Iteration 45600, loss = 0.0292614\nI0823 05:34:30.048856 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:34:30.048879 32502 solver.cpp:244]     Train net output #1: loss = 0.029259 (* 1 = 0.029259 loss)\nI0823 05:34:30.175493 32502 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0823 05:38:09.732551 32502 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0823 05:40:19.463702 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7857\nI0823 05:40:19.464112 32502 solver.cpp:404]     Test net output #1: loss = 0.908596 (* 1 = 0.908596 loss)\nI0823 05:40:21.552763 32502 solver.cpp:228] Iteration 45700, loss = 0.0956401\nI0823 05:40:21.552829 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 05:40:21.552847 32502 solver.cpp:244]     Train net output #1: loss = 0.0956377 (* 1 = 0.0956377 loss)\nI0823 05:40:21.673807 32502 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0823 05:44:00.977047 32502 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0823 05:46:10.736392 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7474\nI0823 05:46:10.736807 32502 solver.cpp:404]     Test net output #1: loss = 1.15632 (* 1 = 1.15632 loss)\nI0823 05:46:12.825259 32502 solver.cpp:228] Iteration 45800, loss = 0.0797036\nI0823 05:46:12.825325 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:46:12.825342 32502 solver.cpp:244]     Train net output #1: loss = 0.0797013 (* 1 = 0.0797013 loss)\nI0823 05:46:12.955633 32502 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0823 05:49:52.490105 32502 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0823 05:52:02.247521 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7941\nI0823 05:52:02.247879 32502 
solver.cpp:404]     Test net output #1: loss = 0.831047 (* 1 = 0.831047 loss)\nI0823 05:52:04.336102 32502 solver.cpp:228] Iteration 45900, loss = 0.0347989\nI0823 05:52:04.336165 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 05:52:04.336184 32502 solver.cpp:244]     Train net output #1: loss = 0.0347965 (* 1 = 0.0347965 loss)\nI0823 05:52:04.463510 32502 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0823 05:55:43.785681 32502 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0823 05:57:53.526705 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6708\nI0823 05:57:53.527145 32502 solver.cpp:404]     Test net output #1: loss = 1.90899 (* 1 = 1.90899 loss)\nI0823 05:57:55.616350 32502 solver.cpp:228] Iteration 46000, loss = 0.0335231\nI0823 05:57:55.616413 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 05:57:55.616431 32502 solver.cpp:244]     Train net output #1: loss = 0.0335207 (* 1 = 0.0335207 loss)\nI0823 05:57:55.739400 32502 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0823 06:01:35.145355 32502 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0823 06:03:44.859299 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8216\nI0823 06:03:44.859704 32502 solver.cpp:404]     Test net output #1: loss = 0.72266 (* 1 = 0.72266 loss)\nI0823 06:03:46.948519 32502 solver.cpp:228] Iteration 46100, loss = 0.0264814\nI0823 06:03:46.948585 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:03:46.948601 32502 solver.cpp:244]     Train net output #1: loss = 0.026479 (* 1 = 0.026479 loss)\nI0823 06:03:47.073061 32502 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0823 06:07:26.466161 32502 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0823 06:09:36.212318 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7329\nI0823 06:09:36.212733 32502 solver.cpp:404]     Test net output #1: loss = 1.02828 (* 1 = 1.02828 loss)\nI0823 06:09:38.301237 32502 solver.cpp:228] Iteration 
46200, loss = 0.0719982\nI0823 06:09:38.301304 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 06:09:38.301321 32502 solver.cpp:244]     Train net output #1: loss = 0.0719958 (* 1 = 0.0719958 loss)\nI0823 06:09:38.426566 32502 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0823 06:13:17.814371 32502 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0823 06:15:27.482362 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8137\nI0823 06:15:27.482779 32502 solver.cpp:404]     Test net output #1: loss = 0.741005 (* 1 = 0.741005 loss)\nI0823 06:15:29.571280 32502 solver.cpp:228] Iteration 46300, loss = 0.0442381\nI0823 06:15:29.571346 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 06:15:29.571362 32502 solver.cpp:244]     Train net output #1: loss = 0.0442357 (* 1 = 0.0442357 loss)\nI0823 06:15:29.704012 32502 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0823 06:19:09.201839 32502 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0823 06:21:18.886953 32502 solver.cpp:404]     Test net output #0: accuracy = 0.775\nI0823 06:21:18.887392 32502 solver.cpp:404]     Test net output #1: loss = 0.940036 (* 1 = 0.940036 loss)\nI0823 06:21:20.976294 32502 solver.cpp:228] Iteration 46400, loss = 0.0297782\nI0823 06:21:20.976359 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 06:21:20.976377 32502 solver.cpp:244]     Train net output #1: loss = 0.0297759 (* 1 = 0.0297759 loss)\nI0823 06:21:21.107425 32502 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0823 06:25:00.555673 32502 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0823 06:27:10.228803 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8001\nI0823 06:27:10.229313 32502 solver.cpp:404]     Test net output #1: loss = 0.804656 (* 1 = 0.804656 loss)\nI0823 06:27:12.317889 32502 solver.cpp:228] Iteration 46500, loss = 0.041007\nI0823 06:27:12.317955 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 
06:27:12.317971 32502 solver.cpp:244]     Train net output #1: loss = 0.0410046 (* 1 = 0.0410046 loss)\nI0823 06:27:12.450186 32502 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0823 06:30:51.805006 32502 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0823 06:33:01.476897 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7385\nI0823 06:33:01.477305 32502 solver.cpp:404]     Test net output #1: loss = 1.19827 (* 1 = 1.19827 loss)\nI0823 06:33:03.566565 32502 solver.cpp:228] Iteration 46600, loss = 0.152897\nI0823 06:33:03.566630 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 06:33:03.566648 32502 solver.cpp:244]     Train net output #1: loss = 0.152895 (* 1 = 0.152895 loss)\nI0823 06:33:03.692840 32502 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0823 06:36:43.076218 32502 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0823 06:38:52.803308 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7536\nI0823 06:38:52.803725 32502 solver.cpp:404]     Test net output #1: loss = 1.24967 (* 1 = 1.24967 loss)\nI0823 06:38:54.892498 32502 solver.cpp:228] Iteration 46700, loss = 0.00642083\nI0823 06:38:54.892561 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:38:54.892578 32502 solver.cpp:244]     Train net output #1: loss = 0.00641845 (* 1 = 0.00641845 loss)\nI0823 06:38:55.022639 32502 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0823 06:42:35.147245 32502 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0823 06:44:44.854835 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7761\nI0823 06:44:44.855267 32502 solver.cpp:404]     Test net output #1: loss = 0.892135 (* 1 = 0.892135 loss)\nI0823 06:44:46.947340 32502 solver.cpp:228] Iteration 46800, loss = 0.00378657\nI0823 06:44:46.947405 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:44:46.947423 32502 solver.cpp:244]     Train net output #1: loss = 0.00378419 (* 1 = 0.00378419 loss)\nI0823 06:44:47.085050 32502 
sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0823 06:48:27.665426 32502 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0823 06:50:37.352752 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8318\nI0823 06:50:37.353160 32502 solver.cpp:404]     Test net output #1: loss = 0.618115 (* 1 = 0.618115 loss)\nI0823 06:50:39.445392 32502 solver.cpp:228] Iteration 46900, loss = 0.0800118\nI0823 06:50:39.445458 32502 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0823 06:50:39.445477 32502 solver.cpp:244]     Train net output #1: loss = 0.0800094 (* 1 = 0.0800094 loss)\nI0823 06:50:39.581252 32502 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0823 06:54:19.884073 32502 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0823 06:56:27.898145 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7273\nI0823 06:56:27.898459 32502 solver.cpp:404]     Test net output #1: loss = 1.11558 (* 1 = 1.11558 loss)\nI0823 06:56:29.987233 32502 solver.cpp:228] Iteration 47000, loss = 0.0756516\nI0823 06:56:29.987282 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0823 06:56:29.987298 32502 solver.cpp:244]     Train net output #1: loss = 0.0756492 (* 1 = 0.0756492 loss)\nI0823 06:56:30.117924 32502 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0823 07:00:09.310246 32502 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0823 07:02:17.337193 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8012\nI0823 07:02:17.337574 32502 solver.cpp:404]     Test net output #1: loss = 0.772548 (* 1 = 0.772548 loss)\nI0823 07:02:19.426723 32502 solver.cpp:228] Iteration 47100, loss = 0.0152432\nI0823 07:02:19.426772 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:02:19.426797 32502 solver.cpp:244]     Train net output #1: loss = 0.0152408 (* 1 = 0.0152408 loss)\nI0823 07:02:19.550156 32502 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0823 07:05:58.683507 32502 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0823 
07:08:06.722189 32502 solver.cpp:404]     Test net output #0: accuracy = 0.827\nI0823 07:08:06.722576 32502 solver.cpp:404]     Test net output #1: loss = 0.6048 (* 1 = 0.6048 loss)\nI0823 07:08:08.810616 32502 solver.cpp:228] Iteration 47200, loss = 0.069056\nI0823 07:08:08.810667 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 07:08:08.810693 32502 solver.cpp:244]     Train net output #1: loss = 0.0690537 (* 1 = 0.0690537 loss)\nI0823 07:08:08.937639 32502 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0823 07:11:47.992395 32502 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0823 07:13:56.020354 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7938\nI0823 07:13:56.020763 32502 solver.cpp:404]     Test net output #1: loss = 0.899144 (* 1 = 0.899144 loss)\nI0823 07:13:58.109889 32502 solver.cpp:228] Iteration 47300, loss = 0.0175018\nI0823 07:13:58.109939 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 07:13:58.109964 32502 solver.cpp:244]     Train net output #1: loss = 0.0174995 (* 1 = 0.0174995 loss)\nI0823 07:13:58.233765 32502 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0823 07:17:37.308179 32502 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0823 07:19:45.337285 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8189\nI0823 07:19:45.337676 32502 solver.cpp:404]     Test net output #1: loss = 0.735364 (* 1 = 0.735364 loss)\nI0823 07:19:47.426110 32502 solver.cpp:228] Iteration 47400, loss = 0.0756271\nI0823 07:19:47.426162 32502 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0823 07:19:47.426187 32502 solver.cpp:244]     Train net output #1: loss = 0.0756247 (* 1 = 0.0756247 loss)\nI0823 07:19:47.553052 32502 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0823 07:23:26.899818 32502 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0823 07:25:34.946959 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7957\nI0823 07:25:34.947369 32502 solver.cpp:404]     Test 
net output #1: loss = 0.856842 (* 1 = 0.856842 loss)\nI0823 07:25:37.035838 32502 solver.cpp:228] Iteration 47500, loss = 0.0756577\nI0823 07:25:37.035889 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 07:25:37.035913 32502 solver.cpp:244]     Train net output #1: loss = 0.0756553 (* 1 = 0.0756553 loss)\nI0823 07:25:37.175916 32502 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0823 07:29:17.321038 32502 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0823 07:31:25.369640 32502 solver.cpp:404]     Test net output #0: accuracy = 0.821\nI0823 07:31:25.370023 32502 solver.cpp:404]     Test net output #1: loss = 0.700364 (* 1 = 0.700364 loss)\nI0823 07:31:27.459115 32502 solver.cpp:228] Iteration 47600, loss = 0.015278\nI0823 07:31:27.459167 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:31:27.459190 32502 solver.cpp:244]     Train net output #1: loss = 0.0152756 (* 1 = 0.0152756 loss)\nI0823 07:31:27.596390 32502 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0823 07:35:07.746206 32502 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0823 07:37:15.800756 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7583\nI0823 07:37:15.801139 32502 solver.cpp:404]     Test net output #1: loss = 1.12391 (* 1 = 1.12391 loss)\nI0823 07:37:17.890151 32502 solver.cpp:228] Iteration 47700, loss = 0.0374109\nI0823 07:37:17.890202 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 07:37:17.890228 32502 solver.cpp:244]     Train net output #1: loss = 0.0374085 (* 1 = 0.0374085 loss)\nI0823 07:37:18.021567 32502 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0823 07:40:58.134860 32502 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0823 07:43:06.184317 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8023\nI0823 07:43:06.184696 32502 solver.cpp:404]     Test net output #1: loss = 0.741487 (* 1 = 0.741487 loss)\nI0823 07:43:08.273352 32502 solver.cpp:228] Iteration 47800, loss = 
0.00870619\nI0823 07:43:08.273404 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:43:08.273429 32502 solver.cpp:244]     Train net output #1: loss = 0.00870378 (* 1 = 0.00870378 loss)\nI0823 07:43:08.416901 32502 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0823 07:46:48.552606 32502 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0823 07:48:56.585525 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7714\nI0823 07:48:56.585909 32502 solver.cpp:404]     Test net output #1: loss = 0.878131 (* 1 = 0.878131 loss)\nI0823 07:48:58.674975 32502 solver.cpp:228] Iteration 47900, loss = 0.0415056\nI0823 07:48:58.675026 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 07:48:58.675056 32502 solver.cpp:244]     Train net output #1: loss = 0.0415032 (* 1 = 0.0415032 loss)\nI0823 07:48:58.820204 32502 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0823 07:52:38.867893 32502 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0823 07:54:46.941064 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7486\nI0823 07:54:46.941376 32502 solver.cpp:404]     Test net output #1: loss = 1.17232 (* 1 = 1.17232 loss)\nI0823 07:54:49.030732 32502 solver.cpp:228] Iteration 48000, loss = 0.0908746\nI0823 07:54:49.030784 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 07:54:49.030810 32502 solver.cpp:244]     Train net output #1: loss = 0.0908722 (* 1 = 0.0908722 loss)\nI0823 07:54:49.168768 32502 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0823 07:58:29.254233 32502 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0823 08:00:37.323551 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7738\nI0823 08:00:37.323932 32502 solver.cpp:404]     Test net output #1: loss = 0.962722 (* 1 = 0.962722 loss)\nI0823 08:00:39.413413 32502 solver.cpp:228] Iteration 48100, loss = 0.0777862\nI0823 08:00:39.413463 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 08:00:39.413489 32502 
solver.cpp:244]     Train net output #1: loss = 0.0777837 (* 1 = 0.0777837 loss)\nI0823 08:00:39.551699 32502 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0823 08:04:19.569752 32502 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0823 08:06:27.640125 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8068\nI0823 08:06:27.640516 32502 solver.cpp:404]     Test net output #1: loss = 0.854656 (* 1 = 0.854656 loss)\nI0823 08:06:29.729671 32502 solver.cpp:228] Iteration 48200, loss = 0.0143583\nI0823 08:06:29.729722 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:06:29.729745 32502 solver.cpp:244]     Train net output #1: loss = 0.0143558 (* 1 = 0.0143558 loss)\nI0823 08:06:29.865557 32502 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0823 08:10:09.925693 32502 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0823 08:12:17.993551 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7781\nI0823 08:12:17.993917 32502 solver.cpp:404]     Test net output #1: loss = 0.945016 (* 1 = 0.945016 loss)\nI0823 08:12:20.083333 32502 solver.cpp:228] Iteration 48300, loss = 0.0273292\nI0823 08:12:20.083385 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:12:20.083410 32502 solver.cpp:244]     Train net output #1: loss = 0.0273267 (* 1 = 0.0273267 loss)\nI0823 08:12:20.218413 32502 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0823 08:16:00.421891 32502 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0823 08:18:08.490825 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7661\nI0823 08:18:08.491217 32502 solver.cpp:404]     Test net output #1: loss = 1.13168 (* 1 = 1.13168 loss)\nI0823 08:18:10.580313 32502 solver.cpp:228] Iteration 48400, loss = 0.0802652\nI0823 08:18:10.580363 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 08:18:10.580389 32502 solver.cpp:244]     Train net output #1: loss = 0.0802627 (* 1 = 0.0802627 loss)\nI0823 08:18:10.709787 32502 sgd_solver.cpp:166] 
Iteration 48400, lr = 0.35\nI0823 08:21:50.710898 32502 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0823 08:23:58.776187 32502 solver.cpp:404]     Test net output #0: accuracy = 0.6868\nI0823 08:23:58.776587 32502 solver.cpp:404]     Test net output #1: loss = 1.6711 (* 1 = 1.6711 loss)\nI0823 08:24:00.865600 32502 solver.cpp:228] Iteration 48500, loss = 0.032421\nI0823 08:24:00.865651 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:24:00.865676 32502 solver.cpp:244]     Train net output #1: loss = 0.0324185 (* 1 = 0.0324185 loss)\nI0823 08:24:01.000656 32502 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0823 08:27:41.143702 32502 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0823 08:29:49.197154 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7068\nI0823 08:29:49.197540 32502 solver.cpp:404]     Test net output #1: loss = 1.46001 (* 1 = 1.46001 loss)\nI0823 08:29:51.287560 32502 solver.cpp:228] Iteration 48600, loss = 0.0286667\nI0823 08:29:51.287611 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:29:51.287634 32502 solver.cpp:244]     Train net output #1: loss = 0.0286642 (* 1 = 0.0286642 loss)\nI0823 08:29:51.415762 32502 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0823 08:33:31.564833 32502 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0823 08:35:39.607069 32502 solver.cpp:404]     Test net output #0: accuracy = 0.794601\nI0823 08:35:39.607461 32502 solver.cpp:404]     Test net output #1: loss = 0.90793 (* 1 = 0.90793 loss)\nI0823 08:35:41.696444 32502 solver.cpp:228] Iteration 48700, loss = 0.108624\nI0823 08:35:41.696493 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:35:41.696518 32502 solver.cpp:244]     Train net output #1: loss = 0.108621 (* 1 = 0.108621 loss)\nI0823 08:35:41.836110 32502 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0823 08:39:21.981070 32502 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0823 08:41:30.032752 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.7923\nI0823 08:41:30.033133 32502 solver.cpp:404]     Test net output #1: loss = 0.854454 (* 1 = 0.854454 loss)\nI0823 08:41:32.122448 32502 solver.cpp:228] Iteration 48800, loss = 0.0530478\nI0823 08:41:32.122495 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:41:32.122511 32502 solver.cpp:244]     Train net output #1: loss = 0.0530453 (* 1 = 0.0530453 loss)\nI0823 08:41:32.259807 32502 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0823 08:45:12.418123 32502 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0823 08:47:20.413699 32502 solver.cpp:404]     Test net output #0: accuracy = 0.823\nI0823 08:47:20.414067 32502 solver.cpp:404]     Test net output #1: loss = 0.723541 (* 1 = 0.723541 loss)\nI0823 08:47:22.502249 32502 solver.cpp:228] Iteration 48900, loss = 0.0452832\nI0823 08:47:22.502295 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:47:22.502311 32502 solver.cpp:244]     Train net output #1: loss = 0.0452807 (* 1 = 0.0452807 loss)\nI0823 08:47:22.638808 32502 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0823 08:51:02.826589 32502 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0823 08:53:10.837249 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7637\nI0823 08:53:10.837628 32502 solver.cpp:404]     Test net output #1: loss = 0.947284 (* 1 = 0.947284 loss)\nI0823 08:53:12.927278 32502 solver.cpp:228] Iteration 49000, loss = 0.0483966\nI0823 08:53:12.927327 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 08:53:12.927343 32502 solver.cpp:244]     Train net output #1: loss = 0.048394 (* 1 = 0.048394 loss)\nI0823 08:53:13.058346 32502 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0823 08:56:53.181447 32502 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0823 08:59:01.215132 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8111\nI0823 08:59:01.215517 32502 solver.cpp:404]     Test net output #1: 
loss = 0.837384 (* 1 = 0.837384 loss)\nI0823 08:59:03.304018 32502 solver.cpp:228] Iteration 49100, loss = 0.0695006\nI0823 08:59:03.304067 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 08:59:03.304085 32502 solver.cpp:244]     Train net output #1: loss = 0.0694981 (* 1 = 0.0694981 loss)\nI0823 08:59:03.437111 32502 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0823 09:02:43.599352 32502 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0823 09:04:51.638557 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7919\nI0823 09:04:51.638942 32502 solver.cpp:404]     Test net output #1: loss = 0.886665 (* 1 = 0.886665 loss)\nI0823 09:04:53.728093 32502 solver.cpp:228] Iteration 49200, loss = 0.00918327\nI0823 09:04:53.728140 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:04:53.728157 32502 solver.cpp:244]     Train net output #1: loss = 0.00918072 (* 1 = 0.00918072 loss)\nI0823 09:04:53.865684 32502 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0823 09:08:34.041970 32502 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0823 09:10:42.075734 32502 solver.cpp:404]     Test net output #0: accuracy = 0.737\nI0823 09:10:42.076114 32502 solver.cpp:404]     Test net output #1: loss = 1.33898 (* 1 = 1.33898 loss)\nI0823 09:10:44.164077 32502 solver.cpp:228] Iteration 49300, loss = 0.0215698\nI0823 09:10:44.164124 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 09:10:44.164140 32502 solver.cpp:244]     Train net output #1: loss = 0.0215673 (* 1 = 0.0215673 loss)\nI0823 09:10:44.296548 32502 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0823 09:14:24.442243 32502 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0823 09:16:32.489150 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8203\nI0823 09:16:32.489534 32502 solver.cpp:404]     Test net output #1: loss = 0.68868 (* 1 = 0.68868 loss)\nI0823 09:16:34.578714 32502 solver.cpp:228] Iteration 49400, loss = 0.0257822\nI0823 
09:16:34.578761 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:16:34.578784 32502 solver.cpp:244]     Train net output #1: loss = 0.0257796 (* 1 = 0.0257796 loss)\nI0823 09:16:34.717317 32502 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0823 09:20:14.801968 32502 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0823 09:22:22.846318 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8312\nI0823 09:22:22.846673 32502 solver.cpp:404]     Test net output #1: loss = 0.710601 (* 1 = 0.710601 loss)\nI0823 09:22:24.934481 32502 solver.cpp:228] Iteration 49500, loss = 0.0527459\nI0823 09:22:24.934527 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 09:22:24.934545 32502 solver.cpp:244]     Train net output #1: loss = 0.0527433 (* 1 = 0.0527433 loss)\nI0823 09:22:25.072437 32502 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0823 09:26:05.188030 32502 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0823 09:28:13.229043 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7965\nI0823 09:28:13.229429 32502 solver.cpp:404]     Test net output #1: loss = 0.766359 (* 1 = 0.766359 loss)\nI0823 09:28:15.316975 32502 solver.cpp:228] Iteration 49600, loss = 0.12183\nI0823 09:28:15.317023 32502 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0823 09:28:15.317039 32502 solver.cpp:244]     Train net output #1: loss = 0.121828 (* 1 = 0.121828 loss)\nI0823 09:28:15.457319 32502 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0823 09:31:55.603132 32502 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0823 09:34:03.641341 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7936\nI0823 09:34:03.641737 32502 solver.cpp:404]     Test net output #1: loss = 0.920107 (* 1 = 0.920107 loss)\nI0823 09:34:05.729971 32502 solver.cpp:228] Iteration 49700, loss = 0.050805\nI0823 09:34:05.730020 32502 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0823 09:34:05.730036 32502 solver.cpp:244]     Train 
net output #1: loss = 0.0508025 (* 1 = 0.0508025 loss)\nI0823 09:34:05.866652 32502 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0823 09:37:45.081312 32502 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0823 09:39:53.125022 32502 solver.cpp:404]     Test net output #0: accuracy = 0.7578\nI0823 09:39:53.125409 32502 solver.cpp:404]     Test net output #1: loss = 1.12311 (* 1 = 1.12311 loss)\nI0823 09:39:55.212977 32502 solver.cpp:228] Iteration 49800, loss = 0.00626928\nI0823 09:39:55.213024 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:39:55.213042 32502 solver.cpp:244]     Train net output #1: loss = 0.00626673 (* 1 = 0.00626673 loss)\nI0823 09:39:55.345741 32502 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0823 09:43:34.451227 32502 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0823 09:45:42.494079 32502 solver.cpp:404]     Test net output #0: accuracy = 0.727\nI0823 09:45:42.494464 32502 solver.cpp:404]     Test net output #1: loss = 1.21169 (* 1 = 1.21169 loss)\nI0823 09:45:44.583583 32502 solver.cpp:228] Iteration 49900, loss = 0.0549002\nI0823 09:45:44.583631 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 09:45:44.583647 32502 solver.cpp:244]     Train net output #1: loss = 0.0548976 (* 1 = 0.0548976 loss)\nI0823 09:45:44.710721 32502 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0823 09:49:23.726893 32502 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0823 09:51:31.771140 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8103\nI0823 09:51:31.771528 32502 solver.cpp:404]     Test net output #1: loss = 0.750615 (* 1 = 0.750615 loss)\nI0823 09:51:33.859925 32502 solver.cpp:228] Iteration 50000, loss = 0.0531595\nI0823 09:51:33.859973 32502 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0823 09:51:33.859990 32502 solver.cpp:244]     Train net output #1: loss = 0.0531569 (* 1 = 0.0531569 loss)\nI0823 09:51:33.991231 32502 sgd_solver.cpp:107] MultiStep Status: Iteration 
50000, step = 1\nI0823 09:51:33.991252 32502 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0823 09:55:13.135910 32502 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0823 09:57:21.180147 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8767\nI0823 09:57:21.180522 32502 solver.cpp:404]     Test net output #1: loss = 0.461097 (* 1 = 0.461097 loss)\nI0823 09:57:23.268971 32502 solver.cpp:228] Iteration 50100, loss = 0.0138057\nI0823 09:57:23.269016 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:57:23.269032 32502 solver.cpp:244]     Train net output #1: loss = 0.0138031 (* 1 = 0.0138031 loss)\nI0823 09:57:23.401195 32502 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0823 10:01:02.504014 32502 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0823 10:03:10.556807 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8853\nI0823 10:03:10.557199 32502 solver.cpp:404]     Test net output #1: loss = 0.421351 (* 1 = 0.421351 loss)\nI0823 10:03:12.646306 32502 solver.cpp:228] Iteration 50200, loss = 0.00487391\nI0823 10:03:12.646353 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:03:12.646368 32502 solver.cpp:244]     Train net output #1: loss = 0.00487131 (* 1 = 0.00487131 loss)\nI0823 10:03:12.777129 32502 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0823 10:06:51.819133 32502 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0823 10:08:59.884730 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8901\nI0823 10:08:59.885109 32502 solver.cpp:404]     Test net output #1: loss = 0.41103 (* 1 = 0.41103 loss)\nI0823 10:09:01.974402 32502 solver.cpp:228] Iteration 50300, loss = 0.00150947\nI0823 10:09:01.974445 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:09:01.974462 32502 solver.cpp:244]     Train net output #1: loss = 0.00150687 (* 1 = 0.00150687 loss)\nI0823 10:09:02.104127 32502 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0823 10:12:41.152336 32502 
solver.cpp:337] Iteration 50400, Testing net (#0)\nI0823 10:14:49.237047 32502 solver.cpp:404]     Test net output #0: accuracy = 0.892\nI0823 10:14:49.237433 32502 solver.cpp:404]     Test net output #1: loss = 0.408592 (* 1 = 0.408592 loss)\nI0823 10:14:51.326138 32502 solver.cpp:228] Iteration 50400, loss = 0.00359436\nI0823 10:14:51.326186 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:14:51.326201 32502 solver.cpp:244]     Train net output #1: loss = 0.00359177 (* 1 = 0.00359177 loss)\nI0823 10:14:51.447903 32502 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0823 10:18:30.435536 32502 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0823 10:20:38.477705 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8934\nI0823 10:20:38.478092 32502 solver.cpp:404]     Test net output #1: loss = 0.405003 (* 1 = 0.405003 loss)\nI0823 10:20:40.566082 32502 solver.cpp:228] Iteration 50500, loss = 0.00135772\nI0823 10:20:40.566128 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:20:40.566145 32502 solver.cpp:244]     Train net output #1: loss = 0.00135512 (* 1 = 0.00135512 loss)\nI0823 10:20:40.696965 32502 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0823 10:24:19.768635 32502 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0823 10:26:27.813654 32502 solver.cpp:404]     Test net output #0: accuracy = 0.892\nI0823 10:26:27.814083 32502 solver.cpp:404]     Test net output #1: loss = 0.405539 (* 1 = 0.405539 loss)\nI0823 10:26:29.901423 32502 solver.cpp:228] Iteration 50600, loss = 0.00197006\nI0823 10:26:29.901473 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:26:29.901489 32502 solver.cpp:244]     Train net output #1: loss = 0.00196747 (* 1 = 0.00196747 loss)\nI0823 10:26:30.034754 32502 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0823 10:30:09.005327 32502 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0823 10:32:17.044358 32502 solver.cpp:404]     Test net output #0: 
accuracy = 0.8925\nI0823 10:32:17.044701 32502 solver.cpp:404]     Test net output #1: loss = 0.405622 (* 1 = 0.405622 loss)\nI0823 10:32:19.133261 32502 solver.cpp:228] Iteration 50700, loss = 0.00173683\nI0823 10:32:19.133308 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:32:19.133325 32502 solver.cpp:244]     Train net output #1: loss = 0.00173424 (* 1 = 0.00173424 loss)\nI0823 10:32:19.261085 32502 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0823 10:35:58.267979 32502 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0823 10:38:06.306180 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8928\nI0823 10:38:06.306560 32502 solver.cpp:404]     Test net output #1: loss = 0.406547 (* 1 = 0.406547 loss)\nI0823 10:38:08.395370 32502 solver.cpp:228] Iteration 50800, loss = 0.000842073\nI0823 10:38:08.395417 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:38:08.395433 32502 solver.cpp:244]     Train net output #1: loss = 0.000839477 (* 1 = 0.000839477 loss)\nI0823 10:38:08.521195 32502 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0823 10:41:47.586786 32502 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0823 10:43:55.624595 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8905\nI0823 10:43:55.624989 32502 solver.cpp:404]     Test net output #1: loss = 0.413751 (* 1 = 0.413751 loss)\nI0823 10:43:57.714093 32502 solver.cpp:228] Iteration 50900, loss = 0.00133766\nI0823 10:43:57.714141 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:43:57.714166 32502 solver.cpp:244]     Train net output #1: loss = 0.00133506 (* 1 = 0.00133506 loss)\nI0823 10:43:57.848831 32502 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0823 10:47:36.926376 32502 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0823 10:49:44.935731 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8888\nI0823 10:49:44.936116 32502 solver.cpp:404]     Test net output #1: loss = 0.421811 (* 1 = 0.421811 
loss)\nI0823 10:49:47.024377 32502 solver.cpp:228] Iteration 51000, loss = 0.000891573\nI0823 10:49:47.024428 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:49:47.024452 32502 solver.cpp:244]     Train net output #1: loss = 0.000888977 (* 1 = 0.000888977 loss)\nI0823 10:49:47.154341 32502 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0823 10:53:26.141089 32502 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0823 10:55:34.156633 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8884\nI0823 10:55:34.157017 32502 solver.cpp:404]     Test net output #1: loss = 0.427915 (* 1 = 0.427915 loss)\nI0823 10:55:36.246291 32502 solver.cpp:228] Iteration 51100, loss = 0.00104303\nI0823 10:55:36.246343 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 10:55:36.246366 32502 solver.cpp:244]     Train net output #1: loss = 0.00104044 (* 1 = 0.00104044 loss)\nI0823 10:55:36.377012 32502 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0823 10:59:15.538859 32502 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0823 11:01:23.583626 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8874\nI0823 11:01:23.584014 32502 solver.cpp:404]     Test net output #1: loss = 0.434359 (* 1 = 0.434359 loss)\nI0823 11:01:25.672677 32502 solver.cpp:228] Iteration 51200, loss = 0.00125119\nI0823 11:01:25.672724 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:01:25.672740 32502 solver.cpp:244]     Train net output #1: loss = 0.0012486 (* 1 = 0.0012486 loss)\nI0823 11:01:25.800742 32502 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0823 11:05:04.810745 32502 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0823 11:07:12.791144 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8858\nI0823 11:07:12.791528 32502 solver.cpp:404]     Test net output #1: loss = 0.440018 (* 1 = 0.440018 loss)\nI0823 11:07:14.880707 32502 solver.cpp:228] Iteration 51300, loss = 0.00126085\nI0823 11:07:14.880754 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:07:14.880771 32502 solver.cpp:244]     Train net output #1: loss = 0.00125825 (* 1 = 0.00125825 loss)\nI0823 11:07:15.009402 32502 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0823 11:10:54.012339 32502 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0823 11:13:01.982532 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8851\nI0823 11:13:01.982858 32502 solver.cpp:404]     Test net output #1: loss = 0.447383 (* 1 = 0.447383 loss)\nI0823 11:13:04.071513 32502 solver.cpp:228] Iteration 51400, loss = 0.00137719\nI0823 11:13:04.071560 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:13:04.071578 32502 solver.cpp:244]     Train net output #1: loss = 0.0013746 (* 1 = 0.0013746 loss)\nI0823 11:13:04.199569 32502 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0823 11:16:43.217530 32502 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0823 11:18:51.175979 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8837\nI0823 11:18:51.176371 32502 solver.cpp:404]     Test net output #1: loss = 0.453728 (* 1 = 0.453728 loss)\nI0823 11:18:53.263424 32502 solver.cpp:228] Iteration 51500, loss = 0.000740403\nI0823 11:18:53.263471 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:18:53.263489 32502 solver.cpp:244]     Train net output #1: loss = 0.000737807 (* 1 = 0.000737807 loss)\nI0823 11:18:53.394459 32502 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0823 11:22:32.551928 32502 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0823 11:24:40.521136 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8821\nI0823 11:24:40.521528 32502 solver.cpp:404]     Test net output #1: loss = 0.460057 (* 1 = 0.460057 loss)\nI0823 11:24:42.611173 32502 solver.cpp:228] Iteration 51600, loss = 0.00196221\nI0823 11:24:42.611222 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:24:42.611238 32502 solver.cpp:244]     Train net output #1: 
loss = 0.00195962 (* 1 = 0.00195962 loss)\nI0823 11:24:42.737274 32502 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0823 11:28:21.895467 32502 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0823 11:30:29.858570 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8815\nI0823 11:30:29.858942 32502 solver.cpp:404]     Test net output #1: loss = 0.464125 (* 1 = 0.464125 loss)\nI0823 11:30:31.948694 32502 solver.cpp:228] Iteration 51700, loss = 0.000912108\nI0823 11:30:31.948745 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:30:31.948761 32502 solver.cpp:244]     Train net output #1: loss = 0.000909512 (* 1 = 0.000909512 loss)\nI0823 11:30:32.077498 32502 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0823 11:34:11.034713 32502 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0823 11:36:19.027046 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8797\nI0823 11:36:19.027422 32502 solver.cpp:404]     Test net output #1: loss = 0.467851 (* 1 = 0.467851 loss)\nI0823 11:36:21.117121 32502 solver.cpp:228] Iteration 51800, loss = 0.000972355\nI0823 11:36:21.117190 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:36:21.117207 32502 solver.cpp:244]     Train net output #1: loss = 0.000969759 (* 1 = 0.000969759 loss)\nI0823 11:36:21.248965 32502 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0823 11:40:01.779584 32502 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0823 11:42:11.406365 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8791\nI0823 11:42:11.406780 32502 solver.cpp:404]     Test net output #1: loss = 0.474307 (* 1 = 0.474307 loss)\nI0823 11:42:13.498258 32502 solver.cpp:228] Iteration 51900, loss = 0.000857222\nI0823 11:42:13.498322 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:42:13.498338 32502 solver.cpp:244]     Train net output #1: loss = 0.000854625 (* 1 = 0.000854625 loss)\nI0823 11:42:13.628222 32502 sgd_solver.cpp:166] Iteration 51900, lr = 
0.035\nI0823 11:45:54.244232 32502 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0823 11:48:03.864718 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8779\nI0823 11:48:03.865229 32502 solver.cpp:404]     Test net output #1: loss = 0.478427 (* 1 = 0.478427 loss)\nI0823 11:48:05.957968 32502 solver.cpp:228] Iteration 52000, loss = 0.000490444\nI0823 11:48:05.958039 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:48:05.958055 32502 solver.cpp:244]     Train net output #1: loss = 0.000487848 (* 1 = 0.000487848 loss)\nI0823 11:48:06.094424 32502 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0823 11:51:46.593302 32502 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0823 11:53:56.209331 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8772\nI0823 11:53:56.209772 32502 solver.cpp:404]     Test net output #1: loss = 0.481213 (* 1 = 0.481213 loss)\nI0823 11:53:58.301959 32502 solver.cpp:228] Iteration 52100, loss = 0.00101826\nI0823 11:53:58.302026 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:53:58.302042 32502 solver.cpp:244]     Train net output #1: loss = 0.00101566 (* 1 = 0.00101566 loss)\nI0823 11:53:58.439324 32502 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0823 11:57:38.976722 32502 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0823 11:59:48.614168 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8771\nI0823 11:59:48.614604 32502 solver.cpp:404]     Test net output #1: loss = 0.484212 (* 1 = 0.484212 loss)\nI0823 11:59:50.706647 32502 solver.cpp:228] Iteration 52200, loss = 0.000759602\nI0823 11:59:50.706709 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 11:59:50.706727 32502 solver.cpp:244]     Train net output #1: loss = 0.000757006 (* 1 = 0.000757006 loss)\nI0823 11:59:50.839901 32502 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0823 12:03:31.276574 32502 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0823 12:05:40.906994 32502 
solver.cpp:404]     Test net output #0: accuracy = 0.8759\nI0823 12:05:40.907447 32502 solver.cpp:404]     Test net output #1: loss = 0.488272 (* 1 = 0.488272 loss)\nI0823 12:05:42.999014 32502 solver.cpp:228] Iteration 52300, loss = 0.000519028\nI0823 12:05:42.999079 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:05:42.999096 32502 solver.cpp:244]     Train net output #1: loss = 0.000516431 (* 1 = 0.000516431 loss)\nI0823 12:05:43.132589 32502 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0823 12:09:23.653264 32502 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0823 12:11:33.266180 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8744\nI0823 12:11:33.266620 32502 solver.cpp:404]     Test net output #1: loss = 0.493661 (* 1 = 0.493661 loss)\nI0823 12:11:35.358472 32502 solver.cpp:228] Iteration 52400, loss = 0.000591858\nI0823 12:11:35.358537 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:11:35.358556 32502 solver.cpp:244]     Train net output #1: loss = 0.000589261 (* 1 = 0.000589261 loss)\nI0823 12:11:35.489503 32502 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0823 12:15:16.104305 32502 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0823 12:17:25.729074 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8734\nI0823 12:17:25.729499 32502 solver.cpp:404]     Test net output #1: loss = 0.49512 (* 1 = 0.49512 loss)\nI0823 12:17:27.821480 32502 solver.cpp:228] Iteration 52500, loss = 0.000502625\nI0823 12:17:27.821542 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:17:27.821560 32502 solver.cpp:244]     Train net output #1: loss = 0.000500028 (* 1 = 0.000500028 loss)\nI0823 12:17:27.954397 32502 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0823 12:21:08.572010 32502 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0823 12:23:18.192600 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8728\nI0823 12:23:18.193040 32502 solver.cpp:404]     Test net 
output #1: loss = 0.498346 (* 1 = 0.498346 loss)\nI0823 12:23:20.284260 32502 solver.cpp:228] Iteration 52600, loss = 0.000913238\nI0823 12:23:20.284323 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:23:20.284343 32502 solver.cpp:244]     Train net output #1: loss = 0.000910641 (* 1 = 0.000910641 loss)\nI0823 12:23:20.416733 32502 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0823 12:27:01.005952 32502 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0823 12:29:10.698602 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8725\nI0823 12:29:10.699038 32502 solver.cpp:404]     Test net output #1: loss = 0.499407 (* 1 = 0.499407 loss)\nI0823 12:29:12.790936 32502 solver.cpp:228] Iteration 52700, loss = 0.000884243\nI0823 12:29:12.791007 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:29:12.791023 32502 solver.cpp:244]     Train net output #1: loss = 0.000881646 (* 1 = 0.000881646 loss)\nI0823 12:29:12.920413 32502 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0823 12:32:53.611991 32502 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0823 12:35:03.285980 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8724\nI0823 12:35:03.286417 32502 solver.cpp:404]     Test net output #1: loss = 0.502765 (* 1 = 0.502765 loss)\nI0823 12:35:05.377694 32502 solver.cpp:228] Iteration 52800, loss = 0.000645201\nI0823 12:35:05.377756 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:35:05.377774 32502 solver.cpp:244]     Train net output #1: loss = 0.000642604 (* 1 = 0.000642604 loss)\nI0823 12:35:05.514366 32502 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0823 12:38:46.027779 32502 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0823 12:40:55.712308 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8717\nI0823 12:40:55.712723 32502 solver.cpp:404]     Test net output #1: loss = 0.505154 (* 1 = 0.505154 loss)\nI0823 12:40:57.804517 32502 solver.cpp:228] Iteration 52900, loss 
= 0.000586039\nI0823 12:40:57.804581 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:40:57.804599 32502 solver.cpp:244]     Train net output #1: loss = 0.000583442 (* 1 = 0.000583442 loss)\nI0823 12:40:57.934594 32502 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0823 12:44:38.450847 32502 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0823 12:46:48.137933 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8712\nI0823 12:46:48.138392 32502 solver.cpp:404]     Test net output #1: loss = 0.507762 (* 1 = 0.507762 loss)\nI0823 12:46:50.229756 32502 solver.cpp:228] Iteration 53000, loss = 0.000428992\nI0823 12:46:50.229820 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:46:50.229836 32502 solver.cpp:244]     Train net output #1: loss = 0.000426395 (* 1 = 0.000426395 loss)\nI0823 12:46:50.359688 32502 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0823 12:50:30.900497 32502 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0823 12:52:40.600232 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8713\nI0823 12:52:40.600677 32502 solver.cpp:404]     Test net output #1: loss = 0.507982 (* 1 = 0.507982 loss)\nI0823 12:52:42.692968 32502 solver.cpp:228] Iteration 53100, loss = 0.000709503\nI0823 12:52:42.693037 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 12:52:42.693053 32502 solver.cpp:244]     Train net output #1: loss = 0.000706906 (* 1 = 0.000706906 loss)\nI0823 12:52:42.824589 32502 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0823 12:56:23.352957 32502 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0823 12:58:33.068652 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8717\nI0823 12:58:33.069111 32502 solver.cpp:404]     Test net output #1: loss = 0.508236 (* 1 = 0.508236 loss)\nI0823 12:58:35.161016 32502 solver.cpp:228] Iteration 53200, loss = 0.000502864\nI0823 12:58:35.161082 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
12:58:35.161099 32502 solver.cpp:244]     Train net output #1: loss = 0.000500268 (* 1 = 0.000500268 loss)\nI0823 12:58:35.294018 32502 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0823 13:02:15.769418 32502 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0823 13:04:25.427328 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8715\nI0823 13:04:25.427783 32502 solver.cpp:404]     Test net output #1: loss = 0.507562 (* 1 = 0.507562 loss)\nI0823 13:04:27.520023 32502 solver.cpp:228] Iteration 53300, loss = 0.000371821\nI0823 13:04:27.520088 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:04:27.520107 32502 solver.cpp:244]     Train net output #1: loss = 0.000369225 (* 1 = 0.000369225 loss)\nI0823 13:04:27.648614 32502 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0823 13:08:08.160323 32502 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0823 13:10:17.797718 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8714\nI0823 13:10:17.798166 32502 solver.cpp:404]     Test net output #1: loss = 0.509486 (* 1 = 0.509486 loss)\nI0823 13:10:19.890187 32502 solver.cpp:228] Iteration 53400, loss = 0.000649272\nI0823 13:10:19.890250 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:10:19.890269 32502 solver.cpp:244]     Train net output #1: loss = 0.000646676 (* 1 = 0.000646676 loss)\nI0823 13:10:20.020792 32502 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0823 13:14:00.675175 32502 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0823 13:16:10.329493 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8707\nI0823 13:16:10.329952 32502 solver.cpp:404]     Test net output #1: loss = 0.510796 (* 1 = 0.510796 loss)\nI0823 13:16:12.421401 32502 solver.cpp:228] Iteration 53500, loss = 0.000446187\nI0823 13:16:12.421465 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:16:12.421484 32502 solver.cpp:244]     Train net output #1: loss = 0.000443591 (* 1 = 0.000443591 loss)\nI0823 
13:16:12.552655 32502 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0823 13:19:53.065490 32502 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0823 13:22:02.712839 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8703\nI0823 13:22:02.713205 32502 solver.cpp:404]     Test net output #1: loss = 0.512627 (* 1 = 0.512627 loss)\nI0823 13:22:04.805567 32502 solver.cpp:228] Iteration 53600, loss = 0.00111788\nI0823 13:22:04.805629 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:22:04.805646 32502 solver.cpp:244]     Train net output #1: loss = 0.00111528 (* 1 = 0.00111528 loss)\nI0823 13:22:04.935931 32502 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0823 13:25:45.549793 32502 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0823 13:27:55.207378 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8705\nI0823 13:27:55.207804 32502 solver.cpp:404]     Test net output #1: loss = 0.511366 (* 1 = 0.511366 loss)\nI0823 13:27:57.313038 32502 solver.cpp:228] Iteration 53700, loss = 0.000610634\nI0823 13:27:57.313104 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:27:57.313123 32502 solver.cpp:244]     Train net output #1: loss = 0.000608038 (* 1 = 0.000608038 loss)\nI0823 13:27:57.431265 32502 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0823 13:31:37.990363 32502 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0823 13:33:47.636656 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8704\nI0823 13:33:47.637084 32502 solver.cpp:404]     Test net output #1: loss = 0.51132 (* 1 = 0.51132 loss)\nI0823 13:33:49.728935 32502 solver.cpp:228] Iteration 53800, loss = 0.000618411\nI0823 13:33:49.729004 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:33:49.729022 32502 solver.cpp:244]     Train net output #1: loss = 0.000615815 (* 1 = 0.000615815 loss)\nI0823 13:33:49.871708 32502 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0823 13:37:30.467857 32502 solver.cpp:337] 
Iteration 53900, Testing net (#0)\nI0823 13:39:40.097622 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 13:39:40.098062 32502 solver.cpp:404]     Test net output #1: loss = 0.512612 (* 1 = 0.512612 loss)\nI0823 13:39:42.189376 32502 solver.cpp:228] Iteration 53900, loss = 0.000765121\nI0823 13:39:42.189438 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:39:42.189456 32502 solver.cpp:244]     Train net output #1: loss = 0.000762524 (* 1 = 0.000762524 loss)\nI0823 13:39:42.320857 32502 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0823 13:43:22.778019 32502 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0823 13:45:32.414757 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8706\nI0823 13:45:32.415194 32502 solver.cpp:404]     Test net output #1: loss = 0.511362 (* 1 = 0.511362 loss)\nI0823 13:45:34.507697 32502 solver.cpp:228] Iteration 54000, loss = 0.000472545\nI0823 13:45:34.507762 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:45:34.507781 32502 solver.cpp:244]     Train net output #1: loss = 0.000469949 (* 1 = 0.000469949 loss)\nI0823 13:45:34.643733 32502 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0823 13:49:15.179397 32502 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0823 13:51:24.820734 32502 solver.cpp:404]     Test net output #0: accuracy = 0.87\nI0823 13:51:24.821172 32502 solver.cpp:404]     Test net output #1: loss = 0.513176 (* 1 = 0.513176 loss)\nI0823 13:51:26.913234 32502 solver.cpp:228] Iteration 54100, loss = 0.000821236\nI0823 13:51:26.913300 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:51:26.913317 32502 solver.cpp:244]     Train net output #1: loss = 0.00081864 (* 1 = 0.00081864 loss)\nI0823 13:51:27.043455 32502 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0823 13:55:07.547233 32502 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0823 13:57:17.186830 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.8705\nI0823 13:57:17.187275 32502 solver.cpp:404]     Test net output #1: loss = 0.512291 (* 1 = 0.512291 loss)\nI0823 13:57:19.278980 32502 solver.cpp:228] Iteration 54200, loss = 0.000610701\nI0823 13:57:19.279043 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 13:57:19.279060 32502 solver.cpp:244]     Train net output #1: loss = 0.000608104 (* 1 = 0.000608104 loss)\nI0823 13:57:19.409593 32502 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0823 14:00:59.897446 32502 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0823 14:03:09.511554 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8711\nI0823 14:03:09.512008 32502 solver.cpp:404]     Test net output #1: loss = 0.511118 (* 1 = 0.511118 loss)\nI0823 14:03:11.604352 32502 solver.cpp:228] Iteration 54300, loss = 0.000584178\nI0823 14:03:11.604414 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:03:11.604432 32502 solver.cpp:244]     Train net output #1: loss = 0.000581581 (* 1 = 0.000581581 loss)\nI0823 14:03:11.740089 32502 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0823 14:06:52.250433 32502 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0823 14:09:01.905419 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 14:09:01.905845 32502 solver.cpp:404]     Test net output #1: loss = 0.512583 (* 1 = 0.512583 loss)\nI0823 14:09:03.998229 32502 solver.cpp:228] Iteration 54400, loss = 0.000676001\nI0823 14:09:03.998292 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:09:03.998311 32502 solver.cpp:244]     Train net output #1: loss = 0.000673405 (* 1 = 0.000673405 loss)\nI0823 14:09:04.132004 32502 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0823 14:12:44.664503 32502 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0823 14:14:54.325093 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 14:14:54.325518 32502 solver.cpp:404]     Test net output #1: loss = 0.513069 (* 1 = 0.513069 
loss)\nI0823 14:14:56.417789 32502 solver.cpp:228] Iteration 54500, loss = 0.00057178\nI0823 14:14:56.417855 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:14:56.417871 32502 solver.cpp:244]     Train net output #1: loss = 0.000569183 (* 1 = 0.000569183 loss)\nI0823 14:14:56.538689 32502 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0823 14:18:35.912381 32502 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0823 14:20:45.744194 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8692\nI0823 14:20:45.744617 32502 solver.cpp:404]     Test net output #1: loss = 0.513164 (* 1 = 0.513164 loss)\nI0823 14:20:47.836081 32502 solver.cpp:228] Iteration 54600, loss = 0.000711467\nI0823 14:20:47.836148 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:20:47.836172 32502 solver.cpp:244]     Train net output #1: loss = 0.00070887 (* 1 = 0.00070887 loss)\nI0823 14:20:47.963201 32502 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0823 14:24:27.347892 32502 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0823 14:26:37.051704 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8697\nI0823 14:26:37.052139 32502 solver.cpp:404]     Test net output #1: loss = 0.511473 (* 1 = 0.511473 loss)\nI0823 14:26:39.144937 32502 solver.cpp:228] Iteration 54700, loss = 0.000621825\nI0823 14:26:39.145004 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:26:39.145030 32502 solver.cpp:244]     Train net output #1: loss = 0.000619228 (* 1 = 0.000619228 loss)\nI0823 14:26:39.265058 32502 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0823 14:30:18.620157 32502 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0823 14:32:28.283314 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 14:32:28.283730 32502 solver.cpp:404]     Test net output #1: loss = 0.511045 (* 1 = 0.511045 loss)\nI0823 14:32:30.376652 32502 solver.cpp:228] Iteration 54800, loss = 0.00051422\nI0823 14:32:30.376718 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:32:30.376744 32502 solver.cpp:244]     Train net output #1: loss = 0.000511624 (* 1 = 0.000511624 loss)\nI0823 14:32:30.516950 32502 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0823 14:36:11.149832 32502 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0823 14:38:20.820803 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8697\nI0823 14:38:20.821276 32502 solver.cpp:404]     Test net output #1: loss = 0.510404 (* 1 = 0.510404 loss)\nI0823 14:38:22.913401 32502 solver.cpp:228] Iteration 54900, loss = 0.000487393\nI0823 14:38:22.913467 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:38:22.913493 32502 solver.cpp:244]     Train net output #1: loss = 0.000484797 (* 1 = 0.000484797 loss)\nI0823 14:38:23.047406 32502 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0823 14:42:03.732419 32502 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0823 14:44:13.460935 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8692\nI0823 14:44:13.461395 32502 solver.cpp:404]     Test net output #1: loss = 0.512526 (* 1 = 0.512526 loss)\nI0823 14:44:15.552598 32502 solver.cpp:228] Iteration 55000, loss = 0.0004654\nI0823 14:44:15.552664 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:44:15.552690 32502 solver.cpp:244]     Train net output #1: loss = 0.000462803 (* 1 = 0.000462803 loss)\nI0823 14:44:15.685804 32502 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0823 14:47:56.324509 32502 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0823 14:50:06.042999 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8691\nI0823 14:50:06.043448 32502 solver.cpp:404]     Test net output #1: loss = 0.513493 (* 1 = 0.513493 loss)\nI0823 14:50:08.136023 32502 solver.cpp:228] Iteration 55100, loss = 0.00077395\nI0823 14:50:08.136090 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:50:08.136116 32502 solver.cpp:244]     Train net 
output #1: loss = 0.000771353 (* 1 = 0.000771353 loss)\nI0823 14:50:08.273382 32502 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0823 14:53:49.270175 32502 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0823 14:55:58.988772 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8693\nI0823 14:55:58.989249 32502 solver.cpp:404]     Test net output #1: loss = 0.510658 (* 1 = 0.510658 loss)\nI0823 14:56:01.082619 32502 solver.cpp:228] Iteration 55200, loss = 0.000684772\nI0823 14:56:01.082689 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 14:56:01.082712 32502 solver.cpp:244]     Train net output #1: loss = 0.000682175 (* 1 = 0.000682175 loss)\nI0823 14:56:01.222717 32502 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0823 14:59:42.036134 32502 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0823 15:01:51.774731 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8695\nI0823 15:01:51.775190 32502 solver.cpp:404]     Test net output #1: loss = 0.507647 (* 1 = 0.507647 loss)\nI0823 15:01:53.867362 32502 solver.cpp:228] Iteration 55300, loss = 0.000762021\nI0823 15:01:53.867427 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:01:53.867452 32502 solver.cpp:244]     Train net output #1: loss = 0.000759424 (* 1 = 0.000759424 loss)\nI0823 15:01:53.998317 32502 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0823 15:05:34.759125 32502 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0823 15:07:44.524567 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8697\nI0823 15:07:44.525017 32502 solver.cpp:404]     Test net output #1: loss = 0.506852 (* 1 = 0.506852 loss)\nI0823 15:07:46.616957 32502 solver.cpp:228] Iteration 55400, loss = 0.000478577\nI0823 15:07:46.617027 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:07:46.617051 32502 solver.cpp:244]     Train net output #1: loss = 0.000475981 (* 1 = 0.000475981 loss)\nI0823 15:07:46.756716 32502 sgd_solver.cpp:166] Iteration 
55400, lr = 0.035\nI0823 15:11:27.818295 32502 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0823 15:13:37.563462 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8695\nI0823 15:13:37.563925 32502 solver.cpp:404]     Test net output #1: loss = 0.508065 (* 1 = 0.508065 loss)\nI0823 15:13:39.656801 32502 solver.cpp:228] Iteration 55500, loss = 0.000656363\nI0823 15:13:39.656873 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:13:39.656900 32502 solver.cpp:244]     Train net output #1: loss = 0.000653767 (* 1 = 0.000653767 loss)\nI0823 15:13:39.796835 32502 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0823 15:17:20.741289 32502 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0823 15:19:30.436102 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8696\nI0823 15:19:30.436544 32502 solver.cpp:404]     Test net output #1: loss = 0.506841 (* 1 = 0.506841 loss)\nI0823 15:19:32.529734 32502 solver.cpp:228] Iteration 55600, loss = 0.000755522\nI0823 15:19:32.529804 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:19:32.529830 32502 solver.cpp:244]     Train net output #1: loss = 0.000752926 (* 1 = 0.000752926 loss)\nI0823 15:19:32.664276 32502 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0823 15:23:13.284072 32502 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0823 15:25:22.972095 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 15:25:22.972522 32502 solver.cpp:404]     Test net output #1: loss = 0.506046 (* 1 = 0.506046 loss)\nI0823 15:25:25.065608 32502 solver.cpp:228] Iteration 55700, loss = 0.000688554\nI0823 15:25:25.065675 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:25:25.065701 32502 solver.cpp:244]     Train net output #1: loss = 0.000685958 (* 1 = 0.000685958 loss)\nI0823 15:25:25.197871 32502 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0823 15:29:06.030046 32502 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0823 
15:31:15.725553 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8701\nI0823 15:31:15.726001 32502 solver.cpp:404]     Test net output #1: loss = 0.504852 (* 1 = 0.504852 loss)\nI0823 15:31:17.818933 32502 solver.cpp:228] Iteration 55800, loss = 0.000648712\nI0823 15:31:17.819001 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:31:17.819026 32502 solver.cpp:244]     Train net output #1: loss = 0.000646116 (* 1 = 0.000646116 loss)\nI0823 15:31:17.956594 32502 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0823 15:34:58.842442 32502 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0823 15:37:08.548781 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8703\nI0823 15:37:08.549217 32502 solver.cpp:404]     Test net output #1: loss = 0.50427 (* 1 = 0.50427 loss)\nI0823 15:37:10.641786 32502 solver.cpp:228] Iteration 55900, loss = 0.000563355\nI0823 15:37:10.641855 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:37:10.641880 32502 solver.cpp:244]     Train net output #1: loss = 0.000560759 (* 1 = 0.000560759 loss)\nI0823 15:37:10.774997 32502 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0823 15:40:51.530120 32502 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0823 15:43:01.230453 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8704\nI0823 15:43:01.230922 32502 solver.cpp:404]     Test net output #1: loss = 0.503625 (* 1 = 0.503625 loss)\nI0823 15:43:03.323457 32502 solver.cpp:228] Iteration 56000, loss = 0.000702578\nI0823 15:43:03.323521 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:43:03.323546 32502 solver.cpp:244]     Train net output #1: loss = 0.000699981 (* 1 = 0.000699981 loss)\nI0823 15:43:03.454874 32502 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0823 15:46:44.207983 32502 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0823 15:48:53.925331 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8706\nI0823 15:48:53.925762 32502 
solver.cpp:404]     Test net output #1: loss = 0.503222 (* 1 = 0.503222 loss)\nI0823 15:48:56.017621 32502 solver.cpp:228] Iteration 56100, loss = 0.000755743\nI0823 15:48:56.017684 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:48:56.017702 32502 solver.cpp:244]     Train net output #1: loss = 0.000753147 (* 1 = 0.000753147 loss)\nI0823 15:48:56.152268 32502 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0823 15:52:37.107908 32502 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0823 15:54:46.811259 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8702\nI0823 15:54:46.811681 32502 solver.cpp:404]     Test net output #1: loss = 0.502469 (* 1 = 0.502469 loss)\nI0823 15:54:48.904405 32502 solver.cpp:228] Iteration 56200, loss = 0.000522117\nI0823 15:54:48.904469 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 15:54:48.904486 32502 solver.cpp:244]     Train net output #1: loss = 0.000519521 (* 1 = 0.000519521 loss)\nI0823 15:54:49.040349 32502 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0823 15:58:29.892150 32502 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0823 16:00:39.575050 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8711\nI0823 16:00:39.575466 32502 solver.cpp:404]     Test net output #1: loss = 0.50225 (* 1 = 0.50225 loss)\nI0823 16:00:41.668079 32502 solver.cpp:228] Iteration 56300, loss = 0.000380564\nI0823 16:00:41.668143 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:00:41.668162 32502 solver.cpp:244]     Train net output #1: loss = 0.000377968 (* 1 = 0.000377968 loss)\nI0823 16:00:41.795227 32502 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0823 16:04:22.572696 32502 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0823 16:06:32.252202 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8705\nI0823 16:06:32.252665 32502 solver.cpp:404]     Test net output #1: loss = 0.50167 (* 1 = 0.50167 loss)\nI0823 16:06:34.345901 32502 
solver.cpp:228] Iteration 56400, loss = 0.000602231\nI0823 16:06:34.345965 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:06:34.345988 32502 solver.cpp:244]     Train net output #1: loss = 0.000599635 (* 1 = 0.000599635 loss)\nI0823 16:06:34.479105 32502 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0823 16:10:15.296519 32502 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0823 16:12:24.973232 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8704\nI0823 16:12:24.973701 32502 solver.cpp:404]     Test net output #1: loss = 0.501539 (* 1 = 0.501539 loss)\nI0823 16:12:27.067137 32502 solver.cpp:228] Iteration 56500, loss = 0.000508679\nI0823 16:12:27.067201 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:12:27.067219 32502 solver.cpp:244]     Train net output #1: loss = 0.000506083 (* 1 = 0.000506083 loss)\nI0823 16:12:27.197546 32502 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0823 16:16:07.834560 32502 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0823 16:18:17.507905 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8697\nI0823 16:18:17.508359 32502 solver.cpp:404]     Test net output #1: loss = 0.503513 (* 1 = 0.503513 loss)\nI0823 16:18:19.600633 32502 solver.cpp:228] Iteration 56600, loss = 0.00091513\nI0823 16:18:19.600697 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:18:19.600714 32502 solver.cpp:244]     Train net output #1: loss = 0.000912533 (* 1 = 0.000912533 loss)\nI0823 16:18:19.730700 32502 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0823 16:22:00.409478 32502 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0823 16:24:10.076750 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8694\nI0823 16:24:10.077193 32502 solver.cpp:404]     Test net output #1: loss = 0.502292 (* 1 = 0.502292 loss)\nI0823 16:24:12.168891 32502 solver.cpp:228] Iteration 56700, loss = 0.000462894\nI0823 16:24:12.168953 32502 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0823 16:24:12.168975 32502 solver.cpp:244]     Train net output #1: loss = 0.000460298 (* 1 = 0.000460298 loss)\nI0823 16:24:12.302904 32502 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0823 16:27:52.888353 32502 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0823 16:30:02.566329 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8708\nI0823 16:30:02.566737 32502 solver.cpp:404]     Test net output #1: loss = 0.499553 (* 1 = 0.499553 loss)\nI0823 16:30:04.658629 32502 solver.cpp:228] Iteration 56800, loss = 0.000593633\nI0823 16:30:04.658691 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:30:04.658710 32502 solver.cpp:244]     Train net output #1: loss = 0.000591037 (* 1 = 0.000591037 loss)\nI0823 16:30:04.792630 32502 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0823 16:33:45.611600 32502 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0823 16:35:55.288761 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8696\nI0823 16:35:55.289197 32502 solver.cpp:404]     Test net output #1: loss = 0.500779 (* 1 = 0.500779 loss)\nI0823 16:35:57.381355 32502 solver.cpp:228] Iteration 56900, loss = 0.000519015\nI0823 16:35:57.381417 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:35:57.381435 32502 solver.cpp:244]     Train net output #1: loss = 0.000516419 (* 1 = 0.000516419 loss)\nI0823 16:35:57.518685 32502 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0823 16:39:38.283423 32502 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0823 16:41:47.925107 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8702\nI0823 16:41:47.925529 32502 solver.cpp:404]     Test net output #1: loss = 0.500632 (* 1 = 0.500632 loss)\nI0823 16:41:50.017508 32502 solver.cpp:228] Iteration 57000, loss = 0.000496647\nI0823 16:41:50.017570 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:41:50.017590 32502 solver.cpp:244]     Train net output #1: loss = 0.000494051 (* 
1 = 0.000494051 loss)\nI0823 16:41:50.156311 32502 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0823 16:45:30.798125 32502 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0823 16:47:40.427094 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8702\nI0823 16:47:40.427523 32502 solver.cpp:404]     Test net output #1: loss = 0.500252 (* 1 = 0.500252 loss)\nI0823 16:47:42.519330 32502 solver.cpp:228] Iteration 57100, loss = 0.000813667\nI0823 16:47:42.519395 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:47:42.519414 32502 solver.cpp:244]     Train net output #1: loss = 0.000811071 (* 1 = 0.000811071 loss)\nI0823 16:47:42.654135 32502 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0823 16:51:23.458060 32502 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0823 16:53:33.080749 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8711\nI0823 16:53:33.081173 32502 solver.cpp:404]     Test net output #1: loss = 0.497711 (* 1 = 0.497711 loss)\nI0823 16:53:35.173496 32502 solver.cpp:228] Iteration 57200, loss = 0.0006958\nI0823 16:53:35.173557 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:53:35.173575 32502 solver.cpp:244]     Train net output #1: loss = 0.000693204 (* 1 = 0.000693204 loss)\nI0823 16:53:35.311249 32502 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0823 16:57:16.183590 32502 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0823 16:59:25.866483 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8716\nI0823 16:59:25.866933 32502 solver.cpp:404]     Test net output #1: loss = 0.495723 (* 1 = 0.495723 loss)\nI0823 16:59:27.958907 32502 solver.cpp:228] Iteration 57300, loss = 0.000267843\nI0823 16:59:27.958976 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 16:59:27.958994 32502 solver.cpp:244]     Train net output #1: loss = 0.000265247 (* 1 = 0.000265247 loss)\nI0823 16:59:28.091691 32502 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0823 
17:03:08.961913 32502 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0823 17:05:18.668777 32502 solver.cpp:404]     Test net output #0: accuracy = 0.871\nI0823 17:05:18.669245 32502 solver.cpp:404]     Test net output #1: loss = 0.498524 (* 1 = 0.498524 loss)\nI0823 17:05:20.761533 32502 solver.cpp:228] Iteration 57400, loss = 0.000628677\nI0823 17:05:20.761598 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:05:20.761616 32502 solver.cpp:244]     Train net output #1: loss = 0.000626081 (* 1 = 0.000626081 loss)\nI0823 17:05:20.896230 32502 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0823 17:09:01.600772 32502 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0823 17:11:11.302269 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8705\nI0823 17:11:11.302700 32502 solver.cpp:404]     Test net output #1: loss = 0.498779 (* 1 = 0.498779 loss)\nI0823 17:11:13.394811 32502 solver.cpp:228] Iteration 57500, loss = 0.000466555\nI0823 17:11:13.394876 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:11:13.394894 32502 solver.cpp:244]     Train net output #1: loss = 0.000463959 (* 1 = 0.000463959 loss)\nI0823 17:11:13.532076 32502 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0823 17:14:54.403748 32502 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0823 17:17:04.105774 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8708\nI0823 17:17:04.106243 32502 solver.cpp:404]     Test net output #1: loss = 0.499199 (* 1 = 0.499199 loss)\nI0823 17:17:06.198278 32502 solver.cpp:228] Iteration 57600, loss = 0.000945475\nI0823 17:17:06.198344 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:17:06.198362 32502 solver.cpp:244]     Train net output #1: loss = 0.000942879 (* 1 = 0.000942879 loss)\nI0823 17:17:06.332263 32502 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0823 17:20:47.162003 32502 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0823 17:22:56.866410 32502 solver.cpp:404] 
    Test net output #0: accuracy = 0.8712\nI0823 17:22:56.866807 32502 solver.cpp:404]     Test net output #1: loss = 0.499015 (* 1 = 0.499015 loss)\nI0823 17:22:58.958328 32502 solver.cpp:228] Iteration 57700, loss = 0.000580448\nI0823 17:22:58.958391 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:22:58.958408 32502 solver.cpp:244]     Train net output #1: loss = 0.000577852 (* 1 = 0.000577852 loss)\nI0823 17:22:59.090392 32502 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0823 17:26:39.837260 32502 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0823 17:28:49.562432 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8714\nI0823 17:28:49.562893 32502 solver.cpp:404]     Test net output #1: loss = 0.496644 (* 1 = 0.496644 loss)\nI0823 17:28:51.654775 32502 solver.cpp:228] Iteration 57800, loss = 0.00049228\nI0823 17:28:51.654840 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:28:51.654858 32502 solver.cpp:244]     Train net output #1: loss = 0.000489684 (* 1 = 0.000489684 loss)\nI0823 17:28:51.786669 32502 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0823 17:32:32.528928 32502 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0823 17:34:42.177625 32502 solver.cpp:404]     Test net output #0: accuracy = 0.872\nI0823 17:34:42.178073 32502 solver.cpp:404]     Test net output #1: loss = 0.498135 (* 1 = 0.498135 loss)\nI0823 17:34:44.269641 32502 solver.cpp:228] Iteration 57900, loss = 0.000473856\nI0823 17:34:44.269704 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:34:44.269722 32502 solver.cpp:244]     Train net output #1: loss = 0.00047126 (* 1 = 0.00047126 loss)\nI0823 17:34:44.407672 32502 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0823 17:38:25.178289 32502 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0823 17:40:34.837214 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8717\nI0823 17:40:34.837669 32502 solver.cpp:404]     Test net output #1: loss = 
0.496979 (* 1 = 0.496979 loss)\nI0823 17:40:36.929800 32502 solver.cpp:228] Iteration 58000, loss = 0.000580617\nI0823 17:40:36.929864 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:40:36.929883 32502 solver.cpp:244]     Train net output #1: loss = 0.000578021 (* 1 = 0.000578021 loss)\nI0823 17:40:37.067477 32502 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0823 17:44:17.797297 32502 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0823 17:46:27.431565 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8716\nI0823 17:46:27.432006 32502 solver.cpp:404]     Test net output #1: loss = 0.495907 (* 1 = 0.495907 loss)\nI0823 17:46:29.522356 32502 solver.cpp:228] Iteration 58100, loss = 0.000847053\nI0823 17:46:29.522418 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:46:29.522436 32502 solver.cpp:244]     Train net output #1: loss = 0.000844458 (* 1 = 0.000844458 loss)\nI0823 17:46:29.661056 32502 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0823 17:50:10.330586 32502 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0823 17:52:20.124644 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8723\nI0823 17:52:20.125061 32502 solver.cpp:404]     Test net output #1: loss = 0.494808 (* 1 = 0.494808 loss)\nI0823 17:52:22.217964 32502 solver.cpp:228] Iteration 58200, loss = 0.000489898\nI0823 17:52:22.218029 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:52:22.218047 32502 solver.cpp:244]     Train net output #1: loss = 0.000487302 (* 1 = 0.000487302 loss)\nI0823 17:52:22.356843 32502 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0823 17:56:03.214395 32502 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0823 17:58:12.860533 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8723\nI0823 17:58:12.860986 32502 solver.cpp:404]     Test net output #1: loss = 0.495113 (* 1 = 0.495113 loss)\nI0823 17:58:14.953829 32502 solver.cpp:228] Iteration 58300, loss = 
0.000483649\nI0823 17:58:14.953898 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 17:58:14.953917 32502 solver.cpp:244]     Train net output #1: loss = 0.000481053 (* 1 = 0.000481053 loss)\nI0823 17:58:15.091974 32502 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0823 18:01:55.728394 32502 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0823 18:04:05.351109 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8722\nI0823 18:04:05.351547 32502 solver.cpp:404]     Test net output #1: loss = 0.494401 (* 1 = 0.494401 loss)\nI0823 18:04:07.443238 32502 solver.cpp:228] Iteration 58400, loss = 0.000537855\nI0823 18:04:07.443302 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:04:07.443321 32502 solver.cpp:244]     Train net output #1: loss = 0.000535259 (* 1 = 0.000535259 loss)\nI0823 18:04:07.577584 32502 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0823 18:07:48.086946 32502 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0823 18:09:57.715952 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0823 18:09:57.716374 32502 solver.cpp:404]     Test net output #1: loss = 0.492414 (* 1 = 0.492414 loss)\nI0823 18:09:59.808279 32502 solver.cpp:228] Iteration 58500, loss = 0.000613092\nI0823 18:09:59.808346 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:09:59.808363 32502 solver.cpp:244]     Train net output #1: loss = 0.000610497 (* 1 = 0.000610497 loss)\nI0823 18:09:59.944028 32502 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0823 18:13:40.556574 32502 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0823 18:15:50.201146 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8723\nI0823 18:15:50.201575 32502 solver.cpp:404]     Test net output #1: loss = 0.49449 (* 1 = 0.49449 loss)\nI0823 18:15:52.294566 32502 solver.cpp:228] Iteration 58600, loss = 0.000769267\nI0823 18:15:52.294631 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:15:52.294649 
32502 solver.cpp:244]     Train net output #1: loss = 0.000766671 (* 1 = 0.000766671 loss)\nI0823 18:15:52.431443 32502 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0823 18:19:32.949198 32502 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0823 18:21:42.609910 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8724\nI0823 18:21:42.610348 32502 solver.cpp:404]     Test net output #1: loss = 0.493432 (* 1 = 0.493432 loss)\nI0823 18:21:44.703142 32502 solver.cpp:228] Iteration 58700, loss = 0.000504373\nI0823 18:21:44.703207 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:21:44.703225 32502 solver.cpp:244]     Train net output #1: loss = 0.000501777 (* 1 = 0.000501777 loss)\nI0823 18:21:44.840626 32502 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0823 18:25:25.485471 32502 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0823 18:27:35.137940 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8731\nI0823 18:27:35.138375 32502 solver.cpp:404]     Test net output #1: loss = 0.490476 (* 1 = 0.490476 loss)\nI0823 18:27:37.229787 32502 solver.cpp:228] Iteration 58800, loss = 0.00042902\nI0823 18:27:37.229853 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:27:37.229871 32502 solver.cpp:244]     Train net output #1: loss = 0.000426424 (* 1 = 0.000426424 loss)\nI0823 18:27:37.367393 32502 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0823 18:31:18.109429 32502 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0823 18:33:27.750883 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8727\nI0823 18:33:27.751327 32502 solver.cpp:404]     Test net output #1: loss = 0.492469 (* 1 = 0.492469 loss)\nI0823 18:33:29.843323 32502 solver.cpp:228] Iteration 58900, loss = 0.000511896\nI0823 18:33:29.843389 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:33:29.843407 32502 solver.cpp:244]     Train net output #1: loss = 0.0005093 (* 1 = 0.0005093 loss)\nI0823 18:33:29.983176 32502 
sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0823 18:37:11.461401 32502 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0823 18:39:21.132375 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8729\nI0823 18:39:21.132834 32502 solver.cpp:404]     Test net output #1: loss = 0.492368 (* 1 = 0.492368 loss)\nI0823 18:39:23.225317 32502 solver.cpp:228] Iteration 59000, loss = 0.000609875\nI0823 18:39:23.225381 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:39:23.225399 32502 solver.cpp:244]     Train net output #1: loss = 0.000607279 (* 1 = 0.000607279 loss)\nI0823 18:39:23.361937 32502 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0823 18:43:04.620244 32502 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0823 18:45:14.331670 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0823 18:45:14.332123 32502 solver.cpp:404]     Test net output #1: loss = 0.49409 (* 1 = 0.49409 loss)\nI0823 18:45:16.424908 32502 solver.cpp:228] Iteration 59100, loss = 0.000674763\nI0823 18:45:16.424974 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:45:16.424993 32502 solver.cpp:244]     Train net output #1: loss = 0.000672167 (* 1 = 0.000672167 loss)\nI0823 18:45:16.559962 32502 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0823 18:48:57.757437 32502 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0823 18:51:07.424217 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8731\nI0823 18:51:07.424639 32502 solver.cpp:404]     Test net output #1: loss = 0.491185 (* 1 = 0.491185 loss)\nI0823 18:51:09.518036 32502 solver.cpp:228] Iteration 59200, loss = 0.000510461\nI0823 18:51:09.518102 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:51:09.518121 32502 solver.cpp:244]     Train net output #1: loss = 0.000507865 (* 1 = 0.000507865 loss)\nI0823 18:51:09.659091 32502 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0823 18:54:50.913193 32502 solver.cpp:337] Iteration 59300, Testing 
net (#0)\nI0823 18:57:00.582222 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8728\nI0823 18:57:00.582662 32502 solver.cpp:404]     Test net output #1: loss = 0.492243 (* 1 = 0.492243 loss)\nI0823 18:57:02.674125 32502 solver.cpp:228] Iteration 59300, loss = 0.000320386\nI0823 18:57:02.674190 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 18:57:02.674206 32502 solver.cpp:244]     Train net output #1: loss = 0.00031779 (* 1 = 0.00031779 loss)\nI0823 18:57:02.810784 32502 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0823 19:00:44.105960 32502 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0823 19:02:53.752203 32502 solver.cpp:404]     Test net output #0: accuracy = 0.873\nI0823 19:02:53.752661 32502 solver.cpp:404]     Test net output #1: loss = 0.494002 (* 1 = 0.494002 loss)\nI0823 19:02:55.845120 32502 solver.cpp:228] Iteration 59400, loss = 0.000615031\nI0823 19:02:55.845185 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:02:55.845203 32502 solver.cpp:244]     Train net output #1: loss = 0.000612435 (* 1 = 0.000612435 loss)\nI0823 19:02:55.982985 32502 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0823 19:06:37.283282 32502 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0823 19:08:46.945631 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8728\nI0823 19:08:46.946069 32502 solver.cpp:404]     Test net output #1: loss = 0.493246 (* 1 = 0.493246 loss)\nI0823 19:08:49.039098 32502 solver.cpp:228] Iteration 59500, loss = 0.000716448\nI0823 19:08:49.039165 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:08:49.039183 32502 solver.cpp:244]     Train net output #1: loss = 0.000713852 (* 1 = 0.000713852 loss)\nI0823 19:08:49.176365 32502 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0823 19:12:30.463084 32502 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0823 19:14:40.140456 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8725\nI0823 19:14:40.140923 
32502 solver.cpp:404]     Test net output #1: loss = 0.493189 (* 1 = 0.493189 loss)\nI0823 19:14:42.234000 32502 solver.cpp:228] Iteration 59600, loss = 0.000658976\nI0823 19:14:42.234066 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:14:42.234083 32502 solver.cpp:244]     Train net output #1: loss = 0.00065638 (* 1 = 0.00065638 loss)\nI0823 19:14:42.372813 32502 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0823 19:18:23.811417 32502 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0823 19:20:33.535699 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8729\nI0823 19:20:33.536160 32502 solver.cpp:404]     Test net output #1: loss = 0.492694 (* 1 = 0.492694 loss)\nI0823 19:20:35.628006 32502 solver.cpp:228] Iteration 59700, loss = 0.000462182\nI0823 19:20:35.628072 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:20:35.628089 32502 solver.cpp:244]     Train net output #1: loss = 0.000459586 (* 1 = 0.000459586 loss)\nI0823 19:20:35.764361 32502 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0823 19:24:17.020303 32502 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0823 19:26:26.731134 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0823 19:26:26.731571 32502 solver.cpp:404]     Test net output #1: loss = 0.491987 (* 1 = 0.491987 loss)\nI0823 19:26:28.825152 32502 solver.cpp:228] Iteration 59800, loss = 0.000441938\nI0823 19:26:28.825217 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:26:28.825234 32502 solver.cpp:244]     Train net output #1: loss = 0.000439342 (* 1 = 0.000439342 loss)\nI0823 19:26:28.962602 32502 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0823 19:30:10.379741 32502 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0823 19:32:20.027878 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0823 19:32:20.028306 32502 solver.cpp:404]     Test net output #1: loss = 0.490853 (* 1 = 0.490853 loss)\nI0823 19:32:22.120699 32502 
solver.cpp:228] Iteration 59900, loss = 0.000449767\nI0823 19:32:22.120764 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:32:22.120782 32502 solver.cpp:244]     Train net output #1: loss = 0.000447172 (* 1 = 0.000447172 loss)\nI0823 19:32:22.260664 32502 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0823 19:36:03.387058 32502 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0823 19:38:13.109877 32502 solver.cpp:404]     Test net output #0: accuracy = 0.873\nI0823 19:38:13.110321 32502 solver.cpp:404]     Test net output #1: loss = 0.49152 (* 1 = 0.49152 loss)\nI0823 19:38:15.202973 32502 solver.cpp:228] Iteration 60000, loss = 0.000422625\nI0823 19:38:15.203040 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:38:15.203066 32502 solver.cpp:244]     Train net output #1: loss = 0.00042003 (* 1 = 0.00042003 loss)\nI0823 19:38:15.344405 32502 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0823 19:41:56.708904 32502 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0823 19:44:06.436852 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0823 19:44:06.437319 32502 solver.cpp:404]     Test net output #1: loss = 0.492488 (* 1 = 0.492488 loss)\nI0823 19:44:08.529578 32502 solver.cpp:228] Iteration 60100, loss = 0.000592678\nI0823 19:44:08.529647 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:44:08.529673 32502 solver.cpp:244]     Train net output #1: loss = 0.000590082 (* 1 = 0.000590082 loss)\nI0823 19:44:08.670907 32502 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0823 19:47:50.105607 32502 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0823 19:49:59.841733 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0823 19:49:59.842207 32502 solver.cpp:404]     Test net output #1: loss = 0.4892 (* 1 = 0.4892 loss)\nI0823 19:50:01.936245 32502 solver.cpp:228] Iteration 60200, loss = 0.000549248\nI0823 19:50:01.936305 32502 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0823 19:50:01.936331 32502 solver.cpp:244]     Train net output #1: loss = 0.000546652 (* 1 = 0.000546652 loss)\nI0823 19:50:02.077276 32502 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0823 19:53:42.804236 32502 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0823 19:55:52.524313 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0823 19:55:52.524757 32502 solver.cpp:404]     Test net output #1: loss = 0.489559 (* 1 = 0.489559 loss)\nI0823 19:55:54.617393 32502 solver.cpp:228] Iteration 60300, loss = 0.000577931\nI0823 19:55:54.617461 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 19:55:54.617486 32502 solver.cpp:244]     Train net output #1: loss = 0.000575335 (* 1 = 0.000575335 loss)\nI0823 19:55:54.754333 32502 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0823 19:59:35.384138 32502 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0823 20:01:45.078249 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0823 20:01:45.078685 32502 solver.cpp:404]     Test net output #1: loss = 0.489544 (* 1 = 0.489544 loss)\nI0823 20:01:47.170457 32502 solver.cpp:228] Iteration 60400, loss = 0.000519185\nI0823 20:01:47.170526 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:01:47.170552 32502 solver.cpp:244]     Train net output #1: loss = 0.000516589 (* 1 = 0.000516589 loss)\nI0823 20:01:47.304901 32502 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0823 20:05:27.827757 32502 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0823 20:07:37.493571 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8737\nI0823 20:07:37.494031 32502 solver.cpp:404]     Test net output #1: loss = 0.489267 (* 1 = 0.489267 loss)\nI0823 20:07:39.586122 32502 solver.cpp:228] Iteration 60500, loss = 0.000657947\nI0823 20:07:39.586186 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:07:39.586205 32502 solver.cpp:244]     Train net output #1: loss = 0.000655351 (* 1 = 
0.000655351 loss)\nI0823 20:07:39.722929 32502 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0823 20:11:20.598757 32502 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0823 20:13:30.266945 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0823 20:13:30.267376 32502 solver.cpp:404]     Test net output #1: loss = 0.491377 (* 1 = 0.491377 loss)\nI0823 20:13:32.358252 32502 solver.cpp:228] Iteration 60600, loss = 0.000787714\nI0823 20:13:32.358319 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:13:32.358336 32502 solver.cpp:244]     Train net output #1: loss = 0.000785118 (* 1 = 0.000785118 loss)\nI0823 20:13:32.489871 32502 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0823 20:17:13.185290 32502 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0823 20:19:22.856732 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8737\nI0823 20:19:22.857173 32502 solver.cpp:404]     Test net output #1: loss = 0.489949 (* 1 = 0.489949 loss)\nI0823 20:19:24.949086 32502 solver.cpp:228] Iteration 60700, loss = 0.000445504\nI0823 20:19:24.949151 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:19:24.949168 32502 solver.cpp:244]     Train net output #1: loss = 0.000442908 (* 1 = 0.000442908 loss)\nI0823 20:19:25.086474 32502 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0823 20:23:05.827337 32502 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0823 20:25:15.469982 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0823 20:25:15.470425 32502 solver.cpp:404]     Test net output #1: loss = 0.490521 (* 1 = 0.490521 loss)\nI0823 20:25:17.562268 32502 solver.cpp:228] Iteration 60800, loss = 0.000430537\nI0823 20:25:17.562333 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:25:17.562351 32502 solver.cpp:244]     Train net output #1: loss = 0.000427941 (* 1 = 0.000427941 loss)\nI0823 20:25:17.694175 32502 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0823 
20:28:58.350014 32502 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0823 20:31:08.000177 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8735\nI0823 20:31:08.000609 32502 solver.cpp:404]     Test net output #1: loss = 0.491234 (* 1 = 0.491234 loss)\nI0823 20:31:10.091958 32502 solver.cpp:228] Iteration 60900, loss = 0.000573626\nI0823 20:31:10.092025 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:31:10.092042 32502 solver.cpp:244]     Train net output #1: loss = 0.00057103 (* 1 = 0.00057103 loss)\nI0823 20:31:10.222585 32502 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0823 20:34:51.054008 32502 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0823 20:37:00.723413 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8737\nI0823 20:37:00.723882 32502 solver.cpp:404]     Test net output #1: loss = 0.490149 (* 1 = 0.490149 loss)\nI0823 20:37:02.816817 32502 solver.cpp:228] Iteration 61000, loss = 0.000563109\nI0823 20:37:02.816882 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:37:02.816907 32502 solver.cpp:244]     Train net output #1: loss = 0.000560514 (* 1 = 0.000560514 loss)\nI0823 20:37:02.951894 32502 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0823 20:40:43.647907 32502 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0823 20:42:53.280921 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8734\nI0823 20:42:53.281371 32502 solver.cpp:404]     Test net output #1: loss = 0.49043 (* 1 = 0.49043 loss)\nI0823 20:42:55.373172 32502 solver.cpp:228] Iteration 61100, loss = 0.000649413\nI0823 20:42:55.373235 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:42:55.373253 32502 solver.cpp:244]     Train net output #1: loss = 0.000646817 (* 1 = 0.000646817 loss)\nI0823 20:42:55.506191 32502 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0823 20:46:36.330487 32502 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0823 20:48:45.982877 32502 solver.cpp:404]    
 Test net output #0: accuracy = 0.8743\nI0823 20:48:45.983340 32502 solver.cpp:404]     Test net output #1: loss = 0.488197 (* 1 = 0.488197 loss)\nI0823 20:48:48.075964 32502 solver.cpp:228] Iteration 61200, loss = 0.000446919\nI0823 20:48:48.076030 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:48:48.076047 32502 solver.cpp:244]     Train net output #1: loss = 0.000444323 (* 1 = 0.000444323 loss)\nI0823 20:48:48.209977 32502 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0823 20:52:29.081331 32502 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0823 20:54:38.729725 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8744\nI0823 20:54:38.730165 32502 solver.cpp:404]     Test net output #1: loss = 0.488169 (* 1 = 0.488169 loss)\nI0823 20:54:40.822372 32502 solver.cpp:228] Iteration 61300, loss = 0.000395481\nI0823 20:54:40.822437 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 20:54:40.822454 32502 solver.cpp:244]     Train net output #1: loss = 0.000392885 (* 1 = 0.000392885 loss)\nI0823 20:54:40.951993 32502 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0823 20:58:21.716043 32502 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0823 21:00:31.361353 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0823 21:00:31.361793 32502 solver.cpp:404]     Test net output #1: loss = 0.486905 (* 1 = 0.486905 loss)\nI0823 21:00:33.454075 32502 solver.cpp:228] Iteration 61400, loss = 0.000421913\nI0823 21:00:33.454140 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:00:33.454159 32502 solver.cpp:244]     Train net output #1: loss = 0.000419317 (* 1 = 0.000419317 loss)\nI0823 21:00:33.591476 32502 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0823 21:04:14.307950 32502 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0823 21:06:23.952042 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0823 21:06:23.952476 32502 solver.cpp:404]     Test net output #1: loss = 
0.487345 (* 1 = 0.487345 loss)\nI0823 21:06:26.045323 32502 solver.cpp:228] Iteration 61500, loss = 0.000510285\nI0823 21:06:26.045389 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:06:26.045408 32502 solver.cpp:244]     Train net output #1: loss = 0.000507689 (* 1 = 0.000507689 loss)\nI0823 21:06:26.178040 32502 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0823 21:10:06.967034 32502 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0823 21:12:16.632802 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0823 21:12:16.633242 32502 solver.cpp:404]     Test net output #1: loss = 0.486918 (* 1 = 0.486918 loss)\nI0823 21:12:18.725848 32502 solver.cpp:228] Iteration 61600, loss = 0.000638378\nI0823 21:12:18.725915 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:12:18.725940 32502 solver.cpp:244]     Train net output #1: loss = 0.000635783 (* 1 = 0.000635783 loss)\nI0823 21:12:18.858395 32502 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0823 21:15:59.557260 32502 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0823 21:18:09.236405 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0823 21:18:09.236879 32502 solver.cpp:404]     Test net output #1: loss = 0.486334 (* 1 = 0.486334 loss)\nI0823 21:18:11.330478 32502 solver.cpp:228] Iteration 61700, loss = 0.000510789\nI0823 21:18:11.330545 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:18:11.330570 32502 solver.cpp:244]     Train net output #1: loss = 0.000508194 (* 1 = 0.000508194 loss)\nI0823 21:18:11.460274 32502 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0823 21:21:52.133513 32502 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0823 21:24:01.794553 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0823 21:24:01.795037 32502 solver.cpp:404]     Test net output #1: loss = 0.485953 (* 1 = 0.485953 loss)\nI0823 21:24:03.887686 32502 solver.cpp:228] Iteration 61800, loss = 
0.000448524\nI0823 21:24:03.887756 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:24:03.887781 32502 solver.cpp:244]     Train net output #1: loss = 0.000445928 (* 1 = 0.000445928 loss)\nI0823 21:24:04.025897 32502 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0823 21:27:44.546057 32502 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0823 21:29:54.211431 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8756\nI0823 21:29:54.211894 32502 solver.cpp:404]     Test net output #1: loss = 0.486135 (* 1 = 0.486135 loss)\nI0823 21:29:56.304476 32502 solver.cpp:228] Iteration 61900, loss = 0.000669746\nI0823 21:29:56.304544 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:29:56.304570 32502 solver.cpp:244]     Train net output #1: loss = 0.00066715 (* 1 = 0.00066715 loss)\nI0823 21:29:56.440829 32502 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0823 21:33:37.145335 32502 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0823 21:35:46.916471 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0823 21:35:46.916920 32502 solver.cpp:404]     Test net output #1: loss = 0.487172 (* 1 = 0.487172 loss)\nI0823 21:35:49.009129 32502 solver.cpp:228] Iteration 62000, loss = 0.000551042\nI0823 21:35:49.009198 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:35:49.009223 32502 solver.cpp:244]     Train net output #1: loss = 0.000548447 (* 1 = 0.000548447 loss)\nI0823 21:35:49.146415 32502 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0823 21:39:29.905920 32502 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0823 21:41:39.793164 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0823 21:41:39.793622 32502 solver.cpp:404]     Test net output #1: loss = 0.488484 (* 1 = 0.488484 loss)\nI0823 21:41:41.886495 32502 solver.cpp:228] Iteration 62100, loss = 0.000783221\nI0823 21:41:41.886564 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:41:41.886590 
32502 solver.cpp:244]     Train net output #1: loss = 0.000780625 (* 1 = 0.000780625 loss)\nI0823 21:41:42.022940 32502 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0823 21:45:22.904045 32502 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0823 21:47:32.790591 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8747\nI0823 21:47:32.791049 32502 solver.cpp:404]     Test net output #1: loss = 0.488002 (* 1 = 0.488002 loss)\nI0823 21:47:34.880877 32502 solver.cpp:228] Iteration 62200, loss = 0.000542549\nI0823 21:47:34.880944 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:47:34.880967 32502 solver.cpp:244]     Train net output #1: loss = 0.000539953 (* 1 = 0.000539953 loss)\nI0823 21:47:35.022686 32502 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0823 21:51:15.826702 32502 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0823 21:53:25.519877 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 21:53:25.520328 32502 solver.cpp:404]     Test net output #1: loss = 0.485136 (* 1 = 0.485136 loss)\nI0823 21:53:27.609144 32502 solver.cpp:228] Iteration 62300, loss = 0.000412963\nI0823 21:53:27.609210 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:53:27.609226 32502 solver.cpp:244]     Train net output #1: loss = 0.000410367 (* 1 = 0.000410367 loss)\nI0823 21:53:27.750108 32502 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0823 21:57:08.538524 32502 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0823 21:59:18.240483 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8742\nI0823 21:59:18.240958 32502 solver.cpp:404]     Test net output #1: loss = 0.487362 (* 1 = 0.487362 loss)\nI0823 21:59:20.329470 32502 solver.cpp:228] Iteration 62400, loss = 0.000570822\nI0823 21:59:20.329535 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 21:59:20.329552 32502 solver.cpp:244]     Train net output #1: loss = 0.000568227 (* 1 = 0.000568227 loss)\nI0823 21:59:20.469274 
32502 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0823 22:03:01.339236 32502 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0823 22:05:11.059613 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0823 22:05:11.060060 32502 solver.cpp:404]     Test net output #1: loss = 0.486328 (* 1 = 0.486328 loss)\nI0823 22:05:13.147738 32502 solver.cpp:228] Iteration 62500, loss = 0.000473689\nI0823 22:05:13.147800 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:05:13.147817 32502 solver.cpp:244]     Train net output #1: loss = 0.000471093 (* 1 = 0.000471093 loss)\nI0823 22:05:13.284621 32502 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0823 22:08:54.158994 32502 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0823 22:11:03.827617 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0823 22:11:03.828073 32502 solver.cpp:404]     Test net output #1: loss = 0.488584 (* 1 = 0.488584 loss)\nI0823 22:11:05.915853 32502 solver.cpp:228] Iteration 62600, loss = 0.00068364\nI0823 22:11:05.915920 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:11:05.915937 32502 solver.cpp:244]     Train net output #1: loss = 0.000681044 (* 1 = 0.000681044 loss)\nI0823 22:11:06.059286 32502 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0823 22:14:46.860942 32502 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0823 22:16:56.535183 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0823 22:16:56.535630 32502 solver.cpp:404]     Test net output #1: loss = 0.487524 (* 1 = 0.487524 loss)\nI0823 22:16:58.623162 32502 solver.cpp:228] Iteration 62700, loss = 0.000559965\nI0823 22:16:58.623227 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:16:58.623245 32502 solver.cpp:244]     Train net output #1: loss = 0.000557369 (* 1 = 0.000557369 loss)\nI0823 22:16:58.764230 32502 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0823 22:20:39.461433 32502 solver.cpp:337] Iteration 62800, 
Testing net (#0)\nI0823 22:22:49.121029 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 22:22:49.121435 32502 solver.cpp:404]     Test net output #1: loss = 0.48731 (* 1 = 0.48731 loss)\nI0823 22:22:51.208668 32502 solver.cpp:228] Iteration 62800, loss = 0.000394426\nI0823 22:22:51.208730 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:22:51.208747 32502 solver.cpp:244]     Train net output #1: loss = 0.000391831 (* 1 = 0.000391831 loss)\nI0823 22:22:51.352910 32502 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0823 22:26:32.312954 32502 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0823 22:28:41.985175 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0823 22:28:41.985613 32502 solver.cpp:404]     Test net output #1: loss = 0.488576 (* 1 = 0.488576 loss)\nI0823 22:28:44.073601 32502 solver.cpp:228] Iteration 62900, loss = 0.00043945\nI0823 22:28:44.073663 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:28:44.073683 32502 solver.cpp:244]     Train net output #1: loss = 0.000436855 (* 1 = 0.000436855 loss)\nI0823 22:28:44.214776 32502 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0823 22:32:24.911422 32502 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0823 22:34:34.583609 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0823 22:34:34.584059 32502 solver.cpp:404]     Test net output #1: loss = 0.487393 (* 1 = 0.487393 loss)\nI0823 22:34:36.672400 32502 solver.cpp:228] Iteration 63000, loss = 0.000488624\nI0823 22:34:36.672463 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:34:36.672482 32502 solver.cpp:244]     Train net output #1: loss = 0.000486028 (* 1 = 0.000486028 loss)\nI0823 22:34:36.807265 32502 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0823 22:38:17.400993 32502 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0823 22:40:27.071341 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8743\nI0823 
22:40:27.071797 32502 solver.cpp:404]     Test net output #1: loss = 0.488812 (* 1 = 0.488812 loss)\nI0823 22:40:29.160147 32502 solver.cpp:228] Iteration 63100, loss = 0.000596777\nI0823 22:40:29.160212 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:40:29.160229 32502 solver.cpp:244]     Train net output #1: loss = 0.000594182 (* 1 = 0.000594182 loss)\nI0823 22:40:29.296732 32502 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0823 22:44:09.876051 32502 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0823 22:46:19.523912 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8745\nI0823 22:46:19.524349 32502 solver.cpp:404]     Test net output #1: loss = 0.488639 (* 1 = 0.488639 loss)\nI0823 22:46:21.611593 32502 solver.cpp:228] Iteration 63200, loss = 0.000503532\nI0823 22:46:21.611657 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:46:21.611676 32502 solver.cpp:244]     Train net output #1: loss = 0.000500936 (* 1 = 0.000500936 loss)\nI0823 22:46:21.750255 32502 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0823 22:50:02.463328 32502 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0823 22:52:12.108381 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0823 22:52:12.108815 32502 solver.cpp:404]     Test net output #1: loss = 0.486053 (* 1 = 0.486053 loss)\nI0823 22:52:14.197058 32502 solver.cpp:228] Iteration 63300, loss = 0.000367538\nI0823 22:52:14.197121 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:52:14.197139 32502 solver.cpp:244]     Train net output #1: loss = 0.000364942 (* 1 = 0.000364942 loss)\nI0823 22:52:14.334691 32502 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0823 22:55:55.171522 32502 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0823 22:58:04.820389 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 22:58:04.820824 32502 solver.cpp:404]     Test net output #1: loss = 0.487266 (* 1 = 0.487266 loss)\nI0823 
22:58:06.908359 32502 solver.cpp:228] Iteration 63400, loss = 0.000532031\nI0823 22:58:06.908423 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 22:58:06.908442 32502 solver.cpp:244]     Train net output #1: loss = 0.000529435 (* 1 = 0.000529435 loss)\nI0823 22:58:07.044932 32502 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0823 23:01:47.812345 32502 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0823 23:03:57.458807 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8747\nI0823 23:03:57.459275 32502 solver.cpp:404]     Test net output #1: loss = 0.486148 (* 1 = 0.486148 loss)\nI0823 23:03:59.548584 32502 solver.cpp:228] Iteration 63500, loss = 0.00065845\nI0823 23:03:59.548650 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:03:59.548668 32502 solver.cpp:244]     Train net output #1: loss = 0.000655855 (* 1 = 0.000655855 loss)\nI0823 23:03:59.685669 32502 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0823 23:07:40.359412 32502 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0823 23:09:50.005596 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8744\nI0823 23:09:50.006043 32502 solver.cpp:404]     Test net output #1: loss = 0.486982 (* 1 = 0.486982 loss)\nI0823 23:09:52.095257 32502 solver.cpp:228] Iteration 63600, loss = 0.000593531\nI0823 23:09:52.095322 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:09:52.095340 32502 solver.cpp:244]     Train net output #1: loss = 0.000590935 (* 1 = 0.000590935 loss)\nI0823 23:09:52.231986 32502 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0823 23:13:33.110445 32502 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0823 23:15:42.762310 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8745\nI0823 23:15:42.762742 32502 solver.cpp:404]     Test net output #1: loss = 0.486295 (* 1 = 0.486295 loss)\nI0823 23:15:44.851228 32502 solver.cpp:228] Iteration 63700, loss = 0.000489507\nI0823 23:15:44.851291 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:15:44.851308 32502 solver.cpp:244]     Train net output #1: loss = 0.000486911 (* 1 = 0.000486911 loss)\nI0823 23:15:44.986502 32502 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0823 23:19:25.889478 32502 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0823 23:21:35.519191 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 23:21:35.519646 32502 solver.cpp:404]     Test net output #1: loss = 0.48507 (* 1 = 0.48507 loss)\nI0823 23:21:37.608324 32502 solver.cpp:228] Iteration 63800, loss = 0.000426521\nI0823 23:21:37.608388 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:21:37.608407 32502 solver.cpp:244]     Train net output #1: loss = 0.000423925 (* 1 = 0.000423925 loss)\nI0823 23:21:37.750203 32502 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0823 23:25:18.381884 32502 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0823 23:27:28.013979 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8754\nI0823 23:27:28.014444 32502 solver.cpp:404]     Test net output #1: loss = 0.484394 (* 1 = 0.484394 loss)\nI0823 23:27:30.102535 32502 solver.cpp:228] Iteration 63900, loss = 0.00061885\nI0823 23:27:30.102598 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:27:30.102617 32502 solver.cpp:244]     Train net output #1: loss = 0.000616254 (* 1 = 0.000616254 loss)\nI0823 23:27:30.240417 32502 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0823 23:31:11.152401 32502 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0823 23:33:20.784855 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 23:33:20.785331 32502 solver.cpp:404]     Test net output #1: loss = 0.484346 (* 1 = 0.484346 loss)\nI0823 23:33:22.874001 32502 solver.cpp:228] Iteration 64000, loss = 0.000537456\nI0823 23:33:22.874065 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:33:22.874083 32502 solver.cpp:244]     Train net 
output #1: loss = 0.00053486 (* 1 = 0.00053486 loss)\nI0823 23:33:23.011063 32502 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0823 23:37:03.709437 32502 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0823 23:39:13.347818 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8741\nI0823 23:39:13.348299 32502 solver.cpp:404]     Test net output #1: loss = 0.487366 (* 1 = 0.487366 loss)\nI0823 23:39:15.436947 32502 solver.cpp:228] Iteration 64100, loss = 0.000724538\nI0823 23:39:15.437016 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:39:15.437032 32502 solver.cpp:244]     Train net output #1: loss = 0.000721942 (* 1 = 0.000721942 loss)\nI0823 23:39:15.577714 32502 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0823 23:42:56.428736 32502 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0823 23:45:06.066505 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0823 23:45:06.066969 32502 solver.cpp:404]     Test net output #1: loss = 0.484777 (* 1 = 0.484777 loss)\nI0823 23:45:08.154850 32502 solver.cpp:228] Iteration 64200, loss = 0.000528625\nI0823 23:45:08.154916 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:45:08.154934 32502 solver.cpp:244]     Train net output #1: loss = 0.00052603 (* 1 = 0.00052603 loss)\nI0823 23:45:08.296221 32502 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0823 23:48:49.094213 32502 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0823 23:50:58.786667 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0823 23:50:58.787122 32502 solver.cpp:404]     Test net output #1: loss = 0.482634 (* 1 = 0.482634 loss)\nI0823 23:51:00.874680 32502 solver.cpp:228] Iteration 64300, loss = 0.000402418\nI0823 23:51:00.874743 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:51:00.874763 32502 solver.cpp:244]     Train net output #1: loss = 0.000399822 (* 1 = 0.000399822 loss)\nI0823 23:51:01.016333 32502 sgd_solver.cpp:166] Iteration 
64300, lr = 0.035\nI0823 23:54:41.783782 32502 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0823 23:56:51.470543 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0823 23:56:51.471004 32502 solver.cpp:404]     Test net output #1: loss = 0.48302 (* 1 = 0.48302 loss)\nI0823 23:56:53.558434 32502 solver.cpp:228] Iteration 64400, loss = 0.000531664\nI0823 23:56:53.558497 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 23:56:53.558516 32502 solver.cpp:244]     Train net output #1: loss = 0.000529069 (* 1 = 0.000529069 loss)\nI0823 23:56:53.700326 32502 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0824 00:00:34.454361 32502 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0824 00:02:44.136564 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0824 00:02:44.137027 32502 solver.cpp:404]     Test net output #1: loss = 0.483384 (* 1 = 0.483384 loss)\nI0824 00:02:46.224884 32502 solver.cpp:228] Iteration 64500, loss = 0.000679838\nI0824 00:02:46.224947 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:02:46.224966 32502 solver.cpp:244]     Train net output #1: loss = 0.000677242 (* 1 = 0.000677242 loss)\nI0824 00:02:46.371296 32502 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0824 00:06:27.151595 32502 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0824 00:08:36.840469 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 00:08:36.840931 32502 solver.cpp:404]     Test net output #1: loss = 0.485 (* 1 = 0.485 loss)\nI0824 00:08:38.928112 32502 solver.cpp:228] Iteration 64600, loss = 0.000665844\nI0824 00:08:38.928174 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:08:38.928194 32502 solver.cpp:244]     Train net output #1: loss = 0.000663249 (* 1 = 0.000663249 loss)\nI0824 00:08:39.070622 32502 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0824 00:12:19.790415 32502 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0824 00:14:29.484277 
32502 solver.cpp:404]     Test net output #0: accuracy = 0.8753\nI0824 00:14:29.484732 32502 solver.cpp:404]     Test net output #1: loss = 0.483009 (* 1 = 0.483009 loss)\nI0824 00:14:31.572111 32502 solver.cpp:228] Iteration 64700, loss = 0.000393572\nI0824 00:14:31.572175 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:14:31.572192 32502 solver.cpp:244]     Train net output #1: loss = 0.000390976 (* 1 = 0.000390976 loss)\nI0824 00:14:31.708444 32502 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0824 00:18:12.557128 32502 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0824 00:20:22.234755 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0824 00:20:22.235194 32502 solver.cpp:404]     Test net output #1: loss = 0.484016 (* 1 = 0.484016 loss)\nI0824 00:20:24.322274 32502 solver.cpp:228] Iteration 64800, loss = 0.000305819\nI0824 00:20:24.322338 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:20:24.322355 32502 solver.cpp:244]     Train net output #1: loss = 0.000303223 (* 1 = 0.000303223 loss)\nI0824 00:20:24.465590 32502 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0824 00:24:05.017735 32502 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0824 00:26:14.673380 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0824 00:26:14.673832 32502 solver.cpp:404]     Test net output #1: loss = 0.485889 (* 1 = 0.485889 loss)\nI0824 00:26:16.761514 32502 solver.cpp:228] Iteration 64900, loss = 0.000458548\nI0824 00:26:16.761577 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:26:16.761595 32502 solver.cpp:244]     Train net output #1: loss = 0.000455952 (* 1 = 0.000455952 loss)\nI0824 00:26:16.900369 32502 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0824 00:29:57.662721 32502 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0824 00:32:07.306875 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0824 00:32:07.307313 32502 solver.cpp:404]     
Test net output #1: loss = 0.483495 (* 1 = 0.483495 loss)\nI0824 00:32:09.394748 32502 solver.cpp:228] Iteration 65000, loss = 0.000560723\nI0824 00:32:09.394812 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:32:09.394830 32502 solver.cpp:244]     Train net output #1: loss = 0.000558127 (* 1 = 0.000558127 loss)\nI0824 00:32:09.533462 32502 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0824 00:35:50.206167 32502 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0824 00:37:59.872995 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 00:37:59.873441 32502 solver.cpp:404]     Test net output #1: loss = 0.484454 (* 1 = 0.484454 loss)\nI0824 00:38:01.961452 32502 solver.cpp:228] Iteration 65100, loss = 0.00061787\nI0824 00:38:01.961515 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:38:01.961534 32502 solver.cpp:244]     Train net output #1: loss = 0.000615275 (* 1 = 0.000615275 loss)\nI0824 00:38:02.102953 32502 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0824 00:41:42.823386 32502 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0824 00:43:51.711849 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8747\nI0824 00:43:51.712268 32502 solver.cpp:404]     Test net output #1: loss = 0.484845 (* 1 = 0.484845 loss)\nI0824 00:43:53.799940 32502 solver.cpp:228] Iteration 65200, loss = 0.000436117\nI0824 00:43:53.800009 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:43:53.800029 32502 solver.cpp:244]     Train net output #1: loss = 0.000433521 (* 1 = 0.000433521 loss)\nI0824 00:43:53.941610 32502 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0824 00:47:34.715698 32502 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0824 00:49:43.113395 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 00:49:43.113790 32502 solver.cpp:404]     Test net output #1: loss = 0.485004 (* 1 = 0.485004 loss)\nI0824 00:49:45.201776 32502 solver.cpp:228] Iteration 
65300, loss = 0.000343649\nI0824 00:49:45.201839 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:49:45.201858 32502 solver.cpp:244]     Train net output #1: loss = 0.000341054 (* 1 = 0.000341054 loss)\nI0824 00:49:45.343757 32502 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0824 00:53:25.949553 32502 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0824 00:55:35.637589 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 00:55:35.638064 32502 solver.cpp:404]     Test net output #1: loss = 0.485031 (* 1 = 0.485031 loss)\nI0824 00:55:37.726166 32502 solver.cpp:228] Iteration 65400, loss = 0.00053212\nI0824 00:55:37.726236 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 00:55:37.726261 32502 solver.cpp:244]     Train net output #1: loss = 0.000529524 (* 1 = 0.000529524 loss)\nI0824 00:55:37.866837 32502 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0824 00:59:18.702410 32502 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0824 01:01:28.340345 32502 solver.cpp:404]     Test net output #0: accuracy = 0.874501\nI0824 01:01:28.340752 32502 solver.cpp:404]     Test net output #1: loss = 0.484849 (* 1 = 0.484849 loss)\nI0824 01:01:30.428908 32502 solver.cpp:228] Iteration 65500, loss = 0.000590348\nI0824 01:01:30.428977 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:01:30.429003 32502 solver.cpp:244]     Train net output #1: loss = 0.000587753 (* 1 = 0.000587753 loss)\nI0824 01:01:30.576453 32502 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0824 01:05:11.069053 32502 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0824 01:07:20.620407 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0824 01:07:20.620853 32502 solver.cpp:404]     Test net output #1: loss = 0.487666 (* 1 = 0.487666 loss)\nI0824 01:07:22.708565 32502 solver.cpp:228] Iteration 65600, loss = 0.000661502\nI0824 01:07:22.708631 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 
01:07:22.708655 32502 solver.cpp:244]     Train net output #1: loss = 0.000658907 (* 1 = 0.000658907 loss)\nI0824 01:07:22.852309 32502 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0824 01:11:03.569666 32502 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0824 01:13:13.378171 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8743\nI0824 01:13:13.378613 32502 solver.cpp:404]     Test net output #1: loss = 0.486281 (* 1 = 0.486281 loss)\nI0824 01:13:15.466320 32502 solver.cpp:228] Iteration 65700, loss = 0.00040351\nI0824 01:13:15.466385 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:13:15.466409 32502 solver.cpp:244]     Train net output #1: loss = 0.000400915 (* 1 = 0.000400915 loss)\nI0824 01:13:15.602526 32502 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0824 01:16:56.402369 32502 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0824 01:19:06.120458 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0824 01:19:06.120930 32502 solver.cpp:404]     Test net output #1: loss = 0.484198 (* 1 = 0.484198 loss)\nI0824 01:19:08.208780 32502 solver.cpp:228] Iteration 65800, loss = 0.000301103\nI0824 01:19:08.208847 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:19:08.208873 32502 solver.cpp:244]     Train net output #1: loss = 0.000298508 (* 1 = 0.000298508 loss)\nI0824 01:19:08.355880 32502 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0824 01:22:49.343425 32502 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0824 01:24:59.082571 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8753\nI0824 01:24:59.083030 32502 solver.cpp:404]     Test net output #1: loss = 0.484078 (* 1 = 0.484078 loss)\nI0824 01:25:01.171272 32502 solver.cpp:228] Iteration 65900, loss = 0.000433985\nI0824 01:25:01.171339 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:25:01.171366 32502 solver.cpp:244]     Train net output #1: loss = 0.00043139 (* 1 = 0.00043139 loss)\nI0824 
01:25:01.311540 32502 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0824 01:28:42.611596 32502 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0824 01:30:52.340133 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0824 01:30:52.340574 32502 solver.cpp:404]     Test net output #1: loss = 0.481746 (* 1 = 0.481746 loss)\nI0824 01:30:54.428014 32502 solver.cpp:228] Iteration 66000, loss = 0.000508696\nI0824 01:30:54.428081 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:30:54.428105 32502 solver.cpp:244]     Train net output #1: loss = 0.000506101 (* 1 = 0.000506101 loss)\nI0824 01:30:54.570266 32502 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0824 01:34:35.824599 32502 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0824 01:36:45.548972 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8753\nI0824 01:36:45.549425 32502 solver.cpp:404]     Test net output #1: loss = 0.484588 (* 1 = 0.484588 loss)\nI0824 01:36:47.637331 32502 solver.cpp:228] Iteration 66100, loss = 0.000631735\nI0824 01:36:47.637398 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:36:47.637423 32502 solver.cpp:244]     Train net output #1: loss = 0.00062914 (* 1 = 0.00062914 loss)\nI0824 01:36:47.778905 32502 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0824 01:40:29.135576 32502 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0824 01:42:38.883462 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0824 01:42:38.883918 32502 solver.cpp:404]     Test net output #1: loss = 0.484789 (* 1 = 0.484789 loss)\nI0824 01:42:40.971791 32502 solver.cpp:228] Iteration 66200, loss = 0.000390048\nI0824 01:42:40.971858 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:42:40.971884 32502 solver.cpp:244]     Train net output #1: loss = 0.000387453 (* 1 = 0.000387453 loss)\nI0824 01:42:41.115660 32502 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0824 01:46:22.481880 32502 solver.cpp:337] 
Iteration 66300, Testing net (#0)\nI0824 01:48:32.273483 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0824 01:48:32.273964 32502 solver.cpp:404]     Test net output #1: loss = 0.483707 (* 1 = 0.483707 loss)\nI0824 01:48:34.362313 32502 solver.cpp:228] Iteration 66300, loss = 0.00038014\nI0824 01:48:34.362380 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:48:34.362404 32502 solver.cpp:244]     Train net output #1: loss = 0.000377545 (* 1 = 0.000377545 loss)\nI0824 01:48:34.510022 32502 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0824 01:52:15.926431 32502 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0824 01:54:25.699677 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8757\nI0824 01:54:25.700130 32502 solver.cpp:404]     Test net output #1: loss = 0.483506 (* 1 = 0.483506 loss)\nI0824 01:54:27.789106 32502 solver.cpp:228] Iteration 66400, loss = 0.0005154\nI0824 01:54:27.789170 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 01:54:27.789188 32502 solver.cpp:244]     Train net output #1: loss = 0.000512804 (* 1 = 0.000512804 loss)\nI0824 01:54:27.939739 32502 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0824 01:58:09.315366 32502 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0824 02:00:19.128974 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8756\nI0824 02:00:19.129420 32502 solver.cpp:404]     Test net output #1: loss = 0.481974 (* 1 = 0.481974 loss)\nI0824 02:00:21.217453 32502 solver.cpp:228] Iteration 66500, loss = 0.000521413\nI0824 02:00:21.217517 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:00:21.217536 32502 solver.cpp:244]     Train net output #1: loss = 0.000518817 (* 1 = 0.000518817 loss)\nI0824 02:00:21.365712 32502 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0824 02:04:02.565302 32502 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0824 02:06:12.448330 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.8752\nI0824 02:06:12.448806 32502 solver.cpp:404]     Test net output #1: loss = 0.48392 (* 1 = 0.48392 loss)\nI0824 02:06:14.536537 32502 solver.cpp:228] Iteration 66600, loss = 0.000550578\nI0824 02:06:14.536602 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:06:14.536622 32502 solver.cpp:244]     Train net output #1: loss = 0.000547982 (* 1 = 0.000547982 loss)\nI0824 02:06:14.678719 32502 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0824 02:09:56.155288 32502 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0824 02:12:06.025662 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8749\nI0824 02:12:06.026152 32502 solver.cpp:404]     Test net output #1: loss = 0.483466 (* 1 = 0.483466 loss)\nI0824 02:12:08.113582 32502 solver.cpp:228] Iteration 66700, loss = 0.000414353\nI0824 02:12:08.113647 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:12:08.113667 32502 solver.cpp:244]     Train net output #1: loss = 0.000411757 (* 1 = 0.000411757 loss)\nI0824 02:12:08.253828 32502 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0824 02:15:49.577839 32502 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0824 02:17:59.303012 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0824 02:17:59.303484 32502 solver.cpp:404]     Test net output #1: loss = 0.483102 (* 1 = 0.483102 loss)\nI0824 02:18:01.390972 32502 solver.cpp:228] Iteration 66800, loss = 0.000362668\nI0824 02:18:01.391036 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:18:01.391054 32502 solver.cpp:244]     Train net output #1: loss = 0.000360072 (* 1 = 0.000360072 loss)\nI0824 02:18:01.536418 32502 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0824 02:21:42.824322 32502 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0824 02:23:52.527418 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8754\nI0824 02:23:52.527881 32502 solver.cpp:404]     Test net output #1: loss = 0.48468 (* 1 = 0.48468 
loss)\nI0824 02:23:54.616617 32502 solver.cpp:228] Iteration 66900, loss = 0.000528892\nI0824 02:23:54.616683 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:23:54.616701 32502 solver.cpp:244]     Train net output #1: loss = 0.000526296 (* 1 = 0.000526296 loss)\nI0824 02:23:54.766835 32502 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0824 02:27:36.206727 32502 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0824 02:29:45.902775 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0824 02:29:45.903241 32502 solver.cpp:404]     Test net output #1: loss = 0.482375 (* 1 = 0.482375 loss)\nI0824 02:29:47.991868 32502 solver.cpp:228] Iteration 67000, loss = 0.000484258\nI0824 02:29:47.991937 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:29:47.991955 32502 solver.cpp:244]     Train net output #1: loss = 0.000481662 (* 1 = 0.000481662 loss)\nI0824 02:29:48.139374 32502 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0824 02:33:29.415112 32502 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0824 02:35:39.117287 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0824 02:35:39.117738 32502 solver.cpp:404]     Test net output #1: loss = 0.485707 (* 1 = 0.485707 loss)\nI0824 02:35:41.206058 32502 solver.cpp:228] Iteration 67100, loss = 0.000575101\nI0824 02:35:41.206122 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:35:41.206141 32502 solver.cpp:244]     Train net output #1: loss = 0.000572506 (* 1 = 0.000572506 loss)\nI0824 02:35:41.352241 32502 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0824 02:39:22.656667 32502 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0824 02:41:32.317757 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8743\nI0824 02:41:32.318224 32502 solver.cpp:404]     Test net output #1: loss = 0.484793 (* 1 = 0.484793 loss)\nI0824 02:41:34.406431 32502 solver.cpp:228] Iteration 67200, loss = 0.000374029\nI0824 02:41:34.406496 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:41:34.406512 32502 solver.cpp:244]     Train net output #1: loss = 0.000371434 (* 1 = 0.000371434 loss)\nI0824 02:41:34.552201 32502 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0824 02:45:15.899108 32502 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0824 02:47:25.555467 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 02:47:25.555944 32502 solver.cpp:404]     Test net output #1: loss = 0.483611 (* 1 = 0.483611 loss)\nI0824 02:47:27.643748 32502 solver.cpp:228] Iteration 67300, loss = 0.00030007\nI0824 02:47:27.643812 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:47:27.643831 32502 solver.cpp:244]     Train net output #1: loss = 0.000297475 (* 1 = 0.000297475 loss)\nI0824 02:47:27.788537 32502 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0824 02:51:09.219883 32502 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0824 02:53:18.871592 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8754\nI0824 02:53:18.872032 32502 solver.cpp:404]     Test net output #1: loss = 0.484272 (* 1 = 0.484272 loss)\nI0824 02:53:20.960448 32502 solver.cpp:228] Iteration 67400, loss = 0.000435323\nI0824 02:53:20.960511 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:53:20.960530 32502 solver.cpp:244]     Train net output #1: loss = 0.000432727 (* 1 = 0.000432727 loss)\nI0824 02:53:21.104961 32502 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0824 02:57:02.319700 32502 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0824 02:59:11.973814 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8747\nI0824 02:59:11.974264 32502 solver.cpp:404]     Test net output #1: loss = 0.485167 (* 1 = 0.485167 loss)\nI0824 02:59:14.063000 32502 solver.cpp:228] Iteration 67500, loss = 0.000486307\nI0824 02:59:14.063064 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 02:59:14.063083 32502 solver.cpp:244]     Train net 
output #1: loss = 0.000483711 (* 1 = 0.000483711 loss)\nI0824 02:59:14.202601 32502 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0824 03:02:55.411058 32502 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0824 03:05:05.050040 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8747\nI0824 03:05:05.050487 32502 solver.cpp:404]     Test net output #1: loss = 0.486209 (* 1 = 0.486209 loss)\nI0824 03:05:07.138600 32502 solver.cpp:228] Iteration 67600, loss = 0.00056497\nI0824 03:05:07.138664 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:05:07.138681 32502 solver.cpp:244]     Train net output #1: loss = 0.000562374 (* 1 = 0.000562374 loss)\nI0824 03:05:07.288935 32502 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0824 03:08:48.686799 32502 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0824 03:10:58.325016 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0824 03:10:58.325444 32502 solver.cpp:404]     Test net output #1: loss = 0.48405 (* 1 = 0.48405 loss)\nI0824 03:11:00.414021 32502 solver.cpp:228] Iteration 67700, loss = 0.000461984\nI0824 03:11:00.414083 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:11:00.414103 32502 solver.cpp:244]     Train net output #1: loss = 0.000459388 (* 1 = 0.000459388 loss)\nI0824 03:11:00.562350 32502 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0824 03:14:41.842890 32502 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0824 03:16:51.457985 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0824 03:16:51.458453 32502 solver.cpp:404]     Test net output #1: loss = 0.481618 (* 1 = 0.481618 loss)\nI0824 03:16:53.547793 32502 solver.cpp:228] Iteration 67800, loss = 0.00034434\nI0824 03:16:53.547859 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:16:53.547876 32502 solver.cpp:244]     Train net output #1: loss = 0.000341745 (* 1 = 0.000341745 loss)\nI0824 03:16:53.690812 32502 sgd_solver.cpp:166] Iteration 
67800, lr = 0.035\nI0824 03:20:35.008021 32502 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0824 03:22:44.618101 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0824 03:22:44.618567 32502 solver.cpp:404]     Test net output #1: loss = 0.486144 (* 1 = 0.486144 loss)\nI0824 03:22:46.707176 32502 solver.cpp:228] Iteration 67900, loss = 0.000444434\nI0824 03:22:46.707242 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:22:46.707260 32502 solver.cpp:244]     Train net output #1: loss = 0.000441839 (* 1 = 0.000441839 loss)\nI0824 03:22:46.855212 32502 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0824 03:26:28.233469 32502 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0824 03:28:37.870008 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0824 03:28:37.870477 32502 solver.cpp:404]     Test net output #1: loss = 0.484854 (* 1 = 0.484854 loss)\nI0824 03:28:39.958849 32502 solver.cpp:228] Iteration 68000, loss = 0.000443326\nI0824 03:28:39.958914 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:28:39.958935 32502 solver.cpp:244]     Train net output #1: loss = 0.000440731 (* 1 = 0.000440731 loss)\nI0824 03:28:40.104502 32502 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0824 03:32:21.501559 32502 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0824 03:34:31.137538 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8743\nI0824 03:34:31.138016 32502 solver.cpp:404]     Test net output #1: loss = 0.487357 (* 1 = 0.487357 loss)\nI0824 03:34:33.226385 32502 solver.cpp:228] Iteration 68100, loss = 0.000507726\nI0824 03:34:33.226454 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:34:33.226470 32502 solver.cpp:244]     Train net output #1: loss = 0.00050513 (* 1 = 0.00050513 loss)\nI0824 03:34:33.367199 32502 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0824 03:38:14.847640 32502 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0824 
03:40:24.495812 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8745\nI0824 03:40:24.496253 32502 solver.cpp:404]     Test net output #1: loss = 0.485288 (* 1 = 0.485288 loss)\nI0824 03:40:26.584489 32502 solver.cpp:228] Iteration 68200, loss = 0.000464072\nI0824 03:40:26.584555 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:40:26.584573 32502 solver.cpp:244]     Train net output #1: loss = 0.000461477 (* 1 = 0.000461477 loss)\nI0824 03:40:26.726900 32502 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0824 03:44:07.871274 32502 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0824 03:46:17.517349 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8754\nI0824 03:46:17.517820 32502 solver.cpp:404]     Test net output #1: loss = 0.483532 (* 1 = 0.483532 loss)\nI0824 03:46:19.605845 32502 solver.cpp:228] Iteration 68300, loss = 0.000280095\nI0824 03:46:19.605909 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:46:19.605927 32502 solver.cpp:244]     Train net output #1: loss = 0.0002775 (* 1 = 0.0002775 loss)\nI0824 03:46:19.740411 32502 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0824 03:50:00.259816 32502 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0824 03:52:09.890926 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0824 03:52:09.891366 32502 solver.cpp:404]     Test net output #1: loss = 0.482606 (* 1 = 0.482606 loss)\nI0824 03:52:11.979322 32502 solver.cpp:228] Iteration 68400, loss = 0.000499386\nI0824 03:52:11.979389 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:52:11.979408 32502 solver.cpp:244]     Train net output #1: loss = 0.00049679 (* 1 = 0.00049679 loss)\nI0824 03:52:12.116386 32502 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0824 03:55:52.608824 32502 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0824 03:58:02.260711 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8759\nI0824 03:58:02.261183 32502 
solver.cpp:404]     Test net output #1: loss = 0.48158 (* 1 = 0.48158 loss)\nI0824 03:58:04.348958 32502 solver.cpp:228] Iteration 68500, loss = 0.000345742\nI0824 03:58:04.349021 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 03:58:04.349038 32502 solver.cpp:244]     Train net output #1: loss = 0.000343146 (* 1 = 0.000343146 loss)\nI0824 03:58:04.484622 32502 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0824 04:01:44.946480 32502 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0824 04:03:54.574060 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8759\nI0824 04:03:54.574525 32502 solver.cpp:404]     Test net output #1: loss = 0.484248 (* 1 = 0.484248 loss)\nI0824 04:03:56.662365 32502 solver.cpp:228] Iteration 68600, loss = 0.000487441\nI0824 04:03:56.662427 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:03:56.662446 32502 solver.cpp:244]     Train net output #1: loss = 0.000484845 (* 1 = 0.000484845 loss)\nI0824 04:03:56.801975 32502 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0824 04:07:37.291565 32502 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0824 04:09:46.935297 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0824 04:09:46.935767 32502 solver.cpp:404]     Test net output #1: loss = 0.484236 (* 1 = 0.484236 loss)\nI0824 04:09:49.023200 32502 solver.cpp:228] Iteration 68700, loss = 0.00031155\nI0824 04:09:49.023263 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:09:49.023282 32502 solver.cpp:244]     Train net output #1: loss = 0.000308954 (* 1 = 0.000308954 loss)\nI0824 04:09:49.155645 32502 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0824 04:13:29.462767 32502 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0824 04:15:39.098685 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8755\nI0824 04:15:39.099160 32502 solver.cpp:404]     Test net output #1: loss = 0.485173 (* 1 = 0.485173 loss)\nI0824 04:15:41.186480 32502 
solver.cpp:228] Iteration 68800, loss = 0.000280802\nI0824 04:15:41.186542 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:15:41.186559 32502 solver.cpp:244]     Train net output #1: loss = 0.000278206 (* 1 = 0.000278206 loss)\nI0824 04:15:41.311007 32502 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0824 04:19:20.724171 32502 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0824 04:21:30.382855 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8761\nI0824 04:21:30.383324 32502 solver.cpp:404]     Test net output #1: loss = 0.484556 (* 1 = 0.484556 loss)\nI0824 04:21:32.470508 32502 solver.cpp:228] Iteration 68900, loss = 0.000528642\nI0824 04:21:32.470573 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:21:32.470592 32502 solver.cpp:244]     Train net output #1: loss = 0.000526046 (* 1 = 0.000526046 loss)\nI0824 04:21:32.602124 32502 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0824 04:25:12.021543 32502 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0824 04:27:21.719926 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8757\nI0824 04:27:21.720408 32502 solver.cpp:404]     Test net output #1: loss = 0.484775 (* 1 = 0.484775 loss)\nI0824 04:27:23.808300 32502 solver.cpp:228] Iteration 69000, loss = 0.000445458\nI0824 04:27:23.808362 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:27:23.808379 32502 solver.cpp:244]     Train net output #1: loss = 0.000442862 (* 1 = 0.000442862 loss)\nI0824 04:27:23.940125 32502 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0824 04:31:03.358620 32502 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0824 04:33:13.072216 32502 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0824 04:33:13.072654 32502 solver.cpp:404]     Test net output #1: loss = 0.48722 (* 1 = 0.48722 loss)\nI0824 04:33:15.160439 32502 solver.cpp:228] Iteration 69100, loss = 0.00043529\nI0824 04:33:15.160501 32502 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0824 04:33:15.160518 32502 solver.cpp:244]     Train net output #1: loss = 0.000432694 (* 1 = 0.000432694 loss)\nI0824 04:33:15.288262 32502 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0824 04:36:54.726284 32502 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0824 04:39:04.435426 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8751\nI0824 04:39:04.435889 32502 solver.cpp:404]     Test net output #1: loss = 0.486347 (* 1 = 0.486347 loss)\nI0824 04:39:06.524530 32502 solver.cpp:228] Iteration 69200, loss = 0.000422239\nI0824 04:39:06.524596 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:39:06.524612 32502 solver.cpp:244]     Train net output #1: loss = 0.000419643 (* 1 = 0.000419643 loss)\nI0824 04:39:06.655441 32502 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0824 04:42:46.083324 32502 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0824 04:44:55.891721 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0824 04:44:55.892179 32502 solver.cpp:404]     Test net output #1: loss = 0.48476 (* 1 = 0.48476 loss)\nI0824 04:44:57.980598 32502 solver.cpp:228] Iteration 69300, loss = 0.000319219\nI0824 04:44:57.980666 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:44:57.980691 32502 solver.cpp:244]     Train net output #1: loss = 0.000316623 (* 1 = 0.000316623 loss)\nI0824 04:44:58.106567 32502 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0824 04:48:37.453433 32502 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0824 04:50:47.317801 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8764\nI0824 04:50:47.318274 32502 solver.cpp:404]     Test net output #1: loss = 0.484062 (* 1 = 0.484062 loss)\nI0824 04:50:49.405292 32502 solver.cpp:228] Iteration 69400, loss = 0.000515903\nI0824 04:50:49.405360 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:50:49.405385 32502 solver.cpp:244]     Train net output #1: loss = 0.000513307 (* 1 = 
0.000513307 loss)\nI0824 04:50:49.530202 32502 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0824 04:54:28.899993 32502 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0824 04:56:38.596019 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8765\nI0824 04:56:38.596494 32502 solver.cpp:404]     Test net output #1: loss = 0.482596 (* 1 = 0.482596 loss)\nI0824 04:56:40.684077 32502 solver.cpp:228] Iteration 69500, loss = 0.000386845\nI0824 04:56:40.684147 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 04:56:40.684173 32502 solver.cpp:244]     Train net output #1: loss = 0.000384249 (* 1 = 0.000384249 loss)\nI0824 04:56:40.813791 32502 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0824 05:00:20.209084 32502 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0824 05:02:30.024369 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8767\nI0824 05:02:30.024842 32502 solver.cpp:404]     Test net output #1: loss = 0.484213 (* 1 = 0.484213 loss)\nI0824 05:02:32.112179 32502 solver.cpp:228] Iteration 69600, loss = 0.000531159\nI0824 05:02:32.112246 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:02:32.112269 32502 solver.cpp:244]     Train net output #1: loss = 0.000528563 (* 1 = 0.000528563 loss)\nI0824 05:02:32.238616 32502 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0824 05:06:11.732375 32502 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0824 05:08:21.394392 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8761\nI0824 05:08:21.394870 32502 solver.cpp:404]     Test net output #1: loss = 0.485852 (* 1 = 0.485852 loss)\nI0824 05:08:23.483235 32502 solver.cpp:228] Iteration 69700, loss = 0.000402226\nI0824 05:08:23.483304 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:08:23.483328 32502 solver.cpp:244]     Train net output #1: loss = 0.000399631 (* 1 = 0.000399631 loss)\nI0824 05:08:23.607681 32502 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0824 
05:12:03.043473 32502 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0824 05:14:12.704563 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8762\nI0824 05:14:12.705006 32502 solver.cpp:404]     Test net output #1: loss = 0.483781 (* 1 = 0.483781 loss)\nI0824 05:14:14.793663 32502 solver.cpp:228] Iteration 69800, loss = 0.000314127\nI0824 05:14:14.793730 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:14:14.793756 32502 solver.cpp:244]     Train net output #1: loss = 0.000311532 (* 1 = 0.000311532 loss)\nI0824 05:14:14.919236 32502 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0824 05:17:54.303282 32502 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0824 05:20:04.062607 32502 solver.cpp:404]     Test net output #0: accuracy = 0.876\nI0824 05:20:04.063060 32502 solver.cpp:404]     Test net output #1: loss = 0.486956 (* 1 = 0.486956 loss)\nI0824 05:20:06.150892 32502 solver.cpp:228] Iteration 69900, loss = 0.000456494\nI0824 05:20:06.150959 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:20:06.150985 32502 solver.cpp:244]     Train net output #1: loss = 0.000453898 (* 1 = 0.000453898 loss)\nI0824 05:20:06.277892 32502 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0824 05:23:45.648499 32502 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0824 05:25:55.311543 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8763\nI0824 05:25:55.311995 32502 solver.cpp:404]     Test net output #1: loss = 0.484537 (* 1 = 0.484537 loss)\nI0824 05:25:57.399662 32502 solver.cpp:228] Iteration 70000, loss = 0.000450012\nI0824 05:25:57.399729 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:25:57.399755 32502 solver.cpp:244]     Train net output #1: loss = 0.000447416 (* 1 = 0.000447416 loss)\nI0824 05:25:57.525005 32502 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0824 05:25:57.525033 32502 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0824 05:29:36.915740 32502 
solver.cpp:337] Iteration 70100, Testing net (#0)\nI0824 05:31:46.560405 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0824 05:31:46.560865 32502 solver.cpp:404]     Test net output #1: loss = 0.460155 (* 1 = 0.460155 loss)\nI0824 05:31:48.648721 32502 solver.cpp:228] Iteration 70100, loss = 0.000466165\nI0824 05:31:48.648788 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:31:48.648813 32502 solver.cpp:244]     Train net output #1: loss = 0.000463569 (* 1 = 0.000463569 loss)\nI0824 05:31:48.776854 32502 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0824 05:35:28.113131 32502 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0824 05:37:37.929152 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8889\nI0824 05:37:37.929616 32502 solver.cpp:404]     Test net output #1: loss = 0.439634 (* 1 = 0.439634 loss)\nI0824 05:37:40.017271 32502 solver.cpp:228] Iteration 70200, loss = 0.00035191\nI0824 05:37:40.017354 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:37:40.017374 32502 solver.cpp:244]     Train net output #1: loss = 0.000349314 (* 1 = 0.000349314 loss)\nI0824 05:37:40.150838 32502 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0824 05:41:19.601462 32502 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0824 05:43:29.376621 32502 solver.cpp:404]     Test net output #0: accuracy = 0.8925\nI0824 05:43:29.377061 32502 solver.cpp:404]     Test net output #1: loss = 0.423498 (* 1 = 0.423498 loss)\nI0824 05:43:31.465476 32502 solver.cpp:228] Iteration 70300, loss = 0.000306703\nI0824 05:43:31.465540 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:43:31.465557 32502 solver.cpp:244]     Train net output #1: loss = 0.000304107 (* 1 = 0.000304107 loss)\nI0824 05:43:31.592852 32502 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0824 05:47:11.016468 32502 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0824 05:49:20.677561 32502 solver.cpp:404]     Test net 
output #0: accuracy = 0.898\nI0824 05:49:20.678027 32502 solver.cpp:404]     Test net output #1: loss = 0.410392 (* 1 = 0.410392 loss)\nI0824 05:49:22.766860 32502 solver.cpp:228] Iteration 70400, loss = 0.000466241\nI0824 05:49:22.766929 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:49:22.766947 32502 solver.cpp:244]     Train net output #1: loss = 0.000463646 (* 1 = 0.000463646 loss)\nI0824 05:49:22.898092 32502 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0824 05:53:02.275625 32502 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0824 05:55:11.929715 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9005\nI0824 05:55:11.930177 32502 solver.cpp:404]     Test net output #1: loss = 0.399959 (* 1 = 0.399959 loss)\nI0824 05:55:14.017225 32502 solver.cpp:228] Iteration 70500, loss = 0.000432179\nI0824 05:55:14.017292 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 05:55:14.017316 32502 solver.cpp:244]     Train net output #1: loss = 0.000429583 (* 1 = 0.000429583 loss)\nI0824 05:55:14.146997 32502 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0824 05:58:53.624492 32502 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0824 06:01:03.272662 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9027\nI0824 06:01:03.273116 32502 solver.cpp:404]     Test net output #1: loss = 0.391456 (* 1 = 0.391456 loss)\nI0824 06:01:05.361613 32502 solver.cpp:228] Iteration 70600, loss = 0.000402986\nI0824 06:01:05.361680 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:01:05.361706 32502 solver.cpp:244]     Train net output #1: loss = 0.000400391 (* 1 = 0.000400391 loss)\nI0824 06:01:05.494954 32502 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0824 06:04:45.674451 32502 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0824 06:06:55.307745 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9051\nI0824 06:06:55.308229 32502 solver.cpp:404]     Test net output #1: loss = 0.384567 
(* 1 = 0.384567 loss)\nI0824 06:06:57.395815 32502 solver.cpp:228] Iteration 70700, loss = 0.000362421\nI0824 06:06:57.395879 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:06:57.395898 32502 solver.cpp:244]     Train net output #1: loss = 0.000359825 (* 1 = 0.000359825 loss)\nI0824 06:06:57.532340 32502 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0824 06:10:38.536499 32502 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0824 06:12:48.168481 32502 solver.cpp:404]     Test net output #0: accuracy = 0.907\nI0824 06:12:48.168947 32502 solver.cpp:404]     Test net output #1: loss = 0.379298 (* 1 = 0.379298 loss)\nI0824 06:12:50.257602 32502 solver.cpp:228] Iteration 70800, loss = 0.000242194\nI0824 06:12:50.257668 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:12:50.257685 32502 solver.cpp:244]     Train net output #1: loss = 0.000239598 (* 1 = 0.000239598 loss)\nI0824 06:12:50.394176 32502 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0824 06:16:31.354825 32502 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0824 06:18:40.984915 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9089\nI0824 06:18:40.985397 32502 solver.cpp:404]     Test net output #1: loss = 0.374927 (* 1 = 0.374927 loss)\nI0824 06:18:43.073879 32502 solver.cpp:228] Iteration 70900, loss = 0.000505048\nI0824 06:18:43.073947 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:18:43.073966 32502 solver.cpp:244]     Train net output #1: loss = 0.000502452 (* 1 = 0.000502452 loss)\nI0824 06:18:43.213418 32502 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0824 06:22:24.061204 32502 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0824 06:24:33.695250 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9093\nI0824 06:24:33.695710 32502 solver.cpp:404]     Test net output #1: loss = 0.371549 (* 1 = 0.371549 loss)\nI0824 06:24:35.782896 32502 solver.cpp:228] Iteration 71000, loss = 0.000473387\nI0824 
06:24:35.782964 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:24:35.782982 32502 solver.cpp:244]     Train net output #1: loss = 0.000470791 (* 1 = 0.000470791 loss)\nI0824 06:24:35.921397 32502 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0824 06:28:16.718847 32502 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0824 06:30:26.349596 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9089\nI0824 06:30:26.350054 32502 solver.cpp:404]     Test net output #1: loss = 0.36893 (* 1 = 0.36893 loss)\nI0824 06:30:28.437988 32502 solver.cpp:228] Iteration 71100, loss = 0.000426704\nI0824 06:30:28.438053 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:30:28.438072 32502 solver.cpp:244]     Train net output #1: loss = 0.000424108 (* 1 = 0.000424108 loss)\nI0824 06:30:28.579221 32502 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0824 06:34:09.648030 32502 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0824 06:36:19.345758 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9092\nI0824 06:36:19.346210 32502 solver.cpp:404]     Test net output #1: loss = 0.366722 (* 1 = 0.366722 loss)\nI0824 06:36:21.440286 32502 solver.cpp:228] Iteration 71200, loss = 0.000346906\nI0824 06:36:21.440351 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:36:21.440369 32502 solver.cpp:244]     Train net output #1: loss = 0.00034431 (* 1 = 0.00034431 loss)\nI0824 06:36:21.576931 32502 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0824 06:40:02.437561 32502 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0824 06:42:12.154712 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9092\nI0824 06:42:12.155170 32502 solver.cpp:404]     Test net output #1: loss = 0.364927 (* 1 = 0.364927 loss)\nI0824 06:42:14.243156 32502 solver.cpp:228] Iteration 71300, loss = 0.00027043\nI0824 06:42:14.243219 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:42:14.243237 32502 
solver.cpp:244]     Train net output #1: loss = 0.000267835 (* 1 = 0.000267835 loss)\nI0824 06:42:14.377082 32502 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0824 06:45:54.985560 32502 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0824 06:48:04.726893 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9094\nI0824 06:48:04.727361 32502 solver.cpp:404]     Test net output #1: loss = 0.363463 (* 1 = 0.363463 loss)\nI0824 06:48:06.816301 32502 solver.cpp:228] Iteration 71400, loss = 0.000428884\nI0824 06:48:06.816366 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:48:06.816385 32502 solver.cpp:244]     Train net output #1: loss = 0.000426289 (* 1 = 0.000426289 loss)\nI0824 06:48:06.952611 32502 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0824 06:51:47.554174 32502 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0824 06:53:57.283350 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9103\nI0824 06:53:57.283824 32502 solver.cpp:404]     Test net output #1: loss = 0.362328 (* 1 = 0.362328 loss)\nI0824 06:53:59.371242 32502 solver.cpp:228] Iteration 71500, loss = 0.000436846\nI0824 06:53:59.371306 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:53:59.371335 32502 solver.cpp:244]     Train net output #1: loss = 0.00043425 (* 1 = 0.00043425 loss)\nI0824 06:53:59.518512 32502 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0824 06:57:40.285996 32502 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0824 06:59:50.017029 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9111\nI0824 06:59:50.017472 32502 solver.cpp:404]     Test net output #1: loss = 0.361328 (* 1 = 0.361328 loss)\nI0824 06:59:52.105185 32502 solver.cpp:228] Iteration 71600, loss = 0.000450836\nI0824 06:59:52.105247 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 06:59:52.105265 32502 solver.cpp:244]     Train net output #1: loss = 0.00044824 (* 1 = 0.00044824 loss)\nI0824 06:59:52.244515 32502 
sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0824 07:03:32.842149 32502 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0824 07:05:42.547430 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 07:05:42.547874 32502 solver.cpp:404]     Test net output #1: loss = 0.360566 (* 1 = 0.360566 loss)\nI0824 07:05:44.635252 32502 solver.cpp:228] Iteration 71700, loss = 0.000335635\nI0824 07:05:44.635313 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:05:44.635332 32502 solver.cpp:244]     Train net output #1: loss = 0.000333039 (* 1 = 0.000333039 loss)\nI0824 07:05:44.774703 32502 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0824 07:09:25.586105 32502 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0824 07:11:35.262573 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9119\nI0824 07:11:35.263041 32502 solver.cpp:404]     Test net output #1: loss = 0.360051 (* 1 = 0.360051 loss)\nI0824 07:11:37.350415 32502 solver.cpp:228] Iteration 71800, loss = 0.000295292\nI0824 07:11:37.350481 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:11:37.350499 32502 solver.cpp:244]     Train net output #1: loss = 0.000292696 (* 1 = 0.000292696 loss)\nI0824 07:11:37.492831 32502 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0824 07:15:18.414474 32502 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0824 07:17:28.050921 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 07:17:28.051370 32502 solver.cpp:404]     Test net output #1: loss = 0.359492 (* 1 = 0.359492 loss)\nI0824 07:17:30.139281 32502 solver.cpp:228] Iteration 71900, loss = 0.00052841\nI0824 07:17:30.139344 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:17:30.139363 32502 solver.cpp:244]     Train net output #1: loss = 0.000525814 (* 1 = 0.000525814 loss)\nI0824 07:17:30.272043 32502 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0824 07:21:10.949651 32502 solver.cpp:337] Iteration 72000, 
Testing net (#0)\nI0824 07:23:20.574519 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 07:23:20.574975 32502 solver.cpp:404]     Test net output #1: loss = 0.359134 (* 1 = 0.359134 loss)\nI0824 07:23:22.663048 32502 solver.cpp:228] Iteration 72000, loss = 0.000443614\nI0824 07:23:22.663115 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:23:22.663133 32502 solver.cpp:244]     Train net output #1: loss = 0.000441019 (* 1 = 0.000441019 loss)\nI0824 07:23:22.798338 32502 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0824 07:27:03.635288 32502 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0824 07:29:13.267724 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 07:29:13.268206 32502 solver.cpp:404]     Test net output #1: loss = 0.358776 (* 1 = 0.358776 loss)\nI0824 07:29:15.355576 32502 solver.cpp:228] Iteration 72100, loss = 0.000431659\nI0824 07:29:15.355638 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:29:15.355655 32502 solver.cpp:244]     Train net output #1: loss = 0.000429063 (* 1 = 0.000429063 loss)\nI0824 07:29:15.490424 32502 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0824 07:32:55.815627 32502 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0824 07:35:03.776507 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 07:35:03.776890 32502 solver.cpp:404]     Test net output #1: loss = 0.358466 (* 1 = 0.358466 loss)\nI0824 07:35:05.860621 32502 solver.cpp:228] Iteration 72200, loss = 0.000364364\nI0824 07:35:05.860671 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:35:05.860688 32502 solver.cpp:244]     Train net output #1: loss = 0.000361769 (* 1 = 0.000361769 loss)\nI0824 07:35:05.989054 32502 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0824 07:38:45.022372 32502 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0824 07:40:52.981374 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9109\nI0824 
07:40:52.981791 32502 solver.cpp:404]     Test net output #1: loss = 0.358213 (* 1 = 0.358213 loss)\nI0824 07:40:55.064285 32502 solver.cpp:228] Iteration 72300, loss = 0.0002408\nI0824 07:40:55.064332 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:40:55.064348 32502 solver.cpp:244]     Train net output #1: loss = 0.000238204 (* 1 = 0.000238204 loss)\nI0824 07:40:55.192764 32502 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0824 07:44:34.301653 32502 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0824 07:46:42.259239 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9112\nI0824 07:46:42.259660 32502 solver.cpp:404]     Test net output #1: loss = 0.357981 (* 1 = 0.357981 loss)\nI0824 07:46:44.341924 32502 solver.cpp:228] Iteration 72400, loss = 0.00045688\nI0824 07:46:44.341974 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:46:44.342000 32502 solver.cpp:244]     Train net output #1: loss = 0.000454285 (* 1 = 0.000454285 loss)\nI0824 07:46:44.477771 32502 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0824 07:50:24.016777 32502 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0824 07:52:31.990948 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9112\nI0824 07:52:31.991336 32502 solver.cpp:404]     Test net output #1: loss = 0.357853 (* 1 = 0.357853 loss)\nI0824 07:52:34.079982 32502 solver.cpp:228] Iteration 72500, loss = 0.000361133\nI0824 07:52:34.080034 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:52:34.080056 32502 solver.cpp:244]     Train net output #1: loss = 0.000358537 (* 1 = 0.000358537 loss)\nI0824 07:52:34.218997 32502 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0824 07:56:14.487534 32502 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0824 07:58:22.443439 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9112\nI0824 07:58:22.443876 32502 solver.cpp:404]     Test net output #1: loss = 0.357666 (* 1 = 0.357666 loss)\nI0824 
07:58:24.530961 32502 solver.cpp:228] Iteration 72600, loss = 0.00039613\nI0824 07:58:24.531008 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 07:58:24.531024 32502 solver.cpp:244]     Train net output #1: loss = 0.000393534 (* 1 = 0.000393534 loss)\nI0824 07:58:24.667814 32502 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0824 08:02:05.047919 32502 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0824 08:04:13.006556 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 08:04:13.006923 32502 solver.cpp:404]     Test net output #1: loss = 0.357648 (* 1 = 0.357648 loss)\nI0824 08:04:15.094771 32502 solver.cpp:228] Iteration 72700, loss = 0.000373029\nI0824 08:04:15.094820 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:04:15.094841 32502 solver.cpp:244]     Train net output #1: loss = 0.000370433 (* 1 = 0.000370433 loss)\nI0824 08:04:15.227759 32502 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0824 08:07:55.495225 32502 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0824 08:10:03.456599 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9111\nI0824 08:10:03.456993 32502 solver.cpp:404]     Test net output #1: loss = 0.357578 (* 1 = 0.357578 loss)\nI0824 08:10:05.544838 32502 solver.cpp:228] Iteration 72800, loss = 0.000279941\nI0824 08:10:05.544885 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:10:05.544903 32502 solver.cpp:244]     Train net output #1: loss = 0.000277345 (* 1 = 0.000277345 loss)\nI0824 08:10:05.684211 32502 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0824 08:13:45.992605 32502 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0824 08:15:53.967136 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9109\nI0824 08:15:53.967548 32502 solver.cpp:404]     Test net output #1: loss = 0.357497 (* 1 = 0.357497 loss)\nI0824 08:15:56.055857 32502 solver.cpp:228] Iteration 72900, loss = 0.000498537\nI0824 08:15:56.055905 32502 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:15:56.055922 32502 solver.cpp:244]     Train net output #1: loss = 0.000495941 (* 1 = 0.000495941 loss)\nI0824 08:15:56.197471 32502 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0824 08:19:36.733642 32502 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0824 08:21:44.697739 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9112\nI0824 08:21:44.698180 32502 solver.cpp:404]     Test net output #1: loss = 0.357388 (* 1 = 0.357388 loss)\nI0824 08:21:46.787820 32502 solver.cpp:228] Iteration 73000, loss = 0.000372995\nI0824 08:21:46.787870 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:21:46.787894 32502 solver.cpp:244]     Train net output #1: loss = 0.000370399 (* 1 = 0.000370399 loss)\nI0824 08:21:46.923336 32502 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0824 08:25:27.231097 32502 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0824 08:27:35.200009 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 08:27:35.200429 32502 solver.cpp:404]     Test net output #1: loss = 0.357336 (* 1 = 0.357336 loss)\nI0824 08:27:37.289582 32502 solver.cpp:228] Iteration 73100, loss = 0.000426745\nI0824 08:27:37.289629 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:27:37.289646 32502 solver.cpp:244]     Train net output #1: loss = 0.00042415 (* 1 = 0.00042415 loss)\nI0824 08:27:37.426914 32502 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0824 08:31:17.622453 32502 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0824 08:33:25.592636 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 08:33:25.593068 32502 solver.cpp:404]     Test net output #1: loss = 0.357404 (* 1 = 0.357404 loss)\nI0824 08:33:27.681099 32502 solver.cpp:228] Iteration 73200, loss = 0.000340783\nI0824 08:33:27.681149 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:33:27.681164 32502 solver.cpp:244]     Train net 
output #1: loss = 0.000338187 (* 1 = 0.000338187 loss)\nI0824 08:33:27.820528 32502 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0824 08:37:08.162752 32502 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0824 08:39:16.144078 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 08:39:16.144469 32502 solver.cpp:404]     Test net output #1: loss = 0.357429 (* 1 = 0.357429 loss)\nI0824 08:39:18.231902 32502 solver.cpp:228] Iteration 73300, loss = 0.000254659\nI0824 08:39:18.231948 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:39:18.231966 32502 solver.cpp:244]     Train net output #1: loss = 0.000252063 (* 1 = 0.000252063 loss)\nI0824 08:39:18.371330 32502 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0824 08:42:58.792260 32502 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0824 08:45:06.766324 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 08:45:06.766717 32502 solver.cpp:404]     Test net output #1: loss = 0.35742 (* 1 = 0.35742 loss)\nI0824 08:45:08.853669 32502 solver.cpp:228] Iteration 73400, loss = 0.000506248\nI0824 08:45:08.853718 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:45:08.853734 32502 solver.cpp:244]     Train net output #1: loss = 0.000503652 (* 1 = 0.000503652 loss)\nI0824 08:45:08.991232 32502 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0824 08:48:49.524384 32502 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0824 08:50:57.498600 32502 solver.cpp:404]     Test net output #0: accuracy = 0.911\nI0824 08:50:57.499001 32502 solver.cpp:404]     Test net output #1: loss = 0.357391 (* 1 = 0.357391 loss)\nI0824 08:50:59.587118 32502 solver.cpp:228] Iteration 73500, loss = 0.000414698\nI0824 08:50:59.587169 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:50:59.587193 32502 solver.cpp:244]     Train net output #1: loss = 0.000412103 (* 1 = 0.000412103 loss)\nI0824 08:50:59.731501 32502 sgd_solver.cpp:166] Iteration 
73500, lr = 0.0035\nI0824 08:54:40.286756 32502 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0824 08:56:48.292394 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9109\nI0824 08:56:48.292876 32502 solver.cpp:404]     Test net output #1: loss = 0.357404 (* 1 = 0.357404 loss)\nI0824 08:56:50.381038 32502 solver.cpp:228] Iteration 73600, loss = 0.000432208\nI0824 08:56:50.381088 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 08:56:50.381111 32502 solver.cpp:244]     Train net output #1: loss = 0.000429612 (* 1 = 0.000429612 loss)\nI0824 08:56:50.520416 32502 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0824 09:00:30.996644 32502 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0824 09:02:38.992465 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9111\nI0824 09:02:38.992873 32502 solver.cpp:404]     Test net output #1: loss = 0.357446 (* 1 = 0.357446 loss)\nI0824 09:02:41.079767 32502 solver.cpp:228] Iteration 73700, loss = 0.000323575\nI0824 09:02:41.079818 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:02:41.079843 32502 solver.cpp:244]     Train net output #1: loss = 0.00032098 (* 1 = 0.00032098 loss)\nI0824 09:02:41.219220 32502 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0824 09:06:21.617450 32502 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0824 09:08:29.608839 32502 solver.cpp:404]     Test net output #0: accuracy = 0.911\nI0824 09:08:29.609264 32502 solver.cpp:404]     Test net output #1: loss = 0.357431 (* 1 = 0.357431 loss)\nI0824 09:08:31.696694 32502 solver.cpp:228] Iteration 73800, loss = 0.000258617\nI0824 09:08:31.696746 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:08:31.696770 32502 solver.cpp:244]     Train net output #1: loss = 0.000256021 (* 1 = 0.000256021 loss)\nI0824 09:08:31.841197 32502 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0824 09:12:12.145362 32502 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0824 
09:14:20.160537 32502 solver.cpp:404]     Test net output #0: accuracy = 0.911\nI0824 09:14:20.160955 32502 solver.cpp:404]     Test net output #1: loss = 0.357521 (* 1 = 0.357521 loss)\nI0824 09:14:22.248860 32502 solver.cpp:228] Iteration 73900, loss = 0.00048937\nI0824 09:14:22.248908 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:14:22.248924 32502 solver.cpp:244]     Train net output #1: loss = 0.000486775 (* 1 = 0.000486775 loss)\nI0824 09:14:22.391991 32502 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0824 09:18:02.643412 32502 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0824 09:20:10.641151 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9112\nI0824 09:20:10.641556 32502 solver.cpp:404]     Test net output #1: loss = 0.357564 (* 1 = 0.357564 loss)\nI0824 09:20:12.729595 32502 solver.cpp:228] Iteration 74000, loss = 0.000370003\nI0824 09:20:12.729645 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:20:12.729660 32502 solver.cpp:244]     Train net output #1: loss = 0.000367408 (* 1 = 0.000367408 loss)\nI0824 09:20:12.867481 32502 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0824 09:23:53.207603 32502 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0824 09:26:01.208129 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 09:26:01.208550 32502 solver.cpp:404]     Test net output #1: loss = 0.357571 (* 1 = 0.357571 loss)\nI0824 09:26:03.295909 32502 solver.cpp:228] Iteration 74100, loss = 0.000374704\nI0824 09:26:03.295958 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:26:03.295974 32502 solver.cpp:244]     Train net output #1: loss = 0.000372108 (* 1 = 0.000372108 loss)\nI0824 09:26:03.432153 32502 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0824 09:29:43.881266 32502 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0824 09:31:51.846650 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 09:31:51.847086 32502 
solver.cpp:404]     Test net output #1: loss = 0.357638 (* 1 = 0.357638 loss)\nI0824 09:31:53.934528 32502 solver.cpp:228] Iteration 74200, loss = 0.000388137\nI0824 09:31:53.934576 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:31:53.934592 32502 solver.cpp:244]     Train net output #1: loss = 0.000385542 (* 1 = 0.000385542 loss)\nI0824 09:31:54.072278 32502 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0824 09:35:34.522708 32502 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0824 09:37:42.505719 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 09:37:42.506115 32502 solver.cpp:404]     Test net output #1: loss = 0.357643 (* 1 = 0.357643 loss)\nI0824 09:37:44.596349 32502 solver.cpp:228] Iteration 74300, loss = 0.000317735\nI0824 09:37:44.596396 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:37:44.596413 32502 solver.cpp:244]     Train net output #1: loss = 0.00031514 (* 1 = 0.00031514 loss)\nI0824 09:37:44.733198 32502 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0824 09:41:25.194983 32502 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0824 09:43:33.172523 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 09:43:33.172950 32502 solver.cpp:404]     Test net output #1: loss = 0.357665 (* 1 = 0.357665 loss)\nI0824 09:43:35.260025 32502 solver.cpp:228] Iteration 74400, loss = 0.000429854\nI0824 09:43:35.260076 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:43:35.260093 32502 solver.cpp:244]     Train net output #1: loss = 0.000427258 (* 1 = 0.000427258 loss)\nI0824 09:43:35.401934 32502 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0824 09:47:15.887459 32502 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0824 09:49:23.850180 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 09:49:23.850589 32502 solver.cpp:404]     Test net output #1: loss = 0.357716 (* 1 = 0.357716 loss)\nI0824 09:49:25.937778 32502 
solver.cpp:228] Iteration 74500, loss = 0.00038054\nI0824 09:49:25.937825 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:49:25.937842 32502 solver.cpp:244]     Train net output #1: loss = 0.000377944 (* 1 = 0.000377944 loss)\nI0824 09:49:26.078567 32502 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0824 09:53:06.258601 32502 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0824 09:55:14.218858 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 09:55:14.219287 32502 solver.cpp:404]     Test net output #1: loss = 0.357751 (* 1 = 0.357751 loss)\nI0824 09:55:16.305554 32502 solver.cpp:228] Iteration 74600, loss = 0.000404829\nI0824 09:55:16.305603 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 09:55:16.305618 32502 solver.cpp:244]     Train net output #1: loss = 0.000402234 (* 1 = 0.000402234 loss)\nI0824 09:55:16.448227 32502 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0824 09:58:57.002816 32502 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0824 10:01:04.976727 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 10:01:04.977133 32502 solver.cpp:404]     Test net output #1: loss = 0.357844 (* 1 = 0.357844 loss)\nI0824 10:01:07.064766 32502 solver.cpp:228] Iteration 74700, loss = 0.000305832\nI0824 10:01:07.064817 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:01:07.064834 32502 solver.cpp:244]     Train net output #1: loss = 0.000303237 (* 1 = 0.000303237 loss)\nI0824 10:01:07.202401 32502 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0824 10:04:47.431123 32502 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0824 10:06:56.740667 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 10:06:56.741106 32502 solver.cpp:404]     Test net output #1: loss = 0.357837 (* 1 = 0.357837 loss)\nI0824 10:06:58.834223 32502 solver.cpp:228] Iteration 74800, loss = 0.000259016\nI0824 10:06:58.834290 32502 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0824 10:06:58.834307 32502 solver.cpp:244]     Train net output #1: loss = 0.00025642 (* 1 = 0.00025642 loss)\nI0824 10:06:58.966094 32502 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0824 10:10:39.608722 32502 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0824 10:12:49.239506 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 10:12:49.239984 32502 solver.cpp:404]     Test net output #1: loss = 0.357886 (* 1 = 0.357886 loss)\nI0824 10:12:51.332217 32502 solver.cpp:228] Iteration 74900, loss = 0.00048969\nI0824 10:12:51.332283 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:12:51.332300 32502 solver.cpp:244]     Train net output #1: loss = 0.000487094 (* 1 = 0.000487094 loss)\nI0824 10:12:51.472985 32502 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0824 10:16:32.267660 32502 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0824 10:18:41.910626 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 10:18:41.911089 32502 solver.cpp:404]     Test net output #1: loss = 0.357875 (* 1 = 0.357875 loss)\nI0824 10:18:44.002099 32502 solver.cpp:228] Iteration 75000, loss = 0.000505015\nI0824 10:18:44.002162 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:18:44.002180 32502 solver.cpp:244]     Train net output #1: loss = 0.000502419 (* 1 = 0.000502419 loss)\nI0824 10:18:44.134183 32502 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0824 10:22:24.971787 32502 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0824 10:24:34.627324 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 10:24:34.627806 32502 solver.cpp:404]     Test net output #1: loss = 0.35788 (* 1 = 0.35788 loss)\nI0824 10:24:36.721074 32502 solver.cpp:228] Iteration 75100, loss = 0.000417125\nI0824 10:24:36.721141 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:24:36.721160 32502 solver.cpp:244]     Train net output #1: loss = 0.000414529 (* 1 
= 0.000414529 loss)\nI0824 10:24:36.855857 32502 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0824 10:28:17.492089 32502 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0824 10:30:27.138543 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 10:30:27.138991 32502 solver.cpp:404]     Test net output #1: loss = 0.358007 (* 1 = 0.358007 loss)\nI0824 10:30:29.230895 32502 solver.cpp:228] Iteration 75200, loss = 0.000335244\nI0824 10:30:29.230960 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:30:29.230978 32502 solver.cpp:244]     Train net output #1: loss = 0.000332648 (* 1 = 0.000332648 loss)\nI0824 10:30:29.368718 32502 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0824 10:34:10.190690 32502 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0824 10:36:19.836971 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 10:36:19.837451 32502 solver.cpp:404]     Test net output #1: loss = 0.358089 (* 1 = 0.358089 loss)\nI0824 10:36:21.928809 32502 solver.cpp:228] Iteration 75300, loss = 0.000242964\nI0824 10:36:21.928879 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:36:21.928897 32502 solver.cpp:244]     Train net output #1: loss = 0.000240368 (* 1 = 0.000240368 loss)\nI0824 10:36:22.065073 32502 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0824 10:40:02.983800 32502 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0824 10:42:12.578200 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 10:42:12.578654 32502 solver.cpp:404]     Test net output #1: loss = 0.358128 (* 1 = 0.358128 loss)\nI0824 10:42:14.670300 32502 solver.cpp:228] Iteration 75400, loss = 0.00048123\nI0824 10:42:14.670367 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:42:14.670392 32502 solver.cpp:244]     Train net output #1: loss = 0.000478635 (* 1 = 0.000478635 loss)\nI0824 10:42:14.800745 32502 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0824 
10:45:55.569077 32502 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0824 10:48:05.193724 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 10:48:05.194195 32502 solver.cpp:404]     Test net output #1: loss = 0.358237 (* 1 = 0.358237 loss)\nI0824 10:48:07.285452 32502 solver.cpp:228] Iteration 75500, loss = 0.000426492\nI0824 10:48:07.285521 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:48:07.285545 32502 solver.cpp:244]     Train net output #1: loss = 0.000423896 (* 1 = 0.000423896 loss)\nI0824 10:48:07.427202 32502 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0824 10:51:48.118121 32502 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0824 10:53:57.738684 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 10:53:57.739145 32502 solver.cpp:404]     Test net output #1: loss = 0.358223 (* 1 = 0.358223 loss)\nI0824 10:53:59.830229 32502 solver.cpp:228] Iteration 75600, loss = 0.000369945\nI0824 10:53:59.830296 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:53:59.830322 32502 solver.cpp:244]     Train net output #1: loss = 0.00036735 (* 1 = 0.00036735 loss)\nI0824 10:53:59.965857 32502 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0824 10:57:40.742069 32502 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0824 10:59:50.380015 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 10:59:50.380481 32502 solver.cpp:404]     Test net output #1: loss = 0.358325 (* 1 = 0.358325 loss)\nI0824 10:59:52.472457 32502 solver.cpp:228] Iteration 75700, loss = 0.000312056\nI0824 10:59:52.472524 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 10:59:52.472548 32502 solver.cpp:244]     Train net output #1: loss = 0.00030946 (* 1 = 0.00030946 loss)\nI0824 10:59:52.609582 32502 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0824 11:03:33.231866 32502 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0824 11:05:42.822839 32502 solver.cpp:404] 
    Test net output #0: accuracy = 0.9115\nI0824 11:05:42.826903 32502 solver.cpp:404]     Test net output #1: loss = 0.358348 (* 1 = 0.358348 loss)\nI0824 11:05:44.918324 32502 solver.cpp:228] Iteration 75800, loss = 0.000266425\nI0824 11:05:44.918388 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:05:44.918406 32502 solver.cpp:244]     Train net output #1: loss = 0.000263829 (* 1 = 0.000263829 loss)\nI0824 11:05:45.053364 32502 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0824 11:09:26.001154 32502 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0824 11:11:35.642217 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 11:11:35.642683 32502 solver.cpp:404]     Test net output #1: loss = 0.35838 (* 1 = 0.35838 loss)\nI0824 11:11:37.734403 32502 solver.cpp:228] Iteration 75900, loss = 0.000553903\nI0824 11:11:37.734467 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:11:37.734483 32502 solver.cpp:244]     Train net output #1: loss = 0.000551307 (* 1 = 0.000551307 loss)\nI0824 11:11:37.876129 32502 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0824 11:15:18.502152 32502 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0824 11:17:28.189518 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 11:17:28.189980 32502 solver.cpp:404]     Test net output #1: loss = 0.358367 (* 1 = 0.358367 loss)\nI0824 11:17:30.281543 32502 solver.cpp:228] Iteration 76000, loss = 0.000407004\nI0824 11:17:30.281605 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:17:30.281623 32502 solver.cpp:244]     Train net output #1: loss = 0.000404408 (* 1 = 0.000404408 loss)\nI0824 11:17:30.418501 32502 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0824 11:21:11.279532 32502 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0824 11:23:20.930786 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 11:23:20.931263 32502 solver.cpp:404]     Test net output #1: 
loss = 0.358429 (* 1 = 0.358429 loss)\nI0824 11:23:23.022195 32502 solver.cpp:228] Iteration 76100, loss = 0.000384616\nI0824 11:23:23.022258 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:23:23.022276 32502 solver.cpp:244]     Train net output #1: loss = 0.00038202 (* 1 = 0.00038202 loss)\nI0824 11:23:23.155303 32502 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0824 11:27:03.931779 32502 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0824 11:29:13.599720 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 11:29:13.600203 32502 solver.cpp:404]     Test net output #1: loss = 0.358485 (* 1 = 0.358485 loss)\nI0824 11:29:15.692020 32502 solver.cpp:228] Iteration 76200, loss = 0.000313296\nI0824 11:29:15.692082 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:29:15.692101 32502 solver.cpp:244]     Train net output #1: loss = 0.0003107 (* 1 = 0.0003107 loss)\nI0824 11:29:15.827453 32502 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0824 11:32:56.585518 32502 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0824 11:35:06.250175 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 11:35:06.250654 32502 solver.cpp:404]     Test net output #1: loss = 0.358452 (* 1 = 0.358452 loss)\nI0824 11:35:08.341596 32502 solver.cpp:228] Iteration 76300, loss = 0.000237379\nI0824 11:35:08.341660 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:35:08.341677 32502 solver.cpp:244]     Train net output #1: loss = 0.000234783 (* 1 = 0.000234783 loss)\nI0824 11:35:08.479291 32502 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0824 11:38:49.078073 32502 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0824 11:40:58.749148 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 11:40:58.749608 32502 solver.cpp:404]     Test net output #1: loss = 0.35853 (* 1 = 0.35853 loss)\nI0824 11:41:00.841143 32502 solver.cpp:228] Iteration 76400, loss = 
0.000476283\nI0824 11:41:00.841207 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:41:00.841225 32502 solver.cpp:244]     Train net output #1: loss = 0.000473687 (* 1 = 0.000473687 loss)\nI0824 11:41:00.981312 32502 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0824 11:44:41.783306 32502 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0824 11:46:51.412535 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 11:46:51.412997 32502 solver.cpp:404]     Test net output #1: loss = 0.35857 (* 1 = 0.35857 loss)\nI0824 11:46:53.503839 32502 solver.cpp:228] Iteration 76500, loss = 0.000439398\nI0824 11:46:53.503904 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:46:53.503922 32502 solver.cpp:244]     Train net output #1: loss = 0.000436802 (* 1 = 0.000436802 loss)\nI0824 11:46:53.637028 32502 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0824 11:50:34.469166 32502 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0824 11:52:44.072979 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 11:52:44.073460 32502 solver.cpp:404]     Test net output #1: loss = 0.358578 (* 1 = 0.358578 loss)\nI0824 11:52:46.164131 32502 solver.cpp:228] Iteration 76600, loss = 0.000405942\nI0824 11:52:46.164194 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 11:52:46.164212 32502 solver.cpp:244]     Train net output #1: loss = 0.000403346 (* 1 = 0.000403346 loss)\nI0824 11:52:46.299185 32502 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0824 11:56:27.111914 32502 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0824 11:58:36.740161 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 11:58:36.740622 32502 solver.cpp:404]     Test net output #1: loss = 0.358679 (* 1 = 0.358679 loss)\nI0824 11:58:38.832062 32502 solver.cpp:228] Iteration 76700, loss = 0.000272889\nI0824 11:58:38.832125 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 
11:58:38.832144 32502 solver.cpp:244]     Train net output #1: loss = 0.000270294 (* 1 = 0.000270294 loss)\nI0824 11:58:38.967005 32502 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0824 12:02:20.304725 32502 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0824 12:04:29.925473 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 12:04:29.925958 32502 solver.cpp:404]     Test net output #1: loss = 0.358736 (* 1 = 0.358736 loss)\nI0824 12:04:32.016562 32502 solver.cpp:228] Iteration 76800, loss = 0.0002909\nI0824 12:04:32.016626 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:04:32.016644 32502 solver.cpp:244]     Train net output #1: loss = 0.000288304 (* 1 = 0.000288304 loss)\nI0824 12:04:32.162475 32502 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0824 12:08:13.598501 32502 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0824 12:10:23.227592 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 12:10:23.228067 32502 solver.cpp:404]     Test net output #1: loss = 0.358802 (* 1 = 0.358802 loss)\nI0824 12:10:25.319676 32502 solver.cpp:228] Iteration 76900, loss = 0.000514964\nI0824 12:10:25.319739 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:10:25.319757 32502 solver.cpp:244]     Train net output #1: loss = 0.000512368 (* 1 = 0.000512368 loss)\nI0824 12:10:25.456737 32502 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0824 12:14:06.758826 32502 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0824 12:16:16.384100 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 12:16:16.384582 32502 solver.cpp:404]     Test net output #1: loss = 0.358851 (* 1 = 0.358851 loss)\nI0824 12:16:18.476033 32502 solver.cpp:228] Iteration 77000, loss = 0.000385595\nI0824 12:16:18.476099 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:16:18.476114 32502 solver.cpp:244]     Train net output #1: loss = 0.000382999 (* 1 = 0.000382999 loss)\nI0824 
12:16:18.617591 32502 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0824 12:20:00.059259 32502 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0824 12:22:09.671627 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9113\nI0824 12:22:09.672082 32502 solver.cpp:404]     Test net output #1: loss = 0.358887 (* 1 = 0.358887 loss)\nI0824 12:22:11.764999 32502 solver.cpp:228] Iteration 77100, loss = 0.00042488\nI0824 12:22:11.765065 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:22:11.765084 32502 solver.cpp:244]     Train net output #1: loss = 0.000422284 (* 1 = 0.000422284 loss)\nI0824 12:22:11.911320 32502 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0824 12:25:53.456879 32502 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0824 12:28:03.074903 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 12:28:03.075389 32502 solver.cpp:404]     Test net output #1: loss = 0.35896 (* 1 = 0.35896 loss)\nI0824 12:28:05.167444 32502 solver.cpp:228] Iteration 77200, loss = 0.000297986\nI0824 12:28:05.167510 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:28:05.167527 32502 solver.cpp:244]     Train net output #1: loss = 0.000295391 (* 1 = 0.000295391 loss)\nI0824 12:28:05.309442 32502 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0824 12:31:46.567457 32502 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0824 12:33:56.161466 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9114\nI0824 12:33:56.161948 32502 solver.cpp:404]     Test net output #1: loss = 0.359001 (* 1 = 0.359001 loss)\nI0824 12:33:58.253247 32502 solver.cpp:228] Iteration 77300, loss = 0.00025291\nI0824 12:33:58.253309 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:33:58.253327 32502 solver.cpp:244]     Train net output #1: loss = 0.000250314 (* 1 = 0.000250314 loss)\nI0824 12:33:58.397624 32502 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0824 12:37:39.818351 32502 solver.cpp:337] 
Iteration 77400, Testing net (#0)\nI0824 12:39:49.424975 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9115\nI0824 12:39:49.425441 32502 solver.cpp:404]     Test net output #1: loss = 0.359033 (* 1 = 0.359033 loss)\nI0824 12:39:51.517424 32502 solver.cpp:228] Iteration 77400, loss = 0.000486303\nI0824 12:39:51.517485 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:39:51.517503 32502 solver.cpp:244]     Train net output #1: loss = 0.000483707 (* 1 = 0.000483707 loss)\nI0824 12:39:51.656008 32502 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0824 12:43:33.024565 32502 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0824 12:45:42.635754 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 12:45:42.636210 32502 solver.cpp:404]     Test net output #1: loss = 0.359092 (* 1 = 0.359092 loss)\nI0824 12:45:44.728004 32502 solver.cpp:228] Iteration 77500, loss = 0.00043787\nI0824 12:45:44.728067 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:45:44.728085 32502 solver.cpp:244]     Train net output #1: loss = 0.000435274 (* 1 = 0.000435274 loss)\nI0824 12:45:44.868000 32502 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0824 12:49:26.112107 32502 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0824 12:51:35.714692 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9119\nI0824 12:51:35.715174 32502 solver.cpp:404]     Test net output #1: loss = 0.359073 (* 1 = 0.359073 loss)\nI0824 12:51:37.807752 32502 solver.cpp:228] Iteration 77600, loss = 0.000384563\nI0824 12:51:37.807816 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:51:37.807833 32502 solver.cpp:244]     Train net output #1: loss = 0.000381967 (* 1 = 0.000381967 loss)\nI0824 12:51:37.946185 32502 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0824 12:55:19.400557 32502 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0824 12:57:29.001713 32502 solver.cpp:404]     Test net output #0: accuracy = 
0.9118\nI0824 12:57:29.002200 32502 solver.cpp:404]     Test net output #1: loss = 0.359148 (* 1 = 0.359148 loss)\nI0824 12:57:31.094259 32502 solver.cpp:228] Iteration 77700, loss = 0.000372796\nI0824 12:57:31.094322 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 12:57:31.094341 32502 solver.cpp:244]     Train net output #1: loss = 0.0003702 (* 1 = 0.0003702 loss)\nI0824 12:57:31.236357 32502 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0824 13:01:12.723502 32502 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0824 13:03:22.342025 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 13:03:22.342501 32502 solver.cpp:404]     Test net output #1: loss = 0.359139 (* 1 = 0.359139 loss)\nI0824 13:03:24.433455 32502 solver.cpp:228] Iteration 77800, loss = 0.000253002\nI0824 13:03:24.433518 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:03:24.433534 32502 solver.cpp:244]     Train net output #1: loss = 0.000250406 (* 1 = 0.000250406 loss)\nI0824 13:03:24.579725 32502 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0824 13:07:06.050038 32502 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0824 13:09:15.667812 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 13:09:15.668300 32502 solver.cpp:404]     Test net output #1: loss = 0.359212 (* 1 = 0.359212 loss)\nI0824 13:09:17.760017 32502 solver.cpp:228] Iteration 77900, loss = 0.000498582\nI0824 13:09:17.760080 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:09:17.760097 32502 solver.cpp:244]     Train net output #1: loss = 0.000495986 (* 1 = 0.000495986 loss)\nI0824 13:09:17.898624 32502 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0824 13:12:59.286909 32502 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0824 13:15:08.884780 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 13:15:08.885246 32502 solver.cpp:404]     Test net output #1: loss = 0.359292 (* 1 = 0.359292 
loss)\nI0824 13:15:10.977165 32502 solver.cpp:228] Iteration 78000, loss = 0.000363905\nI0824 13:15:10.977231 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:15:10.977249 32502 solver.cpp:244]     Train net output #1: loss = 0.000361309 (* 1 = 0.000361309 loss)\nI0824 13:15:11.119751 32502 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0824 13:18:52.422699 32502 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0824 13:21:02.038187 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 13:21:02.038660 32502 solver.cpp:404]     Test net output #1: loss = 0.359287 (* 1 = 0.359287 loss)\nI0824 13:21:04.133780 32502 solver.cpp:228] Iteration 78100, loss = 0.000352122\nI0824 13:21:04.133846 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:21:04.133870 32502 solver.cpp:244]     Train net output #1: loss = 0.000349526 (* 1 = 0.000349526 loss)\nI0824 13:21:04.275611 32502 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0824 13:24:45.695235 32502 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0824 13:26:55.332872 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 13:26:55.333345 32502 solver.cpp:404]     Test net output #1: loss = 0.359342 (* 1 = 0.359342 loss)\nI0824 13:26:57.425016 32502 solver.cpp:228] Iteration 78200, loss = 0.000312977\nI0824 13:26:57.425086 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:26:57.425112 32502 solver.cpp:244]     Train net output #1: loss = 0.000310382 (* 1 = 0.000310382 loss)\nI0824 13:26:57.569383 32502 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0824 13:30:38.951277 32502 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0824 13:32:48.647655 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9119\nI0824 13:32:48.648134 32502 solver.cpp:404]     Test net output #1: loss = 0.359375 (* 1 = 0.359375 loss)\nI0824 13:32:50.741257 32502 solver.cpp:228] Iteration 78300, loss = 0.000294827\nI0824 13:32:50.741325 
32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:32:50.741350 32502 solver.cpp:244]     Train net output #1: loss = 0.000292231 (* 1 = 0.000292231 loss)\nI0824 13:32:50.876973 32502 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0824 13:36:31.824518 32502 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0824 13:38:41.537416 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9119\nI0824 13:38:41.537900 32502 solver.cpp:404]     Test net output #1: loss = 0.359444 (* 1 = 0.359444 loss)\nI0824 13:38:43.629979 32502 solver.cpp:228] Iteration 78400, loss = 0.000475459\nI0824 13:38:43.630048 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:38:43.630074 32502 solver.cpp:244]     Train net output #1: loss = 0.000472863 (* 1 = 0.000472863 loss)\nI0824 13:38:43.769529 32502 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0824 13:42:24.388706 32502 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0824 13:44:34.165633 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 13:44:34.166122 32502 solver.cpp:404]     Test net output #1: loss = 0.359493 (* 1 = 0.359493 loss)\nI0824 13:44:36.258080 32502 solver.cpp:228] Iteration 78500, loss = 0.000374625\nI0824 13:44:36.258153 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:44:36.258180 32502 solver.cpp:244]     Train net output #1: loss = 0.000372029 (* 1 = 0.000372029 loss)\nI0824 13:44:36.382189 32502 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0824 13:48:15.770601 32502 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0824 13:50:25.491698 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9119\nI0824 13:50:25.492192 32502 solver.cpp:404]     Test net output #1: loss = 0.359432 (* 1 = 0.359432 loss)\nI0824 13:50:27.585800 32502 solver.cpp:228] Iteration 78600, loss = 0.00038784\nI0824 13:50:27.585867 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:50:27.585886 32502 solver.cpp:244]     
Train net output #1: loss = 0.000385244 (* 1 = 0.000385244 loss)\nI0824 13:50:27.713610 32502 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0824 13:54:07.236416 32502 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0824 13:56:17.056918 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 13:56:17.057400 32502 solver.cpp:404]     Test net output #1: loss = 0.359554 (* 1 = 0.359554 loss)\nI0824 13:56:19.150166 32502 solver.cpp:228] Iteration 78700, loss = 0.00030366\nI0824 13:56:19.150233 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 13:56:19.150257 32502 solver.cpp:244]     Train net output #1: loss = 0.000301065 (* 1 = 0.000301065 loss)\nI0824 13:56:19.278261 32502 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0824 13:59:58.852406 32502 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0824 14:02:08.668370 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9117\nI0824 14:02:08.668841 32502 solver.cpp:404]     Test net output #1: loss = 0.359594 (* 1 = 0.359594 loss)\nI0824 14:02:10.761945 32502 solver.cpp:228] Iteration 78800, loss = 0.000274393\nI0824 14:02:10.762014 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:02:10.762040 32502 solver.cpp:244]     Train net output #1: loss = 0.000271797 (* 1 = 0.000271797 loss)\nI0824 14:02:10.882886 32502 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0824 14:05:50.255805 32502 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0824 14:08:00.067831 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9116\nI0824 14:08:00.068295 32502 solver.cpp:404]     Test net output #1: loss = 0.359632 (* 1 = 0.359632 loss)\nI0824 14:08:02.161097 32502 solver.cpp:228] Iteration 78900, loss = 0.000499445\nI0824 14:08:02.161166 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:08:02.161192 32502 solver.cpp:244]     Train net output #1: loss = 0.000496849 (* 1 = 0.000496849 loss)\nI0824 14:08:02.277223 32502 sgd_solver.cpp:166] 
Iteration 78900, lr = 0.0035\nI0824 14:11:41.609758 32502 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0824 14:13:51.403228 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 14:13:51.403712 32502 solver.cpp:404]     Test net output #1: loss = 0.35973 (* 1 = 0.35973 loss)\nI0824 14:13:53.496052 32502 solver.cpp:228] Iteration 79000, loss = 0.000392073\nI0824 14:13:53.496119 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:13:53.496146 32502 solver.cpp:244]     Train net output #1: loss = 0.000389477 (* 1 = 0.000389477 loss)\nI0824 14:13:53.621335 32502 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0824 14:17:33.056762 32502 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0824 14:19:42.855105 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 14:19:42.855581 32502 solver.cpp:404]     Test net output #1: loss = 0.359715 (* 1 = 0.359715 loss)\nI0824 14:19:44.948565 32502 solver.cpp:228] Iteration 79100, loss = 0.000422522\nI0824 14:19:44.948632 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:19:44.948655 32502 solver.cpp:244]     Train net output #1: loss = 0.000419927 (* 1 = 0.000419927 loss)\nI0824 14:19:45.072525 32502 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0824 14:23:24.567582 32502 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0824 14:25:34.234553 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9118\nI0824 14:25:34.235018 32502 solver.cpp:404]     Test net output #1: loss = 0.359863 (* 1 = 0.359863 loss)\nI0824 14:25:36.326977 32502 solver.cpp:228] Iteration 79200, loss = 0.000374043\nI0824 14:25:36.327044 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:25:36.327070 32502 solver.cpp:244]     Train net output #1: loss = 0.000371448 (* 1 = 0.000371448 loss)\nI0824 14:25:36.451264 32502 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0824 14:29:15.820389 32502 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0824 
14:31:25.575978 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 14:31:25.576465 32502 solver.cpp:404]     Test net output #1: loss = 0.359855 (* 1 = 0.359855 loss)\nI0824 14:31:27.668920 32502 solver.cpp:228] Iteration 79300, loss = 0.000261181\nI0824 14:31:27.668988 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:31:27.669013 32502 solver.cpp:244]     Train net output #1: loss = 0.000258585 (* 1 = 0.000258585 loss)\nI0824 14:31:27.795766 32502 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0824 14:35:07.193076 32502 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0824 14:37:16.942914 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 14:37:16.943390 32502 solver.cpp:404]     Test net output #1: loss = 0.359933 (* 1 = 0.359933 loss)\nI0824 14:37:19.035604 32502 solver.cpp:228] Iteration 79400, loss = 0.000535439\nI0824 14:37:19.035671 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:37:19.035696 32502 solver.cpp:244]     Train net output #1: loss = 0.000532843 (* 1 = 0.000532843 loss)\nI0824 14:37:19.164945 32502 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0824 14:40:58.658920 32502 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0824 14:43:08.386075 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 14:43:08.386540 32502 solver.cpp:404]     Test net output #1: loss = 0.359987 (* 1 = 0.359987 loss)\nI0824 14:43:10.479516 32502 solver.cpp:228] Iteration 79500, loss = 0.000393979\nI0824 14:43:10.479583 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:43:10.479610 32502 solver.cpp:244]     Train net output #1: loss = 0.000391383 (* 1 = 0.000391383 loss)\nI0824 14:43:10.607841 32502 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0824 14:46:50.038671 32502 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0824 14:48:59.810940 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9122\nI0824 14:48:59.811419 32502 
solver.cpp:404]     Test net output #1: loss = 0.360012 (* 1 = 0.360012 loss)\nI0824 14:49:01.903864 32502 solver.cpp:228] Iteration 79600, loss = 0.00041294\nI0824 14:49:01.903931 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:49:01.903957 32502 solver.cpp:244]     Train net output #1: loss = 0.000410345 (* 1 = 0.000410345 loss)\nI0824 14:49:02.034330 32502 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0824 14:52:41.457428 32502 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0824 14:54:51.175024 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 14:54:51.175513 32502 solver.cpp:404]     Test net output #1: loss = 0.360037 (* 1 = 0.360037 loss)\nI0824 14:54:53.268036 32502 solver.cpp:228] Iteration 79700, loss = 0.000337136\nI0824 14:54:53.268102 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 14:54:53.268128 32502 solver.cpp:244]     Train net output #1: loss = 0.000334541 (* 1 = 0.000334541 loss)\nI0824 14:54:53.401918 32502 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0824 14:58:32.796490 32502 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0824 15:00:42.557991 32502 solver.cpp:404]     Test net output #0: accuracy = 0.912\nI0824 15:00:42.558465 32502 solver.cpp:404]     Test net output #1: loss = 0.360115 (* 1 = 0.360115 loss)\nI0824 15:00:44.650600 32502 solver.cpp:228] Iteration 79800, loss = 0.000224023\nI0824 15:00:44.650666 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 15:00:44.650691 32502 solver.cpp:244]     Train net output #1: loss = 0.000221427 (* 1 = 0.000221427 loss)\nI0824 15:00:44.776798 32502 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0824 15:04:24.261090 32502 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0824 15:06:33.981508 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9121\nI0824 15:06:33.981998 32502 solver.cpp:404]     Test net output #1: loss = 0.360066 (* 1 = 0.360066 loss)\nI0824 15:06:36.074056 32502 
solver.cpp:228] Iteration 79900, loss = 0.000448624\nI0824 15:06:36.074123 32502 solver.cpp:244]     Train net output #0: accuracy = 1\nI0824 15:06:36.074148 32502 solver.cpp:244]     Train net output #1: loss = 0.000446029 (* 1 = 0.000446029 loss)\nI0824 15:06:36.199395 32502 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0824 15:10:15.676353 32502 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Res110Fig6b_iter_80000.caffemodel\nI0824 15:10:16.289876 32502 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Res110Fig6b_iter_80000.solverstate\nI0824 15:10:17.003592 32502 solver.cpp:317] Iteration 80000, loss = 0.000389922\nI0824 15:10:17.003655 32502 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0824 15:12:26.597239 32502 solver.cpp:404]     Test net output #0: accuracy = 0.9121\nI0824 15:12:26.597717 32502 solver.cpp:404]     Test net output #1: loss = 0.360184 (* 1 = 0.360184 loss)\nI0824 15:12:26.597735 32502 solver.cpp:322] Optimization Done.\nI0824 15:12:37.019456 32502 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Res20Fig6b",
    "content": "I0817 16:26:32.881129 17350 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:26:32.883517 17350 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:26:32.884734 17350 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:26:32.885952 17350 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:26:32.887163 17350 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:26:32.888391 17350 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:26:32.889617 17350 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:26:32.890853 17350 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:26:32.892081 17350 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:26:33.310765 17350 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Res20Fig6b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0817 16:26:33.313730 17350 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:26:33.325935 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:33.325994 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:33.326700 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:26:33.326753 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:26:33.326773 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:26:33.326809 17350 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:26:33.326830 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:26:33.326848 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:26:33.326864 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:26:33.326881 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:26:33.326900 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:26:33.326917 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:26:33.326941 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:26:33.326961 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:26:33.326978 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:26:33.326995 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:26:33.327015 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:26:33.327033 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:26:33.327055 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:26:33.327074 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L3_b2_cbr2_bn\nI0817 16:26:33.327091 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:26:33.327109 17350 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:26:33.327888 17350 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 
16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: 
\"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: 
\"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: 
\"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: 
\"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  
name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0817 16:26:33.328850 17350 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:26:33.330032 17350 net.cpp:100] Creating Layer dataLayer\nI0817 16:26:33.330102 17350 net.cpp:408] dataLayer -> data_top\nI0817 16:26:33.330308 17350 net.cpp:408] dataLayer -> label\nI0817 16:26:33.330425 17350 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:26:33.339315 17355 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0817 16:26:33.361804 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:33.369371 17350 net.cpp:150] Setting up dataLayer\nI0817 16:26:33.369431 17350 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:26:33.369443 17350 
net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.369448 17350 net.cpp:165] Memory required for data: 1536500\nI0817 16:26:33.369463 17350 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:26:33.369477 17350 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:26:33.369484 17350 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:26:33.369500 17350 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:26:33.369515 17350 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:26:33.369585 17350 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:26:33.369602 17350 net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.369608 17350 net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.369613 17350 net.cpp:165] Memory required for data: 1537500\nI0817 16:26:33.369619 17350 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:26:33.369680 17350 net.cpp:100] Creating Layer pre_conv\nI0817 16:26:33.369693 17350 net.cpp:434] pre_conv <- data_top\nI0817 16:26:33.369702 17350 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:26:33.371420 17350 net.cpp:150] Setting up pre_conv\nI0817 16:26:33.371443 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.371449 17350 net.cpp:165] Memory required for data: 9729500\nI0817 16:26:33.371500 17350 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:26:33.371562 17350 net.cpp:100] Creating Layer pre_bn\nI0817 16:26:33.371573 17350 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:26:33.371587 17350 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:26:33.372174 17350 net.cpp:150] Setting up pre_bn\nI0817 16:26:33.372193 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.372191 17356 blocking_queue.cpp:50] Waiting for data\nI0817 16:26:33.372200 17350 net.cpp:165] Memory required for data: 17921500\nI0817 16:26:33.372242 17350 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:26:33.372294 17350 net.cpp:100] Creating Layer 
pre_scale\nI0817 16:26:33.372303 17350 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:26:33.372313 17350 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:26:33.372469 17350 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:26:33.372722 17350 net.cpp:150] Setting up pre_scale\nI0817 16:26:33.372738 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.372745 17350 net.cpp:165] Memory required for data: 26113500\nI0817 16:26:33.372756 17350 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:26:33.372807 17350 net.cpp:100] Creating Layer pre_relu\nI0817 16:26:33.372817 17350 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:26:33.372828 17350 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:26:33.372839 17350 net.cpp:150] Setting up pre_relu\nI0817 16:26:33.372848 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.372853 17350 net.cpp:165] Memory required for data: 34305500\nI0817 16:26:33.372858 17350 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:26:33.372865 17350 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:26:33.372870 17350 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:26:33.372887 17350 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:26:33.372897 17350 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:26:33.372946 17350 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:26:33.372956 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.372963 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.372967 17350 net.cpp:165] Memory required for data: 50689500\nI0817 16:26:33.372972 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:26:33.372987 17350 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:26:33.372993 17350 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:26:33.373001 17350 
net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:26:33.373317 17350 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:26:33.373332 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.373337 17350 net.cpp:165] Memory required for data: 58881500\nI0817 16:26:33.373353 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:26:33.373366 17350 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:26:33.373373 17350 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:26:33.373381 17350 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:26:33.373607 17350 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:26:33.373620 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.373625 17350 net.cpp:165] Memory required for data: 67073500\nI0817 16:26:33.373636 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:26:33.373646 17350 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:26:33.373651 17350 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:26:33.373661 17350 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.373713 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:26:33.373854 17350 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:26:33.373870 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.373877 17350 net.cpp:165] Memory required for data: 75265500\nI0817 16:26:33.373885 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:26:33.373893 17350 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:26:33.373899 17350 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:26:33.373906 17350 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.373915 17350 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:26:33.373922 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.373927 17350 net.cpp:165] Memory required for data: 83457500\nI0817 
16:26:33.373932 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:26:33.373945 17350 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:26:33.373951 17350 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:26:33.373962 17350 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:26:33.374265 17350 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:26:33.374285 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.374290 17350 net.cpp:165] Memory required for data: 91649500\nI0817 16:26:33.374300 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:26:33.374311 17350 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:26:33.374316 17350 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:26:33.374327 17350 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:26:33.374557 17350 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:26:33.374570 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.374575 17350 net.cpp:165] Memory required for data: 99841500\nI0817 16:26:33.374589 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:26:33.374601 17350 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:26:33.374606 17350 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:26:33.374614 17350 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.374673 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:26:33.374816 17350 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:26:33.374830 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.374835 17350 net.cpp:165] Memory required for data: 108033500\nI0817 16:26:33.374845 17350 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:26:33.374894 17350 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:26:33.374905 17350 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:26:33.374912 17350 net.cpp:434] L1_b1_sum_eltwise <- 
pre_bn_top_pre_relu_0_split_1\nI0817 16:26:33.374924 17350 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:26:33.374994 17350 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:26:33.375012 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375017 17350 net.cpp:165] Memory required for data: 116225500\nI0817 16:26:33.375023 17350 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:26:33.375031 17350 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:26:33.375036 17350 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:26:33.375044 17350 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.375053 17350 net.cpp:150] Setting up L1_b1_relu\nI0817 16:26:33.375061 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375066 17350 net.cpp:165] Memory required for data: 124417500\nI0817 16:26:33.375069 17350 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.375082 17350 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.375087 17350 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:26:33.375094 17350 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:26:33.375103 17350 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:26:33.375146 17350 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.375159 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375164 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375169 17350 net.cpp:165] Memory required for data: 140801500\nI0817 16:26:33.375174 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:26:33.375185 17350 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:26:33.375191 17350 net.cpp:434] L1_b2_cbr1_conv <- 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:26:33.375203 17350 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:26:33.375515 17350 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:26:33.375529 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375533 17350 net.cpp:165] Memory required for data: 148993500\nI0817 16:26:33.375542 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:26:33.375551 17350 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:26:33.375557 17350 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:26:33.375566 17350 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:26:33.375810 17350 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:26:33.375824 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.375829 17350 net.cpp:165] Memory required for data: 157185500\nI0817 16:26:33.375840 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:26:33.375852 17350 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:26:33.375857 17350 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:26:33.375869 17350 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.375922 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:26:33.376061 17350 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:26:33.376075 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.376080 17350 net.cpp:165] Memory required for data: 165377500\nI0817 16:26:33.376096 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:26:33.376104 17350 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:26:33.376109 17350 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:26:33.376119 17350 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.376129 17350 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:26:33.376135 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 
16:26:33.376140 17350 net.cpp:165] Memory required for data: 173569500\nI0817 16:26:33.376145 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:26:33.376158 17350 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:26:33.376164 17350 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:26:33.376175 17350 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:26:33.376477 17350 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:26:33.376489 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.376494 17350 net.cpp:165] Memory required for data: 181761500\nI0817 16:26:33.376504 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:26:33.376513 17350 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:26:33.376518 17350 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:26:33.376526 17350 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:26:33.376761 17350 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:26:33.376775 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.376785 17350 net.cpp:165] Memory required for data: 189953500\nI0817 16:26:33.376806 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:26:33.376816 17350 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:26:33.376822 17350 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:26:33.376830 17350 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.376886 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:26:33.377025 17350 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:26:33.377038 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377043 17350 net.cpp:165] Memory required for data: 198145500\nI0817 16:26:33.377053 17350 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:26:33.377061 17350 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:26:33.377066 17350 net.cpp:434] L1_b2_sum_eltwise <- 
L1_b2_cbr2_bn_top\nI0817 16:26:33.377074 17350 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:26:33.377084 17350 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:26:33.377115 17350 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:26:33.377125 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377128 17350 net.cpp:165] Memory required for data: 206337500\nI0817 16:26:33.377133 17350 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:26:33.377144 17350 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:26:33.377149 17350 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:26:33.377156 17350 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.377166 17350 net.cpp:150] Setting up L1_b2_relu\nI0817 16:26:33.377172 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377177 17350 net.cpp:165] Memory required for data: 214529500\nI0817 16:26:33.377182 17350 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.377189 17350 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.377194 17350 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:26:33.377202 17350 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:26:33.377210 17350 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:26:33.377255 17350 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.377267 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377281 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377285 17350 net.cpp:165] Memory required for data: 230913500\nI0817 16:26:33.377290 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:26:33.377308 17350 net.cpp:100] Creating Layer 
L1_b3_cbr1_conv\nI0817 16:26:33.377315 17350 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:26:33.377323 17350 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:26:33.377629 17350 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:26:33.377642 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377647 17350 net.cpp:165] Memory required for data: 239105500\nI0817 16:26:33.377656 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:26:33.377667 17350 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:26:33.377674 17350 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:26:33.377682 17350 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:26:33.377929 17350 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:26:33.377943 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.377948 17350 net.cpp:165] Memory required for data: 247297500\nI0817 16:26:33.377959 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:26:33.377967 17350 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:26:33.377972 17350 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:26:33.377980 17350 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.378033 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:26:33.378170 17350 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:26:33.378183 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.378188 17350 net.cpp:165] Memory required for data: 255489500\nI0817 16:26:33.378197 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:26:33.378206 17350 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:26:33.378211 17350 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:26:33.378221 17350 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.378231 17350 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 
16:26:33.378237 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.378242 17350 net.cpp:165] Memory required for data: 263681500\nI0817 16:26:33.378247 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:26:33.378260 17350 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:26:33.378265 17350 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:26:33.378274 17350 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:26:33.378579 17350 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:26:33.378592 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.378597 17350 net.cpp:165] Memory required for data: 271873500\nI0817 16:26:33.378607 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:26:33.378621 17350 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:26:33.378628 17350 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:26:33.378638 17350 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:26:33.378880 17350 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:26:33.378893 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.378898 17350 net.cpp:165] Memory required for data: 280065500\nI0817 16:26:33.378908 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:26:33.378916 17350 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:26:33.378922 17350 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:26:33.378931 17350 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.378984 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:26:33.379117 17350 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:26:33.379129 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.379134 17350 net.cpp:165] Memory required for data: 288257500\nI0817 16:26:33.379150 17350 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:26:33.379163 17350 net.cpp:100] Creating Layer 
L1_b3_sum_eltwise\nI0817 16:26:33.379169 17350 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:26:33.379175 17350 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:26:33.379184 17350 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:26:33.379216 17350 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:26:33.379226 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.379230 17350 net.cpp:165] Memory required for data: 296449500\nI0817 16:26:33.379235 17350 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:26:33.379243 17350 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:26:33.379248 17350 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:26:33.379258 17350 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:26:33.379267 17350 net.cpp:150] Setting up L1_b3_relu\nI0817 16:26:33.379274 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.379278 17350 net.cpp:165] Memory required for data: 304641500\nI0817 16:26:33.379283 17350 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.379290 17350 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.379295 17350 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:26:33.379304 17350 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:26:33.379314 17350 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:26:33.379354 17350 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.379365 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.379372 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.379376 17350 net.cpp:165] Memory required for data: 321025500\nI0817 16:26:33.379382 17350 layer_factory.hpp:77] Creating 
layer L2_b1_cbr1_conv\nI0817 16:26:33.379396 17350 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:26:33.379401 17350 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:26:33.379410 17350 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:26:33.379721 17350 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:26:33.379734 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.379739 17350 net.cpp:165] Memory required for data: 323073500\nI0817 16:26:33.379750 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:26:33.379757 17350 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:26:33.379766 17350 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:26:33.379775 17350 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:26:33.380010 17350 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:26:33.380023 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.380028 17350 net.cpp:165] Memory required for data: 325121500\nI0817 16:26:33.380039 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:26:33.380048 17350 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:26:33.380053 17350 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:26:33.380060 17350 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.380115 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:26:33.380259 17350 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:26:33.380275 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.380280 17350 net.cpp:165] Memory required for data: 327169500\nI0817 16:26:33.380288 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:26:33.380296 17350 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:26:33.380302 17350 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:26:33.380309 17350 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 
16:26:33.380326 17350 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:26:33.380332 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.380337 17350 net.cpp:165] Memory required for data: 329217500\nI0817 16:26:33.380342 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:26:33.380355 17350 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:26:33.380362 17350 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:26:33.380373 17350 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:26:33.380686 17350 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:26:33.380699 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.380704 17350 net.cpp:165] Memory required for data: 331265500\nI0817 16:26:33.380713 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:26:33.380725 17350 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:26:33.380731 17350 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:26:33.380741 17350 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:26:33.380996 17350 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:26:33.381011 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.381016 17350 net.cpp:165] Memory required for data: 333313500\nI0817 16:26:33.381026 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:26:33.381034 17350 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:26:33.381041 17350 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:26:33.381050 17350 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.381104 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:26:33.381242 17350 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:26:33.381254 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.381259 17350 net.cpp:165] Memory required for data: 335361500\nI0817 16:26:33.381268 17350 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 
16:26:33.381279 17350 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:26:33.381284 17350 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:26:33.381295 17350 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:26:33.381377 17350 net.cpp:150] Setting up L2_b1_pool\nI0817 16:26:33.381392 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.381397 17350 net.cpp:165] Memory required for data: 337409500\nI0817 16:26:33.381403 17350 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:26:33.381417 17350 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:26:33.381422 17350 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:26:33.381429 17350 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:26:33.381436 17350 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:26:33.381472 17350 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:26:33.381485 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.381490 17350 net.cpp:165] Memory required for data: 339457500\nI0817 16:26:33.381495 17350 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:26:33.381503 17350 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:26:33.381510 17350 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:26:33.381515 17350 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.381525 17350 net.cpp:150] Setting up L2_b1_relu\nI0817 16:26:33.381532 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.381536 17350 net.cpp:165] Memory required for data: 341505500\nI0817 16:26:33.381541 17350 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:26:33.381585 17350 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:26:33.381603 17350 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:26:33.383939 17350 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:26:33.383957 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.383962 17350 net.cpp:165] 
Memory required for data: 343553500\nI0817 16:26:33.383968 17350 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:26:33.383981 17350 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:26:33.383996 17350 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:26:33.384002 17350 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:26:33.384011 17350 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:26:33.384088 17350 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:26:33.384104 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.384109 17350 net.cpp:165] Memory required for data: 347649500\nI0817 16:26:33.384114 17350 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.384124 17350 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.384130 17350 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:26:33.384140 17350 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:26:33.384150 17350 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:26:33.384196 17350 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.384209 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.384217 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.384222 17350 net.cpp:165] Memory required for data: 355841500\nI0817 16:26:33.384227 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:26:33.384238 17350 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:26:33.384243 17350 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:26:33.384255 17350 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:26:33.385700 17350 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:26:33.385717 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.385722 17350 
net.cpp:165] Memory required for data: 359937500\nI0817 16:26:33.385748 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:26:33.385759 17350 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:26:33.385766 17350 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:26:33.385776 17350 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:26:33.386018 17350 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:26:33.386032 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.386037 17350 net.cpp:165] Memory required for data: 364033500\nI0817 16:26:33.386049 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:26:33.386056 17350 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:26:33.386062 17350 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:26:33.386073 17350 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.386123 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:26:33.386266 17350 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:26:33.386279 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.386284 17350 net.cpp:165] Memory required for data: 368129500\nI0817 16:26:33.386293 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:26:33.386302 17350 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:26:33.386307 17350 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:26:33.386318 17350 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.386328 17350 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:26:33.386335 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.386339 17350 net.cpp:165] Memory required for data: 372225500\nI0817 16:26:33.386344 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:26:33.386358 17350 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:26:33.386364 17350 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 
16:26:33.386373 17350 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:26:33.386824 17350 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:26:33.386838 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.386843 17350 net.cpp:165] Memory required for data: 376321500\nI0817 16:26:33.386860 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:26:33.386873 17350 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:26:33.386879 17350 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:26:33.386888 17350 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:26:33.387120 17350 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:26:33.387132 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387137 17350 net.cpp:165] Memory required for data: 380417500\nI0817 16:26:33.387147 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:26:33.387156 17350 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:26:33.387162 17350 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:26:33.387169 17350 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.387224 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:26:33.387363 17350 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:26:33.387378 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387383 17350 net.cpp:165] Memory required for data: 384513500\nI0817 16:26:33.387392 17350 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:26:33.387403 17350 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:26:33.387408 17350 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:26:33.387415 17350 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:26:33.387423 17350 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:26:33.387451 17350 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:26:33.387461 17350 net.cpp:157] 
Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387465 17350 net.cpp:165] Memory required for data: 388609500\nI0817 16:26:33.387471 17350 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:26:33.387478 17350 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:26:33.387485 17350 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:26:33.387493 17350 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.387503 17350 net.cpp:150] Setting up L2_b2_relu\nI0817 16:26:33.387511 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387514 17350 net.cpp:165] Memory required for data: 392705500\nI0817 16:26:33.387519 17350 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.387526 17350 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.387531 17350 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:26:33.387540 17350 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:26:33.387550 17350 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:26:33.387591 17350 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.387603 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387609 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.387614 17350 net.cpp:165] Memory required for data: 400897500\nI0817 16:26:33.387619 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:26:33.387634 17350 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:26:33.387640 17350 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:26:33.387648 17350 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:26:33.388108 17350 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:26:33.388123 17350 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:26:33.388128 17350 net.cpp:165] Memory required for data: 404993500\nI0817 16:26:33.388136 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:26:33.388149 17350 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:26:33.388154 17350 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:26:33.388162 17350 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:26:33.388399 17350 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 16:26:33.388418 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.388423 17350 net.cpp:165] Memory required for data: 409089500\nI0817 16:26:33.388434 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:26:33.388443 17350 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:26:33.388448 17350 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:26:33.388459 17350 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.388514 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:26:33.388655 17350 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:26:33.388667 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.388672 17350 net.cpp:165] Memory required for data: 413185500\nI0817 16:26:33.388681 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:26:33.388689 17350 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:26:33.388695 17350 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:26:33.388702 17350 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.388711 17350 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:26:33.388718 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.388722 17350 net.cpp:165] Memory required for data: 417281500\nI0817 16:26:33.388727 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:26:33.388747 17350 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:26:33.388753 17350 
net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:26:33.388766 17350 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:26:33.389219 17350 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:26:33.389232 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.389237 17350 net.cpp:165] Memory required for data: 421377500\nI0817 16:26:33.389247 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:26:33.389256 17350 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:26:33.389262 17350 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:26:33.389276 17350 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:26:33.389513 17350 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:26:33.389525 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.389530 17350 net.cpp:165] Memory required for data: 425473500\nI0817 16:26:33.389541 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:26:33.389552 17350 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:26:33.389559 17350 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:26:33.389566 17350 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.389617 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:26:33.389756 17350 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:26:33.389768 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.389773 17350 net.cpp:165] Memory required for data: 429569500\nI0817 16:26:33.389788 17350 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:26:33.389802 17350 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:26:33.389808 17350 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:26:33.389816 17350 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:26:33.389822 17350 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:26:33.389849 17350 net.cpp:150] Setting 
up L2_b3_sum_eltwise\nI0817 16:26:33.389861 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.389866 17350 net.cpp:165] Memory required for data: 433665500\nI0817 16:26:33.389871 17350 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:26:33.389879 17350 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:26:33.389884 17350 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:26:33.389891 17350 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0817 16:26:33.389900 17350 net.cpp:150] Setting up L2_b3_relu\nI0817 16:26:33.389907 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.389919 17350 net.cpp:165] Memory required for data: 437761500\nI0817 16:26:33.389924 17350 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.389933 17350 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.389938 17350 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:26:33.389945 17350 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:26:33.389955 17350 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:26:33.390000 17350 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.390012 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.390018 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.390023 17350 net.cpp:165] Memory required for data: 445953500\nI0817 16:26:33.390028 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:26:33.390039 17350 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:26:33.390045 17350 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:26:33.390056 17350 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:26:33.390501 17350 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:26:33.390516 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.390521 17350 net.cpp:165] Memory required for data: 446977500\nI0817 16:26:33.390529 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:26:33.390538 17350 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:26:33.390544 17350 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:26:33.390554 17350 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:26:33.390805 17350 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:26:33.390820 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.390825 17350 net.cpp:165] Memory required for data: 448001500\nI0817 16:26:33.390836 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:26:33.390844 17350 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:26:33.390851 17350 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:26:33.390858 17350 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.390909 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:26:33.391057 17350 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:26:33.391070 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.391075 17350 net.cpp:165] Memory required for data: 449025500\nI0817 16:26:33.391084 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:26:33.391095 17350 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:26:33.391101 17350 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:26:33.391108 17350 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.391118 17350 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:26:33.391124 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.391129 17350 net.cpp:165] Memory required for data: 450049500\nI0817 16:26:33.391134 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:26:33.391147 17350 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:26:33.391154 17350 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:26:33.391161 17350 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:26:33.391613 17350 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:26:33.391628 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.391633 17350 net.cpp:165] Memory required for data: 451073500\nI0817 16:26:33.391641 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:26:33.391652 17350 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:26:33.391659 17350 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:26:33.391669 17350 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:26:33.391916 17350 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:26:33.391937 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.391942 17350 net.cpp:165] Memory required for data: 452097500\nI0817 16:26:33.391952 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:26:33.391961 17350 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:26:33.391966 17350 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:26:33.391978 17350 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.392030 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:26:33.392176 17350 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:26:33.392189 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.392194 17350 net.cpp:165] Memory required for data: 453121500\nI0817 16:26:33.392204 17350 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:26:33.392213 17350 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:26:33.392220 17350 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:26:33.392230 17350 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:26:33.392266 17350 net.cpp:150] Setting up L3_b1_pool\nI0817 16:26:33.392276 17350 net.cpp:157] Top shape: 125 
32 8 8 (256000)\nI0817 16:26:33.392279 17350 net.cpp:165] Memory required for data: 454145500\nI0817 16:26:33.392285 17350 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:26:33.392293 17350 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:26:33.392299 17350 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:26:33.392307 17350 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:26:33.392316 17350 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:26:33.392346 17350 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:26:33.392355 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.392360 17350 net.cpp:165] Memory required for data: 455169500\nI0817 16:26:33.392365 17350 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:26:33.392374 17350 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:26:33.392379 17350 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:26:33.392385 17350 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.392395 17350 net.cpp:150] Setting up L3_b1_relu\nI0817 16:26:33.392401 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.392405 17350 net.cpp:165] Memory required for data: 456193500\nI0817 16:26:33.392410 17350 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:26:33.392422 17350 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:26:33.392429 17350 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:26:33.393656 17350 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:26:33.393674 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.393679 17350 net.cpp:165] Memory required for data: 457217500\nI0817 16:26:33.393685 17350 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:26:33.393694 17350 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:26:33.393700 17350 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:26:33.393708 17350 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:26:33.393718 
17350 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:26:33.393757 17350 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:26:33.393772 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.393777 17350 net.cpp:165] Memory required for data: 459265500\nI0817 16:26:33.393790 17350 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:26:33.393797 17350 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:26:33.393803 17350 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:26:33.393810 17350 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:26:33.393821 17350 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:26:33.393870 17350 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:26:33.393882 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.393896 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.393901 17350 net.cpp:165] Memory required for data: 463361500\nI0817 16:26:33.393906 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:26:33.393920 17350 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:26:33.393926 17350 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:26:33.393935 17350 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:26:33.395911 17350 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:26:33.395928 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.395933 17350 net.cpp:165] Memory required for data: 465409500\nI0817 16:26:33.395943 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:26:33.395953 17350 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:26:33.395959 17350 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:26:33.395972 17350 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:26:33.396229 17350 
net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:26:33.396242 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.396247 17350 net.cpp:165] Memory required for data: 467457500\nI0817 16:26:33.396257 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:26:33.396266 17350 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:26:33.396272 17350 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:26:33.396283 17350 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.396338 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:26:33.396487 17350 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:26:33.396500 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.396505 17350 net.cpp:165] Memory required for data: 469505500\nI0817 16:26:33.396514 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:26:33.396525 17350 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:26:33.396531 17350 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:26:33.396538 17350 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.396548 17350 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:26:33.396555 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.396559 17350 net.cpp:165] Memory required for data: 471553500\nI0817 16:26:33.396564 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:26:33.396577 17350 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:26:33.396584 17350 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:26:33.396595 17350 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:26:33.397598 17350 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:26:33.397611 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.397616 17350 net.cpp:165] Memory required for data: 473601500\nI0817 16:26:33.397626 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 
16:26:33.397635 17350 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:26:33.397641 17350 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:26:33.397652 17350 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:26:33.397908 17350 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:26:33.397924 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.397929 17350 net.cpp:165] Memory required for data: 475649500\nI0817 16:26:33.397940 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:26:33.397948 17350 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:26:33.397954 17350 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:26:33.397963 17350 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.398015 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:26:33.398162 17350 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:26:33.398175 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.398180 17350 net.cpp:165] Memory required for data: 477697500\nI0817 16:26:33.398200 17350 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:26:33.398211 17350 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:26:33.398217 17350 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:26:33.398224 17350 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:26:33.398231 17350 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:26:33.398267 17350 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:26:33.398277 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.398282 17350 net.cpp:165] Memory required for data: 479745500\nI0817 16:26:33.398286 17350 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:26:33.398293 17350 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:26:33.398299 17350 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:26:33.398306 17350 net.cpp:395] L3_b2_relu -> 
L3_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.398315 17350 net.cpp:150] Setting up L3_b2_relu\nI0817 16:26:33.398322 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.398326 17350 net.cpp:165] Memory required for data: 481793500\nI0817 16:26:33.398331 17350 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:26:33.398339 17350 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:26:33.398344 17350 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:26:33.398355 17350 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:26:33.398365 17350 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:26:33.398407 17350 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:26:33.398418 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.398424 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.398429 17350 net.cpp:165] Memory required for data: 485889500\nI0817 16:26:33.398435 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:26:33.398449 17350 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:26:33.398455 17350 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:26:33.398464 17350 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:26:33.399472 17350 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:26:33.399488 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.399493 17350 net.cpp:165] Memory required for data: 487937500\nI0817 16:26:33.399503 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:26:33.399515 17350 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:26:33.399521 17350 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:26:33.399529 17350 net.cpp:408] L3_b3_cbr1_bn -> 
L3_b3_cbr1_bn_top\nI0817 16:26:33.399785 17350 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:26:33.399799 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.399804 17350 net.cpp:165] Memory required for data: 489985500\nI0817 16:26:33.399814 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:26:33.399827 17350 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:26:33.399833 17350 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:26:33.399842 17350 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.399899 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:26:33.400045 17350 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:26:33.400058 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.400063 17350 net.cpp:165] Memory required for data: 492033500\nI0817 16:26:33.400072 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:26:33.400082 17350 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:26:33.400089 17350 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:26:33.400095 17350 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.400104 17350 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:26:33.400118 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.400123 17350 net.cpp:165] Memory required for data: 494081500\nI0817 16:26:33.400128 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:26:33.400142 17350 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:26:33.400148 17350 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:26:33.400158 17350 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:26:33.401165 17350 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:26:33.401178 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.401183 17350 net.cpp:165] Memory required for data: 496129500\nI0817 16:26:33.401193 17350 layer_factory.hpp:77] 
Creating layer L3_b3_cbr2_bn\nI0817 16:26:33.401202 17350 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:26:33.401208 17350 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:26:33.401222 17350 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:26:33.401474 17350 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:26:33.401490 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.401495 17350 net.cpp:165] Memory required for data: 498177500\nI0817 16:26:33.401527 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:26:33.401538 17350 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:26:33.401545 17350 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:26:33.401552 17350 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.401610 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:26:33.401759 17350 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:26:33.401772 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.401777 17350 net.cpp:165] Memory required for data: 500225500\nI0817 16:26:33.401792 17350 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:26:33.401801 17350 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:26:33.401808 17350 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:26:33.401813 17350 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:26:33.401821 17350 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:26:33.401854 17350 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:26:33.401862 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.401866 17350 net.cpp:165] Memory required for data: 502273500\nI0817 16:26:33.401871 17350 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:26:33.401882 17350 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:26:33.401888 17350 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 
16:26:33.401895 17350 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:26:33.401904 17350 net.cpp:150] Setting up L3_b3_relu\nI0817 16:26:33.401911 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.401916 17350 net.cpp:165] Memory required for data: 504321500\nI0817 16:26:33.401921 17350 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:26:33.401932 17350 net.cpp:100] Creating Layer post_pool\nI0817 16:26:33.401937 17350 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0817 16:26:33.401944 17350 net.cpp:408] post_pool -> post_pool\nI0817 16:26:33.401979 17350 net.cpp:150] Setting up post_pool\nI0817 16:26:33.401989 17350 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:26:33.401993 17350 net.cpp:165] Memory required for data: 504353500\nI0817 16:26:33.401999 17350 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:26:33.402076 17350 net.cpp:100] Creating Layer post_FC\nI0817 16:26:33.402088 17350 net.cpp:434] post_FC <- post_pool\nI0817 16:26:33.402097 17350 net.cpp:408] post_FC -> post_FC_top\nI0817 16:26:33.402323 17350 net.cpp:150] Setting up post_FC\nI0817 16:26:33.402339 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.402344 17350 net.cpp:165] Memory required for data: 504358500\nI0817 16:26:33.402354 17350 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:26:33.402362 17350 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:26:33.402375 17350 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:26:33.402386 17350 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:26:33.402397 17350 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:26:33.402442 17350 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:26:33.402456 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.402463 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.402468 17350 net.cpp:165] 
Memory required for data: 504368500\nI0817 16:26:33.402473 17350 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:26:33.402516 17350 net.cpp:100] Creating Layer accuracy\nI0817 16:26:33.402526 17350 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:26:33.402534 17350 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:26:33.402542 17350 net.cpp:408] accuracy -> accuracy\nI0817 16:26:33.402583 17350 net.cpp:150] Setting up accuracy\nI0817 16:26:33.402595 17350 net.cpp:157] Top shape: (1)\nI0817 16:26:33.402601 17350 net.cpp:165] Memory required for data: 504368504\nI0817 16:26:33.402606 17350 layer_factory.hpp:77] Creating layer loss\nI0817 16:26:33.402614 17350 net.cpp:100] Creating Layer loss\nI0817 16:26:33.402621 17350 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:26:33.402627 17350 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:26:33.402634 17350 net.cpp:408] loss -> loss\nI0817 16:26:33.402678 17350 layer_factory.hpp:77] Creating layer loss\nI0817 16:26:33.402835 17350 net.cpp:150] Setting up loss\nI0817 16:26:33.402850 17350 net.cpp:157] Top shape: (1)\nI0817 16:26:33.402855 17350 net.cpp:160]     with loss weight 1\nI0817 16:26:33.402926 17350 net.cpp:165] Memory required for data: 504368508\nI0817 16:26:33.402935 17350 net.cpp:226] loss needs backward computation.\nI0817 16:26:33.402941 17350 net.cpp:228] accuracy does not need backward computation.\nI0817 16:26:33.402948 17350 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:26:33.402953 17350 net.cpp:226] post_FC needs backward computation.\nI0817 16:26:33.402958 17350 net.cpp:226] post_pool needs backward computation.\nI0817 16:26:33.402963 17350 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:26:33.402968 17350 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.402973 17350 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.402978 17350 net.cpp:226] 
L3_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.402983 17350 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.402988 17350 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:26:33.402993 17350 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.402998 17350 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.403003 17350 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:26:33.403008 17350 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:26:33.403013 17350 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:26:33.403018 17350 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.403023 17350 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.403028 17350 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.403033 17350 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.403038 17350 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:26:33.403043 17350 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.403048 17350 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.403053 17350 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.403059 17350 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:26:33.403064 17350 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:26:33.403070 17350 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:26:33.403082 17350 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:26:33.403087 17350 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.403093 17350 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:26:33.403098 17350 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.403103 17350 
net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.403110 17350 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.403115 17350 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.403120 17350 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.403125 17350 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.403129 17350 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:26:33.403134 17350 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:26:33.403141 17350 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:26:33.403146 17350 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.403151 17350 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.403156 17350 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.403161 17350 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.403165 17350 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:26:33.403170 17350 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.403175 17350 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.403188 17350 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:26:33.403194 17350 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:26:33.403199 17350 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:26:33.403204 17350 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.403210 17350 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.403215 17350 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.403220 17350 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.403226 17350 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 
16:26:33.403231 17350 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.403236 17350 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.403241 17350 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.403246 17350 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:26:33.403251 17350 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:26:33.403259 17350 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:26:33.403264 17350 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:26:33.403268 17350 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.403275 17350 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:26:33.403280 17350 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.403285 17350 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.403290 17350 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.403295 17350 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.403301 17350 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.403306 17350 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.403311 17350 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:26:33.403316 17350 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:26:33.403321 17350 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:26:33.403326 17350 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.403331 17350 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.403342 17350 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.403347 17350 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.403353 17350 net.cpp:226] L1_b3_cbr1_relu needs backward 
computation.\nI0817 16:26:33.403358 17350 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.403363 17350 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.403368 17350 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:26:33.403373 17350 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:26:33.403379 17350 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:26:33.403384 17350 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.403390 17350 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.403395 17350 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.403400 17350 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.403405 17350 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:26:33.403410 17350 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.403415 17350 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.403421 17350 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.403426 17350 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:26:33.403431 17350 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:26:33.403436 17350 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.403442 17350 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.403450 17350 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.403456 17350 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.403461 17350 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.403466 17350 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.403471 17350 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.403477 17350 net.cpp:226] 
L1_b1_cbr1_conv needs backward computation.\nI0817 16:26:33.403482 17350 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:26:33.403487 17350 net.cpp:226] pre_relu needs backward computation.\nI0817 16:26:33.403492 17350 net.cpp:226] pre_scale needs backward computation.\nI0817 16:26:33.403497 17350 net.cpp:226] pre_bn needs backward computation.\nI0817 16:26:33.403502 17350 net.cpp:226] pre_conv needs backward computation.\nI0817 16:26:33.403508 17350 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:26:33.403515 17350 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:26:33.403519 17350 net.cpp:270] This network produces output accuracy\nI0817 16:26:33.403525 17350 net.cpp:270] This network produces output loss\nI0817 16:26:33.403671 17350 net.cpp:283] Network initialization done.\nI0817 16:26:33.408032 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:33.408056 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:33.408105 17350 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:26:33.408265 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:26:33.408283 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:26:33.408293 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:26:33.408303 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:26:33.408311 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:26:33.408335 17350 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:26:33.408344 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:26:33.408352 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:26:33.408361 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:26:33.408370 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:26:33.408382 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:26:33.408391 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:26:33.408401 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:26:33.408408 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:26:33.408417 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:26:33.408426 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:26:33.408437 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:26:33.408445 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:26:33.408454 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:26:33.408463 17350 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b3_cbr2_bn\nI0817 16:26:33.409134 17350 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n 
   moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: 
\"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler 
{\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n 
 convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  
top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0817 16:26:33.409708 17350 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:26:33.409940 17350 net.cpp:100] Creating Layer dataLayer\nI0817 16:26:33.409962 17350 net.cpp:408] dataLayer -> data_top\nI0817 16:26:33.409979 17350 net.cpp:408] dataLayer -> label\nI0817 16:26:33.409991 17350 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0817 16:26:33.416970 17357 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0817 16:26:33.417191 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:33.424573 17350 net.cpp:150] Setting up dataLayer\nI0817 16:26:33.424594 17350 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:26:33.424602 17350 net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.424607 17350 net.cpp:165] Memory required for data: 1536500\nI0817 16:26:33.424614 17350 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:26:33.424628 17350 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:26:33.424635 17350 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:26:33.424649 17350 net.cpp:408] label_dataLayer_1_split -> 
label_dataLayer_1_split_0\nI0817 16:26:33.424660 17350 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:26:33.424815 17350 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:26:33.424831 17350 net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.424841 17350 net.cpp:157] Top shape: 125 (125)\nI0817 16:26:33.424847 17350 net.cpp:165] Memory required for data: 1537500\nI0817 16:26:33.424854 17350 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:26:33.424868 17350 net.cpp:100] Creating Layer pre_conv\nI0817 16:26:33.424875 17350 net.cpp:434] pre_conv <- data_top\nI0817 16:26:33.424888 17350 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:26:33.425303 17350 net.cpp:150] Setting up pre_conv\nI0817 16:26:33.425328 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.425333 17350 net.cpp:165] Memory required for data: 9729500\nI0817 16:26:33.425348 17350 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:26:33.425364 17350 net.cpp:100] Creating Layer pre_bn\nI0817 16:26:33.425370 17350 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:26:33.425379 17350 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:26:33.425709 17350 net.cpp:150] Setting up pre_bn\nI0817 16:26:33.425725 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.425730 17350 net.cpp:165] Memory required for data: 17921500\nI0817 16:26:33.425750 17350 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:26:33.425760 17350 net.cpp:100] Creating Layer pre_scale\nI0817 16:26:33.425765 17350 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:26:33.425772 17350 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:26:33.425846 17350 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:26:33.426018 17350 net.cpp:150] Setting up pre_scale\nI0817 16:26:33.426034 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.426040 17350 net.cpp:165] Memory required for data: 26113500\nI0817 16:26:33.426049 17350 layer_factory.hpp:77] 
Creating layer pre_relu\nI0817 16:26:33.426064 17350 net.cpp:100] Creating Layer pre_relu\nI0817 16:26:33.426069 17350 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:26:33.426076 17350 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:26:33.426086 17350 net.cpp:150] Setting up pre_relu\nI0817 16:26:33.426093 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.426097 17350 net.cpp:165] Memory required for data: 34305500\nI0817 16:26:33.426102 17350 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:26:33.426108 17350 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:26:33.426116 17350 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:26:33.426127 17350 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:26:33.426137 17350 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:26:33.426192 17350 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:26:33.426208 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.426214 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.426219 17350 net.cpp:165] Memory required for data: 50689500\nI0817 16:26:33.426224 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:26:33.426235 17350 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:26:33.426244 17350 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:26:33.426254 17350 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:26:33.426682 17350 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:26:33.426697 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.426702 17350 net.cpp:165] Memory required for data: 58881500\nI0817 16:26:33.426718 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:26:33.426731 17350 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:26:33.426738 17350 net.cpp:434] L1_b1_cbr1_bn <- 
L1_b1_cbr1_conv_top\nI0817 16:26:33.426749 17350 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:26:33.427067 17350 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:26:33.427083 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.427090 17350 net.cpp:165] Memory required for data: 67073500\nI0817 16:26:33.427103 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:26:33.427114 17350 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0817 16:26:33.427120 17350 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:26:33.427145 17350 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.427212 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:26:33.427386 17350 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:26:33.427402 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.427407 17350 net.cpp:165] Memory required for data: 75265500\nI0817 16:26:33.427425 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:26:33.427434 17350 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:26:33.427443 17350 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:26:33.427450 17350 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.427464 17350 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:26:33.427470 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.427474 17350 net.cpp:165] Memory required for data: 83457500\nI0817 16:26:33.427479 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:26:33.427489 17350 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:26:33.427497 17350 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:26:33.427506 17350 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:26:33.428143 17350 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:26:33.428160 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.428167 17350 net.cpp:165] 
Memory required for data: 91649500\nI0817 16:26:33.428179 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:26:33.428191 17350 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:26:33.428198 17350 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:26:33.428208 17350 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:26:33.428514 17350 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:26:33.428526 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.428531 17350 net.cpp:165] Memory required for data: 99841500\nI0817 16:26:33.428547 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:26:33.428560 17350 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:26:33.428570 17350 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:26:33.428578 17350 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.428665 17350 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:26:33.428853 17350 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:26:33.428870 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.428875 17350 net.cpp:165] Memory required for data: 108033500\nI0817 16:26:33.428885 17350 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:26:33.428896 17350 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:26:33.428903 17350 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:26:33.428910 17350 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:26:33.428920 17350 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:26:33.428959 17350 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:26:33.428973 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.428977 17350 net.cpp:165] Memory required for data: 116225500\nI0817 16:26:33.428983 17350 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:26:33.428992 17350 net.cpp:100] Creating Layer L1_b1_relu\nI0817 
16:26:33.428998 17350 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:26:33.429005 17350 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.429014 17350 net.cpp:150] Setting up L1_b1_relu\nI0817 16:26:33.429023 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.429028 17350 net.cpp:165] Memory required for data: 124417500\nI0817 16:26:33.429033 17350 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.429045 17350 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.429051 17350 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:26:33.429059 17350 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:26:33.429067 17350 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:26:33.429129 17350 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:26:33.429144 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.429162 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.429167 17350 net.cpp:165] Memory required for data: 140801500\nI0817 16:26:33.429172 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:26:33.429184 17350 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:26:33.429190 17350 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:26:33.429208 17350 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:26:33.429613 17350 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:26:33.429628 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.429633 17350 net.cpp:165] Memory required for data: 148993500\nI0817 16:26:33.429646 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:26:33.429654 17350 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 
16:26:33.429661 17350 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:26:33.429668 17350 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:26:33.429973 17350 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:26:33.429988 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.429994 17350 net.cpp:165] Memory required for data: 157185500\nI0817 16:26:33.430006 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:26:33.430017 17350 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:26:33.430027 17350 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:26:33.430034 17350 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.430104 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:26:33.430282 17350 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:26:33.430295 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.430300 17350 net.cpp:165] Memory required for data: 165377500\nI0817 16:26:33.430313 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:26:33.430321 17350 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:26:33.430327 17350 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:26:33.430337 17350 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.430351 17350 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:26:33.430358 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.430362 17350 net.cpp:165] Memory required for data: 173569500\nI0817 16:26:33.430367 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:26:33.430382 17350 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:26:33.430388 17350 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:26:33.430397 17350 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:26:33.430805 17350 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:26:33.430821 17350 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:26:33.430826 17350 net.cpp:165] Memory required for data: 181761500\nI0817 16:26:33.430838 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:26:33.430851 17350 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:26:33.430857 17350 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:26:33.430869 17350 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:26:33.431247 17350 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:26:33.431263 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431270 17350 net.cpp:165] Memory required for data: 189953500\nI0817 16:26:33.431287 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:26:33.431298 17350 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:26:33.431304 17350 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:26:33.431314 17350 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.431380 17350 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:26:33.431557 17350 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:26:33.431571 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431578 17350 net.cpp:165] Memory required for data: 198145500\nI0817 16:26:33.431596 17350 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:26:33.431608 17350 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:26:33.431617 17350 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:26:33.431625 17350 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:26:33.431633 17350 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:26:33.431675 17350 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:26:33.431685 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431690 17350 net.cpp:165] Memory required for data: 206337500\nI0817 16:26:33.431695 17350 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 
16:26:33.431702 17350 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:26:33.431709 17350 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:26:33.431722 17350 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.431731 17350 net.cpp:150] Setting up L1_b2_relu\nI0817 16:26:33.431738 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431743 17350 net.cpp:165] Memory required for data: 214529500\nI0817 16:26:33.431751 17350 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.431758 17350 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.431763 17350 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:26:33.431771 17350 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:26:33.431787 17350 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:26:33.431841 17350 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:26:33.431854 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431861 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.431865 17350 net.cpp:165] Memory required for data: 230913500\nI0817 16:26:33.431871 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:26:33.431885 17350 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:26:33.431890 17350 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:26:33.431902 17350 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:26:33.432278 17350 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:26:33.432293 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.432298 17350 net.cpp:165] Memory required for data: 239105500\nI0817 16:26:33.432308 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 
16:26:33.432319 17350 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:26:33.432325 17350 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:26:33.432338 17350 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:26:33.432652 17350 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:26:33.432667 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.432672 17350 net.cpp:165] Memory required for data: 247297500\nI0817 16:26:33.432685 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:26:33.432698 17350 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:26:33.432704 17350 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:26:33.432713 17350 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.432771 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:26:33.432951 17350 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:26:33.432965 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.432972 17350 net.cpp:165] Memory required for data: 255489500\nI0817 16:26:33.432982 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:26:33.432993 17350 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:26:33.432999 17350 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:26:33.433009 17350 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.433027 17350 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:26:33.433034 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.433038 17350 net.cpp:165] Memory required for data: 263681500\nI0817 16:26:33.433043 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:26:33.433058 17350 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:26:33.433063 17350 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:26:33.433078 17350 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:26:33.433459 17350 net.cpp:150] Setting up 
L1_b3_cbr2_conv\nI0817 16:26:33.433475 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.433481 17350 net.cpp:165] Memory required for data: 271873500\nI0817 16:26:33.433490 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:26:33.433502 17350 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:26:33.433511 17350 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:26:33.433549 17350 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:26:33.433902 17350 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:26:33.433917 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.433923 17350 net.cpp:165] Memory required for data: 280065500\nI0817 16:26:33.433935 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:26:33.433948 17350 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:26:33.433954 17350 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:26:33.433965 17350 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.434128 17350 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:26:33.434304 17350 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:26:33.434317 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.434324 17350 net.cpp:165] Memory required for data: 288257500\nI0817 16:26:33.434334 17350 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:26:33.434347 17350 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:26:33.434353 17350 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:26:33.434363 17350 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:26:33.434371 17350 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:26:33.434412 17350 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:26:33.434422 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.434427 17350 net.cpp:165] Memory required for data: 296449500\nI0817 
16:26:33.434432 17350 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:26:33.434439 17350 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:26:33.434445 17350 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:26:33.434464 17350 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:26:33.434478 17350 net.cpp:150] Setting up L1_b3_relu\nI0817 16:26:33.434484 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.434489 17350 net.cpp:165] Memory required for data: 304641500\nI0817 16:26:33.434496 17350 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.434504 17350 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.434509 17350 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:26:33.434516 17350 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:26:33.434526 17350 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:26:33.434581 17350 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:26:33.434590 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.434600 17350 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:26:33.434607 17350 net.cpp:165] Memory required for data: 321025500\nI0817 16:26:33.434612 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:26:33.434622 17350 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:26:33.434638 17350 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:26:33.434651 17350 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:26:33.435027 17350 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:26:33.435041 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.435047 17350 net.cpp:165] Memory required for data: 323073500\nI0817 
16:26:33.435056 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:26:33.435067 17350 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:26:33.435075 17350 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:26:33.435082 17350 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:26:33.435334 17350 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:26:33.435348 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.435353 17350 net.cpp:165] Memory required for data: 325121500\nI0817 16:26:33.435362 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:26:33.435374 17350 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:26:33.435379 17350 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:26:33.435387 17350 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.435442 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:26:33.435602 17350 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:26:33.435616 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.435621 17350 net.cpp:165] Memory required for data: 327169500\nI0817 16:26:33.435631 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:26:33.435642 17350 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:26:33.435647 17350 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:26:33.435654 17350 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.435663 17350 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:26:33.435670 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.435674 17350 net.cpp:165] Memory required for data: 329217500\nI0817 16:26:33.435679 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:26:33.435693 17350 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:26:33.435698 17350 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:26:33.435708 17350 net.cpp:408] L2_b1_cbr2_conv -> 
L2_b1_cbr2_conv_top\nI0817 16:26:33.436048 17350 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:26:33.436061 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436066 17350 net.cpp:165] Memory required for data: 331265500\nI0817 16:26:33.436075 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:26:33.436084 17350 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:26:33.436089 17350 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:26:33.436100 17350 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:26:33.436357 17350 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:26:33.436369 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436374 17350 net.cpp:165] Memory required for data: 333313500\nI0817 16:26:33.436384 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:26:33.436396 17350 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:26:33.436403 17350 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:26:33.436410 17350 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.436465 17350 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:26:33.436612 17350 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:26:33.436625 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436630 17350 net.cpp:165] Memory required for data: 335361500\nI0817 16:26:33.436638 17350 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:26:33.436650 17350 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:26:33.436657 17350 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:26:33.436668 17350 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:26:33.436704 17350 net.cpp:150] Setting up L2_b1_pool\nI0817 16:26:33.436714 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436719 17350 net.cpp:165] Memory required for data: 337409500\nI0817 16:26:33.436724 17350 layer_factory.hpp:77] Creating layer 
L2_b1_sum_eltwise\nI0817 16:26:33.436733 17350 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:26:33.436738 17350 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:26:33.436748 17350 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:26:33.436755 17350 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:26:33.436794 17350 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:26:33.436803 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436808 17350 net.cpp:165] Memory required for data: 339457500\nI0817 16:26:33.436813 17350 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:26:33.436820 17350 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:26:33.436825 17350 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:26:33.436836 17350 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.436846 17350 net.cpp:150] Setting up L2_b1_relu\nI0817 16:26:33.436852 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.436856 17350 net.cpp:165] Memory required for data: 341505500\nI0817 16:26:33.436861 17350 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:26:33.436869 17350 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:26:33.436877 17350 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:26:33.439075 17350 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:26:33.439095 17350 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:26:33.439100 17350 net.cpp:165] Memory required for data: 343553500\nI0817 16:26:33.439105 17350 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:26:33.439116 17350 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:26:33.439121 17350 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:26:33.439131 17350 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:26:33.439138 17350 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:26:33.439182 17350 net.cpp:150] Setting up L2_b1_concat0\nI0817 16:26:33.439193 17350 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.439198 17350 net.cpp:165] Memory required for data: 347649500\nI0817 16:26:33.439203 17350 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.439209 17350 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.439215 17350 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:26:33.439225 17350 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:26:33.439235 17350 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:26:33.439282 17350 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:26:33.439297 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.439304 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.439308 17350 net.cpp:165] Memory required for data: 355841500\nI0817 16:26:33.439313 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:26:33.439324 17350 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:26:33.439330 17350 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:26:33.439339 17350 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:26:33.439834 17350 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:26:33.439849 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.439854 17350 net.cpp:165] Memory required for data: 359937500\nI0817 16:26:33.439877 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:26:33.439889 17350 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:26:33.439896 17350 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:26:33.439904 17350 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:26:33.440158 17350 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:26:33.440179 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 
16:26:33.440184 17350 net.cpp:165] Memory required for data: 364033500\nI0817 16:26:33.440196 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:26:33.440203 17350 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:26:33.440209 17350 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:26:33.440220 17350 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.440279 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:26:33.440429 17350 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:26:33.440443 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.440446 17350 net.cpp:165] Memory required for data: 368129500\nI0817 16:26:33.440456 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:26:33.440464 17350 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:26:33.440469 17350 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:26:33.440476 17350 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.440485 17350 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:26:33.440492 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.440497 17350 net.cpp:165] Memory required for data: 372225500\nI0817 16:26:33.440501 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:26:33.440515 17350 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:26:33.440521 17350 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:26:33.440531 17350 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:26:33.441015 17350 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:26:33.441030 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441035 17350 net.cpp:165] Memory required for data: 376321500\nI0817 16:26:33.441045 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:26:33.441056 17350 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:26:33.441062 17350 net.cpp:434] L2_b2_cbr2_bn 
<- L2_b2_cbr2_conv_top\nI0817 16:26:33.441073 17350 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:26:33.441328 17350 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:26:33.441340 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441345 17350 net.cpp:165] Memory required for data: 380417500\nI0817 16:26:33.441355 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:26:33.441364 17350 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:26:33.441370 17350 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:26:33.441376 17350 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.441434 17350 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:26:33.441591 17350 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:26:33.441606 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441612 17350 net.cpp:165] Memory required for data: 384513500\nI0817 16:26:33.441620 17350 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:26:33.441628 17350 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:26:33.441634 17350 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:26:33.441642 17350 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:26:33.441649 17350 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:26:33.441680 17350 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:26:33.441691 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441696 17350 net.cpp:165] Memory required for data: 388609500\nI0817 16:26:33.441701 17350 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:26:33.441709 17350 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:26:33.441714 17350 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:26:33.441723 17350 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.441733 17350 net.cpp:150] Setting up L2_b2_relu\nI0817 
16:26:33.441740 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441751 17350 net.cpp:165] Memory required for data: 392705500\nI0817 16:26:33.441756 17350 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.441763 17350 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.441768 17350 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:26:33.441778 17350 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:26:33.441795 17350 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:26:33.441843 17350 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:26:33.441854 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441861 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.441865 17350 net.cpp:165] Memory required for data: 400897500\nI0817 16:26:33.441870 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:26:33.441884 17350 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:26:33.441890 17350 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:26:33.441900 17350 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:26:33.442378 17350 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:26:33.442391 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.442395 17350 net.cpp:165] Memory required for data: 404993500\nI0817 16:26:33.442404 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:26:33.442416 17350 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:26:33.442422 17350 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:26:33.442430 17350 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:26:33.442684 17350 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0817 
16:26:33.442698 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.442703 17350 net.cpp:165] Memory required for data: 409089500\nI0817 16:26:33.442713 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:26:33.442720 17350 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:26:33.442726 17350 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:26:33.442734 17350 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.442800 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:26:33.442950 17350 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:26:33.442967 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.442972 17350 net.cpp:165] Memory required for data: 413185500\nI0817 16:26:33.442982 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:26:33.442989 17350 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:26:33.442996 17350 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:26:33.443002 17350 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.443012 17350 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:26:33.443018 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.443023 17350 net.cpp:165] Memory required for data: 417281500\nI0817 16:26:33.443027 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:26:33.443048 17350 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:26:33.443054 17350 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:26:33.443066 17350 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:26:33.443536 17350 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:26:33.443549 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.443554 17350 net.cpp:165] Memory required for data: 421377500\nI0817 16:26:33.443563 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:26:33.443572 17350 net.cpp:100] Creating 
Layer L2_b3_cbr2_bn\nI0817 16:26:33.443578 17350 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:26:33.443585 17350 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:26:33.443851 17350 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:26:33.443872 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.443877 17350 net.cpp:165] Memory required for data: 425473500\nI0817 16:26:33.443888 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:26:33.443897 17350 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:26:33.443902 17350 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:26:33.443912 17350 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.443969 17350 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:26:33.444121 17350 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:26:33.444133 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.444139 17350 net.cpp:165] Memory required for data: 429569500\nI0817 16:26:33.444147 17350 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:26:33.444156 17350 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:26:33.444162 17350 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:26:33.444169 17350 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:26:33.444180 17350 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:26:33.444207 17350 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:26:33.444219 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.444224 17350 net.cpp:165] Memory required for data: 433665500\nI0817 16:26:33.444229 17350 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:26:33.444236 17350 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:26:33.444242 17350 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:26:33.444249 17350 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top 
(in-place)\nI0817 16:26:33.444257 17350 net.cpp:150] Setting up L2_b3_relu\nI0817 16:26:33.444264 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.444269 17350 net.cpp:165] Memory required for data: 437761500\nI0817 16:26:33.444273 17350 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.444283 17350 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.444288 17350 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:26:33.444295 17350 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:26:33.444304 17350 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:26:33.444353 17350 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:26:33.444365 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.444370 17350 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:26:33.444375 17350 net.cpp:165] Memory required for data: 445953500\nI0817 16:26:33.444380 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:26:33.444391 17350 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:26:33.444396 17350 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:26:33.444408 17350 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:26:33.444905 17350 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0817 16:26:33.444921 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.444926 17350 net.cpp:165] Memory required for data: 446977500\nI0817 16:26:33.444934 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:26:33.444943 17350 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:26:33.444949 17350 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:26:33.444962 17350 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 
16:26:33.445222 17350 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:26:33.445236 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.445240 17350 net.cpp:165] Memory required for data: 448001500\nI0817 16:26:33.445251 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:26:33.445262 17350 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:26:33.445276 17350 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:26:33.445284 17350 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.445339 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:26:33.445498 17350 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:26:33.445511 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.445516 17350 net.cpp:165] Memory required for data: 449025500\nI0817 16:26:33.445525 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:26:33.445533 17350 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:26:33.445538 17350 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:26:33.445549 17350 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:26:33.445559 17350 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:26:33.445565 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.445569 17350 net.cpp:165] Memory required for data: 450049500\nI0817 16:26:33.445574 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:26:33.445586 17350 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0817 16:26:33.445592 17350 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:26:33.445600 17350 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:26:33.446112 17350 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:26:33.446127 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446132 17350 net.cpp:165] Memory required for data: 451073500\nI0817 16:26:33.446141 17350 layer_factory.hpp:77] Creating layer 
L3_b1_cbr2_bn\nI0817 16:26:33.446153 17350 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:26:33.446159 17350 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:26:33.446167 17350 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:26:33.446435 17350 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:26:33.446449 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446454 17350 net.cpp:165] Memory required for data: 452097500\nI0817 16:26:33.446463 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:26:33.446471 17350 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:26:33.446477 17350 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:26:33.446485 17350 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:26:33.446547 17350 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:26:33.446705 17350 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:26:33.446718 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446723 17350 net.cpp:165] Memory required for data: 453121500\nI0817 16:26:33.446732 17350 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:26:33.446740 17350 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:26:33.446746 17350 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:26:33.446758 17350 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:26:33.446799 17350 net.cpp:150] Setting up L3_b1_pool\nI0817 16:26:33.446812 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446817 17350 net.cpp:165] Memory required for data: 454145500\nI0817 16:26:33.446823 17350 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:26:33.446831 17350 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:26:33.446836 17350 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:26:33.446843 17350 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:26:33.446853 17350 net.cpp:408] L3_b1_sum_eltwise -> 
L3_b1_sum_eltwise_top\nI0817 16:26:33.446887 17350 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:26:33.446897 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446900 17350 net.cpp:165] Memory required for data: 455169500\nI0817 16:26:33.446905 17350 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:26:33.446913 17350 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:26:33.446918 17350 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:26:33.446925 17350 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:26:33.446941 17350 net.cpp:150] Setting up L3_b1_relu\nI0817 16:26:33.446949 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.446954 17350 net.cpp:165] Memory required for data: 456193500\nI0817 16:26:33.446959 17350 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:26:33.446969 17350 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:26:33.446977 17350 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:26:33.448210 17350 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:26:33.448227 17350 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:26:33.448232 17350 net.cpp:165] Memory required for data: 457217500\nI0817 16:26:33.448238 17350 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:26:33.448247 17350 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:26:33.448253 17350 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:26:33.448261 17350 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 16:26:33.448271 17350 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:26:33.448312 17350 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:26:33.448325 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.448330 17350 net.cpp:165] Memory required for data: 459265500\nI0817 16:26:33.448335 17350 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:26:33.448343 17350 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 
16:26:33.448348 17350 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:26:33.448355 17350 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:26:33.448365 17350 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:26:33.448415 17350 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:26:33.448424 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.448431 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.448436 17350 net.cpp:165] Memory required for data: 463361500\nI0817 16:26:33.448441 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:26:33.448454 17350 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:26:33.448460 17350 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:26:33.448469 17350 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:26:33.449503 17350 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:26:33.449518 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.449523 17350 net.cpp:165] Memory required for data: 465409500\nI0817 16:26:33.449532 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:26:33.449542 17350 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:26:33.449548 17350 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:26:33.449558 17350 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 16:26:33.449839 17350 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:26:33.449856 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.449862 17350 net.cpp:165] Memory required for data: 467457500\nI0817 16:26:33.449872 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:26:33.449880 17350 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:26:33.449887 17350 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:26:33.449897 17350 net.cpp:395] 
L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.449955 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:26:33.450111 17350 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:26:33.450124 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.450129 17350 net.cpp:165] Memory required for data: 469505500\nI0817 16:26:33.450139 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:26:33.450148 17350 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:26:33.450155 17350 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:26:33.450162 17350 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:26:33.450179 17350 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:26:33.450186 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.450191 17350 net.cpp:165] Memory required for data: 471553500\nI0817 16:26:33.450196 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:26:33.450209 17350 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:26:33.450214 17350 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:26:33.450223 17350 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:26:33.451267 17350 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:26:33.451282 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.451287 17350 net.cpp:165] Memory required for data: 473601500\nI0817 16:26:33.451295 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0817 16:26:33.451308 17350 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:26:33.451313 17350 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:26:33.451323 17350 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:26:33.451584 17350 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:26:33.451597 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.451601 17350 net.cpp:165] Memory required for data: 475649500\nI0817 16:26:33.451612 17350 
layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:26:33.451622 17350 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:26:33.451627 17350 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:26:33.451637 17350 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:26:33.451695 17350 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:26:33.451864 17350 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:26:33.451877 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.451882 17350 net.cpp:165] Memory required for data: 477697500\nI0817 16:26:33.451891 17350 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:26:33.451900 17350 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:26:33.451906 17350 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:26:33.451913 17350 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:26:33.451923 17350 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:26:33.451961 17350 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:26:33.451972 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.451977 17350 net.cpp:165] Memory required for data: 479745500\nI0817 16:26:33.451982 17350 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:26:33.451989 17350 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:26:33.451994 17350 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:26:33.452004 17350 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:26:33.452013 17350 net.cpp:150] Setting up L3_b2_relu\nI0817 16:26:33.452020 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.452025 17350 net.cpp:165] Memory required for data: 481793500\nI0817 16:26:33.452029 17350 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:26:33.452036 17350 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 
16:26:33.452041 17350 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:26:33.452049 17350 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:26:33.452057 17350 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:26:33.452105 17350 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:26:33.452117 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.452123 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.452128 17350 net.cpp:165] Memory required for data: 485889500\nI0817 16:26:33.452133 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:26:33.452143 17350 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:26:33.452149 17350 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:26:33.452167 17350 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:26:33.453212 17350 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:26:33.453227 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.453232 17350 net.cpp:165] Memory required for data: 487937500\nI0817 16:26:33.453239 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:26:33.453248 17350 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:26:33.453254 17350 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:26:33.453265 17350 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:26:33.453529 17350 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:26:33.453546 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.453550 17350 net.cpp:165] Memory required for data: 489985500\nI0817 16:26:33.453562 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:26:33.453569 17350 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:26:33.453575 17350 net.cpp:434] L3_b3_cbr1_scale <- 
L3_b3_cbr1_bn_top\nI0817 16:26:33.453583 17350 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.453639 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:26:33.453799 17350 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:26:33.453812 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.453817 17350 net.cpp:165] Memory required for data: 492033500\nI0817 16:26:33.453826 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:26:33.453838 17350 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:26:33.453845 17350 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:26:33.453851 17350 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:26:33.453861 17350 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:26:33.453867 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.453872 17350 net.cpp:165] Memory required for data: 494081500\nI0817 16:26:33.453877 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:26:33.453891 17350 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:26:33.453896 17350 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:26:33.453904 17350 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:26:33.455891 17350 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:26:33.455909 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.455914 17350 net.cpp:165] Memory required for data: 496129500\nI0817 16:26:33.455924 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:26:33.455934 17350 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:26:33.455940 17350 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:26:33.455950 17350 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:26:33.456214 17350 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:26:33.456229 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.456234 17350 net.cpp:165] Memory required 
for data: 498177500\nI0817 16:26:33.456269 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:26:33.456279 17350 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:26:33.456285 17350 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:26:33.456293 17350 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:26:33.456356 17350 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:26:33.456513 17350 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:26:33.456526 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.456532 17350 net.cpp:165] Memory required for data: 500225500\nI0817 16:26:33.456540 17350 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:26:33.456548 17350 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:26:33.456555 17350 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:26:33.456562 17350 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:26:33.456569 17350 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:26:33.456614 17350 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:26:33.456624 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.456629 17350 net.cpp:165] Memory required for data: 502273500\nI0817 16:26:33.456634 17350 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:26:33.456645 17350 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:26:33.456650 17350 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0817 16:26:33.456657 17350 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:26:33.456667 17350 net.cpp:150] Setting up L3_b3_relu\nI0817 16:26:33.456674 17350 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:26:33.456678 17350 net.cpp:165] Memory required for data: 504321500\nI0817 16:26:33.456683 17350 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:26:33.456693 17350 net.cpp:100] Creating Layer post_pool\nI0817 16:26:33.456699 
17350 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0817 16:26:33.456707 17350 net.cpp:408] post_pool -> post_pool\nI0817 16:26:33.456744 17350 net.cpp:150] Setting up post_pool\nI0817 16:26:33.456756 17350 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:26:33.456760 17350 net.cpp:165] Memory required for data: 504353500\nI0817 16:26:33.456765 17350 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:26:33.456775 17350 net.cpp:100] Creating Layer post_FC\nI0817 16:26:33.456786 17350 net.cpp:434] post_FC <- post_pool\nI0817 16:26:33.456796 17350 net.cpp:408] post_FC -> post_FC_top\nI0817 16:26:33.456948 17350 net.cpp:150] Setting up post_FC\nI0817 16:26:33.456961 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.456965 17350 net.cpp:165] Memory required for data: 504358500\nI0817 16:26:33.456974 17350 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:26:33.456982 17350 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:26:33.456989 17350 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:26:33.456998 17350 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0817 16:26:33.457008 17350 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:26:33.457056 17350 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:26:33.457067 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.457073 17350 net.cpp:157] Top shape: 125 10 (1250)\nI0817 16:26:33.457077 17350 net.cpp:165] Memory required for data: 504368500\nI0817 16:26:33.457082 17350 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:26:33.457093 17350 net.cpp:100] Creating Layer accuracy\nI0817 16:26:33.457099 17350 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:26:33.457106 17350 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:26:33.457113 17350 net.cpp:408] accuracy -> accuracy\nI0817 16:26:33.457125 17350 net.cpp:150] Setting up 
accuracy\nI0817 16:26:33.457132 17350 net.cpp:157] Top shape: (1)\nI0817 16:26:33.457137 17350 net.cpp:165] Memory required for data: 504368504\nI0817 16:26:33.457141 17350 layer_factory.hpp:77] Creating layer loss\nI0817 16:26:33.457149 17350 net.cpp:100] Creating Layer loss\nI0817 16:26:33.457154 17350 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:26:33.457160 17350 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:26:33.457167 17350 net.cpp:408] loss -> loss\nI0817 16:26:33.457178 17350 layer_factory.hpp:77] Creating layer loss\nI0817 16:26:33.457295 17350 net.cpp:150] Setting up loss\nI0817 16:26:33.457307 17350 net.cpp:157] Top shape: (1)\nI0817 16:26:33.457312 17350 net.cpp:160]     with loss weight 1\nI0817 16:26:33.457324 17350 net.cpp:165] Memory required for data: 504368508\nI0817 16:26:33.457330 17350 net.cpp:226] loss needs backward computation.\nI0817 16:26:33.457336 17350 net.cpp:228] accuracy does not need backward computation.\nI0817 16:26:33.457342 17350 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:26:33.457347 17350 net.cpp:226] post_FC needs backward computation.\nI0817 16:26:33.457352 17350 net.cpp:226] post_pool needs backward computation.\nI0817 16:26:33.457365 17350 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:26:33.457370 17350 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.457376 17350 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.457381 17350 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.457386 17350 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.457391 17350 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:26:33.457396 17350 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.457399 17350 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.457404 17350 net.cpp:226] L3_b3_cbr1_conv needs backward 
computation.\nI0817 16:26:33.457409 17350 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:26:33.457415 17350 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:26:33.457419 17350 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.457425 17350 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.457430 17350 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.457435 17350 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.457440 17350 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:26:33.457445 17350 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.457450 17350 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.457455 17350 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.457460 17350 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:26:33.457465 17350 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:26:33.457471 17350 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:26:33.457476 17350 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:26:33.457481 17350 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.457486 17350 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:26:33.457491 17350 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.457496 17350 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.457501 17350 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.457507 17350 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.457511 17350 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.457516 17350 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.457521 17350 net.cpp:226] L3_b1_cbr1_conv 
needs backward computation.\nI0817 16:26:33.457527 17350 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:26:33.457532 17350 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:26:33.457537 17350 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.457542 17350 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.457547 17350 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.457552 17350 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.457557 17350 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:26:33.457562 17350 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.457567 17350 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.457572 17350 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:26:33.457581 17350 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:26:33.457587 17350 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:26:33.457592 17350 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.457597 17350 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.457602 17350 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.457612 17350 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.457617 17350 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:26:33.457623 17350 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.457628 17350 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.457633 17350 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.457638 17350 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:26:33.457643 17350 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 
16:26:33.457649 17350 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:26:33.457654 17350 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:26:33.457659 17350 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.457665 17350 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:26:33.457670 17350 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.457675 17350 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.457680 17350 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.457685 17350 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.457690 17350 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.457695 17350 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.457700 17350 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:26:33.457705 17350 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:26:33.457710 17350 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:26:33.457715 17350 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0817 16:26:33.457721 17350 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:26:33.457726 17350 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:26:33.457731 17350 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:26:33.457737 17350 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:26:33.457742 17350 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:26:33.457747 17350 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:26:33.457752 17350 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:26:33.457757 17350 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:26:33.457763 17350 net.cpp:226] L1_b2_relu needs backward 
computation.\nI0817 16:26:33.457768 17350 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:26:33.457773 17350 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:26:33.457778 17350 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:26:33.457790 17350 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:26:33.457797 17350 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:26:33.457801 17350 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:26:33.457806 17350 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:26:33.457813 17350 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:26:33.457818 17350 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:26:33.457823 17350 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:26:33.457828 17350 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:26:33.457834 17350 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:26:33.457839 17350 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:26:33.457844 17350 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0817 16:26:33.457850 17350 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:26:33.457859 17350 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:26:33.457864 17350 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:26:33.457870 17350 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:26:33.457880 17350 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:26:33.457886 17350 net.cpp:226] pre_relu needs backward computation.\nI0817 16:26:33.457891 17350 net.cpp:226] pre_scale needs backward computation.\nI0817 16:26:33.457896 17350 net.cpp:226] pre_bn needs backward computation.\nI0817 16:26:33.457901 17350 net.cpp:226] pre_conv needs backward 
computation.\nI0817 16:26:33.457907 17350 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:26:33.457913 17350 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:26:33.457917 17350 net.cpp:270] This network produces output accuracy\nI0817 16:26:33.457924 17350 net.cpp:270] This network produces output loss\nI0817 16:26:33.458037 17350 net.cpp:283] Network initialization done.\nI0817 16:26:33.458343 17350 solver.cpp:60] Solver scaffolding done.\nI0817 16:26:33.667732 17350 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:26:33.960933 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:33.960974 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:33.966403 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:34.177644 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:34.177721 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:34.189250 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:34.189327 17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:34.518535 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:34.518575 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:34.524657 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:34.741330 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:34.741427 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:34.757951 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:34.758044 
17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:35.113253 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:35.113296 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:35.120404 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:35.347069 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:35.347192 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:35.369678 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:35.369801 17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:35.389717 17350 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:26:35.744580 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:35.744642 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:35.753116 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:35.976830 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:35.977005 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:36.005777 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:36.005939 17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:36.421012 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:36.421074 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:36.429623 17350 
data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:36.669847 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:36.670018 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:36.705663 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:36.705821 17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:37.145884 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:37.145931 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:37.155046 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:37.399399 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:37.399608 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:37.441658 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:37.441864 17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:37.902204 17350 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:26:37.902263 17350 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:26:37.912904 17350 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:26:37.968153 17377 blocking_queue.cpp:50] Waiting for data\nI0817 16:26:38.033272 17373 blocking_queue.cpp:50] Waiting for data\nI0817 16:26:38.271638 17350 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:26:38.271916 17350 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:26:38.320894 17350 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:26:38.321130 
17350 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:26:38.366435 17350 parallel.cpp:425] Starting Optimization\nI0817 16:26:38.368225 17350 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:26:38.368309 17350 solver.cpp:280] Learning Rate Policy: multistep\nI0817 16:26:38.370664 17350 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:27:05.484081 17350 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0817 16:27:05.484344 17350 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:27:06.920585 17350 solver.cpp:228] Iteration 0, loss = 3.00656\nI0817 16:27:06.920636 17350 solver.cpp:244]     Train net output #0: accuracy = 0.064\nI0817 16:27:06.920655 17350 solver.cpp:244]     Train net output #1: loss = 3.00656 (* 1 = 3.00656 loss)\nI0817 16:27:07.041605 17350 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0817 16:27:54.471695 17350 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:28:21.398540 17350 solver.cpp:404]     Test net output #0: accuracy = 0.14316\nI0817 16:28:21.398632 17350 solver.cpp:404]     Test net output #1: loss = 2.39213 (* 1 = 2.39213 loss)\nI0817 16:28:21.821827 17350 solver.cpp:228] Iteration 100, loss = 2.03236\nI0817 16:28:21.821877 17350 solver.cpp:244]     Train net output #0: accuracy = 0.248\nI0817 16:28:21.821892 17350 solver.cpp:244]     Train net output #1: loss = 2.03236 (* 1 = 2.03236 loss)\nI0817 16:28:21.899992 17350 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0817 16:29:09.352573 17350 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:29:36.271711 17350 solver.cpp:404]     Test net output #0: accuracy = 0.21468\nI0817 16:29:36.271785 17350 solver.cpp:404]     Test net output #1: loss = 2.35132 (* 1 = 2.35132 loss)\nI0817 16:29:36.695022 17350 solver.cpp:228] Iteration 200, loss = 1.5672\nI0817 16:29:36.695066 17350 solver.cpp:244]     Train net output #0: accuracy = 0.352\nI0817 16:29:36.695085 17350 
solver.cpp:244]     Train net output #1: loss = 1.5672 (* 1 = 1.5672 loss)\nI0817 16:29:36.775055 17350 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0817 16:30:24.224611 17350 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:30:51.139921 17350 solver.cpp:404]     Test net output #0: accuracy = 0.22328\nI0817 16:30:51.139997 17350 solver.cpp:404]     Test net output #1: loss = 2.86353 (* 1 = 2.86353 loss)\nI0817 16:30:51.562885 17350 solver.cpp:228] Iteration 300, loss = 1.11492\nI0817 16:30:51.562932 17350 solver.cpp:244]     Train net output #0: accuracy = 0.528\nI0817 16:30:51.562949 17350 solver.cpp:244]     Train net output #1: loss = 1.11492 (* 1 = 1.11492 loss)\nI0817 16:30:51.644718 17350 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0817 16:31:39.080049 17350 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:32:06.004629 17350 solver.cpp:404]     Test net output #0: accuracy = 0.27064\nI0817 16:32:06.004700 17350 solver.cpp:404]     Test net output #1: loss = 2.33998 (* 1 = 2.33998 loss)\nI0817 16:32:06.427323 17350 solver.cpp:228] Iteration 400, loss = 0.854874\nI0817 16:32:06.427366 17350 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0817 16:32:06.427381 17350 solver.cpp:244]     Train net output #1: loss = 0.854874 (* 1 = 0.854874 loss)\nI0817 16:32:06.507865 17350 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0817 16:32:53.898407 17350 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:33:20.827003 17350 solver.cpp:404]     Test net output #0: accuracy = 0.2614\nI0817 16:33:20.827083 17350 solver.cpp:404]     Test net output #1: loss = 3.01676 (* 1 = 3.01676 loss)\nI0817 16:33:21.250108 17350 solver.cpp:228] Iteration 500, loss = 0.696001\nI0817 16:33:21.250149 17350 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 16:33:21.250166 17350 solver.cpp:244]     Train net output #1: loss = 0.696001 (* 1 = 0.696001 loss)\nI0817 16:33:21.335193 17350 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0817 
16:34:08.740209 17350 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:34:35.665937 17350 solver.cpp:404]     Test net output #0: accuracy = 0.11656\nI0817 16:34:35.666018 17350 solver.cpp:404]     Test net output #1: loss = 4.31825 (* 1 = 4.31825 loss)\nI0817 16:34:36.089191 17350 solver.cpp:228] Iteration 600, loss = 0.623518\nI0817 16:34:36.089232 17350 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 16:34:36.089249 17350 solver.cpp:244]     Train net output #1: loss = 0.623518 (* 1 = 0.623518 loss)\nI0817 16:34:36.171707 17350 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0817 16:35:23.607785 17350 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:35:50.532899 17350 solver.cpp:404]     Test net output #0: accuracy = 0.18264\nI0817 16:35:50.532974 17350 solver.cpp:404]     Test net output #1: loss = 3.56089 (* 1 = 3.56089 loss)\nI0817 16:35:50.956096 17350 solver.cpp:228] Iteration 700, loss = 0.542112\nI0817 16:35:50.956130 17350 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0817 16:35:50.956145 17350 solver.cpp:244]     Train net output #1: loss = 0.542112 (* 1 = 0.542112 loss)\nI0817 16:35:51.037575 17350 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0817 16:36:38.531222 17350 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:37:05.456202 17350 solver.cpp:404]     Test net output #0: accuracy = 0.20144\nI0817 16:37:05.456272 17350 solver.cpp:404]     Test net output #1: loss = 3.32496 (* 1 = 3.32496 loss)\nI0817 16:37:05.879036 17350 solver.cpp:228] Iteration 800, loss = 0.546762\nI0817 16:37:05.879070 17350 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 16:37:05.879086 17350 solver.cpp:244]     Train net output #1: loss = 0.546762 (* 1 = 0.546762 loss)\nI0817 16:37:05.959048 17350 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0817 16:37:53.453707 17350 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:38:20.377398 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.24032\nI0817 16:38:20.377471 17350 solver.cpp:404]     Test net output #1: loss = 3.16309 (* 1 = 3.16309 loss)\nI0817 16:38:20.800482 17350 solver.cpp:228] Iteration 900, loss = 0.469687\nI0817 16:38:20.800532 17350 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 16:38:20.800549 17350 solver.cpp:244]     Train net output #1: loss = 0.469687 (* 1 = 0.469687 loss)\nI0817 16:38:20.878860 17350 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0817 16:39:08.360095 17350 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:39:35.282588 17350 solver.cpp:404]     Test net output #0: accuracy = 0.33304\nI0817 16:39:35.282660 17350 solver.cpp:404]     Test net output #1: loss = 2.27169 (* 1 = 2.27169 loss)\nI0817 16:39:35.706387 17350 solver.cpp:228] Iteration 1000, loss = 0.378647\nI0817 16:39:35.706440 17350 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 16:39:35.706457 17350 solver.cpp:244]     Train net output #1: loss = 0.378647 (* 1 = 0.378647 loss)\nI0817 16:39:35.786788 17350 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0817 16:40:23.260180 17350 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:40:50.180966 17350 solver.cpp:404]     Test net output #0: accuracy = 0.3722\nI0817 16:40:50.181046 17350 solver.cpp:404]     Test net output #1: loss = 1.97651 (* 1 = 1.97651 loss)\nI0817 16:40:50.603962 17350 solver.cpp:228] Iteration 1100, loss = 0.397395\nI0817 16:40:50.604022 17350 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:40:50.604039 17350 solver.cpp:244]     Train net output #1: loss = 0.397395 (* 1 = 0.397395 loss)\nI0817 16:40:50.683614 17350 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0817 16:41:38.222630 17350 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:42:05.126551 17350 solver.cpp:404]     Test net output #0: accuracy = 0.45888\nI0817 16:42:05.126612 17350 solver.cpp:404]     Test net output #1: loss = 1.56821 (* 1 = 1.56821 loss)\nI0817 16:42:05.549609 17350 
solver.cpp:228] Iteration 1200, loss = 0.355936\nI0817 16:42:05.549664 17350 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:42:05.549681 17350 solver.cpp:244]     Train net output #1: loss = 0.355936 (* 1 = 0.355936 loss)\nI0817 16:42:05.628829 17350 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0817 16:42:53.121976 17350 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 16:43:19.929600 17350 solver.cpp:404]     Test net output #0: accuracy = 0.4086\nI0817 16:43:19.929646 17350 solver.cpp:404]     Test net output #1: loss = 1.82369 (* 1 = 1.82369 loss)\nI0817 16:43:20.352485 17350 solver.cpp:228] Iteration 1300, loss = 0.394736\nI0817 16:43:20.352540 17350 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0817 16:43:20.352556 17350 solver.cpp:244]     Train net output #1: loss = 0.394736 (* 1 = 0.394736 loss)\nI0817 16:43:20.435878 17350 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0817 16:44:07.919991 17350 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 16:44:34.660624 17350 solver.cpp:404]     Test net output #0: accuracy = 0.42292\nI0817 16:44:34.660671 17350 solver.cpp:404]     Test net output #1: loss = 2.08839 (* 1 = 2.08839 loss)\nI0817 16:44:35.083583 17350 solver.cpp:228] Iteration 1400, loss = 0.300254\nI0817 16:44:35.083642 17350 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:44:35.083660 17350 solver.cpp:244]     Train net output #1: loss = 0.300254 (* 1 = 0.300254 loss)\nI0817 16:44:35.166689 17350 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0817 16:45:22.604569 17350 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 16:45:49.437222 17350 solver.cpp:404]     Test net output #0: accuracy = 0.39212\nI0817 16:45:49.437268 17350 solver.cpp:404]     Test net output #1: loss = 2.40713 (* 1 = 2.40713 loss)\nI0817 16:45:49.860286 17350 solver.cpp:228] Iteration 1500, loss = 0.277867\nI0817 16:45:49.860343 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 
16:45:49.860360 17350 solver.cpp:244]     Train net output #1: loss = 0.277867 (* 1 = 0.277867 loss)\nI0817 16:45:49.938100 17350 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0817 16:46:37.379371 17350 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 16:47:04.200642 17350 solver.cpp:404]     Test net output #0: accuracy = 0.49124\nI0817 16:47:04.200688 17350 solver.cpp:404]     Test net output #1: loss = 1.66292 (* 1 = 1.66292 loss)\nI0817 16:47:04.623548 17350 solver.cpp:228] Iteration 1600, loss = 0.307757\nI0817 16:47:04.623603 17350 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 16:47:04.623620 17350 solver.cpp:244]     Train net output #1: loss = 0.307757 (* 1 = 0.307757 loss)\nI0817 16:47:04.701282 17350 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0817 16:47:52.179041 17350 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 16:48:18.910368 17350 solver.cpp:404]     Test net output #0: accuracy = 0.53428\nI0817 16:48:18.910415 17350 solver.cpp:404]     Test net output #1: loss = 1.44267 (* 1 = 1.44267 loss)\nI0817 16:48:19.333528 17350 solver.cpp:228] Iteration 1700, loss = 0.338179\nI0817 16:48:19.333587 17350 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0817 16:48:19.333606 17350 solver.cpp:244]     Train net output #1: loss = 0.338179 (* 1 = 0.338179 loss)\nI0817 16:48:19.410853 17350 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0817 16:49:06.882966 17350 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 16:49:33.682134 17350 solver.cpp:404]     Test net output #0: accuracy = 0.51276\nI0817 16:49:33.682189 17350 solver.cpp:404]     Test net output #1: loss = 1.70985 (* 1 = 1.70985 loss)\nI0817 16:49:34.105177 17350 solver.cpp:228] Iteration 1800, loss = 0.411422\nI0817 16:49:34.105242 17350 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 16:49:34.105262 17350 solver.cpp:244]     Train net output #1: loss = 0.411422 (* 1 = 0.411422 loss)\nI0817 16:49:34.181052 17350 
sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0817 16:50:21.647346 17350 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 16:50:48.547739 17350 solver.cpp:404]     Test net output #0: accuracy = 0.42624\nI0817 16:50:48.547796 17350 solver.cpp:404]     Test net output #1: loss = 2.56629 (* 1 = 2.56629 loss)\nI0817 16:50:48.971475 17350 solver.cpp:228] Iteration 1900, loss = 0.311939\nI0817 16:50:48.971536 17350 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0817 16:50:48.971554 17350 solver.cpp:244]     Train net output #1: loss = 0.311939 (* 1 = 0.311939 loss)\nI0817 16:50:49.048782 17350 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0817 16:51:36.496121 17350 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 16:52:03.389981 17350 solver.cpp:404]     Test net output #0: accuracy = 0.46944\nI0817 16:52:03.390041 17350 solver.cpp:404]     Test net output #1: loss = 2.59477 (* 1 = 2.59477 loss)\nI0817 16:52:03.814401 17350 solver.cpp:228] Iteration 2000, loss = 0.22801\nI0817 16:52:03.814460 17350 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 16:52:03.814477 17350 solver.cpp:244]     Train net output #1: loss = 0.22801 (* 1 = 0.22801 loss)\nI0817 16:52:03.895478 17350 sgd_solver.cpp:166] Iteration 2000, lr = 0.35\nI0817 16:52:51.373131 17350 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 16:53:18.263687 17350 solver.cpp:404]     Test net output #0: accuracy = 0.58896\nI0817 16:53:18.263746 17350 solver.cpp:404]     Test net output #1: loss = 1.75534 (* 1 = 1.75534 loss)\nI0817 16:53:18.686643 17350 solver.cpp:228] Iteration 2100, loss = 0.226486\nI0817 16:53:18.686693 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:53:18.686712 17350 solver.cpp:244]     Train net output #1: loss = 0.226486 (* 1 = 0.226486 loss)\nI0817 16:53:18.767112 17350 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0817 16:54:06.274222 17350 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 16:54:33.166395 
17350 solver.cpp:404]     Test net output #0: accuracy = 0.4672\nI0817 16:54:33.166452 17350 solver.cpp:404]     Test net output #1: loss = 2.7758 (* 1 = 2.7758 loss)\nI0817 16:54:33.590729 17350 solver.cpp:228] Iteration 2200, loss = 0.197457\nI0817 16:54:33.590780 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:54:33.590797 17350 solver.cpp:244]     Train net output #1: loss = 0.197457 (* 1 = 0.197457 loss)\nI0817 16:54:33.671059 17350 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0817 16:55:21.244345 17350 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 16:55:48.127207 17350 solver.cpp:404]     Test net output #0: accuracy = 0.5268\nI0817 16:55:48.127269 17350 solver.cpp:404]     Test net output #1: loss = 2.36946 (* 1 = 2.36946 loss)\nI0817 16:55:48.551684 17350 solver.cpp:228] Iteration 2300, loss = 0.169073\nI0817 16:55:48.551733 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 16:55:48.551751 17350 solver.cpp:244]     Train net output #1: loss = 0.169073 (* 1 = 0.169073 loss)\nI0817 16:55:48.630645 17350 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0817 16:56:36.143306 17350 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 16:57:03.031980 17350 solver.cpp:404]     Test net output #0: accuracy = 0.49316\nI0817 16:57:03.032039 17350 solver.cpp:404]     Test net output #1: loss = 3.0885 (* 1 = 3.0885 loss)\nI0817 16:57:03.456259 17350 solver.cpp:228] Iteration 2400, loss = 0.245823\nI0817 16:57:03.456307 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:57:03.456324 17350 solver.cpp:244]     Train net output #1: loss = 0.245823 (* 1 = 0.245823 loss)\nI0817 16:57:03.533115 17350 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0817 16:57:51.057382 17350 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 16:58:17.957509 17350 solver.cpp:404]     Test net output #0: accuracy = 0.63988\nI0817 16:58:17.957581 17350 solver.cpp:404]     Test net output #1: loss = 1.46739 (* 
1 = 1.46739 loss)\nI0817 16:58:18.380645 17350 solver.cpp:228] Iteration 2500, loss = 0.274934\nI0817 16:58:18.380692 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 16:58:18.380709 17350 solver.cpp:244]     Train net output #1: loss = 0.274934 (* 1 = 0.274934 loss)\nI0817 16:58:18.458981 17350 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0817 16:59:06.008623 17350 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 16:59:32.917059 17350 solver.cpp:404]     Test net output #0: accuracy = 0.5424\nI0817 16:59:32.917131 17350 solver.cpp:404]     Test net output #1: loss = 2.31783 (* 1 = 2.31783 loss)\nI0817 16:59:33.340425 17350 solver.cpp:228] Iteration 2600, loss = 0.128785\nI0817 16:59:33.340471 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 16:59:33.340487 17350 solver.cpp:244]     Train net output #1: loss = 0.128785 (* 1 = 0.128785 loss)\nI0817 16:59:33.424413 17350 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0817 17:00:20.948918 17350 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:00:47.860493 17350 solver.cpp:404]     Test net output #0: accuracy = 0.66868\nI0817 17:00:47.860564 17350 solver.cpp:404]     Test net output #1: loss = 1.40835 (* 1 = 1.40835 loss)\nI0817 17:00:48.283563 17350 solver.cpp:228] Iteration 2700, loss = 0.217967\nI0817 17:00:48.283612 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:00:48.283628 17350 solver.cpp:244]     Train net output #1: loss = 0.217967 (* 1 = 0.217967 loss)\nI0817 17:00:48.360939 17350 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0817 17:01:35.871335 17350 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:02:02.776332 17350 solver.cpp:404]     Test net output #0: accuracy = 0.56712\nI0817 17:02:02.776397 17350 solver.cpp:404]     Test net output #1: loss = 2.32564 (* 1 = 2.32564 loss)\nI0817 17:02:03.199457 17350 solver.cpp:228] Iteration 2800, loss = 0.155417\nI0817 17:02:03.199502 17350 solver.cpp:244]     
Train net output #0: accuracy = 0.928\nI0817 17:02:03.199519 17350 solver.cpp:244]     Train net output #1: loss = 0.155417 (* 1 = 0.155417 loss)\nI0817 17:02:03.279042 17350 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0817 17:02:50.803845 17350 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 17:03:17.716430 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6074\nI0817 17:03:17.716500 17350 solver.cpp:404]     Test net output #1: loss = 1.97578 (* 1 = 1.97578 loss)\nI0817 17:03:18.139436 17350 solver.cpp:228] Iteration 2900, loss = 0.186774\nI0817 17:03:18.139483 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:03:18.139499 17350 solver.cpp:244]     Train net output #1: loss = 0.186774 (* 1 = 0.186774 loss)\nI0817 17:03:18.215823 17350 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0817 17:04:05.729538 17350 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 17:04:32.640504 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69956\nI0817 17:04:32.640575 17350 solver.cpp:404]     Test net output #1: loss = 1.2552 (* 1 = 1.2552 loss)\nI0817 17:04:33.064576 17350 solver.cpp:228] Iteration 3000, loss = 0.230619\nI0817 17:04:33.064626 17350 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0817 17:04:33.064644 17350 solver.cpp:244]     Train net output #1: loss = 0.230619 (* 1 = 0.230619 loss)\nI0817 17:04:33.148182 17350 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0817 17:05:20.691087 17350 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 17:05:47.603497 17350 solver.cpp:404]     Test net output #0: accuracy = 0.63476\nI0817 17:05:47.603567 17350 solver.cpp:404]     Test net output #1: loss = 1.70684 (* 1 = 1.70684 loss)\nI0817 17:05:48.027866 17350 solver.cpp:228] Iteration 3100, loss = 0.107041\nI0817 17:05:48.027915 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:05:48.027932 17350 solver.cpp:244]     Train net output #1: loss = 0.107041 (* 1 = 0.107041 
loss)\nI0817 17:05:48.109632 17350 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0817 17:06:35.627312 17350 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 17:07:02.546747 17350 solver.cpp:404]     Test net output #0: accuracy = 0.64776\nI0817 17:07:02.546815 17350 solver.cpp:404]     Test net output #1: loss = 1.52463 (* 1 = 1.52463 loss)\nI0817 17:07:02.971109 17350 solver.cpp:228] Iteration 3200, loss = 0.194934\nI0817 17:07:02.971158 17350 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:07:02.971176 17350 solver.cpp:244]     Train net output #1: loss = 0.194934 (* 1 = 0.194934 loss)\nI0817 17:07:03.048331 17350 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0817 17:07:50.488472 17350 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 17:08:17.401571 17350 solver.cpp:404]     Test net output #0: accuracy = 0.63064\nI0817 17:08:17.401639 17350 solver.cpp:404]     Test net output #1: loss = 1.66815 (* 1 = 1.66815 loss)\nI0817 17:08:17.825649 17350 solver.cpp:228] Iteration 3300, loss = 0.181469\nI0817 17:08:17.825700 17350 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:08:17.825716 17350 solver.cpp:244]     Train net output #1: loss = 0.181469 (* 1 = 0.181469 loss)\nI0817 17:08:17.906755 17350 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0817 17:09:05.377981 17350 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 17:09:32.292367 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7248\nI0817 17:09:32.292438 17350 solver.cpp:404]     Test net output #1: loss = 1.14417 (* 1 = 1.14417 loss)\nI0817 17:09:32.716832 17350 solver.cpp:228] Iteration 3400, loss = 0.160559\nI0817 17:09:32.716891 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:09:32.716908 17350 solver.cpp:244]     Train net output #1: loss = 0.160559 (* 1 = 0.160559 loss)\nI0817 17:09:32.797144 17350 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0817 17:10:20.273681 17350 solver.cpp:337] Iteration 3500, 
Testing net (#0)\nI0817 17:10:47.184809 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68496\nI0817 17:10:47.184877 17350 solver.cpp:404]     Test net output #1: loss = 1.37695 (* 1 = 1.37695 loss)\nI0817 17:10:47.608969 17350 solver.cpp:228] Iteration 3500, loss = 0.0612164\nI0817 17:10:47.609030 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 17:10:47.609047 17350 solver.cpp:244]     Train net output #1: loss = 0.0612164 (* 1 = 0.0612164 loss)\nI0817 17:10:47.691062 17350 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0817 17:11:35.160396 17350 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 17:12:02.067898 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75896\nI0817 17:12:02.067955 17350 solver.cpp:404]     Test net output #1: loss = 0.868217 (* 1 = 0.868217 loss)\nI0817 17:12:02.491194 17350 solver.cpp:228] Iteration 3600, loss = 0.117731\nI0817 17:12:02.491257 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:12:02.491274 17350 solver.cpp:244]     Train net output #1: loss = 0.117731 (* 1 = 0.117731 loss)\nI0817 17:12:02.571386 17350 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0817 17:12:50.009039 17350 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 17:13:16.921732 17350 solver.cpp:404]     Test net output #0: accuracy = 0.61384\nI0817 17:13:16.921802 17350 solver.cpp:404]     Test net output #1: loss = 1.8389 (* 1 = 1.8389 loss)\nI0817 17:13:17.345141 17350 solver.cpp:228] Iteration 3700, loss = 0.124639\nI0817 17:13:17.345201 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:13:17.345222 17350 solver.cpp:244]     Train net output #1: loss = 0.124639 (* 1 = 0.124639 loss)\nI0817 17:13:17.428396 17350 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0817 17:14:04.927160 17350 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 17:14:31.836201 17350 solver.cpp:404]     Test net output #0: accuracy = 0.57904\nI0817 17:14:31.836277 17350 
solver.cpp:404]     Test net output #1: loss = 1.94247 (* 1 = 1.94247 loss)\nI0817 17:14:32.260603 17350 solver.cpp:228] Iteration 3800, loss = 0.0990553\nI0817 17:14:32.260659 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:14:32.260677 17350 solver.cpp:244]     Train net output #1: loss = 0.0990554 (* 1 = 0.0990554 loss)\nI0817 17:14:32.343375 17350 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0817 17:15:19.843710 17350 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 17:15:46.754547 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65416\nI0817 17:15:46.754616 17350 solver.cpp:404]     Test net output #1: loss = 1.38986 (* 1 = 1.38986 loss)\nI0817 17:15:47.177667 17350 solver.cpp:228] Iteration 3900, loss = 0.1233\nI0817 17:15:47.177726 17350 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 17:15:47.177744 17350 solver.cpp:244]     Train net output #1: loss = 0.1233 (* 1 = 0.1233 loss)\nI0817 17:15:47.258683 17350 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0817 17:16:34.767043 17350 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 17:17:01.681130 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72804\nI0817 17:17:01.681200 17350 solver.cpp:404]     Test net output #1: loss = 1.0982 (* 1 = 1.0982 loss)\nI0817 17:17:02.105181 17350 solver.cpp:228] Iteration 4000, loss = 0.14309\nI0817 17:17:02.105244 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:17:02.105262 17350 solver.cpp:244]     Train net output #1: loss = 0.14309 (* 1 = 0.14309 loss)\nI0817 17:17:02.182173 17350 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0817 17:17:49.670665 17350 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 17:18:16.585674 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6264\nI0817 17:18:16.585743 17350 solver.cpp:404]     Test net output #1: loss = 2.00127 (* 1 = 2.00127 loss)\nI0817 17:18:17.010046 17350 solver.cpp:228] Iteration 4100, loss = 
0.135503\nI0817 17:18:17.010105 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:18:17.010123 17350 solver.cpp:244]     Train net output #1: loss = 0.135503 (* 1 = 0.135503 loss)\nI0817 17:18:17.085417 17350 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0817 17:19:04.594055 17350 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 17:19:31.507836 17350 solver.cpp:404]     Test net output #0: accuracy = 0.58832\nI0817 17:19:31.507908 17350 solver.cpp:404]     Test net output #1: loss = 1.98193 (* 1 = 1.98193 loss)\nI0817 17:19:31.930752 17350 solver.cpp:228] Iteration 4200, loss = 0.090882\nI0817 17:19:31.930809 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:19:31.930827 17350 solver.cpp:244]     Train net output #1: loss = 0.090882 (* 1 = 0.090882 loss)\nI0817 17:19:32.005887 17350 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0817 17:20:19.480051 17350 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 17:20:46.387375 17350 solver.cpp:404]     Test net output #0: accuracy = 0.59296\nI0817 17:20:46.387445 17350 solver.cpp:404]     Test net output #1: loss = 1.99985 (* 1 = 1.99985 loss)\nI0817 17:20:46.810135 17350 solver.cpp:228] Iteration 4300, loss = 0.117\nI0817 17:20:46.810194 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:20:46.810211 17350 solver.cpp:244]     Train net output #1: loss = 0.117 (* 1 = 0.117 loss)\nI0817 17:20:46.889274 17350 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0817 17:21:34.396916 17350 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 17:22:01.310804 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65536\nI0817 17:22:01.310861 17350 solver.cpp:404]     Test net output #1: loss = 1.78035 (* 1 = 1.78035 loss)\nI0817 17:22:01.733537 17350 solver.cpp:228] Iteration 4400, loss = 0.110908\nI0817 17:22:01.733592 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:22:01.733609 17350 solver.cpp:244]     Train net 
output #1: loss = 0.110908 (* 1 = 0.110908 loss)\nI0817 17:22:01.807634 17350 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0817 17:22:49.209779 17350 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 17:23:16.122550 17350 solver.cpp:404]     Test net output #0: accuracy = 0.64816\nI0817 17:23:16.122617 17350 solver.cpp:404]     Test net output #1: loss = 1.78565 (* 1 = 1.78565 loss)\nI0817 17:23:16.545634 17350 solver.cpp:228] Iteration 4500, loss = 0.127784\nI0817 17:23:16.545692 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:23:16.545711 17350 solver.cpp:244]     Train net output #1: loss = 0.127784 (* 1 = 0.127784 loss)\nI0817 17:23:16.624641 17350 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0817 17:24:04.028978 17350 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 17:24:30.940955 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71108\nI0817 17:24:30.941025 17350 solver.cpp:404]     Test net output #1: loss = 1.14531 (* 1 = 1.14531 loss)\nI0817 17:24:31.365134 17350 solver.cpp:228] Iteration 4600, loss = 0.10219\nI0817 17:24:31.365195 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:24:31.365212 17350 solver.cpp:244]     Train net output #1: loss = 0.10219 (* 1 = 0.10219 loss)\nI0817 17:24:31.436689 17350 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0817 17:25:18.873752 17350 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 17:25:45.789762 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72604\nI0817 17:25:45.789834 17350 solver.cpp:404]     Test net output #1: loss = 1.07986 (* 1 = 1.07986 loss)\nI0817 17:25:46.214059 17350 solver.cpp:228] Iteration 4700, loss = 0.0771545\nI0817 17:25:46.214118 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:25:46.214136 17350 solver.cpp:244]     Train net output #1: loss = 0.0771546 (* 1 = 0.0771546 loss)\nI0817 17:25:46.287603 17350 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0817 
17:26:33.728711 17350 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 17:27:00.642359 17350 solver.cpp:404]     Test net output #0: accuracy = 0.63432\nI0817 17:27:00.642431 17350 solver.cpp:404]     Test net output #1: loss = 1.79726 (* 1 = 1.79726 loss)\nI0817 17:27:01.066771 17350 solver.cpp:228] Iteration 4800, loss = 0.120768\nI0817 17:27:01.066820 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:27:01.066838 17350 solver.cpp:244]     Train net output #1: loss = 0.120768 (* 1 = 0.120768 loss)\nI0817 17:27:01.145153 17350 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0817 17:27:48.576896 17350 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 17:28:15.490180 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69932\nI0817 17:28:15.490257 17350 solver.cpp:404]     Test net output #1: loss = 1.44388 (* 1 = 1.44388 loss)\nI0817 17:28:15.914233 17350 solver.cpp:228] Iteration 4900, loss = 0.0840587\nI0817 17:28:15.914283 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:28:15.914300 17350 solver.cpp:244]     Train net output #1: loss = 0.0840587 (* 1 = 0.0840587 loss)\nI0817 17:28:15.996757 17350 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0817 17:29:03.443513 17350 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0817 17:29:30.358043 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7068\nI0817 17:29:30.358113 17350 solver.cpp:404]     Test net output #1: loss = 1.25538 (* 1 = 1.25538 loss)\nI0817 17:29:30.781410 17350 solver.cpp:228] Iteration 5000, loss = 0.0903294\nI0817 17:29:30.781461 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:29:30.781477 17350 solver.cpp:244]     Train net output #1: loss = 0.0903294 (* 1 = 0.0903294 loss)\nI0817 17:29:30.854710 17350 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0817 17:30:18.322232 17350 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 17:30:45.235636 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.66048\nI0817 17:30:45.235707 17350 solver.cpp:404]     Test net output #1: loss = 1.73684 (* 1 = 1.73684 loss)\nI0817 17:30:45.660109 17350 solver.cpp:228] Iteration 5100, loss = 0.0875293\nI0817 17:30:45.660161 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:30:45.660177 17350 solver.cpp:244]     Train net output #1: loss = 0.0875293 (* 1 = 0.0875293 loss)\nI0817 17:30:45.738839 17350 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0817 17:31:33.181829 17350 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 17:32:00.094889 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69712\nI0817 17:32:00.094966 17350 solver.cpp:404]     Test net output #1: loss = 1.34162 (* 1 = 1.34162 loss)\nI0817 17:32:00.519235 17350 solver.cpp:228] Iteration 5200, loss = 0.107863\nI0817 17:32:00.519284 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:32:00.519301 17350 solver.cpp:244]     Train net output #1: loss = 0.107863 (* 1 = 0.107863 loss)\nI0817 17:32:00.596758 17350 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0817 17:32:48.034987 17350 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 17:33:14.932126 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68304\nI0817 17:33:14.932198 17350 solver.cpp:404]     Test net output #1: loss = 1.44473 (* 1 = 1.44473 loss)\nI0817 17:33:15.356240 17350 solver.cpp:228] Iteration 5300, loss = 0.182809\nI0817 17:33:15.356292 17350 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 17:33:15.356308 17350 solver.cpp:244]     Train net output #1: loss = 0.182809 (* 1 = 0.182809 loss)\nI0817 17:33:15.429914 17350 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0817 17:34:02.947899 17350 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 17:34:29.858335 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65556\nI0817 17:34:29.858404 17350 solver.cpp:404]     Test net output #1: loss = 1.61808 (* 1 = 1.61808 loss)\nI0817 
17:34:30.281435 17350 solver.cpp:228] Iteration 5400, loss = 0.220675\nI0817 17:34:30.281486 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:34:30.281502 17350 solver.cpp:244]     Train net output #1: loss = 0.220675 (* 1 = 0.220675 loss)\nI0817 17:34:30.356004 17350 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0817 17:35:17.851963 17350 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 17:35:44.762080 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69916\nI0817 17:35:44.762151 17350 solver.cpp:404]     Test net output #1: loss = 1.33442 (* 1 = 1.33442 loss)\nI0817 17:35:45.185156 17350 solver.cpp:228] Iteration 5500, loss = 0.166828\nI0817 17:35:45.185206 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 17:35:45.185222 17350 solver.cpp:244]     Train net output #1: loss = 0.166828 (* 1 = 0.166828 loss)\nI0817 17:35:45.261987 17350 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0817 17:36:32.762182 17350 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 17:36:59.665900 17350 solver.cpp:404]     Test net output #0: accuracy = 0.55512\nI0817 17:36:59.665971 17350 solver.cpp:404]     Test net output #1: loss = 2.45123 (* 1 = 2.45123 loss)\nI0817 17:37:00.089030 17350 solver.cpp:228] Iteration 5600, loss = 0.195799\nI0817 17:37:00.089078 17350 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0817 17:37:00.089094 17350 solver.cpp:244]     Train net output #1: loss = 0.195799 (* 1 = 0.195799 loss)\nI0817 17:37:00.172124 17350 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0817 17:37:47.683789 17350 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 17:38:14.595330 17350 solver.cpp:404]     Test net output #0: accuracy = 0.58472\nI0817 17:38:14.595402 17350 solver.cpp:404]     Test net output #1: loss = 2.13929 (* 1 = 2.13929 loss)\nI0817 17:38:15.018856 17350 solver.cpp:228] Iteration 5700, loss = 0.090988\nI0817 17:38:15.018906 17350 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0817 17:38:15.018924 17350 solver.cpp:244]     Train net output #1: loss = 0.090988 (* 1 = 0.090988 loss)\nI0817 17:38:15.098057 17350 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0817 17:39:02.516764 17350 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 17:39:29.432982 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74532\nI0817 17:39:29.433053 17350 solver.cpp:404]     Test net output #1: loss = 0.979119 (* 1 = 0.979119 loss)\nI0817 17:39:29.856909 17350 solver.cpp:228] Iteration 5800, loss = 0.0862161\nI0817 17:39:29.856972 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:39:29.856992 17350 solver.cpp:244]     Train net output #1: loss = 0.0862161 (* 1 = 0.0862161 loss)\nI0817 17:39:29.934708 17350 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0817 17:40:17.387974 17350 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 17:40:44.300642 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7646\nI0817 17:40:44.300710 17350 solver.cpp:404]     Test net output #1: loss = 0.89873 (* 1 = 0.89873 loss)\nI0817 17:40:44.723664 17350 solver.cpp:228] Iteration 5900, loss = 0.0905499\nI0817 17:40:44.723723 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:40:44.723742 17350 solver.cpp:244]     Train net output #1: loss = 0.0905498 (* 1 = 0.0905498 loss)\nI0817 17:40:44.803970 17350 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0817 17:41:32.308724 17350 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 17:41:59.221318 17350 solver.cpp:404]     Test net output #0: accuracy = 0.679\nI0817 17:41:59.221388 17350 solver.cpp:404]     Test net output #1: loss = 1.43055 (* 1 = 1.43055 loss)\nI0817 17:41:59.644130 17350 solver.cpp:228] Iteration 6000, loss = 0.134277\nI0817 17:41:59.644179 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:41:59.644196 17350 solver.cpp:244]     Train net output #1: loss = 0.134277 (* 1 = 0.134277 loss)\nI0817 
17:41:59.719729 17350 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0817 17:42:47.212362 17350 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 17:43:14.126060 17350 solver.cpp:404]     Test net output #0: accuracy = 0.64344\nI0817 17:43:14.126123 17350 solver.cpp:404]     Test net output #1: loss = 1.72191 (* 1 = 1.72191 loss)\nI0817 17:43:14.550468 17350 solver.cpp:228] Iteration 6100, loss = 0.0842063\nI0817 17:43:14.550519 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:43:14.550536 17350 solver.cpp:244]     Train net output #1: loss = 0.0842062 (* 1 = 0.0842062 loss)\nI0817 17:43:14.624586 17350 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0817 17:44:02.138417 17350 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 17:44:29.049358 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7344\nI0817 17:44:29.049432 17350 solver.cpp:404]     Test net output #1: loss = 1.1036 (* 1 = 1.1036 loss)\nI0817 17:44:29.472254 17350 solver.cpp:228] Iteration 6200, loss = 0.0810235\nI0817 17:44:29.472297 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:44:29.472314 17350 solver.cpp:244]     Train net output #1: loss = 0.0810235 (* 1 = 0.0810235 loss)\nI0817 17:44:29.555269 17350 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0817 17:45:17.047559 17350 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 17:45:43.958216 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75152\nI0817 17:45:43.958287 17350 solver.cpp:404]     Test net output #1: loss = 0.977235 (* 1 = 0.977235 loss)\nI0817 17:45:44.381446 17350 solver.cpp:228] Iteration 6300, loss = 0.108874\nI0817 17:45:44.381491 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:45:44.381510 17350 solver.cpp:244]     Train net output #1: loss = 0.108874 (* 1 = 0.108874 loss)\nI0817 17:45:44.462077 17350 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0817 17:46:31.961325 17350 solver.cpp:337] Iteration 6400, Testing net 
(#0)\nI0817 17:46:58.872315 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70136\nI0817 17:46:58.872386 17350 solver.cpp:404]     Test net output #1: loss = 1.27678 (* 1 = 1.27678 loss)\nI0817 17:46:59.295469 17350 solver.cpp:228] Iteration 6400, loss = 0.151553\nI0817 17:46:59.295514 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:46:59.295531 17350 solver.cpp:244]     Train net output #1: loss = 0.151553 (* 1 = 0.151553 loss)\nI0817 17:46:59.375999 17350 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0817 17:47:46.785094 17350 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 17:48:13.695422 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74144\nI0817 17:48:13.695492 17350 solver.cpp:404]     Test net output #1: loss = 1.14599 (* 1 = 1.14599 loss)\nI0817 17:48:14.118460 17350 solver.cpp:228] Iteration 6500, loss = 0.0661344\nI0817 17:48:14.118506 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:48:14.118523 17350 solver.cpp:244]     Train net output #1: loss = 0.0661344 (* 1 = 0.0661344 loss)\nI0817 17:48:14.193316 17350 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0817 17:49:01.738308 17350 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 17:49:28.648823 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72196\nI0817 17:49:28.648885 17350 solver.cpp:404]     Test net output #1: loss = 1.15306 (* 1 = 1.15306 loss)\nI0817 17:49:29.071739 17350 solver.cpp:228] Iteration 6600, loss = 0.128309\nI0817 17:49:29.071785 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:49:29.071801 17350 solver.cpp:244]     Train net output #1: loss = 0.128309 (* 1 = 0.128309 loss)\nI0817 17:49:29.152379 17350 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0817 17:50:16.575263 17350 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 17:50:43.498555 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72788\nI0817 17:50:43.498628 17350 solver.cpp:404]     
Test net output #1: loss = 1.40998 (* 1 = 1.40998 loss)\nI0817 17:50:43.922878 17350 solver.cpp:228] Iteration 6700, loss = 0.101022\nI0817 17:50:43.922927 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 17:50:43.922945 17350 solver.cpp:244]     Train net output #1: loss = 0.101022 (* 1 = 0.101022 loss)\nI0817 17:50:43.995784 17350 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0817 17:51:31.410409 17350 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 17:51:58.324594 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72128\nI0817 17:51:58.324664 17350 solver.cpp:404]     Test net output #1: loss = 1.25122 (* 1 = 1.25122 loss)\nI0817 17:51:58.748764 17350 solver.cpp:228] Iteration 6800, loss = 0.0779451\nI0817 17:51:58.748811 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:51:58.748828 17350 solver.cpp:244]     Train net output #1: loss = 0.077945 (* 1 = 0.077945 loss)\nI0817 17:51:58.824723 17350 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0817 17:52:46.244302 17350 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 17:53:13.156625 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69796\nI0817 17:53:13.156695 17350 solver.cpp:404]     Test net output #1: loss = 1.46682 (* 1 = 1.46682 loss)\nI0817 17:53:13.580751 17350 solver.cpp:228] Iteration 6900, loss = 0.129114\nI0817 17:53:13.580801 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 17:53:13.580818 17350 solver.cpp:244]     Train net output #1: loss = 0.129114 (* 1 = 0.129114 loss)\nI0817 17:53:13.659596 17350 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0817 17:54:01.074262 17350 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 17:54:27.972146 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75964\nI0817 17:54:27.972218 17350 solver.cpp:404]     Test net output #1: loss = 0.942431 (* 1 = 0.942431 loss)\nI0817 17:54:28.396540 17350 solver.cpp:228] Iteration 7000, loss = 0.0717728\nI0817 
17:54:28.396591 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:54:28.396608 17350 solver.cpp:244]     Train net output #1: loss = 0.0717727 (* 1 = 0.0717727 loss)\nI0817 17:54:28.472118 17350 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0817 17:55:15.879266 17350 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 17:55:42.775557 17350 solver.cpp:404]     Test net output #0: accuracy = 0.67892\nI0817 17:55:42.775622 17350 solver.cpp:404]     Test net output #1: loss = 1.41043 (* 1 = 1.41043 loss)\nI0817 17:55:43.199818 17350 solver.cpp:228] Iteration 7100, loss = 0.0929043\nI0817 17:55:43.199870 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 17:55:43.199887 17350 solver.cpp:244]     Train net output #1: loss = 0.0929042 (* 1 = 0.0929042 loss)\nI0817 17:55:43.278780 17350 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0817 17:56:30.686678 17350 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 17:56:57.579826 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72192\nI0817 17:56:57.579900 17350 solver.cpp:404]     Test net output #1: loss = 1.16047 (* 1 = 1.16047 loss)\nI0817 17:56:58.004091 17350 solver.cpp:228] Iteration 7200, loss = 0.0852243\nI0817 17:56:58.004135 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 17:56:58.004153 17350 solver.cpp:244]     Train net output #1: loss = 0.0852242 (* 1 = 0.0852242 loss)\nI0817 17:56:58.077941 17350 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0817 17:57:45.475527 17350 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 17:58:12.366539 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7478\nI0817 17:58:12.366614 17350 solver.cpp:404]     Test net output #1: loss = 1.08625 (* 1 = 1.08625 loss)\nI0817 17:58:12.789638 17350 solver.cpp:228] Iteration 7300, loss = 0.126283\nI0817 17:58:12.789683 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 17:58:12.789700 17350 solver.cpp:244]     Train net 
output #1: loss = 0.126283 (* 1 = 0.126283 loss)\nI0817 17:58:12.872071 17350 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0817 17:59:00.344830 17350 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 17:59:27.239173 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68236\nI0817 17:59:27.239243 17350 solver.cpp:404]     Test net output #1: loss = 1.62799 (* 1 = 1.62799 loss)\nI0817 17:59:27.662432 17350 solver.cpp:228] Iteration 7400, loss = 0.0474854\nI0817 17:59:27.662487 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 17:59:27.662503 17350 solver.cpp:244]     Train net output #1: loss = 0.0474853 (* 1 = 0.0474853 loss)\nI0817 17:59:27.734828 17350 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0817 18:00:15.199561 17350 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 18:00:42.090790 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73656\nI0817 18:00:42.090864 17350 solver.cpp:404]     Test net output #1: loss = 1.04737 (* 1 = 1.04737 loss)\nI0817 18:00:42.515022 17350 solver.cpp:228] Iteration 7500, loss = 0.131426\nI0817 18:00:42.515077 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:00:42.515094 17350 solver.cpp:244]     Train net output #1: loss = 0.131425 (* 1 = 0.131425 loss)\nI0817 18:00:42.597978 17350 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0817 18:01:30.078826 17350 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 18:01:56.885779 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70644\nI0817 18:01:56.885845 17350 solver.cpp:404]     Test net output #1: loss = 1.4142 (* 1 = 1.4142 loss)\nI0817 18:01:57.310039 17350 solver.cpp:228] Iteration 7600, loss = 0.0922548\nI0817 18:01:57.310097 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:01:57.310114 17350 solver.cpp:244]     Train net output #1: loss = 0.0922547 (* 1 = 0.0922547 loss)\nI0817 18:01:57.389237 17350 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0817 
18:02:44.791115 17350 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 18:03:11.611380 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI0817 18:03:11.611428 17350 solver.cpp:404]     Test net output #1: loss = 1.01483 (* 1 = 1.01483 loss)\nI0817 18:03:12.035243 17350 solver.cpp:228] Iteration 7700, loss = 0.131136\nI0817 18:03:12.035298 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:03:12.035316 17350 solver.cpp:244]     Train net output #1: loss = 0.131136 (* 1 = 0.131136 loss)\nI0817 18:03:12.115314 17350 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0817 18:03:59.528311 17350 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 18:04:26.310292 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72832\nI0817 18:04:26.310338 17350 solver.cpp:404]     Test net output #1: loss = 1.22196 (* 1 = 1.22196 loss)\nI0817 18:04:26.734755 17350 solver.cpp:228] Iteration 7800, loss = 0.147549\nI0817 18:04:26.734815 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:04:26.734833 17350 solver.cpp:244]     Train net output #1: loss = 0.147548 (* 1 = 0.147548 loss)\nI0817 18:04:26.806993 17350 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0817 18:05:14.225338 17350 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 18:05:40.934201 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77892\nI0817 18:05:40.934247 17350 solver.cpp:404]     Test net output #1: loss = 0.948536 (* 1 = 0.948536 loss)\nI0817 18:05:41.358053 17350 solver.cpp:228] Iteration 7900, loss = 0.17644\nI0817 18:05:41.358111 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:05:41.358129 17350 solver.cpp:244]     Train net output #1: loss = 0.17644 (* 1 = 0.17644 loss)\nI0817 18:05:41.436765 17350 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0817 18:06:28.842849 17350 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 18:06:55.555389 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.77272\nI0817 18:06:55.555438 17350 solver.cpp:404]     Test net output #1: loss = 0.993825 (* 1 = 0.993825 loss)\nI0817 18:06:55.978195 17350 solver.cpp:228] Iteration 8000, loss = 0.097409\nI0817 18:06:55.978255 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:06:55.978272 17350 solver.cpp:244]     Train net output #1: loss = 0.0974088 (* 1 = 0.0974088 loss)\nI0817 18:06:56.055557 17350 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0817 18:07:43.482551 17350 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 18:08:10.352231 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70608\nI0817 18:08:10.352278 17350 solver.cpp:404]     Test net output #1: loss = 1.26937 (* 1 = 1.26937 loss)\nI0817 18:08:10.775383 17350 solver.cpp:228] Iteration 8100, loss = 0.0892017\nI0817 18:08:10.775429 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:08:10.775446 17350 solver.cpp:244]     Train net output #1: loss = 0.0892016 (* 1 = 0.0892016 loss)\nI0817 18:08:10.846989 17350 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0817 18:08:58.340589 17350 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 18:09:25.220453 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79912\nI0817 18:09:25.220504 17350 solver.cpp:404]     Test net output #1: loss = 0.853685 (* 1 = 0.853685 loss)\nI0817 18:09:25.643347 17350 solver.cpp:228] Iteration 8200, loss = 0.0822542\nI0817 18:09:25.643406 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:09:25.643424 17350 solver.cpp:244]     Train net output #1: loss = 0.0822541 (* 1 = 0.0822541 loss)\nI0817 18:09:25.719403 17350 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0817 18:10:13.125044 17350 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 18:10:40.004036 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77256\nI0817 18:10:40.004082 17350 solver.cpp:404]     Test net output #1: loss = 0.829367 (* 1 = 0.829367 loss)\nI0817 
18:10:40.428616 17350 solver.cpp:228] Iteration 8300, loss = 0.107244\nI0817 18:10:40.428673 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:10:40.428690 17350 solver.cpp:244]     Train net output #1: loss = 0.107244 (* 1 = 0.107244 loss)\nI0817 18:10:40.507661 17350 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0817 18:11:27.935727 17350 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 18:11:54.786520 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75844\nI0817 18:11:54.786567 17350 solver.cpp:404]     Test net output #1: loss = 1.04071 (* 1 = 1.04071 loss)\nI0817 18:11:55.209534 17350 solver.cpp:228] Iteration 8400, loss = 0.0690352\nI0817 18:11:55.209594 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:11:55.209612 17350 solver.cpp:244]     Train net output #1: loss = 0.0690351 (* 1 = 0.0690351 loss)\nI0817 18:11:55.287220 17350 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0817 18:12:42.715744 17350 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 18:13:09.573846 17350 solver.cpp:404]     Test net output #0: accuracy = 0.56892\nI0817 18:13:09.573895 17350 solver.cpp:404]     Test net output #1: loss = 2.35082 (* 1 = 2.35082 loss)\nI0817 18:13:09.997870 17350 solver.cpp:228] Iteration 8500, loss = 0.0573384\nI0817 18:13:09.997930 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:13:09.997952 17350 solver.cpp:244]     Train net output #1: loss = 0.0573383 (* 1 = 0.0573383 loss)\nI0817 18:13:10.072016 17350 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0817 18:13:57.493402 17350 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 18:14:24.368777 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75044\nI0817 18:14:24.368825 17350 solver.cpp:404]     Test net output #1: loss = 1.15076 (* 1 = 1.15076 loss)\nI0817 18:14:24.792806 17350 solver.cpp:228] Iteration 8600, loss = 0.0209119\nI0817 18:14:24.792867 17350 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0817 18:14:24.792884 17350 solver.cpp:244]     Train net output #1: loss = 0.0209118 (* 1 = 0.0209118 loss)\nI0817 18:14:24.871639 17350 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0817 18:15:12.298666 17350 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 18:15:39.148092 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69784\nI0817 18:15:39.148142 17350 solver.cpp:404]     Test net output #1: loss = 1.36467 (* 1 = 1.36467 loss)\nI0817 18:15:39.572319 17350 solver.cpp:228] Iteration 8700, loss = 0.12129\nI0817 18:15:39.572376 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:15:39.572394 17350 solver.cpp:244]     Train net output #1: loss = 0.12129 (* 1 = 0.12129 loss)\nI0817 18:15:39.651113 17350 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0817 18:16:27.089455 17350 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 18:16:53.970116 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79876\nI0817 18:16:53.970165 17350 solver.cpp:404]     Test net output #1: loss = 0.812252 (* 1 = 0.812252 loss)\nI0817 18:16:54.394276 17350 solver.cpp:228] Iteration 8800, loss = 0.0439968\nI0817 18:16:54.394333 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:16:54.394351 17350 solver.cpp:244]     Train net output #1: loss = 0.0439967 (* 1 = 0.0439967 loss)\nI0817 18:16:54.465409 17350 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0817 18:17:41.872256 17350 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 18:18:08.753383 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0817 18:18:08.753432 17350 solver.cpp:404]     Test net output #1: loss = 1.11495 (* 1 = 1.11495 loss)\nI0817 18:18:09.177743 17350 solver.cpp:228] Iteration 8900, loss = 0.0848331\nI0817 18:18:09.177803 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:18:09.177820 17350 solver.cpp:244]     Train net output #1: loss = 0.084833 (* 1 = 0.084833 loss)\nI0817 
18:18:09.256881 17350 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0817 18:18:56.645040 17350 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 18:19:23.527182 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7694\nI0817 18:19:23.527231 17350 solver.cpp:404]     Test net output #1: loss = 0.892916 (* 1 = 0.892916 loss)\nI0817 18:19:23.951195 17350 solver.cpp:228] Iteration 9000, loss = 0.0848345\nI0817 18:19:23.951252 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:19:23.951270 17350 solver.cpp:244]     Train net output #1: loss = 0.0848344 (* 1 = 0.0848344 loss)\nI0817 18:19:24.027164 17350 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0817 18:20:11.502501 17350 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 18:20:38.371654 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75244\nI0817 18:20:38.371703 17350 solver.cpp:404]     Test net output #1: loss = 0.985683 (* 1 = 0.985683 loss)\nI0817 18:20:38.795941 17350 solver.cpp:228] Iteration 9100, loss = 0.0566374\nI0817 18:20:38.796006 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:20:38.796025 17350 solver.cpp:244]     Train net output #1: loss = 0.0566373 (* 1 = 0.0566373 loss)\nI0817 18:20:38.875418 17350 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0817 18:21:26.365772 17350 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0817 18:21:53.236403 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75548\nI0817 18:21:53.236450 17350 solver.cpp:404]     Test net output #1: loss = 1.02356 (* 1 = 1.02356 loss)\nI0817 18:21:53.660570 17350 solver.cpp:228] Iteration 9200, loss = 0.191661\nI0817 18:21:53.660627 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:21:53.660645 17350 solver.cpp:244]     Train net output #1: loss = 0.191661 (* 1 = 0.191661 loss)\nI0817 18:21:53.742287 17350 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0817 18:22:41.235239 17350 solver.cpp:337] Iteration 9300, Testing 
net (#0)\nI0817 18:23:08.112435 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73264\nI0817 18:23:08.112483 17350 solver.cpp:404]     Test net output #1: loss = 1.09255 (* 1 = 1.09255 loss)\nI0817 18:23:08.536604 17350 solver.cpp:228] Iteration 9300, loss = 0.0680996\nI0817 18:23:08.536659 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:23:08.536676 17350 solver.cpp:244]     Train net output #1: loss = 0.0680995 (* 1 = 0.0680995 loss)\nI0817 18:23:08.609237 17350 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0817 18:23:56.104882 17350 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 18:24:22.805402 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68176\nI0817 18:24:22.805447 17350 solver.cpp:404]     Test net output #1: loss = 1.4045 (* 1 = 1.4045 loss)\nI0817 18:24:23.229612 17350 solver.cpp:228] Iteration 9400, loss = 0.153363\nI0817 18:24:23.229671 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:24:23.229687 17350 solver.cpp:244]     Train net output #1: loss = 0.153363 (* 1 = 0.153363 loss)\nI0817 18:24:23.304352 17350 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0817 18:25:10.763315 17350 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 18:25:37.580864 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72176\nI0817 18:25:37.580914 17350 solver.cpp:404]     Test net output #1: loss = 1.11191 (* 1 = 1.11191 loss)\nI0817 18:25:38.004679 17350 solver.cpp:228] Iteration 9500, loss = 0.125022\nI0817 18:25:38.004734 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 18:25:38.004751 17350 solver.cpp:244]     Train net output #1: loss = 0.125022 (* 1 = 0.125022 loss)\nI0817 18:25:38.080282 17350 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0817 18:26:25.575644 17350 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 18:26:52.458603 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7004\nI0817 18:26:52.458652 17350 solver.cpp:404]     
Test net output #1: loss = 1.34716 (* 1 = 1.34716 loss)\nI0817 18:26:52.882709 17350 solver.cpp:228] Iteration 9600, loss = 0.0966143\nI0817 18:26:52.882761 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:26:52.882778 17350 solver.cpp:244]     Train net output #1: loss = 0.0966142 (* 1 = 0.0966142 loss)\nI0817 18:26:52.956603 17350 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0817 18:27:40.452183 17350 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 18:28:07.298550 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68848\nI0817 18:28:07.298599 17350 solver.cpp:404]     Test net output #1: loss = 1.63574 (* 1 = 1.63574 loss)\nI0817 18:28:07.722630 17350 solver.cpp:228] Iteration 9700, loss = 0.104577\nI0817 18:28:07.722685 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:28:07.722703 17350 solver.cpp:244]     Train net output #1: loss = 0.104577 (* 1 = 0.104577 loss)\nI0817 18:28:07.800642 17350 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0817 18:28:55.234283 17350 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 18:29:22.027420 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74964\nI0817 18:29:22.027469 17350 solver.cpp:404]     Test net output #1: loss = 1.06273 (* 1 = 1.06273 loss)\nI0817 18:29:22.452019 17350 solver.cpp:228] Iteration 9800, loss = 0.0996356\nI0817 18:29:22.452075 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:29:22.452092 17350 solver.cpp:244]     Train net output #1: loss = 0.0996355 (* 1 = 0.0996355 loss)\nI0817 18:29:22.530395 17350 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0817 18:30:10.050858 17350 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 18:30:36.917407 17350 solver.cpp:404]     Test net output #0: accuracy = 0.732\nI0817 18:30:36.917456 17350 solver.cpp:404]     Test net output #1: loss = 1.08714 (* 1 = 1.08714 loss)\nI0817 18:30:37.342206 17350 solver.cpp:228] Iteration 9900, loss = 0.122615\nI0817 
18:30:37.342262 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:30:37.342279 17350 solver.cpp:244]     Train net output #1: loss = 0.122615 (* 1 = 0.122615 loss)\nI0817 18:30:37.419549 17350 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0817 18:31:24.937119 17350 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 18:31:51.790343 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73928\nI0817 18:31:51.790392 17350 solver.cpp:404]     Test net output #1: loss = 1.07311 (* 1 = 1.07311 loss)\nI0817 18:31:52.214956 17350 solver.cpp:228] Iteration 10000, loss = 0.0598622\nI0817 18:31:52.215013 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:31:52.215030 17350 solver.cpp:244]     Train net output #1: loss = 0.0598621 (* 1 = 0.0598621 loss)\nI0817 18:31:52.294857 17350 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0817 18:32:39.804411 17350 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 18:33:06.661289 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73136\nI0817 18:33:06.661337 17350 solver.cpp:404]     Test net output #1: loss = 1.05387 (* 1 = 1.05387 loss)\nI0817 18:33:07.085644 17350 solver.cpp:228] Iteration 10100, loss = 0.0893218\nI0817 18:33:07.085700 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:33:07.085718 17350 solver.cpp:244]     Train net output #1: loss = 0.0893216 (* 1 = 0.0893216 loss)\nI0817 18:33:07.165105 17350 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0817 18:33:54.683786 17350 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 18:34:21.561902 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76164\nI0817 18:34:21.561956 17350 solver.cpp:404]     Test net output #1: loss = 0.892852 (* 1 = 0.892852 loss)\nI0817 18:34:21.986717 17350 solver.cpp:228] Iteration 10200, loss = 0.0670554\nI0817 18:34:21.986775 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:34:21.986793 17350 solver.cpp:244]     
Train net output #1: loss = 0.0670553 (* 1 = 0.0670553 loss)\nI0817 18:34:22.064878 17350 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0817 18:35:09.556092 17350 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 18:35:36.419078 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76504\nI0817 18:35:36.419124 17350 solver.cpp:404]     Test net output #1: loss = 0.890441 (* 1 = 0.890441 loss)\nI0817 18:35:36.843976 17350 solver.cpp:228] Iteration 10300, loss = 0.083232\nI0817 18:35:36.844033 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:35:36.844050 17350 solver.cpp:244]     Train net output #1: loss = 0.0832319 (* 1 = 0.0832319 loss)\nI0817 18:35:36.914918 17350 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0817 18:36:24.388795 17350 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 18:36:51.281064 17350 solver.cpp:404]     Test net output #0: accuracy = 0.732\nI0817 18:36:51.281112 17350 solver.cpp:404]     Test net output #1: loss = 1.0976 (* 1 = 1.0976 loss)\nI0817 18:36:51.705894 17350 solver.cpp:228] Iteration 10400, loss = 0.0626336\nI0817 18:36:51.705955 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:36:51.705974 17350 solver.cpp:244]     Train net output #1: loss = 0.0626335 (* 1 = 0.0626335 loss)\nI0817 18:36:51.780051 17350 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0817 18:37:39.252398 17350 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 18:38:06.129212 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70244\nI0817 18:38:06.129261 17350 solver.cpp:404]     Test net output #1: loss = 1.21687 (* 1 = 1.21687 loss)\nI0817 18:38:06.552467 17350 solver.cpp:228] Iteration 10500, loss = 0.0961519\nI0817 18:38:06.552510 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:38:06.552525 17350 solver.cpp:244]     Train net output #1: loss = 0.0961518 (* 1 = 0.0961518 loss)\nI0817 18:38:06.628661 17350 sgd_solver.cpp:166] Iteration 10500, lr 
= 0.35\nI0817 18:38:54.082255 17350 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 18:39:20.961998 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77172\nI0817 18:39:20.962045 17350 solver.cpp:404]     Test net output #1: loss = 0.893114 (* 1 = 0.893114 loss)\nI0817 18:39:21.386503 17350 solver.cpp:228] Iteration 10600, loss = 0.0715719\nI0817 18:39:21.386554 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:39:21.386570 17350 solver.cpp:244]     Train net output #1: loss = 0.0715718 (* 1 = 0.0715718 loss)\nI0817 18:39:21.465804 17350 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0817 18:40:08.920336 17350 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 18:40:35.805713 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7576\nI0817 18:40:35.805763 17350 solver.cpp:404]     Test net output #1: loss = 0.989472 (* 1 = 0.989472 loss)\nI0817 18:40:36.230067 17350 solver.cpp:228] Iteration 10700, loss = 0.110217\nI0817 18:40:36.230113 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:40:36.230129 17350 solver.cpp:244]     Train net output #1: loss = 0.110217 (* 1 = 0.110217 loss)\nI0817 18:40:36.302413 17350 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0817 18:41:23.777204 17350 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 18:41:50.483971 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78172\nI0817 18:41:50.484016 17350 solver.cpp:404]     Test net output #1: loss = 0.928944 (* 1 = 0.928944 loss)\nI0817 18:41:50.908543 17350 solver.cpp:228] Iteration 10800, loss = 0.084675\nI0817 18:41:50.908592 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:41:50.908609 17350 solver.cpp:244]     Train net output #1: loss = 0.084675 (* 1 = 0.084675 loss)\nI0817 18:41:50.983762 17350 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0817 18:42:38.456578 17350 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 18:43:05.173297 17350 
solver.cpp:404]     Test net output #0: accuracy = 0.75424\nI0817 18:43:05.173346 17350 solver.cpp:404]     Test net output #1: loss = 0.996943 (* 1 = 0.996943 loss)\nI0817 18:43:05.597862 17350 solver.cpp:228] Iteration 10900, loss = 0.0331536\nI0817 18:43:05.597911 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 18:43:05.597929 17350 solver.cpp:244]     Train net output #1: loss = 0.0331536 (* 1 = 0.0331536 loss)\nI0817 18:43:05.678340 17350 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0817 18:43:53.146633 17350 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 18:44:19.903072 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73788\nI0817 18:44:19.903118 17350 solver.cpp:404]     Test net output #1: loss = 1.05142 (* 1 = 1.05142 loss)\nI0817 18:44:20.327558 17350 solver.cpp:228] Iteration 11000, loss = 0.0759612\nI0817 18:44:20.327605 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:44:20.327622 17350 solver.cpp:244]     Train net output #1: loss = 0.0759611 (* 1 = 0.0759611 loss)\nI0817 18:44:20.405901 17350 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0817 18:45:07.912838 17350 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 18:45:34.653924 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74424\nI0817 18:45:34.653978 17350 solver.cpp:404]     Test net output #1: loss = 1.07222 (* 1 = 1.07222 loss)\nI0817 18:45:35.078595 17350 solver.cpp:228] Iteration 11100, loss = 0.0822339\nI0817 18:45:35.078645 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:45:35.078662 17350 solver.cpp:244]     Train net output #1: loss = 0.0822339 (* 1 = 0.0822339 loss)\nI0817 18:45:35.156603 17350 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0817 18:46:22.564854 17350 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 18:46:49.264652 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79008\nI0817 18:46:49.264698 17350 solver.cpp:404]     Test net output 
#1: loss = 0.848745 (* 1 = 0.848745 loss)\nI0817 18:46:49.689455 17350 solver.cpp:228] Iteration 11200, loss = 0.0831534\nI0817 18:46:49.689512 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:46:49.689530 17350 solver.cpp:244]     Train net output #1: loss = 0.0831534 (* 1 = 0.0831534 loss)\nI0817 18:46:49.761492 17350 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0817 18:47:37.210981 17350 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 18:48:03.903712 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72952\nI0817 18:48:03.903756 17350 solver.cpp:404]     Test net output #1: loss = 1.03865 (* 1 = 1.03865 loss)\nI0817 18:48:04.327916 17350 solver.cpp:228] Iteration 11300, loss = 0.16641\nI0817 18:48:04.327980 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 18:48:04.327997 17350 solver.cpp:244]     Train net output #1: loss = 0.16641 (* 1 = 0.16641 loss)\nI0817 18:48:04.407294 17350 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0817 18:48:51.836769 17350 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 18:49:18.704097 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76728\nI0817 18:49:18.704146 17350 solver.cpp:404]     Test net output #1: loss = 0.956331 (* 1 = 0.956331 loss)\nI0817 18:49:19.127465 17350 solver.cpp:228] Iteration 11400, loss = 0.0548227\nI0817 18:49:19.127517 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:49:19.127535 17350 solver.cpp:244]     Train net output #1: loss = 0.0548227 (* 1 = 0.0548227 loss)\nI0817 18:49:19.205162 17350 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0817 18:50:06.604429 17350 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 18:50:33.479007 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70732\nI0817 18:50:33.479058 17350 solver.cpp:404]     Test net output #1: loss = 1.28841 (* 1 = 1.28841 loss)\nI0817 18:50:33.903734 17350 solver.cpp:228] Iteration 11500, loss = 0.0770141\nI0817 
18:50:33.903795 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 18:50:33.903813 17350 solver.cpp:244]     Train net output #1: loss = 0.077014 (* 1 = 0.077014 loss)\nI0817 18:50:33.975533 17350 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0817 18:51:21.394284 17350 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 18:51:48.281051 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78652\nI0817 18:51:48.281100 17350 solver.cpp:404]     Test net output #1: loss = 0.901127 (* 1 = 0.901127 loss)\nI0817 18:51:48.704988 17350 solver.cpp:228] Iteration 11600, loss = 0.089577\nI0817 18:51:48.705049 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:51:48.705067 17350 solver.cpp:244]     Train net output #1: loss = 0.0895769 (* 1 = 0.0895769 loss)\nI0817 18:51:48.784102 17350 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0817 18:52:36.217674 17350 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 18:53:03.097221 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65876\nI0817 18:53:03.097270 17350 solver.cpp:404]     Test net output #1: loss = 1.80681 (* 1 = 1.80681 loss)\nI0817 18:53:03.520638 17350 solver.cpp:228] Iteration 11700, loss = 0.135897\nI0817 18:53:03.520696 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 18:53:03.520714 17350 solver.cpp:244]     Train net output #1: loss = 0.135897 (* 1 = 0.135897 loss)\nI0817 18:53:03.595628 17350 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0817 18:53:51.041497 17350 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 18:54:17.911761 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68164\nI0817 18:54:17.911808 17350 solver.cpp:404]     Test net output #1: loss = 1.48601 (* 1 = 1.48601 loss)\nI0817 18:54:18.335642 17350 solver.cpp:228] Iteration 11800, loss = 0.0923846\nI0817 18:54:18.335700 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:54:18.335717 17350 solver.cpp:244]     
Train net output #1: loss = 0.0923846 (* 1 = 0.0923846 loss)\nI0817 18:54:18.414587 17350 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0817 18:55:05.847497 17350 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 18:55:32.706110 17350 solver.cpp:404]     Test net output #0: accuracy = 0.67964\nI0817 18:55:32.706158 17350 solver.cpp:404]     Test net output #1: loss = 1.66585 (* 1 = 1.66585 loss)\nI0817 18:55:33.131068 17350 solver.cpp:228] Iteration 11900, loss = 0.0679913\nI0817 18:55:33.131130 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:55:33.131148 17350 solver.cpp:244]     Train net output #1: loss = 0.0679912 (* 1 = 0.0679912 loss)\nI0817 18:55:33.204341 17350 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0817 18:56:20.624614 17350 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 18:56:47.484362 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75024\nI0817 18:56:47.484410 17350 solver.cpp:404]     Test net output #1: loss = 1.19301 (* 1 = 1.19301 loss)\nI0817 18:56:47.908876 17350 solver.cpp:228] Iteration 12000, loss = 0.0705977\nI0817 18:56:47.908934 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 18:56:47.908951 17350 solver.cpp:244]     Train net output #1: loss = 0.0705977 (* 1 = 0.0705977 loss)\nI0817 18:56:47.990226 17350 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0817 18:57:35.477804 17350 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0817 18:58:02.350744 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74988\nI0817 18:58:02.350792 17350 solver.cpp:404]     Test net output #1: loss = 1.0533 (* 1 = 1.0533 loss)\nI0817 18:58:02.775617 17350 solver.cpp:228] Iteration 12100, loss = 0.141664\nI0817 18:58:02.775674 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 18:58:02.775693 17350 solver.cpp:244]     Train net output #1: loss = 0.141663 (* 1 = 0.141663 loss)\nI0817 18:58:02.851579 17350 sgd_solver.cpp:166] Iteration 12100, lr = 
0.35\nI0817 18:58:50.284284 17350 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 18:59:17.149174 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73872\nI0817 18:59:17.149221 17350 solver.cpp:404]     Test net output #1: loss = 1.28127 (* 1 = 1.28127 loss)\nI0817 18:59:17.573741 17350 solver.cpp:228] Iteration 12200, loss = 0.140024\nI0817 18:59:17.573797 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 18:59:17.573815 17350 solver.cpp:244]     Train net output #1: loss = 0.140024 (* 1 = 0.140024 loss)\nI0817 18:59:17.649219 17350 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0817 19:00:05.110996 17350 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0817 19:00:31.979459 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72216\nI0817 19:00:31.979506 17350 solver.cpp:404]     Test net output #1: loss = 1.2466 (* 1 = 1.2466 loss)\nI0817 19:00:32.404345 17350 solver.cpp:228] Iteration 12300, loss = 0.180435\nI0817 19:00:32.404402 17350 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0817 19:00:32.404419 17350 solver.cpp:244]     Train net output #1: loss = 0.180435 (* 1 = 0.180435 loss)\nI0817 19:00:32.480998 17350 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0817 19:01:19.939183 17350 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0817 19:01:46.738323 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76496\nI0817 19:01:46.738370 17350 solver.cpp:404]     Test net output #1: loss = 1.03246 (* 1 = 1.03246 loss)\nI0817 19:01:47.163481 17350 solver.cpp:228] Iteration 12400, loss = 0.0660598\nI0817 19:01:47.163540 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:01:47.163558 17350 solver.cpp:244]     Train net output #1: loss = 0.0660597 (* 1 = 0.0660597 loss)\nI0817 19:01:47.241485 17350 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0817 19:02:34.687016 17350 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0817 19:03:01.447216 17350 solver.cpp:404]     
Test net output #0: accuracy = 0.69504\nI0817 19:03:01.447258 17350 solver.cpp:404]     Test net output #1: loss = 1.48134 (* 1 = 1.48134 loss)\nI0817 19:03:01.870503 17350 solver.cpp:228] Iteration 12500, loss = 0.104879\nI0817 19:03:01.870559 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:03:01.870578 17350 solver.cpp:244]     Train net output #1: loss = 0.104879 (* 1 = 0.104879 loss)\nI0817 19:03:01.950438 17350 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0817 19:03:49.404673 17350 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0817 19:04:16.223387 17350 solver.cpp:404]     Test net output #0: accuracy = 0.67596\nI0817 19:04:16.223433 17350 solver.cpp:404]     Test net output #1: loss = 1.76771 (* 1 = 1.76771 loss)\nI0817 19:04:16.647058 17350 solver.cpp:228] Iteration 12600, loss = 0.175159\nI0817 19:04:16.647114 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:04:16.647130 17350 solver.cpp:244]     Train net output #1: loss = 0.175158 (* 1 = 0.175158 loss)\nI0817 19:04:16.728984 17350 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0817 19:05:04.235041 17350 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0817 19:05:31.114557 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75008\nI0817 19:05:31.114605 17350 solver.cpp:404]     Test net output #1: loss = 1.00283 (* 1 = 1.00283 loss)\nI0817 19:05:31.537907 17350 solver.cpp:228] Iteration 12700, loss = 0.0925467\nI0817 19:05:31.537964 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:05:31.537982 17350 solver.cpp:244]     Train net output #1: loss = 0.0925465 (* 1 = 0.0925465 loss)\nI0817 19:05:31.611192 17350 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0817 19:06:19.098597 17350 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0817 19:06:45.965730 17350 solver.cpp:404]     Test net output #0: accuracy = 0.777\nI0817 19:06:45.965780 17350 solver.cpp:404]     Test net output #1: loss = 0.948018 (* 1 = 
0.948018 loss)\nI0817 19:06:46.389295 17350 solver.cpp:228] Iteration 12800, loss = 0.051011\nI0817 19:06:46.389351 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:06:46.389369 17350 solver.cpp:244]     Train net output #1: loss = 0.0510109 (* 1 = 0.0510109 loss)\nI0817 19:06:46.467304 17350 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0817 19:07:33.921020 17350 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0817 19:08:00.808311 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77008\nI0817 19:08:00.808359 17350 solver.cpp:404]     Test net output #1: loss = 1.01034 (* 1 = 1.01034 loss)\nI0817 19:08:01.231783 17350 solver.cpp:228] Iteration 12900, loss = 0.0934488\nI0817 19:08:01.231840 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:08:01.231858 17350 solver.cpp:244]     Train net output #1: loss = 0.0934487 (* 1 = 0.0934487 loss)\nI0817 19:08:01.311936 17350 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0817 19:08:48.817698 17350 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0817 19:09:15.693828 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76712\nI0817 19:09:15.693877 17350 solver.cpp:404]     Test net output #1: loss = 1.13433 (* 1 = 1.13433 loss)\nI0817 19:09:16.117166 17350 solver.cpp:228] Iteration 13000, loss = 0.0733316\nI0817 19:09:16.117224 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:09:16.117242 17350 solver.cpp:244]     Train net output #1: loss = 0.0733315 (* 1 = 0.0733315 loss)\nI0817 19:09:16.202306 17350 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0817 19:10:03.671046 17350 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0817 19:10:30.547227 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7168\nI0817 19:10:30.547274 17350 solver.cpp:404]     Test net output #1: loss = 1.28246 (* 1 = 1.28246 loss)\nI0817 19:10:30.970718 17350 solver.cpp:228] Iteration 13100, loss = 0.116047\nI0817 19:10:30.970775 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:10:30.970793 17350 solver.cpp:244]     Train net output #1: loss = 0.116047 (* 1 = 0.116047 loss)\nI0817 19:10:31.048794 17350 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0817 19:11:18.497768 17350 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0817 19:11:45.366039 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77896\nI0817 19:11:45.366086 17350 solver.cpp:404]     Test net output #1: loss = 1.00649 (* 1 = 1.00649 loss)\nI0817 19:11:45.789705 17350 solver.cpp:228] Iteration 13200, loss = 0.0837218\nI0817 19:11:45.789762 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:11:45.789779 17350 solver.cpp:244]     Train net output #1: loss = 0.0837217 (* 1 = 0.0837217 loss)\nI0817 19:11:45.865727 17350 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0817 19:12:33.306978 17350 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0817 19:13:00.177006 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76248\nI0817 19:13:00.177055 17350 solver.cpp:404]     Test net output #1: loss = 1.03007 (* 1 = 1.03007 loss)\nI0817 19:13:00.600333 17350 solver.cpp:228] Iteration 13300, loss = 0.0712003\nI0817 19:13:00.600389 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:13:00.600406 17350 solver.cpp:244]     Train net output #1: loss = 0.0712002 (* 1 = 0.0712002 loss)\nI0817 19:13:00.677624 17350 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0817 19:13:48.165597 17350 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0817 19:14:15.039922 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77348\nI0817 19:14:15.039968 17350 solver.cpp:404]     Test net output #1: loss = 0.898021 (* 1 = 0.898021 loss)\nI0817 19:14:15.463343 17350 solver.cpp:228] Iteration 13400, loss = 0.0607388\nI0817 19:14:15.463399 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:14:15.463416 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0607387 (* 1 = 0.0607387 loss)\nI0817 19:14:15.547600 17350 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0817 19:15:03.045339 17350 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0817 19:15:29.917419 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72272\nI0817 19:15:29.917467 17350 solver.cpp:404]     Test net output #1: loss = 1.51094 (* 1 = 1.51094 loss)\nI0817 19:15:30.340801 17350 solver.cpp:228] Iteration 13500, loss = 0.074508\nI0817 19:15:30.340855 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:15:30.340873 17350 solver.cpp:244]     Train net output #1: loss = 0.0745079 (* 1 = 0.0745079 loss)\nI0817 19:15:30.415613 17350 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0817 19:16:17.980175 17350 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0817 19:16:44.739616 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70952\nI0817 19:16:44.739661 17350 solver.cpp:404]     Test net output #1: loss = 1.39266 (* 1 = 1.39266 loss)\nI0817 19:16:45.162909 17350 solver.cpp:228] Iteration 13600, loss = 0.0600183\nI0817 19:16:45.162964 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:16:45.162981 17350 solver.cpp:244]     Train net output #1: loss = 0.0600182 (* 1 = 0.0600182 loss)\nI0817 19:16:45.241451 17350 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0817 19:17:32.673087 17350 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0817 19:17:59.495152 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7982\nI0817 19:17:59.495201 17350 solver.cpp:404]     Test net output #1: loss = 0.824094 (* 1 = 0.824094 loss)\nI0817 19:17:59.918395 17350 solver.cpp:228] Iteration 13700, loss = 0.10357\nI0817 19:17:59.918437 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:17:59.918453 17350 solver.cpp:244]     Train net output #1: loss = 0.103569 (* 1 = 0.103569 loss)\nI0817 19:17:59.996980 17350 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0817 
19:18:47.441705 17350 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0817 19:19:14.185467 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68472\nI0817 19:19:14.185513 17350 solver.cpp:404]     Test net output #1: loss = 1.75051 (* 1 = 1.75051 loss)\nI0817 19:19:14.609037 17350 solver.cpp:228] Iteration 13800, loss = 0.0928488\nI0817 19:19:14.609079 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:19:14.609097 17350 solver.cpp:244]     Train net output #1: loss = 0.0928486 (* 1 = 0.0928486 loss)\nI0817 19:19:14.687204 17350 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0817 19:20:02.106752 17350 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0817 19:20:28.917507 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75012\nI0817 19:20:28.917554 17350 solver.cpp:404]     Test net output #1: loss = 1.28898 (* 1 = 1.28898 loss)\nI0817 19:20:29.340878 17350 solver.cpp:228] Iteration 13900, loss = 0.0624749\nI0817 19:20:29.340920 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:20:29.340936 17350 solver.cpp:244]     Train net output #1: loss = 0.0624748 (* 1 = 0.0624748 loss)\nI0817 19:20:29.421567 17350 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0817 19:21:16.908783 17350 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0817 19:21:43.628079 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78788\nI0817 19:21:43.628125 17350 solver.cpp:404]     Test net output #1: loss = 0.914922 (* 1 = 0.914922 loss)\nI0817 19:21:44.051415 17350 solver.cpp:228] Iteration 14000, loss = 0.0998084\nI0817 19:21:44.051460 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:21:44.051476 17350 solver.cpp:244]     Train net output #1: loss = 0.0998083 (* 1 = 0.0998083 loss)\nI0817 19:21:44.130417 17350 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0817 19:22:31.609208 17350 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0817 19:22:58.444946 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.79128\nI0817 19:22:58.444999 17350 solver.cpp:404]     Test net output #1: loss = 0.911605 (* 1 = 0.911605 loss)\nI0817 19:22:58.868779 17350 solver.cpp:228] Iteration 14100, loss = 0.10544\nI0817 19:22:58.868839 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:22:58.868856 17350 solver.cpp:244]     Train net output #1: loss = 0.10544 (* 1 = 0.10544 loss)\nI0817 19:22:58.944694 17350 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0817 19:23:46.356658 17350 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0817 19:24:13.173966 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76804\nI0817 19:24:13.174021 17350 solver.cpp:404]     Test net output #1: loss = 1.03877 (* 1 = 1.03877 loss)\nI0817 19:24:13.598569 17350 solver.cpp:228] Iteration 14200, loss = 0.0519607\nI0817 19:24:13.598625 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:24:13.598644 17350 solver.cpp:244]     Train net output #1: loss = 0.0519606 (* 1 = 0.0519606 loss)\nI0817 19:24:13.683009 17350 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0817 19:25:01.109652 17350 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0817 19:25:27.931373 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77424\nI0817 19:25:27.931422 17350 solver.cpp:404]     Test net output #1: loss = 0.937244 (* 1 = 0.937244 loss)\nI0817 19:25:28.355801 17350 solver.cpp:228] Iteration 14300, loss = 0.0999796\nI0817 19:25:28.355857 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:25:28.355875 17350 solver.cpp:244]     Train net output #1: loss = 0.0999795 (* 1 = 0.0999795 loss)\nI0817 19:25:28.437398 17350 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0817 19:26:15.788652 17350 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0817 19:26:42.575274 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7458\nI0817 19:26:42.575322 17350 solver.cpp:404]     Test net output #1: loss = 1.15264 (* 1 = 
1.15264 loss)\nI0817 19:26:42.998754 17350 solver.cpp:228] Iteration 14400, loss = 0.0462054\nI0817 19:26:42.998808 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:26:42.998826 17350 solver.cpp:244]     Train net output #1: loss = 0.0462053 (* 1 = 0.0462053 loss)\nI0817 19:26:43.084411 17350 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0817 19:27:30.514037 17350 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0817 19:27:57.273463 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7466\nI0817 19:27:57.273510 17350 solver.cpp:404]     Test net output #1: loss = 1.06725 (* 1 = 1.06725 loss)\nI0817 19:27:57.696702 17350 solver.cpp:228] Iteration 14500, loss = 0.0770832\nI0817 19:27:57.696754 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:27:57.696772 17350 solver.cpp:244]     Train net output #1: loss = 0.0770831 (* 1 = 0.0770831 loss)\nI0817 19:27:57.777509 17350 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0817 19:28:45.189327 17350 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0817 19:29:11.916568 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7786\nI0817 19:29:11.916615 17350 solver.cpp:404]     Test net output #1: loss = 0.946176 (* 1 = 0.946176 loss)\nI0817 19:29:12.340117 17350 solver.cpp:228] Iteration 14600, loss = 0.137166\nI0817 19:29:12.340171 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:29:12.340188 17350 solver.cpp:244]     Train net output #1: loss = 0.137166 (* 1 = 0.137166 loss)\nI0817 19:29:12.414767 17350 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0817 19:29:59.840610 17350 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0817 19:30:26.650209 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76632\nI0817 19:30:26.650260 17350 solver.cpp:404]     Test net output #1: loss = 1.069 (* 1 = 1.069 loss)\nI0817 19:30:27.074401 17350 solver.cpp:228] Iteration 14700, loss = 0.117211\nI0817 19:30:27.074458 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:30:27.074477 17350 solver.cpp:244]     Train net output #1: loss = 0.117211 (* 1 = 0.117211 loss)\nI0817 19:30:27.147263 17350 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0817 19:31:14.515882 17350 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0817 19:31:41.378978 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77912\nI0817 19:31:41.379027 17350 solver.cpp:404]     Test net output #1: loss = 0.960641 (* 1 = 0.960641 loss)\nI0817 19:31:41.803712 17350 solver.cpp:228] Iteration 14800, loss = 0.0688798\nI0817 19:31:41.803763 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:31:41.803781 17350 solver.cpp:244]     Train net output #1: loss = 0.0688797 (* 1 = 0.0688797 loss)\nI0817 19:31:41.876029 17350 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0817 19:32:29.206220 17350 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0817 19:32:56.087581 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76388\nI0817 19:32:56.087630 17350 solver.cpp:404]     Test net output #1: loss = 0.988014 (* 1 = 0.988014 loss)\nI0817 19:32:56.511389 17350 solver.cpp:228] Iteration 14900, loss = 0.142436\nI0817 19:32:56.511441 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 19:32:56.511458 17350 solver.cpp:244]     Train net output #1: loss = 0.142436 (* 1 = 0.142436 loss)\nI0817 19:32:56.595818 17350 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0817 19:33:43.948210 17350 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0817 19:34:10.672551 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6952\nI0817 19:34:10.672600 17350 solver.cpp:404]     Test net output #1: loss = 1.52349 (* 1 = 1.52349 loss)\nI0817 19:34:11.096168 17350 solver.cpp:228] Iteration 15000, loss = 0.0778378\nI0817 19:34:11.096216 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:34:11.096235 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0778378 (* 1 = 0.0778378 loss)\nI0817 19:34:11.174423 17350 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0817 19:34:58.528270 17350 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0817 19:35:25.238179 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0817 19:35:25.238224 17350 solver.cpp:404]     Test net output #1: loss = 0.809103 (* 1 = 0.809103 loss)\nI0817 19:35:25.662679 17350 solver.cpp:228] Iteration 15100, loss = 0.148685\nI0817 19:35:25.662729 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:35:25.662745 17350 solver.cpp:244]     Train net output #1: loss = 0.148685 (* 1 = 0.148685 loss)\nI0817 19:35:25.738679 17350 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0817 19:36:13.064779 17350 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0817 19:36:39.773805 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75472\nI0817 19:36:39.773855 17350 solver.cpp:404]     Test net output #1: loss = 1.14311 (* 1 = 1.14311 loss)\nI0817 19:36:40.198567 17350 solver.cpp:228] Iteration 15200, loss = 0.119788\nI0817 19:36:40.198616 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 19:36:40.198632 17350 solver.cpp:244]     Train net output #1: loss = 0.119788 (* 1 = 0.119788 loss)\nI0817 19:36:40.277673 17350 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0817 19:37:27.597877 17350 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0817 19:37:54.321766 17350 solver.cpp:404]     Test net output #0: accuracy = 0.67744\nI0817 19:37:54.321812 17350 solver.cpp:404]     Test net output #1: loss = 1.55585 (* 1 = 1.55585 loss)\nI0817 19:37:54.745532 17350 solver.cpp:228] Iteration 15300, loss = 0.0337923\nI0817 19:37:54.745584 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:37:54.745600 17350 solver.cpp:244]     Train net output #1: loss = 0.0337922 (* 1 = 0.0337922 loss)\nI0817 19:37:54.824129 17350 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0817 
19:38:42.141363 17350 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0817 19:39:08.893129 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78312\nI0817 19:39:08.893179 17350 solver.cpp:404]     Test net output #1: loss = 0.908162 (* 1 = 0.908162 loss)\nI0817 19:39:09.316352 17350 solver.cpp:228] Iteration 15400, loss = 0.0945732\nI0817 19:39:09.316402 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:39:09.316419 17350 solver.cpp:244]     Train net output #1: loss = 0.0945731 (* 1 = 0.0945731 loss)\nI0817 19:39:09.400985 17350 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0817 19:39:56.739606 17350 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0817 19:40:23.472981 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76648\nI0817 19:40:23.473026 17350 solver.cpp:404]     Test net output #1: loss = 1.15762 (* 1 = 1.15762 loss)\nI0817 19:40:23.897336 17350 solver.cpp:228] Iteration 15500, loss = 0.0871762\nI0817 19:40:23.897387 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:40:23.897404 17350 solver.cpp:244]     Train net output #1: loss = 0.0871761 (* 1 = 0.0871761 loss)\nI0817 19:40:23.972657 17350 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0817 19:41:11.348284 17350 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0817 19:41:38.049829 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74252\nI0817 19:41:38.049875 17350 solver.cpp:404]     Test net output #1: loss = 1.25681 (* 1 = 1.25681 loss)\nI0817 19:41:38.473891 17350 solver.cpp:228] Iteration 15600, loss = 0.0705124\nI0817 19:41:38.473949 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:41:38.473968 17350 solver.cpp:244]     Train net output #1: loss = 0.0705123 (* 1 = 0.0705123 loss)\nI0817 19:41:38.549960 17350 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0817 19:42:25.902472 17350 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0817 19:42:52.764631 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.73748\nI0817 19:42:52.764679 17350 solver.cpp:404]     Test net output #1: loss = 1.14931 (* 1 = 1.14931 loss)\nI0817 19:42:53.189113 17350 solver.cpp:228] Iteration 15700, loss = 0.0264126\nI0817 19:42:53.189165 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 19:42:53.189182 17350 solver.cpp:244]     Train net output #1: loss = 0.0264124 (* 1 = 0.0264124 loss)\nI0817 19:42:53.264365 17350 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0817 19:43:40.609388 17350 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0817 19:44:07.474814 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74548\nI0817 19:44:07.474864 17350 solver.cpp:404]     Test net output #1: loss = 1.13409 (* 1 = 1.13409 loss)\nI0817 19:44:07.899446 17350 solver.cpp:228] Iteration 15800, loss = 0.0647234\nI0817 19:44:07.899497 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:44:07.899514 17350 solver.cpp:244]     Train net output #1: loss = 0.0647232 (* 1 = 0.0647232 loss)\nI0817 19:44:07.975872 17350 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0817 19:44:55.407857 17350 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0817 19:45:22.276036 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76088\nI0817 19:45:22.276084 17350 solver.cpp:404]     Test net output #1: loss = 1.07711 (* 1 = 1.07711 loss)\nI0817 19:45:22.700351 17350 solver.cpp:228] Iteration 15900, loss = 0.0499001\nI0817 19:45:22.700403 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:45:22.700420 17350 solver.cpp:244]     Train net output #1: loss = 0.0498999 (* 1 = 0.0498999 loss)\nI0817 19:45:22.778898 17350 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0817 19:46:10.181181 17350 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0817 19:46:37.054009 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79028\nI0817 19:46:37.054064 17350 solver.cpp:404]     Test net output #1: loss = 0.914918 (* 1 = 
0.914918 loss)\nI0817 19:46:37.478648 17350 solver.cpp:228] Iteration 16000, loss = 0.032845\nI0817 19:46:37.478704 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:46:37.478730 17350 solver.cpp:244]     Train net output #1: loss = 0.0328448 (* 1 = 0.0328448 loss)\nI0817 19:46:37.557612 17350 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0817 19:47:24.910099 17350 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0817 19:47:51.785033 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74188\nI0817 19:47:51.785087 17350 solver.cpp:404]     Test net output #1: loss = 1.09735 (* 1 = 1.09735 loss)\nI0817 19:47:52.208310 17350 solver.cpp:228] Iteration 16100, loss = 0.0789256\nI0817 19:47:52.208360 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:47:52.208384 17350 solver.cpp:244]     Train net output #1: loss = 0.0789254 (* 1 = 0.0789254 loss)\nI0817 19:47:52.289494 17350 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0817 19:48:39.688457 17350 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0817 19:49:06.554201 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74908\nI0817 19:49:06.554255 17350 solver.cpp:404]     Test net output #1: loss = 1.04072 (* 1 = 1.04072 loss)\nI0817 19:49:06.977706 17350 solver.cpp:228] Iteration 16200, loss = 0.0660203\nI0817 19:49:06.977756 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:49:06.977782 17350 solver.cpp:244]     Train net output #1: loss = 0.0660201 (* 1 = 0.0660201 loss)\nI0817 19:49:07.053668 17350 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0817 19:49:54.460167 17350 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0817 19:50:21.319943 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7274\nI0817 19:50:21.319998 17350 solver.cpp:404]     Test net output #1: loss = 1.18653 (* 1 = 1.18653 loss)\nI0817 19:50:21.743398 17350 solver.cpp:228] Iteration 16300, loss = 0.0563729\nI0817 19:50:21.743449 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:50:21.743474 17350 solver.cpp:244]     Train net output #1: loss = 0.0563727 (* 1 = 0.0563727 loss)\nI0817 19:50:21.817597 17350 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0817 19:51:09.202662 17350 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0817 19:51:36.069278 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77644\nI0817 19:51:36.069332 17350 solver.cpp:404]     Test net output #1: loss = 0.892213 (* 1 = 0.892213 loss)\nI0817 19:51:36.492903 17350 solver.cpp:228] Iteration 16400, loss = 0.118808\nI0817 19:51:36.492959 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 19:51:36.492985 17350 solver.cpp:244]     Train net output #1: loss = 0.118808 (* 1 = 0.118808 loss)\nI0817 19:51:36.573400 17350 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0817 19:52:24.034111 17350 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0817 19:52:50.891325 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7848\nI0817 19:52:50.891389 17350 solver.cpp:404]     Test net output #1: loss = 0.982574 (* 1 = 0.982574 loss)\nI0817 19:52:51.314602 17350 solver.cpp:228] Iteration 16500, loss = 0.0535136\nI0817 19:52:51.314653 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 19:52:51.314678 17350 solver.cpp:244]     Train net output #1: loss = 0.0535134 (* 1 = 0.0535134 loss)\nI0817 19:52:51.390893 17350 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0817 19:53:38.765563 17350 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0817 19:54:05.644111 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7566\nI0817 19:54:05.644186 17350 solver.cpp:404]     Test net output #1: loss = 1.0422 (* 1 = 1.0422 loss)\nI0817 19:54:06.068691 17350 solver.cpp:228] Iteration 16600, loss = 0.0778213\nI0817 19:54:06.068750 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:54:06.068776 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0778212 (* 1 = 0.0778212 loss)\nI0817 19:54:06.142542 17350 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0817 19:54:53.535751 17350 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0817 19:55:20.442010 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77232\nI0817 19:55:20.442085 17350 solver.cpp:404]     Test net output #1: loss = 0.979571 (* 1 = 0.979571 loss)\nI0817 19:55:20.866772 17350 solver.cpp:228] Iteration 16700, loss = 0.0303558\nI0817 19:55:20.866834 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 19:55:20.866859 17350 solver.cpp:244]     Train net output #1: loss = 0.0303557 (* 1 = 0.0303557 loss)\nI0817 19:55:20.947677 17350 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0817 19:56:08.313275 17350 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0817 19:56:35.159355 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80124\nI0817 19:56:35.159407 17350 solver.cpp:404]     Test net output #1: loss = 0.847986 (* 1 = 0.847986 loss)\nI0817 19:56:35.585669 17350 solver.cpp:228] Iteration 16800, loss = 0.0652533\nI0817 19:56:35.585731 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:56:35.585749 17350 solver.cpp:244]     Train net output #1: loss = 0.0652532 (* 1 = 0.0652532 loss)\nI0817 19:56:35.661500 17350 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0817 19:57:23.085757 17350 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0817 19:57:49.985237 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71076\nI0817 19:57:49.985306 17350 solver.cpp:404]     Test net output #1: loss = 1.36229 (* 1 = 1.36229 loss)\nI0817 19:57:50.409632 17350 solver.cpp:228] Iteration 16900, loss = 0.0924252\nI0817 19:57:50.409682 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 19:57:50.409699 17350 solver.cpp:244]     Train net output #1: loss = 0.092425 (* 1 = 0.092425 loss)\nI0817 19:57:50.485021 17350 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0817 
19:58:37.906318 17350 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0817 19:59:04.805325 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7458\nI0817 19:59:04.805394 17350 solver.cpp:404]     Test net output #1: loss = 1.25712 (* 1 = 1.25712 loss)\nI0817 19:59:05.230247 17350 solver.cpp:228] Iteration 17000, loss = 0.0823143\nI0817 19:59:05.230294 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 19:59:05.230311 17350 solver.cpp:244]     Train net output #1: loss = 0.0823142 (* 1 = 0.0823142 loss)\nI0817 19:59:05.308709 17350 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0817 19:59:52.664222 17350 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0817 20:00:19.562530 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78952\nI0817 20:00:19.562599 17350 solver.cpp:404]     Test net output #1: loss = 0.873687 (* 1 = 0.873687 loss)\nI0817 20:00:19.987192 17350 solver.cpp:228] Iteration 17100, loss = 0.0579181\nI0817 20:00:19.987248 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:00:19.987265 17350 solver.cpp:244]     Train net output #1: loss = 0.0579179 (* 1 = 0.0579179 loss)\nI0817 20:00:20.064110 17350 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0817 20:01:07.508781 17350 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0817 20:01:34.408753 17350 solver.cpp:404]     Test net output #0: accuracy = 0.60452\nI0817 20:01:34.408823 17350 solver.cpp:404]     Test net output #1: loss = 1.96805 (* 1 = 1.96805 loss)\nI0817 20:01:34.833323 17350 solver.cpp:228] Iteration 17200, loss = 0.0769675\nI0817 20:01:34.833376 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:01:34.833394 17350 solver.cpp:244]     Train net output #1: loss = 0.0769674 (* 1 = 0.0769674 loss)\nI0817 20:01:34.915225 17350 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0817 20:02:22.314976 17350 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0817 20:02:49.217629 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.75808\nI0817 20:02:49.217702 17350 solver.cpp:404]     Test net output #1: loss = 1.05302 (* 1 = 1.05302 loss)\nI0817 20:02:49.641100 17350 solver.cpp:228] Iteration 17300, loss = 0.0866784\nI0817 20:02:49.641158 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:02:49.641175 17350 solver.cpp:244]     Train net output #1: loss = 0.0866782 (* 1 = 0.0866782 loss)\nI0817 20:02:49.718464 17350 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0817 20:03:37.095780 17350 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0817 20:04:03.998176 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73044\nI0817 20:04:03.998246 17350 solver.cpp:404]     Test net output #1: loss = 1.18911 (* 1 = 1.18911 loss)\nI0817 20:04:04.421612 17350 solver.cpp:228] Iteration 17400, loss = 0.0582839\nI0817 20:04:04.421669 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:04:04.421686 17350 solver.cpp:244]     Train net output #1: loss = 0.0582836 (* 1 = 0.0582836 loss)\nI0817 20:04:04.495182 17350 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0817 20:04:51.912483 17350 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0817 20:05:18.816560 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70612\nI0817 20:05:18.816629 17350 solver.cpp:404]     Test net output #1: loss = 1.28963 (* 1 = 1.28963 loss)\nI0817 20:05:19.239858 17350 solver.cpp:228] Iteration 17500, loss = 0.106496\nI0817 20:05:19.239915 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:05:19.239933 17350 solver.cpp:244]     Train net output #1: loss = 0.106496 (* 1 = 0.106496 loss)\nI0817 20:05:19.317642 17350 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0817 20:06:06.715631 17350 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0817 20:06:33.621253 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78552\nI0817 20:06:33.621323 17350 solver.cpp:404]     Test net output #1: loss = 0.910128 (* 1 = 
0.910128 loss)\nI0817 20:06:34.044759 17350 solver.cpp:228] Iteration 17600, loss = 0.0992502\nI0817 20:06:34.044816 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:06:34.044834 17350 solver.cpp:244]     Train net output #1: loss = 0.0992499 (* 1 = 0.0992499 loss)\nI0817 20:06:34.118110 17350 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0817 20:07:21.518321 17350 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0817 20:07:48.422060 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73044\nI0817 20:07:48.422127 17350 solver.cpp:404]     Test net output #1: loss = 1.37822 (* 1 = 1.37822 loss)\nI0817 20:07:48.846005 17350 solver.cpp:228] Iteration 17700, loss = 0.0739497\nI0817 20:07:48.846067 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:07:48.846086 17350 solver.cpp:244]     Train net output #1: loss = 0.0739495 (* 1 = 0.0739495 loss)\nI0817 20:07:48.925065 17350 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0817 20:08:36.335892 17350 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0817 20:09:03.243708 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78604\nI0817 20:09:03.243778 17350 solver.cpp:404]     Test net output #1: loss = 0.847251 (* 1 = 0.847251 loss)\nI0817 20:09:03.667197 17350 solver.cpp:228] Iteration 17800, loss = 0.123599\nI0817 20:09:03.667253 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:09:03.667273 17350 solver.cpp:244]     Train net output #1: loss = 0.123599 (* 1 = 0.123599 loss)\nI0817 20:09:03.741904 17350 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0817 20:09:51.134598 17350 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0817 20:10:18.033530 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78232\nI0817 20:10:18.033597 17350 solver.cpp:404]     Test net output #1: loss = 0.953722 (* 1 = 0.953722 loss)\nI0817 20:10:18.457047 17350 solver.cpp:228] Iteration 17900, loss = 0.0365881\nI0817 20:10:18.457106 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:10:18.457124 17350 solver.cpp:244]     Train net output #1: loss = 0.0365879 (* 1 = 0.0365879 loss)\nI0817 20:10:18.537894 17350 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0817 20:11:05.889839 17350 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0817 20:11:32.790030 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76924\nI0817 20:11:32.790102 17350 solver.cpp:404]     Test net output #1: loss = 1.09363 (* 1 = 1.09363 loss)\nI0817 20:11:33.214802 17350 solver.cpp:228] Iteration 18000, loss = 0.0270334\nI0817 20:11:33.214860 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:11:33.214879 17350 solver.cpp:244]     Train net output #1: loss = 0.0270333 (* 1 = 0.0270333 loss)\nI0817 20:11:33.293798 17350 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0817 20:12:20.668792 17350 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0817 20:12:47.570114 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68588\nI0817 20:12:47.570184 17350 solver.cpp:404]     Test net output #1: loss = 1.67477 (* 1 = 1.67477 loss)\nI0817 20:12:47.994825 17350 solver.cpp:228] Iteration 18100, loss = 0.131577\nI0817 20:12:47.994882 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:12:47.994900 17350 solver.cpp:244]     Train net output #1: loss = 0.131577 (* 1 = 0.131577 loss)\nI0817 20:12:48.071051 17350 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0817 20:13:35.438431 17350 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0817 20:14:02.339484 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6954\nI0817 20:14:02.339555 17350 solver.cpp:404]     Test net output #1: loss = 1.56925 (* 1 = 1.56925 loss)\nI0817 20:14:02.764221 17350 solver.cpp:228] Iteration 18200, loss = 0.203074\nI0817 20:14:02.764279 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:14:02.764297 17350 solver.cpp:244]     Train net output #1: loss 
= 0.203073 (* 1 = 0.203073 loss)\nI0817 20:14:02.838522 17350 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0817 20:14:50.203066 17350 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0817 20:15:17.106729 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69384\nI0817 20:15:17.106797 17350 solver.cpp:404]     Test net output #1: loss = 1.47518 (* 1 = 1.47518 loss)\nI0817 20:15:17.531492 17350 solver.cpp:228] Iteration 18300, loss = 0.0662978\nI0817 20:15:17.531553 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:15:17.531571 17350 solver.cpp:244]     Train net output #1: loss = 0.0662975 (* 1 = 0.0662975 loss)\nI0817 20:15:17.609725 17350 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0817 20:16:04.962106 17350 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0817 20:16:31.863314 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75484\nI0817 20:16:31.863384 17350 solver.cpp:404]     Test net output #1: loss = 1.08527 (* 1 = 1.08527 loss)\nI0817 20:16:32.288507 17350 solver.cpp:228] Iteration 18400, loss = 0.0762512\nI0817 20:16:32.288566 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:16:32.288585 17350 solver.cpp:244]     Train net output #1: loss = 0.0762509 (* 1 = 0.0762509 loss)\nI0817 20:16:32.361707 17350 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0817 20:17:19.717939 17350 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0817 20:17:46.618083 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78112\nI0817 20:17:46.618151 17350 solver.cpp:404]     Test net output #1: loss = 0.879263 (* 1 = 0.879263 loss)\nI0817 20:17:47.042630 17350 solver.cpp:228] Iteration 18500, loss = 0.0739494\nI0817 20:17:47.042686 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:17:47.042704 17350 solver.cpp:244]     Train net output #1: loss = 0.0739491 (* 1 = 0.0739491 loss)\nI0817 20:17:47.120868 17350 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0817 
20:18:34.550953 17350 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0817 20:19:01.455065 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74292\nI0817 20:19:01.455132 17350 solver.cpp:404]     Test net output #1: loss = 1.34998 (* 1 = 1.34998 loss)\nI0817 20:19:01.880071 17350 solver.cpp:228] Iteration 18600, loss = 0.0562073\nI0817 20:19:01.880127 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:19:01.880146 17350 solver.cpp:244]     Train net output #1: loss = 0.056207 (* 1 = 0.056207 loss)\nI0817 20:19:01.957592 17350 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0817 20:19:49.337714 17350 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0817 20:20:16.169803 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71956\nI0817 20:20:16.169867 17350 solver.cpp:404]     Test net output #1: loss = 1.29189 (* 1 = 1.29189 loss)\nI0817 20:20:16.594657 17350 solver.cpp:228] Iteration 18700, loss = 0.0636346\nI0817 20:20:16.594715 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:20:16.594733 17350 solver.cpp:244]     Train net output #1: loss = 0.0636343 (* 1 = 0.0636343 loss)\nI0817 20:20:16.675621 17350 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0817 20:21:04.038060 17350 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0817 20:21:30.766739 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6856\nI0817 20:21:30.766799 17350 solver.cpp:404]     Test net output #1: loss = 1.72727 (* 1 = 1.72727 loss)\nI0817 20:21:31.191181 17350 solver.cpp:228] Iteration 18800, loss = 0.163637\nI0817 20:21:31.191243 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 20:21:31.191268 17350 solver.cpp:244]     Train net output #1: loss = 0.163636 (* 1 = 0.163636 loss)\nI0817 20:21:31.271666 17350 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0817 20:22:18.651651 17350 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0817 20:22:45.440986 17350 solver.cpp:404]     Test net 
output #0: accuracy = 0.64572\nI0817 20:22:45.441042 17350 solver.cpp:404]     Test net output #1: loss = 1.99774 (* 1 = 1.99774 loss)\nI0817 20:22:45.865814 17350 solver.cpp:228] Iteration 18900, loss = 0.0342263\nI0817 20:22:45.865878 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:22:45.865903 17350 solver.cpp:244]     Train net output #1: loss = 0.034226 (* 1 = 0.034226 loss)\nI0817 20:22:45.946661 17350 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0817 20:23:33.269939 17350 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0817 20:23:59.990048 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7446\nI0817 20:23:59.990109 17350 solver.cpp:404]     Test net output #1: loss = 1.19064 (* 1 = 1.19064 loss)\nI0817 20:24:00.415289 17350 solver.cpp:228] Iteration 19000, loss = 0.138372\nI0817 20:24:00.415354 17350 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0817 20:24:00.415379 17350 solver.cpp:244]     Train net output #1: loss = 0.138371 (* 1 = 0.138371 loss)\nI0817 20:24:00.497126 17350 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0817 20:24:47.875061 17350 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0817 20:25:14.602082 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76796\nI0817 20:25:14.602140 17350 solver.cpp:404]     Test net output #1: loss = 1.04024 (* 1 = 1.04024 loss)\nI0817 20:25:15.027050 17350 solver.cpp:228] Iteration 19100, loss = 0.103197\nI0817 20:25:15.027114 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:25:15.027140 17350 solver.cpp:244]     Train net output #1: loss = 0.103197 (* 1 = 0.103197 loss)\nI0817 20:25:15.105883 17350 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0817 20:26:02.446100 17350 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0817 20:26:29.188663 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77652\nI0817 20:26:29.188724 17350 solver.cpp:404]     Test net output #1: loss = 1.04062 (* 1 = 1.04062 
loss)\nI0817 20:26:29.612665 17350 solver.cpp:228] Iteration 19200, loss = 0.0699573\nI0817 20:26:29.612728 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:26:29.612753 17350 solver.cpp:244]     Train net output #1: loss = 0.0699569 (* 1 = 0.0699569 loss)\nI0817 20:26:29.690973 17350 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0817 20:27:17.022385 17350 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0817 20:27:43.822867 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80208\nI0817 20:27:43.822933 17350 solver.cpp:404]     Test net output #1: loss = 0.873206 (* 1 = 0.873206 loss)\nI0817 20:27:44.247658 17350 solver.cpp:228] Iteration 19300, loss = 0.0649771\nI0817 20:27:44.247721 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:27:44.247747 17350 solver.cpp:244]     Train net output #1: loss = 0.0649767 (* 1 = 0.0649767 loss)\nI0817 20:27:44.319553 17350 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0817 20:28:31.723526 17350 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0817 20:28:58.433599 17350 solver.cpp:404]     Test net output #0: accuracy = 0.6616\nI0817 20:28:58.433650 17350 solver.cpp:404]     Test net output #1: loss = 1.95612 (* 1 = 1.95612 loss)\nI0817 20:28:58.858129 17350 solver.cpp:228] Iteration 19400, loss = 0.0494103\nI0817 20:28:58.858189 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:28:58.858207 17350 solver.cpp:244]     Train net output #1: loss = 0.0494099 (* 1 = 0.0494099 loss)\nI0817 20:28:58.937961 17350 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0817 20:29:46.333986 17350 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0817 20:30:13.110070 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77804\nI0817 20:30:13.110147 17350 solver.cpp:404]     Test net output #1: loss = 1.07611 (* 1 = 1.07611 loss)\nI0817 20:30:13.533643 17350 solver.cpp:228] Iteration 19500, loss = 0.0370102\nI0817 20:30:13.533685 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:30:13.533710 17350 solver.cpp:244]     Train net output #1: loss = 0.0370098 (* 1 = 0.0370098 loss)\nI0817 20:30:13.612067 17350 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0817 20:31:01.007016 17350 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0817 20:31:27.909881 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0817 20:31:27.909955 17350 solver.cpp:404]     Test net output #1: loss = 1.33339 (* 1 = 1.33339 loss)\nI0817 20:31:28.333683 17350 solver.cpp:228] Iteration 19600, loss = 0.102791\nI0817 20:31:28.333724 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:31:28.333739 17350 solver.cpp:244]     Train net output #1: loss = 0.10279 (* 1 = 0.10279 loss)\nI0817 20:31:28.413924 17350 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0817 20:32:15.746378 17350 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0817 20:32:42.650877 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74716\nI0817 20:32:42.650954 17350 solver.cpp:404]     Test net output #1: loss = 1.09915 (* 1 = 1.09915 loss)\nI0817 20:32:43.074225 17350 solver.cpp:228] Iteration 19700, loss = 0.072091\nI0817 20:32:43.074281 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:32:43.074298 17350 solver.cpp:244]     Train net output #1: loss = 0.0720906 (* 1 = 0.0720906 loss)\nI0817 20:32:43.154610 17350 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0817 20:33:30.577513 17350 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0817 20:33:57.483067 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7864\nI0817 20:33:57.483139 17350 solver.cpp:404]     Test net output #1: loss = 0.993229 (* 1 = 0.993229 loss)\nI0817 20:33:57.907908 17350 solver.cpp:228] Iteration 19800, loss = 0.0385256\nI0817 20:33:57.907969 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:33:57.907987 17350 solver.cpp:244]     Train net output #1: loss 
= 0.0385253 (* 1 = 0.0385253 loss)\nI0817 20:33:57.988844 17350 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0817 20:34:45.329104 17350 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0817 20:35:12.232501 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77308\nI0817 20:35:12.232568 17350 solver.cpp:404]     Test net output #1: loss = 1.01177 (* 1 = 1.01177 loss)\nI0817 20:35:12.656155 17350 solver.cpp:228] Iteration 19900, loss = 0.0505939\nI0817 20:35:12.656211 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:35:12.656229 17350 solver.cpp:244]     Train net output #1: loss = 0.0505935 (* 1 = 0.0505935 loss)\nI0817 20:35:12.737706 17350 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0817 20:36:00.158095 17350 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0817 20:36:27.066969 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75228\nI0817 20:36:27.067039 17350 solver.cpp:404]     Test net output #1: loss = 1.14007 (* 1 = 1.14007 loss)\nI0817 20:36:27.491792 17350 solver.cpp:228] Iteration 20000, loss = 0.0838493\nI0817 20:36:27.491847 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:36:27.491864 17350 solver.cpp:244]     Train net output #1: loss = 0.0838489 (* 1 = 0.0838489 loss)\nI0817 20:36:27.567672 17350 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0817 20:37:14.996439 17350 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0817 20:37:41.904680 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81356\nI0817 20:37:41.904748 17350 solver.cpp:404]     Test net output #1: loss = 0.867012 (* 1 = 0.867012 loss)\nI0817 20:37:42.329396 17350 solver.cpp:228] Iteration 20100, loss = 0.0803094\nI0817 20:37:42.329452 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:37:42.329469 17350 solver.cpp:244]     Train net output #1: loss = 0.080309 (* 1 = 0.080309 loss)\nI0817 20:37:42.407910 17350 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0817 
20:38:29.814242 17350 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0817 20:38:56.716941 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70784\nI0817 20:38:56.717015 17350 solver.cpp:404]     Test net output #1: loss = 1.74488 (* 1 = 1.74488 loss)\nI0817 20:38:57.141598 17350 solver.cpp:228] Iteration 20200, loss = 0.11566\nI0817 20:38:57.141650 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:38:57.141667 17350 solver.cpp:244]     Train net output #1: loss = 0.11566 (* 1 = 0.11566 loss)\nI0817 20:38:57.223660 17350 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0817 20:39:44.584118 17350 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0817 20:40:11.487820 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75288\nI0817 20:40:11.487887 17350 solver.cpp:404]     Test net output #1: loss = 1.2824 (* 1 = 1.2824 loss)\nI0817 20:40:11.912479 17350 solver.cpp:228] Iteration 20300, loss = 0.0699975\nI0817 20:40:11.912533 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:40:11.912549 17350 solver.cpp:244]     Train net output #1: loss = 0.0699972 (* 1 = 0.0699972 loss)\nI0817 20:40:11.992213 17350 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0817 20:40:59.374578 17350 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0817 20:41:26.282423 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7248\nI0817 20:41:26.282492 17350 solver.cpp:404]     Test net output #1: loss = 1.31478 (* 1 = 1.31478 loss)\nI0817 20:41:26.707016 17350 solver.cpp:228] Iteration 20400, loss = 0.0630496\nI0817 20:41:26.707068 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:41:26.707087 17350 solver.cpp:244]     Train net output #1: loss = 0.0630493 (* 1 = 0.0630493 loss)\nI0817 20:41:26.783354 17350 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0817 20:42:14.189899 17350 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0817 20:42:41.097415 17350 solver.cpp:404]     Test net output 
#0: accuracy = 0.70476\nI0817 20:42:41.097486 17350 solver.cpp:404]     Test net output #1: loss = 1.52153 (* 1 = 1.52153 loss)\nI0817 20:42:41.522040 17350 solver.cpp:228] Iteration 20500, loss = 0.0361795\nI0817 20:42:41.522095 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 20:42:41.522114 17350 solver.cpp:244]     Train net output #1: loss = 0.0361792 (* 1 = 0.0361792 loss)\nI0817 20:42:41.603720 17350 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0817 20:43:29.006253 17350 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0817 20:43:55.912197 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73428\nI0817 20:43:55.912267 17350 solver.cpp:404]     Test net output #1: loss = 1.28924 (* 1 = 1.28924 loss)\nI0817 20:43:56.336418 17350 solver.cpp:228] Iteration 20600, loss = 0.0761404\nI0817 20:43:56.336473 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:43:56.336490 17350 solver.cpp:244]     Train net output #1: loss = 0.07614 (* 1 = 0.07614 loss)\nI0817 20:43:56.412120 17350 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0817 20:44:43.864110 17350 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0817 20:45:10.767691 17350 solver.cpp:404]     Test net output #0: accuracy = 0.62348\nI0817 20:45:10.767762 17350 solver.cpp:404]     Test net output #1: loss = 2.43583 (* 1 = 2.43583 loss)\nI0817 20:45:11.192332 17350 solver.cpp:228] Iteration 20700, loss = 0.0978586\nI0817 20:45:11.192389 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:45:11.192406 17350 solver.cpp:244]     Train net output #1: loss = 0.0978583 (* 1 = 0.0978583 loss)\nI0817 20:45:11.269120 17350 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0817 20:45:58.663208 17350 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0817 20:46:25.564002 17350 solver.cpp:404]     Test net output #0: accuracy = 0.60564\nI0817 20:46:25.564069 17350 solver.cpp:404]     Test net output #1: loss = 2.63851 (* 1 = 2.63851 
loss)\nI0817 20:46:25.988418 17350 solver.cpp:228] Iteration 20800, loss = 0.104038\nI0817 20:46:25.988474 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:46:25.988492 17350 solver.cpp:244]     Train net output #1: loss = 0.104038 (* 1 = 0.104038 loss)\nI0817 20:46:26.061821 17350 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0817 20:47:13.481842 17350 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0817 20:47:40.221454 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7008\nI0817 20:47:40.221515 17350 solver.cpp:404]     Test net output #1: loss = 1.47945 (* 1 = 1.47945 loss)\nI0817 20:47:40.646001 17350 solver.cpp:228] Iteration 20900, loss = 0.0399714\nI0817 20:47:40.646057 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 20:47:40.646075 17350 solver.cpp:244]     Train net output #1: loss = 0.039971 (* 1 = 0.039971 loss)\nI0817 20:47:40.723589 17350 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0817 20:48:28.067869 17350 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0817 20:48:54.841325 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76768\nI0817 20:48:54.841377 17350 solver.cpp:404]     Test net output #1: loss = 1.04405 (* 1 = 1.04405 loss)\nI0817 20:48:55.266041 17350 solver.cpp:228] Iteration 21000, loss = 0.15009\nI0817 20:48:55.266098 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 20:48:55.266116 17350 solver.cpp:244]     Train net output #1: loss = 0.15009 (* 1 = 0.15009 loss)\nI0817 20:48:55.339129 17350 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0817 20:49:42.669229 17350 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0817 20:50:09.478323 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80104\nI0817 20:50:09.478374 17350 solver.cpp:404]     Test net output #1: loss = 0.806798 (* 1 = 0.806798 loss)\nI0817 20:50:09.902701 17350 solver.cpp:228] Iteration 21100, loss = 0.0916739\nI0817 20:50:09.902760 17350 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0817 20:50:09.902777 17350 solver.cpp:244]     Train net output #1: loss = 0.0916735 (* 1 = 0.0916735 loss)\nI0817 20:50:09.982887 17350 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0817 20:50:57.318397 17350 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0817 20:51:24.052942 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72\nI0817 20:51:24.053004 17350 solver.cpp:404]     Test net output #1: loss = 1.34875 (* 1 = 1.34875 loss)\nI0817 20:51:24.477283 17350 solver.cpp:228] Iteration 21200, loss = 0.0443064\nI0817 20:51:24.477341 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 20:51:24.477358 17350 solver.cpp:244]     Train net output #1: loss = 0.044306 (* 1 = 0.044306 loss)\nI0817 20:51:24.556458 17350 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0817 20:52:11.865520 17350 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0817 20:52:38.696357 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71324\nI0817 20:52:38.696408 17350 solver.cpp:404]     Test net output #1: loss = 1.42837 (* 1 = 1.42837 loss)\nI0817 20:52:39.120957 17350 solver.cpp:228] Iteration 21300, loss = 0.0685322\nI0817 20:52:39.121018 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:52:39.121035 17350 solver.cpp:244]     Train net output #1: loss = 0.0685318 (* 1 = 0.0685318 loss)\nI0817 20:52:39.196008 17350 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0817 20:53:26.527110 17350 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0817 20:53:53.220346 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75496\nI0817 20:53:53.220398 17350 solver.cpp:404]     Test net output #1: loss = 1.13549 (* 1 = 1.13549 loss)\nI0817 20:53:53.644908 17350 solver.cpp:228] Iteration 21400, loss = 0.0582713\nI0817 20:53:53.644971 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:53:53.644989 17350 solver.cpp:244]     Train net output #1: loss = 0.0582708 (* 1 = 
0.0582708 loss)\nI0817 20:53:53.719107 17350 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0817 20:54:41.030819 17350 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0817 20:55:07.902114 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69572\nI0817 20:55:07.902184 17350 solver.cpp:404]     Test net output #1: loss = 1.57276 (* 1 = 1.57276 loss)\nI0817 20:55:08.326992 17350 solver.cpp:228] Iteration 21500, loss = 0.114137\nI0817 20:55:08.327049 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 20:55:08.327066 17350 solver.cpp:244]     Train net output #1: loss = 0.114136 (* 1 = 0.114136 loss)\nI0817 20:55:08.402616 17350 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0817 20:55:55.781399 17350 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0817 20:56:22.700007 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70916\nI0817 20:56:22.700078 17350 solver.cpp:404]     Test net output #1: loss = 1.66938 (* 1 = 1.66938 loss)\nI0817 20:56:23.124766 17350 solver.cpp:228] Iteration 21600, loss = 0.0672907\nI0817 20:56:23.124825 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 20:56:23.124842 17350 solver.cpp:244]     Train net output #1: loss = 0.0672903 (* 1 = 0.0672903 loss)\nI0817 20:56:23.197412 17350 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0817 20:57:10.529417 17350 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0817 20:57:37.443485 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77472\nI0817 20:57:37.443554 17350 solver.cpp:404]     Test net output #1: loss = 1.10641 (* 1 = 1.10641 loss)\nI0817 20:57:37.868376 17350 solver.cpp:228] Iteration 21700, loss = 0.0573653\nI0817 20:57:37.868434 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:57:37.868453 17350 solver.cpp:244]     Train net output #1: loss = 0.0573649 (* 1 = 0.0573649 loss)\nI0817 20:57:37.949349 17350 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0817 20:58:25.326663 17350 
solver.cpp:337] Iteration 21800, Testing net (#0)\nI0817 20:58:52.236999 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72544\nI0817 20:58:52.237062 17350 solver.cpp:404]     Test net output #1: loss = 1.49744 (* 1 = 1.49744 loss)\nI0817 20:58:52.661761 17350 solver.cpp:228] Iteration 21800, loss = 0.0637025\nI0817 20:58:52.661819 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 20:58:52.661836 17350 solver.cpp:244]     Train net output #1: loss = 0.0637021 (* 1 = 0.0637021 loss)\nI0817 20:58:52.740483 17350 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0817 20:59:40.048359 17350 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0817 21:00:06.962661 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7458\nI0817 21:00:06.962726 17350 solver.cpp:404]     Test net output #1: loss = 1.16707 (* 1 = 1.16707 loss)\nI0817 21:00:07.387279 17350 solver.cpp:228] Iteration 21900, loss = 0.0654494\nI0817 21:00:07.387334 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:00:07.387352 17350 solver.cpp:244]     Train net output #1: loss = 0.065449 (* 1 = 0.065449 loss)\nI0817 21:00:07.465832 17350 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0817 21:00:54.793473 17350 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0817 21:01:21.707693 17350 solver.cpp:404]     Test net output #0: accuracy = 0.61728\nI0817 21:01:21.707759 17350 solver.cpp:404]     Test net output #1: loss = 2.52465 (* 1 = 2.52465 loss)\nI0817 21:01:22.132222 17350 solver.cpp:228] Iteration 22000, loss = 0.0691385\nI0817 21:01:22.132278 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:01:22.132297 17350 solver.cpp:244]     Train net output #1: loss = 0.0691381 (* 1 = 0.0691381 loss)\nI0817 21:01:22.208148 17350 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0817 21:02:09.560736 17350 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0817 21:02:36.500625 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.73276\nI0817 21:02:36.500696 17350 solver.cpp:404]     Test net output #1: loss = 1.31557 (* 1 = 1.31557 loss)\nI0817 21:02:36.924147 17350 solver.cpp:228] Iteration 22100, loss = 0.0765462\nI0817 21:02:36.924203 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:02:36.924221 17350 solver.cpp:244]     Train net output #1: loss = 0.0765458 (* 1 = 0.0765458 loss)\nI0817 21:02:37.008275 17350 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0817 21:03:24.376478 17350 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0817 21:03:51.289917 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65512\nI0817 21:03:51.289994 17350 solver.cpp:404]     Test net output #1: loss = 1.98456 (* 1 = 1.98456 loss)\nI0817 21:03:51.713337 17350 solver.cpp:228] Iteration 22200, loss = 0.0217208\nI0817 21:03:51.713392 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:03:51.713408 17350 solver.cpp:244]     Train net output #1: loss = 0.0217204 (* 1 = 0.0217204 loss)\nI0817 21:03:51.794514 17350 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0817 21:04:39.116196 17350 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0817 21:05:06.033634 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75868\nI0817 21:05:06.033704 17350 solver.cpp:404]     Test net output #1: loss = 1.11863 (* 1 = 1.11863 loss)\nI0817 21:05:06.457198 17350 solver.cpp:228] Iteration 22300, loss = 0.087811\nI0817 21:05:06.457238 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:05:06.457255 17350 solver.cpp:244]     Train net output #1: loss = 0.0878106 (* 1 = 0.0878106 loss)\nI0817 21:05:06.535845 17350 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0817 21:05:53.841848 17350 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0817 21:06:20.758649 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72004\nI0817 21:06:20.758721 17350 solver.cpp:404]     Test net output #1: loss = 1.36608 (* 1 = 1.36608 loss)\nI0817 
21:06:21.182162 17350 solver.cpp:228] Iteration 22400, loss = 0.0565195\nI0817 21:06:21.182206 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:06:21.182224 17350 solver.cpp:244]     Train net output #1: loss = 0.0565192 (* 1 = 0.0565192 loss)\nI0817 21:06:21.261049 17350 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0817 21:07:08.646244 17350 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0817 21:07:35.563801 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7656\nI0817 21:07:35.563871 17350 solver.cpp:404]     Test net output #1: loss = 1.12502 (* 1 = 1.12502 loss)\nI0817 21:07:35.987795 17350 solver.cpp:228] Iteration 22500, loss = 0.0571227\nI0817 21:07:35.987838 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:07:35.987854 17350 solver.cpp:244]     Train net output #1: loss = 0.0571223 (* 1 = 0.0571223 loss)\nI0817 21:07:36.063402 17350 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0817 21:08:23.545696 17350 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0817 21:08:50.465561 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81004\nI0817 21:08:50.465633 17350 solver.cpp:404]     Test net output #1: loss = 0.822001 (* 1 = 0.822001 loss)\nI0817 21:08:50.889263 17350 solver.cpp:228] Iteration 22600, loss = 0.0879347\nI0817 21:08:50.889303 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:08:50.889319 17350 solver.cpp:244]     Train net output #1: loss = 0.0879343 (* 1 = 0.0879343 loss)\nI0817 21:08:50.963102 17350 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0817 21:09:38.429976 17350 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0817 21:10:05.347733 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77332\nI0817 21:10:05.347800 17350 solver.cpp:404]     Test net output #1: loss = 1.08892 (* 1 = 1.08892 loss)\nI0817 21:10:05.770809 17350 solver.cpp:228] Iteration 22700, loss = 0.0347178\nI0817 21:10:05.770855 17350 solver.cpp:244]     Train 
net output #0: accuracy = 0.992\nI0817 21:10:05.770872 17350 solver.cpp:244]     Train net output #1: loss = 0.0347174 (* 1 = 0.0347174 loss)\nI0817 21:10:05.846987 17350 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0817 21:10:53.215682 17350 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0817 21:11:20.133512 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80312\nI0817 21:11:20.133584 17350 solver.cpp:404]     Test net output #1: loss = 0.932933 (* 1 = 0.932933 loss)\nI0817 21:11:20.556926 17350 solver.cpp:228] Iteration 22800, loss = 0.0849747\nI0817 21:11:20.556975 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:11:20.556993 17350 solver.cpp:244]     Train net output #1: loss = 0.0849743 (* 1 = 0.0849743 loss)\nI0817 21:11:20.640594 17350 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0817 21:12:07.997589 17350 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0817 21:12:34.916314 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78792\nI0817 21:12:34.916388 17350 solver.cpp:404]     Test net output #1: loss = 0.979225 (* 1 = 0.979225 loss)\nI0817 21:12:35.340070 17350 solver.cpp:228] Iteration 22900, loss = 0.071969\nI0817 21:12:35.340121 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:12:35.340137 17350 solver.cpp:244]     Train net output #1: loss = 0.0719686 (* 1 = 0.0719686 loss)\nI0817 21:12:35.417258 17350 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0817 21:13:22.705773 17350 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0817 21:13:49.316572 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71844\nI0817 21:13:49.316612 17350 solver.cpp:404]     Test net output #1: loss = 1.45065 (* 1 = 1.45065 loss)\nI0817 21:13:49.739915 17350 solver.cpp:228] Iteration 23000, loss = 0.0867742\nI0817 21:13:49.739960 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:13:49.739977 17350 solver.cpp:244]     Train net output #1: loss = 0.0867738 (* 1 
= 0.0867738 loss)\nI0817 21:13:49.816673 17350 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0817 21:14:37.115360 17350 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0817 21:15:03.733006 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7632\nI0817 21:15:03.733045 17350 solver.cpp:404]     Test net output #1: loss = 1.2049 (* 1 = 1.2049 loss)\nI0817 21:15:04.156285 17350 solver.cpp:228] Iteration 23100, loss = 0.0852506\nI0817 21:15:04.156328 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:15:04.156345 17350 solver.cpp:244]     Train net output #1: loss = 0.0852502 (* 1 = 0.0852502 loss)\nI0817 21:15:04.232898 17350 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0817 21:15:51.407708 17350 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0817 21:16:18.020768 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73876\nI0817 21:16:18.020808 17350 solver.cpp:404]     Test net output #1: loss = 1.23949 (* 1 = 1.23949 loss)\nI0817 21:16:18.444104 17350 solver.cpp:228] Iteration 23200, loss = 0.0458787\nI0817 21:16:18.444147 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:16:18.444165 17350 solver.cpp:244]     Train net output #1: loss = 0.0458783 (* 1 = 0.0458783 loss)\nI0817 21:16:18.523746 17350 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0817 21:17:05.629321 17350 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0817 21:17:32.241621 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72772\nI0817 21:17:32.241658 17350 solver.cpp:404]     Test net output #1: loss = 1.18879 (* 1 = 1.18879 loss)\nI0817 21:17:32.663858 17350 solver.cpp:228] Iteration 23300, loss = 0.0457593\nI0817 21:17:32.663902 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:17:32.663918 17350 solver.cpp:244]     Train net output #1: loss = 0.0457589 (* 1 = 0.0457589 loss)\nI0817 21:17:32.739565 17350 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0817 21:18:19.849414 17350 
solver.cpp:337] Iteration 23400, Testing net (#0)\nI0817 21:18:46.461792 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77204\nI0817 21:18:46.461833 17350 solver.cpp:404]     Test net output #1: loss = 0.994323 (* 1 = 0.994323 loss)\nI0817 21:18:46.884001 17350 solver.cpp:228] Iteration 23400, loss = 0.10327\nI0817 21:18:46.884037 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:18:46.884053 17350 solver.cpp:244]     Train net output #1: loss = 0.10327 (* 1 = 0.10327 loss)\nI0817 21:18:46.961203 17350 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0817 21:19:34.042912 17350 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0817 21:20:00.653689 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70256\nI0817 21:20:00.653728 17350 solver.cpp:404]     Test net output #1: loss = 1.65226 (* 1 = 1.65226 loss)\nI0817 21:20:01.077180 17350 solver.cpp:228] Iteration 23500, loss = 0.0468101\nI0817 21:20:01.077217 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:20:01.077234 17350 solver.cpp:244]     Train net output #1: loss = 0.0468097 (* 1 = 0.0468097 loss)\nI0817 21:20:01.153710 17350 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0817 21:20:48.258553 17350 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0817 21:21:14.877127 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77968\nI0817 21:21:14.877166 17350 solver.cpp:404]     Test net output #1: loss = 1.10143 (* 1 = 1.10143 loss)\nI0817 21:21:15.300985 17350 solver.cpp:228] Iteration 23600, loss = 0.0524184\nI0817 21:21:15.301020 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:21:15.301036 17350 solver.cpp:244]     Train net output #1: loss = 0.052418 (* 1 = 0.052418 loss)\nI0817 21:21:15.384380 17350 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0817 21:22:02.487541 17350 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0817 21:22:29.108263 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.73844\nI0817 21:22:29.108309 17350 solver.cpp:404]     Test net output #1: loss = 1.25514 (* 1 = 1.25514 loss)\nI0817 21:22:29.531790 17350 solver.cpp:228] Iteration 23700, loss = 0.108967\nI0817 21:22:29.531827 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:22:29.531843 17350 solver.cpp:244]     Train net output #1: loss = 0.108967 (* 1 = 0.108967 loss)\nI0817 21:22:29.608840 17350 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0817 21:23:16.714068 17350 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0817 21:23:43.330339 17350 solver.cpp:404]     Test net output #0: accuracy = 0.681\nI0817 21:23:43.330379 17350 solver.cpp:404]     Test net output #1: loss = 1.95135 (* 1 = 1.95135 loss)\nI0817 21:23:43.753665 17350 solver.cpp:228] Iteration 23800, loss = 0.1331\nI0817 21:23:43.753702 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 21:23:43.753720 17350 solver.cpp:244]     Train net output #1: loss = 0.133099 (* 1 = 0.133099 loss)\nI0817 21:23:43.832309 17350 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0817 21:24:30.895512 17350 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0817 21:24:57.517846 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76576\nI0817 21:24:57.517890 17350 solver.cpp:404]     Test net output #1: loss = 1.13219 (* 1 = 1.13219 loss)\nI0817 21:24:57.941325 17350 solver.cpp:228] Iteration 23900, loss = 0.0580424\nI0817 21:24:57.941365 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:24:57.941390 17350 solver.cpp:244]     Train net output #1: loss = 0.058042 (* 1 = 0.058042 loss)\nI0817 21:24:58.024286 17350 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0817 21:25:45.173832 17350 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0817 21:26:11.798887 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77812\nI0817 21:26:11.798923 17350 solver.cpp:404]     Test net output #1: loss = 1.00137 (* 1 = 1.00137 loss)\nI0817 21:26:12.221508 
17350 solver.cpp:228] Iteration 24000, loss = 0.0352798\nI0817 21:26:12.221545 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:26:12.221562 17350 solver.cpp:244]     Train net output #1: loss = 0.0352794 (* 1 = 0.0352794 loss)\nI0817 21:26:12.302378 17350 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0817 21:26:59.420132 17350 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0817 21:27:26.040683 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77444\nI0817 21:27:26.040721 17350 solver.cpp:404]     Test net output #1: loss = 1.01933 (* 1 = 1.01933 loss)\nI0817 21:27:26.463552 17350 solver.cpp:228] Iteration 24100, loss = 0.0222508\nI0817 21:27:26.463593 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:27:26.463610 17350 solver.cpp:244]     Train net output #1: loss = 0.0222505 (* 1 = 0.0222505 loss)\nI0817 21:27:26.543416 17350 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0817 21:28:13.673010 17350 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0817 21:28:40.293612 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0817 21:28:40.293653 17350 solver.cpp:404]     Test net output #1: loss = 0.813745 (* 1 = 0.813745 loss)\nI0817 21:28:40.716292 17350 solver.cpp:228] Iteration 24200, loss = 0.17797\nI0817 21:28:40.716331 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:28:40.716346 17350 solver.cpp:244]     Train net output #1: loss = 0.17797 (* 1 = 0.17797 loss)\nI0817 21:28:40.795063 17350 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0817 21:29:27.952703 17350 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0817 21:29:54.571622 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76708\nI0817 21:29:54.571662 17350 solver.cpp:404]     Test net output #1: loss = 1.08552 (* 1 = 1.08552 loss)\nI0817 21:29:54.994639 17350 solver.cpp:228] Iteration 24300, loss = 0.114742\nI0817 21:29:54.994684 17350 solver.cpp:244]     Train net output #0: accuracy 
= 0.936\nI0817 21:29:54.994701 17350 solver.cpp:244]     Train net output #1: loss = 0.114742 (* 1 = 0.114742 loss)\nI0817 21:29:55.077989 17350 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0817 21:30:42.240190 17350 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0817 21:31:08.862483 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7904\nI0817 21:31:08.862522 17350 solver.cpp:404]     Test net output #1: loss = 0.853289 (* 1 = 0.853289 loss)\nI0817 21:31:09.284956 17350 solver.cpp:228] Iteration 24400, loss = 0.0478671\nI0817 21:31:09.285001 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:31:09.285018 17350 solver.cpp:244]     Train net output #1: loss = 0.0478668 (* 1 = 0.0478668 loss)\nI0817 21:31:09.366070 17350 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0817 21:31:56.563819 17350 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0817 21:32:23.183887 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75188\nI0817 21:32:23.183936 17350 solver.cpp:404]     Test net output #1: loss = 1.15531 (* 1 = 1.15531 loss)\nI0817 21:32:23.606317 17350 solver.cpp:228] Iteration 24500, loss = 0.029144\nI0817 21:32:23.606362 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 21:32:23.606379 17350 solver.cpp:244]     Train net output #1: loss = 0.0291436 (* 1 = 0.0291436 loss)\nI0817 21:32:23.690327 17350 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0817 21:33:10.812686 17350 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0817 21:33:37.438424 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7466\nI0817 21:33:37.438463 17350 solver.cpp:404]     Test net output #1: loss = 1.29947 (* 1 = 1.29947 loss)\nI0817 21:33:37.862042 17350 solver.cpp:228] Iteration 24600, loss = 0.0692268\nI0817 21:33:37.862087 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:33:37.862104 17350 solver.cpp:244]     Train net output #1: loss = 0.0692265 (* 1 = 0.0692265 loss)\nI0817 
21:33:37.941951 17350 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0817 21:34:25.097971 17350 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0817 21:34:51.720286 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76324\nI0817 21:34:51.720325 17350 solver.cpp:404]     Test net output #1: loss = 1.19316 (* 1 = 1.19316 loss)\nI0817 21:34:52.143652 17350 solver.cpp:228] Iteration 24700, loss = 0.0803118\nI0817 21:34:52.143697 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:34:52.143713 17350 solver.cpp:244]     Train net output #1: loss = 0.0803115 (* 1 = 0.0803115 loss)\nI0817 21:34:52.221719 17350 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0817 21:35:39.407683 17350 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0817 21:36:06.028673 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76472\nI0817 21:36:06.028714 17350 solver.cpp:404]     Test net output #1: loss = 1.06547 (* 1 = 1.06547 loss)\nI0817 21:36:06.451666 17350 solver.cpp:228] Iteration 24800, loss = 0.0534816\nI0817 21:36:06.451710 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:36:06.451728 17350 solver.cpp:244]     Train net output #1: loss = 0.0534813 (* 1 = 0.0534813 loss)\nI0817 21:36:06.536139 17350 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0817 21:36:53.681850 17350 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0817 21:37:20.297466 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71776\nI0817 21:37:20.297504 17350 solver.cpp:404]     Test net output #1: loss = 1.5185 (* 1 = 1.5185 loss)\nI0817 21:37:20.721319 17350 solver.cpp:228] Iteration 24900, loss = 0.10048\nI0817 21:37:20.721364 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:37:20.721380 17350 solver.cpp:244]     Train net output #1: loss = 0.10048 (* 1 = 0.10048 loss)\nI0817 21:37:20.801592 17350 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0817 21:38:07.904811 17350 solver.cpp:337] Iteration 25000, 
Testing net (#0)\nI0817 21:38:34.523679 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77552\nI0817 21:38:34.523718 17350 solver.cpp:404]     Test net output #1: loss = 0.947096 (* 1 = 0.947096 loss)\nI0817 21:38:34.946225 17350 solver.cpp:228] Iteration 25000, loss = 0.0970898\nI0817 21:38:34.946267 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:38:34.946283 17350 solver.cpp:244]     Train net output #1: loss = 0.0970895 (* 1 = 0.0970895 loss)\nI0817 21:38:35.028879 17350 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0817 21:39:22.216022 17350 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0817 21:39:48.832849 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76112\nI0817 21:39:48.832890 17350 solver.cpp:404]     Test net output #1: loss = 1.0911 (* 1 = 1.0911 loss)\nI0817 21:39:49.255218 17350 solver.cpp:228] Iteration 25100, loss = 0.151464\nI0817 21:39:49.255261 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:39:49.255278 17350 solver.cpp:244]     Train net output #1: loss = 0.151463 (* 1 = 0.151463 loss)\nI0817 21:39:49.331528 17350 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0817 21:40:36.429285 17350 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0817 21:41:03.046890 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74644\nI0817 21:41:03.046936 17350 solver.cpp:404]     Test net output #1: loss = 1.23098 (* 1 = 1.23098 loss)\nI0817 21:41:03.469564 17350 solver.cpp:228] Iteration 25200, loss = 0.0582227\nI0817 21:41:03.469607 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:41:03.469624 17350 solver.cpp:244]     Train net output #1: loss = 0.0582224 (* 1 = 0.0582224 loss)\nI0817 21:41:03.544502 17350 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0817 21:41:50.700891 17350 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0817 21:42:17.320549 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7814\nI0817 21:42:17.320600 
17350 solver.cpp:404]     Test net output #1: loss = 1.02666 (* 1 = 1.02666 loss)\nI0817 21:42:17.744262 17350 solver.cpp:228] Iteration 25300, loss = 0.0641855\nI0817 21:42:17.744305 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:42:17.744323 17350 solver.cpp:244]     Train net output #1: loss = 0.0641851 (* 1 = 0.0641851 loss)\nI0817 21:42:17.825016 17350 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0817 21:43:04.926417 17350 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0817 21:43:31.542771 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78504\nI0817 21:43:31.542811 17350 solver.cpp:404]     Test net output #1: loss = 0.977229 (* 1 = 0.977229 loss)\nI0817 21:43:31.966195 17350 solver.cpp:228] Iteration 25400, loss = 0.0690749\nI0817 21:43:31.966239 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:43:31.966258 17350 solver.cpp:244]     Train net output #1: loss = 0.0690745 (* 1 = 0.0690745 loss)\nI0817 21:43:32.047361 17350 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0817 21:44:19.143627 17350 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0817 21:44:45.759670 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77316\nI0817 21:44:45.759709 17350 solver.cpp:404]     Test net output #1: loss = 0.968054 (* 1 = 0.968054 loss)\nI0817 21:44:46.181982 17350 solver.cpp:228] Iteration 25500, loss = 0.0908928\nI0817 21:44:46.182025 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:44:46.182042 17350 solver.cpp:244]     Train net output #1: loss = 0.0908924 (* 1 = 0.0908924 loss)\nI0817 21:44:46.264850 17350 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0817 21:45:33.420487 17350 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0817 21:46:00.036516 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79312\nI0817 21:46:00.036557 17350 solver.cpp:404]     Test net output #1: loss = 0.952387 (* 1 = 0.952387 loss)\nI0817 21:46:00.459014 17350 
solver.cpp:228] Iteration 25600, loss = 0.0736435\nI0817 21:46:00.459048 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:46:00.459064 17350 solver.cpp:244]     Train net output #1: loss = 0.0736431 (* 1 = 0.0736431 loss)\nI0817 21:46:00.539624 17350 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0817 21:46:47.685978 17350 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0817 21:47:14.302531 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76268\nI0817 21:47:14.302572 17350 solver.cpp:404]     Test net output #1: loss = 1.27855 (* 1 = 1.27855 loss)\nI0817 21:47:14.724769 17350 solver.cpp:228] Iteration 25700, loss = 0.0597073\nI0817 21:47:14.724799 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:47:14.724815 17350 solver.cpp:244]     Train net output #1: loss = 0.059707 (* 1 = 0.059707 loss)\nI0817 21:47:14.803684 17350 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0817 21:48:01.999418 17350 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0817 21:48:28.617154 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI0817 21:48:28.617207 17350 solver.cpp:404]     Test net output #1: loss = 0.847153 (* 1 = 0.847153 loss)\nI0817 21:48:29.039816 17350 solver.cpp:228] Iteration 25800, loss = 0.145294\nI0817 21:48:29.039851 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 21:48:29.039867 17350 solver.cpp:244]     Train net output #1: loss = 0.145294 (* 1 = 0.145294 loss)\nI0817 21:48:29.119341 17350 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0817 21:49:16.317615 17350 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0817 21:49:42.928688 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76372\nI0817 21:49:42.928740 17350 solver.cpp:404]     Test net output #1: loss = 1.08934 (* 1 = 1.08934 loss)\nI0817 21:49:43.351076 17350 solver.cpp:228] Iteration 25900, loss = 0.0968711\nI0817 21:49:43.351107 17350 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI0817 21:49:43.351125 17350 solver.cpp:244]     Train net output #1: loss = 0.0968707 (* 1 = 0.0968707 loss)\nI0817 21:49:43.427865 17350 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0817 21:50:30.569499 17350 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0817 21:50:57.183719 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68636\nI0817 21:50:57.183769 17350 solver.cpp:404]     Test net output #1: loss = 1.77606 (* 1 = 1.77606 loss)\nI0817 21:50:57.606194 17350 solver.cpp:228] Iteration 26000, loss = 0.0893558\nI0817 21:50:57.606221 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:50:57.606237 17350 solver.cpp:244]     Train net output #1: loss = 0.0893554 (* 1 = 0.0893554 loss)\nI0817 21:50:57.680593 17350 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0817 21:51:44.882261 17350 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0817 21:52:11.497107 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79264\nI0817 21:52:11.497159 17350 solver.cpp:404]     Test net output #1: loss = 0.946856 (* 1 = 0.946856 loss)\nI0817 21:52:11.919514 17350 solver.cpp:228] Iteration 26100, loss = 0.0542242\nI0817 21:52:11.919548 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 21:52:11.919564 17350 solver.cpp:244]     Train net output #1: loss = 0.0542239 (* 1 = 0.0542239 loss)\nI0817 21:52:11.999395 17350 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0817 21:52:59.227301 17350 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0817 21:53:25.842553 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75468\nI0817 21:53:25.842603 17350 solver.cpp:404]     Test net output #1: loss = 1.00543 (* 1 = 1.00543 loss)\nI0817 21:53:26.264984 17350 solver.cpp:228] Iteration 26200, loss = 0.0825462\nI0817 21:53:26.265015 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 21:53:26.265033 17350 solver.cpp:244]     Train net output #1: loss = 0.0825459 (* 1 = 0.0825459 loss)\nI0817 
21:53:26.350265 17350 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0817 21:54:13.586921 17350 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0817 21:54:40.198179 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78996\nI0817 21:54:40.198230 17350 solver.cpp:404]     Test net output #1: loss = 0.804923 (* 1 = 0.804923 loss)\nI0817 21:54:40.620522 17350 solver.cpp:228] Iteration 26300, loss = 0.10438\nI0817 21:54:40.620554 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 21:54:40.620570 17350 solver.cpp:244]     Train net output #1: loss = 0.104379 (* 1 = 0.104379 loss)\nI0817 21:54:40.698271 17350 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0817 21:55:27.944349 17350 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0817 21:55:54.551676 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73812\nI0817 21:55:54.551725 17350 solver.cpp:404]     Test net output #1: loss = 1.36135 (* 1 = 1.36135 loss)\nI0817 21:55:54.973763 17350 solver.cpp:228] Iteration 26400, loss = 0.0252827\nI0817 21:55:54.973794 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 21:55:54.973810 17350 solver.cpp:244]     Train net output #1: loss = 0.0252823 (* 1 = 0.0252823 loss)\nI0817 21:55:55.053010 17350 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0817 21:56:42.306295 17350 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0817 21:57:08.918123 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81808\nI0817 21:57:08.918174 17350 solver.cpp:404]     Test net output #1: loss = 0.801912 (* 1 = 0.801912 loss)\nI0817 21:57:09.340353 17350 solver.cpp:228] Iteration 26500, loss = 0.0787224\nI0817 21:57:09.340384 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:57:09.340400 17350 solver.cpp:244]     Train net output #1: loss = 0.078722 (* 1 = 0.078722 loss)\nI0817 21:57:09.423233 17350 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0817 21:57:56.672181 17350 solver.cpp:337] Iteration 26600, 
Testing net (#0)\nI0817 21:58:23.284247 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78396\nI0817 21:58:23.284298 17350 solver.cpp:404]     Test net output #1: loss = 0.957616 (* 1 = 0.957616 loss)\nI0817 21:58:23.706362 17350 solver.cpp:228] Iteration 26600, loss = 0.0823793\nI0817 21:58:23.706393 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 21:58:23.706408 17350 solver.cpp:244]     Train net output #1: loss = 0.0823789 (* 1 = 0.0823789 loss)\nI0817 21:58:23.788753 17350 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0817 21:59:11.017781 17350 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0817 21:59:37.627820 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7744\nI0817 21:59:37.627873 17350 solver.cpp:404]     Test net output #1: loss = 1.02195 (* 1 = 1.02195 loss)\nI0817 21:59:38.050245 17350 solver.cpp:228] Iteration 26700, loss = 0.0957948\nI0817 21:59:38.050276 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 21:59:38.050292 17350 solver.cpp:244]     Train net output #1: loss = 0.0957944 (* 1 = 0.0957944 loss)\nI0817 21:59:38.128612 17350 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0817 22:00:25.406129 17350 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0817 22:00:52.016893 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77016\nI0817 22:00:52.016944 17350 solver.cpp:404]     Test net output #1: loss = 0.950727 (* 1 = 0.950727 loss)\nI0817 22:00:52.439344 17350 solver.cpp:228] Iteration 26800, loss = 0.100839\nI0817 22:00:52.439379 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:00:52.439395 17350 solver.cpp:244]     Train net output #1: loss = 0.100838 (* 1 = 0.100838 loss)\nI0817 22:00:52.517024 17350 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0817 22:01:39.776417 17350 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0817 22:02:06.387816 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75676\nI0817 22:02:06.387866 
17350 solver.cpp:404]     Test net output #1: loss = 1.15407 (* 1 = 1.15407 loss)\nI0817 22:02:06.810412 17350 solver.cpp:228] Iteration 26900, loss = 0.0645953\nI0817 22:02:06.810447 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:02:06.810463 17350 solver.cpp:244]     Train net output #1: loss = 0.0645949 (* 1 = 0.0645949 loss)\nI0817 22:02:06.889092 17350 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0817 22:02:54.102645 17350 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0817 22:03:20.725503 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75592\nI0817 22:03:20.725558 17350 solver.cpp:404]     Test net output #1: loss = 1.09825 (* 1 = 1.09825 loss)\nI0817 22:03:21.147871 17350 solver.cpp:228] Iteration 27000, loss = 0.098155\nI0817 22:03:21.147917 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:03:21.147941 17350 solver.cpp:244]     Train net output #1: loss = 0.0981546 (* 1 = 0.0981546 loss)\nI0817 22:03:21.231029 17350 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0817 22:04:08.433789 17350 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0817 22:04:35.049753 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7298\nI0817 22:04:35.049815 17350 solver.cpp:404]     Test net output #1: loss = 1.31727 (* 1 = 1.31727 loss)\nI0817 22:04:35.472982 17350 solver.cpp:228] Iteration 27100, loss = 0.0515994\nI0817 22:04:35.473028 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:04:35.473053 17350 solver.cpp:244]     Train net output #1: loss = 0.051599 (* 1 = 0.051599 loss)\nI0817 22:04:35.547245 17350 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0817 22:05:22.800581 17350 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0817 22:05:49.413554 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7728\nI0817 22:05:49.413609 17350 solver.cpp:404]     Test net output #1: loss = 1.16973 (* 1 = 1.16973 loss)\nI0817 22:05:49.837131 17350 solver.cpp:228] 
Iteration 27200, loss = 0.04904\nI0817 22:05:49.837175 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:05:49.837200 17350 solver.cpp:244]     Train net output #1: loss = 0.0490397 (* 1 = 0.0490397 loss)\nI0817 22:05:49.914314 17350 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0817 22:06:37.117350 17350 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0817 22:07:03.730703 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72504\nI0817 22:07:03.730753 17350 solver.cpp:404]     Test net output #1: loss = 1.47019 (* 1 = 1.47019 loss)\nI0817 22:07:04.152881 17350 solver.cpp:228] Iteration 27300, loss = 0.0337772\nI0817 22:07:04.152917 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:07:04.152933 17350 solver.cpp:244]     Train net output #1: loss = 0.0337768 (* 1 = 0.0337768 loss)\nI0817 22:07:04.236156 17350 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0817 22:07:51.527976 17350 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0817 22:08:18.138540 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79536\nI0817 22:08:18.138597 17350 solver.cpp:404]     Test net output #1: loss = 0.904417 (* 1 = 0.904417 loss)\nI0817 22:08:18.561792 17350 solver.cpp:228] Iteration 27400, loss = 0.0682066\nI0817 22:08:18.561830 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:08:18.561853 17350 solver.cpp:244]     Train net output #1: loss = 0.0682062 (* 1 = 0.0682062 loss)\nI0817 22:08:18.643229 17350 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0817 22:09:05.847734 17350 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0817 22:09:32.451234 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0817 22:09:32.451284 17350 solver.cpp:404]     Test net output #1: loss = 1.56227 (* 1 = 1.56227 loss)\nI0817 22:09:32.874696 17350 solver.cpp:228] Iteration 27500, loss = 0.0425389\nI0817 22:09:32.874732 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 
22:09:32.874749 17350 solver.cpp:244]     Train net output #1: loss = 0.0425385 (* 1 = 0.0425385 loss)\nI0817 22:09:32.952515 17350 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0817 22:10:20.148341 17350 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0817 22:10:46.754636 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75776\nI0817 22:10:46.754685 17350 solver.cpp:404]     Test net output #1: loss = 1.39513 (* 1 = 1.39513 loss)\nI0817 22:10:47.177880 17350 solver.cpp:228] Iteration 27600, loss = 0.0513577\nI0817 22:10:47.177924 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:10:47.177942 17350 solver.cpp:244]     Train net output #1: loss = 0.0513573 (* 1 = 0.0513573 loss)\nI0817 22:10:47.258522 17350 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0817 22:11:34.445861 17350 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0817 22:12:01.054405 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77192\nI0817 22:12:01.054455 17350 solver.cpp:404]     Test net output #1: loss = 0.984083 (* 1 = 0.984083 loss)\nI0817 22:12:01.477845 17350 solver.cpp:228] Iteration 27700, loss = 0.0976543\nI0817 22:12:01.477890 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:12:01.477906 17350 solver.cpp:244]     Train net output #1: loss = 0.097654 (* 1 = 0.097654 loss)\nI0817 22:12:01.555482 17350 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0817 22:12:48.778627 17350 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0817 22:13:15.394176 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79072\nI0817 22:13:15.394227 17350 solver.cpp:404]     Test net output #1: loss = 0.903333 (* 1 = 0.903333 loss)\nI0817 22:13:15.816642 17350 solver.cpp:228] Iteration 27800, loss = 0.0717811\nI0817 22:13:15.816679 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:13:15.816694 17350 solver.cpp:244]     Train net output #1: loss = 0.0717807 (* 1 = 0.0717807 loss)\nI0817 22:13:15.896852 
17350 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0817 22:14:03.075450 17350 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0817 22:14:29.695660 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78524\nI0817 22:14:29.695711 17350 solver.cpp:404]     Test net output #1: loss = 0.882669 (* 1 = 0.882669 loss)\nI0817 22:14:30.118495 17350 solver.cpp:228] Iteration 27900, loss = 0.0544108\nI0817 22:14:30.118531 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:14:30.118547 17350 solver.cpp:244]     Train net output #1: loss = 0.0544104 (* 1 = 0.0544104 loss)\nI0817 22:14:30.194536 17350 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0817 22:15:17.364600 17350 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0817 22:15:43.983842 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77288\nI0817 22:15:43.983893 17350 solver.cpp:404]     Test net output #1: loss = 1.011 (* 1 = 1.011 loss)\nI0817 22:15:44.407203 17350 solver.cpp:228] Iteration 28000, loss = 0.0714379\nI0817 22:15:44.407238 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:15:44.407253 17350 solver.cpp:244]     Train net output #1: loss = 0.0714375 (* 1 = 0.0714375 loss)\nI0817 22:15:44.491461 17350 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0817 22:16:31.731747 17350 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0817 22:16:58.342664 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7724\nI0817 22:16:58.342715 17350 solver.cpp:404]     Test net output #1: loss = 0.955644 (* 1 = 0.955644 loss)\nI0817 22:16:58.766278 17350 solver.cpp:228] Iteration 28100, loss = 0.0839317\nI0817 22:16:58.766312 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:16:58.766329 17350 solver.cpp:244]     Train net output #1: loss = 0.0839313 (* 1 = 0.0839313 loss)\nI0817 22:16:58.844780 17350 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0817 22:17:46.091874 17350 solver.cpp:337] Iteration 28200, Testing net 
(#0)\nI0817 22:18:12.702364 17350 solver.cpp:404]     Test net output #0: accuracy = 0.747\nI0817 22:18:12.702414 17350 solver.cpp:404]     Test net output #1: loss = 1.14493 (* 1 = 1.14493 loss)\nI0817 22:18:13.125176 17350 solver.cpp:228] Iteration 28200, loss = 0.128836\nI0817 22:18:13.125211 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:18:13.125226 17350 solver.cpp:244]     Train net output #1: loss = 0.128835 (* 1 = 0.128835 loss)\nI0817 22:18:13.200683 17350 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0817 22:19:00.386770 17350 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0817 22:19:26.997150 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76868\nI0817 22:19:26.997200 17350 solver.cpp:404]     Test net output #1: loss = 1.088 (* 1 = 1.088 loss)\nI0817 22:19:27.420557 17350 solver.cpp:228] Iteration 28300, loss = 0.0838927\nI0817 22:19:27.420591 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:19:27.420608 17350 solver.cpp:244]     Train net output #1: loss = 0.0838923 (* 1 = 0.0838923 loss)\nI0817 22:19:27.509953 17350 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0817 22:20:14.708276 17350 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0817 22:20:41.320631 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78084\nI0817 22:20:41.320683 17350 solver.cpp:404]     Test net output #1: loss = 0.910246 (* 1 = 0.910246 loss)\nI0817 22:20:41.743865 17350 solver.cpp:228] Iteration 28400, loss = 0.117202\nI0817 22:20:41.743898 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:20:41.743916 17350 solver.cpp:244]     Train net output #1: loss = 0.117202 (* 1 = 0.117202 loss)\nI0817 22:20:41.819599 17350 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0817 22:21:29.065057 17350 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0817 22:21:55.678402 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78008\nI0817 22:21:55.678452 17350 solver.cpp:404] 
    Test net output #1: loss = 1.04435 (* 1 = 1.04435 loss)\nI0817 22:21:56.100764 17350 solver.cpp:228] Iteration 28500, loss = 0.0574791\nI0817 22:21:56.100798 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:21:56.100821 17350 solver.cpp:244]     Train net output #1: loss = 0.0574787 (* 1 = 0.0574787 loss)\nI0817 22:21:56.178247 17350 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0817 22:22:43.409790 17350 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0817 22:23:10.022068 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76948\nI0817 22:23:10.022117 17350 solver.cpp:404]     Test net output #1: loss = 1.26503 (* 1 = 1.26503 loss)\nI0817 22:23:10.444653 17350 solver.cpp:228] Iteration 28600, loss = 0.0999975\nI0817 22:23:10.444687 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:23:10.444703 17350 solver.cpp:244]     Train net output #1: loss = 0.0999971 (* 1 = 0.0999971 loss)\nI0817 22:23:10.524740 17350 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0817 22:23:57.774498 17350 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0817 22:24:24.386291 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79944\nI0817 22:24:24.386343 17350 solver.cpp:404]     Test net output #1: loss = 0.886052 (* 1 = 0.886052 loss)\nI0817 22:24:24.809322 17350 solver.cpp:228] Iteration 28700, loss = 0.0285442\nI0817 22:24:24.809357 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 22:24:24.809375 17350 solver.cpp:244]     Train net output #1: loss = 0.0285439 (* 1 = 0.0285439 loss)\nI0817 22:24:24.891477 17350 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0817 22:25:12.155323 17350 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0817 22:25:38.766721 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77196\nI0817 22:25:38.766772 17350 solver.cpp:404]     Test net output #1: loss = 1.11391 (* 1 = 1.11391 loss)\nI0817 22:25:39.188995 17350 solver.cpp:228] Iteration 28800, loss = 
0.0756275\nI0817 22:25:39.189028 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:25:39.189044 17350 solver.cpp:244]     Train net output #1: loss = 0.0756271 (* 1 = 0.0756271 loss)\nI0817 22:25:39.272516 17350 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0817 22:26:26.525854 17350 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0817 22:26:53.135846 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7652\nI0817 22:26:53.135896 17350 solver.cpp:404]     Test net output #1: loss = 1.1471 (* 1 = 1.1471 loss)\nI0817 22:26:53.558225 17350 solver.cpp:228] Iteration 28900, loss = 0.120193\nI0817 22:26:53.558256 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 22:26:53.558272 17350 solver.cpp:244]     Train net output #1: loss = 0.120192 (* 1 = 0.120192 loss)\nI0817 22:26:53.634541 17350 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0817 22:27:40.896989 17350 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0817 22:28:07.508435 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79624\nI0817 22:28:07.508486 17350 solver.cpp:404]     Test net output #1: loss = 0.869236 (* 1 = 0.869236 loss)\nI0817 22:28:07.930752 17350 solver.cpp:228] Iteration 29000, loss = 0.058926\nI0817 22:28:07.930780 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:28:07.930796 17350 solver.cpp:244]     Train net output #1: loss = 0.0589256 (* 1 = 0.0589256 loss)\nI0817 22:28:08.008329 17350 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0817 22:28:55.297610 17350 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0817 22:29:22.010393 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74592\nI0817 22:29:22.010438 17350 solver.cpp:404]     Test net output #1: loss = 1.15187 (* 1 = 1.15187 loss)\nI0817 22:29:22.434762 17350 solver.cpp:228] Iteration 29100, loss = 0.093724\nI0817 22:29:22.434824 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:29:22.434841 17350 
solver.cpp:244]     Train net output #1: loss = 0.0937236 (* 1 = 0.0937236 loss)\nI0817 22:29:22.515274 17350 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0817 22:30:09.800921 17350 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0817 22:30:36.506695 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78388\nI0817 22:30:36.506739 17350 solver.cpp:404]     Test net output #1: loss = 0.986803 (* 1 = 0.986803 loss)\nI0817 22:30:36.931367 17350 solver.cpp:228] Iteration 29200, loss = 0.125432\nI0817 22:30:36.931426 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:30:36.931443 17350 solver.cpp:244]     Train net output #1: loss = 0.125431 (* 1 = 0.125431 loss)\nI0817 22:30:37.014389 17350 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0817 22:31:24.323608 17350 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0817 22:31:51.144037 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76376\nI0817 22:31:51.144088 17350 solver.cpp:404]     Test net output #1: loss = 1.04691 (* 1 = 1.04691 loss)\nI0817 22:31:51.567520 17350 solver.cpp:228] Iteration 29300, loss = 0.0569486\nI0817 22:31:51.567586 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:31:51.567611 17350 solver.cpp:244]     Train net output #1: loss = 0.0569483 (* 1 = 0.0569483 loss)\nI0817 22:31:51.648897 17350 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0817 22:32:38.950798 17350 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0817 22:33:05.770432 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68028\nI0817 22:33:05.770488 17350 solver.cpp:404]     Test net output #1: loss = 1.74912 (* 1 = 1.74912 loss)\nI0817 22:33:06.195629 17350 solver.cpp:228] Iteration 29400, loss = 0.0561508\nI0817 22:33:06.195693 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:33:06.195716 17350 solver.cpp:244]     Train net output #1: loss = 0.0561505 (* 1 = 0.0561505 loss)\nI0817 22:33:06.270623 17350 sgd_solver.cpp:166] 
Iteration 29400, lr = 0.35\nI0817 22:33:53.507624 17350 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0817 22:34:20.313702 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73292\nI0817 22:34:20.313758 17350 solver.cpp:404]     Test net output #1: loss = 1.22627 (* 1 = 1.22627 loss)\nI0817 22:34:20.737546 17350 solver.cpp:228] Iteration 29500, loss = 0.0978964\nI0817 22:34:20.737597 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:34:20.737623 17350 solver.cpp:244]     Train net output #1: loss = 0.0978961 (* 1 = 0.0978961 loss)\nI0817 22:34:20.814235 17350 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0817 22:35:08.053551 17350 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0817 22:35:34.866020 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76624\nI0817 22:35:34.866071 17350 solver.cpp:404]     Test net output #1: loss = 1.14568 (* 1 = 1.14568 loss)\nI0817 22:35:35.290710 17350 solver.cpp:228] Iteration 29600, loss = 0.0468243\nI0817 22:35:35.290763 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:35:35.290788 17350 solver.cpp:244]     Train net output #1: loss = 0.046824 (* 1 = 0.046824 loss)\nI0817 22:35:35.363740 17350 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0817 22:36:22.622961 17350 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0817 22:36:49.390372 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75304\nI0817 22:36:49.390425 17350 solver.cpp:404]     Test net output #1: loss = 1.18109 (* 1 = 1.18109 loss)\nI0817 22:36:49.815390 17350 solver.cpp:228] Iteration 29700, loss = 0.0369638\nI0817 22:36:49.815443 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:36:49.815467 17350 solver.cpp:244]     Train net output #1: loss = 0.0369635 (* 1 = 0.0369635 loss)\nI0817 22:36:49.889425 17350 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0817 22:37:37.149286 17350 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0817 22:38:03.927969 
17350 solver.cpp:404]     Test net output #0: accuracy = 0.80068\nI0817 22:38:03.928020 17350 solver.cpp:404]     Test net output #1: loss = 0.807506 (* 1 = 0.807506 loss)\nI0817 22:38:04.352429 17350 solver.cpp:228] Iteration 29800, loss = 0.0724273\nI0817 22:38:04.352480 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:38:04.352505 17350 solver.cpp:244]     Train net output #1: loss = 0.072427 (* 1 = 0.072427 loss)\nI0817 22:38:04.433954 17350 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0817 22:38:51.702445 17350 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0817 22:39:18.407938 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77268\nI0817 22:39:18.407989 17350 solver.cpp:404]     Test net output #1: loss = 0.924894 (* 1 = 0.924894 loss)\nI0817 22:39:18.832764 17350 solver.cpp:228] Iteration 29900, loss = 0.116374\nI0817 22:39:18.832811 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 22:39:18.832834 17350 solver.cpp:244]     Train net output #1: loss = 0.116373 (* 1 = 0.116373 loss)\nI0817 22:39:18.909719 17350 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0817 22:40:06.209899 17350 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0817 22:40:33.025192 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0817 22:40:33.025248 17350 solver.cpp:404]     Test net output #1: loss = 0.893381 (* 1 = 0.893381 loss)\nI0817 22:40:33.449653 17350 solver.cpp:228] Iteration 30000, loss = 0.0642436\nI0817 22:40:33.449699 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:40:33.449723 17350 solver.cpp:244]     Train net output #1: loss = 0.0642433 (* 1 = 0.0642433 loss)\nI0817 22:40:33.531563 17350 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0817 22:41:20.764849 17350 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0817 22:41:47.594316 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75056\nI0817 22:41:47.594370 17350 solver.cpp:404]     Test net 
output #1: loss = 1.2645 (* 1 = 1.2645 loss)\nI0817 22:41:48.018604 17350 solver.cpp:228] Iteration 30100, loss = 0.0547437\nI0817 22:41:48.018649 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:41:48.018674 17350 solver.cpp:244]     Train net output #1: loss = 0.0547435 (* 1 = 0.0547435 loss)\nI0817 22:41:48.096225 17350 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0817 22:42:35.370308 17350 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0817 22:43:02.225971 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76064\nI0817 22:43:02.226023 17350 solver.cpp:404]     Test net output #1: loss = 1.10972 (* 1 = 1.10972 loss)\nI0817 22:43:02.648893 17350 solver.cpp:228] Iteration 30200, loss = 0.0341005\nI0817 22:43:02.648938 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:43:02.648963 17350 solver.cpp:244]     Train net output #1: loss = 0.0341003 (* 1 = 0.0341003 loss)\nI0817 22:43:02.728559 17350 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0817 22:43:50.017565 17350 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0817 22:44:16.837244 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7826\nI0817 22:44:16.837301 17350 solver.cpp:404]     Test net output #1: loss = 0.951235 (* 1 = 0.951235 loss)\nI0817 22:44:17.261430 17350 solver.cpp:228] Iteration 30300, loss = 0.0995037\nI0817 22:44:17.261477 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:44:17.261502 17350 solver.cpp:244]     Train net output #1: loss = 0.0995035 (* 1 = 0.0995035 loss)\nI0817 22:44:17.338871 17350 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0817 22:45:04.641641 17350 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0817 22:45:31.530500 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81744\nI0817 22:45:31.530556 17350 solver.cpp:404]     Test net output #1: loss = 0.883257 (* 1 = 0.883257 loss)\nI0817 22:45:31.954730 17350 solver.cpp:228] Iteration 30400, loss = 
0.0518868\nI0817 22:45:31.954782 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:45:31.954805 17350 solver.cpp:244]     Train net output #1: loss = 0.0518865 (* 1 = 0.0518865 loss)\nI0817 22:45:32.028579 17350 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0817 22:46:19.338117 17350 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0817 22:46:46.121254 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75812\nI0817 22:46:46.121309 17350 solver.cpp:404]     Test net output #1: loss = 1.20409 (* 1 = 1.20409 loss)\nI0817 22:46:46.544389 17350 solver.cpp:228] Iteration 30500, loss = 0.0484448\nI0817 22:46:46.544440 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:46:46.544464 17350 solver.cpp:244]     Train net output #1: loss = 0.0484445 (* 1 = 0.0484445 loss)\nI0817 22:46:46.626976 17350 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0817 22:47:33.966302 17350 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0817 22:48:00.734676 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79908\nI0817 22:48:00.734730 17350 solver.cpp:404]     Test net output #1: loss = 0.844965 (* 1 = 0.844965 loss)\nI0817 22:48:01.157655 17350 solver.cpp:228] Iteration 30600, loss = 0.0381691\nI0817 22:48:01.157717 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:48:01.157742 17350 solver.cpp:244]     Train net output #1: loss = 0.0381688 (* 1 = 0.0381688 loss)\nI0817 22:48:01.243031 17350 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0817 22:48:48.572597 17350 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0817 22:49:15.348244 17350 solver.cpp:404]     Test net output #0: accuracy = 0.755\nI0817 22:49:15.348300 17350 solver.cpp:404]     Test net output #1: loss = 1.25905 (* 1 = 1.25905 loss)\nI0817 22:49:15.772023 17350 solver.cpp:228] Iteration 30700, loss = 0.10291\nI0817 22:49:15.772088 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:49:15.772114 17350 
solver.cpp:244]     Train net output #1: loss = 0.10291 (* 1 = 0.10291 loss)\nI0817 22:49:15.848788 17350 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0817 22:50:03.170415 17350 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0817 22:50:29.969933 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75824\nI0817 22:50:29.969987 17350 solver.cpp:404]     Test net output #1: loss = 1.10126 (* 1 = 1.10126 loss)\nI0817 22:50:30.392638 17350 solver.cpp:228] Iteration 30800, loss = 0.0366734\nI0817 22:50:30.392698 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:50:30.392724 17350 solver.cpp:244]     Train net output #1: loss = 0.0366731 (* 1 = 0.0366731 loss)\nI0817 22:50:30.468345 17350 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0817 22:51:17.741906 17350 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0817 22:51:44.540235 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78324\nI0817 22:51:44.540290 17350 solver.cpp:404]     Test net output #1: loss = 0.922321 (* 1 = 0.922321 loss)\nI0817 22:51:44.964368 17350 solver.cpp:228] Iteration 30900, loss = 0.0788885\nI0817 22:51:44.964426 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 22:51:44.964449 17350 solver.cpp:244]     Train net output #1: loss = 0.0788882 (* 1 = 0.0788882 loss)\nI0817 22:51:45.040153 17350 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0817 22:52:32.329810 17350 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0817 22:52:59.205289 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78236\nI0817 22:52:59.205344 17350 solver.cpp:404]     Test net output #1: loss = 1.05142 (* 1 = 1.05142 loss)\nI0817 22:52:59.629381 17350 solver.cpp:228] Iteration 31000, loss = 0.0470475\nI0817 22:52:59.629434 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 22:52:59.629458 17350 solver.cpp:244]     Train net output #1: loss = 0.0470472 (* 1 = 0.0470472 loss)\nI0817 22:52:59.708627 17350 sgd_solver.cpp:166] 
Iteration 31000, lr = 0.35\nI0817 22:53:47.008282 17350 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0817 22:54:13.847149 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80272\nI0817 22:54:13.847209 17350 solver.cpp:404]     Test net output #1: loss = 0.94282 (* 1 = 0.94282 loss)\nI0817 22:54:14.270251 17350 solver.cpp:228] Iteration 31100, loss = 0.0795438\nI0817 22:54:14.270303 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:54:14.270328 17350 solver.cpp:244]     Train net output #1: loss = 0.0795435 (* 1 = 0.0795435 loss)\nI0817 22:54:14.345752 17350 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0817 22:55:01.645229 17350 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0817 22:55:28.519847 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79444\nI0817 22:55:28.519909 17350 solver.cpp:404]     Test net output #1: loss = 0.980628 (* 1 = 0.980628 loss)\nI0817 22:55:28.944349 17350 solver.cpp:228] Iteration 31200, loss = 0.0732436\nI0817 22:55:28.944399 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 22:55:28.944424 17350 solver.cpp:244]     Train net output #1: loss = 0.0732433 (* 1 = 0.0732433 loss)\nI0817 22:55:29.021627 17350 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0817 22:56:16.327131 17350 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0817 22:56:43.159785 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73444\nI0817 22:56:43.159849 17350 solver.cpp:404]     Test net output #1: loss = 1.19347 (* 1 = 1.19347 loss)\nI0817 22:56:43.583932 17350 solver.cpp:228] Iteration 31300, loss = 0.0497809\nI0817 22:56:43.583986 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:56:43.584010 17350 solver.cpp:244]     Train net output #1: loss = 0.0497806 (* 1 = 0.0497806 loss)\nI0817 22:56:43.656226 17350 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0817 22:57:30.886200 17350 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0817 
22:57:57.867399 17350 solver.cpp:404]     Test net output #0: accuracy = 0.66352\nI0817 22:57:57.867473 17350 solver.cpp:404]     Test net output #1: loss = 1.78302 (* 1 = 1.78302 loss)\nI0817 22:57:58.290210 17350 solver.cpp:228] Iteration 31400, loss = 0.0529269\nI0817 22:57:58.290256 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 22:57:58.290280 17350 solver.cpp:244]     Train net output #1: loss = 0.0529266 (* 1 = 0.0529266 loss)\nI0817 22:57:58.370240 17350 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0817 22:58:45.644528 17350 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0817 22:59:12.649298 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77588\nI0817 22:59:12.649374 17350 solver.cpp:404]     Test net output #1: loss = 1.05269 (* 1 = 1.05269 loss)\nI0817 22:59:13.072243 17350 solver.cpp:228] Iteration 31500, loss = 0.0805303\nI0817 22:59:13.072288 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 22:59:13.072314 17350 solver.cpp:244]     Train net output #1: loss = 0.08053 (* 1 = 0.08053 loss)\nI0817 22:59:13.151218 17350 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0817 23:00:00.471990 17350 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0817 23:00:27.398733 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74632\nI0817 23:00:27.398804 17350 solver.cpp:404]     Test net output #1: loss = 1.24862 (* 1 = 1.24862 loss)\nI0817 23:00:27.823122 17350 solver.cpp:228] Iteration 31600, loss = 0.121117\nI0817 23:00:27.823173 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:00:27.823204 17350 solver.cpp:244]     Train net output #1: loss = 0.121117 (* 1 = 0.121117 loss)\nI0817 23:00:27.902386 17350 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0817 23:01:15.248253 17350 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0817 23:01:42.253721 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71832\nI0817 23:01:42.253800 17350 solver.cpp:404]     Test 
net output #1: loss = 1.37731 (* 1 = 1.37731 loss)\nI0817 23:01:42.678143 17350 solver.cpp:228] Iteration 31700, loss = 0.0686112\nI0817 23:01:42.678200 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:01:42.678225 17350 solver.cpp:244]     Train net output #1: loss = 0.0686109 (* 1 = 0.0686109 loss)\nI0817 23:01:42.750484 17350 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0817 23:02:30.061681 17350 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0817 23:02:57.072412 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80772\nI0817 23:02:57.072489 17350 solver.cpp:404]     Test net output #1: loss = 0.930352 (* 1 = 0.930352 loss)\nI0817 23:02:57.496711 17350 solver.cpp:228] Iteration 31800, loss = 0.0493391\nI0817 23:02:57.496773 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:02:57.496799 17350 solver.cpp:244]     Train net output #1: loss = 0.0493388 (* 1 = 0.0493388 loss)\nI0817 23:02:57.573359 17350 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0817 23:03:44.936244 17350 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0817 23:04:11.857892 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79244\nI0817 23:04:11.857966 17350 solver.cpp:404]     Test net output #1: loss = 0.902665 (* 1 = 0.902665 loss)\nI0817 23:04:12.280663 17350 solver.cpp:228] Iteration 31900, loss = 0.0420299\nI0817 23:04:12.280722 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:04:12.280748 17350 solver.cpp:244]     Train net output #1: loss = 0.0420296 (* 1 = 0.0420296 loss)\nI0817 23:04:12.354174 17350 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0817 23:04:59.695816 17350 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0817 23:05:26.638515 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79376\nI0817 23:05:26.638593 17350 solver.cpp:404]     Test net output #1: loss = 0.872126 (* 1 = 0.872126 loss)\nI0817 23:05:27.062989 17350 solver.cpp:228] Iteration 32000, loss = 
0.184091\nI0817 23:05:27.063037 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 23:05:27.063062 17350 solver.cpp:244]     Train net output #1: loss = 0.18409 (* 1 = 0.18409 loss)\nI0817 23:05:27.143002 17350 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0817 23:06:14.399279 17350 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0817 23:06:41.308951 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7308\nI0817 23:06:41.309029 17350 solver.cpp:404]     Test net output #1: loss = 1.33493 (* 1 = 1.33493 loss)\nI0817 23:06:41.733213 17350 solver.cpp:228] Iteration 32100, loss = 0.0467558\nI0817 23:06:41.733260 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:06:41.733285 17350 solver.cpp:244]     Train net output #1: loss = 0.0467555 (* 1 = 0.0467555 loss)\nI0817 23:06:41.815264 17350 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0817 23:07:29.168620 17350 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0817 23:07:56.082365 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75088\nI0817 23:07:56.082440 17350 solver.cpp:404]     Test net output #1: loss = 1.08237 (* 1 = 1.08237 loss)\nI0817 23:07:56.510529 17350 solver.cpp:228] Iteration 32200, loss = 0.0427583\nI0817 23:07:56.510798 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:07:56.510901 17350 solver.cpp:244]     Train net output #1: loss = 0.042758 (* 1 = 0.042758 loss)\nI0817 23:07:56.581439 17350 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0817 23:08:43.914408 17350 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0817 23:09:10.851459 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75744\nI0817 23:09:10.851534 17350 solver.cpp:404]     Test net output #1: loss = 1.13373 (* 1 = 1.13373 loss)\nI0817 23:09:11.275869 17350 solver.cpp:228] Iteration 32300, loss = 0.128189\nI0817 23:09:11.275930 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:09:11.275955 17350 
solver.cpp:244]     Train net output #1: loss = 0.128189 (* 1 = 0.128189 loss)\nI0817 23:09:11.357842 17350 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0817 23:09:58.681936 17350 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0817 23:10:25.662562 17350 solver.cpp:404]     Test net output #0: accuracy = 0.83124\nI0817 23:10:25.662636 17350 solver.cpp:404]     Test net output #1: loss = 0.685956 (* 1 = 0.685956 loss)\nI0817 23:10:26.086488 17350 solver.cpp:228] Iteration 32400, loss = 0.084786\nI0817 23:10:26.086549 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:10:26.086575 17350 solver.cpp:244]     Train net output #1: loss = 0.0847857 (* 1 = 0.0847857 loss)\nI0817 23:10:26.162947 17350 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0817 23:11:13.446657 17350 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0817 23:11:40.463255 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79844\nI0817 23:11:40.463327 17350 solver.cpp:404]     Test net output #1: loss = 0.861309 (* 1 = 0.861309 loss)\nI0817 23:11:40.887414 17350 solver.cpp:228] Iteration 32500, loss = 0.107939\nI0817 23:11:40.887475 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:11:40.887501 17350 solver.cpp:244]     Train net output #1: loss = 0.107939 (* 1 = 0.107939 loss)\nI0817 23:11:40.962208 17350 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0817 23:12:28.225688 17350 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0817 23:12:55.191736 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77768\nI0817 23:12:55.191812 17350 solver.cpp:404]     Test net output #1: loss = 0.981007 (* 1 = 0.981007 loss)\nI0817 23:12:55.615852 17350 solver.cpp:228] Iteration 32600, loss = 0.0830703\nI0817 23:12:55.615914 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:12:55.615941 17350 solver.cpp:244]     Train net output #1: loss = 0.08307 (* 1 = 0.08307 loss)\nI0817 23:12:55.693936 17350 sgd_solver.cpp:166] 
Iteration 32600, lr = 0.35\nI0817 23:13:43.008038 17350 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0817 23:14:09.925843 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8206\nI0817 23:14:09.925917 17350 solver.cpp:404]     Test net output #1: loss = 0.706094 (* 1 = 0.706094 loss)\nI0817 23:14:10.349099 17350 solver.cpp:228] Iteration 32700, loss = 0.042127\nI0817 23:14:10.349161 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:14:10.349192 17350 solver.cpp:244]     Train net output #1: loss = 0.0421267 (* 1 = 0.0421267 loss)\nI0817 23:14:10.431037 17350 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0817 23:14:57.748889 17350 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0817 23:15:24.663566 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI0817 23:15:24.663642 17350 solver.cpp:404]     Test net output #1: loss = 0.891134 (* 1 = 0.891134 loss)\nI0817 23:15:25.087620 17350 solver.cpp:228] Iteration 32800, loss = 0.024377\nI0817 23:15:25.087682 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:15:25.087708 17350 solver.cpp:244]     Train net output #1: loss = 0.0243767 (* 1 = 0.0243767 loss)\nI0817 23:15:25.169370 17350 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0817 23:16:12.514111 17350 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0817 23:16:39.429659 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75704\nI0817 23:16:39.429736 17350 solver.cpp:404]     Test net output #1: loss = 1.07257 (* 1 = 1.07257 loss)\nI0817 23:16:39.852596 17350 solver.cpp:228] Iteration 32900, loss = 0.0945413\nI0817 23:16:39.852659 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:16:39.852687 17350 solver.cpp:244]     Train net output #1: loss = 0.094541 (* 1 = 0.094541 loss)\nI0817 23:16:39.932206 17350 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0817 23:17:27.274914 17350 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0817 23:17:54.191412 
17350 solver.cpp:404]     Test net output #0: accuracy = 0.7744\nI0817 23:17:54.191489 17350 solver.cpp:404]     Test net output #1: loss = 0.982538 (* 1 = 0.982538 loss)\nI0817 23:17:54.614850 17350 solver.cpp:228] Iteration 33000, loss = 0.0855569\nI0817 23:17:54.614912 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:17:54.614939 17350 solver.cpp:244]     Train net output #1: loss = 0.0855566 (* 1 = 0.0855566 loss)\nI0817 23:17:54.690865 17350 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0817 23:18:41.978197 17350 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0817 23:19:08.915372 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8056\nI0817 23:19:08.915446 17350 solver.cpp:404]     Test net output #1: loss = 0.825173 (* 1 = 0.825173 loss)\nI0817 23:19:09.340191 17350 solver.cpp:228] Iteration 33100, loss = 0.0401528\nI0817 23:19:09.340255 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:19:09.340281 17350 solver.cpp:244]     Train net output #1: loss = 0.0401525 (* 1 = 0.0401525 loss)\nI0817 23:19:09.418084 17350 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0817 23:19:56.761880 17350 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0817 23:20:23.684533 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0817 23:20:23.684608 17350 solver.cpp:404]     Test net output #1: loss = 1.1897 (* 1 = 1.1897 loss)\nI0817 23:20:24.108897 17350 solver.cpp:228] Iteration 33200, loss = 0.0621071\nI0817 23:20:24.109131 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:20:24.109163 17350 solver.cpp:244]     Train net output #1: loss = 0.0621067 (* 1 = 0.0621067 loss)\nI0817 23:20:24.183764 17350 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0817 23:21:11.568763 17350 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0817 23:21:38.523154 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78904\nI0817 23:21:38.523236 17350 solver.cpp:404]     Test net 
output #1: loss = 0.977068 (* 1 = 0.977068 loss)\nI0817 23:21:38.946000 17350 solver.cpp:228] Iteration 33300, loss = 0.131873\nI0817 23:21:38.946063 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:21:38.946086 17350 solver.cpp:244]     Train net output #1: loss = 0.131873 (* 1 = 0.131873 loss)\nI0817 23:21:39.029162 17350 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0817 23:22:26.395531 17350 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0817 23:22:53.323751 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76824\nI0817 23:22:53.323828 17350 solver.cpp:404]     Test net output #1: loss = 1.03339 (* 1 = 1.03339 loss)\nI0817 23:22:53.748034 17350 solver.cpp:228] Iteration 33400, loss = 0.109233\nI0817 23:22:53.748096 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0817 23:22:53.748122 17350 solver.cpp:244]     Train net output #1: loss = 0.109233 (* 1 = 0.109233 loss)\nI0817 23:22:53.821324 17350 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0817 23:23:41.161444 17350 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0817 23:24:08.194216 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77068\nI0817 23:24:08.194290 17350 solver.cpp:404]     Test net output #1: loss = 0.959823 (* 1 = 0.959823 loss)\nI0817 23:24:08.618576 17350 solver.cpp:228] Iteration 33500, loss = 0.0732633\nI0817 23:24:08.618636 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:24:08.618662 17350 solver.cpp:244]     Train net output #1: loss = 0.0732629 (* 1 = 0.0732629 loss)\nI0817 23:24:08.694722 17350 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0817 23:24:56.070677 17350 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0817 23:25:23.005856 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74828\nI0817 23:25:23.005933 17350 solver.cpp:404]     Test net output #1: loss = 1.21641 (* 1 = 1.21641 loss)\nI0817 23:25:23.428897 17350 solver.cpp:228] Iteration 33600, loss = 
0.0627352\nI0817 23:25:23.428959 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:25:23.428984 17350 solver.cpp:244]     Train net output #1: loss = 0.0627348 (* 1 = 0.0627348 loss)\nI0817 23:25:23.510143 17350 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0817 23:26:10.868274 17350 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0817 23:26:37.798033 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77764\nI0817 23:26:37.798111 17350 solver.cpp:404]     Test net output #1: loss = 0.926532 (* 1 = 0.926532 loss)\nI0817 23:26:38.222544 17350 solver.cpp:228] Iteration 33700, loss = 0.071534\nI0817 23:26:38.222817 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:26:38.222908 17350 solver.cpp:244]     Train net output #1: loss = 0.0715336 (* 1 = 0.0715336 loss)\nI0817 23:26:38.299113 17350 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0817 23:27:25.691479 17350 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0817 23:27:52.616138 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74168\nI0817 23:27:52.616214 17350 solver.cpp:404]     Test net output #1: loss = 1.34029 (* 1 = 1.34029 loss)\nI0817 23:27:53.040328 17350 solver.cpp:228] Iteration 33800, loss = 0.0358099\nI0817 23:27:53.040388 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:27:53.040415 17350 solver.cpp:244]     Train net output #1: loss = 0.0358095 (* 1 = 0.0358095 loss)\nI0817 23:27:53.121984 17350 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0817 23:28:40.524186 17350 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0817 23:29:07.446543 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7768\nI0817 23:29:07.446619 17350 solver.cpp:404]     Test net output #1: loss = 1.18999 (* 1 = 1.18999 loss)\nI0817 23:29:07.870843 17350 solver.cpp:228] Iteration 33900, loss = 0.153246\nI0817 23:29:07.870890 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:29:07.870915 17350 
solver.cpp:244]     Train net output #1: loss = 0.153246 (* 1 = 0.153246 loss)\nI0817 23:29:07.944592 17350 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0817 23:29:55.340350 17350 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0817 23:30:22.268374 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77476\nI0817 23:30:22.268448 17350 solver.cpp:404]     Test net output #1: loss = 1.12553 (* 1 = 1.12553 loss)\nI0817 23:30:22.691671 17350 solver.cpp:228] Iteration 34000, loss = 0.0368334\nI0817 23:30:22.691718 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:30:22.691745 17350 solver.cpp:244]     Train net output #1: loss = 0.036833 (* 1 = 0.036833 loss)\nI0817 23:30:22.774317 17350 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0817 23:31:10.187252 17350 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0817 23:31:37.193258 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78904\nI0817 23:31:37.193334 17350 solver.cpp:404]     Test net output #1: loss = 1.06163 (* 1 = 1.06163 loss)\nI0817 23:31:37.616549 17350 solver.cpp:228] Iteration 34100, loss = 0.109458\nI0817 23:31:37.616595 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:31:37.616621 17350 solver.cpp:244]     Train net output #1: loss = 0.109457 (* 1 = 0.109457 loss)\nI0817 23:31:37.692596 17350 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0817 23:32:25.117849 17350 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0817 23:32:52.142103 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76944\nI0817 23:32:52.142179 17350 solver.cpp:404]     Test net output #1: loss = 1.18843 (* 1 = 1.18843 loss)\nI0817 23:32:52.566745 17350 solver.cpp:228] Iteration 34200, loss = 0.0363136\nI0817 23:32:52.566790 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0817 23:32:52.566808 17350 solver.cpp:244]     Train net output #1: loss = 0.0363132 (* 1 = 0.0363132 loss)\nI0817 23:32:52.642895 17350 sgd_solver.cpp:166] 
Iteration 34200, lr = 0.35\nI0817 23:33:40.076342 17350 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0817 23:34:06.986294 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71336\nI0817 23:34:06.986362 17350 solver.cpp:404]     Test net output #1: loss = 1.38747 (* 1 = 1.38747 loss)\nI0817 23:34:07.409107 17350 solver.cpp:228] Iteration 34300, loss = 0.0685769\nI0817 23:34:07.409154 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:34:07.409173 17350 solver.cpp:244]     Train net output #1: loss = 0.0685765 (* 1 = 0.0685765 loss)\nI0817 23:34:07.485520 17350 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0817 23:34:54.818027 17350 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0817 23:35:21.731410 17350 solver.cpp:404]     Test net output #0: accuracy = 0.65748\nI0817 23:35:21.731478 17350 solver.cpp:404]     Test net output #1: loss = 2.33931 (* 1 = 2.33931 loss)\nI0817 23:35:22.154319 17350 solver.cpp:228] Iteration 34400, loss = 0.0725094\nI0817 23:35:22.154362 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:35:22.154378 17350 solver.cpp:244]     Train net output #1: loss = 0.072509 (* 1 = 0.072509 loss)\nI0817 23:35:22.230195 17350 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0817 23:36:09.543529 17350 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0817 23:36:36.454406 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69004\nI0817 23:36:36.454475 17350 solver.cpp:404]     Test net output #1: loss = 2.04778 (* 1 = 2.04778 loss)\nI0817 23:36:36.877696 17350 solver.cpp:228] Iteration 34500, loss = 0.103757\nI0817 23:36:36.877739 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0817 23:36:36.877756 17350 solver.cpp:244]     Train net output #1: loss = 0.103756 (* 1 = 0.103756 loss)\nI0817 23:36:36.953902 17350 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0817 23:37:24.341595 17350 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0817 23:37:51.259279 
17350 solver.cpp:404]     Test net output #0: accuracy = 0.74744\nI0817 23:37:51.259349 17350 solver.cpp:404]     Test net output #1: loss = 1.28312 (* 1 = 1.28312 loss)\nI0817 23:37:51.682797 17350 solver.cpp:228] Iteration 34600, loss = 0.0903374\nI0817 23:37:51.682839 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:37:51.682858 17350 solver.cpp:244]     Train net output #1: loss = 0.090337 (* 1 = 0.090337 loss)\nI0817 23:37:51.765190 17350 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0817 23:38:39.083302 17350 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0817 23:39:06.008303 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81328\nI0817 23:39:06.008375 17350 solver.cpp:404]     Test net output #1: loss = 0.75916 (* 1 = 0.75916 loss)\nI0817 23:39:06.431457 17350 solver.cpp:228] Iteration 34700, loss = 0.0893037\nI0817 23:39:06.431499 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0817 23:39:06.431514 17350 solver.cpp:244]     Train net output #1: loss = 0.0893033 (* 1 = 0.0893033 loss)\nI0817 23:39:06.515426 17350 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0817 23:39:53.920469 17350 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0817 23:40:20.840100 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76312\nI0817 23:40:20.840167 17350 solver.cpp:404]     Test net output #1: loss = 1.10079 (* 1 = 1.10079 loss)\nI0817 23:40:21.263798 17350 solver.cpp:228] Iteration 34800, loss = 0.0880337\nI0817 23:40:21.263844 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:40:21.263861 17350 solver.cpp:244]     Train net output #1: loss = 0.0880333 (* 1 = 0.0880333 loss)\nI0817 23:40:21.342304 17350 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0817 23:41:08.681103 17350 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0817 23:41:35.602150 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7014\nI0817 23:41:35.602221 17350 solver.cpp:404]     Test net output 
#1: loss = 1.48706 (* 1 = 1.48706 loss)\nI0817 23:41:36.025892 17350 solver.cpp:228] Iteration 34900, loss = 0.0741034\nI0817 23:41:36.025943 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:41:36.025961 17350 solver.cpp:244]     Train net output #1: loss = 0.0741031 (* 1 = 0.0741031 loss)\nI0817 23:41:36.102774 17350 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0817 23:42:23.468343 17350 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0817 23:42:50.380800 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75048\nI0817 23:42:50.380870 17350 solver.cpp:404]     Test net output #1: loss = 1.11777 (* 1 = 1.11777 loss)\nI0817 23:42:50.805321 17350 solver.cpp:228] Iteration 35000, loss = 0.122514\nI0817 23:42:50.805362 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:42:50.805379 17350 solver.cpp:244]     Train net output #1: loss = 0.122514 (* 1 = 0.122514 loss)\nI0817 23:42:50.886793 17350 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0817 23:43:38.337038 17350 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0817 23:44:05.258282 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7972\nI0817 23:44:05.258349 17350 solver.cpp:404]     Test net output #1: loss = 0.904445 (* 1 = 0.904445 loss)\nI0817 23:44:05.683894 17350 solver.cpp:228] Iteration 35100, loss = 0.0999356\nI0817 23:44:05.683945 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:44:05.683970 17350 solver.cpp:244]     Train net output #1: loss = 0.0999352 (* 1 = 0.0999352 loss)\nI0817 23:44:05.764062 17350 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0817 23:44:53.150576 17350 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0817 23:45:20.068899 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73676\nI0817 23:45:20.068976 17350 solver.cpp:404]     Test net output #1: loss = 1.29915 (* 1 = 1.29915 loss)\nI0817 23:45:20.493834 17350 solver.cpp:228] Iteration 35200, loss = 0.0739672\nI0817 
23:45:20.493934 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:45:20.493962 17350 solver.cpp:244]     Train net output #1: loss = 0.0739669 (* 1 = 0.0739669 loss)\nI0817 23:45:20.573073 17350 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0817 23:46:08.030728 17350 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0817 23:46:34.953331 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82848\nI0817 23:46:34.953408 17350 solver.cpp:404]     Test net output #1: loss = 0.749458 (* 1 = 0.749458 loss)\nI0817 23:46:35.378033 17350 solver.cpp:228] Iteration 35300, loss = 0.0287874\nI0817 23:46:35.378084 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:46:35.378101 17350 solver.cpp:244]     Train net output #1: loss = 0.028787 (* 1 = 0.028787 loss)\nI0817 23:46:35.456744 17350 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0817 23:47:22.921399 17350 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0817 23:47:49.836738 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80264\nI0817 23:47:49.836810 17350 solver.cpp:404]     Test net output #1: loss = 0.898769 (* 1 = 0.898769 loss)\nI0817 23:47:50.260382 17350 solver.cpp:228] Iteration 35400, loss = 0.154755\nI0817 23:47:50.260430 17350 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0817 23:47:50.260447 17350 solver.cpp:244]     Train net output #1: loss = 0.154755 (* 1 = 0.154755 loss)\nI0817 23:47:50.338310 17350 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0817 23:48:37.803378 17350 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0817 23:49:04.717137 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82432\nI0817 23:49:04.717209 17350 solver.cpp:404]     Test net output #1: loss = 0.779449 (* 1 = 0.779449 loss)\nI0817 23:49:05.140549 17350 solver.cpp:228] Iteration 35500, loss = 0.054268\nI0817 23:49:05.140600 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:49:05.140617 17350 solver.cpp:244]     
Train net output #1: loss = 0.0542676 (* 1 = 0.0542676 loss)\nI0817 23:49:05.219805 17350 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0817 23:49:52.670771 17350 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0817 23:50:19.587671 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75188\nI0817 23:50:19.587738 17350 solver.cpp:404]     Test net output #1: loss = 1.23607 (* 1 = 1.23607 loss)\nI0817 23:50:20.011018 17350 solver.cpp:228] Iteration 35600, loss = 0.0684015\nI0817 23:50:20.011067 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:50:20.011085 17350 solver.cpp:244]     Train net output #1: loss = 0.0684011 (* 1 = 0.0684011 loss)\nI0817 23:50:20.092219 17350 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0817 23:51:07.549471 17350 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0817 23:51:34.461992 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0817 23:51:34.462064 17350 solver.cpp:404]     Test net output #1: loss = 0.866263 (* 1 = 0.866263 loss)\nI0817 23:51:34.886477 17350 solver.cpp:228] Iteration 35700, loss = 0.107839\nI0817 23:51:34.886528 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0817 23:51:34.886545 17350 solver.cpp:244]     Train net output #1: loss = 0.107839 (* 1 = 0.107839 loss)\nI0817 23:51:34.971868 17350 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0817 23:52:22.396275 17350 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0817 23:52:49.311172 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73436\nI0817 23:52:49.311242 17350 solver.cpp:404]     Test net output #1: loss = 1.46129 (* 1 = 1.46129 loss)\nI0817 23:52:49.735821 17350 solver.cpp:228] Iteration 35800, loss = 0.0377786\nI0817 23:52:49.735875 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:52:49.735894 17350 solver.cpp:244]     Train net output #1: loss = 0.0377783 (* 1 = 0.0377783 loss)\nI0817 23:52:49.809792 17350 sgd_solver.cpp:166] Iteration 35800, lr 
= 0.35\nI0817 23:53:37.223280 17350 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0817 23:54:04.133426 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79148\nI0817 23:54:04.133494 17350 solver.cpp:404]     Test net output #1: loss = 0.96532 (* 1 = 0.96532 loss)\nI0817 23:54:04.557276 17350 solver.cpp:228] Iteration 35900, loss = 0.0246655\nI0817 23:54:04.557329 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0817 23:54:04.557346 17350 solver.cpp:244]     Train net output #1: loss = 0.0246651 (* 1 = 0.0246651 loss)\nI0817 23:54:04.638191 17350 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0817 23:54:52.051241 17350 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0817 23:55:18.960476 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76092\nI0817 23:55:18.960546 17350 solver.cpp:404]     Test net output #1: loss = 1.16075 (* 1 = 1.16075 loss)\nI0817 23:55:19.385141 17350 solver.cpp:228] Iteration 36000, loss = 0.115616\nI0817 23:55:19.385200 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:55:19.385218 17350 solver.cpp:244]     Train net output #1: loss = 0.115615 (* 1 = 0.115615 loss)\nI0817 23:55:19.460448 17350 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0817 23:56:06.931088 17350 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0817 23:56:33.844827 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73744\nI0817 23:56:33.844898 17350 solver.cpp:404]     Test net output #1: loss = 1.2586 (* 1 = 1.2586 loss)\nI0817 23:56:34.268146 17350 solver.cpp:228] Iteration 36100, loss = 0.084777\nI0817 23:56:34.268204 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:56:34.268221 17350 solver.cpp:244]     Train net output #1: loss = 0.0847766 (* 1 = 0.0847766 loss)\nI0817 23:56:34.345923 17350 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0817 23:57:21.852949 17350 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0817 23:57:48.763278 17350 solver.cpp:404]     
Test net output #0: accuracy = 0.69548\nI0817 23:57:48.763350 17350 solver.cpp:404]     Test net output #1: loss = 1.53856 (* 1 = 1.53856 loss)\nI0817 23:57:49.186650 17350 solver.cpp:228] Iteration 36200, loss = 0.090596\nI0817 23:57:49.186708 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0817 23:57:49.186728 17350 solver.cpp:244]     Train net output #1: loss = 0.0905956 (* 1 = 0.0905956 loss)\nI0817 23:57:49.262450 17350 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0817 23:58:36.732435 17350 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0817 23:59:03.646178 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78564\nI0817 23:59:03.646246 17350 solver.cpp:404]     Test net output #1: loss = 1.009 (* 1 = 1.009 loss)\nI0817 23:59:04.069759 17350 solver.cpp:228] Iteration 36300, loss = 0.061415\nI0817 23:59:04.069818 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0817 23:59:04.069836 17350 solver.cpp:244]     Train net output #1: loss = 0.0614146 (* 1 = 0.0614146 loss)\nI0817 23:59:04.144735 17350 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0817 23:59:51.594211 17350 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 00:00:18.508487 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76648\nI0818 00:00:18.508554 17350 solver.cpp:404]     Test net output #1: loss = 1.09991 (* 1 = 1.09991 loss)\nI0818 00:00:18.931954 17350 solver.cpp:228] Iteration 36400, loss = 0.0996037\nI0818 00:00:18.932005 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:00:18.932023 17350 solver.cpp:244]     Train net output #1: loss = 0.0996033 (* 1 = 0.0996033 loss)\nI0818 00:00:19.009186 17350 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0818 00:01:06.483376 17350 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0818 00:01:33.400457 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81244\nI0818 00:01:33.400535 17350 solver.cpp:404]     Test net output #1: loss = 0.80134 (* 1 = 
0.80134 loss)\nI0818 00:01:33.823665 17350 solver.cpp:228] Iteration 36500, loss = 0.0430003\nI0818 00:01:33.823714 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:01:33.823737 17350 solver.cpp:244]     Train net output #1: loss = 0.0429999 (* 1 = 0.0429999 loss)\nI0818 00:01:33.902235 17350 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0818 00:02:21.395395 17350 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 00:02:48.307487 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72652\nI0818 00:02:48.307564 17350 solver.cpp:404]     Test net output #1: loss = 1.49647 (* 1 = 1.49647 loss)\nI0818 00:02:48.730459 17350 solver.cpp:228] Iteration 36600, loss = 0.0993913\nI0818 00:02:48.730506 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:02:48.730530 17350 solver.cpp:244]     Train net output #1: loss = 0.0993908 (* 1 = 0.0993908 loss)\nI0818 00:02:48.808008 17350 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0818 00:03:36.248713 17350 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 00:04:03.159421 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81676\nI0818 00:04:03.159489 17350 solver.cpp:404]     Test net output #1: loss = 0.834778 (* 1 = 0.834778 loss)\nI0818 00:04:03.584177 17350 solver.cpp:228] Iteration 36700, loss = 0.0643053\nI0818 00:04:03.584223 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:04:03.584240 17350 solver.cpp:244]     Train net output #1: loss = 0.0643049 (* 1 = 0.0643049 loss)\nI0818 00:04:03.663262 17350 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0818 00:04:51.107673 17350 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 00:05:18.018892 17350 solver.cpp:404]     Test net output #0: accuracy = 0.57024\nI0818 00:05:18.018962 17350 solver.cpp:404]     Test net output #1: loss = 2.37187 (* 1 = 2.37187 loss)\nI0818 00:05:18.443511 17350 solver.cpp:228] Iteration 36800, loss = 0.0634534\nI0818 00:05:18.443557 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:05:18.443573 17350 solver.cpp:244]     Train net output #1: loss = 0.0634529 (* 1 = 0.0634529 loss)\nI0818 00:05:18.524756 17350 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0818 00:06:05.969537 17350 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 00:06:32.879344 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7666\nI0818 00:06:32.879415 17350 solver.cpp:404]     Test net output #1: loss = 1.21204 (* 1 = 1.21204 loss)\nI0818 00:06:33.304153 17350 solver.cpp:228] Iteration 36900, loss = 0.102302\nI0818 00:06:33.304198 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:06:33.304215 17350 solver.cpp:244]     Train net output #1: loss = 0.102301 (* 1 = 0.102301 loss)\nI0818 00:06:33.387754 17350 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0818 00:07:20.816162 17350 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 00:07:47.740319 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75172\nI0818 00:07:47.740397 17350 solver.cpp:404]     Test net output #1: loss = 1.18055 (* 1 = 1.18055 loss)\nI0818 00:07:48.164841 17350 solver.cpp:228] Iteration 37000, loss = 0.0637876\nI0818 00:07:48.164903 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:07:48.164935 17350 solver.cpp:244]     Train net output #1: loss = 0.0637872 (* 1 = 0.0637872 loss)\nI0818 00:07:48.242043 17350 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0818 00:08:35.748337 17350 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 00:09:02.658768 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79136\nI0818 00:09:02.658840 17350 solver.cpp:404]     Test net output #1: loss = 0.942807 (* 1 = 0.942807 loss)\nI0818 00:09:03.083468 17350 solver.cpp:228] Iteration 37100, loss = 0.0550493\nI0818 00:09:03.083528 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:09:03.083545 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0550489 (* 1 = 0.0550489 loss)\nI0818 00:09:03.159729 17350 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0818 00:09:50.595741 17350 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 00:10:17.506979 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76556\nI0818 00:10:17.507045 17350 solver.cpp:404]     Test net output #1: loss = 1.18712 (* 1 = 1.18712 loss)\nI0818 00:10:17.931596 17350 solver.cpp:228] Iteration 37200, loss = 0.0661867\nI0818 00:10:17.931646 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:10:17.931663 17350 solver.cpp:244]     Train net output #1: loss = 0.0661862 (* 1 = 0.0661862 loss)\nI0818 00:10:18.008577 17350 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0818 00:11:05.525180 17350 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 00:11:32.427991 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75796\nI0818 00:11:32.428066 17350 solver.cpp:404]     Test net output #1: loss = 1.10921 (* 1 = 1.10921 loss)\nI0818 00:11:32.851402 17350 solver.cpp:228] Iteration 37300, loss = 0.0771529\nI0818 00:11:32.851450 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:11:32.851467 17350 solver.cpp:244]     Train net output #1: loss = 0.0771524 (* 1 = 0.0771524 loss)\nI0818 00:11:32.925173 17350 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0818 00:12:20.400743 17350 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 00:12:47.303201 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7656\nI0818 00:12:47.303277 17350 solver.cpp:404]     Test net output #1: loss = 1.13348 (* 1 = 1.13348 loss)\nI0818 00:12:47.727984 17350 solver.cpp:228] Iteration 37400, loss = 0.0766335\nI0818 00:12:47.728034 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:12:47.728050 17350 solver.cpp:244]     Train net output #1: loss = 0.0766331 (* 1 = 0.0766331 loss)\nI0818 00:12:47.804194 17350 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0818 
00:13:35.307613 17350 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 00:14:02.192905 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74408\nI0818 00:14:02.192977 17350 solver.cpp:404]     Test net output #1: loss = 1.22856 (* 1 = 1.22856 loss)\nI0818 00:14:02.616582 17350 solver.cpp:228] Iteration 37500, loss = 0.0947539\nI0818 00:14:02.616628 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:14:02.616647 17350 solver.cpp:244]     Train net output #1: loss = 0.0947535 (* 1 = 0.0947535 loss)\nI0818 00:14:02.694376 17350 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0818 00:14:50.218621 17350 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 00:15:17.059006 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75956\nI0818 00:15:17.059059 17350 solver.cpp:404]     Test net output #1: loss = 1.19855 (* 1 = 1.19855 loss)\nI0818 00:15:17.482342 17350 solver.cpp:228] Iteration 37600, loss = 0.0230054\nI0818 00:15:17.482400 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:15:17.482419 17350 solver.cpp:244]     Train net output #1: loss = 0.023005 (* 1 = 0.023005 loss)\nI0818 00:15:17.561406 17350 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0818 00:16:05.053124 17350 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 00:16:31.915032 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76424\nI0818 00:16:31.915081 17350 solver.cpp:404]     Test net output #1: loss = 1.18503 (* 1 = 1.18503 loss)\nI0818 00:16:32.340077 17350 solver.cpp:228] Iteration 37700, loss = 0.0565839\nI0818 00:16:32.340138 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:16:32.340157 17350 solver.cpp:244]     Train net output #1: loss = 0.0565834 (* 1 = 0.0565834 loss)\nI0818 00:16:32.419330 17350 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0818 00:17:19.904764 17350 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 00:17:46.657469 17350 solver.cpp:404]     Test net 
output #0: accuracy = 0.73272\nI0818 00:17:46.657516 17350 solver.cpp:404]     Test net output #1: loss = 1.35263 (* 1 = 1.35263 loss)\nI0818 00:17:47.082248 17350 solver.cpp:228] Iteration 37800, loss = 0.0626729\nI0818 00:17:47.082301 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:17:47.082319 17350 solver.cpp:244]     Train net output #1: loss = 0.0626725 (* 1 = 0.0626725 loss)\nI0818 00:17:47.162072 17350 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0818 00:18:34.595146 17350 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 00:19:01.457480 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75508\nI0818 00:19:01.457525 17350 solver.cpp:404]     Test net output #1: loss = 1.21866 (* 1 = 1.21866 loss)\nI0818 00:19:01.882035 17350 solver.cpp:228] Iteration 37900, loss = 0.0841283\nI0818 00:19:01.882086 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:19:01.882102 17350 solver.cpp:244]     Train net output #1: loss = 0.0841278 (* 1 = 0.0841278 loss)\nI0818 00:19:01.957648 17350 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0818 00:19:49.381080 17350 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 00:20:16.115320 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76392\nI0818 00:20:16.115370 17350 solver.cpp:404]     Test net output #1: loss = 1.23846 (* 1 = 1.23846 loss)\nI0818 00:20:16.540002 17350 solver.cpp:228] Iteration 38000, loss = 0.0519887\nI0818 00:20:16.540051 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:20:16.540067 17350 solver.cpp:244]     Train net output #1: loss = 0.0519882 (* 1 = 0.0519882 loss)\nI0818 00:20:16.620730 17350 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0818 00:21:04.052448 17350 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 00:21:30.862864 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75728\nI0818 00:21:30.862910 17350 solver.cpp:404]     Test net output #1: loss = 1.16407 (* 1 = 
1.16407 loss)\nI0818 00:21:31.287606 17350 solver.cpp:228] Iteration 38100, loss = 0.082549\nI0818 00:21:31.287659 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:21:31.287678 17350 solver.cpp:244]     Train net output #1: loss = 0.0825485 (* 1 = 0.0825485 loss)\nI0818 00:21:31.365463 17350 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0818 00:22:18.888002 17350 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 00:22:45.737432 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76208\nI0818 00:22:45.737479 17350 solver.cpp:404]     Test net output #1: loss = 1.2001 (* 1 = 1.2001 loss)\nI0818 00:22:46.161200 17350 solver.cpp:228] Iteration 38200, loss = 0.092862\nI0818 00:22:46.161250 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 00:22:46.161267 17350 solver.cpp:244]     Train net output #1: loss = 0.0928615 (* 1 = 0.0928615 loss)\nI0818 00:22:46.239819 17350 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0818 00:23:33.713861 17350 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 00:24:00.581651 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74296\nI0818 00:24:00.581701 17350 solver.cpp:404]     Test net output #1: loss = 1.23624 (* 1 = 1.23624 loss)\nI0818 00:24:01.006381 17350 solver.cpp:228] Iteration 38300, loss = 0.0918479\nI0818 00:24:01.006430 17350 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 00:24:01.006448 17350 solver.cpp:244]     Train net output #1: loss = 0.0918475 (* 1 = 0.0918475 loss)\nI0818 00:24:01.086396 17350 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0818 00:24:48.532315 17350 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 00:25:15.354840 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7552\nI0818 00:25:15.354889 17350 solver.cpp:404]     Test net output #1: loss = 1.40421 (* 1 = 1.40421 loss)\nI0818 00:25:15.779505 17350 solver.cpp:228] Iteration 38400, loss = 0.0577498\nI0818 00:25:15.779559 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:25:15.779575 17350 solver.cpp:244]     Train net output #1: loss = 0.0577493 (* 1 = 0.0577493 loss)\nI0818 00:25:15.860757 17350 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0818 00:26:03.270488 17350 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 00:26:30.109937 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82292\nI0818 00:26:30.109987 17350 solver.cpp:404]     Test net output #1: loss = 0.71573 (* 1 = 0.71573 loss)\nI0818 00:26:30.534387 17350 solver.cpp:228] Iteration 38500, loss = 0.0697232\nI0818 00:26:30.534441 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:26:30.534458 17350 solver.cpp:244]     Train net output #1: loss = 0.0697227 (* 1 = 0.0697227 loss)\nI0818 00:26:30.612491 17350 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0818 00:27:18.020295 17350 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 00:27:44.876350 17350 solver.cpp:404]     Test net output #0: accuracy = 0.772\nI0818 00:27:44.876399 17350 solver.cpp:404]     Test net output #1: loss = 1.0979 (* 1 = 1.0979 loss)\nI0818 00:27:45.300721 17350 solver.cpp:228] Iteration 38600, loss = 0.0691722\nI0818 00:27:45.300765 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:27:45.300781 17350 solver.cpp:244]     Train net output #1: loss = 0.0691717 (* 1 = 0.0691717 loss)\nI0818 00:27:45.381592 17350 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0818 00:28:32.769907 17350 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 00:28:59.602147 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82456\nI0818 00:28:59.602196 17350 solver.cpp:404]     Test net output #1: loss = 0.704044 (* 1 = 0.704044 loss)\nI0818 00:29:00.026489 17350 solver.cpp:228] Iteration 38700, loss = 0.0619024\nI0818 00:29:00.026546 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:29:00.026562 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0619019 (* 1 = 0.0619019 loss)\nI0818 00:29:00.105224 17350 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0818 00:29:47.514436 17350 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 00:30:14.313815 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78372\nI0818 00:30:14.313865 17350 solver.cpp:404]     Test net output #1: loss = 0.945827 (* 1 = 0.945827 loss)\nI0818 00:30:14.738090 17350 solver.cpp:228] Iteration 38800, loss = 0.063494\nI0818 00:30:14.738147 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:30:14.738164 17350 solver.cpp:244]     Train net output #1: loss = 0.0634934 (* 1 = 0.0634934 loss)\nI0818 00:30:14.819249 17350 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0818 00:31:02.326510 17350 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 00:31:29.125608 17350 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI0818 00:31:29.125658 17350 solver.cpp:404]     Test net output #1: loss = 0.8336 (* 1 = 0.8336 loss)\nI0818 00:31:29.549885 17350 solver.cpp:228] Iteration 38900, loss = 0.0531865\nI0818 00:31:29.549926 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:31:29.549943 17350 solver.cpp:244]     Train net output #1: loss = 0.0531859 (* 1 = 0.0531859 loss)\nI0818 00:31:29.623592 17350 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0818 00:32:17.085392 17350 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 00:32:43.872522 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80176\nI0818 00:32:43.872573 17350 solver.cpp:404]     Test net output #1: loss = 0.853839 (* 1 = 0.853839 loss)\nI0818 00:32:44.295949 17350 solver.cpp:228] Iteration 39000, loss = 0.0529413\nI0818 00:32:44.295994 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:32:44.296011 17350 solver.cpp:244]     Train net output #1: loss = 0.0529407 (* 1 = 0.0529407 loss)\nI0818 00:32:44.370441 17350 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0818 
00:33:31.838946 17350 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 00:33:58.633088 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78436\nI0818 00:33:58.633137 17350 solver.cpp:404]     Test net output #1: loss = 0.98188 (* 1 = 0.98188 loss)\nI0818 00:33:59.057114 17350 solver.cpp:228] Iteration 39100, loss = 0.0570597\nI0818 00:33:59.057169 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:33:59.057195 17350 solver.cpp:244]     Train net output #1: loss = 0.0570591 (* 1 = 0.0570591 loss)\nI0818 00:33:59.129639 17350 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0818 00:34:46.610384 17350 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 00:35:13.334972 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79532\nI0818 00:35:13.335018 17350 solver.cpp:404]     Test net output #1: loss = 0.905366 (* 1 = 0.905366 loss)\nI0818 00:35:13.759038 17350 solver.cpp:228] Iteration 39200, loss = 0.0606459\nI0818 00:35:13.759080 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:35:13.759097 17350 solver.cpp:244]     Train net output #1: loss = 0.0606454 (* 1 = 0.0606454 loss)\nI0818 00:35:13.831660 17350 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0818 00:36:01.305043 17350 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 00:36:28.060634 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74164\nI0818 00:36:28.060681 17350 solver.cpp:404]     Test net output #1: loss = 1.3877 (* 1 = 1.3877 loss)\nI0818 00:36:28.485345 17350 solver.cpp:228] Iteration 39300, loss = 0.0687253\nI0818 00:36:28.485394 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:36:28.485411 17350 solver.cpp:244]     Train net output #1: loss = 0.0687247 (* 1 = 0.0687247 loss)\nI0818 00:36:28.559726 17350 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0818 00:37:16.019377 17350 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 00:37:42.742040 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.79328\nI0818 00:37:42.742090 17350 solver.cpp:404]     Test net output #1: loss = 0.986682 (* 1 = 0.986682 loss)\nI0818 00:37:43.166311 17350 solver.cpp:228] Iteration 39400, loss = 0.0253903\nI0818 00:37:43.166353 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:37:43.166370 17350 solver.cpp:244]     Train net output #1: loss = 0.0253898 (* 1 = 0.0253898 loss)\nI0818 00:37:43.243522 17350 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0818 00:38:30.746999 17350 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 00:38:57.576232 17350 solver.cpp:404]     Test net output #0: accuracy = 0.788\nI0818 00:38:57.576282 17350 solver.cpp:404]     Test net output #1: loss = 1.06667 (* 1 = 1.06667 loss)\nI0818 00:38:57.999804 17350 solver.cpp:228] Iteration 39500, loss = 0.0245974\nI0818 00:38:57.999847 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:38:57.999862 17350 solver.cpp:244]     Train net output #1: loss = 0.0245969 (* 1 = 0.0245969 loss)\nI0818 00:38:58.080179 17350 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0818 00:39:45.603513 17350 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 00:40:12.407837 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75268\nI0818 00:40:12.407884 17350 solver.cpp:404]     Test net output #1: loss = 1.24874 (* 1 = 1.24874 loss)\nI0818 00:40:12.832767 17350 solver.cpp:228] Iteration 39600, loss = 0.0938391\nI0818 00:40:12.832825 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 00:40:12.832844 17350 solver.cpp:244]     Train net output #1: loss = 0.0938386 (* 1 = 0.0938386 loss)\nI0818 00:40:12.907491 17350 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0818 00:41:00.412549 17350 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 00:41:27.141782 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71024\nI0818 00:41:27.141827 17350 solver.cpp:404]     Test net output #1: loss = 1.49051 (* 1 = 1.49051 
loss)\nI0818 00:41:27.566460 17350 solver.cpp:228] Iteration 39700, loss = 0.0710527\nI0818 00:41:27.566519 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:41:27.566537 17350 solver.cpp:244]     Train net output #1: loss = 0.0710521 (* 1 = 0.0710521 loss)\nI0818 00:41:27.640486 17350 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0818 00:42:15.114673 17350 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 00:42:41.805886 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81672\nI0818 00:42:41.805939 17350 solver.cpp:404]     Test net output #1: loss = 0.756512 (* 1 = 0.756512 loss)\nI0818 00:42:42.230621 17350 solver.cpp:228] Iteration 39800, loss = 0.0474432\nI0818 00:42:42.230679 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:42:42.230696 17350 solver.cpp:244]     Train net output #1: loss = 0.0474426 (* 1 = 0.0474426 loss)\nI0818 00:42:42.308578 17350 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0818 00:43:29.815429 17350 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 00:43:56.524091 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7934\nI0818 00:43:56.524139 17350 solver.cpp:404]     Test net output #1: loss = 1.07551 (* 1 = 1.07551 loss)\nI0818 00:43:56.949071 17350 solver.cpp:228] Iteration 39900, loss = 0.0438415\nI0818 00:43:56.949132 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:43:56.949149 17350 solver.cpp:244]     Train net output #1: loss = 0.0438409 (* 1 = 0.0438409 loss)\nI0818 00:43:57.028547 17350 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0818 00:44:44.519814 17350 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 00:45:11.210675 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79264\nI0818 00:45:11.210726 17350 solver.cpp:404]     Test net output #1: loss = 1.03707 (* 1 = 1.03707 loss)\nI0818 00:45:11.635707 17350 solver.cpp:228] Iteration 40000, loss = 0.0306522\nI0818 00:45:11.635769 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:45:11.635794 17350 solver.cpp:244]     Train net output #1: loss = 0.0306516 (* 1 = 0.0306516 loss)\nI0818 00:45:11.715916 17350 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0818 00:45:59.138142 17350 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 00:46:25.824251 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82264\nI0818 00:46:25.824314 17350 solver.cpp:404]     Test net output #1: loss = 0.78279 (* 1 = 0.78279 loss)\nI0818 00:46:26.249066 17350 solver.cpp:228] Iteration 40100, loss = 0.0679725\nI0818 00:46:26.249127 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:46:26.249146 17350 solver.cpp:244]     Train net output #1: loss = 0.0679719 (* 1 = 0.0679719 loss)\nI0818 00:46:26.326902 17350 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0818 00:47:13.797536 17350 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 00:47:40.482208 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79292\nI0818 00:47:40.482254 17350 solver.cpp:404]     Test net output #1: loss = 0.988632 (* 1 = 0.988632 loss)\nI0818 00:47:40.907097 17350 solver.cpp:228] Iteration 40200, loss = 0.07033\nI0818 00:47:40.907156 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:47:40.907174 17350 solver.cpp:244]     Train net output #1: loss = 0.0703294 (* 1 = 0.0703294 loss)\nI0818 00:47:40.987866 17350 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0818 00:48:28.391397 17350 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 00:48:55.069882 17350 solver.cpp:404]     Test net output #0: accuracy = 0.816\nI0818 00:48:55.069927 17350 solver.cpp:404]     Test net output #1: loss = 0.74429 (* 1 = 0.74429 loss)\nI0818 00:48:55.494858 17350 solver.cpp:228] Iteration 40300, loss = 0.110968\nI0818 00:48:55.494918 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 00:48:55.494936 17350 solver.cpp:244]     Train net output #1: 
loss = 0.110967 (* 1 = 0.110967 loss)\nI0818 00:48:55.573544 17350 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0818 00:49:42.949523 17350 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 00:50:09.661407 17350 solver.cpp:404]     Test net output #0: accuracy = 0.67524\nI0818 00:50:09.661454 17350 solver.cpp:404]     Test net output #1: loss = 1.72235 (* 1 = 1.72235 loss)\nI0818 00:50:10.086112 17350 solver.cpp:228] Iteration 40400, loss = 0.0651827\nI0818 00:50:10.086171 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:50:10.086189 17350 solver.cpp:244]     Train net output #1: loss = 0.0651821 (* 1 = 0.0651821 loss)\nI0818 00:50:10.163770 17350 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0818 00:50:57.569908 17350 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 00:51:24.300406 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78204\nI0818 00:51:24.300452 17350 solver.cpp:404]     Test net output #1: loss = 0.922516 (* 1 = 0.922516 loss)\nI0818 00:51:24.724663 17350 solver.cpp:228] Iteration 40500, loss = 0.0364822\nI0818 00:51:24.724707 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:51:24.724723 17350 solver.cpp:244]     Train net output #1: loss = 0.0364816 (* 1 = 0.0364816 loss)\nI0818 00:51:24.806824 17350 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0818 00:52:12.099714 17350 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 00:52:38.779523 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71916\nI0818 00:52:38.779578 17350 solver.cpp:404]     Test net output #1: loss = 1.35545 (* 1 = 1.35545 loss)\nI0818 00:52:39.203390 17350 solver.cpp:228] Iteration 40600, loss = 0.0752872\nI0818 00:52:39.203438 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:52:39.203454 17350 solver.cpp:244]     Train net output #1: loss = 0.0752866 (* 1 = 0.0752866 loss)\nI0818 00:52:39.282706 17350 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0818 
00:53:26.526762 17350 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 00:53:53.320600 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78572\nI0818 00:53:53.320649 17350 solver.cpp:404]     Test net output #1: loss = 0.961456 (* 1 = 0.961456 loss)\nI0818 00:53:53.744822 17350 solver.cpp:228] Iteration 40700, loss = 0.116968\nI0818 00:53:53.744880 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 00:53:53.744899 17350 solver.cpp:244]     Train net output #1: loss = 0.116968 (* 1 = 0.116968 loss)\nI0818 00:53:53.825122 17350 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0818 00:54:41.125069 17350 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 00:55:07.875157 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79564\nI0818 00:55:07.875205 17350 solver.cpp:404]     Test net output #1: loss = 0.929607 (* 1 = 0.929607 loss)\nI0818 00:55:08.298395 17350 solver.cpp:228] Iteration 40800, loss = 0.0531842\nI0818 00:55:08.298452 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:55:08.298470 17350 solver.cpp:244]     Train net output #1: loss = 0.0531836 (* 1 = 0.0531836 loss)\nI0818 00:55:08.377686 17350 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0818 00:55:55.656563 17350 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 00:56:22.467226 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72444\nI0818 00:56:22.467275 17350 solver.cpp:404]     Test net output #1: loss = 1.37473 (* 1 = 1.37473 loss)\nI0818 00:56:22.890434 17350 solver.cpp:228] Iteration 40900, loss = 0.0676471\nI0818 00:56:22.890493 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 00:56:22.890511 17350 solver.cpp:244]     Train net output #1: loss = 0.0676465 (* 1 = 0.0676465 loss)\nI0818 00:56:22.967881 17350 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0818 00:57:10.234297 17350 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 00:57:37.085320 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.81224\nI0818 00:57:37.085366 17350 solver.cpp:404]     Test net output #1: loss = 0.779464 (* 1 = 0.779464 loss)\nI0818 00:57:37.508200 17350 solver.cpp:228] Iteration 41000, loss = 0.0400805\nI0818 00:57:37.508256 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 00:57:37.508275 17350 solver.cpp:244]     Train net output #1: loss = 0.0400799 (* 1 = 0.0400799 loss)\nI0818 00:57:37.591603 17350 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0818 00:58:24.888306 17350 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 00:58:51.705644 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77748\nI0818 00:58:51.705691 17350 solver.cpp:404]     Test net output #1: loss = 0.902302 (* 1 = 0.902302 loss)\nI0818 00:58:52.128484 17350 solver.cpp:228] Iteration 41100, loss = 0.0927562\nI0818 00:58:52.128526 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 00:58:52.128543 17350 solver.cpp:244]     Train net output #1: loss = 0.0927556 (* 1 = 0.0927556 loss)\nI0818 00:58:52.212700 17350 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0818 00:59:39.503074 17350 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 01:00:06.328557 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76496\nI0818 01:00:06.328604 17350 solver.cpp:404]     Test net output #1: loss = 1.07686 (* 1 = 1.07686 loss)\nI0818 01:00:06.751502 17350 solver.cpp:228] Iteration 41200, loss = 0.0157203\nI0818 01:00:06.751545 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:00:06.751564 17350 solver.cpp:244]     Train net output #1: loss = 0.0157198 (* 1 = 0.0157198 loss)\nI0818 01:00:06.832474 17350 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0818 01:00:54.144059 17350 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 01:01:21.002292 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7718\nI0818 01:01:21.002341 17350 solver.cpp:404]     Test net output #1: loss = 1.07691 (* 1 = 1.07691 
loss)\nI0818 01:01:21.425349 17350 solver.cpp:228] Iteration 41300, loss = 0.123759\nI0818 01:01:21.425390 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:01:21.425407 17350 solver.cpp:244]     Train net output #1: loss = 0.123758 (* 1 = 0.123758 loss)\nI0818 01:01:21.504380 17350 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0818 01:02:08.855420 17350 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 01:02:35.656363 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74332\nI0818 01:02:35.656410 17350 solver.cpp:404]     Test net output #1: loss = 1.28331 (* 1 = 1.28331 loss)\nI0818 01:02:36.079442 17350 solver.cpp:228] Iteration 41400, loss = 0.0642839\nI0818 01:02:36.079499 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:02:36.079515 17350 solver.cpp:244]     Train net output #1: loss = 0.0642834 (* 1 = 0.0642834 loss)\nI0818 01:02:36.157586 17350 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0818 01:03:23.515908 17350 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 01:03:50.352246 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75424\nI0818 01:03:50.352294 17350 solver.cpp:404]     Test net output #1: loss = 1.10609 (* 1 = 1.10609 loss)\nI0818 01:03:50.774951 17350 solver.cpp:228] Iteration 41500, loss = 0.104387\nI0818 01:03:50.775007 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:03:50.775030 17350 solver.cpp:244]     Train net output #1: loss = 0.104386 (* 1 = 0.104386 loss)\nI0818 01:03:50.856854 17350 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0818 01:04:38.170629 17350 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 01:05:04.998808 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI0818 01:05:04.998857 17350 solver.cpp:404]     Test net output #1: loss = 0.823479 (* 1 = 0.823479 loss)\nI0818 01:05:05.421957 17350 solver.cpp:228] Iteration 41600, loss = 0.0718063\nI0818 01:05:05.422019 17350 solver.cpp:244]    
 Train net output #0: accuracy = 0.968\nI0818 01:05:05.422034 17350 solver.cpp:244]     Train net output #1: loss = 0.0718058 (* 1 = 0.0718058 loss)\nI0818 01:05:05.497789 17350 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0818 01:05:52.804441 17350 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 01:06:19.656613 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73772\nI0818 01:06:19.656659 17350 solver.cpp:404]     Test net output #1: loss = 1.37246 (* 1 = 1.37246 loss)\nI0818 01:06:20.079735 17350 solver.cpp:228] Iteration 41700, loss = 0.0373977\nI0818 01:06:20.079793 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:06:20.079813 17350 solver.cpp:244]     Train net output #1: loss = 0.0373971 (* 1 = 0.0373971 loss)\nI0818 01:06:20.158403 17350 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0818 01:07:07.477355 17350 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 01:07:34.321707 17350 solver.cpp:404]     Test net output #0: accuracy = 0.83692\nI0818 01:07:34.321754 17350 solver.cpp:404]     Test net output #1: loss = 0.685337 (* 1 = 0.685337 loss)\nI0818 01:07:34.744628 17350 solver.cpp:228] Iteration 41800, loss = 0.0578807\nI0818 01:07:34.744686 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:07:34.744704 17350 solver.cpp:244]     Train net output #1: loss = 0.0578802 (* 1 = 0.0578802 loss)\nI0818 01:07:34.826550 17350 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0818 01:08:22.132496 17350 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 01:08:48.890333 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79448\nI0818 01:08:48.890377 17350 solver.cpp:404]     Test net output #1: loss = 0.853477 (* 1 = 0.853477 loss)\nI0818 01:08:49.313295 17350 solver.cpp:228] Iteration 41900, loss = 0.053778\nI0818 01:08:49.313354 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:08:49.313372 17350 solver.cpp:244]     Train net output #1: loss = 
0.0537774 (* 1 = 0.0537774 loss)\nI0818 01:08:49.395040 17350 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0818 01:09:36.713116 17350 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 01:10:03.430747 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75632\nI0818 01:10:03.430797 17350 solver.cpp:404]     Test net output #1: loss = 1.20677 (* 1 = 1.20677 loss)\nI0818 01:10:03.855015 17350 solver.cpp:228] Iteration 42000, loss = 0.0902796\nI0818 01:10:03.855077 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:10:03.855101 17350 solver.cpp:244]     Train net output #1: loss = 0.090279 (* 1 = 0.090279 loss)\nI0818 01:10:03.936385 17350 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0818 01:10:51.278558 17350 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 01:11:18.097133 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74836\nI0818 01:11:18.097180 17350 solver.cpp:404]     Test net output #1: loss = 1.15664 (* 1 = 1.15664 loss)\nI0818 01:11:18.520265 17350 solver.cpp:228] Iteration 42100, loss = 0.0962638\nI0818 01:11:18.520323 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:11:18.520340 17350 solver.cpp:244]     Train net output #1: loss = 0.0962632 (* 1 = 0.0962632 loss)\nI0818 01:11:18.598774 17350 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0818 01:12:05.935307 17350 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 01:12:32.695370 17350 solver.cpp:404]     Test net output #0: accuracy = 0.83412\nI0818 01:12:32.695420 17350 solver.cpp:404]     Test net output #1: loss = 0.689165 (* 1 = 0.689165 loss)\nI0818 01:12:33.118463 17350 solver.cpp:228] Iteration 42200, loss = 0.0768923\nI0818 01:12:33.118521 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:12:33.118540 17350 solver.cpp:244]     Train net output #1: loss = 0.0768917 (* 1 = 0.0768917 loss)\nI0818 01:12:33.193231 17350 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0818 
01:13:20.508538 17350 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 01:13:47.347599 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74012\nI0818 01:13:47.347643 17350 solver.cpp:404]     Test net output #1: loss = 1.41668 (* 1 = 1.41668 loss)\nI0818 01:13:47.770561 17350 solver.cpp:228] Iteration 42300, loss = 0.0928999\nI0818 01:13:47.770617 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:13:47.770634 17350 solver.cpp:244]     Train net output #1: loss = 0.0928993 (* 1 = 0.0928993 loss)\nI0818 01:13:47.848439 17350 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0818 01:14:35.200073 17350 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 01:15:02.063237 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79956\nI0818 01:15:02.063284 17350 solver.cpp:404]     Test net output #1: loss = 0.993646 (* 1 = 0.993646 loss)\nI0818 01:15:02.486306 17350 solver.cpp:228] Iteration 42400, loss = 0.0551197\nI0818 01:15:02.486363 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:15:02.486382 17350 solver.cpp:244]     Train net output #1: loss = 0.0551191 (* 1 = 0.0551191 loss)\nI0818 01:15:02.567872 17350 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0818 01:15:49.889310 17350 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 01:16:16.607774 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77364\nI0818 01:16:16.607820 17350 solver.cpp:404]     Test net output #1: loss = 1.06716 (* 1 = 1.06716 loss)\nI0818 01:16:17.030513 17350 solver.cpp:228] Iteration 42500, loss = 0.0756915\nI0818 01:16:17.030571 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:16:17.030588 17350 solver.cpp:244]     Train net output #1: loss = 0.075691 (* 1 = 0.075691 loss)\nI0818 01:16:17.111660 17350 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0818 01:17:04.425727 17350 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 01:17:31.151229 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.68512\nI0818 01:17:31.151273 17350 solver.cpp:404]     Test net output #1: loss = 1.81207 (* 1 = 1.81207 loss)\nI0818 01:17:31.574208 17350 solver.cpp:228] Iteration 42600, loss = 0.0598739\nI0818 01:17:31.574265 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:17:31.574283 17350 solver.cpp:244]     Train net output #1: loss = 0.0598734 (* 1 = 0.0598734 loss)\nI0818 01:17:31.655318 17350 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0818 01:18:18.957617 17350 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 01:18:45.699493 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72484\nI0818 01:18:45.699540 17350 solver.cpp:404]     Test net output #1: loss = 1.51544 (* 1 = 1.51544 loss)\nI0818 01:18:46.123581 17350 solver.cpp:228] Iteration 42700, loss = 0.0142636\nI0818 01:18:46.123639 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:18:46.123657 17350 solver.cpp:244]     Train net output #1: loss = 0.014263 (* 1 = 0.014263 loss)\nI0818 01:18:46.206800 17350 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0818 01:19:33.520825 17350 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 01:20:00.341251 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80904\nI0818 01:20:00.341297 17350 solver.cpp:404]     Test net output #1: loss = 0.791997 (* 1 = 0.791997 loss)\nI0818 01:20:00.764170 17350 solver.cpp:228] Iteration 42800, loss = 0.0308131\nI0818 01:20:00.764226 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:20:00.764245 17350 solver.cpp:244]     Train net output #1: loss = 0.0308126 (* 1 = 0.0308126 loss)\nI0818 01:20:00.839958 17350 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0818 01:20:48.159646 17350 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 01:21:14.971762 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7636\nI0818 01:21:14.971807 17350 solver.cpp:404]     Test net output #1: loss = 1.10283 (* 1 = 1.10283 
loss)\nI0818 01:21:15.394588 17350 solver.cpp:228] Iteration 42900, loss = 0.0406375\nI0818 01:21:15.394644 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:21:15.394661 17350 solver.cpp:244]     Train net output #1: loss = 0.0406369 (* 1 = 0.0406369 loss)\nI0818 01:21:15.477843 17350 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0818 01:22:02.790520 17350 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 01:22:29.645980 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76808\nI0818 01:22:29.646034 17350 solver.cpp:404]     Test net output #1: loss = 1.11729 (* 1 = 1.11729 loss)\nI0818 01:22:30.069202 17350 solver.cpp:228] Iteration 43000, loss = 0.0480386\nI0818 01:22:30.069260 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:22:30.069278 17350 solver.cpp:244]     Train net output #1: loss = 0.0480381 (* 1 = 0.0480381 loss)\nI0818 01:22:30.147081 17350 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0818 01:23:17.452978 17350 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 01:23:44.299438 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80812\nI0818 01:23:44.299484 17350 solver.cpp:404]     Test net output #1: loss = 0.911296 (* 1 = 0.911296 loss)\nI0818 01:23:44.722134 17350 solver.cpp:228] Iteration 43100, loss = 0.0589383\nI0818 01:23:44.722192 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:23:44.722210 17350 solver.cpp:244]     Train net output #1: loss = 0.0589378 (* 1 = 0.0589378 loss)\nI0818 01:23:44.802179 17350 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0818 01:24:32.120247 17350 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 01:24:58.841006 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72796\nI0818 01:24:58.841055 17350 solver.cpp:404]     Test net output #1: loss = 1.3855 (* 1 = 1.3855 loss)\nI0818 01:24:59.263927 17350 solver.cpp:228] Iteration 43200, loss = 0.0397683\nI0818 01:24:59.263984 17350 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:24:59.264001 17350 solver.cpp:244]     Train net output #1: loss = 0.0397678 (* 1 = 0.0397678 loss)\nI0818 01:24:59.344589 17350 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0818 01:25:46.640126 17350 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 01:26:13.359652 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74056\nI0818 01:26:13.359697 17350 solver.cpp:404]     Test net output #1: loss = 1.32856 (* 1 = 1.32856 loss)\nI0818 01:26:13.782686 17350 solver.cpp:228] Iteration 43300, loss = 0.0380746\nI0818 01:26:13.782744 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:26:13.782762 17350 solver.cpp:244]     Train net output #1: loss = 0.0380741 (* 1 = 0.0380741 loss)\nI0818 01:26:13.864434 17350 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0818 01:27:01.200513 17350 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 01:27:27.916697 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82064\nI0818 01:27:27.916743 17350 solver.cpp:404]     Test net output #1: loss = 0.779031 (* 1 = 0.779031 loss)\nI0818 01:27:28.339809 17350 solver.cpp:228] Iteration 43400, loss = 0.0292412\nI0818 01:27:28.339864 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:27:28.339882 17350 solver.cpp:244]     Train net output #1: loss = 0.0292407 (* 1 = 0.0292407 loss)\nI0818 01:27:28.424593 17350 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0818 01:28:15.742904 17350 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 01:28:42.461335 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81504\nI0818 01:28:42.461380 17350 solver.cpp:404]     Test net output #1: loss = 0.767911 (* 1 = 0.767911 loss)\nI0818 01:28:42.884383 17350 solver.cpp:228] Iteration 43500, loss = 0.0555005\nI0818 01:28:42.884441 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:28:42.884459 17350 solver.cpp:244]     Train net output #1: 
loss = 0.0555001 (* 1 = 0.0555001 loss)\nI0818 01:28:42.972235 17350 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0818 01:29:30.250291 17350 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 01:29:56.967139 17350 solver.cpp:404]     Test net output #0: accuracy = 0.776\nI0818 01:29:56.967185 17350 solver.cpp:404]     Test net output #1: loss = 1.16798 (* 1 = 1.16798 loss)\nI0818 01:29:57.390019 17350 solver.cpp:228] Iteration 43600, loss = 0.0413402\nI0818 01:29:57.390061 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:29:57.390077 17350 solver.cpp:244]     Train net output #1: loss = 0.0413397 (* 1 = 0.0413397 loss)\nI0818 01:29:57.466236 17350 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0818 01:30:44.719483 17350 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 01:31:11.448915 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8028\nI0818 01:31:11.448963 17350 solver.cpp:404]     Test net output #1: loss = 0.94284 (* 1 = 0.94284 loss)\nI0818 01:31:11.871727 17350 solver.cpp:228] Iteration 43700, loss = 0.108302\nI0818 01:31:11.871764 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:31:11.871780 17350 solver.cpp:244]     Train net output #1: loss = 0.108301 (* 1 = 0.108301 loss)\nI0818 01:31:11.952426 17350 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0818 01:31:59.268858 17350 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 01:32:26.093369 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7812\nI0818 01:32:26.093430 17350 solver.cpp:404]     Test net output #1: loss = 1.1466 (* 1 = 1.1466 loss)\nI0818 01:32:26.516355 17350 solver.cpp:228] Iteration 43800, loss = 0.0413331\nI0818 01:32:26.516391 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:32:26.516407 17350 solver.cpp:244]     Train net output #1: loss = 0.0413326 (* 1 = 0.0413326 loss)\nI0818 01:32:26.594612 17350 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0818 
01:33:13.884799 17350 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 01:33:40.719228 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80132\nI0818 01:33:40.719275 17350 solver.cpp:404]     Test net output #1: loss = 0.795239 (* 1 = 0.795239 loss)\nI0818 01:33:41.142328 17350 solver.cpp:228] Iteration 43900, loss = 0.0826784\nI0818 01:33:41.142366 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:33:41.142382 17350 solver.cpp:244]     Train net output #1: loss = 0.0826779 (* 1 = 0.0826779 loss)\nI0818 01:33:41.223527 17350 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0818 01:34:28.530097 17350 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 01:34:55.250461 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79072\nI0818 01:34:55.250507 17350 solver.cpp:404]     Test net output #1: loss = 0.98041 (* 1 = 0.98041 loss)\nI0818 01:34:55.673380 17350 solver.cpp:228] Iteration 44000, loss = 0.073465\nI0818 01:34:55.673424 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:34:55.673439 17350 solver.cpp:244]     Train net output #1: loss = 0.0734645 (* 1 = 0.0734645 loss)\nI0818 01:34:55.749222 17350 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0818 01:35:43.069867 17350 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 01:36:09.772035 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78864\nI0818 01:36:09.772083 17350 solver.cpp:404]     Test net output #1: loss = 1.12481 (* 1 = 1.12481 loss)\nI0818 01:36:10.195339 17350 solver.cpp:228] Iteration 44100, loss = 0.0652945\nI0818 01:36:10.195381 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:36:10.195399 17350 solver.cpp:244]     Train net output #1: loss = 0.065294 (* 1 = 0.065294 loss)\nI0818 01:36:10.270162 17350 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0818 01:36:57.518191 17350 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0818 01:37:24.352346 17350 solver.cpp:404]     Test net 
output #0: accuracy = 0.79124\nI0818 01:37:24.352394 17350 solver.cpp:404]     Test net output #1: loss = 0.920934 (* 1 = 0.920934 loss)\nI0818 01:37:24.775405 17350 solver.cpp:228] Iteration 44200, loss = 0.0363449\nI0818 01:37:24.775450 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:37:24.775467 17350 solver.cpp:244]     Train net output #1: loss = 0.0363445 (* 1 = 0.0363445 loss)\nI0818 01:37:24.853693 17350 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0818 01:38:12.051081 17350 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 01:38:38.880439 17350 solver.cpp:404]     Test net output #0: accuracy = 0.73988\nI0818 01:38:38.880486 17350 solver.cpp:404]     Test net output #1: loss = 1.26588 (* 1 = 1.26588 loss)\nI0818 01:38:39.303521 17350 solver.cpp:228] Iteration 44300, loss = 0.0455146\nI0818 01:38:39.303580 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:38:39.303597 17350 solver.cpp:244]     Train net output #1: loss = 0.0455142 (* 1 = 0.0455142 loss)\nI0818 01:38:39.383492 17350 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0818 01:39:26.568372 17350 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 01:39:53.399852 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80272\nI0818 01:39:53.399895 17350 solver.cpp:404]     Test net output #1: loss = 0.97408 (* 1 = 0.97408 loss)\nI0818 01:39:53.822836 17350 solver.cpp:228] Iteration 44400, loss = 0.0203462\nI0818 01:39:53.822893 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 01:39:53.822911 17350 solver.cpp:244]     Train net output #1: loss = 0.0203457 (* 1 = 0.0203457 loss)\nI0818 01:39:53.903257 17350 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0818 01:40:41.099210 17350 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 01:41:07.951225 17350 solver.cpp:404]     Test net output #0: accuracy = 0.69492\nI0818 01:41:07.951272 17350 solver.cpp:404]     Test net output #1: loss = 1.84253 (* 1 = 1.84253 
loss)\nI0818 01:41:08.375277 17350 solver.cpp:228] Iteration 44500, loss = 0.0778668\nI0818 01:41:08.375336 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:41:08.375355 17350 solver.cpp:244]     Train net output #1: loss = 0.0778663 (* 1 = 0.0778663 loss)\nI0818 01:41:08.457191 17350 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0818 01:41:55.647164 17350 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 01:42:22.452378 17350 solver.cpp:404]     Test net output #0: accuracy = 0.74464\nI0818 01:42:22.452432 17350 solver.cpp:404]     Test net output #1: loss = 1.36235 (* 1 = 1.36235 loss)\nI0818 01:42:22.876461 17350 solver.cpp:228] Iteration 44600, loss = 0.0462185\nI0818 01:42:22.876521 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:42:22.876540 17350 solver.cpp:244]     Train net output #1: loss = 0.0462181 (* 1 = 0.0462181 loss)\nI0818 01:42:22.952663 17350 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0818 01:43:10.138337 17350 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 01:43:36.937444 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75144\nI0818 01:43:36.937489 17350 solver.cpp:404]     Test net output #1: loss = 1.20494 (* 1 = 1.20494 loss)\nI0818 01:43:37.361650 17350 solver.cpp:228] Iteration 44700, loss = 0.0933205\nI0818 01:43:37.361711 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:43:37.361729 17350 solver.cpp:244]     Train net output #1: loss = 0.09332 (* 1 = 0.09332 loss)\nI0818 01:43:37.443012 17350 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0818 01:44:24.596412 17350 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 01:44:51.403779 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72088\nI0818 01:44:51.403825 17350 solver.cpp:404]     Test net output #1: loss = 1.51173 (* 1 = 1.51173 loss)\nI0818 01:44:51.827883 17350 solver.cpp:228] Iteration 44800, loss = 0.0874356\nI0818 01:44:51.827939 17350 solver.cpp:244]   
  Train net output #0: accuracy = 0.968\nI0818 01:44:51.827958 17350 solver.cpp:244]     Train net output #1: loss = 0.0874351 (* 1 = 0.0874351 loss)\nI0818 01:44:51.904752 17350 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0818 01:45:39.074901 17350 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 01:46:05.817205 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76344\nI0818 01:46:05.817252 17350 solver.cpp:404]     Test net output #1: loss = 1.35005 (* 1 = 1.35005 loss)\nI0818 01:46:06.241891 17350 solver.cpp:228] Iteration 44900, loss = 0.045073\nI0818 01:46:06.241948 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:46:06.241966 17350 solver.cpp:244]     Train net output #1: loss = 0.0450725 (* 1 = 0.0450725 loss)\nI0818 01:46:06.318475 17350 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0818 01:46:53.466184 17350 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 01:47:20.270306 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75504\nI0818 01:47:20.270354 17350 solver.cpp:404]     Test net output #1: loss = 1.23218 (* 1 = 1.23218 loss)\nI0818 01:47:20.694722 17350 solver.cpp:228] Iteration 45000, loss = 0.0762596\nI0818 01:47:20.694779 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:47:20.694797 17350 solver.cpp:244]     Train net output #1: loss = 0.0762591 (* 1 = 0.0762591 loss)\nI0818 01:47:20.774556 17350 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0818 01:48:07.931409 17350 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 01:48:34.779955 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81664\nI0818 01:48:34.780006 17350 solver.cpp:404]     Test net output #1: loss = 0.859358 (* 1 = 0.859358 loss)\nI0818 01:48:35.204316 17350 solver.cpp:228] Iteration 45100, loss = 0.0621379\nI0818 01:48:35.204375 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:48:35.204392 17350 solver.cpp:244]     Train net output #1: loss = 0.0621374 
(* 1 = 0.0621374 loss)\nI0818 01:48:35.278045 17350 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0818 01:49:22.448451 17350 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 01:49:49.146275 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78612\nI0818 01:49:49.146322 17350 solver.cpp:404]     Test net output #1: loss = 0.96164 (* 1 = 0.96164 loss)\nI0818 01:49:49.570443 17350 solver.cpp:228] Iteration 45200, loss = 0.0930195\nI0818 01:49:49.570502 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:49:49.570521 17350 solver.cpp:244]     Train net output #1: loss = 0.093019 (* 1 = 0.093019 loss)\nI0818 01:49:49.647851 17350 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0818 01:50:36.827047 17350 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 01:51:03.574321 17350 solver.cpp:404]     Test net output #0: accuracy = 0.71664\nI0818 01:51:03.574365 17350 solver.cpp:404]     Test net output #1: loss = 1.62233 (* 1 = 1.62233 loss)\nI0818 01:51:03.998328 17350 solver.cpp:228] Iteration 45300, loss = 0.0521959\nI0818 01:51:03.998387 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:51:03.998404 17350 solver.cpp:244]     Train net output #1: loss = 0.0521955 (* 1 = 0.0521955 loss)\nI0818 01:51:04.075978 17350 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0818 01:51:51.274170 17350 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 01:52:18.102754 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77444\nI0818 01:52:18.102813 17350 solver.cpp:404]     Test net output #1: loss = 1.01218 (* 1 = 1.01218 loss)\nI0818 01:52:18.525801 17350 solver.cpp:228] Iteration 45400, loss = 0.0389059\nI0818 01:52:18.525854 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:52:18.525871 17350 solver.cpp:244]     Train net output #1: loss = 0.0389055 (* 1 = 0.0389055 loss)\nI0818 01:52:18.606752 17350 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0818 01:53:05.740665 17350 
solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 01:53:32.635391 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75908\nI0818 01:53:32.635438 17350 solver.cpp:404]     Test net output #1: loss = 1.17607 (* 1 = 1.17607 loss)\nI0818 01:53:33.059278 17350 solver.cpp:228] Iteration 45500, loss = 0.0261226\nI0818 01:53:33.059332 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 01:53:33.059350 17350 solver.cpp:244]     Train net output #1: loss = 0.0261221 (* 1 = 0.0261221 loss)\nI0818 01:53:33.139572 17350 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0818 01:54:20.262959 17350 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 01:54:47.145082 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75784\nI0818 01:54:47.145131 17350 solver.cpp:404]     Test net output #1: loss = 1.09659 (* 1 = 1.09659 loss)\nI0818 01:54:47.569191 17350 solver.cpp:228] Iteration 45600, loss = 0.0515673\nI0818 01:54:47.569245 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:54:47.569262 17350 solver.cpp:244]     Train net output #1: loss = 0.0515668 (* 1 = 0.0515668 loss)\nI0818 01:54:47.651114 17350 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0818 01:55:34.833552 17350 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 01:56:01.713907 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7128\nI0818 01:56:01.713963 17350 solver.cpp:404]     Test net output #1: loss = 1.50531 (* 1 = 1.50531 loss)\nI0818 01:56:02.137930 17350 solver.cpp:228] Iteration 45700, loss = 0.100531\nI0818 01:56:02.137984 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 01:56:02.138001 17350 solver.cpp:244]     Train net output #1: loss = 0.100531 (* 1 = 0.100531 loss)\nI0818 01:56:02.221752 17350 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0818 01:56:49.376837 17350 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0818 01:57:16.276458 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.79804\nI0818 01:57:16.276509 17350 solver.cpp:404]     Test net output #1: loss = 0.937177 (* 1 = 0.937177 loss)\nI0818 01:57:16.700392 17350 solver.cpp:228] Iteration 45800, loss = 0.0717993\nI0818 01:57:16.700444 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 01:57:16.700461 17350 solver.cpp:244]     Train net output #1: loss = 0.0717988 (* 1 = 0.0717988 loss)\nI0818 01:57:16.778154 17350 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0818 01:58:03.954871 17350 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 01:58:30.847297 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI0818 01:58:30.847347 17350 solver.cpp:404]     Test net output #1: loss = 0.971997 (* 1 = 0.971997 loss)\nI0818 01:58:31.270303 17350 solver.cpp:228] Iteration 45900, loss = 0.0936244\nI0818 01:58:31.270355 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 01:58:31.270373 17350 solver.cpp:244]     Train net output #1: loss = 0.0936239 (* 1 = 0.0936239 loss)\nI0818 01:58:31.353128 17350 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0818 01:59:18.529563 17350 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 01:59:45.297379 17350 solver.cpp:404]     Test net output #0: accuracy = 0.72024\nI0818 01:59:45.297430 17350 solver.cpp:404]     Test net output #1: loss = 1.46248 (* 1 = 1.46248 loss)\nI0818 01:59:45.720207 17350 solver.cpp:228] Iteration 46000, loss = 0.0678891\nI0818 01:59:45.720262 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 01:59:45.720280 17350 solver.cpp:244]     Train net output #1: loss = 0.0678887 (* 1 = 0.0678887 loss)\nI0818 01:59:45.800670 17350 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0818 02:00:32.938313 17350 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 02:00:59.839838 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77504\nI0818 02:00:59.839887 17350 solver.cpp:404]     Test net output #1: loss = 1.15308 (* 1 = 1.15308 loss)\nI0818 
02:01:00.264147 17350 solver.cpp:228] Iteration 46100, loss = 0.0489704\nI0818 02:01:00.264201 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:01:00.264219 17350 solver.cpp:244]     Train net output #1: loss = 0.04897 (* 1 = 0.04897 loss)\nI0818 02:01:00.344583 17350 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0818 02:01:47.526159 17350 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 02:02:14.332100 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80408\nI0818 02:02:14.332151 17350 solver.cpp:404]     Test net output #1: loss = 0.944678 (* 1 = 0.944678 loss)\nI0818 02:02:14.756273 17350 solver.cpp:228] Iteration 46200, loss = 0.0685282\nI0818 02:02:14.756328 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:02:14.756345 17350 solver.cpp:244]     Train net output #1: loss = 0.0685278 (* 1 = 0.0685278 loss)\nI0818 02:02:14.831532 17350 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0818 02:03:02.007644 17350 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 02:03:28.725530 17350 solver.cpp:404]     Test net output #0: accuracy = 0.70964\nI0818 02:03:28.725579 17350 solver.cpp:404]     Test net output #1: loss = 1.61021 (* 1 = 1.61021 loss)\nI0818 02:03:29.149574 17350 solver.cpp:228] Iteration 46300, loss = 0.0735842\nI0818 02:03:29.149628 17350 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 02:03:29.149646 17350 solver.cpp:244]     Train net output #1: loss = 0.0735837 (* 1 = 0.0735837 loss)\nI0818 02:03:29.223654 17350 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0818 02:04:16.337846 17350 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 02:04:43.066349 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8068\nI0818 02:04:43.066397 17350 solver.cpp:404]     Test net output #1: loss = 0.887752 (* 1 = 0.887752 loss)\nI0818 02:04:43.490272 17350 solver.cpp:228] Iteration 46400, loss = 0.0980607\nI0818 02:04:43.490320 17350 solver.cpp:244]     Train 
net output #0: accuracy = 0.968\nI0818 02:04:43.490337 17350 solver.cpp:244]     Train net output #1: loss = 0.0980602 (* 1 = 0.0980602 loss)\nI0818 02:04:43.568634 17350 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0818 02:05:30.712637 17350 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 02:05:57.431332 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7576\nI0818 02:05:57.431380 17350 solver.cpp:404]     Test net output #1: loss = 1.22414 (* 1 = 1.22414 loss)\nI0818 02:05:57.853860 17350 solver.cpp:228] Iteration 46500, loss = 0.0470636\nI0818 02:05:57.853911 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:05:57.853932 17350 solver.cpp:244]     Train net output #1: loss = 0.0470632 (* 1 = 0.0470632 loss)\nI0818 02:05:57.934497 17350 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0818 02:06:45.077253 17350 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 02:07:11.788208 17350 solver.cpp:404]     Test net output #0: accuracy = 0.794\nI0818 02:07:11.788257 17350 solver.cpp:404]     Test net output #1: loss = 0.919962 (* 1 = 0.919962 loss)\nI0818 02:07:12.211230 17350 solver.cpp:228] Iteration 46600, loss = 0.0310089\nI0818 02:07:12.211280 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:07:12.211297 17350 solver.cpp:244]     Train net output #1: loss = 0.0310085 (* 1 = 0.0310085 loss)\nI0818 02:07:12.293608 17350 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0818 02:07:59.457001 17350 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 02:08:26.256624 17350 solver.cpp:404]     Test net output #0: accuracy = 0.82276\nI0818 02:08:26.256675 17350 solver.cpp:404]     Test net output #1: loss = 0.871092 (* 1 = 0.871092 loss)\nI0818 02:08:26.680788 17350 solver.cpp:228] Iteration 46700, loss = 0.0702376\nI0818 02:08:26.680837 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:08:26.680855 17350 solver.cpp:244]     Train net output #1: loss = 0.0702371 (* 1 = 
0.0702371 loss)\nI0818 02:08:26.757443 17350 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0818 02:09:13.960207 17350 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 02:09:40.787348 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7596\nI0818 02:09:40.787396 17350 solver.cpp:404]     Test net output #1: loss = 1.22283 (* 1 = 1.22283 loss)\nI0818 02:09:41.210342 17350 solver.cpp:228] Iteration 46800, loss = 0.0562265\nI0818 02:09:41.210392 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:09:41.210408 17350 solver.cpp:244]     Train net output #1: loss = 0.056226 (* 1 = 0.056226 loss)\nI0818 02:09:41.290539 17350 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0818 02:10:28.500313 17350 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 02:10:55.330775 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8136\nI0818 02:10:55.330824 17350 solver.cpp:404]     Test net output #1: loss = 0.695302 (* 1 = 0.695302 loss)\nI0818 02:10:55.753873 17350 solver.cpp:228] Iteration 46900, loss = 0.0866624\nI0818 02:10:55.753924 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:10:55.753947 17350 solver.cpp:244]     Train net output #1: loss = 0.0866619 (* 1 = 0.0866619 loss)\nI0818 02:10:55.834847 17350 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0818 02:11:43.039357 17350 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 02:12:09.857076 17350 solver.cpp:404]     Test net output #0: accuracy = 0.79908\nI0818 02:12:09.857125 17350 solver.cpp:404]     Test net output #1: loss = 0.793272 (* 1 = 0.793272 loss)\nI0818 02:12:10.279799 17350 solver.cpp:228] Iteration 47000, loss = 0.0938675\nI0818 02:12:10.279852 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:12:10.279870 17350 solver.cpp:244]     Train net output #1: loss = 0.0938669 (* 1 = 0.0938669 loss)\nI0818 02:12:10.362898 17350 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0818 02:12:57.602782 17350 
solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 02:13:24.454614 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77028\nI0818 02:13:24.454665 17350 solver.cpp:404]     Test net output #1: loss = 1.16982 (* 1 = 1.16982 loss)\nI0818 02:13:24.878988 17350 solver.cpp:228] Iteration 47100, loss = 0.0557666\nI0818 02:13:24.879043 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:13:24.879068 17350 solver.cpp:244]     Train net output #1: loss = 0.0557661 (* 1 = 0.0557661 loss)\nI0818 02:13:24.958441 17350 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0818 02:14:12.156303 17350 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 02:14:39.025250 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81596\nI0818 02:14:39.025298 17350 solver.cpp:404]     Test net output #1: loss = 0.808994 (* 1 = 0.808994 loss)\nI0818 02:14:39.449494 17350 solver.cpp:228] Iteration 47200, loss = 0.0806697\nI0818 02:14:39.449544 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:14:39.449561 17350 solver.cpp:244]     Train net output #1: loss = 0.0806692 (* 1 = 0.0806692 loss)\nI0818 02:14:39.526444 17350 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0818 02:15:26.743376 17350 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 02:15:53.596215 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78552\nI0818 02:15:53.596264 17350 solver.cpp:404]     Test net output #1: loss = 1.0213 (* 1 = 1.0213 loss)\nI0818 02:15:54.020670 17350 solver.cpp:228] Iteration 47300, loss = 0.042889\nI0818 02:15:54.020717 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:15:54.020735 17350 solver.cpp:244]     Train net output #1: loss = 0.0428885 (* 1 = 0.0428885 loss)\nI0818 02:15:54.096640 17350 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0818 02:16:41.259771 17350 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 02:17:07.999172 17350 solver.cpp:404]     Test net output #0: accuracy 
= 0.7456\nI0818 02:17:07.999218 17350 solver.cpp:404]     Test net output #1: loss = 1.29053 (* 1 = 1.29053 loss)\nI0818 02:17:08.423385 17350 solver.cpp:228] Iteration 47400, loss = 0.0531421\nI0818 02:17:08.423436 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:17:08.423454 17350 solver.cpp:244]     Train net output #1: loss = 0.0531416 (* 1 = 0.0531416 loss)\nI0818 02:17:08.500000 17350 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0818 02:17:55.658794 17350 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 02:18:22.534281 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76604\nI0818 02:18:22.534330 17350 solver.cpp:404]     Test net output #1: loss = 1.15458 (* 1 = 1.15458 loss)\nI0818 02:18:22.958362 17350 solver.cpp:228] Iteration 47500, loss = 0.0389886\nI0818 02:18:22.958416 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:18:22.958433 17350 solver.cpp:244]     Train net output #1: loss = 0.0389881 (* 1 = 0.0389881 loss)\nI0818 02:18:23.036058 17350 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0818 02:19:10.213919 17350 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0818 02:19:37.075350 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75836\nI0818 02:19:37.075400 17350 solver.cpp:404]     Test net output #1: loss = 1.18967 (* 1 = 1.18967 loss)\nI0818 02:19:37.499707 17350 solver.cpp:228] Iteration 47600, loss = 0.0960902\nI0818 02:19:37.499752 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:19:37.499768 17350 solver.cpp:244]     Train net output #1: loss = 0.0960897 (* 1 = 0.0960897 loss)\nI0818 02:19:37.580649 17350 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0818 02:20:24.750905 17350 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0818 02:20:51.635274 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78224\nI0818 02:20:51.635324 17350 solver.cpp:404]     Test net output #1: loss = 1.00022 (* 1 = 1.00022 loss)\nI0818 
02:20:52.059576 17350 solver.cpp:228] Iteration 47700, loss = 0.0823971\nI0818 02:20:52.059619 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:20:52.059636 17350 solver.cpp:244]     Train net output #1: loss = 0.0823965 (* 1 = 0.0823965 loss)\nI0818 02:20:52.139052 17350 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0818 02:21:39.303292 17350 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0818 02:22:06.172355 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7966\nI0818 02:22:06.172402 17350 solver.cpp:404]     Test net output #1: loss = 0.873843 (* 1 = 0.873843 loss)\nI0818 02:22:06.595510 17350 solver.cpp:228] Iteration 47800, loss = 0.0692125\nI0818 02:22:06.595563 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:22:06.595588 17350 solver.cpp:244]     Train net output #1: loss = 0.069212 (* 1 = 0.069212 loss)\nI0818 02:22:06.678751 17350 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0818 02:22:53.831578 17350 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0818 02:23:20.720710 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77764\nI0818 02:23:20.720759 17350 solver.cpp:404]     Test net output #1: loss = 0.964968 (* 1 = 0.964968 loss)\nI0818 02:23:21.143556 17350 solver.cpp:228] Iteration 47900, loss = 0.086941\nI0818 02:23:21.143602 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:23:21.143618 17350 solver.cpp:244]     Train net output #1: loss = 0.0869404 (* 1 = 0.0869404 loss)\nI0818 02:23:21.221962 17350 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0818 02:24:08.401070 17350 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0818 02:24:35.255996 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80248\nI0818 02:24:35.256047 17350 solver.cpp:404]     Test net output #1: loss = 0.911095 (* 1 = 0.911095 loss)\nI0818 02:24:35.679029 17350 solver.cpp:228] Iteration 48000, loss = 0.0516436\nI0818 02:24:35.679075 17350 solver.cpp:244]     
Train net output #0: accuracy = 0.984\nI0818 02:24:35.679092 17350 solver.cpp:244]     Train net output #1: loss = 0.0516431 (* 1 = 0.0516431 loss)\nI0818 02:24:35.754496 17350 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0818 02:25:22.920058 17350 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0818 02:25:49.739264 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7802\nI0818 02:25:49.739315 17350 solver.cpp:404]     Test net output #1: loss = 1.13517 (* 1 = 1.13517 loss)\nI0818 02:25:50.162274 17350 solver.cpp:228] Iteration 48100, loss = 0.0801031\nI0818 02:25:50.162319 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:25:50.162336 17350 solver.cpp:244]     Train net output #1: loss = 0.0801025 (* 1 = 0.0801025 loss)\nI0818 02:25:50.244261 17350 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0818 02:26:37.406314 17350 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0818 02:27:04.290751 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7904\nI0818 02:27:04.290801 17350 solver.cpp:404]     Test net output #1: loss = 1.01836 (* 1 = 1.01836 loss)\nI0818 02:27:04.715167 17350 solver.cpp:228] Iteration 48200, loss = 0.0487763\nI0818 02:27:04.715211 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:27:04.715227 17350 solver.cpp:244]     Train net output #1: loss = 0.0487757 (* 1 = 0.0487757 loss)\nI0818 02:27:04.796289 17350 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0818 02:27:51.939282 17350 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0818 02:28:18.749960 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81284\nI0818 02:28:18.750005 17350 solver.cpp:404]     Test net output #1: loss = 0.723906 (* 1 = 0.723906 loss)\nI0818 02:28:19.173998 17350 solver.cpp:228] Iteration 48300, loss = 0.0828909\nI0818 02:28:19.174046 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:28:19.174063 17350 solver.cpp:244]     Train net output #1: loss = 0.0828904 (* 
1 = 0.0828904 loss)\nI0818 02:28:19.251147 17350 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0818 02:29:06.403980 17350 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0818 02:29:33.272323 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77508\nI0818 02:29:33.272373 17350 solver.cpp:404]     Test net output #1: loss = 1.04397 (* 1 = 1.04397 loss)\nI0818 02:29:33.695281 17350 solver.cpp:228] Iteration 48400, loss = 0.0453599\nI0818 02:29:33.695318 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:29:33.695334 17350 solver.cpp:244]     Train net output #1: loss = 0.0453594 (* 1 = 0.0453594 loss)\nI0818 02:29:33.774508 17350 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0818 02:30:20.913743 17350 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0818 02:30:47.617852 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76452\nI0818 02:30:47.617898 17350 solver.cpp:404]     Test net output #1: loss = 1.05363 (* 1 = 1.05363 loss)\nI0818 02:30:48.040674 17350 solver.cpp:228] Iteration 48500, loss = 0.0589429\nI0818 02:30:48.040712 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:30:48.040729 17350 solver.cpp:244]     Train net output #1: loss = 0.0589423 (* 1 = 0.0589423 loss)\nI0818 02:30:48.123008 17350 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0818 02:31:35.270298 17350 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0818 02:32:02.005810 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81668\nI0818 02:32:02.005856 17350 solver.cpp:404]     Test net output #1: loss = 0.749582 (* 1 = 0.749582 loss)\nI0818 02:32:02.428891 17350 solver.cpp:228] Iteration 48600, loss = 0.0788192\nI0818 02:32:02.428937 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:32:02.428954 17350 solver.cpp:244]     Train net output #1: loss = 0.0788186 (* 1 = 0.0788186 loss)\nI0818 02:32:02.510481 17350 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0818 02:32:49.655287 17350 
solver.cpp:337] Iteration 48700, Testing net (#0)\nI0818 02:33:16.541918 17350 solver.cpp:404]     Test net output #0: accuracy = 0.77444\nI0818 02:33:16.541972 17350 solver.cpp:404]     Test net output #1: loss = 1.11078 (* 1 = 1.11078 loss)\nI0818 02:33:16.964536 17350 solver.cpp:228] Iteration 48700, loss = 0.0388413\nI0818 02:33:16.964576 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:33:16.964591 17350 solver.cpp:244]     Train net output #1: loss = 0.0388408 (* 1 = 0.0388408 loss)\nI0818 02:33:17.048458 17350 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0818 02:34:04.184237 17350 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0818 02:34:31.027845 17350 solver.cpp:404]     Test net output #0: accuracy = 0.75572\nI0818 02:34:31.027894 17350 solver.cpp:404]     Test net output #1: loss = 1.20219 (* 1 = 1.20219 loss)\nI0818 02:34:31.451016 17350 solver.cpp:228] Iteration 48800, loss = 0.0497916\nI0818 02:34:31.451057 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:34:31.451073 17350 solver.cpp:244]     Train net output #1: loss = 0.0497911 (* 1 = 0.0497911 loss)\nI0818 02:34:31.530992 17350 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0818 02:35:18.667069 17350 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0818 02:35:45.530046 17350 solver.cpp:404]     Test net output #0: accuracy = 0.81276\nI0818 02:35:45.530097 17350 solver.cpp:404]     Test net output #1: loss = 0.814272 (* 1 = 0.814272 loss)\nI0818 02:35:45.952992 17350 solver.cpp:228] Iteration 48900, loss = 0.050714\nI0818 02:35:45.953032 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:35:45.953048 17350 solver.cpp:244]     Train net output #1: loss = 0.0507135 (* 1 = 0.0507135 loss)\nI0818 02:35:46.034348 17350 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0818 02:36:33.140678 17350 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0818 02:36:59.876035 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.77932\nI0818 02:36:59.876080 17350 solver.cpp:404]     Test net output #1: loss = 1.10768 (* 1 = 1.10768 loss)\nI0818 02:37:00.299044 17350 solver.cpp:228] Iteration 49000, loss = 0.0819516\nI0818 02:37:00.299082 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:37:00.299098 17350 solver.cpp:244]     Train net output #1: loss = 0.0819511 (* 1 = 0.0819511 loss)\nI0818 02:37:00.378115 17350 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0818 02:37:47.549443 17350 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0818 02:38:14.354434 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80984\nI0818 02:38:14.354485 17350 solver.cpp:404]     Test net output #1: loss = 0.902255 (* 1 = 0.902255 loss)\nI0818 02:38:14.777412 17350 solver.cpp:228] Iteration 49100, loss = 0.0254609\nI0818 02:38:14.777451 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:38:14.777468 17350 solver.cpp:244]     Train net output #1: loss = 0.0254603 (* 1 = 0.0254603 loss)\nI0818 02:38:14.855998 17350 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0818 02:39:02.023331 17350 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0818 02:39:28.884167 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7896\nI0818 02:39:28.884217 17350 solver.cpp:404]     Test net output #1: loss = 1.06428 (* 1 = 1.06428 loss)\nI0818 02:39:29.307204 17350 solver.cpp:228] Iteration 49200, loss = 0.0238851\nI0818 02:39:29.307245 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:39:29.307260 17350 solver.cpp:244]     Train net output #1: loss = 0.0238846 (* 1 = 0.0238846 loss)\nI0818 02:39:29.390147 17350 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0818 02:40:16.542582 17350 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0818 02:40:43.425060 17350 solver.cpp:404]     Test net output #0: accuracy = 0.80972\nI0818 02:40:43.425110 17350 solver.cpp:404]     Test net output #1: loss = 0.836296 (* 1 = 0.836296 
loss)\nI0818 02:40:43.847864 17350 solver.cpp:228] Iteration 49300, loss = 0.0583159\nI0818 02:40:43.847903 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:40:43.847919 17350 solver.cpp:244]     Train net output #1: loss = 0.0583154 (* 1 = 0.0583154 loss)\nI0818 02:40:43.930634 17350 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0818 02:41:31.067688 17350 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0818 02:41:57.948267 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8148\nI0818 02:41:57.948318 17350 solver.cpp:404]     Test net output #1: loss = 0.840374 (* 1 = 0.840374 loss)\nI0818 02:41:58.371304 17350 solver.cpp:228] Iteration 49400, loss = 0.0731978\nI0818 02:41:58.371342 17350 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 02:41:58.371358 17350 solver.cpp:244]     Train net output #1: loss = 0.0731972 (* 1 = 0.0731972 loss)\nI0818 02:41:58.455373 17350 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0818 02:42:45.604189 17350 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0818 02:43:12.498997 17350 solver.cpp:404]     Test net output #0: accuracy = 0.62152\nI0818 02:43:12.499047 17350 solver.cpp:404]     Test net output #1: loss = 2.76618 (* 1 = 2.76618 loss)\nI0818 02:43:12.922072 17350 solver.cpp:228] Iteration 49500, loss = 0.106188\nI0818 02:43:12.922111 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:43:12.922127 17350 solver.cpp:244]     Train net output #1: loss = 0.106187 (* 1 = 0.106187 loss)\nI0818 02:43:13.006263 17350 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0818 02:44:00.151731 17350 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0818 02:44:27.020462 17350 solver.cpp:404]     Test net output #0: accuracy = 0.68252\nI0818 02:44:27.020514 17350 solver.cpp:404]     Test net output #1: loss = 1.5733 (* 1 = 1.5733 loss)\nI0818 02:44:27.443434 17350 solver.cpp:228] Iteration 49600, loss = 0.0902388\nI0818 02:44:27.443464 17350 solver.cpp:244]    
 Train net output #0: accuracy = 0.976\nI0818 02:44:27.443478 17350 solver.cpp:244]     Train net output #1: loss = 0.0902382 (* 1 = 0.0902382 loss)\nI0818 02:44:27.520742 17350 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0818 02:45:14.655656 17350 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0818 02:45:41.529695 17350 solver.cpp:404]     Test net output #0: accuracy = 0.7028\nI0818 02:45:41.529745 17350 solver.cpp:404]     Test net output #1: loss = 1.43706 (* 1 = 1.43706 loss)\nI0818 02:45:41.952805 17350 solver.cpp:228] Iteration 49700, loss = 0.106906\nI0818 02:45:41.952838 17350 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 02:45:41.952854 17350 solver.cpp:244]     Train net output #1: loss = 0.106905 (* 1 = 0.106905 loss)\nI0818 02:45:42.033958 17350 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0818 02:46:29.189385 17350 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0818 02:46:56.068081 17350 solver.cpp:404]     Test net output #0: accuracy = 0.76572\nI0818 02:46:56.068131 17350 solver.cpp:404]     Test net output #1: loss = 1.11681 (* 1 = 1.11681 loss)\nI0818 02:46:56.490980 17350 solver.cpp:228] Iteration 49800, loss = 0.0815392\nI0818 02:46:56.491020 17350 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 02:46:56.491036 17350 solver.cpp:244]     Train net output #1: loss = 0.0815387 (* 1 = 0.0815387 loss)\nI0818 02:46:56.572399 17350 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0818 02:47:43.800539 17350 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0818 02:48:10.681147 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78748\nI0818 02:48:10.681196 17350 solver.cpp:404]     Test net output #1: loss = 1.00348 (* 1 = 1.00348 loss)\nI0818 02:48:11.105154 17350 solver.cpp:228] Iteration 49900, loss = 0.0360005\nI0818 02:48:11.105199 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:48:11.105216 17350 solver.cpp:244]     Train net output #1: loss = 0.0359999 (* 1 = 
0.0359999 loss)\nI0818 02:48:11.183610 17350 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0818 02:48:58.372428 17350 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0818 02:49:25.236570 17350 solver.cpp:404]     Test net output #0: accuracy = 0.78084\nI0818 02:49:25.236619 17350 solver.cpp:404]     Test net output #1: loss = 1.11415 (* 1 = 1.11415 loss)\nI0818 02:49:25.659405 17350 solver.cpp:228] Iteration 50000, loss = 0.0732314\nI0818 02:49:25.659451 17350 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 02:49:25.659467 17350 solver.cpp:244]     Train net output #1: loss = 0.0732308 (* 1 = 0.0732308 loss)\nI0818 02:49:25.737560 17350 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0818 02:49:25.737581 17350 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0818 02:50:12.953366 17350 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0818 02:50:39.831758 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85952\nI0818 02:50:39.831807 17350 solver.cpp:404]     Test net output #1: loss = 0.579945 (* 1 = 0.579945 loss)\nI0818 02:50:40.255856 17350 solver.cpp:228] Iteration 50100, loss = 0.0138086\nI0818 02:50:40.255899 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:50:40.255916 17350 solver.cpp:244]     Train net output #1: loss = 0.013808 (* 1 = 0.013808 loss)\nI0818 02:50:40.329337 17350 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0818 02:51:27.531816 17350 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0818 02:51:54.403628 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0818 02:51:54.403677 17350 solver.cpp:404]     Test net output #1: loss = 0.526439 (* 1 = 0.526439 loss)\nI0818 02:51:54.827790 17350 solver.cpp:228] Iteration 50200, loss = 0.0193049\nI0818 02:51:54.827834 17350 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 02:51:54.827852 17350 solver.cpp:244]     Train net output #1: loss = 0.0193044 (* 1 = 0.0193044 loss)\nI0818 
02:51:54.902428 17350 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0818 02:52:42.038077 17350 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0818 02:53:08.744778 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0818 02:53:08.744824 17350 solver.cpp:404]     Test net output #1: loss = 0.487702 (* 1 = 0.487702 loss)\nI0818 02:53:09.167667 17350 solver.cpp:228] Iteration 50300, loss = 0.00350805\nI0818 02:53:09.167716 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:53:09.167732 17350 solver.cpp:244]     Train net output #1: loss = 0.00350749 (* 1 = 0.00350749 loss)\nI0818 02:53:09.250890 17350 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0818 02:53:56.385180 17350 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0818 02:54:23.113838 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0818 02:54:23.113884 17350 solver.cpp:404]     Test net output #1: loss = 0.47189 (* 1 = 0.47189 loss)\nI0818 02:54:23.536881 17350 solver.cpp:228] Iteration 50400, loss = 0.00418062\nI0818 02:54:23.536932 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:54:23.536949 17350 solver.cpp:244]     Train net output #1: loss = 0.00418006 (* 1 = 0.00418006 loss)\nI0818 02:54:23.616931 17350 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0818 02:55:10.736379 17350 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0818 02:55:37.451004 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87428\nI0818 02:55:37.451048 17350 solver.cpp:404]     Test net output #1: loss = 0.467785 (* 1 = 0.467785 loss)\nI0818 02:55:37.875210 17350 solver.cpp:228] Iteration 50500, loss = 0.00319239\nI0818 02:55:37.875257 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:55:37.875273 17350 solver.cpp:244]     Train net output #1: loss = 0.00319182 (* 1 = 0.00319182 loss)\nI0818 02:55:37.958667 17350 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0818 02:56:25.142396 17350 solver.cpp:337] Iteration 
50600, Testing net (#0)\nI0818 02:56:51.858194 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87396\nI0818 02:56:51.858243 17350 solver.cpp:404]     Test net output #1: loss = 0.467968 (* 1 = 0.467968 loss)\nI0818 02:56:52.287420 17350 solver.cpp:228] Iteration 50600, loss = 0.00412373\nI0818 02:56:52.287462 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:56:52.287492 17350 solver.cpp:244]     Train net output #1: loss = 0.00412316 (* 1 = 0.00412316 loss)\nI0818 02:56:52.363111 17350 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0818 02:57:39.661350 17350 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0818 02:58:06.371929 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87332\nI0818 02:58:06.371975 17350 solver.cpp:404]     Test net output #1: loss = 0.47105 (* 1 = 0.47105 loss)\nI0818 02:58:06.794668 17350 solver.cpp:228] Iteration 50700, loss = 0.00157831\nI0818 02:58:06.794714 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:58:06.794731 17350 solver.cpp:244]     Train net output #1: loss = 0.00157775 (* 1 = 0.00157775 loss)\nI0818 02:58:06.873433 17350 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0818 02:58:54.098932 17350 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0818 02:59:20.854171 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87188\nI0818 02:59:20.854215 17350 solver.cpp:404]     Test net output #1: loss = 0.472141 (* 1 = 0.472141 loss)\nI0818 02:59:21.278374 17350 solver.cpp:228] Iteration 50800, loss = 0.00377509\nI0818 02:59:21.278434 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 02:59:21.278452 17350 solver.cpp:244]     Train net output #1: loss = 0.00377452 (* 1 = 0.00377452 loss)\nI0818 02:59:21.360980 17350 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0818 03:00:08.486250 17350 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0818 03:00:35.112438 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87196\nI0818 
03:00:35.112490 17350 solver.cpp:404]     Test net output #1: loss = 0.472136 (* 1 = 0.472136 loss)\nI0818 03:00:35.534623 17350 solver.cpp:228] Iteration 50900, loss = 0.00355542\nI0818 03:00:35.534660 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:00:35.534677 17350 solver.cpp:244]     Train net output #1: loss = 0.00355486 (* 1 = 0.00355486 loss)\nI0818 03:00:35.615737 17350 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0818 03:01:22.709585 17350 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0818 03:01:49.333789 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87084\nI0818 03:01:49.333839 17350 solver.cpp:404]     Test net output #1: loss = 0.475902 (* 1 = 0.475902 loss)\nI0818 03:01:49.756337 17350 solver.cpp:228] Iteration 51000, loss = 0.00337923\nI0818 03:01:49.756382 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:01:49.756399 17350 solver.cpp:244]     Train net output #1: loss = 0.00337867 (* 1 = 0.00337867 loss)\nI0818 03:01:49.836522 17350 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0818 03:02:36.896001 17350 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0818 03:03:03.517091 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86916\nI0818 03:03:03.517145 17350 solver.cpp:404]     Test net output #1: loss = 0.478364 (* 1 = 0.478364 loss)\nI0818 03:03:03.939586 17350 solver.cpp:228] Iteration 51100, loss = 0.00207511\nI0818 03:03:03.939635 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:03:03.939657 17350 solver.cpp:244]     Train net output #1: loss = 0.00207455 (* 1 = 0.00207455 loss)\nI0818 03:03:04.024534 17350 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0818 03:03:51.090929 17350 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0818 03:04:17.714406 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86868\nI0818 03:04:17.714462 17350 solver.cpp:404]     Test net output #1: loss = 0.484994 (* 1 = 0.484994 loss)\nI0818 
03:04:18.136368 17350 solver.cpp:228] Iteration 51200, loss = 0.00324852\nI0818 03:04:18.136412 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:04:18.136435 17350 solver.cpp:244]     Train net output #1: loss = 0.00324796 (* 1 = 0.00324796 loss)\nI0818 03:04:18.220460 17350 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0818 03:05:05.297232 17350 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0818 03:05:31.920325 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8686\nI0818 03:05:31.920380 17350 solver.cpp:404]     Test net output #1: loss = 0.484281 (* 1 = 0.484281 loss)\nI0818 03:05:32.342607 17350 solver.cpp:228] Iteration 51300, loss = 0.00280263\nI0818 03:05:32.342653 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:05:32.342676 17350 solver.cpp:244]     Train net output #1: loss = 0.00280206 (* 1 = 0.00280206 loss)\nI0818 03:05:32.420260 17350 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0818 03:06:19.485057 17350 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0818 03:06:46.108187 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86876\nI0818 03:06:46.108242 17350 solver.cpp:404]     Test net output #1: loss = 0.487838 (* 1 = 0.487838 loss)\nI0818 03:06:46.531265 17350 solver.cpp:228] Iteration 51400, loss = 0.00304543\nI0818 03:06:46.531312 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:06:46.531335 17350 solver.cpp:244]     Train net output #1: loss = 0.00304486 (* 1 = 0.00304486 loss)\nI0818 03:06:46.624531 17350 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0818 03:07:33.676669 17350 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0818 03:08:00.298573 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86576\nI0818 03:08:00.298629 17350 solver.cpp:404]     Test net output #1: loss = 0.494284 (* 1 = 0.494284 loss)\nI0818 03:08:00.721447 17350 solver.cpp:228] Iteration 51500, loss = 0.00199386\nI0818 03:08:00.721494 17350 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0818 03:08:00.721518 17350 solver.cpp:244]     Train net output #1: loss = 0.0019933 (* 1 = 0.0019933 loss)\nI0818 03:08:00.802695 17350 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0818 03:08:47.872254 17350 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0818 03:09:14.495903 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86496\nI0818 03:09:14.495959 17350 solver.cpp:404]     Test net output #1: loss = 0.498014 (* 1 = 0.498014 loss)\nI0818 03:09:14.918594 17350 solver.cpp:228] Iteration 51600, loss = 0.00136489\nI0818 03:09:14.918642 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:09:14.918668 17350 solver.cpp:244]     Train net output #1: loss = 0.00136433 (* 1 = 0.00136433 loss)\nI0818 03:09:15.001988 17350 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0818 03:10:02.063880 17350 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0818 03:10:28.711787 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0818 03:10:28.711855 17350 solver.cpp:404]     Test net output #1: loss = 0.497831 (* 1 = 0.497831 loss)\nI0818 03:10:29.133759 17350 solver.cpp:228] Iteration 51700, loss = 0.0023227\nI0818 03:10:29.133808 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:10:29.133832 17350 solver.cpp:244]     Train net output #1: loss = 0.00232214 (* 1 = 0.00232214 loss)\nI0818 03:10:29.219637 17350 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0818 03:11:16.296679 17350 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0818 03:11:42.922708 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8638\nI0818 03:11:42.922763 17350 solver.cpp:404]     Test net output #1: loss = 0.500649 (* 1 = 0.500649 loss)\nI0818 03:11:43.345695 17350 solver.cpp:228] Iteration 51800, loss = 0.00220447\nI0818 03:11:43.345742 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:11:43.345767 17350 solver.cpp:244]     Train net output #1: loss = 0.00220391 (* 
1 = 0.00220391 loss)\nI0818 03:11:43.424867 17350 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0818 03:12:30.481966 17350 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0818 03:12:57.104364 17350 solver.cpp:404]     Test net output #0: accuracy = 0.861\nI0818 03:12:57.104420 17350 solver.cpp:404]     Test net output #1: loss = 0.508799 (* 1 = 0.508799 loss)\nI0818 03:12:57.527400 17350 solver.cpp:228] Iteration 51900, loss = 0.00191446\nI0818 03:12:57.527449 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:12:57.527473 17350 solver.cpp:244]     Train net output #1: loss = 0.0019139 (* 1 = 0.0019139 loss)\nI0818 03:12:57.604895 17350 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0818 03:13:44.650288 17350 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0818 03:14:11.302199 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86184\nI0818 03:14:11.302256 17350 solver.cpp:404]     Test net output #1: loss = 0.509107 (* 1 = 0.509107 loss)\nI0818 03:14:11.725127 17350 solver.cpp:228] Iteration 52000, loss = 0.00250056\nI0818 03:14:11.725177 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:14:11.725200 17350 solver.cpp:244]     Train net output #1: loss = 0.0025 (* 1 = 0.0025 loss)\nI0818 03:14:11.804685 17350 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0818 03:14:58.892979 17350 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0818 03:15:25.538045 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85964\nI0818 03:15:25.538105 17350 solver.cpp:404]     Test net output #1: loss = 0.514964 (* 1 = 0.514964 loss)\nI0818 03:15:25.960449 17350 solver.cpp:228] Iteration 52100, loss = 0.0019011\nI0818 03:15:25.960497 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:15:25.960521 17350 solver.cpp:244]     Train net output #1: loss = 0.00190053 (* 1 = 0.00190053 loss)\nI0818 03:15:26.042176 17350 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0818 03:16:13.112249 17350 
solver.cpp:337] Iteration 52200, Testing net (#0)\nI0818 03:16:39.736603 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86108\nI0818 03:16:39.736660 17350 solver.cpp:404]     Test net output #1: loss = 0.514143 (* 1 = 0.514143 loss)\nI0818 03:16:40.159920 17350 solver.cpp:228] Iteration 52200, loss = 0.00190653\nI0818 03:16:40.159966 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:16:40.159991 17350 solver.cpp:244]     Train net output #1: loss = 0.00190596 (* 1 = 0.00190596 loss)\nI0818 03:16:40.243597 17350 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0818 03:17:27.311677 17350 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0818 03:17:53.946892 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85856\nI0818 03:17:53.946945 17350 solver.cpp:404]     Test net output #1: loss = 0.519555 (* 1 = 0.519555 loss)\nI0818 03:17:54.369343 17350 solver.cpp:228] Iteration 52300, loss = 0.00250001\nI0818 03:17:54.369387 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:17:54.369412 17350 solver.cpp:244]     Train net output #1: loss = 0.00249945 (* 1 = 0.00249945 loss)\nI0818 03:17:54.453681 17350 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0818 03:18:41.612184 17350 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0818 03:19:08.244596 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85916\nI0818 03:19:08.244650 17350 solver.cpp:404]     Test net output #1: loss = 0.519516 (* 1 = 0.519516 loss)\nI0818 03:19:08.667155 17350 solver.cpp:228] Iteration 52400, loss = 0.00165854\nI0818 03:19:08.667198 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:19:08.667222 17350 solver.cpp:244]     Train net output #1: loss = 0.00165797 (* 1 = 0.00165797 loss)\nI0818 03:19:08.746026 17350 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0818 03:19:55.858180 17350 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0818 03:20:22.508636 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.85684\nI0818 03:20:22.508689 17350 solver.cpp:404]     Test net output #1: loss = 0.525109 (* 1 = 0.525109 loss)\nI0818 03:20:22.930415 17350 solver.cpp:228] Iteration 52500, loss = 0.00166449\nI0818 03:20:22.930457 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:20:22.930474 17350 solver.cpp:244]     Train net output #1: loss = 0.00166393 (* 1 = 0.00166393 loss)\nI0818 03:20:23.007318 17350 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0818 03:21:10.126617 17350 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0818 03:21:36.753875 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI0818 03:21:36.753926 17350 solver.cpp:404]     Test net output #1: loss = 0.528097 (* 1 = 0.528097 loss)\nI0818 03:21:37.176805 17350 solver.cpp:228] Iteration 52600, loss = 0.00188503\nI0818 03:21:37.176847 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:21:37.176863 17350 solver.cpp:244]     Train net output #1: loss = 0.00188447 (* 1 = 0.00188447 loss)\nI0818 03:21:37.253330 17350 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0818 03:22:24.332847 17350 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0818 03:22:50.953282 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85536\nI0818 03:22:50.953338 17350 solver.cpp:404]     Test net output #1: loss = 0.52766 (* 1 = 0.52766 loss)\nI0818 03:22:51.375399 17350 solver.cpp:228] Iteration 52700, loss = 0.00130755\nI0818 03:22:51.375443 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:22:51.375468 17350 solver.cpp:244]     Train net output #1: loss = 0.00130698 (* 1 = 0.00130698 loss)\nI0818 03:22:51.454792 17350 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0818 03:23:38.537271 17350 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0818 03:24:05.202306 17350 solver.cpp:404]     Test net output #0: accuracy = 0.856\nI0818 03:24:05.202363 17350 solver.cpp:404]     Test net output #1: loss = 0.529294 (* 1 = 0.529294 
loss)\nI0818 03:24:05.625392 17350 solver.cpp:228] Iteration 52800, loss = 0.00159902\nI0818 03:24:05.625437 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:24:05.625460 17350 solver.cpp:244]     Train net output #1: loss = 0.00159846 (* 1 = 0.00159846 loss)\nI0818 03:24:05.702581 17350 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0818 03:24:52.836024 17350 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0818 03:25:19.476130 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85372\nI0818 03:25:19.476186 17350 solver.cpp:404]     Test net output #1: loss = 0.532082 (* 1 = 0.532082 loss)\nI0818 03:25:19.898903 17350 solver.cpp:228] Iteration 52900, loss = 0.00103646\nI0818 03:25:19.898949 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:25:19.898974 17350 solver.cpp:244]     Train net output #1: loss = 0.00103589 (* 1 = 0.00103589 loss)\nI0818 03:25:19.981012 17350 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0818 03:26:07.086604 17350 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0818 03:26:33.714443 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85496\nI0818 03:26:33.714499 17350 solver.cpp:404]     Test net output #1: loss = 0.532678 (* 1 = 0.532678 loss)\nI0818 03:26:34.137282 17350 solver.cpp:228] Iteration 53000, loss = 0.00151744\nI0818 03:26:34.137328 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:26:34.137352 17350 solver.cpp:244]     Train net output #1: loss = 0.00151687 (* 1 = 0.00151687 loss)\nI0818 03:26:34.217782 17350 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0818 03:27:21.330932 17350 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0818 03:27:48.001615 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85304\nI0818 03:27:48.001668 17350 solver.cpp:404]     Test net output #1: loss = 0.533662 (* 1 = 0.533662 loss)\nI0818 03:27:48.424070 17350 solver.cpp:228] Iteration 53100, loss = 0.00148214\nI0818 03:27:48.424119 17350 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:27:48.424144 17350 solver.cpp:244]     Train net output #1: loss = 0.00148157 (* 1 = 0.00148157 loss)\nI0818 03:27:48.503139 17350 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0818 03:28:35.622050 17350 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0818 03:29:02.273571 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8544\nI0818 03:29:02.273625 17350 solver.cpp:404]     Test net output #1: loss = 0.535436 (* 1 = 0.535436 loss)\nI0818 03:29:02.696147 17350 solver.cpp:228] Iteration 53200, loss = 0.00103877\nI0818 03:29:02.696194 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:29:02.696218 17350 solver.cpp:244]     Train net output #1: loss = 0.00103821 (* 1 = 0.00103821 loss)\nI0818 03:29:02.779913 17350 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0818 03:29:49.923784 17350 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0818 03:30:16.562765 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85284\nI0818 03:30:16.562819 17350 solver.cpp:404]     Test net output #1: loss = 0.537376 (* 1 = 0.537376 loss)\nI0818 03:30:16.985555 17350 solver.cpp:228] Iteration 53300, loss = 0.00153373\nI0818 03:30:16.985604 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:30:16.985627 17350 solver.cpp:244]     Train net output #1: loss = 0.00153317 (* 1 = 0.00153317 loss)\nI0818 03:30:17.065716 17350 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0818 03:31:04.202109 17350 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0818 03:31:30.827994 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85376\nI0818 03:31:30.828049 17350 solver.cpp:404]     Test net output #1: loss = 0.538757 (* 1 = 0.538757 loss)\nI0818 03:31:31.250771 17350 solver.cpp:228] Iteration 53400, loss = 0.00115327\nI0818 03:31:31.250818 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:31:31.250844 17350 solver.cpp:244]     Train net output 
#1: loss = 0.00115271 (* 1 = 0.00115271 loss)\nI0818 03:31:31.328229 17350 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0818 03:32:18.452343 17350 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0818 03:32:45.074641 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85232\nI0818 03:32:45.074692 17350 solver.cpp:404]     Test net output #1: loss = 0.540244 (* 1 = 0.540244 loss)\nI0818 03:32:45.497061 17350 solver.cpp:228] Iteration 53500, loss = 0.00204595\nI0818 03:32:45.497102 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:32:45.497117 17350 solver.cpp:244]     Train net output #1: loss = 0.00204538 (* 1 = 0.00204538 loss)\nI0818 03:32:45.575276 17350 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0818 03:33:32.631207 17350 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0818 03:33:59.255365 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85408\nI0818 03:33:59.255416 17350 solver.cpp:404]     Test net output #1: loss = 0.538703 (* 1 = 0.538703 loss)\nI0818 03:33:59.678171 17350 solver.cpp:228] Iteration 53600, loss = 0.00143674\nI0818 03:33:59.678203 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:33:59.678218 17350 solver.cpp:244]     Train net output #1: loss = 0.00143618 (* 1 = 0.00143618 loss)\nI0818 03:33:59.757997 17350 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0818 03:34:46.890621 17350 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0818 03:35:13.511448 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85304\nI0818 03:35:13.511499 17350 solver.cpp:404]     Test net output #1: loss = 0.539227 (* 1 = 0.539227 loss)\nI0818 03:35:13.934152 17350 solver.cpp:228] Iteration 53700, loss = 0.00122714\nI0818 03:35:13.934185 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:35:13.934201 17350 solver.cpp:244]     Train net output #1: loss = 0.00122658 (* 1 = 0.00122658 loss)\nI0818 03:35:14.014786 17350 sgd_solver.cpp:166] Iteration 53700, lr = 
0.035\nI0818 03:36:01.151764 17350 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0818 03:36:27.772444 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8538\nI0818 03:36:27.772495 17350 solver.cpp:404]     Test net output #1: loss = 0.540033 (* 1 = 0.540033 loss)\nI0818 03:36:28.195073 17350 solver.cpp:228] Iteration 53800, loss = 0.00144646\nI0818 03:36:28.195107 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:36:28.195123 17350 solver.cpp:244]     Train net output #1: loss = 0.00144589 (* 1 = 0.00144589 loss)\nI0818 03:36:28.272907 17350 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0818 03:37:15.410192 17350 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0818 03:37:42.026648 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85224\nI0818 03:37:42.026700 17350 solver.cpp:404]     Test net output #1: loss = 0.54055 (* 1 = 0.54055 loss)\nI0818 03:37:42.448796 17350 solver.cpp:228] Iteration 53900, loss = 0.00130752\nI0818 03:37:42.448827 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:37:42.448843 17350 solver.cpp:244]     Train net output #1: loss = 0.00130695 (* 1 = 0.00130695 loss)\nI0818 03:37:42.525411 17350 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0818 03:38:29.615834 17350 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0818 03:38:56.231348 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85432\nI0818 03:38:56.231396 17350 solver.cpp:404]     Test net output #1: loss = 0.53725 (* 1 = 0.53725 loss)\nI0818 03:38:56.653656 17350 solver.cpp:228] Iteration 54000, loss = 0.00156153\nI0818 03:38:56.653688 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:38:56.653703 17350 solver.cpp:244]     Train net output #1: loss = 0.00156097 (* 1 = 0.00156097 loss)\nI0818 03:38:56.732614 17350 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0818 03:39:43.832403 17350 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0818 03:40:10.451452 17350 
solver.cpp:404]     Test net output #0: accuracy = 0.85224\nI0818 03:40:10.451503 17350 solver.cpp:404]     Test net output #1: loss = 0.539749 (* 1 = 0.539749 loss)\nI0818 03:40:10.873724 17350 solver.cpp:228] Iteration 54100, loss = 0.00134577\nI0818 03:40:10.873759 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:40:10.873773 17350 solver.cpp:244]     Train net output #1: loss = 0.00134521 (* 1 = 0.00134521 loss)\nI0818 03:40:10.952148 17350 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0818 03:40:58.111568 17350 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0818 03:41:24.731117 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85288\nI0818 03:41:24.731168 17350 solver.cpp:404]     Test net output #1: loss = 0.540734 (* 1 = 0.540734 loss)\nI0818 03:41:25.154016 17350 solver.cpp:228] Iteration 54200, loss = 0.00146577\nI0818 03:41:25.154048 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:41:25.154063 17350 solver.cpp:244]     Train net output #1: loss = 0.0014652 (* 1 = 0.0014652 loss)\nI0818 03:41:25.241945 17350 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0818 03:42:12.363334 17350 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0818 03:42:38.984964 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8512\nI0818 03:42:38.985015 17350 solver.cpp:404]     Test net output #1: loss = 0.543584 (* 1 = 0.543584 loss)\nI0818 03:42:39.407620 17350 solver.cpp:228] Iteration 54300, loss = 0.00147514\nI0818 03:42:39.407656 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:42:39.407672 17350 solver.cpp:244]     Train net output #1: loss = 0.00147457 (* 1 = 0.00147457 loss)\nI0818 03:42:39.490664 17350 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0818 03:43:26.628072 17350 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0818 03:43:53.251684 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85264\nI0818 03:43:53.251735 17350 solver.cpp:404]     Test net output 
#1: loss = 0.541086 (* 1 = 0.541086 loss)\nI0818 03:43:53.674178 17350 solver.cpp:228] Iteration 54400, loss = 0.00147865\nI0818 03:43:53.674213 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:43:53.674229 17350 solver.cpp:244]     Train net output #1: loss = 0.00147809 (* 1 = 0.00147809 loss)\nI0818 03:43:53.754231 17350 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0818 03:44:40.870970 17350 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0818 03:45:07.490345 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85116\nI0818 03:45:07.490394 17350 solver.cpp:404]     Test net output #1: loss = 0.539608 (* 1 = 0.539608 loss)\nI0818 03:45:07.913255 17350 solver.cpp:228] Iteration 54500, loss = 0.00171418\nI0818 03:45:07.913291 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:45:07.913306 17350 solver.cpp:244]     Train net output #1: loss = 0.00171362 (* 1 = 0.00171362 loss)\nI0818 03:45:07.994819 17350 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0818 03:45:55.146482 17350 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0818 03:46:21.769731 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85336\nI0818 03:46:21.769780 17350 solver.cpp:404]     Test net output #1: loss = 0.537439 (* 1 = 0.537439 loss)\nI0818 03:46:22.192241 17350 solver.cpp:228] Iteration 54600, loss = 0.00114229\nI0818 03:46:22.192276 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:46:22.192291 17350 solver.cpp:244]     Train net output #1: loss = 0.00114173 (* 1 = 0.00114173 loss)\nI0818 03:46:22.272766 17350 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0818 03:47:09.539432 17350 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0818 03:47:36.162780 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85216\nI0818 03:47:36.162828 17350 solver.cpp:404]     Test net output #1: loss = 0.53959 (* 1 = 0.53959 loss)\nI0818 03:47:36.584974 17350 solver.cpp:228] Iteration 54700, loss = 
0.00133625\nI0818 03:47:36.585011 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:47:36.585026 17350 solver.cpp:244]     Train net output #1: loss = 0.00133569 (* 1 = 0.00133569 loss)\nI0818 03:47:36.662037 17350 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0818 03:48:24.008424 17350 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0818 03:48:50.631616 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85268\nI0818 03:48:50.631667 17350 solver.cpp:404]     Test net output #1: loss = 0.54041 (* 1 = 0.54041 loss)\nI0818 03:48:51.054087 17350 solver.cpp:228] Iteration 54800, loss = 0.00123453\nI0818 03:48:51.054123 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:48:51.054139 17350 solver.cpp:244]     Train net output #1: loss = 0.00123397 (* 1 = 0.00123397 loss)\nI0818 03:48:51.136066 17350 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0818 03:49:38.458835 17350 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0818 03:50:05.080600 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8512\nI0818 03:50:05.080653 17350 solver.cpp:404]     Test net output #1: loss = 0.540598 (* 1 = 0.540598 loss)\nI0818 03:50:05.503235 17350 solver.cpp:228] Iteration 54900, loss = 0.00206588\nI0818 03:50:05.503271 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:50:05.503288 17350 solver.cpp:244]     Train net output #1: loss = 0.00206531 (* 1 = 0.00206531 loss)\nI0818 03:50:05.580883 17350 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0818 03:50:52.890965 17350 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0818 03:51:19.509806 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85344\nI0818 03:51:19.509857 17350 solver.cpp:404]     Test net output #1: loss = 0.538493 (* 1 = 0.538493 loss)\nI0818 03:51:19.932504 17350 solver.cpp:228] Iteration 55000, loss = 0.00133821\nI0818 03:51:19.932538 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:51:19.932552 17350 
solver.cpp:244]     Train net output #1: loss = 0.00133765 (* 1 = 0.00133765 loss)\nI0818 03:51:20.009896 17350 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0818 03:52:07.307027 17350 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0818 03:52:33.928885 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8516\nI0818 03:52:33.928936 17350 solver.cpp:404]     Test net output #1: loss = 0.541193 (* 1 = 0.541193 loss)\nI0818 03:52:34.351235 17350 solver.cpp:228] Iteration 55100, loss = 0.00131867\nI0818 03:52:34.351274 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:52:34.351290 17350 solver.cpp:244]     Train net output #1: loss = 0.00131811 (* 1 = 0.00131811 loss)\nI0818 03:52:34.427053 17350 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0818 03:53:21.670753 17350 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0818 03:53:48.293031 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85324\nI0818 03:53:48.293081 17350 solver.cpp:404]     Test net output #1: loss = 0.540518 (* 1 = 0.540518 loss)\nI0818 03:53:48.716552 17350 solver.cpp:228] Iteration 55200, loss = 0.00103722\nI0818 03:53:48.716588 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:53:48.716604 17350 solver.cpp:244]     Train net output #1: loss = 0.00103666 (* 1 = 0.00103666 loss)\nI0818 03:53:48.794236 17350 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0818 03:54:36.119614 17350 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0818 03:55:02.741468 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85184\nI0818 03:55:02.741518 17350 solver.cpp:404]     Test net output #1: loss = 0.542303 (* 1 = 0.542303 loss)\nI0818 03:55:03.164014 17350 solver.cpp:228] Iteration 55300, loss = 0.00166865\nI0818 03:55:03.164049 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:55:03.164065 17350 solver.cpp:244]     Train net output #1: loss = 0.00166809 (* 1 = 0.00166809 loss)\nI0818 03:55:03.246244 17350 
sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0818 03:55:50.517308 17350 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0818 03:56:17.144616 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85168\nI0818 03:56:17.144666 17350 solver.cpp:404]     Test net output #1: loss = 0.542747 (* 1 = 0.542747 loss)\nI0818 03:56:17.567454 17350 solver.cpp:228] Iteration 55400, loss = 0.000941017\nI0818 03:56:17.567488 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:56:17.567504 17350 solver.cpp:244]     Train net output #1: loss = 0.000940453 (* 1 = 0.000940453 loss)\nI0818 03:56:17.648007 17350 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0818 03:57:04.940420 17350 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0818 03:57:31.574368 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85124\nI0818 03:57:31.574424 17350 solver.cpp:404]     Test net output #1: loss = 0.542182 (* 1 = 0.542182 loss)\nI0818 03:57:31.997566 17350 solver.cpp:228] Iteration 55500, loss = 0.0014195\nI0818 03:57:31.997607 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:57:31.997632 17350 solver.cpp:244]     Train net output #1: loss = 0.00141894 (* 1 = 0.00141894 loss)\nI0818 03:57:32.074591 17350 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0818 03:58:19.431361 17350 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0818 03:58:46.251639 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8524\nI0818 03:58:46.251689 17350 solver.cpp:404]     Test net output #1: loss = 0.538625 (* 1 = 0.538625 loss)\nI0818 03:58:46.676324 17350 solver.cpp:228] Iteration 55600, loss = 0.00127935\nI0818 03:58:46.676369 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 03:58:46.676385 17350 solver.cpp:244]     Train net output #1: loss = 0.00127879 (* 1 = 0.00127879 loss)\nI0818 03:58:46.756693 17350 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0818 03:59:34.200109 17350 solver.cpp:337] Iteration 55700, Testing 
net (#0)\nI0818 04:00:00.957677 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85168\nI0818 04:00:00.957723 17350 solver.cpp:404]     Test net output #1: loss = 0.540121 (* 1 = 0.540121 loss)\nI0818 04:00:01.380991 17350 solver.cpp:228] Iteration 55700, loss = 0.000997159\nI0818 04:00:01.381038 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:00:01.381055 17350 solver.cpp:244]     Train net output #1: loss = 0.000996595 (* 1 = 0.000996595 loss)\nI0818 04:00:01.462978 17350 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0818 04:00:48.870018 17350 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0818 04:01:15.729158 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8534\nI0818 04:01:15.729203 17350 solver.cpp:404]     Test net output #1: loss = 0.540315 (* 1 = 0.540315 loss)\nI0818 04:01:16.152910 17350 solver.cpp:228] Iteration 55800, loss = 0.00105862\nI0818 04:01:16.152954 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:01:16.152971 17350 solver.cpp:244]     Train net output #1: loss = 0.00105805 (* 1 = 0.00105805 loss)\nI0818 04:01:16.231385 17350 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0818 04:02:03.384873 17350 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0818 04:02:30.275171 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85244\nI0818 04:02:30.275219 17350 solver.cpp:404]     Test net output #1: loss = 0.541511 (* 1 = 0.541511 loss)\nI0818 04:02:30.699400 17350 solver.cpp:228] Iteration 55900, loss = 0.00154266\nI0818 04:02:30.699446 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:02:30.699462 17350 solver.cpp:244]     Train net output #1: loss = 0.0015421 (* 1 = 0.0015421 loss)\nI0818 04:02:30.781045 17350 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0818 04:03:17.965749 17350 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0818 04:03:44.812276 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85296\nI0818 04:03:44.812325 
17350 solver.cpp:404]     Test net output #1: loss = 0.53946 (* 1 = 0.53946 loss)\nI0818 04:03:45.235532 17350 solver.cpp:228] Iteration 56000, loss = 0.00160165\nI0818 04:03:45.235575 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:03:45.235592 17350 solver.cpp:244]     Train net output #1: loss = 0.00160108 (* 1 = 0.00160108 loss)\nI0818 04:03:45.319777 17350 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0818 04:04:32.506932 17350 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0818 04:04:59.365422 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85212\nI0818 04:04:59.365471 17350 solver.cpp:404]     Test net output #1: loss = 0.540959 (* 1 = 0.540959 loss)\nI0818 04:04:59.788437 17350 solver.cpp:228] Iteration 56100, loss = 0.00140311\nI0818 04:04:59.788482 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:04:59.788498 17350 solver.cpp:244]     Train net output #1: loss = 0.00140255 (* 1 = 0.00140255 loss)\nI0818 04:04:59.870977 17350 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0818 04:05:47.067152 17350 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0818 04:06:13.846631 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85268\nI0818 04:06:13.846675 17350 solver.cpp:404]     Test net output #1: loss = 0.535608 (* 1 = 0.535608 loss)\nI0818 04:06:14.269595 17350 solver.cpp:228] Iteration 56200, loss = 0.0013495\nI0818 04:06:14.269640 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:06:14.269656 17350 solver.cpp:244]     Train net output #1: loss = 0.00134894 (* 1 = 0.00134894 loss)\nI0818 04:06:14.347028 17350 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0818 04:07:01.501895 17350 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0818 04:07:28.325552 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85152\nI0818 04:07:28.325603 17350 solver.cpp:404]     Test net output #1: loss = 0.536612 (* 1 = 0.536612 loss)\nI0818 04:07:28.748761 17350 
solver.cpp:228] Iteration 56300, loss = 0.00137363\nI0818 04:07:28.748806 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:07:28.748822 17350 solver.cpp:244]     Train net output #1: loss = 0.00137307 (* 1 = 0.00137307 loss)\nI0818 04:07:28.829725 17350 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0818 04:08:16.007369 17350 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0818 04:08:42.805599 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85176\nI0818 04:08:42.805649 17350 solver.cpp:404]     Test net output #1: loss = 0.539006 (* 1 = 0.539006 loss)\nI0818 04:08:43.228662 17350 solver.cpp:228] Iteration 56400, loss = 0.00139637\nI0818 04:08:43.228708 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:08:43.228724 17350 solver.cpp:244]     Train net output #1: loss = 0.00139581 (* 1 = 0.00139581 loss)\nI0818 04:08:43.304533 17350 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0818 04:09:30.474370 17350 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0818 04:09:57.314115 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85096\nI0818 04:09:57.314162 17350 solver.cpp:404]     Test net output #1: loss = 0.53946 (* 1 = 0.53946 loss)\nI0818 04:09:57.737251 17350 solver.cpp:228] Iteration 56500, loss = 0.00148317\nI0818 04:09:57.737298 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:09:57.737313 17350 solver.cpp:244]     Train net output #1: loss = 0.0014826 (* 1 = 0.0014826 loss)\nI0818 04:09:57.818948 17350 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0818 04:10:45.039585 17350 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0818 04:11:11.880791 17350 solver.cpp:404]     Test net output #0: accuracy = 0.853\nI0818 04:11:11.880841 17350 solver.cpp:404]     Test net output #1: loss = 0.538136 (* 1 = 0.538136 loss)\nI0818 04:11:12.304051 17350 solver.cpp:228] Iteration 56600, loss = 0.00159381\nI0818 04:11:12.304096 17350 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0818 04:11:12.304112 17350 solver.cpp:244]     Train net output #1: loss = 0.00159325 (* 1 = 0.00159325 loss)\nI0818 04:11:12.384706 17350 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0818 04:11:59.595082 17350 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0818 04:12:26.328716 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8518\nI0818 04:12:26.328761 17350 solver.cpp:404]     Test net output #1: loss = 0.538393 (* 1 = 0.538393 loss)\nI0818 04:12:26.751768 17350 solver.cpp:228] Iteration 56700, loss = 0.00099895\nI0818 04:12:26.751811 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:12:26.751827 17350 solver.cpp:244]     Train net output #1: loss = 0.000998386 (* 1 = 0.000998386 loss)\nI0818 04:12:26.828188 17350 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0818 04:13:13.986085 17350 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0818 04:13:40.792094 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85248\nI0818 04:13:40.792143 17350 solver.cpp:404]     Test net output #1: loss = 0.539889 (* 1 = 0.539889 loss)\nI0818 04:13:41.214845 17350 solver.cpp:228] Iteration 56800, loss = 0.00143931\nI0818 04:13:41.214885 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:13:41.214901 17350 solver.cpp:244]     Train net output #1: loss = 0.00143875 (* 1 = 0.00143875 loss)\nI0818 04:13:41.293359 17350 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0818 04:14:28.420301 17350 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0818 04:14:55.324393 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI0818 04:14:55.324442 17350 solver.cpp:404]     Test net output #1: loss = 0.541505 (* 1 = 0.541505 loss)\nI0818 04:14:55.747463 17350 solver.cpp:228] Iteration 56900, loss = 0.00121712\nI0818 04:14:55.747505 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:14:55.747522 17350 solver.cpp:244]     Train net output #1: loss = 0.00121655 (* 1 = 0.00121655 
loss)\nI0818 04:14:55.825579 17350 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0818 04:15:43.033644 17350 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0818 04:16:09.926401 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85288\nI0818 04:16:09.926451 17350 solver.cpp:404]     Test net output #1: loss = 0.539089 (* 1 = 0.539089 loss)\nI0818 04:16:10.349508 17350 solver.cpp:228] Iteration 57000, loss = 0.00141983\nI0818 04:16:10.349565 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:16:10.349582 17350 solver.cpp:244]     Train net output #1: loss = 0.00141926 (* 1 = 0.00141926 loss)\nI0818 04:16:10.424984 17350 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0818 04:16:57.631181 17350 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0818 04:17:24.519523 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85148\nI0818 04:17:24.519572 17350 solver.cpp:404]     Test net output #1: loss = 0.537087 (* 1 = 0.537087 loss)\nI0818 04:17:24.942395 17350 solver.cpp:228] Iteration 57100, loss = 0.00143919\nI0818 04:17:24.942453 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:17:24.942471 17350 solver.cpp:244]     Train net output #1: loss = 0.00143863 (* 1 = 0.00143863 loss)\nI0818 04:17:25.024771 17350 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0818 04:18:12.233166 17350 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0818 04:18:38.974705 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85216\nI0818 04:18:38.974751 17350 solver.cpp:404]     Test net output #1: loss = 0.538039 (* 1 = 0.538039 loss)\nI0818 04:18:39.397555 17350 solver.cpp:228] Iteration 57200, loss = 0.00116728\nI0818 04:18:39.397595 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:18:39.397611 17350 solver.cpp:244]     Train net output #1: loss = 0.00116672 (* 1 = 0.00116672 loss)\nI0818 04:18:39.476840 17350 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0818 04:19:26.686774 17350 
solver.cpp:337] Iteration 57300, Testing net (#0)\nI0818 04:19:53.401721 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85084\nI0818 04:19:53.401767 17350 solver.cpp:404]     Test net output #1: loss = 0.538115 (* 1 = 0.538115 loss)\nI0818 04:19:53.824836 17350 solver.cpp:228] Iteration 57300, loss = 0.00107696\nI0818 04:19:53.824888 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:19:53.824906 17350 solver.cpp:244]     Train net output #1: loss = 0.00107639 (* 1 = 0.00107639 loss)\nI0818 04:19:53.899624 17350 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0818 04:20:41.112393 17350 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0818 04:21:07.838034 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85252\nI0818 04:21:07.838080 17350 solver.cpp:404]     Test net output #1: loss = 0.536978 (* 1 = 0.536978 loss)\nI0818 04:21:08.262235 17350 solver.cpp:228] Iteration 57400, loss = 0.00126072\nI0818 04:21:08.262290 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:21:08.262306 17350 solver.cpp:244]     Train net output #1: loss = 0.00126016 (* 1 = 0.00126016 loss)\nI0818 04:21:08.344734 17350 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0818 04:21:55.601874 17350 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0818 04:22:22.310348 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85116\nI0818 04:22:22.310397 17350 solver.cpp:404]     Test net output #1: loss = 0.537197 (* 1 = 0.537197 loss)\nI0818 04:22:22.734621 17350 solver.cpp:228] Iteration 57500, loss = 0.00122372\nI0818 04:22:22.734683 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:22:22.734700 17350 solver.cpp:244]     Train net output #1: loss = 0.00122316 (* 1 = 0.00122316 loss)\nI0818 04:22:22.810420 17350 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0818 04:23:09.998163 17350 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0818 04:23:36.790207 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.85292\nI0818 04:23:36.790253 17350 solver.cpp:404]     Test net output #1: loss = 0.534126 (* 1 = 0.534126 loss)\nI0818 04:23:37.214177 17350 solver.cpp:228] Iteration 57600, loss = 0.00144156\nI0818 04:23:37.214241 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:23:37.214259 17350 solver.cpp:244]     Train net output #1: loss = 0.001441 (* 1 = 0.001441 loss)\nI0818 04:23:37.291676 17350 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0818 04:24:24.474638 17350 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0818 04:24:51.220479 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85156\nI0818 04:24:51.220528 17350 solver.cpp:404]     Test net output #1: loss = 0.5355 (* 1 = 0.5355 loss)\nI0818 04:24:51.644601 17350 solver.cpp:228] Iteration 57700, loss = 0.0010275\nI0818 04:24:51.644659 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:24:51.644675 17350 solver.cpp:244]     Train net output #1: loss = 0.00102693 (* 1 = 0.00102693 loss)\nI0818 04:24:51.716693 17350 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0818 04:25:38.872805 17350 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0818 04:26:05.492342 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85244\nI0818 04:26:05.492391 17350 solver.cpp:404]     Test net output #1: loss = 0.533804 (* 1 = 0.533804 loss)\nI0818 04:26:05.914407 17350 solver.cpp:228] Iteration 57800, loss = 0.00128149\nI0818 04:26:05.914448 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:26:05.914465 17350 solver.cpp:244]     Train net output #1: loss = 0.00128093 (* 1 = 0.00128093 loss)\nI0818 04:26:05.997953 17350 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0818 04:26:53.115690 17350 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0818 04:27:19.740211 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85168\nI0818 04:27:19.740262 17350 solver.cpp:404]     Test net output #1: loss = 0.533961 (* 1 = 0.533961 
loss)\nI0818 04:27:20.161834 17350 solver.cpp:228] Iteration 57900, loss = 0.00136702\nI0818 04:27:20.161877 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:27:20.161897 17350 solver.cpp:244]     Train net output #1: loss = 0.00136646 (* 1 = 0.00136646 loss)\nI0818 04:27:20.241914 17350 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0818 04:28:07.342692 17350 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0818 04:28:33.963608 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85236\nI0818 04:28:33.963657 17350 solver.cpp:404]     Test net output #1: loss = 0.53327 (* 1 = 0.53327 loss)\nI0818 04:28:34.385105 17350 solver.cpp:228] Iteration 58000, loss = 0.00127878\nI0818 04:28:34.385148 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:28:34.385164 17350 solver.cpp:244]     Train net output #1: loss = 0.00127822 (* 1 = 0.00127822 loss)\nI0818 04:28:34.465975 17350 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0818 04:29:21.613314 17350 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0818 04:29:48.234724 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85176\nI0818 04:29:48.234773 17350 solver.cpp:404]     Test net output #1: loss = 0.531252 (* 1 = 0.531252 loss)\nI0818 04:29:48.656671 17350 solver.cpp:228] Iteration 58100, loss = 0.00123099\nI0818 04:29:48.656711 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:29:48.656726 17350 solver.cpp:244]     Train net output #1: loss = 0.00123043 (* 1 = 0.00123043 loss)\nI0818 04:29:48.735443 17350 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0818 04:30:35.819479 17350 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0818 04:31:02.438769 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85376\nI0818 04:31:02.438817 17350 solver.cpp:404]     Test net output #1: loss = 0.530717 (* 1 = 0.530717 loss)\nI0818 04:31:02.860679 17350 solver.cpp:228] Iteration 58200, loss = 0.00125482\nI0818 04:31:02.860721 17350 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:31:02.860738 17350 solver.cpp:244]     Train net output #1: loss = 0.00125425 (* 1 = 0.00125425 loss)\nI0818 04:31:02.939141 17350 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0818 04:31:50.038933 17350 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0818 04:32:16.658380 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85196\nI0818 04:32:16.658429 17350 solver.cpp:404]     Test net output #1: loss = 0.532737 (* 1 = 0.532737 loss)\nI0818 04:32:17.080107 17350 solver.cpp:228] Iteration 58300, loss = 0.00139967\nI0818 04:32:17.080138 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:32:17.080154 17350 solver.cpp:244]     Train net output #1: loss = 0.00139911 (* 1 = 0.00139911 loss)\nI0818 04:32:17.161571 17350 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0818 04:33:04.291126 17350 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0818 04:33:30.908879 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85232\nI0818 04:33:30.908932 17350 solver.cpp:404]     Test net output #1: loss = 0.533859 (* 1 = 0.533859 loss)\nI0818 04:33:31.330621 17350 solver.cpp:228] Iteration 58400, loss = 0.00147337\nI0818 04:33:31.330653 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:33:31.330668 17350 solver.cpp:244]     Train net output #1: loss = 0.00147281 (* 1 = 0.00147281 loss)\nI0818 04:33:31.410629 17350 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0818 04:34:18.584484 17350 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0818 04:34:45.203299 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8512\nI0818 04:34:45.203349 17350 solver.cpp:404]     Test net output #1: loss = 0.535185 (* 1 = 0.535185 loss)\nI0818 04:34:45.625144 17350 solver.cpp:228] Iteration 58500, loss = 0.00154088\nI0818 04:34:45.625171 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:34:45.625186 17350 solver.cpp:244]     Train net output 
#1: loss = 0.00154032 (* 1 = 0.00154032 loss)\nI0818 04:34:45.698364 17350 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0818 04:35:32.837071 17350 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0818 04:35:59.454691 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85264\nI0818 04:35:59.454741 17350 solver.cpp:404]     Test net output #1: loss = 0.530812 (* 1 = 0.530812 loss)\nI0818 04:35:59.876430 17350 solver.cpp:228] Iteration 58600, loss = 0.00126532\nI0818 04:35:59.876462 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:35:59.876478 17350 solver.cpp:244]     Train net output #1: loss = 0.00126475 (* 1 = 0.00126475 loss)\nI0818 04:35:59.960369 17350 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0818 04:36:47.121635 17350 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0818 04:37:13.735208 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85116\nI0818 04:37:13.735257 17350 solver.cpp:404]     Test net output #1: loss = 0.533349 (* 1 = 0.533349 loss)\nI0818 04:37:14.157006 17350 solver.cpp:228] Iteration 58700, loss = 0.00121648\nI0818 04:37:14.157037 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:37:14.157052 17350 solver.cpp:244]     Train net output #1: loss = 0.00121592 (* 1 = 0.00121592 loss)\nI0818 04:37:14.237613 17350 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0818 04:38:01.382787 17350 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0818 04:38:28.002180 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85268\nI0818 04:38:28.002230 17350 solver.cpp:404]     Test net output #1: loss = 0.531093 (* 1 = 0.531093 loss)\nI0818 04:38:28.424072 17350 solver.cpp:228] Iteration 58800, loss = 0.00137783\nI0818 04:38:28.424106 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:38:28.424123 17350 solver.cpp:244]     Train net output #1: loss = 0.00137726 (* 1 = 0.00137726 loss)\nI0818 04:38:28.502401 17350 sgd_solver.cpp:166] Iteration 58800, lr = 
0.035\nI0818 04:39:15.644976 17350 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0818 04:39:42.262639 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85132\nI0818 04:39:42.262689 17350 solver.cpp:404]     Test net output #1: loss = 0.532051 (* 1 = 0.532051 loss)\nI0818 04:39:42.684464 17350 solver.cpp:228] Iteration 58900, loss = 0.00135002\nI0818 04:39:42.684499 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:39:42.684514 17350 solver.cpp:244]     Train net output #1: loss = 0.00134945 (* 1 = 0.00134945 loss)\nI0818 04:39:42.760859 17350 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0818 04:40:29.890123 17350 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0818 04:40:56.509085 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85108\nI0818 04:40:56.509135 17350 solver.cpp:404]     Test net output #1: loss = 0.535638 (* 1 = 0.535638 loss)\nI0818 04:40:56.930567 17350 solver.cpp:228] Iteration 59000, loss = 0.00135094\nI0818 04:40:56.930601 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:40:56.930618 17350 solver.cpp:244]     Train net output #1: loss = 0.00135037 (* 1 = 0.00135037 loss)\nI0818 04:40:57.007364 17350 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0818 04:41:44.166738 17350 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0818 04:42:10.788197 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85024\nI0818 04:42:10.788245 17350 solver.cpp:404]     Test net output #1: loss = 0.537226 (* 1 = 0.537226 loss)\nI0818 04:42:11.210033 17350 solver.cpp:228] Iteration 59100, loss = 0.00113452\nI0818 04:42:11.210067 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:42:11.210083 17350 solver.cpp:244]     Train net output #1: loss = 0.00113396 (* 1 = 0.00113396 loss)\nI0818 04:42:11.292929 17350 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0818 04:42:58.454509 17350 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0818 04:43:25.074570 17350 
solver.cpp:404]     Test net output #0: accuracy = 0.8522\nI0818 04:43:25.074621 17350 solver.cpp:404]     Test net output #1: loss = 0.535419 (* 1 = 0.535419 loss)\nI0818 04:43:25.496453 17350 solver.cpp:228] Iteration 59200, loss = 0.000979005\nI0818 04:43:25.496482 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:43:25.496497 17350 solver.cpp:244]     Train net output #1: loss = 0.000978441 (* 1 = 0.000978441 loss)\nI0818 04:43:25.576520 17350 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0818 04:44:12.731377 17350 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0818 04:44:39.349963 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85036\nI0818 04:44:39.350013 17350 solver.cpp:404]     Test net output #1: loss = 0.535724 (* 1 = 0.535724 loss)\nI0818 04:44:39.771543 17350 solver.cpp:228] Iteration 59300, loss = 0.00113408\nI0818 04:44:39.771574 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:44:39.771589 17350 solver.cpp:244]     Train net output #1: loss = 0.00113352 (* 1 = 0.00113352 loss)\nI0818 04:44:39.849453 17350 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0818 04:45:27.034353 17350 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0818 04:45:53.652142 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8522\nI0818 04:45:53.652190 17350 solver.cpp:404]     Test net output #1: loss = 0.530533 (* 1 = 0.530533 loss)\nI0818 04:45:54.073822 17350 solver.cpp:228] Iteration 59400, loss = 0.00102615\nI0818 04:45:54.073851 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:45:54.073866 17350 solver.cpp:244]     Train net output #1: loss = 0.00102559 (* 1 = 0.00102559 loss)\nI0818 04:45:54.155959 17350 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0818 04:46:41.306015 17350 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0818 04:47:07.923869 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85016\nI0818 04:47:07.923924 17350 solver.cpp:404]     Test net 
output #1: loss = 0.532722 (* 1 = 0.532722 loss)\nI0818 04:47:08.345777 17350 solver.cpp:228] Iteration 59500, loss = 0.001381\nI0818 04:47:08.345819 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:47:08.345835 17350 solver.cpp:244]     Train net output #1: loss = 0.00138043 (* 1 = 0.00138043 loss)\nI0818 04:47:08.423893 17350 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0818 04:47:55.553827 17350 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0818 04:48:22.171675 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85108\nI0818 04:48:22.171726 17350 solver.cpp:404]     Test net output #1: loss = 0.533668 (* 1 = 0.533668 loss)\nI0818 04:48:22.593212 17350 solver.cpp:228] Iteration 59600, loss = 0.00121345\nI0818 04:48:22.593252 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:48:22.593269 17350 solver.cpp:244]     Train net output #1: loss = 0.00121289 (* 1 = 0.00121289 loss)\nI0818 04:48:22.674984 17350 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0818 04:49:09.846335 17350 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0818 04:49:36.464805 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8498\nI0818 04:49:36.464854 17350 solver.cpp:404]     Test net output #1: loss = 0.534938 (* 1 = 0.534938 loss)\nI0818 04:49:36.886580 17350 solver.cpp:228] Iteration 59700, loss = 0.00116161\nI0818 04:49:36.886621 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:49:36.886637 17350 solver.cpp:244]     Train net output #1: loss = 0.00116104 (* 1 = 0.00116104 loss)\nI0818 04:49:36.970137 17350 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0818 04:50:24.133188 17350 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0818 04:50:50.752192 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85108\nI0818 04:50:50.752243 17350 solver.cpp:404]     Test net output #1: loss = 0.534286 (* 1 = 0.534286 loss)\nI0818 04:50:51.173789 17350 solver.cpp:228] Iteration 59800, loss = 
0.00136\nI0818 04:50:51.173831 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:50:51.173847 17350 solver.cpp:244]     Train net output #1: loss = 0.00135943 (* 1 = 0.00135943 loss)\nI0818 04:50:51.257011 17350 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0818 04:51:38.414358 17350 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0818 04:52:05.039216 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84972\nI0818 04:52:05.039265 17350 solver.cpp:404]     Test net output #1: loss = 0.533443 (* 1 = 0.533443 loss)\nI0818 04:52:05.460849 17350 solver.cpp:228] Iteration 59900, loss = 0.00133065\nI0818 04:52:05.460892 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:52:05.460909 17350 solver.cpp:244]     Train net output #1: loss = 0.00133008 (* 1 = 0.00133008 loss)\nI0818 04:52:05.539342 17350 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0818 04:52:52.672163 17350 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0818 04:53:19.291908 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85068\nI0818 04:53:19.291957 17350 solver.cpp:404]     Test net output #1: loss = 0.53422 (* 1 = 0.53422 loss)\nI0818 04:53:19.714543 17350 solver.cpp:228] Iteration 60000, loss = 0.00115255\nI0818 04:53:19.714587 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:53:19.714603 17350 solver.cpp:244]     Train net output #1: loss = 0.00115199 (* 1 = 0.00115199 loss)\nI0818 04:53:19.793596 17350 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0818 04:54:06.901924 17350 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0818 04:54:33.524653 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84996\nI0818 04:54:33.524705 17350 solver.cpp:404]     Test net output #1: loss = 0.533982 (* 1 = 0.533982 loss)\nI0818 04:54:33.947448 17350 solver.cpp:228] Iteration 60100, loss = 0.00121604\nI0818 04:54:33.947492 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:54:33.947509 17350 
solver.cpp:244]     Train net output #1: loss = 0.00121547 (* 1 = 0.00121547 loss)\nI0818 04:54:34.025459 17350 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0818 04:55:21.166282 17350 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0818 04:55:47.783804 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85236\nI0818 04:55:47.783855 17350 solver.cpp:404]     Test net output #1: loss = 0.529761 (* 1 = 0.529761 loss)\nI0818 04:55:48.206315 17350 solver.cpp:228] Iteration 60200, loss = 0.0014742\nI0818 04:55:48.206358 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:55:48.206374 17350 solver.cpp:244]     Train net output #1: loss = 0.00147364 (* 1 = 0.00147364 loss)\nI0818 04:55:48.280915 17350 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0818 04:56:35.382149 17350 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0818 04:57:02.003890 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85048\nI0818 04:57:02.003938 17350 solver.cpp:404]     Test net output #1: loss = 0.53301 (* 1 = 0.53301 loss)\nI0818 04:57:02.426523 17350 solver.cpp:228] Iteration 60300, loss = 0.00116138\nI0818 04:57:02.426568 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:57:02.426584 17350 solver.cpp:244]     Train net output #1: loss = 0.00116082 (* 1 = 0.00116082 loss)\nI0818 04:57:02.502452 17350 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0818 04:57:49.584879 17350 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0818 04:58:16.204154 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85204\nI0818 04:58:16.204203 17350 solver.cpp:404]     Test net output #1: loss = 0.530833 (* 1 = 0.530833 loss)\nI0818 04:58:16.626793 17350 solver.cpp:228] Iteration 60400, loss = 0.00133041\nI0818 04:58:16.626837 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:58:16.626854 17350 solver.cpp:244]     Train net output #1: loss = 0.00132984 (* 1 = 0.00132984 loss)\nI0818 04:58:16.705940 17350 
sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0818 04:59:03.817076 17350 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0818 04:59:30.435501 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85064\nI0818 04:59:30.435559 17350 solver.cpp:404]     Test net output #1: loss = 0.53178 (* 1 = 0.53178 loss)\nI0818 04:59:30.858041 17350 solver.cpp:228] Iteration 60500, loss = 0.00141761\nI0818 04:59:30.858084 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 04:59:30.858100 17350 solver.cpp:244]     Train net output #1: loss = 0.00141705 (* 1 = 0.00141705 loss)\nI0818 04:59:30.934015 17350 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0818 05:00:18.029265 17350 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0818 05:00:44.649531 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85128\nI0818 05:00:44.649580 17350 solver.cpp:404]     Test net output #1: loss = 0.531687 (* 1 = 0.531687 loss)\nI0818 05:00:45.072249 17350 solver.cpp:228] Iteration 60600, loss = 0.00127973\nI0818 05:00:45.072293 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:00:45.072310 17350 solver.cpp:244]     Train net output #1: loss = 0.00127917 (* 1 = 0.00127917 loss)\nI0818 05:00:45.151192 17350 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0818 05:01:32.229012 17350 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0818 05:01:58.843308 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8508\nI0818 05:01:58.843359 17350 solver.cpp:404]     Test net output #1: loss = 0.532024 (* 1 = 0.532024 loss)\nI0818 05:01:59.265758 17350 solver.cpp:228] Iteration 60700, loss = 0.00132348\nI0818 05:01:59.265802 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:01:59.265818 17350 solver.cpp:244]     Train net output #1: loss = 0.00132291 (* 1 = 0.00132291 loss)\nI0818 05:01:59.345927 17350 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0818 05:02:46.445083 17350 solver.cpp:337] Iteration 60800, Testing net 
(#0)\nI0818 05:03:13.069653 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85152\nI0818 05:03:13.069710 17350 solver.cpp:404]     Test net output #1: loss = 0.529679 (* 1 = 0.529679 loss)\nI0818 05:03:13.493239 17350 solver.cpp:228] Iteration 60800, loss = 0.00114908\nI0818 05:03:13.493285 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:03:13.493309 17350 solver.cpp:244]     Train net output #1: loss = 0.00114851 (* 1 = 0.00114851 loss)\nI0818 05:03:13.571986 17350 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0818 05:04:00.641741 17350 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0818 05:04:27.262917 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84988\nI0818 05:04:27.262969 17350 solver.cpp:404]     Test net output #1: loss = 0.531912 (* 1 = 0.531912 loss)\nI0818 05:04:27.685322 17350 solver.cpp:228] Iteration 60900, loss = 0.00120291\nI0818 05:04:27.685366 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:04:27.685382 17350 solver.cpp:244]     Train net output #1: loss = 0.00120235 (* 1 = 0.00120235 loss)\nI0818 05:04:27.774377 17350 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0818 05:05:14.852152 17350 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0818 05:05:41.475256 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85128\nI0818 05:05:41.475308 17350 solver.cpp:404]     Test net output #1: loss = 0.530943 (* 1 = 0.530943 loss)\nI0818 05:05:41.897658 17350 solver.cpp:228] Iteration 61000, loss = 0.00122767\nI0818 05:05:41.897701 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:05:41.897718 17350 solver.cpp:244]     Train net output #1: loss = 0.0012271 (* 1 = 0.0012271 loss)\nI0818 05:05:41.979945 17350 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0818 05:06:29.033488 17350 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0818 05:06:55.655654 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8506\nI0818 05:06:55.655707 17350 
solver.cpp:404]     Test net output #1: loss = 0.529933 (* 1 = 0.529933 loss)\nI0818 05:06:56.078065 17350 solver.cpp:228] Iteration 61100, loss = 0.00116502\nI0818 05:06:56.078109 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:06:56.078125 17350 solver.cpp:244]     Train net output #1: loss = 0.00116445 (* 1 = 0.00116445 loss)\nI0818 05:06:56.164950 17350 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0818 05:07:43.220758 17350 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0818 05:08:09.842733 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85256\nI0818 05:08:09.842785 17350 solver.cpp:404]     Test net output #1: loss = 0.528676 (* 1 = 0.528676 loss)\nI0818 05:08:10.265576 17350 solver.cpp:228] Iteration 61200, loss = 0.00109585\nI0818 05:08:10.265621 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:08:10.265637 17350 solver.cpp:244]     Train net output #1: loss = 0.00109529 (* 1 = 0.00109529 loss)\nI0818 05:08:10.342443 17350 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0818 05:08:57.411723 17350 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0818 05:09:24.036509 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85\nI0818 05:09:24.036561 17350 solver.cpp:404]     Test net output #1: loss = 0.531481 (* 1 = 0.531481 loss)\nI0818 05:09:24.460160 17350 solver.cpp:228] Iteration 61300, loss = 0.00138616\nI0818 05:09:24.460206 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:09:24.460222 17350 solver.cpp:244]     Train net output #1: loss = 0.00138559 (* 1 = 0.00138559 loss)\nI0818 05:09:24.537019 17350 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0818 05:10:11.596981 17350 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0818 05:10:38.220621 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85172\nI0818 05:10:38.220672 17350 solver.cpp:404]     Test net output #1: loss = 0.528801 (* 1 = 0.528801 loss)\nI0818 05:10:38.643388 17350 solver.cpp:228] 
Iteration 61400, loss = 0.00104844\nI0818 05:10:38.643425 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:10:38.643441 17350 solver.cpp:244]     Train net output #1: loss = 0.00104787 (* 1 = 0.00104787 loss)\nI0818 05:10:38.724263 17350 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0818 05:11:25.828337 17350 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0818 05:11:52.450654 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84988\nI0818 05:11:52.450704 17350 solver.cpp:404]     Test net output #1: loss = 0.532499 (* 1 = 0.532499 loss)\nI0818 05:11:52.873142 17350 solver.cpp:228] Iteration 61500, loss = 0.00124793\nI0818 05:11:52.873178 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:11:52.873194 17350 solver.cpp:244]     Train net output #1: loss = 0.00124736 (* 1 = 0.00124736 loss)\nI0818 05:11:52.954152 17350 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0818 05:12:40.022552 17350 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0818 05:13:06.654983 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85152\nI0818 05:13:06.655031 17350 solver.cpp:404]     Test net output #1: loss = 0.528643 (* 1 = 0.528643 loss)\nI0818 05:13:07.078372 17350 solver.cpp:228] Iteration 61600, loss = 0.00109223\nI0818 05:13:07.078409 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:13:07.078424 17350 solver.cpp:244]     Train net output #1: loss = 0.00109167 (* 1 = 0.00109167 loss)\nI0818 05:13:07.159970 17350 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0818 05:13:54.213282 17350 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0818 05:14:20.840637 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8502\nI0818 05:14:20.840687 17350 solver.cpp:404]     Test net output #1: loss = 0.52927 (* 1 = 0.52927 loss)\nI0818 05:14:21.263844 17350 solver.cpp:228] Iteration 61700, loss = 0.00116617\nI0818 05:14:21.263882 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
05:14:21.263898 17350 solver.cpp:244]     Train net output #1: loss = 0.00116561 (* 1 = 0.00116561 loss)\nI0818 05:14:21.347223 17350 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0818 05:15:08.425962 17350 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0818 05:15:35.047899 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8508\nI0818 05:15:35.047950 17350 solver.cpp:404]     Test net output #1: loss = 0.531544 (* 1 = 0.531544 loss)\nI0818 05:15:35.470401 17350 solver.cpp:228] Iteration 61800, loss = 0.00118582\nI0818 05:15:35.470435 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:15:35.470451 17350 solver.cpp:244]     Train net output #1: loss = 0.00118526 (* 1 = 0.00118526 loss)\nI0818 05:15:35.556742 17350 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0818 05:16:22.633524 17350 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0818 05:16:49.258359 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8496\nI0818 05:16:49.258410 17350 solver.cpp:404]     Test net output #1: loss = 0.531192 (* 1 = 0.531192 loss)\nI0818 05:16:49.680779 17350 solver.cpp:228] Iteration 61900, loss = 0.00122458\nI0818 05:16:49.680815 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:16:49.680830 17350 solver.cpp:244]     Train net output #1: loss = 0.00122402 (* 1 = 0.00122402 loss)\nI0818 05:16:49.757465 17350 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0818 05:17:36.820981 17350 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0818 05:18:03.440551 17350 solver.cpp:404]     Test net output #0: accuracy = 0.851\nI0818 05:18:03.440600 17350 solver.cpp:404]     Test net output #1: loss = 0.529346 (* 1 = 0.529346 loss)\nI0818 05:18:03.863559 17350 solver.cpp:228] Iteration 62000, loss = 0.00114804\nI0818 05:18:03.863595 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:18:03.863612 17350 solver.cpp:244]     Train net output #1: loss = 0.00114748 (* 1 = 0.00114748 loss)\nI0818 
05:18:03.941431 17350 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0818 05:18:51.013437 17350 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0818 05:19:17.637184 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84888\nI0818 05:19:17.637234 17350 solver.cpp:404]     Test net output #1: loss = 0.530597 (* 1 = 0.530597 loss)\nI0818 05:19:18.059650 17350 solver.cpp:228] Iteration 62100, loss = 0.00103649\nI0818 05:19:18.059684 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:19:18.059700 17350 solver.cpp:244]     Train net output #1: loss = 0.00103592 (* 1 = 0.00103592 loss)\nI0818 05:19:18.138037 17350 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0818 05:20:05.285897 17350 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0818 05:20:31.905513 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85084\nI0818 05:20:31.905561 17350 solver.cpp:404]     Test net output #1: loss = 0.529472 (* 1 = 0.529472 loss)\nI0818 05:20:32.327940 17350 solver.cpp:228] Iteration 62200, loss = 0.00113263\nI0818 05:20:32.327970 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:20:32.327986 17350 solver.cpp:244]     Train net output #1: loss = 0.00113206 (* 1 = 0.00113206 loss)\nI0818 05:20:32.403995 17350 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0818 05:21:19.554512 17350 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0818 05:21:46.177544 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8488\nI0818 05:21:46.177594 17350 solver.cpp:404]     Test net output #1: loss = 0.529954 (* 1 = 0.529954 loss)\nI0818 05:21:46.600069 17350 solver.cpp:228] Iteration 62300, loss = 0.00109398\nI0818 05:21:46.600102 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:21:46.600117 17350 solver.cpp:244]     Train net output #1: loss = 0.00109341 (* 1 = 0.00109341 loss)\nI0818 05:21:46.676455 17350 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0818 05:22:33.833211 17350 solver.cpp:337] 
Iteration 62400, Testing net (#0)\nI0818 05:23:00.456697 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8506\nI0818 05:23:00.456748 17350 solver.cpp:404]     Test net output #1: loss = 0.530062 (* 1 = 0.530062 loss)\nI0818 05:23:00.879283 17350 solver.cpp:228] Iteration 62400, loss = 0.00108976\nI0818 05:23:00.879320 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:23:00.879336 17350 solver.cpp:244]     Train net output #1: loss = 0.0010892 (* 1 = 0.0010892 loss)\nI0818 05:23:00.962790 17350 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0818 05:23:48.132654 17350 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0818 05:24:14.756821 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84964\nI0818 05:24:14.756872 17350 solver.cpp:404]     Test net output #1: loss = 0.530775 (* 1 = 0.530775 loss)\nI0818 05:24:15.180142 17350 solver.cpp:228] Iteration 62500, loss = 0.00130792\nI0818 05:24:15.180184 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:24:15.180200 17350 solver.cpp:244]     Train net output #1: loss = 0.00130736 (* 1 = 0.00130736 loss)\nI0818 05:24:15.257200 17350 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0818 05:25:02.361034 17350 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0818 05:25:28.985114 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85128\nI0818 05:25:28.985165 17350 solver.cpp:404]     Test net output #1: loss = 0.52802 (* 1 = 0.52802 loss)\nI0818 05:25:29.408450 17350 solver.cpp:228] Iteration 62600, loss = 0.00108824\nI0818 05:25:29.408491 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:25:29.408509 17350 solver.cpp:244]     Train net output #1: loss = 0.00108768 (* 1 = 0.00108768 loss)\nI0818 05:25:29.486301 17350 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0818 05:26:16.545241 17350 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0818 05:26:43.170212 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.8498\nI0818 05:26:43.170262 17350 solver.cpp:404]     Test net output #1: loss = 0.529376 (* 1 = 0.529376 loss)\nI0818 05:26:43.593665 17350 solver.cpp:228] Iteration 62700, loss = 0.00126731\nI0818 05:26:43.593708 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:26:43.593724 17350 solver.cpp:244]     Train net output #1: loss = 0.00126675 (* 1 = 0.00126675 loss)\nI0818 05:26:43.673982 17350 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0818 05:27:30.743240 17350 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0818 05:27:57.367379 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85068\nI0818 05:27:57.367429 17350 solver.cpp:404]     Test net output #1: loss = 0.529358 (* 1 = 0.529358 loss)\nI0818 05:27:57.790890 17350 solver.cpp:228] Iteration 62800, loss = 0.00145866\nI0818 05:27:57.790930 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:27:57.790946 17350 solver.cpp:244]     Train net output #1: loss = 0.0014581 (* 1 = 0.0014581 loss)\nI0818 05:27:57.872594 17350 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0818 05:28:44.998895 17350 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0818 05:29:11.621438 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8498\nI0818 05:29:11.621490 17350 solver.cpp:404]     Test net output #1: loss = 0.529164 (* 1 = 0.529164 loss)\nI0818 05:29:12.044618 17350 solver.cpp:228] Iteration 62900, loss = 0.00107268\nI0818 05:29:12.044658 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:29:12.044674 17350 solver.cpp:244]     Train net output #1: loss = 0.00107212 (* 1 = 0.00107212 loss)\nI0818 05:29:12.126629 17350 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0818 05:29:59.218096 17350 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0818 05:30:25.843430 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85136\nI0818 05:30:25.843480 17350 solver.cpp:404]     Test net output #1: loss = 0.529061 (* 1 = 0.529061 loss)\nI0818 
05:30:26.266088 17350 solver.cpp:228] Iteration 63000, loss = 0.0010558\nI0818 05:30:26.266127 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:30:26.266144 17350 solver.cpp:244]     Train net output #1: loss = 0.00105523 (* 1 = 0.00105523 loss)\nI0818 05:30:26.348845 17350 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0818 05:31:13.441726 17350 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0818 05:31:40.067379 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8488\nI0818 05:31:40.067431 17350 solver.cpp:404]     Test net output #1: loss = 0.53207 (* 1 = 0.53207 loss)\nI0818 05:31:40.490370 17350 solver.cpp:228] Iteration 63100, loss = 0.00111338\nI0818 05:31:40.490411 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:31:40.490427 17350 solver.cpp:244]     Train net output #1: loss = 0.00111281 (* 1 = 0.00111281 loss)\nI0818 05:31:40.571943 17350 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0818 05:32:27.692628 17350 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0818 05:32:54.315089 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85112\nI0818 05:32:54.315145 17350 solver.cpp:404]     Test net output #1: loss = 0.528289 (* 1 = 0.528289 loss)\nI0818 05:32:54.738627 17350 solver.cpp:228] Iteration 63200, loss = 0.00129878\nI0818 05:32:54.738665 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:32:54.738682 17350 solver.cpp:244]     Train net output #1: loss = 0.00129821 (* 1 = 0.00129821 loss)\nI0818 05:32:54.817600 17350 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0818 05:33:41.910356 17350 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0818 05:34:08.529078 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8496\nI0818 05:34:08.529131 17350 solver.cpp:404]     Test net output #1: loss = 0.530096 (* 1 = 0.530096 loss)\nI0818 05:34:08.952394 17350 solver.cpp:228] Iteration 63300, loss = 0.00126675\nI0818 05:34:08.952428 17350 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0818 05:34:08.952445 17350 solver.cpp:244]     Train net output #1: loss = 0.00126619 (* 1 = 0.00126619 loss)\nI0818 05:34:09.032071 17350 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0818 05:34:56.126765 17350 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0818 05:35:22.744496 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85136\nI0818 05:35:22.744549 17350 solver.cpp:404]     Test net output #1: loss = 0.526246 (* 1 = 0.526246 loss)\nI0818 05:35:23.168176 17350 solver.cpp:228] Iteration 63400, loss = 0.0010057\nI0818 05:35:23.168210 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:35:23.168226 17350 solver.cpp:244]     Train net output #1: loss = 0.00100513 (* 1 = 0.00100513 loss)\nI0818 05:35:23.249150 17350 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0818 05:36:10.389452 17350 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0818 05:36:37.010609 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85004\nI0818 05:36:37.010661 17350 solver.cpp:404]     Test net output #1: loss = 0.526894 (* 1 = 0.526894 loss)\nI0818 05:36:37.433466 17350 solver.cpp:228] Iteration 63500, loss = 0.00110189\nI0818 05:36:37.433508 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:36:37.433533 17350 solver.cpp:244]     Train net output #1: loss = 0.00110133 (* 1 = 0.00110133 loss)\nI0818 05:36:37.512554 17350 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0818 05:37:24.740192 17350 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0818 05:37:51.363510 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85184\nI0818 05:37:51.363566 17350 solver.cpp:404]     Test net output #1: loss = 0.526614 (* 1 = 0.526614 loss)\nI0818 05:37:51.786473 17350 solver.cpp:228] Iteration 63600, loss = 0.00134838\nI0818 05:37:51.786514 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:37:51.786537 17350 solver.cpp:244]     Train net output #1: loss = 0.00134781 
(* 1 = 0.00134781 loss)\nI0818 05:37:51.868048 17350 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0818 05:38:39.084295 17350 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0818 05:39:05.706423 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8494\nI0818 05:39:05.706480 17350 solver.cpp:404]     Test net output #1: loss = 0.532522 (* 1 = 0.532522 loss)\nI0818 05:39:06.129271 17350 solver.cpp:228] Iteration 63700, loss = 0.00103542\nI0818 05:39:06.129312 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:39:06.129338 17350 solver.cpp:244]     Train net output #1: loss = 0.00103486 (* 1 = 0.00103486 loss)\nI0818 05:39:06.213013 17350 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0818 05:39:53.477231 17350 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0818 05:40:20.101061 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85064\nI0818 05:40:20.101125 17350 solver.cpp:404]     Test net output #1: loss = 0.532176 (* 1 = 0.532176 loss)\nI0818 05:40:20.524935 17350 solver.cpp:228] Iteration 63800, loss = 0.00131813\nI0818 05:40:20.524972 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:40:20.524996 17350 solver.cpp:244]     Train net output #1: loss = 0.00131757 (* 1 = 0.00131757 loss)\nI0818 05:40:20.609781 17350 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0818 05:41:07.856746 17350 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0818 05:41:34.478600 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84976\nI0818 05:41:34.478653 17350 solver.cpp:404]     Test net output #1: loss = 0.529899 (* 1 = 0.529899 loss)\nI0818 05:41:34.900236 17350 solver.cpp:228] Iteration 63900, loss = 0.00109621\nI0818 05:41:34.900274 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:41:34.900298 17350 solver.cpp:244]     Train net output #1: loss = 0.00109564 (* 1 = 0.00109564 loss)\nI0818 05:41:34.986146 17350 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0818 
05:42:22.229545 17350 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0818 05:42:48.855734 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85188\nI0818 05:42:48.855790 17350 solver.cpp:404]     Test net output #1: loss = 0.528047 (* 1 = 0.528047 loss)\nI0818 05:42:49.277995 17350 solver.cpp:228] Iteration 64000, loss = 0.00124366\nI0818 05:42:49.278036 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:42:49.278061 17350 solver.cpp:244]     Train net output #1: loss = 0.0012431 (* 1 = 0.0012431 loss)\nI0818 05:42:49.354352 17350 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0818 05:43:36.579569 17350 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0818 05:44:03.206656 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8506\nI0818 05:44:03.206710 17350 solver.cpp:404]     Test net output #1: loss = 0.53135 (* 1 = 0.53135 loss)\nI0818 05:44:03.629703 17350 solver.cpp:228] Iteration 64100, loss = 0.00103281\nI0818 05:44:03.629743 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:44:03.629767 17350 solver.cpp:244]     Train net output #1: loss = 0.00103224 (* 1 = 0.00103224 loss)\nI0818 05:44:03.707486 17350 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0818 05:44:50.968968 17350 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0818 05:45:17.594899 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85144\nI0818 05:45:17.594956 17350 solver.cpp:404]     Test net output #1: loss = 0.528452 (* 1 = 0.528452 loss)\nI0818 05:45:18.016707 17350 solver.cpp:228] Iteration 64200, loss = 0.00116248\nI0818 05:45:18.016746 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:45:18.016770 17350 solver.cpp:244]     Train net output #1: loss = 0.00116191 (* 1 = 0.00116191 loss)\nI0818 05:45:18.098098 17350 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0818 05:46:05.366081 17350 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0818 05:46:31.994412 17350 solver.cpp:404]     Test 
net output #0: accuracy = 0.8504\nI0818 05:46:31.994469 17350 solver.cpp:404]     Test net output #1: loss = 0.529184 (* 1 = 0.529184 loss)\nI0818 05:46:32.417106 17350 solver.cpp:228] Iteration 64300, loss = 0.00104351\nI0818 05:46:32.417147 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:46:32.417171 17350 solver.cpp:244]     Train net output #1: loss = 0.00104295 (* 1 = 0.00104295 loss)\nI0818 05:46:32.499577 17350 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0818 05:47:19.740684 17350 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0818 05:47:46.365736 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8524\nI0818 05:47:46.365789 17350 solver.cpp:404]     Test net output #1: loss = 0.527887 (* 1 = 0.527887 loss)\nI0818 05:47:46.788267 17350 solver.cpp:228] Iteration 64400, loss = 0.00136671\nI0818 05:47:46.788305 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:47:46.788331 17350 solver.cpp:244]     Train net output #1: loss = 0.00136615 (* 1 = 0.00136615 loss)\nI0818 05:47:46.873028 17350 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0818 05:48:34.152665 17350 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0818 05:49:00.788385 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85008\nI0818 05:49:00.788440 17350 solver.cpp:404]     Test net output #1: loss = 0.528176 (* 1 = 0.528176 loss)\nI0818 05:49:01.211414 17350 solver.cpp:228] Iteration 64500, loss = 0.00105176\nI0818 05:49:01.211458 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:49:01.211482 17350 solver.cpp:244]     Train net output #1: loss = 0.0010512 (* 1 = 0.0010512 loss)\nI0818 05:49:01.292202 17350 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0818 05:49:48.589114 17350 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0818 05:50:15.224439 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85048\nI0818 05:50:15.224496 17350 solver.cpp:404]     Test net output #1: loss = 0.531083 (* 1 = 
0.531083 loss)\nI0818 05:50:15.647296 17350 solver.cpp:228] Iteration 64600, loss = 0.00131688\nI0818 05:50:15.647339 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:50:15.647363 17350 solver.cpp:244]     Train net output #1: loss = 0.00131632 (* 1 = 0.00131632 loss)\nI0818 05:50:15.727915 17350 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0818 05:51:02.974023 17350 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0818 05:51:29.669083 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85016\nI0818 05:51:29.669140 17350 solver.cpp:404]     Test net output #1: loss = 0.529567 (* 1 = 0.529567 loss)\nI0818 05:51:30.092151 17350 solver.cpp:228] Iteration 64700, loss = 0.0011803\nI0818 05:51:30.092193 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:51:30.092217 17350 solver.cpp:244]     Train net output #1: loss = 0.00117974 (* 1 = 0.00117974 loss)\nI0818 05:51:30.169277 17350 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0818 05:52:17.456446 17350 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0818 05:52:44.112926 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85244\nI0818 05:52:44.112982 17350 solver.cpp:404]     Test net output #1: loss = 0.525878 (* 1 = 0.525878 loss)\nI0818 05:52:44.535650 17350 solver.cpp:228] Iteration 64800, loss = 0.00110187\nI0818 05:52:44.535691 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:52:44.535715 17350 solver.cpp:244]     Train net output #1: loss = 0.0011013 (* 1 = 0.0011013 loss)\nI0818 05:52:44.611829 17350 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0818 05:53:31.888454 17350 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0818 05:53:58.540999 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8506\nI0818 05:53:58.541056 17350 solver.cpp:404]     Test net output #1: loss = 0.52841 (* 1 = 0.52841 loss)\nI0818 05:53:58.963801 17350 solver.cpp:228] Iteration 64900, loss = 0.00115068\nI0818 05:53:58.963838 17350 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:53:58.963863 17350 solver.cpp:244]     Train net output #1: loss = 0.00115012 (* 1 = 0.00115012 loss)\nI0818 05:53:59.048140 17350 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0818 05:54:46.341610 17350 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0818 05:55:12.989332 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85164\nI0818 05:55:12.989389 17350 solver.cpp:404]     Test net output #1: loss = 0.52738 (* 1 = 0.52738 loss)\nI0818 05:55:13.411991 17350 solver.cpp:228] Iteration 65000, loss = 0.00108195\nI0818 05:55:13.412032 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:55:13.412055 17350 solver.cpp:244]     Train net output #1: loss = 0.00108139 (* 1 = 0.00108139 loss)\nI0818 05:55:13.490963 17350 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0818 05:56:00.749634 17350 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0818 05:56:27.378448 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85056\nI0818 05:56:27.378504 17350 solver.cpp:404]     Test net output #1: loss = 0.526839 (* 1 = 0.526839 loss)\nI0818 05:56:27.801157 17350 solver.cpp:228] Iteration 65100, loss = 0.00101019\nI0818 05:56:27.801196 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:56:27.801221 17350 solver.cpp:244]     Train net output #1: loss = 0.00100963 (* 1 = 0.00100963 loss)\nI0818 05:56:27.881918 17350 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0818 05:57:15.174154 17350 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0818 05:57:41.805927 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85252\nI0818 05:57:41.805984 17350 solver.cpp:404]     Test net output #1: loss = 0.525894 (* 1 = 0.525894 loss)\nI0818 05:57:42.228859 17350 solver.cpp:228] Iteration 65200, loss = 0.00118494\nI0818 05:57:42.228899 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:57:42.228924 17350 solver.cpp:244]     Train net output #1: 
loss = 0.00118438 (* 1 = 0.00118438 loss)\nI0818 05:57:42.308064 17350 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0818 05:58:29.635360 17350 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0818 05:58:56.274268 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85016\nI0818 05:58:56.274319 17350 solver.cpp:404]     Test net output #1: loss = 0.529511 (* 1 = 0.529511 loss)\nI0818 05:58:56.696264 17350 solver.cpp:228] Iteration 65300, loss = 0.0013089\nI0818 05:58:56.696300 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 05:58:56.696316 17350 solver.cpp:244]     Train net output #1: loss = 0.00130833 (* 1 = 0.00130833 loss)\nI0818 05:58:56.780596 17350 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0818 05:59:44.053838 17350 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0818 06:00:10.682026 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85256\nI0818 06:00:10.682078 17350 solver.cpp:404]     Test net output #1: loss = 0.526013 (* 1 = 0.526013 loss)\nI0818 06:00:11.104120 17350 solver.cpp:228] Iteration 65400, loss = 0.00119588\nI0818 06:00:11.104161 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:00:11.104176 17350 solver.cpp:244]     Train net output #1: loss = 0.00119532 (* 1 = 0.00119532 loss)\nI0818 06:00:11.187888 17350 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0818 06:00:58.507258 17350 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0818 06:01:25.130909 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85024\nI0818 06:01:25.130959 17350 solver.cpp:404]     Test net output #1: loss = 0.52817 (* 1 = 0.52817 loss)\nI0818 06:01:25.552727 17350 solver.cpp:228] Iteration 65500, loss = 0.00137469\nI0818 06:01:25.552765 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:01:25.552783 17350 solver.cpp:244]     Train net output #1: loss = 0.00137413 (* 1 = 0.00137413 loss)\nI0818 06:01:25.633656 17350 sgd_solver.cpp:166] Iteration 65500, lr = 
0.035\nI0818 06:02:12.885709 17350 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0818 06:02:39.507699 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85172\nI0818 06:02:39.507750 17350 solver.cpp:404]     Test net output #1: loss = 0.528354 (* 1 = 0.528354 loss)\nI0818 06:02:39.929227 17350 solver.cpp:228] Iteration 65600, loss = 0.00114307\nI0818 06:02:39.929266 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:02:39.929283 17350 solver.cpp:244]     Train net output #1: loss = 0.00114251 (* 1 = 0.00114251 loss)\nI0818 06:02:40.017191 17350 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0818 06:03:27.271189 17350 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0818 06:03:53.893082 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85008\nI0818 06:03:53.893131 17350 solver.cpp:404]     Test net output #1: loss = 0.52967 (* 1 = 0.52967 loss)\nI0818 06:03:54.315034 17350 solver.cpp:228] Iteration 65700, loss = 0.00115657\nI0818 06:03:54.315074 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:03:54.315090 17350 solver.cpp:244]     Train net output #1: loss = 0.00115601 (* 1 = 0.00115601 loss)\nI0818 06:03:54.398039 17350 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0818 06:04:41.681303 17350 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0818 06:05:08.305279 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85092\nI0818 06:05:08.305328 17350 solver.cpp:404]     Test net output #1: loss = 0.529245 (* 1 = 0.529245 loss)\nI0818 06:05:08.727100 17350 solver.cpp:228] Iteration 65800, loss = 0.00119111\nI0818 06:05:08.727139 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:05:08.727155 17350 solver.cpp:244]     Train net output #1: loss = 0.00119055 (* 1 = 0.00119055 loss)\nI0818 06:05:08.811240 17350 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0818 06:05:56.080879 17350 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0818 06:06:22.702033 17350 
solver.cpp:404]     Test net output #0: accuracy = 0.85084\nI0818 06:06:22.702082 17350 solver.cpp:404]     Test net output #1: loss = 0.529575 (* 1 = 0.529575 loss)\nI0818 06:06:23.123226 17350 solver.cpp:228] Iteration 65900, loss = 0.00100465\nI0818 06:06:23.123268 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:06:23.123284 17350 solver.cpp:244]     Train net output #1: loss = 0.00100409 (* 1 = 0.00100409 loss)\nI0818 06:06:23.205305 17350 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0818 06:07:10.487766 17350 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0818 06:07:37.109750 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85188\nI0818 06:07:37.109799 17350 solver.cpp:404]     Test net output #1: loss = 0.527177 (* 1 = 0.527177 loss)\nI0818 06:07:37.531765 17350 solver.cpp:228] Iteration 66000, loss = 0.00128112\nI0818 06:07:37.531800 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:07:37.531814 17350 solver.cpp:244]     Train net output #1: loss = 0.00128055 (* 1 = 0.00128055 loss)\nI0818 06:07:37.612193 17350 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0818 06:08:24.867683 17350 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0818 06:08:51.491536 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85004\nI0818 06:08:51.491587 17350 solver.cpp:404]     Test net output #1: loss = 0.531305 (* 1 = 0.531305 loss)\nI0818 06:08:51.913306 17350 solver.cpp:228] Iteration 66100, loss = 0.00107668\nI0818 06:08:51.913352 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:08:51.913367 17350 solver.cpp:244]     Train net output #1: loss = 0.00107612 (* 1 = 0.00107612 loss)\nI0818 06:08:51.995622 17350 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0818 06:09:39.271877 17350 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0818 06:10:05.896230 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84996\nI0818 06:10:05.896281 17350 solver.cpp:404]     Test net 
output #1: loss = 0.530478 (* 1 = 0.530478 loss)\nI0818 06:10:06.318109 17350 solver.cpp:228] Iteration 66200, loss = 0.00106343\nI0818 06:10:06.318151 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:10:06.318167 17350 solver.cpp:244]     Train net output #1: loss = 0.00106286 (* 1 = 0.00106286 loss)\nI0818 06:10:06.402307 17350 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0818 06:10:53.710206 17350 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0818 06:11:20.335091 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84912\nI0818 06:11:20.335140 17350 solver.cpp:404]     Test net output #1: loss = 0.532119 (* 1 = 0.532119 loss)\nI0818 06:11:20.756813 17350 solver.cpp:228] Iteration 66300, loss = 0.000971894\nI0818 06:11:20.756851 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:11:20.756871 17350 solver.cpp:244]     Train net output #1: loss = 0.000971329 (* 1 = 0.000971329 loss)\nI0818 06:11:20.837822 17350 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0818 06:12:08.132086 17350 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0818 06:12:34.752383 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8518\nI0818 06:12:34.752434 17350 solver.cpp:404]     Test net output #1: loss = 0.526002 (* 1 = 0.526002 loss)\nI0818 06:12:35.174018 17350 solver.cpp:228] Iteration 66400, loss = 0.00105829\nI0818 06:12:35.174057 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:12:35.174073 17350 solver.cpp:244]     Train net output #1: loss = 0.00105773 (* 1 = 0.00105773 loss)\nI0818 06:12:35.252048 17350 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0818 06:13:22.537415 17350 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0818 06:13:49.160629 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85096\nI0818 06:13:49.160681 17350 solver.cpp:404]     Test net output #1: loss = 0.527855 (* 1 = 0.527855 loss)\nI0818 06:13:49.582414 17350 solver.cpp:228] Iteration 66500, loss = 
0.00126021\nI0818 06:13:49.582451 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:13:49.582468 17350 solver.cpp:244]     Train net output #1: loss = 0.00125965 (* 1 = 0.00125965 loss)\nI0818 06:13:49.659391 17350 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0818 06:14:36.975324 17350 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0818 06:15:03.602769 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85108\nI0818 06:15:03.602819 17350 solver.cpp:404]     Test net output #1: loss = 0.52989 (* 1 = 0.52989 loss)\nI0818 06:15:04.024926 17350 solver.cpp:228] Iteration 66600, loss = 0.000993817\nI0818 06:15:04.024966 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:15:04.024981 17350 solver.cpp:244]     Train net output #1: loss = 0.000993252 (* 1 = 0.000993252 loss)\nI0818 06:15:04.110079 17350 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0818 06:15:51.384630 17350 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0818 06:16:18.010736 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85016\nI0818 06:16:18.010787 17350 solver.cpp:404]     Test net output #1: loss = 0.52999 (* 1 = 0.52999 loss)\nI0818 06:16:18.432948 17350 solver.cpp:228] Iteration 66700, loss = 0.00113713\nI0818 06:16:18.432987 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:16:18.433003 17350 solver.cpp:244]     Train net output #1: loss = 0.00113656 (* 1 = 0.00113656 loss)\nI0818 06:16:18.513470 17350 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0818 06:17:05.820825 17350 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0818 06:17:32.442409 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85132\nI0818 06:17:32.442459 17350 solver.cpp:404]     Test net output #1: loss = 0.528054 (* 1 = 0.528054 loss)\nI0818 06:17:32.864346 17350 solver.cpp:228] Iteration 66800, loss = 0.00104804\nI0818 06:17:32.864385 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:17:32.864400 17350 
solver.cpp:244]     Train net output #1: loss = 0.00104747 (* 1 = 0.00104747 loss)\nI0818 06:17:32.943636 17350 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0818 06:18:20.248915 17350 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0818 06:18:46.872306 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85156\nI0818 06:18:46.872356 17350 solver.cpp:404]     Test net output #1: loss = 0.527547 (* 1 = 0.527547 loss)\nI0818 06:18:47.294152 17350 solver.cpp:228] Iteration 66900, loss = 0.00117516\nI0818 06:18:47.294193 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:18:47.294209 17350 solver.cpp:244]     Train net output #1: loss = 0.0011746 (* 1 = 0.0011746 loss)\nI0818 06:18:47.371692 17350 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0818 06:19:34.622730 17350 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0818 06:20:01.245303 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85256\nI0818 06:20:01.245353 17350 solver.cpp:404]     Test net output #1: loss = 0.526583 (* 1 = 0.526583 loss)\nI0818 06:20:01.666829 17350 solver.cpp:228] Iteration 67000, loss = 0.0010502\nI0818 06:20:01.666868 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:20:01.666884 17350 solver.cpp:244]     Train net output #1: loss = 0.00104964 (* 1 = 0.00104964 loss)\nI0818 06:20:01.745412 17350 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0818 06:20:49.034241 17350 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0818 06:21:15.658072 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85072\nI0818 06:21:15.658123 17350 solver.cpp:404]     Test net output #1: loss = 0.528612 (* 1 = 0.528612 loss)\nI0818 06:21:16.080153 17350 solver.cpp:228] Iteration 67100, loss = 0.00104148\nI0818 06:21:16.080189 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:21:16.080205 17350 solver.cpp:244]     Train net output #1: loss = 0.00104092 (* 1 = 0.00104092 loss)\nI0818 06:21:16.158854 17350 
sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0818 06:22:03.435901 17350 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0818 06:22:30.059288 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85148\nI0818 06:22:30.059339 17350 solver.cpp:404]     Test net output #1: loss = 0.528954 (* 1 = 0.528954 loss)\nI0818 06:22:30.480928 17350 solver.cpp:228] Iteration 67200, loss = 0.00111646\nI0818 06:22:30.480965 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:22:30.480981 17350 solver.cpp:244]     Train net output #1: loss = 0.0011159 (* 1 = 0.0011159 loss)\nI0818 06:22:30.560303 17350 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0818 06:23:17.860968 17350 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0818 06:23:44.484185 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85056\nI0818 06:23:44.484236 17350 solver.cpp:404]     Test net output #1: loss = 0.530475 (* 1 = 0.530475 loss)\nI0818 06:23:44.905867 17350 solver.cpp:228] Iteration 67300, loss = 0.00116903\nI0818 06:23:44.905905 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:23:44.905920 17350 solver.cpp:244]     Train net output #1: loss = 0.00116846 (* 1 = 0.00116846 loss)\nI0818 06:23:44.981248 17350 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0818 06:24:32.255162 17350 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0818 06:24:58.877132 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8522\nI0818 06:24:58.877178 17350 solver.cpp:404]     Test net output #1: loss = 0.526005 (* 1 = 0.526005 loss)\nI0818 06:24:59.298761 17350 solver.cpp:228] Iteration 67400, loss = 0.00109689\nI0818 06:24:59.298797 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:24:59.298815 17350 solver.cpp:244]     Train net output #1: loss = 0.00109632 (* 1 = 0.00109632 loss)\nI0818 06:24:59.374264 17350 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0818 06:25:46.647840 17350 solver.cpp:337] Iteration 67500, Testing net 
(#0)\nI0818 06:26:13.267496 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84964\nI0818 06:26:13.267547 17350 solver.cpp:404]     Test net output #1: loss = 0.531789 (* 1 = 0.531789 loss)\nI0818 06:26:13.689407 17350 solver.cpp:228] Iteration 67500, loss = 0.000879512\nI0818 06:26:13.689445 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:26:13.689460 17350 solver.cpp:244]     Train net output #1: loss = 0.000878947 (* 1 = 0.000878947 loss)\nI0818 06:26:13.763973 17350 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0818 06:27:01.074729 17350 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0818 06:27:27.699120 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85124\nI0818 06:27:27.699169 17350 solver.cpp:404]     Test net output #1: loss = 0.52981 (* 1 = 0.52981 loss)\nI0818 06:27:28.120726 17350 solver.cpp:228] Iteration 67600, loss = 0.00107026\nI0818 06:27:28.120764 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:27:28.120779 17350 solver.cpp:244]     Train net output #1: loss = 0.00106969 (* 1 = 0.00106969 loss)\nI0818 06:27:28.196120 17350 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0818 06:28:15.488564 17350 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0818 06:28:42.111338 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84884\nI0818 06:28:42.111390 17350 solver.cpp:404]     Test net output #1: loss = 0.531972 (* 1 = 0.531972 loss)\nI0818 06:28:42.533219 17350 solver.cpp:228] Iteration 67700, loss = 0.00125542\nI0818 06:28:42.533254 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:28:42.533272 17350 solver.cpp:244]     Train net output #1: loss = 0.00125486 (* 1 = 0.00125486 loss)\nI0818 06:28:42.608198 17350 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0818 06:29:29.903971 17350 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0818 06:29:56.524658 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85056\nI0818 06:29:56.524708 
17350 solver.cpp:404]     Test net output #1: loss = 0.530257 (* 1 = 0.530257 loss)\nI0818 06:29:56.946187 17350 solver.cpp:228] Iteration 67800, loss = 0.001018\nI0818 06:29:56.946224 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:29:56.946240 17350 solver.cpp:244]     Train net output #1: loss = 0.00101743 (* 1 = 0.00101743 loss)\nI0818 06:29:57.029448 17350 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0818 06:30:44.320698 17350 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0818 06:31:10.942575 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84956\nI0818 06:31:10.942625 17350 solver.cpp:404]     Test net output #1: loss = 0.530566 (* 1 = 0.530566 loss)\nI0818 06:31:11.364388 17350 solver.cpp:228] Iteration 67900, loss = 0.00115335\nI0818 06:31:11.364425 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:31:11.364441 17350 solver.cpp:244]     Train net output #1: loss = 0.00115278 (* 1 = 0.00115278 loss)\nI0818 06:31:11.446915 17350 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0818 06:31:58.749102 17350 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0818 06:32:25.369320 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85096\nI0818 06:32:25.369370 17350 solver.cpp:404]     Test net output #1: loss = 0.527608 (* 1 = 0.527608 loss)\nI0818 06:32:25.790900 17350 solver.cpp:228] Iteration 68000, loss = 0.00107249\nI0818 06:32:25.790933 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:32:25.790949 17350 solver.cpp:244]     Train net output #1: loss = 0.00107193 (* 1 = 0.00107193 loss)\nI0818 06:32:25.869408 17350 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0818 06:33:13.163492 17350 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0818 06:33:39.776437 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84916\nI0818 06:33:39.776486 17350 solver.cpp:404]     Test net output #1: loss = 0.53285 (* 1 = 0.53285 loss)\nI0818 06:33:40.198271 17350 
solver.cpp:228] Iteration 68100, loss = 0.00108561\nI0818 06:33:40.198304 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:33:40.198320 17350 solver.cpp:244]     Train net output #1: loss = 0.00108504 (* 1 = 0.00108504 loss)\nI0818 06:33:40.276487 17350 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0818 06:34:27.521574 17350 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0818 06:34:54.138964 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI0818 06:34:54.139014 17350 solver.cpp:404]     Test net output #1: loss = 0.529155 (* 1 = 0.529155 loss)\nI0818 06:34:54.560942 17350 solver.cpp:228] Iteration 68200, loss = 0.00108549\nI0818 06:34:54.560973 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:34:54.560989 17350 solver.cpp:244]     Train net output #1: loss = 0.00108492 (* 1 = 0.00108492 loss)\nI0818 06:34:54.641716 17350 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0818 06:35:41.911651 17350 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0818 06:36:08.530283 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84952\nI0818 06:36:08.530333 17350 solver.cpp:404]     Test net output #1: loss = 0.533456 (* 1 = 0.533456 loss)\nI0818 06:36:08.951859 17350 solver.cpp:228] Iteration 68300, loss = 0.00102243\nI0818 06:36:08.951895 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:36:08.951911 17350 solver.cpp:244]     Train net output #1: loss = 0.00102187 (* 1 = 0.00102187 loss)\nI0818 06:36:09.034549 17350 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0818 06:36:56.342707 17350 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0818 06:37:22.960525 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8506\nI0818 06:37:22.960575 17350 solver.cpp:404]     Test net output #1: loss = 0.532118 (* 1 = 0.532118 loss)\nI0818 06:37:23.382181 17350 solver.cpp:228] Iteration 68400, loss = 0.00116227\nI0818 06:37:23.382215 17350 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0818 06:37:23.382231 17350 solver.cpp:244]     Train net output #1: loss = 0.0011617 (* 1 = 0.0011617 loss)\nI0818 06:37:23.462224 17350 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0818 06:38:10.770046 17350 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0818 06:38:37.390512 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84956\nI0818 06:38:37.390561 17350 solver.cpp:404]     Test net output #1: loss = 0.531814 (* 1 = 0.531814 loss)\nI0818 06:38:37.811986 17350 solver.cpp:228] Iteration 68500, loss = 0.00102205\nI0818 06:38:37.812021 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:38:37.812037 17350 solver.cpp:244]     Train net output #1: loss = 0.00102148 (* 1 = 0.00102148 loss)\nI0818 06:38:37.890099 17350 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0818 06:39:25.182353 17350 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0818 06:39:51.801481 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85124\nI0818 06:39:51.801532 17350 solver.cpp:404]     Test net output #1: loss = 0.531046 (* 1 = 0.531046 loss)\nI0818 06:39:52.223423 17350 solver.cpp:228] Iteration 68600, loss = 0.0013619\nI0818 06:39:52.223456 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:39:52.223472 17350 solver.cpp:244]     Train net output #1: loss = 0.00136134 (* 1 = 0.00136134 loss)\nI0818 06:39:52.298920 17350 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0818 06:40:39.524835 17350 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0818 06:41:06.140687 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85028\nI0818 06:41:06.140736 17350 solver.cpp:404]     Test net output #1: loss = 0.532553 (* 1 = 0.532553 loss)\nI0818 06:41:06.562299 17350 solver.cpp:228] Iteration 68700, loss = 0.00126363\nI0818 06:41:06.562332 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:41:06.562348 17350 solver.cpp:244]     Train net output #1: loss = 0.00126306 (* 1 = 0.00126306 
loss)\nI0818 06:41:06.642966 17350 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0818 06:41:53.878746 17350 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0818 06:42:20.500455 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85176\nI0818 06:42:20.500505 17350 solver.cpp:404]     Test net output #1: loss = 0.526571 (* 1 = 0.526571 loss)\nI0818 06:42:20.922260 17350 solver.cpp:228] Iteration 68800, loss = 0.00106132\nI0818 06:42:20.922293 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:42:20.922309 17350 solver.cpp:244]     Train net output #1: loss = 0.00106075 (* 1 = 0.00106075 loss)\nI0818 06:42:21.004245 17350 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0818 06:43:08.230100 17350 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0818 06:43:34.848678 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84988\nI0818 06:43:34.848729 17350 solver.cpp:404]     Test net output #1: loss = 0.534258 (* 1 = 0.534258 loss)\nI0818 06:43:35.270416 17350 solver.cpp:228] Iteration 68900, loss = 0.00103767\nI0818 06:43:35.270452 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:43:35.270467 17350 solver.cpp:244]     Train net output #1: loss = 0.00103711 (* 1 = 0.00103711 loss)\nI0818 06:43:35.354254 17350 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0818 06:44:22.602751 17350 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0818 06:44:49.221303 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8502\nI0818 06:44:49.221354 17350 solver.cpp:404]     Test net output #1: loss = 0.53125 (* 1 = 0.53125 loss)\nI0818 06:44:49.643138 17350 solver.cpp:228] Iteration 69000, loss = 0.00100975\nI0818 06:44:49.643172 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:44:49.643187 17350 solver.cpp:244]     Train net output #1: loss = 0.00100919 (* 1 = 0.00100919 loss)\nI0818 06:44:49.718672 17350 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0818 06:45:36.998976 17350 
solver.cpp:337] Iteration 69100, Testing net (#0)\nI0818 06:46:03.623371 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84908\nI0818 06:46:03.623420 17350 solver.cpp:404]     Test net output #1: loss = 0.533557 (* 1 = 0.533557 loss)\nI0818 06:46:04.044996 17350 solver.cpp:228] Iteration 69100, loss = 0.00102959\nI0818 06:46:04.045030 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:46:04.045047 17350 solver.cpp:244]     Train net output #1: loss = 0.00102903 (* 1 = 0.00102903 loss)\nI0818 06:46:04.120247 17350 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0818 06:46:51.396045 17350 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0818 06:47:18.018082 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85008\nI0818 06:47:18.018132 17350 solver.cpp:404]     Test net output #1: loss = 0.53524 (* 1 = 0.53524 loss)\nI0818 06:47:18.439790 17350 solver.cpp:228] Iteration 69200, loss = 0.00096353\nI0818 06:47:18.439824 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:47:18.439839 17350 solver.cpp:244]     Train net output #1: loss = 0.000962966 (* 1 = 0.000962966 loss)\nI0818 06:47:18.511126 17350 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0818 06:48:05.805568 17350 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0818 06:48:32.428340 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84888\nI0818 06:48:32.428388 17350 solver.cpp:404]     Test net output #1: loss = 0.537139 (* 1 = 0.537139 loss)\nI0818 06:48:32.850221 17350 solver.cpp:228] Iteration 69300, loss = 0.000977138\nI0818 06:48:32.850257 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:48:32.850273 17350 solver.cpp:244]     Train net output #1: loss = 0.000976573 (* 1 = 0.000976573 loss)\nI0818 06:48:32.934427 17350 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0818 06:49:20.239406 17350 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0818 06:49:46.870195 17350 solver.cpp:404]     Test net output #0: 
accuracy = 0.84988\nI0818 06:49:46.870244 17350 solver.cpp:404]     Test net output #1: loss = 0.53261 (* 1 = 0.53261 loss)\nI0818 06:49:47.293215 17350 solver.cpp:228] Iteration 69400, loss = 0.000901977\nI0818 06:49:47.293262 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:49:47.293285 17350 solver.cpp:244]     Train net output #1: loss = 0.000901413 (* 1 = 0.000901413 loss)\nI0818 06:49:47.371464 17350 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0818 06:50:34.706255 17350 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0818 06:51:01.336477 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84752\nI0818 06:51:01.336525 17350 solver.cpp:404]     Test net output #1: loss = 0.537633 (* 1 = 0.537633 loss)\nI0818 06:51:01.759613 17350 solver.cpp:228] Iteration 69500, loss = 0.00118937\nI0818 06:51:01.759661 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:51:01.759685 17350 solver.cpp:244]     Train net output #1: loss = 0.00118881 (* 1 = 0.00118881 loss)\nI0818 06:51:01.834585 17350 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0818 06:51:49.131615 17350 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0818 06:52:15.769268 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8494\nI0818 06:52:15.769318 17350 solver.cpp:404]     Test net output #1: loss = 0.533841 (* 1 = 0.533841 loss)\nI0818 06:52:16.191260 17350 solver.cpp:228] Iteration 69600, loss = 0.000902819\nI0818 06:52:16.191306 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:52:16.191330 17350 solver.cpp:244]     Train net output #1: loss = 0.000902255 (* 1 = 0.000902255 loss)\nI0818 06:52:16.267597 17350 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0818 06:53:03.496382 17350 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0818 06:53:30.122519 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8492\nI0818 06:53:30.122570 17350 solver.cpp:404]     Test net output #1: loss = 0.534628 (* 1 = 0.534628 
loss)\nI0818 06:53:30.545400 17350 solver.cpp:228] Iteration 69700, loss = 0.00094652\nI0818 06:53:30.545447 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:53:30.545471 17350 solver.cpp:244]     Train net output #1: loss = 0.000945955 (* 1 = 0.000945955 loss)\nI0818 06:53:30.623188 17350 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0818 06:54:17.901000 17350 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0818 06:54:44.804330 17350 solver.cpp:404]     Test net output #0: accuracy = 0.851\nI0818 06:54:44.804404 17350 solver.cpp:404]     Test net output #1: loss = 0.530152 (* 1 = 0.530152 loss)\nI0818 06:54:45.228698 17350 solver.cpp:228] Iteration 69800, loss = 0.00105783\nI0818 06:54:45.228747 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:54:45.228765 17350 solver.cpp:244]     Train net output #1: loss = 0.00105726 (* 1 = 0.00105726 loss)\nI0818 06:54:45.308526 17350 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0818 06:55:32.643829 17350 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0818 06:55:59.533161 17350 solver.cpp:404]     Test net output #0: accuracy = 0.84952\nI0818 06:55:59.533236 17350 solver.cpp:404]     Test net output #1: loss = 0.536474 (* 1 = 0.536474 loss)\nI0818 06:55:59.955971 17350 solver.cpp:228] Iteration 69900, loss = 0.00105032\nI0818 06:55:59.956019 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:55:59.956035 17350 solver.cpp:244]     Train net output #1: loss = 0.00104975 (* 1 = 0.00104975 loss)\nI0818 06:56:00.035616 17350 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0818 06:56:47.379762 17350 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0818 06:57:14.273380 17350 solver.cpp:404]     Test net output #0: accuracy = 0.851\nI0818 06:57:14.273447 17350 solver.cpp:404]     Test net output #1: loss = 0.529748 (* 1 = 0.529748 loss)\nI0818 06:57:14.696450 17350 solver.cpp:228] Iteration 70000, loss = 0.0010314\nI0818 06:57:14.696501 17350 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:57:14.696518 17350 solver.cpp:244]     Train net output #1: loss = 0.00103084 (* 1 = 0.00103084 loss)\nI0818 06:57:14.773387 17350 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0818 06:57:14.773411 17350 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0818 06:58:02.107861 17350 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0818 06:58:29.013195 17350 solver.cpp:404]     Test net output #0: accuracy = 0.85588\nI0818 06:58:29.013272 17350 solver.cpp:404]     Test net output #1: loss = 0.512109 (* 1 = 0.512109 loss)\nI0818 06:58:29.436564 17350 solver.cpp:228] Iteration 70100, loss = 0.00106813\nI0818 06:58:29.436607 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:58:29.436624 17350 solver.cpp:244]     Train net output #1: loss = 0.00106757 (* 1 = 0.00106757 loss)\nI0818 06:58:29.514206 17350 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0818 06:59:16.891122 17350 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0818 06:59:43.802295 17350 solver.cpp:404]     Test net output #0: accuracy = 0.86256\nI0818 06:59:43.802371 17350 solver.cpp:404]     Test net output #1: loss = 0.493301 (* 1 = 0.493301 loss)\nI0818 06:59:44.225185 17350 solver.cpp:228] Iteration 70200, loss = 0.00106436\nI0818 06:59:44.225229 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 06:59:44.225245 17350 solver.cpp:244]     Train net output #1: loss = 0.0010638 (* 1 = 0.0010638 loss)\nI0818 06:59:44.302028 17350 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0818 07:00:31.633401 17350 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0818 07:00:58.539868 17350 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0818 07:00:58.539942 17350 solver.cpp:404]     Test net output #1: loss = 0.478849 (* 1 = 0.478849 loss)\nI0818 07:00:58.962985 17350 solver.cpp:228] Iteration 70300, loss = 0.00103061\nI0818 07:00:58.963027 17350 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0818 07:00:58.963042 17350 solver.cpp:244]     Train net output #1: loss = 0.00103005 (* 1 = 0.00103005 loss)\nI0818 07:00:59.046305 17350 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0818 07:01:46.427014 17350 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0818 07:02:13.326557 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0818 07:02:13.326630 17350 solver.cpp:404]     Test net output #1: loss = 0.466938 (* 1 = 0.466938 loss)\nI0818 07:02:13.749197 17350 solver.cpp:228] Iteration 70400, loss = 0.00096494\nI0818 07:02:13.749238 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:02:13.749255 17350 solver.cpp:244]     Train net output #1: loss = 0.000964376 (* 1 = 0.000964376 loss)\nI0818 07:02:13.829522 17350 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0818 07:03:01.281530 17350 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0818 07:03:28.188627 17350 solver.cpp:404]     Test net output #0: accuracy = 0.874\nI0818 07:03:28.188700 17350 solver.cpp:404]     Test net output #1: loss = 0.457142 (* 1 = 0.457142 loss)\nI0818 07:03:28.611516 17350 solver.cpp:228] Iteration 70500, loss = 0.000836708\nI0818 07:03:28.611560 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:03:28.611577 17350 solver.cpp:244]     Train net output #1: loss = 0.000836144 (* 1 = 0.000836144 loss)\nI0818 07:03:28.692728 17350 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0818 07:04:16.166383 17350 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0818 07:04:43.093140 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87744\nI0818 07:04:43.093226 17350 solver.cpp:404]     Test net output #1: loss = 0.450312 (* 1 = 0.450312 loss)\nI0818 07:04:43.517444 17350 solver.cpp:228] Iteration 70600, loss = 0.00101968\nI0818 07:04:43.517488 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:04:43.517513 17350 solver.cpp:244]     Train net output #1: loss = 0.00101911 (* 
1 = 0.00101911 loss)\nI0818 07:04:43.597579 17350 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0818 07:05:31.047869 17350 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0818 07:05:57.967998 17350 solver.cpp:404]     Test net output #0: accuracy = 0.87768\nI0818 07:05:57.968075 17350 solver.cpp:404]     Test net output #1: loss = 0.442937 (* 1 = 0.442937 loss)\nI0818 07:05:58.390822 17350 solver.cpp:228] Iteration 70700, loss = 0.000779368\nI0818 07:05:58.390857 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:05:58.390873 17350 solver.cpp:244]     Train net output #1: loss = 0.000778803 (* 1 = 0.000778803 loss)\nI0818 07:05:58.479445 17350 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0818 07:06:45.924140 17350 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0818 07:07:12.831208 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0818 07:07:12.831280 17350 solver.cpp:404]     Test net output #1: loss = 0.439244 (* 1 = 0.439244 loss)\nI0818 07:07:13.254276 17350 solver.cpp:228] Iteration 70800, loss = 0.000892938\nI0818 07:07:13.254314 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:07:13.254329 17350 solver.cpp:244]     Train net output #1: loss = 0.000892374 (* 1 = 0.000892374 loss)\nI0818 07:07:13.333619 17350 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0818 07:08:00.721129 17350 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0818 07:08:27.634048 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0818 07:08:27.634119 17350 solver.cpp:404]     Test net output #1: loss = 0.434376 (* 1 = 0.434376 loss)\nI0818 07:08:28.058509 17350 solver.cpp:228] Iteration 70900, loss = 0.00108057\nI0818 07:08:28.058559 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:08:28.058576 17350 solver.cpp:244]     Train net output #1: loss = 0.00108 (* 1 = 0.00108 loss)\nI0818 07:08:28.137046 17350 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0818 
07:09:15.537636 17350 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0818 07:09:42.451972 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0818 07:09:42.452042 17350 solver.cpp:404]     Test net output #1: loss = 0.433301 (* 1 = 0.433301 loss)\nI0818 07:09:42.876271 17350 solver.cpp:228] Iteration 71000, loss = 0.00103393\nI0818 07:09:42.876319 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:09:42.876338 17350 solver.cpp:244]     Train net output #1: loss = 0.00103336 (* 1 = 0.00103336 loss)\nI0818 07:09:42.953913 17350 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0818 07:10:30.326664 17350 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0818 07:10:57.236984 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88388\nI0818 07:10:57.237059 17350 solver.cpp:404]     Test net output #1: loss = 0.429033 (* 1 = 0.429033 loss)\nI0818 07:10:57.661358 17350 solver.cpp:228] Iteration 71100, loss = 0.000814\nI0818 07:10:57.661417 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:10:57.661435 17350 solver.cpp:244]     Train net output #1: loss = 0.000813436 (* 1 = 0.000813436 loss)\nI0818 07:10:57.742142 17350 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0818 07:11:45.103493 17350 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0818 07:12:12.002182 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0818 07:12:12.002252 17350 solver.cpp:404]     Test net output #1: loss = 0.429738 (* 1 = 0.429738 loss)\nI0818 07:12:12.426262 17350 solver.cpp:228] Iteration 71200, loss = 0.000883476\nI0818 07:12:12.426321 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:12:12.426337 17350 solver.cpp:244]     Train net output #1: loss = 0.000882912 (* 1 = 0.000882912 loss)\nI0818 07:12:12.502753 17350 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0818 07:12:59.892694 17350 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0818 07:13:26.798812 17350 solver.cpp:404] 
    Test net output #0: accuracy = 0.8838\nI0818 07:13:26.798884 17350 solver.cpp:404]     Test net output #1: loss = 0.425901 (* 1 = 0.425901 loss)\nI0818 07:13:27.222965 17350 solver.cpp:228] Iteration 71300, loss = 0.00108008\nI0818 07:13:27.223021 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:13:27.223038 17350 solver.cpp:244]     Train net output #1: loss = 0.00107952 (* 1 = 0.00107952 loss)\nI0818 07:13:27.300348 17350 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0818 07:14:14.699664 17350 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0818 07:14:41.601404 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8844\nI0818 07:14:41.601480 17350 solver.cpp:404]     Test net output #1: loss = 0.428246 (* 1 = 0.428246 loss)\nI0818 07:14:42.025606 17350 solver.cpp:228] Iteration 71400, loss = 0.00107098\nI0818 07:14:42.025665 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:14:42.025682 17350 solver.cpp:244]     Train net output #1: loss = 0.00107042 (* 1 = 0.00107042 loss)\nI0818 07:14:42.104578 17350 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0818 07:15:29.446367 17350 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0818 07:15:56.349292 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88376\nI0818 07:15:56.349369 17350 solver.cpp:404]     Test net output #1: loss = 0.424228 (* 1 = 0.424228 loss)\nI0818 07:15:56.773535 17350 solver.cpp:228] Iteration 71500, loss = 0.00109407\nI0818 07:15:56.773597 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:15:56.773614 17350 solver.cpp:244]     Train net output #1: loss = 0.0010935 (* 1 = 0.0010935 loss)\nI0818 07:15:56.850905 17350 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0818 07:16:44.216564 17350 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0818 07:17:11.122516 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0818 07:17:11.122589 17350 solver.cpp:404]     Test net output #1: loss = 
0.427103 (* 1 = 0.427103 loss)\nI0818 07:17:11.547164 17350 solver.cpp:228] Iteration 71600, loss = 0.000837816\nI0818 07:17:11.547222 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:17:11.547241 17350 solver.cpp:244]     Train net output #1: loss = 0.000837251 (* 1 = 0.000837251 loss)\nI0818 07:17:11.622995 17350 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0818 07:17:59.015802 17350 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0818 07:18:25.909924 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0818 07:18:25.910004 17350 solver.cpp:404]     Test net output #1: loss = 0.423431 (* 1 = 0.423431 loss)\nI0818 07:18:26.333930 17350 solver.cpp:228] Iteration 71700, loss = 0.00108846\nI0818 07:18:26.333989 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:18:26.334007 17350 solver.cpp:244]     Train net output #1: loss = 0.0010879 (* 1 = 0.0010879 loss)\nI0818 07:18:26.416468 17350 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0818 07:19:13.827822 17350 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0818 07:19:40.720281 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0818 07:19:40.720355 17350 solver.cpp:404]     Test net output #1: loss = 0.426731 (* 1 = 0.426731 loss)\nI0818 07:19:41.143447 17350 solver.cpp:228] Iteration 71800, loss = 0.00112756\nI0818 07:19:41.143504 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:19:41.143522 17350 solver.cpp:244]     Train net output #1: loss = 0.001127 (* 1 = 0.001127 loss)\nI0818 07:19:41.220454 17350 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0818 07:20:28.681015 17350 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0818 07:20:55.574918 17350 solver.cpp:404]     Test net output #0: accuracy = 0.885401\nI0818 07:20:55.574998 17350 solver.cpp:404]     Test net output #1: loss = 0.423034 (* 1 = 0.423034 loss)\nI0818 07:20:55.997908 17350 solver.cpp:228] Iteration 71900, loss = 0.00112606\nI0818 
07:20:55.997970 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:20:55.997989 17350 solver.cpp:244]     Train net output #1: loss = 0.00112549 (* 1 = 0.00112549 loss)\nI0818 07:20:56.076418 17350 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0818 07:21:43.534806 17350 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0818 07:22:10.417580 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88572\nI0818 07:22:10.417650 17350 solver.cpp:404]     Test net output #1: loss = 0.426823 (* 1 = 0.426823 loss)\nI0818 07:22:10.840994 17350 solver.cpp:228] Iteration 72000, loss = 0.00115104\nI0818 07:22:10.841050 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:22:10.841068 17350 solver.cpp:244]     Train net output #1: loss = 0.00115047 (* 1 = 0.00115047 loss)\nI0818 07:22:10.913419 17350 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0818 07:22:58.431602 17350 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0818 07:23:25.331845 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88564\nI0818 07:23:25.331918 17350 solver.cpp:404]     Test net output #1: loss = 0.422908 (* 1 = 0.422908 loss)\nI0818 07:23:25.754753 17350 solver.cpp:228] Iteration 72100, loss = 0.00103888\nI0818 07:23:25.754808 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:23:25.754825 17350 solver.cpp:244]     Train net output #1: loss = 0.00103831 (* 1 = 0.00103831 loss)\nI0818 07:23:25.826170 17350 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0818 07:24:13.348304 17350 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0818 07:24:40.241191 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0818 07:24:40.241266 17350 solver.cpp:404]     Test net output #1: loss = 0.426824 (* 1 = 0.426824 loss)\nI0818 07:24:40.664168 17350 solver.cpp:228] Iteration 72200, loss = 0.00109743\nI0818 07:24:40.664225 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:24:40.664243 17350 
solver.cpp:244]     Train net output #1: loss = 0.00109686 (* 1 = 0.00109686 loss)\nI0818 07:24:40.745471 17350 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0818 07:25:28.282202 17350 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0818 07:25:55.172112 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88636\nI0818 07:25:55.172188 17350 solver.cpp:404]     Test net output #1: loss = 0.422663 (* 1 = 0.422663 loss)\nI0818 07:25:55.595187 17350 solver.cpp:228] Iteration 72300, loss = 0.00105718\nI0818 07:25:55.595229 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:25:55.595247 17350 solver.cpp:244]     Train net output #1: loss = 0.00105662 (* 1 = 0.00105662 loss)\nI0818 07:25:55.675448 17350 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0818 07:26:43.181712 17350 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0818 07:27:10.077461 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88608\nI0818 07:27:10.077536 17350 solver.cpp:404]     Test net output #1: loss = 0.427117 (* 1 = 0.427117 loss)\nI0818 07:27:10.500551 17350 solver.cpp:228] Iteration 72400, loss = 0.00102903\nI0818 07:27:10.500593 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:27:10.500610 17350 solver.cpp:244]     Train net output #1: loss = 0.00102847 (* 1 = 0.00102847 loss)\nI0818 07:27:10.583405 17350 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0818 07:27:58.078145 17350 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0818 07:28:24.981518 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0818 07:28:24.981592 17350 solver.cpp:404]     Test net output #1: loss = 0.423028 (* 1 = 0.423028 loss)\nI0818 07:28:25.404289 17350 solver.cpp:228] Iteration 72500, loss = 0.000937286\nI0818 07:28:25.404330 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:28:25.404346 17350 solver.cpp:244]     Train net output #1: loss = 0.000936721 (* 1 = 0.000936721 loss)\nI0818 07:28:25.482707 17350 
sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0818 07:29:13.015424 17350 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0818 07:29:39.912997 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0818 07:29:39.913072 17350 solver.cpp:404]     Test net output #1: loss = 0.427496 (* 1 = 0.427496 loss)\nI0818 07:29:40.335803 17350 solver.cpp:228] Iteration 72600, loss = 0.00103864\nI0818 07:29:40.335850 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:29:40.335866 17350 solver.cpp:244]     Train net output #1: loss = 0.00103807 (* 1 = 0.00103807 loss)\nI0818 07:29:40.415503 17350 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0818 07:30:27.910055 17350 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0818 07:30:54.812782 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0818 07:30:54.812863 17350 solver.cpp:404]     Test net output #1: loss = 0.423314 (* 1 = 0.423314 loss)\nI0818 07:30:55.235584 17350 solver.cpp:228] Iteration 72700, loss = 0.000967902\nI0818 07:30:55.235621 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:30:55.235637 17350 solver.cpp:244]     Train net output #1: loss = 0.000967338 (* 1 = 0.000967338 loss)\nI0818 07:30:55.311547 17350 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0818 07:31:42.834429 17350 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0818 07:32:09.736196 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 07:32:09.736266 17350 solver.cpp:404]     Test net output #1: loss = 0.427823 (* 1 = 0.427823 loss)\nI0818 07:32:10.159416 17350 solver.cpp:228] Iteration 72800, loss = 0.00114245\nI0818 07:32:10.159452 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:32:10.159468 17350 solver.cpp:244]     Train net output #1: loss = 0.00114188 (* 1 = 0.00114188 loss)\nI0818 07:32:10.230578 17350 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0818 07:32:57.725450 17350 solver.cpp:337] Iteration 72900, 
Testing net (#0)\nI0818 07:33:24.622511 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0818 07:33:24.622577 17350 solver.cpp:404]     Test net output #1: loss = 0.423503 (* 1 = 0.423503 loss)\nI0818 07:33:25.045434 17350 solver.cpp:228] Iteration 72900, loss = 0.00108224\nI0818 07:33:25.045480 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:33:25.045496 17350 solver.cpp:244]     Train net output #1: loss = 0.00108168 (* 1 = 0.00108168 loss)\nI0818 07:33:25.119911 17350 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0818 07:34:12.633304 17350 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0818 07:34:39.526278 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 07:34:39.526351 17350 solver.cpp:404]     Test net output #1: loss = 0.42821 (* 1 = 0.42821 loss)\nI0818 07:34:39.949313 17350 solver.cpp:228] Iteration 73000, loss = 0.000976624\nI0818 07:34:39.949354 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:34:39.949369 17350 solver.cpp:244]     Train net output #1: loss = 0.000976059 (* 1 = 0.000976059 loss)\nI0818 07:34:40.024477 17350 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0818 07:35:27.502359 17350 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0818 07:35:54.402874 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 07:35:54.402951 17350 solver.cpp:404]     Test net output #1: loss = 0.423843 (* 1 = 0.423843 loss)\nI0818 07:35:54.825991 17350 solver.cpp:228] Iteration 73100, loss = 0.00107314\nI0818 07:35:54.826035 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:35:54.826050 17350 solver.cpp:244]     Train net output #1: loss = 0.00107258 (* 1 = 0.00107258 loss)\nI0818 07:35:54.900872 17350 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0818 07:36:42.388231 17350 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0818 07:37:09.283615 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0818 
07:37:09.283686 17350 solver.cpp:404]     Test net output #1: loss = 0.42812 (* 1 = 0.42812 loss)\nI0818 07:37:09.706538 17350 solver.cpp:228] Iteration 73200, loss = 0.00107525\nI0818 07:37:09.706583 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:37:09.706599 17350 solver.cpp:244]     Train net output #1: loss = 0.00107469 (* 1 = 0.00107469 loss)\nI0818 07:37:09.782891 17350 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0818 07:37:57.182955 17350 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0818 07:38:24.071951 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 07:38:24.072005 17350 solver.cpp:404]     Test net output #1: loss = 0.423801 (* 1 = 0.423801 loss)\nI0818 07:38:24.494606 17350 solver.cpp:228] Iteration 73300, loss = 0.00114548\nI0818 07:38:24.494648 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:38:24.494664 17350 solver.cpp:244]     Train net output #1: loss = 0.00114492 (* 1 = 0.00114492 loss)\nI0818 07:38:24.577419 17350 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0818 07:39:11.931308 17350 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0818 07:39:38.711957 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0818 07:39:38.712008 17350 solver.cpp:404]     Test net output #1: loss = 0.428372 (* 1 = 0.428372 loss)\nI0818 07:39:39.134986 17350 solver.cpp:228] Iteration 73400, loss = 0.00091279\nI0818 07:39:39.135037 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:39:39.135056 17350 solver.cpp:244]     Train net output #1: loss = 0.000912226 (* 1 = 0.000912226 loss)\nI0818 07:39:39.210952 17350 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0818 07:40:26.587136 17350 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0818 07:40:53.322126 17350 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0818 07:40:53.322180 17350 solver.cpp:404]     Test net output #1: loss = 0.424029 (* 1 = 0.424029 loss)\nI0818 
07:40:53.744931 17350 solver.cpp:228] Iteration 73500, loss = 0.00100044\nI0818 07:40:53.744982 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:40:53.745004 17350 solver.cpp:244]     Train net output #1: loss = 0.000999873 (* 1 = 0.000999873 loss)\nI0818 07:40:53.821611 17350 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0818 07:41:41.172778 17350 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0818 07:42:08.074954 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0818 07:42:08.075028 17350 solver.cpp:404]     Test net output #1: loss = 0.42868 (* 1 = 0.42868 loss)\nI0818 07:42:08.497763 17350 solver.cpp:228] Iteration 73600, loss = 0.000960484\nI0818 07:42:08.497812 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:42:08.497829 17350 solver.cpp:244]     Train net output #1: loss = 0.000959919 (* 1 = 0.000959919 loss)\nI0818 07:42:08.572365 17350 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0818 07:42:55.940245 17350 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0818 07:43:22.851282 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0818 07:43:22.851352 17350 solver.cpp:404]     Test net output #1: loss = 0.424027 (* 1 = 0.424027 loss)\nI0818 07:43:23.275378 17350 solver.cpp:228] Iteration 73700, loss = 0.00112098\nI0818 07:43:23.275439 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:43:23.275456 17350 solver.cpp:244]     Train net output #1: loss = 0.00112041 (* 1 = 0.00112041 loss)\nI0818 07:43:23.356717 17350 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0818 07:44:10.726325 17350 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0818 07:44:37.638808 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 07:44:37.638878 17350 solver.cpp:404]     Test net output #1: loss = 0.42866 (* 1 = 0.42866 loss)\nI0818 07:44:38.061934 17350 solver.cpp:228] Iteration 73800, loss = 0.00114237\nI0818 07:44:38.061996 17350 solver.cpp:244] 
    Train net output #0: accuracy = 1\nI0818 07:44:38.062016 17350 solver.cpp:244]     Train net output #1: loss = 0.00114181 (* 1 = 0.00114181 loss)\nI0818 07:44:38.140656 17350 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0818 07:45:25.632506 17350 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0818 07:45:52.541664 17350 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0818 07:45:52.541735 17350 solver.cpp:404]     Test net output #1: loss = 0.42417 (* 1 = 0.42417 loss)\nI0818 07:45:52.964699 17350 solver.cpp:228] Iteration 73900, loss = 0.00104668\nI0818 07:45:52.964757 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:45:52.964776 17350 solver.cpp:244]     Train net output #1: loss = 0.00104611 (* 1 = 0.00104611 loss)\nI0818 07:45:53.041913 17350 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0818 07:46:40.542405 17350 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0818 07:47:07.455574 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88696\nI0818 07:47:07.455644 17350 solver.cpp:404]     Test net output #1: loss = 0.428972 (* 1 = 0.428972 loss)\nI0818 07:47:07.878341 17350 solver.cpp:228] Iteration 74000, loss = 0.00100166\nI0818 07:47:07.878397 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:47:07.878415 17350 solver.cpp:244]     Train net output #1: loss = 0.0010011 (* 1 = 0.0010011 loss)\nI0818 07:47:07.954237 17350 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0818 07:47:55.454522 17350 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0818 07:48:22.366156 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 07:48:22.366225 17350 solver.cpp:404]     Test net output #1: loss = 0.42456 (* 1 = 0.42456 loss)\nI0818 07:48:22.789384 17350 solver.cpp:228] Iteration 74100, loss = 0.000886715\nI0818 07:48:22.789440 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:48:22.789458 17350 solver.cpp:244]     Train net output #1: loss = 
0.000886151 (* 1 = 0.000886151 loss)\nI0818 07:48:22.863888 17350 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0818 07:49:10.327337 17350 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0818 07:49:37.237869 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0818 07:49:37.237938 17350 solver.cpp:404]     Test net output #1: loss = 0.429201 (* 1 = 0.429201 loss)\nI0818 07:49:37.661129 17350 solver.cpp:228] Iteration 74200, loss = 0.0010907\nI0818 07:49:37.661185 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:49:37.661201 17350 solver.cpp:244]     Train net output #1: loss = 0.00109014 (* 1 = 0.00109014 loss)\nI0818 07:49:37.743468 17350 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0818 07:50:25.164263 17350 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0818 07:50:52.076591 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0818 07:50:52.076659 17350 solver.cpp:404]     Test net output #1: loss = 0.424557 (* 1 = 0.424557 loss)\nI0818 07:50:52.499438 17350 solver.cpp:228] Iteration 74300, loss = 0.00106985\nI0818 07:50:52.499495 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:50:52.499513 17350 solver.cpp:244]     Train net output #1: loss = 0.00106928 (* 1 = 0.00106928 loss)\nI0818 07:50:52.583577 17350 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0818 07:51:40.000633 17350 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0818 07:52:06.898787 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0818 07:52:06.898854 17350 solver.cpp:404]     Test net output #1: loss = 0.429228 (* 1 = 0.429228 loss)\nI0818 07:52:07.323119 17350 solver.cpp:228] Iteration 74400, loss = 0.00106476\nI0818 07:52:07.323176 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:52:07.323194 17350 solver.cpp:244]     Train net output #1: loss = 0.00106419 (* 1 = 0.00106419 loss)\nI0818 07:52:07.405786 17350 sgd_solver.cpp:166] Iteration 74400, lr = 
0.0035\nI0818 07:52:54.834836 17350 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0818 07:53:21.749019 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0818 07:53:21.749088 17350 solver.cpp:404]     Test net output #1: loss = 0.424633 (* 1 = 0.424633 loss)\nI0818 07:53:22.171895 17350 solver.cpp:228] Iteration 74500, loss = 0.00103684\nI0818 07:53:22.171941 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:53:22.171957 17350 solver.cpp:244]     Train net output #1: loss = 0.00103627 (* 1 = 0.00103627 loss)\nI0818 07:53:22.253798 17350 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0818 07:54:09.730958 17350 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0818 07:54:36.641674 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0818 07:54:36.641744 17350 solver.cpp:404]     Test net output #1: loss = 0.429356 (* 1 = 0.429356 loss)\nI0818 07:54:37.064759 17350 solver.cpp:228] Iteration 74600, loss = 0.00118267\nI0818 07:54:37.064807 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:54:37.064824 17350 solver.cpp:244]     Train net output #1: loss = 0.00118211 (* 1 = 0.00118211 loss)\nI0818 07:54:37.146494 17350 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0818 07:55:24.608422 17350 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0818 07:55:51.520633 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0818 07:55:51.520700 17350 solver.cpp:404]     Test net output #1: loss = 0.424665 (* 1 = 0.424665 loss)\nI0818 07:55:51.943711 17350 solver.cpp:228] Iteration 74700, loss = 0.00099256\nI0818 07:55:51.943758 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:55:51.943774 17350 solver.cpp:244]     Train net output #1: loss = 0.000991996 (* 1 = 0.000991996 loss)\nI0818 07:55:52.026630 17350 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0818 07:56:39.505087 17350 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0818 07:57:06.415796 17350 
solver.cpp:404]     Test net output #0: accuracy = 0.88668\nI0818 07:57:06.415860 17350 solver.cpp:404]     Test net output #1: loss = 0.429323 (* 1 = 0.429323 loss)\nI0818 07:57:06.838776 17350 solver.cpp:228] Iteration 74800, loss = 0.00104731\nI0818 07:57:06.838826 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:57:06.838842 17350 solver.cpp:244]     Train net output #1: loss = 0.00104675 (* 1 = 0.00104675 loss)\nI0818 07:57:06.916038 17350 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0818 07:57:54.345206 17350 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0818 07:58:21.258365 17350 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0818 07:58:21.258436 17350 solver.cpp:404]     Test net output #1: loss = 0.424791 (* 1 = 0.424791 loss)\nI0818 07:58:21.681107 17350 solver.cpp:228] Iteration 74900, loss = 0.00102272\nI0818 07:58:21.681155 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:58:21.681172 17350 solver.cpp:244]     Train net output #1: loss = 0.00102216 (* 1 = 0.00102216 loss)\nI0818 07:58:21.759161 17350 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0818 07:59:09.194573 17350 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0818 07:59:36.096539 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0818 07:59:36.096609 17350 solver.cpp:404]     Test net output #1: loss = 0.429424 (* 1 = 0.429424 loss)\nI0818 07:59:36.519387 17350 solver.cpp:228] Iteration 75000, loss = 0.000950071\nI0818 07:59:36.519430 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 07:59:36.519448 17350 solver.cpp:244]     Train net output #1: loss = 0.000949507 (* 1 = 0.000949507 loss)\nI0818 07:59:36.600479 17350 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0818 08:00:24.068122 17350 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0818 08:00:50.975267 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0818 08:00:50.975342 17350 solver.cpp:404]     Test net 
output #1: loss = 0.424763 (* 1 = 0.424763 loss)\nI0818 08:00:51.399624 17350 solver.cpp:228] Iteration 75100, loss = 0.000987014\nI0818 08:00:51.399679 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:00:51.399696 17350 solver.cpp:244]     Train net output #1: loss = 0.00098645 (* 1 = 0.00098645 loss)\nI0818 08:00:51.478492 17350 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0818 08:01:38.952769 17350 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0818 08:02:05.860059 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:02:05.860126 17350 solver.cpp:404]     Test net output #1: loss = 0.42946 (* 1 = 0.42946 loss)\nI0818 08:02:06.284338 17350 solver.cpp:228] Iteration 75200, loss = 0.00100085\nI0818 08:02:06.284389 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:02:06.284404 17350 solver.cpp:244]     Train net output #1: loss = 0.00100029 (* 1 = 0.00100029 loss)\nI0818 08:02:06.364061 17350 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0818 08:02:53.862928 17350 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0818 08:03:20.773006 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0818 08:03:20.773073 17350 solver.cpp:404]     Test net output #1: loss = 0.424897 (* 1 = 0.424897 loss)\nI0818 08:03:21.197397 17350 solver.cpp:228] Iteration 75300, loss = 0.000991851\nI0818 08:03:21.197443 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:03:21.197460 17350 solver.cpp:244]     Train net output #1: loss = 0.000991287 (* 1 = 0.000991287 loss)\nI0818 08:03:21.278105 17350 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0818 08:04:08.759757 17350 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0818 08:04:35.674566 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:04:35.674638 17350 solver.cpp:404]     Test net output #1: loss = 0.429442 (* 1 = 0.429442 loss)\nI0818 08:04:36.098922 17350 solver.cpp:228] Iteration 75400, loss 
= 0.000926192\nI0818 08:04:36.098968 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:04:36.098984 17350 solver.cpp:244]     Train net output #1: loss = 0.000925628 (* 1 = 0.000925628 loss)\nI0818 08:04:36.178017 17350 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0818 08:05:23.587375 17350 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0818 08:05:50.500391 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88728\nI0818 08:05:50.500463 17350 solver.cpp:404]     Test net output #1: loss = 0.424874 (* 1 = 0.424874 loss)\nI0818 08:05:50.924805 17350 solver.cpp:228] Iteration 75500, loss = 0.000941008\nI0818 08:05:50.924850 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:05:50.924867 17350 solver.cpp:244]     Train net output #1: loss = 0.000940443 (* 1 = 0.000940443 loss)\nI0818 08:05:50.995777 17350 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0818 08:06:38.479177 17350 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0818 08:07:05.389426 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:07:05.389495 17350 solver.cpp:404]     Test net output #1: loss = 0.429436 (* 1 = 0.429436 loss)\nI0818 08:07:05.813716 17350 solver.cpp:228] Iteration 75600, loss = 0.000972825\nI0818 08:07:05.813760 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:07:05.813776 17350 solver.cpp:244]     Train net output #1: loss = 0.000972261 (* 1 = 0.000972261 loss)\nI0818 08:07:05.890779 17350 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0818 08:07:53.343118 17350 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0818 08:08:20.255653 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88684\nI0818 08:08:20.255722 17350 solver.cpp:404]     Test net output #1: loss = 0.42479 (* 1 = 0.42479 loss)\nI0818 08:08:20.680110 17350 solver.cpp:228] Iteration 75700, loss = 0.000988739\nI0818 08:08:20.680167 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 
08:08:20.680186 17350 solver.cpp:244]     Train net output #1: loss = 0.000988175 (* 1 = 0.000988175 loss)\nI0818 08:08:20.756796 17350 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0818 08:09:08.237934 17350 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0818 08:09:35.147519 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:09:35.147588 17350 solver.cpp:404]     Test net output #1: loss = 0.42965 (* 1 = 0.42965 loss)\nI0818 08:09:35.571833 17350 solver.cpp:228] Iteration 75800, loss = 0.00110256\nI0818 08:09:35.571888 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:09:35.571907 17350 solver.cpp:244]     Train net output #1: loss = 0.001102 (* 1 = 0.001102 loss)\nI0818 08:09:35.649003 17350 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0818 08:10:23.102473 17350 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0818 08:10:50.014744 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8872\nI0818 08:10:50.014807 17350 solver.cpp:404]     Test net output #1: loss = 0.424985 (* 1 = 0.424985 loss)\nI0818 08:10:50.438751 17350 solver.cpp:228] Iteration 75900, loss = 0.000858989\nI0818 08:10:50.438809 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:10:50.438827 17350 solver.cpp:244]     Train net output #1: loss = 0.000858424 (* 1 = 0.000858424 loss)\nI0818 08:10:50.517820 17350 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0818 08:11:37.919000 17350 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0818 08:12:04.837672 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 08:12:04.837738 17350 solver.cpp:404]     Test net output #1: loss = 0.429702 (* 1 = 0.429702 loss)\nI0818 08:12:05.260454 17350 solver.cpp:228] Iteration 76000, loss = 0.00117649\nI0818 08:12:05.260512 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:12:05.260530 17350 solver.cpp:244]     Train net output #1: loss = 0.00117592 (* 1 = 0.00117592 loss)\nI0818 
08:12:05.335805 17350 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0818 08:12:52.749411 17350 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0818 08:13:19.402546 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:13:19.402603 17350 solver.cpp:404]     Test net output #1: loss = 0.424886 (* 1 = 0.424886 loss)\nI0818 08:13:19.828560 17350 solver.cpp:228] Iteration 76100, loss = 0.000941897\nI0818 08:13:19.828601 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:13:19.828618 17350 solver.cpp:244]     Train net output #1: loss = 0.000941333 (* 1 = 0.000941333 loss)\nI0818 08:13:19.901523 17350 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0818 08:14:07.305404 17350 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0818 08:14:34.227885 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 08:14:34.227957 17350 solver.cpp:404]     Test net output #1: loss = 0.429532 (* 1 = 0.429532 loss)\nI0818 08:14:34.652107 17350 solver.cpp:228] Iteration 76200, loss = 0.00111253\nI0818 08:14:34.652165 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:14:34.652182 17350 solver.cpp:244]     Train net output #1: loss = 0.00111197 (* 1 = 0.00111197 loss)\nI0818 08:14:34.729678 17350 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0818 08:15:22.140431 17350 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0818 08:15:49.063197 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0818 08:15:49.063267 17350 solver.cpp:404]     Test net output #1: loss = 0.424975 (* 1 = 0.424975 loss)\nI0818 08:15:49.486223 17350 solver.cpp:228] Iteration 76300, loss = 0.00107782\nI0818 08:15:49.486269 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:15:49.486285 17350 solver.cpp:244]     Train net output #1: loss = 0.00107726 (* 1 = 0.00107726 loss)\nI0818 08:15:49.567883 17350 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0818 08:16:36.982877 17350 solver.cpp:337] 
Iteration 76400, Testing net (#0)\nI0818 08:17:03.908422 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0818 08:17:03.908494 17350 solver.cpp:404]     Test net output #1: loss = 0.429629 (* 1 = 0.429629 loss)\nI0818 08:17:04.331835 17350 solver.cpp:228] Iteration 76400, loss = 0.0010019\nI0818 08:17:04.331881 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:17:04.331898 17350 solver.cpp:244]     Train net output #1: loss = 0.00100134 (* 1 = 0.00100134 loss)\nI0818 08:17:04.405728 17350 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0818 08:17:51.819278 17350 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0818 08:18:18.742169 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 08:18:18.742240 17350 solver.cpp:404]     Test net output #1: loss = 0.424966 (* 1 = 0.424966 loss)\nI0818 08:18:19.165484 17350 solver.cpp:228] Iteration 76500, loss = 0.000987644\nI0818 08:18:19.165530 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:18:19.165546 17350 solver.cpp:244]     Train net output #1: loss = 0.00098708 (* 1 = 0.00098708 loss)\nI0818 08:18:19.242600 17350 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0818 08:19:06.652171 17350 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0818 08:19:33.577035 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 08:19:33.577103 17350 solver.cpp:404]     Test net output #1: loss = 0.429643 (* 1 = 0.429643 loss)\nI0818 08:19:34.000675 17350 solver.cpp:228] Iteration 76600, loss = 0.00103122\nI0818 08:19:34.000723 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:19:34.000741 17350 solver.cpp:244]     Train net output #1: loss = 0.00103065 (* 1 = 0.00103065 loss)\nI0818 08:19:34.083775 17350 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0818 08:20:21.515950 17350 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0818 08:20:48.442101 17350 solver.cpp:404]     Test net output #0: accuracy = 
0.88724\nI0818 08:20:48.442170 17350 solver.cpp:404]     Test net output #1: loss = 0.424871 (* 1 = 0.424871 loss)\nI0818 08:20:48.865057 17350 solver.cpp:228] Iteration 76700, loss = 0.000990289\nI0818 08:20:48.865103 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:20:48.865119 17350 solver.cpp:244]     Train net output #1: loss = 0.000989725 (* 1 = 0.000989725 loss)\nI0818 08:20:48.941351 17350 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0818 08:21:36.352448 17350 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0818 08:22:03.271342 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0818 08:22:03.271406 17350 solver.cpp:404]     Test net output #1: loss = 0.429651 (* 1 = 0.429651 loss)\nI0818 08:22:03.694237 17350 solver.cpp:228] Iteration 76800, loss = 0.00103287\nI0818 08:22:03.694286 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:22:03.694303 17350 solver.cpp:244]     Train net output #1: loss = 0.00103231 (* 1 = 0.00103231 loss)\nI0818 08:22:03.772245 17350 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0818 08:22:51.167366 17350 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0818 08:23:18.096774 17350 solver.cpp:404]     Test net output #0: accuracy = 0.887001\nI0818 08:23:18.096849 17350 solver.cpp:404]     Test net output #1: loss = 0.425029 (* 1 = 0.425029 loss)\nI0818 08:23:18.520579 17350 solver.cpp:228] Iteration 76900, loss = 0.00101901\nI0818 08:23:18.520628 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:23:18.520644 17350 solver.cpp:244]     Train net output #1: loss = 0.00101845 (* 1 = 0.00101845 loss)\nI0818 08:23:18.603026 17350 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0818 08:24:06.003744 17350 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0818 08:24:32.924063 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88668\nI0818 08:24:32.924139 17350 solver.cpp:404]     Test net output #1: loss = 0.429643 (* 1 = 0.429643 
loss)\nI0818 08:24:33.347409 17350 solver.cpp:228] Iteration 77000, loss = 0.000981341\nI0818 08:24:33.347456 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:24:33.347472 17350 solver.cpp:244]     Train net output #1: loss = 0.000980777 (* 1 = 0.000980777 loss)\nI0818 08:24:33.425808 17350 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0818 08:25:20.838526 17350 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0818 08:25:47.756418 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88684\nI0818 08:25:47.756494 17350 solver.cpp:404]     Test net output #1: loss = 0.425004 (* 1 = 0.425004 loss)\nI0818 08:25:48.179627 17350 solver.cpp:228] Iteration 77100, loss = 0.00102065\nI0818 08:25:48.179674 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:25:48.179692 17350 solver.cpp:244]     Train net output #1: loss = 0.00102008 (* 1 = 0.00102008 loss)\nI0818 08:25:48.259394 17350 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0818 08:26:35.790071 17350 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0818 08:27:02.709563 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0818 08:27:02.709638 17350 solver.cpp:404]     Test net output #1: loss = 0.429669 (* 1 = 0.429669 loss)\nI0818 08:27:03.133957 17350 solver.cpp:228] Iteration 77200, loss = 0.00101207\nI0818 08:27:03.134008 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:27:03.134024 17350 solver.cpp:244]     Train net output #1: loss = 0.00101151 (* 1 = 0.00101151 loss)\nI0818 08:27:03.208786 17350 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0818 08:27:50.641147 17350 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0818 08:28:17.534546 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88684\nI0818 08:28:17.534622 17350 solver.cpp:404]     Test net output #1: loss = 0.42496 (* 1 = 0.42496 loss)\nI0818 08:28:17.959002 17350 solver.cpp:228] Iteration 77300, loss = 0.00106673\nI0818 08:28:17.959050 17350 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:28:17.959066 17350 solver.cpp:244]     Train net output #1: loss = 0.00106616 (* 1 = 0.00106616 loss)\nI0818 08:28:18.038691 17350 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0818 08:29:05.521869 17350 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0818 08:29:32.406476 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0818 08:29:32.406528 17350 solver.cpp:404]     Test net output #1: loss = 0.429565 (* 1 = 0.429565 loss)\nI0818 08:29:32.830811 17350 solver.cpp:228] Iteration 77400, loss = 0.00100885\nI0818 08:29:32.830863 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:29:32.830880 17350 solver.cpp:244]     Train net output #1: loss = 0.00100828 (* 1 = 0.00100828 loss)\nI0818 08:29:32.910198 17350 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0818 08:30:20.348443 17350 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0818 08:30:47.246681 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88696\nI0818 08:30:47.246731 17350 solver.cpp:404]     Test net output #1: loss = 0.425045 (* 1 = 0.425045 loss)\nI0818 08:30:47.670739 17350 solver.cpp:228] Iteration 77500, loss = 0.000940367\nI0818 08:30:47.670795 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:30:47.670812 17350 solver.cpp:244]     Train net output #1: loss = 0.000939803 (* 1 = 0.000939803 loss)\nI0818 08:30:47.745203 17350 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0818 08:31:35.176241 17350 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0818 08:32:02.051041 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88684\nI0818 08:32:02.051092 17350 solver.cpp:404]     Test net output #1: loss = 0.429745 (* 1 = 0.429745 loss)\nI0818 08:32:02.474095 17350 solver.cpp:228] Iteration 77600, loss = 0.000932857\nI0818 08:32:02.474148 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:32:02.474166 17350 solver.cpp:244]     Train net 
output #1: loss = 0.000932293 (* 1 = 0.000932293 loss)\nI0818 08:32:02.555138 17350 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0818 08:32:50.027092 17350 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0818 08:33:16.870681 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88696\nI0818 08:33:16.870725 17350 solver.cpp:404]     Test net output #1: loss = 0.425074 (* 1 = 0.425074 loss)\nI0818 08:33:17.293556 17350 solver.cpp:228] Iteration 77700, loss = 0.000950482\nI0818 08:33:17.293606 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:33:17.293622 17350 solver.cpp:244]     Train net output #1: loss = 0.000949917 (* 1 = 0.000949917 loss)\nI0818 08:33:17.372655 17350 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0818 08:34:04.897927 17350 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0818 08:34:31.780141 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 08:34:31.780190 17350 solver.cpp:404]     Test net output #1: loss = 0.429568 (* 1 = 0.429568 loss)\nI0818 08:34:32.203107 17350 solver.cpp:228] Iteration 77800, loss = 0.00104512\nI0818 08:34:32.203155 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:34:32.203172 17350 solver.cpp:244]     Train net output #1: loss = 0.00104456 (* 1 = 0.00104456 loss)\nI0818 08:34:32.286644 17350 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0818 08:35:19.793056 17350 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0818 08:35:46.626574 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88728\nI0818 08:35:46.626623 17350 solver.cpp:404]     Test net output #1: loss = 0.424916 (* 1 = 0.424916 loss)\nI0818 08:35:47.049178 17350 solver.cpp:228] Iteration 77900, loss = 0.000974086\nI0818 08:35:47.049226 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:35:47.049243 17350 solver.cpp:244]     Train net output #1: loss = 0.000973522 (* 1 = 0.000973522 loss)\nI0818 08:35:47.133023 17350 sgd_solver.cpp:166] 
Iteration 77900, lr = 0.0035\nI0818 08:36:34.649463 17350 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0818 08:37:01.362285 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0818 08:37:01.362331 17350 solver.cpp:404]     Test net output #1: loss = 0.429532 (* 1 = 0.429532 loss)\nI0818 08:37:01.785051 17350 solver.cpp:228] Iteration 78000, loss = 0.000924876\nI0818 08:37:01.785104 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:37:01.785120 17350 solver.cpp:244]     Train net output #1: loss = 0.000924311 (* 1 = 0.000924311 loss)\nI0818 08:37:01.868204 17350 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0818 08:37:49.357792 17350 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0818 08:38:16.248793 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0818 08:38:16.248842 17350 solver.cpp:404]     Test net output #1: loss = 0.425 (* 1 = 0.425 loss)\nI0818 08:38:16.671406 17350 solver.cpp:228] Iteration 78100, loss = 0.0010784\nI0818 08:38:16.671458 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:38:16.671475 17350 solver.cpp:244]     Train net output #1: loss = 0.00107784 (* 1 = 0.00107784 loss)\nI0818 08:38:16.753965 17350 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0818 08:39:04.225045 17350 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0818 08:39:31.078689 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0818 08:39:31.078739 17350 solver.cpp:404]     Test net output #1: loss = 0.429667 (* 1 = 0.429667 loss)\nI0818 08:39:31.502449 17350 solver.cpp:228] Iteration 78200, loss = 0.00110812\nI0818 08:39:31.502499 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:39:31.502517 17350 solver.cpp:244]     Train net output #1: loss = 0.00110756 (* 1 = 0.00110756 loss)\nI0818 08:39:31.582603 17350 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0818 08:40:18.997994 17350 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0818 
08:40:45.734833 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0818 08:40:45.734882 17350 solver.cpp:404]     Test net output #1: loss = 0.424923 (* 1 = 0.424923 loss)\nI0818 08:40:46.158875 17350 solver.cpp:228] Iteration 78300, loss = 0.00102812\nI0818 08:40:46.158928 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:40:46.158946 17350 solver.cpp:244]     Train net output #1: loss = 0.00102756 (* 1 = 0.00102756 loss)\nI0818 08:40:46.239322 17350 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0818 08:41:33.675828 17350 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0818 08:42:00.458479 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88708\nI0818 08:42:00.458526 17350 solver.cpp:404]     Test net output #1: loss = 0.429541 (* 1 = 0.429541 loss)\nI0818 08:42:00.881377 17350 solver.cpp:228] Iteration 78400, loss = 0.00117981\nI0818 08:42:00.881430 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:42:00.881448 17350 solver.cpp:244]     Train net output #1: loss = 0.00117925 (* 1 = 0.00117925 loss)\nI0818 08:42:00.962620 17350 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0818 08:42:48.406424 17350 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0818 08:43:15.191783 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88772\nI0818 08:43:15.191831 17350 solver.cpp:404]     Test net output #1: loss = 0.424842 (* 1 = 0.424842 loss)\nI0818 08:43:15.616102 17350 solver.cpp:228] Iteration 78500, loss = 0.00112271\nI0818 08:43:15.616154 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:43:15.616171 17350 solver.cpp:244]     Train net output #1: loss = 0.00112214 (* 1 = 0.00112214 loss)\nI0818 08:43:15.696250 17350 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0818 08:44:03.187831 17350 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0818 08:44:30.039496 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0818 08:44:30.039544 17350 
solver.cpp:404]     Test net output #1: loss = 0.429624 (* 1 = 0.429624 loss)\nI0818 08:44:30.462942 17350 solver.cpp:228] Iteration 78600, loss = 0.000973569\nI0818 08:44:30.463001 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:44:30.463017 17350 solver.cpp:244]     Train net output #1: loss = 0.000973005 (* 1 = 0.000973005 loss)\nI0818 08:44:30.538630 17350 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0818 08:45:17.945731 17350 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0818 08:45:44.738064 17350 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0818 08:45:44.738111 17350 solver.cpp:404]     Test net output #1: loss = 0.424923 (* 1 = 0.424923 loss)\nI0818 08:45:45.160897 17350 solver.cpp:228] Iteration 78700, loss = 0.00103351\nI0818 08:45:45.160953 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:45:45.160970 17350 solver.cpp:244]     Train net output #1: loss = 0.00103295 (* 1 = 0.00103295 loss)\nI0818 08:45:45.236039 17350 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0818 08:46:32.663164 17350 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0818 08:46:59.364651 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88676\nI0818 08:46:59.364696 17350 solver.cpp:404]     Test net output #1: loss = 0.429413 (* 1 = 0.429413 loss)\nI0818 08:46:59.788815 17350 solver.cpp:228] Iteration 78800, loss = 0.00103285\nI0818 08:46:59.788866 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:46:59.788883 17350 solver.cpp:244]     Train net output #1: loss = 0.00103229 (* 1 = 0.00103229 loss)\nI0818 08:46:59.869982 17350 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0818 08:47:47.266397 17350 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0818 08:48:14.003285 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0818 08:48:14.003334 17350 solver.cpp:404]     Test net output #1: loss = 0.424757 (* 1 = 0.424757 loss)\nI0818 08:48:14.427662 17350 
solver.cpp:228] Iteration 78900, loss = 0.00092923\nI0818 08:48:14.427716 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:48:14.427732 17350 solver.cpp:244]     Train net output #1: loss = 0.000928666 (* 1 = 0.000928666 loss)\nI0818 08:48:14.502826 17350 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0818 08:49:01.920608 17350 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0818 08:49:28.765760 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0818 08:49:28.765810 17350 solver.cpp:404]     Test net output #1: loss = 0.429372 (* 1 = 0.429372 loss)\nI0818 08:49:29.188624 17350 solver.cpp:228] Iteration 79000, loss = 0.00123517\nI0818 08:49:29.188674 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:49:29.188691 17350 solver.cpp:244]     Train net output #1: loss = 0.0012346 (* 1 = 0.0012346 loss)\nI0818 08:49:29.269420 17350 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0818 08:50:16.836683 17350 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0818 08:50:43.542969 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88716\nI0818 08:50:43.543021 17350 solver.cpp:404]     Test net output #1: loss = 0.424789 (* 1 = 0.424789 loss)\nI0818 08:50:43.966305 17350 solver.cpp:228] Iteration 79100, loss = 0.00114005\nI0818 08:50:43.966346 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:50:43.966363 17350 solver.cpp:244]     Train net output #1: loss = 0.00113948 (* 1 = 0.00113948 loss)\nI0818 08:50:44.048427 17350 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0818 08:51:31.564932 17350 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0818 08:51:58.441542 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0818 08:51:58.441587 17350 solver.cpp:404]     Test net output #1: loss = 0.429317 (* 1 = 0.429317 loss)\nI0818 08:51:58.864523 17350 solver.cpp:228] Iteration 79200, loss = 0.00109421\nI0818 08:51:58.864567 17350 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0818 08:51:58.864583 17350 solver.cpp:244]     Train net output #1: loss = 0.00109365 (* 1 = 0.00109365 loss)\nI0818 08:51:58.947005 17350 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0818 08:52:46.378311 17350 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0818 08:53:13.177455 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 08:53:13.177505 17350 solver.cpp:404]     Test net output #1: loss = 0.424867 (* 1 = 0.424867 loss)\nI0818 08:53:13.600358 17350 solver.cpp:228] Iteration 79300, loss = 0.000957828\nI0818 08:53:13.600399 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:53:13.600415 17350 solver.cpp:244]     Train net output #1: loss = 0.000957264 (* 1 = 0.000957264 loss)\nI0818 08:53:13.682162 17350 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0818 08:54:01.132339 17350 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0818 08:54:27.900396 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88636\nI0818 08:54:27.900445 17350 solver.cpp:404]     Test net output #1: loss = 0.429538 (* 1 = 0.429538 loss)\nI0818 08:54:28.323464 17350 solver.cpp:228] Iteration 79400, loss = 0.00101258\nI0818 08:54:28.323505 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:54:28.323523 17350 solver.cpp:244]     Train net output #1: loss = 0.00101202 (* 1 = 0.00101202 loss)\nI0818 08:54:28.404562 17350 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0818 08:55:15.815577 17350 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0818 08:55:42.557552 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0818 08:55:42.557598 17350 solver.cpp:404]     Test net output #1: loss = 0.424904 (* 1 = 0.424904 loss)\nI0818 08:55:42.980511 17350 solver.cpp:228] Iteration 79500, loss = 0.00093619\nI0818 08:55:42.980561 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:55:42.980578 17350 solver.cpp:244]     Train net output #1: loss = 0.000935626 (* 1 = 
0.000935626 loss)\nI0818 08:55:43.055630 17350 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0818 08:56:30.463634 17350 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0818 08:56:57.194983 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0818 08:56:57.195029 17350 solver.cpp:404]     Test net output #1: loss = 0.429394 (* 1 = 0.429394 loss)\nI0818 08:56:57.619262 17350 solver.cpp:228] Iteration 79600, loss = 0.000900771\nI0818 08:56:57.619313 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:56:57.619330 17350 solver.cpp:244]     Train net output #1: loss = 0.000900207 (* 1 = 0.000900207 loss)\nI0818 08:56:57.703536 17350 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0818 08:57:45.093489 17350 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0818 08:58:11.863373 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0818 08:58:11.863418 17350 solver.cpp:404]     Test net output #1: loss = 0.424785 (* 1 = 0.424785 loss)\nI0818 08:58:12.287937 17350 solver.cpp:228] Iteration 79700, loss = 0.000996734\nI0818 08:58:12.287986 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:58:12.288003 17350 solver.cpp:244]     Train net output #1: loss = 0.00099617 (* 1 = 0.00099617 loss)\nI0818 08:58:12.364754 17350 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0818 08:58:59.779428 17350 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0818 08:59:26.498858 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0818 08:59:26.498904 17350 solver.cpp:404]     Test net output #1: loss = 0.429419 (* 1 = 0.429419 loss)\nI0818 08:59:26.923389 17350 solver.cpp:228] Iteration 79800, loss = 0.000905905\nI0818 08:59:26.923449 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 08:59:26.923467 17350 solver.cpp:244]     Train net output #1: loss = 0.000905341 (* 1 = 0.000905341 loss)\nI0818 08:59:27.006877 17350 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0818 
09:00:14.460160 17350 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0818 09:00:41.178704 17350 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0818 09:00:41.178750 17350 solver.cpp:404]     Test net output #1: loss = 0.424855 (* 1 = 0.424855 loss)\nI0818 09:00:41.601974 17350 solver.cpp:228] Iteration 79900, loss = 0.00107241\nI0818 09:00:41.602035 17350 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 09:00:41.602051 17350 solver.cpp:244]     Train net output #1: loss = 0.00107185 (* 1 = 0.00107185 loss)\nI0818 09:00:41.678124 17350 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0818 09:01:29.080250 17350 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Res20Fig6b_iter_80000.caffemodel\nI0818 09:01:29.155594 17350 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Res20Fig6b_iter_80000.solverstate\nI0818 09:01:29.303200 17350 solver.cpp:317] Iteration 80000, loss = 0.000942314\nI0818 09:01:29.303238 17350 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0818 09:01:56.002094 17350 solver.cpp:404]     Test net output #0: accuracy = 0.88636\nI0818 09:01:56.002130 17350 solver.cpp:404]     Test net output #1: loss = 0.429519 (* 1 = 0.429519 loss)\nI0818 09:01:56.002141 17350 solver.cpp:322] Optimization Done.\nI0818 09:01:57.972158 17350 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Tr10kTab1",
    "content": "I0821 08:27:51.145185 32360 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:27:51.147768 32360 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:27:51.148996 32360 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:27:51.150209 32360 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:27:51.151429 32360 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:27:51.152657 32360 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:27:51.153884 32360 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:27:51.155112 32360 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:27:51.156350 32360 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:27:51.575973 32360 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Tr10kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0821 08:27:51.580231 32360 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:27:51.597474 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:51.597555 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:51.598675 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:27:51.598742 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:27:51.598764 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:27:51.598783 32360 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:27:51.598803 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:27:51.598820 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:27:51.598839 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:27:51.598856 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:27:51.598876 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:27:51.598893 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:27:51.598912 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:27:51.598928 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:27:51.598948 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:27:51.598966 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:27:51.598984 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:27:51.599002 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:27:51.599020 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:27:51.599037 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:27:51.599056 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:27:51.599073 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:27:51.599107 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:27:51.599124 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:27:51.599148 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:27:51.599169 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:27:51.599185 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:27:51.599201 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:27:51.599220 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:27:51.599237 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:27:51.599254 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:27:51.599282 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:27:51.599303 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:27:51.599319 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:27:51.599339 32360 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:27:51.599354 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:27:51.599372 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:27:51.599390 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:27:51.599409 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:27:51.599426 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:27:51.599443 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:27:51.599462 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:27:51.599485 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:27:51.599503 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:27:51.599520 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:27:51.599539 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:27:51.599558 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:27:51.599575 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:27:51.599594 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:27:51.599609 32360 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:27:51.599628 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:27:51.599645 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:27:51.599663 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:27:51.599689 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:27:51.599709 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:27:51.599726 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:27:51.599745 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:27:51.599759 32360 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:27:51.601516 32360 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train10k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.9\nI0821 08:27:51.603627 32360 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:27:51.606606 32360 net.cpp:100] Creating Layer dataLayer\nI0821 08:27:51.606688 32360 net.cpp:408] dataLayer -> data_top\nI0821 08:27:51.606900 32360 net.cpp:408] dataLayer -> label\nI0821 08:27:51.607029 32360 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:27:51.692651 32365 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train10k_lmdb\nI0821 08:27:51.693138 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:51.700240 32360 net.cpp:150] Setting up dataLayer\nI0821 08:27:51.700317 32360 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:27:51.700331 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.700337 32360 net.cpp:165] Memory required for data: 1536500\nI0821 08:27:51.700351 32360 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:27:51.700366 32360 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:27:51.700374 32360 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:27:51.700392 32360 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:27:51.700407 32360 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:27:51.700477 32360 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:27:51.700490 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.700497 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.700502 32360 net.cpp:165] Memory required for data: 1537500\nI0821 08:27:51.700507 32360 layer_factory.hpp:77] Creating layer pre_conv\nI0821 
08:27:51.700575 32360 net.cpp:100] Creating Layer pre_conv\nI0821 08:27:51.700587 32360 net.cpp:434] pre_conv <- data_top\nI0821 08:27:51.700600 32360 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:27:51.702466 32360 net.cpp:150] Setting up pre_conv\nI0821 08:27:51.702486 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.702500 32360 net.cpp:165] Memory required for data: 9729500\nI0821 08:27:51.702572 32360 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:27:51.702651 32360 net.cpp:100] Creating Layer pre_bn\nI0821 08:27:51.702664 32360 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:27:51.702677 32360 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:27:51.702803 32366 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:51.703016 32360 net.cpp:150] Setting up pre_bn\nI0821 08:27:51.703033 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.703039 32360 net.cpp:165] Memory required for data: 17921500\nI0821 08:27:51.703058 32360 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:51.703109 32360 net.cpp:100] Creating Layer pre_scale\nI0821 08:27:51.703119 32360 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:27:51.703127 32360 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:27:51.703315 32360 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:51.706779 32360 net.cpp:150] Setting up pre_scale\nI0821 08:27:51.706796 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.706802 32360 net.cpp:165] Memory required for data: 26113500\nI0821 08:27:51.706814 32360 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:27:51.706864 32360 net.cpp:100] Creating Layer pre_relu\nI0821 08:27:51.706874 32360 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:27:51.706883 32360 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:27:51.706902 32360 net.cpp:150] Setting up pre_relu\nI0821 08:27:51.706910 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.706915 32360 net.cpp:165] Memory 
required for data: 34305500\nI0821 08:27:51.706920 32360 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:27:51.706930 32360 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:27:51.706935 32360 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:27:51.706943 32360 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:27:51.706953 32360 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:27:51.707005 32360 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:27:51.707020 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.707027 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.707031 32360 net.cpp:165] Memory required for data: 50689500\nI0821 08:27:51.707037 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:27:51.707049 32360 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:27:51.707056 32360 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:27:51.707064 32360 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:27:51.707394 32360 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:27:51.707409 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.707414 32360 net.cpp:165] Memory required for data: 58881500\nI0821 08:27:51.707427 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:27:51.707442 32360 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:27:51.707449 32360 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:27:51.707460 32360 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:27:51.707695 32360 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:27:51.707710 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.707715 32360 net.cpp:165] Memory required for data: 67073500\nI0821 08:27:51.707726 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:51.707737 32360 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:27:51.707744 32360 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:27:51.707752 32360 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.707801 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:51.707940 32360 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:27:51.707953 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.707958 32360 net.cpp:165] Memory required for data: 75265500\nI0821 08:27:51.707968 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:27:51.707984 32360 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:27:51.707990 32360 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:27:51.708001 32360 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.708011 32360 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:27:51.708019 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.708024 32360 net.cpp:165] Memory required for data: 83457500\nI0821 08:27:51.708029 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:27:51.708043 32360 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:27:51.708050 32360 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:27:51.708057 32360 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:27:51.708365 32360 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:27:51.708379 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.708385 32360 net.cpp:165] Memory required for data: 91649500\nI0821 08:27:51.708395 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:27:51.708406 32360 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:27:51.708412 32360 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:27:51.708421 32360 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:27:51.708657 32360 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 
08:27:51.708670 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.708675 32360 net.cpp:165] Memory required for data: 99841500\nI0821 08:27:51.708691 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:51.708703 32360 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:27:51.708710 32360 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:27:51.708720 32360 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.708775 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:51.708909 32360 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:27:51.708921 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.708926 32360 net.cpp:165] Memory required for data: 108033500\nI0821 08:27:51.708935 32360 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:27:51.708994 32360 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:27:51.709007 32360 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:27:51.709014 32360 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:27:51.709022 32360 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:27:51.709100 32360 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:27:51.709115 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709120 32360 net.cpp:165] Memory required for data: 116225500\nI0821 08:27:51.709125 32360 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:27:51.709134 32360 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:27:51.709139 32360 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:27:51.709151 32360 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.709161 32360 net.cpp:150] Setting up L1_b1_relu\nI0821 08:27:51.709167 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709172 32360 net.cpp:165] Memory required for data: 124417500\nI0821 08:27:51.709177 32360 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.709187 32360 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.709192 32360 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:27:51.709199 32360 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:51.709208 32360 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:51.709254 32360 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.709275 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709281 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709293 32360 net.cpp:165] Memory required for data: 140801500\nI0821 08:27:51.709300 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:27:51.709313 32360 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:27:51.709321 32360 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:51.709329 32360 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:27:51.709632 32360 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:27:51.709646 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709651 32360 net.cpp:165] Memory required for data: 148993500\nI0821 08:27:51.709661 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:27:51.709669 32360 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:27:51.709676 32360 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:27:51.709698 32360 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:27:51.709939 32360 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:27:51.709952 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.709957 32360 net.cpp:165] Memory required for data: 157185500\nI0821 08:27:51.709967 32360 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:51.709977 32360 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:27:51.709982 32360 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:27:51.709990 32360 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.710045 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:51.710181 32360 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:27:51.710194 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.710199 32360 net.cpp:165] Memory required for data: 165377500\nI0821 08:27:51.710208 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:27:51.710217 32360 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:27:51.710222 32360 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:27:51.710233 32360 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.710242 32360 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:27:51.710250 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.710254 32360 net.cpp:165] Memory required for data: 173569500\nI0821 08:27:51.710266 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:27:51.710279 32360 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:27:51.710284 32360 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:27:51.710296 32360 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:27:51.710597 32360 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:27:51.710611 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.710616 32360 net.cpp:165] Memory required for data: 181761500\nI0821 08:27:51.710625 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:27:51.710634 32360 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:27:51.710640 32360 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:27:51.710651 32360 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0821 08:27:51.710886 32360 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:27:51.710901 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.710906 32360 net.cpp:165] Memory required for data: 189953500\nI0821 08:27:51.710922 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:51.710930 32360 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:27:51.710937 32360 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:27:51.710947 32360 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.710999 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:51.711135 32360 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:27:51.711150 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.711155 32360 net.cpp:165] Memory required for data: 198145500\nI0821 08:27:51.711164 32360 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:27:51.711180 32360 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:27:51.711187 32360 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:27:51.711194 32360 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:51.711201 32360 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:27:51.711235 32360 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:27:51.711244 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.711248 32360 net.cpp:165] Memory required for data: 206337500\nI0821 08:27:51.711254 32360 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:27:51.711267 32360 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:27:51.711274 32360 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:27:51.711282 32360 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:27:51.711290 32360 net.cpp:150] Setting up L1_b2_relu\nI0821 08:27:51.711297 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:27:51.711302 32360 net.cpp:165] Memory required for data: 214529500\nI0821 08:27:51.711308 32360 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.711314 32360 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.711319 32360 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:27:51.711329 32360 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:51.711344 32360 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:51.711385 32360 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.711396 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.711403 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.711407 32360 net.cpp:165] Memory required for data: 230913500\nI0821 08:27:51.711412 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:27:51.711438 32360 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:27:51.711447 32360 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:51.711455 32360 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:27:51.711761 32360 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:27:51.711774 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.711779 32360 net.cpp:165] Memory required for data: 239105500\nI0821 08:27:51.711788 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:27:51.711800 32360 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:27:51.711807 32360 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:27:51.711814 32360 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:27:51.712045 32360 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:27:51.712059 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:27:51.712064 32360 net.cpp:165] Memory required for data: 247297500\nI0821 08:27:51.712074 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:51.712082 32360 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:27:51.712088 32360 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:27:51.712100 32360 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.712149 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:51.712291 32360 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:27:51.712307 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.712312 32360 net.cpp:165] Memory required for data: 255489500\nI0821 08:27:51.712322 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:27:51.712330 32360 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:27:51.712337 32360 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:27:51.712343 32360 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.712352 32360 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:27:51.712366 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.712371 32360 net.cpp:165] Memory required for data: 263681500\nI0821 08:27:51.712376 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:27:51.712390 32360 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:27:51.712396 32360 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:27:51.712407 32360 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:27:51.712713 32360 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:27:51.712726 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.712731 32360 net.cpp:165] Memory required for data: 271873500\nI0821 08:27:51.712740 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:27:51.712757 32360 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:27:51.712764 32360 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0821 08:27:51.712772 32360 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:27:51.713009 32360 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:27:51.713022 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713027 32360 net.cpp:165] Memory required for data: 280065500\nI0821 08:27:51.713038 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:51.713047 32360 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:27:51.713053 32360 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:27:51.713064 32360 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.713115 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:51.713249 32360 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:27:51.713274 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713279 32360 net.cpp:165] Memory required for data: 288257500\nI0821 08:27:51.713289 32360 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:27:51.713299 32360 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:27:51.713304 32360 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:27:51.713311 32360 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:51.713321 32360 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:27:51.713353 32360 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:27:51.713362 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713367 32360 net.cpp:165] Memory required for data: 296449500\nI0821 08:27:51.713372 32360 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:27:51.713383 32360 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:27:51.713389 32360 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:27:51.713395 32360 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.713405 32360 net.cpp:150] Setting up L1_b3_relu\nI0821 
08:27:51.713413 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713418 32360 net.cpp:165] Memory required for data: 304641500\nI0821 08:27:51.713421 32360 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.713431 32360 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.713436 32360 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:27:51.713444 32360 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:51.713454 32360 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:51.713495 32360 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.713510 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713516 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713521 32360 net.cpp:165] Memory required for data: 321025500\nI0821 08:27:51.713526 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:27:51.713537 32360 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:27:51.713543 32360 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:51.713558 32360 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:27:51.713868 32360 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:27:51.713882 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.713887 32360 net.cpp:165] Memory required for data: 329217500\nI0821 08:27:51.713896 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:27:51.713907 32360 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:27:51.713913 32360 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:27:51.713922 32360 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:27:51.714157 32360 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 
08:27:51.714169 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.714174 32360 net.cpp:165] Memory required for data: 337409500\nI0821 08:27:51.714184 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:51.714193 32360 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:27:51.714200 32360 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:27:51.714210 32360 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.714267 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:51.714411 32360 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:27:51.714424 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.714429 32360 net.cpp:165] Memory required for data: 345601500\nI0821 08:27:51.714439 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:27:51.714447 32360 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:27:51.714452 32360 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:27:51.714463 32360 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.714473 32360 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:27:51.714480 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.714485 32360 net.cpp:165] Memory required for data: 353793500\nI0821 08:27:51.714490 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:27:51.714504 32360 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:27:51.714509 32360 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:27:51.714519 32360 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:27:51.714828 32360 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:27:51.714843 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.714848 32360 net.cpp:165] Memory required for data: 361985500\nI0821 08:27:51.714856 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:27:51.714865 32360 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0821 08:27:51.714874 32360 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:27:51.714882 32360 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:27:51.715116 32360 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:27:51.715128 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715133 32360 net.cpp:165] Memory required for data: 370177500\nI0821 08:27:51.715144 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:51.715155 32360 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:27:51.715162 32360 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:27:51.715169 32360 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.715224 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:51.715368 32360 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:27:51.715381 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715386 32360 net.cpp:165] Memory required for data: 378369500\nI0821 08:27:51.715395 32360 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:27:51.715404 32360 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:27:51.715410 32360 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:27:51.715417 32360 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:51.715427 32360 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:27:51.715469 32360 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:27:51.715479 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715484 32360 net.cpp:165] Memory required for data: 386561500\nI0821 08:27:51.715489 32360 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:27:51.715497 32360 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:27:51.715502 32360 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:27:51.715509 32360 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0821 08:27:51.715522 32360 net.cpp:150] Setting up L1_b4_relu\nI0821 08:27:51.715529 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715533 32360 net.cpp:165] Memory required for data: 394753500\nI0821 08:27:51.715538 32360 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.715545 32360 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.715550 32360 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:27:51.715559 32360 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:51.715567 32360 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:51.715611 32360 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.715623 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715629 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715634 32360 net.cpp:165] Memory required for data: 411137500\nI0821 08:27:51.715639 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:27:51.715651 32360 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:27:51.715656 32360 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:51.715672 32360 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:27:51.715976 32360 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:27:51.715991 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.715996 32360 net.cpp:165] Memory required for data: 419329500\nI0821 08:27:51.716017 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:27:51.716027 32360 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:27:51.716032 32360 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:27:51.716043 32360 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0821 08:27:51.716289 32360 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:27:51.716302 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.716307 32360 net.cpp:165] Memory required for data: 427521500\nI0821 08:27:51.716317 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:51.716329 32360 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:27:51.716336 32360 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:27:51.716343 32360 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.716423 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:51.716627 32360 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:27:51.716644 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.716648 32360 net.cpp:165] Memory required for data: 435713500\nI0821 08:27:51.716658 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:27:51.716667 32360 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:27:51.716673 32360 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:27:51.716684 32360 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.716696 32360 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:27:51.716702 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.716706 32360 net.cpp:165] Memory required for data: 443905500\nI0821 08:27:51.716711 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:27:51.716725 32360 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:27:51.716738 32360 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:27:51.716747 32360 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:27:51.717061 32360 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:27:51.717075 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717082 32360 net.cpp:165] Memory required for data: 452097500\nI0821 08:27:51.717089 32360 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:27:51.717102 32360 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:27:51.717108 32360 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:27:51.717116 32360 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:27:51.717360 32360 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:27:51.717375 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717380 32360 net.cpp:165] Memory required for data: 460289500\nI0821 08:27:51.717391 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:51.717399 32360 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:27:51.717406 32360 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:27:51.717416 32360 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.717468 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:51.717602 32360 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:27:51.717618 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717623 32360 net.cpp:165] Memory required for data: 468481500\nI0821 08:27:51.717633 32360 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:27:51.717641 32360 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:27:51.717648 32360 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:27:51.717654 32360 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:51.717665 32360 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:27:51.717695 32360 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:27:51.717705 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717710 32360 net.cpp:165] Memory required for data: 476673500\nI0821 08:27:51.717715 32360 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:27:51.717725 32360 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:27:51.717731 32360 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0821 08:27:51.717738 32360 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.717747 32360 net.cpp:150] Setting up L1_b5_relu\nI0821 08:27:51.717754 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717758 32360 net.cpp:165] Memory required for data: 484865500\nI0821 08:27:51.717763 32360 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.717773 32360 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.717778 32360 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:27:51.717787 32360 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:51.717795 32360 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:51.717838 32360 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.717852 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717859 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.717864 32360 net.cpp:165] Memory required for data: 501249500\nI0821 08:27:51.717869 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:27:51.717880 32360 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:27:51.717886 32360 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:51.717895 32360 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:27:51.718204 32360 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:27:51.718219 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.718224 32360 net.cpp:165] Memory required for data: 509441500\nI0821 08:27:51.718240 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:27:51.718252 32360 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:27:51.718263 32360 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0821 08:27:51.718273 32360 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:27:51.718513 32360 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:27:51.718526 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.718531 32360 net.cpp:165] Memory required for data: 517633500\nI0821 08:27:51.718541 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:51.718550 32360 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:27:51.718556 32360 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:27:51.718567 32360 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.718618 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:51.718760 32360 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:27:51.718773 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.718778 32360 net.cpp:165] Memory required for data: 525825500\nI0821 08:27:51.718787 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:27:51.718796 32360 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:27:51.718802 32360 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:27:51.718812 32360 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.718822 32360 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:27:51.718829 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.718834 32360 net.cpp:165] Memory required for data: 534017500\nI0821 08:27:51.718838 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:27:51.718852 32360 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:27:51.718858 32360 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:27:51.718868 32360 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:27:51.719178 32360 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:27:51.719192 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.719197 32360 
net.cpp:165] Memory required for data: 542209500\nI0821 08:27:51.719207 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:27:51.719218 32360 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:27:51.719224 32360 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:27:51.719235 32360 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:27:51.719476 32360 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:27:51.719491 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.719496 32360 net.cpp:165] Memory required for data: 550401500\nI0821 08:27:51.719506 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:51.719516 32360 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:27:51.719521 32360 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:27:51.719532 32360 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.719584 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:51.719727 32360 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:27:51.719743 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.719748 32360 net.cpp:165] Memory required for data: 558593500\nI0821 08:27:51.719756 32360 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:27:51.719772 32360 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:27:51.719779 32360 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:27:51.719786 32360 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:51.719794 32360 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:27:51.719830 32360 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:27:51.719841 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.719846 32360 net.cpp:165] Memory required for data: 566785500\nI0821 08:27:51.719851 32360 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:27:51.719867 32360 net.cpp:100] Creating 
Layer L1_b6_relu\nI0821 08:27:51.719873 32360 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:27:51.719883 32360 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.719893 32360 net.cpp:150] Setting up L1_b6_relu\nI0821 08:27:51.719900 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.719905 32360 net.cpp:165] Memory required for data: 574977500\nI0821 08:27:51.719909 32360 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.719918 32360 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.719923 32360 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:27:51.719929 32360 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:51.719938 32360 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:51.719986 32360 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.719998 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720005 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720010 32360 net.cpp:165] Memory required for data: 591361500\nI0821 08:27:51.720015 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:27:51.720026 32360 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:27:51.720032 32360 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:51.720043 32360 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:27:51.720372 32360 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:27:51.720386 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720391 32360 net.cpp:165] Memory required for data: 599553500\nI0821 08:27:51.720401 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:27:51.720410 32360 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0821 08:27:51.720417 32360 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:27:51.720427 32360 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:27:51.720669 32360 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:27:51.720682 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720687 32360 net.cpp:165] Memory required for data: 607745500\nI0821 08:27:51.720698 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:51.720710 32360 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:27:51.720716 32360 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:27:51.720724 32360 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.720775 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:51.720916 32360 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:27:51.720929 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720934 32360 net.cpp:165] Memory required for data: 615937500\nI0821 08:27:51.720943 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:27:51.720954 32360 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:27:51.720962 32360 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:27:51.720968 32360 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.720978 32360 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:27:51.720984 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.720989 32360 net.cpp:165] Memory required for data: 624129500\nI0821 08:27:51.720994 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:27:51.721009 32360 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:27:51.721015 32360 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:27:51.721026 32360 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:27:51.721343 32360 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:27:51.721357 32360 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.721362 32360 net.cpp:165] Memory required for data: 632321500\nI0821 08:27:51.721379 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:27:51.721388 32360 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:27:51.721395 32360 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:27:51.721402 32360 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:27:51.721648 32360 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:27:51.721662 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.721666 32360 net.cpp:165] Memory required for data: 640513500\nI0821 08:27:51.721678 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:51.721688 32360 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:27:51.721695 32360 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:27:51.721702 32360 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.721758 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:51.721899 32360 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:27:51.721910 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.721915 32360 net.cpp:165] Memory required for data: 648705500\nI0821 08:27:51.721925 32360 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:27:51.721935 32360 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:27:51.721940 32360 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:27:51.721947 32360 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:51.721958 32360 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:27:51.721992 32360 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:27:51.722002 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722007 32360 net.cpp:165] Memory required for data: 656897500\nI0821 08:27:51.722012 32360 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0821 08:27:51.722020 32360 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:27:51.722025 32360 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:27:51.722035 32360 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.722045 32360 net.cpp:150] Setting up L1_b7_relu\nI0821 08:27:51.722053 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722057 32360 net.cpp:165] Memory required for data: 665089500\nI0821 08:27:51.722062 32360 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.722069 32360 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.722074 32360 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:27:51.722081 32360 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:51.722091 32360 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:51.722136 32360 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.722170 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722178 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722182 32360 net.cpp:165] Memory required for data: 681473500\nI0821 08:27:51.722188 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:27:51.722199 32360 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:27:51.722206 32360 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:51.722218 32360 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:27:51.722543 32360 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:27:51.722558 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722563 32360 net.cpp:165] Memory required for data: 689665500\nI0821 08:27:51.722571 32360 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0821 08:27:51.722580 32360 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:27:51.722586 32360 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:27:51.722599 32360 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:27:51.722851 32360 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:27:51.722864 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.722869 32360 net.cpp:165] Memory required for data: 697857500\nI0821 08:27:51.722880 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:51.722892 32360 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:27:51.722898 32360 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:27:51.722906 32360 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.722959 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:51.723104 32360 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:27:51.723117 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.723122 32360 net.cpp:165] Memory required for data: 706049500\nI0821 08:27:51.723131 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:27:51.723140 32360 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:27:51.723146 32360 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:27:51.723156 32360 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.723166 32360 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:27:51.723173 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.723178 32360 net.cpp:165] Memory required for data: 714241500\nI0821 08:27:51.723183 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:27:51.723197 32360 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:27:51.723203 32360 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:27:51.723213 32360 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:27:51.723539 32360 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:27:51.723554 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.723559 32360 net.cpp:165] Memory required for data: 722433500\nI0821 08:27:51.723568 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:27:51.723578 32360 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:27:51.723584 32360 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:27:51.723592 32360 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:27:51.723839 32360 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:27:51.723851 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.723856 32360 net.cpp:165] Memory required for data: 730625500\nI0821 08:27:51.723866 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:51.723879 32360 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:27:51.723886 32360 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:27:51.723893 32360 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.723949 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:51.724087 32360 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:27:51.724100 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724105 32360 net.cpp:165] Memory required for data: 738817500\nI0821 08:27:51.724114 32360 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:27:51.724123 32360 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:27:51.724129 32360 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:27:51.724136 32360 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:51.724146 32360 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:27:51.724180 32360 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:27:51.724190 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724195 32360 net.cpp:165] Memory required for 
data: 747009500\nI0821 08:27:51.724200 32360 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:27:51.724207 32360 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:27:51.724213 32360 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:27:51.724220 32360 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.724230 32360 net.cpp:150] Setting up L1_b8_relu\nI0821 08:27:51.724243 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724248 32360 net.cpp:165] Memory required for data: 755201500\nI0821 08:27:51.724253 32360 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.724268 32360 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.724275 32360 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:27:51.724284 32360 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:51.724294 32360 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:51.724340 32360 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.724352 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724359 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724364 32360 net.cpp:165] Memory required for data: 771585500\nI0821 08:27:51.724369 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:27:51.724380 32360 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:27:51.724386 32360 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:51.724397 32360 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:27:51.724723 32360 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:27:51.724738 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.724742 32360 net.cpp:165] Memory required for data: 
779777500\nI0821 08:27:51.724751 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:27:51.724763 32360 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:27:51.724771 32360 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:27:51.724779 32360 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:27:51.725025 32360 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:27:51.725039 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.725044 32360 net.cpp:165] Memory required for data: 787969500\nI0821 08:27:51.725054 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:51.725064 32360 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:27:51.725069 32360 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:27:51.725077 32360 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.725132 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:51.725283 32360 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:27:51.725297 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.725302 32360 net.cpp:165] Memory required for data: 796161500\nI0821 08:27:51.725311 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:27:51.725320 32360 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:27:51.725327 32360 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:27:51.725337 32360 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.725347 32360 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:27:51.725353 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.725358 32360 net.cpp:165] Memory required for data: 804353500\nI0821 08:27:51.725363 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:27:51.725376 32360 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:27:51.725383 32360 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:27:51.725390 32360 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:27:51.725711 32360 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:27:51.725724 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.725729 32360 net.cpp:165] Memory required for data: 812545500\nI0821 08:27:51.725739 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:27:51.725751 32360 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:27:51.725757 32360 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:27:51.725766 32360 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:27:51.726037 32360 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:27:51.726058 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.726063 32360 net.cpp:165] Memory required for data: 820737500\nI0821 08:27:51.726094 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:51.726106 32360 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:27:51.726114 32360 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:27:51.726120 32360 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.726176 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:51.726322 32360 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:27:51.726336 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.726341 32360 net.cpp:165] Memory required for data: 828929500\nI0821 08:27:51.726351 32360 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:27:51.726362 32360 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:27:51.726369 32360 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:27:51.726377 32360 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:51.726384 32360 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:27:51.726415 32360 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:27:51.726424 32360 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:51.726429 32360 net.cpp:165] Memory required for data: 837121500\nI0821 08:27:51.726434 32360 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:27:51.726442 32360 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:27:51.726449 32360 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:27:51.726457 32360 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.726467 32360 net.cpp:150] Setting up L1_b9_relu\nI0821 08:27:51.726475 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.726480 32360 net.cpp:165] Memory required for data: 845313500\nI0821 08:27:51.726485 32360 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.726491 32360 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.726496 32360 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:27:51.726507 32360 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:51.726518 32360 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:51.726563 32360 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.726575 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.726582 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.726586 32360 net.cpp:165] Memory required for data: 861697500\nI0821 08:27:51.726591 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:27:51.726603 32360 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:27:51.726609 32360 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:51.726620 32360 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:27:51.726945 32360 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:27:51.726959 32360 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0821 08:27:51.726964 32360 net.cpp:165] Memory required for data: 863745500\nI0821 08:27:51.726974 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:27:51.726985 32360 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:27:51.726991 32360 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:27:51.727000 32360 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:27:51.727237 32360 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:27:51.727249 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.727254 32360 net.cpp:165] Memory required for data: 865793500\nI0821 08:27:51.727272 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:51.727285 32360 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:27:51.727298 32360 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:27:51.727308 32360 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.727361 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:51.727505 32360 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:27:51.727519 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.727524 32360 net.cpp:165] Memory required for data: 867841500\nI0821 08:27:51.727532 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:27:51.727543 32360 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:27:51.727550 32360 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:27:51.727560 32360 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.727569 32360 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:27:51.727576 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.727581 32360 net.cpp:165] Memory required for data: 869889500\nI0821 08:27:51.727586 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:27:51.727597 32360 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:27:51.727602 32360 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:27:51.727613 32360 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:27:51.727931 32360 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:27:51.727944 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.727949 32360 net.cpp:165] Memory required for data: 871937500\nI0821 08:27:51.727958 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:27:51.727967 32360 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:27:51.727973 32360 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:27:51.727984 32360 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:27:51.728231 32360 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:27:51.728243 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.728250 32360 net.cpp:165] Memory required for data: 873985500\nI0821 08:27:51.728266 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:51.728278 32360 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:27:51.728284 32360 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:27:51.728292 32360 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.728348 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:51.728493 32360 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:27:51.728507 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.728513 32360 net.cpp:165] Memory required for data: 876033500\nI0821 08:27:51.728521 32360 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:27:51.728535 32360 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:27:51.728543 32360 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:51.728550 32360 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:27:51.728643 32360 net.cpp:150] Setting up L2_b1_pool\nI0821 08:27:51.728658 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.728663 32360 net.cpp:165] Memory 
required for data: 878081500\nI0821 08:27:51.728669 32360 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:27:51.728679 32360 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:27:51.728685 32360 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:27:51.728693 32360 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:27:51.728703 32360 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:27:51.728737 32360 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:27:51.728746 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.728751 32360 net.cpp:165] Memory required for data: 880129500\nI0821 08:27:51.728756 32360 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:27:51.728765 32360 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:27:51.728770 32360 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:27:51.728785 32360 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.728794 32360 net.cpp:150] Setting up L2_b1_relu\nI0821 08:27:51.728802 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.728806 32360 net.cpp:165] Memory required for data: 882177500\nI0821 08:27:51.728812 32360 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:27:51.728868 32360 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:27:51.728881 32360 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:27:51.731179 32360 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:27:51.731197 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.731204 32360 net.cpp:165] Memory required for data: 884225500\nI0821 08:27:51.731209 32360 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:27:51.731220 32360 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:27:51.731225 32360 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:27:51.731233 32360 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:27:51.731243 32360 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 
08:27:51.731333 32360 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:27:51.731353 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.731359 32360 net.cpp:165] Memory required for data: 888321500\nI0821 08:27:51.731364 32360 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.731372 32360 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.731379 32360 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:27:51.731386 32360 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:51.731396 32360 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:51.731448 32360 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.731461 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.731467 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.731472 32360 net.cpp:165] Memory required for data: 896513500\nI0821 08:27:51.731477 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:27:51.731492 32360 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:27:51.731498 32360 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:51.731508 32360 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:27:51.732941 32360 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:27:51.732959 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.732964 32360 net.cpp:165] Memory required for data: 900609500\nI0821 08:27:51.732975 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:27:51.732987 32360 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:27:51.732995 32360 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:27:51.733002 32360 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:27:51.733252 32360 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 
08:27:51.733273 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.733279 32360 net.cpp:165] Memory required for data: 904705500\nI0821 08:27:51.733290 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:51.733299 32360 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:27:51.733306 32360 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:27:51.733314 32360 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.733371 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:51.733525 32360 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:27:51.733538 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.733543 32360 net.cpp:165] Memory required for data: 908801500\nI0821 08:27:51.733552 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:27:51.733561 32360 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:27:51.733567 32360 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:27:51.733585 32360 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.733597 32360 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:27:51.733604 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.733609 32360 net.cpp:165] Memory required for data: 912897500\nI0821 08:27:51.733613 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:27:51.733625 32360 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:27:51.733631 32360 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:27:51.733642 32360 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:27:51.734104 32360 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:27:51.734118 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734123 32360 net.cpp:165] Memory required for data: 916993500\nI0821 08:27:51.734133 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:27:51.734143 32360 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0821 08:27:51.734148 32360 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:27:51.734159 32360 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:27:51.734416 32360 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:27:51.734429 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734434 32360 net.cpp:165] Memory required for data: 921089500\nI0821 08:27:51.734446 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:51.734457 32360 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:27:51.734463 32360 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:27:51.734472 32360 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.734526 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:51.734671 32360 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:27:51.734683 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734688 32360 net.cpp:165] Memory required for data: 925185500\nI0821 08:27:51.734699 32360 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:27:51.734710 32360 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:27:51.734717 32360 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:27:51.734724 32360 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:51.734732 32360 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:27:51.734761 32360 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:27:51.734771 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734776 32360 net.cpp:165] Memory required for data: 929281500\nI0821 08:27:51.734781 32360 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:27:51.734788 32360 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:27:51.734794 32360 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:27:51.734804 32360 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0821 08:27:51.734814 32360 net.cpp:150] Setting up L2_b2_relu\nI0821 08:27:51.734822 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734827 32360 net.cpp:165] Memory required for data: 933377500\nI0821 08:27:51.734832 32360 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.734838 32360 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.734843 32360 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:27:51.734851 32360 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:51.734860 32360 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:51.734908 32360 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.734920 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734926 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.734931 32360 net.cpp:165] Memory required for data: 941569500\nI0821 08:27:51.734936 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:27:51.734954 32360 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:27:51.734961 32360 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:51.734973 32360 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:27:51.735445 32360 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:27:51.735460 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.735466 32360 net.cpp:165] Memory required for data: 945665500\nI0821 08:27:51.735474 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:27:51.735484 32360 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:27:51.735491 32360 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:27:51.735502 32360 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0821 08:27:51.735745 32360 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:27:51.735759 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.735764 32360 net.cpp:165] Memory required for data: 949761500\nI0821 08:27:51.735774 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:51.735785 32360 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:27:51.735792 32360 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:27:51.735800 32360 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.735855 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:51.736003 32360 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:27:51.736016 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.736021 32360 net.cpp:165] Memory required for data: 953857500\nI0821 08:27:51.736030 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:27:51.736042 32360 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:27:51.736048 32360 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:27:51.736055 32360 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.736065 32360 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:27:51.736074 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.736079 32360 net.cpp:165] Memory required for data: 957953500\nI0821 08:27:51.736084 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:27:51.736095 32360 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:27:51.736101 32360 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:27:51.736112 32360 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:27:51.736578 32360 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:27:51.736593 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.736598 32360 net.cpp:165] Memory required for data: 962049500\nI0821 08:27:51.736608 32360 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:27:51.736616 32360 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:27:51.736623 32360 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:27:51.736634 32360 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:27:51.736883 32360 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:27:51.736896 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.736901 32360 net.cpp:165] Memory required for data: 966145500\nI0821 08:27:51.736912 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:51.736923 32360 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:27:51.736930 32360 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:27:51.736937 32360 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.736994 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:51.737140 32360 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:27:51.737152 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737157 32360 net.cpp:165] Memory required for data: 970241500\nI0821 08:27:51.737167 32360 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:27:51.737179 32360 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:27:51.737185 32360 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:27:51.737200 32360 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:51.737207 32360 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:27:51.737239 32360 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:27:51.737248 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737253 32360 net.cpp:165] Memory required for data: 974337500\nI0821 08:27:51.737264 32360 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:27:51.737287 32360 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:27:51.737293 32360 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0821 08:27:51.737301 32360 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.737311 32360 net.cpp:150] Setting up L2_b3_relu\nI0821 08:27:51.737318 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737323 32360 net.cpp:165] Memory required for data: 978433500\nI0821 08:27:51.737329 32360 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.737339 32360 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.737344 32360 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:27:51.737352 32360 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:51.737361 32360 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:51.737407 32360 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.737422 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737429 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737434 32360 net.cpp:165] Memory required for data: 986625500\nI0821 08:27:51.737439 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:27:51.737450 32360 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:27:51.737457 32360 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:51.737465 32360 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:27:51.737926 32360 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:27:51.737939 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.737944 32360 net.cpp:165] Memory required for data: 990721500\nI0821 08:27:51.737953 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:27:51.737965 32360 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:27:51.737972 32360 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0821 08:27:51.737979 32360 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:27:51.738227 32360 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:27:51.738240 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.738245 32360 net.cpp:165] Memory required for data: 994817500\nI0821 08:27:51.738255 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:51.738271 32360 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:27:51.738277 32360 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:27:51.738288 32360 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.738344 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:51.738488 32360 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:27:51.738502 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.738507 32360 net.cpp:165] Memory required for data: 998913500\nI0821 08:27:51.738515 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:27:51.738523 32360 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:27:51.738529 32360 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:27:51.738539 32360 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.738549 32360 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:27:51.738556 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.738561 32360 net.cpp:165] Memory required for data: 1003009500\nI0821 08:27:51.738566 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:27:51.738590 32360 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:27:51.738595 32360 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:27:51.738605 32360 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:27:51.739064 32360 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:27:51.739078 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739084 32360 
net.cpp:165] Memory required for data: 1007105500\nI0821 08:27:51.739092 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:27:51.739104 32360 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:27:51.739110 32360 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:27:51.739120 32360 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:27:51.739369 32360 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:27:51.739382 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739387 32360 net.cpp:165] Memory required for data: 1011201500\nI0821 08:27:51.739398 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:51.739408 32360 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:27:51.739413 32360 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:27:51.739423 32360 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.739478 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:51.739627 32360 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:27:51.739640 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739645 32360 net.cpp:165] Memory required for data: 1015297500\nI0821 08:27:51.739655 32360 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:27:51.739663 32360 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:27:51.739670 32360 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:27:51.739676 32360 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:51.739686 32360 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:27:51.739713 32360 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:27:51.739723 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739727 32360 net.cpp:165] Memory required for data: 1019393500\nI0821 08:27:51.739733 32360 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:27:51.739743 32360 net.cpp:100] Creating 
Layer L2_b4_relu\nI0821 08:27:51.739750 32360 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:27:51.739758 32360 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:27:51.739766 32360 net.cpp:150] Setting up L2_b4_relu\nI0821 08:27:51.739773 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739778 32360 net.cpp:165] Memory required for data: 1023489500\nI0821 08:27:51.739783 32360 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.739790 32360 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.739795 32360 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:27:51.739805 32360 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:51.739814 32360 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:51.739859 32360 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.739871 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739877 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.739882 32360 net.cpp:165] Memory required for data: 1031681500\nI0821 08:27:51.739887 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:27:51.739902 32360 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:27:51.739908 32360 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:51.739917 32360 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:27:51.740402 32360 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:27:51.740424 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.740429 32360 net.cpp:165] Memory required for data: 1035777500\nI0821 08:27:51.740438 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:27:51.740452 32360 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0821 08:27:51.740458 32360 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:27:51.740465 32360 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:27:51.740720 32360 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:27:51.740732 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.740737 32360 net.cpp:165] Memory required for data: 1039873500\nI0821 08:27:51.740747 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:51.740757 32360 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:27:51.740763 32360 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:27:51.740773 32360 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.740828 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:51.740977 32360 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:27:51.740988 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.740993 32360 net.cpp:165] Memory required for data: 1043969500\nI0821 08:27:51.741003 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:27:51.741011 32360 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:27:51.741017 32360 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:27:51.741029 32360 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.741039 32360 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:27:51.741045 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.741050 32360 net.cpp:165] Memory required for data: 1048065500\nI0821 08:27:51.741055 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:27:51.741068 32360 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:27:51.741075 32360 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:27:51.741083 32360 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:27:51.741555 32360 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:27:51.741570 32360 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.741575 32360 net.cpp:165] Memory required for data: 1052161500\nI0821 08:27:51.741585 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:27:51.741596 32360 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:27:51.741603 32360 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:27:51.741611 32360 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:27:51.741858 32360 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:27:51.741870 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.741875 32360 net.cpp:165] Memory required for data: 1056257500\nI0821 08:27:51.741886 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:51.741895 32360 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:27:51.741901 32360 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:27:51.741909 32360 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.741966 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:51.742115 32360 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:27:51.742130 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742136 32360 net.cpp:165] Memory required for data: 1060353500\nI0821 08:27:51.742144 32360 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:27:51.742153 32360 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:27:51.742159 32360 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:27:51.742166 32360 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:51.742174 32360 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:27:51.742203 32360 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:27:51.742213 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742224 32360 net.cpp:165] Memory required for data: 1064449500\nI0821 08:27:51.742230 32360 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0821 08:27:51.742238 32360 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:27:51.742244 32360 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:27:51.742254 32360 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.742269 32360 net.cpp:150] Setting up L2_b5_relu\nI0821 08:27:51.742277 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742281 32360 net.cpp:165] Memory required for data: 1068545500\nI0821 08:27:51.742286 32360 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.742295 32360 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.742300 32360 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:27:51.742310 32360 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:51.742319 32360 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:51.742364 32360 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.742375 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742383 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742388 32360 net.cpp:165] Memory required for data: 1076737500\nI0821 08:27:51.742393 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:27:51.742406 32360 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:27:51.742413 32360 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:51.742422 32360 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:27:51.742895 32360 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:27:51.742909 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.742914 32360 net.cpp:165] Memory required for data: 1080833500\nI0821 08:27:51.742923 32360 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0821 08:27:51.742934 32360 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:27:51.742941 32360 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:27:51.742949 32360 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:27:51.743197 32360 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:27:51.743211 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.743216 32360 net.cpp:165] Memory required for data: 1084929500\nI0821 08:27:51.743225 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:51.743234 32360 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:27:51.743240 32360 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:27:51.743252 32360 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.743314 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:51.743461 32360 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:27:51.743474 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.743479 32360 net.cpp:165] Memory required for data: 1089025500\nI0821 08:27:51.743489 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:27:51.743496 32360 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:27:51.743504 32360 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:27:51.743510 32360 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.743520 32360 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:27:51.743526 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.743531 32360 net.cpp:165] Memory required for data: 1093121500\nI0821 08:27:51.743536 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:27:51.743551 32360 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:27:51.743556 32360 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:27:51.743566 32360 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:27:51.744033 32360 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:27:51.744053 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744060 32360 net.cpp:165] Memory required for data: 1097217500\nI0821 08:27:51.744068 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:27:51.744081 32360 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:27:51.744087 32360 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:27:51.744098 32360 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:27:51.744350 32360 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:27:51.744364 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744369 32360 net.cpp:165] Memory required for data: 1101313500\nI0821 08:27:51.744379 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:51.744387 32360 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:27:51.744393 32360 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:27:51.744401 32360 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.744458 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:51.744604 32360 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:27:51.744621 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744626 32360 net.cpp:165] Memory required for data: 1105409500\nI0821 08:27:51.744634 32360 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:27:51.744643 32360 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:27:51.744650 32360 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:27:51.744657 32360 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:51.744665 32360 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:27:51.744694 32360 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:27:51.744704 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744709 32360 net.cpp:165] Memory required for 
data: 1109505500\nI0821 08:27:51.744714 32360 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:27:51.744722 32360 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:27:51.744729 32360 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:27:51.744738 32360 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.744747 32360 net.cpp:150] Setting up L2_b6_relu\nI0821 08:27:51.744755 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744760 32360 net.cpp:165] Memory required for data: 1113601500\nI0821 08:27:51.744763 32360 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.744771 32360 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.744776 32360 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:27:51.744786 32360 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:51.744796 32360 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:51.744840 32360 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.744853 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744858 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.744863 32360 net.cpp:165] Memory required for data: 1121793500\nI0821 08:27:51.744868 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:27:51.744882 32360 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:27:51.744889 32360 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:51.744899 32360 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:27:51.745373 32360 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:27:51.745388 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.745393 32360 net.cpp:165] Memory required for data: 
1125889500\nI0821 08:27:51.745401 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:27:51.745414 32360 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:27:51.745427 32360 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:27:51.745436 32360 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:27:51.745688 32360 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:27:51.745702 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.745707 32360 net.cpp:165] Memory required for data: 1129985500\nI0821 08:27:51.745718 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:51.745726 32360 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:27:51.745733 32360 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:27:51.745740 32360 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.745798 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:51.745945 32360 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:27:51.745960 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.745966 32360 net.cpp:165] Memory required for data: 1134081500\nI0821 08:27:51.745975 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:27:51.745985 32360 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:27:51.745990 32360 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:27:51.745997 32360 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.746007 32360 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:27:51.746014 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.746019 32360 net.cpp:165] Memory required for data: 1138177500\nI0821 08:27:51.746024 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:27:51.746039 32360 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:27:51.746045 32360 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:27:51.746057 32360 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:27:51.746546 32360 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:27:51.746562 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.746567 32360 net.cpp:165] Memory required for data: 1142273500\nI0821 08:27:51.746575 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:27:51.746588 32360 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:27:51.746595 32360 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:27:51.746605 32360 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:27:51.746857 32360 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:27:51.746871 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.746876 32360 net.cpp:165] Memory required for data: 1146369500\nI0821 08:27:51.746886 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:51.746896 32360 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:27:51.746901 32360 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:27:51.746909 32360 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.746968 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:51.747117 32360 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:27:51.747128 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.747133 32360 net.cpp:165] Memory required for data: 1150465500\nI0821 08:27:51.747143 32360 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:27:51.747154 32360 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:27:51.747161 32360 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:27:51.747169 32360 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:51.747176 32360 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:27:51.747202 32360 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:27:51.747211 32360 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:51.747216 32360 net.cpp:165] Memory required for data: 1154561500\nI0821 08:27:51.747222 32360 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:27:51.747232 32360 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:27:51.747238 32360 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:27:51.747253 32360 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.747269 32360 net.cpp:150] Setting up L2_b7_relu\nI0821 08:27:51.747277 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.747282 32360 net.cpp:165] Memory required for data: 1158657500\nI0821 08:27:51.747288 32360 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.747295 32360 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.747300 32360 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:27:51.747308 32360 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:51.747318 32360 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:51.747367 32360 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.747380 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.747386 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.747390 32360 net.cpp:165] Memory required for data: 1166849500\nI0821 08:27:51.747395 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:27:51.747409 32360 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:27:51.747416 32360 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:51.747426 32360 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:27:51.747896 32360 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:27:51.747910 32360 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:51.747915 32360 net.cpp:165] Memory required for data: 1170945500\nI0821 08:27:51.747925 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:27:51.747936 32360 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:27:51.747943 32360 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:27:51.747953 32360 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:27:51.748205 32360 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:27:51.748219 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.748224 32360 net.cpp:165] Memory required for data: 1175041500\nI0821 08:27:51.748234 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:51.748244 32360 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:27:51.748250 32360 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:27:51.748257 32360 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.748323 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:51.748474 32360 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:27:51.748489 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.748494 32360 net.cpp:165] Memory required for data: 1179137500\nI0821 08:27:51.748503 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:27:51.748512 32360 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:27:51.748518 32360 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:27:51.748525 32360 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.748534 32360 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:27:51.748541 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.748546 32360 net.cpp:165] Memory required for data: 1183233500\nI0821 08:27:51.748551 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:27:51.748564 32360 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:27:51.748571 32360 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:27:51.748582 32360 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:27:51.749052 32360 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:27:51.749065 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749070 32360 net.cpp:165] Memory required for data: 1187329500\nI0821 08:27:51.749078 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:27:51.749090 32360 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:27:51.749105 32360 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:27:51.749116 32360 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:27:51.749377 32360 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:27:51.749392 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749397 32360 net.cpp:165] Memory required for data: 1191425500\nI0821 08:27:51.749406 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:51.749415 32360 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:27:51.749423 32360 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:27:51.749429 32360 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.749486 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:51.749632 32360 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:27:51.749644 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749650 32360 net.cpp:165] Memory required for data: 1195521500\nI0821 08:27:51.749660 32360 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:27:51.749671 32360 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:27:51.749677 32360 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:27:51.749685 32360 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:51.749692 32360 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:27:51.749719 32360 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0821 08:27:51.749728 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749733 32360 net.cpp:165] Memory required for data: 1199617500\nI0821 08:27:51.749738 32360 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:27:51.749748 32360 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:27:51.749755 32360 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:27:51.749763 32360 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.749771 32360 net.cpp:150] Setting up L2_b8_relu\nI0821 08:27:51.749778 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749783 32360 net.cpp:165] Memory required for data: 1203713500\nI0821 08:27:51.749788 32360 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.749795 32360 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.749800 32360 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:27:51.749809 32360 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:51.749830 32360 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:51.749881 32360 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.749892 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749899 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.749903 32360 net.cpp:165] Memory required for data: 1211905500\nI0821 08:27:51.749909 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:27:51.749923 32360 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:27:51.749929 32360 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:51.749943 32360 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:27:51.750417 32360 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0821 08:27:51.750432 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.750437 32360 net.cpp:165] Memory required for data: 1216001500\nI0821 08:27:51.750447 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:27:51.750458 32360 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:27:51.750465 32360 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:27:51.750476 32360 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:27:51.750727 32360 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:27:51.750739 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.750751 32360 net.cpp:165] Memory required for data: 1220097500\nI0821 08:27:51.750762 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:51.750772 32360 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:27:51.750778 32360 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:27:51.750787 32360 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.750845 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:51.750993 32360 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:27:51.751006 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.751011 32360 net.cpp:165] Memory required for data: 1224193500\nI0821 08:27:51.751020 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:27:51.751029 32360 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:27:51.751034 32360 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:27:51.751044 32360 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.751055 32360 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:27:51.751062 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.751066 32360 net.cpp:165] Memory required for data: 1228289500\nI0821 08:27:51.751071 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:27:51.751085 32360 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:27:51.751091 32360 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:27:51.751099 32360 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:27:51.751574 32360 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:27:51.751588 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.751593 32360 net.cpp:165] Memory required for data: 1232385500\nI0821 08:27:51.751602 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:27:51.751615 32360 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:27:51.751621 32360 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:27:51.751629 32360 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:27:51.751883 32360 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:27:51.751899 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.751904 32360 net.cpp:165] Memory required for data: 1236481500\nI0821 08:27:51.751947 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:51.751962 32360 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:27:51.751969 32360 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:27:51.751977 32360 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.752033 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:51.752182 32360 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:27:51.752194 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.752199 32360 net.cpp:165] Memory required for data: 1240577500\nI0821 08:27:51.752208 32360 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:27:51.752218 32360 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:27:51.752224 32360 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:27:51.752233 32360 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:51.752243 32360 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0821 08:27:51.752277 32360 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:27:51.752288 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.752293 32360 net.cpp:165] Memory required for data: 1244673500\nI0821 08:27:51.752300 32360 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:27:51.752310 32360 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:27:51.752316 32360 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:27:51.752323 32360 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.752332 32360 net.cpp:150] Setting up L2_b9_relu\nI0821 08:27:51.752341 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.752344 32360 net.cpp:165] Memory required for data: 1248769500\nI0821 08:27:51.752355 32360 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.752367 32360 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.752372 32360 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:27:51.752379 32360 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:51.752389 32360 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:51.752439 32360 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.752454 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.752460 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.752465 32360 net.cpp:165] Memory required for data: 1256961500\nI0821 08:27:51.752470 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:27:51.752481 32360 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:27:51.752488 32360 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:51.752496 32360 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0821 08:27:51.752969 32360 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:27:51.752984 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.752988 32360 net.cpp:165] Memory required for data: 1257985500\nI0821 08:27:51.752997 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:27:51.753010 32360 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:27:51.753015 32360 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:27:51.753024 32360 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:27:51.753291 32360 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:27:51.753304 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.753309 32360 net.cpp:165] Memory required for data: 1259009500\nI0821 08:27:51.753320 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:51.753332 32360 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:27:51.753338 32360 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:27:51.753348 32360 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.753404 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:51.753558 32360 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:27:51.753571 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.753576 32360 net.cpp:165] Memory required for data: 1260033500\nI0821 08:27:51.753585 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:27:51.753594 32360 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:27:51.753600 32360 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:27:51.753610 32360 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.753620 32360 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:27:51.753628 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.753633 32360 net.cpp:165] Memory required for data: 1261057500\nI0821 08:27:51.753638 32360 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0821 08:27:51.753648 32360 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:27:51.753654 32360 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:27:51.753666 32360 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:27:51.754135 32360 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:27:51.754149 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754154 32360 net.cpp:165] Memory required for data: 1262081500\nI0821 08:27:51.754163 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:27:51.754175 32360 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:27:51.754182 32360 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:27:51.754190 32360 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:27:51.754456 32360 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:27:51.754470 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754482 32360 net.cpp:165] Memory required for data: 1263105500\nI0821 08:27:51.754492 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:51.754501 32360 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:27:51.754508 32360 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:27:51.754516 32360 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.754575 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:51.754729 32360 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:27:51.754741 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754746 32360 net.cpp:165] Memory required for data: 1264129500\nI0821 08:27:51.754755 32360 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:27:51.754765 32360 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:27:51.754771 32360 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:51.754782 32360 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:27:51.754817 32360 net.cpp:150] 
Setting up L3_b1_pool\nI0821 08:27:51.754830 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754837 32360 net.cpp:165] Memory required for data: 1265153500\nI0821 08:27:51.754842 32360 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:27:51.754850 32360 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:27:51.754856 32360 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:27:51.754863 32360 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:27:51.754870 32360 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:27:51.754904 32360 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:27:51.754914 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754918 32360 net.cpp:165] Memory required for data: 1266177500\nI0821 08:27:51.754925 32360 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:27:51.754931 32360 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:27:51.754937 32360 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:27:51.754945 32360 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.754954 32360 net.cpp:150] Setting up L3_b1_relu\nI0821 08:27:51.754961 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.754966 32360 net.cpp:165] Memory required for data: 1267201500\nI0821 08:27:51.754971 32360 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:27:51.754979 32360 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:27:51.754989 32360 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:27:51.756204 32360 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:27:51.756225 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.756232 32360 net.cpp:165] Memory required for data: 1268225500\nI0821 08:27:51.756237 32360 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:27:51.756247 32360 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:27:51.756253 32360 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 
08:27:51.756266 32360 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:27:51.756275 32360 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:27:51.756317 32360 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:27:51.756330 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.756335 32360 net.cpp:165] Memory required for data: 1270273500\nI0821 08:27:51.756340 32360 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.756347 32360 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.756356 32360 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:27:51.756364 32360 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:51.756374 32360 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:51.756424 32360 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.756436 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.756443 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.756454 32360 net.cpp:165] Memory required for data: 1274369500\nI0821 08:27:51.756460 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:27:51.756475 32360 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:27:51.756482 32360 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:51.756491 32360 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:27:51.758455 32360 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:27:51.758474 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.758479 32360 net.cpp:165] Memory required for data: 1276417500\nI0821 08:27:51.758489 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:27:51.758499 32360 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:27:51.758507 32360 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 
08:27:51.758517 32360 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:27:51.758781 32360 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:27:51.758797 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.758802 32360 net.cpp:165] Memory required for data: 1278465500\nI0821 08:27:51.758813 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:51.758823 32360 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:27:51.758829 32360 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:27:51.758837 32360 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.758895 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:51.759053 32360 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:27:51.759065 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.759071 32360 net.cpp:165] Memory required for data: 1280513500\nI0821 08:27:51.759080 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:27:51.759088 32360 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:27:51.759095 32360 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:27:51.759105 32360 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.759115 32360 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:27:51.759124 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.759129 32360 net.cpp:165] Memory required for data: 1282561500\nI0821 08:27:51.759132 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:27:51.759146 32360 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:27:51.759153 32360 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:27:51.759162 32360 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:27:51.760184 32360 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:27:51.760198 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.760203 32360 net.cpp:165] Memory required for data: 
1284609500\nI0821 08:27:51.760212 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:27:51.760226 32360 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:27:51.760232 32360 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:27:51.760243 32360 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:27:51.760511 32360 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:27:51.760525 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.760530 32360 net.cpp:165] Memory required for data: 1286657500\nI0821 08:27:51.760541 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:51.760550 32360 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:27:51.760556 32360 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:27:51.760565 32360 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.760623 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:51.760778 32360 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:27:51.760792 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.760797 32360 net.cpp:165] Memory required for data: 1288705500\nI0821 08:27:51.760805 32360 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:27:51.760823 32360 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:27:51.760829 32360 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:27:51.760836 32360 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:51.760846 32360 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:27:51.760881 32360 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:27:51.760896 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.760901 32360 net.cpp:165] Memory required for data: 1290753500\nI0821 08:27:51.760906 32360 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:27:51.760915 32360 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:27:51.760921 32360 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:27:51.760927 32360 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:27:51.760936 32360 net.cpp:150] Setting up L3_b2_relu\nI0821 08:27:51.760944 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.760948 32360 net.cpp:165] Memory required for data: 1292801500\nI0821 08:27:51.760953 32360 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.760963 32360 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.760969 32360 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:27:51.760977 32360 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:51.760987 32360 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:51.761037 32360 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.761049 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.761055 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.761060 32360 net.cpp:165] Memory required for data: 1296897500\nI0821 08:27:51.761065 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:27:51.761076 32360 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:27:51.761083 32360 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:51.761096 32360 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:27:51.762120 32360 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:27:51.762135 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.762140 32360 net.cpp:165] Memory required for data: 1298945500\nI0821 08:27:51.762150 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:27:51.762159 32360 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:27:51.762166 32360 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:27:51.762176 32360 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:27:51.762447 32360 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:27:51.762465 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.762470 32360 net.cpp:165] Memory required for data: 1300993500\nI0821 08:27:51.762480 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:51.762490 32360 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:27:51.762496 32360 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:27:51.762503 32360 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.762560 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:51.762715 32360 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:27:51.762727 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.762732 32360 net.cpp:165] Memory required for data: 1303041500\nI0821 08:27:51.762742 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:27:51.762753 32360 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:27:51.762759 32360 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:27:51.762766 32360 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.762776 32360 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:27:51.762784 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.762795 32360 net.cpp:165] Memory required for data: 1305089500\nI0821 08:27:51.762800 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:27:51.762814 32360 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:27:51.762820 32360 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:27:51.762830 32360 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:27:51.763851 32360 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:27:51.763865 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.763871 32360 
net.cpp:165] Memory required for data: 1307137500\nI0821 08:27:51.763880 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:27:51.763892 32360 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:27:51.763900 32360 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:27:51.763911 32360 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:27:51.764176 32360 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:27:51.764189 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764194 32360 net.cpp:165] Memory required for data: 1309185500\nI0821 08:27:51.764204 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:51.764214 32360 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:27:51.764220 32360 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:27:51.764230 32360 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.764295 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:51.764456 32360 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:27:51.764469 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764474 32360 net.cpp:165] Memory required for data: 1311233500\nI0821 08:27:51.764484 32360 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:27:51.764493 32360 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:27:51.764500 32360 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:27:51.764508 32360 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:51.764519 32360 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:27:51.764555 32360 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:27:51.764567 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764572 32360 net.cpp:165] Memory required for data: 1313281500\nI0821 08:27:51.764577 32360 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:27:51.764586 32360 net.cpp:100] Creating Layer 
L3_b3_relu\nI0821 08:27:51.764591 32360 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:27:51.764601 32360 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.764611 32360 net.cpp:150] Setting up L3_b3_relu\nI0821 08:27:51.764618 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764623 32360 net.cpp:165] Memory required for data: 1315329500\nI0821 08:27:51.764628 32360 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.764636 32360 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.764641 32360 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:27:51.764648 32360 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:51.764657 32360 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:51.764706 32360 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.764719 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764724 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.764729 32360 net.cpp:165] Memory required for data: 1319425500\nI0821 08:27:51.764734 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:27:51.764745 32360 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:27:51.764752 32360 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:51.764763 32360 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:27:51.765810 32360 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:27:51.765826 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.765831 32360 net.cpp:165] Memory required for data: 1321473500\nI0821 08:27:51.765841 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:27:51.765851 32360 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0821 08:27:51.765857 32360 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:27:51.765868 32360 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:27:51.766139 32360 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:27:51.766155 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.766160 32360 net.cpp:165] Memory required for data: 1323521500\nI0821 08:27:51.766171 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:51.766180 32360 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:27:51.766186 32360 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:27:51.766194 32360 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.766252 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:51.766418 32360 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:27:51.766433 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.766438 32360 net.cpp:165] Memory required for data: 1325569500\nI0821 08:27:51.766446 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:27:51.766458 32360 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:27:51.766463 32360 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:27:51.766471 32360 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.766481 32360 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:27:51.766489 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.766494 32360 net.cpp:165] Memory required for data: 1327617500\nI0821 08:27:51.766499 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:27:51.766511 32360 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:27:51.766517 32360 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:27:51.766526 32360 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:27:51.767556 32360 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:27:51.767570 32360 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0821 08:27:51.767575 32360 net.cpp:165] Memory required for data: 1329665500\nI0821 08:27:51.767585 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:27:51.767599 32360 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:27:51.767606 32360 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:27:51.767617 32360 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:27:51.767886 32360 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:27:51.767899 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.767904 32360 net.cpp:165] Memory required for data: 1331713500\nI0821 08:27:51.767915 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:51.767925 32360 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:27:51.767930 32360 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:27:51.767941 32360 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.768000 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:51.768163 32360 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:27:51.768177 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.768180 32360 net.cpp:165] Memory required for data: 1333761500\nI0821 08:27:51.768190 32360 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:27:51.768199 32360 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:27:51.768205 32360 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:27:51.768215 32360 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:51.768224 32360 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:27:51.768267 32360 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:27:51.768285 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.768290 32360 net.cpp:165] Memory required for data: 1335809500\nI0821 08:27:51.768296 32360 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0821 08:27:51.768304 32360 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:27:51.768311 32360 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:27:51.768321 32360 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:27:51.768331 32360 net.cpp:150] Setting up L3_b4_relu\nI0821 08:27:51.768337 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.768342 32360 net.cpp:165] Memory required for data: 1337857500\nI0821 08:27:51.768347 32360 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.768354 32360 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.768359 32360 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:27:51.768366 32360 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:51.768376 32360 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:51.768429 32360 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.768440 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.768447 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.768452 32360 net.cpp:165] Memory required for data: 1341953500\nI0821 08:27:51.768457 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:27:51.768468 32360 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:27:51.768476 32360 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:51.768486 32360 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:27:51.769521 32360 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:27:51.769536 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.769541 32360 net.cpp:165] Memory required for data: 1344001500\nI0821 08:27:51.769551 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 
08:27:51.769559 32360 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:27:51.769567 32360 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:27:51.769577 32360 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:27:51.770812 32360 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:27:51.770829 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.770835 32360 net.cpp:165] Memory required for data: 1346049500\nI0821 08:27:51.770846 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:51.770856 32360 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:27:51.770862 32360 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:27:51.770874 32360 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.770936 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:51.771097 32360 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:27:51.771111 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.771116 32360 net.cpp:165] Memory required for data: 1348097500\nI0821 08:27:51.771126 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:27:51.771134 32360 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:27:51.771140 32360 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:27:51.771152 32360 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.771162 32360 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:27:51.771169 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.771173 32360 net.cpp:165] Memory required for data: 1350145500\nI0821 08:27:51.771178 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:27:51.771193 32360 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:27:51.771199 32360 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:27:51.771208 32360 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:27:51.773233 32360 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0821 08:27:51.773252 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.773257 32360 net.cpp:165] Memory required for data: 1352193500\nI0821 08:27:51.773273 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:27:51.773288 32360 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:27:51.773294 32360 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:27:51.773305 32360 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:27:51.773566 32360 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:27:51.773578 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.773583 32360 net.cpp:165] Memory required for data: 1354241500\nI0821 08:27:51.773594 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:51.773603 32360 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:27:51.773609 32360 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:27:51.773620 32360 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.773677 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:51.773830 32360 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:27:51.773844 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.773849 32360 net.cpp:165] Memory required for data: 1356289500\nI0821 08:27:51.773857 32360 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:27:51.773867 32360 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:27:51.773874 32360 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:27:51.773880 32360 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:51.773891 32360 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:27:51.773927 32360 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:27:51.773936 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.773941 32360 net.cpp:165] Memory required for data: 1358337500\nI0821 
08:27:51.773947 32360 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:27:51.773954 32360 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:27:51.773960 32360 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:27:51.773970 32360 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.773980 32360 net.cpp:150] Setting up L3_b5_relu\nI0821 08:27:51.773988 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.773993 32360 net.cpp:165] Memory required for data: 1360385500\nI0821 08:27:51.773996 32360 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.774003 32360 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.774009 32360 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:27:51.774016 32360 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:51.774026 32360 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:51.774073 32360 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.774085 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.774092 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.774096 32360 net.cpp:165] Memory required for data: 1364481500\nI0821 08:27:51.774101 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:27:51.774113 32360 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:27:51.774119 32360 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:51.774132 32360 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:27:51.775152 32360 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:27:51.775167 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.775172 32360 net.cpp:165] Memory required for data: 1366529500\nI0821 08:27:51.775182 
32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:27:51.775190 32360 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:27:51.775204 32360 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:27:51.775218 32360 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:27:51.775486 32360 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:27:51.775501 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.775506 32360 net.cpp:165] Memory required for data: 1368577500\nI0821 08:27:51.775516 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:51.775524 32360 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:27:51.775532 32360 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:27:51.775538 32360 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.775598 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:51.775749 32360 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:27:51.775763 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.775768 32360 net.cpp:165] Memory required for data: 1370625500\nI0821 08:27:51.775776 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:27:51.775787 32360 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:27:51.775794 32360 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:27:51.775801 32360 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.775811 32360 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:27:51.775820 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.775823 32360 net.cpp:165] Memory required for data: 1372673500\nI0821 08:27:51.775828 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:27:51.775842 32360 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:27:51.775848 32360 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:27:51.775857 32360 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0821 08:27:51.776868 32360 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:27:51.776883 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.776888 32360 net.cpp:165] Memory required for data: 1374721500\nI0821 08:27:51.776897 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:27:51.776909 32360 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:27:51.776916 32360 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:27:51.776927 32360 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:27:51.777186 32360 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:27:51.777200 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777205 32360 net.cpp:165] Memory required for data: 1376769500\nI0821 08:27:51.777215 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:51.777225 32360 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:27:51.777230 32360 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:27:51.777241 32360 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.777304 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:51.777463 32360 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:27:51.777477 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777482 32360 net.cpp:165] Memory required for data: 1378817500\nI0821 08:27:51.777490 32360 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:27:51.777503 32360 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:27:51.777509 32360 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:27:51.777516 32360 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:51.777524 32360 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:27:51.777559 32360 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:27:51.777570 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777573 
32360 net.cpp:165] Memory required for data: 1380865500\nI0821 08:27:51.777580 32360 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:27:51.777587 32360 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:27:51.777593 32360 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:27:51.777609 32360 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.777621 32360 net.cpp:150] Setting up L3_b6_relu\nI0821 08:27:51.777627 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777632 32360 net.cpp:165] Memory required for data: 1382913500\nI0821 08:27:51.777637 32360 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.777644 32360 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.777649 32360 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:27:51.777657 32360 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:51.777667 32360 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:51.777715 32360 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.777727 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777734 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.777739 32360 net.cpp:165] Memory required for data: 1387009500\nI0821 08:27:51.777743 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:27:51.777755 32360 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:27:51.777761 32360 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:51.777773 32360 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:27:51.778823 32360 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:27:51.778839 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.778844 32360 net.cpp:165] 
Memory required for data: 1389057500\nI0821 08:27:51.778853 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:27:51.778863 32360 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:27:51.778870 32360 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:27:51.778882 32360 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:27:51.779145 32360 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:27:51.779157 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.779162 32360 net.cpp:165] Memory required for data: 1391105500\nI0821 08:27:51.779173 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:51.779183 32360 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:27:51.779189 32360 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:27:51.779197 32360 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.779256 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:51.779422 32360 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:27:51.779438 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.779443 32360 net.cpp:165] Memory required for data: 1393153500\nI0821 08:27:51.779453 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:27:51.779485 32360 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:27:51.779495 32360 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:27:51.779502 32360 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.779513 32360 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:27:51.779521 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.779525 32360 net.cpp:165] Memory required for data: 1395201500\nI0821 08:27:51.779531 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:27:51.779546 32360 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:27:51.779552 32360 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:27:51.779561 
32360 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:27:51.780586 32360 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:27:51.780601 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.780606 32360 net.cpp:165] Memory required for data: 1397249500\nI0821 08:27:51.780616 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:27:51.780627 32360 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:27:51.780639 32360 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:27:51.780652 32360 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:27:51.780917 32360 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:27:51.780933 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.780938 32360 net.cpp:165] Memory required for data: 1399297500\nI0821 08:27:51.780949 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:51.780958 32360 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:27:51.780964 32360 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:27:51.780972 32360 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.781029 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:51.781183 32360 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:27:51.781195 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.781200 32360 net.cpp:165] Memory required for data: 1401345500\nI0821 08:27:51.781209 32360 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:27:51.781221 32360 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:27:51.781227 32360 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:27:51.781234 32360 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:51.781242 32360 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:27:51.781286 32360 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:27:51.781297 32360 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0821 08:27:51.781301 32360 net.cpp:165] Memory required for data: 1403393500\nI0821 08:27:51.781307 32360 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:27:51.781316 32360 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:27:51.781322 32360 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:27:51.781328 32360 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.781338 32360 net.cpp:150] Setting up L3_b7_relu\nI0821 08:27:51.781345 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.781349 32360 net.cpp:165] Memory required for data: 1405441500\nI0821 08:27:51.781354 32360 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.781361 32360 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.781366 32360 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:27:51.781376 32360 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:51.781388 32360 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:51.781433 32360 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.781445 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.781450 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.781455 32360 net.cpp:165] Memory required for data: 1409537500\nI0821 08:27:51.781461 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:27:51.781477 32360 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:27:51.781484 32360 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:51.781493 32360 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:27:51.782506 32360 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:27:51.782521 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:27:51.782526 32360 net.cpp:165] Memory required for data: 1411585500\nI0821 08:27:51.782536 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:27:51.782547 32360 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:27:51.782553 32360 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:27:51.782562 32360 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:27:51.782824 32360 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:27:51.782836 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.782841 32360 net.cpp:165] Memory required for data: 1413633500\nI0821 08:27:51.782858 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:51.782871 32360 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:27:51.782877 32360 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:27:51.782886 32360 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.782948 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:51.783104 32360 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:27:51.783118 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.783123 32360 net.cpp:165] Memory required for data: 1415681500\nI0821 08:27:51.783131 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:27:51.783143 32360 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:27:51.783149 32360 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:27:51.783156 32360 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.783166 32360 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:27:51.783176 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.783180 32360 net.cpp:165] Memory required for data: 1417729500\nI0821 08:27:51.783185 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:27:51.783196 32360 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:27:51.783202 32360 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0821 08:27:51.783213 32360 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:27:51.784222 32360 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:27:51.784237 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.784242 32360 net.cpp:165] Memory required for data: 1419777500\nI0821 08:27:51.784250 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:27:51.784265 32360 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:27:51.784273 32360 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:27:51.784286 32360 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:27:51.784554 32360 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:27:51.784569 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.784574 32360 net.cpp:165] Memory required for data: 1421825500\nI0821 08:27:51.784584 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:51.784592 32360 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:27:51.784600 32360 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:27:51.784606 32360 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.784665 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:51.784817 32360 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:27:51.784833 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.784838 32360 net.cpp:165] Memory required for data: 1423873500\nI0821 08:27:51.784847 32360 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:27:51.784857 32360 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:27:51.784863 32360 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:27:51.784870 32360 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:51.784878 32360 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:27:51.784914 32360 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 
08:27:51.784924 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.784927 32360 net.cpp:165] Memory required for data: 1425921500\nI0821 08:27:51.784934 32360 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:27:51.784940 32360 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:27:51.784946 32360 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:27:51.784953 32360 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.784963 32360 net.cpp:150] Setting up L3_b8_relu\nI0821 08:27:51.784970 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.784976 32360 net.cpp:165] Memory required for data: 1427969500\nI0821 08:27:51.784979 32360 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.784993 32360 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.784999 32360 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:27:51.785009 32360 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:51.785019 32360 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:51.785065 32360 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.785080 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.785087 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.785092 32360 net.cpp:165] Memory required for data: 1432065500\nI0821 08:27:51.785097 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:27:51.785109 32360 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:27:51.785115 32360 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:51.785123 32360 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:27:51.787135 32360 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:27:51.787153 
32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.787158 32360 net.cpp:165] Memory required for data: 1434113500\nI0821 08:27:51.787168 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:27:51.787181 32360 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:27:51.787189 32360 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:27:51.787197 32360 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:27:51.787473 32360 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:27:51.787487 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.787492 32360 net.cpp:165] Memory required for data: 1436161500\nI0821 08:27:51.787503 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:51.787513 32360 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:27:51.787519 32360 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:27:51.787528 32360 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.787587 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:51.787745 32360 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:27:51.787758 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.787763 32360 net.cpp:165] Memory required for data: 1438209500\nI0821 08:27:51.787773 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:27:51.787781 32360 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:27:51.787787 32360 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:27:51.787798 32360 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.787808 32360 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:27:51.787817 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.787820 32360 net.cpp:165] Memory required for data: 1440257500\nI0821 08:27:51.787825 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:27:51.787839 32360 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 
08:27:51.787847 32360 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:27:51.787855 32360 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:27:51.788872 32360 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:27:51.788887 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.788892 32360 net.cpp:165] Memory required for data: 1442305500\nI0821 08:27:51.788900 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:27:51.788913 32360 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:27:51.788920 32360 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:27:51.788928 32360 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:27:51.789193 32360 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:27:51.789206 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.789211 32360 net.cpp:165] Memory required for data: 1444353500\nI0821 08:27:51.789229 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:51.789242 32360 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:27:51.789249 32360 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:27:51.789257 32360 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.789327 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:51.789484 32360 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:27:51.789497 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.789502 32360 net.cpp:165] Memory required for data: 1446401500\nI0821 08:27:51.789511 32360 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:27:51.789525 32360 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:27:51.789530 32360 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:27:51.789538 32360 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:51.789548 32360 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:27:51.789582 32360 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:27:51.789590 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.789595 32360 net.cpp:165] Memory required for data: 1448449500\nI0821 08:27:51.789600 32360 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:27:51.789613 32360 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:27:51.789619 32360 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:27:51.789626 32360 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.789635 32360 net.cpp:150] Setting up L3_b9_relu\nI0821 08:27:51.789644 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.789647 32360 net.cpp:165] Memory required for data: 1450497500\nI0821 08:27:51.789652 32360 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:27:51.789661 32360 net.cpp:100] Creating Layer post_pool\nI0821 08:27:51.789666 32360 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:27:51.789674 32360 net.cpp:408] post_pool -> post_pool\nI0821 08:27:51.789710 32360 net.cpp:150] Setting up post_pool\nI0821 08:27:51.789721 32360 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:27:51.789726 32360 net.cpp:165] Memory required for data: 1450529500\nI0821 08:27:51.789731 32360 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:27:51.789822 32360 net.cpp:100] Creating Layer post_FC\nI0821 08:27:51.789836 32360 net.cpp:434] post_FC <- post_pool\nI0821 08:27:51.789845 32360 net.cpp:408] post_FC -> post_FC_top\nI0821 08:27:51.790112 32360 net.cpp:150] Setting up post_FC\nI0821 08:27:51.790127 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.790132 32360 net.cpp:165] Memory required for data: 1450534500\nI0821 08:27:51.790143 32360 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:27:51.790150 32360 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:27:51.790158 32360 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:27:51.790165 32360 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:27:51.790179 32360 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:27:51.790225 32360 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:27:51.790237 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.790243 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.790248 32360 net.cpp:165] Memory required for data: 1450544500\nI0821 08:27:51.790253 32360 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:27:51.790311 32360 net.cpp:100] Creating Layer accuracy\nI0821 08:27:51.790324 32360 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:27:51.790333 32360 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:27:51.790340 32360 net.cpp:408] accuracy -> accuracy\nI0821 08:27:51.790388 32360 net.cpp:150] Setting up accuracy\nI0821 08:27:51.790401 32360 net.cpp:157] Top shape: (1)\nI0821 08:27:51.790406 32360 net.cpp:165] Memory required for data: 1450544504\nI0821 08:27:51.790419 32360 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:51.790429 32360 net.cpp:100] Creating Layer loss\nI0821 08:27:51.790436 32360 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:27:51.790442 32360 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:27:51.790451 32360 net.cpp:408] loss -> loss\nI0821 08:27:51.791692 32360 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:51.793085 32360 net.cpp:150] Setting up loss\nI0821 08:27:51.793102 32360 net.cpp:157] Top shape: (1)\nI0821 08:27:51.793107 32360 net.cpp:160]     with loss weight 1\nI0821 08:27:51.793192 32360 net.cpp:165] Memory required for data: 1450544508\nI0821 08:27:51.793201 32360 net.cpp:226] loss needs backward computation.\nI0821 08:27:51.793208 32360 net.cpp:228] accuracy does not need backward computation.\nI0821 08:27:51.793215 32360 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:27:51.793220 32360 net.cpp:226] 
post_FC needs backward computation.\nI0821 08:27:51.793226 32360 net.cpp:226] post_pool needs backward computation.\nI0821 08:27:51.793231 32360 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:27:51.793236 32360 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.793241 32360 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.793246 32360 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.793251 32360 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.793256 32360 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.793268 32360 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.793274 32360 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.793279 32360 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.793284 32360 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:27:51.793290 32360 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:27:51.793295 32360 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.793300 32360 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.793305 32360 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.793310 32360 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.793315 32360 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.793320 32360 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:27:51.793325 32360 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.793330 32360 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.793335 32360 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:27:51.793340 32360 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:27:51.793345 32360 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0821 08:27:51.793351 32360 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.793356 32360 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.793361 32360 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.793366 32360 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.793371 32360 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.793375 32360 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.793380 32360 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.793386 32360 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:27:51.793391 32360 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:27:51.793396 32360 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.793401 32360 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.793406 32360 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.793411 32360 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.793424 32360 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.793431 32360 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.793436 32360 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.793440 32360 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.793445 32360 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:27:51.793450 32360 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:27:51.793455 32360 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:27:51.793462 32360 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.793467 32360 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.793473 
32360 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.793478 32360 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.793483 32360 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.793488 32360 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.793493 32360 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.793498 32360 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:27:51.793503 32360 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:27:51.793509 32360 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.793517 32360 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.793522 32360 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.793529 32360 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.793534 32360 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.793539 32360 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.793543 32360 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.793548 32360 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.793555 32360 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:27:51.793560 32360 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:27:51.793565 32360 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:27:51.793570 32360 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.793575 32360 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.793579 32360 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.793584 32360 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.793589 32360 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0821 08:27:51.793594 32360 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.793599 32360 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.793606 32360 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:27:51.793611 32360 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:27:51.793615 32360 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.793622 32360 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.793627 32360 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.793632 32360 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.793637 32360 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.793642 32360 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.793647 32360 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.793651 32360 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.793658 32360 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:27:51.793663 32360 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:27:51.793668 32360 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:27:51.793678 32360 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:27:51.793684 32360 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.793690 32360 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:27:51.793695 32360 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.793700 32360 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.793706 32360 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.793711 32360 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.793716 32360 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0821 08:27:51.793721 32360 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.793726 32360 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.793732 32360 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:27:51.793737 32360 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:27:51.793742 32360 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.793748 32360 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.793753 32360 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.793758 32360 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.793764 32360 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.793769 32360 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.793774 32360 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.793779 32360 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.793786 32360 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:27:51.793794 32360 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:27:51.793799 32360 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.793805 32360 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.793812 32360 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.793817 32360 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.793823 32360 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.793828 32360 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:27:51.793833 32360 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.793838 32360 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.793843 32360 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:27:51.793849 32360 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:27:51.793854 32360 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:27:51.793860 32360 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.793865 32360 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.793871 32360 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.793876 32360 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.793881 32360 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.793886 32360 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.793891 32360 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.793897 32360 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:27:51.793903 32360 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:27:51.793908 32360 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.793915 32360 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.793920 32360 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.793925 32360 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.793929 32360 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.793934 32360 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.793946 32360 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.793951 32360 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.793956 32360 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:27:51.793962 32360 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:27:51.793967 32360 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:27:51.793972 32360 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.793978 32360 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.793983 32360 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.793988 32360 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.793993 32360 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.793998 32360 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.794004 32360 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.794009 32360 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:27:51.794015 32360 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:27:51.794020 32360 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.794026 32360 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.794031 32360 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.794037 32360 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.794042 32360 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.794047 32360 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.794054 32360 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.794059 32360 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.794064 32360 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:27:51.794070 32360 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:27:51.794075 32360 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:27:51.794081 32360 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.794086 32360 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.794092 32360 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.794098 32360 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.794103 32360 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:27:51.794108 32360 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.794113 32360 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.794119 32360 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:27:51.794126 32360 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:27:51.794131 32360 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.794137 32360 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.794142 32360 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.794147 32360 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.794153 32360 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.794158 32360 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.794163 32360 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.794169 32360 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.794178 32360 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:27:51.794183 32360 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:27:51.794190 32360 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:27:51.794195 32360 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:27:51.794200 32360 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.794211 32360 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:27:51.794219 32360 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.794224 32360 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.794229 32360 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.794235 32360 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.794240 32360 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:27:51.794246 32360 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.794251 32360 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.794257 32360 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:27:51.794270 32360 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:27:51.794276 32360 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.794282 32360 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.794287 32360 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.794293 32360 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.794299 32360 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.794304 32360 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.794309 32360 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.794315 32360 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.794322 32360 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:27:51.794327 32360 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:27:51.794332 32360 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.794338 32360 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.794347 32360 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.794353 32360 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.794358 32360 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.794363 32360 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:27:51.794368 32360 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.794374 32360 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.794380 32360 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:27:51.794385 32360 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:27:51.794391 32360 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:27:51.794397 32360 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.794404 32360 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.794409 32360 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.794415 32360 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.794420 32360 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.794425 32360 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.794431 32360 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.794437 32360 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:27:51.794443 32360 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:27:51.794448 32360 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.794456 32360 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.794461 32360 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.794466 32360 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.794472 32360 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.794477 32360 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.794482 32360 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.794493 32360 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.794500 32360 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:27:51.794507 32360 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:27:51.794512 32360 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:27:51.794517 32360 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.794523 32360 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.794528 32360 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.794534 32360 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.794539 32360 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.794545 32360 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.794551 32360 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.794556 32360 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:27:51.794562 32360 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:27:51.794567 32360 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.794574 32360 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.794579 32360 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.794585 32360 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.794590 32360 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.794596 32360 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.794601 32360 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.794607 32360 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.794613 32360 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:27:51.794618 32360 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:27:51.794625 32360 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:27:51.794631 32360 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.794636 32360 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.794642 32360 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.794647 32360 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.794653 32360 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:27:51.794658 32360 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.794664 32360 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.794670 32360 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:27:51.794675 32360 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:27:51.794682 32360 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.794687 32360 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.794693 32360 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.794698 32360 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.794704 32360 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.794709 32360 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.794715 32360 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.794720 32360 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.794726 32360 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:27:51.794733 32360 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:27:51.794737 32360 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.794744 32360 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.794749 32360 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.794755 32360 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.794766 32360 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.794772 32360 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:27:51.794777 32360 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.794783 32360 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.794790 32360 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:27:51.794795 32360 net.cpp:226] pre_relu needs backward computation.\nI0821 08:27:51.794800 32360 net.cpp:226] pre_scale needs backward computation.\nI0821 08:27:51.794806 32360 net.cpp:226] pre_bn needs backward computation.\nI0821 08:27:51.794811 32360 net.cpp:226] pre_conv needs backward computation.\nI0821 08:27:51.794817 32360 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:27:51.794824 32360 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:27:51.794828 32360 net.cpp:270] This network produces output accuracy\nI0821 08:27:51.794836 32360 net.cpp:270] This network produces output loss\nI0821 08:27:51.795208 32360 net.cpp:283] Network initialization done.\nI0821 08:27:51.804689 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:51.804731 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:51.804800 32360 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:27:51.805181 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:27:51.805198 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:27:51.805208 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 08:27:51.805218 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:27:51.805228 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:27:51.805236 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:27:51.805245 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:27:51.805254 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:27:51.805272 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:27:51.805282 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:27:51.805291 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:27:51.805299 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:27:51.805308 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:27:51.805318 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:27:51.805327 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:27:51.805335 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:27:51.805344 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:27:51.805353 32360 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 08:27:51.805362 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:27:51.805382 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:27:51.805392 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:27:51.805399 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:27:51.805411 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:27:51.805420 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:27:51.805429 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:27:51.805438 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:27:51.805446 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:27:51.805454 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:27:51.805464 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:27:51.805471 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:27:51.805480 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:27:51.805490 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:27:51.805498 32360 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:27:51.805506 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:27:51.805516 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:27:51.805523 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:27:51.805532 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:27:51.805541 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:27:51.805549 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:27:51.805557 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:27:51.805569 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:27:51.805577 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:27:51.805585 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:27:51.805593 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:27:51.805603 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:27:51.805610 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:27:51.805620 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 08:27:51.805627 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:27:51.805636 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:27:51.805652 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:27:51.805661 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:27:51.805670 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:27:51.805680 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:27:51.805687 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:27:51.805697 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:27:51.805704 32360 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:27:51.807355 32360 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0821 08:27:51.809036 32360 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:27:51.809288 32360 net.cpp:100] Creating Layer dataLayer\nI0821 08:27:51.809311 32360 net.cpp:408] dataLayer -> data_top\nI0821 08:27:51.809329 32360 net.cpp:408] dataLayer -> label\nI0821 08:27:51.809341 32360 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:27:51.823268 32367 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:27:51.823521 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:51.830870 32360 net.cpp:150] Setting up dataLayer\nI0821 08:27:51.830899 32360 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:27:51.830926 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.830934 32360 net.cpp:165] Memory required for data: 1536500\nI0821 08:27:51.830940 32360 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:27:51.830950 32360 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:27:51.830956 32360 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:27:51.830965 32360 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:27:51.830976 32360 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:27:51.831048 32360 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:27:51.831063 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.831069 32360 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:51.831073 32360 net.cpp:165] Memory required for data: 1537500\nI0821 08:27:51.831079 32360 layer_factory.hpp:77] Creating layer 
pre_conv\nI0821 08:27:51.831095 32360 net.cpp:100] Creating Layer pre_conv\nI0821 08:27:51.831101 32360 net.cpp:434] pre_conv <- data_top\nI0821 08:27:51.831112 32360 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:27:51.831622 32360 net.cpp:150] Setting up pre_conv\nI0821 08:27:51.831650 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.831655 32360 net.cpp:165] Memory required for data: 9729500\nI0821 08:27:51.831670 32360 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:27:51.831683 32360 net.cpp:100] Creating Layer pre_bn\nI0821 08:27:51.831689 32360 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:27:51.831698 32360 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:27:51.832067 32360 net.cpp:150] Setting up pre_bn\nI0821 08:27:51.832082 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.832087 32360 net.cpp:165] Memory required for data: 17921500\nI0821 08:27:51.832103 32360 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:51.832113 32360 net.cpp:100] Creating Layer pre_scale\nI0821 08:27:51.832119 32360 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:27:51.832130 32360 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:27:51.832195 32360 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:51.832427 32360 net.cpp:150] Setting up pre_scale\nI0821 08:27:51.832444 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.832450 32360 net.cpp:165] Memory required for data: 26113500\nI0821 08:27:51.832459 32360 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:27:51.832468 32360 net.cpp:100] Creating Layer pre_relu\nI0821 08:27:51.832474 32360 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:27:51.832484 32360 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:27:51.832497 32360 net.cpp:150] Setting up pre_relu\nI0821 08:27:51.832505 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.832509 32360 net.cpp:165] Memory required for data: 34305500\nI0821 08:27:51.832515 
32360 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:27:51.832522 32360 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:27:51.832527 32360 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:27:51.832537 32360 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:27:51.832547 32360 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:27:51.832610 32360 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:27:51.832623 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.832629 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.832634 32360 net.cpp:165] Memory required for data: 50689500\nI0821 08:27:51.832641 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:27:51.832653 32360 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:27:51.832659 32360 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:27:51.832671 32360 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:27:51.833065 32360 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:27:51.833078 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.833083 32360 net.cpp:165] Memory required for data: 58881500\nI0821 08:27:51.833098 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:27:51.833113 32360 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:27:51.833119 32360 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:27:51.833132 32360 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:27:51.833441 32360 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:27:51.833479 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.833487 32360 net.cpp:165] Memory required for data: 67073500\nI0821 08:27:51.833499 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:51.833508 32360 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 
08:27:51.833514 32360 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:27:51.833523 32360 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.833636 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:51.833987 32360 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:27:51.834002 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.834017 32360 net.cpp:165] Memory required for data: 75265500\nI0821 08:27:51.834028 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:27:51.834038 32360 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:27:51.834048 32360 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:27:51.834056 32360 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.834070 32360 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:27:51.834079 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.834084 32360 net.cpp:165] Memory required for data: 83457500\nI0821 08:27:51.834089 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:27:51.834100 32360 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:27:51.834105 32360 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:27:51.834120 32360 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:27:51.834486 32360 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:27:51.834501 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.834507 32360 net.cpp:165] Memory required for data: 91649500\nI0821 08:27:51.834517 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:27:51.834525 32360 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:27:51.834532 32360 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:27:51.834543 32360 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:27:51.834908 32360 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:27:51.834923 32360 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:51.834928 32360 net.cpp:165] Memory required for data: 99841500\nI0821 08:27:51.834946 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:51.834956 32360 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:27:51.834962 32360 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:27:51.834970 32360 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.835032 32360 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:51.835192 32360 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:27:51.835206 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835211 32360 net.cpp:165] Memory required for data: 108033500\nI0821 08:27:51.835220 32360 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:27:51.835232 32360 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:27:51.835238 32360 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:27:51.835245 32360 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:27:51.835256 32360 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:27:51.835296 32360 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:27:51.835306 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835311 32360 net.cpp:165] Memory required for data: 116225500\nI0821 08:27:51.835316 32360 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:27:51.835327 32360 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:27:51.835333 32360 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:27:51.835341 32360 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.835350 32360 net.cpp:150] Setting up L1_b1_relu\nI0821 08:27:51.835357 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835379 32360 net.cpp:165] Memory required for data: 124417500\nI0821 08:27:51.835387 32360 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.835397 32360 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.835402 32360 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:27:51.835410 32360 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:51.835420 32360 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:51.835470 32360 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:51.835489 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835496 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835501 32360 net.cpp:165] Memory required for data: 140801500\nI0821 08:27:51.835506 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:27:51.835520 32360 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:27:51.835526 32360 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:51.835536 32360 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:27:51.835886 32360 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:27:51.835899 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.835904 32360 net.cpp:165] Memory required for data: 148993500\nI0821 08:27:51.835913 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:27:51.835925 32360 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:27:51.835932 32360 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:27:51.835940 32360 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:27:51.836215 32360 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:27:51.836228 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.836233 32360 net.cpp:165] Memory required for data: 157185500\nI0821 08:27:51.836243 32360 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 08:27:51.836252 32360 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:27:51.836258 32360 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:27:51.836272 32360 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.836333 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:51.836519 32360 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:27:51.836532 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.836537 32360 net.cpp:165] Memory required for data: 165377500\nI0821 08:27:51.836546 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:27:51.836555 32360 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:27:51.836563 32360 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:27:51.836571 32360 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.836581 32360 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:27:51.836588 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.836592 32360 net.cpp:165] Memory required for data: 173569500\nI0821 08:27:51.836597 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:27:51.836611 32360 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:27:51.836616 32360 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:27:51.836627 32360 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:27:51.837211 32360 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:27:51.837227 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.837234 32360 net.cpp:165] Memory required for data: 181761500\nI0821 08:27:51.837245 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:27:51.837255 32360 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:27:51.837267 32360 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:27:51.837282 32360 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:27:51.837590 32360 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:27:51.837610 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.837616 32360 net.cpp:165] Memory required for data: 189953500\nI0821 08:27:51.837632 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:51.837641 32360 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:27:51.837651 32360 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:27:51.837662 32360 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.837730 32360 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:51.837909 32360 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:27:51.837929 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.837934 32360 net.cpp:165] Memory required for data: 198145500\nI0821 08:27:51.837951 32360 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:27:51.837960 32360 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:27:51.837967 32360 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:27:51.837975 32360 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:51.837981 32360 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:27:51.838023 32360 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:27:51.838035 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.838042 32360 net.cpp:165] Memory required for data: 206337500\nI0821 08:27:51.838047 32360 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:27:51.838054 32360 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:27:51.838060 32360 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:27:51.838068 32360 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:27:51.838080 32360 net.cpp:150] Setting up L1_b2_relu\nI0821 08:27:51.838088 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.838093 32360 net.cpp:165] Memory required for 
data: 214529500\nI0821 08:27:51.838098 32360 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.838107 32360 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.838114 32360 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:27:51.838121 32360 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:51.838131 32360 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:51.838183 32360 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:51.838198 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.838207 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.838210 32360 net.cpp:165] Memory required for data: 230913500\nI0821 08:27:51.838215 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:27:51.838229 32360 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:27:51.838237 32360 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:51.838246 32360 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:27:51.838645 32360 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:27:51.838662 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.838668 32360 net.cpp:165] Memory required for data: 239105500\nI0821 08:27:51.838678 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:27:51.838692 32360 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:27:51.838698 32360 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:27:51.838706 32360 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:27:51.839006 32360 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:27:51.839020 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.839026 32360 net.cpp:165] Memory required for data: 
247297500\nI0821 08:27:51.839040 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:51.839049 32360 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:27:51.839056 32360 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:27:51.839066 32360 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.839129 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:51.839339 32360 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:27:51.839355 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.839361 32360 net.cpp:165] Memory required for data: 255489500\nI0821 08:27:51.839373 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:27:51.839382 32360 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:27:51.839388 32360 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:27:51.839395 32360 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.839418 32360 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:27:51.839426 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.839432 32360 net.cpp:165] Memory required for data: 263681500\nI0821 08:27:51.839437 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:27:51.839452 32360 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:27:51.839458 32360 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:27:51.839469 32360 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:27:51.839936 32360 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:27:51.839951 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.839956 32360 net.cpp:165] Memory required for data: 271873500\nI0821 08:27:51.839964 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:27:51.839985 32360 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:27:51.839993 32360 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:27:51.840001 32360 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:27:51.840396 32360 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:27:51.840411 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.840420 32360 net.cpp:165] Memory required for data: 280065500\nI0821 08:27:51.840431 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:51.840440 32360 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:27:51.840445 32360 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:27:51.840458 32360 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.840524 32360 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:51.840700 32360 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:27:51.840718 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.840723 32360 net.cpp:165] Memory required for data: 288257500\nI0821 08:27:51.840734 32360 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:27:51.840742 32360 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:27:51.840749 32360 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:27:51.840756 32360 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:51.840764 32360 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:27:51.840804 32360 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:27:51.840817 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.840822 32360 net.cpp:165] Memory required for data: 296449500\nI0821 08:27:51.840827 32360 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:27:51.840838 32360 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:27:51.840844 32360 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:27:51.840854 32360 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.840864 32360 net.cpp:150] Setting up L1_b3_relu\nI0821 08:27:51.840872 32360 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 08:27:51.840876 32360 net.cpp:165] Memory required for data: 304641500\nI0821 08:27:51.840880 32360 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.840890 32360 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.840896 32360 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:27:51.840908 32360 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:51.840917 32360 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:51.840970 32360 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:51.840983 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.840991 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.840998 32360 net.cpp:165] Memory required for data: 321025500\nI0821 08:27:51.841004 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:27:51.841015 32360 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:27:51.841028 32360 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:51.841042 32360 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:27:51.841446 32360 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:27:51.841464 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.841469 32360 net.cpp:165] Memory required for data: 329217500\nI0821 08:27:51.841477 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:27:51.841490 32360 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:27:51.841500 32360 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:27:51.841508 32360 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:27:51.841830 32360 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:27:51.841843 32360 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:51.841848 32360 net.cpp:165] Memory required for data: 337409500\nI0821 08:27:51.841859 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:51.841871 32360 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:27:51.841877 32360 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:27:51.841888 32360 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.841955 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:51.842129 32360 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:27:51.842145 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.842150 32360 net.cpp:165] Memory required for data: 345601500\nI0821 08:27:51.842159 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:27:51.842173 32360 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:27:51.842181 32360 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:27:51.842188 32360 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.842198 32360 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:27:51.842206 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.842212 32360 net.cpp:165] Memory required for data: 353793500\nI0821 08:27:51.842218 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:27:51.842231 32360 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:27:51.842237 32360 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:27:51.842250 32360 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:27:51.842658 32360 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:27:51.842672 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.842677 32360 net.cpp:165] Memory required for data: 361985500\nI0821 08:27:51.842687 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:27:51.842700 32360 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:27:51.842706 32360 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:27:51.842716 32360 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:27:51.843057 32360 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:27:51.843072 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843077 32360 net.cpp:165] Memory required for data: 370177500\nI0821 08:27:51.843087 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:51.843096 32360 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:27:51.843102 32360 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:27:51.843116 32360 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.843184 32360 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:51.843379 32360 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:27:51.843395 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843401 32360 net.cpp:165] Memory required for data: 378369500\nI0821 08:27:51.843410 32360 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:27:51.843420 32360 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:27:51.843425 32360 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:27:51.843432 32360 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:51.843451 32360 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:27:51.843494 32360 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:27:51.843505 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843509 32360 net.cpp:165] Memory required for data: 386561500\nI0821 08:27:51.843515 32360 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:27:51.843531 32360 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:27:51.843539 32360 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:27:51.843547 32360 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:27:51.843557 32360 net.cpp:150] 
Setting up L1_b4_relu\nI0821 08:27:51.843564 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843569 32360 net.cpp:165] Memory required for data: 394753500\nI0821 08:27:51.843575 32360 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.843585 32360 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.843590 32360 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:27:51.843597 32360 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:51.843611 32360 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:51.843663 32360 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:51.843678 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843689 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.843694 32360 net.cpp:165] Memory required for data: 411137500\nI0821 08:27:51.843699 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:27:51.843710 32360 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:27:51.843716 32360 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:51.843725 32360 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:27:51.844146 32360 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:27:51.844161 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.844166 32360 net.cpp:165] Memory required for data: 419329500\nI0821 08:27:51.844200 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:27:51.844214 32360 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:27:51.844220 32360 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:27:51.844233 32360 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:27:51.844558 32360 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 08:27:51.844573 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.844580 32360 net.cpp:165] Memory required for data: 427521500\nI0821 08:27:51.844591 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:51.844600 32360 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:27:51.844606 32360 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:27:51.844617 32360 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.844686 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:51.844862 32360 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:27:51.844876 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.844880 32360 net.cpp:165] Memory required for data: 435713500\nI0821 08:27:51.844892 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:27:51.844904 32360 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:27:51.844910 32360 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:27:51.844918 32360 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.844930 32360 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:27:51.844938 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.844944 32360 net.cpp:165] Memory required for data: 443905500\nI0821 08:27:51.844947 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:27:51.844974 32360 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:27:51.844982 32360 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:27:51.844993 32360 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:27:51.845403 32360 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:27:51.845418 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.845423 32360 net.cpp:165] Memory required for data: 452097500\nI0821 08:27:51.845432 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:27:51.845448 32360 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:27:51.845455 32360 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:27:51.845464 32360 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:27:51.845758 32360 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:27:51.845772 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.845777 32360 net.cpp:165] Memory required for data: 460289500\nI0821 08:27:51.845788 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:51.845798 32360 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:27:51.845803 32360 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:27:51.845810 32360 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.845871 32360 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:51.846405 32360 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:27:51.846421 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.846426 32360 net.cpp:165] Memory required for data: 468481500\nI0821 08:27:51.846436 32360 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:27:51.846443 32360 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:27:51.846451 32360 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:27:51.846457 32360 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:51.846467 32360 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:27:51.846503 32360 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:27:51.846513 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.846518 32360 net.cpp:165] Memory required for data: 476673500\nI0821 08:27:51.846523 32360 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:27:51.846534 32360 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:27:51.846540 32360 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:27:51.846547 32360 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.846556 32360 net.cpp:150] Setting up L1_b5_relu\nI0821 08:27:51.846563 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.846568 32360 net.cpp:165] Memory required for data: 484865500\nI0821 08:27:51.846572 32360 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.846580 32360 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.846585 32360 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:27:51.846592 32360 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:51.846601 32360 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:51.846652 32360 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:51.846663 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.846670 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.846675 32360 net.cpp:165] Memory required for data: 501249500\nI0821 08:27:51.846679 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:27:51.846693 32360 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:27:51.846699 32360 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:51.846709 32360 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:27:51.847056 32360 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:27:51.847069 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.847081 32360 net.cpp:165] Memory required for data: 509441500\nI0821 08:27:51.847090 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:27:51.847102 32360 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:27:51.847110 32360 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:27:51.847122 32360 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 08:27:51.847404 32360 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:27:51.847417 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.847421 32360 net.cpp:165] Memory required for data: 517633500\nI0821 08:27:51.847432 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:51.847441 32360 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:27:51.847447 32360 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:27:51.847455 32360 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.847515 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:51.847679 32360 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:27:51.847693 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.847698 32360 net.cpp:165] Memory required for data: 525825500\nI0821 08:27:51.847707 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:27:51.847715 32360 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:27:51.847721 32360 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:27:51.847731 32360 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.847741 32360 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:27:51.847748 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.847753 32360 net.cpp:165] Memory required for data: 534017500\nI0821 08:27:51.847757 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:27:51.847770 32360 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:27:51.847776 32360 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:27:51.847786 32360 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:27:51.848142 32360 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:27:51.848156 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.848161 32360 net.cpp:165] Memory required for data: 542209500\nI0821 08:27:51.848170 32360 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:27:51.848181 32360 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:27:51.848188 32360 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:27:51.848196 32360 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:27:51.848481 32360 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:27:51.848495 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.848500 32360 net.cpp:165] Memory required for data: 550401500\nI0821 08:27:51.848511 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:51.848520 32360 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:27:51.848526 32360 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:27:51.848533 32360 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.848595 32360 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:51.848757 32360 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:27:51.848770 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.848775 32360 net.cpp:165] Memory required for data: 558593500\nI0821 08:27:51.848784 32360 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:27:51.848805 32360 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:27:51.848811 32360 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:27:51.848819 32360 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:51.848826 32360 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:27:51.848863 32360 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:27:51.848875 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.848881 32360 net.cpp:165] Memory required for data: 566785500\nI0821 08:27:51.848893 32360 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:27:51.848901 32360 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:27:51.848906 32360 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 08:27:51.848913 32360 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.848922 32360 net.cpp:150] Setting up L1_b6_relu\nI0821 08:27:51.848930 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.848935 32360 net.cpp:165] Memory required for data: 574977500\nI0821 08:27:51.848939 32360 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.848947 32360 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.848951 32360 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:27:51.848961 32360 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:51.848971 32360 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:51.849020 32360 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:51.849031 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.849037 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.849042 32360 net.cpp:165] Memory required for data: 591361500\nI0821 08:27:51.849047 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:27:51.849061 32360 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:27:51.849067 32360 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:51.849076 32360 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:27:51.849440 32360 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:27:51.849454 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.849459 32360 net.cpp:165] Memory required for data: 599553500\nI0821 08:27:51.849468 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:27:51.849479 32360 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:27:51.849486 32360 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 08:27:51.849498 32360 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:27:51.849769 32360 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:27:51.849783 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.849788 32360 net.cpp:165] Memory required for data: 607745500\nI0821 08:27:51.849798 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:51.849807 32360 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:27:51.849813 32360 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:27:51.849823 32360 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.849881 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:51.850039 32360 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:27:51.850055 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.850060 32360 net.cpp:165] Memory required for data: 615937500\nI0821 08:27:51.850069 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:27:51.850077 32360 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:27:51.850083 32360 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:27:51.850090 32360 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.850100 32360 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:27:51.850107 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.850112 32360 net.cpp:165] Memory required for data: 624129500\nI0821 08:27:51.850116 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:27:51.850129 32360 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:27:51.850136 32360 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:27:51.850147 32360 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:27:51.850545 32360 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:27:51.850560 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.850571 32360 
net.cpp:165] Memory required for data: 632321500\nI0821 08:27:51.850581 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:27:51.850594 32360 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:27:51.850600 32360 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:27:51.850611 32360 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:27:51.850883 32360 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:27:51.850895 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.850900 32360 net.cpp:165] Memory required for data: 640513500\nI0821 08:27:51.850911 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:51.850920 32360 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:27:51.850926 32360 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:27:51.850934 32360 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.850994 32360 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:51.851153 32360 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:27:51.851166 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851171 32360 net.cpp:165] Memory required for data: 648705500\nI0821 08:27:51.851181 32360 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:27:51.851192 32360 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:27:51.851199 32360 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:27:51.851207 32360 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:51.851214 32360 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:27:51.851251 32360 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:27:51.851269 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851275 32360 net.cpp:165] Memory required for data: 656897500\nI0821 08:27:51.851280 32360 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:27:51.851287 32360 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 08:27:51.851294 32360 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:27:51.851303 32360 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.851313 32360 net.cpp:150] Setting up L1_b7_relu\nI0821 08:27:51.851321 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851325 32360 net.cpp:165] Memory required for data: 665089500\nI0821 08:27:51.851330 32360 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.851337 32360 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.851342 32360 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:27:51.851352 32360 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:51.851362 32360 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:51.851410 32360 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:51.851423 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851429 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851434 32360 net.cpp:165] Memory required for data: 681473500\nI0821 08:27:51.851439 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:27:51.851452 32360 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:27:51.851459 32360 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:51.851469 32360 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:27:51.851824 32360 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:27:51.851837 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.851842 32360 net.cpp:165] Memory required for data: 689665500\nI0821 08:27:51.851851 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:27:51.851863 32360 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 08:27:51.851869 32360 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:27:51.851887 32360 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:27:51.852165 32360 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:27:51.852180 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.852185 32360 net.cpp:165] Memory required for data: 697857500\nI0821 08:27:51.852195 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:51.852203 32360 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:27:51.852210 32360 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:27:51.852216 32360 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.852282 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:51.852460 32360 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:27:51.852474 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.852480 32360 net.cpp:165] Memory required for data: 706049500\nI0821 08:27:51.852489 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:27:51.852500 32360 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:27:51.852506 32360 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:27:51.852514 32360 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.852524 32360 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:27:51.852531 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.852536 32360 net.cpp:165] Memory required for data: 714241500\nI0821 08:27:51.852541 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:27:51.852555 32360 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:27:51.852560 32360 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:27:51.852571 32360 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:27:51.852931 32360 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:27:51.852944 32360 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.852949 32360 net.cpp:165] Memory required for data: 722433500\nI0821 08:27:51.852958 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:27:51.852970 32360 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:27:51.852977 32360 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:27:51.852985 32360 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:27:51.853265 32360 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:27:51.853278 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853283 32360 net.cpp:165] Memory required for data: 730625500\nI0821 08:27:51.853294 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:51.853303 32360 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:27:51.853309 32360 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:27:51.853317 32360 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.853379 32360 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:51.853539 32360 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:27:51.853552 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853557 32360 net.cpp:165] Memory required for data: 738817500\nI0821 08:27:51.853566 32360 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:27:51.853575 32360 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:27:51.853581 32360 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:27:51.853588 32360 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:51.853600 32360 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:27:51.853632 32360 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:27:51.853643 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853648 32360 net.cpp:165] Memory required for data: 747009500\nI0821 08:27:51.853653 32360 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 08:27:51.853664 32360 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:27:51.853670 32360 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:27:51.853677 32360 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.853693 32360 net.cpp:150] Setting up L1_b8_relu\nI0821 08:27:51.853701 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853706 32360 net.cpp:165] Memory required for data: 755201500\nI0821 08:27:51.853710 32360 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.853718 32360 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.853723 32360 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:27:51.853730 32360 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:51.853739 32360 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:51.853788 32360 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:51.853801 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853807 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.853811 32360 net.cpp:165] Memory required for data: 771585500\nI0821 08:27:51.853816 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:27:51.853829 32360 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:27:51.853837 32360 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:51.853845 32360 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:27:51.854207 32360 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:27:51.854225 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.854230 32360 net.cpp:165] Memory required for data: 779777500\nI0821 08:27:51.854239 32360 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 08:27:51.854249 32360 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:27:51.854254 32360 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:27:51.854267 32360 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:27:51.854579 32360 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:27:51.854593 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.854598 32360 net.cpp:165] Memory required for data: 787969500\nI0821 08:27:51.854609 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:51.854621 32360 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:27:51.854627 32360 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:27:51.854635 32360 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.854697 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:51.854858 32360 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:27:51.854871 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.854876 32360 net.cpp:165] Memory required for data: 796161500\nI0821 08:27:51.854885 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:27:51.854893 32360 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:27:51.854899 32360 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:27:51.854912 32360 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.854921 32360 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:27:51.854928 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.854933 32360 net.cpp:165] Memory required for data: 804353500\nI0821 08:27:51.854938 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:27:51.854953 32360 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:27:51.854959 32360 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:27:51.854967 32360 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:27:51.855329 32360 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:27:51.855343 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.855350 32360 net.cpp:165] Memory required for data: 812545500\nI0821 08:27:51.855357 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:27:51.855370 32360 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:27:51.855376 32360 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:27:51.855391 32360 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:27:51.855669 32360 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:27:51.855684 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.855689 32360 net.cpp:165] Memory required for data: 820737500\nI0821 08:27:51.855720 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:51.855733 32360 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:27:51.855739 32360 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:27:51.855747 32360 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.855808 32360 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:51.855967 32360 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:27:51.855979 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.855984 32360 net.cpp:165] Memory required for data: 828929500\nI0821 08:27:51.855993 32360 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:27:51.856001 32360 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:27:51.856009 32360 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:27:51.856015 32360 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:51.856022 32360 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:27:51.856060 32360 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:27:51.856071 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.856076 32360 net.cpp:165] Memory required for 
data: 837121500\nI0821 08:27:51.856081 32360 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:27:51.856088 32360 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:27:51.856094 32360 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:27:51.856103 32360 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.856113 32360 net.cpp:150] Setting up L1_b9_relu\nI0821 08:27:51.856120 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.856125 32360 net.cpp:165] Memory required for data: 845313500\nI0821 08:27:51.856129 32360 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.856137 32360 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.856142 32360 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:27:51.856151 32360 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:51.856161 32360 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:51.856209 32360 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:51.856220 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.856227 32360 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:51.856231 32360 net.cpp:165] Memory required for data: 861697500\nI0821 08:27:51.856236 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:27:51.856251 32360 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:27:51.856257 32360 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:51.856274 32360 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:27:51.856642 32360 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:27:51.856657 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.856662 32360 net.cpp:165] Memory required for data: 
863745500\nI0821 08:27:51.856670 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:27:51.856681 32360 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:27:51.856688 32360 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:27:51.856698 32360 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:27:51.856966 32360 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:27:51.856978 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.856983 32360 net.cpp:165] Memory required for data: 865793500\nI0821 08:27:51.857000 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:51.857010 32360 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:27:51.857017 32360 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:27:51.857026 32360 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.857086 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:51.857246 32360 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:27:51.857265 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.857271 32360 net.cpp:165] Memory required for data: 867841500\nI0821 08:27:51.857280 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:27:51.857288 32360 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:27:51.857295 32360 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:27:51.857301 32360 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.857311 32360 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:27:51.857319 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.857324 32360 net.cpp:165] Memory required for data: 869889500\nI0821 08:27:51.857328 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:27:51.857349 32360 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:27:51.857355 32360 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:27:51.857367 32360 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:27:51.857730 32360 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:27:51.857744 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.857749 32360 net.cpp:165] Memory required for data: 871937500\nI0821 08:27:51.857758 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:27:51.857769 32360 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:27:51.857776 32360 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:27:51.857787 32360 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:27:51.858050 32360 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:27:51.858063 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.858068 32360 net.cpp:165] Memory required for data: 873985500\nI0821 08:27:51.858078 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:51.858088 32360 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:27:51.858093 32360 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:27:51.858100 32360 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.858161 32360 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:51.858341 32360 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:27:51.858359 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.858366 32360 net.cpp:165] Memory required for data: 876033500\nI0821 08:27:51.858374 32360 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:27:51.858384 32360 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:27:51.858392 32360 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:51.858399 32360 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:27:51.858433 32360 net.cpp:150] Setting up L2_b1_pool\nI0821 08:27:51.858445 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.858450 32360 net.cpp:165] Memory required for data: 878081500\nI0821 08:27:51.858455 32360 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:27:51.858464 32360 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:27:51.858469 32360 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:27:51.858476 32360 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:27:51.858487 32360 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:27:51.858521 32360 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:27:51.858530 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.858536 32360 net.cpp:165] Memory required for data: 880129500\nI0821 08:27:51.858539 32360 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:27:51.858551 32360 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:27:51.858563 32360 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:27:51.858572 32360 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.858580 32360 net.cpp:150] Setting up L2_b1_relu\nI0821 08:27:51.858588 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.858592 32360 net.cpp:165] Memory required for data: 882177500\nI0821 08:27:51.858597 32360 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:27:51.858606 32360 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:27:51.858618 32360 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:27:51.860868 32360 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:27:51.860888 32360 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:51.860893 32360 net.cpp:165] Memory required for data: 884225500\nI0821 08:27:51.860898 32360 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:27:51.860908 32360 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:27:51.860915 32360 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:27:51.860922 32360 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:27:51.860934 32360 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:27:51.860976 32360 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 08:27:51.860990 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.860996 32360 net.cpp:165] Memory required for data: 888321500\nI0821 08:27:51.861001 32360 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.861007 32360 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.861013 32360 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:27:51.861024 32360 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:51.861034 32360 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:51.861084 32360 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:51.861096 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.861104 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.861107 32360 net.cpp:165] Memory required for data: 896513500\nI0821 08:27:51.861112 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:27:51.861126 32360 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:27:51.861133 32360 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:51.861145 32360 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:27:51.861661 32360 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:27:51.861676 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.861681 32360 net.cpp:165] Memory required for data: 900609500\nI0821 08:27:51.861690 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:27:51.861702 32360 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:27:51.861709 32360 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:27:51.861718 32360 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:27:51.861987 32360 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:27:51.862001 32360 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 08:27:51.862006 32360 net.cpp:165] Memory required for data: 904705500\nI0821 08:27:51.862016 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:51.862028 32360 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:27:51.862035 32360 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:27:51.862042 32360 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.862102 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:51.862268 32360 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:27:51.862282 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.862287 32360 net.cpp:165] Memory required for data: 908801500\nI0821 08:27:51.862296 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:27:51.862308 32360 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:27:51.862321 32360 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:27:51.862331 32360 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.862342 32360 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:27:51.862349 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.862354 32360 net.cpp:165] Memory required for data: 912897500\nI0821 08:27:51.862360 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:27:51.862370 32360 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:27:51.862376 32360 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:27:51.862387 32360 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:27:51.862879 32360 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:27:51.862892 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.862897 32360 net.cpp:165] Memory required for data: 916993500\nI0821 08:27:51.862906 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:27:51.862916 32360 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:27:51.862922 
32360 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:27:51.862933 32360 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:27:51.863196 32360 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:27:51.863209 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863214 32360 net.cpp:165] Memory required for data: 921089500\nI0821 08:27:51.863224 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:51.863235 32360 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:27:51.863242 32360 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:27:51.863250 32360 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.863315 32360 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:51.863473 32360 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:27:51.863487 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863492 32360 net.cpp:165] Memory required for data: 925185500\nI0821 08:27:51.863500 32360 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:27:51.863514 32360 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:27:51.863521 32360 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:27:51.863528 32360 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:51.863536 32360 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:27:51.863567 32360 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:27:51.863576 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863581 32360 net.cpp:165] Memory required for data: 929281500\nI0821 08:27:51.863586 32360 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:27:51.863593 32360 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:27:51.863600 32360 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:27:51.863610 32360 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:27:51.863620 32360 net.cpp:150] 
Setting up L2_b2_relu\nI0821 08:27:51.863626 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863631 32360 net.cpp:165] Memory required for data: 933377500\nI0821 08:27:51.863636 32360 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.863642 32360 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.863647 32360 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:27:51.863656 32360 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:51.863664 32360 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:51.863714 32360 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:51.863726 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863732 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.863737 32360 net.cpp:165] Memory required for data: 941569500\nI0821 08:27:51.863749 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:27:51.863760 32360 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:27:51.863766 32360 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:51.863778 32360 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:27:51.864284 32360 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:27:51.864298 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.864303 32360 net.cpp:165] Memory required for data: 945665500\nI0821 08:27:51.864311 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:27:51.864321 32360 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:27:51.864327 32360 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:27:51.864338 32360 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:27:51.864606 32360 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 08:27:51.864619 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.864624 32360 net.cpp:165] Memory required for data: 949761500\nI0821 08:27:51.864634 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:51.864646 32360 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:27:51.864652 32360 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:27:51.864660 32360 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.864717 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:51.864877 32360 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:27:51.864890 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.864895 32360 net.cpp:165] Memory required for data: 953857500\nI0821 08:27:51.864904 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:27:51.864915 32360 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:27:51.864922 32360 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:27:51.864929 32360 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.864939 32360 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:27:51.864946 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.864951 32360 net.cpp:165] Memory required for data: 957953500\nI0821 08:27:51.864955 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:27:51.864969 32360 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:27:51.864975 32360 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:27:51.864987 32360 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:27:51.865481 32360 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:27:51.865496 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.865501 32360 net.cpp:165] Memory required for data: 962049500\nI0821 08:27:51.865510 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:27:51.865520 32360 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:27:51.865526 32360 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:27:51.865537 32360 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:27:51.865808 32360 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:27:51.865820 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.865825 32360 net.cpp:165] Memory required for data: 966145500\nI0821 08:27:51.865835 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:51.865847 32360 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:27:51.865854 32360 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:27:51.865861 32360 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.865918 32360 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:51.866073 32360 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:27:51.866086 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866091 32360 net.cpp:165] Memory required for data: 970241500\nI0821 08:27:51.866101 32360 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:27:51.866112 32360 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:27:51.866125 32360 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:27:51.866132 32360 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:51.866140 32360 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:27:51.866171 32360 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:27:51.866183 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866189 32360 net.cpp:165] Memory required for data: 974337500\nI0821 08:27:51.866194 32360 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:27:51.866214 32360 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:27:51.866220 32360 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:27:51.866228 32360 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.866238 32360 net.cpp:150] Setting up L2_b3_relu\nI0821 08:27:51.866245 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866250 32360 net.cpp:165] Memory required for data: 978433500\nI0821 08:27:51.866255 32360 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.866267 32360 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.866273 32360 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:27:51.866284 32360 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:51.866294 32360 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:51.866344 32360 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:51.866365 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866372 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866376 32360 net.cpp:165] Memory required for data: 986625500\nI0821 08:27:51.866382 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:27:51.866392 32360 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:27:51.866400 32360 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:51.866408 32360 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:27:51.866901 32360 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:27:51.866915 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.866920 32360 net.cpp:165] Memory required for data: 990721500\nI0821 08:27:51.866930 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:27:51.866941 32360 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:27:51.866948 32360 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:27:51.866956 32360 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 08:27:51.867228 32360 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:27:51.867240 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.867245 32360 net.cpp:165] Memory required for data: 994817500\nI0821 08:27:51.867256 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:51.867270 32360 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:27:51.867277 32360 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:27:51.867288 32360 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.867348 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:51.867508 32360 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:27:51.867521 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.867525 32360 net.cpp:165] Memory required for data: 998913500\nI0821 08:27:51.867534 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:27:51.867542 32360 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:27:51.867549 32360 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:27:51.867560 32360 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.867570 32360 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:27:51.867578 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.867583 32360 net.cpp:165] Memory required for data: 1003009500\nI0821 08:27:51.867595 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:27:51.867609 32360 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:27:51.867615 32360 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:27:51.867624 32360 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:27:51.868119 32360 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:27:51.868131 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868136 32360 net.cpp:165] Memory required for data: 1007105500\nI0821 08:27:51.868146 32360 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:27:51.868157 32360 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:27:51.868165 32360 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:27:51.868172 32360 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:27:51.868443 32360 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:27:51.868456 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868463 32360 net.cpp:165] Memory required for data: 1011201500\nI0821 08:27:51.868472 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:51.868480 32360 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:27:51.868487 32360 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:27:51.868494 32360 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.868556 32360 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:51.868717 32360 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:27:51.868732 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868737 32360 net.cpp:165] Memory required for data: 1015297500\nI0821 08:27:51.868746 32360 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:27:51.868755 32360 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:27:51.868762 32360 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:27:51.868768 32360 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:51.868779 32360 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:27:51.868808 32360 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:27:51.868816 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868821 32360 net.cpp:165] Memory required for data: 1019393500\nI0821 08:27:51.868826 32360 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:27:51.868834 32360 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:27:51.868839 32360 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 08:27:51.868849 32360 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:27:51.868860 32360 net.cpp:150] Setting up L2_b4_relu\nI0821 08:27:51.868866 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868871 32360 net.cpp:165] Memory required for data: 1023489500\nI0821 08:27:51.868875 32360 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.868882 32360 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.868887 32360 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:27:51.868897 32360 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:51.868907 32360 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:51.868954 32360 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:51.868966 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868973 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.868978 32360 net.cpp:165] Memory required for data: 1031681500\nI0821 08:27:51.868983 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:27:51.868998 32360 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:27:51.869004 32360 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:51.869014 32360 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:27:51.869525 32360 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:27:51.869540 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.869545 32360 net.cpp:165] Memory required for data: 1035777500\nI0821 08:27:51.869554 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:27:51.869566 32360 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:27:51.869573 32360 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 08:27:51.869581 32360 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:27:51.869851 32360 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:27:51.869864 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.869869 32360 net.cpp:165] Memory required for data: 1039873500\nI0821 08:27:51.869879 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:51.869889 32360 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:27:51.869894 32360 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:27:51.869904 32360 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.869962 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:51.870122 32360 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:27:51.870134 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.870139 32360 net.cpp:165] Memory required for data: 1043969500\nI0821 08:27:51.870148 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:27:51.870157 32360 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:27:51.870163 32360 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:27:51.870173 32360 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.870183 32360 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:27:51.870190 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.870195 32360 net.cpp:165] Memory required for data: 1048065500\nI0821 08:27:51.870200 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:27:51.870213 32360 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:27:51.870219 32360 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:27:51.870227 32360 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:27:51.870726 32360 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:27:51.870741 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.870746 32360 
net.cpp:165] Memory required for data: 1052161500\nI0821 08:27:51.870755 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:27:51.870764 32360 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:27:51.870770 32360 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:27:51.870781 32360 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:27:51.871049 32360 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:27:51.871063 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871068 32360 net.cpp:165] Memory required for data: 1056257500\nI0821 08:27:51.871078 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:51.871086 32360 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:27:51.871093 32360 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:27:51.871100 32360 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.871163 32360 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:51.871327 32360 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:27:51.871343 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871348 32360 net.cpp:165] Memory required for data: 1060353500\nI0821 08:27:51.871357 32360 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:27:51.871366 32360 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:27:51.871372 32360 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:27:51.871381 32360 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:51.871387 32360 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:27:51.871419 32360 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:27:51.871435 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871440 32360 net.cpp:165] Memory required for data: 1064449500\nI0821 08:27:51.871446 32360 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:27:51.871454 32360 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 08:27:51.871459 32360 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:27:51.871469 32360 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.871479 32360 net.cpp:150] Setting up L2_b5_relu\nI0821 08:27:51.871486 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871490 32360 net.cpp:165] Memory required for data: 1068545500\nI0821 08:27:51.871495 32360 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.871502 32360 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.871507 32360 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:27:51.871517 32360 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:51.871527 32360 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:51.871577 32360 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:51.871587 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871594 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.871598 32360 net.cpp:165] Memory required for data: 1076737500\nI0821 08:27:51.871603 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:27:51.871618 32360 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:27:51.871624 32360 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:51.871634 32360 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:27:51.872135 32360 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:27:51.872148 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.872153 32360 net.cpp:165] Memory required for data: 1080833500\nI0821 08:27:51.872162 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:27:51.872174 32360 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 08:27:51.872181 32360 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:27:51.872189 32360 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:27:51.872459 32360 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:27:51.872473 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.872478 32360 net.cpp:165] Memory required for data: 1084929500\nI0821 08:27:51.872488 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:51.872496 32360 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:27:51.872503 32360 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:27:51.872510 32360 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.872570 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:51.872726 32360 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:27:51.872741 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.872746 32360 net.cpp:165] Memory required for data: 1089025500\nI0821 08:27:51.872756 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:27:51.872763 32360 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:27:51.872769 32360 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:27:51.872777 32360 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.872786 32360 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:27:51.872794 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.872798 32360 net.cpp:165] Memory required for data: 1093121500\nI0821 08:27:51.872803 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:27:51.872817 32360 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:27:51.872822 32360 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:27:51.872835 32360 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:27:51.873342 32360 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:27:51.873358 32360 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.873363 32360 net.cpp:165] Memory required for data: 1097217500\nI0821 08:27:51.873371 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:27:51.873383 32360 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:27:51.873390 32360 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:27:51.873401 32360 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:27:51.873663 32360 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:27:51.873677 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.873682 32360 net.cpp:165] Memory required for data: 1101313500\nI0821 08:27:51.873692 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:51.873700 32360 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:27:51.873706 32360 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:27:51.873714 32360 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.873775 32360 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:51.873926 32360 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:27:51.873939 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.873944 32360 net.cpp:165] Memory required for data: 1105409500\nI0821 08:27:51.873953 32360 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:27:51.873965 32360 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:27:51.873971 32360 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:27:51.873980 32360 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:51.873986 32360 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:27:51.874017 32360 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:27:51.874027 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.874032 32360 net.cpp:165] Memory required for data: 1109505500\nI0821 08:27:51.874037 32360 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 08:27:51.874045 32360 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:27:51.874052 32360 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:27:51.874060 32360 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.874070 32360 net.cpp:150] Setting up L2_b6_relu\nI0821 08:27:51.874078 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.874083 32360 net.cpp:165] Memory required for data: 1113601500\nI0821 08:27:51.874086 32360 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.874094 32360 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.874099 32360 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:27:51.874109 32360 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:51.874119 32360 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:51.874166 32360 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:51.874178 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.874186 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.874189 32360 net.cpp:165] Memory required for data: 1121793500\nI0821 08:27:51.874195 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:27:51.874210 32360 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:27:51.874217 32360 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:51.874227 32360 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:27:51.875731 32360 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:27:51.875748 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.875754 32360 net.cpp:165] Memory required for data: 1125889500\nI0821 08:27:51.875764 32360 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 08:27:51.875784 32360 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:27:51.875792 32360 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:27:51.875800 32360 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:27:51.876068 32360 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:27:51.876082 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.876087 32360 net.cpp:165] Memory required for data: 1129985500\nI0821 08:27:51.876097 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:51.876109 32360 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:27:51.876116 32360 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:27:51.876123 32360 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.876184 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:51.876351 32360 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:27:51.876365 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.876370 32360 net.cpp:165] Memory required for data: 1134081500\nI0821 08:27:51.876379 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:27:51.876390 32360 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:27:51.876397 32360 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:27:51.876407 32360 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.876417 32360 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:27:51.876425 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.876430 32360 net.cpp:165] Memory required for data: 1138177500\nI0821 08:27:51.876435 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:27:51.876446 32360 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:27:51.876452 32360 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:27:51.876463 32360 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:27:51.876945 32360 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:27:51.876960 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.876965 32360 net.cpp:165] Memory required for data: 1142273500\nI0821 08:27:51.876973 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:27:51.876982 32360 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:27:51.876988 32360 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:27:51.877001 32360 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:27:51.877277 32360 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:27:51.877291 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877296 32360 net.cpp:165] Memory required for data: 1146369500\nI0821 08:27:51.877307 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:51.877318 32360 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:27:51.877326 32360 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:27:51.877333 32360 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.877390 32360 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:51.877547 32360 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:27:51.877559 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877564 32360 net.cpp:165] Memory required for data: 1150465500\nI0821 08:27:51.877573 32360 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:27:51.877585 32360 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:27:51.877593 32360 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:27:51.877599 32360 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:51.877607 32360 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:27:51.877637 32360 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:27:51.877647 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877652 32360 net.cpp:165] Memory required for 
data: 1154561500\nI0821 08:27:51.877657 32360 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:27:51.877665 32360 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:27:51.877678 32360 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:27:51.877688 32360 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.877698 32360 net.cpp:150] Setting up L2_b7_relu\nI0821 08:27:51.877706 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877710 32360 net.cpp:165] Memory required for data: 1158657500\nI0821 08:27:51.877715 32360 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.877722 32360 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.877727 32360 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:27:51.877735 32360 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:51.877744 32360 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:51.877797 32360 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:51.877810 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877816 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.877821 32360 net.cpp:165] Memory required for data: 1166849500\nI0821 08:27:51.877849 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:27:51.877862 32360 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:27:51.877868 32360 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:51.877881 32360 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:27:51.878383 32360 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:27:51.878397 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.878403 32360 net.cpp:165] Memory required for data: 
1170945500\nI0821 08:27:51.878412 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:27:51.878422 32360 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:27:51.878428 32360 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:27:51.878439 32360 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:27:51.878710 32360 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:27:51.878723 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.878728 32360 net.cpp:165] Memory required for data: 1175041500\nI0821 08:27:51.878738 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:51.878751 32360 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:27:51.878756 32360 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:27:51.878764 32360 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.878825 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:51.878983 32360 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:27:51.878994 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.878999 32360 net.cpp:165] Memory required for data: 1179137500\nI0821 08:27:51.879009 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:27:51.879019 32360 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:27:51.879025 32360 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:27:51.879034 32360 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.879043 32360 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:27:51.879053 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.879058 32360 net.cpp:165] Memory required for data: 1183233500\nI0821 08:27:51.879062 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:27:51.879073 32360 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:27:51.879079 32360 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:27:51.879091 32360 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:27:51.879583 32360 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:27:51.879597 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.879603 32360 net.cpp:165] Memory required for data: 1187329500\nI0821 08:27:51.879611 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:27:51.879627 32360 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:27:51.879634 32360 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:27:51.879645 32360 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:27:51.879918 32360 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:27:51.879931 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.879936 32360 net.cpp:165] Memory required for data: 1191425500\nI0821 08:27:51.879947 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:51.879958 32360 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:27:51.879966 32360 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:27:51.879973 32360 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.880031 32360 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:51.880192 32360 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:27:51.880204 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.880209 32360 net.cpp:165] Memory required for data: 1195521500\nI0821 08:27:51.880218 32360 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:27:51.880229 32360 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:27:51.880236 32360 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:27:51.880244 32360 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:51.880251 32360 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:27:51.880290 32360 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:27:51.880301 32360 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:51.880306 32360 net.cpp:165] Memory required for data: 1199617500\nI0821 08:27:51.880311 32360 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:27:51.880319 32360 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:27:51.880326 32360 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:27:51.880336 32360 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.880345 32360 net.cpp:150] Setting up L2_b8_relu\nI0821 08:27:51.880352 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.880357 32360 net.cpp:165] Memory required for data: 1203713500\nI0821 08:27:51.880362 32360 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.880368 32360 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.880374 32360 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:27:51.880381 32360 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:51.880409 32360 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:51.880463 32360 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:51.880480 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.880486 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.880491 32360 net.cpp:165] Memory required for data: 1211905500\nI0821 08:27:51.880496 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:27:51.880508 32360 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:27:51.880515 32360 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:51.880527 32360 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:27:51.881021 32360 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:27:51.881034 32360 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:51.881039 32360 net.cpp:165] Memory required for data: 1216001500\nI0821 08:27:51.881048 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:27:51.881058 32360 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:27:51.881064 32360 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:27:51.881075 32360 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:27:51.881363 32360 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:27:51.881384 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.881389 32360 net.cpp:165] Memory required for data: 1220097500\nI0821 08:27:51.881400 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:51.881412 32360 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:27:51.881418 32360 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:27:51.881427 32360 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.881485 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:51.881647 32360 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:27:51.881659 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.881664 32360 net.cpp:165] Memory required for data: 1224193500\nI0821 08:27:51.881673 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:27:51.881681 32360 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:27:51.881688 32360 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:27:51.881698 32360 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.881708 32360 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:27:51.881716 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.881721 32360 net.cpp:165] Memory required for data: 1228289500\nI0821 08:27:51.881726 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:27:51.881738 32360 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:27:51.881744 32360 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:27:51.881755 32360 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:27:51.883235 32360 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:27:51.883252 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.883257 32360 net.cpp:165] Memory required for data: 1232385500\nI0821 08:27:51.883273 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:27:51.883286 32360 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:27:51.883293 32360 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:27:51.883302 32360 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:27:51.883572 32360 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:27:51.883585 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.883590 32360 net.cpp:165] Memory required for data: 1236481500\nI0821 08:27:51.883641 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:51.883652 32360 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:27:51.883658 32360 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:27:51.883669 32360 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.883729 32360 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:51.883886 32360 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:27:51.883899 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.883904 32360 net.cpp:165] Memory required for data: 1240577500\nI0821 08:27:51.883913 32360 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:27:51.883922 32360 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:27:51.883929 32360 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:27:51.883936 32360 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:51.883947 32360 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:27:51.883975 32360 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 08:27:51.883985 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.883988 32360 net.cpp:165] Memory required for data: 1244673500\nI0821 08:27:51.883994 32360 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:27:51.884006 32360 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:27:51.884011 32360 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:27:51.884018 32360 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.884028 32360 net.cpp:150] Setting up L2_b9_relu\nI0821 08:27:51.884035 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.884047 32360 net.cpp:165] Memory required for data: 1248769500\nI0821 08:27:51.884053 32360 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.884060 32360 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.884066 32360 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:27:51.884076 32360 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:51.884086 32360 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:51.884135 32360 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:51.884150 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.884156 32360 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:51.884160 32360 net.cpp:165] Memory required for data: 1256961500\nI0821 08:27:51.884166 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:27:51.884177 32360 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:27:51.884183 32360 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:51.884193 32360 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:27:51.884703 32360 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 08:27:51.884718 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.884723 32360 net.cpp:165] Memory required for data: 1257985500\nI0821 08:27:51.884732 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:27:51.884744 32360 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:27:51.884752 32360 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:27:51.884759 32360 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:27:51.885035 32360 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:27:51.885049 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.885054 32360 net.cpp:165] Memory required for data: 1259009500\nI0821 08:27:51.885064 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:51.885077 32360 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:27:51.885083 32360 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:27:51.885092 32360 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.885150 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:51.885320 32360 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:27:51.885334 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.885339 32360 net.cpp:165] Memory required for data: 1260033500\nI0821 08:27:51.885349 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:27:51.885360 32360 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:27:51.885366 32360 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:27:51.885376 32360 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:51.885386 32360 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:27:51.885395 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.885398 32360 net.cpp:165] Memory required for data: 1261057500\nI0821 08:27:51.885403 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:27:51.885414 32360 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 08:27:51.885421 32360 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:27:51.885432 32360 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:27:51.885917 32360 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:27:51.885931 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.885936 32360 net.cpp:165] Memory required for data: 1262081500\nI0821 08:27:51.885944 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:27:51.885957 32360 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:27:51.885963 32360 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:27:51.885972 32360 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:27:51.886255 32360 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:27:51.886279 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.886286 32360 net.cpp:165] Memory required for data: 1263105500\nI0821 08:27:51.886296 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:51.886306 32360 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:27:51.886312 32360 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:27:51.886319 32360 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:27:51.886380 32360 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:51.886543 32360 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:27:51.886559 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.886564 32360 net.cpp:165] Memory required for data: 1264129500\nI0821 08:27:51.886574 32360 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:27:51.886584 32360 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:27:51.886590 32360 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:51.886598 32360 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:27:51.886636 32360 net.cpp:150] Setting up L3_b1_pool\nI0821 08:27:51.886647 32360 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 08:27:51.886652 32360 net.cpp:165] Memory required for data: 1265153500\nI0821 08:27:51.886657 32360 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:27:51.886667 32360 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:27:51.886672 32360 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:27:51.886679 32360 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:27:51.886687 32360 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:27:51.886723 32360 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:27:51.886734 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.886739 32360 net.cpp:165] Memory required for data: 1266177500\nI0821 08:27:51.886744 32360 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:27:51.886754 32360 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:27:51.886759 32360 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:27:51.886766 32360 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:27:51.886775 32360 net.cpp:150] Setting up L3_b1_relu\nI0821 08:27:51.886782 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.886786 32360 net.cpp:165] Memory required for data: 1267201500\nI0821 08:27:51.886791 32360 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:27:51.886801 32360 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:27:51.886812 32360 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:27:51.888051 32360 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:27:51.888069 32360 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:51.888075 32360 net.cpp:165] Memory required for data: 1268225500\nI0821 08:27:51.888080 32360 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:27:51.888093 32360 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:27:51.888100 32360 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:27:51.888108 32360 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
08:27:51.888115 32360 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:27:51.888160 32360 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:27:51.888170 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.888175 32360 net.cpp:165] Memory required for data: 1270273500\nI0821 08:27:51.888180 32360 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.888188 32360 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.888195 32360 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:27:51.888204 32360 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:51.888216 32360 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:51.888278 32360 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:51.888290 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.888305 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.888310 32360 net.cpp:165] Memory required for data: 1274369500\nI0821 08:27:51.888315 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:27:51.888329 32360 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:27:51.888337 32360 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:51.888346 32360 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:27:51.889400 32360 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:27:51.889415 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.889420 32360 net.cpp:165] Memory required for data: 1276417500\nI0821 08:27:51.889428 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:27:51.889441 32360 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:27:51.889448 32360 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:27:51.889459 32360 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
08:27:51.889734 32360 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:27:51.889747 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.889752 32360 net.cpp:165] Memory required for data: 1278465500\nI0821 08:27:51.889763 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:51.889772 32360 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:27:51.889778 32360 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:27:51.889786 32360 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.889848 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:51.890010 32360 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:27:51.890023 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.890028 32360 net.cpp:165] Memory required for data: 1280513500\nI0821 08:27:51.890038 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:27:51.890046 32360 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:27:51.890053 32360 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:27:51.890064 32360 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:51.890074 32360 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:27:51.890080 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.890085 32360 net.cpp:165] Memory required for data: 1282561500\nI0821 08:27:51.890089 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:27:51.890103 32360 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:27:51.890110 32360 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:27:51.890118 32360 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:27:51.891161 32360 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:27:51.891176 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.891181 32360 net.cpp:165] Memory required for data: 1284609500\nI0821 08:27:51.891191 32360 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 08:27:51.891202 32360 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:27:51.891209 32360 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:27:51.891218 32360 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:27:51.891506 32360 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:27:51.891520 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.891525 32360 net.cpp:165] Memory required for data: 1286657500\nI0821 08:27:51.891535 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:51.891548 32360 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:27:51.891556 32360 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:27:51.891563 32360 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:27:51.891625 32360 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:51.891789 32360 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:27:51.891803 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.891808 32360 net.cpp:165] Memory required for data: 1288705500\nI0821 08:27:51.891824 32360 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:27:51.891836 32360 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:27:51.891844 32360 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:27:51.891850 32360 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:51.891861 32360 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:27:51.891894 32360 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:27:51.891903 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.891908 32360 net.cpp:165] Memory required for data: 1290753500\nI0821 08:27:51.891913 32360 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:27:51.891924 32360 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:27:51.891932 32360 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:27:51.891938 32360 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:27:51.891947 32360 net.cpp:150] Setting up L3_b2_relu\nI0821 08:27:51.891955 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.891959 32360 net.cpp:165] Memory required for data: 1292801500\nI0821 08:27:51.891964 32360 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.891973 32360 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.891978 32360 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:27:51.891984 32360 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:51.891994 32360 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:51.892045 32360 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:51.892056 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.892062 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.892067 32360 net.cpp:165] Memory required for data: 1296897500\nI0821 08:27:51.892072 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:27:51.892086 32360 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:27:51.892092 32360 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:51.892102 32360 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:27:51.893144 32360 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:27:51.893158 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.893163 32360 net.cpp:165] Memory required for data: 1298945500\nI0821 08:27:51.893172 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:27:51.893184 32360 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:27:51.893191 32360 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:27:51.893203 32360 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:27:51.893479 32360 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:27:51.893492 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.893497 32360 net.cpp:165] Memory required for data: 1300993500\nI0821 08:27:51.893508 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:51.893517 32360 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:27:51.893523 32360 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:27:51.893534 32360 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.893594 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:51.893752 32360 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:27:51.893764 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.893769 32360 net.cpp:165] Memory required for data: 1303041500\nI0821 08:27:51.893779 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:27:51.893787 32360 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:27:51.893793 32360 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:27:51.893803 32360 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:51.893813 32360 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:27:51.893827 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.893832 32360 net.cpp:165] Memory required for data: 1305089500\nI0821 08:27:51.893837 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:27:51.893852 32360 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:27:51.893859 32360 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:27:51.893868 32360 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:27:51.894917 32360 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:27:51.894932 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.894937 32360 net.cpp:165] Memory required for data: 1307137500\nI0821 08:27:51.894945 32360 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:27:51.894958 32360 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:27:51.894965 32360 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:27:51.894973 32360 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:27:51.895246 32360 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:27:51.895262 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895269 32360 net.cpp:165] Memory required for data: 1309185500\nI0821 08:27:51.895280 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:51.895292 32360 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:27:51.895299 32360 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:27:51.895310 32360 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:27:51.895371 32360 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:51.895531 32360 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:27:51.895545 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895550 32360 net.cpp:165] Memory required for data: 1311233500\nI0821 08:27:51.895558 32360 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:27:51.895567 32360 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:27:51.895575 32360 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:27:51.895581 32360 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:51.895592 32360 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:27:51.895625 32360 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:27:51.895634 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895639 32360 net.cpp:165] Memory required for data: 1313281500\nI0821 08:27:51.895644 32360 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:27:51.895655 32360 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:27:51.895663 32360 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 08:27:51.895669 32360 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:27:51.895678 32360 net.cpp:150] Setting up L3_b3_relu\nI0821 08:27:51.895685 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895690 32360 net.cpp:165] Memory required for data: 1315329500\nI0821 08:27:51.895695 32360 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.895702 32360 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.895707 32360 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:27:51.895714 32360 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:51.895725 32360 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:51.895774 32360 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:51.895787 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895792 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.895797 32360 net.cpp:165] Memory required for data: 1319425500\nI0821 08:27:51.895802 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:27:51.895817 32360 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:27:51.895823 32360 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:51.895839 32360 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:27:51.896888 32360 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:27:51.896903 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.896908 32360 net.cpp:165] Memory required for data: 1321473500\nI0821 08:27:51.896917 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:27:51.896929 32360 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:27:51.896936 32360 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 08:27:51.896947 32360 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:27:51.897218 32360 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:27:51.897231 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.897236 32360 net.cpp:165] Memory required for data: 1323521500\nI0821 08:27:51.897246 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:51.897255 32360 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:27:51.897266 32360 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:27:51.897279 32360 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.897338 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:51.897503 32360 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:27:51.897516 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.897521 32360 net.cpp:165] Memory required for data: 1325569500\nI0821 08:27:51.897531 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:27:51.897539 32360 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:27:51.897545 32360 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:27:51.897555 32360 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:51.897565 32360 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:27:51.897573 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.897578 32360 net.cpp:165] Memory required for data: 1327617500\nI0821 08:27:51.897583 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:27:51.897598 32360 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:27:51.897603 32360 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:27:51.897613 32360 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:27:51.899636 32360 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:27:51.899653 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.899658 32360 net.cpp:165] Memory 
required for data: 1329665500\nI0821 08:27:51.899668 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:27:51.899682 32360 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:27:51.899688 32360 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:27:51.899699 32360 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:27:51.899976 32360 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:27:51.899991 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.899996 32360 net.cpp:165] Memory required for data: 1331713500\nI0821 08:27:51.900005 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:51.900014 32360 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:27:51.900022 32360 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:27:51.900032 32360 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:27:51.900091 32360 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:51.900254 32360 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:27:51.900274 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.900279 32360 net.cpp:165] Memory required for data: 1333761500\nI0821 08:27:51.900288 32360 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:27:51.900298 32360 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:27:51.900305 32360 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:27:51.900312 32360 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:51.900323 32360 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:27:51.900368 32360 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:27:51.900379 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.900383 32360 net.cpp:165] Memory required for data: 1335809500\nI0821 08:27:51.900389 32360 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:27:51.900398 32360 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
08:27:51.900403 32360 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:27:51.900413 32360 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:27:51.900424 32360 net.cpp:150] Setting up L3_b4_relu\nI0821 08:27:51.900431 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.900435 32360 net.cpp:165] Memory required for data: 1337857500\nI0821 08:27:51.900440 32360 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.900447 32360 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.900452 32360 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:27:51.900460 32360 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:51.900470 32360 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:51.900519 32360 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:51.900532 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.900538 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.900542 32360 net.cpp:165] Memory required for data: 1341953500\nI0821 08:27:51.900548 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:27:51.900560 32360 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:27:51.900566 32360 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:51.900578 32360 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:27:51.901615 32360 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:27:51.901630 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.901635 32360 net.cpp:165] Memory required for data: 1344001500\nI0821 08:27:51.901643 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:27:51.901654 32360 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
08:27:51.901660 32360 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:27:51.901672 32360 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:27:51.901948 32360 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:27:51.901963 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.901969 32360 net.cpp:165] Memory required for data: 1346049500\nI0821 08:27:51.901980 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:51.901989 32360 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:27:51.901995 32360 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:27:51.902004 32360 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.902065 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:51.902228 32360 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:27:51.902242 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.902247 32360 net.cpp:165] Memory required for data: 1348097500\nI0821 08:27:51.902256 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:27:51.902273 32360 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:27:51.902281 32360 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:27:51.902288 32360 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:51.902298 32360 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:27:51.902305 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.902310 32360 net.cpp:165] Memory required for data: 1350145500\nI0821 08:27:51.902315 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:27:51.902328 32360 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:27:51.902334 32360 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:27:51.902357 32360 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:27:51.903389 32360 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:27:51.903403 32360 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:27:51.903409 32360 net.cpp:165] Memory required for data: 1352193500\nI0821 08:27:51.903419 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:27:51.903430 32360 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:27:51.903437 32360 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:27:51.903448 32360 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:27:51.903717 32360 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:27:51.903729 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.903734 32360 net.cpp:165] Memory required for data: 1354241500\nI0821 08:27:51.903744 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:51.903753 32360 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:27:51.903760 32360 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:27:51.903770 32360 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:27:51.903829 32360 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:51.903991 32360 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:27:51.904005 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.904009 32360 net.cpp:165] Memory required for data: 1356289500\nI0821 08:27:51.904018 32360 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:27:51.904028 32360 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:27:51.904034 32360 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:27:51.904044 32360 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:51.904053 32360 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:27:51.904089 32360 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:27:51.904101 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.904106 32360 net.cpp:165] Memory required for data: 1358337500\nI0821 08:27:51.904112 32360 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
08:27:51.904120 32360 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:27:51.904126 32360 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:27:51.904136 32360 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:27:51.904146 32360 net.cpp:150] Setting up L3_b5_relu\nI0821 08:27:51.904153 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.904158 32360 net.cpp:165] Memory required for data: 1360385500\nI0821 08:27:51.904163 32360 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.904170 32360 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.904175 32360 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:27:51.904182 32360 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:51.904192 32360 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:51.904243 32360 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:51.904255 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.904268 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.904273 32360 net.cpp:165] Memory required for data: 1364481500\nI0821 08:27:51.904278 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:27:51.904290 32360 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:27:51.904297 32360 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:51.904309 32360 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:27:51.905338 32360 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:27:51.905351 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.905357 32360 net.cpp:165] Memory required for data: 1366529500\nI0821 08:27:51.905372 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:27:51.905382 
32360 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:27:51.905390 32360 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:27:51.905400 32360 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:27:51.905675 32360 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:27:51.905689 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.905692 32360 net.cpp:165] Memory required for data: 1368577500\nI0821 08:27:51.905704 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:51.905712 32360 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:27:51.905719 32360 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:27:51.905726 32360 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.905788 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:51.905947 32360 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:27:51.905962 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.905968 32360 net.cpp:165] Memory required for data: 1370625500\nI0821 08:27:51.905977 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:27:51.905985 32360 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:27:51.905992 32360 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:27:51.905999 32360 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:51.906009 32360 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:27:51.906016 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.906020 32360 net.cpp:165] Memory required for data: 1372673500\nI0821 08:27:51.906025 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:27:51.906039 32360 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:27:51.906045 32360 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:27:51.906054 32360 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:27:51.907079 32360 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
08:27:51.907094 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907099 32360 net.cpp:165] Memory required for data: 1374721500\nI0821 08:27:51.907109 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:27:51.907122 32360 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:27:51.907130 32360 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:27:51.907141 32360 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:27:51.907421 32360 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:27:51.907434 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907440 32360 net.cpp:165] Memory required for data: 1376769500\nI0821 08:27:51.907450 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:51.907459 32360 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:27:51.907466 32360 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:27:51.907476 32360 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:27:51.907536 32360 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:51.907694 32360 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:27:51.907707 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907712 32360 net.cpp:165] Memory required for data: 1378817500\nI0821 08:27:51.907721 32360 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:27:51.907734 32360 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:27:51.907742 32360 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:27:51.907748 32360 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:51.907757 32360 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:27:51.907793 32360 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:27:51.907805 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907810 32360 net.cpp:165] Memory required for data: 1380865500\nI0821 08:27:51.907815 32360 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:27:51.907824 32360 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:27:51.907836 32360 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:27:51.907846 32360 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:27:51.907856 32360 net.cpp:150] Setting up L3_b6_relu\nI0821 08:27:51.907863 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907868 32360 net.cpp:165] Memory required for data: 1382913500\nI0821 08:27:51.907873 32360 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.907881 32360 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.907886 32360 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:27:51.907893 32360 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:51.907902 32360 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:51.907956 32360 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:51.907968 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907974 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.907979 32360 net.cpp:165] Memory required for data: 1387009500\nI0821 08:27:51.907984 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:27:51.907996 32360 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:27:51.908002 32360 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:51.908015 32360 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:27:51.909047 32360 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:27:51.909062 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.909067 32360 net.cpp:165] Memory required for data: 1389057500\nI0821 08:27:51.909076 32360 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:27:51.909088 32360 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:27:51.909096 32360 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:27:51.909103 32360 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:27:51.909386 32360 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:27:51.909399 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.909404 32360 net.cpp:165] Memory required for data: 1391105500\nI0821 08:27:51.909415 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:51.909425 32360 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:27:51.909430 32360 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:27:51.909437 32360 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.909502 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:51.909660 32360 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:27:51.909677 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.909682 32360 net.cpp:165] Memory required for data: 1393153500\nI0821 08:27:51.909692 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:27:51.909725 32360 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:27:51.909734 32360 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:27:51.909742 32360 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:51.909752 32360 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:27:51.909759 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.909765 32360 net.cpp:165] Memory required for data: 1395201500\nI0821 08:27:51.909770 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:27:51.909783 32360 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:27:51.909790 32360 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:27:51.909798 32360 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
08:27:51.910836 32360 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:27:51.910851 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.910856 32360 net.cpp:165] Memory required for data: 1397249500\nI0821 08:27:51.910866 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:27:51.910881 32360 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:27:51.910888 32360 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:27:51.910900 32360 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:27:51.911176 32360 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:27:51.911190 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911195 32360 net.cpp:165] Memory required for data: 1399297500\nI0821 08:27:51.911204 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:51.911213 32360 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:27:51.911221 32360 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:27:51.911227 32360 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:27:51.911295 32360 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:51.911456 32360 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:27:51.911473 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911478 32360 net.cpp:165] Memory required for data: 1401345500\nI0821 08:27:51.911487 32360 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:27:51.911496 32360 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:27:51.911502 32360 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:27:51.911509 32360 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:51.911517 32360 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:27:51.911556 32360 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:27:51.911568 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911573 32360 net.cpp:165] Memory 
required for data: 1403393500\nI0821 08:27:51.911578 32360 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:27:51.911586 32360 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:27:51.911593 32360 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:27:51.911600 32360 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:27:51.911610 32360 net.cpp:150] Setting up L3_b7_relu\nI0821 08:27:51.911617 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911622 32360 net.cpp:165] Memory required for data: 1405441500\nI0821 08:27:51.911626 32360 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.911633 32360 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.911638 32360 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:27:51.911649 32360 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:51.911659 32360 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:51.911705 32360 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:51.911717 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911723 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.911728 32360 net.cpp:165] Memory required for data: 1409537500\nI0821 08:27:51.911733 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:27:51.911747 32360 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:27:51.911754 32360 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:51.911764 32360 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:27:51.913784 32360 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:27:51.913801 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.913806 32360 net.cpp:165] Memory required for data: 
1411585500\nI0821 08:27:51.913816 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:27:51.913830 32360 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:27:51.913837 32360 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:27:51.913846 32360 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:27:51.914124 32360 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:27:51.914145 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.914150 32360 net.cpp:165] Memory required for data: 1413633500\nI0821 08:27:51.914161 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:51.914170 32360 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:27:51.914177 32360 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:27:51.914186 32360 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.914249 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:51.914423 32360 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:27:51.914438 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.914443 32360 net.cpp:165] Memory required for data: 1415681500\nI0821 08:27:51.914451 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:27:51.914463 32360 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:27:51.914469 32360 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:27:51.914475 32360 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:51.914489 32360 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:27:51.914496 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.914500 32360 net.cpp:165] Memory required for data: 1417729500\nI0821 08:27:51.914505 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:27:51.914516 32360 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:27:51.914525 32360 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:27:51.914535 32360 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:27:51.915565 32360 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:27:51.915580 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.915585 32360 net.cpp:165] Memory required for data: 1419777500\nI0821 08:27:51.915593 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:27:51.915606 32360 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:27:51.915613 32360 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:27:51.915621 32360 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:27:51.915895 32360 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:27:51.915908 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.915913 32360 net.cpp:165] Memory required for data: 1421825500\nI0821 08:27:51.915923 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:51.915935 32360 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:27:51.915942 32360 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:27:51.915951 32360 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:27:51.916012 32360 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:51.916173 32360 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:27:51.916187 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.916191 32360 net.cpp:165] Memory required for data: 1423873500\nI0821 08:27:51.916200 32360 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:27:51.916213 32360 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:27:51.916219 32360 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:27:51.916226 32360 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:51.916237 32360 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:27:51.916277 32360 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:27:51.916290 32360 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:27:51.916294 32360 net.cpp:165] Memory required for data: 1425921500\nI0821 08:27:51.916299 32360 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:27:51.916312 32360 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:27:51.916319 32360 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:27:51.916326 32360 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:27:51.916337 32360 net.cpp:150] Setting up L3_b8_relu\nI0821 08:27:51.916343 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.916348 32360 net.cpp:165] Memory required for data: 1427969500\nI0821 08:27:51.916360 32360 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.916368 32360 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.916373 32360 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:27:51.916381 32360 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:51.916391 32360 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:51.916493 32360 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:51.916513 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.916527 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.916534 32360 net.cpp:165] Memory required for data: 1432065500\nI0821 08:27:51.916544 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:27:51.916568 32360 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:27:51.916579 32360 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:51.916592 32360 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:27:51.917639 32360 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:27:51.917654 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:27:51.917659 32360 net.cpp:165] Memory required for data: 1434113500\nI0821 08:27:51.917668 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:27:51.917681 32360 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:27:51.917688 32360 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:27:51.917699 32360 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:27:51.917973 32360 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:27:51.917986 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.917991 32360 net.cpp:165] Memory required for data: 1436161500\nI0821 08:27:51.918002 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:51.918011 32360 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:27:51.918018 32360 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:27:51.918025 32360 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.918090 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:51.918251 32360 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:27:51.918270 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.918275 32360 net.cpp:165] Memory required for data: 1438209500\nI0821 08:27:51.918285 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:27:51.918293 32360 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:27:51.918300 32360 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:27:51.918310 32360 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:51.918321 32360 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:27:51.918329 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.918332 32360 net.cpp:165] Memory required for data: 1440257500\nI0821 08:27:51.918337 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:27:51.918351 32360 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:27:51.918357 32360 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 08:27:51.918366 32360 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:27:51.919397 32360 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:27:51.919411 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.919416 32360 net.cpp:165] Memory required for data: 1442305500\nI0821 08:27:51.919425 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:27:51.919438 32360 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:27:51.919445 32360 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:27:51.919453 32360 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:27:51.919725 32360 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:27:51.919739 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.919750 32360 net.cpp:165] Memory required for data: 1444353500\nI0821 08:27:51.919761 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:51.919776 32360 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:27:51.919782 32360 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:27:51.919790 32360 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:27:51.919853 32360 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:51.920017 32360 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:27:51.920029 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.920034 32360 net.cpp:165] Memory required for data: 1446401500\nI0821 08:27:51.920043 32360 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:27:51.920055 32360 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:27:51.920063 32360 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:27:51.920070 32360 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:51.920080 32360 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:27:51.920115 32360 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
08:27:51.920127 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.920132 32360 net.cpp:165] Memory required for data: 1448449500\nI0821 08:27:51.920138 32360 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:27:51.920150 32360 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:27:51.920156 32360 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:27:51.920163 32360 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:27:51.920172 32360 net.cpp:150] Setting up L3_b9_relu\nI0821 08:27:51.920181 32360 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:51.920184 32360 net.cpp:165] Memory required for data: 1450497500\nI0821 08:27:51.920189 32360 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:27:51.920197 32360 net.cpp:100] Creating Layer post_pool\nI0821 08:27:51.920203 32360 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:27:51.920210 32360 net.cpp:408] post_pool -> post_pool\nI0821 08:27:51.920248 32360 net.cpp:150] Setting up post_pool\nI0821 08:27:51.920266 32360 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:27:51.920272 32360 net.cpp:165] Memory required for data: 1450529500\nI0821 08:27:51.920277 32360 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:27:51.920289 32360 net.cpp:100] Creating Layer post_FC\nI0821 08:27:51.920296 32360 net.cpp:434] post_FC <- post_pool\nI0821 08:27:51.920305 32360 net.cpp:408] post_FC -> post_FC_top\nI0821 08:27:51.920480 32360 net.cpp:150] Setting up post_FC\nI0821 08:27:51.920495 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.920500 32360 net.cpp:165] Memory required for data: 1450534500\nI0821 08:27:51.920508 32360 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:27:51.920516 32360 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:27:51.920522 32360 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:27:51.920533 32360 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 08:27:51.920543 32360 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:27:51.920591 32360 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:27:51.920603 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.920609 32360 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:51.920614 32360 net.cpp:165] Memory required for data: 1450544500\nI0821 08:27:51.920619 32360 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:27:51.920630 32360 net.cpp:100] Creating Layer accuracy\nI0821 08:27:51.920637 32360 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:27:51.920644 32360 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:27:51.920651 32360 net.cpp:408] accuracy -> accuracy\nI0821 08:27:51.920665 32360 net.cpp:150] Setting up accuracy\nI0821 08:27:51.920671 32360 net.cpp:157] Top shape: (1)\nI0821 08:27:51.920682 32360 net.cpp:165] Memory required for data: 1450544504\nI0821 08:27:51.920688 32360 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:51.920696 32360 net.cpp:100] Creating Layer loss\nI0821 08:27:51.920701 32360 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:27:51.920708 32360 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:27:51.920716 32360 net.cpp:408] loss -> loss\nI0821 08:27:51.920727 32360 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:51.920852 32360 net.cpp:150] Setting up loss\nI0821 08:27:51.920866 32360 net.cpp:157] Top shape: (1)\nI0821 08:27:51.920871 32360 net.cpp:160]     with loss weight 1\nI0821 08:27:51.920887 32360 net.cpp:165] Memory required for data: 1450544508\nI0821 08:27:51.920893 32360 net.cpp:226] loss needs backward computation.\nI0821 08:27:51.920899 32360 net.cpp:228] accuracy does not need backward computation.\nI0821 08:27:51.920905 32360 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:27:51.920912 32360 net.cpp:226] post_FC needs backward 
computation.\nI0821 08:27:51.920917 32360 net.cpp:226] post_pool needs backward computation.\nI0821 08:27:51.920920 32360 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:27:51.920925 32360 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.920931 32360 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.920935 32360 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.920940 32360 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.920945 32360 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.920950 32360 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.920954 32360 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.920960 32360 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.920965 32360 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:27:51.920970 32360 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:27:51.920975 32360 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.920980 32360 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.920985 32360 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.920990 32360 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.920995 32360 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.921000 32360 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:27:51.921005 32360 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.921010 32360 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.921015 32360 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:27:51.921020 32360 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:27:51.921025 32360 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 08:27:51.921031 32360 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.921036 32360 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.921041 32360 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.921046 32360 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.921051 32360 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.921056 32360 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.921061 32360 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.921066 32360 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:27:51.921072 32360 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:27:51.921077 32360 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.921082 32360 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.921087 32360 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.921098 32360 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.921103 32360 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.921108 32360 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.921113 32360 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.921118 32360 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.921124 32360 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:27:51.921129 32360 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:27:51.921134 32360 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:27:51.921139 32360 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.921144 32360 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.921150 32360 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.921155 32360 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.921160 32360 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.921165 32360 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.921170 32360 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.921178 32360 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:27:51.921185 32360 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:27:51.921190 32360 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.921195 32360 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.921200 32360 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.921206 32360 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.921211 32360 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.921216 32360 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.921221 32360 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.921226 32360 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.921231 32360 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:27:51.921236 32360 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:27:51.921241 32360 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:27:51.921247 32360 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.921252 32360 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.921257 32360 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.921270 32360 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.921275 32360 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:27:51.921281 
32360 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.921286 32360 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.921291 32360 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:27:51.921298 32360 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:27:51.921303 32360 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.921308 32360 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.921314 32360 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.921319 32360 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.921324 32360 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.921329 32360 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.921334 32360 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.921340 32360 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.921345 32360 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:27:51.921351 32360 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:27:51.921362 32360 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:27:51.921368 32360 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:27:51.921373 32360 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.921380 32360 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:27:51.921386 32360 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.921391 32360 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.921396 32360 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.921401 32360 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.921406 32360 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
08:27:51.921411 32360 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.921416 32360 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.921422 32360 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:27:51.921427 32360 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:27:51.921432 32360 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.921438 32360 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.921443 32360 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.921448 32360 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.921458 32360 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.921463 32360 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.921468 32360 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.921473 32360 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.921479 32360 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:27:51.921485 32360 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:27:51.921490 32360 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.921496 32360 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.921501 32360 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.921506 32360 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.921512 32360 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.921517 32360 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:27:51.921522 32360 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.921528 32360 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.921533 32360 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:27:51.921540 32360 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:27:51.921545 32360 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:27:51.921550 32360 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.921555 32360 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.921561 32360 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.921566 32360 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.921571 32360 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.921576 32360 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.921581 32360 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.921587 32360 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:27:51.921592 32360 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:27:51.921598 32360 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.921604 32360 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.921609 32360 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.921614 32360 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.921625 32360 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.921631 32360 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.921636 32360 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.921641 32360 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.921648 32360 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:27:51.921653 32360 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:27:51.921658 32360 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:27:51.921664 32360 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.921670 32360 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.921675 32360 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.921681 32360 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.921686 32360 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.921691 32360 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.921696 32360 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.921702 32360 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:27:51.921707 32360 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:27:51.921713 32360 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.921720 32360 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.921725 32360 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.921730 32360 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.921736 32360 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.921741 32360 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.921746 32360 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.921751 32360 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.921757 32360 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:27:51.921763 32360 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:27:51.921769 32360 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:27:51.921775 32360 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.921780 32360 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.921787 32360 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.921792 32360 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.921797 32360 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:27:51.921802 32360 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.921808 32360 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.921813 32360 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:27:51.921819 32360 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:27:51.921824 32360 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.921830 32360 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.921836 32360 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.921845 32360 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.921851 32360 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.921857 32360 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.921864 32360 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.921869 32360 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.921875 32360 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:27:51.921880 32360 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:27:51.921886 32360 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:27:51.921896 32360 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:27:51.921903 32360 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.921910 32360 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:27:51.921916 32360 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.921921 32360 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.921926 32360 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.921932 32360 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.921937 32360 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:27:51.921942 32360 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.921948 32360 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.921954 32360 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:27:51.921960 32360 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:27:51.921965 32360 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:27:51.921972 32360 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:27:51.921977 32360 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:27:51.921983 32360 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:27:51.921988 32360 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:27:51.921993 32360 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:27:51.921999 32360 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:27:51.922004 32360 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:27:51.922010 32360 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:27:51.922016 32360 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:27:51.922022 32360 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:27:51.922029 32360 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:27:51.922034 32360 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:27:51.922039 32360 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:27:51.922045 32360 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:27:51.922051 32360 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:27:51.922056 32360 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:27:51.922062 32360 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:27:51.922068 32360 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:27:51.922073 32360 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:27:51.922080 32360 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:27:51.922086 32360 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:27:51.922091 32360 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:27:51.922097 32360 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:27:51.922102 32360 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:27:51.922108 32360 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:27:51.922114 32360 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:27:51.922119 32360 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:27:51.922125 32360 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:27:51.922132 32360 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:27:51.922137 32360 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:27:51.922165 32360 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:27:51.922173 32360 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:27:51.922179 32360 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:27:51.922185 32360 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:27:51.922191 32360 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:27:51.922204 32360 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:27:51.922209 32360 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:27:51.922215 32360 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:27:51.922221 32360 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:27:51.922227 32360 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:27:51.922236 32360 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:27:51.922242 32360 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:27:51.922247 32360 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:27:51.922253 32360 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:27:51.922264 32360 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:27:51.922271 32360 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:27:51.922277 32360 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:27:51.922283 32360 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:27:51.922289 32360 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:27:51.922296 32360 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:27:51.922302 32360 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:27:51.922307 32360 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:27:51.922313 32360 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:27:51.922319 32360 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:27:51.922324 32360 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:27:51.922330 32360 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:27:51.922335 32360 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:27:51.922341 32360 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:27:51.922348 32360 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:27:51.922353 32360 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:27:51.922359 32360 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:27:51.922365 32360 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:27:51.922370 32360 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:27:51.922376 32360 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:27:51.922381 32360 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:27:51.922387 32360 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:27:51.922394 32360 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:27:51.922399 32360 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:27:51.922405 32360 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:27:51.922410 32360 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:27:51.922417 32360 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:27:51.922423 32360 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:27:51.922428 32360 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:27:51.922435 32360 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:27:51.922441 32360 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:27:51.922446 32360 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:27:51.922451 32360 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:27:51.922457 32360 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:27:51.922463 32360 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:27:51.922469 32360 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:27:51.922477 32360 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:27:51.922483 32360 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:27:51.922495 32360 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:27:51.922502 32360 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:27:51.922508 32360 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:27:51.922513 32360 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:27:51.922519 32360 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:27:51.922525 32360 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:27:51.922531 32360 net.cpp:226] pre_relu needs backward computation.\nI0821 08:27:51.922536 32360 net.cpp:226] pre_scale needs backward computation.\nI0821 08:27:51.922541 32360 net.cpp:226] pre_bn needs backward computation.\nI0821 08:27:51.922546 32360 net.cpp:226] pre_conv needs backward computation.\nI0821 08:27:51.922554 32360 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:27:51.922560 32360 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:27:51.922564 32360 net.cpp:270] This network produces output accuracy\nI0821 08:27:51.922571 32360 net.cpp:270] This network produces output loss\nI0821 08:27:51.922906 32360 net.cpp:283] Network initialization done.\nI0821 08:27:51.923902 32360 solver.cpp:60] Solver scaffolding done.\nI0821 08:27:52.148077 32360 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:27:52.511056 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:52.511134 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:52.518121 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:52.737201 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:52.737301 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:52.771096 32360 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 08:27:52.771179 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:53.219846 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:53.219914 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:53.227980 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:53.469507 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:53.469614 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:53.520604 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:53.520707 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:54.032121 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:54.032173 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:54.041913 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:54.315611 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:54.315750 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:54.386675 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:54.386808 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:54.470018 32360 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:27:54.954793 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:54.954846 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 08:27:54.964459 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:55.249929 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:55.250082 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:55.340512 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:55.340662 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:55.984622 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:55.984705 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:55.995322 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:56.306852 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:56.307075 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:56.419862 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:56.420073 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:57.138219 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:57.138278 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:57.149545 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:57.481472 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:57.481675 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:57.613543 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:57.613746 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
08:27:58.400008 32360 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:58.400085 32360 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:58.412434 32360 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:58.555862 32387 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:58.702934 32374 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:58.829721 32360 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:58.829962 32360 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:58.982961 32360 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:58.983207 32360 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:59.156105 32360 parallel.cpp:425] Starting Optimization\nI0821 08:27:59.157392 32360 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:27:59.157407 32360 solver.cpp:280] Learning Rate Policy: multistep\nI0821 08:27:59.162693 32360 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 08:29:19.954583 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:29:19.954933 32360 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 08:29:23.945814 32360 solver.cpp:228] Iteration 0, loss = 3.94191\nI0821 08:29:23.945868 32360 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0821 08:29:23.945884 32360 solver.cpp:244]     Train net output #1: loss = 3.94191 (* 1 = 3.94191 loss)\nI0821 08:29:23.946095 32360 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0821 08:31:41.427881 32360 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 08:33:01.640352 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10836\nI0821 08:33:01.640661 32360 solver.cpp:404]     Test net output #1: loss = 5.9275 (* 1 = 5.9275 loss)\nI0821 08:33:02.926869 32360 
solver.cpp:228] Iteration 100, loss = 2.29587\nI0821 08:33:02.926933 32360 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0821 08:33:02.926950 32360 solver.cpp:244]     Train net output #1: loss = 2.29587 (* 1 = 2.29587 loss)\nI0821 08:33:03.057564 32360 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0821 08:35:20.283113 32360 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 08:36:41.215878 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10152\nI0821 08:36:41.216189 32360 solver.cpp:404]     Test net output #1: loss = 2.30353 (* 1 = 2.30353 loss)\nI0821 08:36:42.515341 32360 solver.cpp:228] Iteration 200, loss = 2.29096\nI0821 08:36:42.515386 32360 solver.cpp:244]     Train net output #0: accuracy = 0.152\nI0821 08:36:42.515403 32360 solver.cpp:244]     Train net output #1: loss = 2.29096 (* 1 = 2.29096 loss)\nI0821 08:36:42.626368 32360 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0821 08:38:59.832490 32360 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 08:40:21.583380 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10032\nI0821 08:40:21.583689 32360 solver.cpp:404]     Test net output #1: loss = 2.30292 (* 1 = 2.30292 loss)\nI0821 08:40:22.897159 32360 solver.cpp:228] Iteration 300, loss = 2.28801\nI0821 08:40:22.897204 32360 solver.cpp:244]     Train net output #0: accuracy = 0.152\nI0821 08:40:22.897220 32360 solver.cpp:244]     Train net output #1: loss = 2.28801 (* 1 = 2.28801 loss)\nI0821 08:40:22.995241 32360 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0821 08:42:40.196913 32360 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 08:44:01.947881 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0821 08:44:01.948182 32360 solver.cpp:404]     Test net output #1: loss = 2.30312 (* 1 = 2.30312 loss)\nI0821 08:44:03.260746 32360 solver.cpp:228] Iteration 400, loss = 2.28582\nI0821 08:44:03.260788 32360 solver.cpp:244]     Train net output #0: accuracy = 0.152\nI0821 08:44:03.260805 32360 
solver.cpp:244]     Train net output #1: loss = 2.28582 (* 1 = 2.28582 loss)\nI0821 08:44:03.353302 32360 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0821 08:46:20.578107 32360 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 08:47:42.326465 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 08:47:42.326750 32360 solver.cpp:404]     Test net output #1: loss = 2.30368 (* 1 = 2.30368 loss)\nI0821 08:47:43.639232 32360 solver.cpp:228] Iteration 500, loss = 2.26627\nI0821 08:47:43.639273 32360 solver.cpp:244]     Train net output #0: accuracy = 0.096\nI0821 08:47:43.639291 32360 solver.cpp:244]     Train net output #1: loss = 2.26627 (* 1 = 2.26627 loss)\nI0821 08:47:43.736188 32360 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0821 08:50:01.066844 32360 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 08:51:22.803040 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15424\nI0821 08:51:22.803325 32360 solver.cpp:404]     Test net output #1: loss = 2.28918 (* 1 = 2.28918 loss)\nI0821 08:51:24.115557 32360 solver.cpp:228] Iteration 600, loss = 2.07891\nI0821 08:51:24.115600 32360 solver.cpp:244]     Train net output #0: accuracy = 0.288\nI0821 08:51:24.115617 32360 solver.cpp:244]     Train net output #1: loss = 2.07891 (* 1 = 2.07891 loss)\nI0821 08:51:24.209257 32360 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0821 08:53:41.401742 32360 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 08:55:03.152401 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1342\nI0821 08:55:03.152695 32360 solver.cpp:404]     Test net output #1: loss = 3.34108 (* 1 = 3.34108 loss)\nI0821 08:55:04.463850 32360 solver.cpp:228] Iteration 700, loss = 1.55564\nI0821 08:55:04.463892 32360 solver.cpp:244]     Train net output #0: accuracy = 0.392\nI0821 08:55:04.463909 32360 solver.cpp:244]     Train net output #1: loss = 1.55564 (* 1 = 1.55564 loss)\nI0821 08:55:04.567679 32360 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0821 
08:57:21.904178 32360 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 08:58:43.656796 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 08:58:43.657112 32360 solver.cpp:404]     Test net output #1: loss = 4.4991 (* 1 = 4.4991 loss)\nI0821 08:58:44.969976 32360 solver.cpp:228] Iteration 800, loss = 1.38611\nI0821 08:58:44.970017 32360 solver.cpp:244]     Train net output #0: accuracy = 0.464\nI0821 08:58:44.970034 32360 solver.cpp:244]     Train net output #1: loss = 1.38611 (* 1 = 1.38611 loss)\nI0821 08:58:45.065884 32360 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0821 09:01:02.288043 32360 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:02:24.033716 32360 solver.cpp:404]     Test net output #0: accuracy = 0.105\nI0821 09:02:24.034019 32360 solver.cpp:404]     Test net output #1: loss = 3.72036 (* 1 = 3.72036 loss)\nI0821 09:02:25.347075 32360 solver.cpp:228] Iteration 900, loss = 1.15854\nI0821 09:02:25.347118 32360 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0821 09:02:25.347136 32360 solver.cpp:244]     Train net output #1: loss = 1.15854 (* 1 = 1.15854 loss)\nI0821 09:02:25.445336 32360 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0821 09:04:42.691723 32360 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 09:06:04.453312 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1114\nI0821 09:06:04.453596 32360 solver.cpp:404]     Test net output #1: loss = 3.57103 (* 1 = 3.57103 loss)\nI0821 09:06:05.766659 32360 solver.cpp:228] Iteration 1000, loss = 1.14014\nI0821 09:06:05.766707 32360 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0821 09:06:05.766724 32360 solver.cpp:244]     Train net output #1: loss = 1.14014 (* 1 = 1.14014 loss)\nI0821 09:06:05.866422 32360 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0821 09:08:23.190847 32360 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 09:09:44.957829 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10228\nI0821 
09:09:44.958112 32360 solver.cpp:404]     Test net output #1: loss = 5.54667 (* 1 = 5.54667 loss)\nI0821 09:09:46.270365 32360 solver.cpp:228] Iteration 1100, loss = 0.833796\nI0821 09:09:46.270407 32360 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0821 09:09:46.270423 32360 solver.cpp:244]     Train net output #1: loss = 0.833796 (* 1 = 0.833796 loss)\nI0821 09:09:46.367156 32360 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0821 09:12:03.621296 32360 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 09:13:25.383757 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09992\nI0821 09:13:25.384037 32360 solver.cpp:404]     Test net output #1: loss = 12.1631 (* 1 = 12.1631 loss)\nI0821 09:13:26.696981 32360 solver.cpp:228] Iteration 1200, loss = 0.643556\nI0821 09:13:26.697023 32360 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 09:13:26.697039 32360 solver.cpp:244]     Train net output #1: loss = 0.643556 (* 1 = 0.643556 loss)\nI0821 09:13:26.802160 32360 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0821 09:15:44.009905 32360 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 09:17:05.782634 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10316\nI0821 09:17:05.782925 32360 solver.cpp:404]     Test net output #1: loss = 11.8655 (* 1 = 11.8655 loss)\nI0821 09:17:07.096088 32360 solver.cpp:228] Iteration 1300, loss = 0.475068\nI0821 09:17:07.096132 32360 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 09:17:07.096148 32360 solver.cpp:244]     Train net output #1: loss = 0.475068 (* 1 = 0.475068 loss)\nI0821 09:17:07.188673 32360 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0821 09:19:24.423725 32360 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 09:20:46.195370 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10952\nI0821 09:20:46.195672 32360 solver.cpp:404]     Test net output #1: loss = 15.3276 (* 1 = 15.3276 loss)\nI0821 09:20:47.508751 32360 solver.cpp:228] 
Iteration 1400, loss = 0.463062\nI0821 09:20:47.508795 32360 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 09:20:47.508810 32360 solver.cpp:244]     Train net output #1: loss = 0.463062 (* 1 = 0.463062 loss)\nI0821 09:20:47.601647 32360 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0821 09:23:04.775037 32360 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 09:24:26.462304 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1154\nI0821 09:24:26.462606 32360 solver.cpp:404]     Test net output #1: loss = 17.0091 (* 1 = 17.0091 loss)\nI0821 09:24:27.775517 32360 solver.cpp:228] Iteration 1500, loss = 0.21164\nI0821 09:24:27.775559 32360 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:24:27.775575 32360 solver.cpp:244]     Train net output #1: loss = 0.21164 (* 1 = 0.21164 loss)\nI0821 09:24:27.868217 32360 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0821 09:26:45.135124 32360 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 09:28:06.884749 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1134\nI0821 09:28:06.884990 32360 solver.cpp:404]     Test net output #1: loss = 20.2292 (* 1 = 20.2292 loss)\nI0821 09:28:08.196221 32360 solver.cpp:228] Iteration 1600, loss = 0.232995\nI0821 09:28:08.196265 32360 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:28:08.196281 32360 solver.cpp:244]     Train net output #1: loss = 0.232995 (* 1 = 0.232995 loss)\nI0821 09:28:08.293714 32360 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0821 09:30:25.524297 32360 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 09:31:46.842777 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13012\nI0821 09:31:46.842996 32360 solver.cpp:404]     Test net output #1: loss = 18.6074 (* 1 = 18.6074 loss)\nI0821 09:31:48.154855 32360 solver.cpp:228] Iteration 1700, loss = 0.0778886\nI0821 09:31:48.154899 32360 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 09:31:48.154916 32360 
solver.cpp:244]     Train net output #1: loss = 0.0778886 (* 1 = 0.0778886 loss)\nI0821 09:31:48.261575 32360 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0821 09:34:05.505062 32360 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 09:35:26.785735 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1134\nI0821 09:35:26.785975 32360 solver.cpp:404]     Test net output #1: loss = 17.1034 (* 1 = 17.1034 loss)\nI0821 09:35:28.098270 32360 solver.cpp:228] Iteration 1800, loss = 0.081717\nI0821 09:35:28.098318 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 09:35:28.098335 32360 solver.cpp:244]     Train net output #1: loss = 0.081717 (* 1 = 0.081717 loss)\nI0821 09:35:28.200466 32360 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0821 09:37:44.845333 32360 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 09:39:06.602473 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1104\nI0821 09:39:06.602708 32360 solver.cpp:404]     Test net output #1: loss = 16.1833 (* 1 = 16.1833 loss)\nI0821 09:39:07.915598 32360 solver.cpp:228] Iteration 1900, loss = 0.00180483\nI0821 09:39:07.915645 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:39:07.915668 32360 solver.cpp:244]     Train net output #1: loss = 0.00180482 (* 1 = 0.00180482 loss)\nI0821 09:39:08.012986 32360 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0821 09:41:25.197171 32360 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 09:42:46.862958 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09936\nI0821 09:42:46.863243 32360 solver.cpp:404]     Test net output #1: loss = 19.8354 (* 1 = 19.8354 loss)\nI0821 09:42:48.175158 32360 solver.cpp:228] Iteration 2000, loss = 0.000605187\nI0821 09:42:48.175204 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:42:48.175227 32360 solver.cpp:244]     Train net output #1: loss = 0.000605175 (* 1 = 0.000605175 loss)\nI0821 09:42:48.273778 32360 sgd_solver.cpp:166] Iteration 
2000, lr = 0.35\nI0821 09:45:05.503172 32360 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 09:46:27.082785 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0821 09:46:27.083008 32360 solver.cpp:404]     Test net output #1: loss = 18.6347 (* 1 = 18.6347 loss)\nI0821 09:46:28.395236 32360 solver.cpp:228] Iteration 2100, loss = 0.00064754\nI0821 09:46:28.395277 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:46:28.395292 32360 solver.cpp:244]     Train net output #1: loss = 0.000647527 (* 1 = 0.000647527 loss)\nI0821 09:46:28.495519 32360 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0821 09:48:45.799535 32360 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 09:50:07.284950 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0821 09:50:07.285173 32360 solver.cpp:404]     Test net output #1: loss = 17.0737 (* 1 = 17.0737 loss)\nI0821 09:50:08.596108 32360 solver.cpp:228] Iteration 2200, loss = 0.00054966\nI0821 09:50:08.596154 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:50:08.596176 32360 solver.cpp:244]     Train net output #1: loss = 0.000549647 (* 1 = 0.000549647 loss)\nI0821 09:50:08.693583 32360 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0821 09:52:26.001430 32360 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 09:53:47.269316 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1006\nI0821 09:53:47.269526 32360 solver.cpp:404]     Test net output #1: loss = 15.3965 (* 1 = 15.3965 loss)\nI0821 09:53:48.581286 32360 solver.cpp:228] Iteration 2300, loss = 0.000579643\nI0821 09:53:48.581331 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:53:48.581353 32360 solver.cpp:244]     Train net output #1: loss = 0.000579631 (* 1 = 0.000579631 loss)\nI0821 09:53:48.683017 32360 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0821 09:56:05.904752 32360 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 09:57:27.501554 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 09:57:27.501780 32360 solver.cpp:404]     Test net output #1: loss = 14.129 (* 1 = 14.129 loss)\nI0821 09:57:28.813364 32360 solver.cpp:228] Iteration 2400, loss = 0.000491342\nI0821 09:57:28.813417 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 09:57:28.813444 32360 solver.cpp:244]     Train net output #1: loss = 0.00049133 (* 1 = 0.00049133 loss)\nI0821 09:57:28.917860 32360 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0821 09:59:46.078021 32360 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 10:01:07.776366 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:01:07.776629 32360 solver.cpp:404]     Test net output #1: loss = 13.1551 (* 1 = 13.1551 loss)\nI0821 10:01:09.089674 32360 solver.cpp:228] Iteration 2500, loss = 0.000593367\nI0821 10:01:09.089735 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:01:09.089759 32360 solver.cpp:244]     Train net output #1: loss = 0.000593354 (* 1 = 0.000593354 loss)\nI0821 10:01:09.193086 32360 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0821 10:03:26.479987 32360 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 10:04:48.136286 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:04:48.136523 32360 solver.cpp:404]     Test net output #1: loss = 12.3307 (* 1 = 12.3307 loss)\nI0821 10:04:49.449179 32360 solver.cpp:228] Iteration 2600, loss = 0.000652651\nI0821 10:04:49.449234 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:04:49.449251 32360 solver.cpp:244]     Train net output #1: loss = 0.000652638 (* 1 = 0.000652638 loss)\nI0821 10:04:49.544791 32360 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0821 10:07:06.722995 32360 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 10:08:28.503489 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:08:28.503746 32360 solver.cpp:404]     Test net output #1: loss = 
11.684 (* 1 = 11.684 loss)\nI0821 10:08:29.815927 32360 solver.cpp:228] Iteration 2700, loss = 0.000654185\nI0821 10:08:29.815979 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:08:29.815996 32360 solver.cpp:244]     Train net output #1: loss = 0.000654172 (* 1 = 0.000654172 loss)\nI0821 10:08:29.918434 32360 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0821 10:10:47.142746 32360 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 10:12:08.932366 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:12:08.932649 32360 solver.cpp:404]     Test net output #1: loss = 11.0747 (* 1 = 11.0747 loss)\nI0821 10:12:10.245820 32360 solver.cpp:228] Iteration 2800, loss = 0.000564197\nI0821 10:12:10.245877 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:12:10.245903 32360 solver.cpp:244]     Train net output #1: loss = 0.000564185 (* 1 = 0.000564185 loss)\nI0821 10:12:10.344524 32360 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0821 10:14:27.550038 32360 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 10:15:49.326918 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:15:49.327186 32360 solver.cpp:404]     Test net output #1: loss = 10.2823 (* 1 = 10.2823 loss)\nI0821 10:15:50.639052 32360 solver.cpp:228] Iteration 2900, loss = 0.000583241\nI0821 10:15:50.639106 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:15:50.639123 32360 solver.cpp:244]     Train net output #1: loss = 0.000583228 (* 1 = 0.000583228 loss)\nI0821 10:15:50.742419 32360 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0821 10:18:07.981989 32360 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 10:19:29.746807 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:19:29.747071 32360 solver.cpp:404]     Test net output #1: loss = 9.66207 (* 1 = 9.66207 loss)\nI0821 10:19:31.058709 32360 solver.cpp:228] Iteration 3000, loss = 0.000621845\nI0821 10:19:31.058763 
32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:19:31.058780 32360 solver.cpp:244]     Train net output #1: loss = 0.000621832 (* 1 = 0.000621832 loss)\nI0821 10:19:31.160878 32360 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0821 10:21:48.265239 32360 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 10:23:10.018429 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:23:10.018738 32360 solver.cpp:404]     Test net output #1: loss = 8.96986 (* 1 = 8.96986 loss)\nI0821 10:23:11.330404 32360 solver.cpp:228] Iteration 3100, loss = 0.000588012\nI0821 10:23:11.330461 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:23:11.330479 32360 solver.cpp:244]     Train net output #1: loss = 0.000587999 (* 1 = 0.000587999 loss)\nI0821 10:23:11.433043 32360 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0821 10:25:28.645795 32360 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 10:26:50.431254 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:26:50.431520 32360 solver.cpp:404]     Test net output #1: loss = 8.24183 (* 1 = 8.24183 loss)\nI0821 10:26:51.743793 32360 solver.cpp:228] Iteration 3200, loss = 0.000562807\nI0821 10:26:51.743836 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:26:51.743852 32360 solver.cpp:244]     Train net output #1: loss = 0.000562794 (* 1 = 0.000562794 loss)\nI0821 10:26:51.843654 32360 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0821 10:29:09.008011 32360 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 10:30:30.788530 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:30:30.788822 32360 solver.cpp:404]     Test net output #1: loss = 7.55872 (* 1 = 7.55872 loss)\nI0821 10:30:32.101074 32360 solver.cpp:228] Iteration 3300, loss = 0.000593226\nI0821 10:30:32.101127 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:30:32.101145 32360 solver.cpp:244]     Train net output #1: 
loss = 0.000593214 (* 1 = 0.000593214 loss)\nI0821 10:30:32.194456 32360 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0821 10:32:49.368222 32360 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 10:34:11.139235 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:34:11.139514 32360 solver.cpp:404]     Test net output #1: loss = 7.01164 (* 1 = 7.01164 loss)\nI0821 10:34:12.451660 32360 solver.cpp:228] Iteration 3400, loss = 0.000587138\nI0821 10:34:12.451714 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:34:12.451731 32360 solver.cpp:244]     Train net output #1: loss = 0.000587126 (* 1 = 0.000587126 loss)\nI0821 10:34:12.550185 32360 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0821 10:36:29.750143 32360 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 10:37:51.513690 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:37:51.513978 32360 solver.cpp:404]     Test net output #1: loss = 6.33187 (* 1 = 6.33187 loss)\nI0821 10:37:52.826361 32360 solver.cpp:228] Iteration 3500, loss = 0.000590136\nI0821 10:37:52.826416 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:37:52.826432 32360 solver.cpp:244]     Train net output #1: loss = 0.000590124 (* 1 = 0.000590124 loss)\nI0821 10:37:52.930018 32360 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0821 10:40:10.141216 32360 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 10:41:31.920691 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:41:31.920971 32360 solver.cpp:404]     Test net output #1: loss = 5.71822 (* 1 = 5.71822 loss)\nI0821 10:41:33.234308 32360 solver.cpp:228] Iteration 3600, loss = 0.00055416\nI0821 10:41:33.234359 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:41:33.234376 32360 solver.cpp:244]     Train net output #1: loss = 0.000554147 (* 1 = 0.000554147 loss)\nI0821 10:41:33.338461 32360 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0821 
10:43:50.575711 32360 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0821 10:45:12.337855 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:45:12.338160 32360 solver.cpp:404]     Test net output #1: loss = 5.13938 (* 1 = 5.13938 loss)\nI0821 10:45:13.650734 32360 solver.cpp:228] Iteration 3700, loss = 0.000544798\nI0821 10:45:13.650787 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:45:13.650804 32360 solver.cpp:244]     Train net output #1: loss = 0.000544785 (* 1 = 0.000544785 loss)\nI0821 10:45:13.744117 32360 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0821 10:47:31.064023 32360 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 10:48:52.837765 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 10:48:52.838049 32360 solver.cpp:404]     Test net output #1: loss = 4.63466 (* 1 = 4.63466 loss)\nI0821 10:48:54.150632 32360 solver.cpp:228] Iteration 3800, loss = 0.000490138\nI0821 10:48:54.150684 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:48:54.150701 32360 solver.cpp:244]     Train net output #1: loss = 0.000490125 (* 1 = 0.000490125 loss)\nI0821 10:48:54.256847 32360 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0821 10:51:11.484513 32360 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 10:52:33.256727 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 10:52:33.257016 32360 solver.cpp:404]     Test net output #1: loss = 4.12901 (* 1 = 4.12901 loss)\nI0821 10:52:34.569711 32360 solver.cpp:228] Iteration 3900, loss = 0.000527372\nI0821 10:52:34.569762 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:52:34.569779 32360 solver.cpp:244]     Train net output #1: loss = 0.000527359 (* 1 = 0.000527359 loss)\nI0821 10:52:34.671669 32360 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0821 10:54:52.036386 32360 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 10:56:13.813184 32360 solver.cpp:404]     Test net 
output #0: accuracy = 0.09952\nI0821 10:56:13.813493 32360 solver.cpp:404]     Test net output #1: loss = 3.75419 (* 1 = 3.75419 loss)\nI0821 10:56:15.126324 32360 solver.cpp:228] Iteration 4000, loss = 0.000489743\nI0821 10:56:15.126377 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:56:15.126394 32360 solver.cpp:244]     Train net output #1: loss = 0.00048973 (* 1 = 0.00048973 loss)\nI0821 10:56:15.226863 32360 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0821 10:58:32.401350 32360 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 10:59:54.188670 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10032\nI0821 10:59:54.188976 32360 solver.cpp:404]     Test net output #1: loss = 3.45531 (* 1 = 3.45531 loss)\nI0821 10:59:55.501471 32360 solver.cpp:228] Iteration 4100, loss = 0.000430097\nI0821 10:59:55.501523 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 10:59:55.501540 32360 solver.cpp:244]     Train net output #1: loss = 0.000430084 (* 1 = 0.000430084 loss)\nI0821 10:59:55.596410 32360 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0821 11:02:12.789294 32360 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 11:03:34.556638 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 11:03:34.556919 32360 solver.cpp:404]     Test net output #1: loss = 3.23238 (* 1 = 3.23238 loss)\nI0821 11:03:35.869573 32360 solver.cpp:228] Iteration 4200, loss = 0.000430215\nI0821 11:03:35.869628 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:03:35.869644 32360 solver.cpp:244]     Train net output #1: loss = 0.000430202 (* 1 = 0.000430202 loss)\nI0821 11:03:35.966843 32360 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0821 11:05:53.146549 32360 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 11:07:14.913147 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10224\nI0821 11:07:14.913463 32360 solver.cpp:404]     Test net output #1: loss = 3.05839 (* 1 = 3.05839 
loss)\nI0821 11:07:16.226153 32360 solver.cpp:228] Iteration 4300, loss = 0.000432432\nI0821 11:07:16.226208 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:07:16.226224 32360 solver.cpp:244]     Train net output #1: loss = 0.000432419 (* 1 = 0.000432419 loss)\nI0821 11:07:16.328590 32360 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0821 11:09:33.556717 32360 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 11:10:55.321686 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09868\nI0821 11:10:55.321964 32360 solver.cpp:404]     Test net output #1: loss = 2.93393 (* 1 = 2.93393 loss)\nI0821 11:10:56.633059 32360 solver.cpp:228] Iteration 4400, loss = 0.00040372\nI0821 11:10:56.633112 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:10:56.633128 32360 solver.cpp:244]     Train net output #1: loss = 0.000403707 (* 1 = 0.000403707 loss)\nI0821 11:10:56.736666 32360 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0821 11:13:13.956672 32360 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 11:14:35.745365 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09192\nI0821 11:14:35.745648 32360 solver.cpp:404]     Test net output #1: loss = 2.82963 (* 1 = 2.82963 loss)\nI0821 11:14:37.058240 32360 solver.cpp:228] Iteration 4500, loss = 0.00034874\nI0821 11:14:37.058295 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:14:37.058311 32360 solver.cpp:244]     Train net output #1: loss = 0.000348727 (* 1 = 0.000348727 loss)\nI0821 11:14:37.166456 32360 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0821 11:16:54.428388 32360 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 11:18:16.184010 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0828\nI0821 11:18:16.184319 32360 solver.cpp:404]     Test net output #1: loss = 2.76452 (* 1 = 2.76452 loss)\nI0821 11:18:17.495687 32360 solver.cpp:228] Iteration 4600, loss = 0.000350633\nI0821 11:18:17.495731 32360 solver.cpp:244]    
 Train net output #0: accuracy = 1\nI0821 11:18:17.495746 32360 solver.cpp:244]     Train net output #1: loss = 0.000350621 (* 1 = 0.000350621 loss)\nI0821 11:18:17.599225 32360 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0821 11:20:34.686801 32360 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 11:21:56.472538 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09692\nI0821 11:21:56.472851 32360 solver.cpp:404]     Test net output #1: loss = 2.71094 (* 1 = 2.71094 loss)\nI0821 11:21:57.786178 32360 solver.cpp:228] Iteration 4700, loss = 0.000351378\nI0821 11:21:57.786232 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:21:57.786247 32360 solver.cpp:244]     Train net output #1: loss = 0.000351366 (* 1 = 0.000351366 loss)\nI0821 11:21:57.889497 32360 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0821 11:24:15.091022 32360 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 11:25:36.864787 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10244\nI0821 11:25:36.865090 32360 solver.cpp:404]     Test net output #1: loss = 2.6748 (* 1 = 2.6748 loss)\nI0821 11:25:38.176609 32360 solver.cpp:228] Iteration 4800, loss = 0.000341459\nI0821 11:25:38.176653 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:25:38.176669 32360 solver.cpp:244]     Train net output #1: loss = 0.000341447 (* 1 = 0.000341447 loss)\nI0821 11:25:38.273041 32360 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0821 11:27:55.545482 32360 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 11:29:17.438063 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09992\nI0821 11:29:17.438374 32360 solver.cpp:404]     Test net output #1: loss = 2.6428 (* 1 = 2.6428 loss)\nI0821 11:29:18.750808 32360 solver.cpp:228] Iteration 4900, loss = 0.00032282\nI0821 11:29:18.750852 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:29:18.750869 32360 solver.cpp:244]     Train net output #1: loss = 0.000322808 (* 1 = 
0.000322808 loss)\nI0821 11:29:18.846788 32360 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0821 11:31:36.038853 32360 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 11:32:57.958248 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09784\nI0821 11:32:57.958539 32360 solver.cpp:404]     Test net output #1: loss = 2.6139 (* 1 = 2.6139 loss)\nI0821 11:32:59.271351 32360 solver.cpp:228] Iteration 5000, loss = 0.000333732\nI0821 11:32:59.271404 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:32:59.271420 32360 solver.cpp:244]     Train net output #1: loss = 0.000333719 (* 1 = 0.000333719 loss)\nI0821 11:32:59.372890 32360 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0821 11:35:16.481942 32360 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 11:36:38.380156 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09656\nI0821 11:36:38.380467 32360 solver.cpp:404]     Test net output #1: loss = 2.58362 (* 1 = 2.58362 loss)\nI0821 11:36:39.693356 32360 solver.cpp:228] Iteration 5100, loss = 0.000327748\nI0821 11:36:39.693410 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:36:39.693428 32360 solver.cpp:244]     Train net output #1: loss = 0.000327736 (* 1 = 0.000327736 loss)\nI0821 11:36:39.792790 32360 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0821 11:38:56.989209 32360 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 11:40:18.858201 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09724\nI0821 11:40:18.858495 32360 solver.cpp:404]     Test net output #1: loss = 2.56066 (* 1 = 2.56066 loss)\nI0821 11:40:20.171636 32360 solver.cpp:228] Iteration 5200, loss = 0.00029613\nI0821 11:40:20.171692 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:40:20.171710 32360 solver.cpp:244]     Train net output #1: loss = 0.000296117 (* 1 = 0.000296117 loss)\nI0821 11:40:20.267988 32360 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0821 11:42:37.452191 32360 
solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 11:43:59.237536 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09796\nI0821 11:43:59.237830 32360 solver.cpp:404]     Test net output #1: loss = 2.53582 (* 1 = 2.53582 loss)\nI0821 11:44:00.550415 32360 solver.cpp:228] Iteration 5300, loss = 0.0002873\nI0821 11:44:00.550468 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:44:00.550485 32360 solver.cpp:244]     Train net output #1: loss = 0.000287287 (* 1 = 0.000287287 loss)\nI0821 11:44:00.650550 32360 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0821 11:46:17.252871 32360 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 11:47:38.577237 32360 solver.cpp:404]     Test net output #0: accuracy = 0.099\nI0821 11:47:38.577498 32360 solver.cpp:404]     Test net output #1: loss = 2.51591 (* 1 = 2.51591 loss)\nI0821 11:47:39.889693 32360 solver.cpp:228] Iteration 5400, loss = 0.000324613\nI0821 11:47:39.889747 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:47:39.889763 32360 solver.cpp:244]     Train net output #1: loss = 0.000324601 (* 1 = 0.000324601 loss)\nI0821 11:47:39.988137 32360 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0821 11:49:57.225854 32360 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 11:51:18.534482 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10068\nI0821 11:51:18.534732 32360 solver.cpp:404]     Test net output #1: loss = 2.4945 (* 1 = 2.4945 loss)\nI0821 11:51:19.847385 32360 solver.cpp:228] Iteration 5500, loss = 0.000280267\nI0821 11:51:19.847440 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:51:19.847455 32360 solver.cpp:244]     Train net output #1: loss = 0.000280254 (* 1 = 0.000280254 loss)\nI0821 11:51:19.943501 32360 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0821 11:53:36.589056 32360 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 11:54:57.994854 32360 solver.cpp:404]     Test net output #0: accuracy = 
0.10244\nI0821 11:54:57.995059 32360 solver.cpp:404]     Test net output #1: loss = 2.47787 (* 1 = 2.47787 loss)\nI0821 11:54:59.308202 32360 solver.cpp:228] Iteration 5600, loss = 0.000281407\nI0821 11:54:59.308255 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:54:59.308272 32360 solver.cpp:244]     Train net output #1: loss = 0.000281394 (* 1 = 0.000281394 loss)\nI0821 11:54:59.413110 32360 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0821 11:57:16.672853 32360 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 11:58:38.389125 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1058\nI0821 11:58:38.389358 32360 solver.cpp:404]     Test net output #1: loss = 2.46424 (* 1 = 2.46424 loss)\nI0821 11:58:39.702086 32360 solver.cpp:228] Iteration 5700, loss = 0.000297591\nI0821 11:58:39.702142 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:58:39.702157 32360 solver.cpp:244]     Train net output #1: loss = 0.000297578 (* 1 = 0.000297578 loss)\nI0821 11:58:39.792691 32360 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0821 12:00:57.080924 32360 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 12:02:18.542946 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10932\nI0821 12:02:18.543210 32360 solver.cpp:404]     Test net output #1: loss = 2.45365 (* 1 = 2.45365 loss)\nI0821 12:02:19.854575 32360 solver.cpp:228] Iteration 5800, loss = 0.000293185\nI0821 12:02:19.854620 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:02:19.854636 32360 solver.cpp:244]     Train net output #1: loss = 0.000293172 (* 1 = 0.000293172 loss)\nI0821 12:02:19.955355 32360 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0821 12:04:37.193354 32360 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 12:05:58.979630 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1006\nI0821 12:05:58.979945 32360 solver.cpp:404]     Test net output #1: loss = 2.44654 (* 1 = 2.44654 loss)\nI0821 
12:06:00.291442 32360 solver.cpp:228] Iteration 5900, loss = 0.000289064\nI0821 12:06:00.291496 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:06:00.291512 32360 solver.cpp:244]     Train net output #1: loss = 0.000289051 (* 1 = 0.000289051 loss)\nI0821 12:06:00.395241 32360 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0821 12:08:17.735512 32360 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 12:09:39.535993 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10008\nI0821 12:09:39.536279 32360 solver.cpp:404]     Test net output #1: loss = 2.44066 (* 1 = 2.44066 loss)\nI0821 12:09:40.848783 32360 solver.cpp:228] Iteration 6000, loss = 0.000286284\nI0821 12:09:40.848825 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:09:40.848842 32360 solver.cpp:244]     Train net output #1: loss = 0.000286271 (* 1 = 0.000286271 loss)\nI0821 12:09:40.951830 32360 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0821 12:11:58.174870 32360 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 12:13:19.955025 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1004\nI0821 12:13:19.955337 32360 solver.cpp:404]     Test net output #1: loss = 2.43759 (* 1 = 2.43759 loss)\nI0821 12:13:21.267441 32360 solver.cpp:228] Iteration 6100, loss = 0.000296435\nI0821 12:13:21.267495 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:13:21.267510 32360 solver.cpp:244]     Train net output #1: loss = 0.000296423 (* 1 = 0.000296423 loss)\nI0821 12:13:21.371673 32360 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0821 12:15:38.108997 32360 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 12:16:59.894562 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1\nI0821 12:16:59.894876 32360 solver.cpp:404]     Test net output #1: loss = 2.43525 (* 1 = 2.43525 loss)\nI0821 12:17:01.207736 32360 solver.cpp:228] Iteration 6200, loss = 0.000284617\nI0821 12:17:01.207790 32360 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0821 12:17:01.207806 32360 solver.cpp:244]     Train net output #1: loss = 0.000284604 (* 1 = 0.000284604 loss)\nI0821 12:17:01.310672 32360 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0821 12:19:18.562547 32360 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 12:20:40.348865 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0821 12:20:40.349160 32360 solver.cpp:404]     Test net output #1: loss = 2.43277 (* 1 = 2.43277 loss)\nI0821 12:20:41.661819 32360 solver.cpp:228] Iteration 6300, loss = 0.0002877\nI0821 12:20:41.661873 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:20:41.661890 32360 solver.cpp:244]     Train net output #1: loss = 0.000287688 (* 1 = 0.000287688 loss)\nI0821 12:20:41.766667 32360 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0821 12:22:58.411439 32360 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 12:24:20.175295 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09996\nI0821 12:24:20.175611 32360 solver.cpp:404]     Test net output #1: loss = 2.43063 (* 1 = 2.43063 loss)\nI0821 12:24:21.487965 32360 solver.cpp:228] Iteration 6400, loss = 0.000288008\nI0821 12:24:21.488010 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:24:21.488025 32360 solver.cpp:244]     Train net output #1: loss = 0.000287996 (* 1 = 0.000287996 loss)\nI0821 12:24:21.587111 32360 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0821 12:26:38.835983 32360 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 12:28:00.608589 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0821 12:28:00.608897 32360 solver.cpp:404]     Test net output #1: loss = 2.42996 (* 1 = 2.42996 loss)\nI0821 12:28:01.921926 32360 solver.cpp:228] Iteration 6500, loss = 0.000278196\nI0821 12:28:01.921978 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:28:01.921994 32360 solver.cpp:244]     Train net output #1: loss = 0.000278183 (* 1 = 0.000278183 
loss)\nI0821 12:28:02.019516 32360 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0821 12:30:19.255020 32360 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 12:31:41.013113 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 12:31:41.013401 32360 solver.cpp:404]     Test net output #1: loss = 2.42799 (* 1 = 2.42799 loss)\nI0821 12:31:42.325718 32360 solver.cpp:228] Iteration 6600, loss = 0.000295143\nI0821 12:31:42.325773 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:31:42.325788 32360 solver.cpp:244]     Train net output #1: loss = 0.000295131 (* 1 = 0.000295131 loss)\nI0821 12:31:42.424501 32360 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0821 12:33:59.669786 32360 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 12:35:21.431584 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 12:35:21.431898 32360 solver.cpp:404]     Test net output #1: loss = 2.42688 (* 1 = 2.42688 loss)\nI0821 12:35:22.744453 32360 solver.cpp:228] Iteration 6700, loss = 0.00028128\nI0821 12:35:22.744510 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:35:22.744527 32360 solver.cpp:244]     Train net output #1: loss = 0.000281267 (* 1 = 0.000281267 loss)\nI0821 12:35:22.844830 32360 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0821 12:37:40.099313 32360 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 12:39:01.874337 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 12:39:01.874637 32360 solver.cpp:404]     Test net output #1: loss = 2.42216 (* 1 = 2.42216 loss)\nI0821 12:39:03.188030 32360 solver.cpp:228] Iteration 6800, loss = 0.000270404\nI0821 12:39:03.188071 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:39:03.188087 32360 solver.cpp:244]     Train net output #1: loss = 0.000270392 (* 1 = 0.000270392 loss)\nI0821 12:39:03.291188 32360 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0821 12:41:20.530210 32360 solver.cpp:337] 
Iteration 6900, Testing net (#0)\nI0821 12:42:42.272439 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 12:42:42.272720 32360 solver.cpp:404]     Test net output #1: loss = 2.42239 (* 1 = 2.42239 loss)\nI0821 12:42:43.585247 32360 solver.cpp:228] Iteration 6900, loss = 0.000268382\nI0821 12:42:43.585299 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:42:43.585315 32360 solver.cpp:244]     Train net output #1: loss = 0.00026837 (* 1 = 0.00026837 loss)\nI0821 12:42:43.690306 32360 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0821 12:45:00.308068 32360 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 12:46:22.073317 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 12:46:22.073649 32360 solver.cpp:404]     Test net output #1: loss = 2.41765 (* 1 = 2.41765 loss)\nI0821 12:46:23.385108 32360 solver.cpp:228] Iteration 7000, loss = 0.000264275\nI0821 12:46:23.385160 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:46:23.385176 32360 solver.cpp:244]     Train net output #1: loss = 0.000264262 (* 1 = 0.000264262 loss)\nI0821 12:46:23.486466 32360 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0821 12:48:40.670661 32360 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 12:50:02.394644 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 12:50:02.394906 32360 solver.cpp:404]     Test net output #1: loss = 2.41415 (* 1 = 2.41415 loss)\nI0821 12:50:03.706498 32360 solver.cpp:228] Iteration 7100, loss = 0.00025941\nI0821 12:50:03.706550 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:50:03.706567 32360 solver.cpp:244]     Train net output #1: loss = 0.000259397 (* 1 = 0.000259397 loss)\nI0821 12:50:03.806030 32360 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0821 12:52:20.524610 32360 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 12:53:41.783840 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 
12:53:41.784090 32360 solver.cpp:404]     Test net output #1: loss = 2.41024 (* 1 = 2.41024 loss)\nI0821 12:53:43.094882 32360 solver.cpp:228] Iteration 7200, loss = 0.00026267\nI0821 12:53:43.094934 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:53:43.094950 32360 solver.cpp:244]     Train net output #1: loss = 0.000262657 (* 1 = 0.000262657 loss)\nI0821 12:53:43.198606 32360 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0821 12:55:59.771497 32360 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 12:57:21.195991 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 12:57:21.196244 32360 solver.cpp:404]     Test net output #1: loss = 2.40767 (* 1 = 2.40767 loss)\nI0821 12:57:22.507748 32360 solver.cpp:228] Iteration 7300, loss = 0.000261334\nI0821 12:57:22.507802 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:57:22.507819 32360 solver.cpp:244]     Train net output #1: loss = 0.000261322 (* 1 = 0.000261322 loss)\nI0821 12:57:22.609650 32360 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0821 12:59:39.952505 32360 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 13:01:01.087587 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 13:01:01.087829 32360 solver.cpp:404]     Test net output #1: loss = 2.40411 (* 1 = 2.40411 loss)\nI0821 13:01:02.398392 32360 solver.cpp:228] Iteration 7400, loss = 0.000254004\nI0821 13:01:02.398437 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:01:02.398453 32360 solver.cpp:244]     Train net output #1: loss = 0.000253992 (* 1 = 0.000253992 loss)\nI0821 13:01:02.503595 32360 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0821 13:03:19.074005 32360 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 13:04:40.397706 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 13:04:40.397917 32360 solver.cpp:404]     Test net output #1: loss = 2.40033 (* 1 = 2.40033 loss)\nI0821 13:04:41.709022 32360 
solver.cpp:228] Iteration 7500, loss = 0.000249321\nI0821 13:04:41.709076 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:04:41.709092 32360 solver.cpp:244]     Train net output #1: loss = 0.000249309 (* 1 = 0.000249309 loss)\nI0821 13:04:41.811645 32360 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0821 13:06:58.398416 32360 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 13:08:19.630363 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 13:08:19.630579 32360 solver.cpp:404]     Test net output #1: loss = 2.39588 (* 1 = 2.39588 loss)\nI0821 13:08:20.941814 32360 solver.cpp:228] Iteration 7600, loss = 0.00024486\nI0821 13:08:20.941853 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:08:20.941867 32360 solver.cpp:244]     Train net output #1: loss = 0.000244847 (* 1 = 0.000244847 loss)\nI0821 13:08:21.045231 32360 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0821 13:10:38.328850 32360 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 13:11:59.567207 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 13:11:59.567445 32360 solver.cpp:404]     Test net output #1: loss = 2.39312 (* 1 = 2.39312 loss)\nI0821 13:12:00.878854 32360 solver.cpp:228] Iteration 7700, loss = 0.000241693\nI0821 13:12:00.878898 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:12:00.878914 32360 solver.cpp:244]     Train net output #1: loss = 0.00024168 (* 1 = 0.00024168 loss)\nI0821 13:12:00.982219 32360 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0821 13:14:17.678614 32360 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 13:15:39.019924 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 13:15:39.020180 32360 solver.cpp:404]     Test net output #1: loss = 2.3891 (* 1 = 2.3891 loss)\nI0821 13:15:40.331359 32360 solver.cpp:228] Iteration 7800, loss = 0.000233625\nI0821 13:15:40.331405 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 13:15:40.331421 32360 solver.cpp:244]     Train net output #1: loss = 0.000233613 (* 1 = 0.000233613 loss)\nI0821 13:15:40.441995 32360 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0821 13:17:57.198967 32360 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 13:19:18.594197 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 13:19:18.594413 32360 solver.cpp:404]     Test net output #1: loss = 2.38542 (* 1 = 2.38542 loss)\nI0821 13:19:19.906433 32360 solver.cpp:228] Iteration 7900, loss = 0.000238452\nI0821 13:19:19.906484 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:19:19.906500 32360 solver.cpp:244]     Train net output #1: loss = 0.000238439 (* 1 = 0.000238439 loss)\nI0821 13:19:20.013856 32360 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0821 13:21:37.254212 32360 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 13:22:58.525475 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 13:22:58.525712 32360 solver.cpp:404]     Test net output #1: loss = 2.38208 (* 1 = 2.38208 loss)\nI0821 13:22:59.837057 32360 solver.cpp:228] Iteration 8000, loss = 0.000230567\nI0821 13:22:59.837103 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:22:59.837119 32360 solver.cpp:244]     Train net output #1: loss = 0.000230554 (* 1 = 0.000230554 loss)\nI0821 13:22:59.939352 32360 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0821 13:25:16.613692 32360 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 13:26:37.745920 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 13:26:37.746134 32360 solver.cpp:404]     Test net output #1: loss = 2.38039 (* 1 = 2.38039 loss)\nI0821 13:26:39.057735 32360 solver.cpp:228] Iteration 8100, loss = 0.000230789\nI0821 13:26:39.057777 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:26:39.057792 32360 solver.cpp:244]     Train net output #1: loss = 0.000230776 (* 1 = 0.000230776 loss)\nI0821 
13:26:39.161140 32360 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0821 13:28:55.806841 32360 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 13:30:17.196039 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 13:30:17.196254 32360 solver.cpp:404]     Test net output #1: loss = 2.37921 (* 1 = 2.37921 loss)\nI0821 13:30:18.508904 32360 solver.cpp:228] Iteration 8200, loss = 0.000227357\nI0821 13:30:18.508958 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:30:18.508975 32360 solver.cpp:244]     Train net output #1: loss = 0.000227345 (* 1 = 0.000227345 loss)\nI0821 13:30:18.607985 32360 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0821 13:32:35.274132 32360 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 13:33:56.605068 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 13:33:56.605343 32360 solver.cpp:404]     Test net output #1: loss = 2.37848 (* 1 = 2.37848 loss)\nI0821 13:33:57.917708 32360 solver.cpp:228] Iteration 8300, loss = 0.000226377\nI0821 13:33:57.917762 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:33:57.917779 32360 solver.cpp:244]     Train net output #1: loss = 0.000226364 (* 1 = 0.000226364 loss)\nI0821 13:33:58.010527 32360 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0821 13:36:14.720571 32360 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 13:37:35.902669 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 13:37:35.902918 32360 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0821 13:37:37.214418 32360 solver.cpp:228] Iteration 8400, loss = 2.03089\nI0821 13:37:37.214475 32360 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI0821 13:37:37.214493 32360 solver.cpp:244]     Train net output #1: loss = 2.03089 (* 1 = 2.03089 loss)\nI0821 13:37:37.322170 32360 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0821 13:39:53.970355 32360 solver.cpp:337] Iteration 8500, Testing 
net (#0)\nI0821 13:41:15.384398 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 13:41:15.384706 32360 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0821 13:41:16.696175 32360 solver.cpp:228] Iteration 8500, loss = 1.8704\nI0821 13:41:16.696228 32360 solver.cpp:244]     Train net output #0: accuracy = 0.304\nI0821 13:41:16.696245 32360 solver.cpp:244]     Train net output #1: loss = 1.8704 (* 1 = 1.8704 loss)\nI0821 13:41:16.794893 32360 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0821 13:43:33.463366 32360 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 13:44:54.734467 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 13:44:54.734728 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0821 13:44:56.045821 32360 solver.cpp:228] Iteration 8600, loss = 1.60092\nI0821 13:44:56.045873 32360 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI0821 13:44:56.045891 32360 solver.cpp:244]     Train net output #1: loss = 1.60092 (* 1 = 1.60092 loss)\nI0821 13:44:56.149303 32360 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0821 13:47:12.778677 32360 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 13:48:34.017346 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0821 13:48:34.017549 32360 solver.cpp:404]     Test net output #1: loss = 78.5715 (* 1 = 78.5715 loss)\nI0821 13:48:35.329193 32360 solver.cpp:228] Iteration 8700, loss = 1.50016\nI0821 13:48:35.329246 32360 solver.cpp:244]     Train net output #0: accuracy = 0.424\nI0821 13:48:35.329264 32360 solver.cpp:244]     Train net output #1: loss = 1.50016 (* 1 = 1.50016 loss)\nI0821 13:48:35.432865 32360 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0821 13:50:52.077141 32360 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 13:52:13.413422 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09472\nI0821 13:52:13.413712 32360 solver.cpp:404]     Test net output 
#1: loss = 78.0641 (* 1 = 78.0641 loss)\nI0821 13:52:14.724179 32360 solver.cpp:228] Iteration 8800, loss = 1.38467\nI0821 13:52:14.724231 32360 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI0821 13:52:14.724249 32360 solver.cpp:244]     Train net output #1: loss = 1.38467 (* 1 = 1.38467 loss)\nI0821 13:52:14.826134 32360 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0821 13:54:32.138768 32360 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 13:55:53.547554 32360 solver.cpp:404]     Test net output #0: accuracy = 0.099\nI0821 13:55:53.547808 32360 solver.cpp:404]     Test net output #1: loss = 71.3339 (* 1 = 71.3339 loss)\nI0821 13:55:54.858891 32360 solver.cpp:228] Iteration 8900, loss = 1.18262\nI0821 13:55:54.858944 32360 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0821 13:55:54.858963 32360 solver.cpp:244]     Train net output #1: loss = 1.18262 (* 1 = 1.18262 loss)\nI0821 13:55:54.960115 32360 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0821 13:58:11.585919 32360 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 13:59:33.289631 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16824\nI0821 13:59:33.289924 32360 solver.cpp:404]     Test net output #1: loss = 16.308 (* 1 = 16.308 loss)\nI0821 13:59:34.600896 32360 solver.cpp:228] Iteration 9000, loss = 1.15867\nI0821 13:59:34.600950 32360 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0821 13:59:34.600967 32360 solver.cpp:244]     Train net output #1: loss = 1.15867 (* 1 = 1.15867 loss)\nI0821 13:59:34.701822 32360 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0821 14:01:52.049787 32360 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 14:03:13.702131 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10844\nI0821 14:03:13.702464 32360 solver.cpp:404]     Test net output #1: loss = 20.6152 (* 1 = 20.6152 loss)\nI0821 14:03:15.014678 32360 solver.cpp:228] Iteration 9100, loss = 1.01146\nI0821 14:03:15.014730 32360 
solver.cpp:244]     Train net output #0: accuracy = 0.632\nI0821 14:03:15.014747 32360 solver.cpp:244]     Train net output #1: loss = 1.01146 (* 1 = 1.01146 loss)\nI0821 14:03:15.115486 32360 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0821 14:05:32.454159 32360 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 14:06:54.207957 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15548\nI0821 14:06:54.208253 32360 solver.cpp:404]     Test net output #1: loss = 9.92188 (* 1 = 9.92188 loss)\nI0821 14:06:55.519309 32360 solver.cpp:228] Iteration 9200, loss = 0.834948\nI0821 14:06:55.519362 32360 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0821 14:06:55.519378 32360 solver.cpp:244]     Train net output #1: loss = 0.834948 (* 1 = 0.834948 loss)\nI0821 14:06:55.614255 32360 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0821 14:09:12.897874 32360 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 14:10:34.651104 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14804\nI0821 14:10:34.651376 32360 solver.cpp:404]     Test net output #1: loss = 14.4446 (* 1 = 14.4446 loss)\nI0821 14:10:35.962455 32360 solver.cpp:228] Iteration 9300, loss = 0.716928\nI0821 14:10:35.962513 32360 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 14:10:35.962530 32360 solver.cpp:244]     Train net output #1: loss = 0.716928 (* 1 = 0.716928 loss)\nI0821 14:10:36.059033 32360 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0821 14:12:53.314903 32360 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 14:14:15.094857 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16648\nI0821 14:14:15.095156 32360 solver.cpp:404]     Test net output #1: loss = 10.5015 (* 1 = 10.5015 loss)\nI0821 14:14:16.406054 32360 solver.cpp:228] Iteration 9400, loss = 0.642643\nI0821 14:14:16.406106 32360 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 14:14:16.406122 32360 solver.cpp:244]     Train net output #1: loss = 0.642643 (* 1 
= 0.642643 loss)\nI0821 14:14:16.505053 32360 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0821 14:16:33.738543 32360 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 14:17:55.512398 32360 solver.cpp:404]     Test net output #0: accuracy = 0.19432\nI0821 14:17:55.512675 32360 solver.cpp:404]     Test net output #1: loss = 7.11473 (* 1 = 7.11473 loss)\nI0821 14:17:56.823614 32360 solver.cpp:228] Iteration 9500, loss = 0.470466\nI0821 14:17:56.823669 32360 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 14:17:56.823686 32360 solver.cpp:244]     Train net output #1: loss = 0.470466 (* 1 = 0.470466 loss)\nI0821 14:17:56.923146 32360 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0821 14:20:14.276731 32360 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 14:21:36.050267 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1524\nI0821 14:21:36.050556 32360 solver.cpp:404]     Test net output #1: loss = 18.0983 (* 1 = 18.0983 loss)\nI0821 14:21:37.361755 32360 solver.cpp:228] Iteration 9600, loss = 0.466138\nI0821 14:21:37.361809 32360 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 14:21:37.361825 32360 solver.cpp:244]     Train net output #1: loss = 0.466138 (* 1 = 0.466138 loss)\nI0821 14:21:37.461910 32360 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0821 14:23:54.755512 32360 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 14:25:16.516355 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1808\nI0821 14:25:16.516683 32360 solver.cpp:404]     Test net output #1: loss = 14.0018 (* 1 = 14.0018 loss)\nI0821 14:25:17.827916 32360 solver.cpp:228] Iteration 9700, loss = 0.448566\nI0821 14:25:17.827970 32360 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 14:25:17.827985 32360 solver.cpp:244]     Train net output #1: loss = 0.448566 (* 1 = 0.448566 loss)\nI0821 14:25:17.924952 32360 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0821 14:27:34.587502 32360 solver.cpp:337] Iteration 
9800, Testing net (#0)\nI0821 14:28:56.349385 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15964\nI0821 14:28:56.349701 32360 solver.cpp:404]     Test net output #1: loss = 25.0775 (* 1 = 25.0775 loss)\nI0821 14:28:57.661135 32360 solver.cpp:228] Iteration 9800, loss = 0.432155\nI0821 14:28:57.661190 32360 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 14:28:57.661206 32360 solver.cpp:244]     Train net output #1: loss = 0.432155 (* 1 = 0.432155 loss)\nI0821 14:28:57.767509 32360 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0821 14:31:15.138051 32360 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 14:32:36.894472 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15932\nI0821 14:32:36.894773 32360 solver.cpp:404]     Test net output #1: loss = 24.0004 (* 1 = 24.0004 loss)\nI0821 14:32:38.205978 32360 solver.cpp:228] Iteration 9900, loss = 0.582642\nI0821 14:32:38.206032 32360 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 14:32:38.206048 32360 solver.cpp:244]     Train net output #1: loss = 0.582642 (* 1 = 0.582642 loss)\nI0821 14:32:38.306638 32360 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0821 14:34:55.582695 32360 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 14:36:17.333288 32360 solver.cpp:404]     Test net output #0: accuracy = 0.21116\nI0821 14:36:17.333586 32360 solver.cpp:404]     Test net output #1: loss = 11.7566 (* 1 = 11.7566 loss)\nI0821 14:36:18.644811 32360 solver.cpp:228] Iteration 10000, loss = 0.276845\nI0821 14:36:18.644863 32360 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 14:36:18.644881 32360 solver.cpp:244]     Train net output #1: loss = 0.276845 (* 1 = 0.276845 loss)\nI0821 14:36:18.740497 32360 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0821 14:38:35.414124 32360 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0821 14:39:57.162888 32360 solver.cpp:404]     Test net output #0: accuracy = 0.21556\nI0821 14:39:57.163164 32360 
solver.cpp:404]     Test net output #1: loss = 10.8357 (* 1 = 10.8357 loss)\nI0821 14:39:58.474210 32360 solver.cpp:228] Iteration 10100, loss = 0.24236\nI0821 14:39:58.474262 32360 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 14:39:58.474278 32360 solver.cpp:244]     Train net output #1: loss = 0.24236 (* 1 = 0.24236 loss)\nI0821 14:39:58.575870 32360 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0821 14:42:15.946223 32360 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0821 14:43:37.695714 32360 solver.cpp:404]     Test net output #0: accuracy = 0.21668\nI0821 14:43:37.695997 32360 solver.cpp:404]     Test net output #1: loss = 10.1865 (* 1 = 10.1865 loss)\nI0821 14:43:39.007097 32360 solver.cpp:228] Iteration 10200, loss = 0.0850703\nI0821 14:43:39.007143 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:43:39.007158 32360 solver.cpp:244]     Train net output #1: loss = 0.0850703 (* 1 = 0.0850703 loss)\nI0821 14:43:39.112826 32360 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0821 14:45:56.420961 32360 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0821 14:47:18.169761 32360 solver.cpp:404]     Test net output #0: accuracy = 0.19644\nI0821 14:47:18.170061 32360 solver.cpp:404]     Test net output #1: loss = 11.3017 (* 1 = 11.3017 loss)\nI0821 14:47:19.481109 32360 solver.cpp:228] Iteration 10300, loss = 0.242242\nI0821 14:47:19.481161 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 14:47:19.481178 32360 solver.cpp:244]     Train net output #1: loss = 0.242242 (* 1 = 0.242242 loss)\nI0821 14:47:19.583940 32360 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0821 14:49:36.831759 32360 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0821 14:50:58.582646 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1636\nI0821 14:50:58.582953 32360 solver.cpp:404]     Test net output #1: loss = 8.6512 (* 1 = 8.6512 loss)\nI0821 14:50:59.893875 32360 solver.cpp:228] Iteration 10400, 
loss = 0.100901\nI0821 14:50:59.893926 32360 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:50:59.893941 32360 solver.cpp:244]     Train net output #1: loss = 0.100901 (* 1 = 0.100901 loss)\nI0821 14:50:59.996807 32360 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0821 14:53:17.258648 32360 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0821 14:54:39.032655 32360 solver.cpp:404]     Test net output #0: accuracy = 0.21156\nI0821 14:54:39.032932 32360 solver.cpp:404]     Test net output #1: loss = 6.1407 (* 1 = 6.1407 loss)\nI0821 14:54:40.344064 32360 solver.cpp:228] Iteration 10500, loss = 0.0303558\nI0821 14:54:40.344116 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:54:40.344133 32360 solver.cpp:244]     Train net output #1: loss = 0.0303558 (* 1 = 0.0303558 loss)\nI0821 14:54:40.451745 32360 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0821 14:56:57.095046 32360 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0821 14:58:18.875412 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18424\nI0821 14:58:18.875711 32360 solver.cpp:404]     Test net output #1: loss = 5.70445 (* 1 = 5.70445 loss)\nI0821 14:58:20.187438 32360 solver.cpp:228] Iteration 10600, loss = 0.0917061\nI0821 14:58:20.187492 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:58:20.187510 32360 solver.cpp:244]     Train net output #1: loss = 0.0917061 (* 1 = 0.0917061 loss)\nI0821 14:58:20.286577 32360 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0821 15:00:37.617720 32360 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0821 15:01:59.385396 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1736\nI0821 15:01:59.385706 32360 solver.cpp:404]     Test net output #1: loss = 7.25942 (* 1 = 7.25942 loss)\nI0821 15:02:00.697321 32360 solver.cpp:228] Iteration 10700, loss = 0.0671313\nI0821 15:02:00.697374 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:02:00.697391 32360 
solver.cpp:244]     Train net output #1: loss = 0.0671313 (* 1 = 0.0671313 loss)\nI0821 15:02:00.799401 32360 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0821 15:04:18.169049 32360 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0821 15:05:39.917872 32360 solver.cpp:404]     Test net output #0: accuracy = 0.22156\nI0821 15:05:39.918169 32360 solver.cpp:404]     Test net output #1: loss = 5.13493 (* 1 = 5.13493 loss)\nI0821 15:05:41.230181 32360 solver.cpp:228] Iteration 10800, loss = 0.00157354\nI0821 15:05:41.230234 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:05:41.230252 32360 solver.cpp:244]     Train net output #1: loss = 0.00157358 (* 1 = 0.00157358 loss)\nI0821 15:05:41.333829 32360 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0821 15:07:57.914959 32360 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0821 15:09:19.676499 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16884\nI0821 15:09:19.676775 32360 solver.cpp:404]     Test net output #1: loss = 5.31189 (* 1 = 5.31189 loss)\nI0821 15:09:20.988030 32360 solver.cpp:228] Iteration 10900, loss = 0.000747692\nI0821 15:09:20.988085 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:09:20.988101 32360 solver.cpp:244]     Train net output #1: loss = 0.000747728 (* 1 = 0.000747728 loss)\nI0821 15:09:21.093822 32360 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0821 15:11:38.429580 32360 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0821 15:13:00.178233 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15384\nI0821 15:13:00.178544 32360 solver.cpp:404]     Test net output #1: loss = 5.04213 (* 1 = 5.04213 loss)\nI0821 15:13:01.490155 32360 solver.cpp:228] Iteration 11000, loss = 0.000780046\nI0821 15:13:01.490208 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:13:01.490224 32360 solver.cpp:244]     Train net output #1: loss = 0.000780082 (* 1 = 0.000780082 loss)\nI0821 15:13:01.590665 32360 
sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0821 15:15:19.063395 32360 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0821 15:16:40.820555 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1502\nI0821 15:16:40.820871 32360 solver.cpp:404]     Test net output #1: loss = 4.61058 (* 1 = 4.61058 loss)\nI0821 15:16:42.132529 32360 solver.cpp:228] Iteration 11100, loss = 0.000873198\nI0821 15:16:42.132582 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:16:42.132599 32360 solver.cpp:244]     Train net output #1: loss = 0.000873234 (* 1 = 0.000873234 loss)\nI0821 15:16:42.231858 32360 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0821 15:18:59.711827 32360 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0821 15:20:21.462620 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14656\nI0821 15:20:21.462929 32360 solver.cpp:404]     Test net output #1: loss = 4.29767 (* 1 = 4.29767 loss)\nI0821 15:20:22.774212 32360 solver.cpp:228] Iteration 11200, loss = 0.000788815\nI0821 15:20:22.774262 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:20:22.774277 32360 solver.cpp:244]     Train net output #1: loss = 0.000788851 (* 1 = 0.000788851 loss)\nI0821 15:20:22.869851 32360 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0821 15:22:40.283941 32360 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0821 15:24:02.037472 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13836\nI0821 15:24:02.037766 32360 solver.cpp:404]     Test net output #1: loss = 4.03632 (* 1 = 4.03632 loss)\nI0821 15:24:03.349303 32360 solver.cpp:228] Iteration 11300, loss = 0.000997757\nI0821 15:24:03.349344 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:24:03.349359 32360 solver.cpp:244]     Train net output #1: loss = 0.000997792 (* 1 = 0.000997792 loss)\nI0821 15:24:03.448978 32360 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0821 15:26:21.177325 32360 solver.cpp:337] Iteration 11400, Testing net 
(#0)\nI0821 15:27:42.944731 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1304\nI0821 15:27:42.945022 32360 solver.cpp:404]     Test net output #1: loss = 3.91718 (* 1 = 3.91718 loss)\nI0821 15:27:44.256403 32360 solver.cpp:228] Iteration 11400, loss = 0.000894694\nI0821 15:27:44.256449 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:27:44.256466 32360 solver.cpp:244]     Train net output #1: loss = 0.000894729 (* 1 = 0.000894729 loss)\nI0821 15:27:44.357226 32360 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0821 15:30:02.023573 32360 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0821 15:31:23.783805 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11704\nI0821 15:31:23.784111 32360 solver.cpp:404]     Test net output #1: loss = 3.91169 (* 1 = 3.91169 loss)\nI0821 15:31:25.095731 32360 solver.cpp:228] Iteration 11500, loss = 0.000802583\nI0821 15:31:25.095780 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:31:25.095798 32360 solver.cpp:244]     Train net output #1: loss = 0.000802619 (* 1 = 0.000802619 loss)\nI0821 15:31:25.201292 32360 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0821 15:33:42.855860 32360 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0821 15:35:04.617135 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10968\nI0821 15:35:04.617426 32360 solver.cpp:404]     Test net output #1: loss = 3.93854 (* 1 = 3.93854 loss)\nI0821 15:35:05.928596 32360 solver.cpp:228] Iteration 11600, loss = 0.00087811\nI0821 15:35:05.928647 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:35:05.928663 32360 solver.cpp:244]     Train net output #1: loss = 0.000878145 (* 1 = 0.000878145 loss)\nI0821 15:35:06.034763 32360 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0821 15:37:23.817621 32360 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0821 15:38:45.564297 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10296\nI0821 15:38:45.564612 32360 
solver.cpp:404]     Test net output #1: loss = 4.04054 (* 1 = 4.04054 loss)\nI0821 15:38:46.875615 32360 solver.cpp:228] Iteration 11700, loss = 0.000947983\nI0821 15:38:46.875665 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:38:46.875681 32360 solver.cpp:244]     Train net output #1: loss = 0.000948018 (* 1 = 0.000948018 loss)\nI0821 15:38:46.984010 32360 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0821 15:41:05.045380 32360 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0821 15:42:26.772780 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10152\nI0821 15:42:26.773067 32360 solver.cpp:404]     Test net output #1: loss = 4.16598 (* 1 = 4.16598 loss)\nI0821 15:42:28.084287 32360 solver.cpp:228] Iteration 11800, loss = 0.00085669\nI0821 15:42:28.084339 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:42:28.084357 32360 solver.cpp:244]     Train net output #1: loss = 0.000856726 (* 1 = 0.000856726 loss)\nI0821 15:42:28.196406 32360 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0821 15:44:46.313933 32360 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0821 15:46:08.046658 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09892\nI0821 15:46:08.046950 32360 solver.cpp:404]     Test net output #1: loss = 4.25242 (* 1 = 4.25242 loss)\nI0821 15:46:09.358450 32360 solver.cpp:228] Iteration 11900, loss = 0.000718904\nI0821 15:46:09.358510 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:46:09.358527 32360 solver.cpp:244]     Train net output #1: loss = 0.00071894 (* 1 = 0.00071894 loss)\nI0821 15:46:09.469903 32360 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0821 15:48:27.588969 32360 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0821 15:49:49.340612 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0978\nI0821 15:49:49.340904 32360 solver.cpp:404]     Test net output #1: loss = 4.32545 (* 1 = 4.32545 loss)\nI0821 15:49:50.651414 32360 solver.cpp:228] 
Iteration 12000, loss = 0.000869337\nI0821 15:49:50.651469 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:49:50.651486 32360 solver.cpp:244]     Train net output #1: loss = 0.000869373 (* 1 = 0.000869373 loss)\nI0821 15:49:50.761482 32360 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0821 15:52:08.815403 32360 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0821 15:53:30.579867 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09624\nI0821 15:53:30.580162 32360 solver.cpp:404]     Test net output #1: loss = 4.35762 (* 1 = 4.35762 loss)\nI0821 15:53:31.891512 32360 solver.cpp:228] Iteration 12100, loss = 0.000758672\nI0821 15:53:31.891561 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:53:31.891577 32360 solver.cpp:244]     Train net output #1: loss = 0.000758707 (* 1 = 0.000758707 loss)\nI0821 15:53:31.997931 32360 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0821 15:55:49.980444 32360 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0821 15:57:11.752334 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0968\nI0821 15:57:11.752655 32360 solver.cpp:404]     Test net output #1: loss = 4.39467 (* 1 = 4.39467 loss)\nI0821 15:57:13.064151 32360 solver.cpp:228] Iteration 12200, loss = 0.000741463\nI0821 15:57:13.064190 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:57:13.064205 32360 solver.cpp:244]     Train net output #1: loss = 0.000741498 (* 1 = 0.000741498 loss)\nI0821 15:57:13.169705 32360 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0821 15:59:31.174055 32360 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0821 16:00:52.950585 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0968\nI0821 16:00:52.950901 32360 solver.cpp:404]     Test net output #1: loss = 4.28885 (* 1 = 4.28885 loss)\nI0821 16:00:54.262496 32360 solver.cpp:228] Iteration 12300, loss = 0.000657741\nI0821 16:00:54.262537 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 16:00:54.262553 32360 solver.cpp:244]     Train net output #1: loss = 0.000657776 (* 1 = 0.000657776 loss)\nI0821 16:00:54.375849 32360 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0821 16:03:12.322566 32360 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0821 16:04:34.098737 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0976\nI0821 16:04:34.099058 32360 solver.cpp:404]     Test net output #1: loss = 4.19725 (* 1 = 4.19725 loss)\nI0821 16:04:35.412226 32360 solver.cpp:228] Iteration 12400, loss = 0.000765249\nI0821 16:04:35.412269 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:04:35.412293 32360 solver.cpp:244]     Train net output #1: loss = 0.000765284 (* 1 = 0.000765284 loss)\nI0821 16:04:35.515239 32360 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0821 16:06:53.438316 32360 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0821 16:08:15.210000 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09708\nI0821 16:08:15.210317 32360 solver.cpp:404]     Test net output #1: loss = 4.03885 (* 1 = 4.03885 loss)\nI0821 16:08:16.523149 32360 solver.cpp:228] Iteration 12500, loss = 0.000806321\nI0821 16:08:16.523190 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:08:16.523206 32360 solver.cpp:244]     Train net output #1: loss = 0.000806356 (* 1 = 0.000806356 loss)\nI0821 16:08:16.626090 32360 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0821 16:10:34.464169 32360 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0821 16:11:56.080008 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09744\nI0821 16:11:56.080247 32360 solver.cpp:404]     Test net output #1: loss = 3.82611 (* 1 = 3.82611 loss)\nI0821 16:11:57.391312 32360 solver.cpp:228] Iteration 12600, loss = 0.000539712\nI0821 16:11:57.391356 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:11:57.391372 32360 solver.cpp:244]     Train net output #1: loss = 0.000539748 (* 1 = 0.000539748 loss)\nI0821 
16:11:57.498592 32360 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0821 16:14:15.562250 32360 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0821 16:15:37.256350 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09676\nI0821 16:15:37.256582 32360 solver.cpp:404]     Test net output #1: loss = 3.62423 (* 1 = 3.62423 loss)\nI0821 16:15:38.568792 32360 solver.cpp:228] Iteration 12700, loss = 0.00067563\nI0821 16:15:38.568835 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:15:38.568850 32360 solver.cpp:244]     Train net output #1: loss = 0.000675665 (* 1 = 0.000675665 loss)\nI0821 16:15:38.671828 32360 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0821 16:17:56.596245 32360 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0821 16:19:18.207052 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09692\nI0821 16:19:18.207290 32360 solver.cpp:404]     Test net output #1: loss = 3.42835 (* 1 = 3.42835 loss)\nI0821 16:19:19.518880 32360 solver.cpp:228] Iteration 12800, loss = 0.000569124\nI0821 16:19:19.518920 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:19:19.518934 32360 solver.cpp:244]     Train net output #1: loss = 0.000569159 (* 1 = 0.000569159 loss)\nI0821 16:19:19.622493 32360 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0821 16:21:37.079041 32360 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0821 16:22:58.475499 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09644\nI0821 16:22:58.475792 32360 solver.cpp:404]     Test net output #1: loss = 3.22219 (* 1 = 3.22219 loss)\nI0821 16:22:59.788380 32360 solver.cpp:228] Iteration 12900, loss = 0.00047233\nI0821 16:22:59.788422 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:22:59.788439 32360 solver.cpp:244]     Train net output #1: loss = 0.000472366 (* 1 = 0.000472366 loss)\nI0821 16:22:59.890763 32360 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0821 16:25:17.860843 32360 solver.cpp:337] Iteration 
13000, Testing net (#0)\nI0821 16:26:39.237732 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09784\nI0821 16:26:39.237993 32360 solver.cpp:404]     Test net output #1: loss = 3.09108 (* 1 = 3.09108 loss)\nI0821 16:26:40.548982 32360 solver.cpp:228] Iteration 13000, loss = 0.000541025\nI0821 16:26:40.549026 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:26:40.549041 32360 solver.cpp:244]     Train net output #1: loss = 0.00054106 (* 1 = 0.00054106 loss)\nI0821 16:26:40.649493 32360 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0821 16:28:58.015115 32360 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0821 16:30:19.720726 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09456\nI0821 16:30:19.721002 32360 solver.cpp:404]     Test net output #1: loss = 2.9724 (* 1 = 2.9724 loss)\nI0821 16:30:21.032021 32360 solver.cpp:228] Iteration 13100, loss = 0.000469499\nI0821 16:30:21.032060 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:30:21.032076 32360 solver.cpp:244]     Train net output #1: loss = 0.000469534 (* 1 = 0.000469534 loss)\nI0821 16:30:21.142151 32360 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0821 16:32:39.150408 32360 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0821 16:34:00.824929 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09996\nI0821 16:34:00.825196 32360 solver.cpp:404]     Test net output #1: loss = 2.88701 (* 1 = 2.88701 loss)\nI0821 16:34:02.136361 32360 solver.cpp:228] Iteration 13200, loss = 0.000344624\nI0821 16:34:02.136399 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:34:02.136415 32360 solver.cpp:244]     Train net output #1: loss = 0.000344659 (* 1 = 0.000344659 loss)\nI0821 16:34:02.245405 32360 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0821 16:36:20.315407 32360 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0821 16:37:41.981464 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10184\nI0821 
16:37:41.981724 32360 solver.cpp:404]     Test net output #1: loss = 2.82671 (* 1 = 2.82671 loss)\nI0821 16:37:43.292558 32360 solver.cpp:228] Iteration 13300, loss = 0.000488358\nI0821 16:37:43.292598 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:37:43.292613 32360 solver.cpp:244]     Train net output #1: loss = 0.000488394 (* 1 = 0.000488394 loss)\nI0821 16:37:43.398694 32360 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0821 16:40:01.392406 32360 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0821 16:41:23.101938 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0821 16:41:23.102210 32360 solver.cpp:404]     Test net output #1: loss = 2.77577 (* 1 = 2.77577 loss)\nI0821 16:41:24.413365 32360 solver.cpp:228] Iteration 13400, loss = 0.000308602\nI0821 16:41:24.413405 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:41:24.413420 32360 solver.cpp:244]     Train net output #1: loss = 0.000308638 (* 1 = 0.000308638 loss)\nI0821 16:41:24.516784 32360 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0821 16:43:42.587213 32360 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0821 16:45:04.155335 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0821 16:45:04.155573 32360 solver.cpp:404]     Test net output #1: loss = 2.73393 (* 1 = 2.73393 loss)\nI0821 16:45:05.466997 32360 solver.cpp:228] Iteration 13500, loss = 0.000364643\nI0821 16:45:05.467037 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:45:05.467053 32360 solver.cpp:244]     Train net output #1: loss = 0.000364679 (* 1 = 0.000364679 loss)\nI0821 16:45:05.574285 32360 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0821 16:47:23.543771 32360 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0821 16:48:45.137933 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1034\nI0821 16:48:45.138186 32360 solver.cpp:404]     Test net output #1: loss = 2.6947 (* 1 = 2.6947 loss)\nI0821 16:48:46.449723 
32360 solver.cpp:228] Iteration 13600, loss = 0.000359152\nI0821 16:48:46.449764 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:48:46.449779 32360 solver.cpp:244]     Train net output #1: loss = 0.000359187 (* 1 = 0.000359187 loss)\nI0821 16:48:46.554321 32360 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0821 16:51:04.557116 32360 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0821 16:52:26.014511 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10492\nI0821 16:52:26.014770 32360 solver.cpp:404]     Test net output #1: loss = 2.66738 (* 1 = 2.66738 loss)\nI0821 16:52:27.325664 32360 solver.cpp:228] Iteration 13700, loss = 0.000244007\nI0821 16:52:27.325708 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:52:27.325726 32360 solver.cpp:244]     Train net output #1: loss = 0.000244042 (* 1 = 0.000244042 loss)\nI0821 16:52:27.435379 32360 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0821 16:54:45.281411 32360 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0821 16:56:06.580938 32360 solver.cpp:404]     Test net output #0: accuracy = 0.107\nI0821 16:56:06.581161 32360 solver.cpp:404]     Test net output #1: loss = 2.64571 (* 1 = 2.64571 loss)\nI0821 16:56:07.893118 32360 solver.cpp:228] Iteration 13800, loss = 0.000361417\nI0821 16:56:07.893162 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:56:07.893178 32360 solver.cpp:244]     Train net output #1: loss = 0.000361453 (* 1 = 0.000361453 loss)\nI0821 16:56:07.995455 32360 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0821 16:58:25.912269 32360 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0821 16:59:47.530678 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10804\nI0821 16:59:47.530891 32360 solver.cpp:404]     Test net output #1: loss = 2.62065 (* 1 = 2.62065 loss)\nI0821 16:59:48.842023 32360 solver.cpp:228] Iteration 13900, loss = 0.000340586\nI0821 16:59:48.842067 32360 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0821 16:59:48.842082 32360 solver.cpp:244]     Train net output #1: loss = 0.000340621 (* 1 = 0.000340621 loss)\nI0821 16:59:48.943766 32360 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0821 17:02:07.088397 32360 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0821 17:03:28.759901 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1126\nI0821 17:03:28.760126 32360 solver.cpp:404]     Test net output #1: loss = 2.59542 (* 1 = 2.59542 loss)\nI0821 17:03:30.071746 32360 solver.cpp:228] Iteration 14000, loss = 0.000322834\nI0821 17:03:30.071790 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:03:30.071806 32360 solver.cpp:244]     Train net output #1: loss = 0.000322869 (* 1 = 0.000322869 loss)\nI0821 17:03:30.173588 32360 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0821 17:05:48.140019 32360 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0821 17:07:09.360846 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11232\nI0821 17:07:09.361073 32360 solver.cpp:404]     Test net output #1: loss = 2.5766 (* 1 = 2.5766 loss)\nI0821 17:07:10.672030 32360 solver.cpp:228] Iteration 14100, loss = 0.000288443\nI0821 17:07:10.672075 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:07:10.672091 32360 solver.cpp:244]     Train net output #1: loss = 0.000288478 (* 1 = 0.000288478 loss)\nI0821 17:07:10.776126 32360 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0821 17:09:28.653615 32360 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0821 17:10:50.001252 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0821 17:10:50.001531 32360 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0821 17:10:51.314033 32360 solver.cpp:228] Iteration 14200, loss = 1.66901\nI0821 17:10:51.314077 32360 solver.cpp:244]     Train net output #0: accuracy = 0.368\nI0821 17:10:51.314095 32360 solver.cpp:244]     Train net output #1: loss = 1.66901 (* 1 = 1.66901 
loss)\nI0821 17:10:51.424350 32360 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0821 17:13:09.578829 32360 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0821 17:14:31.178946 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0821 17:14:31.179244 32360 solver.cpp:404]     Test net output #1: loss = 78.5609 (* 1 = 78.5609 loss)\nI0821 17:14:32.492076 32360 solver.cpp:228] Iteration 14300, loss = 1.09356\nI0821 17:14:32.492121 32360 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI0821 17:14:32.492138 32360 solver.cpp:244]     Train net output #1: loss = 1.09356 (* 1 = 1.09356 loss)\nI0821 17:14:32.597530 32360 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0821 17:16:50.651705 32360 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0821 17:18:12.446535 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 17:18:12.446854 32360 solver.cpp:404]     Test net output #1: loss = 74.387 (* 1 = 74.387 loss)\nI0821 17:18:13.759845 32360 solver.cpp:228] Iteration 14400, loss = 0.885511\nI0821 17:18:13.759888 32360 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0821 17:18:13.759904 32360 solver.cpp:244]     Train net output #1: loss = 0.885511 (* 1 = 0.885511 loss)\nI0821 17:18:13.862943 32360 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0821 17:20:31.834295 32360 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0821 17:21:53.598742 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0821 17:21:53.599061 32360 solver.cpp:404]     Test net output #1: loss = 51.0548 (* 1 = 51.0548 loss)\nI0821 17:21:54.910415 32360 solver.cpp:228] Iteration 14500, loss = 0.769003\nI0821 17:21:54.910456 32360 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0821 17:21:54.910472 32360 solver.cpp:244]     Train net output #1: loss = 0.769003 (* 1 = 0.769003 loss)\nI0821 17:21:55.026346 32360 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0821 17:24:13.016327 32360 solver.cpp:337] Iteration 14600, 
Testing net (#0)\nI0821 17:25:34.780748 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15072\nI0821 17:25:34.781061 32360 solver.cpp:404]     Test net output #1: loss = 24.1667 (* 1 = 24.1667 loss)\nI0821 17:25:36.092308 32360 solver.cpp:228] Iteration 14600, loss = 0.505725\nI0821 17:25:36.092347 32360 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 17:25:36.092363 32360 solver.cpp:244]     Train net output #1: loss = 0.505725 (* 1 = 0.505725 loss)\nI0821 17:25:36.199403 32360 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0821 17:27:54.367292 32360 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0821 17:29:16.135365 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16912\nI0821 17:29:16.135684 32360 solver.cpp:404]     Test net output #1: loss = 20.3133 (* 1 = 20.3133 loss)\nI0821 17:29:17.447140 32360 solver.cpp:228] Iteration 14700, loss = 0.402364\nI0821 17:29:17.447180 32360 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0821 17:29:17.447194 32360 solver.cpp:244]     Train net output #1: loss = 0.402364 (* 1 = 0.402364 loss)\nI0821 17:29:17.556808 32360 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0821 17:31:35.649298 32360 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0821 17:32:57.407351 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1564\nI0821 17:32:57.407686 32360 solver.cpp:404]     Test net output #1: loss = 19.0815 (* 1 = 19.0815 loss)\nI0821 17:32:58.718663 32360 solver.cpp:228] Iteration 14800, loss = 0.309762\nI0821 17:32:58.718703 32360 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 17:32:58.718717 32360 solver.cpp:244]     Train net output #1: loss = 0.309762 (* 1 = 0.309762 loss)\nI0821 17:32:58.827924 32360 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0821 17:35:16.299979 32360 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0821 17:36:38.063031 32360 solver.cpp:404]     Test net output #0: accuracy = 0.137\nI0821 17:36:38.063347 32360 
solver.cpp:404]     Test net output #1: loss = 27.3213 (* 1 = 27.3213 loss)\nI0821 17:36:39.375486 32360 solver.cpp:228] Iteration 14900, loss = 0.196274\nI0821 17:36:39.375526 32360 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 17:36:39.375542 32360 solver.cpp:244]     Train net output #1: loss = 0.196274 (* 1 = 0.196274 loss)\nI0821 17:36:39.479439 32360 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0821 17:38:57.596806 32360 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0821 17:40:19.353166 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1178\nI0821 17:40:19.353485 32360 solver.cpp:404]     Test net output #1: loss = 41.3549 (* 1 = 41.3549 loss)\nI0821 17:40:20.664852 32360 solver.cpp:228] Iteration 15000, loss = 0.215529\nI0821 17:40:20.664891 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 17:40:20.664907 32360 solver.cpp:244]     Train net output #1: loss = 0.215529 (* 1 = 0.215529 loss)\nI0821 17:40:20.767304 32360 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0821 17:42:38.843143 32360 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0821 17:44:00.612730 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14748\nI0821 17:44:00.613028 32360 solver.cpp:404]     Test net output #1: loss = 27.281 (* 1 = 27.281 loss)\nI0821 17:44:01.925173 32360 solver.cpp:228] Iteration 15100, loss = 0.0499009\nI0821 17:44:01.925210 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:44:01.925227 32360 solver.cpp:244]     Train net output #1: loss = 0.049901 (* 1 = 0.049901 loss)\nI0821 17:44:02.029005 32360 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0821 17:46:20.068375 32360 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0821 17:47:41.843969 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12648\nI0821 17:47:41.844262 32360 solver.cpp:404]     Test net output #1: loss = 25.2289 (* 1 = 25.2289 loss)\nI0821 17:47:43.155748 32360 solver.cpp:228] Iteration 15200, 
loss = 0.140481\nI0821 17:47:43.155786 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 17:47:43.155802 32360 solver.cpp:244]     Train net output #1: loss = 0.140481 (* 1 = 0.140481 loss)\nI0821 17:47:43.260488 32360 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0821 17:50:01.332607 32360 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0821 17:51:23.117437 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13208\nI0821 17:51:23.117735 32360 solver.cpp:404]     Test net output #1: loss = 29.0126 (* 1 = 29.0126 loss)\nI0821 17:51:24.429249 32360 solver.cpp:228] Iteration 15300, loss = 0.0820258\nI0821 17:51:24.429286 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:51:24.429302 32360 solver.cpp:244]     Train net output #1: loss = 0.0820258 (* 1 = 0.0820258 loss)\nI0821 17:51:24.528710 32360 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0821 17:53:42.544404 32360 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0821 17:55:04.300671 32360 solver.cpp:404]     Test net output #0: accuracy = 0.24976\nI0821 17:55:04.300989 32360 solver.cpp:404]     Test net output #1: loss = 10.405 (* 1 = 10.405 loss)\nI0821 17:55:05.612514 32360 solver.cpp:228] Iteration 15400, loss = 0.0360422\nI0821 17:55:05.612553 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:55:05.612568 32360 solver.cpp:244]     Train net output #1: loss = 0.0360423 (* 1 = 0.0360423 loss)\nI0821 17:55:05.719362 32360 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0821 17:57:23.760754 32360 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0821 17:58:45.532521 32360 solver.cpp:404]     Test net output #0: accuracy = 0.26784\nI0821 17:58:45.532840 32360 solver.cpp:404]     Test net output #1: loss = 8.49163 (* 1 = 8.49163 loss)\nI0821 17:58:46.844326 32360 solver.cpp:228] Iteration 15500, loss = 0.00144394\nI0821 17:58:46.844365 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:58:46.844380 32360 
solver.cpp:244]     Train net output #1: loss = 0.00144399 (* 1 = 0.00144399 loss)\nI0821 17:58:46.956048 32360 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0821 18:01:04.980145 32360 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0821 18:02:26.754231 32360 solver.cpp:404]     Test net output #0: accuracy = 0.38536\nI0821 18:02:26.754537 32360 solver.cpp:404]     Test net output #1: loss = 3.56348 (* 1 = 3.56348 loss)\nI0821 18:02:28.066699 32360 solver.cpp:228] Iteration 15600, loss = 0.000175874\nI0821 18:02:28.066740 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:02:28.066756 32360 solver.cpp:244]     Train net output #1: loss = 0.00017593 (* 1 = 0.00017593 loss)\nI0821 18:02:28.170367 32360 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0821 18:04:46.177407 32360 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0821 18:06:07.936118 32360 solver.cpp:404]     Test net output #0: accuracy = 0.31756\nI0821 18:06:07.936434 32360 solver.cpp:404]     Test net output #1: loss = 3.49522 (* 1 = 3.49522 loss)\nI0821 18:06:09.248081 32360 solver.cpp:228] Iteration 15700, loss = 0.000244704\nI0821 18:06:09.248121 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:06:09.248137 32360 solver.cpp:244]     Train net output #1: loss = 0.00024476 (* 1 = 0.00024476 loss)\nI0821 18:06:09.356076 32360 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0821 18:08:27.383253 32360 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0821 18:09:49.146312 32360 solver.cpp:404]     Test net output #0: accuracy = 0.24088\nI0821 18:09:49.146634 32360 solver.cpp:404]     Test net output #1: loss = 3.89371 (* 1 = 3.89371 loss)\nI0821 18:09:50.458374 32360 solver.cpp:228] Iteration 15800, loss = 0.000206429\nI0821 18:09:50.458413 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:09:50.458428 32360 solver.cpp:244]     Train net output #1: loss = 0.000206485 (* 1 = 0.000206485 loss)\nI0821 18:09:50.561280 32360 
sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0821 18:12:08.509434 32360 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0821 18:13:30.284016 32360 solver.cpp:404]     Test net output #0: accuracy = 0.20404\nI0821 18:13:30.284337 32360 solver.cpp:404]     Test net output #1: loss = 4.06988 (* 1 = 4.06988 loss)\nI0821 18:13:31.597245 32360 solver.cpp:228] Iteration 15900, loss = 0.000241691\nI0821 18:13:31.597287 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:13:31.597303 32360 solver.cpp:244]     Train net output #1: loss = 0.000241747 (* 1 = 0.000241747 loss)\nI0821 18:13:31.703055 32360 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0821 18:15:49.711784 32360 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0821 18:17:11.484360 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1876\nI0821 18:17:11.484661 32360 solver.cpp:404]     Test net output #1: loss = 4.01164 (* 1 = 4.01164 loss)\nI0821 18:17:12.797381 32360 solver.cpp:228] Iteration 16000, loss = 0.000305667\nI0821 18:17:12.797423 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:17:12.797438 32360 solver.cpp:244]     Train net output #1: loss = 0.000305723 (* 1 = 0.000305723 loss)\nI0821 18:17:12.898102 32360 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0821 18:19:30.820787 32360 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0821 18:20:52.594000 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18088\nI0821 18:20:52.594308 32360 solver.cpp:404]     Test net output #1: loss = 3.85045 (* 1 = 3.85045 loss)\nI0821 18:20:53.906334 32360 solver.cpp:228] Iteration 16100, loss = 0.000385602\nI0821 18:20:53.906378 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:20:53.906400 32360 solver.cpp:244]     Train net output #1: loss = 0.000385658 (* 1 = 0.000385658 loss)\nI0821 18:20:54.006077 32360 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0821 18:23:11.952499 32360 solver.cpp:337] Iteration 16200, Testing net 
(#0)\nI0821 18:24:33.715759 32360 solver.cpp:404]     Test net output #0: accuracy = 0.17924\nI0821 18:24:33.716133 32360 solver.cpp:404]     Test net output #1: loss = 3.71791 (* 1 = 3.71791 loss)\nI0821 18:24:35.027833 32360 solver.cpp:228] Iteration 16200, loss = 0.000291272\nI0821 18:24:35.027875 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:24:35.027891 32360 solver.cpp:244]     Train net output #1: loss = 0.000291328 (* 1 = 0.000291328 loss)\nI0821 18:24:35.137796 32360 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0821 18:26:53.237917 32360 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0821 18:28:15.000905 32360 solver.cpp:404]     Test net output #0: accuracy = 0.17816\nI0821 18:28:15.001194 32360 solver.cpp:404]     Test net output #1: loss = 3.58588 (* 1 = 3.58588 loss)\nI0821 18:28:16.313006 32360 solver.cpp:228] Iteration 16300, loss = 0.000364662\nI0821 18:28:16.313048 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:28:16.313063 32360 solver.cpp:244]     Train net output #1: loss = 0.000364719 (* 1 = 0.000364719 loss)\nI0821 18:28:16.423426 32360 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0821 18:30:34.430301 32360 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0821 18:31:56.179600 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18184\nI0821 18:31:56.179904 32360 solver.cpp:404]     Test net output #1: loss = 3.47597 (* 1 = 3.47597 loss)\nI0821 18:31:57.491605 32360 solver.cpp:228] Iteration 16400, loss = 0.000337763\nI0821 18:31:57.491647 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:31:57.491660 32360 solver.cpp:244]     Train net output #1: loss = 0.000337819 (* 1 = 0.000337819 loss)\nI0821 18:31:57.597322 32360 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0821 18:34:15.107339 32360 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0821 18:35:36.832020 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18176\nI0821 18:35:36.832320 32360 
solver.cpp:404]     Test net output #1: loss = 3.38963 (* 1 = 3.38963 loss)\nI0821 18:35:38.143141 32360 solver.cpp:228] Iteration 16500, loss = 0.000297561\nI0821 18:35:38.143190 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:35:38.143208 32360 solver.cpp:244]     Train net output #1: loss = 0.000297617 (* 1 = 0.000297617 loss)\nI0821 18:35:38.239547 32360 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0821 18:37:55.533576 32360 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0821 18:39:17.266511 32360 solver.cpp:404]     Test net output #0: accuracy = 0.17476\nI0821 18:39:17.266813 32360 solver.cpp:404]     Test net output #1: loss = 3.31612 (* 1 = 3.31612 loss)\nI0821 18:39:18.578294 32360 solver.cpp:228] Iteration 16600, loss = 0.000384263\nI0821 18:39:18.578341 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:39:18.578357 32360 solver.cpp:244]     Train net output #1: loss = 0.000384319 (* 1 = 0.000384319 loss)\nI0821 18:39:18.674654 32360 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0821 18:41:35.962085 32360 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0821 18:42:57.710835 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1626\nI0821 18:42:57.711140 32360 solver.cpp:404]     Test net output #1: loss = 3.25152 (* 1 = 3.25152 loss)\nI0821 18:42:59.022809 32360 solver.cpp:228] Iteration 16700, loss = 0.000335874\nI0821 18:42:59.022853 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:42:59.022871 32360 solver.cpp:244]     Train net output #1: loss = 0.00033593 (* 1 = 0.00033593 loss)\nI0821 18:42:59.128808 32360 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0821 18:45:16.427220 32360 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0821 18:46:38.214676 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1606\nI0821 18:46:38.214984 32360 solver.cpp:404]     Test net output #1: loss = 3.1934 (* 1 = 3.1934 loss)\nI0821 18:46:39.526731 32360 solver.cpp:228] 
Iteration 16800, loss = 0.000393765\nI0821 18:46:39.526777 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:46:39.526793 32360 solver.cpp:244]     Train net output #1: loss = 0.000393821 (* 1 = 0.000393821 loss)\nI0821 18:46:39.629914 32360 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0821 18:48:56.931689 32360 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0821 18:50:18.710904 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16248\nI0821 18:50:18.711213 32360 solver.cpp:404]     Test net output #1: loss = 3.13603 (* 1 = 3.13603 loss)\nI0821 18:50:20.023106 32360 solver.cpp:228] Iteration 16900, loss = 0.000318191\nI0821 18:50:20.023149 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:50:20.023165 32360 solver.cpp:244]     Train net output #1: loss = 0.000318247 (* 1 = 0.000318247 loss)\nI0821 18:50:20.129722 32360 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0821 18:52:37.450736 32360 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0821 18:53:59.221210 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16496\nI0821 18:53:59.221590 32360 solver.cpp:404]     Test net output #1: loss = 3.09382 (* 1 = 3.09382 loss)\nI0821 18:54:00.532990 32360 solver.cpp:228] Iteration 17000, loss = 0.000333801\nI0821 18:54:00.533036 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:54:00.533052 32360 solver.cpp:244]     Train net output #1: loss = 0.000333857 (* 1 = 0.000333857 loss)\nI0821 18:54:00.633327 32360 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0821 18:56:18.024852 32360 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0821 18:57:39.820618 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1628\nI0821 18:57:39.820935 32360 solver.cpp:404]     Test net output #1: loss = 3.04991 (* 1 = 3.04991 loss)\nI0821 18:57:41.133344 32360 solver.cpp:228] Iteration 17100, loss = 0.000303492\nI0821 18:57:41.133395 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 18:57:41.133412 32360 solver.cpp:244]     Train net output #1: loss = 0.000303548 (* 1 = 0.000303548 loss)\nI0821 18:57:41.236222 32360 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0821 18:59:58.637248 32360 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0821 19:01:20.405603 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15876\nI0821 19:01:20.405902 32360 solver.cpp:404]     Test net output #1: loss = 3.0145 (* 1 = 3.0145 loss)\nI0821 19:01:21.717864 32360 solver.cpp:228] Iteration 17200, loss = 0.000325431\nI0821 19:01:21.717916 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:01:21.717933 32360 solver.cpp:244]     Train net output #1: loss = 0.000325487 (* 1 = 0.000325487 loss)\nI0821 19:01:21.812994 32360 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0821 19:03:39.142480 32360 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0821 19:05:00.908705 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15128\nI0821 19:05:00.909024 32360 solver.cpp:404]     Test net output #1: loss = 2.97801 (* 1 = 2.97801 loss)\nI0821 19:05:02.219998 32360 solver.cpp:228] Iteration 17300, loss = 0.000294675\nI0821 19:05:02.220044 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:05:02.220062 32360 solver.cpp:244]     Train net output #1: loss = 0.000294731 (* 1 = 0.000294731 loss)\nI0821 19:05:02.321822 32360 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0821 19:07:19.637435 32360 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0821 19:08:41.426260 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14128\nI0821 19:08:41.426568 32360 solver.cpp:404]     Test net output #1: loss = 2.94822 (* 1 = 2.94822 loss)\nI0821 19:08:42.738062 32360 solver.cpp:228] Iteration 17400, loss = 0.000297189\nI0821 19:08:42.738111 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:08:42.738126 32360 solver.cpp:244]     Train net output #1: loss = 0.000297245 (* 1 = 0.000297245 loss)\nI0821 
19:08:42.833043 32360 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0821 19:11:00.192102 32360 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0821 19:12:21.980783 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13244\nI0821 19:12:21.981102 32360 solver.cpp:404]     Test net output #1: loss = 2.92004 (* 1 = 2.92004 loss)\nI0821 19:12:23.293190 32360 solver.cpp:228] Iteration 17500, loss = 0.000314134\nI0821 19:12:23.293236 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:12:23.293251 32360 solver.cpp:244]     Train net output #1: loss = 0.00031419 (* 1 = 0.00031419 loss)\nI0821 19:12:23.387203 32360 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0821 19:14:40.669659 32360 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0821 19:16:02.447458 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11948\nI0821 19:16:02.447772 32360 solver.cpp:404]     Test net output #1: loss = 2.89466 (* 1 = 2.89466 loss)\nI0821 19:16:03.759589 32360 solver.cpp:228] Iteration 17600, loss = 0.000308616\nI0821 19:16:03.759634 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:16:03.759650 32360 solver.cpp:244]     Train net output #1: loss = 0.000308672 (* 1 = 0.000308672 loss)\nI0821 19:16:03.864969 32360 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0821 19:18:21.190145 32360 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0821 19:19:42.957731 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1128\nI0821 19:19:42.958050 32360 solver.cpp:404]     Test net output #1: loss = 2.87085 (* 1 = 2.87085 loss)\nI0821 19:19:44.269984 32360 solver.cpp:228] Iteration 17700, loss = 0.000324898\nI0821 19:19:44.270032 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:19:44.270048 32360 solver.cpp:244]     Train net output #1: loss = 0.000324954 (* 1 = 0.000324954 loss)\nI0821 19:19:44.373121 32360 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0821 19:22:01.715934 32360 solver.cpp:337] Iteration 
17800, Testing net (#0)\nI0821 19:23:23.481667 32360 solver.cpp:404]     Test net output #0: accuracy = 0.109\nI0821 19:23:23.481976 32360 solver.cpp:404]     Test net output #1: loss = 2.84887 (* 1 = 2.84887 loss)\nI0821 19:23:24.793686 32360 solver.cpp:228] Iteration 17800, loss = 0.000279981\nI0821 19:23:24.793733 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:23:24.793756 32360 solver.cpp:244]     Train net output #1: loss = 0.000280038 (* 1 = 0.000280038 loss)\nI0821 19:23:24.893072 32360 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0821 19:25:42.304786 32360 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0821 19:27:04.066205 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1052\nI0821 19:27:04.066504 32360 solver.cpp:404]     Test net output #1: loss = 2.8292 (* 1 = 2.8292 loss)\nI0821 19:27:05.377794 32360 solver.cpp:228] Iteration 17900, loss = 0.000287629\nI0821 19:27:05.377845 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:27:05.377861 32360 solver.cpp:244]     Train net output #1: loss = 0.000287685 (* 1 = 0.000287685 loss)\nI0821 19:27:05.482010 32360 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0821 19:29:22.847098 32360 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0821 19:30:44.598906 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10252\nI0821 19:30:44.599194 32360 solver.cpp:404]     Test net output #1: loss = 2.80534 (* 1 = 2.80534 loss)\nI0821 19:30:45.911985 32360 solver.cpp:228] Iteration 18000, loss = 0.000272557\nI0821 19:30:45.912036 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:30:45.912052 32360 solver.cpp:244]     Train net output #1: loss = 0.000272614 (* 1 = 0.000272614 loss)\nI0821 19:30:46.011957 32360 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0821 19:33:04.085331 32360 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0821 19:34:25.866758 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10112\nI0821 
19:34:25.867055 32360 solver.cpp:404]     Test net output #1: loss = 2.78102 (* 1 = 2.78102 loss)\nI0821 19:34:27.182505 32360 solver.cpp:228] Iteration 18100, loss = 0.000254961\nI0821 19:34:27.182554 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:34:27.182570 32360 solver.cpp:244]     Train net output #1: loss = 0.000255018 (* 1 = 0.000255018 loss)\nI0821 19:34:27.285993 32360 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0821 19:36:45.284749 32360 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0821 19:38:07.053412 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 19:38:07.053736 32360 solver.cpp:404]     Test net output #1: loss = 2.75011 (* 1 = 2.75011 loss)\nI0821 19:38:08.368397 32360 solver.cpp:228] Iteration 18200, loss = 0.000221974\nI0821 19:38:08.368451 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:38:08.368469 32360 solver.cpp:244]     Train net output #1: loss = 0.00022203 (* 1 = 0.00022203 loss)\nI0821 19:38:08.476562 32360 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0821 19:40:26.534183 32360 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0821 19:41:48.306574 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0821 19:41:48.306891 32360 solver.cpp:404]     Test net output #1: loss = 2.72052 (* 1 = 2.72052 loss)\nI0821 19:41:49.621116 32360 solver.cpp:228] Iteration 18300, loss = 0.000217186\nI0821 19:41:49.621163 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:41:49.621179 32360 solver.cpp:244]     Train net output #1: loss = 0.000217242 (* 1 = 0.000217242 loss)\nI0821 19:41:49.736510 32360 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0821 19:44:07.750747 32360 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0821 19:45:29.516336 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 19:45:29.516641 32360 solver.cpp:404]     Test net output #1: loss = 2.68787 (* 1 = 2.68787 loss)\nI0821 19:45:30.830739 
32360 solver.cpp:228] Iteration 18400, loss = 0.000210723\nI0821 19:45:30.830786 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:45:30.830801 32360 solver.cpp:244]     Train net output #1: loss = 0.000210779 (* 1 = 0.000210779 loss)\nI0821 19:45:30.931938 32360 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0821 19:47:48.660959 32360 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0821 19:49:10.434067 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 19:49:10.434387 32360 solver.cpp:404]     Test net output #1: loss = 2.64771 (* 1 = 2.64771 loss)\nI0821 19:49:11.746196 32360 solver.cpp:228] Iteration 18500, loss = 0.000235167\nI0821 19:49:11.746242 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:49:11.746258 32360 solver.cpp:244]     Train net output #1: loss = 0.000235223 (* 1 = 0.000235223 loss)\nI0821 19:49:11.848680 32360 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0821 19:51:29.477252 32360 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0821 19:52:51.252046 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 19:52:51.252358 32360 solver.cpp:404]     Test net output #1: loss = 2.61199 (* 1 = 2.61199 loss)\nI0821 19:52:52.564036 32360 solver.cpp:228] Iteration 18600, loss = 0.000232327\nI0821 19:52:52.564081 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:52:52.564097 32360 solver.cpp:244]     Train net output #1: loss = 0.000232383 (* 1 = 0.000232383 loss)\nI0821 19:52:52.672439 32360 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0821 19:55:10.258402 32360 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0821 19:56:32.027743 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 19:56:32.028057 32360 solver.cpp:404]     Test net output #1: loss = 2.5766 (* 1 = 2.5766 loss)\nI0821 19:56:33.339887 32360 solver.cpp:228] Iteration 18700, loss = 0.000258739\nI0821 19:56:33.339931 32360 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0821 19:56:33.339947 32360 solver.cpp:244]     Train net output #1: loss = 0.000258795 (* 1 = 0.000258795 loss)\nI0821 19:56:33.446753 32360 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0821 19:58:51.114992 32360 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0821 20:00:12.882035 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:00:12.882346 32360 solver.cpp:404]     Test net output #1: loss = 2.54712 (* 1 = 2.54712 loss)\nI0821 20:00:14.194365 32360 solver.cpp:228] Iteration 18800, loss = 0.000235749\nI0821 20:00:14.194406 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:00:14.194422 32360 solver.cpp:244]     Train net output #1: loss = 0.000235805 (* 1 = 0.000235805 loss)\nI0821 20:00:14.304615 32360 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0821 20:02:32.087712 32360 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0821 20:03:53.858775 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:03:53.859086 32360 solver.cpp:404]     Test net output #1: loss = 2.52186 (* 1 = 2.52186 loss)\nI0821 20:03:55.170622 32360 solver.cpp:228] Iteration 18900, loss = 0.000238457\nI0821 20:03:55.170672 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:03:55.170689 32360 solver.cpp:244]     Train net output #1: loss = 0.000238513 (* 1 = 0.000238513 loss)\nI0821 20:03:55.266033 32360 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0821 20:06:12.929824 32360 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0821 20:07:34.692203 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:07:34.692522 32360 solver.cpp:404]     Test net output #1: loss = 2.50571 (* 1 = 2.50571 loss)\nI0821 20:07:36.003458 32360 solver.cpp:228] Iteration 19000, loss = 0.000231609\nI0821 20:07:36.003509 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:07:36.003526 32360 solver.cpp:244]     Train net output #1: loss = 0.000231665 (* 1 = 
0.000231665 loss)\nI0821 20:07:36.111248 32360 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0821 20:09:53.785586 32360 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0821 20:11:15.546566 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:11:15.546876 32360 solver.cpp:404]     Test net output #1: loss = 2.48651 (* 1 = 2.48651 loss)\nI0821 20:11:16.858407 32360 solver.cpp:228] Iteration 19100, loss = 0.000245069\nI0821 20:11:16.858461 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:11:16.858479 32360 solver.cpp:244]     Train net output #1: loss = 0.000245125 (* 1 = 0.000245125 loss)\nI0821 20:11:16.958640 32360 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0821 20:13:34.558694 32360 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0821 20:14:56.320857 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:14:56.321152 32360 solver.cpp:404]     Test net output #1: loss = 2.47245 (* 1 = 2.47245 loss)\nI0821 20:14:57.633239 32360 solver.cpp:228] Iteration 19200, loss = 0.000219941\nI0821 20:14:57.633282 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:14:57.633298 32360 solver.cpp:244]     Train net output #1: loss = 0.000219997 (* 1 = 0.000219997 loss)\nI0821 20:14:57.733844 32360 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0821 20:17:15.338455 32360 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0821 20:18:37.088281 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:18:37.088593 32360 solver.cpp:404]     Test net output #1: loss = 2.45735 (* 1 = 2.45735 loss)\nI0821 20:18:38.400238 32360 solver.cpp:228] Iteration 19300, loss = 0.000212044\nI0821 20:18:38.400279 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:18:38.400295 32360 solver.cpp:244]     Train net output #1: loss = 0.0002121 (* 1 = 0.0002121 loss)\nI0821 20:18:38.500015 32360 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0821 20:20:56.220165 32360 
solver.cpp:337] Iteration 19400, Testing net (#0)\nI0821 20:22:17.972936 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:22:17.973245 32360 solver.cpp:404]     Test net output #1: loss = 2.44634 (* 1 = 2.44634 loss)\nI0821 20:22:19.285094 32360 solver.cpp:228] Iteration 19400, loss = 0.000207594\nI0821 20:22:19.285147 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:22:19.285164 32360 solver.cpp:244]     Train net output #1: loss = 0.00020765 (* 1 = 0.00020765 loss)\nI0821 20:22:19.389050 32360 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0821 20:24:36.968946 32360 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0821 20:25:58.703258 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:25:58.703573 32360 solver.cpp:404]     Test net output #1: loss = 2.43404 (* 1 = 2.43404 loss)\nI0821 20:26:00.014578 32360 solver.cpp:228] Iteration 19500, loss = 0.000206618\nI0821 20:26:00.014626 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:26:00.014642 32360 solver.cpp:244]     Train net output #1: loss = 0.000206674 (* 1 = 0.000206674 loss)\nI0821 20:26:00.120297 32360 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0821 20:28:17.861680 32360 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0821 20:29:39.601676 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:29:39.601963 32360 solver.cpp:404]     Test net output #1: loss = 2.42526 (* 1 = 2.42526 loss)\nI0821 20:29:40.913679 32360 solver.cpp:228] Iteration 19600, loss = 0.000204797\nI0821 20:29:40.913719 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:29:40.913734 32360 solver.cpp:244]     Train net output #1: loss = 0.000204853 (* 1 = 0.000204853 loss)\nI0821 20:29:41.022444 32360 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0821 20:31:58.663354 32360 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0821 20:33:20.387322 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.10012\nI0821 20:33:20.387640 32360 solver.cpp:404]     Test net output #1: loss = 2.41447 (* 1 = 2.41447 loss)\nI0821 20:33:21.699565 32360 solver.cpp:228] Iteration 19700, loss = 0.000203384\nI0821 20:33:21.699606 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:33:21.699622 32360 solver.cpp:244]     Train net output #1: loss = 0.00020344 (* 1 = 0.00020344 loss)\nI0821 20:33:21.806756 32360 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0821 20:35:39.406868 32360 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0821 20:37:01.154229 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:37:01.154542 32360 solver.cpp:404]     Test net output #1: loss = 2.407 (* 1 = 2.407 loss)\nI0821 20:37:02.466514 32360 solver.cpp:228] Iteration 19800, loss = 0.000195255\nI0821 20:37:02.466550 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:37:02.466567 32360 solver.cpp:244]     Train net output #1: loss = 0.000195311 (* 1 = 0.000195311 loss)\nI0821 20:37:02.571866 32360 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0821 20:39:20.175206 32360 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0821 20:40:41.862910 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:40:41.863194 32360 solver.cpp:404]     Test net output #1: loss = 2.39764 (* 1 = 2.39764 loss)\nI0821 20:40:43.175076 32360 solver.cpp:228] Iteration 19900, loss = 0.000186958\nI0821 20:40:43.175122 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:40:43.175137 32360 solver.cpp:244]     Train net output #1: loss = 0.000187014 (* 1 = 0.000187014 loss)\nI0821 20:40:43.279544 32360 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0821 20:43:00.869930 32360 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0821 20:44:22.563056 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:44:22.563300 32360 solver.cpp:404]     Test net output #1: loss = 2.39206 (* 1 = 2.39206 
loss)\nI0821 20:44:23.875200 32360 solver.cpp:228] Iteration 20000, loss = 0.000213638\nI0821 20:44:23.875239 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:44:23.875254 32360 solver.cpp:244]     Train net output #1: loss = 0.000213694 (* 1 = 0.000213694 loss)\nI0821 20:44:23.976392 32360 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0821 20:46:41.629663 32360 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0821 20:48:03.256902 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:48:03.257127 32360 solver.cpp:404]     Test net output #1: loss = 2.38503 (* 1 = 2.38503 loss)\nI0821 20:48:04.568684 32360 solver.cpp:228] Iteration 20100, loss = 0.000207392\nI0821 20:48:04.568727 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:48:04.568743 32360 solver.cpp:244]     Train net output #1: loss = 0.000207448 (* 1 = 0.000207448 loss)\nI0821 20:48:04.674391 32360 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0821 20:50:22.319900 32360 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0821 20:51:43.789244 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:51:43.789470 32360 solver.cpp:404]     Test net output #1: loss = 2.38057 (* 1 = 2.38057 loss)\nI0821 20:51:45.100767 32360 solver.cpp:228] Iteration 20200, loss = 0.000197353\nI0821 20:51:45.100811 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:51:45.100826 32360 solver.cpp:244]     Train net output #1: loss = 0.00019741 (* 1 = 0.00019741 loss)\nI0821 20:51:45.200556 32360 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0821 20:54:02.906183 32360 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0821 20:55:24.549859 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 20:55:24.550115 32360 solver.cpp:404]     Test net output #1: loss = 2.37428 (* 1 = 2.37428 loss)\nI0821 20:55:25.862041 32360 solver.cpp:228] Iteration 20300, loss = 0.000204746\nI0821 20:55:25.862087 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:55:25.862102 32360 solver.cpp:244]     Train net output #1: loss = 0.000204802 (* 1 = 0.000204802 loss)\nI0821 20:55:25.961107 32360 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0821 20:57:43.588515 32360 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0821 20:59:05.321036 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 20:59:05.321292 32360 solver.cpp:404]     Test net output #1: loss = 2.37083 (* 1 = 2.37083 loss)\nI0821 20:59:06.632964 32360 solver.cpp:228] Iteration 20400, loss = 0.000195578\nI0821 20:59:06.633005 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:59:06.633023 32360 solver.cpp:244]     Train net output #1: loss = 0.000195635 (* 1 = 0.000195635 loss)\nI0821 20:59:06.731636 32360 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0821 21:01:24.300446 32360 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0821 21:02:45.872972 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 21:02:45.873240 32360 solver.cpp:404]     Test net output #1: loss = 2.36513 (* 1 = 2.36513 loss)\nI0821 21:02:47.185084 32360 solver.cpp:228] Iteration 20500, loss = 0.00019897\nI0821 21:02:47.185137 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:02:47.185154 32360 solver.cpp:244]     Train net output #1: loss = 0.000199026 (* 1 = 0.000199026 loss)\nI0821 21:02:47.284548 32360 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0821 21:05:04.913399 32360 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0821 21:06:26.635783 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 21:06:26.636035 32360 solver.cpp:404]     Test net output #1: loss = 2.36147 (* 1 = 2.36147 loss)\nI0821 21:06:27.948776 32360 solver.cpp:228] Iteration 20600, loss = 0.000205686\nI0821 21:06:27.948830 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:06:27.948848 32360 solver.cpp:244]     Train net output 
#1: loss = 0.000205742 (* 1 = 0.000205742 loss)\nI0821 21:06:28.048249 32360 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0821 21:08:45.689893 32360 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0821 21:10:07.425457 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 21:10:07.425742 32360 solver.cpp:404]     Test net output #1: loss = 2.3569 (* 1 = 2.3569 loss)\nI0821 21:10:08.737689 32360 solver.cpp:228] Iteration 20700, loss = 0.000195681\nI0821 21:10:08.737740 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:10:08.737763 32360 solver.cpp:244]     Train net output #1: loss = 0.000195737 (* 1 = 0.000195737 loss)\nI0821 21:10:08.838539 32360 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0821 21:12:26.503767 32360 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0821 21:13:48.263016 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 21:13:48.263265 32360 solver.cpp:404]     Test net output #1: loss = 2.35431 (* 1 = 2.35431 loss)\nI0821 21:13:49.574956 32360 solver.cpp:228] Iteration 20800, loss = 0.000198709\nI0821 21:13:49.575004 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:13:49.575029 32360 solver.cpp:244]     Train net output #1: loss = 0.000198765 (* 1 = 0.000198765 loss)\nI0821 21:13:49.684352 32360 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0821 21:16:07.428325 32360 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0821 21:17:29.134001 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 21:17:29.134280 32360 solver.cpp:404]     Test net output #1: loss = 2.35049 (* 1 = 2.35049 loss)\nI0821 21:17:30.445940 32360 solver.cpp:228] Iteration 20900, loss = 0.000200268\nI0821 21:17:30.445981 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:17:30.445996 32360 solver.cpp:244]     Train net output #1: loss = 0.000200324 (* 1 = 0.000200324 loss)\nI0821 21:17:30.546149 32360 sgd_solver.cpp:166] Iteration 20900, lr = 
0.35\nI0821 21:19:48.200290 32360 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0821 21:21:09.924226 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 21:21:09.924505 32360 solver.cpp:404]     Test net output #1: loss = 2.34861 (* 1 = 2.34861 loss)\nI0821 21:21:11.235612 32360 solver.cpp:228] Iteration 21000, loss = 0.000201895\nI0821 21:21:11.235653 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:21:11.235669 32360 solver.cpp:244]     Train net output #1: loss = 0.000201951 (* 1 = 0.000201951 loss)\nI0821 21:21:11.343591 32360 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0821 21:23:28.942342 32360 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0821 21:24:50.661993 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 21:24:50.662230 32360 solver.cpp:404]     Test net output #1: loss = 2.34582 (* 1 = 2.34582 loss)\nI0821 21:24:51.973850 32360 solver.cpp:228] Iteration 21100, loss = 0.000194651\nI0821 21:24:51.973902 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:24:51.973919 32360 solver.cpp:244]     Train net output #1: loss = 0.000194707 (* 1 = 0.000194707 loss)\nI0821 21:24:52.081104 32360 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0821 21:27:09.622025 32360 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0821 21:28:30.288242 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 21:28:30.288537 32360 solver.cpp:404]     Test net output #1: loss = 2.344 (* 1 = 2.344 loss)\nI0821 21:28:31.597111 32360 solver.cpp:228] Iteration 21200, loss = 0.000196965\nI0821 21:28:31.597148 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:28:31.597163 32360 solver.cpp:244]     Train net output #1: loss = 0.000197021 (* 1 = 0.000197021 loss)\nI0821 21:28:31.696571 32360 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0821 21:30:48.882536 32360 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0821 21:32:10.511131 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0821 21:32:10.511381 32360 solver.cpp:404]     Test net output #1: loss = 2.34105 (* 1 = 2.34105 loss)\nI0821 21:32:11.824376 32360 solver.cpp:228] Iteration 21300, loss = 0.000196276\nI0821 21:32:11.824436 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:32:11.824455 32360 solver.cpp:244]     Train net output #1: loss = 0.000196332 (* 1 = 0.000196332 loss)\nI0821 21:32:11.927055 32360 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0821 21:34:30.039819 32360 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0821 21:35:51.455682 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0821 21:35:51.455940 32360 solver.cpp:404]     Test net output #1: loss = 2.33955 (* 1 = 2.33955 loss)\nI0821 21:35:52.767853 32360 solver.cpp:228] Iteration 21400, loss = 0.000199452\nI0821 21:35:52.767909 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:35:52.767931 32360 solver.cpp:244]     Train net output #1: loss = 0.000199508 (* 1 = 0.000199508 loss)\nI0821 21:35:52.873035 32360 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0821 21:38:11.050362 32360 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0821 21:39:32.367246 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 21:39:32.367491 32360 solver.cpp:404]     Test net output #1: loss = 78.6169 (* 1 = 78.6169 loss)\nI0821 21:39:33.679337 32360 solver.cpp:228] Iteration 21500, loss = 1.89693\nI0821 21:39:33.679394 32360 solver.cpp:244]     Train net output #0: accuracy = 0.336\nI0821 21:39:33.679420 32360 solver.cpp:244]     Train net output #1: loss = 1.89693 (* 1 = 1.89693 loss)\nI0821 21:39:33.784843 32360 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0821 21:41:51.754693 32360 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0821 21:43:13.382791 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12772\nI0821 21:43:13.383098 32360 solver.cpp:404]     Test net output #1: 
loss = 76.1819 (* 1 = 76.1819 loss)\nI0821 21:43:14.696429 32360 solver.cpp:228] Iteration 21600, loss = 1.57391\nI0821 21:43:14.696475 32360 solver.cpp:244]     Train net output #0: accuracy = 0.424\nI0821 21:43:14.696496 32360 solver.cpp:244]     Train net output #1: loss = 1.57391 (* 1 = 1.57391 loss)\nI0821 21:43:14.802670 32360 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0821 21:45:32.824445 32360 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0821 21:46:54.590929 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09892\nI0821 21:46:54.591168 32360 solver.cpp:404]     Test net output #1: loss = 78.6972 (* 1 = 78.6972 loss)\nI0821 21:46:55.903573 32360 solver.cpp:228] Iteration 21700, loss = 1.31387\nI0821 21:46:55.903615 32360 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0821 21:46:55.903632 32360 solver.cpp:244]     Train net output #1: loss = 1.31387 (* 1 = 1.31387 loss)\nI0821 21:46:56.006911 32360 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0821 21:49:14.084162 32360 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0821 21:50:35.821826 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09856\nI0821 21:50:35.822087 32360 solver.cpp:404]     Test net output #1: loss = 78.7287 (* 1 = 78.7287 loss)\nI0821 21:50:37.133564 32360 solver.cpp:228] Iteration 21800, loss = 1.23861\nI0821 21:50:37.133605 32360 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0821 21:50:37.133622 32360 solver.cpp:244]     Train net output #1: loss = 1.23861 (* 1 = 1.23861 loss)\nI0821 21:50:37.239889 32360 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0821 21:52:55.287704 32360 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0821 21:54:16.898727 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11116\nI0821 21:54:16.898969 32360 solver.cpp:404]     Test net output #1: loss = 77.5717 (* 1 = 77.5717 loss)\nI0821 21:54:18.210310 32360 solver.cpp:228] Iteration 21900, loss = 0.936854\nI0821 21:54:18.210352 32360 
solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0821 21:54:18.210371 32360 solver.cpp:244]     Train net output #1: loss = 0.936854 (* 1 = 0.936854 loss)\nI0821 21:54:18.316821 32360 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0821 21:56:36.448056 32360 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0821 21:57:57.614910 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 21:57:57.615129 32360 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0821 21:57:58.926908 32360 solver.cpp:228] Iteration 22000, loss = 0.776918\nI0821 21:57:58.926954 32360 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0821 21:57:58.926968 32360 solver.cpp:244]     Train net output #1: loss = 0.776918 (* 1 = 0.776918 loss)\nI0821 21:57:59.035996 32360 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0821 22:00:17.184592 32360 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0821 22:01:38.387099 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10032\nI0821 22:01:38.387337 32360 solver.cpp:404]     Test net output #1: loss = 78.3953 (* 1 = 78.3953 loss)\nI0821 22:01:39.698933 32360 solver.cpp:228] Iteration 22100, loss = 0.792229\nI0821 22:01:39.698973 32360 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0821 22:01:39.698989 32360 solver.cpp:244]     Train net output #1: loss = 0.792229 (* 1 = 0.792229 loss)\nI0821 22:01:39.807576 32360 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0821 22:03:57.922030 32360 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0821 22:05:19.128986 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09684\nI0821 22:05:19.129233 32360 solver.cpp:404]     Test net output #1: loss = 77.7447 (* 1 = 77.7447 loss)\nI0821 22:05:20.440783 32360 solver.cpp:228] Iteration 22200, loss = 0.581122\nI0821 22:05:20.440824 32360 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 22:05:20.440840 32360 solver.cpp:244]     Train net output #1: loss = 
0.581121 (* 1 = 0.581121 loss)\nI0821 22:05:20.546921 32360 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0821 22:07:38.577119 32360 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0821 22:08:59.906028 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09516\nI0821 22:08:59.906255 32360 solver.cpp:404]     Test net output #1: loss = 78.7159 (* 1 = 78.7159 loss)\nI0821 22:09:01.217788 32360 solver.cpp:228] Iteration 22300, loss = 0.457408\nI0821 22:09:01.217839 32360 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 22:09:01.217857 32360 solver.cpp:244]     Train net output #1: loss = 0.457408 (* 1 = 0.457408 loss)\nI0821 22:09:01.324697 32360 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0821 22:11:19.394322 32360 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0821 22:12:40.884542 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0821 22:12:40.884841 32360 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0821 22:12:42.196430 32360 solver.cpp:228] Iteration 22400, loss = 0.340326\nI0821 22:12:42.196471 32360 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 22:12:42.196486 32360 solver.cpp:244]     Train net output #1: loss = 0.340325 (* 1 = 0.340325 loss)\nI0821 22:12:42.298745 32360 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0821 22:15:00.409113 32360 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0821 22:16:22.047370 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09928\nI0821 22:16:22.047598 32360 solver.cpp:404]     Test net output #1: loss = 78.1916 (* 1 = 78.1916 loss)\nI0821 22:16:23.359330 32360 solver.cpp:228] Iteration 22500, loss = 0.330407\nI0821 22:16:23.359371 32360 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0821 22:16:23.359387 32360 solver.cpp:244]     Train net output #1: loss = 0.330407 (* 1 = 0.330407 loss)\nI0821 22:16:23.467720 32360 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0821 22:18:41.548328 32360 
solver.cpp:337] Iteration 22600, Testing net (#0)\nI0821 22:20:03.145512 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1006\nI0821 22:20:03.145767 32360 solver.cpp:404]     Test net output #1: loss = 78.0301 (* 1 = 78.0301 loss)\nI0821 22:20:04.457533 32360 solver.cpp:228] Iteration 22600, loss = 0.216083\nI0821 22:20:04.457572 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 22:20:04.457587 32360 solver.cpp:244]     Train net output #1: loss = 0.216083 (* 1 = 0.216083 loss)\nI0821 22:20:04.566103 32360 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0821 22:22:22.591006 32360 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0821 22:23:44.288192 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10836\nI0821 22:23:44.288429 32360 solver.cpp:404]     Test net output #1: loss = 66.732 (* 1 = 66.732 loss)\nI0821 22:23:45.600277 32360 solver.cpp:228] Iteration 22700, loss = 0.186445\nI0821 22:23:45.600322 32360 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 22:23:45.600339 32360 solver.cpp:244]     Train net output #1: loss = 0.186444 (* 1 = 0.186444 loss)\nI0821 22:23:45.708416 32360 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0821 22:26:03.984694 32360 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0821 22:27:25.709655 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1294\nI0821 22:27:25.709895 32360 solver.cpp:404]     Test net output #1: loss = 52.3973 (* 1 = 52.3973 loss)\nI0821 22:27:27.022557 32360 solver.cpp:228] Iteration 22800, loss = 0.164491\nI0821 22:27:27.022603 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 22:27:27.022619 32360 solver.cpp:244]     Train net output #1: loss = 0.164491 (* 1 = 0.164491 loss)\nI0821 22:27:27.128976 32360 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0821 22:29:45.117926 32360 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0821 22:31:06.862740 32360 solver.cpp:404]     Test net output #0: accuracy = 
0.12696\nI0821 22:31:06.863014 32360 solver.cpp:404]     Test net output #1: loss = 47.8566 (* 1 = 47.8566 loss)\nI0821 22:31:08.175915 32360 solver.cpp:228] Iteration 22900, loss = 0.13847\nI0821 22:31:08.175959 32360 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 22:31:08.175976 32360 solver.cpp:244]     Train net output #1: loss = 0.13847 (* 1 = 0.13847 loss)\nI0821 22:31:08.276527 32360 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0821 22:33:26.219769 32360 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0821 22:34:47.966455 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11964\nI0821 22:34:47.966713 32360 solver.cpp:404]     Test net output #1: loss = 46.6432 (* 1 = 46.6432 loss)\nI0821 22:34:49.278300 32360 solver.cpp:228] Iteration 23000, loss = 0.0859063\nI0821 22:34:49.278345 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 22:34:49.278362 32360 solver.cpp:244]     Train net output #1: loss = 0.085906 (* 1 = 0.085906 loss)\nI0821 22:34:49.383116 32360 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0821 22:37:07.548247 32360 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0821 22:38:29.221678 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15196\nI0821 22:38:29.221940 32360 solver.cpp:404]     Test net output #1: loss = 25.1627 (* 1 = 25.1627 loss)\nI0821 22:38:30.534584 32360 solver.cpp:228] Iteration 23100, loss = 0.0375129\nI0821 22:38:30.534629 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:38:30.534644 32360 solver.cpp:244]     Train net output #1: loss = 0.0375126 (* 1 = 0.0375126 loss)\nI0821 22:38:30.636163 32360 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0821 22:40:48.608099 32360 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0821 22:42:10.345511 32360 solver.cpp:404]     Test net output #0: accuracy = 0.27372\nI0821 22:42:10.345803 32360 solver.cpp:404]     Test net output #1: loss = 14.7675 (* 1 = 14.7675 loss)\nI0821 22:42:11.658762 
32360 solver.cpp:228] Iteration 23200, loss = 0.0985937\nI0821 22:42:11.658807 32360 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 22:42:11.658824 32360 solver.cpp:244]     Train net output #1: loss = 0.0985935 (* 1 = 0.0985935 loss)\nI0821 22:42:11.775219 32360 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0821 22:44:29.726703 32360 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0821 22:45:51.327036 32360 solver.cpp:404]     Test net output #0: accuracy = 0.3084\nI0821 22:45:51.327297 32360 solver.cpp:404]     Test net output #1: loss = 7.7007 (* 1 = 7.7007 loss)\nI0821 22:45:52.639843 32360 solver.cpp:228] Iteration 23300, loss = 0.0264312\nI0821 22:45:52.639889 32360 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:45:52.639905 32360 solver.cpp:244]     Train net output #1: loss = 0.0264309 (* 1 = 0.0264309 loss)\nI0821 22:45:52.744638 32360 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0821 22:48:10.681510 32360 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0821 22:49:32.087519 32360 solver.cpp:404]     Test net output #0: accuracy = 0.25288\nI0821 22:49:32.087772 32360 solver.cpp:404]     Test net output #1: loss = 8.18765 (* 1 = 8.18765 loss)\nI0821 22:49:33.400612 32360 solver.cpp:228] Iteration 23400, loss = 0.000635777\nI0821 22:49:33.400655 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:49:33.400671 32360 solver.cpp:244]     Train net output #1: loss = 0.000635509 (* 1 = 0.000635509 loss)\nI0821 22:49:33.503976 32360 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0821 22:51:51.361923 32360 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0821 22:53:13.027487 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16836\nI0821 22:53:13.027765 32360 solver.cpp:404]     Test net output #1: loss = 10.9748 (* 1 = 10.9748 loss)\nI0821 22:53:14.338924 32360 solver.cpp:228] Iteration 23500, loss = 0.000591706\nI0821 22:53:14.338965 32360 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0821 22:53:14.338980 32360 solver.cpp:244]     Train net output #1: loss = 0.000591438 (* 1 = 0.000591438 loss)\nI0821 22:53:14.445685 32360 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0821 22:55:32.375665 32360 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0821 22:56:54.088577 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13792\nI0821 22:56:54.088840 32360 solver.cpp:404]     Test net output #1: loss = 12.4996 (* 1 = 12.4996 loss)\nI0821 22:56:55.400617 32360 solver.cpp:228] Iteration 23600, loss = 0.000629825\nI0821 22:56:55.400660 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:56:55.400676 32360 solver.cpp:244]     Train net output #1: loss = 0.000629557 (* 1 = 0.000629557 loss)\nI0821 22:56:55.512403 32360 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0821 22:59:13.493209 32360 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0821 23:00:34.844264 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11676\nI0821 23:00:34.844518 32360 solver.cpp:404]     Test net output #1: loss = 13.9942 (* 1 = 13.9942 loss)\nI0821 23:00:36.156605 32360 solver.cpp:228] Iteration 23700, loss = 0.000603309\nI0821 23:00:36.156651 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:00:36.156666 32360 solver.cpp:244]     Train net output #1: loss = 0.000603041 (* 1 = 0.000603041 loss)\nI0821 23:00:36.266142 32360 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0821 23:02:54.174238 32360 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0821 23:04:15.792455 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11828\nI0821 23:04:15.792706 32360 solver.cpp:404]     Test net output #1: loss = 14.6819 (* 1 = 14.6819 loss)\nI0821 23:04:17.103886 32360 solver.cpp:228] Iteration 23800, loss = 0.000633922\nI0821 23:04:17.103929 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:04:17.103943 32360 solver.cpp:244]     Train net output #1: loss = 0.000633654 (* 1 = 0.000633654 
loss)\nI0821 23:04:17.214831 32360 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0821 23:06:35.208484 32360 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0821 23:07:56.878860 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13176\nI0821 23:07:56.879101 32360 solver.cpp:404]     Test net output #1: loss = 14.2386 (* 1 = 14.2386 loss)\nI0821 23:07:58.191136 32360 solver.cpp:228] Iteration 23900, loss = 0.000723738\nI0821 23:07:58.191179 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:07:58.191195 32360 solver.cpp:244]     Train net output #1: loss = 0.00072347 (* 1 = 0.00072347 loss)\nI0821 23:07:58.303237 32360 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0821 23:10:16.375174 32360 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0821 23:11:37.925173 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1392\nI0821 23:11:37.925410 32360 solver.cpp:404]     Test net output #1: loss = 13.2788 (* 1 = 13.2788 loss)\nI0821 23:11:39.236954 32360 solver.cpp:228] Iteration 24000, loss = 0.000717974\nI0821 23:11:39.236999 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:11:39.237015 32360 solver.cpp:244]     Train net output #1: loss = 0.000717705 (* 1 = 0.000717705 loss)\nI0821 23:11:39.340778 32360 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0821 23:13:57.412426 32360 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0821 23:15:19.030925 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14408\nI0821 23:15:19.031183 32360 solver.cpp:404]     Test net output #1: loss = 12.3288 (* 1 = 12.3288 loss)\nI0821 23:15:20.342648 32360 solver.cpp:228] Iteration 24100, loss = 0.000745185\nI0821 23:15:20.342690 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:15:20.342706 32360 solver.cpp:244]     Train net output #1: loss = 0.000744917 (* 1 = 0.000744917 loss)\nI0821 23:15:20.445041 32360 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0821 23:17:38.493062 32360 
solver.cpp:337] Iteration 24200, Testing net (#0)\nI0821 23:19:00.237467 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14688\nI0821 23:19:00.237704 32360 solver.cpp:404]     Test net output #1: loss = 11.2003 (* 1 = 11.2003 loss)\nI0821 23:19:01.548841 32360 solver.cpp:228] Iteration 24200, loss = 0.000804733\nI0821 23:19:01.548884 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:19:01.548902 32360 solver.cpp:244]     Train net output #1: loss = 0.000804464 (* 1 = 0.000804464 loss)\nI0821 23:19:01.656682 32360 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0821 23:21:19.623282 32360 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0821 23:22:41.360998 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14608\nI0821 23:22:41.361305 32360 solver.cpp:404]     Test net output #1: loss = 10.1867 (* 1 = 10.1867 loss)\nI0821 23:22:42.672561 32360 solver.cpp:228] Iteration 24300, loss = 0.000869062\nI0821 23:22:42.672603 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:22:42.672619 32360 solver.cpp:244]     Train net output #1: loss = 0.000868794 (* 1 = 0.000868794 loss)\nI0821 23:22:42.779466 32360 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0821 23:25:00.829483 32360 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0821 23:26:22.536909 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1448\nI0821 23:26:22.537151 32360 solver.cpp:404]     Test net output #1: loss = 9.09528 (* 1 = 9.09528 loss)\nI0821 23:26:23.848928 32360 solver.cpp:228] Iteration 24400, loss = 0.000984474\nI0821 23:26:23.848971 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:26:23.848986 32360 solver.cpp:244]     Train net output #1: loss = 0.000984206 (* 1 = 0.000984206 loss)\nI0821 23:26:23.959623 32360 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0821 23:28:41.957020 32360 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0821 23:30:03.711802 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.14164\nI0821 23:30:03.712070 32360 solver.cpp:404]     Test net output #1: loss = 8.20054 (* 1 = 8.20054 loss)\nI0821 23:30:05.024349 32360 solver.cpp:228] Iteration 24500, loss = 0.000812755\nI0821 23:30:05.024397 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:30:05.024415 32360 solver.cpp:244]     Train net output #1: loss = 0.000812487 (* 1 = 0.000812487 loss)\nI0821 23:30:05.132913 32360 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0821 23:32:23.344775 32360 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0821 23:33:45.084257 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13412\nI0821 23:33:45.084524 32360 solver.cpp:404]     Test net output #1: loss = 7.36383 (* 1 = 7.36383 loss)\nI0821 23:33:46.396365 32360 solver.cpp:228] Iteration 24600, loss = 0.00083851\nI0821 23:33:46.396409 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:33:46.396425 32360 solver.cpp:244]     Train net output #1: loss = 0.000838242 (* 1 = 0.000838242 loss)\nI0821 23:33:46.496693 32360 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0821 23:36:04.574717 32360 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0821 23:37:26.327625 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12528\nI0821 23:37:26.327888 32360 solver.cpp:404]     Test net output #1: loss = 6.6901 (* 1 = 6.6901 loss)\nI0821 23:37:27.639377 32360 solver.cpp:228] Iteration 24700, loss = 0.000974893\nI0821 23:37:27.639420 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:37:27.639437 32360 solver.cpp:244]     Train net output #1: loss = 0.000974625 (* 1 = 0.000974625 loss)\nI0821 23:37:27.741751 32360 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0821 23:39:45.805444 32360 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0821 23:41:07.546310 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11624\nI0821 23:41:07.546628 32360 solver.cpp:404]     Test net output #1: loss = 6.18195 (* 1 = 6.18195 
loss)\nI0821 23:41:08.857262 32360 solver.cpp:228] Iteration 24800, loss = 0.000915518\nI0821 23:41:08.857306 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:41:08.857323 32360 solver.cpp:244]     Train net output #1: loss = 0.00091525 (* 1 = 0.00091525 loss)\nI0821 23:41:08.961727 32360 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0821 23:43:27.035058 32360 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0821 23:44:48.779510 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11032\nI0821 23:44:48.779769 32360 solver.cpp:404]     Test net output #1: loss = 5.75498 (* 1 = 5.75498 loss)\nI0821 23:44:50.091158 32360 solver.cpp:228] Iteration 24900, loss = 0.000930631\nI0821 23:44:50.091202 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:44:50.091218 32360 solver.cpp:244]     Train net output #1: loss = 0.000930362 (* 1 = 0.000930362 loss)\nI0821 23:44:50.200057 32360 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0821 23:47:08.267925 32360 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0821 23:48:29.992029 32360 solver.cpp:404]     Test net output #0: accuracy = 0.108\nI0821 23:48:29.992269 32360 solver.cpp:404]     Test net output #1: loss = 5.42745 (* 1 = 5.42745 loss)\nI0821 23:48:31.303925 32360 solver.cpp:228] Iteration 25000, loss = 0.000825042\nI0821 23:48:31.303968 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:48:31.303984 32360 solver.cpp:244]     Train net output #1: loss = 0.000824774 (* 1 = 0.000824774 loss)\nI0821 23:48:31.413029 32360 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0821 23:50:49.321774 32360 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0821 23:52:11.041189 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10792\nI0821 23:52:11.041462 32360 solver.cpp:404]     Test net output #1: loss = 5.04523 (* 1 = 5.04523 loss)\nI0821 23:52:12.352804 32360 solver.cpp:228] Iteration 25100, loss = 0.000782308\nI0821 23:52:12.352846 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:52:12.352861 32360 solver.cpp:244]     Train net output #1: loss = 0.00078204 (* 1 = 0.00078204 loss)\nI0821 23:52:12.456056 32360 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0821 23:54:30.441552 32360 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0821 23:55:52.150806 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1066\nI0821 23:55:52.151046 32360 solver.cpp:404]     Test net output #1: loss = 4.80516 (* 1 = 4.80516 loss)\nI0821 23:55:53.462677 32360 solver.cpp:228] Iteration 25200, loss = 0.000835032\nI0821 23:55:53.462718 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:55:53.462733 32360 solver.cpp:244]     Train net output #1: loss = 0.000834764 (* 1 = 0.000834764 loss)\nI0821 23:55:53.577323 32360 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0821 23:58:11.676798 32360 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0821 23:59:33.350970 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10672\nI0821 23:59:33.351218 32360 solver.cpp:404]     Test net output #1: loss = 4.56949 (* 1 = 4.56949 loss)\nI0821 23:59:34.662372 32360 solver.cpp:228] Iteration 25300, loss = 0.000684603\nI0821 23:59:34.662410 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:59:34.662425 32360 solver.cpp:244]     Train net output #1: loss = 0.000684334 (* 1 = 0.000684334 loss)\nI0821 23:59:34.766275 32360 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0822 00:01:52.833921 32360 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0822 00:03:14.317203 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10612\nI0822 00:03:14.317472 32360 solver.cpp:404]     Test net output #1: loss = 4.40115 (* 1 = 4.40115 loss)\nI0822 00:03:15.628911 32360 solver.cpp:228] Iteration 25400, loss = 0.000746387\nI0822 00:03:15.628949 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:03:15.628965 32360 solver.cpp:244]     Train net output #1: 
loss = 0.000746119 (* 1 = 0.000746119 loss)\nI0822 00:03:15.731751 32360 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0822 00:05:33.751749 32360 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0822 00:06:55.331034 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10716\nI0822 00:06:55.331266 32360 solver.cpp:404]     Test net output #1: loss = 4.15921 (* 1 = 4.15921 loss)\nI0822 00:06:56.642213 32360 solver.cpp:228] Iteration 25500, loss = 0.000880267\nI0822 00:06:56.642253 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:06:56.642269 32360 solver.cpp:244]     Train net output #1: loss = 0.000879999 (* 1 = 0.000879999 loss)\nI0822 00:06:56.750752 32360 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0822 00:09:14.841238 32360 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0822 00:10:36.411200 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10424\nI0822 00:10:36.411510 32360 solver.cpp:404]     Test net output #1: loss = 4.08644 (* 1 = 4.08644 loss)\nI0822 00:10:37.722913 32360 solver.cpp:228] Iteration 25600, loss = 0.00078612\nI0822 00:10:37.722949 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:10:37.722965 32360 solver.cpp:244]     Train net output #1: loss = 0.000785852 (* 1 = 0.000785852 loss)\nI0822 00:10:37.832311 32360 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0822 00:12:55.892027 32360 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0822 00:14:17.627250 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10412\nI0822 00:14:17.627514 32360 solver.cpp:404]     Test net output #1: loss = 3.99396 (* 1 = 3.99396 loss)\nI0822 00:14:18.938930 32360 solver.cpp:228] Iteration 25700, loss = 0.000711655\nI0822 00:14:18.938971 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:14:18.938987 32360 solver.cpp:244]     Train net output #1: loss = 0.000711387 (* 1 = 0.000711387 loss)\nI0822 00:14:19.041069 32360 sgd_solver.cpp:166] Iteration 25700, lr = 
0.35\nI0822 00:16:37.069387 32360 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0822 00:17:58.810514 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10304\nI0822 00:17:58.810761 32360 solver.cpp:404]     Test net output #1: loss = 3.81547 (* 1 = 3.81547 loss)\nI0822 00:18:00.122313 32360 solver.cpp:228] Iteration 25800, loss = 0.000691384\nI0822 00:18:00.122354 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:18:00.122370 32360 solver.cpp:244]     Train net output #1: loss = 0.000691116 (* 1 = 0.000691116 loss)\nI0822 00:18:00.229821 32360 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0822 00:20:18.235035 32360 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0822 00:21:40.022075 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10284\nI0822 00:21:40.022374 32360 solver.cpp:404]     Test net output #1: loss = 3.66852 (* 1 = 3.66852 loss)\nI0822 00:21:41.334875 32360 solver.cpp:228] Iteration 25900, loss = 0.000656964\nI0822 00:21:41.334921 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:21:41.334944 32360 solver.cpp:244]     Train net output #1: loss = 0.000656696 (* 1 = 0.000656696 loss)\nI0822 00:21:41.437067 32360 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0822 00:23:59.514853 32360 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0822 00:25:21.222362 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1014\nI0822 00:25:21.222609 32360 solver.cpp:404]     Test net output #1: loss = 3.53994 (* 1 = 3.53994 loss)\nI0822 00:25:22.534281 32360 solver.cpp:228] Iteration 26000, loss = 0.000573033\nI0822 00:25:22.534323 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:25:22.534339 32360 solver.cpp:244]     Train net output #1: loss = 0.000572764 (* 1 = 0.000572764 loss)\nI0822 00:25:22.639376 32360 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0822 00:27:40.646603 32360 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0822 00:29:02.374511 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.10196\nI0822 00:29:02.374869 32360 solver.cpp:404]     Test net output #1: loss = 3.3795 (* 1 = 3.3795 loss)\nI0822 00:29:03.686664 32360 solver.cpp:228] Iteration 26100, loss = 0.000644566\nI0822 00:29:03.686704 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:29:03.686720 32360 solver.cpp:244]     Train net output #1: loss = 0.000644298 (* 1 = 0.000644298 loss)\nI0822 00:29:03.787116 32360 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0822 00:31:21.680984 32360 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0822 00:32:43.368338 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 00:32:43.368628 32360 solver.cpp:404]     Test net output #1: loss = 3.27258 (* 1 = 3.27258 loss)\nI0822 00:32:44.680435 32360 solver.cpp:228] Iteration 26200, loss = 0.000574242\nI0822 00:32:44.680479 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:32:44.680495 32360 solver.cpp:244]     Train net output #1: loss = 0.000573974 (* 1 = 0.000573974 loss)\nI0822 00:32:44.787550 32360 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0822 00:35:02.850129 32360 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0822 00:36:24.365217 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0822 00:36:24.365458 32360 solver.cpp:404]     Test net output #1: loss = 3.16905 (* 1 = 3.16905 loss)\nI0822 00:36:25.676859 32360 solver.cpp:228] Iteration 26300, loss = 0.000577058\nI0822 00:36:25.676901 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:36:25.676916 32360 solver.cpp:244]     Train net output #1: loss = 0.000576789 (* 1 = 0.000576789 loss)\nI0822 00:36:25.779973 32360 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0822 00:38:43.761147 32360 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0822 00:40:05.306303 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 00:40:05.306574 32360 solver.cpp:404]     Test net output 
#1: loss = 3.10252 (* 1 = 3.10252 loss)\nI0822 00:40:06.618039 32360 solver.cpp:228] Iteration 26400, loss = 0.000513786\nI0822 00:40:06.618082 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:40:06.618098 32360 solver.cpp:244]     Train net output #1: loss = 0.000513518 (* 1 = 0.000513518 loss)\nI0822 00:40:06.720705 32360 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0822 00:42:24.829524 32360 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0822 00:43:45.832845 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 00:43:45.833098 32360 solver.cpp:404]     Test net output #1: loss = 2.99052 (* 1 = 2.99052 loss)\nI0822 00:43:47.144244 32360 solver.cpp:228] Iteration 26500, loss = 0.000526399\nI0822 00:43:47.144290 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:43:47.144305 32360 solver.cpp:244]     Train net output #1: loss = 0.00052613 (* 1 = 0.00052613 loss)\nI0822 00:43:47.256347 32360 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0822 00:46:05.427682 32360 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0822 00:47:26.399147 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09976\nI0822 00:47:26.399400 32360 solver.cpp:404]     Test net output #1: loss = 2.911 (* 1 = 2.911 loss)\nI0822 00:47:27.713596 32360 solver.cpp:228] Iteration 26600, loss = 0.000547366\nI0822 00:47:27.713649 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:47:27.713666 32360 solver.cpp:244]     Train net output #1: loss = 0.000547098 (* 1 = 0.000547098 loss)\nI0822 00:47:27.825145 32360 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0822 00:49:45.916754 32360 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0822 00:51:06.912820 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10044\nI0822 00:51:06.913089 32360 solver.cpp:404]     Test net output #1: loss = 2.83202 (* 1 = 2.83202 loss)\nI0822 00:51:08.224465 32360 solver.cpp:228] Iteration 26700, loss = 0.000571086\nI0822 
00:51:08.224521 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:51:08.224539 32360 solver.cpp:244]     Train net output #1: loss = 0.000570818 (* 1 = 0.000570818 loss)\nI0822 00:51:08.330404 32360 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0822 00:53:26.298228 32360 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0822 00:54:47.861848 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09976\nI0822 00:54:47.862067 32360 solver.cpp:404]     Test net output #1: loss = 2.76363 (* 1 = 2.76363 loss)\nI0822 00:54:49.175685 32360 solver.cpp:228] Iteration 26800, loss = 0.000486842\nI0822 00:54:49.175737 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:54:49.175753 32360 solver.cpp:244]     Train net output #1: loss = 0.000486574 (* 1 = 0.000486574 loss)\nI0822 00:54:49.280750 32360 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0822 00:57:07.365159 32360 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0822 00:58:29.112736 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 00:58:29.113000 32360 solver.cpp:404]     Test net output #1: loss = 2.72066 (* 1 = 2.72066 loss)\nI0822 00:58:30.427356 32360 solver.cpp:228] Iteration 26900, loss = 0.000450293\nI0822 00:58:30.427412 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:58:30.427428 32360 solver.cpp:244]     Train net output #1: loss = 0.000450025 (* 1 = 0.000450025 loss)\nI0822 00:58:30.531393 32360 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0822 01:00:48.440750 32360 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0822 01:02:10.184370 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 01:02:10.184689 32360 solver.cpp:404]     Test net output #1: loss = 2.67139 (* 1 = 2.67139 loss)\nI0822 01:02:11.499780 32360 solver.cpp:228] Iteration 27000, loss = 0.000454606\nI0822 01:02:11.499838 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:02:11.499862 32360 solver.cpp:244]  
   Train net output #1: loss = 0.000454338 (* 1 = 0.000454338 loss)\nI0822 01:02:11.605581 32360 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0822 01:04:29.616653 32360 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0822 01:05:51.239748 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 01:05:51.239996 32360 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0822 01:05:52.554944 32360 solver.cpp:228] Iteration 27100, loss = 1.54909\nI0822 01:05:52.554999 32360 solver.cpp:244]     Train net output #0: accuracy = 0.424\nI0822 01:05:52.555023 32360 solver.cpp:244]     Train net output #1: loss = 1.54909 (* 1 = 1.54909 loss)\nI0822 01:05:52.663630 32360 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0822 01:08:10.582828 32360 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0822 01:09:32.347628 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 01:09:32.347882 32360 solver.cpp:404]     Test net output #1: loss = 78.5575 (* 1 = 78.5575 loss)\nI0822 01:09:33.663170 32360 solver.cpp:228] Iteration 27200, loss = 1.13743\nI0822 01:09:33.663224 32360 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0822 01:09:33.663249 32360 solver.cpp:244]     Train net output #1: loss = 1.13743 (* 1 = 1.13743 loss)\nI0822 01:09:33.768460 32360 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0822 01:11:51.739073 32360 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0822 01:13:13.491200 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0822 01:13:13.491533 32360 solver.cpp:404]     Test net output #1: loss = 78.6483 (* 1 = 78.6483 loss)\nI0822 01:13:14.806948 32360 solver.cpp:228] Iteration 27300, loss = 0.8438\nI0822 01:13:14.807005 32360 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0822 01:13:14.807030 32360 solver.cpp:244]     Train net output #1: loss = 0.8438 (* 1 = 0.8438 loss)\nI0822 01:13:14.909255 32360 sgd_solver.cpp:166] Iteration 27300, lr = 
0.35\nI0822 01:15:32.874071 32360 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0822 01:16:54.644646 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 01:16:54.644966 32360 solver.cpp:404]     Test net output #1: loss = 78.3657 (* 1 = 78.3657 loss)\nI0822 01:16:55.959381 32360 solver.cpp:228] Iteration 27400, loss = 0.747161\nI0822 01:16:55.959444 32360 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0822 01:16:55.959468 32360 solver.cpp:244]     Train net output #1: loss = 0.747161 (* 1 = 0.747161 loss)\nI0822 01:16:56.062039 32360 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0822 01:19:14.063506 32360 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0822 01:20:35.839896 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 01:20:35.840191 32360 solver.cpp:404]     Test net output #1: loss = 78.5939 (* 1 = 78.5939 loss)\nI0822 01:20:37.155673 32360 solver.cpp:228] Iteration 27500, loss = 0.614031\nI0822 01:20:37.155730 32360 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0822 01:20:37.155755 32360 solver.cpp:244]     Train net output #1: loss = 0.614031 (* 1 = 0.614031 loss)\nI0822 01:20:37.260505 32360 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0822 01:22:55.255189 32360 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0822 01:24:17.036739 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09496\nI0822 01:24:17.037037 32360 solver.cpp:404]     Test net output #1: loss = 78.456 (* 1 = 78.456 loss)\nI0822 01:24:18.352754 32360 solver.cpp:228] Iteration 27600, loss = 0.35178\nI0822 01:24:18.352811 32360 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0822 01:24:18.352835 32360 solver.cpp:244]     Train net output #1: loss = 0.35178 (* 1 = 0.35178 loss)\nI0822 01:24:18.454576 32360 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0822 01:26:36.532259 32360 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0822 01:27:58.322942 32360 solver.cpp:404]     Test net 
output #0: accuracy = 0.10436\nI0822 01:27:58.323251 32360 solver.cpp:404]     Test net output #1: loss = 76.6314 (* 1 = 76.6314 loss)\nI0822 01:27:59.637351 32360 solver.cpp:228] Iteration 27700, loss = 0.310276\nI0822 01:27:59.637403 32360 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0822 01:27:59.637420 32360 solver.cpp:244]     Train net output #1: loss = 0.310276 (* 1 = 0.310276 loss)\nI0822 01:27:59.742224 32360 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0822 01:30:17.900529 32360 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0822 01:31:39.692133 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0822 01:31:39.692461 32360 solver.cpp:404]     Test net output #1: loss = 78.2687 (* 1 = 78.2687 loss)\nI0822 01:31:41.007834 32360 solver.cpp:228] Iteration 27800, loss = 0.205536\nI0822 01:31:41.007889 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 01:31:41.007905 32360 solver.cpp:244]     Train net output #1: loss = 0.205536 (* 1 = 0.205536 loss)\nI0822 01:31:41.108068 32360 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0822 01:33:59.236017 32360 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0822 01:35:21.012615 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10288\nI0822 01:35:21.012936 32360 solver.cpp:404]     Test net output #1: loss = 77.0019 (* 1 = 77.0019 loss)\nI0822 01:35:22.327847 32360 solver.cpp:228] Iteration 27900, loss = 0.164075\nI0822 01:35:22.327900 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 01:35:22.327919 32360 solver.cpp:244]     Train net output #1: loss = 0.164075 (* 1 = 0.164075 loss)\nI0822 01:35:22.432807 32360 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0822 01:37:40.396811 32360 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0822 01:39:02.183306 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11496\nI0822 01:39:02.183615 32360 solver.cpp:404]     Test net output #1: loss = 76.0138 (* 1 = 76.0138 
loss)\nI0822 01:39:03.498549 32360 solver.cpp:228] Iteration 28000, loss = 0.202112\nI0822 01:39:03.498602 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 01:39:03.498620 32360 solver.cpp:244]     Train net output #1: loss = 0.202112 (* 1 = 0.202112 loss)\nI0822 01:39:03.598433 32360 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0822 01:41:21.525809 32360 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0822 01:42:43.314357 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1004\nI0822 01:42:43.314664 32360 solver.cpp:404]     Test net output #1: loss = 77.3028 (* 1 = 77.3028 loss)\nI0822 01:42:44.629990 32360 solver.cpp:228] Iteration 28100, loss = 0.123143\nI0822 01:42:44.630031 32360 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 01:42:44.630048 32360 solver.cpp:244]     Train net output #1: loss = 0.123143 (* 1 = 0.123143 loss)\nI0822 01:42:44.730165 32360 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0822 01:45:02.685817 32360 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0822 01:46:24.495240 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12332\nI0822 01:46:24.495550 32360 solver.cpp:404]     Test net output #1: loss = 67.1623 (* 1 = 67.1623 loss)\nI0822 01:46:25.811154 32360 solver.cpp:228] Iteration 28200, loss = 0.0665879\nI0822 01:46:25.811208 32360 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 01:46:25.811226 32360 solver.cpp:244]     Train net output #1: loss = 0.066588 (* 1 = 0.066588 loss)\nI0822 01:46:25.915493 32360 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0822 01:48:43.816715 32360 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0822 01:50:05.618701 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1364\nI0822 01:50:05.619024 32360 solver.cpp:404]     Test net output #1: loss = 57.5512 (* 1 = 57.5512 loss)\nI0822 01:50:06.935012 32360 solver.cpp:228] Iteration 28300, loss = 0.107526\nI0822 01:50:06.935066 32360 solver.cpp:244]     
Train net output #0: accuracy = 0.96\nI0822 01:50:06.935084 32360 solver.cpp:244]     Train net output #1: loss = 0.107526 (* 1 = 0.107526 loss)\nI0822 01:50:07.037519 32360 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0822 01:52:25.047355 32360 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0822 01:53:46.853924 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0822 01:53:46.854231 32360 solver.cpp:404]     Test net output #1: loss = 71.2048 (* 1 = 71.2048 loss)\nI0822 01:53:48.168656 32360 solver.cpp:228] Iteration 28400, loss = 0.0769022\nI0822 01:53:48.168710 32360 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:53:48.168728 32360 solver.cpp:244]     Train net output #1: loss = 0.0769023 (* 1 = 0.0769023 loss)\nI0822 01:53:48.272439 32360 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0822 01:56:06.265363 32360 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0822 01:57:28.062635 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15736\nI0822 01:57:28.062966 32360 solver.cpp:404]     Test net output #1: loss = 32.2262 (* 1 = 32.2262 loss)\nI0822 01:57:29.377035 32360 solver.cpp:228] Iteration 28500, loss = 0.0306144\nI0822 01:57:29.377086 32360 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:57:29.377104 32360 solver.cpp:244]     Train net output #1: loss = 0.0306145 (* 1 = 0.0306145 loss)\nI0822 01:57:29.482779 32360 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0822 01:59:47.413811 32360 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0822 02:01:09.212162 32360 solver.cpp:404]     Test net output #0: accuracy = 0.19792\nI0822 02:01:09.212466 32360 solver.cpp:404]     Test net output #1: loss = 18.3713 (* 1 = 18.3713 loss)\nI0822 02:01:10.526703 32360 solver.cpp:228] Iteration 28600, loss = 0.00343655\nI0822 02:01:10.526758 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:01:10.526775 32360 solver.cpp:244]     Train net output #1: loss = 0.00343662 (* 1 = 
0.00343662 loss)\nI0822 02:01:10.640285 32360 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0822 02:03:28.614449 32360 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0822 02:04:50.399391 32360 solver.cpp:404]     Test net output #0: accuracy = 0.44676\nI0822 02:04:50.399727 32360 solver.cpp:404]     Test net output #1: loss = 4.26333 (* 1 = 4.26333 loss)\nI0822 02:04:51.713701 32360 solver.cpp:228] Iteration 28700, loss = 0.000306063\nI0822 02:04:51.713755 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:04:51.713773 32360 solver.cpp:244]     Train net output #1: loss = 0.000306137 (* 1 = 0.000306137 loss)\nI0822 02:04:51.813450 32360 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0822 02:07:09.784853 32360 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0822 02:08:31.573884 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5202\nI0822 02:08:31.574205 32360 solver.cpp:404]     Test net output #1: loss = 2.26711 (* 1 = 2.26711 loss)\nI0822 02:08:32.889490 32360 solver.cpp:228] Iteration 28800, loss = 0.0002615\nI0822 02:08:32.889544 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:08:32.889561 32360 solver.cpp:244]     Train net output #1: loss = 0.000261575 (* 1 = 0.000261575 loss)\nI0822 02:08:32.994771 32360 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0822 02:10:51.076900 32360 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0822 02:12:12.850600 32360 solver.cpp:404]     Test net output #0: accuracy = 0.35376\nI0822 02:12:12.850919 32360 solver.cpp:404]     Test net output #1: loss = 3.21833 (* 1 = 3.21833 loss)\nI0822 02:12:14.165460 32360 solver.cpp:228] Iteration 28900, loss = 0.000310148\nI0822 02:12:14.165513 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:12:14.165530 32360 solver.cpp:244]     Train net output #1: loss = 0.000310222 (* 1 = 0.000310222 loss)\nI0822 02:12:14.270766 32360 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0822 02:14:32.201768 32360 
solver.cpp:337] Iteration 29000, Testing net (#0)\nI0822 02:15:53.994230 32360 solver.cpp:404]     Test net output #0: accuracy = 0.23088\nI0822 02:15:53.994541 32360 solver.cpp:404]     Test net output #1: loss = 4.19447 (* 1 = 4.19447 loss)\nI0822 02:15:55.310144 32360 solver.cpp:228] Iteration 29000, loss = 0.000339572\nI0822 02:15:55.310197 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:15:55.310214 32360 solver.cpp:244]     Train net output #1: loss = 0.000339646 (* 1 = 0.000339646 loss)\nI0822 02:15:55.412961 32360 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0822 02:18:13.430096 32360 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0822 02:19:35.203243 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18408\nI0822 02:19:35.203565 32360 solver.cpp:404]     Test net output #1: loss = 4.52825 (* 1 = 4.52825 loss)\nI0822 02:19:36.518506 32360 solver.cpp:228] Iteration 29100, loss = 0.000367222\nI0822 02:19:36.518553 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:19:36.518574 32360 solver.cpp:244]     Train net output #1: loss = 0.000367297 (* 1 = 0.000367297 loss)\nI0822 02:19:36.624722 32360 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0822 02:21:54.507112 32360 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0822 02:23:16.260002 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16176\nI0822 02:23:16.260280 32360 solver.cpp:404]     Test net output #1: loss = 4.58289 (* 1 = 4.58289 loss)\nI0822 02:23:17.574883 32360 solver.cpp:228] Iteration 29200, loss = 0.000402778\nI0822 02:23:17.574937 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:23:17.574954 32360 solver.cpp:244]     Train net output #1: loss = 0.000402852 (* 1 = 0.000402852 loss)\nI0822 02:23:17.674983 32360 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0822 02:25:35.648398 32360 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0822 02:26:57.362946 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.15704\nI0822 02:26:57.363261 32360 solver.cpp:404]     Test net output #1: loss = 4.52864 (* 1 = 4.52864 loss)\nI0822 02:26:58.677772 32360 solver.cpp:228] Iteration 29300, loss = 0.000451917\nI0822 02:26:58.677824 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:26:58.677841 32360 solver.cpp:244]     Train net output #1: loss = 0.000451991 (* 1 = 0.000451991 loss)\nI0822 02:26:58.788244 32360 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0822 02:29:16.739572 32360 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0822 02:30:38.494459 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15732\nI0822 02:30:38.494738 32360 solver.cpp:404]     Test net output #1: loss = 4.31287 (* 1 = 4.31287 loss)\nI0822 02:30:39.809617 32360 solver.cpp:228] Iteration 29400, loss = 0.000383872\nI0822 02:30:39.809672 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:30:39.809695 32360 solver.cpp:244]     Train net output #1: loss = 0.000383946 (* 1 = 0.000383946 loss)\nI0822 02:30:39.913619 32360 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0822 02:32:57.860747 32360 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0822 02:34:19.574705 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15612\nI0822 02:34:19.574975 32360 solver.cpp:404]     Test net output #1: loss = 4.10078 (* 1 = 4.10078 loss)\nI0822 02:34:20.889616 32360 solver.cpp:228] Iteration 29500, loss = 0.000460652\nI0822 02:34:20.889672 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:34:20.889689 32360 solver.cpp:244]     Train net output #1: loss = 0.000460727 (* 1 = 0.000460727 loss)\nI0822 02:34:20.998272 32360 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0822 02:36:39.176265 32360 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0822 02:38:00.715764 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1538\nI0822 02:38:00.716007 32360 solver.cpp:404]     Test net output #1: loss = 3.87683 (* 1 = 3.87683 
loss)\nI0822 02:38:02.031422 32360 solver.cpp:228] Iteration 29600, loss = 0.000333853\nI0822 02:38:02.031473 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:38:02.031497 32360 solver.cpp:244]     Train net output #1: loss = 0.000333928 (* 1 = 0.000333928 loss)\nI0822 02:38:02.133442 32360 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0822 02:40:20.139158 32360 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0822 02:41:41.869726 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15268\nI0822 02:41:41.869992 32360 solver.cpp:404]     Test net output #1: loss = 3.70626 (* 1 = 3.70626 loss)\nI0822 02:41:43.185616 32360 solver.cpp:228] Iteration 29700, loss = 0.000400321\nI0822 02:41:43.185668 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:41:43.185686 32360 solver.cpp:244]     Train net output #1: loss = 0.000400396 (* 1 = 0.000400396 loss)\nI0822 02:41:43.289436 32360 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0822 02:44:01.231837 32360 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0822 02:45:22.956303 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14092\nI0822 02:45:22.956563 32360 solver.cpp:404]     Test net output #1: loss = 3.6077 (* 1 = 3.6077 loss)\nI0822 02:45:24.272025 32360 solver.cpp:228] Iteration 29800, loss = 0.000475469\nI0822 02:45:24.272079 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:45:24.272095 32360 solver.cpp:244]     Train net output #1: loss = 0.000475544 (* 1 = 0.000475544 loss)\nI0822 02:45:24.371585 32360 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0822 02:47:42.332932 32360 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0822 02:49:03.791775 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1316\nI0822 02:49:03.791999 32360 solver.cpp:404]     Test net output #1: loss = 3.48812 (* 1 = 3.48812 loss)\nI0822 02:49:05.106467 32360 solver.cpp:228] Iteration 29900, loss = 0.000375623\nI0822 02:49:05.106523 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:49:05.106539 32360 solver.cpp:244]     Train net output #1: loss = 0.000375697 (* 1 = 0.000375697 loss)\nI0822 02:49:05.215070 32360 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0822 02:51:23.194581 32360 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0822 02:52:44.901294 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12668\nI0822 02:52:44.901587 32360 solver.cpp:404]     Test net output #1: loss = 3.38269 (* 1 = 3.38269 loss)\nI0822 02:52:46.216295 32360 solver.cpp:228] Iteration 30000, loss = 0.000390908\nI0822 02:52:46.216346 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:52:46.216363 32360 solver.cpp:244]     Train net output #1: loss = 0.000390982 (* 1 = 0.000390982 loss)\nI0822 02:52:46.324339 32360 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0822 02:55:04.574749 32360 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0822 02:56:26.305995 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11744\nI0822 02:56:26.306243 32360 solver.cpp:404]     Test net output #1: loss = 3.29332 (* 1 = 3.29332 loss)\nI0822 02:56:27.620211 32360 solver.cpp:228] Iteration 30100, loss = 0.000373571\nI0822 02:56:27.620261 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:56:27.620278 32360 solver.cpp:244]     Train net output #1: loss = 0.000373646 (* 1 = 0.000373646 loss)\nI0822 02:56:27.728559 32360 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0822 02:58:45.736402 32360 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0822 03:00:07.470387 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11152\nI0822 03:00:07.470676 32360 solver.cpp:404]     Test net output #1: loss = 3.20185 (* 1 = 3.20185 loss)\nI0822 03:00:08.786171 32360 solver.cpp:228] Iteration 30200, loss = 0.000377351\nI0822 03:00:08.786222 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:00:08.786239 32360 solver.cpp:244]     Train net output 
#1: loss = 0.000377425 (* 1 = 0.000377425 loss)\nI0822 03:00:08.888962 32360 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0822 03:02:26.911308 32360 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0822 03:03:48.637183 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10516\nI0822 03:03:48.637426 32360 solver.cpp:404]     Test net output #1: loss = 3.11727 (* 1 = 3.11727 loss)\nI0822 03:03:49.951772 32360 solver.cpp:228] Iteration 30300, loss = 0.000309023\nI0822 03:03:49.951822 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:03:49.951840 32360 solver.cpp:244]     Train net output #1: loss = 0.000309098 (* 1 = 0.000309098 loss)\nI0822 03:03:50.049559 32360 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0822 03:06:08.050509 32360 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0822 03:07:29.769387 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10092\nI0822 03:07:29.769666 32360 solver.cpp:404]     Test net output #1: loss = 3.04025 (* 1 = 3.04025 loss)\nI0822 03:07:31.085216 32360 solver.cpp:228] Iteration 30400, loss = 0.000381475\nI0822 03:07:31.085266 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:07:31.085283 32360 solver.cpp:244]     Train net output #1: loss = 0.00038155 (* 1 = 0.00038155 loss)\nI0822 03:07:31.190405 32360 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0822 03:09:49.241396 32360 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0822 03:11:10.965375 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1004\nI0822 03:11:10.965646 32360 solver.cpp:404]     Test net output #1: loss = 2.96552 (* 1 = 2.96552 loss)\nI0822 03:11:12.279976 32360 solver.cpp:228] Iteration 30500, loss = 0.000311065\nI0822 03:11:12.280026 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:11:12.280042 32360 solver.cpp:244]     Train net output #1: loss = 0.000311139 (* 1 = 0.000311139 loss)\nI0822 03:11:12.381479 32360 sgd_solver.cpp:166] Iteration 30500, lr = 
0.35\nI0822 03:13:30.433312 32360 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0822 03:14:52.169283 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 03:14:52.169558 32360 solver.cpp:404]     Test net output #1: loss = 2.89824 (* 1 = 2.89824 loss)\nI0822 03:14:53.481062 32360 solver.cpp:228] Iteration 30600, loss = 0.000335866\nI0822 03:14:53.481113 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:14:53.481130 32360 solver.cpp:244]     Train net output #1: loss = 0.000335941 (* 1 = 0.000335941 loss)\nI0822 03:14:53.586207 32360 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0822 03:17:11.648614 32360 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0822 03:18:33.377374 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0822 03:18:33.377660 32360 solver.cpp:404]     Test net output #1: loss = 2.84997 (* 1 = 2.84997 loss)\nI0822 03:18:34.690007 32360 solver.cpp:228] Iteration 30700, loss = 0.000303934\nI0822 03:18:34.690057 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:18:34.690074 32360 solver.cpp:244]     Train net output #1: loss = 0.000304009 (* 1 = 0.000304009 loss)\nI0822 03:18:34.797190 32360 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0822 03:20:52.819361 32360 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0822 03:22:14.557898 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 03:22:14.558197 32360 solver.cpp:404]     Test net output #1: loss = 2.80214 (* 1 = 2.80214 loss)\nI0822 03:22:15.870141 32360 solver.cpp:228] Iteration 30800, loss = 0.000253253\nI0822 03:22:15.870198 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:22:15.870220 32360 solver.cpp:244]     Train net output #1: loss = 0.000253328 (* 1 = 0.000253328 loss)\nI0822 03:22:15.976166 32360 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0822 03:24:33.931437 32360 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0822 03:25:55.684749 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0822 03:25:55.685020 32360 solver.cpp:404]     Test net output #1: loss = 2.75898 (* 1 = 2.75898 loss)\nI0822 03:25:56.998332 32360 solver.cpp:228] Iteration 30900, loss = 0.000264093\nI0822 03:25:56.998597 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:25:56.998693 32360 solver.cpp:244]     Train net output #1: loss = 0.000264168 (* 1 = 0.000264168 loss)\nI0822 03:25:57.101608 32360 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0822 03:28:15.087110 32360 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0822 03:29:36.833567 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 03:29:36.833817 32360 solver.cpp:404]     Test net output #1: loss = 2.71977 (* 1 = 2.71977 loss)\nI0822 03:29:38.146090 32360 solver.cpp:228] Iteration 31000, loss = 0.00029872\nI0822 03:29:38.146145 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:29:38.146169 32360 solver.cpp:244]     Train net output #1: loss = 0.000298794 (* 1 = 0.000298794 loss)\nI0822 03:29:38.254235 32360 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0822 03:31:56.281996 32360 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0822 03:33:18.012979 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0822 03:33:18.013285 32360 solver.cpp:404]     Test net output #1: loss = 2.68672 (* 1 = 2.68672 loss)\nI0822 03:33:19.325153 32360 solver.cpp:228] Iteration 31100, loss = 0.000277999\nI0822 03:33:19.325208 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:33:19.325232 32360 solver.cpp:244]     Train net output #1: loss = 0.000278073 (* 1 = 0.000278073 loss)\nI0822 03:33:19.437516 32360 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0822 03:35:37.510632 32360 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0822 03:36:59.277927 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 03:36:59.278184 32360 solver.cpp:404]     Test net 
output #1: loss = 2.66209 (* 1 = 2.66209 loss)\nI0822 03:37:00.590276 32360 solver.cpp:228] Iteration 31200, loss = 0.000266061\nI0822 03:37:00.590332 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:37:00.590355 32360 solver.cpp:244]     Train net output #1: loss = 0.000266136 (* 1 = 0.000266136 loss)\nI0822 03:37:00.694341 32360 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0822 03:39:18.733288 32360 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0822 03:40:40.174199 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0822 03:40:40.174520 32360 solver.cpp:404]     Test net output #1: loss = 2.63217 (* 1 = 2.63217 loss)\nI0822 03:40:41.486426 32360 solver.cpp:228] Iteration 31300, loss = 0.000253389\nI0822 03:40:41.486482 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:40:41.486506 32360 solver.cpp:244]     Train net output #1: loss = 0.000253464 (* 1 = 0.000253464 loss)\nI0822 03:40:41.594306 32360 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0822 03:42:59.753583 32360 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0822 03:44:21.299147 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09988\nI0822 03:44:21.299480 32360 solver.cpp:404]     Test net output #1: loss = 2.61367 (* 1 = 2.61367 loss)\nI0822 03:44:22.611328 32360 solver.cpp:228] Iteration 31400, loss = 0.000293304\nI0822 03:44:22.611382 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:44:22.611412 32360 solver.cpp:244]     Train net output #1: loss = 0.000293378 (* 1 = 0.000293378 loss)\nI0822 03:44:22.716956 32360 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0822 03:46:40.770416 32360 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0822 03:48:02.547386 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10012\nI0822 03:48:02.547708 32360 solver.cpp:404]     Test net output #1: loss = 2.58782 (* 1 = 2.58782 loss)\nI0822 03:48:03.861115 32360 solver.cpp:228] Iteration 31500, loss = 
0.000247056\nI0822 03:48:03.861161 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:48:03.861178 32360 solver.cpp:244]     Train net output #1: loss = 0.00024713 (* 1 = 0.00024713 loss)\nI0822 03:48:03.966130 32360 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0822 03:50:21.963671 32360 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0822 03:51:43.900861 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10092\nI0822 03:51:43.901175 32360 solver.cpp:404]     Test net output #1: loss = 2.57522 (* 1 = 2.57522 loss)\nI0822 03:51:45.212563 32360 solver.cpp:228] Iteration 31600, loss = 0.000252771\nI0822 03:51:45.212610 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:51:45.212627 32360 solver.cpp:244]     Train net output #1: loss = 0.000252845 (* 1 = 0.000252845 loss)\nI0822 03:51:45.327478 32360 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0822 03:54:03.386994 32360 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0822 03:55:25.287233 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10984\nI0822 03:55:25.287569 32360 solver.cpp:404]     Test net output #1: loss = 2.55971 (* 1 = 2.55971 loss)\nI0822 03:55:26.599979 32360 solver.cpp:228] Iteration 31700, loss = 0.000215068\nI0822 03:55:26.600033 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:55:26.600049 32360 solver.cpp:244]     Train net output #1: loss = 0.000215142 (* 1 = 0.000215142 loss)\nI0822 03:55:26.702613 32360 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0822 03:57:44.685808 32360 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0822 03:59:06.586303 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10064\nI0822 03:59:06.586637 32360 solver.cpp:404]     Test net output #1: loss = 2.54799 (* 1 = 2.54799 loss)\nI0822 03:59:07.899655 32360 solver.cpp:228] Iteration 31800, loss = 0.000245207\nI0822 03:59:07.899714 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:59:07.899730 32360 
solver.cpp:244]     Train net output #1: loss = 0.000245282 (* 1 = 0.000245282 loss)\nI0822 03:59:08.005059 32360 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0822 04:01:26.018280 32360 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0822 04:02:47.891425 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1008\nI0822 04:02:47.891743 32360 solver.cpp:404]     Test net output #1: loss = 2.53301 (* 1 = 2.53301 loss)\nI0822 04:02:49.204573 32360 solver.cpp:228] Iteration 31900, loss = 0.000226208\nI0822 04:02:49.204617 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:02:49.204633 32360 solver.cpp:244]     Train net output #1: loss = 0.000226282 (* 1 = 0.000226282 loss)\nI0822 04:02:49.315868 32360 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0822 04:05:07.285331 32360 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0822 04:06:29.168828 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09944\nI0822 04:06:29.169137 32360 solver.cpp:404]     Test net output #1: loss = 2.52264 (* 1 = 2.52264 loss)\nI0822 04:06:30.482368 32360 solver.cpp:228] Iteration 32000, loss = 0.000245127\nI0822 04:06:30.482422 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:06:30.482439 32360 solver.cpp:244]     Train net output #1: loss = 0.000245202 (* 1 = 0.000245202 loss)\nI0822 04:06:30.592996 32360 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0822 04:08:48.659296 32360 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0822 04:10:10.448559 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:10:10.448890 32360 solver.cpp:404]     Test net output #1: loss = 2.5081 (* 1 = 2.5081 loss)\nI0822 04:10:11.762473 32360 solver.cpp:228] Iteration 32100, loss = 0.000226406\nI0822 04:10:11.762527 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:10:11.762544 32360 solver.cpp:244]     Train net output #1: loss = 0.000226481 (* 1 = 0.000226481 loss)\nI0822 04:10:11.869510 32360 
sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0822 04:12:29.830379 32360 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0822 04:13:51.727432 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:13:51.727771 32360 solver.cpp:404]     Test net output #1: loss = 2.49794 (* 1 = 2.49794 loss)\nI0822 04:13:53.041069 32360 solver.cpp:228] Iteration 32200, loss = 0.000236695\nI0822 04:13:53.041123 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:13:53.041139 32360 solver.cpp:244]     Train net output #1: loss = 0.00023677 (* 1 = 0.00023677 loss)\nI0822 04:13:53.148102 32360 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0822 04:16:11.309276 32360 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0822 04:17:33.204251 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:17:33.204586 32360 solver.cpp:404]     Test net output #1: loss = 2.4851 (* 1 = 2.4851 loss)\nI0822 04:17:34.517818 32360 solver.cpp:228] Iteration 32300, loss = 0.000217848\nI0822 04:17:34.517870 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:17:34.517887 32360 solver.cpp:244]     Train net output #1: loss = 0.000217923 (* 1 = 0.000217923 loss)\nI0822 04:17:34.620354 32360 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0822 04:19:52.686579 32360 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0822 04:21:14.335693 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:21:14.336019 32360 solver.cpp:404]     Test net output #1: loss = 2.47515 (* 1 = 2.47515 loss)\nI0822 04:21:15.651548 32360 solver.cpp:228] Iteration 32400, loss = 0.00019948\nI0822 04:21:15.651602 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:21:15.651618 32360 solver.cpp:244]     Train net output #1: loss = 0.000199554 (* 1 = 0.000199554 loss)\nI0822 04:21:15.751032 32360 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0822 04:23:33.766968 32360 solver.cpp:337] Iteration 32500, Testing net 
(#0)\nI0822 04:24:55.352344 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:24:55.352622 32360 solver.cpp:404]     Test net output #1: loss = 2.46258 (* 1 = 2.46258 loss)\nI0822 04:24:56.666041 32360 solver.cpp:228] Iteration 32500, loss = 0.000217689\nI0822 04:24:56.666095 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:24:56.666113 32360 solver.cpp:244]     Train net output #1: loss = 0.000217763 (* 1 = 0.000217763 loss)\nI0822 04:24:56.771419 32360 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0822 04:27:14.719254 32360 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0822 04:28:36.090284 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:28:36.090577 32360 solver.cpp:404]     Test net output #1: loss = 2.45388 (* 1 = 2.45388 loss)\nI0822 04:28:37.403502 32360 solver.cpp:228] Iteration 32600, loss = 0.000212612\nI0822 04:28:37.403554 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:28:37.403571 32360 solver.cpp:244]     Train net output #1: loss = 0.000212686 (* 1 = 0.000212686 loss)\nI0822 04:28:37.507711 32360 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0822 04:30:55.451757 32360 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0822 04:32:16.999508 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:32:16.999817 32360 solver.cpp:404]     Test net output #1: loss = 2.44236 (* 1 = 2.44236 loss)\nI0822 04:32:18.311612 32360 solver.cpp:228] Iteration 32700, loss = 0.000221546\nI0822 04:32:18.311666 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:32:18.311689 32360 solver.cpp:244]     Train net output #1: loss = 0.00022162 (* 1 = 0.00022162 loss)\nI0822 04:32:18.411741 32360 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0822 04:34:36.289273 32360 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0822 04:35:57.715595 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:35:57.715921 32360 
solver.cpp:404]     Test net output #1: loss = 2.43377 (* 1 = 2.43377 loss)\nI0822 04:35:59.029130 32360 solver.cpp:228] Iteration 32800, loss = 0.00021893\nI0822 04:35:59.029186 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:35:59.029203 32360 solver.cpp:244]     Train net output #1: loss = 0.000219004 (* 1 = 0.000219004 loss)\nI0822 04:35:59.137200 32360 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0822 04:38:17.100118 32360 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0822 04:39:38.519161 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:39:38.519457 32360 solver.cpp:404]     Test net output #1: loss = 2.42286 (* 1 = 2.42286 loss)\nI0822 04:39:39.832028 32360 solver.cpp:228] Iteration 32900, loss = 0.000230748\nI0822 04:39:39.832083 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:39:39.832100 32360 solver.cpp:244]     Train net output #1: loss = 0.000230822 (* 1 = 0.000230822 loss)\nI0822 04:39:39.935852 32360 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0822 04:41:57.890072 32360 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0822 04:43:19.315829 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:43:19.316155 32360 solver.cpp:404]     Test net output #1: loss = 2.41412 (* 1 = 2.41412 loss)\nI0822 04:43:20.628521 32360 solver.cpp:228] Iteration 33000, loss = 0.000198299\nI0822 04:43:20.628579 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:43:20.628603 32360 solver.cpp:244]     Train net output #1: loss = 0.000198373 (* 1 = 0.000198373 loss)\nI0822 04:43:20.732815 32360 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0822 04:45:38.779233 32360 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0822 04:47:00.686396 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:47:00.686722 32360 solver.cpp:404]     Test net output #1: loss = 2.40457 (* 1 = 2.40457 loss)\nI0822 04:47:01.999078 32360 solver.cpp:228] 
Iteration 33100, loss = 0.000212805\nI0822 04:47:01.999132 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:47:01.999156 32360 solver.cpp:244]     Train net output #1: loss = 0.000212879 (* 1 = 0.000212879 loss)\nI0822 04:47:02.107043 32360 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0822 04:49:20.008896 32360 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0822 04:50:41.809514 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:50:41.809856 32360 solver.cpp:404]     Test net output #1: loss = 2.39786 (* 1 = 2.39786 loss)\nI0822 04:50:43.125874 32360 solver.cpp:228] Iteration 33200, loss = 0.000203348\nI0822 04:50:43.125931 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:50:43.125954 32360 solver.cpp:244]     Train net output #1: loss = 0.000203422 (* 1 = 0.000203422 loss)\nI0822 04:50:43.229332 32360 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0822 04:53:01.213428 32360 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0822 04:54:22.977820 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 04:54:22.978138 32360 solver.cpp:404]     Test net output #1: loss = 2.38868 (* 1 = 2.38868 loss)\nI0822 04:54:24.294030 32360 solver.cpp:228] Iteration 33300, loss = 0.000213016\nI0822 04:54:24.294085 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:54:24.294109 32360 solver.cpp:244]     Train net output #1: loss = 0.000213091 (* 1 = 0.000213091 loss)\nI0822 04:54:24.394479 32360 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0822 04:56:42.340801 32360 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0822 04:58:04.095641 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 04:58:04.095966 32360 solver.cpp:404]     Test net output #1: loss = 2.38319 (* 1 = 2.38319 loss)\nI0822 04:58:05.411331 32360 solver.cpp:228] Iteration 33400, loss = 0.000203449\nI0822 04:58:05.411388 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0822 04:58:05.411412 32360 solver.cpp:244]     Train net output #1: loss = 0.000203524 (* 1 = 0.000203524 loss)\nI0822 04:58:05.519384 32360 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0822 05:00:23.421944 32360 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0822 05:01:45.190570 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 05:01:45.190903 32360 solver.cpp:404]     Test net output #1: loss = 2.37702 (* 1 = 2.37702 loss)\nI0822 05:01:46.507277 32360 solver.cpp:228] Iteration 33500, loss = 0.00021687\nI0822 05:01:46.507333 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:01:46.507357 32360 solver.cpp:244]     Train net output #1: loss = 0.000216945 (* 1 = 0.000216945 loss)\nI0822 05:01:46.614568 32360 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0822 05:04:04.563874 32360 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0822 05:05:26.346525 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 05:05:26.346879 32360 solver.cpp:404]     Test net output #1: loss = 78.5889 (* 1 = 78.5889 loss)\nI0822 05:05:27.662158 32360 solver.cpp:228] Iteration 33600, loss = 1.89686\nI0822 05:05:27.662214 32360 solver.cpp:244]     Train net output #0: accuracy = 0.32\nI0822 05:05:27.662238 32360 solver.cpp:244]     Train net output #1: loss = 1.89686 (* 1 = 1.89686 loss)\nI0822 05:05:27.765974 32360 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0822 05:07:45.704623 32360 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0822 05:09:07.465052 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 05:09:07.465373 32360 solver.cpp:404]     Test net output #1: loss = 78.6169 (* 1 = 78.6169 loss)\nI0822 05:09:08.781605 32360 solver.cpp:228] Iteration 33700, loss = 1.21964\nI0822 05:09:08.781661 32360 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0822 05:09:08.781693 32360 solver.cpp:244]     Train net output #1: loss = 1.21964 (* 1 = 1.21964 loss)\nI0822 05:09:08.883586 
32360 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0822 05:11:26.750596 32360 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0822 05:12:48.448976 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 05:12:48.449298 32360 solver.cpp:404]     Test net output #1: loss = 78.5889 (* 1 = 78.5889 loss)\nI0822 05:12:49.764428 32360 solver.cpp:228] Iteration 33800, loss = 1.06663\nI0822 05:12:49.764484 32360 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0822 05:12:49.764510 32360 solver.cpp:244]     Train net output #1: loss = 1.06663 (* 1 = 1.06663 loss)\nI0822 05:12:49.866266 32360 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0822 05:15:07.962205 32360 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0822 05:16:29.724897 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 05:16:29.725175 32360 solver.cpp:404]     Test net output #1: loss = 78.5715 (* 1 = 78.5715 loss)\nI0822 05:16:31.040868 32360 solver.cpp:228] Iteration 33900, loss = 0.822667\nI0822 05:16:31.040916 32360 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0822 05:16:31.040940 32360 solver.cpp:244]     Train net output #1: loss = 0.822667 (* 1 = 0.822667 loss)\nI0822 05:16:31.141324 32360 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0822 05:18:49.095160 32360 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0822 05:20:10.655213 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09964\nI0822 05:20:10.655500 32360 solver.cpp:404]     Test net output #1: loss = 78.6344 (* 1 = 78.6344 loss)\nI0822 05:20:11.970707 32360 solver.cpp:228] Iteration 34000, loss = 0.684448\nI0822 05:20:11.970752 32360 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0822 05:20:11.970775 32360 solver.cpp:244]     Train net output #1: loss = 0.684448 (* 1 = 0.684448 loss)\nI0822 05:20:12.073392 32360 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0822 05:22:30.109781 32360 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0822 
05:23:51.676167 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 05:23:51.676403 32360 solver.cpp:404]     Test net output #1: loss = 78.5715 (* 1 = 78.5715 loss)\nI0822 05:23:52.992177 32360 solver.cpp:228] Iteration 34100, loss = 0.673575\nI0822 05:23:52.992233 32360 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0822 05:23:52.992256 32360 solver.cpp:244]     Train net output #1: loss = 0.673575 (* 1 = 0.673575 loss)\nI0822 05:23:53.098615 32360 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0822 05:26:10.606001 32360 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0822 05:27:31.821375 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0822 05:27:31.821655 32360 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0822 05:27:33.137276 32360 solver.cpp:228] Iteration 34200, loss = 0.449742\nI0822 05:27:33.137328 32360 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0822 05:27:33.137353 32360 solver.cpp:244]     Train net output #1: loss = 0.449742 (* 1 = 0.449742 loss)\nI0822 05:27:33.240301 32360 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0822 05:29:51.186532 32360 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0822 05:31:12.462916 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10472\nI0822 05:31:12.463209 32360 solver.cpp:404]     Test net output #1: loss = 76.4313 (* 1 = 76.4313 loss)\nI0822 05:31:13.776978 32360 solver.cpp:228] Iteration 34300, loss = 0.466599\nI0822 05:31:13.777030 32360 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0822 05:31:13.777055 32360 solver.cpp:244]     Train net output #1: loss = 0.466599 (* 1 = 0.466599 loss)\nI0822 05:31:13.880429 32360 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0822 05:33:31.850795 32360 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0822 05:34:53.482293 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0822 05:34:53.482553 32360 solver.cpp:404]     Test net 
output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0822 05:34:54.796669 32360 solver.cpp:228] Iteration 34400, loss = 0.44693\nI0822 05:34:54.796725 32360 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0822 05:34:54.796749 32360 solver.cpp:244]     Train net output #1: loss = 0.44693 (* 1 = 0.44693 loss)\nI0822 05:34:54.903185 32360 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0822 05:37:12.930007 32360 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0822 05:38:34.488034 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 05:38:34.488288 32360 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0822 05:38:35.803762 32360 solver.cpp:228] Iteration 34500, loss = 0.170066\nI0822 05:38:35.803817 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 05:38:35.803840 32360 solver.cpp:244]     Train net output #1: loss = 0.170066 (* 1 = 0.170066 loss)\nI0822 05:38:35.905016 32360 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0822 05:40:53.908416 32360 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0822 05:42:15.557559 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0822 05:42:15.557852 32360 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0822 05:42:16.872759 32360 solver.cpp:228] Iteration 34600, loss = 0.255104\nI0822 05:42:16.872812 32360 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0822 05:42:16.872835 32360 solver.cpp:244]     Train net output #1: loss = 0.255104 (* 1 = 0.255104 loss)\nI0822 05:42:16.977598 32360 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0822 05:44:35.125242 32360 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0822 05:45:56.867075 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 05:45:56.867352 32360 solver.cpp:404]     Test net output #1: loss = 78.4018 (* 1 = 78.4018 loss)\nI0822 05:45:58.182446 32360 solver.cpp:228] Iteration 34700, loss = 0.159424\nI0822 
05:45:58.182502 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 05:45:58.182528 32360 solver.cpp:244]     Train net output #1: loss = 0.159424 (* 1 = 0.159424 loss)\nI0822 05:45:58.280946 32360 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0822 05:48:16.230191 32360 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0822 05:49:37.503190 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10044\nI0822 05:49:37.503481 32360 solver.cpp:404]     Test net output #1: loss = 76.2218 (* 1 = 76.2218 loss)\nI0822 05:49:38.817497 32360 solver.cpp:228] Iteration 34800, loss = 0.106407\nI0822 05:49:38.817551 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:49:38.817575 32360 solver.cpp:244]     Train net output #1: loss = 0.106407 (* 1 = 0.106407 loss)\nI0822 05:49:38.925761 32360 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0822 05:51:56.958595 32360 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0822 05:53:18.334125 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10256\nI0822 05:53:18.334391 32360 solver.cpp:404]     Test net output #1: loss = 68.3089 (* 1 = 68.3089 loss)\nI0822 05:53:19.648319 32360 solver.cpp:228] Iteration 34900, loss = 0.142564\nI0822 05:53:19.648373 32360 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 05:53:19.648397 32360 solver.cpp:244]     Train net output #1: loss = 0.142564 (* 1 = 0.142564 loss)\nI0822 05:53:19.758832 32360 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0822 05:55:37.907138 32360 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0822 05:56:59.471487 32360 solver.cpp:404]     Test net output #0: accuracy = 0.133\nI0822 05:56:59.471742 32360 solver.cpp:404]     Test net output #1: loss = 33.8199 (* 1 = 33.8199 loss)\nI0822 05:57:00.787122 32360 solver.cpp:228] Iteration 35000, loss = 0.0978517\nI0822 05:57:00.787175 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:57:00.787191 32360 solver.cpp:244]     Train net 
output #1: loss = 0.0978517 (* 1 = 0.0978517 loss)\nI0822 05:57:00.889668 32360 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0822 05:59:18.792330 32360 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0822 06:00:40.390561 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14508\nI0822 06:00:40.390807 32360 solver.cpp:404]     Test net output #1: loss = 23.4481 (* 1 = 23.4481 loss)\nI0822 06:00:41.705970 32360 solver.cpp:228] Iteration 35100, loss = 0.0306011\nI0822 06:00:41.706022 32360 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:00:41.706039 32360 solver.cpp:244]     Train net output #1: loss = 0.0306011 (* 1 = 0.0306011 loss)\nI0822 06:00:41.813531 32360 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0822 06:02:59.809620 32360 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0822 06:04:21.260794 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1112\nI0822 06:04:21.261025 32360 solver.cpp:404]     Test net output #1: loss = 24.2681 (* 1 = 24.2681 loss)\nI0822 06:04:22.576694 32360 solver.cpp:228] Iteration 35200, loss = 0.0676455\nI0822 06:04:22.576746 32360 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:04:22.576762 32360 solver.cpp:244]     Train net output #1: loss = 0.0676455 (* 1 = 0.0676455 loss)\nI0822 06:04:22.674022 32360 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0822 06:06:40.619484 32360 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0822 06:08:02.220206 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12884\nI0822 06:08:02.220458 32360 solver.cpp:404]     Test net output #1: loss = 15.4306 (* 1 = 15.4306 loss)\nI0822 06:08:03.535796 32360 solver.cpp:228] Iteration 35300, loss = 0.00884296\nI0822 06:08:03.535850 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:08:03.535866 32360 solver.cpp:244]     Train net output #1: loss = 0.00884291 (* 1 = 0.00884291 loss)\nI0822 06:08:03.645722 32360 sgd_solver.cpp:166] Iteration 35300, lr = 
0.35\nI0822 06:10:21.795459 32360 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0822 06:11:43.092087 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13712\nI0822 06:11:43.092353 32360 solver.cpp:404]     Test net output #1: loss = 12.3599 (* 1 = 12.3599 loss)\nI0822 06:11:44.408040 32360 solver.cpp:228] Iteration 35400, loss = 0.00596547\nI0822 06:11:44.408092 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:11:44.408107 32360 solver.cpp:244]     Train net output #1: loss = 0.00596543 (* 1 = 0.00596543 loss)\nI0822 06:11:44.507905 32360 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0822 06:14:02.472226 32360 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0822 06:15:24.212177 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14496\nI0822 06:15:24.212451 32360 solver.cpp:404]     Test net output #1: loss = 9.02833 (* 1 = 9.02833 loss)\nI0822 06:15:25.527971 32360 solver.cpp:228] Iteration 35500, loss = 0.000432665\nI0822 06:15:25.528025 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:15:25.528043 32360 solver.cpp:244]     Train net output #1: loss = 0.000432619 (* 1 = 0.000432619 loss)\nI0822 06:15:25.626106 32360 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0822 06:17:43.632321 32360 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0822 06:19:05.407762 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15664\nI0822 06:19:05.408012 32360 solver.cpp:404]     Test net output #1: loss = 7.22164 (* 1 = 7.22164 loss)\nI0822 06:19:06.723240 32360 solver.cpp:228] Iteration 35600, loss = 0.000369543\nI0822 06:19:06.723296 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:19:06.723320 32360 solver.cpp:244]     Train net output #1: loss = 0.000369497 (* 1 = 0.000369497 loss)\nI0822 06:19:06.827145 32360 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0822 06:21:24.764839 32360 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0822 06:22:46.499908 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.14988\nI0822 06:22:46.500216 32360 solver.cpp:404]     Test net output #1: loss = 6.49614 (* 1 = 6.49614 loss)\nI0822 06:22:47.815727 32360 solver.cpp:228] Iteration 35700, loss = 0.000343781\nI0822 06:22:47.815774 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:22:47.815798 32360 solver.cpp:244]     Train net output #1: loss = 0.000343735 (* 1 = 0.000343735 loss)\nI0822 06:22:47.919292 32360 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0822 06:25:05.969391 32360 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0822 06:26:27.353584 32360 solver.cpp:404]     Test net output #0: accuracy = 0.13348\nI0822 06:26:27.353835 32360 solver.cpp:404]     Test net output #1: loss = 5.96856 (* 1 = 5.96856 loss)\nI0822 06:26:28.668208 32360 solver.cpp:228] Iteration 35800, loss = 0.000460644\nI0822 06:26:28.668264 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:26:28.668289 32360 solver.cpp:244]     Train net output #1: loss = 0.000460598 (* 1 = 0.000460598 loss)\nI0822 06:26:28.778899 32360 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0822 06:28:46.737464 32360 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0822 06:30:08.478353 32360 solver.cpp:404]     Test net output #0: accuracy = 0.118\nI0822 06:30:08.478646 32360 solver.cpp:404]     Test net output #1: loss = 5.50334 (* 1 = 5.50334 loss)\nI0822 06:30:09.792502 32360 solver.cpp:228] Iteration 35900, loss = 0.000402398\nI0822 06:30:09.792560 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:30:09.792584 32360 solver.cpp:244]     Train net output #1: loss = 0.000402352 (* 1 = 0.000402352 loss)\nI0822 06:30:09.904496 32360 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0822 06:32:27.853801 32360 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0822 06:33:49.620631 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1052\nI0822 06:33:49.620910 32360 solver.cpp:404]     Test net output 
#1: loss = 5.20144 (* 1 = 5.20144 loss)\nI0822 06:33:50.936529 32360 solver.cpp:228] Iteration 36000, loss = 0.000441565\nI0822 06:33:50.936576 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:33:50.936599 32360 solver.cpp:244]     Train net output #1: loss = 0.000441519 (* 1 = 0.000441519 loss)\nI0822 06:33:51.044468 32360 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0822 06:36:09.140099 32360 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0822 06:37:30.880703 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10272\nI0822 06:37:30.880971 32360 solver.cpp:404]     Test net output #1: loss = 4.92917 (* 1 = 4.92917 loss)\nI0822 06:37:32.194941 32360 solver.cpp:228] Iteration 36100, loss = 0.000351949\nI0822 06:37:32.194988 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:37:32.195010 32360 solver.cpp:244]     Train net output #1: loss = 0.000351903 (* 1 = 0.000351903 loss)\nI0822 06:37:32.303321 32360 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0822 06:39:50.248670 32360 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0822 06:41:11.991345 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09968\nI0822 06:41:11.991657 32360 solver.cpp:404]     Test net output #1: loss = 4.80405 (* 1 = 4.80405 loss)\nI0822 06:41:13.307044 32360 solver.cpp:228] Iteration 36200, loss = 0.000396738\nI0822 06:41:13.307099 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:41:13.307122 32360 solver.cpp:244]     Train net output #1: loss = 0.000396692 (* 1 = 0.000396692 loss)\nI0822 06:41:13.409873 32360 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0822 06:43:31.350219 32360 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0822 06:44:53.103823 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 06:44:53.104104 32360 solver.cpp:404]     Test net output #1: loss = 4.65105 (* 1 = 4.65105 loss)\nI0822 06:44:54.419700 32360 solver.cpp:228] Iteration 36300, loss = 
0.000314669\nI0822 06:44:54.419755 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:44:54.419780 32360 solver.cpp:244]     Train net output #1: loss = 0.000314623 (* 1 = 0.000314623 loss)\nI0822 06:44:54.520903 32360 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0822 06:47:12.417577 32360 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0822 06:48:33.988205 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 06:48:33.988461 32360 solver.cpp:404]     Test net output #1: loss = 4.47904 (* 1 = 4.47904 loss)\nI0822 06:48:35.302808 32360 solver.cpp:228] Iteration 36400, loss = 0.000358958\nI0822 06:48:35.302857 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:48:35.302881 32360 solver.cpp:244]     Train net output #1: loss = 0.000358912 (* 1 = 0.000358912 loss)\nI0822 06:48:35.407207 32360 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0822 06:50:53.289130 32360 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0822 06:52:14.997488 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 06:52:14.997777 32360 solver.cpp:404]     Test net output #1: loss = 4.29494 (* 1 = 4.29494 loss)\nI0822 06:52:16.312173 32360 solver.cpp:228] Iteration 36500, loss = 0.000373385\nI0822 06:52:16.312228 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:52:16.312252 32360 solver.cpp:244]     Train net output #1: loss = 0.000373339 (* 1 = 0.000373339 loss)\nI0822 06:52:16.411181 32360 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0822 06:54:34.287226 32360 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0822 06:55:56.008770 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 06:55:56.009052 32360 solver.cpp:404]     Test net output #1: loss = 4.10097 (* 1 = 4.10097 loss)\nI0822 06:55:57.323148 32360 solver.cpp:228] Iteration 36600, loss = 0.00033393\nI0822 06:55:57.323202 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:55:57.323225 
32360 solver.cpp:244]     Train net output #1: loss = 0.000333884 (* 1 = 0.000333884 loss)\nI0822 06:55:57.436223 32360 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0822 06:58:15.576200 32360 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0822 06:59:37.310647 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 06:59:37.310981 32360 solver.cpp:404]     Test net output #1: loss = 3.92018 (* 1 = 3.92018 loss)\nI0822 06:59:38.625135 32360 solver.cpp:228] Iteration 36700, loss = 0.000369109\nI0822 06:59:38.625188 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:59:38.625212 32360 solver.cpp:244]     Train net output #1: loss = 0.000369063 (* 1 = 0.000369063 loss)\nI0822 06:59:38.726166 32360 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0822 07:01:56.702564 32360 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0822 07:03:18.439700 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09956\nI0822 07:03:18.440047 32360 solver.cpp:404]     Test net output #1: loss = 3.75894 (* 1 = 3.75894 loss)\nI0822 07:03:19.754258 32360 solver.cpp:228] Iteration 36800, loss = 0.000338799\nI0822 07:03:19.754312 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:03:19.754336 32360 solver.cpp:244]     Train net output #1: loss = 0.000338753 (* 1 = 0.000338753 loss)\nI0822 07:03:19.855896 32360 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0822 07:05:38.079771 32360 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0822 07:06:59.844501 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10044\nI0822 07:06:59.844832 32360 solver.cpp:404]     Test net output #1: loss = 3.61917 (* 1 = 3.61917 loss)\nI0822 07:07:01.161347 32360 solver.cpp:228] Iteration 36900, loss = 0.000412041\nI0822 07:07:01.161406 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:07:01.161429 32360 solver.cpp:244]     Train net output #1: loss = 0.000411995 (* 1 = 0.000411995 loss)\nI0822 07:07:01.262697 32360 
sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0822 07:09:19.172075 32360 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0822 07:10:40.998105 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09956\nI0822 07:10:40.998436 32360 solver.cpp:404]     Test net output #1: loss = 3.50023 (* 1 = 3.50023 loss)\nI0822 07:10:42.312165 32360 solver.cpp:228] Iteration 37000, loss = 0.000313648\nI0822 07:10:42.312222 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:10:42.312245 32360 solver.cpp:244]     Train net output #1: loss = 0.000313602 (* 1 = 0.000313602 loss)\nI0822 07:10:42.421022 32360 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0822 07:13:00.473834 32360 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0822 07:14:22.195169 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10044\nI0822 07:14:22.195497 32360 solver.cpp:404]     Test net output #1: loss = 3.37461 (* 1 = 3.37461 loss)\nI0822 07:14:23.510799 32360 solver.cpp:228] Iteration 37100, loss = 0.000340905\nI0822 07:14:23.510857 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:14:23.510880 32360 solver.cpp:244]     Train net output #1: loss = 0.000340859 (* 1 = 0.000340859 loss)\nI0822 07:14:23.611311 32360 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0822 07:16:41.529820 32360 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0822 07:18:03.046088 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0822 07:18:03.046411 32360 solver.cpp:404]     Test net output #1: loss = 3.2829 (* 1 = 3.2829 loss)\nI0822 07:18:04.362464 32360 solver.cpp:228] Iteration 37200, loss = 0.000422866\nI0822 07:18:04.362517 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:18:04.362543 32360 solver.cpp:244]     Train net output #1: loss = 0.00042282 (* 1 = 0.00042282 loss)\nI0822 07:18:04.468845 32360 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0822 07:20:22.508739 32360 solver.cpp:337] Iteration 37300, Testing net 
(#0)\nI0822 07:21:44.071521 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10088\nI0822 07:21:44.071833 32360 solver.cpp:404]     Test net output #1: loss = 3.17727 (* 1 = 3.17727 loss)\nI0822 07:21:45.386266 32360 solver.cpp:228] Iteration 37300, loss = 0.000330383\nI0822 07:21:45.386324 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:21:45.386349 32360 solver.cpp:244]     Train net output #1: loss = 0.000330337 (* 1 = 0.000330337 loss)\nI0822 07:21:45.490211 32360 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0822 07:24:03.681357 32360 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0822 07:25:25.408073 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09992\nI0822 07:25:25.408437 32360 solver.cpp:404]     Test net output #1: loss = 3.09145 (* 1 = 3.09145 loss)\nI0822 07:25:26.724195 32360 solver.cpp:228] Iteration 37400, loss = 0.000289416\nI0822 07:25:26.724241 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:25:26.724262 32360 solver.cpp:244]     Train net output #1: loss = 0.00028937 (* 1 = 0.00028937 loss)\nI0822 07:25:26.829771 32360 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0822 07:27:44.926539 32360 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0822 07:29:06.755875 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1008\nI0822 07:29:06.756191 32360 solver.cpp:404]     Test net output #1: loss = 3.01181 (* 1 = 3.01181 loss)\nI0822 07:29:08.070278 32360 solver.cpp:228] Iteration 37500, loss = 0.00032041\nI0822 07:29:08.070328 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:29:08.070353 32360 solver.cpp:244]     Train net output #1: loss = 0.000320364 (* 1 = 0.000320364 loss)\nI0822 07:29:08.171464 32360 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0822 07:31:26.319214 32360 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0822 07:32:48.165406 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 07:32:48.165765 32360 
solver.cpp:404]     Test net output #1: loss = 2.94057 (* 1 = 2.94057 loss)\nI0822 07:32:49.479527 32360 solver.cpp:228] Iteration 37600, loss = 0.000278683\nI0822 07:32:49.479578 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:32:49.479601 32360 solver.cpp:244]     Train net output #1: loss = 0.000278637 (* 1 = 0.000278637 loss)\nI0822 07:32:49.583271 32360 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0822 07:35:07.647601 32360 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0822 07:36:29.525130 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1006\nI0822 07:36:29.525472 32360 solver.cpp:404]     Test net output #1: loss = 2.87186 (* 1 = 2.87186 loss)\nI0822 07:36:30.839692 32360 solver.cpp:228] Iteration 37700, loss = 0.000293685\nI0822 07:36:30.839745 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:36:30.839768 32360 solver.cpp:244]     Train net output #1: loss = 0.000293639 (* 1 = 0.000293639 loss)\nI0822 07:36:30.950626 32360 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0822 07:38:49.133260 32360 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0822 07:40:11.033005 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 07:40:11.033319 32360 solver.cpp:404]     Test net output #1: loss = 2.80821 (* 1 = 2.80821 loss)\nI0822 07:40:12.347447 32360 solver.cpp:228] Iteration 37800, loss = 0.000245901\nI0822 07:40:12.347498 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:40:12.347522 32360 solver.cpp:244]     Train net output #1: loss = 0.000245855 (* 1 = 0.000245855 loss)\nI0822 07:40:12.451205 32360 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0822 07:42:30.064836 32360 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0822 07:43:51.901615 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10072\nI0822 07:43:51.901963 32360 solver.cpp:404]     Test net output #1: loss = 2.75009 (* 1 = 2.75009 loss)\nI0822 07:43:53.216672 32360 solver.cpp:228] 
Iteration 37900, loss = 0.000260916\nI0822 07:43:53.216722 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:43:53.216747 32360 solver.cpp:244]     Train net output #1: loss = 0.00026087 (* 1 = 0.00026087 loss)\nI0822 07:43:53.322939 32360 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0822 07:46:11.341811 32360 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0822 07:47:33.234947 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09968\nI0822 07:47:33.235270 32360 solver.cpp:404]     Test net output #1: loss = 2.70239 (* 1 = 2.70239 loss)\nI0822 07:47:34.549640 32360 solver.cpp:228] Iteration 38000, loss = 0.000255026\nI0822 07:47:34.549692 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:47:34.549716 32360 solver.cpp:244]     Train net output #1: loss = 0.00025498 (* 1 = 0.00025498 loss)\nI0822 07:47:34.652289 32360 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0822 07:49:52.698549 32360 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0822 07:51:14.570955 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10072\nI0822 07:51:14.571293 32360 solver.cpp:404]     Test net output #1: loss = 2.65138 (* 1 = 2.65138 loss)\nI0822 07:51:15.886668 32360 solver.cpp:228] Iteration 38100, loss = 0.00020578\nI0822 07:51:15.886718 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:51:15.886741 32360 solver.cpp:244]     Train net output #1: loss = 0.000205734 (* 1 = 0.000205734 loss)\nI0822 07:51:15.992013 32360 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0822 07:53:33.953464 32360 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0822 07:54:55.805972 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0822 07:54:55.806316 32360 solver.cpp:404]     Test net output #1: loss = 2.61567 (* 1 = 2.61567 loss)\nI0822 07:54:57.125587 32360 solver.cpp:228] Iteration 38200, loss = 0.000254917\nI0822 07:54:57.125638 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
07:54:57.125663 32360 solver.cpp:244]     Train net output #1: loss = 0.000254871 (* 1 = 0.000254871 loss)\nI0822 07:54:57.217190 32360 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0822 07:57:15.160892 32360 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0822 07:58:37.068439 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 07:58:37.068785 32360 solver.cpp:404]     Test net output #1: loss = 2.5797 (* 1 = 2.5797 loss)\nI0822 07:58:38.383437 32360 solver.cpp:228] Iteration 38300, loss = 0.0002297\nI0822 07:58:38.383487 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:58:38.383510 32360 solver.cpp:244]     Train net output #1: loss = 0.000229654 (* 1 = 0.000229654 loss)\nI0822 07:58:38.481516 32360 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0822 08:00:56.478284 32360 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0822 08:02:18.381192 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09944\nI0822 08:02:18.381506 32360 solver.cpp:404]     Test net output #1: loss = 2.55161 (* 1 = 2.55161 loss)\nI0822 08:02:19.695802 32360 solver.cpp:228] Iteration 38400, loss = 0.00021455\nI0822 08:02:19.695850 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:02:19.695874 32360 solver.cpp:244]     Train net output #1: loss = 0.000214504 (* 1 = 0.000214504 loss)\nI0822 08:02:19.804611 32360 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0822 08:04:37.928124 32360 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0822 08:05:59.813719 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10044\nI0822 08:05:59.814045 32360 solver.cpp:404]     Test net output #1: loss = 2.52657 (* 1 = 2.52657 loss)\nI0822 08:06:01.129518 32360 solver.cpp:228] Iteration 38500, loss = 0.000243242\nI0822 08:06:01.129570 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:06:01.129595 32360 solver.cpp:244]     Train net output #1: loss = 0.000243196 (* 1 = 0.000243196 loss)\nI0822 
08:06:01.238185 32360 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0822 08:08:19.292582 32360 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0822 08:09:41.000618 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:09:41.000932 32360 solver.cpp:404]     Test net output #1: loss = 2.50813 (* 1 = 2.50813 loss)\nI0822 08:09:42.316633 32360 solver.cpp:228] Iteration 38600, loss = 0.000229824\nI0822 08:09:42.316689 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:09:42.316712 32360 solver.cpp:244]     Train net output #1: loss = 0.000229778 (* 1 = 0.000229778 loss)\nI0822 08:09:42.415371 32360 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0822 08:12:00.415810 32360 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0822 08:13:22.252913 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:13:22.253278 32360 solver.cpp:404]     Test net output #1: loss = 2.48877 (* 1 = 2.48877 loss)\nI0822 08:13:23.567975 32360 solver.cpp:228] Iteration 38700, loss = 0.000206346\nI0822 08:13:23.568024 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:13:23.568048 32360 solver.cpp:244]     Train net output #1: loss = 0.0002063 (* 1 = 0.0002063 loss)\nI0822 08:13:23.670616 32360 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0822 08:15:41.125274 32360 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0822 08:17:01.909679 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:17:01.909973 32360 solver.cpp:404]     Test net output #1: loss = 2.4741 (* 1 = 2.4741 loss)\nI0822 08:17:03.222292 32360 solver.cpp:228] Iteration 38800, loss = 0.000184259\nI0822 08:17:03.222331 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:17:03.222353 32360 solver.cpp:244]     Train net output #1: loss = 0.000184213 (* 1 = 0.000184213 loss)\nI0822 08:17:03.326275 32360 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0822 08:19:20.476775 32360 solver.cpp:337] Iteration 
38900, Testing net (#0)\nI0822 08:20:41.206440 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:20:41.206759 32360 solver.cpp:404]     Test net output #1: loss = 2.45826 (* 1 = 2.45826 loss)\nI0822 08:20:42.519650 32360 solver.cpp:228] Iteration 38900, loss = 0.00021727\nI0822 08:20:42.519690 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:20:42.519711 32360 solver.cpp:244]     Train net output #1: loss = 0.000217224 (* 1 = 0.000217224 loss)\nI0822 08:20:42.622663 32360 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0822 08:22:59.914160 32360 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0822 08:24:20.605020 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:24:20.605299 32360 solver.cpp:404]     Test net output #1: loss = 2.44764 (* 1 = 2.44764 loss)\nI0822 08:24:21.917758 32360 solver.cpp:228] Iteration 39000, loss = 0.000182539\nI0822 08:24:21.917794 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:24:21.917809 32360 solver.cpp:244]     Train net output #1: loss = 0.000182493 (* 1 = 0.000182493 loss)\nI0822 08:24:22.019485 32360 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0822 08:26:39.051276 32360 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0822 08:27:59.724364 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:27:59.724663 32360 solver.cpp:404]     Test net output #1: loss = 2.43441 (* 1 = 2.43441 loss)\nI0822 08:28:01.036469 32360 solver.cpp:228] Iteration 39100, loss = 0.000184028\nI0822 08:28:01.036504 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:28:01.036520 32360 solver.cpp:244]     Train net output #1: loss = 0.000183983 (* 1 = 0.000183983 loss)\nI0822 08:28:01.140352 32360 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0822 08:30:18.506096 32360 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0822 08:31:39.151459 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 
08:31:39.151770 32360 solver.cpp:404]     Test net output #1: loss = 2.42678 (* 1 = 2.42678 loss)\nI0822 08:31:40.463029 32360 solver.cpp:228] Iteration 39200, loss = 0.000169869\nI0822 08:31:40.463065 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:31:40.463080 32360 solver.cpp:244]     Train net output #1: loss = 0.000169823 (* 1 = 0.000169823 loss)\nI0822 08:31:40.566048 32360 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0822 08:33:57.774375 32360 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0822 08:35:18.427208 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:35:18.427532 32360 solver.cpp:404]     Test net output #1: loss = 2.41638 (* 1 = 2.41638 loss)\nI0822 08:35:19.738407 32360 solver.cpp:228] Iteration 39300, loss = 0.000178674\nI0822 08:35:19.738442 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:35:19.738456 32360 solver.cpp:244]     Train net output #1: loss = 0.000178628 (* 1 = 0.000178628 loss)\nI0822 08:35:19.842175 32360 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0822 08:37:37.036788 32360 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0822 08:38:57.697489 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:38:57.697788 32360 solver.cpp:404]     Test net output #1: loss = 2.41081 (* 1 = 2.41081 loss)\nI0822 08:38:59.009356 32360 solver.cpp:228] Iteration 39400, loss = 0.000188702\nI0822 08:38:59.009393 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:38:59.009408 32360 solver.cpp:244]     Train net output #1: loss = 0.000188656 (* 1 = 0.000188656 loss)\nI0822 08:38:59.109339 32360 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0822 08:41:16.248227 32360 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0822 08:42:36.906922 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:42:36.907235 32360 solver.cpp:404]     Test net output #1: loss = 2.40257 (* 1 = 2.40257 loss)\nI0822 08:42:38.218324 
32360 solver.cpp:228] Iteration 39500, loss = 0.00018229\nI0822 08:42:38.218360 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:42:38.218375 32360 solver.cpp:244]     Train net output #1: loss = 0.000182244 (* 1 = 0.000182244 loss)\nI0822 08:42:38.320407 32360 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0822 08:44:55.538720 32360 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0822 08:46:16.189173 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:46:16.189446 32360 solver.cpp:404]     Test net output #1: loss = 2.39808 (* 1 = 2.39808 loss)\nI0822 08:46:17.500646 32360 solver.cpp:228] Iteration 39600, loss = 0.000182451\nI0822 08:46:17.500679 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:46:17.500694 32360 solver.cpp:244]     Train net output #1: loss = 0.000182405 (* 1 = 0.000182405 loss)\nI0822 08:46:17.600383 32360 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0822 08:48:34.724551 32360 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0822 08:49:55.400647 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:49:55.400951 32360 solver.cpp:404]     Test net output #1: loss = 2.39136 (* 1 = 2.39136 loss)\nI0822 08:49:56.712201 32360 solver.cpp:228] Iteration 39700, loss = 0.000186589\nI0822 08:49:56.712235 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:49:56.712251 32360 solver.cpp:244]     Train net output #1: loss = 0.000186543 (* 1 = 0.000186543 loss)\nI0822 08:49:56.813935 32360 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0822 08:52:13.971709 32360 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0822 08:53:34.620026 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 08:53:34.620348 32360 solver.cpp:404]     Test net output #1: loss = 2.38724 (* 1 = 2.38724 loss)\nI0822 08:53:35.931691 32360 solver.cpp:228] Iteration 39800, loss = 0.000186727\nI0822 08:53:35.931727 32360 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0822 08:53:35.931741 32360 solver.cpp:244]     Train net output #1: loss = 0.000186681 (* 1 = 0.000186681 loss)\nI0822 08:53:36.043040 32360 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0822 08:55:53.720619 32360 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0822 08:57:14.523150 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 08:57:14.523438 32360 solver.cpp:404]     Test net output #1: loss = 2.38181 (* 1 = 2.38181 loss)\nI0822 08:57:15.834836 32360 solver.cpp:228] Iteration 39900, loss = 0.00017413\nI0822 08:57:15.834870 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:57:15.834884 32360 solver.cpp:244]     Train net output #1: loss = 0.000174084 (* 1 = 0.000174084 loss)\nI0822 08:57:15.928696 32360 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0822 08:59:33.032873 32360 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0822 09:00:53.802400 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:00:53.802675 32360 solver.cpp:404]     Test net output #1: loss = 2.37865 (* 1 = 2.37865 loss)\nI0822 09:00:55.113890 32360 solver.cpp:228] Iteration 40000, loss = 0.000179832\nI0822 09:00:55.113925 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:00:55.113940 32360 solver.cpp:244]     Train net output #1: loss = 0.000179786 (* 1 = 0.000179786 loss)\nI0822 09:00:55.222663 32360 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0822 09:03:12.752744 32360 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0822 09:04:33.544456 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:04:33.544770 32360 solver.cpp:404]     Test net output #1: loss = 2.37426 (* 1 = 2.37426 loss)\nI0822 09:04:34.855998 32360 solver.cpp:228] Iteration 40100, loss = 0.000169028\nI0822 09:04:34.856031 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:04:34.856046 32360 solver.cpp:244]     Train net output #1: loss = 0.000168982 (* 1 = 
0.000168982 loss)\nI0822 09:04:34.952715 32360 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0822 09:06:52.410569 32360 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0822 09:08:13.174737 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:08:13.175037 32360 solver.cpp:404]     Test net output #1: loss = 2.37239 (* 1 = 2.37239 loss)\nI0822 09:08:14.486268 32360 solver.cpp:228] Iteration 40200, loss = 0.000189343\nI0822 09:08:14.486299 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:08:14.486313 32360 solver.cpp:244]     Train net output #1: loss = 0.000189297 (* 1 = 0.000189297 loss)\nI0822 09:08:14.593003 32360 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0822 09:10:32.046689 32360 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0822 09:11:52.805279 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:11:52.805575 32360 solver.cpp:404]     Test net output #1: loss = 2.36849 (* 1 = 2.36849 loss)\nI0822 09:11:54.116505 32360 solver.cpp:228] Iteration 40300, loss = 0.000175576\nI0822 09:11:54.116536 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:11:54.116551 32360 solver.cpp:244]     Train net output #1: loss = 0.00017553 (* 1 = 0.00017553 loss)\nI0822 09:11:54.221882 32360 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0822 09:14:11.832448 32360 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0822 09:15:32.575117 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:15:32.575414 32360 solver.cpp:404]     Test net output #1: loss = 2.36718 (* 1 = 2.36718 loss)\nI0822 09:15:33.886518 32360 solver.cpp:228] Iteration 40400, loss = 0.000184413\nI0822 09:15:33.886548 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:15:33.886564 32360 solver.cpp:244]     Train net output #1: loss = 0.000184367 (* 1 = 0.000184367 loss)\nI0822 09:15:33.989624 32360 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0822 09:17:51.369421 32360 
solver.cpp:337] Iteration 40500, Testing net (#0)\nI0822 09:19:12.136015 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:19:12.136333 32360 solver.cpp:404]     Test net output #1: loss = 2.36401 (* 1 = 2.36401 loss)\nI0822 09:19:13.447319 32360 solver.cpp:228] Iteration 40500, loss = 0.000183767\nI0822 09:19:13.447355 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:19:13.447369 32360 solver.cpp:244]     Train net output #1: loss = 0.000183721 (* 1 = 0.000183721 loss)\nI0822 09:19:13.555819 32360 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0822 09:21:30.788923 32360 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0822 09:22:51.530462 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:22:51.530767 32360 solver.cpp:404]     Test net output #1: loss = 2.36384 (* 1 = 2.36384 loss)\nI0822 09:22:52.841831 32360 solver.cpp:228] Iteration 40600, loss = 0.000189571\nI0822 09:22:52.841862 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:22:52.841876 32360 solver.cpp:244]     Train net output #1: loss = 0.000189525 (* 1 = 0.000189525 loss)\nI0822 09:22:52.937659 32360 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0822 09:25:10.007675 32360 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0822 09:26:30.746428 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:26:30.746678 32360 solver.cpp:404]     Test net output #1: loss = 2.36155 (* 1 = 2.36155 loss)\nI0822 09:26:32.057562 32360 solver.cpp:228] Iteration 40700, loss = 0.000184436\nI0822 09:26:32.057596 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:26:32.057610 32360 solver.cpp:244]     Train net output #1: loss = 0.00018439 (* 1 = 0.00018439 loss)\nI0822 09:26:32.158387 32360 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0822 09:28:49.357650 32360 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0822 09:30:10.104171 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.09952\nI0822 09:30:10.104444 32360 solver.cpp:404]     Test net output #1: loss = 2.361 (* 1 = 2.361 loss)\nI0822 09:30:11.415241 32360 solver.cpp:228] Iteration 40800, loss = 0.000174916\nI0822 09:30:11.415277 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:30:11.415292 32360 solver.cpp:244]     Train net output #1: loss = 0.00017487 (* 1 = 0.00017487 loss)\nI0822 09:30:11.520356 32360 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0822 09:32:29.384089 32360 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0822 09:33:51.140983 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:33:51.141329 32360 solver.cpp:404]     Test net output #1: loss = 2.35851 (* 1 = 2.35851 loss)\nI0822 09:33:52.456758 32360 solver.cpp:228] Iteration 40900, loss = 0.00017747\nI0822 09:33:52.456799 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:33:52.456815 32360 solver.cpp:244]     Train net output #1: loss = 0.000177424 (* 1 = 0.000177424 loss)\nI0822 09:33:52.555507 32360 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0822 09:36:10.519124 32360 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0822 09:37:32.279095 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:37:32.279415 32360 solver.cpp:404]     Test net output #1: loss = 2.35845 (* 1 = 2.35845 loss)\nI0822 09:37:33.593325 32360 solver.cpp:228] Iteration 41000, loss = 0.000179099\nI0822 09:37:33.593366 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:37:33.593381 32360 solver.cpp:244]     Train net output #1: loss = 0.000179053 (* 1 = 0.000179053 loss)\nI0822 09:37:33.703650 32360 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0822 09:39:51.860047 32360 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0822 09:41:13.616212 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:41:13.616552 32360 solver.cpp:404]     Test net output #1: loss = 2.3568 (* 1 = 2.3568 loss)\nI0822 
09:41:14.931006 32360 solver.cpp:228] Iteration 41100, loss = 0.000178878\nI0822 09:41:14.931054 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:41:14.931071 32360 solver.cpp:244]     Train net output #1: loss = 0.000178832 (* 1 = 0.000178832 loss)\nI0822 09:41:15.034873 32360 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0822 09:43:33.093032 32360 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0822 09:44:54.874214 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:44:54.874557 32360 solver.cpp:404]     Test net output #1: loss = 2.35608 (* 1 = 2.35608 loss)\nI0822 09:44:56.188446 32360 solver.cpp:228] Iteration 41200, loss = 0.000178275\nI0822 09:44:56.188499 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:44:56.188516 32360 solver.cpp:244]     Train net output #1: loss = 0.000178229 (* 1 = 0.000178229 loss)\nI0822 09:44:56.292495 32360 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0822 09:47:13.975327 32360 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0822 09:48:35.731034 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 09:48:35.731369 32360 solver.cpp:404]     Test net output #1: loss = 2.35404 (* 1 = 2.35404 loss)\nI0822 09:48:37.045061 32360 solver.cpp:228] Iteration 41300, loss = 0.000177198\nI0822 09:48:37.045101 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:48:37.045117 32360 solver.cpp:244]     Train net output #1: loss = 0.000177152 (* 1 = 0.000177152 loss)\nI0822 09:48:37.151803 32360 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0822 09:50:54.794080 32360 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0822 09:52:16.541786 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:52:16.542100 32360 solver.cpp:404]     Test net output #1: loss = 2.35377 (* 1 = 2.35377 loss)\nI0822 09:52:17.855799 32360 solver.cpp:228] Iteration 41400, loss = 0.000176046\nI0822 09:52:17.855844 32360 solver.cpp:244]    
 Train net output #0: accuracy = 1\nI0822 09:52:17.855859 32360 solver.cpp:244]     Train net output #1: loss = 0.000176 (* 1 = 0.000176 loss)\nI0822 09:52:17.958605 32360 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0822 09:54:35.529147 32360 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0822 09:55:57.306254 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 09:55:57.306607 32360 solver.cpp:404]     Test net output #1: loss = 87.1934 (* 1 = 87.1934 loss)\nI0822 09:55:58.620626 32360 solver.cpp:228] Iteration 41500, loss = 2.31946\nI0822 09:55:58.620669 32360 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI0822 09:55:58.620685 32360 solver.cpp:244]     Train net output #1: loss = 2.31946 (* 1 = 2.31946 loss)\nI0822 09:55:58.720765 32360 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0822 09:58:16.298553 32360 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0822 09:59:38.057940 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 09:59:38.058264 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0822 09:59:39.371845 32360 solver.cpp:228] Iteration 41600, loss = 1.88739\nI0822 09:59:39.371891 32360 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI0822 09:59:39.371907 32360 solver.cpp:244]     Train net output #1: loss = 1.88739 (* 1 = 1.88739 loss)\nI0822 09:59:39.475555 32360 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0822 10:01:57.136651 32360 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0822 10:03:18.875396 32360 solver.cpp:404]     Test net output #0: accuracy = 0.08432\nI0822 10:03:18.875704 32360 solver.cpp:404]     Test net output #1: loss = 79.9723 (* 1 = 79.9723 loss)\nI0822 10:03:20.189553 32360 solver.cpp:228] Iteration 41700, loss = 1.46925\nI0822 10:03:20.189596 32360 solver.cpp:244]     Train net output #0: accuracy = 0.432\nI0822 10:03:20.189613 32360 solver.cpp:244]     Train net output #1: loss = 1.46925 (* 1 = 1.46925 
loss)\nI0822 10:03:20.295720 32360 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0822 10:05:37.995688 32360 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0822 10:06:59.758857 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 10:06:59.759182 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0822 10:07:01.073140 32360 solver.cpp:228] Iteration 41800, loss = 1.23814\nI0822 10:07:01.073186 32360 solver.cpp:244]     Train net output #0: accuracy = 0.528\nI0822 10:07:01.073204 32360 solver.cpp:244]     Train net output #1: loss = 1.23814 (* 1 = 1.23814 loss)\nI0822 10:07:01.174926 32360 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0822 10:09:18.759057 32360 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0822 10:10:40.517127 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 10:10:40.517436 32360 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0822 10:10:41.830906 32360 solver.cpp:228] Iteration 41900, loss = 0.956167\nI0822 10:10:41.830955 32360 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0822 10:10:41.830971 32360 solver.cpp:244]     Train net output #1: loss = 0.956167 (* 1 = 0.956167 loss)\nI0822 10:10:41.932763 32360 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0822 10:12:59.743501 32360 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0822 10:14:21.508005 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 10:14:21.508342 32360 solver.cpp:404]     Test net output #1: loss = 78.5889 (* 1 = 78.5889 loss)\nI0822 10:14:22.822603 32360 solver.cpp:228] Iteration 42000, loss = 0.829131\nI0822 10:14:22.822649 32360 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0822 10:14:22.822666 32360 solver.cpp:244]     Train net output #1: loss = 0.829131 (* 1 = 0.829131 loss)\nI0822 10:14:22.925310 32360 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0822 10:16:40.591887 32360 solver.cpp:337] Iteration 
42100, Testing net (#0)\nI0822 10:18:02.328713 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09924\nI0822 10:18:02.329052 32360 solver.cpp:404]     Test net output #1: loss = 78.2621 (* 1 = 78.2621 loss)\nI0822 10:18:03.643324 32360 solver.cpp:228] Iteration 42100, loss = 0.665346\nI0822 10:18:03.643364 32360 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0822 10:18:03.643378 32360 solver.cpp:244]     Train net output #1: loss = 0.665346 (* 1 = 0.665346 loss)\nI0822 10:18:03.745244 32360 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0822 10:20:21.456624 32360 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0822 10:21:43.207551 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11932\nI0822 10:21:43.207876 32360 solver.cpp:404]     Test net output #1: loss = 75.9199 (* 1 = 75.9199 loss)\nI0822 10:21:44.522065 32360 solver.cpp:228] Iteration 42200, loss = 0.7156\nI0822 10:21:44.522110 32360 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0822 10:21:44.522125 32360 solver.cpp:244]     Train net output #1: loss = 0.7156 (* 1 = 0.7156 loss)\nI0822 10:21:44.620378 32360 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0822 10:24:02.367877 32360 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0822 10:25:24.109071 32360 solver.cpp:404]     Test net output #0: accuracy = 0.0964\nI0822 10:25:24.109411 32360 solver.cpp:404]     Test net output #1: loss = 78.1192 (* 1 = 78.1192 loss)\nI0822 10:25:25.423671 32360 solver.cpp:228] Iteration 42300, loss = 0.505516\nI0822 10:25:25.423717 32360 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0822 10:25:25.423733 32360 solver.cpp:244]     Train net output #1: loss = 0.505516 (* 1 = 0.505516 loss)\nI0822 10:25:25.525060 32360 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0822 10:27:43.156294 32360 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0822 10:29:04.911996 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11988\nI0822 10:29:04.912322 32360 
solver.cpp:404]     Test net output #1: loss = 74.3434 (* 1 = 74.3434 loss)\nI0822 10:29:06.226583 32360 solver.cpp:228] Iteration 42400, loss = 0.535967\nI0822 10:29:06.226629 32360 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0822 10:29:06.226645 32360 solver.cpp:244]     Train net output #1: loss = 0.535967 (* 1 = 0.535967 loss)\nI0822 10:29:06.324174 32360 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0822 10:31:23.901196 32360 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0822 10:32:45.652040 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09852\nI0822 10:32:45.652375 32360 solver.cpp:404]     Test net output #1: loss = 74.7299 (* 1 = 74.7299 loss)\nI0822 10:32:46.967191 32360 solver.cpp:228] Iteration 42500, loss = 0.479347\nI0822 10:32:46.967239 32360 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0822 10:32:46.967254 32360 solver.cpp:244]     Train net output #1: loss = 0.479347 (* 1 = 0.479347 loss)\nI0822 10:32:47.068125 32360 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0822 10:35:04.034479 32360 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0822 10:36:25.797539 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0822 10:36:25.797866 32360 solver.cpp:404]     Test net output #1: loss = 76.615 (* 1 = 76.615 loss)\nI0822 10:36:27.111611 32360 solver.cpp:228] Iteration 42600, loss = 0.340756\nI0822 10:36:27.111649 32360 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0822 10:36:27.111663 32360 solver.cpp:244]     Train net output #1: loss = 0.340756 (* 1 = 0.340756 loss)\nI0822 10:36:27.210650 32360 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0822 10:38:44.866255 32360 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0822 10:40:06.632800 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1054\nI0822 10:40:06.633113 32360 solver.cpp:404]     Test net output #1: loss = 73.226 (* 1 = 73.226 loss)\nI0822 10:40:07.947620 32360 solver.cpp:228] Iteration 42700, 
loss = 0.256746\nI0822 10:40:07.947657 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 10:40:07.947671 32360 solver.cpp:244]     Train net output #1: loss = 0.256746 (* 1 = 0.256746 loss)\nI0822 10:40:08.052906 32360 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0822 10:42:25.835816 32360 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0822 10:43:47.606866 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09524\nI0822 10:43:47.607194 32360 solver.cpp:404]     Test net output #1: loss = 74.2961 (* 1 = 74.2961 loss)\nI0822 10:43:48.921380 32360 solver.cpp:228] Iteration 42800, loss = 0.258961\nI0822 10:43:48.921418 32360 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 10:43:48.921432 32360 solver.cpp:244]     Train net output #1: loss = 0.258961 (* 1 = 0.258961 loss)\nI0822 10:43:49.024549 32360 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0822 10:46:06.585510 32360 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0822 10:47:28.345355 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1196\nI0822 10:47:28.345685 32360 solver.cpp:404]     Test net output #1: loss = 44.9843 (* 1 = 44.9843 loss)\nI0822 10:47:29.660069 32360 solver.cpp:228] Iteration 42900, loss = 0.196791\nI0822 10:47:29.660106 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 10:47:29.660121 32360 solver.cpp:244]     Train net output #1: loss = 0.196791 (* 1 = 0.196791 loss)\nI0822 10:47:29.764861 32360 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0822 10:49:47.447211 32360 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0822 10:51:09.215626 32360 solver.cpp:404]     Test net output #0: accuracy = 0.15224\nI0822 10:51:09.215968 32360 solver.cpp:404]     Test net output #1: loss = 41.835 (* 1 = 41.835 loss)\nI0822 10:51:10.530830 32360 solver.cpp:228] Iteration 43000, loss = 0.109972\nI0822 10:51:10.530867 32360 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0822 10:51:10.530882 32360 
solver.cpp:244]     Train net output #1: loss = 0.109972 (* 1 = 0.109972 loss)\nI0822 10:51:10.629284 32360 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0822 10:53:27.713122 32360 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0822 10:54:49.469862 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16564\nI0822 10:54:49.470194 32360 solver.cpp:404]     Test net output #1: loss = 20.4112 (* 1 = 20.4112 loss)\nI0822 10:54:50.784626 32360 solver.cpp:228] Iteration 43100, loss = 0.140743\nI0822 10:54:50.784669 32360 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 10:54:50.784685 32360 solver.cpp:244]     Train net output #1: loss = 0.140743 (* 1 = 0.140743 loss)\nI0822 10:54:50.882575 32360 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0822 10:57:08.479836 32360 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0822 10:58:30.234704 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18164\nI0822 10:58:30.235030 32360 solver.cpp:404]     Test net output #1: loss = 24.6344 (* 1 = 24.6344 loss)\nI0822 10:58:31.549491 32360 solver.cpp:228] Iteration 43200, loss = 0.12623\nI0822 10:58:31.549528 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 10:58:31.549543 32360 solver.cpp:244]     Train net output #1: loss = 0.12623 (* 1 = 0.12623 loss)\nI0822 10:58:31.646234 32360 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0822 11:00:48.627662 32360 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0822 11:02:10.378072 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14404\nI0822 11:02:10.378420 32360 solver.cpp:404]     Test net output #1: loss = 33.2885 (* 1 = 33.2885 loss)\nI0822 11:02:11.692518 32360 solver.cpp:228] Iteration 43300, loss = 0.178341\nI0822 11:02:11.692554 32360 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 11:02:11.692569 32360 solver.cpp:244]     Train net output #1: loss = 0.178341 (* 1 = 0.178341 loss)\nI0822 11:02:11.797256 32360 sgd_solver.cpp:166] Iteration 
43300, lr = 0.35\nI0822 11:04:29.393280 32360 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0822 11:05:51.171186 32360 solver.cpp:404]     Test net output #0: accuracy = 0.14224\nI0822 11:05:51.171546 32360 solver.cpp:404]     Test net output #1: loss = 38.4759 (* 1 = 38.4759 loss)\nI0822 11:05:52.485985 32360 solver.cpp:228] Iteration 43400, loss = 0.214441\nI0822 11:05:52.486024 32360 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0822 11:05:52.486039 32360 solver.cpp:244]     Train net output #1: loss = 0.214441 (* 1 = 0.214441 loss)\nI0822 11:05:52.589098 32360 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0822 11:08:10.307199 32360 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0822 11:09:32.064213 32360 solver.cpp:404]     Test net output #0: accuracy = 0.3138\nI0822 11:09:32.064548 32360 solver.cpp:404]     Test net output #1: loss = 12.8561 (* 1 = 12.8561 loss)\nI0822 11:09:33.378782 32360 solver.cpp:228] Iteration 43500, loss = 0.0216842\nI0822 11:09:33.378818 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:09:33.378834 32360 solver.cpp:244]     Train net output #1: loss = 0.0216842 (* 1 = 0.0216842 loss)\nI0822 11:09:33.481822 32360 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0822 11:11:50.555297 32360 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0822 11:13:12.310174 32360 solver.cpp:404]     Test net output #0: accuracy = 0.36952\nI0822 11:13:12.310499 32360 solver.cpp:404]     Test net output #1: loss = 7.99346 (* 1 = 7.99346 loss)\nI0822 11:13:13.624233 32360 solver.cpp:228] Iteration 43600, loss = 0.00371437\nI0822 11:13:13.624270 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:13:13.624286 32360 solver.cpp:244]     Train net output #1: loss = 0.00371436 (* 1 = 0.00371436 loss)\nI0822 11:13:13.728072 32360 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0822 11:15:30.622542 32360 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0822 11:16:52.394956 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.42948\nI0822 11:16:52.395303 32360 solver.cpp:404]     Test net output #1: loss = 4.64963 (* 1 = 4.64963 loss)\nI0822 11:16:53.709523 32360 solver.cpp:228] Iteration 43700, loss = 0.000532907\nI0822 11:16:53.709559 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:16:53.709574 32360 solver.cpp:244]     Train net output #1: loss = 0.000532899 (* 1 = 0.000532899 loss)\nI0822 11:16:53.807575 32360 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0822 11:19:11.460777 32360 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0822 11:20:33.235075 32360 solver.cpp:404]     Test net output #0: accuracy = 0.32596\nI0822 11:20:33.235397 32360 solver.cpp:404]     Test net output #1: loss = 4.42508 (* 1 = 4.42508 loss)\nI0822 11:20:34.550045 32360 solver.cpp:228] Iteration 43800, loss = 0.000204509\nI0822 11:20:34.550083 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:20:34.550098 32360 solver.cpp:244]     Train net output #1: loss = 0.000204501 (* 1 = 0.000204501 loss)\nI0822 11:20:34.650704 32360 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0822 11:22:52.279047 32360 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0822 11:24:14.048712 32360 solver.cpp:404]     Test net output #0: accuracy = 0.26552\nI0822 11:24:14.049059 32360 solver.cpp:404]     Test net output #1: loss = 3.8475 (* 1 = 3.8475 loss)\nI0822 11:24:15.362823 32360 solver.cpp:228] Iteration 43900, loss = 0.000204305\nI0822 11:24:15.362864 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:24:15.362879 32360 solver.cpp:244]     Train net output #1: loss = 0.000204298 (* 1 = 0.000204298 loss)\nI0822 11:24:15.464520 32360 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0822 11:26:33.216658 32360 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0822 11:27:54.990831 32360 solver.cpp:404]     Test net output #0: accuracy = 0.16468\nI0822 11:27:54.991161 32360 solver.cpp:404]     Test net output 
#1: loss = 4.12062 (* 1 = 4.12062 loss)\nI0822 11:27:56.304787 32360 solver.cpp:228] Iteration 44000, loss = 0.000210802\nI0822 11:27:56.304828 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:27:56.304843 32360 solver.cpp:244]     Train net output #1: loss = 0.000210795 (* 1 = 0.000210795 loss)\nI0822 11:27:56.405345 32360 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0822 11:30:14.265692 32360 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0822 11:31:36.049335 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10712\nI0822 11:31:36.049702 32360 solver.cpp:404]     Test net output #1: loss = 4.63779 (* 1 = 4.63779 loss)\nI0822 11:31:37.364218 32360 solver.cpp:228] Iteration 44100, loss = 0.000324897\nI0822 11:31:37.364259 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:31:37.364275 32360 solver.cpp:244]     Train net output #1: loss = 0.000324889 (* 1 = 0.000324889 loss)\nI0822 11:31:37.468291 32360 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0822 11:33:54.445854 32360 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0822 11:35:16.206009 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10112\nI0822 11:35:16.206333 32360 solver.cpp:404]     Test net output #1: loss = 4.86604 (* 1 = 4.86604 loss)\nI0822 11:35:17.519793 32360 solver.cpp:228] Iteration 44200, loss = 0.000323386\nI0822 11:35:17.519832 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:35:17.519848 32360 solver.cpp:244]     Train net output #1: loss = 0.000323379 (* 1 = 0.000323379 loss)\nI0822 11:35:17.614794 32360 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0822 11:37:35.319514 32360 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0822 11:38:57.088142 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09992\nI0822 11:38:57.088472 32360 solver.cpp:404]     Test net output #1: loss = 4.88207 (* 1 = 4.88207 loss)\nI0822 11:38:58.402400 32360 solver.cpp:228] Iteration 44300, loss = 
0.000243792\nI0822 11:38:58.402442 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:38:58.402457 32360 solver.cpp:244]     Train net output #1: loss = 0.000243785 (* 1 = 0.000243785 loss)\nI0822 11:38:58.501870 32360 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0822 11:41:16.196205 32360 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0822 11:42:37.957446 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 11:42:37.957782 32360 solver.cpp:404]     Test net output #1: loss = 4.8265 (* 1 = 4.8265 loss)\nI0822 11:42:39.271731 32360 solver.cpp:228] Iteration 44400, loss = 0.000335625\nI0822 11:42:39.271772 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:42:39.271788 32360 solver.cpp:244]     Train net output #1: loss = 0.000335618 (* 1 = 0.000335618 loss)\nI0822 11:42:39.373921 32360 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0822 11:44:56.352728 32360 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0822 11:46:18.109822 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 11:46:18.110173 32360 solver.cpp:404]     Test net output #1: loss = 4.77494 (* 1 = 4.77494 loss)\nI0822 11:46:19.423781 32360 solver.cpp:228] Iteration 44500, loss = 0.000415367\nI0822 11:46:19.423823 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:46:19.423840 32360 solver.cpp:244]     Train net output #1: loss = 0.000415359 (* 1 = 0.000415359 loss)\nI0822 11:46:19.522161 32360 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0822 11:48:36.659675 32360 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0822 11:49:58.418193 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 11:49:58.418543 32360 solver.cpp:404]     Test net output #1: loss = 4.77239 (* 1 = 4.77239 loss)\nI0822 11:49:59.731988 32360 solver.cpp:228] Iteration 44600, loss = 0.000411371\nI0822 11:49:59.732031 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:49:59.732048 32360 
solver.cpp:244]     Train net output #1: loss = 0.000411363 (* 1 = 0.000411363 loss)\nI0822 11:49:59.831580 32360 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0822 11:52:16.835667 32360 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0822 11:53:38.578413 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 11:53:38.578744 32360 solver.cpp:404]     Test net output #1: loss = 4.71673 (* 1 = 4.71673 loss)\nI0822 11:53:39.893374 32360 solver.cpp:228] Iteration 44700, loss = 0.000446258\nI0822 11:53:39.893416 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:53:39.893431 32360 solver.cpp:244]     Train net output #1: loss = 0.00044625 (* 1 = 0.00044625 loss)\nI0822 11:53:39.997169 32360 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0822 11:55:57.115521 32360 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0822 11:57:18.855418 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 11:57:18.855751 32360 solver.cpp:404]     Test net output #1: loss = 4.60946 (* 1 = 4.60946 loss)\nI0822 11:57:20.169899 32360 solver.cpp:228] Iteration 44800, loss = 0.000349782\nI0822 11:57:20.169940 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:57:20.169955 32360 solver.cpp:244]     Train net output #1: loss = 0.000349775 (* 1 = 0.000349775 loss)\nI0822 11:57:20.273239 32360 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0822 11:59:37.215097 32360 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0822 12:00:58.963035 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:00:58.963366 32360 solver.cpp:404]     Test net output #1: loss = 4.50114 (* 1 = 4.50114 loss)\nI0822 12:01:00.277660 32360 solver.cpp:228] Iteration 44900, loss = 0.000299057\nI0822 12:01:00.277703 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:01:00.277719 32360 solver.cpp:244]     Train net output #1: loss = 0.000299049 (* 1 = 0.000299049 loss)\nI0822 12:01:00.380506 32360 
sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0822 12:03:17.453399 32360 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0822 12:04:39.211217 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:04:39.211567 32360 solver.cpp:404]     Test net output #1: loss = 4.35311 (* 1 = 4.35311 loss)\nI0822 12:04:40.525951 32360 solver.cpp:228] Iteration 45000, loss = 0.000331626\nI0822 12:04:40.525995 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:04:40.526011 32360 solver.cpp:244]     Train net output #1: loss = 0.000331618 (* 1 = 0.000331618 loss)\nI0822 12:04:40.628438 32360 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0822 12:06:57.640312 32360 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0822 12:08:19.536756 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:08:19.537089 32360 solver.cpp:404]     Test net output #1: loss = 4.18077 (* 1 = 4.18077 loss)\nI0822 12:08:20.851785 32360 solver.cpp:228] Iteration 45100, loss = 0.000405274\nI0822 12:08:20.851830 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:08:20.851846 32360 solver.cpp:244]     Train net output #1: loss = 0.000405266 (* 1 = 0.000405266 loss)\nI0822 12:08:20.949523 32360 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0822 12:10:37.946944 32360 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0822 12:11:59.800896 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:11:59.801242 32360 solver.cpp:404]     Test net output #1: loss = 4.01957 (* 1 = 4.01957 loss)\nI0822 12:12:01.115227 32360 solver.cpp:228] Iteration 45200, loss = 0.000302541\nI0822 12:12:01.115269 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:12:01.115285 32360 solver.cpp:244]     Train net output #1: loss = 0.000302534 (* 1 = 0.000302534 loss)\nI0822 12:12:01.216630 32360 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0822 12:14:18.251406 32360 solver.cpp:337] Iteration 45300, Testing net 
(#0)\nI0822 12:15:40.069054 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:15:40.069406 32360 solver.cpp:404]     Test net output #1: loss = 3.8439 (* 1 = 3.8439 loss)\nI0822 12:15:41.384639 32360 solver.cpp:228] Iteration 45300, loss = 0.000335018\nI0822 12:15:41.384685 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:15:41.384701 32360 solver.cpp:244]     Train net output #1: loss = 0.00033501 (* 1 = 0.00033501 loss)\nI0822 12:15:41.482327 32360 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0822 12:17:58.496259 32360 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0822 12:19:20.315600 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:19:20.315960 32360 solver.cpp:404]     Test net output #1: loss = 3.69394 (* 1 = 3.69394 loss)\nI0822 12:19:21.630995 32360 solver.cpp:228] Iteration 45400, loss = 0.000372555\nI0822 12:19:21.631034 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:19:21.631050 32360 solver.cpp:244]     Train net output #1: loss = 0.000372547 (* 1 = 0.000372547 loss)\nI0822 12:19:21.733491 32360 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0822 12:21:39.412113 32360 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0822 12:23:01.204736 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:23:01.205088 32360 solver.cpp:404]     Test net output #1: loss = 3.56958 (* 1 = 3.56958 loss)\nI0822 12:23:02.520213 32360 solver.cpp:228] Iteration 45500, loss = 0.000373779\nI0822 12:23:02.520252 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:23:02.520267 32360 solver.cpp:244]     Train net output #1: loss = 0.000373771 (* 1 = 0.000373771 loss)\nI0822 12:23:02.624229 32360 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0822 12:25:19.869462 32360 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0822 12:26:41.648325 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:26:41.648650 32360 
solver.cpp:404]     Test net output #1: loss = 3.41499 (* 1 = 3.41499 loss)\nI0822 12:26:42.964021 32360 solver.cpp:228] Iteration 45600, loss = 0.000310155\nI0822 12:26:42.964062 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:26:42.964077 32360 solver.cpp:244]     Train net output #1: loss = 0.000310147 (* 1 = 0.000310147 loss)\nI0822 12:26:43.073542 32360 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0822 12:29:00.708938 32360 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0822 12:30:22.467794 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:30:22.468116 32360 solver.cpp:404]     Test net output #1: loss = 3.27111 (* 1 = 3.27111 loss)\nI0822 12:30:23.782822 32360 solver.cpp:228] Iteration 45700, loss = 0.000286515\nI0822 12:30:23.782861 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:30:23.782876 32360 solver.cpp:244]     Train net output #1: loss = 0.000286507 (* 1 = 0.000286507 loss)\nI0822 12:30:23.887331 32360 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0822 12:32:41.626534 32360 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0822 12:34:03.433239 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:34:03.433586 32360 solver.cpp:404]     Test net output #1: loss = 3.14542 (* 1 = 3.14542 loss)\nI0822 12:34:04.748021 32360 solver.cpp:228] Iteration 45800, loss = 0.000292043\nI0822 12:34:04.748061 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:34:04.748077 32360 solver.cpp:244]     Train net output #1: loss = 0.000292035 (* 1 = 0.000292035 loss)\nI0822 12:34:04.849668 32360 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0822 12:36:22.533355 32360 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0822 12:37:44.422801 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:37:44.423152 32360 solver.cpp:404]     Test net output #1: loss = 3.04263 (* 1 = 3.04263 loss)\nI0822 12:37:45.738051 32360 solver.cpp:228] 
Iteration 45900, loss = 0.000319968\nI0822 12:37:45.738093 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:37:45.738108 32360 solver.cpp:244]     Train net output #1: loss = 0.00031996 (* 1 = 0.00031996 loss)\nI0822 12:37:45.847396 32360 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0822 12:40:03.487897 32360 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0822 12:41:25.241612 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:41:25.241950 32360 solver.cpp:404]     Test net output #1: loss = 2.94999 (* 1 = 2.94999 loss)\nI0822 12:41:26.555851 32360 solver.cpp:228] Iteration 46000, loss = 0.000327834\nI0822 12:41:26.555893 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:41:26.555909 32360 solver.cpp:244]     Train net output #1: loss = 0.000327827 (* 1 = 0.000327827 loss)\nI0822 12:41:26.659729 32360 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0822 12:43:44.229369 32360 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0822 12:45:05.994635 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:45:05.994969 32360 solver.cpp:404]     Test net output #1: loss = 2.87669 (* 1 = 2.87669 loss)\nI0822 12:45:07.310871 32360 solver.cpp:228] Iteration 46100, loss = 0.000323365\nI0822 12:45:07.310912 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:45:07.310927 32360 solver.cpp:244]     Train net output #1: loss = 0.000323357 (* 1 = 0.000323357 loss)\nI0822 12:45:07.415128 32360 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0822 12:47:25.128386 32360 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0822 12:48:46.902417 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:48:46.902750 32360 solver.cpp:404]     Test net output #1: loss = 2.80278 (* 1 = 2.80278 loss)\nI0822 12:48:48.219252 32360 solver.cpp:228] Iteration 46200, loss = 0.000243322\nI0822 12:48:48.219291 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0822 12:48:48.219313 32360 solver.cpp:244]     Train net output #1: loss = 0.000243314 (* 1 = 0.000243314 loss)\nI0822 12:48:48.322644 32360 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0822 12:51:05.832665 32360 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0822 12:52:27.584357 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:52:27.584704 32360 solver.cpp:404]     Test net output #1: loss = 2.74223 (* 1 = 2.74223 loss)\nI0822 12:52:28.898635 32360 solver.cpp:228] Iteration 46300, loss = 0.000314244\nI0822 12:52:28.898674 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:52:28.898690 32360 solver.cpp:244]     Train net output #1: loss = 0.000314237 (* 1 = 0.000314237 loss)\nI0822 12:52:29.010006 32360 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0822 12:54:46.558682 32360 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0822 12:56:08.343775 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 12:56:08.344118 32360 solver.cpp:404]     Test net output #1: loss = 2.6928 (* 1 = 2.6928 loss)\nI0822 12:56:09.658344 32360 solver.cpp:228] Iteration 46400, loss = 0.000261728\nI0822 12:56:09.658386 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:56:09.658402 32360 solver.cpp:244]     Train net output #1: loss = 0.000261721 (* 1 = 0.000261721 loss)\nI0822 12:56:09.768887 32360 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0822 12:58:27.406602 32360 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0822 12:59:49.181663 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 12:59:49.182020 32360 solver.cpp:404]     Test net output #1: loss = 2.64948 (* 1 = 2.64948 loss)\nI0822 12:59:50.497705 32360 solver.cpp:228] Iteration 46500, loss = 0.000269909\nI0822 12:59:50.497747 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:59:50.497762 32360 solver.cpp:244]     Train net output #1: loss = 0.000269901 (* 1 = 0.000269901 loss)\nI0822 
12:59:50.604236 32360 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0822 13:02:08.204715 32360 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0822 13:03:29.974791 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 13:03:29.975147 32360 solver.cpp:404]     Test net output #1: loss = 2.6181 (* 1 = 2.6181 loss)\nI0822 13:03:31.289486 32360 solver.cpp:228] Iteration 46600, loss = 0.000239582\nI0822 13:03:31.289525 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:03:31.289541 32360 solver.cpp:244]     Train net output #1: loss = 0.000239574 (* 1 = 0.000239574 loss)\nI0822 13:03:31.399469 32360 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0822 13:05:49.058012 32360 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0822 13:07:10.835289 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:07:10.835640 32360 solver.cpp:404]     Test net output #1: loss = 2.58269 (* 1 = 2.58269 loss)\nI0822 13:07:12.150806 32360 solver.cpp:228] Iteration 46700, loss = 0.000227555\nI0822 13:07:12.150846 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:07:12.150861 32360 solver.cpp:244]     Train net output #1: loss = 0.000227547 (* 1 = 0.000227547 loss)\nI0822 13:07:12.256168 32360 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0822 13:09:29.931356 32360 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0822 13:10:51.718067 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 13:10:51.718420 32360 solver.cpp:404]     Test net output #1: loss = 2.55608 (* 1 = 2.55608 loss)\nI0822 13:10:53.033849 32360 solver.cpp:228] Iteration 46800, loss = 0.000187729\nI0822 13:10:53.033890 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:10:53.033906 32360 solver.cpp:244]     Train net output #1: loss = 0.000187721 (* 1 = 0.000187721 loss)\nI0822 13:10:53.142717 32360 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0822 13:13:10.687953 32360 solver.cpp:337] Iteration 
46900, Testing net (#0)\nI0822 13:14:32.451673 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:14:32.452029 32360 solver.cpp:404]     Test net output #1: loss = 2.53087 (* 1 = 2.53087 loss)\nI0822 13:14:33.766120 32360 solver.cpp:228] Iteration 46900, loss = 0.000261265\nI0822 13:14:33.766160 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:14:33.766176 32360 solver.cpp:244]     Train net output #1: loss = 0.000261257 (* 1 = 0.000261257 loss)\nI0822 13:14:33.865933 32360 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0822 13:16:51.410086 32360 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0822 13:18:13.174767 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 13:18:13.175097 32360 solver.cpp:404]     Test net output #1: loss = 2.51227 (* 1 = 2.51227 loss)\nI0822 13:18:14.490499 32360 solver.cpp:228] Iteration 47000, loss = 0.000209488\nI0822 13:18:14.490540 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:18:14.490556 32360 solver.cpp:244]     Train net output #1: loss = 0.00020948 (* 1 = 0.00020948 loss)\nI0822 13:18:14.592087 32360 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0822 13:20:32.163497 32360 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0822 13:21:53.931278 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:21:53.931607 32360 solver.cpp:404]     Test net output #1: loss = 2.49329 (* 1 = 2.49329 loss)\nI0822 13:21:55.247437 32360 solver.cpp:228] Iteration 47100, loss = 0.000205333\nI0822 13:21:55.247478 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:21:55.247493 32360 solver.cpp:244]     Train net output #1: loss = 0.000205326 (* 1 = 0.000205326 loss)\nI0822 13:21:55.348448 32360 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0822 13:24:13.044883 32360 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0822 13:25:34.819563 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 
13:25:34.819910 32360 solver.cpp:404]     Test net output #1: loss = 2.47918 (* 1 = 2.47918 loss)\nI0822 13:25:36.134202 32360 solver.cpp:228] Iteration 47200, loss = 0.000201444\nI0822 13:25:36.134243 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:25:36.134259 32360 solver.cpp:244]     Train net output #1: loss = 0.000201436 (* 1 = 0.000201436 loss)\nI0822 13:25:36.245980 32360 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0822 13:27:54.019635 32360 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0822 13:29:15.776690 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 13:29:15.777045 32360 solver.cpp:404]     Test net output #1: loss = 2.46336 (* 1 = 2.46336 loss)\nI0822 13:29:17.091303 32360 solver.cpp:228] Iteration 47300, loss = 0.000188085\nI0822 13:29:17.091342 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:29:17.091358 32360 solver.cpp:244]     Train net output #1: loss = 0.000188078 (* 1 = 0.000188078 loss)\nI0822 13:29:17.195297 32360 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0822 13:31:34.844477 32360 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0822 13:32:56.578872 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 13:32:56.579216 32360 solver.cpp:404]     Test net output #1: loss = 2.45038 (* 1 = 2.45038 loss)\nI0822 13:32:57.893998 32360 solver.cpp:228] Iteration 47400, loss = 0.000208725\nI0822 13:32:57.894047 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:32:57.894070 32360 solver.cpp:244]     Train net output #1: loss = 0.000208717 (* 1 = 0.000208717 loss)\nI0822 13:32:57.995610 32360 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0822 13:35:15.602299 32360 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0822 13:36:37.370502 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 13:36:37.370857 32360 solver.cpp:404]     Test net output #1: loss = 2.43698 (* 1 = 2.43698 loss)\nI0822 13:36:38.686655 
32360 solver.cpp:228] Iteration 47500, loss = 0.000196426\nI0822 13:36:38.686697 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:36:38.686713 32360 solver.cpp:244]     Train net output #1: loss = 0.000196418 (* 1 = 0.000196418 loss)\nI0822 13:36:38.794916 32360 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0822 13:38:56.426849 32360 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0822 13:40:18.239011 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 13:40:18.239352 32360 solver.cpp:404]     Test net output #1: loss = 2.4242 (* 1 = 2.4242 loss)\nI0822 13:40:19.557216 32360 solver.cpp:228] Iteration 47600, loss = 0.000191281\nI0822 13:40:19.557261 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:40:19.557278 32360 solver.cpp:244]     Train net output #1: loss = 0.000191273 (* 1 = 0.000191273 loss)\nI0822 13:40:19.659271 32360 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0822 13:42:37.173795 32360 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0822 13:43:58.622442 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:43:58.622787 32360 solver.cpp:404]     Test net output #1: loss = 2.4103 (* 1 = 2.4103 loss)\nI0822 13:43:59.937191 32360 solver.cpp:228] Iteration 47700, loss = 0.000205642\nI0822 13:43:59.937237 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:43:59.937252 32360 solver.cpp:244]     Train net output #1: loss = 0.000205635 (* 1 = 0.000205635 loss)\nI0822 13:44:00.045658 32360 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0822 13:46:17.603479 32360 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0822 13:47:39.289178 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 13:47:39.289531 32360 solver.cpp:404]     Test net output #1: loss = 2.39815 (* 1 = 2.39815 loss)\nI0822 13:47:40.602797 32360 solver.cpp:228] Iteration 47800, loss = 0.000185443\nI0822 13:47:40.602838 32360 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0822 13:47:40.602854 32360 solver.cpp:244]     Train net output #1: loss = 0.000185436 (* 1 = 0.000185436 loss)\nI0822 13:47:40.704212 32360 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0822 13:49:58.317315 32360 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0822 13:51:19.139250 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:51:19.139566 32360 solver.cpp:404]     Test net output #1: loss = 2.38819 (* 1 = 2.38819 loss)\nI0822 13:51:20.451196 32360 solver.cpp:228] Iteration 47900, loss = 0.00021174\nI0822 13:51:20.451241 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:51:20.451258 32360 solver.cpp:244]     Train net output #1: loss = 0.000211732 (* 1 = 0.000211732 loss)\nI0822 13:51:20.567235 32360 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0822 13:53:37.734032 32360 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0822 13:54:58.494010 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 13:54:58.494277 32360 solver.cpp:404]     Test net output #1: loss = 2.38009 (* 1 = 2.38009 loss)\nI0822 13:54:59.806123 32360 solver.cpp:228] Iteration 48000, loss = 0.000196974\nI0822 13:54:59.806164 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:54:59.806180 32360 solver.cpp:244]     Train net output #1: loss = 0.000196966 (* 1 = 0.000196966 loss)\nI0822 13:54:59.916271 32360 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0822 13:57:17.080662 32360 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0822 13:58:37.853289 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 13:58:37.853574 32360 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0822 13:58:39.165727 32360 solver.cpp:228] Iteration 48100, loss = 3.37511\nI0822 13:58:39.165768 32360 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI0822 13:58:39.165786 32360 solver.cpp:244]     Train net output #1: loss = 3.37511 (* 1 = 3.37511 
loss)\nI0822 13:58:39.268252 32360 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0822 14:00:56.405042 32360 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0822 14:02:17.161027 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 14:02:17.161324 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0822 14:02:18.472810 32360 solver.cpp:228] Iteration 48200, loss = 1.54927\nI0822 14:02:18.472851 32360 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0822 14:02:18.472868 32360 solver.cpp:244]     Train net output #1: loss = 1.54927 (* 1 = 1.54927 loss)\nI0822 14:02:18.583016 32360 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0822 14:04:35.716249 32360 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0822 14:05:56.501358 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:05:56.501654 32360 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0822 14:05:57.813068 32360 solver.cpp:228] Iteration 48300, loss = 1.18827\nI0822 14:05:57.813109 32360 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0822 14:05:57.813125 32360 solver.cpp:244]     Train net output #1: loss = 1.18827 (* 1 = 1.18827 loss)\nI0822 14:05:57.917410 32360 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0822 14:08:15.055884 32360 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0822 14:09:35.814668 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:09:35.814954 32360 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0822 14:09:37.126955 32360 solver.cpp:228] Iteration 48400, loss = 1.03443\nI0822 14:09:37.126996 32360 solver.cpp:244]     Train net output #0: accuracy = 0.616\nI0822 14:09:37.127013 32360 solver.cpp:244]     Train net output #1: loss = 1.03443 (* 1 = 1.03443 loss)\nI0822 14:09:37.231665 32360 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0822 14:11:54.283071 32360 solver.cpp:337] Iteration 48500, 
Testing net (#0)\nI0822 14:13:15.034577 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 14:13:15.034881 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0822 14:13:16.346235 32360 solver.cpp:228] Iteration 48500, loss = 0.921761\nI0822 14:13:16.346276 32360 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0822 14:13:16.346292 32360 solver.cpp:244]     Train net output #1: loss = 0.921761 (* 1 = 0.921761 loss)\nI0822 14:13:16.451256 32360 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0822 14:15:33.553705 32360 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0822 14:16:54.318640 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:16:54.318899 32360 solver.cpp:404]     Test net output #1: loss = 78.5573 (* 1 = 78.5573 loss)\nI0822 14:16:55.629981 32360 solver.cpp:228] Iteration 48600, loss = 0.773755\nI0822 14:16:55.630019 32360 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0822 14:16:55.630035 32360 solver.cpp:244]     Train net output #1: loss = 0.773755 (* 1 = 0.773755 loss)\nI0822 14:16:55.732758 32360 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0822 14:19:12.947875 32360 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0822 14:20:33.696988 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 14:20:33.697295 32360 solver.cpp:404]     Test net output #1: loss = 78.6448 (* 1 = 78.6448 loss)\nI0822 14:20:35.009577 32360 solver.cpp:228] Iteration 48700, loss = 0.658042\nI0822 14:20:35.009619 32360 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0822 14:20:35.009635 32360 solver.cpp:244]     Train net output #1: loss = 0.658042 (* 1 = 0.658042 loss)\nI0822 14:20:35.113512 32360 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0822 14:22:52.254276 32360 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0822 14:24:13.032407 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:24:13.032644 32360 
solver.cpp:404]     Test net output #1: loss = 78.5609 (* 1 = 78.5609 loss)\nI0822 14:24:14.344533 32360 solver.cpp:228] Iteration 48800, loss = 0.552734\nI0822 14:24:14.344583 32360 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0822 14:24:14.344607 32360 solver.cpp:244]     Train net output #1: loss = 0.552734 (* 1 = 0.552734 loss)\nI0822 14:24:14.456068 32360 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0822 14:26:31.500689 32360 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0822 14:27:52.254694 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10056\nI0822 14:27:52.255012 32360 solver.cpp:404]     Test net output #1: loss = 78.254 (* 1 = 78.254 loss)\nI0822 14:27:53.567165 32360 solver.cpp:228] Iteration 48900, loss = 0.550125\nI0822 14:27:53.567209 32360 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0822 14:27:53.567234 32360 solver.cpp:244]     Train net output #1: loss = 0.550125 (* 1 = 0.550125 loss)\nI0822 14:27:53.668906 32360 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0822 14:30:10.742424 32360 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0822 14:31:31.496798 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09516\nI0822 14:31:31.497041 32360 solver.cpp:404]     Test net output #1: loss = 67.4619 (* 1 = 67.4619 loss)\nI0822 14:31:32.809017 32360 solver.cpp:228] Iteration 49000, loss = 0.390916\nI0822 14:31:32.809064 32360 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0822 14:31:32.809088 32360 solver.cpp:244]     Train net output #1: loss = 0.390916 (* 1 = 0.390916 loss)\nI0822 14:31:32.918081 32360 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0822 14:33:50.336904 32360 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0822 14:35:11.107396 32360 solver.cpp:404]     Test net output #0: accuracy = 0.08668\nI0822 14:35:11.107679 32360 solver.cpp:404]     Test net output #1: loss = 64.9175 (* 1 = 64.9175 loss)\nI0822 14:35:12.419980 32360 solver.cpp:228] Iteration 49100, 
loss = 0.362698\nI0822 14:35:12.420025 32360 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0822 14:35:12.420048 32360 solver.cpp:244]     Train net output #1: loss = 0.362698 (* 1 = 0.362698 loss)\nI0822 14:35:12.523191 32360 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0822 14:37:30.007582 32360 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0822 14:38:50.784065 32360 solver.cpp:404]     Test net output #0: accuracy = 0.11948\nI0822 14:38:50.784334 32360 solver.cpp:404]     Test net output #1: loss = 63.0556 (* 1 = 63.0556 loss)\nI0822 14:38:52.096442 32360 solver.cpp:228] Iteration 49200, loss = 0.34484\nI0822 14:38:52.096487 32360 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0822 14:38:52.096510 32360 solver.cpp:244]     Train net output #1: loss = 0.34484 (* 1 = 0.34484 loss)\nI0822 14:38:52.203349 32360 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0822 14:41:09.464298 32360 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0822 14:42:30.249122 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09012\nI0822 14:42:30.249428 32360 solver.cpp:404]     Test net output #1: loss = 43.4746 (* 1 = 43.4746 loss)\nI0822 14:42:31.561852 32360 solver.cpp:228] Iteration 49300, loss = 0.200338\nI0822 14:42:31.561897 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 14:42:31.561921 32360 solver.cpp:244]     Train net output #1: loss = 0.200338 (* 1 = 0.200338 loss)\nI0822 14:42:31.663552 32360 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0822 14:44:49.011535 32360 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0822 14:46:09.785506 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1058\nI0822 14:46:09.785789 32360 solver.cpp:404]     Test net output #1: loss = 63.8284 (* 1 = 63.8284 loss)\nI0822 14:46:11.098052 32360 solver.cpp:228] Iteration 49400, loss = 0.260272\nI0822 14:46:11.098088 32360 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0822 14:46:11.098109 32360 
solver.cpp:244]     Train net output #1: loss = 0.260272 (* 1 = 0.260272 loss)\nI0822 14:46:11.205531 32360 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0822 14:48:28.525645 32360 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0822 14:49:49.307209 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10336\nI0822 14:49:49.307498 32360 solver.cpp:404]     Test net output #1: loss = 57.0465 (* 1 = 57.0465 loss)\nI0822 14:49:50.619838 32360 solver.cpp:228] Iteration 49500, loss = 0.187869\nI0822 14:49:50.619881 32360 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0822 14:49:50.619904 32360 solver.cpp:244]     Train net output #1: loss = 0.187869 (* 1 = 0.187869 loss)\nI0822 14:49:50.727716 32360 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0822 14:52:07.979718 32360 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0822 14:53:28.759879 32360 solver.cpp:404]     Test net output #0: accuracy = 0.10388\nI0822 14:53:28.760182 32360 solver.cpp:404]     Test net output #1: loss = 53.2399 (* 1 = 53.2399 loss)\nI0822 14:53:30.071997 32360 solver.cpp:228] Iteration 49600, loss = 0.0929398\nI0822 14:53:30.072041 32360 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 14:53:30.072063 32360 solver.cpp:244]     Train net output #1: loss = 0.0929398 (* 1 = 0.0929398 loss)\nI0822 14:53:30.178869 32360 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0822 14:55:47.496724 32360 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0822 14:57:08.270329 32360 solver.cpp:404]     Test net output #0: accuracy = 0.107\nI0822 14:57:08.270645 32360 solver.cpp:404]     Test net output #1: loss = 43.9834 (* 1 = 43.9834 loss)\nI0822 14:57:09.581845 32360 solver.cpp:228] Iteration 49700, loss = 0.191308\nI0822 14:57:09.581882 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 14:57:09.581905 32360 solver.cpp:244]     Train net output #1: loss = 0.191308 (* 1 = 0.191308 loss)\nI0822 14:57:09.684240 32360 sgd_solver.cpp:166] Iteration 
49700, lr = 0.35\nI0822 14:59:27.033857 32360 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0822 15:00:47.803009 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1312\nI0822 15:00:47.803325 32360 solver.cpp:404]     Test net output #1: loss = 30.0378 (* 1 = 30.0378 loss)\nI0822 15:00:49.114274 32360 solver.cpp:228] Iteration 49800, loss = 0.178686\nI0822 15:00:49.114322 32360 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 15:00:49.114346 32360 solver.cpp:244]     Train net output #1: loss = 0.178686 (* 1 = 0.178686 loss)\nI0822 15:00:49.219080 32360 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0822 15:03:06.415428 32360 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0822 15:04:27.203728 32360 solver.cpp:404]     Test net output #0: accuracy = 0.1046\nI0822 15:04:27.204015 32360 solver.cpp:404]     Test net output #1: loss = 34.0517 (* 1 = 34.0517 loss)\nI0822 15:04:28.516083 32360 solver.cpp:228] Iteration 49900, loss = 0.0884633\nI0822 15:04:28.516129 32360 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 15:04:28.516152 32360 solver.cpp:244]     Train net output #1: loss = 0.0884633 (* 1 = 0.0884633 loss)\nI0822 15:04:28.619710 32360 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0822 15:06:45.844753 32360 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0822 15:08:06.599289 32360 solver.cpp:404]     Test net output #0: accuracy = 0.09284\nI0822 15:08:06.599601 32360 solver.cpp:404]     Test net output #1: loss = 27.1621 (* 1 = 27.1621 loss)\nI0822 15:08:07.910959 32360 solver.cpp:228] Iteration 50000, loss = 0.126587\nI0822 15:08:07.910995 32360 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 15:08:07.911017 32360 solver.cpp:244]     Train net output #1: loss = 0.126587 (* 1 = 0.126587 loss)\nI0822 15:08:08.019176 32360 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0822 15:08:08.019198 32360 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0822 15:10:25.279913 
32360 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0822 15:11:45.929653 32360 solver.cpp:404]     Test net output #0: accuracy = 0.12956\nI0822 15:11:45.929940 32360 solver.cpp:404]     Test net output #1: loss = 17.5432 (* 1 = 17.5432 loss)\nI0822 15:11:47.240939 32360 solver.cpp:228] Iteration 50100, loss = 0.0103836\nI0822 15:11:47.240985 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:11:47.241008 32360 solver.cpp:244]     Train net output #1: loss = 0.0103836 (* 1 = 0.0103836 loss)\nI0822 15:11:47.344740 32360 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0822 15:14:04.594471 32360 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0822 15:15:25.262444 32360 solver.cpp:404]     Test net output #0: accuracy = 0.144\nI0822 15:15:25.262754 32360 solver.cpp:404]     Test net output #1: loss = 15.6069 (* 1 = 15.6069 loss)\nI0822 15:15:26.574141 32360 solver.cpp:228] Iteration 50200, loss = 0.00787356\nI0822 15:15:26.574187 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:15:26.574211 32360 solver.cpp:244]     Train net output #1: loss = 0.00787352 (* 1 = 0.00787352 loss)\nI0822 15:15:26.684762 32360 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0822 15:17:43.970950 32360 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0822 15:19:04.610916 32360 solver.cpp:404]     Test net output #0: accuracy = 0.157\nI0822 15:19:04.611227 32360 solver.cpp:404]     Test net output #1: loss = 13.8929 (* 1 = 13.8929 loss)\nI0822 15:19:05.924016 32360 solver.cpp:228] Iteration 50300, loss = 0.00545452\nI0822 15:19:05.924054 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:19:05.924077 32360 solver.cpp:244]     Train net output #1: loss = 0.00545449 (* 1 = 0.00545449 loss)\nI0822 15:19:06.025904 32360 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0822 15:21:23.280308 32360 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0822 15:22:43.981130 32360 solver.cpp:404]     Test net output #0: accuracy 
= 0.17148\nI0822 15:22:43.981437 32360 solver.cpp:404]     Test net output #1: loss = 12.2938 (* 1 = 12.2938 loss)\nI0822 15:22:45.292948 32360 solver.cpp:228] Iteration 50400, loss = 0.00416982\nI0822 15:22:45.292991 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:22:45.293017 32360 solver.cpp:244]     Train net output #1: loss = 0.00416979 (* 1 = 0.00416979 loss)\nI0822 15:22:45.398629 32360 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0822 15:25:02.744148 32360 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0822 15:26:23.403373 32360 solver.cpp:404]     Test net output #0: accuracy = 0.18448\nI0822 15:26:23.403625 32360 solver.cpp:404]     Test net output #1: loss = 10.848 (* 1 = 10.848 loss)\nI0822 15:26:24.714248 32360 solver.cpp:228] Iteration 50500, loss = 0.0028335\nI0822 15:26:24.714292 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:26:24.714320 32360 solver.cpp:244]     Train net output #1: loss = 0.00283347 (* 1 = 0.00283347 loss)\nI0822 15:26:24.821421 32360 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0822 15:28:42.054251 32360 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0822 15:30:02.725102 32360 solver.cpp:404]     Test net output #0: accuracy = 0.20572\nI0822 15:30:02.725415 32360 solver.cpp:404]     Test net output #1: loss = 9.46711 (* 1 = 9.46711 loss)\nI0822 15:30:04.037220 32360 solver.cpp:228] Iteration 50600, loss = 0.00213917\nI0822 15:30:04.037255 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:30:04.037276 32360 solver.cpp:244]     Train net output #1: loss = 0.00213914 (* 1 = 0.00213914 loss)\nI0822 15:30:04.142705 32360 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0822 15:32:21.541326 32360 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0822 15:33:42.216900 32360 solver.cpp:404]     Test net output #0: accuracy = 0.23068\nI0822 15:33:42.217197 32360 solver.cpp:404]     Test net output #1: loss = 8.20956 (* 1 = 8.20956 loss)\nI0822 
15:33:43.528357 32360 solver.cpp:228] Iteration 50700, loss = 0.00258751\nI0822 15:33:43.528398 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:33:43.528420 32360 solver.cpp:244]     Train net output #1: loss = 0.00258748 (* 1 = 0.00258748 loss)\nI0822 15:33:43.635682 32360 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0822 15:36:00.948251 32360 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0822 15:37:21.637652 32360 solver.cpp:404]     Test net output #0: accuracy = 0.26344\nI0822 15:37:21.637965 32360 solver.cpp:404]     Test net output #1: loss = 7.10939 (* 1 = 7.10939 loss)\nI0822 15:37:22.948385 32360 solver.cpp:228] Iteration 50800, loss = 0.00200441\nI0822 15:37:22.948426 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:37:22.948448 32360 solver.cpp:244]     Train net output #1: loss = 0.00200438 (* 1 = 0.00200438 loss)\nI0822 15:37:23.056560 32360 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0822 15:39:40.290782 32360 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0822 15:41:00.940704 32360 solver.cpp:404]     Test net output #0: accuracy = 0.30156\nI0822 15:41:00.941018 32360 solver.cpp:404]     Test net output #1: loss = 6.20713 (* 1 = 6.20713 loss)\nI0822 15:41:02.251874 32360 solver.cpp:228] Iteration 50900, loss = 0.00194664\nI0822 15:41:02.251914 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:41:02.251938 32360 solver.cpp:244]     Train net output #1: loss = 0.00194661 (* 1 = 0.00194661 loss)\nI0822 15:41:02.359035 32360 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0822 15:43:19.741518 32360 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0822 15:44:40.537403 32360 solver.cpp:404]     Test net output #0: accuracy = 0.34528\nI0822 15:44:40.537653 32360 solver.cpp:404]     Test net output #1: loss = 5.26549 (* 1 = 5.26549 loss)\nI0822 15:44:41.849395 32360 solver.cpp:228] Iteration 51000, loss = 0.00142454\nI0822 15:44:41.849437 32360 solver.cpp:244]     Train 
net output #0: accuracy = 1\nI0822 15:44:41.849460 32360 solver.cpp:244]     Train net output #1: loss = 0.00142451 (* 1 = 0.00142451 loss)\nI0822 15:44:41.954708 32360 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0822 15:46:59.263986 32360 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0822 15:48:20.034346 32360 solver.cpp:404]     Test net output #0: accuracy = 0.38456\nI0822 15:48:20.034653 32360 solver.cpp:404]     Test net output #1: loss = 4.55956 (* 1 = 4.55956 loss)\nI0822 15:48:21.346384 32360 solver.cpp:228] Iteration 51100, loss = 0.00188112\nI0822 15:48:21.346426 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:48:21.346449 32360 solver.cpp:244]     Train net output #1: loss = 0.00188109 (* 1 = 0.00188109 loss)\nI0822 15:48:21.446729 32360 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0822 15:50:38.738358 32360 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0822 15:51:59.523579 32360 solver.cpp:404]     Test net output #0: accuracy = 0.42196\nI0822 15:51:59.523859 32360 solver.cpp:404]     Test net output #1: loss = 3.98222 (* 1 = 3.98222 loss)\nI0822 15:52:00.835121 32360 solver.cpp:228] Iteration 51200, loss = 0.00140737\nI0822 15:52:00.835167 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:52:00.835192 32360 solver.cpp:244]     Train net output #1: loss = 0.00140734 (* 1 = 0.00140734 loss)\nI0822 15:52:00.942497 32360 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0822 15:54:18.176918 32360 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0822 15:55:38.935961 32360 solver.cpp:404]     Test net output #0: accuracy = 0.45588\nI0822 15:55:38.936291 32360 solver.cpp:404]     Test net output #1: loss = 3.51625 (* 1 = 3.51625 loss)\nI0822 15:55:40.248255 32360 solver.cpp:228] Iteration 51300, loss = 0.00173244\nI0822 15:55:40.248306 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:55:40.248332 32360 solver.cpp:244]     Train net output #1: loss = 0.00173241 (* 1 = 
0.00173241 loss)\nI0822 15:55:40.359186 32360 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0822 15:57:57.588114 32360 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0822 15:59:18.339745 32360 solver.cpp:404]     Test net output #0: accuracy = 0.49316\nI0822 15:59:18.340077 32360 solver.cpp:404]     Test net output #1: loss = 3.1505 (* 1 = 3.1505 loss)\nI0822 15:59:19.651000 32360 solver.cpp:228] Iteration 51400, loss = 0.00114706\nI0822 15:59:19.651044 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:59:19.651067 32360 solver.cpp:244]     Train net output #1: loss = 0.00114703 (* 1 = 0.00114703 loss)\nI0822 15:59:19.754103 32360 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0822 16:01:37.069847 32360 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0822 16:02:57.844436 32360 solver.cpp:404]     Test net output #0: accuracy = 0.519\nI0822 16:02:57.844751 32360 solver.cpp:404]     Test net output #1: loss = 2.8515 (* 1 = 2.8515 loss)\nI0822 16:02:59.156328 32360 solver.cpp:228] Iteration 51500, loss = 0.00164697\nI0822 16:02:59.156378 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:02:59.156400 32360 solver.cpp:244]     Train net output #1: loss = 0.00164693 (* 1 = 0.00164693 loss)\nI0822 16:02:59.265193 32360 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0822 16:05:16.509330 32360 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0822 16:06:37.307256 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5452\nI0822 16:06:37.307595 32360 solver.cpp:404]     Test net output #1: loss = 2.63605 (* 1 = 2.63605 loss)\nI0822 16:06:38.618242 32360 solver.cpp:228] Iteration 51600, loss = 0.00124613\nI0822 16:06:38.618278 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:06:38.618305 32360 solver.cpp:244]     Train net output #1: loss = 0.0012461 (* 1 = 0.0012461 loss)\nI0822 16:06:38.727649 32360 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0822 16:08:56.234267 32360 
solver.cpp:337] Iteration 51700, Testing net (#0)\nI0822 16:10:16.989709 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5608\nI0822 16:10:16.990047 32360 solver.cpp:404]     Test net output #1: loss = 2.48505 (* 1 = 2.48505 loss)\nI0822 16:10:18.302244 32360 solver.cpp:228] Iteration 51700, loss = 0.00137782\nI0822 16:10:18.302290 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:10:18.302320 32360 solver.cpp:244]     Train net output #1: loss = 0.00137779 (* 1 = 0.00137779 loss)\nI0822 16:10:18.414676 32360 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0822 16:12:35.780771 32360 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0822 16:13:56.533535 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5772\nI0822 16:13:56.533875 32360 solver.cpp:404]     Test net output #1: loss = 2.36109 (* 1 = 2.36109 loss)\nI0822 16:13:57.841775 32360 solver.cpp:228] Iteration 51800, loss = 0.0011977\nI0822 16:13:57.841817 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:13:57.841840 32360 solver.cpp:244]     Train net output #1: loss = 0.00119767 (* 1 = 0.00119767 loss)\nI0822 16:13:57.948362 32360 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0822 16:16:14.948707 32360 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0822 16:17:35.727177 32360 solver.cpp:404]     Test net output #0: accuracy = 0.58304\nI0822 16:17:35.727511 32360 solver.cpp:404]     Test net output #1: loss = 2.28292 (* 1 = 2.28292 loss)\nI0822 16:17:37.035557 32360 solver.cpp:228] Iteration 51900, loss = 0.00120145\nI0822 16:17:37.035600 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:17:37.035624 32360 solver.cpp:244]     Train net output #1: loss = 0.00120142 (* 1 = 0.00120142 loss)\nI0822 16:17:37.137986 32360 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0822 16:19:54.090065 32360 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0822 16:21:14.869907 32360 solver.cpp:404]     Test net output #0: accuracy = 
0.59556\nI0822 16:21:14.870232 32360 solver.cpp:404]     Test net output #1: loss = 2.21103 (* 1 = 2.21103 loss)\nI0822 16:21:16.178508 32360 solver.cpp:228] Iteration 52000, loss = 0.00137284\nI0822 16:21:16.178550 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:21:16.178567 32360 solver.cpp:244]     Train net output #1: loss = 0.0013728 (* 1 = 0.0013728 loss)\nI0822 16:21:16.286638 32360 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0822 16:23:33.236271 32360 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0822 16:24:53.996980 32360 solver.cpp:404]     Test net output #0: accuracy = 0.59556\nI0822 16:24:53.997311 32360 solver.cpp:404]     Test net output #1: loss = 2.19485 (* 1 = 2.19485 loss)\nI0822 16:24:55.304740 32360 solver.cpp:228] Iteration 52100, loss = 0.00105869\nI0822 16:24:55.304783 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:24:55.304800 32360 solver.cpp:244]     Train net output #1: loss = 0.00105866 (* 1 = 0.00105866 loss)\nI0822 16:24:55.406535 32360 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0822 16:27:12.443204 32360 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0822 16:28:33.212559 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60228\nI0822 16:28:33.212875 32360 solver.cpp:404]     Test net output #1: loss = 2.15956 (* 1 = 2.15956 loss)\nI0822 16:28:34.521337 32360 solver.cpp:228] Iteration 52200, loss = 0.00126579\nI0822 16:28:34.521378 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:28:34.521394 32360 solver.cpp:244]     Train net output #1: loss = 0.00126576 (* 1 = 0.00126576 loss)\nI0822 16:28:34.623040 32360 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0822 16:30:51.622738 32360 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0822 16:32:12.390844 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60264\nI0822 16:32:12.391177 32360 solver.cpp:404]     Test net output #1: loss = 2.15332 (* 1 = 2.15332 loss)\nI0822 
16:32:13.699640 32360 solver.cpp:228] Iteration 52300, loss = 0.000792544\nI0822 16:32:13.699682 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:32:13.699698 32360 solver.cpp:244]     Train net output #1: loss = 0.000792513 (* 1 = 0.000792513 loss)\nI0822 16:32:13.806119 32360 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0822 16:34:30.761265 32360 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0822 16:35:51.528789 32360 solver.cpp:404]     Test net output #0: accuracy = 0.607\nI0822 16:35:51.529119 32360 solver.cpp:404]     Test net output #1: loss = 2.1385 (* 1 = 2.1385 loss)\nI0822 16:35:52.838116 32360 solver.cpp:228] Iteration 52400, loss = 0.000884429\nI0822 16:35:52.838160 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:35:52.838176 32360 solver.cpp:244]     Train net output #1: loss = 0.000884397 (* 1 = 0.000884397 loss)\nI0822 16:35:52.945842 32360 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0822 16:38:10.080868 32360 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0822 16:39:30.836064 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60568\nI0822 16:39:30.836380 32360 solver.cpp:404]     Test net output #1: loss = 2.15808 (* 1 = 2.15808 loss)\nI0822 16:39:32.144017 32360 solver.cpp:228] Iteration 52500, loss = 0.000831211\nI0822 16:39:32.144058 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:39:32.144073 32360 solver.cpp:244]     Train net output #1: loss = 0.000831179 (* 1 = 0.000831179 loss)\nI0822 16:39:32.244380 32360 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0822 16:41:49.181717 32360 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0822 16:43:09.915462 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60428\nI0822 16:43:09.915791 32360 solver.cpp:404]     Test net output #1: loss = 2.16601 (* 1 = 2.16601 loss)\nI0822 16:43:11.223141 32360 solver.cpp:228] Iteration 52600, loss = 0.000860432\nI0822 16:43:11.223182 32360 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0822 16:43:11.223198 32360 solver.cpp:244]     Train net output #1: loss = 0.0008604 (* 1 = 0.0008604 loss)\nI0822 16:43:11.323374 32360 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0822 16:45:28.361284 32360 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0822 16:46:49.095554 32360 solver.cpp:404]     Test net output #0: accuracy = 0.6014\nI0822 16:46:49.095859 32360 solver.cpp:404]     Test net output #1: loss = 2.19024 (* 1 = 2.19024 loss)\nI0822 16:46:50.403555 32360 solver.cpp:228] Iteration 52700, loss = 0.00108272\nI0822 16:46:50.403595 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:46:50.403611 32360 solver.cpp:244]     Train net output #1: loss = 0.00108269 (* 1 = 0.00108269 loss)\nI0822 16:46:50.508654 32360 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0822 16:49:07.532824 32360 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0822 16:50:28.289149 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60228\nI0822 16:50:28.289477 32360 solver.cpp:404]     Test net output #1: loss = 2.20253 (* 1 = 2.20253 loss)\nI0822 16:50:29.597812 32360 solver.cpp:228] Iteration 52800, loss = 0.00101931\nI0822 16:50:29.597852 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:50:29.597868 32360 solver.cpp:244]     Train net output #1: loss = 0.00101928 (* 1 = 0.00101928 loss)\nI0822 16:50:29.705152 32360 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0822 16:52:46.926980 32360 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0822 16:54:07.696266 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5982\nI0822 16:54:07.696588 32360 solver.cpp:404]     Test net output #1: loss = 2.23639 (* 1 = 2.23639 loss)\nI0822 16:54:09.004283 32360 solver.cpp:228] Iteration 52900, loss = 0.000991105\nI0822 16:54:09.004325 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:54:09.004341 32360 solver.cpp:244]     Train net output #1: loss = 0.000991073 (* 1 = 
0.000991073 loss)\nI0822 16:54:09.113365 32360 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0822 16:56:26.114125 32360 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0822 16:57:46.896724 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5994\nI0822 16:57:46.897053 32360 solver.cpp:404]     Test net output #1: loss = 2.24875 (* 1 = 2.24875 loss)\nI0822 16:57:48.205066 32360 solver.cpp:228] Iteration 53000, loss = 0.000833038\nI0822 16:57:48.205107 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:57:48.205123 32360 solver.cpp:244]     Train net output #1: loss = 0.000833006 (* 1 = 0.000833006 loss)\nI0822 16:57:48.305120 32360 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0822 17:00:05.310374 32360 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0822 17:01:26.066138 32360 solver.cpp:404]     Test net output #0: accuracy = 0.59528\nI0822 17:01:26.066444 32360 solver.cpp:404]     Test net output #1: loss = 2.28248 (* 1 = 2.28248 loss)\nI0822 17:01:27.374042 32360 solver.cpp:228] Iteration 53100, loss = 0.00100069\nI0822 17:01:27.374083 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:01:27.374099 32360 solver.cpp:244]     Train net output #1: loss = 0.00100066 (* 1 = 0.00100066 loss)\nI0822 17:01:27.481587 32360 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0822 17:03:44.633286 32360 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0822 17:05:05.416069 32360 solver.cpp:404]     Test net output #0: accuracy = 0.59636\nI0822 17:05:05.416407 32360 solver.cpp:404]     Test net output #1: loss = 2.293 (* 1 = 2.293 loss)\nI0822 17:05:06.725415 32360 solver.cpp:228] Iteration 53200, loss = 0.000702818\nI0822 17:05:06.725459 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:05:06.725476 32360 solver.cpp:244]     Train net output #1: loss = 0.000702786 (* 1 = 0.000702786 loss)\nI0822 17:05:06.826314 32360 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0822 17:07:24.097915 32360 
solver.cpp:337] Iteration 53300, Testing net (#0)\nI0822 17:08:44.854020 32360 solver.cpp:404]     Test net output #0: accuracy = 0.59072\nI0822 17:08:44.854357 32360 solver.cpp:404]     Test net output #1: loss = 2.32555 (* 1 = 2.32555 loss)\nI0822 17:08:46.162279 32360 solver.cpp:228] Iteration 53300, loss = 0.000860727\nI0822 17:08:46.162328 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:08:46.162345 32360 solver.cpp:244]     Train net output #1: loss = 0.000860695 (* 1 = 0.000860695 loss)\nI0822 17:08:46.261986 32360 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0822 17:11:03.197211 32360 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0822 17:12:23.940675 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5924\nI0822 17:12:23.941011 32360 solver.cpp:404]     Test net output #1: loss = 2.32748 (* 1 = 2.32748 loss)\nI0822 17:12:25.249681 32360 solver.cpp:228] Iteration 53400, loss = 0.000943329\nI0822 17:12:25.249727 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:12:25.249742 32360 solver.cpp:244]     Train net output #1: loss = 0.000943297 (* 1 = 0.000943297 loss)\nI0822 17:12:25.350284 32360 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0822 17:14:42.249285 32360 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0822 17:16:03.096918 32360 solver.cpp:404]     Test net output #0: accuracy = 0.58764\nI0822 17:16:03.097252 32360 solver.cpp:404]     Test net output #1: loss = 2.35663 (* 1 = 2.35663 loss)\nI0822 17:16:04.405637 32360 solver.cpp:228] Iteration 53500, loss = 0.000940895\nI0822 17:16:04.405675 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:16:04.405699 32360 solver.cpp:244]     Train net output #1: loss = 0.000940863 (* 1 = 0.000940863 loss)\nI0822 17:16:04.517385 32360 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0822 17:18:21.697244 32360 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0822 17:19:42.596078 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.58928\nI0822 17:19:42.596388 32360 solver.cpp:404]     Test net output #1: loss = 2.36347 (* 1 = 2.36347 loss)\nI0822 17:19:43.905141 32360 solver.cpp:228] Iteration 53600, loss = 0.000738329\nI0822 17:19:43.905187 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:19:43.905211 32360 solver.cpp:244]     Train net output #1: loss = 0.000738298 (* 1 = 0.000738298 loss)\nI0822 17:19:44.008636 32360 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0822 17:22:01.083847 32360 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0822 17:23:21.846438 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5842\nI0822 17:23:21.846773 32360 solver.cpp:404]     Test net output #1: loss = 2.39584 (* 1 = 2.39584 loss)\nI0822 17:23:23.157835 32360 solver.cpp:228] Iteration 53700, loss = 0.000982761\nI0822 17:23:23.157878 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:23:23.157896 32360 solver.cpp:244]     Train net output #1: loss = 0.000982729 (* 1 = 0.000982729 loss)\nI0822 17:23:23.259766 32360 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0822 17:25:40.473510 32360 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0822 17:27:01.160292 32360 solver.cpp:404]     Test net output #0: accuracy = 0.584\nI0822 17:27:01.160629 32360 solver.cpp:404]     Test net output #1: loss = 2.40694 (* 1 = 2.40694 loss)\nI0822 17:27:02.471552 32360 solver.cpp:228] Iteration 53800, loss = 0.000807062\nI0822 17:27:02.471593 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:27:02.471609 32360 solver.cpp:244]     Train net output #1: loss = 0.00080703 (* 1 = 0.00080703 loss)\nI0822 17:27:02.566591 32360 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0822 17:29:19.553584 32360 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0822 17:30:40.213129 32360 solver.cpp:404]     Test net output #0: accuracy = 0.57856\nI0822 17:30:40.213459 32360 solver.cpp:404]     Test net output #1: loss = 2.44235 (* 1 = 2.44235 
loss)\nI0822 17:30:41.521842 32360 solver.cpp:228] Iteration 53900, loss = 0.00095155\nI0822 17:30:41.521883 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:30:41.521899 32360 solver.cpp:244]     Train net output #1: loss = 0.000951518 (* 1 = 0.000951518 loss)\nI0822 17:30:41.623384 32360 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0822 17:32:58.717103 32360 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0822 17:34:19.416170 32360 solver.cpp:404]     Test net output #0: accuracy = 0.58152\nI0822 17:34:19.416499 32360 solver.cpp:404]     Test net output #1: loss = 2.44231 (* 1 = 2.44231 loss)\nI0822 17:34:20.724606 32360 solver.cpp:228] Iteration 54000, loss = 0.000670087\nI0822 17:34:20.724649 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:34:20.724668 32360 solver.cpp:244]     Train net output #1: loss = 0.000670055 (* 1 = 0.000670055 loss)\nI0822 17:34:20.835041 32360 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0822 17:36:38.068982 32360 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0822 17:37:58.707837 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5762\nI0822 17:37:58.708176 32360 solver.cpp:404]     Test net output #1: loss = 2.47979 (* 1 = 2.47979 loss)\nI0822 17:38:00.015700 32360 solver.cpp:228] Iteration 54100, loss = 0.000732148\nI0822 17:38:00.015743 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:38:00.015758 32360 solver.cpp:244]     Train net output #1: loss = 0.000732116 (* 1 = 0.000732116 loss)\nI0822 17:38:00.124986 32360 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0822 17:40:17.592170 32360 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0822 17:41:38.241439 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5776\nI0822 17:41:38.241768 32360 solver.cpp:404]     Test net output #1: loss = 2.47991 (* 1 = 2.47991 loss)\nI0822 17:41:39.549063 32360 solver.cpp:228] Iteration 54200, loss = 0.000700379\nI0822 17:41:39.549108 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:41:39.549123 32360 solver.cpp:244]     Train net output #1: loss = 0.000700347 (* 1 = 0.000700347 loss)\nI0822 17:41:39.664309 32360 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0822 17:43:57.165120 32360 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0822 17:45:17.799465 32360 solver.cpp:404]     Test net output #0: accuracy = 0.57268\nI0822 17:45:17.799795 32360 solver.cpp:404]     Test net output #1: loss = 2.50871 (* 1 = 2.50871 loss)\nI0822 17:45:19.106946 32360 solver.cpp:228] Iteration 54300, loss = 0.000658561\nI0822 17:45:19.106987 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:45:19.107003 32360 solver.cpp:244]     Train net output #1: loss = 0.000658529 (* 1 = 0.000658529 loss)\nI0822 17:45:19.216528 32360 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0822 17:47:36.727243 32360 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0822 17:48:57.361135 32360 solver.cpp:404]     Test net output #0: accuracy = 0.57548\nI0822 17:48:57.361452 32360 solver.cpp:404]     Test net output #1: loss = 2.50263 (* 1 = 2.50263 loss)\nI0822 17:48:58.669204 32360 solver.cpp:228] Iteration 54400, loss = 0.00073431\nI0822 17:48:58.669246 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:48:58.669262 32360 solver.cpp:244]     Train net output #1: loss = 0.000734278 (* 1 = 0.000734278 loss)\nI0822 17:48:58.779553 32360 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0822 17:51:16.335341 32360 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0822 17:52:36.989440 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56964\nI0822 17:52:36.989775 32360 solver.cpp:404]     Test net output #1: loss = 2.53877 (* 1 = 2.53877 loss)\nI0822 17:52:38.297701 32360 solver.cpp:228] Iteration 54500, loss = 0.000751267\nI0822 17:52:38.297746 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:52:38.297761 32360 solver.cpp:244]     Train net output 
#1: loss = 0.000751235 (* 1 = 0.000751235 loss)\nI0822 17:52:38.405432 32360 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0822 17:54:55.257458 32360 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0822 17:56:15.896167 32360 solver.cpp:404]     Test net output #0: accuracy = 0.57172\nI0822 17:56:15.896502 32360 solver.cpp:404]     Test net output #1: loss = 2.5452 (* 1 = 2.5452 loss)\nI0822 17:56:17.203807 32360 solver.cpp:228] Iteration 54600, loss = 0.000749542\nI0822 17:56:17.203850 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:56:17.203866 32360 solver.cpp:244]     Train net output #1: loss = 0.00074951 (* 1 = 0.00074951 loss)\nI0822 17:56:17.307713 32360 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0822 17:58:34.520208 32360 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0822 17:59:55.144140 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56732\nI0822 17:59:55.144450 32360 solver.cpp:404]     Test net output #1: loss = 2.55888 (* 1 = 2.55888 loss)\nI0822 17:59:56.451966 32360 solver.cpp:228] Iteration 54700, loss = 0.000675316\nI0822 17:59:56.451999 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:59:56.452014 32360 solver.cpp:244]     Train net output #1: loss = 0.000675284 (* 1 = 0.000675284 loss)\nI0822 17:59:56.556942 32360 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0822 18:02:13.297195 32360 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0822 18:03:34.035696 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5702\nI0822 18:03:34.036023 32360 solver.cpp:404]     Test net output #1: loss = 2.56197 (* 1 = 2.56197 loss)\nI0822 18:03:35.344801 32360 solver.cpp:228] Iteration 54800, loss = 0.000537257\nI0822 18:03:35.344846 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:03:35.344861 32360 solver.cpp:244]     Train net output #1: loss = 0.000537225 (* 1 = 0.000537225 loss)\nI0822 18:03:35.450441 32360 sgd_solver.cpp:166] Iteration 54800, lr = 
0.035\nI0822 18:05:52.205443 32360 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0822 18:07:12.955044 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56456\nI0822 18:07:12.955363 32360 solver.cpp:404]     Test net output #1: loss = 2.58304 (* 1 = 2.58304 loss)\nI0822 18:07:14.264530 32360 solver.cpp:228] Iteration 54900, loss = 0.000656072\nI0822 18:07:14.264575 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:07:14.264590 32360 solver.cpp:244]     Train net output #1: loss = 0.00065604 (* 1 = 0.00065604 loss)\nI0822 18:07:14.371450 32360 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0822 18:09:31.072960 32360 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0822 18:10:51.831176 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56636\nI0822 18:10:51.831508 32360 solver.cpp:404]     Test net output #1: loss = 2.59627 (* 1 = 2.59627 loss)\nI0822 18:10:53.140216 32360 solver.cpp:228] Iteration 55000, loss = 0.000749208\nI0822 18:10:53.140260 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:10:53.140275 32360 solver.cpp:244]     Train net output #1: loss = 0.000749176 (* 1 = 0.000749176 loss)\nI0822 18:10:53.240969 32360 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0822 18:13:10.061369 32360 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0822 18:14:30.803588 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56248\nI0822 18:14:30.803917 32360 solver.cpp:404]     Test net output #1: loss = 2.60503 (* 1 = 2.60503 loss)\nI0822 18:14:32.112679 32360 solver.cpp:228] Iteration 55100, loss = 0.000595558\nI0822 18:14:32.112723 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:14:32.112740 32360 solver.cpp:244]     Train net output #1: loss = 0.000595526 (* 1 = 0.000595526 loss)\nI0822 18:14:32.219667 32360 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0822 18:16:48.948678 32360 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0822 18:18:09.747730 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.56556\nI0822 18:18:09.748049 32360 solver.cpp:404]     Test net output #1: loss = 2.6009 (* 1 = 2.6009 loss)\nI0822 18:18:11.057379 32360 solver.cpp:228] Iteration 55200, loss = 0.000717243\nI0822 18:18:11.057421 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:18:11.057437 32360 solver.cpp:244]     Train net output #1: loss = 0.000717211 (* 1 = 0.000717211 loss)\nI0822 18:18:11.162732 32360 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0822 18:20:27.909477 32360 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0822 18:21:48.648002 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56044\nI0822 18:21:48.648344 32360 solver.cpp:404]     Test net output #1: loss = 2.62466 (* 1 = 2.62466 loss)\nI0822 18:21:49.957132 32360 solver.cpp:228] Iteration 55300, loss = 0.000582844\nI0822 18:21:49.957177 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:21:49.957193 32360 solver.cpp:244]     Train net output #1: loss = 0.000582813 (* 1 = 0.000582813 loss)\nI0822 18:21:50.064976 32360 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0822 18:24:06.889092 32360 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0822 18:25:27.653805 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56368\nI0822 18:25:27.654126 32360 solver.cpp:404]     Test net output #1: loss = 2.61538 (* 1 = 2.61538 loss)\nI0822 18:25:28.962657 32360 solver.cpp:228] Iteration 55400, loss = 0.000468493\nI0822 18:25:28.962702 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:25:28.962718 32360 solver.cpp:244]     Train net output #1: loss = 0.000468461 (* 1 = 0.000468461 loss)\nI0822 18:25:29.068873 32360 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0822 18:27:46.263664 32360 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0822 18:29:07.024703 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55892\nI0822 18:29:07.025035 32360 solver.cpp:404]     Test net 
output #1: loss = 2.63946 (* 1 = 2.63946 loss)\nI0822 18:29:08.333729 32360 solver.cpp:228] Iteration 55500, loss = 0.000734426\nI0822 18:29:08.333773 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:29:08.333788 32360 solver.cpp:244]     Train net output #1: loss = 0.000734395 (* 1 = 0.000734395 loss)\nI0822 18:29:08.437278 32360 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0822 18:31:25.643481 32360 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0822 18:32:46.394743 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56388\nI0822 18:32:46.395057 32360 solver.cpp:404]     Test net output #1: loss = 2.61297 (* 1 = 2.61297 loss)\nI0822 18:32:47.703771 32360 solver.cpp:228] Iteration 55600, loss = 0.000708802\nI0822 18:32:47.703815 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:32:47.703831 32360 solver.cpp:244]     Train net output #1: loss = 0.00070877 (* 1 = 0.00070877 loss)\nI0822 18:32:47.811832 32360 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0822 18:35:04.529552 32360 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0822 18:36:25.300413 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55992\nI0822 18:36:25.300743 32360 solver.cpp:404]     Test net output #1: loss = 2.62472 (* 1 = 2.62472 loss)\nI0822 18:36:26.608899 32360 solver.cpp:228] Iteration 55700, loss = 0.000671044\nI0822 18:36:26.608943 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:36:26.608958 32360 solver.cpp:244]     Train net output #1: loss = 0.000671012 (* 1 = 0.000671012 loss)\nI0822 18:36:26.716951 32360 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0822 18:38:43.428576 32360 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0822 18:40:04.191406 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5628\nI0822 18:40:04.191730 32360 solver.cpp:404]     Test net output #1: loss = 2.62347 (* 1 = 2.62347 loss)\nI0822 18:40:05.499557 32360 solver.cpp:228] Iteration 55800, loss = 
0.000554592\nI0822 18:40:05.499599 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:40:05.499615 32360 solver.cpp:244]     Train net output #1: loss = 0.00055456 (* 1 = 0.00055456 loss)\nI0822 18:40:05.599851 32360 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0822 18:42:22.309617 32360 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0822 18:43:43.076678 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55652\nI0822 18:43:43.076985 32360 solver.cpp:404]     Test net output #1: loss = 2.65611 (* 1 = 2.65611 loss)\nI0822 18:43:44.385695 32360 solver.cpp:228] Iteration 55900, loss = 0.00065294\nI0822 18:43:44.385740 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:43:44.385756 32360 solver.cpp:244]     Train net output #1: loss = 0.000652908 (* 1 = 0.000652908 loss)\nI0822 18:43:44.486259 32360 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0822 18:46:01.639861 32360 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0822 18:47:22.387725 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56084\nI0822 18:47:22.388025 32360 solver.cpp:404]     Test net output #1: loss = 2.64147 (* 1 = 2.64147 loss)\nI0822 18:47:23.696368 32360 solver.cpp:228] Iteration 56000, loss = 0.000701262\nI0822 18:47:23.696413 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:47:23.696427 32360 solver.cpp:244]     Train net output #1: loss = 0.000701231 (* 1 = 0.000701231 loss)\nI0822 18:47:23.804616 32360 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0822 18:49:40.460633 32360 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0822 18:51:01.225869 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55676\nI0822 18:51:01.226197 32360 solver.cpp:404]     Test net output #1: loss = 2.65517 (* 1 = 2.65517 loss)\nI0822 18:51:02.535234 32360 solver.cpp:228] Iteration 56100, loss = 0.000704216\nI0822 18:51:02.535277 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:51:02.535292 
32360 solver.cpp:244]     Train net output #1: loss = 0.000704184 (* 1 = 0.000704184 loss)\nI0822 18:51:02.637045 32360 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0822 18:53:19.388679 32360 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0822 18:54:40.175433 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55916\nI0822 18:54:40.175766 32360 solver.cpp:404]     Test net output #1: loss = 2.65318 (* 1 = 2.65318 loss)\nI0822 18:54:41.483705 32360 solver.cpp:228] Iteration 56200, loss = 0.000620966\nI0822 18:54:41.483750 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:54:41.483767 32360 solver.cpp:244]     Train net output #1: loss = 0.000620934 (* 1 = 0.000620934 loss)\nI0822 18:54:41.585508 32360 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0822 18:56:58.352694 32360 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0822 18:58:19.144047 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55468\nI0822 18:58:19.144371 32360 solver.cpp:404]     Test net output #1: loss = 2.66878 (* 1 = 2.66878 loss)\nI0822 18:58:20.453009 32360 solver.cpp:228] Iteration 56300, loss = 0.000633639\nI0822 18:58:20.453054 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:58:20.453071 32360 solver.cpp:244]     Train net output #1: loss = 0.000633607 (* 1 = 0.000633607 loss)\nI0822 18:58:20.560992 32360 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0822 19:00:37.864845 32360 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0822 19:01:58.661898 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5606\nI0822 19:01:58.662210 32360 solver.cpp:404]     Test net output #1: loss = 2.62817 (* 1 = 2.62817 loss)\nI0822 19:01:59.970626 32360 solver.cpp:228] Iteration 56400, loss = 0.000636202\nI0822 19:01:59.970672 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:01:59.970688 32360 solver.cpp:244]     Train net output #1: loss = 0.00063617 (* 1 = 0.00063617 loss)\nI0822 19:02:00.072824 32360 
sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0822 19:04:16.799711 32360 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0822 19:05:37.580072 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55652\nI0822 19:05:37.580382 32360 solver.cpp:404]     Test net output #1: loss = 2.64282 (* 1 = 2.64282 loss)\nI0822 19:05:38.888780 32360 solver.cpp:228] Iteration 56500, loss = 0.00057223\nI0822 19:05:38.888826 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:05:38.888844 32360 solver.cpp:244]     Train net output #1: loss = 0.000572198 (* 1 = 0.000572198 loss)\nI0822 19:05:38.990602 32360 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0822 19:07:55.747793 32360 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0822 19:09:16.536206 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55988\nI0822 19:09:16.536536 32360 solver.cpp:404]     Test net output #1: loss = 2.63136 (* 1 = 2.63136 loss)\nI0822 19:09:17.844558 32360 solver.cpp:228] Iteration 56600, loss = 0.000526948\nI0822 19:09:17.844604 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:09:17.844619 32360 solver.cpp:244]     Train net output #1: loss = 0.000526916 (* 1 = 0.000526916 loss)\nI0822 19:09:17.945472 32360 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0822 19:11:34.758292 32360 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0822 19:12:55.514474 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55584\nI0822 19:12:55.514816 32360 solver.cpp:404]     Test net output #1: loss = 2.64274 (* 1 = 2.64274 loss)\nI0822 19:12:56.822613 32360 solver.cpp:228] Iteration 56700, loss = 0.000518513\nI0822 19:12:56.822659 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:12:56.822675 32360 solver.cpp:244]     Train net output #1: loss = 0.000518481 (* 1 = 0.000518481 loss)\nI0822 19:12:56.928655 32360 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0822 19:15:13.751108 32360 solver.cpp:337] Iteration 56800, Testing 
net (#0)\nI0822 19:16:34.498579 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55852\nI0822 19:16:34.498914 32360 solver.cpp:404]     Test net output #1: loss = 2.64719 (* 1 = 2.64719 loss)\nI0822 19:16:35.806412 32360 solver.cpp:228] Iteration 56800, loss = 0.00074384\nI0822 19:16:35.806457 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:16:35.806473 32360 solver.cpp:244]     Train net output #1: loss = 0.000743808 (* 1 = 0.000743808 loss)\nI0822 19:16:35.908512 32360 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0822 19:18:53.064183 32360 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0822 19:20:13.820621 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55536\nI0822 19:20:13.820960 32360 solver.cpp:404]     Test net output #1: loss = 2.65201 (* 1 = 2.65201 loss)\nI0822 19:20:15.128602 32360 solver.cpp:228] Iteration 56900, loss = 0.000611019\nI0822 19:20:15.128648 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:20:15.128662 32360 solver.cpp:244]     Train net output #1: loss = 0.000610987 (* 1 = 0.000610987 loss)\nI0822 19:20:15.238246 32360 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0822 19:22:31.980080 32360 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0822 19:23:52.769371 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5596\nI0822 19:23:52.769702 32360 solver.cpp:404]     Test net output #1: loss = 2.63714 (* 1 = 2.63714 loss)\nI0822 19:23:54.078076 32360 solver.cpp:228] Iteration 57000, loss = 0.000562792\nI0822 19:23:54.078121 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:23:54.078138 32360 solver.cpp:244]     Train net output #1: loss = 0.00056276 (* 1 = 0.00056276 loss)\nI0822 19:23:54.187432 32360 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0822 19:26:10.915422 32360 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0822 19:27:31.680377 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55556\nI0822 19:27:31.680713 
32360 solver.cpp:404]     Test net output #1: loss = 2.64603 (* 1 = 2.64603 loss)\nI0822 19:27:32.988770 32360 solver.cpp:228] Iteration 57100, loss = 0.00056723\nI0822 19:27:32.988814 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:27:32.988829 32360 solver.cpp:244]     Train net output #1: loss = 0.000567198 (* 1 = 0.000567198 loss)\nI0822 19:27:33.096257 32360 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0822 19:29:50.153240 32360 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0822 19:31:10.910315 32360 solver.cpp:404]     Test net output #0: accuracy = 0.56088\nI0822 19:31:10.910650 32360 solver.cpp:404]     Test net output #1: loss = 2.61827 (* 1 = 2.61827 loss)\nI0822 19:31:12.218318 32360 solver.cpp:228] Iteration 57200, loss = 0.000683561\nI0822 19:31:12.218363 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:31:12.218379 32360 solver.cpp:244]     Train net output #1: loss = 0.000683529 (* 1 = 0.000683529 loss)\nI0822 19:31:12.327819 32360 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0822 19:33:29.447513 32360 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0822 19:34:50.215313 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55596\nI0822 19:34:50.215636 32360 solver.cpp:404]     Test net output #1: loss = 2.64538 (* 1 = 2.64538 loss)\nI0822 19:34:51.523133 32360 solver.cpp:228] Iteration 57300, loss = 0.000577577\nI0822 19:34:51.523176 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:34:51.523192 32360 solver.cpp:244]     Train net output #1: loss = 0.000577545 (* 1 = 0.000577545 loss)\nI0822 19:34:51.629475 32360 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0822 19:37:08.372555 32360 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0822 19:38:29.152572 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55804\nI0822 19:38:29.152909 32360 solver.cpp:404]     Test net output #1: loss = 2.64899 (* 1 = 2.64899 loss)\nI0822 19:38:30.460708 32360 
solver.cpp:228] Iteration 57400, loss = 0.000530199\nI0822 19:38:30.460752 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:38:30.460767 32360 solver.cpp:244]     Train net output #1: loss = 0.000530167 (* 1 = 0.000530167 loss)\nI0822 19:38:30.563237 32360 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0822 19:40:47.200491 32360 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0822 19:42:08.004582 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55592\nI0822 19:42:08.004904 32360 solver.cpp:404]     Test net output #1: loss = 2.64648 (* 1 = 2.64648 loss)\nI0822 19:42:09.313004 32360 solver.cpp:228] Iteration 57500, loss = 0.0006652\nI0822 19:42:09.313046 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:42:09.313061 32360 solver.cpp:244]     Train net output #1: loss = 0.000665168 (* 1 = 0.000665168 loss)\nI0822 19:42:09.426610 32360 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0822 19:44:26.114039 32360 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0822 19:45:46.796377 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5596\nI0822 19:45:46.796707 32360 solver.cpp:404]     Test net output #1: loss = 2.63999 (* 1 = 2.63999 loss)\nI0822 19:45:48.105077 32360 solver.cpp:228] Iteration 57600, loss = 0.000646493\nI0822 19:45:48.105118 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:45:48.105134 32360 solver.cpp:244]     Train net output #1: loss = 0.000646461 (* 1 = 0.000646461 loss)\nI0822 19:45:48.215632 32360 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0822 19:48:04.851974 32360 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0822 19:49:25.529815 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5536\nI0822 19:49:25.530153 32360 solver.cpp:404]     Test net output #1: loss = 2.66873 (* 1 = 2.66873 loss)\nI0822 19:49:26.837810 32360 solver.cpp:228] Iteration 57700, loss = 0.000561003\nI0822 19:49:26.837853 32360 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0822 19:49:26.837869 32360 solver.cpp:244]     Train net output #1: loss = 0.000560971 (* 1 = 0.000560971 loss)\nI0822 19:49:26.946485 32360 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0822 19:51:44.114043 32360 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0822 19:53:04.773599 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55676\nI0822 19:53:04.773928 32360 solver.cpp:404]     Test net output #1: loss = 2.65409 (* 1 = 2.65409 loss)\nI0822 19:53:06.082953 32360 solver.cpp:228] Iteration 57800, loss = 0.000579844\nI0822 19:53:06.082998 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:53:06.083014 32360 solver.cpp:244]     Train net output #1: loss = 0.000579812 (* 1 = 0.000579812 loss)\nI0822 19:53:06.187935 32360 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0822 19:55:23.291916 32360 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0822 19:56:43.970649 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55432\nI0822 19:56:43.970984 32360 solver.cpp:404]     Test net output #1: loss = 2.66164 (* 1 = 2.66164 loss)\nI0822 19:56:45.278815 32360 solver.cpp:228] Iteration 57900, loss = 0.000602722\nI0822 19:56:45.278856 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:56:45.278872 32360 solver.cpp:244]     Train net output #1: loss = 0.00060269 (* 1 = 0.00060269 loss)\nI0822 19:56:45.381171 32360 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0822 19:59:02.473469 32360 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0822 20:00:23.109340 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55716\nI0822 20:00:23.109674 32360 solver.cpp:404]     Test net output #1: loss = 2.64996 (* 1 = 2.64996 loss)\nI0822 20:00:24.417218 32360 solver.cpp:228] Iteration 58000, loss = 0.000586525\nI0822 20:00:24.417261 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:00:24.417276 32360 solver.cpp:244]     Train net output #1: loss = 0.000586493 (* 1 = 0.000586493 
loss)\nI0822 20:00:24.518193 32360 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0822 20:02:41.226843 32360 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0822 20:04:01.853911 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55284\nI0822 20:04:01.854243 32360 solver.cpp:404]     Test net output #1: loss = 2.67235 (* 1 = 2.67235 loss)\nI0822 20:04:03.161965 32360 solver.cpp:228] Iteration 58100, loss = 0.000540209\nI0822 20:04:03.162009 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:04:03.162024 32360 solver.cpp:244]     Train net output #1: loss = 0.000540177 (* 1 = 0.000540177 loss)\nI0822 20:04:03.268997 32360 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0822 20:06:19.995225 32360 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0822 20:07:40.643250 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55596\nI0822 20:07:40.643592 32360 solver.cpp:404]     Test net output #1: loss = 2.66192 (* 1 = 2.66192 loss)\nI0822 20:07:41.950817 32360 solver.cpp:228] Iteration 58200, loss = 0.000553844\nI0822 20:07:41.950860 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:07:41.950876 32360 solver.cpp:244]     Train net output #1: loss = 0.000553812 (* 1 = 0.000553812 loss)\nI0822 20:07:42.052225 32360 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0822 20:09:58.751770 32360 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0822 20:11:19.389432 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55308\nI0822 20:11:19.389771 32360 solver.cpp:404]     Test net output #1: loss = 2.66627 (* 1 = 2.66627 loss)\nI0822 20:11:20.697999 32360 solver.cpp:228] Iteration 58300, loss = 0.00069599\nI0822 20:11:20.698043 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:11:20.698060 32360 solver.cpp:244]     Train net output #1: loss = 0.000695958 (* 1 = 0.000695958 loss)\nI0822 20:11:20.795246 32360 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0822 20:13:37.911769 32360 
solver.cpp:337] Iteration 58400, Testing net (#0)\nI0822 20:14:58.604027 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55604\nI0822 20:14:58.604357 32360 solver.cpp:404]     Test net output #1: loss = 2.658 (* 1 = 2.658 loss)\nI0822 20:14:59.913705 32360 solver.cpp:228] Iteration 58400, loss = 0.000822314\nI0822 20:14:59.913753 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:14:59.913776 32360 solver.cpp:244]     Train net output #1: loss = 0.000822282 (* 1 = 0.000822282 loss)\nI0822 20:15:00.013339 32360 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0822 20:17:16.707875 32360 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0822 20:18:37.491055 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55396\nI0822 20:18:37.491391 32360 solver.cpp:404]     Test net output #1: loss = 2.65711 (* 1 = 2.65711 loss)\nI0822 20:18:38.799587 32360 solver.cpp:228] Iteration 58500, loss = 0.000607668\nI0822 20:18:38.799635 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:18:38.799657 32360 solver.cpp:244]     Train net output #1: loss = 0.000607636 (* 1 = 0.000607636 loss)\nI0822 20:18:38.904064 32360 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0822 20:20:55.700008 32360 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0822 20:22:16.493412 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55812\nI0822 20:22:16.493775 32360 solver.cpp:404]     Test net output #1: loss = 2.63551 (* 1 = 2.63551 loss)\nI0822 20:22:17.802199 32360 solver.cpp:228] Iteration 58600, loss = 0.000575467\nI0822 20:22:17.802247 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:22:17.802275 32360 solver.cpp:244]     Train net output #1: loss = 0.000575435 (* 1 = 0.000575435 loss)\nI0822 20:22:17.908229 32360 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0822 20:24:34.654426 32360 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0822 20:25:55.428473 32360 solver.cpp:404]     Test net output #0: 
accuracy = 0.55392\nI0822 20:25:55.428812 32360 solver.cpp:404]     Test net output #1: loss = 2.66239 (* 1 = 2.66239 loss)\nI0822 20:25:56.738061 32360 solver.cpp:228] Iteration 58700, loss = 0.000533752\nI0822 20:25:56.738109 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:25:56.738132 32360 solver.cpp:244]     Train net output #1: loss = 0.00053372 (* 1 = 0.00053372 loss)\nI0822 20:25:56.835605 32360 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0822 20:28:13.958103 32360 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0822 20:29:34.730562 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5576\nI0822 20:29:34.730892 32360 solver.cpp:404]     Test net output #1: loss = 2.64465 (* 1 = 2.64465 loss)\nI0822 20:29:36.039674 32360 solver.cpp:228] Iteration 58800, loss = 0.000499804\nI0822 20:29:36.039722 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:29:36.039746 32360 solver.cpp:244]     Train net output #1: loss = 0.000499772 (* 1 = 0.000499772 loss)\nI0822 20:29:36.145469 32360 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0822 20:31:52.845409 32360 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0822 20:33:13.636061 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55344\nI0822 20:33:13.636404 32360 solver.cpp:404]     Test net output #1: loss = 2.65906 (* 1 = 2.65906 loss)\nI0822 20:33:14.944933 32360 solver.cpp:228] Iteration 58900, loss = 0.000472212\nI0822 20:33:14.944981 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:33:14.945005 32360 solver.cpp:244]     Train net output #1: loss = 0.00047218 (* 1 = 0.00047218 loss)\nI0822 20:33:15.054726 32360 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0822 20:35:31.850841 32360 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0822 20:36:52.621789 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55904\nI0822 20:36:52.622129 32360 solver.cpp:404]     Test net output #1: loss = 2.61604 (* 1 = 2.61604 
loss)\nI0822 20:36:53.930485 32360 solver.cpp:228] Iteration 59000, loss = 0.000651839\nI0822 20:36:53.930534 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:36:53.930557 32360 solver.cpp:244]     Train net output #1: loss = 0.000651807 (* 1 = 0.000651807 loss)\nI0822 20:36:54.040885 32360 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0822 20:39:11.150527 32360 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0822 20:40:31.914680 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55484\nI0822 20:40:31.915021 32360 solver.cpp:404]     Test net output #1: loss = 2.63713 (* 1 = 2.63713 loss)\nI0822 20:40:33.223137 32360 solver.cpp:228] Iteration 59100, loss = 0.000483534\nI0822 20:40:33.223186 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:40:33.223208 32360 solver.cpp:244]     Train net output #1: loss = 0.000483502 (* 1 = 0.000483502 loss)\nI0822 20:40:33.326539 32360 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0822 20:42:50.403210 32360 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0822 20:44:11.173328 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55812\nI0822 20:44:11.173645 32360 solver.cpp:404]     Test net output #1: loss = 2.6219 (* 1 = 2.6219 loss)\nI0822 20:44:12.482071 32360 solver.cpp:228] Iteration 59200, loss = 0.000518727\nI0822 20:44:12.482118 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:44:12.482141 32360 solver.cpp:244]     Train net output #1: loss = 0.000518695 (* 1 = 0.000518695 loss)\nI0822 20:44:12.581725 32360 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0822 20:46:29.292744 32360 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0822 20:47:50.070632 32360 solver.cpp:404]     Test net output #0: accuracy = 0.554\nI0822 20:47:50.070963 32360 solver.cpp:404]     Test net output #1: loss = 2.63575 (* 1 = 2.63575 loss)\nI0822 20:47:51.378283 32360 solver.cpp:228] Iteration 59300, loss = 0.000523704\nI0822 20:47:51.378329 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:47:51.378351 32360 solver.cpp:244]     Train net output #1: loss = 0.000523672 (* 1 = 0.000523672 loss)\nI0822 20:47:51.485844 32360 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0822 20:50:08.636518 32360 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0822 20:51:29.402386 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55724\nI0822 20:51:29.402777 32360 solver.cpp:404]     Test net output #1: loss = 2.62938 (* 1 = 2.62938 loss)\nI0822 20:51:30.711735 32360 solver.cpp:228] Iteration 59400, loss = 0.0005825\nI0822 20:51:30.711781 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:51:30.711803 32360 solver.cpp:244]     Train net output #1: loss = 0.000582468 (* 1 = 0.000582468 loss)\nI0822 20:51:30.819569 32360 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0822 20:53:47.595765 32360 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0822 20:55:08.365423 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55192\nI0822 20:55:08.365726 32360 solver.cpp:404]     Test net output #1: loss = 2.65458 (* 1 = 2.65458 loss)\nI0822 20:55:09.673396 32360 solver.cpp:228] Iteration 59500, loss = 0.000456098\nI0822 20:55:09.673437 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:55:09.673454 32360 solver.cpp:244]     Train net output #1: loss = 0.000456067 (* 1 = 0.000456067 loss)\nI0822 20:55:09.777433 32360 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0822 20:57:26.842545 32360 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0822 20:58:47.614159 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55644\nI0822 20:58:47.614495 32360 solver.cpp:404]     Test net output #1: loss = 2.63326 (* 1 = 2.63326 loss)\nI0822 20:58:48.923048 32360 solver.cpp:228] Iteration 59600, loss = 0.000735223\nI0822 20:58:48.923094 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:58:48.923108 32360 solver.cpp:244]     Train net output 
#1: loss = 0.000735191 (* 1 = 0.000735191 loss)\nI0822 20:58:49.026016 32360 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0822 21:01:05.796165 32360 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0822 21:02:26.558585 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55384\nI0822 21:02:26.558938 32360 solver.cpp:404]     Test net output #1: loss = 2.64222 (* 1 = 2.64222 loss)\nI0822 21:02:27.867303 32360 solver.cpp:228] Iteration 59700, loss = 0.000587378\nI0822 21:02:27.867352 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:02:27.867368 32360 solver.cpp:244]     Train net output #1: loss = 0.000587346 (* 1 = 0.000587346 loss)\nI0822 21:02:27.974519 32360 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0822 21:04:44.913931 32360 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0822 21:06:05.681138 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5554\nI0822 21:06:05.681468 32360 solver.cpp:404]     Test net output #1: loss = 2.63743 (* 1 = 2.63743 loss)\nI0822 21:06:06.988991 32360 solver.cpp:228] Iteration 59800, loss = 0.000531877\nI0822 21:06:06.989035 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:06:06.989050 32360 solver.cpp:244]     Train net output #1: loss = 0.000531845 (* 1 = 0.000531845 loss)\nI0822 21:06:07.093302 32360 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0822 21:08:24.061149 32360 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0822 21:09:44.833132 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55308\nI0822 21:09:44.833464 32360 solver.cpp:404]     Test net output #1: loss = 2.64172 (* 1 = 2.64172 loss)\nI0822 21:09:46.141526 32360 solver.cpp:228] Iteration 59900, loss = 0.000568958\nI0822 21:09:46.141569 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:09:46.141585 32360 solver.cpp:244]     Train net output #1: loss = 0.000568926 (* 1 = 0.000568926 loss)\nI0822 21:09:46.249943 32360 sgd_solver.cpp:166] Iteration 59900, lr = 
0.035\nI0822 21:12:03.097782 32360 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0822 21:13:24.795871 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55608\nI0822 21:13:24.796176 32360 solver.cpp:404]     Test net output #1: loss = 2.63202 (* 1 = 2.63202 loss)\nI0822 21:13:26.109117 32360 solver.cpp:228] Iteration 60000, loss = 0.000616394\nI0822 21:13:26.109163 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:13:26.109179 32360 solver.cpp:244]     Train net output #1: loss = 0.000616362 (* 1 = 0.000616362 loss)\nI0822 21:13:26.211202 32360 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0822 21:15:43.218317 32360 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0822 21:17:04.904677 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55288\nI0822 21:17:04.904966 32360 solver.cpp:404]     Test net output #1: loss = 2.63647 (* 1 = 2.63647 loss)\nI0822 21:17:06.218129 32360 solver.cpp:228] Iteration 60100, loss = 0.000612983\nI0822 21:17:06.218174 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:17:06.218189 32360 solver.cpp:244]     Train net output #1: loss = 0.000612951 (* 1 = 0.000612951 loss)\nI0822 21:17:06.319958 32360 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0822 21:19:23.400713 32360 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0822 21:20:45.088255 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55544\nI0822 21:20:45.088562 32360 solver.cpp:404]     Test net output #1: loss = 2.63288 (* 1 = 2.63288 loss)\nI0822 21:20:46.402231 32360 solver.cpp:228] Iteration 60200, loss = 0.000606009\nI0822 21:20:46.402276 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:20:46.402292 32360 solver.cpp:244]     Train net output #1: loss = 0.000605977 (* 1 = 0.000605977 loss)\nI0822 21:20:46.508867 32360 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0822 21:23:03.586676 32360 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0822 21:24:25.225467 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.552\nI0822 21:24:25.225725 32360 solver.cpp:404]     Test net output #1: loss = 2.64797 (* 1 = 2.64797 loss)\nI0822 21:24:26.537523 32360 solver.cpp:228] Iteration 60300, loss = 0.000534139\nI0822 21:24:26.537569 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:24:26.537585 32360 solver.cpp:244]     Train net output #1: loss = 0.000534107 (* 1 = 0.000534107 loss)\nI0822 21:24:26.644043 32360 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0822 21:26:44.297643 32360 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0822 21:28:05.973083 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5546\nI0822 21:28:05.973390 32360 solver.cpp:404]     Test net output #1: loss = 2.63691 (* 1 = 2.63691 loss)\nI0822 21:28:07.285681 32360 solver.cpp:228] Iteration 60400, loss = 0.000628482\nI0822 21:28:07.285718 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:28:07.285733 32360 solver.cpp:244]     Train net output #1: loss = 0.00062845 (* 1 = 0.00062845 loss)\nI0822 21:28:07.394857 32360 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0822 21:30:24.483597 32360 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0822 21:31:46.217728 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55\nI0822 21:31:46.218020 32360 solver.cpp:404]     Test net output #1: loss = 2.64307 (* 1 = 2.64307 loss)\nI0822 21:31:47.529367 32360 solver.cpp:228] Iteration 60500, loss = 0.000638712\nI0822 21:31:47.529412 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:31:47.529428 32360 solver.cpp:244]     Train net output #1: loss = 0.00063868 (* 1 = 0.00063868 loss)\nI0822 21:31:47.637497 32360 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0822 21:34:05.249699 32360 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0822 21:35:26.989056 32360 solver.cpp:404]     Test net output #0: accuracy = 0.554\nI0822 21:35:26.989325 32360 solver.cpp:404]     Test net output #1: 
loss = 2.62632 (* 1 = 2.62632 loss)\nI0822 21:35:28.300475 32360 solver.cpp:228] Iteration 60600, loss = 0.000541836\nI0822 21:35:28.300518 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:35:28.300534 32360 solver.cpp:244]     Train net output #1: loss = 0.000541804 (* 1 = 0.000541804 loss)\nI0822 21:35:28.407505 32360 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0822 21:37:45.987421 32360 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0822 21:39:07.724957 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55052\nI0822 21:39:07.725244 32360 solver.cpp:404]     Test net output #1: loss = 2.62915 (* 1 = 2.62915 loss)\nI0822 21:39:09.037386 32360 solver.cpp:228] Iteration 60700, loss = 0.000433401\nI0822 21:39:09.037432 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:39:09.037448 32360 solver.cpp:244]     Train net output #1: loss = 0.000433369 (* 1 = 0.000433369 loss)\nI0822 21:39:09.151098 32360 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0822 21:41:26.707523 32360 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0822 21:42:48.112262 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5558\nI0822 21:42:48.112614 32360 solver.cpp:404]     Test net output #1: loss = 2.60335 (* 1 = 2.60335 loss)\nI0822 21:42:49.425871 32360 solver.cpp:228] Iteration 60800, loss = 0.0005814\nI0822 21:42:49.425915 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:42:49.425932 32360 solver.cpp:244]     Train net output #1: loss = 0.000581368 (* 1 = 0.000581368 loss)\nI0822 21:42:49.523563 32360 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0822 21:45:07.108949 32360 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0822 21:46:28.416157 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5514\nI0822 21:46:28.416476 32360 solver.cpp:404]     Test net output #1: loss = 2.62441 (* 1 = 2.62441 loss)\nI0822 21:46:29.728960 32360 solver.cpp:228] Iteration 60900, loss = 
0.000574605\nI0822 21:46:29.729005 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:46:29.729022 32360 solver.cpp:244]     Train net output #1: loss = 0.000574573 (* 1 = 0.000574573 loss)\nI0822 21:46:29.835975 32360 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0822 21:48:47.471923 32360 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0822 21:50:09.167798 32360 solver.cpp:404]     Test net output #0: accuracy = 0.554\nI0822 21:50:09.168118 32360 solver.cpp:404]     Test net output #1: loss = 2.60971 (* 1 = 2.60971 loss)\nI0822 21:50:10.482497 32360 solver.cpp:228] Iteration 61000, loss = 0.000654062\nI0822 21:50:10.482544 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:50:10.482568 32360 solver.cpp:244]     Train net output #1: loss = 0.00065403 (* 1 = 0.00065403 loss)\nI0822 21:50:10.581677 32360 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0822 21:52:27.767663 32360 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0822 21:53:49.510562 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55028\nI0822 21:53:49.510869 32360 solver.cpp:404]     Test net output #1: loss = 2.62573 (* 1 = 2.62573 loss)\nI0822 21:53:50.826134 32360 solver.cpp:228] Iteration 61100, loss = 0.000565481\nI0822 21:53:50.826182 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:53:50.826198 32360 solver.cpp:244]     Train net output #1: loss = 0.000565449 (* 1 = 0.000565449 loss)\nI0822 21:53:50.926957 32360 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0822 21:56:08.080123 32360 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0822 21:57:29.830541 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55332\nI0822 21:57:29.830827 32360 solver.cpp:404]     Test net output #1: loss = 2.60568 (* 1 = 2.60568 loss)\nI0822 21:57:31.144528 32360 solver.cpp:228] Iteration 61200, loss = 0.00066271\nI0822 21:57:31.144578 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:57:31.144594 32360 
solver.cpp:244]     Train net output #1: loss = 0.000662678 (* 1 = 0.000662678 loss)\nI0822 21:57:31.243388 32360 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0822 21:59:48.893411 32360 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0822 22:01:10.645450 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55008\nI0822 22:01:10.645750 32360 solver.cpp:404]     Test net output #1: loss = 2.62884 (* 1 = 2.62884 loss)\nI0822 22:01:11.960402 32360 solver.cpp:228] Iteration 61300, loss = 0.000528036\nI0822 22:01:11.960443 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:01:11.960458 32360 solver.cpp:244]     Train net output #1: loss = 0.000528004 (* 1 = 0.000528004 loss)\nI0822 22:01:12.058253 32360 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0822 22:03:29.719367 32360 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0822 22:04:50.419626 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55348\nI0822 22:04:50.419966 32360 solver.cpp:404]     Test net output #1: loss = 2.61521 (* 1 = 2.61521 loss)\nI0822 22:04:51.731382 32360 solver.cpp:228] Iteration 61400, loss = 0.000411328\nI0822 22:04:51.731420 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:04:51.731436 32360 solver.cpp:244]     Train net output #1: loss = 0.000411296 (* 1 = 0.000411296 loss)\nI0822 22:04:51.845229 32360 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0822 22:07:08.980653 32360 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0822 22:08:29.663995 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54964\nI0822 22:08:29.664342 32360 solver.cpp:404]     Test net output #1: loss = 2.62168 (* 1 = 2.62168 loss)\nI0822 22:08:30.976636 32360 solver.cpp:228] Iteration 61500, loss = 0.000545646\nI0822 22:08:30.976671 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:08:30.976686 32360 solver.cpp:244]     Train net output #1: loss = 0.000545614 (* 1 = 0.000545614 loss)\nI0822 22:08:31.080612 32360 
sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0822 22:10:48.240490 32360 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0822 22:12:09.991611 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55296\nI0822 22:12:09.991948 32360 solver.cpp:404]     Test net output #1: loss = 2.61585 (* 1 = 2.61585 loss)\nI0822 22:12:11.308064 32360 solver.cpp:228] Iteration 61600, loss = 0.000563256\nI0822 22:12:11.308116 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:12:11.308132 32360 solver.cpp:244]     Train net output #1: loss = 0.000563224 (* 1 = 0.000563224 loss)\nI0822 22:12:11.409674 32360 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0822 22:14:28.811903 32360 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0822 22:15:50.572060 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54976\nI0822 22:15:50.572360 32360 solver.cpp:404]     Test net output #1: loss = 2.61834 (* 1 = 2.61834 loss)\nI0822 22:15:51.887607 32360 solver.cpp:228] Iteration 61700, loss = 0.000484987\nI0822 22:15:51.887650 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:15:51.887670 32360 solver.cpp:244]     Train net output #1: loss = 0.000484955 (* 1 = 0.000484955 loss)\nI0822 22:15:51.991638 32360 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0822 22:18:09.980382 32360 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0822 22:19:31.721416 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55352\nI0822 22:19:31.721698 32360 solver.cpp:404]     Test net output #1: loss = 2.59776 (* 1 = 2.59776 loss)\nI0822 22:19:33.036568 32360 solver.cpp:228] Iteration 61800, loss = 0.000455403\nI0822 22:19:33.036622 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:19:33.036638 32360 solver.cpp:244]     Train net output #1: loss = 0.000455371 (* 1 = 0.000455371 loss)\nI0822 22:19:33.135977 32360 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0822 22:21:50.481204 32360 solver.cpp:337] Iteration 61900, Testing 
net (#0)\nI0822 22:23:12.206784 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54892\nI0822 22:23:12.207141 32360 solver.cpp:404]     Test net output #1: loss = 2.63281 (* 1 = 2.63281 loss)\nI0822 22:23:13.522053 32360 solver.cpp:228] Iteration 61900, loss = 0.000502773\nI0822 22:23:13.522095 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:23:13.522111 32360 solver.cpp:244]     Train net output #1: loss = 0.000502741 (* 1 = 0.000502741 loss)\nI0822 22:23:13.631340 32360 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0822 22:25:31.039964 32360 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0822 22:26:52.807132 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55176\nI0822 22:26:52.807428 32360 solver.cpp:404]     Test net output #1: loss = 2.61865 (* 1 = 2.61865 loss)\nI0822 22:26:54.123055 32360 solver.cpp:228] Iteration 62000, loss = 0.000582605\nI0822 22:26:54.123098 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:26:54.123114 32360 solver.cpp:244]     Train net output #1: loss = 0.000582573 (* 1 = 0.000582573 loss)\nI0822 22:26:54.229580 32360 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0822 22:29:12.198367 32360 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0822 22:30:33.946682 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54792\nI0822 22:30:33.947000 32360 solver.cpp:404]     Test net output #1: loss = 2.63651 (* 1 = 2.63651 loss)\nI0822 22:30:35.263206 32360 solver.cpp:228] Iteration 62100, loss = 0.000498215\nI0822 22:30:35.263249 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:30:35.263265 32360 solver.cpp:244]     Train net output #1: loss = 0.000498183 (* 1 = 0.000498183 loss)\nI0822 22:30:35.368497 32360 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0822 22:32:53.395473 32360 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0822 22:34:15.169383 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55176\nI0822 
22:34:15.169651 32360 solver.cpp:404]     Test net output #1: loss = 2.61073 (* 1 = 2.61073 loss)\nI0822 22:34:16.484827 32360 solver.cpp:228] Iteration 62200, loss = 0.00069449\nI0822 22:34:16.484880 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:34:16.484899 32360 solver.cpp:244]     Train net output #1: loss = 0.000694458 (* 1 = 0.000694458 loss)\nI0822 22:34:16.584163 32360 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0822 22:36:34.569325 32360 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0822 22:37:56.349869 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54844\nI0822 22:37:56.350195 32360 solver.cpp:404]     Test net output #1: loss = 2.6194 (* 1 = 2.6194 loss)\nI0822 22:37:57.665844 32360 solver.cpp:228] Iteration 62300, loss = 0.00044437\nI0822 22:37:57.665900 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:37:57.665917 32360 solver.cpp:244]     Train net output #1: loss = 0.000444338 (* 1 = 0.000444338 loss)\nI0822 22:37:57.768301 32360 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0822 22:40:15.786223 32360 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0822 22:41:37.408027 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5526\nI0822 22:41:37.408366 32360 solver.cpp:404]     Test net output #1: loss = 2.6008 (* 1 = 2.6008 loss)\nI0822 22:41:38.723966 32360 solver.cpp:228] Iteration 62400, loss = 0.000473009\nI0822 22:41:38.724011 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:41:38.724026 32360 solver.cpp:244]     Train net output #1: loss = 0.000472977 (* 1 = 0.000472977 loss)\nI0822 22:41:38.826448 32360 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0822 22:43:56.773540 32360 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0822 22:45:18.544095 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54876\nI0822 22:45:18.544454 32360 solver.cpp:404]     Test net output #1: loss = 2.62172 (* 1 = 2.62172 loss)\nI0822 22:45:19.859390 
32360 solver.cpp:228] Iteration 62500, loss = 0.000518329\nI0822 22:45:19.859436 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:45:19.859452 32360 solver.cpp:244]     Train net output #1: loss = 0.000518297 (* 1 = 0.000518297 loss)\nI0822 22:45:19.962630 32360 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0822 22:47:37.931110 32360 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0822 22:48:59.718899 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5512\nI0822 22:48:59.719259 32360 solver.cpp:404]     Test net output #1: loss = 2.61286 (* 1 = 2.61286 loss)\nI0822 22:49:01.031080 32360 solver.cpp:228] Iteration 62600, loss = 0.000531911\nI0822 22:49:01.031136 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:49:01.031152 32360 solver.cpp:244]     Train net output #1: loss = 0.000531879 (* 1 = 0.000531879 loss)\nI0822 22:49:01.133873 32360 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0822 22:51:19.178535 32360 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0822 22:52:40.980115 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54648\nI0822 22:52:40.980468 32360 solver.cpp:404]     Test net output #1: loss = 2.64157 (* 1 = 2.64157 loss)\nI0822 22:52:42.292690 32360 solver.cpp:228] Iteration 62700, loss = 0.000460273\nI0822 22:52:42.292743 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:52:42.292759 32360 solver.cpp:244]     Train net output #1: loss = 0.000460241 (* 1 = 0.000460241 loss)\nI0822 22:52:42.394965 32360 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0822 22:55:00.091511 32360 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0822 22:56:21.876299 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5506\nI0822 22:56:21.876699 32360 solver.cpp:404]     Test net output #1: loss = 2.60465 (* 1 = 2.60465 loss)\nI0822 22:56:23.188180 32360 solver.cpp:228] Iteration 62800, loss = 0.000529046\nI0822 22:56:23.188233 32360 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0822 22:56:23.188251 32360 solver.cpp:244]     Train net output #1: loss = 0.000529015 (* 1 = 0.000529015 loss)\nI0822 22:56:23.292996 32360 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0822 22:58:41.269989 32360 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0822 23:00:03.056048 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54768\nI0822 23:00:03.056398 32360 solver.cpp:404]     Test net output #1: loss = 2.62339 (* 1 = 2.62339 loss)\nI0822 23:00:04.368325 32360 solver.cpp:228] Iteration 62900, loss = 0.000489091\nI0822 23:00:04.368378 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:00:04.368396 32360 solver.cpp:244]     Train net output #1: loss = 0.000489059 (* 1 = 0.000489059 loss)\nI0822 23:00:04.478128 32360 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0822 23:02:22.422551 32360 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0822 23:03:44.190773 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55248\nI0822 23:03:44.191135 32360 solver.cpp:404]     Test net output #1: loss = 2.5849 (* 1 = 2.5849 loss)\nI0822 23:03:45.503294 32360 solver.cpp:228] Iteration 63000, loss = 0.000472411\nI0822 23:03:45.503350 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:03:45.503366 32360 solver.cpp:244]     Train net output #1: loss = 0.000472379 (* 1 = 0.000472379 loss)\nI0822 23:03:45.608580 32360 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0822 23:06:03.576660 32360 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0822 23:07:25.237534 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54696\nI0822 23:07:25.237833 32360 solver.cpp:404]     Test net output #1: loss = 2.62715 (* 1 = 2.62715 loss)\nI0822 23:07:26.550480 32360 solver.cpp:228] Iteration 63100, loss = 0.000616977\nI0822 23:07:26.550539 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:07:26.550556 32360 solver.cpp:244]     Train net output #1: loss = 0.000616945 (* 1 = 
0.000616945 loss)\nI0822 23:07:26.660043 32360 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0822 23:09:44.790138 32360 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0822 23:11:06.523233 32360 solver.cpp:404]     Test net output #0: accuracy = 0.55028\nI0822 23:11:06.523555 32360 solver.cpp:404]     Test net output #1: loss = 2.60317 (* 1 = 2.60317 loss)\nI0822 23:11:07.835470 32360 solver.cpp:228] Iteration 63200, loss = 0.000494575\nI0822 23:11:07.835530 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:11:07.835548 32360 solver.cpp:244]     Train net output #1: loss = 0.000494543 (* 1 = 0.000494543 loss)\nI0822 23:11:07.945549 32360 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0822 23:13:26.286108 32360 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0822 23:14:48.017485 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5478\nI0822 23:14:48.017765 32360 solver.cpp:404]     Test net output #1: loss = 2.61731 (* 1 = 2.61731 loss)\nI0822 23:14:49.329588 32360 solver.cpp:228] Iteration 63300, loss = 0.00051517\nI0822 23:14:49.329644 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:14:49.329661 32360 solver.cpp:244]     Train net output #1: loss = 0.000515138 (* 1 = 0.000515138 loss)\nI0822 23:14:49.436089 32360 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0822 23:17:07.211940 32360 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0822 23:18:28.966879 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54948\nI0822 23:18:28.967170 32360 solver.cpp:404]     Test net output #1: loss = 2.60545 (* 1 = 2.60545 loss)\nI0822 23:18:30.279418 32360 solver.cpp:228] Iteration 63400, loss = 0.000486069\nI0822 23:18:30.279469 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:18:30.279485 32360 solver.cpp:244]     Train net output #1: loss = 0.000486037 (* 1 = 0.000486037 loss)\nI0822 23:18:30.383577 32360 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0822 23:20:48.645309 
32360 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0822 23:22:10.383138 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54568\nI0822 23:22:10.383504 32360 solver.cpp:404]     Test net output #1: loss = 2.62872 (* 1 = 2.62872 loss)\nI0822 23:22:11.695425 32360 solver.cpp:228] Iteration 63500, loss = 0.000533299\nI0822 23:22:11.695478 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:22:11.695500 32360 solver.cpp:244]     Train net output #1: loss = 0.000533268 (* 1 = 0.000533268 loss)\nI0822 23:22:11.799335 32360 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0822 23:24:30.115213 32360 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0822 23:25:51.839427 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54948\nI0822 23:25:51.839725 32360 solver.cpp:404]     Test net output #1: loss = 2.60487 (* 1 = 2.60487 loss)\nI0822 23:25:53.151696 32360 solver.cpp:228] Iteration 63600, loss = 0.000438484\nI0822 23:25:53.151749 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:25:53.151767 32360 solver.cpp:244]     Train net output #1: loss = 0.000438452 (* 1 = 0.000438452 loss)\nI0822 23:25:53.253078 32360 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0822 23:28:10.998483 32360 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0822 23:29:32.688768 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54516\nI0822 23:29:32.689041 32360 solver.cpp:404]     Test net output #1: loss = 2.63297 (* 1 = 2.63297 loss)\nI0822 23:29:34.001410 32360 solver.cpp:228] Iteration 63700, loss = 0.000488781\nI0822 23:29:34.001464 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:29:34.001482 32360 solver.cpp:244]     Train net output #1: loss = 0.000488749 (* 1 = 0.000488749 loss)\nI0822 23:29:34.109014 32360 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0822 23:31:51.893440 32360 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0822 23:33:13.188410 32360 solver.cpp:404]     Test net 
output #0: accuracy = 0.54908\nI0822 23:33:13.188724 32360 solver.cpp:404]     Test net output #1: loss = 2.59761 (* 1 = 2.59761 loss)\nI0822 23:33:14.500630 32360 solver.cpp:228] Iteration 63800, loss = 0.000651458\nI0822 23:33:14.500684 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:33:14.500701 32360 solver.cpp:244]     Train net output #1: loss = 0.000651426 (* 1 = 0.000651426 loss)\nI0822 23:33:14.608465 32360 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0822 23:35:32.900300 32360 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0822 23:36:54.575754 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5452\nI0822 23:36:54.576061 32360 solver.cpp:404]     Test net output #1: loss = 2.62521 (* 1 = 2.62521 loss)\nI0822 23:36:55.888178 32360 solver.cpp:228] Iteration 63900, loss = 0.000458905\nI0822 23:36:55.888232 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:36:55.888249 32360 solver.cpp:244]     Train net output #1: loss = 0.000458873 (* 1 = 0.000458873 loss)\nI0822 23:36:55.999150 32360 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0822 23:39:14.178400 32360 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0822 23:40:35.889914 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54908\nI0822 23:40:35.890228 32360 solver.cpp:404]     Test net output #1: loss = 2.59961 (* 1 = 2.59961 loss)\nI0822 23:40:37.201776 32360 solver.cpp:228] Iteration 64000, loss = 0.000398486\nI0822 23:40:37.201829 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:40:37.201848 32360 solver.cpp:244]     Train net output #1: loss = 0.000398454 (* 1 = 0.000398454 loss)\nI0822 23:40:37.309870 32360 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0822 23:42:55.622882 32360 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0822 23:44:16.900631 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54524\nI0822 23:44:16.900943 32360 solver.cpp:404]     Test net output #1: loss = 2.62569 (* 1 
= 2.62569 loss)\nI0822 23:44:18.212513 32360 solver.cpp:228] Iteration 64100, loss = 0.000397951\nI0822 23:44:18.212561 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:44:18.212576 32360 solver.cpp:244]     Train net output #1: loss = 0.000397919 (* 1 = 0.000397919 loss)\nI0822 23:44:18.321501 32360 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0822 23:46:36.056581 32360 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0822 23:47:57.463942 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54884\nI0822 23:47:57.464220 32360 solver.cpp:404]     Test net output #1: loss = 2.59899 (* 1 = 2.59899 loss)\nI0822 23:47:58.776507 32360 solver.cpp:228] Iteration 64200, loss = 0.000494398\nI0822 23:47:58.776548 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:47:58.776564 32360 solver.cpp:244]     Train net output #1: loss = 0.000494366 (* 1 = 0.000494366 loss)\nI0822 23:47:58.883291 32360 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0822 23:50:17.188302 32360 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0822 23:51:38.892735 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54408\nI0822 23:51:38.893043 32360 solver.cpp:404]     Test net output #1: loss = 2.62584 (* 1 = 2.62584 loss)\nI0822 23:51:40.206014 32360 solver.cpp:228] Iteration 64300, loss = 0.000434586\nI0822 23:51:40.206065 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:51:40.206082 32360 solver.cpp:244]     Train net output #1: loss = 0.000434554 (* 1 = 0.000434554 loss)\nI0822 23:51:40.314688 32360 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0822 23:53:57.993008 32360 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0822 23:55:19.751046 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54628\nI0822 23:55:19.751386 32360 solver.cpp:404]     Test net output #1: loss = 2.61552 (* 1 = 2.61552 loss)\nI0822 23:55:21.062912 32360 solver.cpp:228] Iteration 64400, loss = 0.000533879\nI0822 
23:55:21.062966 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:55:21.062983 32360 solver.cpp:244]     Train net output #1: loss = 0.000533847 (* 1 = 0.000533847 loss)\nI0822 23:55:21.172003 32360 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0822 23:57:39.315848 32360 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0822 23:59:01.107656 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5426\nI0822 23:59:01.108005 32360 solver.cpp:404]     Test net output #1: loss = 2.64197 (* 1 = 2.64197 loss)\nI0822 23:59:02.420076 32360 solver.cpp:228] Iteration 64500, loss = 0.000526173\nI0822 23:59:02.420130 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:59:02.420147 32360 solver.cpp:244]     Train net output #1: loss = 0.000526141 (* 1 = 0.000526141 loss)\nI0822 23:59:02.532109 32360 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0823 00:01:20.744866 32360 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0823 00:02:42.526157 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54572\nI0823 00:02:42.526535 32360 solver.cpp:404]     Test net output #1: loss = 2.62649 (* 1 = 2.62649 loss)\nI0823 00:02:43.838541 32360 solver.cpp:228] Iteration 64600, loss = 0.000479827\nI0823 00:02:43.838595 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:02:43.838611 32360 solver.cpp:244]     Train net output #1: loss = 0.000479795 (* 1 = 0.000479795 loss)\nI0823 00:02:43.942183 32360 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0823 00:05:02.120126 32360 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0823 00:06:23.896364 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54328\nI0823 00:06:23.896725 32360 solver.cpp:404]     Test net output #1: loss = 2.6407 (* 1 = 2.6407 loss)\nI0823 00:06:25.208580 32360 solver.cpp:228] Iteration 64700, loss = 0.000486132\nI0823 00:06:25.208631 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:06:25.208647 32360 solver.cpp:244]  
   Train net output #1: loss = 0.000486101 (* 1 = 0.000486101 loss)\nI0823 00:06:25.315187 32360 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0823 00:08:43.089215 32360 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0823 00:10:04.876539 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54672\nI0823 00:10:04.876868 32360 solver.cpp:404]     Test net output #1: loss = 2.61099 (* 1 = 2.61099 loss)\nI0823 00:10:06.189132 32360 solver.cpp:228] Iteration 64800, loss = 0.000501299\nI0823 00:10:06.189180 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:10:06.189196 32360 solver.cpp:244]     Train net output #1: loss = 0.000501267 (* 1 = 0.000501267 loss)\nI0823 00:10:06.292668 32360 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0823 00:12:24.590526 32360 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0823 00:13:46.364742 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5428\nI0823 00:13:46.365097 32360 solver.cpp:404]     Test net output #1: loss = 2.63918 (* 1 = 2.63918 loss)\nI0823 00:13:47.676697 32360 solver.cpp:228] Iteration 64900, loss = 0.000404648\nI0823 00:13:47.676744 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:13:47.676761 32360 solver.cpp:244]     Train net output #1: loss = 0.000404616 (* 1 = 0.000404616 loss)\nI0823 00:13:47.786332 32360 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0823 00:16:05.493046 32360 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0823 00:17:27.280908 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54648\nI0823 00:17:27.281291 32360 solver.cpp:404]     Test net output #1: loss = 2.60873 (* 1 = 2.60873 loss)\nI0823 00:17:28.593536 32360 solver.cpp:228] Iteration 65000, loss = 0.000501379\nI0823 00:17:28.593580 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:17:28.593595 32360 solver.cpp:244]     Train net output #1: loss = 0.000501348 (* 1 = 0.000501348 loss)\nI0823 00:17:28.704211 32360 sgd_solver.cpp:166] 
Iteration 65000, lr = 0.035\nI0823 00:19:46.980221 32360 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0823 00:21:08.755262 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54252\nI0823 00:21:08.755630 32360 solver.cpp:404]     Test net output #1: loss = 2.63927 (* 1 = 2.63927 loss)\nI0823 00:21:10.067481 32360 solver.cpp:228] Iteration 65100, loss = 0.000412422\nI0823 00:21:10.067522 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:21:10.067536 32360 solver.cpp:244]     Train net output #1: loss = 0.00041239 (* 1 = 0.00041239 loss)\nI0823 00:21:10.179622 32360 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0823 00:23:28.417299 32360 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0823 00:24:50.177364 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54472\nI0823 00:24:50.177743 32360 solver.cpp:404]     Test net output #1: loss = 2.62206 (* 1 = 2.62206 loss)\nI0823 00:24:51.489616 32360 solver.cpp:228] Iteration 65200, loss = 0.000447516\nI0823 00:24:51.489662 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:24:51.489678 32360 solver.cpp:244]     Train net output #1: loss = 0.000447484 (* 1 = 0.000447484 loss)\nI0823 00:24:51.602493 32360 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0823 00:27:09.796018 32360 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0823 00:28:31.542378 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54324\nI0823 00:28:31.542764 32360 solver.cpp:404]     Test net output #1: loss = 2.62396 (* 1 = 2.62396 loss)\nI0823 00:28:32.854799 32360 solver.cpp:228] Iteration 65300, loss = 0.000371599\nI0823 00:28:32.854847 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:28:32.854862 32360 solver.cpp:244]     Train net output #1: loss = 0.000371568 (* 1 = 0.000371568 loss)\nI0823 00:28:32.961495 32360 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0823 00:30:50.665958 32360 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0823 
00:32:12.418085 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5458\nI0823 00:32:12.418478 32360 solver.cpp:404]     Test net output #1: loss = 2.61168 (* 1 = 2.61168 loss)\nI0823 00:32:13.730497 32360 solver.cpp:228] Iteration 65400, loss = 0.000470951\nI0823 00:32:13.730546 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:32:13.730561 32360 solver.cpp:244]     Train net output #1: loss = 0.000470919 (* 1 = 0.000470919 loss)\nI0823 00:32:13.841212 32360 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0823 00:34:32.140974 32360 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0823 00:35:53.924883 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54492\nI0823 00:35:53.925264 32360 solver.cpp:404]     Test net output #1: loss = 2.61019 (* 1 = 2.61019 loss)\nI0823 00:35:55.237432 32360 solver.cpp:228] Iteration 65500, loss = 0.000495199\nI0823 00:35:55.237480 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:35:55.237501 32360 solver.cpp:244]     Train net output #1: loss = 0.000495167 (* 1 = 0.000495167 loss)\nI0823 00:35:55.343946 32360 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0823 00:38:13.620239 32360 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0823 00:39:35.391824 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54668\nI0823 00:39:35.392186 32360 solver.cpp:404]     Test net output #1: loss = 2.59849 (* 1 = 2.59849 loss)\nI0823 00:39:36.704715 32360 solver.cpp:228] Iteration 65600, loss = 0.000358527\nI0823 00:39:36.704766 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:39:36.704782 32360 solver.cpp:244]     Train net output #1: loss = 0.000358495 (* 1 = 0.000358495 loss)\nI0823 00:39:36.818106 32360 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0823 00:41:54.692493 32360 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0823 00:43:16.087664 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54332\nI0823 00:43:16.087973 32360 
solver.cpp:404]     Test net output #1: loss = 2.62542 (* 1 = 2.62542 loss)\nI0823 00:43:17.400122 32360 solver.cpp:228] Iteration 65700, loss = 0.00046693\nI0823 00:43:17.400172 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:43:17.400189 32360 solver.cpp:244]     Train net output #1: loss = 0.000466898 (* 1 = 0.000466898 loss)\nI0823 00:43:17.501245 32360 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0823 00:45:35.165478 32360 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0823 00:46:56.166774 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54528\nI0823 00:46:56.167058 32360 solver.cpp:404]     Test net output #1: loss = 2.60955 (* 1 = 2.60955 loss)\nI0823 00:46:57.478359 32360 solver.cpp:228] Iteration 65800, loss = 0.000413609\nI0823 00:46:57.478415 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:46:57.478432 32360 solver.cpp:244]     Train net output #1: loss = 0.000413577 (* 1 = 0.000413577 loss)\nI0823 00:46:57.580945 32360 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0823 00:49:15.137961 32360 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0823 00:50:36.116390 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54336\nI0823 00:50:36.116695 32360 solver.cpp:404]     Test net output #1: loss = 2.61925 (* 1 = 2.61925 loss)\nI0823 00:50:37.428650 32360 solver.cpp:228] Iteration 65900, loss = 0.000476275\nI0823 00:50:37.428704 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:50:37.428720 32360 solver.cpp:244]     Train net output #1: loss = 0.000476243 (* 1 = 0.000476243 loss)\nI0823 00:50:37.527915 32360 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0823 00:52:54.504180 32360 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0823 00:54:15.812055 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5466\nI0823 00:54:15.812399 32360 solver.cpp:404]     Test net output #1: loss = 2.59171 (* 1 = 2.59171 loss)\nI0823 00:54:17.124459 32360 solver.cpp:228] 
Iteration 66000, loss = 0.000405271\nI0823 00:54:17.124516 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:54:17.124534 32360 solver.cpp:244]     Train net output #1: loss = 0.000405239 (* 1 = 0.000405239 loss)\nI0823 00:54:17.226776 32360 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0823 00:56:34.904670 32360 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0823 00:57:56.630544 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54336\nI0823 00:57:56.630844 32360 solver.cpp:404]     Test net output #1: loss = 2.61829 (* 1 = 2.61829 loss)\nI0823 00:57:57.942461 32360 solver.cpp:228] Iteration 66100, loss = 0.000386341\nI0823 00:57:57.942515 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:57:57.942533 32360 solver.cpp:244]     Train net output #1: loss = 0.000386309 (* 1 = 0.000386309 loss)\nI0823 00:57:58.043148 32360 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0823 01:00:15.788952 32360 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0823 01:01:37.512902 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54664\nI0823 01:01:37.513226 32360 solver.cpp:404]     Test net output #1: loss = 2.59452 (* 1 = 2.59452 loss)\nI0823 01:01:38.825017 32360 solver.cpp:228] Iteration 66200, loss = 0.00039942\nI0823 01:01:38.825067 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:01:38.825083 32360 solver.cpp:244]     Train net output #1: loss = 0.000399388 (* 1 = 0.000399388 loss)\nI0823 01:01:38.928017 32360 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0823 01:03:56.052235 32360 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0823 01:05:17.451494 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54348\nI0823 01:05:17.451764 32360 solver.cpp:404]     Test net output #1: loss = 2.61061 (* 1 = 2.61061 loss)\nI0823 01:05:18.763514 32360 solver.cpp:228] Iteration 66300, loss = 0.000427263\nI0823 01:05:18.763566 32360 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0823 01:05:18.763581 32360 solver.cpp:244]     Train net output #1: loss = 0.000427231 (* 1 = 0.000427231 loss)\nI0823 01:05:18.865942 32360 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0823 01:07:36.546205 32360 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0823 01:08:57.789309 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54584\nI0823 01:08:57.789614 32360 solver.cpp:404]     Test net output #1: loss = 2.59438 (* 1 = 2.59438 loss)\nI0823 01:08:59.101537 32360 solver.cpp:228] Iteration 66400, loss = 0.000387962\nI0823 01:08:59.101588 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:08:59.101603 32360 solver.cpp:244]     Train net output #1: loss = 0.00038793 (* 1 = 0.00038793 loss)\nI0823 01:08:59.206651 32360 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0823 01:11:17.016806 32360 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0823 01:12:38.262990 32360 solver.cpp:404]     Test net output #0: accuracy = 0.543\nI0823 01:12:38.263279 32360 solver.cpp:404]     Test net output #1: loss = 2.61515 (* 1 = 2.61515 loss)\nI0823 01:12:39.575186 32360 solver.cpp:228] Iteration 66500, loss = 0.000393768\nI0823 01:12:39.575239 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:12:39.575256 32360 solver.cpp:244]     Train net output #1: loss = 0.000393736 (* 1 = 0.000393736 loss)\nI0823 01:12:39.683123 32360 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0823 01:14:56.869516 32360 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0823 01:16:18.272879 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5452\nI0823 01:16:18.273162 32360 solver.cpp:404]     Test net output #1: loss = 2.59317 (* 1 = 2.59317 loss)\nI0823 01:16:19.584995 32360 solver.cpp:228] Iteration 66600, loss = 0.000468769\nI0823 01:16:19.585047 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:16:19.585065 32360 solver.cpp:244]     Train net output #1: loss = 0.000468738 (* 1 = 0.000468738 loss)\nI0823 
01:16:19.685595 32360 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0823 01:18:37.427170 32360 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0823 01:19:59.167006 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54084\nI0823 01:19:59.167306 32360 solver.cpp:404]     Test net output #1: loss = 2.62812 (* 1 = 2.62812 loss)\nI0823 01:20:00.479341 32360 solver.cpp:228] Iteration 66700, loss = 0.000428912\nI0823 01:20:00.479393 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:20:00.479409 32360 solver.cpp:244]     Train net output #1: loss = 0.00042888 (* 1 = 0.00042888 loss)\nI0823 01:20:00.581542 32360 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0823 01:22:18.296399 32360 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0823 01:23:39.823457 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54552\nI0823 01:23:39.823765 32360 solver.cpp:404]     Test net output #1: loss = 2.58855 (* 1 = 2.58855 loss)\nI0823 01:23:41.135891 32360 solver.cpp:228] Iteration 66800, loss = 0.000380112\nI0823 01:23:41.135934 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:23:41.135951 32360 solver.cpp:244]     Train net output #1: loss = 0.000380081 (* 1 = 0.000380081 loss)\nI0823 01:23:41.233423 32360 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0823 01:25:58.863847 32360 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0823 01:27:20.313654 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54236\nI0823 01:27:20.313932 32360 solver.cpp:404]     Test net output #1: loss = 2.61659 (* 1 = 2.61659 loss)\nI0823 01:27:21.625744 32360 solver.cpp:228] Iteration 66900, loss = 0.000408039\nI0823 01:27:21.625798 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:27:21.625815 32360 solver.cpp:244]     Train net output #1: loss = 0.000408007 (* 1 = 0.000408007 loss)\nI0823 01:27:21.732630 32360 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0823 01:29:39.438663 32360 solver.cpp:337] 
Iteration 67000, Testing net (#0)\nI0823 01:31:01.118069 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54456\nI0823 01:31:01.118366 32360 solver.cpp:404]     Test net output #1: loss = 2.59015 (* 1 = 2.59015 loss)\nI0823 01:31:02.431339 32360 solver.cpp:228] Iteration 67000, loss = 0.000365586\nI0823 01:31:02.431390 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:31:02.431406 32360 solver.cpp:244]     Train net output #1: loss = 0.000365555 (* 1 = 0.000365555 loss)\nI0823 01:31:02.532747 32360 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0823 01:33:20.188076 32360 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0823 01:34:41.899021 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54196\nI0823 01:34:41.899293 32360 solver.cpp:404]     Test net output #1: loss = 2.62164 (* 1 = 2.62164 loss)\nI0823 01:34:43.212874 32360 solver.cpp:228] Iteration 67100, loss = 0.00047326\nI0823 01:34:43.212927 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:34:43.212944 32360 solver.cpp:244]     Train net output #1: loss = 0.000473228 (* 1 = 0.000473228 loss)\nI0823 01:34:43.312089 32360 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0823 01:37:00.486902 32360 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0823 01:38:21.954993 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54548\nI0823 01:38:21.955278 32360 solver.cpp:404]     Test net output #1: loss = 2.58597 (* 1 = 2.58597 loss)\nI0823 01:38:23.268319 32360 solver.cpp:228] Iteration 67200, loss = 0.000391059\nI0823 01:38:23.268373 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:38:23.268389 32360 solver.cpp:244]     Train net output #1: loss = 0.000391028 (* 1 = 0.000391028 loss)\nI0823 01:38:23.369978 32360 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0823 01:40:40.584621 32360 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0823 01:42:02.039793 32360 solver.cpp:404]     Test net output #0: accuracy = 
0.54332\nI0823 01:42:02.040062 32360 solver.cpp:404]     Test net output #1: loss = 2.59718 (* 1 = 2.59718 loss)\nI0823 01:42:03.352028 32360 solver.cpp:228] Iteration 67300, loss = 0.000375724\nI0823 01:42:03.352072 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:42:03.352087 32360 solver.cpp:244]     Train net output #1: loss = 0.000375692 (* 1 = 0.000375692 loss)\nI0823 01:42:03.454469 32360 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0823 01:44:21.114807 32360 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0823 01:45:42.859971 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54424\nI0823 01:45:42.860244 32360 solver.cpp:404]     Test net output #1: loss = 2.60241 (* 1 = 2.60241 loss)\nI0823 01:45:44.172876 32360 solver.cpp:228] Iteration 67400, loss = 0.000477881\nI0823 01:45:44.172919 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:45:44.172936 32360 solver.cpp:244]     Train net output #1: loss = 0.00047785 (* 1 = 0.00047785 loss)\nI0823 01:45:44.271731 32360 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0823 01:48:02.020813 32360 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0823 01:49:23.714334 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54152\nI0823 01:49:23.714643 32360 solver.cpp:404]     Test net output #1: loss = 2.61829 (* 1 = 2.61829 loss)\nI0823 01:49:25.027735 32360 solver.cpp:228] Iteration 67500, loss = 0.000393932\nI0823 01:49:25.027778 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:49:25.027793 32360 solver.cpp:244]     Train net output #1: loss = 0.0003939 (* 1 = 0.0003939 loss)\nI0823 01:49:25.130517 32360 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0823 01:51:43.441839 32360 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0823 01:53:04.819766 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54336\nI0823 01:53:04.820104 32360 solver.cpp:404]     Test net output #1: loss = 2.60613 (* 1 = 2.60613 loss)\nI0823 
01:53:06.135861 32360 solver.cpp:228] Iteration 67600, loss = 0.000435763\nI0823 01:53:06.135906 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:53:06.135921 32360 solver.cpp:244]     Train net output #1: loss = 0.000435731 (* 1 = 0.000435731 loss)\nI0823 01:53:06.237463 32360 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0823 01:55:24.497974 32360 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0823 01:56:45.848877 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54064\nI0823 01:56:45.849155 32360 solver.cpp:404]     Test net output #1: loss = 2.62452 (* 1 = 2.62452 loss)\nI0823 01:56:47.165105 32360 solver.cpp:228] Iteration 67700, loss = 0.000473069\nI0823 01:56:47.165148 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:56:47.165163 32360 solver.cpp:244]     Train net output #1: loss = 0.000473037 (* 1 = 0.000473037 loss)\nI0823 01:56:47.272935 32360 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0823 01:59:04.970201 32360 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0823 02:00:26.230947 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54364\nI0823 02:00:26.231266 32360 solver.cpp:404]     Test net output #1: loss = 2.58924 (* 1 = 2.58924 loss)\nI0823 02:00:27.547152 32360 solver.cpp:228] Iteration 67800, loss = 0.000422343\nI0823 02:00:27.547194 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:00:27.547209 32360 solver.cpp:244]     Train net output #1: loss = 0.000422311 (* 1 = 0.000422311 loss)\nI0823 02:00:27.657616 32360 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0823 02:02:45.887604 32360 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0823 02:04:07.161406 32360 solver.cpp:404]     Test net output #0: accuracy = 0.53936\nI0823 02:04:07.161694 32360 solver.cpp:404]     Test net output #1: loss = 2.63092 (* 1 = 2.63092 loss)\nI0823 02:04:08.476359 32360 solver.cpp:228] Iteration 67900, loss = 0.000376829\nI0823 02:04:08.476402 32360 solver.cpp:244] 
    Train net output #0: accuracy = 1\nI0823 02:04:08.476416 32360 solver.cpp:244]     Train net output #1: loss = 0.000376798 (* 1 = 0.000376798 loss)\nI0823 02:04:08.579980 32360 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0823 02:06:26.843231 32360 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0823 02:07:48.005735 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54248\nI0823 02:07:48.006003 32360 solver.cpp:404]     Test net output #1: loss = 2.61696 (* 1 = 2.61696 loss)\nI0823 02:07:49.319936 32360 solver.cpp:228] Iteration 68000, loss = 0.000424133\nI0823 02:07:49.319975 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:07:49.319990 32360 solver.cpp:244]     Train net output #1: loss = 0.000424101 (* 1 = 0.000424101 loss)\nI0823 02:07:49.428140 32360 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0823 02:10:07.844775 32360 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0823 02:11:29.017632 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54036\nI0823 02:11:29.017921 32360 solver.cpp:404]     Test net output #1: loss = 2.62876 (* 1 = 2.62876 loss)\nI0823 02:11:30.331981 32360 solver.cpp:228] Iteration 68100, loss = 0.000403633\nI0823 02:11:30.332020 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:11:30.332033 32360 solver.cpp:244]     Train net output #1: loss = 0.000403601 (* 1 = 0.000403601 loss)\nI0823 02:11:30.435433 32360 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0823 02:13:48.657021 32360 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0823 02:15:09.833950 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54228\nI0823 02:15:09.834262 32360 solver.cpp:404]     Test net output #1: loss = 2.61347 (* 1 = 2.61347 loss)\nI0823 02:15:11.147955 32360 solver.cpp:228] Iteration 68200, loss = 0.000384204\nI0823 02:15:11.147994 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:15:11.148010 32360 solver.cpp:244]     Train net output #1: loss = 
0.000384173 (* 1 = 0.000384173 loss)\nI0823 02:15:11.259507 32360 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0823 02:17:29.680918 32360 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0823 02:18:51.050016 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54036\nI0823 02:18:51.050307 32360 solver.cpp:404]     Test net output #1: loss = 2.62775 (* 1 = 2.62775 loss)\nI0823 02:18:52.363443 32360 solver.cpp:228] Iteration 68300, loss = 0.000438551\nI0823 02:18:52.363485 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:18:52.363500 32360 solver.cpp:244]     Train net output #1: loss = 0.00043852 (* 1 = 0.00043852 loss)\nI0823 02:18:52.464679 32360 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0823 02:21:10.285815 32360 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0823 02:22:31.975242 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54168\nI0823 02:22:31.975566 32360 solver.cpp:404]     Test net output #1: loss = 2.6069 (* 1 = 2.6069 loss)\nI0823 02:22:33.287096 32360 solver.cpp:228] Iteration 68400, loss = 0.000458517\nI0823 02:22:33.287134 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:22:33.287150 32360 solver.cpp:244]     Train net output #1: loss = 0.000458485 (* 1 = 0.000458485 loss)\nI0823 02:22:33.382539 32360 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0823 02:24:50.911145 32360 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0823 02:26:12.521049 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5406\nI0823 02:26:12.521334 32360 solver.cpp:404]     Test net output #1: loss = 2.61558 (* 1 = 2.61558 loss)\nI0823 02:26:13.833060 32360 solver.cpp:228] Iteration 68500, loss = 0.000490108\nI0823 02:26:13.833096 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:26:13.833112 32360 solver.cpp:244]     Train net output #1: loss = 0.000490076 (* 1 = 0.000490076 loss)\nI0823 02:26:13.935914 32360 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0823 
02:28:31.673367 32360 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0823 02:29:53.402729 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54304\nI0823 02:29:53.403031 32360 solver.cpp:404]     Test net output #1: loss = 2.59298 (* 1 = 2.59298 loss)\nI0823 02:29:54.714824 32360 solver.cpp:228] Iteration 68600, loss = 0.00037805\nI0823 02:29:54.714860 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:29:54.714876 32360 solver.cpp:244]     Train net output #1: loss = 0.000378018 (* 1 = 0.000378018 loss)\nI0823 02:29:54.821039 32360 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0823 02:32:12.428257 32360 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0823 02:33:34.052104 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54048\nI0823 02:33:34.052398 32360 solver.cpp:404]     Test net output #1: loss = 2.60948 (* 1 = 2.60948 loss)\nI0823 02:33:35.364346 32360 solver.cpp:228] Iteration 68700, loss = 0.000363887\nI0823 02:33:35.364384 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:33:35.364399 32360 solver.cpp:244]     Train net output #1: loss = 0.000363855 (* 1 = 0.000363855 loss)\nI0823 02:33:35.467499 32360 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0823 02:35:53.263140 32360 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0823 02:37:15.031141 32360 solver.cpp:404]     Test net output #0: accuracy = 0.543\nI0823 02:37:15.031507 32360 solver.cpp:404]     Test net output #1: loss = 2.60445 (* 1 = 2.60445 loss)\nI0823 02:37:16.342701 32360 solver.cpp:228] Iteration 68800, loss = 0.000406345\nI0823 02:37:16.342738 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:37:16.342753 32360 solver.cpp:244]     Train net output #1: loss = 0.000406313 (* 1 = 0.000406313 loss)\nI0823 02:37:16.451324 32360 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0823 02:39:34.093883 32360 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0823 02:40:55.868212 32360 solver.cpp:404]     
Test net output #0: accuracy = 0.54048\nI0823 02:40:55.868576 32360 solver.cpp:404]     Test net output #1: loss = 2.61248 (* 1 = 2.61248 loss)\nI0823 02:40:57.179941 32360 solver.cpp:228] Iteration 68900, loss = 0.000359556\nI0823 02:40:57.179983 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:40:57.179998 32360 solver.cpp:244]     Train net output #1: loss = 0.000359524 (* 1 = 0.000359524 loss)\nI0823 02:40:57.282613 32360 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0823 02:43:14.855700 32360 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0823 02:44:36.632668 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54332\nI0823 02:44:36.633018 32360 solver.cpp:404]     Test net output #1: loss = 2.59165 (* 1 = 2.59165 loss)\nI0823 02:44:37.945509 32360 solver.cpp:228] Iteration 69000, loss = 0.000356348\nI0823 02:44:37.945551 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:44:37.945567 32360 solver.cpp:244]     Train net output #1: loss = 0.000356317 (* 1 = 0.000356317 loss)\nI0823 02:44:38.046401 32360 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0823 02:46:55.692560 32360 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0823 02:48:17.452280 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5382\nI0823 02:48:17.452648 32360 solver.cpp:404]     Test net output #1: loss = 2.63162 (* 1 = 2.63162 loss)\nI0823 02:48:18.764706 32360 solver.cpp:228] Iteration 69100, loss = 0.000341777\nI0823 02:48:18.764746 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:48:18.764762 32360 solver.cpp:244]     Train net output #1: loss = 0.000341745 (* 1 = 0.000341745 loss)\nI0823 02:48:18.861130 32360 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0823 02:50:36.483237 32360 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0823 02:51:58.253163 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54308\nI0823 02:51:58.253540 32360 solver.cpp:404]     Test net output #1: loss = 
2.58225 (* 1 = 2.58225 loss)\nI0823 02:51:59.566309 32360 solver.cpp:228] Iteration 69200, loss = 0.000449218\nI0823 02:51:59.566351 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:51:59.566367 32360 solver.cpp:244]     Train net output #1: loss = 0.000449186 (* 1 = 0.000449186 loss)\nI0823 02:51:59.674043 32360 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0823 02:54:17.395961 32360 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0823 02:55:39.175124 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54008\nI0823 02:55:39.175494 32360 solver.cpp:404]     Test net output #1: loss = 2.60607 (* 1 = 2.60607 loss)\nI0823 02:55:40.487601 32360 solver.cpp:228] Iteration 69300, loss = 0.000460559\nI0823 02:55:40.487643 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:55:40.487659 32360 solver.cpp:244]     Train net output #1: loss = 0.000460527 (* 1 = 0.000460527 loss)\nI0823 02:55:40.590914 32360 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0823 02:57:57.728513 32360 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0823 02:59:19.495376 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5434\nI0823 02:59:19.495721 32360 solver.cpp:404]     Test net output #1: loss = 2.57202 (* 1 = 2.57202 loss)\nI0823 02:59:20.807853 32360 solver.cpp:228] Iteration 69400, loss = 0.000373172\nI0823 02:59:20.807896 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:59:20.807912 32360 solver.cpp:244]     Train net output #1: loss = 0.00037314 (* 1 = 0.00037314 loss)\nI0823 02:59:20.916306 32360 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0823 03:01:38.621340 32360 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0823 03:03:00.390830 32360 solver.cpp:404]     Test net output #0: accuracy = 0.542\nI0823 03:03:00.391198 32360 solver.cpp:404]     Test net output #1: loss = 2.58762 (* 1 = 2.58762 loss)\nI0823 03:03:01.704497 32360 solver.cpp:228] Iteration 69500, loss = 0.000438599\nI0823 
03:03:01.704538 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:03:01.704555 32360 solver.cpp:244]     Train net output #1: loss = 0.000438568 (* 1 = 0.000438568 loss)\nI0823 03:03:01.803896 32360 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0823 03:05:18.907573 32360 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0823 03:06:40.686277 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54276\nI0823 03:06:40.686635 32360 solver.cpp:404]     Test net output #1: loss = 2.58219 (* 1 = 2.58219 loss)\nI0823 03:06:41.999336 32360 solver.cpp:228] Iteration 69600, loss = 0.000433101\nI0823 03:06:41.999379 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:06:41.999397 32360 solver.cpp:244]     Train net output #1: loss = 0.00043307 (* 1 = 0.00043307 loss)\nI0823 03:06:42.103953 32360 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0823 03:08:59.721158 32360 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0823 03:10:21.495744 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54028\nI0823 03:10:21.496090 32360 solver.cpp:404]     Test net output #1: loss = 2.60646 (* 1 = 2.60646 loss)\nI0823 03:10:22.808037 32360 solver.cpp:228] Iteration 69700, loss = 0.000392038\nI0823 03:10:22.808079 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:10:22.808094 32360 solver.cpp:244]     Train net output #1: loss = 0.000392007 (* 1 = 0.000392007 loss)\nI0823 03:10:22.916067 32360 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0823 03:12:40.551919 32360 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0823 03:14:02.325012 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54356\nI0823 03:14:02.325389 32360 solver.cpp:404]     Test net output #1: loss = 2.57616 (* 1 = 2.57616 loss)\nI0823 03:14:03.637413 32360 solver.cpp:228] Iteration 69800, loss = 0.000367232\nI0823 03:14:03.637459 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:14:03.637475 32360 solver.cpp:244] 
    Train net output #1: loss = 0.0003672 (* 1 = 0.0003672 loss)\nI0823 03:14:03.738517 32360 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0823 03:16:21.380720 32360 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0823 03:17:43.154994 32360 solver.cpp:404]     Test net output #0: accuracy = 0.54088\nI0823 03:17:43.155369 32360 solver.cpp:404]     Test net output #1: loss = 2.59758 (* 1 = 2.59758 loss)\nI0823 03:17:44.467746 32360 solver.cpp:228] Iteration 69900, loss = 0.00032276\nI0823 03:17:44.467784 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:17:44.467799 32360 solver.cpp:244]     Train net output #1: loss = 0.000322729 (* 1 = 0.000322729 loss)\nI0823 03:17:44.578491 32360 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0823 03:20:01.744385 32360 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0823 03:21:23.537930 32360 solver.cpp:404]     Test net output #0: accuracy = 0.542\nI0823 03:21:23.538280 32360 solver.cpp:404]     Test net output #1: loss = 2.5863 (* 1 = 2.5863 loss)\nI0823 03:21:24.850555 32360 solver.cpp:228] Iteration 70000, loss = 0.000390357\nI0823 03:21:24.850595 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:21:24.850610 32360 solver.cpp:244]     Train net output #1: loss = 0.000390325 (* 1 = 0.000390325 loss)\nI0823 03:21:24.950374 32360 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0823 03:21:24.950398 32360 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0823 03:23:42.062695 32360 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0823 03:25:03.856001 32360 solver.cpp:404]     Test net output #0: accuracy = 0.5622\nI0823 03:25:03.856377 32360 solver.cpp:404]     Test net output #1: loss = 2.44636 (* 1 = 2.44636 loss)\nI0823 03:25:05.168617 32360 solver.cpp:228] Iteration 70100, loss = 0.000481868\nI0823 03:25:05.168658 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:25:05.168673 32360 solver.cpp:244]     Train net output #1: loss = 
0.000481836 (* 1 = 0.000481836 loss)\nI0823 03:25:05.272137 32360 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0823 03:27:22.347825 32360 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0823 03:28:44.125358 32360 solver.cpp:404]     Test net output #0: accuracy = 0.588\nI0823 03:28:44.125721 32360 solver.cpp:404]     Test net output #1: loss = 2.27545 (* 1 = 2.27545 loss)\nI0823 03:28:45.437496 32360 solver.cpp:228] Iteration 70200, loss = 0.000391583\nI0823 03:28:45.437538 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:28:45.437553 32360 solver.cpp:244]     Train net output #1: loss = 0.000391551 (* 1 = 0.000391551 loss)\nI0823 03:28:45.545161 32360 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0823 03:31:03.124907 32360 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0823 03:32:24.905172 32360 solver.cpp:404]     Test net output #0: accuracy = 0.60652\nI0823 03:32:24.905550 32360 solver.cpp:404]     Test net output #1: loss = 2.16872 (* 1 = 2.16872 loss)\nI0823 03:32:26.217547 32360 solver.cpp:228] Iteration 70300, loss = 0.000383441\nI0823 03:32:26.217586 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:32:26.217602 32360 solver.cpp:244]     Train net output #1: loss = 0.000383409 (* 1 = 0.000383409 loss)\nI0823 03:32:26.329272 32360 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0823 03:34:44.012246 32360 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0823 03:36:05.816149 32360 solver.cpp:404]     Test net output #0: accuracy = 0.62632\nI0823 03:36:05.816512 32360 solver.cpp:404]     Test net output #1: loss = 2.05107 (* 1 = 2.05107 loss)\nI0823 03:36:07.128660 32360 solver.cpp:228] Iteration 70400, loss = 0.00038561\nI0823 03:36:07.128702 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:36:07.128718 32360 solver.cpp:244]     Train net output #1: loss = 0.000385579 (* 1 = 0.000385579 loss)\nI0823 03:36:07.235563 32360 sgd_solver.cpp:166] Iteration 70400, lr = 
0.0035\nI0823 03:38:24.960602 32360 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0823 03:39:46.765991 32360 solver.cpp:404]     Test net output #0: accuracy = 0.63692\nI0823 03:39:46.766372 32360 solver.cpp:404]     Test net output #1: loss = 1.98637 (* 1 = 1.98637 loss)\nI0823 03:39:48.079668 32360 solver.cpp:228] Iteration 70500, loss = 0.000340272\nI0823 03:39:48.079715 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:39:48.079730 32360 solver.cpp:244]     Train net output #1: loss = 0.00034024 (* 1 = 0.00034024 loss)\nI0823 03:39:48.183675 32360 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0823 03:42:05.919301 32360 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0823 03:43:27.710005 32360 solver.cpp:404]     Test net output #0: accuracy = 0.65436\nI0823 03:43:27.710382 32360 solver.cpp:404]     Test net output #1: loss = 1.90813 (* 1 = 1.90813 loss)\nI0823 03:43:29.022181 32360 solver.cpp:228] Iteration 70600, loss = 0.000361\nI0823 03:43:29.022225 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:43:29.022240 32360 solver.cpp:244]     Train net output #1: loss = 0.000360969 (* 1 = 0.000360969 loss)\nI0823 03:43:29.126648 32360 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0823 03:45:46.746852 32360 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0823 03:47:08.533210 32360 solver.cpp:404]     Test net output #0: accuracy = 0.6592\nI0823 03:47:08.533583 32360 solver.cpp:404]     Test net output #1: loss = 1.8716 (* 1 = 1.8716 loss)\nI0823 03:47:09.846761 32360 solver.cpp:228] Iteration 70700, loss = 0.00037015\nI0823 03:47:09.846802 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:47:09.846819 32360 solver.cpp:244]     Train net output #1: loss = 0.000370118 (* 1 = 0.000370118 loss)\nI0823 03:47:09.949995 32360 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0823 03:49:27.795830 32360 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0823 03:50:49.580415 32360 
solver.cpp:404]     Test net output #0: accuracy = 0.67176\nI0823 03:50:49.580772 32360 solver.cpp:404]     Test net output #1: loss = 1.81837 (* 1 = 1.81837 loss)\nI0823 03:50:50.892345 32360 solver.cpp:228] Iteration 70800, loss = 0.00032987\nI0823 03:50:50.892390 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:50:50.892405 32360 solver.cpp:244]     Train net output #1: loss = 0.000329838 (* 1 = 0.000329838 loss)\nI0823 03:50:50.999922 32360 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0823 03:53:09.419046 32360 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0823 03:54:31.201668 32360 solver.cpp:404]     Test net output #0: accuracy = 0.67564\nI0823 03:54:31.202049 32360 solver.cpp:404]     Test net output #1: loss = 1.79867 (* 1 = 1.79867 loss)\nI0823 03:54:32.513962 32360 solver.cpp:228] Iteration 70900, loss = 0.000430045\nI0823 03:54:32.514005 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:54:32.514021 32360 solver.cpp:244]     Train net output #1: loss = 0.000430013 (* 1 = 0.000430013 loss)\nI0823 03:54:32.622984 32360 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0823 03:56:50.889235 32360 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0823 03:58:12.693148 32360 solver.cpp:404]     Test net output #0: accuracy = 0.6846\nI0823 03:58:12.693529 32360 solver.cpp:404]     Test net output #1: loss = 1.76284 (* 1 = 1.76284 loss)\nI0823 03:58:14.005651 32360 solver.cpp:228] Iteration 71000, loss = 0.000301798\nI0823 03:58:14.005692 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:58:14.005708 32360 solver.cpp:244]     Train net output #1: loss = 0.000301766 (* 1 = 0.000301766 loss)\nI0823 03:58:14.113029 32360 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0823 04:00:32.485914 32360 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0823 04:01:54.273986 32360 solver.cpp:404]     Test net output #0: accuracy = 0.68688\nI0823 04:01:54.274361 32360 solver.cpp:404]     Test net 
output #1: loss = 1.75246 (* 1 = 1.75246 loss)\nI0823 04:01:55.586191 32360 solver.cpp:228] Iteration 71100, loss = 0.00038669\nI0823 04:01:55.586235 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:01:55.586249 32360 solver.cpp:244]     Train net output #1: loss = 0.000386658 (* 1 = 0.000386658 loss)\nI0823 04:01:55.699723 32360 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0823 04:04:14.042939 32360 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0823 04:05:35.839118 32360 solver.cpp:404]     Test net output #0: accuracy = 0.69412\nI0823 04:05:35.839483 32360 solver.cpp:404]     Test net output #1: loss = 1.7265 (* 1 = 1.7265 loss)\nI0823 04:05:37.150887 32360 solver.cpp:228] Iteration 71200, loss = 0.000391701\nI0823 04:05:37.150933 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:05:37.150949 32360 solver.cpp:244]     Train net output #1: loss = 0.000391669 (* 1 = 0.000391669 loss)\nI0823 04:05:37.260252 32360 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0823 04:07:55.521476 32360 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0823 04:09:17.308634 32360 solver.cpp:404]     Test net output #0: accuracy = 0.69424\nI0823 04:09:17.309054 32360 solver.cpp:404]     Test net output #1: loss = 1.72444 (* 1 = 1.72444 loss)\nI0823 04:09:18.621217 32360 solver.cpp:228] Iteration 71300, loss = 0.000416308\nI0823 04:09:18.621256 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:09:18.621271 32360 solver.cpp:244]     Train net output #1: loss = 0.000416276 (* 1 = 0.000416276 loss)\nI0823 04:09:18.729001 32360 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0823 04:11:36.992348 32360 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0823 04:12:58.791821 32360 solver.cpp:404]     Test net output #0: accuracy = 0.69912\nI0823 04:12:58.792207 32360 solver.cpp:404]     Test net output #1: loss = 1.70586 (* 1 = 1.70586 loss)\nI0823 04:13:00.104851 32360 solver.cpp:228] Iteration 71400, loss = 
0.000397284\nI0823 04:13:00.104890 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:13:00.104905 32360 solver.cpp:244]     Train net output #1: loss = 0.000397252 (* 1 = 0.000397252 loss)\nI0823 04:13:00.211241 32360 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0823 04:15:18.064591 32360 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0823 04:16:39.963421 32360 solver.cpp:404]     Test net output #0: accuracy = 0.69824\nI0823 04:16:39.963781 32360 solver.cpp:404]     Test net output #1: loss = 1.70779 (* 1 = 1.70779 loss)\nI0823 04:16:41.277102 32360 solver.cpp:228] Iteration 71500, loss = 0.000321069\nI0823 04:16:41.277143 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:16:41.277159 32360 solver.cpp:244]     Train net output #1: loss = 0.000321037 (* 1 = 0.000321037 loss)\nI0823 04:16:41.386840 32360 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0823 04:18:59.306046 32360 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0823 04:20:21.078928 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70364\nI0823 04:20:21.079308 32360 solver.cpp:404]     Test net output #1: loss = 1.69335 (* 1 = 1.69335 loss)\nI0823 04:20:22.391497 32360 solver.cpp:228] Iteration 71600, loss = 0.000318109\nI0823 04:20:22.391538 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:20:22.391554 32360 solver.cpp:244]     Train net output #1: loss = 0.000318077 (* 1 = 0.000318077 loss)\nI0823 04:20:22.499989 32360 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0823 04:22:40.842236 32360 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0823 04:24:02.737951 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70172\nI0823 04:24:02.738343 32360 solver.cpp:404]     Test net output #1: loss = 1.6991 (* 1 = 1.6991 loss)\nI0823 04:24:04.051939 32360 solver.cpp:228] Iteration 71700, loss = 0.000464232\nI0823 04:24:04.051980 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:24:04.051995 
32360 solver.cpp:244]     Train net output #1: loss = 0.000464201 (* 1 = 0.000464201 loss)\nI0823 04:24:04.155848 32360 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0823 04:26:22.543170 32360 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0823 04:27:44.367988 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70632\nI0823 04:27:44.368350 32360 solver.cpp:404]     Test net output #1: loss = 1.68818 (* 1 = 1.68818 loss)\nI0823 04:27:45.681596 32360 solver.cpp:228] Iteration 71800, loss = 0.000411238\nI0823 04:27:45.681635 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:27:45.681651 32360 solver.cpp:244]     Train net output #1: loss = 0.000411206 (* 1 = 0.000411206 loss)\nI0823 04:27:45.792618 32360 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0823 04:30:04.111202 32360 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0823 04:31:25.888638 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70404\nI0823 04:31:25.889034 32360 solver.cpp:404]     Test net output #1: loss = 1.69478 (* 1 = 1.69478 loss)\nI0823 04:31:27.200709 32360 solver.cpp:228] Iteration 71900, loss = 0.000429869\nI0823 04:31:27.200752 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:31:27.200767 32360 solver.cpp:244]     Train net output #1: loss = 0.000429837 (* 1 = 0.000429837 loss)\nI0823 04:31:27.306972 32360 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0823 04:33:45.543370 32360 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0823 04:35:07.371315 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70752\nI0823 04:35:07.371697 32360 solver.cpp:404]     Test net output #1: loss = 1.68501 (* 1 = 1.68501 loss)\nI0823 04:35:08.684394 32360 solver.cpp:228] Iteration 72000, loss = 0.000432865\nI0823 04:35:08.684435 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:35:08.684449 32360 solver.cpp:244]     Train net output #1: loss = 0.000432833 (* 1 = 0.000432833 loss)\nI0823 04:35:08.797511 
32360 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0823 04:37:27.040164 32360 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0823 04:38:48.878607 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70624\nI0823 04:38:48.878994 32360 solver.cpp:404]     Test net output #1: loss = 1.69276 (* 1 = 1.69276 loss)\nI0823 04:38:50.190831 32360 solver.cpp:228] Iteration 72100, loss = 0.000435685\nI0823 04:38:50.190871 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:38:50.190886 32360 solver.cpp:244]     Train net output #1: loss = 0.000435653 (* 1 = 0.000435653 loss)\nI0823 04:38:50.299340 32360 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0823 04:41:08.622669 32360 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0823 04:42:30.408113 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70824\nI0823 04:42:30.408485 32360 solver.cpp:404]     Test net output #1: loss = 1.68394 (* 1 = 1.68394 loss)\nI0823 04:42:31.720495 32360 solver.cpp:228] Iteration 72200, loss = 0.000447831\nI0823 04:42:31.720536 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:42:31.720552 32360 solver.cpp:244]     Train net output #1: loss = 0.000447799 (* 1 = 0.000447799 loss)\nI0823 04:42:31.832501 32360 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0823 04:44:50.048020 32360 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0823 04:46:11.817807 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70628\nI0823 04:46:11.818188 32360 solver.cpp:404]     Test net output #1: loss = 1.69245 (* 1 = 1.69245 loss)\nI0823 04:46:13.129482 32360 solver.cpp:228] Iteration 72300, loss = 0.000348639\nI0823 04:46:13.129520 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:46:13.129535 32360 solver.cpp:244]     Train net output #1: loss = 0.000348607 (* 1 = 0.000348607 loss)\nI0823 04:46:13.236337 32360 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0823 04:48:31.548144 32360 solver.cpp:337] Iteration 
72400, Testing net (#0)\nI0823 04:49:53.294651 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70924\nI0823 04:49:53.295048 32360 solver.cpp:404]     Test net output #1: loss = 1.68457 (* 1 = 1.68457 loss)\nI0823 04:49:54.608232 32360 solver.cpp:228] Iteration 72400, loss = 0.000310096\nI0823 04:49:54.608268 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:49:54.608284 32360 solver.cpp:244]     Train net output #1: loss = 0.000310064 (* 1 = 0.000310064 loss)\nI0823 04:49:54.714361 32360 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0823 04:52:12.973429 32360 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0823 04:53:34.713858 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70832\nI0823 04:53:34.714221 32360 solver.cpp:404]     Test net output #1: loss = 1.69339 (* 1 = 1.69339 loss)\nI0823 04:53:36.027199 32360 solver.cpp:228] Iteration 72500, loss = 0.000354896\nI0823 04:53:36.027238 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:53:36.027253 32360 solver.cpp:244]     Train net output #1: loss = 0.000354864 (* 1 = 0.000354864 loss)\nI0823 04:53:36.133226 32360 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0823 04:55:54.510114 32360 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0823 04:57:16.259742 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71004\nI0823 04:57:16.260104 32360 solver.cpp:404]     Test net output #1: loss = 1.68514 (* 1 = 1.68514 loss)\nI0823 04:57:17.573220 32360 solver.cpp:228] Iteration 72600, loss = 0.00038574\nI0823 04:57:17.573261 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:57:17.573276 32360 solver.cpp:244]     Train net output #1: loss = 0.000385708 (* 1 = 0.000385708 loss)\nI0823 04:57:17.680270 32360 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0823 04:59:36.023296 32360 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0823 05:00:57.797332 32360 solver.cpp:404]     Test net output #0: accuracy = 
0.70892\nI0823 05:00:57.797715 32360 solver.cpp:404]     Test net output #1: loss = 1.69474 (* 1 = 1.69474 loss)\nI0823 05:00:59.111593 32360 solver.cpp:228] Iteration 72700, loss = 0.000428755\nI0823 05:00:59.111630 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:00:59.111646 32360 solver.cpp:244]     Train net output #1: loss = 0.000428723 (* 1 = 0.000428723 loss)\nI0823 05:00:59.223670 32360 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0823 05:03:17.078367 32360 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0823 05:04:38.837538 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71104\nI0823 05:04:38.837934 32360 solver.cpp:404]     Test net output #1: loss = 1.68691 (* 1 = 1.68691 loss)\nI0823 05:04:40.150945 32360 solver.cpp:228] Iteration 72800, loss = 0.000363731\nI0823 05:04:40.150986 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:04:40.151001 32360 solver.cpp:244]     Train net output #1: loss = 0.000363699 (* 1 = 0.000363699 loss)\nI0823 05:04:40.255265 32360 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0823 05:06:58.515633 32360 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0823 05:08:20.202559 32360 solver.cpp:404]     Test net output #0: accuracy = 0.70932\nI0823 05:08:20.202929 32360 solver.cpp:404]     Test net output #1: loss = 1.69613 (* 1 = 1.69613 loss)\nI0823 05:08:21.514771 32360 solver.cpp:228] Iteration 72900, loss = 0.000368901\nI0823 05:08:21.514809 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:08:21.514823 32360 solver.cpp:244]     Train net output #1: loss = 0.000368869 (* 1 = 0.000368869 loss)\nI0823 05:08:21.623872 32360 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0823 05:10:39.893115 32360 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0823 05:12:01.537248 32360 solver.cpp:404]     Test net output #0: accuracy = 0.711\nI0823 05:12:01.537531 32360 solver.cpp:404]     Test net output #1: loss = 1.68809 (* 1 = 1.68809 
loss)\nI0823 05:12:02.850375 32360 solver.cpp:228] Iteration 73000, loss = 0.000493218\nI0823 05:12:02.850415 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:12:02.850430 32360 solver.cpp:244]     Train net output #1: loss = 0.000493186 (* 1 = 0.000493186 loss)\nI0823 05:12:02.957180 32360 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0823 05:14:21.375181 32360 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0823 05:15:43.104912 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71\nI0823 05:15:43.105206 32360 solver.cpp:404]     Test net output #1: loss = 1.69754 (* 1 = 1.69754 loss)\nI0823 05:15:44.417995 32360 solver.cpp:228] Iteration 73100, loss = 0.000396807\nI0823 05:15:44.418038 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:15:44.418053 32360 solver.cpp:244]     Train net output #1: loss = 0.000396775 (* 1 = 0.000396775 loss)\nI0823 05:15:44.526259 32360 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0823 05:18:02.699694 32360 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0823 05:19:24.392020 32360 solver.cpp:404]     Test net output #0: accuracy = 0.711\nI0823 05:19:24.392310 32360 solver.cpp:404]     Test net output #1: loss = 1.68934 (* 1 = 1.68934 loss)\nI0823 05:19:25.703963 32360 solver.cpp:228] Iteration 73200, loss = 0.000427603\nI0823 05:19:25.704007 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:19:25.704022 32360 solver.cpp:244]     Train net output #1: loss = 0.000427571 (* 1 = 0.000427571 loss)\nI0823 05:19:25.816424 32360 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0823 05:21:44.049240 32360 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0823 05:23:05.741955 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71056\nI0823 05:23:05.742307 32360 solver.cpp:404]     Test net output #1: loss = 1.69929 (* 1 = 1.69929 loss)\nI0823 05:23:07.053747 32360 solver.cpp:228] Iteration 73300, loss = 0.000397901\nI0823 05:23:07.053793 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:23:07.053810 32360 solver.cpp:244]     Train net output #1: loss = 0.000397869 (* 1 = 0.000397869 loss)\nI0823 05:23:07.164026 32360 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0823 05:25:25.351253 32360 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0823 05:26:46.850165 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71132\nI0823 05:26:46.850492 32360 solver.cpp:404]     Test net output #1: loss = 1.69073 (* 1 = 1.69073 loss)\nI0823 05:26:48.162494 32360 solver.cpp:228] Iteration 73400, loss = 0.00035691\nI0823 05:26:48.162539 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:26:48.162555 32360 solver.cpp:244]     Train net output #1: loss = 0.000356879 (* 1 = 0.000356879 loss)\nI0823 05:26:48.265491 32360 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0823 05:29:06.400398 32360 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0823 05:30:27.742852 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71056\nI0823 05:30:27.743137 32360 solver.cpp:404]     Test net output #1: loss = 1.70014 (* 1 = 1.70014 loss)\nI0823 05:30:29.056205 32360 solver.cpp:228] Iteration 73500, loss = 0.000325564\nI0823 05:30:29.056248 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:30:29.056264 32360 solver.cpp:244]     Train net output #1: loss = 0.000325533 (* 1 = 0.000325533 loss)\nI0823 05:30:29.161582 32360 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0823 05:32:47.387655 32360 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0823 05:34:08.731114 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7112\nI0823 05:34:08.731413 32360 solver.cpp:404]     Test net output #1: loss = 1.69147 (* 1 = 1.69147 loss)\nI0823 05:34:10.043257 32360 solver.cpp:228] Iteration 73600, loss = 0.000422886\nI0823 05:34:10.043300 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:34:10.043318 32360 solver.cpp:244]     Train net 
output #1: loss = 0.000422854 (* 1 = 0.000422854 loss)\nI0823 05:34:10.154031 32360 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0823 05:36:28.353145 32360 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0823 05:37:49.675801 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71132\nI0823 05:37:49.676069 32360 solver.cpp:404]     Test net output #1: loss = 1.7012 (* 1 = 1.7012 loss)\nI0823 05:37:50.987859 32360 solver.cpp:228] Iteration 73700, loss = 0.00037486\nI0823 05:37:50.987902 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:37:50.987918 32360 solver.cpp:244]     Train net output #1: loss = 0.000374829 (* 1 = 0.000374829 loss)\nI0823 05:37:51.103153 32360 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0823 05:40:09.322742 32360 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0823 05:41:30.724591 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0823 05:41:30.724906 32360 solver.cpp:404]     Test net output #1: loss = 1.69294 (* 1 = 1.69294 loss)\nI0823 05:41:32.036501 32360 solver.cpp:228] Iteration 73800, loss = 0.000380563\nI0823 05:41:32.036546 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:41:32.036563 32360 solver.cpp:244]     Train net output #1: loss = 0.000380531 (* 1 = 0.000380531 loss)\nI0823 05:41:32.144037 32360 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0823 05:43:49.943744 32360 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0823 05:45:11.603293 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71104\nI0823 05:45:11.603586 32360 solver.cpp:404]     Test net output #1: loss = 1.70291 (* 1 = 1.70291 loss)\nI0823 05:45:12.915513 32360 solver.cpp:228] Iteration 73900, loss = 0.000375329\nI0823 05:45:12.915556 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:45:12.915572 32360 solver.cpp:244]     Train net output #1: loss = 0.000375297 (* 1 = 0.000375297 loss)\nI0823 05:45:13.024355 32360 sgd_solver.cpp:166] Iteration 
73900, lr = 0.0035\nI0823 05:47:30.805444 32360 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0823 05:48:52.511296 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0823 05:48:52.511581 32360 solver.cpp:404]     Test net output #1: loss = 1.69517 (* 1 = 1.69517 loss)\nI0823 05:48:53.823683 32360 solver.cpp:228] Iteration 74000, loss = 0.000401366\nI0823 05:48:53.823726 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:48:53.823742 32360 solver.cpp:244]     Train net output #1: loss = 0.000401334 (* 1 = 0.000401334 loss)\nI0823 05:48:53.933600 32360 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0823 05:51:12.079710 32360 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0823 05:52:33.695528 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0823 05:52:33.695842 32360 solver.cpp:404]     Test net output #1: loss = 1.70435 (* 1 = 1.70435 loss)\nI0823 05:52:35.009903 32360 solver.cpp:228] Iteration 74100, loss = 0.000416313\nI0823 05:52:35.009951 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:52:35.009974 32360 solver.cpp:244]     Train net output #1: loss = 0.000416282 (* 1 = 0.000416282 loss)\nI0823 05:52:35.118857 32360 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0823 05:54:53.342661 32360 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0823 05:56:14.949084 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0823 05:56:14.949383 32360 solver.cpp:404]     Test net output #1: loss = 1.69545 (* 1 = 1.69545 loss)\nI0823 05:56:16.262768 32360 solver.cpp:228] Iteration 74200, loss = 0.000360398\nI0823 05:56:16.262814 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:56:16.262830 32360 solver.cpp:244]     Train net output #1: loss = 0.000360366 (* 1 = 0.000360366 loss)\nI0823 05:56:16.369796 32360 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0823 05:58:34.666749 32360 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0823 
05:59:56.304908 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71136\nI0823 05:59:56.305187 32360 solver.cpp:404]     Test net output #1: loss = 1.70498 (* 1 = 1.70498 loss)\nI0823 05:59:57.616593 32360 solver.cpp:228] Iteration 74300, loss = 0.000367305\nI0823 05:59:57.616639 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:59:57.616655 32360 solver.cpp:244]     Train net output #1: loss = 0.000367273 (* 1 = 0.000367273 loss)\nI0823 05:59:57.729045 32360 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0823 06:02:15.475978 32360 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0823 06:03:36.970923 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0823 06:03:36.971210 32360 solver.cpp:404]     Test net output #1: loss = 1.6962 (* 1 = 1.6962 loss)\nI0823 06:03:38.282907 32360 solver.cpp:228] Iteration 74400, loss = 0.000322743\nI0823 06:03:38.282953 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:03:38.282966 32360 solver.cpp:244]     Train net output #1: loss = 0.000322711 (* 1 = 0.000322711 loss)\nI0823 06:03:38.390300 32360 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0823 06:05:56.643882 32360 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0823 06:07:17.976025 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0823 06:07:17.976311 32360 solver.cpp:404]     Test net output #1: loss = 1.70564 (* 1 = 1.70564 loss)\nI0823 06:07:19.288708 32360 solver.cpp:228] Iteration 74500, loss = 0.000393399\nI0823 06:07:19.288753 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:07:19.288769 32360 solver.cpp:244]     Train net output #1: loss = 0.000393367 (* 1 = 0.000393367 loss)\nI0823 06:07:19.401401 32360 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0823 06:09:37.627118 32360 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0823 06:10:59.160420 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0823 06:10:59.160706 32360 
solver.cpp:404]     Test net output #1: loss = 1.69664 (* 1 = 1.69664 loss)\nI0823 06:11:00.472234 32360 solver.cpp:228] Iteration 74600, loss = 0.000407386\nI0823 06:11:00.472280 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:11:00.472296 32360 solver.cpp:244]     Train net output #1: loss = 0.000407355 (* 1 = 0.000407355 loss)\nI0823 06:11:00.581233 32360 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0823 06:13:19.002835 32360 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0823 06:14:40.583884 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 06:14:40.584166 32360 solver.cpp:404]     Test net output #1: loss = 1.70608 (* 1 = 1.70608 loss)\nI0823 06:14:41.896450 32360 solver.cpp:228] Iteration 74700, loss = 0.000391338\nI0823 06:14:41.896495 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:14:41.896512 32360 solver.cpp:244]     Train net output #1: loss = 0.000391306 (* 1 = 0.000391306 loss)\nI0823 06:14:42.005468 32360 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0823 06:17:00.283265 32360 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0823 06:18:21.496609 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71264\nI0823 06:18:21.496896 32360 solver.cpp:404]     Test net output #1: loss = 1.69709 (* 1 = 1.69709 loss)\nI0823 06:18:22.810870 32360 solver.cpp:228] Iteration 74800, loss = 0.000364582\nI0823 06:18:22.810920 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:18:22.810935 32360 solver.cpp:244]     Train net output #1: loss = 0.00036455 (* 1 = 0.00036455 loss)\nI0823 06:18:22.920686 32360 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0823 06:20:41.299123 32360 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0823 06:22:02.878784 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71244\nI0823 06:22:02.879079 32360 solver.cpp:404]     Test net output #1: loss = 1.70648 (* 1 = 1.70648 loss)\nI0823 06:22:04.191323 32360 
solver.cpp:228] Iteration 74900, loss = 0.000378439\nI0823 06:22:04.191364 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:22:04.191380 32360 solver.cpp:244]     Train net output #1: loss = 0.000378407 (* 1 = 0.000378407 loss)\nI0823 06:22:04.305176 32360 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0823 06:24:22.126627 32360 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0823 06:25:43.779075 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0823 06:25:43.779397 32360 solver.cpp:404]     Test net output #1: loss = 1.69757 (* 1 = 1.69757 loss)\nI0823 06:25:45.091851 32360 solver.cpp:228] Iteration 75000, loss = 0.000386686\nI0823 06:25:45.091893 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:25:45.091909 32360 solver.cpp:244]     Train net output #1: loss = 0.000386654 (* 1 = 0.000386654 loss)\nI0823 06:25:45.195197 32360 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0823 06:28:03.407157 32360 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0823 06:29:24.607558 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 06:29:24.607874 32360 solver.cpp:404]     Test net output #1: loss = 1.70688 (* 1 = 1.70688 loss)\nI0823 06:29:25.919435 32360 solver.cpp:228] Iteration 75100, loss = 0.000357961\nI0823 06:29:25.919477 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:29:25.919492 32360 solver.cpp:244]     Train net output #1: loss = 0.00035793 (* 1 = 0.00035793 loss)\nI0823 06:29:26.033277 32360 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0823 06:31:44.334374 32360 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0823 06:33:05.941882 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71224\nI0823 06:33:05.942250 32360 solver.cpp:404]     Test net output #1: loss = 1.6979 (* 1 = 1.6979 loss)\nI0823 06:33:07.253690 32360 solver.cpp:228] Iteration 75200, loss = 0.000297017\nI0823 06:33:07.253731 32360 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0823 06:33:07.253751 32360 solver.cpp:244]     Train net output #1: loss = 0.000296985 (* 1 = 0.000296985 loss)\nI0823 06:33:07.359990 32360 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0823 06:35:25.514209 32360 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0823 06:36:46.987116 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 06:36:46.987447 32360 solver.cpp:404]     Test net output #1: loss = 1.70772 (* 1 = 1.70772 loss)\nI0823 06:36:48.298868 32360 solver.cpp:228] Iteration 75300, loss = 0.000380655\nI0823 06:36:48.298909 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:36:48.298924 32360 solver.cpp:244]     Train net output #1: loss = 0.000380623 (* 1 = 0.000380623 loss)\nI0823 06:36:48.415647 32360 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0823 06:39:06.645756 32360 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0823 06:40:27.885859 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7116\nI0823 06:40:27.886193 32360 solver.cpp:404]     Test net output #1: loss = 1.69885 (* 1 = 1.69885 loss)\nI0823 06:40:29.198218 32360 solver.cpp:228] Iteration 75400, loss = 0.000371533\nI0823 06:40:29.198261 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:40:29.198277 32360 solver.cpp:244]     Train net output #1: loss = 0.000371501 (* 1 = 0.000371501 loss)\nI0823 06:40:29.311229 32360 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0823 06:42:46.981693 32360 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0823 06:44:08.714049 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI0823 06:44:08.714339 32360 solver.cpp:404]     Test net output #1: loss = 1.70797 (* 1 = 1.70797 loss)\nI0823 06:44:10.027470 32360 solver.cpp:228] Iteration 75500, loss = 0.000403776\nI0823 06:44:10.027515 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:44:10.027530 32360 solver.cpp:244]     Train net output #1: loss = 0.000403744 (* 1 = 
0.000403744 loss)\nI0823 06:44:10.137279 32360 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0823 06:46:27.906942 32360 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0823 06:47:49.525544 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 06:47:49.525835 32360 solver.cpp:404]     Test net output #1: loss = 1.69872 (* 1 = 1.69872 loss)\nI0823 06:47:50.838742 32360 solver.cpp:228] Iteration 75600, loss = 0.000394733\nI0823 06:47:50.838786 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:47:50.838802 32360 solver.cpp:244]     Train net output #1: loss = 0.000394701 (* 1 = 0.000394701 loss)\nI0823 06:47:50.944079 32360 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0823 06:50:09.267594 32360 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0823 06:51:30.924124 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 06:51:30.924435 32360 solver.cpp:404]     Test net output #1: loss = 1.70798 (* 1 = 1.70798 loss)\nI0823 06:51:32.235731 32360 solver.cpp:228] Iteration 75700, loss = 0.00039656\nI0823 06:51:32.235780 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:51:32.235797 32360 solver.cpp:244]     Train net output #1: loss = 0.000396528 (* 1 = 0.000396528 loss)\nI0823 06:51:32.349148 32360 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0823 06:53:50.642149 32360 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0823 06:55:12.278775 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71216\nI0823 06:55:12.279080 32360 solver.cpp:404]     Test net output #1: loss = 1.69845 (* 1 = 1.69845 loss)\nI0823 06:55:13.592341 32360 solver.cpp:228] Iteration 75800, loss = 0.000385273\nI0823 06:55:13.592386 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:55:13.592401 32360 solver.cpp:244]     Train net output #1: loss = 0.000385241 (* 1 = 0.000385241 loss)\nI0823 06:55:13.695318 32360 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0823 
06:57:31.937712 32360 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0823 06:58:53.645804 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7116\nI0823 06:58:53.646085 32360 solver.cpp:404]     Test net output #1: loss = 1.70816 (* 1 = 1.70816 loss)\nI0823 06:58:54.959092 32360 solver.cpp:228] Iteration 75900, loss = 0.000390708\nI0823 06:58:54.959136 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:58:54.959152 32360 solver.cpp:244]     Train net output #1: loss = 0.000390676 (* 1 = 0.000390676 loss)\nI0823 06:58:55.067602 32360 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0823 07:01:13.459493 32360 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0823 07:02:35.146719 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0823 07:02:35.147088 32360 solver.cpp:404]     Test net output #1: loss = 1.69909 (* 1 = 1.69909 loss)\nI0823 07:02:36.459805 32360 solver.cpp:228] Iteration 76000, loss = 0.000264152\nI0823 07:02:36.459851 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:02:36.459867 32360 solver.cpp:244]     Train net output #1: loss = 0.00026412 (* 1 = 0.00026412 loss)\nI0823 07:02:36.570803 32360 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0823 07:04:54.839720 32360 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0823 07:06:16.568804 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0823 07:06:16.569089 32360 solver.cpp:404]     Test net output #1: loss = 1.70872 (* 1 = 1.70872 loss)\nI0823 07:06:17.882488 32360 solver.cpp:228] Iteration 76100, loss = 0.000377299\nI0823 07:06:17.882532 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:06:17.882549 32360 solver.cpp:244]     Train net output #1: loss = 0.000377267 (* 1 = 0.000377267 loss)\nI0823 07:06:17.996546 32360 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0823 07:08:35.845264 32360 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0823 07:09:57.586982 32360 solver.cpp:404]   
  Test net output #0: accuracy = 0.71172\nI0823 07:09:57.587270 32360 solver.cpp:404]     Test net output #1: loss = 1.69947 (* 1 = 1.69947 loss)\nI0823 07:09:58.900414 32360 solver.cpp:228] Iteration 76200, loss = 0.00038273\nI0823 07:09:58.900460 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:09:58.900475 32360 solver.cpp:244]     Train net output #1: loss = 0.000382698 (* 1 = 0.000382698 loss)\nI0823 07:09:59.008636 32360 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0823 07:12:17.301712 32360 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0823 07:13:39.035518 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71204\nI0823 07:13:39.035827 32360 solver.cpp:404]     Test net output #1: loss = 1.7088 (* 1 = 1.7088 loss)\nI0823 07:13:40.349397 32360 solver.cpp:228] Iteration 76300, loss = 0.000374693\nI0823 07:13:40.349442 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:13:40.349457 32360 solver.cpp:244]     Train net output #1: loss = 0.000374661 (* 1 = 0.000374661 loss)\nI0823 07:13:40.456339 32360 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0823 07:15:58.735723 32360 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0823 07:17:20.457420 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0823 07:17:20.457751 32360 solver.cpp:404]     Test net output #1: loss = 1.69974 (* 1 = 1.69974 loss)\nI0823 07:17:21.769986 32360 solver.cpp:228] Iteration 76400, loss = 0.000454561\nI0823 07:17:21.770030 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:17:21.770045 32360 solver.cpp:244]     Train net output #1: loss = 0.000454529 (* 1 = 0.000454529 loss)\nI0823 07:17:21.879382 32360 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0823 07:19:40.219472 32360 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0823 07:21:01.738122 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0823 07:21:01.738420 32360 solver.cpp:404]     Test net output #1: loss = 
1.70903 (* 1 = 1.70903 loss)\nI0823 07:21:03.051107 32360 solver.cpp:228] Iteration 76500, loss = 0.000347273\nI0823 07:21:03.051148 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:21:03.051164 32360 solver.cpp:244]     Train net output #1: loss = 0.000347241 (* 1 = 0.000347241 loss)\nI0823 07:21:03.159611 32360 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0823 07:23:21.453541 32360 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0823 07:24:42.796957 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71168\nI0823 07:24:42.797243 32360 solver.cpp:404]     Test net output #1: loss = 1.70007 (* 1 = 1.70007 loss)\nI0823 07:24:44.110153 32360 solver.cpp:228] Iteration 76600, loss = 0.000339666\nI0823 07:24:44.110196 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:24:44.110213 32360 solver.cpp:244]     Train net output #1: loss = 0.000339635 (* 1 = 0.000339635 loss)\nI0823 07:24:44.218173 32360 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0823 07:27:02.640002 32360 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0823 07:28:24.137255 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71168\nI0823 07:28:24.137562 32360 solver.cpp:404]     Test net output #1: loss = 1.70894 (* 1 = 1.70894 loss)\nI0823 07:28:25.451150 32360 solver.cpp:228] Iteration 76700, loss = 0.000428355\nI0823 07:28:25.451195 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:28:25.451210 32360 solver.cpp:244]     Train net output #1: loss = 0.000428323 (* 1 = 0.000428323 loss)\nI0823 07:28:25.556047 32360 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0823 07:30:43.933442 32360 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0823 07:32:05.420047 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71168\nI0823 07:32:05.420370 32360 solver.cpp:404]     Test net output #1: loss = 1.69949 (* 1 = 1.69949 loss)\nI0823 07:32:06.734484 32360 solver.cpp:228] Iteration 76800, loss = 
0.000345027\nI0823 07:32:06.734529 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:32:06.734545 32360 solver.cpp:244]     Train net output #1: loss = 0.000344995 (* 1 = 0.000344995 loss)\nI0823 07:32:06.847437 32360 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0823 07:34:25.232333 32360 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0823 07:35:46.796206 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI0823 07:35:46.796507 32360 solver.cpp:404]     Test net output #1: loss = 1.70845 (* 1 = 1.70845 loss)\nI0823 07:35:48.109468 32360 solver.cpp:228] Iteration 76900, loss = 0.000455355\nI0823 07:35:48.109513 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:35:48.109529 32360 solver.cpp:244]     Train net output #1: loss = 0.000455323 (* 1 = 0.000455323 loss)\nI0823 07:35:48.214262 32360 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0823 07:38:06.558373 32360 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0823 07:39:28.181726 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71132\nI0823 07:39:28.182011 32360 solver.cpp:404]     Test net output #1: loss = 1.69951 (* 1 = 1.69951 loss)\nI0823 07:39:29.494913 32360 solver.cpp:228] Iteration 77000, loss = 0.00035726\nI0823 07:39:29.494958 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:39:29.494974 32360 solver.cpp:244]     Train net output #1: loss = 0.000357228 (* 1 = 0.000357228 loss)\nI0823 07:39:29.603291 32360 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0823 07:41:47.937897 32360 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0823 07:43:09.414366 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71112\nI0823 07:43:09.414680 32360 solver.cpp:404]     Test net output #1: loss = 1.70888 (* 1 = 1.70888 loss)\nI0823 07:43:10.728279 32360 solver.cpp:228] Iteration 77100, loss = 0.000383899\nI0823 07:43:10.728324 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:43:10.728340 
32360 solver.cpp:244]     Train net output #1: loss = 0.000383867 (* 1 = 0.000383867 loss)\nI0823 07:43:10.838343 32360 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0823 07:45:29.058979 32360 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0823 07:46:50.678251 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71156\nI0823 07:46:50.678568 32360 solver.cpp:404]     Test net output #1: loss = 1.69996 (* 1 = 1.69996 loss)\nI0823 07:46:51.990188 32360 solver.cpp:228] Iteration 77200, loss = 0.000341867\nI0823 07:46:51.990227 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:46:51.990243 32360 solver.cpp:244]     Train net output #1: loss = 0.000341835 (* 1 = 0.000341835 loss)\nI0823 07:46:52.103230 32360 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0823 07:49:10.471277 32360 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0823 07:50:32.092725 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI0823 07:50:32.093020 32360 solver.cpp:404]     Test net output #1: loss = 1.70884 (* 1 = 1.70884 loss)\nI0823 07:50:33.404867 32360 solver.cpp:228] Iteration 77300, loss = 0.0003472\nI0823 07:50:33.404906 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:50:33.404922 32360 solver.cpp:244]     Train net output #1: loss = 0.000347169 (* 1 = 0.000347169 loss)\nI0823 07:50:33.506379 32360 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0823 07:52:51.763348 32360 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0823 07:54:13.094868 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71172\nI0823 07:54:13.095134 32360 solver.cpp:404]     Test net output #1: loss = 1.69913 (* 1 = 1.69913 loss)\nI0823 07:54:14.407169 32360 solver.cpp:228] Iteration 77400, loss = 0.000356747\nI0823 07:54:14.407212 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:54:14.407227 32360 solver.cpp:244]     Train net output #1: loss = 0.000356715 (* 1 = 0.000356715 loss)\nI0823 07:54:14.519193 
32360 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0823 07:56:32.888636 32360 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0823 07:57:54.255975 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI0823 07:57:54.256290 32360 solver.cpp:404]     Test net output #1: loss = 1.70837 (* 1 = 1.70837 loss)\nI0823 07:57:55.569061 32360 solver.cpp:228] Iteration 77500, loss = 0.000389682\nI0823 07:57:55.569103 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:57:55.569118 32360 solver.cpp:244]     Train net output #1: loss = 0.00038965 (* 1 = 0.00038965 loss)\nI0823 07:57:55.673821 32360 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0823 08:00:13.953533 32360 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0823 08:01:35.423738 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71172\nI0823 08:01:35.424129 32360 solver.cpp:404]     Test net output #1: loss = 1.69905 (* 1 = 1.69905 loss)\nI0823 08:01:36.736397 32360 solver.cpp:228] Iteration 77600, loss = 0.000398222\nI0823 08:01:36.736439 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:01:36.736461 32360 solver.cpp:244]     Train net output #1: loss = 0.00039819 (* 1 = 0.00039819 loss)\nI0823 08:01:36.845636 32360 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0823 08:03:55.133124 32360 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0823 08:05:16.907853 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0823 08:05:16.908222 32360 solver.cpp:404]     Test net output #1: loss = 1.70804 (* 1 = 1.70804 loss)\nI0823 08:05:18.221140 32360 solver.cpp:228] Iteration 77700, loss = 0.00036481\nI0823 08:05:18.221182 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:05:18.221199 32360 solver.cpp:244]     Train net output #1: loss = 0.000364778 (* 1 = 0.000364778 loss)\nI0823 08:05:18.328027 32360 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0823 08:07:36.699568 32360 solver.cpp:337] Iteration 77800, 
Testing net (#0)\nI0823 08:08:58.468729 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 08:08:58.469094 32360 solver.cpp:404]     Test net output #1: loss = 1.69881 (* 1 = 1.69881 loss)\nI0823 08:08:59.782030 32360 solver.cpp:228] Iteration 77800, loss = 0.000429567\nI0823 08:08:59.782073 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:08:59.782088 32360 solver.cpp:244]     Train net output #1: loss = 0.000429535 (* 1 = 0.000429535 loss)\nI0823 08:08:59.892976 32360 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0823 08:11:18.252689 32360 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0823 08:12:40.025739 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 08:12:40.026124 32360 solver.cpp:404]     Test net output #1: loss = 1.70834 (* 1 = 1.70834 loss)\nI0823 08:12:41.339361 32360 solver.cpp:228] Iteration 77900, loss = 0.000381844\nI0823 08:12:41.339401 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:12:41.339416 32360 solver.cpp:244]     Train net output #1: loss = 0.000381813 (* 1 = 0.000381813 loss)\nI0823 08:12:41.450196 32360 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0823 08:14:59.822041 32360 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0823 08:16:20.635273 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 08:16:20.635645 32360 solver.cpp:404]     Test net output #1: loss = 1.69903 (* 1 = 1.69903 loss)\nI0823 08:16:21.944933 32360 solver.cpp:228] Iteration 78000, loss = 0.000417978\nI0823 08:16:21.944983 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:16:21.945008 32360 solver.cpp:244]     Train net output #1: loss = 0.000417946 (* 1 = 0.000417946 loss)\nI0823 08:16:22.059232 32360 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0823 08:18:39.630990 32360 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0823 08:20:00.412647 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 
08:20:00.412959 32360 solver.cpp:404]     Test net output #1: loss = 1.70855 (* 1 = 1.70855 loss)\nI0823 08:20:01.724853 32360 solver.cpp:228] Iteration 78100, loss = 0.000384862\nI0823 08:20:01.724897 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:20:01.724920 32360 solver.cpp:244]     Train net output #1: loss = 0.00038483 (* 1 = 0.00038483 loss)\nI0823 08:20:01.831784 32360 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0823 08:22:19.391820 32360 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0823 08:23:40.161165 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0823 08:23:40.161479 32360 solver.cpp:404]     Test net output #1: loss = 1.69954 (* 1 = 1.69954 loss)\nI0823 08:23:41.472735 32360 solver.cpp:228] Iteration 78200, loss = 0.000302083\nI0823 08:23:41.472779 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:23:41.472795 32360 solver.cpp:244]     Train net output #1: loss = 0.000302051 (* 1 = 0.000302051 loss)\nI0823 08:23:41.581595 32360 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0823 08:25:59.150172 32360 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0823 08:27:19.926625 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71184\nI0823 08:27:19.926955 32360 solver.cpp:404]     Test net output #1: loss = 1.70922 (* 1 = 1.70922 loss)\nI0823 08:27:21.238181 32360 solver.cpp:228] Iteration 78300, loss = 0.000380434\nI0823 08:27:21.238225 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:27:21.238241 32360 solver.cpp:244]     Train net output #1: loss = 0.000380402 (* 1 = 0.000380402 loss)\nI0823 08:27:21.351073 32360 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0823 08:29:38.977005 32360 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0823 08:30:59.746676 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI0823 08:30:59.747030 32360 solver.cpp:404]     Test net output #1: loss = 1.69975 (* 1 = 1.69975 loss)\nI0823 
08:31:01.058501 32360 solver.cpp:228] Iteration 78400, loss = 0.000451744\nI0823 08:31:01.058544 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:31:01.058560 32360 solver.cpp:244]     Train net output #1: loss = 0.000451712 (* 1 = 0.000451712 loss)\nI0823 08:31:01.168774 32360 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0823 08:33:19.136011 32360 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0823 08:34:39.912122 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0823 08:34:39.912469 32360 solver.cpp:404]     Test net output #1: loss = 1.70911 (* 1 = 1.70911 loss)\nI0823 08:34:41.224292 32360 solver.cpp:228] Iteration 78500, loss = 0.000342883\nI0823 08:34:41.224341 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:34:41.224357 32360 solver.cpp:244]     Train net output #1: loss = 0.000342851 (* 1 = 0.000342851 loss)\nI0823 08:34:41.332808 32360 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0823 08:36:59.333442 32360 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0823 08:38:20.112360 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0823 08:38:20.112650 32360 solver.cpp:404]     Test net output #1: loss = 1.69995 (* 1 = 1.69995 loss)\nI0823 08:38:21.424259 32360 solver.cpp:228] Iteration 78600, loss = 0.000435897\nI0823 08:38:21.424309 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:38:21.424329 32360 solver.cpp:244]     Train net output #1: loss = 0.000435865 (* 1 = 0.000435865 loss)\nI0823 08:38:21.527561 32360 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0823 08:40:39.580260 32360 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0823 08:42:00.551188 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 08:42:00.551563 32360 solver.cpp:404]     Test net output #1: loss = 1.70942 (* 1 = 1.70942 loss)\nI0823 08:42:01.863972 32360 solver.cpp:228] Iteration 78700, loss = 0.000366518\nI0823 08:42:01.864017 32360 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:42:01.864034 32360 solver.cpp:244]     Train net output #1: loss = 0.000366487 (* 1 = 0.000366487 loss)\nI0823 08:42:01.976280 32360 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0823 08:44:20.045972 32360 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0823 08:45:40.831831 32360 solver.cpp:404]     Test net output #0: accuracy = 0.7122\nI0823 08:45:40.832130 32360 solver.cpp:404]     Test net output #1: loss = 1.69989 (* 1 = 1.69989 loss)\nI0823 08:45:42.144788 32360 solver.cpp:228] Iteration 78800, loss = 0.000358401\nI0823 08:45:42.144835 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:45:42.144860 32360 solver.cpp:244]     Train net output #1: loss = 0.000358369 (* 1 = 0.000358369 loss)\nI0823 08:45:42.258110 32360 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0823 08:48:00.317157 32360 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0823 08:49:21.085677 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 08:49:21.085973 32360 solver.cpp:404]     Test net output #1: loss = 1.70882 (* 1 = 1.70882 loss)\nI0823 08:49:22.398389 32360 solver.cpp:228] Iteration 78900, loss = 0.000291734\nI0823 08:49:22.398437 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:49:22.398460 32360 solver.cpp:244]     Train net output #1: loss = 0.000291702 (* 1 = 0.000291702 loss)\nI0823 08:49:22.508818 32360 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0823 08:51:40.119768 32360 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0823 08:53:00.888788 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71212\nI0823 08:53:00.889155 32360 solver.cpp:404]     Test net output #1: loss = 1.69937 (* 1 = 1.69937 loss)\nI0823 08:53:02.201452 32360 solver.cpp:228] Iteration 79000, loss = 0.00045339\nI0823 08:53:02.201500 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:53:02.201525 32360 solver.cpp:244]     Train net 
output #1: loss = 0.000453358 (* 1 = 0.000453358 loss)\nI0823 08:53:02.310914 32360 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0823 08:55:20.418118 32360 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0823 08:56:41.194443 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0823 08:56:41.194787 32360 solver.cpp:404]     Test net output #1: loss = 1.70861 (* 1 = 1.70861 loss)\nI0823 08:56:42.506867 32360 solver.cpp:228] Iteration 79100, loss = 0.000363344\nI0823 08:56:42.506917 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:56:42.506940 32360 solver.cpp:244]     Train net output #1: loss = 0.000363312 (* 1 = 0.000363312 loss)\nI0823 08:56:42.613373 32360 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0823 08:59:00.789479 32360 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0823 09:00:21.566483 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71204\nI0823 09:00:21.566833 32360 solver.cpp:404]     Test net output #1: loss = 1.69908 (* 1 = 1.69908 loss)\nI0823 09:00:22.879328 32360 solver.cpp:228] Iteration 79200, loss = 0.00042321\nI0823 09:00:22.879374 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:00:22.879398 32360 solver.cpp:244]     Train net output #1: loss = 0.000423178 (* 1 = 0.000423178 loss)\nI0823 09:00:22.986722 32360 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0823 09:02:41.049438 32360 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0823 09:04:01.833880 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71216\nI0823 09:04:01.834249 32360 solver.cpp:404]     Test net output #1: loss = 1.70836 (* 1 = 1.70836 loss)\nI0823 09:04:03.146500 32360 solver.cpp:228] Iteration 79300, loss = 0.000376995\nI0823 09:04:03.146544 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:04:03.146560 32360 solver.cpp:244]     Train net output #1: loss = 0.000376963 (* 1 = 0.000376963 loss)\nI0823 09:04:03.252521 32360 sgd_solver.cpp:166] Iteration 
79300, lr = 0.0035\nI0823 09:06:21.327152 32360 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0823 09:07:42.119318 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0823 09:07:42.119622 32360 solver.cpp:404]     Test net output #1: loss = 1.69894 (* 1 = 1.69894 loss)\nI0823 09:07:43.432198 32360 solver.cpp:228] Iteration 79400, loss = 0.000380675\nI0823 09:07:43.432241 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:07:43.432257 32360 solver.cpp:244]     Train net output #1: loss = 0.000380643 (* 1 = 0.000380643 loss)\nI0823 09:07:43.538683 32360 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0823 09:10:01.548923 32360 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0823 09:11:22.333757 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0823 09:11:22.334082 32360 solver.cpp:404]     Test net output #1: loss = 1.7087 (* 1 = 1.7087 loss)\nI0823 09:11:23.645387 32360 solver.cpp:228] Iteration 79500, loss = 0.000386941\nI0823 09:11:23.645432 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:11:23.645448 32360 solver.cpp:244]     Train net output #1: loss = 0.000386909 (* 1 = 0.000386909 loss)\nI0823 09:11:23.759337 32360 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0823 09:13:41.815886 32360 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0823 09:15:02.605965 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0823 09:15:02.606276 32360 solver.cpp:404]     Test net output #1: loss = 1.69909 (* 1 = 1.69909 loss)\nI0823 09:15:03.918248 32360 solver.cpp:228] Iteration 79600, loss = 0.000310543\nI0823 09:15:03.918292 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:15:03.918313 32360 solver.cpp:244]     Train net output #1: loss = 0.000310511 (* 1 = 0.000310511 loss)\nI0823 09:15:04.027637 32360 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0823 09:17:22.189512 32360 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0823 
09:18:42.970940 32360 solver.cpp:404]     Test net output #0: accuracy = 0.712\nI0823 09:18:42.971273 32360 solver.cpp:404]     Test net output #1: loss = 1.70796 (* 1 = 1.70796 loss)\nI0823 09:18:44.283495 32360 solver.cpp:228] Iteration 79700, loss = 0.000356007\nI0823 09:18:44.283540 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:18:44.283556 32360 solver.cpp:244]     Train net output #1: loss = 0.000355976 (* 1 = 0.000355976 loss)\nI0823 09:18:44.386813 32360 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0823 09:21:02.352186 32360 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0823 09:22:23.096168 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71188\nI0823 09:22:23.096604 32360 solver.cpp:404]     Test net output #1: loss = 1.69881 (* 1 = 1.69881 loss)\nI0823 09:22:24.408308 32360 solver.cpp:228] Iteration 79800, loss = 0.000463315\nI0823 09:22:24.408354 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:22:24.408370 32360 solver.cpp:244]     Train net output #1: loss = 0.000463284 (* 1 = 0.000463284 loss)\nI0823 09:22:24.520102 32360 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0823 09:24:42.684253 32360 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0823 09:26:03.357298 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71236\nI0823 09:26:03.357589 32360 solver.cpp:404]     Test net output #1: loss = 1.70827 (* 1 = 1.70827 loss)\nI0823 09:26:04.669579 32360 solver.cpp:228] Iteration 79900, loss = 0.000388068\nI0823 09:26:04.669623 32360 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:26:04.669639 32360 solver.cpp:244]     Train net output #1: loss = 0.000388036 (* 1 = 0.000388036 loss)\nI0823 09:26:04.781203 32360 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0823 09:28:22.944552 32360 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Tr10kTab1_iter_80000.caffemodel\nI0823 09:28:23.167196 32360 sgd_solver.cpp:333] Snapshotting 
solver state to binary proto file examples/sc/snapshots/lr35Tr10kTab1_iter_80000.solverstate\nI0823 09:28:23.612074 32360 solver.cpp:317] Iteration 80000, loss = 0.000372331\nI0823 09:28:23.612112 32360 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0823 09:29:44.257022 32360 solver.cpp:404]     Test net output #0: accuracy = 0.71204\nI0823 09:29:44.257340 32360 solver.cpp:404]     Test net output #1: loss = 1.69902 (* 1 = 1.69902 loss)\nI0823 09:29:44.257357 32360 solver.cpp:322] Optimization Done.\nI0823 09:29:49.582212 32360 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Tr20kTab1",
    "content": "I0821 08:27:21.013604 32551 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:27:21.016525 32551 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:27:21.017740 32551 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:27:21.019124 32551 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:27:21.020334 32551 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:27:21.021556 32551 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:27:21.022786 32551 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:27:21.024009 32551 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:27:21.025235 32551 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:27:21.442981 32551 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Tr20kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0821 08:27:21.447613 32551 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:27:21.466842 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:21.466923 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:21.468076 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:27:21.468138 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:27:21.468163 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:27:21.468183 32551 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:27:21.468202 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:27:21.468219 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:27:21.468238 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:27:21.468257 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:27:21.468277 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:27:21.468296 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:27:21.468315 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:27:21.468330 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:27:21.468350 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:27:21.468369 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:27:21.468389 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:27:21.468407 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:27:21.468425 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:27:21.468443 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:27:21.468462 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:27:21.468482 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:27:21.468513 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:27:21.468533 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:27:21.468559 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:27:21.468577 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:27:21.468596 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:27:21.468611 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:27:21.468631 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:27:21.468647 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:27:21.468664 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:27:21.468683 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:27:21.468713 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:27:21.468731 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:27:21.468750 32551 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:27:21.468766 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:27:21.468786 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:27:21.468804 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:27:21.468824 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:27:21.468842 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:27:21.468860 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:27:21.468878 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:27:21.468902 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:27:21.468919 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:27:21.468937 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:27:21.468955 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:27:21.468974 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:27:21.468992 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:27:21.469010 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:27:21.469027 32551 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:27:21.469045 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:27:21.469061 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:27:21.469079 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:27:21.469105 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:27:21.469125 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:27:21.469143 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:27:21.469161 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:27:21.469177 32551 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:27:21.470960 32551 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train20k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.9\nI0821 08:27:21.473067 32551 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:27:21.475574 32551 net.cpp:100] Creating Layer dataLayer\nI0821 08:27:21.475654 32551 net.cpp:408] dataLayer -> data_top\nI0821 08:27:21.475900 32551 net.cpp:408] dataLayer -> label\nI0821 08:27:21.476032 32551 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:27:21.565357 32556 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train20k_lmdb\nI0821 08:27:21.565879 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:21.573057 32551 net.cpp:150] Setting up dataLayer\nI0821 08:27:21.573143 32551 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:27:21.573163 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.573171 32551 net.cpp:165] Memory required for data: 1536500\nI0821 08:27:21.573191 32551 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:27:21.573212 32551 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:27:21.573225 32551 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:27:21.573251 32551 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:27:21.573273 32551 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:27:21.573397 32551 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:27:21.573420 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.573433 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.573443 32551 net.cpp:165] Memory required for data: 1537500\nI0821 08:27:21.573454 32551 layer_factory.hpp:77] Creating layer pre_conv\nI0821 
08:27:21.573535 32551 net.cpp:100] Creating Layer pre_conv\nI0821 08:27:21.573551 32551 net.cpp:434] pre_conv <- data_top\nI0821 08:27:21.573572 32551 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:27:21.575321 32551 net.cpp:150] Setting up pre_conv\nI0821 08:27:21.575351 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.575361 32551 net.cpp:165] Memory required for data: 9729500\nI0821 08:27:21.575455 32551 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:27:21.575575 32551 net.cpp:100] Creating Layer pre_bn\nI0821 08:27:21.575592 32551 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:27:21.575614 32551 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:27:21.575796 32557 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:21.575996 32551 net.cpp:150] Setting up pre_bn\nI0821 08:27:21.576020 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.576030 32551 net.cpp:165] Memory required for data: 17921500\nI0821 08:27:21.576059 32551 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:21.576133 32551 net.cpp:100] Creating Layer pre_scale\nI0821 08:27:21.576148 32551 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:27:21.576164 32551 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:27:21.576391 32551 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:21.579126 32551 net.cpp:150] Setting up pre_scale\nI0821 08:27:21.579154 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.579164 32551 net.cpp:165] Memory required for data: 26113500\nI0821 08:27:21.579183 32551 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:27:21.579263 32551 net.cpp:100] Creating Layer pre_relu\nI0821 08:27:21.579282 32551 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:27:21.579298 32551 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:27:21.579319 32551 net.cpp:150] Setting up pre_relu\nI0821 08:27:21.579334 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.579344 32551 net.cpp:165] Memory 
required for data: 34305500\nI0821 08:27:21.579355 32551 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:27:21.579375 32551 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:27:21.579386 32551 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:27:21.579401 32551 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:27:21.579421 32551 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:27:21.579502 32551 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:27:21.579529 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.579543 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.579552 32551 net.cpp:165] Memory required for data: 50689500\nI0821 08:27:21.579562 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:27:21.579583 32551 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:27:21.579596 32551 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:27:21.579613 32551 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:27:21.579995 32551 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:27:21.580016 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.580026 32551 net.cpp:165] Memory required for data: 58881500\nI0821 08:27:21.580049 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:27:21.580075 32551 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:27:21.580087 32551 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:27:21.580108 32551 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:27:21.580384 32551 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:27:21.580404 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.580413 32551 net.cpp:165] Memory required for data: 67073500\nI0821 08:27:21.580435 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:21.580456 32551 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:27:21.580468 32551 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:27:21.580484 32551 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.580567 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:21.580742 32551 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:27:21.580761 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.580772 32551 net.cpp:165] Memory required for data: 75265500\nI0821 08:27:21.580790 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:27:21.580814 32551 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:27:21.580824 32551 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:27:21.580844 32551 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.580863 32551 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:27:21.580878 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.580888 32551 net.cpp:165] Memory required for data: 83457500\nI0821 08:27:21.580898 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:27:21.580924 32551 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:27:21.580937 32551 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:27:21.580965 32551 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:27:21.581300 32551 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:27:21.581320 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.581329 32551 net.cpp:165] Memory required for data: 91649500\nI0821 08:27:21.581347 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:27:21.581368 32551 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:27:21.581380 32551 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:27:21.581398 32551 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:27:21.581661 32551 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 
08:27:21.581681 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.581691 32551 net.cpp:165] Memory required for data: 99841500\nI0821 08:27:21.581717 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:21.581740 32551 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:27:21.581753 32551 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:27:21.581773 32551 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.581856 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:21.582037 32551 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:27:21.582057 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.582067 32551 net.cpp:165] Memory required for data: 108033500\nI0821 08:27:21.582085 32551 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:27:21.582160 32551 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:27:21.582176 32551 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:27:21.582190 32551 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:27:21.582206 32551 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:27:21.582307 32551 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:27:21.582326 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.582336 32551 net.cpp:165] Memory required for data: 116225500\nI0821 08:27:21.582346 32551 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:27:21.582361 32551 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:27:21.582372 32551 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:27:21.582391 32551 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.582411 32551 net.cpp:150] Setting up L1_b1_relu\nI0821 08:27:21.582425 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.582434 32551 net.cpp:165] Memory required for data: 124417500\nI0821 08:27:21.582444 32551 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.582460 32551 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.582470 32551 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:27:21.582485 32551 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:21.582505 32551 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:21.582582 32551 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.582602 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.582614 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.582633 32551 net.cpp:165] Memory required for data: 140801500\nI0821 08:27:21.582644 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:27:21.582669 32551 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:27:21.582681 32551 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:21.582700 32551 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:27:21.583056 32551 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:27:21.583077 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.583087 32551 net.cpp:165] Memory required for data: 148993500\nI0821 08:27:21.583106 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:27:21.583129 32551 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:27:21.583142 32551 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:27:21.583159 32551 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:27:21.583431 32551 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:27:21.583451 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.583461 32551 net.cpp:165] Memory required for data: 157185500\nI0821 08:27:21.583482 32551 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:21.583499 32551 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:27:21.583510 32551 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:27:21.583525 32551 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.583612 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:21.583786 32551 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:27:21.583804 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.583814 32551 net.cpp:165] Memory required for data: 165377500\nI0821 08:27:21.583832 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:27:21.583848 32551 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:27:21.583859 32551 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:27:21.583878 32551 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.583896 32551 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:27:21.583910 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.583920 32551 net.cpp:165] Memory required for data: 173569500\nI0821 08:27:21.583930 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:27:21.583956 32551 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:27:21.583969 32551 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:27:21.583992 32551 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:27:21.584336 32551 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:27:21.584357 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.584367 32551 net.cpp:165] Memory required for data: 181761500\nI0821 08:27:21.584383 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:27:21.584400 32551 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:27:21.584410 32551 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:27:21.584434 32551 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0821 08:27:21.584702 32551 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:27:21.584725 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.584738 32551 net.cpp:165] Memory required for data: 189953500\nI0821 08:27:21.584766 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:21.584784 32551 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:27:21.584795 32551 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:27:21.584818 32551 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.584910 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:21.585088 32551 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:27:21.585111 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.585120 32551 net.cpp:165] Memory required for data: 198145500\nI0821 08:27:21.585139 32551 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:27:21.585165 32551 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:27:21.585177 32551 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:27:21.585191 32551 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:21.585206 32551 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:27:21.585264 32551 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:27:21.585285 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.585294 32551 net.cpp:165] Memory required for data: 206337500\nI0821 08:27:21.585305 32551 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:27:21.585319 32551 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:27:21.585330 32551 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:27:21.585345 32551 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:27:21.585362 32551 net.cpp:150] Setting up L1_b2_relu\nI0821 08:27:21.585376 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:27:21.585386 32551 net.cpp:165] Memory required for data: 214529500\nI0821 08:27:21.585395 32551 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.585410 32551 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.585420 32551 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:27:21.585440 32551 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:21.585460 32551 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:21.585535 32551 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.585561 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.585577 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.585587 32551 net.cpp:165] Memory required for data: 230913500\nI0821 08:27:21.585597 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:27:21.585616 32551 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:27:21.585628 32551 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:21.585645 32551 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:27:21.586009 32551 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:27:21.586030 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.586040 32551 net.cpp:165] Memory required for data: 239105500\nI0821 08:27:21.586057 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:27:21.586078 32551 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:27:21.586091 32551 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:27:21.586107 32551 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:27:21.586372 32551 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:27:21.586391 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:27:21.586401 32551 net.cpp:165] Memory required for data: 247297500\nI0821 08:27:21.586422 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:21.586439 32551 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:27:21.586452 32551 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:27:21.586472 32551 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.586555 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:21.586730 32551 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:27:21.586751 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.586761 32551 net.cpp:165] Memory required for data: 255489500\nI0821 08:27:21.586781 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:27:21.586796 32551 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:27:21.586807 32551 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:27:21.586822 32551 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.586839 32551 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:27:21.586863 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.586874 32551 net.cpp:165] Memory required for data: 263681500\nI0821 08:27:21.586884 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:27:21.586913 32551 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:27:21.586925 32551 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:27:21.586956 32551 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:27:21.587294 32551 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:27:21.587314 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.587324 32551 net.cpp:165] Memory required for data: 271873500\nI0821 08:27:21.587342 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:27:21.587370 32551 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:27:21.587383 32551 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0821 08:27:21.587400 32551 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:27:21.587661 32551 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:27:21.587680 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.587690 32551 net.cpp:165] Memory required for data: 280065500\nI0821 08:27:21.587713 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:21.587729 32551 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:27:21.587741 32551 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:27:21.587760 32551 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.587847 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:21.588027 32551 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:27:21.588047 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588057 32551 net.cpp:165] Memory required for data: 288257500\nI0821 08:27:21.588074 32551 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:27:21.588091 32551 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:27:21.588102 32551 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:27:21.588114 32551 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:21.588135 32551 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:27:21.588187 32551 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:27:21.588210 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588222 32551 net.cpp:165] Memory required for data: 296449500\nI0821 08:27:21.588232 32551 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:27:21.588245 32551 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:27:21.588256 32551 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:27:21.588270 32551 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.588287 32551 net.cpp:150] Setting up L1_b3_relu\nI0821 
08:27:21.588302 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588311 32551 net.cpp:165] Memory required for data: 304641500\nI0821 08:27:21.588320 32551 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.588340 32551 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.588351 32551 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:27:21.588366 32551 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:21.588385 32551 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:21.588457 32551 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.588480 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588495 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588503 32551 net.cpp:165] Memory required for data: 321025500\nI0821 08:27:21.588512 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:27:21.588532 32551 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:27:21.588544 32551 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:21.588572 32551 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:27:21.588949 32551 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:27:21.588970 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.588980 32551 net.cpp:165] Memory required for data: 329217500\nI0821 08:27:21.588999 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:27:21.589018 32551 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:27:21.589030 32551 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:27:21.589047 32551 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:27:21.589325 32551 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 
08:27:21.589344 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.589354 32551 net.cpp:165] Memory required for data: 337409500\nI0821 08:27:21.589375 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:21.589396 32551 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:27:21.589408 32551 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:27:21.589424 32551 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.589509 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:21.589685 32551 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:27:21.589704 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.589715 32551 net.cpp:165] Memory required for data: 345601500\nI0821 08:27:21.589732 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:27:21.589748 32551 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:27:21.589759 32551 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:27:21.589777 32551 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.589797 32551 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:27:21.589812 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.589821 32551 net.cpp:165] Memory required for data: 353793500\nI0821 08:27:21.589831 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:27:21.589856 32551 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:27:21.589869 32551 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:27:21.589885 32551 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:27:21.590248 32551 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:27:21.590268 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.590278 32551 net.cpp:165] Memory required for data: 361985500\nI0821 08:27:21.590296 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:27:21.590317 32551 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0821 08:27:21.590328 32551 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:27:21.590345 32551 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:27:21.590615 32551 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:27:21.590636 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.590646 32551 net.cpp:165] Memory required for data: 370177500\nI0821 08:27:21.590667 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:21.590687 32551 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:27:21.590698 32551 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:27:21.590713 32551 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.590806 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:21.590991 32551 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:27:21.591011 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591019 32551 net.cpp:165] Memory required for data: 378369500\nI0821 08:27:21.591037 32551 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:27:21.591055 32551 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:27:21.591066 32551 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:27:21.591078 32551 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:21.591099 32551 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:27:21.591166 32551 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:27:21.591184 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591194 32551 net.cpp:165] Memory required for data: 386561500\nI0821 08:27:21.591205 32551 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:27:21.591219 32551 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:27:21.591230 32551 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:27:21.591248 32551 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0821 08:27:21.591267 32551 net.cpp:150] Setting up L1_b4_relu\nI0821 08:27:21.591282 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591291 32551 net.cpp:165] Memory required for data: 394753500\nI0821 08:27:21.591301 32551 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.591315 32551 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.591326 32551 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:27:21.591341 32551 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:21.591359 32551 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:21.591439 32551 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.591459 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591472 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591482 32551 net.cpp:165] Memory required for data: 411137500\nI0821 08:27:21.591492 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:27:21.591512 32551 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:27:21.591524 32551 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:21.591547 32551 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:27:21.591908 32551 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:27:21.591928 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.591938 32551 net.cpp:165] Memory required for data: 419329500\nI0821 08:27:21.591982 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:27:21.592002 32551 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:27:21.592015 32551 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:27:21.592036 32551 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0821 08:27:21.592315 32551 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:27:21.592335 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.592345 32551 net.cpp:165] Memory required for data: 427521500\nI0821 08:27:21.592366 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:21.592387 32551 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:27:21.592398 32551 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:27:21.592414 32551 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.592499 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:21.592675 32551 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:27:21.592694 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.592705 32551 net.cpp:165] Memory required for data: 435713500\nI0821 08:27:21.592721 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:27:21.592737 32551 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:27:21.592749 32551 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:27:21.592768 32551 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.592788 32551 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:27:21.592803 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.592811 32551 net.cpp:165] Memory required for data: 443905500\nI0821 08:27:21.592821 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:27:21.592849 32551 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:27:21.592871 32551 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:27:21.592890 32551 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:27:21.593258 32551 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:27:21.593281 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.593291 32551 net.cpp:165] Memory required for data: 452097500\nI0821 08:27:21.593308 32551 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:27:21.593330 32551 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:27:21.593343 32551 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:27:21.593358 32551 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:27:21.593631 32551 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:27:21.593649 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.593659 32551 net.cpp:165] Memory required for data: 460289500\nI0821 08:27:21.593680 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:21.593698 32551 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:27:21.593708 32551 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:27:21.593729 32551 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.593818 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:21.594003 32551 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:27:21.594023 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594033 32551 net.cpp:165] Memory required for data: 468481500\nI0821 08:27:21.594050 32551 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:27:21.594068 32551 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:27:21.594079 32551 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:27:21.594091 32551 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:21.594111 32551 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:27:21.594163 32551 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:27:21.594187 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594198 32551 net.cpp:165] Memory required for data: 476673500\nI0821 08:27:21.594208 32551 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:27:21.594223 32551 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:27:21.594234 32551 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0821 08:27:21.594249 32551 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.594267 32551 net.cpp:150] Setting up L1_b5_relu\nI0821 08:27:21.594281 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594290 32551 net.cpp:165] Memory required for data: 484865500\nI0821 08:27:21.594300 32551 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.594319 32551 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.594331 32551 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:27:21.594347 32551 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:21.594367 32551 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:21.594441 32551 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.594463 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594478 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594488 32551 net.cpp:165] Memory required for data: 501249500\nI0821 08:27:21.594498 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:27:21.594518 32551 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:27:21.594532 32551 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:21.594548 32551 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:27:21.594907 32551 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:27:21.594926 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.594936 32551 net.cpp:165] Memory required for data: 509441500\nI0821 08:27:21.594971 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:27:21.594993 32551 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:27:21.595006 32551 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0821 08:27:21.595022 32551 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:27:21.595306 32551 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:27:21.595326 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.595336 32551 net.cpp:165] Memory required for data: 517633500\nI0821 08:27:21.595357 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:21.595374 32551 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:27:21.595386 32551 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:27:21.595407 32551 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.595490 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:21.595667 32551 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:27:21.595687 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.595696 32551 net.cpp:165] Memory required for data: 525825500\nI0821 08:27:21.595715 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:27:21.595729 32551 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:27:21.595741 32551 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:27:21.595760 32551 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.595780 32551 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:27:21.595794 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.595803 32551 net.cpp:165] Memory required for data: 534017500\nI0821 08:27:21.595813 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:27:21.595839 32551 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:27:21.595851 32551 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:27:21.595868 32551 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:27:21.596246 32551 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:27:21.596266 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.596276 32551 
net.cpp:165] Memory required for data: 542209500\nI0821 08:27:21.596292 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:27:21.596309 32551 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:27:21.596325 32551 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:27:21.596343 32551 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:27:21.596616 32551 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:27:21.596634 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.596643 32551 net.cpp:165] Memory required for data: 550401500\nI0821 08:27:21.596664 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:21.596680 32551 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:27:21.596693 32551 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:27:21.596712 32551 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.596801 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:21.596992 32551 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:27:21.597015 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597025 32551 net.cpp:165] Memory required for data: 558593500\nI0821 08:27:21.597043 32551 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:27:21.597071 32551 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:27:21.597084 32551 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:27:21.597098 32551 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:21.597115 32551 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:27:21.597173 32551 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:27:21.597192 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597201 32551 net.cpp:165] Memory required for data: 566785500\nI0821 08:27:21.597213 32551 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:27:21.597239 32551 net.cpp:100] Creating 
Layer L1_b6_relu\nI0821 08:27:21.597250 32551 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:27:21.597270 32551 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.597290 32551 net.cpp:150] Setting up L1_b6_relu\nI0821 08:27:21.597306 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597313 32551 net.cpp:165] Memory required for data: 574977500\nI0821 08:27:21.597323 32551 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.597337 32551 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.597348 32551 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:27:21.597363 32551 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:21.597383 32551 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:21.597460 32551 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.597477 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597491 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597501 32551 net.cpp:165] Memory required for data: 591361500\nI0821 08:27:21.597510 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:27:21.597532 32551 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:27:21.597543 32551 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:21.597565 32551 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:27:21.597932 32551 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:27:21.597959 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.597968 32551 net.cpp:165] Memory required for data: 599553500\nI0821 08:27:21.597987 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:27:21.598004 32551 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0821 08:27:21.598016 32551 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:27:21.598037 32551 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:27:21.598311 32551 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:27:21.598330 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.598340 32551 net.cpp:165] Memory required for data: 607745500\nI0821 08:27:21.598362 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:21.598382 32551 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:27:21.598394 32551 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:27:21.598409 32551 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.598495 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:21.598671 32551 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:27:21.598690 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.598701 32551 net.cpp:165] Memory required for data: 615937500\nI0821 08:27:21.598718 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:27:21.598738 32551 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:27:21.598750 32551 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:27:21.598765 32551 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.598784 32551 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:27:21.598798 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.598809 32551 net.cpp:165] Memory required for data: 624129500\nI0821 08:27:21.598819 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:27:21.598845 32551 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:27:21.598858 32551 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:27:21.598881 32551 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:27:21.599256 32551 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:27:21.599277 32551 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.599285 32551 net.cpp:165] Memory required for data: 632321500\nI0821 08:27:21.599314 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:27:21.599333 32551 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:27:21.599345 32551 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:27:21.599366 32551 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:27:21.599650 32551 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:27:21.599669 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.599679 32551 net.cpp:165] Memory required for data: 640513500\nI0821 08:27:21.599701 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:21.599722 32551 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:27:21.599735 32551 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:27:21.599750 32551 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.599839 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:21.600023 32551 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:27:21.600044 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600052 32551 net.cpp:165] Memory required for data: 648705500\nI0821 08:27:21.600071 32551 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:27:21.600088 32551 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:27:21.600100 32551 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:27:21.600116 32551 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:21.600134 32551 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:27:21.600191 32551 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:27:21.600210 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600220 32551 net.cpp:165] Memory required for data: 656897500\nI0821 08:27:21.600229 32551 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0821 08:27:21.600244 32551 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:27:21.600255 32551 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:27:21.600275 32551 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.600293 32551 net.cpp:150] Setting up L1_b7_relu\nI0821 08:27:21.600308 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600317 32551 net.cpp:165] Memory required for data: 665089500\nI0821 08:27:21.600327 32551 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.600342 32551 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.600353 32551 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:27:21.600366 32551 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:21.600385 32551 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:21.600461 32551 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.600478 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600492 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600500 32551 net.cpp:165] Memory required for data: 681473500\nI0821 08:27:21.600510 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:27:21.600531 32551 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:27:21.600544 32551 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:21.600566 32551 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:27:21.600934 32551 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:27:21.600960 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.600970 32551 net.cpp:165] Memory required for data: 689665500\nI0821 08:27:21.600988 32551 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0821 08:27:21.601006 32551 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:27:21.601016 32551 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:27:21.601039 32551 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:27:21.601359 32551 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:27:21.601379 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.601389 32551 net.cpp:165] Memory required for data: 697857500\nI0821 08:27:21.601411 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:21.601433 32551 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:27:21.601446 32551 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:27:21.601461 32551 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.601544 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:21.601725 32551 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:27:21.601744 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.601754 32551 net.cpp:165] Memory required for data: 706049500\nI0821 08:27:21.601773 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:27:21.601791 32551 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:27:21.601804 32551 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:27:21.601819 32551 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.601837 32551 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:27:21.601851 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.601861 32551 net.cpp:165] Memory required for data: 714241500\nI0821 08:27:21.601871 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:27:21.601897 32551 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:27:21.601908 32551 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:27:21.601929 32551 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:27:21.602296 32551 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:27:21.602316 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.602326 32551 net.cpp:165] Memory required for data: 722433500\nI0821 08:27:21.602344 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:27:21.602361 32551 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:27:21.602372 32551 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:27:21.602388 32551 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:27:21.602670 32551 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:27:21.602689 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.602699 32551 net.cpp:165] Memory required for data: 730625500\nI0821 08:27:21.602720 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:21.602742 32551 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:27:21.602754 32551 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:27:21.602771 32551 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.602861 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:21.603047 32551 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:27:21.603067 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.603076 32551 net.cpp:165] Memory required for data: 738817500\nI0821 08:27:21.603094 32551 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:27:21.603111 32551 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:27:21.603122 32551 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:27:21.603137 32551 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:21.603157 32551 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:27:21.603215 32551 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:27:21.603233 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.603243 32551 net.cpp:165] Memory required for 
data: 747009500\nI0821 08:27:21.603252 32551 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:27:21.603267 32551 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:27:21.603278 32551 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:27:21.603298 32551 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.603317 32551 net.cpp:150] Setting up L1_b8_relu\nI0821 08:27:21.603341 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.603351 32551 net.cpp:165] Memory required for data: 755201500\nI0821 08:27:21.603361 32551 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.603376 32551 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.603386 32551 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:27:21.603402 32551 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:21.603420 32551 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:21.603502 32551 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.603521 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.603534 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.603543 32551 net.cpp:165] Memory required for data: 771585500\nI0821 08:27:21.603554 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:27:21.603574 32551 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:27:21.603586 32551 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:21.603610 32551 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:27:21.603992 32551 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:27:21.604013 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.604022 32551 net.cpp:165] Memory required for data: 
779777500\nI0821 08:27:21.604040 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:27:21.604065 32551 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:27:21.604079 32551 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:27:21.604100 32551 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:27:21.604377 32551 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:27:21.604395 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.604405 32551 net.cpp:165] Memory required for data: 787969500\nI0821 08:27:21.604426 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:21.604444 32551 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:27:21.604455 32551 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:27:21.604470 32551 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.604560 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:21.604742 32551 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:27:21.604760 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.604769 32551 net.cpp:165] Memory required for data: 796161500\nI0821 08:27:21.604787 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:27:21.604804 32551 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:27:21.604815 32551 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:27:21.604832 32551 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.604853 32551 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:27:21.604867 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.604876 32551 net.cpp:165] Memory required for data: 804353500\nI0821 08:27:21.604887 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:27:21.604913 32551 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:27:21.604925 32551 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:27:21.604951 32551 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:27:21.605324 32551 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:27:21.605345 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.605355 32551 net.cpp:165] Memory required for data: 812545500\nI0821 08:27:21.605372 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:27:21.605393 32551 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:27:21.605406 32551 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:27:21.605423 32551 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:27:21.605720 32551 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:27:21.605742 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.605754 32551 net.cpp:165] Memory required for data: 820737500\nI0821 08:27:21.605805 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:21.605823 32551 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:27:21.605835 32551 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:27:21.605850 32551 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.605945 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:21.606133 32551 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:27:21.606153 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.606163 32551 net.cpp:165] Memory required for data: 828929500\nI0821 08:27:21.606181 32551 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:27:21.606202 32551 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:27:21.606215 32551 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:27:21.606228 32551 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:21.606245 32551 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:27:21.606297 32551 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:27:21.606314 32551 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:21.606324 32551 net.cpp:165] Memory required for data: 837121500\nI0821 08:27:21.606334 32551 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:27:21.606348 32551 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:27:21.606360 32551 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:27:21.606380 32551 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.606400 32551 net.cpp:150] Setting up L1_b9_relu\nI0821 08:27:21.606415 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.606423 32551 net.cpp:165] Memory required for data: 845313500\nI0821 08:27:21.606433 32551 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.606447 32551 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.606458 32551 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:27:21.606480 32551 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:21.606501 32551 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:21.606581 32551 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.606601 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.606616 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.606626 32551 net.cpp:165] Memory required for data: 861697500\nI0821 08:27:21.606636 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:27:21.606654 32551 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:27:21.606667 32551 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:21.606689 32551 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:27:21.607067 32551 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:27:21.607089 32551 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0821 08:27:21.607098 32551 net.cpp:165] Memory required for data: 863745500\nI0821 08:27:21.607117 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:27:21.607138 32551 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:27:21.607151 32551 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:27:21.607167 32551 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:27:21.607445 32551 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:27:21.607463 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.607473 32551 net.cpp:165] Memory required for data: 865793500\nI0821 08:27:21.607496 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:21.607519 32551 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:27:21.607540 32551 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:27:21.607558 32551 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.607650 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:21.607833 32551 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:27:21.607853 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.607862 32551 net.cpp:165] Memory required for data: 867841500\nI0821 08:27:21.607882 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:27:21.607902 32551 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:27:21.607913 32551 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:27:21.607931 32551 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.607959 32551 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:27:21.607975 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.607985 32551 net.cpp:165] Memory required for data: 869889500\nI0821 08:27:21.607995 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:27:21.608014 32551 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:27:21.608027 32551 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:27:21.608047 32551 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:27:21.608408 32551 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:27:21.608428 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.608438 32551 net.cpp:165] Memory required for data: 871937500\nI0821 08:27:21.608455 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:27:21.608471 32551 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:27:21.608484 32551 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:27:21.608507 32551 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:27:21.608788 32551 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:27:21.608808 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.608817 32551 net.cpp:165] Memory required for data: 873985500\nI0821 08:27:21.608839 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:21.608860 32551 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:27:21.608871 32551 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:27:21.608887 32551 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.608981 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:21.609166 32551 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:27:21.609185 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.609194 32551 net.cpp:165] Memory required for data: 876033500\nI0821 08:27:21.609213 32551 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:27:21.609237 32551 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:27:21.609251 32551 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:21.609267 32551 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:27:21.609375 32551 net.cpp:150] Setting up L2_b1_pool\nI0821 08:27:21.609395 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.609405 32551 net.cpp:165] Memory 
required for data: 878081500\nI0821 08:27:21.609416 32551 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:27:21.609431 32551 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:27:21.609442 32551 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:27:21.609455 32551 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:27:21.609477 32551 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:27:21.609531 32551 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:27:21.609552 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.609562 32551 net.cpp:165] Memory required for data: 880129500\nI0821 08:27:21.609572 32551 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:27:21.609587 32551 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:27:21.609599 32551 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:27:21.609622 32551 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.609642 32551 net.cpp:150] Setting up L2_b1_relu\nI0821 08:27:21.609658 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.609666 32551 net.cpp:165] Memory required for data: 882177500\nI0821 08:27:21.609675 32551 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:27:21.609750 32551 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:27:21.609769 32551 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:27:21.612113 32551 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:27:21.612136 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.612146 32551 net.cpp:165] Memory required for data: 884225500\nI0821 08:27:21.612157 32551 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:27:21.612174 32551 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:27:21.612186 32551 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:27:21.612200 32551 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:27:21.612221 32551 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 
08:27:21.612320 32551 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:27:21.612346 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.612356 32551 net.cpp:165] Memory required for data: 888321500\nI0821 08:27:21.612366 32551 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.612381 32551 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.612392 32551 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:27:21.612407 32551 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:21.612427 32551 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:21.612515 32551 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.612537 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.612551 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.612560 32551 net.cpp:165] Memory required for data: 896513500\nI0821 08:27:21.612571 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:27:21.612596 32551 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:27:21.612609 32551 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:21.612628 32551 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:27:21.614122 32551 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:27:21.614145 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.614154 32551 net.cpp:165] Memory required for data: 900609500\nI0821 08:27:21.614172 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:27:21.614195 32551 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:27:21.614207 32551 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:27:21.614224 32551 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:27:21.614516 32551 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 
08:27:21.614538 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.614549 32551 net.cpp:165] Memory required for data: 904705500\nI0821 08:27:21.614573 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:21.614589 32551 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:27:21.614601 32551 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:27:21.614617 32551 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.614704 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:21.614893 32551 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:27:21.614912 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.614922 32551 net.cpp:165] Memory required for data: 908801500\nI0821 08:27:21.614946 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:27:21.614964 32551 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:27:21.614976 32551 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:27:21.615005 32551 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.615025 32551 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:27:21.615041 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.615049 32551 net.cpp:165] Memory required for data: 912897500\nI0821 08:27:21.615058 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:27:21.615079 32551 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:27:21.615092 32551 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:27:21.615113 32551 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:27:21.615610 32551 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:27:21.615630 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.615640 32551 net.cpp:165] Memory required for data: 916993500\nI0821 08:27:21.615658 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:27:21.615675 32551 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0821 08:27:21.615687 32551 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:27:21.615708 32551 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:27:21.616004 32551 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:27:21.616025 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616034 32551 net.cpp:165] Memory required for data: 921089500\nI0821 08:27:21.616056 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:21.616077 32551 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:27:21.616089 32551 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:27:21.616106 32551 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.616192 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:21.616377 32551 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:27:21.616396 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616406 32551 net.cpp:165] Memory required for data: 925185500\nI0821 08:27:21.616423 32551 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:27:21.616446 32551 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:27:21.616457 32551 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:27:21.616470 32551 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:21.616487 32551 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:27:21.616539 32551 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:27:21.616556 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616566 32551 net.cpp:165] Memory required for data: 929281500\nI0821 08:27:21.616576 32551 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:27:21.616590 32551 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:27:21.616602 32551 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:27:21.616621 32551 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0821 08:27:21.616641 32551 net.cpp:150] Setting up L2_b2_relu\nI0821 08:27:21.616655 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616664 32551 net.cpp:165] Memory required for data: 933377500\nI0821 08:27:21.616674 32551 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.616688 32551 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.616700 32551 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:27:21.616715 32551 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:21.616734 32551 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:21.616818 32551 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.616837 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616850 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.616859 32551 net.cpp:165] Memory required for data: 941569500\nI0821 08:27:21.616870 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:27:21.616899 32551 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:27:21.616914 32551 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:21.616935 32551 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:27:21.617446 32551 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:27:21.617466 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.617476 32551 net.cpp:165] Memory required for data: 945665500\nI0821 08:27:21.617496 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:27:21.617516 32551 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:27:21.617528 32551 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:27:21.617545 32551 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0821 08:27:21.617995 32551 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:27:21.618016 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.618026 32551 net.cpp:165] Memory required for data: 949761500\nI0821 08:27:21.618047 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:21.618068 32551 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:27:21.618080 32551 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:27:21.618096 32551 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.618186 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:21.618367 32551 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:27:21.618386 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.618396 32551 net.cpp:165] Memory required for data: 953857500\nI0821 08:27:21.618413 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:27:21.618433 32551 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:27:21.618445 32551 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:27:21.618459 32551 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.618484 32551 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:27:21.618499 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.618507 32551 net.cpp:165] Memory required for data: 957953500\nI0821 08:27:21.618517 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:27:21.618537 32551 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:27:21.618551 32551 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:27:21.618573 32551 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:27:21.619091 32551 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:27:21.619112 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.619122 32551 net.cpp:165] Memory required for data: 962049500\nI0821 08:27:21.619140 32551 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:27:21.619158 32551 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:27:21.619169 32551 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:27:21.619190 32551 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:27:21.619477 32551 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:27:21.619496 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.619505 32551 net.cpp:165] Memory required for data: 966145500\nI0821 08:27:21.619526 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:21.619547 32551 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:27:21.619560 32551 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:27:21.619576 32551 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.619663 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:21.619853 32551 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:27:21.619874 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.619884 32551 net.cpp:165] Memory required for data: 970241500\nI0821 08:27:21.619901 32551 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:27:21.619923 32551 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:27:21.619935 32551 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:27:21.619966 32551 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:21.619982 32551 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:27:21.620031 32551 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:27:21.620048 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.620055 32551 net.cpp:165] Memory required for data: 974337500\nI0821 08:27:21.620064 32551 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:27:21.620093 32551 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:27:21.620105 32551 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0821 08:27:21.620118 32551 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.620133 32551 net.cpp:150] Setting up L2_b3_relu\nI0821 08:27:21.620146 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.620156 32551 net.cpp:165] Memory required for data: 978433500\nI0821 08:27:21.620165 32551 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.620183 32551 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.620193 32551 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:27:21.620206 32551 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:21.620223 32551 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:21.620287 32551 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.620307 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.620321 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.620329 32551 net.cpp:165] Memory required for data: 986625500\nI0821 08:27:21.620339 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:27:21.620359 32551 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:27:21.620373 32551 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:21.620390 32551 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:27:21.620909 32551 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:27:21.620929 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.620944 32551 net.cpp:165] Memory required for data: 990721500\nI0821 08:27:21.620965 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:27:21.620985 32551 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:27:21.620998 32551 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0821 08:27:21.621016 32551 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:27:21.621299 32551 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:27:21.621318 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.621330 32551 net.cpp:165] Memory required for data: 994817500\nI0821 08:27:21.621351 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:21.621367 32551 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:27:21.621378 32551 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:27:21.621398 32551 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.621487 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:21.621670 32551 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:27:21.621690 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.621698 32551 net.cpp:165] Memory required for data: 998913500\nI0821 08:27:21.621717 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:27:21.621733 32551 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:27:21.621744 32551 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:27:21.621762 32551 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.621783 32551 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:27:21.621798 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.621807 32551 net.cpp:165] Memory required for data: 1003009500\nI0821 08:27:21.621817 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:27:21.621851 32551 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:27:21.621865 32551 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:27:21.621881 32551 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:27:21.622395 32551 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:27:21.622414 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.622423 32551 
net.cpp:165] Memory required for data: 1007105500\nI0821 08:27:21.622442 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:27:21.622467 32551 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:27:21.622479 32551 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:27:21.622495 32551 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:27:21.622778 32551 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:27:21.622797 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.622807 32551 net.cpp:165] Memory required for data: 1011201500\nI0821 08:27:21.622828 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:21.622845 32551 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:27:21.622856 32551 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:27:21.622876 32551 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.622972 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:21.623155 32551 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:27:21.623174 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.623184 32551 net.cpp:165] Memory required for data: 1015297500\nI0821 08:27:21.623203 32551 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:27:21.623219 32551 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:27:21.623230 32551 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:27:21.623245 32551 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:21.623268 32551 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:27:21.623317 32551 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:27:21.623335 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.623344 32551 net.cpp:165] Memory required for data: 1019393500\nI0821 08:27:21.623354 32551 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:27:21.623374 32551 net.cpp:100] Creating 
Layer L2_b4_relu\nI0821 08:27:21.623386 32551 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:27:21.623401 32551 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:27:21.623420 32551 net.cpp:150] Setting up L2_b4_relu\nI0821 08:27:21.623435 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.623442 32551 net.cpp:165] Memory required for data: 1023489500\nI0821 08:27:21.623452 32551 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.623466 32551 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.623477 32551 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:27:21.623497 32551 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:21.623517 32551 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:21.623592 32551 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.623615 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.623630 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.623638 32551 net.cpp:165] Memory required for data: 1031681500\nI0821 08:27:21.623649 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:27:21.623668 32551 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:27:21.623680 32551 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:21.623699 32551 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:27:21.624217 32551 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:27:21.624244 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.624255 32551 net.cpp:165] Memory required for data: 1035777500\nI0821 08:27:21.624274 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:27:21.624295 32551 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0821 08:27:21.624307 32551 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:27:21.624323 32551 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:27:21.624609 32551 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:27:21.624627 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.624636 32551 net.cpp:165] Memory required for data: 1039873500\nI0821 08:27:21.624658 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:21.624675 32551 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:27:21.624686 32551 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:27:21.624706 32551 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.624797 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:21.624994 32551 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:27:21.625015 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.625025 32551 net.cpp:165] Memory required for data: 1043969500\nI0821 08:27:21.625043 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:27:21.625059 32551 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:27:21.625071 32551 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:27:21.625089 32551 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.625110 32551 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:27:21.625124 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.625135 32551 net.cpp:165] Memory required for data: 1048065500\nI0821 08:27:21.625145 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:27:21.625170 32551 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:27:21.625183 32551 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:27:21.625200 32551 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:27:21.625715 32551 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:27:21.625735 32551 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.625746 32551 net.cpp:165] Memory required for data: 1052161500\nI0821 08:27:21.625763 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:27:21.625789 32551 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:27:21.625802 32551 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:27:21.625819 32551 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:27:21.626104 32551 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:27:21.626123 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626133 32551 net.cpp:165] Memory required for data: 1056257500\nI0821 08:27:21.626155 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:21.626173 32551 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:27:21.626184 32551 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:27:21.626199 32551 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.626291 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:21.626473 32551 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:27:21.626497 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626507 32551 net.cpp:165] Memory required for data: 1060353500\nI0821 08:27:21.626524 32551 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:27:21.626541 32551 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:27:21.626552 32551 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:27:21.626565 32551 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:21.626581 32551 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:27:21.626631 32551 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:27:21.626649 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626668 32551 net.cpp:165] Memory required for data: 1064449500\nI0821 08:27:21.626680 32551 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0821 08:27:21.626694 32551 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:27:21.626706 32551 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:27:21.626725 32551 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.626744 32551 net.cpp:150] Setting up L2_b5_relu\nI0821 08:27:21.626760 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626768 32551 net.cpp:165] Memory required for data: 1068545500\nI0821 08:27:21.626778 32551 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.626792 32551 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.626802 32551 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:27:21.626822 32551 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:21.626843 32551 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:21.626924 32551 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.626950 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626965 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.626973 32551 net.cpp:165] Memory required for data: 1076737500\nI0821 08:27:21.627068 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:27:21.627099 32551 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:27:21.627115 32551 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:21.627133 32551 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:27:21.627789 32551 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:27:21.627810 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.627820 32551 net.cpp:165] Memory required for data: 1080833500\nI0821 08:27:21.627837 32551 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0821 08:27:21.627859 32551 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:27:21.627872 32551 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:27:21.627888 32551 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:27:21.628186 32551 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:27:21.628206 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.628216 32551 net.cpp:165] Memory required for data: 1084929500\nI0821 08:27:21.628237 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:21.628253 32551 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:27:21.628265 32551 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:27:21.628291 32551 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.628387 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:21.628574 32551 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:27:21.628593 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.628603 32551 net.cpp:165] Memory required for data: 1089025500\nI0821 08:27:21.628623 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:27:21.628638 32551 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:27:21.628649 32551 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:27:21.628664 32551 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.628687 32551 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:27:21.628702 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.628712 32551 net.cpp:165] Memory required for data: 1093121500\nI0821 08:27:21.628721 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:27:21.628742 32551 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:27:21.628759 32551 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:27:21.628777 32551 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:27:21.629298 32551 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:27:21.629326 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.629338 32551 net.cpp:165] Memory required for data: 1097217500\nI0821 08:27:21.629355 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:27:21.629376 32551 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:27:21.629390 32551 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:27:21.629410 32551 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:27:21.629686 32551 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:27:21.629706 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.629716 32551 net.cpp:165] Memory required for data: 1101313500\nI0821 08:27:21.629737 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:21.629755 32551 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:27:21.629765 32551 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:27:21.629781 32551 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.629873 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:21.630060 32551 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:27:21.630084 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.630095 32551 net.cpp:165] Memory required for data: 1105409500\nI0821 08:27:21.630112 32551 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:27:21.630129 32551 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:27:21.630141 32551 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:27:21.630156 32551 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:21.630172 32551 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:27:21.630224 32551 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:27:21.630242 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.630252 32551 net.cpp:165] Memory required for 
data: 1109505500\nI0821 08:27:21.630262 32551 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:27:21.630275 32551 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:27:21.630287 32551 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:27:21.630306 32551 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.630326 32551 net.cpp:150] Setting up L2_b6_relu\nI0821 08:27:21.630340 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.630349 32551 net.cpp:165] Memory required for data: 1113601500\nI0821 08:27:21.630360 32551 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.630374 32551 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.630386 32551 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:27:21.630410 32551 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:21.630430 32551 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:21.630509 32551 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.630532 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.630545 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.630555 32551 net.cpp:165] Memory required for data: 1121793500\nI0821 08:27:21.630565 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:27:21.630590 32551 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:27:21.630605 32551 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:21.630623 32551 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:27:21.631155 32551 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:27:21.631175 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.631184 32551 net.cpp:165] Memory required for data: 
1125889500\nI0821 08:27:21.631202 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:27:21.631227 32551 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:27:21.631247 32551 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:27:21.631265 32551 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:27:21.631562 32551 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:27:21.631582 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.631592 32551 net.cpp:165] Memory required for data: 1129985500\nI0821 08:27:21.631613 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:21.631629 32551 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:27:21.631640 32551 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:27:21.631655 32551 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.631752 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:21.631937 32551 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:27:21.631965 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.631975 32551 net.cpp:165] Memory required for data: 1134081500\nI0821 08:27:21.631994 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:27:21.632009 32551 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:27:21.632021 32551 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:27:21.632035 32551 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.632055 32551 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:27:21.632068 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.632078 32551 net.cpp:165] Memory required for data: 1138177500\nI0821 08:27:21.632089 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:27:21.632117 32551 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:27:21.632129 32551 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:27:21.632150 32551 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:27:21.632724 32551 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:27:21.632745 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.632755 32551 net.cpp:165] Memory required for data: 1142273500\nI0821 08:27:21.632772 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:27:21.632794 32551 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:27:21.632807 32551 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:27:21.632828 32551 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:27:21.633123 32551 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:27:21.633142 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.633152 32551 net.cpp:165] Memory required for data: 1146369500\nI0821 08:27:21.633173 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:21.633190 32551 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:27:21.633203 32551 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:27:21.633218 32551 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.633312 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:21.633499 32551 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:27:21.633518 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.633527 32551 net.cpp:165] Memory required for data: 1150465500\nI0821 08:27:21.633546 32551 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:27:21.633568 32551 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:27:21.633579 32551 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:27:21.633594 32551 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:21.633610 32551 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:27:21.633656 32551 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:27:21.633674 32551 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:21.633683 32551 net.cpp:165] Memory required for data: 1154561500\nI0821 08:27:21.633694 32551 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:27:21.633713 32551 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:27:21.633726 32551 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:27:21.633754 32551 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.633774 32551 net.cpp:150] Setting up L2_b7_relu\nI0821 08:27:21.633788 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.633797 32551 net.cpp:165] Memory required for data: 1158657500\nI0821 08:27:21.633807 32551 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.633821 32551 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.633831 32551 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:27:21.633846 32551 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:21.633867 32551 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:21.633962 32551 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.633982 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.633996 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.634006 32551 net.cpp:165] Memory required for data: 1166849500\nI0821 08:27:21.634016 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:27:21.634042 32551 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:27:21.634055 32551 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:21.634073 32551 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:27:21.634594 32551 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:27:21.634614 32551 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:21.634624 32551 net.cpp:165] Memory required for data: 1170945500\nI0821 08:27:21.634641 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:27:21.634665 32551 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:27:21.634678 32551 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:27:21.634699 32551 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:27:21.634995 32551 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:27:21.635015 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.635025 32551 net.cpp:165] Memory required for data: 1175041500\nI0821 08:27:21.635046 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:21.635063 32551 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:27:21.635074 32551 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:27:21.635090 32551 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.635182 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:21.635372 32551 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:27:21.635396 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.635406 32551 net.cpp:165] Memory required for data: 1179137500\nI0821 08:27:21.635424 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:27:21.635439 32551 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:27:21.635450 32551 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:27:21.635465 32551 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.635484 32551 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:27:21.635499 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.635509 32551 net.cpp:165] Memory required for data: 1183233500\nI0821 08:27:21.635519 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:27:21.635543 32551 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:27:21.635556 32551 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:27:21.635578 32551 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:27:21.636101 32551 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:27:21.636121 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.636132 32551 net.cpp:165] Memory required for data: 1187329500\nI0821 08:27:21.636149 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:27:21.636171 32551 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:27:21.636193 32551 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:27:21.636215 32551 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:27:21.636514 32551 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:27:21.636533 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.636543 32551 net.cpp:165] Memory required for data: 1191425500\nI0821 08:27:21.636564 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:21.636581 32551 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:27:21.636593 32551 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:27:21.636610 32551 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.636703 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:21.636888 32551 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:27:21.636909 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.636917 32551 net.cpp:165] Memory required for data: 1195521500\nI0821 08:27:21.636935 32551 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:27:21.636963 32551 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:27:21.636977 32551 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:27:21.636991 32551 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:21.637007 32551 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:27:21.637055 32551 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0821 08:27:21.637073 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.637084 32551 net.cpp:165] Memory required for data: 1199617500\nI0821 08:27:21.637094 32551 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:27:21.637112 32551 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:27:21.637125 32551 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:27:21.637140 32551 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.637158 32551 net.cpp:150] Setting up L2_b8_relu\nI0821 08:27:21.637173 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.637181 32551 net.cpp:165] Memory required for data: 1203713500\nI0821 08:27:21.637192 32551 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.637205 32551 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.637217 32551 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:27:21.637230 32551 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:21.637269 32551 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:21.637357 32551 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.637377 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.637390 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.637400 32551 net.cpp:165] Memory required for data: 1211905500\nI0821 08:27:21.637410 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:27:21.637441 32551 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:27:21.637456 32551 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:21.637480 32551 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:27:21.638005 32551 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0821 08:27:21.638025 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.638036 32551 net.cpp:165] Memory required for data: 1216001500\nI0821 08:27:21.638053 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:27:21.638074 32551 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:27:21.638087 32551 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:27:21.638108 32551 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:27:21.638396 32551 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:27:21.638414 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.638433 32551 net.cpp:165] Memory required for data: 1220097500\nI0821 08:27:21.638456 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:21.638473 32551 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:27:21.638486 32551 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:27:21.638500 32551 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.638599 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:21.638787 32551 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:27:21.638805 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.638815 32551 net.cpp:165] Memory required for data: 1224193500\nI0821 08:27:21.638833 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:27:21.638849 32551 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:27:21.638860 32551 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:27:21.638880 32551 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.638900 32551 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:27:21.638913 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.638922 32551 net.cpp:165] Memory required for data: 1228289500\nI0821 08:27:21.638932 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:27:21.638965 32551 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:27:21.638979 32551 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:27:21.638998 32551 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:27:21.639513 32551 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:27:21.639533 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.639542 32551 net.cpp:165] Memory required for data: 1232385500\nI0821 08:27:21.639560 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:27:21.639582 32551 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:27:21.639595 32551 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:27:21.639611 32551 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:27:21.639906 32551 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:27:21.639928 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.639945 32551 net.cpp:165] Memory required for data: 1236481500\nI0821 08:27:21.640015 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:21.640041 32551 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:27:21.640055 32551 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:27:21.640071 32551 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.640166 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:21.640350 32551 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:27:21.640369 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.640378 32551 net.cpp:165] Memory required for data: 1240577500\nI0821 08:27:21.640396 32551 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:27:21.640413 32551 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:27:21.640425 32551 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:27:21.640439 32551 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:21.640463 32551 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0821 08:27:21.640511 32551 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:27:21.640528 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.640537 32551 net.cpp:165] Memory required for data: 1244673500\nI0821 08:27:21.640547 32551 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:27:21.640566 32551 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:27:21.640578 32551 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:27:21.640594 32551 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.640612 32551 net.cpp:150] Setting up L2_b9_relu\nI0821 08:27:21.640627 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.640636 32551 net.cpp:165] Memory required for data: 1248769500\nI0821 08:27:21.640653 32551 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.640674 32551 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.640686 32551 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:27:21.640703 32551 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:21.640723 32551 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:21.640812 32551 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.640836 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.640851 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.640861 32551 net.cpp:165] Memory required for data: 1256961500\nI0821 08:27:21.640872 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:27:21.640892 32551 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:27:21.640904 32551 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:21.640924 32551 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0821 08:27:21.641453 32551 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:27:21.641474 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.641482 32551 net.cpp:165] Memory required for data: 1257985500\nI0821 08:27:21.641500 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:27:21.641522 32551 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:27:21.641535 32551 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:27:21.641551 32551 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:27:21.641854 32551 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:27:21.641872 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.641882 32551 net.cpp:165] Memory required for data: 1259009500\nI0821 08:27:21.641904 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:21.641929 32551 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:27:21.641948 32551 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:27:21.641971 32551 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.642066 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:21.642262 32551 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:27:21.642282 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.642290 32551 net.cpp:165] Memory required for data: 1260033500\nI0821 08:27:21.642309 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:27:21.642325 32551 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:27:21.642338 32551 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:27:21.642354 32551 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.642375 32551 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:27:21.642390 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.642398 32551 net.cpp:165] Memory required for data: 1261057500\nI0821 08:27:21.642410 32551 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0821 08:27:21.642429 32551 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:27:21.642441 32551 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:27:21.642462 32551 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:27:21.642992 32551 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:27:21.643013 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.643023 32551 net.cpp:165] Memory required for data: 1262081500\nI0821 08:27:21.643040 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:27:21.643061 32551 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:27:21.643074 32551 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:27:21.643090 32551 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:27:21.643391 32551 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:27:21.643410 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.643427 32551 net.cpp:165] Memory required for data: 1263105500\nI0821 08:27:21.643450 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:21.643467 32551 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:27:21.643481 32551 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:27:21.643494 32551 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.643594 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:21.643788 32551 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:27:21.643807 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.643816 32551 net.cpp:165] Memory required for data: 1264129500\nI0821 08:27:21.643836 32551 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:27:21.643851 32551 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:27:21.643863 32551 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:21.643884 32551 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:27:21.643949 32551 net.cpp:150] 
Setting up L3_b1_pool\nI0821 08:27:21.643980 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.643990 32551 net.cpp:165] Memory required for data: 1265153500\nI0821 08:27:21.644001 32551 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:27:21.644017 32551 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:27:21.644029 32551 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:27:21.644042 32551 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:27:21.644057 32551 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:27:21.644117 32551 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:27:21.644135 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.644145 32551 net.cpp:165] Memory required for data: 1266177500\nI0821 08:27:21.644155 32551 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:27:21.644170 32551 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:27:21.644182 32551 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:27:21.644196 32551 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.644214 32551 net.cpp:150] Setting up L3_b1_relu\nI0821 08:27:21.644229 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.644238 32551 net.cpp:165] Memory required for data: 1267201500\nI0821 08:27:21.644248 32551 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:27:21.644269 32551 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:27:21.644284 32551 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:27:21.645547 32551 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:27:21.645575 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.645586 32551 net.cpp:165] Memory required for data: 1268225500\nI0821 08:27:21.645596 32551 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:27:21.645612 32551 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:27:21.645624 32551 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 
08:27:21.645638 32551 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:27:21.645654 32551 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:27:21.645717 32551 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:27:21.645740 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.645750 32551 net.cpp:165] Memory required for data: 1270273500\nI0821 08:27:21.645759 32551 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.645778 32551 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.645789 32551 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:27:21.645805 32551 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:21.645825 32551 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:21.645917 32551 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.645938 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.645959 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.645979 32551 net.cpp:165] Memory required for data: 1274369500\nI0821 08:27:21.645990 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:27:21.646015 32551 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:27:21.646029 32551 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:21.646049 32551 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:27:21.648105 32551 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:27:21.648133 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.648142 32551 net.cpp:165] Memory required for data: 1276417500\nI0821 08:27:21.648161 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:27:21.648180 32551 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:27:21.648191 32551 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 
08:27:21.648213 32551 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:27:21.648516 32551 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:27:21.648540 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.648550 32551 net.cpp:165] Memory required for data: 1278465500\nI0821 08:27:21.648572 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:21.648589 32551 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:27:21.648602 32551 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:27:21.648618 32551 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.648707 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:21.648901 32551 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:27:21.648921 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.648931 32551 net.cpp:165] Memory required for data: 1280513500\nI0821 08:27:21.648957 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:27:21.648972 32551 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:27:21.648984 32551 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:27:21.649004 32551 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.649024 32551 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:27:21.649039 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.649049 32551 net.cpp:165] Memory required for data: 1282561500\nI0821 08:27:21.649058 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:27:21.649085 32551 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:27:21.649097 32551 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:27:21.649114 32551 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:27:21.650192 32551 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:27:21.650213 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.650223 32551 net.cpp:165] Memory required for data: 
1284609500\nI0821 08:27:21.650241 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:27:21.650264 32551 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:27:21.650275 32551 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:27:21.650296 32551 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:27:21.650593 32551 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:27:21.650612 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.650622 32551 net.cpp:165] Memory required for data: 1286657500\nI0821 08:27:21.650645 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:21.650661 32551 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:27:21.650673 32551 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:27:21.650693 32551 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.650787 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:21.650991 32551 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:27:21.651010 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.651021 32551 net.cpp:165] Memory required for data: 1288705500\nI0821 08:27:21.651038 32551 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:27:21.651067 32551 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:27:21.651079 32551 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:27:21.651093 32551 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:21.651114 32551 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:27:21.651177 32551 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:27:21.651199 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.651209 32551 net.cpp:165] Memory required for data: 1290753500\nI0821 08:27:21.651221 32551 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:27:21.651235 32551 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:27:21.651247 32551 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:27:21.651262 32551 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:27:21.651280 32551 net.cpp:150] Setting up L3_b2_relu\nI0821 08:27:21.651294 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.651304 32551 net.cpp:165] Memory required for data: 1292801500\nI0821 08:27:21.651314 32551 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.651332 32551 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.651345 32551 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:27:21.651360 32551 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:21.651381 32551 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:21.651470 32551 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.651489 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.651501 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.651511 32551 net.cpp:165] Memory required for data: 1296897500\nI0821 08:27:21.651521 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:27:21.651542 32551 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:27:21.651556 32551 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:21.651578 32551 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:27:21.652668 32551 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:27:21.652688 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.652698 32551 net.cpp:165] Memory required for data: 1298945500\nI0821 08:27:21.652716 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:27:21.652732 32551 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:27:21.652745 32551 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:27:21.652766 32551 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:27:21.653077 32551 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:27:21.653102 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.653115 32551 net.cpp:165] Memory required for data: 1300993500\nI0821 08:27:21.653136 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:21.653152 32551 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:27:21.653164 32551 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:27:21.653180 32551 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.653270 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:21.653463 32551 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:27:21.653483 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.653492 32551 net.cpp:165] Memory required for data: 1303041500\nI0821 08:27:21.653511 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:27:21.653530 32551 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:27:21.653542 32551 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:27:21.653556 32551 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.653575 32551 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:27:21.653589 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.653607 32551 net.cpp:165] Memory required for data: 1305089500\nI0821 08:27:21.653620 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:27:21.653643 32551 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:27:21.653657 32551 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:27:21.653674 32551 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:27:21.654762 32551 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:27:21.654783 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.654793 32551 
net.cpp:165] Memory required for data: 1307137500\nI0821 08:27:21.654810 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:27:21.654831 32551 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:27:21.654844 32551 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:27:21.654868 32551 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:27:21.655172 32551 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:27:21.655191 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.655200 32551 net.cpp:165] Memory required for data: 1309185500\nI0821 08:27:21.655222 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:21.655239 32551 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:27:21.655251 32551 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:27:21.655272 32551 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.655362 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:21.655555 32551 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:27:21.655575 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.655583 32551 net.cpp:165] Memory required for data: 1311233500\nI0821 08:27:21.655602 32551 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:27:21.655619 32551 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:27:21.655632 32551 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:27:21.655644 32551 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:21.655666 32551 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:27:21.655727 32551 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:27:21.655746 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.655756 32551 net.cpp:165] Memory required for data: 1313281500\nI0821 08:27:21.655766 32551 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:27:21.655781 32551 net.cpp:100] Creating Layer 
L3_b3_relu\nI0821 08:27:21.655792 32551 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:27:21.655812 32551 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.655830 32551 net.cpp:150] Setting up L3_b3_relu\nI0821 08:27:21.655846 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.655854 32551 net.cpp:165] Memory required for data: 1315329500\nI0821 08:27:21.655864 32551 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.655879 32551 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.655890 32551 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:27:21.655905 32551 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:21.655925 32551 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:21.656010 32551 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.656029 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.656044 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.656052 32551 net.cpp:165] Memory required for data: 1319425500\nI0821 08:27:21.656064 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:27:21.656083 32551 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:27:21.656096 32551 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:21.656121 32551 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:27:21.657222 32551 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:27:21.657243 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.657253 32551 net.cpp:165] Memory required for data: 1321473500\nI0821 08:27:21.657269 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:27:21.657287 32551 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0821 08:27:21.657299 32551 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:27:21.657320 32551 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:27:21.657634 32551 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:27:21.657654 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.657662 32551 net.cpp:165] Memory required for data: 1323521500\nI0821 08:27:21.657685 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:21.657701 32551 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:27:21.657713 32551 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:27:21.657728 32551 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.657827 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:21.658041 32551 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:27:21.658061 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.658071 32551 net.cpp:165] Memory required for data: 1325569500\nI0821 08:27:21.658090 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:27:21.658110 32551 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:27:21.658123 32551 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:27:21.658138 32551 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.658156 32551 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:27:21.658170 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.658180 32551 net.cpp:165] Memory required for data: 1327617500\nI0821 08:27:21.658190 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:27:21.658215 32551 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:27:21.658228 32551 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:27:21.658247 32551 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:27:21.659337 32551 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:27:21.659358 32551 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0821 08:27:21.659368 32551 net.cpp:165] Memory required for data: 1329665500\nI0821 08:27:21.659385 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:27:21.659407 32551 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:27:21.659420 32551 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:27:21.659441 32551 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:27:21.659742 32551 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:27:21.659762 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.659772 32551 net.cpp:165] Memory required for data: 1331713500\nI0821 08:27:21.659793 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:21.659809 32551 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:27:21.659821 32551 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:27:21.659840 32551 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.659934 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:21.660142 32551 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:27:21.660161 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.660171 32551 net.cpp:165] Memory required for data: 1333761500\nI0821 08:27:21.660190 32551 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:27:21.660207 32551 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:27:21.660218 32551 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:27:21.660236 32551 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:21.660254 32551 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:27:21.660315 32551 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:27:21.660342 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.660352 32551 net.cpp:165] Memory required for data: 1335809500\nI0821 08:27:21.660363 32551 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0821 08:27:21.660377 32551 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:27:21.660388 32551 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:27:21.660406 32551 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:27:21.660426 32551 net.cpp:150] Setting up L3_b4_relu\nI0821 08:27:21.660440 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.660449 32551 net.cpp:165] Memory required for data: 1337857500\nI0821 08:27:21.660459 32551 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.660472 32551 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.660483 32551 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:27:21.660498 32551 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:21.660518 32551 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:21.660604 32551 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.660626 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.660640 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.660648 32551 net.cpp:165] Memory required for data: 1341953500\nI0821 08:27:21.660660 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:27:21.660681 32551 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:27:21.660692 32551 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:21.660717 32551 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:27:21.661814 32551 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:27:21.661835 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.661846 32551 net.cpp:165] Memory required for data: 1344001500\nI0821 08:27:21.661864 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 
08:27:21.661881 32551 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:27:21.661892 32551 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:27:21.661913 32551 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:27:21.663270 32551 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:27:21.663292 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.663302 32551 net.cpp:165] Memory required for data: 1346049500\nI0821 08:27:21.663326 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:21.663342 32551 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:27:21.663354 32551 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:27:21.663375 32551 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.663468 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:21.663666 32551 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:27:21.663686 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.663697 32551 net.cpp:165] Memory required for data: 1348097500\nI0821 08:27:21.663714 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:27:21.663730 32551 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:27:21.663743 32551 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:27:21.663761 32551 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.663781 32551 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:27:21.663796 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.663805 32551 net.cpp:165] Memory required for data: 1350145500\nI0821 08:27:21.663815 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:27:21.663841 32551 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:27:21.663854 32551 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:27:21.663877 32551 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:27:21.665966 32551 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0821 08:27:21.665989 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.665999 32551 net.cpp:165] Memory required for data: 1352193500\nI0821 08:27:21.666018 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:27:21.666040 32551 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:27:21.666052 32551 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:27:21.666075 32551 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:27:21.666366 32551 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:27:21.666385 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.666395 32551 net.cpp:165] Memory required for data: 1354241500\nI0821 08:27:21.666419 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:21.666435 32551 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:27:21.666446 32551 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:27:21.666466 32551 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.666558 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:21.666751 32551 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:27:21.666771 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.666781 32551 net.cpp:165] Memory required for data: 1356289500\nI0821 08:27:21.666800 32551 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:27:21.666817 32551 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:27:21.666829 32551 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:27:21.666842 32551 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:21.666863 32551 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:27:21.666923 32551 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:27:21.666949 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.666960 32551 net.cpp:165] Memory required for data: 1358337500\nI0821 
08:27:21.666971 32551 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:27:21.666985 32551 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:27:21.666997 32551 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:27:21.667016 32551 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.667037 32551 net.cpp:150] Setting up L3_b5_relu\nI0821 08:27:21.667050 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.667060 32551 net.cpp:165] Memory required for data: 1360385500\nI0821 08:27:21.667070 32551 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.667084 32551 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.667095 32551 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:27:21.667111 32551 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:21.667132 32551 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:21.667214 32551 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.667235 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.667248 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.667258 32551 net.cpp:165] Memory required for data: 1364481500\nI0821 08:27:21.667268 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:27:21.667289 32551 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:27:21.667300 32551 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:21.667323 32551 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:27:21.668400 32551 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:27:21.668421 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.668431 32551 net.cpp:165] Memory required for data: 1366529500\nI0821 08:27:21.668448 
32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:27:21.668467 32551 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:27:21.668485 32551 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:27:21.668507 32551 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:27:21.668812 32551 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:27:21.668833 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.668843 32551 net.cpp:165] Memory required for data: 1368577500\nI0821 08:27:21.668865 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:21.668882 32551 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:27:21.668895 32551 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:27:21.668910 32551 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.669006 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:21.669193 32551 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:27:21.669217 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.669229 32551 net.cpp:165] Memory required for data: 1370625500\nI0821 08:27:21.669247 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:27:21.669263 32551 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:27:21.669275 32551 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:27:21.669291 32551 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.669309 32551 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:27:21.669324 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.669334 32551 net.cpp:165] Memory required for data: 1372673500\nI0821 08:27:21.669343 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:27:21.669373 32551 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:27:21.669386 32551 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:27:21.669404 32551 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0821 08:27:21.670467 32551 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:27:21.670488 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.670498 32551 net.cpp:165] Memory required for data: 1374721500\nI0821 08:27:21.670516 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:27:21.670541 32551 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:27:21.670554 32551 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:27:21.670575 32551 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:27:21.670862 32551 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:27:21.670881 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.670891 32551 net.cpp:165] Memory required for data: 1376769500\nI0821 08:27:21.670912 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:21.670929 32551 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:27:21.670948 32551 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:27:21.670971 32551 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.671068 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:21.671258 32551 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:27:21.671277 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.671288 32551 net.cpp:165] Memory required for data: 1378817500\nI0821 08:27:21.671305 32551 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:27:21.671327 32551 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:27:21.671340 32551 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:27:21.671353 32551 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:21.671370 32551 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:27:21.671428 32551 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:27:21.671447 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.671458 
32551 net.cpp:165] Memory required for data: 1380865500\nI0821 08:27:21.671466 32551 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:27:21.671481 32551 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:27:21.671494 32551 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:27:21.671521 32551 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.671541 32551 net.cpp:150] Setting up L3_b6_relu\nI0821 08:27:21.671557 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.671566 32551 net.cpp:165] Memory required for data: 1382913500\nI0821 08:27:21.671576 32551 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.671591 32551 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.671602 32551 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:27:21.671618 32551 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:21.671638 32551 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:21.671722 32551 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.671742 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.671756 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.671764 32551 net.cpp:165] Memory required for data: 1387009500\nI0821 08:27:21.671775 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:27:21.671794 32551 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:27:21.671808 32551 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:21.671830 32551 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:27:21.672909 32551 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:27:21.672930 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.672945 32551 net.cpp:165] 
Memory required for data: 1389057500\nI0821 08:27:21.672965 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:27:21.672987 32551 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:27:21.673001 32551 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:27:21.673017 32551 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:27:21.673372 32551 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:27:21.673393 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.673403 32551 net.cpp:165] Memory required for data: 1391105500\nI0821 08:27:21.673424 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:21.673441 32551 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:27:21.673454 32551 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:27:21.673470 32551 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.673563 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:21.673754 32551 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:27:21.673779 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.673789 32551 net.cpp:165] Memory required for data: 1393153500\nI0821 08:27:21.673807 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:27:21.673852 32551 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:27:21.673868 32551 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:27:21.673883 32551 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.673902 32551 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:27:21.673918 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.673926 32551 net.cpp:165] Memory required for data: 1395201500\nI0821 08:27:21.673938 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:27:21.673972 32551 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:27:21.673985 32551 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:27:21.674003 
32551 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:27:21.675082 32551 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:27:21.675103 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.675112 32551 net.cpp:165] Memory required for data: 1397249500\nI0821 08:27:21.675130 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:27:21.675148 32551 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:27:21.675168 32551 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:27:21.675190 32551 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:27:21.675508 32551 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:27:21.675529 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.675537 32551 net.cpp:165] Memory required for data: 1399297500\nI0821 08:27:21.675559 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:21.675576 32551 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:27:21.675588 32551 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:27:21.675603 32551 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.675696 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:21.675899 32551 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:27:21.675920 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.675930 32551 net.cpp:165] Memory required for data: 1401345500\nI0821 08:27:21.675956 32551 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:27:21.675977 32551 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:27:21.675992 32551 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:27:21.676004 32551 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:21.676020 32551 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:27:21.676080 32551 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:27:21.676098 32551 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0821 08:27:21.676107 32551 net.cpp:165] Memory required for data: 1403393500\nI0821 08:27:21.676118 32551 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:27:21.676132 32551 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:27:21.676144 32551 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:27:21.676157 32551 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.676177 32551 net.cpp:150] Setting up L3_b7_relu\nI0821 08:27:21.676192 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.676200 32551 net.cpp:165] Memory required for data: 1405441500\nI0821 08:27:21.676210 32551 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.676224 32551 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.676235 32551 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:27:21.676256 32551 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:21.676278 32551 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:21.676358 32551 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.676378 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.676391 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.676401 32551 net.cpp:165] Memory required for data: 1409537500\nI0821 08:27:21.676411 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:27:21.676436 32551 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:27:21.676451 32551 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:21.676470 32551 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:27:21.677553 32551 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:27:21.677574 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:27:21.677584 32551 net.cpp:165] Memory required for data: 1411585500\nI0821 08:27:21.677603 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:27:21.677624 32551 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:27:21.677637 32551 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:27:21.677654 32551 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:27:21.677965 32551 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:27:21.677985 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.677995 32551 net.cpp:165] Memory required for data: 1413633500\nI0821 08:27:21.678026 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:21.678050 32551 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:27:21.678064 32551 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:27:21.678081 32551 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.678179 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:21.678376 32551 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:27:21.678395 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.678405 32551 net.cpp:165] Memory required for data: 1415681500\nI0821 08:27:21.678423 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:27:21.678442 32551 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:27:21.678455 32551 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:27:21.678469 32551 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.678494 32551 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:27:21.678508 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.678519 32551 net.cpp:165] Memory required for data: 1417729500\nI0821 08:27:21.678529 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:27:21.678548 32551 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:27:21.678561 32551 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0821 08:27:21.678583 32551 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:27:21.679658 32551 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:27:21.679678 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.679688 32551 net.cpp:165] Memory required for data: 1419777500\nI0821 08:27:21.679707 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:27:21.679724 32551 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:27:21.679738 32551 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:27:21.679759 32551 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:27:21.680068 32551 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:27:21.680088 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680099 32551 net.cpp:165] Memory required for data: 1421825500\nI0821 08:27:21.680119 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:21.680135 32551 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:27:21.680146 32551 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:27:21.680162 32551 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.680255 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:21.680449 32551 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:27:21.680471 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680482 32551 net.cpp:165] Memory required for data: 1423873500\nI0821 08:27:21.680501 32551 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:27:21.680521 32551 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:27:21.680532 32551 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:27:21.680546 32551 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:21.680562 32551 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:27:21.680621 32551 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 
08:27:21.680641 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680651 32551 net.cpp:165] Memory required for data: 1425921500\nI0821 08:27:21.680661 32551 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:27:21.680676 32551 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:27:21.680688 32551 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:27:21.680702 32551 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.680721 32551 net.cpp:150] Setting up L3_b8_relu\nI0821 08:27:21.680737 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680745 32551 net.cpp:165] Memory required for data: 1427969500\nI0821 08:27:21.680755 32551 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.680778 32551 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.680790 32551 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:27:21.680809 32551 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:21.680830 32551 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:21.680912 32551 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.680938 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680960 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.680970 32551 net.cpp:165] Memory required for data: 1432065500\nI0821 08:27:21.680980 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:27:21.681000 32551 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:27:21.681013 32551 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:21.681031 32551 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:27:21.683118 32551 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:27:21.683140 
32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.683151 32551 net.cpp:165] Memory required for data: 1434113500\nI0821 08:27:21.683168 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:27:21.683190 32551 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:27:21.683203 32551 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:27:21.683220 32551 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:27:21.683521 32551 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:27:21.683539 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.683549 32551 net.cpp:165] Memory required for data: 1436161500\nI0821 08:27:21.683571 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:21.683588 32551 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:27:21.683599 32551 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:27:21.683615 32551 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.683709 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:21.683902 32551 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:27:21.683923 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.683933 32551 net.cpp:165] Memory required for data: 1438209500\nI0821 08:27:21.683957 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:27:21.683974 32551 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:27:21.683986 32551 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:27:21.684006 32551 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.684026 32551 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:27:21.684041 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.684051 32551 net.cpp:165] Memory required for data: 1440257500\nI0821 08:27:21.684059 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:27:21.684084 32551 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 
08:27:21.684098 32551 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:27:21.684115 32551 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:27:21.685191 32551 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:27:21.685211 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.685221 32551 net.cpp:165] Memory required for data: 1442305500\nI0821 08:27:21.685240 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:27:21.685261 32551 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:27:21.685274 32551 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:27:21.685292 32551 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:27:21.685595 32551 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:27:21.685613 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.685623 32551 net.cpp:165] Memory required for data: 1444353500\nI0821 08:27:21.685655 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:21.685676 32551 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:27:21.685690 32551 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:27:21.685706 32551 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.685806 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:21.686003 32551 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:27:21.686023 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.686033 32551 net.cpp:165] Memory required for data: 1446401500\nI0821 08:27:21.686051 32551 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:27:21.686072 32551 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:27:21.686085 32551 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:27:21.686100 32551 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:21.686120 32551 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:27:21.686175 32551 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:27:21.686193 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.686203 32551 net.cpp:165] Memory required for data: 1448449500\nI0821 08:27:21.686213 32551 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:27:21.686234 32551 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:27:21.686247 32551 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:27:21.686262 32551 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.686281 32551 net.cpp:150] Setting up L3_b9_relu\nI0821 08:27:21.686295 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.686305 32551 net.cpp:165] Memory required for data: 1450497500\nI0821 08:27:21.686314 32551 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:27:21.686329 32551 net.cpp:100] Creating Layer post_pool\nI0821 08:27:21.686342 32551 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:27:21.686358 32551 net.cpp:408] post_pool -> post_pool\nI0821 08:27:21.686419 32551 net.cpp:150] Setting up post_pool\nI0821 08:27:21.686439 32551 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:27:21.686450 32551 net.cpp:165] Memory required for data: 1450529500\nI0821 08:27:21.686460 32551 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:27:21.686571 32551 net.cpp:100] Creating Layer post_FC\nI0821 08:27:21.686589 32551 net.cpp:434] post_FC <- post_pool\nI0821 08:27:21.686606 32551 net.cpp:408] post_FC -> post_FC_top\nI0821 08:27:21.686909 32551 net.cpp:150] Setting up post_FC\nI0821 08:27:21.686930 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.686946 32551 net.cpp:165] Memory required for data: 1450534500\nI0821 08:27:21.686966 32551 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:27:21.686981 32551 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:27:21.686993 32551 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:27:21.687014 32551 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:27:21.687036 32551 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:27:21.687116 32551 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:27:21.687136 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.687150 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.687158 32551 net.cpp:165] Memory required for data: 1450544500\nI0821 08:27:21.687170 32551 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:27:21.687235 32551 net.cpp:100] Creating Layer accuracy\nI0821 08:27:21.687252 32551 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:27:21.687264 32551 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:27:21.687280 32551 net.cpp:408] accuracy -> accuracy\nI0821 08:27:21.687350 32551 net.cpp:150] Setting up accuracy\nI0821 08:27:21.687369 32551 net.cpp:157] Top shape: (1)\nI0821 08:27:21.687378 32551 net.cpp:165] Memory required for data: 1450544504\nI0821 08:27:21.687397 32551 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:21.687413 32551 net.cpp:100] Creating Layer loss\nI0821 08:27:21.687425 32551 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:27:21.687439 32551 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:27:21.687454 32551 net.cpp:408] loss -> loss\nI0821 08:27:21.688480 32551 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:21.689651 32551 net.cpp:150] Setting up loss\nI0821 08:27:21.689671 32551 net.cpp:157] Top shape: (1)\nI0821 08:27:21.689682 32551 net.cpp:160]     with loss weight 1\nI0821 08:27:21.689792 32551 net.cpp:165] Memory required for data: 1450544508\nI0821 08:27:21.689807 32551 net.cpp:226] loss needs backward computation.\nI0821 08:27:21.689818 32551 net.cpp:228] accuracy does not need backward computation.\nI0821 08:27:21.689831 32551 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:27:21.689841 32551 net.cpp:226] 
post_FC needs backward computation.\nI0821 08:27:21.689851 32551 net.cpp:226] post_pool needs backward computation.\nI0821 08:27:21.689860 32551 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:27:21.689869 32551 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.689880 32551 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.689889 32551 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.689899 32551 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.689910 32551 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.689919 32551 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.689929 32551 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.689945 32551 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.689959 32551 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:27:21.689968 32551 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:27:21.689978 32551 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.689990 32551 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.689999 32551 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.690009 32551 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.690019 32551 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.690029 32551 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:27:21.690038 32551 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.690048 32551 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.690058 32551 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:27:21.690069 32551 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:27:21.690079 32551 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0821 08:27:21.690090 32551 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.690100 32551 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.690110 32551 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.690121 32551 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.690130 32551 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.690140 32551 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.690150 32551 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.690160 32551 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:27:21.690171 32551 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:27:21.690181 32551 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.690192 32551 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.690202 32551 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.690212 32551 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.690230 32551 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.690241 32551 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.690251 32551 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.690261 32551 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.690271 32551 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:27:21.690282 32551 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:27:21.690291 32551 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:27:21.690302 32551 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.690312 32551 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.690322 
32551 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.690332 32551 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.690342 32551 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.690352 32551 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.690362 32551 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.690372 32551 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:27:21.690392 32551 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:27:21.690404 32551 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.690415 32551 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.690425 32551 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.690436 32551 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.690446 32551 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.690456 32551 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.690465 32551 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.690476 32551 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.690486 32551 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:27:21.690496 32551 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:27:21.690506 32551 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:27:21.690518 32551 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.690528 32551 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.690538 32551 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.690549 32551 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.690559 32551 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0821 08:27:21.690568 32551 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.690578 32551 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.690589 32551 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:27:21.690600 32551 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:27:21.690610 32551 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.690621 32551 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.690631 32551 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.690642 32551 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.690652 32551 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.690662 32551 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.690672 32551 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.690683 32551 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.690695 32551 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:27:21.690704 32551 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:27:21.690716 32551 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:27:21.690735 32551 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:27:21.690747 32551 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.690758 32551 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:27:21.690768 32551 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.690779 32551 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.690790 32551 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.690801 32551 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.690810 32551 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0821 08:27:21.690820 32551 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.690831 32551 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.690842 32551 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:27:21.690852 32551 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:27:21.690862 32551 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.690873 32551 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.690883 32551 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.690894 32551 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.690904 32551 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.690920 32551 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.690932 32551 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.690948 32551 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.690963 32551 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:27:21.690973 32551 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:27:21.690984 32551 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.690994 32551 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.691005 32551 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.691016 32551 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.691025 32551 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.691036 32551 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:27:21.691046 32551 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.691057 32551 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.691068 32551 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:27:21.691079 32551 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:27:21.691090 32551 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:27:21.691102 32551 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.691112 32551 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.691121 32551 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.691133 32551 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.691143 32551 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.691153 32551 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.691164 32551 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.691174 32551 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:27:21.691185 32551 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:27:21.691195 32551 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.691207 32551 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.691218 32551 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.691228 32551 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.691239 32551 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.691249 32551 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.691267 32551 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.691279 32551 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.691290 32551 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:27:21.691303 32551 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:27:21.691313 32551 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:27:21.691324 32551 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.691334 32551 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.691345 32551 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.691355 32551 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.691366 32551 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.691375 32551 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.691386 32551 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.691397 32551 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:27:21.691408 32551 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:27:21.691417 32551 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.691429 32551 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.691439 32551 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.691450 32551 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.691460 32551 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.691470 32551 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.691483 32551 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.691493 32551 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.691504 32551 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:27:21.691514 32551 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:27:21.691524 32551 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:27:21.691536 32551 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.691546 32551 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.691557 32551 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.691568 32551 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.691578 32551 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:27:21.691588 32551 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.691599 32551 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.691612 32551 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:27:21.691622 32551 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:27:21.691632 32551 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.691642 32551 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.691653 32551 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.691664 32551 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.691680 32551 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.691692 32551 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.691702 32551 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.691712 32551 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.691725 32551 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:27:21.691735 32551 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:27:21.691747 32551 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:27:21.691757 32551 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:27:21.691768 32551 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.691788 32551 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:27:21.691800 32551 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.691810 32551 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.691821 32551 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.691833 32551 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.691843 32551 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:27:21.691854 32551 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.691864 32551 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.691874 32551 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:27:21.691885 32551 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:27:21.691896 32551 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.691907 32551 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.691918 32551 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.691929 32551 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.691947 32551 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.691959 32551 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.691970 32551 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.691982 32551 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.691992 32551 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:27:21.692004 32551 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:27:21.692015 32551 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.692026 32551 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.692037 32551 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.692049 32551 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.692059 32551 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.692070 32551 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:27:21.692080 32551 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.692091 32551 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.692102 32551 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:27:21.692114 32551 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:27:21.692126 32551 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:27:21.692137 32551 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.692147 32551 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.692157 32551 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.692168 32551 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.692179 32551 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.692189 32551 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.692200 32551 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.692212 32551 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:27:21.692224 32551 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:27:21.692235 32551 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.692247 32551 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.692257 32551 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.692267 32551 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.692278 32551 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.692289 32551 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.692299 32551 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.692318 32551 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.692330 32551 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:27:21.692342 32551 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:27:21.692353 32551 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:27:21.692364 32551 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.692374 32551 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.692386 32551 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.692397 32551 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.692409 32551 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.692418 32551 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.692430 32551 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.692442 32551 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:27:21.692453 32551 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:27:21.692463 32551 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.692476 32551 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.692487 32551 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.692497 32551 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.692509 32551 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.692520 32551 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.692530 32551 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.692541 32551 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.692553 32551 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:27:21.692564 32551 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:27:21.692574 32551 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:27:21.692586 32551 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.692597 32551 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.692610 32551 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.692620 32551 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.692631 32551 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:27:21.692641 32551 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.692652 32551 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.692663 32551 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:27:21.692674 32551 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:27:21.692687 32551 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.692698 32551 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.692708 32551 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.692719 32551 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.692731 32551 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.692741 32551 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.692752 32551 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.692764 32551 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.692775 32551 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:27:21.692785 32551 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:27:21.692795 32551 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.692808 32551 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.692819 32551 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.692831 32551 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.692849 32551 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.692862 32551 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:27:21.692873 32551 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.692883 32551 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.692894 32551 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:27:21.692905 32551 net.cpp:226] pre_relu needs backward computation.\nI0821 08:27:21.692916 32551 net.cpp:226] pre_scale needs backward computation.\nI0821 08:27:21.692926 32551 net.cpp:226] pre_bn needs backward computation.\nI0821 08:27:21.692937 32551 net.cpp:226] pre_conv needs backward computation.\nI0821 08:27:21.692958 32551 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:27:21.692971 32551 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:27:21.692981 32551 net.cpp:270] This network produces output accuracy\nI0821 08:27:21.692992 32551 net.cpp:270] This network produces output loss\nI0821 08:27:21.693403 32551 net.cpp:283] Network initialization done.\nI0821 08:27:21.702860 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:21.702910 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:21.702992 32551 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:27:21.703397 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:27:21.703423 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:27:21.703443 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 08:27:21.703462 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:27:21.703482 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:27:21.703501 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:27:21.703521 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:27:21.703539 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:27:21.703562 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:27:21.703579 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:27:21.703599 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:27:21.703616 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:27:21.703635 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:27:21.703654 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:27:21.703673 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:27:21.703691 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:27:21.703709 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:27:21.703728 32551 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 08:27:21.703748 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:27:21.703776 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:27:21.703797 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:27:21.703816 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:27:21.703840 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:27:21.703860 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:27:21.703878 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:27:21.703896 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:27:21.703913 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:27:21.703932 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:27:21.703956 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:27:21.703976 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:27:21.703996 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:27:21.704015 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:27:21.704035 32551 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:27:21.704051 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:27:21.704069 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:27:21.704087 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:27:21.704107 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:27:21.704125 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:27:21.704143 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:27:21.704160 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:27:21.704185 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:27:21.704202 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:27:21.704219 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:27:21.704236 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:27:21.704255 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:27:21.704272 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:27:21.704293 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 08:27:21.704308 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:27:21.704327 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:27:21.704355 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:27:21.704375 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:27:21.704391 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:27:21.704411 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:27:21.704428 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:27:21.704448 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:27:21.704464 32551 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:27:21.706166 32551 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0821 08:27:21.707932 32551 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:27:21.708758 32551 net.cpp:100] Creating Layer dataLayer\nI0821 08:27:21.708787 32551 net.cpp:408] dataLayer -> data_top\nI0821 08:27:21.708814 32551 net.cpp:408] dataLayer -> label\nI0821 08:27:21.708838 32551 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:27:21.722687 32558 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:27:21.722918 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:21.730199 32551 net.cpp:150] Setting up dataLayer\nI0821 08:27:21.730232 32551 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:27:21.730247 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.730257 32551 net.cpp:165] Memory required for data: 1536500\nI0821 08:27:21.730271 32551 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:27:21.730289 32551 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:27:21.730304 32551 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:27:21.730319 32551 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:27:21.730345 32551 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:27:21.730486 32551 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:27:21.730507 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.730521 32551 net.cpp:157] Top shape: 125 (125)\nI0821 08:27:21.730530 32551 net.cpp:165] Memory required for data: 1537500\nI0821 08:27:21.730540 32551 layer_factory.hpp:77] Creating layer 
pre_conv\nI0821 08:27:21.730602 32551 net.cpp:100] Creating Layer pre_conv\nI0821 08:27:21.730618 32551 net.cpp:434] pre_conv <- data_top\nI0821 08:27:21.730664 32551 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:27:21.731156 32551 net.cpp:150] Setting up pre_conv\nI0821 08:27:21.731189 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.731200 32551 net.cpp:165] Memory required for data: 9729500\nI0821 08:27:21.731230 32551 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:27:21.731256 32551 net.cpp:100] Creating Layer pre_bn\nI0821 08:27:21.731267 32551 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:27:21.731283 32551 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:27:21.731703 32551 net.cpp:150] Setting up pre_bn\nI0821 08:27:21.731724 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.731734 32551 net.cpp:165] Memory required for data: 17921500\nI0821 08:27:21.731766 32551 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:21.731793 32551 net.cpp:100] Creating Layer pre_scale\nI0821 08:27:21.731806 32551 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:27:21.731822 32551 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:27:21.731948 32551 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:27:21.732165 32551 net.cpp:150] Setting up pre_scale\nI0821 08:27:21.732187 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.732195 32551 net.cpp:165] Memory required for data: 26113500\nI0821 08:27:21.732216 32551 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:27:21.732233 32551 net.cpp:100] Creating Layer pre_relu\nI0821 08:27:21.732245 32551 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:27:21.732264 32551 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:27:21.732283 32551 net.cpp:150] Setting up pre_relu\nI0821 08:27:21.732296 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.732305 32551 net.cpp:165] Memory required for data: 34305500\nI0821 08:27:21.732319 
32551 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:27:21.732332 32551 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:27:21.732342 32551 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:27:21.732365 32551 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:27:21.732390 32551 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:27:21.732542 32551 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:27:21.732563 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.732575 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.732589 32551 net.cpp:165] Memory required for data: 50689500\nI0821 08:27:21.732599 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:27:21.732620 32551 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:27:21.732630 32551 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:27:21.732656 32551 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:27:21.733134 32551 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:27:21.733155 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.733163 32551 net.cpp:165] Memory required for data: 58881500\nI0821 08:27:21.733189 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:27:21.733214 32551 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:27:21.733227 32551 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:27:21.733244 32551 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:27:21.733588 32551 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:27:21.733611 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.733623 32551 net.cpp:165] Memory required for data: 67073500\nI0821 08:27:21.733644 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:21.733660 32551 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 
08:27:21.733670 32551 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:27:21.733685 32551 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.733784 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:27:21.733989 32551 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:27:21.734009 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.734027 32551 net.cpp:165] Memory required for data: 75265500\nI0821 08:27:21.734046 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:27:21.734066 32551 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:27:21.734079 32551 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:27:21.734093 32551 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.734110 32551 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:27:21.734125 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.734134 32551 net.cpp:165] Memory required for data: 83457500\nI0821 08:27:21.734143 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:27:21.734169 32551 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:27:21.734180 32551 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:27:21.734201 32551 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:27:21.734596 32551 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:27:21.734616 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.734624 32551 net.cpp:165] Memory required for data: 91649500\nI0821 08:27:21.734642 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:27:21.734659 32551 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:27:21.734670 32551 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:27:21.734690 32551 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:27:21.735002 32551 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:27:21.735021 32551 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:21.735031 32551 net.cpp:165] Memory required for data: 99841500\nI0821 08:27:21.735064 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:21.735081 32551 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:27:21.735092 32551 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:27:21.735108 32551 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.735195 32551 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:27:21.735590 32551 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:27:21.735610 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.735620 32551 net.cpp:165] Memory required for data: 108033500\nI0821 08:27:21.735637 32551 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:27:21.735658 32551 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:27:21.735671 32551 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:27:21.735685 32551 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:27:21.735707 32551 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:27:21.735767 32551 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:27:21.735788 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.735800 32551 net.cpp:165] Memory required for data: 116225500\nI0821 08:27:21.735810 32551 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:27:21.735829 32551 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:27:21.735842 32551 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:27:21.735857 32551 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.735873 32551 net.cpp:150] Setting up L1_b1_relu\nI0821 08:27:21.735888 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.735896 32551 net.cpp:165] Memory required for data: 124417500\nI0821 08:27:21.735906 32551 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.735922 32551 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.735934 32551 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:27:21.735957 32551 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:21.735977 32551 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:21.736060 32551 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:27:21.736093 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.736109 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.736116 32551 net.cpp:165] Memory required for data: 140801500\nI0821 08:27:21.736129 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:27:21.736157 32551 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:27:21.736172 32551 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:27:21.736191 32551 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:27:21.736620 32551 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:27:21.736644 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.736657 32551 net.cpp:165] Memory required for data: 148993500\nI0821 08:27:21.736678 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:27:21.736701 32551 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:27:21.736717 32551 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:27:21.736733 32551 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:27:21.737098 32551 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:27:21.737120 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.737133 32551 net.cpp:165] Memory required for data: 157185500\nI0821 08:27:21.737155 32551 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 08:27:21.737175 32551 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:27:21.737186 32551 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:27:21.737201 32551 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.737303 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:27:21.737556 32551 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:27:21.737578 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.737591 32551 net.cpp:165] Memory required for data: 165377500\nI0821 08:27:21.737610 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:27:21.737624 32551 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:27:21.737635 32551 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:27:21.737655 32551 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.737678 32551 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:27:21.737691 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.737704 32551 net.cpp:165] Memory required for data: 173569500\nI0821 08:27:21.737715 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:27:21.737742 32551 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:27:21.737756 32551 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:27:21.737776 32551 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:27:21.738224 32551 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:27:21.738246 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.738256 32551 net.cpp:165] Memory required for data: 181761500\nI0821 08:27:21.738277 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:27:21.738303 32551 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:27:21.738319 32551 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:27:21.738337 32551 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:27:21.738700 32551 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:27:21.738726 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.738739 32551 net.cpp:165] Memory required for data: 189953500\nI0821 08:27:21.738771 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:21.738790 32551 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:27:21.738801 32551 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:27:21.738821 32551 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.738934 32551 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:27:21.739148 32551 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:27:21.739171 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.739181 32551 net.cpp:165] Memory required for data: 198145500\nI0821 08:27:21.739212 32551 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:27:21.739234 32551 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:27:21.739249 32551 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:27:21.739262 32551 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:27:21.739281 32551 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:27:21.739346 32551 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:27:21.739369 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.739387 32551 net.cpp:165] Memory required for data: 206337500\nI0821 08:27:21.739398 32551 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:27:21.739413 32551 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:27:21.739426 32551 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:27:21.739441 32551 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:27:21.739460 32551 net.cpp:150] Setting up L1_b2_relu\nI0821 08:27:21.739473 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.739482 32551 net.cpp:165] Memory required for 
data: 214529500\nI0821 08:27:21.739497 32551 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.739511 32551 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.739521 32551 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:27:21.739544 32551 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:21.739564 32551 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:21.739653 32551 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:27:21.739675 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.739691 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.739701 32551 net.cpp:165] Memory required for data: 230913500\nI0821 08:27:21.739713 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:27:21.739740 32551 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:27:21.739758 32551 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:27:21.739776 32551 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:27:21.740228 32551 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:27:21.740249 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.740258 32551 net.cpp:165] Memory required for data: 239105500\nI0821 08:27:21.740278 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:27:21.740296 32551 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:27:21.740321 32551 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:27:21.740340 32551 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:27:21.740726 32551 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:27:21.740747 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.740757 32551 net.cpp:165] Memory required for data: 
247297500\nI0821 08:27:21.740782 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:21.740798 32551 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:27:21.740808 32551 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:27:21.740823 32551 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.740936 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:27:21.741160 32551 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:27:21.741183 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.741192 32551 net.cpp:165] Memory required for data: 255489500\nI0821 08:27:21.741211 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:27:21.741230 32551 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:27:21.741241 32551 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:27:21.741259 32551 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.741291 32551 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:27:21.741307 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.741315 32551 net.cpp:165] Memory required for data: 263681500\nI0821 08:27:21.741328 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:27:21.741355 32551 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:27:21.741367 32551 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:27:21.741389 32551 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:27:21.741833 32551 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:27:21.741853 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.741865 32551 net.cpp:165] Memory required for data: 271873500\nI0821 08:27:21.741884 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:27:21.741914 32551 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:27:21.741927 32551 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:27:21.741955 32551 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:27:21.742317 32551 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:27:21.742339 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.742349 32551 net.cpp:165] Memory required for data: 280065500\nI0821 08:27:21.742372 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:21.742388 32551 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:27:21.742398 32551 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:27:21.742416 32551 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.742522 32551 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:27:21.742736 32551 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:27:21.742756 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.742766 32551 net.cpp:165] Memory required for data: 288257500\nI0821 08:27:21.742784 32551 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:27:21.742805 32551 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:27:21.742820 32551 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:27:21.742833 32551 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:27:21.742852 32551 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:27:21.742918 32551 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:27:21.742945 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.742956 32551 net.cpp:165] Memory required for data: 296449500\nI0821 08:27:21.742967 32551 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:27:21.742981 32551 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:27:21.742995 32551 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:27:21.743021 32551 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.743043 32551 net.cpp:150] Setting up L1_b3_relu\nI0821 08:27:21.743058 32551 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 08:27:21.743067 32551 net.cpp:165] Memory required for data: 304641500\nI0821 08:27:21.743082 32551 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.743094 32551 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.743104 32551 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:27:21.743124 32551 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:21.743144 32551 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:21.743239 32551 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:27:21.743258 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.743270 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.743279 32551 net.cpp:165] Memory required for data: 321025500\nI0821 08:27:21.743291 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:27:21.743317 32551 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:27:21.743340 32551 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:27:21.743360 32551 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:27:21.743837 32551 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:27:21.743858 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.743867 32551 net.cpp:165] Memory required for data: 329217500\nI0821 08:27:21.743885 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:27:21.743911 32551 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:27:21.743924 32551 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:27:21.743959 32551 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:27:21.744319 32551 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:27:21.744338 32551 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:27:21.744348 32551 net.cpp:165] Memory required for data: 337409500\nI0821 08:27:21.744374 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:21.744390 32551 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:27:21.744403 32551 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:27:21.744421 32551 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.744524 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:27:21.744742 32551 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:27:21.744763 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.744772 32551 net.cpp:165] Memory required for data: 345601500\nI0821 08:27:21.744791 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:27:21.744810 32551 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:27:21.744822 32551 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:27:21.744835 32551 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.744854 32551 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:27:21.744868 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.744876 32551 net.cpp:165] Memory required for data: 353793500\nI0821 08:27:21.744886 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:27:21.744910 32551 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:27:21.744922 32551 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:27:21.744951 32551 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:27:21.745347 32551 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:27:21.745367 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.745375 32551 net.cpp:165] Memory required for data: 361985500\nI0821 08:27:21.745393 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:27:21.745415 32551 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:27:21.745427 32551 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:27:21.745442 32551 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:27:21.745755 32551 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:27:21.745775 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.745785 32551 net.cpp:165] Memory required for data: 370177500\nI0821 08:27:21.745806 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:21.745822 32551 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:27:21.745834 32551 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:27:21.745853 32551 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.745950 32551 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:27:21.746145 32551 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:27:21.746168 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.746178 32551 net.cpp:165] Memory required for data: 378369500\nI0821 08:27:21.746197 32551 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:27:21.746214 32551 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:27:21.746227 32551 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:27:21.746239 32551 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:27:21.746264 32551 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:27:21.746325 32551 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:27:21.746345 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.746354 32551 net.cpp:165] Memory required for data: 386561500\nI0821 08:27:21.746364 32551 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:27:21.746383 32551 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:27:21.746397 32551 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:27:21.746410 32551 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:27:21.746429 32551 net.cpp:150] 
Setting up L1_b4_relu\nI0821 08:27:21.746444 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.746454 32551 net.cpp:165] Memory required for data: 394753500\nI0821 08:27:21.746464 32551 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.746476 32551 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.746487 32551 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:27:21.746507 32551 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:21.746527 32551 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:21.746608 32551 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:27:21.746634 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.746647 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.746656 32551 net.cpp:165] Memory required for data: 411137500\nI0821 08:27:21.746666 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:27:21.746691 32551 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:27:21.746704 32551 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:27:21.746723 32551 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:27:21.747123 32551 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:27:21.747143 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.747153 32551 net.cpp:165] Memory required for data: 419329500\nI0821 08:27:21.747189 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:27:21.747211 32551 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:27:21.747225 32551 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:27:21.747241 32551 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:27:21.747557 32551 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 08:27:21.747576 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.747586 32551 net.cpp:165] Memory required for data: 427521500\nI0821 08:27:21.747606 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:21.747623 32551 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:27:21.747633 32551 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:27:21.747648 32551 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.747753 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:27:21.747956 32551 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:27:21.747977 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.747985 32551 net.cpp:165] Memory required for data: 435713500\nI0821 08:27:21.748003 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:27:21.748018 32551 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:27:21.748029 32551 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:27:21.748047 32551 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.748067 32551 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:27:21.748081 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.748090 32551 net.cpp:165] Memory required for data: 443905500\nI0821 08:27:21.748100 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:27:21.748132 32551 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:27:21.748145 32551 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:27:21.748167 32551 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:27:21.748572 32551 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:27:21.748592 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.748602 32551 net.cpp:165] Memory required for data: 452097500\nI0821 08:27:21.748620 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:27:21.748669 32551 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:27:21.748684 32551 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:27:21.748703 32551 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:27:21.749042 32551 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:27:21.749061 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749070 32551 net.cpp:165] Memory required for data: 460289500\nI0821 08:27:21.749091 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:21.749107 32551 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:27:21.749119 32551 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:27:21.749132 32551 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.749231 32551 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:27:21.749429 32551 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:27:21.749449 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749457 32551 net.cpp:165] Memory required for data: 468481500\nI0821 08:27:21.749475 32551 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:27:21.749491 32551 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:27:21.749503 32551 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:27:21.749516 32551 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:27:21.749536 32551 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:27:21.749591 32551 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:27:21.749609 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749617 32551 net.cpp:165] Memory required for data: 476673500\nI0821 08:27:21.749627 32551 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:27:21.749651 32551 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:27:21.749663 32551 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:27:21.749677 32551 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.749696 32551 net.cpp:150] Setting up L1_b5_relu\nI0821 08:27:21.749711 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749719 32551 net.cpp:165] Memory required for data: 484865500\nI0821 08:27:21.749728 32551 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.749742 32551 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.749752 32551 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:27:21.749766 32551 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:21.749784 32551 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:21.749871 32551 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:27:21.749891 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749904 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.749913 32551 net.cpp:165] Memory required for data: 501249500\nI0821 08:27:21.749923 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:27:21.749953 32551 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:27:21.749966 32551 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:27:21.749984 32551 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:27:21.750385 32551 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:27:21.750404 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.750422 32551 net.cpp:165] Memory required for data: 509441500\nI0821 08:27:21.750440 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:27:21.750463 32551 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:27:21.750474 32551 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:27:21.750490 32551 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 08:27:21.750804 32551 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:27:21.750823 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.750833 32551 net.cpp:165] Memory required for data: 517633500\nI0821 08:27:21.750854 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:21.750870 32551 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:27:21.750880 32551 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:27:21.750895 32551 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.751003 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:27:21.751219 32551 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:27:21.751240 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.751247 32551 net.cpp:165] Memory required for data: 525825500\nI0821 08:27:21.751266 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:27:21.751279 32551 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:27:21.751291 32551 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:27:21.751309 32551 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.751328 32551 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:27:21.751343 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.751351 32551 net.cpp:165] Memory required for data: 534017500\nI0821 08:27:21.751360 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:27:21.751384 32551 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:27:21.751397 32551 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:27:21.751415 32551 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:27:21.751804 32551 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:27:21.751824 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.751834 32551 net.cpp:165] Memory required for data: 542209500\nI0821 08:27:21.751852 32551 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:27:21.751868 32551 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:27:21.751879 32551 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:27:21.751899 32551 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:27:21.752226 32551 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:27:21.752250 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.752260 32551 net.cpp:165] Memory required for data: 550401500\nI0821 08:27:21.752281 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:21.752297 32551 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:27:21.752308 32551 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:27:21.752322 32551 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.752418 32551 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:27:21.752615 32551 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:27:21.752635 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.752643 32551 net.cpp:165] Memory required for data: 558593500\nI0821 08:27:21.752660 32551 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:27:21.752694 32551 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:27:21.752707 32551 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:27:21.752720 32551 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:27:21.752737 32551 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:27:21.752797 32551 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:27:21.752817 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.752826 32551 net.cpp:165] Memory required for data: 566785500\nI0821 08:27:21.752846 32551 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:27:21.752862 32551 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:27:21.752872 32551 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 08:27:21.752887 32551 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.752905 32551 net.cpp:150] Setting up L1_b6_relu\nI0821 08:27:21.752919 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.752929 32551 net.cpp:165] Memory required for data: 574977500\nI0821 08:27:21.752938 32551 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.752961 32551 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.752972 32551 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:27:21.752991 32551 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:21.753011 32551 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:21.753094 32551 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:27:21.753116 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.753129 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.753137 32551 net.cpp:165] Memory required for data: 591361500\nI0821 08:27:21.753149 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:27:21.753172 32551 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:27:21.753185 32551 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:27:21.753202 32551 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:27:21.753600 32551 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:27:21.753620 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.753629 32551 net.cpp:165] Memory required for data: 599553500\nI0821 08:27:21.753646 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:27:21.753669 32551 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:27:21.753680 32551 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 08:27:21.753696 32551 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:27:21.754045 32551 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:27:21.754065 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.754075 32551 net.cpp:165] Memory required for data: 607745500\nI0821 08:27:21.754096 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:21.754112 32551 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:27:21.754123 32551 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:27:21.754142 32551 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.754240 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:27:21.754441 32551 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:27:21.754463 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.754474 32551 net.cpp:165] Memory required for data: 615937500\nI0821 08:27:21.754492 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:27:21.754508 32551 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:27:21.754518 32551 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:27:21.754531 32551 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.754549 32551 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:27:21.754564 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.754572 32551 net.cpp:165] Memory required for data: 624129500\nI0821 08:27:21.754581 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:27:21.754606 32551 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:27:21.754617 32551 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:27:21.754637 32551 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:27:21.755069 32551 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:27:21.755090 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.755110 32551 
net.cpp:165] Memory required for data: 632321500\nI0821 08:27:21.755127 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:27:21.755148 32551 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:27:21.755161 32551 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:27:21.755182 32551 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:27:21.755486 32551 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:27:21.755506 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.755514 32551 net.cpp:165] Memory required for data: 640513500\nI0821 08:27:21.755535 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:21.755551 32551 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:27:21.755563 32551 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:27:21.755578 32551 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.755674 32551 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:27:21.755874 32551 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:27:21.755894 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.755904 32551 net.cpp:165] Memory required for data: 648705500\nI0821 08:27:21.755923 32551 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:27:21.755950 32551 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:27:21.755964 32551 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:27:21.755977 32551 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:27:21.755995 32551 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:27:21.756055 32551 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:27:21.756073 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.756083 32551 net.cpp:165] Memory required for data: 656897500\nI0821 08:27:21.756093 32551 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:27:21.756108 32551 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 08:27:21.756119 32551 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:27:21.756136 32551 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.756155 32551 net.cpp:150] Setting up L1_b7_relu\nI0821 08:27:21.756170 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.756178 32551 net.cpp:165] Memory required for data: 665089500\nI0821 08:27:21.756188 32551 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.756203 32551 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.756214 32551 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:27:21.756232 32551 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:21.756254 32551 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:21.756337 32551 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:27:21.756357 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.756371 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.756379 32551 net.cpp:165] Memory required for data: 681473500\nI0821 08:27:21.756389 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:27:21.756414 32551 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:27:21.756428 32551 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:27:21.756445 32551 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:27:21.756870 32551 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:27:21.756888 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.756898 32551 net.cpp:165] Memory required for data: 689665500\nI0821 08:27:21.756916 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:27:21.756937 32551 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 08:27:21.756956 32551 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:27:21.756986 32551 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:27:21.757329 32551 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:27:21.757349 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.757359 32551 net.cpp:165] Memory required for data: 697857500\nI0821 08:27:21.757378 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:21.757395 32551 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:27:21.757406 32551 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:27:21.757426 32551 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.757524 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:27:21.757720 32551 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:27:21.757743 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.757753 32551 net.cpp:165] Memory required for data: 706049500\nI0821 08:27:21.757771 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:27:21.757786 32551 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:27:21.757797 32551 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:27:21.757810 32551 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.757829 32551 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:27:21.757844 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.757853 32551 net.cpp:165] Memory required for data: 714241500\nI0821 08:27:21.757861 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:27:21.757886 32551 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:27:21.757899 32551 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:27:21.757920 32551 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:27:21.758344 32551 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:27:21.758364 32551 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.758374 32551 net.cpp:165] Memory required for data: 722433500\nI0821 08:27:21.758391 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:27:21.758412 32551 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:27:21.758425 32551 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:27:21.758445 32551 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:27:21.758764 32551 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:27:21.758783 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.758793 32551 net.cpp:165] Memory required for data: 730625500\nI0821 08:27:21.758813 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:21.758829 32551 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:27:21.758841 32551 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:27:21.758854 32551 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.758960 32551 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:27:21.759161 32551 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:27:21.759181 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.759191 32551 net.cpp:165] Memory required for data: 738817500\nI0821 08:27:21.759207 32551 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:27:21.759228 32551 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:27:21.759240 32551 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:27:21.759253 32551 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:27:21.759268 32551 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:27:21.759328 32551 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:27:21.759346 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.759356 32551 net.cpp:165] Memory required for data: 747009500\nI0821 08:27:21.759366 32551 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 08:27:21.759380 32551 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:27:21.759390 32551 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:27:21.759409 32551 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.759436 32551 net.cpp:150] Setting up L1_b8_relu\nI0821 08:27:21.759451 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.759460 32551 net.cpp:165] Memory required for data: 755201500\nI0821 08:27:21.759469 32551 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.759483 32551 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.759495 32551 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:27:21.759515 32551 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:21.759534 32551 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:21.759616 32551 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:27:21.759637 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.759651 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.759660 32551 net.cpp:165] Memory required for data: 771585500\nI0821 08:27:21.759670 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:27:21.759696 32551 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:27:21.759708 32551 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:27:21.759727 32551 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:27:21.760139 32551 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:27:21.760165 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.760175 32551 net.cpp:165] Memory required for data: 779777500\nI0821 08:27:21.760193 32551 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 08:27:21.760210 32551 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:27:21.760222 32551 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:27:21.760243 32551 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:27:21.760562 32551 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:27:21.760581 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.760591 32551 net.cpp:165] Memory required for data: 787969500\nI0821 08:27:21.760613 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:21.760637 32551 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:27:21.760650 32551 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:27:21.760665 32551 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.760759 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:27:21.760970 32551 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:27:21.760990 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.761000 32551 net.cpp:165] Memory required for data: 796161500\nI0821 08:27:21.761018 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:27:21.761034 32551 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:27:21.761045 32551 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:27:21.761065 32551 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.761085 32551 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:27:21.761101 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.761111 32551 net.cpp:165] Memory required for data: 804353500\nI0821 08:27:21.761121 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:27:21.761144 32551 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:27:21.761157 32551 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:27:21.761175 32551 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:27:21.761574 32551 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:27:21.761595 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.761603 32551 net.cpp:165] Memory required for data: 812545500\nI0821 08:27:21.761621 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:27:21.761642 32551 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:27:21.761656 32551 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:27:21.761680 32551 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:27:21.762017 32551 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:27:21.762037 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762046 32551 net.cpp:165] Memory required for data: 820737500\nI0821 08:27:21.762101 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:21.762125 32551 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:27:21.762138 32551 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:27:21.762157 32551 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.762248 32551 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:27:21.762446 32551 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:27:21.762465 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762475 32551 net.cpp:165] Memory required for data: 828929500\nI0821 08:27:21.762493 32551 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:27:21.762511 32551 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:27:21.762521 32551 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:27:21.762534 32551 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:27:21.762549 32551 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:27:21.762611 32551 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:27:21.762629 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762639 32551 net.cpp:165] Memory required for 
data: 837121500\nI0821 08:27:21.762650 32551 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:27:21.762663 32551 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:27:21.762679 32551 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:27:21.762693 32551 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.762712 32551 net.cpp:150] Setting up L1_b9_relu\nI0821 08:27:21.762728 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762737 32551 net.cpp:165] Memory required for data: 845313500\nI0821 08:27:21.762747 32551 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.762760 32551 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.762773 32551 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:27:21.762791 32551 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:21.762814 32551 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:21.762899 32551 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:27:21.762920 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762933 32551 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:27:21.762949 32551 net.cpp:165] Memory required for data: 861697500\nI0821 08:27:21.762961 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:27:21.762986 32551 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:27:21.763000 32551 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:27:21.763018 32551 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:27:21.763428 32551 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:27:21.763449 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.763458 32551 net.cpp:165] Memory required for data: 
863745500\nI0821 08:27:21.763476 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:27:21.763499 32551 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:27:21.763510 32551 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:27:21.763527 32551 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:27:21.763833 32551 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:27:21.763852 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.763861 32551 net.cpp:165] Memory required for data: 865793500\nI0821 08:27:21.763891 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:21.763908 32551 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:27:21.763919 32551 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:27:21.763945 32551 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.764046 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:27:21.764247 32551 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:27:21.764266 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.764276 32551 net.cpp:165] Memory required for data: 867841500\nI0821 08:27:21.764293 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:27:21.764309 32551 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:27:21.764322 32551 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:27:21.764340 32551 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.764360 32551 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:27:21.764374 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.764384 32551 net.cpp:165] Memory required for data: 869889500\nI0821 08:27:21.764394 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:27:21.764418 32551 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:27:21.764431 32551 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:27:21.764449 32551 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:27:21.764856 32551 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:27:21.764876 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.764886 32551 net.cpp:165] Memory required for data: 871937500\nI0821 08:27:21.764904 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:27:21.764925 32551 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:27:21.764937 32551 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:27:21.764962 32551 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:27:21.765264 32551 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:27:21.765285 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.765293 32551 net.cpp:165] Memory required for data: 873985500\nI0821 08:27:21.765314 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:21.765331 32551 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:27:21.765342 32551 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:27:21.765357 32551 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.765455 32551 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:27:21.765647 32551 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:27:21.765671 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.765681 32551 net.cpp:165] Memory required for data: 876033500\nI0821 08:27:21.765698 32551 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:27:21.765717 32551 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:27:21.765728 32551 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:27:21.765744 32551 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:27:21.765799 32551 net.cpp:150] Setting up L2_b1_pool\nI0821 08:27:21.765818 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.765827 32551 net.cpp:165] Memory required for data: 878081500\nI0821 08:27:21.765838 32551 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:27:21.765853 32551 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:27:21.765871 32551 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:27:21.765884 32551 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:27:21.765899 32551 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:27:21.765969 32551 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:27:21.765988 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.765997 32551 net.cpp:165] Memory required for data: 880129500\nI0821 08:27:21.766008 32551 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:27:21.766022 32551 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:27:21.766042 32551 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:27:21.766057 32551 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.766077 32551 net.cpp:150] Setting up L2_b1_relu\nI0821 08:27:21.766093 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.766101 32551 net.cpp:165] Memory required for data: 882177500\nI0821 08:27:21.766111 32551 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:27:21.766129 32551 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:27:21.766149 32551 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:27:21.768438 32551 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:27:21.768461 32551 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:27:21.768471 32551 net.cpp:165] Memory required for data: 884225500\nI0821 08:27:21.768482 32551 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:27:21.768503 32551 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:27:21.768517 32551 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:27:21.768529 32551 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:27:21.768545 32551 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:27:21.768610 32551 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 08:27:21.768633 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.768645 32551 net.cpp:165] Memory required for data: 888321500\nI0821 08:27:21.768654 32551 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.768668 32551 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.768679 32551 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:27:21.768699 32551 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:21.768720 32551 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:21.768810 32551 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:27:21.768836 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.768851 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.768860 32551 net.cpp:165] Memory required for data: 896513500\nI0821 08:27:21.768870 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:27:21.768889 32551 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:27:21.768903 32551 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:27:21.768926 32551 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:27:21.769477 32551 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:27:21.769497 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.769507 32551 net.cpp:165] Memory required for data: 900609500\nI0821 08:27:21.769526 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:27:21.769546 32551 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:27:21.769558 32551 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:27:21.769575 32551 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:27:21.769886 32551 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:27:21.769908 32551 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 08:27:21.769919 32551 net.cpp:165] Memory required for data: 904705500\nI0821 08:27:21.769948 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:21.769964 32551 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:27:21.769978 32551 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:27:21.769994 32551 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.770092 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:27:21.770288 32551 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:27:21.770308 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.770316 32551 net.cpp:165] Memory required for data: 908801500\nI0821 08:27:21.770335 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:27:21.770350 32551 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:27:21.770371 32551 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:27:21.770391 32551 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.770411 32551 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:27:21.770426 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.770434 32551 net.cpp:165] Memory required for data: 912897500\nI0821 08:27:21.770443 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:27:21.770464 32551 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:27:21.770476 32551 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:27:21.770498 32551 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:27:21.771034 32551 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:27:21.771054 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.771064 32551 net.cpp:165] Memory required for data: 916993500\nI0821 08:27:21.771080 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:27:21.771097 32551 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:27:21.771109 
32551 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:27:21.771131 32551 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:27:21.771430 32551 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:27:21.771448 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.771458 32551 net.cpp:165] Memory required for data: 921089500\nI0821 08:27:21.771479 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:21.771500 32551 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:27:21.771513 32551 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:27:21.771528 32551 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.771620 32551 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:27:21.771817 32551 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:27:21.771837 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.771847 32551 net.cpp:165] Memory required for data: 925185500\nI0821 08:27:21.771865 32551 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:27:21.771886 32551 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:27:21.771898 32551 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:27:21.771911 32551 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:27:21.771931 32551 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:27:21.771987 32551 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:27:21.772006 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.772016 32551 net.cpp:165] Memory required for data: 929281500\nI0821 08:27:21.772025 32551 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:27:21.772039 32551 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:27:21.772052 32551 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:27:21.772070 32551 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:27:21.772089 32551 net.cpp:150] 
Setting up L2_b2_relu\nI0821 08:27:21.772104 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.772114 32551 net.cpp:165] Memory required for data: 933377500\nI0821 08:27:21.772122 32551 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.772137 32551 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.772148 32551 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:27:21.772162 32551 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:21.772181 32551 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:21.772264 32551 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:27:21.772281 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.772294 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.772303 32551 net.cpp:165] Memory required for data: 941569500\nI0821 08:27:21.772322 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:27:21.772343 32551 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:27:21.772356 32551 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:27:21.772378 32551 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:27:21.772913 32551 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:27:21.772933 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.772950 32551 net.cpp:165] Memory required for data: 945665500\nI0821 08:27:21.772969 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:27:21.772991 32551 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:27:21.773005 32551 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:27:21.773021 32551 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:27:21.773386 32551 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 08:27:21.773406 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.773416 32551 net.cpp:165] Memory required for data: 949761500\nI0821 08:27:21.773437 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:21.773458 32551 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:27:21.773471 32551 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:27:21.773488 32551 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.773578 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:27:21.773774 32551 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:27:21.773793 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.773802 32551 net.cpp:165] Memory required for data: 953857500\nI0821 08:27:21.773820 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:27:21.773840 32551 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:27:21.773851 32551 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:27:21.773870 32551 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.773890 32551 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:27:21.773905 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.773913 32551 net.cpp:165] Memory required for data: 957953500\nI0821 08:27:21.773923 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:27:21.773950 32551 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:27:21.773964 32551 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:27:21.773988 32551 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:27:21.774524 32551 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:27:21.774544 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.774554 32551 net.cpp:165] Memory required for data: 962049500\nI0821 08:27:21.774572 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:27:21.774590 32551 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:27:21.774602 32551 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:27:21.774623 32551 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:27:21.774926 32551 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:27:21.774951 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.774962 32551 net.cpp:165] Memory required for data: 966145500\nI0821 08:27:21.774983 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:21.775004 32551 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:27:21.775017 32551 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:27:21.775033 32551 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.775132 32551 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:27:21.775331 32551 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:27:21.775349 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.775358 32551 net.cpp:165] Memory required for data: 970241500\nI0821 08:27:21.775377 32551 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:27:21.775398 32551 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:27:21.775419 32551 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:27:21.775434 32551 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:27:21.775450 32551 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:27:21.775504 32551 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:27:21.775522 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.775532 32551 net.cpp:165] Memory required for data: 974337500\nI0821 08:27:21.775542 32551 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:27:21.775574 32551 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:27:21.775588 32551 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:27:21.775604 32551 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.775621 32551 net.cpp:150] Setting up L2_b3_relu\nI0821 08:27:21.775635 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.775645 32551 net.cpp:165] Memory required for data: 978433500\nI0821 08:27:21.775655 32551 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.775673 32551 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.775686 32551 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:27:21.775701 32551 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:21.775722 32551 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:21.775804 32551 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:27:21.775828 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.775843 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.775852 32551 net.cpp:165] Memory required for data: 986625500\nI0821 08:27:21.775861 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:27:21.775880 32551 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:27:21.775893 32551 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:27:21.775912 32551 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:27:21.776458 32551 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:27:21.776479 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.776489 32551 net.cpp:165] Memory required for data: 990721500\nI0821 08:27:21.776506 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:27:21.776527 32551 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:27:21.776540 32551 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:27:21.776556 32551 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 08:27:21.776866 32551 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:27:21.776885 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.776895 32551 net.cpp:165] Memory required for data: 994817500\nI0821 08:27:21.776916 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:21.776932 32551 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:27:21.776950 32551 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:27:21.776973 32551 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.777063 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:27:21.777259 32551 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:27:21.777278 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.777288 32551 net.cpp:165] Memory required for data: 998913500\nI0821 08:27:21.777307 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:27:21.777321 32551 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:27:21.777333 32551 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:27:21.777354 32551 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.777374 32551 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:27:21.777387 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.777396 32551 net.cpp:165] Memory required for data: 1003009500\nI0821 08:27:21.777417 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:27:21.777442 32551 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:27:21.777456 32551 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:27:21.777473 32551 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:27:21.778014 32551 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:27:21.778034 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.778044 32551 net.cpp:165] Memory required for data: 1007105500\nI0821 08:27:21.778062 32551 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:27:21.778084 32551 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:27:21.778096 32551 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:27:21.778113 32551 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:27:21.778416 32551 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:27:21.778435 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.778445 32551 net.cpp:165] Memory required for data: 1011201500\nI0821 08:27:21.778466 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:21.778483 32551 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:27:21.778496 32551 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:27:21.778517 32551 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.778609 32551 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:27:21.778806 32551 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:27:21.778825 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.778834 32551 net.cpp:165] Memory required for data: 1015297500\nI0821 08:27:21.778853 32551 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:27:21.778870 32551 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:27:21.778882 32551 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:27:21.778895 32551 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:27:21.778919 32551 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:27:21.778975 32551 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:27:21.778993 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.779003 32551 net.cpp:165] Memory required for data: 1019393500\nI0821 08:27:21.779013 32551 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:27:21.779032 32551 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:27:21.779044 32551 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 08:27:21.779059 32551 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:27:21.779078 32551 net.cpp:150] Setting up L2_b4_relu\nI0821 08:27:21.779093 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.779101 32551 net.cpp:165] Memory required for data: 1023489500\nI0821 08:27:21.779111 32551 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.779130 32551 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.779141 32551 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:27:21.779156 32551 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:21.779176 32551 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:21.779260 32551 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:27:21.779287 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.779302 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.779312 32551 net.cpp:165] Memory required for data: 1031681500\nI0821 08:27:21.779322 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:27:21.779342 32551 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:27:21.779355 32551 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:27:21.779372 32551 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:27:21.779934 32551 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:27:21.779961 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.779971 32551 net.cpp:165] Memory required for data: 1035777500\nI0821 08:27:21.779989 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:27:21.780011 32551 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:27:21.780023 32551 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 08:27:21.780040 32551 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:27:21.780347 32551 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:27:21.780366 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.780376 32551 net.cpp:165] Memory required for data: 1039873500\nI0821 08:27:21.780397 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:21.780413 32551 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:27:21.780426 32551 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:27:21.780445 32551 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.780537 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:27:21.780738 32551 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:27:21.780757 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.780767 32551 net.cpp:165] Memory required for data: 1043969500\nI0821 08:27:21.780786 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:27:21.780800 32551 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:27:21.780812 32551 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:27:21.780831 32551 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.780850 32551 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:27:21.780865 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.780875 32551 net.cpp:165] Memory required for data: 1048065500\nI0821 08:27:21.780885 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:27:21.780910 32551 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:27:21.780922 32551 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:27:21.780946 32551 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:27:21.781492 32551 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:27:21.781512 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.781522 32551 
net.cpp:165] Memory required for data: 1052161500\nI0821 08:27:21.781539 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:27:21.781565 32551 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:27:21.781579 32551 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:27:21.781595 32551 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:27:21.781894 32551 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:27:21.781913 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.781924 32551 net.cpp:165] Memory required for data: 1056257500\nI0821 08:27:21.781952 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:21.781970 32551 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:27:21.781982 32551 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:27:21.781998 32551 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.782090 32551 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:27:21.782281 32551 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:27:21.782304 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.782313 32551 net.cpp:165] Memory required for data: 1060353500\nI0821 08:27:21.782331 32551 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:27:21.782347 32551 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:27:21.782358 32551 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:27:21.782371 32551 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:27:21.782392 32551 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:27:21.782440 32551 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:27:21.782467 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.782479 32551 net.cpp:165] Memory required for data: 1064449500\nI0821 08:27:21.782490 32551 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:27:21.782505 32551 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 08:27:21.782516 32551 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:27:21.782534 32551 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.782554 32551 net.cpp:150] Setting up L2_b5_relu\nI0821 08:27:21.782569 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.782578 32551 net.cpp:165] Memory required for data: 1068545500\nI0821 08:27:21.782589 32551 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.782603 32551 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.782613 32551 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:27:21.782634 32551 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:21.782655 32551 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:21.782740 32551 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:27:21.782763 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.782776 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.782786 32551 net.cpp:165] Memory required for data: 1076737500\nI0821 08:27:21.782797 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:27:21.782822 32551 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:27:21.782836 32551 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:27:21.782855 32551 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:27:21.783411 32551 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:27:21.783432 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.783440 32551 net.cpp:165] Memory required for data: 1080833500\nI0821 08:27:21.783458 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:27:21.783480 32551 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 08:27:21.783493 32551 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:27:21.783509 32551 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:27:21.783813 32551 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:27:21.783833 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.783843 32551 net.cpp:165] Memory required for data: 1084929500\nI0821 08:27:21.783864 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:21.783882 32551 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:27:21.783893 32551 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:27:21.783917 32551 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.784013 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:27:21.784210 32551 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:27:21.784229 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.784240 32551 net.cpp:165] Memory required for data: 1089025500\nI0821 08:27:21.784257 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:27:21.784273 32551 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:27:21.784284 32551 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:27:21.784303 32551 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.784323 32551 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:27:21.784337 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.784348 32551 net.cpp:165] Memory required for data: 1093121500\nI0821 08:27:21.784358 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:27:21.784384 32551 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:27:21.784396 32551 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:27:21.784415 32551 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:27:21.784978 32551 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:27:21.784999 32551 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.785009 32551 net.cpp:165] Memory required for data: 1097217500\nI0821 08:27:21.785027 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:27:21.785044 32551 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:27:21.785055 32551 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:27:21.785079 32551 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:27:21.785377 32551 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:27:21.785396 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.785405 32551 net.cpp:165] Memory required for data: 1101313500\nI0821 08:27:21.785428 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:21.785444 32551 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:27:21.785455 32551 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:27:21.785471 32551 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.785565 32551 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:27:21.785758 32551 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:27:21.785781 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.785791 32551 net.cpp:165] Memory required for data: 1105409500\nI0821 08:27:21.785810 32551 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:27:21.785827 32551 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:27:21.785838 32551 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:27:21.785852 32551 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:27:21.785868 32551 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:27:21.785920 32551 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:27:21.785945 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.785957 32551 net.cpp:165] Memory required for data: 1109505500\nI0821 08:27:21.785967 32551 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 08:27:21.785981 32551 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:27:21.785993 32551 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:27:21.786011 32551 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.786031 32551 net.cpp:150] Setting up L2_b6_relu\nI0821 08:27:21.786046 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.786056 32551 net.cpp:165] Memory required for data: 1113601500\nI0821 08:27:21.786065 32551 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.786079 32551 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.786090 32551 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:27:21.786115 32551 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:21.786136 32551 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:21.786221 32551 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:27:21.786242 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.786254 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.786263 32551 net.cpp:165] Memory required for data: 1121793500\nI0821 08:27:21.786274 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:27:21.786300 32551 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:27:21.786314 32551 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:27:21.786334 32551 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:27:21.787880 32551 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:27:21.787902 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.787914 32551 net.cpp:165] Memory required for data: 1125889500\nI0821 08:27:21.787931 32551 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 08:27:21.787971 32551 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:27:21.787986 32551 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:27:21.788004 32551 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:27:21.788316 32551 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:27:21.788338 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.788349 32551 net.cpp:165] Memory required for data: 1129985500\nI0821 08:27:21.788372 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:21.788388 32551 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:27:21.788400 32551 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:27:21.788416 32551 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.788507 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:27:21.788707 32551 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:27:21.788727 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.788736 32551 net.cpp:165] Memory required for data: 1134081500\nI0821 08:27:21.788754 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:27:21.788769 32551 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:27:21.788781 32551 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:27:21.788801 32551 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.788820 32551 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:27:21.788836 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.788846 32551 net.cpp:165] Memory required for data: 1138177500\nI0821 08:27:21.788856 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:27:21.788877 32551 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:27:21.788888 32551 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:27:21.788910 32551 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:27:21.789448 32551 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:27:21.789468 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.789477 32551 net.cpp:165] Memory required for data: 1142273500\nI0821 08:27:21.789495 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:27:21.789512 32551 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:27:21.789525 32551 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:27:21.789546 32551 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:27:21.789850 32551 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:27:21.789870 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.789880 32551 net.cpp:165] Memory required for data: 1146369500\nI0821 08:27:21.789901 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:21.789921 32551 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:27:21.789932 32551 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:27:21.789955 32551 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.790045 32551 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:27:21.790240 32551 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:27:21.790258 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.790268 32551 net.cpp:165] Memory required for data: 1150465500\nI0821 08:27:21.790287 32551 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:27:21.790308 32551 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:27:21.790320 32551 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:27:21.790334 32551 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:27:21.790354 32551 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:27:21.790403 32551 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:27:21.790421 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.790432 32551 net.cpp:165] Memory required for 
data: 1154561500\nI0821 08:27:21.790442 32551 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:27:21.790457 32551 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:27:21.790477 32551 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:27:21.790498 32551 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.790516 32551 net.cpp:150] Setting up L2_b7_relu\nI0821 08:27:21.790531 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.790541 32551 net.cpp:165] Memory required for data: 1158657500\nI0821 08:27:21.790550 32551 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.790565 32551 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.790575 32551 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:27:21.790591 32551 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:21.790611 32551 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:21.790702 32551 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:27:21.790722 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.790736 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.790745 32551 net.cpp:165] Memory required for data: 1166849500\nI0821 08:27:21.790755 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:27:21.790776 32551 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:27:21.790787 32551 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:27:21.790810 32551 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:27:21.791355 32551 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:27:21.791375 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.791384 32551 net.cpp:165] Memory required for data: 
1170945500\nI0821 08:27:21.791402 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:27:21.791424 32551 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:27:21.791436 32551 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:27:21.791452 32551 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:27:21.791762 32551 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:27:21.791781 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.791791 32551 net.cpp:165] Memory required for data: 1175041500\nI0821 08:27:21.791813 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:21.791836 32551 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:27:21.791848 32551 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:27:21.791863 32551 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.791960 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:27:21.792155 32551 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:27:21.792174 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.792184 32551 net.cpp:165] Memory required for data: 1179137500\nI0821 08:27:21.792202 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:27:21.792222 32551 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:27:21.792234 32551 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:27:21.792254 32551 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.792275 32551 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:27:21.792289 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.792299 32551 net.cpp:165] Memory required for data: 1183233500\nI0821 08:27:21.792309 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:27:21.792330 32551 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:27:21.792343 32551 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:27:21.792366 32551 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:27:21.792898 32551 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:27:21.792917 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.792927 32551 net.cpp:165] Memory required for data: 1187329500\nI0821 08:27:21.792951 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:27:21.792979 32551 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:27:21.792992 32551 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:27:21.793016 32551 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:27:21.793344 32551 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:27:21.793365 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.793375 32551 net.cpp:165] Memory required for data: 1191425500\nI0821 08:27:21.793397 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:21.793418 32551 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:27:21.793431 32551 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:27:21.793447 32551 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.793539 32551 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:27:21.793740 32551 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:27:21.793758 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.793768 32551 net.cpp:165] Memory required for data: 1195521500\nI0821 08:27:21.793787 32551 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:27:21.793807 32551 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:27:21.793819 32551 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:27:21.793833 32551 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:27:21.793849 32551 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:27:21.793902 32551 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:27:21.793921 32551 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:21.793931 32551 net.cpp:165] Memory required for data: 1199617500\nI0821 08:27:21.793947 32551 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:27:21.793964 32551 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:27:21.793977 32551 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:27:21.793994 32551 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.794014 32551 net.cpp:150] Setting up L2_b8_relu\nI0821 08:27:21.794029 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.794037 32551 net.cpp:165] Memory required for data: 1203713500\nI0821 08:27:21.794049 32551 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.794062 32551 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.794073 32551 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:27:21.794087 32551 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:21.794126 32551 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:21.794214 32551 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:27:21.794239 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.794253 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.794263 32551 net.cpp:165] Memory required for data: 1211905500\nI0821 08:27:21.794273 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:27:21.794293 32551 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:27:21.794306 32551 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:27:21.794328 32551 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:27:21.794870 32551 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:27:21.794890 32551 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:27:21.794900 32551 net.cpp:165] Memory required for data: 1216001500\nI0821 08:27:21.794919 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:27:21.794936 32551 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:27:21.794955 32551 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:27:21.794978 32551 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:27:21.795305 32551 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:27:21.795331 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.795341 32551 net.cpp:165] Memory required for data: 1220097500\nI0821 08:27:21.795364 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:21.795385 32551 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:27:21.795397 32551 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:27:21.795413 32551 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.795503 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:27:21.795706 32551 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:27:21.795724 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.795734 32551 net.cpp:165] Memory required for data: 1224193500\nI0821 08:27:21.795753 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:27:21.795771 32551 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:27:21.795783 32551 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:27:21.795799 32551 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.795817 32551 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:27:21.795830 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.795840 32551 net.cpp:165] Memory required for data: 1228289500\nI0821 08:27:21.795850 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:27:21.795879 32551 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:27:21.795893 32551 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:27:21.795915 32551 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:27:21.797439 32551 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:27:21.797461 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.797472 32551 net.cpp:165] Memory required for data: 1232385500\nI0821 08:27:21.797492 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:27:21.797513 32551 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:27:21.797525 32551 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:27:21.797544 32551 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:27:21.797850 32551 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:27:21.797873 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.797884 32551 net.cpp:165] Memory required for data: 1236481500\nI0821 08:27:21.797963 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:21.797991 32551 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:27:21.798004 32551 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:27:21.798020 32551 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.798106 32551 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:27:21.798295 32551 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:27:21.798316 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.798324 32551 net.cpp:165] Memory required for data: 1240577500\nI0821 08:27:21.798343 32551 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:27:21.798359 32551 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:27:21.798372 32551 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:27:21.798384 32551 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:27:21.798404 32551 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:27:21.798452 32551 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 08:27:21.798470 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.798480 32551 net.cpp:165] Memory required for data: 1244673500\nI0821 08:27:21.798491 32551 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:27:21.798514 32551 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:27:21.798527 32551 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:27:21.798542 32551 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.798560 32551 net.cpp:150] Setting up L2_b9_relu\nI0821 08:27:21.798574 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.798591 32551 net.cpp:165] Memory required for data: 1248769500\nI0821 08:27:21.798602 32551 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.798620 32551 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.798632 32551 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:27:21.798648 32551 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:21.798666 32551 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:21.798758 32551 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:27:21.798781 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.798796 32551 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:27:21.798805 32551 net.cpp:165] Memory required for data: 1256961500\nI0821 08:27:21.798815 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:27:21.798835 32551 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:27:21.798848 32551 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:27:21.798866 32551 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:27:21.799422 32551 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 08:27:21.799443 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.799453 32551 net.cpp:165] Memory required for data: 1257985500\nI0821 08:27:21.799470 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:27:21.799492 32551 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:27:21.799504 32551 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:27:21.799521 32551 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:27:21.799830 32551 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:27:21.799850 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.799860 32551 net.cpp:165] Memory required for data: 1259009500\nI0821 08:27:21.799882 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:21.799903 32551 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:27:21.799916 32551 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:27:21.799935 32551 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.800041 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:27:21.800246 32551 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:27:21.800266 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.800276 32551 net.cpp:165] Memory required for data: 1260033500\nI0821 08:27:21.800293 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:27:21.800309 32551 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:27:21.800320 32551 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:27:21.800340 32551 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:27:21.800360 32551 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:27:21.800374 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.800384 32551 net.cpp:165] Memory required for data: 1261057500\nI0821 08:27:21.800395 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:27:21.800413 32551 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 08:27:21.800426 32551 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:27:21.800446 32551 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:27:21.800992 32551 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:27:21.801012 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.801023 32551 net.cpp:165] Memory required for data: 1262081500\nI0821 08:27:21.801040 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:27:21.801066 32551 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:27:21.801080 32551 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:27:21.801098 32551 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:27:21.801419 32551 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:27:21.801445 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.801455 32551 net.cpp:165] Memory required for data: 1263105500\nI0821 08:27:21.801477 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:21.801493 32551 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:27:21.801506 32551 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:27:21.801520 32551 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:27:21.801614 32551 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:27:21.801821 32551 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:27:21.801841 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.801851 32551 net.cpp:165] Memory required for data: 1264129500\nI0821 08:27:21.801868 32551 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:27:21.801884 32551 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:27:21.801898 32551 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:27:21.801918 32551 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:27:21.801983 32551 net.cpp:150] Setting up L3_b1_pool\nI0821 08:27:21.802008 32551 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 08:27:21.802018 32551 net.cpp:165] Memory required for data: 1265153500\nI0821 08:27:21.802029 32551 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:27:21.802044 32551 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:27:21.802055 32551 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:27:21.802068 32551 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:27:21.802084 32551 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:27:21.802146 32551 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:27:21.802165 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.802175 32551 net.cpp:165] Memory required for data: 1266177500\nI0821 08:27:21.802184 32551 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:27:21.802198 32551 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:27:21.802211 32551 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:27:21.802224 32551 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:27:21.802242 32551 net.cpp:150] Setting up L3_b1_relu\nI0821 08:27:21.802258 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.802268 32551 net.cpp:165] Memory required for data: 1267201500\nI0821 08:27:21.802276 32551 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:27:21.802300 32551 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:27:21.802316 32551 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:27:21.803586 32551 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:27:21.803611 32551 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:27:21.803622 32551 net.cpp:165] Memory required for data: 1268225500\nI0821 08:27:21.803633 32551 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:27:21.803648 32551 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:27:21.803661 32551 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:27:21.803674 32551 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
08:27:21.803689 32551 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:27:21.803757 32551 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:27:21.803779 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.803789 32551 net.cpp:165] Memory required for data: 1270273500\nI0821 08:27:21.803799 32551 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.803822 32551 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.803834 32551 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:27:21.803850 32551 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:21.803870 32551 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:21.803972 32551 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:27:21.803990 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.804013 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.804024 32551 net.cpp:165] Memory required for data: 1274369500\nI0821 08:27:21.804035 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:27:21.804060 32551 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:27:21.804072 32551 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:27:21.804091 32551 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:27:21.805176 32551 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:27:21.805197 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.805207 32551 net.cpp:165] Memory required for data: 1276417500\nI0821 08:27:21.805225 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:27:21.805248 32551 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:27:21.805259 32551 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:27:21.805280 32551 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
08:27:21.805589 32551 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:27:21.805608 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.805619 32551 net.cpp:165] Memory required for data: 1278465500\nI0821 08:27:21.805640 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:21.805657 32551 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:27:21.805670 32551 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:27:21.805690 32551 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.805784 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:27:21.806006 32551 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:27:21.806026 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.806036 32551 net.cpp:165] Memory required for data: 1280513500\nI0821 08:27:21.806054 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:27:21.806071 32551 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:27:21.806083 32551 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:27:21.806102 32551 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:27:21.806121 32551 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:27:21.806136 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.806146 32551 net.cpp:165] Memory required for data: 1282561500\nI0821 08:27:21.806157 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:27:21.806181 32551 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:27:21.806195 32551 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:27:21.806212 32551 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:27:21.807313 32551 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:27:21.807334 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.807343 32551 net.cpp:165] Memory required for data: 1284609500\nI0821 08:27:21.807361 32551 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 08:27:21.807384 32551 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:27:21.807395 32551 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:27:21.807412 32551 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:27:21.807723 32551 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:27:21.807744 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.807752 32551 net.cpp:165] Memory required for data: 1286657500\nI0821 08:27:21.807775 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:21.807796 32551 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:27:21.807808 32551 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:27:21.807828 32551 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:27:21.807919 32551 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:27:21.808125 32551 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:27:21.808145 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.808153 32551 net.cpp:165] Memory required for data: 1288705500\nI0821 08:27:21.808182 32551 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:27:21.808198 32551 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:27:21.808212 32551 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:27:21.808224 32551 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:27:21.808245 32551 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:27:21.808303 32551 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:27:21.808323 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.808334 32551 net.cpp:165] Memory required for data: 1290753500\nI0821 08:27:21.808346 32551 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:27:21.808363 32551 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:27:21.808377 32551 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:27:21.808390 32551 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:27:21.808409 32551 net.cpp:150] Setting up L3_b2_relu\nI0821 08:27:21.808423 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.808434 32551 net.cpp:165] Memory required for data: 1292801500\nI0821 08:27:21.808444 32551 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.808457 32551 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.808467 32551 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:27:21.808483 32551 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:21.808501 32551 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:21.808588 32551 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:27:21.808607 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.808620 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.808629 32551 net.cpp:165] Memory required for data: 1296897500\nI0821 08:27:21.808640 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:27:21.808665 32551 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:27:21.808679 32551 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:27:21.808698 32551 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:27:21.809793 32551 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:27:21.809814 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.809823 32551 net.cpp:165] Memory required for data: 1298945500\nI0821 08:27:21.809841 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:27:21.809862 32551 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:27:21.809875 32551 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:27:21.809896 32551 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:27:21.810205 32551 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:27:21.810225 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.810235 32551 net.cpp:165] Memory required for data: 1300993500\nI0821 08:27:21.810256 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:21.810272 32551 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:27:21.810283 32551 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:27:21.810303 32551 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.810395 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:27:21.810600 32551 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:27:21.810618 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.810627 32551 net.cpp:165] Memory required for data: 1303041500\nI0821 08:27:21.810647 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:27:21.810662 32551 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:27:21.810673 32551 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:27:21.810693 32551 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:27:21.810712 32551 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:27:21.810736 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.810746 32551 net.cpp:165] Memory required for data: 1305089500\nI0821 08:27:21.810756 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:27:21.810783 32551 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:27:21.810796 32551 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:27:21.810813 32551 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:27:21.811898 32551 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:27:21.811923 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.811934 32551 net.cpp:165] Memory required for data: 1307137500\nI0821 08:27:21.811960 32551 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:27:21.811978 32551 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:27:21.811990 32551 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:27:21.812011 32551 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:27:21.812335 32551 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:27:21.812355 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.812364 32551 net.cpp:165] Memory required for data: 1309185500\nI0821 08:27:21.812387 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:21.812409 32551 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:27:21.812422 32551 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:27:21.812438 32551 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:27:21.812528 32551 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:27:21.812727 32551 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:27:21.812747 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.812755 32551 net.cpp:165] Memory required for data: 1311233500\nI0821 08:27:21.812773 32551 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:27:21.812791 32551 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:27:21.812803 32551 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:27:21.812816 32551 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:27:21.812839 32551 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:27:21.812894 32551 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:27:21.812912 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.812922 32551 net.cpp:165] Memory required for data: 1313281500\nI0821 08:27:21.812932 32551 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:27:21.812958 32551 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:27:21.812973 32551 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 08:27:21.812986 32551 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:27:21.813005 32551 net.cpp:150] Setting up L3_b3_relu\nI0821 08:27:21.813020 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.813030 32551 net.cpp:165] Memory required for data: 1315329500\nI0821 08:27:21.813040 32551 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.813055 32551 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.813064 32551 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:27:21.813081 32551 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:21.813100 32551 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:21.813194 32551 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:27:21.813212 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.813226 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.813235 32551 net.cpp:165] Memory required for data: 1319425500\nI0821 08:27:21.813246 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:27:21.813273 32551 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:27:21.813287 32551 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:27:21.813314 32551 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:27:21.814421 32551 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:27:21.814442 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.814452 32551 net.cpp:165] Memory required for data: 1321473500\nI0821 08:27:21.814471 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:27:21.814496 32551 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:27:21.814509 32551 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 08:27:21.814532 32551 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:27:21.814852 32551 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:27:21.814872 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.814882 32551 net.cpp:165] Memory required for data: 1323521500\nI0821 08:27:21.814903 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:21.814920 32551 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:27:21.814931 32551 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:27:21.814959 32551 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.815049 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:27:21.815248 32551 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:27:21.815268 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.815277 32551 net.cpp:165] Memory required for data: 1325569500\nI0821 08:27:21.815296 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:27:21.815311 32551 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:27:21.815322 32551 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:27:21.815341 32551 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:27:21.815362 32551 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:27:21.815376 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.815387 32551 net.cpp:165] Memory required for data: 1327617500\nI0821 08:27:21.815397 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:27:21.815421 32551 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:27:21.815435 32551 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:27:21.815456 32551 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:27:21.817533 32551 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:27:21.817556 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.817566 32551 net.cpp:165] Memory 
required for data: 1329665500\nI0821 08:27:21.817585 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:27:21.817607 32551 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:27:21.817620 32551 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:27:21.817641 32551 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:27:21.818143 32551 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:27:21.818166 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.818176 32551 net.cpp:165] Memory required for data: 1331713500\nI0821 08:27:21.818197 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:21.818214 32551 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:27:21.818226 32551 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:27:21.818248 32551 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:27:21.818341 32551 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:27:21.818547 32551 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:27:21.818565 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.818574 32551 net.cpp:165] Memory required for data: 1333761500\nI0821 08:27:21.818593 32551 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:27:21.818614 32551 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:27:21.818627 32551 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:27:21.818641 32551 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:27:21.818657 32551 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:27:21.818728 32551 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:27:21.818747 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.818758 32551 net.cpp:165] Memory required for data: 1335809500\nI0821 08:27:21.818768 32551 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:27:21.818783 32551 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
08:27:21.818794 32551 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:27:21.818812 32551 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:27:21.818833 32551 net.cpp:150] Setting up L3_b4_relu\nI0821 08:27:21.818848 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.818858 32551 net.cpp:165] Memory required for data: 1337857500\nI0821 08:27:21.818867 32551 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.818881 32551 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.818892 32551 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:27:21.818908 32551 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:21.818928 32551 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:21.819023 32551 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:27:21.819043 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.819057 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.819067 32551 net.cpp:165] Memory required for data: 1341953500\nI0821 08:27:21.819077 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:27:21.819097 32551 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:27:21.819110 32551 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:27:21.819133 32551 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:27:21.820224 32551 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:27:21.820245 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.820255 32551 net.cpp:165] Memory required for data: 1344001500\nI0821 08:27:21.820272 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:27:21.820289 32551 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
08:27:21.820302 32551 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:27:21.820322 32551 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:27:21.820641 32551 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:27:21.820660 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.820670 32551 net.cpp:165] Memory required for data: 1346049500\nI0821 08:27:21.820691 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:21.820708 32551 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:27:21.820720 32551 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:27:21.820735 32551 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.820834 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:27:21.821039 32551 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:27:21.821065 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.821075 32551 net.cpp:165] Memory required for data: 1348097500\nI0821 08:27:21.821094 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:27:21.821110 32551 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:27:21.821122 32551 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:27:21.821138 32551 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:27:21.821157 32551 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:27:21.821171 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.821182 32551 net.cpp:165] Memory required for data: 1350145500\nI0821 08:27:21.821192 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:27:21.821215 32551 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:27:21.821228 32551 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:27:21.821256 32551 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:27:21.822343 32551 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:27:21.822365 32551 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:27:21.822373 32551 net.cpp:165] Memory required for data: 1352193500\nI0821 08:27:21.822391 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:27:21.822417 32551 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:27:21.822429 32551 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:27:21.822451 32551 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:27:21.822751 32551 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:27:21.822770 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.822780 32551 net.cpp:165] Memory required for data: 1354241500\nI0821 08:27:21.822801 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:21.822818 32551 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:27:21.822829 32551 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:27:21.822851 32551 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:27:21.822949 32551 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:27:21.823151 32551 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:27:21.823170 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.823180 32551 net.cpp:165] Memory required for data: 1356289500\nI0821 08:27:21.823199 32551 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:27:21.823220 32551 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:27:21.823232 32551 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:27:21.823246 32551 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:27:21.823262 32551 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:27:21.823323 32551 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:27:21.823341 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.823350 32551 net.cpp:165] Memory required for data: 1358337500\nI0821 08:27:21.823360 32551 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
08:27:21.823375 32551 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:27:21.823386 32551 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:27:21.823405 32551 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:27:21.823424 32551 net.cpp:150] Setting up L3_b5_relu\nI0821 08:27:21.823438 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.823447 32551 net.cpp:165] Memory required for data: 1360385500\nI0821 08:27:21.823457 32551 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.823472 32551 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.823482 32551 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:27:21.823498 32551 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:21.823518 32551 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:21.823606 32551 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:27:21.823626 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.823639 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.823648 32551 net.cpp:165] Memory required for data: 1364481500\nI0821 08:27:21.823659 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:27:21.823679 32551 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:27:21.823693 32551 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:27:21.823716 32551 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:27:21.824801 32551 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:27:21.824822 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.824832 32551 net.cpp:165] Memory required for data: 1366529500\nI0821 08:27:21.824858 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:27:21.824880 
32551 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:27:21.824893 32551 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:27:21.824910 32551 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:27:21.825238 32551 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:27:21.825258 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.825268 32551 net.cpp:165] Memory required for data: 1368577500\nI0821 08:27:21.825289 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:21.825306 32551 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:27:21.825318 32551 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:27:21.825333 32551 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.825430 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:27:21.825631 32551 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:27:21.825654 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.825664 32551 net.cpp:165] Memory required for data: 1370625500\nI0821 08:27:21.825682 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:27:21.825700 32551 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:27:21.825711 32551 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:27:21.825726 32551 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:27:21.825745 32551 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:27:21.825759 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.825768 32551 net.cpp:165] Memory required for data: 1372673500\nI0821 08:27:21.825778 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:27:21.825809 32551 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:27:21.825821 32551 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:27:21.825839 32551 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:27:21.826915 32551 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
08:27:21.826936 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.826953 32551 net.cpp:165] Memory required for data: 1374721500\nI0821 08:27:21.826972 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:27:21.826992 32551 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:27:21.827004 32551 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:27:21.827019 32551 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:27:21.827566 32551 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:27:21.827586 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.827596 32551 net.cpp:165] Memory required for data: 1376769500\nI0821 08:27:21.827627 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:21.827647 32551 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:27:21.827661 32551 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:27:21.827675 32551 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:27:21.827780 32551 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:27:21.827986 32551 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:27:21.828006 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.828016 32551 net.cpp:165] Memory required for data: 1378817500\nI0821 08:27:21.828034 32551 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:27:21.828057 32551 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:27:21.828069 32551 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:27:21.828083 32551 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:27:21.828099 32551 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:27:21.828161 32551 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:27:21.828179 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.828189 32551 net.cpp:165] Memory required for data: 1380865500\nI0821 08:27:21.828199 32551 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:27:21.828213 32551 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:27:21.828234 32551 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:27:21.828253 32551 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:27:21.828274 32551 net.cpp:150] Setting up L3_b6_relu\nI0821 08:27:21.828289 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.828297 32551 net.cpp:165] Memory required for data: 1382913500\nI0821 08:27:21.828307 32551 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.828321 32551 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.828332 32551 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:27:21.828348 32551 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:21.828367 32551 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:21.828456 32551 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:27:21.828475 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.828488 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.828497 32551 net.cpp:165] Memory required for data: 1387009500\nI0821 08:27:21.828508 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:27:21.828528 32551 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:27:21.828541 32551 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:27:21.828565 32551 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:27:21.829658 32551 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:27:21.829679 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.829689 32551 net.cpp:165] Memory required for data: 1389057500\nI0821 08:27:21.829706 32551 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:27:21.829731 32551 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:27:21.829744 32551 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:27:21.829762 32551 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:27:21.830087 32551 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:27:21.830107 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.830116 32551 net.cpp:165] Memory required for data: 1391105500\nI0821 08:27:21.830138 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:21.830154 32551 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:27:21.830165 32551 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:27:21.830183 32551 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.830279 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:27:21.830482 32551 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:27:21.830500 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.830509 32551 net.cpp:165] Memory required for data: 1393153500\nI0821 08:27:21.830528 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:27:21.830579 32551 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:27:21.830595 32551 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:27:21.830610 32551 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:27:21.830631 32551 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:27:21.830644 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.830654 32551 net.cpp:165] Memory required for data: 1395201500\nI0821 08:27:21.830665 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:27:21.830691 32551 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:27:21.830704 32551 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:27:21.830724 32551 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
08:27:21.831809 32551 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:27:21.831830 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.831840 32551 net.cpp:165] Memory required for data: 1397249500\nI0821 08:27:21.831857 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:27:21.831882 32551 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:27:21.831895 32551 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:27:21.831918 32551 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:27:21.832248 32551 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:27:21.832268 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.832278 32551 net.cpp:165] Memory required for data: 1399297500\nI0821 08:27:21.832299 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:21.832315 32551 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:27:21.832329 32551 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:27:21.832341 32551 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:27:21.832437 32551 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:27:21.832674 32551 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:27:21.832698 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.832708 32551 net.cpp:165] Memory required for data: 1401345500\nI0821 08:27:21.832727 32551 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:27:21.832744 32551 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:27:21.832756 32551 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:27:21.832769 32551 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:27:21.832785 32551 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:27:21.832847 32551 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:27:21.832866 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.832875 32551 net.cpp:165] Memory 
required for data: 1403393500\nI0821 08:27:21.832886 32551 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:27:21.832901 32551 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:27:21.832913 32551 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:27:21.832927 32551 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:27:21.832953 32551 net.cpp:150] Setting up L3_b7_relu\nI0821 08:27:21.832968 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.832978 32551 net.cpp:165] Memory required for data: 1405441500\nI0821 08:27:21.832988 32551 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.833008 32551 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.833019 32551 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:27:21.833035 32551 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:21.833055 32551 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:21.833140 32551 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:27:21.833163 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.833178 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.833187 32551 net.cpp:165] Memory required for data: 1409537500\nI0821 08:27:21.833197 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:27:21.833216 32551 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:27:21.833230 32551 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:27:21.833248 32551 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:27:21.835316 32551 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:27:21.835338 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.835350 32551 net.cpp:165] Memory required for data: 
1411585500\nI0821 08:27:21.835367 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:27:21.835389 32551 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:27:21.835402 32551 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:27:21.835423 32551 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:27:21.835750 32551 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:27:21.835778 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.835789 32551 net.cpp:165] Memory required for data: 1413633500\nI0821 08:27:21.835811 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:21.835829 32551 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:27:21.835840 32551 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:27:21.835856 32551 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.835958 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:27:21.836160 32551 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:27:21.836180 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.836189 32551 net.cpp:165] Memory required for data: 1415681500\nI0821 08:27:21.836207 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:27:21.836223 32551 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:27:21.836235 32551 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:27:21.836254 32551 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:27:21.836274 32551 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:27:21.836288 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.836298 32551 net.cpp:165] Memory required for data: 1417729500\nI0821 08:27:21.836308 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:27:21.836333 32551 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:27:21.836345 32551 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:27:21.836364 32551 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:27:21.837446 32551 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:27:21.837467 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.837477 32551 net.cpp:165] Memory required for data: 1419777500\nI0821 08:27:21.837494 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:27:21.837517 32551 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:27:21.837529 32551 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:27:21.837546 32551 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:27:21.837859 32551 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:27:21.837878 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.837888 32551 net.cpp:165] Memory required for data: 1421825500\nI0821 08:27:21.837910 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:21.837932 32551 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:27:21.837954 32551 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:27:21.837970 32551 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:27:21.838075 32551 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:27:21.838274 32551 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:27:21.838294 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.838302 32551 net.cpp:165] Memory required for data: 1423873500\nI0821 08:27:21.838320 32551 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:27:21.838346 32551 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:27:21.838359 32551 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:27:21.838374 32551 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:27:21.838394 32551 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:27:21.838451 32551 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:27:21.838470 32551 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:27:21.838479 32551 net.cpp:165] Memory required for data: 1425921500\nI0821 08:27:21.838488 32551 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:27:21.838510 32551 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:27:21.838523 32551 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:27:21.838537 32551 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:27:21.838557 32551 net.cpp:150] Setting up L3_b8_relu\nI0821 08:27:21.838572 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.838580 32551 net.cpp:165] Memory required for data: 1427969500\nI0821 08:27:21.838599 32551 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.838614 32551 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.838625 32551 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:27:21.838641 32551 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:21.838663 32551 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:21.838750 32551 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:27:21.838769 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.838783 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.838793 32551 net.cpp:165] Memory required for data: 1432065500\nI0821 08:27:21.838804 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:27:21.838829 32551 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:27:21.838843 32551 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:27:21.838862 32551 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:27:21.839947 32551 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:27:21.839968 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:27:21.839978 32551 net.cpp:165] Memory required for data: 1434113500\nI0821 08:27:21.839996 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:27:21.840018 32551 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:27:21.840031 32551 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:27:21.840051 32551 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:27:21.840360 32551 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:27:21.840379 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.840389 32551 net.cpp:165] Memory required for data: 1436161500\nI0821 08:27:21.840409 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:21.840425 32551 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:27:21.840437 32551 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:27:21.840458 32551 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.840550 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:27:21.840754 32551 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:27:21.840773 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.840783 32551 net.cpp:165] Memory required for data: 1438209500\nI0821 08:27:21.840802 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:27:21.840817 32551 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:27:21.840828 32551 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:27:21.840848 32551 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:27:21.840869 32551 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:27:21.840883 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.840893 32551 net.cpp:165] Memory required for data: 1440257500\nI0821 08:27:21.840903 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:27:21.840929 32551 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:27:21.840950 32551 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 08:27:21.840970 32551 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:27:21.842038 32551 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:27:21.842059 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.842069 32551 net.cpp:165] Memory required for data: 1442305500\nI0821 08:27:21.842087 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:27:21.842109 32551 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:27:21.842123 32551 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:27:21.842139 32551 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:27:21.842455 32551 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:27:21.842475 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.842494 32551 net.cpp:165] Memory required for data: 1444353500\nI0821 08:27:21.842516 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:21.842538 32551 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:27:21.842551 32551 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:27:21.842571 32551 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:27:21.842666 32551 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:27:21.842871 32551 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:27:21.842891 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.842900 32551 net.cpp:165] Memory required for data: 1446401500\nI0821 08:27:21.842919 32551 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:27:21.842936 32551 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:27:21.842955 32551 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:27:21.842970 32551 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:27:21.842991 32551 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:27:21.843050 32551 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
08:27:21.843067 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.843076 32551 net.cpp:165] Memory required for data: 1448449500\nI0821 08:27:21.843086 32551 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:27:21.843106 32551 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:27:21.843118 32551 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:27:21.843132 32551 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:27:21.843152 32551 net.cpp:150] Setting up L3_b9_relu\nI0821 08:27:21.843166 32551 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:27:21.843176 32551 net.cpp:165] Memory required for data: 1450497500\nI0821 08:27:21.843186 32551 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:27:21.843201 32551 net.cpp:100] Creating Layer post_pool\nI0821 08:27:21.843214 32551 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:27:21.843230 32551 net.cpp:408] post_pool -> post_pool\nI0821 08:27:21.843293 32551 net.cpp:150] Setting up post_pool\nI0821 08:27:21.843312 32551 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:27:21.843322 32551 net.cpp:165] Memory required for data: 1450529500\nI0821 08:27:21.843333 32551 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:27:21.843351 32551 net.cpp:100] Creating Layer post_FC\nI0821 08:27:21.843364 32551 net.cpp:434] post_FC <- post_pool\nI0821 08:27:21.843381 32551 net.cpp:408] post_FC -> post_FC_top\nI0821 08:27:21.843583 32551 net.cpp:150] Setting up post_FC\nI0821 08:27:21.843602 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.843611 32551 net.cpp:165] Memory required for data: 1450534500\nI0821 08:27:21.843629 32551 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:27:21.843644 32551 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:27:21.843657 32551 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:27:21.843675 32551 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 08:27:21.843698 32551 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:27:21.843780 32551 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:27:21.843806 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.843819 32551 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:27:21.843829 32551 net.cpp:165] Memory required for data: 1450544500\nI0821 08:27:21.843839 32551 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:27:21.843854 32551 net.cpp:100] Creating Layer accuracy\nI0821 08:27:21.843866 32551 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:27:21.843879 32551 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:27:21.843894 32551 net.cpp:408] accuracy -> accuracy\nI0821 08:27:21.843919 32551 net.cpp:150] Setting up accuracy\nI0821 08:27:21.843932 32551 net.cpp:157] Top shape: (1)\nI0821 08:27:21.843961 32551 net.cpp:165] Memory required for data: 1450544504\nI0821 08:27:21.843971 32551 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:21.843986 32551 net.cpp:100] Creating Layer loss\nI0821 08:27:21.843998 32551 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:27:21.844012 32551 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:27:21.844033 32551 net.cpp:408] loss -> loss\nI0821 08:27:21.844055 32551 layer_factory.hpp:77] Creating layer loss\nI0821 08:27:21.844224 32551 net.cpp:150] Setting up loss\nI0821 08:27:21.844244 32551 net.cpp:157] Top shape: (1)\nI0821 08:27:21.844252 32551 net.cpp:160]     with loss weight 1\nI0821 08:27:21.844276 32551 net.cpp:165] Memory required for data: 1450544508\nI0821 08:27:21.844290 32551 net.cpp:226] loss needs backward computation.\nI0821 08:27:21.844300 32551 net.cpp:228] accuracy does not need backward computation.\nI0821 08:27:21.844310 32551 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:27:21.844321 32551 net.cpp:226] post_FC needs backward 
computation.\nI0821 08:27:21.844331 32551 net.cpp:226] post_pool needs backward computation.\nI0821 08:27:21.844339 32551 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:27:21.844349 32551 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.844359 32551 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.844368 32551 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.844378 32551 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.844388 32551 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.844398 32551 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.844408 32551 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.844416 32551 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.844426 32551 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:27:21.844436 32551 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:27:21.844446 32551 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.844456 32551 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.844466 32551 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.844476 32551 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.844486 32551 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.844496 32551 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:27:21.844506 32551 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.844516 32551 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.844525 32551 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:27:21.844537 32551 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:27:21.844545 32551 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 08:27:21.844557 32551 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.844566 32551 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.844576 32551 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.844585 32551 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.844595 32551 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.844605 32551 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.844615 32551 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.844625 32551 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:27:21.844636 32551 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:27:21.844646 32551 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.844657 32551 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.844667 32551 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.844686 32551 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.844696 32551 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.844707 32551 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.844715 32551 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.844725 32551 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.844738 32551 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:27:21.844754 32551 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:27:21.844764 32551 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:27:21.844775 32551 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.844785 32551 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.844796 32551 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.844807 32551 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.844817 32551 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.844826 32551 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.844836 32551 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.844847 32551 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:27:21.844858 32551 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:27:21.844868 32551 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.844878 32551 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.844888 32551 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.844899 32551 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.844909 32551 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.844919 32551 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.844928 32551 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.844938 32551 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.844959 32551 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:27:21.844969 32551 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:27:21.844980 32551 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:27:21.844990 32551 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.845000 32551 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.845010 32551 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.845021 32551 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.845031 32551 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:27:21.845041 
32551 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.845051 32551 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.845062 32551 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:27:21.845072 32551 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:27:21.845082 32551 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.845093 32551 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.845103 32551 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.845113 32551 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.845124 32551 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.845134 32551 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.845144 32551 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.845154 32551 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.845163 32551 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:27:21.845173 32551 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:27:21.845199 32551 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:27:21.845211 32551 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:27:21.845221 32551 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.845232 32551 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:27:21.845242 32551 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.845253 32551 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.845263 32551 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.845273 32551 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.845290 32551 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
08:27:21.845301 32551 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.845311 32551 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.845321 32551 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:27:21.845332 32551 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:27:21.845342 32551 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.845353 32551 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.845363 32551 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.845374 32551 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.845386 32551 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.845396 32551 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.845405 32551 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.845417 32551 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.845427 32551 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:27:21.845438 32551 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:27:21.845448 32551 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.845458 32551 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.845469 32551 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.845479 32551 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.845489 32551 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.845499 32551 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:27:21.845510 32551 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.845520 32551 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.845531 32551 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:27:21.845541 32551 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:27:21.845552 32551 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:27:21.845562 32551 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.845573 32551 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.845584 32551 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.845595 32551 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.845605 32551 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.845615 32551 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.845625 32551 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.845636 32551 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:27:21.845646 32551 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:27:21.845657 32551 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.845669 32551 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.845679 32551 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.845688 32551 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.845708 32551 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.845719 32551 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.845729 32551 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.845739 32551 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.845751 32551 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:27:21.845762 32551 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:27:21.845772 32551 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:27:21.845783 32551 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.845793 32551 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.845803 32551 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.845813 32551 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.845824 32551 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.845834 32551 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.845844 32551 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.845854 32551 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:27:21.845865 32551 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:27:21.845875 32551 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.845886 32551 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.845896 32551 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.845907 32551 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.845918 32551 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.845928 32551 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.845938 32551 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.845958 32551 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.845969 32551 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:27:21.845980 32551 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:27:21.845991 32551 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:27:21.846004 32551 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.846014 32551 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.846025 32551 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.846041 32551 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.846051 32551 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:27:21.846062 32551 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.846073 32551 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.846084 32551 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:27:21.846096 32551 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:27:21.846105 32551 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.846117 32551 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.846127 32551 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.846138 32551 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.846150 32551 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.846160 32551 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.846170 32551 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.846181 32551 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.846192 32551 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:27:21.846204 32551 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:27:21.846216 32551 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:27:21.846235 32551 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:27:21.846246 32551 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.846257 32551 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:27:21.846268 32551 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.846279 32551 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.846290 32551 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.846302 32551 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.846312 32551 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:27:21.846321 32551 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.846333 32551 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.846345 32551 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:27:21.846355 32551 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:27:21.846366 32551 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:27:21.846379 32551 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:27:21.846390 32551 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:27:21.846400 32551 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:27:21.846411 32551 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:27:21.846422 32551 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:27:21.846432 32551 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:27:21.846442 32551 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:27:21.846454 32551 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:27:21.846465 32551 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:27:21.846475 32551 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:27:21.846487 32551 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:27:21.846498 32551 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:27:21.846510 32551 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:27:21.846520 32551 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:27:21.846531 32551 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:27:21.846541 32551 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:27:21.846554 32551 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:27:21.846565 32551 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:27:21.846575 32551 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:27:21.846585 32551 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:27:21.846597 32551 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:27:21.846607 32551 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:27:21.846619 32551 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:27:21.846631 32551 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:27:21.846640 32551 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:27:21.846652 32551 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:27:21.846663 32551 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:27:21.846674 32551 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:27:21.846685 32551 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:27:21.846696 32551 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:27:21.846709 32551 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:27:21.846719 32551 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:27:21.846729 32551 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:27:21.846740 32551 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:27:21.846751 32551 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:27:21.846771 32551 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:27:21.846782 32551 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:27:21.846794 32551 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:27:21.846806 32551 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:27:21.846817 32551 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:27:21.846829 32551 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:27:21.846840 32551 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:27:21.846851 32551 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:27:21.846863 32551 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:27:21.846873 32551 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:27:21.846884 32551 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:27:21.846894 32551 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:27:21.846906 32551 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:27:21.846917 32551 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:27:21.846928 32551 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:27:21.846951 32551 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:27:21.846964 32551 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:27:21.846976 32551 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:27:21.846987 32551 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:27:21.846998 32551 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:27:21.847008 32551 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:27:21.847019 32551 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:27:21.847031 32551 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:27:21.847043 32551 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:27:21.847053 32551 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:27:21.847065 32551 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:27:21.847076 32551 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:27:21.847086 32551 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:27:21.847097 32551 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:27:21.847108 32551 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:27:21.847120 32551 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:27:21.847131 32551 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:27:21.847141 32551 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:27:21.847153 32551 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:27:21.847163 32551 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:27:21.847175 32551 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:27:21.847187 32551 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:27:21.847198 32551 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:27:21.847208 32551 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:27:21.847220 32551 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:27:21.847230 32551 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:27:21.847241 32551 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:27:21.847252 32551 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:27:21.847265 32551 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:27:21.847275 32551 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:27:21.847286 32551 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:27:21.847297 32551 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:27:21.847321 32551 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:27:21.847333 32551 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:27:21.847343 32551 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:27:21.847354 32551 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:27:21.847367 32551 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:27:21.847378 32551 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:27:21.847388 32551 net.cpp:226] pre_relu needs backward computation.\nI0821 08:27:21.847398 32551 net.cpp:226] pre_scale needs backward computation.\nI0821 08:27:21.847409 32551 net.cpp:226] pre_bn needs backward computation.\nI0821 08:27:21.847424 32551 net.cpp:226] pre_conv needs backward computation.\nI0821 08:27:21.847437 32551 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:27:21.847450 32551 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:27:21.847460 32551 net.cpp:270] This network produces output accuracy\nI0821 08:27:21.847471 32551 net.cpp:270] This network produces output loss\nI0821 08:27:21.847826 32551 net.cpp:283] Network initialization done.\nI0821 08:27:21.848875 32551 solver.cpp:60] Solver scaffolding done.\nI0821 08:27:22.071991 32551 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:27:22.425187 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:22.425251 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:22.432003 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:22.662765 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:22.662884 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:22.697557 32551 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 08:27:22.697669 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:23.142143 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:23.142212 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:23.150151 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:23.394423 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:23.394532 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:23.446144 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:23.446249 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:23.964520 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:23.964596 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:23.973137 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:24.245249 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:24.245417 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:24.316412 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:24.316570 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:24.399824 32551 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:27:24.888185 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:24.888240 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 08:27:24.897981 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:25.188877 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:25.189036 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:25.280685 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:25.280844 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:25.923328 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:25.923391 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:25.933782 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:26.248437 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:26.248649 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:26.361232 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:26.361439 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:27.067593 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:27.067663 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:27.079104 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:27.422519 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:27.422767 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:27.556257 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:27.556494 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
08:27:28.331598 32551 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:27:28.331660 32551 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:27:28.343521 32551 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:27:28.439134 32566 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:28.536134 32563 blocking_queue.cpp:50] Waiting for data\nI0821 08:27:28.772685 32551 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:27:28.772927 32551 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:27:28.925736 32551 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:27:28.925964 32551 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:27:29.097342 32551 parallel.cpp:425] Starting Optimization\nI0821 08:27:29.099225 32551 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:27:29.099241 32551 solver.cpp:280] Learning Rate Policy: multistep\nI0821 08:27:29.103056 32551 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 08:28:50.225746 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:28:50.226032 32551 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 08:28:54.248658 32551 solver.cpp:228] Iteration 0, loss = 4.74879\nI0821 08:28:54.248702 32551 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0821 08:28:54.248718 32551 solver.cpp:244]     Train net output #1: loss = 4.74879 (* 1 = 4.74879 loss)\nI0821 08:28:54.278235 32551 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0821 08:31:10.001933 32551 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 08:32:30.617982 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13652\nI0821 08:32:30.618260 32551 solver.cpp:404]     Test net output #1: loss = 7.23885 (* 1 = 7.23885 loss)\nI0821 08:32:31.926129 32551 
solver.cpp:228] Iteration 100, loss = 2.24077\nI0821 08:32:31.926183 32551 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI0821 08:32:31.926213 32551 solver.cpp:244]     Train net output #1: loss = 2.24077 (* 1 = 2.24077 loss)\nI0821 08:32:31.998826 32551 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0821 08:34:47.485852 32551 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 08:36:08.043251 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1462\nI0821 08:36:08.043510 32551 solver.cpp:404]     Test net output #1: loss = 2.35936 (* 1 = 2.35936 loss)\nI0821 08:36:09.350245 32551 solver.cpp:228] Iteration 200, loss = 2.1319\nI0821 08:36:09.350284 32551 solver.cpp:244]     Train net output #0: accuracy = 0.168\nI0821 08:36:09.350301 32551 solver.cpp:244]     Train net output #1: loss = 2.1319 (* 1 = 2.1319 loss)\nI0821 08:36:09.432042 32551 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0821 08:38:24.917726 32551 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 08:39:45.433912 32551 solver.cpp:404]     Test net output #0: accuracy = 0.20328\nI0821 08:39:45.434152 32551 solver.cpp:404]     Test net output #1: loss = 2.11517 (* 1 = 2.11517 loss)\nI0821 08:39:46.741024 32551 solver.cpp:228] Iteration 300, loss = 1.929\nI0821 08:39:46.741063 32551 solver.cpp:244]     Train net output #0: accuracy = 0.264\nI0821 08:39:46.741080 32551 solver.cpp:244]     Train net output #1: loss = 1.929 (* 1 = 1.929 loss)\nI0821 08:39:46.824744 32551 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0821 08:42:02.376641 32551 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 08:43:22.871175 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12364\nI0821 08:43:22.871440 32551 solver.cpp:404]     Test net output #1: loss = 2.60118 (* 1 = 2.60118 loss)\nI0821 08:43:24.178555 32551 solver.cpp:228] Iteration 400, loss = 1.82315\nI0821 08:43:24.178594 32551 solver.cpp:244]     Train net output #0: accuracy = 0.344\nI0821 08:43:24.178611 32551 
solver.cpp:244]     Train net output #1: loss = 1.82315 (* 1 = 1.82315 loss)\nI0821 08:43:24.259367 32551 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0821 08:45:39.845319 32551 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 08:47:00.328202 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12952\nI0821 08:47:00.328460 32551 solver.cpp:404]     Test net output #1: loss = 3.94059 (* 1 = 3.94059 loss)\nI0821 08:47:01.635541 32551 solver.cpp:228] Iteration 500, loss = 1.61219\nI0821 08:47:01.635581 32551 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0821 08:47:01.635596 32551 solver.cpp:244]     Train net output #1: loss = 1.61219 (* 1 = 1.61219 loss)\nI0821 08:47:01.713004 32551 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0821 08:49:17.235239 32551 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 08:50:37.727918 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:50:37.728178 32551 solver.cpp:404]     Test net output #1: loss = 4.84808 (* 1 = 4.84808 loss)\nI0821 08:50:39.035244 32551 solver.cpp:228] Iteration 600, loss = 1.35511\nI0821 08:50:39.035284 32551 solver.cpp:244]     Train net output #0: accuracy = 0.488\nI0821 08:50:39.035300 32551 solver.cpp:244]     Train net output #1: loss = 1.35511 (* 1 = 1.35511 loss)\nI0821 08:50:39.111420 32551 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0821 08:52:54.652875 32551 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 08:54:15.152375 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0821 08:54:15.152628 32551 solver.cpp:404]     Test net output #1: loss = 5.01379 (* 1 = 5.01379 loss)\nI0821 08:54:16.459497 32551 solver.cpp:228] Iteration 700, loss = 1.17723\nI0821 08:54:16.459538 32551 solver.cpp:244]     Train net output #0: accuracy = 0.624\nI0821 08:54:16.459554 32551 solver.cpp:244]     Train net output #1: loss = 1.17723 (* 1 = 1.17723 loss)\nI0821 08:54:16.534417 32551 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0821 
08:56:32.094542 32551 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 08:57:52.595391 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:57:52.595654 32551 solver.cpp:404]     Test net output #1: loss = 4.91601 (* 1 = 4.91601 loss)\nI0821 08:57:53.903203 32551 solver.cpp:228] Iteration 800, loss = 1.11673\nI0821 08:57:53.903245 32551 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0821 08:57:53.903262 32551 solver.cpp:244]     Train net output #1: loss = 1.11673 (* 1 = 1.11673 loss)\nI0821 08:57:53.985672 32551 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0821 09:00:09.518853 32551 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:01:30.034992 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0821 09:01:30.035254 32551 solver.cpp:404]     Test net output #1: loss = 5.26841 (* 1 = 5.26841 loss)\nI0821 09:01:31.342283 32551 solver.cpp:228] Iteration 900, loss = 1.00253\nI0821 09:01:31.342326 32551 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0821 09:01:31.342342 32551 solver.cpp:244]     Train net output #1: loss = 1.00253 (* 1 = 1.00253 loss)\nI0821 09:01:31.421592 32551 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0821 09:03:46.962713 32551 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 09:05:07.467684 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 09:05:07.467973 32551 solver.cpp:404]     Test net output #1: loss = 5.77397 (* 1 = 5.77397 loss)\nI0821 09:05:08.775517 32551 solver.cpp:228] Iteration 1000, loss = 0.902869\nI0821 09:05:08.775560 32551 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0821 09:05:08.775576 32551 solver.cpp:244]     Train net output #1: loss = 0.902869 (* 1 = 0.902869 loss)\nI0821 09:05:08.851976 32551 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0821 09:07:24.296068 32551 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 09:08:44.792740 32551 solver.cpp:404]     Test net output #0: accuracy = 
0.0996\nI0821 09:08:44.792999 32551 solver.cpp:404]     Test net output #1: loss = 5.76863 (* 1 = 5.76863 loss)\nI0821 09:08:46.100855 32551 solver.cpp:228] Iteration 1100, loss = 0.809103\nI0821 09:08:46.100899 32551 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0821 09:08:46.100915 32551 solver.cpp:244]     Train net output #1: loss = 0.809103 (* 1 = 0.809103 loss)\nI0821 09:08:46.181280 32551 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0821 09:11:01.730432 32551 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 09:12:22.243573 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 09:12:22.243841 32551 solver.cpp:404]     Test net output #1: loss = 5.66467 (* 1 = 5.66467 loss)\nI0821 09:12:23.550856 32551 solver.cpp:228] Iteration 1200, loss = 0.660781\nI0821 09:12:23.550900 32551 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 09:12:23.550916 32551 solver.cpp:244]     Train net output #1: loss = 0.660781 (* 1 = 0.660781 loss)\nI0821 09:12:23.630300 32551 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0821 09:14:39.111300 32551 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 09:15:59.640861 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0821 09:15:59.641115 32551 solver.cpp:404]     Test net output #1: loss = 6.04295 (* 1 = 6.04295 loss)\nI0821 09:16:00.948307 32551 solver.cpp:228] Iteration 1300, loss = 0.729355\nI0821 09:16:00.948351 32551 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 09:16:00.948369 32551 solver.cpp:244]     Train net output #1: loss = 0.729355 (* 1 = 0.729355 loss)\nI0821 09:16:01.025416 32551 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0821 09:18:16.427556 32551 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 09:19:36.961378 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 09:19:36.961628 32551 solver.cpp:404]     Test net output #1: loss = 5.8552 (* 1 = 5.8552 loss)\nI0821 09:19:38.269510 32551 
solver.cpp:228] Iteration 1400, loss = 0.466364\nI0821 09:19:38.269559 32551 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 09:19:38.269587 32551 solver.cpp:244]     Train net output #1: loss = 0.466364 (* 1 = 0.466364 loss)\nI0821 09:19:38.344276 32551 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0821 09:21:53.800614 32551 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 09:23:14.308605 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10236\nI0821 09:23:14.308847 32551 solver.cpp:404]     Test net output #1: loss = 5.89294 (* 1 = 5.89294 loss)\nI0821 09:23:15.616669 32551 solver.cpp:228] Iteration 1500, loss = 0.443408\nI0821 09:23:15.616720 32551 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0821 09:23:15.616739 32551 solver.cpp:244]     Train net output #1: loss = 0.443408 (* 1 = 0.443408 loss)\nI0821 09:23:15.694625 32551 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0821 09:25:31.138489 32551 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 09:26:51.529817 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11416\nI0821 09:26:51.530050 32551 solver.cpp:404]     Test net output #1: loss = 5.37836 (* 1 = 5.37836 loss)\nI0821 09:26:52.838390 32551 solver.cpp:228] Iteration 1600, loss = 0.490498\nI0821 09:26:52.838434 32551 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 09:26:52.838450 32551 solver.cpp:244]     Train net output #1: loss = 0.490498 (* 1 = 0.490498 loss)\nI0821 09:26:52.915833 32551 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0821 09:29:08.384065 32551 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 09:30:28.790817 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11852\nI0821 09:30:28.791071 32551 solver.cpp:404]     Test net output #1: loss = 5.55985 (* 1 = 5.55985 loss)\nI0821 09:30:30.098207 32551 solver.cpp:228] Iteration 1700, loss = 0.364183\nI0821 09:30:30.098253 32551 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 
09:30:30.098268 32551 solver.cpp:244]     Train net output #1: loss = 0.364183 (* 1 = 0.364183 loss)\nI0821 09:30:30.176388 32551 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0821 09:32:45.669046 32551 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 09:34:06.063773 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1152\nI0821 09:34:06.064018 32551 solver.cpp:404]     Test net output #1: loss = 5.36563 (* 1 = 5.36563 loss)\nI0821 09:34:07.372210 32551 solver.cpp:228] Iteration 1800, loss = 0.513282\nI0821 09:34:07.372254 32551 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0821 09:34:07.372270 32551 solver.cpp:244]     Train net output #1: loss = 0.513282 (* 1 = 0.513282 loss)\nI0821 09:34:07.445654 32551 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0821 09:36:22.846333 32551 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 09:37:43.211621 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15728\nI0821 09:37:43.211874 32551 solver.cpp:404]     Test net output #1: loss = 5.07196 (* 1 = 5.07196 loss)\nI0821 09:37:44.518808 32551 solver.cpp:228] Iteration 1900, loss = 0.272008\nI0821 09:37:44.518852 32551 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:37:44.518868 32551 solver.cpp:244]     Train net output #1: loss = 0.272008 (* 1 = 0.272008 loss)\nI0821 09:37:44.605391 32551 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0821 09:40:00.140496 32551 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 09:41:20.564827 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15476\nI0821 09:41:20.565058 32551 solver.cpp:404]     Test net output #1: loss = 4.5816 (* 1 = 4.5816 loss)\nI0821 09:41:21.872014 32551 solver.cpp:228] Iteration 2000, loss = 0.195692\nI0821 09:41:21.872056 32551 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 09:41:21.872072 32551 solver.cpp:244]     Train net output #1: loss = 0.195692 (* 1 = 0.195692 loss)\nI0821 09:41:21.950412 32551 sgd_solver.cpp:166] 
Iteration 2000, lr = 0.35\nI0821 09:43:37.639036 32551 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 09:44:58.054044 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15632\nI0821 09:44:58.054283 32551 solver.cpp:404]     Test net output #1: loss = 4.72337 (* 1 = 4.72337 loss)\nI0821 09:44:59.361604 32551 solver.cpp:228] Iteration 2100, loss = 0.223386\nI0821 09:44:59.361645 32551 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:44:59.361662 32551 solver.cpp:244]     Train net output #1: loss = 0.223386 (* 1 = 0.223386 loss)\nI0821 09:44:59.444741 32551 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0821 09:47:15.362277 32551 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 09:48:35.770545 32551 solver.cpp:404]     Test net output #0: accuracy = 0.21\nI0821 09:48:35.770787 32551 solver.cpp:404]     Test net output #1: loss = 4.41259 (* 1 = 4.41259 loss)\nI0821 09:48:37.078763 32551 solver.cpp:228] Iteration 2200, loss = 0.158326\nI0821 09:48:37.078805 32551 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:48:37.078821 32551 solver.cpp:244]     Train net output #1: loss = 0.158326 (* 1 = 0.158326 loss)\nI0821 09:48:37.161885 32551 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0821 09:50:52.793334 32551 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 09:52:13.223644 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2002\nI0821 09:52:13.223919 32551 solver.cpp:404]     Test net output #1: loss = 4.25884 (* 1 = 4.25884 loss)\nI0821 09:52:14.530930 32551 solver.cpp:228] Iteration 2300, loss = 0.205015\nI0821 09:52:14.530975 32551 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 09:52:14.530992 32551 solver.cpp:244]     Train net output #1: loss = 0.205015 (* 1 = 0.205015 loss)\nI0821 09:52:14.609133 32551 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0821 09:54:30.340916 32551 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 09:55:50.748915 32551 solver.cpp:404]    
 Test net output #0: accuracy = 0.17588\nI0821 09:55:50.749178 32551 solver.cpp:404]     Test net output #1: loss = 4.50224 (* 1 = 4.50224 loss)\nI0821 09:55:52.057046 32551 solver.cpp:228] Iteration 2400, loss = 0.137446\nI0821 09:55:52.057092 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:55:52.057108 32551 solver.cpp:244]     Train net output #1: loss = 0.137445 (* 1 = 0.137445 loss)\nI0821 09:55:52.136790 32551 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0821 09:58:07.987551 32551 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 09:59:28.456297 32551 solver.cpp:404]     Test net output #0: accuracy = 0.22368\nI0821 09:59:28.456555 32551 solver.cpp:404]     Test net output #1: loss = 4.11521 (* 1 = 4.11521 loss)\nI0821 09:59:29.763856 32551 solver.cpp:228] Iteration 2500, loss = 0.236187\nI0821 09:59:29.763902 32551 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:59:29.763918 32551 solver.cpp:244]     Train net output #1: loss = 0.236187 (* 1 = 0.236187 loss)\nI0821 09:59:29.845938 32551 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0821 10:01:45.481990 32551 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 10:03:06.000380 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16892\nI0821 10:03:06.000641 32551 solver.cpp:404]     Test net output #1: loss = 4.77034 (* 1 = 4.77034 loss)\nI0821 10:03:07.309078 32551 solver.cpp:228] Iteration 2600, loss = 0.177453\nI0821 10:03:07.309124 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:03:07.309140 32551 solver.cpp:244]     Train net output #1: loss = 0.177453 (* 1 = 0.177453 loss)\nI0821 10:03:07.384608 32551 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0821 10:05:23.024847 32551 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 10:06:43.529345 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16576\nI0821 10:06:43.529604 32551 solver.cpp:404]     Test net output #1: loss = 5.17127 (* 1 = 5.17127 
loss)\nI0821 10:06:44.837654 32551 solver.cpp:228] Iteration 2700, loss = 0.120859\nI0821 10:06:44.837699 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:06:44.837723 32551 solver.cpp:244]     Train net output #1: loss = 0.120859 (* 1 = 0.120859 loss)\nI0821 10:06:44.917359 32551 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0821 10:09:00.432031 32551 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 10:10:20.943675 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17656\nI0821 10:10:20.943928 32551 solver.cpp:404]     Test net output #1: loss = 4.87021 (* 1 = 4.87021 loss)\nI0821 10:10:22.251122 32551 solver.cpp:228] Iteration 2800, loss = 0.0969647\nI0821 10:10:22.251168 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:10:22.251185 32551 solver.cpp:244]     Train net output #1: loss = 0.0969647 (* 1 = 0.0969647 loss)\nI0821 10:10:22.329251 32551 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0821 10:12:38.120812 32551 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 10:13:58.624527 32551 solver.cpp:404]     Test net output #0: accuracy = 0.20464\nI0821 10:13:58.624786 32551 solver.cpp:404]     Test net output #1: loss = 4.15388 (* 1 = 4.15388 loss)\nI0821 10:13:59.929116 32551 solver.cpp:228] Iteration 2900, loss = 0.0745529\nI0821 10:13:59.929160 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:13:59.929177 32551 solver.cpp:244]     Train net output #1: loss = 0.0745529 (* 1 = 0.0745529 loss)\nI0821 10:14:00.007200 32551 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0821 10:16:15.904376 32551 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 10:17:36.418326 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17156\nI0821 10:17:36.418571 32551 solver.cpp:404]     Test net output #1: loss = 4.99687 (* 1 = 4.99687 loss)\nI0821 10:17:37.723537 32551 solver.cpp:228] Iteration 3000, loss = 0.104072\nI0821 10:17:37.723582 32551 solver.cpp:244]     Train 
net output #0: accuracy = 0.976\nI0821 10:17:37.723598 32551 solver.cpp:244]     Train net output #1: loss = 0.104072 (* 1 = 0.104072 loss)\nI0821 10:17:37.796140 32551 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0821 10:19:53.842905 32551 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 10:21:14.372308 32551 solver.cpp:404]     Test net output #0: accuracy = 0.20768\nI0821 10:21:14.372570 32551 solver.cpp:404]     Test net output #1: loss = 4.12975 (* 1 = 4.12975 loss)\nI0821 10:21:15.676693 32551 solver.cpp:228] Iteration 3100, loss = 0.0546572\nI0821 10:21:15.676741 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:21:15.676758 32551 solver.cpp:244]     Train net output #1: loss = 0.0546572 (* 1 = 0.0546572 loss)\nI0821 10:21:15.756960 32551 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0821 10:23:31.483801 32551 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 10:24:51.993798 32551 solver.cpp:404]     Test net output #0: accuracy = 0.19432\nI0821 10:24:51.994046 32551 solver.cpp:404]     Test net output #1: loss = 4.01349 (* 1 = 4.01349 loss)\nI0821 10:24:53.301254 32551 solver.cpp:228] Iteration 3200, loss = 0.0810701\nI0821 10:24:53.301296 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:24:53.301313 32551 solver.cpp:244]     Train net output #1: loss = 0.0810701 (* 1 = 0.0810701 loss)\nI0821 10:24:53.376060 32551 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0821 10:27:09.226486 32551 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 10:28:29.738199 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2512\nI0821 10:28:29.738445 32551 solver.cpp:404]     Test net output #1: loss = 3.17552 (* 1 = 3.17552 loss)\nI0821 10:28:31.041970 32551 solver.cpp:228] Iteration 3300, loss = 0.0758754\nI0821 10:28:31.042012 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:28:31.042029 32551 solver.cpp:244]     Train net output #1: loss = 0.0758754 (* 1 = 0.0758754 
loss)\nI0821 10:28:31.118520 32551 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0821 10:30:46.894448 32551 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 10:32:07.418737 32551 solver.cpp:404]     Test net output #0: accuracy = 0.22892\nI0821 10:32:07.419008 32551 solver.cpp:404]     Test net output #1: loss = 3.42884 (* 1 = 3.42884 loss)\nI0821 10:32:08.723235 32551 solver.cpp:228] Iteration 3400, loss = 0.0641401\nI0821 10:32:08.723278 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:32:08.723294 32551 solver.cpp:244]     Train net output #1: loss = 0.0641401 (* 1 = 0.0641401 loss)\nI0821 10:32:08.802098 32551 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0821 10:34:24.722458 32551 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 10:35:45.255707 32551 solver.cpp:404]     Test net output #0: accuracy = 0.22824\nI0821 10:35:45.255956 32551 solver.cpp:404]     Test net output #1: loss = 3.39205 (* 1 = 3.39205 loss)\nI0821 10:35:46.560075 32551 solver.cpp:228] Iteration 3500, loss = 0.162838\nI0821 10:35:46.560117 32551 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:35:46.560133 32551 solver.cpp:244]     Train net output #1: loss = 0.162838 (* 1 = 0.162838 loss)\nI0821 10:35:46.643972 32551 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0821 10:38:02.620301 32551 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 10:39:23.149670 32551 solver.cpp:404]     Test net output #0: accuracy = 0.24272\nI0821 10:39:23.149940 32551 solver.cpp:404]     Test net output #1: loss = 3.26789 (* 1 = 3.26789 loss)\nI0821 10:39:24.453883 32551 solver.cpp:228] Iteration 3600, loss = 0.091093\nI0821 10:39:24.453927 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:39:24.453944 32551 solver.cpp:244]     Train net output #1: loss = 0.0910931 (* 1 = 0.0910931 loss)\nI0821 10:39:24.531286 32551 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0821 10:41:40.483904 32551 solver.cpp:337] Iteration 
3700, Testing net (#0)\nI0821 10:43:01.012058 32551 solver.cpp:404]     Test net output #0: accuracy = 0.23488\nI0821 10:43:01.012331 32551 solver.cpp:404]     Test net output #1: loss = 3.33555 (* 1 = 3.33555 loss)\nI0821 10:43:02.316074 32551 solver.cpp:228] Iteration 3700, loss = 0.0983642\nI0821 10:43:02.316117 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:43:02.316133 32551 solver.cpp:244]     Train net output #1: loss = 0.0983643 (* 1 = 0.0983643 loss)\nI0821 10:43:02.391799 32551 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0821 10:45:18.125244 32551 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 10:46:38.630102 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2438\nI0821 10:46:38.630365 32551 solver.cpp:404]     Test net output #1: loss = 3.35018 (* 1 = 3.35018 loss)\nI0821 10:46:39.933557 32551 solver.cpp:228] Iteration 3800, loss = 0.039414\nI0821 10:46:39.933601 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 10:46:39.933616 32551 solver.cpp:244]     Train net output #1: loss = 0.039414 (* 1 = 0.039414 loss)\nI0821 10:46:40.017117 32551 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0821 10:48:55.905171 32551 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 10:50:16.419461 32551 solver.cpp:404]     Test net output #0: accuracy = 0.28636\nI0821 10:50:16.419728 32551 solver.cpp:404]     Test net output #1: loss = 2.76583 (* 1 = 2.76583 loss)\nI0821 10:50:17.723346 32551 solver.cpp:228] Iteration 3900, loss = 0.0741404\nI0821 10:50:17.723389 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:50:17.723405 32551 solver.cpp:244]     Train net output #1: loss = 0.0741404 (* 1 = 0.0741404 loss)\nI0821 10:50:17.803793 32551 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0821 10:52:33.793982 32551 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 10:53:54.314760 32551 solver.cpp:404]     Test net output #0: accuracy = 0.28648\nI0821 10:53:54.315019 32551 
solver.cpp:404]     Test net output #1: loss = 3.03571 (* 1 = 3.03571 loss)\nI0821 10:53:55.618736 32551 solver.cpp:228] Iteration 4000, loss = 0.145066\nI0821 10:53:55.618777 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:53:55.618793 32551 solver.cpp:244]     Train net output #1: loss = 0.145066 (* 1 = 0.145066 loss)\nI0821 10:53:55.699759 32551 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0821 10:56:11.704380 32551 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 10:57:32.201074 32551 solver.cpp:404]     Test net output #0: accuracy = 0.23908\nI0821 10:57:32.201340 32551 solver.cpp:404]     Test net output #1: loss = 3.44658 (* 1 = 3.44658 loss)\nI0821 10:57:33.504570 32551 solver.cpp:228] Iteration 4100, loss = 0.0440576\nI0821 10:57:33.504614 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 10:57:33.504631 32551 solver.cpp:244]     Train net output #1: loss = 0.0440577 (* 1 = 0.0440577 loss)\nI0821 10:57:33.586652 32551 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0821 10:59:49.550552 32551 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 11:01:10.082146 32551 solver.cpp:404]     Test net output #0: accuracy = 0.29028\nI0821 11:01:10.082386 32551 solver.cpp:404]     Test net output #1: loss = 3.18538 (* 1 = 3.18538 loss)\nI0821 11:01:11.386387 32551 solver.cpp:228] Iteration 4200, loss = 0.0530394\nI0821 11:01:11.386433 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:01:11.386449 32551 solver.cpp:244]     Train net output #1: loss = 0.0530394 (* 1 = 0.0530394 loss)\nI0821 11:01:11.469355 32551 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0821 11:03:27.434249 32551 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 11:04:47.934437 32551 solver.cpp:404]     Test net output #0: accuracy = 0.268\nI0821 11:04:47.934682 32551 solver.cpp:404]     Test net output #1: loss = 3.26618 (* 1 = 3.26618 loss)\nI0821 11:04:49.239456 32551 solver.cpp:228] Iteration 4300, loss 
= 0.0947848\nI0821 11:04:49.239502 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:04:49.239519 32551 solver.cpp:244]     Train net output #1: loss = 0.0947849 (* 1 = 0.0947849 loss)\nI0821 11:04:49.314419 32551 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0821 11:07:05.206322 32551 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 11:08:25.722301 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16976\nI0821 11:08:25.722563 32551 solver.cpp:404]     Test net output #1: loss = 4.3057 (* 1 = 4.3057 loss)\nI0821 11:08:27.026094 32551 solver.cpp:228] Iteration 4400, loss = 0.0346004\nI0821 11:08:27.026140 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 11:08:27.026156 32551 solver.cpp:244]     Train net output #1: loss = 0.0346005 (* 1 = 0.0346005 loss)\nI0821 11:08:27.108448 32551 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0821 11:10:43.016505 32551 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 11:12:03.526110 32551 solver.cpp:404]     Test net output #0: accuracy = 0.325\nI0821 11:12:03.526355 32551 solver.cpp:404]     Test net output #1: loss = 3.41703 (* 1 = 3.41703 loss)\nI0821 11:12:04.829735 32551 solver.cpp:228] Iteration 4500, loss = 0.0824127\nI0821 11:12:04.829784 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:12:04.829802 32551 solver.cpp:244]     Train net output #1: loss = 0.0824128 (* 1 = 0.0824128 loss)\nI0821 11:12:04.911439 32551 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0821 11:14:20.621778 32551 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 11:15:41.116951 32551 solver.cpp:404]     Test net output #0: accuracy = 0.28932\nI0821 11:15:41.117215 32551 solver.cpp:404]     Test net output #1: loss = 2.75717 (* 1 = 2.75717 loss)\nI0821 11:15:42.420661 32551 solver.cpp:228] Iteration 4600, loss = 0.0416179\nI0821 11:15:42.420707 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:15:42.420723 32551 
solver.cpp:244]     Train net output #1: loss = 0.041618 (* 1 = 0.041618 loss)\nI0821 11:15:42.498705 32551 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0821 11:17:58.440794 32551 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 11:19:18.936751 32551 solver.cpp:404]     Test net output #0: accuracy = 0.25144\nI0821 11:19:18.937021 32551 solver.cpp:404]     Test net output #1: loss = 3.5356 (* 1 = 3.5356 loss)\nI0821 11:19:20.240417 32551 solver.cpp:228] Iteration 4700, loss = 0.0289399\nI0821 11:19:20.240460 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 11:19:20.240475 32551 solver.cpp:244]     Train net output #1: loss = 0.02894 (* 1 = 0.02894 loss)\nI0821 11:19:20.322795 32551 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0821 11:21:36.281184 32551 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 11:22:56.782466 32551 solver.cpp:404]     Test net output #0: accuracy = 0.327\nI0821 11:22:56.782732 32551 solver.cpp:404]     Test net output #1: loss = 3.14042 (* 1 = 3.14042 loss)\nI0821 11:22:58.086423 32551 solver.cpp:228] Iteration 4800, loss = 0.0929536\nI0821 11:22:58.086467 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:22:58.086483 32551 solver.cpp:244]     Train net output #1: loss = 0.0929537 (* 1 = 0.0929537 loss)\nI0821 11:22:58.166288 32551 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0821 11:25:14.088157 32551 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 11:26:34.558171 32551 solver.cpp:404]     Test net output #0: accuracy = 0.25896\nI0821 11:26:34.558428 32551 solver.cpp:404]     Test net output #1: loss = 3.49098 (* 1 = 3.49098 loss)\nI0821 11:26:35.862041 32551 solver.cpp:228] Iteration 4900, loss = 0.0564282\nI0821 11:26:35.862084 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:26:35.862102 32551 solver.cpp:244]     Train net output #1: loss = 0.0564282 (* 1 = 0.0564282 loss)\nI0821 11:26:35.945976 32551 sgd_solver.cpp:166] Iteration 4900, lr = 
0.35\nI0821 11:28:51.932684 32551 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 11:30:12.429347 32551 solver.cpp:404]     Test net output #0: accuracy = 0.23\nI0821 11:30:12.429601 32551 solver.cpp:404]     Test net output #1: loss = 3.7843 (* 1 = 3.7843 loss)\nI0821 11:30:13.733528 32551 solver.cpp:228] Iteration 5000, loss = 0.11481\nI0821 11:30:13.733570 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:30:13.733587 32551 solver.cpp:244]     Train net output #1: loss = 0.11481 (* 1 = 0.11481 loss)\nI0821 11:30:13.817948 32551 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0821 11:32:29.766845 32551 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 11:33:50.269006 32551 solver.cpp:404]     Test net output #0: accuracy = 0.24912\nI0821 11:33:50.269264 32551 solver.cpp:404]     Test net output #1: loss = 3.57089 (* 1 = 3.57089 loss)\nI0821 11:33:51.572770 32551 solver.cpp:228] Iteration 5100, loss = 0.0633991\nI0821 11:33:51.572818 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:33:51.572835 32551 solver.cpp:244]     Train net output #1: loss = 0.0633992 (* 1 = 0.0633992 loss)\nI0821 11:33:51.652190 32551 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0821 11:36:07.652072 32551 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 11:37:28.140002 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2542\nI0821 11:37:28.140265 32551 solver.cpp:404]     Test net output #1: loss = 3.52482 (* 1 = 3.52482 loss)\nI0821 11:37:29.443150 32551 solver.cpp:228] Iteration 5200, loss = 0.0745668\nI0821 11:37:29.443192 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:37:29.443208 32551 solver.cpp:244]     Train net output #1: loss = 0.0745668 (* 1 = 0.0745668 loss)\nI0821 11:37:29.523720 32551 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0821 11:39:45.468672 32551 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 11:41:05.978441 32551 solver.cpp:404]     Test net output 
#0: accuracy = 0.27988\nI0821 11:41:05.978699 32551 solver.cpp:404]     Test net output #1: loss = 4.14307 (* 1 = 4.14307 loss)\nI0821 11:41:07.282675 32551 solver.cpp:228] Iteration 5300, loss = 0.106057\nI0821 11:41:07.282719 32551 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:41:07.282735 32551 solver.cpp:244]     Train net output #1: loss = 0.106057 (* 1 = 0.106057 loss)\nI0821 11:41:07.363596 32551 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0821 11:43:23.300966 32551 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 11:44:43.713459 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38364\nI0821 11:44:43.713724 32551 solver.cpp:404]     Test net output #1: loss = 2.81208 (* 1 = 2.81208 loss)\nI0821 11:44:45.017477 32551 solver.cpp:228] Iteration 5400, loss = 0.040882\nI0821 11:44:45.017520 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:44:45.017537 32551 solver.cpp:244]     Train net output #1: loss = 0.040882 (* 1 = 0.040882 loss)\nI0821 11:44:45.103528 32551 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0821 11:47:01.031817 32551 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 11:48:21.430918 32551 solver.cpp:404]     Test net output #0: accuracy = 0.28772\nI0821 11:48:21.431174 32551 solver.cpp:404]     Test net output #1: loss = 4.16788 (* 1 = 4.16788 loss)\nI0821 11:48:22.735348 32551 solver.cpp:228] Iteration 5500, loss = 0.0824553\nI0821 11:48:22.735393 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:48:22.735409 32551 solver.cpp:244]     Train net output #1: loss = 0.0824553 (* 1 = 0.0824553 loss)\nI0821 11:48:22.815690 32551 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0821 11:50:38.642925 32551 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 11:51:59.052965 32551 solver.cpp:404]     Test net output #0: accuracy = 0.42072\nI0821 11:51:59.053230 32551 solver.cpp:404]     Test net output #1: loss = 2.32617 (* 1 = 2.32617 loss)\nI0821 
11:52:00.356870 32551 solver.cpp:228] Iteration 5600, loss = 0.0827661\nI0821 11:52:00.356916 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:52:00.356932 32551 solver.cpp:244]     Train net output #1: loss = 0.0827661 (* 1 = 0.0827661 loss)\nI0821 11:52:00.437885 32551 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0821 11:54:16.355742 32551 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 11:55:36.755480 32551 solver.cpp:404]     Test net output #0: accuracy = 0.28364\nI0821 11:55:36.755731 32551 solver.cpp:404]     Test net output #1: loss = 3.9516 (* 1 = 3.9516 loss)\nI0821 11:55:38.059592 32551 solver.cpp:228] Iteration 5700, loss = 0.0776952\nI0821 11:55:38.059634 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:55:38.059650 32551 solver.cpp:244]     Train net output #1: loss = 0.0776953 (* 1 = 0.0776953 loss)\nI0821 11:55:38.136802 32551 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0821 11:57:54.096843 32551 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 11:59:14.501698 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3688\nI0821 11:59:14.501963 32551 solver.cpp:404]     Test net output #1: loss = 3.51753 (* 1 = 3.51753 loss)\nI0821 11:59:15.805419 32551 solver.cpp:228] Iteration 5800, loss = 0.100524\nI0821 11:59:15.805461 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:59:15.805477 32551 solver.cpp:244]     Train net output #1: loss = 0.100524 (* 1 = 0.100524 loss)\nI0821 11:59:15.886274 32551 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0821 12:01:31.571312 32551 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 12:02:51.961828 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38276\nI0821 12:02:51.962107 32551 solver.cpp:404]     Test net output #1: loss = 3.51166 (* 1 = 3.51166 loss)\nI0821 12:02:53.265485 32551 solver.cpp:228] Iteration 5900, loss = 0.0713701\nI0821 12:02:53.265527 32551 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0821 12:02:53.265542 32551 solver.cpp:244]     Train net output #1: loss = 0.0713702 (* 1 = 0.0713702 loss)\nI0821 12:02:53.346027 32551 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0821 12:05:09.319267 32551 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 12:06:29.738270 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3442\nI0821 12:06:29.738534 32551 solver.cpp:404]     Test net output #1: loss = 3.44681 (* 1 = 3.44681 loss)\nI0821 12:06:31.042134 32551 solver.cpp:228] Iteration 6000, loss = 0.0421665\nI0821 12:06:31.042174 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:06:31.042191 32551 solver.cpp:244]     Train net output #1: loss = 0.0421666 (* 1 = 0.0421666 loss)\nI0821 12:06:31.119469 32551 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0821 12:08:47.056471 32551 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 12:10:07.437708 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4606\nI0821 12:10:07.437963 32551 solver.cpp:404]     Test net output #1: loss = 2.6156 (* 1 = 2.6156 loss)\nI0821 12:10:08.742015 32551 solver.cpp:228] Iteration 6100, loss = 0.0961365\nI0821 12:10:08.742054 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:10:08.742071 32551 solver.cpp:244]     Train net output #1: loss = 0.0961365 (* 1 = 0.0961365 loss)\nI0821 12:10:08.823261 32551 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0821 12:12:24.729702 32551 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 12:13:45.127187 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38356\nI0821 12:13:45.127435 32551 solver.cpp:404]     Test net output #1: loss = 3.45165 (* 1 = 3.45165 loss)\nI0821 12:13:46.430244 32551 solver.cpp:228] Iteration 6200, loss = 0.0707293\nI0821 12:13:46.430285 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:13:46.430300 32551 solver.cpp:244]     Train net output #1: loss = 0.0707293 (* 1 = 0.0707293 loss)\nI0821 
12:13:46.514642 32551 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0821 12:16:02.298678 32551 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 12:17:22.793505 32551 solver.cpp:404]     Test net output #0: accuracy = 0.44224\nI0821 12:17:22.793743 32551 solver.cpp:404]     Test net output #1: loss = 2.66291 (* 1 = 2.66291 loss)\nI0821 12:17:24.097054 32551 solver.cpp:228] Iteration 6300, loss = 0.0760623\nI0821 12:17:24.097095 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:17:24.097111 32551 solver.cpp:244]     Train net output #1: loss = 0.0760624 (* 1 = 0.0760624 loss)\nI0821 12:17:24.178392 32551 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0821 12:19:40.085916 32551 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 12:21:00.573706 32551 solver.cpp:404]     Test net output #0: accuracy = 0.39532\nI0821 12:21:00.573979 32551 solver.cpp:404]     Test net output #1: loss = 2.8829 (* 1 = 2.8829 loss)\nI0821 12:21:01.877719 32551 solver.cpp:228] Iteration 6400, loss = 0.064482\nI0821 12:21:01.877763 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:21:01.877789 32551 solver.cpp:244]     Train net output #1: loss = 0.064482 (* 1 = 0.064482 loss)\nI0821 12:21:01.954805 32551 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0821 12:23:17.898685 32551 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 12:24:38.463984 32551 solver.cpp:404]     Test net output #0: accuracy = 0.39484\nI0821 12:24:38.464267 32551 solver.cpp:404]     Test net output #1: loss = 2.86953 (* 1 = 2.86953 loss)\nI0821 12:24:39.769047 32551 solver.cpp:228] Iteration 6500, loss = 0.0807486\nI0821 12:24:39.769096 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:24:39.769119 32551 solver.cpp:244]     Train net output #1: loss = 0.0807487 (* 1 = 0.0807487 loss)\nI0821 12:24:39.847084 32551 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0821 12:26:55.827179 32551 solver.cpp:337] Iteration 6600, Testing net 
(#0)\nI0821 12:28:16.322212 32551 solver.cpp:404]     Test net output #0: accuracy = 0.41192\nI0821 12:28:16.322465 32551 solver.cpp:404]     Test net output #1: loss = 2.40424 (* 1 = 2.40424 loss)\nI0821 12:28:17.626981 32551 solver.cpp:228] Iteration 6600, loss = 0.0238294\nI0821 12:28:17.627027 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:28:17.627053 32551 solver.cpp:244]     Train net output #1: loss = 0.0238294 (* 1 = 0.0238294 loss)\nI0821 12:28:17.708047 32551 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0821 12:30:33.668531 32551 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 12:31:54.174655 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38068\nI0821 12:31:54.174926 32551 solver.cpp:404]     Test net output #1: loss = 2.88335 (* 1 = 2.88335 loss)\nI0821 12:31:55.479579 32551 solver.cpp:228] Iteration 6700, loss = 0.0276761\nI0821 12:31:55.479626 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:31:55.479650 32551 solver.cpp:244]     Train net output #1: loss = 0.0276761 (* 1 = 0.0276761 loss)\nI0821 12:31:55.554618 32551 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0821 12:34:11.506173 32551 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 12:35:31.997027 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38704\nI0821 12:35:31.997290 32551 solver.cpp:404]     Test net output #1: loss = 2.17566 (* 1 = 2.17566 loss)\nI0821 12:35:33.301270 32551 solver.cpp:228] Iteration 6800, loss = 0.0309143\nI0821 12:35:33.301314 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:35:33.301331 32551 solver.cpp:244]     Train net output #1: loss = 0.0309143 (* 1 = 0.0309143 loss)\nI0821 12:35:33.381439 32551 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0821 12:37:49.430164 32551 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 12:39:09.978332 32551 solver.cpp:404]     Test net output #0: accuracy = 0.41388\nI0821 12:39:09.978677 32551 solver.cpp:404]    
 Test net output #1: loss = 2.41089 (* 1 = 2.41089 loss)\nI0821 12:39:11.282131 32551 solver.cpp:228] Iteration 6900, loss = 0.0286485\nI0821 12:39:11.282174 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:39:11.282198 32551 solver.cpp:244]     Train net output #1: loss = 0.0286486 (* 1 = 0.0286486 loss)\nI0821 12:39:11.356725 32551 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0821 12:41:27.393563 32551 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 12:42:47.923480 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3424\nI0821 12:42:47.923755 32551 solver.cpp:404]     Test net output #1: loss = 3.27116 (* 1 = 3.27116 loss)\nI0821 12:42:49.227270 32551 solver.cpp:228] Iteration 7000, loss = 0.0281649\nI0821 12:42:49.227318 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:42:49.227342 32551 solver.cpp:244]     Train net output #1: loss = 0.028165 (* 1 = 0.028165 loss)\nI0821 12:42:49.309370 32551 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0821 12:45:05.291522 32551 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 12:46:25.823616 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3674\nI0821 12:46:25.823881 32551 solver.cpp:404]     Test net output #1: loss = 2.2611 (* 1 = 2.2611 loss)\nI0821 12:46:27.128938 32551 solver.cpp:228] Iteration 7100, loss = 0.0384317\nI0821 12:46:27.128984 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:46:27.129007 32551 solver.cpp:244]     Train net output #1: loss = 0.0384318 (* 1 = 0.0384318 loss)\nI0821 12:46:27.209908 32551 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0821 12:48:43.121942 32551 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 12:50:03.658517 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36584\nI0821 12:50:03.658778 32551 solver.cpp:404]     Test net output #1: loss = 2.20676 (* 1 = 2.20676 loss)\nI0821 12:50:04.962584 32551 solver.cpp:228] Iteration 7200, loss = 0.0216371\nI0821 
12:50:04.962630 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:50:04.962652 32551 solver.cpp:244]     Train net output #1: loss = 0.0216372 (* 1 = 0.0216372 loss)\nI0821 12:50:05.040132 32551 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0821 12:52:21.041486 32551 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 12:53:41.569015 32551 solver.cpp:404]     Test net output #0: accuracy = 0.34276\nI0821 12:53:41.569284 32551 solver.cpp:404]     Test net output #1: loss = 2.70298 (* 1 = 2.70298 loss)\nI0821 12:53:42.873777 32551 solver.cpp:228] Iteration 7300, loss = 0.0196256\nI0821 12:53:42.873823 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:53:42.873847 32551 solver.cpp:244]     Train net output #1: loss = 0.0196257 (* 1 = 0.0196257 loss)\nI0821 12:53:42.953496 32551 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0821 12:55:58.815079 32551 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 12:57:19.354882 32551 solver.cpp:404]     Test net output #0: accuracy = 0.29652\nI0821 12:57:19.355191 32551 solver.cpp:404]     Test net output #1: loss = 3.16481 (* 1 = 3.16481 loss)\nI0821 12:57:20.660064 32551 solver.cpp:228] Iteration 7400, loss = 0.0150155\nI0821 12:57:20.660111 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:57:20.660135 32551 solver.cpp:244]     Train net output #1: loss = 0.0150156 (* 1 = 0.0150156 loss)\nI0821 12:57:20.734985 32551 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0821 12:59:36.667462 32551 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 13:00:57.205515 32551 solver.cpp:404]     Test net output #0: accuracy = 0.394\nI0821 13:00:57.205793 32551 solver.cpp:404]     Test net output #1: loss = 2.90506 (* 1 = 2.90506 loss)\nI0821 13:00:58.510640 32551 solver.cpp:228] Iteration 7500, loss = 0.0311718\nI0821 13:00:58.510686 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:00:58.510720 32551 solver.cpp:244]     Train net output 
#1: loss = 0.0311719 (* 1 = 0.0311719 loss)\nI0821 13:00:58.592056 32551 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0821 13:03:14.665041 32551 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 13:04:35.188794 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32792\nI0821 13:04:35.189039 32551 solver.cpp:404]     Test net output #1: loss = 3.44401 (* 1 = 3.44401 loss)\nI0821 13:04:36.493711 32551 solver.cpp:228] Iteration 7600, loss = 0.0163627\nI0821 13:04:36.493758 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:04:36.493782 32551 solver.cpp:244]     Train net output #1: loss = 0.0163628 (* 1 = 0.0163628 loss)\nI0821 13:04:36.576707 32551 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0821 13:06:52.187443 32551 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 13:08:12.683359 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32668\nI0821 13:08:12.683615 32551 solver.cpp:404]     Test net output #1: loss = 3.30987 (* 1 = 3.30987 loss)\nI0821 13:08:13.987905 32551 solver.cpp:228] Iteration 7700, loss = 0.054551\nI0821 13:08:13.987951 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:08:13.987967 32551 solver.cpp:244]     Train net output #1: loss = 0.0545511 (* 1 = 0.0545511 loss)\nI0821 13:08:14.069823 32551 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0821 13:10:29.703879 32551 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 13:11:50.215867 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32524\nI0821 13:11:50.216116 32551 solver.cpp:404]     Test net output #1: loss = 3.20645 (* 1 = 3.20645 loss)\nI0821 13:11:51.521067 32551 solver.cpp:228] Iteration 7800, loss = 0.0514365\nI0821 13:11:51.521113 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:11:51.521131 32551 solver.cpp:244]     Train net output #1: loss = 0.0514367 (* 1 = 0.0514367 loss)\nI0821 13:11:51.600358 32551 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0821 
13:14:07.461969 32551 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 13:15:27.959106 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2862\nI0821 13:15:27.959369 32551 solver.cpp:404]     Test net output #1: loss = 4.42787 (* 1 = 4.42787 loss)\nI0821 13:15:29.264430 32551 solver.cpp:228] Iteration 7900, loss = 0.0431463\nI0821 13:15:29.264475 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:15:29.264492 32551 solver.cpp:244]     Train net output #1: loss = 0.0431465 (* 1 = 0.0431465 loss)\nI0821 13:15:29.343961 32551 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0821 13:17:45.301301 32551 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 13:19:05.802384 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38828\nI0821 13:19:05.802644 32551 solver.cpp:404]     Test net output #1: loss = 2.62747 (* 1 = 2.62747 loss)\nI0821 13:19:07.106408 32551 solver.cpp:228] Iteration 8000, loss = 0.0372851\nI0821 13:19:07.106453 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:19:07.106470 32551 solver.cpp:244]     Train net output #1: loss = 0.0372852 (* 1 = 0.0372852 loss)\nI0821 13:19:07.188810 32551 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0821 13:21:23.088002 32551 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 13:22:43.590447 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40696\nI0821 13:22:43.590715 32551 solver.cpp:404]     Test net output #1: loss = 2.50342 (* 1 = 2.50342 loss)\nI0821 13:22:44.894839 32551 solver.cpp:228] Iteration 8100, loss = 0.0271955\nI0821 13:22:44.894881 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:22:44.894897 32551 solver.cpp:244]     Train net output #1: loss = 0.0271956 (* 1 = 0.0271956 loss)\nI0821 13:22:44.973888 32551 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0821 13:25:00.853569 32551 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 13:26:21.360352 32551 solver.cpp:404]     Test net output 
#0: accuracy = 0.42448\nI0821 13:26:21.360612 32551 solver.cpp:404]     Test net output #1: loss = 2.2027 (* 1 = 2.2027 loss)\nI0821 13:26:22.665684 32551 solver.cpp:228] Iteration 8200, loss = 0.017978\nI0821 13:26:22.665729 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:26:22.665745 32551 solver.cpp:244]     Train net output #1: loss = 0.0179781 (* 1 = 0.0179781 loss)\nI0821 13:26:22.741669 32551 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0821 13:28:38.276876 32551 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 13:29:58.813217 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46028\nI0821 13:29:58.813467 32551 solver.cpp:404]     Test net output #1: loss = 2.36777 (* 1 = 2.36777 loss)\nI0821 13:30:00.117779 32551 solver.cpp:228] Iteration 8300, loss = 0.0491477\nI0821 13:30:00.117825 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:30:00.117849 32551 solver.cpp:244]     Train net output #1: loss = 0.0491478 (* 1 = 0.0491478 loss)\nI0821 13:30:00.200283 32551 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0821 13:32:15.632145 32551 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 13:33:36.139938 32551 solver.cpp:404]     Test net output #0: accuracy = 0.45432\nI0821 13:33:36.140182 32551 solver.cpp:404]     Test net output #1: loss = 2.19349 (* 1 = 2.19349 loss)\nI0821 13:33:37.443724 32551 solver.cpp:228] Iteration 8400, loss = 0.034391\nI0821 13:33:37.443766 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:33:37.443783 32551 solver.cpp:244]     Train net output #1: loss = 0.0343911 (* 1 = 0.0343911 loss)\nI0821 13:33:37.519850 32551 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0821 13:35:53.061200 32551 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 13:37:13.579701 32551 solver.cpp:404]     Test net output #0: accuracy = 0.52516\nI0821 13:37:13.579953 32551 solver.cpp:404]     Test net output #1: loss = 1.89342 (* 1 = 1.89342 loss)\nI0821 
13:37:14.884029 32551 solver.cpp:228] Iteration 8500, loss = 0.0530312\nI0821 13:37:14.884071 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:37:14.884088 32551 solver.cpp:244]     Train net output #1: loss = 0.0530313 (* 1 = 0.0530313 loss)\nI0821 13:37:14.958225 32551 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0821 13:39:30.405005 32551 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 13:40:50.920236 32551 solver.cpp:404]     Test net output #0: accuracy = 0.44928\nI0821 13:40:50.920500 32551 solver.cpp:404]     Test net output #1: loss = 2.39425 (* 1 = 2.39425 loss)\nI0821 13:40:52.224061 32551 solver.cpp:228] Iteration 8600, loss = 0.0433011\nI0821 13:40:52.224102 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:40:52.224118 32551 solver.cpp:244]     Train net output #1: loss = 0.0433012 (* 1 = 0.0433012 loss)\nI0821 13:40:52.301909 32551 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0821 13:43:07.732664 32551 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 13:44:28.253509 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40888\nI0821 13:44:28.253772 32551 solver.cpp:404]     Test net output #1: loss = 2.98935 (* 1 = 2.98935 loss)\nI0821 13:44:29.557193 32551 solver.cpp:228] Iteration 8700, loss = 0.112721\nI0821 13:44:29.557238 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:44:29.557255 32551 solver.cpp:244]     Train net output #1: loss = 0.112721 (* 1 = 0.112721 loss)\nI0821 13:44:29.641329 32551 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0821 13:46:45.138190 32551 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 13:48:05.653004 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32808\nI0821 13:48:05.653276 32551 solver.cpp:404]     Test net output #1: loss = 4.20012 (* 1 = 4.20012 loss)\nI0821 13:48:06.958129 32551 solver.cpp:228] Iteration 8800, loss = 0.0519066\nI0821 13:48:06.958175 32551 solver.cpp:244]     Train net output 
#0: accuracy = 0.992\nI0821 13:48:06.958190 32551 solver.cpp:244]     Train net output #1: loss = 0.0519067 (* 1 = 0.0519067 loss)\nI0821 13:48:07.035156 32551 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0821 13:50:22.731747 32551 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 13:51:43.269992 32551 solver.cpp:404]     Test net output #0: accuracy = 0.52464\nI0821 13:51:43.270264 32551 solver.cpp:404]     Test net output #1: loss = 2.24078 (* 1 = 2.24078 loss)\nI0821 13:51:44.575567 32551 solver.cpp:228] Iteration 8900, loss = 0.0568291\nI0821 13:51:44.575608 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:51:44.575623 32551 solver.cpp:244]     Train net output #1: loss = 0.0568292 (* 1 = 0.0568292 loss)\nI0821 13:51:44.654731 32551 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0821 13:54:00.103745 32551 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 13:55:20.614023 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49532\nI0821 13:55:20.614284 32551 solver.cpp:404]     Test net output #1: loss = 2.42261 (* 1 = 2.42261 loss)\nI0821 13:55:21.918751 32551 solver.cpp:228] Iteration 9000, loss = 0.0746328\nI0821 13:55:21.918792 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:55:21.918807 32551 solver.cpp:244]     Train net output #1: loss = 0.0746329 (* 1 = 0.0746329 loss)\nI0821 13:55:22.005092 32551 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0821 13:57:37.850422 32551 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 13:58:58.254542 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53536\nI0821 13:58:58.254806 32551 solver.cpp:404]     Test net output #1: loss = 2.03913 (* 1 = 2.03913 loss)\nI0821 13:58:59.559303 32551 solver.cpp:228] Iteration 9100, loss = 0.0371378\nI0821 13:58:59.559342 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:58:59.559360 32551 solver.cpp:244]     Train net output #1: loss = 0.037138 (* 1 = 0.037138 loss)\nI0821 
13:58:59.646226 32551 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0821 14:01:15.060871 32551 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 14:02:35.464951 32551 solver.cpp:404]     Test net output #0: accuracy = 0.42304\nI0821 14:02:35.465234 32551 solver.cpp:404]     Test net output #1: loss = 3.19752 (* 1 = 3.19752 loss)\nI0821 14:02:36.769831 32551 solver.cpp:228] Iteration 9200, loss = 0.0635746\nI0821 14:02:36.769871 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:02:36.769888 32551 solver.cpp:244]     Train net output #1: loss = 0.0635747 (* 1 = 0.0635747 loss)\nI0821 14:02:36.853220 32551 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0821 14:04:52.613430 32551 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 14:06:13.023162 32551 solver.cpp:404]     Test net output #0: accuracy = 0.42404\nI0821 14:06:13.023432 32551 solver.cpp:404]     Test net output #1: loss = 3.02461 (* 1 = 3.02461 loss)\nI0821 14:06:14.327432 32551 solver.cpp:228] Iteration 9300, loss = 0.0427351\nI0821 14:06:14.327471 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:06:14.327486 32551 solver.cpp:244]     Train net output #1: loss = 0.0427352 (* 1 = 0.0427352 loss)\nI0821 14:06:14.408324 32551 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0821 14:08:30.150630 32551 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 14:09:50.570233 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55948\nI0821 14:09:50.570500 32551 solver.cpp:404]     Test net output #1: loss = 1.82064 (* 1 = 1.82064 loss)\nI0821 14:09:51.875499 32551 solver.cpp:228] Iteration 9400, loss = 0.0415061\nI0821 14:09:51.875540 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:09:51.875556 32551 solver.cpp:244]     Train net output #1: loss = 0.0415062 (* 1 = 0.0415062 loss)\nI0821 14:09:51.956868 32551 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0821 14:12:07.294152 32551 solver.cpp:337] Iteration 9500, Testing 
net (#0)\nI0821 14:13:27.686017 32551 solver.cpp:404]     Test net output #0: accuracy = 0.43656\nI0821 14:13:27.686295 32551 solver.cpp:404]     Test net output #1: loss = 3.51933 (* 1 = 3.51933 loss)\nI0821 14:13:28.991196 32551 solver.cpp:228] Iteration 9500, loss = 0.0437962\nI0821 14:13:28.991237 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:13:28.991253 32551 solver.cpp:244]     Train net output #1: loss = 0.0437963 (* 1 = 0.0437963 loss)\nI0821 14:13:29.071566 32551 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0821 14:15:44.523068 32551 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 14:17:04.923606 32551 solver.cpp:404]     Test net output #0: accuracy = 0.44452\nI0821 14:17:04.923873 32551 solver.cpp:404]     Test net output #1: loss = 3.76634 (* 1 = 3.76634 loss)\nI0821 14:17:06.228430 32551 solver.cpp:228] Iteration 9600, loss = 0.0205301\nI0821 14:17:06.228472 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:17:06.228488 32551 solver.cpp:244]     Train net output #1: loss = 0.0205303 (* 1 = 0.0205303 loss)\nI0821 14:17:06.308801 32551 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0821 14:19:22.040150 32551 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 14:20:42.446842 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49904\nI0821 14:20:42.447109 32551 solver.cpp:404]     Test net output #1: loss = 3.25423 (* 1 = 3.25423 loss)\nI0821 14:20:43.751688 32551 solver.cpp:228] Iteration 9700, loss = 0.129648\nI0821 14:20:43.751730 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 14:20:43.751745 32551 solver.cpp:244]     Train net output #1: loss = 0.129648 (* 1 = 0.129648 loss)\nI0821 14:20:43.835187 32551 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0821 14:22:59.397805 32551 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 14:24:19.795538 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5576\nI0821 14:24:19.795807 32551 solver.cpp:404]     
Test net output #1: loss = 2.58267 (* 1 = 2.58267 loss)\nI0821 14:24:21.100659 32551 solver.cpp:228] Iteration 9800, loss = 0.0373832\nI0821 14:24:21.100703 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:24:21.100718 32551 solver.cpp:244]     Train net output #1: loss = 0.0373833 (* 1 = 0.0373833 loss)\nI0821 14:24:21.176224 32551 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0821 14:26:36.836427 32551 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 14:27:57.263828 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46584\nI0821 14:27:57.264101 32551 solver.cpp:404]     Test net output #1: loss = 2.80461 (* 1 = 2.80461 loss)\nI0821 14:27:58.569689 32551 solver.cpp:228] Iteration 9900, loss = 0.0657788\nI0821 14:27:58.569732 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:27:58.569749 32551 solver.cpp:244]     Train net output #1: loss = 0.065779 (* 1 = 0.065779 loss)\nI0821 14:27:58.651876 32551 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0821 14:30:14.598011 32551 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 14:31:34.996345 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5436\nI0821 14:31:34.996608 32551 solver.cpp:404]     Test net output #1: loss = 2.66081 (* 1 = 2.66081 loss)\nI0821 14:31:36.301693 32551 solver.cpp:228] Iteration 10000, loss = 0.0309453\nI0821 14:31:36.301736 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:31:36.301753 32551 solver.cpp:244]     Train net output #1: loss = 0.0309455 (* 1 = 0.0309455 loss)\nI0821 14:31:36.385074 32551 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0821 14:33:52.550603 32551 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0821 14:35:13.061591 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50144\nI0821 14:35:13.061821 32551 solver.cpp:404]     Test net output #1: loss = 3.4697 (* 1 = 3.4697 loss)\nI0821 14:35:14.366369 32551 solver.cpp:228] Iteration 10100, loss = 
0.0218741\nI0821 14:35:14.366412 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:35:14.366428 32551 solver.cpp:244]     Train net output #1: loss = 0.0218742 (* 1 = 0.0218742 loss)\nI0821 14:35:14.450798 32551 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0821 14:37:30.052858 32551 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0821 14:38:50.559757 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48416\nI0821 14:38:50.560022 32551 solver.cpp:404]     Test net output #1: loss = 3.00425 (* 1 = 3.00425 loss)\nI0821 14:38:51.865005 32551 solver.cpp:228] Iteration 10200, loss = 0.0163412\nI0821 14:38:51.865052 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:38:51.865068 32551 solver.cpp:244]     Train net output #1: loss = 0.0163413 (* 1 = 0.0163413 loss)\nI0821 14:38:51.944541 32551 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0821 14:41:07.510810 32551 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0821 14:42:28.032367 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59036\nI0821 14:42:28.032639 32551 solver.cpp:404]     Test net output #1: loss = 2.02876 (* 1 = 2.02876 loss)\nI0821 14:42:29.337312 32551 solver.cpp:228] Iteration 10300, loss = 0.0361618\nI0821 14:42:29.337355 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:42:29.337373 32551 solver.cpp:244]     Train net output #1: loss = 0.036162 (* 1 = 0.036162 loss)\nI0821 14:42:29.424019 32551 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0821 14:44:45.092129 32551 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0821 14:46:05.618954 32551 solver.cpp:404]     Test net output #0: accuracy = 0.547\nI0821 14:46:05.619232 32551 solver.cpp:404]     Test net output #1: loss = 2.40838 (* 1 = 2.40838 loss)\nI0821 14:46:06.923393 32551 solver.cpp:228] Iteration 10400, loss = 0.0614019\nI0821 14:46:06.923437 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:46:06.923454 32551 
solver.cpp:244]     Train net output #1: loss = 0.0614021 (* 1 = 0.0614021 loss)\nI0821 14:46:07.008723 32551 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0821 14:48:22.769363 32551 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0821 14:49:43.288586 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53344\nI0821 14:49:43.288856 32551 solver.cpp:404]     Test net output #1: loss = 2.42722 (* 1 = 2.42722 loss)\nI0821 14:49:44.593489 32551 solver.cpp:228] Iteration 10500, loss = 0.0268022\nI0821 14:49:44.593535 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:49:44.593551 32551 solver.cpp:244]     Train net output #1: loss = 0.0268023 (* 1 = 0.0268023 loss)\nI0821 14:49:44.673002 32551 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0821 14:52:00.190423 32551 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0821 14:53:20.680737 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51584\nI0821 14:53:20.681018 32551 solver.cpp:404]     Test net output #1: loss = 2.85883 (* 1 = 2.85883 loss)\nI0821 14:53:21.985502 32551 solver.cpp:228] Iteration 10600, loss = 0.0675101\nI0821 14:53:21.985548 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:53:21.985563 32551 solver.cpp:244]     Train net output #1: loss = 0.0675103 (* 1 = 0.0675103 loss)\nI0821 14:53:22.068089 32551 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0821 14:55:37.776520 32551 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0821 14:56:58.302919 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5196\nI0821 14:56:58.303195 32551 solver.cpp:404]     Test net output #1: loss = 2.46659 (* 1 = 2.46659 loss)\nI0821 14:56:59.607910 32551 solver.cpp:228] Iteration 10700, loss = 0.0349227\nI0821 14:56:59.607956 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:56:59.607972 32551 solver.cpp:244]     Train net output #1: loss = 0.0349228 (* 1 = 0.0349228 loss)\nI0821 14:56:59.691524 32551 
sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0821 14:59:15.490042 32551 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0821 15:00:35.999131 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58252\nI0821 15:00:35.999402 32551 solver.cpp:404]     Test net output #1: loss = 1.96661 (* 1 = 1.96661 loss)\nI0821 15:00:37.304282 32551 solver.cpp:228] Iteration 10800, loss = 0.0432717\nI0821 15:00:37.304328 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:00:37.304345 32551 solver.cpp:244]     Train net output #1: loss = 0.0432719 (* 1 = 0.0432719 loss)\nI0821 15:00:37.386036 32551 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0821 15:02:52.841596 32551 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0821 15:04:13.336827 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54324\nI0821 15:04:13.337108 32551 solver.cpp:404]     Test net output #1: loss = 2.6279 (* 1 = 2.6279 loss)\nI0821 15:04:14.641582 32551 solver.cpp:228] Iteration 10900, loss = 0.00978152\nI0821 15:04:14.641628 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:04:14.641644 32551 solver.cpp:244]     Train net output #1: loss = 0.00978168 (* 1 = 0.00978168 loss)\nI0821 15:04:14.730443 32551 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0821 15:06:30.524058 32551 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0821 15:07:51.037259 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54572\nI0821 15:07:51.037534 32551 solver.cpp:404]     Test net output #1: loss = 2.2659 (* 1 = 2.2659 loss)\nI0821 15:07:52.342100 32551 solver.cpp:228] Iteration 11000, loss = 0.0120014\nI0821 15:07:52.342146 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:07:52.342162 32551 solver.cpp:244]     Train net output #1: loss = 0.0120016 (* 1 = 0.0120016 loss)\nI0821 15:07:52.424080 32551 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0821 15:10:08.045318 32551 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0821 
15:11:28.578918 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58484\nI0821 15:11:28.579210 32551 solver.cpp:404]     Test net output #1: loss = 2.32304 (* 1 = 2.32304 loss)\nI0821 15:11:29.884069 32551 solver.cpp:228] Iteration 11100, loss = 0.0387328\nI0821 15:11:29.884114 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:11:29.884130 32551 solver.cpp:244]     Train net output #1: loss = 0.038733 (* 1 = 0.038733 loss)\nI0821 15:11:29.965862 32551 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0821 15:13:46.067966 32551 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0821 15:15:06.601212 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58832\nI0821 15:15:06.601480 32551 solver.cpp:404]     Test net output #1: loss = 2.40028 (* 1 = 2.40028 loss)\nI0821 15:15:07.905385 32551 solver.cpp:228] Iteration 11200, loss = 0.037915\nI0821 15:15:07.905431 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:15:07.905447 32551 solver.cpp:244]     Train net output #1: loss = 0.0379151 (* 1 = 0.0379151 loss)\nI0821 15:15:07.990252 32551 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0821 15:17:23.593278 32551 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0821 15:18:44.126835 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60172\nI0821 15:18:44.127117 32551 solver.cpp:404]     Test net output #1: loss = 2.03123 (* 1 = 2.03123 loss)\nI0821 15:18:45.431319 32551 solver.cpp:228] Iteration 11300, loss = 0.0894589\nI0821 15:18:45.431368 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:18:45.431393 32551 solver.cpp:244]     Train net output #1: loss = 0.089459 (* 1 = 0.089459 loss)\nI0821 15:18:45.508640 32551 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0821 15:21:01.215458 32551 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0821 15:22:21.713716 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56816\nI0821 15:22:21.714013 32551 solver.cpp:404]     
Test net output #1: loss = 2.48785 (* 1 = 2.48785 loss)\nI0821 15:22:23.018580 32551 solver.cpp:228] Iteration 11400, loss = 0.0201506\nI0821 15:22:23.018627 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:22:23.018645 32551 solver.cpp:244]     Train net output #1: loss = 0.0201507 (* 1 = 0.0201507 loss)\nI0821 15:22:23.104435 32551 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0821 15:24:38.774205 32551 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0821 15:25:59.283223 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57968\nI0821 15:25:59.283494 32551 solver.cpp:404]     Test net output #1: loss = 2.30575 (* 1 = 2.30575 loss)\nI0821 15:26:00.588093 32551 solver.cpp:228] Iteration 11500, loss = 0.0434597\nI0821 15:26:00.588138 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:26:00.588155 32551 solver.cpp:244]     Train net output #1: loss = 0.0434598 (* 1 = 0.0434598 loss)\nI0821 15:26:00.673517 32551 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0821 15:28:16.499903 32551 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0821 15:29:37.015393 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55724\nI0821 15:29:37.015642 32551 solver.cpp:404]     Test net output #1: loss = 2.45079 (* 1 = 2.45079 loss)\nI0821 15:29:38.319859 32551 solver.cpp:228] Iteration 11600, loss = 0.0320883\nI0821 15:29:38.319903 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:29:38.319918 32551 solver.cpp:244]     Train net output #1: loss = 0.0320885 (* 1 = 0.0320885 loss)\nI0821 15:29:38.402608 32551 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0821 15:31:53.809543 32551 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0821 15:33:14.318120 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4976\nI0821 15:33:14.318385 32551 solver.cpp:404]     Test net output #1: loss = 4.02795 (* 1 = 4.02795 loss)\nI0821 15:33:15.622601 32551 solver.cpp:228] Iteration 11700, loss = 
0.0284185\nI0821 15:33:15.622645 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:33:15.622663 32551 solver.cpp:244]     Train net output #1: loss = 0.0284187 (* 1 = 0.0284187 loss)\nI0821 15:33:15.704890 32551 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0821 15:35:31.307692 32551 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0821 15:36:51.829903 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50192\nI0821 15:36:51.830090 32551 solver.cpp:404]     Test net output #1: loss = 3.40822 (* 1 = 3.40822 loss)\nI0821 15:36:53.133729 32551 solver.cpp:228] Iteration 11800, loss = 0.0222101\nI0821 15:36:53.133774 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:36:53.133803 32551 solver.cpp:244]     Train net output #1: loss = 0.0222103 (* 1 = 0.0222103 loss)\nI0821 15:36:53.213424 32551 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0821 15:39:08.851461 32551 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0821 15:40:29.361569 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59888\nI0821 15:40:29.361776 32551 solver.cpp:404]     Test net output #1: loss = 2.12391 (* 1 = 2.12391 loss)\nI0821 15:40:30.665403 32551 solver.cpp:228] Iteration 11900, loss = 0.0208479\nI0821 15:40:30.665449 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:40:30.665464 32551 solver.cpp:244]     Train net output #1: loss = 0.0208482 (* 1 = 0.0208482 loss)\nI0821 15:40:30.744367 32551 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0821 15:42:46.683521 32551 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0821 15:44:07.192397 32551 solver.cpp:404]     Test net output #0: accuracy = 0.52464\nI0821 15:44:07.192625 32551 solver.cpp:404]     Test net output #1: loss = 3.01971 (* 1 = 3.01971 loss)\nI0821 15:44:08.496150 32551 solver.cpp:228] Iteration 12000, loss = 0.0598242\nI0821 15:44:08.496196 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:44:08.496212 32551 
solver.cpp:244]     Train net output #1: loss = 0.0598244 (* 1 = 0.0598244 loss)\nI0821 15:44:08.579524 32551 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0821 15:46:24.200672 32551 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0821 15:47:44.708735 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6116\nI0821 15:47:44.708983 32551 solver.cpp:404]     Test net output #1: loss = 2.06771 (* 1 = 2.06771 loss)\nI0821 15:47:46.012636 32551 solver.cpp:228] Iteration 12100, loss = 0.0427325\nI0821 15:47:46.012682 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:47:46.012699 32551 solver.cpp:244]     Train net output #1: loss = 0.0427327 (* 1 = 0.0427327 loss)\nI0821 15:47:46.092224 32551 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0821 15:50:01.732619 32551 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0821 15:51:22.254233 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59752\nI0821 15:51:22.254487 32551 solver.cpp:404]     Test net output #1: loss = 1.93217 (* 1 = 1.93217 loss)\nI0821 15:51:23.558641 32551 solver.cpp:228] Iteration 12200, loss = 0.040213\nI0821 15:51:23.558686 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:51:23.558703 32551 solver.cpp:244]     Train net output #1: loss = 0.0402133 (* 1 = 0.0402133 loss)\nI0821 15:51:23.640285 32551 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0821 15:53:39.334435 32551 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0821 15:54:59.865433 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57468\nI0821 15:54:59.865629 32551 solver.cpp:404]     Test net output #1: loss = 2.28267 (* 1 = 2.28267 loss)\nI0821 15:55:01.169140 32551 solver.cpp:228] Iteration 12300, loss = 0.0531576\nI0821 15:55:01.169185 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:55:01.169203 32551 solver.cpp:244]     Train net output #1: loss = 0.0531579 (* 1 = 0.0531579 loss)\nI0821 15:55:01.252807 32551 sgd_solver.cpp:166] 
Iteration 12300, lr = 0.35\nI0821 15:57:16.781652 32551 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0821 15:58:37.308226 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5398\nI0821 15:58:37.308462 32551 solver.cpp:404]     Test net output #1: loss = 3.05529 (* 1 = 3.05529 loss)\nI0821 15:58:38.612604 32551 solver.cpp:228] Iteration 12400, loss = 0.0587996\nI0821 15:58:38.612650 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:58:38.612666 32551 solver.cpp:244]     Train net output #1: loss = 0.0587998 (* 1 = 0.0587998 loss)\nI0821 15:58:38.695593 32551 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0821 16:00:54.090080 32551 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0821 16:02:14.584601 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6188\nI0821 16:02:14.584848 32551 solver.cpp:404]     Test net output #1: loss = 1.85492 (* 1 = 1.85492 loss)\nI0821 16:02:15.888214 32551 solver.cpp:228] Iteration 12500, loss = 0.0179742\nI0821 16:02:15.888259 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:02:15.888275 32551 solver.cpp:244]     Train net output #1: loss = 0.0179744 (* 1 = 0.0179744 loss)\nI0821 16:02:15.968866 32551 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0821 16:04:31.767839 32551 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0821 16:05:52.276151 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57324\nI0821 16:05:52.276397 32551 solver.cpp:404]     Test net output #1: loss = 2.19286 (* 1 = 2.19286 loss)\nI0821 16:05:53.580085 32551 solver.cpp:228] Iteration 12600, loss = 0.00815017\nI0821 16:05:53.580127 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:05:53.580143 32551 solver.cpp:244]     Train net output #1: loss = 0.00815036 (* 1 = 0.00815036 loss)\nI0821 16:05:53.659363 32551 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0821 16:08:09.501971 32551 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0821 16:09:30.013891 
32551 solver.cpp:404]     Test net output #0: accuracy = 0.5154\nI0821 16:09:30.014113 32551 solver.cpp:404]     Test net output #1: loss = 3.13926 (* 1 = 3.13926 loss)\nI0821 16:09:31.316845 32551 solver.cpp:228] Iteration 12700, loss = 0.0114627\nI0821 16:09:31.316890 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:09:31.316905 32551 solver.cpp:244]     Train net output #1: loss = 0.0114629 (* 1 = 0.0114629 loss)\nI0821 16:09:31.402035 32551 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0821 16:11:46.849874 32551 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0821 16:13:07.352921 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58304\nI0821 16:13:07.353178 32551 solver.cpp:404]     Test net output #1: loss = 2.56763 (* 1 = 2.56763 loss)\nI0821 16:13:08.657156 32551 solver.cpp:228] Iteration 12800, loss = 0.0997779\nI0821 16:13:08.657199 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 16:13:08.657215 32551 solver.cpp:244]     Train net output #1: loss = 0.0997781 (* 1 = 0.0997781 loss)\nI0821 16:13:08.743537 32551 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0821 16:15:24.292193 32551 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0821 16:16:44.665518 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57872\nI0821 16:16:44.665745 32551 solver.cpp:404]     Test net output #1: loss = 2.32536 (* 1 = 2.32536 loss)\nI0821 16:16:45.969269 32551 solver.cpp:228] Iteration 12900, loss = 0.0119954\nI0821 16:16:45.969313 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:16:45.969329 32551 solver.cpp:244]     Train net output #1: loss = 0.0119956 (* 1 = 0.0119956 loss)\nI0821 16:16:46.054033 32551 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0821 16:19:01.576304 32551 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0821 16:20:21.975534 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62748\nI0821 16:20:21.975785 32551 solver.cpp:404]     Test net output #1: 
loss = 1.85501 (* 1 = 1.85501 loss)\nI0821 16:20:23.278959 32551 solver.cpp:228] Iteration 13000, loss = 0.0527758\nI0821 16:20:23.279002 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:20:23.279019 32551 solver.cpp:244]     Train net output #1: loss = 0.052776 (* 1 = 0.052776 loss)\nI0821 16:20:23.361567 32551 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0821 16:22:39.290964 32551 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0821 16:23:59.706313 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55864\nI0821 16:23:59.706562 32551 solver.cpp:404]     Test net output #1: loss = 2.50642 (* 1 = 2.50642 loss)\nI0821 16:24:01.010191 32551 solver.cpp:228] Iteration 13100, loss = 0.0423003\nI0821 16:24:01.010234 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:24:01.010251 32551 solver.cpp:244]     Train net output #1: loss = 0.0423005 (* 1 = 0.0423005 loss)\nI0821 16:24:01.092033 32551 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0821 16:26:16.762683 32551 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0821 16:27:37.158100 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6284\nI0821 16:27:37.158309 32551 solver.cpp:404]     Test net output #1: loss = 1.81634 (* 1 = 1.81634 loss)\nI0821 16:27:38.462133 32551 solver.cpp:228] Iteration 13200, loss = 0.0810104\nI0821 16:27:38.462177 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:27:38.462193 32551 solver.cpp:244]     Train net output #1: loss = 0.0810106 (* 1 = 0.0810106 loss)\nI0821 16:27:38.546689 32551 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0821 16:29:54.011185 32551 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0821 16:31:14.454352 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54524\nI0821 16:31:14.454607 32551 solver.cpp:404]     Test net output #1: loss = 2.72918 (* 1 = 2.72918 loss)\nI0821 16:31:15.758649 32551 solver.cpp:228] Iteration 13300, loss = 0.0561363\nI0821 
16:31:15.758695 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:31:15.758711 32551 solver.cpp:244]     Train net output #1: loss = 0.0561366 (* 1 = 0.0561366 loss)\nI0821 16:31:15.841866 32551 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0821 16:33:31.517535 32551 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0821 16:34:53.151990 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59108\nI0821 16:34:53.152305 32551 solver.cpp:404]     Test net output #1: loss = 2.31565 (* 1 = 2.31565 loss)\nI0821 16:34:54.460327 32551 solver.cpp:228] Iteration 13400, loss = 0.0347516\nI0821 16:34:54.460386 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:34:54.460404 32551 solver.cpp:244]     Train net output #1: loss = 0.0347519 (* 1 = 0.0347519 loss)\nI0821 16:34:54.539816 32551 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0821 16:37:10.340520 32551 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0821 16:38:31.973312 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58684\nI0821 16:38:31.973620 32551 solver.cpp:404]     Test net output #1: loss = 2.19829 (* 1 = 2.19829 loss)\nI0821 16:38:33.282707 32551 solver.cpp:228] Iteration 13500, loss = 0.00837006\nI0821 16:38:33.282766 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:38:33.282785 32551 solver.cpp:244]     Train net output #1: loss = 0.00837029 (* 1 = 0.00837029 loss)\nI0821 16:38:33.360342 32551 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0821 16:40:49.258314 32551 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0821 16:42:10.826405 32551 solver.cpp:404]     Test net output #0: accuracy = 0.555\nI0821 16:42:10.826617 32551 solver.cpp:404]     Test net output #1: loss = 2.39629 (* 1 = 2.39629 loss)\nI0821 16:42:12.131331 32551 solver.cpp:228] Iteration 13600, loss = 0.0638758\nI0821 16:42:12.131376 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:42:12.131393 32551 solver.cpp:244]     
Train net output #1: loss = 0.063876 (* 1 = 0.063876 loss)\nI0821 16:42:12.217473 32551 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0821 16:44:27.713886 32551 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0821 16:45:48.118770 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5828\nI0821 16:45:48.119015 32551 solver.cpp:404]     Test net output #1: loss = 2.51058 (* 1 = 2.51058 loss)\nI0821 16:45:49.423548 32551 solver.cpp:228] Iteration 13700, loss = 0.0513207\nI0821 16:45:49.423593 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:45:49.423609 32551 solver.cpp:244]     Train net output #1: loss = 0.0513209 (* 1 = 0.0513209 loss)\nI0821 16:45:49.500007 32551 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0821 16:48:04.954460 32551 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0821 16:49:25.475741 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57184\nI0821 16:49:25.475950 32551 solver.cpp:404]     Test net output #1: loss = 2.31251 (* 1 = 2.31251 loss)\nI0821 16:49:26.779608 32551 solver.cpp:228] Iteration 13800, loss = 0.0764411\nI0821 16:49:26.779654 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:49:26.779671 32551 solver.cpp:244]     Train net output #1: loss = 0.0764413 (* 1 = 0.0764413 loss)\nI0821 16:49:26.861034 32551 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0821 16:51:42.695202 32551 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0821 16:53:03.221257 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61856\nI0821 16:53:03.221501 32551 solver.cpp:404]     Test net output #1: loss = 2.00474 (* 1 = 2.00474 loss)\nI0821 16:53:04.525542 32551 solver.cpp:228] Iteration 13900, loss = 0.071745\nI0821 16:53:04.525590 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 16:53:04.525616 32551 solver.cpp:244]     Train net output #1: loss = 0.0717453 (* 1 = 0.0717453 loss)\nI0821 16:53:04.608461 32551 sgd_solver.cpp:166] Iteration 13900, lr = 
0.35\nI0821 16:55:20.182920 32551 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0821 16:56:40.707139 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5122\nI0821 16:56:40.707370 32551 solver.cpp:404]     Test net output #1: loss = 3.18936 (* 1 = 3.18936 loss)\nI0821 16:56:42.010522 32551 solver.cpp:228] Iteration 14000, loss = 0.0636257\nI0821 16:56:42.010572 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 16:56:42.010596 32551 solver.cpp:244]     Train net output #1: loss = 0.0636259 (* 1 = 0.0636259 loss)\nI0821 16:56:42.090417 32551 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0821 16:58:57.921582 32551 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0821 17:00:18.443382 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57316\nI0821 17:00:18.443639 32551 solver.cpp:404]     Test net output #1: loss = 2.76553 (* 1 = 2.76553 loss)\nI0821 17:00:19.748615 32551 solver.cpp:228] Iteration 14100, loss = 0.0463775\nI0821 17:00:19.748662 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:00:19.748687 32551 solver.cpp:244]     Train net output #1: loss = 0.0463777 (* 1 = 0.0463777 loss)\nI0821 17:00:19.825708 32551 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0821 17:02:35.337889 32551 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0821 17:03:55.858927 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64616\nI0821 17:03:55.859169 32551 solver.cpp:404]     Test net output #1: loss = 1.71097 (* 1 = 1.71097 loss)\nI0821 17:03:57.164002 32551 solver.cpp:228] Iteration 14200, loss = 0.0373894\nI0821 17:03:57.164050 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:03:57.164075 32551 solver.cpp:244]     Train net output #1: loss = 0.0373896 (* 1 = 0.0373896 loss)\nI0821 17:03:57.242705 32551 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0821 17:06:12.778375 32551 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0821 17:07:33.301049 32551 solver.cpp:404] 
    Test net output #0: accuracy = 0.59892\nI0821 17:07:33.301283 32551 solver.cpp:404]     Test net output #1: loss = 2.23756 (* 1 = 2.23756 loss)\nI0821 17:07:34.606508 32551 solver.cpp:228] Iteration 14300, loss = 0.0428043\nI0821 17:07:34.606556 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:07:34.606580 32551 solver.cpp:244]     Train net output #1: loss = 0.0428045 (* 1 = 0.0428045 loss)\nI0821 17:07:34.689378 32551 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0821 17:09:50.352700 32551 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0821 17:11:10.855180 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64316\nI0821 17:11:10.855409 32551 solver.cpp:404]     Test net output #1: loss = 2.06368 (* 1 = 2.06368 loss)\nI0821 17:11:12.158982 32551 solver.cpp:228] Iteration 14400, loss = 0.0118225\nI0821 17:11:12.159030 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:11:12.159055 32551 solver.cpp:244]     Train net output #1: loss = 0.0118228 (* 1 = 0.0118228 loss)\nI0821 17:11:12.245229 32551 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0821 17:13:27.889219 32551 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0821 17:14:48.381988 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5926\nI0821 17:14:48.382254 32551 solver.cpp:404]     Test net output #1: loss = 2.60877 (* 1 = 2.60877 loss)\nI0821 17:14:49.685906 32551 solver.cpp:228] Iteration 14500, loss = 0.0445321\nI0821 17:14:49.685953 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:14:49.685977 32551 solver.cpp:244]     Train net output #1: loss = 0.0445323 (* 1 = 0.0445323 loss)\nI0821 17:14:49.767619 32551 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0821 17:17:05.498983 32551 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0821 17:18:26.022394 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59784\nI0821 17:18:26.022660 32551 solver.cpp:404]     Test net output #1: loss = 2.4204 (* 
1 = 2.4204 loss)\nI0821 17:18:27.326159 32551 solver.cpp:228] Iteration 14600, loss = 0.0177647\nI0821 17:18:27.326205 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:18:27.326228 32551 solver.cpp:244]     Train net output #1: loss = 0.0177649 (* 1 = 0.0177649 loss)\nI0821 17:18:27.402590 32551 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0821 17:20:43.002743 32551 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0821 17:22:03.514129 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55252\nI0821 17:22:03.514369 32551 solver.cpp:404]     Test net output #1: loss = 3.04875 (* 1 = 3.04875 loss)\nI0821 17:22:04.817824 32551 solver.cpp:228] Iteration 14700, loss = 0.0741214\nI0821 17:22:04.817867 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 17:22:04.817893 32551 solver.cpp:244]     Train net output #1: loss = 0.0741216 (* 1 = 0.0741216 loss)\nI0821 17:22:04.900152 32551 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0821 17:24:20.428427 32551 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0821 17:25:40.942898 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55248\nI0821 17:25:40.943145 32551 solver.cpp:404]     Test net output #1: loss = 2.90035 (* 1 = 2.90035 loss)\nI0821 17:25:42.246911 32551 solver.cpp:228] Iteration 14800, loss = 0.0507653\nI0821 17:25:42.246953 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:25:42.246979 32551 solver.cpp:244]     Train net output #1: loss = 0.0507655 (* 1 = 0.0507655 loss)\nI0821 17:25:42.336761 32551 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0821 17:27:57.796759 32551 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0821 17:29:18.350539 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62776\nI0821 17:29:18.350817 32551 solver.cpp:404]     Test net output #1: loss = 2.053 (* 1 = 2.053 loss)\nI0821 17:29:19.654713 32551 solver.cpp:228] Iteration 14900, loss = 0.020108\nI0821 17:29:19.654754 32551 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:29:19.654772 32551 solver.cpp:244]     Train net output #1: loss = 0.0201082 (* 1 = 0.0201082 loss)\nI0821 17:29:19.735749 32551 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0821 17:31:35.399291 32551 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0821 17:32:55.939143 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61024\nI0821 17:32:55.939419 32551 solver.cpp:404]     Test net output #1: loss = 2.43384 (* 1 = 2.43384 loss)\nI0821 17:32:57.242483 32551 solver.cpp:228] Iteration 15000, loss = 0.00570885\nI0821 17:32:57.242527 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:32:57.242542 32551 solver.cpp:244]     Train net output #1: loss = 0.00570907 (* 1 = 0.00570907 loss)\nI0821 17:32:57.325196 32551 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0821 17:35:13.010301 32551 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0821 17:36:33.538326 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46804\nI0821 17:36:33.538589 32551 solver.cpp:404]     Test net output #1: loss = 4.78869 (* 1 = 4.78869 loss)\nI0821 17:36:34.842238 32551 solver.cpp:228] Iteration 15100, loss = 0.0264276\nI0821 17:36:34.842279 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:36:34.842295 32551 solver.cpp:244]     Train net output #1: loss = 0.0264278 (* 1 = 0.0264278 loss)\nI0821 17:36:34.919330 32551 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0821 17:38:50.272452 32551 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0821 17:40:10.809394 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57976\nI0821 17:40:10.809661 32551 solver.cpp:404]     Test net output #1: loss = 2.87861 (* 1 = 2.87861 loss)\nI0821 17:40:12.113678 32551 solver.cpp:228] Iteration 15200, loss = 0.0277432\nI0821 17:40:12.113725 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:40:12.113741 32551 solver.cpp:244]     Train net output #1: loss 
= 0.0277435 (* 1 = 0.0277435 loss)\nI0821 17:40:12.199391 32551 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0821 17:42:27.783565 32551 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0821 17:43:48.301329 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57232\nI0821 17:43:48.301605 32551 solver.cpp:404]     Test net output #1: loss = 2.755 (* 1 = 2.755 loss)\nI0821 17:43:49.605341 32551 solver.cpp:228] Iteration 15300, loss = 0.0561743\nI0821 17:43:49.605386 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:43:49.605410 32551 solver.cpp:244]     Train net output #1: loss = 0.0561746 (* 1 = 0.0561746 loss)\nI0821 17:43:49.684324 32551 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0821 17:46:05.413063 32551 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0821 17:47:25.927217 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5886\nI0821 17:47:25.927495 32551 solver.cpp:404]     Test net output #1: loss = 2.68943 (* 1 = 2.68943 loss)\nI0821 17:47:27.231603 32551 solver.cpp:228] Iteration 15400, loss = 0.0149605\nI0821 17:47:27.231652 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:47:27.231675 32551 solver.cpp:244]     Train net output #1: loss = 0.0149608 (* 1 = 0.0149608 loss)\nI0821 17:47:27.318421 32551 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0821 17:49:43.034793 32551 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0821 17:51:03.567287 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51568\nI0821 17:51:03.567569 32551 solver.cpp:404]     Test net output #1: loss = 3.52881 (* 1 = 3.52881 loss)\nI0821 17:51:04.871006 32551 solver.cpp:228] Iteration 15500, loss = 0.00336235\nI0821 17:51:04.871054 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:51:04.871078 32551 solver.cpp:244]     Train net output #1: loss = 0.0033626 (* 1 = 0.0033626 loss)\nI0821 17:51:04.952836 32551 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0821 17:53:20.533056 32551 
solver.cpp:337] Iteration 15600, Testing net (#0)\nI0821 17:54:41.062633 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5908\nI0821 17:54:41.062904 32551 solver.cpp:404]     Test net output #1: loss = 2.59645 (* 1 = 2.59645 loss)\nI0821 17:54:42.367799 32551 solver.cpp:228] Iteration 15600, loss = 0.0290269\nI0821 17:54:42.367848 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:54:42.367872 32551 solver.cpp:244]     Train net output #1: loss = 0.0290272 (* 1 = 0.0290272 loss)\nI0821 17:54:42.450107 32551 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0821 17:56:58.173019 32551 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0821 17:58:18.708765 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62012\nI0821 17:58:18.709046 32551 solver.cpp:404]     Test net output #1: loss = 2.30479 (* 1 = 2.30479 loss)\nI0821 17:58:20.012611 32551 solver.cpp:228] Iteration 15700, loss = 0.0563617\nI0821 17:58:20.012660 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 17:58:20.012686 32551 solver.cpp:244]     Train net output #1: loss = 0.0563619 (* 1 = 0.0563619 loss)\nI0821 17:58:20.092027 32551 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0821 18:00:35.633491 32551 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0821 18:01:56.170634 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62224\nI0821 18:01:56.170899 32551 solver.cpp:404]     Test net output #1: loss = 2.70517 (* 1 = 2.70517 loss)\nI0821 18:01:57.474534 32551 solver.cpp:228] Iteration 15800, loss = 0.0104342\nI0821 18:01:57.474581 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:01:57.474606 32551 solver.cpp:244]     Train net output #1: loss = 0.0104345 (* 1 = 0.0104345 loss)\nI0821 18:01:57.549623 32551 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0821 18:04:13.223506 32551 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0821 18:05:33.756624 32551 solver.cpp:404]     Test net output #0: accuracy 
= 0.47236\nI0821 18:05:33.756886 32551 solver.cpp:404]     Test net output #1: loss = 4.19782 (* 1 = 4.19782 loss)\nI0821 18:05:35.060547 32551 solver.cpp:228] Iteration 15900, loss = 0.0259989\nI0821 18:05:35.060595 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:05:35.060621 32551 solver.cpp:244]     Train net output #1: loss = 0.0259991 (* 1 = 0.0259991 loss)\nI0821 18:05:35.139083 32551 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0821 18:07:50.833619 32551 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0821 18:09:11.359421 32551 solver.cpp:404]     Test net output #0: accuracy = 0.553\nI0821 18:09:11.359701 32551 solver.cpp:404]     Test net output #1: loss = 2.4405 (* 1 = 2.4405 loss)\nI0821 18:09:12.663375 32551 solver.cpp:228] Iteration 16000, loss = 0.0740043\nI0821 18:09:12.663422 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:09:12.663446 32551 solver.cpp:244]     Train net output #1: loss = 0.0740045 (* 1 = 0.0740045 loss)\nI0821 18:09:12.747236 32551 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0821 18:11:28.313256 32551 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0821 18:12:49.444233 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6392\nI0821 18:12:49.444540 32551 solver.cpp:404]     Test net output #1: loss = 1.73619 (* 1 = 1.73619 loss)\nI0821 18:12:50.753298 32551 solver.cpp:228] Iteration 16100, loss = 0.0405425\nI0821 18:12:50.753358 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:12:50.753376 32551 solver.cpp:244]     Train net output #1: loss = 0.0405427 (* 1 = 0.0405427 loss)\nI0821 18:12:50.834350 32551 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0821 18:15:06.566067 32551 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0821 18:16:28.106699 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6198\nI0821 18:16:28.106974 32551 solver.cpp:404]     Test net output #1: loss = 2.01216 (* 1 = 2.01216 loss)\nI0821 
18:16:29.416568 32551 solver.cpp:228] Iteration 16200, loss = 0.0423537\nI0821 18:16:29.416630 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:16:29.416647 32551 solver.cpp:244]     Train net output #1: loss = 0.0423539 (* 1 = 0.0423539 loss)\nI0821 18:16:29.489616 32551 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0821 18:18:45.145097 32551 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0821 18:20:06.600843 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58392\nI0821 18:20:06.601138 32551 solver.cpp:404]     Test net output #1: loss = 2.43336 (* 1 = 2.43336 loss)\nI0821 18:20:07.910429 32551 solver.cpp:228] Iteration 16300, loss = 0.0161681\nI0821 18:20:07.910488 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:20:07.910506 32551 solver.cpp:244]     Train net output #1: loss = 0.0161683 (* 1 = 0.0161683 loss)\nI0821 18:20:07.989629 32551 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0821 18:22:23.679770 32551 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0821 18:23:45.269763 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58096\nI0821 18:23:45.270068 32551 solver.cpp:404]     Test net output #1: loss = 2.22603 (* 1 = 2.22603 loss)\nI0821 18:23:46.579468 32551 solver.cpp:228] Iteration 16400, loss = 0.060889\nI0821 18:23:46.579530 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 18:23:46.579546 32551 solver.cpp:244]     Train net output #1: loss = 0.0608892 (* 1 = 0.0608892 loss)\nI0821 18:23:46.660586 32551 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0821 18:26:02.327443 32551 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0821 18:27:23.957204 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51324\nI0821 18:27:23.957525 32551 solver.cpp:404]     Test net output #1: loss = 3.00432 (* 1 = 3.00432 loss)\nI0821 18:27:25.267199 32551 solver.cpp:228] Iteration 16500, loss = 0.0435396\nI0821 18:27:25.267261 32551 solver.cpp:244]     Train net 
output #0: accuracy = 0.984\nI0821 18:27:25.267277 32551 solver.cpp:244]     Train net output #1: loss = 0.0435397 (* 1 = 0.0435397 loss)\nI0821 18:27:25.340893 32551 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0821 18:29:40.984346 32551 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0821 18:31:02.598490 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59224\nI0821 18:31:02.598804 32551 solver.cpp:404]     Test net output #1: loss = 2.27329 (* 1 = 2.27329 loss)\nI0821 18:31:03.908129 32551 solver.cpp:228] Iteration 16600, loss = 0.0810549\nI0821 18:31:03.908191 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:31:03.908210 32551 solver.cpp:244]     Train net output #1: loss = 0.0810551 (* 1 = 0.0810551 loss)\nI0821 18:31:03.989320 32551 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0821 18:33:19.860067 32551 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0821 18:34:41.462631 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62752\nI0821 18:34:41.462940 32551 solver.cpp:404]     Test net output #1: loss = 2.00485 (* 1 = 2.00485 loss)\nI0821 18:34:42.771219 32551 solver.cpp:228] Iteration 16700, loss = 0.0548801\nI0821 18:34:42.771281 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:34:42.771299 32551 solver.cpp:244]     Train net output #1: loss = 0.0548803 (* 1 = 0.0548803 loss)\nI0821 18:34:42.845654 32551 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0821 18:36:58.907904 32551 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0821 18:38:20.526470 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6042\nI0821 18:38:20.526782 32551 solver.cpp:404]     Test net output #1: loss = 2.2632 (* 1 = 2.2632 loss)\nI0821 18:38:21.835187 32551 solver.cpp:228] Iteration 16800, loss = 0.0047636\nI0821 18:38:21.835247 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:38:21.835263 32551 solver.cpp:244]     Train net output #1: loss = 0.00476382 (* 1 = 0.00476382 
loss)\nI0821 18:38:21.911278 32551 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0821 18:40:38.089025 32551 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0821 18:41:59.700904 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5654\nI0821 18:41:59.701208 32551 solver.cpp:404]     Test net output #1: loss = 2.27995 (* 1 = 2.27995 loss)\nI0821 18:42:01.009588 32551 solver.cpp:228] Iteration 16900, loss = 0.0528805\nI0821 18:42:01.009647 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:42:01.009665 32551 solver.cpp:244]     Train net output #1: loss = 0.0528808 (* 1 = 0.0528808 loss)\nI0821 18:42:01.087945 32551 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0821 18:44:17.382818 32551 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0821 18:45:38.971873 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63856\nI0821 18:45:38.972183 32551 solver.cpp:404]     Test net output #1: loss = 1.64535 (* 1 = 1.64535 loss)\nI0821 18:45:40.280104 32551 solver.cpp:228] Iteration 17000, loss = 0.00939546\nI0821 18:45:40.280164 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:45:40.280182 32551 solver.cpp:244]     Train net output #1: loss = 0.00939568 (* 1 = 0.00939568 loss)\nI0821 18:45:40.356288 32551 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0821 18:47:56.518303 32551 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0821 18:49:18.130820 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54896\nI0821 18:49:18.131129 32551 solver.cpp:404]     Test net output #1: loss = 2.71172 (* 1 = 2.71172 loss)\nI0821 18:49:19.440497 32551 solver.cpp:228] Iteration 17100, loss = 0.065295\nI0821 18:49:19.440557 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:49:19.440573 32551 solver.cpp:244]     Train net output #1: loss = 0.0652952 (* 1 = 0.0652952 loss)\nI0821 18:49:19.520722 32551 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0821 18:51:35.638731 32551 solver.cpp:337] 
Iteration 17200, Testing net (#0)\nI0821 18:52:56.774394 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61264\nI0821 18:52:56.774655 32551 solver.cpp:404]     Test net output #1: loss = 2.23023 (* 1 = 2.23023 loss)\nI0821 18:52:58.083245 32551 solver.cpp:228] Iteration 17200, loss = 0.0409899\nI0821 18:52:58.083304 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:52:58.083322 32551 solver.cpp:244]     Train net output #1: loss = 0.0409901 (* 1 = 0.0409901 loss)\nI0821 18:52:58.159440 32551 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0821 18:55:14.354332 32551 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0821 18:56:35.934422 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61216\nI0821 18:56:35.934648 32551 solver.cpp:404]     Test net output #1: loss = 2.5237 (* 1 = 2.5237 loss)\nI0821 18:56:37.243114 32551 solver.cpp:228] Iteration 17300, loss = 0.0493073\nI0821 18:56:37.243175 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:56:37.243192 32551 solver.cpp:244]     Train net output #1: loss = 0.0493075 (* 1 = 0.0493075 loss)\nI0821 18:56:37.324383 32551 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0821 18:58:53.592716 32551 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0821 19:00:15.173745 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58428\nI0821 19:00:15.173982 32551 solver.cpp:404]     Test net output #1: loss = 2.52339 (* 1 = 2.52339 loss)\nI0821 19:00:16.482530 32551 solver.cpp:228] Iteration 17400, loss = 0.0647927\nI0821 19:00:16.482595 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:00:16.482614 32551 solver.cpp:244]     Train net output #1: loss = 0.0647929 (* 1 = 0.0647929 loss)\nI0821 19:00:16.561667 32551 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0821 19:02:32.767421 32551 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0821 19:03:54.365456 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63972\nI0821 
19:03:54.365691 32551 solver.cpp:404]     Test net output #1: loss = 1.94386 (* 1 = 1.94386 loss)\nI0821 19:03:55.674834 32551 solver.cpp:228] Iteration 17500, loss = 0.0218948\nI0821 19:03:55.674901 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:03:55.674918 32551 solver.cpp:244]     Train net output #1: loss = 0.021895 (* 1 = 0.021895 loss)\nI0821 19:03:55.746953 32551 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0821 19:06:11.796393 32551 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0821 19:07:33.376822 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60412\nI0821 19:07:33.377063 32551 solver.cpp:404]     Test net output #1: loss = 2.30406 (* 1 = 2.30406 loss)\nI0821 19:07:34.685843 32551 solver.cpp:228] Iteration 17600, loss = 0.0680212\nI0821 19:07:34.685909 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:07:34.685926 32551 solver.cpp:244]     Train net output #1: loss = 0.0680214 (* 1 = 0.0680214 loss)\nI0821 19:07:34.762667 32551 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0821 19:09:50.809173 32551 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0821 19:11:12.389487 32551 solver.cpp:404]     Test net output #0: accuracy = 0.591\nI0821 19:11:12.389758 32551 solver.cpp:404]     Test net output #1: loss = 2.46833 (* 1 = 2.46833 loss)\nI0821 19:11:13.698587 32551 solver.cpp:228] Iteration 17700, loss = 0.0499346\nI0821 19:11:13.698647 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:11:13.698667 32551 solver.cpp:244]     Train net output #1: loss = 0.0499348 (* 1 = 0.0499348 loss)\nI0821 19:11:13.778949 32551 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0821 19:13:29.992480 32551 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0821 19:14:51.577414 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60716\nI0821 19:14:51.577637 32551 solver.cpp:404]     Test net output #1: loss = 2.30117 (* 1 = 2.30117 loss)\nI0821 19:14:52.886308 32551 
solver.cpp:228] Iteration 17800, loss = 0.0285546\nI0821 19:14:52.886366 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:14:52.886384 32551 solver.cpp:244]     Train net output #1: loss = 0.0285548 (* 1 = 0.0285548 loss)\nI0821 19:14:52.959512 32551 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0821 19:17:09.169769 32551 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0821 19:18:30.747640 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53356\nI0821 19:18:30.747879 32551 solver.cpp:404]     Test net output #1: loss = 2.62847 (* 1 = 2.62847 loss)\nI0821 19:18:32.056309 32551 solver.cpp:228] Iteration 17900, loss = 0.0714682\nI0821 19:18:32.056371 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:18:32.056388 32551 solver.cpp:244]     Train net output #1: loss = 0.0714684 (* 1 = 0.0714684 loss)\nI0821 19:18:32.134428 32551 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0821 19:20:48.301578 32551 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0821 19:22:09.849546 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56028\nI0821 19:22:09.849812 32551 solver.cpp:404]     Test net output #1: loss = 2.60177 (* 1 = 2.60177 loss)\nI0821 19:22:11.158627 32551 solver.cpp:228] Iteration 18000, loss = 0.0195054\nI0821 19:22:11.158687 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:22:11.158704 32551 solver.cpp:244]     Train net output #1: loss = 0.0195056 (* 1 = 0.0195056 loss)\nI0821 19:22:11.233897 32551 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0821 19:24:27.441931 32551 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0821 19:25:49.000867 32551 solver.cpp:404]     Test net output #0: accuracy = 0.43832\nI0821 19:25:49.001124 32551 solver.cpp:404]     Test net output #1: loss = 4.62722 (* 1 = 4.62722 loss)\nI0821 19:25:50.310431 32551 solver.cpp:228] Iteration 18100, loss = 0.059098\nI0821 19:25:50.310494 32551 solver.cpp:244]     Train net output #0: accuracy 
= 0.968\nI0821 19:25:50.310513 32551 solver.cpp:244]     Train net output #1: loss = 0.0590983 (* 1 = 0.0590983 loss)\nI0821 19:25:50.390864 32551 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0821 19:28:06.674155 32551 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0821 19:29:28.261283 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48704\nI0821 19:29:28.261534 32551 solver.cpp:404]     Test net output #1: loss = 3.73029 (* 1 = 3.73029 loss)\nI0821 19:29:29.570960 32551 solver.cpp:228] Iteration 18200, loss = 0.0409191\nI0821 19:29:29.571020 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:29:29.571038 32551 solver.cpp:244]     Train net output #1: loss = 0.0409193 (* 1 = 0.0409193 loss)\nI0821 19:29:29.650141 32551 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0821 19:31:45.826009 32551 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0821 19:33:07.384194 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61824\nI0821 19:33:07.384474 32551 solver.cpp:404]     Test net output #1: loss = 2.33671 (* 1 = 2.33671 loss)\nI0821 19:33:08.694095 32551 solver.cpp:228] Iteration 18300, loss = 0.0677894\nI0821 19:33:08.694156 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:33:08.694173 32551 solver.cpp:244]     Train net output #1: loss = 0.0677896 (* 1 = 0.0677896 loss)\nI0821 19:33:08.768322 32551 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0821 19:35:24.979051 32551 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0821 19:36:46.563674 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63788\nI0821 19:36:46.563904 32551 solver.cpp:404]     Test net output #1: loss = 1.9933 (* 1 = 1.9933 loss)\nI0821 19:36:47.873385 32551 solver.cpp:228] Iteration 18400, loss = 0.0203937\nI0821 19:36:47.873445 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:36:47.873463 32551 solver.cpp:244]     Train net output #1: loss = 0.020394 (* 1 = 0.020394 loss)\nI0821 
19:36:47.953167 32551 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0821 19:39:04.124550 32551 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0821 19:40:25.694831 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61636\nI0821 19:40:25.695106 32551 solver.cpp:404]     Test net output #1: loss = 2.4511 (* 1 = 2.4511 loss)\nI0821 19:40:27.004067 32551 solver.cpp:228] Iteration 18500, loss = 0.0574083\nI0821 19:40:27.004128 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:40:27.004145 32551 solver.cpp:244]     Train net output #1: loss = 0.0574085 (* 1 = 0.0574085 loss)\nI0821 19:40:27.079107 32551 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0821 19:42:43.309739 32551 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0821 19:44:04.801089 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6598\nI0821 19:44:04.801312 32551 solver.cpp:404]     Test net output #1: loss = 1.67039 (* 1 = 1.67039 loss)\nI0821 19:44:06.110307 32551 solver.cpp:228] Iteration 18600, loss = 0.0152254\nI0821 19:44:06.110365 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:44:06.110383 32551 solver.cpp:244]     Train net output #1: loss = 0.0152257 (* 1 = 0.0152257 loss)\nI0821 19:44:06.184105 32551 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0821 19:46:22.342471 32551 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0821 19:47:43.645689 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63596\nI0821 19:47:43.645917 32551 solver.cpp:404]     Test net output #1: loss = 1.84331 (* 1 = 1.84331 loss)\nI0821 19:47:44.954373 32551 solver.cpp:228] Iteration 18700, loss = 0.03573\nI0821 19:47:44.954432 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:47:44.954449 32551 solver.cpp:244]     Train net output #1: loss = 0.0357303 (* 1 = 0.0357303 loss)\nI0821 19:47:45.030957 32551 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0821 19:50:01.243330 32551 solver.cpp:337] Iteration 18800, 
Testing net (#0)\nI0821 19:51:22.626977 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60668\nI0821 19:51:22.627235 32551 solver.cpp:404]     Test net output #1: loss = 2.15097 (* 1 = 2.15097 loss)\nI0821 19:51:23.936662 32551 solver.cpp:228] Iteration 18800, loss = 0.0326332\nI0821 19:51:23.936720 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:51:23.936738 32551 solver.cpp:244]     Train net output #1: loss = 0.0326334 (* 1 = 0.0326334 loss)\nI0821 19:51:24.008713 32551 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0821 19:53:40.217876 32551 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0821 19:55:01.709205 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63416\nI0821 19:55:01.709451 32551 solver.cpp:404]     Test net output #1: loss = 2.08528 (* 1 = 2.08528 loss)\nI0821 19:55:03.018270 32551 solver.cpp:228] Iteration 18900, loss = 0.0425291\nI0821 19:55:03.018327 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:55:03.018344 32551 solver.cpp:244]     Train net output #1: loss = 0.0425293 (* 1 = 0.0425293 loss)\nI0821 19:55:03.094815 32551 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0821 19:57:19.020041 32551 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0821 19:58:40.553133 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60312\nI0821 19:58:40.553390 32551 solver.cpp:404]     Test net output #1: loss = 2.42198 (* 1 = 2.42198 loss)\nI0821 19:58:41.862382 32551 solver.cpp:228] Iteration 19000, loss = 0.0192022\nI0821 19:58:41.862442 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:58:41.862459 32551 solver.cpp:244]     Train net output #1: loss = 0.0192024 (* 1 = 0.0192024 loss)\nI0821 19:58:41.939759 32551 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0821 20:00:58.052094 32551 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0821 20:02:19.608800 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54776\nI0821 20:02:19.609107 
32551 solver.cpp:404]     Test net output #1: loss = 2.85697 (* 1 = 2.85697 loss)\nI0821 20:02:20.918551 32551 solver.cpp:228] Iteration 19100, loss = 0.0445383\nI0821 20:02:20.918612 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:02:20.918629 32551 solver.cpp:244]     Train net output #1: loss = 0.0445386 (* 1 = 0.0445386 loss)\nI0821 20:02:20.993844 32551 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0821 20:04:37.148200 32551 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0821 20:05:58.694933 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60408\nI0821 20:05:58.695157 32551 solver.cpp:404]     Test net output #1: loss = 2.41018 (* 1 = 2.41018 loss)\nI0821 20:06:00.004148 32551 solver.cpp:228] Iteration 19200, loss = 0.05149\nI0821 20:06:00.004205 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:06:00.004225 32551 solver.cpp:244]     Train net output #1: loss = 0.0514903 (* 1 = 0.0514903 loss)\nI0821 20:06:00.082790 32551 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0821 20:08:16.266306 32551 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0821 20:09:37.829618 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51784\nI0821 20:09:37.829874 32551 solver.cpp:404]     Test net output #1: loss = 3.85649 (* 1 = 3.85649 loss)\nI0821 20:09:39.137307 32551 solver.cpp:228] Iteration 19300, loss = 0.00494295\nI0821 20:09:39.137367 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:09:39.137385 32551 solver.cpp:244]     Train net output #1: loss = 0.00494325 (* 1 = 0.00494325 loss)\nI0821 20:09:39.213755 32551 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0821 20:11:55.330515 32551 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0821 20:13:16.919158 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5994\nI0821 20:13:16.919448 32551 solver.cpp:404]     Test net output #1: loss = 2.61537 (* 1 = 2.61537 loss)\nI0821 20:13:18.227334 32551 solver.cpp:228] 
Iteration 19400, loss = 0.00798729\nI0821 20:13:18.227394 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:13:18.227411 32551 solver.cpp:244]     Train net output #1: loss = 0.00798758 (* 1 = 0.00798758 loss)\nI0821 20:13:18.305173 32551 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0821 20:15:34.433977 32551 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0821 20:16:55.910334 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62964\nI0821 20:16:55.910547 32551 solver.cpp:404]     Test net output #1: loss = 2.71807 (* 1 = 2.71807 loss)\nI0821 20:16:57.219609 32551 solver.cpp:228] Iteration 19500, loss = 0.00949996\nI0821 20:16:57.219669 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:16:57.219687 32551 solver.cpp:244]     Train net output #1: loss = 0.00950025 (* 1 = 0.00950025 loss)\nI0821 20:16:57.298593 32551 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0821 20:19:13.425990 32551 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0821 20:20:34.900758 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50008\nI0821 20:20:34.901052 32551 solver.cpp:404]     Test net output #1: loss = 3.34534 (* 1 = 3.34534 loss)\nI0821 20:20:36.209669 32551 solver.cpp:228] Iteration 19600, loss = 0.01734\nI0821 20:20:36.209729 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:20:36.209748 32551 solver.cpp:244]     Train net output #1: loss = 0.0173403 (* 1 = 0.0173403 loss)\nI0821 20:20:36.285471 32551 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0821 20:22:52.493657 32551 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0821 20:24:14.113289 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61028\nI0821 20:24:14.113596 32551 solver.cpp:404]     Test net output #1: loss = 2.53927 (* 1 = 2.53927 loss)\nI0821 20:24:15.422750 32551 solver.cpp:228] Iteration 19700, loss = 0.0573559\nI0821 20:24:15.422807 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 
20:24:15.422827 32551 solver.cpp:244]     Train net output #1: loss = 0.0573562 (* 1 = 0.0573562 loss)\nI0821 20:24:15.497668 32551 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0821 20:26:31.702388 32551 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0821 20:27:53.295567 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60176\nI0821 20:27:53.295898 32551 solver.cpp:404]     Test net output #1: loss = 2.52986 (* 1 = 2.52986 loss)\nI0821 20:27:54.603720 32551 solver.cpp:228] Iteration 19800, loss = 0.0178249\nI0821 20:27:54.603776 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:27:54.603801 32551 solver.cpp:244]     Train net output #1: loss = 0.0178252 (* 1 = 0.0178252 loss)\nI0821 20:27:54.683408 32551 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0821 20:30:10.760339 32551 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0821 20:31:32.352936 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63584\nI0821 20:31:32.353238 32551 solver.cpp:404]     Test net output #1: loss = 1.84029 (* 1 = 1.84029 loss)\nI0821 20:31:33.662475 32551 solver.cpp:228] Iteration 19900, loss = 0.0244363\nI0821 20:31:33.662531 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:31:33.662549 32551 solver.cpp:244]     Train net output #1: loss = 0.0244366 (* 1 = 0.0244366 loss)\nI0821 20:31:33.742058 32551 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0821 20:33:49.693110 32551 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0821 20:35:11.323657 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63752\nI0821 20:35:11.323962 32551 solver.cpp:404]     Test net output #1: loss = 1.87984 (* 1 = 1.87984 loss)\nI0821 20:35:12.632961 32551 solver.cpp:228] Iteration 20000, loss = 0.0221363\nI0821 20:35:12.633013 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:35:12.633029 32551 solver.cpp:244]     Train net output #1: loss = 0.0221366 (* 1 = 0.0221366 loss)\nI0821 20:35:12.707026 
32551 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0821 20:37:28.681582 32551 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0821 20:38:50.306097 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55696\nI0821 20:38:50.306427 32551 solver.cpp:404]     Test net output #1: loss = 2.45199 (* 1 = 2.45199 loss)\nI0821 20:38:51.614936 32551 solver.cpp:228] Iteration 20100, loss = 0.00432836\nI0821 20:38:51.614990 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:38:51.615006 32551 solver.cpp:244]     Train net output #1: loss = 0.00432864 (* 1 = 0.00432864 loss)\nI0821 20:38:51.697540 32551 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0821 20:41:07.707139 32551 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0821 20:42:29.311908 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60292\nI0821 20:42:29.312222 32551 solver.cpp:404]     Test net output #1: loss = 2.29721 (* 1 = 2.29721 loss)\nI0821 20:42:30.620578 32551 solver.cpp:228] Iteration 20200, loss = 0.0225807\nI0821 20:42:30.620638 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:42:30.620657 32551 solver.cpp:244]     Train net output #1: loss = 0.022581 (* 1 = 0.022581 loss)\nI0821 20:42:30.699915 32551 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0821 20:44:46.825192 32551 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0821 20:46:08.423280 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64664\nI0821 20:46:08.423585 32551 solver.cpp:404]     Test net output #1: loss = 2.03092 (* 1 = 2.03092 loss)\nI0821 20:46:09.731662 32551 solver.cpp:228] Iteration 20300, loss = 0.0473731\nI0821 20:46:09.731721 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:46:09.731739 32551 solver.cpp:244]     Train net output #1: loss = 0.0473734 (* 1 = 0.0473734 loss)\nI0821 20:46:09.814455 32551 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0821 20:48:26.127413 32551 solver.cpp:337] Iteration 20400, Testing net 
(#0)\nI0821 20:49:47.731613 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6466\nI0821 20:49:47.731942 32551 solver.cpp:404]     Test net output #1: loss = 2.02977 (* 1 = 2.02977 loss)\nI0821 20:49:49.039894 32551 solver.cpp:228] Iteration 20400, loss = 0.053721\nI0821 20:49:49.039955 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:49:49.039973 32551 solver.cpp:244]     Train net output #1: loss = 0.0537212 (* 1 = 0.0537212 loss)\nI0821 20:49:49.120045 32551 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0821 20:52:05.319859 32551 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0821 20:53:26.928515 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58252\nI0821 20:53:26.928937 32551 solver.cpp:404]     Test net output #1: loss = 2.37565 (* 1 = 2.37565 loss)\nI0821 20:53:28.236443 32551 solver.cpp:228] Iteration 20500, loss = 0.0189178\nI0821 20:53:28.236502 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:53:28.236521 32551 solver.cpp:244]     Train net output #1: loss = 0.0189181 (* 1 = 0.0189181 loss)\nI0821 20:53:28.325018 32551 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0821 20:55:44.903362 32551 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0821 20:57:06.480337 32551 solver.cpp:404]     Test net output #0: accuracy = 0.66452\nI0821 20:57:06.480669 32551 solver.cpp:404]     Test net output #1: loss = 1.64387 (* 1 = 1.64387 loss)\nI0821 20:57:07.789031 32551 solver.cpp:228] Iteration 20600, loss = 0.0241791\nI0821 20:57:07.789093 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:57:07.789110 32551 solver.cpp:244]     Train net output #1: loss = 0.0241794 (* 1 = 0.0241794 loss)\nI0821 20:57:07.872658 32551 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0821 20:59:24.075027 32551 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0821 21:00:45.641930 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62908\nI0821 21:00:45.642177 32551 
solver.cpp:404]     Test net output #1: loss = 2.13992 (* 1 = 2.13992 loss)\nI0821 21:00:46.950414 32551 solver.cpp:228] Iteration 20700, loss = 0.0371071\nI0821 21:00:46.950474 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:00:46.950490 32551 solver.cpp:244]     Train net output #1: loss = 0.0371074 (* 1 = 0.0371074 loss)\nI0821 21:00:47.031605 32551 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0821 21:03:03.233728 32551 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0821 21:04:24.813943 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63664\nI0821 21:04:24.814189 32551 solver.cpp:404]     Test net output #1: loss = 1.89837 (* 1 = 1.89837 loss)\nI0821 21:04:26.123126 32551 solver.cpp:228] Iteration 20800, loss = 0.117777\nI0821 21:04:26.123184 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 21:04:26.123203 32551 solver.cpp:244]     Train net output #1: loss = 0.117777 (* 1 = 0.117777 loss)\nI0821 21:04:26.205201 32551 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0821 21:06:42.336872 32551 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0821 21:08:03.886188 32551 solver.cpp:404]     Test net output #0: accuracy = 0.621\nI0821 21:08:03.886421 32551 solver.cpp:404]     Test net output #1: loss = 2.35547 (* 1 = 2.35547 loss)\nI0821 21:08:05.196112 32551 solver.cpp:228] Iteration 20900, loss = 0.0302998\nI0821 21:08:05.196177 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:08:05.196202 32551 solver.cpp:244]     Train net output #1: loss = 0.0303001 (* 1 = 0.0303001 loss)\nI0821 21:08:05.270269 32551 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0821 21:10:21.589735 32551 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0821 21:11:43.162732 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61956\nI0821 21:11:43.162971 32551 solver.cpp:404]     Test net output #1: loss = 2.1677 (* 1 = 2.1677 loss)\nI0821 21:11:44.471470 32551 solver.cpp:228] Iteration 
21000, loss = 0.0348975\nI0821 21:11:44.471529 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:11:44.471547 32551 solver.cpp:244]     Train net output #1: loss = 0.0348978 (* 1 = 0.0348978 loss)\nI0821 21:11:44.547163 32551 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0821 21:14:00.519306 32551 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0821 21:15:22.083124 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55196\nI0821 21:15:22.083376 32551 solver.cpp:404]     Test net output #1: loss = 2.80282 (* 1 = 2.80282 loss)\nI0821 21:15:23.392729 32551 solver.cpp:228] Iteration 21100, loss = 0.0160335\nI0821 21:15:23.392791 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:15:23.392808 32551 solver.cpp:244]     Train net output #1: loss = 0.0160338 (* 1 = 0.0160338 loss)\nI0821 21:15:23.466358 32551 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0821 21:17:39.459257 32551 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0821 21:19:01.023507 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57868\nI0821 21:19:01.023738 32551 solver.cpp:404]     Test net output #1: loss = 2.09094 (* 1 = 2.09094 loss)\nI0821 21:19:02.333282 32551 solver.cpp:228] Iteration 21200, loss = 0.00593704\nI0821 21:19:02.333343 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:19:02.333359 32551 solver.cpp:244]     Train net output #1: loss = 0.00593733 (* 1 = 0.00593733 loss)\nI0821 21:19:02.413832 32551 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0821 21:21:18.614706 32551 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0821 21:22:39.936033 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59276\nI0821 21:22:39.936291 32551 solver.cpp:404]     Test net output #1: loss = 1.68676 (* 1 = 1.68676 loss)\nI0821 21:22:41.245860 32551 solver.cpp:228] Iteration 21300, loss = 0.011839\nI0821 21:22:41.245925 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 
21:22:41.245944 32551 solver.cpp:244]     Train net output #1: loss = 0.0118393 (* 1 = 0.0118393 loss)\nI0821 21:22:41.328671 32551 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0821 21:24:57.707960 32551 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0821 21:26:18.692209 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51648\nI0821 21:26:18.692422 32551 solver.cpp:404]     Test net output #1: loss = 2.22862 (* 1 = 2.22862 loss)\nI0821 21:26:20.001672 32551 solver.cpp:228] Iteration 21400, loss = 0.00428587\nI0821 21:26:20.001732 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:26:20.001750 32551 solver.cpp:244]     Train net output #1: loss = 0.00428615 (* 1 = 0.00428615 loss)\nI0821 21:26:20.084769 32551 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0821 21:28:36.202396 32551 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0821 21:29:57.155310 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51344\nI0821 21:29:57.155536 32551 solver.cpp:404]     Test net output #1: loss = 2.72433 (* 1 = 2.72433 loss)\nI0821 21:29:58.464316 32551 solver.cpp:228] Iteration 21500, loss = 0.133859\nI0821 21:29:58.464377 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 21:29:58.464395 32551 solver.cpp:244]     Train net output #1: loss = 0.133859 (* 1 = 0.133859 loss)\nI0821 21:29:58.542827 32551 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0821 21:32:14.761147 32551 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0821 21:33:36.251392 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58024\nI0821 21:33:36.251688 32551 solver.cpp:404]     Test net output #1: loss = 2.61654 (* 1 = 2.61654 loss)\nI0821 21:33:37.570216 32551 solver.cpp:228] Iteration 21600, loss = 0.10243\nI0821 21:33:37.570281 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 21:33:37.570299 32551 solver.cpp:244]     Train net output #1: loss = 0.102431 (* 1 = 0.102431 loss)\nI0821 21:33:37.636878 32551 
sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0821 21:35:53.760242 32551 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0821 21:37:15.346977 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61892\nI0821 21:37:15.347235 32551 solver.cpp:404]     Test net output #1: loss = 2.34102 (* 1 = 2.34102 loss)\nI0821 21:37:16.656589 32551 solver.cpp:228] Iteration 21700, loss = 0.0223855\nI0821 21:37:16.656648 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:37:16.656666 32551 solver.cpp:244]     Train net output #1: loss = 0.0223858 (* 1 = 0.0223858 loss)\nI0821 21:37:16.736595 32551 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0821 21:39:32.726254 32551 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0821 21:40:53.493633 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63876\nI0821 21:40:53.493937 32551 solver.cpp:404]     Test net output #1: loss = 1.88141 (* 1 = 1.88141 loss)\nI0821 21:40:54.798107 32551 solver.cpp:228] Iteration 21800, loss = 0.000548779\nI0821 21:40:54.798153 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:40:54.798169 32551 solver.cpp:244]     Train net output #1: loss = 0.000549042 (* 1 = 0.000549042 loss)\nI0821 21:40:54.880863 32551 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0821 21:43:10.754277 32551 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0821 21:44:31.298513 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57356\nI0821 21:44:31.298820 32551 solver.cpp:404]     Test net output #1: loss = 2.0567 (* 1 = 2.0567 loss)\nI0821 21:44:32.603866 32551 solver.cpp:228] Iteration 21900, loss = 0.000571421\nI0821 21:44:32.603912 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:44:32.603929 32551 solver.cpp:244]     Train net output #1: loss = 0.000571687 (* 1 = 0.000571687 loss)\nI0821 21:44:32.689322 32551 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0821 21:46:48.732046 32551 solver.cpp:337] Iteration 22000, Testing net 
(#0)\nI0821 21:48:09.325405 32551 solver.cpp:404]     Test net output #0: accuracy = 0.44672\nI0821 21:48:09.325685 32551 solver.cpp:404]     Test net output #1: loss = 2.48517 (* 1 = 2.48517 loss)\nI0821 21:48:10.630291 32551 solver.cpp:228] Iteration 22000, loss = 0.0001841\nI0821 21:48:10.630337 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:48:10.630353 32551 solver.cpp:244]     Train net output #1: loss = 0.000184365 (* 1 = 0.000184365 loss)\nI0821 21:48:10.712393 32551 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0821 21:50:26.896600 32551 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0821 21:51:47.484100 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3236\nI0821 21:51:47.484390 32551 solver.cpp:404]     Test net output #1: loss = 2.94777 (* 1 = 2.94777 loss)\nI0821 21:51:48.790025 32551 solver.cpp:228] Iteration 22100, loss = 0.000228936\nI0821 21:51:48.790074 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:51:48.790098 32551 solver.cpp:244]     Train net output #1: loss = 0.000229202 (* 1 = 0.000229202 loss)\nI0821 21:51:48.877454 32551 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0821 21:54:05.001899 32551 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0821 21:55:25.582284 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2484\nI0821 21:55:25.582628 32551 solver.cpp:404]     Test net output #1: loss = 3.22548 (* 1 = 3.22548 loss)\nI0821 21:55:26.886662 32551 solver.cpp:228] Iteration 22200, loss = 0.000268759\nI0821 21:55:26.886708 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:55:26.886723 32551 solver.cpp:244]     Train net output #1: loss = 0.000269025 (* 1 = 0.000269025 loss)\nI0821 21:55:26.976255 32551 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0821 21:57:42.864449 32551 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0821 21:59:03.385084 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2144\nI0821 21:59:03.385373 32551 
solver.cpp:404]     Test net output #1: loss = 3.36374 (* 1 = 3.36374 loss)\nI0821 21:59:04.689285 32551 solver.cpp:228] Iteration 22300, loss = 0.000255866\nI0821 21:59:04.689332 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:59:04.689347 32551 solver.cpp:244]     Train net output #1: loss = 0.000256132 (* 1 = 0.000256132 loss)\nI0821 21:59:04.773155 32551 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0821 22:01:20.750988 32551 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0821 22:02:41.259996 32551 solver.cpp:404]     Test net output #0: accuracy = 0.20432\nI0821 22:02:41.260284 32551 solver.cpp:404]     Test net output #1: loss = 3.36546 (* 1 = 3.36546 loss)\nI0821 22:02:42.564316 32551 solver.cpp:228] Iteration 22400, loss = 0.000317379\nI0821 22:02:42.564362 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:02:42.564378 32551 solver.cpp:244]     Train net output #1: loss = 0.000317645 (* 1 = 0.000317645 loss)\nI0821 22:02:42.651068 32551 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0821 22:04:58.431421 32551 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0821 22:06:18.913815 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1964\nI0821 22:06:18.914088 32551 solver.cpp:404]     Test net output #1: loss = 3.34902 (* 1 = 3.34902 loss)\nI0821 22:06:20.217147 32551 solver.cpp:228] Iteration 22500, loss = 0.000423325\nI0821 22:06:20.217190 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:06:20.217205 32551 solver.cpp:244]     Train net output #1: loss = 0.000423591 (* 1 = 0.000423591 loss)\nI0821 22:06:20.301441 32551 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0821 22:08:35.987414 32551 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0821 22:09:56.494993 32551 solver.cpp:404]     Test net output #0: accuracy = 0.18928\nI0821 22:09:56.495280 32551 solver.cpp:404]     Test net output #1: loss = 3.34857 (* 1 = 3.34857 loss)\nI0821 22:09:57.799403 32551 solver.cpp:228] 
Iteration 22600, loss = 0.000405206\nI0821 22:09:57.799445 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:09:57.799461 32551 solver.cpp:244]     Train net output #1: loss = 0.000405472 (* 1 = 0.000405472 loss)\nI0821 22:09:57.883249 32551 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0821 22:12:13.506163 32551 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0821 22:13:34.005235 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17196\nI0821 22:13:34.005501 32551 solver.cpp:404]     Test net output #1: loss = 3.34895 (* 1 = 3.34895 loss)\nI0821 22:13:35.310091 32551 solver.cpp:228] Iteration 22700, loss = 0.000360279\nI0821 22:13:35.310135 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:13:35.310151 32551 solver.cpp:244]     Train net output #1: loss = 0.000360545 (* 1 = 0.000360545 loss)\nI0821 22:13:35.397655 32551 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0821 22:15:51.138978 32551 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0821 22:17:11.641583 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14888\nI0821 22:17:11.641870 32551 solver.cpp:404]     Test net output #1: loss = 3.35174 (* 1 = 3.35174 loss)\nI0821 22:17:12.945932 32551 solver.cpp:228] Iteration 22800, loss = 0.000484127\nI0821 22:17:12.945981 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:17:12.945996 32551 solver.cpp:244]     Train net output #1: loss = 0.000484393 (* 1 = 0.000484393 loss)\nI0821 22:17:13.024523 32551 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0821 22:19:28.727041 32551 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0821 22:20:49.234474 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11828\nI0821 22:20:49.234767 32551 solver.cpp:404]     Test net output #1: loss = 3.33441 (* 1 = 3.33441 loss)\nI0821 22:20:50.538938 32551 solver.cpp:228] Iteration 22900, loss = 0.000370491\nI0821 22:20:50.538982 32551 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0821 22:20:50.539000 32551 solver.cpp:244]     Train net output #1: loss = 0.000370757 (* 1 = 0.000370757 loss)\nI0821 22:20:50.619971 32551 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0821 22:23:06.574370 32551 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0821 22:24:27.059929 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10192\nI0821 22:24:27.060215 32551 solver.cpp:404]     Test net output #1: loss = 3.28083 (* 1 = 3.28083 loss)\nI0821 22:24:28.364997 32551 solver.cpp:228] Iteration 23000, loss = 0.000352089\nI0821 22:24:28.365041 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:24:28.365056 32551 solver.cpp:244]     Train net output #1: loss = 0.000352355 (* 1 = 0.000352355 loss)\nI0821 22:24:28.452064 32551 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0821 22:26:44.054718 32551 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0821 22:28:04.563530 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09568\nI0821 22:28:04.563820 32551 solver.cpp:404]     Test net output #1: loss = 3.19162 (* 1 = 3.19162 loss)\nI0821 22:28:05.867555 32551 solver.cpp:228] Iteration 23100, loss = 0.000332202\nI0821 22:28:05.867599 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:28:05.867615 32551 solver.cpp:244]     Train net output #1: loss = 0.000332468 (* 1 = 0.000332468 loss)\nI0821 22:28:05.948205 32551 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0821 22:30:21.878986 32551 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0821 22:31:42.361868 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09928\nI0821 22:31:42.362088 32551 solver.cpp:404]     Test net output #1: loss = 3.11494 (* 1 = 3.11494 loss)\nI0821 22:31:43.666455 32551 solver.cpp:228] Iteration 23200, loss = 0.000390208\nI0821 22:31:43.666499 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:31:43.666517 32551 solver.cpp:244]     Train net output #1: loss = 0.000390474 (* 1 = 0.000390474 
loss)\nI0821 22:31:43.753036 32551 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0821 22:33:59.462059 32551 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0821 22:35:19.944583 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09716\nI0821 22:35:19.945003 32551 solver.cpp:404]     Test net output #1: loss = 3.02795 (* 1 = 3.02795 loss)\nI0821 22:35:21.249215 32551 solver.cpp:228] Iteration 23300, loss = 0.000386026\nI0821 22:35:21.249259 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:35:21.249274 32551 solver.cpp:244]     Train net output #1: loss = 0.000386292 (* 1 = 0.000386292 loss)\nI0821 22:35:21.335469 32551 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0821 22:37:37.030357 32551 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0821 22:38:57.502270 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09796\nI0821 22:38:57.502521 32551 solver.cpp:404]     Test net output #1: loss = 2.93933 (* 1 = 2.93933 loss)\nI0821 22:38:58.806298 32551 solver.cpp:228] Iteration 23400, loss = 0.000319874\nI0821 22:38:58.806341 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:38:58.806357 32551 solver.cpp:244]     Train net output #1: loss = 0.00032014 (* 1 = 0.00032014 loss)\nI0821 22:38:58.895220 32551 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0821 22:41:14.535874 32551 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0821 22:42:35.040411 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09604\nI0821 22:42:35.040685 32551 solver.cpp:404]     Test net output #1: loss = 2.84821 (* 1 = 2.84821 loss)\nI0821 22:42:36.344566 32551 solver.cpp:228] Iteration 23500, loss = 0.000320896\nI0821 22:42:36.344609 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:42:36.344624 32551 solver.cpp:244]     Train net output #1: loss = 0.000321162 (* 1 = 0.000321162 loss)\nI0821 22:42:36.423401 32551 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0821 22:44:52.186144 32551 
solver.cpp:337] Iteration 23600, Testing net (#0)\nI0821 22:46:12.693795 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09752\nI0821 22:46:12.694043 32551 solver.cpp:404]     Test net output #1: loss = 2.77474 (* 1 = 2.77474 loss)\nI0821 22:46:13.997731 32551 solver.cpp:228] Iteration 23600, loss = 0.000352094\nI0821 22:46:13.997778 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:46:13.997795 32551 solver.cpp:244]     Train net output #1: loss = 0.00035236 (* 1 = 0.00035236 loss)\nI0821 22:46:14.081266 32551 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0821 22:48:30.079846 32551 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0821 22:49:50.576143 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0952\nI0821 22:49:50.576411 32551 solver.cpp:404]     Test net output #1: loss = 2.70127 (* 1 = 2.70127 loss)\nI0821 22:49:51.880436 32551 solver.cpp:228] Iteration 23700, loss = 0.000303177\nI0821 22:49:51.880483 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:49:51.880499 32551 solver.cpp:244]     Train net output #1: loss = 0.000303443 (* 1 = 0.000303443 loss)\nI0821 22:49:51.967139 32551 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0821 22:52:07.975497 32551 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0821 22:53:28.475469 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09672\nI0821 22:53:28.475715 32551 solver.cpp:404]     Test net output #1: loss = 2.64169 (* 1 = 2.64169 loss)\nI0821 22:53:29.779170 32551 solver.cpp:228] Iteration 23800, loss = 0.000274277\nI0821 22:53:29.779216 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:53:29.779232 32551 solver.cpp:244]     Train net output #1: loss = 0.000274543 (* 1 = 0.000274543 loss)\nI0821 22:53:29.860455 32551 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0821 22:55:45.767980 32551 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0821 22:57:06.253589 32551 solver.cpp:404]     Test net output #0: 
accuracy = 0.09632\nI0821 22:57:06.253859 32551 solver.cpp:404]     Test net output #1: loss = 2.59412 (* 1 = 2.59412 loss)\nI0821 22:57:07.557123 32551 solver.cpp:228] Iteration 23900, loss = 0.000306929\nI0821 22:57:07.557170 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:57:07.557188 32551 solver.cpp:244]     Train net output #1: loss = 0.000307195 (* 1 = 0.000307195 loss)\nI0821 22:57:07.638054 32551 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0821 22:59:23.616678 32551 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0821 23:00:44.089223 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09968\nI0821 23:00:44.089491 32551 solver.cpp:404]     Test net output #1: loss = 2.55532 (* 1 = 2.55532 loss)\nI0821 23:00:45.392834 32551 solver.cpp:228] Iteration 24000, loss = 0.000282637\nI0821 23:00:45.392881 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:00:45.392897 32551 solver.cpp:244]     Train net output #1: loss = 0.000282903 (* 1 = 0.000282903 loss)\nI0821 23:00:45.478667 32551 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0821 23:03:01.611603 32551 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0821 23:04:22.092967 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09792\nI0821 23:04:22.093209 32551 solver.cpp:404]     Test net output #1: loss = 2.52636 (* 1 = 2.52636 loss)\nI0821 23:04:23.397677 32551 solver.cpp:228] Iteration 24100, loss = 0.000262202\nI0821 23:04:23.397730 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:04:23.397754 32551 solver.cpp:244]     Train net output #1: loss = 0.000262468 (* 1 = 0.000262468 loss)\nI0821 23:04:23.477633 32551 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0821 23:06:39.165791 32551 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0821 23:07:59.575942 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 23:07:59.576154 32551 solver.cpp:404]     Test net output #1: loss = 2.50406 (* 1 = 2.50406 
loss)\nI0821 23:08:00.880578 32551 solver.cpp:228] Iteration 24200, loss = 0.000245235\nI0821 23:08:00.880626 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:08:00.880650 32551 solver.cpp:244]     Train net output #1: loss = 0.000245501 (* 1 = 0.000245501 loss)\nI0821 23:08:00.965932 32551 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0821 23:10:16.828688 32551 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0821 23:11:37.213414 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09688\nI0821 23:11:37.213670 32551 solver.cpp:404]     Test net output #1: loss = 2.48387 (* 1 = 2.48387 loss)\nI0821 23:11:38.518081 32551 solver.cpp:228] Iteration 24300, loss = 0.000277652\nI0821 23:11:38.518131 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:11:38.518154 32551 solver.cpp:244]     Train net output #1: loss = 0.000277918 (* 1 = 0.000277918 loss)\nI0821 23:11:38.598426 32551 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0821 23:13:54.452606 32551 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0821 23:15:14.888654 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09704\nI0821 23:15:14.888916 32551 solver.cpp:404]     Test net output #1: loss = 2.4677 (* 1 = 2.4677 loss)\nI0821 23:15:16.193326 32551 solver.cpp:228] Iteration 24400, loss = 0.000256583\nI0821 23:15:16.193377 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:15:16.193400 32551 solver.cpp:244]     Train net output #1: loss = 0.000256849 (* 1 = 0.000256849 loss)\nI0821 23:15:16.277545 32551 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0821 23:17:32.251395 32551 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0821 23:18:52.686735 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09624\nI0821 23:18:52.686991 32551 solver.cpp:404]     Test net output #1: loss = 2.45776 (* 1 = 2.45776 loss)\nI0821 23:18:53.991318 32551 solver.cpp:228] Iteration 24500, loss = 0.000288425\nI0821 23:18:53.991367 32551 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:18:53.991392 32551 solver.cpp:244]     Train net output #1: loss = 0.000288691 (* 1 = 0.000288691 loss)\nI0821 23:18:54.074964 32551 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0821 23:21:10.047675 32551 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0821 23:22:30.498044 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09752\nI0821 23:22:30.498275 32551 solver.cpp:404]     Test net output #1: loss = 2.4452 (* 1 = 2.4452 loss)\nI0821 23:22:31.802971 32551 solver.cpp:228] Iteration 24600, loss = 0.000250558\nI0821 23:22:31.803022 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:22:31.803046 32551 solver.cpp:244]     Train net output #1: loss = 0.000250824 (* 1 = 0.000250824 loss)\nI0821 23:22:31.884306 32551 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0821 23:24:47.891677 32551 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0821 23:26:08.315129 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09656\nI0821 23:26:08.315383 32551 solver.cpp:404]     Test net output #1: loss = 2.43674 (* 1 = 2.43674 loss)\nI0821 23:26:09.619791 32551 solver.cpp:228] Iteration 24700, loss = 0.000260606\nI0821 23:26:09.619839 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:26:09.619863 32551 solver.cpp:244]     Train net output #1: loss = 0.000260872 (* 1 = 0.000260872 loss)\nI0821 23:26:09.707139 32551 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0821 23:28:25.742566 32551 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0821 23:29:46.168428 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0978\nI0821 23:29:46.168704 32551 solver.cpp:404]     Test net output #1: loss = 2.42773 (* 1 = 2.42773 loss)\nI0821 23:29:47.473564 32551 solver.cpp:228] Iteration 24800, loss = 0.000261467\nI0821 23:29:47.473613 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:29:47.473637 32551 solver.cpp:244]     Train net output #1: 
loss = 0.000261732 (* 1 = 0.000261732 loss)\nI0821 23:29:47.554862 32551 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0821 23:32:03.281934 32551 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0821 23:33:23.710322 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09736\nI0821 23:33:23.710578 32551 solver.cpp:404]     Test net output #1: loss = 2.42274 (* 1 = 2.42274 loss)\nI0821 23:33:25.015334 32551 solver.cpp:228] Iteration 24900, loss = 0.000276517\nI0821 23:33:25.015383 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:33:25.015406 32551 solver.cpp:244]     Train net output #1: loss = 0.000276783 (* 1 = 0.000276783 loss)\nI0821 23:33:25.098387 32551 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0821 23:35:41.283793 32551 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0821 23:37:01.690112 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0821 23:37:01.690325 32551 solver.cpp:404]     Test net output #1: loss = 2.41722 (* 1 = 2.41722 loss)\nI0821 23:37:02.998102 32551 solver.cpp:228] Iteration 25000, loss = 0.000244833\nI0821 23:37:02.998150 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:37:02.998174 32551 solver.cpp:244]     Train net output #1: loss = 0.000245099 (* 1 = 0.000245099 loss)\nI0821 23:37:03.077299 32551 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0821 23:39:18.882688 32551 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0821 23:40:39.374999 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10596\nI0821 23:40:39.375231 32551 solver.cpp:404]     Test net output #1: loss = 2.41409 (* 1 = 2.41409 loss)\nI0821 23:40:40.682327 32551 solver.cpp:228] Iteration 25100, loss = 0.000252751\nI0821 23:40:40.682374 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:40:40.682389 32551 solver.cpp:244]     Train net output #1: loss = 0.000253017 (* 1 = 0.000253017 loss)\nI0821 23:40:40.764842 32551 sgd_solver.cpp:166] Iteration 25100, lr = 
0.35\nI0821 23:42:56.696012 32551 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0821 23:44:17.203541 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11104\nI0821 23:44:17.203794 32551 solver.cpp:404]     Test net output #1: loss = 2.408 (* 1 = 2.408 loss)\nI0821 23:44:18.510447 32551 solver.cpp:228] Iteration 25200, loss = 0.000259859\nI0821 23:44:18.510491 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:44:18.510507 32551 solver.cpp:244]     Train net output #1: loss = 0.000260125 (* 1 = 0.000260125 loss)\nI0821 23:44:18.608969 32551 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0821 23:46:34.665555 32551 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0821 23:47:55.146139 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10264\nI0821 23:47:55.146361 32551 solver.cpp:404]     Test net output #1: loss = 2.4051 (* 1 = 2.4051 loss)\nI0821 23:47:56.454375 32551 solver.cpp:228] Iteration 25300, loss = 0.000244994\nI0821 23:47:56.454419 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:47:56.454437 32551 solver.cpp:244]     Train net output #1: loss = 0.00024526 (* 1 = 0.00024526 loss)\nI0821 23:47:56.538369 32551 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0821 23:50:12.735342 32551 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0821 23:51:33.297132 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10172\nI0821 23:51:33.297365 32551 solver.cpp:404]     Test net output #1: loss = 2.39884 (* 1 = 2.39884 loss)\nI0821 23:51:34.606515 32551 solver.cpp:228] Iteration 25400, loss = 0.000249397\nI0821 23:51:34.606564 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:51:34.606587 32551 solver.cpp:244]     Train net output #1: loss = 0.000249663 (* 1 = 0.000249663 loss)\nI0821 23:51:34.683416 32551 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0821 23:53:50.663447 32551 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0821 23:55:12.296917 32551 solver.cpp:404]  
   Test net output #0: accuracy = 0.09968\nI0821 23:55:12.297232 32551 solver.cpp:404]     Test net output #1: loss = 2.39392 (* 1 = 2.39392 loss)\nI0821 23:55:13.608652 32551 solver.cpp:228] Iteration 25500, loss = 0.000250473\nI0821 23:55:13.608711 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:55:13.608736 32551 solver.cpp:244]     Train net output #1: loss = 0.000250739 (* 1 = 0.000250739 loss)\nI0821 23:55:13.691380 32551 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0821 23:57:29.663460 32551 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0821 23:58:51.290298 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0821 23:58:51.290619 32551 solver.cpp:404]     Test net output #1: loss = 2.38783 (* 1 = 2.38783 loss)\nI0821 23:58:52.600908 32551 solver.cpp:228] Iteration 25600, loss = 0.000238961\nI0821 23:58:52.600966 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:58:52.600991 32551 solver.cpp:244]     Train net output #1: loss = 0.000239227 (* 1 = 0.000239227 loss)\nI0821 23:58:52.676911 32551 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0822 00:01:08.566722 32551 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0822 00:02:30.164954 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09956\nI0822 00:02:30.165257 32551 solver.cpp:404]     Test net output #1: loss = 2.38369 (* 1 = 2.38369 loss)\nI0822 00:02:31.477043 32551 solver.cpp:228] Iteration 25700, loss = 0.0002515\nI0822 00:02:31.477103 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:02:31.477119 32551 solver.cpp:244]     Train net output #1: loss = 0.000251766 (* 1 = 0.000251766 loss)\nI0822 00:02:31.564141 32551 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0822 00:04:47.484201 32551 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0822 00:06:09.107301 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 00:06:09.107637 32551 solver.cpp:404]     Test net output #1: loss = 
2.37872 (* 1 = 2.37872 loss)\nI0822 00:06:10.419126 32551 solver.cpp:228] Iteration 25800, loss = 0.000240037\nI0822 00:06:10.419185 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:06:10.419203 32551 solver.cpp:244]     Train net output #1: loss = 0.000240303 (* 1 = 0.000240303 loss)\nI0822 00:06:10.497943 32551 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0822 00:08:26.579548 32551 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0822 00:09:48.200824 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 00:09:48.201167 32551 solver.cpp:404]     Test net output #1: loss = 2.37556 (* 1 = 2.37556 loss)\nI0822 00:09:49.512466 32551 solver.cpp:228] Iteration 25900, loss = 0.000241007\nI0822 00:09:49.512526 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:09:49.512542 32551 solver.cpp:244]     Train net output #1: loss = 0.000241273 (* 1 = 0.000241273 loss)\nI0822 00:09:49.594457 32551 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0822 00:12:05.660518 32551 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0822 00:13:27.271363 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09996\nI0822 00:13:27.271673 32551 solver.cpp:404]     Test net output #1: loss = 2.37175 (* 1 = 2.37175 loss)\nI0822 00:13:28.583528 32551 solver.cpp:228] Iteration 26000, loss = 0.000227494\nI0822 00:13:28.583586 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:13:28.583602 32551 solver.cpp:244]     Train net output #1: loss = 0.00022776 (* 1 = 0.00022776 loss)\nI0822 00:13:28.660291 32551 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0822 00:15:44.730845 32551 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0822 00:17:06.347066 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0822 00:17:06.347399 32551 solver.cpp:404]     Test net output #1: loss = 2.36897 (* 1 = 2.36897 loss)\nI0822 00:17:07.658607 32551 solver.cpp:228] Iteration 26100, loss = 0.000230298\nI0822 
00:17:07.658665 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:17:07.658684 32551 solver.cpp:244]     Train net output #1: loss = 0.000230564 (* 1 = 0.000230564 loss)\nI0822 00:17:07.733896 32551 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0822 00:19:23.691154 32551 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0822 00:20:45.280092 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0822 00:20:45.280395 32551 solver.cpp:404]     Test net output #1: loss = 2.36628 (* 1 = 2.36628 loss)\nI0822 00:20:46.591529 32551 solver.cpp:228] Iteration 26200, loss = 0.000226331\nI0822 00:20:46.591586 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:20:46.591604 32551 solver.cpp:244]     Train net output #1: loss = 0.000226597 (* 1 = 0.000226597 loss)\nI0822 00:20:46.674190 32551 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0822 00:23:02.799520 32551 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0822 00:24:24.411200 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 00:24:24.411535 32551 solver.cpp:404]     Test net output #1: loss = 2.36467 (* 1 = 2.36467 loss)\nI0822 00:24:25.723435 32551 solver.cpp:228] Iteration 26300, loss = 0.000223778\nI0822 00:24:25.723496 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:24:25.723515 32551 solver.cpp:244]     Train net output #1: loss = 0.000224044 (* 1 = 0.000224044 loss)\nI0822 00:24:25.803323 32551 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0822 00:26:41.786310 32551 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0822 00:28:03.398532 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0822 00:28:03.398878 32551 solver.cpp:404]     Test net output #1: loss = 2.36285 (* 1 = 2.36285 loss)\nI0822 00:28:04.709275 32551 solver.cpp:228] Iteration 26400, loss = 0.000224793\nI0822 00:28:04.709336 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:28:04.709353 32551 solver.cpp:244]  
   Train net output #1: loss = 0.000225059 (* 1 = 0.000225059 loss)\nI0822 00:28:04.784920 32551 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0822 00:30:20.768097 32551 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0822 00:31:42.382879 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0822 00:31:42.383191 32551 solver.cpp:404]     Test net output #1: loss = 2.36304 (* 1 = 2.36304 loss)\nI0822 00:31:43.695437 32551 solver.cpp:228] Iteration 26500, loss = 0.000220367\nI0822 00:31:43.695497 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:31:43.695514 32551 solver.cpp:244]     Train net output #1: loss = 0.000220633 (* 1 = 0.000220633 loss)\nI0822 00:31:43.776203 32551 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0822 00:33:59.896159 32551 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0822 00:35:21.542836 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 00:35:21.543171 32551 solver.cpp:404]     Test net output #1: loss = 2.36151 (* 1 = 2.36151 loss)\nI0822 00:35:22.855553 32551 solver.cpp:228] Iteration 26600, loss = 0.000220116\nI0822 00:35:22.855614 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:35:22.855639 32551 solver.cpp:244]     Train net output #1: loss = 0.000220382 (* 1 = 0.000220382 loss)\nI0822 00:35:22.929383 32551 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0822 00:37:39.023794 32551 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0822 00:39:00.661291 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09976\nI0822 00:39:00.661609 32551 solver.cpp:404]     Test net output #1: loss = 2.36203 (* 1 = 2.36203 loss)\nI0822 00:39:01.972913 32551 solver.cpp:228] Iteration 26700, loss = 0.000218617\nI0822 00:39:01.972976 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:39:01.973002 32551 solver.cpp:244]     Train net output #1: loss = 0.000218883 (* 1 = 0.000218883 loss)\nI0822 00:39:02.051919 32551 sgd_solver.cpp:166] 
Iteration 26700, lr = 0.35\nI0822 00:41:18.054654 32551 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0822 00:42:39.427485 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09936\nI0822 00:42:39.427767 32551 solver.cpp:404]     Test net output #1: loss = 2.3621 (* 1 = 2.3621 loss)\nI0822 00:42:40.740043 32551 solver.cpp:228] Iteration 26800, loss = 0.00021605\nI0822 00:42:40.740105 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:42:40.740123 32551 solver.cpp:244]     Train net output #1: loss = 0.000216316 (* 1 = 0.000216316 loss)\nI0822 00:42:40.821905 32551 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0822 00:44:56.857475 32551 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0822 00:46:17.542295 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09932\nI0822 00:46:17.542582 32551 solver.cpp:404]     Test net output #1: loss = 2.36367 (* 1 = 2.36367 loss)\nI0822 00:46:18.854017 32551 solver.cpp:228] Iteration 26900, loss = 0.000212684\nI0822 00:46:18.854074 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:46:18.854090 32551 solver.cpp:244]     Train net output #1: loss = 0.000212949 (* 1 = 0.000212949 loss)\nI0822 00:46:18.941335 32551 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0822 00:48:34.878463 32551 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0822 00:49:55.580363 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0822 00:49:55.580649 32551 solver.cpp:404]     Test net output #1: loss = 2.36647 (* 1 = 2.36647 loss)\nI0822 00:49:56.891141 32551 solver.cpp:228] Iteration 27000, loss = 0.000214591\nI0822 00:49:56.891199 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:49:56.891216 32551 solver.cpp:244]     Train net output #1: loss = 0.000214857 (* 1 = 0.000214857 loss)\nI0822 00:49:56.973605 32551 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0822 00:52:12.991466 32551 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0822 
00:53:34.122570 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1004\nI0822 00:53:34.122920 32551 solver.cpp:404]     Test net output #1: loss = 2.37031 (* 1 = 2.37031 loss)\nI0822 00:53:35.434257 32551 solver.cpp:228] Iteration 27100, loss = 0.00021091\nI0822 00:53:35.434312 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:53:35.434329 32551 solver.cpp:244]     Train net output #1: loss = 0.000211176 (* 1 = 0.000211176 loss)\nI0822 00:53:35.508455 32551 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0822 00:55:51.501003 32551 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0822 00:57:13.067735 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10276\nI0822 00:57:13.068049 32551 solver.cpp:404]     Test net output #1: loss = 2.37387 (* 1 = 2.37387 loss)\nI0822 00:57:14.379355 32551 solver.cpp:228] Iteration 27200, loss = 0.000207358\nI0822 00:57:14.379418 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:57:14.379436 32551 solver.cpp:244]     Train net output #1: loss = 0.000207624 (* 1 = 0.000207624 loss)\nI0822 00:57:14.459767 32551 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0822 00:59:30.575070 32551 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0822 01:00:52.149128 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10324\nI0822 01:00:52.149452 32551 solver.cpp:404]     Test net output #1: loss = 2.37971 (* 1 = 2.37971 loss)\nI0822 01:00:53.461155 32551 solver.cpp:228] Iteration 27300, loss = 0.000208735\nI0822 01:00:53.461215 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:00:53.461231 32551 solver.cpp:244]     Train net output #1: loss = 0.000209 (* 1 = 0.000209 loss)\nI0822 01:00:53.539615 32551 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0822 01:03:09.636662 32551 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0822 01:04:31.213402 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10736\nI0822 01:04:31.213690 32551 solver.cpp:404]     
Test net output #1: loss = 2.38339 (* 1 = 2.38339 loss)\nI0822 01:04:32.525106 32551 solver.cpp:228] Iteration 27400, loss = 0.000204041\nI0822 01:04:32.525163 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:04:32.525182 32551 solver.cpp:244]     Train net output #1: loss = 0.000204307 (* 1 = 0.000204307 loss)\nI0822 01:04:32.604080 32551 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0822 01:06:48.535825 32551 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0822 01:08:10.130585 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10748\nI0822 01:08:10.130908 32551 solver.cpp:404]     Test net output #1: loss = 2.39003 (* 1 = 2.39003 loss)\nI0822 01:08:11.442428 32551 solver.cpp:228] Iteration 27500, loss = 0.00020575\nI0822 01:08:11.442487 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:08:11.442503 32551 solver.cpp:244]     Train net output #1: loss = 0.000206016 (* 1 = 0.000206016 loss)\nI0822 01:08:11.519358 32551 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0822 01:10:27.530705 32551 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0822 01:11:49.154165 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10884\nI0822 01:11:49.154479 32551 solver.cpp:404]     Test net output #1: loss = 2.39549 (* 1 = 2.39549 loss)\nI0822 01:11:50.466691 32551 solver.cpp:228] Iteration 27600, loss = 0.000201797\nI0822 01:11:50.466751 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:11:50.466768 32551 solver.cpp:244]     Train net output #1: loss = 0.000202063 (* 1 = 0.000202063 loss)\nI0822 01:11:50.548271 32551 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0822 01:14:06.727262 32551 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0822 01:15:28.320423 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11024\nI0822 01:15:28.320736 32551 solver.cpp:404]     Test net output #1: loss = 2.40092 (* 1 = 2.40092 loss)\nI0822 01:15:29.632259 32551 solver.cpp:228] Iteration 27700, loss 
= 0.000201676\nI0822 01:15:29.632319 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:15:29.632336 32551 solver.cpp:244]     Train net output #1: loss = 0.000201941 (* 1 = 0.000201941 loss)\nI0822 01:15:29.704411 32551 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0822 01:17:45.907280 32551 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0822 01:19:07.521520 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10796\nI0822 01:19:07.521844 32551 solver.cpp:404]     Test net output #1: loss = 2.40388 (* 1 = 2.40388 loss)\nI0822 01:19:08.833988 32551 solver.cpp:228] Iteration 27800, loss = 0.000199888\nI0822 01:19:08.834048 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:19:08.834064 32551 solver.cpp:244]     Train net output #1: loss = 0.000200154 (* 1 = 0.000200154 loss)\nI0822 01:19:08.908381 32551 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0822 01:21:25.025848 32551 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0822 01:22:45.434784 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0822 01:22:45.435019 32551 solver.cpp:404]     Test net output #1: loss = 78.6483 (* 1 = 78.6483 loss)\nI0822 01:22:46.742842 32551 solver.cpp:228] Iteration 27900, loss = 1.94083\nI0822 01:22:46.742885 32551 solver.cpp:244]     Train net output #0: accuracy = 0.216\nI0822 01:22:46.742902 32551 solver.cpp:244]     Train net output #1: loss = 1.94083 (* 1 = 1.94083 loss)\nI0822 01:22:46.817431 32551 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0822 01:25:02.712821 32551 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0822 01:26:23.105830 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 01:26:23.106073 32551 solver.cpp:404]     Test net output #1: loss = 78.5575 (* 1 = 78.5575 loss)\nI0822 01:26:24.414125 32551 solver.cpp:228] Iteration 28000, loss = 1.76352\nI0822 01:26:24.414168 32551 solver.cpp:244]     Train net output #0: accuracy = 0.344\nI0822 01:26:24.414186 32551 
solver.cpp:244]     Train net output #1: loss = 1.76352 (* 1 = 1.76352 loss)\nI0822 01:26:24.496593 32551 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0822 01:28:40.144428 32551 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0822 01:30:00.556031 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0822 01:30:00.556277 32551 solver.cpp:404]     Test net output #1: loss = 78.6483 (* 1 = 78.6483 loss)\nI0822 01:30:01.864145 32551 solver.cpp:228] Iteration 28100, loss = 1.59707\nI0822 01:30:01.864188 32551 solver.cpp:244]     Train net output #0: accuracy = 0.384\nI0822 01:30:01.864205 32551 solver.cpp:244]     Train net output #1: loss = 1.59707 (* 1 = 1.59707 loss)\nI0822 01:30:01.942306 32551 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0822 01:32:17.618410 32551 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0822 01:33:38.013027 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10588\nI0822 01:33:38.013275 32551 solver.cpp:404]     Test net output #1: loss = 77.0121 (* 1 = 77.0121 loss)\nI0822 01:33:39.320685 32551 solver.cpp:228] Iteration 28200, loss = 1.44489\nI0822 01:33:39.320730 32551 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0822 01:33:39.320747 32551 solver.cpp:244]     Train net output #1: loss = 1.44489 (* 1 = 1.44489 loss)\nI0822 01:33:39.396231 32551 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0822 01:35:55.181032 32551 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0822 01:37:15.552114 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10324\nI0822 01:37:15.552348 32551 solver.cpp:404]     Test net output #1: loss = 77.9348 (* 1 = 77.9348 loss)\nI0822 01:37:16.859143 32551 solver.cpp:228] Iteration 28300, loss = 1.21149\nI0822 01:37:16.859185 32551 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0822 01:37:16.859201 32551 solver.cpp:244]     Train net output #1: loss = 1.21149 (* 1 = 1.21149 loss)\nI0822 01:37:16.936872 32551 sgd_solver.cpp:166] Iteration 28300, lr 
= 0.35\nI0822 01:39:32.778306 32551 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0822 01:40:53.143790 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 01:40:53.144032 32551 solver.cpp:404]     Test net output #1: loss = 78.5575 (* 1 = 78.5575 loss)\nI0822 01:40:54.451553 32551 solver.cpp:228] Iteration 28400, loss = 0.984399\nI0822 01:40:54.451596 32551 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0822 01:40:54.451612 32551 solver.cpp:244]     Train net output #1: loss = 0.984399 (* 1 = 0.984399 loss)\nI0822 01:40:54.537199 32551 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0822 01:43:10.475548 32551 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0822 01:44:30.871188 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09948\nI0822 01:44:30.871443 32551 solver.cpp:404]     Test net output #1: loss = 78.6483 (* 1 = 78.6483 loss)\nI0822 01:44:32.178160 32551 solver.cpp:228] Iteration 28500, loss = 0.887682\nI0822 01:44:32.178205 32551 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0822 01:44:32.178221 32551 solver.cpp:244]     Train net output #1: loss = 0.887682 (* 1 = 0.887682 loss)\nI0822 01:44:32.256829 32551 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0822 01:46:48.098268 32551 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0822 01:48:08.509433 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10072\nI0822 01:48:08.509681 32551 solver.cpp:404]     Test net output #1: loss = 78.2627 (* 1 = 78.2627 loss)\nI0822 01:48:09.817487 32551 solver.cpp:228] Iteration 28600, loss = 0.721672\nI0822 01:48:09.817531 32551 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0822 01:48:09.817548 32551 solver.cpp:244]     Train net output #1: loss = 0.721672 (* 1 = 0.721672 loss)\nI0822 01:48:09.899797 32551 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0822 01:50:25.464440 32551 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0822 01:51:45.897176 32551 solver.cpp:404]     
Test net output #0: accuracy = 0.1028\nI0822 01:51:45.897392 32551 solver.cpp:404]     Test net output #1: loss = 57.261 (* 1 = 57.261 loss)\nI0822 01:51:47.204300 32551 solver.cpp:228] Iteration 28700, loss = 0.746765\nI0822 01:51:47.204344 32551 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0822 01:51:47.204360 32551 solver.cpp:244]     Train net output #1: loss = 0.746765 (* 1 = 0.746765 loss)\nI0822 01:51:47.289510 32551 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0822 01:54:03.215925 32551 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0822 01:55:23.674190 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1052\nI0822 01:55:23.674461 32551 solver.cpp:404]     Test net output #1: loss = 55.7537 (* 1 = 55.7537 loss)\nI0822 01:55:24.981384 32551 solver.cpp:228] Iteration 28800, loss = 0.601205\nI0822 01:55:24.981426 32551 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0822 01:55:24.981442 32551 solver.cpp:244]     Train net output #1: loss = 0.601205 (* 1 = 0.601205 loss)\nI0822 01:55:25.062903 32551 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0822 01:57:40.731235 32551 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0822 01:59:01.264046 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10328\nI0822 01:59:01.264315 32551 solver.cpp:404]     Test net output #1: loss = 64.0181 (* 1 = 64.0181 loss)\nI0822 01:59:02.571996 32551 solver.cpp:228] Iteration 28900, loss = 0.603586\nI0822 01:59:02.572041 32551 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0822 01:59:02.572057 32551 solver.cpp:244]     Train net output #1: loss = 0.603586 (* 1 = 0.603586 loss)\nI0822 01:59:02.650552 32551 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0822 02:01:18.279157 32551 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0822 02:02:38.809957 32551 solver.cpp:404]     Test net output #0: accuracy = 0.07452\nI0822 02:02:38.810230 32551 solver.cpp:404]     Test net output #1: loss = 68.1763 (* 1 = 68.1763 
loss)\nI0822 02:02:40.118229 32551 solver.cpp:228] Iteration 29000, loss = 0.482071\nI0822 02:02:40.118276 32551 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0822 02:02:40.118300 32551 solver.cpp:244]     Train net output #1: loss = 0.482071 (* 1 = 0.482071 loss)\nI0822 02:02:40.201458 32551 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0822 02:04:56.115298 32551 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0822 02:06:16.614122 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 02:06:16.614372 32551 solver.cpp:404]     Test net output #1: loss = 57.8835 (* 1 = 57.8835 loss)\nI0822 02:06:17.922008 32551 solver.cpp:228] Iteration 29100, loss = 0.516843\nI0822 02:06:17.922052 32551 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0822 02:06:17.922068 32551 solver.cpp:244]     Train net output #1: loss = 0.516843 (* 1 = 0.516843 loss)\nI0822 02:06:18.003207 32551 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0822 02:08:33.681247 32551 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0822 02:09:54.198109 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10692\nI0822 02:09:54.198381 32551 solver.cpp:404]     Test net output #1: loss = 20.4065 (* 1 = 20.4065 loss)\nI0822 02:09:55.504678 32551 solver.cpp:228] Iteration 29200, loss = 0.490519\nI0822 02:09:55.504721 32551 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0822 02:09:55.504737 32551 solver.cpp:244]     Train net output #1: loss = 0.490519 (* 1 = 0.490519 loss)\nI0822 02:09:55.584743 32551 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0822 02:12:11.270203 32551 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0822 02:13:31.764314 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15144\nI0822 02:13:31.764570 32551 solver.cpp:404]     Test net output #1: loss = 7.5296 (* 1 = 7.5296 loss)\nI0822 02:13:33.072722 32551 solver.cpp:228] Iteration 29300, loss = 0.311751\nI0822 02:13:33.072764 32551 solver.cpp:244]     Train 
net output #0: accuracy = 0.896\nI0822 02:13:33.072780 32551 solver.cpp:244]     Train net output #1: loss = 0.311751 (* 1 = 0.311751 loss)\nI0822 02:13:33.153396 32551 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0822 02:15:48.993093 32551 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0822 02:17:09.489106 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1416\nI0822 02:17:09.489403 32551 solver.cpp:404]     Test net output #1: loss = 8.44234 (* 1 = 8.44234 loss)\nI0822 02:17:10.797394 32551 solver.cpp:228] Iteration 29400, loss = 0.448018\nI0822 02:17:10.797437 32551 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0822 02:17:10.797453 32551 solver.cpp:244]     Train net output #1: loss = 0.448018 (* 1 = 0.448018 loss)\nI0822 02:17:10.879927 32551 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0822 02:19:26.536777 32551 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0822 02:20:47.051578 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16512\nI0822 02:20:47.051863 32551 solver.cpp:404]     Test net output #1: loss = 11.4023 (* 1 = 11.4023 loss)\nI0822 02:20:48.359086 32551 solver.cpp:228] Iteration 29500, loss = 0.361236\nI0822 02:20:48.359127 32551 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 02:20:48.359143 32551 solver.cpp:244]     Train net output #1: loss = 0.361236 (* 1 = 0.361236 loss)\nI0822 02:20:48.441817 32551 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0822 02:23:04.196436 32551 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0822 02:24:24.718405 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1388\nI0822 02:24:24.718711 32551 solver.cpp:404]     Test net output #1: loss = 8.38856 (* 1 = 8.38856 loss)\nI0822 02:24:26.027763 32551 solver.cpp:228] Iteration 29600, loss = 0.314505\nI0822 02:24:26.027806 32551 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0822 02:24:26.027822 32551 solver.cpp:244]     Train net output #1: loss = 0.314505 (* 1 = 0.314505 
loss)\nI0822 02:24:26.103276 32551 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0822 02:26:41.912760 32551 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0822 02:28:02.437088 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12684\nI0822 02:28:02.437379 32551 solver.cpp:404]     Test net output #1: loss = 6.81067 (* 1 = 6.81067 loss)\nI0822 02:28:03.745461 32551 solver.cpp:228] Iteration 29700, loss = 0.238787\nI0822 02:28:03.745503 32551 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 02:28:03.745519 32551 solver.cpp:244]     Train net output #1: loss = 0.238787 (* 1 = 0.238787 loss)\nI0822 02:28:03.824290 32551 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0822 02:30:19.895699 32551 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0822 02:31:40.426609 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16772\nI0822 02:31:40.426913 32551 solver.cpp:404]     Test net output #1: loss = 11.0082 (* 1 = 11.0082 loss)\nI0822 02:31:41.735249 32551 solver.cpp:228] Iteration 29800, loss = 0.197879\nI0822 02:31:41.735291 32551 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 02:31:41.735307 32551 solver.cpp:244]     Train net output #1: loss = 0.197879 (* 1 = 0.197879 loss)\nI0822 02:31:41.818949 32551 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0822 02:33:57.775964 32551 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0822 02:35:18.282641 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1396\nI0822 02:35:18.282938 32551 solver.cpp:404]     Test net output #1: loss = 18.5412 (* 1 = 18.5412 loss)\nI0822 02:35:19.590950 32551 solver.cpp:228] Iteration 29900, loss = 0.283434\nI0822 02:35:19.590993 32551 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0822 02:35:19.591009 32551 solver.cpp:244]     Train net output #1: loss = 0.283434 (* 1 = 0.283434 loss)\nI0822 02:35:19.667495 32551 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0822 02:37:35.642608 32551 solver.cpp:337] Iteration 
30000, Testing net (#0)\nI0822 02:38:57.252549 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13184\nI0822 02:38:57.252859 32551 solver.cpp:404]     Test net output #1: loss = 26.0882 (* 1 = 26.0882 loss)\nI0822 02:38:58.564793 32551 solver.cpp:228] Iteration 30000, loss = 0.168613\nI0822 02:38:58.564852 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 02:38:58.564869 32551 solver.cpp:244]     Train net output #1: loss = 0.168613 (* 1 = 0.168613 loss)\nI0822 02:38:58.642962 32551 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0822 02:41:14.973909 32551 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0822 02:42:36.557763 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17408\nI0822 02:42:36.558068 32551 solver.cpp:404]     Test net output #1: loss = 9.47783 (* 1 = 9.47783 loss)\nI0822 02:42:37.868968 32551 solver.cpp:228] Iteration 30100, loss = 0.139277\nI0822 02:42:37.869027 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 02:42:37.869043 32551 solver.cpp:244]     Train net output #1: loss = 0.139277 (* 1 = 0.139277 loss)\nI0822 02:42:37.947527 32551 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0822 02:44:54.161978 32551 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0822 02:46:15.768811 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11776\nI0822 02:46:15.769135 32551 solver.cpp:404]     Test net output #1: loss = 19.7791 (* 1 = 19.7791 loss)\nI0822 02:46:17.079856 32551 solver.cpp:228] Iteration 30200, loss = 0.144593\nI0822 02:46:17.079916 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 02:46:17.079934 32551 solver.cpp:244]     Train net output #1: loss = 0.144593 (* 1 = 0.144593 loss)\nI0822 02:46:17.157892 32551 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0822 02:48:33.351686 32551 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0822 02:49:54.944178 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12992\nI0822 02:49:54.944474 
32551 solver.cpp:404]     Test net output #1: loss = 13.1137 (* 1 = 13.1137 loss)\nI0822 02:49:56.254884 32551 solver.cpp:228] Iteration 30300, loss = 0.240206\nI0822 02:49:56.254942 32551 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0822 02:49:56.254961 32551 solver.cpp:244]     Train net output #1: loss = 0.240206 (* 1 = 0.240206 loss)\nI0822 02:49:56.330221 32551 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0822 02:52:12.487552 32551 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0822 02:53:34.126811 32551 solver.cpp:404]     Test net output #0: accuracy = 0.21112\nI0822 02:53:34.127161 32551 solver.cpp:404]     Test net output #1: loss = 10.7823 (* 1 = 10.7823 loss)\nI0822 02:53:35.439918 32551 solver.cpp:228] Iteration 30400, loss = 0.143863\nI0822 02:53:35.439977 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 02:53:35.439993 32551 solver.cpp:244]     Train net output #1: loss = 0.143863 (* 1 = 0.143863 loss)\nI0822 02:53:35.515923 32551 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0822 02:55:51.810689 32551 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0822 02:57:13.416820 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14212\nI0822 02:57:13.417148 32551 solver.cpp:404]     Test net output #1: loss = 16.0797 (* 1 = 16.0797 loss)\nI0822 02:57:14.728667 32551 solver.cpp:228] Iteration 30500, loss = 0.182926\nI0822 02:57:14.728727 32551 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0822 02:57:14.728744 32551 solver.cpp:244]     Train net output #1: loss = 0.182926 (* 1 = 0.182926 loss)\nI0822 02:57:14.803498 32551 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0822 02:59:30.929538 32551 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0822 03:00:52.537768 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15532\nI0822 03:00:52.538112 32551 solver.cpp:404]     Test net output #1: loss = 11.2629 (* 1 = 11.2629 loss)\nI0822 03:00:53.849746 32551 solver.cpp:228] Iteration 
30600, loss = 0.15961\nI0822 03:00:53.849807 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 03:00:53.849823 32551 solver.cpp:244]     Train net output #1: loss = 0.15961 (* 1 = 0.15961 loss)\nI0822 03:00:53.930382 32551 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0822 03:03:09.914307 32551 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0822 03:04:31.539827 32551 solver.cpp:404]     Test net output #0: accuracy = 0.18088\nI0822 03:04:31.540175 32551 solver.cpp:404]     Test net output #1: loss = 11.8961 (* 1 = 11.8961 loss)\nI0822 03:04:32.851451 32551 solver.cpp:228] Iteration 30700, loss = 0.0609518\nI0822 03:04:32.851512 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:04:32.851529 32551 solver.cpp:244]     Train net output #1: loss = 0.0609518 (* 1 = 0.0609518 loss)\nI0822 03:04:32.925433 32551 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0822 03:06:49.176162 32551 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0822 03:08:10.809098 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17292\nI0822 03:08:10.809419 32551 solver.cpp:404]     Test net output #1: loss = 11.8835 (* 1 = 11.8835 loss)\nI0822 03:08:12.121302 32551 solver.cpp:228] Iteration 30800, loss = 0.100608\nI0822 03:08:12.121359 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 03:08:12.121376 32551 solver.cpp:244]     Train net output #1: loss = 0.100608 (* 1 = 0.100608 loss)\nI0822 03:08:12.194361 32551 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0822 03:10:28.487869 32551 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0822 03:11:50.115236 32551 solver.cpp:404]     Test net output #0: accuracy = 0.19188\nI0822 03:11:50.115564 32551 solver.cpp:404]     Test net output #1: loss = 9.96604 (* 1 = 9.96604 loss)\nI0822 03:11:51.427211 32551 solver.cpp:228] Iteration 30900, loss = 0.0739917\nI0822 03:11:51.427268 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 03:11:51.427284 
32551 solver.cpp:244]     Train net output #1: loss = 0.0739917 (* 1 = 0.0739917 loss)\nI0822 03:11:51.509433 32551 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0822 03:14:07.679163 32551 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0822 03:15:29.307059 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12172\nI0822 03:15:29.307380 32551 solver.cpp:404]     Test net output #1: loss = 16.1312 (* 1 = 16.1312 loss)\nI0822 03:15:30.617790 32551 solver.cpp:228] Iteration 31000, loss = 0.219261\nI0822 03:15:30.617841 32551 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 03:15:30.617863 32551 solver.cpp:244]     Train net output #1: loss = 0.219261 (* 1 = 0.219261 loss)\nI0822 03:15:30.694661 32551 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0822 03:17:46.944092 32551 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0822 03:19:08.585165 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14336\nI0822 03:19:08.585508 32551 solver.cpp:404]     Test net output #1: loss = 14.4397 (* 1 = 14.4397 loss)\nI0822 03:19:09.897208 32551 solver.cpp:228] Iteration 31100, loss = 0.0728591\nI0822 03:19:09.897267 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:19:09.897285 32551 solver.cpp:244]     Train net output #1: loss = 0.0728591 (* 1 = 0.0728591 loss)\nI0822 03:19:09.978230 32551 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0822 03:21:26.117000 32551 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0822 03:22:47.752939 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13408\nI0822 03:22:47.753279 32551 solver.cpp:404]     Test net output #1: loss = 13.996 (* 1 = 13.996 loss)\nI0822 03:22:49.064462 32551 solver.cpp:228] Iteration 31200, loss = 0.045449\nI0822 03:22:49.064520 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:22:49.064545 32551 solver.cpp:244]     Train net output #1: loss = 0.045449 (* 1 = 0.045449 loss)\nI0822 03:22:49.139681 32551 sgd_solver.cpp:166] 
Iteration 31200, lr = 0.35\nI0822 03:25:05.217291 32551 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0822 03:26:26.841142 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10812\nI0822 03:26:26.841495 32551 solver.cpp:404]     Test net output #1: loss = 12.9659 (* 1 = 12.9659 loss)\nI0822 03:26:28.153138 32551 solver.cpp:228] Iteration 31300, loss = 0.070474\nI0822 03:26:28.153199 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 03:26:28.153224 32551 solver.cpp:244]     Train net output #1: loss = 0.0704739 (* 1 = 0.0704739 loss)\nI0822 03:26:28.230448 32551 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0822 03:28:44.165947 32551 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0822 03:30:05.885231 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10288\nI0822 03:30:05.885550 32551 solver.cpp:404]     Test net output #1: loss = 15.3406 (* 1 = 15.3406 loss)\nI0822 03:30:07.196280 32551 solver.cpp:228] Iteration 31400, loss = 0.088508\nI0822 03:30:07.196336 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 03:30:07.196363 32551 solver.cpp:244]     Train net output #1: loss = 0.0885079 (* 1 = 0.0885079 loss)\nI0822 03:30:07.273979 32551 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0822 03:32:23.457823 32551 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0822 03:33:45.185910 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10916\nI0822 03:33:45.186245 32551 solver.cpp:404]     Test net output #1: loss = 14.4656 (* 1 = 14.4656 loss)\nI0822 03:33:46.498128 32551 solver.cpp:228] Iteration 31500, loss = 0.0760487\nI0822 03:33:46.498185 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 03:33:46.498211 32551 solver.cpp:244]     Train net output #1: loss = 0.0760486 (* 1 = 0.0760486 loss)\nI0822 03:33:46.570976 32551 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0822 03:36:02.712112 32551 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0822 03:37:24.464373 
32551 solver.cpp:404]     Test net output #0: accuracy = 0.10508\nI0822 03:37:24.464720 32551 solver.cpp:404]     Test net output #1: loss = 17.9357 (* 1 = 17.9357 loss)\nI0822 03:37:25.777290 32551 solver.cpp:228] Iteration 31600, loss = 0.0680247\nI0822 03:37:25.777345 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:37:25.777362 32551 solver.cpp:244]     Train net output #1: loss = 0.0680246 (* 1 = 0.0680246 loss)\nI0822 03:37:25.857467 32551 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0822 03:39:41.880220 32551 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0822 03:41:03.529108 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10492\nI0822 03:41:03.529440 32551 solver.cpp:404]     Test net output #1: loss = 15.4758 (* 1 = 15.4758 loss)\nI0822 03:41:04.843854 32551 solver.cpp:228] Iteration 31700, loss = 0.0410862\nI0822 03:41:04.843914 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:41:04.843932 32551 solver.cpp:244]     Train net output #1: loss = 0.0410861 (* 1 = 0.0410861 loss)\nI0822 03:41:04.927189 32551 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0822 03:43:21.139609 32551 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0822 03:44:42.769798 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14512\nI0822 03:44:42.770148 32551 solver.cpp:404]     Test net output #1: loss = 13.4982 (* 1 = 13.4982 loss)\nI0822 03:44:44.081660 32551 solver.cpp:228] Iteration 31800, loss = 0.0630766\nI0822 03:44:44.081715 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:44:44.081733 32551 solver.cpp:244]     Train net output #1: loss = 0.0630765 (* 1 = 0.0630765 loss)\nI0822 03:44:44.152608 32551 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0822 03:47:00.316663 32551 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0822 03:48:21.930650 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12884\nI0822 03:48:21.930995 32551 solver.cpp:404]     Test net 
output #1: loss = 12.7847 (* 1 = 12.7847 loss)\nI0822 03:48:23.241376 32551 solver.cpp:228] Iteration 31900, loss = 0.0573919\nI0822 03:48:23.241431 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:48:23.241449 32551 solver.cpp:244]     Train net output #1: loss = 0.0573918 (* 1 = 0.0573918 loss)\nI0822 03:48:23.321547 32551 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0822 03:50:39.561985 32551 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0822 03:52:01.194391 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 03:52:01.194711 32551 solver.cpp:404]     Test net output #1: loss = 15.8273 (* 1 = 15.8273 loss)\nI0822 03:52:02.505004 32551 solver.cpp:228] Iteration 32000, loss = 0.0122508\nI0822 03:52:02.505059 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:52:02.505076 32551 solver.cpp:244]     Train net output #1: loss = 0.0122507 (* 1 = 0.0122507 loss)\nI0822 03:52:02.581851 32551 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0822 03:54:18.853277 32551 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0822 03:55:40.469473 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 03:55:40.469820 32551 solver.cpp:404]     Test net output #1: loss = 17.8683 (* 1 = 17.8683 loss)\nI0822 03:55:41.780114 32551 solver.cpp:228] Iteration 32100, loss = 0.0823349\nI0822 03:55:41.780174 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:55:41.780190 32551 solver.cpp:244]     Train net output #1: loss = 0.0823349 (* 1 = 0.0823349 loss)\nI0822 03:55:41.857564 32551 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0822 03:57:57.984072 32551 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0822 03:59:19.609261 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10324\nI0822 03:59:19.609580 32551 solver.cpp:404]     Test net output #1: loss = 15.5558 (* 1 = 15.5558 loss)\nI0822 03:59:20.919991 32551 solver.cpp:228] Iteration 32200, loss = 0.0441809\nI0822 
03:59:20.920048 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:59:20.920068 32551 solver.cpp:244]     Train net output #1: loss = 0.0441808 (* 1 = 0.0441808 loss)\nI0822 03:59:20.993319 32551 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0822 04:01:37.285043 32551 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0822 04:02:58.860002 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1144\nI0822 04:02:58.860321 32551 solver.cpp:404]     Test net output #1: loss = 10.5723 (* 1 = 10.5723 loss)\nI0822 04:03:00.171027 32551 solver.cpp:228] Iteration 32300, loss = 0.0704173\nI0822 04:03:00.171082 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:03:00.171099 32551 solver.cpp:244]     Train net output #1: loss = 0.0704172 (* 1 = 0.0704172 loss)\nI0822 04:03:00.253373 32551 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0822 04:05:16.466490 32551 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0822 04:06:38.078244 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 04:06:38.078567 32551 solver.cpp:404]     Test net output #1: loss = 16.1792 (* 1 = 16.1792 loss)\nI0822 04:06:39.389426 32551 solver.cpp:228] Iteration 32400, loss = 0.044438\nI0822 04:06:39.389482 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:06:39.389500 32551 solver.cpp:244]     Train net output #1: loss = 0.044438 (* 1 = 0.044438 loss)\nI0822 04:06:39.464550 32551 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0822 04:08:55.386643 32551 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0822 04:10:17.012051 32551 solver.cpp:404]     Test net output #0: accuracy = 0.102\nI0822 04:10:17.012364 32551 solver.cpp:404]     Test net output #1: loss = 16.4826 (* 1 = 16.4826 loss)\nI0822 04:10:18.322860 32551 solver.cpp:228] Iteration 32500, loss = 0.0189072\nI0822 04:10:18.322916 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:10:18.322937 32551 solver.cpp:244]     Train 
net output #1: loss = 0.0189071 (* 1 = 0.0189071 loss)\nI0822 04:10:18.401707 32551 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0822 04:12:34.473278 32551 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0822 04:13:56.096406 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 04:13:56.096730 32551 solver.cpp:404]     Test net output #1: loss = 13.8281 (* 1 = 13.8281 loss)\nI0822 04:13:57.406642 32551 solver.cpp:228] Iteration 32600, loss = 0.0172823\nI0822 04:13:57.406697 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:13:57.406715 32551 solver.cpp:244]     Train net output #1: loss = 0.0172822 (* 1 = 0.0172822 loss)\nI0822 04:13:57.481993 32551 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0822 04:16:13.535249 32551 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0822 04:17:35.160526 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 04:17:35.160867 32551 solver.cpp:404]     Test net output #1: loss = 12.8974 (* 1 = 12.8974 loss)\nI0822 04:17:36.470916 32551 solver.cpp:228] Iteration 32700, loss = 0.0645424\nI0822 04:17:36.470974 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:17:36.470991 32551 solver.cpp:244]     Train net output #1: loss = 0.0645423 (* 1 = 0.0645423 loss)\nI0822 04:17:36.546298 32551 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0822 04:19:52.713924 32551 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0822 04:21:14.334496 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10488\nI0822 04:21:14.334836 32551 solver.cpp:404]     Test net output #1: loss = 11.1195 (* 1 = 11.1195 loss)\nI0822 04:21:15.645334 32551 solver.cpp:228] Iteration 32800, loss = 0.0577009\nI0822 04:21:15.645391 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:21:15.645408 32551 solver.cpp:244]     Train net output #1: loss = 0.0577008 (* 1 = 0.0577008 loss)\nI0822 04:21:15.725853 32551 sgd_solver.cpp:166] Iteration 32800, lr = 
0.35\nI0822 04:23:31.749426 32551 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0822 04:24:53.374532 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11028\nI0822 04:24:53.374886 32551 solver.cpp:404]     Test net output #1: loss = 12.5445 (* 1 = 12.5445 loss)\nI0822 04:24:54.685655 32551 solver.cpp:228] Iteration 32900, loss = 0.0109865\nI0822 04:24:54.685711 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:24:54.685729 32551 solver.cpp:244]     Train net output #1: loss = 0.0109864 (* 1 = 0.0109864 loss)\nI0822 04:24:54.763041 32551 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0822 04:27:10.732000 32551 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0822 04:28:32.340991 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11248\nI0822 04:28:32.341313 32551 solver.cpp:404]     Test net output #1: loss = 7.95235 (* 1 = 7.95235 loss)\nI0822 04:28:33.651125 32551 solver.cpp:228] Iteration 33000, loss = 0.0717321\nI0822 04:28:33.651180 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 04:28:33.651196 32551 solver.cpp:244]     Train net output #1: loss = 0.071732 (* 1 = 0.071732 loss)\nI0822 04:28:33.731159 32551 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0822 04:30:49.767287 32551 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0822 04:32:11.324826 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10004\nI0822 04:32:11.325119 32551 solver.cpp:404]     Test net output #1: loss = 12.1619 (* 1 = 12.1619 loss)\nI0822 04:32:12.634976 32551 solver.cpp:228] Iteration 33100, loss = 0.0943415\nI0822 04:32:12.635031 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 04:32:12.635048 32551 solver.cpp:244]     Train net output #1: loss = 0.0943414 (* 1 = 0.0943414 loss)\nI0822 04:32:12.713510 32551 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0822 04:34:28.723198 32551 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0822 04:35:50.376891 32551 solver.cpp:404]     
Test net output #0: accuracy = 0.1044\nI0822 04:35:50.377224 32551 solver.cpp:404]     Test net output #1: loss = 14.0278 (* 1 = 14.0278 loss)\nI0822 04:35:51.688352 32551 solver.cpp:228] Iteration 33200, loss = 0.0487898\nI0822 04:35:51.688405 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:35:51.688422 32551 solver.cpp:244]     Train net output #1: loss = 0.0487897 (* 1 = 0.0487897 loss)\nI0822 04:35:51.768520 32551 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0822 04:38:07.660171 32551 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0822 04:39:29.252805 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09996\nI0822 04:39:29.253137 32551 solver.cpp:404]     Test net output #1: loss = 14.1615 (* 1 = 14.1615 loss)\nI0822 04:39:30.563694 32551 solver.cpp:228] Iteration 33300, loss = 0.0277576\nI0822 04:39:30.563746 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:39:30.563765 32551 solver.cpp:244]     Train net output #1: loss = 0.0277575 (* 1 = 0.0277575 loss)\nI0822 04:39:30.647192 32551 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0822 04:41:46.744812 32551 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0822 04:43:08.336951 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10124\nI0822 04:43:08.337301 32551 solver.cpp:404]     Test net output #1: loss = 13.7825 (* 1 = 13.7825 loss)\nI0822 04:43:09.647608 32551 solver.cpp:228] Iteration 33400, loss = 0.0303725\nI0822 04:43:09.647661 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:43:09.647678 32551 solver.cpp:244]     Train net output #1: loss = 0.0303724 (* 1 = 0.0303724 loss)\nI0822 04:43:09.722693 32551 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0822 04:45:25.587869 32551 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0822 04:46:47.186200 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10328\nI0822 04:46:47.186529 32551 solver.cpp:404]     Test net output #1: loss = 12.2677 (* 1 
= 12.2677 loss)\nI0822 04:46:48.497808 32551 solver.cpp:228] Iteration 33500, loss = 0.0521659\nI0822 04:46:48.497859 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:46:48.497875 32551 solver.cpp:244]     Train net output #1: loss = 0.0521658 (* 1 = 0.0521658 loss)\nI0822 04:46:48.578385 32551 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0822 04:49:04.548075 32551 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0822 04:50:26.130887 32551 solver.cpp:404]     Test net output #0: accuracy = 0.114\nI0822 04:50:26.131214 32551 solver.cpp:404]     Test net output #1: loss = 10.5191 (* 1 = 10.5191 loss)\nI0822 04:50:27.441308 32551 solver.cpp:228] Iteration 33600, loss = 0.0287258\nI0822 04:50:27.441356 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:50:27.441373 32551 solver.cpp:244]     Train net output #1: loss = 0.0287257 (* 1 = 0.0287257 loss)\nI0822 04:50:27.519711 32551 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0822 04:52:43.386210 32551 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0822 04:54:04.990803 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10624\nI0822 04:54:04.991133 32551 solver.cpp:404]     Test net output #1: loss = 12.1147 (* 1 = 12.1147 loss)\nI0822 04:54:06.302116 32551 solver.cpp:228] Iteration 33700, loss = 0.0143708\nI0822 04:54:06.302168 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:54:06.302186 32551 solver.cpp:244]     Train net output #1: loss = 0.0143708 (* 1 = 0.0143708 loss)\nI0822 04:54:06.383980 32551 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0822 04:56:22.387192 32551 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0822 04:57:43.968005 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10564\nI0822 04:57:43.968334 32551 solver.cpp:404]     Test net output #1: loss = 11.8356 (* 1 = 11.8356 loss)\nI0822 04:57:45.278883 32551 solver.cpp:228] Iteration 33800, loss = 0.0314284\nI0822 04:57:45.278939 32551 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:57:45.278955 32551 solver.cpp:244]     Train net output #1: loss = 0.0314283 (* 1 = 0.0314283 loss)\nI0822 04:57:45.357774 32551 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0822 05:00:01.282130 32551 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0822 05:01:22.819211 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11356\nI0822 05:01:22.819562 32551 solver.cpp:404]     Test net output #1: loss = 9.35984 (* 1 = 9.35984 loss)\nI0822 05:01:24.129292 32551 solver.cpp:228] Iteration 33900, loss = 0.074525\nI0822 05:01:24.129344 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:01:24.129361 32551 solver.cpp:244]     Train net output #1: loss = 0.0745249 (* 1 = 0.0745249 loss)\nI0822 05:01:24.215972 32551 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0822 05:03:40.149878 32551 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0822 05:05:01.744902 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12072\nI0822 05:05:01.745174 32551 solver.cpp:404]     Test net output #1: loss = 11.4448 (* 1 = 11.4448 loss)\nI0822 05:05:03.055855 32551 solver.cpp:228] Iteration 34000, loss = 0.0308792\nI0822 05:05:03.055907 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:05:03.055929 32551 solver.cpp:244]     Train net output #1: loss = 0.0308791 (* 1 = 0.0308791 loss)\nI0822 05:05:03.136627 32551 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0822 05:07:19.391647 32551 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0822 05:08:40.941552 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13008\nI0822 05:08:40.941797 32551 solver.cpp:404]     Test net output #1: loss = 9.89995 (* 1 = 9.89995 loss)\nI0822 05:08:42.252938 32551 solver.cpp:228] Iteration 34100, loss = 0.0196155\nI0822 05:08:42.252991 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:08:42.253008 32551 solver.cpp:244]     Train net output #1: loss 
= 0.0196154 (* 1 = 0.0196154 loss)\nI0822 05:08:42.326026 32551 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0822 05:10:58.259320 32551 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0822 05:12:19.802000 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2568\nI0822 05:12:19.802302 32551 solver.cpp:404]     Test net output #1: loss = 6.85004 (* 1 = 6.85004 loss)\nI0822 05:12:21.112982 32551 solver.cpp:228] Iteration 34200, loss = 0.0453698\nI0822 05:12:21.113037 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 05:12:21.113055 32551 solver.cpp:244]     Train net output #1: loss = 0.0453697 (* 1 = 0.0453697 loss)\nI0822 05:12:21.189599 32551 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0822 05:14:37.201853 32551 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0822 05:15:58.778832 32551 solver.cpp:404]     Test net output #0: accuracy = 0.2192\nI0822 05:15:58.779142 32551 solver.cpp:404]     Test net output #1: loss = 8.62272 (* 1 = 8.62272 loss)\nI0822 05:16:00.089699 32551 solver.cpp:228] Iteration 34300, loss = 0.0267721\nI0822 05:16:00.089753 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:16:00.089771 32551 solver.cpp:244]     Train net output #1: loss = 0.0267721 (* 1 = 0.0267721 loss)\nI0822 05:16:00.170286 32551 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0822 05:18:16.227721 32551 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0822 05:19:37.839437 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13148\nI0822 05:19:37.839740 32551 solver.cpp:404]     Test net output #1: loss = 10.7738 (* 1 = 10.7738 loss)\nI0822 05:19:39.150782 32551 solver.cpp:228] Iteration 34400, loss = 0.0618996\nI0822 05:19:39.150836 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 05:19:39.150853 32551 solver.cpp:244]     Train net output #1: loss = 0.0618996 (* 1 = 0.0618996 loss)\nI0822 05:19:39.223526 32551 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0822 
05:21:55.320904 32551 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0822 05:23:16.865223 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14528\nI0822 05:23:16.865525 32551 solver.cpp:404]     Test net output #1: loss = 7.55926 (* 1 = 7.55926 loss)\nI0822 05:23:18.175976 32551 solver.cpp:228] Iteration 34500, loss = 0.0173044\nI0822 05:23:18.176033 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:23:18.176050 32551 solver.cpp:244]     Train net output #1: loss = 0.0173044 (* 1 = 0.0173044 loss)\nI0822 05:23:18.251646 32551 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0822 05:25:34.247804 32551 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0822 05:26:55.848965 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14996\nI0822 05:26:55.849265 32551 solver.cpp:404]     Test net output #1: loss = 6.14541 (* 1 = 6.14541 loss)\nI0822 05:26:57.159729 32551 solver.cpp:228] Iteration 34600, loss = 0.0163131\nI0822 05:26:57.159783 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:26:57.159799 32551 solver.cpp:244]     Train net output #1: loss = 0.016313 (* 1 = 0.016313 loss)\nI0822 05:26:57.241641 32551 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0822 05:29:13.187798 32551 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0822 05:30:34.767849 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1156\nI0822 05:30:34.768172 32551 solver.cpp:404]     Test net output #1: loss = 13.1876 (* 1 = 13.1876 loss)\nI0822 05:30:36.078619 32551 solver.cpp:228] Iteration 34700, loss = 0.0336419\nI0822 05:30:36.078672 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:30:36.078689 32551 solver.cpp:244]     Train net output #1: loss = 0.0336419 (* 1 = 0.0336419 loss)\nI0822 05:30:36.157454 32551 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0822 05:32:52.255523 32551 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0822 05:34:13.825976 32551 solver.cpp:404]     Test net 
output #0: accuracy = 0.10228\nI0822 05:34:13.826277 32551 solver.cpp:404]     Test net output #1: loss = 12.7735 (* 1 = 12.7735 loss)\nI0822 05:34:15.137060 32551 solver.cpp:228] Iteration 34800, loss = 0.0706527\nI0822 05:34:15.137102 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:34:15.137118 32551 solver.cpp:244]     Train net output #1: loss = 0.0706527 (* 1 = 0.0706527 loss)\nI0822 05:34:15.216032 32551 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0822 05:36:31.166836 32551 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0822 05:37:52.745317 32551 solver.cpp:404]     Test net output #0: accuracy = 0.13496\nI0822 05:37:52.745635 32551 solver.cpp:404]     Test net output #1: loss = 6.69739 (* 1 = 6.69739 loss)\nI0822 05:37:54.055786 32551 solver.cpp:228] Iteration 34900, loss = 0.0587646\nI0822 05:37:54.055845 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 05:37:54.055862 32551 solver.cpp:244]     Train net output #1: loss = 0.0587646 (* 1 = 0.0587646 loss)\nI0822 05:37:54.134543 32551 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0822 05:40:10.218827 32551 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0822 05:41:31.816839 32551 solver.cpp:404]     Test net output #0: accuracy = 0.14356\nI0822 05:41:31.817168 32551 solver.cpp:404]     Test net output #1: loss = 10.506 (* 1 = 10.506 loss)\nI0822 05:41:33.127885 32551 solver.cpp:228] Iteration 35000, loss = 0.0514495\nI0822 05:41:33.127946 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 05:41:33.127964 32551 solver.cpp:244]     Train net output #1: loss = 0.0514495 (* 1 = 0.0514495 loss)\nI0822 05:41:33.205845 32551 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0822 05:43:49.189615 32551 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0822 05:45:10.772675 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1618\nI0822 05:45:10.772996 32551 solver.cpp:404]     Test net output #1: loss = 9.70453 (* 1 = 9.70453 
loss)\nI0822 05:45:12.082752 32551 solver.cpp:228] Iteration 35100, loss = 0.0246142\nI0822 05:45:12.082808 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:45:12.082826 32551 solver.cpp:244]     Train net output #1: loss = 0.0246142 (* 1 = 0.0246142 loss)\nI0822 05:45:12.162019 32551 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0822 05:47:28.097012 32551 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0822 05:48:49.693500 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1212\nI0822 05:48:49.693851 32551 solver.cpp:404]     Test net output #1: loss = 12.9144 (* 1 = 12.9144 loss)\nI0822 05:48:51.003938 32551 solver.cpp:228] Iteration 35200, loss = 0.0190038\nI0822 05:48:51.003993 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:48:51.004011 32551 solver.cpp:244]     Train net output #1: loss = 0.0190038 (* 1 = 0.0190038 loss)\nI0822 05:48:51.080721 32551 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0822 05:51:07.120533 32551 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0822 05:52:28.734676 32551 solver.cpp:404]     Test net output #0: accuracy = 0.21628\nI0822 05:52:28.735025 32551 solver.cpp:404]     Test net output #1: loss = 5.64456 (* 1 = 5.64456 loss)\nI0822 05:52:30.045542 32551 solver.cpp:228] Iteration 35300, loss = 0.00685627\nI0822 05:52:30.045593 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:52:30.045610 32551 solver.cpp:244]     Train net output #1: loss = 0.00685627 (* 1 = 0.00685627 loss)\nI0822 05:52:30.123780 32551 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0822 05:54:46.145308 32551 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0822 05:56:07.780783 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50244\nI0822 05:56:07.781112 32551 solver.cpp:404]     Test net output #1: loss = 2.44102 (* 1 = 2.44102 loss)\nI0822 05:56:09.091276 32551 solver.cpp:228] Iteration 35400, loss = 0.0382975\nI0822 05:56:09.091336 32551 solver.cpp:244] 
    Train net output #0: accuracy = 0.992\nI0822 05:56:09.091353 32551 solver.cpp:244]     Train net output #1: loss = 0.0382975 (* 1 = 0.0382975 loss)\nI0822 05:56:09.176968 32551 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0822 05:58:25.283282 32551 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0822 05:59:46.872089 32551 solver.cpp:404]     Test net output #0: accuracy = 0.29896\nI0822 05:59:46.872422 32551 solver.cpp:404]     Test net output #1: loss = 4.41618 (* 1 = 4.41618 loss)\nI0822 05:59:48.181011 32551 solver.cpp:228] Iteration 35500, loss = 0.0435088\nI0822 05:59:48.181071 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:59:48.181090 32551 solver.cpp:244]     Train net output #1: loss = 0.0435088 (* 1 = 0.0435088 loss)\nI0822 05:59:48.258882 32551 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0822 06:02:04.069913 32551 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0822 06:03:25.688227 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36088\nI0822 06:03:25.688546 32551 solver.cpp:404]     Test net output #1: loss = 3.70279 (* 1 = 3.70279 loss)\nI0822 06:03:26.996873 32551 solver.cpp:228] Iteration 35600, loss = 0.0201471\nI0822 06:03:26.996933 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:03:26.996951 32551 solver.cpp:244]     Train net output #1: loss = 0.0201471 (* 1 = 0.0201471 loss)\nI0822 06:03:27.074681 32551 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0822 06:05:43.153262 32551 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0822 06:07:04.783320 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48776\nI0822 06:07:04.783664 32551 solver.cpp:404]     Test net output #1: loss = 2.31513 (* 1 = 2.31513 loss)\nI0822 06:07:06.093273 32551 solver.cpp:228] Iteration 35700, loss = 0.0475527\nI0822 06:07:06.093317 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:07:06.093333 32551 solver.cpp:244]     Train net output #1: loss = 
0.0475527 (* 1 = 0.0475527 loss)\nI0822 06:07:06.173038 32551 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0822 06:09:22.154276 32551 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0822 06:10:43.780396 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36192\nI0822 06:10:43.780712 32551 solver.cpp:404]     Test net output #1: loss = 3.70898 (* 1 = 3.70898 loss)\nI0822 06:10:45.090556 32551 solver.cpp:228] Iteration 35800, loss = 0.046411\nI0822 06:10:45.090616 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:10:45.090636 32551 solver.cpp:244]     Train net output #1: loss = 0.046411 (* 1 = 0.046411 loss)\nI0822 06:10:45.171396 32551 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0822 06:13:01.081249 32551 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0822 06:14:22.641261 32551 solver.cpp:404]     Test net output #0: accuracy = 0.37692\nI0822 06:14:22.641535 32551 solver.cpp:404]     Test net output #1: loss = 3.9584 (* 1 = 3.9584 loss)\nI0822 06:14:23.950383 32551 solver.cpp:228] Iteration 35900, loss = 0.0442314\nI0822 06:14:23.950441 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:14:23.950459 32551 solver.cpp:244]     Train net output #1: loss = 0.0442314 (* 1 = 0.0442314 loss)\nI0822 06:14:24.031886 32551 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0822 06:16:39.775064 32551 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0822 06:18:01.332728 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4356\nI0822 06:18:01.332989 32551 solver.cpp:404]     Test net output #1: loss = 3.20787 (* 1 = 3.20787 loss)\nI0822 06:18:02.641041 32551 solver.cpp:228] Iteration 36000, loss = 0.0105568\nI0822 06:18:02.641099 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:18:02.641116 32551 solver.cpp:244]     Train net output #1: loss = 0.0105569 (* 1 = 0.0105569 loss)\nI0822 06:18:02.717031 32551 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0822 06:20:18.742653 32551 
solver.cpp:337] Iteration 36100, Testing net (#0)\nI0822 06:21:40.331704 32551 solver.cpp:404]     Test net output #0: accuracy = 0.43188\nI0822 06:21:40.331971 32551 solver.cpp:404]     Test net output #1: loss = 3.34903 (* 1 = 3.34903 loss)\nI0822 06:21:41.639788 32551 solver.cpp:228] Iteration 36100, loss = 0.0115803\nI0822 06:21:41.639849 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:21:41.639868 32551 solver.cpp:244]     Train net output #1: loss = 0.0115803 (* 1 = 0.0115803 loss)\nI0822 06:21:41.720696 32551 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0822 06:23:57.877660 32551 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0822 06:25:19.460517 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51436\nI0822 06:25:19.460750 32551 solver.cpp:404]     Test net output #1: loss = 3.51689 (* 1 = 3.51689 loss)\nI0822 06:25:20.768306 32551 solver.cpp:228] Iteration 36200, loss = 0.0107401\nI0822 06:25:20.768364 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:25:20.768383 32551 solver.cpp:244]     Train net output #1: loss = 0.0107401 (* 1 = 0.0107401 loss)\nI0822 06:25:20.845589 32551 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0822 06:27:36.994551 32551 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0822 06:28:58.554327 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3588\nI0822 06:28:58.554584 32551 solver.cpp:404]     Test net output #1: loss = 4.49859 (* 1 = 4.49859 loss)\nI0822 06:28:59.862360 32551 solver.cpp:228] Iteration 36300, loss = 0.0373638\nI0822 06:28:59.862418 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:28:59.862437 32551 solver.cpp:244]     Train net output #1: loss = 0.0373638 (* 1 = 0.0373638 loss)\nI0822 06:28:59.938683 32551 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0822 06:31:16.134748 32551 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0822 06:32:37.589781 32551 solver.cpp:404]     Test net output #0: accuracy = 
0.58388\nI0822 06:32:37.590073 32551 solver.cpp:404]     Test net output #1: loss = 2.0716 (* 1 = 2.0716 loss)\nI0822 06:32:38.897460 32551 solver.cpp:228] Iteration 36400, loss = 0.019487\nI0822 06:32:38.897521 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:32:38.897538 32551 solver.cpp:244]     Train net output #1: loss = 0.019487 (* 1 = 0.019487 loss)\nI0822 06:32:38.983852 32551 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0822 06:34:55.163092 32551 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0822 06:36:16.575747 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60368\nI0822 06:36:16.575990 32551 solver.cpp:404]     Test net output #1: loss = 1.54335 (* 1 = 1.54335 loss)\nI0822 06:36:17.884735 32551 solver.cpp:228] Iteration 36500, loss = 0.0503734\nI0822 06:36:17.884800 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:36:17.884817 32551 solver.cpp:244]     Train net output #1: loss = 0.0503734 (* 1 = 0.0503734 loss)\nI0822 06:36:17.960996 32551 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0822 06:38:34.126308 32551 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0822 06:39:55.697104 32551 solver.cpp:404]     Test net output #0: accuracy = 0.45236\nI0822 06:39:55.697363 32551 solver.cpp:404]     Test net output #1: loss = 3.00924 (* 1 = 3.00924 loss)\nI0822 06:39:57.005792 32551 solver.cpp:228] Iteration 36600, loss = 0.0457945\nI0822 06:39:57.005854 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:39:57.005872 32551 solver.cpp:244]     Train net output #1: loss = 0.0457945 (* 1 = 0.0457945 loss)\nI0822 06:39:57.078907 32551 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0822 06:42:13.255946 32551 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0822 06:43:34.505815 32551 solver.cpp:404]     Test net output #0: accuracy = 0.27024\nI0822 06:43:34.506125 32551 solver.cpp:404]     Test net output #1: loss = 5.03276 (* 1 = 5.03276 loss)\nI0822 06:43:35.815057 
32551 solver.cpp:228] Iteration 36700, loss = 0.00754209\nI0822 06:43:35.815104 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:43:35.815129 32551 solver.cpp:244]     Train net output #1: loss = 0.00754212 (* 1 = 0.00754212 loss)\nI0822 06:43:35.893920 32551 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0822 06:45:51.955139 32551 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0822 06:47:12.498586 32551 solver.cpp:404]     Test net output #0: accuracy = 0.25216\nI0822 06:47:12.498900 32551 solver.cpp:404]     Test net output #1: loss = 4.71913 (* 1 = 4.71913 loss)\nI0822 06:47:13.806989 32551 solver.cpp:228] Iteration 36800, loss = 0.0170011\nI0822 06:47:13.807034 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:47:13.807049 32551 solver.cpp:244]     Train net output #1: loss = 0.0170011 (* 1 = 0.0170011 loss)\nI0822 06:47:13.881227 32551 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0822 06:49:29.895629 32551 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0822 06:50:50.397820 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46912\nI0822 06:50:50.398125 32551 solver.cpp:404]     Test net output #1: loss = 2.45163 (* 1 = 2.45163 loss)\nI0822 06:50:51.705795 32551 solver.cpp:228] Iteration 36900, loss = 0.111676\nI0822 06:50:51.705839 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 06:50:51.705855 32551 solver.cpp:244]     Train net output #1: loss = 0.111676 (* 1 = 0.111676 loss)\nI0822 06:50:51.784037 32551 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0822 06:53:07.669764 32551 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0822 06:54:28.154462 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36488\nI0822 06:54:28.154775 32551 solver.cpp:404]     Test net output #1: loss = 4.09218 (* 1 = 4.09218 loss)\nI0822 06:54:29.462985 32551 solver.cpp:228] Iteration 37000, loss = 0.0358564\nI0822 06:54:29.463029 32551 solver.cpp:244]     Train net output #0: accuracy 
= 0.984\nI0822 06:54:29.463047 32551 solver.cpp:244]     Train net output #1: loss = 0.0358565 (* 1 = 0.0358565 loss)\nI0822 06:54:29.545449 32551 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0822 06:56:45.538550 32551 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0822 06:58:06.014585 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15176\nI0822 06:58:06.014891 32551 solver.cpp:404]     Test net output #1: loss = 9.3185 (* 1 = 9.3185 loss)\nI0822 06:58:07.323189 32551 solver.cpp:228] Iteration 37100, loss = 0.0667851\nI0822 06:58:07.323233 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:58:07.323249 32551 solver.cpp:244]     Train net output #1: loss = 0.0667852 (* 1 = 0.0667852 loss)\nI0822 06:58:07.398596 32551 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0822 07:00:23.316059 32551 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0822 07:01:43.800002 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54936\nI0822 07:01:43.800304 32551 solver.cpp:404]     Test net output #1: loss = 2.20008 (* 1 = 2.20008 loss)\nI0822 07:01:45.108525 32551 solver.cpp:228] Iteration 37200, loss = 0.0557865\nI0822 07:01:45.108569 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:01:45.108585 32551 solver.cpp:244]     Train net output #1: loss = 0.0557866 (* 1 = 0.0557866 loss)\nI0822 07:01:45.189183 32551 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0822 07:04:01.039904 32551 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0822 07:05:21.523313 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57476\nI0822 07:05:21.523627 32551 solver.cpp:404]     Test net output #1: loss = 1.98541 (* 1 = 1.98541 loss)\nI0822 07:05:22.831245 32551 solver.cpp:228] Iteration 37300, loss = 0.0403466\nI0822 07:05:22.831291 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:05:22.831307 32551 solver.cpp:244]     Train net output #1: loss = 0.0403468 (* 1 = 0.0403468 loss)\nI0822 
07:05:22.914203 32551 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0822 07:07:38.662871 32551 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0822 07:08:59.167490 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58412\nI0822 07:08:59.167800 32551 solver.cpp:404]     Test net output #1: loss = 2.37831 (* 1 = 2.37831 loss)\nI0822 07:09:00.477217 32551 solver.cpp:228] Iteration 37400, loss = 0.0138225\nI0822 07:09:00.477263 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:09:00.477280 32551 solver.cpp:244]     Train net output #1: loss = 0.0138226 (* 1 = 0.0138226 loss)\nI0822 07:09:00.558455 32551 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0822 07:11:16.331377 32551 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0822 07:12:36.854893 32551 solver.cpp:404]     Test net output #0: accuracy = 0.539\nI0822 07:12:36.855221 32551 solver.cpp:404]     Test net output #1: loss = 2.17862 (* 1 = 2.17862 loss)\nI0822 07:12:38.165788 32551 solver.cpp:228] Iteration 37500, loss = 0.0395981\nI0822 07:12:38.165832 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:12:38.165849 32551 solver.cpp:244]     Train net output #1: loss = 0.0395982 (* 1 = 0.0395982 loss)\nI0822 07:12:38.245270 32551 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0822 07:14:53.909066 32551 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0822 07:16:14.390030 32551 solver.cpp:404]     Test net output #0: accuracy = 0.37688\nI0822 07:16:14.390344 32551 solver.cpp:404]     Test net output #1: loss = 4.18309 (* 1 = 4.18309 loss)\nI0822 07:16:15.697137 32551 solver.cpp:228] Iteration 37600, loss = 0.00648324\nI0822 07:16:15.697178 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:16:15.697194 32551 solver.cpp:244]     Train net output #1: loss = 0.00648337 (* 1 = 0.00648337 loss)\nI0822 07:16:15.782207 32551 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0822 07:18:31.432330 32551 solver.cpp:337] Iteration 37700, 
Testing net (#0)\nI0822 07:19:51.913744 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4014\nI0822 07:19:51.914043 32551 solver.cpp:404]     Test net output #1: loss = 3.40213 (* 1 = 3.40213 loss)\nI0822 07:19:53.220284 32551 solver.cpp:228] Iteration 37700, loss = 0.0514508\nI0822 07:19:53.220327 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:19:53.220343 32551 solver.cpp:244]     Train net output #1: loss = 0.0514509 (* 1 = 0.0514509 loss)\nI0822 07:19:53.297354 32551 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0822 07:22:09.019103 32551 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0822 07:23:29.497409 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4562\nI0822 07:23:29.497653 32551 solver.cpp:404]     Test net output #1: loss = 3.25847 (* 1 = 3.25847 loss)\nI0822 07:23:30.804918 32551 solver.cpp:228] Iteration 37800, loss = 0.0218324\nI0822 07:23:30.804960 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:23:30.804975 32551 solver.cpp:244]     Train net output #1: loss = 0.0218325 (* 1 = 0.0218325 loss)\nI0822 07:23:30.878314 32551 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0822 07:25:46.440485 32551 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0822 07:27:06.920295 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49576\nI0822 07:27:06.920563 32551 solver.cpp:404]     Test net output #1: loss = 3.19707 (* 1 = 3.19707 loss)\nI0822 07:27:08.227566 32551 solver.cpp:228] Iteration 37900, loss = 0.0548977\nI0822 07:27:08.227610 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:27:08.227627 32551 solver.cpp:244]     Train net output #1: loss = 0.0548978 (* 1 = 0.0548978 loss)\nI0822 07:27:08.311398 32551 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0822 07:29:23.798537 32551 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0822 07:30:44.279232 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36616\nI0822 07:30:44.279503 
32551 solver.cpp:404]     Test net output #1: loss = 4.28785 (* 1 = 4.28785 loss)\nI0822 07:30:45.586185 32551 solver.cpp:228] Iteration 38000, loss = 0.0735567\nI0822 07:30:45.586230 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:30:45.586246 32551 solver.cpp:244]     Train net output #1: loss = 0.0735569 (* 1 = 0.0735569 loss)\nI0822 07:30:45.665812 32551 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0822 07:33:01.294411 32551 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0822 07:34:21.774569 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54632\nI0822 07:34:21.774821 32551 solver.cpp:404]     Test net output #1: loss = 2.47343 (* 1 = 2.47343 loss)\nI0822 07:34:23.081717 32551 solver.cpp:228] Iteration 38100, loss = 0.0342765\nI0822 07:34:23.081768 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:34:23.081787 32551 solver.cpp:244]     Train net output #1: loss = 0.0342766 (* 1 = 0.0342766 loss)\nI0822 07:34:23.164610 32551 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0822 07:36:38.842612 32551 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0822 07:37:59.325312 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54108\nI0822 07:37:59.325598 32551 solver.cpp:404]     Test net output #1: loss = 2.56642 (* 1 = 2.56642 loss)\nI0822 07:38:00.632324 32551 solver.cpp:228] Iteration 38200, loss = 0.0710496\nI0822 07:38:00.632369 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 07:38:00.632385 32551 solver.cpp:244]     Train net output #1: loss = 0.0710498 (* 1 = 0.0710498 loss)\nI0822 07:38:00.716825 32551 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0822 07:40:16.375922 32551 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0822 07:41:36.840313 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5752\nI0822 07:41:36.840553 32551 solver.cpp:404]     Test net output #1: loss = 2.68112 (* 1 = 2.68112 loss)\nI0822 07:41:38.147627 32551 solver.cpp:228] 
Iteration 38300, loss = 0.0300381\nI0822 07:41:38.147672 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:41:38.147688 32551 solver.cpp:244]     Train net output #1: loss = 0.0300382 (* 1 = 0.0300382 loss)\nI0822 07:41:38.226471 32551 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0822 07:43:53.883002 32551 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0822 07:45:14.353273 32551 solver.cpp:404]     Test net output #0: accuracy = 0.529\nI0822 07:45:14.353518 32551 solver.cpp:404]     Test net output #1: loss = 2.77491 (* 1 = 2.77491 loss)\nI0822 07:45:15.659792 32551 solver.cpp:228] Iteration 38400, loss = 0.029991\nI0822 07:45:15.659837 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:45:15.659852 32551 solver.cpp:244]     Train net output #1: loss = 0.0299912 (* 1 = 0.0299912 loss)\nI0822 07:45:15.739722 32551 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0822 07:47:31.333057 32551 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0822 07:48:51.800771 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62916\nI0822 07:48:51.801035 32551 solver.cpp:404]     Test net output #1: loss = 1.84277 (* 1 = 1.84277 loss)\nI0822 07:48:53.107985 32551 solver.cpp:228] Iteration 38500, loss = 0.0453375\nI0822 07:48:53.108027 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:48:53.108043 32551 solver.cpp:244]     Train net output #1: loss = 0.0453376 (* 1 = 0.0453376 loss)\nI0822 07:48:53.187829 32551 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0822 07:51:08.810544 32551 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0822 07:52:29.283033 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5454\nI0822 07:52:29.283327 32551 solver.cpp:404]     Test net output #1: loss = 2.64674 (* 1 = 2.64674 loss)\nI0822 07:52:30.590158 32551 solver.cpp:228] Iteration 38600, loss = 0.00613993\nI0822 07:52:30.590201 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
07:52:30.590217 32551 solver.cpp:244]     Train net output #1: loss = 0.00614003 (* 1 = 0.00614003 loss)\nI0822 07:52:30.670203 32551 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0822 07:54:46.246120 32551 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0822 07:56:06.715237 32551 solver.cpp:404]     Test net output #0: accuracy = 0.68188\nI0822 07:56:06.715517 32551 solver.cpp:404]     Test net output #1: loss = 1.66949 (* 1 = 1.66949 loss)\nI0822 07:56:08.023067 32551 solver.cpp:228] Iteration 38700, loss = 0.0319758\nI0822 07:56:08.023113 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:56:08.023128 32551 solver.cpp:244]     Train net output #1: loss = 0.0319759 (* 1 = 0.0319759 loss)\nI0822 07:56:08.100740 32551 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0822 07:58:23.703086 32551 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0822 07:59:44.159348 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6282\nI0822 07:59:44.159618 32551 solver.cpp:404]     Test net output #1: loss = 1.77514 (* 1 = 1.77514 loss)\nI0822 07:59:45.466534 32551 solver.cpp:228] Iteration 38800, loss = 0.0230179\nI0822 07:59:45.466578 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:59:45.466595 32551 solver.cpp:244]     Train net output #1: loss = 0.023018 (* 1 = 0.023018 loss)\nI0822 07:59:45.542367 32551 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0822 08:02:01.105545 32551 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0822 08:03:21.571395 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5356\nI0822 08:03:21.571697 32551 solver.cpp:404]     Test net output #1: loss = 2.4762 (* 1 = 2.4762 loss)\nI0822 08:03:22.878366 32551 solver.cpp:228] Iteration 38900, loss = 0.039744\nI0822 08:03:22.878410 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:03:22.878427 32551 solver.cpp:244]     Train net output #1: loss = 0.0397441 (* 1 = 0.0397441 loss)\nI0822 08:03:22.952517 32551 
sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0822 08:05:38.601848 32551 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0822 08:06:59.070734 32551 solver.cpp:404]     Test net output #0: accuracy = 0.47552\nI0822 08:06:59.071034 32551 solver.cpp:404]     Test net output #1: loss = 2.79463 (* 1 = 2.79463 loss)\nI0822 08:07:00.377950 32551 solver.cpp:228] Iteration 39000, loss = 0.0186257\nI0822 08:07:00.377993 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:07:00.378008 32551 solver.cpp:244]     Train net output #1: loss = 0.0186259 (* 1 = 0.0186259 loss)\nI0822 08:07:00.458520 32551 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0822 08:09:16.062593 32551 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0822 08:10:36.505178 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54144\nI0822 08:10:36.505477 32551 solver.cpp:404]     Test net output #1: loss = 2.65675 (* 1 = 2.65675 loss)\nI0822 08:10:37.812063 32551 solver.cpp:228] Iteration 39100, loss = 0.0781546\nI0822 08:10:37.812106 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 08:10:37.812122 32551 solver.cpp:244]     Train net output #1: loss = 0.0781547 (* 1 = 0.0781547 loss)\nI0822 08:10:37.889183 32551 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0822 08:12:53.556299 32551 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0822 08:14:13.907681 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64132\nI0822 08:14:13.907997 32551 solver.cpp:404]     Test net output #1: loss = 1.96741 (* 1 = 1.96741 loss)\nI0822 08:14:15.215183 32551 solver.cpp:228] Iteration 39200, loss = 0.0259936\nI0822 08:14:15.215227 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:14:15.215243 32551 solver.cpp:244]     Train net output #1: loss = 0.0259938 (* 1 = 0.0259938 loss)\nI0822 08:14:15.292043 32551 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0822 08:16:31.068670 32551 solver.cpp:337] Iteration 39300, Testing net 
(#0)\nI0822 08:17:51.438289 32551 solver.cpp:404]     Test net output #0: accuracy = 0.66784\nI0822 08:17:51.438591 32551 solver.cpp:404]     Test net output #1: loss = 1.76387 (* 1 = 1.76387 loss)\nI0822 08:17:52.745985 32551 solver.cpp:228] Iteration 39300, loss = 0.0109332\nI0822 08:17:52.746028 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:17:52.746044 32551 solver.cpp:244]     Train net output #1: loss = 0.0109333 (* 1 = 0.0109333 loss)\nI0822 08:17:52.823598 32551 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0822 08:20:08.337635 32551 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0822 08:21:28.742905 32551 solver.cpp:404]     Test net output #0: accuracy = 0.67468\nI0822 08:21:28.743222 32551 solver.cpp:404]     Test net output #1: loss = 1.57178 (* 1 = 1.57178 loss)\nI0822 08:21:30.049890 32551 solver.cpp:228] Iteration 39400, loss = 0.0348048\nI0822 08:21:30.049931 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:21:30.049947 32551 solver.cpp:244]     Train net output #1: loss = 0.034805 (* 1 = 0.034805 loss)\nI0822 08:21:30.123000 32551 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0822 08:23:45.727226 32551 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0822 08:25:06.088234 32551 solver.cpp:404]     Test net output #0: accuracy = 0.65632\nI0822 08:25:06.088521 32551 solver.cpp:404]     Test net output #1: loss = 1.89886 (* 1 = 1.89886 loss)\nI0822 08:25:07.395517 32551 solver.cpp:228] Iteration 39500, loss = 0.00356391\nI0822 08:25:07.395557 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:25:07.395573 32551 solver.cpp:244]     Train net output #1: loss = 0.00356407 (* 1 = 0.00356407 loss)\nI0822 08:25:07.473568 32551 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0822 08:27:23.209182 32551 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0822 08:28:43.582975 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57808\nI0822 08:28:43.583272 32551 
solver.cpp:404]     Test net output #1: loss = 2.29823 (* 1 = 2.29823 loss)\nI0822 08:28:44.889843 32551 solver.cpp:228] Iteration 39600, loss = 0.0399835\nI0822 08:28:44.889883 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:28:44.889899 32551 solver.cpp:244]     Train net output #1: loss = 0.0399837 (* 1 = 0.0399837 loss)\nI0822 08:28:44.976804 32551 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0822 08:31:00.642995 32551 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0822 08:32:21.008941 32551 solver.cpp:404]     Test net output #0: accuracy = 0.67868\nI0822 08:32:21.009250 32551 solver.cpp:404]     Test net output #1: loss = 1.57909 (* 1 = 1.57909 loss)\nI0822 08:32:22.315729 32551 solver.cpp:228] Iteration 39700, loss = 0.0044428\nI0822 08:32:22.315773 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:32:22.315789 32551 solver.cpp:244]     Train net output #1: loss = 0.00444296 (* 1 = 0.00444296 loss)\nI0822 08:32:22.403595 32551 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0822 08:34:37.952283 32551 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0822 08:35:58.324357 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58928\nI0822 08:35:58.324652 32551 solver.cpp:404]     Test net output #1: loss = 2.27267 (* 1 = 2.27267 loss)\nI0822 08:35:59.631927 32551 solver.cpp:228] Iteration 39800, loss = 0.0120489\nI0822 08:35:59.631965 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:35:59.631980 32551 solver.cpp:244]     Train net output #1: loss = 0.012049 (* 1 = 0.012049 loss)\nI0822 08:35:59.713955 32551 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0822 08:38:15.409279 32551 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0822 08:39:35.808552 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62184\nI0822 08:39:35.808861 32551 solver.cpp:404]     Test net output #1: loss = 2.05023 (* 1 = 2.05023 loss)\nI0822 08:39:37.116485 32551 solver.cpp:228] Iteration 
39900, loss = 0.0178272\nI0822 08:39:37.116524 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:39:37.116540 32551 solver.cpp:244]     Train net output #1: loss = 0.0178273 (* 1 = 0.0178273 loss)\nI0822 08:39:37.200507 32551 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0822 08:41:52.834367 32551 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0822 08:43:13.233145 32551 solver.cpp:404]     Test net output #0: accuracy = 0.43064\nI0822 08:43:13.233456 32551 solver.cpp:404]     Test net output #1: loss = 4.09112 (* 1 = 4.09112 loss)\nI0822 08:43:14.539670 32551 solver.cpp:228] Iteration 40000, loss = 0.0306301\nI0822 08:43:14.539714 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:43:14.539731 32551 solver.cpp:244]     Train net output #1: loss = 0.0306303 (* 1 = 0.0306303 loss)\nI0822 08:43:14.624060 32551 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0822 08:45:30.363420 32551 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0822 08:46:50.856320 32551 solver.cpp:404]     Test net output #0: accuracy = 0.68668\nI0822 08:46:50.856642 32551 solver.cpp:404]     Test net output #1: loss = 1.29981 (* 1 = 1.29981 loss)\nI0822 08:46:52.164744 32551 solver.cpp:228] Iteration 40100, loss = 0.0936094\nI0822 08:46:52.164791 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:46:52.164808 32551 solver.cpp:244]     Train net output #1: loss = 0.0936096 (* 1 = 0.0936096 loss)\nI0822 08:46:52.242707 32551 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0822 08:49:08.060428 32551 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0822 08:50:29.422431 32551 solver.cpp:404]     Test net output #0: accuracy = 0.70496\nI0822 08:50:29.422690 32551 solver.cpp:404]     Test net output #1: loss = 1.49521 (* 1 = 1.49521 loss)\nI0822 08:50:30.733655 32551 solver.cpp:228] Iteration 40200, loss = 0.0601382\nI0822 08:50:30.733716 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 
08:50:30.733734 32551 solver.cpp:244]     Train net output #1: loss = 0.0601384 (* 1 = 0.0601384 loss)\nI0822 08:50:30.812258 32551 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0822 08:52:46.989246 32551 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0822 08:54:08.519260 32551 solver.cpp:404]     Test net output #0: accuracy = 0.67196\nI0822 08:54:08.519526 32551 solver.cpp:404]     Test net output #1: loss = 1.71936 (* 1 = 1.71936 loss)\nI0822 08:54:09.829928 32551 solver.cpp:228] Iteration 40300, loss = 0.0311492\nI0822 08:54:09.829991 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:54:09.830018 32551 solver.cpp:244]     Train net output #1: loss = 0.0311493 (* 1 = 0.0311493 loss)\nI0822 08:54:09.906586 32551 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0822 08:56:26.172806 32551 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0822 08:57:47.680438 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61128\nI0822 08:57:47.680681 32551 solver.cpp:404]     Test net output #1: loss = 2.08746 (* 1 = 2.08746 loss)\nI0822 08:57:48.990664 32551 solver.cpp:228] Iteration 40400, loss = 0.00514295\nI0822 08:57:48.990725 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:57:48.990742 32551 solver.cpp:244]     Train net output #1: loss = 0.00514312 (* 1 = 0.00514312 loss)\nI0822 08:57:49.073719 32551 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0822 09:00:05.147992 32551 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0822 09:01:26.669813 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48328\nI0822 09:01:26.670069 32551 solver.cpp:404]     Test net output #1: loss = 2.50125 (* 1 = 2.50125 loss)\nI0822 09:01:27.981775 32551 solver.cpp:228] Iteration 40500, loss = 0.00221366\nI0822 09:01:27.981837 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:01:27.981853 32551 solver.cpp:244]     Train net output #1: loss = 0.00221382 (* 1 = 0.00221382 loss)\nI0822 09:01:28.057773 
32551 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0822 09:03:44.306957 32551 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0822 09:05:05.749336 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56072\nI0822 09:05:05.749575 32551 solver.cpp:404]     Test net output #1: loss = 1.80627 (* 1 = 1.80627 loss)\nI0822 09:05:07.060199 32551 solver.cpp:228] Iteration 40600, loss = 0.00537993\nI0822 09:05:07.060261 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:05:07.060278 32551 solver.cpp:244]     Train net output #1: loss = 0.00538008 (* 1 = 0.00538008 loss)\nI0822 09:05:07.135815 32551 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0822 09:07:23.342831 32551 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0822 09:08:44.880894 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63824\nI0822 09:08:44.881136 32551 solver.cpp:404]     Test net output #1: loss = 1.71716 (* 1 = 1.71716 loss)\nI0822 09:08:46.191946 32551 solver.cpp:228] Iteration 40700, loss = 0.0871274\nI0822 09:08:46.192008 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:08:46.192025 32551 solver.cpp:244]     Train net output #1: loss = 0.0871276 (* 1 = 0.0871276 loss)\nI0822 09:08:46.273902 32551 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0822 09:11:02.478937 32551 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0822 09:12:23.700064 32551 solver.cpp:404]     Test net output #0: accuracy = 0.65916\nI0822 09:12:23.700351 32551 solver.cpp:404]     Test net output #1: loss = 1.85833 (* 1 = 1.85833 loss)\nI0822 09:12:25.010900 32551 solver.cpp:228] Iteration 40800, loss = 0.0316112\nI0822 09:12:25.010954 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:12:25.010972 32551 solver.cpp:244]     Train net output #1: loss = 0.0316113 (* 1 = 0.0316113 loss)\nI0822 09:12:25.086938 32551 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0822 09:14:41.347331 32551 solver.cpp:337] Iteration 40900, Testing net 
(#0)\nI0822 09:16:02.801942 32551 solver.cpp:404]     Test net output #0: accuracy = 0.72284\nI0822 09:16:02.802217 32551 solver.cpp:404]     Test net output #1: loss = 1.32512 (* 1 = 1.32512 loss)\nI0822 09:16:04.113028 32551 solver.cpp:228] Iteration 40900, loss = 0.044254\nI0822 09:16:04.113081 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:16:04.113098 32551 solver.cpp:244]     Train net output #1: loss = 0.0442541 (* 1 = 0.0442541 loss)\nI0822 09:16:04.188673 32551 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0822 09:18:20.165801 32551 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0822 09:19:41.592950 32551 solver.cpp:404]     Test net output #0: accuracy = 0.70184\nI0822 09:19:41.593200 32551 solver.cpp:404]     Test net output #1: loss = 1.28116 (* 1 = 1.28116 loss)\nI0822 09:19:42.903592 32551 solver.cpp:228] Iteration 41000, loss = 0.00527027\nI0822 09:19:42.903645 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:19:42.903661 32551 solver.cpp:244]     Train net output #1: loss = 0.0052704 (* 1 = 0.0052704 loss)\nI0822 09:19:42.981359 32551 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0822 09:21:59.064846 32551 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0822 09:23:20.525125 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61564\nI0822 09:23:20.525413 32551 solver.cpp:404]     Test net output #1: loss = 1.63719 (* 1 = 1.63719 loss)\nI0822 09:23:21.835276 32551 solver.cpp:228] Iteration 41100, loss = 0.0775941\nI0822 09:23:21.835330 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 09:23:21.835345 32551 solver.cpp:244]     Train net output #1: loss = 0.0775942 (* 1 = 0.0775942 loss)\nI0822 09:23:21.919018 32551 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0822 09:25:37.928647 32551 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0822 09:26:59.413741 32551 solver.cpp:404]     Test net output #0: accuracy = 0.65976\nI0822 09:26:59.414021 32551 
solver.cpp:404]     Test net output #1: loss = 1.47475 (* 1 = 1.47475 loss)\nI0822 09:27:00.724292 32551 solver.cpp:228] Iteration 41200, loss = 0.00988566\nI0822 09:27:00.724349 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:27:00.724365 32551 solver.cpp:244]     Train net output #1: loss = 0.00988579 (* 1 = 0.00988579 loss)\nI0822 09:27:00.812942 32551 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0822 09:29:16.769317 32551 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0822 09:30:38.229312 32551 solver.cpp:404]     Test net output #0: accuracy = 0.71384\nI0822 09:30:38.229614 32551 solver.cpp:404]     Test net output #1: loss = 1.28923 (* 1 = 1.28923 loss)\nI0822 09:30:39.539656 32551 solver.cpp:228] Iteration 41300, loss = 0.0326283\nI0822 09:30:39.539713 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:30:39.539731 32551 solver.cpp:244]     Train net output #1: loss = 0.0326285 (* 1 = 0.0326285 loss)\nI0822 09:30:39.619441 32551 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0822 09:32:55.661737 32551 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0822 09:34:17.035944 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53284\nI0822 09:34:17.036221 32551 solver.cpp:404]     Test net output #1: loss = 2.24744 (* 1 = 2.24744 loss)\nI0822 09:34:18.346263 32551 solver.cpp:228] Iteration 41400, loss = 0.0171534\nI0822 09:34:18.346318 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:34:18.346334 32551 solver.cpp:244]     Train net output #1: loss = 0.0171535 (* 1 = 0.0171535 loss)\nI0822 09:34:18.424013 32551 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0822 09:36:34.362354 32551 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0822 09:37:55.558562 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62544\nI0822 09:37:55.558835 32551 solver.cpp:404]     Test net output #1: loss = 1.67608 (* 1 = 1.67608 loss)\nI0822 09:37:56.869626 32551 solver.cpp:228] 
Iteration 41500, loss = 0.0212268\nI0822 09:37:56.869683 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:37:56.869699 32551 solver.cpp:244]     Train net output #1: loss = 0.0212269 (* 1 = 0.0212269 loss)\nI0822 09:37:56.949566 32551 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0822 09:40:12.968767 32551 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0822 09:41:34.518784 32551 solver.cpp:404]     Test net output #0: accuracy = 0.66696\nI0822 09:41:34.519035 32551 solver.cpp:404]     Test net output #1: loss = 1.35799 (* 1 = 1.35799 loss)\nI0822 09:41:35.830318 32551 solver.cpp:228] Iteration 41600, loss = 0.0854322\nI0822 09:41:35.830380 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 09:41:35.830399 32551 solver.cpp:244]     Train net output #1: loss = 0.0854324 (* 1 = 0.0854324 loss)\nI0822 09:41:35.905120 32551 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0822 09:43:51.794665 32551 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0822 09:45:13.312548 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6368\nI0822 09:45:13.312814 32551 solver.cpp:404]     Test net output #1: loss = 1.96694 (* 1 = 1.96694 loss)\nI0822 09:45:14.625125 32551 solver.cpp:228] Iteration 41700, loss = 0.055198\nI0822 09:45:14.625188 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:45:14.625207 32551 solver.cpp:244]     Train net output #1: loss = 0.0551981 (* 1 = 0.0551981 loss)\nI0822 09:45:14.706243 32551 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0822 09:47:30.711613 32551 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0822 09:48:52.248133 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56248\nI0822 09:48:52.248411 32551 solver.cpp:404]     Test net output #1: loss = 3.03697 (* 1 = 3.03697 loss)\nI0822 09:48:53.560147 32551 solver.cpp:228] Iteration 41800, loss = 0.0269324\nI0822 09:48:53.560206 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 
09:48:53.560225 32551 solver.cpp:244]     Train net output #1: loss = 0.0269325 (* 1 = 0.0269325 loss)\nI0822 09:48:53.643088 32551 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0822 09:51:09.743890 32551 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0822 09:52:31.165390 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53836\nI0822 09:52:31.165702 32551 solver.cpp:404]     Test net output #1: loss = 2.99592 (* 1 = 2.99592 loss)\nI0822 09:52:32.476897 32551 solver.cpp:228] Iteration 41900, loss = 0.0208112\nI0822 09:52:32.476958 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:52:32.476975 32551 solver.cpp:244]     Train net output #1: loss = 0.0208113 (* 1 = 0.0208113 loss)\nI0822 09:52:32.558691 32551 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0822 09:54:48.530575 32551 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0822 09:56:09.596696 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6256\nI0822 09:56:09.596948 32551 solver.cpp:404]     Test net output #1: loss = 1.90014 (* 1 = 1.90014 loss)\nI0822 09:56:10.908855 32551 solver.cpp:228] Iteration 42000, loss = 0.052453\nI0822 09:56:10.908922 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:56:10.908941 32551 solver.cpp:244]     Train net output #1: loss = 0.0524531 (* 1 = 0.0524531 loss)\nI0822 09:56:10.988024 32551 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0822 09:58:27.086220 32551 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0822 09:59:48.542183 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61624\nI0822 09:59:48.542438 32551 solver.cpp:404]     Test net output #1: loss = 1.89497 (* 1 = 1.89497 loss)\nI0822 09:59:49.853842 32551 solver.cpp:228] Iteration 42100, loss = 0.00735025\nI0822 09:59:49.853907 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:59:49.853925 32551 solver.cpp:244]     Train net output #1: loss = 0.00735039 (* 1 = 0.00735039 loss)\nI0822 09:59:49.930037 
32551 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0822 10:02:05.979475 32551 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0822 10:03:27.320505 32551 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0822 10:03:27.320780 32551 solver.cpp:404]     Test net output #1: loss = 1.23497 (* 1 = 1.23497 loss)\nI0822 10:03:28.632246 32551 solver.cpp:228] Iteration 42200, loss = 0.00754261\nI0822 10:03:28.632306 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:03:28.632323 32551 solver.cpp:244]     Train net output #1: loss = 0.00754276 (* 1 = 0.00754276 loss)\nI0822 10:03:28.704337 32551 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0822 10:05:44.611377 32551 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0822 10:07:05.836344 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58072\nI0822 10:07:05.836596 32551 solver.cpp:404]     Test net output #1: loss = 2.37101 (* 1 = 2.37101 loss)\nI0822 10:07:07.148020 32551 solver.cpp:228] Iteration 42300, loss = 0.0206568\nI0822 10:07:07.148080 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:07:07.148098 32551 solver.cpp:244]     Train net output #1: loss = 0.0206569 (* 1 = 0.0206569 loss)\nI0822 10:07:07.226001 32551 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0822 10:09:23.193495 32551 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0822 10:10:44.549090 32551 solver.cpp:404]     Test net output #0: accuracy = 0.47336\nI0822 10:10:44.549362 32551 solver.cpp:404]     Test net output #1: loss = 3.44198 (* 1 = 3.44198 loss)\nI0822 10:10:45.860779 32551 solver.cpp:228] Iteration 42400, loss = 0.0362028\nI0822 10:10:45.860839 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:10:45.860857 32551 solver.cpp:244]     Train net output #1: loss = 0.0362029 (* 1 = 0.0362029 loss)\nI0822 10:10:45.941045 32551 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0822 10:13:01.932041 32551 solver.cpp:337] Iteration 42500, Testing net 
(#0)\nI0822 10:14:23.335206 32551 solver.cpp:404]     Test net output #0: accuracy = 0.63684\nI0822 10:14:23.335472 32551 solver.cpp:404]     Test net output #1: loss = 1.89648 (* 1 = 1.89648 loss)\nI0822 10:14:24.646667 32551 solver.cpp:228] Iteration 42500, loss = 0.0304639\nI0822 10:14:24.646724 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:14:24.646742 32551 solver.cpp:244]     Train net output #1: loss = 0.030464 (* 1 = 0.030464 loss)\nI0822 10:14:24.723429 32551 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0822 10:16:40.692107 32551 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0822 10:18:02.054234 32551 solver.cpp:404]     Test net output #0: accuracy = 0.70972\nI0822 10:18:02.054491 32551 solver.cpp:404]     Test net output #1: loss = 1.41081 (* 1 = 1.41081 loss)\nI0822 10:18:03.364157 32551 solver.cpp:228] Iteration 42600, loss = 0.0721764\nI0822 10:18:03.364215 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:18:03.364233 32551 solver.cpp:244]     Train net output #1: loss = 0.0721765 (* 1 = 0.0721765 loss)\nI0822 10:18:03.445302 32551 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0822 10:20:19.503702 32551 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0822 10:21:40.805554 32551 solver.cpp:404]     Test net output #0: accuracy = 0.65084\nI0822 10:21:40.805785 32551 solver.cpp:404]     Test net output #1: loss = 1.72871 (* 1 = 1.72871 loss)\nI0822 10:21:42.115610 32551 solver.cpp:228] Iteration 42700, loss = 0.0151359\nI0822 10:21:42.115667 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:21:42.115685 32551 solver.cpp:244]     Train net output #1: loss = 0.0151361 (* 1 = 0.0151361 loss)\nI0822 10:21:42.195569 32551 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0822 10:23:58.208892 32551 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0822 10:25:19.370965 32551 solver.cpp:404]     Test net output #0: accuracy = 0.67772\nI0822 10:25:19.371206 32551 
solver.cpp:404]     Test net output #1: loss = 1.75756 (* 1 = 1.75756 loss)\nI0822 10:25:20.681674 32551 solver.cpp:228] Iteration 42800, loss = 0.0135745\nI0822 10:25:20.681737 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:25:20.681756 32551 solver.cpp:244]     Train net output #1: loss = 0.0135746 (* 1 = 0.0135746 loss)\nI0822 10:25:20.760745 32551 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0822 10:27:36.732873 32551 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0822 10:28:58.043107 32551 solver.cpp:404]     Test net output #0: accuracy = 0.714\nI0822 10:28:58.043347 32551 solver.cpp:404]     Test net output #1: loss = 1.44553 (* 1 = 1.44553 loss)\nI0822 10:28:59.353516 32551 solver.cpp:228] Iteration 42900, loss = 0.00277367\nI0822 10:28:59.353579 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:28:59.353596 32551 solver.cpp:244]     Train net output #1: loss = 0.00277382 (* 1 = 0.00277382 loss)\nI0822 10:28:59.435528 32551 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0822 10:31:15.238108 32551 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0822 10:32:36.579313 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54232\nI0822 10:32:36.579593 32551 solver.cpp:404]     Test net output #1: loss = 2.71776 (* 1 = 2.71776 loss)\nI0822 10:32:37.887225 32551 solver.cpp:228] Iteration 43000, loss = 0.034866\nI0822 10:32:37.887286 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:32:37.887305 32551 solver.cpp:244]     Train net output #1: loss = 0.0348662 (* 1 = 0.0348662 loss)\nI0822 10:32:37.964809 32551 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0822 10:34:53.675936 32551 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0822 10:36:15.164176 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46644\nI0822 10:36:15.164446 32551 solver.cpp:404]     Test net output #1: loss = 3.24471 (* 1 = 3.24471 loss)\nI0822 10:36:16.472407 32551 solver.cpp:228] Iteration 
43100, loss = 0.0126451\nI0822 10:36:16.472468 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:36:16.472486 32551 solver.cpp:244]     Train net output #1: loss = 0.0126453 (* 1 = 0.0126453 loss)\nI0822 10:36:16.549764 32551 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0822 10:38:32.411123 32551 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0822 10:39:53.803056 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62212\nI0822 10:39:53.803316 32551 solver.cpp:404]     Test net output #1: loss = 1.79005 (* 1 = 1.79005 loss)\nI0822 10:39:55.111109 32551 solver.cpp:228] Iteration 43200, loss = 0.0122639\nI0822 10:39:55.111169 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:39:55.111187 32551 solver.cpp:244]     Train net output #1: loss = 0.0122641 (* 1 = 0.0122641 loss)\nI0822 10:39:55.186553 32551 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0822 10:42:10.875996 32551 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0822 10:43:32.417620 32551 solver.cpp:404]     Test net output #0: accuracy = 0.71008\nI0822 10:43:32.417865 32551 solver.cpp:404]     Test net output #1: loss = 1.43619 (* 1 = 1.43619 loss)\nI0822 10:43:33.726119 32551 solver.cpp:228] Iteration 43300, loss = 0.0381665\nI0822 10:43:33.726177 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:43:33.726196 32551 solver.cpp:244]     Train net output #1: loss = 0.0381667 (* 1 = 0.0381667 loss)\nI0822 10:43:33.807126 32551 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0822 10:45:49.788924 32551 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0822 10:47:11.347039 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64504\nI0822 10:47:11.347378 32551 solver.cpp:404]     Test net output #1: loss = 1.9185 (* 1 = 1.9185 loss)\nI0822 10:47:12.655519 32551 solver.cpp:228] Iteration 43400, loss = 0.0280826\nI0822 10:47:12.655583 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:47:12.655608 
32551 solver.cpp:244]     Train net output #1: loss = 0.0280827 (* 1 = 0.0280827 loss)\nI0822 10:47:12.736379 32551 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0822 10:49:28.412852 32551 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0822 10:50:49.989253 32551 solver.cpp:404]     Test net output #0: accuracy = 0.64564\nI0822 10:50:49.989578 32551 solver.cpp:404]     Test net output #1: loss = 2.07655 (* 1 = 2.07655 loss)\nI0822 10:50:51.298758 32551 solver.cpp:228] Iteration 43500, loss = 0.0202029\nI0822 10:50:51.298823 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:50:51.298849 32551 solver.cpp:244]     Train net output #1: loss = 0.0202031 (* 1 = 0.0202031 loss)\nI0822 10:50:51.371798 32551 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0822 10:53:07.221057 32551 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0822 10:54:28.781934 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6444\nI0822 10:54:28.782258 32551 solver.cpp:404]     Test net output #1: loss = 1.60218 (* 1 = 1.60218 loss)\nI0822 10:54:30.091467 32551 solver.cpp:228] Iteration 43600, loss = 0.0215114\nI0822 10:54:30.091533 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:54:30.091558 32551 solver.cpp:244]     Train net output #1: loss = 0.0215116 (* 1 = 0.0215116 loss)\nI0822 10:54:30.175045 32551 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0822 10:56:45.971379 32551 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0822 10:58:07.531220 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61848\nI0822 10:58:07.531527 32551 solver.cpp:404]     Test net output #1: loss = 2.26087 (* 1 = 2.26087 loss)\nI0822 10:58:08.839262 32551 solver.cpp:228] Iteration 43700, loss = 0.0367452\nI0822 10:58:08.839323 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:58:08.839340 32551 solver.cpp:244]     Train net output #1: loss = 0.0367453 (* 1 = 0.0367453 loss)\nI0822 10:58:08.922631 32551 
sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0822 11:00:24.568063 32551 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0822 11:01:45.035840 32551 solver.cpp:404]     Test net output #0: accuracy = 0.74312\nI0822 11:01:45.036144 32551 solver.cpp:404]     Test net output #1: loss = 1.18562 (* 1 = 1.18562 loss)\nI0822 11:01:46.340577 32551 solver.cpp:228] Iteration 43800, loss = 0.00633695\nI0822 11:01:46.340626 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:01:46.340651 32551 solver.cpp:244]     Train net output #1: loss = 0.00633708 (* 1 = 0.00633708 loss)\nI0822 11:01:46.421887 32551 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0822 11:04:01.874544 32551 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0822 11:05:22.366900 32551 solver.cpp:404]     Test net output #0: accuracy = 0.76404\nI0822 11:05:22.367163 32551 solver.cpp:404]     Test net output #1: loss = 1.114 (* 1 = 1.114 loss)\nI0822 11:05:23.671687 32551 solver.cpp:228] Iteration 43900, loss = 0.00483574\nI0822 11:05:23.671735 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:05:23.671759 32551 solver.cpp:244]     Train net output #1: loss = 0.00483587 (* 1 = 0.00483587 loss)\nI0822 11:05:23.756480 32551 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0822 11:07:39.377727 32551 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0822 11:08:59.876847 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54824\nI0822 11:08:59.877157 32551 solver.cpp:404]     Test net output #1: loss = 2.71615 (* 1 = 2.71615 loss)\nI0822 11:09:01.181531 32551 solver.cpp:228] Iteration 44000, loss = 0.0133586\nI0822 11:09:01.181576 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:09:01.181599 32551 solver.cpp:244]     Train net output #1: loss = 0.0133587 (* 1 = 0.0133587 loss)\nI0822 11:09:01.265859 32551 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0822 11:11:17.028234 32551 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0822 
11:12:37.508393 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46128\nI0822 11:12:37.508852 32551 solver.cpp:404]     Test net output #1: loss = 3.14771 (* 1 = 3.14771 loss)\nI0822 11:12:38.813377 32551 solver.cpp:228] Iteration 44100, loss = 0.0441085\nI0822 11:12:38.813421 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:12:38.813436 32551 solver.cpp:244]     Train net output #1: loss = 0.0441086 (* 1 = 0.0441086 loss)\nI0822 11:12:38.900840 32551 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0822 11:14:54.381090 32551 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0822 11:16:14.855736 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57968\nI0822 11:16:14.856037 32551 solver.cpp:404]     Test net output #1: loss = 2.69473 (* 1 = 2.69473 loss)\nI0822 11:16:16.160274 32551 solver.cpp:228] Iteration 44200, loss = 0.105517\nI0822 11:16:16.160318 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 11:16:16.160334 32551 solver.cpp:244]     Train net output #1: loss = 0.105517 (* 1 = 0.105517 loss)\nI0822 11:16:16.242097 32551 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0822 11:18:31.977175 32551 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0822 11:19:52.465656 32551 solver.cpp:404]     Test net output #0: accuracy = 0.70404\nI0822 11:19:52.465925 32551 solver.cpp:404]     Test net output #1: loss = 1.43419 (* 1 = 1.43419 loss)\nI0822 11:19:53.770633 32551 solver.cpp:228] Iteration 44300, loss = 0.0238237\nI0822 11:19:53.770680 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:19:53.770706 32551 solver.cpp:244]     Train net output #1: loss = 0.0238239 (* 1 = 0.0238239 loss)\nI0822 11:19:53.850250 32551 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0822 11:22:09.671329 32551 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0822 11:23:30.156023 32551 solver.cpp:404]     Test net output #0: accuracy = 0.736\nI0822 11:23:30.156294 32551 solver.cpp:404]     
Test net output #1: loss = 1.25829 (* 1 = 1.25829 loss)\nI0822 11:23:31.461108 32551 solver.cpp:228] Iteration 44400, loss = 0.0109371\nI0822 11:23:31.461158 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:23:31.461182 32551 solver.cpp:244]     Train net output #1: loss = 0.0109373 (* 1 = 0.0109373 loss)\nI0822 11:23:31.545518 32551 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0822 11:25:47.475407 32551 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0822 11:27:07.951055 32551 solver.cpp:404]     Test net output #0: accuracy = 0.68292\nI0822 11:27:07.951309 32551 solver.cpp:404]     Test net output #1: loss = 1.83854 (* 1 = 1.83854 loss)\nI0822 11:27:09.255667 32551 solver.cpp:228] Iteration 44500, loss = 0.00354448\nI0822 11:27:09.255717 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:27:09.255740 32551 solver.cpp:244]     Train net output #1: loss = 0.00354461 (* 1 = 0.00354461 loss)\nI0822 11:27:09.337491 32551 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0822 11:29:25.091321 32551 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0822 11:30:45.580092 32551 solver.cpp:404]     Test net output #0: accuracy = 0.73272\nI0822 11:30:45.580349 32551 solver.cpp:404]     Test net output #1: loss = 1.35923 (* 1 = 1.35923 loss)\nI0822 11:30:46.884733 32551 solver.cpp:228] Iteration 44600, loss = 0.0177569\nI0822 11:30:46.884781 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:30:46.884805 32551 solver.cpp:244]     Train net output #1: loss = 0.017757 (* 1 = 0.017757 loss)\nI0822 11:30:46.964370 32551 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0822 11:33:02.601805 32551 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0822 11:34:23.083443 32551 solver.cpp:404]     Test net output #0: accuracy = 0.67744\nI0822 11:34:23.083730 32551 solver.cpp:404]     Test net output #1: loss = 1.69005 (* 1 = 1.69005 loss)\nI0822 11:34:24.389744 32551 solver.cpp:228] Iteration 44700, loss = 
0.0143454\nI0822 11:34:24.389792 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:34:24.389814 32551 solver.cpp:244]     Train net output #1: loss = 0.0143456 (* 1 = 0.0143456 loss)\nI0822 11:34:24.470021 32551 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0822 11:36:40.168980 32551 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0822 11:38:00.649147 32551 solver.cpp:404]     Test net output #0: accuracy = 0.45648\nI0822 11:38:00.649400 32551 solver.cpp:404]     Test net output #1: loss = 3.1676 (* 1 = 3.1676 loss)\nI0822 11:38:01.952447 32551 solver.cpp:228] Iteration 44800, loss = 0.0281157\nI0822 11:38:01.952494 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:38:01.952510 32551 solver.cpp:244]     Train net output #1: loss = 0.0281159 (* 1 = 0.0281159 loss)\nI0822 11:38:02.036005 32551 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0822 11:40:17.450408 32551 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0822 11:41:37.922051 32551 solver.cpp:404]     Test net output #0: accuracy = 0.69208\nI0822 11:41:37.922274 32551 solver.cpp:404]     Test net output #1: loss = 1.51609 (* 1 = 1.51609 loss)\nI0822 11:41:39.226258 32551 solver.cpp:228] Iteration 44900, loss = 0.0738184\nI0822 11:41:39.226302 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 11:41:39.226318 32551 solver.cpp:244]     Train net output #1: loss = 0.0738185 (* 1 = 0.0738185 loss)\nI0822 11:41:39.305361 32551 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0822 11:43:54.703809 32551 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0822 11:45:15.186218 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62872\nI0822 11:45:15.186461 32551 solver.cpp:404]     Test net output #1: loss = 2.34721 (* 1 = 2.34721 loss)\nI0822 11:45:16.490017 32551 solver.cpp:228] Iteration 45000, loss = 0.0417889\nI0822 11:45:16.490063 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 11:45:16.490080 32551 
solver.cpp:244]     Train net output #1: loss = 0.0417891 (* 1 = 0.0417891 loss)\nI0822 11:45:16.569757 32551 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0822 11:47:32.030792 32551 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0822 11:48:52.511023 32551 solver.cpp:404]     Test net output #0: accuracy = 0.72768\nI0822 11:48:52.511310 32551 solver.cpp:404]     Test net output #1: loss = 1.34109 (* 1 = 1.34109 loss)\nI0822 11:48:53.815049 32551 solver.cpp:228] Iteration 45100, loss = 0.0178007\nI0822 11:48:53.815093 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:48:53.815109 32551 solver.cpp:244]     Train net output #1: loss = 0.0178009 (* 1 = 0.0178009 loss)\nI0822 11:48:53.900360 32551 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0822 11:51:09.306386 32551 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0822 11:52:29.783020 32551 solver.cpp:404]     Test net output #0: accuracy = 0.68608\nI0822 11:52:29.783272 32551 solver.cpp:404]     Test net output #1: loss = 1.75446 (* 1 = 1.75446 loss)\nI0822 11:52:31.087339 32551 solver.cpp:228] Iteration 45200, loss = 0.00331283\nI0822 11:52:31.087386 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:52:31.087401 32551 solver.cpp:244]     Train net output #1: loss = 0.00331298 (* 1 = 0.00331298 loss)\nI0822 11:52:31.172128 32551 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0822 11:54:46.981896 32551 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0822 11:56:07.454674 32551 solver.cpp:404]     Test net output #0: accuracy = 0.71236\nI0822 11:56:07.454965 32551 solver.cpp:404]     Test net output #1: loss = 1.26007 (* 1 = 1.26007 loss)\nI0822 11:56:08.758787 32551 solver.cpp:228] Iteration 45300, loss = 0.00104594\nI0822 11:56:08.758838 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:56:08.758854 32551 solver.cpp:244]     Train net output #1: loss = 0.0010461 (* 1 = 0.0010461 loss)\nI0822 11:56:08.836285 32551 sgd_solver.cpp:166] 
Iteration 45300, lr = 0.35\nI0822 11:58:24.182664 32551 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0822 11:59:44.653717 32551 solver.cpp:404]     Test net output #0: accuracy = 0.37388\nI0822 11:59:44.653978 32551 solver.cpp:404]     Test net output #1: loss = 3.27225 (* 1 = 3.27225 loss)\nI0822 11:59:45.957798 32551 solver.cpp:228] Iteration 45400, loss = 0.000276783\nI0822 11:59:45.957844 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:59:45.957860 32551 solver.cpp:244]     Train net output #1: loss = 0.00027694 (* 1 = 0.00027694 loss)\nI0822 11:59:46.042488 32551 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0822 12:02:01.491255 32551 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0822 12:03:21.992869 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12876\nI0822 12:03:21.993162 32551 solver.cpp:404]     Test net output #1: loss = 5.92702 (* 1 = 5.92702 loss)\nI0822 12:03:23.297453 32551 solver.cpp:228] Iteration 45500, loss = 0.000192898\nI0822 12:03:23.297497 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:03:23.297513 32551 solver.cpp:244]     Train net output #1: loss = 0.000193055 (* 1 = 0.000193055 loss)\nI0822 12:03:23.390763 32551 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0822 12:05:39.235949 32551 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0822 12:06:59.723866 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10096\nI0822 12:06:59.724117 32551 solver.cpp:404]     Test net output #1: loss = 7.15415 (* 1 = 7.15415 loss)\nI0822 12:07:01.028825 32551 solver.cpp:228] Iteration 45600, loss = 0.000196415\nI0822 12:07:01.028872 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:07:01.028887 32551 solver.cpp:244]     Train net output #1: loss = 0.000196571 (* 1 = 0.000196571 loss)\nI0822 12:07:01.114641 32551 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0822 12:09:17.034487 32551 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0822 
12:10:37.528975 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:10:37.529222 32551 solver.cpp:404]     Test net output #1: loss = 6.7308 (* 1 = 6.7308 loss)\nI0822 12:10:38.832785 32551 solver.cpp:228] Iteration 45700, loss = 0.00023273\nI0822 12:10:38.832830 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:10:38.832846 32551 solver.cpp:244]     Train net output #1: loss = 0.000232887 (* 1 = 0.000232887 loss)\nI0822 12:10:38.915570 32551 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0822 12:12:54.391491 32551 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0822 12:14:14.898596 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:14:14.898864 32551 solver.cpp:404]     Test net output #1: loss = 6.01124 (* 1 = 6.01124 loss)\nI0822 12:14:16.203883 32551 solver.cpp:228] Iteration 45800, loss = 0.000300988\nI0822 12:14:16.203932 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:14:16.203948 32551 solver.cpp:244]     Train net output #1: loss = 0.000301145 (* 1 = 0.000301145 loss)\nI0822 12:14:16.286149 32551 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0822 12:16:32.228327 32551 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0822 12:17:52.733796 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:17:52.734113 32551 solver.cpp:404]     Test net output #1: loss = 5.39325 (* 1 = 5.39325 loss)\nI0822 12:17:54.037767 32551 solver.cpp:228] Iteration 45900, loss = 0.000296848\nI0822 12:17:54.037816 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:17:54.037832 32551 solver.cpp:244]     Train net output #1: loss = 0.000297005 (* 1 = 0.000297005 loss)\nI0822 12:17:54.117530 32551 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0822 12:20:09.525315 32551 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0822 12:21:29.378901 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:21:29.379215 32551 
solver.cpp:404]     Test net output #1: loss = 4.94632 (* 1 = 4.94632 loss)\nI0822 12:21:30.669842 32551 solver.cpp:228] Iteration 46000, loss = 0.000349117\nI0822 12:21:30.669883 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:21:30.669898 32551 solver.cpp:244]     Train net output #1: loss = 0.000349274 (* 1 = 0.000349274 loss)\nI0822 12:21:30.768285 32551 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0822 12:23:46.369300 32551 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0822 12:25:06.104270 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:25:06.104573 32551 solver.cpp:404]     Test net output #1: loss = 4.58749 (* 1 = 4.58749 loss)\nI0822 12:25:07.398111 32551 solver.cpp:228] Iteration 46100, loss = 0.000327112\nI0822 12:25:07.398154 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:25:07.398169 32551 solver.cpp:244]     Train net output #1: loss = 0.000327268 (* 1 = 0.000327268 loss)\nI0822 12:25:07.494671 32551 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0822 12:27:23.123419 32551 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0822 12:28:42.860960 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:28:42.861266 32551 solver.cpp:404]     Test net output #1: loss = 4.30086 (* 1 = 4.30086 loss)\nI0822 12:28:44.154569 32551 solver.cpp:228] Iteration 46200, loss = 0.000375489\nI0822 12:28:44.154613 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:28:44.154629 32551 solver.cpp:244]     Train net output #1: loss = 0.000375645 (* 1 = 0.000375645 loss)\nI0822 12:28:44.249016 32551 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0822 12:30:59.961518 32551 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0822 12:32:19.703954 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:32:19.704262 32551 solver.cpp:404]     Test net output #1: loss = 4.04361 (* 1 = 4.04361 loss)\nI0822 12:32:20.996141 32551 solver.cpp:228] 
Iteration 46300, loss = 0.00034343\nI0822 12:32:20.996189 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:32:20.996206 32551 solver.cpp:244]     Train net output #1: loss = 0.000343587 (* 1 = 0.000343587 loss)\nI0822 12:32:21.094696 32551 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0822 12:34:36.767190 32551 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0822 12:35:56.503320 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:35:56.503631 32551 solver.cpp:404]     Test net output #1: loss = 3.8112 (* 1 = 3.8112 loss)\nI0822 12:35:57.795842 32551 solver.cpp:228] Iteration 46400, loss = 0.000335409\nI0822 12:35:57.795888 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:35:57.795904 32551 solver.cpp:244]     Train net output #1: loss = 0.000335565 (* 1 = 0.000335565 loss)\nI0822 12:35:57.892729 32551 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0822 12:38:13.797456 32551 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0822 12:39:33.531391 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:39:33.531690 32551 solver.cpp:404]     Test net output #1: loss = 3.64388 (* 1 = 3.64388 loss)\nI0822 12:39:34.824105 32551 solver.cpp:228] Iteration 46500, loss = 0.000316955\nI0822 12:39:34.824151 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:39:34.824167 32551 solver.cpp:244]     Train net output #1: loss = 0.000317112 (* 1 = 0.000317112 loss)\nI0822 12:39:34.917882 32551 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0822 12:41:50.598929 32551 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0822 12:43:10.335106 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:43:10.335433 32551 solver.cpp:404]     Test net output #1: loss = 3.49062 (* 1 = 3.49062 loss)\nI0822 12:43:11.627876 32551 solver.cpp:228] Iteration 46600, loss = 0.000375338\nI0822 12:43:11.627921 32551 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0822 12:43:11.627938 32551 solver.cpp:244]     Train net output #1: loss = 0.000375495 (* 1 = 0.000375495 loss)\nI0822 12:43:11.724581 32551 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0822 12:45:27.464588 32551 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0822 12:46:47.135077 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:46:47.135396 32551 solver.cpp:404]     Test net output #1: loss = 3.38461 (* 1 = 3.38461 loss)\nI0822 12:46:48.427666 32551 solver.cpp:228] Iteration 46700, loss = 0.000376687\nI0822 12:46:48.427711 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:46:48.427728 32551 solver.cpp:244]     Train net output #1: loss = 0.000376843 (* 1 = 0.000376843 loss)\nI0822 12:46:48.525252 32551 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0822 12:49:04.223587 32551 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0822 12:50:23.897408 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:50:23.897716 32551 solver.cpp:404]     Test net output #1: loss = 3.25398 (* 1 = 3.25398 loss)\nI0822 12:50:25.190201 32551 solver.cpp:228] Iteration 46800, loss = 0.000314587\nI0822 12:50:25.190246 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:50:25.190263 32551 solver.cpp:244]     Train net output #1: loss = 0.000314744 (* 1 = 0.000314744 loss)\nI0822 12:50:25.286612 32551 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0822 12:52:41.007339 32551 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0822 12:54:00.674072 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 12:54:00.674383 32551 solver.cpp:404]     Test net output #1: loss = 3.12217 (* 1 = 3.12217 loss)\nI0822 12:54:01.966969 32551 solver.cpp:228] Iteration 46900, loss = 0.0003166\nI0822 12:54:01.967012 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:54:01.967028 32551 solver.cpp:244]     Train net output #1: loss = 0.000316757 (* 1 = 0.000316757 loss)\nI0822 
12:54:02.059301 32551 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0822 12:56:17.854681 32551 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0822 12:57:37.519917 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 12:57:37.520182 32551 solver.cpp:404]     Test net output #1: loss = 3.02575 (* 1 = 3.02575 loss)\nI0822 12:57:38.812770 32551 solver.cpp:228] Iteration 47000, loss = 0.00031913\nI0822 12:57:38.812816 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:57:38.812832 32551 solver.cpp:244]     Train net output #1: loss = 0.000319286 (* 1 = 0.000319286 loss)\nI0822 12:57:38.907951 32551 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0822 12:59:54.662853 32551 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0822 13:01:14.325920 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09972\nI0822 13:01:14.326259 32551 solver.cpp:404]     Test net output #1: loss = 2.93507 (* 1 = 2.93507 loss)\nI0822 13:01:15.619581 32551 solver.cpp:228] Iteration 47100, loss = 0.000395624\nI0822 13:01:15.619626 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:01:15.619642 32551 solver.cpp:244]     Train net output #1: loss = 0.00039578 (* 1 = 0.00039578 loss)\nI0822 13:01:15.711722 32551 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0822 13:03:31.309650 32551 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0822 13:04:50.971817 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0822 13:04:50.972110 32551 solver.cpp:404]     Test net output #1: loss = 2.84339 (* 1 = 2.84339 loss)\nI0822 13:04:52.264353 32551 solver.cpp:228] Iteration 47200, loss = 0.000296385\nI0822 13:04:52.264396 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:04:52.264413 32551 solver.cpp:244]     Train net output #1: loss = 0.000296541 (* 1 = 0.000296541 loss)\nI0822 13:04:52.363528 32551 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0822 13:07:07.946069 32551 solver.cpp:337] Iteration 
47300, Testing net (#0)\nI0822 13:08:27.616631 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 13:08:27.616922 32551 solver.cpp:404]     Test net output #1: loss = 2.77752 (* 1 = 2.77752 loss)\nI0822 13:08:28.909611 32551 solver.cpp:228] Iteration 47300, loss = 0.000315251\nI0822 13:08:28.909653 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:08:28.909669 32551 solver.cpp:244]     Train net output #1: loss = 0.000315408 (* 1 = 0.000315408 loss)\nI0822 13:08:29.004350 32551 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0822 13:10:44.567808 32551 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0822 13:12:04.255753 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10024\nI0822 13:12:04.256049 32551 solver.cpp:404]     Test net output #1: loss = 2.71877 (* 1 = 2.71877 loss)\nI0822 13:12:05.548398 32551 solver.cpp:228] Iteration 47400, loss = 0.000271603\nI0822 13:12:05.548442 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:12:05.548466 32551 solver.cpp:244]     Train net output #1: loss = 0.00027176 (* 1 = 0.00027176 loss)\nI0822 13:12:05.648555 32551 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0822 13:14:21.197623 32551 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0822 13:15:40.870049 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09952\nI0822 13:15:40.870309 32551 solver.cpp:404]     Test net output #1: loss = 2.6653 (* 1 = 2.6653 loss)\nI0822 13:15:42.163803 32551 solver.cpp:228] Iteration 47500, loss = 0.000264009\nI0822 13:15:42.163848 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:15:42.163872 32551 solver.cpp:244]     Train net output #1: loss = 0.000264166 (* 1 = 0.000264166 loss)\nI0822 13:15:42.258390 32551 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0822 13:17:57.917191 32551 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0822 13:19:17.660082 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10036\nI0822 
13:19:17.660331 32551 solver.cpp:404]     Test net output #1: loss = 2.61881 (* 1 = 2.61881 loss)\nI0822 13:19:18.952811 32551 solver.cpp:228] Iteration 47600, loss = 0.000274728\nI0822 13:19:18.952858 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:19:18.952883 32551 solver.cpp:244]     Train net output #1: loss = 0.000274885 (* 1 = 0.000274885 loss)\nI0822 13:19:19.049183 32551 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0822 13:21:34.556444 32551 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0822 13:22:54.304785 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0996\nI0822 13:22:54.305069 32551 solver.cpp:404]     Test net output #1: loss = 2.58579 (* 1 = 2.58579 loss)\nI0822 13:22:55.598661 32551 solver.cpp:228] Iteration 47700, loss = 0.000281381\nI0822 13:22:55.598714 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:22:55.598739 32551 solver.cpp:244]     Train net output #1: loss = 0.000281538 (* 1 = 0.000281538 loss)\nI0822 13:22:55.694608 32551 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0822 13:25:11.250917 32551 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0822 13:26:30.969244 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10232\nI0822 13:26:30.969537 32551 solver.cpp:404]     Test net output #1: loss = 2.53886 (* 1 = 2.53886 loss)\nI0822 13:26:32.262153 32551 solver.cpp:228] Iteration 47800, loss = 0.000260963\nI0822 13:26:32.262195 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:26:32.262212 32551 solver.cpp:244]     Train net output #1: loss = 0.000261119 (* 1 = 0.000261119 loss)\nI0822 13:26:32.356210 32551 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0822 13:28:48.041775 32551 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0822 13:30:07.753535 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1056\nI0822 13:30:07.753785 32551 solver.cpp:404]     Test net output #1: loss = 2.50569 (* 1 = 2.50569 loss)\nI0822 13:30:09.046149 
32551 solver.cpp:228] Iteration 47900, loss = 0.000262228\nI0822 13:30:09.046190 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:30:09.046205 32551 solver.cpp:244]     Train net output #1: loss = 0.000262385 (* 1 = 0.000262385 loss)\nI0822 13:30:09.143940 32551 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0822 13:32:24.673521 32551 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0822 13:33:44.390635 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10844\nI0822 13:33:44.390882 32551 solver.cpp:404]     Test net output #1: loss = 2.47499 (* 1 = 2.47499 loss)\nI0822 13:33:45.684128 32551 solver.cpp:228] Iteration 48000, loss = 0.000251119\nI0822 13:33:45.684171 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:33:45.684188 32551 solver.cpp:244]     Train net output #1: loss = 0.000251276 (* 1 = 0.000251276 loss)\nI0822 13:33:45.774036 32551 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0822 13:36:01.390658 32551 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0822 13:37:21.114125 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10928\nI0822 13:37:21.114382 32551 solver.cpp:404]     Test net output #1: loss = 2.45556 (* 1 = 2.45556 loss)\nI0822 13:37:22.406785 32551 solver.cpp:228] Iteration 48100, loss = 0.000253229\nI0822 13:37:22.406833 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:37:22.406857 32551 solver.cpp:244]     Train net output #1: loss = 0.000253385 (* 1 = 0.000253385 loss)\nI0822 13:37:22.500035 32551 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0822 13:39:38.089634 32551 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0822 13:40:57.797343 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1102\nI0822 13:40:57.797608 32551 solver.cpp:404]     Test net output #1: loss = 2.4336 (* 1 = 2.4336 loss)\nI0822 13:40:59.089871 32551 solver.cpp:228] Iteration 48200, loss = 0.000250833\nI0822 13:40:59.089911 32551 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0822 13:40:59.089927 32551 solver.cpp:244]     Train net output #1: loss = 0.00025099 (* 1 = 0.00025099 loss)\nI0822 13:40:59.184226 32551 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0822 13:43:14.724180 32551 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0822 13:44:34.434028 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11024\nI0822 13:44:34.434317 32551 solver.cpp:404]     Test net output #1: loss = 2.41746 (* 1 = 2.41746 loss)\nI0822 13:44:35.727000 32551 solver.cpp:228] Iteration 48300, loss = 0.000247961\nI0822 13:44:35.727046 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:44:35.727061 32551 solver.cpp:244]     Train net output #1: loss = 0.000248118 (* 1 = 0.000248118 loss)\nI0822 13:44:35.816329 32551 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0822 13:46:51.415506 32551 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0822 13:48:11.117602 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10844\nI0822 13:48:11.117861 32551 solver.cpp:404]     Test net output #1: loss = 2.40577 (* 1 = 2.40577 loss)\nI0822 13:48:12.410750 32551 solver.cpp:228] Iteration 48400, loss = 0.00025674\nI0822 13:48:12.410792 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:48:12.410814 32551 solver.cpp:244]     Train net output #1: loss = 0.000256896 (* 1 = 0.000256896 loss)\nI0822 13:48:12.501508 32551 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0822 13:50:27.975409 32551 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0822 13:51:47.683313 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10908\nI0822 13:51:47.683573 32551 solver.cpp:404]     Test net output #1: loss = 2.39688 (* 1 = 2.39688 loss)\nI0822 13:51:48.975381 32551 solver.cpp:228] Iteration 48500, loss = 0.000246464\nI0822 13:51:48.975425 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:51:48.975440 32551 solver.cpp:244]     Train net output #1: loss = 0.000246621 (* 1 = 0.000246621 
loss)\nI0822 13:51:49.067416 32551 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0822 13:54:04.843781 32551 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0822 13:55:24.551789 32551 solver.cpp:404]     Test net output #0: accuracy = 0.107\nI0822 13:55:24.552045 32551 solver.cpp:404]     Test net output #1: loss = 2.38822 (* 1 = 2.38822 loss)\nI0822 13:55:25.844251 32551 solver.cpp:228] Iteration 48600, loss = 0.000228676\nI0822 13:55:25.844295 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:55:25.844310 32551 solver.cpp:244]     Train net output #1: loss = 0.000228832 (* 1 = 0.000228832 loss)\nI0822 13:55:25.937379 32551 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0822 13:57:41.689757 32551 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0822 13:59:01.404973 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10932\nI0822 13:59:01.405230 32551 solver.cpp:404]     Test net output #1: loss = 2.38099 (* 1 = 2.38099 loss)\nI0822 13:59:02.697548 32551 solver.cpp:228] Iteration 48700, loss = 0.000243524\nI0822 13:59:02.697592 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:59:02.697607 32551 solver.cpp:244]     Train net output #1: loss = 0.000243681 (* 1 = 0.000243681 loss)\nI0822 13:59:02.795483 32551 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0822 14:01:18.420126 32551 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0822 14:02:38.120848 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1078\nI0822 14:02:38.121106 32551 solver.cpp:404]     Test net output #1: loss = 2.37549 (* 1 = 2.37549 loss)\nI0822 14:02:39.413600 32551 solver.cpp:228] Iteration 48800, loss = 0.000220887\nI0822 14:02:39.413642 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:02:39.413658 32551 solver.cpp:244]     Train net output #1: loss = 0.000221043 (* 1 = 0.000221043 loss)\nI0822 14:02:39.512049 32551 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0822 14:04:55.002882 32551 
solver.cpp:337] Iteration 48900, Testing net (#0)\nI0822 14:06:14.708542 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11684\nI0822 14:06:14.708772 32551 solver.cpp:404]     Test net output #1: loss = 2.37055 (* 1 = 2.37055 loss)\nI0822 14:06:16.001062 32551 solver.cpp:228] Iteration 48900, loss = 0.000239917\nI0822 14:06:16.001106 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:06:16.001121 32551 solver.cpp:244]     Train net output #1: loss = 0.000240074 (* 1 = 0.000240074 loss)\nI0822 14:06:16.098230 32551 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0822 14:08:31.764250 32551 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0822 14:09:51.471321 32551 solver.cpp:404]     Test net output #0: accuracy = 0.11484\nI0822 14:09:51.471582 32551 solver.cpp:404]     Test net output #1: loss = 2.36672 (* 1 = 2.36672 loss)\nI0822 14:09:52.763972 32551 solver.cpp:228] Iteration 49000, loss = 0.000228919\nI0822 14:09:52.764015 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:09:52.764032 32551 solver.cpp:244]     Train net output #1: loss = 0.000229075 (* 1 = 0.000229075 loss)\nI0822 14:09:52.859285 32551 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0822 14:12:08.416311 32551 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0822 14:13:28.122511 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10796\nI0822 14:13:28.122742 32551 solver.cpp:404]     Test net output #1: loss = 2.36452 (* 1 = 2.36452 loss)\nI0822 14:13:29.415793 32551 solver.cpp:228] Iteration 49100, loss = 0.000237465\nI0822 14:13:29.415838 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:13:29.415854 32551 solver.cpp:244]     Train net output #1: loss = 0.000237622 (* 1 = 0.000237622 loss)\nI0822 14:13:29.514961 32551 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0822 14:15:45.090127 32551 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0822 14:17:04.811801 32551 solver.cpp:404]     Test net output #0: 
accuracy = 0.10284\nI0822 14:17:04.812096 32551 solver.cpp:404]     Test net output #1: loss = 2.36355 (* 1 = 2.36355 loss)\nI0822 14:17:06.105789 32551 solver.cpp:228] Iteration 49200, loss = 0.000222344\nI0822 14:17:06.105836 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:17:06.105860 32551 solver.cpp:244]     Train net output #1: loss = 0.000222501 (* 1 = 0.000222501 loss)\nI0822 14:17:06.203889 32551 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0822 14:19:21.781988 32551 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0822 14:20:41.505749 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10148\nI0822 14:20:41.506027 32551 solver.cpp:404]     Test net output #1: loss = 2.36429 (* 1 = 2.36429 loss)\nI0822 14:20:42.799031 32551 solver.cpp:228] Iteration 49300, loss = 0.00022391\nI0822 14:20:42.799077 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:20:42.799101 32551 solver.cpp:244]     Train net output #1: loss = 0.000224066 (* 1 = 0.000224066 loss)\nI0822 14:20:42.900527 32551 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0822 14:22:58.396564 32551 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0822 14:24:18.116555 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:24:18.116827 32551 solver.cpp:404]     Test net output #1: loss = 2.36474 (* 1 = 2.36474 loss)\nI0822 14:24:19.409567 32551 solver.cpp:228] Iteration 49400, loss = 0.000214329\nI0822 14:24:19.409613 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:24:19.409637 32551 solver.cpp:244]     Train net output #1: loss = 0.000214485 (* 1 = 0.000214485 loss)\nI0822 14:24:19.505336 32551 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0822 14:26:35.060245 32551 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0822 14:27:54.774135 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0822 14:27:54.774406 32551 solver.cpp:404]     Test net output #1: loss = 2.36663 (* 1 = 2.36663 
loss)\nI0822 14:27:56.068377 32551 solver.cpp:228] Iteration 49500, loss = 0.000209304\nI0822 14:27:56.068423 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:27:56.068439 32551 solver.cpp:244]     Train net output #1: loss = 0.000209461 (* 1 = 0.000209461 loss)\nI0822 14:27:56.161219 32551 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0822 14:30:11.652393 32551 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0822 14:31:31.360781 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09992\nI0822 14:31:31.361050 32551 solver.cpp:404]     Test net output #1: loss = 2.36913 (* 1 = 2.36913 loss)\nI0822 14:31:32.653781 32551 solver.cpp:228] Iteration 49600, loss = 0.000217168\nI0822 14:31:32.653826 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:31:32.653842 32551 solver.cpp:244]     Train net output #1: loss = 0.000217325 (* 1 = 0.000217325 loss)\nI0822 14:31:32.745646 32551 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0822 14:33:48.338789 32551 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0822 14:35:08.053458 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0822 14:35:08.053755 32551 solver.cpp:404]     Test net output #1: loss = 2.37225 (* 1 = 2.37225 loss)\nI0822 14:35:09.346804 32551 solver.cpp:228] Iteration 49700, loss = 0.00020789\nI0822 14:35:09.346846 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:35:09.346863 32551 solver.cpp:244]     Train net output #1: loss = 0.000208047 (* 1 = 0.000208047 loss)\nI0822 14:35:09.440690 32551 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0822 14:37:25.050475 32551 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0822 14:38:44.767108 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10048\nI0822 14:38:44.767361 32551 solver.cpp:404]     Test net output #1: loss = 78.561 (* 1 = 78.561 loss)\nI0822 14:38:46.059542 32551 solver.cpp:228] Iteration 49800, loss = 4.15616\nI0822 14:38:46.059587 32551 
solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0822 14:38:46.059602 32551 solver.cpp:244]     Train net output #1: loss = 4.15616 (* 1 = 4.15616 loss)\nI0822 14:38:46.157784 32551 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0822 14:41:01.565711 32551 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0822 14:42:21.293360 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 14:42:21.293654 32551 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0822 14:42:22.585403 32551 solver.cpp:228] Iteration 49900, loss = 1.92392\nI0822 14:42:22.585444 32551 solver.cpp:244]     Train net output #0: accuracy = 0.224\nI0822 14:42:22.585461 32551 solver.cpp:244]     Train net output #1: loss = 1.92392 (* 1 = 1.92392 loss)\nI0822 14:42:22.677886 32551 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0822 14:44:38.208489 32551 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0822 14:45:57.934341 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10428\nI0822 14:45:57.934615 32551 solver.cpp:404]     Test net output #1: loss = 78.2291 (* 1 = 78.2291 loss)\nI0822 14:45:59.226750 32551 solver.cpp:228] Iteration 50000, loss = 1.63483\nI0822 14:45:59.226794 32551 solver.cpp:244]     Train net output #0: accuracy = 0.352\nI0822 14:45:59.226809 32551 solver.cpp:244]     Train net output #1: loss = 1.63483 (* 1 = 1.63483 loss)\nI0822 14:45:59.321192 32551 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0822 14:45:59.321214 32551 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0822 14:48:15.153607 32551 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0822 14:49:34.884915 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0822 14:49:34.885223 32551 solver.cpp:404]     Test net output #1: loss = 78.6204 (* 1 = 78.6204 loss)\nI0822 14:49:36.178042 32551 solver.cpp:228] Iteration 50100, loss = 1.59884\nI0822 14:49:36.178083 32551 solver.cpp:244]     Train net output #0: accuracy = 
0.4\nI0822 14:49:36.178099 32551 solver.cpp:244]     Train net output #1: loss = 1.59884 (* 1 = 1.59884 loss)\nI0822 14:49:36.268843 32551 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0822 14:51:51.830919 32551 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0822 14:53:11.562302 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1002\nI0822 14:53:11.562559 32551 solver.cpp:404]     Test net output #1: loss = 78.5854 (* 1 = 78.5854 loss)\nI0822 14:53:12.854346 32551 solver.cpp:228] Iteration 50200, loss = 1.57082\nI0822 14:53:12.854383 32551 solver.cpp:244]     Train net output #0: accuracy = 0.448\nI0822 14:53:12.854399 32551 solver.cpp:244]     Train net output #1: loss = 1.57082 (* 1 = 1.57082 loss)\nI0822 14:53:12.950029 32551 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0822 14:55:28.639770 32551 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0822 14:56:48.356786 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0822 14:56:48.357084 32551 solver.cpp:404]     Test net output #1: loss = 78.6169 (* 1 = 78.6169 loss)\nI0822 14:56:49.649314 32551 solver.cpp:228] Iteration 50300, loss = 1.45085\nI0822 14:56:49.649353 32551 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI0822 14:56:49.649369 32551 solver.cpp:244]     Train net output #1: loss = 1.45085 (* 1 = 1.45085 loss)\nI0822 14:56:49.744527 32551 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0822 14:59:05.339227 32551 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0822 15:00:25.032891 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10076\nI0822 15:00:25.033151 32551 solver.cpp:404]     Test net output #1: loss = 78.528 (* 1 = 78.528 loss)\nI0822 15:00:26.325181 32551 solver.cpp:228] Iteration 50400, loss = 1.37968\nI0822 15:00:26.325218 32551 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI0822 15:00:26.325234 32551 solver.cpp:244]     Train net output #1: loss = 1.37968 (* 1 = 1.37968 loss)\nI0822 15:00:26.423357 32551 
sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0822 15:02:42.253234 32551 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0822 15:04:01.910022 32551 solver.cpp:404]     Test net output #0: accuracy = 0.0836\nI0822 15:04:01.910255 32551 solver.cpp:404]     Test net output #1: loss = 75.994 (* 1 = 75.994 loss)\nI0822 15:04:03.202520 32551 solver.cpp:228] Iteration 50500, loss = 1.29298\nI0822 15:04:03.202561 32551 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0822 15:04:03.202579 32551 solver.cpp:244]     Train net output #1: loss = 1.29298 (* 1 = 1.29298 loss)\nI0822 15:04:03.301414 32551 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0822 15:06:18.947788 32551 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0822 15:07:38.592514 32551 solver.cpp:404]     Test net output #0: accuracy = 0.06296\nI0822 15:07:38.592805 32551 solver.cpp:404]     Test net output #1: loss = 54.772 (* 1 = 54.772 loss)\nI0822 15:07:39.884665 32551 solver.cpp:228] Iteration 50600, loss = 1.25308\nI0822 15:07:39.884706 32551 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0822 15:07:39.884723 32551 solver.cpp:244]     Train net output #1: loss = 1.25308 (* 1 = 1.25308 loss)\nI0822 15:07:39.985316 32551 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0822 15:09:55.656635 32551 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0822 15:11:15.306435 32551 solver.cpp:404]     Test net output #0: accuracy = 0.05436\nI0822 15:11:15.306744 32551 solver.cpp:404]     Test net output #1: loss = 30.5019 (* 1 = 30.5019 loss)\nI0822 15:11:16.599318 32551 solver.cpp:228] Iteration 50700, loss = 1.19356\nI0822 15:11:16.599359 32551 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0822 15:11:16.599375 32551 solver.cpp:244]     Train net output #1: loss = 1.19356 (* 1 = 1.19356 loss)\nI0822 15:11:16.690650 32551 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0822 15:13:32.157832 32551 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0822 
15:14:51.811944 32551 solver.cpp:404]     Test net output #0: accuracy = 0.09096\nI0822 15:14:51.812234 32551 solver.cpp:404]     Test net output #1: loss = 14.182 (* 1 = 14.182 loss)\nI0822 15:14:53.104257 32551 solver.cpp:228] Iteration 50800, loss = 1.12991\nI0822 15:14:53.104300 32551 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI0822 15:14:53.104317 32551 solver.cpp:244]     Train net output #1: loss = 1.12991 (* 1 = 1.12991 loss)\nI0822 15:14:53.200279 32551 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0822 15:17:08.648419 32551 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0822 15:18:28.302780 32551 solver.cpp:404]     Test net output #0: accuracy = 0.10424\nI0822 15:18:28.303059 32551 solver.cpp:404]     Test net output #1: loss = 9.33282 (* 1 = 9.33282 loss)\nI0822 15:18:29.595782 32551 solver.cpp:228] Iteration 50900, loss = 1.04299\nI0822 15:18:29.595824 32551 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0822 15:18:29.595842 32551 solver.cpp:244]     Train net output #1: loss = 1.04299 (* 1 = 1.04299 loss)\nI0822 15:18:29.691179 32551 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0822 15:20:45.197860 32551 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0822 15:22:04.853068 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12756\nI0822 15:22:04.853364 32551 solver.cpp:404]     Test net output #1: loss = 6.40614 (* 1 = 6.40614 loss)\nI0822 15:22:06.145565 32551 solver.cpp:228] Iteration 51000, loss = 1.02442\nI0822 15:22:06.145606 32551 solver.cpp:244]     Train net output #0: accuracy = 0.624\nI0822 15:22:06.145624 32551 solver.cpp:244]     Train net output #1: loss = 1.02442 (* 1 = 1.02442 loss)\nI0822 15:22:06.242499 32551 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0822 15:24:21.692070 32551 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0822 15:25:41.339149 32551 solver.cpp:404]     Test net output #0: accuracy = 0.12076\nI0822 15:25:41.339401 32551 solver.cpp:404]     Test net output 
#1: loss = 5.13812 (* 1 = 5.13812 loss)\nI0822 15:25:42.631961 32551 solver.cpp:228] Iteration 51100, loss = 0.984615\nI0822 15:25:42.632005 32551 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0822 15:25:42.632021 32551 solver.cpp:244]     Train net output #1: loss = 0.984615 (* 1 = 0.984615 loss)\nI0822 15:25:42.726713 32551 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0822 15:27:58.273803 32551 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0822 15:29:17.928431 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1594\nI0822 15:29:17.928728 32551 solver.cpp:404]     Test net output #1: loss = 5.41061 (* 1 = 5.41061 loss)\nI0822 15:29:19.221444 32551 solver.cpp:228] Iteration 51200, loss = 0.980506\nI0822 15:29:19.221487 32551 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0822 15:29:19.221503 32551 solver.cpp:244]     Train net output #1: loss = 0.980506 (* 1 = 0.980506 loss)\nI0822 15:29:19.316117 32551 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0822 15:31:34.789427 32551 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0822 15:32:54.431902 32551 solver.cpp:404]     Test net output #0: accuracy = 0.19516\nI0822 15:32:54.432204 32551 solver.cpp:404]     Test net output #1: loss = 4.1638 (* 1 = 4.1638 loss)\nI0822 15:32:55.724431 32551 solver.cpp:228] Iteration 51300, loss = 0.90995\nI0822 15:32:55.724473 32551 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0822 15:32:55.724490 32551 solver.cpp:244]     Train net output #1: loss = 0.90995 (* 1 = 0.90995 loss)\nI0822 15:32:55.822711 32551 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0822 15:35:11.354235 32551 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0822 15:36:31.073014 32551 solver.cpp:404]     Test net output #0: accuracy = 0.21976\nI0822 15:36:31.073299 32551 solver.cpp:404]     Test net output #1: loss = 3.4476 (* 1 = 3.4476 loss)\nI0822 15:36:32.365494 32551 solver.cpp:228] Iteration 51400, loss = 0.800581\nI0822 15:36:32.365536 
32551 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0822 15:36:32.365553 32551 solver.cpp:244]     Train net output #1: loss = 0.800581 (* 1 = 0.800581 loss)\nI0822 15:36:32.458556 32551 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0822 15:38:47.907218 32551 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0822 15:40:07.624850 32551 solver.cpp:404]     Test net output #0: accuracy = 0.1448\nI0822 15:40:07.625145 32551 solver.cpp:404]     Test net output #1: loss = 5.93555 (* 1 = 5.93555 loss)\nI0822 15:40:08.916718 32551 solver.cpp:228] Iteration 51500, loss = 0.835876\nI0822 15:40:08.916765 32551 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0822 15:40:08.916782 32551 solver.cpp:244]     Train net output #1: loss = 0.835876 (* 1 = 0.835876 loss)\nI0822 15:40:09.010229 32551 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0822 15:42:24.864990 32551 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0822 15:43:44.588750 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16108\nI0822 15:43:44.589030 32551 solver.cpp:404]     Test net output #1: loss = 6.55444 (* 1 = 6.55444 loss)\nI0822 15:43:45.882618 32551 solver.cpp:228] Iteration 51600, loss = 0.786228\nI0822 15:43:45.882665 32551 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0822 15:43:45.882681 32551 solver.cpp:244]     Train net output #1: loss = 0.786228 (* 1 = 0.786228 loss)\nI0822 15:43:45.979807 32551 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0822 15:46:01.646631 32551 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0822 15:47:21.368538 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15344\nI0822 15:47:21.368827 32551 solver.cpp:404]     Test net output #1: loss = 6.43633 (* 1 = 6.43633 loss)\nI0822 15:47:22.660141 32551 solver.cpp:228] Iteration 51700, loss = 0.899416\nI0822 15:47:22.660185 32551 solver.cpp:244]     Train net output #0: accuracy = 0.624\nI0822 15:47:22.660202 32551 solver.cpp:244]     Train net output #1: 
loss = 0.899416 (* 1 = 0.899416 loss)\nI0822 15:47:22.756532 32551 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0822 15:49:38.318769 32551 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0822 15:50:58.040971 32551 solver.cpp:404]     Test net output #0: accuracy = 0.16192\nI0822 15:50:58.041273 32551 solver.cpp:404]     Test net output #1: loss = 5.96005 (* 1 = 5.96005 loss)\nI0822 15:50:59.333905 32551 solver.cpp:228] Iteration 51800, loss = 0.801163\nI0822 15:50:59.333950 32551 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0822 15:50:59.333966 32551 solver.cpp:244]     Train net output #1: loss = 0.801163 (* 1 = 0.801163 loss)\nI0822 15:50:59.433445 32551 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0822 15:53:14.988622 32551 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0822 15:54:34.706429 32551 solver.cpp:404]     Test net output #0: accuracy = 0.15444\nI0822 15:54:34.706727 32551 solver.cpp:404]     Test net output #1: loss = 6.00928 (* 1 = 6.00928 loss)\nI0822 15:54:35.999797 32551 solver.cpp:228] Iteration 51900, loss = 0.878763\nI0822 15:54:35.999842 32551 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0822 15:54:35.999858 32551 solver.cpp:244]     Train net output #1: loss = 0.878763 (* 1 = 0.878763 loss)\nI0822 15:54:36.096568 32551 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0822 15:56:51.567533 32551 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0822 15:58:11.282435 32551 solver.cpp:404]     Test net output #0: accuracy = 0.19776\nI0822 15:58:11.282706 32551 solver.cpp:404]     Test net output #1: loss = 5.24819 (* 1 = 5.24819 loss)\nI0822 15:58:12.575161 32551 solver.cpp:228] Iteration 52000, loss = 0.573562\nI0822 15:58:12.575206 32551 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0822 15:58:12.575222 32551 solver.cpp:244]     Train net output #1: loss = 0.573562 (* 1 = 0.573562 loss)\nI0822 15:58:12.672036 32551 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0822 
16:00:28.494990 32551 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0822 16:01:48.208906 32551 solver.cpp:404]     Test net output #0: accuracy = 0.19564\nI0822 16:01:48.209293 32551 solver.cpp:404]     Test net output #1: loss = 4.93458 (* 1 = 4.93458 loss)\nI0822 16:01:49.501767 32551 solver.cpp:228] Iteration 52100, loss = 0.631753\nI0822 16:01:49.501811 32551 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0822 16:01:49.501828 32551 solver.cpp:244]     Train net output #1: loss = 0.631753 (* 1 = 0.631753 loss)\nI0822 16:01:49.597923 32551 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0822 16:04:05.186766 32551 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0822 16:05:24.907433 32551 solver.cpp:404]     Test net output #0: accuracy = 0.18908\nI0822 16:05:24.907723 32551 solver.cpp:404]     Test net output #1: loss = 5.0004 (* 1 = 5.0004 loss)\nI0822 16:05:26.200467 32551 solver.cpp:228] Iteration 52200, loss = 0.564568\nI0822 16:05:26.200512 32551 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0822 16:05:26.200527 32551 solver.cpp:244]     Train net output #1: loss = 0.564568 (* 1 = 0.564568 loss)\nI0822 16:05:26.301611 32551 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0822 16:07:41.990103 32551 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0822 16:09:01.709635 32551 solver.cpp:404]     Test net output #0: accuracy = 0.27836\nI0822 16:09:01.709894 32551 solver.cpp:404]     Test net output #1: loss = 4.1526 (* 1 = 4.1526 loss)\nI0822 16:09:03.001902 32551 solver.cpp:228] Iteration 52300, loss = 0.587912\nI0822 16:09:03.001946 32551 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0822 16:09:03.001961 32551 solver.cpp:244]     Train net output #1: loss = 0.587912 (* 1 = 0.587912 loss)\nI0822 16:09:03.096180 32551 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0822 16:11:18.851060 32551 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0822 16:12:38.575358 32551 solver.cpp:404]     Test net output 
#0: accuracy = 0.22612\nI0822 16:12:38.575646 32551 solver.cpp:404]     Test net output #1: loss = 5.08684 (* 1 = 5.08684 loss)\nI0822 16:12:39.867205 32551 solver.cpp:228] Iteration 52400, loss = 0.387513\nI0822 16:12:39.867249 32551 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 16:12:39.867265 32551 solver.cpp:244]     Train net output #1: loss = 0.387513 (* 1 = 0.387513 loss)\nI0822 16:12:39.965376 32551 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0822 16:14:55.574165 32551 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0822 16:16:15.293593 32551 solver.cpp:404]     Test net output #0: accuracy = 0.22788\nI0822 16:16:15.293851 32551 solver.cpp:404]     Test net output #1: loss = 5.76125 (* 1 = 5.76125 loss)\nI0822 16:16:16.585783 32551 solver.cpp:228] Iteration 52500, loss = 0.597516\nI0822 16:16:16.585827 32551 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0822 16:16:16.585844 32551 solver.cpp:244]     Train net output #1: loss = 0.597516 (* 1 = 0.597516 loss)\nI0822 16:16:16.681871 32551 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0822 16:18:32.459058 32551 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0822 16:19:52.177992 32551 solver.cpp:404]     Test net output #0: accuracy = 0.25356\nI0822 16:19:52.178290 32551 solver.cpp:404]     Test net output #1: loss = 5.01066 (* 1 = 5.01066 loss)\nI0822 16:19:53.470198 32551 solver.cpp:228] Iteration 52600, loss = 0.493256\nI0822 16:19:53.470242 32551 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0822 16:19:53.470257 32551 solver.cpp:244]     Train net output #1: loss = 0.493256 (* 1 = 0.493256 loss)\nI0822 16:19:53.567431 32551 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0822 16:22:09.429942 32551 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0822 16:23:29.149149 32551 solver.cpp:404]     Test net output #0: accuracy = 0.24988\nI0822 16:23:29.149406 32551 solver.cpp:404]     Test net output #1: loss = 5.64945 (* 1 = 5.64945 
loss)\nI0822 16:23:30.441380 32551 solver.cpp:228] Iteration 52700, loss = 0.453219\nI0822 16:23:30.441423 32551 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0822 16:23:30.441439 32551 solver.cpp:244]     Train net output #1: loss = 0.453219 (* 1 = 0.453219 loss)\nI0822 16:23:30.538228 32551 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0822 16:25:46.260062 32551 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0822 16:27:05.979975 32551 solver.cpp:404]     Test net output #0: accuracy = 0.17392\nI0822 16:27:05.980274 32551 solver.cpp:404]     Test net output #1: loss = 9.11285 (* 1 = 9.11285 loss)\nI0822 16:27:07.272455 32551 solver.cpp:228] Iteration 52800, loss = 0.43806\nI0822 16:27:07.272500 32551 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0822 16:27:07.272516 32551 solver.cpp:244]     Train net output #1: loss = 0.43806 (* 1 = 0.43806 loss)\nI0822 16:27:07.371068 32551 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0822 16:29:23.017710 32551 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0822 16:30:42.721066 32551 solver.cpp:404]     Test net output #0: accuracy = 0.23424\nI0822 16:30:42.721331 32551 solver.cpp:404]     Test net output #1: loss = 6.06812 (* 1 = 6.06812 loss)\nI0822 16:30:44.013680 32551 solver.cpp:228] Iteration 52900, loss = 0.52897\nI0822 16:30:44.013720 32551 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0822 16:30:44.013736 32551 solver.cpp:244]     Train net output #1: loss = 0.52897 (* 1 = 0.52897 loss)\nI0822 16:30:44.111894 32551 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0822 16:32:59.729702 32551 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0822 16:34:19.462627 32551 solver.cpp:404]     Test net output #0: accuracy = 0.23668\nI0822 16:34:19.462901 32551 solver.cpp:404]     Test net output #1: loss = 6.82992 (* 1 = 6.82992 loss)\nI0822 16:34:20.754861 32551 solver.cpp:228] Iteration 53000, loss = 0.367219\nI0822 16:34:20.754902 32551 solver.cpp:244]     Train 
net output #0: accuracy = 0.904\nI0822 16:34:20.754920 32551 solver.cpp:244]     Train net output #1: loss = 0.367219 (* 1 = 0.367219 loss)\nI0822 16:34:20.849906 32551 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0822 16:36:36.416321 32551 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0822 16:37:56.155714 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32872\nI0822 16:37:56.156020 32551 solver.cpp:404]     Test net output #1: loss = 4.33714 (* 1 = 4.33714 loss)\nI0822 16:37:57.449034 32551 solver.cpp:228] Iteration 53100, loss = 0.333175\nI0822 16:37:57.449077 32551 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0822 16:37:57.449093 32551 solver.cpp:244]     Train net output #1: loss = 0.333175 (* 1 = 0.333175 loss)\nI0822 16:37:57.544805 32551 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0822 16:40:12.997670 32551 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0822 16:41:32.728840 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3\nI0822 16:41:32.729100 32551 solver.cpp:404]     Test net output #1: loss = 5.82116 (* 1 = 5.82116 loss)\nI0822 16:41:34.021219 32551 solver.cpp:228] Iteration 53200, loss = 0.43902\nI0822 16:41:34.021260 32551 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0822 16:41:34.021275 32551 solver.cpp:244]     Train net output #1: loss = 0.43902 (* 1 = 0.43902 loss)\nI0822 16:41:34.120462 32551 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0822 16:43:49.587997 32551 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0822 16:45:09.326148 32551 solver.cpp:404]     Test net output #0: accuracy = 0.27196\nI0822 16:45:09.326437 32551 solver.cpp:404]     Test net output #1: loss = 6.80169 (* 1 = 6.80169 loss)\nI0822 16:45:10.618798 32551 solver.cpp:228] Iteration 53300, loss = 0.319071\nI0822 16:45:10.618839 32551 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 16:45:10.618855 32551 solver.cpp:244]     Train net output #1: loss = 0.319071 (* 1 = 0.319071 
loss)\nI0822 16:45:10.715889 32551 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0822 16:47:26.288980 32551 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0822 16:48:46.018235 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32316\nI0822 16:48:46.018543 32551 solver.cpp:404]     Test net output #1: loss = 6.54715 (* 1 = 6.54715 loss)\nI0822 16:48:47.310915 32551 solver.cpp:228] Iteration 53400, loss = 0.279882\nI0822 16:48:47.310956 32551 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0822 16:48:47.310971 32551 solver.cpp:244]     Train net output #1: loss = 0.279882 (* 1 = 0.279882 loss)\nI0822 16:48:47.409010 32551 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0822 16:51:03.093262 32551 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0822 16:52:22.819236 32551 solver.cpp:404]     Test net output #0: accuracy = 0.31908\nI0822 16:52:22.819511 32551 solver.cpp:404]     Test net output #1: loss = 7.00697 (* 1 = 7.00697 loss)\nI0822 16:52:24.111876 32551 solver.cpp:228] Iteration 53500, loss = 0.238562\nI0822 16:52:24.111917 32551 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0822 16:52:24.111932 32551 solver.cpp:244]     Train net output #1: loss = 0.238562 (* 1 = 0.238562 loss)\nI0822 16:52:24.205998 32551 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0822 16:54:39.780640 32551 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0822 16:55:59.521162 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32068\nI0822 16:55:59.521419 32551 solver.cpp:404]     Test net output #1: loss = 7.18818 (* 1 = 7.18818 loss)\nI0822 16:56:00.814005 32551 solver.cpp:228] Iteration 53600, loss = 0.192738\nI0822 16:56:00.814046 32551 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0822 16:56:00.814062 32551 solver.cpp:244]     Train net output #1: loss = 0.192738 (* 1 = 0.192738 loss)\nI0822 16:56:00.912624 32551 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0822 16:58:16.486969 32551 solver.cpp:337] 
Iteration 53700, Testing net (#0)\nI0822 16:59:36.218411 32551 solver.cpp:404]     Test net output #0: accuracy = 0.34012\nI0822 16:59:36.218709 32551 solver.cpp:404]     Test net output #1: loss = 6.62882 (* 1 = 6.62882 loss)\nI0822 16:59:37.511701 32551 solver.cpp:228] Iteration 53700, loss = 0.332589\nI0822 16:59:37.511749 32551 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 16:59:37.511766 32551 solver.cpp:244]     Train net output #1: loss = 0.332589 (* 1 = 0.332589 loss)\nI0822 16:59:37.605933 32551 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0822 17:01:53.468727 32551 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0822 17:03:13.193934 32551 solver.cpp:404]     Test net output #0: accuracy = 0.32636\nI0822 17:03:13.194249 32551 solver.cpp:404]     Test net output #1: loss = 6.87906 (* 1 = 6.87906 loss)\nI0822 17:03:14.486415 32551 solver.cpp:228] Iteration 53800, loss = 0.374731\nI0822 17:03:14.486457 32551 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0822 17:03:14.486474 32551 solver.cpp:244]     Train net output #1: loss = 0.374731 (* 1 = 0.374731 loss)\nI0822 17:03:14.583564 32551 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0822 17:05:30.257692 32551 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0822 17:06:49.993075 32551 solver.cpp:404]     Test net output #0: accuracy = 0.36092\nI0822 17:06:49.993350 32551 solver.cpp:404]     Test net output #1: loss = 6.37689 (* 1 = 6.37689 loss)\nI0822 17:06:51.287292 32551 solver.cpp:228] Iteration 53900, loss = 0.123589\nI0822 17:06:51.287333 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 17:06:51.287348 32551 solver.cpp:244]     Train net output #1: loss = 0.123589 (* 1 = 0.123589 loss)\nI0822 17:06:51.383838 32551 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0822 17:09:06.986691 32551 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0822 17:10:26.715000 32551 solver.cpp:404]     Test net output #0: accuracy = 0.31212\nI0822 
17:10:26.715297 32551 solver.cpp:404]     Test net output #1: loss = 7.76306 (* 1 = 7.76306 loss)\nI0822 17:10:28.007726 32551 solver.cpp:228] Iteration 54000, loss = 0.248063\nI0822 17:10:28.007774 32551 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0822 17:10:28.007791 32551 solver.cpp:244]     Train net output #1: loss = 0.248063 (* 1 = 0.248063 loss)\nI0822 17:10:28.098989 32551 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0822 17:12:43.710449 32551 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0822 17:14:03.438580 32551 solver.cpp:404]     Test net output #0: accuracy = 0.37416\nI0822 17:14:03.438859 32551 solver.cpp:404]     Test net output #1: loss = 6.19106 (* 1 = 6.19106 loss)\nI0822 17:14:04.731459 32551 solver.cpp:228] Iteration 54100, loss = 0.232953\nI0822 17:14:04.731503 32551 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0822 17:14:04.731518 32551 solver.cpp:244]     Train net output #1: loss = 0.232953 (* 1 = 0.232953 loss)\nI0822 17:14:04.822057 32551 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0822 17:16:20.395059 32551 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0822 17:17:40.067395 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40996\nI0822 17:17:40.067698 32551 solver.cpp:404]     Test net output #1: loss = 5.39733 (* 1 = 5.39733 loss)\nI0822 17:17:41.360508 32551 solver.cpp:228] Iteration 54200, loss = 0.142768\nI0822 17:17:41.360549 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 17:17:41.360565 32551 solver.cpp:244]     Train net output #1: loss = 0.142768 (* 1 = 0.142768 loss)\nI0822 17:17:41.457558 32551 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0822 17:19:57.128551 32551 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0822 17:21:16.801141 32551 solver.cpp:404]     Test net output #0: accuracy = 0.34196\nI0822 17:21:16.801412 32551 solver.cpp:404]     Test net output #1: loss = 8.48818 (* 1 = 8.48818 loss)\nI0822 17:21:18.094965 32551 
solver.cpp:228] Iteration 54300, loss = 0.271351\nI0822 17:21:18.095006 32551 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0822 17:21:18.095022 32551 solver.cpp:244]     Train net output #1: loss = 0.271351 (* 1 = 0.271351 loss)\nI0822 17:21:18.190852 32551 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0822 17:23:33.774976 32551 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0822 17:24:53.450232 32551 solver.cpp:404]     Test net output #0: accuracy = 0.39424\nI0822 17:24:53.450491 32551 solver.cpp:404]     Test net output #1: loss = 6.35528 (* 1 = 6.35528 loss)\nI0822 17:24:54.742831 32551 solver.cpp:228] Iteration 54400, loss = 0.217028\nI0822 17:24:54.742873 32551 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0822 17:24:54.742889 32551 solver.cpp:244]     Train net output #1: loss = 0.217028 (* 1 = 0.217028 loss)\nI0822 17:24:54.832239 32551 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0822 17:27:10.498373 32551 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0822 17:28:30.177695 32551 solver.cpp:404]     Test net output #0: accuracy = 0.35732\nI0822 17:28:30.177989 32551 solver.cpp:404]     Test net output #1: loss = 7.54653 (* 1 = 7.54653 loss)\nI0822 17:28:31.471086 32551 solver.cpp:228] Iteration 54500, loss = 0.160704\nI0822 17:28:31.471129 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 17:28:31.471145 32551 solver.cpp:244]     Train net output #1: loss = 0.160704 (* 1 = 0.160704 loss)\nI0822 17:28:31.561269 32551 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0822 17:30:47.036341 32551 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0822 17:32:06.710305 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4102\nI0822 17:32:06.710609 32551 solver.cpp:404]     Test net output #1: loss = 5.50066 (* 1 = 5.50066 loss)\nI0822 17:32:08.002874 32551 solver.cpp:228] Iteration 54600, loss = 0.132997\nI0822 17:32:08.002918 32551 solver.cpp:244]     Train net output #0: accuracy = 
0.936\nI0822 17:32:08.002934 32551 solver.cpp:244]     Train net output #1: loss = 0.132997 (* 1 = 0.132997 loss)\nI0822 17:32:08.097563 32551 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0822 17:34:23.703188 32551 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0822 17:35:43.376430 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40108\nI0822 17:35:43.376715 32551 solver.cpp:404]     Test net output #1: loss = 6.59045 (* 1 = 6.59045 loss)\nI0822 17:35:44.668783 32551 solver.cpp:228] Iteration 54700, loss = 0.174776\nI0822 17:35:44.668826 32551 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0822 17:35:44.668843 32551 solver.cpp:244]     Train net output #1: loss = 0.174776 (* 1 = 0.174776 loss)\nI0822 17:35:44.767479 32551 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0822 17:38:00.516950 32551 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0822 17:39:20.178897 32551 solver.cpp:404]     Test net output #0: accuracy = 0.41544\nI0822 17:39:20.179219 32551 solver.cpp:404]     Test net output #1: loss = 6.05504 (* 1 = 6.05504 loss)\nI0822 17:39:21.471494 32551 solver.cpp:228] Iteration 54800, loss = 0.155505\nI0822 17:39:21.471536 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 17:39:21.471552 32551 solver.cpp:244]     Train net output #1: loss = 0.155505 (* 1 = 0.155505 loss)\nI0822 17:39:21.566773 32551 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0822 17:41:37.174819 32551 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0822 17:42:56.838337 32551 solver.cpp:404]     Test net output #0: accuracy = 0.38036\nI0822 17:42:56.838644 32551 solver.cpp:404]     Test net output #1: loss = 8.36782 (* 1 = 8.36782 loss)\nI0822 17:42:58.131237 32551 solver.cpp:228] Iteration 54900, loss = 0.128693\nI0822 17:42:58.131279 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 17:42:58.131296 32551 solver.cpp:244]     Train net output #1: loss = 0.128693 (* 1 = 0.128693 loss)\nI0822 
17:42:58.230273 32551 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0822 17:45:13.733567 32551 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0822 17:46:33.400336 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40516\nI0822 17:46:33.400591 32551 solver.cpp:404]     Test net output #1: loss = 7.84556 (* 1 = 7.84556 loss)\nI0822 17:46:34.693051 32551 solver.cpp:228] Iteration 55000, loss = 0.0992966\nI0822 17:46:34.693095 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 17:46:34.693111 32551 solver.cpp:244]     Train net output #1: loss = 0.0992967 (* 1 = 0.0992967 loss)\nI0822 17:46:34.792560 32551 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0822 17:48:50.364444 32551 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0822 17:50:10.035579 32551 solver.cpp:404]     Test net output #0: accuracy = 0.46176\nI0822 17:50:10.035882 32551 solver.cpp:404]     Test net output #1: loss = 5.6095 (* 1 = 5.6095 loss)\nI0822 17:50:11.328439 32551 solver.cpp:228] Iteration 55100, loss = 0.123036\nI0822 17:50:11.328483 32551 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 17:50:11.328500 32551 solver.cpp:244]     Train net output #1: loss = 0.123036 (* 1 = 0.123036 loss)\nI0822 17:50:11.425055 32551 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0822 17:52:27.133941 32551 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0822 17:53:46.881299 32551 solver.cpp:404]     Test net output #0: accuracy = 0.39556\nI0822 17:53:46.881548 32551 solver.cpp:404]     Test net output #1: loss = 7.69105 (* 1 = 7.69105 loss)\nI0822 17:53:48.174049 32551 solver.cpp:228] Iteration 55200, loss = 0.0537998\nI0822 17:53:48.174093 32551 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 17:53:48.174109 32551 solver.cpp:244]     Train net output #1: loss = 0.0537998 (* 1 = 0.0537998 loss)\nI0822 17:53:48.272714 32551 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0822 17:56:03.959517 32551 solver.cpp:337] Iteration 
55300, Testing net (#0)\nI0822 17:57:23.696337 32551 solver.cpp:404]     Test net output #0: accuracy = 0.45064\nI0822 17:57:23.696602 32551 solver.cpp:404]     Test net output #1: loss = 5.56068 (* 1 = 5.56068 loss)\nI0822 17:57:24.989450 32551 solver.cpp:228] Iteration 55300, loss = 0.05178\nI0822 17:57:24.989495 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 17:57:24.989511 32551 solver.cpp:244]     Train net output #1: loss = 0.0517801 (* 1 = 0.0517801 loss)\nI0822 17:57:25.084230 32551 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0822 17:59:40.875385 32551 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0822 18:01:00.610683 32551 solver.cpp:404]     Test net output #0: accuracy = 0.45328\nI0822 18:01:00.611004 32551 solver.cpp:404]     Test net output #1: loss = 6.34365 (* 1 = 6.34365 loss)\nI0822 18:01:01.903434 32551 solver.cpp:228] Iteration 55400, loss = 0.100071\nI0822 18:01:01.903475 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 18:01:01.903491 32551 solver.cpp:244]     Train net output #1: loss = 0.100071 (* 1 = 0.100071 loss)\nI0822 18:01:02.001250 32551 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0822 18:03:17.573830 32551 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0822 18:04:37.313290 32551 solver.cpp:404]     Test net output #0: accuracy = 0.47876\nI0822 18:04:37.313531 32551 solver.cpp:404]     Test net output #1: loss = 5.183 (* 1 = 5.183 loss)\nI0822 18:04:38.605505 32551 solver.cpp:228] Iteration 55500, loss = 0.0903998\nI0822 18:04:38.605545 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 18:04:38.605562 32551 solver.cpp:244]     Train net output #1: loss = 0.0903998 (* 1 = 0.0903998 loss)\nI0822 18:04:38.698663 32551 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0822 18:06:54.299460 32551 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0822 18:08:14.037705 32551 solver.cpp:404]     Test net output #0: accuracy = 0.40032\nI0822 18:08:14.037971 
32551 solver.cpp:404]     Test net output #1: loss = 7.60276 (* 1 = 7.60276 loss)\nI0822 18:08:15.331302 32551 solver.cpp:228] Iteration 55600, loss = 0.112496\nI0822 18:08:15.331343 32551 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 18:08:15.331359 32551 solver.cpp:244]     Train net output #1: loss = 0.112496 (* 1 = 0.112496 loss)\nI0822 18:08:15.423229 32551 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0822 18:10:31.000001 32551 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0822 18:11:50.736685 32551 solver.cpp:404]     Test net output #0: accuracy = 0.44544\nI0822 18:11:50.736985 32551 solver.cpp:404]     Test net output #1: loss = 6.37575 (* 1 = 6.37575 loss)\nI0822 18:11:52.029013 32551 solver.cpp:228] Iteration 55700, loss = 0.0619901\nI0822 18:11:52.029055 32551 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 18:11:52.029072 32551 solver.cpp:244]     Train net output #1: loss = 0.0619901 (* 1 = 0.0619901 loss)\nI0822 18:11:52.128226 32551 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0822 18:14:07.804038 32551 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0822 18:15:27.548913 32551 solver.cpp:404]     Test net output #0: accuracy = 0.42908\nI0822 18:15:27.549208 32551 solver.cpp:404]     Test net output #1: loss = 6.53868 (* 1 = 6.53868 loss)\nI0822 18:15:28.842865 32551 solver.cpp:228] Iteration 55800, loss = 0.125047\nI0822 18:15:28.842906 32551 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 18:15:28.842922 32551 solver.cpp:244]     Train net output #1: loss = 0.125047 (* 1 = 0.125047 loss)\nI0822 18:15:28.939595 32551 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0822 18:17:44.541517 32551 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0822 18:19:04.279508 32551 solver.cpp:404]     Test net output #0: accuracy = 0.3674\nI0822 18:19:04.279811 32551 solver.cpp:404]     Test net output #1: loss = 8.50072 (* 1 = 8.50072 loss)\nI0822 18:19:05.572582 32551 solver.cpp:228] 
Iteration 55900, loss = 0.101996\nI0822 18:19:05.572623 32551 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 18:19:05.572638 32551 solver.cpp:244]     Train net output #1: loss = 0.101996 (* 1 = 0.101996 loss)\nI0822 18:19:05.670367 32551 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0822 18:21:21.229565 32551 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0822 18:22:40.967906 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49196\nI0822 18:22:40.968226 32551 solver.cpp:404]     Test net output #1: loss = 4.88137 (* 1 = 4.88137 loss)\nI0822 18:22:42.260685 32551 solver.cpp:228] Iteration 56000, loss = 0.0215128\nI0822 18:22:42.260726 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:22:42.260748 32551 solver.cpp:244]     Train net output #1: loss = 0.0215128 (* 1 = 0.0215128 loss)\nI0822 18:22:42.358464 32551 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0822 18:24:57.979959 32551 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0822 18:26:17.710227 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55908\nI0822 18:26:17.710499 32551 solver.cpp:404]     Test net output #1: loss = 3.80779 (* 1 = 3.80779 loss)\nI0822 18:26:19.003816 32551 solver.cpp:228] Iteration 56100, loss = 0.0260139\nI0822 18:26:19.003862 32551 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 18:26:19.003880 32551 solver.cpp:244]     Train net output #1: loss = 0.026014 (* 1 = 0.026014 loss)\nI0822 18:26:19.100240 32551 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0822 18:28:34.923197 32551 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0822 18:29:54.656810 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50708\nI0822 18:29:54.657119 32551 solver.cpp:404]     Test net output #1: loss = 5.00539 (* 1 = 5.00539 loss)\nI0822 18:29:55.950618 32551 solver.cpp:228] Iteration 56200, loss = 0.0128337\nI0822 18:29:55.950664 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
18:29:55.950680 32551 solver.cpp:244]     Train net output #1: loss = 0.0128337 (* 1 = 0.0128337 loss)\nI0822 18:29:56.042866 32551 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0822 18:32:11.883117 32551 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0822 18:33:31.612490 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55308\nI0822 18:33:31.612772 32551 solver.cpp:404]     Test net output #1: loss = 3.98354 (* 1 = 3.98354 loss)\nI0822 18:33:32.905854 32551 solver.cpp:228] Iteration 56300, loss = 0.00115958\nI0822 18:33:32.905900 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:33:32.905916 32551 solver.cpp:244]     Train net output #1: loss = 0.00115961 (* 1 = 0.00115961 loss)\nI0822 18:33:33.004554 32551 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0822 18:35:48.674449 32551 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0822 18:37:08.401063 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5672\nI0822 18:37:08.401351 32551 solver.cpp:404]     Test net output #1: loss = 3.64128 (* 1 = 3.64128 loss)\nI0822 18:37:09.693313 32551 solver.cpp:228] Iteration 56400, loss = 0.000671674\nI0822 18:37:09.693361 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:37:09.693377 32551 solver.cpp:244]     Train net output #1: loss = 0.000671704 (* 1 = 0.000671704 loss)\nI0822 18:37:09.790285 32551 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0822 18:39:25.384269 32551 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0822 18:40:45.086421 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57552\nI0822 18:40:45.086714 32551 solver.cpp:404]     Test net output #1: loss = 3.36434 (* 1 = 3.36434 loss)\nI0822 18:40:46.379210 32551 solver.cpp:228] Iteration 56500, loss = 0.000517488\nI0822 18:40:46.379256 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:40:46.379271 32551 solver.cpp:244]     Train net output #1: loss = 0.000517518 (* 1 = 0.000517518 loss)\nI0822 
18:40:46.478191 32551 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0822 18:43:02.026286 32551 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0822 18:44:21.722663 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58748\nI0822 18:44:21.722904 32551 solver.cpp:404]     Test net output #1: loss = 3.11335 (* 1 = 3.11335 loss)\nI0822 18:44:23.015480 32551 solver.cpp:228] Iteration 56600, loss = 0.000543317\nI0822 18:44:23.015523 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:44:23.015540 32551 solver.cpp:244]     Train net output #1: loss = 0.000543347 (* 1 = 0.000543347 loss)\nI0822 18:44:23.109859 32551 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0822 18:46:38.737301 32551 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0822 18:47:58.430032 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59052\nI0822 18:47:58.430313 32551 solver.cpp:404]     Test net output #1: loss = 2.99508 (* 1 = 2.99508 loss)\nI0822 18:47:59.727291 32551 solver.cpp:228] Iteration 56700, loss = 0.000554442\nI0822 18:47:59.727337 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:47:59.727354 32551 solver.cpp:244]     Train net output #1: loss = 0.000554471 (* 1 = 0.000554471 loss)\nI0822 18:47:59.819650 32551 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0822 18:50:15.674873 32551 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0822 18:51:35.363077 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5932\nI0822 18:51:35.363338 32551 solver.cpp:404]     Test net output #1: loss = 2.88837 (* 1 = 2.88837 loss)\nI0822 18:51:36.655489 32551 solver.cpp:228] Iteration 56800, loss = 0.000540912\nI0822 18:51:36.655534 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:51:36.655551 32551 solver.cpp:244]     Train net output #1: loss = 0.000540942 (* 1 = 0.000540942 loss)\nI0822 18:51:36.753787 32551 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0822 18:53:52.315704 32551 solver.cpp:337] 
Iteration 56900, Testing net (#0)\nI0822 18:55:12.010699 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58732\nI0822 18:55:12.011010 32551 solver.cpp:404]     Test net output #1: loss = 2.85054 (* 1 = 2.85054 loss)\nI0822 18:55:13.303076 32551 solver.cpp:228] Iteration 56900, loss = 0.000488266\nI0822 18:55:13.303119 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:55:13.303136 32551 solver.cpp:244]     Train net output #1: loss = 0.000488296 (* 1 = 0.000488296 loss)\nI0822 18:55:13.400475 32551 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0822 18:57:28.994987 32551 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0822 18:58:48.701282 32551 solver.cpp:404]     Test net output #0: accuracy = 0.59064\nI0822 18:58:48.701541 32551 solver.cpp:404]     Test net output #1: loss = 2.77644 (* 1 = 2.77644 loss)\nI0822 18:58:49.993543 32551 solver.cpp:228] Iteration 57000, loss = 0.000524339\nI0822 18:58:49.993589 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:58:49.993607 32551 solver.cpp:244]     Train net output #1: loss = 0.000524369 (* 1 = 0.000524369 loss)\nI0822 18:58:50.088881 32551 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0822 19:01:05.758510 32551 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0822 19:02:25.468269 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5868\nI0822 19:02:25.468578 32551 solver.cpp:404]     Test net output #1: loss = 2.76954 (* 1 = 2.76954 loss)\nI0822 19:02:26.760895 32551 solver.cpp:228] Iteration 57100, loss = 0.00052412\nI0822 19:02:26.760937 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:02:26.760953 32551 solver.cpp:244]     Train net output #1: loss = 0.00052415 (* 1 = 0.00052415 loss)\nI0822 19:02:26.859843 32551 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0822 19:04:42.515786 32551 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0822 19:06:02.224990 32551 solver.cpp:404]     Test net output #0: accuracy = 
0.58612\nI0822 19:06:02.225286 32551 solver.cpp:404]     Test net output #1: loss = 2.76754 (* 1 = 2.76754 loss)\nI0822 19:06:03.517365 32551 solver.cpp:228] Iteration 57200, loss = 0.000442457\nI0822 19:06:03.517407 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:06:03.517423 32551 solver.cpp:244]     Train net output #1: loss = 0.000442487 (* 1 = 0.000442487 loss)\nI0822 19:06:03.613965 32551 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0822 19:08:19.221518 32551 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0822 19:09:38.923883 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57924\nI0822 19:09:38.924190 32551 solver.cpp:404]     Test net output #1: loss = 2.7879 (* 1 = 2.7879 loss)\nI0822 19:09:40.216645 32551 solver.cpp:228] Iteration 57300, loss = 0.000402266\nI0822 19:09:40.216686 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:09:40.216703 32551 solver.cpp:244]     Train net output #1: loss = 0.000402296 (* 1 = 0.000402296 loss)\nI0822 19:09:40.313822 32551 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0822 19:11:55.912408 32551 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0822 19:13:15.611986 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5788\nI0822 19:13:15.612274 32551 solver.cpp:404]     Test net output #1: loss = 2.79608 (* 1 = 2.79608 loss)\nI0822 19:13:16.905153 32551 solver.cpp:228] Iteration 57400, loss = 0.000411984\nI0822 19:13:16.905195 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:13:16.905211 32551 solver.cpp:244]     Train net output #1: loss = 0.000412014 (* 1 = 0.000412014 loss)\nI0822 19:13:17.003767 32551 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0822 19:15:32.748371 32551 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0822 19:16:52.452908 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5696\nI0822 19:16:52.453182 32551 solver.cpp:404]     Test net output #1: loss = 2.84262 (* 1 = 2.84262 loss)\nI0822 
19:16:53.745620 32551 solver.cpp:228] Iteration 57500, loss = 0.000439913\nI0822 19:16:53.745662 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:16:53.745678 32551 solver.cpp:244]     Train net output #1: loss = 0.000439942 (* 1 = 0.000439942 loss)\nI0822 19:16:53.839810 32551 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0822 19:19:09.603184 32551 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0822 19:20:29.309437 32551 solver.cpp:404]     Test net output #0: accuracy = 0.57048\nI0822 19:20:29.309741 32551 solver.cpp:404]     Test net output #1: loss = 2.8438 (* 1 = 2.8438 loss)\nI0822 19:20:30.601683 32551 solver.cpp:228] Iteration 57600, loss = 0.000337603\nI0822 19:20:30.601727 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:20:30.601748 32551 solver.cpp:244]     Train net output #1: loss = 0.000337633 (* 1 = 0.000337633 loss)\nI0822 19:20:30.698699 32551 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0822 19:22:46.399768 32551 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0822 19:24:06.102164 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5622\nI0822 19:24:06.102471 32551 solver.cpp:404]     Test net output #1: loss = 2.90437 (* 1 = 2.90437 loss)\nI0822 19:24:07.394243 32551 solver.cpp:228] Iteration 57700, loss = 0.000356217\nI0822 19:24:07.394287 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:24:07.394304 32551 solver.cpp:244]     Train net output #1: loss = 0.000356247 (* 1 = 0.000356247 loss)\nI0822 19:24:07.490170 32551 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0822 19:26:23.315868 32551 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0822 19:27:43.002761 32551 solver.cpp:404]     Test net output #0: accuracy = 0.561\nI0822 19:27:43.003063 32551 solver.cpp:404]     Test net output #1: loss = 2.91094 (* 1 = 2.91094 loss)\nI0822 19:27:44.295079 32551 solver.cpp:228] Iteration 57800, loss = 0.000324029\nI0822 19:27:44.295122 32551 solver.cpp:244]     
Train net output #0: accuracy = 1\nI0822 19:27:44.295138 32551 solver.cpp:244]     Train net output #1: loss = 0.000324059 (* 1 = 0.000324059 loss)\nI0822 19:27:44.391644 32551 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0822 19:30:00.007690 32551 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0822 19:31:19.695560 32551 solver.cpp:404]     Test net output #0: accuracy = 0.55276\nI0822 19:31:19.695878 32551 solver.cpp:404]     Test net output #1: loss = 2.97725 (* 1 = 2.97725 loss)\nI0822 19:31:20.987866 32551 solver.cpp:228] Iteration 57900, loss = 0.00031718\nI0822 19:31:20.987911 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:31:20.987926 32551 solver.cpp:244]     Train net output #1: loss = 0.00031721 (* 1 = 0.00031721 loss)\nI0822 19:31:21.085564 32551 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0822 19:33:36.697172 32551 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0822 19:34:56.319437 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5514\nI0822 19:34:56.319710 32551 solver.cpp:404]     Test net output #1: loss = 2.9945 (* 1 = 2.9945 loss)\nI0822 19:34:57.611752 32551 solver.cpp:228] Iteration 58000, loss = 0.00034249\nI0822 19:34:57.611794 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:34:57.611811 32551 solver.cpp:244]     Train net output #1: loss = 0.00034252 (* 1 = 0.00034252 loss)\nI0822 19:34:57.705337 32551 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0822 19:37:13.343791 32551 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0822 19:38:32.978714 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54416\nI0822 19:38:32.978999 32551 solver.cpp:404]     Test net output #1: loss = 3.04933 (* 1 = 3.04933 loss)\nI0822 19:38:34.270849 32551 solver.cpp:228] Iteration 58100, loss = 0.000397714\nI0822 19:38:34.270894 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:38:34.270910 32551 solver.cpp:244]     Train net output #1: loss = 0.000397744 (* 1 
= 0.000397744 loss)\nI0822 19:38:34.368139 32551 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0822 19:40:50.256762 32551 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0822 19:42:09.899102 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54488\nI0822 19:42:09.899368 32551 solver.cpp:404]     Test net output #1: loss = 3.04129 (* 1 = 3.04129 loss)\nI0822 19:42:11.191282 32551 solver.cpp:228] Iteration 58200, loss = 0.000346317\nI0822 19:42:11.191326 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:42:11.191342 32551 solver.cpp:244]     Train net output #1: loss = 0.000346346 (* 1 = 0.000346346 loss)\nI0822 19:42:11.287227 32551 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0822 19:44:26.948885 32551 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0822 19:45:46.590801 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53672\nI0822 19:45:46.591100 32551 solver.cpp:404]     Test net output #1: loss = 3.1079 (* 1 = 3.1079 loss)\nI0822 19:45:47.883858 32551 solver.cpp:228] Iteration 58300, loss = 0.000415977\nI0822 19:45:47.883904 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:45:47.883920 32551 solver.cpp:244]     Train net output #1: loss = 0.000416006 (* 1 = 0.000416006 loss)\nI0822 19:45:47.979385 32551 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0822 19:48:03.588446 32551 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0822 19:49:23.228499 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5362\nI0822 19:49:23.228806 32551 solver.cpp:404]     Test net output #1: loss = 3.12685 (* 1 = 3.12685 loss)\nI0822 19:49:24.520555 32551 solver.cpp:228] Iteration 58400, loss = 0.000415499\nI0822 19:49:24.520601 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:49:24.520617 32551 solver.cpp:244]     Train net output #1: loss = 0.000415528 (* 1 = 0.000415528 loss)\nI0822 19:49:24.611624 32551 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0822 19:51:40.454499 
32551 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0822 19:53:00.097416 32551 solver.cpp:404]     Test net output #0: accuracy = 0.52976\nI0822 19:53:00.097718 32551 solver.cpp:404]     Test net output #1: loss = 3.1707 (* 1 = 3.1707 loss)\nI0822 19:53:01.390736 32551 solver.cpp:228] Iteration 58500, loss = 0.00025459\nI0822 19:53:01.390781 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:53:01.390797 32551 solver.cpp:244]     Train net output #1: loss = 0.00025462 (* 1 = 0.00025462 loss)\nI0822 19:53:01.483191 32551 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0822 19:55:17.294581 32551 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0822 19:56:36.929879 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53068\nI0822 19:56:36.930250 32551 solver.cpp:404]     Test net output #1: loss = 3.15934 (* 1 = 3.15934 loss)\nI0822 19:56:38.222775 32551 solver.cpp:228] Iteration 58600, loss = 0.000411964\nI0822 19:56:38.222822 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:56:38.222838 32551 solver.cpp:244]     Train net output #1: loss = 0.000411994 (* 1 = 0.000411994 loss)\nI0822 19:56:38.323073 32551 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0822 19:58:54.134029 32551 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0822 20:00:13.767798 32551 solver.cpp:404]     Test net output #0: accuracy = 0.52316\nI0822 20:00:13.768061 32551 solver.cpp:404]     Test net output #1: loss = 3.22075 (* 1 = 3.22075 loss)\nI0822 20:00:15.060374 32551 solver.cpp:228] Iteration 58700, loss = 0.000298372\nI0822 20:00:15.060420 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:00:15.060436 32551 solver.cpp:244]     Train net output #1: loss = 0.000298402 (* 1 = 0.000298402 loss)\nI0822 20:00:15.153929 32551 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0822 20:02:30.867633 32551 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0822 20:03:50.511081 32551 solver.cpp:404]     Test net output 
#0: accuracy = 0.52492\nI0822 20:03:50.511384 32551 solver.cpp:404]     Test net output #1: loss = 3.20737 (* 1 = 3.20737 loss)\nI0822 20:03:51.804445 32551 solver.cpp:228] Iteration 58800, loss = 0.000351873\nI0822 20:03:51.804491 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:03:51.804507 32551 solver.cpp:244]     Train net output #1: loss = 0.000351903 (* 1 = 0.000351903 loss)\nI0822 20:03:51.895676 32551 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0822 20:06:07.667641 32551 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0822 20:07:27.332015 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51732\nI0822 20:07:27.332314 32551 solver.cpp:404]     Test net output #1: loss = 3.25303 (* 1 = 3.25303 loss)\nI0822 20:07:28.625100 32551 solver.cpp:228] Iteration 58900, loss = 0.000350426\nI0822 20:07:28.625146 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:07:28.625162 32551 solver.cpp:244]     Train net output #1: loss = 0.000350456 (* 1 = 0.000350456 loss)\nI0822 20:07:28.719244 32551 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0822 20:09:44.367544 32551 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0822 20:11:04.077237 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51972\nI0822 20:11:04.077528 32551 solver.cpp:404]     Test net output #1: loss = 3.2349 (* 1 = 3.2349 loss)\nI0822 20:11:05.369668 32551 solver.cpp:228] Iteration 59000, loss = 0.000387263\nI0822 20:11:05.369714 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:11:05.369730 32551 solver.cpp:244]     Train net output #1: loss = 0.000387292 (* 1 = 0.000387292 loss)\nI0822 20:11:05.466820 32551 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0822 20:13:21.461266 32551 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0822 20:14:41.164263 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51412\nI0822 20:14:41.164547 32551 solver.cpp:404]     Test net output #1: loss = 3.2845 (* 1 = 3.2845 
loss)\nI0822 20:14:42.457832 32551 solver.cpp:228] Iteration 59100, loss = 0.000250327\nI0822 20:14:42.457880 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:14:42.457895 32551 solver.cpp:244]     Train net output #1: loss = 0.000250357 (* 1 = 0.000250357 loss)\nI0822 20:14:42.551471 32551 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0822 20:16:58.198410 32551 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0822 20:18:17.898917 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51636\nI0822 20:18:17.899181 32551 solver.cpp:404]     Test net output #1: loss = 3.27876 (* 1 = 3.27876 loss)\nI0822 20:18:19.188594 32551 solver.cpp:228] Iteration 59200, loss = 0.000333779\nI0822 20:18:19.188640 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:18:19.188657 32551 solver.cpp:244]     Train net output #1: loss = 0.000333809 (* 1 = 0.000333809 loss)\nI0822 20:18:19.289516 32551 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0822 20:20:35.073359 32551 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0822 20:21:54.785387 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50896\nI0822 20:21:54.785681 32551 solver.cpp:404]     Test net output #1: loss = 3.32762 (* 1 = 3.32762 loss)\nI0822 20:21:56.078169 32551 solver.cpp:228] Iteration 59300, loss = 0.000375562\nI0822 20:21:56.078217 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:21:56.078233 32551 solver.cpp:244]     Train net output #1: loss = 0.000375592 (* 1 = 0.000375592 loss)\nI0822 20:21:56.173282 32551 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0822 20:24:11.941365 32551 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0822 20:25:31.648380 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51252\nI0822 20:25:31.648684 32551 solver.cpp:404]     Test net output #1: loss = 3.31031 (* 1 = 3.31031 loss)\nI0822 20:25:32.940819 32551 solver.cpp:228] Iteration 59400, loss = 0.000322887\nI0822 20:25:32.940865 32551 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:25:32.940881 32551 solver.cpp:244]     Train net output #1: loss = 0.000322917 (* 1 = 0.000322917 loss)\nI0822 20:25:33.039523 32551 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0822 20:27:48.752127 32551 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0822 20:29:08.462091 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5058\nI0822 20:29:08.462393 32551 solver.cpp:404]     Test net output #1: loss = 3.35815 (* 1 = 3.35815 loss)\nI0822 20:29:09.754940 32551 solver.cpp:228] Iteration 59500, loss = 0.000271135\nI0822 20:29:09.754988 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:29:09.755004 32551 solver.cpp:244]     Train net output #1: loss = 0.000271165 (* 1 = 0.000271165 loss)\nI0822 20:29:09.852967 32551 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0822 20:31:25.531472 32551 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0822 20:32:45.239339 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50892\nI0822 20:32:45.239641 32551 solver.cpp:404]     Test net output #1: loss = 3.33745 (* 1 = 3.33745 loss)\nI0822 20:32:46.531306 32551 solver.cpp:228] Iteration 59600, loss = 0.00033354\nI0822 20:32:46.531349 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:32:46.531365 32551 solver.cpp:244]     Train net output #1: loss = 0.00033357 (* 1 = 0.00033357 loss)\nI0822 20:32:46.631387 32551 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0822 20:35:02.280496 32551 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0822 20:36:21.983868 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50416\nI0822 20:36:21.984172 32551 solver.cpp:404]     Test net output #1: loss = 3.3674 (* 1 = 3.3674 loss)\nI0822 20:36:23.276494 32551 solver.cpp:228] Iteration 59700, loss = 0.000399175\nI0822 20:36:23.276535 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:36:23.276552 32551 solver.cpp:244]     Train net output #1: 
loss = 0.000399205 (* 1 = 0.000399205 loss)\nI0822 20:36:23.372908 32551 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0822 20:38:39.239843 32551 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0822 20:39:58.953694 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50688\nI0822 20:39:58.954020 32551 solver.cpp:404]     Test net output #1: loss = 3.36457 (* 1 = 3.36457 loss)\nI0822 20:40:00.246379 32551 solver.cpp:228] Iteration 59800, loss = 0.000375301\nI0822 20:40:00.246425 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:40:00.246443 32551 solver.cpp:244]     Train net output #1: loss = 0.000375331 (* 1 = 0.000375331 loss)\nI0822 20:40:00.341575 32551 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0822 20:42:15.880555 32551 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0822 20:43:35.594224 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50092\nI0822 20:43:35.594521 32551 solver.cpp:404]     Test net output #1: loss = 3.4019 (* 1 = 3.4019 loss)\nI0822 20:43:36.886531 32551 solver.cpp:228] Iteration 59900, loss = 0.000320698\nI0822 20:43:36.886577 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:43:36.886592 32551 solver.cpp:244]     Train net output #1: loss = 0.000320728 (* 1 = 0.000320728 loss)\nI0822 20:43:36.993922 32551 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0822 20:45:52.850411 32551 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0822 20:47:12.555119 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50504\nI0822 20:47:12.555421 32551 solver.cpp:404]     Test net output #1: loss = 3.37546 (* 1 = 3.37546 loss)\nI0822 20:47:13.848459 32551 solver.cpp:228] Iteration 60000, loss = 0.000371782\nI0822 20:47:13.848503 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:47:13.848518 32551 solver.cpp:244]     Train net output #1: loss = 0.000371812 (* 1 = 0.000371812 loss)\nI0822 20:47:13.943825 32551 sgd_solver.cpp:166] Iteration 60000, lr = 
0.035\nI0822 20:49:29.464097 32551 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0822 20:50:49.158620 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49944\nI0822 20:50:49.158917 32551 solver.cpp:404]     Test net output #1: loss = 3.3974 (* 1 = 3.3974 loss)\nI0822 20:50:50.451568 32551 solver.cpp:228] Iteration 60100, loss = 0.000414484\nI0822 20:50:50.451611 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:50:50.451627 32551 solver.cpp:244]     Train net output #1: loss = 0.000414514 (* 1 = 0.000414514 loss)\nI0822 20:50:50.547252 32551 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0822 20:53:06.075417 32551 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0822 20:54:25.769325 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5018\nI0822 20:54:25.769587 32551 solver.cpp:404]     Test net output #1: loss = 3.39731 (* 1 = 3.39731 loss)\nI0822 20:54:27.062943 32551 solver.cpp:228] Iteration 60200, loss = 0.000385954\nI0822 20:54:27.062988 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:54:27.063004 32551 solver.cpp:244]     Train net output #1: loss = 0.000385984 (* 1 = 0.000385984 loss)\nI0822 20:54:27.154644 32551 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0822 20:56:42.664108 32551 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0822 20:58:02.360811 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49936\nI0822 20:58:02.361115 32551 solver.cpp:404]     Test net output #1: loss = 3.40182 (* 1 = 3.40182 loss)\nI0822 20:58:03.653337 32551 solver.cpp:228] Iteration 60300, loss = 0.000296583\nI0822 20:58:03.653383 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:58:03.653399 32551 solver.cpp:244]     Train net output #1: loss = 0.000296612 (* 1 = 0.000296612 loss)\nI0822 20:58:03.743962 32551 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0822 21:00:19.236654 32551 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0822 21:01:38.929641 32551 
solver.cpp:404]     Test net output #0: accuracy = 0.50224\nI0822 21:01:38.929934 32551 solver.cpp:404]     Test net output #1: loss = 3.38426 (* 1 = 3.38426 loss)\nI0822 21:01:40.221276 32551 solver.cpp:228] Iteration 60400, loss = 0.00035026\nI0822 21:01:40.221318 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:01:40.221334 32551 solver.cpp:244]     Train net output #1: loss = 0.00035029 (* 1 = 0.00035029 loss)\nI0822 21:01:40.312589 32551 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0822 21:03:55.864239 32551 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0822 21:05:15.550307 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49836\nI0822 21:05:15.550595 32551 solver.cpp:404]     Test net output #1: loss = 3.39657 (* 1 = 3.39657 loss)\nI0822 21:05:16.843369 32551 solver.cpp:228] Iteration 60500, loss = 0.000392205\nI0822 21:05:16.843410 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:05:16.843426 32551 solver.cpp:244]     Train net output #1: loss = 0.000392235 (* 1 = 0.000392235 loss)\nI0822 21:05:16.941893 32551 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0822 21:07:32.466264 32551 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0822 21:08:52.147927 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5022\nI0822 21:08:52.148216 32551 solver.cpp:404]     Test net output #1: loss = 3.36482 (* 1 = 3.36482 loss)\nI0822 21:08:53.440863 32551 solver.cpp:228] Iteration 60600, loss = 0.000340484\nI0822 21:08:53.440908 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:08:53.440924 32551 solver.cpp:244]     Train net output #1: loss = 0.000340514 (* 1 = 0.000340514 loss)\nI0822 21:08:53.536113 32551 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0822 21:11:09.101728 32551 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0822 21:12:28.796805 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49472\nI0822 21:12:28.797111 32551 solver.cpp:404]     Test net 
output #1: loss = 3.4112 (* 1 = 3.4112 loss)\nI0822 21:12:30.090450 32551 solver.cpp:228] Iteration 60700, loss = 0.000310378\nI0822 21:12:30.090492 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:12:30.090508 32551 solver.cpp:244]     Train net output #1: loss = 0.000310407 (* 1 = 0.000310407 loss)\nI0822 21:12:30.177155 32551 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0822 21:14:45.618165 32551 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0822 21:16:05.305248 32551 solver.cpp:404]     Test net output #0: accuracy = 0.50148\nI0822 21:16:05.305550 32551 solver.cpp:404]     Test net output #1: loss = 3.35357 (* 1 = 3.35357 loss)\nI0822 21:16:06.599066 32551 solver.cpp:228] Iteration 60800, loss = 0.000389091\nI0822 21:16:06.599110 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:16:06.599125 32551 solver.cpp:244]     Train net output #1: loss = 0.000389121 (* 1 = 0.000389121 loss)\nI0822 21:16:06.690444 32551 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0822 21:18:22.255435 32551 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0822 21:19:41.965474 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49644\nI0822 21:19:41.965795 32551 solver.cpp:404]     Test net output #1: loss = 3.37056 (* 1 = 3.37056 loss)\nI0822 21:19:43.258745 32551 solver.cpp:228] Iteration 60900, loss = 0.000358383\nI0822 21:19:43.258788 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:19:43.258805 32551 solver.cpp:244]     Train net output #1: loss = 0.000358413 (* 1 = 0.000358413 loss)\nI0822 21:19:43.356256 32551 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0822 21:21:58.917845 32551 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0822 21:23:18.619756 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5006\nI0822 21:23:18.620080 32551 solver.cpp:404]     Test net output #1: loss = 3.35399 (* 1 = 3.35399 loss)\nI0822 21:23:19.912987 32551 solver.cpp:228] Iteration 61000, loss = 
0.000426091\nI0822 21:23:19.913033 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:23:19.913049 32551 solver.cpp:244]     Train net output #1: loss = 0.000426121 (* 1 = 0.000426121 loss)\nI0822 21:23:20.004321 32551 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0822 21:25:35.543481 32551 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0822 21:26:55.243053 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49636\nI0822 21:26:55.243329 32551 solver.cpp:404]     Test net output #1: loss = 3.37117 (* 1 = 3.37117 loss)\nI0822 21:26:56.535799 32551 solver.cpp:228] Iteration 61100, loss = 0.000458088\nI0822 21:26:56.535842 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:26:56.535858 32551 solver.cpp:244]     Train net output #1: loss = 0.000458117 (* 1 = 0.000458117 loss)\nI0822 21:26:56.626849 32551 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0822 21:29:12.321722 32551 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0822 21:30:32.030707 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49776\nI0822 21:30:32.030998 32551 solver.cpp:404]     Test net output #1: loss = 3.37483 (* 1 = 3.37483 loss)\nI0822 21:30:33.323751 32551 solver.cpp:228] Iteration 61200, loss = 0.000338669\nI0822 21:30:33.323796 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:30:33.323812 32551 solver.cpp:244]     Train net output #1: loss = 0.000338698 (* 1 = 0.000338698 loss)\nI0822 21:30:33.420779 32551 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0822 21:32:48.935878 32551 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0822 21:34:08.633209 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49276\nI0822 21:34:08.633476 32551 solver.cpp:404]     Test net output #1: loss = 3.39321 (* 1 = 3.39321 loss)\nI0822 21:34:09.925987 32551 solver.cpp:228] Iteration 61300, loss = 0.000476719\nI0822 21:34:09.926034 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:34:09.926050 
32551 solver.cpp:244]     Train net output #1: loss = 0.000476749 (* 1 = 0.000476749 loss)\nI0822 21:34:10.023073 32551 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0822 21:36:25.597772 32551 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0822 21:37:45.284236 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49808\nI0822 21:37:45.284543 32551 solver.cpp:404]     Test net output #1: loss = 3.35146 (* 1 = 3.35146 loss)\nI0822 21:37:46.577306 32551 solver.cpp:228] Iteration 61400, loss = 0.00030385\nI0822 21:37:46.577348 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:37:46.577363 32551 solver.cpp:244]     Train net output #1: loss = 0.00030388 (* 1 = 0.00030388 loss)\nI0822 21:37:46.667364 32551 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0822 21:40:02.241747 32551 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0822 21:41:21.938797 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49496\nI0822 21:41:21.939079 32551 solver.cpp:404]     Test net output #1: loss = 3.36334 (* 1 = 3.36334 loss)\nI0822 21:41:23.231906 32551 solver.cpp:228] Iteration 61500, loss = 0.000440233\nI0822 21:41:23.231950 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:41:23.231966 32551 solver.cpp:244]     Train net output #1: loss = 0.000440263 (* 1 = 0.000440263 loss)\nI0822 21:41:23.326928 32551 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0822 21:43:38.912238 32551 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0822 21:44:58.610306 32551 solver.cpp:404]     Test net output #0: accuracy = 0.498\nI0822 21:44:58.610574 32551 solver.cpp:404]     Test net output #1: loss = 3.34606 (* 1 = 3.34606 loss)\nI0822 21:44:59.903167 32551 solver.cpp:228] Iteration 61600, loss = 0.000341816\nI0822 21:44:59.903213 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:44:59.903228 32551 solver.cpp:244]     Train net output #1: loss = 0.000341846 (* 1 = 0.000341846 loss)\nI0822 21:45:00.000933 32551 
sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0822 21:47:15.618731 32551 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0822 21:48:35.310021 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49424\nI0822 21:48:35.310310 32551 solver.cpp:404]     Test net output #1: loss = 3.35857 (* 1 = 3.35857 loss)\nI0822 21:48:36.603768 32551 solver.cpp:228] Iteration 61700, loss = 0.000308111\nI0822 21:48:36.603813 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:48:36.603828 32551 solver.cpp:244]     Train net output #1: loss = 0.000308141 (* 1 = 0.000308141 loss)\nI0822 21:48:36.692358 32551 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0822 21:50:52.267278 32551 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0822 21:52:11.903264 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49672\nI0822 21:52:11.903532 32551 solver.cpp:404]     Test net output #1: loss = 3.34867 (* 1 = 3.34867 loss)\nI0822 21:52:13.196681 32551 solver.cpp:228] Iteration 61800, loss = 0.000346034\nI0822 21:52:13.196727 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:52:13.196743 32551 solver.cpp:244]     Train net output #1: loss = 0.000346064 (* 1 = 0.000346064 loss)\nI0822 21:52:13.296852 32551 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0822 21:54:28.954915 32551 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0822 21:55:48.591699 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4908\nI0822 21:55:48.592033 32551 solver.cpp:404]     Test net output #1: loss = 3.38172 (* 1 = 3.38172 loss)\nI0822 21:55:49.885115 32551 solver.cpp:228] Iteration 61900, loss = 0.000353148\nI0822 21:55:49.885160 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:55:49.885176 32551 solver.cpp:244]     Train net output #1: loss = 0.000353178 (* 1 = 0.000353178 loss)\nI0822 21:55:49.974591 32551 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0822 21:58:05.606981 32551 solver.cpp:337] Iteration 62000, Testing 
net (#0)\nI0822 21:59:25.239133 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4946\nI0822 21:59:25.239408 32551 solver.cpp:404]     Test net output #1: loss = 3.36113 (* 1 = 3.36113 loss)\nI0822 21:59:26.531755 32551 solver.cpp:228] Iteration 62000, loss = 0.000464055\nI0822 21:59:26.531800 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:59:26.531816 32551 solver.cpp:244]     Train net output #1: loss = 0.000464085 (* 1 = 0.000464085 loss)\nI0822 21:59:26.630529 32551 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0822 22:01:42.325745 32551 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0822 22:03:01.953202 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49052\nI0822 22:03:01.953459 32551 solver.cpp:404]     Test net output #1: loss = 3.37952 (* 1 = 3.37952 loss)\nI0822 22:03:03.244977 32551 solver.cpp:228] Iteration 62100, loss = 0.000402162\nI0822 22:03:03.245023 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:03:03.245038 32551 solver.cpp:244]     Train net output #1: loss = 0.000402192 (* 1 = 0.000402192 loss)\nI0822 22:03:03.345248 32551 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0822 22:05:19.019363 32551 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0822 22:06:38.639257 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4952\nI0822 22:06:38.639521 32551 solver.cpp:404]     Test net output #1: loss = 3.34701 (* 1 = 3.34701 loss)\nI0822 22:06:39.932044 32551 solver.cpp:228] Iteration 62200, loss = 0.000358604\nI0822 22:06:39.932088 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:06:39.932103 32551 solver.cpp:244]     Train net output #1: loss = 0.000358634 (* 1 = 0.000358634 loss)\nI0822 22:06:40.020383 32551 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0822 22:08:55.588618 32551 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0822 22:10:15.211849 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4912\nI0822 22:10:15.212111 
32551 solver.cpp:404]     Test net output #1: loss = 3.35885 (* 1 = 3.35885 loss)\nI0822 22:10:16.504652 32551 solver.cpp:228] Iteration 62300, loss = 0.000285444\nI0822 22:10:16.504696 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:10:16.504712 32551 solver.cpp:244]     Train net output #1: loss = 0.000285473 (* 1 = 0.000285473 loss)\nI0822 22:10:16.593214 32551 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0822 22:12:32.183703 32551 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0822 22:13:51.806660 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49772\nI0822 22:13:51.806936 32551 solver.cpp:404]     Test net output #1: loss = 3.31162 (* 1 = 3.31162 loss)\nI0822 22:13:53.098820 32551 solver.cpp:228] Iteration 62400, loss = 0.000308329\nI0822 22:13:53.098865 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:13:53.098881 32551 solver.cpp:244]     Train net output #1: loss = 0.000308359 (* 1 = 0.000308359 loss)\nI0822 22:13:53.195915 32551 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0822 22:16:08.809559 32551 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0822 22:17:28.438175 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49304\nI0822 22:17:28.438488 32551 solver.cpp:404]     Test net output #1: loss = 3.32508 (* 1 = 3.32508 loss)\nI0822 22:17:29.730232 32551 solver.cpp:228] Iteration 62500, loss = 0.000354911\nI0822 22:17:29.730278 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:17:29.730293 32551 solver.cpp:244]     Train net output #1: loss = 0.000354941 (* 1 = 0.000354941 loss)\nI0822 22:17:29.824159 32551 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0822 22:19:45.466248 32551 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0822 22:21:05.094357 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4964\nI0822 22:21:05.094666 32551 solver.cpp:404]     Test net output #1: loss = 3.31022 (* 1 = 3.31022 loss)\nI0822 22:21:06.386678 32551 
solver.cpp:228] Iteration 62600, loss = 0.000371792\nI0822 22:21:06.386723 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:21:06.386739 32551 solver.cpp:244]     Train net output #1: loss = 0.000371822 (* 1 = 0.000371822 loss)\nI0822 22:21:06.485328 32551 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0822 22:23:22.120293 32551 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0822 22:24:41.812964 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48924\nI0822 22:24:41.813201 32551 solver.cpp:404]     Test net output #1: loss = 3.3671 (* 1 = 3.3671 loss)\nI0822 22:24:43.104854 32551 solver.cpp:228] Iteration 62700, loss = 0.000323196\nI0822 22:24:43.104898 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:24:43.104915 32551 solver.cpp:244]     Train net output #1: loss = 0.000323226 (* 1 = 0.000323226 loss)\nI0822 22:24:43.205153 32551 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0822 22:26:58.860952 32551 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0822 22:28:18.562970 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4942\nI0822 22:28:18.563277 32551 solver.cpp:404]     Test net output #1: loss = 3.33135 (* 1 = 3.33135 loss)\nI0822 22:28:19.854887 32551 solver.cpp:228] Iteration 62800, loss = 0.000379713\nI0822 22:28:19.854933 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:28:19.854949 32551 solver.cpp:244]     Train net output #1: loss = 0.000379743 (* 1 = 0.000379743 loss)\nI0822 22:28:19.956809 32551 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0822 22:30:35.632748 32551 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0822 22:31:55.345294 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48932\nI0822 22:31:55.345607 32551 solver.cpp:404]     Test net output #1: loss = 3.34631 (* 1 = 3.34631 loss)\nI0822 22:31:56.639160 32551 solver.cpp:228] Iteration 62900, loss = 0.000265089\nI0822 22:31:56.639210 32551 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0822 22:31:56.639235 32551 solver.cpp:244]     Train net output #1: loss = 0.000265119 (* 1 = 0.000265119 loss)\nI0822 22:31:56.734606 32551 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0822 22:34:12.347823 32551 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0822 22:35:32.067885 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49628\nI0822 22:35:32.068186 32551 solver.cpp:404]     Test net output #1: loss = 3.29821 (* 1 = 3.29821 loss)\nI0822 22:35:33.361857 32551 solver.cpp:228] Iteration 63000, loss = 0.000283402\nI0822 22:35:33.361907 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:35:33.361930 32551 solver.cpp:244]     Train net output #1: loss = 0.000283432 (* 1 = 0.000283432 loss)\nI0822 22:35:33.454396 32551 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0822 22:37:49.187031 32551 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0822 22:39:08.889328 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49144\nI0822 22:39:08.889600 32551 solver.cpp:404]     Test net output #1: loss = 3.32391 (* 1 = 3.32391 loss)\nI0822 22:39:10.183580 32551 solver.cpp:228] Iteration 63100, loss = 0.000307821\nI0822 22:39:10.183630 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:39:10.183655 32551 solver.cpp:244]     Train net output #1: loss = 0.00030785 (* 1 = 0.00030785 loss)\nI0822 22:39:10.276268 32551 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0822 22:41:26.193395 32551 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0822 22:42:45.893957 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49564\nI0822 22:42:45.894224 32551 solver.cpp:404]     Test net output #1: loss = 3.28658 (* 1 = 3.28658 loss)\nI0822 22:42:47.188107 32551 solver.cpp:228] Iteration 63200, loss = 0.000323837\nI0822 22:42:47.188153 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:42:47.188170 32551 solver.cpp:244]     Train net output #1: loss = 0.000323867 (* 1 = 0.000323867 
loss)\nI0822 22:42:47.283479 32551 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0822 22:45:02.893453 32551 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0822 22:46:22.622637 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48864\nI0822 22:46:22.622926 32551 solver.cpp:404]     Test net output #1: loss = 3.33337 (* 1 = 3.33337 loss)\nI0822 22:46:23.916862 32551 solver.cpp:228] Iteration 63300, loss = 0.000324926\nI0822 22:46:23.916908 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:46:23.916923 32551 solver.cpp:244]     Train net output #1: loss = 0.000324956 (* 1 = 0.000324956 loss)\nI0822 22:46:24.011348 32551 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0822 22:48:39.629956 32551 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0822 22:49:59.334867 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49564\nI0822 22:49:59.335139 32551 solver.cpp:404]     Test net output #1: loss = 3.27244 (* 1 = 3.27244 loss)\nI0822 22:50:00.630326 32551 solver.cpp:228] Iteration 63400, loss = 0.000323871\nI0822 22:50:00.630373 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:50:00.630388 32551 solver.cpp:244]     Train net output #1: loss = 0.000323901 (* 1 = 0.000323901 loss)\nI0822 22:50:00.725875 32551 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0822 22:52:16.641497 32551 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0822 22:53:36.346554 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49004\nI0822 22:53:36.346848 32551 solver.cpp:404]     Test net output #1: loss = 3.30829 (* 1 = 3.30829 loss)\nI0822 22:53:37.639951 32551 solver.cpp:228] Iteration 63500, loss = 0.000350051\nI0822 22:53:37.639997 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:53:37.640012 32551 solver.cpp:244]     Train net output #1: loss = 0.000350081 (* 1 = 0.000350081 loss)\nI0822 22:53:37.734030 32551 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0822 22:55:53.502056 32551 
solver.cpp:337] Iteration 63600, Testing net (#0)\nI0822 22:57:13.206591 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49604\nI0822 22:57:13.206872 32551 solver.cpp:404]     Test net output #1: loss = 3.26782 (* 1 = 3.26782 loss)\nI0822 22:57:14.496078 32551 solver.cpp:228] Iteration 63600, loss = 0.000298994\nI0822 22:57:14.496124 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:57:14.496140 32551 solver.cpp:244]     Train net output #1: loss = 0.000299024 (* 1 = 0.000299024 loss)\nI0822 22:57:14.600816 32551 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0822 22:59:30.171702 32551 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0822 23:00:49.880739 32551 solver.cpp:404]     Test net output #0: accuracy = 0.48956\nI0822 23:00:49.881062 32551 solver.cpp:404]     Test net output #1: loss = 3.30247 (* 1 = 3.30247 loss)\nI0822 23:00:51.170334 32551 solver.cpp:228] Iteration 63700, loss = 0.000322036\nI0822 23:00:51.170380 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:00:51.170395 32551 solver.cpp:244]     Train net output #1: loss = 0.000322066 (* 1 = 0.000322066 loss)\nI0822 23:00:51.267099 32551 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0822 23:03:06.896315 32551 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0822 23:04:26.594027 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49576\nI0822 23:04:26.594327 32551 solver.cpp:404]     Test net output #1: loss = 3.26163 (* 1 = 3.26163 loss)\nI0822 23:04:27.883962 32551 solver.cpp:228] Iteration 63800, loss = 0.000400375\nI0822 23:04:27.884008 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:04:27.884026 32551 solver.cpp:244]     Train net output #1: loss = 0.000400404 (* 1 = 0.000400404 loss)\nI0822 23:04:27.984351 32551 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0822 23:06:43.831917 32551 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0822 23:08:03.536298 32551 solver.cpp:404]     Test net output #0: 
accuracy = 0.49172\nI0822 23:08:03.536592 32551 solver.cpp:404]     Test net output #1: loss = 3.29243 (* 1 = 3.29243 loss)\nI0822 23:08:04.825888 32551 solver.cpp:228] Iteration 63900, loss = 0.000356747\nI0822 23:08:04.825934 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:08:04.825950 32551 solver.cpp:244]     Train net output #1: loss = 0.000356777 (* 1 = 0.000356777 loss)\nI0822 23:08:04.920567 32551 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0822 23:10:20.608930 32551 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0822 23:11:40.316325 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49536\nI0822 23:11:40.316644 32551 solver.cpp:404]     Test net output #1: loss = 3.26406 (* 1 = 3.26406 loss)\nI0822 23:11:41.606030 32551 solver.cpp:228] Iteration 64000, loss = 0.000269763\nI0822 23:11:41.606076 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:11:41.606093 32551 solver.cpp:244]     Train net output #1: loss = 0.000269793 (* 1 = 0.000269793 loss)\nI0822 23:11:41.707587 32551 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0822 23:13:57.265446 32551 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0822 23:15:16.977324 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49328\nI0822 23:15:16.977598 32551 solver.cpp:404]     Test net output #1: loss = 3.2698 (* 1 = 3.2698 loss)\nI0822 23:15:18.267421 32551 solver.cpp:228] Iteration 64100, loss = 0.000325472\nI0822 23:15:18.267465 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:15:18.267482 32551 solver.cpp:244]     Train net output #1: loss = 0.000325502 (* 1 = 0.000325502 loss)\nI0822 23:15:18.362818 32551 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0822 23:17:33.909056 32551 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0822 23:18:53.615898 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49544\nI0822 23:18:53.616183 32551 solver.cpp:404]     Test net output #1: loss = 3.25456 (* 1 = 3.25456 
loss)\nI0822 23:18:54.905796 32551 solver.cpp:228] Iteration 64200, loss = 0.00037816\nI0822 23:18:54.905840 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:18:54.905855 32551 solver.cpp:244]     Train net output #1: loss = 0.00037819 (* 1 = 0.00037819 loss)\nI0822 23:18:55.007144 32551 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0822 23:21:10.777446 32551 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0822 23:22:30.485097 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49184\nI0822 23:22:30.485409 32551 solver.cpp:404]     Test net output #1: loss = 3.27994 (* 1 = 3.27994 loss)\nI0822 23:22:31.774420 32551 solver.cpp:228] Iteration 64300, loss = 0.000433363\nI0822 23:22:31.774464 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:22:31.774480 32551 solver.cpp:244]     Train net output #1: loss = 0.000433393 (* 1 = 0.000433393 loss)\nI0822 23:22:31.870350 32551 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0822 23:24:47.417309 32551 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0822 23:26:07.120303 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49836\nI0822 23:26:07.120615 32551 solver.cpp:404]     Test net output #1: loss = 3.22575 (* 1 = 3.22575 loss)\nI0822 23:26:08.410610 32551 solver.cpp:228] Iteration 64400, loss = 0.000359831\nI0822 23:26:08.410655 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:26:08.410671 32551 solver.cpp:244]     Train net output #1: loss = 0.000359861 (* 1 = 0.000359861 loss)\nI0822 23:26:08.510202 32551 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0822 23:28:23.913472 32551 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0822 23:29:43.621878 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49312\nI0822 23:29:43.622191 32551 solver.cpp:404]     Test net output #1: loss = 3.25937 (* 1 = 3.25937 loss)\nI0822 23:29:44.911557 32551 solver.cpp:228] Iteration 64500, loss = 0.000319581\nI0822 23:29:44.911602 32551 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:29:44.911618 32551 solver.cpp:244]     Train net output #1: loss = 0.000319611 (* 1 = 0.000319611 loss)\nI0822 23:29:45.005889 32551 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0822 23:32:00.629282 32551 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0822 23:33:20.350317 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49596\nI0822 23:33:20.350633 32551 solver.cpp:404]     Test net output #1: loss = 3.24088 (* 1 = 3.24088 loss)\nI0822 23:33:21.640707 32551 solver.cpp:228] Iteration 64600, loss = 0.000411731\nI0822 23:33:21.640750 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:33:21.640766 32551 solver.cpp:244]     Train net output #1: loss = 0.000411761 (* 1 = 0.000411761 loss)\nI0822 23:33:21.735702 32551 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0822 23:35:37.207432 32551 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0822 23:36:56.920050 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49208\nI0822 23:36:56.920305 32551 solver.cpp:404]     Test net output #1: loss = 3.25767 (* 1 = 3.25767 loss)\nI0822 23:36:58.209805 32551 solver.cpp:228] Iteration 64700, loss = 0.000343866\nI0822 23:36:58.209849 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:36:58.209866 32551 solver.cpp:244]     Train net output #1: loss = 0.000343896 (* 1 = 0.000343896 loss)\nI0822 23:36:58.303262 32551 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0822 23:39:13.971199 32551 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0822 23:40:33.674458 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49764\nI0822 23:40:33.674726 32551 solver.cpp:404]     Test net output #1: loss = 3.21579 (* 1 = 3.21579 loss)\nI0822 23:40:34.965458 32551 solver.cpp:228] Iteration 64800, loss = 0.000349863\nI0822 23:40:34.965503 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:40:34.965518 32551 solver.cpp:244]     Train net 
output #1: loss = 0.000349892 (* 1 = 0.000349892 loss)\nI0822 23:40:35.057140 32551 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0822 23:42:50.926204 32551 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0822 23:44:10.630834 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49392\nI0822 23:44:10.631098 32551 solver.cpp:404]     Test net output #1: loss = 3.22753 (* 1 = 3.22753 loss)\nI0822 23:44:11.921680 32551 solver.cpp:228] Iteration 64900, loss = 0.000351815\nI0822 23:44:11.921725 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:44:11.921741 32551 solver.cpp:244]     Train net output #1: loss = 0.000351845 (* 1 = 0.000351845 loss)\nI0822 23:44:12.019565 32551 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0822 23:46:27.653628 32551 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0822 23:47:47.367020 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49644\nI0822 23:47:47.367292 32551 solver.cpp:404]     Test net output #1: loss = 3.22237 (* 1 = 3.22237 loss)\nI0822 23:47:48.660601 32551 solver.cpp:228] Iteration 65000, loss = 0.000315551\nI0822 23:47:48.660648 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:47:48.660663 32551 solver.cpp:244]     Train net output #1: loss = 0.000315581 (* 1 = 0.000315581 loss)\nI0822 23:47:48.755925 32551 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0822 23:50:04.328197 32551 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0822 23:51:24.031436 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49436\nI0822 23:51:24.031718 32551 solver.cpp:404]     Test net output #1: loss = 3.22854 (* 1 = 3.22854 loss)\nI0822 23:51:25.320893 32551 solver.cpp:228] Iteration 65100, loss = 0.000307054\nI0822 23:51:25.320936 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:51:25.320951 32551 solver.cpp:244]     Train net output #1: loss = 0.000307084 (* 1 = 0.000307084 loss)\nI0822 23:51:25.417104 32551 sgd_solver.cpp:166] Iteration 
65100, lr = 0.035\nI0822 23:53:41.206351 32551 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0822 23:55:00.917173 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4952\nI0822 23:55:00.917421 32551 solver.cpp:404]     Test net output #1: loss = 3.21848 (* 1 = 3.21848 loss)\nI0822 23:55:02.206712 32551 solver.cpp:228] Iteration 65200, loss = 0.000388979\nI0822 23:55:02.206751 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:55:02.206768 32551 solver.cpp:244]     Train net output #1: loss = 0.000389009 (* 1 = 0.000389009 loss)\nI0822 23:55:02.300570 32551 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0822 23:57:17.998132 32551 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0822 23:58:37.709975 32551 solver.cpp:404]     Test net output #0: accuracy = 0.491\nI0822 23:58:37.710245 32551 solver.cpp:404]     Test net output #1: loss = 3.25167 (* 1 = 3.25167 loss)\nI0822 23:58:38.999969 32551 solver.cpp:228] Iteration 65300, loss = 0.000327197\nI0822 23:58:39.000010 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:58:39.000027 32551 solver.cpp:244]     Train net output #1: loss = 0.000327227 (* 1 = 0.000327227 loss)\nI0822 23:58:39.094380 32551 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0823 00:00:54.597944 32551 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0823 00:02:14.485611 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49516\nI0823 00:02:14.485997 32551 solver.cpp:404]     Test net output #1: loss = 3.21513 (* 1 = 3.21513 loss)\nI0823 00:02:15.781217 32551 solver.cpp:228] Iteration 65400, loss = 0.000349459\nI0823 00:02:15.781277 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:02:15.781296 32551 solver.cpp:244]     Train net output #1: loss = 0.000349489 (* 1 = 0.000349489 loss)\nI0823 00:02:15.874335 32551 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0823 00:04:31.656247 32551 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0823 00:05:52.483369 
32551 solver.cpp:404]     Test net output #0: accuracy = 0.49192\nI0823 00:05:52.483731 32551 solver.cpp:404]     Test net output #1: loss = 3.24199 (* 1 = 3.24199 loss)\nI0823 00:05:53.777601 32551 solver.cpp:228] Iteration 65500, loss = 0.0003264\nI0823 00:05:53.777660 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:05:53.777678 32551 solver.cpp:244]     Train net output #1: loss = 0.000326429 (* 1 = 0.000326429 loss)\nI0823 00:05:53.874104 32551 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0823 00:08:09.618132 32551 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0823 00:09:30.489259 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49616\nI0823 00:09:30.489645 32551 solver.cpp:404]     Test net output #1: loss = 3.19957 (* 1 = 3.19957 loss)\nI0823 00:09:31.784493 32551 solver.cpp:228] Iteration 65600, loss = 0.00036778\nI0823 00:09:31.784556 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:09:31.784581 32551 solver.cpp:244]     Train net output #1: loss = 0.000367809 (* 1 = 0.000367809 loss)\nI0823 00:09:31.870961 32551 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0823 00:11:47.746278 32551 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0823 00:13:08.584890 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49104\nI0823 00:13:08.585263 32551 solver.cpp:404]     Test net output #1: loss = 3.2203 (* 1 = 3.2203 loss)\nI0823 00:13:09.879390 32551 solver.cpp:228] Iteration 65700, loss = 0.000339072\nI0823 00:13:09.879453 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:13:09.879477 32551 solver.cpp:244]     Train net output #1: loss = 0.000339102 (* 1 = 0.000339102 loss)\nI0823 00:13:09.975992 32551 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0823 00:15:25.673254 32551 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0823 00:16:46.493074 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4968\nI0823 00:16:46.493453 32551 solver.cpp:404]     Test net 
output #1: loss = 3.17933 (* 1 = 3.17933 loss)\nI0823 00:16:47.786640 32551 solver.cpp:228] Iteration 65800, loss = 0.000331921\nI0823 00:16:47.786707 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:16:47.786732 32551 solver.cpp:244]     Train net output #1: loss = 0.000331951 (* 1 = 0.000331951 loss)\nI0823 00:16:47.876345 32551 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0823 00:19:03.635222 32551 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0823 00:20:24.466976 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49268\nI0823 00:20:24.467368 32551 solver.cpp:404]     Test net output #1: loss = 3.20865 (* 1 = 3.20865 loss)\nI0823 00:20:25.761849 32551 solver.cpp:228] Iteration 65900, loss = 0.000297916\nI0823 00:20:25.761914 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:20:25.761940 32551 solver.cpp:244]     Train net output #1: loss = 0.000297946 (* 1 = 0.000297946 loss)\nI0823 00:20:25.856098 32551 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0823 00:22:41.654927 32551 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0823 00:24:02.544885 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49628\nI0823 00:24:02.545259 32551 solver.cpp:404]     Test net output #1: loss = 3.18277 (* 1 = 3.18277 loss)\nI0823 00:24:03.840145 32551 solver.cpp:228] Iteration 66000, loss = 0.000335304\nI0823 00:24:03.840206 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:24:03.840231 32551 solver.cpp:244]     Train net output #1: loss = 0.000335334 (* 1 = 0.000335334 loss)\nI0823 00:24:03.934337 32551 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0823 00:26:19.712821 32551 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0823 00:27:40.510709 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49204\nI0823 00:27:40.511099 32551 solver.cpp:404]     Test net output #1: loss = 3.2122 (* 1 = 3.2122 loss)\nI0823 00:27:41.805928 32551 solver.cpp:228] Iteration 66100, loss = 
0.000357723\nI0823 00:27:41.805987 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:27:41.806005 32551 solver.cpp:244]     Train net output #1: loss = 0.000357753 (* 1 = 0.000357753 loss)\nI0823 00:27:41.900584 32551 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0823 00:29:57.737749 32551 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0823 00:31:18.524266 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49644\nI0823 00:31:18.524646 32551 solver.cpp:404]     Test net output #1: loss = 3.1787 (* 1 = 3.1787 loss)\nI0823 00:31:19.818774 32551 solver.cpp:228] Iteration 66200, loss = 0.000326904\nI0823 00:31:19.818835 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:31:19.818852 32551 solver.cpp:244]     Train net output #1: loss = 0.000326933 (* 1 = 0.000326933 loss)\nI0823 00:31:19.914569 32551 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0823 00:33:35.616782 32551 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0823 00:34:56.404063 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4932\nI0823 00:34:56.404449 32551 solver.cpp:404]     Test net output #1: loss = 3.18816 (* 1 = 3.18816 loss)\nI0823 00:34:57.699156 32551 solver.cpp:228] Iteration 66300, loss = 0.000378607\nI0823 00:34:57.699215 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:34:57.699234 32551 solver.cpp:244]     Train net output #1: loss = 0.000378637 (* 1 = 0.000378637 loss)\nI0823 00:34:57.791148 32551 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0823 00:37:13.895581 32551 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0823 00:38:34.695077 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49772\nI0823 00:38:34.695444 32551 solver.cpp:404]     Test net output #1: loss = 3.15773 (* 1 = 3.15773 loss)\nI0823 00:38:35.990294 32551 solver.cpp:228] Iteration 66400, loss = 0.000375765\nI0823 00:38:35.990355 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:38:35.990372 
32551 solver.cpp:244]     Train net output #1: loss = 0.000375795 (* 1 = 0.000375795 loss)\nI0823 00:38:36.084720 32551 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0823 00:40:51.800496 32551 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0823 00:42:12.535558 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49116\nI0823 00:42:12.535897 32551 solver.cpp:404]     Test net output #1: loss = 3.20087 (* 1 = 3.20087 loss)\nI0823 00:42:13.831010 32551 solver.cpp:228] Iteration 66500, loss = 0.000295939\nI0823 00:42:13.831073 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:42:13.831090 32551 solver.cpp:244]     Train net output #1: loss = 0.000295969 (* 1 = 0.000295969 loss)\nI0823 00:42:13.925096 32551 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0823 00:44:29.641332 32551 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0823 00:45:49.550096 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49264\nI0823 00:45:49.550426 32551 solver.cpp:404]     Test net output #1: loss = 3.20325 (* 1 = 3.20325 loss)\nI0823 00:45:50.845312 32551 solver.cpp:228] Iteration 66600, loss = 0.000363153\nI0823 00:45:50.845376 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:45:50.845394 32551 solver.cpp:244]     Train net output #1: loss = 0.000363183 (* 1 = 0.000363183 loss)\nI0823 00:45:50.936244 32551 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0823 00:48:06.712380 32551 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0823 00:49:26.630605 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4924\nI0823 00:49:26.630941 32551 solver.cpp:404]     Test net output #1: loss = 3.17655 (* 1 = 3.17655 loss)\nI0823 00:49:27.925496 32551 solver.cpp:228] Iteration 66700, loss = 0.000317393\nI0823 00:49:27.925559 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:49:27.925576 32551 solver.cpp:244]     Train net output #1: loss = 0.000317423 (* 1 = 0.000317423 loss)\nI0823 00:49:28.015307 32551 
sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0823 00:51:43.762779 32551 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0823 00:53:03.764855 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49732\nI0823 00:53:03.765228 32551 solver.cpp:404]     Test net output #1: loss = 3.1455 (* 1 = 3.1455 loss)\nI0823 00:53:05.058859 32551 solver.cpp:228] Iteration 66800, loss = 0.000360128\nI0823 00:53:05.058923 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:53:05.058943 32551 solver.cpp:244]     Train net output #1: loss = 0.000360158 (* 1 = 0.000360158 loss)\nI0823 00:53:05.155313 32551 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0823 00:55:20.846019 32551 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0823 00:56:40.557788 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49268\nI0823 00:56:40.558043 32551 solver.cpp:404]     Test net output #1: loss = 3.1852 (* 1 = 3.1852 loss)\nI0823 00:56:41.847972 32551 solver.cpp:228] Iteration 66900, loss = 0.000317416\nI0823 00:56:41.848018 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:56:41.848034 32551 solver.cpp:244]     Train net output #1: loss = 0.000317446 (* 1 = 0.000317446 loss)\nI0823 00:56:41.945935 32551 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0823 00:58:57.564889 32551 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0823 01:00:17.273669 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4962\nI0823 01:00:17.273979 32551 solver.cpp:404]     Test net output #1: loss = 3.14976 (* 1 = 3.14976 loss)\nI0823 01:00:18.563736 32551 solver.cpp:228] Iteration 67000, loss = 0.000364245\nI0823 01:00:18.563781 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:00:18.563797 32551 solver.cpp:244]     Train net output #1: loss = 0.000364275 (* 1 = 0.000364275 loss)\nI0823 01:00:18.661342 32551 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0823 01:02:34.414119 32551 solver.cpp:337] Iteration 67100, Testing net 
(#0)\nI0823 01:03:54.129896 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4922\nI0823 01:03:54.130184 32551 solver.cpp:404]     Test net output #1: loss = 3.16886 (* 1 = 3.16886 loss)\nI0823 01:03:55.419555 32551 solver.cpp:228] Iteration 67100, loss = 0.000376677\nI0823 01:03:55.419600 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:03:55.419615 32551 solver.cpp:244]     Train net output #1: loss = 0.000376707 (* 1 = 0.000376707 loss)\nI0823 01:03:55.516475 32551 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0823 01:06:11.218300 32551 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0823 01:07:30.934924 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49768\nI0823 01:07:30.935230 32551 solver.cpp:404]     Test net output #1: loss = 3.12784 (* 1 = 3.12784 loss)\nI0823 01:07:32.224675 32551 solver.cpp:228] Iteration 67200, loss = 0.000376687\nI0823 01:07:32.224725 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:07:32.224741 32551 solver.cpp:244]     Train net output #1: loss = 0.000376716 (* 1 = 0.000376716 loss)\nI0823 01:07:32.323810 32551 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0823 01:09:47.883008 32551 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0823 01:11:07.601722 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49276\nI0823 01:11:07.602030 32551 solver.cpp:404]     Test net output #1: loss = 3.15663 (* 1 = 3.15663 loss)\nI0823 01:11:08.891839 32551 solver.cpp:228] Iteration 67300, loss = 0.000367099\nI0823 01:11:08.891882 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:11:08.891899 32551 solver.cpp:244]     Train net output #1: loss = 0.000367128 (* 1 = 0.000367128 loss)\nI0823 01:11:08.989245 32551 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0823 01:13:24.916584 32551 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0823 01:14:44.626350 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49512\nI0823 01:14:44.626605 
32551 solver.cpp:404]     Test net output #1: loss = 3.14015 (* 1 = 3.14015 loss)\nI0823 01:14:45.916559 32551 solver.cpp:228] Iteration 67400, loss = 0.000259155\nI0823 01:14:45.916604 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:14:45.916620 32551 solver.cpp:244]     Train net output #1: loss = 0.000259185 (* 1 = 0.000259185 loss)\nI0823 01:14:46.010954 32551 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0823 01:17:01.564817 32551 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0823 01:18:21.272055 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49328\nI0823 01:18:21.272353 32551 solver.cpp:404]     Test net output #1: loss = 3.14591 (* 1 = 3.14591 loss)\nI0823 01:18:22.562736 32551 solver.cpp:228] Iteration 67500, loss = 0.000377536\nI0823 01:18:22.562779 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:18:22.562794 32551 solver.cpp:244]     Train net output #1: loss = 0.000377566 (* 1 = 0.000377566 loss)\nI0823 01:18:22.662238 32551 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0823 01:20:38.379022 32551 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0823 01:21:58.098414 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4968\nI0823 01:21:58.098711 32551 solver.cpp:404]     Test net output #1: loss = 3.11966 (* 1 = 3.11966 loss)\nI0823 01:21:59.389041 32551 solver.cpp:228] Iteration 67600, loss = 0.000361685\nI0823 01:21:59.389084 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:21:59.389101 32551 solver.cpp:244]     Train net output #1: loss = 0.000361715 (* 1 = 0.000361715 loss)\nI0823 01:21:59.481817 32551 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0823 01:24:15.144176 32551 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0823 01:25:34.858973 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49232\nI0823 01:25:34.859251 32551 solver.cpp:404]     Test net output #1: loss = 3.15041 (* 1 = 3.15041 loss)\nI0823 01:25:36.148232 32551 
solver.cpp:228] Iteration 67700, loss = 0.000383032\nI0823 01:25:36.148277 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:25:36.148291 32551 solver.cpp:244]     Train net output #1: loss = 0.000383062 (* 1 = 0.000383062 loss)\nI0823 01:25:36.245713 32551 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0823 01:27:51.790906 32551 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0823 01:29:11.496783 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49676\nI0823 01:29:11.497081 32551 solver.cpp:404]     Test net output #1: loss = 3.11276 (* 1 = 3.11276 loss)\nI0823 01:29:12.786562 32551 solver.cpp:228] Iteration 67800, loss = 0.00031611\nI0823 01:29:12.786607 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:29:12.786623 32551 solver.cpp:244]     Train net output #1: loss = 0.00031614 (* 1 = 0.00031614 loss)\nI0823 01:29:12.891911 32551 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0823 01:31:28.389938 32551 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0823 01:32:48.093515 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49364\nI0823 01:32:48.093832 32551 solver.cpp:404]     Test net output #1: loss = 3.13644 (* 1 = 3.13644 loss)\nI0823 01:32:49.382969 32551 solver.cpp:228] Iteration 67900, loss = 0.000287535\nI0823 01:32:49.383011 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:32:49.383028 32551 solver.cpp:244]     Train net output #1: loss = 0.000287565 (* 1 = 0.000287565 loss)\nI0823 01:32:49.483649 32551 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0823 01:35:04.998265 32551 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0823 01:36:24.696169 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49516\nI0823 01:36:24.696488 32551 solver.cpp:404]     Test net output #1: loss = 3.1235 (* 1 = 3.1235 loss)\nI0823 01:36:25.985976 32551 solver.cpp:228] Iteration 68000, loss = 0.000311024\nI0823 01:36:25.986016 32551 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0823 01:36:25.986033 32551 solver.cpp:244]     Train net output #1: loss = 0.000311054 (* 1 = 0.000311054 loss)\nI0823 01:36:26.085304 32551 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0823 01:38:41.442760 32551 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0823 01:40:01.146975 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49208\nI0823 01:40:01.147291 32551 solver.cpp:404]     Test net output #1: loss = 3.14625 (* 1 = 3.14625 loss)\nI0823 01:40:02.436451 32551 solver.cpp:228] Iteration 68100, loss = 0.000312126\nI0823 01:40:02.436494 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:40:02.436511 32551 solver.cpp:244]     Train net output #1: loss = 0.000312156 (* 1 = 0.000312156 loss)\nI0823 01:40:02.531220 32551 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0823 01:42:17.962575 32551 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0823 01:43:37.670051 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4978\nI0823 01:43:37.670354 32551 solver.cpp:404]     Test net output #1: loss = 3.09657 (* 1 = 3.09657 loss)\nI0823 01:43:38.960160 32551 solver.cpp:228] Iteration 68200, loss = 0.000297591\nI0823 01:43:38.960206 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:43:38.960222 32551 solver.cpp:244]     Train net output #1: loss = 0.000297621 (* 1 = 0.000297621 loss)\nI0823 01:43:39.057581 32551 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0823 01:45:55.080682 32551 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0823 01:47:14.784198 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49124\nI0823 01:47:14.784481 32551 solver.cpp:404]     Test net output #1: loss = 3.14784 (* 1 = 3.14784 loss)\nI0823 01:47:16.073334 32551 solver.cpp:228] Iteration 68300, loss = 0.000323841\nI0823 01:47:16.073380 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:47:16.073395 32551 solver.cpp:244]     Train net output #1: loss = 0.000323871 (* 1 = 
0.000323871 loss)\nI0823 01:47:16.168810 32551 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0823 01:49:31.963505 32551 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0823 01:50:51.660972 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49432\nI0823 01:50:51.661309 32551 solver.cpp:404]     Test net output #1: loss = 3.13299 (* 1 = 3.13299 loss)\nI0823 01:50:52.949875 32551 solver.cpp:228] Iteration 68400, loss = 0.000321239\nI0823 01:50:52.949921 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:50:52.949937 32551 solver.cpp:244]     Train net output #1: loss = 0.000321269 (* 1 = 0.000321269 loss)\nI0823 01:50:53.050132 32551 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0823 01:53:08.771286 32551 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0823 01:54:28.529229 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4914\nI0823 01:54:28.529542 32551 solver.cpp:404]     Test net output #1: loss = 3.14322 (* 1 = 3.14322 loss)\nI0823 01:54:29.819319 32551 solver.cpp:228] Iteration 68500, loss = 0.000385375\nI0823 01:54:29.819366 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:54:29.819391 32551 solver.cpp:244]     Train net output #1: loss = 0.000385405 (* 1 = 0.000385405 loss)\nI0823 01:54:29.916432 32551 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0823 01:56:45.450721 32551 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0823 01:58:05.204372 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49284\nI0823 01:58:05.204674 32551 solver.cpp:404]     Test net output #1: loss = 3.12854 (* 1 = 3.12854 loss)\nI0823 01:58:06.495152 32551 solver.cpp:228] Iteration 68600, loss = 0.000366443\nI0823 01:58:06.495200 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:58:06.495224 32551 solver.cpp:244]     Train net output #1: loss = 0.000366473 (* 1 = 0.000366473 loss)\nI0823 01:58:06.587472 32551 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0823 02:00:22.422081 
32551 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0823 02:01:42.175830 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49004\nI0823 02:01:42.176153 32551 solver.cpp:404]     Test net output #1: loss = 3.14902 (* 1 = 3.14902 loss)\nI0823 02:01:43.466686 32551 solver.cpp:228] Iteration 68700, loss = 0.000262579\nI0823 02:01:43.466737 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:01:43.466763 32551 solver.cpp:244]     Train net output #1: loss = 0.000262609 (* 1 = 0.000262609 loss)\nI0823 02:01:43.570071 32551 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0823 02:03:59.427585 32551 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0823 02:05:19.173890 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49628\nI0823 02:05:19.174201 32551 solver.cpp:404]     Test net output #1: loss = 3.09832 (* 1 = 3.09832 loss)\nI0823 02:05:20.465180 32551 solver.cpp:228] Iteration 68800, loss = 0.000303314\nI0823 02:05:20.465226 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:05:20.465242 32551 solver.cpp:244]     Train net output #1: loss = 0.000303344 (* 1 = 0.000303344 loss)\nI0823 02:05:20.557377 32551 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0823 02:07:36.126296 32551 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0823 02:08:55.869984 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49432\nI0823 02:08:55.870306 32551 solver.cpp:404]     Test net output #1: loss = 3.10408 (* 1 = 3.10408 loss)\nI0823 02:08:57.163275 32551 solver.cpp:228] Iteration 68900, loss = 0.000349529\nI0823 02:08:57.163319 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:08:57.163336 32551 solver.cpp:244]     Train net output #1: loss = 0.000349559 (* 1 = 0.000349559 loss)\nI0823 02:08:57.258563 32551 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0823 02:11:12.885365 32551 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0823 02:12:32.631285 32551 solver.cpp:404]     Test net 
output #0: accuracy = 0.49652\nI0823 02:12:32.631616 32551 solver.cpp:404]     Test net output #1: loss = 3.09513 (* 1 = 3.09513 loss)\nI0823 02:12:33.924427 32551 solver.cpp:228] Iteration 69000, loss = 0.000337521\nI0823 02:12:33.924471 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:12:33.924487 32551 solver.cpp:244]     Train net output #1: loss = 0.000337551 (* 1 = 0.000337551 loss)\nI0823 02:12:34.021112 32551 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0823 02:14:49.574340 32551 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0823 02:16:09.324292 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49372\nI0823 02:16:09.324618 32551 solver.cpp:404]     Test net output #1: loss = 3.1163 (* 1 = 3.1163 loss)\nI0823 02:16:10.617342 32551 solver.cpp:228] Iteration 69100, loss = 0.000336477\nI0823 02:16:10.617386 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:16:10.617403 32551 solver.cpp:244]     Train net output #1: loss = 0.000336506 (* 1 = 0.000336506 loss)\nI0823 02:16:10.712661 32551 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0823 02:18:26.545879 32551 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0823 02:19:46.303781 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4956\nI0823 02:19:46.304111 32551 solver.cpp:404]     Test net output #1: loss = 3.09155 (* 1 = 3.09155 loss)\nI0823 02:19:47.596962 32551 solver.cpp:228] Iteration 69200, loss = 0.000374148\nI0823 02:19:47.597007 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:19:47.597023 32551 solver.cpp:244]     Train net output #1: loss = 0.000374178 (* 1 = 0.000374178 loss)\nI0823 02:19:47.687232 32551 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0823 02:22:03.386103 32551 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0823 02:23:23.099520 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49288\nI0823 02:23:23.099864 32551 solver.cpp:404]     Test net output #1: loss = 3.12973 (* 1 = 
3.12973 loss)\nI0823 02:23:24.391983 32551 solver.cpp:228] Iteration 69300, loss = 0.000343286\nI0823 02:23:24.392030 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:23:24.392045 32551 solver.cpp:244]     Train net output #1: loss = 0.000343316 (* 1 = 0.000343316 loss)\nI0823 02:23:24.492457 32551 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0823 02:25:40.419271 32551 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0823 02:27:00.109158 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49704\nI0823 02:27:00.109495 32551 solver.cpp:404]     Test net output #1: loss = 3.07987 (* 1 = 3.07987 loss)\nI0823 02:27:01.401996 32551 solver.cpp:228] Iteration 69400, loss = 0.000330933\nI0823 02:27:01.402039 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:27:01.402055 32551 solver.cpp:244]     Train net output #1: loss = 0.000330963 (* 1 = 0.000330963 loss)\nI0823 02:27:01.492722 32551 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0823 02:29:17.011270 32551 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0823 02:30:36.704159 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49532\nI0823 02:30:36.704497 32551 solver.cpp:404]     Test net output #1: loss = 3.08715 (* 1 = 3.08715 loss)\nI0823 02:30:37.997794 32551 solver.cpp:228] Iteration 69500, loss = 0.000346614\nI0823 02:30:37.997838 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:30:37.997853 32551 solver.cpp:244]     Train net output #1: loss = 0.000346644 (* 1 = 0.000346644 loss)\nI0823 02:30:38.085120 32551 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0823 02:32:53.658843 32551 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0823 02:34:13.347791 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4982\nI0823 02:34:13.348106 32551 solver.cpp:404]     Test net output #1: loss = 3.06404 (* 1 = 3.06404 loss)\nI0823 02:34:14.640014 32551 solver.cpp:228] Iteration 69600, loss = 0.000427865\nI0823 02:34:14.640059 
32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:34:14.640075 32551 solver.cpp:244]     Train net output #1: loss = 0.000427895 (* 1 = 0.000427895 loss)\nI0823 02:34:14.734457 32551 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0823 02:36:30.359951 32551 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0823 02:37:50.050694 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49516\nI0823 02:37:50.051054 32551 solver.cpp:404]     Test net output #1: loss = 3.0816 (* 1 = 3.0816 loss)\nI0823 02:37:51.343542 32551 solver.cpp:228] Iteration 69700, loss = 0.000356595\nI0823 02:37:51.343587 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:37:51.343603 32551 solver.cpp:244]     Train net output #1: loss = 0.000356624 (* 1 = 0.000356624 loss)\nI0823 02:37:51.441612 32551 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0823 02:40:07.008646 32551 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0823 02:41:26.696863 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4964\nI0823 02:41:26.697216 32551 solver.cpp:404]     Test net output #1: loss = 3.06985 (* 1 = 3.06985 loss)\nI0823 02:41:27.989629 32551 solver.cpp:228] Iteration 69800, loss = 0.00036753\nI0823 02:41:27.989675 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:41:27.989691 32551 solver.cpp:244]     Train net output #1: loss = 0.00036756 (* 1 = 0.00036756 loss)\nI0823 02:41:28.079514 32551 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0823 02:43:43.697561 32551 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0823 02:45:03.376881 32551 solver.cpp:404]     Test net output #0: accuracy = 0.49324\nI0823 02:45:03.377209 32551 solver.cpp:404]     Test net output #1: loss = 3.10164 (* 1 = 3.10164 loss)\nI0823 02:45:04.669802 32551 solver.cpp:228] Iteration 69900, loss = 0.000310442\nI0823 02:45:04.669848 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:45:04.669864 32551 solver.cpp:244]     Train net 
output #1: loss = 0.000310472 (* 1 = 0.000310472 loss)\nI0823 02:45:04.764256 32551 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0823 02:47:20.436760 32551 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0823 02:48:40.119596 32551 solver.cpp:404]     Test net output #0: accuracy = 0.4966\nI0823 02:48:40.119953 32551 solver.cpp:404]     Test net output #1: loss = 3.06644 (* 1 = 3.06644 loss)\nI0823 02:48:41.412787 32551 solver.cpp:228] Iteration 70000, loss = 0.00031863\nI0823 02:48:41.412835 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:48:41.412852 32551 solver.cpp:244]     Train net output #1: loss = 0.000318659 (* 1 = 0.000318659 loss)\nI0823 02:48:41.505429 32551 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0823 02:48:41.505450 32551 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0823 02:50:57.221846 32551 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0823 02:52:16.903062 32551 solver.cpp:404]     Test net output #0: accuracy = 0.51512\nI0823 02:52:16.903403 32551 solver.cpp:404]     Test net output #1: loss = 2.93266 (* 1 = 2.93266 loss)\nI0823 02:52:18.195899 32551 solver.cpp:228] Iteration 70100, loss = 0.000315196\nI0823 02:52:18.195945 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:52:18.195962 32551 solver.cpp:244]     Train net output #1: loss = 0.000315226 (* 1 = 0.000315226 loss)\nI0823 02:52:18.286581 32551 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0823 02:54:33.863808 32551 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0823 02:55:53.540724 32551 solver.cpp:404]     Test net output #0: accuracy = 0.53408\nI0823 02:55:53.541064 32551 solver.cpp:404]     Test net output #1: loss = 2.76878 (* 1 = 2.76878 loss)\nI0823 02:55:54.833884 32551 solver.cpp:228] Iteration 70200, loss = 0.000287698\nI0823 02:55:54.833925 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:55:54.833941 32551 solver.cpp:244]     Train net output #1: loss = 
0.000287728 (* 1 = 0.000287728 loss)\nI0823 02:55:54.932154 32551 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0823 02:58:10.444687 32551 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0823 02:59:30.192674 32551 solver.cpp:404]     Test net output #0: accuracy = 0.54788\nI0823 02:59:30.192996 32551 solver.cpp:404]     Test net output #1: loss = 2.6825 (* 1 = 2.6825 loss)\nI0823 02:59:31.485460 32551 solver.cpp:228] Iteration 70300, loss = 0.000275808\nI0823 02:59:31.485502 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:59:31.485517 32551 solver.cpp:244]     Train net output #1: loss = 0.000275838 (* 1 = 0.000275838 loss)\nI0823 02:59:31.580479 32551 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0823 03:01:47.069706 32551 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0823 03:03:06.812228 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56312\nI0823 03:03:06.812573 32551 solver.cpp:404]     Test net output #1: loss = 2.56787 (* 1 = 2.56787 loss)\nI0823 03:03:08.104270 32551 solver.cpp:228] Iteration 70400, loss = 0.000303713\nI0823 03:03:08.104315 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:03:08.104331 32551 solver.cpp:244]     Train net output #1: loss = 0.000303742 (* 1 = 0.000303742 loss)\nI0823 03:03:08.193116 32551 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0823 03:05:23.865144 32551 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0823 03:06:43.612136 32551 solver.cpp:404]     Test net output #0: accuracy = 0.56968\nI0823 03:06:43.612481 32551 solver.cpp:404]     Test net output #1: loss = 2.52261 (* 1 = 2.52261 loss)\nI0823 03:06:44.904588 32551 solver.cpp:228] Iteration 70500, loss = 0.000381857\nI0823 03:06:44.904630 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:06:44.904646 32551 solver.cpp:244]     Train net output #1: loss = 0.000381887 (* 1 = 0.000381887 loss)\nI0823 03:06:45.000463 32551 sgd_solver.cpp:166] Iteration 70500, lr = 
0.0035\nI0823 03:09:00.558926 32551 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0823 03:10:20.296298 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58328\nI0823 03:10:20.296656 32551 solver.cpp:404]     Test net output #1: loss = 2.44541 (* 1 = 2.44541 loss)\nI0823 03:10:21.588688 32551 solver.cpp:228] Iteration 70600, loss = 0.000327156\nI0823 03:10:21.588729 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:10:21.588747 32551 solver.cpp:244]     Train net output #1: loss = 0.000327185 (* 1 = 0.000327185 loss)\nI0823 03:10:21.687063 32551 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0823 03:12:37.254907 32551 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0823 03:13:56.994575 32551 solver.cpp:404]     Test net output #0: accuracy = 0.58316\nI0823 03:13:56.994913 32551 solver.cpp:404]     Test net output #1: loss = 2.42799 (* 1 = 2.42799 loss)\nI0823 03:13:58.287235 32551 solver.cpp:228] Iteration 70700, loss = 0.000296785\nI0823 03:13:58.287278 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:13:58.287293 32551 solver.cpp:244]     Train net output #1: loss = 0.000296815 (* 1 = 0.000296815 loss)\nI0823 03:13:58.376920 32551 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0823 03:16:13.968067 32551 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0823 03:17:33.714448 32551 solver.cpp:404]     Test net output #0: accuracy = 0.5948\nI0823 03:17:33.714798 32551 solver.cpp:404]     Test net output #1: loss = 2.3731 (* 1 = 2.3731 loss)\nI0823 03:17:35.007208 32551 solver.cpp:228] Iteration 70800, loss = 0.00037346\nI0823 03:17:35.007251 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:17:35.007266 32551 solver.cpp:244]     Train net output #1: loss = 0.00037349 (* 1 = 0.00037349 loss)\nI0823 03:17:35.098031 32551 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0823 03:19:50.705803 32551 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0823 03:21:10.445543 32551 
solver.cpp:404]     Test net output #0: accuracy = 0.59588\nI0823 03:21:10.445890 32551 solver.cpp:404]     Test net output #1: loss = 2.37021 (* 1 = 2.37021 loss)\nI0823 03:21:11.738437 32551 solver.cpp:228] Iteration 70900, loss = 0.000321111\nI0823 03:21:11.738479 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:21:11.738494 32551 solver.cpp:244]     Train net output #1: loss = 0.000321141 (* 1 = 0.000321141 loss)\nI0823 03:21:11.827425 32551 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0823 03:23:27.438035 32551 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0823 03:24:47.184723 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6042\nI0823 03:24:47.185082 32551 solver.cpp:404]     Test net output #1: loss = 2.33112 (* 1 = 2.33112 loss)\nI0823 03:24:48.476598 32551 solver.cpp:228] Iteration 71000, loss = 0.000274135\nI0823 03:24:48.476639 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:24:48.476655 32551 solver.cpp:244]     Train net output #1: loss = 0.000274164 (* 1 = 0.000274164 loss)\nI0823 03:24:48.566335 32551 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0823 03:27:04.247925 32551 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0823 03:28:23.998766 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60236\nI0823 03:28:23.999109 32551 solver.cpp:404]     Test net output #1: loss = 2.33881 (* 1 = 2.33881 loss)\nI0823 03:28:25.290768 32551 solver.cpp:228] Iteration 71100, loss = 0.000311391\nI0823 03:28:25.290812 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:28:25.290827 32551 solver.cpp:244]     Train net output #1: loss = 0.000311421 (* 1 = 0.000311421 loss)\nI0823 03:28:25.387225 32551 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0823 03:30:41.011032 32551 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0823 03:32:00.758565 32551 solver.cpp:404]     Test net output #0: accuracy = 0.60944\nI0823 03:32:00.758913 32551 solver.cpp:404]     Test net 
output #1: loss = 2.31173 (* 1 = 2.31173 loss)\nI0823 03:32:02.050897 32551 solver.cpp:228] Iteration 71200, loss = 0.000284092\nI0823 03:32:02.050947 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:32:02.050964 32551 solver.cpp:244]     Train net output #1: loss = 0.000284121 (* 1 = 0.000284121 loss)\nI0823 03:32:02.147891 32551 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0823 03:34:17.764246 32551 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0823 03:35:37.508059 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6094\nI0823 03:35:37.508386 32551 solver.cpp:404]     Test net output #1: loss = 2.32379 (* 1 = 2.32379 loss)\nI0823 03:35:38.800284 32551 solver.cpp:228] Iteration 71300, loss = 0.000265499\nI0823 03:35:38.800328 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:35:38.800343 32551 solver.cpp:244]     Train net output #1: loss = 0.000265529 (* 1 = 0.000265529 loss)\nI0823 03:35:38.896411 32551 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0823 03:37:54.512181 32551 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0823 03:39:14.255465 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61668\nI0823 03:39:14.255821 32551 solver.cpp:404]     Test net output #1: loss = 2.30333 (* 1 = 2.30333 loss)\nI0823 03:39:15.548141 32551 solver.cpp:228] Iteration 71400, loss = 0.000359691\nI0823 03:39:15.548182 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:39:15.548198 32551 solver.cpp:244]     Train net output #1: loss = 0.00035972 (* 1 = 0.00035972 loss)\nI0823 03:39:15.644552 32551 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0823 03:41:31.223142 32551 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0823 03:42:50.964937 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61448\nI0823 03:42:50.965288 32551 solver.cpp:404]     Test net output #1: loss = 2.32073 (* 1 = 2.32073 loss)\nI0823 03:42:52.257606 32551 solver.cpp:228] Iteration 71500, loss = 
0.000321332\nI0823 03:42:52.257647 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:42:52.257661 32551 solver.cpp:244]     Train net output #1: loss = 0.000321362 (* 1 = 0.000321362 loss)\nI0823 03:42:52.355671 32551 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0823 03:45:07.950225 32551 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0823 03:46:27.694306 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61708\nI0823 03:46:27.694653 32551 solver.cpp:404]     Test net output #1: loss = 2.30403 (* 1 = 2.30403 loss)\nI0823 03:46:28.986855 32551 solver.cpp:228] Iteration 71600, loss = 0.000303232\nI0823 03:46:28.986896 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:46:28.986912 32551 solver.cpp:244]     Train net output #1: loss = 0.000303262 (* 1 = 0.000303262 loss)\nI0823 03:46:29.084628 32551 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0823 03:48:44.676162 32551 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0823 03:50:04.432884 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61536\nI0823 03:50:04.433220 32551 solver.cpp:404]     Test net output #1: loss = 2.32165 (* 1 = 2.32165 loss)\nI0823 03:50:05.725272 32551 solver.cpp:228] Iteration 71700, loss = 0.000297009\nI0823 03:50:05.725314 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:50:05.725330 32551 solver.cpp:244]     Train net output #1: loss = 0.000297038 (* 1 = 0.000297038 loss)\nI0823 03:50:05.822218 32551 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0823 03:52:21.407130 32551 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0823 03:53:41.152951 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61928\nI0823 03:53:41.153300 32551 solver.cpp:404]     Test net output #1: loss = 2.3084 (* 1 = 2.3084 loss)\nI0823 03:53:42.445883 32551 solver.cpp:228] Iteration 71800, loss = 0.000352477\nI0823 03:53:42.445929 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:53:42.445945 
32551 solver.cpp:244]     Train net output #1: loss = 0.000352507 (* 1 = 0.000352507 loss)\nI0823 03:53:42.541321 32551 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0823 03:55:58.260064 32551 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0823 03:57:18.006184 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61776\nI0823 03:57:18.006517 32551 solver.cpp:404]     Test net output #1: loss = 2.32756 (* 1 = 2.32756 loss)\nI0823 03:57:19.300441 32551 solver.cpp:228] Iteration 71900, loss = 0.000322557\nI0823 03:57:19.300487 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:57:19.300503 32551 solver.cpp:244]     Train net output #1: loss = 0.000322587 (* 1 = 0.000322587 loss)\nI0823 03:57:19.387691 32551 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0823 03:59:35.141338 32551 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0823 04:00:54.915015 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62036\nI0823 04:00:54.915330 32551 solver.cpp:404]     Test net output #1: loss = 2.31556 (* 1 = 2.31556 loss)\nI0823 04:00:56.208113 32551 solver.cpp:228] Iteration 72000, loss = 0.000453415\nI0823 04:00:56.208159 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:00:56.208175 32551 solver.cpp:244]     Train net output #1: loss = 0.000453445 (* 1 = 0.000453445 loss)\nI0823 04:00:56.300782 32551 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0823 04:03:12.081722 32551 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0823 04:04:31.826048 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61864\nI0823 04:04:31.826395 32551 solver.cpp:404]     Test net output #1: loss = 2.33389 (* 1 = 2.33389 loss)\nI0823 04:04:33.119524 32551 solver.cpp:228] Iteration 72100, loss = 0.000329276\nI0823 04:04:33.119570 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:04:33.119585 32551 solver.cpp:244]     Train net output #1: loss = 0.000329306 (* 1 = 0.000329306 loss)\nI0823 04:04:33.217434 
32551 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0823 04:06:48.760083 32551 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0823 04:08:08.500550 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62196\nI0823 04:08:08.500895 32551 solver.cpp:404]     Test net output #1: loss = 2.32283 (* 1 = 2.32283 loss)\nI0823 04:08:09.793171 32551 solver.cpp:228] Iteration 72200, loss = 0.000276668\nI0823 04:08:09.793217 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:08:09.793233 32551 solver.cpp:244]     Train net output #1: loss = 0.000276698 (* 1 = 0.000276698 loss)\nI0823 04:08:09.883595 32551 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0823 04:10:25.506590 32551 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0823 04:11:45.240556 32551 solver.cpp:404]     Test net output #0: accuracy = 0.61904\nI0823 04:11:45.240901 32551 solver.cpp:404]     Test net output #1: loss = 2.34186 (* 1 = 2.34186 loss)\nI0823 04:11:46.533514 32551 solver.cpp:228] Iteration 72300, loss = 0.000294659\nI0823 04:11:46.533560 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:11:46.533576 32551 solver.cpp:244]     Train net output #1: loss = 0.000294689 (* 1 = 0.000294689 loss)\nI0823 04:11:46.625972 32551 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0823 04:14:02.408535 32551 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0823 04:15:22.151199 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62304\nI0823 04:15:22.151558 32551 solver.cpp:404]     Test net output #1: loss = 2.33064 (* 1 = 2.33064 loss)\nI0823 04:15:23.444924 32551 solver.cpp:228] Iteration 72400, loss = 0.000423342\nI0823 04:15:23.444970 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:15:23.444985 32551 solver.cpp:244]     Train net output #1: loss = 0.000423372 (* 1 = 0.000423372 loss)\nI0823 04:15:23.533879 32551 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0823 04:17:39.247509 32551 solver.cpp:337] Iteration 
72500, Testing net (#0)\nI0823 04:18:58.982175 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62044\nI0823 04:18:58.982518 32551 solver.cpp:404]     Test net output #1: loss = 2.34779 (* 1 = 2.34779 loss)\nI0823 04:19:00.276054 32551 solver.cpp:228] Iteration 72500, loss = 0.00031188\nI0823 04:19:00.276099 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:19:00.276115 32551 solver.cpp:244]     Train net output #1: loss = 0.00031191 (* 1 = 0.00031191 loss)\nI0823 04:19:00.369303 32551 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0823 04:21:16.145300 32551 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0823 04:22:35.884516 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62412\nI0823 04:22:35.884872 32551 solver.cpp:404]     Test net output #1: loss = 2.33631 (* 1 = 2.33631 loss)\nI0823 04:22:37.178412 32551 solver.cpp:228] Iteration 72600, loss = 0.00034999\nI0823 04:22:37.178457 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:22:37.178473 32551 solver.cpp:244]     Train net output #1: loss = 0.000350019 (* 1 = 0.000350019 loss)\nI0823 04:22:37.271252 32551 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0823 04:24:52.927884 32551 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0823 04:26:12.664886 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62088\nI0823 04:26:12.665283 32551 solver.cpp:404]     Test net output #1: loss = 2.35401 (* 1 = 2.35401 loss)\nI0823 04:26:13.958562 32551 solver.cpp:228] Iteration 72700, loss = 0.000354687\nI0823 04:26:13.958607 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:26:13.958623 32551 solver.cpp:244]     Train net output #1: loss = 0.000354716 (* 1 = 0.000354716 loss)\nI0823 04:26:14.048369 32551 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0823 04:28:29.628171 32551 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0823 04:29:49.364480 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62384\nI0823 
04:29:49.364812 32551 solver.cpp:404]     Test net output #1: loss = 2.34251 (* 1 = 2.34251 loss)\nI0823 04:29:50.658828 32551 solver.cpp:228] Iteration 72800, loss = 0.000227767\nI0823 04:29:50.658872 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:29:50.658887 32551 solver.cpp:244]     Train net output #1: loss = 0.000227797 (* 1 = 0.000227797 loss)\nI0823 04:29:50.751425 32551 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0823 04:32:06.503208 32551 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0823 04:33:26.234001 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62084\nI0823 04:33:26.234349 32551 solver.cpp:404]     Test net output #1: loss = 2.35882 (* 1 = 2.35882 loss)\nI0823 04:33:27.526826 32551 solver.cpp:228] Iteration 72900, loss = 0.000304873\nI0823 04:33:27.526870 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:33:27.526886 32551 solver.cpp:244]     Train net output #1: loss = 0.000304902 (* 1 = 0.000304902 loss)\nI0823 04:33:27.623731 32551 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0823 04:35:43.231694 32551 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0823 04:37:02.960477 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62412\nI0823 04:37:02.960788 32551 solver.cpp:404]     Test net output #1: loss = 2.347 (* 1 = 2.347 loss)\nI0823 04:37:04.253478 32551 solver.cpp:228] Iteration 73000, loss = 0.000446894\nI0823 04:37:04.253520 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:37:04.253535 32551 solver.cpp:244]     Train net output #1: loss = 0.000446924 (* 1 = 0.000446924 loss)\nI0823 04:37:04.351660 32551 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0823 04:39:20.428974 32551 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0823 04:40:40.098402 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62028\nI0823 04:40:40.098739 32551 solver.cpp:404]     Test net output #1: loss = 2.36362 (* 1 = 2.36362 loss)\nI0823 
04:40:41.392231 32551 solver.cpp:228] Iteration 73100, loss = 0.000318146\nI0823 04:40:41.392276 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:40:41.392290 32551 solver.cpp:244]     Train net output #1: loss = 0.000318175 (* 1 = 0.000318175 loss)\nI0823 04:40:41.481206 32551 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0823 04:42:57.189146 32551 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0823 04:44:16.865897 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62336\nI0823 04:44:16.866256 32551 solver.cpp:404]     Test net output #1: loss = 2.35159 (* 1 = 2.35159 loss)\nI0823 04:44:18.158241 32551 solver.cpp:228] Iteration 73200, loss = 0.000338591\nI0823 04:44:18.158284 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:44:18.158299 32551 solver.cpp:244]     Train net output #1: loss = 0.000338621 (* 1 = 0.000338621 loss)\nI0823 04:44:18.248275 32551 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0823 04:46:34.045112 32551 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0823 04:47:53.713757 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62028\nI0823 04:47:53.714118 32551 solver.cpp:404]     Test net output #1: loss = 2.36818 (* 1 = 2.36818 loss)\nI0823 04:47:55.006973 32551 solver.cpp:228] Iteration 73300, loss = 0.000328092\nI0823 04:47:55.007015 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:47:55.007035 32551 solver.cpp:244]     Train net output #1: loss = 0.000328122 (* 1 = 0.000328122 loss)\nI0823 04:47:55.099303 32551 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0823 04:50:11.131119 32551 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0823 04:51:30.797497 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62324\nI0823 04:51:30.797852 32551 solver.cpp:404]     Test net output #1: loss = 2.35601 (* 1 = 2.35601 loss)\nI0823 04:51:32.091426 32551 solver.cpp:228] Iteration 73400, loss = 0.000335517\nI0823 04:51:32.091472 32551 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:51:32.091488 32551 solver.cpp:244]     Train net output #1: loss = 0.000335547 (* 1 = 0.000335547 loss)\nI0823 04:51:32.185518 32551 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0823 04:53:47.893187 32551 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0823 04:55:07.571292 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62036\nI0823 04:55:07.571643 32551 solver.cpp:404]     Test net output #1: loss = 2.37168 (* 1 = 2.37168 loss)\nI0823 04:55:08.865191 32551 solver.cpp:228] Iteration 73500, loss = 0.000243938\nI0823 04:55:08.865236 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:55:08.865252 32551 solver.cpp:244]     Train net output #1: loss = 0.000243968 (* 1 = 0.000243968 loss)\nI0823 04:55:08.958626 32551 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0823 04:57:24.792881 32551 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0823 04:58:44.471788 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6232\nI0823 04:58:44.472141 32551 solver.cpp:404]     Test net output #1: loss = 2.35871 (* 1 = 2.35871 loss)\nI0823 04:58:45.765833 32551 solver.cpp:228] Iteration 73600, loss = 0.000398101\nI0823 04:58:45.765877 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:58:45.765892 32551 solver.cpp:244]     Train net output #1: loss = 0.000398131 (* 1 = 0.000398131 loss)\nI0823 04:58:45.862401 32551 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0823 05:01:01.695610 32551 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0823 05:02:21.375591 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62036\nI0823 05:02:21.375929 32551 solver.cpp:404]     Test net output #1: loss = 2.3744 (* 1 = 2.3744 loss)\nI0823 05:02:22.668814 32551 solver.cpp:228] Iteration 73700, loss = 0.000372911\nI0823 05:02:22.668859 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:02:22.668874 32551 solver.cpp:244]     Train net 
output #1: loss = 0.00037294 (* 1 = 0.00037294 loss)\nI0823 05:02:22.765142 32551 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0823 05:04:38.542886 32551 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0823 05:05:58.219424 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62296\nI0823 05:05:58.219775 32551 solver.cpp:404]     Test net output #1: loss = 2.36051 (* 1 = 2.36051 loss)\nI0823 05:05:59.513798 32551 solver.cpp:228] Iteration 73800, loss = 0.000292177\nI0823 05:05:59.513842 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:05:59.513859 32551 solver.cpp:244]     Train net output #1: loss = 0.000292207 (* 1 = 0.000292207 loss)\nI0823 05:05:59.600461 32551 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0823 05:08:15.441174 32551 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0823 05:09:35.124775 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6198\nI0823 05:09:35.125135 32551 solver.cpp:404]     Test net output #1: loss = 2.37564 (* 1 = 2.37564 loss)\nI0823 05:09:36.418560 32551 solver.cpp:228] Iteration 73900, loss = 0.000281477\nI0823 05:09:36.418604 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:09:36.418620 32551 solver.cpp:244]     Train net output #1: loss = 0.000281507 (* 1 = 0.000281507 loss)\nI0823 05:09:36.508146 32551 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0823 05:11:51.985317 32551 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0823 05:13:11.718690 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62288\nI0823 05:13:11.719051 32551 solver.cpp:404]     Test net output #1: loss = 2.36231 (* 1 = 2.36231 loss)\nI0823 05:13:13.012248 32551 solver.cpp:228] Iteration 74000, loss = 0.000275395\nI0823 05:13:13.012295 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:13:13.012318 32551 solver.cpp:244]     Train net output #1: loss = 0.000275425 (* 1 = 0.000275425 loss)\nI0823 05:13:13.108235 32551 sgd_solver.cpp:166] Iteration 
74000, lr = 0.0035\nI0823 05:15:29.047683 32551 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0823 05:16:48.869364 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62036\nI0823 05:16:48.869725 32551 solver.cpp:404]     Test net output #1: loss = 2.3778 (* 1 = 2.3778 loss)\nI0823 05:16:50.163615 32551 solver.cpp:228] Iteration 74100, loss = 0.000338417\nI0823 05:16:50.163662 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:16:50.163686 32551 solver.cpp:244]     Train net output #1: loss = 0.000338447 (* 1 = 0.000338447 loss)\nI0823 05:16:50.256952 32551 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0823 05:19:05.844996 32551 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0823 05:20:25.588327 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6238\nI0823 05:20:25.588685 32551 solver.cpp:404]     Test net output #1: loss = 2.36461 (* 1 = 2.36461 loss)\nI0823 05:20:26.883412 32551 solver.cpp:228] Iteration 74200, loss = 0.000315014\nI0823 05:20:26.883460 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:20:26.883476 32551 solver.cpp:244]     Train net output #1: loss = 0.000315044 (* 1 = 0.000315044 loss)\nI0823 05:20:26.978621 32551 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0823 05:22:42.677685 32551 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0823 05:24:02.485883 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6202\nI0823 05:24:02.486244 32551 solver.cpp:404]     Test net output #1: loss = 2.38008 (* 1 = 2.38008 loss)\nI0823 05:24:03.779552 32551 solver.cpp:228] Iteration 74300, loss = 0.000323968\nI0823 05:24:03.779602 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:24:03.779625 32551 solver.cpp:244]     Train net output #1: loss = 0.000323997 (* 1 = 0.000323997 loss)\nI0823 05:24:03.877830 32551 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0823 05:26:19.706151 32551 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0823 
05:27:39.512362 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62304\nI0823 05:27:39.512734 32551 solver.cpp:404]     Test net output #1: loss = 2.36687 (* 1 = 2.36687 loss)\nI0823 05:27:40.806005 32551 solver.cpp:228] Iteration 74400, loss = 0.000250473\nI0823 05:27:40.806054 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:27:40.806078 32551 solver.cpp:244]     Train net output #1: loss = 0.000250502 (* 1 = 0.000250502 loss)\nI0823 05:27:40.895700 32551 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0823 05:29:56.546739 32551 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0823 05:31:16.348062 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62016\nI0823 05:31:16.348407 32551 solver.cpp:404]     Test net output #1: loss = 2.38203 (* 1 = 2.38203 loss)\nI0823 05:31:17.641732 32551 solver.cpp:228] Iteration 74500, loss = 0.000299216\nI0823 05:31:17.641780 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:31:17.641804 32551 solver.cpp:244]     Train net output #1: loss = 0.000299246 (* 1 = 0.000299246 loss)\nI0823 05:31:17.736655 32551 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0823 05:33:33.481292 32551 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0823 05:34:53.241701 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62276\nI0823 05:34:53.242074 32551 solver.cpp:404]     Test net output #1: loss = 2.36823 (* 1 = 2.36823 loss)\nI0823 05:34:54.536674 32551 solver.cpp:228] Iteration 74600, loss = 0.000348319\nI0823 05:34:54.536720 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:34:54.536743 32551 solver.cpp:244]     Train net output #1: loss = 0.000348349 (* 1 = 0.000348349 loss)\nI0823 05:34:54.629456 32551 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0823 05:37:10.668295 32551 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0823 05:38:30.437422 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62048\nI0823 05:38:30.437767 32551 
solver.cpp:404]     Test net output #1: loss = 2.38267 (* 1 = 2.38267 loss)\nI0823 05:38:31.731457 32551 solver.cpp:228] Iteration 74700, loss = 0.000338784\nI0823 05:38:31.731508 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:38:31.731533 32551 solver.cpp:244]     Train net output #1: loss = 0.000338814 (* 1 = 0.000338814 loss)\nI0823 05:38:31.827754 32551 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0823 05:40:47.882068 32551 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0823 05:42:07.643321 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62284\nI0823 05:42:07.643657 32551 solver.cpp:404]     Test net output #1: loss = 2.36864 (* 1 = 2.36864 loss)\nI0823 05:42:08.936218 32551 solver.cpp:228] Iteration 74800, loss = 0.000364503\nI0823 05:42:08.936265 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:42:08.936280 32551 solver.cpp:244]     Train net output #1: loss = 0.000364533 (* 1 = 0.000364533 loss)\nI0823 05:42:09.033947 32551 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0823 05:44:24.850538 32551 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0823 05:45:44.597512 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62044\nI0823 05:45:44.597843 32551 solver.cpp:404]     Test net output #1: loss = 2.3841 (* 1 = 2.3841 loss)\nI0823 05:45:45.891064 32551 solver.cpp:228] Iteration 74900, loss = 0.000381395\nI0823 05:45:45.891111 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:45:45.891126 32551 solver.cpp:244]     Train net output #1: loss = 0.000381425 (* 1 = 0.000381425 loss)\nI0823 05:45:45.985636 32551 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0823 05:48:01.612670 32551 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0823 05:49:21.371526 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6232\nI0823 05:49:21.371879 32551 solver.cpp:404]     Test net output #1: loss = 2.3698 (* 1 = 2.3698 loss)\nI0823 05:49:22.665856 32551 solver.cpp:228] 
Iteration 75000, loss = 0.000308546\nI0823 05:49:22.665902 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:49:22.665918 32551 solver.cpp:244]     Train net output #1: loss = 0.000308575 (* 1 = 0.000308575 loss)\nI0823 05:49:22.756237 32551 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0823 05:51:38.582546 32551 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0823 05:52:58.336766 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62032\nI0823 05:52:58.337124 32551 solver.cpp:404]     Test net output #1: loss = 2.38515 (* 1 = 2.38515 loss)\nI0823 05:52:59.630708 32551 solver.cpp:228] Iteration 75100, loss = 0.000341427\nI0823 05:52:59.630753 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:52:59.630769 32551 solver.cpp:244]     Train net output #1: loss = 0.000341457 (* 1 = 0.000341457 loss)\nI0823 05:52:59.728687 32551 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0823 05:55:15.586642 32551 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0823 05:56:35.338891 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62348\nI0823 05:56:35.339242 32551 solver.cpp:404]     Test net output #1: loss = 2.37054 (* 1 = 2.37054 loss)\nI0823 05:56:36.632516 32551 solver.cpp:228] Iteration 75200, loss = 0.000323968\nI0823 05:56:36.632561 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:56:36.632577 32551 solver.cpp:244]     Train net output #1: loss = 0.000323997 (* 1 = 0.000323997 loss)\nI0823 05:56:36.723213 32551 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0823 05:58:52.406937 32551 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0823 06:00:12.157464 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62136\nI0823 06:00:12.157824 32551 solver.cpp:404]     Test net output #1: loss = 2.38434 (* 1 = 2.38434 loss)\nI0823 06:00:13.450911 32551 solver.cpp:228] Iteration 75300, loss = 0.000385091\nI0823 06:00:13.450956 32551 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0823 06:00:13.450973 32551 solver.cpp:244]     Train net output #1: loss = 0.000385121 (* 1 = 0.000385121 loss)\nI0823 06:00:13.547430 32551 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0823 06:02:29.155772 32551 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0823 06:03:48.918062 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6234\nI0823 06:03:48.918400 32551 solver.cpp:404]     Test net output #1: loss = 2.37079 (* 1 = 2.37079 loss)\nI0823 06:03:50.212291 32551 solver.cpp:228] Iteration 75400, loss = 0.000275727\nI0823 06:03:50.212334 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:03:50.212352 32551 solver.cpp:244]     Train net output #1: loss = 0.000275757 (* 1 = 0.000275757 loss)\nI0823 06:03:50.307576 32551 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0823 06:06:06.173832 32551 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0823 06:07:25.930315 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62104\nI0823 06:07:25.930671 32551 solver.cpp:404]     Test net output #1: loss = 2.38533 (* 1 = 2.38533 loss)\nI0823 06:07:27.224439 32551 solver.cpp:228] Iteration 75500, loss = 0.000313218\nI0823 06:07:27.224483 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:07:27.224499 32551 solver.cpp:244]     Train net output #1: loss = 0.000313248 (* 1 = 0.000313248 loss)\nI0823 06:07:27.312649 32551 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0823 06:09:43.065784 32551 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0823 06:11:02.957454 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62328\nI0823 06:11:02.957831 32551 solver.cpp:404]     Test net output #1: loss = 2.37092 (* 1 = 2.37092 loss)\nI0823 06:11:04.250748 32551 solver.cpp:228] Iteration 75600, loss = 0.000248171\nI0823 06:11:04.250795 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:11:04.250819 32551 solver.cpp:244]     Train net output #1: loss = 0.000248201 (* 1 = 0.000248201 
loss)\nI0823 06:11:04.340762 32551 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0823 06:13:20.086520 32551 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0823 06:14:39.983392 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62096\nI0823 06:14:39.983747 32551 solver.cpp:404]     Test net output #1: loss = 2.38595 (* 1 = 2.38595 loss)\nI0823 06:14:41.277920 32551 solver.cpp:228] Iteration 75700, loss = 0.000297695\nI0823 06:14:41.277971 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:14:41.277994 32551 solver.cpp:244]     Train net output #1: loss = 0.000297725 (* 1 = 0.000297725 loss)\nI0823 06:14:41.370676 32551 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0823 06:16:57.003520 32551 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0823 06:18:16.903589 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62336\nI0823 06:18:16.903947 32551 solver.cpp:404]     Test net output #1: loss = 2.3721 (* 1 = 2.3721 loss)\nI0823 06:18:18.197448 32551 solver.cpp:228] Iteration 75800, loss = 0.000272031\nI0823 06:18:18.197492 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:18:18.197517 32551 solver.cpp:244]     Train net output #1: loss = 0.000272061 (* 1 = 0.000272061 loss)\nI0823 06:18:18.289265 32551 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0823 06:20:33.945300 32551 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0823 06:21:53.827293 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62088\nI0823 06:21:53.827652 32551 solver.cpp:404]     Test net output #1: loss = 2.38651 (* 1 = 2.38651 loss)\nI0823 06:21:55.120451 32551 solver.cpp:228] Iteration 75900, loss = 0.000342071\nI0823 06:21:55.120493 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:21:55.120518 32551 solver.cpp:244]     Train net output #1: loss = 0.0003421 (* 1 = 0.0003421 loss)\nI0823 06:21:55.211997 32551 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0823 06:24:11.019186 32551 
solver.cpp:337] Iteration 76000, Testing net (#0)\nI0823 06:25:31.820446 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62352\nI0823 06:25:31.820847 32551 solver.cpp:404]     Test net output #1: loss = 2.37246 (* 1 = 2.37246 loss)\nI0823 06:25:33.114837 32551 solver.cpp:228] Iteration 76000, loss = 0.00033404\nI0823 06:25:33.114886 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:25:33.114902 32551 solver.cpp:244]     Train net output #1: loss = 0.00033407 (* 1 = 0.00033407 loss)\nI0823 06:25:33.207984 32551 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0823 06:27:48.861312 32551 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0823 06:29:08.643019 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62128\nI0823 06:29:08.643391 32551 solver.cpp:404]     Test net output #1: loss = 2.38633 (* 1 = 2.38633 loss)\nI0823 06:29:09.936064 32551 solver.cpp:228] Iteration 76100, loss = 0.000372027\nI0823 06:29:09.936108 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:29:09.936132 32551 solver.cpp:244]     Train net output #1: loss = 0.000372057 (* 1 = 0.000372057 loss)\nI0823 06:29:10.030228 32551 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0823 06:31:25.799652 32551 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0823 06:32:45.567273 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62332\nI0823 06:32:45.567637 32551 solver.cpp:404]     Test net output #1: loss = 2.37269 (* 1 = 2.37269 loss)\nI0823 06:32:46.861582 32551 solver.cpp:228] Iteration 76200, loss = 0.000225179\nI0823 06:32:46.861623 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:32:46.861637 32551 solver.cpp:244]     Train net output #1: loss = 0.000225208 (* 1 = 0.000225208 loss)\nI0823 06:32:46.958293 32551 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0823 06:35:02.651510 32551 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0823 06:36:22.404048 32551 solver.cpp:404]     Test net output #0: 
accuracy = 0.62076\nI0823 06:36:22.404389 32551 solver.cpp:404]     Test net output #1: loss = 2.3872 (* 1 = 2.3872 loss)\nI0823 06:36:23.697465 32551 solver.cpp:228] Iteration 76300, loss = 0.000272099\nI0823 06:36:23.697506 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:36:23.697523 32551 solver.cpp:244]     Train net output #1: loss = 0.000272129 (* 1 = 0.000272129 loss)\nI0823 06:36:23.792129 32551 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0823 06:38:39.374795 32551 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0823 06:39:59.131072 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62312\nI0823 06:39:59.131404 32551 solver.cpp:404]     Test net output #1: loss = 2.37262 (* 1 = 2.37262 loss)\nI0823 06:40:00.423964 32551 solver.cpp:228] Iteration 76400, loss = 0.000308588\nI0823 06:40:00.424005 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:40:00.424021 32551 solver.cpp:244]     Train net output #1: loss = 0.000308618 (* 1 = 0.000308618 loss)\nI0823 06:40:00.519806 32551 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0823 06:42:16.190381 32551 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0823 06:43:35.941310 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62104\nI0823 06:43:35.941646 32551 solver.cpp:404]     Test net output #1: loss = 2.38692 (* 1 = 2.38692 loss)\nI0823 06:43:37.234894 32551 solver.cpp:228] Iteration 76500, loss = 0.000341394\nI0823 06:43:37.234935 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:43:37.234951 32551 solver.cpp:244]     Train net output #1: loss = 0.000341424 (* 1 = 0.000341424 loss)\nI0823 06:43:37.322438 32551 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0823 06:45:52.934823 32551 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0823 06:47:12.687643 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62356\nI0823 06:47:12.687996 32551 solver.cpp:404]     Test net output #1: loss = 2.37216 (* 1 = 2.37216 
loss)\nI0823 06:47:13.980317 32551 solver.cpp:228] Iteration 76600, loss = 0.000294393\nI0823 06:47:13.980361 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:47:13.980377 32551 solver.cpp:244]     Train net output #1: loss = 0.000294423 (* 1 = 0.000294423 loss)\nI0823 06:47:14.074251 32551 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0823 06:49:29.842070 32551 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0823 06:50:49.593101 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62104\nI0823 06:50:49.593451 32551 solver.cpp:404]     Test net output #1: loss = 2.38672 (* 1 = 2.38672 loss)\nI0823 06:50:50.886406 32551 solver.cpp:228] Iteration 76700, loss = 0.000346193\nI0823 06:50:50.886448 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:50:50.886466 32551 solver.cpp:244]     Train net output #1: loss = 0.000346222 (* 1 = 0.000346222 loss)\nI0823 06:50:50.978926 32551 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0823 06:53:06.672668 32551 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0823 06:54:26.414428 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62384\nI0823 06:54:26.414793 32551 solver.cpp:404]     Test net output #1: loss = 2.37266 (* 1 = 2.37266 loss)\nI0823 06:54:27.706887 32551 solver.cpp:228] Iteration 76800, loss = 0.000303206\nI0823 06:54:27.706930 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:54:27.706948 32551 solver.cpp:244]     Train net output #1: loss = 0.000303236 (* 1 = 0.000303236 loss)\nI0823 06:54:27.799371 32551 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0823 06:56:43.383002 32551 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0823 06:58:03.032236 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62096\nI0823 06:58:03.032585 32551 solver.cpp:404]     Test net output #1: loss = 2.38742 (* 1 = 2.38742 loss)\nI0823 06:58:04.325031 32551 solver.cpp:228] Iteration 76900, loss = 0.000298194\nI0823 06:58:04.325073 32551 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:58:04.325089 32551 solver.cpp:244]     Train net output #1: loss = 0.000298224 (* 1 = 0.000298224 loss)\nI0823 06:58:04.413993 32551 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0823 07:00:20.171790 32551 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0823 07:01:39.821177 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62316\nI0823 07:01:39.821534 32551 solver.cpp:404]     Test net output #1: loss = 2.37337 (* 1 = 2.37337 loss)\nI0823 07:01:41.113523 32551 solver.cpp:228] Iteration 77000, loss = 0.000269449\nI0823 07:01:41.113566 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:01:41.113582 32551 solver.cpp:244]     Train net output #1: loss = 0.000269479 (* 1 = 0.000269479 loss)\nI0823 07:01:41.209556 32551 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0823 07:03:56.994668 32551 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0823 07:05:16.650602 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6204\nI0823 07:05:16.650964 32551 solver.cpp:404]     Test net output #1: loss = 2.38725 (* 1 = 2.38725 loss)\nI0823 07:05:17.943573 32551 solver.cpp:228] Iteration 77100, loss = 0.0002872\nI0823 07:05:17.943617 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:05:17.943634 32551 solver.cpp:244]     Train net output #1: loss = 0.00028723 (* 1 = 0.00028723 loss)\nI0823 07:05:18.034126 32551 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0823 07:07:33.611084 32551 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0823 07:08:53.261937 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62312\nI0823 07:08:53.262292 32551 solver.cpp:404]     Test net output #1: loss = 2.37249 (* 1 = 2.37249 loss)\nI0823 07:08:54.554630 32551 solver.cpp:228] Iteration 77200, loss = 0.000332781\nI0823 07:08:54.554672 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:08:54.554687 32551 solver.cpp:244]     Train net output 
#1: loss = 0.000332811 (* 1 = 0.000332811 loss)\nI0823 07:08:54.642673 32551 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0823 07:11:10.514298 32551 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0823 07:12:30.162983 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62076\nI0823 07:12:30.163322 32551 solver.cpp:404]     Test net output #1: loss = 2.38737 (* 1 = 2.38737 loss)\nI0823 07:12:31.455487 32551 solver.cpp:228] Iteration 77300, loss = 0.000294998\nI0823 07:12:31.455531 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:12:31.455546 32551 solver.cpp:244]     Train net output #1: loss = 0.000295028 (* 1 = 0.000295028 loss)\nI0823 07:12:31.548493 32551 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0823 07:14:47.119988 32551 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0823 07:16:06.760517 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62356\nI0823 07:16:06.760844 32551 solver.cpp:404]     Test net output #1: loss = 2.37314 (* 1 = 2.37314 loss)\nI0823 07:16:08.053282 32551 solver.cpp:228] Iteration 77400, loss = 0.000278215\nI0823 07:16:08.053325 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:16:08.053341 32551 solver.cpp:244]     Train net output #1: loss = 0.000278245 (* 1 = 0.000278245 loss)\nI0823 07:16:08.142598 32551 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0823 07:18:23.746598 32551 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0823 07:19:43.399344 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62104\nI0823 07:19:43.399677 32551 solver.cpp:404]     Test net output #1: loss = 2.38708 (* 1 = 2.38708 loss)\nI0823 07:19:44.691699 32551 solver.cpp:228] Iteration 77500, loss = 0.000314373\nI0823 07:19:44.691742 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:19:44.691763 32551 solver.cpp:244]     Train net output #1: loss = 0.000314403 (* 1 = 0.000314403 loss)\nI0823 07:19:44.786403 32551 sgd_solver.cpp:166] Iteration 77500, 
lr = 0.0035\nI0823 07:22:00.431006 32551 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0823 07:23:20.079874 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62376\nI0823 07:23:20.080227 32551 solver.cpp:404]     Test net output #1: loss = 2.37266 (* 1 = 2.37266 loss)\nI0823 07:23:21.372697 32551 solver.cpp:228] Iteration 77600, loss = 0.000409134\nI0823 07:23:21.372742 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:23:21.372762 32551 solver.cpp:244]     Train net output #1: loss = 0.000409164 (* 1 = 0.000409164 loss)\nI0823 07:23:21.472879 32551 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0823 07:25:36.983386 32551 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0823 07:26:56.628898 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62096\nI0823 07:26:56.629250 32551 solver.cpp:404]     Test net output #1: loss = 2.38699 (* 1 = 2.38699 loss)\nI0823 07:26:57.921030 32551 solver.cpp:228] Iteration 77700, loss = 0.000252494\nI0823 07:26:57.921074 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:26:57.921092 32551 solver.cpp:244]     Train net output #1: loss = 0.000252524 (* 1 = 0.000252524 loss)\nI0823 07:26:58.022001 32551 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0823 07:29:13.714269 32551 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0823 07:30:33.389649 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6234\nI0823 07:30:33.389986 32551 solver.cpp:404]     Test net output #1: loss = 2.37244 (* 1 = 2.37244 loss)\nI0823 07:30:34.682773 32551 solver.cpp:228] Iteration 77800, loss = 0.00030047\nI0823 07:30:34.682818 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:30:34.682834 32551 solver.cpp:244]     Train net output #1: loss = 0.000300499 (* 1 = 0.000300499 loss)\nI0823 07:30:34.778347 32551 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0823 07:32:50.578788 32551 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0823 07:34:10.286375 
32551 solver.cpp:404]     Test net output #0: accuracy = 0.62064\nI0823 07:34:10.286672 32551 solver.cpp:404]     Test net output #1: loss = 2.38743 (* 1 = 2.38743 loss)\nI0823 07:34:11.579596 32551 solver.cpp:228] Iteration 77900, loss = 0.000344839\nI0823 07:34:11.579643 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:34:11.579658 32551 solver.cpp:244]     Train net output #1: loss = 0.000344869 (* 1 = 0.000344869 loss)\nI0823 07:34:11.674921 32551 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0823 07:36:27.153489 32551 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0823 07:37:46.857764 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6234\nI0823 07:37:46.858084 32551 solver.cpp:404]     Test net output #1: loss = 2.37335 (* 1 = 2.37335 loss)\nI0823 07:37:48.152261 32551 solver.cpp:228] Iteration 78000, loss = 0.000331158\nI0823 07:37:48.152307 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:37:48.152323 32551 solver.cpp:244]     Train net output #1: loss = 0.000331188 (* 1 = 0.000331188 loss)\nI0823 07:37:48.245556 32551 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0823 07:40:03.778271 32551 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0823 07:41:23.486522 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62048\nI0823 07:41:23.486852 32551 solver.cpp:404]     Test net output #1: loss = 2.38794 (* 1 = 2.38794 loss)\nI0823 07:41:24.782917 32551 solver.cpp:228] Iteration 78100, loss = 0.000288509\nI0823 07:41:24.782963 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:41:24.782979 32551 solver.cpp:244]     Train net output #1: loss = 0.000288539 (* 1 = 0.000288539 loss)\nI0823 07:41:24.870283 32551 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0823 07:43:40.509567 32551 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0823 07:45:00.235249 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6236\nI0823 07:45:00.235558 32551 solver.cpp:404]     
Test net output #1: loss = 2.37312 (* 1 = 2.37312 loss)\nI0823 07:45:01.528290 32551 solver.cpp:228] Iteration 78200, loss = 0.000314105\nI0823 07:45:01.528333 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:45:01.528349 32551 solver.cpp:244]     Train net output #1: loss = 0.000314135 (* 1 = 0.000314135 loss)\nI0823 07:45:01.622707 32551 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0823 07:47:17.180681 32551 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0823 07:48:36.916837 32551 solver.cpp:404]     Test net output #0: accuracy = 0.621\nI0823 07:48:36.917142 32551 solver.cpp:404]     Test net output #1: loss = 2.38714 (* 1 = 2.38714 loss)\nI0823 07:48:38.209575 32551 solver.cpp:228] Iteration 78300, loss = 0.000271017\nI0823 07:48:38.209619 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:48:38.209635 32551 solver.cpp:244]     Train net output #1: loss = 0.000271047 (* 1 = 0.000271047 loss)\nI0823 07:48:38.302882 32551 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0823 07:50:53.827249 32551 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0823 07:52:13.565647 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6236\nI0823 07:52:13.565934 32551 solver.cpp:404]     Test net output #1: loss = 2.37249 (* 1 = 2.37249 loss)\nI0823 07:52:14.858202 32551 solver.cpp:228] Iteration 78400, loss = 0.00032064\nI0823 07:52:14.858247 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:52:14.858264 32551 solver.cpp:244]     Train net output #1: loss = 0.00032067 (* 1 = 0.00032067 loss)\nI0823 07:52:14.957950 32551 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0823 07:54:30.890444 32551 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0823 07:55:50.621819 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62084\nI0823 07:55:50.622105 32551 solver.cpp:404]     Test net output #1: loss = 2.38726 (* 1 = 2.38726 loss)\nI0823 07:55:51.914611 32551 solver.cpp:228] Iteration 78500, 
loss = 0.0002516\nI0823 07:55:51.914654 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:55:51.914670 32551 solver.cpp:244]     Train net output #1: loss = 0.00025163 (* 1 = 0.00025163 loss)\nI0823 07:55:52.014766 32551 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0823 07:58:07.697553 32551 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0823 07:59:27.421828 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62364\nI0823 07:59:27.422159 32551 solver.cpp:404]     Test net output #1: loss = 2.3724 (* 1 = 2.3724 loss)\nI0823 07:59:28.714704 32551 solver.cpp:228] Iteration 78600, loss = 0.000334899\nI0823 07:59:28.714752 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:59:28.714768 32551 solver.cpp:244]     Train net output #1: loss = 0.000334928 (* 1 = 0.000334928 loss)\nI0823 07:59:28.809276 32551 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0823 08:01:44.851796 32551 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0823 08:03:04.570130 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62096\nI0823 08:03:04.570464 32551 solver.cpp:404]     Test net output #1: loss = 2.38691 (* 1 = 2.38691 loss)\nI0823 08:03:05.863188 32551 solver.cpp:228] Iteration 78700, loss = 0.000232718\nI0823 08:03:05.863235 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:03:05.863251 32551 solver.cpp:244]     Train net output #1: loss = 0.000232747 (* 1 = 0.000232747 loss)\nI0823 08:03:05.954474 32551 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0823 08:05:22.042184 32551 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0823 08:06:41.761984 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6236\nI0823 08:06:41.762249 32551 solver.cpp:404]     Test net output #1: loss = 2.37142 (* 1 = 2.37142 loss)\nI0823 08:06:43.054165 32551 solver.cpp:228] Iteration 78800, loss = 0.000339301\nI0823 08:06:43.054210 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
08:06:43.054227 32551 solver.cpp:244]     Train net output #1: loss = 0.000339331 (* 1 = 0.000339331 loss)\nI0823 08:06:43.145071 32551 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0823 08:08:59.158485 32551 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0823 08:10:18.885181 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62124\nI0823 08:10:18.885514 32551 solver.cpp:404]     Test net output #1: loss = 2.38609 (* 1 = 2.38609 loss)\nI0823 08:10:20.178169 32551 solver.cpp:228] Iteration 78900, loss = 0.000283971\nI0823 08:10:20.178215 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:10:20.178231 32551 solver.cpp:244]     Train net output #1: loss = 0.000284001 (* 1 = 0.000284001 loss)\nI0823 08:10:20.269397 32551 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0823 08:12:36.178768 32551 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0823 08:13:55.915504 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62344\nI0823 08:13:55.915822 32551 solver.cpp:404]     Test net output #1: loss = 2.37163 (* 1 = 2.37163 loss)\nI0823 08:13:57.208191 32551 solver.cpp:228] Iteration 79000, loss = 0.000350516\nI0823 08:13:57.208238 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:13:57.208254 32551 solver.cpp:244]     Train net output #1: loss = 0.000350546 (* 1 = 0.000350546 loss)\nI0823 08:13:57.300789 32551 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0823 08:16:13.034225 32551 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0823 08:17:32.756021 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62096\nI0823 08:17:32.756306 32551 solver.cpp:404]     Test net output #1: loss = 2.38662 (* 1 = 2.38662 loss)\nI0823 08:17:34.048658 32551 solver.cpp:228] Iteration 79100, loss = 0.000274599\nI0823 08:17:34.048704 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:17:34.048722 32551 solver.cpp:244]     Train net output #1: loss = 0.000274629 (* 1 = 0.000274629 loss)\nI0823 
08:17:34.147248 32551 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0823 08:19:49.967558 32551 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0823 08:21:09.687543 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6236\nI0823 08:21:09.687886 32551 solver.cpp:404]     Test net output #1: loss = 2.3716 (* 1 = 2.3716 loss)\nI0823 08:21:10.980077 32551 solver.cpp:228] Iteration 79200, loss = 0.000368874\nI0823 08:21:10.980120 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:21:10.980136 32551 solver.cpp:244]     Train net output #1: loss = 0.000368904 (* 1 = 0.000368904 loss)\nI0823 08:21:11.075125 32551 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0823 08:23:26.727627 32551 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0823 08:24:46.447309 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62072\nI0823 08:24:46.447609 32551 solver.cpp:404]     Test net output #1: loss = 2.38611 (* 1 = 2.38611 loss)\nI0823 08:24:47.739974 32551 solver.cpp:228] Iteration 79300, loss = 0.000363391\nI0823 08:24:47.740021 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:24:47.740038 32551 solver.cpp:244]     Train net output #1: loss = 0.000363421 (* 1 = 0.000363421 loss)\nI0823 08:24:47.834452 32551 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0823 08:27:03.907866 32551 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0823 08:28:23.636574 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62348\nI0823 08:28:23.636860 32551 solver.cpp:404]     Test net output #1: loss = 2.37144 (* 1 = 2.37144 loss)\nI0823 08:28:24.929684 32551 solver.cpp:228] Iteration 79400, loss = 0.000372941\nI0823 08:28:24.929738 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:28:24.929755 32551 solver.cpp:244]     Train net output #1: loss = 0.000372971 (* 1 = 0.000372971 loss)\nI0823 08:28:25.026897 32551 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0823 08:30:40.956562 32551 solver.cpp:337] 
Iteration 79500, Testing net (#0)\nI0823 08:32:00.686552 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62068\nI0823 08:32:00.686838 32551 solver.cpp:404]     Test net output #1: loss = 2.38666 (* 1 = 2.38666 loss)\nI0823 08:32:01.979259 32551 solver.cpp:228] Iteration 79500, loss = 0.000227812\nI0823 08:32:01.979305 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:32:01.979322 32551 solver.cpp:244]     Train net output #1: loss = 0.000227842 (* 1 = 0.000227842 loss)\nI0823 08:32:02.077116 32551 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0823 08:34:18.029198 32551 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0823 08:35:37.754129 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62296\nI0823 08:35:37.754418 32551 solver.cpp:404]     Test net output #1: loss = 2.37195 (* 1 = 2.37195 loss)\nI0823 08:35:39.046489 32551 solver.cpp:228] Iteration 79600, loss = 0.000298082\nI0823 08:35:39.046535 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:35:39.046551 32551 solver.cpp:244]     Train net output #1: loss = 0.000298111 (* 1 = 0.000298111 loss)\nI0823 08:35:39.146809 32551 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0823 08:37:54.801795 32551 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0823 08:39:14.528542 32551 solver.cpp:404]     Test net output #0: accuracy = 0.6204\nI0823 08:39:14.528870 32551 solver.cpp:404]     Test net output #1: loss = 2.38614 (* 1 = 2.38614 loss)\nI0823 08:39:15.821442 32551 solver.cpp:228] Iteration 79700, loss = 0.000324306\nI0823 08:39:15.821490 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:39:15.821506 32551 solver.cpp:244]     Train net output #1: loss = 0.000324336 (* 1 = 0.000324336 loss)\nI0823 08:39:15.915508 32551 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0823 08:41:32.073737 32551 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0823 08:42:51.792815 32551 solver.cpp:404]     Test net output #0: accuracy = 
0.62364\nI0823 08:42:51.793099 32551 solver.cpp:404]     Test net output #1: loss = 2.37127 (* 1 = 2.37127 loss)\nI0823 08:42:53.085850 32551 solver.cpp:228] Iteration 79800, loss = 0.000292192\nI0823 08:42:53.085896 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:42:53.085912 32551 solver.cpp:244]     Train net output #1: loss = 0.000292222 (* 1 = 0.000292222 loss)\nI0823 08:42:53.177356 32551 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0823 08:45:09.111145 32551 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0823 08:46:28.833665 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62124\nI0823 08:46:28.833956 32551 solver.cpp:404]     Test net output #1: loss = 2.38516 (* 1 = 2.38516 loss)\nI0823 08:46:30.126480 32551 solver.cpp:228] Iteration 79900, loss = 0.000309593\nI0823 08:46:30.126528 32551 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:46:30.126544 32551 solver.cpp:244]     Train net output #1: loss = 0.000309623 (* 1 = 0.000309623 loss)\nI0823 08:46:30.223139 32551 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0823 08:48:46.090764 32551 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Tr20kTab1_iter_80000.caffemodel\nI0823 08:48:46.314913 32551 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Tr20kTab1_iter_80000.solverstate\nI0823 08:48:46.754020 32551 solver.cpp:317] Iteration 80000, loss = 0.000258012\nI0823 08:48:46.754066 32551 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0823 08:50:06.464388 32551 solver.cpp:404]     Test net output #0: accuracy = 0.62328\nI0823 08:50:06.464733 32551 solver.cpp:404]     Test net output #1: loss = 2.37129 (* 1 = 2.37129 loss)\nI0823 08:50:06.464746 32551 solver.cpp:322] Optimization Done.\nI0823 08:50:12.739056 32551 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Tr30kTab1",
    "content": "I0821 08:26:50.890583 32487 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:26:50.893173 32487 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:26:50.894392 32487 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:26:50.895608 32487 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:26:50.896817 32487 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:26:50.898041 32487 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:26:50.899276 32487 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:26:50.900506 32487 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:26:50.901731 32487 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:26:51.323670 32487 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Tr30kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0821 08:26:51.326954 32487 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:26:51.343184 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:51.343268 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:51.344347 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:26:51.344401 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:26:51.344419 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:26:51.344429 32487 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:26:51.344439 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:26:51.344447 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:26:51.344456 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:26:51.344465 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:26:51.344475 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:26:51.344485 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:26:51.344493 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:26:51.344501 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:26:51.344511 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:26:51.344521 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:26:51.344529 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:26:51.344538 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:26:51.344547 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:26:51.344555 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:26:51.344565 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:26:51.344573 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:26:51.344596 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:26:51.344606 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:26:51.344619 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:26:51.344630 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:26:51.344638 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:26:51.344646 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:26:51.344656 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:26:51.344665 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:26:51.344673 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:26:51.344682 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:26:51.344691 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:26:51.344700 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:26:51.344709 32487 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:26:51.344717 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:26:51.344727 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:26:51.344735 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:26:51.344745 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:26:51.344753 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:26:51.344763 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:26:51.344771 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:26:51.344784 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:26:51.344792 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:26:51.344800 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:26:51.344810 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:26:51.344820 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:26:51.344827 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:26:51.344837 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:26:51.344844 32487 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:26:51.344854 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:26:51.344862 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:26:51.344871 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:26:51.344887 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:26:51.344897 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:26:51.344907 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:26:51.344915 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:26:51.344923 32487 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:26:51.346650 32487 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train30k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.9\nI0821 08:26:51.348577 32487 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:26:51.350949 32487 net.cpp:100] Creating Layer dataLayer\nI0821 08:26:51.351004 32487 net.cpp:408] dataLayer -> data_top\nI0821 08:26:51.351199 32487 net.cpp:408] dataLayer -> label\nI0821 08:26:51.351312 32487 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:26:51.430696 32492 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train30k_lmdb\nI0821 08:26:51.431208 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:51.438661 32487 net.cpp:150] Setting up dataLayer\nI0821 08:26:51.438730 32487 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:26:51.438743 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.438750 32487 net.cpp:165] Memory required for data: 1536500\nI0821 08:26:51.438765 32487 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:26:51.438781 32487 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:26:51.438788 32487 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:26:51.438805 32487 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:26:51.438820 32487 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:26:51.438891 32487 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:26:51.438908 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.438915 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.438920 32487 net.cpp:165] Memory required for data: 1537500\nI0821 08:26:51.438926 32487 layer_factory.hpp:77] Creating layer pre_conv\nI0821 
08:26:51.438992 32487 net.cpp:100] Creating Layer pre_conv\nI0821 08:26:51.439005 32487 net.cpp:434] pre_conv <- data_top\nI0821 08:26:51.439015 32487 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:26:51.440860 32487 net.cpp:150] Setting up pre_conv\nI0821 08:26:51.440879 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.440886 32487 net.cpp:165] Memory required for data: 9729500\nI0821 08:26:51.440963 32487 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:26:51.441041 32487 net.cpp:100] Creating Layer pre_bn\nI0821 08:26:51.441054 32487 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:26:51.441064 32487 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:26:51.441159 32493 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:51.441413 32487 net.cpp:150] Setting up pre_bn\nI0821 08:26:51.441431 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.441437 32487 net.cpp:165] Memory required for data: 17921500\nI0821 08:26:51.441455 32487 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:51.441509 32487 net.cpp:100] Creating Layer pre_scale\nI0821 08:26:51.441519 32487 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:26:51.441530 32487 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:26:51.441707 32487 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:51.444936 32487 net.cpp:150] Setting up pre_scale\nI0821 08:26:51.444953 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.444959 32487 net.cpp:165] Memory required for data: 26113500\nI0821 08:26:51.444970 32487 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:26:51.445019 32487 net.cpp:100] Creating Layer pre_relu\nI0821 08:26:51.445029 32487 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:26:51.445040 32487 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:26:51.445052 32487 net.cpp:150] Setting up pre_relu\nI0821 08:26:51.445060 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.445065 32487 net.cpp:165] Memory 
required for data: 34305500\nI0821 08:26:51.445070 32487 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:26:51.445078 32487 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:26:51.445083 32487 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:26:51.445094 32487 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:26:51.445106 32487 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:26:51.445153 32487 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:26:51.445166 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.445173 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.445178 32487 net.cpp:165] Memory required for data: 50689500\nI0821 08:26:51.445184 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:26:51.445196 32487 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:26:51.445202 32487 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:26:51.445214 32487 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:26:51.445533 32487 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:26:51.445549 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.445555 32487 net.cpp:165] Memory required for data: 58881500\nI0821 08:26:51.445567 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:26:51.445582 32487 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:26:51.445588 32487 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:26:51.445597 32487 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:26:51.445827 32487 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:26:51.445842 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.445848 32487 net.cpp:165] Memory required for data: 67073500\nI0821 08:26:51.445859 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:51.445868 32487 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:26:51.445874 32487 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:26:51.445883 32487 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.445937 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:51.446080 32487 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:26:51.446094 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.446099 32487 net.cpp:165] Memory required for data: 75265500\nI0821 08:26:51.446108 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:26:51.446127 32487 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:26:51.446135 32487 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:26:51.446142 32487 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.446157 32487 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:26:51.446164 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.446169 32487 net.cpp:165] Memory required for data: 83457500\nI0821 08:26:51.446174 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:26:51.446187 32487 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:26:51.446192 32487 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:26:51.446203 32487 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:26:51.446511 32487 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:26:51.446526 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.446532 32487 net.cpp:165] Memory required for data: 91649500\nI0821 08:26:51.446540 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:26:51.446550 32487 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:26:51.446557 32487 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:26:51.446568 32487 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:26:51.446799 32487 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 
08:26:51.446813 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.446818 32487 net.cpp:165] Memory required for data: 99841500\nI0821 08:26:51.446835 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:51.446846 32487 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:26:51.446851 32487 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:26:51.446859 32487 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.446915 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:51.447054 32487 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:26:51.447067 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447073 32487 net.cpp:165] Memory required for data: 108033500\nI0821 08:26:51.447082 32487 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:26:51.447139 32487 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:26:51.447150 32487 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:26:51.447158 32487 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:26:51.447170 32487 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:26:51.447248 32487 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:26:51.447269 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447275 32487 net.cpp:165] Memory required for data: 116225500\nI0821 08:26:51.447281 32487 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:26:51.447294 32487 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:26:51.447300 32487 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:26:51.447309 32487 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.447319 32487 net.cpp:150] Setting up L1_b1_relu\nI0821 08:26:51.447326 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447330 32487 net.cpp:165] Memory required for data: 124417500\nI0821 08:26:51.447335 32487 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.447345 32487 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.447350 32487 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:26:51.447357 32487 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:51.447366 32487 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:51.447418 32487 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.447432 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447438 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447450 32487 net.cpp:165] Memory required for data: 140801500\nI0821 08:26:51.447456 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:26:51.447470 32487 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:26:51.447477 32487 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:51.447487 32487 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:26:51.447794 32487 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:26:51.447808 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.447813 32487 net.cpp:165] Memory required for data: 148993500\nI0821 08:26:51.447824 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:26:51.447839 32487 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:26:51.447846 32487 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:26:51.447857 32487 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:26:51.448097 32487 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:26:51.448112 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.448117 32487 net.cpp:165] Memory required for data: 157185500\nI0821 08:26:51.448127 32487 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:51.448137 32487 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:26:51.448143 32487 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:26:51.448153 32487 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.448207 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:51.448359 32487 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:26:51.448376 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.448382 32487 net.cpp:165] Memory required for data: 165377500\nI0821 08:26:51.448392 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:26:51.448400 32487 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:26:51.448406 32487 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:26:51.448415 32487 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.448423 32487 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:26:51.448431 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.448436 32487 net.cpp:165] Memory required for data: 173569500\nI0821 08:26:51.448441 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:26:51.448454 32487 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:26:51.448462 32487 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:26:51.448472 32487 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:26:51.448777 32487 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:26:51.448791 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.448796 32487 net.cpp:165] Memory required for data: 181761500\nI0821 08:26:51.448806 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:26:51.448817 32487 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:26:51.448824 32487 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:26:51.448835 32487 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0821 08:26:51.449076 32487 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:26:51.449090 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449095 32487 net.cpp:165] Memory required for data: 189953500\nI0821 08:26:51.449110 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:51.449122 32487 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:26:51.449129 32487 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:26:51.449137 32487 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.449193 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:51.449340 32487 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:26:51.449354 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449360 32487 net.cpp:165] Memory required for data: 198145500\nI0821 08:26:51.449369 32487 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:26:51.449386 32487 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:26:51.449393 32487 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:26:51.449400 32487 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:51.449410 32487 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:26:51.449445 32487 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:26:51.449458 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449463 32487 net.cpp:165] Memory required for data: 206337500\nI0821 08:26:51.449468 32487 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:26:51.449476 32487 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:26:51.449481 32487 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:26:51.449488 32487 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:26:51.449501 32487 net.cpp:150] Setting up L1_b2_relu\nI0821 08:26:51.449508 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:26:51.449513 32487 net.cpp:165] Memory required for data: 214529500\nI0821 08:26:51.449518 32487 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.449525 32487 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.449532 32487 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:26:51.449538 32487 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:51.449548 32487 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:51.449591 32487 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.449604 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449610 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449615 32487 net.cpp:165] Memory required for data: 230913500\nI0821 08:26:51.449620 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:26:51.449631 32487 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:26:51.449638 32487 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:51.449651 32487 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:26:51.449951 32487 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:26:51.449965 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.449970 32487 net.cpp:165] Memory required for data: 239105500\nI0821 08:26:51.449980 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:26:51.449990 32487 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:26:51.449995 32487 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:26:51.450006 32487 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:26:51.450244 32487 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:26:51.450263 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:26:51.450268 32487 net.cpp:165] Memory required for data: 247297500\nI0821 08:26:51.450279 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:51.450292 32487 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:26:51.450299 32487 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:26:51.450306 32487 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.450358 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:51.450502 32487 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:26:51.450515 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.450521 32487 net.cpp:165] Memory required for data: 255489500\nI0821 08:26:51.450531 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:26:51.450538 32487 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:26:51.450544 32487 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:26:51.450554 32487 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.450565 32487 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:26:51.450579 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.450584 32487 net.cpp:165] Memory required for data: 263681500\nI0821 08:26:51.450589 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:26:51.450604 32487 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:26:51.450610 32487 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:26:51.450619 32487 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:26:51.450925 32487 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:26:51.450939 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.450944 32487 net.cpp:165] Memory required for data: 271873500\nI0821 08:26:51.450953 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:26:51.450968 32487 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:26:51.450975 32487 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0821 08:26:51.450989 32487 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:26:51.451227 32487 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:26:51.451241 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451246 32487 net.cpp:165] Memory required for data: 280065500\nI0821 08:26:51.451263 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:51.451275 32487 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:26:51.451282 32487 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:26:51.451290 32487 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.451344 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:51.451483 32487 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:26:51.451496 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451501 32487 net.cpp:165] Memory required for data: 288257500\nI0821 08:26:51.451510 32487 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:26:51.451522 32487 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:26:51.451529 32487 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:26:51.451536 32487 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:51.451545 32487 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:26:51.451578 32487 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:26:51.451591 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451596 32487 net.cpp:165] Memory required for data: 296449500\nI0821 08:26:51.451601 32487 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:26:51.451608 32487 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:26:51.451614 32487 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:26:51.451624 32487 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.451634 32487 net.cpp:150] Setting up L1_b3_relu\nI0821 
08:26:51.451642 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451647 32487 net.cpp:165] Memory required for data: 304641500\nI0821 08:26:51.451652 32487 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.451658 32487 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.451664 32487 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:26:51.451671 32487 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:51.451681 32487 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:51.451727 32487 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.451740 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451746 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.451751 32487 net.cpp:165] Memory required for data: 321025500\nI0821 08:26:51.451757 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:26:51.451768 32487 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:26:51.451774 32487 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:51.451797 32487 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:26:51.452107 32487 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:26:51.452121 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.452126 32487 net.cpp:165] Memory required for data: 329217500\nI0821 08:26:51.452136 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:26:51.452144 32487 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:26:51.452150 32487 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:26:51.452162 32487 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:26:51.452411 32487 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 
08:26:51.452425 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.452430 32487 net.cpp:165] Memory required for data: 337409500\nI0821 08:26:51.452440 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:51.452452 32487 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:26:51.452460 32487 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:26:51.452467 32487 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.452522 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:51.452667 32487 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:26:51.452680 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.452685 32487 net.cpp:165] Memory required for data: 345601500\nI0821 08:26:51.452695 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:26:51.452706 32487 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:26:51.452713 32487 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:26:51.452720 32487 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.452731 32487 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:26:51.452739 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.452744 32487 net.cpp:165] Memory required for data: 353793500\nI0821 08:26:51.452749 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:26:51.452762 32487 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:26:51.452769 32487 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:26:51.452780 32487 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:26:51.453090 32487 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:26:51.453104 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453109 32487 net.cpp:165] Memory required for data: 361985500\nI0821 08:26:51.453119 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:26:51.453127 32487 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0821 08:26:51.453133 32487 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:26:51.453141 32487 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:26:51.453393 32487 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:26:51.453408 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453413 32487 net.cpp:165] Memory required for data: 370177500\nI0821 08:26:51.453428 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:51.453436 32487 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:26:51.453444 32487 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:26:51.453454 32487 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.453505 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:51.453644 32487 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:26:51.453657 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453662 32487 net.cpp:165] Memory required for data: 378369500\nI0821 08:26:51.453671 32487 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:26:51.453685 32487 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:26:51.453691 32487 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:26:51.453697 32487 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:51.453709 32487 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:26:51.453749 32487 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:26:51.453759 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453764 32487 net.cpp:165] Memory required for data: 386561500\nI0821 08:26:51.453770 32487 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:26:51.453781 32487 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:26:51.453788 32487 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:26:51.453795 32487 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0821 08:26:51.453804 32487 net.cpp:150] Setting up L1_b4_relu\nI0821 08:26:51.453811 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453816 32487 net.cpp:165] Memory required for data: 394753500\nI0821 08:26:51.453821 32487 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.453829 32487 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.453833 32487 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:26:51.453841 32487 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:51.453850 32487 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:51.453896 32487 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.453908 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453914 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.453919 32487 net.cpp:165] Memory required for data: 411137500\nI0821 08:26:51.453924 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:26:51.453938 32487 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:26:51.453945 32487 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:51.453954 32487 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:26:51.454269 32487 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:26:51.454283 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.454289 32487 net.cpp:165] Memory required for data: 419329500\nI0821 08:26:51.454313 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:26:51.454326 32487 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:26:51.454334 32487 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:26:51.454342 32487 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0821 08:26:51.454581 32487 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:26:51.454597 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.454603 32487 net.cpp:165] Memory required for data: 427521500\nI0821 08:26:51.454614 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:51.454623 32487 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:26:51.454629 32487 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:26:51.454638 32487 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.454689 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:51.454835 32487 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:26:51.454849 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.454854 32487 net.cpp:165] Memory required for data: 435713500\nI0821 08:26:51.454864 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:26:51.454875 32487 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:26:51.454881 32487 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:26:51.454890 32487 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.454900 32487 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:26:51.454906 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.454911 32487 net.cpp:165] Memory required for data: 443905500\nI0821 08:26:51.454916 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:26:51.454931 32487 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:26:51.454943 32487 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:26:51.454955 32487 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:26:51.455276 32487 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:26:51.455291 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.455296 32487 net.cpp:165] Memory required for data: 452097500\nI0821 08:26:51.455305 32487 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:26:51.455315 32487 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:26:51.455322 32487 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:26:51.455332 32487 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:26:51.455569 32487 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:26:51.455582 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.455587 32487 net.cpp:165] Memory required for data: 460289500\nI0821 08:26:51.455598 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:51.455610 32487 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:26:51.455616 32487 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:26:51.455624 32487 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.455677 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:51.455821 32487 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:26:51.455833 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.455839 32487 net.cpp:165] Memory required for data: 468481500\nI0821 08:26:51.455848 32487 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:26:51.455860 32487 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:26:51.455868 32487 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:26:51.455874 32487 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:51.455883 32487 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:26:51.455916 32487 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:26:51.455929 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.455934 32487 net.cpp:165] Memory required for data: 476673500\nI0821 08:26:51.455940 32487 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:26:51.455947 32487 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:26:51.455953 32487 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0821 08:26:51.455962 32487 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.455973 32487 net.cpp:150] Setting up L1_b5_relu\nI0821 08:26:51.455981 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.455986 32487 net.cpp:165] Memory required for data: 484865500\nI0821 08:26:51.455991 32487 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.455997 32487 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.456003 32487 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:26:51.456010 32487 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:51.456020 32487 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:51.456065 32487 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.456077 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.456084 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.456089 32487 net.cpp:165] Memory required for data: 501249500\nI0821 08:26:51.456094 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:26:51.456106 32487 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:26:51.456112 32487 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:51.456125 32487 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:26:51.456442 32487 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:26:51.456457 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.456462 32487 net.cpp:165] Memory required for data: 509441500\nI0821 08:26:51.456478 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:26:51.456490 32487 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:26:51.456496 32487 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0821 08:26:51.456506 32487 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:26:51.456748 32487 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:26:51.456761 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.456766 32487 net.cpp:165] Memory required for data: 517633500\nI0821 08:26:51.456776 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:51.456789 32487 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:26:51.456795 32487 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:26:51.456802 32487 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.456856 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:51.456996 32487 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:26:51.457010 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.457015 32487 net.cpp:165] Memory required for data: 525825500\nI0821 08:26:51.457023 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:26:51.457036 32487 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:26:51.457041 32487 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:26:51.457049 32487 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.457059 32487 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:26:51.457067 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.457072 32487 net.cpp:165] Memory required for data: 534017500\nI0821 08:26:51.457075 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:26:51.457090 32487 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:26:51.457096 32487 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:26:51.457108 32487 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:26:51.457430 32487 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:26:51.457444 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.457450 32487 
net.cpp:165] Memory required for data: 542209500\nI0821 08:26:51.457459 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:26:51.457468 32487 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:26:51.457474 32487 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:26:51.457482 32487 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:26:51.457726 32487 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:26:51.457739 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.457744 32487 net.cpp:165] Memory required for data: 550401500\nI0821 08:26:51.457756 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:51.457767 32487 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:26:51.457774 32487 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:26:51.457782 32487 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.457837 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:51.457978 32487 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:26:51.457991 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.457998 32487 net.cpp:165] Memory required for data: 558593500\nI0821 08:26:51.458006 32487 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:26:51.458024 32487 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:26:51.458032 32487 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:26:51.458039 32487 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:51.458047 32487 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:26:51.458081 32487 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:26:51.458091 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458096 32487 net.cpp:165] Memory required for data: 566785500\nI0821 08:26:51.458102 32487 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:26:51.458122 32487 net.cpp:100] Creating 
Layer L1_b6_relu\nI0821 08:26:51.458128 32487 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:26:51.458137 32487 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.458145 32487 net.cpp:150] Setting up L1_b6_relu\nI0821 08:26:51.458153 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458158 32487 net.cpp:165] Memory required for data: 574977500\nI0821 08:26:51.458163 32487 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.458170 32487 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.458176 32487 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:26:51.458184 32487 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:51.458192 32487 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:51.458240 32487 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.458253 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458266 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458271 32487 net.cpp:165] Memory required for data: 591361500\nI0821 08:26:51.458276 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:26:51.458290 32487 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:26:51.458297 32487 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:51.458307 32487 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:26:51.458619 32487 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:26:51.458633 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458638 32487 net.cpp:165] Memory required for data: 599553500\nI0821 08:26:51.458648 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:26:51.458660 32487 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0821 08:26:51.458667 32487 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:26:51.458675 32487 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:26:51.458920 32487 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:26:51.458935 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.458940 32487 net.cpp:165] Memory required for data: 607745500\nI0821 08:26:51.458950 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:51.458959 32487 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:26:51.458966 32487 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:26:51.458973 32487 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.459028 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:51.459172 32487 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:26:51.459187 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.459192 32487 net.cpp:165] Memory required for data: 615937500\nI0821 08:26:51.459200 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:26:51.459209 32487 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:26:51.459215 32487 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:26:51.459228 32487 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.459237 32487 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:26:51.459245 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.459250 32487 net.cpp:165] Memory required for data: 624129500\nI0821 08:26:51.459261 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:26:51.459276 32487 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:26:51.459283 32487 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:26:51.459292 32487 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:26:51.459606 32487 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:26:51.459620 32487 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.459626 32487 net.cpp:165] Memory required for data: 632321500\nI0821 08:26:51.459642 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:26:51.459656 32487 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:26:51.459662 32487 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:26:51.459671 32487 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:26:51.459913 32487 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:26:51.459929 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.459934 32487 net.cpp:165] Memory required for data: 640513500\nI0821 08:26:51.459945 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:51.459955 32487 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:26:51.459961 32487 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:26:51.459969 32487 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.460023 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:51.460165 32487 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:26:51.460178 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460185 32487 net.cpp:165] Memory required for data: 648705500\nI0821 08:26:51.460193 32487 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:26:51.460206 32487 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:26:51.460212 32487 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:26:51.460219 32487 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:51.460230 32487 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:26:51.460268 32487 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:26:51.460281 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460286 32487 net.cpp:165] Memory required for data: 656897500\nI0821 08:26:51.460292 32487 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0821 08:26:51.460304 32487 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:26:51.460310 32487 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:26:51.460317 32487 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.460327 32487 net.cpp:150] Setting up L1_b7_relu\nI0821 08:26:51.460335 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460340 32487 net.cpp:165] Memory required for data: 665089500\nI0821 08:26:51.460345 32487 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.460351 32487 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.460357 32487 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:26:51.460364 32487 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:51.460374 32487 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:51.460422 32487 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.460433 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460440 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460445 32487 net.cpp:165] Memory required for data: 681473500\nI0821 08:26:51.460450 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:26:51.460464 32487 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:26:51.460471 32487 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:51.460480 32487 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:26:51.460799 32487 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:26:51.460813 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.460819 32487 net.cpp:165] Memory required for data: 689665500\nI0821 08:26:51.460827 32487 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0821 08:26:51.460841 32487 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:26:51.460849 32487 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:26:51.460856 32487 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:26:51.461112 32487 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:26:51.461127 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.461132 32487 net.cpp:165] Memory required for data: 697857500\nI0821 08:26:51.461141 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:51.461151 32487 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:26:51.461158 32487 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:26:51.461164 32487 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.461220 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:51.461376 32487 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:26:51.461390 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.461395 32487 net.cpp:165] Memory required for data: 706049500\nI0821 08:26:51.461405 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:26:51.461413 32487 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:26:51.461419 32487 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:26:51.461429 32487 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.461441 32487 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:26:51.461447 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.461452 32487 net.cpp:165] Memory required for data: 714241500\nI0821 08:26:51.461457 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:26:51.461468 32487 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:26:51.461474 32487 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:26:51.461485 32487 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:26:51.461802 32487 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:26:51.461815 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.461822 32487 net.cpp:165] Memory required for data: 722433500\nI0821 08:26:51.461829 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:26:51.461839 32487 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:26:51.461845 32487 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:26:51.461856 32487 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:26:51.462106 32487 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:26:51.462124 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462131 32487 net.cpp:165] Memory required for data: 730625500\nI0821 08:26:51.462141 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:51.462151 32487 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:26:51.462157 32487 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:26:51.462164 32487 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.462218 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:51.462369 32487 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:26:51.462383 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462389 32487 net.cpp:165] Memory required for data: 738817500\nI0821 08:26:51.462399 32487 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:26:51.462410 32487 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:26:51.462417 32487 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:26:51.462424 32487 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:51.462435 32487 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:26:51.462466 32487 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:26:51.462478 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462483 32487 net.cpp:165] Memory required for 
data: 747009500\nI0821 08:26:51.462489 32487 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:26:51.462496 32487 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:26:51.462502 32487 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:26:51.462512 32487 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.462523 32487 net.cpp:150] Setting up L1_b8_relu\nI0821 08:26:51.462538 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462543 32487 net.cpp:165] Memory required for data: 755201500\nI0821 08:26:51.462548 32487 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.462555 32487 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.462560 32487 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:26:51.462568 32487 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:51.462577 32487 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:51.462625 32487 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.462637 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462644 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.462648 32487 net.cpp:165] Memory required for data: 771585500\nI0821 08:26:51.462654 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:26:51.462668 32487 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:26:51.462674 32487 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:51.462684 32487 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:26:51.463004 32487 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:26:51.463021 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.463027 32487 net.cpp:165] Memory required for data: 
779777500\nI0821 08:26:51.463037 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:26:51.463048 32487 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:26:51.463055 32487 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:26:51.463063 32487 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:26:51.463316 32487 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:26:51.463330 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.463335 32487 net.cpp:165] Memory required for data: 787969500\nI0821 08:26:51.463346 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:51.463354 32487 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:26:51.463361 32487 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:26:51.463371 32487 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.463428 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:51.463572 32487 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:26:51.463587 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.463593 32487 net.cpp:165] Memory required for data: 796161500\nI0821 08:26:51.463603 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:26:51.463610 32487 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:26:51.463618 32487 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:26:51.463624 32487 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.463634 32487 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:26:51.463641 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.463646 32487 net.cpp:165] Memory required for data: 804353500\nI0821 08:26:51.463651 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:26:51.463667 32487 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:26:51.463675 32487 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:26:51.463685 32487 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:26:51.464005 32487 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:26:51.464020 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464025 32487 net.cpp:165] Memory required for data: 812545500\nI0821 08:26:51.464035 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:26:51.464046 32487 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:26:51.464053 32487 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:26:51.464064 32487 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:26:51.464320 32487 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:26:51.464334 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464339 32487 net.cpp:165] Memory required for data: 820737500\nI0821 08:26:51.464371 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:51.464381 32487 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:26:51.464388 32487 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:26:51.464398 32487 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.464455 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:51.464597 32487 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:26:51.464610 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464617 32487 net.cpp:165] Memory required for data: 828929500\nI0821 08:26:51.464625 32487 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:26:51.464634 32487 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:26:51.464642 32487 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:26:51.464648 32487 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:51.464655 32487 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:26:51.464687 32487 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:26:51.464696 32487 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:51.464701 32487 net.cpp:165] Memory required for data: 837121500\nI0821 08:26:51.464706 32487 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:26:51.464717 32487 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:26:51.464723 32487 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:26:51.464731 32487 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.464740 32487 net.cpp:150] Setting up L1_b9_relu\nI0821 08:26:51.464748 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464753 32487 net.cpp:165] Memory required for data: 845313500\nI0821 08:26:51.464757 32487 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.464769 32487 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.464776 32487 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:26:51.464783 32487 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:51.464792 32487 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:51.464838 32487 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.464850 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464857 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.464862 32487 net.cpp:165] Memory required for data: 861697500\nI0821 08:26:51.464867 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:26:51.464881 32487 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:26:51.464889 32487 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:51.464897 32487 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:26:51.465220 32487 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:26:51.465235 32487 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0821 08:26:51.465240 32487 net.cpp:165] Memory required for data: 863745500\nI0821 08:26:51.465250 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:26:51.465267 32487 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:26:51.465276 32487 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:26:51.465286 32487 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:26:51.465526 32487 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:26:51.465540 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.465545 32487 net.cpp:165] Memory required for data: 865793500\nI0821 08:26:51.465556 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:51.465565 32487 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:26:51.465577 32487 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:26:51.465586 32487 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.465643 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:51.465783 32487 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:26:51.465796 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.465801 32487 net.cpp:165] Memory required for data: 867841500\nI0821 08:26:51.465811 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:26:51.465822 32487 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:26:51.465828 32487 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:26:51.465837 32487 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.465847 32487 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:26:51.465854 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.465858 32487 net.cpp:165] Memory required for data: 869889500\nI0821 08:26:51.465864 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:26:51.465878 32487 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:26:51.465884 32487 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:26:51.465893 32487 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:26:51.466210 32487 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:26:51.466224 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.466229 32487 net.cpp:165] Memory required for data: 871937500\nI0821 08:26:51.466238 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:26:51.466250 32487 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:26:51.466264 32487 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:26:51.466274 32487 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:26:51.466517 32487 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:26:51.466536 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.466542 32487 net.cpp:165] Memory required for data: 873985500\nI0821 08:26:51.466552 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:51.466562 32487 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:26:51.466567 32487 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:26:51.466575 32487 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.466629 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:51.466779 32487 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:26:51.466792 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.466799 32487 net.cpp:165] Memory required for data: 876033500\nI0821 08:26:51.466807 32487 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:26:51.466817 32487 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:26:51.466825 32487 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:51.466835 32487 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:26:51.466926 32487 net.cpp:150] Setting up L2_b1_pool\nI0821 08:26:51.466941 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.466946 32487 net.cpp:165] Memory 
required for data: 878081500\nI0821 08:26:51.466953 32487 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:26:51.466965 32487 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:26:51.466972 32487 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:26:51.466979 32487 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:26:51.466987 32487 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:26:51.467020 32487 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:26:51.467031 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.467034 32487 net.cpp:165] Memory required for data: 880129500\nI0821 08:26:51.467041 32487 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:26:51.467048 32487 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:26:51.467053 32487 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:26:51.467072 32487 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.467082 32487 net.cpp:150] Setting up L2_b1_relu\nI0821 08:26:51.467090 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.467095 32487 net.cpp:165] Memory required for data: 882177500\nI0821 08:26:51.467100 32487 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:26:51.467151 32487 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:26:51.467165 32487 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:26:51.469475 32487 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:26:51.469494 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.469499 32487 net.cpp:165] Memory required for data: 884225500\nI0821 08:26:51.469506 32487 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:26:51.469519 32487 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:26:51.469527 32487 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:26:51.469534 32487 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:26:51.469542 32487 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 
08:26:51.469627 32487 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:26:51.469643 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.469648 32487 net.cpp:165] Memory required for data: 888321500\nI0821 08:26:51.469655 32487 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.469662 32487 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.469668 32487 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:26:51.469681 32487 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:51.469691 32487 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:51.469741 32487 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.469756 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.469763 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.469769 32487 net.cpp:165] Memory required for data: 896513500\nI0821 08:26:51.469774 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:26:51.469785 32487 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:26:51.469792 32487 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:51.469801 32487 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:26:51.471242 32487 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:26:51.471266 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.471271 32487 net.cpp:165] Memory required for data: 900609500\nI0821 08:26:51.471282 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:26:51.471295 32487 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:26:51.471302 32487 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:26:51.471314 32487 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:26:51.471560 32487 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 
08:26:51.471573 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.471578 32487 net.cpp:165] Memory required for data: 904705500\nI0821 08:26:51.471590 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:51.471598 32487 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:26:51.471606 32487 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:26:51.471612 32487 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.471673 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:51.471822 32487 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:26:51.471835 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.471840 32487 net.cpp:165] Memory required for data: 908801500\nI0821 08:26:51.471849 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:26:51.471861 32487 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:26:51.471868 32487 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:26:51.471884 32487 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.471895 32487 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:26:51.471904 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.471909 32487 net.cpp:165] Memory required for data: 912897500\nI0821 08:26:51.471913 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:26:51.471928 32487 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:26:51.471935 32487 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:26:51.471946 32487 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:26:51.472419 32487 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:26:51.472434 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.472440 32487 net.cpp:165] Memory required for data: 916993500\nI0821 08:26:51.472448 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:26:51.472461 32487 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0821 08:26:51.472468 32487 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:26:51.472479 32487 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:26:51.472724 32487 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:26:51.472743 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.472749 32487 net.cpp:165] Memory required for data: 921089500\nI0821 08:26:51.472760 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:51.472769 32487 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:26:51.472776 32487 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:26:51.472784 32487 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.472839 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:51.472986 32487 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:26:51.473000 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473004 32487 net.cpp:165] Memory required for data: 925185500\nI0821 08:26:51.473013 32487 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:26:51.473022 32487 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:26:51.473029 32487 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:26:51.473039 32487 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:51.473048 32487 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:26:51.473075 32487 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:26:51.473085 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473090 32487 net.cpp:165] Memory required for data: 929281500\nI0821 08:26:51.473095 32487 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:26:51.473106 32487 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:26:51.473112 32487 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:26:51.473120 32487 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0821 08:26:51.473129 32487 net.cpp:150] Setting up L2_b2_relu\nI0821 08:26:51.473137 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473141 32487 net.cpp:165] Memory required for data: 933377500\nI0821 08:26:51.473146 32487 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.473153 32487 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.473160 32487 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:26:51.473166 32487 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:51.473176 32487 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:51.473223 32487 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.473235 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473242 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473248 32487 net.cpp:165] Memory required for data: 941569500\nI0821 08:26:51.473253 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:26:51.473282 32487 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:26:51.473290 32487 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:51.473300 32487 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:26:51.473764 32487 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:26:51.473779 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.473784 32487 net.cpp:165] Memory required for data: 945665500\nI0821 08:26:51.473793 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:26:51.473805 32487 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:26:51.473812 32487 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:26:51.473824 32487 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0821 08:26:51.474071 32487 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:26:51.474083 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.474088 32487 net.cpp:165] Memory required for data: 949761500\nI0821 08:26:51.474099 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:51.474107 32487 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:26:51.474114 32487 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:26:51.474122 32487 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.474180 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:51.474336 32487 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:26:51.474350 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.474356 32487 net.cpp:165] Memory required for data: 953857500\nI0821 08:26:51.474365 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:26:51.474380 32487 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:26:51.474385 32487 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:26:51.474393 32487 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.474403 32487 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:26:51.474411 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.474416 32487 net.cpp:165] Memory required for data: 957953500\nI0821 08:26:51.474421 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:26:51.474434 32487 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:26:51.474442 32487 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:26:51.474449 32487 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:26:51.474911 32487 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:26:51.474925 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.474931 32487 net.cpp:165] Memory required for data: 962049500\nI0821 08:26:51.474939 32487 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:26:51.474951 32487 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:26:51.474958 32487 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:26:51.474967 32487 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:26:51.475214 32487 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:26:51.475231 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475237 32487 net.cpp:165] Memory required for data: 966145500\nI0821 08:26:51.475247 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:51.475261 32487 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:26:51.475268 32487 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:26:51.475277 32487 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.475333 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:51.475482 32487 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:26:51.475497 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475502 32487 net.cpp:165] Memory required for data: 970241500\nI0821 08:26:51.475510 32487 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:26:51.475519 32487 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:26:51.475527 32487 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:26:51.475540 32487 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:51.475553 32487 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:26:51.475582 32487 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:26:51.475592 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475597 32487 net.cpp:165] Memory required for data: 974337500\nI0821 08:26:51.475602 32487 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:26:51.475625 32487 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:26:51.475634 32487 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0821 08:26:51.475642 32487 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.475653 32487 net.cpp:150] Setting up L2_b3_relu\nI0821 08:26:51.475661 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475666 32487 net.cpp:165] Memory required for data: 978433500\nI0821 08:26:51.475670 32487 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.475678 32487 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.475683 32487 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:26:51.475692 32487 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:51.475700 32487 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:51.475749 32487 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.475761 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475769 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.475772 32487 net.cpp:165] Memory required for data: 986625500\nI0821 08:26:51.475778 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:26:51.475790 32487 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:26:51.475795 32487 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:51.475807 32487 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:26:51.476272 32487 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:26:51.476287 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.476294 32487 net.cpp:165] Memory required for data: 990721500\nI0821 08:26:51.476302 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:26:51.476311 32487 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:26:51.476317 32487 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0821 08:26:51.476335 32487 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:26:51.476583 32487 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:26:51.476595 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.476600 32487 net.cpp:165] Memory required for data: 994817500\nI0821 08:26:51.476611 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:51.476624 32487 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:26:51.476629 32487 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:26:51.476639 32487 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.476693 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:51.476842 32487 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:26:51.476856 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.476861 32487 net.cpp:165] Memory required for data: 998913500\nI0821 08:26:51.476871 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:26:51.476881 32487 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:26:51.476888 32487 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:26:51.476896 32487 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.476905 32487 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:26:51.476912 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.476917 32487 net.cpp:165] Memory required for data: 1003009500\nI0821 08:26:51.476922 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:26:51.476943 32487 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:26:51.476950 32487 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:26:51.476961 32487 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:26:51.477428 32487 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:26:51.477444 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.477449 32487 
net.cpp:165] Memory required for data: 1007105500\nI0821 08:26:51.477458 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:26:51.477468 32487 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:26:51.477474 32487 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:26:51.477486 32487 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:26:51.477733 32487 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:26:51.477746 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.477751 32487 net.cpp:165] Memory required for data: 1011201500\nI0821 08:26:51.477762 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:51.477774 32487 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:26:51.477780 32487 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:26:51.477788 32487 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.477846 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:51.477994 32487 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:26:51.478008 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478013 32487 net.cpp:165] Memory required for data: 1015297500\nI0821 08:26:51.478021 32487 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:26:51.478036 32487 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:26:51.478044 32487 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:26:51.478050 32487 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:51.478058 32487 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:26:51.478090 32487 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:26:51.478099 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478104 32487 net.cpp:165] Memory required for data: 1019393500\nI0821 08:26:51.478109 32487 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:26:51.478117 32487 net.cpp:100] Creating 
Layer L2_b4_relu\nI0821 08:26:51.478123 32487 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:26:51.478130 32487 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:26:51.478142 32487 net.cpp:150] Setting up L2_b4_relu\nI0821 08:26:51.478150 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478155 32487 net.cpp:165] Memory required for data: 1023489500\nI0821 08:26:51.478160 32487 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.478168 32487 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.478173 32487 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:26:51.478180 32487 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:51.478190 32487 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:51.478238 32487 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.478250 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478262 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478267 32487 net.cpp:165] Memory required for data: 1031681500\nI0821 08:26:51.478273 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:26:51.478284 32487 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:26:51.478291 32487 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:51.478302 32487 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:26:51.478767 32487 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:26:51.478788 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.478793 32487 net.cpp:165] Memory required for data: 1035777500\nI0821 08:26:51.478802 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:26:51.478812 32487 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0821 08:26:51.478818 32487 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:26:51.478829 32487 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:26:51.479081 32487 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:26:51.479095 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.479100 32487 net.cpp:165] Memory required for data: 1039873500\nI0821 08:26:51.479111 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:51.479123 32487 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:26:51.479130 32487 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:26:51.479137 32487 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.479193 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:51.479353 32487 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:26:51.479367 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.479372 32487 net.cpp:165] Memory required for data: 1043969500\nI0821 08:26:51.479382 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:26:51.479393 32487 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:26:51.479399 32487 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:26:51.479408 32487 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.479418 32487 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:26:51.479424 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.479429 32487 net.cpp:165] Memory required for data: 1048065500\nI0821 08:26:51.479434 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:26:51.479450 32487 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:26:51.479457 32487 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:26:51.479468 32487 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:26:51.479928 32487 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:26:51.479943 32487 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.479948 32487 net.cpp:165] Memory required for data: 1052161500\nI0821 08:26:51.479957 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:26:51.479966 32487 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:26:51.479972 32487 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:26:51.479984 32487 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:26:51.480232 32487 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:26:51.480244 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480250 32487 net.cpp:165] Memory required for data: 1056257500\nI0821 08:26:51.480265 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:51.480278 32487 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:26:51.480285 32487 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:26:51.480293 32487 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.480346 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:51.480494 32487 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:26:51.480509 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480514 32487 net.cpp:165] Memory required for data: 1060353500\nI0821 08:26:51.480522 32487 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:26:51.480531 32487 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:26:51.480538 32487 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:26:51.480545 32487 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:51.480556 32487 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:26:51.480582 32487 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:26:51.480595 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480607 32487 net.cpp:165] Memory required for data: 1064449500\nI0821 08:26:51.480613 32487 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0821 08:26:51.480621 32487 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:26:51.480628 32487 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:26:51.480634 32487 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.480644 32487 net.cpp:150] Setting up L2_b5_relu\nI0821 08:26:51.480651 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480656 32487 net.cpp:165] Memory required for data: 1068545500\nI0821 08:26:51.480661 32487 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.480671 32487 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.480677 32487 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:26:51.480684 32487 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:51.480695 32487 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:51.480742 32487 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.480756 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480762 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.480767 32487 net.cpp:165] Memory required for data: 1076737500\nI0821 08:26:51.480772 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:26:51.480783 32487 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:26:51.480790 32487 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:51.480801 32487 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:26:51.481276 32487 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:26:51.481292 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.481297 32487 net.cpp:165] Memory required for data: 1080833500\nI0821 08:26:51.481305 32487 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0821 08:26:51.481315 32487 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:26:51.481322 32487 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:26:51.481333 32487 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:26:51.481586 32487 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:26:51.481600 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.481606 32487 net.cpp:165] Memory required for data: 1084929500\nI0821 08:26:51.481616 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:51.481628 32487 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:26:51.481634 32487 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:26:51.481642 32487 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.481698 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:51.481843 32487 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:26:51.481856 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.481863 32487 net.cpp:165] Memory required for data: 1089025500\nI0821 08:26:51.481871 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:26:51.481879 32487 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:26:51.481886 32487 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:26:51.481896 32487 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.481907 32487 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:26:51.481914 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.481920 32487 net.cpp:165] Memory required for data: 1093121500\nI0821 08:26:51.481925 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:26:51.481940 32487 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:26:51.481947 32487 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:26:51.481958 32487 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:26:51.482424 32487 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:26:51.482445 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.482450 32487 net.cpp:165] Memory required for data: 1097217500\nI0821 08:26:51.482460 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:26:51.482470 32487 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:26:51.482475 32487 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:26:51.482484 32487 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:26:51.482729 32487 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:26:51.482743 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.482748 32487 net.cpp:165] Memory required for data: 1101313500\nI0821 08:26:51.482759 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:51.482769 32487 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:26:51.482774 32487 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:26:51.482785 32487 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.482841 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:51.482992 32487 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:26:51.483006 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483011 32487 net.cpp:165] Memory required for data: 1105409500\nI0821 08:26:51.483021 32487 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:26:51.483029 32487 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:26:51.483036 32487 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:26:51.483043 32487 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:51.483053 32487 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:26:51.483081 32487 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:26:51.483094 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483099 32487 net.cpp:165] Memory required for 
data: 1109505500\nI0821 08:26:51.483105 32487 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:26:51.483113 32487 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:26:51.483119 32487 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:26:51.483126 32487 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.483135 32487 net.cpp:150] Setting up L2_b6_relu\nI0821 08:26:51.483142 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483147 32487 net.cpp:165] Memory required for data: 1113601500\nI0821 08:26:51.483152 32487 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.483162 32487 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.483168 32487 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:26:51.483176 32487 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:51.483186 32487 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:51.483230 32487 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.483245 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483253 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483263 32487 net.cpp:165] Memory required for data: 1121793500\nI0821 08:26:51.483268 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:26:51.483280 32487 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:26:51.483288 32487 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:51.483296 32487 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:26:51.483763 32487 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:26:51.483781 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.483786 32487 net.cpp:165] Memory required for data: 
1125889500\nI0821 08:26:51.483795 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:26:51.483805 32487 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:26:51.483819 32487 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:26:51.483832 32487 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:26:51.484084 32487 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:26:51.484097 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.484102 32487 net.cpp:165] Memory required for data: 1129985500\nI0821 08:26:51.484113 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:51.484125 32487 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:26:51.484133 32487 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:26:51.484139 32487 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.484192 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:51.484350 32487 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:26:51.484365 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.484370 32487 net.cpp:165] Memory required for data: 1134081500\nI0821 08:26:51.484380 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:26:51.484387 32487 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:26:51.484395 32487 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:26:51.484406 32487 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.484417 32487 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:26:51.484426 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.484429 32487 net.cpp:165] Memory required for data: 1138177500\nI0821 08:26:51.484434 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:26:51.484448 32487 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:26:51.484455 32487 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:26:51.484464 32487 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:26:51.484935 32487 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:26:51.484949 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.484954 32487 net.cpp:165] Memory required for data: 1142273500\nI0821 08:26:51.484963 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:26:51.484975 32487 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:26:51.484982 32487 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:26:51.484990 32487 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:26:51.485244 32487 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:26:51.485261 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.485267 32487 net.cpp:165] Memory required for data: 1146369500\nI0821 08:26:51.485277 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:51.485287 32487 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:26:51.485293 32487 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:26:51.485306 32487 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.485363 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:51.485513 32487 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:26:51.485527 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.485532 32487 net.cpp:165] Memory required for data: 1150465500\nI0821 08:26:51.485540 32487 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:26:51.485549 32487 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:26:51.485556 32487 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:26:51.485563 32487 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:51.485574 32487 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:26:51.485601 32487 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:26:51.485611 32487 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:51.485616 32487 net.cpp:165] Memory required for data: 1154561500\nI0821 08:26:51.485621 32487 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:26:51.485632 32487 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:26:51.485638 32487 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:26:51.485652 32487 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.485663 32487 net.cpp:150] Setting up L2_b7_relu\nI0821 08:26:51.485671 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.485677 32487 net.cpp:165] Memory required for data: 1158657500\nI0821 08:26:51.485680 32487 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.485690 32487 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.485697 32487 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:26:51.485704 32487 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:51.485714 32487 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:51.485759 32487 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.485774 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.485782 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.485787 32487 net.cpp:165] Memory required for data: 1166849500\nI0821 08:26:51.485792 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:26:51.485803 32487 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:26:51.485810 32487 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:51.485819 32487 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:26:51.486300 32487 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:26:51.486315 32487 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:51.486320 32487 net.cpp:165] Memory required for data: 1170945500\nI0821 08:26:51.486330 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:26:51.486341 32487 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:26:51.486348 32487 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:26:51.486357 32487 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:26:51.486616 32487 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:26:51.486629 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.486634 32487 net.cpp:165] Memory required for data: 1175041500\nI0821 08:26:51.486645 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:51.486654 32487 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:26:51.486661 32487 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:26:51.486671 32487 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.486728 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:51.486883 32487 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:26:51.486896 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.486901 32487 net.cpp:165] Memory required for data: 1179137500\nI0821 08:26:51.486912 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:26:51.486919 32487 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:26:51.486925 32487 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:26:51.486937 32487 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.486948 32487 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:26:51.486954 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.486959 32487 net.cpp:165] Memory required for data: 1183233500\nI0821 08:26:51.486963 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:26:51.486979 32487 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:26:51.486985 32487 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:26:51.486994 32487 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:26:51.487637 32487 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:26:51.487653 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.487659 32487 net.cpp:165] Memory required for data: 1187329500\nI0821 08:26:51.487668 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:26:51.487681 32487 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:26:51.487696 32487 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:26:51.487706 32487 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:26:51.487962 32487 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:26:51.487975 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.487980 32487 net.cpp:165] Memory required for data: 1191425500\nI0821 08:26:51.487990 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:51.488000 32487 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:26:51.488006 32487 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:26:51.488018 32487 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.488075 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:51.488229 32487 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:26:51.488242 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.488247 32487 net.cpp:165] Memory required for data: 1195521500\nI0821 08:26:51.488262 32487 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:26:51.488272 32487 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:26:51.488279 32487 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:26:51.488286 32487 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:51.488297 32487 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:26:51.488327 32487 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0821 08:26:51.488335 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.488340 32487 net.cpp:165] Memory required for data: 1199617500\nI0821 08:26:51.488346 32487 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:26:51.488356 32487 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:26:51.488363 32487 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:26:51.488370 32487 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.488380 32487 net.cpp:150] Setting up L2_b8_relu\nI0821 08:26:51.488387 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.488392 32487 net.cpp:165] Memory required for data: 1203713500\nI0821 08:26:51.488397 32487 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.488404 32487 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.488409 32487 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:26:51.488420 32487 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:51.488443 32487 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:51.488492 32487 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.488504 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.488512 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.488517 32487 net.cpp:165] Memory required for data: 1211905500\nI0821 08:26:51.488521 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:26:51.488536 32487 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:26:51.488543 32487 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:51.488557 32487 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:26:51.489029 32487 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0821 08:26:51.489043 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.489048 32487 net.cpp:165] Memory required for data: 1216001500\nI0821 08:26:51.489058 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:26:51.489070 32487 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:26:51.489078 32487 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:26:51.489085 32487 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:26:51.489341 32487 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:26:51.489356 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.489367 32487 net.cpp:165] Memory required for data: 1220097500\nI0821 08:26:51.489379 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:51.489389 32487 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:26:51.489395 32487 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:26:51.489403 32487 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.489464 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:51.489615 32487 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:26:51.489630 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.489636 32487 net.cpp:165] Memory required for data: 1224193500\nI0821 08:26:51.489645 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:26:51.489653 32487 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:26:51.489660 32487 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:26:51.489667 32487 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.489677 32487 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:26:51.489684 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.489689 32487 net.cpp:165] Memory required for data: 1228289500\nI0821 08:26:51.489694 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:26:51.489708 32487 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:26:51.489715 32487 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:26:51.489727 32487 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:26:51.490195 32487 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:26:51.490211 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.490216 32487 net.cpp:165] Memory required for data: 1232385500\nI0821 08:26:51.490224 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:26:51.490237 32487 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:26:51.490244 32487 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:26:51.490260 32487 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:26:51.490517 32487 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:26:51.490531 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.490536 32487 net.cpp:165] Memory required for data: 1236481500\nI0821 08:26:51.490579 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:51.490592 32487 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:26:51.490598 32487 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:26:51.490607 32487 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.490669 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:51.490825 32487 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:26:51.490839 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.490844 32487 net.cpp:165] Memory required for data: 1240577500\nI0821 08:26:51.490854 32487 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:26:51.490866 32487 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:26:51.490873 32487 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:26:51.490880 32487 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:51.490888 32487 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0821 08:26:51.490919 32487 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:26:51.490929 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.490934 32487 net.cpp:165] Memory required for data: 1244673500\nI0821 08:26:51.490939 32487 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:26:51.490947 32487 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:26:51.490953 32487 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:26:51.490963 32487 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.490974 32487 net.cpp:150] Setting up L2_b9_relu\nI0821 08:26:51.490981 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.490986 32487 net.cpp:165] Memory required for data: 1248769500\nI0821 08:26:51.490998 32487 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.491006 32487 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.491011 32487 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:26:51.491022 32487 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:51.491034 32487 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:51.491084 32487 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.491096 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.491103 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.491108 32487 net.cpp:165] Memory required for data: 1256961500\nI0821 08:26:51.491113 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:26:51.491124 32487 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:26:51.491132 32487 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:51.491143 32487 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0821 08:26:51.491624 32487 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:26:51.491638 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.491644 32487 net.cpp:165] Memory required for data: 1257985500\nI0821 08:26:51.491653 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:26:51.491662 32487 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:26:51.491669 32487 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:26:51.491680 32487 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:26:51.491952 32487 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:26:51.491966 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.491971 32487 net.cpp:165] Memory required for data: 1259009500\nI0821 08:26:51.491981 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:51.491991 32487 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:26:51.491997 32487 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:26:51.492004 32487 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.492063 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:51.492218 32487 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:26:51.492231 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.492236 32487 net.cpp:165] Memory required for data: 1260033500\nI0821 08:26:51.492245 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:26:51.492262 32487 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:26:51.492270 32487 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:26:51.492277 32487 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.492287 32487 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:26:51.492295 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.492300 32487 net.cpp:165] Memory required for data: 1261057500\nI0821 08:26:51.492305 32487 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0821 08:26:51.492318 32487 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:26:51.492326 32487 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:26:51.492333 32487 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:26:51.492815 32487 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:26:51.492830 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.492835 32487 net.cpp:165] Memory required for data: 1262081500\nI0821 08:26:51.492844 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:26:51.492856 32487 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:26:51.492863 32487 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:26:51.492875 32487 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:26:51.493134 32487 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:26:51.493147 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.493160 32487 net.cpp:165] Memory required for data: 1263105500\nI0821 08:26:51.493170 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:51.493180 32487 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:26:51.493185 32487 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:26:51.493197 32487 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.493261 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:51.493428 32487 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:26:51.493441 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.493448 32487 net.cpp:165] Memory required for data: 1264129500\nI0821 08:26:51.493456 32487 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:26:51.493468 32487 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:26:51.493475 32487 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:51.493484 32487 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:26:51.493522 32487 net.cpp:150] 
Setting up L3_b1_pool\nI0821 08:26:51.493532 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.493537 32487 net.cpp:165] Memory required for data: 1265153500\nI0821 08:26:51.493542 32487 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:26:51.493551 32487 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:26:51.493557 32487 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:26:51.493564 32487 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:26:51.493574 32487 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:26:51.493607 32487 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:26:51.493616 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.493621 32487 net.cpp:165] Memory required for data: 1266177500\nI0821 08:26:51.493628 32487 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:26:51.493635 32487 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:26:51.493641 32487 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:26:51.493651 32487 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.493662 32487 net.cpp:150] Setting up L3_b1_relu\nI0821 08:26:51.493669 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.493674 32487 net.cpp:165] Memory required for data: 1267201500\nI0821 08:26:51.493680 32487 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:26:51.493688 32487 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:26:51.493696 32487 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:26:51.494910 32487 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:26:51.494928 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.494935 32487 net.cpp:165] Memory required for data: 1268225500\nI0821 08:26:51.494940 32487 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:26:51.494949 32487 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:26:51.494956 32487 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 
08:26:51.494963 32487 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:26:51.494974 32487 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:26:51.495015 32487 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:26:51.495029 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.495034 32487 net.cpp:165] Memory required for data: 1270273500\nI0821 08:26:51.495039 32487 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.495046 32487 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.495052 32487 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:26:51.495060 32487 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:51.495074 32487 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:51.495122 32487 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.495134 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.495141 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.495154 32487 net.cpp:165] Memory required for data: 1274369500\nI0821 08:26:51.495160 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:26:51.495175 32487 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:26:51.495182 32487 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:51.495193 32487 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:26:51.497156 32487 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:26:51.497174 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.497179 32487 net.cpp:165] Memory required for data: 1276417500\nI0821 08:26:51.497189 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:26:51.497202 32487 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:26:51.497210 32487 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 
08:26:51.497220 32487 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:26:51.497493 32487 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:26:51.497509 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.497514 32487 net.cpp:165] Memory required for data: 1278465500\nI0821 08:26:51.497524 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:51.497534 32487 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:26:51.497540 32487 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:26:51.497548 32487 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.497609 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:51.497766 32487 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:26:51.497781 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.497786 32487 net.cpp:165] Memory required for data: 1280513500\nI0821 08:26:51.497797 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:26:51.497804 32487 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:26:51.497812 32487 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:26:51.497818 32487 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.497828 32487 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:26:51.497835 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.497840 32487 net.cpp:165] Memory required for data: 1282561500\nI0821 08:26:51.497845 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:26:51.497860 32487 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:26:51.497866 32487 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:26:51.497879 32487 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:26:51.498908 32487 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:26:51.498924 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.498929 32487 net.cpp:165] Memory required for data: 
1284609500\nI0821 08:26:51.498939 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:26:51.498950 32487 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:26:51.498957 32487 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:26:51.498965 32487 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:26:51.499231 32487 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:26:51.499245 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499250 32487 net.cpp:165] Memory required for data: 1286657500\nI0821 08:26:51.499266 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:51.499279 32487 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:26:51.499285 32487 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:26:51.499294 32487 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.499356 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:51.499514 32487 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:26:51.499527 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499532 32487 net.cpp:165] Memory required for data: 1288705500\nI0821 08:26:51.499542 32487 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:26:51.499562 32487 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:26:51.499569 32487 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:26:51.499577 32487 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:51.499585 32487 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:26:51.499624 32487 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:26:51.499635 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499640 32487 net.cpp:165] Memory required for data: 1290753500\nI0821 08:26:51.499646 32487 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:26:51.499655 32487 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:26:51.499660 32487 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:26:51.499672 32487 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:26:51.499683 32487 net.cpp:150] Setting up L3_b2_relu\nI0821 08:26:51.499691 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499696 32487 net.cpp:165] Memory required for data: 1292801500\nI0821 08:26:51.499701 32487 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.499708 32487 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.499714 32487 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:26:51.499722 32487 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:51.499732 32487 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:51.499781 32487 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.499794 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499800 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.499805 32487 net.cpp:165] Memory required for data: 1296897500\nI0821 08:26:51.499810 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:26:51.499825 32487 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:26:51.499831 32487 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:51.499841 32487 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:26:51.500860 32487 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:26:51.500875 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.500881 32487 net.cpp:165] Memory required for data: 1298945500\nI0821 08:26:51.500890 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:26:51.500902 32487 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:26:51.500910 32487 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:26:51.500918 32487 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:26:51.501180 32487 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:26:51.501194 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.501199 32487 net.cpp:165] Memory required for data: 1300993500\nI0821 08:26:51.501209 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:51.501219 32487 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:26:51.501224 32487 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:26:51.501232 32487 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.501301 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:51.501461 32487 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:26:51.501473 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.501479 32487 net.cpp:165] Memory required for data: 1303041500\nI0821 08:26:51.501488 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:26:51.501497 32487 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:26:51.501503 32487 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:26:51.501510 32487 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.501524 32487 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:26:51.501531 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.501543 32487 net.cpp:165] Memory required for data: 1305089500\nI0821 08:26:51.501549 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:26:51.501560 32487 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:26:51.501569 32487 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:26:51.501579 32487 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:26:51.502599 32487 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:26:51.502614 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.502619 32487 
net.cpp:165] Memory required for data: 1307137500\nI0821 08:26:51.502627 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:26:51.502640 32487 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:26:51.502647 32487 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:26:51.502655 32487 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:26:51.502923 32487 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:26:51.502938 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.502943 32487 net.cpp:165] Memory required for data: 1309185500\nI0821 08:26:51.502952 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:51.502964 32487 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:26:51.502971 32487 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:26:51.502979 32487 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.503041 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:51.503201 32487 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:26:51.503214 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.503219 32487 net.cpp:165] Memory required for data: 1311233500\nI0821 08:26:51.503229 32487 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:26:51.503242 32487 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:26:51.503248 32487 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:26:51.503260 32487 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:51.503273 32487 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:26:51.503309 32487 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:26:51.503320 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.503325 32487 net.cpp:165] Memory required for data: 1313281500\nI0821 08:26:51.503330 32487 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:26:51.503342 32487 net.cpp:100] Creating Layer 
L3_b3_relu\nI0821 08:26:51.503348 32487 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:26:51.503356 32487 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.503365 32487 net.cpp:150] Setting up L3_b3_relu\nI0821 08:26:51.503373 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.503377 32487 net.cpp:165] Memory required for data: 1315329500\nI0821 08:26:51.503382 32487 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.503391 32487 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.503396 32487 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:26:51.503403 32487 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:51.503413 32487 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:51.503463 32487 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.503474 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.503481 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.503486 32487 net.cpp:165] Memory required for data: 1319425500\nI0821 08:26:51.503491 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:26:51.503506 32487 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:26:51.503513 32487 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:51.503522 32487 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:26:51.504554 32487 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:26:51.504570 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.504575 32487 net.cpp:165] Memory required for data: 1321473500\nI0821 08:26:51.504583 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:26:51.504596 32487 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0821 08:26:51.504603 32487 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:26:51.504614 32487 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:26:51.504884 32487 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:26:51.504896 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.504901 32487 net.cpp:165] Memory required for data: 1323521500\nI0821 08:26:51.504912 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:51.504921 32487 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:26:51.504927 32487 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:26:51.504935 32487 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.504997 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:51.505154 32487 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:26:51.505168 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.505173 32487 net.cpp:165] Memory required for data: 1325569500\nI0821 08:26:51.505182 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:26:51.505190 32487 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:26:51.505198 32487 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:26:51.505208 32487 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.505218 32487 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:26:51.505225 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.505230 32487 net.cpp:165] Memory required for data: 1327617500\nI0821 08:26:51.505235 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:26:51.505249 32487 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:26:51.505262 32487 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:26:51.505272 32487 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:26:51.506309 32487 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:26:51.506323 32487 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0821 08:26:51.506328 32487 net.cpp:165] Memory required for data: 1329665500\nI0821 08:26:51.506338 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:26:51.506350 32487 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:26:51.506357 32487 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:26:51.506366 32487 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:26:51.506639 32487 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:26:51.506652 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.506657 32487 net.cpp:165] Memory required for data: 1331713500\nI0821 08:26:51.506669 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:51.506680 32487 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:26:51.506687 32487 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:26:51.506695 32487 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.506757 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:51.506920 32487 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:26:51.506934 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.506939 32487 net.cpp:165] Memory required for data: 1333761500\nI0821 08:26:51.506948 32487 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:26:51.506960 32487 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:26:51.506968 32487 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:26:51.506975 32487 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:51.506985 32487 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:26:51.507019 32487 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:26:51.507036 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.507041 32487 net.cpp:165] Memory required for data: 1335809500\nI0821 08:26:51.507046 32487 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0821 08:26:51.507058 32487 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:26:51.507064 32487 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:26:51.507072 32487 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:26:51.507082 32487 net.cpp:150] Setting up L3_b4_relu\nI0821 08:26:51.507089 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.507094 32487 net.cpp:165] Memory required for data: 1337857500\nI0821 08:26:51.507099 32487 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.507107 32487 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.507112 32487 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:26:51.507119 32487 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:51.507129 32487 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:51.507179 32487 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.507191 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.507199 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.507203 32487 net.cpp:165] Memory required for data: 1341953500\nI0821 08:26:51.507208 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:26:51.507222 32487 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:26:51.507230 32487 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:51.507239 32487 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:26:51.508266 32487 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:26:51.508281 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.508287 32487 net.cpp:165] Memory required for data: 1344001500\nI0821 08:26:51.508296 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 
08:26:51.508308 32487 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:26:51.508316 32487 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:26:51.508327 32487 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:26:51.509558 32487 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:26:51.509575 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.509580 32487 net.cpp:165] Memory required for data: 1346049500\nI0821 08:26:51.509593 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:51.509605 32487 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:26:51.509613 32487 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:26:51.509620 32487 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.509687 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:51.509847 32487 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:26:51.509860 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.509865 32487 net.cpp:165] Memory required for data: 1348097500\nI0821 08:26:51.509876 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:26:51.509886 32487 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:26:51.509893 32487 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:26:51.509903 32487 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.509914 32487 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:26:51.509922 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.509927 32487 net.cpp:165] Memory required for data: 1350145500\nI0821 08:26:51.509932 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:26:51.509943 32487 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:26:51.509949 32487 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:26:51.509960 32487 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:26:51.511950 32487 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0821 08:26:51.511968 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.511975 32487 net.cpp:165] Memory required for data: 1352193500\nI0821 08:26:51.511983 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:26:51.511996 32487 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:26:51.512004 32487 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:26:51.512013 32487 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:26:51.512286 32487 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:26:51.512301 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512306 32487 net.cpp:165] Memory required for data: 1354241500\nI0821 08:26:51.512317 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:51.512329 32487 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:26:51.512336 32487 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:26:51.512344 32487 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.512405 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:51.512560 32487 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:26:51.512573 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512579 32487 net.cpp:165] Memory required for data: 1356289500\nI0821 08:26:51.512588 32487 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:26:51.512600 32487 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:26:51.512609 32487 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:26:51.512615 32487 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:51.512626 32487 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:26:51.512660 32487 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:26:51.512671 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512676 32487 net.cpp:165] Memory required for data: 1358337500\nI0821 
08:26:51.512681 32487 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:26:51.512693 32487 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:26:51.512699 32487 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:26:51.512707 32487 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.512717 32487 net.cpp:150] Setting up L3_b5_relu\nI0821 08:26:51.512724 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512728 32487 net.cpp:165] Memory required for data: 1360385500\nI0821 08:26:51.512733 32487 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.512742 32487 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.512747 32487 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:26:51.512754 32487 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:51.512763 32487 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:51.512811 32487 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.512823 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512830 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.512835 32487 net.cpp:165] Memory required for data: 1364481500\nI0821 08:26:51.512840 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:26:51.512853 32487 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:26:51.512861 32487 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:51.512871 32487 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:26:51.513882 32487 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:26:51.513897 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.513902 32487 net.cpp:165] Memory required for data: 1366529500\nI0821 08:26:51.513912 
32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:26:51.513924 32487 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:26:51.513939 32487 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:26:51.513953 32487 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:26:51.514210 32487 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:26:51.514225 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.514230 32487 net.cpp:165] Memory required for data: 1368577500\nI0821 08:26:51.514240 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:51.514250 32487 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:26:51.514261 32487 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:26:51.514269 32487 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.514331 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:51.514489 32487 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:26:51.514503 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.514508 32487 net.cpp:165] Memory required for data: 1370625500\nI0821 08:26:51.514518 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:26:51.514525 32487 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:26:51.514533 32487 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:26:51.514544 32487 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.514555 32487 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:26:51.514562 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.514567 32487 net.cpp:165] Memory required for data: 1372673500\nI0821 08:26:51.514572 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:26:51.514586 32487 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:26:51.514593 32487 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:26:51.514602 32487 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0821 08:26:51.515616 32487 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:26:51.515632 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.515637 32487 net.cpp:165] Memory required for data: 1374721500\nI0821 08:26:51.515646 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:26:51.515658 32487 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:26:51.515666 32487 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:26:51.515674 32487 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:26:51.515936 32487 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:26:51.515950 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.515955 32487 net.cpp:165] Memory required for data: 1376769500\nI0821 08:26:51.515965 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:51.515978 32487 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:26:51.515985 32487 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:26:51.515993 32487 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.516054 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:51.516211 32487 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:26:51.516224 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.516229 32487 net.cpp:165] Memory required for data: 1378817500\nI0821 08:26:51.516238 32487 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:26:51.516252 32487 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:26:51.516264 32487 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:26:51.516273 32487 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:51.516283 32487 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:26:51.516319 32487 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:26:51.516330 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.516335 
32487 net.cpp:165] Memory required for data: 1380865500\nI0821 08:26:51.516340 32487 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:26:51.516352 32487 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:26:51.516360 32487 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:26:51.516373 32487 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.516384 32487 net.cpp:150] Setting up L3_b6_relu\nI0821 08:26:51.516392 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.516396 32487 net.cpp:165] Memory required for data: 1382913500\nI0821 08:26:51.516402 32487 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.516409 32487 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.516414 32487 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:26:51.516422 32487 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:51.516433 32487 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:51.516481 32487 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.516494 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.516501 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.516505 32487 net.cpp:165] Memory required for data: 1387009500\nI0821 08:26:51.516510 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:26:51.516528 32487 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:26:51.516535 32487 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:51.516544 32487 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:26:51.517557 32487 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:26:51.517573 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.517578 32487 net.cpp:165] 
Memory required for data: 1389057500\nI0821 08:26:51.517587 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:26:51.517601 32487 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:26:51.517607 32487 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:26:51.517618 32487 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:26:51.517877 32487 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:26:51.517891 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.517896 32487 net.cpp:165] Memory required for data: 1391105500\nI0821 08:26:51.517907 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:51.517915 32487 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:26:51.517922 32487 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:26:51.517933 32487 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.517992 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:51.518149 32487 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:26:51.518162 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.518168 32487 net.cpp:165] Memory required for data: 1393153500\nI0821 08:26:51.518177 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:26:51.518211 32487 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:26:51.518220 32487 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:26:51.518229 32487 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.518239 32487 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:26:51.518246 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.518251 32487 net.cpp:165] Memory required for data: 1395201500\nI0821 08:26:51.518263 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:26:51.518278 32487 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:26:51.518285 32487 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:26:51.518295 
32487 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:26:51.519321 32487 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:26:51.519336 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.519342 32487 net.cpp:165] Memory required for data: 1397249500\nI0821 08:26:51.519351 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:26:51.519363 32487 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:26:51.519377 32487 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:26:51.519389 32487 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:26:51.519657 32487 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:26:51.519671 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.519676 32487 net.cpp:165] Memory required for data: 1399297500\nI0821 08:26:51.519686 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:51.519695 32487 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:26:51.519701 32487 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:26:51.519709 32487 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.519770 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:51.519929 32487 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:26:51.519942 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.519948 32487 net.cpp:165] Memory required for data: 1401345500\nI0821 08:26:51.519958 32487 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:26:51.519966 32487 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:26:51.519973 32487 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:26:51.519980 32487 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:51.519991 32487 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:26:51.520025 32487 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:26:51.520040 32487 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0821 08:26:51.520045 32487 net.cpp:165] Memory required for data: 1403393500\nI0821 08:26:51.520051 32487 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:26:51.520058 32487 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:26:51.520064 32487 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:26:51.520072 32487 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.520081 32487 net.cpp:150] Setting up L3_b7_relu\nI0821 08:26:51.520088 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.520093 32487 net.cpp:165] Memory required for data: 1405441500\nI0821 08:26:51.520098 32487 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.520109 32487 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.520117 32487 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:26:51.520123 32487 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:51.520134 32487 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:51.520184 32487 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.520195 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.520201 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.520206 32487 net.cpp:165] Memory required for data: 1409537500\nI0821 08:26:51.520211 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:26:51.520222 32487 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:26:51.520229 32487 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:51.520241 32487 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:26:51.521270 32487 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:26:51.521284 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:26:51.521289 32487 net.cpp:165] Memory required for data: 1411585500\nI0821 08:26:51.521298 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:26:51.521308 32487 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:26:51.521315 32487 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:26:51.521327 32487 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:26:51.521594 32487 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:26:51.521610 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.521615 32487 net.cpp:165] Memory required for data: 1413633500\nI0821 08:26:51.521633 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:51.521642 32487 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:26:51.521649 32487 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:26:51.521656 32487 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.521723 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:51.521883 32487 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:26:51.521896 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.521901 32487 net.cpp:165] Memory required for data: 1415681500\nI0821 08:26:51.521911 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:26:51.521919 32487 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:26:51.521929 32487 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:26:51.521936 32487 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.521947 32487 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:26:51.521955 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.521960 32487 net.cpp:165] Memory required for data: 1417729500\nI0821 08:26:51.521965 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:26:51.521978 32487 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:26:51.521984 32487 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0821 08:26:51.521993 32487 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:26:51.523006 32487 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:26:51.523021 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523027 32487 net.cpp:165] Memory required for data: 1419777500\nI0821 08:26:51.523036 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:26:51.523049 32487 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:26:51.523057 32487 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:26:51.523068 32487 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:26:51.523339 32487 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:26:51.523353 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523358 32487 net.cpp:165] Memory required for data: 1421825500\nI0821 08:26:51.523370 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:51.523378 32487 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:26:51.523385 32487 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:26:51.523396 32487 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.523454 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:51.523612 32487 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:26:51.523624 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523629 32487 net.cpp:165] Memory required for data: 1423873500\nI0821 08:26:51.523638 32487 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:26:51.523648 32487 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:26:51.523654 32487 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:26:51.523663 32487 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:51.523672 32487 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:26:51.523710 32487 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 
08:26:51.523721 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523726 32487 net.cpp:165] Memory required for data: 1425921500\nI0821 08:26:51.523731 32487 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:26:51.523739 32487 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:26:51.523746 32487 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:26:51.523753 32487 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.523766 32487 net.cpp:150] Setting up L3_b8_relu\nI0821 08:26:51.523774 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523778 32487 net.cpp:165] Memory required for data: 1427969500\nI0821 08:26:51.523783 32487 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.523797 32487 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.523803 32487 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:26:51.523811 32487 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:51.523821 32487 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:51.523874 32487 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.523885 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523892 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.523897 32487 net.cpp:165] Memory required for data: 1432065500\nI0821 08:26:51.523902 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:26:51.523913 32487 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:26:51.523921 32487 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:51.523932 32487 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:26:51.525919 32487 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:26:51.525938 
32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.525943 32487 net.cpp:165] Memory required for data: 1434113500\nI0821 08:26:51.525952 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:26:51.525964 32487 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:26:51.525974 32487 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:26:51.525982 32487 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:26:51.526247 32487 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:26:51.526266 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.526271 32487 net.cpp:165] Memory required for data: 1436161500\nI0821 08:26:51.526283 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:51.526293 32487 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:26:51.526299 32487 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:26:51.526310 32487 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.526371 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:51.526532 32487 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:26:51.526546 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.526551 32487 net.cpp:165] Memory required for data: 1438209500\nI0821 08:26:51.526561 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:26:51.526571 32487 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:26:51.526578 32487 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:26:51.526587 32487 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.526597 32487 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:26:51.526603 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.526608 32487 net.cpp:165] Memory required for data: 1440257500\nI0821 08:26:51.526613 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:26:51.526628 32487 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 
08:26:51.526634 32487 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:26:51.526648 32487 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:26:51.527674 32487 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:26:51.527689 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.527694 32487 net.cpp:165] Memory required for data: 1442305500\nI0821 08:26:51.527704 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:26:51.527714 32487 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:26:51.527719 32487 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:26:51.527731 32487 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:26:51.528000 32487 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:26:51.528017 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.528023 32487 net.cpp:165] Memory required for data: 1444353500\nI0821 08:26:51.528040 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:51.528050 32487 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:26:51.528057 32487 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:26:51.528065 32487 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.528125 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:51.528291 32487 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:26:51.528306 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.528311 32487 net.cpp:165] Memory required for data: 1446401500\nI0821 08:26:51.528321 32487 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:26:51.528332 32487 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:26:51.528339 32487 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:26:51.528347 32487 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:51.528355 32487 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:26:51.528391 32487 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:26:51.528404 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.528409 32487 net.cpp:165] Memory required for data: 1448449500\nI0821 08:26:51.528414 32487 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:26:51.528422 32487 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:26:51.528429 32487 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:26:51.528436 32487 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.528445 32487 net.cpp:150] Setting up L3_b9_relu\nI0821 08:26:51.528453 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.528457 32487 net.cpp:165] Memory required for data: 1450497500\nI0821 08:26:51.528462 32487 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:26:51.528470 32487 net.cpp:100] Creating Layer post_pool\nI0821 08:26:51.528476 32487 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:26:51.528487 32487 net.cpp:408] post_pool -> post_pool\nI0821 08:26:51.528523 32487 net.cpp:150] Setting up post_pool\nI0821 08:26:51.528535 32487 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:26:51.528540 32487 net.cpp:165] Memory required for data: 1450529500\nI0821 08:26:51.528545 32487 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:26:51.528640 32487 net.cpp:100] Creating Layer post_FC\nI0821 08:26:51.528653 32487 net.cpp:434] post_FC <- post_pool\nI0821 08:26:51.528669 32487 net.cpp:408] post_FC -> post_FC_top\nI0821 08:26:51.528936 32487 net.cpp:150] Setting up post_FC\nI0821 08:26:51.528954 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.528959 32487 net.cpp:165] Memory required for data: 1450534500\nI0821 08:26:51.528969 32487 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:26:51.528980 32487 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:26:51.528986 32487 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:26:51.528995 32487 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:26:51.529006 32487 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:26:51.529058 32487 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:26:51.529072 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.529078 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.529083 32487 net.cpp:165] Memory required for data: 1450544500\nI0821 08:26:51.529088 32487 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:26:51.529135 32487 net.cpp:100] Creating Layer accuracy\nI0821 08:26:51.529148 32487 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:26:51.529156 32487 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:26:51.529165 32487 net.cpp:408] accuracy -> accuracy\nI0821 08:26:51.529211 32487 net.cpp:150] Setting up accuracy\nI0821 08:26:51.529225 32487 net.cpp:157] Top shape: (1)\nI0821 08:26:51.529230 32487 net.cpp:165] Memory required for data: 1450544504\nI0821 08:26:51.529245 32487 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:51.529260 32487 net.cpp:100] Creating Layer loss\nI0821 08:26:51.529268 32487 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:26:51.529275 32487 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:26:51.529286 32487 net.cpp:408] loss -> loss\nI0821 08:26:51.530463 32487 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:51.531838 32487 net.cpp:150] Setting up loss\nI0821 08:26:51.531857 32487 net.cpp:157] Top shape: (1)\nI0821 08:26:51.531862 32487 net.cpp:160]     with loss weight 1\nI0821 08:26:51.531951 32487 net.cpp:165] Memory required for data: 1450544508\nI0821 08:26:51.531960 32487 net.cpp:226] loss needs backward computation.\nI0821 08:26:51.531967 32487 net.cpp:228] accuracy does not need backward computation.\nI0821 08:26:51.531973 32487 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:26:51.531980 32487 net.cpp:226] 
post_FC needs backward computation.\nI0821 08:26:51.531985 32487 net.cpp:226] post_pool needs backward computation.\nI0821 08:26:51.531989 32487 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:26:51.531994 32487 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.532001 32487 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.532006 32487 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.532011 32487 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.532016 32487 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.532021 32487 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.532025 32487 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.532030 32487 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.532035 32487 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:26:51.532042 32487 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:26:51.532047 32487 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.532052 32487 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.532058 32487 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.532063 32487 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.532068 32487 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.532073 32487 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:26:51.532078 32487 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.532083 32487 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.532091 32487 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:26:51.532097 32487 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:26:51.532104 32487 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0821 08:26:51.532109 32487 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.532114 32487 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.532119 32487 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.532124 32487 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.532130 32487 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.532135 32487 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.532140 32487 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.532145 32487 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:26:51.532150 32487 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:26:51.532155 32487 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.532161 32487 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.532166 32487 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.532171 32487 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.532183 32487 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.532189 32487 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.532194 32487 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.532200 32487 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.532205 32487 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:26:51.532210 32487 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:26:51.532215 32487 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:26:51.532222 32487 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.532227 32487 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.532232 
32487 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.532238 32487 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.532243 32487 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.532248 32487 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.532258 32487 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.532265 32487 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:26:51.532271 32487 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:26:51.532276 32487 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.532282 32487 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.532289 32487 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.532294 32487 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.532299 32487 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.532305 32487 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.532310 32487 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.532315 32487 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.532320 32487 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:26:51.532325 32487 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:26:51.532331 32487 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:26:51.532337 32487 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.532342 32487 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.532347 32487 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.532353 32487 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.532358 32487 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0821 08:26:51.532363 32487 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.532368 32487 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.532374 32487 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:26:51.532379 32487 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:26:51.532388 32487 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.532394 32487 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.532400 32487 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.532405 32487 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.532411 32487 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.532416 32487 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.532421 32487 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.532426 32487 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.532433 32487 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:26:51.532438 32487 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:26:51.532444 32487 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:26:51.532455 32487 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:26:51.532461 32487 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.532467 32487 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:26:51.532472 32487 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.532479 32487 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.532483 32487 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.532490 32487 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.532495 32487 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0821 08:26:51.532500 32487 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.532505 32487 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.532510 32487 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:26:51.532516 32487 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:26:51.532521 32487 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.532527 32487 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.532532 32487 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.532538 32487 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.532543 32487 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.532548 32487 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.532553 32487 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.532558 32487 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.532564 32487 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:26:51.532570 32487 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:26:51.532575 32487 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.532580 32487 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.532586 32487 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.532591 32487 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.532596 32487 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.532601 32487 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:26:51.532608 32487 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.532613 32487 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.532618 32487 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:26:51.532624 32487 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:26:51.532629 32487 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:26:51.532634 32487 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.532639 32487 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.532644 32487 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.532650 32487 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.532655 32487 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.532660 32487 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.532666 32487 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.532671 32487 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:26:51.532677 32487 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:26:51.532682 32487 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.532690 32487 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.532694 32487 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.532701 32487 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.532706 32487 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.532711 32487 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.532721 32487 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.532727 32487 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.532732 32487 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:26:51.532738 32487 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:26:51.532744 32487 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:26:51.532749 32487 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.532755 32487 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.532763 32487 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.532769 32487 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.532775 32487 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.532780 32487 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.532786 32487 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.532791 32487 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:26:51.532797 32487 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:26:51.532802 32487 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.532809 32487 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.532814 32487 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.532820 32487 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.532825 32487 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.532831 32487 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.532836 32487 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.532842 32487 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.532847 32487 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:26:51.532855 32487 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:26:51.532860 32487 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:26:51.532866 32487 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.532871 32487 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.532876 32487 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.532882 32487 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.532887 32487 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:26:51.532892 32487 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.532898 32487 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.532904 32487 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:26:51.532910 32487 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:26:51.532915 32487 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.532922 32487 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.532927 32487 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.532933 32487 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.532938 32487 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.532944 32487 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.532949 32487 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.532955 32487 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.532960 32487 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:26:51.532966 32487 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:26:51.532974 32487 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:26:51.532979 32487 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:26:51.532984 32487 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.532995 32487 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:26:51.533001 32487 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.533007 32487 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.533013 32487 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.533018 32487 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.533025 32487 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:26:51.533030 32487 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.533035 32487 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.533041 32487 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:26:51.533047 32487 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:26:51.533052 32487 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.533059 32487 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.533064 32487 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.533071 32487 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.533076 32487 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.533082 32487 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.533087 32487 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.533092 32487 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.533097 32487 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:26:51.533103 32487 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:26:51.533108 32487 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.533114 32487 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.533119 32487 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.533125 32487 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.533131 32487 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.533138 32487 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:26:51.533143 32487 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.533149 32487 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.533154 32487 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:26:51.533159 32487 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:26:51.533165 32487 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:26:51.533171 32487 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.533176 32487 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.533182 32487 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.533190 32487 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.533195 32487 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.533200 32487 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.533205 32487 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.533211 32487 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:26:51.533217 32487 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:26:51.533223 32487 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.533229 32487 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.533236 32487 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.533241 32487 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.533246 32487 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.533252 32487 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.533263 32487 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.533274 32487 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.533282 32487 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:26:51.533288 32487 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:26:51.533293 32487 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:26:51.533299 32487 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.533305 32487 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.533311 32487 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.533318 32487 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.533323 32487 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.533329 32487 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.533334 32487 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.533339 32487 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:26:51.533345 32487 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:26:51.533350 32487 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.533357 32487 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.533362 32487 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.533368 32487 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.533375 32487 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.533380 32487 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.533385 32487 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.533391 32487 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.533397 32487 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:26:51.533403 32487 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:26:51.533408 32487 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:26:51.533416 32487 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.533421 32487 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.533427 32487 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.533432 32487 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.533437 32487 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:26:51.533443 32487 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.533449 32487 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.533455 32487 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:26:51.533462 32487 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:26:51.533466 32487 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.533476 32487 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.533483 32487 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.533488 32487 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.533494 32487 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.533500 32487 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.533505 32487 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.533511 32487 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.533517 32487 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:26:51.533524 32487 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:26:51.533529 32487 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.533536 32487 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.533542 32487 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.533548 32487 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.533560 32487 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.533565 32487 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:26:51.533571 32487 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.533577 32487 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.533583 32487 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:26:51.533588 32487 net.cpp:226] pre_relu needs backward computation.\nI0821 08:26:51.533594 32487 net.cpp:226] pre_scale needs backward computation.\nI0821 08:26:51.533599 32487 net.cpp:226] pre_bn needs backward computation.\nI0821 08:26:51.533604 32487 net.cpp:226] pre_conv needs backward computation.\nI0821 08:26:51.533612 32487 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:26:51.533618 32487 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:26:51.533622 32487 net.cpp:270] This network produces output accuracy\nI0821 08:26:51.533629 32487 net.cpp:270] This network produces output loss\nI0821 08:26:51.533999 32487 net.cpp:283] Network initialization done.\nI0821 08:26:51.543476 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:51.543524 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:51.543591 32487 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:26:51.543972 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:26:51.543992 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:26:51.544001 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 08:26:51.544010 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:26:51.544020 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:26:51.544029 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:26:51.544039 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:26:51.544047 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:26:51.544057 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:26:51.544066 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:26:51.544076 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:26:51.544085 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:26:51.544093 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:26:51.544102 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:26:51.544111 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:26:51.544121 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:26:51.544129 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:26:51.544137 32487 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 08:26:51.544147 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:26:51.544167 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:26:51.544178 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:26:51.544186 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:26:51.544198 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:26:51.544208 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:26:51.544217 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:26:51.544225 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:26:51.544234 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:26:51.544244 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:26:51.544251 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:26:51.544268 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:26:51.544278 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:26:51.544287 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:26:51.544296 32487 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:26:51.544306 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:26:51.544314 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:26:51.544322 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:26:51.544332 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:26:51.544340 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:26:51.544349 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:26:51.544358 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:26:51.544369 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:26:51.544378 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:26:51.544387 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:26:51.544395 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:26:51.544404 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:26:51.544414 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:26:51.544422 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 08:26:51.544430 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:26:51.544440 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:26:51.544457 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:26:51.544466 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:26:51.544476 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:26:51.544484 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:26:51.544492 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:26:51.544502 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:26:51.544509 32487 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:26:51.546154 32487 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0821 08:26:51.547756 32487 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:26:51.547994 32487 net.cpp:100] Creating Layer dataLayer\nI0821 08:26:51.548017 32487 net.cpp:408] dataLayer -> data_top\nI0821 08:26:51.548032 32487 net.cpp:408] dataLayer -> label\nI0821 08:26:51.548045 32487 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:26:51.561215 32494 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:26:51.561499 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:51.568760 32487 net.cpp:150] Setting up dataLayer\nI0821 08:26:51.568781 32487 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:26:51.568789 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.568795 32487 net.cpp:165] Memory required for data: 1536500\nI0821 08:26:51.568801 32487 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:26:51.568837 32487 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:26:51.568848 32487 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:26:51.568857 32487 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:26:51.568869 32487 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:26:51.569008 32487 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:26:51.569022 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.569030 32487 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:51.569034 32487 net.cpp:165] Memory required for data: 1537500\nI0821 08:26:51.569041 32487 layer_factory.hpp:77] Creating layer 
pre_conv\nI0821 08:26:51.569059 32487 net.cpp:100] Creating Layer pre_conv\nI0821 08:26:51.569067 32487 net.cpp:434] pre_conv <- data_top\nI0821 08:26:51.569078 32487 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:26:51.569471 32487 net.cpp:150] Setting up pre_conv\nI0821 08:26:51.569496 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.569501 32487 net.cpp:165] Memory required for data: 9729500\nI0821 08:26:51.569520 32487 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:26:51.569533 32487 net.cpp:100] Creating Layer pre_bn\nI0821 08:26:51.569540 32487 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:26:51.569552 32487 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:26:51.569941 32487 net.cpp:150] Setting up pre_bn\nI0821 08:26:51.569957 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.569962 32487 net.cpp:165] Memory required for data: 17921500\nI0821 08:26:51.569984 32487 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:51.569995 32487 net.cpp:100] Creating Layer pre_scale\nI0821 08:26:51.570001 32487 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:26:51.570009 32487 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:26:51.570075 32487 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:51.570282 32487 net.cpp:150] Setting up pre_scale\nI0821 08:26:51.570297 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.570302 32487 net.cpp:165] Memory required for data: 26113500\nI0821 08:26:51.570315 32487 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:26:51.570328 32487 net.cpp:100] Creating Layer pre_relu\nI0821 08:26:51.570334 32487 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:26:51.570343 32487 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:26:51.570358 32487 net.cpp:150] Setting up pre_relu\nI0821 08:26:51.570366 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.570370 32487 net.cpp:165] Memory required for data: 34305500\nI0821 08:26:51.570375 
32487 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:26:51.570382 32487 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:26:51.570387 32487 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:26:51.570395 32487 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:26:51.570408 32487 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:26:51.570467 32487 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:26:51.570480 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.570487 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.570492 32487 net.cpp:165] Memory required for data: 50689500\nI0821 08:26:51.570497 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:26:51.570518 32487 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:26:51.570524 32487 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:26:51.570533 32487 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:26:51.570940 32487 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:26:51.570955 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.570960 32487 net.cpp:165] Memory required for data: 58881500\nI0821 08:26:51.570974 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:26:51.570991 32487 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:26:51.570998 32487 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:26:51.571009 32487 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:26:51.571501 32487 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:26:51.571517 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.571523 32487 net.cpp:165] Memory required for data: 67073500\nI0821 08:26:51.571537 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:51.571548 32487 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 
08:26:51.571554 32487 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:26:51.571566 32487 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.571633 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:51.571813 32487 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:26:51.571841 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.571854 32487 net.cpp:165] Memory required for data: 75265500\nI0821 08:26:51.571866 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:26:51.571877 32487 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:26:51.571882 32487 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:26:51.571892 32487 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.571902 32487 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:26:51.571909 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.571914 32487 net.cpp:165] Memory required for data: 83457500\nI0821 08:26:51.571919 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:26:51.571933 32487 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:26:51.571940 32487 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:26:51.571955 32487 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:26:51.572325 32487 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:26:51.572340 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.572346 32487 net.cpp:165] Memory required for data: 91649500\nI0821 08:26:51.572355 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:26:51.572367 32487 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:26:51.572374 32487 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:26:51.572386 32487 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:26:51.572731 32487 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:26:51.572747 32487 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:51.572752 32487 net.cpp:165] Memory required for data: 99841500\nI0821 08:26:51.572767 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:51.572777 32487 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:26:51.572782 32487 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:26:51.572794 32487 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.572856 32487 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:51.573017 32487 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:26:51.573035 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573040 32487 net.cpp:165] Memory required for data: 108033500\nI0821 08:26:51.573050 32487 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:26:51.573060 32487 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:26:51.573065 32487 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:26:51.573072 32487 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:26:51.573081 32487 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:26:51.573118 32487 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:26:51.573130 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573135 32487 net.cpp:165] Memory required for data: 116225500\nI0821 08:26:51.573140 32487 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:26:51.573148 32487 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:26:51.573154 32487 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:26:51.573161 32487 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.573171 32487 net.cpp:150] Setting up L1_b1_relu\nI0821 08:26:51.573179 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573184 32487 net.cpp:165] Memory required for data: 124417500\nI0821 08:26:51.573187 32487 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.573196 32487 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.573202 32487 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:26:51.573212 32487 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:51.573222 32487 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:51.573276 32487 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:51.573299 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573307 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573312 32487 net.cpp:165] Memory required for data: 140801500\nI0821 08:26:51.573318 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:26:51.573329 32487 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:26:51.573338 32487 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:51.573346 32487 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:26:51.573721 32487 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:26:51.573737 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.573742 32487 net.cpp:165] Memory required for data: 148993500\nI0821 08:26:51.573751 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:26:51.573765 32487 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:26:51.573771 32487 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:26:51.573779 32487 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:26:51.574174 32487 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:26:51.574189 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.574195 32487 net.cpp:165] Memory required for data: 157185500\nI0821 08:26:51.574205 32487 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 08:26:51.574218 32487 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:26:51.574224 32487 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:26:51.574232 32487 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.574301 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:51.574463 32487 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:26:51.574477 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.574482 32487 net.cpp:165] Memory required for data: 165377500\nI0821 08:26:51.574491 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:26:51.574501 32487 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:26:51.574506 32487 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:26:51.574517 32487 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.574527 32487 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:26:51.574534 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.574539 32487 net.cpp:165] Memory required for data: 173569500\nI0821 08:26:51.574544 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:26:51.574560 32487 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:26:51.574566 32487 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:26:51.574574 32487 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:26:51.575014 32487 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:26:51.575032 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.575037 32487 net.cpp:165] Memory required for data: 181761500\nI0821 08:26:51.575050 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:26:51.575064 32487 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:26:51.575070 32487 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:26:51.575083 32487 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:26:51.575395 32487 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:26:51.575410 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.575415 32487 net.cpp:165] Memory required for data: 189953500\nI0821 08:26:51.575434 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:51.575448 32487 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:26:51.575454 32487 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:26:51.575464 32487 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.575534 32487 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:51.575726 32487 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:26:51.575740 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.575747 32487 net.cpp:165] Memory required for data: 198145500\nI0821 08:26:51.575767 32487 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:26:51.575783 32487 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:26:51.575793 32487 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:26:51.575803 32487 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:51.575811 32487 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:26:51.575855 32487 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:26:51.575866 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.575871 32487 net.cpp:165] Memory required for data: 206337500\nI0821 08:26:51.575876 32487 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:26:51.575884 32487 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:26:51.575891 32487 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:26:51.575904 32487 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:26:51.575917 32487 net.cpp:150] Setting up L1_b2_relu\nI0821 08:26:51.575923 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.575928 32487 net.cpp:165] Memory required for 
data: 214529500\nI0821 08:26:51.575934 32487 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.575943 32487 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.575949 32487 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:26:51.575958 32487 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:51.575968 32487 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:51.576021 32487 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:51.576035 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.576045 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.576050 32487 net.cpp:165] Memory required for data: 230913500\nI0821 08:26:51.576056 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:26:51.576066 32487 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:26:51.576072 32487 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:51.576087 32487 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:26:51.576501 32487 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:26:51.576519 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.576524 32487 net.cpp:165] Memory required for data: 239105500\nI0821 08:26:51.576534 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:26:51.576544 32487 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:26:51.576552 32487 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:26:51.576566 32487 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:26:51.576963 32487 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:26:51.576979 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.576985 32487 net.cpp:165] Memory required for data: 
247297500\nI0821 08:26:51.576997 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:51.577010 32487 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:26:51.577018 32487 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:26:51.577028 32487 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.577131 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:51.577327 32487 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:26:51.577342 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.577347 32487 net.cpp:165] Memory required for data: 255489500\nI0821 08:26:51.577359 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:26:51.577368 32487 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:26:51.577375 32487 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:26:51.577386 32487 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.577406 32487 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:26:51.577415 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.577420 32487 net.cpp:165] Memory required for data: 263681500\nI0821 08:26:51.577425 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:26:51.577440 32487 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:26:51.577450 32487 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:26:51.577462 32487 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:26:51.577857 32487 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:26:51.577872 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.577878 32487 net.cpp:165] Memory required for data: 271873500\nI0821 08:26:51.577888 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:26:51.577903 32487 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:26:51.577909 32487 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:26:51.577921 32487 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:26:51.578243 32487 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:26:51.578268 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.578274 32487 net.cpp:165] Memory required for data: 280065500\nI0821 08:26:51.578285 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:51.578300 32487 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:26:51.578310 32487 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:26:51.578320 32487 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.578383 32487 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:51.578567 32487 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:26:51.578583 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.578588 32487 net.cpp:165] Memory required for data: 288257500\nI0821 08:26:51.578598 32487 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:26:51.578610 32487 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:26:51.578618 32487 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:26:51.578627 32487 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:51.578636 32487 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:26:51.578675 32487 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:26:51.578685 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.578691 32487 net.cpp:165] Memory required for data: 296449500\nI0821 08:26:51.578698 32487 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:26:51.578706 32487 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:26:51.578713 32487 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:26:51.578723 32487 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.578737 32487 net.cpp:150] Setting up L1_b3_relu\nI0821 08:26:51.578744 32487 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 08:26:51.578749 32487 net.cpp:165] Memory required for data: 304641500\nI0821 08:26:51.578754 32487 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.578761 32487 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.578766 32487 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:26:51.578778 32487 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:51.578786 32487 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:51.578840 32487 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:51.578855 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.578861 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.578866 32487 net.cpp:165] Memory required for data: 321025500\nI0821 08:26:51.578871 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:26:51.578886 32487 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:26:51.578899 32487 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:51.578912 32487 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:26:51.579352 32487 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:26:51.579368 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.579376 32487 net.cpp:165] Memory required for data: 329217500\nI0821 08:26:51.579386 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:26:51.579396 32487 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:26:51.579402 32487 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:26:51.579413 32487 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:26:51.579731 32487 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:26:51.579746 32487 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:51.579751 32487 net.cpp:165] Memory required for data: 337409500\nI0821 08:26:51.579762 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:51.579774 32487 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:26:51.579780 32487 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:26:51.579788 32487 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.579857 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:51.580044 32487 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:26:51.580058 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.580065 32487 net.cpp:165] Memory required for data: 345601500\nI0821 08:26:51.580073 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:26:51.580081 32487 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:26:51.580087 32487 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:26:51.580104 32487 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.580116 32487 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:26:51.580123 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.580130 32487 net.cpp:165] Memory required for data: 353793500\nI0821 08:26:51.580137 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:26:51.580152 32487 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:26:51.580157 32487 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:26:51.580179 32487 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:26:51.580584 32487 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:26:51.580600 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.580606 32487 net.cpp:165] Memory required for data: 361985500\nI0821 08:26:51.580616 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:26:51.580626 32487 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:26:51.580632 32487 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:26:51.580641 32487 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:26:51.580957 32487 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:26:51.580973 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.580978 32487 net.cpp:165] Memory required for data: 370177500\nI0821 08:26:51.580989 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:51.581004 32487 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:26:51.581012 32487 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:26:51.581024 32487 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.581089 32487 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:51.581296 32487 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:26:51.581311 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.581316 32487 net.cpp:165] Memory required for data: 378369500\nI0821 08:26:51.581329 32487 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:26:51.581342 32487 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:26:51.581349 32487 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:26:51.581357 32487 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:51.581372 32487 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:26:51.581418 32487 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:26:51.581428 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.581432 32487 net.cpp:165] Memory required for data: 386561500\nI0821 08:26:51.581440 32487 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:26:51.581449 32487 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:26:51.581454 32487 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:26:51.581465 32487 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:26:51.581475 32487 net.cpp:150] 
Setting up L1_b4_relu\nI0821 08:26:51.581486 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.581491 32487 net.cpp:165] Memory required for data: 394753500\nI0821 08:26:51.581496 32487 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.581503 32487 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.581508 32487 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:26:51.581516 32487 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:51.581526 32487 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:51.581585 32487 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:51.581598 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.581606 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.581610 32487 net.cpp:165] Memory required for data: 411137500\nI0821 08:26:51.581616 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:26:51.581629 32487 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:26:51.581636 32487 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:51.581650 32487 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:26:51.582060 32487 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:26:51.582075 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.582080 32487 net.cpp:165] Memory required for data: 419329500\nI0821 08:26:51.582105 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:26:51.582118 32487 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:26:51.582125 32487 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:26:51.582137 32487 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:26:51.582461 32487 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 08:26:51.582475 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.582484 32487 net.cpp:165] Memory required for data: 427521500\nI0821 08:26:51.582495 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:51.582507 32487 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:26:51.582515 32487 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:26:51.582522 32487 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.582590 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:51.582780 32487 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:26:51.582797 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.582803 32487 net.cpp:165] Memory required for data: 435713500\nI0821 08:26:51.582811 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:26:51.582819 32487 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:26:51.582826 32487 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:26:51.582836 32487 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.582847 32487 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:26:51.582857 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.582864 32487 net.cpp:165] Memory required for data: 443905500\nI0821 08:26:51.582868 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:26:51.582891 32487 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:26:51.582900 32487 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:26:51.582908 32487 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:26:51.583948 32487 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:26:51.583964 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.583969 32487 net.cpp:165] Memory required for data: 452097500\nI0821 08:26:51.583979 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:26:51.583992 32487 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:26:51.583998 32487 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:26:51.584007 32487 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:26:51.584296 32487 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:26:51.584311 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584316 32487 net.cpp:165] Memory required for data: 460289500\nI0821 08:26:51.584326 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:51.584336 32487 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:26:51.584342 32487 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:26:51.584352 32487 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.584412 32487 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:51.584578 32487 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:26:51.584590 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584595 32487 net.cpp:165] Memory required for data: 468481500\nI0821 08:26:51.584605 32487 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:26:51.584614 32487 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:26:51.584620 32487 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:26:51.584627 32487 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:51.584637 32487 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:26:51.584672 32487 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:26:51.584687 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584693 32487 net.cpp:165] Memory required for data: 476673500\nI0821 08:26:51.584698 32487 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:26:51.584707 32487 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:26:51.584712 32487 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:26:51.584719 32487 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.584728 32487 net.cpp:150] Setting up L1_b5_relu\nI0821 08:26:51.584736 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584740 32487 net.cpp:165] Memory required for data: 484865500\nI0821 08:26:51.584746 32487 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.584758 32487 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.584764 32487 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:26:51.584771 32487 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:51.584781 32487 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:51.584831 32487 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:51.584843 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584851 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.584856 32487 net.cpp:165] Memory required for data: 501249500\nI0821 08:26:51.584861 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:26:51.584872 32487 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:26:51.584877 32487 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:51.584889 32487 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:26:51.585240 32487 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:26:51.585260 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.585273 32487 net.cpp:165] Memory required for data: 509441500\nI0821 08:26:51.585283 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:26:51.585292 32487 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:26:51.585299 32487 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:26:51.585307 32487 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 08:26:51.585582 32487 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:26:51.585597 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.585602 32487 net.cpp:165] Memory required for data: 517633500\nI0821 08:26:51.585613 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:51.585625 32487 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:26:51.585631 32487 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:26:51.585639 32487 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.585701 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:51.585865 32487 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:26:51.585878 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.585883 32487 net.cpp:165] Memory required for data: 525825500\nI0821 08:26:51.585892 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:26:51.585901 32487 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:26:51.585906 32487 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:26:51.585917 32487 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.585927 32487 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:26:51.585935 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.585939 32487 net.cpp:165] Memory required for data: 534017500\nI0821 08:26:51.585944 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:26:51.585960 32487 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:26:51.585968 32487 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:26:51.585975 32487 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:26:51.586339 32487 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:26:51.586354 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.586359 32487 net.cpp:165] Memory required for data: 542209500\nI0821 08:26:51.586369 32487 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:26:51.586380 32487 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:26:51.586387 32487 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:26:51.586400 32487 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:26:51.586671 32487 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:26:51.586685 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.586690 32487 net.cpp:165] Memory required for data: 550401500\nI0821 08:26:51.586701 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:51.586709 32487 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:26:51.586715 32487 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:26:51.586726 32487 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.586786 32487 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:51.586944 32487 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:26:51.586961 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.586966 32487 net.cpp:165] Memory required for data: 558593500\nI0821 08:26:51.586975 32487 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:26:51.586994 32487 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:26:51.587002 32487 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:26:51.587008 32487 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:51.587016 32487 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:26:51.587055 32487 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:26:51.587065 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.587070 32487 net.cpp:165] Memory required for data: 566785500\nI0821 08:26:51.587082 32487 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:26:51.587091 32487 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:26:51.587096 32487 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 08:26:51.587106 32487 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.587117 32487 net.cpp:150] Setting up L1_b6_relu\nI0821 08:26:51.587124 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.587129 32487 net.cpp:165] Memory required for data: 574977500\nI0821 08:26:51.587134 32487 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.587141 32487 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.587146 32487 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:26:51.587154 32487 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:51.587164 32487 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:51.587216 32487 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:51.587229 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.587235 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.587240 32487 net.cpp:165] Memory required for data: 591361500\nI0821 08:26:51.587245 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:26:51.587262 32487 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:26:51.587270 32487 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:51.587285 32487 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:26:51.587815 32487 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:26:51.587831 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.587836 32487 net.cpp:165] Memory required for data: 599553500\nI0821 08:26:51.587846 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:26:51.587857 32487 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:26:51.587862 32487 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 08:26:51.587891 32487 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:26:51.588181 32487 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:26:51.588196 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.588201 32487 net.cpp:165] Memory required for data: 607745500\nI0821 08:26:51.588212 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:51.588224 32487 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:26:51.588232 32487 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:26:51.588239 32487 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.588306 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:51.588471 32487 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:26:51.588485 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.588490 32487 net.cpp:165] Memory required for data: 615937500\nI0821 08:26:51.588500 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:26:51.588508 32487 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:26:51.588515 32487 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:26:51.588526 32487 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.588536 32487 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:26:51.588543 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.588547 32487 net.cpp:165] Memory required for data: 624129500\nI0821 08:26:51.588552 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:26:51.588567 32487 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:26:51.588572 32487 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:26:51.588583 32487 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:26:51.588944 32487 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:26:51.588959 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.588971 32487 
net.cpp:165] Memory required for data: 632321500\nI0821 08:26:51.588981 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:26:51.588990 32487 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:26:51.588996 32487 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:26:51.589005 32487 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:26:51.589288 32487 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:26:51.589303 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589308 32487 net.cpp:165] Memory required for data: 640513500\nI0821 08:26:51.589318 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:51.589330 32487 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:26:51.589337 32487 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:26:51.589345 32487 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.589406 32487 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:51.589573 32487 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:26:51.589587 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589593 32487 net.cpp:165] Memory required for data: 648705500\nI0821 08:26:51.589602 32487 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:26:51.589612 32487 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:26:51.589618 32487 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:26:51.589625 32487 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:51.589669 32487 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:26:51.589715 32487 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:26:51.589726 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589731 32487 net.cpp:165] Memory required for data: 656897500\nI0821 08:26:51.589736 32487 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:26:51.589745 32487 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 08:26:51.589751 32487 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:26:51.589757 32487 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.589771 32487 net.cpp:150] Setting up L1_b7_relu\nI0821 08:26:51.589778 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589783 32487 net.cpp:165] Memory required for data: 665089500\nI0821 08:26:51.589788 32487 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.589795 32487 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.589800 32487 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:26:51.589807 32487 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:51.589818 32487 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:51.589869 32487 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:51.589881 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589889 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.589893 32487 net.cpp:165] Memory required for data: 681473500\nI0821 08:26:51.589898 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:26:51.589910 32487 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:26:51.589915 32487 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:51.589927 32487 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:26:51.590291 32487 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:26:51.590306 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.590311 32487 net.cpp:165] Memory required for data: 689665500\nI0821 08:26:51.590319 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:26:51.590328 32487 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 08:26:51.590335 32487 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:26:51.590353 32487 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:26:51.590636 32487 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:26:51.590649 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.590654 32487 net.cpp:165] Memory required for data: 697857500\nI0821 08:26:51.590664 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:51.590677 32487 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:26:51.590683 32487 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:26:51.590692 32487 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.590749 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:51.590911 32487 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:26:51.590924 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.590930 32487 net.cpp:165] Memory required for data: 706049500\nI0821 08:26:51.590939 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:26:51.590947 32487 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:26:51.590953 32487 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:26:51.590965 32487 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.590975 32487 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:26:51.590982 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.590986 32487 net.cpp:165] Memory required for data: 714241500\nI0821 08:26:51.590991 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:26:51.591006 32487 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:26:51.591012 32487 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:26:51.591019 32487 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:26:51.591387 32487 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:26:51.591401 32487 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.591406 32487 net.cpp:165] Memory required for data: 722433500\nI0821 08:26:51.591415 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:26:51.591428 32487 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:26:51.591434 32487 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:26:51.591442 32487 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:26:51.591735 32487 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:26:51.591750 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.591755 32487 net.cpp:165] Memory required for data: 730625500\nI0821 08:26:51.591766 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:51.591775 32487 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:26:51.591781 32487 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:26:51.591792 32487 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.591855 32487 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:51.592018 32487 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:26:51.592032 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592037 32487 net.cpp:165] Memory required for data: 738817500\nI0821 08:26:51.592046 32487 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:26:51.592056 32487 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:26:51.592061 32487 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:26:51.592069 32487 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:51.592080 32487 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:26:51.592114 32487 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:26:51.592129 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592135 32487 net.cpp:165] Memory required for data: 747009500\nI0821 08:26:51.592140 32487 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 08:26:51.592149 32487 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:26:51.592154 32487 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:26:51.592161 32487 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.592178 32487 net.cpp:150] Setting up L1_b8_relu\nI0821 08:26:51.592186 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592191 32487 net.cpp:165] Memory required for data: 755201500\nI0821 08:26:51.592195 32487 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.592206 32487 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.592212 32487 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:26:51.592219 32487 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:51.592229 32487 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:51.592288 32487 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:51.592301 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592308 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592314 32487 net.cpp:165] Memory required for data: 771585500\nI0821 08:26:51.592319 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:26:51.592329 32487 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:26:51.592336 32487 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:51.592348 32487 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:26:51.592715 32487 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:26:51.592730 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.592736 32487 net.cpp:165] Memory required for data: 779777500\nI0821 08:26:51.592744 32487 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 08:26:51.592756 32487 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:26:51.592763 32487 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:26:51.592772 32487 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:26:51.593060 32487 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:26:51.593073 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.593078 32487 net.cpp:165] Memory required for data: 787969500\nI0821 08:26:51.593088 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:51.593097 32487 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:26:51.593103 32487 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:26:51.593111 32487 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.593173 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:51.593348 32487 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:26:51.593361 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.593367 32487 net.cpp:165] Memory required for data: 796161500\nI0821 08:26:51.593376 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:26:51.593385 32487 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:26:51.593391 32487 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:26:51.593401 32487 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.593412 32487 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:26:51.593420 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.593425 32487 net.cpp:165] Memory required for data: 804353500\nI0821 08:26:51.593430 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:26:51.593442 32487 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:26:51.593449 32487 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:26:51.593613 32487 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:26:51.593979 32487 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:26:51.593994 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.593999 32487 net.cpp:165] Memory required for data: 812545500\nI0821 08:26:51.594008 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:26:51.594018 32487 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:26:51.594024 32487 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:26:51.594043 32487 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:26:51.594329 32487 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:26:51.594348 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594354 32487 net.cpp:165] Memory required for data: 820737500\nI0821 08:26:51.594385 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:51.594398 32487 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:26:51.594403 32487 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:26:51.594411 32487 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.594472 32487 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:51.594637 32487 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:26:51.594651 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594656 32487 net.cpp:165] Memory required for data: 828929500\nI0821 08:26:51.594666 32487 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:26:51.594676 32487 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:26:51.594681 32487 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:26:51.594688 32487 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:51.594707 32487 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:26:51.594741 32487 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:26:51.594753 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594758 32487 net.cpp:165] Memory required for 
data: 837121500\nI0821 08:26:51.594763 32487 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:26:51.594770 32487 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:26:51.594776 32487 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:26:51.594787 32487 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.594797 32487 net.cpp:150] Setting up L1_b9_relu\nI0821 08:26:51.594805 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594810 32487 net.cpp:165] Memory required for data: 845313500\nI0821 08:26:51.594815 32487 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.594821 32487 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.594827 32487 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:26:51.594837 32487 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:51.594847 32487 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:51.594899 32487 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:51.594913 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594918 32487 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:51.594923 32487 net.cpp:165] Memory required for data: 861697500\nI0821 08:26:51.594928 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:26:51.594939 32487 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:26:51.594946 32487 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:51.594959 32487 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:26:51.595325 32487 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:26:51.595340 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.595346 32487 net.cpp:165] Memory required for data: 
863745500\nI0821 08:26:51.595355 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:26:51.595365 32487 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:26:51.595371 32487 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:26:51.595382 32487 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:26:51.595656 32487 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:26:51.595670 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.595675 32487 net.cpp:165] Memory required for data: 865793500\nI0821 08:26:51.595693 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:51.595708 32487 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:26:51.595715 32487 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:26:51.595723 32487 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.595783 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:51.595948 32487 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:26:51.595962 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.595966 32487 net.cpp:165] Memory required for data: 867841500\nI0821 08:26:51.595976 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:26:51.595988 32487 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:26:51.595993 32487 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:26:51.596001 32487 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.596011 32487 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:26:51.596019 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.596024 32487 net.cpp:165] Memory required for data: 869889500\nI0821 08:26:51.596029 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:26:51.596041 32487 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:26:51.596048 32487 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:26:51.596060 32487 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:26:51.596426 32487 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:26:51.596441 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.596447 32487 net.cpp:165] Memory required for data: 871937500\nI0821 08:26:51.596456 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:26:51.596465 32487 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:26:51.596472 32487 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:26:51.596482 32487 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:26:51.596753 32487 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:26:51.596767 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.596772 32487 net.cpp:165] Memory required for data: 873985500\nI0821 08:26:51.596782 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:51.596794 32487 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:26:51.596801 32487 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:26:51.596808 32487 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.596868 32487 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:51.597030 32487 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:26:51.597043 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.597048 32487 net.cpp:165] Memory required for data: 876033500\nI0821 08:26:51.597057 32487 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:26:51.597070 32487 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:26:51.597077 32487 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:51.597086 32487 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:26:51.597120 32487 net.cpp:150] Setting up L2_b1_pool\nI0821 08:26:51.597131 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.597136 32487 net.cpp:165] Memory required for data: 878081500\nI0821 08:26:51.597141 32487 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:26:51.597149 32487 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:26:51.597156 32487 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:26:51.597162 32487 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:26:51.597172 32487 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:26:51.597206 32487 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:26:51.597218 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.597223 32487 net.cpp:165] Memory required for data: 880129500\nI0821 08:26:51.597229 32487 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:26:51.597236 32487 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:26:51.597249 32487 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:26:51.597263 32487 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.597275 32487 net.cpp:150] Setting up L2_b1_relu\nI0821 08:26:51.597282 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.597287 32487 net.cpp:165] Memory required for data: 882177500\nI0821 08:26:51.597291 32487 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:26:51.597306 32487 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:26:51.597314 32487 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:26:51.599545 32487 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:26:51.599565 32487 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:51.599570 32487 net.cpp:165] Memory required for data: 884225500\nI0821 08:26:51.599576 32487 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:26:51.599586 32487 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:26:51.599592 32487 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:26:51.599601 32487 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:26:51.599611 32487 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:26:51.599656 32487 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 08:26:51.599671 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.599678 32487 net.cpp:165] Memory required for data: 888321500\nI0821 08:26:51.599683 32487 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.599690 32487 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.599696 32487 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:26:51.599704 32487 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:51.599714 32487 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:51.599771 32487 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:51.599783 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.599791 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.599795 32487 net.cpp:165] Memory required for data: 896513500\nI0821 08:26:51.599800 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:26:51.599814 32487 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:26:51.599822 32487 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:51.599831 32487 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:26:51.600352 32487 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:26:51.600366 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.600373 32487 net.cpp:165] Memory required for data: 900609500\nI0821 08:26:51.600381 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:26:51.600394 32487 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:26:51.600401 32487 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:26:51.600409 32487 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:26:51.600680 32487 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:26:51.600694 32487 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 08:26:51.600699 32487 net.cpp:165] Memory required for data: 904705500\nI0821 08:26:51.600710 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:51.600719 32487 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:26:51.600725 32487 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:26:51.600733 32487 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.600795 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:51.600953 32487 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:26:51.600970 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.600975 32487 net.cpp:165] Memory required for data: 908801500\nI0821 08:26:51.600986 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:26:51.600993 32487 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:26:51.601008 32487 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:26:51.601016 32487 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.601027 32487 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:26:51.601035 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.601039 32487 net.cpp:165] Memory required for data: 912897500\nI0821 08:26:51.601044 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:26:51.601059 32487 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:26:51.601065 32487 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:26:51.601078 32487 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:26:51.601582 32487 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:26:51.601598 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.601603 32487 net.cpp:165] Memory required for data: 916993500\nI0821 08:26:51.601613 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:26:51.601624 32487 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:26:51.601631 
32487 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:26:51.601644 32487 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:26:51.601915 32487 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:26:51.601929 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.601934 32487 net.cpp:165] Memory required for data: 921089500\nI0821 08:26:51.601945 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:51.601954 32487 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:26:51.601960 32487 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:26:51.601969 32487 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.602030 32487 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:51.602191 32487 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:26:51.602205 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.602210 32487 net.cpp:165] Memory required for data: 925185500\nI0821 08:26:51.602218 32487 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:26:51.602231 32487 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:26:51.602237 32487 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:26:51.602246 32487 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:51.602253 32487 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:26:51.602289 32487 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:26:51.602299 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.602304 32487 net.cpp:165] Memory required for data: 929281500\nI0821 08:26:51.602309 32487 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:26:51.602321 32487 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:26:51.602327 32487 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:26:51.602335 32487 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:26:51.602344 32487 net.cpp:150] 
Setting up L2_b2_relu\nI0821 08:26:51.602352 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.602356 32487 net.cpp:165] Memory required for data: 933377500\nI0821 08:26:51.602362 32487 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.602370 32487 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.602375 32487 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:26:51.602382 32487 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:51.602391 32487 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:51.602442 32487 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:51.602455 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.602463 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.602466 32487 net.cpp:165] Memory required for data: 941569500\nI0821 08:26:51.602479 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:26:51.602494 32487 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:26:51.602501 32487 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:51.602511 32487 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:26:51.603013 32487 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:26:51.603027 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.603032 32487 net.cpp:165] Memory required for data: 945665500\nI0821 08:26:51.603041 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:26:51.603054 32487 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:26:51.603061 32487 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:26:51.603072 32487 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:26:51.603349 32487 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 08:26:51.603363 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.603369 32487 net.cpp:165] Memory required for data: 949761500\nI0821 08:26:51.603380 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:51.603389 32487 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:26:51.603395 32487 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:26:51.603402 32487 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.603464 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:51.603627 32487 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:26:51.603644 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.603649 32487 net.cpp:165] Memory required for data: 953857500\nI0821 08:26:51.603658 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:26:51.603667 32487 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:26:51.603673 32487 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:26:51.603682 32487 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.603691 32487 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:26:51.603698 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.603703 32487 net.cpp:165] Memory required for data: 957953500\nI0821 08:26:51.603708 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:26:51.603721 32487 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:26:51.603727 32487 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:26:51.603739 32487 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:26:51.604240 32487 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:26:51.604259 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.604265 32487 net.cpp:165] Memory required for data: 962049500\nI0821 08:26:51.604274 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:26:51.604288 32487 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:26:51.604295 32487 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:26:51.604306 32487 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:26:51.604583 32487 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:26:51.604596 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.604601 32487 net.cpp:165] Memory required for data: 966145500\nI0821 08:26:51.604611 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:51.604620 32487 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:26:51.604626 32487 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:26:51.604635 32487 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.604696 32487 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:51.604857 32487 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:26:51.604871 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.604876 32487 net.cpp:165] Memory required for data: 970241500\nI0821 08:26:51.604884 32487 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:26:51.604897 32487 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:26:51.604910 32487 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:26:51.604918 32487 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:51.604926 32487 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:26:51.604955 32487 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:26:51.604967 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.604972 32487 net.cpp:165] Memory required for data: 974337500\nI0821 08:26:51.604977 32487 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:26:51.605000 32487 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:26:51.605006 32487 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:26:51.605013 32487 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.605023 32487 net.cpp:150] Setting up L2_b3_relu\nI0821 08:26:51.605031 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.605036 32487 net.cpp:165] Memory required for data: 978433500\nI0821 08:26:51.605041 32487 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.605048 32487 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.605053 32487 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:26:51.605060 32487 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:51.605070 32487 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:51.605123 32487 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:51.605135 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.605142 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.605147 32487 net.cpp:165] Memory required for data: 986625500\nI0821 08:26:51.605152 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:26:51.605164 32487 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:26:51.605170 32487 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:51.605182 32487 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:26:51.605700 32487 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:26:51.605715 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.605720 32487 net.cpp:165] Memory required for data: 990721500\nI0821 08:26:51.605729 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:26:51.605741 32487 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:26:51.605748 32487 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:26:51.605757 32487 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 08:26:51.606034 32487 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:26:51.606050 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.606055 32487 net.cpp:165] Memory required for data: 994817500\nI0821 08:26:51.606066 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:51.606076 32487 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:26:51.606081 32487 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:26:51.606089 32487 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.606149 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:51.606323 32487 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:26:51.606338 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.606343 32487 net.cpp:165] Memory required for data: 998913500\nI0821 08:26:51.606353 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:26:51.606360 32487 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:26:51.606367 32487 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:26:51.606377 32487 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.606389 32487 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:26:51.606395 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.606400 32487 net.cpp:165] Memory required for data: 1003009500\nI0821 08:26:51.606413 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:26:51.606426 32487 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:26:51.606431 32487 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:26:51.606443 32487 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:26:51.606936 32487 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:26:51.606951 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.606956 32487 net.cpp:165] Memory required for data: 1007105500\nI0821 08:26:51.606966 32487 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:26:51.606974 32487 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:26:51.606981 32487 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:26:51.606992 32487 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:26:51.607272 32487 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:26:51.607286 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607292 32487 net.cpp:165] Memory required for data: 1011201500\nI0821 08:26:51.607302 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:51.607316 32487 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:26:51.607322 32487 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:26:51.607331 32487 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.607390 32487 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:51.607555 32487 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:26:51.607568 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607573 32487 net.cpp:165] Memory required for data: 1015297500\nI0821 08:26:51.607583 32487 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:26:51.607594 32487 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:26:51.607601 32487 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:26:51.607609 32487 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:51.607617 32487 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:26:51.607650 32487 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:26:51.607662 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607667 32487 net.cpp:165] Memory required for data: 1019393500\nI0821 08:26:51.607672 32487 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:26:51.607681 32487 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:26:51.607686 32487 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 08:26:51.607697 32487 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:26:51.607707 32487 net.cpp:150] Setting up L2_b4_relu\nI0821 08:26:51.607714 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607719 32487 net.cpp:165] Memory required for data: 1023489500\nI0821 08:26:51.607724 32487 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.607731 32487 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.607738 32487 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:26:51.607744 32487 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:51.607754 32487 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:51.607806 32487 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:51.607818 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607825 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.607831 32487 net.cpp:165] Memory required for data: 1031681500\nI0821 08:26:51.607836 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:26:51.607846 32487 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:26:51.607852 32487 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:51.607864 32487 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:26:51.608379 32487 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:26:51.608394 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.608400 32487 net.cpp:165] Memory required for data: 1035777500\nI0821 08:26:51.608409 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:26:51.608419 32487 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:26:51.608425 32487 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 08:26:51.608438 32487 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:26:51.608708 32487 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:26:51.608722 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.608727 32487 net.cpp:165] Memory required for data: 1039873500\nI0821 08:26:51.608737 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:51.608749 32487 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:26:51.608757 32487 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:26:51.608764 32487 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.608824 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:51.608992 32487 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:26:51.609005 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.609011 32487 net.cpp:165] Memory required for data: 1043969500\nI0821 08:26:51.609020 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:26:51.609031 32487 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:26:51.609038 32487 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:26:51.609045 32487 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.609060 32487 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:26:51.609066 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.609071 32487 net.cpp:165] Memory required for data: 1048065500\nI0821 08:26:51.609076 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:26:51.609087 32487 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:26:51.609092 32487 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:26:51.609104 32487 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:26:51.609604 32487 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:26:51.609619 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.609624 32487 
net.cpp:165] Memory required for data: 1052161500\nI0821 08:26:51.609633 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:26:51.609643 32487 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:26:51.609649 32487 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:26:51.609661 32487 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:26:51.609933 32487 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:26:51.609947 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.609952 32487 net.cpp:165] Memory required for data: 1056257500\nI0821 08:26:51.609962 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:51.609974 32487 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:26:51.609982 32487 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:26:51.609989 32487 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.610047 32487 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:51.610206 32487 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:26:51.610220 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.610225 32487 net.cpp:165] Memory required for data: 1060353500\nI0821 08:26:51.610234 32487 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:26:51.610246 32487 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:26:51.610260 32487 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:26:51.610267 32487 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:51.610275 32487 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:26:51.610308 32487 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:26:51.610325 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.610330 32487 net.cpp:165] Memory required for data: 1064449500\nI0821 08:26:51.610335 32487 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:26:51.610344 32487 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 08:26:51.610350 32487 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:26:51.610360 32487 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.610370 32487 net.cpp:150] Setting up L2_b5_relu\nI0821 08:26:51.610378 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.610383 32487 net.cpp:165] Memory required for data: 1068545500\nI0821 08:26:51.610388 32487 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.610394 32487 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.610400 32487 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:26:51.610407 32487 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:51.610417 32487 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:51.610468 32487 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:51.610481 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.610487 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.610492 32487 net.cpp:165] Memory required for data: 1076737500\nI0821 08:26:51.610497 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:26:51.610508 32487 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:26:51.610515 32487 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:51.610527 32487 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:26:51.611027 32487 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:26:51.611042 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.611047 32487 net.cpp:165] Memory required for data: 1080833500\nI0821 08:26:51.611055 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:26:51.611064 32487 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 08:26:51.611071 32487 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:26:51.611088 32487 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:26:51.611364 32487 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:26:51.611378 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.611383 32487 net.cpp:165] Memory required for data: 1084929500\nI0821 08:26:51.611393 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:51.611405 32487 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:26:51.611413 32487 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:26:51.611420 32487 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.611479 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:51.611640 32487 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:26:51.611654 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.611660 32487 net.cpp:165] Memory required for data: 1089025500\nI0821 08:26:51.611668 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:26:51.611680 32487 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:26:51.611687 32487 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:26:51.611696 32487 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.611706 32487 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:26:51.611712 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.611717 32487 net.cpp:165] Memory required for data: 1093121500\nI0821 08:26:51.611722 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:26:51.611737 32487 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:26:51.611743 32487 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:26:51.611754 32487 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:26:51.612252 32487 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:26:51.612272 32487 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.612278 32487 net.cpp:165] Memory required for data: 1097217500\nI0821 08:26:51.612287 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:26:51.612296 32487 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:26:51.612303 32487 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:26:51.612314 32487 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:26:51.612587 32487 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:26:51.612601 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.612607 32487 net.cpp:165] Memory required for data: 1101313500\nI0821 08:26:51.612617 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:51.612628 32487 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:26:51.612635 32487 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:26:51.612643 32487 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.612704 32487 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:51.612864 32487 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:26:51.612879 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.612884 32487 net.cpp:165] Memory required for data: 1105409500\nI0821 08:26:51.612892 32487 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:26:51.612906 32487 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:26:51.612915 32487 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:26:51.612921 32487 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:51.612929 32487 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:26:51.612958 32487 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:26:51.612973 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.612978 32487 net.cpp:165] Memory required for data: 1109505500\nI0821 08:26:51.612984 32487 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 08:26:51.612992 32487 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:26:51.612998 32487 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:26:51.613004 32487 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.613014 32487 net.cpp:150] Setting up L2_b6_relu\nI0821 08:26:51.613021 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.613026 32487 net.cpp:165] Memory required for data: 1113601500\nI0821 08:26:51.613031 32487 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.613041 32487 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.613047 32487 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:26:51.613055 32487 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:51.613065 32487 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:51.613118 32487 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:51.613131 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.613137 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.613142 32487 net.cpp:165] Memory required for data: 1121793500\nI0821 08:26:51.613147 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:26:51.613158 32487 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:26:51.613165 32487 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:51.613178 32487 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:26:51.614678 32487 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:26:51.614696 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.614702 32487 net.cpp:165] Memory required for data: 1125889500\nI0821 08:26:51.614712 32487 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 08:26:51.614733 32487 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:26:51.614740 32487 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:26:51.614749 32487 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:26:51.615017 32487 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:26:51.615031 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.615036 32487 net.cpp:165] Memory required for data: 1129985500\nI0821 08:26:51.615047 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:51.615056 32487 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:26:51.615063 32487 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:26:51.615070 32487 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.615137 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:51.615306 32487 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:26:51.615324 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.615329 32487 net.cpp:165] Memory required for data: 1134081500\nI0821 08:26:51.615339 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:26:51.615347 32487 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:26:51.615355 32487 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:26:51.615361 32487 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.615371 32487 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:26:51.615380 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.615383 32487 net.cpp:165] Memory required for data: 1138177500\nI0821 08:26:51.615388 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:26:51.615403 32487 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:26:51.615411 32487 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:26:51.615422 32487 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:26:51.615911 32487 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:26:51.615924 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.615931 32487 net.cpp:165] Memory required for data: 1142273500\nI0821 08:26:51.615939 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:26:51.615952 32487 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:26:51.615958 32487 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:26:51.615969 32487 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:26:51.616240 32487 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:26:51.616259 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616266 32487 net.cpp:165] Memory required for data: 1146369500\nI0821 08:26:51.616276 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:51.616286 32487 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:26:51.616292 32487 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:26:51.616300 32487 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.616361 32487 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:51.616518 32487 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:26:51.616531 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616536 32487 net.cpp:165] Memory required for data: 1150465500\nI0821 08:26:51.616545 32487 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:26:51.616559 32487 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:26:51.616565 32487 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:26:51.616572 32487 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:51.616580 32487 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:26:51.616610 32487 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:26:51.616619 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616623 32487 net.cpp:165] Memory required for 
data: 1154561500\nI0821 08:26:51.616629 32487 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:26:51.616641 32487 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:26:51.616653 32487 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:26:51.616662 32487 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.616672 32487 net.cpp:150] Setting up L2_b7_relu\nI0821 08:26:51.616679 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616684 32487 net.cpp:165] Memory required for data: 1158657500\nI0821 08:26:51.616689 32487 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.616696 32487 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.616703 32487 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:26:51.616709 32487 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:51.616719 32487 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:51.616773 32487 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:51.616786 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616792 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.616797 32487 net.cpp:165] Memory required for data: 1166849500\nI0821 08:26:51.616802 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:26:51.616816 32487 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:26:51.616823 32487 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:51.616833 32487 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:26:51.617331 32487 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:26:51.617347 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.617352 32487 net.cpp:165] Memory required for data: 
1170945500\nI0821 08:26:51.617360 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:26:51.617373 32487 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:26:51.617380 32487 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:26:51.617391 32487 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:26:51.617662 32487 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:26:51.617676 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.617681 32487 net.cpp:165] Memory required for data: 1175041500\nI0821 08:26:51.617691 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:51.617700 32487 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:26:51.617707 32487 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:26:51.617714 32487 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.617775 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:51.617933 32487 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:26:51.617949 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.617955 32487 net.cpp:165] Memory required for data: 1179137500\nI0821 08:26:51.617964 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:26:51.617972 32487 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:26:51.617979 32487 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:26:51.617986 32487 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.617996 32487 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:26:51.618003 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.618008 32487 net.cpp:165] Memory required for data: 1183233500\nI0821 08:26:51.618012 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:26:51.618026 32487 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:26:51.618032 32487 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:26:51.618044 32487 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:26:51.618546 32487 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:26:51.618561 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.618566 32487 net.cpp:165] Memory required for data: 1187329500\nI0821 08:26:51.618574 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:26:51.618594 32487 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:26:51.618602 32487 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:26:51.618613 32487 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:26:51.618891 32487 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:26:51.618904 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.618909 32487 net.cpp:165] Memory required for data: 1191425500\nI0821 08:26:51.618921 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:51.618929 32487 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:26:51.618937 32487 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:26:51.618943 32487 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.619005 32487 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:51.619163 32487 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:26:51.619176 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.619181 32487 net.cpp:165] Memory required for data: 1195521500\nI0821 08:26:51.619191 32487 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:26:51.619204 32487 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:26:51.619210 32487 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:26:51.619217 32487 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:51.619225 32487 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:26:51.619259 32487 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:26:51.619271 32487 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:51.619276 32487 net.cpp:165] Memory required for data: 1199617500\nI0821 08:26:51.619282 32487 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:26:51.619292 32487 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:26:51.619299 32487 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:26:51.619307 32487 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.619318 32487 net.cpp:150] Setting up L2_b8_relu\nI0821 08:26:51.619324 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.619329 32487 net.cpp:165] Memory required for data: 1203713500\nI0821 08:26:51.619333 32487 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.619341 32487 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.619346 32487 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:26:51.619354 32487 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:51.619379 32487 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:51.619434 32487 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:51.619447 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.619455 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.619459 32487 net.cpp:165] Memory required for data: 1211905500\nI0821 08:26:51.619465 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:26:51.619479 32487 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:26:51.619487 32487 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:51.619498 32487 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:26:51.620000 32487 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:26:51.620014 32487 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:51.620019 32487 net.cpp:165] Memory required for data: 1216001500\nI0821 08:26:51.620028 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:26:51.620041 32487 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:26:51.620048 32487 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:26:51.620059 32487 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:26:51.620342 32487 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:26:51.620363 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.620369 32487 net.cpp:165] Memory required for data: 1220097500\nI0821 08:26:51.620380 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:51.620389 32487 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:26:51.620395 32487 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:26:51.620404 32487 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.620471 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:51.620633 32487 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:26:51.620646 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.620652 32487 net.cpp:165] Memory required for data: 1224193500\nI0821 08:26:51.620661 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:26:51.620669 32487 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:26:51.620676 32487 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:26:51.620687 32487 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.620697 32487 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:26:51.620705 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.620709 32487 net.cpp:165] Memory required for data: 1228289500\nI0821 08:26:51.620714 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:26:51.620731 32487 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:26:51.620738 32487 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:26:51.620746 32487 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:26:51.622237 32487 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:26:51.622262 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.622267 32487 net.cpp:165] Memory required for data: 1232385500\nI0821 08:26:51.622277 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:26:51.622287 32487 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:26:51.622298 32487 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:26:51.622308 32487 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:26:51.622575 32487 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:26:51.622589 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.622594 32487 net.cpp:165] Memory required for data: 1236481500\nI0821 08:26:51.622648 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:51.622660 32487 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:26:51.622668 32487 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:26:51.622674 32487 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.622738 32487 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:51.622898 32487 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:26:51.622912 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.622917 32487 net.cpp:165] Memory required for data: 1240577500\nI0821 08:26:51.622926 32487 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:26:51.622939 32487 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:26:51.622947 32487 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:26:51.622954 32487 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:51.622964 32487 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:26:51.622993 32487 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 08:26:51.623003 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.623008 32487 net.cpp:165] Memory required for data: 1244673500\nI0821 08:26:51.623013 32487 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:26:51.623021 32487 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:26:51.623028 32487 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:26:51.623037 32487 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.623049 32487 net.cpp:150] Setting up L2_b9_relu\nI0821 08:26:51.623055 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.623069 32487 net.cpp:165] Memory required for data: 1248769500\nI0821 08:26:51.623075 32487 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.623082 32487 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.623087 32487 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:26:51.623098 32487 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:51.623109 32487 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:51.623162 32487 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:51.623174 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.623181 32487 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:51.623185 32487 net.cpp:165] Memory required for data: 1256961500\nI0821 08:26:51.623191 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:26:51.623203 32487 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:26:51.623209 32487 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:51.623221 32487 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:26:51.623735 32487 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 08:26:51.623750 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.623756 32487 net.cpp:165] Memory required for data: 1257985500\nI0821 08:26:51.623765 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:26:51.623778 32487 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:26:51.623785 32487 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:26:51.623795 32487 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:26:51.624074 32487 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:26:51.624089 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.624094 32487 net.cpp:165] Memory required for data: 1259009500\nI0821 08:26:51.624104 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:51.624114 32487 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:26:51.624119 32487 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:26:51.624127 32487 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.624188 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:51.624366 32487 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:26:51.624387 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.624392 32487 net.cpp:165] Memory required for data: 1260033500\nI0821 08:26:51.624402 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:26:51.624410 32487 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:26:51.624418 32487 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:26:51.624424 32487 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:51.624434 32487 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:26:51.624441 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.624446 32487 net.cpp:165] Memory required for data: 1261057500\nI0821 08:26:51.624452 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:26:51.624466 32487 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 08:26:51.624472 32487 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:26:51.624485 32487 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:26:51.624984 32487 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:26:51.624997 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.625003 32487 net.cpp:165] Memory required for data: 1262081500\nI0821 08:26:51.625012 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:26:51.625025 32487 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:26:51.625031 32487 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:26:51.625039 32487 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:26:51.625324 32487 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:26:51.625344 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.625349 32487 net.cpp:165] Memory required for data: 1263105500\nI0821 08:26:51.625360 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:51.625372 32487 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:26:51.625380 32487 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:26:51.625387 32487 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:26:51.625447 32487 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:51.625617 32487 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:26:51.625630 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.625636 32487 net.cpp:165] Memory required for data: 1264129500\nI0821 08:26:51.625645 32487 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:26:51.625658 32487 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:26:51.625665 32487 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:51.625674 32487 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:26:51.625712 32487 net.cpp:150] Setting up L3_b1_pool\nI0821 08:26:51.625722 32487 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 08:26:51.625727 32487 net.cpp:165] Memory required for data: 1265153500\nI0821 08:26:51.625732 32487 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:26:51.625744 32487 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:26:51.625751 32487 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:26:51.625757 32487 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:26:51.625766 32487 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:26:51.625798 32487 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:26:51.625808 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.625813 32487 net.cpp:165] Memory required for data: 1266177500\nI0821 08:26:51.625818 32487 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:26:51.625826 32487 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:26:51.625833 32487 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:26:51.625844 32487 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:26:51.625854 32487 net.cpp:150] Setting up L3_b1_relu\nI0821 08:26:51.625861 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.625866 32487 net.cpp:165] Memory required for data: 1267201500\nI0821 08:26:51.625871 32487 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:26:51.625880 32487 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:26:51.625887 32487 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:26:51.627131 32487 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:26:51.627149 32487 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:51.627156 32487 net.cpp:165] Memory required for data: 1268225500\nI0821 08:26:51.627161 32487 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:26:51.627171 32487 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:26:51.627177 32487 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:26:51.627188 32487 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
08:26:51.627197 32487 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:26:51.627243 32487 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:26:51.627262 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.627269 32487 net.cpp:165] Memory required for data: 1270273500\nI0821 08:26:51.627274 32487 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.627281 32487 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.627287 32487 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:26:51.627298 32487 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:51.627310 32487 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:51.627363 32487 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:51.627379 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.627394 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.627399 32487 net.cpp:165] Memory required for data: 1274369500\nI0821 08:26:51.627405 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:26:51.627418 32487 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:26:51.627424 32487 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:51.627434 32487 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:26:51.628489 32487 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:26:51.628504 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.628510 32487 net.cpp:165] Memory required for data: 1276417500\nI0821 08:26:51.628520 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:26:51.628532 32487 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:26:51.628540 32487 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:26:51.628548 32487 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
08:26:51.628824 32487 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:26:51.628839 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.628844 32487 net.cpp:165] Memory required for data: 1278465500\nI0821 08:26:51.628854 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:51.628866 32487 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:26:51.628873 32487 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:26:51.628881 32487 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.628944 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:51.629109 32487 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:26:51.629122 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.629127 32487 net.cpp:165] Memory required for data: 1280513500\nI0821 08:26:51.629137 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:26:51.629146 32487 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:26:51.629153 32487 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:26:51.629163 32487 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:51.629173 32487 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:26:51.629181 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.629185 32487 net.cpp:165] Memory required for data: 1282561500\nI0821 08:26:51.629191 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:26:51.629204 32487 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:26:51.629209 32487 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:26:51.629225 32487 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:26:51.630281 32487 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:26:51.630296 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.630301 32487 net.cpp:165] Memory required for data: 1284609500\nI0821 08:26:51.630311 32487 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 08:26:51.630323 32487 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:26:51.630331 32487 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:26:51.630338 32487 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:26:51.630611 32487 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:26:51.630625 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.630630 32487 net.cpp:165] Memory required for data: 1286657500\nI0821 08:26:51.630640 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:51.630650 32487 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:26:51.630656 32487 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:26:51.630664 32487 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:26:51.630728 32487 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:51.630888 32487 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:26:51.630905 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.630910 32487 net.cpp:165] Memory required for data: 1288705500\nI0821 08:26:51.630928 32487 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:26:51.630937 32487 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:26:51.630944 32487 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:26:51.630951 32487 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:51.630959 32487 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:26:51.630996 32487 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:26:51.631007 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.631012 32487 net.cpp:165] Memory required for data: 1290753500\nI0821 08:26:51.631017 32487 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:26:51.631028 32487 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:26:51.631034 32487 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:26:51.631042 32487 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:26:51.631052 32487 net.cpp:150] Setting up L3_b2_relu\nI0821 08:26:51.631059 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.631063 32487 net.cpp:165] Memory required for data: 1292801500\nI0821 08:26:51.631068 32487 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.631078 32487 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.631085 32487 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:26:51.631093 32487 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:51.631103 32487 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:51.631150 32487 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:51.631165 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.631172 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.631177 32487 net.cpp:165] Memory required for data: 1296897500\nI0821 08:26:51.631182 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:26:51.631193 32487 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:26:51.631201 32487 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:51.631209 32487 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:26:51.632266 32487 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:26:51.632282 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.632287 32487 net.cpp:165] Memory required for data: 1298945500\nI0821 08:26:51.632297 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:26:51.632309 32487 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:26:51.632316 32487 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:26:51.632324 32487 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:26:51.632602 32487 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:26:51.632616 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.632621 32487 net.cpp:165] Memory required for data: 1300993500\nI0821 08:26:51.632632 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:51.632642 32487 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:26:51.632649 32487 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:26:51.632660 32487 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.632722 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:51.632889 32487 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:26:51.632902 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.632907 32487 net.cpp:165] Memory required for data: 1303041500\nI0821 08:26:51.632916 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:26:51.632925 32487 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:26:51.632931 32487 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:26:51.632941 32487 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:51.632952 32487 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:26:51.632967 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.632972 32487 net.cpp:165] Memory required for data: 1305089500\nI0821 08:26:51.632977 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:26:51.632988 32487 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:26:51.632994 32487 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:26:51.633007 32487 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:26:51.634053 32487 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:26:51.634068 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634073 32487 net.cpp:165] Memory required for data: 1307137500\nI0821 08:26:51.634083 32487 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:26:51.634095 32487 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:26:51.634104 32487 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:26:51.634111 32487 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:26:51.634392 32487 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:26:51.634407 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634412 32487 net.cpp:165] Memory required for data: 1309185500\nI0821 08:26:51.634423 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:51.634431 32487 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:26:51.634438 32487 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:26:51.634445 32487 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:26:51.634511 32487 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:51.634677 32487 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:26:51.634690 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634696 32487 net.cpp:165] Memory required for data: 1311233500\nI0821 08:26:51.634704 32487 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:26:51.634713 32487 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:26:51.634721 32487 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:26:51.634727 32487 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:51.634738 32487 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:26:51.634773 32487 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:26:51.634788 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634793 32487 net.cpp:165] Memory required for data: 1313281500\nI0821 08:26:51.634799 32487 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:26:51.634806 32487 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:26:51.634812 32487 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 08:26:51.634819 32487 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:26:51.634829 32487 net.cpp:150] Setting up L3_b3_relu\nI0821 08:26:51.634837 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634841 32487 net.cpp:165] Memory required for data: 1315329500\nI0821 08:26:51.634846 32487 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.634856 32487 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.634862 32487 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:26:51.634871 32487 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:51.634879 32487 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:51.634927 32487 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:51.634943 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634949 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.634954 32487 net.cpp:165] Memory required for data: 1319425500\nI0821 08:26:51.634959 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:26:51.634970 32487 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:26:51.634977 32487 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:51.634994 32487 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:26:51.636045 32487 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:26:51.636063 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.636070 32487 net.cpp:165] Memory required for data: 1321473500\nI0821 08:26:51.636078 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:26:51.636087 32487 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:26:51.636095 32487 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 08:26:51.636106 32487 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:26:51.636389 32487 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:26:51.636402 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.636407 32487 net.cpp:165] Memory required for data: 1323521500\nI0821 08:26:51.636417 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:51.636430 32487 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:26:51.636437 32487 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:26:51.636445 32487 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.636505 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:51.636670 32487 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:26:51.636684 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.636689 32487 net.cpp:165] Memory required for data: 1325569500\nI0821 08:26:51.636699 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:26:51.636708 32487 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:26:51.636713 32487 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:26:51.636724 32487 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:51.636735 32487 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:26:51.636744 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.636747 32487 net.cpp:165] Memory required for data: 1327617500\nI0821 08:26:51.636752 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:26:51.636770 32487 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:26:51.636776 32487 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:26:51.636785 32487 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:26:51.638803 32487 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:26:51.638820 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.638826 32487 net.cpp:165] Memory 
required for data: 1329665500\nI0821 08:26:51.638836 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:26:51.638849 32487 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:26:51.638857 32487 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:26:51.638866 32487 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:26:51.639147 32487 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:26:51.639160 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639166 32487 net.cpp:165] Memory required for data: 1331713500\nI0821 08:26:51.639176 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:51.639189 32487 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:26:51.639195 32487 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:26:51.639206 32487 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:26:51.639276 32487 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:51.639448 32487 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:26:51.639462 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639467 32487 net.cpp:165] Memory required for data: 1333761500\nI0821 08:26:51.639477 32487 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:26:51.639487 32487 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:26:51.639493 32487 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:26:51.639500 32487 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:51.639511 32487 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:26:51.639555 32487 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:26:51.639565 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639570 32487 net.cpp:165] Memory required for data: 1335809500\nI0821 08:26:51.639576 32487 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:26:51.639590 32487 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
08:26:51.639597 32487 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:26:51.639605 32487 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:26:51.639616 32487 net.cpp:150] Setting up L3_b4_relu\nI0821 08:26:51.639622 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639627 32487 net.cpp:165] Memory required for data: 1337857500\nI0821 08:26:51.639632 32487 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.639639 32487 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.639645 32487 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:26:51.639652 32487 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:51.639662 32487 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:51.639714 32487 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:51.639726 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639734 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.639739 32487 net.cpp:165] Memory required for data: 1341953500\nI0821 08:26:51.639744 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:26:51.639758 32487 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:26:51.639765 32487 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:51.639775 32487 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:26:51.640806 32487 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:26:51.640821 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.640826 32487 net.cpp:165] Memory required for data: 1344001500\nI0821 08:26:51.640836 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:26:51.640849 32487 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
08:26:51.640856 32487 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:26:51.640869 32487 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:26:51.641142 32487 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:26:51.641155 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.641160 32487 net.cpp:165] Memory required for data: 1346049500\nI0821 08:26:51.641171 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:51.641180 32487 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:26:51.641187 32487 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:26:51.641198 32487 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.641265 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:51.641430 32487 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:26:51.641444 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.641449 32487 net.cpp:165] Memory required for data: 1348097500\nI0821 08:26:51.641458 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:26:51.641468 32487 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:26:51.641474 32487 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:26:51.641485 32487 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:51.641496 32487 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:26:51.641505 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.641510 32487 net.cpp:165] Memory required for data: 1350145500\nI0821 08:26:51.641515 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:26:51.641528 32487 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:26:51.641535 32487 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:26:51.641553 32487 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:26:51.642590 32487 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:26:51.642604 32487 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:26:51.642611 32487 net.cpp:165] Memory required for data: 1352193500\nI0821 08:26:51.642619 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:26:51.642628 32487 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:26:51.642635 32487 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:26:51.642648 32487 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:26:51.642926 32487 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:26:51.642940 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.642946 32487 net.cpp:165] Memory required for data: 1354241500\nI0821 08:26:51.642956 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:51.642968 32487 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:26:51.642976 32487 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:26:51.642983 32487 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:26:51.643043 32487 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:51.643208 32487 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:26:51.643220 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.643225 32487 net.cpp:165] Memory required for data: 1356289500\nI0821 08:26:51.643234 32487 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:26:51.643244 32487 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:26:51.643250 32487 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:26:51.643265 32487 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:51.643277 32487 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:26:51.643312 32487 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:26:51.643323 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.643328 32487 net.cpp:165] Memory required for data: 1358337500\nI0821 08:26:51.643334 32487 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
08:26:51.643345 32487 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:26:51.643352 32487 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:26:51.643359 32487 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:26:51.643369 32487 net.cpp:150] Setting up L3_b5_relu\nI0821 08:26:51.643378 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.643381 32487 net.cpp:165] Memory required for data: 1360385500\nI0821 08:26:51.643391 32487 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.643399 32487 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.643404 32487 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:26:51.643411 32487 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:51.643421 32487 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:51.643472 32487 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:51.643484 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.643491 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.643496 32487 net.cpp:165] Memory required for data: 1364481500\nI0821 08:26:51.643501 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:26:51.643515 32487 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:26:51.643522 32487 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:51.643532 32487 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:26:51.644567 32487 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:26:51.644582 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.644588 32487 net.cpp:165] Memory required for data: 1366529500\nI0821 08:26:51.644604 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:26:51.644618 
32487 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:26:51.644624 32487 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:26:51.644635 32487 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:26:51.644912 32487 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:26:51.644927 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.644932 32487 net.cpp:165] Memory required for data: 1368577500\nI0821 08:26:51.644942 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:51.644951 32487 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:26:51.644958 32487 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:26:51.644968 32487 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.645032 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:51.645202 32487 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:26:51.645215 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.645221 32487 net.cpp:165] Memory required for data: 1370625500\nI0821 08:26:51.645231 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:26:51.645242 32487 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:26:51.645249 32487 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:26:51.645262 32487 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:51.645273 32487 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:26:51.645282 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.645287 32487 net.cpp:165] Memory required for data: 1372673500\nI0821 08:26:51.645292 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:26:51.645306 32487 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:26:51.645313 32487 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:26:51.645324 32487 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:26:51.646361 32487 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
08:26:51.646378 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.646383 32487 net.cpp:165] Memory required for data: 1374721500\nI0821 08:26:51.646391 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:26:51.646400 32487 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:26:51.646407 32487 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:26:51.646419 32487 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:26:51.646695 32487 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:26:51.646711 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.646718 32487 net.cpp:165] Memory required for data: 1376769500\nI0821 08:26:51.646728 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:51.646736 32487 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:26:51.646744 32487 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:26:51.646751 32487 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:26:51.646811 32487 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:51.646975 32487 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:26:51.646988 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.646993 32487 net.cpp:165] Memory required for data: 1378817500\nI0821 08:26:51.647002 32487 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:26:51.647016 32487 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:26:51.647022 32487 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:26:51.647029 32487 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:51.647037 32487 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:26:51.647076 32487 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:26:51.647089 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.647094 32487 net.cpp:165] Memory required for data: 1380865500\nI0821 08:26:51.647099 32487 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:26:51.647107 32487 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:26:51.647120 32487 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:26:51.647128 32487 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:26:51.647140 32487 net.cpp:150] Setting up L3_b6_relu\nI0821 08:26:51.647146 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.647151 32487 net.cpp:165] Memory required for data: 1382913500\nI0821 08:26:51.647156 32487 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.647163 32487 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.647168 32487 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:26:51.647182 32487 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:51.647193 32487 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:51.647244 32487 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:51.647260 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.647269 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.647274 32487 net.cpp:165] Memory required for data: 1387009500\nI0821 08:26:51.647279 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:26:51.647295 32487 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:26:51.647302 32487 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:51.647312 32487 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:26:51.648355 32487 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:26:51.648371 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.648376 32487 net.cpp:165] Memory required for data: 1389057500\nI0821 08:26:51.648386 32487 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:26:51.648398 32487 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:26:51.648406 32487 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:26:51.648414 32487 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:26:51.648689 32487 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:26:51.648701 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.648706 32487 net.cpp:165] Memory required for data: 1391105500\nI0821 08:26:51.648717 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:51.648730 32487 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:26:51.648736 32487 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:26:51.648746 32487 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.648808 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:51.648972 32487 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:26:51.648985 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.648990 32487 net.cpp:165] Memory required for data: 1393153500\nI0821 08:26:51.648999 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:26:51.649036 32487 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:26:51.649045 32487 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:26:51.649055 32487 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:51.649065 32487 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:26:51.649072 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.649077 32487 net.cpp:165] Memory required for data: 1395201500\nI0821 08:26:51.649082 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:26:51.649093 32487 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:26:51.649099 32487 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:26:51.649108 32487 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
08:26:51.650151 32487 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:26:51.650164 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.650171 32487 net.cpp:165] Memory required for data: 1397249500\nI0821 08:26:51.650179 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:26:51.650199 32487 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:26:51.650207 32487 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:26:51.650221 32487 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:26:51.650506 32487 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:26:51.650519 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.650524 32487 net.cpp:165] Memory required for data: 1399297500\nI0821 08:26:51.650535 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:51.650544 32487 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:26:51.650550 32487 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:26:51.650562 32487 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:26:51.650624 32487 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:51.650791 32487 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:26:51.650804 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.650810 32487 net.cpp:165] Memory required for data: 1401345500\nI0821 08:26:51.650818 32487 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:26:51.650831 32487 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:26:51.650838 32487 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:26:51.650846 32487 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:51.650854 32487 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:26:51.650892 32487 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:26:51.650904 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.650909 32487 net.cpp:165] Memory 
required for data: 1403393500\nI0821 08:26:51.650914 32487 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:26:51.650923 32487 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:26:51.650928 32487 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:26:51.650938 32487 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:26:51.650949 32487 net.cpp:150] Setting up L3_b7_relu\nI0821 08:26:51.650956 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.650961 32487 net.cpp:165] Memory required for data: 1405441500\nI0821 08:26:51.650966 32487 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.650974 32487 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.650979 32487 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:26:51.650987 32487 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:51.650997 32487 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:51.651049 32487 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:51.651062 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.651068 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.651072 32487 net.cpp:165] Memory required for data: 1409537500\nI0821 08:26:51.651078 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:26:51.651089 32487 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:26:51.651096 32487 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:51.651108 32487 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:26:51.653126 32487 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:26:51.653146 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.653151 32487 net.cpp:165] Memory required for data: 
1411585500\nI0821 08:26:51.653161 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:26:51.653175 32487 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:26:51.653183 32487 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:26:51.653192 32487 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:26:51.653482 32487 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:26:51.653504 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.653511 32487 net.cpp:165] Memory required for data: 1413633500\nI0821 08:26:51.653522 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:51.653534 32487 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:26:51.653542 32487 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:26:51.653549 32487 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.653616 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:51.653786 32487 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:26:51.653800 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.653805 32487 net.cpp:165] Memory required for data: 1415681500\nI0821 08:26:51.653815 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:26:51.653826 32487 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:26:51.653833 32487 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:26:51.653841 32487 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:51.653854 32487 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:26:51.653861 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.653867 32487 net.cpp:165] Memory required for data: 1417729500\nI0821 08:26:51.653872 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:26:51.653883 32487 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:26:51.653889 32487 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:26:51.653901 32487 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:26:51.654930 32487 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:26:51.654947 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.654952 32487 net.cpp:165] Memory required for data: 1419777500\nI0821 08:26:51.654960 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:26:51.654969 32487 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:26:51.654976 32487 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:26:51.654989 32487 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:26:51.655277 32487 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:26:51.655292 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.655297 32487 net.cpp:165] Memory required for data: 1421825500\nI0821 08:26:51.655308 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:51.655315 32487 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:26:51.655323 32487 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:26:51.655330 32487 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:26:51.655395 32487 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:51.655556 32487 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:26:51.655572 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.655578 32487 net.cpp:165] Memory required for data: 1423873500\nI0821 08:26:51.655587 32487 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:26:51.655597 32487 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:26:51.655603 32487 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:26:51.655611 32487 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:51.655619 32487 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:26:51.655656 32487 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:26:51.655669 32487 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:26:51.655674 32487 net.cpp:165] Memory required for data: 1425921500\nI0821 08:26:51.655679 32487 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:26:51.655688 32487 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:26:51.655694 32487 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:26:51.655700 32487 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:26:51.655710 32487 net.cpp:150] Setting up L3_b8_relu\nI0821 08:26:51.655717 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.655722 32487 net.cpp:165] Memory required for data: 1427969500\nI0821 08:26:51.655735 32487 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.655742 32487 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.655748 32487 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:26:51.655760 32487 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:51.655771 32487 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:51.655818 32487 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:51.655835 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.655843 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.655848 32487 net.cpp:165] Memory required for data: 1432065500\nI0821 08:26:51.655853 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:26:51.655864 32487 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:26:51.655870 32487 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:51.655880 32487 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:26:51.656908 32487 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:26:51.656924 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:26:51.656929 32487 net.cpp:165] Memory required for data: 1434113500\nI0821 08:26:51.656937 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:26:51.656950 32487 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:26:51.656957 32487 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:26:51.656966 32487 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:26:51.657248 32487 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:26:51.657266 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.657271 32487 net.cpp:165] Memory required for data: 1436161500\nI0821 08:26:51.657282 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:51.657294 32487 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:26:51.657301 32487 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:26:51.657310 32487 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.657377 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:51.657547 32487 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:26:51.657560 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.657567 32487 net.cpp:165] Memory required for data: 1438209500\nI0821 08:26:51.657575 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:26:51.657586 32487 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:26:51.657593 32487 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:26:51.657604 32487 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:51.657615 32487 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:26:51.657622 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.657627 32487 net.cpp:165] Memory required for data: 1440257500\nI0821 08:26:51.657632 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:26:51.657644 32487 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:26:51.657649 32487 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 08:26:51.657660 32487 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:26:51.658694 32487 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:26:51.658710 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.658715 32487 net.cpp:165] Memory required for data: 1442305500\nI0821 08:26:51.658723 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:26:51.658735 32487 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:26:51.658743 32487 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:26:51.658752 32487 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:26:51.659029 32487 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:26:51.659044 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.659055 32487 net.cpp:165] Memory required for data: 1444353500\nI0821 08:26:51.659066 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:51.659076 32487 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:26:51.659082 32487 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:26:51.659090 32487 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:26:51.659154 32487 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:51.659325 32487 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:26:51.659343 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.659348 32487 net.cpp:165] Memory required for data: 1446401500\nI0821 08:26:51.659358 32487 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:26:51.659368 32487 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:26:51.659374 32487 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:26:51.659381 32487 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:51.659389 32487 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:26:51.659427 32487 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
08:26:51.659440 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.659445 32487 net.cpp:165] Memory required for data: 1448449500\nI0821 08:26:51.659449 32487 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:26:51.659457 32487 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:26:51.659463 32487 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:26:51.659471 32487 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:26:51.659481 32487 net.cpp:150] Setting up L3_b9_relu\nI0821 08:26:51.659487 32487 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:51.659492 32487 net.cpp:165] Memory required for data: 1450497500\nI0821 08:26:51.659497 32487 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:26:51.659508 32487 net.cpp:100] Creating Layer post_pool\nI0821 08:26:51.659514 32487 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:26:51.659523 32487 net.cpp:408] post_pool -> post_pool\nI0821 08:26:51.659559 32487 net.cpp:150] Setting up post_pool\nI0821 08:26:51.659570 32487 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:26:51.659575 32487 net.cpp:165] Memory required for data: 1450529500\nI0821 08:26:51.659580 32487 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:26:51.659592 32487 net.cpp:100] Creating Layer post_FC\nI0821 08:26:51.659598 32487 net.cpp:434] post_FC <- post_pool\nI0821 08:26:51.659610 32487 net.cpp:408] post_FC -> post_FC_top\nI0821 08:26:51.659783 32487 net.cpp:150] Setting up post_FC\nI0821 08:26:51.659797 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.659802 32487 net.cpp:165] Memory required for data: 1450534500\nI0821 08:26:51.659811 32487 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:26:51.659823 32487 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:26:51.659831 32487 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:26:51.659844 32487 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 08:26:51.659855 32487 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:26:51.659906 32487 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:26:51.659919 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.659925 32487 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:51.659930 32487 net.cpp:165] Memory required for data: 1450544500\nI0821 08:26:51.659935 32487 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:26:51.659943 32487 net.cpp:100] Creating Layer accuracy\nI0821 08:26:51.659950 32487 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:26:51.659957 32487 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:26:51.659965 32487 net.cpp:408] accuracy -> accuracy\nI0821 08:26:51.659977 32487 net.cpp:150] Setting up accuracy\nI0821 08:26:51.659984 32487 net.cpp:157] Top shape: (1)\nI0821 08:26:51.659996 32487 net.cpp:165] Memory required for data: 1450544504\nI0821 08:26:51.660002 32487 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:51.660014 32487 net.cpp:100] Creating Layer loss\nI0821 08:26:51.660022 32487 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:26:51.660028 32487 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:26:51.660035 32487 net.cpp:408] loss -> loss\nI0821 08:26:51.660048 32487 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:51.660176 32487 net.cpp:150] Setting up loss\nI0821 08:26:51.660192 32487 net.cpp:157] Top shape: (1)\nI0821 08:26:51.660197 32487 net.cpp:160]     with loss weight 1\nI0821 08:26:51.660213 32487 net.cpp:165] Memory required for data: 1450544508\nI0821 08:26:51.660220 32487 net.cpp:226] loss needs backward computation.\nI0821 08:26:51.660226 32487 net.cpp:228] accuracy does not need backward computation.\nI0821 08:26:51.660233 32487 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:26:51.660238 32487 net.cpp:226] post_FC needs backward 
computation.\nI0821 08:26:51.660243 32487 net.cpp:226] post_pool needs backward computation.\nI0821 08:26:51.660248 32487 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:26:51.660259 32487 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.660264 32487 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.660270 32487 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.660275 32487 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.660280 32487 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.660285 32487 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.660290 32487 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.660295 32487 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.660300 32487 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:26:51.660305 32487 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:26:51.660310 32487 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.660316 32487 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.660321 32487 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.660326 32487 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.660331 32487 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.660336 32487 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:26:51.660341 32487 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.660346 32487 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.660352 32487 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:26:51.660357 32487 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:26:51.660362 32487 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 08:26:51.660368 32487 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.660373 32487 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.660378 32487 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.660383 32487 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.660388 32487 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.660393 32487 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.660398 32487 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.660403 32487 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:26:51.660408 32487 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:26:51.660413 32487 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.660419 32487 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.660424 32487 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.660436 32487 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.660442 32487 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.660447 32487 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.660452 32487 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.660459 32487 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.660464 32487 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:26:51.660470 32487 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:26:51.660475 32487 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:26:51.660480 32487 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.660485 32487 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.660490 32487 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.660495 32487 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.660501 32487 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.660506 32487 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.660511 32487 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.660521 32487 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:26:51.660526 32487 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:26:51.660531 32487 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.660537 32487 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.660542 32487 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.660547 32487 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.660553 32487 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.660558 32487 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.660563 32487 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.660568 32487 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.660574 32487 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:26:51.660579 32487 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:26:51.660584 32487 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:26:51.660590 32487 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.660595 32487 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.660600 32487 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.660606 32487 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.660611 32487 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:26:51.660616 
32487 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.660621 32487 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.660627 32487 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:26:51.660632 32487 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:26:51.660639 32487 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.660643 32487 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.660650 32487 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.660655 32487 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.660660 32487 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.660665 32487 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.660670 32487 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.660676 32487 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.660681 32487 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:26:51.660686 32487 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:26:51.660698 32487 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:26:51.660703 32487 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:26:51.660709 32487 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.660715 32487 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:26:51.660720 32487 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.660725 32487 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.660732 32487 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.660737 32487 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.660742 32487 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
08:26:51.660748 32487 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.660753 32487 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.660758 32487 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:26:51.660763 32487 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:26:51.660768 32487 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.660774 32487 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.660779 32487 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.660784 32487 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.660790 32487 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.660795 32487 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.660800 32487 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.660806 32487 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.660812 32487 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:26:51.660817 32487 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:26:51.660823 32487 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.660830 32487 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.660835 32487 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.660840 32487 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.660845 32487 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.660851 32487 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:26:51.660856 32487 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.660861 32487 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.660866 32487 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:26:51.660872 32487 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:26:51.660877 32487 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:26:51.660888 32487 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.660894 32487 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.660900 32487 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.660905 32487 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.660912 32487 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.660917 32487 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.660922 32487 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.660928 32487 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:26:51.660933 32487 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:26:51.660938 32487 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.660944 32487 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.660949 32487 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.660955 32487 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.660965 32487 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.660971 32487 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.660977 32487 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.660982 32487 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.660989 32487 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:26:51.660995 32487 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:26:51.661000 32487 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:26:51.661005 32487 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.661010 32487 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.661016 32487 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.661022 32487 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.661027 32487 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.661032 32487 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.661037 32487 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.661043 32487 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:26:51.661049 32487 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:26:51.661054 32487 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.661061 32487 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.661065 32487 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.661072 32487 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.661077 32487 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.661082 32487 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.661087 32487 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.661093 32487 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.661099 32487 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:26:51.661104 32487 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:26:51.661110 32487 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:26:51.661116 32487 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.661121 32487 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.661128 32487 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.661134 32487 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.661139 32487 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:26:51.661144 32487 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.661149 32487 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.661155 32487 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:26:51.661161 32487 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:26:51.661166 32487 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.661172 32487 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.661178 32487 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.661183 32487 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.661190 32487 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.661195 32487 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.661201 32487 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.661206 32487 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.661211 32487 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:26:51.661217 32487 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:26:51.661223 32487 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:26:51.661233 32487 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:26:51.661239 32487 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.661245 32487 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:26:51.661252 32487 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.661262 32487 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.661269 32487 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.661275 32487 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.661280 32487 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:26:51.661286 32487 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.661291 32487 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.661298 32487 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:26:51.661303 32487 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:26:51.661309 32487 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:26:51.661314 32487 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:26:51.661320 32487 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:26:51.661326 32487 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:26:51.661331 32487 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:26:51.661336 32487 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:26:51.661342 32487 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:26:51.661347 32487 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:26:51.661353 32487 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:26:51.661360 32487 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:26:51.661365 32487 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:26:51.661370 32487 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:26:51.661376 32487 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:26:51.661382 32487 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:26:51.661388 32487 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:26:51.661393 32487 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:26:51.661399 32487 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:26:51.661404 32487 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:26:51.661411 32487 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:26:51.661417 32487 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:26:51.661422 32487 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:26:51.661428 32487 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:26:51.661433 32487 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:26:51.661439 32487 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:26:51.661445 32487 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:26:51.661451 32487 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:26:51.661456 32487 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:26:51.661463 32487 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:26:51.661468 32487 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:26:51.661473 32487 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:26:51.661479 32487 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:26:51.661486 32487 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:26:51.661491 32487 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:26:51.661497 32487 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:26:51.661504 32487 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:26:51.661509 32487 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:26:51.661520 32487 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:26:51.661526 32487 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:26:51.661532 32487 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:26:51.661538 32487 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:26:51.661545 32487 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:26:51.661550 32487 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:26:51.661556 32487 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:26:51.661561 32487 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:26:51.661567 32487 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:26:51.661573 32487 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:26:51.661578 32487 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:26:51.661587 32487 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:26:51.661593 32487 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:26:51.661599 32487 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:26:51.661605 32487 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:26:51.661612 32487 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:26:51.661617 32487 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:26:51.661623 32487 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:26:51.661629 32487 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:26:51.661634 32487 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:26:51.661640 32487 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:26:51.661646 32487 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:26:51.661653 32487 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:26:51.661659 32487 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:26:51.661664 32487 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:26:51.661670 32487 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:26:51.661675 32487 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:26:51.661681 32487 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:26:51.661687 32487 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:26:51.661694 32487 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:26:51.661698 32487 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:26:51.661705 32487 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:26:51.661710 32487 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:26:51.661716 32487 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:26:51.661722 32487 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:26:51.661728 32487 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:26:51.661734 32487 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:26:51.661741 32487 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:26:51.661746 32487 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:26:51.661751 32487 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:26:51.661757 32487 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:26:51.661763 32487 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:26:51.661769 32487 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:26:51.661775 32487 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:26:51.661780 32487 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:26:51.661787 32487 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:26:51.661793 32487 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:26:51.661805 32487 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:26:51.661813 32487 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:26:51.661818 32487 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:26:51.661823 32487 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:26:51.661829 32487 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:26:51.661835 32487 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:26:51.661841 32487 net.cpp:226] pre_relu needs backward computation.\nI0821 08:26:51.661847 32487 net.cpp:226] pre_scale needs backward computation.\nI0821 08:26:51.661852 32487 net.cpp:226] pre_bn needs backward computation.\nI0821 08:26:51.661859 32487 net.cpp:226] pre_conv needs backward computation.\nI0821 08:26:51.661864 32487 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:26:51.661871 32487 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:26:51.661876 32487 net.cpp:270] This network produces output accuracy\nI0821 08:26:51.661882 32487 net.cpp:270] This network produces output loss\nI0821 08:26:51.662209 32487 net.cpp:283] Network initialization done.\nI0821 08:26:51.663210 32487 solver.cpp:60] Solver scaffolding done.\nI0821 08:26:51.885808 32487 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:26:52.243007 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:52.243072 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:52.249951 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:52.477975 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:52.478087 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:52.512969 32487 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 08:26:52.513077 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:52.960865 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:52.960935 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:52.968735 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:53.212172 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:53.212282 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:53.263859 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:53.263963 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:53.779995 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:53.780066 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:53.789203 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:54.061455 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:54.061590 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:54.132686 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:54.132817 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:54.215852 32487 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:26:54.701500 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:54.701562 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 08:26:54.711115 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:55.005882 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:55.006042 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:55.098160 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:55.098317 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:55.747084 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:55.747148 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:55.757344 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:56.075757 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:56.075937 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:56.186949 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:56.187126 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:56.896184 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:56.896256 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:56.907805 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:57.249569 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:57.249814 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:57.382038 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:57.382272 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
08:26:58.156783 32487 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:58.156847 32487 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:58.169075 32487 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:58.341547 32514 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:58.472053 32505 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:58.587648 32487 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:58.588052 32487 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:58.740684 32487 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:58.740941 32487 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:58.910543 32487 parallel.cpp:425] Starting Optimization\nI0821 08:26:58.912410 32487 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:26:58.912427 32487 solver.cpp:280] Learning Rate Policy: multistep\nI0821 08:26:58.916806 32487 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 08:28:20.860613 32487 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:28:20.860898 32487 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 08:28:24.814352 32487 solver.cpp:228] Iteration 0, loss = 5.47785\nI0821 08:28:24.814409 32487 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0821 08:28:24.814427 32487 solver.cpp:244]     Train net output #1: loss = 5.47785 (* 1 = 5.47785 loss)\nI0821 08:28:24.858517 32487 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0821 08:30:42.195233 32487 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 08:32:04.527741 32487 solver.cpp:404]     Test net output #0: accuracy = 0.1514\nI0821 08:32:04.527977 32487 solver.cpp:404]     Test net output #1: loss = 3.51698 (* 1 = 3.51698 loss)\nI0821 08:32:05.852001 32487 
solver.cpp:228] Iteration 100, loss = 2.24955\nI0821 08:32:05.852049 32487 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI0821 08:32:05.852066 32487 solver.cpp:244]     Train net output #1: loss = 2.24955 (* 1 = 2.24955 loss)\nI0821 08:32:05.934176 32487 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0821 08:34:23.018990 32487 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 08:35:45.345304 32487 solver.cpp:404]     Test net output #0: accuracy = 0.18952\nI0821 08:35:45.345607 32487 solver.cpp:404]     Test net output #1: loss = 2.17552 (* 1 = 2.17552 loss)\nI0821 08:35:46.669984 32487 solver.cpp:228] Iteration 200, loss = 2.12718\nI0821 08:35:46.670030 32487 solver.cpp:244]     Train net output #0: accuracy = 0.224\nI0821 08:35:46.670047 32487 solver.cpp:244]     Train net output #1: loss = 2.12718 (* 1 = 2.12718 loss)\nI0821 08:35:46.752454 32487 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0821 08:38:03.648932 32487 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 08:39:25.976428 32487 solver.cpp:404]     Test net output #0: accuracy = 0.20884\nI0821 08:39:25.976734 32487 solver.cpp:404]     Test net output #1: loss = 2.38901 (* 1 = 2.38901 loss)\nI0821 08:39:27.300323 32487 solver.cpp:228] Iteration 300, loss = 1.86744\nI0821 08:39:27.300364 32487 solver.cpp:244]     Train net output #0: accuracy = 0.336\nI0821 08:39:27.300380 32487 solver.cpp:244]     Train net output #1: loss = 1.86744 (* 1 = 1.86744 loss)\nI0821 08:39:27.378165 32487 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0821 08:41:44.400454 32487 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 08:43:06.722116 32487 solver.cpp:404]     Test net output #0: accuracy = 0.20292\nI0821 08:43:06.722429 32487 solver.cpp:404]     Test net output #1: loss = 2.79343 (* 1 = 2.79343 loss)\nI0821 08:43:08.046177 32487 solver.cpp:228] Iteration 400, loss = 1.73917\nI0821 08:43:08.046218 32487 solver.cpp:244]     Train net output #0: accuracy = 0.336\nI0821 08:43:08.046233 32487 
solver.cpp:244]     Train net output #1: loss = 1.73917 (* 1 = 1.73917 loss)\nI0821 08:43:08.125092 32487 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0821 08:45:25.008193 32487 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 08:46:47.339030 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2078\nI0821 08:46:47.339325 32487 solver.cpp:404]     Test net output #1: loss = 3.31252 (* 1 = 3.31252 loss)\nI0821 08:46:48.663300 32487 solver.cpp:228] Iteration 500, loss = 1.51634\nI0821 08:46:48.663342 32487 solver.cpp:244]     Train net output #0: accuracy = 0.456\nI0821 08:46:48.663358 32487 solver.cpp:244]     Train net output #1: loss = 1.51634 (* 1 = 1.51634 loss)\nI0821 08:46:48.738548 32487 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0821 08:49:05.558949 32487 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 08:50:27.881423 32487 solver.cpp:404]     Test net output #0: accuracy = 0.14612\nI0821 08:50:27.881700 32487 solver.cpp:404]     Test net output #1: loss = 3.07522 (* 1 = 3.07522 loss)\nI0821 08:50:29.205638 32487 solver.cpp:228] Iteration 600, loss = 1.31231\nI0821 08:50:29.205682 32487 solver.cpp:244]     Train net output #0: accuracy = 0.536\nI0821 08:50:29.205696 32487 solver.cpp:244]     Train net output #1: loss = 1.31231 (* 1 = 1.31231 loss)\nI0821 08:50:29.285228 32487 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0821 08:52:46.136466 32487 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 08:54:08.452606 32487 solver.cpp:404]     Test net output #0: accuracy = 0.14184\nI0821 08:54:08.452908 32487 solver.cpp:404]     Test net output #1: loss = 3.75249 (* 1 = 3.75249 loss)\nI0821 08:54:09.775913 32487 solver.cpp:228] Iteration 700, loss = 1.24874\nI0821 08:54:09.775954 32487 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0821 08:54:09.775969 32487 solver.cpp:244]     Train net output #1: loss = 1.24874 (* 1 = 1.24874 loss)\nI0821 08:54:09.853801 32487 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0821 
08:56:26.799212 32487 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 08:57:49.111214 32487 solver.cpp:404]     Test net output #0: accuracy = 0.16348\nI0821 08:57:49.111518 32487 solver.cpp:404]     Test net output #1: loss = 3.1699 (* 1 = 3.1699 loss)\nI0821 08:57:50.435209 32487 solver.cpp:228] Iteration 800, loss = 1.27036\nI0821 08:57:50.435248 32487 solver.cpp:244]     Train net output #0: accuracy = 0.528\nI0821 08:57:50.435264 32487 solver.cpp:244]     Train net output #1: loss = 1.27036 (* 1 = 1.27036 loss)\nI0821 08:57:50.519587 32487 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0821 09:00:07.384924 32487 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:01:29.705827 32487 solver.cpp:404]     Test net output #0: accuracy = 0.11084\nI0821 09:01:29.706107 32487 solver.cpp:404]     Test net output #1: loss = 3.83019 (* 1 = 3.83019 loss)\nI0821 09:01:31.029534 32487 solver.cpp:228] Iteration 900, loss = 0.983277\nI0821 09:01:31.029574 32487 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0821 09:01:31.029589 32487 solver.cpp:244]     Train net output #1: loss = 0.983277 (* 1 = 0.983277 loss)\nI0821 09:01:31.113461 32487 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0821 09:03:47.952221 32487 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 09:05:10.224511 32487 solver.cpp:404]     Test net output #0: accuracy = 0.10728\nI0821 09:05:10.224725 32487 solver.cpp:404]     Test net output #1: loss = 4.68244 (* 1 = 4.68244 loss)\nI0821 09:05:11.548053 32487 solver.cpp:228] Iteration 1000, loss = 0.868767\nI0821 09:05:11.548094 32487 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0821 09:05:11.548108 32487 solver.cpp:244]     Train net output #1: loss = 0.868767 (* 1 = 0.868767 loss)\nI0821 09:05:11.619992 32487 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0821 09:07:28.411831 32487 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 09:08:50.680585 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.10788\nI0821 09:08:50.680822 32487 solver.cpp:404]     Test net output #1: loss = 5.32063 (* 1 = 5.32063 loss)\nI0821 09:08:52.004308 32487 solver.cpp:228] Iteration 1100, loss = 0.756149\nI0821 09:08:52.004353 32487 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0821 09:08:52.004367 32487 solver.cpp:244]     Train net output #1: loss = 0.756149 (* 1 = 0.756149 loss)\nI0821 09:08:52.084920 32487 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0821 09:11:08.842818 32487 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 09:12:30.963572 32487 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 09:12:30.963819 32487 solver.cpp:404]     Test net output #1: loss = 5.66326 (* 1 = 5.66326 loss)\nI0821 09:12:32.287981 32487 solver.cpp:228] Iteration 1200, loss = 0.755936\nI0821 09:12:32.288020 32487 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0821 09:12:32.288035 32487 solver.cpp:244]     Train net output #1: loss = 0.755936 (* 1 = 0.755936 loss)\nI0821 09:12:32.373816 32487 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0821 09:14:49.236730 32487 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 09:16:11.394557 32487 solver.cpp:404]     Test net output #0: accuracy = 0.1024\nI0821 09:16:11.394796 32487 solver.cpp:404]     Test net output #1: loss = 5.45614 (* 1 = 5.45614 loss)\nI0821 09:16:12.718858 32487 solver.cpp:228] Iteration 1300, loss = 0.656782\nI0821 09:16:12.718894 32487 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0821 09:16:12.718909 32487 solver.cpp:244]     Train net output #1: loss = 0.656782 (* 1 = 0.656782 loss)\nI0821 09:16:12.791451 32487 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0821 09:18:29.614645 32487 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 09:19:51.796452 32487 solver.cpp:404]     Test net output #0: accuracy = 0.1024\nI0821 09:19:51.796697 32487 solver.cpp:404]     Test net output #1: loss = 5.1784 (* 1 = 5.1784 loss)\nI0821 09:19:53.120230 32487 
solver.cpp:228] Iteration 1400, loss = 0.598348\nI0821 09:19:53.120270 32487 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0821 09:19:53.120283 32487 solver.cpp:244]     Train net output #1: loss = 0.598348 (* 1 = 0.598348 loss)\nI0821 09:19:53.193797 32487 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0821 09:22:10.037765 32487 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 09:23:32.230646 32487 solver.cpp:404]     Test net output #0: accuracy = 0.11624\nI0821 09:23:32.230862 32487 solver.cpp:404]     Test net output #1: loss = 4.83933 (* 1 = 4.83933 loss)\nI0821 09:23:33.554231 32487 solver.cpp:228] Iteration 1500, loss = 0.615086\nI0821 09:23:33.554272 32487 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 09:23:33.554287 32487 solver.cpp:244]     Train net output #1: loss = 0.615086 (* 1 = 0.615086 loss)\nI0821 09:23:33.625993 32487 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0821 09:25:50.423918 32487 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 09:27:12.484678 32487 solver.cpp:404]     Test net output #0: accuracy = 0.12404\nI0821 09:27:12.484920 32487 solver.cpp:404]     Test net output #1: loss = 5.11808 (* 1 = 5.11808 loss)\nI0821 09:27:13.807708 32487 solver.cpp:228] Iteration 1600, loss = 0.539274\nI0821 09:27:13.807749 32487 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 09:27:13.807765 32487 solver.cpp:244]     Train net output #1: loss = 0.539274 (* 1 = 0.539274 loss)\nI0821 09:27:13.879963 32487 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0821 09:29:30.666713 32487 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 09:30:52.532974 32487 solver.cpp:404]     Test net output #0: accuracy = 0.10564\nI0821 09:30:52.533223 32487 solver.cpp:404]     Test net output #1: loss = 5.0172 (* 1 = 5.0172 loss)\nI0821 09:30:53.856799 32487 solver.cpp:228] Iteration 1700, loss = 0.43418\nI0821 09:30:53.856840 32487 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 
09:30:53.856855 32487 solver.cpp:244]     Train net output #1: loss = 0.43418 (* 1 = 0.43418 loss)\nI0821 09:30:53.934928 32487 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0821 09:33:10.746917 32487 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 09:34:33.047076 32487 solver.cpp:404]     Test net output #0: accuracy = 0.13548\nI0821 09:34:33.047313 32487 solver.cpp:404]     Test net output #1: loss = 4.05278 (* 1 = 4.05278 loss)\nI0821 09:34:34.370882 32487 solver.cpp:228] Iteration 1800, loss = 0.483451\nI0821 09:34:34.370924 32487 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 09:34:34.370939 32487 solver.cpp:244]     Train net output #1: loss = 0.483451 (* 1 = 0.483451 loss)\nI0821 09:34:34.449259 32487 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0821 09:36:51.188130 32487 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 09:38:13.476558 32487 solver.cpp:404]     Test net output #0: accuracy = 0.12272\nI0821 09:38:13.476769 32487 solver.cpp:404]     Test net output #1: loss = 4.22943 (* 1 = 4.22943 loss)\nI0821 09:38:14.800058 32487 solver.cpp:228] Iteration 1900, loss = 0.308138\nI0821 09:38:14.800101 32487 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 09:38:14.800114 32487 solver.cpp:244]     Train net output #1: loss = 0.308138 (* 1 = 0.308138 loss)\nI0821 09:38:14.876528 32487 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0821 09:40:31.684309 32487 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 09:41:53.978421 32487 solver.cpp:404]     Test net output #0: accuracy = 0.153\nI0821 09:41:53.978641 32487 solver.cpp:404]     Test net output #1: loss = 4.37452 (* 1 = 4.37452 loss)\nI0821 09:41:55.302075 32487 solver.cpp:228] Iteration 2000, loss = 0.353918\nI0821 09:41:55.302117 32487 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:41:55.302132 32487 solver.cpp:244]     Train net output #1: loss = 0.353918 (* 1 = 0.353918 loss)\nI0821 09:41:55.378839 32487 sgd_solver.cpp:166] 
Iteration 2000, lr = 0.35\nI0821 09:44:12.241294 32487 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 09:45:34.504659 32487 solver.cpp:404]     Test net output #0: accuracy = 0.17572\nI0821 09:45:34.504906 32487 solver.cpp:404]     Test net output #1: loss = 3.72988 (* 1 = 3.72988 loss)\nI0821 09:45:35.828837 32487 solver.cpp:228] Iteration 2100, loss = 0.33603\nI0821 09:45:35.828879 32487 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:45:35.828896 32487 solver.cpp:244]     Train net output #1: loss = 0.33603 (* 1 = 0.33603 loss)\nI0821 09:45:35.905717 32487 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0821 09:47:52.693750 32487 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 09:49:14.964671 32487 solver.cpp:404]     Test net output #0: accuracy = 0.16732\nI0821 09:49:14.964912 32487 solver.cpp:404]     Test net output #1: loss = 4.08967 (* 1 = 4.08967 loss)\nI0821 09:49:16.289206 32487 solver.cpp:228] Iteration 2200, loss = 0.203344\nI0821 09:49:16.289247 32487 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 09:49:16.289260 32487 solver.cpp:244]     Train net output #1: loss = 0.203344 (* 1 = 0.203344 loss)\nI0821 09:49:16.364130 32487 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0821 09:51:33.281657 32487 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 09:52:55.230211 32487 solver.cpp:404]     Test net output #0: accuracy = 0.16256\nI0821 09:52:55.230432 32487 solver.cpp:404]     Test net output #1: loss = 4.16558 (* 1 = 4.16558 loss)\nI0821 09:52:56.554116 32487 solver.cpp:228] Iteration 2300, loss = 0.336763\nI0821 09:52:56.554157 32487 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:52:56.554170 32487 solver.cpp:244]     Train net output #1: loss = 0.336763 (* 1 = 0.336763 loss)\nI0821 09:52:56.638499 32487 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0821 09:55:13.423569 32487 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 09:56:35.706792 32487 solver.cpp:404]   
  Test net output #0: accuracy = 0.26268\nI0821 09:56:35.707017 32487 solver.cpp:404]     Test net output #1: loss = 2.96643 (* 1 = 2.96643 loss)\nI0821 09:56:37.030289 32487 solver.cpp:228] Iteration 2400, loss = 0.307723\nI0821 09:56:37.030329 32487 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 09:56:37.030344 32487 solver.cpp:244]     Train net output #1: loss = 0.307723 (* 1 = 0.307723 loss)\nI0821 09:56:37.105720 32487 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0821 09:58:53.910460 32487 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 10:00:16.154510 32487 solver.cpp:404]     Test net output #0: accuracy = 0.22408\nI0821 10:00:16.154747 32487 solver.cpp:404]     Test net output #1: loss = 3.36721 (* 1 = 3.36721 loss)\nI0821 10:00:17.478418 32487 solver.cpp:228] Iteration 2500, loss = 0.180312\nI0821 10:00:17.478461 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:00:17.478480 32487 solver.cpp:244]     Train net output #1: loss = 0.180312 (* 1 = 0.180312 loss)\nI0821 10:00:17.552126 32487 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0821 10:02:34.342186 32487 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 10:03:56.610698 32487 solver.cpp:404]     Test net output #0: accuracy = 0.28148\nI0821 10:03:56.610919 32487 solver.cpp:404]     Test net output #1: loss = 3.21754 (* 1 = 3.21754 loss)\nI0821 10:03:57.933930 32487 solver.cpp:228] Iteration 2600, loss = 0.206128\nI0821 10:03:57.933971 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:03:57.933986 32487 solver.cpp:244]     Train net output #1: loss = 0.206128 (* 1 = 0.206128 loss)\nI0821 10:03:58.011943 32487 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0821 10:06:14.814236 32487 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 10:07:37.051467 32487 solver.cpp:404]     Test net output #0: accuracy = 0.20364\nI0821 10:07:37.051688 32487 solver.cpp:404]     Test net output #1: loss = 4.62513 (* 1 = 4.62513 
loss)\nI0821 10:07:38.375566 32487 solver.cpp:228] Iteration 2700, loss = 0.184198\nI0821 10:07:38.375608 32487 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:07:38.375623 32487 solver.cpp:244]     Train net output #1: loss = 0.184197 (* 1 = 0.184197 loss)\nI0821 10:07:38.456086 32487 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0821 10:09:55.273602 32487 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 10:11:17.529199 32487 solver.cpp:404]     Test net output #0: accuracy = 0.255\nI0821 10:11:17.529448 32487 solver.cpp:404]     Test net output #1: loss = 4.04596 (* 1 = 4.04596 loss)\nI0821 10:11:18.853271 32487 solver.cpp:228] Iteration 2800, loss = 0.148148\nI0821 10:11:18.853314 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:11:18.853337 32487 solver.cpp:244]     Train net output #1: loss = 0.148148 (* 1 = 0.148148 loss)\nI0821 10:11:18.928406 32487 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0821 10:13:35.704473 32487 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 10:14:57.983983 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2508\nI0821 10:14:57.984200 32487 solver.cpp:404]     Test net output #1: loss = 3.50759 (* 1 = 3.50759 loss)\nI0821 10:14:59.307435 32487 solver.cpp:228] Iteration 2900, loss = 0.188019\nI0821 10:14:59.307481 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:14:59.307495 32487 solver.cpp:244]     Train net output #1: loss = 0.188019 (* 1 = 0.188019 loss)\nI0821 10:14:59.381104 32487 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0821 10:17:16.145267 32487 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 10:18:38.390467 32487 solver.cpp:404]     Test net output #0: accuracy = 0.23444\nI0821 10:18:38.390681 32487 solver.cpp:404]     Test net output #1: loss = 5.23087 (* 1 = 5.23087 loss)\nI0821 10:18:39.714140 32487 solver.cpp:228] Iteration 3000, loss = 0.166749\nI0821 10:18:39.714179 32487 solver.cpp:244]     Train net output 
#0: accuracy = 0.936\nI0821 10:18:39.714195 32487 solver.cpp:244]     Train net output #1: loss = 0.166749 (* 1 = 0.166749 loss)\nI0821 10:18:39.787909 32487 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0821 10:20:56.530203 32487 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 10:22:18.380573 32487 solver.cpp:404]     Test net output #0: accuracy = 0.21784\nI0821 10:22:18.380831 32487 solver.cpp:404]     Test net output #1: loss = 4.04974 (* 1 = 4.04974 loss)\nI0821 10:22:19.703891 32487 solver.cpp:228] Iteration 3100, loss = 0.129469\nI0821 10:22:19.703934 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:22:19.703949 32487 solver.cpp:244]     Train net output #1: loss = 0.129469 (* 1 = 0.129469 loss)\nI0821 10:22:19.784668 32487 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0821 10:24:36.579656 32487 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 10:25:58.365447 32487 solver.cpp:404]     Test net output #0: accuracy = 0.28628\nI0821 10:25:58.365674 32487 solver.cpp:404]     Test net output #1: loss = 3.48888 (* 1 = 3.48888 loss)\nI0821 10:25:59.689410 32487 solver.cpp:228] Iteration 3200, loss = 0.135565\nI0821 10:25:59.689450 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:25:59.689469 32487 solver.cpp:244]     Train net output #1: loss = 0.135565 (* 1 = 0.135565 loss)\nI0821 10:25:59.765501 32487 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0821 10:28:16.610458 32487 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 10:29:38.686245 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2582\nI0821 10:29:38.686477 32487 solver.cpp:404]     Test net output #1: loss = 5.21097 (* 1 = 5.21097 loss)\nI0821 10:29:40.009609 32487 solver.cpp:228] Iteration 3300, loss = 0.191728\nI0821 10:29:40.009647 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:29:40.009663 32487 solver.cpp:244]     Train net output #1: loss = 0.191728 (* 1 = 0.191728 loss)\nI0821 
10:29:40.084084 32487 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0821 10:31:56.960465 32487 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 10:33:18.679292 32487 solver.cpp:404]     Test net output #0: accuracy = 0.25912\nI0821 10:33:18.679509 32487 solver.cpp:404]     Test net output #1: loss = 4.52664 (* 1 = 4.52664 loss)\nI0821 10:33:20.003281 32487 solver.cpp:228] Iteration 3400, loss = 0.142532\nI0821 10:33:20.003324 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:33:20.003338 32487 solver.cpp:244]     Train net output #1: loss = 0.142532 (* 1 = 0.142532 loss)\nI0821 10:33:20.079778 32487 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0821 10:35:36.821583 32487 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 10:36:58.547066 32487 solver.cpp:404]     Test net output #0: accuracy = 0.275\nI0821 10:36:58.547288 32487 solver.cpp:404]     Test net output #1: loss = 3.63828 (* 1 = 3.63828 loss)\nI0821 10:36:59.871047 32487 solver.cpp:228] Iteration 3500, loss = 0.247275\nI0821 10:36:59.871088 32487 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 10:36:59.871104 32487 solver.cpp:244]     Train net output #1: loss = 0.247275 (* 1 = 0.247275 loss)\nI0821 10:36:59.955844 32487 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0821 10:39:16.775650 32487 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 10:40:38.917307 32487 solver.cpp:404]     Test net output #0: accuracy = 0.42884\nI0821 10:40:38.917574 32487 solver.cpp:404]     Test net output #1: loss = 2.22901 (* 1 = 2.22901 loss)\nI0821 10:40:40.242354 32487 solver.cpp:228] Iteration 3600, loss = 0.124185\nI0821 10:40:40.242408 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:40:40.242424 32487 solver.cpp:244]     Train net output #1: loss = 0.124185 (* 1 = 0.124185 loss)\nI0821 10:40:40.322235 32487 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0821 10:42:57.202658 32487 solver.cpp:337] Iteration 3700, Testing net 
(#0)\nI0821 10:44:19.702522 32487 solver.cpp:404]     Test net output #0: accuracy = 0.22352\nI0821 10:44:19.702754 32487 solver.cpp:404]     Test net output #1: loss = 4.80835 (* 1 = 4.80835 loss)\nI0821 10:44:21.027645 32487 solver.cpp:228] Iteration 3700, loss = 0.14621\nI0821 10:44:21.027698 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:44:21.027720 32487 solver.cpp:244]     Train net output #1: loss = 0.14621 (* 1 = 0.14621 loss)\nI0821 10:44:21.107465 32487 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0821 10:46:37.991849 32487 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 10:48:00.226428 32487 solver.cpp:404]     Test net output #0: accuracy = 0.24624\nI0821 10:48:00.226645 32487 solver.cpp:404]     Test net output #1: loss = 3.84045 (* 1 = 3.84045 loss)\nI0821 10:48:01.550627 32487 solver.cpp:228] Iteration 3800, loss = 0.2607\nI0821 10:48:01.550680 32487 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 10:48:01.550696 32487 solver.cpp:244]     Train net output #1: loss = 0.2607 (* 1 = 0.2607 loss)\nI0821 10:48:01.624274 32487 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0821 10:50:18.502671 32487 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 10:51:40.733265 32487 solver.cpp:404]     Test net output #0: accuracy = 0.30184\nI0821 10:51:40.733492 32487 solver.cpp:404]     Test net output #1: loss = 3.6178 (* 1 = 3.6178 loss)\nI0821 10:51:42.058775 32487 solver.cpp:228] Iteration 3900, loss = 0.177711\nI0821 10:51:42.058827 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 10:51:42.058843 32487 solver.cpp:244]     Train net output #1: loss = 0.177711 (* 1 = 0.177711 loss)\nI0821 10:51:42.138088 32487 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0821 10:53:59.026160 32487 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 10:55:21.298842 32487 solver.cpp:404]     Test net output #0: accuracy = 0.27572\nI0821 10:55:21.299052 32487 solver.cpp:404]     Test net output 
#1: loss = 3.77419 (* 1 = 3.77419 loss)\nI0821 10:55:22.624271 32487 solver.cpp:228] Iteration 4000, loss = 0.120752\nI0821 10:55:22.624322 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:55:22.624339 32487 solver.cpp:244]     Train net output #1: loss = 0.120753 (* 1 = 0.120753 loss)\nI0821 10:55:22.693202 32487 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0821 10:57:39.569310 32487 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 10:59:01.769744 32487 solver.cpp:404]     Test net output #0: accuracy = 0.27384\nI0821 10:59:01.769968 32487 solver.cpp:404]     Test net output #1: loss = 4.0234 (* 1 = 4.0234 loss)\nI0821 10:59:03.096276 32487 solver.cpp:228] Iteration 4100, loss = 0.166458\nI0821 10:59:03.096328 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:59:03.096344 32487 solver.cpp:244]     Train net output #1: loss = 0.166458 (* 1 = 0.166458 loss)\nI0821 10:59:03.173319 32487 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0821 11:01:20.085562 32487 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 11:02:42.348513 32487 solver.cpp:404]     Test net output #0: accuracy = 0.32396\nI0821 11:02:42.348765 32487 solver.cpp:404]     Test net output #1: loss = 3.40932 (* 1 = 3.40932 loss)\nI0821 11:02:43.674448 32487 solver.cpp:228] Iteration 4200, loss = 0.0631198\nI0821 11:02:43.674501 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:02:43.674517 32487 solver.cpp:244]     Train net output #1: loss = 0.0631199 (* 1 = 0.0631199 loss)\nI0821 11:02:43.751471 32487 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0821 11:05:00.597827 32487 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 11:06:22.877310 32487 solver.cpp:404]     Test net output #0: accuracy = 0.17268\nI0821 11:06:22.877527 32487 solver.cpp:404]     Test net output #1: loss = 7.52937 (* 1 = 7.52937 loss)\nI0821 11:06:24.203019 32487 solver.cpp:228] Iteration 4300, loss = 0.113264\nI0821 11:06:24.203063 32487 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:06:24.203078 32487 solver.cpp:244]     Train net output #1: loss = 0.113264 (* 1 = 0.113264 loss)\nI0821 11:06:24.271540 32487 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0821 11:08:41.141824 32487 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 11:10:03.382944 32487 solver.cpp:404]     Test net output #0: accuracy = 0.1702\nI0821 11:10:03.383188 32487 solver.cpp:404]     Test net output #1: loss = 7.3614 (* 1 = 7.3614 loss)\nI0821 11:10:04.707535 32487 solver.cpp:228] Iteration 4400, loss = 0.243349\nI0821 11:10:04.707587 32487 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 11:10:04.707602 32487 solver.cpp:244]     Train net output #1: loss = 0.243349 (* 1 = 0.243349 loss)\nI0821 11:10:04.782532 32487 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0821 11:12:21.661267 32487 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 11:13:43.882334 32487 solver.cpp:404]     Test net output #0: accuracy = 0.32896\nI0821 11:13:43.882580 32487 solver.cpp:404]     Test net output #1: loss = 2.89905 (* 1 = 2.89905 loss)\nI0821 11:13:45.208755 32487 solver.cpp:228] Iteration 4500, loss = 0.147307\nI0821 11:13:45.208808 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:13:45.208824 32487 solver.cpp:244]     Train net output #1: loss = 0.147307 (* 1 = 0.147307 loss)\nI0821 11:13:45.288637 32487 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0821 11:16:02.220258 32487 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 11:17:24.424727 32487 solver.cpp:404]     Test net output #0: accuracy = 0.23512\nI0821 11:17:24.424965 32487 solver.cpp:404]     Test net output #1: loss = 4.57597 (* 1 = 4.57597 loss)\nI0821 11:17:25.750035 32487 solver.cpp:228] Iteration 4600, loss = 0.0581326\nI0821 11:17:25.750088 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:17:25.750104 32487 solver.cpp:244]     Train net output #1: loss = 0.0581327 (* 1 
= 0.0581327 loss)\nI0821 11:17:25.821755 32487 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0821 11:19:42.722434 32487 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 11:21:04.978911 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2218\nI0821 11:21:04.979133 32487 solver.cpp:404]     Test net output #1: loss = 4.03745 (* 1 = 4.03745 loss)\nI0821 11:21:06.304198 32487 solver.cpp:228] Iteration 4700, loss = 0.109109\nI0821 11:21:06.304250 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:21:06.304266 32487 solver.cpp:244]     Train net output #1: loss = 0.109109 (* 1 = 0.109109 loss)\nI0821 11:21:06.379253 32487 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0821 11:23:23.581347 32487 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 11:24:45.859421 32487 solver.cpp:404]     Test net output #0: accuracy = 0.27824\nI0821 11:24:45.859637 32487 solver.cpp:404]     Test net output #1: loss = 3.649 (* 1 = 3.649 loss)\nI0821 11:24:47.184046 32487 solver.cpp:228] Iteration 4800, loss = 0.0827635\nI0821 11:24:47.184099 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:24:47.184116 32487 solver.cpp:244]     Train net output #1: loss = 0.0827636 (* 1 = 0.0827636 loss)\nI0821 11:24:47.269356 32487 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0821 11:27:04.813702 32487 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 11:28:27.075546 32487 solver.cpp:404]     Test net output #0: accuracy = 0.22488\nI0821 11:28:27.075781 32487 solver.cpp:404]     Test net output #1: loss = 5.29526 (* 1 = 5.29526 loss)\nI0821 11:28:28.399560 32487 solver.cpp:228] Iteration 4900, loss = 0.103524\nI0821 11:28:28.399615 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:28:28.399631 32487 solver.cpp:244]     Train net output #1: loss = 0.103524 (* 1 = 0.103524 loss)\nI0821 11:28:28.479703 32487 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0821 11:30:46.294844 32487 solver.cpp:337] Iteration 
5000, Testing net (#0)\nI0821 11:32:08.524843 32487 solver.cpp:404]     Test net output #0: accuracy = 0.17072\nI0821 11:32:08.525087 32487 solver.cpp:404]     Test net output #1: loss = 6.34092 (* 1 = 6.34092 loss)\nI0821 11:32:09.849437 32487 solver.cpp:228] Iteration 5000, loss = 0.165058\nI0821 11:32:09.849489 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:32:09.849505 32487 solver.cpp:244]     Train net output #1: loss = 0.165058 (* 1 = 0.165058 loss)\nI0821 11:32:09.936913 32487 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0821 11:34:27.431533 32487 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 11:35:49.684345 32487 solver.cpp:404]     Test net output #0: accuracy = 0.313\nI0821 11:35:49.684592 32487 solver.cpp:404]     Test net output #1: loss = 3.03131 (* 1 = 3.03131 loss)\nI0821 11:35:51.009136 32487 solver.cpp:228] Iteration 5100, loss = 0.121235\nI0821 11:35:51.009184 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:35:51.009199 32487 solver.cpp:244]     Train net output #1: loss = 0.121235 (* 1 = 0.121235 loss)\nI0821 11:35:51.094573 32487 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0821 11:38:08.712182 32487 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 11:39:30.983232 32487 solver.cpp:404]     Test net output #0: accuracy = 0.23864\nI0821 11:39:30.983455 32487 solver.cpp:404]     Test net output #1: loss = 4.61965 (* 1 = 4.61965 loss)\nI0821 11:39:32.307585 32487 solver.cpp:228] Iteration 5200, loss = 0.111827\nI0821 11:39:32.307631 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:39:32.307647 32487 solver.cpp:244]     Train net output #1: loss = 0.111827 (* 1 = 0.111827 loss)\nI0821 11:39:32.386489 32487 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0821 11:41:49.935009 32487 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 11:43:12.147616 32487 solver.cpp:404]     Test net output #0: accuracy = 0.25464\nI0821 11:43:12.147855 32487 
solver.cpp:404]     Test net output #1: loss = 4.66125 (* 1 = 4.66125 loss)\nI0821 11:43:13.473908 32487 solver.cpp:228] Iteration 5300, loss = 0.189978\nI0821 11:43:13.473959 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:43:13.473975 32487 solver.cpp:244]     Train net output #1: loss = 0.189978 (* 1 = 0.189978 loss)\nI0821 11:43:13.555100 32487 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0821 11:45:31.104918 32487 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 11:46:53.313765 32487 solver.cpp:404]     Test net output #0: accuracy = 0.31112\nI0821 11:46:53.313988 32487 solver.cpp:404]     Test net output #1: loss = 3.24162 (* 1 = 3.24162 loss)\nI0821 11:46:54.638113 32487 solver.cpp:228] Iteration 5400, loss = 0.174968\nI0821 11:46:54.638164 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 11:46:54.638180 32487 solver.cpp:244]     Train net output #1: loss = 0.174968 (* 1 = 0.174968 loss)\nI0821 11:46:54.722966 32487 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0821 11:49:12.530876 32487 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 11:50:34.780447 32487 solver.cpp:404]     Test net output #0: accuracy = 0.22368\nI0821 11:50:34.780722 32487 solver.cpp:404]     Test net output #1: loss = 5.3608 (* 1 = 5.3608 loss)\nI0821 11:50:36.106272 32487 solver.cpp:228] Iteration 5500, loss = 0.100288\nI0821 11:50:36.106323 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:50:36.106339 32487 solver.cpp:244]     Train net output #1: loss = 0.100289 (* 1 = 0.100289 loss)\nI0821 11:50:36.185578 32487 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0821 11:52:53.765753 32487 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 11:54:16.011322 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2076\nI0821 11:54:16.011561 32487 solver.cpp:404]     Test net output #1: loss = 5.77101 (* 1 = 5.77101 loss)\nI0821 11:54:17.336776 32487 solver.cpp:228] Iteration 5600, loss = 
0.13007\nI0821 11:54:17.336827 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:54:17.336843 32487 solver.cpp:244]     Train net output #1: loss = 0.13007 (* 1 = 0.13007 loss)\nI0821 11:54:17.419675 32487 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0821 11:56:34.982429 32487 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 11:57:57.123276 32487 solver.cpp:404]     Test net output #0: accuracy = 0.34476\nI0821 11:57:57.123497 32487 solver.cpp:404]     Test net output #1: loss = 2.72625 (* 1 = 2.72625 loss)\nI0821 11:57:58.448973 32487 solver.cpp:228] Iteration 5700, loss = 0.116534\nI0821 11:57:58.449021 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:57:58.449038 32487 solver.cpp:244]     Train net output #1: loss = 0.116534 (* 1 = 0.116534 loss)\nI0821 11:57:58.529168 32487 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0821 12:00:16.116117 32487 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 12:01:38.067190 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2234\nI0821 12:01:38.067447 32487 solver.cpp:404]     Test net output #1: loss = 5.62071 (* 1 = 5.62071 loss)\nI0821 12:01:39.391618 32487 solver.cpp:228] Iteration 5800, loss = 0.101297\nI0821 12:01:39.391670 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:01:39.391686 32487 solver.cpp:244]     Train net output #1: loss = 0.101297 (* 1 = 0.101297 loss)\nI0821 12:01:39.476943 32487 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0821 12:03:57.031364 32487 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 12:05:19.053364 32487 solver.cpp:404]     Test net output #0: accuracy = 0.25928\nI0821 12:05:19.053568 32487 solver.cpp:404]     Test net output #1: loss = 4.10606 (* 1 = 4.10606 loss)\nI0821 12:05:20.377773 32487 solver.cpp:228] Iteration 5900, loss = 0.0941856\nI0821 12:05:20.377826 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:05:20.377842 32487 solver.cpp:244]     
Train net output #1: loss = 0.0941856 (* 1 = 0.0941856 loss)\nI0821 12:05:20.464053 32487 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0821 12:07:38.027055 32487 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 12:09:00.298957 32487 solver.cpp:404]     Test net output #0: accuracy = 0.27276\nI0821 12:09:00.299175 32487 solver.cpp:404]     Test net output #1: loss = 3.56077 (* 1 = 3.56077 loss)\nI0821 12:09:01.623627 32487 solver.cpp:228] Iteration 6000, loss = 0.0404455\nI0821 12:09:01.623679 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:09:01.623695 32487 solver.cpp:244]     Train net output #1: loss = 0.0404455 (* 1 = 0.0404455 loss)\nI0821 12:09:01.710136 32487 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0821 12:11:19.245620 32487 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 12:12:41.519378 32487 solver.cpp:404]     Test net output #0: accuracy = 0.25068\nI0821 12:12:41.519634 32487 solver.cpp:404]     Test net output #1: loss = 5.07981 (* 1 = 5.07981 loss)\nI0821 12:12:42.844380 32487 solver.cpp:228] Iteration 6100, loss = 0.0937211\nI0821 12:12:42.844440 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:12:42.844458 32487 solver.cpp:244]     Train net output #1: loss = 0.0937211 (* 1 = 0.0937211 loss)\nI0821 12:12:42.932250 32487 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0821 12:15:00.503115 32487 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 12:16:22.635097 32487 solver.cpp:404]     Test net output #0: accuracy = 0.28404\nI0821 12:16:22.635303 32487 solver.cpp:404]     Test net output #1: loss = 3.93923 (* 1 = 3.93923 loss)\nI0821 12:16:23.960778 32487 solver.cpp:228] Iteration 6200, loss = 0.104738\nI0821 12:16:23.960831 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 12:16:23.960849 32487 solver.cpp:244]     Train net output #1: loss = 0.104739 (* 1 = 0.104739 loss)\nI0821 12:16:24.049845 32487 sgd_solver.cpp:166] Iteration 6200, lr = 
0.35\nI0821 12:18:41.606744 32487 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 12:20:03.094554 32487 solver.cpp:404]     Test net output #0: accuracy = 0.35256\nI0821 12:20:03.094811 32487 solver.cpp:404]     Test net output #1: loss = 3.65536 (* 1 = 3.65536 loss)\nI0821 12:20:04.417470 32487 solver.cpp:228] Iteration 6300, loss = 0.0838443\nI0821 12:20:04.417511 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:20:04.417536 32487 solver.cpp:244]     Train net output #1: loss = 0.0838443 (* 1 = 0.0838443 loss)\nI0821 12:20:04.504276 32487 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0821 12:22:22.104454 32487 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 12:23:43.591315 32487 solver.cpp:404]     Test net output #0: accuracy = 0.21372\nI0821 12:23:43.591589 32487 solver.cpp:404]     Test net output #1: loss = 7.54277 (* 1 = 7.54277 loss)\nI0821 12:23:44.912873 32487 solver.cpp:228] Iteration 6400, loss = 0.0477938\nI0821 12:23:44.912919 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:23:44.912942 32487 solver.cpp:244]     Train net output #1: loss = 0.0477938 (* 1 = 0.0477938 loss)\nI0821 12:23:44.996331 32487 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0821 12:26:02.527017 32487 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 12:27:24.047511 32487 solver.cpp:404]     Test net output #0: accuracy = 0.26992\nI0821 12:27:24.047786 32487 solver.cpp:404]     Test net output #1: loss = 4.89958 (* 1 = 4.89958 loss)\nI0821 12:27:25.369623 32487 solver.cpp:228] Iteration 6500, loss = 0.0679828\nI0821 12:27:25.369658 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:27:25.369681 32487 solver.cpp:244]     Train net output #1: loss = 0.0679828 (* 1 = 0.0679828 loss)\nI0821 12:27:25.450515 32487 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0821 12:29:42.939388 32487 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0821 12:31:04.483803 32487 solver.cpp:404]     Test 
net output #0: accuracy = 0.416\nI0821 12:31:04.484071 32487 solver.cpp:404]     Test net output #1: loss = 2.75478 (* 1 = 2.75478 loss)\nI0821 12:31:05.806159 32487 solver.cpp:228] Iteration 6600, loss = 0.0622359\nI0821 12:31:05.806197 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 12:31:05.806221 32487 solver.cpp:244]     Train net output #1: loss = 0.0622359 (* 1 = 0.0622359 loss)\nI0821 12:31:05.897109 32487 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0821 12:33:23.331642 32487 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 12:34:44.894105 32487 solver.cpp:404]     Test net output #0: accuracy = 0.31852\nI0821 12:34:44.894381 32487 solver.cpp:404]     Test net output #1: loss = 4.25808 (* 1 = 4.25808 loss)\nI0821 12:34:46.216115 32487 solver.cpp:228] Iteration 6700, loss = 0.0826699\nI0821 12:34:46.216161 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:34:46.216183 32487 solver.cpp:244]     Train net output #1: loss = 0.0826699 (* 1 = 0.0826699 loss)\nI0821 12:34:46.298218 32487 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0821 12:37:03.718626 32487 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 12:38:25.252933 32487 solver.cpp:404]     Test net output #0: accuracy = 0.274\nI0821 12:38:25.253208 32487 solver.cpp:404]     Test net output #1: loss = 5.42079 (* 1 = 5.42079 loss)\nI0821 12:38:26.573966 32487 solver.cpp:228] Iteration 6800, loss = 0.164188\nI0821 12:38:26.574008 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 12:38:26.574033 32487 solver.cpp:244]     Train net output #1: loss = 0.164188 (* 1 = 0.164188 loss)\nI0821 12:38:26.661375 32487 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0821 12:40:44.175051 32487 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 12:42:05.660099 32487 solver.cpp:404]     Test net output #0: accuracy = 0.38692\nI0821 12:42:05.660368 32487 solver.cpp:404]     Test net output #1: loss = 3.14841 (* 1 = 3.14841 
loss)\nI0821 12:42:06.982741 32487 solver.cpp:228] Iteration 6900, loss = 0.109243\nI0821 12:42:06.982777 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:42:06.982800 32487 solver.cpp:244]     Train net output #1: loss = 0.109243 (* 1 = 0.109243 loss)\nI0821 12:42:07.068019 32487 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0821 12:44:24.414160 32487 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 12:45:45.784483 32487 solver.cpp:404]     Test net output #0: accuracy = 0.32904\nI0821 12:45:45.784729 32487 solver.cpp:404]     Test net output #1: loss = 4.09564 (* 1 = 4.09564 loss)\nI0821 12:45:47.106201 32487 solver.cpp:228] Iteration 7000, loss = 0.0495933\nI0821 12:45:47.106243 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:45:47.106259 32487 solver.cpp:244]     Train net output #1: loss = 0.0495934 (* 1 = 0.0495934 loss)\nI0821 12:45:47.192093 32487 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0821 12:48:04.719197 32487 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 12:49:26.097875 32487 solver.cpp:404]     Test net output #0: accuracy = 0.33864\nI0821 12:49:26.098122 32487 solver.cpp:404]     Test net output #1: loss = 4.58141 (* 1 = 4.58141 loss)\nI0821 12:49:27.419251 32487 solver.cpp:228] Iteration 7100, loss = 0.0707769\nI0821 12:49:27.419296 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:49:27.419312 32487 solver.cpp:244]     Train net output #1: loss = 0.070777 (* 1 = 0.070777 loss)\nI0821 12:49:27.507477 32487 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0821 12:51:44.986064 32487 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 12:53:06.358350 32487 solver.cpp:404]     Test net output #0: accuracy = 0.39508\nI0821 12:53:06.358608 32487 solver.cpp:404]     Test net output #1: loss = 3.405 (* 1 = 3.405 loss)\nI0821 12:53:07.680230 32487 solver.cpp:228] Iteration 7200, loss = 0.0771144\nI0821 12:53:07.680274 32487 solver.cpp:244]     Train net 
output #0: accuracy = 0.96\nI0821 12:53:07.680290 32487 solver.cpp:244]     Train net output #1: loss = 0.0771144 (* 1 = 0.0771144 loss)\nI0821 12:53:07.771245 32487 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0821 12:55:25.259145 32487 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 12:56:46.627197 32487 solver.cpp:404]     Test net output #0: accuracy = 0.38444\nI0821 12:56:46.627459 32487 solver.cpp:404]     Test net output #1: loss = 3.46195 (* 1 = 3.46195 loss)\nI0821 12:56:47.949430 32487 solver.cpp:228] Iteration 7300, loss = 0.0852263\nI0821 12:56:47.949477 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:56:47.949493 32487 solver.cpp:244]     Train net output #1: loss = 0.0852263 (* 1 = 0.0852263 loss)\nI0821 12:56:48.037163 32487 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0821 12:59:05.470481 32487 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 13:00:26.839432 32487 solver.cpp:404]     Test net output #0: accuracy = 0.25404\nI0821 13:00:26.839710 32487 solver.cpp:404]     Test net output #1: loss = 5.111 (* 1 = 5.111 loss)\nI0821 13:00:28.161022 32487 solver.cpp:228] Iteration 7400, loss = 0.0832845\nI0821 13:00:28.161067 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 13:00:28.161084 32487 solver.cpp:244]     Train net output #1: loss = 0.0832846 (* 1 = 0.0832846 loss)\nI0821 13:00:28.254835 32487 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0821 13:02:45.712005 32487 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 13:04:07.087806 32487 solver.cpp:404]     Test net output #0: accuracy = 0.38756\nI0821 13:04:07.088130 32487 solver.cpp:404]     Test net output #1: loss = 2.88925 (* 1 = 2.88925 loss)\nI0821 13:04:08.409739 32487 solver.cpp:228] Iteration 7500, loss = 0.0847479\nI0821 13:04:08.409783 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:04:08.409801 32487 solver.cpp:244]     Train net output #1: loss = 0.0847479 (* 1 = 0.0847479 
loss)\nI0821 13:04:08.490520 32487 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0821 13:06:25.938416 32487 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 13:07:47.313575 32487 solver.cpp:404]     Test net output #0: accuracy = 0.41752\nI0821 13:07:47.313841 32487 solver.cpp:404]     Test net output #1: loss = 3.3022 (* 1 = 3.3022 loss)\nI0821 13:07:48.635160 32487 solver.cpp:228] Iteration 7600, loss = 0.0402971\nI0821 13:07:48.635205 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:07:48.635221 32487 solver.cpp:244]     Train net output #1: loss = 0.0402971 (* 1 = 0.0402971 loss)\nI0821 13:07:48.720510 32487 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0821 13:10:06.138813 32487 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 13:11:27.527768 32487 solver.cpp:404]     Test net output #0: accuracy = 0.4172\nI0821 13:11:27.528039 32487 solver.cpp:404]     Test net output #1: loss = 3.18414 (* 1 = 3.18414 loss)\nI0821 13:11:28.852658 32487 solver.cpp:228] Iteration 7700, loss = 0.0964334\nI0821 13:11:28.852694 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 13:11:28.852710 32487 solver.cpp:244]     Train net output #1: loss = 0.0964334 (* 1 = 0.0964334 loss)\nI0821 13:11:28.939329 32487 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0821 13:13:46.556051 32487 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 13:15:07.930590 32487 solver.cpp:404]     Test net output #0: accuracy = 0.41688\nI0821 13:15:07.930852 32487 solver.cpp:404]     Test net output #1: loss = 3.10979 (* 1 = 3.10979 loss)\nI0821 13:15:09.253401 32487 solver.cpp:228] Iteration 7800, loss = 0.0875922\nI0821 13:15:09.253437 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:15:09.253453 32487 solver.cpp:244]     Train net output #1: loss = 0.0875922 (* 1 = 0.0875922 loss)\nI0821 13:15:09.342924 32487 sgd_solver.cpp:166] Iteration 7800, lr = 0.35\nI0821 13:17:26.830196 32487 solver.cpp:337] Iteration 
7900, Testing net (#0)\nI0821 13:18:48.324481 32487 solver.cpp:404]     Test net output #0: accuracy = 0.47512\nI0821 13:18:48.324759 32487 solver.cpp:404]     Test net output #1: loss = 2.97729 (* 1 = 2.97729 loss)\nI0821 13:18:49.646262 32487 solver.cpp:228] Iteration 7900, loss = 0.0698449\nI0821 13:18:49.646298 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:18:49.646313 32487 solver.cpp:244]     Train net output #1: loss = 0.069845 (* 1 = 0.069845 loss)\nI0821 13:18:49.732484 32487 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0821 13:21:07.251960 32487 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 13:22:28.750969 32487 solver.cpp:404]     Test net output #0: accuracy = 0.41092\nI0821 13:22:28.751241 32487 solver.cpp:404]     Test net output #1: loss = 3.90462 (* 1 = 3.90462 loss)\nI0821 13:22:30.073539 32487 solver.cpp:228] Iteration 8000, loss = 0.18696\nI0821 13:22:30.073587 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 13:22:30.073604 32487 solver.cpp:244]     Train net output #1: loss = 0.18696 (* 1 = 0.18696 loss)\nI0821 13:22:30.157011 32487 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0821 13:24:47.574734 32487 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 13:26:09.080946 32487 solver.cpp:404]     Test net output #0: accuracy = 0.4824\nI0821 13:26:09.081217 32487 solver.cpp:404]     Test net output #1: loss = 3.05566 (* 1 = 3.05566 loss)\nI0821 13:26:10.403292 32487 solver.cpp:228] Iteration 8100, loss = 0.0689507\nI0821 13:26:10.403337 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:26:10.403355 32487 solver.cpp:244]     Train net output #1: loss = 0.0689508 (* 1 = 0.0689508 loss)\nI0821 13:26:10.492043 32487 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0821 13:28:27.959837 32487 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 13:29:49.458556 32487 solver.cpp:404]     Test net output #0: accuracy = 0.4354\nI0821 13:29:49.458822 32487 
solver.cpp:404]     Test net output #1: loss = 3.33677 (* 1 = 3.33677 loss)\nI0821 13:29:50.780666 32487 solver.cpp:228] Iteration 8200, loss = 0.0420637\nI0821 13:29:50.780701 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 13:29:50.780717 32487 solver.cpp:244]     Train net output #1: loss = 0.0420637 (* 1 = 0.0420637 loss)\nI0821 13:29:50.877809 32487 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0821 13:32:08.401829 32487 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 13:33:29.888507 32487 solver.cpp:404]     Test net output #0: accuracy = 0.2994\nI0821 13:33:29.888787 32487 solver.cpp:404]     Test net output #1: loss = 5.97372 (* 1 = 5.97372 loss)\nI0821 13:33:31.210927 32487 solver.cpp:228] Iteration 8300, loss = 0.10948\nI0821 13:33:31.210963 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 13:33:31.210979 32487 solver.cpp:244]     Train net output #1: loss = 0.10948 (* 1 = 0.10948 loss)\nI0821 13:33:31.302038 32487 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0821 13:35:48.783160 32487 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 13:37:10.279812 32487 solver.cpp:404]     Test net output #0: accuracy = 0.49464\nI0821 13:37:10.280076 32487 solver.cpp:404]     Test net output #1: loss = 2.9104 (* 1 = 2.9104 loss)\nI0821 13:37:11.601953 32487 solver.cpp:228] Iteration 8400, loss = 0.0523199\nI0821 13:37:11.601999 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:37:11.602015 32487 solver.cpp:244]     Train net output #1: loss = 0.0523199 (* 1 = 0.0523199 loss)\nI0821 13:37:11.692867 32487 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0821 13:39:29.092538 32487 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 13:40:50.579036 32487 solver.cpp:404]     Test net output #0: accuracy = 0.49608\nI0821 13:40:50.579304 32487 solver.cpp:404]     Test net output #1: loss = 2.99962 (* 1 = 2.99962 loss)\nI0821 13:40:51.901878 32487 solver.cpp:228] Iteration 8500, loss = 
0.0488531\nI0821 13:40:51.901914 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:40:51.901929 32487 solver.cpp:244]     Train net output #1: loss = 0.0488531 (* 1 = 0.0488531 loss)\nI0821 13:40:51.992298 32487 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0821 13:43:09.390178 32487 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 13:44:30.898653 32487 solver.cpp:404]     Test net output #0: accuracy = 0.415\nI0821 13:44:30.898929 32487 solver.cpp:404]     Test net output #1: loss = 3.89909 (* 1 = 3.89909 loss)\nI0821 13:44:32.222200 32487 solver.cpp:228] Iteration 8600, loss = 0.0801311\nI0821 13:44:32.222245 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 13:44:32.222262 32487 solver.cpp:244]     Train net output #1: loss = 0.0801312 (* 1 = 0.0801312 loss)\nI0821 13:44:32.303908 32487 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0821 13:46:49.730144 32487 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 13:48:11.232744 32487 solver.cpp:404]     Test net output #0: accuracy = 0.45416\nI0821 13:48:11.233008 32487 solver.cpp:404]     Test net output #1: loss = 3.63539 (* 1 = 3.63539 loss)\nI0821 13:48:12.555696 32487 solver.cpp:228] Iteration 8700, loss = 0.10912\nI0821 13:48:12.555730 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:48:12.555745 32487 solver.cpp:244]     Train net output #1: loss = 0.10912 (* 1 = 0.10912 loss)\nI0821 13:48:12.634346 32487 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0821 13:50:29.987812 32487 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 13:51:51.490680 32487 solver.cpp:404]     Test net output #0: accuracy = 0.49292\nI0821 13:51:51.490949 32487 solver.cpp:404]     Test net output #1: loss = 2.86966 (* 1 = 2.86966 loss)\nI0821 13:51:52.813993 32487 solver.cpp:228] Iteration 8800, loss = 0.0448736\nI0821 13:51:52.814029 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:51:52.814044 32487 solver.cpp:244]     
Train net output #1: loss = 0.0448737 (* 1 = 0.0448737 loss)\nI0821 13:51:52.893164 32487 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0821 13:54:10.276661 32487 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 13:55:31.780388 32487 solver.cpp:404]     Test net output #0: accuracy = 0.45368\nI0821 13:55:31.780661 32487 solver.cpp:404]     Test net output #1: loss = 2.89127 (* 1 = 2.89127 loss)\nI0821 13:55:33.102300 32487 solver.cpp:228] Iteration 8900, loss = 0.0519904\nI0821 13:55:33.102344 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:55:33.102361 32487 solver.cpp:244]     Train net output #1: loss = 0.0519904 (* 1 = 0.0519904 loss)\nI0821 13:55:33.191828 32487 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0821 13:57:50.578593 32487 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 13:59:12.077426 32487 solver.cpp:404]     Test net output #0: accuracy = 0.47924\nI0821 13:59:12.077706 32487 solver.cpp:404]     Test net output #1: loss = 2.68225 (* 1 = 2.68225 loss)\nI0821 13:59:13.399514 32487 solver.cpp:228] Iteration 9000, loss = 0.0452964\nI0821 13:59:13.399561 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:59:13.399577 32487 solver.cpp:244]     Train net output #1: loss = 0.0452964 (* 1 = 0.0452964 loss)\nI0821 13:59:13.485766 32487 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0821 14:01:30.823504 32487 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 14:02:52.325047 32487 solver.cpp:404]     Test net output #0: accuracy = 0.35768\nI0821 14:02:52.325322 32487 solver.cpp:404]     Test net output #1: loss = 4.15049 (* 1 = 4.15049 loss)\nI0821 14:02:53.647656 32487 solver.cpp:228] Iteration 9100, loss = 0.0204112\nI0821 14:02:53.647699 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:02:53.647716 32487 solver.cpp:244]     Train net output #1: loss = 0.0204112 (* 1 = 0.0204112 loss)\nI0821 14:02:53.737715 32487 sgd_solver.cpp:166] Iteration 9100, lr = 
0.35\nI0821 14:05:11.227414 32487 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 14:06:32.716471 32487 solver.cpp:404]     Test net output #0: accuracy = 0.49556\nI0821 14:06:32.716749 32487 solver.cpp:404]     Test net output #1: loss = 2.50652 (* 1 = 2.50652 loss)\nI0821 14:06:34.039501 32487 solver.cpp:228] Iteration 9200, loss = 0.0407061\nI0821 14:06:34.039543 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:06:34.039566 32487 solver.cpp:244]     Train net output #1: loss = 0.0407061 (* 1 = 0.0407061 loss)\nI0821 14:06:34.127601 32487 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0821 14:08:51.606179 32487 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 14:10:13.093889 32487 solver.cpp:404]     Test net output #0: accuracy = 0.5552\nI0821 14:10:13.094172 32487 solver.cpp:404]     Test net output #1: loss = 2.30739 (* 1 = 2.30739 loss)\nI0821 14:10:14.417131 32487 solver.cpp:228] Iteration 9300, loss = 0.137464\nI0821 14:10:14.417174 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 14:10:14.417191 32487 solver.cpp:244]     Train net output #1: loss = 0.137464 (* 1 = 0.137464 loss)\nI0821 14:10:14.497251 32487 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0821 14:12:31.984678 32487 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 14:13:53.493413 32487 solver.cpp:404]     Test net output #0: accuracy = 0.50388\nI0821 14:13:53.493680 32487 solver.cpp:404]     Test net output #1: loss = 2.8056 (* 1 = 2.8056 loss)\nI0821 14:13:54.815065 32487 solver.cpp:228] Iteration 9400, loss = 0.0502911\nI0821 14:13:54.815110 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:13:54.815127 32487 solver.cpp:244]     Train net output #1: loss = 0.0502911 (* 1 = 0.0502911 loss)\nI0821 14:13:54.896837 32487 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0821 14:16:12.320529 32487 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0821 14:17:33.807267 32487 solver.cpp:404]     Test net 
output #0: accuracy = 0.43444\nI0821 14:17:33.807534 32487 solver.cpp:404]     Test net output #1: loss = 3.30079 (* 1 = 3.30079 loss)\nI0821 14:17:35.130733 32487 solver.cpp:228] Iteration 9500, loss = 0.0911927\nI0821 14:17:35.130767 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:17:35.130784 32487 solver.cpp:244]     Train net output #1: loss = 0.0911926 (* 1 = 0.0911926 loss)\nI0821 14:17:35.219414 32487 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0821 14:19:52.756952 32487 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 14:21:14.255961 32487 solver.cpp:404]     Test net output #0: accuracy = 0.41576\nI0821 14:21:14.256222 32487 solver.cpp:404]     Test net output #1: loss = 3.75359 (* 1 = 3.75359 loss)\nI0821 14:21:15.579500 32487 solver.cpp:228] Iteration 9600, loss = 0.0520659\nI0821 14:21:15.579536 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:21:15.579556 32487 solver.cpp:244]     Train net output #1: loss = 0.0520658 (* 1 = 0.0520658 loss)\nI0821 14:21:15.664892 32487 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0821 14:23:33.092710 32487 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 14:24:54.578059 32487 solver.cpp:404]     Test net output #0: accuracy = 0.46712\nI0821 14:24:54.578330 32487 solver.cpp:404]     Test net output #1: loss = 3.12188 (* 1 = 3.12188 loss)\nI0821 14:24:55.900861 32487 solver.cpp:228] Iteration 9700, loss = 0.0523044\nI0821 14:24:55.900905 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:24:55.900923 32487 solver.cpp:244]     Train net output #1: loss = 0.0523043 (* 1 = 0.0523043 loss)\nI0821 14:24:55.982200 32487 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0821 14:27:13.477874 32487 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 14:28:34.960052 32487 solver.cpp:404]     Test net output #0: accuracy = 0.4672\nI0821 14:28:34.960302 32487 solver.cpp:404]     Test net output #1: loss = 3.09233 (* 1 = 3.09233 
loss)\nI0821 14:28:36.283110 32487 solver.cpp:228] Iteration 9800, loss = 0.0612964\nI0821 14:28:36.283155 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:28:36.283171 32487 solver.cpp:244]     Train net output #1: loss = 0.0612964 (* 1 = 0.0612964 loss)\nI0821 14:28:36.367180 32487 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0821 14:30:53.886087 32487 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 14:32:15.374792 32487 solver.cpp:404]     Test net output #0: accuracy = 0.52876\nI0821 14:32:15.375064 32487 solver.cpp:404]     Test net output #1: loss = 2.56824 (* 1 = 2.56824 loss)\nI0821 14:32:16.696665 32487 solver.cpp:228] Iteration 9900, loss = 0.0560783\nI0821 14:32:16.696707 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:32:16.696723 32487 solver.cpp:244]     Train net output #1: loss = 0.0560783 (* 1 = 0.0560783 loss)\nI0821 14:32:16.783133 32487 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0821 14:34:34.332180 32487 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 14:35:55.789419 32487 solver.cpp:404]     Test net output #0: accuracy = 0.48436\nI0821 14:35:55.789686 32487 solver.cpp:404]     Test net output #1: loss = 2.69128 (* 1 = 2.69128 loss)\nI0821 14:35:57.110551 32487 solver.cpp:228] Iteration 10000, loss = 0.0547146\nI0821 14:35:57.110595 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:35:57.110612 32487 solver.cpp:244]     Train net output #1: loss = 0.0547146 (* 1 = 0.0547146 loss)\nI0821 14:35:57.200073 32487 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0821 14:38:14.762397 32487 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0821 14:39:36.196480 32487 solver.cpp:404]     Test net output #0: accuracy = 0.45904\nI0821 14:39:36.196725 32487 solver.cpp:404]     Test net output #1: loss = 3.25695 (* 1 = 3.25695 loss)\nI0821 14:39:37.517745 32487 solver.cpp:228] Iteration 10100, loss = 0.0703904\nI0821 14:39:37.517779 32487 solver.cpp:244]    
 Train net output #0: accuracy = 0.984\nI0821 14:39:37.517796 32487 solver.cpp:244]     Train net output #1: loss = 0.0703903 (* 1 = 0.0703903 loss)\nI0821 14:39:37.607228 32487 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0821 14:41:55.050916 32487 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0821 14:43:16.481454 32487 solver.cpp:404]     Test net output #0: accuracy = 0.47612\nI0821 14:43:16.481726 32487 solver.cpp:404]     Test net output #1: loss = 3.22269 (* 1 = 3.22269 loss)\nI0821 14:43:17.802867 32487 solver.cpp:228] Iteration 10200, loss = 0.0456973\nI0821 14:43:17.802913 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:43:17.802929 32487 solver.cpp:244]     Train net output #1: loss = 0.0456972 (* 1 = 0.0456972 loss)\nI0821 14:43:17.888355 32487 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0821 14:45:35.430598 32487 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0821 14:46:56.865991 32487 solver.cpp:404]     Test net output #0: accuracy = 0.51492\nI0821 14:46:56.866266 32487 solver.cpp:404]     Test net output #1: loss = 3.19398 (* 1 = 3.19398 loss)\nI0821 14:46:58.187782 32487 solver.cpp:228] Iteration 10300, loss = 0.0288385\nI0821 14:46:58.187818 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:46:58.187834 32487 solver.cpp:244]     Train net output #1: loss = 0.0288385 (* 1 = 0.0288385 loss)\nI0821 14:46:58.273975 32487 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0821 14:49:15.713896 32487 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0821 14:50:37.153493 32487 solver.cpp:404]     Test net output #0: accuracy = 0.42232\nI0821 14:50:37.153753 32487 solver.cpp:404]     Test net output #1: loss = 3.73944 (* 1 = 3.73944 loss)\nI0821 14:50:38.475077 32487 solver.cpp:228] Iteration 10400, loss = 0.0831717\nI0821 14:50:38.475122 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:50:38.475139 32487 solver.cpp:244]     Train net output #1: loss = 0.0831716 
(* 1 = 0.0831716 loss)\nI0821 14:50:38.559427 32487 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0821 14:52:55.937081 32487 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0821 14:54:17.371515 32487 solver.cpp:404]     Test net output #0: accuracy = 0.54168\nI0821 14:54:17.371785 32487 solver.cpp:404]     Test net output #1: loss = 2.35215 (* 1 = 2.35215 loss)\nI0821 14:54:18.694515 32487 solver.cpp:228] Iteration 10500, loss = 0.0652215\nI0821 14:54:18.694551 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:54:18.694567 32487 solver.cpp:244]     Train net output #1: loss = 0.0652214 (* 1 = 0.0652214 loss)\nI0821 14:54:18.780268 32487 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0821 14:56:36.200682 32487 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0821 14:57:57.634191 32487 solver.cpp:404]     Test net output #0: accuracy = 0.54744\nI0821 14:57:57.634469 32487 solver.cpp:404]     Test net output #1: loss = 2.59194 (* 1 = 2.59194 loss)\nI0821 14:57:58.957170 32487 solver.cpp:228] Iteration 10600, loss = 0.0188173\nI0821 14:57:58.957204 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:57:58.957221 32487 solver.cpp:244]     Train net output #1: loss = 0.0188172 (* 1 = 0.0188172 loss)\nI0821 14:57:59.041826 32487 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0821 15:00:16.574488 32487 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0821 15:01:37.910919 32487 solver.cpp:404]     Test net output #0: accuracy = 0.56636\nI0821 15:01:37.911178 32487 solver.cpp:404]     Test net output #1: loss = 2.28964 (* 1 = 2.28964 loss)\nI0821 15:01:39.232388 32487 solver.cpp:228] Iteration 10700, loss = 0.136536\nI0821 15:01:39.232432 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 15:01:39.232450 32487 solver.cpp:244]     Train net output #1: loss = 0.136536 (* 1 = 0.136536 loss)\nI0821 15:01:39.317932 32487 sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0821 15:03:57.023185 32487 
solver.cpp:337] Iteration 10800, Testing net (#0)\nI0821 15:05:18.349198 32487 solver.cpp:404]     Test net output #0: accuracy = 0.55352\nI0821 15:05:18.349438 32487 solver.cpp:404]     Test net output #1: loss = 2.47399 (* 1 = 2.47399 loss)\nI0821 15:05:19.670609 32487 solver.cpp:228] Iteration 10800, loss = 0.221673\nI0821 15:05:19.670653 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 15:05:19.670670 32487 solver.cpp:244]     Train net output #1: loss = 0.221673 (* 1 = 0.221673 loss)\nI0821 15:05:19.754200 32487 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0821 15:07:37.240067 32487 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0821 15:08:58.568069 32487 solver.cpp:404]     Test net output #0: accuracy = 0.45592\nI0821 15:08:58.568346 32487 solver.cpp:404]     Test net output #1: loss = 3.37256 (* 1 = 3.37256 loss)\nI0821 15:08:59.890000 32487 solver.cpp:228] Iteration 10900, loss = 0.0770623\nI0821 15:08:59.890040 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:08:59.890056 32487 solver.cpp:244]     Train net output #1: loss = 0.0770622 (* 1 = 0.0770622 loss)\nI0821 15:08:59.975778 32487 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0821 15:11:17.570322 32487 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0821 15:12:38.890411 32487 solver.cpp:404]     Test net output #0: accuracy = 0.37084\nI0821 15:12:38.890686 32487 solver.cpp:404]     Test net output #1: loss = 4.88286 (* 1 = 4.88286 loss)\nI0821 15:12:40.211936 32487 solver.cpp:228] Iteration 11000, loss = 0.0749317\nI0821 15:12:40.211977 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:12:40.211993 32487 solver.cpp:244]     Train net output #1: loss = 0.0749316 (* 1 = 0.0749316 loss)\nI0821 15:12:40.304517 32487 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0821 15:14:57.804540 32487 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0821 15:16:19.127310 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.4028\nI0821 15:16:19.127588 32487 solver.cpp:404]     Test net output #1: loss = 4.597 (* 1 = 4.597 loss)\nI0821 15:16:20.449335 32487 solver.cpp:228] Iteration 11100, loss = 0.0553274\nI0821 15:16:20.449368 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:16:20.449383 32487 solver.cpp:244]     Train net output #1: loss = 0.0553273 (* 1 = 0.0553273 loss)\nI0821 15:16:20.532203 32487 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0821 15:18:37.938223 32487 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0821 15:19:59.249544 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59556\nI0821 15:19:59.249795 32487 solver.cpp:404]     Test net output #1: loss = 2.12233 (* 1 = 2.12233 loss)\nI0821 15:20:00.571231 32487 solver.cpp:228] Iteration 11200, loss = 0.0586835\nI0821 15:20:00.571264 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:20:00.571279 32487 solver.cpp:244]     Train net output #1: loss = 0.0586833 (* 1 = 0.0586833 loss)\nI0821 15:20:00.651170 32487 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0821 15:22:18.012362 32487 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0821 15:23:39.334846 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62476\nI0821 15:23:39.335103 32487 solver.cpp:404]     Test net output #1: loss = 2.0056 (* 1 = 2.0056 loss)\nI0821 15:23:40.656263 32487 solver.cpp:228] Iteration 11300, loss = 0.0856822\nI0821 15:23:40.656301 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:23:40.656317 32487 solver.cpp:244]     Train net output #1: loss = 0.085682 (* 1 = 0.085682 loss)\nI0821 15:23:40.748363 32487 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0821 15:25:58.273190 32487 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0821 15:27:19.597100 32487 solver.cpp:404]     Test net output #0: accuracy = 0.57844\nI0821 15:27:19.597368 32487 solver.cpp:404]     Test net output #1: loss = 2.40652 (* 1 = 2.40652 loss)\nI0821 15:27:20.918620 
32487 solver.cpp:228] Iteration 11400, loss = 0.0395445\nI0821 15:27:20.918653 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:27:20.918669 32487 solver.cpp:244]     Train net output #1: loss = 0.0395443 (* 1 = 0.0395443 loss)\nI0821 15:27:21.009440 32487 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0821 15:29:38.528719 32487 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0821 15:30:59.854076 32487 solver.cpp:404]     Test net output #0: accuracy = 0.55232\nI0821 15:30:59.854356 32487 solver.cpp:404]     Test net output #1: loss = 2.78856 (* 1 = 2.78856 loss)\nI0821 15:31:01.175920 32487 solver.cpp:228] Iteration 11500, loss = 0.0479925\nI0821 15:31:01.175953 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:31:01.175968 32487 solver.cpp:244]     Train net output #1: loss = 0.0479923 (* 1 = 0.0479923 loss)\nI0821 15:31:01.264351 32487 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0821 15:33:18.592161 32487 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0821 15:34:40.044003 32487 solver.cpp:404]     Test net output #0: accuracy = 0.50736\nI0821 15:34:40.044283 32487 solver.cpp:404]     Test net output #1: loss = 2.83695 (* 1 = 2.83695 loss)\nI0821 15:34:41.366253 32487 solver.cpp:228] Iteration 11600, loss = 0.0665769\nI0821 15:34:41.366288 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:34:41.366308 32487 solver.cpp:244]     Train net output #1: loss = 0.0665767 (* 1 = 0.0665767 loss)\nI0821 15:34:41.442399 32487 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0821 15:36:57.942044 32487 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0821 15:38:19.400346 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65384\nI0821 15:38:19.400606 32487 solver.cpp:404]     Test net output #1: loss = 1.64192 (* 1 = 1.64192 loss)\nI0821 15:38:20.721603 32487 solver.cpp:228] Iteration 11700, loss = 0.0401\nI0821 15:38:20.721637 32487 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0821 15:38:20.721652 32487 solver.cpp:244]     Train net output #1: loss = 0.0400997 (* 1 = 0.0400997 loss)\nI0821 15:38:20.799592 32487 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0821 15:40:37.390449 32487 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0821 15:41:58.837363 32487 solver.cpp:404]     Test net output #0: accuracy = 0.42932\nI0821 15:41:58.837637 32487 solver.cpp:404]     Test net output #1: loss = 3.91989 (* 1 = 3.91989 loss)\nI0821 15:42:00.159026 32487 solver.cpp:228] Iteration 11800, loss = 0.0291603\nI0821 15:42:00.159061 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:42:00.159076 32487 solver.cpp:244]     Train net output #1: loss = 0.0291601 (* 1 = 0.0291601 loss)\nI0821 15:42:00.236363 32487 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0821 15:44:16.878031 32487 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0821 15:45:38.325227 32487 solver.cpp:404]     Test net output #0: accuracy = 0.46796\nI0821 15:45:38.325505 32487 solver.cpp:404]     Test net output #1: loss = 3.86998 (* 1 = 3.86998 loss)\nI0821 15:45:39.647002 32487 solver.cpp:228] Iteration 11900, loss = 0.129017\nI0821 15:45:39.647044 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 15:45:39.647060 32487 solver.cpp:244]     Train net output #1: loss = 0.129017 (* 1 = 0.129017 loss)\nI0821 15:45:39.732837 32487 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0821 15:47:56.390394 32487 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0821 15:49:17.835932 32487 solver.cpp:404]     Test net output #0: accuracy = 0.49828\nI0821 15:49:17.836213 32487 solver.cpp:404]     Test net output #1: loss = 3.24002 (* 1 = 3.24002 loss)\nI0821 15:49:19.158643 32487 solver.cpp:228] Iteration 12000, loss = 0.0552717\nI0821 15:49:19.158679 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:49:19.158695 32487 solver.cpp:244]     Train net output #1: loss = 0.0552715 (* 1 = 0.0552715 
loss)\nI0821 15:49:19.232439 32487 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0821 15:51:35.948925 32487 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0821 15:52:57.472648 32487 solver.cpp:404]     Test net output #0: accuracy = 0.56008\nI0821 15:52:57.472926 32487 solver.cpp:404]     Test net output #1: loss = 3.05484 (* 1 = 3.05484 loss)\nI0821 15:52:58.794037 32487 solver.cpp:228] Iteration 12100, loss = 0.0535927\nI0821 15:52:58.794072 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:52:58.794088 32487 solver.cpp:244]     Train net output #1: loss = 0.0535924 (* 1 = 0.0535924 loss)\nI0821 15:52:58.877785 32487 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0821 15:55:15.475474 32487 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0821 15:56:36.934947 32487 solver.cpp:404]     Test net output #0: accuracy = 0.57092\nI0821 15:56:36.935210 32487 solver.cpp:404]     Test net output #1: loss = 2.57691 (* 1 = 2.57691 loss)\nI0821 15:56:38.257493 32487 solver.cpp:228] Iteration 12200, loss = 0.0514028\nI0821 15:56:38.257527 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 15:56:38.257544 32487 solver.cpp:244]     Train net output #1: loss = 0.0514025 (* 1 = 0.0514025 loss)\nI0821 15:56:38.335551 32487 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0821 15:58:54.954380 32487 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0821 16:00:17.168664 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63304\nI0821 16:00:17.168977 32487 solver.cpp:404]     Test net output #1: loss = 1.88382 (* 1 = 1.88382 loss)\nI0821 16:00:18.493938 32487 solver.cpp:228] Iteration 12300, loss = 0.0782511\nI0821 16:00:18.493978 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:00:18.493993 32487 solver.cpp:244]     Train net output #1: loss = 0.0782509 (* 1 = 0.0782509 loss)\nI0821 16:00:18.570336 32487 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0821 16:02:35.160434 32487 solver.cpp:337] 
Iteration 12400, Testing net (#0)\nI0821 16:03:56.594450 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65116\nI0821 16:03:56.594723 32487 solver.cpp:404]     Test net output #1: loss = 1.82346 (* 1 = 1.82346 loss)\nI0821 16:03:57.916088 32487 solver.cpp:228] Iteration 12400, loss = 0.0452005\nI0821 16:03:57.916122 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:03:57.916138 32487 solver.cpp:244]     Train net output #1: loss = 0.0452002 (* 1 = 0.0452002 loss)\nI0821 16:03:57.994145 32487 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0821 16:06:14.611171 32487 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0821 16:07:36.042311 32487 solver.cpp:404]     Test net output #0: accuracy = 0.60944\nI0821 16:07:36.042590 32487 solver.cpp:404]     Test net output #1: loss = 2.19429 (* 1 = 2.19429 loss)\nI0821 16:07:37.364320 32487 solver.cpp:228] Iteration 12500, loss = 0.124869\nI0821 16:07:37.364369 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 16:07:37.364387 32487 solver.cpp:244]     Train net output #1: loss = 0.124869 (* 1 = 0.124869 loss)\nI0821 16:07:37.440544 32487 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0821 16:09:54.117945 32487 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0821 16:11:15.561961 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62228\nI0821 16:11:15.562240 32487 solver.cpp:404]     Test net output #1: loss = 2.18296 (* 1 = 2.18296 loss)\nI0821 16:11:16.883232 32487 solver.cpp:228] Iteration 12600, loss = 0.03812\nI0821 16:11:16.883267 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:11:16.883283 32487 solver.cpp:244]     Train net output #1: loss = 0.0381197 (* 1 = 0.0381197 loss)\nI0821 16:11:16.964357 32487 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0821 16:13:33.566439 32487 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0821 16:14:55.017262 32487 solver.cpp:404]     Test net output #0: accuracy = 0.52184\nI0821 
16:14:55.017562 32487 solver.cpp:404]     Test net output #1: loss = 2.97728 (* 1 = 2.97728 loss)\nI0821 16:14:56.338925 32487 solver.cpp:228] Iteration 12700, loss = 0.0807373\nI0821 16:14:56.338969 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:14:56.338986 32487 solver.cpp:244]     Train net output #1: loss = 0.080737 (* 1 = 0.080737 loss)\nI0821 16:14:56.419606 32487 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0821 16:17:13.043229 32487 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0821 16:18:34.488557 32487 solver.cpp:404]     Test net output #0: accuracy = 0.57\nI0821 16:18:34.488826 32487 solver.cpp:404]     Test net output #1: loss = 2.76005 (* 1 = 2.76005 loss)\nI0821 16:18:35.810430 32487 solver.cpp:228] Iteration 12800, loss = 0.0438294\nI0821 16:18:35.810470 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:18:35.810487 32487 solver.cpp:244]     Train net output #1: loss = 0.0438292 (* 1 = 0.0438292 loss)\nI0821 16:18:35.888124 32487 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0821 16:20:52.647735 32487 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0821 16:22:14.091368 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62564\nI0821 16:22:14.091631 32487 solver.cpp:404]     Test net output #1: loss = 2.01928 (* 1 = 2.01928 loss)\nI0821 16:22:15.413147 32487 solver.cpp:228] Iteration 12900, loss = 0.0172966\nI0821 16:22:15.413180 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:22:15.413194 32487 solver.cpp:244]     Train net output #1: loss = 0.0172963 (* 1 = 0.0172963 loss)\nI0821 16:22:15.487639 32487 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0821 16:24:32.055490 32487 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0821 16:25:53.500361 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66536\nI0821 16:25:53.500623 32487 solver.cpp:404]     Test net output #1: loss = 1.6985 (* 1 = 1.6985 loss)\nI0821 16:25:54.821560 32487 
solver.cpp:228] Iteration 13000, loss = 0.00699834\nI0821 16:25:54.821601 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:25:54.821617 32487 solver.cpp:244]     Train net output #1: loss = 0.00699805 (* 1 = 0.00699805 loss)\nI0821 16:25:54.898221 32487 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0821 16:28:11.610327 32487 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0821 16:29:33.061624 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62088\nI0821 16:29:33.061902 32487 solver.cpp:404]     Test net output #1: loss = 2.26469 (* 1 = 2.26469 loss)\nI0821 16:29:34.383121 32487 solver.cpp:228] Iteration 13100, loss = 0.0549341\nI0821 16:29:34.383162 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:29:34.383178 32487 solver.cpp:244]     Train net output #1: loss = 0.0549338 (* 1 = 0.0549338 loss)\nI0821 16:29:34.459071 32487 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0821 16:31:51.128445 32487 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0821 16:33:12.577970 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59656\nI0821 16:33:12.578250 32487 solver.cpp:404]     Test net output #1: loss = 2.41085 (* 1 = 2.41085 loss)\nI0821 16:33:13.900023 32487 solver.cpp:228] Iteration 13200, loss = 0.0594728\nI0821 16:33:13.900056 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:33:13.900072 32487 solver.cpp:244]     Train net output #1: loss = 0.0594725 (* 1 = 0.0594725 loss)\nI0821 16:33:13.980063 32487 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0821 16:35:30.622212 32487 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0821 16:36:52.069602 32487 solver.cpp:404]     Test net output #0: accuracy = 0.61576\nI0821 16:36:52.069856 32487 solver.cpp:404]     Test net output #1: loss = 2.27061 (* 1 = 2.27061 loss)\nI0821 16:36:53.391975 32487 solver.cpp:228] Iteration 13300, loss = 0.0425132\nI0821 16:36:53.392020 32487 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0821 16:36:53.392036 32487 solver.cpp:244]     Train net output #1: loss = 0.0425129 (* 1 = 0.0425129 loss)\nI0821 16:36:53.469974 32487 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0821 16:39:10.098871 32487 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0821 16:40:31.535115 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6596\nI0821 16:40:31.535395 32487 solver.cpp:404]     Test net output #1: loss = 1.88294 (* 1 = 1.88294 loss)\nI0821 16:40:32.856638 32487 solver.cpp:228] Iteration 13400, loss = 0.0643303\nI0821 16:40:32.856672 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:40:32.856688 32487 solver.cpp:244]     Train net output #1: loss = 0.06433 (* 1 = 0.06433 loss)\nI0821 16:40:32.937402 32487 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0821 16:42:49.556470 32487 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0821 16:44:10.995445 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64604\nI0821 16:44:10.995714 32487 solver.cpp:404]     Test net output #1: loss = 2.09093 (* 1 = 2.09093 loss)\nI0821 16:44:12.316762 32487 solver.cpp:228] Iteration 13500, loss = 0.0593076\nI0821 16:44:12.316795 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:44:12.316812 32487 solver.cpp:244]     Train net output #1: loss = 0.0593074 (* 1 = 0.0593074 loss)\nI0821 16:44:12.395812 32487 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0821 16:46:29.123792 32487 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0821 16:47:50.559593 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6282\nI0821 16:47:50.559857 32487 solver.cpp:404]     Test net output #1: loss = 2.13138 (* 1 = 2.13138 loss)\nI0821 16:47:51.881413 32487 solver.cpp:228] Iteration 13600, loss = 0.135545\nI0821 16:47:51.881448 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 16:47:51.881463 32487 solver.cpp:244]     Train net output #1: loss = 0.135545 (* 1 = 0.135545 loss)\nI0821 16:47:51.960932 
32487 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0821 16:50:08.578199 32487 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0821 16:51:30.012681 32487 solver.cpp:404]     Test net output #0: accuracy = 0.642\nI0821 16:51:30.012941 32487 solver.cpp:404]     Test net output #1: loss = 2.05956 (* 1 = 2.05956 loss)\nI0821 16:51:31.334619 32487 solver.cpp:228] Iteration 13700, loss = 0.132644\nI0821 16:51:31.334658 32487 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 16:51:31.334674 32487 solver.cpp:244]     Train net output #1: loss = 0.132644 (* 1 = 0.132644 loss)\nI0821 16:51:31.410625 32487 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0821 16:53:48.041479 32487 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0821 16:55:09.476711 32487 solver.cpp:404]     Test net output #0: accuracy = 0.628\nI0821 16:55:09.476984 32487 solver.cpp:404]     Test net output #1: loss = 1.97243 (* 1 = 1.97243 loss)\nI0821 16:55:10.797618 32487 solver.cpp:228] Iteration 13800, loss = 0.048832\nI0821 16:55:10.797662 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:55:10.797678 32487 solver.cpp:244]     Train net output #1: loss = 0.0488316 (* 1 = 0.0488316 loss)\nI0821 16:55:10.871268 32487 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0821 16:57:27.549525 32487 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0821 16:58:48.974472 32487 solver.cpp:404]     Test net output #0: accuracy = 0.645\nI0821 16:58:48.974731 32487 solver.cpp:404]     Test net output #1: loss = 1.83955 (* 1 = 1.83955 loss)\nI0821 16:58:50.296912 32487 solver.cpp:228] Iteration 13900, loss = 0.103027\nI0821 16:58:50.296947 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:58:50.296962 32487 solver.cpp:244]     Train net output #1: loss = 0.103026 (* 1 = 0.103026 loss)\nI0821 16:58:50.378952 32487 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0821 17:01:06.998031 32487 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0821 
17:02:28.429397 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63008\nI0821 17:02:28.429664 32487 solver.cpp:404]     Test net output #1: loss = 2.17316 (* 1 = 2.17316 loss)\nI0821 17:02:29.751561 32487 solver.cpp:228] Iteration 14000, loss = 0.0810931\nI0821 17:02:29.751605 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 17:02:29.751621 32487 solver.cpp:244]     Train net output #1: loss = 0.0810928 (* 1 = 0.0810928 loss)\nI0821 17:02:29.829896 32487 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0821 17:04:46.673540 32487 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0821 17:06:08.132694 32487 solver.cpp:404]     Test net output #0: accuracy = 0.60732\nI0821 17:06:08.132975 32487 solver.cpp:404]     Test net output #1: loss = 2.39634 (* 1 = 2.39634 loss)\nI0821 17:06:09.455049 32487 solver.cpp:228] Iteration 14100, loss = 0.0151722\nI0821 17:06:09.455096 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:06:09.455118 32487 solver.cpp:244]     Train net output #1: loss = 0.0151718 (* 1 = 0.0151718 loss)\nI0821 17:06:09.540441 32487 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0821 17:08:26.388762 32487 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0821 17:09:47.824524 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65668\nI0821 17:09:47.824807 32487 solver.cpp:404]     Test net output #1: loss = 1.81929 (* 1 = 1.81929 loss)\nI0821 17:09:49.145858 32487 solver.cpp:228] Iteration 14200, loss = 0.0495982\nI0821 17:09:49.145905 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:09:49.145931 32487 solver.cpp:244]     Train net output #1: loss = 0.0495979 (* 1 = 0.0495979 loss)\nI0821 17:09:49.233155 32487 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0821 17:12:06.079195 32487 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0821 17:13:27.517546 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67456\nI0821 17:13:27.517822 32487 solver.cpp:404]     
Test net output #1: loss = 1.72737 (* 1 = 1.72737 loss)\nI0821 17:13:28.839779 32487 solver.cpp:228] Iteration 14300, loss = 0.0591663\nI0821 17:13:28.839825 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:13:28.839848 32487 solver.cpp:244]     Train net output #1: loss = 0.0591659 (* 1 = 0.0591659 loss)\nI0821 17:13:28.916188 32487 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0821 17:15:45.796706 32487 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0821 17:17:07.111977 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64592\nI0821 17:17:07.112267 32487 solver.cpp:404]     Test net output #1: loss = 2.02799 (* 1 = 2.02799 loss)\nI0821 17:17:08.434360 32487 solver.cpp:228] Iteration 14400, loss = 0.0701566\nI0821 17:17:08.434406 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 17:17:08.434429 32487 solver.cpp:244]     Train net output #1: loss = 0.0701563 (* 1 = 0.0701563 loss)\nI0821 17:17:08.510794 32487 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0821 17:19:25.429663 32487 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0821 17:20:46.742296 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70544\nI0821 17:20:46.742564 32487 solver.cpp:404]     Test net output #1: loss = 1.69133 (* 1 = 1.69133 loss)\nI0821 17:20:48.063913 32487 solver.cpp:228] Iteration 14500, loss = 0.05904\nI0821 17:20:48.063961 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:20:48.063984 32487 solver.cpp:244]     Train net output #1: loss = 0.0590396 (* 1 = 0.0590396 loss)\nI0821 17:20:48.149607 32487 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0821 17:23:04.967674 32487 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0821 17:24:26.275231 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68236\nI0821 17:24:26.275532 32487 solver.cpp:404]     Test net output #1: loss = 1.67131 (* 1 = 1.67131 loss)\nI0821 17:24:27.597980 32487 solver.cpp:228] Iteration 14600, loss = 
0.0817296\nI0821 17:24:27.598028 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 17:24:27.598052 32487 solver.cpp:244]     Train net output #1: loss = 0.0817292 (* 1 = 0.0817292 loss)\nI0821 17:24:27.672469 32487 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0821 17:26:44.468133 32487 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0821 17:28:05.776181 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6206\nI0821 17:28:05.776471 32487 solver.cpp:404]     Test net output #1: loss = 2.31217 (* 1 = 2.31217 loss)\nI0821 17:28:07.097621 32487 solver.cpp:228] Iteration 14700, loss = 0.0448789\nI0821 17:28:07.097666 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:28:07.097689 32487 solver.cpp:244]     Train net output #1: loss = 0.0448785 (* 1 = 0.0448785 loss)\nI0821 17:28:07.180181 32487 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0821 17:30:23.985553 32487 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0821 17:31:45.287542 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69472\nI0821 17:31:45.287824 32487 solver.cpp:404]     Test net output #1: loss = 1.70216 (* 1 = 1.70216 loss)\nI0821 17:31:46.608608 32487 solver.cpp:228] Iteration 14800, loss = 0.038551\nI0821 17:31:46.608654 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:31:46.608678 32487 solver.cpp:244]     Train net output #1: loss = 0.0385505 (* 1 = 0.0385505 loss)\nI0821 17:31:46.683428 32487 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0821 17:34:03.459791 32487 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0821 17:35:24.761873 32487 solver.cpp:404]     Test net output #0: accuracy = 0.5792\nI0821 17:35:24.762156 32487 solver.cpp:404]     Test net output #1: loss = 2.67648 (* 1 = 2.67648 loss)\nI0821 17:35:26.083205 32487 solver.cpp:228] Iteration 14900, loss = 0.0433166\nI0821 17:35:26.083251 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:35:26.083281 32487 
solver.cpp:244]     Train net output #1: loss = 0.0433161 (* 1 = 0.0433161 loss)\nI0821 17:35:26.167740 32487 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0821 17:37:43.202365 32487 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0821 17:39:04.516993 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65928\nI0821 17:39:04.517277 32487 solver.cpp:404]     Test net output #1: loss = 2.07206 (* 1 = 2.07206 loss)\nI0821 17:39:05.838766 32487 solver.cpp:228] Iteration 15000, loss = 0.0595328\nI0821 17:39:05.838809 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:39:05.838832 32487 solver.cpp:244]     Train net output #1: loss = 0.0595323 (* 1 = 0.0595323 loss)\nI0821 17:39:05.921536 32487 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0821 17:41:22.888154 32487 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0821 17:42:44.203485 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68396\nI0821 17:42:44.203771 32487 solver.cpp:404]     Test net output #1: loss = 1.79266 (* 1 = 1.79266 loss)\nI0821 17:42:45.527439 32487 solver.cpp:228] Iteration 15100, loss = 0.0809089\nI0821 17:42:45.527482 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:42:45.527505 32487 solver.cpp:244]     Train net output #1: loss = 0.0809085 (* 1 = 0.0809085 loss)\nI0821 17:42:45.607704 32487 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0821 17:45:02.523118 32487 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0821 17:46:23.831773 32487 solver.cpp:404]     Test net output #0: accuracy = 0.61764\nI0821 17:46:23.832056 32487 solver.cpp:404]     Test net output #1: loss = 2.06046 (* 1 = 2.06046 loss)\nI0821 17:46:25.153352 32487 solver.cpp:228] Iteration 15200, loss = 0.0439543\nI0821 17:46:25.153395 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:46:25.153419 32487 solver.cpp:244]     Train net output #1: loss = 0.0439539 (* 1 = 0.0439539 loss)\nI0821 17:46:25.231359 32487 
sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0821 17:48:42.038794 32487 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0821 17:50:03.450717 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68024\nI0821 17:50:03.450994 32487 solver.cpp:404]     Test net output #1: loss = 1.88546 (* 1 = 1.88546 loss)\nI0821 17:50:04.772066 32487 solver.cpp:228] Iteration 15300, loss = 0.0212851\nI0821 17:50:04.772110 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:50:04.772135 32487 solver.cpp:244]     Train net output #1: loss = 0.0212847 (* 1 = 0.0212847 loss)\nI0821 17:50:04.860455 32487 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0821 17:52:22.051599 32487 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0821 17:53:43.494031 32487 solver.cpp:404]     Test net output #0: accuracy = 0.5634\nI0821 17:53:43.494320 32487 solver.cpp:404]     Test net output #1: loss = 3.40472 (* 1 = 3.40472 loss)\nI0821 17:53:44.815487 32487 solver.cpp:228] Iteration 15400, loss = 0.0197504\nI0821 17:53:44.815533 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:53:44.815557 32487 solver.cpp:244]     Train net output #1: loss = 0.01975 (* 1 = 0.01975 loss)\nI0821 17:53:44.892174 32487 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0821 17:56:01.935140 32487 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0821 17:57:23.402706 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68744\nI0821 17:57:23.402977 32487 solver.cpp:404]     Test net output #1: loss = 1.69094 (* 1 = 1.69094 loss)\nI0821 17:57:24.724094 32487 solver.cpp:228] Iteration 15500, loss = 0.0850152\nI0821 17:57:24.724140 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 17:57:24.724164 32487 solver.cpp:244]     Train net output #1: loss = 0.0850148 (* 1 = 0.0850148 loss)\nI0821 17:57:24.800168 32487 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0821 17:59:41.746343 32487 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0821 
18:01:03.205538 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68308\nI0821 18:01:03.205806 32487 solver.cpp:404]     Test net output #1: loss = 1.65678 (* 1 = 1.65678 loss)\nI0821 18:01:04.527137 32487 solver.cpp:228] Iteration 15600, loss = 0.037237\nI0821 18:01:04.527184 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:01:04.527206 32487 solver.cpp:244]     Train net output #1: loss = 0.0372366 (* 1 = 0.0372366 loss)\nI0821 18:01:04.611420 32487 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0821 18:03:21.541980 32487 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0821 18:04:43.002058 32487 solver.cpp:404]     Test net output #0: accuracy = 0.57664\nI0821 18:04:43.002323 32487 solver.cpp:404]     Test net output #1: loss = 3.17026 (* 1 = 3.17026 loss)\nI0821 18:04:44.324494 32487 solver.cpp:228] Iteration 15700, loss = 0.0702061\nI0821 18:04:44.324542 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 18:04:44.324565 32487 solver.cpp:244]     Train net output #1: loss = 0.0702058 (* 1 = 0.0702058 loss)\nI0821 18:04:44.402650 32487 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0821 18:07:01.296169 32487 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0821 18:08:22.762351 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64244\nI0821 18:08:22.762686 32487 solver.cpp:404]     Test net output #1: loss = 2.14152 (* 1 = 2.14152 loss)\nI0821 18:08:24.084656 32487 solver.cpp:228] Iteration 15800, loss = 0.0627697\nI0821 18:08:24.084702 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:08:24.084727 32487 solver.cpp:244]     Train net output #1: loss = 0.0627693 (* 1 = 0.0627693 loss)\nI0821 18:08:24.166363 32487 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0821 18:10:40.922780 32487 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0821 18:12:02.384909 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65112\nI0821 18:12:02.385186 32487 solver.cpp:404]     
Test net output #1: loss = 1.87038 (* 1 = 1.87038 loss)\nI0821 18:12:03.708065 32487 solver.cpp:228] Iteration 15900, loss = 0.0691715\nI0821 18:12:03.708111 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:12:03.708137 32487 solver.cpp:244]     Train net output #1: loss = 0.0691711 (* 1 = 0.0691711 loss)\nI0821 18:12:03.781867 32487 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0821 18:14:20.450743 32487 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0821 18:15:41.917321 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64324\nI0821 18:15:41.917600 32487 solver.cpp:404]     Test net output #1: loss = 2.065 (* 1 = 2.065 loss)\nI0821 18:15:43.239658 32487 solver.cpp:228] Iteration 16000, loss = 0.0517775\nI0821 18:15:43.239706 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:15:43.239730 32487 solver.cpp:244]     Train net output #1: loss = 0.0517772 (* 1 = 0.0517772 loss)\nI0821 18:15:43.319483 32487 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0821 18:18:00.047154 32487 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0821 18:19:21.514973 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67188\nI0821 18:19:21.515250 32487 solver.cpp:404]     Test net output #1: loss = 1.81009 (* 1 = 1.81009 loss)\nI0821 18:19:22.837529 32487 solver.cpp:228] Iteration 16100, loss = 0.0526661\nI0821 18:19:22.837576 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:19:22.837600 32487 solver.cpp:244]     Train net output #1: loss = 0.0526658 (* 1 = 0.0526658 loss)\nI0821 18:19:22.914700 32487 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0821 18:21:39.583258 32487 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0821 18:23:01.049167 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69488\nI0821 18:23:01.049456 32487 solver.cpp:404]     Test net output #1: loss = 1.53255 (* 1 = 1.53255 loss)\nI0821 18:23:02.372526 32487 solver.cpp:228] Iteration 16200, loss = 
0.0475846\nI0821 18:23:02.372565 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:23:02.372586 32487 solver.cpp:244]     Train net output #1: loss = 0.0475843 (* 1 = 0.0475843 loss)\nI0821 18:23:02.453197 32487 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0821 18:25:19.189355 32487 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0821 18:26:40.654026 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7064\nI0821 18:26:40.654314 32487 solver.cpp:404]     Test net output #1: loss = 1.56063 (* 1 = 1.56063 loss)\nI0821 18:26:41.976331 32487 solver.cpp:228] Iteration 16300, loss = 0.032844\nI0821 18:26:41.976379 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:26:41.976402 32487 solver.cpp:244]     Train net output #1: loss = 0.0328437 (* 1 = 0.0328437 loss)\nI0821 18:26:42.053594 32487 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0821 18:28:58.716164 32487 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0821 18:30:20.190007 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6876\nI0821 18:30:20.190296 32487 solver.cpp:404]     Test net output #1: loss = 1.66721 (* 1 = 1.66721 loss)\nI0821 18:30:21.512439 32487 solver.cpp:228] Iteration 16400, loss = 0.0622571\nI0821 18:30:21.512475 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:30:21.512490 32487 solver.cpp:244]     Train net output #1: loss = 0.0622568 (* 1 = 0.0622568 loss)\nI0821 18:30:21.593307 32487 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0821 18:32:38.256368 32487 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0821 18:33:59.713006 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68528\nI0821 18:33:59.713275 32487 solver.cpp:404]     Test net output #1: loss = 1.6064 (* 1 = 1.6064 loss)\nI0821 18:34:01.034952 32487 solver.cpp:228] Iteration 16500, loss = 0.010242\nI0821 18:34:01.034987 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:34:01.035001 32487 
solver.cpp:244]     Train net output #1: loss = 0.0102417 (* 1 = 0.0102417 loss)\nI0821 18:34:01.110296 32487 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0821 18:36:17.807327 32487 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0821 18:37:39.270579 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67132\nI0821 18:37:39.270856 32487 solver.cpp:404]     Test net output #1: loss = 1.84657 (* 1 = 1.84657 loss)\nI0821 18:37:40.593221 32487 solver.cpp:228] Iteration 16600, loss = 0.0878017\nI0821 18:37:40.593263 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:37:40.593281 32487 solver.cpp:244]     Train net output #1: loss = 0.0878013 (* 1 = 0.0878013 loss)\nI0821 18:37:40.671835 32487 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0821 18:39:57.202742 32487 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0821 18:41:18.675559 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68456\nI0821 18:41:18.675815 32487 solver.cpp:404]     Test net output #1: loss = 1.76069 (* 1 = 1.76069 loss)\nI0821 18:41:19.997088 32487 solver.cpp:228] Iteration 16700, loss = 0.0913283\nI0821 18:41:19.997134 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 18:41:19.997151 32487 solver.cpp:244]     Train net output #1: loss = 0.0913279 (* 1 = 0.0913279 loss)\nI0821 18:41:20.077029 32487 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0821 18:43:36.724180 32487 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0821 18:44:58.176523 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64356\nI0821 18:44:58.176786 32487 solver.cpp:404]     Test net output #1: loss = 1.9603 (* 1 = 1.9603 loss)\nI0821 18:44:59.498466 32487 solver.cpp:228] Iteration 16800, loss = 0.0365359\nI0821 18:44:59.498502 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:44:59.498517 32487 solver.cpp:244]     Train net output #1: loss = 0.0365355 (* 1 = 0.0365355 loss)\nI0821 18:44:59.571151 32487 sgd_solver.cpp:166] 
Iteration 16800, lr = 0.35\nI0821 18:47:16.239207 32487 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0821 18:48:37.689988 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67016\nI0821 18:48:37.690286 32487 solver.cpp:404]     Test net output #1: loss = 1.67306 (* 1 = 1.67306 loss)\nI0821 18:48:39.012094 32487 solver.cpp:228] Iteration 16900, loss = 0.0829429\nI0821 18:48:39.012140 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:48:39.012157 32487 solver.cpp:244]     Train net output #1: loss = 0.0829426 (* 1 = 0.0829426 loss)\nI0821 18:48:39.090314 32487 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0821 18:50:55.745591 32487 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0821 18:52:17.206032 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59568\nI0821 18:52:17.206321 32487 solver.cpp:404]     Test net output #1: loss = 2.34296 (* 1 = 2.34296 loss)\nI0821 18:52:18.527787 32487 solver.cpp:228] Iteration 17000, loss = 0.0528438\nI0821 18:52:18.527824 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:52:18.527840 32487 solver.cpp:244]     Train net output #1: loss = 0.0528434 (* 1 = 0.0528434 loss)\nI0821 18:52:18.609313 32487 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0821 18:54:35.222579 32487 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0821 18:55:56.683924 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65932\nI0821 18:55:56.684178 32487 solver.cpp:404]     Test net output #1: loss = 2.0365 (* 1 = 2.0365 loss)\nI0821 18:55:58.005988 32487 solver.cpp:228] Iteration 17100, loss = 0.0295087\nI0821 18:55:58.006034 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:55:58.006052 32487 solver.cpp:244]     Train net output #1: loss = 0.0295083 (* 1 = 0.0295083 loss)\nI0821 18:55:58.082765 32487 sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0821 18:58:14.657982 32487 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0821 18:59:36.118799 
32487 solver.cpp:404]     Test net output #0: accuracy = 0.65836\nI0821 18:59:36.119084 32487 solver.cpp:404]     Test net output #1: loss = 1.98021 (* 1 = 1.98021 loss)\nI0821 18:59:37.440654 32487 solver.cpp:228] Iteration 17200, loss = 0.0452958\nI0821 18:59:37.440698 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:59:37.440716 32487 solver.cpp:244]     Train net output #1: loss = 0.0452954 (* 1 = 0.0452954 loss)\nI0821 18:59:37.518618 32487 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0821 19:01:54.153961 32487 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0821 19:03:15.615612 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6062\nI0821 19:03:15.615895 32487 solver.cpp:404]     Test net output #1: loss = 2.45934 (* 1 = 2.45934 loss)\nI0821 19:03:16.937593 32487 solver.cpp:228] Iteration 17300, loss = 0.0196254\nI0821 19:03:16.937629 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:03:16.937644 32487 solver.cpp:244]     Train net output #1: loss = 0.019625 (* 1 = 0.019625 loss)\nI0821 19:03:17.019419 32487 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0821 19:05:33.673125 32487 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0821 19:06:55.131152 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62204\nI0821 19:06:55.131438 32487 solver.cpp:404]     Test net output #1: loss = 2.57674 (* 1 = 2.57674 loss)\nI0821 19:06:56.454263 32487 solver.cpp:228] Iteration 17400, loss = 0.0651898\nI0821 19:06:56.454298 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:06:56.454313 32487 solver.cpp:244]     Train net output #1: loss = 0.0651893 (* 1 = 0.0651893 loss)\nI0821 19:06:56.538569 32487 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0821 19:09:13.192304 32487 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0821 19:10:34.653847 32487 solver.cpp:404]     Test net output #0: accuracy = 0.61228\nI0821 19:10:34.654129 32487 solver.cpp:404]     Test net output #1: 
loss = 2.42202 (* 1 = 2.42202 loss)\nI0821 19:10:35.975463 32487 solver.cpp:228] Iteration 17500, loss = 0.0472182\nI0821 19:10:35.975497 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:10:35.975512 32487 solver.cpp:244]     Train net output #1: loss = 0.0472177 (* 1 = 0.0472177 loss)\nI0821 19:10:36.057965 32487 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0821 19:12:52.771757 32487 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0821 19:14:14.238512 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59608\nI0821 19:14:14.238788 32487 solver.cpp:404]     Test net output #1: loss = 2.73292 (* 1 = 2.73292 loss)\nI0821 19:14:15.560473 32487 solver.cpp:228] Iteration 17600, loss = 0.0839102\nI0821 19:14:15.560508 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:14:15.560524 32487 solver.cpp:244]     Train net output #1: loss = 0.0839098 (* 1 = 0.0839098 loss)\nI0821 19:14:15.644248 32487 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0821 19:16:32.319095 32487 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0821 19:17:53.782941 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67248\nI0821 19:17:53.783205 32487 solver.cpp:404]     Test net output #1: loss = 1.72007 (* 1 = 1.72007 loss)\nI0821 19:17:55.104931 32487 solver.cpp:228] Iteration 17700, loss = 0.052057\nI0821 19:17:55.104966 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:17:55.104982 32487 solver.cpp:244]     Train net output #1: loss = 0.0520565 (* 1 = 0.0520565 loss)\nI0821 19:17:55.180704 32487 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0821 19:20:11.891733 32487 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0821 19:21:33.354693 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59824\nI0821 19:21:33.354974 32487 solver.cpp:404]     Test net output #1: loss = 2.31184 (* 1 = 2.31184 loss)\nI0821 19:21:34.677052 32487 solver.cpp:228] Iteration 17800, loss = 0.018105\nI0821 
19:21:34.677088 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:21:34.677103 32487 solver.cpp:244]     Train net output #1: loss = 0.0181045 (* 1 = 0.0181045 loss)\nI0821 19:21:34.753412 32487 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0821 19:23:51.298413 32487 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0821 19:25:12.757114 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66812\nI0821 19:25:12.757396 32487 solver.cpp:404]     Test net output #1: loss = 1.85205 (* 1 = 1.85205 loss)\nI0821 19:25:14.079747 32487 solver.cpp:228] Iteration 17900, loss = 0.0235178\nI0821 19:25:14.079792 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:25:14.079808 32487 solver.cpp:244]     Train net output #1: loss = 0.0235173 (* 1 = 0.0235173 loss)\nI0821 19:25:14.158329 32487 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0821 19:27:30.903583 32487 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0821 19:28:52.369138 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59476\nI0821 19:28:52.369421 32487 solver.cpp:404]     Test net output #1: loss = 2.44729 (* 1 = 2.44729 loss)\nI0821 19:28:53.691370 32487 solver.cpp:228] Iteration 18000, loss = 0.131623\nI0821 19:28:53.691412 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:28:53.691428 32487 solver.cpp:244]     Train net output #1: loss = 0.131622 (* 1 = 0.131622 loss)\nI0821 19:28:53.768170 32487 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0821 19:31:10.496454 32487 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0821 19:32:31.856477 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67348\nI0821 19:32:31.856757 32487 solver.cpp:404]     Test net output #1: loss = 1.94015 (* 1 = 1.94015 loss)\nI0821 19:32:33.179026 32487 solver.cpp:228] Iteration 18100, loss = 0.0315578\nI0821 19:32:33.179062 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:32:33.179078 32487 solver.cpp:244]     Train 
net output #1: loss = 0.0315573 (* 1 = 0.0315573 loss)\nI0821 19:32:33.253165 32487 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0821 19:34:49.946404 32487 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0821 19:36:11.283501 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64664\nI0821 19:36:11.283783 32487 solver.cpp:404]     Test net output #1: loss = 1.9511 (* 1 = 1.9511 loss)\nI0821 19:36:12.605512 32487 solver.cpp:228] Iteration 18200, loss = 0.0335058\nI0821 19:36:12.605547 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:36:12.605563 32487 solver.cpp:244]     Train net output #1: loss = 0.0335053 (* 1 = 0.0335053 loss)\nI0821 19:36:12.693114 32487 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0821 19:38:29.364948 32487 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0821 19:39:50.701156 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59936\nI0821 19:39:50.701447 32487 solver.cpp:404]     Test net output #1: loss = 2.37028 (* 1 = 2.37028 loss)\nI0821 19:39:52.022524 32487 solver.cpp:228] Iteration 18300, loss = 0.0729399\nI0821 19:39:52.022559 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:39:52.022574 32487 solver.cpp:244]     Train net output #1: loss = 0.0729394 (* 1 = 0.0729394 loss)\nI0821 19:39:52.105175 32487 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0821 19:42:08.736424 32487 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0821 19:43:30.064244 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65688\nI0821 19:43:30.064518 32487 solver.cpp:404]     Test net output #1: loss = 2.13838 (* 1 = 2.13838 loss)\nI0821 19:43:31.385898 32487 solver.cpp:228] Iteration 18400, loss = 0.0435078\nI0821 19:43:31.385936 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:43:31.385951 32487 solver.cpp:244]     Train net output #1: loss = 0.0435073 (* 1 = 0.0435073 loss)\nI0821 19:43:31.461359 32487 sgd_solver.cpp:166] Iteration 18400, lr = 
0.35\nI0821 19:45:48.129854 32487 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0821 19:47:09.476094 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65276\nI0821 19:47:09.476375 32487 solver.cpp:404]     Test net output #1: loss = 2.04429 (* 1 = 2.04429 loss)\nI0821 19:47:10.798316 32487 solver.cpp:228] Iteration 18500, loss = 0.0292827\nI0821 19:47:10.798349 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:47:10.798364 32487 solver.cpp:244]     Train net output #1: loss = 0.0292822 (* 1 = 0.0292822 loss)\nI0821 19:47:10.874425 32487 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0821 19:49:27.462266 32487 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0821 19:50:48.800704 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67304\nI0821 19:50:48.800977 32487 solver.cpp:404]     Test net output #1: loss = 1.75979 (* 1 = 1.75979 loss)\nI0821 19:50:50.123070 32487 solver.cpp:228] Iteration 18600, loss = 0.0718618\nI0821 19:50:50.123116 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:50:50.123131 32487 solver.cpp:244]     Train net output #1: loss = 0.0718613 (* 1 = 0.0718613 loss)\nI0821 19:50:50.205318 32487 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0821 19:53:06.788666 32487 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0821 19:54:28.123765 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64012\nI0821 19:54:28.124018 32487 solver.cpp:404]     Test net output #1: loss = 2.19742 (* 1 = 2.19742 loss)\nI0821 19:54:29.445197 32487 solver.cpp:228] Iteration 18700, loss = 0.0157665\nI0821 19:54:29.445231 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:54:29.445246 32487 solver.cpp:244]     Train net output #1: loss = 0.015766 (* 1 = 0.015766 loss)\nI0821 19:54:29.522902 32487 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0821 19:56:46.104356 32487 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0821 19:58:07.446749 32487 solver.cpp:404]     
Test net output #0: accuracy = 0.62208\nI0821 19:58:07.447002 32487 solver.cpp:404]     Test net output #1: loss = 2.15522 (* 1 = 2.15522 loss)\nI0821 19:58:08.768426 32487 solver.cpp:228] Iteration 18800, loss = 0.0217593\nI0821 19:58:08.768468 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:58:08.768486 32487 solver.cpp:244]     Train net output #1: loss = 0.0217588 (* 1 = 0.0217588 loss)\nI0821 19:58:08.849120 32487 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0821 20:00:25.491232 32487 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0821 20:01:46.836422 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65984\nI0821 20:01:46.836673 32487 solver.cpp:404]     Test net output #1: loss = 1.84455 (* 1 = 1.84455 loss)\nI0821 20:01:48.157966 32487 solver.cpp:228] Iteration 18900, loss = 0.0526609\nI0821 20:01:48.158012 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:01:48.158028 32487 solver.cpp:244]     Train net output #1: loss = 0.0526605 (* 1 = 0.0526605 loss)\nI0821 20:01:48.235980 32487 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0821 20:04:05.174739 32487 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0821 20:05:27.489349 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63424\nI0821 20:05:27.489657 32487 solver.cpp:404]     Test net output #1: loss = 2.23036 (* 1 = 2.23036 loss)\nI0821 20:05:28.815737 32487 solver.cpp:228] Iteration 19000, loss = 0.0715401\nI0821 20:05:28.815780 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:05:28.815796 32487 solver.cpp:244]     Train net output #1: loss = 0.0715396 (* 1 = 0.0715396 loss)\nI0821 20:05:28.892477 32487 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0821 20:07:45.822012 32487 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0821 20:09:08.134336 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65412\nI0821 20:09:08.134655 32487 solver.cpp:404]     Test net output #1: loss = 1.93777 (* 1 
= 1.93777 loss)\nI0821 20:09:09.460345 32487 solver.cpp:228] Iteration 19100, loss = 0.0483999\nI0821 20:09:09.460387 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:09:09.460408 32487 solver.cpp:244]     Train net output #1: loss = 0.0483994 (* 1 = 0.0483994 loss)\nI0821 20:09:09.540314 32487 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0821 20:11:26.539000 32487 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0821 20:12:48.843214 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7098\nI0821 20:12:48.843528 32487 solver.cpp:404]     Test net output #1: loss = 1.59122 (* 1 = 1.59122 loss)\nI0821 20:12:50.169368 32487 solver.cpp:228] Iteration 19200, loss = 0.104639\nI0821 20:12:50.169410 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 20:12:50.169426 32487 solver.cpp:244]     Train net output #1: loss = 0.104639 (* 1 = 0.104639 loss)\nI0821 20:12:50.253648 32487 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0821 20:15:07.671164 32487 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0821 20:16:29.132000 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6562\nI0821 20:16:29.132221 32487 solver.cpp:404]     Test net output #1: loss = 1.83536 (* 1 = 1.83536 loss)\nI0821 20:16:30.454128 32487 solver.cpp:228] Iteration 19300, loss = 0.0134041\nI0821 20:16:30.454164 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:16:30.454179 32487 solver.cpp:244]     Train net output #1: loss = 0.0134036 (* 1 = 0.0134036 loss)\nI0821 20:16:30.545814 32487 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0821 20:18:48.111898 32487 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0821 20:20:09.621373 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66168\nI0821 20:20:09.621631 32487 solver.cpp:404]     Test net output #1: loss = 1.81905 (* 1 = 1.81905 loss)\nI0821 20:20:10.945272 32487 solver.cpp:228] Iteration 19400, loss = 0.0387128\nI0821 20:20:10.945312 32487 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:20:10.945329 32487 solver.cpp:244]     Train net output #1: loss = 0.0387123 (* 1 = 0.0387123 loss)\nI0821 20:20:11.029234 32487 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0821 20:22:28.681160 32487 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0821 20:23:50.167199 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66456\nI0821 20:23:50.167433 32487 solver.cpp:404]     Test net output #1: loss = 2.17776 (* 1 = 2.17776 loss)\nI0821 20:23:51.489190 32487 solver.cpp:228] Iteration 19500, loss = 0.0592805\nI0821 20:23:51.489231 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:23:51.489248 32487 solver.cpp:244]     Train net output #1: loss = 0.0592801 (* 1 = 0.0592801 loss)\nI0821 20:23:51.579680 32487 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0821 20:26:09.289357 32487 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0821 20:27:30.784490 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64168\nI0821 20:27:30.784732 32487 solver.cpp:404]     Test net output #1: loss = 2.09033 (* 1 = 2.09033 loss)\nI0821 20:27:32.106431 32487 solver.cpp:228] Iteration 19600, loss = 0.0320394\nI0821 20:27:32.106472 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:27:32.106488 32487 solver.cpp:244]     Train net output #1: loss = 0.032039 (* 1 = 0.032039 loss)\nI0821 20:27:32.193434 32487 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0821 20:29:49.712812 32487 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0821 20:31:11.210448 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6126\nI0821 20:31:11.210712 32487 solver.cpp:404]     Test net output #1: loss = 2.33522 (* 1 = 2.33522 loss)\nI0821 20:31:12.533078 32487 solver.cpp:228] Iteration 19700, loss = 0.040071\nI0821 20:31:12.533121 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:31:12.533138 32487 solver.cpp:244]     Train net output #1: 
loss = 0.0400705 (* 1 = 0.0400705 loss)\nI0821 20:31:12.618849 32487 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0821 20:33:30.194376 32487 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0821 20:34:51.693369 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69372\nI0821 20:34:51.693624 32487 solver.cpp:404]     Test net output #1: loss = 1.68232 (* 1 = 1.68232 loss)\nI0821 20:34:53.015554 32487 solver.cpp:228] Iteration 19800, loss = 0.0960807\nI0821 20:34:53.015597 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 20:34:53.015614 32487 solver.cpp:244]     Train net output #1: loss = 0.0960803 (* 1 = 0.0960803 loss)\nI0821 20:34:53.097990 32487 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0821 20:37:10.695252 32487 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0821 20:38:32.198959 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6448\nI0821 20:38:32.199208 32487 solver.cpp:404]     Test net output #1: loss = 2.18783 (* 1 = 2.18783 loss)\nI0821 20:38:33.523080 32487 solver.cpp:228] Iteration 19900, loss = 0.06609\nI0821 20:38:33.523126 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:38:33.523144 32487 solver.cpp:244]     Train net output #1: loss = 0.0660896 (* 1 = 0.0660896 loss)\nI0821 20:38:33.610587 32487 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0821 20:40:51.348891 32487 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0821 20:42:12.864030 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67432\nI0821 20:42:12.864265 32487 solver.cpp:404]     Test net output #1: loss = 1.94851 (* 1 = 1.94851 loss)\nI0821 20:42:14.187708 32487 solver.cpp:228] Iteration 20000, loss = 0.0218494\nI0821 20:42:14.187753 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:42:14.187770 32487 solver.cpp:244]     Train net output #1: loss = 0.021849 (* 1 = 0.021849 loss)\nI0821 20:42:14.268379 32487 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0821 
20:44:31.764363 32487 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0821 20:45:53.281548 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6574\nI0821 20:45:53.281816 32487 solver.cpp:404]     Test net output #1: loss = 1.77452 (* 1 = 1.77452 loss)\nI0821 20:45:54.604650 32487 solver.cpp:228] Iteration 20100, loss = 0.0515716\nI0821 20:45:54.604696 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:45:54.604712 32487 solver.cpp:244]     Train net output #1: loss = 0.0515712 (* 1 = 0.0515712 loss)\nI0821 20:45:54.686594 32487 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0821 20:48:12.245510 32487 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0821 20:49:33.759099 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70904\nI0821 20:49:33.759328 32487 solver.cpp:404]     Test net output #1: loss = 1.53095 (* 1 = 1.53095 loss)\nI0821 20:49:35.082428 32487 solver.cpp:228] Iteration 20200, loss = 0.0767486\nI0821 20:49:35.082473 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:49:35.082490 32487 solver.cpp:244]     Train net output #1: loss = 0.0767482 (* 1 = 0.0767482 loss)\nI0821 20:49:35.172915 32487 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0821 20:51:52.769659 32487 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0821 20:53:14.287626 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68412\nI0821 20:53:14.287894 32487 solver.cpp:404]     Test net output #1: loss = 1.77238 (* 1 = 1.77238 loss)\nI0821 20:53:15.610702 32487 solver.cpp:228] Iteration 20300, loss = 0.0621209\nI0821 20:53:15.610745 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:53:15.610762 32487 solver.cpp:244]     Train net output #1: loss = 0.0621206 (* 1 = 0.0621206 loss)\nI0821 20:53:15.691864 32487 sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0821 20:55:33.292245 32487 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0821 20:56:54.816205 32487 solver.cpp:404]     Test 
net output #0: accuracy = 0.65848\nI0821 20:56:54.816457 32487 solver.cpp:404]     Test net output #1: loss = 2.00676 (* 1 = 2.00676 loss)\nI0821 20:56:56.139544 32487 solver.cpp:228] Iteration 20400, loss = 0.0530486\nI0821 20:56:56.139578 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:56:56.139595 32487 solver.cpp:244]     Train net output #1: loss = 0.0530483 (* 1 = 0.0530483 loss)\nI0821 20:56:56.225143 32487 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0821 20:59:13.935020 32487 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0821 21:00:35.465885 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69396\nI0821 21:00:35.466151 32487 solver.cpp:404]     Test net output #1: loss = 1.72883 (* 1 = 1.72883 loss)\nI0821 21:00:36.788518 32487 solver.cpp:228] Iteration 20500, loss = 0.0398008\nI0821 21:00:36.788563 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:00:36.788579 32487 solver.cpp:244]     Train net output #1: loss = 0.0398005 (* 1 = 0.0398005 loss)\nI0821 21:00:36.871160 32487 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0821 21:02:54.438812 32487 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0821 21:04:15.962378 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69684\nI0821 21:04:15.962631 32487 solver.cpp:404]     Test net output #1: loss = 1.74223 (* 1 = 1.74223 loss)\nI0821 21:04:17.284888 32487 solver.cpp:228] Iteration 20600, loss = 0.0367203\nI0821 21:04:17.284922 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:04:17.284939 32487 solver.cpp:244]     Train net output #1: loss = 0.0367199 (* 1 = 0.0367199 loss)\nI0821 21:04:17.373153 32487 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0821 21:06:34.917937 32487 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0821 21:07:56.443930 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69976\nI0821 21:07:56.444197 32487 solver.cpp:404]     Test net output #1: loss = 1.73751 (* 1 = 
1.73751 loss)\nI0821 21:07:57.766747 32487 solver.cpp:228] Iteration 20700, loss = 0.0546249\nI0821 21:07:57.766788 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:07:57.766804 32487 solver.cpp:244]     Train net output #1: loss = 0.0546245 (* 1 = 0.0546245 loss)\nI0821 21:07:57.849486 32487 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0821 21:10:15.352375 32487 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0821 21:11:36.891716 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6968\nI0821 21:11:36.891940 32487 solver.cpp:404]     Test net output #1: loss = 1.67353 (* 1 = 1.67353 loss)\nI0821 21:11:38.215358 32487 solver.cpp:228] Iteration 20800, loss = 0.0638559\nI0821 21:11:38.215402 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:11:38.215418 32487 solver.cpp:244]     Train net output #1: loss = 0.0638555 (* 1 = 0.0638555 loss)\nI0821 21:11:38.294699 32487 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0821 21:13:55.858754 32487 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0821 21:15:17.391512 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71276\nI0821 21:15:17.391783 32487 solver.cpp:404]     Test net output #1: loss = 1.4242 (* 1 = 1.4242 loss)\nI0821 21:15:18.714375 32487 solver.cpp:228] Iteration 20900, loss = 0.0326033\nI0821 21:15:18.714407 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:15:18.714423 32487 solver.cpp:244]     Train net output #1: loss = 0.0326029 (* 1 = 0.0326029 loss)\nI0821 21:15:18.800420 32487 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0821 21:17:36.247992 32487 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0821 21:18:57.769409 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68664\nI0821 21:18:57.769682 32487 solver.cpp:404]     Test net output #1: loss = 1.66464 (* 1 = 1.66464 loss)\nI0821 21:18:59.091992 32487 solver.cpp:228] Iteration 21000, loss = 0.0317689\nI0821 21:18:59.092036 32487 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:18:59.092052 32487 solver.cpp:244]     Train net output #1: loss = 0.0317685 (* 1 = 0.0317685 loss)\nI0821 21:18:59.185112 32487 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0821 21:21:16.733119 32487 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0821 21:22:38.259201 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65076\nI0821 21:22:38.259467 32487 solver.cpp:404]     Test net output #1: loss = 1.9666 (* 1 = 1.9666 loss)\nI0821 21:22:39.581961 32487 solver.cpp:228] Iteration 21100, loss = 0.0401192\nI0821 21:22:39.582005 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:22:39.582022 32487 solver.cpp:244]     Train net output #1: loss = 0.0401188 (* 1 = 0.0401188 loss)\nI0821 21:22:39.666138 32487 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0821 21:24:57.266234 32487 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0821 21:26:18.790825 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68304\nI0821 21:26:18.791054 32487 solver.cpp:404]     Test net output #1: loss = 1.54501 (* 1 = 1.54501 loss)\nI0821 21:26:20.113876 32487 solver.cpp:228] Iteration 21200, loss = 0.108221\nI0821 21:26:20.113911 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 21:26:20.113926 32487 solver.cpp:244]     Train net output #1: loss = 0.10822 (* 1 = 0.10822 loss)\nI0821 21:26:20.203413 32487 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0821 21:28:37.645694 32487 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0821 21:29:59.171888 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69256\nI0821 21:29:59.172183 32487 solver.cpp:404]     Test net output #1: loss = 1.67106 (* 1 = 1.67106 loss)\nI0821 21:30:00.493991 32487 solver.cpp:228] Iteration 21300, loss = 0.0407073\nI0821 21:30:00.494035 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:30:00.494050 32487 solver.cpp:244]     Train net output #1: loss = 
0.0407069 (* 1 = 0.0407069 loss)\nI0821 21:30:00.586866 32487 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0821 21:32:18.269556 32487 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0821 21:33:39.798513 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67404\nI0821 21:33:39.798776 32487 solver.cpp:404]     Test net output #1: loss = 1.87436 (* 1 = 1.87436 loss)\nI0821 21:33:41.122587 32487 solver.cpp:228] Iteration 21400, loss = 0.0439533\nI0821 21:33:41.122622 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:33:41.122637 32487 solver.cpp:244]     Train net output #1: loss = 0.0439529 (* 1 = 0.0439529 loss)\nI0821 21:33:41.208346 32487 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0821 21:35:58.818758 32487 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0821 21:37:20.342561 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6078\nI0821 21:37:20.342847 32487 solver.cpp:404]     Test net output #1: loss = 2.60496 (* 1 = 2.60496 loss)\nI0821 21:37:21.666000 32487 solver.cpp:228] Iteration 21500, loss = 0.0331524\nI0821 21:37:21.666035 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:37:21.666051 32487 solver.cpp:244]     Train net output #1: loss = 0.033152 (* 1 = 0.033152 loss)\nI0821 21:37:21.746682 32487 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0821 21:39:39.422124 32487 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0821 21:41:00.940661 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66184\nI0821 21:41:00.940891 32487 solver.cpp:404]     Test net output #1: loss = 1.94652 (* 1 = 1.94652 loss)\nI0821 21:41:02.263352 32487 solver.cpp:228] Iteration 21600, loss = 0.0652235\nI0821 21:41:02.263394 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 21:41:02.263411 32487 solver.cpp:244]     Train net output #1: loss = 0.0652231 (* 1 = 0.0652231 loss)\nI0821 21:41:02.345633 32487 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0821 21:43:19.891737 
32487 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0821 21:44:41.407510 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71516\nI0821 21:44:41.407732 32487 solver.cpp:404]     Test net output #1: loss = 1.40789 (* 1 = 1.40789 loss)\nI0821 21:44:42.729743 32487 solver.cpp:228] Iteration 21700, loss = 0.0824253\nI0821 21:44:42.729776 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:44:42.729791 32487 solver.cpp:244]     Train net output #1: loss = 0.0824249 (* 1 = 0.0824249 loss)\nI0821 21:44:42.822496 32487 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0821 21:47:00.374296 32487 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0821 21:48:21.877661 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72588\nI0821 21:48:21.877934 32487 solver.cpp:404]     Test net output #1: loss = 1.44266 (* 1 = 1.44266 loss)\nI0821 21:48:23.200647 32487 solver.cpp:228] Iteration 21800, loss = 0.0221309\nI0821 21:48:23.200691 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:48:23.200707 32487 solver.cpp:244]     Train net output #1: loss = 0.0221305 (* 1 = 0.0221305 loss)\nI0821 21:48:23.289657 32487 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0821 21:50:41.012380 32487 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0821 21:52:02.442723 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67876\nI0821 21:52:02.442998 32487 solver.cpp:404]     Test net output #1: loss = 1.74831 (* 1 = 1.74831 loss)\nI0821 21:52:03.765259 32487 solver.cpp:228] Iteration 21900, loss = 0.0218267\nI0821 21:52:03.765293 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:52:03.765310 32487 solver.cpp:244]     Train net output #1: loss = 0.0218263 (* 1 = 0.0218263 loss)\nI0821 21:52:03.854569 32487 sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0821 21:54:21.337721 32487 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0821 21:55:42.776043 32487 solver.cpp:404]     Test net output #0: 
accuracy = 0.66576\nI0821 21:55:42.776304 32487 solver.cpp:404]     Test net output #1: loss = 2.05251 (* 1 = 2.05251 loss)\nI0821 21:55:44.098506 32487 solver.cpp:228] Iteration 22000, loss = 0.0508416\nI0821 21:55:44.098552 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:55:44.098569 32487 solver.cpp:244]     Train net output #1: loss = 0.0508412 (* 1 = 0.0508412 loss)\nI0821 21:55:44.183574 32487 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0821 21:58:01.793583 32487 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0821 21:59:23.227159 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68224\nI0821 21:59:23.227422 32487 solver.cpp:404]     Test net output #1: loss = 1.7981 (* 1 = 1.7981 loss)\nI0821 21:59:24.550206 32487 solver.cpp:228] Iteration 22100, loss = 0.0363835\nI0821 21:59:24.550252 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:59:24.550268 32487 solver.cpp:244]     Train net output #1: loss = 0.0363831 (* 1 = 0.0363831 loss)\nI0821 21:59:24.641819 32487 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0821 22:01:42.108646 32487 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0821 22:03:03.549906 32487 solver.cpp:404]     Test net output #0: accuracy = 0.697\nI0821 22:03:03.550182 32487 solver.cpp:404]     Test net output #1: loss = 1.6991 (* 1 = 1.6991 loss)\nI0821 22:03:04.872970 32487 solver.cpp:228] Iteration 22200, loss = 0.0491986\nI0821 22:03:04.873004 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 22:03:04.873020 32487 solver.cpp:244]     Train net output #1: loss = 0.0491982 (* 1 = 0.0491982 loss)\nI0821 22:03:04.961053 32487 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0821 22:05:22.512897 32487 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0821 22:06:43.947476 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6624\nI0821 22:06:43.947696 32487 solver.cpp:404]     Test net output #1: loss = 2.12661 (* 1 = 2.12661 loss)\nI0821 
22:06:45.270124 32487 solver.cpp:228] Iteration 22300, loss = 0.0221572\nI0821 22:06:45.270169 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:06:45.270184 32487 solver.cpp:244]     Train net output #1: loss = 0.0221568 (* 1 = 0.0221568 loss)\nI0821 22:06:45.363098 32487 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0821 22:09:03.057881 32487 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0821 22:10:24.486935 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65884\nI0821 22:10:24.487162 32487 solver.cpp:404]     Test net output #1: loss = 2.05028 (* 1 = 2.05028 loss)\nI0821 22:10:25.809697 32487 solver.cpp:228] Iteration 22400, loss = 0.0368547\nI0821 22:10:25.809741 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:10:25.809757 32487 solver.cpp:244]     Train net output #1: loss = 0.0368543 (* 1 = 0.0368543 loss)\nI0821 22:10:25.900996 32487 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0821 22:12:43.357931 32487 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0821 22:14:04.781611 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI0821 22:14:04.781843 32487 solver.cpp:404]     Test net output #1: loss = 1.40884 (* 1 = 1.40884 loss)\nI0821 22:14:06.104724 32487 solver.cpp:228] Iteration 22500, loss = 0.0587116\nI0821 22:14:06.104759 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:14:06.104774 32487 solver.cpp:244]     Train net output #1: loss = 0.0587112 (* 1 = 0.0587112 loss)\nI0821 22:14:06.191016 32487 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0821 22:16:23.564174 32487 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0821 22:17:44.991660 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68416\nI0821 22:17:44.991930 32487 solver.cpp:404]     Test net output #1: loss = 1.90127 (* 1 = 1.90127 loss)\nI0821 22:17:46.314441 32487 solver.cpp:228] Iteration 22600, loss = 0.0127994\nI0821 22:17:46.314484 32487 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0821 22:17:46.314501 32487 solver.cpp:244]     Train net output #1: loss = 0.012799 (* 1 = 0.012799 loss)\nI0821 22:17:46.405061 32487 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0821 22:20:03.941005 32487 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0821 22:21:25.367995 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68056\nI0821 22:21:25.368232 32487 solver.cpp:404]     Test net output #1: loss = 1.70375 (* 1 = 1.70375 loss)\nI0821 22:21:26.690688 32487 solver.cpp:228] Iteration 22700, loss = 0.026314\nI0821 22:21:26.690733 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:21:26.690749 32487 solver.cpp:244]     Train net output #1: loss = 0.0263136 (* 1 = 0.0263136 loss)\nI0821 22:21:26.772672 32487 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0821 22:23:44.268462 32487 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0821 22:25:05.796918 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72408\nI0821 22:25:05.797149 32487 solver.cpp:404]     Test net output #1: loss = 1.46514 (* 1 = 1.46514 loss)\nI0821 22:25:07.119581 32487 solver.cpp:228] Iteration 22800, loss = 0.0537251\nI0821 22:25:07.119627 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 22:25:07.119643 32487 solver.cpp:244]     Train net output #1: loss = 0.0537248 (* 1 = 0.0537248 loss)\nI0821 22:25:07.210407 32487 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0821 22:27:24.694489 32487 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0821 22:28:46.230312 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72144\nI0821 22:28:46.230566 32487 solver.cpp:404]     Test net output #1: loss = 1.39261 (* 1 = 1.39261 loss)\nI0821 22:28:47.552511 32487 solver.cpp:228] Iteration 22900, loss = 0.0762406\nI0821 22:28:47.552557 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 22:28:47.552573 32487 solver.cpp:244]     Train net output #1: loss = 0.0762403 (* 1 = 0.0762403 
loss)\nI0821 22:28:47.643352 32487 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0821 22:31:05.139492 32487 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0821 22:32:26.666895 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65572\nI0821 22:32:26.667156 32487 solver.cpp:404]     Test net output #1: loss = 2.32742 (* 1 = 2.32742 loss)\nI0821 22:32:27.989145 32487 solver.cpp:228] Iteration 23000, loss = 0.0512171\nI0821 22:32:27.989192 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:32:27.989207 32487 solver.cpp:244]     Train net output #1: loss = 0.0512168 (* 1 = 0.0512168 loss)\nI0821 22:32:28.078227 32487 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0821 22:34:45.523764 32487 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0821 22:36:07.053025 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66052\nI0821 22:36:07.053252 32487 solver.cpp:404]     Test net output #1: loss = 1.9165 (* 1 = 1.9165 loss)\nI0821 22:36:08.375612 32487 solver.cpp:228] Iteration 23100, loss = 0.0146807\nI0821 22:36:08.375658 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:36:08.375674 32487 solver.cpp:244]     Train net output #1: loss = 0.0146803 (* 1 = 0.0146803 loss)\nI0821 22:36:08.466605 32487 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0821 22:38:25.899708 32487 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0821 22:39:47.429404 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7044\nI0821 22:39:47.429674 32487 solver.cpp:404]     Test net output #1: loss = 1.70566 (* 1 = 1.70566 loss)\nI0821 22:39:48.752267 32487 solver.cpp:228] Iteration 23200, loss = 0.027918\nI0821 22:39:48.752303 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:39:48.752319 32487 solver.cpp:244]     Train net output #1: loss = 0.0279177 (* 1 = 0.0279177 loss)\nI0821 22:39:48.835551 32487 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0821 22:42:06.516634 32487 solver.cpp:337] 
Iteration 23300, Testing net (#0)\nI0821 22:43:28.057646 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6792\nI0821 22:43:28.057914 32487 solver.cpp:404]     Test net output #1: loss = 1.84824 (* 1 = 1.84824 loss)\nI0821 22:43:29.380585 32487 solver.cpp:228] Iteration 23300, loss = 0.0941568\nI0821 22:43:29.380632 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 22:43:29.380650 32487 solver.cpp:244]     Train net output #1: loss = 0.0941565 (* 1 = 0.0941565 loss)\nI0821 22:43:29.462765 32487 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0821 22:45:46.924557 32487 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0821 22:47:08.478754 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6156\nI0821 22:47:08.478981 32487 solver.cpp:404]     Test net output #1: loss = 2.43438 (* 1 = 2.43438 loss)\nI0821 22:47:09.801275 32487 solver.cpp:228] Iteration 23400, loss = 0.00839286\nI0821 22:47:09.801326 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:47:09.801347 32487 solver.cpp:244]     Train net output #1: loss = 0.0083925 (* 1 = 0.0083925 loss)\nI0821 22:47:09.884413 32487 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0821 22:49:27.597312 32487 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0821 22:50:49.146106 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66232\nI0821 22:50:49.146383 32487 solver.cpp:404]     Test net output #1: loss = 2.09874 (* 1 = 2.09874 loss)\nI0821 22:50:50.468111 32487 solver.cpp:228] Iteration 23500, loss = 0.0625461\nI0821 22:50:50.468156 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:50:50.468173 32487 solver.cpp:244]     Train net output #1: loss = 0.0625457 (* 1 = 0.0625457 loss)\nI0821 22:50:50.557860 32487 sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0821 22:53:08.150969 32487 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0821 22:54:29.694782 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71332\nI0821 
22:54:29.695050 32487 solver.cpp:404]     Test net output #1: loss = 1.50735 (* 1 = 1.50735 loss)\nI0821 22:54:31.017670 32487 solver.cpp:228] Iteration 23600, loss = 0.042011\nI0821 22:54:31.017717 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:54:31.017735 32487 solver.cpp:244]     Train net output #1: loss = 0.0420106 (* 1 = 0.0420106 loss)\nI0821 22:54:31.106840 32487 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0821 22:56:48.512166 32487 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0821 22:58:10.042122 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68204\nI0821 22:58:10.042389 32487 solver.cpp:404]     Test net output #1: loss = 1.78131 (* 1 = 1.78131 loss)\nI0821 22:58:11.364210 32487 solver.cpp:228] Iteration 23700, loss = 0.0946337\nI0821 22:58:11.364256 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 22:58:11.364272 32487 solver.cpp:244]     Train net output #1: loss = 0.0946333 (* 1 = 0.0946333 loss)\nI0821 22:58:11.448151 32487 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0821 23:00:28.975649 32487 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0821 23:01:50.509680 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72832\nI0821 23:01:50.509933 32487 solver.cpp:404]     Test net output #1: loss = 1.42613 (* 1 = 1.42613 loss)\nI0821 23:01:51.832305 32487 solver.cpp:228] Iteration 23800, loss = 0.0221574\nI0821 23:01:51.832355 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:01:51.832372 32487 solver.cpp:244]     Train net output #1: loss = 0.022157 (* 1 = 0.022157 loss)\nI0821 23:01:51.921027 32487 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0821 23:04:09.474120 32487 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0821 23:05:31.008831 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6988\nI0821 23:05:31.009095 32487 solver.cpp:404]     Test net output #1: loss = 1.61318 (* 1 = 1.61318 loss)\nI0821 23:05:32.331377 32487 
solver.cpp:228] Iteration 23900, loss = 0.0560309\nI0821 23:05:32.331421 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:05:32.331439 32487 solver.cpp:244]     Train net output #1: loss = 0.0560305 (* 1 = 0.0560305 loss)\nI0821 23:05:32.416613 32487 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0821 23:07:49.878455 32487 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0821 23:09:11.410305 32487 solver.cpp:404]     Test net output #0: accuracy = 0.59864\nI0821 23:09:11.410547 32487 solver.cpp:404]     Test net output #1: loss = 2.45556 (* 1 = 2.45556 loss)\nI0821 23:09:12.732568 32487 solver.cpp:228] Iteration 24000, loss = 0.0777899\nI0821 23:09:12.732611 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:09:12.732627 32487 solver.cpp:244]     Train net output #1: loss = 0.0777896 (* 1 = 0.0777896 loss)\nI0821 23:09:12.820616 32487 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0821 23:11:30.339567 32487 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0821 23:12:51.868652 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74532\nI0821 23:12:51.868913 32487 solver.cpp:404]     Test net output #1: loss = 1.50605 (* 1 = 1.50605 loss)\nI0821 23:12:53.191748 32487 solver.cpp:228] Iteration 24100, loss = 0.0157698\nI0821 23:12:53.191781 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:12:53.191797 32487 solver.cpp:244]     Train net output #1: loss = 0.0157694 (* 1 = 0.0157694 loss)\nI0821 23:12:53.273877 32487 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0821 23:15:10.663661 32487 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0821 23:16:32.200445 32487 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0821 23:16:32.200700 32487 solver.cpp:404]     Test net output #1: loss = 1.47251 (* 1 = 1.47251 loss)\nI0821 23:16:33.523763 32487 solver.cpp:228] Iteration 24200, loss = 0.0766861\nI0821 23:16:33.523809 32487 solver.cpp:244]     Train net output #0: accuracy 
= 0.968\nI0821 23:16:33.523826 32487 solver.cpp:244]     Train net output #1: loss = 0.0766858 (* 1 = 0.0766858 loss)\nI0821 23:16:33.612378 32487 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0821 23:18:51.028069 32487 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0821 23:20:12.558071 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0821 23:20:12.558336 32487 solver.cpp:404]     Test net output #1: loss = 1.63478 (* 1 = 1.63478 loss)\nI0821 23:20:13.881181 32487 solver.cpp:228] Iteration 24300, loss = 0.0847539\nI0821 23:20:13.881227 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:20:13.881243 32487 solver.cpp:244]     Train net output #1: loss = 0.0847536 (* 1 = 0.0847536 loss)\nI0821 23:20:13.961820 32487 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0821 23:22:31.551636 32487 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0821 23:23:53.093241 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6974\nI0821 23:23:53.093524 32487 solver.cpp:404]     Test net output #1: loss = 1.88451 (* 1 = 1.88451 loss)\nI0821 23:23:54.415998 32487 solver.cpp:228] Iteration 24400, loss = 0.0260881\nI0821 23:23:54.416044 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:23:54.416061 32487 solver.cpp:244]     Train net output #1: loss = 0.0260878 (* 1 = 0.0260878 loss)\nI0821 23:23:54.501499 32487 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0821 23:26:12.085994 32487 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0821 23:27:33.614131 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70472\nI0821 23:27:33.614400 32487 solver.cpp:404]     Test net output #1: loss = 1.54433 (* 1 = 1.54433 loss)\nI0821 23:27:34.936920 32487 solver.cpp:228] Iteration 24500, loss = 0.0313845\nI0821 23:27:34.936964 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:27:34.936981 32487 solver.cpp:244]     Train net output #1: loss = 0.0313842 (* 1 = 0.0313842 loss)\nI0821 
23:27:35.027482 32487 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0821 23:29:52.634752 32487 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0821 23:31:14.168336 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73352\nI0821 23:31:14.168609 32487 solver.cpp:404]     Test net output #1: loss = 1.39278 (* 1 = 1.39278 loss)\nI0821 23:31:15.490939 32487 solver.cpp:228] Iteration 24600, loss = 0.079436\nI0821 23:31:15.490983 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 23:31:15.490998 32487 solver.cpp:244]     Train net output #1: loss = 0.0794357 (* 1 = 0.0794357 loss)\nI0821 23:31:15.575651 32487 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0821 23:33:33.134809 32487 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0821 23:34:54.664674 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72956\nI0821 23:34:54.664913 32487 solver.cpp:404]     Test net output #1: loss = 1.45521 (* 1 = 1.45521 loss)\nI0821 23:34:55.987243 32487 solver.cpp:228] Iteration 24700, loss = 0.0696003\nI0821 23:34:55.987288 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:34:55.987304 32487 solver.cpp:244]     Train net output #1: loss = 0.0695999 (* 1 = 0.0695999 loss)\nI0821 23:34:56.080791 32487 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0821 23:37:13.568169 32487 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0821 23:38:35.089884 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73628\nI0821 23:38:35.090157 32487 solver.cpp:404]     Test net output #1: loss = 1.29984 (* 1 = 1.29984 loss)\nI0821 23:38:36.412498 32487 solver.cpp:228] Iteration 24800, loss = 0.0557437\nI0821 23:38:36.412544 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:38:36.412559 32487 solver.cpp:244]     Train net output #1: loss = 0.0557434 (* 1 = 0.0557434 loss)\nI0821 23:38:36.504616 32487 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0821 23:40:54.052999 32487 solver.cpp:337] Iteration 
24900, Testing net (#0)\nI0821 23:42:15.576719 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71684\nI0821 23:42:15.576987 32487 solver.cpp:404]     Test net output #1: loss = 1.52419 (* 1 = 1.52419 loss)\nI0821 23:42:16.899627 32487 solver.cpp:228] Iteration 24900, loss = 0.0129592\nI0821 23:42:16.899663 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:42:16.899678 32487 solver.cpp:244]     Train net output #1: loss = 0.0129588 (* 1 = 0.0129588 loss)\nI0821 23:42:16.984480 32487 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0821 23:44:34.368962 32487 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0821 23:45:55.888957 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68044\nI0821 23:45:55.889230 32487 solver.cpp:404]     Test net output #1: loss = 1.76625 (* 1 = 1.76625 loss)\nI0821 23:45:57.211308 32487 solver.cpp:228] Iteration 25000, loss = 0.0179077\nI0821 23:45:57.211356 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:45:57.211374 32487 solver.cpp:244]     Train net output #1: loss = 0.0179074 (* 1 = 0.0179074 loss)\nI0821 23:45:57.293248 32487 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0821 23:48:14.737933 32487 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0821 23:49:36.265393 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71536\nI0821 23:49:36.265648 32487 solver.cpp:404]     Test net output #1: loss = 1.52687 (* 1 = 1.52687 loss)\nI0821 23:49:37.587730 32487 solver.cpp:228] Iteration 25100, loss = 0.00711569\nI0821 23:49:37.587765 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:49:37.587780 32487 solver.cpp:244]     Train net output #1: loss = 0.00711535 (* 1 = 0.00711535 loss)\nI0821 23:49:37.673149 32487 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0821 23:51:55.288005 32487 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0821 23:53:16.814040 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71716\nI0821 
23:53:16.814316 32487 solver.cpp:404]     Test net output #1: loss = 1.36849 (* 1 = 1.36849 loss)\nI0821 23:53:18.136359 32487 solver.cpp:228] Iteration 25200, loss = 0.0248539\nI0821 23:53:18.136406 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:53:18.136422 32487 solver.cpp:244]     Train net output #1: loss = 0.0248535 (* 1 = 0.0248535 loss)\nI0821 23:53:18.223569 32487 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0821 23:55:35.667657 32487 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0821 23:56:57.195989 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70936\nI0821 23:56:57.196274 32487 solver.cpp:404]     Test net output #1: loss = 1.64111 (* 1 = 1.64111 loss)\nI0821 23:56:58.518021 32487 solver.cpp:228] Iteration 25300, loss = 0.0585203\nI0821 23:56:58.518065 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:56:58.518082 32487 solver.cpp:244]     Train net output #1: loss = 0.05852 (* 1 = 0.05852 loss)\nI0821 23:56:58.608723 32487 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0821 23:59:16.117964 32487 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0822 00:00:37.644440 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62668\nI0822 00:00:37.644735 32487 solver.cpp:404]     Test net output #1: loss = 2.31294 (* 1 = 2.31294 loss)\nI0822 00:00:38.966413 32487 solver.cpp:228] Iteration 25400, loss = 0.050247\nI0822 00:00:38.966459 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:00:38.966475 32487 solver.cpp:244]     Train net output #1: loss = 0.0502467 (* 1 = 0.0502467 loss)\nI0822 00:00:39.054538 32487 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0822 00:02:56.590270 32487 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0822 00:04:18.115587 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65316\nI0822 00:04:18.115854 32487 solver.cpp:404]     Test net output #1: loss = 2.00461 (* 1 = 2.00461 loss)\nI0822 00:04:19.438073 32487 
solver.cpp:228] Iteration 25500, loss = 0.0827558\nI0822 00:04:19.438108 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 00:04:19.438124 32487 solver.cpp:244]     Train net output #1: loss = 0.0827555 (* 1 = 0.0827555 loss)\nI0822 00:04:19.521184 32487 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0822 00:06:37.015704 32487 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0822 00:07:58.439124 32487 solver.cpp:404]     Test net output #0: accuracy = 0.61292\nI0822 00:07:58.439414 32487 solver.cpp:404]     Test net output #1: loss = 2.27713 (* 1 = 2.27713 loss)\nI0822 00:07:59.761945 32487 solver.cpp:228] Iteration 25600, loss = 0.0377556\nI0822 00:07:59.761984 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:07:59.762001 32487 solver.cpp:244]     Train net output #1: loss = 0.0377552 (* 1 = 0.0377552 loss)\nI0822 00:07:59.853215 32487 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0822 00:10:17.555428 32487 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0822 00:11:38.987820 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64756\nI0822 00:11:38.988085 32487 solver.cpp:404]     Test net output #1: loss = 2.12007 (* 1 = 2.12007 loss)\nI0822 00:11:40.310461 32487 solver.cpp:228] Iteration 25700, loss = 0.0640922\nI0822 00:11:40.310493 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:11:40.310509 32487 solver.cpp:244]     Train net output #1: loss = 0.0640918 (* 1 = 0.0640918 loss)\nI0822 00:11:40.401636 32487 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0822 00:13:57.922565 32487 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0822 00:15:19.351835 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63044\nI0822 00:15:19.352123 32487 solver.cpp:404]     Test net output #1: loss = 2.33617 (* 1 = 2.33617 loss)\nI0822 00:15:20.674006 32487 solver.cpp:228] Iteration 25800, loss = 0.0430567\nI0822 00:15:20.674049 32487 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0822 00:15:20.674065 32487 solver.cpp:244]     Train net output #1: loss = 0.0430563 (* 1 = 0.0430563 loss)\nI0822 00:15:20.756608 32487 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0822 00:17:38.205361 32487 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0822 00:18:59.631817 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63312\nI0822 00:18:59.632093 32487 solver.cpp:404]     Test net output #1: loss = 2.30756 (* 1 = 2.30756 loss)\nI0822 00:19:00.954075 32487 solver.cpp:228] Iteration 25900, loss = 0.0511827\nI0822 00:19:00.954116 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:19:00.954133 32487 solver.cpp:244]     Train net output #1: loss = 0.0511824 (* 1 = 0.0511824 loss)\nI0822 00:19:01.035303 32487 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0822 00:21:18.473007 32487 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0822 00:22:39.895138 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73636\nI0822 00:22:39.895438 32487 solver.cpp:404]     Test net output #1: loss = 1.33166 (* 1 = 1.33166 loss)\nI0822 00:22:41.217988 32487 solver.cpp:228] Iteration 26000, loss = 0.0303759\nI0822 00:22:41.218029 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:22:41.218045 32487 solver.cpp:244]     Train net output #1: loss = 0.0303756 (* 1 = 0.0303756 loss)\nI0822 00:22:41.298367 32487 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0822 00:24:58.725489 32487 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0822 00:26:20.141916 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71868\nI0822 00:26:20.142211 32487 solver.cpp:404]     Test net output #1: loss = 1.42779 (* 1 = 1.42779 loss)\nI0822 00:26:21.464704 32487 solver.cpp:228] Iteration 26100, loss = 0.024383\nI0822 00:26:21.464745 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:26:21.464761 32487 solver.cpp:244]     Train net output #1: loss = 0.0243827 (* 1 = 0.0243827 
loss)\nI0822 00:26:21.545646 32487 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0822 00:28:39.097734 32487 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0822 00:30:00.512073 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72732\nI0822 00:30:00.512375 32487 solver.cpp:404]     Test net output #1: loss = 1.60484 (* 1 = 1.60484 loss)\nI0822 00:30:01.833667 32487 solver.cpp:228] Iteration 26200, loss = 0.0621371\nI0822 00:30:01.833708 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 00:30:01.833724 32487 solver.cpp:244]     Train net output #1: loss = 0.0621368 (* 1 = 0.0621368 loss)\nI0822 00:30:01.920707 32487 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0822 00:32:19.474927 32487 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0822 00:33:40.892051 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7434\nI0822 00:33:40.892348 32487 solver.cpp:404]     Test net output #1: loss = 1.27967 (* 1 = 1.27967 loss)\nI0822 00:33:42.213994 32487 solver.cpp:228] Iteration 26300, loss = 0.0228182\nI0822 00:33:42.214027 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:33:42.214042 32487 solver.cpp:244]     Train net output #1: loss = 0.0228179 (* 1 = 0.0228179 loss)\nI0822 00:33:42.302517 32487 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0822 00:35:59.760637 32487 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0822 00:37:21.178373 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67668\nI0822 00:37:21.178669 32487 solver.cpp:404]     Test net output #1: loss = 2.05992 (* 1 = 2.05992 loss)\nI0822 00:37:22.501155 32487 solver.cpp:228] Iteration 26400, loss = 0.0440711\nI0822 00:37:22.501196 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 00:37:22.501212 32487 solver.cpp:244]     Train net output #1: loss = 0.0440708 (* 1 = 0.0440708 loss)\nI0822 00:37:22.589382 32487 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0822 00:39:40.044142 32487 solver.cpp:337] 
Iteration 26500, Testing net (#0)\nI0822 00:41:01.574971 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70488\nI0822 00:41:01.575268 32487 solver.cpp:404]     Test net output #1: loss = 1.67542 (* 1 = 1.67542 loss)\nI0822 00:41:02.897565 32487 solver.cpp:228] Iteration 26500, loss = 0.0432393\nI0822 00:41:02.897605 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:41:02.897622 32487 solver.cpp:244]     Train net output #1: loss = 0.043239 (* 1 = 0.043239 loss)\nI0822 00:41:02.988386 32487 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0822 00:43:20.493500 32487 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0822 00:44:41.998797 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65148\nI0822 00:44:41.999066 32487 solver.cpp:404]     Test net output #1: loss = 2.15775 (* 1 = 2.15775 loss)\nI0822 00:44:43.321375 32487 solver.cpp:228] Iteration 26600, loss = 0.0119177\nI0822 00:44:43.321424 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:44:43.321441 32487 solver.cpp:244]     Train net output #1: loss = 0.0119173 (* 1 = 0.0119173 loss)\nI0822 00:44:43.411403 32487 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0822 00:47:00.969745 32487 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0822 00:48:22.444497 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72464\nI0822 00:48:22.444751 32487 solver.cpp:404]     Test net output #1: loss = 1.57124 (* 1 = 1.57124 loss)\nI0822 00:48:23.767040 32487 solver.cpp:228] Iteration 26700, loss = 0.0463571\nI0822 00:48:23.767091 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:48:23.767109 32487 solver.cpp:244]     Train net output #1: loss = 0.0463567 (* 1 = 0.0463567 loss)\nI0822 00:48:23.855721 32487 sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0822 00:50:41.387981 32487 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0822 00:52:02.892010 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7106\nI0822 
00:52:02.892261 32487 solver.cpp:404]     Test net output #1: loss = 1.61573 (* 1 = 1.61573 loss)\nI0822 00:52:04.215306 32487 solver.cpp:228] Iteration 26800, loss = 0.028685\nI0822 00:52:04.215353 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:52:04.215368 32487 solver.cpp:244]     Train net output #1: loss = 0.0286846 (* 1 = 0.0286846 loss)\nI0822 00:52:04.301470 32487 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0822 00:54:21.874178 32487 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0822 00:55:43.417184 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7412\nI0822 00:55:43.417464 32487 solver.cpp:404]     Test net output #1: loss = 1.29702 (* 1 = 1.29702 loss)\nI0822 00:55:44.740882 32487 solver.cpp:228] Iteration 26900, loss = 0.0596982\nI0822 00:55:44.740927 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:55:44.740943 32487 solver.cpp:244]     Train net output #1: loss = 0.0596979 (* 1 = 0.0596979 loss)\nI0822 00:55:44.830080 32487 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0822 00:58:02.318886 32487 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0822 00:59:23.848544 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7234\nI0822 00:59:23.848809 32487 solver.cpp:404]     Test net output #1: loss = 1.45008 (* 1 = 1.45008 loss)\nI0822 00:59:25.171675 32487 solver.cpp:228] Iteration 27000, loss = 0.0594222\nI0822 00:59:25.171720 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 00:59:25.171737 32487 solver.cpp:244]     Train net output #1: loss = 0.0594219 (* 1 = 0.0594219 loss)\nI0822 00:59:25.263463 32487 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0822 01:01:42.900852 32487 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0822 01:03:04.425814 32487 solver.cpp:404]     Test net output #0: accuracy = 0.60224\nI0822 01:03:04.426089 32487 solver.cpp:404]     Test net output #1: loss = 2.51433 (* 1 = 2.51433 loss)\nI0822 01:03:05.748852 32487 
solver.cpp:228] Iteration 27100, loss = 0.00735924\nI0822 01:03:05.748894 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:03:05.748910 32487 solver.cpp:244]     Train net output #1: loss = 0.00735887 (* 1 = 0.00735887 loss)\nI0822 01:03:05.837708 32487 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0822 01:05:23.562795 32487 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0822 01:06:45.080101 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69672\nI0822 01:06:45.080379 32487 solver.cpp:404]     Test net output #1: loss = 1.69238 (* 1 = 1.69238 loss)\nI0822 01:06:46.402809 32487 solver.cpp:228] Iteration 27200, loss = 0.033155\nI0822 01:06:46.402851 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:06:46.402868 32487 solver.cpp:244]     Train net output #1: loss = 0.0331546 (* 1 = 0.0331546 loss)\nI0822 01:06:46.491387 32487 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0822 01:09:04.163075 32487 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0822 01:10:25.692582 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72496\nI0822 01:10:25.692833 32487 solver.cpp:404]     Test net output #1: loss = 1.51743 (* 1 = 1.51743 loss)\nI0822 01:10:27.015292 32487 solver.cpp:228] Iteration 27300, loss = 0.0218085\nI0822 01:10:27.015332 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:10:27.015353 32487 solver.cpp:244]     Train net output #1: loss = 0.0218081 (* 1 = 0.0218081 loss)\nI0822 01:10:27.105829 32487 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0822 01:12:44.802726 32487 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0822 01:14:06.328171 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73688\nI0822 01:14:06.328449 32487 solver.cpp:404]     Test net output #1: loss = 1.39007 (* 1 = 1.39007 loss)\nI0822 01:14:07.650982 32487 solver.cpp:228] Iteration 27400, loss = 0.0458353\nI0822 01:14:07.651026 32487 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0822 01:14:07.651041 32487 solver.cpp:244]     Train net output #1: loss = 0.0458349 (* 1 = 0.0458349 loss)\nI0822 01:14:07.733614 32487 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0822 01:16:25.336887 32487 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0822 01:17:46.857862 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72872\nI0822 01:17:46.858134 32487 solver.cpp:404]     Test net output #1: loss = 1.39185 (* 1 = 1.39185 loss)\nI0822 01:17:48.180310 32487 solver.cpp:228] Iteration 27500, loss = 0.0245565\nI0822 01:17:48.180361 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:17:48.180377 32487 solver.cpp:244]     Train net output #1: loss = 0.0245561 (* 1 = 0.0245561 loss)\nI0822 01:17:48.266008 32487 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0822 01:20:05.695657 32487 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0822 01:21:27.211506 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6824\nI0822 01:21:27.211758 32487 solver.cpp:404]     Test net output #1: loss = 1.83158 (* 1 = 1.83158 loss)\nI0822 01:21:28.534377 32487 solver.cpp:228] Iteration 27600, loss = 0.0434123\nI0822 01:21:28.534421 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:21:28.534438 32487 solver.cpp:244]     Train net output #1: loss = 0.0434119 (* 1 = 0.0434119 loss)\nI0822 01:21:28.624891 32487 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0822 01:23:46.056535 32487 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0822 01:25:07.574044 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68568\nI0822 01:25:07.574277 32487 solver.cpp:404]     Test net output #1: loss = 1.69702 (* 1 = 1.69702 loss)\nI0822 01:25:08.897389 32487 solver.cpp:228] Iteration 27700, loss = 0.0666885\nI0822 01:25:08.897425 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:25:08.897440 32487 solver.cpp:244]     Train net output #1: loss = 0.0666881 (* 1 = 0.0666881 loss)\nI0822 
01:25:08.979115 32487 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0822 01:27:26.486407 32487 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0822 01:28:48.010807 32487 solver.cpp:404]     Test net output #0: accuracy = 0.731\nI0822 01:28:48.011065 32487 solver.cpp:404]     Test net output #1: loss = 1.38392 (* 1 = 1.38392 loss)\nI0822 01:28:49.333592 32487 solver.cpp:228] Iteration 27800, loss = 0.00922738\nI0822 01:28:49.333627 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:28:49.333643 32487 solver.cpp:244]     Train net output #1: loss = 0.00922697 (* 1 = 0.00922697 loss)\nI0822 01:28:49.415868 32487 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0822 01:31:06.834894 32487 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0822 01:32:28.354432 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74932\nI0822 01:32:28.354665 32487 solver.cpp:404]     Test net output #1: loss = 1.38188 (* 1 = 1.38188 loss)\nI0822 01:32:29.678277 32487 solver.cpp:228] Iteration 27900, loss = 0.0315241\nI0822 01:32:29.678326 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:32:29.678344 32487 solver.cpp:244]     Train net output #1: loss = 0.0315237 (* 1 = 0.0315237 loss)\nI0822 01:32:29.766283 32487 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0822 01:34:47.352385 32487 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0822 01:36:08.871596 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6952\nI0822 01:36:08.871871 32487 solver.cpp:404]     Test net output #1: loss = 1.88448 (* 1 = 1.88448 loss)\nI0822 01:36:10.195113 32487 solver.cpp:228] Iteration 28000, loss = 0.0504785\nI0822 01:36:10.195147 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:36:10.195163 32487 solver.cpp:244]     Train net output #1: loss = 0.0504781 (* 1 = 0.0504781 loss)\nI0822 01:36:10.276978 32487 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0822 01:38:27.934608 32487 solver.cpp:337] Iteration 28100, 
Testing net (#0)\nI0822 01:39:49.459873 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66352\nI0822 01:39:49.460151 32487 solver.cpp:404]     Test net output #1: loss = 2.28848 (* 1 = 2.28848 loss)\nI0822 01:39:50.783140 32487 solver.cpp:228] Iteration 28100, loss = 0.02995\nI0822 01:39:50.783185 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:39:50.783201 32487 solver.cpp:244]     Train net output #1: loss = 0.0299496 (* 1 = 0.0299496 loss)\nI0822 01:39:50.863029 32487 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0822 01:42:08.267511 32487 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0822 01:43:29.790894 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74016\nI0822 01:43:29.791184 32487 solver.cpp:404]     Test net output #1: loss = 1.31706 (* 1 = 1.31706 loss)\nI0822 01:43:31.113574 32487 solver.cpp:228] Iteration 28200, loss = 0.0370183\nI0822 01:43:31.113615 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:43:31.113632 32487 solver.cpp:244]     Train net output #1: loss = 0.0370179 (* 1 = 0.0370179 loss)\nI0822 01:43:31.200366 32487 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0822 01:45:48.637092 32487 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0822 01:47:10.160032 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69628\nI0822 01:47:10.160322 32487 solver.cpp:404]     Test net output #1: loss = 1.90549 (* 1 = 1.90549 loss)\nI0822 01:47:11.482820 32487 solver.cpp:228] Iteration 28300, loss = 0.0291684\nI0822 01:47:11.482862 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:47:11.482878 32487 solver.cpp:244]     Train net output #1: loss = 0.029168 (* 1 = 0.029168 loss)\nI0822 01:47:11.572768 32487 sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0822 01:49:28.976189 32487 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0822 01:50:50.497010 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7146\nI0822 01:50:50.497303 
32487 solver.cpp:404]     Test net output #1: loss = 1.52132 (* 1 = 1.52132 loss)\nI0822 01:50:51.815644 32487 solver.cpp:228] Iteration 28400, loss = 0.0397045\nI0822 01:50:51.815687 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:50:51.815704 32487 solver.cpp:244]     Train net output #1: loss = 0.0397041 (* 1 = 0.0397041 loss)\nI0822 01:50:51.904187 32487 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0822 01:53:09.524138 32487 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0822 01:54:31.046772 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72856\nI0822 01:54:31.047052 32487 solver.cpp:404]     Test net output #1: loss = 1.48679 (* 1 = 1.48679 loss)\nI0822 01:54:32.365134 32487 solver.cpp:228] Iteration 28500, loss = 0.0555095\nI0822 01:54:32.365177 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:54:32.365195 32487 solver.cpp:244]     Train net output #1: loss = 0.0555091 (* 1 = 0.0555091 loss)\nI0822 01:54:32.455174 32487 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0822 01:56:49.918311 32487 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0822 01:58:11.460580 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72944\nI0822 01:58:11.460871 32487 solver.cpp:404]     Test net output #1: loss = 1.54506 (* 1 = 1.54506 loss)\nI0822 01:58:12.779920 32487 solver.cpp:228] Iteration 28600, loss = 0.037941\nI0822 01:58:12.779954 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:58:12.779971 32487 solver.cpp:244]     Train net output #1: loss = 0.0379406 (* 1 = 0.0379406 loss)\nI0822 01:58:12.867879 32487 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0822 02:00:30.427868 32487 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0822 02:01:51.956463 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67404\nI0822 02:01:51.956795 32487 solver.cpp:404]     Test net output #1: loss = 1.84304 (* 1 = 1.84304 loss)\nI0822 02:01:53.275625 32487 solver.cpp:228] 
Iteration 28700, loss = 0.0557931\nI0822 02:01:53.275668 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 02:01:53.275684 32487 solver.cpp:244]     Train net output #1: loss = 0.0557928 (* 1 = 0.0557928 loss)\nI0822 02:01:53.366410 32487 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0822 02:04:10.825158 32487 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0822 02:05:32.354554 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71632\nI0822 02:05:32.354854 32487 solver.cpp:404]     Test net output #1: loss = 1.66999 (* 1 = 1.66999 loss)\nI0822 02:05:33.672616 32487 solver.cpp:228] Iteration 28800, loss = 0.0378838\nI0822 02:05:33.672659 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 02:05:33.672677 32487 solver.cpp:244]     Train net output #1: loss = 0.0378834 (* 1 = 0.0378834 loss)\nI0822 02:05:33.758174 32487 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0822 02:07:51.199656 32487 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0822 02:09:12.724033 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70884\nI0822 02:09:12.724328 32487 solver.cpp:404]     Test net output #1: loss = 1.62041 (* 1 = 1.62041 loss)\nI0822 02:09:14.041579 32487 solver.cpp:228] Iteration 28900, loss = 0.0253686\nI0822 02:09:14.041625 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:09:14.041642 32487 solver.cpp:244]     Train net output #1: loss = 0.0253682 (* 1 = 0.0253682 loss)\nI0822 02:09:14.132060 32487 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0822 02:11:31.648478 32487 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0822 02:12:53.176743 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68196\nI0822 02:12:53.177047 32487 solver.cpp:404]     Test net output #1: loss = 1.95446 (* 1 = 1.95446 loss)\nI0822 02:12:54.495314 32487 solver.cpp:228] Iteration 29000, loss = 0.0454094\nI0822 02:12:54.495358 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 
02:12:54.495374 32487 solver.cpp:244]     Train net output #1: loss = 0.0454091 (* 1 = 0.0454091 loss)\nI0822 02:12:54.581423 32487 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0822 02:15:12.189648 32487 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0822 02:16:33.729089 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65488\nI0822 02:16:33.729375 32487 solver.cpp:404]     Test net output #1: loss = 2.23739 (* 1 = 2.23739 loss)\nI0822 02:16:35.047000 32487 solver.cpp:228] Iteration 29100, loss = 0.00943508\nI0822 02:16:35.047042 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:16:35.047058 32487 solver.cpp:244]     Train net output #1: loss = 0.00943472 (* 1 = 0.00943472 loss)\nI0822 02:16:35.136170 32487 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0822 02:18:52.672070 32487 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0822 02:20:14.204921 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68264\nI0822 02:20:14.205215 32487 solver.cpp:404]     Test net output #1: loss = 1.93694 (* 1 = 1.93694 loss)\nI0822 02:20:15.523833 32487 solver.cpp:228] Iteration 29200, loss = 0.0275516\nI0822 02:20:15.523876 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:20:15.523893 32487 solver.cpp:244]     Train net output #1: loss = 0.0275513 (* 1 = 0.0275513 loss)\nI0822 02:20:15.607856 32487 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0822 02:22:33.143569 32487 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0822 02:23:54.579609 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72416\nI0822 02:23:54.579903 32487 solver.cpp:404]     Test net output #1: loss = 1.42309 (* 1 = 1.42309 loss)\nI0822 02:23:55.898068 32487 solver.cpp:228] Iteration 29300, loss = 0.0213717\nI0822 02:23:55.898109 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:23:55.898125 32487 solver.cpp:244]     Train net output #1: loss = 0.0213713 (* 1 = 0.0213713 loss)\nI0822 02:23:55.984676 32487 
sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0822 02:26:13.497357 32487 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0822 02:27:34.929311 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64248\nI0822 02:27:34.929566 32487 solver.cpp:404]     Test net output #1: loss = 2.11263 (* 1 = 2.11263 loss)\nI0822 02:27:36.248687 32487 solver.cpp:228] Iteration 29400, loss = 0.0446738\nI0822 02:27:36.248731 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:27:36.248747 32487 solver.cpp:244]     Train net output #1: loss = 0.0446734 (* 1 = 0.0446734 loss)\nI0822 02:27:36.341742 32487 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0822 02:29:53.869427 32487 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0822 02:31:15.297950 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73832\nI0822 02:31:15.298218 32487 solver.cpp:404]     Test net output #1: loss = 1.34122 (* 1 = 1.34122 loss)\nI0822 02:31:16.616785 32487 solver.cpp:228] Iteration 29500, loss = 0.0165355\nI0822 02:31:16.616827 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:31:16.616842 32487 solver.cpp:244]     Train net output #1: loss = 0.0165352 (* 1 = 0.0165352 loss)\nI0822 02:31:16.701800 32487 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0822 02:33:34.281107 32487 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0822 02:34:55.704957 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72776\nI0822 02:34:55.705206 32487 solver.cpp:404]     Test net output #1: loss = 1.4118 (* 1 = 1.4118 loss)\nI0822 02:34:57.023257 32487 solver.cpp:228] Iteration 29600, loss = 0.0172002\nI0822 02:34:57.023298 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:34:57.023313 32487 solver.cpp:244]     Train net output #1: loss = 0.0171998 (* 1 = 0.0171998 loss)\nI0822 02:34:57.112715 32487 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0822 02:37:14.650869 32487 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0822 
02:38:36.077675 32487 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0822 02:38:36.077951 32487 solver.cpp:404]     Test net output #1: loss = 1.40042 (* 1 = 1.40042 loss)\nI0822 02:38:37.396000 32487 solver.cpp:228] Iteration 29700, loss = 0.0107888\nI0822 02:38:37.396042 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:38:37.396059 32487 solver.cpp:244]     Train net output #1: loss = 0.0107885 (* 1 = 0.0107885 loss)\nI0822 02:38:37.490702 32487 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0822 02:40:55.059583 32487 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0822 02:42:16.493783 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73672\nI0822 02:42:16.494061 32487 solver.cpp:404]     Test net output #1: loss = 1.26142 (* 1 = 1.26142 loss)\nI0822 02:42:17.811533 32487 solver.cpp:228] Iteration 29800, loss = 0.00764393\nI0822 02:42:17.811578 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:42:17.811599 32487 solver.cpp:244]     Train net output #1: loss = 0.00764357 (* 1 = 0.00764357 loss)\nI0822 02:42:17.907940 32487 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0822 02:44:35.478989 32487 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0822 02:45:56.901929 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68524\nI0822 02:45:56.902156 32487 solver.cpp:404]     Test net output #1: loss = 1.69836 (* 1 = 1.69836 loss)\nI0822 02:45:58.221349 32487 solver.cpp:228] Iteration 29900, loss = 0.0199693\nI0822 02:45:58.221393 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:45:58.221410 32487 solver.cpp:244]     Train net output #1: loss = 0.0199689 (* 1 = 0.0199689 loss)\nI0822 02:45:58.312527 32487 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0822 02:48:15.919787 32487 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0822 02:49:37.344070 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72064\nI0822 02:49:37.344329 32487 solver.cpp:404]     Test net 
output #1: loss = 1.39746 (* 1 = 1.39746 loss)\nI0822 02:49:38.662636 32487 solver.cpp:228] Iteration 30000, loss = 0.0519094\nI0822 02:49:38.662681 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:49:38.662698 32487 solver.cpp:244]     Train net output #1: loss = 0.0519091 (* 1 = 0.0519091 loss)\nI0822 02:49:38.757181 32487 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0822 02:51:56.210973 32487 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0822 02:53:17.632562 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6526\nI0822 02:53:17.632808 32487 solver.cpp:404]     Test net output #1: loss = 2.08654 (* 1 = 2.08654 loss)\nI0822 02:53:18.951840 32487 solver.cpp:228] Iteration 30100, loss = 0.0284115\nI0822 02:53:18.951885 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:53:18.951901 32487 solver.cpp:244]     Train net output #1: loss = 0.0284111 (* 1 = 0.0284111 loss)\nI0822 02:53:19.043529 32487 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0822 02:55:36.495558 32487 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0822 02:56:58.022055 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71508\nI0822 02:56:58.022341 32487 solver.cpp:404]     Test net output #1: loss = 1.62293 (* 1 = 1.62293 loss)\nI0822 02:56:59.341114 32487 solver.cpp:228] Iteration 30200, loss = 0.0298118\nI0822 02:56:59.341159 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:56:59.341176 32487 solver.cpp:244]     Train net output #1: loss = 0.0298115 (* 1 = 0.0298115 loss)\nI0822 02:56:59.433888 32487 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0822 02:59:17.048566 32487 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0822 03:00:38.568822 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68252\nI0822 03:00:38.569075 32487 solver.cpp:404]     Test net output #1: loss = 1.8867 (* 1 = 1.8867 loss)\nI0822 03:00:39.888115 32487 solver.cpp:228] Iteration 30300, loss = 
0.0193746\nI0822 03:00:39.888160 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:00:39.888176 32487 solver.cpp:244]     Train net output #1: loss = 0.0193742 (* 1 = 0.0193742 loss)\nI0822 03:00:39.973197 32487 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0822 03:02:57.539561 32487 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0822 03:04:19.065436 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6764\nI0822 03:04:19.065711 32487 solver.cpp:404]     Test net output #1: loss = 1.95812 (* 1 = 1.95812 loss)\nI0822 03:04:20.384202 32487 solver.cpp:228] Iteration 30400, loss = 0.0160396\nI0822 03:04:20.384249 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:04:20.384265 32487 solver.cpp:244]     Train net output #1: loss = 0.0160393 (* 1 = 0.0160393 loss)\nI0822 03:04:20.476033 32487 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0822 03:06:38.006600 32487 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0822 03:07:59.529037 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66164\nI0822 03:07:59.529295 32487 solver.cpp:404]     Test net output #1: loss = 2.05049 (* 1 = 2.05049 loss)\nI0822 03:08:00.847854 32487 solver.cpp:228] Iteration 30500, loss = 0.0121956\nI0822 03:08:00.847898 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:08:00.847915 32487 solver.cpp:244]     Train net output #1: loss = 0.0121952 (* 1 = 0.0121952 loss)\nI0822 03:08:00.933856 32487 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0822 03:10:18.437993 32487 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0822 03:11:39.961705 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76104\nI0822 03:11:39.961957 32487 solver.cpp:404]     Test net output #1: loss = 1.20816 (* 1 = 1.20816 loss)\nI0822 03:11:41.280927 32487 solver.cpp:228] Iteration 30600, loss = 0.0995772\nI0822 03:11:41.280971 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 03:11:41.280987 32487 
solver.cpp:244]     Train net output #1: loss = 0.0995768 (* 1 = 0.0995768 loss)\nI0822 03:11:41.367029 32487 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0822 03:13:58.947203 32487 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0822 03:15:20.462286 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74232\nI0822 03:15:20.462559 32487 solver.cpp:404]     Test net output #1: loss = 1.35548 (* 1 = 1.35548 loss)\nI0822 03:15:21.780764 32487 solver.cpp:228] Iteration 30700, loss = 0.0124461\nI0822 03:15:21.780807 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:15:21.780822 32487 solver.cpp:244]     Train net output #1: loss = 0.0124457 (* 1 = 0.0124457 loss)\nI0822 03:15:21.880841 32487 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0822 03:17:39.394647 32487 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0822 03:19:00.930819 32487 solver.cpp:404]     Test net output #0: accuracy = 0.60872\nI0822 03:19:00.931103 32487 solver.cpp:404]     Test net output #1: loss = 2.58204 (* 1 = 2.58204 loss)\nI0822 03:19:02.248615 32487 solver.cpp:228] Iteration 30800, loss = 0.0233674\nI0822 03:19:02.248654 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:19:02.248670 32487 solver.cpp:244]     Train net output #1: loss = 0.023367 (* 1 = 0.023367 loss)\nI0822 03:19:02.338538 32487 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0822 03:21:19.936462 32487 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0822 03:22:41.462013 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62948\nI0822 03:22:41.462273 32487 solver.cpp:404]     Test net output #1: loss = 2.38971 (* 1 = 2.38971 loss)\nI0822 03:22:42.781000 32487 solver.cpp:228] Iteration 30900, loss = 0.0289757\nI0822 03:22:42.781040 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:22:42.781056 32487 solver.cpp:244]     Train net output #1: loss = 0.0289753 (* 1 = 0.0289753 loss)\nI0822 03:22:42.870367 32487 sgd_solver.cpp:166] 
Iteration 30900, lr = 0.35\nI0822 03:25:00.474925 32487 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0822 03:26:22.003926 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65464\nI0822 03:26:22.004210 32487 solver.cpp:404]     Test net output #1: loss = 2.05755 (* 1 = 2.05755 loss)\nI0822 03:26:23.322551 32487 solver.cpp:228] Iteration 31000, loss = 0.0428331\nI0822 03:26:23.322592 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:26:23.322607 32487 solver.cpp:244]     Train net output #1: loss = 0.0428327 (* 1 = 0.0428327 loss)\nI0822 03:26:23.419883 32487 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0822 03:28:41.047528 32487 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0822 03:30:02.576189 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75848\nI0822 03:30:02.576454 32487 solver.cpp:404]     Test net output #1: loss = 1.15327 (* 1 = 1.15327 loss)\nI0822 03:30:03.894027 32487 solver.cpp:228] Iteration 31100, loss = 0.034985\nI0822 03:30:03.894069 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:30:03.894085 32487 solver.cpp:244]     Train net output #1: loss = 0.0349846 (* 1 = 0.0349846 loss)\nI0822 03:30:03.985103 32487 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0822 03:32:21.835793 32487 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0822 03:33:43.364240 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67044\nI0822 03:33:43.364495 32487 solver.cpp:404]     Test net output #1: loss = 2.05351 (* 1 = 2.05351 loss)\nI0822 03:33:44.682512 32487 solver.cpp:228] Iteration 31200, loss = 0.0160989\nI0822 03:33:44.682551 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:33:44.682567 32487 solver.cpp:244]     Train net output #1: loss = 0.0160985 (* 1 = 0.0160985 loss)\nI0822 03:33:44.771634 32487 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0822 03:36:02.483326 32487 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0822 03:37:24.003293 
32487 solver.cpp:404]     Test net output #0: accuracy = 0.67996\nI0822 03:37:24.003556 32487 solver.cpp:404]     Test net output #1: loss = 1.85474 (* 1 = 1.85474 loss)\nI0822 03:37:25.322192 32487 solver.cpp:228] Iteration 31300, loss = 0.0381195\nI0822 03:37:25.322239 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:37:25.322257 32487 solver.cpp:244]     Train net output #1: loss = 0.0381191 (* 1 = 0.0381191 loss)\nI0822 03:37:25.414762 32487 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0822 03:39:42.922602 32487 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0822 03:41:04.437340 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67284\nI0822 03:41:04.437615 32487 solver.cpp:404]     Test net output #1: loss = 1.97179 (* 1 = 1.97179 loss)\nI0822 03:41:05.756148 32487 solver.cpp:228] Iteration 31400, loss = 0.0154276\nI0822 03:41:05.756193 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:41:05.756211 32487 solver.cpp:244]     Train net output #1: loss = 0.0154273 (* 1 = 0.0154273 loss)\nI0822 03:41:05.849004 32487 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0822 03:43:23.248914 32487 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0822 03:44:44.757769 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73608\nI0822 03:44:44.758049 32487 solver.cpp:404]     Test net output #1: loss = 1.38785 (* 1 = 1.38785 loss)\nI0822 03:44:46.075788 32487 solver.cpp:228] Iteration 31500, loss = 0.0204515\nI0822 03:44:46.075829 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:44:46.075845 32487 solver.cpp:244]     Train net output #1: loss = 0.0204511 (* 1 = 0.0204511 loss)\nI0822 03:44:46.156833 32487 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0822 03:47:03.510334 32487 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0822 03:48:25.025590 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76028\nI0822 03:48:25.025871 32487 solver.cpp:404]     Test net 
output #1: loss = 1.21283 (* 1 = 1.21283 loss)\nI0822 03:48:26.344339 32487 solver.cpp:228] Iteration 31600, loss = 0.0453488\nI0822 03:48:26.344383 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:48:26.344398 32487 solver.cpp:244]     Train net output #1: loss = 0.0453484 (* 1 = 0.0453484 loss)\nI0822 03:48:26.431216 32487 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0822 03:50:44.053635 32487 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0822 03:52:05.571530 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7384\nI0822 03:52:05.571808 32487 solver.cpp:404]     Test net output #1: loss = 1.32587 (* 1 = 1.32587 loss)\nI0822 03:52:06.890827 32487 solver.cpp:228] Iteration 31700, loss = 0.0179524\nI0822 03:52:06.890868 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:52:06.890884 32487 solver.cpp:244]     Train net output #1: loss = 0.017952 (* 1 = 0.017952 loss)\nI0822 03:52:06.980523 32487 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0822 03:54:24.660405 32487 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0822 03:55:46.175782 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70396\nI0822 03:55:46.176043 32487 solver.cpp:404]     Test net output #1: loss = 1.5636 (* 1 = 1.5636 loss)\nI0822 03:55:47.495041 32487 solver.cpp:228] Iteration 31800, loss = 0.0451445\nI0822 03:55:47.495085 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 03:55:47.495100 32487 solver.cpp:244]     Train net output #1: loss = 0.0451441 (* 1 = 0.0451441 loss)\nI0822 03:55:47.582909 32487 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0822 03:58:05.203948 32487 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0822 03:59:26.721122 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75168\nI0822 03:59:26.721405 32487 solver.cpp:404]     Test net output #1: loss = 1.31291 (* 1 = 1.31291 loss)\nI0822 03:59:28.040740 32487 solver.cpp:228] Iteration 31900, loss = 0.0440164\nI0822 
03:59:28.040784 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:59:28.040801 32487 solver.cpp:244]     Train net output #1: loss = 0.044016 (* 1 = 0.044016 loss)\nI0822 03:59:28.131709 32487 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0822 04:01:45.699198 32487 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0822 04:03:07.215791 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69476\nI0822 04:03:07.216073 32487 solver.cpp:404]     Test net output #1: loss = 1.59466 (* 1 = 1.59466 loss)\nI0822 04:03:08.534071 32487 solver.cpp:228] Iteration 32000, loss = 0.0583392\nI0822 04:03:08.534113 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:03:08.534129 32487 solver.cpp:244]     Train net output #1: loss = 0.0583389 (* 1 = 0.0583389 loss)\nI0822 04:03:08.621526 32487 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0822 04:05:26.369143 32487 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0822 04:06:47.898980 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75064\nI0822 04:06:47.899247 32487 solver.cpp:404]     Test net output #1: loss = 1.2172 (* 1 = 1.2172 loss)\nI0822 04:06:49.218526 32487 solver.cpp:228] Iteration 32100, loss = 0.0263289\nI0822 04:06:49.218574 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:06:49.218590 32487 solver.cpp:244]     Train net output #1: loss = 0.0263286 (* 1 = 0.0263286 loss)\nI0822 04:06:49.313757 32487 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0822 04:09:07.108971 32487 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0822 04:10:28.630862 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6342\nI0822 04:10:28.631146 32487 solver.cpp:404]     Test net output #1: loss = 2.3368 (* 1 = 2.3368 loss)\nI0822 04:10:29.949913 32487 solver.cpp:228] Iteration 32200, loss = 0.0951814\nI0822 04:10:29.949954 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 04:10:29.949971 32487 solver.cpp:244]     Train 
net output #1: loss = 0.095181 (* 1 = 0.095181 loss)\nI0822 04:10:30.042141 32487 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0822 04:12:47.522931 32487 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0822 04:14:09.049613 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69512\nI0822 04:14:09.049878 32487 solver.cpp:404]     Test net output #1: loss = 1.8919 (* 1 = 1.8919 loss)\nI0822 04:14:10.367929 32487 solver.cpp:228] Iteration 32300, loss = 0.0401586\nI0822 04:14:10.367971 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:14:10.367987 32487 solver.cpp:244]     Train net output #1: loss = 0.0401582 (* 1 = 0.0401582 loss)\nI0822 04:14:10.458591 32487 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0822 04:16:27.872887 32487 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0822 04:17:49.403930 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71976\nI0822 04:17:49.404186 32487 solver.cpp:404]     Test net output #1: loss = 1.49837 (* 1 = 1.49837 loss)\nI0822 04:17:50.723376 32487 solver.cpp:228] Iteration 32400, loss = 0.0490264\nI0822 04:17:50.723420 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:17:50.723438 32487 solver.cpp:244]     Train net output #1: loss = 0.049026 (* 1 = 0.049026 loss)\nI0822 04:17:50.819996 32487 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0822 04:20:08.299904 32487 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0822 04:21:29.818276 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71964\nI0822 04:21:29.818552 32487 solver.cpp:404]     Test net output #1: loss = 1.57366 (* 1 = 1.57366 loss)\nI0822 04:21:31.137225 32487 solver.cpp:228] Iteration 32500, loss = 0.0324824\nI0822 04:21:31.137271 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:21:31.137289 32487 solver.cpp:244]     Train net output #1: loss = 0.032482 (* 1 = 0.032482 loss)\nI0822 04:21:31.232975 32487 sgd_solver.cpp:166] Iteration 32500, lr = 
0.35\nI0822 04:23:48.708576 32487 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0822 04:25:10.240454 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67436\nI0822 04:25:10.240751 32487 solver.cpp:404]     Test net output #1: loss = 2.16208 (* 1 = 2.16208 loss)\nI0822 04:25:11.559403 32487 solver.cpp:228] Iteration 32600, loss = 0.0922931\nI0822 04:25:11.559449 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:25:11.559466 32487 solver.cpp:244]     Train net output #1: loss = 0.0922927 (* 1 = 0.0922927 loss)\nI0822 04:25:11.653385 32487 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0822 04:27:29.083562 32487 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0822 04:28:50.614804 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71744\nI0822 04:28:50.615104 32487 solver.cpp:404]     Test net output #1: loss = 1.58382 (* 1 = 1.58382 loss)\nI0822 04:28:51.934218 32487 solver.cpp:228] Iteration 32700, loss = 0.021966\nI0822 04:28:51.934264 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:28:51.934281 32487 solver.cpp:244]     Train net output #1: loss = 0.0219657 (* 1 = 0.0219657 loss)\nI0822 04:28:52.019870 32487 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0822 04:31:09.500056 32487 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0822 04:32:31.033377 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70572\nI0822 04:32:31.033673 32487 solver.cpp:404]     Test net output #1: loss = 1.74357 (* 1 = 1.74357 loss)\nI0822 04:32:32.352813 32487 solver.cpp:228] Iteration 32800, loss = 0.0377985\nI0822 04:32:32.352859 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:32:32.352875 32487 solver.cpp:244]     Train net output #1: loss = 0.0377981 (* 1 = 0.0377981 loss)\nI0822 04:32:32.438400 32487 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0822 04:34:50.040005 32487 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0822 04:36:11.575634 32487 solver.cpp:404] 
    Test net output #0: accuracy = 0.75028\nI0822 04:36:11.575934 32487 solver.cpp:404]     Test net output #1: loss = 1.27445 (* 1 = 1.27445 loss)\nI0822 04:36:12.895056 32487 solver.cpp:228] Iteration 32900, loss = 0.0161486\nI0822 04:36:12.895100 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:36:12.895117 32487 solver.cpp:244]     Train net output #1: loss = 0.0161482 (* 1 = 0.0161482 loss)\nI0822 04:36:12.981390 32487 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0822 04:38:30.415801 32487 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0822 04:39:51.844544 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7172\nI0822 04:39:51.844849 32487 solver.cpp:404]     Test net output #1: loss = 1.56263 (* 1 = 1.56263 loss)\nI0822 04:39:53.163841 32487 solver.cpp:228] Iteration 33000, loss = 0.0265974\nI0822 04:39:53.163887 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:39:53.163903 32487 solver.cpp:244]     Train net output #1: loss = 0.026597 (* 1 = 0.026597 loss)\nI0822 04:39:53.249327 32487 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0822 04:42:10.727974 32487 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0822 04:43:32.153865 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72484\nI0822 04:43:32.154173 32487 solver.cpp:404]     Test net output #1: loss = 1.47548 (* 1 = 1.47548 loss)\nI0822 04:43:33.472955 32487 solver.cpp:228] Iteration 33100, loss = 0.0271307\nI0822 04:43:33.473001 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:43:33.473018 32487 solver.cpp:244]     Train net output #1: loss = 0.0271304 (* 1 = 0.0271304 loss)\nI0822 04:43:33.567440 32487 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0822 04:45:50.986708 32487 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0822 04:47:12.407019 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74968\nI0822 04:47:12.407320 32487 solver.cpp:404]     Test net output #1: loss = 1.3302 (* 1 = 
1.3302 loss)\nI0822 04:47:13.725894 32487 solver.cpp:228] Iteration 33200, loss = 0.109509\nI0822 04:47:13.725939 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 04:47:13.725956 32487 solver.cpp:244]     Train net output #1: loss = 0.109509 (* 1 = 0.109509 loss)\nI0822 04:47:13.821701 32487 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0822 04:49:31.493175 32487 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0822 04:50:52.925629 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70372\nI0822 04:50:52.925915 32487 solver.cpp:404]     Test net output #1: loss = 1.67614 (* 1 = 1.67614 loss)\nI0822 04:50:54.244191 32487 solver.cpp:228] Iteration 33300, loss = 0.0322154\nI0822 04:50:54.244235 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:50:54.244253 32487 solver.cpp:244]     Train net output #1: loss = 0.0322151 (* 1 = 0.0322151 loss)\nI0822 04:50:54.332630 32487 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0822 04:53:11.958995 32487 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0822 04:54:33.386821 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73784\nI0822 04:54:33.387116 32487 solver.cpp:404]     Test net output #1: loss = 1.31136 (* 1 = 1.31136 loss)\nI0822 04:54:34.705219 32487 solver.cpp:228] Iteration 33400, loss = 0.0821883\nI0822 04:54:34.705263 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:54:34.705281 32487 solver.cpp:244]     Train net output #1: loss = 0.082188 (* 1 = 0.082188 loss)\nI0822 04:54:34.803781 32487 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0822 04:56:52.499389 32487 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0822 04:58:13.922026 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75564\nI0822 04:58:13.922329 32487 solver.cpp:404]     Test net output #1: loss = 1.19989 (* 1 = 1.19989 loss)\nI0822 04:58:15.240121 32487 solver.cpp:228] Iteration 33500, loss = 0.0273904\nI0822 04:58:15.240166 32487 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:58:15.240183 32487 solver.cpp:244]     Train net output #1: loss = 0.02739 (* 1 = 0.02739 loss)\nI0822 04:58:15.328428 32487 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0822 05:00:32.976651 32487 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0822 05:01:54.427845 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74344\nI0822 05:01:54.428145 32487 solver.cpp:404]     Test net output #1: loss = 1.37069 (* 1 = 1.37069 loss)\nI0822 05:01:55.746723 32487 solver.cpp:228] Iteration 33600, loss = 0.0507153\nI0822 05:01:55.746768 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:01:55.746785 32487 solver.cpp:244]     Train net output #1: loss = 0.050715 (* 1 = 0.050715 loss)\nI0822 05:01:55.841898 32487 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0822 05:04:13.653976 32487 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0822 05:05:35.146865 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75724\nI0822 05:05:35.147167 32487 solver.cpp:404]     Test net output #1: loss = 1.20884 (* 1 = 1.20884 loss)\nI0822 05:05:36.465149 32487 solver.cpp:228] Iteration 33700, loss = 0.0168873\nI0822 05:05:36.465195 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:05:36.465212 32487 solver.cpp:244]     Train net output #1: loss = 0.016887 (* 1 = 0.016887 loss)\nI0822 05:05:36.563594 32487 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0822 05:07:54.313400 32487 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0822 05:09:15.857906 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70932\nI0822 05:09:15.858198 32487 solver.cpp:404]     Test net output #1: loss = 1.53738 (* 1 = 1.53738 loss)\nI0822 05:09:17.176092 32487 solver.cpp:228] Iteration 33800, loss = 0.0654916\nI0822 05:09:17.176139 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 05:09:17.176156 32487 solver.cpp:244]     Train net output #1: loss = 
0.0654913 (* 1 = 0.0654913 loss)\nI0822 05:09:17.264111 32487 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0822 05:11:34.986766 32487 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0822 05:12:56.566522 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65536\nI0822 05:12:56.566833 32487 solver.cpp:404]     Test net output #1: loss = 2.2729 (* 1 = 2.2729 loss)\nI0822 05:12:57.885882 32487 solver.cpp:228] Iteration 33900, loss = 0.0226218\nI0822 05:12:57.885928 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:12:57.885946 32487 solver.cpp:244]     Train net output #1: loss = 0.0226215 (* 1 = 0.0226215 loss)\nI0822 05:12:57.976013 32487 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0822 05:15:15.792052 32487 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0822 05:16:37.315151 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74324\nI0822 05:16:37.315454 32487 solver.cpp:404]     Test net output #1: loss = 1.35976 (* 1 = 1.35976 loss)\nI0822 05:16:38.633635 32487 solver.cpp:228] Iteration 34000, loss = 0.031622\nI0822 05:16:38.633680 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:16:38.633697 32487 solver.cpp:244]     Train net output #1: loss = 0.0316217 (* 1 = 0.0316217 loss)\nI0822 05:16:38.723403 32487 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0822 05:18:56.565452 32487 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0822 05:20:18.082363 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73284\nI0822 05:20:18.082669 32487 solver.cpp:404]     Test net output #1: loss = 1.61394 (* 1 = 1.61394 loss)\nI0822 05:20:19.400626 32487 solver.cpp:228] Iteration 34100, loss = 0.00677861\nI0822 05:20:19.400672 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:20:19.400688 32487 solver.cpp:244]     Train net output #1: loss = 0.00677831 (* 1 = 0.00677831 loss)\nI0822 05:20:19.489873 32487 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0822 05:22:37.234630 
32487 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0822 05:23:58.761616 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64396\nI0822 05:23:58.761920 32487 solver.cpp:404]     Test net output #1: loss = 2.2171 (* 1 = 2.2171 loss)\nI0822 05:24:00.081558 32487 solver.cpp:228] Iteration 34200, loss = 0.0335807\nI0822 05:24:00.081605 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:24:00.081621 32487 solver.cpp:244]     Train net output #1: loss = 0.0335804 (* 1 = 0.0335804 loss)\nI0822 05:24:00.178251 32487 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0822 05:26:17.941357 32487 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0822 05:27:39.460786 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66184\nI0822 05:27:39.461086 32487 solver.cpp:404]     Test net output #1: loss = 1.8875 (* 1 = 1.8875 loss)\nI0822 05:27:40.779106 32487 solver.cpp:228] Iteration 34300, loss = 0.0368098\nI0822 05:27:40.779152 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:27:40.779170 32487 solver.cpp:244]     Train net output #1: loss = 0.0368095 (* 1 = 0.0368095 loss)\nI0822 05:27:40.870957 32487 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0822 05:29:58.825861 32487 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0822 05:31:20.346642 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72344\nI0822 05:31:20.346930 32487 solver.cpp:404]     Test net output #1: loss = 1.46037 (* 1 = 1.46037 loss)\nI0822 05:31:21.665897 32487 solver.cpp:228] Iteration 34400, loss = 0.0373597\nI0822 05:31:21.665942 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:31:21.665958 32487 solver.cpp:244]     Train net output #1: loss = 0.0373593 (* 1 = 0.0373593 loss)\nI0822 05:31:21.756182 32487 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0822 05:33:39.517367 32487 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0822 05:35:01.082542 32487 solver.cpp:404]     Test net output #0: 
accuracy = 0.70048\nI0822 05:35:01.082836 32487 solver.cpp:404]     Test net output #1: loss = 1.61398 (* 1 = 1.61398 loss)\nI0822 05:35:02.402817 32487 solver.cpp:228] Iteration 34500, loss = 0.080932\nI0822 05:35:02.402863 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 05:35:02.402880 32487 solver.cpp:244]     Train net output #1: loss = 0.0809316 (* 1 = 0.0809316 loss)\nI0822 05:35:02.494752 32487 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0822 05:37:20.317136 32487 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0822 05:38:41.881403 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7232\nI0822 05:38:41.881697 32487 solver.cpp:404]     Test net output #1: loss = 1.59744 (* 1 = 1.59744 loss)\nI0822 05:38:43.201038 32487 solver.cpp:228] Iteration 34600, loss = 0.0198948\nI0822 05:38:43.201086 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:38:43.201102 32487 solver.cpp:244]     Train net output #1: loss = 0.0198945 (* 1 = 0.0198945 loss)\nI0822 05:38:43.288199 32487 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0822 05:41:01.092481 32487 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0822 05:42:22.629335 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72604\nI0822 05:42:22.629642 32487 solver.cpp:404]     Test net output #1: loss = 1.44948 (* 1 = 1.44948 loss)\nI0822 05:42:23.948177 32487 solver.cpp:228] Iteration 34700, loss = 0.0308502\nI0822 05:42:23.948223 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:42:23.948240 32487 solver.cpp:244]     Train net output #1: loss = 0.0308499 (* 1 = 0.0308499 loss)\nI0822 05:42:24.036286 32487 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0822 05:44:41.655279 32487 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0822 05:46:03.177160 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73048\nI0822 05:46:03.177445 32487 solver.cpp:404]     Test net output #1: loss = 1.35212 (* 1 = 1.35212 
loss)\nI0822 05:46:04.495620 32487 solver.cpp:228] Iteration 34800, loss = 0.0802882\nI0822 05:46:04.495666 32487 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:46:04.495683 32487 solver.cpp:244]     Train net output #1: loss = 0.0802879 (* 1 = 0.0802879 loss)\nI0822 05:46:04.587546 32487 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0822 05:48:22.291782 32487 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0822 05:49:43.817387 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68392\nI0822 05:49:43.817680 32487 solver.cpp:404]     Test net output #1: loss = 1.94954 (* 1 = 1.94954 loss)\nI0822 05:49:45.135926 32487 solver.cpp:228] Iteration 34900, loss = 0.031859\nI0822 05:49:45.135972 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:49:45.135988 32487 solver.cpp:244]     Train net output #1: loss = 0.0318587 (* 1 = 0.0318587 loss)\nI0822 05:49:45.227196 32487 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0822 05:52:02.849071 32487 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0822 05:53:24.371134 32487 solver.cpp:404]     Test net output #0: accuracy = 0.734\nI0822 05:53:24.371467 32487 solver.cpp:404]     Test net output #1: loss = 1.51028 (* 1 = 1.51028 loss)\nI0822 05:53:25.690197 32487 solver.cpp:228] Iteration 35000, loss = 0.0496886\nI0822 05:53:25.690244 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:53:25.690265 32487 solver.cpp:244]     Train net output #1: loss = 0.0496882 (* 1 = 0.0496882 loss)\nI0822 05:53:25.786727 32487 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0822 05:55:43.452750 32487 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0822 05:57:04.973320 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0822 05:57:04.973603 32487 solver.cpp:404]     Test net output #1: loss = 1.25735 (* 1 = 1.25735 loss)\nI0822 05:57:06.292810 32487 solver.cpp:228] Iteration 35100, loss = 0.0234892\nI0822 05:57:06.292855 32487 solver.cpp:244]   
  Train net output #0: accuracy = 1\nI0822 05:57:06.292870 32487 solver.cpp:244]     Train net output #1: loss = 0.0234889 (* 1 = 0.0234889 loss)\nI0822 05:57:06.383550 32487 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0822 05:59:24.153133 32487 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0822 06:00:45.671952 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63224\nI0822 06:00:45.672248 32487 solver.cpp:404]     Test net output #1: loss = 2.13973 (* 1 = 2.13973 loss)\nI0822 06:00:46.991482 32487 solver.cpp:228] Iteration 35200, loss = 0.0163045\nI0822 06:00:46.991529 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:00:46.991546 32487 solver.cpp:244]     Train net output #1: loss = 0.0163041 (* 1 = 0.0163041 loss)\nI0822 06:00:47.077385 32487 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0822 06:03:04.721227 32487 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0822 06:04:26.241789 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71812\nI0822 06:04:26.242100 32487 solver.cpp:404]     Test net output #1: loss = 1.59107 (* 1 = 1.59107 loss)\nI0822 06:04:27.560816 32487 solver.cpp:228] Iteration 35300, loss = 0.0124161\nI0822 06:04:27.560861 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:04:27.560878 32487 solver.cpp:244]     Train net output #1: loss = 0.0124159 (* 1 = 0.0124159 loss)\nI0822 06:04:27.651335 32487 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0822 06:06:45.294836 32487 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0822 06:08:06.810712 32487 solver.cpp:404]     Test net output #0: accuracy = 0.736\nI0822 06:08:06.811002 32487 solver.cpp:404]     Test net output #1: loss = 1.31131 (* 1 = 1.31131 loss)\nI0822 06:08:08.129456 32487 solver.cpp:228] Iteration 35400, loss = 0.0378706\nI0822 06:08:08.129503 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:08:08.129518 32487 solver.cpp:244]     Train net output #1: loss = 0.0378703 (* 1 = 
0.0378703 loss)\nI0822 06:08:08.221525 32487 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0822 06:10:25.948576 32487 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0822 06:11:47.453390 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67016\nI0822 06:11:47.453680 32487 solver.cpp:404]     Test net output #1: loss = 1.93198 (* 1 = 1.93198 loss)\nI0822 06:11:48.771806 32487 solver.cpp:228] Iteration 35500, loss = 0.0088659\nI0822 06:11:48.771853 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:11:48.771870 32487 solver.cpp:244]     Train net output #1: loss = 0.00886562 (* 1 = 0.00886562 loss)\nI0822 06:11:48.864878 32487 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0822 06:14:06.644266 32487 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0822 06:15:28.154974 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70684\nI0822 06:15:28.155261 32487 solver.cpp:404]     Test net output #1: loss = 1.53137 (* 1 = 1.53137 loss)\nI0822 06:15:29.473527 32487 solver.cpp:228] Iteration 35600, loss = 0.0910715\nI0822 06:15:29.473573 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 06:15:29.473590 32487 solver.cpp:244]     Train net output #1: loss = 0.0910712 (* 1 = 0.0910712 loss)\nI0822 06:15:29.569847 32487 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0822 06:17:47.356566 32487 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0822 06:19:08.874454 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63632\nI0822 06:19:08.874756 32487 solver.cpp:404]     Test net output #1: loss = 2.23433 (* 1 = 2.23433 loss)\nI0822 06:19:10.192651 32487 solver.cpp:228] Iteration 35700, loss = 0.0562996\nI0822 06:19:10.192695 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 06:19:10.192713 32487 solver.cpp:244]     Train net output #1: loss = 0.0562993 (* 1 = 0.0562993 loss)\nI0822 06:19:10.281741 32487 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0822 06:21:27.984961 32487 
solver.cpp:337] Iteration 35800, Testing net (#0)\nI0822 06:22:49.514470 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78028\nI0822 06:22:49.514780 32487 solver.cpp:404]     Test net output #1: loss = 1.05054 (* 1 = 1.05054 loss)\nI0822 06:22:50.832674 32487 solver.cpp:228] Iteration 35800, loss = 0.02675\nI0822 06:22:50.832720 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:22:50.832737 32487 solver.cpp:244]     Train net output #1: loss = 0.0267497 (* 1 = 0.0267497 loss)\nI0822 06:22:50.931252 32487 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0822 06:25:08.608744 32487 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0822 06:26:30.141407 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71792\nI0822 06:26:30.141706 32487 solver.cpp:404]     Test net output #1: loss = 1.61493 (* 1 = 1.61493 loss)\nI0822 06:26:31.459903 32487 solver.cpp:228] Iteration 35900, loss = 0.148692\nI0822 06:26:31.459949 32487 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0822 06:26:31.459964 32487 solver.cpp:244]     Train net output #1: loss = 0.148692 (* 1 = 0.148692 loss)\nI0822 06:26:31.556252 32487 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0822 06:28:49.237768 32487 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0822 06:30:10.765127 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68164\nI0822 06:30:10.765435 32487 solver.cpp:404]     Test net output #1: loss = 1.7228 (* 1 = 1.7228 loss)\nI0822 06:30:12.084122 32487 solver.cpp:228] Iteration 36000, loss = 0.00465867\nI0822 06:30:12.084167 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:30:12.084184 32487 solver.cpp:244]     Train net output #1: loss = 0.00465839 (* 1 = 0.00465839 loss)\nI0822 06:30:12.176105 32487 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0822 06:32:29.856473 32487 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0822 06:33:51.385124 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.69972\nI0822 06:33:51.385448 32487 solver.cpp:404]     Test net output #1: loss = 1.53545 (* 1 = 1.53545 loss)\nI0822 06:33:52.703477 32487 solver.cpp:228] Iteration 36100, loss = 0.0147007\nI0822 06:33:52.703523 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:33:52.703539 32487 solver.cpp:244]     Train net output #1: loss = 0.0147004 (* 1 = 0.0147004 loss)\nI0822 06:33:52.794543 32487 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0822 06:36:10.554697 32487 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0822 06:37:32.066851 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70476\nI0822 06:37:32.067155 32487 solver.cpp:404]     Test net output #1: loss = 1.62399 (* 1 = 1.62399 loss)\nI0822 06:37:33.384930 32487 solver.cpp:228] Iteration 36200, loss = 0.0149465\nI0822 06:37:33.384974 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:37:33.384990 32487 solver.cpp:244]     Train net output #1: loss = 0.0149462 (* 1 = 0.0149462 loss)\nI0822 06:37:33.480976 32487 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0822 06:39:51.269202 32487 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0822 06:41:12.778298 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74204\nI0822 06:41:12.778599 32487 solver.cpp:404]     Test net output #1: loss = 1.30994 (* 1 = 1.30994 loss)\nI0822 06:41:14.096159 32487 solver.cpp:228] Iteration 36300, loss = 0.0467654\nI0822 06:41:14.096204 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:41:14.096220 32487 solver.cpp:244]     Train net output #1: loss = 0.0467651 (* 1 = 0.0467651 loss)\nI0822 06:41:14.194838 32487 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0822 06:43:31.988662 32487 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0822 06:44:53.503193 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67696\nI0822 06:44:53.503499 32487 solver.cpp:404]     Test net output #1: loss = 2.30198 (* 1 = 2.30198 loss)\nI0822 
06:44:54.821472 32487 solver.cpp:228] Iteration 36400, loss = 0.0119947\nI0822 06:44:54.821514 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:44:54.821532 32487 solver.cpp:244]     Train net output #1: loss = 0.0119945 (* 1 = 0.0119945 loss)\nI0822 06:44:54.909219 32487 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0822 06:47:12.715139 32487 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0822 06:48:34.243541 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7028\nI0822 06:48:34.243836 32487 solver.cpp:404]     Test net output #1: loss = 1.6111 (* 1 = 1.6111 loss)\nI0822 06:48:35.561573 32487 solver.cpp:228] Iteration 36500, loss = 0.113412\nI0822 06:48:35.561615 32487 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0822 06:48:35.561632 32487 solver.cpp:244]     Train net output #1: loss = 0.113411 (* 1 = 0.113411 loss)\nI0822 06:48:35.650265 32487 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0822 06:50:53.359043 32487 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0822 06:52:14.882345 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73728\nI0822 06:52:14.882637 32487 solver.cpp:404]     Test net output #1: loss = 1.44503 (* 1 = 1.44503 loss)\nI0822 06:52:16.200748 32487 solver.cpp:228] Iteration 36600, loss = 0.0249282\nI0822 06:52:16.200791 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:52:16.200808 32487 solver.cpp:244]     Train net output #1: loss = 0.0249279 (* 1 = 0.0249279 loss)\nI0822 06:52:16.296321 32487 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0822 06:54:33.992719 32487 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0822 06:55:55.415990 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72624\nI0822 06:55:55.416290 32487 solver.cpp:404]     Test net output #1: loss = 1.74783 (* 1 = 1.74783 loss)\nI0822 06:55:56.734305 32487 solver.cpp:228] Iteration 36700, loss = 0.0245779\nI0822 06:55:56.734347 32487 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0822 06:55:56.734364 32487 solver.cpp:244]     Train net output #1: loss = 0.0245776 (* 1 = 0.0245776 loss)\nI0822 06:55:56.819955 32487 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0822 06:58:14.509529 32487 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0822 06:59:35.934101 32487 solver.cpp:404]     Test net output #0: accuracy = 0.61976\nI0822 06:59:35.934397 32487 solver.cpp:404]     Test net output #1: loss = 2.98957 (* 1 = 2.98957 loss)\nI0822 06:59:37.252496 32487 solver.cpp:228] Iteration 36800, loss = 0.0313194\nI0822 06:59:37.252537 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:59:37.252552 32487 solver.cpp:244]     Train net output #1: loss = 0.0313191 (* 1 = 0.0313191 loss)\nI0822 06:59:37.343196 32487 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0822 07:01:55.191284 32487 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0822 07:03:16.611225 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72472\nI0822 07:03:16.611557 32487 solver.cpp:404]     Test net output #1: loss = 1.50447 (* 1 = 1.50447 loss)\nI0822 07:03:17.929199 32487 solver.cpp:228] Iteration 36900, loss = 0.032898\nI0822 07:03:17.929239 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:03:17.929260 32487 solver.cpp:244]     Train net output #1: loss = 0.0328977 (* 1 = 0.0328977 loss)\nI0822 07:03:18.027143 32487 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0822 07:05:35.797912 32487 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0822 07:06:57.225409 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73288\nI0822 07:06:57.225709 32487 solver.cpp:404]     Test net output #1: loss = 1.45977 (* 1 = 1.45977 loss)\nI0822 07:06:58.543571 32487 solver.cpp:228] Iteration 37000, loss = 0.00487465\nI0822 07:06:58.543613 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:06:58.543629 32487 solver.cpp:244]     Train net output #1: loss = 0.00487437 (* 1 = 0.00487437 
loss)\nI0822 07:06:58.636879 32487 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0822 07:09:16.400384 32487 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0822 07:10:37.827301 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7016\nI0822 07:10:37.827601 32487 solver.cpp:404]     Test net output #1: loss = 1.59626 (* 1 = 1.59626 loss)\nI0822 07:10:39.145781 32487 solver.cpp:228] Iteration 37100, loss = 0.0281153\nI0822 07:10:39.145822 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:10:39.145838 32487 solver.cpp:244]     Train net output #1: loss = 0.028115 (* 1 = 0.028115 loss)\nI0822 07:10:39.239050 32487 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0822 07:12:57.117223 32487 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0822 07:14:18.539770 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76212\nI0822 07:14:18.540069 32487 solver.cpp:404]     Test net output #1: loss = 1.12157 (* 1 = 1.12157 loss)\nI0822 07:14:19.858808 32487 solver.cpp:228] Iteration 37200, loss = 0.037037\nI0822 07:14:19.858850 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:14:19.858866 32487 solver.cpp:244]     Train net output #1: loss = 0.0370367 (* 1 = 0.0370367 loss)\nI0822 07:14:19.958464 32487 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0822 07:16:37.769700 32487 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0822 07:17:59.189152 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76684\nI0822 07:17:59.189453 32487 solver.cpp:404]     Test net output #1: loss = 1.26445 (* 1 = 1.26445 loss)\nI0822 07:18:00.507388 32487 solver.cpp:228] Iteration 37300, loss = 0.0429875\nI0822 07:18:00.507431 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:18:00.507447 32487 solver.cpp:244]     Train net output #1: loss = 0.0429872 (* 1 = 0.0429872 loss)\nI0822 07:18:00.599642 32487 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0822 07:20:18.396445 32487 solver.cpp:337] 
Iteration 37400, Testing net (#0)\nI0822 07:21:39.812131 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65736\nI0822 07:21:39.812441 32487 solver.cpp:404]     Test net output #1: loss = 2.28555 (* 1 = 2.28555 loss)\nI0822 07:21:41.129639 32487 solver.cpp:228] Iteration 37400, loss = 0.0128645\nI0822 07:21:41.129678 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:21:41.129693 32487 solver.cpp:244]     Train net output #1: loss = 0.0128643 (* 1 = 0.0128643 loss)\nI0822 07:21:41.220906 32487 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0822 07:23:59.055763 32487 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0822 07:25:20.477366 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74428\nI0822 07:25:20.477681 32487 solver.cpp:404]     Test net output #1: loss = 1.33192 (* 1 = 1.33192 loss)\nI0822 07:25:21.795881 32487 solver.cpp:228] Iteration 37500, loss = 0.026923\nI0822 07:25:21.795919 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:25:21.795935 32487 solver.cpp:244]     Train net output #1: loss = 0.0269227 (* 1 = 0.0269227 loss)\nI0822 07:25:21.889310 32487 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0822 07:27:39.646466 32487 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0822 07:29:01.115830 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74716\nI0822 07:29:01.116113 32487 solver.cpp:404]     Test net output #1: loss = 1.25443 (* 1 = 1.25443 loss)\nI0822 07:29:02.433672 32487 solver.cpp:228] Iteration 37600, loss = 0.0458297\nI0822 07:29:02.433713 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:29:02.433730 32487 solver.cpp:244]     Train net output #1: loss = 0.0458294 (* 1 = 0.0458294 loss)\nI0822 07:29:02.525527 32487 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0822 07:31:20.351900 32487 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0822 07:32:41.868631 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76856\nI0822 
07:32:41.868949 32487 solver.cpp:404]     Test net output #1: loss = 1.16587 (* 1 = 1.16587 loss)\nI0822 07:32:43.187351 32487 solver.cpp:228] Iteration 37700, loss = 0.0495452\nI0822 07:32:43.187392 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:32:43.187408 32487 solver.cpp:244]     Train net output #1: loss = 0.0495449 (* 1 = 0.0495449 loss)\nI0822 07:32:43.277393 32487 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0822 07:35:01.050062 32487 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0822 07:36:22.570787 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66168\nI0822 07:36:22.571074 32487 solver.cpp:404]     Test net output #1: loss = 2.02367 (* 1 = 2.02367 loss)\nI0822 07:36:23.889397 32487 solver.cpp:228] Iteration 37800, loss = 0.0109653\nI0822 07:36:23.889439 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:36:23.889456 32487 solver.cpp:244]     Train net output #1: loss = 0.010965 (* 1 = 0.010965 loss)\nI0822 07:36:23.984009 32487 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0822 07:38:41.670469 32487 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0822 07:40:03.194304 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7336\nI0822 07:40:03.194576 32487 solver.cpp:404]     Test net output #1: loss = 1.43207 (* 1 = 1.43207 loss)\nI0822 07:40:04.512112 32487 solver.cpp:228] Iteration 37900, loss = 0.0632031\nI0822 07:40:04.512153 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 07:40:04.512169 32487 solver.cpp:244]     Train net output #1: loss = 0.0632028 (* 1 = 0.0632028 loss)\nI0822 07:40:04.609622 32487 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0822 07:42:22.412626 32487 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0822 07:43:43.928251 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76872\nI0822 07:43:43.928565 32487 solver.cpp:404]     Test net output #1: loss = 1.10304 (* 1 = 1.10304 loss)\nI0822 07:43:45.246101 32487 
solver.cpp:228] Iteration 38000, loss = 0.0267427\nI0822 07:43:45.246141 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:43:45.246157 32487 solver.cpp:244]     Train net output #1: loss = 0.0267424 (* 1 = 0.0267424 loss)\nI0822 07:43:45.340034 32487 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0822 07:46:03.016508 32487 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0822 07:47:24.537250 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7768\nI0822 07:47:24.537564 32487 solver.cpp:404]     Test net output #1: loss = 1.1816 (* 1 = 1.1816 loss)\nI0822 07:47:25.855396 32487 solver.cpp:228] Iteration 38100, loss = 0.0266208\nI0822 07:47:25.855438 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:47:25.855454 32487 solver.cpp:244]     Train net output #1: loss = 0.0266205 (* 1 = 0.0266205 loss)\nI0822 07:47:25.947778 32487 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0822 07:49:43.758709 32487 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0822 07:51:05.281134 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69096\nI0822 07:51:05.281440 32487 solver.cpp:404]     Test net output #1: loss = 1.89704 (* 1 = 1.89704 loss)\nI0822 07:51:06.599282 32487 solver.cpp:228] Iteration 38200, loss = 0.00778979\nI0822 07:51:06.599326 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:51:06.599342 32487 solver.cpp:244]     Train net output #1: loss = 0.00778951 (* 1 = 0.00778951 loss)\nI0822 07:51:06.691468 32487 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0822 07:53:24.506392 32487 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0822 07:54:46.523475 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72404\nI0822 07:54:46.523775 32487 solver.cpp:404]     Test net output #1: loss = 1.55545 (* 1 = 1.55545 loss)\nI0822 07:54:47.847354 32487 solver.cpp:228] Iteration 38300, loss = 0.0388394\nI0822 07:54:47.847396 32487 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0822 07:54:47.847411 32487 solver.cpp:244]     Train net output #1: loss = 0.0388391 (* 1 = 0.0388391 loss)\nI0822 07:54:47.935511 32487 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0822 07:57:05.922152 32487 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0822 07:58:28.263284 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75716\nI0822 07:58:28.263587 32487 solver.cpp:404]     Test net output #1: loss = 1.22241 (* 1 = 1.22241 loss)\nI0822 07:58:29.587296 32487 solver.cpp:228] Iteration 38400, loss = 0.00885066\nI0822 07:58:29.587342 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:58:29.587357 32487 solver.cpp:244]     Train net output #1: loss = 0.00885039 (* 1 = 0.00885039 loss)\nI0822 07:58:29.677503 32487 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0822 08:00:47.723451 32487 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0822 08:02:09.805469 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76116\nI0822 08:02:09.805742 32487 solver.cpp:404]     Test net output #1: loss = 1.3204 (* 1 = 1.3204 loss)\nI0822 08:02:11.128032 32487 solver.cpp:228] Iteration 38500, loss = 0.011529\nI0822 08:02:11.128074 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:02:11.128092 32487 solver.cpp:244]     Train net output #1: loss = 0.0115287 (* 1 = 0.0115287 loss)\nI0822 08:02:11.221112 32487 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0822 08:04:29.168557 32487 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0822 08:05:51.234900 32487 solver.cpp:404]     Test net output #0: accuracy = 0.636\nI0822 08:05:51.235200 32487 solver.cpp:404]     Test net output #1: loss = 2.28384 (* 1 = 2.28384 loss)\nI0822 08:05:52.557978 32487 solver.cpp:228] Iteration 38600, loss = 0.051139\nI0822 08:05:52.558019 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:05:52.558037 32487 solver.cpp:244]     Train net output #1: loss = 0.0511387 (* 1 = 0.0511387 loss)\nI0822 08:05:52.650607 
32487 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0822 08:08:10.630760 32487 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0822 08:09:32.640218 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74148\nI0822 08:09:32.640516 32487 solver.cpp:404]     Test net output #1: loss = 1.40178 (* 1 = 1.40178 loss)\nI0822 08:09:33.963073 32487 solver.cpp:228] Iteration 38700, loss = 0.049879\nI0822 08:09:33.963116 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:09:33.963132 32487 solver.cpp:244]     Train net output #1: loss = 0.0498788 (* 1 = 0.0498788 loss)\nI0822 08:09:34.057073 32487 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0822 08:11:52.022137 32487 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0822 08:13:14.233304 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70308\nI0822 08:13:14.233654 32487 solver.cpp:404]     Test net output #1: loss = 1.81483 (* 1 = 1.81483 loss)\nI0822 08:13:15.555891 32487 solver.cpp:228] Iteration 38800, loss = 0.0313541\nI0822 08:13:15.555933 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:13:15.555949 32487 solver.cpp:244]     Train net output #1: loss = 0.0313538 (* 1 = 0.0313538 loss)\nI0822 08:13:15.649549 32487 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0822 08:15:33.479831 32487 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0822 08:16:55.855939 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68256\nI0822 08:16:55.856271 32487 solver.cpp:404]     Test net output #1: loss = 1.88116 (* 1 = 1.88116 loss)\nI0822 08:16:57.178647 32487 solver.cpp:228] Iteration 38900, loss = 0.00877612\nI0822 08:16:57.178688 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:16:57.178704 32487 solver.cpp:244]     Train net output #1: loss = 0.00877583 (* 1 = 0.00877583 loss)\nI0822 08:16:57.270373 32487 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0822 08:19:15.272107 32487 solver.cpp:337] Iteration 39000, Testing net 
(#0)\nI0822 08:20:37.657034 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77012\nI0822 08:20:37.657349 32487 solver.cpp:404]     Test net output #1: loss = 1.12945 (* 1 = 1.12945 loss)\nI0822 08:20:38.979993 32487 solver.cpp:228] Iteration 39000, loss = 0.0455052\nI0822 08:20:38.980036 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:20:38.980049 32487 solver.cpp:244]     Train net output #1: loss = 0.0455049 (* 1 = 0.0455049 loss)\nI0822 08:20:39.070103 32487 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0822 08:22:57.070711 32487 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0822 08:24:19.442183 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67316\nI0822 08:24:19.442515 32487 solver.cpp:404]     Test net output #1: loss = 1.91078 (* 1 = 1.91078 loss)\nI0822 08:24:20.764858 32487 solver.cpp:228] Iteration 39100, loss = 0.0103278\nI0822 08:24:20.764899 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:24:20.764914 32487 solver.cpp:244]     Train net output #1: loss = 0.0103275 (* 1 = 0.0103275 loss)\nI0822 08:24:20.853102 32487 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0822 08:26:38.858561 32487 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0822 08:28:01.262001 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69256\nI0822 08:28:01.262373 32487 solver.cpp:404]     Test net output #1: loss = 1.72926 (* 1 = 1.72926 loss)\nI0822 08:28:02.585086 32487 solver.cpp:228] Iteration 39200, loss = 0.0424435\nI0822 08:28:02.585130 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:28:02.585153 32487 solver.cpp:244]     Train net output #1: loss = 0.0424432 (* 1 = 0.0424432 loss)\nI0822 08:28:02.670083 32487 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0822 08:30:20.591995 32487 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0822 08:31:43.001909 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74704\nI0822 08:31:43.002234 32487 
solver.cpp:404]     Test net output #1: loss = 1.35766 (* 1 = 1.35766 loss)\nI0822 08:31:44.325057 32487 solver.cpp:228] Iteration 39300, loss = 0.0272801\nI0822 08:31:44.325095 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:31:44.325119 32487 solver.cpp:244]     Train net output #1: loss = 0.0272798 (* 1 = 0.0272798 loss)\nI0822 08:31:44.409462 32487 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0822 08:34:02.377264 32487 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0822 08:35:24.774638 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76568\nI0822 08:35:24.774991 32487 solver.cpp:404]     Test net output #1: loss = 1.22365 (* 1 = 1.22365 loss)\nI0822 08:35:26.097700 32487 solver.cpp:228] Iteration 39400, loss = 0.0373018\nI0822 08:35:26.097738 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 08:35:26.097754 32487 solver.cpp:244]     Train net output #1: loss = 0.0373015 (* 1 = 0.0373015 loss)\nI0822 08:35:26.179193 32487 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0822 08:37:44.024119 32487 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0822 08:39:06.409965 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68756\nI0822 08:39:06.410315 32487 solver.cpp:404]     Test net output #1: loss = 1.93284 (* 1 = 1.93284 loss)\nI0822 08:39:07.733033 32487 solver.cpp:228] Iteration 39500, loss = 0.00527329\nI0822 08:39:07.733069 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:39:07.733085 32487 solver.cpp:244]     Train net output #1: loss = 0.00527297 (* 1 = 0.00527297 loss)\nI0822 08:39:07.824308 32487 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0822 08:41:25.778631 32487 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0822 08:42:48.150851 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76452\nI0822 08:42:48.151191 32487 solver.cpp:404]     Test net output #1: loss = 1.26871 (* 1 = 1.26871 loss)\nI0822 08:42:49.474282 32487 solver.cpp:228] 
Iteration 39600, loss = 0.0183216\nI0822 08:42:49.474318 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:42:49.474333 32487 solver.cpp:244]     Train net output #1: loss = 0.0183213 (* 1 = 0.0183213 loss)\nI0822 08:42:49.567744 32487 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0822 08:45:07.675830 32487 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0822 08:46:30.066757 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7086\nI0822 08:46:30.067106 32487 solver.cpp:404]     Test net output #1: loss = 1.7275 (* 1 = 1.7275 loss)\nI0822 08:46:31.390130 32487 solver.cpp:228] Iteration 39700, loss = 0.0327857\nI0822 08:46:31.390167 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:46:31.390183 32487 solver.cpp:244]     Train net output #1: loss = 0.0327853 (* 1 = 0.0327853 loss)\nI0822 08:46:31.485878 32487 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0822 08:48:49.319458 32487 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0822 08:50:11.702512 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71496\nI0822 08:50:11.702852 32487 solver.cpp:404]     Test net output #1: loss = 1.77525 (* 1 = 1.77525 loss)\nI0822 08:50:13.026077 32487 solver.cpp:228] Iteration 39800, loss = 0.0217901\nI0822 08:50:13.026113 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:50:13.026129 32487 solver.cpp:244]     Train net output #1: loss = 0.0217898 (* 1 = 0.0217898 loss)\nI0822 08:50:13.118271 32487 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0822 08:52:31.115227 32487 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0822 08:53:53.490082 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69368\nI0822 08:53:53.490407 32487 solver.cpp:404]     Test net output #1: loss = 1.90817 (* 1 = 1.90817 loss)\nI0822 08:53:54.812773 32487 solver.cpp:228] Iteration 39900, loss = 0.015674\nI0822 08:53:54.812816 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
08:53:54.812832 32487 solver.cpp:244]     Train net output #1: loss = 0.0156737 (* 1 = 0.0156737 loss)\nI0822 08:53:54.906709 32487 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0822 08:56:12.809590 32487 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0822 08:57:35.178875 32487 solver.cpp:404]     Test net output #0: accuracy = 0.62016\nI0822 08:57:35.179230 32487 solver.cpp:404]     Test net output #1: loss = 2.64889 (* 1 = 2.64889 loss)\nI0822 08:57:36.501243 32487 solver.cpp:228] Iteration 40000, loss = 0.0334642\nI0822 08:57:36.501284 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:57:36.501301 32487 solver.cpp:244]     Train net output #1: loss = 0.0334639 (* 1 = 0.0334639 loss)\nI0822 08:57:36.582248 32487 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0822 08:59:54.223902 32487 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0822 09:01:16.603619 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7298\nI0822 09:01:16.603958 32487 solver.cpp:404]     Test net output #1: loss = 1.54151 (* 1 = 1.54151 loss)\nI0822 09:01:17.926628 32487 solver.cpp:228] Iteration 40100, loss = 0.0417879\nI0822 09:01:17.926668 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:01:17.926684 32487 solver.cpp:244]     Train net output #1: loss = 0.0417876 (* 1 = 0.0417876 loss)\nI0822 09:01:18.011538 32487 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0822 09:03:35.639653 32487 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0822 09:04:58.023192 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74528\nI0822 09:04:58.023525 32487 solver.cpp:404]     Test net output #1: loss = 1.18277 (* 1 = 1.18277 loss)\nI0822 09:04:59.345980 32487 solver.cpp:228] Iteration 40200, loss = 0.0305271\nI0822 09:04:59.346022 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:04:59.346038 32487 solver.cpp:244]     Train net output #1: loss = 0.0305268 (* 1 = 0.0305268 loss)\nI0822 09:04:59.438552 
32487 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0822 09:07:17.399154 32487 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0822 09:08:39.776110 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76552\nI0822 09:08:39.776468 32487 solver.cpp:404]     Test net output #1: loss = 1.16705 (* 1 = 1.16705 loss)\nI0822 09:08:41.099406 32487 solver.cpp:228] Iteration 40300, loss = 0.0251454\nI0822 09:08:41.099452 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:08:41.099467 32487 solver.cpp:244]     Train net output #1: loss = 0.0251452 (* 1 = 0.0251452 loss)\nI0822 09:08:41.182358 32487 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0822 09:10:58.831039 32487 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0822 09:12:21.203018 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69008\nI0822 09:12:21.203382 32487 solver.cpp:404]     Test net output #1: loss = 1.65807 (* 1 = 1.65807 loss)\nI0822 09:12:22.527386 32487 solver.cpp:228] Iteration 40400, loss = 0.0215883\nI0822 09:12:22.527426 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:12:22.527447 32487 solver.cpp:244]     Train net output #1: loss = 0.0215881 (* 1 = 0.0215881 loss)\nI0822 09:12:22.619333 32487 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0822 09:14:40.289912 32487 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0822 09:16:02.670522 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0822 09:16:02.670867 32487 solver.cpp:404]     Test net output #1: loss = 1.25934 (* 1 = 1.25934 loss)\nI0822 09:16:03.993290 32487 solver.cpp:228] Iteration 40500, loss = 0.0355556\nI0822 09:16:03.993331 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:16:03.993347 32487 solver.cpp:244]     Train net output #1: loss = 0.0355553 (* 1 = 0.0355553 loss)\nI0822 09:16:04.080978 32487 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0822 09:18:21.813974 32487 solver.cpp:337] Iteration 40600, Testing net 
(#0)\nI0822 09:19:44.207185 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78044\nI0822 09:19:44.207520 32487 solver.cpp:404]     Test net output #1: loss = 0.998614 (* 1 = 0.998614 loss)\nI0822 09:19:45.530149 32487 solver.cpp:228] Iteration 40600, loss = 0.0661847\nI0822 09:19:45.530191 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:19:45.530207 32487 solver.cpp:244]     Train net output #1: loss = 0.0661844 (* 1 = 0.0661844 loss)\nI0822 09:19:45.628099 32487 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0822 09:22:03.393589 32487 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0822 09:23:25.771136 32487 solver.cpp:404]     Test net output #0: accuracy = 0.64076\nI0822 09:23:25.771488 32487 solver.cpp:404]     Test net output #1: loss = 2.2939 (* 1 = 2.2939 loss)\nI0822 09:23:27.093922 32487 solver.cpp:228] Iteration 40700, loss = 0.047224\nI0822 09:23:27.093976 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:23:27.093994 32487 solver.cpp:244]     Train net output #1: loss = 0.0472237 (* 1 = 0.0472237 loss)\nI0822 09:23:27.177139 32487 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0822 09:25:44.918872 32487 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0822 09:27:07.302507 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75976\nI0822 09:27:07.302868 32487 solver.cpp:404]     Test net output #1: loss = 1.17221 (* 1 = 1.17221 loss)\nI0822 09:27:08.625193 32487 solver.cpp:228] Iteration 40800, loss = 0.0101814\nI0822 09:27:08.625246 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:27:08.625262 32487 solver.cpp:244]     Train net output #1: loss = 0.0101811 (* 1 = 0.0101811 loss)\nI0822 09:27:08.713541 32487 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0822 09:29:26.406903 32487 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0822 09:30:48.786440 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63452\nI0822 09:30:48.786764 32487 
solver.cpp:404]     Test net output #1: loss = 2.31984 (* 1 = 2.31984 loss)\nI0822 09:30:50.108878 32487 solver.cpp:228] Iteration 40900, loss = 0.00868466\nI0822 09:30:50.108930 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:30:50.108947 32487 solver.cpp:244]     Train net output #1: loss = 0.00868435 (* 1 = 0.00868435 loss)\nI0822 09:30:50.199165 32487 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0822 09:33:07.901662 32487 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0822 09:34:30.276906 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72616\nI0822 09:34:30.277235 32487 solver.cpp:404]     Test net output #1: loss = 1.43037 (* 1 = 1.43037 loss)\nI0822 09:34:31.599509 32487 solver.cpp:228] Iteration 41000, loss = 0.0361069\nI0822 09:34:31.599562 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:34:31.599578 32487 solver.cpp:244]     Train net output #1: loss = 0.0361066 (* 1 = 0.0361066 loss)\nI0822 09:34:31.684887 32487 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0822 09:36:49.319141 32487 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0822 09:38:11.692250 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73136\nI0822 09:38:11.692610 32487 solver.cpp:404]     Test net output #1: loss = 1.4688 (* 1 = 1.4688 loss)\nI0822 09:38:13.015347 32487 solver.cpp:228] Iteration 41100, loss = 0.00842845\nI0822 09:38:13.015390 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:38:13.015408 32487 solver.cpp:244]     Train net output #1: loss = 0.00842813 (* 1 = 0.00842813 loss)\nI0822 09:38:13.108798 32487 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0822 09:40:30.800941 32487 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0822 09:41:53.174917 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7218\nI0822 09:41:53.175266 32487 solver.cpp:404]     Test net output #1: loss = 1.48006 (* 1 = 1.48006 loss)\nI0822 09:41:54.497555 32487 solver.cpp:228] Iteration 
41200, loss = 0.0527676\nI0822 09:41:54.497597 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 09:41:54.497614 32487 solver.cpp:244]     Train net output #1: loss = 0.0527672 (* 1 = 0.0527672 loss)\nI0822 09:41:54.579566 32487 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0822 09:44:12.177381 32487 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0822 09:45:34.559654 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7552\nI0822 09:45:34.559980 32487 solver.cpp:404]     Test net output #1: loss = 1.23619 (* 1 = 1.23619 loss)\nI0822 09:45:35.882122 32487 solver.cpp:228] Iteration 41300, loss = 0.0732904\nI0822 09:45:35.882165 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 09:45:35.882182 32487 solver.cpp:244]     Train net output #1: loss = 0.0732901 (* 1 = 0.0732901 loss)\nI0822 09:45:35.968946 32487 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0822 09:47:53.647332 32487 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0822 09:49:16.030956 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75628\nI0822 09:49:16.031304 32487 solver.cpp:404]     Test net output #1: loss = 1.22557 (* 1 = 1.22557 loss)\nI0822 09:49:17.354269 32487 solver.cpp:228] Iteration 41400, loss = 0.00895261\nI0822 09:49:17.354320 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:49:17.354337 32487 solver.cpp:244]     Train net output #1: loss = 0.00895227 (* 1 = 0.00895227 loss)\nI0822 09:49:17.437636 32487 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0822 09:51:35.322021 32487 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0822 09:52:57.684140 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6854\nI0822 09:52:57.684489 32487 solver.cpp:404]     Test net output #1: loss = 1.66998 (* 1 = 1.66998 loss)\nI0822 09:52:59.007067 32487 solver.cpp:228] Iteration 41500, loss = 0.0547168\nI0822 09:52:59.007119 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 
09:52:59.007136 32487 solver.cpp:244]     Train net output #1: loss = 0.0547165 (* 1 = 0.0547165 loss)\nI0822 09:52:59.095616 32487 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0822 09:55:16.846773 32487 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0822 09:56:39.223393 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7364\nI0822 09:56:39.223750 32487 solver.cpp:404]     Test net output #1: loss = 1.33711 (* 1 = 1.33711 loss)\nI0822 09:56:40.547318 32487 solver.cpp:228] Iteration 41600, loss = 0.0449697\nI0822 09:56:40.547372 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:56:40.547389 32487 solver.cpp:244]     Train net output #1: loss = 0.0449694 (* 1 = 0.0449694 loss)\nI0822 09:56:40.637935 32487 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0822 09:58:58.236969 32487 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0822 10:00:20.619359 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74572\nI0822 10:00:20.619702 32487 solver.cpp:404]     Test net output #1: loss = 1.28223 (* 1 = 1.28223 loss)\nI0822 10:00:21.942019 32487 solver.cpp:228] Iteration 41700, loss = 0.00800398\nI0822 10:00:21.942064 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:00:21.942078 32487 solver.cpp:244]     Train net output #1: loss = 0.00800364 (* 1 = 0.00800364 loss)\nI0822 10:00:22.022971 32487 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0822 10:02:39.656627 32487 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0822 10:04:02.039235 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72792\nI0822 10:04:02.039569 32487 solver.cpp:404]     Test net output #1: loss = 1.38225 (* 1 = 1.38225 loss)\nI0822 10:04:03.361672 32487 solver.cpp:228] Iteration 41800, loss = 0.0508751\nI0822 10:04:03.361719 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:04:03.361737 32487 solver.cpp:244]     Train net output #1: loss = 0.0508747 (* 1 = 0.0508747 loss)\nI0822 10:04:03.443169 
32487 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0822 10:06:21.078627 32487 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0822 10:07:43.451164 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65596\nI0822 10:07:43.451503 32487 solver.cpp:404]     Test net output #1: loss = 1.8112 (* 1 = 1.8112 loss)\nI0822 10:07:44.773653 32487 solver.cpp:228] Iteration 41900, loss = 0.0407373\nI0822 10:07:44.773699 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:07:44.773716 32487 solver.cpp:244]     Train net output #1: loss = 0.040737 (* 1 = 0.040737 loss)\nI0822 10:07:44.856077 32487 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0822 10:10:02.517181 32487 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0822 10:11:24.888810 32487 solver.cpp:404]     Test net output #0: accuracy = 0.695\nI0822 10:11:24.889158 32487 solver.cpp:404]     Test net output #1: loss = 1.56689 (* 1 = 1.56689 loss)\nI0822 10:11:26.212201 32487 solver.cpp:228] Iteration 42000, loss = 0.0160862\nI0822 10:11:26.212249 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:11:26.212265 32487 solver.cpp:244]     Train net output #1: loss = 0.0160859 (* 1 = 0.0160859 loss)\nI0822 10:11:26.293071 32487 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0822 10:13:43.963066 32487 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0822 10:15:06.330816 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73128\nI0822 10:15:06.331172 32487 solver.cpp:404]     Test net output #1: loss = 1.49946 (* 1 = 1.49946 loss)\nI0822 10:15:07.653782 32487 solver.cpp:228] Iteration 42100, loss = 0.0222819\nI0822 10:15:07.653833 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:15:07.653851 32487 solver.cpp:244]     Train net output #1: loss = 0.0222815 (* 1 = 0.0222815 loss)\nI0822 10:15:07.747264 32487 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0822 10:17:25.647027 32487 solver.cpp:337] Iteration 42200, Testing net 
(#0)\nI0822 10:18:48.017151 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6874\nI0822 10:18:48.017489 32487 solver.cpp:404]     Test net output #1: loss = 2.0936 (* 1 = 2.0936 loss)\nI0822 10:18:49.339689 32487 solver.cpp:228] Iteration 42200, loss = 0.055093\nI0822 10:18:49.339735 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:18:49.339752 32487 solver.cpp:244]     Train net output #1: loss = 0.0550927 (* 1 = 0.0550927 loss)\nI0822 10:18:49.427587 32487 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0822 10:21:07.056766 32487 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0822 10:22:29.434350 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66932\nI0822 10:22:29.434713 32487 solver.cpp:404]     Test net output #1: loss = 2.39172 (* 1 = 2.39172 loss)\nI0822 10:22:30.757308 32487 solver.cpp:228] Iteration 42300, loss = 0.0185129\nI0822 10:22:30.757355 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:22:30.757372 32487 solver.cpp:244]     Train net output #1: loss = 0.0185125 (* 1 = 0.0185125 loss)\nI0822 10:22:30.844224 32487 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0822 10:24:48.564987 32487 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0822 10:26:10.914072 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69876\nI0822 10:26:10.914409 32487 solver.cpp:404]     Test net output #1: loss = 1.91929 (* 1 = 1.91929 loss)\nI0822 10:26:12.236471 32487 solver.cpp:228] Iteration 42400, loss = 0.0429615\nI0822 10:26:12.236519 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:26:12.236536 32487 solver.cpp:244]     Train net output #1: loss = 0.0429611 (* 1 = 0.0429611 loss)\nI0822 10:26:12.319526 32487 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0822 10:28:30.060528 32487 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0822 10:29:52.410969 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73716\nI0822 10:29:52.411319 32487 
solver.cpp:404]     Test net output #1: loss = 1.43406 (* 1 = 1.43406 loss)\nI0822 10:29:53.733455 32487 solver.cpp:228] Iteration 42500, loss = 0.00695386\nI0822 10:29:53.733503 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:29:53.733520 32487 solver.cpp:244]     Train net output #1: loss = 0.00695349 (* 1 = 0.00695349 loss)\nI0822 10:29:53.823034 32487 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0822 10:32:11.501600 32487 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0822 10:33:33.850725 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77492\nI0822 10:33:33.851055 32487 solver.cpp:404]     Test net output #1: loss = 1.15531 (* 1 = 1.15531 loss)\nI0822 10:33:35.173578 32487 solver.cpp:228] Iteration 42600, loss = 0.0558621\nI0822 10:33:35.173629 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:33:35.173645 32487 solver.cpp:244]     Train net output #1: loss = 0.0558618 (* 1 = 0.0558618 loss)\nI0822 10:33:35.261950 32487 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0822 10:35:52.958050 32487 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0822 10:37:15.301877 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73784\nI0822 10:37:15.302211 32487 solver.cpp:404]     Test net output #1: loss = 1.46631 (* 1 = 1.46631 loss)\nI0822 10:37:16.626090 32487 solver.cpp:228] Iteration 42700, loss = 0.00639021\nI0822 10:37:16.626129 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:37:16.626145 32487 solver.cpp:244]     Train net output #1: loss = 0.00638984 (* 1 = 0.00638984 loss)\nI0822 10:37:16.712007 32487 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0822 10:39:34.589270 32487 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0822 10:40:56.954581 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75656\nI0822 10:40:56.954905 32487 solver.cpp:404]     Test net output #1: loss = 1.27387 (* 1 = 1.27387 loss)\nI0822 10:40:58.278971 32487 solver.cpp:228] 
Iteration 42800, loss = 0.0442932\nI0822 10:40:58.279016 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:40:58.279039 32487 solver.cpp:244]     Train net output #1: loss = 0.0442928 (* 1 = 0.0442928 loss)\nI0822 10:40:58.358494 32487 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0822 10:43:16.047354 32487 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0822 10:44:38.416270 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76792\nI0822 10:44:38.416633 32487 solver.cpp:404]     Test net output #1: loss = 1.19684 (* 1 = 1.19684 loss)\nI0822 10:44:39.740005 32487 solver.cpp:228] Iteration 42900, loss = 0.0311565\nI0822 10:44:39.740047 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:44:39.740069 32487 solver.cpp:244]     Train net output #1: loss = 0.0311562 (* 1 = 0.0311562 loss)\nI0822 10:44:39.831957 32487 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0822 10:46:57.497601 32487 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0822 10:48:19.869675 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71732\nI0822 10:48:19.870034 32487 solver.cpp:404]     Test net output #1: loss = 1.75736 (* 1 = 1.75736 loss)\nI0822 10:48:21.193439 32487 solver.cpp:228] Iteration 43000, loss = 0.0413002\nI0822 10:48:21.193480 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:48:21.193503 32487 solver.cpp:244]     Train net output #1: loss = 0.0412998 (* 1 = 0.0412998 loss)\nI0822 10:48:21.284236 32487 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0822 10:50:38.981377 32487 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0822 10:52:01.323724 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71544\nI0822 10:52:01.324056 32487 solver.cpp:404]     Test net output #1: loss = 1.60369 (* 1 = 1.60369 loss)\nI0822 10:52:02.647598 32487 solver.cpp:228] Iteration 43100, loss = 0.0637125\nI0822 10:52:02.647644 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 
10:52:02.647660 32487 solver.cpp:244]     Train net output #1: loss = 0.0637121 (* 1 = 0.0637121 loss)\nI0822 10:52:02.735724 32487 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0822 10:54:20.371824 32487 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0822 10:55:42.718924 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78088\nI0822 10:55:42.719280 32487 solver.cpp:404]     Test net output #1: loss = 1.07173 (* 1 = 1.07173 loss)\nI0822 10:55:44.042320 32487 solver.cpp:228] Iteration 43200, loss = 0.0166518\nI0822 10:55:44.042357 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:55:44.042374 32487 solver.cpp:244]     Train net output #1: loss = 0.0166515 (* 1 = 0.0166515 loss)\nI0822 10:55:44.129405 32487 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0822 10:58:01.785027 32487 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0822 10:59:24.122941 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72452\nI0822 10:59:24.123311 32487 solver.cpp:404]     Test net output #1: loss = 1.6208 (* 1 = 1.6208 loss)\nI0822 10:59:25.445783 32487 solver.cpp:228] Iteration 43300, loss = 0.0185719\nI0822 10:59:25.445823 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:59:25.445839 32487 solver.cpp:244]     Train net output #1: loss = 0.0185715 (* 1 = 0.0185715 loss)\nI0822 10:59:25.535338 32487 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0822 11:01:43.415390 32487 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0822 11:03:05.745481 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72448\nI0822 11:03:05.745831 32487 solver.cpp:404]     Test net output #1: loss = 1.60731 (* 1 = 1.60731 loss)\nI0822 11:03:07.067896 32487 solver.cpp:228] Iteration 43400, loss = 0.013757\nI0822 11:03:07.067934 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:03:07.067950 32487 solver.cpp:244]     Train net output #1: loss = 0.0137566 (* 1 = 0.0137566 loss)\nI0822 11:03:07.158856 
32487 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0822 11:05:25.000628 32487 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0822 11:06:47.346177 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7818\nI0822 11:06:47.346534 32487 solver.cpp:404]     Test net output #1: loss = 1.11038 (* 1 = 1.11038 loss)\nI0822 11:06:48.668419 32487 solver.cpp:228] Iteration 43500, loss = 0.0199472\nI0822 11:06:48.668462 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:06:48.668476 32487 solver.cpp:244]     Train net output #1: loss = 0.0199468 (* 1 = 0.0199468 loss)\nI0822 11:06:48.756589 32487 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0822 11:09:06.454839 32487 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0822 11:10:28.805429 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74772\nI0822 11:10:28.805785 32487 solver.cpp:404]     Test net output #1: loss = 1.32478 (* 1 = 1.32478 loss)\nI0822 11:10:30.128288 32487 solver.cpp:228] Iteration 43600, loss = 0.0133775\nI0822 11:10:30.128327 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:10:30.128343 32487 solver.cpp:244]     Train net output #1: loss = 0.0133771 (* 1 = 0.0133771 loss)\nI0822 11:10:30.212373 32487 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0822 11:12:47.874250 32487 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0822 11:14:10.234004 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75968\nI0822 11:14:10.234359 32487 solver.cpp:404]     Test net output #1: loss = 1.18775 (* 1 = 1.18775 loss)\nI0822 11:14:11.557982 32487 solver.cpp:228] Iteration 43700, loss = 0.0114207\nI0822 11:14:11.558022 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:14:11.558038 32487 solver.cpp:244]     Train net output #1: loss = 0.0114203 (* 1 = 0.0114203 loss)\nI0822 11:14:11.642753 32487 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0822 11:16:29.357281 32487 solver.cpp:337] Iteration 43800, Testing net 
(#0)\nI0822 11:17:51.708773 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77884\nI0822 11:17:51.709106 32487 solver.cpp:404]     Test net output #1: loss = 1.09257 (* 1 = 1.09257 loss)\nI0822 11:17:53.032357 32487 solver.cpp:228] Iteration 43800, loss = 0.0270114\nI0822 11:17:53.032395 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:17:53.032411 32487 solver.cpp:244]     Train net output #1: loss = 0.027011 (* 1 = 0.027011 loss)\nI0822 11:17:53.119596 32487 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0822 11:20:10.881054 32487 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0822 11:21:33.240381 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74132\nI0822 11:21:33.240723 32487 solver.cpp:404]     Test net output #1: loss = 1.32543 (* 1 = 1.32543 loss)\nI0822 11:21:34.563148 32487 solver.cpp:228] Iteration 43900, loss = 0.0137473\nI0822 11:21:34.563186 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:21:34.563201 32487 solver.cpp:244]     Train net output #1: loss = 0.013747 (* 1 = 0.013747 loss)\nI0822 11:21:34.647771 32487 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0822 11:23:52.309711 32487 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0822 11:25:14.658466 32487 solver.cpp:404]     Test net output #0: accuracy = 0.63348\nI0822 11:25:14.658820 32487 solver.cpp:404]     Test net output #1: loss = 2.09573 (* 1 = 2.09573 loss)\nI0822 11:25:15.981209 32487 solver.cpp:228] Iteration 44000, loss = 0.00469984\nI0822 11:25:15.981246 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:25:15.981261 32487 solver.cpp:244]     Train net output #1: loss = 0.00469949 (* 1 = 0.00469949 loss)\nI0822 11:25:16.063431 32487 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0822 11:27:33.654891 32487 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0822 11:28:56.009017 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69484\nI0822 11:28:56.009373 32487 
solver.cpp:404]     Test net output #1: loss = 1.76247 (* 1 = 1.76247 loss)\nI0822 11:28:57.331259 32487 solver.cpp:228] Iteration 44100, loss = 0.0518028\nI0822 11:28:57.331298 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:28:57.331315 32487 solver.cpp:244]     Train net output #1: loss = 0.0518025 (* 1 = 0.0518025 loss)\nI0822 11:28:57.417925 32487 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0822 11:31:14.963188 32487 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0822 11:32:37.308324 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70036\nI0822 11:32:37.308679 32487 solver.cpp:404]     Test net output #1: loss = 1.80579 (* 1 = 1.80579 loss)\nI0822 11:32:38.631211 32487 solver.cpp:228] Iteration 44200, loss = 0.0488307\nI0822 11:32:38.631245 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:32:38.631260 32487 solver.cpp:244]     Train net output #1: loss = 0.0488303 (* 1 = 0.0488303 loss)\nI0822 11:32:38.719606 32487 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0822 11:34:56.534756 32487 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0822 11:36:18.885130 32487 solver.cpp:404]     Test net output #0: accuracy = 0.67364\nI0822 11:36:18.885465 32487 solver.cpp:404]     Test net output #1: loss = 2.02336 (* 1 = 2.02336 loss)\nI0822 11:36:20.207882 32487 solver.cpp:228] Iteration 44300, loss = 0.0788693\nI0822 11:36:20.207921 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 11:36:20.207936 32487 solver.cpp:244]     Train net output #1: loss = 0.078869 (* 1 = 0.078869 loss)\nI0822 11:36:20.293608 32487 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0822 11:38:38.053527 32487 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0822 11:40:00.386914 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77308\nI0822 11:40:00.387243 32487 solver.cpp:404]     Test net output #1: loss = 1.13117 (* 1 = 1.13117 loss)\nI0822 11:40:01.708902 32487 solver.cpp:228] 
Iteration 44400, loss = 0.0601901\nI0822 11:40:01.708940 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 11:40:01.708956 32487 solver.cpp:244]     Train net output #1: loss = 0.0601898 (* 1 = 0.0601898 loss)\nI0822 11:40:01.793973 32487 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0822 11:42:19.548887 32487 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0822 11:43:41.863525 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73476\nI0822 11:43:41.863883 32487 solver.cpp:404]     Test net output #1: loss = 1.43147 (* 1 = 1.43147 loss)\nI0822 11:43:43.185742 32487 solver.cpp:228] Iteration 44500, loss = 0.0197449\nI0822 11:43:43.185780 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:43:43.185796 32487 solver.cpp:244]     Train net output #1: loss = 0.0197446 (* 1 = 0.0197446 loss)\nI0822 11:43:43.273838 32487 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0822 11:46:00.959242 32487 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0822 11:47:23.290491 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80128\nI0822 11:47:23.290822 32487 solver.cpp:404]     Test net output #1: loss = 0.952511 (* 1 = 0.952511 loss)\nI0822 11:47:24.612943 32487 solver.cpp:228] Iteration 44600, loss = 0.0536163\nI0822 11:47:24.612984 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:47:24.613000 32487 solver.cpp:244]     Train net output #1: loss = 0.053616 (* 1 = 0.053616 loss)\nI0822 11:47:24.698576 32487 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0822 11:49:41.651226 32487 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0822 11:51:03.978430 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79092\nI0822 11:51:03.978770 32487 solver.cpp:404]     Test net output #1: loss = 0.964856 (* 1 = 0.964856 loss)\nI0822 11:51:05.300828 32487 solver.cpp:228] Iteration 44700, loss = 0.0348878\nI0822 11:51:05.300869 32487 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0822 11:51:05.300885 32487 solver.cpp:244]     Train net output #1: loss = 0.0348875 (* 1 = 0.0348875 loss)\nI0822 11:51:05.381846 32487 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0822 11:53:22.816782 32487 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0822 11:54:45.128953 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76908\nI0822 11:54:45.129305 32487 solver.cpp:404]     Test net output #1: loss = 1.10085 (* 1 = 1.10085 loss)\nI0822 11:54:46.451370 32487 solver.cpp:228] Iteration 44800, loss = 0.0235062\nI0822 11:54:46.451418 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:54:46.451436 32487 solver.cpp:244]     Train net output #1: loss = 0.0235059 (* 1 = 0.0235059 loss)\nI0822 11:54:46.531266 32487 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0822 11:57:03.364825 32487 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0822 11:58:25.681524 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71388\nI0822 11:58:25.681879 32487 solver.cpp:404]     Test net output #1: loss = 1.30961 (* 1 = 1.30961 loss)\nI0822 11:58:27.004050 32487 solver.cpp:228] Iteration 44900, loss = 0.00951355\nI0822 11:58:27.004101 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:58:27.004118 32487 solver.cpp:244]     Train net output #1: loss = 0.00951321 (* 1 = 0.00951321 loss)\nI0822 11:58:27.085332 32487 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0822 12:00:43.835626 32487 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0822 12:02:06.145436 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74004\nI0822 12:02:06.145748 32487 solver.cpp:404]     Test net output #1: loss = 1.32972 (* 1 = 1.32972 loss)\nI0822 12:02:07.467535 32487 solver.cpp:228] Iteration 45000, loss = 0.0306309\nI0822 12:02:07.467582 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:02:07.467600 32487 solver.cpp:244]     Train net output #1: loss = 0.0306305 (* 1 = 0.0306305 loss)\nI0822 
12:02:07.547936 32487 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0822 12:04:24.359195 32487 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0822 12:05:46.684900 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71732\nI0822 12:05:46.685230 32487 solver.cpp:404]     Test net output #1: loss = 1.61103 (* 1 = 1.61103 loss)\nI0822 12:05:48.006847 32487 solver.cpp:228] Iteration 45100, loss = 0.0214918\nI0822 12:05:48.006897 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:05:48.006914 32487 solver.cpp:244]     Train net output #1: loss = 0.0214915 (* 1 = 0.0214915 loss)\nI0822 12:05:48.091789 32487 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0822 12:08:04.908203 32487 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0822 12:09:27.236271 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78172\nI0822 12:09:27.236634 32487 solver.cpp:404]     Test net output #1: loss = 1.0734 (* 1 = 1.0734 loss)\nI0822 12:09:28.558522 32487 solver.cpp:228] Iteration 45200, loss = 0.0401613\nI0822 12:09:28.558568 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 12:09:28.558584 32487 solver.cpp:244]     Train net output #1: loss = 0.040161 (* 1 = 0.040161 loss)\nI0822 12:09:28.631958 32487 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0822 12:11:45.509085 32487 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0822 12:13:07.829237 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7046\nI0822 12:13:07.829583 32487 solver.cpp:404]     Test net output #1: loss = 1.6454 (* 1 = 1.6454 loss)\nI0822 12:13:09.151698 32487 solver.cpp:228] Iteration 45300, loss = 0.019044\nI0822 12:13:09.151746 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:13:09.151762 32487 solver.cpp:244]     Train net output #1: loss = 0.0190437 (* 1 = 0.0190437 loss)\nI0822 12:13:09.224395 32487 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0822 12:15:26.040289 32487 solver.cpp:337] Iteration 45400, 
Testing net (#0)\nI0822 12:16:48.355069 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74844\nI0822 12:16:48.355451 32487 solver.cpp:404]     Test net output #1: loss = 1.15853 (* 1 = 1.15853 loss)\nI0822 12:16:49.677147 32487 solver.cpp:228] Iteration 45400, loss = 0.0197327\nI0822 12:16:49.677191 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 12:16:49.677206 32487 solver.cpp:244]     Train net output #1: loss = 0.0197324 (* 1 = 0.0197324 loss)\nI0822 12:16:49.759287 32487 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0822 12:19:06.544301 32487 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0822 12:20:28.868208 32487 solver.cpp:404]     Test net output #0: accuracy = 0.705\nI0822 12:20:28.868557 32487 solver.cpp:404]     Test net output #1: loss = 1.75531 (* 1 = 1.75531 loss)\nI0822 12:20:30.190532 32487 solver.cpp:228] Iteration 45500, loss = 0.0342207\nI0822 12:20:30.190577 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 12:20:30.190592 32487 solver.cpp:244]     Train net output #1: loss = 0.0342204 (* 1 = 0.0342204 loss)\nI0822 12:20:30.271817 32487 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0822 12:22:46.998095 32487 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0822 12:24:09.329285 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78208\nI0822 12:24:09.329624 32487 solver.cpp:404]     Test net output #1: loss = 1.0932 (* 1 = 1.0932 loss)\nI0822 12:24:10.651711 32487 solver.cpp:228] Iteration 45600, loss = 0.0106715\nI0822 12:24:10.651762 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:24:10.651778 32487 solver.cpp:244]     Train net output #1: loss = 0.0106712 (* 1 = 0.0106712 loss)\nI0822 12:24:10.729939 32487 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0822 12:26:27.424401 32487 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0822 12:27:49.757309 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7708\nI0822 12:27:49.757673 32487 
solver.cpp:404]     Test net output #1: loss = 1.20528 (* 1 = 1.20528 loss)\nI0822 12:27:51.079480 32487 solver.cpp:228] Iteration 45700, loss = 0.0440031\nI0822 12:27:51.079532 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 12:27:51.079550 32487 solver.cpp:244]     Train net output #1: loss = 0.0440028 (* 1 = 0.0440028 loss)\nI0822 12:27:51.164840 32487 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0822 12:30:07.922168 32487 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0822 12:31:30.241245 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7642\nI0822 12:31:30.241582 32487 solver.cpp:404]     Test net output #1: loss = 1.2435 (* 1 = 1.2435 loss)\nI0822 12:31:31.563915 32487 solver.cpp:228] Iteration 45800, loss = 0.0324074\nI0822 12:31:31.563967 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:31:31.563983 32487 solver.cpp:244]     Train net output #1: loss = 0.0324071 (* 1 = 0.0324071 loss)\nI0822 12:31:31.636607 32487 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0822 12:33:48.375380 32487 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0822 12:35:10.691109 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69936\nI0822 12:35:10.691416 32487 solver.cpp:404]     Test net output #1: loss = 1.66938 (* 1 = 1.66938 loss)\nI0822 12:35:12.014447 32487 solver.cpp:228] Iteration 45900, loss = 0.0267836\nI0822 12:35:12.014499 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:35:12.014516 32487 solver.cpp:244]     Train net output #1: loss = 0.0267834 (* 1 = 0.0267834 loss)\nI0822 12:35:12.095813 32487 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0822 12:37:28.821064 32487 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0822 12:38:51.139516 32487 solver.cpp:404]     Test net output #0: accuracy = 0.65456\nI0822 12:38:51.139876 32487 solver.cpp:404]     Test net output #1: loss = 2.11988 (* 1 = 2.11988 loss)\nI0822 12:38:52.462764 32487 solver.cpp:228] Iteration 
46000, loss = 0.0114852\nI0822 12:38:52.462812 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:38:52.462828 32487 solver.cpp:244]     Train net output #1: loss = 0.0114849 (* 1 = 0.0114849 loss)\nI0822 12:38:52.541368 32487 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0822 12:41:09.277127 32487 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0822 12:42:31.596418 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69452\nI0822 12:42:31.596750 32487 solver.cpp:404]     Test net output #1: loss = 1.95061 (* 1 = 1.95061 loss)\nI0822 12:42:32.919728 32487 solver.cpp:228] Iteration 46100, loss = 0.0437934\nI0822 12:42:32.919775 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 12:42:32.919792 32487 solver.cpp:244]     Train net output #1: loss = 0.0437931 (* 1 = 0.0437931 loss)\nI0822 12:42:32.995576 32487 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0822 12:44:49.753099 32487 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0822 12:46:12.073465 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70744\nI0822 12:46:12.073822 32487 solver.cpp:404]     Test net output #1: loss = 1.43301 (* 1 = 1.43301 loss)\nI0822 12:46:13.396977 32487 solver.cpp:228] Iteration 46200, loss = 0.0536595\nI0822 12:46:13.397029 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 12:46:13.397044 32487 solver.cpp:244]     Train net output #1: loss = 0.0536592 (* 1 = 0.0536592 loss)\nI0822 12:46:13.468940 32487 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0822 12:48:30.190708 32487 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0822 12:49:52.510020 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76876\nI0822 12:49:52.510372 32487 solver.cpp:404]     Test net output #1: loss = 1.1732 (* 1 = 1.1732 loss)\nI0822 12:49:53.832491 32487 solver.cpp:228] Iteration 46300, loss = 0.0100056\nI0822 12:49:53.832542 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:49:53.832558 
32487 solver.cpp:244]     Train net output #1: loss = 0.0100054 (* 1 = 0.0100054 loss)\nI0822 12:49:53.915968 32487 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0822 12:52:10.754468 32487 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0822 12:53:33.069484 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75968\nI0822 12:53:33.069839 32487 solver.cpp:404]     Test net output #1: loss = 1.2705 (* 1 = 1.2705 loss)\nI0822 12:53:34.392529 32487 solver.cpp:228] Iteration 46400, loss = 0.107491\nI0822 12:53:34.392580 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 12:53:34.392596 32487 solver.cpp:244]     Train net output #1: loss = 0.107491 (* 1 = 0.107491 loss)\nI0822 12:53:34.467242 32487 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0822 12:55:51.349627 32487 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0822 12:57:13.667430 32487 solver.cpp:404]     Test net output #0: accuracy = 0.57616\nI0822 12:57:13.667791 32487 solver.cpp:404]     Test net output #1: loss = 2.95191 (* 1 = 2.95191 loss)\nI0822 12:57:14.989723 32487 solver.cpp:228] Iteration 46500, loss = 0.0459619\nI0822 12:57:14.989771 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 12:57:14.989787 32487 solver.cpp:244]     Train net output #1: loss = 0.0459616 (* 1 = 0.0459616 loss)\nI0822 12:57:15.062465 32487 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0822 12:59:31.803308 32487 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0822 13:00:54.124205 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77532\nI0822 13:00:54.124559 32487 solver.cpp:404]     Test net output #1: loss = 1.0603 (* 1 = 1.0603 loss)\nI0822 13:00:55.446655 32487 solver.cpp:228] Iteration 46600, loss = 0.0460804\nI0822 13:00:55.446699 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:00:55.446715 32487 solver.cpp:244]     Train net output #1: loss = 0.0460801 (* 1 = 0.0460801 loss)\nI0822 13:00:55.528898 32487 
sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0822 13:03:12.287488 32487 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0822 13:04:34.621484 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74576\nI0822 13:04:34.621836 32487 solver.cpp:404]     Test net output #1: loss = 1.3392 (* 1 = 1.3392 loss)\nI0822 13:04:35.944252 32487 solver.cpp:228] Iteration 46700, loss = 0.00792427\nI0822 13:04:35.944296 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:04:35.944313 32487 solver.cpp:244]     Train net output #1: loss = 0.00792399 (* 1 = 0.00792399 loss)\nI0822 13:04:36.019258 32487 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0822 13:06:52.725083 32487 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0822 13:08:15.052065 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74412\nI0822 13:08:15.052443 32487 solver.cpp:404]     Test net output #1: loss = 1.18628 (* 1 = 1.18628 loss)\nI0822 13:08:16.374979 32487 solver.cpp:228] Iteration 46800, loss = 0.0039523\nI0822 13:08:16.375025 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:08:16.375042 32487 solver.cpp:244]     Train net output #1: loss = 0.00395203 (* 1 = 0.00395203 loss)\nI0822 13:08:16.453074 32487 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0822 13:10:33.170893 32487 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0822 13:11:55.511093 32487 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI0822 13:11:55.511431 32487 solver.cpp:404]     Test net output #1: loss = 1.36102 (* 1 = 1.36102 loss)\nI0822 13:11:56.833570 32487 solver.cpp:228] Iteration 46900, loss = 0.0234823\nI0822 13:11:56.833616 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:11:56.833632 32487 solver.cpp:244]     Train net output #1: loss = 0.023482 (* 1 = 0.023482 loss)\nI0822 13:11:56.908665 32487 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0822 13:14:13.671725 32487 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0822 
13:15:36.013942 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69832\nI0822 13:15:36.014329 32487 solver.cpp:404]     Test net output #1: loss = 1.7594 (* 1 = 1.7594 loss)\nI0822 13:15:37.336228 32487 solver.cpp:228] Iteration 47000, loss = 0.0132888\nI0822 13:15:37.336275 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:15:37.336293 32487 solver.cpp:244]     Train net output #1: loss = 0.0132885 (* 1 = 0.0132885 loss)\nI0822 13:15:37.417665 32487 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0822 13:17:54.288296 32487 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0822 13:19:16.643348 32487 solver.cpp:404]     Test net output #0: accuracy = 0.68628\nI0822 13:19:16.643715 32487 solver.cpp:404]     Test net output #1: loss = 1.85499 (* 1 = 1.85499 loss)\nI0822 13:19:17.965090 32487 solver.cpp:228] Iteration 47100, loss = 0.0148395\nI0822 13:19:17.965142 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:19:17.965158 32487 solver.cpp:244]     Train net output #1: loss = 0.0148392 (* 1 = 0.0148392 loss)\nI0822 13:19:18.045667 32487 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0822 13:21:34.802466 32487 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0822 13:22:57.124832 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72132\nI0822 13:22:57.125185 32487 solver.cpp:404]     Test net output #1: loss = 1.53908 (* 1 = 1.53908 loss)\nI0822 13:22:58.446689 32487 solver.cpp:228] Iteration 47200, loss = 0.0151908\nI0822 13:22:58.446743 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:22:58.446759 32487 solver.cpp:244]     Train net output #1: loss = 0.0151905 (* 1 = 0.0151905 loss)\nI0822 13:22:58.522210 32487 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0822 13:25:15.238423 32487 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0822 13:26:37.560539 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74908\nI0822 13:26:37.560873 32487 solver.cpp:404]     
Test net output #1: loss = 1.27145 (* 1 = 1.27145 loss)\nI0822 13:26:38.882745 32487 solver.cpp:228] Iteration 47300, loss = 0.0137538\nI0822 13:26:38.882797 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:26:38.882813 32487 solver.cpp:244]     Train net output #1: loss = 0.0137535 (* 1 = 0.0137535 loss)\nI0822 13:26:38.958272 32487 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0822 13:28:55.837038 32487 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0822 13:30:18.160375 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75432\nI0822 13:30:18.160713 32487 solver.cpp:404]     Test net output #1: loss = 1.35156 (* 1 = 1.35156 loss)\nI0822 13:30:19.482703 32487 solver.cpp:228] Iteration 47400, loss = 0.0057277\nI0822 13:30:19.482746 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:30:19.482762 32487 solver.cpp:244]     Train net output #1: loss = 0.00572744 (* 1 = 0.00572744 loss)\nI0822 13:30:19.556982 32487 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0822 13:32:36.493696 32487 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0822 13:33:58.817262 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7326\nI0822 13:33:58.817622 32487 solver.cpp:404]     Test net output #1: loss = 1.4687 (* 1 = 1.4687 loss)\nI0822 13:34:00.139578 32487 solver.cpp:228] Iteration 47500, loss = 0.0329551\nI0822 13:34:00.139631 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:34:00.139647 32487 solver.cpp:244]     Train net output #1: loss = 0.0329549 (* 1 = 0.0329549 loss)\nI0822 13:34:00.218663 32487 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0822 13:36:16.985090 32487 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0822 13:37:39.299558 32487 solver.cpp:404]     Test net output #0: accuracy = 0.73124\nI0822 13:37:39.299957 32487 solver.cpp:404]     Test net output #1: loss = 1.60822 (* 1 = 1.60822 loss)\nI0822 13:37:40.622170 32487 solver.cpp:228] Iteration 47600, loss = 
0.0491006\nI0822 13:37:40.622218 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 13:37:40.622236 32487 solver.cpp:244]     Train net output #1: loss = 0.0491003 (* 1 = 0.0491003 loss)\nI0822 13:37:40.698132 32487 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0822 13:39:57.596612 32487 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0822 13:41:19.918200 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75464\nI0822 13:41:19.918552 32487 solver.cpp:404]     Test net output #1: loss = 1.3072 (* 1 = 1.3072 loss)\nI0822 13:41:21.240360 32487 solver.cpp:228] Iteration 47700, loss = 0.0823492\nI0822 13:41:21.240413 32487 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 13:41:21.240432 32487 solver.cpp:244]     Train net output #1: loss = 0.0823489 (* 1 = 0.0823489 loss)\nI0822 13:41:21.325479 32487 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0822 13:43:38.191540 32487 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0822 13:45:00.508335 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77452\nI0822 13:45:00.508679 32487 solver.cpp:404]     Test net output #1: loss = 1.07027 (* 1 = 1.07027 loss)\nI0822 13:45:01.831074 32487 solver.cpp:228] Iteration 47800, loss = 0.0129664\nI0822 13:45:01.831125 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:45:01.831142 32487 solver.cpp:244]     Train net output #1: loss = 0.0129661 (* 1 = 0.0129661 loss)\nI0822 13:45:01.915449 32487 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0822 13:47:18.815821 32487 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0822 13:48:41.133474 32487 solver.cpp:404]     Test net output #0: accuracy = 0.763\nI0822 13:48:41.133833 32487 solver.cpp:404]     Test net output #1: loss = 1.18469 (* 1 = 1.18469 loss)\nI0822 13:48:42.455819 32487 solver.cpp:228] Iteration 47900, loss = 0.0215819\nI0822 13:48:42.455870 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:48:42.455888 32487 
solver.cpp:244]     Train net output #1: loss = 0.0215816 (* 1 = 0.0215816 loss)\nI0822 13:48:42.535765 32487 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0822 13:50:59.241148 32487 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0822 13:52:21.547621 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7704\nI0822 13:52:21.547946 32487 solver.cpp:404]     Test net output #1: loss = 1.04167 (* 1 = 1.04167 loss)\nI0822 13:52:22.869979 32487 solver.cpp:228] Iteration 48000, loss = 0.010847\nI0822 13:52:22.870031 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:52:22.870048 32487 solver.cpp:244]     Train net output #1: loss = 0.0108467 (* 1 = 0.0108467 loss)\nI0822 13:52:22.951115 32487 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0822 13:54:39.717937 32487 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0822 13:56:02.032155 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75808\nI0822 13:56:02.032492 32487 solver.cpp:404]     Test net output #1: loss = 1.28567 (* 1 = 1.28567 loss)\nI0822 13:56:03.354467 32487 solver.cpp:228] Iteration 48100, loss = 0.0350733\nI0822 13:56:03.354521 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:56:03.354538 32487 solver.cpp:244]     Train net output #1: loss = 0.0350729 (* 1 = 0.0350729 loss)\nI0822 13:56:03.430833 32487 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0822 13:58:20.244597 32487 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0822 13:59:42.564879 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76144\nI0822 13:59:42.565237 32487 solver.cpp:404]     Test net output #1: loss = 1.19364 (* 1 = 1.19364 loss)\nI0822 13:59:43.887239 32487 solver.cpp:228] Iteration 48200, loss = 0.0102072\nI0822 13:59:43.887289 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:59:43.887305 32487 solver.cpp:244]     Train net output #1: loss = 0.0102069 (* 1 = 0.0102069 loss)\nI0822 13:59:43.968930 32487 sgd_solver.cpp:166] 
Iteration 48200, lr = 0.35\nI0822 14:02:00.856729 32487 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0822 14:03:23.155004 32487 solver.cpp:404]     Test net output #0: accuracy = 0.74604\nI0822 14:03:23.155364 32487 solver.cpp:404]     Test net output #1: loss = 1.2712 (* 1 = 1.2712 loss)\nI0822 14:03:24.478582 32487 solver.cpp:228] Iteration 48300, loss = 0.0133285\nI0822 14:03:24.478626 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:03:24.478651 32487 solver.cpp:244]     Train net output #1: loss = 0.0133282 (* 1 = 0.0133282 loss)\nI0822 14:03:24.557518 32487 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0822 14:05:41.121601 32487 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0822 14:07:02.487568 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76092\nI0822 14:07:02.487823 32487 solver.cpp:404]     Test net output #1: loss = 1.41529 (* 1 = 1.41529 loss)\nI0822 14:07:03.806177 32487 solver.cpp:228] Iteration 48400, loss = 0.0205398\nI0822 14:07:03.806221 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:07:03.806238 32487 solver.cpp:244]     Train net output #1: loss = 0.0205394 (* 1 = 0.0205394 loss)\nI0822 14:07:03.883581 32487 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0822 14:09:20.476048 32487 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0822 14:10:41.841228 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7678\nI0822 14:10:41.841486 32487 solver.cpp:404]     Test net output #1: loss = 1.13196 (* 1 = 1.13196 loss)\nI0822 14:10:43.159587 32487 solver.cpp:228] Iteration 48500, loss = 0.0306068\nI0822 14:10:43.159623 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:10:43.159638 32487 solver.cpp:244]     Train net output #1: loss = 0.0306064 (* 1 = 0.0306064 loss)\nI0822 14:10:43.242182 32487 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0822 14:13:00.525178 32487 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0822 14:14:21.875471 
32487 solver.cpp:404]     Test net output #0: accuracy = 0.73844\nI0822 14:14:21.875732 32487 solver.cpp:404]     Test net output #1: loss = 1.24259 (* 1 = 1.24259 loss)\nI0822 14:14:23.193416 32487 solver.cpp:228] Iteration 48600, loss = 0.0140232\nI0822 14:14:23.193450 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:14:23.193466 32487 solver.cpp:244]     Train net output #1: loss = 0.0140229 (* 1 = 0.0140229 loss)\nI0822 14:14:23.286201 32487 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0822 14:16:40.766007 32487 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0822 14:18:02.137969 32487 solver.cpp:404]     Test net output #0: accuracy = 0.66536\nI0822 14:18:02.138273 32487 solver.cpp:404]     Test net output #1: loss = 1.87559 (* 1 = 1.87559 loss)\nI0822 14:18:03.456804 32487 solver.cpp:228] Iteration 48700, loss = 0.0484258\nI0822 14:18:03.456840 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 14:18:03.456856 32487 solver.cpp:244]     Train net output #1: loss = 0.0484255 (* 1 = 0.0484255 loss)\nI0822 14:18:03.547415 32487 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0822 14:20:21.008978 32487 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0822 14:21:42.481221 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79476\nI0822 14:21:42.481540 32487 solver.cpp:404]     Test net output #1: loss = 1.01331 (* 1 = 1.01331 loss)\nI0822 14:21:43.799523 32487 solver.cpp:228] Iteration 48800, loss = 0.0320893\nI0822 14:21:43.799559 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:21:43.799576 32487 solver.cpp:244]     Train net output #1: loss = 0.0320889 (* 1 = 0.0320889 loss)\nI0822 14:21:43.889116 32487 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0822 14:24:01.559929 32487 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0822 14:25:23.041740 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80088\nI0822 14:25:23.041996 32487 solver.cpp:404]     Test net 
output #1: loss = 0.88903 (* 1 = 0.88903 loss)\nI0822 14:25:24.359938 32487 solver.cpp:228] Iteration 48900, loss = 0.0514381\nI0822 14:25:24.359983 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:25:24.360000 32487 solver.cpp:244]     Train net output #1: loss = 0.0514378 (* 1 = 0.0514378 loss)\nI0822 14:25:24.447852 32487 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0822 14:27:42.111704 32487 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0822 14:29:03.585456 32487 solver.cpp:404]     Test net output #0: accuracy = 0.6342\nI0822 14:29:03.585697 32487 solver.cpp:404]     Test net output #1: loss = 2.45371 (* 1 = 2.45371 loss)\nI0822 14:29:04.904139 32487 solver.cpp:228] Iteration 49000, loss = 0.0386215\nI0822 14:29:04.904175 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:29:04.904191 32487 solver.cpp:244]     Train net output #1: loss = 0.0386211 (* 1 = 0.0386211 loss)\nI0822 14:29:04.995718 32487 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0822 14:31:22.637666 32487 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0822 14:32:44.127965 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7374\nI0822 14:32:44.128265 32487 solver.cpp:404]     Test net output #1: loss = 1.46649 (* 1 = 1.46649 loss)\nI0822 14:32:45.446998 32487 solver.cpp:228] Iteration 49100, loss = 0.0128986\nI0822 14:32:45.447044 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:32:45.447060 32487 solver.cpp:244]     Train net output #1: loss = 0.0128983 (* 1 = 0.0128983 loss)\nI0822 14:32:45.542403 32487 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0822 14:35:03.304404 32487 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0822 14:36:24.786989 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69368\nI0822 14:36:24.787277 32487 solver.cpp:404]     Test net output #1: loss = 1.8443 (* 1 = 1.8443 loss)\nI0822 14:36:26.105821 32487 solver.cpp:228] Iteration 49200, loss = 0.0240068\nI0822 
14:36:26.105859 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:36:26.105875 32487 solver.cpp:244]     Train net output #1: loss = 0.0240065 (* 1 = 0.0240065 loss)\nI0822 14:36:26.195888 32487 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0822 14:38:43.865351 32487 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0822 14:40:05.349658 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78408\nI0822 14:40:05.349949 32487 solver.cpp:404]     Test net output #1: loss = 1.04231 (* 1 = 1.04231 loss)\nI0822 14:40:06.668144 32487 solver.cpp:228] Iteration 49300, loss = 0.00848598\nI0822 14:40:06.668190 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:40:06.668206 32487 solver.cpp:244]     Train net output #1: loss = 0.00848569 (* 1 = 0.00848569 loss)\nI0822 14:40:06.758813 32487 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0822 14:42:24.335646 32487 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0822 14:43:45.813791 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76664\nI0822 14:43:45.814050 32487 solver.cpp:404]     Test net output #1: loss = 1.10787 (* 1 = 1.10787 loss)\nI0822 14:43:47.131503 32487 solver.cpp:228] Iteration 49400, loss = 0.0403277\nI0822 14:43:47.131548 32487 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:43:47.131564 32487 solver.cpp:244]     Train net output #1: loss = 0.0403274 (* 1 = 0.0403274 loss)\nI0822 14:43:47.220139 32487 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0822 14:46:04.848001 32487 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0822 14:47:26.310598 32487 solver.cpp:404]     Test net output #0: accuracy = 0.756\nI0822 14:47:26.310853 32487 solver.cpp:404]     Test net output #1: loss = 1.1995 (* 1 = 1.1995 loss)\nI0822 14:47:27.628971 32487 solver.cpp:228] Iteration 49500, loss = 0.0258423\nI0822 14:47:27.629007 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:47:27.629024 32487 solver.cpp:244]     
Train net output #1: loss = 0.025842 (* 1 = 0.025842 loss)\nI0822 14:47:27.723186 32487 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0822 14:49:45.335609 32487 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0822 14:51:06.800073 32487 solver.cpp:404]     Test net output #0: accuracy = 0.72424\nI0822 14:51:06.800330 32487 solver.cpp:404]     Test net output #1: loss = 1.71091 (* 1 = 1.71091 loss)\nI0822 14:51:08.117696 32487 solver.cpp:228] Iteration 49600, loss = 0.00524694\nI0822 14:51:08.117741 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:51:08.117758 32487 solver.cpp:244]     Train net output #1: loss = 0.00524667 (* 1 = 0.00524667 loss)\nI0822 14:51:08.207917 32487 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0822 14:53:25.905012 32487 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0822 14:54:47.375805 32487 solver.cpp:404]     Test net output #0: accuracy = 0.69424\nI0822 14:54:47.376103 32487 solver.cpp:404]     Test net output #1: loss = 1.74675 (* 1 = 1.74675 loss)\nI0822 14:54:48.694480 32487 solver.cpp:228] Iteration 49700, loss = 0.0297417\nI0822 14:54:48.694515 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:54:48.694530 32487 solver.cpp:244]     Train net output #1: loss = 0.0297415 (* 1 = 0.0297415 loss)\nI0822 14:54:48.784128 32487 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0822 14:57:06.501094 32487 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0822 14:58:27.975345 32487 solver.cpp:404]     Test net output #0: accuracy = 0.78344\nI0822 14:58:27.975630 32487 solver.cpp:404]     Test net output #1: loss = 1.02948 (* 1 = 1.02948 loss)\nI0822 14:58:29.293766 32487 solver.cpp:228] Iteration 49800, loss = 0.0253619\nI0822 14:58:29.293802 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:58:29.293817 32487 solver.cpp:244]     Train net output #1: loss = 0.0253616 (* 1 = 0.0253616 loss)\nI0822 14:58:29.383250 32487 sgd_solver.cpp:166] Iteration 49800, lr 
= 0.35\nI0822 15:00:47.089007 32487 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0822 15:02:08.567762 32487 solver.cpp:404]     Test net output #0: accuracy = 0.76476\nI0822 15:02:08.568025 32487 solver.cpp:404]     Test net output #1: loss = 1.11759 (* 1 = 1.11759 loss)\nI0822 15:02:09.886169 32487 solver.cpp:228] Iteration 49900, loss = 0.0275367\nI0822 15:02:09.886205 32487 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 15:02:09.886220 32487 solver.cpp:244]     Train net output #1: loss = 0.0275364 (* 1 = 0.0275364 loss)\nI0822 15:02:09.973443 32487 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0822 15:04:27.616127 32487 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0822 15:05:49.096179 32487 solver.cpp:404]     Test net output #0: accuracy = 0.70624\nI0822 15:05:49.096442 32487 solver.cpp:404]     Test net output #1: loss = 1.45723 (* 1 = 1.45723 loss)\nI0822 15:05:50.414800 32487 solver.cpp:228] Iteration 50000, loss = 0.0377171\nI0822 15:05:50.414836 32487 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 15:05:50.414852 32487 solver.cpp:244]     Train net output #1: loss = 0.0377168 (* 1 = 0.0377168 loss)\nI0822 15:05:50.509341 32487 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0822 15:05:50.509361 32487 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0822 15:08:08.134781 32487 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0822 15:09:29.614316 32487 solver.cpp:404]     Test net output #0: accuracy = 0.75076\nI0822 15:09:29.614599 32487 solver.cpp:404]     Test net output #1: loss = 1.20768 (* 1 = 1.20768 loss)\nI0822 15:09:30.933100 32487 solver.cpp:228] Iteration 50100, loss = 0.00153035\nI0822 15:09:30.933146 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:09:30.933162 32487 solver.cpp:244]     Train net output #1: loss = 0.00153007 (* 1 = 0.00153007 loss)\nI0822 15:09:31.023382 32487 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0822 15:11:48.698655 
32487 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0822 15:13:10.177537 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7706\nI0822 15:13:10.177815 32487 solver.cpp:404]     Test net output #1: loss = 1.08848 (* 1 = 1.08848 loss)\nI0822 15:13:11.495751 32487 solver.cpp:228] Iteration 50200, loss = 0.000571973\nI0822 15:13:11.495787 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:13:11.495801 32487 solver.cpp:244]     Train net output #1: loss = 0.000571697 (* 1 = 0.000571697 loss)\nI0822 15:13:11.588716 32487 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0822 15:15:29.298054 32487 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0822 15:16:50.772188 32487 solver.cpp:404]     Test net output #0: accuracy = 0.77684\nI0822 15:16:50.772482 32487 solver.cpp:404]     Test net output #1: loss = 1.04463 (* 1 = 1.04463 loss)\nI0822 15:16:52.089926 32487 solver.cpp:228] Iteration 50300, loss = 0.000589818\nI0822 15:16:52.089960 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:16:52.089975 32487 solver.cpp:244]     Train net output #1: loss = 0.000589542 (* 1 = 0.000589542 loss)\nI0822 15:16:52.180312 32487 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0822 15:19:09.934062 32487 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0822 15:20:31.416496 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7848\nI0822 15:20:31.416795 32487 solver.cpp:404]     Test net output #1: loss = 1.00304 (* 1 = 1.00304 loss)\nI0822 15:20:32.734880 32487 solver.cpp:228] Iteration 50400, loss = 0.000584639\nI0822 15:20:32.734930 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:20:32.734954 32487 solver.cpp:244]     Train net output #1: loss = 0.000584362 (* 1 = 0.000584362 loss)\nI0822 15:20:32.830584 32487 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0822 15:22:50.638741 32487 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0822 15:24:12.096312 32487 solver.cpp:404]     Test net output 
#0: accuracy = 0.7882\nI0822 15:24:12.096632 32487 solver.cpp:404]     Test net output #1: loss = 0.982141 (* 1 = 0.982141 loss)\nI0822 15:24:13.415077 32487 solver.cpp:228] Iteration 50500, loss = 0.000233942\nI0822 15:24:13.415115 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:24:13.415138 32487 solver.cpp:244]     Train net output #1: loss = 0.000233666 (* 1 = 0.000233666 loss)\nI0822 15:24:13.506983 32487 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0822 15:26:31.227422 32487 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0822 15:27:52.704123 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79052\nI0822 15:27:52.704401 32487 solver.cpp:404]     Test net output #1: loss = 0.963719 (* 1 = 0.963719 loss)\nI0822 15:27:54.023072 32487 solver.cpp:228] Iteration 50600, loss = 0.000535288\nI0822 15:27:54.023113 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:27:54.023135 32487 solver.cpp:244]     Train net output #1: loss = 0.000535012 (* 1 = 0.000535012 loss)\nI0822 15:27:54.117221 32487 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0822 15:30:11.885637 32487 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0822 15:31:33.351845 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79204\nI0822 15:31:33.352108 32487 solver.cpp:404]     Test net output #1: loss = 0.951405 (* 1 = 0.951405 loss)\nI0822 15:31:34.670519 32487 solver.cpp:228] Iteration 50700, loss = 0.000507829\nI0822 15:31:34.670568 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:31:34.670593 32487 solver.cpp:244]     Train net output #1: loss = 0.000507553 (* 1 = 0.000507553 loss)\nI0822 15:31:34.768007 32487 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0822 15:33:52.438397 32487 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0822 15:35:13.901253 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI0822 15:35:13.901567 32487 solver.cpp:404]     Test net output #1: loss = 0.941416 (* 1 
= 0.941416 loss)\nI0822 15:35:15.219990 32487 solver.cpp:228] Iteration 50800, loss = 0.00017885\nI0822 15:35:15.220029 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:35:15.220052 32487 solver.cpp:244]     Train net output #1: loss = 0.000178574 (* 1 = 0.000178574 loss)\nI0822 15:35:15.307154 32487 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0822 15:37:32.893270 32487 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0822 15:38:54.353526 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79364\nI0822 15:38:54.353837 32487 solver.cpp:404]     Test net output #1: loss = 0.930116 (* 1 = 0.930116 loss)\nI0822 15:38:55.671166 32487 solver.cpp:228] Iteration 50900, loss = 0.000473551\nI0822 15:38:55.671216 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:38:55.671239 32487 solver.cpp:244]     Train net output #1: loss = 0.000473275 (* 1 = 0.000473275 loss)\nI0822 15:38:55.769712 32487 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0822 15:41:13.381693 32487 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0822 15:42:34.841809 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79648\nI0822 15:42:34.842097 32487 solver.cpp:404]     Test net output #1: loss = 0.924564 (* 1 = 0.924564 loss)\nI0822 15:42:36.161329 32487 solver.cpp:228] Iteration 51000, loss = 0.000359701\nI0822 15:42:36.161378 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:42:36.161402 32487 solver.cpp:244]     Train net output #1: loss = 0.000359425 (* 1 = 0.000359425 loss)\nI0822 15:42:36.253126 32487 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0822 15:44:53.949800 32487 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0822 15:46:15.415477 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79584\nI0822 15:46:15.415787 32487 solver.cpp:404]     Test net output #1: loss = 0.918609 (* 1 = 0.918609 loss)\nI0822 15:46:16.733474 32487 solver.cpp:228] Iteration 51100, loss = 0.000117321\nI0822 
15:46:16.733528 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:46:16.733552 32487 solver.cpp:244]     Train net output #1: loss = 0.000117045 (* 1 = 0.000117045 loss)\nI0822 15:46:16.825448 32487 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0822 15:48:34.368525 32487 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0822 15:49:55.832010 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79568\nI0822 15:49:55.832304 32487 solver.cpp:404]     Test net output #1: loss = 0.918594 (* 1 = 0.918594 loss)\nI0822 15:49:57.150560 32487 solver.cpp:228] Iteration 51200, loss = 0.000438363\nI0822 15:49:57.150609 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:49:57.150630 32487 solver.cpp:244]     Train net output #1: loss = 0.000438087 (* 1 = 0.000438087 loss)\nI0822 15:49:57.241312 32487 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0822 15:52:14.805711 32487 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0822 15:53:36.269512 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7962\nI0822 15:53:36.269783 32487 solver.cpp:404]     Test net output #1: loss = 0.911563 (* 1 = 0.911563 loss)\nI0822 15:53:37.587469 32487 solver.cpp:228] Iteration 51300, loss = 0.000285866\nI0822 15:53:37.587518 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:53:37.587543 32487 solver.cpp:244]     Train net output #1: loss = 0.00028559 (* 1 = 0.00028559 loss)\nI0822 15:53:37.680502 32487 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0822 15:55:55.464568 32487 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0822 15:57:16.919973 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79784\nI0822 15:57:16.920267 32487 solver.cpp:404]     Test net output #1: loss = 0.909979 (* 1 = 0.909979 loss)\nI0822 15:57:18.239154 32487 solver.cpp:228] Iteration 51400, loss = 0.000214359\nI0822 15:57:18.239192 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:57:18.239214 32487 
solver.cpp:244]     Train net output #1: loss = 0.000214083 (* 1 = 0.000214083 loss)\nI0822 15:57:18.326189 32487 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0822 15:59:36.003693 32487 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0822 16:00:57.421607 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79744\nI0822 16:00:57.421912 32487 solver.cpp:404]     Test net output #1: loss = 0.9058 (* 1 = 0.9058 loss)\nI0822 16:00:58.740406 32487 solver.cpp:228] Iteration 51500, loss = 0.000466918\nI0822 16:00:58.740445 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:00:58.740468 32487 solver.cpp:244]     Train net output #1: loss = 0.000466642 (* 1 = 0.000466642 loss)\nI0822 16:00:58.832911 32487 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0822 16:03:16.545967 32487 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0822 16:04:37.895552 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7974\nI0822 16:04:37.895817 32487 solver.cpp:404]     Test net output #1: loss = 0.906527 (* 1 = 0.906527 loss)\nI0822 16:04:39.213215 32487 solver.cpp:228] Iteration 51600, loss = 0.000391462\nI0822 16:04:39.213265 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:04:39.213289 32487 solver.cpp:244]     Train net output #1: loss = 0.000391185 (* 1 = 0.000391185 loss)\nI0822 16:04:39.302355 32487 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0822 16:06:56.879789 32487 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0822 16:08:18.239840 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79904\nI0822 16:08:18.240151 32487 solver.cpp:404]     Test net output #1: loss = 0.900488 (* 1 = 0.900488 loss)\nI0822 16:08:19.558675 32487 solver.cpp:228] Iteration 51700, loss = 0.00017796\nI0822 16:08:19.558710 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:08:19.558732 32487 solver.cpp:244]     Train net output #1: loss = 0.000177684 (* 1 = 0.000177684 loss)\nI0822 16:08:19.656951 32487 
sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0822 16:10:37.334578 32487 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0822 16:11:58.688756 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79976\nI0822 16:11:58.689013 32487 solver.cpp:404]     Test net output #1: loss = 0.89906 (* 1 = 0.89906 loss)\nI0822 16:12:00.006690 32487 solver.cpp:228] Iteration 51800, loss = 0.000347622\nI0822 16:12:00.006724 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:12:00.006739 32487 solver.cpp:244]     Train net output #1: loss = 0.000347346 (* 1 = 0.000347346 loss)\nI0822 16:12:00.099524 32487 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0822 16:14:17.875720 32487 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0822 16:15:39.235514 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79884\nI0822 16:15:39.235787 32487 solver.cpp:404]     Test net output #1: loss = 0.89682 (* 1 = 0.89682 loss)\nI0822 16:15:40.553620 32487 solver.cpp:228] Iteration 51900, loss = 0.000358248\nI0822 16:15:40.553654 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:15:40.553669 32487 solver.cpp:244]     Train net output #1: loss = 0.000357972 (* 1 = 0.000357972 loss)\nI0822 16:15:40.644783 32487 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0822 16:17:58.444561 32487 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0822 16:19:19.794785 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79932\nI0822 16:19:19.795086 32487 solver.cpp:404]     Test net output #1: loss = 0.894747 (* 1 = 0.894747 loss)\nI0822 16:19:21.112840 32487 solver.cpp:228] Iteration 52000, loss = 0.000154928\nI0822 16:19:21.112884 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:19:21.112900 32487 solver.cpp:244]     Train net output #1: loss = 0.000154652 (* 1 = 0.000154652 loss)\nI0822 16:19:21.202173 32487 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0822 16:21:38.897511 32487 solver.cpp:337] Iteration 52100, 
Testing net (#0)\nI0822 16:23:00.241920 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0822 16:23:00.242223 32487 solver.cpp:404]     Test net output #1: loss = 0.893889 (* 1 = 0.893889 loss)\nI0822 16:23:01.560998 32487 solver.cpp:228] Iteration 52100, loss = 0.000247238\nI0822 16:23:01.561043 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:23:01.561058 32487 solver.cpp:244]     Train net output #1: loss = 0.000246962 (* 1 = 0.000246962 loss)\nI0822 16:23:01.654304 32487 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0822 16:25:19.411020 32487 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0822 16:26:40.764948 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0822 16:26:40.765244 32487 solver.cpp:404]     Test net output #1: loss = 0.89167 (* 1 = 0.89167 loss)\nI0822 16:26:42.083811 32487 solver.cpp:228] Iteration 52200, loss = 0.00033392\nI0822 16:26:42.083847 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:26:42.083861 32487 solver.cpp:244]     Train net output #1: loss = 0.000333643 (* 1 = 0.000333643 loss)\nI0822 16:26:42.173550 32487 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0822 16:28:59.705636 32487 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0822 16:30:21.065770 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0822 16:30:21.066071 32487 solver.cpp:404]     Test net output #1: loss = 0.889965 (* 1 = 0.889965 loss)\nI0822 16:30:22.383117 32487 solver.cpp:228] Iteration 52300, loss = 0.000149128\nI0822 16:30:22.383159 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:30:22.383175 32487 solver.cpp:244]     Train net output #1: loss = 0.000148852 (* 1 = 0.000148852 loss)\nI0822 16:30:22.462956 32487 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0822 16:32:39.266363 32487 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0822 16:34:00.618955 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79888\nI0822 
16:34:00.619240 32487 solver.cpp:404]     Test net output #1: loss = 0.887051 (* 1 = 0.887051 loss)\nI0822 16:34:01.936702 32487 solver.cpp:228] Iteration 52400, loss = 0.000312255\nI0822 16:34:01.936744 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:34:01.936760 32487 solver.cpp:244]     Train net output #1: loss = 0.000311978 (* 1 = 0.000311978 loss)\nI0822 16:34:02.018787 32487 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0822 16:36:18.928611 32487 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0822 16:37:40.402891 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79784\nI0822 16:37:40.403170 32487 solver.cpp:404]     Test net output #1: loss = 0.885427 (* 1 = 0.885427 loss)\nI0822 16:37:41.720553 32487 solver.cpp:228] Iteration 52500, loss = 0.000367688\nI0822 16:37:41.720597 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:37:41.720613 32487 solver.cpp:244]     Train net output #1: loss = 0.000367412 (* 1 = 0.000367412 loss)\nI0822 16:37:41.802671 32487 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0822 16:39:58.859432 32487 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0822 16:41:20.319649 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7974\nI0822 16:41:20.319958 32487 solver.cpp:404]     Test net output #1: loss = 0.88389 (* 1 = 0.88389 loss)\nI0822 16:41:21.637656 32487 solver.cpp:228] Iteration 52600, loss = 0.000153846\nI0822 16:41:21.637697 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:41:21.637713 32487 solver.cpp:244]     Train net output #1: loss = 0.000153569 (* 1 = 0.000153569 loss)\nI0822 16:41:21.718296 32487 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0822 16:43:38.603540 32487 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0822 16:45:00.078164 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79716\nI0822 16:45:00.078470 32487 solver.cpp:404]     Test net output #1: loss = 0.882223 (* 1 = 0.882223 loss)\nI0822 
16:45:01.396034 32487 solver.cpp:228] Iteration 52700, loss = 0.000291464\nI0822 16:45:01.396071 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:45:01.396086 32487 solver.cpp:244]     Train net output #1: loss = 0.000291188 (* 1 = 0.000291188 loss)\nI0822 16:45:01.488217 32487 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0822 16:47:18.434012 32487 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0822 16:48:39.896046 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79708\nI0822 16:48:39.896350 32487 solver.cpp:404]     Test net output #1: loss = 0.88041 (* 1 = 0.88041 loss)\nI0822 16:48:41.213661 32487 solver.cpp:228] Iteration 52800, loss = 0.000369871\nI0822 16:48:41.213699 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:48:41.213716 32487 solver.cpp:244]     Train net output #1: loss = 0.000369595 (* 1 = 0.000369595 loss)\nI0822 16:48:41.303503 32487 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0822 16:50:58.258976 32487 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0822 16:52:19.726569 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI0822 16:52:19.726837 32487 solver.cpp:404]     Test net output #1: loss = 0.878677 (* 1 = 0.878677 loss)\nI0822 16:52:21.044265 32487 solver.cpp:228] Iteration 52900, loss = 0.000209205\nI0822 16:52:21.044308 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:52:21.044333 32487 solver.cpp:244]     Train net output #1: loss = 0.000208928 (* 1 = 0.000208928 loss)\nI0822 16:52:21.127279 32487 sgd_solver.cpp:166] Iteration 52900, lr = 0.035\nI0822 16:54:38.070361 32487 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0822 16:55:59.546386 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79684\nI0822 16:55:59.546650 32487 solver.cpp:404]     Test net output #1: loss = 0.877986 (* 1 = 0.877986 loss)\nI0822 16:56:00.864483 32487 solver.cpp:228] Iteration 53000, loss = 0.000297862\nI0822 16:56:00.864524 32487 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:56:00.864540 32487 solver.cpp:244]     Train net output #1: loss = 0.000297586 (* 1 = 0.000297586 loss)\nI0822 16:56:00.955433 32487 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0822 16:58:17.921947 32487 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0822 16:59:39.391337 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7962\nI0822 16:59:39.391614 32487 solver.cpp:404]     Test net output #1: loss = 0.877655 (* 1 = 0.877655 loss)\nI0822 16:59:40.708947 32487 solver.cpp:228] Iteration 53100, loss = 0.000253336\nI0822 16:59:40.708988 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:59:40.709003 32487 solver.cpp:244]     Train net output #1: loss = 0.00025306 (* 1 = 0.00025306 loss)\nI0822 16:59:40.790994 32487 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0822 17:01:57.608278 32487 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0822 17:03:19.086341 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7978\nI0822 17:03:19.086645 32487 solver.cpp:404]     Test net output #1: loss = 0.873121 (* 1 = 0.873121 loss)\nI0822 17:03:20.403800 32487 solver.cpp:228] Iteration 53200, loss = 0.000157025\nI0822 17:03:20.403842 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:03:20.403858 32487 solver.cpp:244]     Train net output #1: loss = 0.000156749 (* 1 = 0.000156749 loss)\nI0822 17:03:20.490013 32487 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0822 17:05:37.237512 32487 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0822 17:06:58.714812 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79724\nI0822 17:06:58.715101 32487 solver.cpp:404]     Test net output #1: loss = 0.873393 (* 1 = 0.873393 loss)\nI0822 17:07:00.032511 32487 solver.cpp:228] Iteration 53300, loss = 0.000364465\nI0822 17:07:00.032554 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:07:00.032570 32487 solver.cpp:244]     Train net 
output #1: loss = 0.000364188 (* 1 = 0.000364188 loss)\nI0822 17:07:00.115927 32487 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0822 17:09:17.134292 32487 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0822 17:10:38.604600 32487 solver.cpp:404]     Test net output #0: accuracy = 0.798\nI0822 17:10:38.604892 32487 solver.cpp:404]     Test net output #1: loss = 0.870886 (* 1 = 0.870886 loss)\nI0822 17:10:39.922926 32487 solver.cpp:228] Iteration 53400, loss = 0.000353333\nI0822 17:10:39.922971 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:10:39.922987 32487 solver.cpp:244]     Train net output #1: loss = 0.000353057 (* 1 = 0.000353057 loss)\nI0822 17:10:40.012495 32487 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0822 17:12:56.945827 32487 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0822 17:14:18.418898 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79744\nI0822 17:14:18.419181 32487 solver.cpp:404]     Test net output #1: loss = 0.868772 (* 1 = 0.868772 loss)\nI0822 17:14:19.737468 32487 solver.cpp:228] Iteration 53500, loss = 0.000191985\nI0822 17:14:19.737514 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:14:19.737529 32487 solver.cpp:244]     Train net output #1: loss = 0.000191709 (* 1 = 0.000191709 loss)\nI0822 17:14:19.816972 32487 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0822 17:16:36.732092 32487 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0822 17:17:58.203274 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79836\nI0822 17:17:58.203564 32487 solver.cpp:404]     Test net output #1: loss = 0.867289 (* 1 = 0.867289 loss)\nI0822 17:17:59.521421 32487 solver.cpp:228] Iteration 53600, loss = 0.0003722\nI0822 17:17:59.521466 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:17:59.521482 32487 solver.cpp:244]     Train net output #1: loss = 0.000371923 (* 1 = 0.000371923 loss)\nI0822 17:17:59.611552 32487 sgd_solver.cpp:166] Iteration 
53600, lr = 0.035\nI0822 17:20:16.729558 32487 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0822 17:21:38.202116 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79744\nI0822 17:21:38.202392 32487 solver.cpp:404]     Test net output #1: loss = 0.866747 (* 1 = 0.866747 loss)\nI0822 17:21:39.519495 32487 solver.cpp:228] Iteration 53700, loss = 0.000339235\nI0822 17:21:39.519539 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:21:39.519556 32487 solver.cpp:244]     Train net output #1: loss = 0.000338959 (* 1 = 0.000338959 loss)\nI0822 17:21:39.603502 32487 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0822 17:23:56.639539 32487 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0822 17:25:18.111233 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79828\nI0822 17:25:18.111523 32487 solver.cpp:404]     Test net output #1: loss = 0.863579 (* 1 = 0.863579 loss)\nI0822 17:25:19.428894 32487 solver.cpp:228] Iteration 53800, loss = 0.000147706\nI0822 17:25:19.428937 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:25:19.428953 32487 solver.cpp:244]     Train net output #1: loss = 0.00014743 (* 1 = 0.00014743 loss)\nI0822 17:25:19.514431 32487 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0822 17:27:36.639212 32487 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0822 17:28:58.118825 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79764\nI0822 17:28:58.119117 32487 solver.cpp:404]     Test net output #1: loss = 0.862925 (* 1 = 0.862925 loss)\nI0822 17:28:59.437203 32487 solver.cpp:228] Iteration 53900, loss = 0.000269747\nI0822 17:28:59.437248 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:28:59.437264 32487 solver.cpp:244]     Train net output #1: loss = 0.000269471 (* 1 = 0.000269471 loss)\nI0822 17:28:59.525583 32487 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0822 17:31:16.425645 32487 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0822 
17:32:37.895874 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79872\nI0822 17:32:37.896163 32487 solver.cpp:404]     Test net output #1: loss = 0.861107 (* 1 = 0.861107 loss)\nI0822 17:32:39.213291 32487 solver.cpp:228] Iteration 54000, loss = 0.000387528\nI0822 17:32:39.213335 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:32:39.213351 32487 solver.cpp:244]     Train net output #1: loss = 0.000387252 (* 1 = 0.000387252 loss)\nI0822 17:32:39.300103 32487 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0822 17:34:56.209889 32487 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0822 17:36:17.678912 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79796\nI0822 17:36:17.679216 32487 solver.cpp:404]     Test net output #1: loss = 0.856795 (* 1 = 0.856795 loss)\nI0822 17:36:18.996593 32487 solver.cpp:228] Iteration 54100, loss = 0.000190535\nI0822 17:36:18.996637 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:36:18.996652 32487 solver.cpp:244]     Train net output #1: loss = 0.000190259 (* 1 = 0.000190259 loss)\nI0822 17:36:19.082931 32487 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0822 17:38:36.104706 32487 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0822 17:39:57.582643 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0822 17:39:57.582967 32487 solver.cpp:404]     Test net output #1: loss = 0.856679 (* 1 = 0.856679 loss)\nI0822 17:39:58.900423 32487 solver.cpp:228] Iteration 54200, loss = 0.000264273\nI0822 17:39:58.900466 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:39:58.900482 32487 solver.cpp:244]     Train net output #1: loss = 0.000263997 (* 1 = 0.000263997 loss)\nI0822 17:39:58.987463 32487 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0822 17:42:15.830938 32487 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0822 17:43:37.295707 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79768\nI0822 17:43:37.295980 32487 
solver.cpp:404]     Test net output #1: loss = 0.854845 (* 1 = 0.854845 loss)\nI0822 17:43:38.613831 32487 solver.cpp:228] Iteration 54300, loss = 0.000299393\nI0822 17:43:38.613878 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:43:38.613894 32487 solver.cpp:244]     Train net output #1: loss = 0.000299116 (* 1 = 0.000299116 loss)\nI0822 17:43:38.703338 32487 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0822 17:45:55.712702 32487 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0822 17:47:17.192528 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79964\nI0822 17:47:17.192837 32487 solver.cpp:404]     Test net output #1: loss = 0.851606 (* 1 = 0.851606 loss)\nI0822 17:47:18.510579 32487 solver.cpp:228] Iteration 54400, loss = 0.000180133\nI0822 17:47:18.510625 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:47:18.510641 32487 solver.cpp:244]     Train net output #1: loss = 0.000179856 (* 1 = 0.000179856 loss)\nI0822 17:47:18.592350 32487 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0822 17:49:35.520112 32487 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0822 17:50:56.996471 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79808\nI0822 17:50:56.996781 32487 solver.cpp:404]     Test net output #1: loss = 0.85093 (* 1 = 0.85093 loss)\nI0822 17:50:58.314477 32487 solver.cpp:228] Iteration 54500, loss = 0.00028856\nI0822 17:50:58.314523 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:50:58.314539 32487 solver.cpp:244]     Train net output #1: loss = 0.000288284 (* 1 = 0.000288284 loss)\nI0822 17:50:58.400472 32487 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0822 17:53:15.307451 32487 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0822 17:54:36.784381 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79924\nI0822 17:54:36.784685 32487 solver.cpp:404]     Test net output #1: loss = 0.84955 (* 1 = 0.84955 loss)\nI0822 17:54:38.101948 32487 
solver.cpp:228] Iteration 54600, loss = 0.000384309\nI0822 17:54:38.101991 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:54:38.102008 32487 solver.cpp:244]     Train net output #1: loss = 0.000384033 (* 1 = 0.000384033 loss)\nI0822 17:54:38.193578 32487 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0822 17:56:55.121155 32487 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0822 17:58:16.592661 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0822 17:58:16.592919 32487 solver.cpp:404]     Test net output #1: loss = 0.847173 (* 1 = 0.847173 loss)\nI0822 17:58:17.910423 32487 solver.cpp:228] Iteration 54700, loss = 0.000166802\nI0822 17:58:17.910467 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:58:17.910483 32487 solver.cpp:244]     Train net output #1: loss = 0.000166526 (* 1 = 0.000166526 loss)\nI0822 17:58:18.002887 32487 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0822 18:00:35.152144 32487 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0822 18:01:56.636698 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79968\nI0822 18:01:56.636965 32487 solver.cpp:404]     Test net output #1: loss = 0.84558 (* 1 = 0.84558 loss)\nI0822 18:01:57.955337 32487 solver.cpp:228] Iteration 54800, loss = 0.000361606\nI0822 18:01:57.955381 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:01:57.955399 32487 solver.cpp:244]     Train net output #1: loss = 0.00036133 (* 1 = 0.00036133 loss)\nI0822 18:01:58.036559 32487 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0822 18:04:14.934556 32487 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0822 18:05:36.401751 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7988\nI0822 18:05:36.402055 32487 solver.cpp:404]     Test net output #1: loss = 0.844603 (* 1 = 0.844603 loss)\nI0822 18:05:37.720124 32487 solver.cpp:228] Iteration 54900, loss = 0.000366057\nI0822 18:05:37.720167 32487 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0822 18:05:37.720183 32487 solver.cpp:244]     Train net output #1: loss = 0.000365781 (* 1 = 0.000365781 loss)\nI0822 18:05:37.798985 32487 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0822 18:07:54.697095 32487 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0822 18:09:16.158001 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79996\nI0822 18:09:16.158304 32487 solver.cpp:404]     Test net output #1: loss = 0.84014 (* 1 = 0.84014 loss)\nI0822 18:09:17.477020 32487 solver.cpp:228] Iteration 55000, loss = 0.000190872\nI0822 18:09:17.477066 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:09:17.477082 32487 solver.cpp:244]     Train net output #1: loss = 0.000190596 (* 1 = 0.000190596 loss)\nI0822 18:09:17.560132 32487 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0822 18:11:34.381176 32487 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0822 18:12:55.848337 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79936\nI0822 18:12:55.848630 32487 solver.cpp:404]     Test net output #1: loss = 0.839524 (* 1 = 0.839524 loss)\nI0822 18:12:57.166465 32487 solver.cpp:228] Iteration 55100, loss = 0.000350467\nI0822 18:12:57.166508 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:12:57.166524 32487 solver.cpp:244]     Train net output #1: loss = 0.000350191 (* 1 = 0.000350191 loss)\nI0822 18:12:57.249195 32487 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0822 18:15:14.068104 32487 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0822 18:16:35.547921 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80032\nI0822 18:16:35.548213 32487 solver.cpp:404]     Test net output #1: loss = 0.835538 (* 1 = 0.835538 loss)\nI0822 18:16:36.866981 32487 solver.cpp:228] Iteration 55200, loss = 0.000384309\nI0822 18:16:36.867022 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:16:36.867038 32487 solver.cpp:244]     Train net output #1: loss = 0.000384033 (* 1 = 
0.000384033 loss)\nI0822 18:16:36.952524 32487 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0822 18:18:53.833197 32487 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0822 18:20:15.195260 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7994\nI0822 18:20:15.195567 32487 solver.cpp:404]     Test net output #1: loss = 0.834648 (* 1 = 0.834648 loss)\nI0822 18:20:16.513744 32487 solver.cpp:228] Iteration 55300, loss = 0.000197763\nI0822 18:20:16.513789 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:20:16.513806 32487 solver.cpp:244]     Train net output #1: loss = 0.000197487 (* 1 = 0.000197487 loss)\nI0822 18:20:16.594406 32487 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0822 18:22:33.530278 32487 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0822 18:23:54.893494 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80036\nI0822 18:23:54.893767 32487 solver.cpp:404]     Test net output #1: loss = 0.833412 (* 1 = 0.833412 loss)\nI0822 18:23:56.212227 32487 solver.cpp:228] Iteration 55400, loss = 0.000264093\nI0822 18:23:56.212277 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:23:56.212294 32487 solver.cpp:244]     Train net output #1: loss = 0.000263817 (* 1 = 0.000263817 loss)\nI0822 18:23:56.297487 32487 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0822 18:26:13.159126 32487 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0822 18:27:34.531939 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79876\nI0822 18:27:34.532244 32487 solver.cpp:404]     Test net output #1: loss = 0.832271 (* 1 = 0.832271 loss)\nI0822 18:27:35.851048 32487 solver.cpp:228] Iteration 55500, loss = 0.000383472\nI0822 18:27:35.851092 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:27:35.851109 32487 solver.cpp:244]     Train net output #1: loss = 0.000383195 (* 1 = 0.000383195 loss)\nI0822 18:27:35.931149 32487 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0822 
18:29:52.758265 32487 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0822 18:31:14.132490 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79968\nI0822 18:31:14.132807 32487 solver.cpp:404]     Test net output #1: loss = 0.830507 (* 1 = 0.830507 loss)\nI0822 18:31:15.451097 32487 solver.cpp:228] Iteration 55600, loss = 0.00021766\nI0822 18:31:15.451140 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:31:15.451156 32487 solver.cpp:244]     Train net output #1: loss = 0.000217384 (* 1 = 0.000217384 loss)\nI0822 18:31:15.532434 32487 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0822 18:33:32.529888 32487 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0822 18:34:53.915592 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7986\nI0822 18:34:53.915899 32487 solver.cpp:404]     Test net output #1: loss = 0.830841 (* 1 = 0.830841 loss)\nI0822 18:34:55.233315 32487 solver.cpp:228] Iteration 55700, loss = 0.00032658\nI0822 18:34:55.233359 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:34:55.233377 32487 solver.cpp:244]     Train net output #1: loss = 0.000326304 (* 1 = 0.000326304 loss)\nI0822 18:34:55.313849 32487 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0822 18:37:12.124501 32487 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0822 18:38:33.510098 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80004\nI0822 18:38:33.510411 32487 solver.cpp:404]     Test net output #1: loss = 0.827432 (* 1 = 0.827432 loss)\nI0822 18:38:34.828048 32487 solver.cpp:228] Iteration 55800, loss = 0.000405579\nI0822 18:38:34.828089 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:38:34.828105 32487 solver.cpp:244]     Train net output #1: loss = 0.000405303 (* 1 = 0.000405303 loss)\nI0822 18:38:34.910377 32487 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0822 18:40:51.731194 32487 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0822 18:42:13.111558 32487 
solver.cpp:404]     Test net output #0: accuracy = 0.79972\nI0822 18:42:13.111870 32487 solver.cpp:404]     Test net output #1: loss = 0.825624 (* 1 = 0.825624 loss)\nI0822 18:42:14.429697 32487 solver.cpp:228] Iteration 55900, loss = 0.00019925\nI0822 18:42:14.429739 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:42:14.429755 32487 solver.cpp:244]     Train net output #1: loss = 0.000198974 (* 1 = 0.000198974 loss)\nI0822 18:42:14.524641 32487 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0822 18:44:31.954867 32487 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0822 18:45:53.334404 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79996\nI0822 18:45:53.334668 32487 solver.cpp:404]     Test net output #1: loss = 0.82493 (* 1 = 0.82493 loss)\nI0822 18:45:54.656136 32487 solver.cpp:228] Iteration 56000, loss = 0.000399517\nI0822 18:45:54.656168 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:45:54.656183 32487 solver.cpp:244]     Train net output #1: loss = 0.000399241 (* 1 = 0.000399241 loss)\nI0822 18:45:54.732290 32487 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0822 18:48:11.374882 32487 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0822 18:49:32.766845 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79976\nI0822 18:49:32.767137 32487 solver.cpp:404]     Test net output #1: loss = 0.82328 (* 1 = 0.82328 loss)\nI0822 18:49:34.088919 32487 solver.cpp:228] Iteration 56100, loss = 0.000368024\nI0822 18:49:34.088953 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:49:34.088968 32487 solver.cpp:244]     Train net output #1: loss = 0.000367748 (* 1 = 0.000367748 loss)\nI0822 18:49:34.168128 32487 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0822 18:51:50.752081 32487 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0822 18:53:12.245205 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80012\nI0822 18:53:12.245499 32487 solver.cpp:404]     Test net 
output #1: loss = 0.822003 (* 1 = 0.822003 loss)\nI0822 18:53:13.567731 32487 solver.cpp:228] Iteration 56200, loss = 0.000240575\nI0822 18:53:13.567775 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:53:13.567790 32487 solver.cpp:244]     Train net output #1: loss = 0.000240298 (* 1 = 0.000240298 loss)\nI0822 18:53:13.650372 32487 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0822 18:55:30.249374 32487 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0822 18:56:51.745798 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI0822 18:56:51.746105 32487 solver.cpp:404]     Test net output #1: loss = 0.820536 (* 1 = 0.820536 loss)\nI0822 18:56:53.068035 32487 solver.cpp:228] Iteration 56300, loss = 0.000381222\nI0822 18:56:53.068069 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:56:53.068084 32487 solver.cpp:244]     Train net output #1: loss = 0.000380945 (* 1 = 0.000380945 loss)\nI0822 18:56:53.144105 32487 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0822 18:59:09.807214 32487 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0822 19:00:31.308444 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80032\nI0822 19:00:31.308729 32487 solver.cpp:404]     Test net output #1: loss = 0.820376 (* 1 = 0.820376 loss)\nI0822 19:00:32.631083 32487 solver.cpp:228] Iteration 56400, loss = 0.000460529\nI0822 19:00:32.631116 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:00:32.631132 32487 solver.cpp:244]     Train net output #1: loss = 0.000460253 (* 1 = 0.000460253 loss)\nI0822 19:00:32.710315 32487 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0822 19:02:49.308764 32487 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0822 19:04:10.800909 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79952\nI0822 19:04:10.801195 32487 solver.cpp:404]     Test net output #1: loss = 0.81886 (* 1 = 0.81886 loss)\nI0822 19:04:12.123714 32487 solver.cpp:228] Iteration 56500, 
loss = 0.000196159\nI0822 19:04:12.123756 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:04:12.123772 32487 solver.cpp:244]     Train net output #1: loss = 0.000195882 (* 1 = 0.000195882 loss)\nI0822 19:04:12.198516 32487 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0822 19:06:28.771024 32487 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0822 19:07:50.264586 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80028\nI0822 19:07:50.264856 32487 solver.cpp:404]     Test net output #1: loss = 0.817694 (* 1 = 0.817694 loss)\nI0822 19:07:51.586998 32487 solver.cpp:228] Iteration 56600, loss = 0.000313681\nI0822 19:07:51.587031 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:07:51.587046 32487 solver.cpp:244]     Train net output #1: loss = 0.000313405 (* 1 = 0.000313405 loss)\nI0822 19:07:51.660645 32487 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0822 19:10:08.245401 32487 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0822 19:11:29.731022 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79936\nI0822 19:11:29.731328 32487 solver.cpp:404]     Test net output #1: loss = 0.81656 (* 1 = 0.81656 loss)\nI0822 19:11:31.053477 32487 solver.cpp:228] Iteration 56700, loss = 0.000437825\nI0822 19:11:31.053508 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:11:31.053522 32487 solver.cpp:244]     Train net output #1: loss = 0.000437549 (* 1 = 0.000437549 loss)\nI0822 19:11:31.136253 32487 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0822 19:13:47.846587 32487 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0822 19:15:09.342954 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80048\nI0822 19:15:09.343246 32487 solver.cpp:404]     Test net output #1: loss = 0.813705 (* 1 = 0.813705 loss)\nI0822 19:15:10.665225 32487 solver.cpp:228] Iteration 56800, loss = 0.000210776\nI0822 19:15:10.665259 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
19:15:10.665274 32487 solver.cpp:244]     Train net output #1: loss = 0.0002105 (* 1 = 0.0002105 loss)\nI0822 19:15:10.743764 32487 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0822 19:17:27.362154 32487 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0822 19:18:48.863766 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79968\nI0822 19:18:48.864054 32487 solver.cpp:404]     Test net output #1: loss = 0.81385 (* 1 = 0.81385 loss)\nI0822 19:18:50.186164 32487 solver.cpp:228] Iteration 56900, loss = 0.000440334\nI0822 19:18:50.186197 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:18:50.186213 32487 solver.cpp:244]     Train net output #1: loss = 0.000440058 (* 1 = 0.000440058 loss)\nI0822 19:18:50.268805 32487 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0822 19:21:06.815764 32487 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0822 19:22:28.319183 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80024\nI0822 19:22:28.319492 32487 solver.cpp:404]     Test net output #1: loss = 0.811424 (* 1 = 0.811424 loss)\nI0822 19:22:29.641434 32487 solver.cpp:228] Iteration 57000, loss = 0.000335426\nI0822 19:22:29.641466 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:22:29.641481 32487 solver.cpp:244]     Train net output #1: loss = 0.00033515 (* 1 = 0.00033515 loss)\nI0822 19:22:29.719872 32487 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0822 19:24:46.282004 32487 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0822 19:26:07.781618 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79944\nI0822 19:26:07.781903 32487 solver.cpp:404]     Test net output #1: loss = 0.810068 (* 1 = 0.810068 loss)\nI0822 19:26:09.103718 32487 solver.cpp:228] Iteration 57100, loss = 0.000245304\nI0822 19:26:09.103759 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:26:09.103775 32487 solver.cpp:244]     Train net output #1: loss = 0.000245028 (* 1 = 0.000245028 loss)\nI0822 
19:26:09.179096 32487 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0822 19:28:25.838248 32487 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0822 19:29:47.335855 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80064\nI0822 19:29:47.336161 32487 solver.cpp:404]     Test net output #1: loss = 0.809105 (* 1 = 0.809105 loss)\nI0822 19:29:48.658226 32487 solver.cpp:228] Iteration 57200, loss = 0.00029972\nI0822 19:29:48.658263 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:29:48.658279 32487 solver.cpp:244]     Train net output #1: loss = 0.000299444 (* 1 = 0.000299444 loss)\nI0822 19:29:48.737820 32487 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0822 19:32:05.414763 32487 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0822 19:33:26.912415 32487 solver.cpp:404]     Test net output #0: accuracy = 0.7996\nI0822 19:33:26.912670 32487 solver.cpp:404]     Test net output #1: loss = 0.808869 (* 1 = 0.808869 loss)\nI0822 19:33:28.235527 32487 solver.cpp:228] Iteration 57300, loss = 0.000358674\nI0822 19:33:28.235561 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:33:28.235576 32487 solver.cpp:244]     Train net output #1: loss = 0.000358398 (* 1 = 0.000358398 loss)\nI0822 19:33:28.316262 32487 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0822 19:35:44.971385 32487 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0822 19:37:06.473748 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80084\nI0822 19:37:06.474061 32487 solver.cpp:404]     Test net output #1: loss = 0.807263 (* 1 = 0.807263 loss)\nI0822 19:37:07.797183 32487 solver.cpp:228] Iteration 57400, loss = 0.00022641\nI0822 19:37:07.797217 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:37:07.797232 32487 solver.cpp:244]     Train net output #1: loss = 0.000226134 (* 1 = 0.000226134 loss)\nI0822 19:37:07.878710 32487 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0822 19:39:24.539611 32487 solver.cpp:337] 
Iteration 57500, Testing net (#0)\nI0822 19:40:46.035993 32487 solver.cpp:404]     Test net output #0: accuracy = 0.79984\nI0822 19:40:46.036310 32487 solver.cpp:404]     Test net output #1: loss = 0.807186 (* 1 = 0.807186 loss)\nI0822 19:40:47.358655 32487 solver.cpp:228] Iteration 57500, loss = 0.0003476\nI0822 19:40:47.358687 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:40:47.358703 32487 solver.cpp:244]     Train net output #1: loss = 0.000347323 (* 1 = 0.000347323 loss)\nI0822 19:40:47.435981 32487 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0822 19:43:04.059768 32487 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0822 19:44:25.549218 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80088\nI0822 19:44:25.549504 32487 solver.cpp:404]     Test net output #1: loss = 0.806546 (* 1 = 0.806546 loss)\nI0822 19:44:26.871732 32487 solver.cpp:228] Iteration 57600, loss = 0.000328029\nI0822 19:44:26.871773 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:44:26.871788 32487 solver.cpp:244]     Train net output #1: loss = 0.000327753 (* 1 = 0.000327753 loss)\nI0822 19:44:26.951189 32487 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0822 19:46:43.583827 32487 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0822 19:48:05.072808 32487 solver.cpp:404]     Test net output #0: accuracy = 0.801\nI0822 19:48:05.073081 32487 solver.cpp:404]     Test net output #1: loss = 0.803533 (* 1 = 0.803533 loss)\nI0822 19:48:06.395951 32487 solver.cpp:228] Iteration 57700, loss = 0.000262566\nI0822 19:48:06.395984 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:48:06.395999 32487 solver.cpp:244]     Train net output #1: loss = 0.00026229 (* 1 = 0.00026229 loss)\nI0822 19:48:06.478904 32487 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0822 19:50:23.100816 32487 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0822 19:51:44.590150 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.80132\nI0822 19:51:44.590466 32487 solver.cpp:404]     Test net output #1: loss = 0.803234 (* 1 = 0.803234 loss)\nI0822 19:51:45.911749 32487 solver.cpp:228] Iteration 57800, loss = 0.000378842\nI0822 19:51:45.911789 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:51:45.911804 32487 solver.cpp:244]     Train net output #1: loss = 0.000378565 (* 1 = 0.000378565 loss)\nI0822 19:51:45.987126 32487 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0822 19:54:02.584646 32487 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0822 19:55:24.059854 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80068\nI0822 19:55:24.060135 32487 solver.cpp:404]     Test net output #1: loss = 0.802386 (* 1 = 0.802386 loss)\nI0822 19:55:25.381958 32487 solver.cpp:228] Iteration 57900, loss = 0.000408825\nI0822 19:55:25.381989 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:55:25.382004 32487 solver.cpp:244]     Train net output #1: loss = 0.000408549 (* 1 = 0.000408549 loss)\nI0822 19:55:25.454704 32487 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0822 19:57:42.044332 32487 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0822 19:59:03.527943 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80108\nI0822 19:59:03.528259 32487 solver.cpp:404]     Test net output #1: loss = 0.801201 (* 1 = 0.801201 loss)\nI0822 19:59:04.850899 32487 solver.cpp:228] Iteration 58000, loss = 0.000225585\nI0822 19:59:04.850932 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:59:04.850946 32487 solver.cpp:244]     Train net output #1: loss = 0.000225309 (* 1 = 0.000225309 loss)\nI0822 19:59:04.933151 32487 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0822 20:01:21.530606 32487 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0822 20:02:43.022390 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8008\nI0822 20:02:43.022699 32487 solver.cpp:404]     Test net output #1: loss = 0.799888 (* 1 = 0.799888 
loss)\nI0822 20:02:44.344889 32487 solver.cpp:228] Iteration 58100, loss = 0.000337828\nI0822 20:02:44.344923 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:02:44.344936 32487 solver.cpp:244]     Train net output #1: loss = 0.000337552 (* 1 = 0.000337552 loss)\nI0822 20:02:44.420634 32487 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0822 20:05:00.995076 32487 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0822 20:06:22.470652 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80136\nI0822 20:06:22.470919 32487 solver.cpp:404]     Test net output #1: loss = 0.800541 (* 1 = 0.800541 loss)\nI0822 20:06:23.793081 32487 solver.cpp:228] Iteration 58200, loss = 0.000410991\nI0822 20:06:23.793114 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:06:23.793129 32487 solver.cpp:244]     Train net output #1: loss = 0.000410715 (* 1 = 0.000410715 loss)\nI0822 20:06:23.867038 32487 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0822 20:08:40.499029 32487 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0822 20:10:01.985054 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8006\nI0822 20:10:01.985345 32487 solver.cpp:404]     Test net output #1: loss = 0.799453 (* 1 = 0.799453 loss)\nI0822 20:10:03.307618 32487 solver.cpp:228] Iteration 58300, loss = 0.000276608\nI0822 20:10:03.307651 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:10:03.307667 32487 solver.cpp:244]     Train net output #1: loss = 0.000276331 (* 1 = 0.000276331 loss)\nI0822 20:10:03.385712 32487 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0822 20:12:19.933677 32487 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0822 20:13:41.420871 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80168\nI0822 20:13:41.421156 32487 solver.cpp:404]     Test net output #1: loss = 0.797889 (* 1 = 0.797889 loss)\nI0822 20:13:42.742897 32487 solver.cpp:228] Iteration 58400, loss = 0.000346934\nI0822 20:13:42.742931 
32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:13:42.742946 32487 solver.cpp:244]     Train net output #1: loss = 0.000346658 (* 1 = 0.000346658 loss)\nI0822 20:13:42.822237 32487 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0822 20:15:59.442451 32487 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0822 20:17:20.936544 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8014\nI0822 20:17:20.936825 32487 solver.cpp:404]     Test net output #1: loss = 0.796142 (* 1 = 0.796142 loss)\nI0822 20:17:22.258801 32487 solver.cpp:228] Iteration 58500, loss = 0.000388098\nI0822 20:17:22.258836 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:17:22.258850 32487 solver.cpp:244]     Train net output #1: loss = 0.000387821 (* 1 = 0.000387821 loss)\nI0822 20:17:22.339471 32487 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0822 20:19:39.130187 32487 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0822 20:21:00.624326 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI0822 20:21:00.624614 32487 solver.cpp:404]     Test net output #1: loss = 0.795218 (* 1 = 0.795218 loss)\nI0822 20:21:01.947772 32487 solver.cpp:228] Iteration 58600, loss = 0.000225989\nI0822 20:21:01.947805 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:21:01.947820 32487 solver.cpp:244]     Train net output #1: loss = 0.000225712 (* 1 = 0.000225712 loss)\nI0822 20:21:02.021843 32487 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0822 20:23:18.935622 32487 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0822 20:24:40.428462 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80156\nI0822 20:24:40.428752 32487 solver.cpp:404]     Test net output #1: loss = 0.794112 (* 1 = 0.794112 loss)\nI0822 20:24:41.751755 32487 solver.cpp:228] Iteration 58700, loss = 0.000338701\nI0822 20:24:41.751791 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:24:41.751806 32487 solver.cpp:244]     
Train net output #1: loss = 0.000338425 (* 1 = 0.000338425 loss)\nI0822 20:24:41.834031 32487 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0822 20:26:58.759598 32487 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0822 20:28:20.247781 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI0822 20:28:20.248052 32487 solver.cpp:404]     Test net output #1: loss = 0.793343 (* 1 = 0.793343 loss)\nI0822 20:28:21.571280 32487 solver.cpp:228] Iteration 58800, loss = 0.00037629\nI0822 20:28:21.571316 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:28:21.571331 32487 solver.cpp:244]     Train net output #1: loss = 0.000376014 (* 1 = 0.000376014 loss)\nI0822 20:28:21.648044 32487 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0822 20:30:38.584555 32487 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0822 20:32:00.066727 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80208\nI0822 20:32:00.066992 32487 solver.cpp:404]     Test net output #1: loss = 0.791184 (* 1 = 0.791184 loss)\nI0822 20:32:01.389365 32487 solver.cpp:228] Iteration 58900, loss = 0.000201548\nI0822 20:32:01.389401 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:32:01.389416 32487 solver.cpp:244]     Train net output #1: loss = 0.000201272 (* 1 = 0.000201272 loss)\nI0822 20:32:01.472987 32487 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0822 20:34:18.427122 32487 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0822 20:35:39.809886 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80208\nI0822 20:35:39.810184 32487 solver.cpp:404]     Test net output #1: loss = 0.791602 (* 1 = 0.791602 loss)\nI0822 20:35:41.132385 32487 solver.cpp:228] Iteration 59000, loss = 0.000378245\nI0822 20:35:41.132421 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:35:41.132436 32487 solver.cpp:244]     Train net output #1: loss = 0.000377968 (* 1 = 0.000377968 loss)\nI0822 20:35:41.216437 32487 sgd_solver.cpp:166] 
Iteration 59000, lr = 0.035\nI0822 20:37:58.177120 32487 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0822 20:39:19.551925 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80204\nI0822 20:39:19.552217 32487 solver.cpp:404]     Test net output #1: loss = 0.789185 (* 1 = 0.789185 loss)\nI0822 20:39:20.874354 32487 solver.cpp:228] Iteration 59100, loss = 0.000454432\nI0822 20:39:20.874392 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:39:20.874414 32487 solver.cpp:244]     Train net output #1: loss = 0.000454156 (* 1 = 0.000454156 loss)\nI0822 20:39:20.963539 32487 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0822 20:41:37.947335 32487 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0822 20:42:59.340993 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80296\nI0822 20:42:59.341311 32487 solver.cpp:404]     Test net output #1: loss = 0.787897 (* 1 = 0.787897 loss)\nI0822 20:43:00.664422 32487 solver.cpp:228] Iteration 59200, loss = 0.000255477\nI0822 20:43:00.664458 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:43:00.664481 32487 solver.cpp:244]     Train net output #1: loss = 0.000255201 (* 1 = 0.000255201 loss)\nI0822 20:43:00.744972 32487 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0822 20:45:17.651439 32487 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0822 20:46:39.031806 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80136\nI0822 20:46:39.032112 32487 solver.cpp:404]     Test net output #1: loss = 0.788419 (* 1 = 0.788419 loss)\nI0822 20:46:40.354301 32487 solver.cpp:228] Iteration 59300, loss = 0.000453801\nI0822 20:46:40.354336 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:46:40.354351 32487 solver.cpp:244]     Train net output #1: loss = 0.000453525 (* 1 = 0.000453525 loss)\nI0822 20:46:40.440250 32487 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0822 20:48:57.308874 32487 solver.cpp:337] Iteration 59400, Testing net 
(#0)\nI0822 20:50:18.689685 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8024\nI0822 20:50:18.690037 32487 solver.cpp:404]     Test net output #1: loss = 0.78684 (* 1 = 0.78684 loss)\nI0822 20:50:20.013166 32487 solver.cpp:228] Iteration 59400, loss = 0.000374652\nI0822 20:50:20.013200 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:50:20.013216 32487 solver.cpp:244]     Train net output #1: loss = 0.000374376 (* 1 = 0.000374376 loss)\nI0822 20:50:20.099500 32487 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0822 20:52:37.074048 32487 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0822 20:53:58.488991 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80204\nI0822 20:53:58.489342 32487 solver.cpp:404]     Test net output #1: loss = 0.785129 (* 1 = 0.785129 loss)\nI0822 20:53:59.813516 32487 solver.cpp:228] Iteration 59500, loss = 0.000283797\nI0822 20:53:59.813573 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:53:59.813591 32487 solver.cpp:244]     Train net output #1: loss = 0.000283521 (* 1 = 0.000283521 loss)\nI0822 20:53:59.894969 32487 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0822 20:56:17.120476 32487 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0822 20:57:39.454279 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80272\nI0822 20:57:39.454659 32487 solver.cpp:404]     Test net output #1: loss = 0.784491 (* 1 = 0.784491 loss)\nI0822 20:57:40.779876 32487 solver.cpp:228] Iteration 59600, loss = 0.000368424\nI0822 20:57:40.779918 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:57:40.779933 32487 solver.cpp:244]     Train net output #1: loss = 0.000368148 (* 1 = 0.000368148 loss)\nI0822 20:57:40.856470 32487 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0822 20:59:57.912606 32487 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0822 21:01:20.246186 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80168\nI0822 21:01:20.246529 
32487 solver.cpp:404]     Test net output #1: loss = 0.784166 (* 1 = 0.784166 loss)\nI0822 21:01:21.571339 32487 solver.cpp:228] Iteration 59700, loss = 0.000374582\nI0822 21:01:21.571382 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:01:21.571396 32487 solver.cpp:244]     Train net output #1: loss = 0.000374305 (* 1 = 0.000374305 loss)\nI0822 21:01:21.655773 32487 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0822 21:03:38.793030 32487 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0822 21:05:01.142197 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI0822 21:05:01.142575 32487 solver.cpp:404]     Test net output #1: loss = 0.783195 (* 1 = 0.783195 loss)\nI0822 21:05:02.469254 32487 solver.cpp:228] Iteration 59800, loss = 0.000222269\nI0822 21:05:02.469295 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:05:02.469310 32487 solver.cpp:244]     Train net output #1: loss = 0.000221993 (* 1 = 0.000221993 loss)\nI0822 21:05:02.551535 32487 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0822 21:07:19.669940 32487 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0822 21:08:42.012269 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80136\nI0822 21:08:42.012651 32487 solver.cpp:404]     Test net output #1: loss = 0.781915 (* 1 = 0.781915 loss)\nI0822 21:08:43.339769 32487 solver.cpp:228] Iteration 59900, loss = 0.000365782\nI0822 21:08:43.339812 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:08:43.339826 32487 solver.cpp:244]     Train net output #1: loss = 0.000365506 (* 1 = 0.000365506 loss)\nI0822 21:08:43.411842 32487 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0822 21:11:00.515583 32487 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0822 21:12:22.861582 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI0822 21:12:22.861941 32487 solver.cpp:404]     Test net output #1: loss = 0.782899 (* 1 = 0.782899 loss)\nI0822 21:12:24.187265 32487 
solver.cpp:228] Iteration 60000, loss = 0.000385322\nI0822 21:12:24.187304 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:12:24.187319 32487 solver.cpp:244]     Train net output #1: loss = 0.000385046 (* 1 = 0.000385046 loss)\nI0822 21:12:24.268249 32487 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0822 21:14:41.437299 32487 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0822 21:16:03.781361 32487 solver.cpp:404]     Test net output #0: accuracy = 0.802\nI0822 21:16:03.781749 32487 solver.cpp:404]     Test net output #1: loss = 0.780957 (* 1 = 0.780957 loss)\nI0822 21:16:05.107133 32487 solver.cpp:228] Iteration 60100, loss = 0.000299232\nI0822 21:16:05.107180 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:16:05.107197 32487 solver.cpp:244]     Train net output #1: loss = 0.000298956 (* 1 = 0.000298956 loss)\nI0822 21:16:05.186560 32487 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0822 21:18:22.321032 32487 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0822 21:19:44.660193 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0822 21:19:44.660567 32487 solver.cpp:404]     Test net output #1: loss = 0.781137 (* 1 = 0.781137 loss)\nI0822 21:19:45.985613 32487 solver.cpp:228] Iteration 60200, loss = 0.000407685\nI0822 21:19:45.985652 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:19:45.985667 32487 solver.cpp:244]     Train net output #1: loss = 0.000407409 (* 1 = 0.000407409 loss)\nI0822 21:19:46.070489 32487 sgd_solver.cpp:166] Iteration 60200, lr = 0.035\nI0822 21:22:03.352572 32487 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0822 21:23:25.690321 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0822 21:23:25.690668 32487 solver.cpp:404]     Test net output #1: loss = 0.780244 (* 1 = 0.780244 loss)\nI0822 21:23:27.016068 32487 solver.cpp:228] Iteration 60300, loss = 0.000445532\nI0822 21:23:27.016118 32487 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0822 21:23:27.016134 32487 solver.cpp:244]     Train net output #1: loss = 0.000445255 (* 1 = 0.000445255 loss)\nI0822 21:23:27.091156 32487 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0822 21:25:44.328541 32487 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0822 21:27:06.662696 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80288\nI0822 21:27:06.663050 32487 solver.cpp:404]     Test net output #1: loss = 0.779342 (* 1 = 0.779342 loss)\nI0822 21:27:07.988553 32487 solver.cpp:228] Iteration 60400, loss = 0.000280574\nI0822 21:27:07.988590 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:27:07.988605 32487 solver.cpp:244]     Train net output #1: loss = 0.000280298 (* 1 = 0.000280298 loss)\nI0822 21:27:08.071521 32487 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0822 21:29:25.113276 32487 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0822 21:30:47.449414 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80156\nI0822 21:30:47.449760 32487 solver.cpp:404]     Test net output #1: loss = 0.77763 (* 1 = 0.77763 loss)\nI0822 21:30:48.774646 32487 solver.cpp:228] Iteration 60500, loss = 0.00038207\nI0822 21:30:48.774688 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:30:48.774703 32487 solver.cpp:244]     Train net output #1: loss = 0.000381794 (* 1 = 0.000381794 loss)\nI0822 21:30:48.854478 32487 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0822 21:33:05.871152 32487 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0822 21:34:28.204114 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80276\nI0822 21:34:28.204499 32487 solver.cpp:404]     Test net output #1: loss = 0.776069 (* 1 = 0.776069 loss)\nI0822 21:34:29.530174 32487 solver.cpp:228] Iteration 60600, loss = 0.00036853\nI0822 21:34:29.530216 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:34:29.530232 32487 solver.cpp:244]     Train net output #1: loss = 0.000368253 (* 
1 = 0.000368253 loss)\nI0822 21:34:29.611279 32487 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0822 21:36:46.717408 32487 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0822 21:38:09.044396 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80284\nI0822 21:38:09.044787 32487 solver.cpp:404]     Test net output #1: loss = 0.774645 (* 1 = 0.774645 loss)\nI0822 21:38:10.371155 32487 solver.cpp:228] Iteration 60700, loss = 0.000282779\nI0822 21:38:10.371197 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:38:10.371212 32487 solver.cpp:244]     Train net output #1: loss = 0.000282503 (* 1 = 0.000282503 loss)\nI0822 21:38:10.445333 32487 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0822 21:40:27.459291 32487 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0822 21:41:49.786882 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0822 21:41:49.787256 32487 solver.cpp:404]     Test net output #1: loss = 0.776432 (* 1 = 0.776432 loss)\nI0822 21:41:51.112349 32487 solver.cpp:228] Iteration 60800, loss = 0.000429199\nI0822 21:41:51.112402 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:41:51.112418 32487 solver.cpp:244]     Train net output #1: loss = 0.000428923 (* 1 = 0.000428923 loss)\nI0822 21:41:51.188525 32487 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0822 21:44:08.301183 32487 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0822 21:45:30.625372 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI0822 21:45:30.625758 32487 solver.cpp:404]     Test net output #1: loss = 0.775581 (* 1 = 0.775581 loss)\nI0822 21:45:31.950925 32487 solver.cpp:228] Iteration 60900, loss = 0.000456702\nI0822 21:45:31.950966 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:45:31.950981 32487 solver.cpp:244]     Train net output #1: loss = 0.000456426 (* 1 = 0.000456426 loss)\nI0822 21:45:32.033350 32487 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0822 
21:47:49.133481 32487 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0822 21:49:11.458539 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI0822 21:49:11.458920 32487 solver.cpp:404]     Test net output #1: loss = 0.776606 (* 1 = 0.776606 loss)\nI0822 21:49:12.784441 32487 solver.cpp:228] Iteration 61000, loss = 0.000267802\nI0822 21:49:12.784493 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:49:12.784510 32487 solver.cpp:244]     Train net output #1: loss = 0.000267526 (* 1 = 0.000267526 loss)\nI0822 21:49:12.859568 32487 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0822 21:51:29.986758 32487 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0822 21:52:52.311010 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80196\nI0822 21:52:52.311352 32487 solver.cpp:404]     Test net output #1: loss = 0.776102 (* 1 = 0.776102 loss)\nI0822 21:52:53.636901 32487 solver.cpp:228] Iteration 61100, loss = 0.000376789\nI0822 21:52:53.636945 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:52:53.636960 32487 solver.cpp:244]     Train net output #1: loss = 0.000376513 (* 1 = 0.000376513 loss)\nI0822 21:52:53.717803 32487 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0822 21:55:10.838017 32487 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0822 21:56:33.179888 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI0822 21:56:33.180240 32487 solver.cpp:404]     Test net output #1: loss = 0.776272 (* 1 = 0.776272 loss)\nI0822 21:56:34.505394 32487 solver.cpp:228] Iteration 61200, loss = 0.000380724\nI0822 21:56:34.505442 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:56:34.505460 32487 solver.cpp:244]     Train net output #1: loss = 0.000380447 (* 1 = 0.000380447 loss)\nI0822 21:56:34.577832 32487 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0822 21:58:51.636696 32487 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0822 22:00:13.974313 32487 
solver.cpp:404]     Test net output #0: accuracy = 0.80204\nI0822 22:00:13.974660 32487 solver.cpp:404]     Test net output #1: loss = 0.774364 (* 1 = 0.774364 loss)\nI0822 22:00:15.300438 32487 solver.cpp:228] Iteration 61300, loss = 0.000279839\nI0822 22:00:15.300483 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:00:15.300499 32487 solver.cpp:244]     Train net output #1: loss = 0.000279563 (* 1 = 0.000279563 loss)\nI0822 22:00:15.380981 32487 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0822 22:02:32.392812 32487 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0822 22:03:54.736125 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8028\nI0822 22:03:54.736515 32487 solver.cpp:404]     Test net output #1: loss = 0.773974 (* 1 = 0.773974 loss)\nI0822 22:03:56.061739 32487 solver.cpp:228] Iteration 61400, loss = 0.000357564\nI0822 22:03:56.061780 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:03:56.061796 32487 solver.cpp:244]     Train net output #1: loss = 0.000357287 (* 1 = 0.000357287 loss)\nI0822 22:03:56.146600 32487 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0822 22:06:13.289278 32487 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0822 22:07:35.624817 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80276\nI0822 22:07:35.625191 32487 solver.cpp:404]     Test net output #1: loss = 0.772139 (* 1 = 0.772139 loss)\nI0822 22:07:36.950525 32487 solver.cpp:228] Iteration 61500, loss = 0.000396109\nI0822 22:07:36.950567 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:07:36.950583 32487 solver.cpp:244]     Train net output #1: loss = 0.000395833 (* 1 = 0.000395833 loss)\nI0822 22:07:37.023330 32487 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0822 22:09:54.099377 32487 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0822 22:11:16.426910 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80384\nI0822 22:11:16.427278 32487 solver.cpp:404]     Test 
net output #1: loss = 0.771577 (* 1 = 0.771577 loss)\nI0822 22:11:17.752109 32487 solver.cpp:228] Iteration 61600, loss = 0.000291471\nI0822 22:11:17.752161 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:11:17.752177 32487 solver.cpp:244]     Train net output #1: loss = 0.000291195 (* 1 = 0.000291195 loss)\nI0822 22:11:17.834980 32487 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0822 22:13:34.924705 32487 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0822 22:14:57.268635 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8032\nI0822 22:14:57.269011 32487 solver.cpp:404]     Test net output #1: loss = 0.770641 (* 1 = 0.770641 loss)\nI0822 22:14:58.594195 32487 solver.cpp:228] Iteration 61700, loss = 0.000353502\nI0822 22:14:58.594247 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:14:58.594264 32487 solver.cpp:244]     Train net output #1: loss = 0.000353225 (* 1 = 0.000353225 loss)\nI0822 22:14:58.668498 32487 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0822 22:17:15.691597 32487 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0822 22:18:38.034730 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80368\nI0822 22:18:38.035085 32487 solver.cpp:404]     Test net output #1: loss = 0.770679 (* 1 = 0.770679 loss)\nI0822 22:18:39.360317 32487 solver.cpp:228] Iteration 61800, loss = 0.00033311\nI0822 22:18:39.360360 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:18:39.360376 32487 solver.cpp:244]     Train net output #1: loss = 0.000332833 (* 1 = 0.000332833 loss)\nI0822 22:18:39.435247 32487 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0822 22:20:56.594110 32487 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0822 22:22:18.939785 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI0822 22:22:18.940157 32487 solver.cpp:404]     Test net output #1: loss = 0.76908 (* 1 = 0.76908 loss)\nI0822 22:22:20.265524 32487 solver.cpp:228] Iteration 61900, 
loss = 0.000249681\nI0822 22:22:20.265576 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:22:20.265594 32487 solver.cpp:244]     Train net output #1: loss = 0.000249404 (* 1 = 0.000249404 loss)\nI0822 22:22:20.339041 32487 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0822 22:24:37.345528 32487 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0822 22:25:59.690712 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80368\nI0822 22:25:59.691079 32487 solver.cpp:404]     Test net output #1: loss = 0.771204 (* 1 = 0.771204 loss)\nI0822 22:26:01.016494 32487 solver.cpp:228] Iteration 62000, loss = 0.000345273\nI0822 22:26:01.016540 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:26:01.016556 32487 solver.cpp:244]     Train net output #1: loss = 0.000344996 (* 1 = 0.000344996 loss)\nI0822 22:26:01.089733 32487 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0822 22:28:18.255487 32487 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0822 22:29:40.619148 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8032\nI0822 22:29:40.619508 32487 solver.cpp:404]     Test net output #1: loss = 0.768886 (* 1 = 0.768886 loss)\nI0822 22:29:41.944818 32487 solver.cpp:228] Iteration 62100, loss = 0.000380148\nI0822 22:29:41.944859 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:29:41.944874 32487 solver.cpp:244]     Train net output #1: loss = 0.000379871 (* 1 = 0.000379871 loss)\nI0822 22:29:42.022982 32487 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0822 22:31:59.235929 32487 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0822 22:33:21.629441 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0822 22:33:21.629793 32487 solver.cpp:404]     Test net output #1: loss = 0.76961 (* 1 = 0.76961 loss)\nI0822 22:33:22.955611 32487 solver.cpp:228] Iteration 62200, loss = 0.00030876\nI0822 22:33:22.955653 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
22:33:22.955669 32487 solver.cpp:244]     Train net output #1: loss = 0.000308484 (* 1 = 0.000308484 loss)\nI0822 22:33:23.033229 32487 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0822 22:35:40.059669 32487 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0822 22:37:02.455036 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI0822 22:37:02.455426 32487 solver.cpp:404]     Test net output #1: loss = 0.767569 (* 1 = 0.767569 loss)\nI0822 22:37:03.780974 32487 solver.cpp:228] Iteration 62300, loss = 0.000404752\nI0822 22:37:03.781028 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:37:03.781044 32487 solver.cpp:244]     Train net output #1: loss = 0.000404476 (* 1 = 0.000404476 loss)\nI0822 22:37:03.856287 32487 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0822 22:39:20.929195 32487 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0822 22:40:43.313524 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80396\nI0822 22:40:43.313894 32487 solver.cpp:404]     Test net output #1: loss = 0.76843 (* 1 = 0.76843 loss)\nI0822 22:40:44.639389 32487 solver.cpp:228] Iteration 62400, loss = 0.000418352\nI0822 22:40:44.639433 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:40:44.639451 32487 solver.cpp:244]     Train net output #1: loss = 0.000418076 (* 1 = 0.000418076 loss)\nI0822 22:40:44.716768 32487 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0822 22:43:01.793649 32487 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0822 22:44:24.178598 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80424\nI0822 22:44:24.178956 32487 solver.cpp:404]     Test net output #1: loss = 0.765181 (* 1 = 0.765181 loss)\nI0822 22:44:25.504935 32487 solver.cpp:228] Iteration 62500, loss = 0.000307768\nI0822 22:44:25.504987 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:44:25.505002 32487 solver.cpp:244]     Train net output #1: loss = 0.000307492 (* 1 = 0.000307492 loss)\nI0822 
22:44:25.586069 32487 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0822 22:46:42.695919 32487 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0822 22:48:05.076804 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80428\nI0822 22:48:05.077162 32487 solver.cpp:404]     Test net output #1: loss = 0.766315 (* 1 = 0.766315 loss)\nI0822 22:48:06.403729 32487 solver.cpp:228] Iteration 62600, loss = 0.000379669\nI0822 22:48:06.403771 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:48:06.403787 32487 solver.cpp:244]     Train net output #1: loss = 0.000379392 (* 1 = 0.000379392 loss)\nI0822 22:48:06.485546 32487 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0822 22:50:23.599947 32487 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0822 22:51:45.980085 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8038\nI0822 22:51:45.980448 32487 solver.cpp:404]     Test net output #1: loss = 0.766021 (* 1 = 0.766021 loss)\nI0822 22:51:47.306354 32487 solver.cpp:228] Iteration 62700, loss = 0.000367816\nI0822 22:51:47.306397 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:51:47.306413 32487 solver.cpp:244]     Train net output #1: loss = 0.00036754 (* 1 = 0.00036754 loss)\nI0822 22:51:47.390784 32487 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0822 22:54:04.672159 32487 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0822 22:55:27.043064 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80448\nI0822 22:55:27.043424 32487 solver.cpp:404]     Test net output #1: loss = 0.764658 (* 1 = 0.764658 loss)\nI0822 22:55:28.369359 32487 solver.cpp:228] Iteration 62800, loss = 0.000323918\nI0822 22:55:28.369405 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:55:28.369424 32487 solver.cpp:244]     Train net output #1: loss = 0.000323642 (* 1 = 0.000323642 loss)\nI0822 22:55:28.440985 32487 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0822 22:57:45.707986 32487 solver.cpp:337] 
Iteration 62900, Testing net (#0)\nI0822 22:59:08.087486 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80384\nI0822 22:59:08.087836 32487 solver.cpp:404]     Test net output #1: loss = 0.764183 (* 1 = 0.764183 loss)\nI0822 22:59:09.413744 32487 solver.cpp:228] Iteration 62900, loss = 0.000346639\nI0822 22:59:09.413789 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:59:09.413805 32487 solver.cpp:244]     Train net output #1: loss = 0.000346362 (* 1 = 0.000346362 loss)\nI0822 22:59:09.489279 32487 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0822 23:01:26.630967 32487 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0822 23:02:49.002252 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0822 23:02:49.002635 32487 solver.cpp:404]     Test net output #1: loss = 0.764898 (* 1 = 0.764898 loss)\nI0822 23:02:50.328371 32487 solver.cpp:228] Iteration 63000, loss = 0.00040133\nI0822 23:02:50.328419 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:02:50.328441 32487 solver.cpp:244]     Train net output #1: loss = 0.000401053 (* 1 = 0.000401053 loss)\nI0822 23:02:50.408536 32487 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0822 23:05:07.584493 32487 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0822 23:06:29.960444 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0822 23:06:29.960832 32487 solver.cpp:404]     Test net output #1: loss = 0.76245 (* 1 = 0.76245 loss)\nI0822 23:06:31.286537 32487 solver.cpp:228] Iteration 63100, loss = 0.000324393\nI0822 23:06:31.286579 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:06:31.286595 32487 solver.cpp:244]     Train net output #1: loss = 0.000324117 (* 1 = 0.000324117 loss)\nI0822 23:06:31.362474 32487 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0822 23:08:48.587332 32487 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0822 23:10:10.964431 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.80416\nI0822 23:10:10.964799 32487 solver.cpp:404]     Test net output #1: loss = 0.764636 (* 1 = 0.764636 loss)\nI0822 23:10:12.290801 32487 solver.cpp:228] Iteration 63200, loss = 0.000342205\nI0822 23:10:12.290843 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:10:12.290858 32487 solver.cpp:244]     Train net output #1: loss = 0.000341928 (* 1 = 0.000341928 loss)\nI0822 23:10:12.371402 32487 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0822 23:12:29.547008 32487 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0822 23:13:51.937451 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80344\nI0822 23:13:51.938019 32487 solver.cpp:404]     Test net output #1: loss = 0.764326 (* 1 = 0.764326 loss)\nI0822 23:13:53.263425 32487 solver.cpp:228] Iteration 63300, loss = 0.000362068\nI0822 23:13:53.263468 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:13:53.263484 32487 solver.cpp:244]     Train net output #1: loss = 0.000361791 (* 1 = 0.000361791 loss)\nI0822 23:13:53.343443 32487 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0822 23:16:10.440013 32487 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0822 23:17:32.819098 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0822 23:17:32.819490 32487 solver.cpp:404]     Test net output #1: loss = 0.765441 (* 1 = 0.765441 loss)\nI0822 23:17:34.144582 32487 solver.cpp:228] Iteration 63400, loss = 0.0003045\nI0822 23:17:34.144624 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:17:34.144640 32487 solver.cpp:244]     Train net output #1: loss = 0.000304224 (* 1 = 0.000304224 loss)\nI0822 23:17:34.228175 32487 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0822 23:19:51.422375 32487 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0822 23:21:13.809171 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80372\nI0822 23:21:13.809517 32487 solver.cpp:404]     Test net output #1: loss = 0.762954 (* 1 = 0.762954 
loss)\nI0822 23:21:15.135139 32487 solver.cpp:228] Iteration 63500, loss = 0.00041462\nI0822 23:21:15.135190 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:21:15.135207 32487 solver.cpp:244]     Train net output #1: loss = 0.000414344 (* 1 = 0.000414344 loss)\nI0822 23:21:15.212805 32487 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0822 23:23:32.537628 32487 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0822 23:24:54.919554 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0822 23:24:54.919910 32487 solver.cpp:404]     Test net output #1: loss = 0.76375 (* 1 = 0.76375 loss)\nI0822 23:24:56.245278 32487 solver.cpp:228] Iteration 63600, loss = 0.000353895\nI0822 23:24:56.245328 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:24:56.245347 32487 solver.cpp:244]     Train net output #1: loss = 0.000353619 (* 1 = 0.000353619 loss)\nI0822 23:24:56.320173 32487 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0822 23:27:13.484441 32487 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0822 23:28:35.857066 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80392\nI0822 23:28:35.857451 32487 solver.cpp:404]     Test net output #1: loss = 0.761718 (* 1 = 0.761718 loss)\nI0822 23:28:37.183205 32487 solver.cpp:228] Iteration 63700, loss = 0.000306036\nI0822 23:28:37.183245 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:28:37.183261 32487 solver.cpp:244]     Train net output #1: loss = 0.000305759 (* 1 = 0.000305759 loss)\nI0822 23:28:37.256150 32487 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0822 23:30:54.374411 32487 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0822 23:32:16.750934 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0822 23:32:16.751291 32487 solver.cpp:404]     Test net output #1: loss = 0.76227 (* 1 = 0.76227 loss)\nI0822 23:32:18.076457 32487 solver.cpp:228] Iteration 63800, loss = 0.000392668\nI0822 23:32:18.076509 32487 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:32:18.076526 32487 solver.cpp:244]     Train net output #1: loss = 0.000392392 (* 1 = 0.000392392 loss)\nI0822 23:32:18.151033 32487 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0822 23:34:35.248440 32487 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0822 23:35:57.611861 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80444\nI0822 23:35:57.612223 32487 solver.cpp:404]     Test net output #1: loss = 0.76025 (* 1 = 0.76025 loss)\nI0822 23:35:58.937834 32487 solver.cpp:228] Iteration 63900, loss = 0.000351889\nI0822 23:35:58.937875 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:35:58.937891 32487 solver.cpp:244]     Train net output #1: loss = 0.000351613 (* 1 = 0.000351613 loss)\nI0822 23:35:59.016177 32487 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0822 23:38:16.239590 32487 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0822 23:39:38.601588 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80428\nI0822 23:39:38.601964 32487 solver.cpp:404]     Test net output #1: loss = 0.761386 (* 1 = 0.761386 loss)\nI0822 23:39:39.927078 32487 solver.cpp:228] Iteration 64000, loss = 0.000289825\nI0822 23:39:39.927124 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:39:39.927140 32487 solver.cpp:244]     Train net output #1: loss = 0.000289549 (* 1 = 0.000289549 loss)\nI0822 23:39:40.002602 32487 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0822 23:41:57.332078 32487 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0822 23:43:19.690800 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI0822 23:43:19.691149 32487 solver.cpp:404]     Test net output #1: loss = 0.761114 (* 1 = 0.761114 loss)\nI0822 23:43:21.017180 32487 solver.cpp:228] Iteration 64100, loss = 0.000351355\nI0822 23:43:21.017223 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:43:21.017240 32487 solver.cpp:244]     Train net 
output #1: loss = 0.000351079 (* 1 = 0.000351079 loss)\nI0822 23:43:21.089041 32487 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0822 23:45:38.333068 32487 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0822 23:47:00.773681 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80408\nI0822 23:47:00.774031 32487 solver.cpp:404]     Test net output #1: loss = 0.76218 (* 1 = 0.76218 loss)\nI0822 23:47:02.100189 32487 solver.cpp:228] Iteration 64200, loss = 0.000330159\nI0822 23:47:02.100245 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:47:02.100270 32487 solver.cpp:244]     Train net output #1: loss = 0.000329882 (* 1 = 0.000329882 loss)\nI0822 23:47:02.173640 32487 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0822 23:49:19.373142 32487 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0822 23:50:41.802585 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80384\nI0822 23:50:41.802934 32487 solver.cpp:404]     Test net output #1: loss = 0.759103 (* 1 = 0.759103 loss)\nI0822 23:50:43.129918 32487 solver.cpp:228] Iteration 64300, loss = 0.000264663\nI0822 23:50:43.129961 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:50:43.129977 32487 solver.cpp:244]     Train net output #1: loss = 0.000264387 (* 1 = 0.000264387 loss)\nI0822 23:50:43.209852 32487 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0822 23:53:00.452271 32487 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0822 23:54:22.840802 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80476\nI0822 23:54:22.841173 32487 solver.cpp:404]     Test net output #1: loss = 0.758935 (* 1 = 0.758935 loss)\nI0822 23:54:24.166676 32487 solver.cpp:228] Iteration 64400, loss = 0.000349877\nI0822 23:54:24.166716 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:54:24.166733 32487 solver.cpp:244]     Train net output #1: loss = 0.000349601 (* 1 = 0.000349601 loss)\nI0822 23:54:24.240149 32487 sgd_solver.cpp:166] Iteration 
64400, lr = 0.035\nI0822 23:56:41.514436 32487 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0822 23:58:03.899685 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80448\nI0822 23:58:03.900055 32487 solver.cpp:404]     Test net output #1: loss = 0.758382 (* 1 = 0.758382 loss)\nI0822 23:58:05.225673 32487 solver.cpp:228] Iteration 64500, loss = 0.00038485\nI0822 23:58:05.225719 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:58:05.225735 32487 solver.cpp:244]     Train net output #1: loss = 0.000384573 (* 1 = 0.000384573 loss)\nI0822 23:58:05.299024 32487 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0823 00:00:22.439069 32487 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0823 00:01:44.829669 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80492\nI0823 00:01:44.830036 32487 solver.cpp:404]     Test net output #1: loss = 0.758644 (* 1 = 0.758644 loss)\nI0823 00:01:46.155495 32487 solver.cpp:228] Iteration 64600, loss = 0.000265544\nI0823 00:01:46.155544 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:01:46.155560 32487 solver.cpp:244]     Train net output #1: loss = 0.000265268 (* 1 = 0.000265268 loss)\nI0823 00:01:46.227967 32487 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0823 00:04:03.320097 32487 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0823 00:05:25.711411 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80408\nI0823 00:05:25.711891 32487 solver.cpp:404]     Test net output #1: loss = 0.758122 (* 1 = 0.758122 loss)\nI0823 00:05:27.037492 32487 solver.cpp:228] Iteration 64700, loss = 0.000330139\nI0823 00:05:27.037535 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:05:27.037551 32487 solver.cpp:244]     Train net output #1: loss = 0.000329863 (* 1 = 0.000329863 loss)\nI0823 00:05:27.111999 32487 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0823 00:07:44.305300 32487 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0823 
00:09:06.692590 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0823 00:09:06.692950 32487 solver.cpp:404]     Test net output #1: loss = 0.760117 (* 1 = 0.760117 loss)\nI0823 00:09:08.018934 32487 solver.cpp:228] Iteration 64800, loss = 0.00040041\nI0823 00:09:08.018970 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:09:08.018985 32487 solver.cpp:244]     Train net output #1: loss = 0.000400134 (* 1 = 0.000400134 loss)\nI0823 00:09:08.099735 32487 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0823 00:11:25.198570 32487 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0823 00:12:47.581487 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80352\nI0823 00:12:47.581845 32487 solver.cpp:404]     Test net output #1: loss = 0.7588 (* 1 = 0.7588 loss)\nI0823 00:12:48.907454 32487 solver.cpp:228] Iteration 64900, loss = 0.000277622\nI0823 00:12:48.907507 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:12:48.907524 32487 solver.cpp:244]     Train net output #1: loss = 0.000277345 (* 1 = 0.000277345 loss)\nI0823 00:12:48.992149 32487 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0823 00:15:06.139425 32487 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0823 00:16:28.515302 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0823 00:16:28.515668 32487 solver.cpp:404]     Test net output #1: loss = 0.759832 (* 1 = 0.759832 loss)\nI0823 00:16:29.840286 32487 solver.cpp:228] Iteration 65000, loss = 0.000362476\nI0823 00:16:29.840338 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:16:29.840354 32487 solver.cpp:244]     Train net output #1: loss = 0.000362199 (* 1 = 0.000362199 loss)\nI0823 00:16:29.922307 32487 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0823 00:18:47.061905 32487 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0823 00:20:09.437782 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80384\nI0823 00:20:09.438127 32487 
solver.cpp:404]     Test net output #1: loss = 0.758416 (* 1 = 0.758416 loss)\nI0823 00:20:10.763124 32487 solver.cpp:228] Iteration 65100, loss = 0.000329126\nI0823 00:20:10.763176 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:20:10.763193 32487 solver.cpp:244]     Train net output #1: loss = 0.00032885 (* 1 = 0.00032885 loss)\nI0823 00:20:10.844524 32487 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0823 00:22:27.985697 32487 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0823 00:23:50.367446 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0823 00:23:50.367846 32487 solver.cpp:404]     Test net output #1: loss = 0.758553 (* 1 = 0.758553 loss)\nI0823 00:23:51.693109 32487 solver.cpp:228] Iteration 65200, loss = 0.000320715\nI0823 00:23:51.693159 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:23:51.693176 32487 solver.cpp:244]     Train net output #1: loss = 0.000320439 (* 1 = 0.000320439 loss)\nI0823 00:23:51.777137 32487 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0823 00:26:08.947824 32487 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0823 00:27:31.314450 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI0823 00:27:31.314837 32487 solver.cpp:404]     Test net output #1: loss = 0.757628 (* 1 = 0.757628 loss)\nI0823 00:27:32.639816 32487 solver.cpp:228] Iteration 65300, loss = 0.000337975\nI0823 00:27:32.639855 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:27:32.639871 32487 solver.cpp:244]     Train net output #1: loss = 0.000337698 (* 1 = 0.000337698 loss)\nI0823 00:27:32.723670 32487 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0823 00:29:50.419158 32487 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0823 00:31:11.970199 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80508\nI0823 00:31:11.970532 32487 solver.cpp:404]     Test net output #1: loss = 0.758666 (* 1 = 0.758666 loss)\nI0823 00:31:13.294333 32487 
solver.cpp:228] Iteration 65400, loss = 0.000363625\nI0823 00:31:13.294373 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:31:13.294395 32487 solver.cpp:244]     Train net output #1: loss = 0.000363349 (* 1 = 0.000363349 loss)\nI0823 00:31:13.379601 32487 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0823 00:33:31.050832 32487 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0823 00:34:52.587010 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80476\nI0823 00:34:52.587291 32487 solver.cpp:404]     Test net output #1: loss = 0.756017 (* 1 = 0.756017 loss)\nI0823 00:34:53.910934 32487 solver.cpp:228] Iteration 65500, loss = 0.000290848\nI0823 00:34:53.910969 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:34:53.910984 32487 solver.cpp:244]     Train net output #1: loss = 0.000290571 (* 1 = 0.000290571 loss)\nI0823 00:34:53.993373 32487 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0823 00:37:11.633023 32487 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0823 00:38:33.163332 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI0823 00:38:33.163599 32487 solver.cpp:404]     Test net output #1: loss = 0.756868 (* 1 = 0.756868 loss)\nI0823 00:38:34.486879 32487 solver.cpp:228] Iteration 65600, loss = 0.000334712\nI0823 00:38:34.486914 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:38:34.486929 32487 solver.cpp:244]     Train net output #1: loss = 0.000334436 (* 1 = 0.000334436 loss)\nI0823 00:38:34.569936 32487 sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0823 00:40:52.247356 32487 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0823 00:42:13.798051 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80404\nI0823 00:42:13.798354 32487 solver.cpp:404]     Test net output #1: loss = 0.756073 (* 1 = 0.756073 loss)\nI0823 00:42:15.122122 32487 solver.cpp:228] Iteration 65700, loss = 0.00030946\nI0823 00:42:15.122162 32487 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0823 00:42:15.122179 32487 solver.cpp:244]     Train net output #1: loss = 0.000309184 (* 1 = 0.000309184 loss)\nI0823 00:42:15.215713 32487 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0823 00:44:32.871842 32487 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0823 00:45:54.370034 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80536\nI0823 00:45:54.370337 32487 solver.cpp:404]     Test net output #1: loss = 0.757228 (* 1 = 0.757228 loss)\nI0823 00:45:55.694211 32487 solver.cpp:228] Iteration 65800, loss = 0.000313933\nI0823 00:45:55.694247 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:45:55.694262 32487 solver.cpp:244]     Train net output #1: loss = 0.000313656 (* 1 = 0.000313656 loss)\nI0823 00:45:55.792328 32487 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0823 00:48:13.416872 32487 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0823 00:49:35.044817 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80376\nI0823 00:49:35.045152 32487 solver.cpp:404]     Test net output #1: loss = 0.757994 (* 1 = 0.757994 loss)\nI0823 00:49:36.372306 32487 solver.cpp:228] Iteration 65900, loss = 0.000360081\nI0823 00:49:36.372350 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:49:36.372370 32487 solver.cpp:244]     Train net output #1: loss = 0.000359805 (* 1 = 0.000359805 loss)\nI0823 00:49:36.454311 32487 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0823 00:51:54.010206 32487 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0823 00:53:15.571890 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0823 00:53:15.572198 32487 solver.cpp:404]     Test net output #1: loss = 0.759297 (* 1 = 0.759297 loss)\nI0823 00:53:16.895845 32487 solver.cpp:228] Iteration 66000, loss = 0.00035742\nI0823 00:53:16.895889 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:53:16.895905 32487 solver.cpp:244]     Train net output #1: loss = 0.000357144 
(* 1 = 0.000357144 loss)\nI0823 00:53:16.985366 32487 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0823 00:55:34.492815 32487 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0823 00:56:56.039414 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80388\nI0823 00:56:56.039708 32487 solver.cpp:404]     Test net output #1: loss = 0.756765 (* 1 = 0.756765 loss)\nI0823 00:56:57.362702 32487 solver.cpp:228] Iteration 66100, loss = 0.00031181\nI0823 00:56:57.362736 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:56:57.362752 32487 solver.cpp:244]     Train net output #1: loss = 0.000311534 (* 1 = 0.000311534 loss)\nI0823 00:56:57.449878 32487 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0823 00:59:15.013581 32487 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0823 01:00:36.554960 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0823 01:00:36.555280 32487 solver.cpp:404]     Test net output #1: loss = 0.758896 (* 1 = 0.758896 loss)\nI0823 01:00:37.878419 32487 solver.cpp:228] Iteration 66200, loss = 0.000337849\nI0823 01:00:37.878460 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:00:37.878476 32487 solver.cpp:244]     Train net output #1: loss = 0.000337573 (* 1 = 0.000337573 loss)\nI0823 01:00:37.961052 32487 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0823 01:02:55.678123 32487 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0823 01:04:17.222461 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0823 01:04:17.222789 32487 solver.cpp:404]     Test net output #1: loss = 0.757482 (* 1 = 0.757482 loss)\nI0823 01:04:18.545614 32487 solver.cpp:228] Iteration 66300, loss = 0.000350942\nI0823 01:04:18.545647 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:04:18.545662 32487 solver.cpp:244]     Train net output #1: loss = 0.000350666 (* 1 = 0.000350666 loss)\nI0823 01:04:18.636656 32487 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0823 
01:06:36.115205 32487 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0823 01:07:57.561432 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80532\nI0823 01:07:57.561750 32487 solver.cpp:404]     Test net output #1: loss = 0.7582 (* 1 = 0.7582 loss)\nI0823 01:07:58.884405 32487 solver.cpp:228] Iteration 66400, loss = 0.000327305\nI0823 01:07:58.884447 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:07:58.884464 32487 solver.cpp:244]     Train net output #1: loss = 0.000327029 (* 1 = 0.000327029 loss)\nI0823 01:07:58.965672 32487 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0823 01:10:16.468637 32487 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0823 01:11:37.918958 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0823 01:11:37.919246 32487 solver.cpp:404]     Test net output #1: loss = 0.758235 (* 1 = 0.758235 loss)\nI0823 01:11:39.241632 32487 solver.cpp:228] Iteration 66500, loss = 0.00033921\nI0823 01:11:39.241673 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:11:39.241688 32487 solver.cpp:244]     Train net output #1: loss = 0.000338933 (* 1 = 0.000338933 loss)\nI0823 01:11:39.332094 32487 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0823 01:13:56.953732 32487 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0823 01:15:18.412097 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80504\nI0823 01:15:18.412422 32487 solver.cpp:404]     Test net output #1: loss = 0.7586 (* 1 = 0.7586 loss)\nI0823 01:15:19.735076 32487 solver.cpp:228] Iteration 66600, loss = 0.000323831\nI0823 01:15:19.735119 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:15:19.735136 32487 solver.cpp:244]     Train net output #1: loss = 0.000323554 (* 1 = 0.000323554 loss)\nI0823 01:15:19.818532 32487 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0823 01:17:37.405679 32487 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0823 01:18:58.860170 32487 solver.cpp:404]     
Test net output #0: accuracy = 0.804\nI0823 01:18:58.860472 32487 solver.cpp:404]     Test net output #1: loss = 0.75754 (* 1 = 0.75754 loss)\nI0823 01:19:00.182363 32487 solver.cpp:228] Iteration 66700, loss = 0.000321407\nI0823 01:19:00.182404 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:19:00.182418 32487 solver.cpp:244]     Train net output #1: loss = 0.000321131 (* 1 = 0.000321131 loss)\nI0823 01:19:00.268550 32487 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0823 01:21:17.822762 32487 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0823 01:22:39.273959 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80428\nI0823 01:22:39.274271 32487 solver.cpp:404]     Test net output #1: loss = 0.75855 (* 1 = 0.75855 loss)\nI0823 01:22:40.597106 32487 solver.cpp:228] Iteration 66800, loss = 0.000318584\nI0823 01:22:40.597148 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:22:40.597164 32487 solver.cpp:244]     Train net output #1: loss = 0.000318308 (* 1 = 0.000318308 loss)\nI0823 01:22:40.682370 32487 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0823 01:24:58.307144 32487 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0823 01:26:19.757170 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80372\nI0823 01:26:19.757490 32487 solver.cpp:404]     Test net output #1: loss = 0.75705 (* 1 = 0.75705 loss)\nI0823 01:26:21.079792 32487 solver.cpp:228] Iteration 66900, loss = 0.000336032\nI0823 01:26:21.079834 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:26:21.079849 32487 solver.cpp:244]     Train net output #1: loss = 0.000335755 (* 1 = 0.000335755 loss)\nI0823 01:26:21.160768 32487 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0823 01:28:38.734586 32487 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0823 01:30:00.189257 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI0823 01:30:00.189579 32487 solver.cpp:404]     Test net output #1: loss = 
0.75749 (* 1 = 0.75749 loss)\nI0823 01:30:01.511996 32487 solver.cpp:228] Iteration 67000, loss = 0.000303484\nI0823 01:30:01.512029 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:30:01.512045 32487 solver.cpp:244]     Train net output #1: loss = 0.000303208 (* 1 = 0.000303208 loss)\nI0823 01:30:01.602978 32487 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0823 01:32:19.153568 32487 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0823 01:33:40.611011 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8042\nI0823 01:33:40.611321 32487 solver.cpp:404]     Test net output #1: loss = 0.756541 (* 1 = 0.756541 loss)\nI0823 01:33:41.934914 32487 solver.cpp:228] Iteration 67100, loss = 0.000352657\nI0823 01:33:41.934962 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:33:41.934985 32487 solver.cpp:244]     Train net output #1: loss = 0.000352381 (* 1 = 0.000352381 loss)\nI0823 01:33:42.025642 32487 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0823 01:35:59.578320 32487 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0823 01:37:21.039572 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8054\nI0823 01:37:21.039898 32487 solver.cpp:404]     Test net output #1: loss = 0.756018 (* 1 = 0.756018 loss)\nI0823 01:37:22.363512 32487 solver.cpp:228] Iteration 67200, loss = 0.00036775\nI0823 01:37:22.363548 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:37:22.363570 32487 solver.cpp:244]     Train net output #1: loss = 0.000367474 (* 1 = 0.000367474 loss)\nI0823 01:37:22.445017 32487 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0823 01:39:40.131395 32487 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0823 01:41:01.615659 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80476\nI0823 01:41:01.615988 32487 solver.cpp:404]     Test net output #1: loss = 0.753629 (* 1 = 0.753629 loss)\nI0823 01:41:02.939427 32487 solver.cpp:228] Iteration 67300, loss = 
0.000272669\nI0823 01:41:02.939473 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:41:02.939504 32487 solver.cpp:244]     Train net output #1: loss = 0.000272393 (* 1 = 0.000272393 loss)\nI0823 01:41:03.020675 32487 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0823 01:43:20.633570 32487 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0823 01:44:42.183955 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0823 01:44:42.184269 32487 solver.cpp:404]     Test net output #1: loss = 0.754602 (* 1 = 0.754602 loss)\nI0823 01:44:43.507830 32487 solver.cpp:228] Iteration 67400, loss = 0.000303193\nI0823 01:44:43.507875 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:44:43.507899 32487 solver.cpp:244]     Train net output #1: loss = 0.000302917 (* 1 = 0.000302917 loss)\nI0823 01:44:43.591100 32487 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0823 01:47:01.112035 32487 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0823 01:48:22.664389 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI0823 01:48:22.664705 32487 solver.cpp:404]     Test net output #1: loss = 0.755003 (* 1 = 0.755003 loss)\nI0823 01:48:23.988065 32487 solver.cpp:228] Iteration 67500, loss = 0.00029577\nI0823 01:48:23.988113 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:48:23.988137 32487 solver.cpp:244]     Train net output #1: loss = 0.000295494 (* 1 = 0.000295494 loss)\nI0823 01:48:24.079048 32487 sgd_solver.cpp:166] Iteration 67500, lr = 0.035\nI0823 01:50:41.588551 32487 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0823 01:52:03.140321 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80548\nI0823 01:52:03.140647 32487 solver.cpp:404]     Test net output #1: loss = 0.75699 (* 1 = 0.75699 loss)\nI0823 01:52:04.463865 32487 solver.cpp:228] Iteration 67600, loss = 0.000291888\nI0823 01:52:04.463907 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:52:04.463923 
32487 solver.cpp:244]     Train net output #1: loss = 0.000291611 (* 1 = 0.000291611 loss)\nI0823 01:52:04.553949 32487 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0823 01:54:22.103487 32487 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0823 01:55:43.640817 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80464\nI0823 01:55:43.641111 32487 solver.cpp:404]     Test net output #1: loss = 0.755144 (* 1 = 0.755144 loss)\nI0823 01:55:44.964077 32487 solver.cpp:228] Iteration 67700, loss = 0.000339601\nI0823 01:55:44.964123 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:55:44.964139 32487 solver.cpp:244]     Train net output #1: loss = 0.000339325 (* 1 = 0.000339325 loss)\nI0823 01:55:45.048224 32487 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0823 01:58:02.673071 32487 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0823 01:59:24.211884 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80524\nI0823 01:59:24.212177 32487 solver.cpp:404]     Test net output #1: loss = 0.75796 (* 1 = 0.75796 loss)\nI0823 01:59:25.534544 32487 solver.cpp:228] Iteration 67800, loss = 0.000346703\nI0823 01:59:25.534588 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:59:25.534605 32487 solver.cpp:244]     Train net output #1: loss = 0.000346427 (* 1 = 0.000346427 loss)\nI0823 01:59:25.620611 32487 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0823 02:01:43.295382 32487 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0823 02:03:04.834026 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0823 02:03:04.834360 32487 solver.cpp:404]     Test net output #1: loss = 0.754892 (* 1 = 0.754892 loss)\nI0823 02:03:06.156669 32487 solver.cpp:228] Iteration 67900, loss = 0.000304367\nI0823 02:03:06.156715 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:03:06.156738 32487 solver.cpp:244]     Train net output #1: loss = 0.00030409 (* 1 = 0.00030409 loss)\nI0823 02:03:06.240221 
32487 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0823 02:05:23.848246 32487 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0823 02:06:45.380686 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80564\nI0823 02:06:45.380957 32487 solver.cpp:404]     Test net output #1: loss = 0.756716 (* 1 = 0.756716 loss)\nI0823 02:06:46.703352 32487 solver.cpp:228] Iteration 68000, loss = 0.000332118\nI0823 02:06:46.703398 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:06:46.703414 32487 solver.cpp:244]     Train net output #1: loss = 0.000331842 (* 1 = 0.000331842 loss)\nI0823 02:06:46.786792 32487 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0823 02:09:04.367286 32487 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0823 02:10:25.908326 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0823 02:10:25.908640 32487 solver.cpp:404]     Test net output #1: loss = 0.755368 (* 1 = 0.755368 loss)\nI0823 02:10:27.231025 32487 solver.cpp:228] Iteration 68100, loss = 0.000357974\nI0823 02:10:27.231060 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:10:27.231076 32487 solver.cpp:244]     Train net output #1: loss = 0.000357697 (* 1 = 0.000357697 loss)\nI0823 02:10:27.319208 32487 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0823 02:12:45.033985 32487 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0823 02:14:06.567025 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80568\nI0823 02:14:06.567353 32487 solver.cpp:404]     Test net output #1: loss = 0.756672 (* 1 = 0.756672 loss)\nI0823 02:14:07.889643 32487 solver.cpp:228] Iteration 68200, loss = 0.000332563\nI0823 02:14:07.889688 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:14:07.889703 32487 solver.cpp:244]     Train net output #1: loss = 0.000332286 (* 1 = 0.000332286 loss)\nI0823 02:14:07.972434 32487 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0823 02:16:25.551440 32487 solver.cpp:337] Iteration 
68300, Testing net (#0)\nI0823 02:17:47.081280 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80492\nI0823 02:17:47.081578 32487 solver.cpp:404]     Test net output #1: loss = 0.757526 (* 1 = 0.757526 loss)\nI0823 02:17:48.403812 32487 solver.cpp:228] Iteration 68300, loss = 0.000334402\nI0823 02:17:48.403846 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:17:48.403861 32487 solver.cpp:244]     Train net output #1: loss = 0.000334126 (* 1 = 0.000334126 loss)\nI0823 02:17:48.492386 32487 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0823 02:20:06.041625 32487 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0823 02:21:27.561821 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80576\nI0823 02:21:27.562120 32487 solver.cpp:404]     Test net output #1: loss = 0.760028 (* 1 = 0.760028 loss)\nI0823 02:21:28.884374 32487 solver.cpp:228] Iteration 68400, loss = 0.000338013\nI0823 02:21:28.884419 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:21:28.884435 32487 solver.cpp:244]     Train net output #1: loss = 0.000337736 (* 1 = 0.000337736 loss)\nI0823 02:21:28.970831 32487 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0823 02:23:46.576917 32487 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0823 02:25:08.102067 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0823 02:25:08.102370 32487 solver.cpp:404]     Test net output #1: loss = 0.757084 (* 1 = 0.757084 loss)\nI0823 02:25:09.425092 32487 solver.cpp:228] Iteration 68500, loss = 0.000288755\nI0823 02:25:09.425127 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:25:09.425143 32487 solver.cpp:244]     Train net output #1: loss = 0.000288479 (* 1 = 0.000288479 loss)\nI0823 02:25:09.514730 32487 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0823 02:27:27.039871 32487 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0823 02:28:48.573793 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.80588\nI0823 02:28:48.574089 32487 solver.cpp:404]     Test net output #1: loss = 0.758402 (* 1 = 0.758402 loss)\nI0823 02:28:49.897569 32487 solver.cpp:228] Iteration 68600, loss = 0.000329877\nI0823 02:28:49.897615 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:28:49.897631 32487 solver.cpp:244]     Train net output #1: loss = 0.0003296 (* 1 = 0.0003296 loss)\nI0823 02:28:49.986423 32487 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0823 02:31:07.585810 32487 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0823 02:32:29.116021 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80488\nI0823 02:32:29.116349 32487 solver.cpp:404]     Test net output #1: loss = 0.756273 (* 1 = 0.756273 loss)\nI0823 02:32:30.439600 32487 solver.cpp:228] Iteration 68700, loss = 0.000332611\nI0823 02:32:30.439635 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:32:30.439651 32487 solver.cpp:244]     Train net output #1: loss = 0.000332335 (* 1 = 0.000332335 loss)\nI0823 02:32:30.520735 32487 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0823 02:34:48.029956 32487 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0823 02:36:09.562527 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80604\nI0823 02:36:09.562836 32487 solver.cpp:404]     Test net output #1: loss = 0.755712 (* 1 = 0.755712 loss)\nI0823 02:36:10.885895 32487 solver.cpp:228] Iteration 68800, loss = 0.000308718\nI0823 02:36:10.885941 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:36:10.885957 32487 solver.cpp:244]     Train net output #1: loss = 0.000308441 (* 1 = 0.000308441 loss)\nI0823 02:36:10.981854 32487 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0823 02:38:28.574370 32487 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0823 02:39:50.109067 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80496\nI0823 02:39:50.109375 32487 solver.cpp:404]     Test net output #1: loss = 0.756672 (* 1 = 0.756672 
loss)\nI0823 02:39:51.432365 32487 solver.cpp:228] Iteration 68900, loss = 0.000338209\nI0823 02:39:51.432400 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:39:51.432416 32487 solver.cpp:244]     Train net output #1: loss = 0.000337932 (* 1 = 0.000337932 loss)\nI0823 02:39:51.511973 32487 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0823 02:42:09.113704 32487 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0823 02:43:30.656186 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80504\nI0823 02:43:30.656514 32487 solver.cpp:404]     Test net output #1: loss = 0.759042 (* 1 = 0.759042 loss)\nI0823 02:43:31.979877 32487 solver.cpp:228] Iteration 69000, loss = 0.000304333\nI0823 02:43:31.979923 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:43:31.979939 32487 solver.cpp:244]     Train net output #1: loss = 0.000304057 (* 1 = 0.000304057 loss)\nI0823 02:43:32.066114 32487 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0823 02:45:49.679869 32487 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0823 02:47:11.215255 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80476\nI0823 02:47:11.215550 32487 solver.cpp:404]     Test net output #1: loss = 0.755422 (* 1 = 0.755422 loss)\nI0823 02:47:12.539228 32487 solver.cpp:228] Iteration 69100, loss = 0.000340892\nI0823 02:47:12.539273 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:47:12.539290 32487 solver.cpp:244]     Train net output #1: loss = 0.000340615 (* 1 = 0.000340615 loss)\nI0823 02:47:12.622534 32487 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0823 02:49:30.164031 32487 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0823 02:50:51.701782 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8062\nI0823 02:50:51.702100 32487 solver.cpp:404]     Test net output #1: loss = 0.757625 (* 1 = 0.757625 loss)\nI0823 02:50:53.024493 32487 solver.cpp:228] Iteration 69200, loss = 0.000328578\nI0823 02:50:53.024528 
32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:50:53.024543 32487 solver.cpp:244]     Train net output #1: loss = 0.000328301 (* 1 = 0.000328301 loss)\nI0823 02:50:53.111902 32487 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0823 02:53:10.637790 32487 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0823 02:54:32.167168 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80524\nI0823 02:54:32.167457 32487 solver.cpp:404]     Test net output #1: loss = 0.757329 (* 1 = 0.757329 loss)\nI0823 02:54:33.489914 32487 solver.cpp:228] Iteration 69300, loss = 0.000293392\nI0823 02:54:33.489960 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:54:33.489975 32487 solver.cpp:244]     Train net output #1: loss = 0.000293115 (* 1 = 0.000293115 loss)\nI0823 02:54:33.577077 32487 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0823 02:56:51.161934 32487 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0823 02:58:12.692570 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80636\nI0823 02:58:12.692898 32487 solver.cpp:404]     Test net output #1: loss = 0.757262 (* 1 = 0.757262 loss)\nI0823 02:58:14.016367 32487 solver.cpp:228] Iteration 69400, loss = 0.000303435\nI0823 02:58:14.016413 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:58:14.016429 32487 solver.cpp:244]     Train net output #1: loss = 0.000303158 (* 1 = 0.000303158 loss)\nI0823 02:58:14.101207 32487 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0823 03:00:31.698545 32487 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0823 03:01:53.236999 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80444\nI0823 03:01:53.237303 32487 solver.cpp:404]     Test net output #1: loss = 0.757785 (* 1 = 0.757785 loss)\nI0823 03:01:54.560004 32487 solver.cpp:228] Iteration 69500, loss = 0.000302056\nI0823 03:01:54.560050 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:01:54.560065 32487 solver.cpp:244]     
Train net output #1: loss = 0.00030178 (* 1 = 0.00030178 loss)\nI0823 03:01:54.642494 32487 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0823 03:04:12.214040 32487 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0823 03:05:33.755867 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0823 03:05:33.756203 32487 solver.cpp:404]     Test net output #1: loss = 0.758625 (* 1 = 0.758625 loss)\nI0823 03:05:35.074988 32487 solver.cpp:228] Iteration 69600, loss = 0.000325886\nI0823 03:05:35.075039 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:05:35.075064 32487 solver.cpp:244]     Train net output #1: loss = 0.00032561 (* 1 = 0.00032561 loss)\nI0823 03:05:35.168328 32487 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0823 03:07:52.815805 32487 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0823 03:09:14.357800 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80488\nI0823 03:09:14.358093 32487 solver.cpp:404]     Test net output #1: loss = 0.75671 (* 1 = 0.75671 loss)\nI0823 03:09:15.681609 32487 solver.cpp:228] Iteration 69700, loss = 0.000275375\nI0823 03:09:15.681660 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:09:15.681685 32487 solver.cpp:244]     Train net output #1: loss = 0.000275098 (* 1 = 0.000275098 loss)\nI0823 03:09:15.769706 32487 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0823 03:11:33.495832 32487 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0823 03:12:55.040627 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80608\nI0823 03:12:55.040973 32487 solver.cpp:404]     Test net output #1: loss = 0.759061 (* 1 = 0.759061 loss)\nI0823 03:12:56.364699 32487 solver.cpp:228] Iteration 69800, loss = 0.000311279\nI0823 03:12:56.364735 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:12:56.364750 32487 solver.cpp:244]     Train net output #1: loss = 0.000311003 (* 1 = 0.000311003 loss)\nI0823 03:12:56.444192 32487 sgd_solver.cpp:166] 
Iteration 69800, lr = 0.035\nI0823 03:15:14.008164 32487 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0823 03:16:35.552639 32487 solver.cpp:404]     Test net output #0: accuracy = 0.80504\nI0823 03:16:35.552960 32487 solver.cpp:404]     Test net output #1: loss = 0.75685 (* 1 = 0.75685 loss)\nI0823 03:16:36.875278 32487 solver.cpp:228] Iteration 69900, loss = 0.000314323\nI0823 03:16:36.875329 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:16:36.875345 32487 solver.cpp:244]     Train net output #1: loss = 0.000314046 (* 1 = 0.000314046 loss)\nI0823 03:16:36.960420 32487 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0823 03:18:54.537009 32487 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0823 03:20:16.068188 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI0823 03:20:16.068538 32487 solver.cpp:404]     Test net output #1: loss = 0.757662 (* 1 = 0.757662 loss)\nI0823 03:20:17.390549 32487 solver.cpp:228] Iteration 70000, loss = 0.000283916\nI0823 03:20:17.390594 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:20:17.390609 32487 solver.cpp:244]     Train net output #1: loss = 0.000283639 (* 1 = 0.000283639 loss)\nI0823 03:20:17.480918 32487 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0823 03:20:17.480938 32487 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0823 03:22:35.136015 32487 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0823 03:23:56.616551 32487 solver.cpp:404]     Test net output #0: accuracy = 0.81464\nI0823 03:23:56.616899 32487 solver.cpp:404]     Test net output #1: loss = 0.720122 (* 1 = 0.720122 loss)\nI0823 03:23:57.940256 32487 solver.cpp:228] Iteration 70100, loss = 0.000335803\nI0823 03:23:57.940304 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:23:57.940318 32487 solver.cpp:244]     Train net output #1: loss = 0.000335527 (* 1 = 0.000335527 loss)\nI0823 03:23:58.030921 32487 sgd_solver.cpp:166] Iteration 70100, lr = 
0.0035\nI0823 03:26:15.627022 32487 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0823 03:27:37.052124 32487 solver.cpp:404]     Test net output #0: accuracy = 0.82568\nI0823 03:27:37.052469 32487 solver.cpp:404]     Test net output #1: loss = 0.690351 (* 1 = 0.690351 loss)\nI0823 03:27:38.375171 32487 solver.cpp:228] Iteration 70200, loss = 0.000306574\nI0823 03:27:38.375216 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:27:38.375232 32487 solver.cpp:244]     Train net output #1: loss = 0.000306298 (* 1 = 0.000306298 loss)\nI0823 03:27:38.464723 32487 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0823 03:29:56.029651 32487 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0823 03:31:17.462069 32487 solver.cpp:404]     Test net output #0: accuracy = 0.83236\nI0823 03:31:17.462415 32487 solver.cpp:404]     Test net output #1: loss = 0.660556 (* 1 = 0.660556 loss)\nI0823 03:31:18.785830 32487 solver.cpp:228] Iteration 70300, loss = 0.000303079\nI0823 03:31:18.785873 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:31:18.785889 32487 solver.cpp:244]     Train net output #1: loss = 0.000302802 (* 1 = 0.000302802 loss)\nI0823 03:31:18.873667 32487 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0823 03:33:36.399293 32487 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0823 03:34:57.828191 32487 solver.cpp:404]     Test net output #0: accuracy = 0.84032\nI0823 03:34:57.828553 32487 solver.cpp:404]     Test net output #1: loss = 0.643077 (* 1 = 0.643077 loss)\nI0823 03:34:59.150916 32487 solver.cpp:228] Iteration 70400, loss = 0.000324021\nI0823 03:34:59.150961 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:34:59.150977 32487 solver.cpp:244]     Train net output #1: loss = 0.000323745 (* 1 = 0.000323745 loss)\nI0823 03:34:59.232161 32487 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0823 03:37:16.821961 32487 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0823 03:38:38.246536 
32487 solver.cpp:404]     Test net output #0: accuracy = 0.84484\nI0823 03:38:38.246894 32487 solver.cpp:404]     Test net output #1: loss = 0.622047 (* 1 = 0.622047 loss)\nI0823 03:38:39.570313 32487 solver.cpp:228] Iteration 70500, loss = 0.000318357\nI0823 03:38:39.570363 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:38:39.570386 32487 solver.cpp:244]     Train net output #1: loss = 0.000318081 (* 1 = 0.000318081 loss)\nI0823 03:38:39.654090 32487 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0823 03:40:57.192821 32487 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0823 03:42:18.706900 32487 solver.cpp:404]     Test net output #0: accuracy = 0.84888\nI0823 03:42:18.707242 32487 solver.cpp:404]     Test net output #1: loss = 0.612663 (* 1 = 0.612663 loss)\nI0823 03:42:20.030263 32487 solver.cpp:228] Iteration 70600, loss = 0.000338203\nI0823 03:42:20.030311 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:42:20.030335 32487 solver.cpp:244]     Train net output #1: loss = 0.000337926 (* 1 = 0.000337926 loss)\nI0823 03:42:20.119742 32487 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0823 03:44:37.638499 32487 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0823 03:45:59.115110 32487 solver.cpp:404]     Test net output #0: accuracy = 0.84992\nI0823 03:45:59.115458 32487 solver.cpp:404]     Test net output #1: loss = 0.596988 (* 1 = 0.596988 loss)\nI0823 03:46:00.438591 32487 solver.cpp:228] Iteration 70700, loss = 0.000320656\nI0823 03:46:00.438638 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:46:00.438663 32487 solver.cpp:244]     Train net output #1: loss = 0.00032038 (* 1 = 0.00032038 loss)\nI0823 03:46:00.528373 32487 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0823 03:48:18.099073 32487 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0823 03:49:39.572319 32487 solver.cpp:404]     Test net output #0: accuracy = 0.85208\nI0823 03:49:39.572648 32487 solver.cpp:404]   
  Test net output #1: loss = 0.593687 (* 1 = 0.593687 loss)\nI0823 03:49:40.896323 32487 solver.cpp:228] Iteration 70800, loss = 0.000321545\nI0823 03:49:40.896373 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:49:40.896395 32487 solver.cpp:244]     Train net output #1: loss = 0.000321269 (* 1 = 0.000321269 loss)\nI0823 03:49:40.984067 32487 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0823 03:51:58.552426 32487 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0823 03:53:20.007556 32487 solver.cpp:404]     Test net output #0: accuracy = 0.85472\nI0823 03:53:20.007870 32487 solver.cpp:404]     Test net output #1: loss = 0.581213 (* 1 = 0.581213 loss)\nI0823 03:53:21.331202 32487 solver.cpp:228] Iteration 70900, loss = 0.000325018\nI0823 03:53:21.331250 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:53:21.331275 32487 solver.cpp:244]     Train net output #1: loss = 0.000324741 (* 1 = 0.000324741 loss)\nI0823 03:53:21.418968 32487 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0823 03:55:39.085103 32487 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0823 03:57:00.526523 32487 solver.cpp:404]     Test net output #0: accuracy = 0.855\nI0823 03:57:00.526820 32487 solver.cpp:404]     Test net output #1: loss = 0.581573 (* 1 = 0.581573 loss)\nI0823 03:57:01.849086 32487 solver.cpp:228] Iteration 71000, loss = 0.000304432\nI0823 03:57:01.849129 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:57:01.849153 32487 solver.cpp:244]     Train net output #1: loss = 0.000304155 (* 1 = 0.000304155 loss)\nI0823 03:57:01.947769 32487 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0823 03:59:19.566473 32487 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0823 04:00:41.105851 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8566\nI0823 04:00:41.106164 32487 solver.cpp:404]     Test net output #1: loss = 0.57113 (* 1 = 0.57113 loss)\nI0823 04:00:42.428768 32487 solver.cpp:228] Iteration 
71100, loss = 0.000290996\nI0823 04:00:42.428814 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:00:42.428838 32487 solver.cpp:244]     Train net output #1: loss = 0.000290719 (* 1 = 0.000290719 loss)\nI0823 04:00:42.514478 32487 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0823 04:03:00.114313 32487 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0823 04:04:21.651744 32487 solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0823 04:04:21.652076 32487 solver.cpp:404]     Test net output #1: loss = 0.573854 (* 1 = 0.573854 loss)\nI0823 04:04:22.974918 32487 solver.cpp:228] Iteration 71200, loss = 0.000305133\nI0823 04:04:22.974951 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:04:22.974967 32487 solver.cpp:244]     Train net output #1: loss = 0.000304856 (* 1 = 0.000304856 loss)\nI0823 04:04:23.057005 32487 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0823 04:06:40.654085 32487 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0823 04:08:02.196293 32487 solver.cpp:404]     Test net output #0: accuracy = 0.85852\nI0823 04:08:02.196609 32487 solver.cpp:404]     Test net output #1: loss = 0.56434 (* 1 = 0.56434 loss)\nI0823 04:08:03.518836 32487 solver.cpp:228] Iteration 71300, loss = 0.000310696\nI0823 04:08:03.518882 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:08:03.518898 32487 solver.cpp:244]     Train net output #1: loss = 0.00031042 (* 1 = 0.00031042 loss)\nI0823 04:08:03.604145 32487 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0823 04:10:21.173846 32487 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0823 04:11:42.720731 32487 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0823 04:11:42.721053 32487 solver.cpp:404]     Test net output #1: loss = 0.569195 (* 1 = 0.569195 loss)\nI0823 04:11:44.044198 32487 solver.cpp:228] Iteration 71400, loss = 0.000302371\nI0823 04:11:44.044240 32487 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0823 04:11:44.044256 32487 solver.cpp:244]     Train net output #1: loss = 0.000302095 (* 1 = 0.000302095 loss)\nI0823 04:11:44.136461 32487 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0823 04:14:01.832988 32487 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0823 04:15:23.379135 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86012\nI0823 04:15:23.379453 32487 solver.cpp:404]     Test net output #1: loss = 0.560487 (* 1 = 0.560487 loss)\nI0823 04:15:24.702994 32487 solver.cpp:228] Iteration 71500, loss = 0.000303412\nI0823 04:15:24.703035 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:15:24.703050 32487 solver.cpp:244]     Train net output #1: loss = 0.000303136 (* 1 = 0.000303136 loss)\nI0823 04:15:24.787688 32487 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0823 04:17:42.429045 32487 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0823 04:19:03.970515 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86\nI0823 04:19:03.970857 32487 solver.cpp:404]     Test net output #1: loss = 0.566249 (* 1 = 0.566249 loss)\nI0823 04:19:05.293138 32487 solver.cpp:228] Iteration 71600, loss = 0.00031942\nI0823 04:19:05.293179 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:19:05.293195 32487 solver.cpp:244]     Train net output #1: loss = 0.000319143 (* 1 = 0.000319143 loss)\nI0823 04:19:05.378201 32487 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0823 04:21:23.011550 32487 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0823 04:22:44.544487 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86208\nI0823 04:22:44.544847 32487 solver.cpp:404]     Test net output #1: loss = 0.558062 (* 1 = 0.558062 loss)\nI0823 04:22:45.868368 32487 solver.cpp:228] Iteration 71700, loss = 0.000316805\nI0823 04:22:45.868412 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:22:45.868428 32487 solver.cpp:244]     Train net output #1: loss = 0.000316529 (* 1 = 0.000316529 
loss)\nI0823 04:22:45.952558 32487 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0823 04:25:03.438411 32487 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0823 04:26:24.975086 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86212\nI0823 04:26:24.975450 32487 solver.cpp:404]     Test net output #1: loss = 0.564538 (* 1 = 0.564538 loss)\nI0823 04:26:26.298115 32487 solver.cpp:228] Iteration 71800, loss = 0.000284594\nI0823 04:26:26.298159 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:26:26.298176 32487 solver.cpp:244]     Train net output #1: loss = 0.000284318 (* 1 = 0.000284318 loss)\nI0823 04:26:26.386876 32487 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0823 04:28:44.090766 32487 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0823 04:30:05.618916 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86436\nI0823 04:30:05.619254 32487 solver.cpp:404]     Test net output #1: loss = 0.556393 (* 1 = 0.556393 loss)\nI0823 04:30:06.942569 32487 solver.cpp:228] Iteration 71900, loss = 0.000311773\nI0823 04:30:06.942612 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:30:06.942629 32487 solver.cpp:244]     Train net output #1: loss = 0.000311497 (* 1 = 0.000311497 loss)\nI0823 04:30:07.027048 32487 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0823 04:32:24.672426 32487 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0823 04:33:46.211480 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86296\nI0823 04:33:46.211833 32487 solver.cpp:404]     Test net output #1: loss = 0.563542 (* 1 = 0.563542 loss)\nI0823 04:33:47.534358 32487 solver.cpp:228] Iteration 72000, loss = 0.000272656\nI0823 04:33:47.534399 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:33:47.534415 32487 solver.cpp:244]     Train net output #1: loss = 0.00027238 (* 1 = 0.00027238 loss)\nI0823 04:33:47.616150 32487 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0823 04:36:05.097424 
32487 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0823 04:37:26.636940 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0823 04:37:26.637291 32487 solver.cpp:404]     Test net output #1: loss = 0.555494 (* 1 = 0.555494 loss)\nI0823 04:37:27.960129 32487 solver.cpp:228] Iteration 72100, loss = 0.000255072\nI0823 04:37:27.960160 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:37:27.960176 32487 solver.cpp:244]     Train net output #1: loss = 0.000254795 (* 1 = 0.000254795 loss)\nI0823 04:37:28.041756 32487 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0823 04:39:45.678833 32487 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0823 04:41:07.211884 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86264\nI0823 04:41:07.212229 32487 solver.cpp:404]     Test net output #1: loss = 0.562773 (* 1 = 0.562773 loss)\nI0823 04:41:08.534783 32487 solver.cpp:228] Iteration 72200, loss = 0.000290522\nI0823 04:41:08.534817 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:41:08.534833 32487 solver.cpp:244]     Train net output #1: loss = 0.000290246 (* 1 = 0.000290246 loss)\nI0823 04:41:08.616134 32487 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0823 04:43:26.215121 32487 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0823 04:44:47.755842 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86452\nI0823 04:44:47.756188 32487 solver.cpp:404]     Test net output #1: loss = 0.554897 (* 1 = 0.554897 loss)\nI0823 04:44:49.078614 32487 solver.cpp:228] Iteration 72300, loss = 0.000298718\nI0823 04:44:49.078657 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:44:49.078673 32487 solver.cpp:244]     Train net output #1: loss = 0.000298442 (* 1 = 0.000298442 loss)\nI0823 04:44:49.173358 32487 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0823 04:47:06.656646 32487 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0823 04:48:28.197010 32487 solver.cpp:404]     Test 
net output #0: accuracy = 0.8628\nI0823 04:48:28.197356 32487 solver.cpp:404]     Test net output #1: loss = 0.56246 (* 1 = 0.56246 loss)\nI0823 04:48:29.520395 32487 solver.cpp:228] Iteration 72400, loss = 0.000271621\nI0823 04:48:29.520440 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:48:29.520457 32487 solver.cpp:244]     Train net output #1: loss = 0.000271345 (* 1 = 0.000271345 loss)\nI0823 04:48:29.606721 32487 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0823 04:50:47.360865 32487 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0823 04:52:08.904925 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86448\nI0823 04:52:08.905266 32487 solver.cpp:404]     Test net output #1: loss = 0.554589 (* 1 = 0.554589 loss)\nI0823 04:52:10.228448 32487 solver.cpp:228] Iteration 72500, loss = 0.000295477\nI0823 04:52:10.228495 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:52:10.228519 32487 solver.cpp:244]     Train net output #1: loss = 0.000295201 (* 1 = 0.000295201 loss)\nI0823 04:52:10.320888 32487 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0823 04:54:28.167577 32487 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0823 04:55:49.707077 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86244\nI0823 04:55:49.707439 32487 solver.cpp:404]     Test net output #1: loss = 0.562436 (* 1 = 0.562436 loss)\nI0823 04:55:51.030844 32487 solver.cpp:228] Iteration 72600, loss = 0.000287974\nI0823 04:55:51.030891 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:55:51.030915 32487 solver.cpp:244]     Train net output #1: loss = 0.000287698 (* 1 = 0.000287698 loss)\nI0823 04:55:51.121306 32487 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0823 04:58:08.810029 32487 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0823 04:59:30.369575 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86416\nI0823 04:59:30.369922 32487 solver.cpp:404]     Test net output #1: loss = 
0.554531 (* 1 = 0.554531 loss)\nI0823 04:59:31.693513 32487 solver.cpp:228] Iteration 72700, loss = 0.00029131\nI0823 04:59:31.693559 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:59:31.693574 32487 solver.cpp:244]     Train net output #1: loss = 0.000291034 (* 1 = 0.000291034 loss)\nI0823 04:59:31.777937 32487 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0823 05:01:49.371587 32487 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0823 05:03:10.909149 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86216\nI0823 05:03:10.909512 32487 solver.cpp:404]     Test net output #1: loss = 0.56247 (* 1 = 0.56247 loss)\nI0823 05:03:12.233032 32487 solver.cpp:228] Iteration 72800, loss = 0.000319447\nI0823 05:03:12.233079 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:03:12.233094 32487 solver.cpp:244]     Train net output #1: loss = 0.000319171 (* 1 = 0.000319171 loss)\nI0823 05:03:12.318420 32487 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0823 05:05:30.066522 32487 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0823 05:06:51.609490 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86372\nI0823 05:06:51.609822 32487 solver.cpp:404]     Test net output #1: loss = 0.554551 (* 1 = 0.554551 loss)\nI0823 05:06:52.932266 32487 solver.cpp:228] Iteration 72900, loss = 0.000293856\nI0823 05:06:52.932313 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:06:52.932330 32487 solver.cpp:244]     Train net output #1: loss = 0.000293579 (* 1 = 0.000293579 loss)\nI0823 05:06:53.020408 32487 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0823 05:09:10.751008 32487 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0823 05:10:32.295488 32487 solver.cpp:404]     Test net output #0: accuracy = 0.862641\nI0823 05:10:32.295809 32487 solver.cpp:404]     Test net output #1: loss = 0.562528 (* 1 = 0.562528 loss)\nI0823 05:10:33.617980 32487 solver.cpp:228] Iteration 73000, loss = 
0.000288707\nI0823 05:10:33.618026 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:10:33.618043 32487 solver.cpp:244]     Train net output #1: loss = 0.00028843 (* 1 = 0.00028843 loss)\nI0823 05:10:33.701580 32487 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0823 05:12:51.456887 32487 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0823 05:14:12.994799 32487 solver.cpp:404]     Test net output #0: accuracy = 0.864\nI0823 05:14:12.995143 32487 solver.cpp:404]     Test net output #1: loss = 0.554574 (* 1 = 0.554574 loss)\nI0823 05:14:14.317983 32487 solver.cpp:228] Iteration 73100, loss = 0.000294608\nI0823 05:14:14.318027 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:14:14.318042 32487 solver.cpp:244]     Train net output #1: loss = 0.000294332 (* 1 = 0.000294332 loss)\nI0823 05:14:14.408327 32487 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0823 05:16:32.132997 32487 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0823 05:17:53.670835 32487 solver.cpp:404]     Test net output #0: accuracy = 0.862801\nI0823 05:17:53.671183 32487 solver.cpp:404]     Test net output #1: loss = 0.562689 (* 1 = 0.562689 loss)\nI0823 05:17:54.993309 32487 solver.cpp:228] Iteration 73200, loss = 0.000305179\nI0823 05:17:54.993355 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:17:54.993371 32487 solver.cpp:244]     Train net output #1: loss = 0.000304903 (* 1 = 0.000304903 loss)\nI0823 05:17:55.079099 32487 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0823 05:20:12.660310 32487 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0823 05:21:34.198314 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0823 05:21:34.198665 32487 solver.cpp:404]     Test net output #1: loss = 0.554687 (* 1 = 0.554687 loss)\nI0823 05:21:35.521265 32487 solver.cpp:228] Iteration 73300, loss = 0.000314267\nI0823 05:21:35.521309 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
05:21:35.521332 32487 solver.cpp:244]     Train net output #1: loss = 0.000313991 (* 1 = 0.000313991 loss)\nI0823 05:21:35.606040 32487 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0823 05:23:53.211622 32487 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0823 05:25:14.748023 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0823 05:25:14.748371 32487 solver.cpp:404]     Test net output #1: loss = 0.562824 (* 1 = 0.562824 loss)\nI0823 05:25:16.070351 32487 solver.cpp:228] Iteration 73400, loss = 0.000329916\nI0823 05:25:16.070396 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:25:16.070412 32487 solver.cpp:244]     Train net output #1: loss = 0.00032964 (* 1 = 0.00032964 loss)\nI0823 05:25:16.157183 32487 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0823 05:27:33.886998 32487 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0823 05:28:55.416662 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86468\nI0823 05:28:55.417013 32487 solver.cpp:404]     Test net output #1: loss = 0.554894 (* 1 = 0.554894 loss)\nI0823 05:28:56.739642 32487 solver.cpp:228] Iteration 73500, loss = 0.000309834\nI0823 05:28:56.739687 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:28:56.739703 32487 solver.cpp:244]     Train net output #1: loss = 0.000309558 (* 1 = 0.000309558 loss)\nI0823 05:28:56.831019 32487 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0823 05:31:14.597262 32487 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0823 05:32:36.458166 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0823 05:32:36.458545 32487 solver.cpp:404]     Test net output #1: loss = 0.56301 (* 1 = 0.56301 loss)\nI0823 05:32:37.784466 32487 solver.cpp:228] Iteration 73600, loss = 0.000292163\nI0823 05:32:37.784508 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:32:37.784525 32487 solver.cpp:244]     Train net output #1: loss = 0.000291887 (* 1 = 0.000291887 loss)\nI0823 
05:32:37.863070 32487 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0823 05:34:55.057919 32487 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0823 05:36:17.143460 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8644\nI0823 05:36:17.143771 32487 solver.cpp:404]     Test net output #1: loss = 0.554948 (* 1 = 0.554948 loss)\nI0823 05:36:18.469365 32487 solver.cpp:228] Iteration 73700, loss = 0.00033005\nI0823 05:36:18.469403 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:36:18.469419 32487 solver.cpp:244]     Train net output #1: loss = 0.000329774 (* 1 = 0.000329774 loss)\nI0823 05:36:18.547060 32487 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0823 05:38:35.753197 32487 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0823 05:39:57.817989 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8632\nI0823 05:39:57.818279 32487 solver.cpp:404]     Test net output #1: loss = 0.563201 (* 1 = 0.563201 loss)\nI0823 05:39:59.143236 32487 solver.cpp:228] Iteration 73800, loss = 0.000300547\nI0823 05:39:59.143275 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:39:59.143292 32487 solver.cpp:244]     Train net output #1: loss = 0.00030027 (* 1 = 0.00030027 loss)\nI0823 05:39:59.226990 32487 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0823 05:42:16.429172 32487 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0823 05:43:38.610393 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86464\nI0823 05:43:38.610663 32487 solver.cpp:404]     Test net output #1: loss = 0.555137 (* 1 = 0.555137 loss)\nI0823 05:43:39.935119 32487 solver.cpp:228] Iteration 73900, loss = 0.000329263\nI0823 05:43:39.935161 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:43:39.935176 32487 solver.cpp:244]     Train net output #1: loss = 0.000328986 (* 1 = 0.000328986 loss)\nI0823 05:43:40.020071 32487 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0823 05:45:57.390447 32487 
solver.cpp:337] Iteration 74000, Testing net (#0)\nI0823 05:47:19.629647 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86372\nI0823 05:47:19.629945 32487 solver.cpp:404]     Test net output #1: loss = 0.563327 (* 1 = 0.563327 loss)\nI0823 05:47:20.954396 32487 solver.cpp:228] Iteration 74000, loss = 0.00035406\nI0823 05:47:20.954435 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:47:20.954450 32487 solver.cpp:244]     Train net output #1: loss = 0.000353784 (* 1 = 0.000353784 loss)\nI0823 05:47:21.033722 32487 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0823 05:49:38.412348 32487 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0823 05:51:00.267177 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86488\nI0823 05:51:00.267515 32487 solver.cpp:404]     Test net output #1: loss = 0.555334 (* 1 = 0.555334 loss)\nI0823 05:51:01.593209 32487 solver.cpp:228] Iteration 74100, loss = 0.000281249\nI0823 05:51:01.593251 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:51:01.593273 32487 solver.cpp:244]     Train net output #1: loss = 0.000280972 (* 1 = 0.000280972 loss)\nI0823 05:51:01.666467 32487 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0823 05:53:18.832808 32487 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0823 05:54:40.605923 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8636\nI0823 05:54:40.606209 32487 solver.cpp:404]     Test net output #1: loss = 0.56353 (* 1 = 0.56353 loss)\nI0823 05:54:41.930959 32487 solver.cpp:228] Iteration 74200, loss = 0.000258249\nI0823 05:54:41.931010 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:54:41.931035 32487 solver.cpp:244]     Train net output #1: loss = 0.000257973 (* 1 = 0.000257973 loss)\nI0823 05:54:42.010953 32487 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0823 05:56:59.193987 32487 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0823 05:58:21.174331 32487 solver.cpp:404]     Test net 
output #0: accuracy = 0.865\nI0823 05:58:21.174646 32487 solver.cpp:404]     Test net output #1: loss = 0.555337 (* 1 = 0.555337 loss)\nI0823 05:58:22.499500 32487 solver.cpp:228] Iteration 74300, loss = 0.000324606\nI0823 05:58:22.499541 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:58:22.499562 32487 solver.cpp:244]     Train net output #1: loss = 0.00032433 (* 1 = 0.00032433 loss)\nI0823 05:58:22.580677 32487 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0823 06:00:39.774209 32487 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0823 06:02:02.021616 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0823 06:02:02.021929 32487 solver.cpp:404]     Test net output #1: loss = 0.563616 (* 1 = 0.563616 loss)\nI0823 06:02:03.346886 32487 solver.cpp:228] Iteration 74400, loss = 0.000300226\nI0823 06:02:03.346935 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:02:03.346959 32487 solver.cpp:244]     Train net output #1: loss = 0.000299949 (* 1 = 0.000299949 loss)\nI0823 06:02:03.426327 32487 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0823 06:04:20.563642 32487 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0823 06:05:42.819293 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86484\nI0823 06:05:42.819589 32487 solver.cpp:404]     Test net output #1: loss = 0.555499 (* 1 = 0.555499 loss)\nI0823 06:05:44.145287 32487 solver.cpp:228] Iteration 74500, loss = 0.000263485\nI0823 06:05:44.145330 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:05:44.145354 32487 solver.cpp:244]     Train net output #1: loss = 0.000263209 (* 1 = 0.000263209 loss)\nI0823 06:05:44.227555 32487 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0823 06:08:01.464546 32487 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0823 06:09:23.736719 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0823 06:09:23.737007 32487 solver.cpp:404]     Test net output #1: loss = 
0.563704 (* 1 = 0.563704 loss)\nI0823 06:09:25.063107 32487 solver.cpp:228] Iteration 74600, loss = 0.000311898\nI0823 06:09:25.063153 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:09:25.063174 32487 solver.cpp:244]     Train net output #1: loss = 0.000311622 (* 1 = 0.000311622 loss)\nI0823 06:09:25.146344 32487 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0823 06:11:42.363117 32487 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0823 06:13:04.602144 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86496\nI0823 06:13:04.602465 32487 solver.cpp:404]     Test net output #1: loss = 0.555636 (* 1 = 0.555636 loss)\nI0823 06:13:05.927623 32487 solver.cpp:228] Iteration 74700, loss = 0.000314477\nI0823 06:13:05.927677 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:13:05.927700 32487 solver.cpp:244]     Train net output #1: loss = 0.0003142 (* 1 = 0.0003142 loss)\nI0823 06:13:06.009830 32487 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0823 06:15:23.155963 32487 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0823 06:16:45.379113 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0823 06:16:45.379405 32487 solver.cpp:404]     Test net output #1: loss = 0.56382 (* 1 = 0.56382 loss)\nI0823 06:16:46.704257 32487 solver.cpp:228] Iteration 74800, loss = 0.000260028\nI0823 06:16:46.704308 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:16:46.704334 32487 solver.cpp:244]     Train net output #1: loss = 0.000259752 (* 1 = 0.000259752 loss)\nI0823 06:16:46.782630 32487 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0823 06:19:03.943765 32487 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0823 06:20:26.177126 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86488\nI0823 06:20:26.177431 32487 solver.cpp:404]     Test net output #1: loss = 0.555601 (* 1 = 0.555601 loss)\nI0823 06:20:27.503105 32487 solver.cpp:228] Iteration 74900, loss = 
0.00030865\nI0823 06:20:27.503151 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:20:27.503173 32487 solver.cpp:244]     Train net output #1: loss = 0.000308373 (* 1 = 0.000308373 loss)\nI0823 06:20:27.585554 32487 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0823 06:22:44.795156 32487 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0823 06:24:07.055624 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86332\nI0823 06:24:07.055932 32487 solver.cpp:404]     Test net output #1: loss = 0.563889 (* 1 = 0.563889 loss)\nI0823 06:24:08.381825 32487 solver.cpp:228] Iteration 75000, loss = 0.000287359\nI0823 06:24:08.381881 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:24:08.381904 32487 solver.cpp:244]     Train net output #1: loss = 0.000287083 (* 1 = 0.000287083 loss)\nI0823 06:24:08.461498 32487 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0823 06:26:25.583997 32487 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0823 06:27:47.819952 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8648\nI0823 06:27:47.820269 32487 solver.cpp:404]     Test net output #1: loss = 0.555734 (* 1 = 0.555734 loss)\nI0823 06:27:49.146157 32487 solver.cpp:228] Iteration 75100, loss = 0.000256128\nI0823 06:27:49.146210 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:27:49.146234 32487 solver.cpp:244]     Train net output #1: loss = 0.000255852 (* 1 = 0.000255852 loss)\nI0823 06:27:49.229252 32487 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0823 06:30:06.473696 32487 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0823 06:31:28.705480 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8632\nI0823 06:31:28.705854 32487 solver.cpp:404]     Test net output #1: loss = 0.564019 (* 1 = 0.564019 loss)\nI0823 06:31:30.030683 32487 solver.cpp:228] Iteration 75200, loss = 0.000344728\nI0823 06:31:30.030737 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
06:31:30.030761 32487 solver.cpp:244]     Train net output #1: loss = 0.000344451 (* 1 = 0.000344451 loss)\nI0823 06:31:30.106981 32487 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0823 06:33:47.233978 32487 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0823 06:35:09.514039 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8648\nI0823 06:35:09.514331 32487 solver.cpp:404]     Test net output #1: loss = 0.555929 (* 1 = 0.555929 loss)\nI0823 06:35:10.839087 32487 solver.cpp:228] Iteration 75300, loss = 0.000310353\nI0823 06:35:10.839141 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:35:10.839165 32487 solver.cpp:244]     Train net output #1: loss = 0.000310077 (* 1 = 0.000310077 loss)\nI0823 06:35:10.919805 32487 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0823 06:37:28.045809 32487 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0823 06:38:50.154950 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8632\nI0823 06:38:50.155233 32487 solver.cpp:404]     Test net output #1: loss = 0.564161 (* 1 = 0.564161 loss)\nI0823 06:38:51.482040 32487 solver.cpp:228] Iteration 75400, loss = 0.000269731\nI0823 06:38:51.482081 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:38:51.482097 32487 solver.cpp:244]     Train net output #1: loss = 0.000269454 (* 1 = 0.000269454 loss)\nI0823 06:38:51.559698 32487 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0823 06:41:08.699573 32487 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0823 06:42:31.039481 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 06:42:31.039854 32487 solver.cpp:404]     Test net output #1: loss = 0.555975 (* 1 = 0.555975 loss)\nI0823 06:42:32.365221 32487 solver.cpp:228] Iteration 75500, loss = 0.00027415\nI0823 06:42:32.365262 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:42:32.365278 32487 solver.cpp:244]     Train net output #1: loss = 0.000273874 (* 1 = 0.000273874 loss)\nI0823 
06:42:32.443500 32487 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0823 06:44:49.572166 32487 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0823 06:46:11.914456 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0823 06:46:11.914759 32487 solver.cpp:404]     Test net output #1: loss = 0.564341 (* 1 = 0.564341 loss)\nI0823 06:46:13.241000 32487 solver.cpp:228] Iteration 75600, loss = 0.000280841\nI0823 06:46:13.241050 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:46:13.241066 32487 solver.cpp:244]     Train net output #1: loss = 0.000280565 (* 1 = 0.000280565 loss)\nI0823 06:46:13.318117 32487 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0823 06:48:30.412055 32487 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0823 06:49:52.782445 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86488\nI0823 06:49:52.782752 32487 solver.cpp:404]     Test net output #1: loss = 0.556113 (* 1 = 0.556113 loss)\nI0823 06:49:54.108989 32487 solver.cpp:228] Iteration 75700, loss = 0.00026133\nI0823 06:49:54.109030 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:49:54.109045 32487 solver.cpp:244]     Train net output #1: loss = 0.000261053 (* 1 = 0.000261053 loss)\nI0823 06:49:54.182795 32487 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0823 06:52:11.313550 32487 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0823 06:53:33.491111 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 06:53:33.491410 32487 solver.cpp:404]     Test net output #1: loss = 0.56428 (* 1 = 0.56428 loss)\nI0823 06:53:34.817575 32487 solver.cpp:228] Iteration 75800, loss = 0.000314499\nI0823 06:53:34.817620 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:53:34.817641 32487 solver.cpp:244]     Train net output #1: loss = 0.000314223 (* 1 = 0.000314223 loss)\nI0823 06:53:34.893863 32487 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0823 06:55:52.009737 32487 
solver.cpp:337] Iteration 75900, Testing net (#0)\nI0823 06:57:14.386188 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 06:57:14.386478 32487 solver.cpp:404]     Test net output #1: loss = 0.556158 (* 1 = 0.556158 loss)\nI0823 06:57:15.713255 32487 solver.cpp:228] Iteration 75900, loss = 0.000286766\nI0823 06:57:15.713299 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:57:15.713315 32487 solver.cpp:244]     Train net output #1: loss = 0.00028649 (* 1 = 0.00028649 loss)\nI0823 06:57:15.792084 32487 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0823 06:59:32.890841 32487 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0823 07:00:55.240998 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 07:00:55.241294 32487 solver.cpp:404]     Test net output #1: loss = 0.564326 (* 1 = 0.564326 loss)\nI0823 07:00:56.566642 32487 solver.cpp:228] Iteration 76000, loss = 0.000305086\nI0823 07:00:56.566695 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:00:56.566714 32487 solver.cpp:244]     Train net output #1: loss = 0.00030481 (* 1 = 0.00030481 loss)\nI0823 07:00:56.650732 32487 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0823 07:03:13.809970 32487 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0823 07:04:36.137810 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 07:04:36.138098 32487 solver.cpp:404]     Test net output #1: loss = 0.556119 (* 1 = 0.556119 loss)\nI0823 07:04:37.465011 32487 solver.cpp:228] Iteration 76100, loss = 0.000285894\nI0823 07:04:37.465056 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:04:37.465072 32487 solver.cpp:244]     Train net output #1: loss = 0.000285618 (* 1 = 0.000285618 loss)\nI0823 07:04:37.540205 32487 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0823 07:06:54.682019 32487 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0823 07:08:16.961506 32487 solver.cpp:404]     Test net output #0: 
accuracy = 0.86352\nI0823 07:08:16.961797 32487 solver.cpp:404]     Test net output #1: loss = 0.564398 (* 1 = 0.564398 loss)\nI0823 07:08:18.287919 32487 solver.cpp:228] Iteration 76200, loss = 0.000314656\nI0823 07:08:18.287961 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:08:18.287977 32487 solver.cpp:244]     Train net output #1: loss = 0.000314379 (* 1 = 0.000314379 loss)\nI0823 07:08:18.366920 32487 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0823 07:10:35.672852 32487 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0823 07:11:57.971292 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 07:11:57.971580 32487 solver.cpp:404]     Test net output #1: loss = 0.556201 (* 1 = 0.556201 loss)\nI0823 07:11:59.298234 32487 solver.cpp:228] Iteration 76300, loss = 0.000292677\nI0823 07:11:59.298280 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:11:59.298295 32487 solver.cpp:244]     Train net output #1: loss = 0.000292401 (* 1 = 0.000292401 loss)\nI0823 07:11:59.371055 32487 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0823 07:14:16.552597 32487 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0823 07:15:38.888350 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 07:15:38.888682 32487 solver.cpp:404]     Test net output #1: loss = 0.564462 (* 1 = 0.564462 loss)\nI0823 07:15:40.214206 32487 solver.cpp:228] Iteration 76400, loss = 0.000283826\nI0823 07:15:40.214251 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:15:40.214267 32487 solver.cpp:244]     Train net output #1: loss = 0.00028355 (* 1 = 0.00028355 loss)\nI0823 07:15:40.288666 32487 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0823 07:17:57.462920 32487 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0823 07:19:19.805812 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86508\nI0823 07:19:19.806094 32487 solver.cpp:404]     Test net output #1: loss = 0.556344 (* 1 = 
0.556344 loss)\nI0823 07:19:21.131419 32487 solver.cpp:228] Iteration 76500, loss = 0.000319595\nI0823 07:19:21.131467 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:19:21.131484 32487 solver.cpp:244]     Train net output #1: loss = 0.000319318 (* 1 = 0.000319318 loss)\nI0823 07:19:21.208114 32487 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0823 07:21:38.337514 32487 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0823 07:23:00.654573 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 07:23:00.654920 32487 solver.cpp:404]     Test net output #1: loss = 0.564488 (* 1 = 0.564488 loss)\nI0823 07:23:01.980352 32487 solver.cpp:228] Iteration 76600, loss = 0.000264309\nI0823 07:23:01.980401 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:23:01.980418 32487 solver.cpp:244]     Train net output #1: loss = 0.000264033 (* 1 = 0.000264033 loss)\nI0823 07:23:02.058387 32487 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0823 07:25:19.219635 32487 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0823 07:26:41.548511 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 07:26:41.548832 32487 solver.cpp:404]     Test net output #1: loss = 0.556198 (* 1 = 0.556198 loss)\nI0823 07:26:42.873652 32487 solver.cpp:228] Iteration 76700, loss = 0.000303699\nI0823 07:26:42.873698 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:26:42.873714 32487 solver.cpp:244]     Train net output #1: loss = 0.000303423 (* 1 = 0.000303423 loss)\nI0823 07:26:42.953629 32487 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0823 07:29:00.080941 32487 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0823 07:30:22.361665 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0823 07:30:22.361959 32487 solver.cpp:404]     Test net output #1: loss = 0.564572 (* 1 = 0.564572 loss)\nI0823 07:30:23.687281 32487 solver.cpp:228] Iteration 76800, loss = 0.000288945\nI0823 
07:30:23.687332 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:30:23.687350 32487 solver.cpp:244]     Train net output #1: loss = 0.000288668 (* 1 = 0.000288668 loss)\nI0823 07:30:23.762580 32487 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0823 07:32:40.773911 32487 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0823 07:34:03.033210 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 07:34:03.033596 32487 solver.cpp:404]     Test net output #1: loss = 0.556312 (* 1 = 0.556312 loss)\nI0823 07:34:04.359789 32487 solver.cpp:228] Iteration 76900, loss = 0.000285723\nI0823 07:34:04.359830 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:34:04.359845 32487 solver.cpp:244]     Train net output #1: loss = 0.000285446 (* 1 = 0.000285446 loss)\nI0823 07:34:04.441520 32487 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0823 07:36:21.678438 32487 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0823 07:37:44.029784 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 07:37:44.030067 32487 solver.cpp:404]     Test net output #1: loss = 0.56449 (* 1 = 0.56449 loss)\nI0823 07:37:45.356055 32487 solver.cpp:228] Iteration 77000, loss = 0.000294044\nI0823 07:37:45.356094 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:37:45.356111 32487 solver.cpp:244]     Train net output #1: loss = 0.000293767 (* 1 = 0.000293767 loss)\nI0823 07:37:45.433123 32487 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0823 07:40:02.574488 32487 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0823 07:41:24.843888 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 07:41:24.844262 32487 solver.cpp:404]     Test net output #1: loss = 0.556349 (* 1 = 0.556349 loss)\nI0823 07:41:26.169520 32487 solver.cpp:228] Iteration 77100, loss = 0.000317065\nI0823 07:41:26.169570 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:41:26.169586 32487 
solver.cpp:244]     Train net output #1: loss = 0.000316789 (* 1 = 0.000316789 loss)\nI0823 07:41:26.248865 32487 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0823 07:43:43.359431 32487 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0823 07:45:05.228281 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 07:45:05.228570 32487 solver.cpp:404]     Test net output #1: loss = 0.564561 (* 1 = 0.564561 loss)\nI0823 07:45:06.553948 32487 solver.cpp:228] Iteration 77200, loss = 0.000310504\nI0823 07:45:06.554000 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:45:06.554016 32487 solver.cpp:244]     Train net output #1: loss = 0.000310227 (* 1 = 0.000310227 loss)\nI0823 07:45:06.631701 32487 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0823 07:47:23.781448 32487 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0823 07:48:45.976843 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 07:48:45.977115 32487 solver.cpp:404]     Test net output #1: loss = 0.556348 (* 1 = 0.556348 loss)\nI0823 07:48:47.301893 32487 solver.cpp:228] Iteration 77300, loss = 0.000290006\nI0823 07:48:47.301944 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:48:47.301959 32487 solver.cpp:244]     Train net output #1: loss = 0.00028973 (* 1 = 0.00028973 loss)\nI0823 07:48:47.379272 32487 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0823 07:51:04.489866 32487 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0823 07:52:26.386414 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 07:52:26.386803 32487 solver.cpp:404]     Test net output #1: loss = 0.564733 (* 1 = 0.564733 loss)\nI0823 07:52:27.713919 32487 solver.cpp:228] Iteration 77400, loss = 0.00030215\nI0823 07:52:27.713961 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:52:27.713984 32487 solver.cpp:244]     Train net output #1: loss = 0.000301873 (* 1 = 0.000301873 loss)\nI0823 07:52:27.792209 32487 
sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0823 07:54:45.155035 32487 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0823 07:56:07.073756 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 07:56:07.074039 32487 solver.cpp:404]     Test net output #1: loss = 0.556474 (* 1 = 0.556474 loss)\nI0823 07:56:08.399152 32487 solver.cpp:228] Iteration 77500, loss = 0.000323313\nI0823 07:56:08.399194 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:56:08.399216 32487 solver.cpp:244]     Train net output #1: loss = 0.000323037 (* 1 = 0.000323037 loss)\nI0823 07:56:08.479022 32487 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0823 07:58:25.725360 32487 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0823 07:59:47.608260 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8632\nI0823 07:59:47.608584 32487 solver.cpp:404]     Test net output #1: loss = 0.564772 (* 1 = 0.564772 loss)\nI0823 07:59:48.934492 32487 solver.cpp:228] Iteration 77600, loss = 0.000290192\nI0823 07:59:48.934540 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:59:48.934556 32487 solver.cpp:244]     Train net output #1: loss = 0.000289915 (* 1 = 0.000289915 loss)\nI0823 07:59:49.008402 32487 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0823 08:02:06.154464 32487 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0823 08:03:28.496160 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 08:03:28.496500 32487 solver.cpp:404]     Test net output #1: loss = 0.556579 (* 1 = 0.556579 loss)\nI0823 08:03:29.822010 32487 solver.cpp:228] Iteration 77700, loss = 0.000315569\nI0823 08:03:29.822058 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:03:29.822075 32487 solver.cpp:244]     Train net output #1: loss = 0.000315293 (* 1 = 0.000315293 loss)\nI0823 08:03:29.904817 32487 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0823 08:05:47.291200 32487 solver.cpp:337] Iteration 77800, 
Testing net (#0)\nI0823 08:07:09.598223 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 08:07:09.598548 32487 solver.cpp:404]     Test net output #1: loss = 0.564793 (* 1 = 0.564793 loss)\nI0823 08:07:10.924401 32487 solver.cpp:228] Iteration 77800, loss = 0.00026071\nI0823 08:07:10.924451 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:07:10.924468 32487 solver.cpp:244]     Train net output #1: loss = 0.000260433 (* 1 = 0.000260433 loss)\nI0823 08:07:11.003896 32487 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0823 08:09:28.185839 32487 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0823 08:10:50.514359 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 08:10:50.514703 32487 solver.cpp:404]     Test net output #1: loss = 0.556508 (* 1 = 0.556508 loss)\nI0823 08:10:51.839808 32487 solver.cpp:228] Iteration 77900, loss = 0.000323407\nI0823 08:10:51.839859 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:10:51.839875 32487 solver.cpp:244]     Train net output #1: loss = 0.000323131 (* 1 = 0.000323131 loss)\nI0823 08:10:51.921741 32487 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0823 08:13:09.103834 32487 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0823 08:14:31.443871 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 08:14:31.444160 32487 solver.cpp:404]     Test net output #1: loss = 0.564852 (* 1 = 0.564852 loss)\nI0823 08:14:32.770818 32487 solver.cpp:228] Iteration 78000, loss = 0.000316991\nI0823 08:14:32.770866 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:14:32.770884 32487 solver.cpp:244]     Train net output #1: loss = 0.000316715 (* 1 = 0.000316715 loss)\nI0823 08:14:32.848245 32487 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0823 08:16:50.089159 32487 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0823 08:18:12.062749 32487 solver.cpp:404]     Test net output #0: accuracy = 
0.86492\nI0823 08:18:12.063056 32487 solver.cpp:404]     Test net output #1: loss = 0.556636 (* 1 = 0.556636 loss)\nI0823 08:18:13.389197 32487 solver.cpp:228] Iteration 78100, loss = 0.000284607\nI0823 08:18:13.389238 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:18:13.389256 32487 solver.cpp:244]     Train net output #1: loss = 0.00028433 (* 1 = 0.00028433 loss)\nI0823 08:18:13.468098 32487 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0823 08:20:30.719223 32487 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0823 08:21:53.028240 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 08:21:53.028579 32487 solver.cpp:404]     Test net output #1: loss = 0.564928 (* 1 = 0.564928 loss)\nI0823 08:21:54.355357 32487 solver.cpp:228] Iteration 78200, loss = 0.000286388\nI0823 08:21:54.355406 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:21:54.355422 32487 solver.cpp:244]     Train net output #1: loss = 0.000286111 (* 1 = 0.000286111 loss)\nI0823 08:21:54.427462 32487 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0823 08:24:11.582787 32487 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0823 08:25:33.445413 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 08:25:33.445701 32487 solver.cpp:404]     Test net output #1: loss = 0.556767 (* 1 = 0.556767 loss)\nI0823 08:25:34.771069 32487 solver.cpp:228] Iteration 78300, loss = 0.000301646\nI0823 08:25:34.771111 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:25:34.771126 32487 solver.cpp:244]     Train net output #1: loss = 0.00030137 (* 1 = 0.00030137 loss)\nI0823 08:25:34.844410 32487 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0823 08:27:52.062546 32487 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0823 08:29:13.964170 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0823 08:29:13.964463 32487 solver.cpp:404]     Test net output #1: loss = 0.565036 (* 1 = 0.565036 
loss)\nI0823 08:29:15.289942 32487 solver.cpp:228] Iteration 78400, loss = 0.000298027\nI0823 08:29:15.289994 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:29:15.290011 32487 solver.cpp:244]     Train net output #1: loss = 0.00029775 (* 1 = 0.00029775 loss)\nI0823 08:29:15.372521 32487 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0823 08:31:32.688882 32487 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0823 08:32:54.885571 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 08:32:54.885974 32487 solver.cpp:404]     Test net output #1: loss = 0.556753 (* 1 = 0.556753 loss)\nI0823 08:32:56.211879 32487 solver.cpp:228] Iteration 78500, loss = 0.000315427\nI0823 08:32:56.211928 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:32:56.211943 32487 solver.cpp:244]     Train net output #1: loss = 0.000315151 (* 1 = 0.000315151 loss)\nI0823 08:32:56.295938 32487 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0823 08:35:13.453016 32487 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0823 08:36:35.796862 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 08:36:35.797214 32487 solver.cpp:404]     Test net output #1: loss = 0.565134 (* 1 = 0.565134 loss)\nI0823 08:36:37.121969 32487 solver.cpp:228] Iteration 78600, loss = 0.000291753\nI0823 08:36:37.122016 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:36:37.122033 32487 solver.cpp:244]     Train net output #1: loss = 0.000291476 (* 1 = 0.000291476 loss)\nI0823 08:36:37.200687 32487 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0823 08:38:54.492002 32487 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0823 08:40:16.825937 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 08:40:16.826306 32487 solver.cpp:404]     Test net output #1: loss = 0.556859 (* 1 = 0.556859 loss)\nI0823 08:40:18.151417 32487 solver.cpp:228] Iteration 78700, loss = 0.000298153\nI0823 08:40:18.151456 
32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:40:18.151473 32487 solver.cpp:244]     Train net output #1: loss = 0.000297877 (* 1 = 0.000297877 loss)\nI0823 08:40:18.227100 32487 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0823 08:42:35.409049 32487 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0823 08:43:57.741765 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 08:43:57.742120 32487 solver.cpp:404]     Test net output #1: loss = 0.565162 (* 1 = 0.565162 loss)\nI0823 08:43:59.067773 32487 solver.cpp:228] Iteration 78800, loss = 0.000307333\nI0823 08:43:59.067814 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:43:59.067829 32487 solver.cpp:244]     Train net output #1: loss = 0.000307056 (* 1 = 0.000307056 loss)\nI0823 08:43:59.145150 32487 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0823 08:46:16.516254 32487 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0823 08:47:38.849225 32487 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0823 08:47:38.849599 32487 solver.cpp:404]     Test net output #1: loss = 0.556968 (* 1 = 0.556968 loss)\nI0823 08:47:40.175197 32487 solver.cpp:228] Iteration 78900, loss = 0.000288618\nI0823 08:47:40.175238 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:47:40.175253 32487 solver.cpp:244]     Train net output #1: loss = 0.000288341 (* 1 = 0.000288341 loss)\nI0823 08:47:40.250253 32487 sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0823 08:49:57.534945 32487 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0823 08:51:19.859422 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 08:51:19.859791 32487 solver.cpp:404]     Test net output #1: loss = 0.56523 (* 1 = 0.56523 loss)\nI0823 08:51:21.185981 32487 solver.cpp:228] Iteration 79000, loss = 0.000266342\nI0823 08:51:21.186023 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:51:21.186038 32487 solver.cpp:244]     Train 
net output #1: loss = 0.000266065 (* 1 = 0.000266065 loss)\nI0823 08:51:21.264616 32487 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0823 08:53:38.465950 32487 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0823 08:55:00.816179 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86504\nI0823 08:55:00.816560 32487 solver.cpp:404]     Test net output #1: loss = 0.556943 (* 1 = 0.556943 loss)\nI0823 08:55:02.143611 32487 solver.cpp:228] Iteration 79100, loss = 0.000291537\nI0823 08:55:02.143654 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:55:02.143671 32487 solver.cpp:244]     Train net output #1: loss = 0.00029126 (* 1 = 0.00029126 loss)\nI0823 08:55:02.223321 32487 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0823 08:57:19.538525 32487 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0823 08:58:41.923696 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 08:58:41.924074 32487 solver.cpp:404]     Test net output #1: loss = 0.565363 (* 1 = 0.565363 loss)\nI0823 08:58:43.250419 32487 solver.cpp:228] Iteration 79200, loss = 0.000308951\nI0823 08:58:43.250461 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:58:43.250475 32487 solver.cpp:244]     Train net output #1: loss = 0.000308675 (* 1 = 0.000308675 loss)\nI0823 08:58:43.328230 32487 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0823 09:01:00.550410 32487 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0823 09:02:22.931363 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86504\nI0823 09:02:22.931736 32487 solver.cpp:404]     Test net output #1: loss = 0.557061 (* 1 = 0.557061 loss)\nI0823 09:02:24.258358 32487 solver.cpp:228] Iteration 79300, loss = 0.00029643\nI0823 09:02:24.258401 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:02:24.258417 32487 solver.cpp:244]     Train net output #1: loss = 0.000296153 (* 1 = 0.000296153 loss)\nI0823 09:02:24.331301 32487 sgd_solver.cpp:166] 
Iteration 79300, lr = 0.0035\nI0823 09:04:41.755589 32487 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0823 09:06:04.173009 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0823 09:06:04.173398 32487 solver.cpp:404]     Test net output #1: loss = 0.565371 (* 1 = 0.565371 loss)\nI0823 09:06:05.500165 32487 solver.cpp:228] Iteration 79400, loss = 0.000271751\nI0823 09:06:05.500211 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:06:05.500236 32487 solver.cpp:244]     Train net output #1: loss = 0.000271474 (* 1 = 0.000271474 loss)\nI0823 09:06:05.569948 32487 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0823 09:08:22.764995 32487 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0823 09:09:45.147950 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0823 09:09:45.148360 32487 solver.cpp:404]     Test net output #1: loss = 0.557139 (* 1 = 0.557139 loss)\nI0823 09:09:46.475440 32487 solver.cpp:228] Iteration 79500, loss = 0.000298406\nI0823 09:09:46.475495 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:09:46.475520 32487 solver.cpp:244]     Train net output #1: loss = 0.000298129 (* 1 = 0.000298129 loss)\nI0823 09:09:46.553107 32487 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0823 09:12:03.930305 32487 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0823 09:13:26.423068 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0823 09:13:26.423444 32487 solver.cpp:404]     Test net output #1: loss = 0.565414 (* 1 = 0.565414 loss)\nI0823 09:13:27.749946 32487 solver.cpp:228] Iteration 79600, loss = 0.000351065\nI0823 09:13:27.749992 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:13:27.750016 32487 solver.cpp:244]     Train net output #1: loss = 0.000350788 (* 1 = 0.000350788 loss)\nI0823 09:13:27.831709 32487 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0823 09:15:45.039422 32487 solver.cpp:337] Iteration 79700, Testing net 
(#0)\nI0823 09:17:07.380270 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86524\nI0823 09:17:07.380648 32487 solver.cpp:404]     Test net output #1: loss = 0.557126 (* 1 = 0.557126 loss)\nI0823 09:17:08.706360 32487 solver.cpp:228] Iteration 79700, loss = 0.000328785\nI0823 09:17:08.706408 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:17:08.706430 32487 solver.cpp:244]     Train net output #1: loss = 0.000328509 (* 1 = 0.000328509 loss)\nI0823 09:17:08.786970 32487 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0823 09:19:25.968430 32487 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0823 09:20:48.367360 32487 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0823 09:20:48.367768 32487 solver.cpp:404]     Test net output #1: loss = 0.565498 (* 1 = 0.565498 loss)\nI0823 09:20:49.693435 32487 solver.cpp:228] Iteration 79800, loss = 0.000293666\nI0823 09:20:49.693480 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:20:49.693503 32487 solver.cpp:244]     Train net output #1: loss = 0.000293389 (* 1 = 0.000293389 loss)\nI0823 09:20:49.769800 32487 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0823 09:23:06.979022 32487 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0823 09:24:29.378373 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86492\nI0823 09:24:29.378762 32487 solver.cpp:404]     Test net output #1: loss = 0.557232 (* 1 = 0.557232 loss)\nI0823 09:24:30.705755 32487 solver.cpp:228] Iteration 79900, loss = 0.00026082\nI0823 09:24:30.705801 32487 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:24:30.705823 32487 solver.cpp:244]     Train net output #1: loss = 0.000260544 (* 1 = 0.000260544 loss)\nI0823 09:24:30.786027 32487 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0823 09:26:47.988529 32487 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Tr30kTab1_iter_80000.caffemodel\nI0823 09:26:48.214370 32487 sgd_solver.cpp:333] 
Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Tr30kTab1_iter_80000.solverstate\nI0823 09:26:48.662662 32487 solver.cpp:317] Iteration 80000, loss = 0.000301123\nI0823 09:26:48.662701 32487 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0823 09:28:10.180264 32487 solver.cpp:404]     Test net output #0: accuracy = 0.86356\nI0823 09:28:10.180598 32487 solver.cpp:404]     Test net output #1: loss = 0.565506 (* 1 = 0.565506 loss)\nI0823 09:28:10.180618 32487 solver.cpp:322] Optimization Done.\nI0823 09:28:15.457790 32487 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lr35Tr40kTab1",
    "content": "I0821 08:26:20.819046 32262 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0821 08:26:20.821985 32262 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0821 08:26:20.823201 32262 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0821 08:26:20.824414 32262 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0821 08:26:20.825629 32262 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0821 08:26:20.826860 32262 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0821 08:26:20.828088 32262 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0821 08:26:20.829320 32262 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0821 08:26:20.830549 32262 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0821 08:26:21.249194 32262 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0.35\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"multistep\"\ngamma: 0.1\nmomentum: 0.9\nweight_decay: 0.0001\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/lr35Tr40kTab1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nstepvalue: 50000\nstepvalue: 70000\nI0821 08:26:21.254227 32262 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0821 08:26:21.272640 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:21.272724 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:21.273869 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0821 08:26:21.273936 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0821 08:26:21.273957 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0821 08:26:21.273975 32262 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:26:21.273995 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:26:21.274019 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:26:21.274039 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:26:21.274057 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:26:21.274077 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:26:21.274096 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:26:21.274123 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:26:21.274140 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:26:21.274159 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:26:21.274178 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:26:21.274196 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:26:21.274214 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:26:21.274232 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:26:21.274250 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI0821 08:26:21.274269 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:26:21.274288 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:26:21.274322 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:26:21.274343 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:26:21.274366 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:26:21.274385 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:26:21.274402 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:26:21.274416 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:26:21.274435 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:26:21.274451 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:26:21.274468 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:26:21.274487 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:26:21.274505 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:26:21.274523 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:26:21.274543 32262 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:26:21.274559 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:26:21.274577 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:26:21.274595 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:26:21.274615 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:26:21.274632 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:26:21.274652 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:26:21.274668 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:26:21.274694 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:26:21.274710 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:26:21.274729 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:26:21.274745 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:26:21.274765 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:26:21.274785 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:26:21.274803 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0821 08:26:21.274819 32262 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:26:21.274837 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:26:21.274854 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:26:21.274873 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:26:21.274897 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:26:21.274917 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:26:21.274936 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:26:21.274955 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:26:21.274971 32262 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:26:21.276737 32262 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train40k_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n  
  decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n  
  stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.9\nI0821 08:26:21.278980 32262 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:26:21.281666 32262 net.cpp:100] Creating Layer dataLayer\nI0821 08:26:21.281746 32262 net.cpp:408] dataLayer -> data_top\nI0821 08:26:21.281961 32262 net.cpp:408] dataLayer -> label\nI0821 08:26:21.282090 32262 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:26:21.360497 32267 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train40k_lmdb\nI0821 08:26:21.360913 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:21.368046 32262 net.cpp:150] Setting up dataLayer\nI0821 08:26:21.368135 32262 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:26:21.368157 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.368168 32262 net.cpp:165] Memory required for data: 1536500\nI0821 08:26:21.368191 32262 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:26:21.368214 32262 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:26:21.368229 32262 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:26:21.368263 32262 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:26:21.368291 32262 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:26:21.368394 32262 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:26:21.368418 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.368433 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.368443 32262 net.cpp:165] Memory required for data: 1537500\nI0821 08:26:21.368453 32262 layer_factory.hpp:77] Creating layer pre_conv\nI0821 
08:26:21.368537 32262 net.cpp:100] Creating Layer pre_conv\nI0821 08:26:21.368552 32262 net.cpp:434] pre_conv <- data_top\nI0821 08:26:21.368569 32262 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:26:21.370388 32262 net.cpp:150] Setting up pre_conv\nI0821 08:26:21.370411 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.370422 32262 net.cpp:165] Memory required for data: 9729500\nI0821 08:26:21.370524 32262 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:26:21.370640 32262 net.cpp:100] Creating Layer pre_bn\nI0821 08:26:21.370656 32262 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:26:21.370673 32262 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:26:21.370695 32268 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:21.371034 32262 net.cpp:150] Setting up pre_bn\nI0821 08:26:21.371057 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.371068 32262 net.cpp:165] Memory required for data: 17921500\nI0821 08:26:21.371098 32262 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:21.371176 32262 net.cpp:100] Creating Layer pre_scale\nI0821 08:26:21.371192 32262 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:26:21.371213 32262 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:26:21.371436 32262 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:21.373611 32262 net.cpp:150] Setting up pre_scale\nI0821 08:26:21.373633 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.373644 32262 net.cpp:165] Memory required for data: 26113500\nI0821 08:26:21.373663 32262 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:26:21.373733 32262 net.cpp:100] Creating Layer pre_relu\nI0821 08:26:21.373750 32262 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:26:21.373765 32262 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:26:21.373785 32262 net.cpp:150] Setting up pre_relu\nI0821 08:26:21.373800 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.373809 32262 net.cpp:165] Memory 
required for data: 34305500\nI0821 08:26:21.373821 32262 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:26:21.373839 32262 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:26:21.373852 32262 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:26:21.373865 32262 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:26:21.373889 32262 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:26:21.373970 32262 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:26:21.373988 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.374001 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.374011 32262 net.cpp:165] Memory required for data: 50689500\nI0821 08:26:21.374022 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:26:21.374042 32262 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:26:21.374053 32262 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:26:21.374078 32262 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:26:21.374431 32262 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:26:21.374454 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.374464 32262 net.cpp:165] Memory required for data: 58881500\nI0821 08:26:21.374486 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:26:21.374511 32262 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:26:21.374524 32262 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:26:21.374541 32262 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:26:21.374819 32262 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:26:21.374840 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.374851 32262 net.cpp:165] Memory required for data: 67073500\nI0821 08:26:21.374874 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:21.374891 32262 
net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 08:26:21.374902 32262 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:26:21.374917 32262 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.375002 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:21.375181 32262 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:26:21.375201 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.375211 32262 net.cpp:165] Memory required for data: 75265500\nI0821 08:26:21.375228 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:26:21.375257 32262 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:26:21.375269 32262 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:26:21.375283 32262 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.375303 32262 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:26:21.375318 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.375327 32262 net.cpp:165] Memory required for data: 83457500\nI0821 08:26:21.375336 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:26:21.375365 32262 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:26:21.375376 32262 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:26:21.375397 32262 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:26:21.375733 32262 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:26:21.375752 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.375762 32262 net.cpp:165] Memory required for data: 91649500\nI0821 08:26:21.375780 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:26:21.375797 32262 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:26:21.375808 32262 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:26:21.375829 32262 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:26:21.376107 32262 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 
08:26:21.376127 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.376137 32262 net.cpp:165] Memory required for data: 99841500\nI0821 08:26:21.376169 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:21.376186 32262 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:26:21.376199 32262 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:26:21.376214 32262 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.376297 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:21.376468 32262 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:26:21.376487 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.376497 32262 net.cpp:165] Memory required for data: 108033500\nI0821 08:26:21.376516 32262 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:26:21.376591 32262 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:26:21.376607 32262 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:26:21.376619 32262 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:26:21.376639 32262 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:26:21.376736 32262 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:26:21.376756 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.376766 32262 net.cpp:165] Memory required for data: 116225500\nI0821 08:26:21.376777 32262 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:26:21.376796 32262 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:26:21.376808 32262 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:26:21.376823 32262 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.376842 32262 net.cpp:150] Setting up L1_b1_relu\nI0821 08:26:21.376857 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.376866 32262 net.cpp:165] Memory required for data: 124417500\nI0821 08:26:21.376876 32262 
layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.376893 32262 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.376904 32262 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:26:21.376919 32262 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:21.376938 32262 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:21.377014 32262 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.377032 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.377045 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.377064 32262 net.cpp:165] Memory required for data: 140801500\nI0821 08:26:21.377075 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:26:21.377099 32262 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:26:21.377121 32262 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:21.377140 32262 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:26:21.377483 32262 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:26:21.377503 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.377513 32262 net.cpp:165] Memory required for data: 148993500\nI0821 08:26:21.377530 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:26:21.377554 32262 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:26:21.377566 32262 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:26:21.377586 32262 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:26:21.377856 32262 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:26:21.377876 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.377887 32262 net.cpp:165] Memory required for data: 157185500\nI0821 08:26:21.377909 32262 
layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:21.377926 32262 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:26:21.377938 32262 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:26:21.377954 32262 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.378041 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:21.378221 32262 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:26:21.378240 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.378249 32262 net.cpp:165] Memory required for data: 165377500\nI0821 08:26:21.378268 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:26:21.378289 32262 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:26:21.378300 32262 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:26:21.378315 32262 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.378334 32262 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:26:21.378348 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.378358 32262 net.cpp:165] Memory required for data: 173569500\nI0821 08:26:21.378368 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:26:21.378392 32262 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:26:21.378406 32262 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:26:21.378427 32262 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:26:21.378778 32262 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:26:21.378798 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.378806 32262 net.cpp:165] Memory required for data: 181761500\nI0821 08:26:21.378824 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:26:21.378845 32262 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:26:21.378857 32262 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:26:21.378873 32262 net.cpp:408] L1_b2_cbr2_bn -> 
L1_b2_cbr2_bn_top\nI0821 08:26:21.379158 32262 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:26:21.379178 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.379186 32262 net.cpp:165] Memory required for data: 189953500\nI0821 08:26:21.379215 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:21.379242 32262 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:26:21.379256 32262 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:26:21.379271 32262 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.379366 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:21.379537 32262 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:26:21.379556 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.379565 32262 net.cpp:165] Memory required for data: 198145500\nI0821 08:26:21.379583 32262 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:26:21.379609 32262 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:26:21.379621 32262 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:26:21.379634 32262 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:21.379654 32262 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:26:21.379709 32262 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:26:21.379726 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.379736 32262 net.cpp:165] Memory required for data: 206337500\nI0821 08:26:21.379747 32262 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:26:21.379762 32262 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:26:21.379772 32262 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:26:21.379786 32262 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:26:21.379806 32262 net.cpp:150] Setting up L1_b2_relu\nI0821 08:26:21.379818 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:26:21.379828 32262 net.cpp:165] Memory required for data: 214529500\nI0821 08:26:21.379838 32262 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.379856 32262 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.379868 32262 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:26:21.379883 32262 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:21.379901 32262 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:21.379979 32262 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.379997 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.380012 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.380022 32262 net.cpp:165] Memory required for data: 230913500\nI0821 08:26:21.380033 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:26:21.380051 32262 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:26:21.380064 32262 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:21.380085 32262 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:26:21.380440 32262 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:26:21.380460 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.380470 32262 net.cpp:165] Memory required for data: 239105500\nI0821 08:26:21.380487 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:26:21.380504 32262 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:26:21.380515 32262 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:26:21.380530 32262 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:26:21.380803 32262 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:26:21.380822 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 
08:26:21.380832 32262 net.cpp:165] Memory required for data: 247297500\nI0821 08:26:21.380852 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:21.380874 32262 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:26:21.380887 32262 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:26:21.380903 32262 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.380995 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:21.381175 32262 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:26:21.381194 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.381203 32262 net.cpp:165] Memory required for data: 255489500\nI0821 08:26:21.381222 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:26:21.381237 32262 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:26:21.381248 32262 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:26:21.381271 32262 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.381291 32262 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:26:21.381314 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.381325 32262 net.cpp:165] Memory required for data: 263681500\nI0821 08:26:21.381335 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:26:21.381359 32262 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:26:21.381372 32262 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:26:21.381388 32262 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:26:21.381736 32262 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:26:21.381754 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.381763 32262 net.cpp:165] Memory required for data: 271873500\nI0821 08:26:21.381781 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:26:21.381808 32262 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:26:21.381819 32262 net.cpp:434] L1_b3_cbr2_bn 
<- L1_b3_cbr2_conv_top\nI0821 08:26:21.381840 32262 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:26:21.382122 32262 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:26:21.382141 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382150 32262 net.cpp:165] Memory required for data: 280065500\nI0821 08:26:21.382172 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:21.382194 32262 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:26:21.382205 32262 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:26:21.382221 32262 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.382305 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:21.382477 32262 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:26:21.382496 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382505 32262 net.cpp:165] Memory required for data: 288257500\nI0821 08:26:21.382524 32262 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:26:21.382540 32262 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:26:21.382551 32262 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:26:21.382570 32262 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:21.382587 32262 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:26:21.382644 32262 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:26:21.382663 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382673 32262 net.cpp:165] Memory required for data: 296449500\nI0821 08:26:21.382683 32262 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:26:21.382696 32262 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:26:21.382709 32262 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:26:21.382727 32262 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.382746 32262 net.cpp:150] Setting up L1_b3_relu\nI0821 
08:26:21.382761 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382771 32262 net.cpp:165] Memory required for data: 304641500\nI0821 08:26:21.382779 32262 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.382794 32262 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.382805 32262 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:26:21.382819 32262 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:21.382838 32262 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:21.382915 32262 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.382928 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382935 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.382939 32262 net.cpp:165] Memory required for data: 321025500\nI0821 08:26:21.382946 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:26:21.382956 32262 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:26:21.382962 32262 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:21.382982 32262 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:26:21.383299 32262 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:26:21.383317 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.383327 32262 net.cpp:165] Memory required for data: 329217500\nI0821 08:26:21.383343 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:26:21.383360 32262 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:26:21.383371 32262 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:26:21.383396 32262 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:26:21.383672 32262 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 
08:26:21.383690 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.383700 32262 net.cpp:165] Memory required for data: 337409500\nI0821 08:26:21.383721 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:21.383741 32262 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:26:21.383754 32262 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:26:21.383769 32262 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.383854 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:21.384029 32262 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:26:21.384048 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.384058 32262 net.cpp:165] Memory required for data: 345601500\nI0821 08:26:21.384076 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:26:21.384095 32262 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:26:21.384114 32262 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:26:21.384131 32262 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.384150 32262 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:26:21.384165 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.384174 32262 net.cpp:165] Memory required for data: 353793500\nI0821 08:26:21.384184 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:26:21.384209 32262 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:26:21.384222 32262 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:26:21.384243 32262 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:26:21.384593 32262 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:26:21.384613 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.384623 32262 net.cpp:165] Memory required for data: 361985500\nI0821 08:26:21.384640 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:26:21.384656 32262 net.cpp:100] Creating 
Layer L1_b4_cbr2_bn\nI0821 08:26:21.384667 32262 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:26:21.384683 32262 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:26:21.384961 32262 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:26:21.384981 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.384990 32262 net.cpp:165] Memory required for data: 370177500\nI0821 08:26:21.385016 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:21.385035 32262 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:26:21.385046 32262 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:26:21.385066 32262 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.385154 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:21.385334 32262 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:26:21.385352 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.385361 32262 net.cpp:165] Memory required for data: 378369500\nI0821 08:26:21.385380 32262 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:26:21.385401 32262 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:26:21.385412 32262 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:26:21.385426 32262 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:21.385442 32262 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:26:21.385509 32262 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:26:21.385526 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.385537 32262 net.cpp:165] Memory required for data: 386561500\nI0821 08:26:21.385547 32262 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:26:21.385562 32262 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:26:21.385573 32262 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:26:21.385592 32262 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top 
(in-place)\nI0821 08:26:21.385612 32262 net.cpp:150] Setting up L1_b4_relu\nI0821 08:26:21.385627 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.385637 32262 net.cpp:165] Memory required for data: 394753500\nI0821 08:26:21.385646 32262 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.385659 32262 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.385670 32262 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:26:21.385685 32262 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:21.385704 32262 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:21.385784 32262 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.385803 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.385817 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.385825 32262 net.cpp:165] Memory required for data: 411137500\nI0821 08:26:21.385836 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:26:21.385860 32262 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:26:21.385874 32262 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:21.385892 32262 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:26:21.386219 32262 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:26:21.386237 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.386246 32262 net.cpp:165] Memory required for data: 419329500\nI0821 08:26:21.386282 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:26:21.386304 32262 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:26:21.386317 32262 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:26:21.386334 32262 net.cpp:408] L1_b5_cbr1_bn -> 
L1_b5_cbr1_bn_top\nI0821 08:26:21.386603 32262 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0821 08:26:21.386623 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.386632 32262 net.cpp:165] Memory required for data: 427521500\nI0821 08:26:21.386653 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:21.386674 32262 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:26:21.386687 32262 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:26:21.386703 32262 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.386787 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:21.386965 32262 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:26:21.386983 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.386993 32262 net.cpp:165] Memory required for data: 435713500\nI0821 08:26:21.387012 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:26:21.387032 32262 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:26:21.387043 32262 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:26:21.387058 32262 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.387076 32262 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:26:21.387089 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.387099 32262 net.cpp:165] Memory required for data: 443905500\nI0821 08:26:21.387118 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:26:21.387146 32262 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:26:21.387167 32262 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:26:21.387189 32262 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:26:21.387554 32262 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:26:21.387574 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.387583 32262 net.cpp:165] Memory required for data: 452097500\nI0821 08:26:21.387601 32262 
layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:26:21.387619 32262 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:26:21.387630 32262 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:26:21.387646 32262 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:26:21.387922 32262 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:26:21.387941 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.387950 32262 net.cpp:165] Memory required for data: 460289500\nI0821 08:26:21.387971 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:21.387991 32262 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:26:21.388003 32262 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:26:21.388018 32262 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.388113 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:21.388288 32262 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:26:21.388306 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.388316 32262 net.cpp:165] Memory required for data: 468481500\nI0821 08:26:21.388334 32262 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:26:21.388352 32262 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:26:21.388363 32262 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:26:21.388376 32262 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:21.388397 32262 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:26:21.388455 32262 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:26:21.388473 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.388484 32262 net.cpp:165] Memory required for data: 476673500\nI0821 08:26:21.388494 32262 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:26:21.388509 32262 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:26:21.388520 32262 net.cpp:434] L1_b5_relu <- 
L1_b5_sum_eltwise_top\nI0821 08:26:21.388538 32262 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.388557 32262 net.cpp:150] Setting up L1_b5_relu\nI0821 08:26:21.388572 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.388581 32262 net.cpp:165] Memory required for data: 484865500\nI0821 08:26:21.388592 32262 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.388605 32262 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.388615 32262 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:26:21.388629 32262 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:21.388649 32262 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:21.388730 32262 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.388746 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.388761 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.388770 32262 net.cpp:165] Memory required for data: 501249500\nI0821 08:26:21.388780 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:26:21.388800 32262 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:26:21.388813 32262 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:21.388834 32262 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:26:21.389200 32262 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:26:21.389220 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.389230 32262 net.cpp:165] Memory required for data: 509441500\nI0821 08:26:21.389256 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:26:21.389273 32262 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:26:21.389286 32262 net.cpp:434] L1_b6_cbr1_bn <- 
L1_b6_cbr1_conv_top\nI0821 08:26:21.389305 32262 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0821 08:26:21.389606 32262 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:26:21.389627 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.389636 32262 net.cpp:165] Memory required for data: 517633500\nI0821 08:26:21.389657 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:21.389678 32262 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:26:21.389690 32262 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:26:21.389706 32262 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.389791 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:21.389972 32262 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:26:21.389991 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.390002 32262 net.cpp:165] Memory required for data: 525825500\nI0821 08:26:21.390019 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:26:21.390035 32262 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:26:21.390046 32262 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:26:21.390064 32262 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.390085 32262 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:26:21.390100 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.390117 32262 net.cpp:165] Memory required for data: 534017500\nI0821 08:26:21.390127 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:26:21.390152 32262 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:26:21.390164 32262 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:26:21.390185 32262 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:26:21.390539 32262 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:26:21.390559 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.390568 32262 
net.cpp:165] Memory required for data: 542209500\nI0821 08:26:21.390586 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:26:21.390604 32262 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:26:21.390614 32262 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:26:21.390630 32262 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:26:21.390907 32262 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:26:21.390926 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.390935 32262 net.cpp:165] Memory required for data: 550401500\nI0821 08:26:21.390957 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:21.390977 32262 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:26:21.390990 32262 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:26:21.391005 32262 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.391093 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:21.391278 32262 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:26:21.391296 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.391305 32262 net.cpp:165] Memory required for data: 558593500\nI0821 08:26:21.391324 32262 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:26:21.391352 32262 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:26:21.391364 32262 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:26:21.391378 32262 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:21.391399 32262 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:26:21.391453 32262 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:26:21.391471 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.391481 32262 net.cpp:165] Memory required for data: 566785500\nI0821 08:26:21.391492 32262 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:26:21.391521 32262 net.cpp:100] Creating 
Layer L1_b6_relu\nI0821 08:26:21.391533 32262 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0821 08:26:21.391547 32262 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.391566 32262 net.cpp:150] Setting up L1_b6_relu\nI0821 08:26:21.391582 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.391592 32262 net.cpp:165] Memory required for data: 574977500\nI0821 08:26:21.391600 32262 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.391613 32262 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.391625 32262 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:26:21.391640 32262 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:21.391659 32262 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:21.391738 32262 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.391757 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.391769 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.391779 32262 net.cpp:165] Memory required for data: 591361500\nI0821 08:26:21.391789 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:26:21.391813 32262 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:26:21.391826 32262 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:21.391845 32262 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:26:21.392220 32262 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:26:21.392240 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.392248 32262 net.cpp:165] Memory required for data: 599553500\nI0821 08:26:21.392266 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:26:21.392289 32262 net.cpp:100] Creating Layer 
L1_b7_cbr1_bn\nI0821 08:26:21.392302 32262 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0821 08:26:21.392319 32262 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:26:21.392593 32262 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:26:21.392612 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.392622 32262 net.cpp:165] Memory required for data: 607745500\nI0821 08:26:21.392643 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:21.392660 32262 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:26:21.392671 32262 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:26:21.392686 32262 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.392776 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:21.392951 32262 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:26:21.392969 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.392978 32262 net.cpp:165] Memory required for data: 615937500\nI0821 08:26:21.392997 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:26:21.393012 32262 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:26:21.393023 32262 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:26:21.393043 32262 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.393064 32262 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:26:21.393077 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.393087 32262 net.cpp:165] Memory required for data: 624129500\nI0821 08:26:21.393096 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:26:21.393124 32262 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:26:21.393138 32262 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:26:21.393159 32262 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:26:21.393520 32262 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:26:21.393540 32262 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.393549 32262 net.cpp:165] Memory required for data: 632321500\nI0821 08:26:21.393577 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:26:21.393595 32262 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:26:21.393607 32262 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:26:21.393627 32262 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:26:21.393916 32262 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:26:21.393939 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.393949 32262 net.cpp:165] Memory required for data: 640513500\nI0821 08:26:21.393970 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:21.393986 32262 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:26:21.393997 32262 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:26:21.394012 32262 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.394098 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:21.394280 32262 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:26:21.394302 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.394312 32262 net.cpp:165] Memory required for data: 648705500\nI0821 08:26:21.394330 32262 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:26:21.394353 32262 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:26:21.394367 32262 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:26:21.394381 32262 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:21.394402 32262 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:26:21.394455 32262 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:26:21.394475 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.394485 32262 net.cpp:165] Memory required for data: 656897500\nI0821 08:26:21.394495 32262 layer_factory.hpp:77] Creating layer 
L1_b7_relu\nI0821 08:26:21.394515 32262 net.cpp:100] Creating Layer L1_b7_relu\nI0821 08:26:21.394526 32262 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:26:21.394541 32262 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.394559 32262 net.cpp:150] Setting up L1_b7_relu\nI0821 08:26:21.394572 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.394582 32262 net.cpp:165] Memory required for data: 665089500\nI0821 08:26:21.394593 32262 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.394606 32262 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.394616 32262 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:26:21.394631 32262 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:21.394651 32262 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:21.394731 32262 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.394752 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.394767 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.394775 32262 net.cpp:165] Memory required for data: 681473500\nI0821 08:26:21.394785 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:26:21.394809 32262 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:26:21.394824 32262 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:21.394840 32262 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:26:21.395191 32262 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:26:21.395212 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.395220 32262 net.cpp:165] Memory required for data: 689665500\nI0821 08:26:21.395237 32262 layer_factory.hpp:77] Creating layer 
L1_b8_cbr1_bn\nI0821 08:26:21.395261 32262 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0821 08:26:21.395273 32262 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:26:21.395289 32262 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:26:21.395596 32262 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:26:21.395619 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.395630 32262 net.cpp:165] Memory required for data: 697857500\nI0821 08:26:21.395651 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:21.395668 32262 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:26:21.395679 32262 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:26:21.395694 32262 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.395781 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:21.395959 32262 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:26:21.395979 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.395988 32262 net.cpp:165] Memory required for data: 706049500\nI0821 08:26:21.396006 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:26:21.396026 32262 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:26:21.396039 32262 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:26:21.396055 32262 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.396075 32262 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:26:21.396090 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.396098 32262 net.cpp:165] Memory required for data: 714241500\nI0821 08:26:21.396116 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:26:21.396137 32262 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:26:21.396150 32262 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:26:21.396172 32262 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:26:21.396530 32262 
net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:26:21.396550 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.396559 32262 net.cpp:165] Memory required for data: 722433500\nI0821 08:26:21.396577 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:26:21.396595 32262 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:26:21.396606 32262 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:26:21.396627 32262 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:26:21.396914 32262 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:26:21.396934 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.396944 32262 net.cpp:165] Memory required for data: 730625500\nI0821 08:26:21.396965 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:21.396986 32262 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:26:21.396997 32262 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:26:21.397013 32262 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.397099 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:21.397285 32262 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:26:21.397303 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.397313 32262 net.cpp:165] Memory required for data: 738817500\nI0821 08:26:21.397330 32262 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:26:21.397351 32262 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:26:21.397363 32262 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:26:21.397377 32262 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:21.397392 32262 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:26:21.397449 32262 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:26:21.397467 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.397477 32262 net.cpp:165] Memory required for 
data: 747009500\nI0821 08:26:21.397487 32262 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0821 08:26:21.397502 32262 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:26:21.397514 32262 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:26:21.397533 32262 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.397553 32262 net.cpp:150] Setting up L1_b8_relu\nI0821 08:26:21.397578 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.397588 32262 net.cpp:165] Memory required for data: 755201500\nI0821 08:26:21.397598 32262 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.397611 32262 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.397622 32262 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:26:21.397637 32262 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:21.397656 32262 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:21.397739 32262 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.397758 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.397771 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.397781 32262 net.cpp:165] Memory required for data: 771585500\nI0821 08:26:21.397791 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:26:21.397815 32262 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:26:21.397828 32262 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:21.397847 32262 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:26:21.398234 32262 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:26:21.398258 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.398269 32262 net.cpp:165] Memory required for data: 
779777500\nI0821 08:26:21.398288 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0821 08:26:21.398305 32262 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:26:21.398321 32262 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:26:21.398339 32262 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:26:21.398615 32262 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:26:21.398634 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.398644 32262 net.cpp:165] Memory required for data: 787969500\nI0821 08:26:21.398665 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:21.398681 32262 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:26:21.398692 32262 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:26:21.398712 32262 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.398799 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:21.398980 32262 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:26:21.399003 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.399013 32262 net.cpp:165] Memory required for data: 796161500\nI0821 08:26:21.399031 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:26:21.399046 32262 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:26:21.399057 32262 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:26:21.399072 32262 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.399091 32262 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:26:21.399111 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.399122 32262 net.cpp:165] Memory required for data: 804353500\nI0821 08:26:21.399132 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:26:21.399157 32262 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:26:21.399169 32262 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:26:21.399191 32262 net.cpp:408] 
L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:26:21.399561 32262 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:26:21.399581 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.399590 32262 net.cpp:165] Memory required for data: 812545500\nI0821 08:26:21.399608 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:26:21.399631 32262 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:26:21.399642 32262 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:26:21.399662 32262 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:26:21.399976 32262 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:26:21.399996 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.400007 32262 net.cpp:165] Memory required for data: 820737500\nI0821 08:26:21.400058 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:21.400075 32262 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:26:21.400089 32262 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:26:21.400115 32262 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.400202 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:21.400379 32262 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:26:21.400398 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.400408 32262 net.cpp:165] Memory required for data: 828929500\nI0821 08:26:21.400426 32262 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:26:21.400446 32262 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:26:21.400459 32262 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:26:21.400472 32262 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:21.400488 32262 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:26:21.400542 32262 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:26:21.400559 32262 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:21.400569 32262 net.cpp:165] Memory required for data: 837121500\nI0821 08:26:21.400579 32262 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:26:21.400598 32262 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:26:21.400610 32262 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:26:21.400624 32262 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.400642 32262 net.cpp:150] Setting up L1_b9_relu\nI0821 08:26:21.400658 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.400667 32262 net.cpp:165] Memory required for data: 845313500\nI0821 08:26:21.400677 32262 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.400698 32262 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.400710 32262 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:26:21.400725 32262 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:21.400745 32262 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:21.400827 32262 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.400848 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.400862 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.400871 32262 net.cpp:165] Memory required for data: 861697500\nI0821 08:26:21.400882 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:26:21.400897 32262 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:26:21.400904 32262 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:21.400913 32262 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:26:21.401243 32262 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:26:21.401262 32262 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0821 08:26:21.401270 32262 net.cpp:165] Memory required for data: 863745500\nI0821 08:26:21.401286 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:26:21.401309 32262 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:26:21.401321 32262 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:26:21.401341 32262 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:26:21.401626 32262 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:26:21.401645 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.401655 32262 net.cpp:165] Memory required for data: 865793500\nI0821 08:26:21.401677 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:21.401693 32262 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:26:21.401712 32262 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:26:21.401728 32262 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.401824 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:21.402003 32262 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:26:21.402021 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.402031 32262 net.cpp:165] Memory required for data: 867841500\nI0821 08:26:21.402050 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:26:21.402065 32262 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:26:21.402081 32262 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:26:21.402097 32262 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.402125 32262 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:26:21.402140 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.402149 32262 net.cpp:165] Memory required for data: 869889500\nI0821 08:26:21.402159 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:26:21.402184 32262 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:26:21.402196 32262 net.cpp:434] 
L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:26:21.402212 32262 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:26:21.402577 32262 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:26:21.402596 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.402606 32262 net.cpp:165] Memory required for data: 871937500\nI0821 08:26:21.402623 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:26:21.402644 32262 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:26:21.402657 32262 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:26:21.402673 32262 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:26:21.402952 32262 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:26:21.402976 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.402986 32262 net.cpp:165] Memory required for data: 873985500\nI0821 08:26:21.403007 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:21.403023 32262 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:26:21.403034 32262 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:26:21.403050 32262 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.403142 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:21.403331 32262 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:26:21.403348 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.403358 32262 net.cpp:165] Memory required for data: 876033500\nI0821 08:26:21.403378 32262 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:26:21.403394 32262 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:26:21.403406 32262 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:21.403427 32262 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:26:21.403532 32262 net.cpp:150] Setting up L2_b1_pool\nI0821 08:26:21.403553 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.403561 32262 net.cpp:165] Memory 
required for data: 878081500\nI0821 08:26:21.403573 32262 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:26:21.403592 32262 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:26:21.403605 32262 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:26:21.403620 32262 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:26:21.403635 32262 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:26:21.403692 32262 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:26:21.403712 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.403722 32262 net.cpp:165] Memory required for data: 880129500\nI0821 08:26:21.403733 32262 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:26:21.403748 32262 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:26:21.403759 32262 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:26:21.403786 32262 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.403810 32262 net.cpp:150] Setting up L2_b1_relu\nI0821 08:26:21.403823 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.403832 32262 net.cpp:165] Memory required for data: 882177500\nI0821 08:26:21.403842 32262 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:26:21.403913 32262 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:26:21.403933 32262 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:26:21.406280 32262 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:26:21.406302 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.406312 32262 net.cpp:165] Memory required for data: 884225500\nI0821 08:26:21.406324 32262 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:26:21.406345 32262 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:26:21.406358 32262 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:26:21.406373 32262 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:26:21.406388 32262 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 
08:26:21.406494 32262 net.cpp:150] Setting up L2_b1_concat0\nI0821 08:26:21.406514 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.406523 32262 net.cpp:165] Memory required for data: 888321500\nI0821 08:26:21.406534 32262 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.406549 32262 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.406560 32262 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:26:21.406579 32262 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:21.406601 32262 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:21.406680 32262 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.406703 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.406716 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.406726 32262 net.cpp:165] Memory required for data: 896513500\nI0821 08:26:21.406738 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:26:21.406757 32262 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:26:21.406769 32262 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:21.406788 32262 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:26:21.408287 32262 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:26:21.408308 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.408318 32262 net.cpp:165] Memory required for data: 900609500\nI0821 08:26:21.408335 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:26:21.408356 32262 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:26:21.408370 32262 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:26:21.408390 32262 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:26:21.408681 32262 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 
08:26:21.408700 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.408710 32262 net.cpp:165] Memory required for data: 904705500\nI0821 08:26:21.408732 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:21.408749 32262 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:26:21.408761 32262 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:26:21.408777 32262 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.408869 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:21.409051 32262 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:26:21.409071 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.409080 32262 net.cpp:165] Memory required for data: 908801500\nI0821 08:26:21.409106 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:26:21.409128 32262 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:26:21.409140 32262 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:26:21.409165 32262 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.409184 32262 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:26:21.409199 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.409209 32262 net.cpp:165] Memory required for data: 912897500\nI0821 08:26:21.409219 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:26:21.409245 32262 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:26:21.409258 32262 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:26:21.409276 32262 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:26:21.409788 32262 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:26:21.409808 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.409818 32262 net.cpp:165] Memory required for data: 916993500\nI0821 08:26:21.409837 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:26:21.409858 32262 net.cpp:100] Creating 
Layer L2_b2_cbr2_bn\nI0821 08:26:21.409873 32262 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:26:21.409890 32262 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:26:21.410183 32262 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:26:21.410207 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.410217 32262 net.cpp:165] Memory required for data: 921089500\nI0821 08:26:21.410238 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:21.410254 32262 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:26:21.410266 32262 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:26:21.410282 32262 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.410370 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:21.410554 32262 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:26:21.410573 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.410583 32262 net.cpp:165] Memory required for data: 925185500\nI0821 08:26:21.410600 32262 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:26:21.410617 32262 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:26:21.410629 32262 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:26:21.410642 32262 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:21.410663 32262 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:26:21.410712 32262 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:26:21.410728 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.410738 32262 net.cpp:165] Memory required for data: 929281500\nI0821 08:26:21.410749 32262 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:26:21.410768 32262 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:26:21.410779 32262 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:26:21.410794 32262 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top 
(in-place)\nI0821 08:26:21.410812 32262 net.cpp:150] Setting up L2_b2_relu\nI0821 08:26:21.410827 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.410836 32262 net.cpp:165] Memory required for data: 933377500\nI0821 08:26:21.410845 32262 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.410861 32262 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.410871 32262 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:26:21.410887 32262 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:21.410905 32262 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:21.410990 32262 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.411010 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.411022 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.411031 32262 net.cpp:165] Memory required for data: 941569500\nI0821 08:26:21.411042 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:26:21.411075 32262 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:26:21.411089 32262 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:21.411116 32262 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:26:21.411617 32262 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:26:21.411638 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.411648 32262 net.cpp:165] Memory required for data: 945665500\nI0821 08:26:21.411665 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:26:21.411686 32262 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:26:21.411698 32262 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:26:21.411720 32262 net.cpp:408] L2_b3_cbr1_bn -> 
L2_b3_cbr1_bn_top\nI0821 08:26:21.412000 32262 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0821 08:26:21.412019 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.412029 32262 net.cpp:165] Memory required for data: 949761500\nI0821 08:26:21.412050 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:21.412065 32262 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:26:21.412077 32262 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:26:21.412092 32262 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.412214 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:21.412395 32262 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:26:21.412415 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.412423 32262 net.cpp:165] Memory required for data: 953857500\nI0821 08:26:21.412442 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:26:21.412457 32262 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:26:21.412468 32262 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:26:21.412487 32262 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.412508 32262 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:26:21.412521 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.412531 32262 net.cpp:165] Memory required for data: 957953500\nI0821 08:26:21.412541 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:26:21.412565 32262 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:26:21.412580 32262 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:26:21.412597 32262 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:26:21.413122 32262 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:26:21.413142 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.413152 32262 net.cpp:165] Memory required for data: 962049500\nI0821 08:26:21.413169 32262 
layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:26:21.413192 32262 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:26:21.413203 32262 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:26:21.413220 32262 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:26:21.413508 32262 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:26:21.413530 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.413542 32262 net.cpp:165] Memory required for data: 966145500\nI0821 08:26:21.413563 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:21.413579 32262 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:26:21.413590 32262 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:26:21.413606 32262 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.413693 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:21.413880 32262 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:26:21.413899 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.413908 32262 net.cpp:165] Memory required for data: 970241500\nI0821 08:26:21.413926 32262 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:26:21.413944 32262 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:26:21.413954 32262 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:26:21.413976 32262 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:21.414000 32262 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:26:21.414049 32262 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:26:21.414068 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.414077 32262 net.cpp:165] Memory required for data: 974337500\nI0821 08:26:21.414088 32262 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:26:21.414129 32262 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:26:21.414144 32262 net.cpp:434] L2_b3_relu <- 
L2_b3_sum_eltwise_top\nI0821 08:26:21.414160 32262 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.414180 32262 net.cpp:150] Setting up L2_b3_relu\nI0821 08:26:21.414194 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.414203 32262 net.cpp:165] Memory required for data: 978433500\nI0821 08:26:21.414214 32262 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.414229 32262 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.414242 32262 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:26:21.414255 32262 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:21.414273 32262 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:21.414361 32262 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.414381 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.414396 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.414404 32262 net.cpp:165] Memory required for data: 986625500\nI0821 08:26:21.414415 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:26:21.414435 32262 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:26:21.414448 32262 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:21.414470 32262 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:26:21.414994 32262 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:26:21.415014 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.415024 32262 net.cpp:165] Memory required for data: 990721500\nI0821 08:26:21.415042 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:26:21.415060 32262 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:26:21.415071 32262 net.cpp:434] L2_b4_cbr1_bn <- 
L2_b4_cbr1_conv_top\nI0821 08:26:21.415092 32262 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0821 08:26:21.415383 32262 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:26:21.415402 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.415412 32262 net.cpp:165] Memory required for data: 994817500\nI0821 08:26:21.415432 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:21.415454 32262 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:26:21.415467 32262 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:26:21.415482 32262 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.415573 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:21.415756 32262 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:26:21.415776 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.415784 32262 net.cpp:165] Memory required for data: 998913500\nI0821 08:26:21.415802 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:26:21.415822 32262 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:26:21.415834 32262 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:26:21.415849 32262 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.415868 32262 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:26:21.415884 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.415892 32262 net.cpp:165] Memory required for data: 1003009500\nI0821 08:26:21.415904 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:26:21.415943 32262 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:26:21.415952 32262 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:26:21.415964 32262 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:26:21.416503 32262 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:26:21.416522 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.416532 32262 
net.cpp:165] Memory required for data: 1007105500\nI0821 08:26:21.416549 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:26:21.416566 32262 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:26:21.416579 32262 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:26:21.416599 32262 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:26:21.416890 32262 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:26:21.416909 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.416918 32262 net.cpp:165] Memory required for data: 1011201500\nI0821 08:26:21.416939 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:21.416960 32262 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:26:21.416972 32262 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:26:21.416987 32262 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.417076 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:21.417268 32262 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:26:21.417289 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.417299 32262 net.cpp:165] Memory required for data: 1015297500\nI0821 08:26:21.417315 32262 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:26:21.417333 32262 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:26:21.417345 32262 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:26:21.417363 32262 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:21.417382 32262 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:26:21.417428 32262 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:26:21.417449 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.417460 32262 net.cpp:165] Memory required for data: 1019393500\nI0821 08:26:21.417470 32262 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:26:21.417484 32262 net.cpp:100] Creating 
Layer L2_b4_relu\nI0821 08:26:21.417496 32262 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0821 08:26:21.417510 32262 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:26:21.417528 32262 net.cpp:150] Setting up L2_b4_relu\nI0821 08:26:21.417543 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.417552 32262 net.cpp:165] Memory required for data: 1023489500\nI0821 08:26:21.417562 32262 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.417580 32262 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.417593 32262 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:26:21.417606 32262 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:21.417626 32262 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:21.417706 32262 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.417723 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.417737 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.417745 32262 net.cpp:165] Memory required for data: 1031681500\nI0821 08:26:21.417755 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:26:21.417775 32262 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:26:21.417788 32262 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:21.417809 32262 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:26:21.418354 32262 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:26:21.418380 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.418390 32262 net.cpp:165] Memory required for data: 1035777500\nI0821 08:26:21.418409 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:26:21.418427 32262 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI0821 08:26:21.418438 32262 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0821 08:26:21.418459 32262 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:26:21.418746 32262 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:26:21.418766 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.418774 32262 net.cpp:165] Memory required for data: 1039873500\nI0821 08:26:21.418797 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:21.418817 32262 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:26:21.418828 32262 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:26:21.418844 32262 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.418934 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:21.419127 32262 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:26:21.419147 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.419157 32262 net.cpp:165] Memory required for data: 1043969500\nI0821 08:26:21.419174 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:26:21.419194 32262 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:26:21.419206 32262 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:26:21.419221 32262 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.419241 32262 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:26:21.419255 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.419265 32262 net.cpp:165] Memory required for data: 1048065500\nI0821 08:26:21.419275 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:26:21.419301 32262 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:26:21.419312 32262 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:26:21.419335 32262 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:26:21.419842 32262 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:26:21.419862 32262 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.419870 32262 net.cpp:165] Memory required for data: 1052161500\nI0821 08:26:21.419888 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:26:21.419905 32262 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:26:21.419916 32262 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:26:21.419932 32262 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:26:21.420228 32262 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:26:21.420248 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.420258 32262 net.cpp:165] Memory required for data: 1056257500\nI0821 08:26:21.420279 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:21.420295 32262 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:26:21.420306 32262 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:26:21.420326 32262 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.420414 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:21.420603 32262 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:26:21.420621 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.420630 32262 net.cpp:165] Memory required for data: 1060353500\nI0821 08:26:21.420650 32262 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:26:21.420666 32262 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:26:21.420677 32262 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:26:21.420691 32262 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:21.420711 32262 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:26:21.420759 32262 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:26:21.420783 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.420800 32262 net.cpp:165] Memory required for data: 1064449500\nI0821 08:26:21.420811 32262 layer_factory.hpp:77] 
Creating layer L2_b5_relu\nI0821 08:26:21.420831 32262 net.cpp:100] Creating Layer L2_b5_relu\nI0821 08:26:21.420842 32262 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:26:21.420857 32262 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.420876 32262 net.cpp:150] Setting up L2_b5_relu\nI0821 08:26:21.420892 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.420902 32262 net.cpp:165] Memory required for data: 1068545500\nI0821 08:26:21.420908 32262 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.420920 32262 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.420927 32262 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:26:21.420934 32262 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:21.420945 32262 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:21.420999 32262 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.421015 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.421027 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.421036 32262 net.cpp:165] Memory required for data: 1076737500\nI0821 08:26:21.421046 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:26:21.421068 32262 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:26:21.421080 32262 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:21.421109 32262 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:26:21.421620 32262 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:26:21.421639 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.421649 32262 net.cpp:165] Memory required for data: 1080833500\nI0821 08:26:21.421666 32262 layer_factory.hpp:77] Creating 
layer L2_b6_cbr1_bn\nI0821 08:26:21.421684 32262 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0821 08:26:21.421695 32262 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:26:21.421715 32262 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:26:21.422010 32262 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:26:21.422029 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.422039 32262 net.cpp:165] Memory required for data: 1084929500\nI0821 08:26:21.422060 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:21.422081 32262 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:26:21.422093 32262 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:26:21.422116 32262 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.422204 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:21.422389 32262 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:26:21.422407 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.422417 32262 net.cpp:165] Memory required for data: 1089025500\nI0821 08:26:21.422435 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:26:21.422451 32262 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:26:21.422462 32262 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:26:21.422482 32262 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.422502 32262 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:26:21.422516 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.422526 32262 net.cpp:165] Memory required for data: 1093121500\nI0821 08:26:21.422538 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:26:21.422564 32262 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:26:21.422577 32262 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:26:21.422600 32262 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:26:21.423122 32262 
net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:26:21.423149 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.423158 32262 net.cpp:165] Memory required for data: 1097217500\nI0821 08:26:21.423177 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:26:21.423193 32262 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:26:21.423205 32262 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:26:21.423223 32262 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:26:21.423511 32262 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:26:21.423529 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.423538 32262 net.cpp:165] Memory required for data: 1101313500\nI0821 08:26:21.423560 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:21.423576 32262 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:26:21.423588 32262 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:26:21.423609 32262 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.423696 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:21.423879 32262 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:26:21.423904 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.423913 32262 net.cpp:165] Memory required for data: 1105409500\nI0821 08:26:21.423931 32262 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:26:21.423949 32262 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:26:21.423959 32262 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:26:21.423972 32262 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:21.423992 32262 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:26:21.424039 32262 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:26:21.424057 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.424067 32262 net.cpp:165] Memory required for 
data: 1109505500\nI0821 08:26:21.424077 32262 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0821 08:26:21.424095 32262 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:26:21.424115 32262 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:26:21.424132 32262 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.424150 32262 net.cpp:150] Setting up L2_b6_relu\nI0821 08:26:21.424166 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.424175 32262 net.cpp:165] Memory required for data: 1113601500\nI0821 08:26:21.424185 32262 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.424207 32262 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.424219 32262 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:26:21.424235 32262 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:21.424255 32262 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:21.424336 32262 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.424362 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.424377 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.424387 32262 net.cpp:165] Memory required for data: 1121793500\nI0821 08:26:21.424397 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:26:21.424417 32262 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:26:21.424430 32262 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:21.424448 32262 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:26:21.424962 32262 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:26:21.424983 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.424993 32262 net.cpp:165] Memory required for data: 
1125889500\nI0821 08:26:21.425009 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0821 08:26:21.425030 32262 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:26:21.425050 32262 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:26:21.425068 32262 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:26:21.425375 32262 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:26:21.425395 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.425405 32262 net.cpp:165] Memory required for data: 1129985500\nI0821 08:26:21.425426 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:21.425443 32262 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:26:21.425454 32262 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:26:21.425474 32262 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.425565 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:21.425752 32262 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:26:21.425771 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.425781 32262 net.cpp:165] Memory required for data: 1134081500\nI0821 08:26:21.425799 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:26:21.425814 32262 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:26:21.425825 32262 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:26:21.425848 32262 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.425868 32262 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:26:21.425881 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.425891 32262 net.cpp:165] Memory required for data: 1138177500\nI0821 08:26:21.425901 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:26:21.425926 32262 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:26:21.425940 32262 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:26:21.425957 32262 net.cpp:408] 
L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:26:21.426486 32262 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:26:21.426506 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.426514 32262 net.cpp:165] Memory required for data: 1142273500\nI0821 08:26:21.426532 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:26:21.426554 32262 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:26:21.426566 32262 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:26:21.426584 32262 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:26:21.426875 32262 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:26:21.426894 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.426904 32262 net.cpp:165] Memory required for data: 1146369500\nI0821 08:26:21.426925 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:21.426941 32262 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:26:21.426954 32262 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:26:21.426975 32262 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.427064 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:21.427256 32262 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:26:21.427275 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.427284 32262 net.cpp:165] Memory required for data: 1150465500\nI0821 08:26:21.427302 32262 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:26:21.427320 32262 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:26:21.427330 32262 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:26:21.427345 32262 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:21.427364 32262 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:26:21.427412 32262 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:26:21.427430 32262 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:21.427440 32262 net.cpp:165] Memory required for data: 1154561500\nI0821 08:26:21.427451 32262 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:26:21.427469 32262 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:26:21.427482 32262 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:26:21.427506 32262 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.427525 32262 net.cpp:150] Setting up L2_b7_relu\nI0821 08:26:21.427541 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.427551 32262 net.cpp:165] Memory required for data: 1158657500\nI0821 08:26:21.427559 32262 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.427572 32262 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.427583 32262 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:26:21.427603 32262 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:21.427625 32262 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:21.427706 32262 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.427731 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.427744 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.427753 32262 net.cpp:165] Memory required for data: 1166849500\nI0821 08:26:21.427764 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:26:21.427784 32262 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:26:21.427798 32262 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:21.427815 32262 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:26:21.428349 32262 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:26:21.428369 32262 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:21.428378 32262 net.cpp:165] Memory required for data: 1170945500\nI0821 08:26:21.428395 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:26:21.428416 32262 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:26:21.428429 32262 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:26:21.428445 32262 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:26:21.428737 32262 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:26:21.428755 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.428766 32262 net.cpp:165] Memory required for data: 1175041500\nI0821 08:26:21.428786 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:21.428802 32262 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:26:21.428813 32262 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:26:21.428833 32262 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.428925 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:21.429131 32262 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:26:21.429149 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.429158 32262 net.cpp:165] Memory required for data: 1179137500\nI0821 08:26:21.429177 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:26:21.429193 32262 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:26:21.429203 32262 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:26:21.429224 32262 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.429244 32262 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:26:21.429257 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.429267 32262 net.cpp:165] Memory required for data: 1183233500\nI0821 08:26:21.429277 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:26:21.429302 32262 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:26:21.429316 32262 net.cpp:434] 
L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:26:21.429333 32262 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:26:21.429852 32262 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:26:21.429872 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.429883 32262 net.cpp:165] Memory required for data: 1187329500\nI0821 08:26:21.429900 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:26:21.429921 32262 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:26:21.429944 32262 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:26:21.429963 32262 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:26:21.430269 32262 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:26:21.430287 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.430297 32262 net.cpp:165] Memory required for data: 1191425500\nI0821 08:26:21.430320 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:21.430335 32262 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:26:21.430346 32262 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:26:21.430362 32262 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.430455 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:21.430640 32262 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:26:21.430662 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.430673 32262 net.cpp:165] Memory required for data: 1195521500\nI0821 08:26:21.430691 32262 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:26:21.430707 32262 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:26:21.430719 32262 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:26:21.430732 32262 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:21.430748 32262 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:26:21.430800 32262 net.cpp:150] Setting up 
L2_b8_sum_eltwise\nI0821 08:26:21.430819 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.430829 32262 net.cpp:165] Memory required for data: 1199617500\nI0821 08:26:21.430838 32262 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:26:21.430852 32262 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:26:21.430865 32262 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:26:21.430884 32262 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.430903 32262 net.cpp:150] Setting up L2_b8_relu\nI0821 08:26:21.430917 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.430927 32262 net.cpp:165] Memory required for data: 1203713500\nI0821 08:26:21.430936 32262 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.430951 32262 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.430963 32262 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:26:21.430982 32262 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:21.431022 32262 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:21.431115 32262 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.431135 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.431149 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.431159 32262 net.cpp:165] Memory required for data: 1211905500\nI0821 08:26:21.431169 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:26:21.431198 32262 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:26:21.431212 32262 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:21.431238 32262 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:26:21.431757 32262 net.cpp:150] Setting up 
L2_b9_cbr1_conv\nI0821 08:26:21.431777 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.431787 32262 net.cpp:165] Memory required for data: 1216001500\nI0821 08:26:21.431804 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:26:21.431821 32262 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:26:21.431838 32262 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:26:21.431855 32262 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:26:21.432155 32262 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:26:21.432174 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.432194 32262 net.cpp:165] Memory required for data: 1220097500\nI0821 08:26:21.432217 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:21.432234 32262 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:26:21.432245 32262 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:26:21.432260 32262 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.432358 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:21.432543 32262 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:26:21.432565 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.432575 32262 net.cpp:165] Memory required for data: 1224193500\nI0821 08:26:21.432593 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:26:21.432608 32262 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:26:21.432621 32262 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:26:21.432633 32262 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.432652 32262 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:26:21.432667 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.432677 32262 net.cpp:165] Memory required for data: 1228289500\nI0821 08:26:21.432687 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:26:21.432713 32262 
net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:26:21.432726 32262 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:26:21.432749 32262 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:26:21.433271 32262 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:26:21.433291 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.433300 32262 net.cpp:165] Memory required for data: 1232385500\nI0821 08:26:21.433318 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:26:21.433339 32262 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:26:21.433352 32262 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:26:21.433373 32262 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:26:21.433667 32262 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:26:21.433686 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.433696 32262 net.cpp:165] Memory required for data: 1236481500\nI0821 08:26:21.433764 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:21.433787 32262 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:26:21.433800 32262 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:26:21.433816 32262 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.433918 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:21.434110 32262 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:26:21.434130 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.434139 32262 net.cpp:165] Memory required for data: 1240577500\nI0821 08:26:21.434159 32262 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:26:21.434180 32262 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:26:21.434192 32262 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:26:21.434206 32262 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:21.434222 32262 net.cpp:408] L2_b9_sum_eltwise 
-> L2_b9_sum_eltwise_top\nI0821 08:26:21.434274 32262 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0821 08:26:21.434293 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.434303 32262 net.cpp:165] Memory required for data: 1244673500\nI0821 08:26:21.434314 32262 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:26:21.434327 32262 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:26:21.434340 32262 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:26:21.434357 32262 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.434377 32262 net.cpp:150] Setting up L2_b9_relu\nI0821 08:26:21.434392 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.434401 32262 net.cpp:165] Memory required for data: 1248769500\nI0821 08:26:21.434420 32262 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.434435 32262 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.434447 32262 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:26:21.434466 32262 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:21.434487 32262 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:21.434573 32262 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.434592 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.434605 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.434614 32262 net.cpp:165] Memory required for data: 1256961500\nI0821 08:26:21.434625 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:26:21.434645 32262 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:26:21.434659 32262 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:21.434681 32262 net.cpp:408] L3_b1_cbr1_conv -> 
L3_b1_cbr1_conv_top\nI0821 08:26:21.435217 32262 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0821 08:26:21.435237 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.435247 32262 net.cpp:165] Memory required for data: 1257985500\nI0821 08:26:21.435266 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:26:21.435282 32262 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:26:21.435294 32262 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:26:21.435315 32262 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:26:21.435619 32262 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:26:21.435642 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.435652 32262 net.cpp:165] Memory required for data: 1259009500\nI0821 08:26:21.435673 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:21.435690 32262 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:26:21.435703 32262 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:26:21.435719 32262 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.435806 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:21.436002 32262 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:26:21.436022 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.436031 32262 net.cpp:165] Memory required for data: 1260033500\nI0821 08:26:21.436049 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:26:21.436069 32262 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:26:21.436081 32262 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:26:21.436095 32262 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.436121 32262 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:26:21.436137 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.436147 32262 net.cpp:165] Memory required for data: 1261057500\nI0821 08:26:21.436157 32262 layer_factory.hpp:77] 
Creating layer L3_b1_cbr2_conv\nI0821 08:26:21.436182 32262 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0821 08:26:21.436195 32262 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:26:21.436213 32262 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:26:21.436738 32262 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:26:21.436758 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.436767 32262 net.cpp:165] Memory required for data: 1262081500\nI0821 08:26:21.436785 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:26:21.436806 32262 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:26:21.436820 32262 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:26:21.436841 32262 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:26:21.437146 32262 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:26:21.437166 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.437183 32262 net.cpp:165] Memory required for data: 1263105500\nI0821 08:26:21.437206 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:21.437222 32262 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:26:21.437234 32262 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:26:21.437254 32262 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.437350 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:21.437543 32262 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:26:21.437563 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.437572 32262 net.cpp:165] Memory required for data: 1264129500\nI0821 08:26:21.437592 32262 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:26:21.437608 32262 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:26:21.437619 32262 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:21.437639 32262 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:26:21.437710 32262 net.cpp:150] 
Setting up L3_b1_pool\nI0821 08:26:21.437727 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.437736 32262 net.cpp:165] Memory required for data: 1265153500\nI0821 08:26:21.437747 32262 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:26:21.437764 32262 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:26:21.437775 32262 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:26:21.437788 32262 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:26:21.437808 32262 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:26:21.437865 32262 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:26:21.437883 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.437894 32262 net.cpp:165] Memory required for data: 1266177500\nI0821 08:26:21.437904 32262 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:26:21.437918 32262 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:26:21.437930 32262 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:26:21.437945 32262 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.437963 32262 net.cpp:150] Setting up L3_b1_relu\nI0821 08:26:21.437978 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.437988 32262 net.cpp:165] Memory required for data: 1267201500\nI0821 08:26:21.437997 32262 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:26:21.438019 32262 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:26:21.438035 32262 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:26:21.439308 32262 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:26:21.439330 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.439340 32262 net.cpp:165] Memory required for data: 1268225500\nI0821 08:26:21.439352 32262 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:26:21.439369 32262 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:26:21.439381 32262 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 
08:26:21.439394 32262 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 08:26:21.439414 32262 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:26:21.439476 32262 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:26:21.439502 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.439512 32262 net.cpp:165] Memory required for data: 1270273500\nI0821 08:26:21.439522 32262 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.439535 32262 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.439548 32262 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:26:21.439563 32262 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:21.439587 32262 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:21.439676 32262 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.439694 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.439707 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.439726 32262 net.cpp:165] Memory required for data: 1274369500\nI0821 08:26:21.439738 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:26:21.439762 32262 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:26:21.439776 32262 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:21.439795 32262 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:26:21.441829 32262 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:26:21.441851 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.441861 32262 net.cpp:165] Memory required for data: 1276417500\nI0821 08:26:21.441879 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:26:21.441901 32262 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:26:21.441915 32262 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 
08:26:21.441931 32262 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 08:26:21.442250 32262 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:26:21.442270 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.442279 32262 net.cpp:165] Memory required for data: 1278465500\nI0821 08:26:21.442301 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:21.442318 32262 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:26:21.442329 32262 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:26:21.442345 32262 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.442440 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:21.442629 32262 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:26:21.442652 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.442662 32262 net.cpp:165] Memory required for data: 1280513500\nI0821 08:26:21.442682 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:26:21.442698 32262 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:26:21.442708 32262 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:26:21.442723 32262 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.442744 32262 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:26:21.442757 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.442767 32262 net.cpp:165] Memory required for data: 1282561500\nI0821 08:26:21.442777 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:26:21.442801 32262 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:26:21.442816 32262 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:26:21.442833 32262 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:26:21.443907 32262 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:26:21.443927 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.443936 32262 net.cpp:165] Memory required for data: 
1284609500\nI0821 08:26:21.443953 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0821 08:26:21.443975 32262 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:26:21.443989 32262 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:26:21.444006 32262 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:26:21.444308 32262 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:26:21.444329 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.444337 32262 net.cpp:165] Memory required for data: 1286657500\nI0821 08:26:21.444358 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:21.444375 32262 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:26:21.444386 32262 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:26:21.444407 32262 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.444496 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:21.444691 32262 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:26:21.444710 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.444720 32262 net.cpp:165] Memory required for data: 1288705500\nI0821 08:26:21.444738 32262 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:26:21.444768 32262 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:26:21.444782 32262 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:26:21.444797 32262 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:21.444813 32262 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:26:21.444875 32262 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:26:21.444897 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.444907 32262 net.cpp:165] Memory required for data: 1290753500\nI0821 08:26:21.444918 32262 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:26:21.444933 32262 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:26:21.444944 32262 
net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:26:21.444964 32262 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:26:21.444985 32262 net.cpp:150] Setting up L3_b2_relu\nI0821 08:26:21.444999 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.445009 32262 net.cpp:165] Memory required for data: 1292801500\nI0821 08:26:21.445020 32262 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.445034 32262 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.445044 32262 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:26:21.445060 32262 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:21.445078 32262 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:21.445173 32262 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.445190 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.445204 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.445212 32262 net.cpp:165] Memory required for data: 1296897500\nI0821 08:26:21.445224 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:26:21.445243 32262 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:26:21.445256 32262 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:21.445279 32262 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:26:21.446352 32262 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:26:21.446372 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.446382 32262 net.cpp:165] Memory required for data: 1298945500\nI0821 08:26:21.446399 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:26:21.446421 32262 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:26:21.446434 32262 net.cpp:434] 
L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:26:21.446451 32262 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:26:21.446746 32262 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:26:21.446765 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.446775 32262 net.cpp:165] Memory required for data: 1300993500\nI0821 08:26:21.446796 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:21.446812 32262 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:26:21.446825 32262 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:26:21.446840 32262 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.446938 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:21.447134 32262 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:26:21.447154 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.447165 32262 net.cpp:165] Memory required for data: 1303041500\nI0821 08:26:21.447182 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:26:21.447197 32262 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:26:21.447209 32262 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:26:21.447223 32262 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.447242 32262 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:26:21.447257 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.447274 32262 net.cpp:165] Memory required for data: 1305089500\nI0821 08:26:21.447285 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:26:21.447309 32262 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:26:21.447324 32262 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:26:21.447346 32262 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:26:21.448417 32262 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:26:21.448437 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.448447 32262 
net.cpp:165] Memory required for data: 1307137500\nI0821 08:26:21.448465 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:26:21.448489 32262 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:26:21.448503 32262 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:26:21.448519 32262 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:26:21.448887 32262 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:26:21.448907 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.448917 32262 net.cpp:165] Memory required for data: 1309185500\nI0821 08:26:21.448937 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:21.448958 32262 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:26:21.448971 32262 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:26:21.448987 32262 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.449084 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:21.449285 32262 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:26:21.449303 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.449313 32262 net.cpp:165] Memory required for data: 1311233500\nI0821 08:26:21.449331 32262 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:26:21.449353 32262 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:26:21.449367 32262 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:26:21.449379 32262 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:21.449396 32262 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:26:21.449457 32262 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:26:21.449476 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.449486 32262 net.cpp:165] Memory required for data: 1313281500\nI0821 08:26:21.449496 32262 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:26:21.449511 32262 net.cpp:100] Creating Layer 
L3_b3_relu\nI0821 08:26:21.449522 32262 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0821 08:26:21.449542 32262 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.449563 32262 net.cpp:150] Setting up L3_b3_relu\nI0821 08:26:21.449578 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.449586 32262 net.cpp:165] Memory required for data: 1315329500\nI0821 08:26:21.449596 32262 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.449611 32262 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.449621 32262 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:26:21.449637 32262 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:21.449658 32262 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:21.449744 32262 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.449764 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.449777 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.449786 32262 net.cpp:165] Memory required for data: 1319425500\nI0821 08:26:21.449797 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:26:21.449821 32262 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:26:21.449836 32262 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:21.449854 32262 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:26:21.450947 32262 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:26:21.450968 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.450976 32262 net.cpp:165] Memory required for data: 1321473500\nI0821 08:26:21.450995 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:26:21.451017 32262 net.cpp:100] Creating Layer 
L3_b4_cbr1_bn\nI0821 08:26:21.451030 32262 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0821 08:26:21.451048 32262 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:26:21.451362 32262 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:26:21.451381 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.451390 32262 net.cpp:165] Memory required for data: 1323521500\nI0821 08:26:21.451411 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:21.451428 32262 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:26:21.451441 32262 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:26:21.451457 32262 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.451551 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:21.451745 32262 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:26:21.451764 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.451773 32262 net.cpp:165] Memory required for data: 1325569500\nI0821 08:26:21.451792 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:26:21.451808 32262 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:26:21.451819 32262 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:26:21.451839 32262 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.451860 32262 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:26:21.451874 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.451884 32262 net.cpp:165] Memory required for data: 1327617500\nI0821 08:26:21.451894 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:26:21.451918 32262 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:26:21.451932 32262 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:26:21.451951 32262 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:26:21.453030 32262 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:26:21.453050 32262 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI0821 08:26:21.453060 32262 net.cpp:165] Memory required for data: 1329665500\nI0821 08:26:21.453078 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:26:21.453099 32262 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:26:21.453119 32262 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:26:21.453136 32262 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:26:21.453444 32262 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:26:21.453462 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.453471 32262 net.cpp:165] Memory required for data: 1331713500\nI0821 08:26:21.453493 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:21.453514 32262 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:26:21.453526 32262 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:26:21.453542 32262 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.453636 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:21.453842 32262 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:26:21.453861 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.453871 32262 net.cpp:165] Memory required for data: 1333761500\nI0821 08:26:21.453888 32262 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:26:21.453909 32262 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:26:21.453922 32262 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:26:21.453935 32262 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:21.453956 32262 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:26:21.454015 32262 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:26:21.454041 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.454052 32262 net.cpp:165] Memory required for data: 1335809500\nI0821 08:26:21.454063 32262 layer_factory.hpp:77] Creating layer 
L3_b4_relu\nI0821 08:26:21.454082 32262 net.cpp:100] Creating Layer L3_b4_relu\nI0821 08:26:21.454094 32262 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:26:21.454118 32262 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:26:21.454138 32262 net.cpp:150] Setting up L3_b4_relu\nI0821 08:26:21.454154 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.454162 32262 net.cpp:165] Memory required for data: 1337857500\nI0821 08:26:21.454172 32262 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.454186 32262 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.454197 32262 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:26:21.454212 32262 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:21.454232 32262 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:21.454315 32262 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.454334 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.454346 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.454356 32262 net.cpp:165] Memory required for data: 1341953500\nI0821 08:26:21.454366 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:26:21.454391 32262 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:26:21.454404 32262 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:21.454424 32262 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:26:21.455497 32262 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:26:21.455516 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.455526 32262 net.cpp:165] Memory required for data: 1344001500\nI0821 08:26:21.455543 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 
08:26:21.455564 32262 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 08:26:21.455576 32262 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:26:21.455598 32262 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:26:21.456904 32262 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:26:21.456925 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.456936 32262 net.cpp:165] Memory required for data: 1346049500\nI0821 08:26:21.456959 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:21.456979 32262 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:26:21.456991 32262 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:26:21.457007 32262 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.457114 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:21.457310 32262 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:26:21.457330 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.457340 32262 net.cpp:165] Memory required for data: 1348097500\nI0821 08:26:21.457358 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:26:21.457378 32262 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:26:21.457391 32262 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:26:21.457406 32262 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.457430 32262 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:26:21.457445 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.457456 32262 net.cpp:165] Memory required for data: 1350145500\nI0821 08:26:21.457466 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:26:21.457486 32262 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:26:21.457499 32262 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:26:21.457521 32262 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:26:21.459590 32262 net.cpp:150] Setting up 
L3_b5_cbr2_conv\nI0821 08:26:21.459614 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.459623 32262 net.cpp:165] Memory required for data: 1352193500\nI0821 08:26:21.459641 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:26:21.459663 32262 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:26:21.459676 32262 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:26:21.459693 32262 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:26:21.459987 32262 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:26:21.460007 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460017 32262 net.cpp:165] Memory required for data: 1354241500\nI0821 08:26:21.460038 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:21.460059 32262 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:26:21.460073 32262 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:26:21.460089 32262 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.460189 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:21.460379 32262 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:26:21.460398 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460408 32262 net.cpp:165] Memory required for data: 1356289500\nI0821 08:26:21.460427 32262 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:26:21.460448 32262 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:26:21.460461 32262 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:26:21.460475 32262 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:21.460496 32262 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:26:21.460551 32262 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:26:21.460569 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460579 32262 net.cpp:165] Memory required for data: 1358337500\nI0821 
08:26:21.460589 32262 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 08:26:21.460609 32262 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:26:21.460623 32262 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:26:21.460636 32262 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.460655 32262 net.cpp:150] Setting up L3_b5_relu\nI0821 08:26:21.460670 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460680 32262 net.cpp:165] Memory required for data: 1360385500\nI0821 08:26:21.460690 32262 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.460703 32262 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.460714 32262 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:26:21.460729 32262 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:21.460749 32262 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:21.460834 32262 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.460855 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460867 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.460876 32262 net.cpp:165] Memory required for data: 1364481500\nI0821 08:26:21.460887 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:26:21.460911 32262 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:26:21.460925 32262 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:21.460943 32262 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:26:21.462007 32262 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:26:21.462028 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.462038 32262 net.cpp:165] Memory required for data: 1366529500\nI0821 08:26:21.462056 
32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:26:21.462076 32262 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:26:21.462097 32262 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:26:21.462123 32262 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:26:21.462425 32262 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:26:21.462445 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.462455 32262 net.cpp:165] Memory required for data: 1368577500\nI0821 08:26:21.462476 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:21.462491 32262 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:26:21.462503 32262 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:26:21.462518 32262 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.462613 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:21.462801 32262 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:26:21.462821 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.462831 32262 net.cpp:165] Memory required for data: 1370625500\nI0821 08:26:21.462848 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:26:21.462864 32262 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:26:21.462877 32262 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:26:21.462898 32262 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.462918 32262 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:26:21.462932 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.462941 32262 net.cpp:165] Memory required for data: 1372673500\nI0821 08:26:21.462951 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:26:21.462981 32262 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:26:21.462996 32262 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:26:21.463014 32262 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI0821 08:26:21.464073 32262 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 08:26:21.464093 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.464108 32262 net.cpp:165] Memory required for data: 1374721500\nI0821 08:26:21.464128 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:26:21.464149 32262 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:26:21.464162 32262 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:26:21.464179 32262 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:26:21.464470 32262 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:26:21.464489 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.464498 32262 net.cpp:165] Memory required for data: 1376769500\nI0821 08:26:21.464519 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:21.464543 32262 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:26:21.464555 32262 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:26:21.464571 32262 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.464663 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:21.464854 32262 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:26:21.464872 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.464882 32262 net.cpp:165] Memory required for data: 1378817500\nI0821 08:26:21.464900 32262 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:26:21.464921 32262 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:26:21.464933 32262 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:26:21.464947 32262 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:21.464967 32262 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:26:21.465023 32262 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:26:21.465041 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.465051 
32262 net.cpp:165] Memory required for data: 1380865500\nI0821 08:26:21.465061 32262 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:26:21.465080 32262 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:26:21.465093 32262 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:26:21.465122 32262 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.465144 32262 net.cpp:150] Setting up L3_b6_relu\nI0821 08:26:21.465159 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.465168 32262 net.cpp:165] Memory required for data: 1382913500\nI0821 08:26:21.465178 32262 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.465193 32262 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.465204 32262 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:26:21.465219 32262 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:21.465240 32262 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:21.465322 32262 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.465340 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.465354 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.465363 32262 net.cpp:165] Memory required for data: 1387009500\nI0821 08:26:21.465374 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:26:21.465399 32262 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:26:21.465412 32262 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:21.465430 32262 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:26:21.466500 32262 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:26:21.466521 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.466531 32262 net.cpp:165] 
Memory required for data: 1389057500\nI0821 08:26:21.466548 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:26:21.466570 32262 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:26:21.466583 32262 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:26:21.466610 32262 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:26:21.466918 32262 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:26:21.466938 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.466948 32262 net.cpp:165] Memory required for data: 1391105500\nI0821 08:26:21.466969 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:21.466985 32262 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:26:21.466997 32262 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:26:21.467018 32262 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.467113 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:21.467305 32262 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:26:21.467325 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.467334 32262 net.cpp:165] Memory required for data: 1393153500\nI0821 08:26:21.467353 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:26:21.467406 32262 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:26:21.467422 32262 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:26:21.467435 32262 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.467455 32262 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:26:21.467470 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.467479 32262 net.cpp:165] Memory required for data: 1395201500\nI0821 08:26:21.467491 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:26:21.467515 32262 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:26:21.467530 32262 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:26:21.467546 
32262 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 08:26:21.468614 32262 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:26:21.468634 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.468644 32262 net.cpp:165] Memory required for data: 1397249500\nI0821 08:26:21.468662 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:26:21.468683 32262 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:26:21.468704 32262 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:26:21.468722 32262 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:26:21.469038 32262 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:26:21.469058 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.469068 32262 net.cpp:165] Memory required for data: 1399297500\nI0821 08:26:21.469089 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:21.469111 32262 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:26:21.469125 32262 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:26:21.469141 32262 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.469238 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:21.469429 32262 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:26:21.469449 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.469458 32262 net.cpp:165] Memory required for data: 1401345500\nI0821 08:26:21.469476 32262 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:26:21.469494 32262 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:26:21.469506 32262 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:26:21.469519 32262 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:21.469539 32262 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:26:21.469595 32262 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:26:21.469619 32262 net.cpp:157] Top shape: 125 64 
8 8 (512000)\nI0821 08:26:21.469630 32262 net.cpp:165] Memory required for data: 1403393500\nI0821 08:26:21.469640 32262 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:26:21.469655 32262 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:26:21.469667 32262 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:26:21.469681 32262 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.469700 32262 net.cpp:150] Setting up L3_b7_relu\nI0821 08:26:21.469715 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.469724 32262 net.cpp:165] Memory required for data: 1405441500\nI0821 08:26:21.469734 32262 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.469756 32262 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.469768 32262 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:26:21.469784 32262 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:21.469804 32262 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:21.469890 32262 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.469911 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.469925 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.469935 32262 net.cpp:165] Memory required for data: 1409537500\nI0821 08:26:21.469945 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:26:21.469965 32262 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:26:21.469979 32262 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:21.470001 32262 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:26:21.471062 32262 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:26:21.471082 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:26:21.471091 32262 net.cpp:165] Memory required for data: 1411585500\nI0821 08:26:21.471117 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:26:21.471135 32262 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:26:21.471148 32262 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:26:21.471169 32262 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:26:21.471490 32262 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:26:21.471513 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.471524 32262 net.cpp:165] Memory required for data: 1413633500\nI0821 08:26:21.471554 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:21.471570 32262 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:26:21.471582 32262 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:26:21.471598 32262 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.471695 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:21.471894 32262 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:26:21.471913 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.471922 32262 net.cpp:165] Memory required for data: 1415681500\nI0821 08:26:21.471941 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:26:21.471957 32262 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:26:21.471969 32262 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:26:21.471988 32262 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.472009 32262 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:26:21.472023 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.472033 32262 net.cpp:165] Memory required for data: 1417729500\nI0821 08:26:21.472043 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:26:21.472069 32262 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:26:21.472081 32262 net.cpp:434] L3_b8_cbr2_conv <- 
L3_b8_cbr1_bn_top\nI0821 08:26:21.472100 32262 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:26:21.473167 32262 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:26:21.473187 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.473196 32262 net.cpp:165] Memory required for data: 1419777500\nI0821 08:26:21.473214 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:26:21.473237 32262 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:26:21.473249 32262 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:26:21.473271 32262 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:26:21.473564 32262 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:26:21.473584 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.473593 32262 net.cpp:165] Memory required for data: 1421825500\nI0821 08:26:21.473615 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:21.473631 32262 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:26:21.473642 32262 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:26:21.473657 32262 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.473752 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:21.473945 32262 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:26:21.473964 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.473974 32262 net.cpp:165] Memory required for data: 1423873500\nI0821 08:26:21.473992 32262 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:26:21.474010 32262 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:26:21.474021 32262 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:26:21.474035 32262 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:21.474057 32262 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:26:21.474119 32262 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 
08:26:21.474143 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.474153 32262 net.cpp:165] Memory required for data: 1425921500\nI0821 08:26:21.474164 32262 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:26:21.474179 32262 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:26:21.474190 32262 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:26:21.474205 32262 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.474223 32262 net.cpp:150] Setting up L3_b8_relu\nI0821 08:26:21.474237 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.474247 32262 net.cpp:165] Memory required for data: 1427969500\nI0821 08:26:21.474256 32262 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.474283 32262 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.474297 32262 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:26:21.474313 32262 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:21.474334 32262 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:21.474416 32262 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.474433 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.474447 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.474455 32262 net.cpp:165] Memory required for data: 1432065500\nI0821 08:26:21.474467 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:26:21.474486 32262 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:26:21.474499 32262 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:21.474522 32262 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:26:21.476583 32262 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:26:21.476603 
32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.476613 32262 net.cpp:165] Memory required for data: 1434113500\nI0821 08:26:21.476631 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:26:21.476653 32262 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:26:21.476666 32262 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:26:21.476687 32262 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:26:21.477010 32262 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:26:21.477030 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.477038 32262 net.cpp:165] Memory required for data: 1436161500\nI0821 08:26:21.477061 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:21.477077 32262 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:26:21.477087 32262 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:26:21.477116 32262 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.477212 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:21.477427 32262 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:26:21.477447 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.477457 32262 net.cpp:165] Memory required for data: 1438209500\nI0821 08:26:21.477475 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:26:21.477490 32262 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:26:21.477502 32262 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:26:21.477529 32262 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.477548 32262 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:26:21.477563 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.477573 32262 net.cpp:165] Memory required for data: 1440257500\nI0821 08:26:21.477584 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:26:21.477609 32262 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 
08:26:21.477622 32262 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0821 08:26:21.477643 32262 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:26:21.478763 32262 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:26:21.478785 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.478794 32262 net.cpp:165] Memory required for data: 1442305500\nI0821 08:26:21.478812 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:26:21.478830 32262 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:26:21.478842 32262 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:26:21.478868 32262 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:26:21.479187 32262 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:26:21.479210 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.479221 32262 net.cpp:165] Memory required for data: 1444353500\nI0821 08:26:21.479252 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:21.479269 32262 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:26:21.479280 32262 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:26:21.479296 32262 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.479391 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:21.479588 32262 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:26:21.479606 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.479615 32262 net.cpp:165] Memory required for data: 1446401500\nI0821 08:26:21.479634 32262 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:26:21.479655 32262 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:26:21.479666 32262 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:26:21.479681 32262 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:21.479696 32262 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:26:21.479756 32262 
net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 08:26:21.479775 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.479785 32262 net.cpp:165] Memory required for data: 1448449500\nI0821 08:26:21.479795 32262 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:26:21.479809 32262 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:26:21.479821 32262 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:26:21.479835 32262 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.479854 32262 net.cpp:150] Setting up L3_b9_relu\nI0821 08:26:21.479871 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.479879 32262 net.cpp:165] Memory required for data: 1450497500\nI0821 08:26:21.479889 32262 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:26:21.479905 32262 net.cpp:100] Creating Layer post_pool\nI0821 08:26:21.479918 32262 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:26:21.479938 32262 net.cpp:408] post_pool -> post_pool\nI0821 08:26:21.479998 32262 net.cpp:150] Setting up post_pool\nI0821 08:26:21.480018 32262 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:26:21.480028 32262 net.cpp:165] Memory required for data: 1450529500\nI0821 08:26:21.480039 32262 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:26:21.480157 32262 net.cpp:100] Creating Layer post_FC\nI0821 08:26:21.480175 32262 net.cpp:434] post_FC <- post_pool\nI0821 08:26:21.480199 32262 net.cpp:408] post_FC -> post_FC_top\nI0821 08:26:21.480499 32262 net.cpp:150] Setting up post_FC\nI0821 08:26:21.480520 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.480530 32262 net.cpp:165] Memory required for data: 1450534500\nI0821 08:26:21.480548 32262 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:26:21.480564 32262 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:26:21.480576 32262 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:26:21.480597 32262 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0821 08:26:21.480618 32262 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:26:21.480708 32262 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:26:21.480728 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.480741 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.480751 32262 net.cpp:165] Memory required for data: 1450544500\nI0821 08:26:21.480762 32262 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:26:21.480823 32262 net.cpp:100] Creating Layer accuracy\nI0821 08:26:21.480839 32262 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:26:21.480852 32262 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:26:21.480868 32262 net.cpp:408] accuracy -> accuracy\nI0821 08:26:21.480942 32262 net.cpp:150] Setting up accuracy\nI0821 08:26:21.480962 32262 net.cpp:157] Top shape: (1)\nI0821 08:26:21.480971 32262 net.cpp:165] Memory required for data: 1450544504\nI0821 08:26:21.480990 32262 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:21.481005 32262 net.cpp:100] Creating Layer loss\nI0821 08:26:21.481017 32262 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:26:21.481030 32262 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:26:21.481050 32262 net.cpp:408] loss -> loss\nI0821 08:26:21.482496 32262 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:21.483839 32262 net.cpp:150] Setting up loss\nI0821 08:26:21.483860 32262 net.cpp:157] Top shape: (1)\nI0821 08:26:21.483870 32262 net.cpp:160]     with loss weight 1\nI0821 08:26:21.483978 32262 net.cpp:165] Memory required for data: 1450544508\nI0821 08:26:21.483994 32262 net.cpp:226] loss needs backward computation.\nI0821 08:26:21.484006 32262 net.cpp:228] accuracy does not need backward computation.\nI0821 08:26:21.484019 32262 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:26:21.484028 32262 net.cpp:226] 
post_FC needs backward computation.\nI0821 08:26:21.484037 32262 net.cpp:226] post_pool needs backward computation.\nI0821 08:26:21.484047 32262 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:26:21.484057 32262 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.484067 32262 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.484077 32262 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.484087 32262 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.484098 32262 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.484115 32262 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.484125 32262 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.484135 32262 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.484146 32262 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:26:21.484156 32262 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:26:21.484166 32262 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.484176 32262 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.484187 32262 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.484197 32262 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.484207 32262 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.484217 32262 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:26:21.484227 32262 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.484237 32262 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.484248 32262 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:26:21.484259 32262 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:26:21.484271 32262 net.cpp:226] 
L3_b7_sum_eltwise needs backward computation.\nI0821 08:26:21.484280 32262 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.484290 32262 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.484310 32262 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.484323 32262 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.484333 32262 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.484342 32262 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.484354 32262 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.484364 32262 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:26:21.484375 32262 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:26:21.484385 32262 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.484396 32262 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.484405 32262 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.484416 32262 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.484436 32262 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.484447 32262 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.484457 32262 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.484467 32262 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.484478 32262 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:26:21.484489 32262 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:26:21.484500 32262 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:26:21.484511 32262 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.484520 32262 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.484531 
32262 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.484544 32262 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.484553 32262 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.484562 32262 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.484573 32262 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.484585 32262 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:26:21.484596 32262 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:26:21.484606 32262 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.484616 32262 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.484627 32262 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.484639 32262 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.484649 32262 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.484660 32262 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.484670 32262 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.484680 32262 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.484691 32262 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:26:21.484702 32262 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:26:21.484712 32262 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:26:21.484724 32262 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.484733 32262 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.484745 32262 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.484755 32262 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.484763 32262 net.cpp:226] L3_b3_cbr1_scale needs backward 
computation.\nI0821 08:26:21.484773 32262 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.484783 32262 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.484794 32262 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:26:21.484804 32262 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:26:21.484814 32262 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.484827 32262 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.484836 32262 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.484848 32262 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.484858 32262 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.484874 32262 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.484884 32262 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.484895 32262 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.484906 32262 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:26:21.484917 32262 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:26:21.484928 32262 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:26:21.484946 32262 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:26:21.484956 32262 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.484968 32262 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:26:21.484980 32262 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.484990 32262 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.485000 32262 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.485011 32262 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.485023 32262 net.cpp:226] L3_b1_cbr1_scale 
needs backward computation.\nI0821 08:26:21.485033 32262 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.485043 32262 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.485054 32262 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:26:21.485064 32262 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:26:21.485074 32262 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.485085 32262 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.485096 32262 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.485117 32262 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.485128 32262 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.485139 32262 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.485150 32262 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.485162 32262 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.485172 32262 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:26:21.485183 32262 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:26:21.485194 32262 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.485205 32262 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.485215 32262 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.485227 32262 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.485239 32262 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.485247 32262 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:26:21.485257 32262 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.485270 32262 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.485280 32262 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:26:21.485291 32262 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:26:21.485301 32262 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:26:21.485311 32262 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.485321 32262 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.485332 32262 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.485343 32262 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.485353 32262 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.485363 32262 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.485374 32262 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.485386 32262 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:26:21.485397 32262 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:26:21.485406 32262 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.485417 32262 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.485429 32262 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.485440 32262 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.485450 32262 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.485460 32262 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.485478 32262 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.485489 32262 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.485501 32262 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:26:21.485512 32262 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:26:21.485522 32262 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:26:21.485534 32262 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.485544 32262 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.485555 32262 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.485565 32262 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.485575 32262 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.485586 32262 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.485596 32262 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.485607 32262 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:26:21.485623 32262 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:26:21.485635 32262 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.485646 32262 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.485656 32262 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.485667 32262 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.485677 32262 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.485687 32262 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.485698 32262 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.485709 32262 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.485720 32262 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:26:21.485731 32262 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:26:21.485743 32262 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:26:21.485754 32262 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.485764 32262 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.485775 32262 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.485787 32262 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.485796 32262 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:26:21.485807 32262 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.485818 32262 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.485829 32262 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:26:21.485839 32262 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:26:21.485851 32262 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.485862 32262 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.485873 32262 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.485884 32262 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.485894 32262 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.485905 32262 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.485916 32262 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.485927 32262 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.485939 32262 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:26:21.485949 32262 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:26:21.485961 32262 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:26:21.485971 32262 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:26:21.485982 32262 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.486004 32262 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:26:21.486016 32262 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.486027 32262 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.486039 32262 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.486050 32262 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.486062 32262 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:26:21.486071 32262 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.486083 32262 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.486094 32262 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:26:21.486114 32262 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:26:21.486125 32262 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.486137 32262 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.486148 32262 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.486160 32262 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.486171 32262 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.486181 32262 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.486192 32262 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.486203 32262 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.486214 32262 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:26:21.486225 32262 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:26:21.486237 32262 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.486248 32262 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.486259 32262 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.486270 32262 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.486281 32262 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.486292 32262 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:26:21.486304 32262 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.486315 32262 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.486326 32262 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:26:21.486337 32262 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:26:21.486349 32262 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:26:21.486361 32262 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.486371 32262 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.486383 32262 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.486394 32262 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.486404 32262 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.486415 32262 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.486426 32262 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.486438 32262 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:26:21.486449 32262 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:26:21.486460 32262 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.486472 32262 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.486482 32262 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.486495 32262 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.486506 32262 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.486516 32262 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.486526 32262 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.486546 32262 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.486558 32262 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:26:21.486569 32262 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:26:21.486579 32262 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:26:21.486591 32262 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.486603 32262 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.486614 32262 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.486625 32262 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.486635 32262 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.486647 32262 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.486660 32262 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.486670 32262 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:26:21.486680 32262 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:26:21.486692 32262 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.486704 32262 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.486714 32262 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.486727 32262 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.486738 32262 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.486749 32262 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.486759 32262 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.486771 32262 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.486783 32262 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:26:21.486793 32262 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:26:21.486804 32262 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:26:21.486817 32262 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.486827 32262 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.486840 32262 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.486850 32262 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.486861 32262 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:26:21.486871 32262 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.486882 32262 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.486893 32262 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:26:21.486905 32262 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:26:21.486917 32262 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.486927 32262 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.486938 32262 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.486950 32262 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.486965 32262 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.486976 32262 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.486987 32262 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.486999 32262 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.487011 32262 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:26:21.487023 32262 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:26:21.487033 32262 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.487046 32262 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.487057 32262 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.487068 32262 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.487087 32262 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.487097 32262 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:26:21.487118 32262 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.487131 32262 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.487143 32262 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:26:21.487154 32262 net.cpp:226] pre_relu needs backward computation.\nI0821 08:26:21.487165 32262 net.cpp:226] pre_scale needs backward computation.\nI0821 08:26:21.487175 32262 net.cpp:226] pre_bn needs backward computation.\nI0821 08:26:21.487186 32262 net.cpp:226] pre_conv needs backward computation.\nI0821 08:26:21.487198 32262 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:26:21.487212 32262 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:26:21.487221 32262 net.cpp:270] This network produces output accuracy\nI0821 08:26:21.487233 32262 net.cpp:270] This network produces output loss\nI0821 08:26:21.487627 32262 net.cpp:283] Network initialization done.\nI0821 08:26:21.497231 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:21.497280 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:21.497355 32262 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0821 08:26:21.497762 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0821 08:26:21.497788 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0821 08:26:21.497807 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0821 08:26:21.497826 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0821 08:26:21.497848 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0821 08:26:21.497865 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0821 08:26:21.497885 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0821 08:26:21.497902 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0821 08:26:21.497922 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0821 08:26:21.497941 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0821 08:26:21.497961 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0821 08:26:21.497977 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0821 08:26:21.497995 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0821 08:26:21.498013 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0821 08:26:21.498033 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0821 08:26:21.498050 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0821 08:26:21.498070 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0821 08:26:21.498086 32262 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0821 08:26:21.498113 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0821 08:26:21.498142 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0821 08:26:21.498159 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0821 08:26:21.498168 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0821 08:26:21.498181 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0821 08:26:21.498190 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0821 08:26:21.498199 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0821 08:26:21.498208 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0821 08:26:21.498216 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0821 08:26:21.498224 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0821 08:26:21.498234 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0821 08:26:21.498241 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0821 08:26:21.498250 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0821 08:26:21.498260 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0821 08:26:21.498268 32262 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0821 08:26:21.498276 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0821 08:26:21.498286 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0821 08:26:21.498293 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0821 08:26:21.498302 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0821 08:26:21.498311 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0821 08:26:21.498319 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0821 08:26:21.498327 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0821 08:26:21.498339 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0821 08:26:21.498347 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0821 08:26:21.498355 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0821 08:26:21.498363 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0821 08:26:21.498373 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0821 08:26:21.498380 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0821 08:26:21.498389 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0821 08:26:21.498397 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0821 08:26:21.498406 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0821 08:26:21.498422 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0821 08:26:21.498431 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0821 08:26:21.498440 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0821 08:26:21.498448 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0821 08:26:21.498457 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0821 08:26:21.498466 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0821 08:26:21.498474 32262 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0821 08:26:21.500128 32262 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0821 08:26:21.501907 32262 layer_factory.hpp:77] Creating layer dataLayer\nI0821 08:26:21.502722 32262 net.cpp:100] Creating Layer dataLayer\nI0821 08:26:21.502751 32262 net.cpp:408] dataLayer -> data_top\nI0821 08:26:21.502779 32262 net.cpp:408] dataLayer -> label\nI0821 08:26:21.502801 32262 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0821 08:26:21.511818 32269 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0821 08:26:21.512105 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:21.519568 32262 net.cpp:150] Setting up dataLayer\nI0821 08:26:21.519594 32262 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0821 08:26:21.519610 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.519619 32262 net.cpp:165] Memory required for data: 1536500\nI0821 08:26:21.519634 32262 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0821 08:26:21.519651 32262 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0821 08:26:21.519665 32262 net.cpp:434] label_dataLayer_1_split <- label\nI0821 08:26:21.519685 32262 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0821 08:26:21.519707 32262 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0821 08:26:21.519840 32262 net.cpp:150] Setting up label_dataLayer_1_split\nI0821 08:26:21.519863 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.519879 32262 net.cpp:157] Top shape: 125 (125)\nI0821 08:26:21.519891 32262 net.cpp:165] Memory required for data: 1537500\nI0821 08:26:21.519902 32262 layer_factory.hpp:77] Creating layer 
pre_conv\nI0821 08:26:21.519927 32262 net.cpp:100] Creating Layer pre_conv\nI0821 08:26:21.519939 32262 net.cpp:434] pre_conv <- data_top\nI0821 08:26:21.519960 32262 net.cpp:408] pre_conv -> pre_conv_top\nI0821 08:26:21.520411 32262 net.cpp:150] Setting up pre_conv\nI0821 08:26:21.520442 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.520453 32262 net.cpp:165] Memory required for data: 9729500\nI0821 08:26:21.520478 32262 layer_factory.hpp:77] Creating layer pre_bn\nI0821 08:26:21.520503 32262 net.cpp:100] Creating Layer pre_bn\nI0821 08:26:21.520516 32262 net.cpp:434] pre_bn <- pre_conv_top\nI0821 08:26:21.520539 32262 net.cpp:408] pre_bn -> pre_bn_top\nI0821 08:26:21.520953 32262 net.cpp:150] Setting up pre_bn\nI0821 08:26:21.520974 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.520983 32262 net.cpp:165] Memory required for data: 17921500\nI0821 08:26:21.521010 32262 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:21.521034 32262 net.cpp:100] Creating Layer pre_scale\nI0821 08:26:21.521049 32262 net.cpp:434] pre_scale <- pre_bn_top\nI0821 08:26:21.521064 32262 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0821 08:26:21.521214 32262 layer_factory.hpp:77] Creating layer pre_scale\nI0821 08:26:21.521448 32262 net.cpp:150] Setting up pre_scale\nI0821 08:26:21.521471 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.521479 32262 net.cpp:165] Memory required for data: 26113500\nI0821 08:26:21.521502 32262 layer_factory.hpp:77] Creating layer pre_relu\nI0821 08:26:21.521518 32262 net.cpp:100] Creating Layer pre_relu\nI0821 08:26:21.521528 32262 net.cpp:434] pre_relu <- pre_bn_top\nI0821 08:26:21.521548 32262 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0821 08:26:21.521569 32262 net.cpp:150] Setting up pre_relu\nI0821 08:26:21.521584 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.521592 32262 net.cpp:165] Memory required for data: 34305500\nI0821 08:26:21.521606 
32262 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0821 08:26:21.521625 32262 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0821 08:26:21.521637 32262 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0821 08:26:21.521653 32262 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0821 08:26:21.521672 32262 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0821 08:26:21.521765 32262 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0821 08:26:21.521788 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.521802 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.521812 32262 net.cpp:165] Memory required for data: 50689500\nI0821 08:26:21.521826 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0821 08:26:21.521888 32262 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0821 08:26:21.521903 32262 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0821 08:26:21.521925 32262 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0821 08:26:21.522552 32262 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0821 08:26:21.522575 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.522585 32262 net.cpp:165] Memory required for data: 58881500\nI0821 08:26:21.522608 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0821 08:26:21.522632 32262 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0821 08:26:21.522647 32262 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0821 08:26:21.522670 32262 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0821 08:26:21.523037 32262 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0821 08:26:21.523061 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.523069 32262 net.cpp:165] Memory required for data: 67073500\nI0821 08:26:21.523089 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:21.523113 32262 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0821 
08:26:21.523123 32262 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0821 08:26:21.523138 32262 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.523241 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0821 08:26:21.523437 32262 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0821 08:26:21.523458 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.523475 32262 net.cpp:165] Memory required for data: 75265500\nI0821 08:26:21.523494 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0821 08:26:21.523510 32262 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0821 08:26:21.523521 32262 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0821 08:26:21.523540 32262 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.523560 32262 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0821 08:26:21.523574 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.523583 32262 net.cpp:165] Memory required for data: 83457500\nI0821 08:26:21.523593 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0821 08:26:21.523613 32262 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0821 08:26:21.523625 32262 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0821 08:26:21.523646 32262 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0821 08:26:21.524049 32262 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0821 08:26:21.524068 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.524078 32262 net.cpp:165] Memory required for data: 91649500\nI0821 08:26:21.524096 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0821 08:26:21.524119 32262 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0821 08:26:21.524132 32262 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0821 08:26:21.524152 32262 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0821 08:26:21.524458 32262 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0821 08:26:21.524482 32262 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:21.524492 32262 net.cpp:165] Memory required for data: 99841500\nI0821 08:26:21.524520 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:21.524539 32262 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0821 08:26:21.524551 32262 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0821 08:26:21.524567 32262 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.524682 32262 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0821 08:26:21.524901 32262 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0821 08:26:21.524921 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.524931 32262 net.cpp:165] Memory required for data: 108033500\nI0821 08:26:21.524948 32262 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0821 08:26:21.524966 32262 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0821 08:26:21.524976 32262 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0821 08:26:21.524997 32262 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0821 08:26:21.525018 32262 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0821 08:26:21.525092 32262 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0821 08:26:21.525117 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.525128 32262 net.cpp:165] Memory required for data: 116225500\nI0821 08:26:21.525140 32262 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0821 08:26:21.525158 32262 net.cpp:100] Creating Layer L1_b1_relu\nI0821 08:26:21.525169 32262 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0821 08:26:21.525184 32262 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.525202 32262 net.cpp:150] Setting up L1_b1_relu\nI0821 08:26:21.525218 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.525228 32262 net.cpp:165] Memory required for data: 124417500\nI0821 08:26:21.525238 32262 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.525254 32262 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.525265 32262 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0821 08:26:21.525280 32262 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:21.525302 32262 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:21.525388 32262 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0821 08:26:21.525421 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.525434 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.525449 32262 net.cpp:165] Memory required for data: 140801500\nI0821 08:26:21.525460 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0821 08:26:21.525485 32262 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0821 08:26:21.525497 32262 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0821 08:26:21.525524 32262 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0821 08:26:21.525979 32262 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0821 08:26:21.526001 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.526010 32262 net.cpp:165] Memory required for data: 148993500\nI0821 08:26:21.526028 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0821 08:26:21.526049 32262 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0821 08:26:21.526063 32262 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0821 08:26:21.526087 32262 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0821 08:26:21.526437 32262 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0821 08:26:21.526459 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.526470 32262 net.cpp:165] Memory required for data: 157185500\nI0821 08:26:21.526491 32262 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0821 08:26:21.526507 32262 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0821 08:26:21.526517 32262 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0821 08:26:21.526535 32262 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.526646 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0821 08:26:21.526859 32262 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0821 08:26:21.526880 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.526888 32262 net.cpp:165] Memory required for data: 165377500\nI0821 08:26:21.526909 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0821 08:26:21.526931 32262 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0821 08:26:21.526942 32262 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0821 08:26:21.526955 32262 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.526978 32262 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0821 08:26:21.526996 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.527005 32262 net.cpp:165] Memory required for data: 173569500\nI0821 08:26:21.527020 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0821 08:26:21.527045 32262 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0821 08:26:21.527061 32262 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0821 08:26:21.527087 32262 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0821 08:26:21.527590 32262 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0821 08:26:21.527611 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.527621 32262 net.cpp:165] Memory required for data: 181761500\nI0821 08:26:21.527642 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0821 08:26:21.527670 32262 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0821 08:26:21.527688 32262 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0821 08:26:21.527705 32262 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0821 08:26:21.528121 32262 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0821 08:26:21.528147 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.528158 32262 net.cpp:165] Memory required for data: 189953500\nI0821 08:26:21.528187 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:21.528208 32262 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0821 08:26:21.528219 32262 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0821 08:26:21.528250 32262 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.528342 32262 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0821 08:26:21.528520 32262 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0821 08:26:21.528543 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.528553 32262 net.cpp:165] Memory required for data: 198145500\nI0821 08:26:21.528583 32262 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0821 08:26:21.528599 32262 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0821 08:26:21.528612 32262 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0821 08:26:21.528626 32262 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0821 08:26:21.528646 32262 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0821 08:26:21.528707 32262 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0821 08:26:21.528729 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.528738 32262 net.cpp:165] Memory required for data: 206337500\nI0821 08:26:21.528748 32262 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0821 08:26:21.528765 32262 net.cpp:100] Creating Layer L1_b2_relu\nI0821 08:26:21.528777 32262 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0821 08:26:21.528790 32262 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0821 08:26:21.528811 32262 net.cpp:150] Setting up L1_b2_relu\nI0821 08:26:21.528826 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.528836 32262 net.cpp:165] Memory required for 
data: 214529500\nI0821 08:26:21.528844 32262 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.528862 32262 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.528877 32262 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0821 08:26:21.528892 32262 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:21.528913 32262 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:21.529001 32262 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0821 08:26:21.529027 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.529042 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.529050 32262 net.cpp:165] Memory required for data: 230913500\nI0821 08:26:21.529064 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0821 08:26:21.529083 32262 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0821 08:26:21.529096 32262 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0821 08:26:21.529137 32262 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0821 08:26:21.529580 32262 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0821 08:26:21.529600 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.529610 32262 net.cpp:165] Memory required for data: 239105500\nI0821 08:26:21.529631 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0821 08:26:21.529652 32262 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0821 08:26:21.529667 32262 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0821 08:26:21.529683 32262 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0821 08:26:21.530045 32262 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0821 08:26:21.530067 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.530076 32262 net.cpp:165] Memory required for data: 
247297500\nI0821 08:26:21.530110 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:21.530130 32262 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0821 08:26:21.530143 32262 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0821 08:26:21.530164 32262 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.530287 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0821 08:26:21.530514 32262 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0821 08:26:21.530539 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.530552 32262 net.cpp:165] Memory required for data: 255489500\nI0821 08:26:21.530570 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0821 08:26:21.530588 32262 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0821 08:26:21.530601 32262 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0821 08:26:21.530614 32262 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.530642 32262 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0821 08:26:21.530660 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.530671 32262 net.cpp:165] Memory required for data: 263681500\nI0821 08:26:21.530680 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0821 08:26:21.530710 32262 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0821 08:26:21.530724 32262 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0821 08:26:21.530750 32262 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0821 08:26:21.531198 32262 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0821 08:26:21.531217 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.531230 32262 net.cpp:165] Memory required for data: 271873500\nI0821 08:26:21.531247 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0821 08:26:21.531282 32262 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0821 08:26:21.531296 32262 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0821 08:26:21.531316 32262 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0821 08:26:21.531677 32262 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0821 08:26:21.531697 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.531709 32262 net.cpp:165] Memory required for data: 280065500\nI0821 08:26:21.531731 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:21.531750 32262 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0821 08:26:21.531762 32262 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0821 08:26:21.531785 32262 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.531885 32262 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0821 08:26:21.532109 32262 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0821 08:26:21.532136 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.532147 32262 net.cpp:165] Memory required for data: 288257500\nI0821 08:26:21.532169 32262 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0821 08:26:21.532186 32262 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0821 08:26:21.532196 32262 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0821 08:26:21.532209 32262 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0821 08:26:21.532224 32262 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0821 08:26:21.532294 32262 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0821 08:26:21.532313 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.532325 32262 net.cpp:165] Memory required for data: 296449500\nI0821 08:26:21.532335 32262 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0821 08:26:21.532358 32262 net.cpp:100] Creating Layer L1_b3_relu\nI0821 08:26:21.532369 32262 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0821 08:26:21.532385 32262 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.532404 32262 net.cpp:150] Setting up L1_b3_relu\nI0821 08:26:21.532418 32262 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0821 08:26:21.532431 32262 net.cpp:165] Memory required for data: 304641500\nI0821 08:26:21.532443 32262 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.532464 32262 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.532480 32262 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0821 08:26:21.532495 32262 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:21.532517 32262 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:21.532609 32262 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0821 08:26:21.532630 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.532644 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.532655 32262 net.cpp:165] Memory required for data: 321025500\nI0821 08:26:21.532665 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0821 08:26:21.532690 32262 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0821 08:26:21.532711 32262 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0821 08:26:21.532733 32262 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0821 08:26:21.533200 32262 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0821 08:26:21.533221 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.533231 32262 net.cpp:165] Memory required for data: 329217500\nI0821 08:26:21.533252 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0821 08:26:21.533268 32262 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0821 08:26:21.533285 32262 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0821 08:26:21.533303 32262 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0821 08:26:21.533668 32262 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0821 08:26:21.533686 32262 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0821 08:26:21.533695 32262 net.cpp:165] Memory required for data: 337409500\nI0821 08:26:21.533721 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:21.533737 32262 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0821 08:26:21.533748 32262 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0821 08:26:21.533772 32262 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.533877 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0821 08:26:21.534096 32262 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0821 08:26:21.534129 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.534140 32262 net.cpp:165] Memory required for data: 345601500\nI0821 08:26:21.534162 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0821 08:26:21.534178 32262 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0821 08:26:21.534188 32262 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0821 08:26:21.534202 32262 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.534221 32262 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0821 08:26:21.534238 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.534247 32262 net.cpp:165] Memory required for data: 353793500\nI0821 08:26:21.534257 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0821 08:26:21.534286 32262 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0821 08:26:21.534298 32262 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0821 08:26:21.534327 32262 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0821 08:26:21.534734 32262 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0821 08:26:21.534754 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.534762 32262 net.cpp:165] Memory required for data: 361985500\nI0821 08:26:21.534780 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0821 08:26:21.534801 32262 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0821 08:26:21.534812 32262 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0821 08:26:21.534833 32262 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0821 08:26:21.535162 32262 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0821 08:26:21.535183 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.535193 32262 net.cpp:165] Memory required for data: 370177500\nI0821 08:26:21.535214 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:21.535229 32262 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0821 08:26:21.535240 32262 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0821 08:26:21.535259 32262 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.535353 32262 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0821 08:26:21.535545 32262 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0821 08:26:21.535564 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.535574 32262 net.cpp:165] Memory required for data: 378369500\nI0821 08:26:21.535593 32262 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0821 08:26:21.535607 32262 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0821 08:26:21.535619 32262 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0821 08:26:21.535632 32262 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0821 08:26:21.535661 32262 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0821 08:26:21.535719 32262 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0821 08:26:21.535742 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.535753 32262 net.cpp:165] Memory required for data: 386561500\nI0821 08:26:21.535764 32262 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0821 08:26:21.535778 32262 net.cpp:100] Creating Layer L1_b4_relu\nI0821 08:26:21.535790 32262 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0821 08:26:21.535804 32262 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0821 08:26:21.535822 32262 net.cpp:150] 
Setting up L1_b4_relu\nI0821 08:26:21.535836 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.535846 32262 net.cpp:165] Memory required for data: 394753500\nI0821 08:26:21.535856 32262 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.535876 32262 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.535887 32262 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0821 08:26:21.535902 32262 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:21.535920 32262 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:21.536003 32262 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0821 08:26:21.536027 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.536042 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.536051 32262 net.cpp:165] Memory required for data: 411137500\nI0821 08:26:21.536062 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0821 08:26:21.536080 32262 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0821 08:26:21.536093 32262 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0821 08:26:21.536123 32262 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0821 08:26:21.536535 32262 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0821 08:26:21.536556 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.536566 32262 net.cpp:165] Memory required for data: 419329500\nI0821 08:26:21.536602 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0821 08:26:21.536624 32262 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0821 08:26:21.536638 32262 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0821 08:26:21.536653 32262 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0821 08:26:21.536976 32262 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0821 08:26:21.536996 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.537006 32262 net.cpp:165] Memory required for data: 427521500\nI0821 08:26:21.537026 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:21.537042 32262 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0821 08:26:21.537053 32262 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0821 08:26:21.537077 32262 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.537179 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0821 08:26:21.537381 32262 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0821 08:26:21.537400 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.537410 32262 net.cpp:165] Memory required for data: 435713500\nI0821 08:26:21.537427 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0821 08:26:21.537446 32262 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0821 08:26:21.537458 32262 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0821 08:26:21.537472 32262 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.537489 32262 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0821 08:26:21.537504 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.537513 32262 net.cpp:165] Memory required for data: 443905500\nI0821 08:26:21.537523 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0821 08:26:21.537554 32262 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0821 08:26:21.537567 32262 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0821 08:26:21.537587 32262 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0821 08:26:21.538007 32262 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0821 08:26:21.538028 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.538038 32262 net.cpp:165] Memory required for data: 452097500\nI0821 08:26:21.538053 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0821 08:26:21.538075 32262 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0821 08:26:21.538087 32262 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0821 08:26:21.538115 32262 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0821 08:26:21.538439 32262 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0821 08:26:21.538458 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.538467 32262 net.cpp:165] Memory required for data: 460289500\nI0821 08:26:21.538488 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:21.538503 32262 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0821 08:26:21.538514 32262 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0821 08:26:21.538528 32262 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.538630 32262 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0821 08:26:21.538883 32262 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0821 08:26:21.538928 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.538938 32262 net.cpp:165] Memory required for data: 468481500\nI0821 08:26:21.538954 32262 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0821 08:26:21.538975 32262 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0821 08:26:21.538987 32262 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0821 08:26:21.539000 32262 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0821 08:26:21.539016 32262 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0821 08:26:21.539077 32262 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0821 08:26:21.539098 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.539115 32262 net.cpp:165] Memory required for data: 476673500\nI0821 08:26:21.539126 32262 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0821 08:26:21.539140 32262 net.cpp:100] Creating Layer L1_b5_relu\nI0821 08:26:21.539151 32262 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0821 08:26:21.539170 32262 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.539189 32262 net.cpp:150] Setting up L1_b5_relu\nI0821 08:26:21.539202 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.539211 32262 net.cpp:165] Memory required for data: 484865500\nI0821 08:26:21.539222 32262 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.539234 32262 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.539244 32262 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0821 08:26:21.539258 32262 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:21.539278 32262 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:21.539368 32262 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0821 08:26:21.539386 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.539399 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.539408 32262 net.cpp:165] Memory required for data: 501249500\nI0821 08:26:21.539418 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0821 08:26:21.539443 32262 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0821 08:26:21.539454 32262 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0821 08:26:21.539472 32262 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0821 08:26:21.539885 32262 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0821 08:26:21.539906 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.539923 32262 net.cpp:165] Memory required for data: 509441500\nI0821 08:26:21.539942 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0821 08:26:21.539961 32262 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0821 08:26:21.539973 32262 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0821 08:26:21.539993 32262 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0821 08:26:21.540316 32262 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0821 08:26:21.540336 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.540345 32262 net.cpp:165] Memory required for data: 517633500\nI0821 08:26:21.540365 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:21.540381 32262 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0821 08:26:21.540391 32262 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0821 08:26:21.540405 32262 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.540508 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0821 08:26:21.540709 32262 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0821 08:26:21.540727 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.540737 32262 net.cpp:165] Memory required for data: 525825500\nI0821 08:26:21.540755 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0821 08:26:21.540773 32262 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0821 08:26:21.540786 32262 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0821 08:26:21.540798 32262 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.540815 32262 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0821 08:26:21.540829 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.540838 32262 net.cpp:165] Memory required for data: 534017500\nI0821 08:26:21.540848 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0821 08:26:21.540871 32262 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0821 08:26:21.540884 32262 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0821 08:26:21.540905 32262 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0821 08:26:21.541323 32262 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0821 08:26:21.541343 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.541352 32262 net.cpp:165] Memory required for data: 542209500\nI0821 08:26:21.541369 32262 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0821 08:26:21.541391 32262 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0821 08:26:21.541404 32262 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0821 08:26:21.541419 32262 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0821 08:26:21.541765 32262 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0821 08:26:21.541785 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.541795 32262 net.cpp:165] Memory required for data: 550401500\nI0821 08:26:21.541815 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:21.541831 32262 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0821 08:26:21.541841 32262 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0821 08:26:21.541856 32262 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.541957 32262 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0821 08:26:21.542166 32262 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0821 08:26:21.542186 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.542194 32262 net.cpp:165] Memory required for data: 558593500\nI0821 08:26:21.542212 32262 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0821 08:26:21.542238 32262 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0821 08:26:21.542251 32262 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0821 08:26:21.542264 32262 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0821 08:26:21.542284 32262 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0821 08:26:21.542343 32262 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0821 08:26:21.542362 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.542372 32262 net.cpp:165] Memory required for data: 566785500\nI0821 08:26:21.542393 32262 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0821 08:26:21.542408 32262 net.cpp:100] Creating Layer L1_b6_relu\nI0821 08:26:21.542418 32262 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0821 08:26:21.542433 32262 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.542450 32262 net.cpp:150] Setting up L1_b6_relu\nI0821 08:26:21.542465 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.542475 32262 net.cpp:165] Memory required for data: 574977500\nI0821 08:26:21.542484 32262 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.542501 32262 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.542512 32262 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0821 08:26:21.542527 32262 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:21.542546 32262 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:21.542630 32262 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0821 08:26:21.542656 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.542670 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.542680 32262 net.cpp:165] Memory required for data: 591361500\nI0821 08:26:21.542690 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0821 08:26:21.542708 32262 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0821 08:26:21.542721 32262 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0821 08:26:21.542738 32262 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0821 08:26:21.543143 32262 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0821 08:26:21.543164 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.543172 32262 net.cpp:165] Memory required for data: 599553500\nI0821 08:26:21.543190 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0821 08:26:21.543210 32262 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0821 08:26:21.543222 32262 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0821 08:26:21.543237 32262 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0821 08:26:21.543565 32262 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0821 08:26:21.543586 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.543596 32262 net.cpp:165] Memory required for data: 607745500\nI0821 08:26:21.543615 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:21.543630 32262 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0821 08:26:21.543642 32262 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0821 08:26:21.543660 32262 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.543759 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0821 08:26:21.543965 32262 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0821 08:26:21.543984 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.543993 32262 net.cpp:165] Memory required for data: 615937500\nI0821 08:26:21.544010 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0821 08:26:21.544025 32262 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0821 08:26:21.544037 32262 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0821 08:26:21.544055 32262 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.544075 32262 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0821 08:26:21.544088 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.544097 32262 net.cpp:165] Memory required for data: 624129500\nI0821 08:26:21.544114 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0821 08:26:21.544138 32262 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0821 08:26:21.544149 32262 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0821 08:26:21.544165 32262 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0821 08:26:21.545045 32262 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0821 08:26:21.545084 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.545109 32262 
net.cpp:165] Memory required for data: 632321500\nI0821 08:26:21.545130 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0821 08:26:21.545145 32262 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0821 08:26:21.545162 32262 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0821 08:26:21.545179 32262 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0821 08:26:21.545507 32262 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0821 08:26:21.545526 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.545536 32262 net.cpp:165] Memory required for data: 640513500\nI0821 08:26:21.545557 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:21.545572 32262 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0821 08:26:21.545583 32262 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0821 08:26:21.545603 32262 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.545697 32262 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0821 08:26:21.545897 32262 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0821 08:26:21.545920 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.545931 32262 net.cpp:165] Memory required for data: 648705500\nI0821 08:26:21.545949 32262 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0821 08:26:21.545965 32262 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0821 08:26:21.545977 32262 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0821 08:26:21.545990 32262 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0821 08:26:21.546005 32262 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0821 08:26:21.546066 32262 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0821 08:26:21.546087 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.546095 32262 net.cpp:165] Memory required for data: 656897500\nI0821 08:26:21.546113 32262 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0821 08:26:21.546133 32262 net.cpp:100] Creating 
Layer L1_b7_relu\nI0821 08:26:21.546144 32262 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0821 08:26:21.546159 32262 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.546176 32262 net.cpp:150] Setting up L1_b7_relu\nI0821 08:26:21.546191 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.546201 32262 net.cpp:165] Memory required for data: 665089500\nI0821 08:26:21.546211 32262 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.546228 32262 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.546241 32262 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0821 08:26:21.546255 32262 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:21.546274 32262 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:21.546358 32262 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0821 08:26:21.546382 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.546396 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.546406 32262 net.cpp:165] Memory required for data: 681473500\nI0821 08:26:21.546416 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0821 08:26:21.546434 32262 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0821 08:26:21.546447 32262 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0821 08:26:21.546465 32262 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0821 08:26:21.546883 32262 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0821 08:26:21.546903 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.546912 32262 net.cpp:165] Memory required for data: 689665500\nI0821 08:26:21.546929 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0821 08:26:21.546949 32262 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0821 08:26:21.546962 32262 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0821 08:26:21.546985 32262 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0821 08:26:21.547334 32262 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0821 08:26:21.547354 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.547363 32262 net.cpp:165] Memory required for data: 697857500\nI0821 08:26:21.547384 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:21.547399 32262 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0821 08:26:21.547410 32262 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0821 08:26:21.547430 32262 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.547528 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0821 08:26:21.547734 32262 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0821 08:26:21.547754 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.547763 32262 net.cpp:165] Memory required for data: 706049500\nI0821 08:26:21.547780 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0821 08:26:21.547796 32262 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0821 08:26:21.547806 32262 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0821 08:26:21.547819 32262 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.547837 32262 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0821 08:26:21.547852 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.547861 32262 net.cpp:165] Memory required for data: 714241500\nI0821 08:26:21.547870 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0821 08:26:21.547894 32262 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0821 08:26:21.547907 32262 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0821 08:26:21.547927 32262 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0821 08:26:21.548352 32262 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0821 08:26:21.548373 32262 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.548382 32262 net.cpp:165] Memory required for data: 722433500\nI0821 08:26:21.548398 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0821 08:26:21.548419 32262 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0821 08:26:21.548432 32262 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0821 08:26:21.548451 32262 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0821 08:26:21.548781 32262 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0821 08:26:21.548800 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.548810 32262 net.cpp:165] Memory required for data: 730625500\nI0821 08:26:21.548830 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:21.548846 32262 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0821 08:26:21.548857 32262 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0821 08:26:21.548877 32262 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.548977 32262 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0821 08:26:21.549187 32262 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0821 08:26:21.549206 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.549216 32262 net.cpp:165] Memory required for data: 738817500\nI0821 08:26:21.549233 32262 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0821 08:26:21.549254 32262 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0821 08:26:21.549268 32262 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0821 08:26:21.549279 32262 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0821 08:26:21.549294 32262 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0821 08:26:21.549357 32262 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0821 08:26:21.549374 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.549384 32262 net.cpp:165] Memory required for data: 747009500\nI0821 08:26:21.549393 32262 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0821 08:26:21.549407 32262 net.cpp:100] Creating Layer L1_b8_relu\nI0821 08:26:21.549417 32262 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0821 08:26:21.549437 32262 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.549463 32262 net.cpp:150] Setting up L1_b8_relu\nI0821 08:26:21.549479 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.549487 32262 net.cpp:165] Memory required for data: 755201500\nI0821 08:26:21.549496 32262 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.549510 32262 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.549520 32262 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0821 08:26:21.549541 32262 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:21.549561 32262 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:21.549645 32262 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0821 08:26:21.549665 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.549679 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.549687 32262 net.cpp:165] Memory required for data: 771585500\nI0821 08:26:21.549697 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0821 08:26:21.549721 32262 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0821 08:26:21.549733 32262 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0821 08:26:21.549751 32262 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0821 08:26:21.550170 32262 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0821 08:26:21.550194 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.550204 32262 net.cpp:165] Memory required for data: 779777500\nI0821 08:26:21.550222 32262 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0821 08:26:21.550238 32262 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0821 08:26:21.550251 32262 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0821 08:26:21.550273 32262 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0821 08:26:21.550588 32262 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0821 08:26:21.550608 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.550617 32262 net.cpp:165] Memory required for data: 787969500\nI0821 08:26:21.550638 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:21.550659 32262 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0821 08:26:21.550671 32262 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0821 08:26:21.550688 32262 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.550779 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0821 08:26:21.550982 32262 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0821 08:26:21.551000 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.551010 32262 net.cpp:165] Memory required for data: 796161500\nI0821 08:26:21.551028 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0821 08:26:21.551048 32262 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0821 08:26:21.551060 32262 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0821 08:26:21.551075 32262 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.551095 32262 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0821 08:26:21.551116 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.551126 32262 net.cpp:165] Memory required for data: 804353500\nI0821 08:26:21.551136 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0821 08:26:21.551161 32262 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0821 08:26:21.551173 32262 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0821 08:26:21.551194 32262 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0821 08:26:21.551592 32262 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0821 08:26:21.551612 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.551621 32262 net.cpp:165] Memory required for data: 812545500\nI0821 08:26:21.551640 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0821 08:26:21.551656 32262 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0821 08:26:21.551667 32262 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0821 08:26:21.551697 32262 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0821 08:26:21.552024 32262 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0821 08:26:21.552044 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552054 32262 net.cpp:165] Memory required for data: 820737500\nI0821 08:26:21.552114 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:21.552134 32262 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0821 08:26:21.552145 32262 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0821 08:26:21.552165 32262 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.552258 32262 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0821 08:26:21.552459 32262 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0821 08:26:21.552479 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552487 32262 net.cpp:165] Memory required for data: 828929500\nI0821 08:26:21.552505 32262 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0821 08:26:21.552522 32262 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0821 08:26:21.552533 32262 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0821 08:26:21.552547 32262 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0821 08:26:21.552567 32262 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0821 08:26:21.552624 32262 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0821 08:26:21.552642 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552652 32262 net.cpp:165] Memory required for 
data: 837121500\nI0821 08:26:21.552662 32262 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0821 08:26:21.552680 32262 net.cpp:100] Creating Layer L1_b9_relu\nI0821 08:26:21.552693 32262 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0821 08:26:21.552707 32262 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.552726 32262 net.cpp:150] Setting up L1_b9_relu\nI0821 08:26:21.552741 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552749 32262 net.cpp:165] Memory required for data: 845313500\nI0821 08:26:21.552759 32262 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.552778 32262 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.552789 32262 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0821 08:26:21.552810 32262 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:21.552830 32262 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:21.552917 32262 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0821 08:26:21.552940 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552955 32262 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0821 08:26:21.552965 32262 net.cpp:165] Memory required for data: 861697500\nI0821 08:26:21.552974 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0821 08:26:21.552994 32262 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0821 08:26:21.553007 32262 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0821 08:26:21.553025 32262 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0821 08:26:21.553442 32262 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0821 08:26:21.553462 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.553472 32262 net.cpp:165] Memory required for data: 
863745500\nI0821 08:26:21.553489 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0821 08:26:21.553510 32262 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0821 08:26:21.553522 32262 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0821 08:26:21.553539 32262 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0821 08:26:21.553848 32262 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0821 08:26:21.553867 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.553877 32262 net.cpp:165] Memory required for data: 865793500\nI0821 08:26:21.553906 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:21.553923 32262 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0821 08:26:21.553935 32262 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0821 08:26:21.553954 32262 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.554056 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0821 08:26:21.554262 32262 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0821 08:26:21.554281 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.554291 32262 net.cpp:165] Memory required for data: 867841500\nI0821 08:26:21.554309 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0821 08:26:21.554324 32262 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0821 08:26:21.554335 32262 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0821 08:26:21.554354 32262 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.554374 32262 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0821 08:26:21.554389 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.554399 32262 net.cpp:165] Memory required for data: 869889500\nI0821 08:26:21.554409 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0821 08:26:21.554433 32262 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0821 08:26:21.554446 32262 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0821 08:26:21.554463 32262 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0821 08:26:21.554873 32262 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0821 08:26:21.554893 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.554903 32262 net.cpp:165] Memory required for data: 871937500\nI0821 08:26:21.554919 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0821 08:26:21.554940 32262 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0821 08:26:21.554952 32262 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0821 08:26:21.554968 32262 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0821 08:26:21.555287 32262 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0821 08:26:21.555306 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.555315 32262 net.cpp:165] Memory required for data: 873985500\nI0821 08:26:21.555337 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:21.555352 32262 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0821 08:26:21.555363 32262 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0821 08:26:21.555383 32262 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.555480 32262 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0821 08:26:21.555677 32262 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0821 08:26:21.555696 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.555704 32262 net.cpp:165] Memory required for data: 876033500\nI0821 08:26:21.555722 32262 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0821 08:26:21.555739 32262 net.cpp:100] Creating Layer L2_b1_pool\nI0821 08:26:21.555752 32262 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0821 08:26:21.555771 32262 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0821 08:26:21.555824 32262 net.cpp:150] Setting up L2_b1_pool\nI0821 08:26:21.555845 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.555855 32262 net.cpp:165] Memory required for data: 878081500\nI0821 08:26:21.555866 32262 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0821 08:26:21.555887 32262 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0821 08:26:21.555899 32262 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0821 08:26:21.555912 32262 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0821 08:26:21.555928 32262 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0821 08:26:21.555990 32262 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0821 08:26:21.556010 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.556018 32262 net.cpp:165] Memory required for data: 880129500\nI0821 08:26:21.556028 32262 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0821 08:26:21.556041 32262 net.cpp:100] Creating Layer L2_b1_relu\nI0821 08:26:21.556062 32262 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0821 08:26:21.556078 32262 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.556097 32262 net.cpp:150] Setting up L2_b1_relu\nI0821 08:26:21.556120 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.556129 32262 net.cpp:165] Memory required for data: 882177500\nI0821 08:26:21.556139 32262 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0821 08:26:21.556156 32262 net.cpp:100] Creating Layer L2_b1_zeros\nI0821 08:26:21.556176 32262 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0821 08:26:21.558477 32262 net.cpp:150] Setting up L2_b1_zeros\nI0821 08:26:21.558499 32262 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0821 08:26:21.558509 32262 net.cpp:165] Memory required for data: 884225500\nI0821 08:26:21.558519 32262 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0821 08:26:21.558542 32262 net.cpp:100] Creating Layer L2_b1_concat0\nI0821 08:26:21.558553 32262 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0821 08:26:21.558568 32262 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0821 08:26:21.558583 32262 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0821 08:26:21.558650 32262 net.cpp:150] Setting up 
L2_b1_concat0\nI0821 08:26:21.558670 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.558678 32262 net.cpp:165] Memory required for data: 888321500\nI0821 08:26:21.558688 32262 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.558702 32262 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.558713 32262 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0821 08:26:21.558732 32262 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:21.558753 32262 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:21.558845 32262 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0821 08:26:21.558866 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.558881 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.558889 32262 net.cpp:165] Memory required for data: 896513500\nI0821 08:26:21.558899 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0821 08:26:21.558924 32262 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0821 08:26:21.558938 32262 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0821 08:26:21.558956 32262 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0821 08:26:21.559515 32262 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0821 08:26:21.559535 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.559545 32262 net.cpp:165] Memory required for data: 900609500\nI0821 08:26:21.559563 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0821 08:26:21.559586 32262 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0821 08:26:21.559597 32262 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0821 08:26:21.559617 32262 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0821 08:26:21.559928 32262 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0821 08:26:21.559948 32262 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0821 08:26:21.559958 32262 net.cpp:165] Memory required for data: 904705500\nI0821 08:26:21.559979 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:21.559995 32262 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0821 08:26:21.560006 32262 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0821 08:26:21.560022 32262 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.560124 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0821 08:26:21.560322 32262 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0821 08:26:21.560341 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.560351 32262 net.cpp:165] Memory required for data: 908801500\nI0821 08:26:21.560369 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0821 08:26:21.560385 32262 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0821 08:26:21.560405 32262 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0821 08:26:21.560425 32262 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.560446 32262 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0821 08:26:21.560461 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.560470 32262 net.cpp:165] Memory required for data: 912897500\nI0821 08:26:21.560480 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0821 08:26:21.560508 32262 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0821 08:26:21.560520 32262 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0821 08:26:21.560539 32262 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0821 08:26:21.561076 32262 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0821 08:26:21.561096 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.561113 32262 net.cpp:165] Memory required for data: 916993500\nI0821 08:26:21.561131 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0821 08:26:21.561152 32262 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0821 08:26:21.561166 
32262 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0821 08:26:21.561182 32262 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0821 08:26:21.561486 32262 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0821 08:26:21.561507 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.561517 32262 net.cpp:165] Memory required for data: 921089500\nI0821 08:26:21.561538 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:21.561554 32262 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0821 08:26:21.561566 32262 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0821 08:26:21.561580 32262 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.561673 32262 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0821 08:26:21.561877 32262 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0821 08:26:21.561895 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.561905 32262 net.cpp:165] Memory required for data: 925185500\nI0821 08:26:21.561923 32262 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0821 08:26:21.561939 32262 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0821 08:26:21.561950 32262 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0821 08:26:21.561964 32262 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0821 08:26:21.561985 32262 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0821 08:26:21.562034 32262 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0821 08:26:21.562052 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.562062 32262 net.cpp:165] Memory required for data: 929281500\nI0821 08:26:21.562072 32262 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0821 08:26:21.562085 32262 net.cpp:100] Creating Layer L2_b2_relu\nI0821 08:26:21.562098 32262 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0821 08:26:21.562125 32262 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0821 08:26:21.562145 32262 net.cpp:150] 
Setting up L2_b2_relu\nI0821 08:26:21.562161 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.562170 32262 net.cpp:165] Memory required for data: 933377500\nI0821 08:26:21.562180 32262 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.562194 32262 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.562206 32262 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0821 08:26:21.562219 32262 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:21.562237 32262 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:21.562321 32262 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0821 08:26:21.562340 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.562352 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.562361 32262 net.cpp:165] Memory required for data: 941569500\nI0821 08:26:21.562381 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0821 08:26:21.562405 32262 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0821 08:26:21.562418 32262 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0821 08:26:21.562438 32262 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0821 08:26:21.562976 32262 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0821 08:26:21.562996 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.563006 32262 net.cpp:165] Memory required for data: 945665500\nI0821 08:26:21.563024 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0821 08:26:21.563045 32262 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0821 08:26:21.563057 32262 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0821 08:26:21.563076 32262 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0821 08:26:21.563395 32262 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0821 08:26:21.563417 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.563427 32262 net.cpp:165] Memory required for data: 949761500\nI0821 08:26:21.563448 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:21.563464 32262 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0821 08:26:21.563477 32262 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0821 08:26:21.563493 32262 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.563585 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0821 08:26:21.563786 32262 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0821 08:26:21.563804 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.563814 32262 net.cpp:165] Memory required for data: 953857500\nI0821 08:26:21.563832 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0821 08:26:21.563848 32262 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0821 08:26:21.563859 32262 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0821 08:26:21.563879 32262 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.563899 32262 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0821 08:26:21.563915 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.563925 32262 net.cpp:165] Memory required for data: 957953500\nI0821 08:26:21.563935 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0821 08:26:21.563961 32262 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0821 08:26:21.563973 32262 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0821 08:26:21.563992 32262 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0821 08:26:21.564548 32262 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0821 08:26:21.564566 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.564576 32262 net.cpp:165] Memory required for data: 962049500\nI0821 08:26:21.564594 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0821 08:26:21.564610 32262 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0821 08:26:21.564621 32262 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0821 08:26:21.564642 32262 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0821 08:26:21.564954 32262 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0821 08:26:21.564973 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.564983 32262 net.cpp:165] Memory required for data: 966145500\nI0821 08:26:21.565004 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:21.565026 32262 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0821 08:26:21.565037 32262 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0821 08:26:21.565053 32262 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.565152 32262 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0821 08:26:21.565351 32262 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0821 08:26:21.565369 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.565379 32262 net.cpp:165] Memory required for data: 970241500\nI0821 08:26:21.565397 32262 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0821 08:26:21.565418 32262 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0821 08:26:21.565438 32262 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0821 08:26:21.565451 32262 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0821 08:26:21.565472 32262 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0821 08:26:21.565522 32262 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0821 08:26:21.565541 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.565551 32262 net.cpp:165] Memory required for data: 974337500\nI0821 08:26:21.565560 32262 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0821 08:26:21.565592 32262 net.cpp:100] Creating Layer L2_b3_relu\nI0821 08:26:21.565605 32262 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0821 08:26:21.565620 32262 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.565639 32262 net.cpp:150] Setting up L2_b3_relu\nI0821 08:26:21.565654 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.565665 32262 net.cpp:165] Memory required for data: 978433500\nI0821 08:26:21.565675 32262 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.565692 32262 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.565704 32262 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0821 08:26:21.565719 32262 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:21.565738 32262 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:21.565825 32262 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0821 08:26:21.565845 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.565857 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.565867 32262 net.cpp:165] Memory required for data: 986625500\nI0821 08:26:21.565877 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0821 08:26:21.565897 32262 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0821 08:26:21.565906 32262 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0821 08:26:21.565919 32262 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0821 08:26:21.566435 32262 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0821 08:26:21.566454 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.566464 32262 net.cpp:165] Memory required for data: 990721500\nI0821 08:26:21.566480 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0821 08:26:21.566496 32262 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0821 08:26:21.566509 32262 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0821 08:26:21.566531 32262 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0821 08:26:21.566854 32262 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0821 08:26:21.566874 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.566882 32262 net.cpp:165] Memory required for data: 994817500\nI0821 08:26:21.566903 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:21.566925 32262 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0821 08:26:21.566936 32262 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0821 08:26:21.566952 32262 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.567046 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0821 08:26:21.567252 32262 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0821 08:26:21.567271 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.567281 32262 net.cpp:165] Memory required for data: 998913500\nI0821 08:26:21.567298 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0821 08:26:21.567318 32262 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0821 08:26:21.567330 32262 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0821 08:26:21.567345 32262 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.567364 32262 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0821 08:26:21.567378 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.567389 32262 net.cpp:165] Memory required for data: 1003009500\nI0821 08:26:21.567409 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0821 08:26:21.567435 32262 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0821 08:26:21.567447 32262 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0821 08:26:21.567471 32262 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0821 08:26:21.568003 32262 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0821 08:26:21.568023 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.568033 32262 net.cpp:165] Memory required for data: 1007105500\nI0821 08:26:21.568050 32262 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0821 08:26:21.568066 32262 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0821 08:26:21.568078 32262 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0821 08:26:21.568094 32262 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0821 08:26:21.568409 32262 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0821 08:26:21.568428 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.568439 32262 net.cpp:165] Memory required for data: 1011201500\nI0821 08:26:21.568459 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:21.568475 32262 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0821 08:26:21.568486 32262 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0821 08:26:21.568508 32262 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.568603 32262 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0821 08:26:21.568806 32262 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0821 08:26:21.568825 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.568835 32262 net.cpp:165] Memory required for data: 1015297500\nI0821 08:26:21.568853 32262 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0821 08:26:21.568871 32262 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0821 08:26:21.568883 32262 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0821 08:26:21.568902 32262 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0821 08:26:21.568920 32262 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0821 08:26:21.568954 32262 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0821 08:26:21.568967 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.568972 32262 net.cpp:165] Memory required for data: 1019393500\nI0821 08:26:21.568979 32262 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0821 08:26:21.568985 32262 net.cpp:100] Creating Layer L2_b4_relu\nI0821 08:26:21.568991 32262 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0821 08:26:21.568998 32262 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0821 08:26:21.569011 32262 net.cpp:150] Setting up L2_b4_relu\nI0821 08:26:21.569017 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.569022 32262 net.cpp:165] Memory required for data: 1023489500\nI0821 08:26:21.569026 32262 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.569036 32262 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.569042 32262 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0821 08:26:21.569049 32262 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:21.569059 32262 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:21.569118 32262 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0821 08:26:21.569135 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.569147 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.569156 32262 net.cpp:165] Memory required for data: 1031681500\nI0821 08:26:21.569166 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0821 08:26:21.569185 32262 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0821 08:26:21.569197 32262 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0821 08:26:21.569219 32262 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0821 08:26:21.569797 32262 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0821 08:26:21.569818 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.569828 32262 net.cpp:165] Memory required for data: 1035777500\nI0821 08:26:21.569845 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0821 08:26:21.569864 32262 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0821 08:26:21.569875 32262 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0821 08:26:21.569895 32262 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0821 08:26:21.570217 32262 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0821 08:26:21.570237 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.570246 32262 net.cpp:165] Memory required for data: 1039873500\nI0821 08:26:21.570267 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:21.570288 32262 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0821 08:26:21.570300 32262 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0821 08:26:21.570317 32262 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.570410 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0821 08:26:21.570607 32262 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0821 08:26:21.570626 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.570636 32262 net.cpp:165] Memory required for data: 1043969500\nI0821 08:26:21.570653 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0821 08:26:21.570669 32262 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0821 08:26:21.570680 32262 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0821 08:26:21.570699 32262 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.570719 32262 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0821 08:26:21.570734 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.570742 32262 net.cpp:165] Memory required for data: 1048065500\nI0821 08:26:21.570752 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0821 08:26:21.570777 32262 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0821 08:26:21.570791 32262 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0821 08:26:21.570807 32262 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0821 08:26:21.571359 32262 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0821 08:26:21.571379 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.571388 32262 
net.cpp:165] Memory required for data: 1052161500\nI0821 08:26:21.571406 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0821 08:26:21.571429 32262 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0821 08:26:21.571440 32262 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0821 08:26:21.571457 32262 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0821 08:26:21.571775 32262 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0821 08:26:21.571794 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.571805 32262 net.cpp:165] Memory required for data: 1056257500\nI0821 08:26:21.571825 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:21.571841 32262 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0821 08:26:21.571853 32262 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0821 08:26:21.571873 32262 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.571971 32262 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0821 08:26:21.572175 32262 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0821 08:26:21.572194 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.572204 32262 net.cpp:165] Memory required for data: 1060353500\nI0821 08:26:21.572222 32262 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0821 08:26:21.572239 32262 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0821 08:26:21.572252 32262 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0821 08:26:21.572264 32262 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0821 08:26:21.572285 32262 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0821 08:26:21.572336 32262 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0821 08:26:21.572362 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.572372 32262 net.cpp:165] Memory required for data: 1064449500\nI0821 08:26:21.572383 32262 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0821 08:26:21.572402 32262 net.cpp:100] Creating 
Layer L2_b5_relu\nI0821 08:26:21.572413 32262 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0821 08:26:21.572428 32262 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.572446 32262 net.cpp:150] Setting up L2_b5_relu\nI0821 08:26:21.572461 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.572470 32262 net.cpp:165] Memory required for data: 1068545500\nI0821 08:26:21.572480 32262 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.572500 32262 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.572512 32262 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0821 08:26:21.572528 32262 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:21.572547 32262 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:21.572635 32262 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0821 08:26:21.572659 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.572672 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.572682 32262 net.cpp:165] Memory required for data: 1076737500\nI0821 08:26:21.572692 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0821 08:26:21.572711 32262 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0821 08:26:21.572724 32262 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0821 08:26:21.572742 32262 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0821 08:26:21.573308 32262 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0821 08:26:21.573329 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.573338 32262 net.cpp:165] Memory required for data: 1080833500\nI0821 08:26:21.573356 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0821 08:26:21.573377 32262 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0821 08:26:21.573390 32262 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0821 08:26:21.573406 32262 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0821 08:26:21.573714 32262 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0821 08:26:21.573732 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.573741 32262 net.cpp:165] Memory required for data: 1084929500\nI0821 08:26:21.573762 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:21.573778 32262 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0821 08:26:21.573789 32262 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0821 08:26:21.573813 32262 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.573909 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0821 08:26:21.574115 32262 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0821 08:26:21.574136 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.574144 32262 net.cpp:165] Memory required for data: 1089025500\nI0821 08:26:21.574162 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0821 08:26:21.574178 32262 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0821 08:26:21.574189 32262 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0821 08:26:21.574208 32262 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.574229 32262 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0821 08:26:21.574242 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.574252 32262 net.cpp:165] Memory required for data: 1093121500\nI0821 08:26:21.574262 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0821 08:26:21.574286 32262 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0821 08:26:21.574300 32262 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0821 08:26:21.574317 32262 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0821 08:26:21.574884 32262 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0821 08:26:21.574905 32262 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.574915 32262 net.cpp:165] Memory required for data: 1097217500\nI0821 08:26:21.574932 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0821 08:26:21.574954 32262 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0821 08:26:21.574966 32262 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0821 08:26:21.574985 32262 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0821 08:26:21.575300 32262 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0821 08:26:21.575320 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.575330 32262 net.cpp:165] Memory required for data: 1101313500\nI0821 08:26:21.575350 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:21.575367 32262 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0821 08:26:21.575379 32262 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0821 08:26:21.575399 32262 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.575494 32262 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0821 08:26:21.575690 32262 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0821 08:26:21.575709 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.575718 32262 net.cpp:165] Memory required for data: 1105409500\nI0821 08:26:21.575736 32262 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0821 08:26:21.575754 32262 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0821 08:26:21.575765 32262 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0821 08:26:21.575778 32262 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0821 08:26:21.575798 32262 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0821 08:26:21.575848 32262 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0821 08:26:21.575866 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.575875 32262 net.cpp:165] Memory required for data: 1109505500\nI0821 08:26:21.575886 32262 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0821 08:26:21.575904 32262 net.cpp:100] Creating Layer L2_b6_relu\nI0821 08:26:21.575917 32262 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0821 08:26:21.575932 32262 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.575949 32262 net.cpp:150] Setting up L2_b6_relu\nI0821 08:26:21.575963 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.575973 32262 net.cpp:165] Memory required for data: 1113601500\nI0821 08:26:21.575983 32262 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.575996 32262 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.576009 32262 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0821 08:26:21.576032 32262 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:21.576055 32262 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:21.576143 32262 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0821 08:26:21.576167 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.576182 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.576191 32262 net.cpp:165] Memory required for data: 1121793500\nI0821 08:26:21.576201 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0821 08:26:21.576220 32262 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0821 08:26:21.576233 32262 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0821 08:26:21.576251 32262 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0821 08:26:21.577805 32262 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0821 08:26:21.577827 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.577837 32262 net.cpp:165] Memory required for data: 1125889500\nI0821 08:26:21.577855 32262 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0821 08:26:21.577888 32262 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0821 08:26:21.577900 32262 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0821 08:26:21.577924 32262 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0821 08:26:21.578251 32262 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0821 08:26:21.578271 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.578281 32262 net.cpp:165] Memory required for data: 1129985500\nI0821 08:26:21.578303 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:21.578320 32262 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0821 08:26:21.578330 32262 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0821 08:26:21.578346 32262 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.578443 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0821 08:26:21.578641 32262 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0821 08:26:21.578660 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.578670 32262 net.cpp:165] Memory required for data: 1134081500\nI0821 08:26:21.578687 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0821 08:26:21.578703 32262 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0821 08:26:21.578716 32262 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0821 08:26:21.578738 32262 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.578759 32262 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0821 08:26:21.578773 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.578783 32262 net.cpp:165] Memory required for data: 1138177500\nI0821 08:26:21.578794 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0821 08:26:21.578817 32262 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0821 08:26:21.578831 32262 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0821 08:26:21.578848 32262 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0821 08:26:21.579396 32262 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0821 08:26:21.579416 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.579426 32262 net.cpp:165] Memory required for data: 1142273500\nI0821 08:26:21.579443 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0821 08:26:21.579465 32262 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0821 08:26:21.579478 32262 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0821 08:26:21.579495 32262 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0821 08:26:21.579803 32262 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0821 08:26:21.579826 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.579836 32262 net.cpp:165] Memory required for data: 1146369500\nI0821 08:26:21.579857 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:21.579874 32262 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0821 08:26:21.579885 32262 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0821 08:26:21.579901 32262 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.579994 32262 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0821 08:26:21.580199 32262 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0821 08:26:21.580219 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.580229 32262 net.cpp:165] Memory required for data: 1150465500\nI0821 08:26:21.580246 32262 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0821 08:26:21.580263 32262 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0821 08:26:21.580276 32262 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0821 08:26:21.580288 32262 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0821 08:26:21.580309 32262 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0821 08:26:21.580359 32262 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0821 08:26:21.580376 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.580387 32262 net.cpp:165] Memory required for 
data: 1154561500\nI0821 08:26:21.580397 32262 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0821 08:26:21.580415 32262 net.cpp:100] Creating Layer L2_b7_relu\nI0821 08:26:21.580436 32262 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0821 08:26:21.580452 32262 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.580471 32262 net.cpp:150] Setting up L2_b7_relu\nI0821 08:26:21.580487 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.580497 32262 net.cpp:165] Memory required for data: 1158657500\nI0821 08:26:21.580507 32262 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.580520 32262 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.580531 32262 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0821 08:26:21.580546 32262 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:21.580566 32262 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:21.580662 32262 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0821 08:26:21.580680 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.580694 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.580703 32262 net.cpp:165] Memory required for data: 1166849500\nI0821 08:26:21.580713 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0821 08:26:21.580739 32262 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0821 08:26:21.580752 32262 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0821 08:26:21.580771 32262 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0821 08:26:21.581318 32262 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0821 08:26:21.581338 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.581348 32262 net.cpp:165] Memory required for data: 
1170945500\nI0821 08:26:21.581367 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0821 08:26:21.581387 32262 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0821 08:26:21.581400 32262 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0821 08:26:21.581416 32262 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0821 08:26:21.581733 32262 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0821 08:26:21.581756 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.581768 32262 net.cpp:165] Memory required for data: 1175041500\nI0821 08:26:21.581789 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:21.581805 32262 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0821 08:26:21.581817 32262 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0821 08:26:21.581833 32262 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.581928 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0821 08:26:21.582134 32262 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0821 08:26:21.582154 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.582162 32262 net.cpp:165] Memory required for data: 1179137500\nI0821 08:26:21.582181 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0821 08:26:21.582196 32262 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0821 08:26:21.582207 32262 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0821 08:26:21.582226 32262 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.582247 32262 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0821 08:26:21.582262 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.582273 32262 net.cpp:165] Memory required for data: 1183233500\nI0821 08:26:21.582283 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0821 08:26:21.582305 32262 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0821 08:26:21.582319 32262 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0821 08:26:21.582336 32262 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0821 08:26:21.582872 32262 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0821 08:26:21.582892 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.582902 32262 net.cpp:165] Memory required for data: 1187329500\nI0821 08:26:21.582921 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0821 08:26:21.582949 32262 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0821 08:26:21.582963 32262 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0821 08:26:21.582979 32262 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0821 08:26:21.583312 32262 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0821 08:26:21.583331 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.583341 32262 net.cpp:165] Memory required for data: 1191425500\nI0821 08:26:21.583362 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:21.583382 32262 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0821 08:26:21.583395 32262 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0821 08:26:21.583411 32262 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.583504 32262 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0821 08:26:21.583706 32262 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0821 08:26:21.583725 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.583735 32262 net.cpp:165] Memory required for data: 1195521500\nI0821 08:26:21.583753 32262 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0821 08:26:21.583775 32262 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0821 08:26:21.583786 32262 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0821 08:26:21.583801 32262 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0821 08:26:21.583823 32262 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0821 08:26:21.583873 32262 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0821 08:26:21.583890 32262 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:21.583900 32262 net.cpp:165] Memory required for data: 1199617500\nI0821 08:26:21.583911 32262 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0821 08:26:21.583925 32262 net.cpp:100] Creating Layer L2_b8_relu\nI0821 08:26:21.583936 32262 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0821 08:26:21.583953 32262 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.583972 32262 net.cpp:150] Setting up L2_b8_relu\nI0821 08:26:21.583987 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.583997 32262 net.cpp:165] Memory required for data: 1203713500\nI0821 08:26:21.584007 32262 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.584020 32262 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.584031 32262 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0821 08:26:21.584046 32262 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:21.584085 32262 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:21.584187 32262 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0821 08:26:21.584205 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.584219 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.584229 32262 net.cpp:165] Memory required for data: 1211905500\nI0821 08:26:21.584239 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0821 08:26:21.584259 32262 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0821 08:26:21.584271 32262 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0821 08:26:21.584295 32262 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0821 08:26:21.584846 32262 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0821 08:26:21.584864 32262 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0821 08:26:21.584874 32262 net.cpp:165] Memory required for data: 1216001500\nI0821 08:26:21.584892 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0821 08:26:21.584909 32262 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0821 08:26:21.584921 32262 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0821 08:26:21.584941 32262 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0821 08:26:21.585281 32262 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0821 08:26:21.585307 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.585317 32262 net.cpp:165] Memory required for data: 1220097500\nI0821 08:26:21.585340 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:21.585361 32262 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0821 08:26:21.585373 32262 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0821 08:26:21.585388 32262 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.585480 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0821 08:26:21.585683 32262 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0821 08:26:21.585702 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.585711 32262 net.cpp:165] Memory required for data: 1224193500\nI0821 08:26:21.585729 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0821 08:26:21.585749 32262 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0821 08:26:21.585762 32262 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0821 08:26:21.585777 32262 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.585795 32262 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0821 08:26:21.585820 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.585830 32262 net.cpp:165] Memory required for data: 1228289500\nI0821 08:26:21.585840 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0821 08:26:21.585860 32262 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0821 08:26:21.585872 32262 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0821 08:26:21.585896 32262 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0821 08:26:21.587442 32262 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0821 08:26:21.587465 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.587473 32262 net.cpp:165] Memory required for data: 1232385500\nI0821 08:26:21.587491 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0821 08:26:21.587513 32262 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0821 08:26:21.587525 32262 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0821 08:26:21.587548 32262 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0821 08:26:21.587852 32262 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0821 08:26:21.587874 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.587885 32262 net.cpp:165] Memory required for data: 1236481500\nI0821 08:26:21.587960 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:21.587985 32262 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0821 08:26:21.587998 32262 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0821 08:26:21.588014 32262 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.588119 32262 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0821 08:26:21.588310 32262 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0821 08:26:21.588330 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.588338 32262 net.cpp:165] Memory required for data: 1240577500\nI0821 08:26:21.588357 32262 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0821 08:26:21.588373 32262 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0821 08:26:21.588385 32262 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0821 08:26:21.588398 32262 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0821 08:26:21.588420 32262 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0821 08:26:21.588469 32262 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0821 08:26:21.588498 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.588508 32262 net.cpp:165] Memory required for data: 1244673500\nI0821 08:26:21.588518 32262 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0821 08:26:21.588533 32262 net.cpp:100] Creating Layer L2_b9_relu\nI0821 08:26:21.588546 32262 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0821 08:26:21.588560 32262 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.588578 32262 net.cpp:150] Setting up L2_b9_relu\nI0821 08:26:21.588593 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.588610 32262 net.cpp:165] Memory required for data: 1248769500\nI0821 08:26:21.588621 32262 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.588640 32262 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.588652 32262 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0821 08:26:21.588668 32262 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:21.588688 32262 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:21.588780 32262 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0821 08:26:21.588798 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.588812 32262 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0821 08:26:21.588821 32262 net.cpp:165] Memory required for data: 1256961500\nI0821 08:26:21.588831 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0821 08:26:21.588851 32262 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0821 08:26:21.588865 32262 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0821 08:26:21.588887 32262 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0821 08:26:21.589434 32262 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0821 08:26:21.589454 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.589463 32262 net.cpp:165] Memory required for data: 1257985500\nI0821 08:26:21.589480 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0821 08:26:21.589498 32262 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0821 08:26:21.589509 32262 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0821 08:26:21.589531 32262 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0821 08:26:21.589869 32262 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0821 08:26:21.589892 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.589903 32262 net.cpp:165] Memory required for data: 1259009500\nI0821 08:26:21.589926 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:21.589942 32262 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0821 08:26:21.589953 32262 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0821 08:26:21.589968 32262 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.590060 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0821 08:26:21.590270 32262 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0821 08:26:21.590289 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.590298 32262 net.cpp:165] Memory required for data: 1260033500\nI0821 08:26:21.590317 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0821 08:26:21.590332 32262 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0821 08:26:21.590343 32262 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0821 08:26:21.590363 32262 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0821 08:26:21.590384 32262 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0821 08:26:21.590396 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.590406 32262 net.cpp:165] Memory required for data: 1261057500\nI0821 08:26:21.590416 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0821 08:26:21.590440 32262 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0821 08:26:21.590453 32262 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0821 08:26:21.590471 32262 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0821 08:26:21.591018 32262 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0821 08:26:21.591037 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.591047 32262 net.cpp:165] Memory required for data: 1262081500\nI0821 08:26:21.591063 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0821 08:26:21.591089 32262 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0821 08:26:21.591107 32262 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0821 08:26:21.591131 32262 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0821 08:26:21.591472 32262 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0821 08:26:21.591498 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.591508 32262 net.cpp:165] Memory required for data: 1263105500\nI0821 08:26:21.591531 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:21.591547 32262 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0821 08:26:21.591558 32262 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0821 08:26:21.591578 32262 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0821 08:26:21.591670 32262 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0821 08:26:21.591873 32262 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0821 08:26:21.591892 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.591902 32262 net.cpp:165] Memory required for data: 1264129500\nI0821 08:26:21.591920 32262 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0821 08:26:21.591938 32262 net.cpp:100] Creating Layer L3_b1_pool\nI0821 08:26:21.591948 32262 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0821 08:26:21.591969 32262 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0821 08:26:21.592033 32262 net.cpp:150] Setting up L3_b1_pool\nI0821 08:26:21.592053 32262 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0821 08:26:21.592063 32262 net.cpp:165] Memory required for data: 1265153500\nI0821 08:26:21.592073 32262 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0821 08:26:21.592089 32262 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0821 08:26:21.592108 32262 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0821 08:26:21.592123 32262 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0821 08:26:21.592142 32262 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0821 08:26:21.592201 32262 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0821 08:26:21.592221 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.592231 32262 net.cpp:165] Memory required for data: 1266177500\nI0821 08:26:21.592241 32262 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0821 08:26:21.592255 32262 net.cpp:100] Creating Layer L3_b1_relu\nI0821 08:26:21.592267 32262 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0821 08:26:21.592279 32262 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0821 08:26:21.592299 32262 net.cpp:150] Setting up L3_b1_relu\nI0821 08:26:21.592314 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.592322 32262 net.cpp:165] Memory required for data: 1267201500\nI0821 08:26:21.592332 32262 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0821 08:26:21.592355 32262 net.cpp:100] Creating Layer L3_b1_zeros\nI0821 08:26:21.592371 32262 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0821 08:26:21.593639 32262 net.cpp:150] Setting up L3_b1_zeros\nI0821 08:26:21.593662 32262 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0821 08:26:21.593672 32262 net.cpp:165] Memory required for data: 1268225500\nI0821 08:26:21.593683 32262 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0821 08:26:21.593698 32262 net.cpp:100] Creating Layer L3_b1_concat0\nI0821 08:26:21.593710 32262 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0821 08:26:21.593722 32262 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0821 
08:26:21.593744 32262 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0821 08:26:21.593809 32262 net.cpp:150] Setting up L3_b1_concat0\nI0821 08:26:21.593838 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.593849 32262 net.cpp:165] Memory required for data: 1270273500\nI0821 08:26:21.593860 32262 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.593874 32262 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.593885 32262 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0821 08:26:21.593899 32262 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:21.593910 32262 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:21.593973 32262 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0821 08:26:21.593983 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.593997 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.594002 32262 net.cpp:165] Memory required for data: 1274369500\nI0821 08:26:21.594007 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0821 08:26:21.594022 32262 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0821 08:26:21.594028 32262 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0821 08:26:21.594038 32262 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0821 08:26:21.595118 32262 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0821 08:26:21.595137 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.595146 32262 net.cpp:165] Memory required for data: 1276417500\nI0821 08:26:21.595163 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0821 08:26:21.595185 32262 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0821 08:26:21.595198 32262 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0821 08:26:21.595219 32262 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0821 
08:26:21.595536 32262 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0821 08:26:21.595556 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.595566 32262 net.cpp:165] Memory required for data: 1278465500\nI0821 08:26:21.595587 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:21.595603 32262 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0821 08:26:21.595615 32262 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0821 08:26:21.595635 32262 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.595729 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0821 08:26:21.595930 32262 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0821 08:26:21.595949 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.595957 32262 net.cpp:165] Memory required for data: 1280513500\nI0821 08:26:21.595975 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0821 08:26:21.595994 32262 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0821 08:26:21.596006 32262 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0821 08:26:21.596021 32262 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0821 08:26:21.596040 32262 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0821 08:26:21.596055 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.596063 32262 net.cpp:165] Memory required for data: 1282561500\nI0821 08:26:21.596073 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0821 08:26:21.596097 32262 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0821 08:26:21.596117 32262 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0821 08:26:21.596140 32262 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0821 08:26:21.597231 32262 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0821 08:26:21.597251 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.597260 32262 net.cpp:165] Memory required for data: 1284609500\nI0821 08:26:21.597278 32262 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0821 08:26:21.597296 32262 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0821 08:26:21.597308 32262 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0821 08:26:21.597329 32262 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0821 08:26:21.597646 32262 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0821 08:26:21.597669 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.597681 32262 net.cpp:165] Memory required for data: 1286657500\nI0821 08:26:21.597702 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:21.597718 32262 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0821 08:26:21.597729 32262 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0821 08:26:21.597744 32262 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0821 08:26:21.597836 32262 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0821 08:26:21.598040 32262 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0821 08:26:21.598059 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.598069 32262 net.cpp:165] Memory required for data: 1288705500\nI0821 08:26:21.598095 32262 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0821 08:26:21.598124 32262 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0821 08:26:21.598137 32262 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0821 08:26:21.598151 32262 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0821 08:26:21.598167 32262 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0821 08:26:21.598230 32262 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0821 08:26:21.598251 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.598261 32262 net.cpp:165] Memory required for data: 1290753500\nI0821 08:26:21.598271 32262 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0821 08:26:21.598286 32262 net.cpp:100] Creating Layer L3_b2_relu\nI0821 08:26:21.598297 32262 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0821 08:26:21.598311 32262 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0821 08:26:21.598330 32262 net.cpp:150] Setting up L3_b2_relu\nI0821 08:26:21.598345 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.598353 32262 net.cpp:165] Memory required for data: 1292801500\nI0821 08:26:21.598363 32262 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.598376 32262 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.598387 32262 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0821 08:26:21.598407 32262 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:21.598428 32262 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:21.598512 32262 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0821 08:26:21.598531 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.598544 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.598553 32262 net.cpp:165] Memory required for data: 1296897500\nI0821 08:26:21.598564 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0821 08:26:21.598588 32262 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0821 08:26:21.598603 32262 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0821 08:26:21.598620 32262 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0821 08:26:21.599722 32262 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0821 08:26:21.599742 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.599752 32262 net.cpp:165] Memory required for data: 1298945500\nI0821 08:26:21.599771 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0821 08:26:21.599792 32262 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0821 08:26:21.599804 32262 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0821 08:26:21.599822 32262 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0821 08:26:21.600141 32262 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0821 08:26:21.600160 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.600169 32262 net.cpp:165] Memory required for data: 1300993500\nI0821 08:26:21.600191 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:21.600208 32262 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0821 08:26:21.600219 32262 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0821 08:26:21.600239 32262 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.600333 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0821 08:26:21.600535 32262 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0821 08:26:21.600553 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.600564 32262 net.cpp:165] Memory required for data: 1303041500\nI0821 08:26:21.600581 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0821 08:26:21.600600 32262 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0821 08:26:21.600612 32262 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0821 08:26:21.600626 32262 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0821 08:26:21.600646 32262 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0821 08:26:21.600667 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.600678 32262 net.cpp:165] Memory required for data: 1305089500\nI0821 08:26:21.600688 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0821 08:26:21.600716 32262 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0821 08:26:21.600729 32262 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0821 08:26:21.600752 32262 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0821 08:26:21.601837 32262 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0821 08:26:21.601858 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.601867 32262 net.cpp:165] Memory required for data: 1307137500\nI0821 08:26:21.601884 32262 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0821 08:26:21.601902 32262 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0821 08:26:21.601915 32262 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0821 08:26:21.601935 32262 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0821 08:26:21.602255 32262 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0821 08:26:21.602282 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.602293 32262 net.cpp:165] Memory required for data: 1309185500\nI0821 08:26:21.602314 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:21.602330 32262 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0821 08:26:21.602342 32262 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0821 08:26:21.602357 32262 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0821 08:26:21.602452 32262 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0821 08:26:21.602654 32262 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0821 08:26:21.602672 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.602682 32262 net.cpp:165] Memory required for data: 1311233500\nI0821 08:26:21.602700 32262 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0821 08:26:21.602721 32262 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0821 08:26:21.602733 32262 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0821 08:26:21.602747 32262 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0821 08:26:21.602763 32262 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0821 08:26:21.602823 32262 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0821 08:26:21.602840 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.602850 32262 net.cpp:165] Memory required for data: 1313281500\nI0821 08:26:21.602861 32262 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0821 08:26:21.602875 32262 net.cpp:100] Creating Layer L3_b3_relu\nI0821 08:26:21.602888 32262 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0821 08:26:21.602902 32262 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0821 08:26:21.602921 32262 net.cpp:150] Setting up L3_b3_relu\nI0821 08:26:21.602936 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.602946 32262 net.cpp:165] Memory required for data: 1315329500\nI0821 08:26:21.602955 32262 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.602968 32262 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.602980 32262 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0821 08:26:21.602999 32262 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:21.603021 32262 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:21.603104 32262 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0821 08:26:21.603123 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.603137 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.603145 32262 net.cpp:165] Memory required for data: 1319425500\nI0821 08:26:21.603155 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0821 08:26:21.603183 32262 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0821 08:26:21.603196 32262 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0821 08:26:21.603225 32262 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0821 08:26:21.604332 32262 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0821 08:26:21.604353 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.604363 32262 net.cpp:165] Memory required for data: 1321473500\nI0821 08:26:21.604382 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0821 08:26:21.604404 32262 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0821 08:26:21.604416 32262 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0821 08:26:21.604434 32262 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0821 08:26:21.604742 32262 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0821 08:26:21.604760 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.604770 32262 net.cpp:165] Memory required for data: 1323521500\nI0821 08:26:21.604791 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:21.604811 32262 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0821 08:26:21.604825 32262 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0821 08:26:21.604840 32262 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.604933 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0821 08:26:21.605098 32262 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0821 08:26:21.605120 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.605129 32262 net.cpp:165] Memory required for data: 1325569500\nI0821 08:26:21.605147 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0821 08:26:21.605165 32262 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0821 08:26:21.605178 32262 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0821 08:26:21.605192 32262 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0821 08:26:21.605211 32262 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0821 08:26:21.605226 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.605235 32262 net.cpp:165] Memory required for data: 1327617500\nI0821 08:26:21.605244 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0821 08:26:21.605269 32262 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0821 08:26:21.605281 32262 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0821 08:26:21.605304 32262 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0821 08:26:21.607376 32262 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0821 08:26:21.607398 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.607409 32262 net.cpp:165] Memory 
required for data: 1329665500\nI0821 08:26:21.607426 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0821 08:26:21.607450 32262 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0821 08:26:21.607462 32262 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0821 08:26:21.607480 32262 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0821 08:26:21.607791 32262 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0821 08:26:21.607810 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.607820 32262 net.cpp:165] Memory required for data: 1331713500\nI0821 08:26:21.607842 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:21.607862 32262 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0821 08:26:21.607875 32262 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0821 08:26:21.607892 32262 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0821 08:26:21.607986 32262 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0821 08:26:21.608194 32262 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0821 08:26:21.608212 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.608222 32262 net.cpp:165] Memory required for data: 1333761500\nI0821 08:26:21.608240 32262 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0821 08:26:21.608263 32262 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0821 08:26:21.608274 32262 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0821 08:26:21.608289 32262 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0821 08:26:21.608304 32262 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0821 08:26:21.608373 32262 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0821 08:26:21.608392 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.608402 32262 net.cpp:165] Memory required for data: 1335809500\nI0821 08:26:21.608412 32262 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0821 08:26:21.608427 32262 net.cpp:100] Creating Layer L3_b4_relu\nI0821 
08:26:21.608438 32262 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0821 08:26:21.608458 32262 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0821 08:26:21.608477 32262 net.cpp:150] Setting up L3_b4_relu\nI0821 08:26:21.608492 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.608501 32262 net.cpp:165] Memory required for data: 1337857500\nI0821 08:26:21.608510 32262 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.608525 32262 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.608536 32262 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0821 08:26:21.608552 32262 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:21.608572 32262 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:21.608657 32262 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0821 08:26:21.608675 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.608688 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.608698 32262 net.cpp:165] Memory required for data: 1341953500\nI0821 08:26:21.608708 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0821 08:26:21.608728 32262 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0821 08:26:21.608741 32262 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0821 08:26:21.608762 32262 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0821 08:26:21.609851 32262 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0821 08:26:21.609872 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.609881 32262 net.cpp:165] Memory required for data: 1344001500\nI0821 08:26:21.609900 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0821 08:26:21.609922 32262 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0821 
08:26:21.609935 32262 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0821 08:26:21.609952 32262 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0821 08:26:21.610276 32262 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0821 08:26:21.610296 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.610306 32262 net.cpp:165] Memory required for data: 1346049500\nI0821 08:26:21.610327 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:21.610342 32262 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0821 08:26:21.610353 32262 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0821 08:26:21.610369 32262 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.610466 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0821 08:26:21.610668 32262 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0821 08:26:21.610687 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.610695 32262 net.cpp:165] Memory required for data: 1348097500\nI0821 08:26:21.610714 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0821 08:26:21.610729 32262 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0821 08:26:21.610740 32262 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0821 08:26:21.610755 32262 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0821 08:26:21.610775 32262 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0821 08:26:21.610790 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.610800 32262 net.cpp:165] Memory required for data: 1350145500\nI0821 08:26:21.610810 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0821 08:26:21.610836 32262 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0821 08:26:21.610849 32262 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0821 08:26:21.610880 32262 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0821 08:26:21.611937 32262 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0821 08:26:21.611958 32262 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:26:21.611966 32262 net.cpp:165] Memory required for data: 1352193500\nI0821 08:26:21.611984 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0821 08:26:21.612005 32262 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0821 08:26:21.612017 32262 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0821 08:26:21.612035 32262 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0821 08:26:21.612347 32262 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0821 08:26:21.612367 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.612377 32262 net.cpp:165] Memory required for data: 1354241500\nI0821 08:26:21.612398 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:21.612422 32262 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0821 08:26:21.612437 32262 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0821 08:26:21.612452 32262 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0821 08:26:21.612551 32262 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0821 08:26:21.612748 32262 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0821 08:26:21.612766 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.612776 32262 net.cpp:165] Memory required for data: 1356289500\nI0821 08:26:21.612794 32262 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0821 08:26:21.612814 32262 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0821 08:26:21.612828 32262 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0821 08:26:21.612840 32262 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0821 08:26:21.612861 32262 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0821 08:26:21.612918 32262 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0821 08:26:21.612937 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.612947 32262 net.cpp:165] Memory required for data: 1358337500\nI0821 08:26:21.612957 32262 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0821 
08:26:21.612977 32262 net.cpp:100] Creating Layer L3_b5_relu\nI0821 08:26:21.612988 32262 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0821 08:26:21.613003 32262 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0821 08:26:21.613021 32262 net.cpp:150] Setting up L3_b5_relu\nI0821 08:26:21.613036 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.613045 32262 net.cpp:165] Memory required for data: 1360385500\nI0821 08:26:21.613055 32262 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.613070 32262 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.613080 32262 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0821 08:26:21.613095 32262 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:21.613121 32262 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:21.613209 32262 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0821 08:26:21.613230 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.613245 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.613255 32262 net.cpp:165] Memory required for data: 1364481500\nI0821 08:26:21.613265 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0821 08:26:21.613289 32262 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0821 08:26:21.613303 32262 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0821 08:26:21.613322 32262 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0821 08:26:21.614413 32262 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0821 08:26:21.614434 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.614444 32262 net.cpp:165] Memory required for data: 1366529500\nI0821 08:26:21.614470 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0821 08:26:21.614495 
32262 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0821 08:26:21.614508 32262 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0821 08:26:21.614526 32262 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0821 08:26:21.614873 32262 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0821 08:26:21.614893 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.614902 32262 net.cpp:165] Memory required for data: 1368577500\nI0821 08:26:21.614924 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:21.614940 32262 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0821 08:26:21.614953 32262 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0821 08:26:21.614969 32262 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.615067 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0821 08:26:21.615272 32262 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0821 08:26:21.615291 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.615301 32262 net.cpp:165] Memory required for data: 1370625500\nI0821 08:26:21.615319 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0821 08:26:21.615335 32262 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0821 08:26:21.615347 32262 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0821 08:26:21.615366 32262 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0821 08:26:21.615387 32262 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0821 08:26:21.615401 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.615411 32262 net.cpp:165] Memory required for data: 1372673500\nI0821 08:26:21.615420 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0821 08:26:21.615448 32262 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0821 08:26:21.615463 32262 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0821 08:26:21.615479 32262 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0821 08:26:21.616613 32262 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0821 
08:26:21.616634 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.616644 32262 net.cpp:165] Memory required for data: 1374721500\nI0821 08:26:21.616662 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0821 08:26:21.616684 32262 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0821 08:26:21.616698 32262 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0821 08:26:21.616714 32262 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0821 08:26:21.617029 32262 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0821 08:26:21.617048 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617056 32262 net.cpp:165] Memory required for data: 1376769500\nI0821 08:26:21.617077 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:21.617099 32262 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0821 08:26:21.617118 32262 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0821 08:26:21.617136 32262 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0821 08:26:21.617229 32262 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0821 08:26:21.617430 32262 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0821 08:26:21.617449 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617458 32262 net.cpp:165] Memory required for data: 1378817500\nI0821 08:26:21.617477 32262 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0821 08:26:21.617499 32262 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0821 08:26:21.617512 32262 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0821 08:26:21.617527 32262 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0821 08:26:21.617547 32262 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0821 08:26:21.617606 32262 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0821 08:26:21.617624 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617633 32262 net.cpp:165] Memory required for data: 1380865500\nI0821 08:26:21.617643 32262 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0821 08:26:21.617662 32262 net.cpp:100] Creating Layer L3_b6_relu\nI0821 08:26:21.617684 32262 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0821 08:26:21.617699 32262 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0821 08:26:21.617719 32262 net.cpp:150] Setting up L3_b6_relu\nI0821 08:26:21.617735 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617744 32262 net.cpp:165] Memory required for data: 1382913500\nI0821 08:26:21.617754 32262 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.617769 32262 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.617780 32262 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0821 08:26:21.617795 32262 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:21.617815 32262 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:21.617908 32262 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0821 08:26:21.617926 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617940 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.617949 32262 net.cpp:165] Memory required for data: 1387009500\nI0821 08:26:21.617959 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0821 08:26:21.617985 32262 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0821 08:26:21.617997 32262 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0821 08:26:21.618016 32262 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0821 08:26:21.619093 32262 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0821 08:26:21.619119 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.619128 32262 net.cpp:165] Memory required for data: 1389057500\nI0821 08:26:21.619148 32262 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0821 08:26:21.619168 32262 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0821 08:26:21.619181 32262 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0821 08:26:21.619207 32262 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0821 08:26:21.619532 32262 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0821 08:26:21.619551 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.619561 32262 net.cpp:165] Memory required for data: 1391105500\nI0821 08:26:21.619582 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:21.619599 32262 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0821 08:26:21.619611 32262 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0821 08:26:21.619626 32262 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.619724 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0821 08:26:21.619922 32262 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0821 08:26:21.619941 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.619951 32262 net.cpp:165] Memory required for data: 1393153500\nI0821 08:26:21.619969 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0821 08:26:21.620021 32262 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0821 08:26:21.620036 32262 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0821 08:26:21.620052 32262 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0821 08:26:21.620072 32262 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0821 08:26:21.620086 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.620095 32262 net.cpp:165] Memory required for data: 1395201500\nI0821 08:26:21.620115 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0821 08:26:21.620141 32262 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0821 08:26:21.620153 32262 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0821 08:26:21.620172 32262 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0821 
08:26:21.621269 32262 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0821 08:26:21.621290 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.621300 32262 net.cpp:165] Memory required for data: 1397249500\nI0821 08:26:21.621317 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0821 08:26:21.621351 32262 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0821 08:26:21.621363 32262 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0821 08:26:21.621381 32262 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0821 08:26:21.621703 32262 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0821 08:26:21.621723 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.621733 32262 net.cpp:165] Memory required for data: 1399297500\nI0821 08:26:21.621754 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:21.621772 32262 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0821 08:26:21.621783 32262 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0821 08:26:21.621798 32262 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0821 08:26:21.621903 32262 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0821 08:26:21.622112 32262 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0821 08:26:21.622131 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.622140 32262 net.cpp:165] Memory required for data: 1401345500\nI0821 08:26:21.622159 32262 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0821 08:26:21.622175 32262 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0821 08:26:21.622187 32262 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0821 08:26:21.622200 32262 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0821 08:26:21.622220 32262 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0821 08:26:21.622277 32262 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0821 08:26:21.622301 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.622313 32262 net.cpp:165] Memory 
required for data: 1403393500\nI0821 08:26:21.622323 32262 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0821 08:26:21.622336 32262 net.cpp:100] Creating Layer L3_b7_relu\nI0821 08:26:21.622349 32262 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0821 08:26:21.622362 32262 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0821 08:26:21.622380 32262 net.cpp:150] Setting up L3_b7_relu\nI0821 08:26:21.622395 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.622406 32262 net.cpp:165] Memory required for data: 1405441500\nI0821 08:26:21.622416 32262 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.622433 32262 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.622444 32262 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0821 08:26:21.622460 32262 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:21.622481 32262 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:21.622570 32262 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0821 08:26:21.622588 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.622602 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.622611 32262 net.cpp:165] Memory required for data: 1409537500\nI0821 08:26:21.622622 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0821 08:26:21.622642 32262 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0821 08:26:21.622654 32262 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0821 08:26:21.622678 32262 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0821 08:26:21.624755 32262 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0821 08:26:21.624776 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.624788 32262 net.cpp:165] Memory required for data: 
1411585500\nI0821 08:26:21.624805 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0821 08:26:21.624827 32262 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0821 08:26:21.624840 32262 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0821 08:26:21.624862 32262 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0821 08:26:21.625205 32262 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0821 08:26:21.625231 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.625242 32262 net.cpp:165] Memory required for data: 1413633500\nI0821 08:26:21.625264 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:21.625280 32262 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0821 08:26:21.625291 32262 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0821 08:26:21.625314 32262 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.625411 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0821 08:26:21.625613 32262 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0821 08:26:21.625633 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.625641 32262 net.cpp:165] Memory required for data: 1415681500\nI0821 08:26:21.625660 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0821 08:26:21.625675 32262 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0821 08:26:21.625687 32262 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0821 08:26:21.625706 32262 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0821 08:26:21.625726 32262 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0821 08:26:21.625741 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.625751 32262 net.cpp:165] Memory required for data: 1417729500\nI0821 08:26:21.625761 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0821 08:26:21.625787 32262 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0821 08:26:21.625799 32262 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0821 08:26:21.625816 32262 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0821 08:26:21.626898 32262 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0821 08:26:21.626922 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.626933 32262 net.cpp:165] Memory required for data: 1419777500\nI0821 08:26:21.626951 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0821 08:26:21.626968 32262 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0821 08:26:21.626981 32262 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0821 08:26:21.627002 32262 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0821 08:26:21.627316 32262 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0821 08:26:21.627336 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.627346 32262 net.cpp:165] Memory required for data: 1421825500\nI0821 08:26:21.627367 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:21.627388 32262 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0821 08:26:21.627401 32262 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0821 08:26:21.627416 32262 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0821 08:26:21.627511 32262 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0821 08:26:21.627713 32262 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0821 08:26:21.627732 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.627743 32262 net.cpp:165] Memory required for data: 1423873500\nI0821 08:26:21.627760 32262 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0821 08:26:21.627776 32262 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0821 08:26:21.627789 32262 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0821 08:26:21.627802 32262 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0821 08:26:21.627823 32262 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0821 08:26:21.627879 32262 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0821 08:26:21.627898 32262 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0821 08:26:21.627908 32262 net.cpp:165] Memory required for data: 1425921500\nI0821 08:26:21.627918 32262 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0821 08:26:21.627936 32262 net.cpp:100] Creating Layer L3_b8_relu\nI0821 08:26:21.627949 32262 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0821 08:26:21.627964 32262 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0821 08:26:21.627982 32262 net.cpp:150] Setting up L3_b8_relu\nI0821 08:26:21.627997 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.628007 32262 net.cpp:165] Memory required for data: 1427969500\nI0821 08:26:21.628024 32262 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.628039 32262 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.628051 32262 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0821 08:26:21.628067 32262 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:21.628085 32262 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:21.628183 32262 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0821 08:26:21.628202 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.628216 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.628224 32262 net.cpp:165] Memory required for data: 1432065500\nI0821 08:26:21.628235 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0821 08:26:21.628259 32262 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0821 08:26:21.628273 32262 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0821 08:26:21.628291 32262 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0821 08:26:21.629374 32262 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0821 08:26:21.629393 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 
08:26:21.629402 32262 net.cpp:165] Memory required for data: 1434113500\nI0821 08:26:21.629420 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0821 08:26:21.629441 32262 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0821 08:26:21.629453 32262 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0821 08:26:21.629474 32262 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0821 08:26:21.629784 32262 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0821 08:26:21.629804 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.629813 32262 net.cpp:165] Memory required for data: 1436161500\nI0821 08:26:21.629834 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:21.629850 32262 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0821 08:26:21.629863 32262 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0821 08:26:21.629883 32262 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.629981 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0821 08:26:21.630188 32262 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0821 08:26:21.630208 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.630216 32262 net.cpp:165] Memory required for data: 1438209500\nI0821 08:26:21.630234 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0821 08:26:21.630249 32262 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0821 08:26:21.630261 32262 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0821 08:26:21.630280 32262 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0821 08:26:21.630300 32262 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0821 08:26:21.630314 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.630323 32262 net.cpp:165] Memory required for data: 1440257500\nI0821 08:26:21.630333 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0821 08:26:21.630358 32262 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0821 08:26:21.630372 32262 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0821 08:26:21.630393 32262 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0821 08:26:21.631474 32262 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0821 08:26:21.631494 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.631505 32262 net.cpp:165] Memory required for data: 1442305500\nI0821 08:26:21.631521 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0821 08:26:21.631538 32262 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0821 08:26:21.631551 32262 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0821 08:26:21.631577 32262 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0821 08:26:21.631889 32262 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0821 08:26:21.631914 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.631933 32262 net.cpp:165] Memory required for data: 1444353500\nI0821 08:26:21.631956 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:21.631973 32262 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0821 08:26:21.631986 32262 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0821 08:26:21.632000 32262 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0821 08:26:21.632099 32262 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0821 08:26:21.632306 32262 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0821 08:26:21.632326 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.632335 32262 net.cpp:165] Memory required for data: 1446401500\nI0821 08:26:21.632354 32262 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0821 08:26:21.632377 32262 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0821 08:26:21.632390 32262 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0821 08:26:21.632405 32262 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0821 08:26:21.632421 32262 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0821 08:26:21.632483 32262 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0821 
08:26:21.632501 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.632510 32262 net.cpp:165] Memory required for data: 1448449500\nI0821 08:26:21.632519 32262 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0821 08:26:21.632534 32262 net.cpp:100] Creating Layer L3_b9_relu\nI0821 08:26:21.632545 32262 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0821 08:26:21.632560 32262 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0821 08:26:21.632578 32262 net.cpp:150] Setting up L3_b9_relu\nI0821 08:26:21.632593 32262 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0821 08:26:21.632602 32262 net.cpp:165] Memory required for data: 1450497500\nI0821 08:26:21.632611 32262 layer_factory.hpp:77] Creating layer post_pool\nI0821 08:26:21.632627 32262 net.cpp:100] Creating Layer post_pool\nI0821 08:26:21.632637 32262 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0821 08:26:21.632658 32262 net.cpp:408] post_pool -> post_pool\nI0821 08:26:21.632719 32262 net.cpp:150] Setting up post_pool\nI0821 08:26:21.632738 32262 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0821 08:26:21.632747 32262 net.cpp:165] Memory required for data: 1450529500\nI0821 08:26:21.632758 32262 layer_factory.hpp:77] Creating layer post_FC\nI0821 08:26:21.632777 32262 net.cpp:100] Creating Layer post_FC\nI0821 08:26:21.632788 32262 net.cpp:434] post_FC <- post_pool\nI0821 08:26:21.632810 32262 net.cpp:408] post_FC -> post_FC_top\nI0821 08:26:21.633013 32262 net.cpp:150] Setting up post_FC\nI0821 08:26:21.633033 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.633041 32262 net.cpp:165] Memory required for data: 1450534500\nI0821 08:26:21.633059 32262 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0821 08:26:21.633074 32262 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0821 08:26:21.633085 32262 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0821 08:26:21.633112 32262 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0821 08:26:21.633134 32262 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0821 08:26:21.633229 32262 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0821 08:26:21.633249 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.633260 32262 net.cpp:157] Top shape: 125 10 (1250)\nI0821 08:26:21.633270 32262 net.cpp:165] Memory required for data: 1450544500\nI0821 08:26:21.633280 32262 layer_factory.hpp:77] Creating layer accuracy\nI0821 08:26:21.633296 32262 net.cpp:100] Creating Layer accuracy\nI0821 08:26:21.633306 32262 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0821 08:26:21.633321 32262 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0821 08:26:21.633335 32262 net.cpp:408] accuracy -> accuracy\nI0821 08:26:21.633360 32262 net.cpp:150] Setting up accuracy\nI0821 08:26:21.633375 32262 net.cpp:157] Top shape: (1)\nI0821 08:26:21.633394 32262 net.cpp:165] Memory required for data: 1450544504\nI0821 08:26:21.633405 32262 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:21.633419 32262 net.cpp:100] Creating Layer loss\nI0821 08:26:21.633431 32262 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0821 08:26:21.633445 32262 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0821 08:26:21.633463 32262 net.cpp:408] loss -> loss\nI0821 08:26:21.633488 32262 layer_factory.hpp:77] Creating layer loss\nI0821 08:26:21.633646 32262 net.cpp:150] Setting up loss\nI0821 08:26:21.633664 32262 net.cpp:157] Top shape: (1)\nI0821 08:26:21.633673 32262 net.cpp:160]     with loss weight 1\nI0821 08:26:21.633698 32262 net.cpp:165] Memory required for data: 1450544508\nI0821 08:26:21.633710 32262 net.cpp:226] loss needs backward computation.\nI0821 08:26:21.633721 32262 net.cpp:228] accuracy does not need backward computation.\nI0821 08:26:21.633733 32262 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0821 08:26:21.633744 32262 net.cpp:226] post_FC needs backward 
computation.\nI0821 08:26:21.633754 32262 net.cpp:226] post_pool needs backward computation.\nI0821 08:26:21.633762 32262 net.cpp:226] L3_b9_relu needs backward computation.\nI0821 08:26:21.633772 32262 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.633783 32262 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.633792 32262 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.633802 32262 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.633813 32262 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.633823 32262 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.633832 32262 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.633842 32262 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.633852 32262 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0821 08:26:21.633862 32262 net.cpp:226] L3_b8_relu needs backward computation.\nI0821 08:26:21.633872 32262 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.633883 32262 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.633893 32262 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.633903 32262 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.633913 32262 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.633924 32262 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0821 08:26:21.633934 32262 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.633944 32262 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.633955 32262 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0821 08:26:21.633965 32262 net.cpp:226] L3_b7_relu needs backward computation.\nI0821 08:26:21.633975 32262 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0821 08:26:21.633985 32262 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.633994 32262 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.634009 32262 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.634021 32262 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.634030 32262 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.634040 32262 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.634050 32262 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.634060 32262 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0821 08:26:21.634070 32262 net.cpp:226] L3_b6_relu needs backward computation.\nI0821 08:26:21.634081 32262 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.634093 32262 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.634109 32262 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.634132 32262 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.634143 32262 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.634153 32262 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.634163 32262 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.634174 32262 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.634186 32262 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0821 08:26:21.634196 32262 net.cpp:226] L3_b5_relu needs backward computation.\nI0821 08:26:21.634207 32262 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0821 08:26:21.634217 32262 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.634227 32262 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.634238 32262 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.634248 32262 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.634258 32262 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.634268 32262 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.634279 32262 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.634289 32262 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0821 08:26:21.634299 32262 net.cpp:226] L3_b4_relu needs backward computation.\nI0821 08:26:21.634310 32262 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.634320 32262 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.634330 32262 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.634341 32262 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.634351 32262 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.634361 32262 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.634371 32262 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.634380 32262 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.634392 32262 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0821 08:26:21.634402 32262 net.cpp:226] L3_b3_relu needs backward computation.\nI0821 08:26:21.634413 32262 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0821 08:26:21.634423 32262 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.634433 32262 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.634443 32262 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.634454 32262 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.634464 32262 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0821 08:26:21.634474 
32262 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.634485 32262 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.634495 32262 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0821 08:26:21.634505 32262 net.cpp:226] L3_b2_relu needs backward computation.\nI0821 08:26:21.634516 32262 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.634528 32262 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.634537 32262 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.634548 32262 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.634558 32262 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.634568 32262 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.634577 32262 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.634588 32262 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.634610 32262 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0821 08:26:21.634621 32262 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0821 08:26:21.634641 32262 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0821 08:26:21.634654 32262 net.cpp:226] L3_b1_relu needs backward computation.\nI0821 08:26:21.634662 32262 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.634675 32262 net.cpp:226] L3_b1_pool needs backward computation.\nI0821 08:26:21.634685 32262 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.634696 32262 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.634707 32262 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.634718 32262 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.634728 32262 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0821 
08:26:21.634738 32262 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.634749 32262 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.634760 32262 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0821 08:26:21.634770 32262 net.cpp:226] L2_b9_relu needs backward computation.\nI0821 08:26:21.634781 32262 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.634793 32262 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.634804 32262 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.634814 32262 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.634824 32262 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.634835 32262 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.634845 32262 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.634855 32262 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.634865 32262 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0821 08:26:21.634877 32262 net.cpp:226] L2_b8_relu needs backward computation.\nI0821 08:26:21.634888 32262 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.634899 32262 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.634909 32262 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.634920 32262 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.634932 32262 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.634941 32262 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0821 08:26:21.634950 32262 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.634961 32262 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.634973 32262 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0821 08:26:21.634984 32262 net.cpp:226] L2_b7_relu needs backward computation.\nI0821 08:26:21.634994 32262 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0821 08:26:21.635005 32262 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.635016 32262 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.635027 32262 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.635038 32262 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.635048 32262 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.635059 32262 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.635068 32262 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.635079 32262 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0821 08:26:21.635090 32262 net.cpp:226] L2_b6_relu needs backward computation.\nI0821 08:26:21.635107 32262 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.635119 32262 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.635129 32262 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.635141 32262 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.635160 32262 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.635171 32262 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.635181 32262 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.635192 32262 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.635202 32262 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0821 08:26:21.635213 32262 net.cpp:226] L2_b5_relu needs backward computation.\nI0821 08:26:21.635223 32262 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0821 08:26:21.635236 32262 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.635246 32262 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.635257 32262 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.635267 32262 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.635277 32262 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.635288 32262 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.635298 32262 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.635309 32262 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0821 08:26:21.635319 32262 net.cpp:226] L2_b4_relu needs backward computation.\nI0821 08:26:21.635330 32262 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.635349 32262 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.635360 32262 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.635372 32262 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.635382 32262 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.635392 32262 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.635403 32262 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.635414 32262 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.635426 32262 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0821 08:26:21.635435 32262 net.cpp:226] L2_b3_relu needs backward computation.\nI0821 08:26:21.635447 32262 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0821 08:26:21.635458 32262 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.635468 32262 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.635479 32262 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.635490 32262 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.635500 32262 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0821 08:26:21.635510 32262 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.635520 32262 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.635534 32262 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0821 08:26:21.635545 32262 net.cpp:226] L2_b2_relu needs backward computation.\nI0821 08:26:21.635555 32262 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.635566 32262 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.635577 32262 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.635587 32262 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.635598 32262 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.635609 32262 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.635619 32262 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.635630 32262 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.635641 32262 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0821 08:26:21.635653 32262 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0821 08:26:21.635664 32262 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0821 08:26:21.635682 32262 net.cpp:226] L2_b1_relu needs backward computation.\nI0821 08:26:21.635694 32262 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.635705 32262 net.cpp:226] L2_b1_pool needs backward computation.\nI0821 08:26:21.635716 32262 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.635727 32262 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.635740 32262 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.635751 32262 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.635761 32262 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0821 08:26:21.635771 32262 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.635782 32262 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.635792 32262 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0821 08:26:21.635803 32262 net.cpp:226] L1_b9_relu needs backward computation.\nI0821 08:26:21.635813 32262 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0821 08:26:21.635825 32262 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0821 08:26:21.635836 32262 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0821 08:26:21.635848 32262 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0821 08:26:21.635859 32262 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0821 08:26:21.635870 32262 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0821 08:26:21.635881 32262 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0821 08:26:21.635891 32262 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0821 08:26:21.635902 32262 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0821 08:26:21.635913 32262 net.cpp:226] L1_b8_relu needs backward computation.\nI0821 08:26:21.635923 32262 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0821 08:26:21.635936 32262 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0821 08:26:21.635946 32262 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0821 08:26:21.635957 32262 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0821 08:26:21.635968 32262 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0821 08:26:21.635978 32262 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0821 
08:26:21.635989 32262 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0821 08:26:21.636001 32262 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0821 08:26:21.636013 32262 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0821 08:26:21.636023 32262 net.cpp:226] L1_b7_relu needs backward computation.\nI0821 08:26:21.636034 32262 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0821 08:26:21.636045 32262 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0821 08:26:21.636056 32262 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0821 08:26:21.636067 32262 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0821 08:26:21.636080 32262 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0821 08:26:21.636090 32262 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0821 08:26:21.636107 32262 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0821 08:26:21.636121 32262 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0821 08:26:21.636132 32262 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0821 08:26:21.636143 32262 net.cpp:226] L1_b6_relu needs backward computation.\nI0821 08:26:21.636154 32262 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0821 08:26:21.636167 32262 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0821 08:26:21.636178 32262 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0821 08:26:21.636188 32262 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0821 08:26:21.636199 32262 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0821 08:26:21.636209 32262 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0821 08:26:21.636229 32262 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0821 08:26:21.636240 32262 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0821 08:26:21.636252 32262 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0821 08:26:21.636263 32262 net.cpp:226] L1_b5_relu needs backward computation.\nI0821 08:26:21.636274 32262 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0821 08:26:21.636286 32262 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0821 08:26:21.636297 32262 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0821 08:26:21.636308 32262 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0821 08:26:21.636319 32262 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0821 08:26:21.636329 32262 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0821 08:26:21.636340 32262 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0821 08:26:21.636351 32262 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0821 08:26:21.636363 32262 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0821 08:26:21.636374 32262 net.cpp:226] L1_b4_relu needs backward computation.\nI0821 08:26:21.636384 32262 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0821 08:26:21.636396 32262 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0821 08:26:21.636407 32262 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0821 08:26:21.636418 32262 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0821 08:26:21.636430 32262 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0821 08:26:21.636440 32262 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0821 08:26:21.636451 32262 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0821 08:26:21.636462 32262 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0821 08:26:21.636473 32262 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0821 08:26:21.636485 32262 net.cpp:226] L1_b3_relu needs backward computation.\nI0821 08:26:21.636497 32262 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0821 08:26:21.636507 32262 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0821 08:26:21.636519 32262 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0821 08:26:21.636530 32262 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0821 08:26:21.636541 32262 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0821 08:26:21.636553 32262 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0821 08:26:21.636564 32262 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0821 08:26:21.636574 32262 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0821 08:26:21.636586 32262 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0821 08:26:21.636598 32262 net.cpp:226] L1_b2_relu needs backward computation.\nI0821 08:26:21.636610 32262 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0821 08:26:21.636620 32262 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0821 08:26:21.636631 32262 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0821 08:26:21.636643 32262 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0821 08:26:21.636656 32262 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0821 08:26:21.636665 32262 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0821 08:26:21.636677 32262 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0821 08:26:21.636693 32262 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0821 08:26:21.636705 32262 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0821 08:26:21.636718 32262 net.cpp:226] L1_b1_relu needs backward computation.\nI0821 08:26:21.636729 32262 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0821 08:26:21.636741 32262 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0821 08:26:21.636752 32262 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0821 08:26:21.636775 32262 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0821 08:26:21.636786 32262 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0821 08:26:21.636796 32262 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0821 08:26:21.636807 32262 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0821 08:26:21.636819 32262 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0821 08:26:21.636831 32262 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0821 08:26:21.636842 32262 net.cpp:226] pre_relu needs backward computation.\nI0821 08:26:21.636852 32262 net.cpp:226] pre_scale needs backward computation.\nI0821 08:26:21.636863 32262 net.cpp:226] pre_bn needs backward computation.\nI0821 08:26:21.636874 32262 net.cpp:226] pre_conv needs backward computation.\nI0821 08:26:21.636888 32262 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0821 08:26:21.636901 32262 net.cpp:228] dataLayer does not need backward computation.\nI0821 08:26:21.636910 32262 net.cpp:270] This network produces output accuracy\nI0821 08:26:21.636922 32262 net.cpp:270] This network produces output loss\nI0821 08:26:21.637277 32262 net.cpp:283] Network initialization done.\nI0821 08:26:21.638309 32262 solver.cpp:60] Solver scaffolding done.\nI0821 08:26:21.861805 32262 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0821 08:26:22.225452 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:22.225531 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:22.232743 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:22.457470 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:22.457583 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:22.492343 32262 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0821 08:26:22.492451 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:22.947831 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:22.947909 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:22.955992 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:23.204366 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:23.204504 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:23.256642 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:23.256774 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:23.777340 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:23.777420 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:23.786137 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:24.060263 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:24.060431 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:24.132500 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:24.132660 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:24.216028 32262 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0821 08:26:24.700485 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:24.700554 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0821 08:26:24.710036 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:25.003808 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:25.003963 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:25.096154 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:25.096310 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:25.751413 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:25.751477 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:25.762214 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:26.073966 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:26.074187 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:26.187834 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:26.188045 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:26.902938 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:26.903002 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:26.914638 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:27.256600 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:27.256842 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:27.389981 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:27.390218 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 
08:26:28.174055 32262 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0821 08:26:28.174129 32262 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0821 08:26:28.186915 32262 data_layer.cpp:41] output data size: 125,3,32,32\nI0821 08:26:28.304388 32289 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:28.408511 32289 blocking_queue.cpp:50] Waiting for data\nI0821 08:26:28.627290 32262 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0821 08:26:28.627563 32262 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0821 08:26:28.779662 32262 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0821 08:26:28.779920 32262 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0821 08:26:28.951553 32262 parallel.cpp:425] Starting Optimization\nI0821 08:26:28.954071 32262 solver.cpp:279] Solving Cifar-Resnet\nI0821 08:26:28.954087 32262 solver.cpp:280] Learning Rate Policy: multistep\nI0821 08:26:28.959164 32262 solver.cpp:337] Iteration 0, Testing net (#0)\nI0821 08:27:50.598208 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0821 08:27:50.598505 32262 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0821 08:27:54.639474 32262 solver.cpp:228] Iteration 0, loss = 3.22659\nI0821 08:27:54.639515 32262 solver.cpp:244]     Train net output #0: accuracy = 0.168\nI0821 08:27:54.639531 32262 solver.cpp:244]     Train net output #1: loss = 3.22659 (* 1 = 3.22659 loss)\nI0821 08:27:54.693383 32262 sgd_solver.cpp:166] Iteration 0, lr = 0.35\nI0821 08:30:13.441771 32262 solver.cpp:337] Iteration 100, Testing net (#0)\nI0821 08:31:34.299950 32262 solver.cpp:404]     Test net output #0: accuracy = 0.12072\nI0821 08:31:34.300220 32262 solver.cpp:404]     Test net output #1: loss = 2.4071 (* 1 = 2.4071 loss)\nI0821 08:31:35.614675 32262 
solver.cpp:228] Iteration 100, loss = 2.2612\nI0821 08:31:35.614708 32262 solver.cpp:244]     Train net output #0: accuracy = 0.176\nI0821 08:31:35.614723 32262 solver.cpp:244]     Train net output #1: loss = 2.2612 (* 1 = 2.2612 loss)\nI0821 08:31:35.712222 32262 sgd_solver.cpp:166] Iteration 100, lr = 0.35\nI0821 08:33:54.235366 32262 solver.cpp:337] Iteration 200, Testing net (#0)\nI0821 08:35:15.074113 32262 solver.cpp:404]     Test net output #0: accuracy = 0.13884\nI0821 08:35:15.074373 32262 solver.cpp:404]     Test net output #1: loss = 3.29879 (* 1 = 3.29879 loss)\nI0821 08:35:16.388984 32262 solver.cpp:228] Iteration 200, loss = 1.93779\nI0821 08:35:16.389016 32262 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI0821 08:35:16.389031 32262 solver.cpp:244]     Train net output #1: loss = 1.93779 (* 1 = 1.93779 loss)\nI0821 08:35:16.486819 32262 sgd_solver.cpp:166] Iteration 200, lr = 0.35\nI0821 08:37:35.139698 32262 solver.cpp:337] Iteration 300, Testing net (#0)\nI0821 08:38:55.960909 32262 solver.cpp:404]     Test net output #0: accuracy = 0.13232\nI0821 08:38:55.961170 32262 solver.cpp:404]     Test net output #1: loss = 3.16215 (* 1 = 3.16215 loss)\nI0821 08:38:57.275254 32262 solver.cpp:228] Iteration 300, loss = 1.88815\nI0821 08:38:57.275295 32262 solver.cpp:244]     Train net output #0: accuracy = 0.216\nI0821 08:38:57.275312 32262 solver.cpp:244]     Train net output #1: loss = 1.88815 (* 1 = 1.88815 loss)\nI0821 08:38:57.374785 32262 sgd_solver.cpp:166] Iteration 300, lr = 0.35\nI0821 08:41:16.055523 32262 solver.cpp:337] Iteration 400, Testing net (#0)\nI0821 08:42:36.854303 32262 solver.cpp:404]     Test net output #0: accuracy = 0.15144\nI0821 08:42:36.854562 32262 solver.cpp:404]     Test net output #1: loss = 2.35163 (* 1 = 2.35163 loss)\nI0821 08:42:38.165267 32262 solver.cpp:228] Iteration 400, loss = 1.49289\nI0821 08:42:38.165302 32262 solver.cpp:244]     Train net output #0: accuracy = 0.432\nI0821 08:42:38.165318 32262 
solver.cpp:244]     Train net output #1: loss = 1.49289 (* 1 = 1.49289 loss)\nI0821 08:42:38.268944 32262 sgd_solver.cpp:166] Iteration 400, lr = 0.35\nI0821 08:44:56.890784 32262 solver.cpp:337] Iteration 500, Testing net (#0)\nI0821 08:46:17.705389 32262 solver.cpp:404]     Test net output #0: accuracy = 0.16788\nI0821 08:46:17.705648 32262 solver.cpp:404]     Test net output #1: loss = 2.35685 (* 1 = 2.35685 loss)\nI0821 08:46:19.016942 32262 solver.cpp:228] Iteration 500, loss = 1.50198\nI0821 08:46:19.016983 32262 solver.cpp:244]     Train net output #0: accuracy = 0.424\nI0821 08:46:19.017000 32262 solver.cpp:244]     Train net output #1: loss = 1.50198 (* 1 = 1.50198 loss)\nI0821 08:46:19.115854 32262 sgd_solver.cpp:166] Iteration 500, lr = 0.35\nI0821 08:48:37.734926 32262 solver.cpp:337] Iteration 600, Testing net (#0)\nI0821 08:49:58.520226 32262 solver.cpp:404]     Test net output #0: accuracy = 0.13304\nI0821 08:49:58.520483 32262 solver.cpp:404]     Test net output #1: loss = 2.57899 (* 1 = 2.57899 loss)\nI0821 08:49:59.831184 32262 solver.cpp:228] Iteration 600, loss = 1.20853\nI0821 08:49:59.831228 32262 solver.cpp:244]     Train net output #0: accuracy = 0.576\nI0821 08:49:59.831243 32262 solver.cpp:244]     Train net output #1: loss = 1.20853 (* 1 = 1.20853 loss)\nI0821 08:49:59.935588 32262 sgd_solver.cpp:166] Iteration 600, lr = 0.35\nI0821 08:52:18.580250 32262 solver.cpp:337] Iteration 700, Testing net (#0)\nI0821 08:53:39.385236 32262 solver.cpp:404]     Test net output #0: accuracy = 0.23248\nI0821 08:53:39.385500 32262 solver.cpp:404]     Test net output #1: loss = 2.47538 (* 1 = 2.47538 loss)\nI0821 08:53:40.696687 32262 solver.cpp:228] Iteration 700, loss = 1.37742\nI0821 08:53:40.696732 32262 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI0821 08:53:40.696749 32262 solver.cpp:244]     Train net output #1: loss = 1.37742 (* 1 = 1.37742 loss)\nI0821 08:53:40.792318 32262 sgd_solver.cpp:166] Iteration 700, lr = 0.35\nI0821 
08:55:59.432987 32262 solver.cpp:337] Iteration 800, Testing net (#0)\nI0821 08:57:20.256527 32262 solver.cpp:404]     Test net output #0: accuracy = 0.26716\nI0821 08:57:20.256796 32262 solver.cpp:404]     Test net output #1: loss = 2.27369 (* 1 = 2.27369 loss)\nI0821 08:57:21.568222 32262 solver.cpp:228] Iteration 800, loss = 1.13242\nI0821 08:57:21.568259 32262 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0821 08:57:21.568274 32262 solver.cpp:244]     Train net output #1: loss = 1.13242 (* 1 = 1.13242 loss)\nI0821 08:57:21.666502 32262 sgd_solver.cpp:166] Iteration 800, lr = 0.35\nI0821 08:59:40.311475 32262 solver.cpp:337] Iteration 900, Testing net (#0)\nI0821 09:01:01.152688 32262 solver.cpp:404]     Test net output #0: accuracy = 0.28792\nI0821 09:01:01.152986 32262 solver.cpp:404]     Test net output #1: loss = 2.22156 (* 1 = 2.22156 loss)\nI0821 09:01:02.465019 32262 solver.cpp:228] Iteration 900, loss = 1.19715\nI0821 09:01:02.465051 32262 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI0821 09:01:02.465067 32262 solver.cpp:244]     Train net output #1: loss = 1.19715 (* 1 = 1.19715 loss)\nI0821 09:01:02.563483 32262 sgd_solver.cpp:166] Iteration 900, lr = 0.35\nI0821 09:03:21.189393 32262 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0821 09:04:42.013743 32262 solver.cpp:404]     Test net output #0: accuracy = 0.1394\nI0821 09:04:42.014006 32262 solver.cpp:404]     Test net output #1: loss = 3.29903 (* 1 = 3.29903 loss)\nI0821 09:04:43.325513 32262 solver.cpp:228] Iteration 1000, loss = 1.00229\nI0821 09:04:43.325554 32262 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0821 09:04:43.325572 32262 solver.cpp:244]     Train net output #1: loss = 1.00229 (* 1 = 1.00229 loss)\nI0821 09:04:43.428647 32262 sgd_solver.cpp:166] Iteration 1000, lr = 0.35\nI0821 09:07:02.220912 32262 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0821 09:08:23.021682 32262 solver.cpp:404]     Test net output #0: accuracy = 
0.10096\nI0821 09:08:23.021947 32262 solver.cpp:404]     Test net output #1: loss = 4.86024 (* 1 = 4.86024 loss)\nI0821 09:08:24.332391 32262 solver.cpp:228] Iteration 1100, loss = 1.07529\nI0821 09:08:24.332437 32262 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0821 09:08:24.332453 32262 solver.cpp:244]     Train net output #1: loss = 1.07529 (* 1 = 1.07529 loss)\nI0821 09:08:24.431898 32262 sgd_solver.cpp:166] Iteration 1100, lr = 0.35\nI0821 09:10:43.105072 32262 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0821 09:12:03.928800 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10028\nI0821 09:12:03.929047 32262 solver.cpp:404]     Test net output #1: loss = 5.55669 (* 1 = 5.55669 loss)\nI0821 09:12:05.241040 32262 solver.cpp:228] Iteration 1200, loss = 0.886596\nI0821 09:12:05.241084 32262 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0821 09:12:05.241101 32262 solver.cpp:244]     Train net output #1: loss = 0.886596 (* 1 = 0.886596 loss)\nI0821 09:12:05.337708 32262 sgd_solver.cpp:166] Iteration 1200, lr = 0.35\nI0821 09:14:24.011292 32262 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0821 09:15:44.809777 32262 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 09:15:44.810039 32262 solver.cpp:404]     Test net output #1: loss = 6.43317 (* 1 = 6.43317 loss)\nI0821 09:15:46.121675 32262 solver.cpp:228] Iteration 1300, loss = 0.880534\nI0821 09:15:46.121711 32262 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0821 09:15:46.121726 32262 solver.cpp:244]     Train net output #1: loss = 0.880534 (* 1 = 0.880534 loss)\nI0821 09:15:46.220574 32262 sgd_solver.cpp:166] Iteration 1300, lr = 0.35\nI0821 09:18:04.868783 32262 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0821 09:19:25.611557 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 09:19:25.611819 32262 solver.cpp:404]     Test net output #1: loss = 8.39312 (* 1 = 8.39312 loss)\nI0821 09:19:26.923516 32262 
solver.cpp:228] Iteration 1400, loss = 0.67074\nI0821 09:19:26.923549 32262 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0821 09:19:26.923563 32262 solver.cpp:244]     Train net output #1: loss = 0.67074 (* 1 = 0.67074 loss)\nI0821 09:19:27.019628 32262 sgd_solver.cpp:166] Iteration 1400, lr = 0.35\nI0821 09:21:45.654431 32262 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0821 09:23:06.395532 32262 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 09:23:06.395771 32262 solver.cpp:404]     Test net output #1: loss = 10.8362 (* 1 = 10.8362 loss)\nI0821 09:23:07.706840 32262 solver.cpp:228] Iteration 1500, loss = 0.696272\nI0821 09:23:07.706887 32262 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0821 09:23:07.706902 32262 solver.cpp:244]     Train net output #1: loss = 0.696272 (* 1 = 0.696272 loss)\nI0821 09:23:07.806272 32262 sgd_solver.cpp:166] Iteration 1500, lr = 0.35\nI0821 09:25:26.478006 32262 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0821 09:26:47.202266 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 09:26:47.202497 32262 solver.cpp:404]     Test net output #1: loss = 7.41589 (* 1 = 7.41589 loss)\nI0821 09:26:48.513316 32262 solver.cpp:228] Iteration 1600, loss = 0.649458\nI0821 09:26:48.513360 32262 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0821 09:26:48.513376 32262 solver.cpp:244]     Train net output #1: loss = 0.649458 (* 1 = 0.649458 loss)\nI0821 09:26:48.612864 32262 sgd_solver.cpp:166] Iteration 1600, lr = 0.35\nI0821 09:29:07.250178 32262 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0821 09:30:27.971169 32262 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 09:30:27.971485 32262 solver.cpp:404]     Test net output #1: loss = 7.63379 (* 1 = 7.63379 loss)\nI0821 09:30:29.283679 32262 solver.cpp:228] Iteration 1700, loss = 0.484112\nI0821 09:30:29.283722 32262 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0821 
09:30:29.283738 32262 solver.cpp:244]     Train net output #1: loss = 0.484112 (* 1 = 0.484112 loss)\nI0821 09:30:29.380008 32262 sgd_solver.cpp:166] Iteration 1700, lr = 0.35\nI0821 09:32:47.976634 32262 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0821 09:34:08.697382 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10016\nI0821 09:34:08.697640 32262 solver.cpp:404]     Test net output #1: loss = 5.86729 (* 1 = 5.86729 loss)\nI0821 09:34:10.008675 32262 solver.cpp:228] Iteration 1800, loss = 0.520968\nI0821 09:34:10.008720 32262 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0821 09:34:10.008736 32262 solver.cpp:244]     Train net output #1: loss = 0.520968 (* 1 = 0.520968 loss)\nI0821 09:34:10.109060 32262 sgd_solver.cpp:166] Iteration 1800, lr = 0.35\nI0821 09:36:28.815712 32262 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0821 09:37:49.544088 32262 solver.cpp:404]     Test net output #0: accuracy = 0.09984\nI0821 09:37:49.544349 32262 solver.cpp:404]     Test net output #1: loss = 6.21281 (* 1 = 6.21281 loss)\nI0821 09:37:50.855815 32262 solver.cpp:228] Iteration 1900, loss = 0.474135\nI0821 09:37:50.855861 32262 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0821 09:37:50.855877 32262 solver.cpp:244]     Train net output #1: loss = 0.474135 (* 1 = 0.474135 loss)\nI0821 09:37:50.952312 32262 sgd_solver.cpp:166] Iteration 1900, lr = 0.35\nI0821 09:40:09.561000 32262 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0821 09:41:30.281867 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10196\nI0821 09:41:30.282097 32262 solver.cpp:404]     Test net output #1: loss = 6.10331 (* 1 = 6.10331 loss)\nI0821 09:41:31.593966 32262 solver.cpp:228] Iteration 2000, loss = 0.542422\nI0821 09:41:31.594008 32262 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0821 09:41:31.594024 32262 solver.cpp:244]     Train net output #1: loss = 0.542422 (* 1 = 0.542422 loss)\nI0821 09:41:31.688716 32262 
sgd_solver.cpp:166] Iteration 2000, lr = 0.35\nI0821 09:43:50.229866 32262 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0821 09:45:10.938422 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10372\nI0821 09:45:10.938683 32262 solver.cpp:404]     Test net output #1: loss = 5.74742 (* 1 = 5.74742 loss)\nI0821 09:45:12.250383 32262 solver.cpp:228] Iteration 2100, loss = 0.345075\nI0821 09:45:12.250418 32262 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 09:45:12.250433 32262 solver.cpp:244]     Train net output #1: loss = 0.345075 (* 1 = 0.345075 loss)\nI0821 09:45:12.348878 32262 sgd_solver.cpp:166] Iteration 2100, lr = 0.35\nI0821 09:47:31.014150 32262 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0821 09:48:51.792737 32262 solver.cpp:404]     Test net output #0: accuracy = 0.1008\nI0821 09:48:51.793001 32262 solver.cpp:404]     Test net output #1: loss = 8.29855 (* 1 = 8.29855 loss)\nI0821 09:48:53.105056 32262 solver.cpp:228] Iteration 2200, loss = 0.350485\nI0821 09:48:53.105108 32262 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 09:48:53.105134 32262 solver.cpp:244]     Train net output #1: loss = 0.350485 (* 1 = 0.350485 loss)\nI0821 09:48:53.206264 32262 sgd_solver.cpp:166] Iteration 2200, lr = 0.35\nI0821 09:51:11.895773 32262 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0821 09:52:32.704021 32262 solver.cpp:404]     Test net output #0: accuracy = 0.10584\nI0821 09:52:32.704293 32262 solver.cpp:404]     Test net output #1: loss = 5.71185 (* 1 = 5.71185 loss)\nI0821 09:52:34.016659 32262 solver.cpp:228] Iteration 2300, loss = 0.304659\nI0821 09:52:34.016701 32262 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0821 09:52:34.016717 32262 solver.cpp:244]     Train net output #1: loss = 0.304659 (* 1 = 0.304659 loss)\nI0821 09:52:34.119297 32262 sgd_solver.cpp:166] Iteration 2300, lr = 0.35\nI0821 09:54:52.771209 32262 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0821 09:56:14.170482 
32262 solver.cpp:404]     Test net output #0: accuracy = 0.13372\nI0821 09:56:14.170744 32262 solver.cpp:404]     Test net output #1: loss = 4.36846 (* 1 = 4.36846 loss)\nI0821 09:56:15.497895 32262 solver.cpp:228] Iteration 2400, loss = 0.290072\nI0821 09:56:15.497930 32262 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0821 09:56:15.497944 32262 solver.cpp:244]     Train net output #1: loss = 0.290072 (* 1 = 0.290072 loss)\nI0821 09:56:15.582612 32262 sgd_solver.cpp:166] Iteration 2400, lr = 0.35\nI0821 09:58:34.191552 32262 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0821 09:59:55.891961 32262 solver.cpp:404]     Test net output #0: accuracy = 0.17644\nI0821 09:59:55.892230 32262 solver.cpp:404]     Test net output #1: loss = 4.63713 (* 1 = 4.63713 loss)\nI0821 09:59:57.218739 32262 solver.cpp:228] Iteration 2500, loss = 0.425577\nI0821 09:59:57.218775 32262 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0821 09:59:57.218789 32262 solver.cpp:244]     Train net output #1: loss = 0.425577 (* 1 = 0.425577 loss)\nI0821 09:59:57.303551 32262 sgd_solver.cpp:166] Iteration 2500, lr = 0.35\nI0821 10:02:15.849684 32262 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0821 10:03:37.587786 32262 solver.cpp:404]     Test net output #0: accuracy = 0.26416\nI0821 10:03:37.588027 32262 solver.cpp:404]     Test net output #1: loss = 3.6599 (* 1 = 3.6599 loss)\nI0821 10:03:38.914469 32262 solver.cpp:228] Iteration 2600, loss = 0.309858\nI0821 10:03:38.914501 32262 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0821 10:03:38.914516 32262 solver.cpp:244]     Train net output #1: loss = 0.309858 (* 1 = 0.309858 loss)\nI0821 10:03:38.994472 32262 sgd_solver.cpp:166] Iteration 2600, lr = 0.35\nI0821 10:05:57.515264 32262 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0821 10:07:19.193964 32262 solver.cpp:404]     Test net output #0: accuracy = 0.25408\nI0821 10:07:19.194229 32262 solver.cpp:404]     Test net output #1: loss = 3.39885 
(* 1 = 3.39885 loss)\nI0821 10:07:20.522325 32262 solver.cpp:228] Iteration 2700, loss = 0.263835\nI0821 10:07:20.522361 32262 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0821 10:07:20.522382 32262 solver.cpp:244]     Train net output #1: loss = 0.263835 (* 1 = 0.263835 loss)\nI0821 10:07:20.606966 32262 sgd_solver.cpp:166] Iteration 2700, lr = 0.35\nI0821 10:09:39.190081 32262 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0821 10:11:00.934608 32262 solver.cpp:404]     Test net output #0: accuracy = 0.28324\nI0821 10:11:00.934878 32262 solver.cpp:404]     Test net output #1: loss = 3.70986 (* 1 = 3.70986 loss)\nI0821 10:11:02.262354 32262 solver.cpp:228] Iteration 2800, loss = 0.185205\nI0821 10:11:02.262392 32262 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:11:02.262415 32262 solver.cpp:244]     Train net output #1: loss = 0.185205 (* 1 = 0.185205 loss)\nI0821 10:11:02.345571 32262 sgd_solver.cpp:166] Iteration 2800, lr = 0.35\nI0821 10:13:20.884376 32262 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0821 10:14:42.580900 32262 solver.cpp:404]     Test net output #0: accuracy = 0.21628\nI0821 10:14:42.581168 32262 solver.cpp:404]     Test net output #1: loss = 4.11168 (* 1 = 4.11168 loss)\nI0821 10:14:43.909106 32262 solver.cpp:228] Iteration 2900, loss = 0.219041\nI0821 10:14:43.909142 32262 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0821 10:14:43.909165 32262 solver.cpp:244]     Train net output #1: loss = 0.219042 (* 1 = 0.219042 loss)\nI0821 10:14:43.991302 32262 sgd_solver.cpp:166] Iteration 2900, lr = 0.35\nI0821 10:17:02.604853 32262 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0821 10:18:24.377218 32262 solver.cpp:404]     Test net output #0: accuracy = 0.14728\nI0821 10:18:24.377485 32262 solver.cpp:404]     Test net output #1: loss = 6.86055 (* 1 = 6.86055 loss)\nI0821 10:18:25.703968 32262 solver.cpp:228] Iteration 3000, loss = 0.25468\nI0821 10:18:25.704005 32262 solver.cpp:244]     
Train net output #0: accuracy = 0.912\nI0821 10:18:25.704026 32262 solver.cpp:244]     Train net output #1: loss = 0.25468 (* 1 = 0.25468 loss)\nI0821 10:18:25.786118 32262 sgd_solver.cpp:166] Iteration 3000, lr = 0.35\nI0821 10:20:44.386364 32262 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0821 10:22:06.122675 32262 solver.cpp:404]     Test net output #0: accuracy = 0.32008\nI0821 10:22:06.122937 32262 solver.cpp:404]     Test net output #1: loss = 3.97344 (* 1 = 3.97344 loss)\nI0821 10:22:07.450036 32262 solver.cpp:228] Iteration 3100, loss = 0.138616\nI0821 10:22:07.450072 32262 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 10:22:07.450093 32262 solver.cpp:244]     Train net output #1: loss = 0.138616 (* 1 = 0.138616 loss)\nI0821 10:22:07.530910 32262 sgd_solver.cpp:166] Iteration 3100, lr = 0.35\nI0821 10:24:26.111052 32262 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0821 10:25:47.859208 32262 solver.cpp:404]     Test net output #0: accuracy = 0.36536\nI0821 10:25:47.859436 32262 solver.cpp:404]     Test net output #1: loss = 2.92469 (* 1 = 2.92469 loss)\nI0821 10:25:49.187206 32262 solver.cpp:228] Iteration 3200, loss = 0.199535\nI0821 10:25:49.187242 32262 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0821 10:25:49.187263 32262 solver.cpp:244]     Train net output #1: loss = 0.199536 (* 1 = 0.199536 loss)\nI0821 10:25:49.268502 32262 sgd_solver.cpp:166] Iteration 3200, lr = 0.35\nI0821 10:28:07.826144 32262 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0821 10:29:29.549114 32262 solver.cpp:404]     Test net output #0: accuracy = 0.42948\nI0821 10:29:29.549383 32262 solver.cpp:404]     Test net output #1: loss = 2.38687 (* 1 = 2.38687 loss)\nI0821 10:29:30.877169 32262 solver.cpp:228] Iteration 3300, loss = 0.14902\nI0821 10:29:30.877205 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:29:30.877228 32262 solver.cpp:244]     Train net output #1: loss = 0.14902 (* 1 = 0.14902 
loss)\nI0821 10:29:30.962985 32262 sgd_solver.cpp:166] Iteration 3300, lr = 0.35\nI0821 10:31:49.544436 32262 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0821 10:33:11.234916 32262 solver.cpp:404]     Test net output #0: accuracy = 0.35328\nI0821 10:33:11.235185 32262 solver.cpp:404]     Test net output #1: loss = 3.72812 (* 1 = 3.72812 loss)\nI0821 10:33:12.563150 32262 solver.cpp:228] Iteration 3400, loss = 0.166431\nI0821 10:33:12.563186 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:33:12.563201 32262 solver.cpp:244]     Train net output #1: loss = 0.166431 (* 1 = 0.166431 loss)\nI0821 10:33:12.648331 32262 sgd_solver.cpp:166] Iteration 3400, lr = 0.35\nI0821 10:35:31.191854 32262 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0821 10:36:52.904556 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4056\nI0821 10:36:52.904816 32262 solver.cpp:404]     Test net output #1: loss = 3.66413 (* 1 = 3.66413 loss)\nI0821 10:36:54.231380 32262 solver.cpp:228] Iteration 3500, loss = 0.165937\nI0821 10:36:54.231415 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 10:36:54.231431 32262 solver.cpp:244]     Train net output #1: loss = 0.165937 (* 1 = 0.165937 loss)\nI0821 10:36:54.316751 32262 sgd_solver.cpp:166] Iteration 3500, lr = 0.35\nI0821 10:39:12.992488 32262 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0821 10:40:34.723093 32262 solver.cpp:404]     Test net output #0: accuracy = 0.41332\nI0821 10:40:34.723363 32262 solver.cpp:404]     Test net output #1: loss = 2.55664 (* 1 = 2.55664 loss)\nI0821 10:40:36.049988 32262 solver.cpp:228] Iteration 3600, loss = 0.17975\nI0821 10:40:36.050024 32262 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 10:40:36.050038 32262 solver.cpp:244]     Train net output #1: loss = 0.17975 (* 1 = 0.17975 loss)\nI0821 10:40:36.131278 32262 sgd_solver.cpp:166] Iteration 3600, lr = 0.35\nI0821 10:42:54.784768 32262 solver.cpp:337] Iteration 3700, Testing 
net (#0)\nI0821 10:44:16.485589 32262 solver.cpp:404]     Test net output #0: accuracy = 0.42108\nI0821 10:44:16.485854 32262 solver.cpp:404]     Test net output #1: loss = 2.88607 (* 1 = 2.88607 loss)\nI0821 10:44:17.813067 32262 solver.cpp:228] Iteration 3700, loss = 0.184099\nI0821 10:44:17.813108 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:44:17.813123 32262 solver.cpp:244]     Train net output #1: loss = 0.184099 (* 1 = 0.184099 loss)\nI0821 10:44:17.892961 32262 sgd_solver.cpp:166] Iteration 3700, lr = 0.35\nI0821 10:46:36.635392 32262 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0821 10:47:58.314466 32262 solver.cpp:404]     Test net output #0: accuracy = 0.47896\nI0821 10:47:58.314728 32262 solver.cpp:404]     Test net output #1: loss = 2.58065 (* 1 = 2.58065 loss)\nI0821 10:47:59.641345 32262 solver.cpp:228] Iteration 3800, loss = 0.128355\nI0821 10:47:59.641379 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:47:59.641394 32262 solver.cpp:244]     Train net output #1: loss = 0.128355 (* 1 = 0.128355 loss)\nI0821 10:47:59.725982 32262 sgd_solver.cpp:166] Iteration 3800, lr = 0.35\nI0821 10:50:18.318642 32262 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0821 10:51:40.006438 32262 solver.cpp:404]     Test net output #0: accuracy = 0.43988\nI0821 10:51:40.006701 32262 solver.cpp:404]     Test net output #1: loss = 2.54388 (* 1 = 2.54388 loss)\nI0821 10:51:41.333294 32262 solver.cpp:228] Iteration 3900, loss = 0.163892\nI0821 10:51:41.333329 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:51:41.333343 32262 solver.cpp:244]     Train net output #1: loss = 0.163893 (* 1 = 0.163893 loss)\nI0821 10:51:41.415868 32262 sgd_solver.cpp:166] Iteration 3900, lr = 0.35\nI0821 10:54:00.139377 32262 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0821 10:55:21.910320 32262 solver.cpp:404]     Test net output #0: accuracy = 0.50188\nI0821 10:55:21.910580 32262 solver.cpp:404]     
Test net output #1: loss = 2.05674 (* 1 = 2.05674 loss)\nI0821 10:55:23.236791 32262 solver.cpp:228] Iteration 4000, loss = 0.0799771\nI0821 10:55:23.236827 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 10:55:23.236841 32262 solver.cpp:244]     Train net output #1: loss = 0.0799772 (* 1 = 0.0799772 loss)\nI0821 10:55:23.317730 32262 sgd_solver.cpp:166] Iteration 4000, lr = 0.35\nI0821 10:57:42.056831 32262 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0821 10:59:03.755370 32262 solver.cpp:404]     Test net output #0: accuracy = 0.43488\nI0821 10:59:03.755620 32262 solver.cpp:404]     Test net output #1: loss = 2.38471 (* 1 = 2.38471 loss)\nI0821 10:59:05.081907 32262 solver.cpp:228] Iteration 4100, loss = 0.114914\nI0821 10:59:05.081943 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 10:59:05.081956 32262 solver.cpp:244]     Train net output #1: loss = 0.114914 (* 1 = 0.114914 loss)\nI0821 10:59:05.166751 32262 sgd_solver.cpp:166] Iteration 4100, lr = 0.35\nI0821 11:01:23.819010 32262 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0821 11:02:45.535895 32262 solver.cpp:404]     Test net output #0: accuracy = 0.53464\nI0821 11:02:45.536165 32262 solver.cpp:404]     Test net output #1: loss = 2.03351 (* 1 = 2.03351 loss)\nI0821 11:02:46.863693 32262 solver.cpp:228] Iteration 4200, loss = 0.101034\nI0821 11:02:46.863729 32262 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 11:02:46.863742 32262 solver.cpp:244]     Train net output #1: loss = 0.101034 (* 1 = 0.101034 loss)\nI0821 11:02:46.944814 32262 sgd_solver.cpp:166] Iteration 4200, lr = 0.35\nI0821 11:05:05.631973 32262 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0821 11:06:27.303628 32262 solver.cpp:404]     Test net output #0: accuracy = 0.44304\nI0821 11:06:27.303892 32262 solver.cpp:404]     Test net output #1: loss = 2.15151 (* 1 = 2.15151 loss)\nI0821 11:06:28.631381 32262 solver.cpp:228] Iteration 4300, loss = 0.0826006\nI0821 
11:06:28.631415 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:06:28.631430 32262 solver.cpp:244]     Train net output #1: loss = 0.0826008 (* 1 = 0.0826008 loss)\nI0821 11:06:28.711441 32262 sgd_solver.cpp:166] Iteration 4300, lr = 0.35\nI0821 11:08:47.342248 32262 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0821 11:10:09.020493 32262 solver.cpp:404]     Test net output #0: accuracy = 0.44036\nI0821 11:10:09.020746 32262 solver.cpp:404]     Test net output #1: loss = 2.79081 (* 1 = 2.79081 loss)\nI0821 11:10:10.348094 32262 solver.cpp:228] Iteration 4400, loss = 0.071863\nI0821 11:10:10.348130 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:10:10.348150 32262 solver.cpp:244]     Train net output #1: loss = 0.0718631 (* 1 = 0.0718631 loss)\nI0821 11:10:10.432858 32262 sgd_solver.cpp:166] Iteration 4400, lr = 0.35\nI0821 11:12:29.152685 32262 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0821 11:13:50.841240 32262 solver.cpp:404]     Test net output #0: accuracy = 0.42024\nI0821 11:13:50.841501 32262 solver.cpp:404]     Test net output #1: loss = 2.29755 (* 1 = 2.29755 loss)\nI0821 11:13:52.168365 32262 solver.cpp:228] Iteration 4500, loss = 0.0464187\nI0821 11:13:52.168401 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 11:13:52.168416 32262 solver.cpp:244]     Train net output #1: loss = 0.0464189 (* 1 = 0.0464189 loss)\nI0821 11:13:52.250385 32262 sgd_solver.cpp:166] Iteration 4500, lr = 0.35\nI0821 11:16:10.920193 32262 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0821 11:17:32.598611 32262 solver.cpp:404]     Test net output #0: accuracy = 0.54292\nI0821 11:17:32.598870 32262 solver.cpp:404]     Test net output #1: loss = 2.07339 (* 1 = 2.07339 loss)\nI0821 11:17:33.925920 32262 solver.cpp:228] Iteration 4600, loss = 0.0563852\nI0821 11:17:33.925958 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:17:33.925972 32262 solver.cpp:244]     Train net 
output #1: loss = 0.0563853 (* 1 = 0.0563853 loss)\nI0821 11:17:34.007809 32262 sgd_solver.cpp:166] Iteration 4600, lr = 0.35\nI0821 11:19:52.619453 32262 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0821 11:21:14.308545 32262 solver.cpp:404]     Test net output #0: accuracy = 0.35532\nI0821 11:21:14.308806 32262 solver.cpp:404]     Test net output #1: loss = 3.03202 (* 1 = 3.03202 loss)\nI0821 11:21:15.636198 32262 solver.cpp:228] Iteration 4700, loss = 0.0461674\nI0821 11:21:15.636234 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:21:15.636250 32262 solver.cpp:244]     Train net output #1: loss = 0.0461675 (* 1 = 0.0461675 loss)\nI0821 11:21:15.723204 32262 sgd_solver.cpp:166] Iteration 4700, lr = 0.35\nI0821 11:23:34.439913 32262 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0821 11:24:56.209969 32262 solver.cpp:404]     Test net output #0: accuracy = 0.52428\nI0821 11:24:56.210234 32262 solver.cpp:404]     Test net output #1: loss = 1.73763 (* 1 = 1.73763 loss)\nI0821 11:24:57.537302 32262 solver.cpp:228] Iteration 4800, loss = 0.139985\nI0821 11:24:57.537338 32262 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:24:57.537353 32262 solver.cpp:244]     Train net output #1: loss = 0.139985 (* 1 = 0.139985 loss)\nI0821 11:24:57.624495 32262 sgd_solver.cpp:166] Iteration 4800, lr = 0.35\nI0821 11:27:16.220841 32262 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0821 11:28:37.915572 32262 solver.cpp:404]     Test net output #0: accuracy = 0.21632\nI0821 11:28:37.915854 32262 solver.cpp:404]     Test net output #1: loss = 4.29459 (* 1 = 4.29459 loss)\nI0821 11:28:39.243182 32262 solver.cpp:228] Iteration 4900, loss = 0.185218\nI0821 11:28:39.243218 32262 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0821 11:28:39.243233 32262 solver.cpp:244]     Train net output #1: loss = 0.185218 (* 1 = 0.185218 loss)\nI0821 11:28:39.322763 32262 sgd_solver.cpp:166] Iteration 4900, lr = 0.35\nI0821 
11:30:57.967365 32262 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0821 11:32:19.752781 32262 solver.cpp:404]     Test net output #0: accuracy = 0.3774\nI0821 11:32:19.753044 32262 solver.cpp:404]     Test net output #1: loss = 2.96312 (* 1 = 2.96312 loss)\nI0821 11:32:21.080727 32262 solver.cpp:228] Iteration 5000, loss = 0.135997\nI0821 11:32:21.080762 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:32:21.080777 32262 solver.cpp:244]     Train net output #1: loss = 0.135997 (* 1 = 0.135997 loss)\nI0821 11:32:21.164723 32262 sgd_solver.cpp:166] Iteration 5000, lr = 0.35\nI0821 11:34:39.878198 32262 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0821 11:36:01.524602 32262 solver.cpp:404]     Test net output #0: accuracy = 0.35948\nI0821 11:36:01.524863 32262 solver.cpp:404]     Test net output #1: loss = 3.64957 (* 1 = 3.64957 loss)\nI0821 11:36:02.851920 32262 solver.cpp:228] Iteration 5100, loss = 0.119337\nI0821 11:36:02.851955 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:36:02.851970 32262 solver.cpp:244]     Train net output #1: loss = 0.119337 (* 1 = 0.119337 loss)\nI0821 11:36:02.935605 32262 sgd_solver.cpp:166] Iteration 5100, lr = 0.35\nI0821 11:38:21.628062 32262 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0821 11:39:43.323863 32262 solver.cpp:404]     Test net output #0: accuracy = 0.34432\nI0821 11:39:43.324123 32262 solver.cpp:404]     Test net output #1: loss = 3.17171 (* 1 = 3.17171 loss)\nI0821 11:39:44.651103 32262 solver.cpp:228] Iteration 5200, loss = 0.0803865\nI0821 11:39:44.651139 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 11:39:44.651159 32262 solver.cpp:244]     Train net output #1: loss = 0.0803866 (* 1 = 0.0803866 loss)\nI0821 11:39:44.731376 32262 sgd_solver.cpp:166] Iteration 5200, lr = 0.35\nI0821 11:42:03.403370 32262 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0821 11:43:25.164328 32262 solver.cpp:404]     Test net output #0: 
accuracy = 0.44108\nI0821 11:43:25.164603 32262 solver.cpp:404]     Test net output #1: loss = 2.21005 (* 1 = 2.21005 loss)\nI0821 11:43:26.491104 32262 solver.cpp:228] Iteration 5300, loss = 0.0831538\nI0821 11:43:26.491140 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 11:43:26.491160 32262 solver.cpp:244]     Train net output #1: loss = 0.0831539 (* 1 = 0.0831539 loss)\nI0821 11:43:26.576385 32262 sgd_solver.cpp:166] Iteration 5300, lr = 0.35\nI0821 11:45:45.266482 32262 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0821 11:47:06.971091 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4892\nI0821 11:47:06.971360 32262 solver.cpp:404]     Test net output #1: loss = 2.05062 (* 1 = 2.05062 loss)\nI0821 11:47:08.297889 32262 solver.cpp:228] Iteration 5400, loss = 0.0630089\nI0821 11:47:08.297924 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 11:47:08.297940 32262 solver.cpp:244]     Train net output #1: loss = 0.063009 (* 1 = 0.063009 loss)\nI0821 11:47:08.379542 32262 sgd_solver.cpp:166] Iteration 5400, lr = 0.35\nI0821 11:49:26.950567 32262 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0821 11:50:48.686097 32262 solver.cpp:404]     Test net output #0: accuracy = 0.3734\nI0821 11:50:48.686377 32262 solver.cpp:404]     Test net output #1: loss = 2.99424 (* 1 = 2.99424 loss)\nI0821 11:50:50.013070 32262 solver.cpp:228] Iteration 5500, loss = 0.104561\nI0821 11:50:50.013108 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:50:50.013123 32262 solver.cpp:244]     Train net output #1: loss = 0.104561 (* 1 = 0.104561 loss)\nI0821 11:50:50.096374 32262 sgd_solver.cpp:166] Iteration 5500, lr = 0.35\nI0821 11:53:09.141104 32262 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0821 11:54:30.838394 32262 solver.cpp:404]     Test net output #0: accuracy = 0.40692\nI0821 11:54:30.838656 32262 solver.cpp:404]     Test net output #1: loss = 2.88258 (* 1 = 2.88258 loss)\nI0821 
11:54:32.169116 32262 solver.cpp:228] Iteration 5600, loss = 0.118296\nI0821 11:54:32.169159 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 11:54:32.169183 32262 solver.cpp:244]     Train net output #1: loss = 0.118296 (* 1 = 0.118296 loss)\nI0821 11:54:32.255925 32262 sgd_solver.cpp:166] Iteration 5600, lr = 0.35\nI0821 11:56:51.355165 32262 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0821 11:58:13.001788 32262 solver.cpp:404]     Test net output #0: accuracy = 0.49216\nI0821 11:58:13.002043 32262 solver.cpp:404]     Test net output #1: loss = 1.9852 (* 1 = 1.9852 loss)\nI0821 11:58:14.332898 32262 solver.cpp:228] Iteration 5700, loss = 0.0823429\nI0821 11:58:14.332937 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 11:58:14.332958 32262 solver.cpp:244]     Train net output #1: loss = 0.082343 (* 1 = 0.082343 loss)\nI0821 11:58:14.418678 32262 sgd_solver.cpp:166] Iteration 5700, lr = 0.35\nI0821 12:00:33.650347 32262 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0821 12:01:55.277575 32262 solver.cpp:404]     Test net output #0: accuracy = 0.43956\nI0821 12:01:55.277842 32262 solver.cpp:404]     Test net output #1: loss = 2.32493 (* 1 = 2.32493 loss)\nI0821 12:01:56.608610 32262 solver.cpp:228] Iteration 5800, loss = 0.0699622\nI0821 12:01:56.608646 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:01:56.608661 32262 solver.cpp:244]     Train net output #1: loss = 0.0699623 (* 1 = 0.0699623 loss)\nI0821 12:01:56.694946 32262 sgd_solver.cpp:166] Iteration 5800, lr = 0.35\nI0821 12:04:15.846086 32262 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0821 12:05:37.459826 32262 solver.cpp:404]     Test net output #0: accuracy = 0.43012\nI0821 12:05:37.460083 32262 solver.cpp:404]     Test net output #1: loss = 2.81319 (* 1 = 2.81319 loss)\nI0821 12:05:38.790088 32262 solver.cpp:228] Iteration 5900, loss = 0.0548826\nI0821 12:05:38.790122 32262 solver.cpp:244]     Train net output #0: 
accuracy = 0.992\nI0821 12:05:38.790138 32262 solver.cpp:244]     Train net output #1: loss = 0.0548827 (* 1 = 0.0548827 loss)\nI0821 12:05:38.876608 32262 sgd_solver.cpp:166] Iteration 5900, lr = 0.35\nI0821 12:07:58.058467 32262 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0821 12:09:19.727455 32262 solver.cpp:404]     Test net output #0: accuracy = 0.50588\nI0821 12:09:19.727718 32262 solver.cpp:404]     Test net output #1: loss = 2.00525 (* 1 = 2.00525 loss)\nI0821 12:09:21.058257 32262 solver.cpp:228] Iteration 6000, loss = 0.0626305\nI0821 12:09:21.058293 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:09:21.058310 32262 solver.cpp:244]     Train net output #1: loss = 0.0626306 (* 1 = 0.0626306 loss)\nI0821 12:09:21.143662 32262 sgd_solver.cpp:166] Iteration 6000, lr = 0.35\nI0821 12:11:40.257179 32262 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0821 12:13:01.964494 32262 solver.cpp:404]     Test net output #0: accuracy = 0.43964\nI0821 12:13:01.964771 32262 solver.cpp:404]     Test net output #1: loss = 2.89914 (* 1 = 2.89914 loss)\nI0821 12:13:03.295797 32262 solver.cpp:228] Iteration 6100, loss = 0.0688235\nI0821 12:13:03.295831 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:13:03.295847 32262 solver.cpp:244]     Train net output #1: loss = 0.0688236 (* 1 = 0.0688236 loss)\nI0821 12:13:03.382644 32262 sgd_solver.cpp:166] Iteration 6100, lr = 0.35\nI0821 12:15:22.516041 32262 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0821 12:16:44.226949 32262 solver.cpp:404]     Test net output #0: accuracy = 0.35684\nI0821 12:16:44.227234 32262 solver.cpp:404]     Test net output #1: loss = 4.29256 (* 1 = 4.29256 loss)\nI0821 12:16:45.557251 32262 solver.cpp:228] Iteration 6200, loss = 0.0822788\nI0821 12:16:45.557286 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:16:45.557302 32262 solver.cpp:244]     Train net output #1: loss = 0.0822789 (* 1 = 0.0822789 loss)\nI0821 
12:16:45.643947 32262 sgd_solver.cpp:166] Iteration 6200, lr = 0.35\nI0821 12:19:04.788359 32262 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0821 12:20:26.446177 32262 solver.cpp:404]     Test net output #0: accuracy = 0.47672\nI0821 12:20:26.446447 32262 solver.cpp:404]     Test net output #1: loss = 2.84157 (* 1 = 2.84157 loss)\nI0821 12:20:27.779197 32262 solver.cpp:228] Iteration 6300, loss = 0.0837686\nI0821 12:20:27.779232 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:20:27.779247 32262 solver.cpp:244]     Train net output #1: loss = 0.0837687 (* 1 = 0.0837687 loss)\nI0821 12:20:27.860251 32262 sgd_solver.cpp:166] Iteration 6300, lr = 0.35\nI0821 12:22:47.051918 32262 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0821 12:24:08.704402 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6638\nI0821 12:24:08.704641 32262 solver.cpp:404]     Test net output #1: loss = 1.29269 (* 1 = 1.29269 loss)\nI0821 12:24:10.034431 32262 solver.cpp:228] Iteration 6400, loss = 0.0555398\nI0821 12:24:10.034467 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:24:10.034482 32262 solver.cpp:244]     Train net output #1: loss = 0.0555399 (* 1 = 0.0555399 loss)\nI0821 12:24:10.119834 32262 sgd_solver.cpp:166] Iteration 6400, lr = 0.35\nI0821 12:26:29.193450 32262 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0821 12:27:50.852990 32262 solver.cpp:404]     Test net output #0: accuracy = 0.57264\nI0821 12:27:50.853242 32262 solver.cpp:404]     Test net output #1: loss = 1.73592 (* 1 = 1.73592 loss)\nI0821 12:27:52.184236 32262 solver.cpp:228] Iteration 6500, loss = 0.0598186\nI0821 12:27:52.184272 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:27:52.184288 32262 solver.cpp:244]     Train net output #1: loss = 0.0598186 (* 1 = 0.0598186 loss)\nI0821 12:27:52.270391 32262 sgd_solver.cpp:166] Iteration 6500, lr = 0.35\nI0821 12:30:11.320034 32262 solver.cpp:337] Iteration 6600, Testing 
net (#0)\nI0821 12:31:32.973237 32262 solver.cpp:404]     Test net output #0: accuracy = 0.48688\nI0821 12:31:32.973501 32262 solver.cpp:404]     Test net output #1: loss = 2.53221 (* 1 = 2.53221 loss)\nI0821 12:31:34.304298 32262 solver.cpp:228] Iteration 6600, loss = 0.0662547\nI0821 12:31:34.304334 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:31:34.304349 32262 solver.cpp:244]     Train net output #1: loss = 0.0662547 (* 1 = 0.0662547 loss)\nI0821 12:31:34.389780 32262 sgd_solver.cpp:166] Iteration 6600, lr = 0.35\nI0821 12:33:53.534538 32262 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0821 12:35:15.189815 32262 solver.cpp:404]     Test net output #0: accuracy = 0.5516\nI0821 12:35:15.190083 32262 solver.cpp:404]     Test net output #1: loss = 2.02325 (* 1 = 2.02325 loss)\nI0821 12:35:16.520516 32262 solver.cpp:228] Iteration 6700, loss = 0.0251803\nI0821 12:35:16.520552 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:35:16.520567 32262 solver.cpp:244]     Train net output #1: loss = 0.0251803 (* 1 = 0.0251803 loss)\nI0821 12:35:16.609591 32262 sgd_solver.cpp:166] Iteration 6700, lr = 0.35\nI0821 12:37:35.750543 32262 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0821 12:38:57.397123 32262 solver.cpp:404]     Test net output #0: accuracy = 0.31576\nI0821 12:38:57.397384 32262 solver.cpp:404]     Test net output #1: loss = 3.84364 (* 1 = 3.84364 loss)\nI0821 12:38:58.728503 32262 solver.cpp:228] Iteration 6800, loss = 0.021374\nI0821 12:38:58.728538 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:38:58.728554 32262 solver.cpp:244]     Train net output #1: loss = 0.021374 (* 1 = 0.021374 loss)\nI0821 12:38:58.808709 32262 sgd_solver.cpp:166] Iteration 6800, lr = 0.35\nI0821 12:41:17.907441 32262 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0821 12:42:39.625720 32262 solver.cpp:404]     Test net output #0: accuracy = 0.60056\nI0821 12:42:39.625986 32262 
solver.cpp:404]     Test net output #1: loss = 1.58834 (* 1 = 1.58834 loss)\nI0821 12:42:40.957545 32262 solver.cpp:228] Iteration 6900, loss = 0.0259915\nI0821 12:42:40.957579 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 12:42:40.957594 32262 solver.cpp:244]     Train net output #1: loss = 0.0259916 (* 1 = 0.0259916 loss)\nI0821 12:42:41.039727 32262 sgd_solver.cpp:166] Iteration 6900, lr = 0.35\nI0821 12:45:00.238539 32262 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0821 12:46:21.925737 32262 solver.cpp:404]     Test net output #0: accuracy = 0.49616\nI0821 12:46:21.926002 32262 solver.cpp:404]     Test net output #1: loss = 2.75633 (* 1 = 2.75633 loss)\nI0821 12:46:23.257478 32262 solver.cpp:228] Iteration 7000, loss = 0.0884668\nI0821 12:46:23.257514 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:46:23.257530 32262 solver.cpp:244]     Train net output #1: loss = 0.0884669 (* 1 = 0.0884669 loss)\nI0821 12:46:23.344225 32262 sgd_solver.cpp:166] Iteration 7000, lr = 0.35\nI0821 12:48:42.584597 32262 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0821 12:50:04.230511 32262 solver.cpp:404]     Test net output #0: accuracy = 0.52888\nI0821 12:50:04.230782 32262 solver.cpp:404]     Test net output #1: loss = 2.43388 (* 1 = 2.43388 loss)\nI0821 12:50:05.562261 32262 solver.cpp:228] Iteration 7100, loss = 0.0404858\nI0821 12:50:05.562300 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 12:50:05.562324 32262 solver.cpp:244]     Train net output #1: loss = 0.0404858 (* 1 = 0.0404858 loss)\nI0821 12:50:05.645884 32262 sgd_solver.cpp:166] Iteration 7100, lr = 0.35\nI0821 12:52:24.770488 32262 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0821 12:53:46.415838 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4444\nI0821 12:53:46.416115 32262 solver.cpp:404]     Test net output #1: loss = 3.17541 (* 1 = 3.17541 loss)\nI0821 12:53:47.746454 32262 solver.cpp:228] Iteration 7200, loss 
= 0.0760512\nI0821 12:53:47.746490 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 12:53:47.746505 32262 solver.cpp:244]     Train net output #1: loss = 0.0760512 (* 1 = 0.0760512 loss)\nI0821 12:53:47.825160 32262 sgd_solver.cpp:166] Iteration 7200, lr = 0.35\nI0821 12:56:06.496804 32262 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0821 12:57:28.146453 32262 solver.cpp:404]     Test net output #0: accuracy = 0.42676\nI0821 12:57:28.146716 32262 solver.cpp:404]     Test net output #1: loss = 3.94189 (* 1 = 3.94189 loss)\nI0821 12:57:29.476294 32262 solver.cpp:228] Iteration 7300, loss = 0.0746659\nI0821 12:57:29.476328 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 12:57:29.476344 32262 solver.cpp:244]     Train net output #1: loss = 0.074666 (* 1 = 0.074666 loss)\nI0821 12:57:29.556237 32262 sgd_solver.cpp:166] Iteration 7300, lr = 0.35\nI0821 12:59:48.255026 32262 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0821 13:01:09.908246 32262 solver.cpp:404]     Test net output #0: accuracy = 0.32852\nI0821 13:01:09.908493 32262 solver.cpp:404]     Test net output #1: loss = 4.22919 (* 1 = 4.22919 loss)\nI0821 13:01:11.239365 32262 solver.cpp:228] Iteration 7400, loss = 0.0359532\nI0821 13:01:11.239400 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:01:11.239416 32262 solver.cpp:244]     Train net output #1: loss = 0.0359533 (* 1 = 0.0359533 loss)\nI0821 13:01:11.316561 32262 sgd_solver.cpp:166] Iteration 7400, lr = 0.35\nI0821 13:03:30.052034 32262 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0821 13:04:51.705974 32262 solver.cpp:404]     Test net output #0: accuracy = 0.3766\nI0821 13:04:51.706245 32262 solver.cpp:404]     Test net output #1: loss = 4.11705 (* 1 = 4.11705 loss)\nI0821 13:04:53.037575 32262 solver.cpp:228] Iteration 7500, loss = 0.0492221\nI0821 13:04:53.037621 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:04:53.037636 32262 
solver.cpp:244]     Train net output #1: loss = 0.0492222 (* 1 = 0.0492222 loss)\nI0821 13:04:53.119355 32262 sgd_solver.cpp:166] Iteration 7500, lr = 0.35\nI0821 13:07:11.828362 32262 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0821 13:08:33.497776 32262 solver.cpp:404]     Test net output #0: accuracy = 0.32588\nI0821 13:08:33.498029 32262 solver.cpp:404]     Test net output #1: loss = 3.31288 (* 1 = 3.31288 loss)\nI0821 13:08:34.828241 32262 solver.cpp:228] Iteration 7600, loss = 0.06939\nI0821 13:08:34.828285 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 13:08:34.828301 32262 solver.cpp:244]     Train net output #1: loss = 0.0693901 (* 1 = 0.0693901 loss)\nI0821 13:08:34.909035 32262 sgd_solver.cpp:166] Iteration 7600, lr = 0.35\nI0821 13:10:53.599448 32262 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0821 13:12:15.268901 32262 solver.cpp:404]     Test net output #0: accuracy = 0.16456\nI0821 13:12:15.269182 32262 solver.cpp:404]     Test net output #1: loss = 7.79423 (* 1 = 7.79423 loss)\nI0821 13:12:16.600035 32262 solver.cpp:228] Iteration 7700, loss = 0.11303\nI0821 13:12:16.600078 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:12:16.600095 32262 solver.cpp:244]     Train net output #1: loss = 0.11303 (* 1 = 0.11303 loss)\nI0821 13:12:16.680157 32262 sgd_solver.cpp:166] Iteration 7700, lr = 0.35\nI0821 13:14:35.367575 32262 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0821 13:15:57.034502 32262 solver.cpp:404]     Test net output #0: accuracy = 0.54556\nI0821 13:15:57.034777 32262 solver.cpp:404]     Test net output #1: loss = 2.17971 (* 1 = 2.17971 loss)\nI0821 13:15:58.365147 32262 solver.cpp:228] Iteration 7800, loss = 0.0848856\nI0821 13:15:58.365192 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:15:58.365208 32262 solver.cpp:244]     Train net output #1: loss = 0.0848857 (* 1 = 0.0848857 loss)\nI0821 13:15:58.444228 32262 sgd_solver.cpp:166] Iteration 
7800, lr = 0.35\nI0821 13:18:17.059507 32262 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0821 13:19:38.728916 32262 solver.cpp:404]     Test net output #0: accuracy = 0.5434\nI0821 13:19:38.729182 32262 solver.cpp:404]     Test net output #1: loss = 1.99431 (* 1 = 1.99431 loss)\nI0821 13:19:40.059711 32262 solver.cpp:228] Iteration 7900, loss = 0.0491236\nI0821 13:19:40.059758 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:19:40.059774 32262 solver.cpp:244]     Train net output #1: loss = 0.0491237 (* 1 = 0.0491237 loss)\nI0821 13:19:40.142741 32262 sgd_solver.cpp:166] Iteration 7900, lr = 0.35\nI0821 13:21:58.781683 32262 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0821 13:23:20.454995 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4014\nI0821 13:23:20.455221 32262 solver.cpp:404]     Test net output #1: loss = 4.52014 (* 1 = 4.52014 loss)\nI0821 13:23:21.786067 32262 solver.cpp:228] Iteration 8000, loss = 0.0523407\nI0821 13:23:21.786111 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:23:21.786126 32262 solver.cpp:244]     Train net output #1: loss = 0.0523408 (* 1 = 0.0523408 loss)\nI0821 13:23:21.867880 32262 sgd_solver.cpp:166] Iteration 8000, lr = 0.35\nI0821 13:25:40.516077 32262 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0821 13:27:02.184556 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4228\nI0821 13:27:02.184746 32262 solver.cpp:404]     Test net output #1: loss = 2.98892 (* 1 = 2.98892 loss)\nI0821 13:27:03.514868 32262 solver.cpp:228] Iteration 8100, loss = 0.0502969\nI0821 13:27:03.514912 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 13:27:03.514930 32262 solver.cpp:244]     Train net output #1: loss = 0.050297 (* 1 = 0.050297 loss)\nI0821 13:27:03.591686 32262 sgd_solver.cpp:166] Iteration 8100, lr = 0.35\nI0821 13:29:22.290457 32262 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0821 13:30:43.965741 32262 solver.cpp:404]    
 Test net output #0: accuracy = 0.51088\nI0821 13:30:43.965989 32262 solver.cpp:404]     Test net output #1: loss = 2.19641 (* 1 = 2.19641 loss)\nI0821 13:30:45.296097 32262 solver.cpp:228] Iteration 8200, loss = 0.0770076\nI0821 13:30:45.296141 32262 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0821 13:30:45.296157 32262 solver.cpp:244]     Train net output #1: loss = 0.0770077 (* 1 = 0.0770077 loss)\nI0821 13:30:45.378423 32262 sgd_solver.cpp:166] Iteration 8200, lr = 0.35\nI0821 13:33:03.995705 32262 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0821 13:34:25.677565 32262 solver.cpp:404]     Test net output #0: accuracy = 0.54204\nI0821 13:34:25.677820 32262 solver.cpp:404]     Test net output #1: loss = 2.14475 (* 1 = 2.14475 loss)\nI0821 13:34:27.008071 32262 solver.cpp:228] Iteration 8300, loss = 0.0886881\nI0821 13:34:27.008117 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:34:27.008133 32262 solver.cpp:244]     Train net output #1: loss = 0.0886882 (* 1 = 0.0886882 loss)\nI0821 13:34:27.087723 32262 sgd_solver.cpp:166] Iteration 8300, lr = 0.35\nI0821 13:36:45.643486 32262 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0821 13:38:07.310571 32262 solver.cpp:404]     Test net output #0: accuracy = 0.55572\nI0821 13:38:07.310820 32262 solver.cpp:404]     Test net output #1: loss = 2.07145 (* 1 = 2.07145 loss)\nI0821 13:38:08.641748 32262 solver.cpp:228] Iteration 8400, loss = 0.0754955\nI0821 13:38:08.641793 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:38:08.641808 32262 solver.cpp:244]     Train net output #1: loss = 0.0754956 (* 1 = 0.0754956 loss)\nI0821 13:38:08.724719 32262 sgd_solver.cpp:166] Iteration 8400, lr = 0.35\nI0821 13:40:27.369676 32262 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0821 13:41:49.031000 32262 solver.cpp:404]     Test net output #0: accuracy = 0.44548\nI0821 13:41:49.031226 32262 solver.cpp:404]     Test net output #1: loss = 3.46361 (* 1 = 
3.46361 loss)\nI0821 13:41:50.361301 32262 solver.cpp:228] Iteration 8500, loss = 0.102221\nI0821 13:41:50.361346 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 13:41:50.361362 32262 solver.cpp:244]     Train net output #1: loss = 0.102221 (* 1 = 0.102221 loss)\nI0821 13:41:50.438545 32262 sgd_solver.cpp:166] Iteration 8500, lr = 0.35\nI0821 13:44:09.045676 32262 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0821 13:45:30.707583 32262 solver.cpp:404]     Test net output #0: accuracy = 0.39056\nI0821 13:45:30.707834 32262 solver.cpp:404]     Test net output #1: loss = 3.3241 (* 1 = 3.3241 loss)\nI0821 13:45:32.038138 32262 solver.cpp:228] Iteration 8600, loss = 0.064963\nI0821 13:45:32.038184 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 13:45:32.038200 32262 solver.cpp:244]     Train net output #1: loss = 0.0649631 (* 1 = 0.0649631 loss)\nI0821 13:45:32.119595 32262 sgd_solver.cpp:166] Iteration 8600, lr = 0.35\nI0821 13:47:50.768458 32262 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0821 13:49:12.418432 32262 solver.cpp:404]     Test net output #0: accuracy = 0.40652\nI0821 13:49:12.418643 32262 solver.cpp:404]     Test net output #1: loss = 3.85054 (* 1 = 3.85054 loss)\nI0821 13:49:13.748656 32262 solver.cpp:228] Iteration 8700, loss = 0.0420632\nI0821 13:49:13.748702 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 13:49:13.748718 32262 solver.cpp:244]     Train net output #1: loss = 0.0420633 (* 1 = 0.0420633 loss)\nI0821 13:49:13.826397 32262 sgd_solver.cpp:166] Iteration 8700, lr = 0.35\nI0821 13:51:32.486063 32262 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0821 13:52:54.097738 32262 solver.cpp:404]     Test net output #0: accuracy = 0.50836\nI0821 13:52:54.097949 32262 solver.cpp:404]     Test net output #1: loss = 2.68949 (* 1 = 2.68949 loss)\nI0821 13:52:55.428330 32262 solver.cpp:228] Iteration 8800, loss = 0.0454222\nI0821 13:52:55.428375 32262 solver.cpp:244]     
Train net output #0: accuracy = 0.984\nI0821 13:52:55.428390 32262 solver.cpp:244]     Train net output #1: loss = 0.0454223 (* 1 = 0.0454223 loss)\nI0821 13:52:55.507794 32262 sgd_solver.cpp:166] Iteration 8800, lr = 0.35\nI0821 13:55:14.128484 32262 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0821 13:56:35.741497 32262 solver.cpp:404]     Test net output #0: accuracy = 0.53636\nI0821 13:56:35.741708 32262 solver.cpp:404]     Test net output #1: loss = 2.60871 (* 1 = 2.60871 loss)\nI0821 13:56:37.072057 32262 solver.cpp:228] Iteration 8900, loss = 0.0154515\nI0821 13:56:37.072103 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 13:56:37.072118 32262 solver.cpp:244]     Train net output #1: loss = 0.0154516 (* 1 = 0.0154516 loss)\nI0821 13:56:37.153002 32262 sgd_solver.cpp:166] Iteration 8900, lr = 0.35\nI0821 13:58:55.785825 32262 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0821 14:00:17.394635 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6168\nI0821 14:00:17.394841 32262 solver.cpp:404]     Test net output #1: loss = 1.93391 (* 1 = 1.93391 loss)\nI0821 14:00:18.725208 32262 solver.cpp:228] Iteration 9000, loss = 0.0472065\nI0821 14:00:18.725250 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:00:18.725265 32262 solver.cpp:244]     Train net output #1: loss = 0.0472067 (* 1 = 0.0472067 loss)\nI0821 14:00:18.807096 32262 sgd_solver.cpp:166] Iteration 9000, lr = 0.35\nI0821 14:02:37.389930 32262 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0821 14:03:59.003180 32262 solver.cpp:404]     Test net output #0: accuracy = 0.61152\nI0821 14:03:59.003407 32262 solver.cpp:404]     Test net output #1: loss = 1.73057 (* 1 = 1.73057 loss)\nI0821 14:04:00.333587 32262 solver.cpp:228] Iteration 9100, loss = 0.0483016\nI0821 14:04:00.333631 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:04:00.333647 32262 solver.cpp:244]     Train net output #1: loss = 0.0483017 (* 1 = 0.0483017 
loss)\nI0821 14:04:00.415015 32262 sgd_solver.cpp:166] Iteration 9100, lr = 0.35\nI0821 14:06:19.154644 32262 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0821 14:07:40.769315 32262 solver.cpp:404]     Test net output #0: accuracy = 0.54864\nI0821 14:07:40.769527 32262 solver.cpp:404]     Test net output #1: loss = 2.3856 (* 1 = 2.3856 loss)\nI0821 14:07:42.100308 32262 solver.cpp:228] Iteration 9200, loss = 0.0565293\nI0821 14:07:42.100353 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:07:42.100368 32262 solver.cpp:244]     Train net output #1: loss = 0.0565294 (* 1 = 0.0565294 loss)\nI0821 14:07:42.179502 32262 sgd_solver.cpp:166] Iteration 9200, lr = 0.35\nI0821 14:10:00.776397 32262 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0821 14:11:22.389739 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4624\nI0821 14:11:22.389962 32262 solver.cpp:404]     Test net output #1: loss = 3.70052 (* 1 = 3.70052 loss)\nI0821 14:11:23.720269 32262 solver.cpp:228] Iteration 9300, loss = 0.0567814\nI0821 14:11:23.720314 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:11:23.720329 32262 solver.cpp:244]     Train net output #1: loss = 0.0567815 (* 1 = 0.0567815 loss)\nI0821 14:11:23.797569 32262 sgd_solver.cpp:166] Iteration 9300, lr = 0.35\nI0821 14:13:42.483944 32262 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0821 14:15:04.104619 32262 solver.cpp:404]     Test net output #0: accuracy = 0.55224\nI0821 14:15:04.104853 32262 solver.cpp:404]     Test net output #1: loss = 2.28084 (* 1 = 2.28084 loss)\nI0821 14:15:05.434613 32262 solver.cpp:228] Iteration 9400, loss = 0.106208\nI0821 14:15:05.434658 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 14:15:05.434674 32262 solver.cpp:244]     Train net output #1: loss = 0.106208 (* 1 = 0.106208 loss)\nI0821 14:15:05.514426 32262 sgd_solver.cpp:166] Iteration 9400, lr = 0.35\nI0821 14:17:24.126693 32262 solver.cpp:337] Iteration 9500, 
Testing net (#0)\nI0821 14:18:45.744559 32262 solver.cpp:404]     Test net output #0: accuracy = 0.63644\nI0821 14:18:45.744781 32262 solver.cpp:404]     Test net output #1: loss = 1.5819 (* 1 = 1.5819 loss)\nI0821 14:18:47.075299 32262 solver.cpp:228] Iteration 9500, loss = 0.0347112\nI0821 14:18:47.075342 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 14:18:47.075359 32262 solver.cpp:244]     Train net output #1: loss = 0.0347113 (* 1 = 0.0347113 loss)\nI0821 14:18:47.150754 32262 sgd_solver.cpp:166] Iteration 9500, lr = 0.35\nI0821 14:21:05.761859 32262 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0821 14:22:27.381333 32262 solver.cpp:404]     Test net output #0: accuracy = 0.54776\nI0821 14:22:27.381597 32262 solver.cpp:404]     Test net output #1: loss = 2.55376 (* 1 = 2.55376 loss)\nI0821 14:22:28.711283 32262 solver.cpp:228] Iteration 9600, loss = 0.0204738\nI0821 14:22:28.711318 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:22:28.711333 32262 solver.cpp:244]     Train net output #1: loss = 0.0204739 (* 1 = 0.0204739 loss)\nI0821 14:22:28.792862 32262 sgd_solver.cpp:166] Iteration 9600, lr = 0.35\nI0821 14:24:47.461963 32262 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0821 14:26:09.111042 32262 solver.cpp:404]     Test net output #0: accuracy = 0.4454\nI0821 14:26:09.111277 32262 solver.cpp:404]     Test net output #1: loss = 4.42605 (* 1 = 4.42605 loss)\nI0821 14:26:10.443109 32262 solver.cpp:228] Iteration 9700, loss = 0.089365\nI0821 14:26:10.443147 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:26:10.443169 32262 solver.cpp:244]     Train net output #1: loss = 0.0893652 (* 1 = 0.0893652 loss)\nI0821 14:26:10.523385 32262 sgd_solver.cpp:166] Iteration 9700, lr = 0.35\nI0821 14:28:29.189299 32262 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0821 14:29:50.866370 32262 solver.cpp:404]     Test net output #0: accuracy = 0.62124\nI0821 14:29:50.866585 32262 
solver.cpp:404]     Test net output #1: loss = 1.89709 (* 1 = 1.89709 loss)\nI0821 14:29:52.197700 32262 solver.cpp:228] Iteration 9800, loss = 0.0619326\nI0821 14:29:52.197738 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:29:52.197760 32262 solver.cpp:244]     Train net output #1: loss = 0.0619327 (* 1 = 0.0619327 loss)\nI0821 14:29:52.278272 32262 sgd_solver.cpp:166] Iteration 9800, lr = 0.35\nI0821 14:32:10.993597 32262 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0821 14:33:32.668697 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64584\nI0821 14:33:32.668907 32262 solver.cpp:404]     Test net output #1: loss = 1.80147 (* 1 = 1.80147 loss)\nI0821 14:33:33.999229 32262 solver.cpp:228] Iteration 9900, loss = 0.0692682\nI0821 14:33:33.999269 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:33:33.999290 32262 solver.cpp:244]     Train net output #1: loss = 0.0692684 (* 1 = 0.0692684 loss)\nI0821 14:33:34.067454 32262 sgd_solver.cpp:166] Iteration 9900, lr = 0.35\nI0821 14:35:52.124894 32262 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0821 14:37:13.792558 32262 solver.cpp:404]     Test net output #0: accuracy = 0.67364\nI0821 14:37:13.792807 32262 solver.cpp:404]     Test net output #1: loss = 1.46675 (* 1 = 1.46675 loss)\nI0821 14:37:15.123265 32262 solver.cpp:228] Iteration 10000, loss = 0.0896103\nI0821 14:37:15.123301 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 14:37:15.123317 32262 solver.cpp:244]     Train net output #1: loss = 0.0896104 (* 1 = 0.0896104 loss)\nI0821 14:37:15.192750 32262 sgd_solver.cpp:166] Iteration 10000, lr = 0.35\nI0821 14:39:33.212477 32262 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0821 14:40:54.872534 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70776\nI0821 14:40:54.872747 32262 solver.cpp:404]     Test net output #1: loss = 1.26137 (* 1 = 1.26137 loss)\nI0821 14:40:56.203486 32262 solver.cpp:228] Iteration 
10100, loss = 0.0561682\nI0821 14:40:56.203522 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:40:56.203538 32262 solver.cpp:244]     Train net output #1: loss = 0.0561683 (* 1 = 0.0561683 loss)\nI0821 14:40:56.273270 32262 sgd_solver.cpp:166] Iteration 10100, lr = 0.35\nI0821 14:43:14.281388 32262 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0821 14:44:35.954015 32262 solver.cpp:404]     Test net output #0: accuracy = 0.62104\nI0821 14:44:35.954249 32262 solver.cpp:404]     Test net output #1: loss = 1.79049 (* 1 = 1.79049 loss)\nI0821 14:44:37.285564 32262 solver.cpp:228] Iteration 10200, loss = 0.0558174\nI0821 14:44:37.285600 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 14:44:37.285616 32262 solver.cpp:244]     Train net output #1: loss = 0.0558176 (* 1 = 0.0558176 loss)\nI0821 14:44:37.351348 32262 sgd_solver.cpp:166] Iteration 10200, lr = 0.35\nI0821 14:46:55.325536 32262 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0821 14:48:17.005437 32262 solver.cpp:404]     Test net output #0: accuracy = 0.63808\nI0821 14:48:17.005666 32262 solver.cpp:404]     Test net output #1: loss = 1.82476 (* 1 = 1.82476 loss)\nI0821 14:48:18.336251 32262 solver.cpp:228] Iteration 10300, loss = 0.0972735\nI0821 14:48:18.336288 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 14:48:18.336311 32262 solver.cpp:244]     Train net output #1: loss = 0.0972736 (* 1 = 0.0972736 loss)\nI0821 14:48:18.408872 32262 sgd_solver.cpp:166] Iteration 10300, lr = 0.35\nI0821 14:50:36.370455 32262 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0821 14:51:58.053220 32262 solver.cpp:404]     Test net output #0: accuracy = 0.40948\nI0821 14:51:58.053478 32262 solver.cpp:404]     Test net output #1: loss = 3.62529 (* 1 = 3.62529 loss)\nI0821 14:51:59.384433 32262 solver.cpp:228] Iteration 10400, loss = 0.0908789\nI0821 14:51:59.384472 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 
14:51:59.384495 32262 solver.cpp:244]     Train net output #1: loss = 0.090879 (* 1 = 0.090879 loss)\nI0821 14:51:59.454731 32262 sgd_solver.cpp:166] Iteration 10400, lr = 0.35\nI0821 14:54:17.400460 32262 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0821 14:55:39.071404 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68292\nI0821 14:55:39.071666 32262 solver.cpp:404]     Test net output #1: loss = 1.49727 (* 1 = 1.49727 loss)\nI0821 14:55:40.398473 32262 solver.cpp:228] Iteration 10500, loss = 0.0213843\nI0821 14:55:40.398510 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 14:55:40.398533 32262 solver.cpp:244]     Train net output #1: loss = 0.0213844 (* 1 = 0.0213844 loss)\nI0821 14:55:40.480670 32262 sgd_solver.cpp:166] Iteration 10500, lr = 0.35\nI0821 14:57:58.482267 32262 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0821 14:59:20.161870 32262 solver.cpp:404]     Test net output #0: accuracy = 0.59752\nI0821 14:59:20.162119 32262 solver.cpp:404]     Test net output #1: loss = 1.97127 (* 1 = 1.97127 loss)\nI0821 14:59:21.489423 32262 solver.cpp:228] Iteration 10600, loss = 0.017625\nI0821 14:59:21.489460 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 14:59:21.489482 32262 solver.cpp:244]     Train net output #1: loss = 0.0176252 (* 1 = 0.0176252 loss)\nI0821 14:59:21.568637 32262 sgd_solver.cpp:166] Iteration 10600, lr = 0.35\nI0821 15:01:39.575479 32262 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0821 15:03:01.241137 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65904\nI0821 15:03:01.241402 32262 solver.cpp:404]     Test net output #1: loss = 1.83146 (* 1 = 1.83146 loss)\nI0821 15:03:02.568665 32262 solver.cpp:228] Iteration 10700, loss = 0.110957\nI0821 15:03:02.568702 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 15:03:02.568724 32262 solver.cpp:244]     Train net output #1: loss = 0.110957 (* 1 = 0.110957 loss)\nI0821 15:03:02.647722 32262 
sgd_solver.cpp:166] Iteration 10700, lr = 0.35\nI0821 15:05:20.645112 32262 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0821 15:06:42.311498 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64768\nI0821 15:06:42.311739 32262 solver.cpp:404]     Test net output #1: loss = 1.79233 (* 1 = 1.79233 loss)\nI0821 15:06:43.638144 32262 solver.cpp:228] Iteration 10800, loss = 0.0888913\nI0821 15:06:43.638192 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 15:06:43.638217 32262 solver.cpp:244]     Train net output #1: loss = 0.0888914 (* 1 = 0.0888914 loss)\nI0821 15:06:43.718371 32262 sgd_solver.cpp:166] Iteration 10800, lr = 0.35\nI0821 15:09:01.700219 32262 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0821 15:10:23.375944 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64844\nI0821 15:10:23.376199 32262 solver.cpp:404]     Test net output #1: loss = 1.72847 (* 1 = 1.72847 loss)\nI0821 15:10:24.702944 32262 solver.cpp:228] Iteration 10900, loss = 0.0271755\nI0821 15:10:24.702993 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:10:24.703016 32262 solver.cpp:244]     Train net output #1: loss = 0.0271756 (* 1 = 0.0271756 loss)\nI0821 15:10:24.777730 32262 sgd_solver.cpp:166] Iteration 10900, lr = 0.35\nI0821 15:12:42.808959 32262 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0821 15:14:04.483933 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70252\nI0821 15:14:04.484154 32262 solver.cpp:404]     Test net output #1: loss = 1.39367 (* 1 = 1.39367 loss)\nI0821 15:14:05.811050 32262 solver.cpp:228] Iteration 11000, loss = 0.0314117\nI0821 15:14:05.811100 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:14:05.811127 32262 solver.cpp:244]     Train net output #1: loss = 0.0314118 (* 1 = 0.0314118 loss)\nI0821 15:14:05.886162 32262 sgd_solver.cpp:166] Iteration 11000, lr = 0.35\nI0821 15:16:23.811067 32262 solver.cpp:337] Iteration 11100, Testing net 
(#0)\nI0821 15:17:45.478134 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73972\nI0821 15:17:45.478389 32262 solver.cpp:404]     Test net output #1: loss = 1.04247 (* 1 = 1.04247 loss)\nI0821 15:17:46.806046 32262 solver.cpp:228] Iteration 11100, loss = 0.0622324\nI0821 15:17:46.806087 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:17:46.806109 32262 solver.cpp:244]     Train net output #1: loss = 0.0622325 (* 1 = 0.0622325 loss)\nI0821 15:17:46.879523 32262 sgd_solver.cpp:166] Iteration 11100, lr = 0.35\nI0821 15:20:04.843915 32262 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0821 15:21:26.513617 32262 solver.cpp:404]     Test net output #0: accuracy = 0.63412\nI0821 15:21:26.513870 32262 solver.cpp:404]     Test net output #1: loss = 1.82956 (* 1 = 1.82956 loss)\nI0821 15:21:27.842370 32262 solver.cpp:228] Iteration 11200, loss = 0.053875\nI0821 15:21:27.842408 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:21:27.842432 32262 solver.cpp:244]     Train net output #1: loss = 0.0538751 (* 1 = 0.0538751 loss)\nI0821 15:21:27.921793 32262 sgd_solver.cpp:166] Iteration 11200, lr = 0.35\nI0821 15:23:45.893811 32262 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0821 15:25:07.542510 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6444\nI0821 15:25:07.542745 32262 solver.cpp:404]     Test net output #1: loss = 1.67608 (* 1 = 1.67608 loss)\nI0821 15:25:08.869832 32262 solver.cpp:228] Iteration 11300, loss = 0.0394865\nI0821 15:25:08.869864 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:25:08.869879 32262 solver.cpp:244]     Train net output #1: loss = 0.0394866 (* 1 = 0.0394866 loss)\nI0821 15:25:08.939662 32262 sgd_solver.cpp:166] Iteration 11300, lr = 0.35\nI0821 15:27:26.954336 32262 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0821 15:28:48.599386 32262 solver.cpp:404]     Test net output #0: accuracy = 0.55352\nI0821 15:28:48.599586 32262 
solver.cpp:404]     Test net output #1: loss = 2.37621 (* 1 = 2.37621 loss)\nI0821 15:28:49.925576 32262 solver.cpp:228] Iteration 11400, loss = 0.0420163\nI0821 15:28:49.925611 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:28:49.925626 32262 solver.cpp:244]     Train net output #1: loss = 0.0420163 (* 1 = 0.0420163 loss)\nI0821 15:28:49.999387 32262 sgd_solver.cpp:166] Iteration 11400, lr = 0.35\nI0821 15:31:08.006172 32262 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0821 15:32:29.651654 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64632\nI0821 15:32:29.651906 32262 solver.cpp:404]     Test net output #1: loss = 1.94916 (* 1 = 1.94916 loss)\nI0821 15:32:30.977843 32262 solver.cpp:228] Iteration 11500, loss = 0.069272\nI0821 15:32:30.977875 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:32:30.977891 32262 solver.cpp:244]     Train net output #1: loss = 0.0692721 (* 1 = 0.0692721 loss)\nI0821 15:32:31.053588 32262 sgd_solver.cpp:166] Iteration 11500, lr = 0.35\nI0821 15:34:48.995822 32262 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0821 15:36:10.643503 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68124\nI0821 15:36:10.643771 32262 solver.cpp:404]     Test net output #1: loss = 1.73526 (* 1 = 1.73526 loss)\nI0821 15:36:11.970541 32262 solver.cpp:228] Iteration 11600, loss = 0.0597053\nI0821 15:36:11.970576 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:36:11.970590 32262 solver.cpp:244]     Train net output #1: loss = 0.0597054 (* 1 = 0.0597054 loss)\nI0821 15:36:12.044867 32262 sgd_solver.cpp:166] Iteration 11600, lr = 0.35\nI0821 15:38:30.054401 32262 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0821 15:39:51.703203 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72884\nI0821 15:39:51.703477 32262 solver.cpp:404]     Test net output #1: loss = 1.31681 (* 1 = 1.31681 loss)\nI0821 15:39:53.030256 32262 solver.cpp:228] 
Iteration 11700, loss = 0.023927\nI0821 15:39:53.030292 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 15:39:53.030308 32262 solver.cpp:244]     Train net output #1: loss = 0.0239271 (* 1 = 0.0239271 loss)\nI0821 15:39:53.104892 32262 sgd_solver.cpp:166] Iteration 11700, lr = 0.35\nI0821 15:42:11.091998 32262 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0821 15:43:32.748157 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73928\nI0821 15:43:32.748425 32262 solver.cpp:404]     Test net output #1: loss = 1.29021 (* 1 = 1.29021 loss)\nI0821 15:43:34.075613 32262 solver.cpp:228] Iteration 11800, loss = 0.0493874\nI0821 15:43:34.075649 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 15:43:34.075664 32262 solver.cpp:244]     Train net output #1: loss = 0.0493874 (* 1 = 0.0493874 loss)\nI0821 15:43:34.150863 32262 sgd_solver.cpp:166] Iteration 11800, lr = 0.35\nI0821 15:45:52.131227 32262 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0821 15:47:13.777019 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73792\nI0821 15:47:13.777257 32262 solver.cpp:404]     Test net output #1: loss = 1.18868 (* 1 = 1.18868 loss)\nI0821 15:47:15.103880 32262 solver.cpp:228] Iteration 11900, loss = 0.0336812\nI0821 15:47:15.103919 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:47:15.103937 32262 solver.cpp:244]     Train net output #1: loss = 0.0336812 (* 1 = 0.0336812 loss)\nI0821 15:47:15.179667 32262 sgd_solver.cpp:166] Iteration 11900, lr = 0.35\nI0821 15:49:33.071279 32262 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0821 15:50:54.723512 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72768\nI0821 15:50:54.723726 32262 solver.cpp:404]     Test net output #1: loss = 1.14528 (* 1 = 1.14528 loss)\nI0821 15:50:56.050237 32262 solver.cpp:228] Iteration 12000, loss = 0.0261001\nI0821 15:50:56.050273 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 
15:50:56.050289 32262 solver.cpp:244]     Train net output #1: loss = 0.0261001 (* 1 = 0.0261001 loss)\nI0821 15:50:56.128703 32262 sgd_solver.cpp:166] Iteration 12000, lr = 0.35\nI0821 15:53:14.181349 32262 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0821 15:54:35.832520 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68092\nI0821 15:54:35.832751 32262 solver.cpp:404]     Test net output #1: loss = 1.52648 (* 1 = 1.52648 loss)\nI0821 15:54:37.158772 32262 solver.cpp:228] Iteration 12100, loss = 0.0476641\nI0821 15:54:37.158808 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 15:54:37.158824 32262 solver.cpp:244]     Train net output #1: loss = 0.0476642 (* 1 = 0.0476642 loss)\nI0821 15:54:37.236223 32262 sgd_solver.cpp:166] Iteration 12100, lr = 0.35\nI0821 15:56:55.255399 32262 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0821 15:58:16.912488 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76552\nI0821 15:58:16.912721 32262 solver.cpp:404]     Test net output #1: loss = 0.860269 (* 1 = 0.860269 loss)\nI0821 15:58:18.239375 32262 solver.cpp:228] Iteration 12200, loss = 0.0403368\nI0821 15:58:18.239410 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 15:58:18.239425 32262 solver.cpp:244]     Train net output #1: loss = 0.0403368 (* 1 = 0.0403368 loss)\nI0821 15:58:18.312324 32262 sgd_solver.cpp:166] Iteration 12200, lr = 0.35\nI0821 16:00:36.333206 32262 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0821 16:01:57.988214 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76536\nI0821 16:01:57.988461 32262 solver.cpp:404]     Test net output #1: loss = 0.899394 (* 1 = 0.899394 loss)\nI0821 16:01:59.315030 32262 solver.cpp:228] Iteration 12300, loss = 0.0755064\nI0821 16:01:59.315066 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:01:59.315083 32262 solver.cpp:244]     Train net output #1: loss = 0.0755064 (* 1 = 0.0755064 loss)\nI0821 
16:01:59.388738 32262 sgd_solver.cpp:166] Iteration 12300, lr = 0.35\nI0821 16:04:17.411540 32262 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0821 16:05:39.091734 32262 solver.cpp:404]     Test net output #0: accuracy = 0.69708\nI0821 16:05:39.091954 32262 solver.cpp:404]     Test net output #1: loss = 1.6488 (* 1 = 1.6488 loss)\nI0821 16:05:40.419692 32262 solver.cpp:228] Iteration 12400, loss = 0.0417437\nI0821 16:05:40.419731 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:05:40.419754 32262 solver.cpp:244]     Train net output #1: loss = 0.0417437 (* 1 = 0.0417437 loss)\nI0821 16:05:40.490907 32262 sgd_solver.cpp:166] Iteration 12400, lr = 0.35\nI0821 16:07:58.477187 32262 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0821 16:09:20.107527 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76604\nI0821 16:09:20.107744 32262 solver.cpp:404]     Test net output #1: loss = 0.992924 (* 1 = 0.992924 loss)\nI0821 16:09:21.435835 32262 solver.cpp:228] Iteration 12500, loss = 0.0843555\nI0821 16:09:21.435879 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 16:09:21.435904 32262 solver.cpp:244]     Train net output #1: loss = 0.0843555 (* 1 = 0.0843555 loss)\nI0821 16:09:21.507741 32262 sgd_solver.cpp:166] Iteration 12500, lr = 0.35\nI0821 16:11:39.464869 32262 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0821 16:13:01.089505 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74716\nI0821 16:13:01.089747 32262 solver.cpp:404]     Test net output #1: loss = 1.14251 (* 1 = 1.14251 loss)\nI0821 16:13:02.415772 32262 solver.cpp:228] Iteration 12600, loss = 0.0354699\nI0821 16:13:02.415812 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:13:02.415833 32262 solver.cpp:244]     Train net output #1: loss = 0.0354699 (* 1 = 0.0354699 loss)\nI0821 16:13:02.487915 32262 sgd_solver.cpp:166] Iteration 12600, lr = 0.35\nI0821 16:15:20.485705 32262 solver.cpp:337] Iteration 
12700, Testing net (#0)\nI0821 16:16:42.111486 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72168\nI0821 16:16:42.111702 32262 solver.cpp:404]     Test net output #1: loss = 1.29157 (* 1 = 1.29157 loss)\nI0821 16:16:43.438652 32262 solver.cpp:228] Iteration 12700, loss = 0.0375873\nI0821 16:16:43.438690 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:16:43.438714 32262 solver.cpp:244]     Train net output #1: loss = 0.0375873 (* 1 = 0.0375873 loss)\nI0821 16:16:43.519081 32262 sgd_solver.cpp:166] Iteration 12700, lr = 0.35\nI0821 16:19:01.495163 32262 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0821 16:20:23.125268 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64956\nI0821 16:20:23.125527 32262 solver.cpp:404]     Test net output #1: loss = 1.85594 (* 1 = 1.85594 loss)\nI0821 16:20:24.453099 32262 solver.cpp:228] Iteration 12800, loss = 0.0418822\nI0821 16:20:24.453141 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:20:24.453164 32262 solver.cpp:244]     Train net output #1: loss = 0.0418822 (* 1 = 0.0418822 loss)\nI0821 16:20:24.524242 32262 sgd_solver.cpp:166] Iteration 12800, lr = 0.35\nI0821 16:22:42.510280 32262 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0821 16:24:04.132550 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6932\nI0821 16:24:04.132805 32262 solver.cpp:404]     Test net output #1: loss = 1.46851 (* 1 = 1.46851 loss)\nI0821 16:24:05.459328 32262 solver.cpp:228] Iteration 12900, loss = 0.0110404\nI0821 16:24:05.459365 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:24:05.459380 32262 solver.cpp:244]     Train net output #1: loss = 0.0110404 (* 1 = 0.0110404 loss)\nI0821 16:24:05.538503 32262 sgd_solver.cpp:166] Iteration 12900, lr = 0.35\nI0821 16:26:23.637064 32262 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0821 16:27:45.258273 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76488\nI0821 
16:27:45.258505 32262 solver.cpp:404]     Test net output #1: loss = 0.943099 (* 1 = 0.943099 loss)\nI0821 16:27:46.584738 32262 solver.cpp:228] Iteration 13000, loss = 0.0246976\nI0821 16:27:46.584774 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:27:46.584789 32262 solver.cpp:244]     Train net output #1: loss = 0.0246977 (* 1 = 0.0246977 loss)\nI0821 16:27:46.671562 32262 sgd_solver.cpp:166] Iteration 13000, lr = 0.35\nI0821 16:30:05.258682 32262 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0821 16:31:26.882464 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74304\nI0821 16:31:26.882680 32262 solver.cpp:404]     Test net output #1: loss = 1.07513 (* 1 = 1.07513 loss)\nI0821 16:31:28.208930 32262 solver.cpp:228] Iteration 13100, loss = 0.026044\nI0821 16:31:28.208966 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:31:28.208981 32262 solver.cpp:244]     Train net output #1: loss = 0.0260441 (* 1 = 0.0260441 loss)\nI0821 16:31:28.294975 32262 sgd_solver.cpp:166] Iteration 13100, lr = 0.35\nI0821 16:33:46.900074 32262 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0821 16:35:08.519165 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0821 16:35:08.519403 32262 solver.cpp:404]     Test net output #1: loss = 0.790395 (* 1 = 0.790395 loss)\nI0821 16:35:09.845433 32262 solver.cpp:228] Iteration 13200, loss = 0.00798273\nI0821 16:35:09.845469 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 16:35:09.845485 32262 solver.cpp:244]     Train net output #1: loss = 0.0079828 (* 1 = 0.0079828 loss)\nI0821 16:35:09.928756 32262 sgd_solver.cpp:166] Iteration 13200, lr = 0.35\nI0821 16:37:28.549458 32262 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0821 16:38:50.171535 32262 solver.cpp:404]     Test net output #0: accuracy = 0.694\nI0821 16:38:50.171768 32262 solver.cpp:404]     Test net output #1: loss = 1.41817 (* 1 = 1.41817 loss)\nI0821 16:38:51.498263 32262 
solver.cpp:228] Iteration 13300, loss = 0.0581743\nI0821 16:38:51.498299 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 16:38:51.498314 32262 solver.cpp:244]     Train net output #1: loss = 0.0581743 (* 1 = 0.0581743 loss)\nI0821 16:38:51.583791 32262 sgd_solver.cpp:166] Iteration 13300, lr = 0.35\nI0821 16:41:09.910377 32262 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0821 16:42:31.551559 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76488\nI0821 16:42:31.551806 32262 solver.cpp:404]     Test net output #1: loss = 0.969329 (* 1 = 0.969329 loss)\nI0821 16:42:32.878680 32262 solver.cpp:228] Iteration 13400, loss = 0.144494\nI0821 16:42:32.878718 32262 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0821 16:42:32.878733 32262 solver.cpp:244]     Train net output #1: loss = 0.144495 (* 1 = 0.144495 loss)\nI0821 16:42:32.955122 32262 sgd_solver.cpp:166] Iteration 13400, lr = 0.35\nI0821 16:44:50.932739 32262 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0821 16:46:12.615892 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76808\nI0821 16:46:12.616179 32262 solver.cpp:404]     Test net output #1: loss = 0.925714 (* 1 = 0.925714 loss)\nI0821 16:46:13.942744 32262 solver.cpp:228] Iteration 13500, loss = 0.0209979\nI0821 16:46:13.942778 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 16:46:13.942793 32262 solver.cpp:244]     Train net output #1: loss = 0.020998 (* 1 = 0.020998 loss)\nI0821 16:46:14.021662 32262 sgd_solver.cpp:166] Iteration 13500, lr = 0.35\nI0821 16:48:32.120769 32262 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0821 16:49:53.791453 32262 solver.cpp:404]     Test net output #0: accuracy = 0.59208\nI0821 16:49:53.791700 32262 solver.cpp:404]     Test net output #1: loss = 2.4826 (* 1 = 2.4826 loss)\nI0821 16:49:55.117974 32262 solver.cpp:228] Iteration 13600, loss = 0.0568401\nI0821 16:49:55.118008 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0821 16:49:55.118023 32262 solver.cpp:244]     Train net output #1: loss = 0.0568401 (* 1 = 0.0568401 loss)\nI0821 16:49:55.198391 32262 sgd_solver.cpp:166] Iteration 13600, lr = 0.35\nI0821 16:52:13.271420 32262 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0821 16:53:34.939602 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI0821 16:53:34.939860 32262 solver.cpp:404]     Test net output #1: loss = 0.813236 (* 1 = 0.813236 loss)\nI0821 16:53:36.265980 32262 solver.cpp:228] Iteration 13700, loss = 0.0344241\nI0821 16:53:36.266016 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:53:36.266032 32262 solver.cpp:244]     Train net output #1: loss = 0.0344242 (* 1 = 0.0344242 loss)\nI0821 16:53:36.342314 32262 sgd_solver.cpp:166] Iteration 13700, lr = 0.35\nI0821 16:55:54.340550 32262 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0821 16:57:15.997324 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77788\nI0821 16:57:15.997522 32262 solver.cpp:404]     Test net output #1: loss = 0.943609 (* 1 = 0.943609 loss)\nI0821 16:57:17.324973 32262 solver.cpp:228] Iteration 13800, loss = 0.0462184\nI0821 16:57:17.325009 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 16:57:17.325026 32262 solver.cpp:244]     Train net output #1: loss = 0.0462184 (* 1 = 0.0462184 loss)\nI0821 16:57:17.401001 32262 sgd_solver.cpp:166] Iteration 13800, lr = 0.35\nI0821 16:59:35.352972 32262 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0821 17:00:56.998615 32262 solver.cpp:404]     Test net output #0: accuracy = 0.67492\nI0821 17:00:56.998860 32262 solver.cpp:404]     Test net output #1: loss = 1.64707 (* 1 = 1.64707 loss)\nI0821 17:00:58.325861 32262 solver.cpp:228] Iteration 13900, loss = 0.034518\nI0821 17:00:58.325897 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:00:58.325917 32262 solver.cpp:244]     Train net output #1: loss = 0.034518 (* 1 = 0.034518 loss)\nI0821 
17:00:58.409406 32262 sgd_solver.cpp:166] Iteration 13900, lr = 0.35\nI0821 17:03:16.417537 32262 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0821 17:04:38.063151 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65932\nI0821 17:04:38.063410 32262 solver.cpp:404]     Test net output #1: loss = 1.96596 (* 1 = 1.96596 loss)\nI0821 17:04:39.391214 32262 solver.cpp:228] Iteration 14000, loss = 0.0696928\nI0821 17:04:39.391250 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:04:39.391266 32262 solver.cpp:244]     Train net output #1: loss = 0.0696928 (* 1 = 0.0696928 loss)\nI0821 17:04:39.462515 32262 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0821 17:06:57.439019 32262 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0821 17:08:19.097164 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68136\nI0821 17:08:19.097416 32262 solver.cpp:404]     Test net output #1: loss = 1.76031 (* 1 = 1.76031 loss)\nI0821 17:08:20.424877 32262 solver.cpp:228] Iteration 14100, loss = 0.0172554\nI0821 17:08:20.424919 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:08:20.424937 32262 solver.cpp:244]     Train net output #1: loss = 0.0172555 (* 1 = 0.0172555 loss)\nI0821 17:08:20.504256 32262 sgd_solver.cpp:166] Iteration 14100, lr = 0.35\nI0821 17:10:38.483148 32262 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0821 17:12:00.139899 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77056\nI0821 17:12:00.140173 32262 solver.cpp:404]     Test net output #1: loss = 1.0841 (* 1 = 1.0841 loss)\nI0821 17:12:01.467625 32262 solver.cpp:228] Iteration 14200, loss = 0.0528554\nI0821 17:12:01.467661 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:12:01.467676 32262 solver.cpp:244]     Train net output #1: loss = 0.0528555 (* 1 = 0.0528555 loss)\nI0821 17:12:01.545676 32262 sgd_solver.cpp:166] Iteration 14200, lr = 0.35\nI0821 17:14:19.560716 32262 solver.cpp:337] Iteration 
14300, Testing net (#0)\nI0821 17:15:41.210947 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72288\nI0821 17:15:41.211201 32262 solver.cpp:404]     Test net output #1: loss = 1.25981 (* 1 = 1.25981 loss)\nI0821 17:15:42.538266 32262 solver.cpp:228] Iteration 14300, loss = 0.0466779\nI0821 17:15:42.538301 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:15:42.538316 32262 solver.cpp:244]     Train net output #1: loss = 0.046678 (* 1 = 0.046678 loss)\nI0821 17:15:42.615288 32262 sgd_solver.cpp:166] Iteration 14300, lr = 0.35\nI0821 17:18:00.575356 32262 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0821 17:19:22.228835 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76888\nI0821 17:19:22.229056 32262 solver.cpp:404]     Test net output #1: loss = 1.04077 (* 1 = 1.04077 loss)\nI0821 17:19:23.555640 32262 solver.cpp:228] Iteration 14400, loss = 0.0808177\nI0821 17:19:23.555675 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:19:23.555691 32262 solver.cpp:244]     Train net output #1: loss = 0.0808178 (* 1 = 0.0808178 loss)\nI0821 17:19:23.628191 32262 sgd_solver.cpp:166] Iteration 14400, lr = 0.35\nI0821 17:21:41.614825 32262 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0821 17:23:03.266201 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76096\nI0821 17:23:03.266418 32262 solver.cpp:404]     Test net output #1: loss = 1.07554 (* 1 = 1.07554 loss)\nI0821 17:23:04.594102 32262 solver.cpp:228] Iteration 14500, loss = 0.0211499\nI0821 17:23:04.594138 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:23:04.594154 32262 solver.cpp:244]     Train net output #1: loss = 0.02115 (* 1 = 0.02115 loss)\nI0821 17:23:04.669538 32262 sgd_solver.cpp:166] Iteration 14500, lr = 0.35\nI0821 17:25:22.635053 32262 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0821 17:26:44.299837 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71156\nI0821 
17:26:44.300103 32262 solver.cpp:404]     Test net output #1: loss = 1.39215 (* 1 = 1.39215 loss)\nI0821 17:26:45.626137 32262 solver.cpp:228] Iteration 14600, loss = 0.0127568\nI0821 17:26:45.626173 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:26:45.626188 32262 solver.cpp:244]     Train net output #1: loss = 0.0127569 (* 1 = 0.0127569 loss)\nI0821 17:26:45.706455 32262 sgd_solver.cpp:166] Iteration 14600, lr = 0.35\nI0821 17:29:03.711788 32262 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0821 17:30:25.364650 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68436\nI0821 17:30:25.364904 32262 solver.cpp:404]     Test net output #1: loss = 1.66105 (* 1 = 1.66105 loss)\nI0821 17:30:26.691589 32262 solver.cpp:228] Iteration 14700, loss = 0.0518771\nI0821 17:30:26.691625 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:30:26.691642 32262 solver.cpp:244]     Train net output #1: loss = 0.0518772 (* 1 = 0.0518772 loss)\nI0821 17:30:26.772061 32262 sgd_solver.cpp:166] Iteration 14700, lr = 0.35\nI0821 17:32:44.815917 32262 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0821 17:34:06.470553 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64484\nI0821 17:34:06.470783 32262 solver.cpp:404]     Test net output #1: loss = 1.87512 (* 1 = 1.87512 loss)\nI0821 17:34:07.797960 32262 solver.cpp:228] Iteration 14800, loss = 0.0160892\nI0821 17:34:07.797997 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 17:34:07.798013 32262 solver.cpp:244]     Train net output #1: loss = 0.0160894 (* 1 = 0.0160894 loss)\nI0821 17:34:07.878059 32262 sgd_solver.cpp:166] Iteration 14800, lr = 0.35\nI0821 17:36:25.834738 32262 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0821 17:37:47.484787 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7508\nI0821 17:37:47.485028 32262 solver.cpp:404]     Test net output #1: loss = 1.02494 (* 1 = 1.02494 loss)\nI0821 17:37:48.812037 32262 
solver.cpp:228] Iteration 14900, loss = 0.035565\nI0821 17:37:48.812073 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:37:48.812089 32262 solver.cpp:244]     Train net output #1: loss = 0.0355652 (* 1 = 0.0355652 loss)\nI0821 17:37:48.890084 32262 sgd_solver.cpp:166] Iteration 14900, lr = 0.35\nI0821 17:40:06.893822 32262 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0821 17:41:28.543524 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7472\nI0821 17:41:28.543725 32262 solver.cpp:404]     Test net output #1: loss = 1.05019 (* 1 = 1.05019 loss)\nI0821 17:41:29.870609 32262 solver.cpp:228] Iteration 15000, loss = 0.0338462\nI0821 17:41:29.870645 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:41:29.870661 32262 solver.cpp:244]     Train net output #1: loss = 0.0338463 (* 1 = 0.0338463 loss)\nI0821 17:41:29.949120 32262 sgd_solver.cpp:166] Iteration 15000, lr = 0.35\nI0821 17:43:47.976099 32262 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0821 17:45:09.639107 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7826\nI0821 17:45:09.639339 32262 solver.cpp:404]     Test net output #1: loss = 0.87845 (* 1 = 0.87845 loss)\nI0821 17:45:10.965842 32262 solver.cpp:228] Iteration 15100, loss = 0.0557825\nI0821 17:45:10.965878 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 17:45:10.965893 32262 solver.cpp:244]     Train net output #1: loss = 0.0557826 (* 1 = 0.0557826 loss)\nI0821 17:45:11.042619 32262 sgd_solver.cpp:166] Iteration 15100, lr = 0.35\nI0821 17:47:29.080559 32262 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0821 17:48:50.749655 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76308\nI0821 17:48:50.749917 32262 solver.cpp:404]     Test net output #1: loss = 1.03298 (* 1 = 1.03298 loss)\nI0821 17:48:52.077805 32262 solver.cpp:228] Iteration 15200, loss = 0.00842723\nI0821 17:48:52.077841 32262 solver.cpp:244]     Train net output #0: accuracy 
= 1\nI0821 17:48:52.077857 32262 solver.cpp:244]     Train net output #1: loss = 0.00842733 (* 1 = 0.00842733 loss)\nI0821 17:48:52.149405 32262 sgd_solver.cpp:166] Iteration 15200, lr = 0.35\nI0821 17:51:10.155585 32262 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0821 17:52:31.810797 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6818\nI0821 17:52:31.811053 32262 solver.cpp:404]     Test net output #1: loss = 1.61522 (* 1 = 1.61522 loss)\nI0821 17:52:33.137854 32262 solver.cpp:228] Iteration 15300, loss = 0.0758679\nI0821 17:52:33.137890 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:52:33.137905 32262 solver.cpp:244]     Train net output #1: loss = 0.0758679 (* 1 = 0.0758679 loss)\nI0821 17:52:33.215016 32262 sgd_solver.cpp:166] Iteration 15300, lr = 0.35\nI0821 17:54:51.294414 32262 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0821 17:56:12.945382 32262 solver.cpp:404]     Test net output #0: accuracy = 0.67744\nI0821 17:56:12.945617 32262 solver.cpp:404]     Test net output #1: loss = 1.94976 (* 1 = 1.94976 loss)\nI0821 17:56:14.272145 32262 solver.cpp:228] Iteration 15400, loss = 0.0362628\nI0821 17:56:14.272179 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 17:56:14.272194 32262 solver.cpp:244]     Train net output #1: loss = 0.0362629 (* 1 = 0.0362629 loss)\nI0821 17:56:14.347245 32262 sgd_solver.cpp:166] Iteration 15400, lr = 0.35\nI0821 17:58:32.387186 32262 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0821 17:59:54.041997 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75404\nI0821 17:59:54.042287 32262 solver.cpp:404]     Test net output #1: loss = 1.07682 (* 1 = 1.07682 loss)\nI0821 17:59:55.368686 32262 solver.cpp:228] Iteration 15500, loss = 0.0269877\nI0821 17:59:55.368721 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 17:59:55.368737 32262 solver.cpp:244]     Train net output #1: loss = 0.0269878 (* 1 = 0.0269878 loss)\nI0821 
17:59:55.443301 32262 sgd_solver.cpp:166] Iteration 15500, lr = 0.35\nI0821 18:02:13.501183 32262 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0821 18:03:35.151849 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73592\nI0821 18:03:35.152119 32262 solver.cpp:404]     Test net output #1: loss = 1.21422 (* 1 = 1.21422 loss)\nI0821 18:03:36.478809 32262 solver.cpp:228] Iteration 15600, loss = 0.0269198\nI0821 18:03:36.478845 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:03:36.478860 32262 solver.cpp:244]     Train net output #1: loss = 0.0269199 (* 1 = 0.0269199 loss)\nI0821 18:03:36.553779 32262 sgd_solver.cpp:166] Iteration 15600, lr = 0.35\nI0821 18:05:54.613200 32262 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0821 18:07:16.263860 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI0821 18:07:16.264128 32262 solver.cpp:404]     Test net output #1: loss = 0.83366 (* 1 = 0.83366 loss)\nI0821 18:07:17.591116 32262 solver.cpp:228] Iteration 15700, loss = 0.0138659\nI0821 18:07:17.591153 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:07:17.591169 32262 solver.cpp:244]     Train net output #1: loss = 0.013866 (* 1 = 0.013866 loss)\nI0821 18:07:17.664793 32262 sgd_solver.cpp:166] Iteration 15700, lr = 0.35\nI0821 18:09:35.754523 32262 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0821 18:10:57.402794 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71156\nI0821 18:10:57.403064 32262 solver.cpp:404]     Test net output #1: loss = 1.38939 (* 1 = 1.38939 loss)\nI0821 18:10:58.729195 32262 solver.cpp:228] Iteration 15800, loss = 0.0201131\nI0821 18:10:58.729229 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:10:58.729244 32262 solver.cpp:244]     Train net output #1: loss = 0.0201132 (* 1 = 0.0201132 loss)\nI0821 18:10:58.810106 32262 sgd_solver.cpp:166] Iteration 15800, lr = 0.35\nI0821 18:13:16.851315 32262 solver.cpp:337] Iteration 15900, Testing 
net (#0)\nI0821 18:14:38.515918 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65036\nI0821 18:14:38.516189 32262 solver.cpp:404]     Test net output #1: loss = 1.63111 (* 1 = 1.63111 loss)\nI0821 18:14:39.843626 32262 solver.cpp:228] Iteration 15900, loss = 0.120954\nI0821 18:14:39.843659 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 18:14:39.843674 32262 solver.cpp:244]     Train net output #1: loss = 0.120954 (* 1 = 0.120954 loss)\nI0821 18:14:39.915194 32262 sgd_solver.cpp:166] Iteration 15900, lr = 0.35\nI0821 18:16:57.933761 32262 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0821 18:18:19.585283 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79428\nI0821 18:18:19.585556 32262 solver.cpp:404]     Test net output #1: loss = 0.850679 (* 1 = 0.850679 loss)\nI0821 18:18:20.912199 32262 solver.cpp:228] Iteration 16000, loss = 0.0263308\nI0821 18:18:20.912236 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:18:20.912251 32262 solver.cpp:244]     Train net output #1: loss = 0.0263309 (* 1 = 0.0263309 loss)\nI0821 18:18:20.989181 32262 sgd_solver.cpp:166] Iteration 16000, lr = 0.35\nI0821 18:20:39.020504 32262 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0821 18:22:00.668138 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7342\nI0821 18:22:00.668422 32262 solver.cpp:404]     Test net output #1: loss = 1.16706 (* 1 = 1.16706 loss)\nI0821 18:22:01.995904 32262 solver.cpp:228] Iteration 16100, loss = 0.0652976\nI0821 18:22:01.995941 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 18:22:01.995956 32262 solver.cpp:244]     Train net output #1: loss = 0.0652977 (* 1 = 0.0652977 loss)\nI0821 18:22:02.072542 32262 sgd_solver.cpp:166] Iteration 16100, lr = 0.35\nI0821 18:24:20.156944 32262 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0821 18:25:41.749847 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72896\nI0821 18:25:41.750110 32262 
solver.cpp:404]     Test net output #1: loss = 1.21763 (* 1 = 1.21763 loss)\nI0821 18:25:43.076023 32262 solver.cpp:228] Iteration 16200, loss = 0.0447667\nI0821 18:25:43.076056 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:25:43.076071 32262 solver.cpp:244]     Train net output #1: loss = 0.0447668 (* 1 = 0.0447668 loss)\nI0821 18:25:43.155167 32262 sgd_solver.cpp:166] Iteration 16200, lr = 0.35\nI0821 18:28:01.278414 32262 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0821 18:29:22.875739 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70796\nI0821 18:29:22.875967 32262 solver.cpp:404]     Test net output #1: loss = 1.50725 (* 1 = 1.50725 loss)\nI0821 18:29:24.202263 32262 solver.cpp:228] Iteration 16300, loss = 0.0467181\nI0821 18:29:24.202296 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:29:24.202311 32262 solver.cpp:244]     Train net output #1: loss = 0.0467182 (* 1 = 0.0467182 loss)\nI0821 18:29:24.277742 32262 sgd_solver.cpp:166] Iteration 16300, lr = 0.35\nI0821 18:31:42.349793 32262 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0821 18:33:03.942574 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77368\nI0821 18:33:03.942834 32262 solver.cpp:404]     Test net output #1: loss = 0.924001 (* 1 = 0.924001 loss)\nI0821 18:33:05.268878 32262 solver.cpp:228] Iteration 16400, loss = 0.0564877\nI0821 18:33:05.268910 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:33:05.268925 32262 solver.cpp:244]     Train net output #1: loss = 0.0564878 (* 1 = 0.0564878 loss)\nI0821 18:33:05.346091 32262 sgd_solver.cpp:166] Iteration 16400, lr = 0.35\nI0821 18:35:23.569628 32262 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0821 18:36:45.158460 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76612\nI0821 18:36:45.158735 32262 solver.cpp:404]     Test net output #1: loss = 0.986816 (* 1 = 0.986816 loss)\nI0821 18:36:46.488844 32262 solver.cpp:228] 
Iteration 16500, loss = 0.044721\nI0821 18:36:46.488878 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:36:46.488893 32262 solver.cpp:244]     Train net output #1: loss = 0.0447211 (* 1 = 0.0447211 loss)\nI0821 18:36:46.570606 32262 sgd_solver.cpp:166] Iteration 16500, lr = 0.35\nI0821 18:39:05.251438 32262 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0821 18:40:26.846035 32262 solver.cpp:404]     Test net output #0: accuracy = 0.62304\nI0821 18:40:26.846297 32262 solver.cpp:404]     Test net output #1: loss = 2.10565 (* 1 = 2.10565 loss)\nI0821 18:40:28.176836 32262 solver.cpp:228] Iteration 16600, loss = 0.0218788\nI0821 18:40:28.176870 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:40:28.176887 32262 solver.cpp:244]     Train net output #1: loss = 0.0218789 (* 1 = 0.0218789 loss)\nI0821 18:40:28.260681 32262 sgd_solver.cpp:166] Iteration 16600, lr = 0.35\nI0821 18:42:46.843618 32262 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0821 18:44:08.449558 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71564\nI0821 18:44:08.449820 32262 solver.cpp:404]     Test net output #1: loss = 1.36433 (* 1 = 1.36433 loss)\nI0821 18:44:09.780095 32262 solver.cpp:228] Iteration 16700, loss = 0.0397105\nI0821 18:44:09.780130 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:44:09.780145 32262 solver.cpp:244]     Train net output #1: loss = 0.0397106 (* 1 = 0.0397106 loss)\nI0821 18:44:09.864962 32262 sgd_solver.cpp:166] Iteration 16700, lr = 0.35\nI0821 18:46:28.548516 32262 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0821 18:47:50.137373 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79568\nI0821 18:47:50.137646 32262 solver.cpp:404]     Test net output #1: loss = 0.85526 (* 1 = 0.85526 loss)\nI0821 18:47:51.468529 32262 solver.cpp:228] Iteration 16800, loss = 0.0120399\nI0821 18:47:51.468564 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
18:47:51.468578 32262 solver.cpp:244]     Train net output #1: loss = 0.01204 (* 1 = 0.01204 loss)\nI0821 18:47:51.542809 32262 sgd_solver.cpp:166] Iteration 16800, lr = 0.35\nI0821 18:50:10.265504 32262 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0821 18:51:31.865068 32262 solver.cpp:404]     Test net output #0: accuracy = 0.744\nI0821 18:51:31.865335 32262 solver.cpp:404]     Test net output #1: loss = 1.38413 (* 1 = 1.38413 loss)\nI0821 18:51:33.195436 32262 solver.cpp:228] Iteration 16900, loss = 0.0449261\nI0821 18:51:33.195472 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 18:51:33.195487 32262 solver.cpp:244]     Train net output #1: loss = 0.0449261 (* 1 = 0.0449261 loss)\nI0821 18:51:33.272321 32262 sgd_solver.cpp:166] Iteration 16900, lr = 0.35\nI0821 18:53:52.070158 32262 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0821 18:55:13.672540 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72212\nI0821 18:55:13.672802 32262 solver.cpp:404]     Test net output #1: loss = 1.62145 (* 1 = 1.62145 loss)\nI0821 18:55:15.003070 32262 solver.cpp:228] Iteration 17000, loss = 0.0208324\nI0821 18:55:15.003103 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 18:55:15.003119 32262 solver.cpp:244]     Train net output #1: loss = 0.0208325 (* 1 = 0.0208325 loss)\nI0821 18:55:15.082433 32262 sgd_solver.cpp:166] Iteration 17000, lr = 0.35\nI0821 18:57:33.784212 32262 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0821 18:58:55.396430 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76636\nI0821 18:58:55.396670 32262 solver.cpp:404]     Test net output #1: loss = 1.25097 (* 1 = 1.25097 loss)\nI0821 18:58:56.727371 32262 solver.cpp:228] Iteration 17100, loss = 0.0173918\nI0821 18:58:56.727403 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 18:58:56.727418 32262 solver.cpp:244]     Train net output #1: loss = 0.0173919 (* 1 = 0.0173919 loss)\nI0821 18:58:56.798689 32262 
sgd_solver.cpp:166] Iteration 17100, lr = 0.35\nI0821 19:01:15.383194 32262 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0821 19:02:37.044746 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6878\nI0821 19:02:37.045089 32262 solver.cpp:404]     Test net output #1: loss = 1.92239 (* 1 = 1.92239 loss)\nI0821 19:02:38.374857 32262 solver.cpp:228] Iteration 17200, loss = 0.0992016\nI0821 19:02:38.374889 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:02:38.374903 32262 solver.cpp:244]     Train net output #1: loss = 0.0992017 (* 1 = 0.0992017 loss)\nI0821 19:02:38.456197 32262 sgd_solver.cpp:166] Iteration 17200, lr = 0.35\nI0821 19:04:57.231278 32262 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0821 19:06:18.891299 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65932\nI0821 19:06:18.891544 32262 solver.cpp:404]     Test net output #1: loss = 2.05331 (* 1 = 2.05331 loss)\nI0821 19:06:20.222281 32262 solver.cpp:228] Iteration 17300, loss = 0.119243\nI0821 19:06:20.222316 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 19:06:20.222331 32262 solver.cpp:244]     Train net output #1: loss = 0.119243 (* 1 = 0.119243 loss)\nI0821 19:06:20.297564 32262 sgd_solver.cpp:166] Iteration 17300, lr = 0.35\nI0821 19:08:39.075549 32262 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0821 19:10:00.732122 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72332\nI0821 19:10:00.732370 32262 solver.cpp:404]     Test net output #1: loss = 1.39043 (* 1 = 1.39043 loss)\nI0821 19:10:02.062079 32262 solver.cpp:228] Iteration 17400, loss = 0.0345557\nI0821 19:10:02.062113 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:10:02.062127 32262 solver.cpp:244]     Train net output #1: loss = 0.0345558 (* 1 = 0.0345558 loss)\nI0821 19:10:02.142320 32262 sgd_solver.cpp:166] Iteration 17400, lr = 0.35\nI0821 19:12:20.736919 32262 solver.cpp:337] Iteration 17500, Testing net 
(#0)\nI0821 19:13:42.391623 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64732\nI0821 19:13:42.391894 32262 solver.cpp:404]     Test net output #1: loss = 2.22072 (* 1 = 2.22072 loss)\nI0821 19:13:43.722147 32262 solver.cpp:228] Iteration 17500, loss = 0.0469085\nI0821 19:13:43.722182 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:13:43.722196 32262 solver.cpp:244]     Train net output #1: loss = 0.0469086 (* 1 = 0.0469086 loss)\nI0821 19:13:43.795928 32262 sgd_solver.cpp:166] Iteration 17500, lr = 0.35\nI0821 19:16:02.437701 32262 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0821 19:17:24.094656 32262 solver.cpp:404]     Test net output #0: accuracy = 0.69092\nI0821 19:17:24.094924 32262 solver.cpp:404]     Test net output #1: loss = 1.70268 (* 1 = 1.70268 loss)\nI0821 19:17:25.425822 32262 solver.cpp:228] Iteration 17600, loss = 0.0298642\nI0821 19:17:25.425855 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:17:25.425870 32262 solver.cpp:244]     Train net output #1: loss = 0.0298642 (* 1 = 0.0298642 loss)\nI0821 19:17:25.505762 32262 sgd_solver.cpp:166] Iteration 17600, lr = 0.35\nI0821 19:19:44.279918 32262 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0821 19:21:05.946020 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78868\nI0821 19:21:05.946262 32262 solver.cpp:404]     Test net output #1: loss = 0.925318 (* 1 = 0.925318 loss)\nI0821 19:21:07.276697 32262 solver.cpp:228] Iteration 17700, loss = 0.0188237\nI0821 19:21:07.276731 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 19:21:07.276746 32262 solver.cpp:244]     Train net output #1: loss = 0.0188238 (* 1 = 0.0188238 loss)\nI0821 19:21:07.358232 32262 sgd_solver.cpp:166] Iteration 17700, lr = 0.35\nI0821 19:23:26.127775 32262 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0821 19:24:47.795945 32262 solver.cpp:404]     Test net output #0: accuracy = 0.60508\nI0821 19:24:47.796231 32262 
solver.cpp:404]     Test net output #1: loss = 2.0996 (* 1 = 2.0996 loss)\nI0821 19:24:49.126457 32262 solver.cpp:228] Iteration 17800, loss = 0.0119658\nI0821 19:24:49.126489 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:24:49.126503 32262 solver.cpp:244]     Train net output #1: loss = 0.0119658 (* 1 = 0.0119658 loss)\nI0821 19:24:49.206871 32262 sgd_solver.cpp:166] Iteration 17800, lr = 0.35\nI0821 19:27:07.961639 32262 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0821 19:28:29.634910 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73704\nI0821 19:28:29.635191 32262 solver.cpp:404]     Test net output #1: loss = 1.28586 (* 1 = 1.28586 loss)\nI0821 19:28:30.965657 32262 solver.cpp:228] Iteration 17900, loss = 0.0479986\nI0821 19:28:30.965692 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:28:30.965706 32262 solver.cpp:244]     Train net output #1: loss = 0.0479986 (* 1 = 0.0479986 loss)\nI0821 19:28:31.040908 32262 sgd_solver.cpp:166] Iteration 17900, lr = 0.35\nI0821 19:30:49.594063 32262 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0821 19:32:11.269742 32262 solver.cpp:404]     Test net output #0: accuracy = 0.629\nI0821 19:32:11.269999 32262 solver.cpp:404]     Test net output #1: loss = 2.10504 (* 1 = 2.10504 loss)\nI0821 19:32:12.599680 32262 solver.cpp:228] Iteration 18000, loss = 0.0404837\nI0821 19:32:12.599714 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:32:12.599730 32262 solver.cpp:244]     Train net output #1: loss = 0.0404837 (* 1 = 0.0404837 loss)\nI0821 19:32:12.677939 32262 sgd_solver.cpp:166] Iteration 18000, lr = 0.35\nI0821 19:34:31.108173 32262 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0821 19:35:52.777410 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76732\nI0821 19:35:52.777668 32262 solver.cpp:404]     Test net output #1: loss = 1.10085 (* 1 = 1.10085 loss)\nI0821 19:35:54.108264 32262 solver.cpp:228] Iteration 
18100, loss = 0.0469333\nI0821 19:35:54.108297 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 19:35:54.108312 32262 solver.cpp:244]     Train net output #1: loss = 0.0469334 (* 1 = 0.0469334 loss)\nI0821 19:35:54.186923 32262 sgd_solver.cpp:166] Iteration 18100, lr = 0.35\nI0821 19:38:12.775497 32262 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0821 19:39:34.440142 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75376\nI0821 19:39:34.440433 32262 solver.cpp:404]     Test net output #1: loss = 1.07126 (* 1 = 1.07126 loss)\nI0821 19:39:35.770434 32262 solver.cpp:228] Iteration 18200, loss = 0.0426073\nI0821 19:39:35.770467 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 19:39:35.770483 32262 solver.cpp:244]     Train net output #1: loss = 0.0426074 (* 1 = 0.0426074 loss)\nI0821 19:39:35.853605 32262 sgd_solver.cpp:166] Iteration 18200, lr = 0.35\nI0821 19:41:54.566150 32262 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0821 19:43:16.233927 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72236\nI0821 19:43:16.234213 32262 solver.cpp:404]     Test net output #1: loss = 1.31043 (* 1 = 1.31043 loss)\nI0821 19:43:17.564496 32262 solver.cpp:228] Iteration 18300, loss = 0.052324\nI0821 19:43:17.564533 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 19:43:17.564556 32262 solver.cpp:244]     Train net output #1: loss = 0.0523241 (* 1 = 0.0523241 loss)\nI0821 19:43:17.644799 32262 sgd_solver.cpp:166] Iteration 18300, lr = 0.35\nI0821 19:45:36.127220 32262 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0821 19:46:57.792225 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73432\nI0821 19:46:57.792510 32262 solver.cpp:404]     Test net output #1: loss = 1.36537 (* 1 = 1.36537 loss)\nI0821 19:46:59.124086 32262 solver.cpp:228] Iteration 18400, loss = 0.00242323\nI0821 19:46:59.124121 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
19:46:59.124145 32262 solver.cpp:244]     Train net output #1: loss = 0.00242333 (* 1 = 0.00242333 loss)\nI0821 19:46:59.205090 32262 sgd_solver.cpp:166] Iteration 18400, lr = 0.35\nI0821 19:49:17.719568 32262 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0821 19:50:39.384953 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70824\nI0821 19:50:39.385206 32262 solver.cpp:404]     Test net output #1: loss = 1.45825 (* 1 = 1.45825 loss)\nI0821 19:50:40.716594 32262 solver.cpp:228] Iteration 18500, loss = 0.015114\nI0821 19:50:40.716632 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 19:50:40.716653 32262 solver.cpp:244]     Train net output #1: loss = 0.0151141 (* 1 = 0.0151141 loss)\nI0821 19:50:40.798347 32262 sgd_solver.cpp:166] Iteration 18500, lr = 0.35\nI0821 19:52:59.390570 32262 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0821 19:54:21.066453 32262 solver.cpp:404]     Test net output #0: accuracy = 0.782\nI0821 19:54:21.066732 32262 solver.cpp:404]     Test net output #1: loss = 0.892993 (* 1 = 0.892993 loss)\nI0821 19:54:22.397918 32262 solver.cpp:228] Iteration 18600, loss = 0.0623595\nI0821 19:54:22.397953 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 19:54:22.397971 32262 solver.cpp:244]     Train net output #1: loss = 0.0623596 (* 1 = 0.0623596 loss)\nI0821 19:54:22.475064 32262 sgd_solver.cpp:166] Iteration 18600, lr = 0.35\nI0821 19:56:41.246139 32262 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0821 19:58:02.920733 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75856\nI0821 19:58:02.921010 32262 solver.cpp:404]     Test net output #1: loss = 1.21881 (* 1 = 1.21881 loss)\nI0821 19:58:04.251380 32262 solver.cpp:228] Iteration 18700, loss = 0.0697345\nI0821 19:58:04.251416 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0821 19:58:04.251430 32262 solver.cpp:244]     Train net output #1: loss = 0.0697345 (* 1 = 0.0697345 loss)\nI0821 19:58:04.331022 
32262 sgd_solver.cpp:166] Iteration 18700, lr = 0.35\nI0821 20:00:22.895097 32262 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0821 20:01:44.569277 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74212\nI0821 20:01:44.569556 32262 solver.cpp:404]     Test net output #1: loss = 1.1302 (* 1 = 1.1302 loss)\nI0821 20:01:45.900547 32262 solver.cpp:228] Iteration 18800, loss = 0.0957819\nI0821 20:01:45.900583 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 20:01:45.900598 32262 solver.cpp:244]     Train net output #1: loss = 0.095782 (* 1 = 0.095782 loss)\nI0821 20:01:45.972090 32262 sgd_solver.cpp:166] Iteration 18800, lr = 0.35\nI0821 20:04:04.536043 32262 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0821 20:05:26.206979 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73984\nI0821 20:05:26.207233 32262 solver.cpp:404]     Test net output #1: loss = 1.36061 (* 1 = 1.36061 loss)\nI0821 20:05:27.537680 32262 solver.cpp:228] Iteration 18900, loss = 0.0137423\nI0821 20:05:27.537714 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:05:27.537729 32262 solver.cpp:244]     Train net output #1: loss = 0.0137423 (* 1 = 0.0137423 loss)\nI0821 20:05:27.619885 32262 sgd_solver.cpp:166] Iteration 18900, lr = 0.35\nI0821 20:07:46.571426 32262 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0821 20:09:08.238059 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64952\nI0821 20:09:08.238332 32262 solver.cpp:404]     Test net output #1: loss = 1.8118 (* 1 = 1.8118 loss)\nI0821 20:09:09.569463 32262 solver.cpp:228] Iteration 19000, loss = 0.0138867\nI0821 20:09:09.569496 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:09:09.569512 32262 solver.cpp:244]     Train net output #1: loss = 0.0138867 (* 1 = 0.0138867 loss)\nI0821 20:09:09.648483 32262 sgd_solver.cpp:166] Iteration 19000, lr = 0.35\nI0821 20:11:28.762359 32262 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0821 
20:12:50.436614 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75168\nI0821 20:12:50.436900 32262 solver.cpp:404]     Test net output #1: loss = 1.22873 (* 1 = 1.22873 loss)\nI0821 20:12:51.767042 32262 solver.cpp:228] Iteration 19100, loss = 0.028984\nI0821 20:12:51.767078 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:12:51.767093 32262 solver.cpp:244]     Train net output #1: loss = 0.0289841 (* 1 = 0.0289841 loss)\nI0821 20:12:51.851466 32262 sgd_solver.cpp:166] Iteration 19100, lr = 0.35\nI0821 20:15:10.975168 32262 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0821 20:16:32.626360 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78848\nI0821 20:16:32.626633 32262 solver.cpp:404]     Test net output #1: loss = 0.973177 (* 1 = 0.973177 loss)\nI0821 20:16:33.956900 32262 solver.cpp:228] Iteration 19200, loss = 0.032907\nI0821 20:16:33.956934 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:16:33.956949 32262 solver.cpp:244]     Train net output #1: loss = 0.032907 (* 1 = 0.032907 loss)\nI0821 20:16:34.041780 32262 sgd_solver.cpp:166] Iteration 19200, lr = 0.35\nI0821 20:18:53.198055 32262 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0821 20:20:14.855614 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75264\nI0821 20:20:14.855890 32262 solver.cpp:404]     Test net output #1: loss = 1.11705 (* 1 = 1.11705 loss)\nI0821 20:20:16.186024 32262 solver.cpp:228] Iteration 19300, loss = 0.0992398\nI0821 20:20:16.186058 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:20:16.186074 32262 solver.cpp:244]     Train net output #1: loss = 0.0992398 (* 1 = 0.0992398 loss)\nI0821 20:20:16.273042 32262 sgd_solver.cpp:166] Iteration 19300, lr = 0.35\nI0821 20:22:35.420878 32262 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0821 20:23:57.082448 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64212\nI0821 20:23:57.082721 32262 solver.cpp:404]     
Test net output #1: loss = 1.93206 (* 1 = 1.93206 loss)\nI0821 20:23:58.413166 32262 solver.cpp:228] Iteration 19400, loss = 0.0324475\nI0821 20:23:58.413198 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:23:58.413213 32262 solver.cpp:244]     Train net output #1: loss = 0.0324475 (* 1 = 0.0324475 loss)\nI0821 20:23:58.499871 32262 sgd_solver.cpp:166] Iteration 19400, lr = 0.35\nI0821 20:26:17.722990 32262 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0821 20:27:39.378659 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7884\nI0821 20:27:39.378935 32262 solver.cpp:404]     Test net output #1: loss = 0.971664 (* 1 = 0.971664 loss)\nI0821 20:27:40.709455 32262 solver.cpp:228] Iteration 19500, loss = 0.0302581\nI0821 20:27:40.709489 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:27:40.709504 32262 solver.cpp:244]     Train net output #1: loss = 0.0302581 (* 1 = 0.0302581 loss)\nI0821 20:27:40.796061 32262 sgd_solver.cpp:166] Iteration 19500, lr = 0.35\nI0821 20:30:00.005239 32262 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0821 20:31:21.663470 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72752\nI0821 20:31:21.663753 32262 solver.cpp:404]     Test net output #1: loss = 1.22567 (* 1 = 1.22567 loss)\nI0821 20:31:22.994684 32262 solver.cpp:228] Iteration 19600, loss = 0.0469009\nI0821 20:31:22.994719 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:31:22.994735 32262 solver.cpp:244]     Train net output #1: loss = 0.0469009 (* 1 = 0.0469009 loss)\nI0821 20:31:23.080674 32262 sgd_solver.cpp:166] Iteration 19600, lr = 0.35\nI0821 20:33:42.245069 32262 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0821 20:35:03.899390 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7344\nI0821 20:35:03.899664 32262 solver.cpp:404]     Test net output #1: loss = 1.24222 (* 1 = 1.24222 loss)\nI0821 20:35:05.231560 32262 solver.cpp:228] Iteration 19700, loss = 
0.0500391\nI0821 20:35:05.231595 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 20:35:05.231611 32262 solver.cpp:244]     Train net output #1: loss = 0.0500392 (* 1 = 0.0500392 loss)\nI0821 20:35:05.318156 32262 sgd_solver.cpp:166] Iteration 19700, lr = 0.35\nI0821 20:37:24.567236 32262 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0821 20:38:46.216938 32262 solver.cpp:404]     Test net output #0: accuracy = 0.734\nI0821 20:38:46.217226 32262 solver.cpp:404]     Test net output #1: loss = 1.35099 (* 1 = 1.35099 loss)\nI0821 20:38:47.547780 32262 solver.cpp:228] Iteration 19800, loss = 0.0246649\nI0821 20:38:47.547814 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:38:47.547830 32262 solver.cpp:244]     Train net output #1: loss = 0.0246649 (* 1 = 0.0246649 loss)\nI0821 20:38:47.631690 32262 sgd_solver.cpp:166] Iteration 19800, lr = 0.35\nI0821 20:41:06.867944 32262 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0821 20:42:28.475481 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78564\nI0821 20:42:28.475725 32262 solver.cpp:404]     Test net output #1: loss = 0.8747 (* 1 = 0.8747 loss)\nI0821 20:42:29.806777 32262 solver.cpp:228] Iteration 19900, loss = 0.0104609\nI0821 20:42:29.806812 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:42:29.806826 32262 solver.cpp:244]     Train net output #1: loss = 0.010461 (* 1 = 0.010461 loss)\nI0821 20:42:29.889257 32262 sgd_solver.cpp:166] Iteration 19900, lr = 0.35\nI0821 20:44:49.072172 32262 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0821 20:46:10.665113 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73032\nI0821 20:46:10.665406 32262 solver.cpp:404]     Test net output #1: loss = 1.42748 (* 1 = 1.42748 loss)\nI0821 20:46:11.996196 32262 solver.cpp:228] Iteration 20000, loss = 0.0289306\nI0821 20:46:11.996232 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:46:11.996246 32262 
solver.cpp:244]     Train net output #1: loss = 0.0289306 (* 1 = 0.0289306 loss)\nI0821 20:46:12.081080 32262 sgd_solver.cpp:166] Iteration 20000, lr = 0.35\nI0821 20:48:31.323648 32262 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0821 20:49:52.923650 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75964\nI0821 20:49:52.923908 32262 solver.cpp:404]     Test net output #1: loss = 0.967664 (* 1 = 0.967664 loss)\nI0821 20:49:54.254470 32262 solver.cpp:228] Iteration 20100, loss = 0.0427608\nI0821 20:49:54.254505 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 20:49:54.254520 32262 solver.cpp:244]     Train net output #1: loss = 0.0427609 (* 1 = 0.0427609 loss)\nI0821 20:49:54.340174 32262 sgd_solver.cpp:166] Iteration 20100, lr = 0.35\nI0821 20:52:13.504418 32262 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0821 20:53:35.096879 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI0821 20:53:35.097175 32262 solver.cpp:404]     Test net output #1: loss = 0.793424 (* 1 = 0.793424 loss)\nI0821 20:53:36.428860 32262 solver.cpp:228] Iteration 20200, loss = 0.0419297\nI0821 20:53:36.428895 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 20:53:36.428910 32262 solver.cpp:244]     Train net output #1: loss = 0.0419298 (* 1 = 0.0419298 loss)\nI0821 20:53:36.509974 32262 sgd_solver.cpp:166] Iteration 20200, lr = 0.35\nI0821 20:55:55.705945 32262 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0821 20:57:17.284708 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78504\nI0821 20:57:17.284988 32262 solver.cpp:404]     Test net output #1: loss = 0.999075 (* 1 = 0.999075 loss)\nI0821 20:57:18.616147 32262 solver.cpp:228] Iteration 20300, loss = 0.00982659\nI0821 20:57:18.616178 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 20:57:18.616194 32262 solver.cpp:244]     Train net output #1: loss = 0.00982665 (* 1 = 0.00982665 loss)\nI0821 20:57:18.703881 32262 
sgd_solver.cpp:166] Iteration 20300, lr = 0.35\nI0821 20:59:37.890215 32262 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0821 21:00:59.476929 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76828\nI0821 21:00:59.477201 32262 solver.cpp:404]     Test net output #1: loss = 0.892198 (* 1 = 0.892198 loss)\nI0821 21:01:00.808178 32262 solver.cpp:228] Iteration 20400, loss = 0.0214029\nI0821 21:01:00.808212 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:01:00.808228 32262 solver.cpp:244]     Train net output #1: loss = 0.0214029 (* 1 = 0.0214029 loss)\nI0821 21:01:00.890544 32262 sgd_solver.cpp:166] Iteration 20400, lr = 0.35\nI0821 21:03:20.024981 32262 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0821 21:04:41.615655 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65468\nI0821 21:04:41.615948 32262 solver.cpp:404]     Test net output #1: loss = 1.62773 (* 1 = 1.62773 loss)\nI0821 21:04:42.945654 32262 solver.cpp:228] Iteration 20500, loss = 0.0196613\nI0821 21:04:42.945688 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:04:42.945703 32262 solver.cpp:244]     Train net output #1: loss = 0.0196613 (* 1 = 0.0196613 loss)\nI0821 21:04:43.029637 32262 sgd_solver.cpp:166] Iteration 20500, lr = 0.35\nI0821 21:07:02.095613 32262 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0821 21:08:23.691792 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65948\nI0821 21:08:23.692075 32262 solver.cpp:404]     Test net output #1: loss = 1.59936 (* 1 = 1.59936 loss)\nI0821 21:08:25.021960 32262 solver.cpp:228] Iteration 20600, loss = 0.0433939\nI0821 21:08:25.021993 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:08:25.022008 32262 solver.cpp:244]     Train net output #1: loss = 0.0433939 (* 1 = 0.0433939 loss)\nI0821 21:08:25.105136 32262 sgd_solver.cpp:166] Iteration 20600, lr = 0.35\nI0821 21:10:44.254910 32262 solver.cpp:337] Iteration 20700, Testing net 
(#0)\nI0821 21:12:05.913712 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81436\nI0821 21:12:05.914012 32262 solver.cpp:404]     Test net output #1: loss = 0.802765 (* 1 = 0.802765 loss)\nI0821 21:12:07.247793 32262 solver.cpp:228] Iteration 20700, loss = 0.017701\nI0821 21:12:07.247836 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:12:07.247851 32262 solver.cpp:244]     Train net output #1: loss = 0.0177011 (* 1 = 0.0177011 loss)\nI0821 21:12:07.330019 32262 sgd_solver.cpp:166] Iteration 20700, lr = 0.35\nI0821 21:14:26.522761 32262 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0821 21:15:48.890686 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6744\nI0821 21:15:48.890959 32262 solver.cpp:404]     Test net output #1: loss = 1.46054 (* 1 = 1.46054 loss)\nI0821 21:15:50.224057 32262 solver.cpp:228] Iteration 20800, loss = 0.0664723\nI0821 21:15:50.224100 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:15:50.224115 32262 solver.cpp:244]     Train net output #1: loss = 0.0664723 (* 1 = 0.0664723 loss)\nI0821 21:15:50.303957 32262 sgd_solver.cpp:166] Iteration 20800, lr = 0.35\nI0821 21:18:09.581612 32262 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0821 21:19:31.715391 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73988\nI0821 21:19:31.715672 32262 solver.cpp:404]     Test net output #1: loss = 1.21201 (* 1 = 1.21201 loss)\nI0821 21:19:33.049849 32262 solver.cpp:228] Iteration 20900, loss = 0.0615601\nI0821 21:19:33.049892 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:19:33.049907 32262 solver.cpp:244]     Train net output #1: loss = 0.0615602 (* 1 = 0.0615602 loss)\nI0821 21:19:33.129357 32262 sgd_solver.cpp:166] Iteration 20900, lr = 0.35\nI0821 21:21:52.440848 32262 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0821 21:23:14.860911 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68376\nI0821 21:23:14.861248 32262 
solver.cpp:404]     Test net output #1: loss = 1.56823 (* 1 = 1.56823 loss)\nI0821 21:23:16.194998 32262 solver.cpp:228] Iteration 21000, loss = 0.0202609\nI0821 21:23:16.195047 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:23:16.195063 32262 solver.cpp:244]     Train net output #1: loss = 0.020261 (* 1 = 0.020261 loss)\nI0821 21:23:16.277849 32262 sgd_solver.cpp:166] Iteration 21000, lr = 0.35\nI0821 21:25:35.605031 32262 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0821 21:26:57.991509 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81184\nI0821 21:26:57.991813 32262 solver.cpp:404]     Test net output #1: loss = 0.784798 (* 1 = 0.784798 loss)\nI0821 21:26:59.325986 32262 solver.cpp:228] Iteration 21100, loss = 0.0251189\nI0821 21:26:59.326028 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:26:59.326047 32262 solver.cpp:244]     Train net output #1: loss = 0.0251189 (* 1 = 0.0251189 loss)\nI0821 21:26:59.407615 32262 sgd_solver.cpp:166] Iteration 21100, lr = 0.35\nI0821 21:29:18.742518 32262 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0821 21:30:40.993679 32262 solver.cpp:404]     Test net output #0: accuracy = 0.704\nI0821 21:30:40.993984 32262 solver.cpp:404]     Test net output #1: loss = 1.52283 (* 1 = 1.52283 loss)\nI0821 21:30:42.328122 32262 solver.cpp:228] Iteration 21200, loss = 0.0229367\nI0821 21:30:42.328166 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:30:42.328181 32262 solver.cpp:244]     Train net output #1: loss = 0.0229367 (* 1 = 0.0229367 loss)\nI0821 21:30:42.406317 32262 sgd_solver.cpp:166] Iteration 21200, lr = 0.35\nI0821 21:33:01.768476 32262 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0821 21:34:24.263550 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79316\nI0821 21:34:24.263880 32262 solver.cpp:404]     Test net output #1: loss = 0.924729 (* 1 = 0.924729 loss)\nI0821 21:34:25.597579 32262 solver.cpp:228] 
Iteration 21300, loss = 0.0827153\nI0821 21:34:25.597621 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 21:34:25.597636 32262 solver.cpp:244]     Train net output #1: loss = 0.0827154 (* 1 = 0.0827154 loss)\nI0821 21:34:25.676389 32262 sgd_solver.cpp:166] Iteration 21300, lr = 0.35\nI0821 21:36:45.067927 32262 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0821 21:38:07.568222 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79728\nI0821 21:38:07.568554 32262 solver.cpp:404]     Test net output #1: loss = 0.802663 (* 1 = 0.802663 loss)\nI0821 21:38:08.902905 32262 solver.cpp:228] Iteration 21400, loss = 0.0247567\nI0821 21:38:08.902947 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 21:38:08.902961 32262 solver.cpp:244]     Train net output #1: loss = 0.0247568 (* 1 = 0.0247568 loss)\nI0821 21:38:08.985718 32262 sgd_solver.cpp:166] Iteration 21400, lr = 0.35\nI0821 21:40:28.328022 32262 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0821 21:41:50.825898 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70376\nI0821 21:41:50.826216 32262 solver.cpp:404]     Test net output #1: loss = 1.55091 (* 1 = 1.55091 loss)\nI0821 21:41:52.158807 32262 solver.cpp:228] Iteration 21500, loss = 0.015803\nI0821 21:41:52.158851 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:41:52.158867 32262 solver.cpp:244]     Train net output #1: loss = 0.0158031 (* 1 = 0.0158031 loss)\nI0821 21:41:52.243455 32262 sgd_solver.cpp:166] Iteration 21500, lr = 0.35\nI0821 21:44:11.574599 32262 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0821 21:45:34.069180 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71228\nI0821 21:45:34.069489 32262 solver.cpp:404]     Test net output #1: loss = 1.59646 (* 1 = 1.59646 loss)\nI0821 21:45:35.402140 32262 solver.cpp:228] Iteration 21600, loss = 0.0170446\nI0821 21:45:35.402184 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
21:45:35.402199 32262 solver.cpp:244]     Train net output #1: loss = 0.0170446 (* 1 = 0.0170446 loss)\nI0821 21:45:35.480330 32262 sgd_solver.cpp:166] Iteration 21600, lr = 0.35\nI0821 21:47:54.767892 32262 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0821 21:49:17.266189 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64564\nI0821 21:49:17.266497 32262 solver.cpp:404]     Test net output #1: loss = 2.04189 (* 1 = 2.04189 loss)\nI0821 21:49:18.600806 32262 solver.cpp:228] Iteration 21700, loss = 0.0608473\nI0821 21:49:18.600847 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 21:49:18.600862 32262 solver.cpp:244]     Train net output #1: loss = 0.0608474 (* 1 = 0.0608474 loss)\nI0821 21:49:18.683594 32262 sgd_solver.cpp:166] Iteration 21700, lr = 0.35\nI0821 21:51:38.033841 32262 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0821 21:53:00.524536 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74036\nI0821 21:53:00.524837 32262 solver.cpp:404]     Test net output #1: loss = 1.26158 (* 1 = 1.26158 loss)\nI0821 21:53:01.857450 32262 solver.cpp:228] Iteration 21800, loss = 0.0148365\nI0821 21:53:01.857491 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 21:53:01.857506 32262 solver.cpp:244]     Train net output #1: loss = 0.0148366 (* 1 = 0.0148366 loss)\nI0821 21:53:01.940929 32262 sgd_solver.cpp:166] Iteration 21800, lr = 0.35\nI0821 21:55:21.275055 32262 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0821 21:56:43.785912 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68848\nI0821 21:56:43.786223 32262 solver.cpp:404]     Test net output #1: loss = 1.54408 (* 1 = 1.54408 loss)\nI0821 21:56:45.119029 32262 solver.cpp:228] Iteration 21900, loss = 0.07873\nI0821 21:56:45.119076 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 21:56:45.119091 32262 solver.cpp:244]     Train net output #1: loss = 0.0787301 (* 1 = 0.0787301 loss)\nI0821 21:56:45.202564 32262 
sgd_solver.cpp:166] Iteration 21900, lr = 0.35\nI0821 21:59:04.555732 32262 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0821 22:00:27.063750 32262 solver.cpp:404]     Test net output #0: accuracy = 0.64276\nI0821 22:00:27.064067 32262 solver.cpp:404]     Test net output #1: loss = 2.03722 (* 1 = 2.03722 loss)\nI0821 22:00:28.396679 32262 solver.cpp:228] Iteration 22000, loss = 0.0332319\nI0821 22:00:28.396723 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:00:28.396739 32262 solver.cpp:244]     Train net output #1: loss = 0.033232 (* 1 = 0.033232 loss)\nI0821 22:00:28.478875 32262 sgd_solver.cpp:166] Iteration 22000, lr = 0.35\nI0821 22:02:47.770495 32262 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0821 22:04:10.273931 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78432\nI0821 22:04:10.274241 32262 solver.cpp:404]     Test net output #1: loss = 1.02183 (* 1 = 1.02183 loss)\nI0821 22:04:11.606956 32262 solver.cpp:228] Iteration 22100, loss = 0.0454351\nI0821 22:04:11.606999 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:04:11.607015 32262 solver.cpp:244]     Train net output #1: loss = 0.0454352 (* 1 = 0.0454352 loss)\nI0821 22:04:11.688289 32262 sgd_solver.cpp:166] Iteration 22100, lr = 0.35\nI0821 22:06:31.025647 32262 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0821 22:07:53.528874 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68768\nI0821 22:07:53.529191 32262 solver.cpp:404]     Test net output #1: loss = 1.56428 (* 1 = 1.56428 loss)\nI0821 22:07:54.861809 32262 solver.cpp:228] Iteration 22200, loss = 0.00508165\nI0821 22:07:54.861851 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:07:54.861866 32262 solver.cpp:244]     Train net output #1: loss = 0.00508174 (* 1 = 0.00508174 loss)\nI0821 22:07:54.943212 32262 sgd_solver.cpp:166] Iteration 22200, lr = 0.35\nI0821 22:10:14.320714 32262 solver.cpp:337] Iteration 22300, Testing net 
(#0)\nI0821 22:11:36.824899 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74824\nI0821 22:11:36.825235 32262 solver.cpp:404]     Test net output #1: loss = 1.21454 (* 1 = 1.21454 loss)\nI0821 22:11:38.158006 32262 solver.cpp:228] Iteration 22300, loss = 0.00881383\nI0821 22:11:38.158053 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:11:38.158068 32262 solver.cpp:244]     Train net output #1: loss = 0.00881393 (* 1 = 0.00881393 loss)\nI0821 22:11:38.236872 32262 sgd_solver.cpp:166] Iteration 22300, lr = 0.35\nI0821 22:13:57.643901 32262 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0821 22:15:20.158252 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6504\nI0821 22:15:20.158581 32262 solver.cpp:404]     Test net output #1: loss = 1.89162 (* 1 = 1.89162 loss)\nI0821 22:15:21.491153 32262 solver.cpp:228] Iteration 22400, loss = 0.0540346\nI0821 22:15:21.491194 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:15:21.491209 32262 solver.cpp:244]     Train net output #1: loss = 0.0540347 (* 1 = 0.0540347 loss)\nI0821 22:15:21.578138 32262 sgd_solver.cpp:166] Iteration 22400, lr = 0.35\nI0821 22:17:41.048326 32262 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0821 22:19:03.553400 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80616\nI0821 22:19:03.553704 32262 solver.cpp:404]     Test net output #1: loss = 0.822032 (* 1 = 0.822032 loss)\nI0821 22:19:04.886662 32262 solver.cpp:228] Iteration 22500, loss = 0.0536317\nI0821 22:19:04.886704 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 22:19:04.886719 32262 solver.cpp:244]     Train net output #1: loss = 0.0536318 (* 1 = 0.0536318 loss)\nI0821 22:19:04.970371 32262 sgd_solver.cpp:166] Iteration 22500, lr = 0.35\nI0821 22:21:24.302875 32262 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0821 22:22:46.794834 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75844\nI0821 22:22:46.795136 32262 
solver.cpp:404]     Test net output #1: loss = 1.06538 (* 1 = 1.06538 loss)\nI0821 22:22:48.128154 32262 solver.cpp:228] Iteration 22600, loss = 0.0328491\nI0821 22:22:48.128196 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:22:48.128211 32262 solver.cpp:244]     Train net output #1: loss = 0.0328491 (* 1 = 0.0328491 loss)\nI0821 22:22:48.207535 32262 sgd_solver.cpp:166] Iteration 22600, lr = 0.35\nI0821 22:25:07.570837 32262 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0821 22:26:30.046463 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82316\nI0821 22:26:30.046763 32262 solver.cpp:404]     Test net output #1: loss = 0.671607 (* 1 = 0.671607 loss)\nI0821 22:26:31.379920 32262 solver.cpp:228] Iteration 22700, loss = 0.0307691\nI0821 22:26:31.379963 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 22:26:31.379978 32262 solver.cpp:244]     Train net output #1: loss = 0.0307692 (* 1 = 0.0307692 loss)\nI0821 22:26:31.459172 32262 sgd_solver.cpp:166] Iteration 22700, lr = 0.35\nI0821 22:28:50.779532 32262 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0821 22:30:13.154749 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72952\nI0821 22:30:13.155053 32262 solver.cpp:404]     Test net output #1: loss = 1.23288 (* 1 = 1.23288 loss)\nI0821 22:30:14.488124 32262 solver.cpp:228] Iteration 22800, loss = 0.0658198\nI0821 22:30:14.488165 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:30:14.488180 32262 solver.cpp:244]     Train net output #1: loss = 0.0658199 (* 1 = 0.0658199 loss)\nI0821 22:30:14.570580 32262 sgd_solver.cpp:166] Iteration 22800, lr = 0.35\nI0821 22:32:33.922255 32262 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0821 22:33:56.429893 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80828\nI0821 22:33:56.430208 32262 solver.cpp:404]     Test net output #1: loss = 0.861251 (* 1 = 0.861251 loss)\nI0821 22:33:57.764677 32262 solver.cpp:228] 
Iteration 22900, loss = 0.0285602\nI0821 22:33:57.764720 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 22:33:57.764735 32262 solver.cpp:244]     Train net output #1: loss = 0.0285603 (* 1 = 0.0285603 loss)\nI0821 22:33:57.848073 32262 sgd_solver.cpp:166] Iteration 22900, lr = 0.35\nI0821 22:36:17.248625 32262 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0821 22:37:39.761627 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71476\nI0821 22:37:39.761950 32262 solver.cpp:404]     Test net output #1: loss = 1.52733 (* 1 = 1.52733 loss)\nI0821 22:37:41.095898 32262 solver.cpp:228] Iteration 23000, loss = 0.0125149\nI0821 22:37:41.095937 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:37:41.095952 32262 solver.cpp:244]     Train net output #1: loss = 0.012515 (* 1 = 0.012515 loss)\nI0821 22:37:41.177470 32262 sgd_solver.cpp:166] Iteration 23000, lr = 0.35\nI0821 22:40:00.480515 32262 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0821 22:41:22.979938 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72476\nI0821 22:41:22.980269 32262 solver.cpp:404]     Test net output #1: loss = 1.34295 (* 1 = 1.34295 loss)\nI0821 22:41:24.314364 32262 solver.cpp:228] Iteration 23100, loss = 0.0229474\nI0821 22:41:24.314404 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:41:24.314419 32262 solver.cpp:244]     Train net output #1: loss = 0.0229475 (* 1 = 0.0229475 loss)\nI0821 22:41:24.395763 32262 sgd_solver.cpp:166] Iteration 23100, lr = 0.35\nI0821 22:43:43.727388 32262 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0821 22:45:06.229593 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74044\nI0821 22:45:06.229933 32262 solver.cpp:404]     Test net output #1: loss = 1.38194 (* 1 = 1.38194 loss)\nI0821 22:45:07.564126 32262 solver.cpp:228] Iteration 23200, loss = 0.0166102\nI0821 22:45:07.564167 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 
22:45:07.564180 32262 solver.cpp:244]     Train net output #1: loss = 0.0166103 (* 1 = 0.0166103 loss)\nI0821 22:45:07.644870 32262 sgd_solver.cpp:166] Iteration 23200, lr = 0.35\nI0821 22:47:26.738258 32262 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0821 22:48:49.177732 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78192\nI0821 22:48:49.178045 32262 solver.cpp:404]     Test net output #1: loss = 0.960899 (* 1 = 0.960899 loss)\nI0821 22:48:50.512377 32262 solver.cpp:228] Iteration 23300, loss = 0.01379\nI0821 22:48:50.512418 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:48:50.512431 32262 solver.cpp:244]     Train net output #1: loss = 0.0137901 (* 1 = 0.0137901 loss)\nI0821 22:48:50.587771 32262 sgd_solver.cpp:166] Iteration 23300, lr = 0.35\nI0821 22:51:09.396378 32262 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0821 22:52:31.891266 32262 solver.cpp:404]     Test net output #0: accuracy = 0.6908\nI0821 22:52:31.891585 32262 solver.cpp:404]     Test net output #1: loss = 1.51412 (* 1 = 1.51412 loss)\nI0821 22:52:33.225792 32262 solver.cpp:228] Iteration 23400, loss = 0.0563487\nI0821 22:52:33.225831 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 22:52:33.225845 32262 solver.cpp:244]     Train net output #1: loss = 0.0563488 (* 1 = 0.0563488 loss)\nI0821 22:52:33.304880 32262 sgd_solver.cpp:166] Iteration 23400, lr = 0.35\nI0821 22:54:52.033296 32262 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0821 22:56:14.585988 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7688\nI0821 22:56:14.586314 32262 solver.cpp:404]     Test net output #1: loss = 1.0701 (* 1 = 1.0701 loss)\nI0821 22:56:15.920698 32262 solver.cpp:228] Iteration 23500, loss = 0.0107094\nI0821 22:56:15.920739 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:56:15.920753 32262 solver.cpp:244]     Train net output #1: loss = 0.0107095 (* 1 = 0.0107095 loss)\nI0821 22:56:15.993520 32262 
sgd_solver.cpp:166] Iteration 23500, lr = 0.35\nI0821 22:58:34.807032 32262 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0821 22:59:57.384166 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72044\nI0821 22:59:57.384511 32262 solver.cpp:404]     Test net output #1: loss = 1.43403 (* 1 = 1.43403 loss)\nI0821 22:59:58.719390 32262 solver.cpp:228] Iteration 23600, loss = 0.0230023\nI0821 22:59:58.719429 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 22:59:58.719444 32262 solver.cpp:244]     Train net output #1: loss = 0.0230023 (* 1 = 0.0230023 loss)\nI0821 22:59:58.799557 32262 sgd_solver.cpp:166] Iteration 23600, lr = 0.35\nI0821 23:02:17.550169 32262 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0821 23:03:40.208655 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77332\nI0821 23:03:40.209007 32262 solver.cpp:404]     Test net output #1: loss = 1.06999 (* 1 = 1.06999 loss)\nI0821 23:03:41.538988 32262 solver.cpp:228] Iteration 23700, loss = 0.0380231\nI0821 23:03:41.539026 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:03:41.539041 32262 solver.cpp:244]     Train net output #1: loss = 0.0380232 (* 1 = 0.0380232 loss)\nI0821 23:03:41.619307 32262 sgd_solver.cpp:166] Iteration 23700, lr = 0.35\nI0821 23:06:00.355228 32262 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0821 23:07:22.905045 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70552\nI0821 23:07:22.905352 32262 solver.cpp:404]     Test net output #1: loss = 1.58441 (* 1 = 1.58441 loss)\nI0821 23:07:24.236536 32262 solver.cpp:228] Iteration 23800, loss = 0.0298452\nI0821 23:07:24.236572 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:07:24.236588 32262 solver.cpp:244]     Train net output #1: loss = 0.0298452 (* 1 = 0.0298452 loss)\nI0821 23:07:24.320075 32262 sgd_solver.cpp:166] Iteration 23800, lr = 0.35\nI0821 23:09:43.077733 32262 solver.cpp:337] Iteration 23900, Testing net 
(#0)\nI0821 23:11:05.699743 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74468\nI0821 23:11:05.700057 32262 solver.cpp:404]     Test net output #1: loss = 1.39766 (* 1 = 1.39766 loss)\nI0821 23:11:07.030493 32262 solver.cpp:228] Iteration 23900, loss = 0.0246445\nI0821 23:11:07.030529 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:11:07.030545 32262 solver.cpp:244]     Train net output #1: loss = 0.0246446 (* 1 = 0.0246446 loss)\nI0821 23:11:07.111986 32262 sgd_solver.cpp:166] Iteration 23900, lr = 0.35\nI0821 23:13:25.764324 32262 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0821 23:14:48.379276 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72684\nI0821 23:14:48.379642 32262 solver.cpp:404]     Test net output #1: loss = 1.24264 (* 1 = 1.24264 loss)\nI0821 23:14:49.709803 32262 solver.cpp:228] Iteration 24000, loss = 0.0458246\nI0821 23:14:49.709838 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:14:49.709854 32262 solver.cpp:244]     Train net output #1: loss = 0.0458246 (* 1 = 0.0458246 loss)\nI0821 23:14:49.787725 32262 sgd_solver.cpp:166] Iteration 24000, lr = 0.35\nI0821 23:17:08.562880 32262 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0821 23:18:31.198812 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75672\nI0821 23:18:31.199126 32262 solver.cpp:404]     Test net output #1: loss = 1.14353 (* 1 = 1.14353 loss)\nI0821 23:18:32.529917 32262 solver.cpp:228] Iteration 24100, loss = 0.0144671\nI0821 23:18:32.529958 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:18:32.529973 32262 solver.cpp:244]     Train net output #1: loss = 0.0144671 (* 1 = 0.0144671 loss)\nI0821 23:18:32.609762 32262 sgd_solver.cpp:166] Iteration 24100, lr = 0.35\nI0821 23:20:51.348513 32262 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0821 23:22:13.899837 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70104\nI0821 23:22:13.900140 32262 
solver.cpp:404]     Test net output #1: loss = 1.78508 (* 1 = 1.78508 loss)\nI0821 23:22:15.231595 32262 solver.cpp:228] Iteration 24200, loss = 0.0539842\nI0821 23:22:15.231639 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:22:15.231655 32262 solver.cpp:244]     Train net output #1: loss = 0.0539842 (* 1 = 0.0539842 loss)\nI0821 23:22:15.313915 32262 sgd_solver.cpp:166] Iteration 24200, lr = 0.35\nI0821 23:24:34.166175 32262 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0821 23:25:56.719657 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77356\nI0821 23:25:56.719992 32262 solver.cpp:404]     Test net output #1: loss = 1.05857 (* 1 = 1.05857 loss)\nI0821 23:25:58.050062 32262 solver.cpp:228] Iteration 24300, loss = 0.01556\nI0821 23:25:58.050106 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:25:58.050122 32262 solver.cpp:244]     Train net output #1: loss = 0.0155601 (* 1 = 0.0155601 loss)\nI0821 23:25:58.131690 32262 sgd_solver.cpp:166] Iteration 24300, lr = 0.35\nI0821 23:28:17.113065 32262 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0821 23:29:39.758039 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77016\nI0821 23:29:39.758330 32262 solver.cpp:404]     Test net output #1: loss = 1.10386 (* 1 = 1.10386 loss)\nI0821 23:29:41.089603 32262 solver.cpp:228] Iteration 24400, loss = 0.061106\nI0821 23:29:41.089648 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:29:41.089663 32262 solver.cpp:244]     Train net output #1: loss = 0.061106 (* 1 = 0.061106 loss)\nI0821 23:29:41.170245 32262 sgd_solver.cpp:166] Iteration 24400, lr = 0.35\nI0821 23:32:00.007630 32262 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0821 23:33:22.561228 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80952\nI0821 23:33:22.561554 32262 solver.cpp:404]     Test net output #1: loss = 0.807134 (* 1 = 0.807134 loss)\nI0821 23:33:23.893180 32262 solver.cpp:228] Iteration 
24500, loss = 0.0264647\nI0821 23:33:23.893224 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:33:23.893239 32262 solver.cpp:244]     Train net output #1: loss = 0.0264647 (* 1 = 0.0264647 loss)\nI0821 23:33:23.973623 32262 sgd_solver.cpp:166] Iteration 24500, lr = 0.35\nI0821 23:35:42.922248 32262 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0821 23:37:05.553791 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78792\nI0821 23:37:05.554098 32262 solver.cpp:404]     Test net output #1: loss = 0.912606 (* 1 = 0.912606 loss)\nI0821 23:37:06.885620 32262 solver.cpp:228] Iteration 24600, loss = 0.00794677\nI0821 23:37:06.885664 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 23:37:06.885680 32262 solver.cpp:244]     Train net output #1: loss = 0.00794679 (* 1 = 0.00794679 loss)\nI0821 23:37:06.962213 32262 sgd_solver.cpp:166] Iteration 24600, lr = 0.35\nI0821 23:39:25.910944 32262 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0821 23:40:48.467139 32262 solver.cpp:404]     Test net output #0: accuracy = 0.802\nI0821 23:40:48.467452 32262 solver.cpp:404]     Test net output #1: loss = 0.880572 (* 1 = 0.880572 loss)\nI0821 23:40:49.798614 32262 solver.cpp:228] Iteration 24700, loss = 0.0652931\nI0821 23:40:49.798657 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0821 23:40:49.798672 32262 solver.cpp:244]     Train net output #1: loss = 0.0652931 (* 1 = 0.0652931 loss)\nI0821 23:40:49.878497 32262 sgd_solver.cpp:166] Iteration 24700, lr = 0.35\nI0821 23:43:08.742725 32262 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0821 23:44:31.268900 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76808\nI0821 23:44:31.269213 32262 solver.cpp:404]     Test net output #1: loss = 1.08989 (* 1 = 1.08989 loss)\nI0821 23:44:32.600946 32262 solver.cpp:228] Iteration 24800, loss = 0.0191197\nI0821 23:44:32.600991 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0821 
23:44:32.601006 32262 solver.cpp:244]     Train net output #1: loss = 0.0191197 (* 1 = 0.0191197 loss)\nI0821 23:44:32.677181 32262 sgd_solver.cpp:166] Iteration 24800, lr = 0.35\nI0821 23:46:51.587548 32262 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0821 23:48:14.121321 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78424\nI0821 23:48:14.121642 32262 solver.cpp:404]     Test net output #1: loss = 0.953545 (* 1 = 0.953545 loss)\nI0821 23:48:15.452127 32262 solver.cpp:228] Iteration 24900, loss = 0.0181161\nI0821 23:48:15.452169 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:48:15.452185 32262 solver.cpp:244]     Train net output #1: loss = 0.0181161 (* 1 = 0.0181161 loss)\nI0821 23:48:15.533252 32262 sgd_solver.cpp:166] Iteration 24900, lr = 0.35\nI0821 23:50:34.430378 32262 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0821 23:51:56.949015 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80864\nI0821 23:51:56.949332 32262 solver.cpp:404]     Test net output #1: loss = 0.759423 (* 1 = 0.759423 loss)\nI0821 23:51:58.280884 32262 solver.cpp:228] Iteration 25000, loss = 0.0160213\nI0821 23:51:58.280925 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:51:58.280941 32262 solver.cpp:244]     Train net output #1: loss = 0.0160213 (* 1 = 0.0160213 loss)\nI0821 23:51:58.358371 32262 sgd_solver.cpp:166] Iteration 25000, lr = 0.35\nI0821 23:54:17.254118 32262 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0821 23:55:39.780020 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7714\nI0821 23:55:39.780354 32262 solver.cpp:404]     Test net output #1: loss = 1.10045 (* 1 = 1.10045 loss)\nI0821 23:55:41.111661 32262 solver.cpp:228] Iteration 25100, loss = 0.0452258\nI0821 23:55:41.111703 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0821 23:55:41.111719 32262 solver.cpp:244]     Train net output #1: loss = 0.0452258 (* 1 = 0.0452258 loss)\nI0821 
23:55:41.192384 32262 sgd_solver.cpp:166] Iteration 25100, lr = 0.35\nI0821 23:58:00.157016 32262 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0821 23:59:22.682294 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79028\nI0821 23:59:22.682626 32262 solver.cpp:404]     Test net output #1: loss = 0.951464 (* 1 = 0.951464 loss)\nI0821 23:59:24.014094 32262 solver.cpp:228] Iteration 25200, loss = 0.0202453\nI0821 23:59:24.014137 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0821 23:59:24.014153 32262 solver.cpp:244]     Train net output #1: loss = 0.0202454 (* 1 = 0.0202454 loss)\nI0821 23:59:24.095666 32262 sgd_solver.cpp:166] Iteration 25200, lr = 0.35\nI0822 00:01:43.031483 32262 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0822 00:03:05.630813 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80208\nI0822 00:03:05.631140 32262 solver.cpp:404]     Test net output #1: loss = 0.781756 (* 1 = 0.781756 loss)\nI0822 00:03:06.963055 32262 solver.cpp:228] Iteration 25300, loss = 0.0408299\nI0822 00:03:06.963099 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:03:06.963115 32262 solver.cpp:244]     Train net output #1: loss = 0.0408299 (* 1 = 0.0408299 loss)\nI0822 00:03:07.043612 32262 sgd_solver.cpp:166] Iteration 25300, lr = 0.35\nI0822 00:05:25.936331 32262 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0822 00:06:48.556990 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7042\nI0822 00:06:48.557332 32262 solver.cpp:404]     Test net output #1: loss = 1.45751 (* 1 = 1.45751 loss)\nI0822 00:06:49.888820 32262 solver.cpp:228] Iteration 25400, loss = 0.0802103\nI0822 00:06:49.888859 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 00:06:49.888875 32262 solver.cpp:244]     Train net output #1: loss = 0.0802104 (* 1 = 0.0802104 loss)\nI0822 00:06:49.969604 32262 sgd_solver.cpp:166] Iteration 25400, lr = 0.35\nI0822 00:09:08.918061 32262 solver.cpp:337] Iteration 
25500, Testing net (#0)\nI0822 00:10:31.499974 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78096\nI0822 00:10:31.500283 32262 solver.cpp:404]     Test net output #1: loss = 1.00579 (* 1 = 1.00579 loss)\nI0822 00:10:32.830921 32262 solver.cpp:228] Iteration 25500, loss = 0.0467319\nI0822 00:10:32.830960 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:10:32.830976 32262 solver.cpp:244]     Train net output #1: loss = 0.046732 (* 1 = 0.046732 loss)\nI0822 00:10:32.910429 32262 sgd_solver.cpp:166] Iteration 25500, lr = 0.35\nI0822 00:12:51.852591 32262 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0822 00:14:14.383747 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77296\nI0822 00:14:14.384063 32262 solver.cpp:404]     Test net output #1: loss = 1.08956 (* 1 = 1.08956 loss)\nI0822 00:14:15.715534 32262 solver.cpp:228] Iteration 25600, loss = 0.011931\nI0822 00:14:15.715579 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:14:15.715593 32262 solver.cpp:244]     Train net output #1: loss = 0.011931 (* 1 = 0.011931 loss)\nI0822 00:14:15.795152 32262 sgd_solver.cpp:166] Iteration 25600, lr = 0.35\nI0822 00:16:34.668038 32262 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0822 00:17:57.206390 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76952\nI0822 00:17:57.206704 32262 solver.cpp:404]     Test net output #1: loss = 1.03884 (* 1 = 1.03884 loss)\nI0822 00:17:58.537433 32262 solver.cpp:228] Iteration 25700, loss = 0.0522018\nI0822 00:17:58.537474 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 00:17:58.537489 32262 solver.cpp:244]     Train net output #1: loss = 0.0522019 (* 1 = 0.0522019 loss)\nI0822 00:17:58.618238 32262 sgd_solver.cpp:166] Iteration 25700, lr = 0.35\nI0822 00:20:17.477128 32262 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0822 00:21:40.034857 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75868\nI0822 00:21:40.035193 
32262 solver.cpp:404]     Test net output #1: loss = 1.25474 (* 1 = 1.25474 loss)\nI0822 00:21:41.366643 32262 solver.cpp:228] Iteration 25800, loss = 0.0580225\nI0822 00:21:41.366685 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:21:41.366700 32262 solver.cpp:244]     Train net output #1: loss = 0.0580225 (* 1 = 0.0580225 loss)\nI0822 00:21:41.441339 32262 sgd_solver.cpp:166] Iteration 25800, lr = 0.35\nI0822 00:24:00.269660 32262 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0822 00:25:22.776324 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76488\nI0822 00:25:22.776639 32262 solver.cpp:404]     Test net output #1: loss = 0.978975 (* 1 = 0.978975 loss)\nI0822 00:25:24.108302 32262 solver.cpp:228] Iteration 25900, loss = 0.0191508\nI0822 00:25:24.108343 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:25:24.108358 32262 solver.cpp:244]     Train net output #1: loss = 0.0191508 (* 1 = 0.0191508 loss)\nI0822 00:25:24.184826 32262 sgd_solver.cpp:166] Iteration 25900, lr = 0.35\nI0822 00:27:43.067224 32262 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0822 00:29:05.580262 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80276\nI0822 00:29:05.580605 32262 solver.cpp:404]     Test net output #1: loss = 0.993443 (* 1 = 0.993443 loss)\nI0822 00:29:06.911991 32262 solver.cpp:228] Iteration 26000, loss = 0.0291339\nI0822 00:29:06.912032 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:29:06.912048 32262 solver.cpp:244]     Train net output #1: loss = 0.029134 (* 1 = 0.029134 loss)\nI0822 00:29:06.986511 32262 sgd_solver.cpp:166] Iteration 26000, lr = 0.35\nI0822 00:31:25.695367 32262 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0822 00:32:48.200445 32262 solver.cpp:404]     Test net output #0: accuracy = 0.794\nI0822 00:32:48.200773 32262 solver.cpp:404]     Test net output #1: loss = 0.954654 (* 1 = 0.954654 loss)\nI0822 00:32:49.530820 32262 solver.cpp:228] 
Iteration 26100, loss = 0.021097\nI0822 00:32:49.530858 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:32:49.530874 32262 solver.cpp:244]     Train net output #1: loss = 0.021097 (* 1 = 0.021097 loss)\nI0822 00:32:49.614152 32262 sgd_solver.cpp:166] Iteration 26100, lr = 0.35\nI0822 00:35:08.339134 32262 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0822 00:36:30.854719 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80528\nI0822 00:36:30.855056 32262 solver.cpp:404]     Test net output #1: loss = 0.78135 (* 1 = 0.78135 loss)\nI0822 00:36:32.184991 32262 solver.cpp:228] Iteration 26200, loss = 0.0411443\nI0822 00:36:32.185032 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:36:32.185048 32262 solver.cpp:244]     Train net output #1: loss = 0.0411443 (* 1 = 0.0411443 loss)\nI0822 00:36:32.265202 32262 sgd_solver.cpp:166] Iteration 26200, lr = 0.35\nI0822 00:38:50.886482 32262 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0822 00:40:13.406854 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77876\nI0822 00:40:13.407174 32262 solver.cpp:404]     Test net output #1: loss = 0.946856 (* 1 = 0.946856 loss)\nI0822 00:40:14.737198 32262 solver.cpp:228] Iteration 26300, loss = 0.0169573\nI0822 00:40:14.737238 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 00:40:14.737254 32262 solver.cpp:244]     Train net output #1: loss = 0.0169573 (* 1 = 0.0169573 loss)\nI0822 00:40:14.818455 32262 sgd_solver.cpp:166] Iteration 26300, lr = 0.35\nI0822 00:42:33.744038 32262 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0822 00:43:55.953553 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7754\nI0822 00:43:55.953832 32262 solver.cpp:404]     Test net output #1: loss = 1.02388 (* 1 = 1.02388 loss)\nI0822 00:43:57.283738 32262 solver.cpp:228] Iteration 26400, loss = 0.0120208\nI0822 00:43:57.283782 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
00:43:57.283805 32262 solver.cpp:244]     Train net output #1: loss = 0.0120209 (* 1 = 0.0120209 loss)\nI0822 00:43:57.362970 32262 sgd_solver.cpp:166] Iteration 26400, lr = 0.35\nI0822 00:46:16.096168 32262 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0822 00:47:38.071295 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7628\nI0822 00:47:38.071575 32262 solver.cpp:404]     Test net output #1: loss = 1.21056 (* 1 = 1.21056 loss)\nI0822 00:47:39.402935 32262 solver.cpp:228] Iteration 26500, loss = 0.0055378\nI0822 00:47:39.402977 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 00:47:39.403000 32262 solver.cpp:244]     Train net output #1: loss = 0.00553786 (* 1 = 0.00553786 loss)\nI0822 00:47:39.484644 32262 sgd_solver.cpp:166] Iteration 26500, lr = 0.35\nI0822 00:49:58.242967 32262 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0822 00:51:20.354728 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75544\nI0822 00:51:20.355015 32262 solver.cpp:404]     Test net output #1: loss = 1.11128 (* 1 = 1.11128 loss)\nI0822 00:51:21.685988 32262 solver.cpp:228] Iteration 26600, loss = 0.100445\nI0822 00:51:21.686033 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 00:51:21.686054 32262 solver.cpp:244]     Train net output #1: loss = 0.100445 (* 1 = 0.100445 loss)\nI0822 00:51:21.769929 32262 sgd_solver.cpp:166] Iteration 26600, lr = 0.35\nI0822 00:53:40.626375 32262 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0822 00:55:03.267760 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68692\nI0822 00:55:03.268098 32262 solver.cpp:404]     Test net output #1: loss = 1.62785 (* 1 = 1.62785 loss)\nI0822 00:55:04.598326 32262 solver.cpp:228] Iteration 26700, loss = 0.107682\nI0822 00:55:04.598371 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 00:55:04.598394 32262 solver.cpp:244]     Train net output #1: loss = 0.107682 (* 1 = 0.107682 loss)\nI0822 00:55:04.682374 32262 
sgd_solver.cpp:166] Iteration 26700, lr = 0.35\nI0822 00:57:23.554421 32262 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0822 00:58:46.185997 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68468\nI0822 00:58:46.186345 32262 solver.cpp:404]     Test net output #1: loss = 1.72592 (* 1 = 1.72592 loss)\nI0822 00:58:47.517772 32262 solver.cpp:228] Iteration 26800, loss = 0.058952\nI0822 00:58:47.517817 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 00:58:47.517840 32262 solver.cpp:244]     Train net output #1: loss = 0.058952 (* 1 = 0.058952 loss)\nI0822 00:58:47.599165 32262 sgd_solver.cpp:166] Iteration 26800, lr = 0.35\nI0822 01:01:06.404047 32262 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0822 01:02:28.910341 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79096\nI0822 01:02:28.910670 32262 solver.cpp:404]     Test net output #1: loss = 1.11324 (* 1 = 1.11324 loss)\nI0822 01:02:30.241926 32262 solver.cpp:228] Iteration 26900, loss = 0.00845262\nI0822 01:02:30.241971 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:02:30.241993 32262 solver.cpp:244]     Train net output #1: loss = 0.00845264 (* 1 = 0.00845264 loss)\nI0822 01:02:30.318235 32262 sgd_solver.cpp:166] Iteration 26900, lr = 0.35\nI0822 01:04:49.221755 32262 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0822 01:06:11.843832 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77752\nI0822 01:06:11.844151 32262 solver.cpp:404]     Test net output #1: loss = 0.991886 (* 1 = 0.991886 loss)\nI0822 01:06:13.175704 32262 solver.cpp:228] Iteration 27000, loss = 0.0540882\nI0822 01:06:13.175745 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:06:13.175768 32262 solver.cpp:244]     Train net output #1: loss = 0.0540882 (* 1 = 0.0540882 loss)\nI0822 01:06:13.252975 32262 sgd_solver.cpp:166] Iteration 27000, lr = 0.35\nI0822 01:08:32.116356 32262 solver.cpp:337] Iteration 27100, Testing net 
(#0)\nI0822 01:09:54.711993 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79772\nI0822 01:09:54.712339 32262 solver.cpp:404]     Test net output #1: loss = 0.854089 (* 1 = 0.854089 loss)\nI0822 01:09:56.042771 32262 solver.cpp:228] Iteration 27100, loss = 0.0112746\nI0822 01:09:56.042811 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:09:56.042834 32262 solver.cpp:244]     Train net output #1: loss = 0.0112746 (* 1 = 0.0112746 loss)\nI0822 01:09:56.122937 32262 sgd_solver.cpp:166] Iteration 27100, lr = 0.35\nI0822 01:12:15.017642 32262 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0822 01:13:37.624672 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73164\nI0822 01:13:37.625015 32262 solver.cpp:404]     Test net output #1: loss = 1.1879 (* 1 = 1.1879 loss)\nI0822 01:13:38.956315 32262 solver.cpp:228] Iteration 27200, loss = 0.0321634\nI0822 01:13:38.956358 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:13:38.956382 32262 solver.cpp:244]     Train net output #1: loss = 0.0321634 (* 1 = 0.0321634 loss)\nI0822 01:13:39.035495 32262 sgd_solver.cpp:166] Iteration 27200, lr = 0.35\nI0822 01:15:57.966068 32262 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0822 01:17:20.592880 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73624\nI0822 01:17:20.593199 32262 solver.cpp:404]     Test net output #1: loss = 1.25729 (* 1 = 1.25729 loss)\nI0822 01:17:21.924338 32262 solver.cpp:228] Iteration 27300, loss = 0.0270756\nI0822 01:17:21.924381 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:17:21.924404 32262 solver.cpp:244]     Train net output #1: loss = 0.0270756 (* 1 = 0.0270756 loss)\nI0822 01:17:22.004336 32262 sgd_solver.cpp:166] Iteration 27300, lr = 0.35\nI0822 01:19:40.760107 32262 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0822 01:21:03.299496 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79148\nI0822 01:21:03.299803 32262 
solver.cpp:404]     Test net output #1: loss = 0.91726 (* 1 = 0.91726 loss)\nI0822 01:21:04.631479 32262 solver.cpp:228] Iteration 27400, loss = 0.0688254\nI0822 01:21:04.631523 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 01:21:04.631546 32262 solver.cpp:244]     Train net output #1: loss = 0.0688254 (* 1 = 0.0688254 loss)\nI0822 01:21:04.710147 32262 sgd_solver.cpp:166] Iteration 27400, lr = 0.35\nI0822 01:23:23.555637 32262 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0822 01:24:46.091971 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75368\nI0822 01:24:46.092295 32262 solver.cpp:404]     Test net output #1: loss = 1.06404 (* 1 = 1.06404 loss)\nI0822 01:24:47.423753 32262 solver.cpp:228] Iteration 27500, loss = 0.0655779\nI0822 01:24:47.423799 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:24:47.423820 32262 solver.cpp:244]     Train net output #1: loss = 0.065578 (* 1 = 0.065578 loss)\nI0822 01:24:47.500483 32262 sgd_solver.cpp:166] Iteration 27500, lr = 0.35\nI0822 01:27:06.316081 32262 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0822 01:28:28.856731 32262 solver.cpp:404]     Test net output #0: accuracy = 0.59356\nI0822 01:28:28.857048 32262 solver.cpp:404]     Test net output #1: loss = 2.55339 (* 1 = 2.55339 loss)\nI0822 01:28:30.188493 32262 solver.cpp:228] Iteration 27600, loss = 0.0434336\nI0822 01:28:30.188535 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:28:30.188558 32262 solver.cpp:244]     Train net output #1: loss = 0.0434336 (* 1 = 0.0434336 loss)\nI0822 01:28:30.265703 32262 sgd_solver.cpp:166] Iteration 27600, lr = 0.35\nI0822 01:30:49.108129 32262 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0822 01:32:11.645395 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73828\nI0822 01:32:11.645733 32262 solver.cpp:404]     Test net output #1: loss = 1.2267 (* 1 = 1.2267 loss)\nI0822 01:32:12.975687 32262 solver.cpp:228] Iteration 
27700, loss = 0.0399962\nI0822 01:32:12.975733 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:32:12.975756 32262 solver.cpp:244]     Train net output #1: loss = 0.0399962 (* 1 = 0.0399962 loss)\nI0822 01:32:13.052460 32262 sgd_solver.cpp:166] Iteration 27700, lr = 0.35\nI0822 01:34:31.899677 32262 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0822 01:35:54.434964 32262 solver.cpp:404]     Test net output #0: accuracy = 0.66752\nI0822 01:35:54.435315 32262 solver.cpp:404]     Test net output #1: loss = 1.78641 (* 1 = 1.78641 loss)\nI0822 01:35:55.767163 32262 solver.cpp:228] Iteration 27800, loss = 0.0182452\nI0822 01:35:55.767208 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:35:55.767232 32262 solver.cpp:244]     Train net output #1: loss = 0.0182452 (* 1 = 0.0182452 loss)\nI0822 01:35:55.848323 32262 sgd_solver.cpp:166] Iteration 27800, lr = 0.35\nI0822 01:38:14.697057 32262 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0822 01:39:37.239677 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76204\nI0822 01:39:37.240032 32262 solver.cpp:404]     Test net output #1: loss = 1.13369 (* 1 = 1.13369 loss)\nI0822 01:39:38.571588 32262 solver.cpp:228] Iteration 27900, loss = 0.01734\nI0822 01:39:38.571632 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:39:38.571655 32262 solver.cpp:244]     Train net output #1: loss = 0.01734 (* 1 = 0.01734 loss)\nI0822 01:39:38.650065 32262 sgd_solver.cpp:166] Iteration 27900, lr = 0.35\nI0822 01:41:57.494853 32262 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0822 01:43:20.019881 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73576\nI0822 01:43:20.020196 32262 solver.cpp:404]     Test net output #1: loss = 1.28629 (* 1 = 1.28629 loss)\nI0822 01:43:21.351682 32262 solver.cpp:228] Iteration 28000, loss = 0.0197533\nI0822 01:43:21.351725 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:43:21.351748 
32262 solver.cpp:244]     Train net output #1: loss = 0.0197533 (* 1 = 0.0197533 loss)\nI0822 01:43:21.428742 32262 sgd_solver.cpp:166] Iteration 28000, lr = 0.35\nI0822 01:45:40.228469 32262 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0822 01:47:02.772361 32262 solver.cpp:404]     Test net output #0: accuracy = 0.723\nI0822 01:47:02.772680 32262 solver.cpp:404]     Test net output #1: loss = 1.36734 (* 1 = 1.36734 loss)\nI0822 01:47:04.104244 32262 solver.cpp:228] Iteration 28100, loss = 0.0677751\nI0822 01:47:04.104296 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 01:47:04.104320 32262 solver.cpp:244]     Train net output #1: loss = 0.0677751 (* 1 = 0.0677751 loss)\nI0822 01:47:04.185071 32262 sgd_solver.cpp:166] Iteration 28100, lr = 0.35\nI0822 01:49:23.010607 32262 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0822 01:50:45.533748 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76696\nI0822 01:50:45.534083 32262 solver.cpp:404]     Test net output #1: loss = 1.31661 (* 1 = 1.31661 loss)\nI0822 01:50:46.864166 32262 solver.cpp:228] Iteration 28200, loss = 0.00704158\nI0822 01:50:46.864210 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 01:50:46.864233 32262 solver.cpp:244]     Train net output #1: loss = 0.00704161 (* 1 = 0.00704161 loss)\nI0822 01:50:46.945643 32262 sgd_solver.cpp:166] Iteration 28200, lr = 0.35\nI0822 01:53:05.788350 32262 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0822 01:54:28.304610 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82356\nI0822 01:54:28.304962 32262 solver.cpp:404]     Test net output #1: loss = 0.773678 (* 1 = 0.773678 loss)\nI0822 01:54:29.636194 32262 solver.cpp:228] Iteration 28300, loss = 0.0212225\nI0822 01:54:29.636240 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 01:54:29.636263 32262 solver.cpp:244]     Train net output #1: loss = 0.0212226 (* 1 = 0.0212226 loss)\nI0822 01:54:29.715888 32262 
sgd_solver.cpp:166] Iteration 28300, lr = 0.35\nI0822 01:56:48.558559 32262 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0822 01:58:11.085877 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78552\nI0822 01:58:11.086221 32262 solver.cpp:404]     Test net output #1: loss = 1.16308 (* 1 = 1.16308 loss)\nI0822 01:58:12.417250 32262 solver.cpp:228] Iteration 28400, loss = 0.0353047\nI0822 01:58:12.417301 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 01:58:12.417325 32262 solver.cpp:244]     Train net output #1: loss = 0.0353047 (* 1 = 0.0353047 loss)\nI0822 01:58:12.497632 32262 sgd_solver.cpp:166] Iteration 28400, lr = 0.35\nI0822 02:00:31.298552 32262 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0822 02:01:53.925413 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77724\nI0822 02:01:53.925745 32262 solver.cpp:404]     Test net output #1: loss = 1.03262 (* 1 = 1.03262 loss)\nI0822 02:01:55.257175 32262 solver.cpp:228] Iteration 28500, loss = 0.023422\nI0822 02:01:55.257220 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:01:55.257244 32262 solver.cpp:244]     Train net output #1: loss = 0.023422 (* 1 = 0.023422 loss)\nI0822 02:01:55.335295 32262 sgd_solver.cpp:166] Iteration 28500, lr = 0.35\nI0822 02:04:14.127995 32262 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0822 02:05:36.641155 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73324\nI0822 02:05:36.641510 32262 solver.cpp:404]     Test net output #1: loss = 1.44692 (* 1 = 1.44692 loss)\nI0822 02:05:37.972772 32262 solver.cpp:228] Iteration 28600, loss = 0.0145697\nI0822 02:05:37.972817 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:05:37.972841 32262 solver.cpp:244]     Train net output #1: loss = 0.0145698 (* 1 = 0.0145698 loss)\nI0822 02:05:38.052873 32262 sgd_solver.cpp:166] Iteration 28600, lr = 0.35\nI0822 02:07:56.927502 32262 solver.cpp:337] Iteration 28700, Testing net 
(#0)\nI0822 02:09:19.456579 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75668\nI0822 02:09:19.456923 32262 solver.cpp:404]     Test net output #1: loss = 1.09754 (* 1 = 1.09754 loss)\nI0822 02:09:20.788086 32262 solver.cpp:228] Iteration 28700, loss = 0.0232794\nI0822 02:09:20.788133 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:09:20.788157 32262 solver.cpp:244]     Train net output #1: loss = 0.0232795 (* 1 = 0.0232795 loss)\nI0822 02:09:20.865614 32262 sgd_solver.cpp:166] Iteration 28700, lr = 0.35\nI0822 02:11:39.681340 32262 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0822 02:13:02.270200 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7286\nI0822 02:13:02.270547 32262 solver.cpp:404]     Test net output #1: loss = 1.40024 (* 1 = 1.40024 loss)\nI0822 02:13:03.600970 32262 solver.cpp:228] Iteration 28800, loss = 0.0125314\nI0822 02:13:03.601016 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:13:03.601040 32262 solver.cpp:244]     Train net output #1: loss = 0.0125315 (* 1 = 0.0125315 loss)\nI0822 02:13:03.685472 32262 sgd_solver.cpp:166] Iteration 28800, lr = 0.35\nI0822 02:15:22.511909 32262 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0822 02:16:44.909940 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78604\nI0822 02:16:44.910241 32262 solver.cpp:404]     Test net output #1: loss = 0.946558 (* 1 = 0.946558 loss)\nI0822 02:16:46.240563 32262 solver.cpp:228] Iteration 28900, loss = 0.0491204\nI0822 02:16:46.240610 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:16:46.240633 32262 solver.cpp:244]     Train net output #1: loss = 0.0491205 (* 1 = 0.0491205 loss)\nI0822 02:16:46.322052 32262 sgd_solver.cpp:166] Iteration 28900, lr = 0.35\nI0822 02:19:05.101236 32262 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0822 02:20:27.403842 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72124\nI0822 02:20:27.404161 32262 
solver.cpp:404]     Test net output #1: loss = 1.37953 (* 1 = 1.37953 loss)\nI0822 02:20:28.734237 32262 solver.cpp:228] Iteration 29000, loss = 0.043135\nI0822 02:20:28.734285 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 02:20:28.734310 32262 solver.cpp:244]     Train net output #1: loss = 0.043135 (* 1 = 0.043135 loss)\nI0822 02:20:28.810817 32262 sgd_solver.cpp:166] Iteration 29000, lr = 0.35\nI0822 02:22:47.640611 32262 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0822 02:24:10.046710 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71552\nI0822 02:24:10.046998 32262 solver.cpp:404]     Test net output #1: loss = 1.32972 (* 1 = 1.32972 loss)\nI0822 02:24:11.379961 32262 solver.cpp:228] Iteration 29100, loss = 0.0221043\nI0822 02:24:11.380007 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:24:11.380023 32262 solver.cpp:244]     Train net output #1: loss = 0.0221043 (* 1 = 0.0221043 loss)\nI0822 02:24:11.460711 32262 sgd_solver.cpp:166] Iteration 29100, lr = 0.35\nI0822 02:26:30.325577 32262 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0822 02:27:52.835681 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76344\nI0822 02:27:52.835992 32262 solver.cpp:404]     Test net output #1: loss = 1.00463 (* 1 = 1.00463 loss)\nI0822 02:27:54.167441 32262 solver.cpp:228] Iteration 29200, loss = 0.0783696\nI0822 02:27:54.167485 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 02:27:54.167501 32262 solver.cpp:244]     Train net output #1: loss = 0.0783697 (* 1 = 0.0783697 loss)\nI0822 02:27:54.249147 32262 sgd_solver.cpp:166] Iteration 29200, lr = 0.35\nI0822 02:30:13.081280 32262 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0822 02:31:35.595456 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81016\nI0822 02:31:35.595773 32262 solver.cpp:404]     Test net output #1: loss = 0.777626 (* 1 = 0.777626 loss)\nI0822 02:31:36.926574 32262 solver.cpp:228] 
Iteration 29300, loss = 0.0869819\nI0822 02:31:36.926617 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:31:36.926632 32262 solver.cpp:244]     Train net output #1: loss = 0.0869819 (* 1 = 0.0869819 loss)\nI0822 02:31:37.009989 32262 sgd_solver.cpp:166] Iteration 29300, lr = 0.35\nI0822 02:33:55.781077 32262 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0822 02:35:18.301306 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79424\nI0822 02:35:18.301625 32262 solver.cpp:404]     Test net output #1: loss = 0.847214 (* 1 = 0.847214 loss)\nI0822 02:35:19.632004 32262 solver.cpp:228] Iteration 29400, loss = 0.00791187\nI0822 02:35:19.632045 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:35:19.632066 32262 solver.cpp:244]     Train net output #1: loss = 0.00791193 (* 1 = 0.00791193 loss)\nI0822 02:35:19.707603 32262 sgd_solver.cpp:166] Iteration 29400, lr = 0.35\nI0822 02:37:38.552157 32262 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0822 02:39:01.064326 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79356\nI0822 02:39:01.064736 32262 solver.cpp:404]     Test net output #1: loss = 0.943055 (* 1 = 0.943055 loss)\nI0822 02:39:02.394466 32262 solver.cpp:228] Iteration 29500, loss = 0.0212956\nI0822 02:39:02.394508 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:39:02.394525 32262 solver.cpp:244]     Train net output #1: loss = 0.0212956 (* 1 = 0.0212956 loss)\nI0822 02:39:02.473871 32262 sgd_solver.cpp:166] Iteration 29500, lr = 0.35\nI0822 02:41:21.345473 32262 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0822 02:42:43.836690 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76312\nI0822 02:42:43.837033 32262 solver.cpp:404]     Test net output #1: loss = 1.14754 (* 1 = 1.14754 loss)\nI0822 02:42:45.167672 32262 solver.cpp:228] Iteration 29600, loss = 0.0250155\nI0822 02:42:45.167714 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0822 02:42:45.167731 32262 solver.cpp:244]     Train net output #1: loss = 0.0250156 (* 1 = 0.0250156 loss)\nI0822 02:42:45.249946 32262 sgd_solver.cpp:166] Iteration 29600, lr = 0.35\nI0822 02:45:04.107158 32262 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0822 02:46:26.612337 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79648\nI0822 02:46:26.612656 32262 solver.cpp:404]     Test net output #1: loss = 0.848193 (* 1 = 0.848193 loss)\nI0822 02:46:27.943616 32262 solver.cpp:228] Iteration 29700, loss = 0.0221008\nI0822 02:46:27.943657 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:46:27.943673 32262 solver.cpp:244]     Train net output #1: loss = 0.0221009 (* 1 = 0.0221009 loss)\nI0822 02:46:28.023681 32262 sgd_solver.cpp:166] Iteration 29700, lr = 0.35\nI0822 02:48:46.835306 32262 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0822 02:50:09.338064 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74336\nI0822 02:50:09.338395 32262 solver.cpp:404]     Test net output #1: loss = 1.18343 (* 1 = 1.18343 loss)\nI0822 02:50:10.669245 32262 solver.cpp:228] Iteration 29800, loss = 0.028013\nI0822 02:50:10.669288 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 02:50:10.669304 32262 solver.cpp:244]     Train net output #1: loss = 0.0280131 (* 1 = 0.0280131 loss)\nI0822 02:50:10.751390 32262 sgd_solver.cpp:166] Iteration 29800, lr = 0.35\nI0822 02:52:29.561234 32262 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0822 02:53:52.057822 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7894\nI0822 02:53:52.058161 32262 solver.cpp:404]     Test net output #1: loss = 1.02164 (* 1 = 1.02164 loss)\nI0822 02:53:53.389408 32262 solver.cpp:228] Iteration 29900, loss = 0.0126334\nI0822 02:53:53.389451 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 02:53:53.389467 32262 solver.cpp:244]     Train net output #1: loss = 0.0126335 (* 1 = 0.0126335 loss)\nI0822 
02:53:53.466723 32262 sgd_solver.cpp:166] Iteration 29900, lr = 0.35\nI0822 02:56:12.348919 32262 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0822 02:57:34.838698 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77344\nI0822 02:57:34.839054 32262 solver.cpp:404]     Test net output #1: loss = 1.0712 (* 1 = 1.0712 loss)\nI0822 02:57:36.169028 32262 solver.cpp:228] Iteration 30000, loss = 0.0244781\nI0822 02:57:36.169073 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 02:57:36.169090 32262 solver.cpp:244]     Train net output #1: loss = 0.0244782 (* 1 = 0.0244782 loss)\nI0822 02:57:36.245971 32262 sgd_solver.cpp:166] Iteration 30000, lr = 0.35\nI0822 02:59:55.132086 32262 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0822 03:01:17.516582 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77076\nI0822 03:01:17.516896 32262 solver.cpp:404]     Test net output #1: loss = 1.16546 (* 1 = 1.16546 loss)\nI0822 03:01:18.846945 32262 solver.cpp:228] Iteration 30100, loss = 0.0102298\nI0822 03:01:18.846983 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:01:18.846999 32262 solver.cpp:244]     Train net output #1: loss = 0.0102299 (* 1 = 0.0102299 loss)\nI0822 03:01:18.928161 32262 sgd_solver.cpp:166] Iteration 30100, lr = 0.35\nI0822 03:03:37.802151 32262 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0822 03:05:00.212885 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76944\nI0822 03:05:00.213212 32262 solver.cpp:404]     Test net output #1: loss = 0.900032 (* 1 = 0.900032 loss)\nI0822 03:05:01.543223 32262 solver.cpp:228] Iteration 30200, loss = 0.0486401\nI0822 03:05:01.543267 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 03:05:01.543282 32262 solver.cpp:244]     Train net output #1: loss = 0.0486402 (* 1 = 0.0486402 loss)\nI0822 03:05:01.619560 32262 sgd_solver.cpp:166] Iteration 30200, lr = 0.35\nI0822 03:07:20.408352 32262 solver.cpp:337] Iteration 30300, 
Testing net (#0)\nI0822 03:08:42.918886 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76428\nI0822 03:08:42.919239 32262 solver.cpp:404]     Test net output #1: loss = 1.00561 (* 1 = 1.00561 loss)\nI0822 03:08:44.249277 32262 solver.cpp:228] Iteration 30300, loss = 0.0594379\nI0822 03:08:44.249318 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:08:44.249335 32262 solver.cpp:244]     Train net output #1: loss = 0.059438 (* 1 = 0.059438 loss)\nI0822 03:08:44.332262 32262 sgd_solver.cpp:166] Iteration 30300, lr = 0.35\nI0822 03:11:03.239558 32262 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0822 03:12:25.743187 32262 solver.cpp:404]     Test net output #0: accuracy = 0.685\nI0822 03:12:25.743521 32262 solver.cpp:404]     Test net output #1: loss = 1.78231 (* 1 = 1.78231 loss)\nI0822 03:12:27.073459 32262 solver.cpp:228] Iteration 30400, loss = 0.0244038\nI0822 03:12:27.073501 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:12:27.073518 32262 solver.cpp:244]     Train net output #1: loss = 0.0244039 (* 1 = 0.0244039 loss)\nI0822 03:12:27.155618 32262 sgd_solver.cpp:166] Iteration 30400, lr = 0.35\nI0822 03:14:45.952045 32262 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0822 03:16:08.446655 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78232\nI0822 03:16:08.447001 32262 solver.cpp:404]     Test net output #1: loss = 0.906877 (* 1 = 0.906877 loss)\nI0822 03:16:09.777009 32262 solver.cpp:228] Iteration 30500, loss = 0.0317289\nI0822 03:16:09.777050 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:16:09.777070 32262 solver.cpp:244]     Train net output #1: loss = 0.031729 (* 1 = 0.031729 loss)\nI0822 03:16:09.855314 32262 sgd_solver.cpp:166] Iteration 30500, lr = 0.35\nI0822 03:18:28.657044 32262 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0822 03:19:51.182332 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74424\nI0822 03:19:51.182677 
32262 solver.cpp:404]     Test net output #1: loss = 1.29485 (* 1 = 1.29485 loss)\nI0822 03:19:52.513866 32262 solver.cpp:228] Iteration 30600, loss = 0.0115327\nI0822 03:19:52.513908 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:19:52.513924 32262 solver.cpp:244]     Train net output #1: loss = 0.0115328 (* 1 = 0.0115328 loss)\nI0822 03:19:52.590986 32262 sgd_solver.cpp:166] Iteration 30600, lr = 0.35\nI0822 03:22:11.348709 32262 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0822 03:23:33.860767 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76404\nI0822 03:23:33.861094 32262 solver.cpp:404]     Test net output #1: loss = 1.00415 (* 1 = 1.00415 loss)\nI0822 03:23:35.190804 32262 solver.cpp:228] Iteration 30700, loss = 0.0122352\nI0822 03:23:35.190850 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:23:35.190865 32262 solver.cpp:244]     Train net output #1: loss = 0.0122352 (* 1 = 0.0122352 loss)\nI0822 03:23:35.276202 32262 sgd_solver.cpp:166] Iteration 30700, lr = 0.35\nI0822 03:25:54.113700 32262 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0822 03:27:16.623767 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71404\nI0822 03:27:16.624117 32262 solver.cpp:404]     Test net output #1: loss = 1.44884 (* 1 = 1.44884 loss)\nI0822 03:27:17.954886 32262 solver.cpp:228] Iteration 30800, loss = 0.0191215\nI0822 03:27:17.954931 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:27:17.954947 32262 solver.cpp:244]     Train net output #1: loss = 0.0191216 (* 1 = 0.0191216 loss)\nI0822 03:27:18.037901 32262 sgd_solver.cpp:166] Iteration 30800, lr = 0.35\nI0822 03:29:36.876317 32262 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0822 03:30:59.378669 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68412\nI0822 03:30:59.379011 32262 solver.cpp:404]     Test net output #1: loss = 1.72929 (* 1 = 1.72929 loss)\nI0822 03:31:00.709190 32262 solver.cpp:228] 
Iteration 30900, loss = 0.056644\nI0822 03:31:00.709235 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:31:00.709251 32262 solver.cpp:244]     Train net output #1: loss = 0.0566441 (* 1 = 0.0566441 loss)\nI0822 03:31:00.791103 32262 sgd_solver.cpp:166] Iteration 30900, lr = 0.35\nI0822 03:33:19.606758 32262 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0822 03:34:42.128818 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80876\nI0822 03:34:42.129163 32262 solver.cpp:404]     Test net output #1: loss = 0.935085 (* 1 = 0.935085 loss)\nI0822 03:34:43.458982 32262 solver.cpp:228] Iteration 31000, loss = 0.0557033\nI0822 03:34:43.459030 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 03:34:43.459048 32262 solver.cpp:244]     Train net output #1: loss = 0.0557034 (* 1 = 0.0557034 loss)\nI0822 03:34:43.540973 32262 sgd_solver.cpp:166] Iteration 31000, lr = 0.35\nI0822 03:37:02.398053 32262 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0822 03:38:24.936388 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI0822 03:38:24.936733 32262 solver.cpp:404]     Test net output #1: loss = 0.866866 (* 1 = 0.866866 loss)\nI0822 03:38:26.266470 32262 solver.cpp:228] Iteration 31100, loss = 0.0330948\nI0822 03:38:26.266513 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:38:26.266530 32262 solver.cpp:244]     Train net output #1: loss = 0.0330949 (* 1 = 0.0330949 loss)\nI0822 03:38:26.345366 32262 sgd_solver.cpp:166] Iteration 31100, lr = 0.35\nI0822 03:40:45.127553 32262 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0822 03:42:07.668012 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77676\nI0822 03:42:07.668335 32262 solver.cpp:404]     Test net output #1: loss = 1.03522 (* 1 = 1.03522 loss)\nI0822 03:42:08.998702 32262 solver.cpp:228] Iteration 31200, loss = 0.0181927\nI0822 03:42:08.998741 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0822 03:42:08.998756 32262 solver.cpp:244]     Train net output #1: loss = 0.0181928 (* 1 = 0.0181928 loss)\nI0822 03:42:09.078148 32262 sgd_solver.cpp:166] Iteration 31200, lr = 0.35\nI0822 03:44:27.850271 32262 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0822 03:45:50.399276 32262 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI0822 03:45:50.399593 32262 solver.cpp:404]     Test net output #1: loss = 0.818233 (* 1 = 0.818233 loss)\nI0822 03:45:51.729830 32262 solver.cpp:228] Iteration 31300, loss = 0.0103529\nI0822 03:45:51.729871 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 03:45:51.729887 32262 solver.cpp:244]     Train net output #1: loss = 0.010353 (* 1 = 0.010353 loss)\nI0822 03:45:51.813083 32262 sgd_solver.cpp:166] Iteration 31300, lr = 0.35\nI0822 03:48:10.529651 32262 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0822 03:49:33.077541 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8204\nI0822 03:49:33.077884 32262 solver.cpp:404]     Test net output #1: loss = 0.762236 (* 1 = 0.762236 loss)\nI0822 03:49:34.408915 32262 solver.cpp:228] Iteration 31400, loss = 0.0253482\nI0822 03:49:34.408954 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:49:34.408970 32262 solver.cpp:244]     Train net output #1: loss = 0.0253483 (* 1 = 0.0253483 loss)\nI0822 03:49:34.486717 32262 sgd_solver.cpp:166] Iteration 31400, lr = 0.35\nI0822 03:51:53.256714 32262 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0822 03:53:15.796491 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79968\nI0822 03:53:15.796789 32262 solver.cpp:404]     Test net output #1: loss = 0.907567 (* 1 = 0.907567 loss)\nI0822 03:53:17.126438 32262 solver.cpp:228] Iteration 31500, loss = 0.02135\nI0822 03:53:17.126477 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:53:17.126492 32262 solver.cpp:244]     Train net output #1: loss = 0.0213501 (* 1 = 0.0213501 loss)\nI0822 
03:53:17.206321 32262 sgd_solver.cpp:166] Iteration 31500, lr = 0.35\nI0822 03:55:36.323328 32262 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0822 03:56:58.871013 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77272\nI0822 03:56:58.871358 32262 solver.cpp:404]     Test net output #1: loss = 1.11145 (* 1 = 1.11145 loss)\nI0822 03:57:00.201256 32262 solver.cpp:228] Iteration 31600, loss = 0.0331241\nI0822 03:57:00.201298 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 03:57:00.201314 32262 solver.cpp:244]     Train net output #1: loss = 0.0331243 (* 1 = 0.0331243 loss)\nI0822 03:57:00.287241 32262 sgd_solver.cpp:166] Iteration 31600, lr = 0.35\nI0822 03:59:19.585724 32262 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0822 04:00:42.136296 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70704\nI0822 04:00:42.136631 32262 solver.cpp:404]     Test net output #1: loss = 1.80856 (* 1 = 1.80856 loss)\nI0822 04:00:43.466197 32262 solver.cpp:228] Iteration 31700, loss = 0.0737836\nI0822 04:00:43.466239 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 04:00:43.466255 32262 solver.cpp:244]     Train net output #1: loss = 0.0737838 (* 1 = 0.0737838 loss)\nI0822 04:00:43.553026 32262 sgd_solver.cpp:166] Iteration 31700, lr = 0.35\nI0822 04:03:02.877856 32262 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0822 04:04:25.423159 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73128\nI0822 04:04:25.423497 32262 solver.cpp:404]     Test net output #1: loss = 1.53817 (* 1 = 1.53817 loss)\nI0822 04:04:26.753499 32262 solver.cpp:228] Iteration 31800, loss = 0.0348764\nI0822 04:04:26.753540 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:04:26.753554 32262 solver.cpp:244]     Train net output #1: loss = 0.0348765 (* 1 = 0.0348765 loss)\nI0822 04:04:26.839095 32262 sgd_solver.cpp:166] Iteration 31800, lr = 0.35\nI0822 04:06:46.196638 32262 solver.cpp:337] Iteration 
31900, Testing net (#0)\nI0822 04:08:08.715685 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75332\nI0822 04:08:08.715999 32262 solver.cpp:404]     Test net output #1: loss = 1.25861 (* 1 = 1.25861 loss)\nI0822 04:08:10.047547 32262 solver.cpp:228] Iteration 31900, loss = 0.0260137\nI0822 04:08:10.047586 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:08:10.047601 32262 solver.cpp:244]     Train net output #1: loss = 0.0260138 (* 1 = 0.0260138 loss)\nI0822 04:08:10.127630 32262 sgd_solver.cpp:166] Iteration 31900, lr = 0.35\nI0822 04:10:29.469597 32262 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0822 04:11:51.984176 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7928\nI0822 04:11:51.984493 32262 solver.cpp:404]     Test net output #1: loss = 1.00576 (* 1 = 1.00576 loss)\nI0822 04:11:53.315963 32262 solver.cpp:228] Iteration 32000, loss = 0.00741776\nI0822 04:11:53.316000 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:11:53.316015 32262 solver.cpp:244]     Train net output #1: loss = 0.00741787 (* 1 = 0.00741787 loss)\nI0822 04:11:53.396672 32262 sgd_solver.cpp:166] Iteration 32000, lr = 0.35\nI0822 04:14:12.721523 32262 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0822 04:15:35.238306 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78332\nI0822 04:15:35.238711 32262 solver.cpp:404]     Test net output #1: loss = 1.0609 (* 1 = 1.0609 loss)\nI0822 04:15:36.568529 32262 solver.cpp:228] Iteration 32100, loss = 0.0374133\nI0822 04:15:36.568567 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:15:36.568583 32262 solver.cpp:244]     Train net output #1: loss = 0.0374134 (* 1 = 0.0374134 loss)\nI0822 04:15:36.650202 32262 sgd_solver.cpp:166] Iteration 32100, lr = 0.35\nI0822 04:17:55.933856 32262 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0822 04:19:18.447355 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7246\nI0822 
04:19:18.447700 32262 solver.cpp:404]     Test net output #1: loss = 1.4506 (* 1 = 1.4506 loss)\nI0822 04:19:19.777705 32262 solver.cpp:228] Iteration 32200, loss = 0.040316\nI0822 04:19:19.777742 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:19:19.777758 32262 solver.cpp:244]     Train net output #1: loss = 0.0403162 (* 1 = 0.0403162 loss)\nI0822 04:19:19.863445 32262 sgd_solver.cpp:166] Iteration 32200, lr = 0.35\nI0822 04:21:39.219817 32262 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0822 04:23:01.733278 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71596\nI0822 04:23:01.733609 32262 solver.cpp:404]     Test net output #1: loss = 1.2115 (* 1 = 1.2115 loss)\nI0822 04:23:03.063762 32262 solver.cpp:228] Iteration 32300, loss = 0.0176212\nI0822 04:23:03.063799 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:23:03.063813 32262 solver.cpp:244]     Train net output #1: loss = 0.0176213 (* 1 = 0.0176213 loss)\nI0822 04:23:03.146705 32262 sgd_solver.cpp:166] Iteration 32300, lr = 0.35\nI0822 04:25:22.455204 32262 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0822 04:26:44.973402 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65656\nI0822 04:26:44.973748 32262 solver.cpp:404]     Test net output #1: loss = 1.44813 (* 1 = 1.44813 loss)\nI0822 04:26:46.304263 32262 solver.cpp:228] Iteration 32400, loss = 0.0513945\nI0822 04:26:46.304301 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:26:46.304316 32262 solver.cpp:244]     Train net output #1: loss = 0.0513946 (* 1 = 0.0513946 loss)\nI0822 04:26:46.388712 32262 sgd_solver.cpp:166] Iteration 32400, lr = 0.35\nI0822 04:29:05.771661 32262 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0822 04:30:28.283936 32262 solver.cpp:404]     Test net output #0: accuracy = 0.69904\nI0822 04:30:28.284281 32262 solver.cpp:404]     Test net output #1: loss = 1.58323 (* 1 = 1.58323 loss)\nI0822 04:30:29.613813 32262 
solver.cpp:228] Iteration 32500, loss = 0.0360013\nI0822 04:30:29.613852 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:30:29.613867 32262 solver.cpp:244]     Train net output #1: loss = 0.0360014 (* 1 = 0.0360014 loss)\nI0822 04:30:29.700891 32262 sgd_solver.cpp:166] Iteration 32500, lr = 0.35\nI0822 04:32:49.020553 32262 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0822 04:34:11.536815 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7616\nI0822 04:34:11.537158 32262 solver.cpp:404]     Test net output #1: loss = 1.03648 (* 1 = 1.03648 loss)\nI0822 04:34:12.867275 32262 solver.cpp:228] Iteration 32600, loss = 0.0426887\nI0822 04:34:12.867312 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 04:34:12.867327 32262 solver.cpp:244]     Train net output #1: loss = 0.0426888 (* 1 = 0.0426888 loss)\nI0822 04:34:12.954555 32262 sgd_solver.cpp:166] Iteration 32600, lr = 0.35\nI0822 04:36:32.311797 32262 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0822 04:37:54.815803 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77716\nI0822 04:37:54.816128 32262 solver.cpp:404]     Test net output #1: loss = 0.907567 (* 1 = 0.907567 loss)\nI0822 04:37:56.146070 32262 solver.cpp:228] Iteration 32700, loss = 0.0726964\nI0822 04:37:56.146108 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:37:56.146123 32262 solver.cpp:244]     Train net output #1: loss = 0.0726965 (* 1 = 0.0726965 loss)\nI0822 04:37:56.235203 32262 sgd_solver.cpp:166] Iteration 32700, lr = 0.35\nI0822 04:40:15.651789 32262 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0822 04:41:38.160064 32262 solver.cpp:404]     Test net output #0: accuracy = 0.779\nI0822 04:41:38.160405 32262 solver.cpp:404]     Test net output #1: loss = 1.00165 (* 1 = 1.00165 loss)\nI0822 04:41:39.490588 32262 solver.cpp:228] Iteration 32800, loss = 0.0267301\nI0822 04:41:39.490625 32262 solver.cpp:244]     Train net output #0: accuracy 
= 0.992\nI0822 04:41:39.490640 32262 solver.cpp:244]     Train net output #1: loss = 0.0267302 (* 1 = 0.0267302 loss)\nI0822 04:41:39.573287 32262 sgd_solver.cpp:166] Iteration 32800, lr = 0.35\nI0822 04:43:58.824875 32262 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0822 04:45:21.333851 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78164\nI0822 04:45:21.334199 32262 solver.cpp:404]     Test net output #1: loss = 1.00151 (* 1 = 1.00151 loss)\nI0822 04:45:22.664412 32262 solver.cpp:228] Iteration 32900, loss = 0.0126537\nI0822 04:45:22.664453 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:45:22.664466 32262 solver.cpp:244]     Train net output #1: loss = 0.0126538 (* 1 = 0.0126538 loss)\nI0822 04:45:22.753705 32262 sgd_solver.cpp:166] Iteration 32900, lr = 0.35\nI0822 04:47:42.113026 32262 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0822 04:49:04.619741 32262 solver.cpp:404]     Test net output #0: accuracy = 0.5722\nI0822 04:49:04.620069 32262 solver.cpp:404]     Test net output #1: loss = 2.4588 (* 1 = 2.4588 loss)\nI0822 04:49:05.950709 32262 solver.cpp:228] Iteration 33000, loss = 0.0102276\nI0822 04:49:05.950748 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 04:49:05.950762 32262 solver.cpp:244]     Train net output #1: loss = 0.0102277 (* 1 = 0.0102277 loss)\nI0822 04:49:06.036525 32262 sgd_solver.cpp:166] Iteration 33000, lr = 0.35\nI0822 04:51:25.349571 32262 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0822 04:52:47.849334 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78912\nI0822 04:52:47.849650 32262 solver.cpp:404]     Test net output #1: loss = 0.995554 (* 1 = 0.995554 loss)\nI0822 04:52:49.180243 32262 solver.cpp:228] Iteration 33100, loss = 0.0244762\nI0822 04:52:49.180284 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 04:52:49.180299 32262 solver.cpp:244]     Train net output #1: loss = 0.0244763 (* 1 = 0.0244763 loss)\nI0822 
04:52:49.265882 32262 sgd_solver.cpp:166] Iteration 33100, lr = 0.35\nI0822 04:55:08.555449 32262 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0822 04:56:31.072715 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72804\nI0822 04:56:31.073050 32262 solver.cpp:404]     Test net output #1: loss = 1.41257 (* 1 = 1.41257 loss)\nI0822 04:56:32.403067 32262 solver.cpp:228] Iteration 33200, loss = 0.0385552\nI0822 04:56:32.403110 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 04:56:32.403125 32262 solver.cpp:244]     Train net output #1: loss = 0.0385553 (* 1 = 0.0385553 loss)\nI0822 04:56:32.484171 32262 sgd_solver.cpp:166] Iteration 33200, lr = 0.35\nI0822 04:58:51.765394 32262 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0822 05:00:14.300982 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76556\nI0822 05:00:14.301322 32262 solver.cpp:404]     Test net output #1: loss = 1.28276 (* 1 = 1.28276 loss)\nI0822 05:00:15.630794 32262 solver.cpp:228] Iteration 33300, loss = 0.0623933\nI0822 05:00:15.630832 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 05:00:15.630847 32262 solver.cpp:244]     Train net output #1: loss = 0.0623934 (* 1 = 0.0623934 loss)\nI0822 05:00:15.716253 32262 sgd_solver.cpp:166] Iteration 33300, lr = 0.35\nI0822 05:02:34.903930 32262 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0822 05:03:57.408179 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74444\nI0822 05:03:57.408524 32262 solver.cpp:404]     Test net output #1: loss = 1.24027 (* 1 = 1.24027 loss)\nI0822 05:03:58.738183 32262 solver.cpp:228] Iteration 33400, loss = 0.0393797\nI0822 05:03:58.738220 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:03:58.738235 32262 solver.cpp:244]     Train net output #1: loss = 0.0393797 (* 1 = 0.0393797 loss)\nI0822 05:03:58.818601 32262 sgd_solver.cpp:166] Iteration 33400, lr = 0.35\nI0822 05:06:17.647487 32262 solver.cpp:337] Iteration 
33500, Testing net (#0)\nI0822 05:07:40.146852 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78724\nI0822 05:07:40.147176 32262 solver.cpp:404]     Test net output #1: loss = 0.974942 (* 1 = 0.974942 loss)\nI0822 05:07:41.477567 32262 solver.cpp:228] Iteration 33500, loss = 0.0307172\nI0822 05:07:41.477607 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:07:41.477622 32262 solver.cpp:244]     Train net output #1: loss = 0.0307172 (* 1 = 0.0307172 loss)\nI0822 05:07:41.557708 32262 sgd_solver.cpp:166] Iteration 33500, lr = 0.35\nI0822 05:10:00.411152 32262 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0822 05:11:22.903861 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI0822 05:11:22.904201 32262 solver.cpp:404]     Test net output #1: loss = 0.759093 (* 1 = 0.759093 loss)\nI0822 05:11:24.233821 32262 solver.cpp:228] Iteration 33600, loss = 0.0265065\nI0822 05:11:24.233860 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:11:24.233875 32262 solver.cpp:244]     Train net output #1: loss = 0.0265066 (* 1 = 0.0265066 loss)\nI0822 05:11:24.315675 32262 sgd_solver.cpp:166] Iteration 33600, lr = 0.35\nI0822 05:13:43.256989 32262 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0822 05:15:05.795990 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80284\nI0822 05:15:05.796340 32262 solver.cpp:404]     Test net output #1: loss = 0.779351 (* 1 = 0.779351 loss)\nI0822 05:15:07.127570 32262 solver.cpp:228] Iteration 33700, loss = 0.0362009\nI0822 05:15:07.127612 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:15:07.127636 32262 solver.cpp:244]     Train net output #1: loss = 0.036201 (* 1 = 0.036201 loss)\nI0822 05:15:07.206298 32262 sgd_solver.cpp:166] Iteration 33700, lr = 0.35\nI0822 05:17:26.082305 32262 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0822 05:18:48.698549 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0822 
05:18:48.698897 32262 solver.cpp:404]     Test net output #1: loss = 0.823975 (* 1 = 0.823975 loss)\nI0822 05:18:50.029984 32262 solver.cpp:228] Iteration 33800, loss = 0.00773413\nI0822 05:18:50.030028 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:18:50.030051 32262 solver.cpp:244]     Train net output #1: loss = 0.00773421 (* 1 = 0.00773421 loss)\nI0822 05:18:50.110576 32262 sgd_solver.cpp:166] Iteration 33800, lr = 0.35\nI0822 05:21:08.954062 32262 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0822 05:22:31.514479 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80168\nI0822 05:22:31.514822 32262 solver.cpp:404]     Test net output #1: loss = 0.867825 (* 1 = 0.867825 loss)\nI0822 05:22:32.846527 32262 solver.cpp:228] Iteration 33900, loss = 0.0591814\nI0822 05:22:32.846571 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:22:32.846595 32262 solver.cpp:244]     Train net output #1: loss = 0.0591815 (* 1 = 0.0591815 loss)\nI0822 05:22:32.922235 32262 sgd_solver.cpp:166] Iteration 33900, lr = 0.35\nI0822 05:24:51.742008 32262 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0822 05:26:14.412068 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77792\nI0822 05:26:14.412400 32262 solver.cpp:404]     Test net output #1: loss = 0.873058 (* 1 = 0.873058 loss)\nI0822 05:26:15.743681 32262 solver.cpp:228] Iteration 34000, loss = 0.0411227\nI0822 05:26:15.743728 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:26:15.743752 32262 solver.cpp:244]     Train net output #1: loss = 0.0411227 (* 1 = 0.0411227 loss)\nI0822 05:26:15.822194 32262 sgd_solver.cpp:166] Iteration 34000, lr = 0.35\nI0822 05:28:34.600313 32262 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0822 05:29:57.215610 32262 solver.cpp:404]     Test net output #0: accuracy = 0.71856\nI0822 05:29:57.215960 32262 solver.cpp:404]     Test net output #1: loss = 1.19867 (* 1 = 1.19867 loss)\nI0822 05:29:58.547032 
32262 solver.cpp:228] Iteration 34100, loss = 0.0751847\nI0822 05:29:58.547077 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 05:29:58.547101 32262 solver.cpp:244]     Train net output #1: loss = 0.0751848 (* 1 = 0.0751848 loss)\nI0822 05:29:58.624013 32262 sgd_solver.cpp:166] Iteration 34100, lr = 0.35\nI0822 05:32:17.421381 32262 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0822 05:33:39.955787 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7582\nI0822 05:33:39.956136 32262 solver.cpp:404]     Test net output #1: loss = 1.06306 (* 1 = 1.06306 loss)\nI0822 05:33:41.287358 32262 solver.cpp:228] Iteration 34200, loss = 0.0880307\nI0822 05:33:41.287402 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 05:33:41.287426 32262 solver.cpp:244]     Train net output #1: loss = 0.0880307 (* 1 = 0.0880307 loss)\nI0822 05:33:41.365558 32262 sgd_solver.cpp:166] Iteration 34200, lr = 0.35\nI0822 05:36:00.156671 32262 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0822 05:37:22.680238 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0822 05:37:22.680568 32262 solver.cpp:404]     Test net output #1: loss = 1.20415 (* 1 = 1.20415 loss)\nI0822 05:37:24.011346 32262 solver.cpp:228] Iteration 34300, loss = 0.0273592\nI0822 05:37:24.011389 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:37:24.011412 32262 solver.cpp:244]     Train net output #1: loss = 0.0273593 (* 1 = 0.0273593 loss)\nI0822 05:37:24.092716 32262 sgd_solver.cpp:166] Iteration 34300, lr = 0.35\nI0822 05:39:43.008095 32262 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0822 05:41:05.533145 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79012\nI0822 05:41:05.533483 32262 solver.cpp:404]     Test net output #1: loss = 1.15048 (* 1 = 1.15048 loss)\nI0822 05:41:06.864625 32262 solver.cpp:228] Iteration 34400, loss = 0.0118008\nI0822 05:41:06.864670 32262 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0822 05:41:06.864692 32262 solver.cpp:244]     Train net output #1: loss = 0.0118009 (* 1 = 0.0118009 loss)\nI0822 05:41:06.946738 32262 sgd_solver.cpp:166] Iteration 34400, lr = 0.35\nI0822 05:43:25.812029 32262 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0822 05:44:48.337460 32262 solver.cpp:404]     Test net output #0: accuracy = 0.795\nI0822 05:44:48.337811 32262 solver.cpp:404]     Test net output #1: loss = 0.955158 (* 1 = 0.955158 loss)\nI0822 05:44:49.669287 32262 solver.cpp:228] Iteration 34500, loss = 0.0236207\nI0822 05:44:49.669329 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:44:49.669353 32262 solver.cpp:244]     Train net output #1: loss = 0.0236208 (* 1 = 0.0236208 loss)\nI0822 05:44:49.750936 32262 sgd_solver.cpp:166] Iteration 34500, lr = 0.35\nI0822 05:47:08.633591 32262 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0822 05:48:31.184481 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75612\nI0822 05:48:31.184831 32262 solver.cpp:404]     Test net output #1: loss = 1.04767 (* 1 = 1.04767 loss)\nI0822 05:48:32.515846 32262 solver.cpp:228] Iteration 34600, loss = 0.0251037\nI0822 05:48:32.515887 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 05:48:32.515909 32262 solver.cpp:244]     Train net output #1: loss = 0.0251038 (* 1 = 0.0251038 loss)\nI0822 05:48:32.593011 32262 sgd_solver.cpp:166] Iteration 34600, lr = 0.35\nI0822 05:50:51.455740 32262 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0822 05:52:14.008528 32262 solver.cpp:404]     Test net output #0: accuracy = 0.784\nI0822 05:52:14.009135 32262 solver.cpp:404]     Test net output #1: loss = 0.874599 (* 1 = 0.874599 loss)\nI0822 05:52:15.340286 32262 solver.cpp:228] Iteration 34700, loss = 0.0346452\nI0822 05:52:15.340333 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 05:52:15.340358 32262 solver.cpp:244]     Train net output #1: loss = 0.0346453 (* 1 = 0.0346453 
loss)\nI0822 05:52:15.426326 32262 sgd_solver.cpp:166] Iteration 34700, lr = 0.35\nI0822 05:54:34.255427 32262 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0822 05:55:56.820073 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78644\nI0822 05:55:56.820407 32262 solver.cpp:404]     Test net output #1: loss = 0.904987 (* 1 = 0.904987 loss)\nI0822 05:55:58.152719 32262 solver.cpp:228] Iteration 34800, loss = 0.0761986\nI0822 05:55:58.152765 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 05:55:58.152787 32262 solver.cpp:244]     Train net output #1: loss = 0.0761986 (* 1 = 0.0761986 loss)\nI0822 05:55:58.233947 32262 sgd_solver.cpp:166] Iteration 34800, lr = 0.35\nI0822 05:58:17.158612 32262 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0822 05:59:39.726708 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77148\nI0822 05:59:39.727052 32262 solver.cpp:404]     Test net output #1: loss = 1.11382 (* 1 = 1.11382 loss)\nI0822 05:59:41.058876 32262 solver.cpp:228] Iteration 34900, loss = 0.0102847\nI0822 05:59:41.058923 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 05:59:41.058945 32262 solver.cpp:244]     Train net output #1: loss = 0.0102847 (* 1 = 0.0102847 loss)\nI0822 05:59:41.135663 32262 sgd_solver.cpp:166] Iteration 34900, lr = 0.35\nI0822 06:02:00.013878 32262 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0822 06:03:22.567147 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7446\nI0822 06:03:22.567497 32262 solver.cpp:404]     Test net output #1: loss = 1.57958 (* 1 = 1.57958 loss)\nI0822 06:03:23.898807 32262 solver.cpp:228] Iteration 35000, loss = 0.0391698\nI0822 06:03:23.898852 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:03:23.898869 32262 solver.cpp:244]     Train net output #1: loss = 0.0391698 (* 1 = 0.0391698 loss)\nI0822 06:03:23.978747 32262 sgd_solver.cpp:166] Iteration 35000, lr = 0.35\nI0822 06:05:42.808626 32262 solver.cpp:337] 
Iteration 35100, Testing net (#0)\nI0822 06:07:05.372612 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68936\nI0822 06:07:05.372961 32262 solver.cpp:404]     Test net output #1: loss = 1.73897 (* 1 = 1.73897 loss)\nI0822 06:07:06.704707 32262 solver.cpp:228] Iteration 35100, loss = 0.0282381\nI0822 06:07:06.704752 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:07:06.704777 32262 solver.cpp:244]     Train net output #1: loss = 0.0282382 (* 1 = 0.0282382 loss)\nI0822 06:07:06.781826 32262 sgd_solver.cpp:166] Iteration 35100, lr = 0.35\nI0822 06:09:25.599650 32262 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0822 06:10:47.436650 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78912\nI0822 06:10:47.436959 32262 solver.cpp:404]     Test net output #1: loss = 1.05704 (* 1 = 1.05704 loss)\nI0822 06:10:48.765120 32262 solver.cpp:228] Iteration 35200, loss = 0.0501356\nI0822 06:10:48.765157 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:10:48.765173 32262 solver.cpp:244]     Train net output #1: loss = 0.0501356 (* 1 = 0.0501356 loss)\nI0822 06:10:48.845021 32262 sgd_solver.cpp:166] Iteration 35200, lr = 0.35\nI0822 06:13:07.684247 32262 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0822 06:14:30.253821 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7212\nI0822 06:14:30.254148 32262 solver.cpp:404]     Test net output #1: loss = 1.54905 (* 1 = 1.54905 loss)\nI0822 06:14:31.585934 32262 solver.cpp:228] Iteration 35300, loss = 0.0255734\nI0822 06:14:31.585980 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:14:31.585999 32262 solver.cpp:244]     Train net output #1: loss = 0.0255735 (* 1 = 0.0255735 loss)\nI0822 06:14:31.660840 32262 sgd_solver.cpp:166] Iteration 35300, lr = 0.35\nI0822 06:16:50.461091 32262 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0822 06:18:13.025840 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75256\nI0822 
06:18:13.026175 32262 solver.cpp:404]     Test net output #1: loss = 1.22202 (* 1 = 1.22202 loss)\nI0822 06:18:14.357465 32262 solver.cpp:228] Iteration 35400, loss = 0.0509278\nI0822 06:18:14.357507 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:18:14.357523 32262 solver.cpp:244]     Train net output #1: loss = 0.0509279 (* 1 = 0.0509279 loss)\nI0822 06:18:14.439801 32262 sgd_solver.cpp:166] Iteration 35400, lr = 0.35\nI0822 06:20:33.320343 32262 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0822 06:21:55.868614 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70968\nI0822 06:21:55.868955 32262 solver.cpp:404]     Test net output #1: loss = 1.45808 (* 1 = 1.45808 loss)\nI0822 06:21:57.199584 32262 solver.cpp:228] Iteration 35500, loss = 0.0523709\nI0822 06:21:57.199625 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 06:21:57.199650 32262 solver.cpp:244]     Train net output #1: loss = 0.052371 (* 1 = 0.052371 loss)\nI0822 06:21:57.276948 32262 sgd_solver.cpp:166] Iteration 35500, lr = 0.35\nI0822 06:24:16.062173 32262 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0822 06:25:38.600210 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72404\nI0822 06:25:38.600536 32262 solver.cpp:404]     Test net output #1: loss = 1.43135 (* 1 = 1.43135 loss)\nI0822 06:25:39.931365 32262 solver.cpp:228] Iteration 35600, loss = 0.0471886\nI0822 06:25:39.931411 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 06:25:39.931434 32262 solver.cpp:244]     Train net output #1: loss = 0.0471887 (* 1 = 0.0471887 loss)\nI0822 06:25:40.010149 32262 sgd_solver.cpp:166] Iteration 35600, lr = 0.35\nI0822 06:27:58.853431 32262 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0822 06:29:21.445930 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8032\nI0822 06:29:21.446282 32262 solver.cpp:404]     Test net output #1: loss = 1.0491 (* 1 = 1.0491 loss)\nI0822 06:29:22.777808 32262 
solver.cpp:228] Iteration 35700, loss = 0.0264242\nI0822 06:29:22.777853 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:29:22.777869 32262 solver.cpp:244]     Train net output #1: loss = 0.0264243 (* 1 = 0.0264243 loss)\nI0822 06:29:22.859997 32262 sgd_solver.cpp:166] Iteration 35700, lr = 0.35\nI0822 06:31:41.681051 32262 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0822 06:33:03.987828 32262 solver.cpp:404]     Test net output #0: accuracy = 0.83376\nI0822 06:33:03.988164 32262 solver.cpp:404]     Test net output #1: loss = 0.805401 (* 1 = 0.805401 loss)\nI0822 06:33:05.319294 32262 solver.cpp:228] Iteration 35800, loss = 0.0354972\nI0822 06:33:05.319337 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:33:05.319353 32262 solver.cpp:244]     Train net output #1: loss = 0.0354973 (* 1 = 0.0354973 loss)\nI0822 06:33:05.397066 32262 sgd_solver.cpp:166] Iteration 35800, lr = 0.35\nI0822 06:35:24.217205 32262 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0822 06:36:46.587257 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78952\nI0822 06:36:46.587553 32262 solver.cpp:404]     Test net output #1: loss = 0.929883 (* 1 = 0.929883 loss)\nI0822 06:36:47.918715 32262 solver.cpp:228] Iteration 35900, loss = 0.044785\nI0822 06:36:47.918758 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:36:47.918776 32262 solver.cpp:244]     Train net output #1: loss = 0.044785 (* 1 = 0.044785 loss)\nI0822 06:36:47.994933 32262 sgd_solver.cpp:166] Iteration 35900, lr = 0.35\nI0822 06:39:06.778829 32262 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0822 06:40:29.297170 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75256\nI0822 06:40:29.297462 32262 solver.cpp:404]     Test net output #1: loss = 1.18034 (* 1 = 1.18034 loss)\nI0822 06:40:30.628842 32262 solver.cpp:228] Iteration 36000, loss = 0.0532424\nI0822 06:40:30.628887 32262 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0822 06:40:30.628903 32262 solver.cpp:244]     Train net output #1: loss = 0.0532424 (* 1 = 0.0532424 loss)\nI0822 06:40:30.706622 32262 sgd_solver.cpp:166] Iteration 36000, lr = 0.35\nI0822 06:42:49.562778 32262 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0822 06:44:12.010998 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7382\nI0822 06:44:12.011299 32262 solver.cpp:404]     Test net output #1: loss = 1.25894 (* 1 = 1.25894 loss)\nI0822 06:44:13.342866 32262 solver.cpp:228] Iteration 36100, loss = 0.0440779\nI0822 06:44:13.342909 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 06:44:13.342926 32262 solver.cpp:244]     Train net output #1: loss = 0.044078 (* 1 = 0.044078 loss)\nI0822 06:44:13.423252 32262 sgd_solver.cpp:166] Iteration 36100, lr = 0.35\nI0822 06:46:32.305217 32262 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0822 06:47:54.742795 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7338\nI0822 06:47:54.743113 32262 solver.cpp:404]     Test net output #1: loss = 1.54561 (* 1 = 1.54561 loss)\nI0822 06:47:56.073657 32262 solver.cpp:228] Iteration 36200, loss = 0.0161527\nI0822 06:47:56.073700 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:47:56.073717 32262 solver.cpp:244]     Train net output #1: loss = 0.0161528 (* 1 = 0.0161528 loss)\nI0822 06:47:56.156239 32262 sgd_solver.cpp:166] Iteration 36200, lr = 0.35\nI0822 06:50:15.032493 32262 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0822 06:51:37.153450 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80148\nI0822 06:51:37.153726 32262 solver.cpp:404]     Test net output #1: loss = 0.870541 (* 1 = 0.870541 loss)\nI0822 06:51:38.484812 32262 solver.cpp:228] Iteration 36300, loss = 0.0268799\nI0822 06:51:38.484855 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 06:51:38.484871 32262 solver.cpp:244]     Train net output #1: loss = 0.02688 (* 1 = 0.02688 loss)\nI0822 
06:51:38.566565 32262 sgd_solver.cpp:166] Iteration 36300, lr = 0.35\nI0822 06:53:57.440899 32262 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0822 06:55:19.723124 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76456\nI0822 06:55:19.723420 32262 solver.cpp:404]     Test net output #1: loss = 1.20058 (* 1 = 1.20058 loss)\nI0822 06:55:21.055160 32262 solver.cpp:228] Iteration 36400, loss = 0.00857713\nI0822 06:55:21.055203 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 06:55:21.055218 32262 solver.cpp:244]     Train net output #1: loss = 0.00857721 (* 1 = 0.00857721 loss)\nI0822 06:55:21.138176 32262 sgd_solver.cpp:166] Iteration 36400, lr = 0.35\nI0822 06:57:40.030701 32262 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0822 06:59:02.434690 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80468\nI0822 06:59:02.434968 32262 solver.cpp:404]     Test net output #1: loss = 0.91302 (* 1 = 0.91302 loss)\nI0822 06:59:03.766206 32262 solver.cpp:228] Iteration 36500, loss = 0.0567408\nI0822 06:59:03.766249 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 06:59:03.766265 32262 solver.cpp:244]     Train net output #1: loss = 0.0567409 (* 1 = 0.0567409 loss)\nI0822 06:59:03.843353 32262 sgd_solver.cpp:166] Iteration 36500, lr = 0.35\nI0822 07:01:22.671629 32262 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0822 07:02:45.147637 32262 solver.cpp:404]     Test net output #0: accuracy = 0.83032\nI0822 07:02:45.147979 32262 solver.cpp:404]     Test net output #1: loss = 0.747844 (* 1 = 0.747844 loss)\nI0822 07:02:46.479635 32262 solver.cpp:228] Iteration 36600, loss = 0.0670798\nI0822 07:02:46.479679 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:02:46.479696 32262 solver.cpp:244]     Train net output #1: loss = 0.0670798 (* 1 = 0.0670798 loss)\nI0822 07:02:46.559167 32262 sgd_solver.cpp:166] Iteration 36600, lr = 0.35\nI0822 07:05:05.409446 32262 solver.cpp:337] Iteration 
36700, Testing net (#0)\nI0822 07:06:27.914983 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70416\nI0822 07:06:27.915263 32262 solver.cpp:404]     Test net output #1: loss = 1.56879 (* 1 = 1.56879 loss)\nI0822 07:06:29.246572 32262 solver.cpp:228] Iteration 36700, loss = 0.0679374\nI0822 07:06:29.246614 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:06:29.246630 32262 solver.cpp:244]     Train net output #1: loss = 0.0679375 (* 1 = 0.0679375 loss)\nI0822 07:06:29.326696 32262 sgd_solver.cpp:166] Iteration 36700, lr = 0.35\nI0822 07:08:48.172341 32262 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0822 07:10:10.663764 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70292\nI0822 07:10:10.664090 32262 solver.cpp:404]     Test net output #1: loss = 1.46353 (* 1 = 1.46353 loss)\nI0822 07:10:11.995563 32262 solver.cpp:228] Iteration 36800, loss = 0.101897\nI0822 07:10:11.995606 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 07:10:11.995622 32262 solver.cpp:244]     Train net output #1: loss = 0.101897 (* 1 = 0.101897 loss)\nI0822 07:10:12.073549 32262 sgd_solver.cpp:166] Iteration 36800, lr = 0.35\nI0822 07:12:30.882210 32262 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0822 07:13:53.304533 32262 solver.cpp:404]     Test net output #0: accuracy = 0.69956\nI0822 07:13:53.304889 32262 solver.cpp:404]     Test net output #1: loss = 1.75074 (* 1 = 1.75074 loss)\nI0822 07:13:54.636368 32262 solver.cpp:228] Iteration 36900, loss = 0.0386987\nI0822 07:13:54.636411 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:13:54.636427 32262 solver.cpp:244]     Train net output #1: loss = 0.0386988 (* 1 = 0.0386988 loss)\nI0822 07:13:54.713474 32262 sgd_solver.cpp:166] Iteration 36900, lr = 0.35\nI0822 07:16:13.515725 32262 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0822 07:17:36.063827 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76904\nI0822 
07:17:36.064187 32262 solver.cpp:404]     Test net output #1: loss = 0.99993 (* 1 = 0.99993 loss)\nI0822 07:17:37.394074 32262 solver.cpp:228] Iteration 37000, loss = 0.0445673\nI0822 07:17:37.394122 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:17:37.394145 32262 solver.cpp:244]     Train net output #1: loss = 0.0445674 (* 1 = 0.0445674 loss)\nI0822 07:17:37.478147 32262 sgd_solver.cpp:166] Iteration 37000, lr = 0.35\nI0822 07:19:56.240545 32262 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0822 07:21:18.788935 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75024\nI0822 07:21:18.789296 32262 solver.cpp:404]     Test net output #1: loss = 1.08714 (* 1 = 1.08714 loss)\nI0822 07:21:20.120102 32262 solver.cpp:228] Iteration 37100, loss = 0.0138858\nI0822 07:21:20.120148 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:21:20.120172 32262 solver.cpp:244]     Train net output #1: loss = 0.0138859 (* 1 = 0.0138859 loss)\nI0822 07:21:20.195927 32262 sgd_solver.cpp:166] Iteration 37100, lr = 0.35\nI0822 07:23:38.995229 32262 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0822 07:25:01.524327 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7894\nI0822 07:25:01.524652 32262 solver.cpp:404]     Test net output #1: loss = 0.910948 (* 1 = 0.910948 loss)\nI0822 07:25:02.855453 32262 solver.cpp:228] Iteration 37200, loss = 0.0404468\nI0822 07:25:02.855496 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:25:02.855511 32262 solver.cpp:244]     Train net output #1: loss = 0.040447 (* 1 = 0.040447 loss)\nI0822 07:25:02.934628 32262 sgd_solver.cpp:166] Iteration 37200, lr = 0.35\nI0822 07:27:21.861949 32262 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0822 07:28:44.381620 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82976\nI0822 07:28:44.381958 32262 solver.cpp:404]     Test net output #1: loss = 0.693479 (* 1 = 0.693479 loss)\nI0822 07:28:45.713683 32262 
solver.cpp:228] Iteration 37300, loss = 0.0133162\nI0822 07:28:45.713726 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:28:45.713742 32262 solver.cpp:244]     Train net output #1: loss = 0.0133163 (* 1 = 0.0133163 loss)\nI0822 07:28:45.793805 32262 sgd_solver.cpp:166] Iteration 37300, lr = 0.35\nI0822 07:31:04.696400 32262 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0822 07:32:27.207195 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7218\nI0822 07:32:27.207540 32262 solver.cpp:404]     Test net output #1: loss = 1.28007 (* 1 = 1.28007 loss)\nI0822 07:32:28.538082 32262 solver.cpp:228] Iteration 37400, loss = 0.0167588\nI0822 07:32:28.538122 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:32:28.538138 32262 solver.cpp:244]     Train net output #1: loss = 0.0167589 (* 1 = 0.0167589 loss)\nI0822 07:32:28.617326 32262 sgd_solver.cpp:166] Iteration 37400, lr = 0.35\nI0822 07:34:47.472470 32262 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0822 07:36:09.977075 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80536\nI0822 07:36:09.977429 32262 solver.cpp:404]     Test net output #1: loss = 0.89068 (* 1 = 0.89068 loss)\nI0822 07:36:11.308434 32262 solver.cpp:228] Iteration 37500, loss = 0.0211107\nI0822 07:36:11.308476 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:36:11.308491 32262 solver.cpp:244]     Train net output #1: loss = 0.0211108 (* 1 = 0.0211108 loss)\nI0822 07:36:11.392087 32262 sgd_solver.cpp:166] Iteration 37500, lr = 0.35\nI0822 07:38:30.154989 32262 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0822 07:39:52.660150 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77436\nI0822 07:39:52.660501 32262 solver.cpp:404]     Test net output #1: loss = 0.990882 (* 1 = 0.990882 loss)\nI0822 07:39:53.991806 32262 solver.cpp:228] Iteration 37600, loss = 0.0826316\nI0822 07:39:53.991849 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI0822 07:39:53.991865 32262 solver.cpp:244]     Train net output #1: loss = 0.0826316 (* 1 = 0.0826316 loss)\nI0822 07:39:54.069547 32262 sgd_solver.cpp:166] Iteration 37600, lr = 0.35\nI0822 07:42:12.950141 32262 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0822 07:43:35.459529 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7716\nI0822 07:43:35.459856 32262 solver.cpp:404]     Test net output #1: loss = 1.09596 (* 1 = 1.09596 loss)\nI0822 07:43:36.791335 32262 solver.cpp:228] Iteration 37700, loss = 0.0171604\nI0822 07:43:36.791378 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:43:36.791393 32262 solver.cpp:244]     Train net output #1: loss = 0.0171604 (* 1 = 0.0171604 loss)\nI0822 07:43:36.866051 32262 sgd_solver.cpp:166] Iteration 37700, lr = 0.35\nI0822 07:45:55.597146 32262 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0822 07:47:17.802400 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7556\nI0822 07:47:17.802700 32262 solver.cpp:404]     Test net output #1: loss = 1.35743 (* 1 = 1.35743 loss)\nI0822 07:47:19.130132 32262 solver.cpp:228] Iteration 37800, loss = 0.0250269\nI0822 07:47:19.130167 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 07:47:19.130182 32262 solver.cpp:244]     Train net output #1: loss = 0.025027 (* 1 = 0.025027 loss)\nI0822 07:47:19.213922 32262 sgd_solver.cpp:166] Iteration 37800, lr = 0.35\nI0822 07:49:38.031873 32262 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0822 07:51:00.548660 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76308\nI0822 07:51:00.548987 32262 solver.cpp:404]     Test net output #1: loss = 1.07145 (* 1 = 1.07145 loss)\nI0822 07:51:01.880748 32262 solver.cpp:228] Iteration 37900, loss = 0.0204679\nI0822 07:51:01.880790 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 07:51:01.880806 32262 solver.cpp:244]     Train net output #1: loss = 0.020468 (* 1 = 0.020468 loss)\nI0822 
07:51:01.961364 32262 sgd_solver.cpp:166] Iteration 37900, lr = 0.35\nI0822 07:53:20.779899 32262 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0822 07:54:43.288621 32262 solver.cpp:404]     Test net output #0: accuracy = 0.62668\nI0822 07:54:43.288944 32262 solver.cpp:404]     Test net output #1: loss = 1.90779 (* 1 = 1.90779 loss)\nI0822 07:54:44.619241 32262 solver.cpp:228] Iteration 38000, loss = 0.0259504\nI0822 07:54:44.619279 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 07:54:44.619295 32262 solver.cpp:244]     Train net output #1: loss = 0.0259505 (* 1 = 0.0259505 loss)\nI0822 07:54:44.700626 32262 sgd_solver.cpp:166] Iteration 38000, lr = 0.35\nI0822 07:57:03.502358 32262 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0822 07:58:26.021873 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79964\nI0822 07:58:26.022218 32262 solver.cpp:404]     Test net output #1: loss = 1.00197 (* 1 = 1.00197 loss)\nI0822 07:58:27.352336 32262 solver.cpp:228] Iteration 38100, loss = 0.0522122\nI0822 07:58:27.352377 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 07:58:27.352392 32262 solver.cpp:244]     Train net output #1: loss = 0.0522123 (* 1 = 0.0522123 loss)\nI0822 07:58:27.434648 32262 sgd_solver.cpp:166] Iteration 38100, lr = 0.35\nI0822 08:00:46.218040 32262 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0822 08:02:08.733892 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI0822 08:02:08.734232 32262 solver.cpp:404]     Test net output #1: loss = 0.962567 (* 1 = 0.962567 loss)\nI0822 08:02:10.063989 32262 solver.cpp:228] Iteration 38200, loss = 0.0351252\nI0822 08:02:10.064028 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:02:10.064044 32262 solver.cpp:244]     Train net output #1: loss = 0.0351253 (* 1 = 0.0351253 loss)\nI0822 08:02:10.140214 32262 sgd_solver.cpp:166] Iteration 38200, lr = 0.35\nI0822 08:04:28.960332 32262 solver.cpp:337] Iteration 
38300, Testing net (#0)\nI0822 08:05:51.497074 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76004\nI0822 08:05:51.497431 32262 solver.cpp:404]     Test net output #1: loss = 1.16427 (* 1 = 1.16427 loss)\nI0822 08:05:52.827445 32262 solver.cpp:228] Iteration 38300, loss = 0.00768813\nI0822 08:05:52.827484 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:05:52.827499 32262 solver.cpp:244]     Train net output #1: loss = 0.00768823 (* 1 = 0.00768823 loss)\nI0822 08:05:52.911532 32262 sgd_solver.cpp:166] Iteration 38300, lr = 0.35\nI0822 08:08:11.791345 32262 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0822 08:09:34.320279 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80116\nI0822 08:09:34.320632 32262 solver.cpp:404]     Test net output #1: loss = 0.99061 (* 1 = 0.99061 loss)\nI0822 08:09:35.650317 32262 solver.cpp:228] Iteration 38400, loss = 0.0469785\nI0822 08:09:35.650353 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:09:35.650368 32262 solver.cpp:244]     Train net output #1: loss = 0.0469786 (* 1 = 0.0469786 loss)\nI0822 08:09:35.731503 32262 sgd_solver.cpp:166] Iteration 38400, lr = 0.35\nI0822 08:11:54.566920 32262 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0822 08:13:17.073887 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8178\nI0822 08:13:17.074239 32262 solver.cpp:404]     Test net output #1: loss = 0.746791 (* 1 = 0.746791 loss)\nI0822 08:13:18.404065 32262 solver.cpp:228] Iteration 38500, loss = 0.0472213\nI0822 08:13:18.404103 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:13:18.404119 32262 solver.cpp:244]     Train net output #1: loss = 0.0472214 (* 1 = 0.0472214 loss)\nI0822 08:13:18.485481 32262 sgd_solver.cpp:166] Iteration 38500, lr = 0.35\nI0822 08:15:37.393755 32262 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0822 08:16:59.921958 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77468\nI0822 
08:16:59.922317 32262 solver.cpp:404]     Test net output #1: loss = 1.12077 (* 1 = 1.12077 loss)\nI0822 08:17:01.252332 32262 solver.cpp:228] Iteration 38600, loss = 0.0170329\nI0822 08:17:01.252368 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:17:01.252383 32262 solver.cpp:244]     Train net output #1: loss = 0.017033 (* 1 = 0.017033 loss)\nI0822 08:17:01.332353 32262 sgd_solver.cpp:166] Iteration 38600, lr = 0.35\nI0822 08:19:20.095029 32262 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0822 08:20:42.643086 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81392\nI0822 08:20:42.643409 32262 solver.cpp:404]     Test net output #1: loss = 0.809124 (* 1 = 0.809124 loss)\nI0822 08:20:43.973521 32262 solver.cpp:228] Iteration 38700, loss = 0.0364103\nI0822 08:20:43.973561 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 08:20:43.973577 32262 solver.cpp:244]     Train net output #1: loss = 0.0364105 (* 1 = 0.0364105 loss)\nI0822 08:20:44.048030 32262 sgd_solver.cpp:166] Iteration 38700, lr = 0.35\nI0822 08:23:02.763897 32262 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0822 08:24:25.346076 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70068\nI0822 08:24:25.346408 32262 solver.cpp:404]     Test net output #1: loss = 1.51784 (* 1 = 1.51784 loss)\nI0822 08:24:26.678098 32262 solver.cpp:228] Iteration 38800, loss = 0.0466716\nI0822 08:24:26.678139 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 08:24:26.678162 32262 solver.cpp:244]     Train net output #1: loss = 0.0466717 (* 1 = 0.0466717 loss)\nI0822 08:24:26.757982 32262 sgd_solver.cpp:166] Iteration 38800, lr = 0.35\nI0822 08:26:45.648391 32262 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0822 08:28:08.195356 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7904\nI0822 08:28:08.195688 32262 solver.cpp:404]     Test net output #1: loss = 0.983257 (* 1 = 0.983257 loss)\nI0822 08:28:09.527195 32262 
solver.cpp:228] Iteration 38900, loss = 0.0206799\nI0822 08:28:09.527236 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:28:09.527261 32262 solver.cpp:244]     Train net output #1: loss = 0.02068 (* 1 = 0.02068 loss)\nI0822 08:28:09.609174 32262 sgd_solver.cpp:166] Iteration 38900, lr = 0.35\nI0822 08:30:28.504269 32262 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0822 08:31:51.057770 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82484\nI0822 08:31:51.058125 32262 solver.cpp:404]     Test net output #1: loss = 0.776835 (* 1 = 0.776835 loss)\nI0822 08:31:52.389705 32262 solver.cpp:228] Iteration 39000, loss = 0.084826\nI0822 08:31:52.389749 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 08:31:52.389772 32262 solver.cpp:244]     Train net output #1: loss = 0.0848261 (* 1 = 0.0848261 loss)\nI0822 08:31:52.467381 32262 sgd_solver.cpp:166] Iteration 39000, lr = 0.35\nI0822 08:34:11.284142 32262 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0822 08:35:33.834663 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7832\nI0822 08:35:33.835023 32262 solver.cpp:404]     Test net output #1: loss = 1.08533 (* 1 = 1.08533 loss)\nI0822 08:35:35.166726 32262 solver.cpp:228] Iteration 39100, loss = 0.0229057\nI0822 08:35:35.166771 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:35:35.166793 32262 solver.cpp:244]     Train net output #1: loss = 0.0229058 (* 1 = 0.0229058 loss)\nI0822 08:35:35.241538 32262 sgd_solver.cpp:166] Iteration 39100, lr = 0.35\nI0822 08:37:54.011641 32262 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0822 08:39:16.529027 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI0822 08:39:16.529479 32262 solver.cpp:404]     Test net output #1: loss = 1.06313 (* 1 = 1.06313 loss)\nI0822 08:39:17.859766 32262 solver.cpp:228] Iteration 39200, loss = 0.0273527\nI0822 08:39:17.859807 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.984\nI0822 08:39:17.859822 32262 solver.cpp:244]     Train net output #1: loss = 0.0273528 (* 1 = 0.0273528 loss)\nI0822 08:39:17.938020 32262 sgd_solver.cpp:166] Iteration 39200, lr = 0.35\nI0822 08:41:36.721508 32262 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0822 08:42:59.235261 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79652\nI0822 08:42:59.235605 32262 solver.cpp:404]     Test net output #1: loss = 0.813982 (* 1 = 0.813982 loss)\nI0822 08:43:00.565891 32262 solver.cpp:228] Iteration 39300, loss = 0.0064686\nI0822 08:43:00.565929 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:43:00.565944 32262 solver.cpp:244]     Train net output #1: loss = 0.0064687 (* 1 = 0.0064687 loss)\nI0822 08:43:00.647840 32262 sgd_solver.cpp:166] Iteration 39300, lr = 0.35\nI0822 08:45:19.548863 32262 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0822 08:46:42.074100 32262 solver.cpp:404]     Test net output #0: accuracy = 0.65324\nI0822 08:46:42.074450 32262 solver.cpp:404]     Test net output #1: loss = 2.18682 (* 1 = 2.18682 loss)\nI0822 08:46:43.404316 32262 solver.cpp:228] Iteration 39400, loss = 0.0379781\nI0822 08:46:43.404358 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 08:46:43.404373 32262 solver.cpp:244]     Train net output #1: loss = 0.0379782 (* 1 = 0.0379782 loss)\nI0822 08:46:43.488314 32262 sgd_solver.cpp:166] Iteration 39400, lr = 0.35\nI0822 08:49:02.383702 32262 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0822 08:50:24.909545 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76864\nI0822 08:50:24.909860 32262 solver.cpp:404]     Test net output #1: loss = 1.00355 (* 1 = 1.00355 loss)\nI0822 08:50:26.240458 32262 solver.cpp:228] Iteration 39500, loss = 0.00707548\nI0822 08:50:26.240499 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 08:50:26.240514 32262 solver.cpp:244]     Train net output #1: loss = 0.00707559 (* 1 = 0.00707559 loss)\nI0822 
08:50:26.320760 32262 sgd_solver.cpp:166] Iteration 39500, lr = 0.35\nI0822 08:52:45.111193 32262 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0822 08:54:07.630956 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79924\nI0822 08:54:07.631309 32262 solver.cpp:404]     Test net output #1: loss = 0.933014 (* 1 = 0.933014 loss)\nI0822 08:54:08.961297 32262 solver.cpp:228] Iteration 39600, loss = 0.0397208\nI0822 08:54:08.961334 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 08:54:08.961349 32262 solver.cpp:244]     Train net output #1: loss = 0.0397209 (* 1 = 0.0397209 loss)\nI0822 08:54:09.042433 32262 sgd_solver.cpp:166] Iteration 39600, lr = 0.35\nI0822 08:56:27.731772 32262 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0822 08:57:50.255112 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77128\nI0822 08:57:50.255461 32262 solver.cpp:404]     Test net output #1: loss = 1.03791 (* 1 = 1.03791 loss)\nI0822 08:57:51.585732 32262 solver.cpp:228] Iteration 39700, loss = 0.0310564\nI0822 08:57:51.585774 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 08:57:51.585790 32262 solver.cpp:244]     Train net output #1: loss = 0.0310564 (* 1 = 0.0310564 loss)\nI0822 08:57:51.669888 32262 sgd_solver.cpp:166] Iteration 39700, lr = 0.35\nI0822 09:00:10.482699 32262 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0822 09:01:32.991832 32262 solver.cpp:404]     Test net output #0: accuracy = 0.744\nI0822 09:01:32.992136 32262 solver.cpp:404]     Test net output #1: loss = 1.11685 (* 1 = 1.11685 loss)\nI0822 09:01:34.322314 32262 solver.cpp:228] Iteration 39800, loss = 0.0231966\nI0822 09:01:34.322353 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:01:34.322368 32262 solver.cpp:244]     Train net output #1: loss = 0.0231966 (* 1 = 0.0231966 loss)\nI0822 09:01:34.404146 32262 sgd_solver.cpp:166] Iteration 39800, lr = 0.35\nI0822 09:03:53.158746 32262 solver.cpp:337] Iteration 
39900, Testing net (#0)\nI0822 09:05:15.680315 32262 solver.cpp:404]     Test net output #0: accuracy = 0.61376\nI0822 09:05:15.680661 32262 solver.cpp:404]     Test net output #1: loss = 2.01604 (* 1 = 2.01604 loss)\nI0822 09:05:17.010799 32262 solver.cpp:228] Iteration 39900, loss = 0.0769762\nI0822 09:05:17.010838 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:05:17.010854 32262 solver.cpp:244]     Train net output #1: loss = 0.0769763 (* 1 = 0.0769763 loss)\nI0822 09:05:17.087399 32262 sgd_solver.cpp:166] Iteration 39900, lr = 0.35\nI0822 09:07:35.787211 32262 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0822 09:08:58.305940 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73888\nI0822 09:08:58.306272 32262 solver.cpp:404]     Test net output #1: loss = 1.26901 (* 1 = 1.26901 loss)\nI0822 09:08:59.636306 32262 solver.cpp:228] Iteration 40000, loss = 0.0337346\nI0822 09:08:59.636346 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:08:59.636363 32262 solver.cpp:244]     Train net output #1: loss = 0.0337347 (* 1 = 0.0337347 loss)\nI0822 09:08:59.720769 32262 sgd_solver.cpp:166] Iteration 40000, lr = 0.35\nI0822 09:11:18.490523 32262 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0822 09:12:40.992780 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78792\nI0822 09:12:40.993162 32262 solver.cpp:404]     Test net output #1: loss = 0.897986 (* 1 = 0.897986 loss)\nI0822 09:12:42.323137 32262 solver.cpp:228] Iteration 40100, loss = 0.0174976\nI0822 09:12:42.323177 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:12:42.323194 32262 solver.cpp:244]     Train net output #1: loss = 0.0174977 (* 1 = 0.0174977 loss)\nI0822 09:12:42.403430 32262 sgd_solver.cpp:166] Iteration 40100, lr = 0.35\nI0822 09:15:01.164867 32262 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0822 09:16:23.315367 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76508\nI0822 
09:16:23.315691 32262 solver.cpp:404]     Test net output #1: loss = 1.13882 (* 1 = 1.13882 loss)\nI0822 09:16:24.645784 32262 solver.cpp:228] Iteration 40200, loss = 0.00715588\nI0822 09:16:24.645830 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:16:24.645846 32262 solver.cpp:244]     Train net output #1: loss = 0.00715595 (* 1 = 0.00715595 loss)\nI0822 09:16:24.725355 32262 sgd_solver.cpp:166] Iteration 40200, lr = 0.35\nI0822 09:18:43.547104 32262 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0822 09:20:06.069193 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76828\nI0822 09:20:06.069502 32262 solver.cpp:404]     Test net output #1: loss = 1.10821 (* 1 = 1.10821 loss)\nI0822 09:20:07.400050 32262 solver.cpp:228] Iteration 40300, loss = 0.0299512\nI0822 09:20:07.400094 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:20:07.400110 32262 solver.cpp:244]     Train net output #1: loss = 0.0299513 (* 1 = 0.0299513 loss)\nI0822 09:20:07.477141 32262 sgd_solver.cpp:166] Iteration 40300, lr = 0.35\nI0822 09:22:26.351804 32262 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0822 09:23:48.693681 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77204\nI0822 09:23:48.693990 32262 solver.cpp:404]     Test net output #1: loss = 1.1069 (* 1 = 1.1069 loss)\nI0822 09:23:50.025779 32262 solver.cpp:228] Iteration 40400, loss = 0.011426\nI0822 09:23:50.025825 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:23:50.025847 32262 solver.cpp:244]     Train net output #1: loss = 0.0114261 (* 1 = 0.0114261 loss)\nI0822 09:23:50.107635 32262 sgd_solver.cpp:166] Iteration 40400, lr = 0.35\nI0822 09:26:08.937976 32262 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0822 09:27:31.285894 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7824\nI0822 09:27:31.286190 32262 solver.cpp:404]     Test net output #1: loss = 0.903766 (* 1 = 0.903766 loss)\nI0822 09:27:32.616808 32262 
solver.cpp:228] Iteration 40500, loss = 0.0269009\nI0822 09:27:32.616856 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:27:32.616879 32262 solver.cpp:244]     Train net output #1: loss = 0.026901 (* 1 = 0.026901 loss)\nI0822 09:27:32.699300 32262 sgd_solver.cpp:166] Iteration 40500, lr = 0.35\nI0822 09:29:51.525920 32262 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0822 09:31:13.858593 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7828\nI0822 09:31:13.858919 32262 solver.cpp:404]     Test net output #1: loss = 1.04383 (* 1 = 1.04383 loss)\nI0822 09:31:15.190901 32262 solver.cpp:228] Iteration 40600, loss = 0.0446141\nI0822 09:31:15.190948 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:31:15.190971 32262 solver.cpp:244]     Train net output #1: loss = 0.0446141 (* 1 = 0.0446141 loss)\nI0822 09:31:15.265282 32262 sgd_solver.cpp:166] Iteration 40600, lr = 0.35\nI0822 09:33:34.123395 32262 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0822 09:34:56.468585 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82124\nI0822 09:34:56.468890 32262 solver.cpp:404]     Test net output #1: loss = 0.757669 (* 1 = 0.757669 loss)\nI0822 09:34:57.799527 32262 solver.cpp:228] Iteration 40700, loss = 0.0265711\nI0822 09:34:57.799574 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:34:57.799597 32262 solver.cpp:244]     Train net output #1: loss = 0.0265712 (* 1 = 0.0265712 loss)\nI0822 09:34:57.877790 32262 sgd_solver.cpp:166] Iteration 40700, lr = 0.35\nI0822 09:37:16.792868 32262 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0822 09:38:39.294045 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75748\nI0822 09:38:39.294353 32262 solver.cpp:404]     Test net output #1: loss = 1.21428 (* 1 = 1.21428 loss)\nI0822 09:38:40.626519 32262 solver.cpp:228] Iteration 40800, loss = 0.0794435\nI0822 09:38:40.626566 32262 solver.cpp:244]     Train net output #0: accuracy 
= 0.968\nI0822 09:38:40.626590 32262 solver.cpp:244]     Train net output #1: loss = 0.0794436 (* 1 = 0.0794436 loss)\nI0822 09:38:40.707722 32262 sgd_solver.cpp:166] Iteration 40800, lr = 0.35\nI0822 09:40:59.546959 32262 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0822 09:42:22.054716 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77256\nI0822 09:42:22.055044 32262 solver.cpp:404]     Test net output #1: loss = 1.26956 (* 1 = 1.26956 loss)\nI0822 09:42:23.386806 32262 solver.cpp:228] Iteration 40900, loss = 0.032335\nI0822 09:42:23.386853 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:42:23.386876 32262 solver.cpp:244]     Train net output #1: loss = 0.032335 (* 1 = 0.032335 loss)\nI0822 09:42:23.470702 32262 sgd_solver.cpp:166] Iteration 40900, lr = 0.35\nI0822 09:44:42.305127 32262 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0822 09:46:04.827165 32262 solver.cpp:404]     Test net output #0: accuracy = 0.83592\nI0822 09:46:04.827474 32262 solver.cpp:404]     Test net output #1: loss = 0.687911 (* 1 = 0.687911 loss)\nI0822 09:46:06.159306 32262 solver.cpp:228] Iteration 41000, loss = 0.0452824\nI0822 09:46:06.159353 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 09:46:06.159376 32262 solver.cpp:244]     Train net output #1: loss = 0.0452825 (* 1 = 0.0452825 loss)\nI0822 09:46:06.245606 32262 sgd_solver.cpp:166] Iteration 41000, lr = 0.35\nI0822 09:48:25.067780 32262 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0822 09:49:47.599334 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80352\nI0822 09:49:47.599668 32262 solver.cpp:404]     Test net output #1: loss = 0.83105 (* 1 = 0.83105 loss)\nI0822 09:49:48.930022 32262 solver.cpp:228] Iteration 41100, loss = 0.00625827\nI0822 09:49:48.930078 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 09:49:48.930104 32262 solver.cpp:244]     Train net output #1: loss = 0.00625832 (* 1 = 0.00625832 loss)\nI0822 
09:49:49.014209 32262 sgd_solver.cpp:166] Iteration 41100, lr = 0.35\nI0822 09:52:07.898361 32262 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0822 09:53:30.417848 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79608\nI0822 09:53:30.418210 32262 solver.cpp:404]     Test net output #1: loss = 0.888345 (* 1 = 0.888345 loss)\nI0822 09:53:31.749341 32262 solver.cpp:228] Iteration 41200, loss = 0.00768232\nI0822 09:53:31.749389 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:53:31.749413 32262 solver.cpp:244]     Train net output #1: loss = 0.00768237 (* 1 = 0.00768237 loss)\nI0822 09:53:31.826843 32262 sgd_solver.cpp:166] Iteration 41200, lr = 0.35\nI0822 09:55:50.657080 32262 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0822 09:57:13.164237 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78436\nI0822 09:57:13.164566 32262 solver.cpp:404]     Test net output #1: loss = 0.950053 (* 1 = 0.950053 loss)\nI0822 09:57:14.496783 32262 solver.cpp:228] Iteration 41300, loss = 0.0548365\nI0822 09:57:14.496829 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 09:57:14.496851 32262 solver.cpp:244]     Train net output #1: loss = 0.0548365 (* 1 = 0.0548365 loss)\nI0822 09:57:14.575366 32262 sgd_solver.cpp:166] Iteration 41300, lr = 0.35\nI0822 09:59:33.387668 32262 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0822 10:00:55.899982 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75712\nI0822 10:00:55.900292 32262 solver.cpp:404]     Test net output #1: loss = 1.10047 (* 1 = 1.10047 loss)\nI0822 10:00:57.231261 32262 solver.cpp:228] Iteration 41400, loss = 0.0080888\nI0822 10:00:57.231302 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:00:57.231318 32262 solver.cpp:244]     Train net output #1: loss = 0.00808885 (* 1 = 0.00808885 loss)\nI0822 10:00:57.311604 32262 sgd_solver.cpp:166] Iteration 41400, lr = 0.35\nI0822 10:03:16.184605 32262 solver.cpp:337] 
Iteration 41500, Testing net (#0)\nI0822 10:04:38.694970 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75016\nI0822 10:04:38.695305 32262 solver.cpp:404]     Test net output #1: loss = 0.969273 (* 1 = 0.969273 loss)\nI0822 10:04:40.025625 32262 solver.cpp:228] Iteration 41500, loss = 0.0749471\nI0822 10:04:40.025668 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 10:04:40.025684 32262 solver.cpp:244]     Train net output #1: loss = 0.0749472 (* 1 = 0.0749472 loss)\nI0822 10:04:40.106341 32262 sgd_solver.cpp:166] Iteration 41500, lr = 0.35\nI0822 10:06:58.921038 32262 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0822 10:08:21.413012 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82396\nI0822 10:08:21.413377 32262 solver.cpp:404]     Test net output #1: loss = 0.747214 (* 1 = 0.747214 loss)\nI0822 10:08:22.743870 32262 solver.cpp:228] Iteration 41600, loss = 0.0423247\nI0822 10:08:22.743909 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:08:22.743924 32262 solver.cpp:244]     Train net output #1: loss = 0.0423248 (* 1 = 0.0423248 loss)\nI0822 10:08:22.826501 32262 sgd_solver.cpp:166] Iteration 41600, lr = 0.35\nI0822 10:10:41.697814 32262 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0822 10:12:04.213271 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75944\nI0822 10:12:04.213604 32262 solver.cpp:404]     Test net output #1: loss = 1.1436 (* 1 = 1.1436 loss)\nI0822 10:12:05.544819 32262 solver.cpp:228] Iteration 41700, loss = 0.050032\nI0822 10:12:05.544857 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:12:05.544873 32262 solver.cpp:244]     Train net output #1: loss = 0.050032 (* 1 = 0.050032 loss)\nI0822 10:12:05.628429 32262 sgd_solver.cpp:166] Iteration 41700, lr = 0.35\nI0822 10:14:24.469925 32262 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0822 10:15:46.980720 32262 solver.cpp:404]     Test net output #0: accuracy = 0.83168\nI0822 
10:15:46.981108 32262 solver.cpp:404]     Test net output #1: loss = 0.77382 (* 1 = 0.77382 loss)\nI0822 10:15:48.310865 32262 solver.cpp:228] Iteration 41800, loss = 0.0374848\nI0822 10:15:48.310909 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:15:48.310925 32262 solver.cpp:244]     Train net output #1: loss = 0.0374849 (* 1 = 0.0374849 loss)\nI0822 10:15:48.389350 32262 sgd_solver.cpp:166] Iteration 41800, lr = 0.35\nI0822 10:18:07.300853 32262 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0822 10:19:29.823734 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78928\nI0822 10:19:29.824071 32262 solver.cpp:404]     Test net output #1: loss = 0.934297 (* 1 = 0.934297 loss)\nI0822 10:19:31.154060 32262 solver.cpp:228] Iteration 41900, loss = 0.0485061\nI0822 10:19:31.154099 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:19:31.154115 32262 solver.cpp:244]     Train net output #1: loss = 0.0485062 (* 1 = 0.0485062 loss)\nI0822 10:19:31.235883 32262 sgd_solver.cpp:166] Iteration 41900, lr = 0.35\nI0822 10:21:50.111634 32262 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0822 10:23:12.656208 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81424\nI0822 10:23:12.656517 32262 solver.cpp:404]     Test net output #1: loss = 0.876478 (* 1 = 0.876478 loss)\nI0822 10:23:13.987663 32262 solver.cpp:228] Iteration 42000, loss = 0.0111258\nI0822 10:23:13.987704 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:23:13.987720 32262 solver.cpp:244]     Train net output #1: loss = 0.0111259 (* 1 = 0.0111259 loss)\nI0822 10:23:14.065100 32262 sgd_solver.cpp:166] Iteration 42000, lr = 0.35\nI0822 10:25:32.799034 32262 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0822 10:26:55.351773 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80612\nI0822 10:26:55.352110 32262 solver.cpp:404]     Test net output #1: loss = 0.793807 (* 1 = 0.793807 loss)\nI0822 10:26:56.681951 
32262 solver.cpp:228] Iteration 42100, loss = 0.0078422\nI0822 10:26:56.681995 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:26:56.682009 32262 solver.cpp:244]     Train net output #1: loss = 0.00784224 (* 1 = 0.00784224 loss)\nI0822 10:26:56.761773 32262 sgd_solver.cpp:166] Iteration 42100, lr = 0.35\nI0822 10:29:15.510990 32262 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0822 10:30:38.065593 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7648\nI0822 10:30:38.065912 32262 solver.cpp:404]     Test net output #1: loss = 1.10388 (* 1 = 1.10388 loss)\nI0822 10:30:39.395956 32262 solver.cpp:228] Iteration 42200, loss = 0.0289792\nI0822 10:30:39.395995 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 10:30:39.396010 32262 solver.cpp:244]     Train net output #1: loss = 0.0289792 (* 1 = 0.0289792 loss)\nI0822 10:30:39.478106 32262 sgd_solver.cpp:166] Iteration 42200, lr = 0.35\nI0822 10:32:58.280467 32262 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0822 10:34:20.827641 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7812\nI0822 10:34:20.827970 32262 solver.cpp:404]     Test net output #1: loss = 0.970753 (* 1 = 0.970753 loss)\nI0822 10:34:22.157165 32262 solver.cpp:228] Iteration 42300, loss = 0.0747082\nI0822 10:34:22.157212 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:34:22.157228 32262 solver.cpp:244]     Train net output #1: loss = 0.0747082 (* 1 = 0.0747082 loss)\nI0822 10:34:22.240463 32262 sgd_solver.cpp:166] Iteration 42300, lr = 0.35\nI0822 10:36:41.016324 32262 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0822 10:38:03.598505 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74728\nI0822 10:38:03.598832 32262 solver.cpp:404]     Test net output #1: loss = 1.27386 (* 1 = 1.27386 loss)\nI0822 10:38:04.929111 32262 solver.cpp:228] Iteration 42400, loss = 0.0550108\nI0822 10:38:04.929149 32262 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0822 10:38:04.929164 32262 solver.cpp:244]     Train net output #1: loss = 0.0550108 (* 1 = 0.0550108 loss)\nI0822 10:38:05.010164 32262 sgd_solver.cpp:166] Iteration 42400, lr = 0.35\nI0822 10:40:23.873809 32262 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0822 10:41:46.451329 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7784\nI0822 10:41:46.451691 32262 solver.cpp:404]     Test net output #1: loss = 1.29053 (* 1 = 1.29053 loss)\nI0822 10:41:47.781988 32262 solver.cpp:228] Iteration 42500, loss = 0.0882679\nI0822 10:41:47.782032 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 10:41:47.782053 32262 solver.cpp:244]     Train net output #1: loss = 0.088268 (* 1 = 0.088268 loss)\nI0822 10:41:47.864676 32262 sgd_solver.cpp:166] Iteration 42500, lr = 0.35\nI0822 10:44:06.693300 32262 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0822 10:45:29.254997 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73448\nI0822 10:45:29.255313 32262 solver.cpp:404]     Test net output #1: loss = 1.36911 (* 1 = 1.36911 loss)\nI0822 10:45:30.585177 32262 solver.cpp:228] Iteration 42600, loss = 0.00134078\nI0822 10:45:30.585217 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:45:30.585232 32262 solver.cpp:244]     Train net output #1: loss = 0.00134082 (* 1 = 0.00134082 loss)\nI0822 10:45:30.668256 32262 sgd_solver.cpp:166] Iteration 42600, lr = 0.35\nI0822 10:47:49.483639 32262 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0822 10:49:12.027941 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82716\nI0822 10:49:12.028273 32262 solver.cpp:404]     Test net output #1: loss = 0.796664 (* 1 = 0.796664 loss)\nI0822 10:49:13.359045 32262 solver.cpp:228] Iteration 42700, loss = 0.0294415\nI0822 10:49:13.359086 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:49:13.359102 32262 solver.cpp:244]     Train net output #1: loss = 0.0294416 (* 1 = 0.0294416 
loss)\nI0822 10:49:13.437674 32262 sgd_solver.cpp:166] Iteration 42700, lr = 0.35\nI0822 10:51:32.240221 32262 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0822 10:52:54.759491 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73828\nI0822 10:52:54.759804 32262 solver.cpp:404]     Test net output #1: loss = 1.30392 (* 1 = 1.30392 loss)\nI0822 10:52:56.091434 32262 solver.cpp:228] Iteration 42800, loss = 0.0128523\nI0822 10:52:56.091475 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 10:52:56.091490 32262 solver.cpp:244]     Train net output #1: loss = 0.0128524 (* 1 = 0.0128524 loss)\nI0822 10:52:56.170963 32262 sgd_solver.cpp:166] Iteration 42800, lr = 0.35\nI0822 10:55:15.014487 32262 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0822 10:56:37.481304 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75592\nI0822 10:56:37.481555 32262 solver.cpp:404]     Test net output #1: loss = 1.1533 (* 1 = 1.1533 loss)\nI0822 10:56:38.811954 32262 solver.cpp:228] Iteration 42900, loss = 0.00965514\nI0822 10:56:38.811997 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 10:56:38.812012 32262 solver.cpp:244]     Train net output #1: loss = 0.00965519 (* 1 = 0.00965519 loss)\nI0822 10:56:38.895237 32262 sgd_solver.cpp:166] Iteration 42900, lr = 0.35\nI0822 10:58:57.730727 32262 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0822 11:00:20.197470 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70212\nI0822 11:00:20.197734 32262 solver.cpp:404]     Test net output #1: loss = 1.28428 (* 1 = 1.28428 loss)\nI0822 11:00:21.528177 32262 solver.cpp:228] Iteration 43000, loss = 0.0853391\nI0822 11:00:21.528220 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 11:00:21.528236 32262 solver.cpp:244]     Train net output #1: loss = 0.0853391 (* 1 = 0.0853391 loss)\nI0822 11:00:21.603219 32262 sgd_solver.cpp:166] Iteration 43000, lr = 0.35\nI0822 11:02:40.404000 32262 solver.cpp:337] 
Iteration 43100, Testing net (#0)\nI0822 11:04:02.884464 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81592\nI0822 11:04:02.884872 32262 solver.cpp:404]     Test net output #1: loss = 0.855975 (* 1 = 0.855975 loss)\nI0822 11:04:04.216395 32262 solver.cpp:228] Iteration 43100, loss = 0.0820471\nI0822 11:04:04.216437 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 11:04:04.216452 32262 solver.cpp:244]     Train net output #1: loss = 0.0820471 (* 1 = 0.0820471 loss)\nI0822 11:04:04.295125 32262 sgd_solver.cpp:166] Iteration 43100, lr = 0.35\nI0822 11:06:23.077272 32262 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0822 11:07:45.580021 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77652\nI0822 11:07:45.580304 32262 solver.cpp:404]     Test net output #1: loss = 1.10261 (* 1 = 1.10261 loss)\nI0822 11:07:46.910352 32262 solver.cpp:228] Iteration 43200, loss = 0.0471253\nI0822 11:07:46.910394 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:07:46.910410 32262 solver.cpp:244]     Train net output #1: loss = 0.0471253 (* 1 = 0.0471253 loss)\nI0822 11:07:46.991914 32262 sgd_solver.cpp:166] Iteration 43200, lr = 0.35\nI0822 11:10:05.780311 32262 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0822 11:11:28.284406 32262 solver.cpp:404]     Test net output #0: accuracy = 0.70664\nI0822 11:11:28.284685 32262 solver.cpp:404]     Test net output #1: loss = 1.76696 (* 1 = 1.76696 loss)\nI0822 11:11:29.614853 32262 solver.cpp:228] Iteration 43300, loss = 0.0427566\nI0822 11:11:29.614897 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:11:29.614912 32262 solver.cpp:244]     Train net output #1: loss = 0.0427566 (* 1 = 0.0427566 loss)\nI0822 11:11:29.692072 32262 sgd_solver.cpp:166] Iteration 43300, lr = 0.35\nI0822 11:13:48.482318 32262 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0822 11:15:11.010430 32262 solver.cpp:404]     Test net output #0: accuracy = 
0.7412\nI0822 11:15:11.010674 32262 solver.cpp:404]     Test net output #1: loss = 1.25331 (* 1 = 1.25331 loss)\nI0822 11:15:12.340788 32262 solver.cpp:228] Iteration 43400, loss = 0.0279375\nI0822 11:15:12.340831 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:15:12.340847 32262 solver.cpp:244]     Train net output #1: loss = 0.0279376 (* 1 = 0.0279376 loss)\nI0822 11:15:12.418464 32262 sgd_solver.cpp:166] Iteration 43400, lr = 0.35\nI0822 11:17:31.231675 32262 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0822 11:18:53.773494 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81272\nI0822 11:18:53.773736 32262 solver.cpp:404]     Test net output #1: loss = 0.827324 (* 1 = 0.827324 loss)\nI0822 11:18:55.104307 32262 solver.cpp:228] Iteration 43500, loss = 0.0570628\nI0822 11:18:55.104348 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:18:55.104363 32262 solver.cpp:244]     Train net output #1: loss = 0.0570628 (* 1 = 0.0570628 loss)\nI0822 11:18:55.180094 32262 sgd_solver.cpp:166] Iteration 43500, lr = 0.35\nI0822 11:21:13.964606 32262 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0822 11:22:36.501976 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79684\nI0822 11:22:36.502271 32262 solver.cpp:404]     Test net output #1: loss = 0.962475 (* 1 = 0.962475 loss)\nI0822 11:22:37.832720 32262 solver.cpp:228] Iteration 43600, loss = 0.0438998\nI0822 11:22:37.832764 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 11:22:37.832779 32262 solver.cpp:244]     Train net output #1: loss = 0.0438999 (* 1 = 0.0438999 loss)\nI0822 11:22:37.911734 32262 sgd_solver.cpp:166] Iteration 43600, lr = 0.35\nI0822 11:24:56.661590 32262 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0822 11:26:19.191665 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80456\nI0822 11:26:19.191937 32262 solver.cpp:404]     Test net output #1: loss = 0.838403 (* 1 = 0.838403 loss)\nI0822 
11:26:20.522581 32262 solver.cpp:228] Iteration 43700, loss = 0.00356645\nI0822 11:26:20.522624 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:26:20.522639 32262 solver.cpp:244]     Train net output #1: loss = 0.00356654 (* 1 = 0.00356654 loss)\nI0822 11:26:20.605098 32262 sgd_solver.cpp:166] Iteration 43700, lr = 0.35\nI0822 11:28:39.460985 32262 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0822 11:30:01.997429 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75572\nI0822 11:30:01.997680 32262 solver.cpp:404]     Test net output #1: loss = 1.2324 (* 1 = 1.2324 loss)\nI0822 11:30:03.328711 32262 solver.cpp:228] Iteration 43800, loss = 0.0403618\nI0822 11:30:03.328753 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:30:03.328769 32262 solver.cpp:244]     Train net output #1: loss = 0.0403619 (* 1 = 0.0403619 loss)\nI0822 11:30:03.408092 32262 sgd_solver.cpp:166] Iteration 43800, lr = 0.35\nI0822 11:32:22.211612 32262 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0822 11:33:44.730933 32262 solver.cpp:404]     Test net output #0: accuracy = 0.786\nI0822 11:33:44.731202 32262 solver.cpp:404]     Test net output #1: loss = 1.02964 (* 1 = 1.02964 loss)\nI0822 11:33:46.061372 32262 solver.cpp:228] Iteration 43900, loss = 0.0594814\nI0822 11:33:46.061415 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:33:46.061430 32262 solver.cpp:244]     Train net output #1: loss = 0.0594815 (* 1 = 0.0594815 loss)\nI0822 11:33:46.140977 32262 sgd_solver.cpp:166] Iteration 43900, lr = 0.35\nI0822 11:36:04.901959 32262 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0822 11:37:27.378733 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77644\nI0822 11:37:27.378983 32262 solver.cpp:404]     Test net output #1: loss = 1.25314 (* 1 = 1.25314 loss)\nI0822 11:37:28.709192 32262 solver.cpp:228] Iteration 44000, loss = 0.0237708\nI0822 11:37:28.709235 32262 solver.cpp:244]     Train net 
output #0: accuracy = 0.992\nI0822 11:37:28.709250 32262 solver.cpp:244]     Train net output #1: loss = 0.0237709 (* 1 = 0.0237709 loss)\nI0822 11:37:28.786325 32262 sgd_solver.cpp:166] Iteration 44000, lr = 0.35\nI0822 11:39:47.503198 32262 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0822 11:41:10.007057 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79956\nI0822 11:41:10.007352 32262 solver.cpp:404]     Test net output #1: loss = 0.9332 (* 1 = 0.9332 loss)\nI0822 11:41:11.338286 32262 solver.cpp:228] Iteration 44100, loss = 0.011026\nI0822 11:41:11.338330 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:41:11.338345 32262 solver.cpp:244]     Train net output #1: loss = 0.0110261 (* 1 = 0.0110261 loss)\nI0822 11:41:11.413533 32262 sgd_solver.cpp:166] Iteration 44100, lr = 0.35\nI0822 11:43:30.127818 32262 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0822 11:44:52.608820 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8152\nI0822 11:44:52.609071 32262 solver.cpp:404]     Test net output #1: loss = 0.801029 (* 1 = 0.801029 loss)\nI0822 11:44:53.940269 32262 solver.cpp:228] Iteration 44200, loss = 0.035467\nI0822 11:44:53.940313 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:44:53.940328 32262 solver.cpp:244]     Train net output #1: loss = 0.0354671 (* 1 = 0.0354671 loss)\nI0822 11:44:54.022763 32262 sgd_solver.cpp:166] Iteration 44200, lr = 0.35\nI0822 11:47:12.835211 32262 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0822 11:48:35.360971 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8108\nI0822 11:48:35.361313 32262 solver.cpp:404]     Test net output #1: loss = 0.875182 (* 1 = 0.875182 loss)\nI0822 11:48:36.692873 32262 solver.cpp:228] Iteration 44300, loss = 0.0185938\nI0822 11:48:36.692915 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 11:48:36.692929 32262 solver.cpp:244]     Train net output #1: loss = 0.0185939 (* 1 = 0.0185939 
loss)\nI0822 11:48:36.773716 32262 sgd_solver.cpp:166] Iteration 44300, lr = 0.35\nI0822 11:50:55.532070 32262 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0822 11:52:18.040330 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77176\nI0822 11:52:18.040678 32262 solver.cpp:404]     Test net output #1: loss = 1.18274 (* 1 = 1.18274 loss)\nI0822 11:52:19.371978 32262 solver.cpp:228] Iteration 44400, loss = 0.0454275\nI0822 11:52:19.372020 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 11:52:19.372035 32262 solver.cpp:244]     Train net output #1: loss = 0.0454276 (* 1 = 0.0454276 loss)\nI0822 11:52:19.451094 32262 sgd_solver.cpp:166] Iteration 44400, lr = 0.35\nI0822 11:54:38.308518 32262 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0822 11:56:00.831820 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77048\nI0822 11:56:00.832173 32262 solver.cpp:404]     Test net output #1: loss = 1.05654 (* 1 = 1.05654 loss)\nI0822 11:56:02.163583 32262 solver.cpp:228] Iteration 44500, loss = 0.0726635\nI0822 11:56:02.163625 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:56:02.163641 32262 solver.cpp:244]     Train net output #1: loss = 0.0726637 (* 1 = 0.0726637 loss)\nI0822 11:56:02.240447 32262 sgd_solver.cpp:166] Iteration 44500, lr = 0.35\nI0822 11:58:20.991616 32262 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0822 11:59:43.505327 32262 solver.cpp:404]     Test net output #0: accuracy = 0.72876\nI0822 11:59:43.505682 32262 solver.cpp:404]     Test net output #1: loss = 1.40549 (* 1 = 1.40549 loss)\nI0822 11:59:44.837047 32262 solver.cpp:228] Iteration 44600, loss = 0.0269882\nI0822 11:59:44.837092 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 11:59:44.837107 32262 solver.cpp:244]     Train net output #1: loss = 0.0269883 (* 1 = 0.0269883 loss)\nI0822 11:59:44.917945 32262 sgd_solver.cpp:166] Iteration 44600, lr = 0.35\nI0822 12:02:03.625497 32262 solver.cpp:337] 
Iteration 44700, Testing net (#0)\nI0822 12:03:26.132505 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7582\nI0822 12:03:26.132846 32262 solver.cpp:404]     Test net output #1: loss = 1.28308 (* 1 = 1.28308 loss)\nI0822 12:03:27.463557 32262 solver.cpp:228] Iteration 44700, loss = 0.0175781\nI0822 12:03:27.463601 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:03:27.463618 32262 solver.cpp:244]     Train net output #1: loss = 0.0175782 (* 1 = 0.0175782 loss)\nI0822 12:03:27.543629 32262 sgd_solver.cpp:166] Iteration 44700, lr = 0.35\nI0822 12:05:46.340378 32262 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0822 12:07:08.881084 32262 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI0822 12:07:08.881443 32262 solver.cpp:404]     Test net output #1: loss = 0.929678 (* 1 = 0.929678 loss)\nI0822 12:07:10.212728 32262 solver.cpp:228] Iteration 44800, loss = 0.0067648\nI0822 12:07:10.212770 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:07:10.212786 32262 solver.cpp:244]     Train net output #1: loss = 0.00676492 (* 1 = 0.00676492 loss)\nI0822 12:07:10.294124 32262 sgd_solver.cpp:166] Iteration 44800, lr = 0.35\nI0822 12:09:29.027366 32262 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0822 12:10:51.604414 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79596\nI0822 12:10:51.604748 32262 solver.cpp:404]     Test net output #1: loss = 1.03799 (* 1 = 1.03799 loss)\nI0822 12:10:52.936482 32262 solver.cpp:228] Iteration 44900, loss = 0.0110363\nI0822 12:10:52.936527 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:10:52.936544 32262 solver.cpp:244]     Train net output #1: loss = 0.0110365 (* 1 = 0.0110365 loss)\nI0822 12:10:53.017150 32262 sgd_solver.cpp:166] Iteration 44900, lr = 0.35\nI0822 12:13:11.735188 32262 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0822 12:14:34.287068 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81724\nI0822 
12:14:34.287423 32262 solver.cpp:404]     Test net output #1: loss = 0.781757 (* 1 = 0.781757 loss)\nI0822 12:14:35.618700 32262 solver.cpp:228] Iteration 45000, loss = 0.0099929\nI0822 12:14:35.618743 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 12:14:35.618760 32262 solver.cpp:244]     Train net output #1: loss = 0.00999302 (* 1 = 0.00999302 loss)\nI0822 12:14:35.696893 32262 sgd_solver.cpp:166] Iteration 45000, lr = 0.35\nI0822 12:16:54.505156 32262 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0822 12:18:17.054533 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78812\nI0822 12:18:17.054780 32262 solver.cpp:404]     Test net output #1: loss = 0.906229 (* 1 = 0.906229 loss)\nI0822 12:18:18.385138 32262 solver.cpp:228] Iteration 45100, loss = 0.0119204\nI0822 12:18:18.385181 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:18:18.385197 32262 solver.cpp:244]     Train net output #1: loss = 0.0119206 (* 1 = 0.0119206 loss)\nI0822 12:18:18.463063 32262 sgd_solver.cpp:166] Iteration 45100, lr = 0.35\nI0822 12:20:37.228171 32262 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0822 12:21:59.770553 32262 solver.cpp:404]     Test net output #0: accuracy = 0.76268\nI0822 12:21:59.770804 32262 solver.cpp:404]     Test net output #1: loss = 0.875415 (* 1 = 0.875415 loss)\nI0822 12:22:01.102708 32262 solver.cpp:228] Iteration 45200, loss = 0.00513188\nI0822 12:22:01.102752 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:22:01.102768 32262 solver.cpp:244]     Train net output #1: loss = 0.00513202 (* 1 = 0.00513202 loss)\nI0822 12:22:01.182814 32262 sgd_solver.cpp:166] Iteration 45200, lr = 0.35\nI0822 12:24:19.888329 32262 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0822 12:25:42.448169 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79008\nI0822 12:25:42.448452 32262 solver.cpp:404]     Test net output #1: loss = 0.949022 (* 1 = 0.949022 loss)\nI0822 12:25:43.780527 
32262 solver.cpp:228] Iteration 45300, loss = 0.034549\nI0822 12:25:43.780575 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 12:25:43.780597 32262 solver.cpp:244]     Train net output #1: loss = 0.0345492 (* 1 = 0.0345492 loss)\nI0822 12:25:43.854854 32262 sgd_solver.cpp:166] Iteration 45300, lr = 0.35\nI0822 12:28:02.646847 32262 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0822 12:29:25.174370 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77796\nI0822 12:29:25.174650 32262 solver.cpp:404]     Test net output #1: loss = 1.06407 (* 1 = 1.06407 loss)\nI0822 12:29:26.505698 32262 solver.cpp:228] Iteration 45400, loss = 0.114749\nI0822 12:29:26.505744 32262 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0822 12:29:26.505769 32262 solver.cpp:244]     Train net output #1: loss = 0.114749 (* 1 = 0.114749 loss)\nI0822 12:29:26.581709 32262 sgd_solver.cpp:166] Iteration 45400, lr = 0.35\nI0822 12:31:45.321280 32262 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0822 12:33:07.587157 32262 solver.cpp:404]     Test net output #0: accuracy = 0.703\nI0822 12:33:07.587462 32262 solver.cpp:404]     Test net output #1: loss = 1.78061 (* 1 = 1.78061 loss)\nI0822 12:33:08.919270 32262 solver.cpp:228] Iteration 45500, loss = 0.0260245\nI0822 12:33:08.919317 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 12:33:08.919342 32262 solver.cpp:244]     Train net output #1: loss = 0.0260246 (* 1 = 0.0260246 loss)\nI0822 12:33:08.996505 32262 sgd_solver.cpp:166] Iteration 45500, lr = 0.35\nI0822 12:35:27.825880 32262 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0822 12:36:50.323308 32262 solver.cpp:404]     Test net output #0: accuracy = 0.67512\nI0822 12:36:50.323570 32262 solver.cpp:404]     Test net output #1: loss = 1.82874 (* 1 = 1.82874 loss)\nI0822 12:36:51.654620 32262 solver.cpp:228] Iteration 45600, loss = 0.0244659\nI0822 12:36:51.654664 32262 solver.cpp:244]     Train net output #0: accuracy 
= 0.992\nI0822 12:36:51.654687 32262 solver.cpp:244]     Train net output #1: loss = 0.024466 (* 1 = 0.024466 loss)\nI0822 12:36:51.734792 32262 sgd_solver.cpp:166] Iteration 45600, lr = 0.35\nI0822 12:39:10.515939 32262 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0822 12:40:33.068197 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79292\nI0822 12:40:33.068509 32262 solver.cpp:404]     Test net output #1: loss = 0.980419 (* 1 = 0.980419 loss)\nI0822 12:40:34.400164 32262 solver.cpp:228] Iteration 45700, loss = 0.00304246\nI0822 12:40:34.400209 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:40:34.400238 32262 solver.cpp:244]     Train net output #1: loss = 0.00304261 (* 1 = 0.00304261 loss)\nI0822 12:40:34.480852 32262 sgd_solver.cpp:166] Iteration 45700, lr = 0.35\nI0822 12:42:53.231572 32262 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0822 12:44:15.770539 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77912\nI0822 12:44:15.770793 32262 solver.cpp:404]     Test net output #1: loss = 1.01908 (* 1 = 1.01908 loss)\nI0822 12:44:17.102461 32262 solver.cpp:228] Iteration 45800, loss = 0.0126468\nI0822 12:44:17.102507 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:44:17.102530 32262 solver.cpp:244]     Train net output #1: loss = 0.0126469 (* 1 = 0.0126469 loss)\nI0822 12:44:17.177762 32262 sgd_solver.cpp:166] Iteration 45800, lr = 0.35\nI0822 12:46:35.968598 32262 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0822 12:47:58.499116 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77944\nI0822 12:47:58.499408 32262 solver.cpp:404]     Test net output #1: loss = 1.06646 (* 1 = 1.06646 loss)\nI0822 12:47:59.830409 32262 solver.cpp:228] Iteration 45900, loss = 0.0072957\nI0822 12:47:59.830452 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 12:47:59.830474 32262 solver.cpp:244]     Train net output #1: loss = 0.00729584 (* 1 = 0.00729584 loss)\nI0822 
12:47:59.910406 32262 sgd_solver.cpp:166] Iteration 45900, lr = 0.35\nI0822 12:50:18.735780 32262 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0822 12:51:41.273465 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77416\nI0822 12:51:41.273756 32262 solver.cpp:404]     Test net output #1: loss = 1.27199 (* 1 = 1.27199 loss)\nI0822 12:51:42.604967 32262 solver.cpp:228] Iteration 46000, loss = 0.0776333\nI0822 12:51:42.605012 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 12:51:42.605036 32262 solver.cpp:244]     Train net output #1: loss = 0.0776335 (* 1 = 0.0776335 loss)\nI0822 12:51:42.682719 32262 sgd_solver.cpp:166] Iteration 46000, lr = 0.35\nI0822 12:54:01.431931 32262 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0822 12:55:23.975953 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74112\nI0822 12:55:23.976321 32262 solver.cpp:404]     Test net output #1: loss = 1.31498 (* 1 = 1.31498 loss)\nI0822 12:55:25.307570 32262 solver.cpp:228] Iteration 46100, loss = 0.079628\nI0822 12:55:25.307615 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 12:55:25.307638 32262 solver.cpp:244]     Train net output #1: loss = 0.0796282 (* 1 = 0.0796282 loss)\nI0822 12:55:25.387991 32262 sgd_solver.cpp:166] Iteration 46100, lr = 0.35\nI0822 12:57:44.202638 32262 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0822 12:59:06.772773 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77344\nI0822 12:59:06.773109 32262 solver.cpp:404]     Test net output #1: loss = 1.07502 (* 1 = 1.07502 loss)\nI0822 12:59:08.104136 32262 solver.cpp:228] Iteration 46200, loss = 0.0501918\nI0822 12:59:08.104178 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 12:59:08.104202 32262 solver.cpp:244]     Train net output #1: loss = 0.050192 (* 1 = 0.050192 loss)\nI0822 12:59:08.184005 32262 sgd_solver.cpp:166] Iteration 46200, lr = 0.35\nI0822 13:01:26.939134 32262 solver.cpp:337] Iteration 46300, 
Testing net (#0)\nI0822 13:02:49.491569 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81884\nI0822 13:02:49.491909 32262 solver.cpp:404]     Test net output #1: loss = 0.753576 (* 1 = 0.753576 loss)\nI0822 13:02:50.822870 32262 solver.cpp:228] Iteration 46300, loss = 0.00642857\nI0822 13:02:50.822916 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:02:50.822938 32262 solver.cpp:244]     Train net output #1: loss = 0.00642876 (* 1 = 0.00642876 loss)\nI0822 13:02:50.902293 32262 sgd_solver.cpp:166] Iteration 46300, lr = 0.35\nI0822 13:05:09.686774 32262 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0822 13:06:32.277065 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78876\nI0822 13:06:32.277415 32262 solver.cpp:404]     Test net output #1: loss = 0.908359 (* 1 = 0.908359 loss)\nI0822 13:06:33.609179 32262 solver.cpp:228] Iteration 46400, loss = 0.00853545\nI0822 13:06:33.609223 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:06:33.609252 32262 solver.cpp:244]     Train net output #1: loss = 0.00853563 (* 1 = 0.00853563 loss)\nI0822 13:06:33.685722 32262 sgd_solver.cpp:166] Iteration 46400, lr = 0.35\nI0822 13:08:52.362447 32262 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0822 13:10:14.799527 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80844\nI0822 13:10:14.799865 32262 solver.cpp:404]     Test net output #1: loss = 0.771785 (* 1 = 0.771785 loss)\nI0822 13:10:16.130995 32262 solver.cpp:228] Iteration 46500, loss = 0.0281595\nI0822 13:10:16.131041 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:10:16.131063 32262 solver.cpp:244]     Train net output #1: loss = 0.0281597 (* 1 = 0.0281597 loss)\nI0822 13:10:16.212774 32262 sgd_solver.cpp:166] Iteration 46500, lr = 0.35\nI0822 13:12:34.929942 32262 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0822 13:13:57.034595 32262 solver.cpp:404]     Test net output #0: accuracy = 0.73216\nI0822 
13:13:57.034857 32262 solver.cpp:404]     Test net output #1: loss = 1.51662 (* 1 = 1.51662 loss)\nI0822 13:13:58.364331 32262 solver.cpp:228] Iteration 46600, loss = 0.0469198\nI0822 13:13:58.364377 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 13:13:58.364400 32262 solver.cpp:244]     Train net output #1: loss = 0.0469199 (* 1 = 0.0469199 loss)\nI0822 13:13:58.447067 32262 sgd_solver.cpp:166] Iteration 46600, lr = 0.35\nI0822 13:16:17.160188 32262 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0822 13:17:39.322450 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78136\nI0822 13:17:39.322707 32262 solver.cpp:404]     Test net output #1: loss = 1.10371 (* 1 = 1.10371 loss)\nI0822 13:17:40.654157 32262 solver.cpp:228] Iteration 46700, loss = 0.0780871\nI0822 13:17:40.654204 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 13:17:40.654232 32262 solver.cpp:244]     Train net output #1: loss = 0.0780873 (* 1 = 0.0780873 loss)\nI0822 13:17:40.732924 32262 sgd_solver.cpp:166] Iteration 46700, lr = 0.35\nI0822 13:19:59.428490 32262 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0822 13:21:21.896482 32262 solver.cpp:404]     Test net output #0: accuracy = 0.79976\nI0822 13:21:21.896764 32262 solver.cpp:404]     Test net output #1: loss = 0.893129 (* 1 = 0.893129 loss)\nI0822 13:21:23.228184 32262 solver.cpp:228] Iteration 46800, loss = 0.03364\nI0822 13:21:23.228235 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:21:23.228260 32262 solver.cpp:244]     Train net output #1: loss = 0.0336402 (* 1 = 0.0336402 loss)\nI0822 13:21:23.307938 32262 sgd_solver.cpp:166] Iteration 46800, lr = 0.35\nI0822 13:23:42.066043 32262 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0822 13:25:04.373764 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82828\nI0822 13:25:04.374065 32262 solver.cpp:404]     Test net output #1: loss = 0.786841 (* 1 = 0.786841 loss)\nI0822 13:25:05.705705 
32262 solver.cpp:228] Iteration 46900, loss = 0.0328789\nI0822 13:25:05.705751 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 13:25:05.705775 32262 solver.cpp:244]     Train net output #1: loss = 0.0328791 (* 1 = 0.0328791 loss)\nI0822 13:25:05.785050 32262 sgd_solver.cpp:166] Iteration 46900, lr = 0.35\nI0822 13:27:24.500833 32262 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0822 13:28:46.824357 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81532\nI0822 13:28:46.824653 32262 solver.cpp:404]     Test net output #1: loss = 0.893082 (* 1 = 0.893082 loss)\nI0822 13:28:48.155623 32262 solver.cpp:228] Iteration 47000, loss = 0.0209576\nI0822 13:28:48.155665 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 13:28:48.155680 32262 solver.cpp:244]     Train net output #1: loss = 0.0209577 (* 1 = 0.0209577 loss)\nI0822 13:28:48.235388 32262 sgd_solver.cpp:166] Iteration 47000, lr = 0.35\nI0822 13:31:07.023468 32262 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0822 13:32:29.399451 32262 solver.cpp:404]     Test net output #0: accuracy = 0.78124\nI0822 13:32:29.399781 32262 solver.cpp:404]     Test net output #1: loss = 0.939191 (* 1 = 0.939191 loss)\nI0822 13:32:30.731344 32262 solver.cpp:228] Iteration 47100, loss = 0.00983337\nI0822 13:32:30.731386 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:32:30.731402 32262 solver.cpp:244]     Train net output #1: loss = 0.00983354 (* 1 = 0.00983354 loss)\nI0822 13:32:30.811877 32262 sgd_solver.cpp:166] Iteration 47100, lr = 0.35\nI0822 13:34:49.517251 32262 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0822 13:36:12.001857 32262 solver.cpp:404]     Test net output #0: accuracy = 0.67832\nI0822 13:36:12.002135 32262 solver.cpp:404]     Test net output #1: loss = 1.66108 (* 1 = 1.66108 loss)\nI0822 13:36:13.333153 32262 solver.cpp:228] Iteration 47200, loss = 0.0112339\nI0822 13:36:13.333194 32262 solver.cpp:244]     Train net output 
#0: accuracy = 1\nI0822 13:36:13.333209 32262 solver.cpp:244]     Train net output #1: loss = 0.011234 (* 1 = 0.011234 loss)\nI0822 13:36:13.414422 32262 sgd_solver.cpp:166] Iteration 47200, lr = 0.35\nI0822 13:38:32.164793 32262 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0822 13:39:54.637675 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80948\nI0822 13:39:54.637929 32262 solver.cpp:404]     Test net output #1: loss = 0.742177 (* 1 = 0.742177 loss)\nI0822 13:39:55.968036 32262 solver.cpp:228] Iteration 47300, loss = 0.02491\nI0822 13:39:55.968086 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:39:55.968102 32262 solver.cpp:244]     Train net output #1: loss = 0.0249102 (* 1 = 0.0249102 loss)\nI0822 13:39:56.048210 32262 sgd_solver.cpp:166] Iteration 47300, lr = 0.35\nI0822 13:42:14.729382 32262 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0822 13:43:37.230940 32262 solver.cpp:404]     Test net output #0: accuracy = 0.74816\nI0822 13:43:37.231189 32262 solver.cpp:404]     Test net output #1: loss = 1.1407 (* 1 = 1.1407 loss)\nI0822 13:43:38.561316 32262 solver.cpp:228] Iteration 47400, loss = 0.0106788\nI0822 13:43:38.561357 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:43:38.561372 32262 solver.cpp:244]     Train net output #1: loss = 0.0106789 (* 1 = 0.0106789 loss)\nI0822 13:43:38.640568 32262 sgd_solver.cpp:166] Iteration 47400, lr = 0.35\nI0822 13:45:57.323053 32262 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0822 13:47:19.794945 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82024\nI0822 13:47:19.795230 32262 solver.cpp:404]     Test net output #1: loss = 0.769468 (* 1 = 0.769468 loss)\nI0822 13:47:21.125669 32262 solver.cpp:228] Iteration 47500, loss = 0.0297988\nI0822 13:47:21.125710 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:47:21.125725 32262 solver.cpp:244]     Train net output #1: loss = 0.0297989 (* 1 = 0.0297989 loss)\nI0822 
13:47:21.204686 32262 sgd_solver.cpp:166] Iteration 47500, lr = 0.35\nI0822 13:49:39.899075 32262 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0822 13:51:02.394521 32262 solver.cpp:404]     Test net output #0: accuracy = 0.75448\nI0822 13:51:02.394816 32262 solver.cpp:404]     Test net output #1: loss = 1.1144 (* 1 = 1.1144 loss)\nI0822 13:51:03.725272 32262 solver.cpp:228] Iteration 47600, loss = 0.00579354\nI0822 13:51:03.725317 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:51:03.725332 32262 solver.cpp:244]     Train net output #1: loss = 0.0057937 (* 1 = 0.0057937 loss)\nI0822 13:51:03.805408 32262 sgd_solver.cpp:166] Iteration 47600, lr = 0.35\nI0822 13:53:22.562052 32262 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0822 13:54:45.045195 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0822 13:54:45.045469 32262 solver.cpp:404]     Test net output #1: loss = 0.856746 (* 1 = 0.856746 loss)\nI0822 13:54:46.375195 32262 solver.cpp:228] Iteration 47700, loss = 0.0480977\nI0822 13:54:46.375237 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 13:54:46.375254 32262 solver.cpp:244]     Train net output #1: loss = 0.0480979 (* 1 = 0.0480979 loss)\nI0822 13:54:46.454682 32262 sgd_solver.cpp:166] Iteration 47700, lr = 0.35\nI0822 13:57:05.213235 32262 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0822 13:58:27.675750 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7048\nI0822 13:58:27.676038 32262 solver.cpp:404]     Test net output #1: loss = 1.80766 (* 1 = 1.80766 loss)\nI0822 13:58:29.005676 32262 solver.cpp:228] Iteration 47800, loss = 0.0100904\nI0822 13:58:29.005717 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 13:58:29.005733 32262 solver.cpp:244]     Train net output #1: loss = 0.0100905 (* 1 = 0.0100905 loss)\nI0822 13:58:29.090530 32262 sgd_solver.cpp:166] Iteration 47800, lr = 0.35\nI0822 14:00:47.815512 32262 solver.cpp:337] Iteration 47900, 
Testing net (#0)\nI0822 14:02:10.277164 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7948\nI0822 14:02:10.277493 32262 solver.cpp:404]     Test net output #1: loss = 0.858325 (* 1 = 0.858325 loss)\nI0822 14:02:11.607056 32262 solver.cpp:228] Iteration 47900, loss = 0.0288797\nI0822 14:02:11.607102 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:02:11.607118 32262 solver.cpp:244]     Train net output #1: loss = 0.0288798 (* 1 = 0.0288798 loss)\nI0822 14:02:11.683574 32262 sgd_solver.cpp:166] Iteration 47900, lr = 0.35\nI0822 14:04:30.416141 32262 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0822 14:05:52.886346 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI0822 14:05:52.886622 32262 solver.cpp:404]     Test net output #1: loss = 0.877793 (* 1 = 0.877793 loss)\nI0822 14:05:54.216430 32262 solver.cpp:228] Iteration 48000, loss = 0.00347105\nI0822 14:05:54.216470 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:05:54.216485 32262 solver.cpp:244]     Train net output #1: loss = 0.00347119 (* 1 = 0.00347119 loss)\nI0822 14:05:54.300488 32262 sgd_solver.cpp:166] Iteration 48000, lr = 0.35\nI0822 14:08:13.104399 32262 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0822 14:09:35.564570 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80612\nI0822 14:09:35.564829 32262 solver.cpp:404]     Test net output #1: loss = 0.791792 (* 1 = 0.791792 loss)\nI0822 14:09:36.894796 32262 solver.cpp:228] Iteration 48100, loss = 0.0195442\nI0822 14:09:36.894837 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:09:36.894853 32262 solver.cpp:244]     Train net output #1: loss = 0.0195443 (* 1 = 0.0195443 loss)\nI0822 14:09:36.977408 32262 sgd_solver.cpp:166] Iteration 48100, lr = 0.35\nI0822 14:11:55.789270 32262 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0822 14:13:18.243239 32262 solver.cpp:404]     Test net output #0: accuracy = 0.68248\nI0822 
14:13:18.243628 32262 solver.cpp:404]     Test net output #1: loss = 1.64734 (* 1 = 1.64734 loss)\nI0822 14:13:19.573415 32262 solver.cpp:228] Iteration 48200, loss = 0.016246\nI0822 14:13:19.573457 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:13:19.573472 32262 solver.cpp:244]     Train net output #1: loss = 0.0162461 (* 1 = 0.0162461 loss)\nI0822 14:13:19.651262 32262 sgd_solver.cpp:166] Iteration 48200, lr = 0.35\nI0822 14:15:38.409572 32262 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0822 14:17:00.866552 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7216\nI0822 14:17:00.866807 32262 solver.cpp:404]     Test net output #1: loss = 1.22849 (* 1 = 1.22849 loss)\nI0822 14:17:02.197170 32262 solver.cpp:228] Iteration 48300, loss = 0.0590286\nI0822 14:17:02.197209 32262 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0822 14:17:02.197224 32262 solver.cpp:244]     Train net output #1: loss = 0.0590287 (* 1 = 0.0590287 loss)\nI0822 14:17:02.279569 32262 sgd_solver.cpp:166] Iteration 48300, lr = 0.35\nI0822 14:19:21.089512 32262 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0822 14:20:43.576023 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80592\nI0822 14:20:43.576329 32262 solver.cpp:404]     Test net output #1: loss = 0.812553 (* 1 = 0.812553 loss)\nI0822 14:20:44.906740 32262 solver.cpp:228] Iteration 48400, loss = 0.0139228\nI0822 14:20:44.906782 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:20:44.906797 32262 solver.cpp:244]     Train net output #1: loss = 0.013923 (* 1 = 0.013923 loss)\nI0822 14:20:44.984143 32262 sgd_solver.cpp:166] Iteration 48400, lr = 0.35\nI0822 14:23:03.810966 32262 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0822 14:24:26.297935 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7906\nI0822 14:24:26.298279 32262 solver.cpp:404]     Test net output #1: loss = 0.942561 (* 1 = 0.942561 loss)\nI0822 14:24:27.628360 32262 
solver.cpp:228] Iteration 48500, loss = 0.00397437\nI0822 14:24:27.628403 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:24:27.628419 32262 solver.cpp:244]     Train net output #1: loss = 0.00397449 (* 1 = 0.00397449 loss)\nI0822 14:24:27.706181 32262 sgd_solver.cpp:166] Iteration 48500, lr = 0.35\nI0822 14:26:46.430866 32262 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0822 14:28:08.752879 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82276\nI0822 14:28:08.753136 32262 solver.cpp:404]     Test net output #1: loss = 0.759559 (* 1 = 0.759559 loss)\nI0822 14:28:10.082953 32262 solver.cpp:228] Iteration 48600, loss = 0.00571888\nI0822 14:28:10.082993 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:28:10.083009 32262 solver.cpp:244]     Train net output #1: loss = 0.005719 (* 1 = 0.005719 loss)\nI0822 14:28:10.163692 32262 sgd_solver.cpp:166] Iteration 48600, lr = 0.35\nI0822 14:30:28.959614 32262 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0822 14:31:51.133940 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82188\nI0822 14:31:51.134184 32262 solver.cpp:404]     Test net output #1: loss = 0.713154 (* 1 = 0.713154 loss)\nI0822 14:31:52.464619 32262 solver.cpp:228] Iteration 48700, loss = 0.0264065\nI0822 14:31:52.464661 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:31:52.464678 32262 solver.cpp:244]     Train net output #1: loss = 0.0264066 (* 1 = 0.0264066 loss)\nI0822 14:31:52.546169 32262 sgd_solver.cpp:166] Iteration 48700, lr = 0.35\nI0822 14:34:11.430418 32262 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0822 14:35:33.509727 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80164\nI0822 14:35:33.509989 32262 solver.cpp:404]     Test net output #1: loss = 0.91888 (* 1 = 0.91888 loss)\nI0822 14:35:34.840085 32262 solver.cpp:228] Iteration 48800, loss = 0.039233\nI0822 14:35:34.840126 32262 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0822 14:35:34.840142 32262 solver.cpp:244]     Train net output #1: loss = 0.0392331 (* 1 = 0.0392331 loss)\nI0822 14:35:34.923470 32262 sgd_solver.cpp:166] Iteration 48800, lr = 0.35\nI0822 14:37:53.690693 32262 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0822 14:39:15.836320 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7634\nI0822 14:39:15.836591 32262 solver.cpp:404]     Test net output #1: loss = 1.27517 (* 1 = 1.27517 loss)\nI0822 14:39:17.166419 32262 solver.cpp:228] Iteration 48900, loss = 0.0370535\nI0822 14:39:17.166458 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 14:39:17.166474 32262 solver.cpp:244]     Train net output #1: loss = 0.0370536 (* 1 = 0.0370536 loss)\nI0822 14:39:17.245538 32262 sgd_solver.cpp:166] Iteration 48900, lr = 0.35\nI0822 14:41:36.022090 32262 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0822 14:42:58.376893 32262 solver.cpp:404]     Test net output #0: accuracy = 0.83188\nI0822 14:42:58.377212 32262 solver.cpp:404]     Test net output #1: loss = 0.675322 (* 1 = 0.675322 loss)\nI0822 14:42:59.706948 32262 solver.cpp:228] Iteration 49000, loss = 0.0573439\nI0822 14:42:59.706987 32262 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0822 14:42:59.707003 32262 solver.cpp:244]     Train net output #1: loss = 0.057344 (* 1 = 0.057344 loss)\nI0822 14:42:59.788473 32262 sgd_solver.cpp:166] Iteration 49000, lr = 0.35\nI0822 14:45:18.528992 32262 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0822 14:46:40.978428 32262 solver.cpp:404]     Test net output #0: accuracy = 0.81724\nI0822 14:46:40.978714 32262 solver.cpp:404]     Test net output #1: loss = 0.809027 (* 1 = 0.809027 loss)\nI0822 14:46:42.309197 32262 solver.cpp:228] Iteration 49100, loss = 0.00734354\nI0822 14:46:42.309239 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:46:42.309254 32262 solver.cpp:244]     Train net output #1: loss = 0.00734366 (* 1 = 0.00734366 loss)\nI0822 
14:46:42.390578 32262 sgd_solver.cpp:166] Iteration 49100, lr = 0.35\nI0822 14:49:01.167346 32262 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0822 14:50:23.644798 32262 solver.cpp:404]     Test net output #0: accuracy = 0.69912\nI0822 14:50:23.645107 32262 solver.cpp:404]     Test net output #1: loss = 1.71516 (* 1 = 1.71516 loss)\nI0822 14:50:24.974766 32262 solver.cpp:228] Iteration 49200, loss = 0.0297448\nI0822 14:50:24.974804 32262 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0822 14:50:24.974819 32262 solver.cpp:244]     Train net output #1: loss = 0.029745 (* 1 = 0.029745 loss)\nI0822 14:50:25.051710 32262 sgd_solver.cpp:166] Iteration 49200, lr = 0.35\nI0822 14:52:43.823844 32262 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0822 14:54:06.331918 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80844\nI0822 14:54:06.332181 32262 solver.cpp:404]     Test net output #1: loss = 0.728001 (* 1 = 0.728001 loss)\nI0822 14:54:07.662461 32262 solver.cpp:228] Iteration 49300, loss = 0.0117745\nI0822 14:54:07.662500 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:54:07.662515 32262 solver.cpp:244]     Train net output #1: loss = 0.0117746 (* 1 = 0.0117746 loss)\nI0822 14:54:07.742565 32262 sgd_solver.cpp:166] Iteration 49300, lr = 0.35\nI0822 14:56:26.833750 32262 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0822 14:57:49.371230 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7098\nI0822 14:57:49.371512 32262 solver.cpp:404]     Test net output #1: loss = 1.50208 (* 1 = 1.50208 loss)\nI0822 14:57:50.702425 32262 solver.cpp:228] Iteration 49400, loss = 0.00860415\nI0822 14:57:50.702468 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 14:57:50.702491 32262 solver.cpp:244]     Train net output #1: loss = 0.00860425 (* 1 = 0.00860425 loss)\nI0822 14:57:50.788005 32262 sgd_solver.cpp:166] Iteration 49400, lr = 0.35\nI0822 15:00:10.112612 32262 solver.cpp:337] Iteration 49500, 
Testing net (#0)\nI0822 15:01:32.673872 32262 solver.cpp:404]     Test net output #0: accuracy = 0.77608\nI0822 15:01:32.674177 32262 solver.cpp:404]     Test net output #1: loss = 1.21652 (* 1 = 1.21652 loss)\nI0822 15:01:34.005661 32262 solver.cpp:228] Iteration 49500, loss = 0.0198028\nI0822 15:01:34.005705 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 15:01:34.005728 32262 solver.cpp:244]     Train net output #1: loss = 0.0198029 (* 1 = 0.0198029 loss)\nI0822 15:01:34.087240 32262 sgd_solver.cpp:166] Iteration 49500, lr = 0.35\nI0822 15:03:53.463016 32262 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0822 15:05:16.028539 32262 solver.cpp:404]     Test net output #0: accuracy = 0.82964\nI0822 15:05:16.028816 32262 solver.cpp:404]     Test net output #1: loss = 0.747171 (* 1 = 0.747171 loss)\nI0822 15:05:17.359302 32262 solver.cpp:228] Iteration 49600, loss = 0.00445216\nI0822 15:05:17.359345 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:05:17.359369 32262 solver.cpp:244]     Train net output #1: loss = 0.00445227 (* 1 = 0.00445227 loss)\nI0822 15:05:17.454315 32262 sgd_solver.cpp:166] Iteration 49600, lr = 0.35\nI0822 15:07:36.807260 32262 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0822 15:08:59.355643 32262 solver.cpp:404]     Test net output #0: accuracy = 0.84008\nI0822 15:08:59.355893 32262 solver.cpp:404]     Test net output #1: loss = 0.736537 (* 1 = 0.736537 loss)\nI0822 15:09:00.685878 32262 solver.cpp:228] Iteration 49700, loss = 0.00491769\nI0822 15:09:00.685919 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:09:00.685933 32262 solver.cpp:244]     Train net output #1: loss = 0.0049178 (* 1 = 0.0049178 loss)\nI0822 15:09:00.770887 32262 sgd_solver.cpp:166] Iteration 49700, lr = 0.35\nI0822 15:11:20.072544 32262 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0822 15:12:42.589989 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8004\nI0822 15:12:42.590327 
32262 solver.cpp:404]     Test net output #1: loss = 0.8696 (* 1 = 0.8696 loss)\nI0822 15:12:43.921087 32262 solver.cpp:228] Iteration 49800, loss = 0.0112223\nI0822 15:12:43.921123 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:12:43.921138 32262 solver.cpp:244]     Train net output #1: loss = 0.0112224 (* 1 = 0.0112224 loss)\nI0822 15:12:44.006772 32262 sgd_solver.cpp:166] Iteration 49800, lr = 0.35\nI0822 15:15:03.321694 32262 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0822 15:16:25.687196 32262 solver.cpp:404]     Test net output #0: accuracy = 0.80632\nI0822 15:16:25.687475 32262 solver.cpp:404]     Test net output #1: loss = 0.757829 (* 1 = 0.757829 loss)\nI0822 15:16:27.018004 32262 solver.cpp:228] Iteration 49900, loss = 0.0135001\nI0822 15:16:27.018056 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:16:27.018074 32262 solver.cpp:244]     Train net output #1: loss = 0.0135002 (* 1 = 0.0135002 loss)\nI0822 15:16:27.106820 32262 sgd_solver.cpp:166] Iteration 49900, lr = 0.35\nI0822 15:18:46.427546 32262 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0822 15:20:08.928784 32262 solver.cpp:404]     Test net output #0: accuracy = 0.7318\nI0822 15:20:08.929054 32262 solver.cpp:404]     Test net output #1: loss = 1.25557 (* 1 = 1.25557 loss)\nI0822 15:20:10.259769 32262 solver.cpp:228] Iteration 50000, loss = 0.0281532\nI0822 15:20:10.259806 32262 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0822 15:20:10.259822 32262 solver.cpp:244]     Train net output #1: loss = 0.0281533 (* 1 = 0.0281533 loss)\nI0822 15:20:10.347153 32262 sgd_solver.cpp:107] MultiStep Status: Iteration 50000, step = 1\nI0822 15:20:10.347179 32262 sgd_solver.cpp:166] Iteration 50000, lr = 0.035\nI0822 15:22:29.695617 32262 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0822 15:23:52.163527 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8612\nI0822 15:23:52.163780 32262 solver.cpp:404]     Test net output 
#1: loss = 0.541975 (* 1 = 0.541975 loss)\nI0822 15:23:53.494096 32262 solver.cpp:228] Iteration 50100, loss = 0.0028894\nI0822 15:23:53.494140 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:23:53.494156 32262 solver.cpp:244]     Train net output #1: loss = 0.0028895 (* 1 = 0.0028895 loss)\nI0822 15:23:53.579262 32262 sgd_solver.cpp:166] Iteration 50100, lr = 0.035\nI0822 15:26:12.904820 32262 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0822 15:27:35.406215 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87056\nI0822 15:27:35.406469 32262 solver.cpp:404]     Test net output #1: loss = 0.51162 (* 1 = 0.51162 loss)\nI0822 15:27:36.736487 32262 solver.cpp:228] Iteration 50200, loss = 0.000663885\nI0822 15:27:36.736531 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:27:36.736546 32262 solver.cpp:244]     Train net output #1: loss = 0.00066399 (* 1 = 0.00066399 loss)\nI0822 15:27:36.822867 32262 sgd_solver.cpp:166] Iteration 50200, lr = 0.035\nI0822 15:29:56.205657 32262 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0822 15:31:18.717416 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87588\nI0822 15:31:18.717723 32262 solver.cpp:404]     Test net output #1: loss = 0.491454 (* 1 = 0.491454 loss)\nI0822 15:31:20.048236 32262 solver.cpp:228] Iteration 50300, loss = 0.000697723\nI0822 15:31:20.048279 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:31:20.048295 32262 solver.cpp:244]     Train net output #1: loss = 0.000697828 (* 1 = 0.000697828 loss)\nI0822 15:31:20.131790 32262 sgd_solver.cpp:166] Iteration 50300, lr = 0.035\nI0822 15:33:39.453531 32262 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0822 15:35:01.945430 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0822 15:35:01.945706 32262 solver.cpp:404]     Test net output #1: loss = 0.489108 (* 1 = 0.489108 loss)\nI0822 15:35:03.276073 32262 solver.cpp:228] Iteration 50400, loss = 
0.000350559\nI0822 15:35:03.276115 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:35:03.276131 32262 solver.cpp:244]     Train net output #1: loss = 0.000350664 (* 1 = 0.000350664 loss)\nI0822 15:35:03.359920 32262 sgd_solver.cpp:166] Iteration 50400, lr = 0.035\nI0822 15:37:22.707343 32262 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0822 15:38:44.929384 32262 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0822 15:38:44.929633 32262 solver.cpp:404]     Test net output #1: loss = 0.479553 (* 1 = 0.479553 loss)\nI0822 15:38:46.259917 32262 solver.cpp:228] Iteration 50500, loss = 0.000501023\nI0822 15:38:46.259960 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:38:46.259976 32262 solver.cpp:244]     Train net output #1: loss = 0.000501128 (* 1 = 0.000501128 loss)\nI0822 15:38:46.344548 32262 sgd_solver.cpp:166] Iteration 50500, lr = 0.035\nI0822 15:41:05.647176 32262 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0822 15:42:27.916656 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87908\nI0822 15:42:27.916965 32262 solver.cpp:404]     Test net output #1: loss = 0.48622 (* 1 = 0.48622 loss)\nI0822 15:42:29.247128 32262 solver.cpp:228] Iteration 50600, loss = 0.000298614\nI0822 15:42:29.247169 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:42:29.247184 32262 solver.cpp:244]     Train net output #1: loss = 0.000298719 (* 1 = 0.000298719 loss)\nI0822 15:42:29.331951 32262 sgd_solver.cpp:166] Iteration 50600, lr = 0.035\nI0822 15:44:48.663432 32262 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0822 15:46:11.187043 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0822 15:46:11.187300 32262 solver.cpp:404]     Test net output #1: loss = 0.482766 (* 1 = 0.482766 loss)\nI0822 15:46:12.517192 32262 solver.cpp:228] Iteration 50700, loss = 0.000309202\nI0822 15:46:12.517235 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:46:12.517249 
32262 solver.cpp:244]     Train net output #1: loss = 0.000309307 (* 1 = 0.000309307 loss)\nI0822 15:46:12.599617 32262 sgd_solver.cpp:166] Iteration 50700, lr = 0.035\nI0822 15:48:31.902640 32262 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0822 15:49:54.408885 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0822 15:49:54.409162 32262 solver.cpp:404]     Test net output #1: loss = 0.492191 (* 1 = 0.492191 loss)\nI0822 15:49:55.739562 32262 solver.cpp:228] Iteration 50800, loss = 0.000237977\nI0822 15:49:55.739605 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:49:55.739620 32262 solver.cpp:244]     Train net output #1: loss = 0.000238082 (* 1 = 0.000238082 loss)\nI0822 15:49:55.821557 32262 sgd_solver.cpp:166] Iteration 50800, lr = 0.035\nI0822 15:52:15.089152 32262 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0822 15:53:37.618520 32262 solver.cpp:404]     Test net output #0: accuracy = 0.88024\nI0822 15:53:37.618788 32262 solver.cpp:404]     Test net output #1: loss = 0.491245 (* 1 = 0.491245 loss)\nI0822 15:53:38.950456 32262 solver.cpp:228] Iteration 50900, loss = 0.00035832\nI0822 15:53:38.950500 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:53:38.950516 32262 solver.cpp:244]     Train net output #1: loss = 0.000358425 (* 1 = 0.000358425 loss)\nI0822 15:53:39.034250 32262 sgd_solver.cpp:166] Iteration 50900, lr = 0.035\nI0822 15:55:58.281697 32262 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0822 15:57:20.392772 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87836\nI0822 15:57:20.393076 32262 solver.cpp:404]     Test net output #1: loss = 0.503335 (* 1 = 0.503335 loss)\nI0822 15:57:21.725036 32262 solver.cpp:228] Iteration 51000, loss = 0.000370972\nI0822 15:57:21.725085 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 15:57:21.725101 32262 solver.cpp:244]     Train net output #1: loss = 0.000371077 (* 1 = 0.000371077 loss)\nI0822 15:57:21.810176 
32262 sgd_solver.cpp:166] Iteration 51000, lr = 0.035\nI0822 15:59:41.109802 32262 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0822 16:01:03.387112 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87872\nI0822 16:01:03.387370 32262 solver.cpp:404]     Test net output #1: loss = 0.50275 (* 1 = 0.50275 loss)\nI0822 16:01:04.717507 32262 solver.cpp:228] Iteration 51100, loss = 0.000351402\nI0822 16:01:04.717552 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:01:04.717568 32262 solver.cpp:244]     Train net output #1: loss = 0.000351507 (* 1 = 0.000351507 loss)\nI0822 16:01:04.798908 32262 sgd_solver.cpp:166] Iteration 51100, lr = 0.035\nI0822 16:03:24.073206 32262 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0822 16:04:46.138483 32262 solver.cpp:404]     Test net output #0: accuracy = 0.875881\nI0822 16:04:46.138756 32262 solver.cpp:404]     Test net output #1: loss = 0.515395 (* 1 = 0.515395 loss)\nI0822 16:04:47.470971 32262 solver.cpp:228] Iteration 51200, loss = 0.000376201\nI0822 16:04:47.471016 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:04:47.471031 32262 solver.cpp:244]     Train net output #1: loss = 0.000376306 (* 1 = 0.000376306 loss)\nI0822 16:04:47.553267 32262 sgd_solver.cpp:166] Iteration 51200, lr = 0.035\nI0822 16:07:06.775660 32262 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0822 16:08:29.095793 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87608\nI0822 16:08:29.096052 32262 solver.cpp:404]     Test net output #1: loss = 0.515227 (* 1 = 0.515227 loss)\nI0822 16:08:30.427217 32262 solver.cpp:228] Iteration 51300, loss = 0.000438491\nI0822 16:08:30.427261 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:08:30.427276 32262 solver.cpp:244]     Train net output #1: loss = 0.000438596 (* 1 = 0.000438596 loss)\nI0822 16:08:30.514983 32262 sgd_solver.cpp:166] Iteration 51300, lr = 0.035\nI0822 16:10:49.776932 32262 solver.cpp:337] Iteration 
51400, Testing net (#0)\nI0822 16:12:12.043973 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0822 16:12:12.044306 32262 solver.cpp:404]     Test net output #1: loss = 0.528095 (* 1 = 0.528095 loss)\nI0822 16:12:13.376232 32262 solver.cpp:228] Iteration 51400, loss = 0.000323701\nI0822 16:12:13.376277 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:12:13.376292 32262 solver.cpp:244]     Train net output #1: loss = 0.000323806 (* 1 = 0.000323806 loss)\nI0822 16:12:13.455981 32262 sgd_solver.cpp:166] Iteration 51400, lr = 0.035\nI0822 16:14:32.827904 32262 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0822 16:15:55.346712 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87216\nI0822 16:15:55.346994 32262 solver.cpp:404]     Test net output #1: loss = 0.527255 (* 1 = 0.527255 loss)\nI0822 16:15:56.677327 32262 solver.cpp:228] Iteration 51500, loss = 0.000484309\nI0822 16:15:56.677371 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:15:56.677387 32262 solver.cpp:244]     Train net output #1: loss = 0.000484414 (* 1 = 0.000484414 loss)\nI0822 16:15:56.766104 32262 sgd_solver.cpp:166] Iteration 51500, lr = 0.035\nI0822 16:18:16.114933 32262 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0822 16:19:38.606806 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86976\nI0822 16:19:38.607065 32262 solver.cpp:404]     Test net output #1: loss = 0.538067 (* 1 = 0.538067 loss)\nI0822 16:19:39.938339 32262 solver.cpp:228] Iteration 51600, loss = 0.000231977\nI0822 16:19:39.938381 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:19:39.938397 32262 solver.cpp:244]     Train net output #1: loss = 0.000232082 (* 1 = 0.000232082 loss)\nI0822 16:19:40.024770 32262 sgd_solver.cpp:166] Iteration 51600, lr = 0.035\nI0822 16:21:59.323361 32262 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0822 16:23:21.825112 32262 solver.cpp:404]     Test net output #0: accuracy = 
0.86996\nI0822 16:23:21.825430 32262 solver.cpp:404]     Test net output #1: loss = 0.535408 (* 1 = 0.535408 loss)\nI0822 16:23:23.157276 32262 solver.cpp:228] Iteration 51700, loss = 0.000271416\nI0822 16:23:23.157321 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:23:23.157337 32262 solver.cpp:244]     Train net output #1: loss = 0.000271521 (* 1 = 0.000271521 loss)\nI0822 16:23:23.237848 32262 sgd_solver.cpp:166] Iteration 51700, lr = 0.035\nI0822 16:25:42.587724 32262 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0822 16:27:05.109731 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86716\nI0822 16:27:05.110013 32262 solver.cpp:404]     Test net output #1: loss = 0.547898 (* 1 = 0.547898 loss)\nI0822 16:27:06.441679 32262 solver.cpp:228] Iteration 51800, loss = 0.000310529\nI0822 16:27:06.441725 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:27:06.441740 32262 solver.cpp:244]     Train net output #1: loss = 0.000310634 (* 1 = 0.000310634 loss)\nI0822 16:27:06.523175 32262 sgd_solver.cpp:166] Iteration 51800, lr = 0.035\nI0822 16:29:25.756463 32262 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0822 16:30:48.171679 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86712\nI0822 16:30:48.171953 32262 solver.cpp:404]     Test net output #1: loss = 0.544527 (* 1 = 0.544527 loss)\nI0822 16:30:49.505213 32262 solver.cpp:228] Iteration 51900, loss = 0.000373641\nI0822 16:30:49.505257 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:30:49.505273 32262 solver.cpp:244]     Train net output #1: loss = 0.000373746 (* 1 = 0.000373746 loss)\nI0822 16:30:49.583472 32262 sgd_solver.cpp:166] Iteration 51900, lr = 0.035\nI0822 16:33:08.908939 32262 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0822 16:34:31.444237 32262 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0822 16:34:31.444514 32262 solver.cpp:404]     Test net output #1: loss = 0.555756 (* 1 = 0.555756 
loss)\nI0822 16:34:32.774957 32262 solver.cpp:228] Iteration 52000, loss = 0.000264658\nI0822 16:34:32.775002 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:34:32.775023 32262 solver.cpp:244]     Train net output #1: loss = 0.000264763 (* 1 = 0.000264763 loss)\nI0822 16:34:32.858772 32262 sgd_solver.cpp:166] Iteration 52000, lr = 0.035\nI0822 16:36:52.238584 32262 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0822 16:38:14.719915 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86536\nI0822 16:38:14.720175 32262 solver.cpp:404]     Test net output #1: loss = 0.55041 (* 1 = 0.55041 loss)\nI0822 16:38:16.050658 32262 solver.cpp:228] Iteration 52100, loss = 0.000387679\nI0822 16:38:16.050704 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:38:16.050717 32262 solver.cpp:244]     Train net output #1: loss = 0.000387784 (* 1 = 0.000387784 loss)\nI0822 16:38:16.138701 32262 sgd_solver.cpp:166] Iteration 52100, lr = 0.035\nI0822 16:40:35.424537 32262 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0822 16:41:57.942591 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8634\nI0822 16:41:57.942850 32262 solver.cpp:404]     Test net output #1: loss = 0.562574 (* 1 = 0.562574 loss)\nI0822 16:41:59.272848 32262 solver.cpp:228] Iteration 52200, loss = 0.000291057\nI0822 16:41:59.272891 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:41:59.272907 32262 solver.cpp:244]     Train net output #1: loss = 0.000291162 (* 1 = 0.000291162 loss)\nI0822 16:41:59.355517 32262 sgd_solver.cpp:166] Iteration 52200, lr = 0.035\nI0822 16:44:18.607789 32262 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0822 16:45:41.024438 32262 solver.cpp:404]     Test net output #0: accuracy = 0.863\nI0822 16:45:41.024722 32262 solver.cpp:404]     Test net output #1: loss = 0.557766 (* 1 = 0.557766 loss)\nI0822 16:45:42.355026 32262 solver.cpp:228] Iteration 52300, loss = 0.000272759\nI0822 16:45:42.355073 32262 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:45:42.355089 32262 solver.cpp:244]     Train net output #1: loss = 0.000272865 (* 1 = 0.000272865 loss)\nI0822 16:45:42.438555 32262 sgd_solver.cpp:166] Iteration 52300, lr = 0.035\nI0822 16:48:01.688257 32262 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0822 16:49:24.194895 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86192\nI0822 16:49:24.195174 32262 solver.cpp:404]     Test net output #1: loss = 0.56772 (* 1 = 0.56772 loss)\nI0822 16:49:25.525688 32262 solver.cpp:228] Iteration 52400, loss = 0.00017857\nI0822 16:49:25.525732 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:49:25.525748 32262 solver.cpp:244]     Train net output #1: loss = 0.000178675 (* 1 = 0.000178675 loss)\nI0822 16:49:25.614475 32262 sgd_solver.cpp:166] Iteration 52400, lr = 0.035\nI0822 16:51:44.934881 32262 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0822 16:53:07.432973 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86232\nI0822 16:53:07.433300 32262 solver.cpp:404]     Test net output #1: loss = 0.561184 (* 1 = 0.561184 loss)\nI0822 16:53:08.763528 32262 solver.cpp:228] Iteration 52500, loss = 0.000358019\nI0822 16:53:08.763574 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:53:08.763591 32262 solver.cpp:244]     Train net output #1: loss = 0.000358124 (* 1 = 0.000358124 loss)\nI0822 16:53:08.850644 32262 sgd_solver.cpp:166] Iteration 52500, lr = 0.035\nI0822 16:55:28.165412 32262 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0822 16:56:50.559937 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86144\nI0822 16:56:50.560200 32262 solver.cpp:404]     Test net output #1: loss = 0.569881 (* 1 = 0.569881 loss)\nI0822 16:56:51.891396 32262 solver.cpp:228] Iteration 52600, loss = 0.000260785\nI0822 16:56:51.891441 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 16:56:51.891455 32262 solver.cpp:244]     Train net 
output #1: loss = 0.00026089 (* 1 = 0.00026089 loss)\nI0822 16:56:51.974103 32262 sgd_solver.cpp:166] Iteration 52600, lr = 0.035\nI0822 16:59:11.207033 32262 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0822 17:00:33.469229 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86244\nI0822 17:00:33.469527 32262 solver.cpp:404]     Test net output #1: loss = 0.563655 (* 1 = 0.563655 loss)\nI0822 17:00:34.800166 32262 solver.cpp:228] Iteration 52700, loss = 0.000296654\nI0822 17:00:34.800209 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:00:34.800225 32262 solver.cpp:244]     Train net output #1: loss = 0.000296759 (* 1 = 0.000296759 loss)\nI0822 17:00:34.886566 32262 sgd_solver.cpp:166] Iteration 52700, lr = 0.035\nI0822 17:02:54.210947 32262 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0822 17:04:16.484488 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86112\nI0822 17:04:16.484767 32262 solver.cpp:404]     Test net output #1: loss = 0.573538 (* 1 = 0.573538 loss)\nI0822 17:04:17.816169 32262 solver.cpp:228] Iteration 52800, loss = 0.000253447\nI0822 17:04:17.816212 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:04:17.816228 32262 solver.cpp:244]     Train net output #1: loss = 0.000253552 (* 1 = 0.000253552 loss)\nI0822 17:04:17.896818 32262 sgd_solver.cpp:166] Iteration 52800, lr = 0.035\nI0822 17:06:37.207015 32262 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0822 17:07:59.489050 32262 solver.cpp:404]     Test net output #0: accuracy = 0.86096\nI0822 17:07:59.489290 32262 solver.cpp:404]     Test net output #1: loss = 0.566003 (* 1 = 0.566003 loss)\nI0822 17:08:00.819347 32262 solver.cpp:228] Iteration 52900, loss = 0.000363221\nI0822 17:08:00.819391 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:08:00.819406 32262 solver.cpp:244]     Train net output #1: loss = 0.000363326 (* 1 = 0.000363326 loss)\nI0822 17:08:00.905881 32262 sgd_solver.cpp:166] Iteration 
52900, lr = 0.035\nI0822 17:10:20.220605 32262 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0822 17:11:42.438884 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85984\nI0822 17:11:42.439146 32262 solver.cpp:404]     Test net output #1: loss = 0.575905 (* 1 = 0.575905 loss)\nI0822 17:11:43.769697 32262 solver.cpp:228] Iteration 53000, loss = 0.000290292\nI0822 17:11:43.769742 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:11:43.769757 32262 solver.cpp:244]     Train net output #1: loss = 0.000290397 (* 1 = 0.000290397 loss)\nI0822 17:11:43.853480 32262 sgd_solver.cpp:166] Iteration 53000, lr = 0.035\nI0822 17:14:03.163940 32262 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0822 17:15:25.584699 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85916\nI0822 17:15:25.584980 32262 solver.cpp:404]     Test net output #1: loss = 0.567202 (* 1 = 0.567202 loss)\nI0822 17:15:26.915125 32262 solver.cpp:228] Iteration 53100, loss = 0.000292116\nI0822 17:15:26.915169 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:15:26.915184 32262 solver.cpp:244]     Train net output #1: loss = 0.000292221 (* 1 = 0.000292221 loss)\nI0822 17:15:26.997839 32262 sgd_solver.cpp:166] Iteration 53100, lr = 0.035\nI0822 17:17:46.348498 32262 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0822 17:19:08.828241 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8594\nI0822 17:19:08.828523 32262 solver.cpp:404]     Test net output #1: loss = 0.575311 (* 1 = 0.575311 loss)\nI0822 17:19:10.158543 32262 solver.cpp:228] Iteration 53200, loss = 0.000241683\nI0822 17:19:10.158586 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:19:10.158601 32262 solver.cpp:244]     Train net output #1: loss = 0.000241788 (* 1 = 0.000241788 loss)\nI0822 17:19:10.242007 32262 sgd_solver.cpp:166] Iteration 53200, lr = 0.035\nI0822 17:21:29.524173 32262 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0822 
17:22:51.963888 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85848\nI0822 17:22:51.964244 32262 solver.cpp:404]     Test net output #1: loss = 0.567626 (* 1 = 0.567626 loss)\nI0822 17:22:53.294054 32262 solver.cpp:228] Iteration 53300, loss = 0.000291062\nI0822 17:22:53.294098 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:22:53.294114 32262 solver.cpp:244]     Train net output #1: loss = 0.000291167 (* 1 = 0.000291167 loss)\nI0822 17:22:53.378072 32262 sgd_solver.cpp:166] Iteration 53300, lr = 0.035\nI0822 17:25:12.739066 32262 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0822 17:26:34.825131 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85752\nI0822 17:26:34.825371 32262 solver.cpp:404]     Test net output #1: loss = 0.577775 (* 1 = 0.577775 loss)\nI0822 17:26:36.155421 32262 solver.cpp:228] Iteration 53400, loss = 0.000211119\nI0822 17:26:36.155464 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:26:36.155480 32262 solver.cpp:244]     Train net output #1: loss = 0.000211224 (* 1 = 0.000211224 loss)\nI0822 17:26:36.241179 32262 sgd_solver.cpp:166] Iteration 53400, lr = 0.035\nI0822 17:28:55.591547 32262 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0822 17:30:17.681850 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0822 17:30:17.682119 32262 solver.cpp:404]     Test net output #1: loss = 0.570346 (* 1 = 0.570346 loss)\nI0822 17:30:19.012958 32262 solver.cpp:228] Iteration 53500, loss = 0.000384371\nI0822 17:30:19.013000 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:30:19.013015 32262 solver.cpp:244]     Train net output #1: loss = 0.000384476 (* 1 = 0.000384476 loss)\nI0822 17:30:19.099336 32262 sgd_solver.cpp:166] Iteration 53500, lr = 0.035\nI0822 17:32:38.349033 32262 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0822 17:34:00.424938 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0822 17:34:00.425184 32262 
solver.cpp:404]     Test net output #1: loss = 0.579633 (* 1 = 0.579633 loss)\nI0822 17:34:01.756337 32262 solver.cpp:228] Iteration 53600, loss = 0.000281641\nI0822 17:34:01.756377 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:34:01.756392 32262 solver.cpp:244]     Train net output #1: loss = 0.000281746 (* 1 = 0.000281746 loss)\nI0822 17:34:01.843199 32262 sgd_solver.cpp:166] Iteration 53600, lr = 0.035\nI0822 17:36:21.182221 32262 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0822 17:37:43.261639 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85776\nI0822 17:37:43.261885 32262 solver.cpp:404]     Test net output #1: loss = 0.569823 (* 1 = 0.569823 loss)\nI0822 17:37:44.592959 32262 solver.cpp:228] Iteration 53700, loss = 0.000445561\nI0822 17:37:44.593005 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:37:44.593021 32262 solver.cpp:244]     Train net output #1: loss = 0.000445666 (* 1 = 0.000445666 loss)\nI0822 17:37:44.676597 32262 sgd_solver.cpp:166] Iteration 53700, lr = 0.035\nI0822 17:40:04.030952 32262 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0822 17:41:26.125783 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85704\nI0822 17:41:26.126112 32262 solver.cpp:404]     Test net output #1: loss = 0.577327 (* 1 = 0.577327 loss)\nI0822 17:41:27.456120 32262 solver.cpp:228] Iteration 53800, loss = 0.000306114\nI0822 17:41:27.456162 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:41:27.456178 32262 solver.cpp:244]     Train net output #1: loss = 0.000306219 (* 1 = 0.000306219 loss)\nI0822 17:41:27.543864 32262 sgd_solver.cpp:166] Iteration 53800, lr = 0.035\nI0822 17:43:46.854960 32262 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0822 17:45:09.242194 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85784\nI0822 17:45:09.242475 32262 solver.cpp:404]     Test net output #1: loss = 0.568343 (* 1 = 0.568343 loss)\nI0822 17:45:10.572016 32262 
solver.cpp:228] Iteration 53900, loss = 0.00037549\nI0822 17:45:10.572065 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:45:10.572082 32262 solver.cpp:244]     Train net output #1: loss = 0.000375595 (* 1 = 0.000375595 loss)\nI0822 17:45:10.658918 32262 sgd_solver.cpp:166] Iteration 53900, lr = 0.035\nI0822 17:47:29.968621 32262 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0822 17:48:52.398272 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85744\nI0822 17:48:52.398560 32262 solver.cpp:404]     Test net output #1: loss = 0.576534 (* 1 = 0.576534 loss)\nI0822 17:48:53.729537 32262 solver.cpp:228] Iteration 54000, loss = 0.000328389\nI0822 17:48:53.729579 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:48:53.729595 32262 solver.cpp:244]     Train net output #1: loss = 0.000328494 (* 1 = 0.000328494 loss)\nI0822 17:48:53.812011 32262 sgd_solver.cpp:166] Iteration 54000, lr = 0.035\nI0822 17:51:13.120334 32262 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0822 17:52:35.482101 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0822 17:52:35.482431 32262 solver.cpp:404]     Test net output #1: loss = 0.568497 (* 1 = 0.568497 loss)\nI0822 17:52:36.814281 32262 solver.cpp:228] Iteration 54100, loss = 0.000314749\nI0822 17:52:36.814322 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 17:52:36.814338 32262 solver.cpp:244]     Train net output #1: loss = 0.000314854 (* 1 = 0.000314854 loss)\nI0822 17:52:36.895761 32262 sgd_solver.cpp:166] Iteration 54100, lr = 0.035\nI0822 17:54:56.216794 32262 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0822 17:56:18.595183 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85728\nI0822 17:56:18.595477 32262 solver.cpp:404]     Test net output #1: loss = 0.575518 (* 1 = 0.575518 loss)\nI0822 17:56:19.926789 32262 solver.cpp:228] Iteration 54200, loss = 0.000279291\nI0822 17:56:19.926829 32262 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0822 17:56:19.926846 32262 solver.cpp:244]     Train net output #1: loss = 0.000279396 (* 1 = 0.000279396 loss)\nI0822 17:56:20.007578 32262 sgd_solver.cpp:166] Iteration 54200, lr = 0.035\nI0822 17:58:39.298269 32262 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0822 18:00:01.562942 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85828\nI0822 18:00:01.563226 32262 solver.cpp:404]     Test net output #1: loss = 0.566745 (* 1 = 0.566745 loss)\nI0822 18:00:02.893162 32262 solver.cpp:228] Iteration 54300, loss = 0.000411831\nI0822 18:00:02.893201 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:00:02.893216 32262 solver.cpp:244]     Train net output #1: loss = 0.000411936 (* 1 = 0.000411936 loss)\nI0822 18:00:02.979851 32262 sgd_solver.cpp:166] Iteration 54300, lr = 0.035\nI0822 18:02:22.258601 32262 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0822 18:03:44.692750 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0822 18:03:44.693063 32262 solver.cpp:404]     Test net output #1: loss = 0.575152 (* 1 = 0.575152 loss)\nI0822 18:03:46.022608 32262 solver.cpp:228] Iteration 54400, loss = 0.000225253\nI0822 18:03:46.022649 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:03:46.022665 32262 solver.cpp:244]     Train net output #1: loss = 0.000225358 (* 1 = 0.000225358 loss)\nI0822 18:03:46.108780 32262 sgd_solver.cpp:166] Iteration 54400, lr = 0.035\nI0822 18:06:05.501338 32262 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0822 18:07:27.696892 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85796\nI0822 18:07:27.697154 32262 solver.cpp:404]     Test net output #1: loss = 0.566068 (* 1 = 0.566068 loss)\nI0822 18:07:29.026880 32262 solver.cpp:228] Iteration 54500, loss = 0.000413542\nI0822 18:07:29.026922 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:07:29.026938 32262 solver.cpp:244]     Train net output #1: loss = 0.000413647 
(* 1 = 0.000413647 loss)\nI0822 18:07:29.108068 32262 sgd_solver.cpp:166] Iteration 54500, lr = 0.035\nI0822 18:09:48.446610 32262 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0822 18:11:10.926039 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0822 18:11:10.926368 32262 solver.cpp:404]     Test net output #1: loss = 0.574575 (* 1 = 0.574575 loss)\nI0822 18:11:12.256322 32262 solver.cpp:228] Iteration 54600, loss = 0.000226303\nI0822 18:11:12.256366 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:11:12.256381 32262 solver.cpp:244]     Train net output #1: loss = 0.000226408 (* 1 = 0.000226408 loss)\nI0822 18:11:12.341825 32262 sgd_solver.cpp:166] Iteration 54600, lr = 0.035\nI0822 18:13:31.625787 32262 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0822 18:14:53.940306 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85736\nI0822 18:14:53.940577 32262 solver.cpp:404]     Test net output #1: loss = 0.566382 (* 1 = 0.566382 loss)\nI0822 18:14:55.270373 32262 solver.cpp:228] Iteration 54700, loss = 0.000352752\nI0822 18:14:55.270417 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:14:55.270432 32262 solver.cpp:244]     Train net output #1: loss = 0.000352857 (* 1 = 0.000352857 loss)\nI0822 18:14:55.351903 32262 sgd_solver.cpp:166] Iteration 54700, lr = 0.035\nI0822 18:17:14.638290 32262 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0822 18:18:36.691247 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85648\nI0822 18:18:36.691525 32262 solver.cpp:404]     Test net output #1: loss = 0.575261 (* 1 = 0.575261 loss)\nI0822 18:18:38.021222 32262 solver.cpp:228] Iteration 54800, loss = 0.000322015\nI0822 18:18:38.021265 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:18:38.021281 32262 solver.cpp:244]     Train net output #1: loss = 0.00032212 (* 1 = 0.00032212 loss)\nI0822 18:18:38.102334 32262 sgd_solver.cpp:166] Iteration 54800, lr = 0.035\nI0822 
18:20:57.430485 32262 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0822 18:22:19.738168 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85728\nI0822 18:22:19.738453 32262 solver.cpp:404]     Test net output #1: loss = 0.564595 (* 1 = 0.564595 loss)\nI0822 18:22:21.067837 32262 solver.cpp:228] Iteration 54900, loss = 0.000413037\nI0822 18:22:21.067880 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:22:21.067895 32262 solver.cpp:244]     Train net output #1: loss = 0.000413142 (* 1 = 0.000413142 loss)\nI0822 18:22:21.154378 32262 sgd_solver.cpp:166] Iteration 54900, lr = 0.035\nI0822 18:24:40.507803 32262 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0822 18:26:02.631239 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85672\nI0822 18:26:02.631516 32262 solver.cpp:404]     Test net output #1: loss = 0.574518 (* 1 = 0.574518 loss)\nI0822 18:26:03.961717 32262 solver.cpp:228] Iteration 55000, loss = 0.000239582\nI0822 18:26:03.961760 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:26:03.961776 32262 solver.cpp:244]     Train net output #1: loss = 0.000239687 (* 1 = 0.000239687 loss)\nI0822 18:26:04.050317 32262 sgd_solver.cpp:166] Iteration 55000, lr = 0.035\nI0822 18:28:23.404564 32262 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0822 18:29:45.509050 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85668\nI0822 18:29:45.509310 32262 solver.cpp:404]     Test net output #1: loss = 0.563525 (* 1 = 0.563525 loss)\nI0822 18:29:46.839174 32262 solver.cpp:228] Iteration 55100, loss = 0.000378332\nI0822 18:29:46.839217 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:29:46.839233 32262 solver.cpp:244]     Train net output #1: loss = 0.000378437 (* 1 = 0.000378437 loss)\nI0822 18:29:46.923879 32262 sgd_solver.cpp:166] Iteration 55100, lr = 0.035\nI0822 18:32:06.238442 32262 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0822 18:33:28.299882 32262 
solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0822 18:33:28.300184 32262 solver.cpp:404]     Test net output #1: loss = 0.574026 (* 1 = 0.574026 loss)\nI0822 18:33:29.629901 32262 solver.cpp:228] Iteration 55200, loss = 0.000251444\nI0822 18:33:29.629945 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:33:29.629961 32262 solver.cpp:244]     Train net output #1: loss = 0.000251549 (* 1 = 0.000251549 loss)\nI0822 18:33:29.711243 32262 sgd_solver.cpp:166] Iteration 55200, lr = 0.035\nI0822 18:35:49.024471 32262 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0822 18:37:11.082991 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8568\nI0822 18:37:11.083283 32262 solver.cpp:404]     Test net output #1: loss = 0.563882 (* 1 = 0.563882 loss)\nI0822 18:37:12.413077 32262 solver.cpp:228] Iteration 55300, loss = 0.00043358\nI0822 18:37:12.413125 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:37:12.413141 32262 solver.cpp:244]     Train net output #1: loss = 0.000433685 (* 1 = 0.000433685 loss)\nI0822 18:37:12.500490 32262 sgd_solver.cpp:166] Iteration 55300, lr = 0.035\nI0822 18:39:31.803321 32262 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0822 18:40:54.155174 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85692\nI0822 18:40:54.155498 32262 solver.cpp:404]     Test net output #1: loss = 0.570707 (* 1 = 0.570707 loss)\nI0822 18:40:55.485461 32262 solver.cpp:228] Iteration 55400, loss = 0.000311937\nI0822 18:40:55.485505 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:40:55.485522 32262 solver.cpp:244]     Train net output #1: loss = 0.000312042 (* 1 = 0.000312042 loss)\nI0822 18:40:55.573318 32262 sgd_solver.cpp:166] Iteration 55400, lr = 0.035\nI0822 18:43:14.853484 32262 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0822 18:44:37.286002 32262 solver.cpp:404]     Test net output #0: accuracy = 0.857\nI0822 18:44:37.286285 32262 solver.cpp:404]     Test net 
output #1: loss = 0.561607 (* 1 = 0.561607 loss)\nI0822 18:44:38.616106 32262 solver.cpp:228] Iteration 55500, loss = 0.000442624\nI0822 18:44:38.616149 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:44:38.616164 32262 solver.cpp:244]     Train net output #1: loss = 0.000442729 (* 1 = 0.000442729 loss)\nI0822 18:44:38.702359 32262 sgd_solver.cpp:166] Iteration 55500, lr = 0.035\nI0822 18:46:58.038172 32262 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0822 18:48:20.466261 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85628\nI0822 18:48:20.466506 32262 solver.cpp:404]     Test net output #1: loss = 0.570515 (* 1 = 0.570515 loss)\nI0822 18:48:21.796285 32262 solver.cpp:228] Iteration 55600, loss = 0.000267924\nI0822 18:48:21.796329 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:48:21.796344 32262 solver.cpp:244]     Train net output #1: loss = 0.00026803 (* 1 = 0.00026803 loss)\nI0822 18:48:21.877164 32262 sgd_solver.cpp:166] Iteration 55600, lr = 0.035\nI0822 18:50:40.853201 32262 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0822 18:52:03.242023 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85712\nI0822 18:52:03.242314 32262 solver.cpp:404]     Test net output #1: loss = 0.559106 (* 1 = 0.559106 loss)\nI0822 18:52:04.572310 32262 solver.cpp:228] Iteration 55700, loss = 0.000434028\nI0822 18:52:04.572352 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:52:04.572368 32262 solver.cpp:244]     Train net output #1: loss = 0.000434133 (* 1 = 0.000434133 loss)\nI0822 18:52:04.651798 32262 sgd_solver.cpp:166] Iteration 55700, lr = 0.035\nI0822 18:54:23.414718 32262 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0822 18:55:45.116576 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85624\nI0822 18:55:45.116899 32262 solver.cpp:404]     Test net output #1: loss = 0.569932 (* 1 = 0.569932 loss)\nI0822 18:55:46.444072 32262 solver.cpp:228] Iteration 55800, 
loss = 0.000332856\nI0822 18:55:46.444108 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:55:46.444123 32262 solver.cpp:244]     Train net output #1: loss = 0.000332961 (* 1 = 0.000332961 loss)\nI0822 18:55:46.521392 32262 sgd_solver.cpp:166] Iteration 55800, lr = 0.035\nI0822 18:58:04.470247 32262 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0822 18:59:26.127624 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0822 18:59:26.127948 32262 solver.cpp:404]     Test net output #1: loss = 0.560153 (* 1 = 0.560153 loss)\nI0822 18:59:27.454869 32262 solver.cpp:228] Iteration 55900, loss = 0.000466607\nI0822 18:59:27.454903 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 18:59:27.454918 32262 solver.cpp:244]     Train net output #1: loss = 0.000466712 (* 1 = 0.000466712 loss)\nI0822 18:59:27.529999 32262 sgd_solver.cpp:166] Iteration 55900, lr = 0.035\nI0822 19:01:45.500140 32262 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0822 19:03:07.151175 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85644\nI0822 19:03:07.151505 32262 solver.cpp:404]     Test net output #1: loss = 0.567777 (* 1 = 0.567777 loss)\nI0822 19:03:08.478291 32262 solver.cpp:228] Iteration 56000, loss = 0.000284131\nI0822 19:03:08.478323 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:03:08.478338 32262 solver.cpp:244]     Train net output #1: loss = 0.000284236 (* 1 = 0.000284236 loss)\nI0822 19:03:08.553980 32262 sgd_solver.cpp:166] Iteration 56000, lr = 0.035\nI0822 19:05:26.482688 32262 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0822 19:06:48.143227 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85692\nI0822 19:06:48.143559 32262 solver.cpp:404]     Test net output #1: loss = 0.558756 (* 1 = 0.558756 loss)\nI0822 19:06:49.471017 32262 solver.cpp:228] Iteration 56100, loss = 0.000337206\nI0822 19:06:49.471062 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
19:06:49.471079 32262 solver.cpp:244]     Train net output #1: loss = 0.000337311 (* 1 = 0.000337311 loss)\nI0822 19:06:49.551177 32262 sgd_solver.cpp:166] Iteration 56100, lr = 0.035\nI0822 19:09:07.472668 32262 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0822 19:10:29.123757 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85664\nI0822 19:10:29.124096 32262 solver.cpp:404]     Test net output #1: loss = 0.568622 (* 1 = 0.568622 loss)\nI0822 19:10:30.451136 32262 solver.cpp:228] Iteration 56200, loss = 0.000262352\nI0822 19:10:30.451186 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:10:30.451203 32262 solver.cpp:244]     Train net output #1: loss = 0.000262457 (* 1 = 0.000262457 loss)\nI0822 19:10:30.534873 32262 sgd_solver.cpp:166] Iteration 56200, lr = 0.035\nI0822 19:12:48.481484 32262 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0822 19:14:10.158116 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0822 19:14:10.158447 32262 solver.cpp:404]     Test net output #1: loss = 0.558757 (* 1 = 0.558757 loss)\nI0822 19:14:11.486596 32262 solver.cpp:228] Iteration 56300, loss = 0.000459319\nI0822 19:14:11.486632 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:14:11.486647 32262 solver.cpp:244]     Train net output #1: loss = 0.000459424 (* 1 = 0.000459424 loss)\nI0822 19:14:11.560063 32262 sgd_solver.cpp:166] Iteration 56300, lr = 0.035\nI0822 19:16:29.481148 32262 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0822 19:17:51.150820 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85656\nI0822 19:17:51.151183 32262 solver.cpp:404]     Test net output #1: loss = 0.567666 (* 1 = 0.567666 loss)\nI0822 19:17:52.477669 32262 solver.cpp:228] Iteration 56400, loss = 0.000297486\nI0822 19:17:52.477705 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:17:52.477721 32262 solver.cpp:244]     Train net output #1: loss = 0.000297591 (* 1 = 0.000297591 
loss)\nI0822 19:17:52.555699 32262 sgd_solver.cpp:166] Iteration 56400, lr = 0.035\nI0822 19:20:10.522028 32262 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0822 19:21:32.189278 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85744\nI0822 19:21:32.189586 32262 solver.cpp:404]     Test net output #1: loss = 0.556629 (* 1 = 0.556629 loss)\nI0822 19:21:33.516566 32262 solver.cpp:228] Iteration 56500, loss = 0.000368067\nI0822 19:21:33.516611 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:21:33.516628 32262 solver.cpp:244]     Train net output #1: loss = 0.000368172 (* 1 = 0.000368172 loss)\nI0822 19:21:33.594753 32262 sgd_solver.cpp:166] Iteration 56500, lr = 0.035\nI0822 19:23:51.534988 32262 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0822 19:25:13.200322 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85664\nI0822 19:25:13.200631 32262 solver.cpp:404]     Test net output #1: loss = 0.565591 (* 1 = 0.565591 loss)\nI0822 19:25:14.526824 32262 solver.cpp:228] Iteration 56600, loss = 0.000298522\nI0822 19:25:14.526860 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:25:14.526880 32262 solver.cpp:244]     Train net output #1: loss = 0.000298627 (* 1 = 0.000298627 loss)\nI0822 19:25:14.602443 32262 sgd_solver.cpp:166] Iteration 56600, lr = 0.035\nI0822 19:27:32.589313 32262 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0822 19:28:54.199342 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8578\nI0822 19:28:54.199658 32262 solver.cpp:404]     Test net output #1: loss = 0.55509 (* 1 = 0.55509 loss)\nI0822 19:28:55.525699 32262 solver.cpp:228] Iteration 56700, loss = 0.000539628\nI0822 19:28:55.525745 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:28:55.525763 32262 solver.cpp:244]     Train net output #1: loss = 0.000539733 (* 1 = 0.000539733 loss)\nI0822 19:28:55.605953 32262 sgd_solver.cpp:166] Iteration 56700, lr = 0.035\nI0822 19:31:13.573490 32262 
solver.cpp:337] Iteration 56800, Testing net (#0)\nI0822 19:32:35.354307 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85692\nI0822 19:32:35.354642 32262 solver.cpp:404]     Test net output #1: loss = 0.564301 (* 1 = 0.564301 loss)\nI0822 19:32:36.686125 32262 solver.cpp:228] Iteration 56800, loss = 0.000267376\nI0822 19:32:36.686170 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:32:36.686187 32262 solver.cpp:244]     Train net output #1: loss = 0.000267481 (* 1 = 0.000267481 loss)\nI0822 19:32:36.762475 32262 sgd_solver.cpp:166] Iteration 56800, lr = 0.035\nI0822 19:34:55.639847 32262 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0822 19:36:17.848661 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85752\nI0822 19:36:17.848927 32262 solver.cpp:404]     Test net output #1: loss = 0.554351 (* 1 = 0.554351 loss)\nI0822 19:36:19.180110 32262 solver.cpp:228] Iteration 56900, loss = 0.00045957\nI0822 19:36:19.180151 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:36:19.180166 32262 solver.cpp:244]     Train net output #1: loss = 0.000459675 (* 1 = 0.000459675 loss)\nI0822 19:36:19.261868 32262 sgd_solver.cpp:166] Iteration 56900, lr = 0.035\nI0822 19:38:37.998194 32262 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0822 19:40:00.241303 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85616\nI0822 19:40:00.241552 32262 solver.cpp:404]     Test net output #1: loss = 0.564945 (* 1 = 0.564945 loss)\nI0822 19:40:01.572765 32262 solver.cpp:228] Iteration 57000, loss = 0.000317253\nI0822 19:40:01.572805 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:40:01.572821 32262 solver.cpp:244]     Train net output #1: loss = 0.000317358 (* 1 = 0.000317358 loss)\nI0822 19:40:01.652945 32262 sgd_solver.cpp:166] Iteration 57000, lr = 0.035\nI0822 19:42:20.391652 32262 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0822 19:43:42.847820 32262 solver.cpp:404]     Test net 
output #0: accuracy = 0.85716\nI0822 19:43:42.848167 32262 solver.cpp:404]     Test net output #1: loss = 0.555177 (* 1 = 0.555177 loss)\nI0822 19:43:44.178325 32262 solver.cpp:228] Iteration 57100, loss = 0.000479367\nI0822 19:43:44.178373 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:43:44.178396 32262 solver.cpp:244]     Train net output #1: loss = 0.000479472 (* 1 = 0.000479472 loss)\nI0822 19:43:44.259120 32262 sgd_solver.cpp:166] Iteration 57100, lr = 0.035\nI0822 19:46:03.073354 32262 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0822 19:47:24.666301 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI0822 19:47:24.666612 32262 solver.cpp:404]     Test net output #1: loss = 0.564549 (* 1 = 0.564549 loss)\nI0822 19:47:25.992851 32262 solver.cpp:228] Iteration 57200, loss = 0.000291763\nI0822 19:47:25.992885 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:47:25.992900 32262 solver.cpp:244]     Train net output #1: loss = 0.000291868 (* 1 = 0.000291868 loss)\nI0822 19:47:26.075057 32262 sgd_solver.cpp:166] Iteration 57200, lr = 0.035\nI0822 19:49:44.730238 32262 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0822 19:51:06.333504 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85728\nI0822 19:51:06.333844 32262 solver.cpp:404]     Test net output #1: loss = 0.553915 (* 1 = 0.553915 loss)\nI0822 19:51:07.661005 32262 solver.cpp:228] Iteration 57300, loss = 0.000535476\nI0822 19:51:07.661041 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:51:07.661056 32262 solver.cpp:244]     Train net output #1: loss = 0.000535581 (* 1 = 0.000535581 loss)\nI0822 19:51:07.746449 32262 sgd_solver.cpp:166] Iteration 57300, lr = 0.035\nI0822 19:53:26.357115 32262 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0822 19:54:47.958231 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85676\nI0822 19:54:47.958559 32262 solver.cpp:404]     Test net output #1: loss = 
0.562175 (* 1 = 0.562175 loss)\nI0822 19:54:49.284804 32262 solver.cpp:228] Iteration 57400, loss = 0.000279694\nI0822 19:54:49.284849 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:54:49.284865 32262 solver.cpp:244]     Train net output #1: loss = 0.000279799 (* 1 = 0.000279799 loss)\nI0822 19:54:49.369740 32262 sgd_solver.cpp:166] Iteration 57400, lr = 0.035\nI0822 19:57:07.963076 32262 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0822 19:58:29.555070 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0822 19:58:29.555371 32262 solver.cpp:404]     Test net output #1: loss = 0.551624 (* 1 = 0.551624 loss)\nI0822 19:58:30.881249 32262 solver.cpp:228] Iteration 57500, loss = 0.000558609\nI0822 19:58:30.881295 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 19:58:30.881311 32262 solver.cpp:244]     Train net output #1: loss = 0.000558714 (* 1 = 0.000558714 loss)\nI0822 19:58:30.965176 32262 sgd_solver.cpp:166] Iteration 57500, lr = 0.035\nI0822 20:00:49.547627 32262 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0822 20:02:11.194919 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85612\nI0822 20:02:11.195228 32262 solver.cpp:404]     Test net output #1: loss = 0.561956 (* 1 = 0.561956 loss)\nI0822 20:02:12.522186 32262 solver.cpp:228] Iteration 57600, loss = 0.000294039\nI0822 20:02:12.522222 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:02:12.522238 32262 solver.cpp:244]     Train net output #1: loss = 0.000294144 (* 1 = 0.000294144 loss)\nI0822 20:02:12.602721 32262 sgd_solver.cpp:166] Iteration 57600, lr = 0.035\nI0822 20:04:31.389600 32262 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0822 20:05:53.047463 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85716\nI0822 20:05:53.047790 32262 solver.cpp:404]     Test net output #1: loss = 0.551601 (* 1 = 0.551601 loss)\nI0822 20:05:54.373898 32262 solver.cpp:228] Iteration 57700, loss = 
0.000460146\nI0822 20:05:54.373939 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:05:54.373953 32262 solver.cpp:244]     Train net output #1: loss = 0.000460251 (* 1 = 0.000460251 loss)\nI0822 20:05:54.460093 32262 sgd_solver.cpp:166] Iteration 57700, lr = 0.035\nI0822 20:08:13.012164 32262 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0822 20:09:34.661193 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85672\nI0822 20:09:34.661521 32262 solver.cpp:404]     Test net output #1: loss = 0.559977 (* 1 = 0.559977 loss)\nI0822 20:09:35.987988 32262 solver.cpp:228] Iteration 57800, loss = 0.000274247\nI0822 20:09:35.988024 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:09:35.988040 32262 solver.cpp:244]     Train net output #1: loss = 0.000274352 (* 1 = 0.000274352 loss)\nI0822 20:09:36.072584 32262 sgd_solver.cpp:166] Iteration 57800, lr = 0.035\nI0822 20:11:54.703495 32262 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0822 20:13:16.358362 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0822 20:13:16.358712 32262 solver.cpp:404]     Test net output #1: loss = 0.549921 (* 1 = 0.549921 loss)\nI0822 20:13:17.684231 32262 solver.cpp:228] Iteration 57900, loss = 0.000508193\nI0822 20:13:17.684278 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:13:17.684294 32262 solver.cpp:244]     Train net output #1: loss = 0.000508298 (* 1 = 0.000508298 loss)\nI0822 20:13:17.769579 32262 sgd_solver.cpp:166] Iteration 57900, lr = 0.035\nI0822 20:15:36.352022 32262 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0822 20:16:57.998576 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85644\nI0822 20:16:57.998929 32262 solver.cpp:404]     Test net output #1: loss = 0.557081 (* 1 = 0.557081 loss)\nI0822 20:16:59.325580 32262 solver.cpp:228] Iteration 58000, loss = 0.000322514\nI0822 20:16:59.325626 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 
20:16:59.325642 32262 solver.cpp:244]     Train net output #1: loss = 0.000322619 (* 1 = 0.000322619 loss)\nI0822 20:16:59.407163 32262 sgd_solver.cpp:166] Iteration 58000, lr = 0.035\nI0822 20:19:17.980587 32262 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0822 20:20:39.637236 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0822 20:20:39.637574 32262 solver.cpp:404]     Test net output #1: loss = 0.549392 (* 1 = 0.549392 loss)\nI0822 20:20:40.964789 32262 solver.cpp:228] Iteration 58100, loss = 0.000403555\nI0822 20:20:40.964825 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:20:40.964840 32262 solver.cpp:244]     Train net output #1: loss = 0.00040366 (* 1 = 0.00040366 loss)\nI0822 20:20:41.051468 32262 sgd_solver.cpp:166] Iteration 58100, lr = 0.035\nI0822 20:22:59.605257 32262 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0822 20:24:21.252975 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8562\nI0822 20:24:21.253289 32262 solver.cpp:404]     Test net output #1: loss = 0.560122 (* 1 = 0.560122 loss)\nI0822 20:24:22.579757 32262 solver.cpp:228] Iteration 58200, loss = 0.000352102\nI0822 20:24:22.579794 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:24:22.579809 32262 solver.cpp:244]     Train net output #1: loss = 0.000352207 (* 1 = 0.000352207 loss)\nI0822 20:24:22.665441 32262 sgd_solver.cpp:166] Iteration 58200, lr = 0.035\nI0822 20:26:41.183444 32262 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0822 20:28:02.833206 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0822 20:28:02.833537 32262 solver.cpp:404]     Test net output #1: loss = 0.55036 (* 1 = 0.55036 loss)\nI0822 20:28:04.159234 32262 solver.cpp:228] Iteration 58300, loss = 0.000497826\nI0822 20:28:04.159271 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:28:04.159286 32262 solver.cpp:244]     Train net output #1: loss = 0.000497931 (* 1 = 0.000497931 loss)\nI0822 
20:28:04.247254 32262 sgd_solver.cpp:166] Iteration 58300, lr = 0.035\nI0822 20:30:22.927116 32262 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0822 20:31:44.576242 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85608\nI0822 20:31:44.576578 32262 solver.cpp:404]     Test net output #1: loss = 0.559006 (* 1 = 0.559006 loss)\nI0822 20:31:45.902729 32262 solver.cpp:228] Iteration 58400, loss = 0.000372431\nI0822 20:31:45.902762 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:31:45.902778 32262 solver.cpp:244]     Train net output #1: loss = 0.000372536 (* 1 = 0.000372536 loss)\nI0822 20:31:45.984869 32262 sgd_solver.cpp:166] Iteration 58400, lr = 0.035\nI0822 20:34:04.532668 32262 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0822 20:35:26.185395 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85728\nI0822 20:35:26.185720 32262 solver.cpp:404]     Test net output #1: loss = 0.550831 (* 1 = 0.550831 loss)\nI0822 20:35:27.512060 32262 solver.cpp:228] Iteration 58500, loss = 0.000512666\nI0822 20:35:27.512094 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:35:27.512109 32262 solver.cpp:244]     Train net output #1: loss = 0.000512771 (* 1 = 0.000512771 loss)\nI0822 20:35:27.600172 32262 sgd_solver.cpp:166] Iteration 58500, lr = 0.035\nI0822 20:37:46.203027 32262 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0822 20:39:07.857172 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85576\nI0822 20:39:07.857499 32262 solver.cpp:404]     Test net output #1: loss = 0.560878 (* 1 = 0.560878 loss)\nI0822 20:39:09.183213 32262 solver.cpp:228] Iteration 58600, loss = 0.00030348\nI0822 20:39:09.183255 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:39:09.183271 32262 solver.cpp:244]     Train net output #1: loss = 0.000303585 (* 1 = 0.000303585 loss)\nI0822 20:39:09.269487 32262 sgd_solver.cpp:166] Iteration 58600, lr = 0.035\nI0822 20:41:27.953459 32262 
solver.cpp:337] Iteration 58700, Testing net (#0)\nI0822 20:42:49.612109 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85676\nI0822 20:42:49.612452 32262 solver.cpp:404]     Test net output #1: loss = 0.551304 (* 1 = 0.551304 loss)\nI0822 20:42:50.938575 32262 solver.cpp:228] Iteration 58700, loss = 0.000467107\nI0822 20:42:50.938617 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:42:50.938634 32262 solver.cpp:244]     Train net output #1: loss = 0.000467212 (* 1 = 0.000467212 loss)\nI0822 20:42:51.023797 32262 sgd_solver.cpp:166] Iteration 58700, lr = 0.035\nI0822 20:45:09.689004 32262 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0822 20:46:31.354182 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85528\nI0822 20:46:31.354488 32262 solver.cpp:404]     Test net output #1: loss = 0.56118 (* 1 = 0.56118 loss)\nI0822 20:46:32.680383 32262 solver.cpp:228] Iteration 58800, loss = 0.000321276\nI0822 20:46:32.680418 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:46:32.680433 32262 solver.cpp:244]     Train net output #1: loss = 0.000321381 (* 1 = 0.000321381 loss)\nI0822 20:46:32.767840 32262 sgd_solver.cpp:166] Iteration 58800, lr = 0.035\nI0822 20:48:51.477390 32262 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0822 20:50:13.149798 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85656\nI0822 20:50:13.150120 32262 solver.cpp:404]     Test net output #1: loss = 0.551357 (* 1 = 0.551357 loss)\nI0822 20:50:14.476518 32262 solver.cpp:228] Iteration 58900, loss = 0.00046295\nI0822 20:50:14.476562 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:50:14.476578 32262 solver.cpp:244]     Train net output #1: loss = 0.000463055 (* 1 = 0.000463055 loss)\nI0822 20:50:14.560999 32262 sgd_solver.cpp:166] Iteration 58900, lr = 0.035\nI0822 20:52:33.168094 32262 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0822 20:53:54.843607 32262 solver.cpp:404]     Test net output 
#0: accuracy = 0.85556\nI0822 20:53:54.843953 32262 solver.cpp:404]     Test net output #1: loss = 0.560492 (* 1 = 0.560492 loss)\nI0822 20:53:56.170311 32262 solver.cpp:228] Iteration 59000, loss = 0.000399903\nI0822 20:53:56.170346 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:53:56.170362 32262 solver.cpp:244]     Train net output #1: loss = 0.000400008 (* 1 = 0.000400008 loss)\nI0822 20:53:56.255720 32262 sgd_solver.cpp:166] Iteration 59000, lr = 0.035\nI0822 20:56:14.985800 32262 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0822 20:57:36.657099 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8568\nI0822 20:57:36.657404 32262 solver.cpp:404]     Test net output #1: loss = 0.550197 (* 1 = 0.550197 loss)\nI0822 20:57:37.985110 32262 solver.cpp:228] Iteration 59100, loss = 0.000531294\nI0822 20:57:37.985146 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 20:57:37.985162 32262 solver.cpp:244]     Train net output #1: loss = 0.000531399 (* 1 = 0.000531399 loss)\nI0822 20:57:38.064862 32262 sgd_solver.cpp:166] Iteration 59100, lr = 0.035\nI0822 20:59:56.702847 32262 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0822 21:01:18.369571 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI0822 21:01:18.369884 32262 solver.cpp:404]     Test net output #1: loss = 0.558844 (* 1 = 0.558844 loss)\nI0822 21:01:19.697144 32262 solver.cpp:228] Iteration 59200, loss = 0.000305398\nI0822 21:01:19.697178 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:01:19.697194 32262 solver.cpp:244]     Train net output #1: loss = 0.000305503 (* 1 = 0.000305503 loss)\nI0822 21:01:19.780175 32262 sgd_solver.cpp:166] Iteration 59200, lr = 0.035\nI0822 21:03:38.392211 32262 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0822 21:05:00.059001 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8572\nI0822 21:05:00.059334 32262 solver.cpp:404]     Test net output #1: loss = 0.549608 (* 1 = 
0.549608 loss)\nI0822 21:05:01.386711 32262 solver.cpp:228] Iteration 59300, loss = 0.000493395\nI0822 21:05:01.386756 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:05:01.386772 32262 solver.cpp:244]     Train net output #1: loss = 0.0004935 (* 1 = 0.0004935 loss)\nI0822 21:05:01.474314 32262 sgd_solver.cpp:166] Iteration 59300, lr = 0.035\nI0822 21:07:20.179528 32262 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0822 21:08:41.850602 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85628\nI0822 21:08:41.850910 32262 solver.cpp:404]     Test net output #1: loss = 0.559294 (* 1 = 0.559294 loss)\nI0822 21:08:43.178019 32262 solver.cpp:228] Iteration 59400, loss = 0.000362285\nI0822 21:08:43.178064 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:08:43.178082 32262 solver.cpp:244]     Train net output #1: loss = 0.00036239 (* 1 = 0.00036239 loss)\nI0822 21:08:43.263432 32262 sgd_solver.cpp:166] Iteration 59400, lr = 0.035\nI0822 21:11:01.862476 32262 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0822 21:12:23.536507 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8578\nI0822 21:12:23.536826 32262 solver.cpp:404]     Test net output #1: loss = 0.550698 (* 1 = 0.550698 loss)\nI0822 21:12:24.864032 32262 solver.cpp:228] Iteration 59500, loss = 0.000501913\nI0822 21:12:24.864068 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:12:24.864084 32262 solver.cpp:244]     Train net output #1: loss = 0.000502018 (* 1 = 0.000502018 loss)\nI0822 21:12:24.940618 32262 sgd_solver.cpp:166] Iteration 59500, lr = 0.035\nI0822 21:14:43.568460 32262 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0822 21:16:05.242470 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85648\nI0822 21:16:05.242784 32262 solver.cpp:404]     Test net output #1: loss = 0.559446 (* 1 = 0.559446 loss)\nI0822 21:16:06.569787 32262 solver.cpp:228] Iteration 59600, loss = 0.000381366\nI0822 21:16:06.569823 
32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:16:06.569839 32262 solver.cpp:244]     Train net output #1: loss = 0.000381471 (* 1 = 0.000381471 loss)\nI0822 21:16:06.651453 32262 sgd_solver.cpp:166] Iteration 59600, lr = 0.035\nI0822 21:18:25.303725 32262 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0822 21:19:46.983628 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85776\nI0822 21:19:46.983971 32262 solver.cpp:404]     Test net output #1: loss = 0.550066 (* 1 = 0.550066 loss)\nI0822 21:19:48.310542 32262 solver.cpp:228] Iteration 59700, loss = 0.00046612\nI0822 21:19:48.310577 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:19:48.310592 32262 solver.cpp:244]     Train net output #1: loss = 0.000466225 (* 1 = 0.000466225 loss)\nI0822 21:19:48.392128 32262 sgd_solver.cpp:166] Iteration 59700, lr = 0.035\nI0822 21:22:06.990583 32262 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0822 21:23:28.681645 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0822 21:23:28.681991 32262 solver.cpp:404]     Test net output #1: loss = 0.558843 (* 1 = 0.558843 loss)\nI0822 21:23:30.009076 32262 solver.cpp:228] Iteration 59800, loss = 0.000390833\nI0822 21:23:30.009112 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:23:30.009127 32262 solver.cpp:244]     Train net output #1: loss = 0.000390938 (* 1 = 0.000390938 loss)\nI0822 21:23:30.093626 32262 sgd_solver.cpp:166] Iteration 59800, lr = 0.035\nI0822 21:25:48.698479 32262 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0822 21:27:10.379204 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85776\nI0822 21:27:10.379532 32262 solver.cpp:404]     Test net output #1: loss = 0.549638 (* 1 = 0.549638 loss)\nI0822 21:27:11.705785 32262 solver.cpp:228] Iteration 59900, loss = 0.000498718\nI0822 21:27:11.705822 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:27:11.705837 32262 solver.cpp:244]     
Train net output #1: loss = 0.000498822 (* 1 = 0.000498822 loss)\nI0822 21:27:11.792796 32262 sgd_solver.cpp:166] Iteration 59900, lr = 0.035\nI0822 21:29:30.477390 32262 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0822 21:30:52.156013 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85648\nI0822 21:30:52.156347 32262 solver.cpp:404]     Test net output #1: loss = 0.557901 (* 1 = 0.557901 loss)\nI0822 21:30:53.483021 32262 solver.cpp:228] Iteration 60000, loss = 0.000360974\nI0822 21:30:53.483057 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:30:53.483073 32262 solver.cpp:244]     Train net output #1: loss = 0.000361079 (* 1 = 0.000361079 loss)\nI0822 21:30:53.568104 32262 sgd_solver.cpp:166] Iteration 60000, lr = 0.035\nI0822 21:33:12.133474 32262 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0822 21:34:33.801210 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85804\nI0822 21:34:33.801544 32262 solver.cpp:404]     Test net output #1: loss = 0.548272 (* 1 = 0.548272 loss)\nI0822 21:34:35.127801 32262 solver.cpp:228] Iteration 60100, loss = 0.0004919\nI0822 21:34:35.127835 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:34:35.127848 32262 solver.cpp:244]     Train net output #1: loss = 0.000492005 (* 1 = 0.000492005 loss)\nI0822 21:34:35.209040 32262 sgd_solver.cpp:166] Iteration 60100, lr = 0.035\nI0822 21:36:53.855902 32262 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0822 21:38:15.527982 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85652\nI0822 21:38:15.528316 32262 solver.cpp:404]     Test net output #1: loss = 0.5574 (* 1 = 0.5574 loss)\nI0822 21:38:16.854212 32262 solver.cpp:228] Iteration 60200, loss = 0.000354524\nI0822 21:38:16.854254 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:38:16.854270 32262 solver.cpp:244]     Train net output #1: loss = 0.000354629 (* 1 = 0.000354629 loss)\nI0822 21:38:16.937629 32262 sgd_solver.cpp:166] 
Iteration 60200, lr = 0.035\nI0822 21:40:35.589421 32262 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0822 21:41:57.269409 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8582\nI0822 21:41:57.269744 32262 solver.cpp:404]     Test net output #1: loss = 0.547598 (* 1 = 0.547598 loss)\nI0822 21:41:58.596074 32262 solver.cpp:228] Iteration 60300, loss = 0.000413968\nI0822 21:41:58.596115 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:41:58.596132 32262 solver.cpp:244]     Train net output #1: loss = 0.000414072 (* 1 = 0.000414072 loss)\nI0822 21:41:58.680177 32262 sgd_solver.cpp:166] Iteration 60300, lr = 0.035\nI0822 21:44:17.259276 32262 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0822 21:45:38.985393 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8572\nI0822 21:45:38.985733 32262 solver.cpp:404]     Test net output #1: loss = 0.55608 (* 1 = 0.55608 loss)\nI0822 21:45:40.312533 32262 solver.cpp:228] Iteration 60400, loss = 0.000400756\nI0822 21:45:40.312564 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:45:40.312579 32262 solver.cpp:244]     Train net output #1: loss = 0.000400861 (* 1 = 0.000400861 loss)\nI0822 21:45:40.396507 32262 sgd_solver.cpp:166] Iteration 60400, lr = 0.035\nI0822 21:47:58.955406 32262 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0822 21:49:20.621222 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85852\nI0822 21:49:20.621554 32262 solver.cpp:404]     Test net output #1: loss = 0.546347 (* 1 = 0.546347 loss)\nI0822 21:49:21.949120 32262 solver.cpp:228] Iteration 60500, loss = 0.000531389\nI0822 21:49:21.949164 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:49:21.949180 32262 solver.cpp:244]     Train net output #1: loss = 0.000531494 (* 1 = 0.000531494 loss)\nI0822 21:49:22.032790 32262 sgd_solver.cpp:166] Iteration 60500, lr = 0.035\nI0822 21:51:41.130477 32262 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0822 
21:53:02.774658 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0822 21:53:02.775005 32262 solver.cpp:404]     Test net output #1: loss = 0.555224 (* 1 = 0.555224 loss)\nI0822 21:53:04.101172 32262 solver.cpp:228] Iteration 60600, loss = 0.000406178\nI0822 21:53:04.101214 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:53:04.101230 32262 solver.cpp:244]     Train net output #1: loss = 0.000406283 (* 1 = 0.000406283 loss)\nI0822 21:53:04.192620 32262 sgd_solver.cpp:166] Iteration 60600, lr = 0.035\nI0822 21:55:23.306494 32262 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0822 21:56:44.940078 32262 solver.cpp:404]     Test net output #0: accuracy = 0.858\nI0822 21:56:44.940419 32262 solver.cpp:404]     Test net output #1: loss = 0.548038 (* 1 = 0.548038 loss)\nI0822 21:56:46.266890 32262 solver.cpp:228] Iteration 60700, loss = 0.000574485\nI0822 21:56:46.266926 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 21:56:46.266942 32262 solver.cpp:244]     Train net output #1: loss = 0.000574589 (* 1 = 0.000574589 loss)\nI0822 21:56:46.354542 32262 sgd_solver.cpp:166] Iteration 60700, lr = 0.035\nI0822 21:59:05.468591 32262 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0822 22:00:27.106936 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85756\nI0822 22:00:27.107313 32262 solver.cpp:404]     Test net output #1: loss = 0.556287 (* 1 = 0.556287 loss)\nI0822 22:00:28.433817 32262 solver.cpp:228] Iteration 60800, loss = 0.000404085\nI0822 22:00:28.433852 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:00:28.433868 32262 solver.cpp:244]     Train net output #1: loss = 0.00040419 (* 1 = 0.00040419 loss)\nI0822 22:00:28.526602 32262 sgd_solver.cpp:166] Iteration 60800, lr = 0.035\nI0822 22:02:47.619057 32262 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0822 22:04:09.313074 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0822 22:04:09.313408 32262 
solver.cpp:404]     Test net output #1: loss = 0.54697 (* 1 = 0.54697 loss)\nI0822 22:04:10.641074 32262 solver.cpp:228] Iteration 60900, loss = 0.000449435\nI0822 22:04:10.641110 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:04:10.641125 32262 solver.cpp:244]     Train net output #1: loss = 0.00044954 (* 1 = 0.00044954 loss)\nI0822 22:04:10.726616 32262 sgd_solver.cpp:166] Iteration 60900, lr = 0.035\nI0822 22:06:29.800060 32262 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0822 22:07:51.497509 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85692\nI0822 22:07:51.497848 32262 solver.cpp:404]     Test net output #1: loss = 0.556806 (* 1 = 0.556806 loss)\nI0822 22:07:52.823931 32262 solver.cpp:228] Iteration 61000, loss = 0.000406269\nI0822 22:07:52.823976 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:07:52.823993 32262 solver.cpp:244]     Train net output #1: loss = 0.000406374 (* 1 = 0.000406374 loss)\nI0822 22:07:52.913928 32262 sgd_solver.cpp:166] Iteration 61000, lr = 0.035\nI0822 22:10:12.025780 32262 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0822 22:11:33.711057 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85828\nI0822 22:11:33.711397 32262 solver.cpp:404]     Test net output #1: loss = 0.54654 (* 1 = 0.54654 loss)\nI0822 22:11:35.038565 32262 solver.cpp:228] Iteration 61100, loss = 0.000536877\nI0822 22:11:35.038601 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:11:35.038616 32262 solver.cpp:244]     Train net output #1: loss = 0.000536982 (* 1 = 0.000536982 loss)\nI0822 22:11:35.127504 32262 sgd_solver.cpp:166] Iteration 61100, lr = 0.035\nI0822 22:13:54.350091 32262 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0822 22:15:16.021212 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85712\nI0822 22:15:16.021549 32262 solver.cpp:404]     Test net output #1: loss = 0.556586 (* 1 = 0.556586 loss)\nI0822 22:15:17.347417 32262 
solver.cpp:228] Iteration 61200, loss = 0.000344988\nI0822 22:15:17.347453 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:15:17.347470 32262 solver.cpp:244]     Train net output #1: loss = 0.000345093 (* 1 = 0.000345093 loss)\nI0822 22:15:17.437988 32262 sgd_solver.cpp:166] Iteration 61200, lr = 0.035\nI0822 22:17:36.635880 32262 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0822 22:18:58.326532 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0822 22:18:58.326864 32262 solver.cpp:404]     Test net output #1: loss = 0.548826 (* 1 = 0.548826 loss)\nI0822 22:18:59.653909 32262 solver.cpp:228] Iteration 61300, loss = 0.000518751\nI0822 22:18:59.653954 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:18:59.653970 32262 solver.cpp:244]     Train net output #1: loss = 0.000518856 (* 1 = 0.000518856 loss)\nI0822 22:18:59.742017 32262 sgd_solver.cpp:166] Iteration 61300, lr = 0.035\nI0822 22:21:18.967440 32262 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0822 22:22:40.651161 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85696\nI0822 22:22:40.651510 32262 solver.cpp:404]     Test net output #1: loss = 0.557733 (* 1 = 0.557733 loss)\nI0822 22:22:41.978080 32262 solver.cpp:228] Iteration 61400, loss = 0.000416402\nI0822 22:22:41.978114 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:22:41.978129 32262 solver.cpp:244]     Train net output #1: loss = 0.000416507 (* 1 = 0.000416507 loss)\nI0822 22:22:42.068048 32262 sgd_solver.cpp:166] Iteration 61400, lr = 0.035\nI0822 22:25:01.221668 32262 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0822 22:26:22.911710 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0822 22:26:22.912056 32262 solver.cpp:404]     Test net output #1: loss = 0.546944 (* 1 = 0.546944 loss)\nI0822 22:26:24.239089 32262 solver.cpp:228] Iteration 61500, loss = 0.000480699\nI0822 22:26:24.239135 32262 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0822 22:26:24.239166 32262 solver.cpp:244]     Train net output #1: loss = 0.000480804 (* 1 = 0.000480804 loss)\nI0822 22:26:24.327487 32262 sgd_solver.cpp:166] Iteration 61500, lr = 0.035\nI0822 22:28:43.412456 32262 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0822 22:30:05.109395 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85732\nI0822 22:30:05.109733 32262 solver.cpp:404]     Test net output #1: loss = 0.556966 (* 1 = 0.556966 loss)\nI0822 22:30:06.436838 32262 solver.cpp:228] Iteration 61600, loss = 0.000394149\nI0822 22:30:06.436874 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:30:06.436889 32262 solver.cpp:244]     Train net output #1: loss = 0.000394254 (* 1 = 0.000394254 loss)\nI0822 22:30:06.524335 32262 sgd_solver.cpp:166] Iteration 61600, lr = 0.035\nI0822 22:32:25.671614 32262 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0822 22:33:47.363766 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85836\nI0822 22:33:47.364106 32262 solver.cpp:404]     Test net output #1: loss = 0.548301 (* 1 = 0.548301 loss)\nI0822 22:33:48.692402 32262 solver.cpp:228] Iteration 61700, loss = 0.000506091\nI0822 22:33:48.692437 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:33:48.692452 32262 solver.cpp:244]     Train net output #1: loss = 0.000506196 (* 1 = 0.000506196 loss)\nI0822 22:33:48.775105 32262 sgd_solver.cpp:166] Iteration 61700, lr = 0.035\nI0822 22:36:07.903380 32262 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0822 22:37:29.586386 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85776\nI0822 22:37:29.586724 32262 solver.cpp:404]     Test net output #1: loss = 0.556601 (* 1 = 0.556601 loss)\nI0822 22:37:30.913625 32262 solver.cpp:228] Iteration 61800, loss = 0.000322218\nI0822 22:37:30.913661 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:37:30.913676 32262 solver.cpp:244]     Train net output #1: loss = 0.000322323 
(* 1 = 0.000322323 loss)\nI0822 22:37:30.997678 32262 sgd_solver.cpp:166] Iteration 61800, lr = 0.035\nI0822 22:39:50.196499 32262 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0822 22:41:11.891749 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85864\nI0822 22:41:11.892096 32262 solver.cpp:404]     Test net output #1: loss = 0.548244 (* 1 = 0.548244 loss)\nI0822 22:41:13.218188 32262 solver.cpp:228] Iteration 61900, loss = 0.000496178\nI0822 22:41:13.218233 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:41:13.218256 32262 solver.cpp:244]     Train net output #1: loss = 0.000496283 (* 1 = 0.000496283 loss)\nI0822 22:41:13.305384 32262 sgd_solver.cpp:166] Iteration 61900, lr = 0.035\nI0822 22:43:32.414494 32262 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0822 22:44:54.148576 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85736\nI0822 22:44:54.148933 32262 solver.cpp:404]     Test net output #1: loss = 0.557597 (* 1 = 0.557597 loss)\nI0822 22:44:55.476356 32262 solver.cpp:228] Iteration 62000, loss = 0.000314804\nI0822 22:44:55.476392 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:44:55.476414 32262 solver.cpp:244]     Train net output #1: loss = 0.000314909 (* 1 = 0.000314909 loss)\nI0822 22:44:55.561187 32262 sgd_solver.cpp:166] Iteration 62000, lr = 0.035\nI0822 22:47:14.676560 32262 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0822 22:48:36.369900 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85856\nI0822 22:48:36.370232 32262 solver.cpp:404]     Test net output #1: loss = 0.549089 (* 1 = 0.549089 loss)\nI0822 22:48:37.698053 32262 solver.cpp:228] Iteration 62100, loss = 0.00051859\nI0822 22:48:37.698091 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:48:37.698113 32262 solver.cpp:244]     Train net output #1: loss = 0.000518695 (* 1 = 0.000518695 loss)\nI0822 22:48:37.783068 32262 sgd_solver.cpp:166] Iteration 62100, lr = 0.035\nI0822 
22:50:56.965934 32262 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0822 22:52:18.766115 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85712\nI0822 22:52:18.766455 32262 solver.cpp:404]     Test net output #1: loss = 0.557926 (* 1 = 0.557926 loss)\nI0822 22:52:20.100322 32262 solver.cpp:228] Iteration 62200, loss = 0.000359113\nI0822 22:52:20.100366 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:52:20.100383 32262 solver.cpp:244]     Train net output #1: loss = 0.000359218 (* 1 = 0.000359218 loss)\nI0822 22:52:20.179555 32262 sgd_solver.cpp:166] Iteration 62200, lr = 0.035\nI0822 22:54:39.601938 32262 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0822 22:56:02.156039 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85824\nI0822 22:56:02.156304 32262 solver.cpp:404]     Test net output #1: loss = 0.548818 (* 1 = 0.548818 loss)\nI0822 22:56:03.487048 32262 solver.cpp:228] Iteration 62300, loss = 0.000577357\nI0822 22:56:03.487092 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:56:03.487109 32262 solver.cpp:244]     Train net output #1: loss = 0.000577462 (* 1 = 0.000577462 loss)\nI0822 22:56:03.572871 32262 sgd_solver.cpp:166] Iteration 62300, lr = 0.035\nI0822 22:58:22.969450 32262 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0822 22:59:45.517261 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8578\nI0822 22:59:45.517525 32262 solver.cpp:404]     Test net output #1: loss = 0.556872 (* 1 = 0.556872 loss)\nI0822 22:59:46.848175 32262 solver.cpp:228] Iteration 62400, loss = 0.000344974\nI0822 22:59:46.848217 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 22:59:46.848233 32262 solver.cpp:244]     Train net output #1: loss = 0.000345079 (* 1 = 0.000345079 loss)\nI0822 22:59:46.927714 32262 sgd_solver.cpp:166] Iteration 62400, lr = 0.035\nI0822 23:02:06.284463 32262 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0822 23:03:28.805675 32262 
solver.cpp:404]     Test net output #0: accuracy = 0.85856\nI0822 23:03:28.805984 32262 solver.cpp:404]     Test net output #1: loss = 0.547654 (* 1 = 0.547654 loss)\nI0822 23:03:30.137497 32262 solver.cpp:228] Iteration 62500, loss = 0.000550804\nI0822 23:03:30.137542 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:03:30.137557 32262 solver.cpp:244]     Train net output #1: loss = 0.000550909 (* 1 = 0.000550909 loss)\nI0822 23:03:30.231624 32262 sgd_solver.cpp:166] Iteration 62500, lr = 0.035\nI0822 23:05:49.600622 32262 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0822 23:07:12.121578 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85784\nI0822 23:07:12.121870 32262 solver.cpp:404]     Test net output #1: loss = 0.558583 (* 1 = 0.558583 loss)\nI0822 23:07:13.453047 32262 solver.cpp:228] Iteration 62600, loss = 0.000336959\nI0822 23:07:13.453091 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:07:13.453107 32262 solver.cpp:244]     Train net output #1: loss = 0.000337064 (* 1 = 0.000337064 loss)\nI0822 23:07:13.536844 32262 sgd_solver.cpp:166] Iteration 62600, lr = 0.035\nI0822 23:09:32.960919 32262 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0822 23:10:55.250752 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85868\nI0822 23:10:55.251019 32262 solver.cpp:404]     Test net output #1: loss = 0.548433 (* 1 = 0.548433 loss)\nI0822 23:10:56.581037 32262 solver.cpp:228] Iteration 62700, loss = 0.000453534\nI0822 23:10:56.581079 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:10:56.581094 32262 solver.cpp:244]     Train net output #1: loss = 0.000453639 (* 1 = 0.000453639 loss)\nI0822 23:10:56.664337 32262 sgd_solver.cpp:166] Iteration 62700, lr = 0.035\nI0822 23:13:15.964493 32262 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0822 23:14:38.359838 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85888\nI0822 23:14:38.360126 32262 solver.cpp:404]     Test 
net output #1: loss = 0.556266 (* 1 = 0.556266 loss)\nI0822 23:14:39.692399 32262 solver.cpp:228] Iteration 62800, loss = 0.000383988\nI0822 23:14:39.692443 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:14:39.692461 32262 solver.cpp:244]     Train net output #1: loss = 0.000384093 (* 1 = 0.000384093 loss)\nI0822 23:14:39.772116 32262 sgd_solver.cpp:166] Iteration 62800, lr = 0.035\nI0822 23:16:59.192754 32262 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0822 23:18:21.735069 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85816\nI0822 23:18:21.735338 32262 solver.cpp:404]     Test net output #1: loss = 0.548825 (* 1 = 0.548825 loss)\nI0822 23:18:23.066735 32262 solver.cpp:228] Iteration 62900, loss = 0.000430001\nI0822 23:18:23.066781 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:18:23.066797 32262 solver.cpp:244]     Train net output #1: loss = 0.000430106 (* 1 = 0.000430106 loss)\nI0822 23:18:23.151571 32262 sgd_solver.cpp:166] Iteration 62900, lr = 0.035\nI0822 23:20:42.507560 32262 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0822 23:22:05.035296 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85876\nI0822 23:22:05.035621 32262 solver.cpp:404]     Test net output #1: loss = 0.555141 (* 1 = 0.555141 loss)\nI0822 23:22:06.366643 32262 solver.cpp:228] Iteration 63000, loss = 0.000421921\nI0822 23:22:06.366688 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:22:06.366704 32262 solver.cpp:244]     Train net output #1: loss = 0.000422026 (* 1 = 0.000422026 loss)\nI0822 23:22:06.448062 32262 sgd_solver.cpp:166] Iteration 63000, lr = 0.035\nI0822 23:24:25.790805 32262 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0822 23:25:47.499491 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85904\nI0822 23:25:47.499838 32262 solver.cpp:404]     Test net output #1: loss = 0.548554 (* 1 = 0.548554 loss)\nI0822 23:25:48.826367 32262 solver.cpp:228] Iteration 
63100, loss = 0.00060652\nI0822 23:25:48.826402 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:25:48.826417 32262 solver.cpp:244]     Train net output #1: loss = 0.000606625 (* 1 = 0.000606625 loss)\nI0822 23:25:48.913475 32262 sgd_solver.cpp:166] Iteration 63100, lr = 0.035\nI0822 23:28:07.922127 32262 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0822 23:29:29.609297 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85888\nI0822 23:29:29.609597 32262 solver.cpp:404]     Test net output #1: loss = 0.555732 (* 1 = 0.555732 loss)\nI0822 23:29:30.936496 32262 solver.cpp:228] Iteration 63200, loss = 0.000406879\nI0822 23:29:30.936538 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:29:30.936553 32262 solver.cpp:244]     Train net output #1: loss = 0.000406984 (* 1 = 0.000406984 loss)\nI0822 23:29:31.026404 32262 sgd_solver.cpp:166] Iteration 63200, lr = 0.035\nI0822 23:31:49.752948 32262 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0822 23:33:11.452968 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85904\nI0822 23:33:11.453313 32262 solver.cpp:404]     Test net output #1: loss = 0.548479 (* 1 = 0.548479 loss)\nI0822 23:33:12.780781 32262 solver.cpp:228] Iteration 63300, loss = 0.000511402\nI0822 23:33:12.780814 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:33:12.780829 32262 solver.cpp:244]     Train net output #1: loss = 0.000511507 (* 1 = 0.000511507 loss)\nI0822 23:33:12.857197 32262 sgd_solver.cpp:166] Iteration 63300, lr = 0.035\nI0822 23:35:31.479136 32262 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0822 23:36:53.169332 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85916\nI0822 23:36:53.169683 32262 solver.cpp:404]     Test net output #1: loss = 0.557644 (* 1 = 0.557644 loss)\nI0822 23:36:54.495698 32262 solver.cpp:228] Iteration 63400, loss = 0.000396132\nI0822 23:36:54.495733 32262 solver.cpp:244]     Train net output #0: accuracy = 
1\nI0822 23:36:54.495748 32262 solver.cpp:244]     Train net output #1: loss = 0.000396237 (* 1 = 0.000396237 loss)\nI0822 23:36:54.579298 32262 sgd_solver.cpp:166] Iteration 63400, lr = 0.035\nI0822 23:39:13.260169 32262 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0822 23:40:34.947170 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85916\nI0822 23:40:34.947509 32262 solver.cpp:404]     Test net output #1: loss = 0.548057 (* 1 = 0.548057 loss)\nI0822 23:40:36.274747 32262 solver.cpp:228] Iteration 63500, loss = 0.00041005\nI0822 23:40:36.274783 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:40:36.274798 32262 solver.cpp:244]     Train net output #1: loss = 0.000410155 (* 1 = 0.000410155 loss)\nI0822 23:40:36.351575 32262 sgd_solver.cpp:166] Iteration 63500, lr = 0.035\nI0822 23:42:54.935891 32262 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0822 23:44:16.625457 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85864\nI0822 23:44:16.625802 32262 solver.cpp:404]     Test net output #1: loss = 0.55772 (* 1 = 0.55772 loss)\nI0822 23:44:17.952282 32262 solver.cpp:228] Iteration 63600, loss = 0.000304679\nI0822 23:44:17.952317 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:44:17.952333 32262 solver.cpp:244]     Train net output #1: loss = 0.000304784 (* 1 = 0.000304784 loss)\nI0822 23:44:18.034529 32262 sgd_solver.cpp:166] Iteration 63600, lr = 0.035\nI0822 23:46:36.516638 32262 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0822 23:47:58.202592 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85904\nI0822 23:47:58.202940 32262 solver.cpp:404]     Test net output #1: loss = 0.548499 (* 1 = 0.548499 loss)\nI0822 23:47:59.529976 32262 solver.cpp:228] Iteration 63700, loss = 0.000487196\nI0822 23:47:59.530009 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:47:59.530025 32262 solver.cpp:244]     Train net output #1: loss = 0.000487301 (* 1 = 0.000487301 
loss)\nI0822 23:47:59.618393 32262 sgd_solver.cpp:166] Iteration 63700, lr = 0.035\nI0822 23:50:18.235806 32262 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0822 23:51:39.932952 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85872\nI0822 23:51:39.933277 32262 solver.cpp:404]     Test net output #1: loss = 0.557905 (* 1 = 0.557905 loss)\nI0822 23:51:41.260607 32262 solver.cpp:228] Iteration 63800, loss = 0.000326785\nI0822 23:51:41.260640 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:51:41.260656 32262 solver.cpp:244]     Train net output #1: loss = 0.00032689 (* 1 = 0.00032689 loss)\nI0822 23:51:41.345011 32262 sgd_solver.cpp:166] Iteration 63800, lr = 0.035\nI0822 23:54:00.037638 32262 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0822 23:55:21.718555 32262 solver.cpp:404]     Test net output #0: accuracy = 0.859\nI0822 23:55:21.718901 32262 solver.cpp:404]     Test net output #1: loss = 0.54897 (* 1 = 0.54897 loss)\nI0822 23:55:23.046000 32262 solver.cpp:228] Iteration 63900, loss = 0.000529247\nI0822 23:55:23.046036 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:55:23.046051 32262 solver.cpp:244]     Train net output #1: loss = 0.000529352 (* 1 = 0.000529352 loss)\nI0822 23:55:23.130218 32262 sgd_solver.cpp:166] Iteration 63900, lr = 0.035\nI0822 23:57:41.740789 32262 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0822 23:59:03.413487 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85892\nI0822 23:59:03.413827 32262 solver.cpp:404]     Test net output #1: loss = 0.558383 (* 1 = 0.558383 loss)\nI0822 23:59:04.740269 32262 solver.cpp:228] Iteration 64000, loss = 0.000331\nI0822 23:59:04.740304 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0822 23:59:04.740319 32262 solver.cpp:244]     Train net output #1: loss = 0.000331105 (* 1 = 0.000331105 loss)\nI0822 23:59:04.824018 32262 sgd_solver.cpp:166] Iteration 64000, lr = 0.035\nI0823 00:01:23.461782 32262 
solver.cpp:337] Iteration 64100, Testing net (#0)\nI0823 00:02:45.091259 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85892\nI0823 00:02:45.091583 32262 solver.cpp:404]     Test net output #1: loss = 0.549685 (* 1 = 0.549685 loss)\nI0823 00:02:46.418191 32262 solver.cpp:228] Iteration 64100, loss = 0.000480318\nI0823 00:02:46.418226 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:02:46.418241 32262 solver.cpp:244]     Train net output #1: loss = 0.000480423 (* 1 = 0.000480423 loss)\nI0823 00:02:46.502104 32262 sgd_solver.cpp:166] Iteration 64100, lr = 0.035\nI0823 00:05:05.061714 32262 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0823 00:06:26.681674 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85816\nI0823 00:06:26.682018 32262 solver.cpp:404]     Test net output #1: loss = 0.558493 (* 1 = 0.558493 loss)\nI0823 00:06:28.008834 32262 solver.cpp:228] Iteration 64200, loss = 0.000329519\nI0823 00:06:28.008868 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:06:28.008884 32262 solver.cpp:244]     Train net output #1: loss = 0.000329624 (* 1 = 0.000329624 loss)\nI0823 00:06:28.092382 32262 sgd_solver.cpp:166] Iteration 64200, lr = 0.035\nI0823 00:08:46.626922 32262 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0823 00:10:08.258020 32262 solver.cpp:404]     Test net output #0: accuracy = 0.859\nI0823 00:10:08.258358 32262 solver.cpp:404]     Test net output #1: loss = 0.551005 (* 1 = 0.551005 loss)\nI0823 00:10:09.584802 32262 solver.cpp:228] Iteration 64300, loss = 0.000521988\nI0823 00:10:09.584836 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:10:09.584851 32262 solver.cpp:244]     Train net output #1: loss = 0.000522093 (* 1 = 0.000522093 loss)\nI0823 00:10:09.669906 32262 sgd_solver.cpp:166] Iteration 64300, lr = 0.035\nI0823 00:12:28.213778 32262 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0823 00:13:49.843683 32262 solver.cpp:404]     Test net output 
#0: accuracy = 0.85768\nI0823 00:13:49.844024 32262 solver.cpp:404]     Test net output #1: loss = 0.559034 (* 1 = 0.559034 loss)\nI0823 00:13:51.170717 32262 solver.cpp:228] Iteration 64400, loss = 0.000402078\nI0823 00:13:51.170753 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:13:51.170768 32262 solver.cpp:244]     Train net output #1: loss = 0.000402182 (* 1 = 0.000402182 loss)\nI0823 00:13:51.255058 32262 sgd_solver.cpp:166] Iteration 64400, lr = 0.035\nI0823 00:16:09.757293 32262 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0823 00:17:31.377641 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85884\nI0823 00:17:31.377974 32262 solver.cpp:404]     Test net output #1: loss = 0.550078 (* 1 = 0.550078 loss)\nI0823 00:17:32.704557 32262 solver.cpp:228] Iteration 64500, loss = 0.000471323\nI0823 00:17:32.704591 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:17:32.704607 32262 solver.cpp:244]     Train net output #1: loss = 0.000471428 (* 1 = 0.000471428 loss)\nI0823 00:17:32.785789 32262 sgd_solver.cpp:166] Iteration 64500, lr = 0.035\nI0823 00:19:51.449316 32262 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0823 00:21:13.084236 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0823 00:21:13.084614 32262 solver.cpp:404]     Test net output #1: loss = 0.559293 (* 1 = 0.559293 loss)\nI0823 00:21:14.411016 32262 solver.cpp:228] Iteration 64600, loss = 0.000364181\nI0823 00:21:14.411052 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:21:14.411067 32262 solver.cpp:244]     Train net output #1: loss = 0.000364286 (* 1 = 0.000364286 loss)\nI0823 00:21:14.491777 32262 sgd_solver.cpp:166] Iteration 64600, lr = 0.035\nI0823 00:23:33.074100 32262 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0823 00:24:54.691664 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85844\nI0823 00:24:54.692086 32262 solver.cpp:404]     Test net output #1: loss = 0.551052 (* 1 
= 0.551052 loss)\nI0823 00:24:56.018569 32262 solver.cpp:228] Iteration 64700, loss = 0.000502457\nI0823 00:24:56.018606 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:24:56.018621 32262 solver.cpp:244]     Train net output #1: loss = 0.000502562 (* 1 = 0.000502562 loss)\nI0823 00:24:56.104579 32262 sgd_solver.cpp:166] Iteration 64700, lr = 0.035\nI0823 00:27:14.741111 32262 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0823 00:28:36.360254 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0823 00:28:36.360571 32262 solver.cpp:404]     Test net output #1: loss = 0.561221 (* 1 = 0.561221 loss)\nI0823 00:28:37.687590 32262 solver.cpp:228] Iteration 64800, loss = 0.000384207\nI0823 00:28:37.687626 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:28:37.687641 32262 solver.cpp:244]     Train net output #1: loss = 0.000384312 (* 1 = 0.000384312 loss)\nI0823 00:28:37.767946 32262 sgd_solver.cpp:166] Iteration 64800, lr = 0.035\nI0823 00:30:56.741207 32262 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0823 00:32:18.361732 32262 solver.cpp:404]     Test net output #0: accuracy = 0.858\nI0823 00:32:18.362074 32262 solver.cpp:404]     Test net output #1: loss = 0.552574 (* 1 = 0.552574 loss)\nI0823 00:32:19.688263 32262 solver.cpp:228] Iteration 64900, loss = 0.000393964\nI0823 00:32:19.688299 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:32:19.688314 32262 solver.cpp:244]     Train net output #1: loss = 0.000394069 (* 1 = 0.000394069 loss)\nI0823 00:32:19.775684 32262 sgd_solver.cpp:166] Iteration 64900, lr = 0.035\nI0823 00:34:38.882341 32262 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0823 00:36:00.560108 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0823 00:36:00.560451 32262 solver.cpp:404]     Test net output #1: loss = 0.560881 (* 1 = 0.560881 loss)\nI0823 00:36:01.887110 32262 solver.cpp:228] Iteration 65000, loss = 0.000334556\nI0823 
00:36:01.887154 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:36:01.887171 32262 solver.cpp:244]     Train net output #1: loss = 0.00033466 (* 1 = 0.00033466 loss)\nI0823 00:36:01.975255 32262 sgd_solver.cpp:166] Iteration 65000, lr = 0.035\nI0823 00:38:21.151304 32262 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0823 00:39:42.829989 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85732\nI0823 00:39:42.830329 32262 solver.cpp:404]     Test net output #1: loss = 0.553837 (* 1 = 0.553837 loss)\nI0823 00:39:44.156316 32262 solver.cpp:228] Iteration 65100, loss = 0.000461527\nI0823 00:39:44.156352 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:39:44.156366 32262 solver.cpp:244]     Train net output #1: loss = 0.000461632 (* 1 = 0.000461632 loss)\nI0823 00:39:44.244269 32262 sgd_solver.cpp:166] Iteration 65100, lr = 0.035\nI0823 00:42:03.335489 32262 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0823 00:43:25.367727 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85732\nI0823 00:43:25.368069 32262 solver.cpp:404]     Test net output #1: loss = 0.562097 (* 1 = 0.562097 loss)\nI0823 00:43:26.699273 32262 solver.cpp:228] Iteration 65200, loss = 0.000399075\nI0823 00:43:26.699326 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:43:26.699348 32262 solver.cpp:244]     Train net output #1: loss = 0.00039918 (* 1 = 0.00039918 loss)\nI0823 00:43:26.780004 32262 sgd_solver.cpp:166] Iteration 65200, lr = 0.035\nI0823 00:45:45.895073 32262 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0823 00:47:07.771556 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8578\nI0823 00:47:07.771816 32262 solver.cpp:404]     Test net output #1: loss = 0.553261 (* 1 = 0.553261 loss)\nI0823 00:47:09.101797 32262 solver.cpp:228] Iteration 65300, loss = 0.000439098\nI0823 00:47:09.101840 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:47:09.101855 32262 
solver.cpp:244]     Train net output #1: loss = 0.000439203 (* 1 = 0.000439203 loss)\nI0823 00:47:09.186053 32262 sgd_solver.cpp:166] Iteration 65300, lr = 0.035\nI0823 00:49:28.474637 32262 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0823 00:50:50.353896 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0823 00:50:50.354193 32262 solver.cpp:404]     Test net output #1: loss = 0.563175 (* 1 = 0.563175 loss)\nI0823 00:50:51.684237 32262 solver.cpp:228] Iteration 65400, loss = 0.000367504\nI0823 00:50:51.684278 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:50:51.684293 32262 solver.cpp:244]     Train net output #1: loss = 0.000367608 (* 1 = 0.000367608 loss)\nI0823 00:50:51.769536 32262 sgd_solver.cpp:166] Iteration 65400, lr = 0.035\nI0823 00:53:11.083778 32262 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0823 00:54:33.269620 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85756\nI0823 00:54:33.269984 32262 solver.cpp:404]     Test net output #1: loss = 0.554929 (* 1 = 0.554929 loss)\nI0823 00:54:34.602401 32262 solver.cpp:228] Iteration 65500, loss = 0.000486651\nI0823 00:54:34.602442 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:54:34.602457 32262 solver.cpp:244]     Train net output #1: loss = 0.000486756 (* 1 = 0.000486756 loss)\nI0823 00:54:34.685739 32262 sgd_solver.cpp:166] Iteration 65500, lr = 0.035\nI0823 00:56:54.026403 32262 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0823 00:58:16.383623 32262 solver.cpp:404]     Test net output #0: accuracy = 0.857\nI0823 00:58:16.383968 32262 solver.cpp:404]     Test net output #1: loss = 0.563686 (* 1 = 0.563686 loss)\nI0823 00:58:17.716837 32262 solver.cpp:228] Iteration 65600, loss = 0.000362451\nI0823 00:58:17.716878 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 00:58:17.716893 32262 solver.cpp:244]     Train net output #1: loss = 0.000362556 (* 1 = 0.000362556 loss)\nI0823 00:58:17.797806 32262 
sgd_solver.cpp:166] Iteration 65600, lr = 0.035\nI0823 01:00:37.072866 32262 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0823 01:01:59.551754 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0823 01:01:59.552088 32262 solver.cpp:404]     Test net output #1: loss = 0.554499 (* 1 = 0.554499 loss)\nI0823 01:02:00.884985 32262 solver.cpp:228] Iteration 65700, loss = 0.000433181\nI0823 01:02:00.885025 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:02:00.885040 32262 solver.cpp:244]     Train net output #1: loss = 0.000433286 (* 1 = 0.000433286 loss)\nI0823 01:02:00.968056 32262 sgd_solver.cpp:166] Iteration 65700, lr = 0.035\nI0823 01:04:20.276401 32262 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0823 01:05:42.738493 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85764\nI0823 01:05:42.738773 32262 solver.cpp:404]     Test net output #1: loss = 0.563051 (* 1 = 0.563051 loss)\nI0823 01:05:44.071246 32262 solver.cpp:228] Iteration 65800, loss = 0.000338482\nI0823 01:05:44.071288 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:05:44.071303 32262 solver.cpp:244]     Train net output #1: loss = 0.000338587 (* 1 = 0.000338587 loss)\nI0823 01:05:44.149677 32262 sgd_solver.cpp:166] Iteration 65800, lr = 0.035\nI0823 01:08:03.369349 32262 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0823 01:09:25.843602 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85876\nI0823 01:09:25.843868 32262 solver.cpp:404]     Test net output #1: loss = 0.553572 (* 1 = 0.553572 loss)\nI0823 01:09:27.176445 32262 solver.cpp:228] Iteration 65900, loss = 0.000464902\nI0823 01:09:27.176489 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:09:27.176503 32262 solver.cpp:244]     Train net output #1: loss = 0.000465007 (* 1 = 0.000465007 loss)\nI0823 01:09:27.255367 32262 sgd_solver.cpp:166] Iteration 65900, lr = 0.035\nI0823 01:11:46.508313 32262 solver.cpp:337] Iteration 66000, 
Testing net (#0)\nI0823 01:13:08.996426 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0823 01:13:08.996759 32262 solver.cpp:404]     Test net output #1: loss = 0.562607 (* 1 = 0.562607 loss)\nI0823 01:13:10.329685 32262 solver.cpp:228] Iteration 66000, loss = 0.000337937\nI0823 01:13:10.329727 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:13:10.329743 32262 solver.cpp:244]     Train net output #1: loss = 0.000338042 (* 1 = 0.000338042 loss)\nI0823 01:13:10.417076 32262 sgd_solver.cpp:166] Iteration 66000, lr = 0.035\nI0823 01:15:29.758555 32262 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0823 01:16:52.255652 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85776\nI0823 01:16:52.255950 32262 solver.cpp:404]     Test net output #1: loss = 0.555765 (* 1 = 0.555765 loss)\nI0823 01:16:53.589192 32262 solver.cpp:228] Iteration 66100, loss = 0.000501851\nI0823 01:16:53.589236 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:16:53.589251 32262 solver.cpp:244]     Train net output #1: loss = 0.000501956 (* 1 = 0.000501956 loss)\nI0823 01:16:53.673949 32262 sgd_solver.cpp:166] Iteration 66100, lr = 0.035\nI0823 01:19:12.891932 32262 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0823 01:20:35.390993 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0823 01:20:35.391263 32262 solver.cpp:404]     Test net output #1: loss = 0.564809 (* 1 = 0.564809 loss)\nI0823 01:20:36.724253 32262 solver.cpp:228] Iteration 66200, loss = 0.000330167\nI0823 01:20:36.724298 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:20:36.724311 32262 solver.cpp:244]     Train net output #1: loss = 0.000330272 (* 1 = 0.000330272 loss)\nI0823 01:20:36.807430 32262 sgd_solver.cpp:166] Iteration 66200, lr = 0.035\nI0823 01:22:56.064540 32262 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0823 01:24:18.526233 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85796\nI0823 
01:24:18.526527 32262 solver.cpp:404]     Test net output #1: loss = 0.556314 (* 1 = 0.556314 loss)\nI0823 01:24:19.859582 32262 solver.cpp:228] Iteration 66300, loss = 0.000426727\nI0823 01:24:19.859623 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:24:19.859638 32262 solver.cpp:244]     Train net output #1: loss = 0.000426832 (* 1 = 0.000426832 loss)\nI0823 01:24:19.939451 32262 sgd_solver.cpp:166] Iteration 66300, lr = 0.035\nI0823 01:26:39.241014 32262 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0823 01:28:01.728183 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85708\nI0823 01:28:01.728477 32262 solver.cpp:404]     Test net output #1: loss = 0.565419 (* 1 = 0.565419 loss)\nI0823 01:28:03.061718 32262 solver.cpp:228] Iteration 66400, loss = 0.000338339\nI0823 01:28:03.061764 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:28:03.061779 32262 solver.cpp:244]     Train net output #1: loss = 0.000338444 (* 1 = 0.000338444 loss)\nI0823 01:28:03.145460 32262 sgd_solver.cpp:166] Iteration 66400, lr = 0.035\nI0823 01:30:22.353567 32262 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0823 01:31:44.887403 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85864\nI0823 01:31:44.887763 32262 solver.cpp:404]     Test net output #1: loss = 0.556598 (* 1 = 0.556598 loss)\nI0823 01:31:46.220391 32262 solver.cpp:228] Iteration 66500, loss = 0.000432989\nI0823 01:31:46.220435 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:31:46.220450 32262 solver.cpp:244]     Train net output #1: loss = 0.000433093 (* 1 = 0.000433093 loss)\nI0823 01:31:46.301376 32262 sgd_solver.cpp:166] Iteration 66500, lr = 0.035\nI0823 01:34:05.578935 32262 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0823 01:35:27.888469 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0823 01:35:27.888833 32262 solver.cpp:404]     Test net output #1: loss = 0.567139 (* 1 = 0.567139 loss)\nI0823 
01:35:29.221634 32262 solver.cpp:228] Iteration 66600, loss = 0.000308216\nI0823 01:35:29.221676 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:35:29.221693 32262 solver.cpp:244]     Train net output #1: loss = 0.000308321 (* 1 = 0.000308321 loss)\nI0823 01:35:29.305852 32262 sgd_solver.cpp:166] Iteration 66600, lr = 0.035\nI0823 01:37:48.605710 32262 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0823 01:39:11.158325 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85796\nI0823 01:39:11.158706 32262 solver.cpp:404]     Test net output #1: loss = 0.558467 (* 1 = 0.558467 loss)\nI0823 01:39:12.491756 32262 solver.cpp:228] Iteration 66700, loss = 0.00038421\nI0823 01:39:12.491797 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:39:12.491812 32262 solver.cpp:244]     Train net output #1: loss = 0.000384315 (* 1 = 0.000384315 loss)\nI0823 01:39:12.581164 32262 sgd_solver.cpp:166] Iteration 66700, lr = 0.035\nI0823 01:41:31.862092 32262 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0823 01:42:54.401696 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0823 01:42:54.402084 32262 solver.cpp:404]     Test net output #1: loss = 0.567556 (* 1 = 0.567556 loss)\nI0823 01:42:55.735056 32262 solver.cpp:228] Iteration 66800, loss = 0.000291908\nI0823 01:42:55.735103 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:42:55.735119 32262 solver.cpp:244]     Train net output #1: loss = 0.000292013 (* 1 = 0.000292013 loss)\nI0823 01:42:55.821151 32262 sgd_solver.cpp:166] Iteration 66800, lr = 0.035\nI0823 01:45:15.144840 32262 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0823 01:46:37.686100 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8582\nI0823 01:46:37.686482 32262 solver.cpp:404]     Test net output #1: loss = 0.559046 (* 1 = 0.559046 loss)\nI0823 01:46:39.019733 32262 solver.cpp:228] Iteration 66900, loss = 0.000469068\nI0823 01:46:39.019774 32262 
solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:46:39.019790 32262 solver.cpp:244]     Train net output #1: loss = 0.000469173 (* 1 = 0.000469173 loss)\nI0823 01:46:39.103765 32262 sgd_solver.cpp:166] Iteration 66900, lr = 0.035\nI0823 01:48:58.456894 32262 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0823 01:50:20.983541 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0823 01:50:20.983893 32262 solver.cpp:404]     Test net output #1: loss = 0.567704 (* 1 = 0.567704 loss)\nI0823 01:50:22.316740 32262 solver.cpp:228] Iteration 67000, loss = 0.000391013\nI0823 01:50:22.316783 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:50:22.316799 32262 solver.cpp:244]     Train net output #1: loss = 0.000391117 (* 1 = 0.000391117 loss)\nI0823 01:50:22.398836 32262 sgd_solver.cpp:166] Iteration 67000, lr = 0.035\nI0823 01:52:41.741559 32262 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0823 01:54:03.482010 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85756\nI0823 01:54:03.482352 32262 solver.cpp:404]     Test net output #1: loss = 0.560346 (* 1 = 0.560346 loss)\nI0823 01:54:04.813783 32262 solver.cpp:228] Iteration 67100, loss = 0.000470891\nI0823 01:54:04.813819 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:54:04.813834 32262 solver.cpp:244]     Train net output #1: loss = 0.000470996 (* 1 = 0.000470996 loss)\nI0823 01:54:04.892488 32262 sgd_solver.cpp:166] Iteration 67100, lr = 0.035\nI0823 01:56:24.111507 32262 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0823 01:57:46.638258 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85692\nI0823 01:57:46.638638 32262 solver.cpp:404]     Test net output #1: loss = 0.568422 (* 1 = 0.568422 loss)\nI0823 01:57:47.971299 32262 solver.cpp:228] Iteration 67200, loss = 0.000305774\nI0823 01:57:47.971338 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 01:57:47.971354 32262 solver.cpp:244]     Train net 
output #1: loss = 0.000305879 (* 1 = 0.000305879 loss)\nI0823 01:57:48.056565 32262 sgd_solver.cpp:166] Iteration 67200, lr = 0.035\nI0823 02:00:07.319859 32262 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0823 02:01:29.880041 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85764\nI0823 02:01:29.880394 32262 solver.cpp:404]     Test net output #1: loss = 0.559554 (* 1 = 0.559554 loss)\nI0823 02:01:31.213398 32262 solver.cpp:228] Iteration 67300, loss = 0.000471813\nI0823 02:01:31.213435 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:01:31.213451 32262 solver.cpp:244]     Train net output #1: loss = 0.000471918 (* 1 = 0.000471918 loss)\nI0823 02:01:31.297873 32262 sgd_solver.cpp:166] Iteration 67300, lr = 0.035\nI0823 02:03:50.519840 32262 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0823 02:05:13.081310 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8572\nI0823 02:05:13.081691 32262 solver.cpp:404]     Test net output #1: loss = 0.569387 (* 1 = 0.569387 loss)\nI0823 02:05:14.415128 32262 solver.cpp:228] Iteration 67400, loss = 0.000351259\nI0823 02:05:14.415169 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:05:14.415184 32262 solver.cpp:244]     Train net output #1: loss = 0.000351363 (* 1 = 0.000351363 loss)\nI0823 02:05:14.492334 32262 sgd_solver.cpp:166] Iteration 67400, lr = 0.035\nI0823 02:07:33.716507 32262 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0823 02:08:56.287240 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8576\nI0823 02:08:56.287602 32262 solver.cpp:404]     Test net output #1: loss = 0.560334 (* 1 = 0.560334 loss)\nI0823 02:08:57.620915 32262 solver.cpp:228] Iteration 67500, loss = 0.000454537\nI0823 02:08:57.620954 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:08:57.620970 32262 solver.cpp:244]     Train net output #1: loss = 0.000454642 (* 1 = 0.000454642 loss)\nI0823 02:08:57.704926 32262 sgd_solver.cpp:166] Iteration 
67500, lr = 0.035\nI0823 02:11:17.054744 32262 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0823 02:12:39.611191 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85684\nI0823 02:12:39.611549 32262 solver.cpp:404]     Test net output #1: loss = 0.569521 (* 1 = 0.569521 loss)\nI0823 02:12:40.944752 32262 solver.cpp:228] Iteration 67600, loss = 0.000320479\nI0823 02:12:40.944792 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:12:40.944806 32262 solver.cpp:244]     Train net output #1: loss = 0.000320584 (* 1 = 0.000320584 loss)\nI0823 02:12:41.024596 32262 sgd_solver.cpp:166] Iteration 67600, lr = 0.035\nI0823 02:15:00.339136 32262 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0823 02:16:22.905377 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85736\nI0823 02:16:22.905737 32262 solver.cpp:404]     Test net output #1: loss = 0.561074 (* 1 = 0.561074 loss)\nI0823 02:16:24.238584 32262 solver.cpp:228] Iteration 67700, loss = 0.000421647\nI0823 02:16:24.238625 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:16:24.238641 32262 solver.cpp:244]     Train net output #1: loss = 0.000421752 (* 1 = 0.000421752 loss)\nI0823 02:16:24.319349 32262 sgd_solver.cpp:166] Iteration 67700, lr = 0.035\nI0823 02:18:43.651995 32262 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0823 02:20:06.244686 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85704\nI0823 02:20:06.245057 32262 solver.cpp:404]     Test net output #1: loss = 0.568674 (* 1 = 0.568674 loss)\nI0823 02:20:07.578171 32262 solver.cpp:228] Iteration 67800, loss = 0.000331133\nI0823 02:20:07.578209 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:20:07.578225 32262 solver.cpp:244]     Train net output #1: loss = 0.000331238 (* 1 = 0.000331238 loss)\nI0823 02:20:07.663300 32262 sgd_solver.cpp:166] Iteration 67800, lr = 0.035\nI0823 02:22:27.049581 32262 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0823 
02:23:49.631829 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85752\nI0823 02:23:49.632232 32262 solver.cpp:404]     Test net output #1: loss = 0.561839 (* 1 = 0.561839 loss)\nI0823 02:23:50.964874 32262 solver.cpp:228] Iteration 67900, loss = 0.00050031\nI0823 02:23:50.964912 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:23:50.964928 32262 solver.cpp:244]     Train net output #1: loss = 0.000500415 (* 1 = 0.000500415 loss)\nI0823 02:23:51.042477 32262 sgd_solver.cpp:166] Iteration 67900, lr = 0.035\nI0823 02:26:10.358733 32262 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0823 02:27:32.947610 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85708\nI0823 02:27:32.947973 32262 solver.cpp:404]     Test net output #1: loss = 0.570928 (* 1 = 0.570928 loss)\nI0823 02:27:34.281296 32262 solver.cpp:228] Iteration 68000, loss = 0.000299304\nI0823 02:27:34.281337 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:27:34.281352 32262 solver.cpp:244]     Train net output #1: loss = 0.000299409 (* 1 = 0.000299409 loss)\nI0823 02:27:34.362896 32262 sgd_solver.cpp:166] Iteration 68000, lr = 0.035\nI0823 02:29:53.596302 32262 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0823 02:31:16.150349 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8582\nI0823 02:31:16.150703 32262 solver.cpp:404]     Test net output #1: loss = 0.561036 (* 1 = 0.561036 loss)\nI0823 02:31:17.482954 32262 solver.cpp:228] Iteration 68100, loss = 0.0004242\nI0823 02:31:17.482992 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:31:17.483008 32262 solver.cpp:244]     Train net output #1: loss = 0.000424305 (* 1 = 0.000424305 loss)\nI0823 02:31:17.564137 32262 sgd_solver.cpp:166] Iteration 68100, lr = 0.035\nI0823 02:33:36.992166 32262 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0823 02:34:59.542703 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8574\nI0823 02:34:59.543089 32262 
solver.cpp:404]     Test net output #1: loss = 0.569154 (* 1 = 0.569154 loss)\nI0823 02:35:00.876250 32262 solver.cpp:228] Iteration 68200, loss = 0.000338923\nI0823 02:35:00.876289 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:35:00.876305 32262 solver.cpp:244]     Train net output #1: loss = 0.000339028 (* 1 = 0.000339028 loss)\nI0823 02:35:00.959595 32262 sgd_solver.cpp:166] Iteration 68200, lr = 0.035\nI0823 02:37:20.285449 32262 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0823 02:38:42.841147 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85848\nI0823 02:38:42.841536 32262 solver.cpp:404]     Test net output #1: loss = 0.562216 (* 1 = 0.562216 loss)\nI0823 02:38:44.174008 32262 solver.cpp:228] Iteration 68300, loss = 0.000451197\nI0823 02:38:44.174052 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:38:44.174067 32262 solver.cpp:244]     Train net output #1: loss = 0.000451302 (* 1 = 0.000451302 loss)\nI0823 02:38:44.258319 32262 sgd_solver.cpp:166] Iteration 68300, lr = 0.035\nI0823 02:41:03.562283 32262 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0823 02:42:26.108669 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85688\nI0823 02:42:26.109071 32262 solver.cpp:404]     Test net output #1: loss = 0.572157 (* 1 = 0.572157 loss)\nI0823 02:42:27.442025 32262 solver.cpp:228] Iteration 68400, loss = 0.000349561\nI0823 02:42:27.442065 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:42:27.442080 32262 solver.cpp:244]     Train net output #1: loss = 0.000349665 (* 1 = 0.000349665 loss)\nI0823 02:42:27.523572 32262 sgd_solver.cpp:166] Iteration 68400, lr = 0.035\nI0823 02:44:46.853580 32262 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0823 02:46:09.405794 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85768\nI0823 02:46:09.406189 32262 solver.cpp:404]     Test net output #1: loss = 0.563474 (* 1 = 0.563474 loss)\nI0823 02:46:10.739467 32262 
solver.cpp:228] Iteration 68500, loss = 0.000463779\nI0823 02:46:10.739508 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:46:10.739523 32262 solver.cpp:244]     Train net output #1: loss = 0.000463884 (* 1 = 0.000463884 loss)\nI0823 02:46:10.818944 32262 sgd_solver.cpp:166] Iteration 68500, lr = 0.035\nI0823 02:48:30.119557 32262 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0823 02:49:52.673040 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85704\nI0823 02:49:52.673411 32262 solver.cpp:404]     Test net output #1: loss = 0.572033 (* 1 = 0.572033 loss)\nI0823 02:49:54.006283 32262 solver.cpp:228] Iteration 68600, loss = 0.000362302\nI0823 02:49:54.006325 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:49:54.006340 32262 solver.cpp:244]     Train net output #1: loss = 0.000362407 (* 1 = 0.000362407 loss)\nI0823 02:49:54.090296 32262 sgd_solver.cpp:166] Iteration 68600, lr = 0.035\nI0823 02:52:13.391415 32262 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0823 02:53:35.917863 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85712\nI0823 02:53:35.918272 32262 solver.cpp:404]     Test net output #1: loss = 0.5654 (* 1 = 0.5654 loss)\nI0823 02:53:37.250828 32262 solver.cpp:228] Iteration 68700, loss = 0.000450256\nI0823 02:53:37.250866 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 02:53:37.250882 32262 solver.cpp:244]     Train net output #1: loss = 0.000450361 (* 1 = 0.000450361 loss)\nI0823 02:53:37.331401 32262 sgd_solver.cpp:166] Iteration 68700, lr = 0.035\nI0823 02:55:56.595293 32262 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0823 02:57:19.131444 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85668\nI0823 02:57:19.131830 32262 solver.cpp:404]     Test net output #1: loss = 0.573968 (* 1 = 0.573968 loss)\nI0823 02:57:20.465085 32262 solver.cpp:228] Iteration 68800, loss = 0.000363519\nI0823 02:57:20.465128 32262 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0823 02:57:20.465144 32262 solver.cpp:244]     Train net output #1: loss = 0.000363624 (* 1 = 0.000363624 loss)\nI0823 02:57:20.547716 32262 sgd_solver.cpp:166] Iteration 68800, lr = 0.035\nI0823 02:59:39.800200 32262 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0823 03:01:02.322614 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0823 03:01:02.322978 32262 solver.cpp:404]     Test net output #1: loss = 0.566778 (* 1 = 0.566778 loss)\nI0823 03:01:03.656927 32262 solver.cpp:228] Iteration 68900, loss = 0.000385275\nI0823 03:01:03.656970 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:01:03.656985 32262 solver.cpp:244]     Train net output #1: loss = 0.00038538 (* 1 = 0.00038538 loss)\nI0823 03:01:03.732028 32262 sgd_solver.cpp:166] Iteration 68900, lr = 0.035\nI0823 03:03:23.142386 32262 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0823 03:04:45.658920 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85656\nI0823 03:04:45.659292 32262 solver.cpp:404]     Test net output #1: loss = 0.575184 (* 1 = 0.575184 loss)\nI0823 03:04:46.992100 32262 solver.cpp:228] Iteration 69000, loss = 0.000258379\nI0823 03:04:46.992141 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:04:46.992156 32262 solver.cpp:244]     Train net output #1: loss = 0.000258484 (* 1 = 0.000258484 loss)\nI0823 03:04:47.074993 32262 sgd_solver.cpp:166] Iteration 69000, lr = 0.035\nI0823 03:07:06.311620 32262 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0823 03:08:28.831374 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85736\nI0823 03:08:28.831739 32262 solver.cpp:404]     Test net output #1: loss = 0.56626 (* 1 = 0.56626 loss)\nI0823 03:08:30.161391 32262 solver.cpp:228] Iteration 69100, loss = 0.000445733\nI0823 03:08:30.161429 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:08:30.161444 32262 solver.cpp:244]     Train net output #1: loss = 0.000445838 (* 
1 = 0.000445838 loss)\nI0823 03:08:30.246889 32262 sgd_solver.cpp:166] Iteration 69100, lr = 0.035\nI0823 03:10:49.512322 32262 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0823 03:12:12.017444 32262 solver.cpp:404]     Test net output #0: accuracy = 0.856\nI0823 03:12:12.017827 32262 solver.cpp:404]     Test net output #1: loss = 0.577168 (* 1 = 0.577168 loss)\nI0823 03:12:13.350435 32262 solver.cpp:228] Iteration 69200, loss = 0.000328059\nI0823 03:12:13.350476 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:12:13.350491 32262 solver.cpp:244]     Train net output #1: loss = 0.000328164 (* 1 = 0.000328164 loss)\nI0823 03:12:13.433109 32262 sgd_solver.cpp:166] Iteration 69200, lr = 0.035\nI0823 03:14:32.714982 32262 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0823 03:15:55.239054 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85708\nI0823 03:15:55.239444 32262 solver.cpp:404]     Test net output #1: loss = 0.56807 (* 1 = 0.56807 loss)\nI0823 03:15:56.572046 32262 solver.cpp:228] Iteration 69300, loss = 0.000471791\nI0823 03:15:56.572088 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:15:56.572104 32262 solver.cpp:244]     Train net output #1: loss = 0.000471896 (* 1 = 0.000471896 loss)\nI0823 03:15:56.653547 32262 sgd_solver.cpp:166] Iteration 69300, lr = 0.035\nI0823 03:18:15.930485 32262 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0823 03:19:38.449769 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI0823 03:19:38.450163 32262 solver.cpp:404]     Test net output #1: loss = 0.578476 (* 1 = 0.578476 loss)\nI0823 03:19:39.782295 32262 solver.cpp:228] Iteration 69400, loss = 0.000346525\nI0823 03:19:39.782333 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:19:39.782349 32262 solver.cpp:244]     Train net output #1: loss = 0.00034663 (* 1 = 0.00034663 loss)\nI0823 03:19:39.867223 32262 sgd_solver.cpp:166] Iteration 69400, lr = 0.035\nI0823 
03:21:59.215071 32262 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0823 03:23:21.731799 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85684\nI0823 03:23:21.732180 32262 solver.cpp:404]     Test net output #1: loss = 0.570067 (* 1 = 0.570067 loss)\nI0823 03:23:23.064615 32262 solver.cpp:228] Iteration 69500, loss = 0.000420958\nI0823 03:23:23.064656 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:23:23.064672 32262 solver.cpp:244]     Train net output #1: loss = 0.000421063 (* 1 = 0.000421063 loss)\nI0823 03:23:23.148656 32262 sgd_solver.cpp:166] Iteration 69500, lr = 0.035\nI0823 03:25:42.470057 32262 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0823 03:27:04.987630 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85564\nI0823 03:27:04.988026 32262 solver.cpp:404]     Test net output #1: loss = 0.57824 (* 1 = 0.57824 loss)\nI0823 03:27:06.320832 32262 solver.cpp:228] Iteration 69600, loss = 0.000329122\nI0823 03:27:06.320875 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:27:06.320890 32262 solver.cpp:244]     Train net output #1: loss = 0.000329227 (* 1 = 0.000329227 loss)\nI0823 03:27:06.406817 32262 sgd_solver.cpp:166] Iteration 69600, lr = 0.035\nI0823 03:29:25.670323 32262 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0823 03:30:48.186164 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0823 03:30:48.186542 32262 solver.cpp:404]     Test net output #1: loss = 0.571046 (* 1 = 0.571046 loss)\nI0823 03:30:49.519343 32262 solver.cpp:228] Iteration 69700, loss = 0.000443359\nI0823 03:30:49.519384 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:30:49.519398 32262 solver.cpp:244]     Train net output #1: loss = 0.000443464 (* 1 = 0.000443464 loss)\nI0823 03:30:49.597466 32262 sgd_solver.cpp:166] Iteration 69700, lr = 0.035\nI0823 03:33:08.900270 32262 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0823 03:34:31.421947 32262 
solver.cpp:404]     Test net output #0: accuracy = 0.85596\nI0823 03:34:31.422317 32262 solver.cpp:404]     Test net output #1: loss = 0.578349 (* 1 = 0.578349 loss)\nI0823 03:34:32.755393 32262 solver.cpp:228] Iteration 69800, loss = 0.000322855\nI0823 03:34:32.755436 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:34:32.755452 32262 solver.cpp:244]     Train net output #1: loss = 0.00032296 (* 1 = 0.00032296 loss)\nI0823 03:34:32.836105 32262 sgd_solver.cpp:166] Iteration 69800, lr = 0.035\nI0823 03:36:52.129231 32262 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0823 03:38:14.649482 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85656\nI0823 03:38:14.649845 32262 solver.cpp:404]     Test net output #1: loss = 0.570985 (* 1 = 0.570985 loss)\nI0823 03:38:15.982491 32262 solver.cpp:228] Iteration 69900, loss = 0.000398479\nI0823 03:38:15.982534 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:38:15.982550 32262 solver.cpp:244]     Train net output #1: loss = 0.000398584 (* 1 = 0.000398584 loss)\nI0823 03:38:16.065603 32262 sgd_solver.cpp:166] Iteration 69900, lr = 0.035\nI0823 03:40:35.344511 32262 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0823 03:41:57.859547 32262 solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0823 03:41:57.859917 32262 solver.cpp:404]     Test net output #1: loss = 0.578832 (* 1 = 0.578832 loss)\nI0823 03:41:59.192955 32262 solver.cpp:228] Iteration 70000, loss = 0.000345804\nI0823 03:41:59.192996 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:41:59.193011 32262 solver.cpp:244]     Train net output #1: loss = 0.000345908 (* 1 = 0.000345908 loss)\nI0823 03:41:59.281385 32262 sgd_solver.cpp:107] MultiStep Status: Iteration 70000, step = 2\nI0823 03:41:59.281410 32262 sgd_solver.cpp:166] Iteration 70000, lr = 0.0035\nI0823 03:44:18.519359 32262 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0823 03:45:41.036948 32262 solver.cpp:404]     
Test net output #0: accuracy = 0.86636\nI0823 03:45:41.037320 32262 solver.cpp:404]     Test net output #1: loss = 0.534999 (* 1 = 0.534999 loss)\nI0823 03:45:42.370452 32262 solver.cpp:228] Iteration 70100, loss = 0.000370221\nI0823 03:45:42.370496 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:45:42.370510 32262 solver.cpp:244]     Train net output #1: loss = 0.000370326 (* 1 = 0.000370326 loss)\nI0823 03:45:42.451139 32262 sgd_solver.cpp:166] Iteration 70100, lr = 0.0035\nI0823 03:48:01.683007 32262 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0823 03:49:24.208597 32262 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0823 03:49:24.208962 32262 solver.cpp:404]     Test net output #1: loss = 0.516303 (* 1 = 0.516303 loss)\nI0823 03:49:25.541641 32262 solver.cpp:228] Iteration 70200, loss = 0.000305799\nI0823 03:49:25.541680 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:49:25.541695 32262 solver.cpp:244]     Train net output #1: loss = 0.000305904 (* 1 = 0.000305904 loss)\nI0823 03:49:25.624235 32262 sgd_solver.cpp:166] Iteration 70200, lr = 0.0035\nI0823 03:51:44.905102 32262 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0823 03:53:07.414764 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8798\nI0823 03:53:07.415179 32262 solver.cpp:404]     Test net output #1: loss = 0.484931 (* 1 = 0.484931 loss)\nI0823 03:53:08.748245 32262 solver.cpp:228] Iteration 70300, loss = 0.000405187\nI0823 03:53:08.748291 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:53:08.748314 32262 solver.cpp:244]     Train net output #1: loss = 0.000405292 (* 1 = 0.000405292 loss)\nI0823 03:53:08.834687 32262 sgd_solver.cpp:166] Iteration 70300, lr = 0.0035\nI0823 03:55:28.172684 32262 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0823 03:56:50.722302 32262 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0823 03:56:50.722667 32262 solver.cpp:404]     Test net output #1: 
loss = 0.474661 (* 1 = 0.474661 loss)\nI0823 03:56:52.056648 32262 solver.cpp:228] Iteration 70400, loss = 0.000310167\nI0823 03:56:52.056690 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 03:56:52.056713 32262 solver.cpp:244]     Train net output #1: loss = 0.000310271 (* 1 = 0.000310271 loss)\nI0823 03:56:52.139367 32262 sgd_solver.cpp:166] Iteration 70400, lr = 0.0035\nI0823 03:59:11.249099 32262 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0823 04:00:33.757704 32262 solver.cpp:404]     Test net output #0: accuracy = 0.88796\nI0823 04:00:33.758064 32262 solver.cpp:404]     Test net output #1: loss = 0.452598 (* 1 = 0.452598 loss)\nI0823 04:00:35.090951 32262 solver.cpp:228] Iteration 70500, loss = 0.000435234\nI0823 04:00:35.090989 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:00:35.091004 32262 solver.cpp:244]     Train net output #1: loss = 0.000435339 (* 1 = 0.000435339 loss)\nI0823 04:00:35.168156 32262 sgd_solver.cpp:166] Iteration 70500, lr = 0.0035\nI0823 04:02:53.971009 32262 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0823 04:04:16.472925 32262 solver.cpp:404]     Test net output #0: accuracy = 0.88924\nI0823 04:04:16.473322 32262 solver.cpp:404]     Test net output #1: loss = 0.447917 (* 1 = 0.447917 loss)\nI0823 04:04:17.806094 32262 solver.cpp:228] Iteration 70600, loss = 0.000264845\nI0823 04:04:17.806131 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:04:17.806147 32262 solver.cpp:244]     Train net output #1: loss = 0.00026495 (* 1 = 0.00026495 loss)\nI0823 04:04:17.889619 32262 sgd_solver.cpp:166] Iteration 70600, lr = 0.0035\nI0823 04:06:36.671069 32262 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0823 04:07:59.180454 32262 solver.cpp:404]     Test net output #0: accuracy = 0.89316\nI0823 04:07:59.180852 32262 solver.cpp:404]     Test net output #1: loss = 0.43143 (* 1 = 0.43143 loss)\nI0823 04:08:00.513267 32262 solver.cpp:228] Iteration 70700, loss = 
0.000420211\nI0823 04:08:00.513304 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:08:00.513319 32262 solver.cpp:244]     Train net output #1: loss = 0.000420316 (* 1 = 0.000420316 loss)\nI0823 04:08:00.594115 32262 sgd_solver.cpp:166] Iteration 70700, lr = 0.0035\nI0823 04:10:19.391575 32262 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0823 04:11:41.902344 32262 solver.cpp:404]     Test net output #0: accuracy = 0.895\nI0823 04:11:41.902737 32262 solver.cpp:404]     Test net output #1: loss = 0.430046 (* 1 = 0.430046 loss)\nI0823 04:11:43.234886 32262 solver.cpp:228] Iteration 70800, loss = 0.000292115\nI0823 04:11:43.234920 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:11:43.234935 32262 solver.cpp:244]     Train net output #1: loss = 0.00029222 (* 1 = 0.00029222 loss)\nI0823 04:11:43.310921 32262 sgd_solver.cpp:166] Iteration 70800, lr = 0.0035\nI0823 04:14:02.078090 32262 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0823 04:15:24.600641 32262 solver.cpp:404]     Test net output #0: accuracy = 0.8972\nI0823 04:15:24.600998 32262 solver.cpp:404]     Test net output #1: loss = 0.417354 (* 1 = 0.417354 loss)\nI0823 04:15:25.934001 32262 solver.cpp:228] Iteration 70900, loss = 0.000370505\nI0823 04:15:25.934046 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:15:25.934062 32262 solver.cpp:244]     Train net output #1: loss = 0.00037061 (* 1 = 0.00037061 loss)\nI0823 04:15:26.010381 32262 sgd_solver.cpp:166] Iteration 70900, lr = 0.0035\nI0823 04:17:44.906430 32262 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0823 04:19:07.421722 32262 solver.cpp:404]     Test net output #0: accuracy = 0.89852\nI0823 04:19:07.422143 32262 solver.cpp:404]     Test net output #1: loss = 0.418265 (* 1 = 0.418265 loss)\nI0823 04:19:08.755044 32262 solver.cpp:228] Iteration 71000, loss = 0.000345788\nI0823 04:19:08.755084 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
04:19:08.755100 32262 solver.cpp:244]     Train net output #1: loss = 0.000345893 (* 1 = 0.000345893 loss)\nI0823 04:19:08.828230 32262 sgd_solver.cpp:166] Iteration 71000, lr = 0.0035\nI0823 04:21:27.628314 32262 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0823 04:22:50.098801 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90048\nI0823 04:22:50.099200 32262 solver.cpp:404]     Test net output #1: loss = 0.408186 (* 1 = 0.408186 loss)\nI0823 04:22:51.431985 32262 solver.cpp:228] Iteration 71100, loss = 0.000382034\nI0823 04:22:51.432024 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:22:51.432044 32262 solver.cpp:244]     Train net output #1: loss = 0.000382139 (* 1 = 0.000382139 loss)\nI0823 04:22:51.509378 32262 sgd_solver.cpp:166] Iteration 71100, lr = 0.0035\nI0823 04:25:10.207917 32262 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0823 04:26:32.711827 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9006\nI0823 04:26:32.712117 32262 solver.cpp:404]     Test net output #1: loss = 0.410329 (* 1 = 0.410329 loss)\nI0823 04:26:34.044891 32262 solver.cpp:228] Iteration 71200, loss = 0.000320755\nI0823 04:26:34.044932 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:26:34.044948 32262 solver.cpp:244]     Train net output #1: loss = 0.00032086 (* 1 = 0.00032086 loss)\nI0823 04:26:34.120852 32262 sgd_solver.cpp:166] Iteration 71200, lr = 0.0035\nI0823 04:28:52.866441 32262 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0823 04:30:15.352538 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9022\nI0823 04:30:15.352826 32262 solver.cpp:404]     Test net output #1: loss = 0.401972 (* 1 = 0.401972 loss)\nI0823 04:30:16.685601 32262 solver.cpp:228] Iteration 71300, loss = 0.000416283\nI0823 04:30:16.685644 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:30:16.685662 32262 solver.cpp:244]     Train net output #1: loss = 0.000416388 (* 1 = 0.000416388 loss)\nI0823 
04:30:16.759631 32262 sgd_solver.cpp:166] Iteration 71300, lr = 0.0035\nI0823 04:32:35.447598 32262 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0823 04:33:57.923602 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90168\nI0823 04:33:57.923902 32262 solver.cpp:404]     Test net output #1: loss = 0.404783 (* 1 = 0.404783 loss)\nI0823 04:33:59.256629 32262 solver.cpp:228] Iteration 71400, loss = 0.000321781\nI0823 04:33:59.256669 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:33:59.256685 32262 solver.cpp:244]     Train net output #1: loss = 0.000321886 (* 1 = 0.000321886 loss)\nI0823 04:33:59.331683 32262 sgd_solver.cpp:166] Iteration 71400, lr = 0.0035\nI0823 04:36:18.062836 32262 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0823 04:37:40.299159 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90288\nI0823 04:37:40.299458 32262 solver.cpp:404]     Test net output #1: loss = 0.397249 (* 1 = 0.397249 loss)\nI0823 04:37:41.631621 32262 solver.cpp:228] Iteration 71500, loss = 0.000388618\nI0823 04:37:41.631664 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:37:41.631678 32262 solver.cpp:244]     Train net output #1: loss = 0.000388723 (* 1 = 0.000388723 loss)\nI0823 04:37:41.708343 32262 sgd_solver.cpp:166] Iteration 71500, lr = 0.0035\nI0823 04:40:00.413138 32262 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0823 04:41:22.679822 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90284\nI0823 04:41:22.680116 32262 solver.cpp:404]     Test net output #1: loss = 0.400967 (* 1 = 0.400967 loss)\nI0823 04:41:24.012189 32262 solver.cpp:228] Iteration 71600, loss = 0.000342647\nI0823 04:41:24.012231 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:41:24.012248 32262 solver.cpp:244]     Train net output #1: loss = 0.000342752 (* 1 = 0.000342752 loss)\nI0823 04:41:24.091889 32262 sgd_solver.cpp:166] Iteration 71600, lr = 0.0035\nI0823 04:43:42.780362 32262 
solver.cpp:337] Iteration 71700, Testing net (#0)\nI0823 04:45:05.099270 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90324\nI0823 04:45:05.099526 32262 solver.cpp:404]     Test net output #1: loss = 0.394508 (* 1 = 0.394508 loss)\nI0823 04:45:06.433199 32262 solver.cpp:228] Iteration 71700, loss = 0.000389272\nI0823 04:45:06.433243 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:45:06.433259 32262 solver.cpp:244]     Train net output #1: loss = 0.000389377 (* 1 = 0.000389377 loss)\nI0823 04:45:06.507385 32262 sgd_solver.cpp:166] Iteration 71700, lr = 0.0035\nI0823 04:47:25.237727 32262 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0823 04:48:47.464445 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90284\nI0823 04:48:47.464743 32262 solver.cpp:404]     Test net output #1: loss = 0.398554 (* 1 = 0.398554 loss)\nI0823 04:48:48.798620 32262 solver.cpp:228] Iteration 71800, loss = 0.000346078\nI0823 04:48:48.798663 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:48:48.798678 32262 solver.cpp:244]     Train net output #1: loss = 0.000346183 (* 1 = 0.000346183 loss)\nI0823 04:48:48.874171 32262 sgd_solver.cpp:166] Iteration 71800, lr = 0.0035\nI0823 04:51:07.581728 32262 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0823 04:52:29.712774 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9036\nI0823 04:52:29.713143 32262 solver.cpp:404]     Test net output #1: loss = 0.392576 (* 1 = 0.392576 loss)\nI0823 04:52:31.047060 32262 solver.cpp:228] Iteration 71900, loss = 0.000419144\nI0823 04:52:31.047104 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:52:31.047119 32262 solver.cpp:244]     Train net output #1: loss = 0.000419249 (* 1 = 0.000419249 loss)\nI0823 04:52:31.122169 32262 sgd_solver.cpp:166] Iteration 71900, lr = 0.0035\nI0823 04:54:49.852773 32262 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0823 04:56:12.129037 32262 solver.cpp:404]     Test net 
output #0: accuracy = 0.90324\nI0823 04:56:12.129318 32262 solver.cpp:404]     Test net output #1: loss = 0.396884 (* 1 = 0.396884 loss)\nI0823 04:56:13.463485 32262 solver.cpp:228] Iteration 72000, loss = 0.000328332\nI0823 04:56:13.463528 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:56:13.463544 32262 solver.cpp:244]     Train net output #1: loss = 0.000328437 (* 1 = 0.000328437 loss)\nI0823 04:56:13.540109 32262 sgd_solver.cpp:166] Iteration 72000, lr = 0.0035\nI0823 04:58:32.325683 32262 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0823 04:59:54.802495 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90388\nI0823 04:59:54.802808 32262 solver.cpp:404]     Test net output #1: loss = 0.391256 (* 1 = 0.391256 loss)\nI0823 04:59:56.136176 32262 solver.cpp:228] Iteration 72100, loss = 0.00041061\nI0823 04:59:56.136217 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 04:59:56.136234 32262 solver.cpp:244]     Train net output #1: loss = 0.000410715 (* 1 = 0.000410715 loss)\nI0823 04:59:56.215481 32262 sgd_solver.cpp:166] Iteration 72100, lr = 0.0035\nI0823 05:02:15.020483 32262 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0823 05:03:37.481631 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90324\nI0823 05:03:37.481930 32262 solver.cpp:404]     Test net output #1: loss = 0.395775 (* 1 = 0.395775 loss)\nI0823 05:03:38.815420 32262 solver.cpp:228] Iteration 72200, loss = 0.000308534\nI0823 05:03:38.815460 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:03:38.815476 32262 solver.cpp:244]     Train net output #1: loss = 0.000308639 (* 1 = 0.000308639 loss)\nI0823 05:03:38.892549 32262 sgd_solver.cpp:166] Iteration 72200, lr = 0.0035\nI0823 05:05:57.588001 32262 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0823 05:07:19.989544 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90424\nI0823 05:07:19.989805 32262 solver.cpp:404]     Test net output #1: loss = 
0.390397 (* 1 = 0.390397 loss)\nI0823 05:07:21.323362 32262 solver.cpp:228] Iteration 72300, loss = 0.000436251\nI0823 05:07:21.323406 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:07:21.323421 32262 solver.cpp:244]     Train net output #1: loss = 0.000436356 (* 1 = 0.000436356 loss)\nI0823 05:07:21.402297 32262 sgd_solver.cpp:166] Iteration 72300, lr = 0.0035\nI0823 05:09:40.082432 32262 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0823 05:11:02.244176 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90308\nI0823 05:11:02.244455 32262 solver.cpp:404]     Test net output #1: loss = 0.394951 (* 1 = 0.394951 loss)\nI0823 05:11:03.578456 32262 solver.cpp:228] Iteration 72400, loss = 0.000310087\nI0823 05:11:03.578500 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:11:03.578514 32262 solver.cpp:244]     Train net output #1: loss = 0.000310192 (* 1 = 0.000310192 loss)\nI0823 05:11:03.657646 32262 sgd_solver.cpp:166] Iteration 72400, lr = 0.0035\nI0823 05:13:22.438707 32262 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0823 05:14:44.469712 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90388\nI0823 05:14:44.469964 32262 solver.cpp:404]     Test net output #1: loss = 0.389832 (* 1 = 0.389832 loss)\nI0823 05:14:45.803582 32262 solver.cpp:228] Iteration 72500, loss = 0.00038136\nI0823 05:14:45.803627 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:14:45.803642 32262 solver.cpp:244]     Train net output #1: loss = 0.000381465 (* 1 = 0.000381465 loss)\nI0823 05:14:45.878749 32262 sgd_solver.cpp:166] Iteration 72500, lr = 0.0035\nI0823 05:17:04.644024 32262 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0823 05:18:26.664399 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90316\nI0823 05:18:26.664659 32262 solver.cpp:404]     Test net output #1: loss = 0.394613 (* 1 = 0.394613 loss)\nI0823 05:18:27.997012 32262 solver.cpp:228] Iteration 72600, loss = 
0.000274295\nI0823 05:18:27.997056 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:18:27.997076 32262 solver.cpp:244]     Train net output #1: loss = 0.0002744 (* 1 = 0.0002744 loss)\nI0823 05:18:28.074367 32262 sgd_solver.cpp:166] Iteration 72600, lr = 0.0035\nI0823 05:20:46.961596 32262 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0823 05:22:09.224081 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90396\nI0823 05:22:09.224392 32262 solver.cpp:404]     Test net output #1: loss = 0.389528 (* 1 = 0.389528 loss)\nI0823 05:22:10.557050 32262 solver.cpp:228] Iteration 72700, loss = 0.000450474\nI0823 05:22:10.557096 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:22:10.557112 32262 solver.cpp:244]     Train net output #1: loss = 0.000450579 (* 1 = 0.000450579 loss)\nI0823 05:22:10.635535 32262 sgd_solver.cpp:166] Iteration 72700, lr = 0.0035\nI0823 05:24:29.543362 32262 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0823 05:25:51.793009 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0823 05:25:51.793289 32262 solver.cpp:404]     Test net output #1: loss = 0.394419 (* 1 = 0.394419 loss)\nI0823 05:25:53.126327 32262 solver.cpp:228] Iteration 72800, loss = 0.000302549\nI0823 05:25:53.126366 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:25:53.126380 32262 solver.cpp:244]     Train net output #1: loss = 0.000302654 (* 1 = 0.000302654 loss)\nI0823 05:25:53.203768 32262 sgd_solver.cpp:166] Iteration 72800, lr = 0.0035\nI0823 05:28:12.034819 32262 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0823 05:29:34.524071 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90392\nI0823 05:29:34.524415 32262 solver.cpp:404]     Test net output #1: loss = 0.389334 (* 1 = 0.389334 loss)\nI0823 05:29:35.857357 32262 solver.cpp:228] Iteration 72900, loss = 0.000392377\nI0823 05:29:35.857400 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
05:29:35.857415 32262 solver.cpp:244]     Train net output #1: loss = 0.000392482 (* 1 = 0.000392482 loss)\nI0823 05:29:35.936208 32262 sgd_solver.cpp:166] Iteration 72900, lr = 0.0035\nI0823 05:31:54.764669 32262 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0823 05:33:17.131858 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90348\nI0823 05:33:17.132202 32262 solver.cpp:404]     Test net output #1: loss = 0.394235 (* 1 = 0.394235 loss)\nI0823 05:33:18.464889 32262 solver.cpp:228] Iteration 73000, loss = 0.000354227\nI0823 05:33:18.464932 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:33:18.464947 32262 solver.cpp:244]     Train net output #1: loss = 0.000354332 (* 1 = 0.000354332 loss)\nI0823 05:33:18.542335 32262 sgd_solver.cpp:166] Iteration 73000, lr = 0.0035\nI0823 05:35:37.324960 32262 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0823 05:36:59.799863 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0823 05:36:59.800281 32262 solver.cpp:404]     Test net output #1: loss = 0.389287 (* 1 = 0.389287 loss)\nI0823 05:37:01.132714 32262 solver.cpp:228] Iteration 73100, loss = 0.000409359\nI0823 05:37:01.132757 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:37:01.132773 32262 solver.cpp:244]     Train net output #1: loss = 0.000409464 (* 1 = 0.000409464 loss)\nI0823 05:37:01.208087 32262 sgd_solver.cpp:166] Iteration 73100, lr = 0.0035\nI0823 05:39:20.014554 32262 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0823 05:40:42.521255 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0823 05:40:42.521627 32262 solver.cpp:404]     Test net output #1: loss = 0.394206 (* 1 = 0.394206 loss)\nI0823 05:40:43.854013 32262 solver.cpp:228] Iteration 73200, loss = 0.000283426\nI0823 05:40:43.854056 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:40:43.854075 32262 solver.cpp:244]     Train net output #1: loss = 0.000283531 (* 1 = 0.000283531 
loss)\nI0823 05:40:43.928503 32262 sgd_solver.cpp:166] Iteration 73200, lr = 0.0035\nI0823 05:43:02.752184 32262 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0823 05:44:24.866343 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90424\nI0823 05:44:24.866683 32262 solver.cpp:404]     Test net output #1: loss = 0.389242 (* 1 = 0.389242 loss)\nI0823 05:44:26.199496 32262 solver.cpp:228] Iteration 73300, loss = 0.000376407\nI0823 05:44:26.199537 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:44:26.199553 32262 solver.cpp:244]     Train net output #1: loss = 0.000376512 (* 1 = 0.000376512 loss)\nI0823 05:44:26.278333 32262 sgd_solver.cpp:166] Iteration 73300, lr = 0.0035\nI0823 05:46:45.088255 32262 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0823 05:48:07.231982 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90352\nI0823 05:48:07.232259 32262 solver.cpp:404]     Test net output #1: loss = 0.394287 (* 1 = 0.394287 loss)\nI0823 05:48:08.564967 32262 solver.cpp:228] Iteration 73400, loss = 0.000320543\nI0823 05:48:08.565006 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:48:08.565022 32262 solver.cpp:244]     Train net output #1: loss = 0.000320648 (* 1 = 0.000320648 loss)\nI0823 05:48:08.640223 32262 sgd_solver.cpp:166] Iteration 73400, lr = 0.0035\nI0823 05:50:27.481807 32262 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0823 05:51:49.531157 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0823 05:51:49.531443 32262 solver.cpp:404]     Test net output #1: loss = 0.389311 (* 1 = 0.389311 loss)\nI0823 05:51:50.864275 32262 solver.cpp:228] Iteration 73500, loss = 0.0003595\nI0823 05:51:50.864313 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:51:50.864328 32262 solver.cpp:244]     Train net output #1: loss = 0.000359605 (* 1 = 0.000359605 loss)\nI0823 05:51:50.942577 32262 sgd_solver.cpp:166] Iteration 73500, lr = 0.0035\nI0823 05:54:09.743396 
32262 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0823 05:55:31.931780 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90336\nI0823 05:55:31.932129 32262 solver.cpp:404]     Test net output #1: loss = 0.394201 (* 1 = 0.394201 loss)\nI0823 05:55:33.264905 32262 solver.cpp:228] Iteration 73600, loss = 0.000291588\nI0823 05:55:33.264946 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:55:33.264962 32262 solver.cpp:244]     Train net output #1: loss = 0.000291693 (* 1 = 0.000291693 loss)\nI0823 05:55:33.344326 32262 sgd_solver.cpp:166] Iteration 73600, lr = 0.0035\nI0823 05:57:52.088654 32262 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0823 05:59:14.215111 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90468\nI0823 05:59:14.215381 32262 solver.cpp:404]     Test net output #1: loss = 0.389265 (* 1 = 0.389265 loss)\nI0823 05:59:15.548820 32262 solver.cpp:228] Iteration 73700, loss = 0.000389665\nI0823 05:59:15.548863 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 05:59:15.548887 32262 solver.cpp:244]     Train net output #1: loss = 0.000389769 (* 1 = 0.000389769 loss)\nI0823 05:59:15.626277 32262 sgd_solver.cpp:166] Iteration 73700, lr = 0.0035\nI0823 06:01:34.469354 32262 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0823 06:02:56.837294 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90372\nI0823 06:02:56.837646 32262 solver.cpp:404]     Test net output #1: loss = 0.394206 (* 1 = 0.394206 loss)\nI0823 06:02:58.171034 32262 solver.cpp:228] Iteration 73800, loss = 0.00032303\nI0823 06:02:58.171077 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:02:58.171100 32262 solver.cpp:244]     Train net output #1: loss = 0.000323135 (* 1 = 0.000323135 loss)\nI0823 06:02:58.247192 32262 sgd_solver.cpp:166] Iteration 73800, lr = 0.0035\nI0823 06:05:17.094440 32262 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0823 06:06:39.623922 32262 solver.cpp:404]     Test 
net output #0: accuracy = 0.90504\nI0823 06:06:39.624267 32262 solver.cpp:404]     Test net output #1: loss = 0.389269 (* 1 = 0.389269 loss)\nI0823 06:06:40.957514 32262 solver.cpp:228] Iteration 73900, loss = 0.000346978\nI0823 06:06:40.957556 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:06:40.957578 32262 solver.cpp:244]     Train net output #1: loss = 0.000347083 (* 1 = 0.000347083 loss)\nI0823 06:06:41.031536 32262 sgd_solver.cpp:166] Iteration 73900, lr = 0.0035\nI0823 06:08:59.820314 32262 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0823 06:10:22.267760 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90404\nI0823 06:10:22.268098 32262 solver.cpp:404]     Test net output #1: loss = 0.394307 (* 1 = 0.394307 loss)\nI0823 06:10:23.602388 32262 solver.cpp:228] Iteration 74000, loss = 0.000317654\nI0823 06:10:23.602434 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:10:23.602457 32262 solver.cpp:244]     Train net output #1: loss = 0.000317759 (* 1 = 0.000317759 loss)\nI0823 06:10:23.681246 32262 sgd_solver.cpp:166] Iteration 74000, lr = 0.0035\nI0823 06:12:42.526994 32262 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0823 06:14:05.063843 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 06:14:05.064116 32262 solver.cpp:404]     Test net output #1: loss = 0.389402 (* 1 = 0.389402 loss)\nI0823 06:14:06.397292 32262 solver.cpp:228] Iteration 74100, loss = 0.000393516\nI0823 06:14:06.397336 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:14:06.397359 32262 solver.cpp:244]     Train net output #1: loss = 0.000393621 (* 1 = 0.000393621 loss)\nI0823 06:14:06.475469 32262 sgd_solver.cpp:166] Iteration 74100, lr = 0.0035\nI0823 06:16:25.370124 32262 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0823 06:17:47.899313 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90432\nI0823 06:17:47.899605 32262 solver.cpp:404]     Test net output #1: loss = 
0.394431 (* 1 = 0.394431 loss)\nI0823 06:17:49.232779 32262 solver.cpp:228] Iteration 74200, loss = 0.000282098\nI0823 06:17:49.232821 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:17:49.232844 32262 solver.cpp:244]     Train net output #1: loss = 0.000282203 (* 1 = 0.000282203 loss)\nI0823 06:17:49.304934 32262 sgd_solver.cpp:166] Iteration 74200, lr = 0.0035\nI0823 06:20:08.122722 32262 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0823 06:21:30.619153 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90544\nI0823 06:21:30.619485 32262 solver.cpp:404]     Test net output #1: loss = 0.389449 (* 1 = 0.389449 loss)\nI0823 06:21:31.953514 32262 solver.cpp:228] Iteration 74300, loss = 0.000353771\nI0823 06:21:31.953562 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:21:31.953583 32262 solver.cpp:244]     Train net output #1: loss = 0.000353876 (* 1 = 0.000353876 loss)\nI0823 06:21:32.027029 32262 sgd_solver.cpp:166] Iteration 74300, lr = 0.0035\nI0823 06:23:50.800508 32262 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0823 06:25:13.314142 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90464\nI0823 06:25:13.314430 32262 solver.cpp:404]     Test net output #1: loss = 0.394429 (* 1 = 0.394429 loss)\nI0823 06:25:14.648993 32262 solver.cpp:228] Iteration 74400, loss = 0.000295993\nI0823 06:25:14.649037 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:25:14.649061 32262 solver.cpp:244]     Train net output #1: loss = 0.000296098 (* 1 = 0.000296098 loss)\nI0823 06:25:14.732762 32262 sgd_solver.cpp:166] Iteration 74400, lr = 0.0035\nI0823 06:27:33.520851 32262 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0823 06:28:56.057305 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 06:28:56.057606 32262 solver.cpp:404]     Test net output #1: loss = 0.389537 (* 1 = 0.389537 loss)\nI0823 06:28:57.391821 32262 solver.cpp:228] Iteration 74500, loss = 
0.000417449\nI0823 06:28:57.391868 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:28:57.391891 32262 solver.cpp:244]     Train net output #1: loss = 0.000417554 (* 1 = 0.000417554 loss)\nI0823 06:28:57.469496 32262 sgd_solver.cpp:166] Iteration 74500, lr = 0.0035\nI0823 06:31:16.166759 32262 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0823 06:32:38.668658 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90452\nI0823 06:32:38.669049 32262 solver.cpp:404]     Test net output #1: loss = 0.394597 (* 1 = 0.394597 loss)\nI0823 06:32:40.003409 32262 solver.cpp:228] Iteration 74600, loss = 0.000331024\nI0823 06:32:40.003454 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:32:40.003478 32262 solver.cpp:244]     Train net output #1: loss = 0.000331129 (* 1 = 0.000331129 loss)\nI0823 06:32:40.080025 32262 sgd_solver.cpp:166] Iteration 74600, lr = 0.0035\nI0823 06:34:58.842267 32262 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0823 06:36:21.365357 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90556\nI0823 06:36:21.365674 32262 solver.cpp:404]     Test net output #1: loss = 0.389666 (* 1 = 0.389666 loss)\nI0823 06:36:22.698904 32262 solver.cpp:228] Iteration 74700, loss = 0.000357715\nI0823 06:36:22.698949 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:36:22.698972 32262 solver.cpp:244]     Train net output #1: loss = 0.00035782 (* 1 = 0.00035782 loss)\nI0823 06:36:22.774027 32262 sgd_solver.cpp:166] Iteration 74700, lr = 0.0035\nI0823 06:38:41.707411 32262 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0823 06:40:04.220881 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9046\nI0823 06:40:04.221221 32262 solver.cpp:404]     Test net output #1: loss = 0.394652 (* 1 = 0.394652 loss)\nI0823 06:40:05.555915 32262 solver.cpp:228] Iteration 74800, loss = 0.000327895\nI0823 06:40:05.555961 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 
06:40:05.555989 32262 solver.cpp:244]     Train net output #1: loss = 0.000328 (* 1 = 0.000328 loss)\nI0823 06:40:05.630744 32262 sgd_solver.cpp:166] Iteration 74800, lr = 0.0035\nI0823 06:42:24.503445 32262 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0823 06:43:46.663622 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9058\nI0823 06:43:46.663964 32262 solver.cpp:404]     Test net output #1: loss = 0.389881 (* 1 = 0.389881 loss)\nI0823 06:43:47.998126 32262 solver.cpp:228] Iteration 74900, loss = 0.00042752\nI0823 06:43:47.998172 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:43:47.998196 32262 solver.cpp:244]     Train net output #1: loss = 0.000427625 (* 1 = 0.000427625 loss)\nI0823 06:43:48.073156 32262 sgd_solver.cpp:166] Iteration 74900, lr = 0.0035\nI0823 06:46:06.906205 32262 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0823 06:47:29.339321 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0823 06:47:29.339725 32262 solver.cpp:404]     Test net output #1: loss = 0.394856 (* 1 = 0.394856 loss)\nI0823 06:47:30.674577 32262 solver.cpp:228] Iteration 75000, loss = 0.000283336\nI0823 06:47:30.674621 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:47:30.674644 32262 solver.cpp:244]     Train net output #1: loss = 0.000283441 (* 1 = 0.000283441 loss)\nI0823 06:47:30.751560 32262 sgd_solver.cpp:166] Iteration 75000, lr = 0.0035\nI0823 06:49:49.559077 32262 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0823 06:51:11.942312 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90592\nI0823 06:51:11.942623 32262 solver.cpp:404]     Test net output #1: loss = 0.389944 (* 1 = 0.389944 loss)\nI0823 06:51:13.277441 32262 solver.cpp:228] Iteration 75100, loss = 0.000396897\nI0823 06:51:13.277487 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:51:13.277509 32262 solver.cpp:244]     Train net output #1: loss = 0.000397002 (* 1 = 0.000397002 loss)\nI0823 
06:51:13.355480 32262 sgd_solver.cpp:166] Iteration 75100, lr = 0.0035\nI0823 06:53:32.179736 32262 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0823 06:54:54.382773 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90456\nI0823 06:54:54.383070 32262 solver.cpp:404]     Test net output #1: loss = 0.394855 (* 1 = 0.394855 loss)\nI0823 06:54:55.717455 32262 solver.cpp:228] Iteration 75200, loss = 0.000309895\nI0823 06:54:55.717501 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:54:55.717517 32262 solver.cpp:244]     Train net output #1: loss = 0.00031 (* 1 = 0.00031 loss)\nI0823 06:54:55.794709 32262 sgd_solver.cpp:166] Iteration 75200, lr = 0.0035\nI0823 06:57:14.622822 32262 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0823 06:58:36.975939 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9056\nI0823 06:58:36.976219 32262 solver.cpp:404]     Test net output #1: loss = 0.389923 (* 1 = 0.389923 loss)\nI0823 06:58:38.310490 32262 solver.cpp:228] Iteration 75300, loss = 0.000373417\nI0823 06:58:38.310534 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 06:58:38.310549 32262 solver.cpp:244]     Train net output #1: loss = 0.000373522 (* 1 = 0.000373522 loss)\nI0823 06:58:38.387835 32262 sgd_solver.cpp:166] Iteration 75300, lr = 0.0035\nI0823 07:00:57.239639 32262 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0823 07:02:19.774636 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90468\nI0823 07:02:19.775001 32262 solver.cpp:404]     Test net output #1: loss = 0.394897 (* 1 = 0.394897 loss)\nI0823 07:02:21.108263 32262 solver.cpp:228] Iteration 75400, loss = 0.00035052\nI0823 07:02:21.108307 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:02:21.108324 32262 solver.cpp:244]     Train net output #1: loss = 0.000350624 (* 1 = 0.000350624 loss)\nI0823 07:02:21.183610 32262 sgd_solver.cpp:166] Iteration 75400, lr = 0.0035\nI0823 07:04:40.067529 32262 solver.cpp:337] 
Iteration 75500, Testing net (#0)\nI0823 07:06:02.626935 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90548\nI0823 07:06:02.627251 32262 solver.cpp:404]     Test net output #1: loss = 0.390068 (* 1 = 0.390068 loss)\nI0823 07:06:03.960482 32262 solver.cpp:228] Iteration 75500, loss = 0.000419627\nI0823 07:06:03.960527 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:06:03.960543 32262 solver.cpp:244]     Train net output #1: loss = 0.000419732 (* 1 = 0.000419732 loss)\nI0823 07:06:04.040130 32262 sgd_solver.cpp:166] Iteration 75500, lr = 0.0035\nI0823 07:08:22.968950 32262 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0823 07:09:45.521795 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90472\nI0823 07:09:45.522109 32262 solver.cpp:404]     Test net output #1: loss = 0.395069 (* 1 = 0.395069 loss)\nI0823 07:09:46.856101 32262 solver.cpp:228] Iteration 75600, loss = 0.000288156\nI0823 07:09:46.856143 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:09:46.856159 32262 solver.cpp:244]     Train net output #1: loss = 0.000288261 (* 1 = 0.000288261 loss)\nI0823 07:09:46.929369 32262 sgd_solver.cpp:166] Iteration 75600, lr = 0.0035\nI0823 07:12:05.802073 32262 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0823 07:13:28.350402 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90544\nI0823 07:13:28.350759 32262 solver.cpp:404]     Test net output #1: loss = 0.390113 (* 1 = 0.390113 loss)\nI0823 07:13:29.684068 32262 solver.cpp:228] Iteration 75700, loss = 0.000436753\nI0823 07:13:29.684111 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:13:29.684128 32262 solver.cpp:244]     Train net output #1: loss = 0.000436858 (* 1 = 0.000436858 loss)\nI0823 07:13:29.764617 32262 sgd_solver.cpp:166] Iteration 75700, lr = 0.0035\nI0823 07:15:48.533635 32262 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0823 07:17:11.078490 32262 solver.cpp:404]     Test net output #0: 
accuracy = 0.90468\nI0823 07:17:11.078783 32262 solver.cpp:404]     Test net output #1: loss = 0.395169 (* 1 = 0.395169 loss)\nI0823 07:17:12.413141 32262 solver.cpp:228] Iteration 75800, loss = 0.000294709\nI0823 07:17:12.413187 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:17:12.413211 32262 solver.cpp:244]     Train net output #1: loss = 0.000294814 (* 1 = 0.000294814 loss)\nI0823 07:17:12.485559 32262 sgd_solver.cpp:166] Iteration 75800, lr = 0.0035\nI0823 07:19:31.300931 32262 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0823 07:20:53.855875 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90532\nI0823 07:20:53.856230 32262 solver.cpp:404]     Test net output #1: loss = 0.390259 (* 1 = 0.390259 loss)\nI0823 07:20:55.190621 32262 solver.cpp:228] Iteration 75900, loss = 0.000386378\nI0823 07:20:55.190668 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:20:55.190685 32262 solver.cpp:244]     Train net output #1: loss = 0.000386483 (* 1 = 0.000386483 loss)\nI0823 07:20:55.262200 32262 sgd_solver.cpp:166] Iteration 75900, lr = 0.0035\nI0823 07:23:14.016345 32262 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0823 07:24:36.561452 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0823 07:24:36.561755 32262 solver.cpp:404]     Test net output #1: loss = 0.395282 (* 1 = 0.395282 loss)\nI0823 07:24:37.894634 32262 solver.cpp:228] Iteration 76000, loss = 0.000297613\nI0823 07:24:37.894676 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:24:37.894692 32262 solver.cpp:244]     Train net output #1: loss = 0.000297718 (* 1 = 0.000297718 loss)\nI0823 07:24:37.973181 32262 sgd_solver.cpp:166] Iteration 76000, lr = 0.0035\nI0823 07:26:56.782114 32262 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0823 07:28:19.324815 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90568\nI0823 07:28:19.325117 32262 solver.cpp:404]     Test net output #1: loss = 0.390358 (* 1 
= 0.390358 loss)\nI0823 07:28:20.659166 32262 solver.cpp:228] Iteration 76100, loss = 0.00037607\nI0823 07:28:20.659210 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:28:20.659226 32262 solver.cpp:244]     Train net output #1: loss = 0.000376175 (* 1 = 0.000376175 loss)\nI0823 07:28:20.735541 32262 sgd_solver.cpp:166] Iteration 76100, lr = 0.0035\nI0823 07:30:39.529263 32262 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0823 07:32:02.032588 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0823 07:32:02.032899 32262 solver.cpp:404]     Test net output #1: loss = 0.395341 (* 1 = 0.395341 loss)\nI0823 07:32:03.365589 32262 solver.cpp:228] Iteration 76200, loss = 0.000315859\nI0823 07:32:03.365633 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:32:03.365648 32262 solver.cpp:244]     Train net output #1: loss = 0.000315964 (* 1 = 0.000315964 loss)\nI0823 07:32:03.444371 32262 sgd_solver.cpp:166] Iteration 76200, lr = 0.0035\nI0823 07:34:22.178607 32262 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0823 07:35:44.615537 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9056\nI0823 07:35:44.615814 32262 solver.cpp:404]     Test net output #1: loss = 0.3905 (* 1 = 0.3905 loss)\nI0823 07:35:45.948839 32262 solver.cpp:228] Iteration 76300, loss = 0.000355785\nI0823 07:35:45.948884 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:35:45.948899 32262 solver.cpp:244]     Train net output #1: loss = 0.00035589 (* 1 = 0.00035589 loss)\nI0823 07:35:46.028201 32262 sgd_solver.cpp:166] Iteration 76300, lr = 0.0035\nI0823 07:38:04.803899 32262 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0823 07:39:27.067147 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0823 07:39:27.067456 32262 solver.cpp:404]     Test net output #1: loss = 0.395521 (* 1 = 0.395521 loss)\nI0823 07:39:28.400830 32262 solver.cpp:228] Iteration 76400, loss = 0.00029216\nI0823 
07:39:28.400873 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:39:28.400889 32262 solver.cpp:244]     Train net output #1: loss = 0.000292265 (* 1 = 0.000292265 loss)\nI0823 07:39:28.478348 32262 sgd_solver.cpp:166] Iteration 76400, lr = 0.0035\nI0823 07:41:47.323446 32262 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0823 07:43:09.525032 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9054\nI0823 07:43:09.525410 32262 solver.cpp:404]     Test net output #1: loss = 0.390638 (* 1 = 0.390638 loss)\nI0823 07:43:10.858644 32262 solver.cpp:228] Iteration 76500, loss = 0.00040745\nI0823 07:43:10.858685 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:43:10.858701 32262 solver.cpp:244]     Train net output #1: loss = 0.000407555 (* 1 = 0.000407555 loss)\nI0823 07:43:10.934736 32262 sgd_solver.cpp:166] Iteration 76500, lr = 0.0035\nI0823 07:45:29.691442 32262 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0823 07:46:51.982512 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9044\nI0823 07:46:51.982800 32262 solver.cpp:404]     Test net output #1: loss = 0.395638 (* 1 = 0.395638 loss)\nI0823 07:46:53.316452 32262 solver.cpp:228] Iteration 76600, loss = 0.000338664\nI0823 07:46:53.316495 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:46:53.316510 32262 solver.cpp:244]     Train net output #1: loss = 0.000338769 (* 1 = 0.000338769 loss)\nI0823 07:46:53.393049 32262 sgd_solver.cpp:166] Iteration 76600, lr = 0.0035\nI0823 07:49:12.167573 32262 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0823 07:50:34.352713 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9054\nI0823 07:50:34.352987 32262 solver.cpp:404]     Test net output #1: loss = 0.390761 (* 1 = 0.390761 loss)\nI0823 07:50:35.686646 32262 solver.cpp:228] Iteration 76700, loss = 0.000424688\nI0823 07:50:35.686687 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:50:35.686703 32262 
solver.cpp:244]     Train net output #1: loss = 0.000424793 (* 1 = 0.000424793 loss)\nI0823 07:50:35.763492 32262 sgd_solver.cpp:166] Iteration 76700, lr = 0.0035\nI0823 07:52:54.497546 32262 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0823 07:54:16.820575 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0823 07:54:16.820891 32262 solver.cpp:404]     Test net output #1: loss = 0.395706 (* 1 = 0.395706 loss)\nI0823 07:54:18.153847 32262 solver.cpp:228] Iteration 76800, loss = 0.000325583\nI0823 07:54:18.153887 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:54:18.153903 32262 solver.cpp:244]     Train net output #1: loss = 0.000325688 (* 1 = 0.000325688 loss)\nI0823 07:54:18.229950 32262 sgd_solver.cpp:166] Iteration 76800, lr = 0.0035\nI0823 07:56:37.070272 32262 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0823 07:57:59.575943 32262 solver.cpp:404]     Test net output #0: accuracy = 0.9054\nI0823 07:57:59.576263 32262 solver.cpp:404]     Test net output #1: loss = 0.390933 (* 1 = 0.390933 loss)\nI0823 07:58:00.908870 32262 solver.cpp:228] Iteration 76900, loss = 0.000432813\nI0823 07:58:00.908910 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 07:58:00.908926 32262 solver.cpp:244]     Train net output #1: loss = 0.000432918 (* 1 = 0.000432918 loss)\nI0823 07:58:00.986398 32262 sgd_solver.cpp:166] Iteration 76900, lr = 0.0035\nI0823 08:00:19.788990 32262 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0823 08:01:42.293419 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90428\nI0823 08:01:42.293735 32262 solver.cpp:404]     Test net output #1: loss = 0.395877 (* 1 = 0.395877 loss)\nI0823 08:01:43.628006 32262 solver.cpp:228] Iteration 77000, loss = 0.000284221\nI0823 08:01:43.628051 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:01:43.628065 32262 solver.cpp:244]     Train net output #1: loss = 0.000284326 (* 1 = 0.000284326 loss)\nI0823 08:01:43.701359 
32262 sgd_solver.cpp:166] Iteration 77000, lr = 0.0035\nI0823 08:04:02.492264 32262 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0823 08:05:25.026219 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90556\nI0823 08:05:25.026577 32262 solver.cpp:404]     Test net output #1: loss = 0.391039 (* 1 = 0.391039 loss)\nI0823 08:05:26.361241 32262 solver.cpp:228] Iteration 77100, loss = 0.000415729\nI0823 08:05:26.361286 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:05:26.361308 32262 solver.cpp:244]     Train net output #1: loss = 0.000415834 (* 1 = 0.000415834 loss)\nI0823 08:05:26.433856 32262 sgd_solver.cpp:166] Iteration 77100, lr = 0.0035\nI0823 08:07:45.315101 32262 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0823 08:09:07.841667 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90416\nI0823 08:09:07.842046 32262 solver.cpp:404]     Test net output #1: loss = 0.395959 (* 1 = 0.395959 loss)\nI0823 08:09:09.176548 32262 solver.cpp:228] Iteration 77200, loss = 0.000312694\nI0823 08:09:09.176590 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:09:09.176605 32262 solver.cpp:244]     Train net output #1: loss = 0.000312798 (* 1 = 0.000312798 loss)\nI0823 08:09:09.248211 32262 sgd_solver.cpp:166] Iteration 77200, lr = 0.0035\nI0823 08:11:27.950592 32262 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0823 08:12:50.497099 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90524\nI0823 08:12:50.497495 32262 solver.cpp:404]     Test net output #1: loss = 0.391102 (* 1 = 0.391102 loss)\nI0823 08:12:51.832315 32262 solver.cpp:228] Iteration 77300, loss = 0.000435315\nI0823 08:12:51.832360 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:12:51.832376 32262 solver.cpp:244]     Train net output #1: loss = 0.00043542 (* 1 = 0.00043542 loss)\nI0823 08:12:51.909682 32262 sgd_solver.cpp:166] Iteration 77300, lr = 0.0035\nI0823 08:15:10.736755 32262 solver.cpp:337] Iteration 
77400, Testing net (#0)\nI0823 08:16:33.300634 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90404\nI0823 08:16:33.301054 32262 solver.cpp:404]     Test net output #1: loss = 0.396166 (* 1 = 0.396166 loss)\nI0823 08:16:34.636143 32262 solver.cpp:228] Iteration 77400, loss = 0.000307294\nI0823 08:16:34.636188 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:16:34.636204 32262 solver.cpp:244]     Train net output #1: loss = 0.000307399 (* 1 = 0.000307399 loss)\nI0823 08:16:34.709980 32262 sgd_solver.cpp:166] Iteration 77400, lr = 0.0035\nI0823 08:18:53.433923 32262 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0823 08:20:15.990047 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90524\nI0823 08:20:15.990433 32262 solver.cpp:404]     Test net output #1: loss = 0.391255 (* 1 = 0.391255 loss)\nI0823 08:20:17.325318 32262 solver.cpp:228] Iteration 77500, loss = 0.000384218\nI0823 08:20:17.325362 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:20:17.325379 32262 solver.cpp:244]     Train net output #1: loss = 0.000384323 (* 1 = 0.000384323 loss)\nI0823 08:20:17.401559 32262 sgd_solver.cpp:166] Iteration 77500, lr = 0.0035\nI0823 08:22:36.108445 32262 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0823 08:23:58.662554 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90424\nI0823 08:23:58.662963 32262 solver.cpp:404]     Test net output #1: loss = 0.396182 (* 1 = 0.396182 loss)\nI0823 08:23:59.995932 32262 solver.cpp:228] Iteration 77600, loss = 0.000333439\nI0823 08:23:59.995976 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:23:59.995993 32262 solver.cpp:244]     Train net output #1: loss = 0.000333543 (* 1 = 0.000333543 loss)\nI0823 08:24:00.070101 32262 sgd_solver.cpp:166] Iteration 77600, lr = 0.0035\nI0823 08:26:18.919705 32262 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0823 08:27:41.473762 32262 solver.cpp:404]     Test net output #0: accuracy = 
0.90524\nI0823 08:27:41.474134 32262 solver.cpp:404]     Test net output #1: loss = 0.391291 (* 1 = 0.391291 loss)\nI0823 08:27:42.808761 32262 solver.cpp:228] Iteration 77700, loss = 0.00037559\nI0823 08:27:42.808804 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:27:42.808821 32262 solver.cpp:244]     Train net output #1: loss = 0.000375695 (* 1 = 0.000375695 loss)\nI0823 08:27:42.886970 32262 sgd_solver.cpp:166] Iteration 77700, lr = 0.0035\nI0823 08:30:01.725569 32262 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0823 08:31:24.274369 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90428\nI0823 08:31:24.274734 32262 solver.cpp:404]     Test net output #1: loss = 0.39623 (* 1 = 0.39623 loss)\nI0823 08:31:25.608851 32262 solver.cpp:228] Iteration 77800, loss = 0.000265229\nI0823 08:31:25.608896 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:31:25.608911 32262 solver.cpp:244]     Train net output #1: loss = 0.000265334 (* 1 = 0.000265334 loss)\nI0823 08:31:25.688053 32262 sgd_solver.cpp:166] Iteration 77800, lr = 0.0035\nI0823 08:33:44.449363 32262 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0823 08:35:06.995790 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90524\nI0823 08:35:06.996191 32262 solver.cpp:404]     Test net output #1: loss = 0.391397 (* 1 = 0.391397 loss)\nI0823 08:35:08.330277 32262 solver.cpp:228] Iteration 77900, loss = 0.000428581\nI0823 08:35:08.330320 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:35:08.330338 32262 solver.cpp:244]     Train net output #1: loss = 0.000428686 (* 1 = 0.000428686 loss)\nI0823 08:35:08.404403 32262 sgd_solver.cpp:166] Iteration 77900, lr = 0.0035\nI0823 08:37:27.246209 32262 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0823 08:38:49.798288 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90436\nI0823 08:38:49.798768 32262 solver.cpp:404]     Test net output #1: loss = 0.396336 (* 1 = 0.396336 
loss)\nI0823 08:38:51.131574 32262 solver.cpp:228] Iteration 78000, loss = 0.000358819\nI0823 08:38:51.131616 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:38:51.131633 32262 solver.cpp:244]     Train net output #1: loss = 0.000358923 (* 1 = 0.000358923 loss)\nI0823 08:38:51.204890 32262 sgd_solver.cpp:166] Iteration 78000, lr = 0.0035\nI0823 08:41:10.101513 32262 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0823 08:42:32.679236 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 08:42:32.679641 32262 solver.cpp:404]     Test net output #1: loss = 0.391475 (* 1 = 0.391475 loss)\nI0823 08:42:34.013483 32262 solver.cpp:228] Iteration 78100, loss = 0.000383875\nI0823 08:42:34.013530 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:42:34.013552 32262 solver.cpp:244]     Train net output #1: loss = 0.00038398 (* 1 = 0.00038398 loss)\nI0823 08:42:34.091812 32262 sgd_solver.cpp:166] Iteration 78100, lr = 0.0035\nI0823 08:44:52.842337 32262 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0823 08:46:15.411659 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90412\nI0823 08:46:15.412067 32262 solver.cpp:404]     Test net output #1: loss = 0.396485 (* 1 = 0.396485 loss)\nI0823 08:46:16.746388 32262 solver.cpp:228] Iteration 78200, loss = 0.000247056\nI0823 08:46:16.746435 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:46:16.746457 32262 solver.cpp:244]     Train net output #1: loss = 0.00024716 (* 1 = 0.00024716 loss)\nI0823 08:46:16.824945 32262 sgd_solver.cpp:166] Iteration 78200, lr = 0.0035\nI0823 08:48:35.518738 32262 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0823 08:49:58.079051 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 08:49:58.079427 32262 solver.cpp:404]     Test net output #1: loss = 0.391646 (* 1 = 0.391646 loss)\nI0823 08:49:59.413405 32262 solver.cpp:228] Iteration 78300, loss = 0.000355378\nI0823 08:49:59.413451 
32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:49:59.413475 32262 solver.cpp:244]     Train net output #1: loss = 0.000355483 (* 1 = 0.000355483 loss)\nI0823 08:49:59.489485 32262 sgd_solver.cpp:166] Iteration 78300, lr = 0.0035\nI0823 08:52:18.305043 32262 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0823 08:53:40.861558 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90448\nI0823 08:53:40.861943 32262 solver.cpp:404]     Test net output #1: loss = 0.396606 (* 1 = 0.396606 loss)\nI0823 08:53:42.194717 32262 solver.cpp:228] Iteration 78400, loss = 0.000314874\nI0823 08:53:42.194764 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:53:42.194787 32262 solver.cpp:244]     Train net output #1: loss = 0.000314979 (* 1 = 0.000314979 loss)\nI0823 08:53:42.269443 32262 sgd_solver.cpp:166] Iteration 78400, lr = 0.0035\nI0823 08:56:01.108044 32262 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0823 08:57:23.654654 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 08:57:23.655074 32262 solver.cpp:404]     Test net output #1: loss = 0.391812 (* 1 = 0.391812 loss)\nI0823 08:57:24.988709 32262 solver.cpp:228] Iteration 78500, loss = 0.000377972\nI0823 08:57:24.988755 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 08:57:24.988780 32262 solver.cpp:244]     Train net output #1: loss = 0.000378077 (* 1 = 0.000378077 loss)\nI0823 08:57:25.062451 32262 sgd_solver.cpp:166] Iteration 78500, lr = 0.0035\nI0823 08:59:43.957101 32262 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0823 09:01:06.513743 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90404\nI0823 09:01:06.514123 32262 solver.cpp:404]     Test net output #1: loss = 0.396798 (* 1 = 0.396798 loss)\nI0823 09:01:07.848629 32262 solver.cpp:228] Iteration 78600, loss = 0.000309444\nI0823 09:01:07.848675 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:01:07.848697 32262 solver.cpp:244]     
Train net output #1: loss = 0.000309548 (* 1 = 0.000309548 loss)\nI0823 09:01:07.925483 32262 sgd_solver.cpp:166] Iteration 78600, lr = 0.0035\nI0823 09:03:26.793450 32262 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0823 09:04:49.360460 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 09:04:49.360843 32262 solver.cpp:404]     Test net output #1: loss = 0.391955 (* 1 = 0.391955 loss)\nI0823 09:04:50.695719 32262 solver.cpp:228] Iteration 78700, loss = 0.000390884\nI0823 09:04:50.695766 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:04:50.695788 32262 solver.cpp:244]     Train net output #1: loss = 0.000390989 (* 1 = 0.000390989 loss)\nI0823 09:04:50.773607 32262 sgd_solver.cpp:166] Iteration 78700, lr = 0.0035\nI0823 09:07:09.559325 32262 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0823 09:08:32.146078 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90412\nI0823 09:08:32.146488 32262 solver.cpp:404]     Test net output #1: loss = 0.396843 (* 1 = 0.396843 loss)\nI0823 09:08:33.479679 32262 solver.cpp:228] Iteration 78800, loss = 0.000291728\nI0823 09:08:33.479723 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:08:33.479746 32262 solver.cpp:244]     Train net output #1: loss = 0.000291833 (* 1 = 0.000291833 loss)\nI0823 09:08:33.558733 32262 sgd_solver.cpp:166] Iteration 78800, lr = 0.0035\nI0823 09:10:52.362560 32262 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0823 09:12:14.927754 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90536\nI0823 09:12:14.928148 32262 solver.cpp:404]     Test net output #1: loss = 0.392006 (* 1 = 0.392006 loss)\nI0823 09:12:16.262624 32262 solver.cpp:228] Iteration 78900, loss = 0.000412827\nI0823 09:12:16.262670 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:12:16.262692 32262 solver.cpp:244]     Train net output #1: loss = 0.000412932 (* 1 = 0.000412932 loss)\nI0823 09:12:16.338786 32262 
sgd_solver.cpp:166] Iteration 78900, lr = 0.0035\nI0823 09:14:35.175529 32262 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0823 09:15:57.759902 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90412\nI0823 09:15:57.760301 32262 solver.cpp:404]     Test net output #1: loss = 0.396973 (* 1 = 0.396973 loss)\nI0823 09:15:59.093219 32262 solver.cpp:228] Iteration 79000, loss = 0.00028702\nI0823 09:15:59.093261 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:15:59.093278 32262 solver.cpp:244]     Train net output #1: loss = 0.000287125 (* 1 = 0.000287125 loss)\nI0823 09:15:59.168967 32262 sgd_solver.cpp:166] Iteration 79000, lr = 0.0035\nI0823 09:18:17.973204 32262 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0823 09:19:40.552562 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90528\nI0823 09:19:40.552933 32262 solver.cpp:404]     Test net output #1: loss = 0.39208 (* 1 = 0.39208 loss)\nI0823 09:19:41.885900 32262 solver.cpp:228] Iteration 79100, loss = 0.000432607\nI0823 09:19:41.885938 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:19:41.885954 32262 solver.cpp:244]     Train net output #1: loss = 0.000432712 (* 1 = 0.000432712 loss)\nI0823 09:19:41.960997 32262 sgd_solver.cpp:166] Iteration 79100, lr = 0.0035\nI0823 09:22:00.824246 32262 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0823 09:23:23.351547 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90424\nI0823 09:23:23.351905 32262 solver.cpp:404]     Test net output #1: loss = 0.397017 (* 1 = 0.397017 loss)\nI0823 09:23:24.684937 32262 solver.cpp:228] Iteration 79200, loss = 0.000346453\nI0823 09:23:24.684976 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:23:24.684991 32262 solver.cpp:244]     Train net output #1: loss = 0.000346558 (* 1 = 0.000346558 loss)\nI0823 09:23:24.770540 32262 sgd_solver.cpp:166] Iteration 79200, lr = 0.0035\nI0823 09:25:43.226296 32262 solver.cpp:337] Iteration 79300, 
Testing net (#0)\nI0823 09:27:05.346448 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90528\nI0823 09:27:05.346742 32262 solver.cpp:404]     Test net output #1: loss = 0.392168 (* 1 = 0.392168 loss)\nI0823 09:27:06.677136 32262 solver.cpp:228] Iteration 79300, loss = 0.000385717\nI0823 09:27:06.677175 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:27:06.677191 32262 solver.cpp:244]     Train net output #1: loss = 0.000385822 (* 1 = 0.000385822 loss)\nI0823 09:27:06.749953 32262 sgd_solver.cpp:166] Iteration 79300, lr = 0.0035\nI0823 09:29:25.015466 32262 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0823 09:30:47.501547 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90412\nI0823 09:30:47.501863 32262 solver.cpp:404]     Test net output #1: loss = 0.397141 (* 1 = 0.397141 loss)\nI0823 09:30:48.832226 32262 solver.cpp:228] Iteration 79400, loss = 0.000295088\nI0823 09:30:48.832268 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:30:48.832283 32262 solver.cpp:244]     Train net output #1: loss = 0.000295193 (* 1 = 0.000295193 loss)\nI0823 09:30:48.907697 32262 sgd_solver.cpp:166] Iteration 79400, lr = 0.0035\nI0823 09:33:07.149555 32262 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0823 09:34:29.627645 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90528\nI0823 09:34:29.627919 32262 solver.cpp:404]     Test net output #1: loss = 0.392274 (* 1 = 0.392274 loss)\nI0823 09:34:30.959233 32262 solver.cpp:228] Iteration 79500, loss = 0.000389795\nI0823 09:34:30.959277 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:34:30.959293 32262 solver.cpp:244]     Train net output #1: loss = 0.0003899 (* 1 = 0.0003899 loss)\nI0823 09:34:31.031003 32262 sgd_solver.cpp:166] Iteration 79500, lr = 0.0035\nI0823 09:36:49.319800 32262 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0823 09:38:11.455919 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90412\nI0823 
09:38:11.456204 32262 solver.cpp:404]     Test net output #1: loss = 0.397235 (* 1 = 0.397235 loss)\nI0823 09:38:12.787689 32262 solver.cpp:228] Iteration 79600, loss = 0.000303637\nI0823 09:38:12.787734 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:38:12.787750 32262 solver.cpp:244]     Train net output #1: loss = 0.000303742 (* 1 = 0.000303742 loss)\nI0823 09:38:12.860205 32262 sgd_solver.cpp:166] Iteration 79600, lr = 0.0035\nI0823 09:40:31.125996 32262 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0823 09:41:53.275562 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90528\nI0823 09:41:53.275830 32262 solver.cpp:404]     Test net output #1: loss = 0.392392 (* 1 = 0.392392 loss)\nI0823 09:41:54.606004 32262 solver.cpp:228] Iteration 79700, loss = 0.000416163\nI0823 09:41:54.606055 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:41:54.606071 32262 solver.cpp:244]     Train net output #1: loss = 0.000416268 (* 1 = 0.000416268 loss)\nI0823 09:41:54.684777 32262 sgd_solver.cpp:166] Iteration 79700, lr = 0.0035\nI0823 09:44:12.961942 32262 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0823 09:45:35.210995 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90432\nI0823 09:45:35.211303 32262 solver.cpp:404]     Test net output #1: loss = 0.397323 (* 1 = 0.397323 loss)\nI0823 09:45:36.541766 32262 solver.cpp:228] Iteration 79800, loss = 0.000349954\nI0823 09:45:36.541812 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:45:36.541828 32262 solver.cpp:244]     Train net output #1: loss = 0.000350059 (* 1 = 0.000350059 loss)\nI0823 09:45:36.610890 32262 sgd_solver.cpp:166] Iteration 79800, lr = 0.0035\nI0823 09:47:54.827380 32262 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0823 09:49:17.062922 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90528\nI0823 09:49:17.063195 32262 solver.cpp:404]     Test net output #1: loss = 0.392449 (* 1 = 0.392449 loss)\nI0823 
09:49:18.394367 32262 solver.cpp:228] Iteration 79900, loss = 0.000366788\nI0823 09:49:18.394412 32262 solver.cpp:244]     Train net output #0: accuracy = 1\nI0823 09:49:18.394428 32262 solver.cpp:244]     Train net output #1: loss = 0.000366893 (* 1 = 0.000366893 loss)\nI0823 09:49:18.466365 32262 sgd_solver.cpp:166] Iteration 79900, lr = 0.0035\nI0823 09:51:36.725575 32262 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/lr35Tr40kTab1_iter_80000.caffemodel\nI0823 09:51:36.954550 32262 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/lr35Tr40kTab1_iter_80000.solverstate\nI0823 09:51:37.407655 32262 solver.cpp:317] Iteration 80000, loss = 0.000288752\nI0823 09:51:37.407709 32262 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0823 09:52:59.854486 32262 solver.cpp:404]     Test net output #0: accuracy = 0.90432\nI0823 09:52:59.854847 32262 solver.cpp:404]     Test net output #1: loss = 0.397277 (* 1 = 0.397277 loss)\nI0823 09:52:59.854861 32262 solver.cpp:322] Optimization Done.\nI0823 09:53:05.191905 32262 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lrRange3SS520kClip10Fig12a",
    "content": "I1206 09:01:22.534219 30052 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1206 09:01:22.537324 30052 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1206 09:01:22.538789 30052 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1206 09:01:22.540010 30052 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1206 09:01:22.541224 30052 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1206 09:01:22.542453 30052 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1206 09:01:22.543678 30052 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1206 09:01:22.544915 30052 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1206 09:01:22.546146 30052 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1206 09:01:22.971736 30052 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 10\nmax_lr: 3\nI1206 09:01:38.227269 30052 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1206 09:01:38.235813 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:38.235882 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:38.236769 30052 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1206 09:01:38.238394 30052 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    
mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n  
  }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n  
  pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 
1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stri\nI1206 09:01:38.240054 30052 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:01:38.245827 30052 net.cpp:100] Creating Layer dataLayer\nI1206 09:01:38.245908 30052 net.cpp:408] dataLayer -> data_top\nI1206 09:01:38.246134 30052 net.cpp:408] dataLayer -> label\nI1206 09:01:38.246269 30052 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:01:47.532135 30061 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1206 09:01:47.552681 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:47.558512 30052 net.cpp:150] Setting up dataLayer\nI1206 09:01:47.558599 30052 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:01:47.558621 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.558632 30052 net.cpp:165] Memory required for data: 1044820\nI1206 09:01:47.558660 30052 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:01:47.558686 30052 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:01:47.558702 30052 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:01:47.558734 30052 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:01:47.558764 30052 
net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:01:47.558881 30052 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:01:47.558908 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.558923 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.558934 30052 net.cpp:165] Memory required for data: 1045500\nI1206 09:01:47.558954 30052 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:01:47.559047 30052 net.cpp:100] Creating Layer pre_conv\nI1206 09:01:47.559063 30052 net.cpp:434] pre_conv <- data_top\nI1206 09:01:47.559087 30052 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:01:47.560969 30052 net.cpp:150] Setting up pre_conv\nI1206 09:01:47.560997 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.561007 30052 net.cpp:165] Memory required for data: 6616060\nI1206 09:01:47.561096 30052 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:01:47.561120 30052 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:01:47.561131 30052 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:01:47.561153 30052 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:01:47.561173 30052 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:01:47.561254 30062 blocking_queue.cpp:50] Waiting for data\nI1206 09:01:47.561257 30052 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:01:47.561287 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.561295 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.561300 30052 net.cpp:165] Memory required for data: 17757180\nI1206 09:01:47.561306 30052 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:01:47.561393 30052 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:01:47.561409 30052 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:01:47.561424 30052 net.cpp:408] L1_b1_brc1_bn -> 
L1_b1_brc1_bn_top\nI1206 09:01:47.561908 30052 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:01:47.561929 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.561949 30052 net.cpp:165] Memory required for data: 23327740\nI1206 09:01:47.561981 30052 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:01:47.562053 30052 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:01:47.562069 30052 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:01:47.562085 30052 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:01:47.562105 30052 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:01:47.562121 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.562131 30052 net.cpp:165] Memory required for data: 28898300\nI1206 09:01:47.562141 30052 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1206 09:01:47.562168 30052 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:01:47.562181 30052 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:01:47.562203 30052 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:01:47.562537 30052 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:01:47.562559 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.562568 30052 net.cpp:165] Memory required for data: 40039420\nI1206 09:01:47.562587 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:01:47.562604 30052 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:01:47.562615 30052 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:01:47.562635 30052 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:01:47.562922 30052 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:01:47.562948 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.562959 30052 net.cpp:165] Memory required for data: 51180540\nI1206 09:01:47.562988 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:01:47.563014 30052 net.cpp:100] Creating Layer 
L1_b1_brc2_relu\nI1206 09:01:47.563027 30052 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:01:47.563043 30052 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:01:47.563062 30052 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:01:47.563088 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.563098 30052 net.cpp:165] Memory required for data: 62321660\nI1206 09:01:47.563109 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:01:47.563132 30052 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:01:47.563144 30052 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:01:47.563166 30052 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:01:47.563475 30052 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:01:47.563495 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.563506 30052 net.cpp:165] Memory required for data: 73462780\nI1206 09:01:47.563524 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:01:47.563542 30052 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:01:47.563552 30052 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:01:47.563572 30052 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:01:47.563830 30052 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:01:47.563856 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.563868 30052 net.cpp:165] Memory required for data: 84603900\nI1206 09:01:47.563891 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:01:47.563907 30052 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:01:47.563918 30052 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:01:47.563935 30052 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:01:47.563973 30052 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:01:47.563989 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.563999 30052 net.cpp:165] Memory required for 
data: 95745020\nI1206 09:01:47.564010 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:01:47.564031 30052 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:01:47.564043 30052 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:01:47.564066 30052 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:01:47.564415 30052 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:01:47.564435 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.564445 30052 net.cpp:165] Memory required for data: 118027260\nI1206 09:01:47.564468 30052 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:01:47.564504 30052 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:01:47.564518 30052 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:01:47.564540 30052 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:01:47.564863 30052 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:01:47.564882 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.564893 30052 net.cpp:165] Memory required for data: 140309500\nI1206 09:01:47.564911 30052 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:01:47.564993 30052 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:01:47.565008 30052 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:01:47.565022 30052 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:01:47.565038 30052 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:01:47.565138 30052 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:01:47.565160 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.565170 30052 net.cpp:165] Memory required for data: 162591740\nI1206 09:01:47.565179 30052 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.565196 30052 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.565207 
30052 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:01:47.565227 30052 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:47.565248 30052 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:47.565340 30052 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.565359 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.565374 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.565383 30052 net.cpp:165] Memory required for data: 207156220\nI1206 09:01:47.565394 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:01:47.565418 30052 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1206 09:01:47.565431 30052 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:47.565448 30052 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:01:47.565701 30052 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:01:47.565721 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.565731 30052 net.cpp:165] Memory required for data: 229438460\nI1206 09:01:47.565753 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:01:47.565776 30052 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:01:47.565789 30052 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:01:47.565804 30052 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:01:47.565824 30052 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:01:47.565837 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.565848 30052 net.cpp:165] Memory required for data: 251720700\nI1206 09:01:47.565858 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:01:47.565884 30052 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:01:47.565898 30052 
net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:01:47.565919 30052 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:01:47.566263 30052 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:01:47.566283 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.566293 30052 net.cpp:165] Memory required for data: 262861820\nI1206 09:01:47.566311 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:01:47.566328 30052 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:01:47.566339 30052 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:01:47.566354 30052 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:01:47.566628 30052 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:01:47.566648 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.566658 30052 net.cpp:165] Memory required for data: 274002940\nI1206 09:01:47.566680 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:01:47.566696 30052 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:01:47.566707 30052 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:01:47.566726 30052 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:01:47.566746 30052 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:01:47.566761 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.566771 30052 net.cpp:165] Memory required for data: 285144060\nI1206 09:01:47.566781 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:01:47.566807 30052 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:01:47.566821 30052 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:01:47.566838 30052 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:01:47.567160 30052 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:01:47.567180 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.567190 30052 net.cpp:165] Memory required for data: 296285180\nI1206 09:01:47.567209 30052 
layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:01:47.567230 30052 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:01:47.567241 30052 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:01:47.567258 30052 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:01:47.567525 30052 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:01:47.567544 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.567553 30052 net.cpp:165] Memory required for data: 307426300\nI1206 09:01:47.567575 30052 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:01:47.567602 30052 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:01:47.567615 30052 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:01:47.567633 30052 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:01:47.567653 30052 net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:01:47.567668 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.567678 30052 net.cpp:165] Memory required for data: 318567420\nI1206 09:01:47.567688 30052 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:01:47.567714 30052 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:01:47.567726 30052 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:01:47.567745 30052 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:01:47.568090 30052 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:01:47.568110 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568120 30052 net.cpp:165] Memory required for data: 340849660\nI1206 09:01:47.568151 30052 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:01:47.568176 30052 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:01:47.568190 30052 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:01:47.568203 30052 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:47.568219 30052 net.cpp:408] L1_b2_sum_eltwise -> 
L1_b2_sum_eltwise_top\nI1206 09:01:47.568271 30052 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:01:47.568305 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568316 30052 net.cpp:165] Memory required for data: 363131900\nI1206 09:01:47.568328 30052 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.568342 30052 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.568353 30052 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:01:47.568369 30052 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:47.568392 30052 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:47.568470 30052 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.568490 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568502 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568512 30052 net.cpp:165] Memory required for data: 407696380\nI1206 09:01:47.568523 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:01:47.568543 30052 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:01:47.568555 30052 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:47.568572 30052 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:01:47.568842 30052 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:01:47.568861 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568871 30052 net.cpp:165] Memory required for data: 429978620\nI1206 09:01:47.568892 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:01:47.568908 30052 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:01:47.568919 30052 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 
09:01:47.568933 30052 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:01:47.568961 30052 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:01:47.568976 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.568986 30052 net.cpp:165] Memory required for data: 452260860\nI1206 09:01:47.568996 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:01:47.569023 30052 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:01:47.569036 30052 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:01:47.569058 30052 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:01:47.569416 30052 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:01:47.569443 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.569454 30052 net.cpp:165] Memory required for data: 463401980\nI1206 09:01:47.569473 30052 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:01:47.569494 30052 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:01:47.569505 30052 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:01:47.569522 30052 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:01:47.569784 30052 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:01:47.569803 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.569813 30052 net.cpp:165] Memory required for data: 474543100\nI1206 09:01:47.569835 30052 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:01:47.569851 30052 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:01:47.569864 30052 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:01:47.569877 30052 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:01:47.569896 30052 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:01:47.569911 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.569921 30052 net.cpp:165] Memory required for data: 485684220\nI1206 09:01:47.569932 30052 layer_factory.hpp:77] Creating layer 
L1_b3_brc2_conv\nI1206 09:01:47.569969 30052 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:01:47.569983 30052 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:01:47.570008 30052 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:01:47.570319 30052 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:01:47.570338 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.570348 30052 net.cpp:165] Memory required for data: 496825340\nI1206 09:01:47.570366 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:01:47.570387 30052 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:01:47.570399 30052 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:01:47.570421 30052 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:01:47.570682 30052 net.cpp:150] Setting up L1_b3_brc3_bn\nI1206 09:01:47.570700 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.570710 30052 net.cpp:165] Memory required for data: 507966460\nI1206 09:01:47.570732 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:01:47.570749 30052 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:01:47.570760 30052 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:01:47.570773 30052 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:01:47.570793 30052 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:01:47.570808 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.570817 30052 net.cpp:165] Memory required for data: 519107580\nI1206 09:01:47.570828 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:01:47.570853 30052 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:01:47.570866 30052 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:01:47.570893 30052 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:01:47.571249 30052 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:01:47.571269 30052 net.cpp:157] Top shape: 85 64 32 32 
(5570560)\nI1206 09:01:47.571279 30052 net.cpp:165] Memory required for data: 541389820\nI1206 09:01:47.571296 30052 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:01:47.571317 30052 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:01:47.571329 30052 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:01:47.571344 30052 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:47.571360 30052 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:01:47.571421 30052 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:01:47.571440 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.571450 30052 net.cpp:165] Memory required for data: 563672060\nI1206 09:01:47.571460 30052 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.571490 30052 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.571502 30052 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:01:47.571518 30052 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:47.571543 30052 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:47.571622 30052 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.571641 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.571655 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.571666 30052 net.cpp:165] Memory required for data: 608236540\nI1206 09:01:47.571676 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:01:47.571696 30052 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:01:47.571709 30052 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:47.571725 30052 
net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:01:47.571992 30052 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:01:47.572013 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.572022 30052 net.cpp:165] Memory required for data: 630518780\nI1206 09:01:47.572044 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:01:47.572067 30052 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:01:47.572078 30052 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:01:47.572093 30052 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:01:47.572113 30052 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:01:47.572126 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.572137 30052 net.cpp:165] Memory required for data: 652801020\nI1206 09:01:47.572147 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:01:47.572180 30052 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:01:47.572192 30052 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:01:47.572214 30052 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:01:47.572566 30052 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:01:47.572587 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.572595 30052 net.cpp:165] Memory required for data: 663942140\nI1206 09:01:47.572614 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:01:47.572630 30052 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:01:47.572641 30052 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:01:47.572657 30052 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:01:47.572944 30052 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:01:47.572964 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.572974 30052 net.cpp:165] Memory required for data: 675083260\nI1206 09:01:47.572998 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:01:47.573014 30052 
net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:01:47.573024 30052 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:01:47.573043 30052 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:01:47.573063 30052 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:01:47.573078 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.573088 30052 net.cpp:165] Memory required for data: 686224380\nI1206 09:01:47.573098 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:01:47.573125 30052 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:01:47.573138 30052 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:01:47.573154 30052 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:01:47.573464 30052 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 09:01:47.573483 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.573493 30052 net.cpp:165] Memory required for data: 697365500\nI1206 09:01:47.573513 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:01:47.573542 30052 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:01:47.573555 30052 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:01:47.573572 30052 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:01:47.573854 30052 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:01:47.573875 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.573884 30052 net.cpp:165] Memory required for data: 708506620\nI1206 09:01:47.573907 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:01:47.573923 30052 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:01:47.573941 30052 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:01:47.573963 30052 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:01:47.573982 30052 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:01:47.573997 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.574007 30052 
net.cpp:165] Memory required for data: 719647740\nI1206 09:01:47.574018 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:01:47.574041 30052 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:01:47.574054 30052 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:01:47.574072 30052 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:01:47.574419 30052 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:01:47.574440 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.574450 30052 net.cpp:165] Memory required for data: 741929980\nI1206 09:01:47.574467 30052 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:01:47.574484 30052 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:01:47.574501 30052 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1206 09:01:47.574515 30052 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:47.574532 30052 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:01:47.574581 30052 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:01:47.574600 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.574610 30052 net.cpp:165] Memory required for data: 764212220\nI1206 09:01:47.574620 30052 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.574642 30052 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.574656 30052 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:01:47.574676 30052 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:47.574697 30052 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:47.574771 30052 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.574795 30052 
net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.574808 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.574817 30052 net.cpp:165] Memory required for data: 808776700\nI1206 09:01:47.574828 30052 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:01:47.574843 30052 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:01:47.574854 30052 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:47.574877 30052 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:01:47.575158 30052 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:01:47.575178 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.575187 30052 net.cpp:165] Memory required for data: 831058940\nI1206 09:01:47.575227 30052 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1206 09:01:47.575248 30052 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:01:47.575259 30052 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:01:47.575274 30052 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:01:47.575294 30052 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:01:47.575309 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.575326 30052 net.cpp:165] Memory required for data: 853341180\nI1206 09:01:47.575337 30052 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:01:47.575366 30052 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:01:47.575379 30052 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:01:47.575397 30052 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:01:47.575742 30052 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:01:47.575762 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.575772 30052 net.cpp:165] Memory required for data: 864482300\nI1206 09:01:47.575790 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:01:47.575811 30052 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 
09:01:47.575822 30052 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:01:47.575839 30052 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:01:47.576114 30052 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:01:47.576134 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.576143 30052 net.cpp:165] Memory required for data: 875623420\nI1206 09:01:47.576164 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:01:47.576181 30052 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:01:47.576192 30052 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:01:47.576215 30052 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:01:47.576234 30052 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:01:47.576249 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.576259 30052 net.cpp:165] Memory required for data: 886764540\nI1206 09:01:47.576269 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:01:47.576295 30052 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:01:47.576308 30052 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:01:47.576326 30052 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:01:47.576673 30052 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:01:47.576696 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.576706 30052 net.cpp:165] Memory required for data: 897905660\nI1206 09:01:47.576722 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:01:47.576743 30052 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:01:47.576753 30052 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:01:47.576768 30052 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:01:47.577090 30052 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:01:47.577111 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.577121 30052 net.cpp:165] Memory required for data: 909046780\nI1206 
09:01:47.577142 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:01:47.577159 30052 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:01:47.577172 30052 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:01:47.577191 30052 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:01:47.577213 30052 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:01:47.577226 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.577236 30052 net.cpp:165] Memory required for data: 920187900\nI1206 09:01:47.577247 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:01:47.577273 30052 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:01:47.577286 30052 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:01:47.577306 30052 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:01:47.577666 30052 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:01:47.577685 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.577695 30052 net.cpp:165] Memory required for data: 942470140\nI1206 09:01:47.577713 30052 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:01:47.577730 30052 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:01:47.577744 30052 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:01:47.577756 30052 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:47.577791 30052 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:01:47.577846 30052 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:01:47.577863 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.577873 30052 net.cpp:165] Memory required for data: 964752380\nI1206 09:01:47.577884 30052 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.577905 30052 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.577917 30052 
net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:01:47.577946 30052 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:47.577970 30052 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:47.578049 30052 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.578068 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.578081 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.578091 30052 net.cpp:165] Memory required for data: 1009316860\nI1206 09:01:47.578102 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:01:47.578122 30052 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1206 09:01:47.578135 30052 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:47.578152 30052 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:01:47.578426 30052 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:01:47.578445 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.578455 30052 net.cpp:165] Memory required for data: 1031599100\nI1206 09:01:47.578477 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:01:47.578498 30052 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:01:47.578511 30052 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:01:47.578526 30052 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:01:47.578546 30052 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:01:47.578560 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.578572 30052 net.cpp:165] Memory required for data: 1053881340\nI1206 09:01:47.578582 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:01:47.578608 30052 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:01:47.578620 30052 
net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:01:47.578641 30052 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:01:47.579017 30052 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:01:47.579038 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.579048 30052 net.cpp:165] Memory required for data: 1065022460\nI1206 09:01:47.579067 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:01:47.579088 30052 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:01:47.579099 30052 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:01:47.579115 30052 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:01:47.579391 30052 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:01:47.579411 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.579421 30052 net.cpp:165] Memory required for data: 1076163580\nI1206 09:01:47.579443 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:01:47.579474 30052 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:01:47.579488 30052 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:01:47.579504 30052 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:01:47.579524 30052 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:01:47.579540 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.579550 30052 net.cpp:165] Memory required for data: 1087304700\nI1206 09:01:47.579560 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:01:47.579589 30052 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:01:47.579602 30052 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:01:47.579625 30052 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:01:47.579970 30052 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:01:47.579990 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.580000 30052 net.cpp:165] Memory required for data: 1098445820\nI1206 09:01:47.580019 30052 
layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:01:47.580040 30052 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:01:47.580052 30052 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:01:47.580070 30052 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:01:47.580339 30052 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:01:47.580363 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.580373 30052 net.cpp:165] Memory required for data: 1109586940\nI1206 09:01:47.580395 30052 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:01:47.580410 30052 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:01:47.580421 30052 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:01:47.580436 30052 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 09:01:47.580456 30052 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:01:47.580471 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.580482 30052 net.cpp:165] Memory required for data: 1120728060\nI1206 09:01:47.580492 30052 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:01:47.580516 30052 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:01:47.580530 30052 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:01:47.580548 30052 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:01:47.580899 30052 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:01:47.580919 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.580929 30052 net.cpp:165] Memory required for data: 1143010300\nI1206 09:01:47.580955 30052 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:01:47.580973 30052 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:01:47.580986 30052 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:01:47.580999 30052 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:47.581024 30052 net.cpp:408] L1_b6_sum_eltwise 
-> L1_b6_sum_eltwise_top\nI1206 09:01:47.581077 30052 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:01:47.581101 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.581111 30052 net.cpp:165] Memory required for data: 1165292540\nI1206 09:01:47.581123 30052 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.581137 30052 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.581149 30052 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:01:47.581166 30052 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:47.581185 30052 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:47.581270 30052 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.581292 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.581305 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.581316 30052 net.cpp:165] Memory required for data: 1209857020\nI1206 09:01:47.581327 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:01:47.581347 30052 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:01:47.581360 30052 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:47.581377 30052 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:01:47.581653 30052 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:01:47.581673 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.581701 30052 net.cpp:165] Memory required for data: 1232139260\nI1206 09:01:47.581725 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:01:47.581743 30052 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:01:47.581754 30052 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 
09:01:47.581768 30052 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:01:47.581789 30052 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:01:47.581804 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.581814 30052 net.cpp:165] Memory required for data: 1254421500\nI1206 09:01:47.581825 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:01:47.581849 30052 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:01:47.581862 30052 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:01:47.581884 30052 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:01:47.582289 30052 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:01:47.582310 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.582320 30052 net.cpp:165] Memory required for data: 1259992060\nI1206 09:01:47.582340 30052 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:01:47.582360 30052 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:01:47.582372 30052 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:01:47.582394 30052 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:01:47.582665 30052 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:01:47.582685 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.582695 30052 net.cpp:165] Memory required for data: 1265562620\nI1206 09:01:47.582717 30052 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:01:47.582733 30052 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:01:47.582746 30052 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:01:47.582764 30052 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:01:47.582785 30052 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:01:47.582800 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.582810 30052 net.cpp:165] Memory required for data: 1271133180\nI1206 09:01:47.582820 30052 layer_factory.hpp:77] Creating layer 
L2_b1_brc2_conv\nI1206 09:01:47.582845 30052 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:01:47.582859 30052 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:01:47.582876 30052 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:01:47.583223 30052 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:01:47.583243 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.583253 30052 net.cpp:165] Memory required for data: 1276703740\nI1206 09:01:47.583271 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:01:47.583292 30052 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:01:47.583305 30052 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:01:47.583323 30052 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:01:47.583600 30052 net.cpp:150] Setting up L2_b1_brc3_bn\nI1206 09:01:47.583618 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.583628 30052 net.cpp:165] Memory required for data: 1282274300\nI1206 09:01:47.583652 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:01:47.583667 30052 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:01:47.583679 30052 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:01:47.583694 30052 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:01:47.583714 30052 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:01:47.583729 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.583739 30052 net.cpp:165] Memory required for data: 1287844860\nI1206 09:01:47.583750 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:01:47.583773 30052 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:01:47.583787 30052 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:01:47.583811 30052 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:01:47.585338 30052 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:01:47.585361 30052 net.cpp:157] Top shape: 85 128 16 16 
(2785280)\nI1206 09:01:47.585371 30052 net.cpp:165] Memory required for data: 1298985980\nI1206 09:01:47.585391 30052 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:01:47.585423 30052 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:01:47.585438 30052 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:47.585458 30052 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:01:47.585918 30052 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:01:47.585943 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.585954 30052 net.cpp:165] Memory required for data: 1310127100\nI1206 09:01:47.585973 30052 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:01:47.585990 30052 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1206 09:01:47.586002 30052 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:01:47.586017 30052 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:01:47.586032 30052 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:01:47.586083 30052 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:01:47.586102 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.586113 30052 net.cpp:165] Memory required for data: 1321268220\nI1206 09:01:47.586123 30052 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.586138 30052 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.586150 30052 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:01:47.586171 30052 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:47.586191 30052 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:47.586277 30052 net.cpp:150] Setting up 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.586297 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.586309 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.586319 30052 net.cpp:165] Memory required for data: 1343550460\nI1206 09:01:47.586330 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:01:47.586345 30052 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:01:47.586357 30052 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:47.586378 30052 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:01:47.586637 30052 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:01:47.586657 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.586666 30052 net.cpp:165] Memory required for data: 1354691580\nI1206 09:01:47.586688 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:01:47.586705 30052 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:01:47.586719 30052 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:01:47.586737 30052 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:01:47.586758 30052 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:01:47.586773 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.586782 30052 net.cpp:165] Memory required for data: 1365832700\nI1206 09:01:47.586793 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:01:47.586813 30052 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:01:47.586825 30052 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:01:47.586843 30052 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:01:47.587339 30052 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:01:47.587359 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.587369 30052 net.cpp:165] Memory required for data: 1371403260\nI1206 09:01:47.587388 30052 layer_factory.hpp:77] Creating layer 
L2_b2_brc2_bn\nI1206 09:01:47.587419 30052 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:01:47.587432 30052 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:01:47.587450 30052 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:01:47.587733 30052 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:01:47.587752 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.587762 30052 net.cpp:165] Memory required for data: 1376973820\nI1206 09:01:47.587785 30052 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:01:47.587801 30052 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:01:47.587813 30052 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:01:47.587833 30052 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:01:47.587853 30052 net.cpp:150] Setting up L2_b2_brc2_relu\nI1206 09:01:47.587869 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.587879 30052 net.cpp:165] Memory required for data: 1382544380\nI1206 09:01:47.587889 30052 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:01:47.587915 30052 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:01:47.587929 30052 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:01:47.587954 30052 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:01:47.588299 30052 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:01:47.588320 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.588328 30052 net.cpp:165] Memory required for data: 1388114940\nI1206 09:01:47.588347 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:01:47.588374 30052 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:01:47.588387 30052 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:01:47.588404 30052 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:01:47.588671 30052 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:01:47.588690 30052 net.cpp:157] Top shape: 85 64 16 16 
(1392640)\nI1206 09:01:47.588701 30052 net.cpp:165] Memory required for data: 1393685500\nI1206 09:01:47.588721 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:01:47.588737 30052 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:01:47.588749 30052 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:01:47.588763 30052 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:01:47.588783 30052 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:01:47.588798 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.588807 30052 net.cpp:165] Memory required for data: 1399256060\nI1206 09:01:47.588817 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:01:47.588846 30052 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:01:47.588860 30052 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1206 09:01:47.588883 30052 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:01:47.589362 30052 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:01:47.589383 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.589392 30052 net.cpp:165] Memory required for data: 1410397180\nI1206 09:01:47.589411 30052 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:01:47.589432 30052 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:01:47.589445 30052 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:01:47.589459 30052 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:47.589481 30052 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:01:47.589527 30052 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:01:47.589546 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.589556 30052 net.cpp:165] Memory required for data: 1421538300\nI1206 09:01:47.589567 30052 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.589582 30052 net.cpp:100] 
Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.589594 30052 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:01:47.589624 30052 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:47.589646 30052 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:47.589730 30052 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.589756 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.589772 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.589783 30052 net.cpp:165] Memory required for data: 1443820540\nI1206 09:01:47.589793 30052 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1206 09:01:47.589808 30052 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:01:47.589821 30052 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:47.589843 30052 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:01:47.590121 30052 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:01:47.590140 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.590149 30052 net.cpp:165] Memory required for data: 1454961660\nI1206 09:01:47.590201 30052 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:01:47.590220 30052 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:01:47.590232 30052 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:01:47.590257 30052 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:01:47.590279 30052 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:01:47.590294 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.590304 30052 net.cpp:165] Memory required for data: 1466102780\nI1206 09:01:47.590314 30052 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 
09:01:47.590335 30052 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:01:47.590348 30052 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:01:47.590371 30052 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:01:47.590847 30052 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:01:47.590865 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.590875 30052 net.cpp:165] Memory required for data: 1471673340\nI1206 09:01:47.590894 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:01:47.590911 30052 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:01:47.590922 30052 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:01:47.590950 30052 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:01:47.591223 30052 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:01:47.591243 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.591251 30052 net.cpp:165] Memory required for data: 1477243900\nI1206 09:01:47.591274 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:01:47.591295 30052 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:01:47.591308 30052 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:01:47.591325 30052 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:01:47.591343 30052 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:01:47.591358 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.591369 30052 net.cpp:165] Memory required for data: 1482814460\nI1206 09:01:47.591379 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:01:47.591400 30052 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:01:47.591413 30052 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:01:47.591434 30052 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:01:47.591779 30052 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:01:47.591799 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 
09:01:47.591809 30052 net.cpp:165] Memory required for data: 1488385020\nI1206 09:01:47.591826 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:01:47.591843 30052 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:01:47.591856 30052 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:01:47.591876 30052 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:01:47.592173 30052 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:01:47.592193 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.592202 30052 net.cpp:165] Memory required for data: 1493955580\nI1206 09:01:47.592224 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:01:47.592252 30052 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:01:47.592265 30052 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:01:47.592280 30052 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:01:47.592299 30052 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:01:47.592314 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.592324 30052 net.cpp:165] Memory required for data: 1499526140\nI1206 09:01:47.592334 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:01:47.592355 30052 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:01:47.592368 30052 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:01:47.592389 30052 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:01:47.592860 30052 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:01:47.592881 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.592890 30052 net.cpp:165] Memory required for data: 1510667260\nI1206 09:01:47.592909 30052 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:01:47.592926 30052 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:01:47.592945 30052 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:01:47.592962 30052 net.cpp:434] L2_b3_sum_eltwise <- 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:47.592978 30052 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:01:47.593024 30052 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:01:47.593041 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.593052 30052 net.cpp:165] Memory required for data: 1521808380\nI1206 09:01:47.593062 30052 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.593081 30052 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.593094 30052 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:01:47.593113 30052 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:47.593134 30052 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:47.593214 30052 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.593231 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.593245 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.593255 30052 net.cpp:165] Memory required for data: 1544090620\nI1206 09:01:47.593266 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:01:47.593286 30052 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:01:47.593298 30052 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:47.593315 30052 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:01:47.593596 30052 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:01:47.593616 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.593626 30052 net.cpp:165] Memory required for data: 1555231740\nI1206 09:01:47.593647 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:01:47.593663 30052 net.cpp:100] 
Creating Layer L2_b4_brc1_relu\nI1206 09:01:47.593674 30052 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:01:47.593689 30052 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:01:47.593708 30052 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:01:47.593722 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.593734 30052 net.cpp:165] Memory required for data: 1566372860\nI1206 09:01:47.593744 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:01:47.593778 30052 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:01:47.593792 30052 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:01:47.593816 30052 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:01:47.594306 30052 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:01:47.594327 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.594337 30052 net.cpp:165] Memory required for data: 1571943420\nI1206 09:01:47.594357 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:01:47.594378 30052 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:01:47.594390 30052 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:01:47.594408 30052 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:01:47.594682 30052 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:01:47.594702 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.594712 30052 net.cpp:165] Memory required for data: 1577513980\nI1206 09:01:47.594734 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:01:47.594750 30052 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:01:47.594761 30052 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:01:47.594776 30052 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:01:47.594795 30052 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:01:47.594810 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.594821 30052 net.cpp:165] 
Memory required for data: 1583084540\nI1206 09:01:47.594831 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:01:47.594854 30052 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:01:47.594868 30052 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:01:47.594892 30052 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:01:47.595243 30052 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:01:47.595263 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.595273 30052 net.cpp:165] Memory required for data: 1588655100\nI1206 09:01:47.595293 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:01:47.595309 30052 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:01:47.595319 30052 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:01:47.595341 30052 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1206 09:01:47.595613 30052 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:01:47.595631 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.595641 30052 net.cpp:165] Memory required for data: 1594225660\nI1206 09:01:47.595662 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:01:47.595679 30052 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:01:47.595690 30052 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:01:47.595705 30052 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:01:47.595724 30052 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:01:47.595738 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.595748 30052 net.cpp:165] Memory required for data: 1599796220\nI1206 09:01:47.595759 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:01:47.595788 30052 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:01:47.595803 30052 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:01:47.595826 30052 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:01:47.596310 30052 
net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:01:47.596331 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.596340 30052 net.cpp:165] Memory required for data: 1610937340\nI1206 09:01:47.596359 30052 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:01:47.596381 30052 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:01:47.596395 30052 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:01:47.596408 30052 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:47.596426 30052 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:01:47.596472 30052 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:01:47.596503 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.596513 30052 net.cpp:165] Memory required for data: 1622078460\nI1206 09:01:47.596524 30052 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.596539 30052 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.596550 30052 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:01:47.596566 30052 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:47.596592 30052 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:47.596683 30052 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.596701 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.596714 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.596724 30052 net.cpp:165] Memory required for data: 1644360700\nI1206 09:01:47.596735 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:01:47.596755 30052 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:01:47.596768 30052 
net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:47.596789 30052 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:01:47.597074 30052 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:01:47.597097 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.597108 30052 net.cpp:165] Memory required for data: 1655501820\nI1206 09:01:47.597131 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:01:47.597147 30052 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:01:47.597159 30052 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:01:47.597174 30052 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:01:47.597193 30052 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:01:47.597209 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.597219 30052 net.cpp:165] Memory required for data: 1666642940\nI1206 09:01:47.597230 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:01:47.597250 30052 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:01:47.597263 30052 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:01:47.597285 30052 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:01:47.597765 30052 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:01:47.597785 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.597795 30052 net.cpp:165] Memory required for data: 1672213500\nI1206 09:01:47.597813 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:01:47.597834 30052 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:01:47.597846 30052 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:01:47.597863 30052 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:01:47.598146 30052 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:01:47.598166 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.598176 30052 net.cpp:165] Memory required for data: 1677784060\nI1206 
09:01:47.598197 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:01:47.598222 30052 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:01:47.598234 30052 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:01:47.598250 30052 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:01:47.598269 30052 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:01:47.598284 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.598295 30052 net.cpp:165] Memory required for data: 1683354620\nI1206 09:01:47.598304 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:01:47.598325 30052 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:01:47.598337 30052 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:01:47.598361 30052 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 09:01:47.598728 30052 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:01:47.598748 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.598759 30052 net.cpp:165] Memory required for data: 1688925180\nI1206 09:01:47.598778 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:01:47.598795 30052 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:01:47.598808 30052 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:01:47.598830 30052 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:01:47.599114 30052 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:01:47.599133 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.599143 30052 net.cpp:165] Memory required for data: 1694495740\nI1206 09:01:47.599165 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:01:47.599186 30052 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:01:47.599198 30052 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:01:47.599215 30052 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:01:47.599233 30052 net.cpp:150] Setting up 
L2_b5_brc3_relu\nI1206 09:01:47.599249 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.599259 30052 net.cpp:165] Memory required for data: 1700066300\nI1206 09:01:47.599270 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:01:47.599292 30052 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:01:47.599304 30052 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:01:47.599326 30052 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:01:47.599805 30052 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:01:47.599825 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.599835 30052 net.cpp:165] Memory required for data: 1711207420\nI1206 09:01:47.599853 30052 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:01:47.599871 30052 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 09:01:47.599884 30052 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:01:47.599897 30052 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:47.599913 30052 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:01:47.599966 30052 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:01:47.599983 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.599994 30052 net.cpp:165] Memory required for data: 1722348540\nI1206 09:01:47.600005 30052 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.600024 30052 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.600037 30052 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:01:47.600059 30052 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:47.600080 30052 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:47.600157 30052 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.600175 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.600193 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.600204 30052 net.cpp:165] Memory required for data: 1744630780\nI1206 09:01:47.600215 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:01:47.600229 30052 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:01:47.600241 30052 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:47.600258 30052 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:01:47.600540 30052 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:01:47.600559 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.600569 30052 net.cpp:165] Memory required for data: 1755771900\nI1206 09:01:47.600592 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:01:47.600637 30052 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:01:47.600653 30052 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:01:47.600669 30052 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:01:47.600689 30052 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:01:47.600706 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.600716 30052 net.cpp:165] Memory required for data: 1766913020\nI1206 09:01:47.600728 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:01:47.600751 30052 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:01:47.600765 30052 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:01:47.600791 30052 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:01:47.601286 30052 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:01:47.601306 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.601316 30052 net.cpp:165] Memory 
required for data: 1772483580\nI1206 09:01:47.601336 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:01:47.601356 30052 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:01:47.601369 30052 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:01:47.601390 30052 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:01:47.601667 30052 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:01:47.601686 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.601696 30052 net.cpp:165] Memory required for data: 1778054140\nI1206 09:01:47.601717 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:01:47.601733 30052 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:01:47.601745 30052 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:01:47.601760 30052 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:01:47.601781 30052 net.cpp:150] Setting up L2_b6_brc2_relu\nI1206 09:01:47.601795 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.601805 30052 net.cpp:165] Memory required for data: 1783624700\nI1206 09:01:47.601816 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:01:47.601841 30052 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:01:47.601855 30052 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:01:47.601874 30052 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:01:47.602231 30052 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:01:47.602252 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.602262 30052 net.cpp:165] Memory required for data: 1789195260\nI1206 09:01:47.602280 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:01:47.602301 30052 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:01:47.602314 30052 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:01:47.602330 30052 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:01:47.602605 30052 net.cpp:150] Setting 
up L2_b6_brc3_bn\nI1206 09:01:47.602632 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.602643 30052 net.cpp:165] Memory required for data: 1794765820\nI1206 09:01:47.602665 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:01:47.602681 30052 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:01:47.602694 30052 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:01:47.602710 30052 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:01:47.602730 30052 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:01:47.602744 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.602754 30052 net.cpp:165] Memory required for data: 1800336380\nI1206 09:01:47.602764 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:01:47.602784 30052 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:01:47.602797 30052 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1206 09:01:47.602820 30052 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:01:47.603312 30052 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:01:47.603332 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.603350 30052 net.cpp:165] Memory required for data: 1811477500\nI1206 09:01:47.603369 30052 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:01:47.603386 30052 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:01:47.603399 30052 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:01:47.603413 30052 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:47.603433 30052 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:01:47.603482 30052 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:01:47.603500 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.603509 30052 net.cpp:165] Memory required for data: 1822618620\nI1206 09:01:47.603520 30052 layer_factory.hpp:77] Creating layer 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.603540 30052 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.603552 30052 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:01:47.603569 30052 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:47.603588 30052 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:47.603673 30052 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.603693 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.603706 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.603716 30052 net.cpp:165] Memory required for data: 1844900860\nI1206 09:01:47.603727 30052 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1206 09:01:47.603742 30052 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:01:47.603754 30052 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:47.603777 30052 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:01:47.604050 30052 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:01:47.604070 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.604080 30052 net.cpp:165] Memory required for data: 1856041980\nI1206 09:01:47.604102 30052 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:01:47.604122 30052 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:01:47.604135 30052 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:01:47.604151 30052 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:01:47.604171 30052 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:01:47.604185 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.604197 30052 net.cpp:165] Memory required for data: 1867183100\nI1206 
09:01:47.604207 30052 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:01:47.604226 30052 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:01:47.604239 30052 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:01:47.604261 30052 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:01:47.604904 30052 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:01:47.604924 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.604934 30052 net.cpp:165] Memory required for data: 1869968380\nI1206 09:01:47.604961 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:01:47.604977 30052 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:01:47.604990 30052 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:01:47.605006 30052 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:01:47.605290 30052 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:01:47.605310 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.605319 30052 net.cpp:165] Memory required for data: 1872753660\nI1206 09:01:47.605342 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:01:47.605358 30052 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:01:47.605370 30052 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:01:47.605389 30052 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:01:47.605419 30052 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:01:47.605435 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.605446 30052 net.cpp:165] Memory required for data: 1875538940\nI1206 09:01:47.605456 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:01:47.605481 30052 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:01:47.605494 30052 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:01:47.605514 30052 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:01:47.605940 30052 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 
09:01:47.605962 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.605971 30052 net.cpp:165] Memory required for data: 1878324220\nI1206 09:01:47.605990 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:01:47.606007 30052 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:01:47.606019 30052 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:01:47.606040 30052 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:01:47.606310 30052 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:01:47.606329 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.606339 30052 net.cpp:165] Memory required for data: 1881109500\nI1206 09:01:47.606361 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:01:47.606377 30052 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:01:47.606389 30052 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:01:47.606403 30052 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:01:47.606423 30052 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:01:47.606437 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.606447 30052 net.cpp:165] Memory required for data: 1883894780\nI1206 09:01:47.606458 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:01:47.606482 30052 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:01:47.606495 30052 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:01:47.606518 30052 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:01:47.608525 30052 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:01:47.608551 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.608562 30052 net.cpp:165] Memory required for data: 1889465340\nI1206 09:01:47.608582 30052 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:01:47.608603 30052 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:01:47.608616 30052 net.cpp:434] L3_b1_chanInc_conv <- 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:47.608640 30052 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:01:47.609634 30052 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:01:47.609655 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.609665 30052 net.cpp:165] Memory required for data: 1895035900\nI1206 09:01:47.609684 30052 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:01:47.609701 30052 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:01:47.609714 30052 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:01:47.609727 30052 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:01:47.609745 30052 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:01:47.609804 30052 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:01:47.609825 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.609836 30052 net.cpp:165] Memory required for data: 1900606460\nI1206 09:01:47.609848 30052 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.609869 30052 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.609882 30052 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:01:47.609899 30052 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:47.609918 30052 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:47.610023 30052 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.610046 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.610060 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.610070 30052 net.cpp:165] Memory required for data: 1911747580\nI1206 09:01:47.610081 30052 layer_factory.hpp:77] 
Creating layer L3_b2_brc1_bn\nI1206 09:01:47.610101 30052 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:01:47.610114 30052 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:47.610131 30052 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:01:47.610410 30052 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:01:47.610433 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.610445 30052 net.cpp:165] Memory required for data: 1917318140\nI1206 09:01:47.610468 30052 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:01:47.610484 30052 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:01:47.610496 30052 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:01:47.610512 30052 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:01:47.610532 30052 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:01:47.610548 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.610558 30052 net.cpp:165] Memory required for data: 1922888700\nI1206 09:01:47.610566 30052 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:01:47.610587 30052 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:01:47.610600 30052 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:01:47.610622 30052 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:01:47.611610 30052 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:01:47.611644 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.611655 30052 net.cpp:165] Memory required for data: 1925673980\nI1206 09:01:47.611675 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:01:47.611691 30052 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:01:47.611703 30052 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:01:47.611724 30052 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:01:47.612004 30052 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:01:47.612025 30052 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.612035 30052 net.cpp:165] Memory required for data: 1928459260\nI1206 09:01:47.612056 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:01:47.612072 30052 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:01:47.612084 30052 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:01:47.612104 30052 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:01:47.612124 30052 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:01:47.612139 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.612150 30052 net.cpp:165] Memory required for data: 1931244540\nI1206 09:01:47.612160 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:01:47.612181 30052 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:01:47.612195 30052 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:01:47.612211 30052 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:01:47.612640 30052 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:01:47.612660 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.612669 30052 net.cpp:165] Memory required for data: 1934029820\nI1206 09:01:47.612689 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:01:47.612709 30052 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:01:47.612722 30052 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:01:47.612740 30052 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:01:47.613029 30052 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:01:47.613049 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.613059 30052 net.cpp:165] Memory required for data: 1936815100\nI1206 09:01:47.613090 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:01:47.613108 30052 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:01:47.613121 30052 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:01:47.613134 30052 
net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:01:47.613154 30052 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:01:47.613169 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.613179 30052 net.cpp:165] Memory required for data: 1939600380\nI1206 09:01:47.613189 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:01:47.613215 30052 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:01:47.613229 30052 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:01:47.613255 30052 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:01:47.614234 30052 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:01:47.614255 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.614264 30052 net.cpp:165] Memory required for data: 1945170940\nI1206 09:01:47.614284 30052 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:01:47.614305 30052 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1206 09:01:47.614318 30052 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:01:47.614333 30052 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:47.614351 30052 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:01:47.614410 30052 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:01:47.614429 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.614439 30052 net.cpp:165] Memory required for data: 1950741500\nI1206 09:01:47.614450 30052 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.614465 30052 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.614477 30052 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:01:47.614497 30052 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:47.614518 30052 
net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:47.614601 30052 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.614629 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.614645 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.614655 30052 net.cpp:165] Memory required for data: 1961882620\nI1206 09:01:47.614666 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:01:47.614681 30052 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:01:47.614694 30052 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:47.614715 30052 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:01:47.615005 30052 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:01:47.615025 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.615034 30052 net.cpp:165] Memory required for data: 1967453180\nI1206 09:01:47.615056 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:01:47.615072 30052 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:01:47.615084 30052 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:01:47.615099 30052 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:01:47.615119 30052 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:01:47.615134 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.615144 30052 net.cpp:165] Memory required for data: 1973023740\nI1206 09:01:47.615154 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:01:47.615178 30052 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:01:47.615192 30052 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:01:47.615216 30052 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:01:47.616209 30052 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:01:47.616230 30052 net.cpp:157] Top shape: 85 128 8 8 
(696320)\nI1206 09:01:47.616240 30052 net.cpp:165] Memory required for data: 1975809020\nI1206 09:01:47.616256 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:01:47.616277 30052 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:01:47.616291 30052 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:01:47.616307 30052 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:01:47.616614 30052 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:01:47.616634 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.616644 30052 net.cpp:165] Memory required for data: 1978594300\nI1206 09:01:47.616667 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:01:47.616683 30052 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:01:47.616695 30052 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:01:47.616709 30052 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 09:01:47.616729 30052 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:01:47.616744 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.616753 30052 net.cpp:165] Memory required for data: 1981379580\nI1206 09:01:47.616765 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:01:47.616791 30052 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:01:47.616803 30052 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:01:47.616822 30052 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:01:47.617254 30052 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:01:47.617274 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.617283 30052 net.cpp:165] Memory required for data: 1984164860\nI1206 09:01:47.617301 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:01:47.617318 30052 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:01:47.617331 30052 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:01:47.617350 30052 net.cpp:408] L3_b3_brc3_bn -> 
L3_b3_brc3_bn_top\nI1206 09:01:47.617630 30052 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:01:47.617650 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.617660 30052 net.cpp:165] Memory required for data: 1986950140\nI1206 09:01:47.617682 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:01:47.617703 30052 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:01:47.617717 30052 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:01:47.617730 30052 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:01:47.617751 30052 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:01:47.617766 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.617775 30052 net.cpp:165] Memory required for data: 1989735420\nI1206 09:01:47.617786 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:01:47.617805 30052 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:01:47.617820 30052 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:01:47.617841 30052 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:01:47.618818 30052 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:01:47.618839 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.618849 30052 net.cpp:165] Memory required for data: 1995305980\nI1206 09:01:47.618867 30052 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:01:47.618885 30052 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:01:47.618898 30052 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:01:47.618912 30052 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:47.618928 30052 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:01:47.618999 30052 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:01:47.619017 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.619027 30052 net.cpp:165] Memory required for data: 2000876540\nI1206 
09:01:47.619037 30052 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.619065 30052 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.619077 30052 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:01:47.619093 30052 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:47.619114 30052 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:47.619201 30052 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.619220 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.619235 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.619244 30052 net.cpp:165] Memory required for data: 2012017660\nI1206 09:01:47.619256 30052 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:01:47.619271 30052 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:01:47.619282 30052 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:47.619303 30052 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:01:47.619598 30052 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:01:47.619617 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.619627 30052 net.cpp:165] Memory required for data: 2017588220\nI1206 09:01:47.619649 30052 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:01:47.619670 30052 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:01:47.619683 30052 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:01:47.619699 30052 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:01:47.619719 30052 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:01:47.619734 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.619743 30052 
net.cpp:165] Memory required for data: 2023158780\nI1206 09:01:47.619755 30052 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:01:47.619776 30052 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:01:47.619788 30052 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:01:47.619812 30052 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:01:47.620784 30052 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:01:47.620803 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.620813 30052 net.cpp:165] Memory required for data: 2025944060\nI1206 09:01:47.620832 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:01:47.620849 30052 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:01:47.620862 30052 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:01:47.620877 30052 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:01:47.621163 30052 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 09:01:47.621183 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.621192 30052 net.cpp:165] Memory required for data: 2028729340\nI1206 09:01:47.621214 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:01:47.621230 30052 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:01:47.621243 30052 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:01:47.621261 30052 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:01:47.621282 30052 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:01:47.621297 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.621307 30052 net.cpp:165] Memory required for data: 2031514620\nI1206 09:01:47.621317 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:01:47.621343 30052 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:01:47.621356 30052 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:01:47.621374 30052 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:01:47.621803 
30052 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:01:47.621822 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.621832 30052 net.cpp:165] Memory required for data: 2034299900\nI1206 09:01:47.621861 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:01:47.621882 30052 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:01:47.621896 30052 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:01:47.621914 30052 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:01:47.622202 30052 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:01:47.622222 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.622232 30052 net.cpp:165] Memory required for data: 2037085180\nI1206 09:01:47.622253 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:01:47.622270 30052 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:01:47.622282 30052 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 09:01:47.622298 30052 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:01:47.622318 30052 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:01:47.622333 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.622342 30052 net.cpp:165] Memory required for data: 2039870460\nI1206 09:01:47.622352 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:01:47.622380 30052 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:01:47.622395 30052 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:01:47.622416 30052 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:01:47.623389 30052 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:01:47.623409 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.623420 30052 net.cpp:165] Memory required for data: 2045441020\nI1206 09:01:47.623440 30052 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:01:47.623461 30052 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:01:47.623473 30052 
net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:01:47.623487 30052 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:47.623504 30052 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:01:47.623564 30052 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:01:47.623584 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.623594 30052 net.cpp:165] Memory required for data: 2051011580\nI1206 09:01:47.623605 30052 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.623620 30052 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.623631 30052 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:01:47.623652 30052 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:47.623673 30052 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:47.623757 30052 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.623781 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.623796 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.623807 30052 net.cpp:165] Memory required for data: 2062152700\nI1206 09:01:47.623817 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:01:47.623832 30052 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:01:47.623845 30052 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:47.623867 30052 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:01:47.624150 30052 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:01:47.624171 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.624181 30052 net.cpp:165] Memory required for data: 2067723260\nI1206 
09:01:47.624203 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:01:47.624219 30052 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:01:47.624231 30052 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:01:47.624246 30052 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:01:47.624266 30052 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:01:47.624290 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.624301 30052 net.cpp:165] Memory required for data: 2073293820\nI1206 09:01:47.624311 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:01:47.624336 30052 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:01:47.624349 30052 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:01:47.624372 30052 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:01:47.626519 30052 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:01:47.626543 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.626554 30052 net.cpp:165] Memory required for data: 2076079100\nI1206 09:01:47.626571 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:01:47.626593 30052 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:01:47.626606 30052 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:01:47.626623 30052 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:01:47.626907 30052 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:01:47.626927 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.626943 30052 net.cpp:165] Memory required for data: 2078864380\nI1206 09:01:47.626968 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:01:47.626983 30052 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:01:47.626996 30052 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:01:47.627015 30052 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:01:47.627037 30052 net.cpp:150] Setting up 
L3_b5_brc2_relu\nI1206 09:01:47.627053 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.627063 30052 net.cpp:165] Memory required for data: 2081649660\nI1206 09:01:47.627073 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:01:47.627099 30052 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:01:47.627111 30052 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:01:47.627130 30052 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:01:47.627544 30052 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:01:47.627565 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.627575 30052 net.cpp:165] Memory required for data: 2084434940\nI1206 09:01:47.627643 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:01:47.627667 30052 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:01:47.627681 30052 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:01:47.627699 30052 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1206 09:01:47.627993 30052 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:01:47.628012 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.628022 30052 net.cpp:165] Memory required for data: 2087220220\nI1206 09:01:47.628044 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:01:47.628065 30052 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:01:47.628078 30052 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:01:47.628094 30052 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:01:47.628114 30052 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:01:47.628129 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.628137 30052 net.cpp:165] Memory required for data: 2090005500\nI1206 09:01:47.628147 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:01:47.628167 30052 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:01:47.628180 30052 net.cpp:434] L3_b5_brc3_conv <- 
L3_b5_brc3_bn_top\nI1206 09:01:47.628203 30052 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:01:47.629176 30052 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:01:47.629196 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.629206 30052 net.cpp:165] Memory required for data: 2095576060\nI1206 09:01:47.629225 30052 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:01:47.629242 30052 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:01:47.629254 30052 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:01:47.629276 30052 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:47.629295 30052 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:01:47.629356 30052 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:01:47.629375 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.629385 30052 net.cpp:165] Memory required for data: 2101146620\nI1206 09:01:47.629396 30052 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.629411 30052 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.629423 30052 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:01:47.629444 30052 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:47.629465 30052 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:47.629551 30052 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.629576 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.629590 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.629601 30052 net.cpp:165] Memory required for data: 2112287740\nI1206 09:01:47.629611 30052 layer_factory.hpp:77] Creating layer 
L3_b6_brc1_bn\nI1206 09:01:47.629626 30052 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:01:47.629637 30052 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:47.629659 30052 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:01:47.629961 30052 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:01:47.629981 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.629992 30052 net.cpp:165] Memory required for data: 2117858300\nI1206 09:01:47.630013 30052 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:01:47.630034 30052 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:01:47.630048 30052 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:01:47.630064 30052 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:01:47.630084 30052 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:01:47.630098 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.630110 30052 net.cpp:165] Memory required for data: 2123428860\nI1206 09:01:47.630120 30052 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:01:47.630141 30052 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:01:47.630153 30052 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:01:47.630177 30052 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:01:47.631151 30052 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:01:47.631171 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.631181 30052 net.cpp:165] Memory required for data: 2126214140\nI1206 09:01:47.631199 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:01:47.631216 30052 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:01:47.631228 30052 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:01:47.631244 30052 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:01:47.631533 30052 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:01:47.631558 30052 net.cpp:157] Top shape: 
85 128 8 8 (696320)\nI1206 09:01:47.631567 30052 net.cpp:165] Memory required for data: 2128999420\nI1206 09:01:47.631589 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:01:47.631605 30052 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:01:47.631618 30052 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:01:47.631638 30052 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:01:47.631659 30052 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:01:47.631673 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.631685 30052 net.cpp:165] Memory required for data: 2131784700\nI1206 09:01:47.631695 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:01:47.631728 30052 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:01:47.631743 30052 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:01:47.631762 30052 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:01:47.632196 30052 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:01:47.632217 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.632226 30052 net.cpp:165] Memory required for data: 2134569980\nI1206 09:01:47.632246 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:01:47.632262 30052 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:01:47.632274 30052 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:01:47.632295 30052 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:01:47.632571 30052 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:01:47.632591 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.632601 30052 net.cpp:165] Memory required for data: 2137355260\nI1206 09:01:47.632622 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:01:47.632639 30052 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:01:47.632650 30052 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:01:47.632665 30052 net.cpp:395] L3_b6_brc3_relu 
-> L3_b6_brc3_bn_top (in-place)\nI1206 09:01:47.632686 30052 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:01:47.632700 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.632710 30052 net.cpp:165] Memory required for data: 2140140540\nI1206 09:01:47.632720 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:01:47.632745 30052 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:01:47.632760 30052 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:01:47.632782 30052 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:01:47.633761 30052 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:01:47.633782 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.633792 30052 net.cpp:165] Memory required for data: 2145711100\nI1206 09:01:47.633811 30052 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:01:47.633832 30052 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:01:47.633846 30052 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:01:47.633859 30052 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:47.633877 30052 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:01:47.633944 30052 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:01:47.633963 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.633973 30052 net.cpp:165] Memory required for data: 2151281660\nI1206 09:01:47.633985 30052 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:01:47.634004 30052 net.cpp:100] Creating Layer post_bn\nI1206 09:01:47.634017 30052 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:01:47.634033 30052 net.cpp:408] post_bn -> post_bn_top\nI1206 09:01:47.634312 30052 net.cpp:150] Setting up post_bn\nI1206 09:01:47.634335 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634346 30052 net.cpp:165] Memory required for data: 2156852220\nI1206 09:01:47.634368 30052 layer_factory.hpp:77] 
Creating layer post_relu\nI1206 09:01:47.634383 30052 net.cpp:100] Creating Layer post_relu\nI1206 09:01:47.634397 30052 net.cpp:434] post_relu <- post_bn_top\nI1206 09:01:47.634412 30052 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:01:47.634430 30052 net.cpp:150] Setting up post_relu\nI1206 09:01:47.634446 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634456 30052 net.cpp:165] Memory required for data: 2162422780\nI1206 09:01:47.634466 30052 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:01:47.634485 30052 net.cpp:100] Creating Layer post_pool\nI1206 09:01:47.634498 30052 net.cpp:434] post_pool <- post_bn_top\nI1206 09:01:47.634515 30052 net.cpp:408] post_pool -> post_pool\nI1206 09:01:47.634649 30052 net.cpp:150] Setting up post_pool\nI1206 09:01:47.634670 30052 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:01:47.634690 30052 net.cpp:165] Memory required for data: 2162509820\nI1206 09:01:47.634701 30052 layer_factory.hpp:77] Creating layer post_FC\nI1206 09:01:47.634812 30052 net.cpp:100] Creating Layer post_FC\nI1206 09:01:47.634829 30052 net.cpp:434] post_FC <- post_pool\nI1206 09:01:47.634848 30052 net.cpp:408] post_FC -> post_FC_top\nI1206 09:01:47.635159 30052 net.cpp:150] Setting up post_FC\nI1206 09:01:47.635179 30052 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.635190 30052 net.cpp:165] Memory required for data: 2162513220\nI1206 09:01:47.635208 30052 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:01:47.635224 30052 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:01:47.635236 30052 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:01:47.635258 30052 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:01:47.635280 30052 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:01:47.635366 30052 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:01:47.635391 30052 
net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.635406 30052 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.635416 30052 net.cpp:165] Memory required for data: 2162520020\nI1206 09:01:47.635428 30052 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:01:47.635493 30052 net.cpp:100] Creating Layer accuracy\nI1206 09:01:47.635509 30052 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:01:47.635522 30052 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:01:47.635540 30052 net.cpp:408] accuracy -> accuracy\nI1206 09:01:47.635609 30052 net.cpp:150] Setting up accuracy\nI1206 09:01:47.635628 30052 net.cpp:157] Top shape: (1)\nI1206 09:01:47.635639 30052 net.cpp:165] Memory required for data: 2162520024\nI1206 09:01:47.635650 30052 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:47.635665 30052 net.cpp:100] Creating Layer loss\nI1206 09:01:47.635677 30052 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 09:01:47.635691 30052 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:01:47.635713 30052 net.cpp:408] loss -> loss\nI1206 09:01:47.639513 30052 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:47.643782 30052 net.cpp:150] Setting up loss\nI1206 09:01:47.643803 30052 net.cpp:157] Top shape: (1)\nI1206 09:01:47.643813 30052 net.cpp:160]     with loss weight 1\nI1206 09:01:47.643923 30052 net.cpp:165] Memory required for data: 2162520028\nI1206 09:01:47.643945 30052 net.cpp:226] loss needs backward computation.\nI1206 09:01:47.643959 30052 net.cpp:228] accuracy does not need backward computation.\nI1206 09:01:47.643972 30052 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:01:47.643982 30052 net.cpp:226] post_FC needs backward computation.\nI1206 09:01:47.643992 30052 net.cpp:226] post_pool needs backward computation.\nI1206 09:01:47.644004 30052 net.cpp:226] post_relu needs backward computation.\nI1206 09:01:47.644014 30052 net.cpp:226] post_bn needs backward 
computation.\nI1206 09:01:47.644024 30052 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:01:47.644035 30052 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:01:47.644045 30052 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:01:47.644055 30052 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:01:47.644065 30052 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:01:47.644076 30052 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:01:47.644086 30052 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:01:47.644096 30052 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:01:47.644107 30052 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:01:47.644117 30052 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:01:47.644129 30052 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644146 30052 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.644158 30052 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:01:47.644170 30052 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:01:47.644181 30052 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:01:47.644191 30052 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:01:47.644201 30052 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:01:47.644210 30052 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:01:47.644222 30052 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:01:47.644232 30052 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:01:47.644242 30052 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:01:47.644253 30052 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644263 30052 
net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:01:47.644275 30052 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:01:47.644286 30052 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:01:47.644296 30052 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:01:47.644307 30052 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:01:47.644318 30052 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:01:47.644330 30052 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:01:47.644345 30052 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:01:47.644358 30052 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:01:47.644368 30052 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:01:47.644378 30052 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644390 30052 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.644402 30052 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:01:47.644413 30052 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:01:47.644423 30052 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:01:47.644434 30052 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:01:47.644446 30052 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:01:47.644455 30052 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:01:47.644466 30052 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:01:47.644477 30052 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:01:47.644489 30052 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:01:47.644500 30052 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644510 30052 net.cpp:226] L3_b2_sum_eltwise needs backward 
computation.\nI1206 09:01:47.644523 30052 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:01:47.644533 30052 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:01:47.644543 30052 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:01:47.644556 30052 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:01:47.644567 30052 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:01:47.644575 30052 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:01:47.644587 30052 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:01:47.644598 30052 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:01:47.644608 30052 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:01:47.644618 30052 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644629 30052 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.644641 30052 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.644651 30052 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:01:47.644670 30052 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:01:47.644682 30052 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:01:47.644693 30052 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:01:47.644704 30052 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:01:47.644716 30052 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:01:47.644726 30052 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:01:47.644735 30052 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:01:47.644747 30052 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:01:47.644757 30052 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644768 30052 
net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:01:47.644779 30052 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:01:47.644790 30052 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:01:47.644801 30052 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:01:47.644812 30052 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:01:47.644824 30052 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:01:47.644840 30052 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:01:47.644851 30052 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:01:47.644862 30052 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:01:47.644873 30052 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:01:47.644884 30052 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.644896 30052 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.644906 30052 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:01:47.644918 30052 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:01:47.644929 30052 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:01:47.644948 30052 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:01:47.644961 30052 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:01:47.644971 30052 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:01:47.644982 30052 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:01:47.644994 30052 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:01:47.645004 30052 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:01:47.645016 30052 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645027 30052 net.cpp:226] L2_b4_sum_eltwise needs backward 
computation.\nI1206 09:01:47.645040 30052 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:01:47.645051 30052 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:01:47.645061 30052 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:01:47.645072 30052 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:01:47.645084 30052 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:01:47.645094 30052 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:01:47.645105 30052 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:01:47.645117 30052 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:01:47.645128 30052 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:01:47.645138 30052 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645149 30052 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.645162 30052 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:01:47.645174 30052 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:01:47.645184 30052 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:01:47.645195 30052 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:01:47.645215 30052 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:01:47.645226 30052 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:01:47.645237 30052 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:01:47.645249 30052 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:01:47.645259 30052 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:01:47.645272 30052 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645282 30052 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:01:47.645293 30052 
net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:01:47.645305 30052 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:01:47.645315 30052 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:01:47.645326 30052 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:01:47.645337 30052 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:01:47.645349 30052 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:01:47.645360 30052 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:01:47.645372 30052 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:01:47.645383 30052 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:01:47.645395 30052 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645406 30052 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.645417 30052 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.645429 30052 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:01:47.645440 30052 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:01:47.645450 30052 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:01:47.645462 30052 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:01:47.645473 30052 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:01:47.645483 30052 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:01:47.645493 30052 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:01:47.645505 30052 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:01:47.645516 30052 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:01:47.645540 30052 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645555 30052 net.cpp:226] L1_b6_sum_eltwise needs backward 
computation.\nI1206 09:01:47.645565 30052 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:01:47.645577 30052 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:01:47.645588 30052 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:01:47.645599 30052 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:01:47.645609 30052 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:01:47.645620 30052 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:01:47.645632 30052 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:01:47.645642 30052 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:01:47.645653 30052 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:01:47.645665 30052 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645676 30052 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.645689 30052 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:01:47.645700 30052 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:01:47.645711 30052 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:01:47.645723 30052 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:01:47.645735 30052 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:01:47.645745 30052 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:01:47.645772 30052 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:01:47.645784 30052 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:01:47.645795 30052 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:01:47.645807 30052 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645818 30052 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:01:47.645831 30052 
net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:01:47.645843 30052 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:01:47.645853 30052 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:01:47.645865 30052 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:01:47.645876 30052 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:01:47.645887 30052 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:01:47.645898 30052 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:01:47.645910 30052 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:01:47.645921 30052 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:01:47.645932 30052 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.645952 30052 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.645965 30052 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:01:47.645977 30052 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:01:47.645987 30052 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:01:47.645998 30052 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:01:47.646008 30052 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:01:47.646019 30052 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:01:47.646031 30052 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:01:47.646041 30052 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:01:47.646052 30052 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:01:47.646064 30052 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.646076 30052 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:01:47.646087 30052 net.cpp:226] L1_b2_brc3_conv needs backward 
computation.\nI1206 09:01:47.646100 30052 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:01:47.646111 30052 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:01:47.646122 30052 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:01:47.646134 30052 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:01:47.646144 30052 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:01:47.646157 30052 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:01:47.646167 30052 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:01:47.646179 30052 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:01:47.646191 30052 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.646203 30052 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.646215 30052 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.646226 30052 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:01:47.646239 30052 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:01:47.646248 30052 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:01:47.646260 30052 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:01:47.646271 30052 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:01:47.646281 30052 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:01:47.646293 30052 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:01:47.646304 30052 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:01:47.646325 30052 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:01:47.646337 30052 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:01:47.646348 30052 net.cpp:226] pre_conv needs backward computation.\nI1206 09:01:47.646361 30052 net.cpp:228] 
label_dataLayer_1_split does not need backward computation.\nI1206 09:01:47.646375 30052 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:01:47.646384 30052 net.cpp:270] This network produces output accuracy\nI1206 09:01:47.646396 30052 net.cpp:270] This network produces output loss\nI1206 09:01:47.646714 30052 net.cpp:283] Network initialization done.\nI1206 09:01:47.652952 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:47.652990 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:47.653064 30052 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1206 09:01:47.653338 30052 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1206 09:01:47.654830 30052 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: 
\"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: 
\"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: 
\"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: 
\"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  
name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\nI1206 09:01:47.656203 30052 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:01:47.656422 30052 net.cpp:100] Creating Layer dataLayer\nI1206 09:01:47.656445 30052 net.cpp:408] dataLayer -> data_top\nI1206 09:01:47.656476 30052 net.cpp:408] dataLayer -> label\nI1206 09:01:47.656502 30052 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:01:57.590665 30063 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1206 09:01:57.597437 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:57.603484 30052 net.cpp:150] Setting up dataLayer\nI1206 09:01:57.603514 30052 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:01:57.603530 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.603544 30052 net.cpp:165] Memory required for data: 1044820\nI1206 09:01:57.603557 30052 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:01:57.603579 30052 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:01:57.603590 30052 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:01:57.603617 30052 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:01:57.603641 30052 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:01:57.603754 30052 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:01:57.603780 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.603796 30052 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.603808 30052 net.cpp:165] Memory required for data: 1045500\nI1206 09:01:57.603818 30052 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:01:57.603875 30052 net.cpp:100] Creating Layer pre_conv\nI1206 09:01:57.603891 30052 net.cpp:434] pre_conv <- data_top\nI1206 09:01:57.603910 30052 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:01:57.604385 30052 net.cpp:150] Setting up pre_conv\nI1206 09:01:57.604408 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.604418 
30052 net.cpp:165] Memory required for data: 6616060\nI1206 09:01:57.604467 30052 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:01:57.604487 30052 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:01:57.604499 30052 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:01:57.604519 30052 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:01:57.604542 30052 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:01:57.604660 30052 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:01:57.604681 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.604693 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.604703 30052 net.cpp:165] Memory required for data: 17757180\nI1206 09:01:57.604717 30052 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:01:57.604784 30052 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:01:57.604800 30052 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:01:57.604817 30052 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:01:57.605217 30052 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:01:57.605237 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.605252 30052 net.cpp:165] Memory required for data: 23327740\nI1206 09:01:57.605285 30052 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:01:57.605309 30052 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:01:57.605324 30052 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:01:57.605340 30052 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:01:57.605357 30052 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:01:57.605372 30052 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.605382 30052 net.cpp:165] Memory required for data: 28898300\nI1206 09:01:57.605396 30052 layer_factory.hpp:77] Creating layer 
L1_b1_brc1_conv\nI1206 09:01:57.605422 30052 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:01:57.605438 30052 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:01:57.605461 30052 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:01:57.605881 30052 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:01:57.605904 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.605916 30052 net.cpp:165] Memory required for data: 40039420\nI1206 09:01:57.605934 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:01:57.605962 30052 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:01:57.605973 30052 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:01:57.606000 30052 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:01:57.606616 30052 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:01:57.606637 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.606649 30052 net.cpp:165] Memory required for data: 51180540\nI1206 09:01:57.606678 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:01:57.606705 30052 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1206 09:01:57.606719 30052 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:01:57.606734 30052 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:01:57.606751 30052 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:01:57.606770 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.606781 30052 net.cpp:165] Memory required for data: 62321660\nI1206 09:01:57.606791 30052 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:01:57.606818 30052 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:01:57.606837 30052 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:01:57.606863 30052 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:01:57.608351 30052 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:01:57.608373 30052 net.cpp:157] Top shape: 85 32 32 32 
(2785280)\nI1206 09:01:57.608387 30052 net.cpp:165] Memory required for data: 73462780\nI1206 09:01:57.608407 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:01:57.608428 30052 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:01:57.608440 30052 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:01:57.608461 30052 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:01:57.609393 30052 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:01:57.609414 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.609424 30052 net.cpp:165] Memory required for data: 84603900\nI1206 09:01:57.609447 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:01:57.609462 30052 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:01:57.609473 30052 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:01:57.609488 30052 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:01:57.609508 30052 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:01:57.609522 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.609531 30052 net.cpp:165] Memory required for data: 95745020\nI1206 09:01:57.609542 30052 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:01:57.609568 30052 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:01:57.609581 30052 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:01:57.609599 30052 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:01:57.610100 30052 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:01:57.610123 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.610133 30052 net.cpp:165] Memory required for data: 118027260\nI1206 09:01:57.610158 30052 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:01:57.610185 30052 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:01:57.610199 30052 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:01:57.610221 30052 net.cpp:408] 
L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:01:57.610582 30052 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:01:57.610602 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.610613 30052 net.cpp:165] Memory required for data: 140309500\nI1206 09:01:57.610632 30052 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:01:57.610656 30052 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:01:57.610667 30052 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:01:57.610682 30052 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:01:57.610702 30052 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:01:57.610757 30052 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:01:57.610776 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.610797 30052 net.cpp:165] Memory required for data: 162591740\nI1206 09:01:57.610810 30052 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.610829 30052 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.610841 30052 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:01:57.610862 30052 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:57.610882 30052 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:57.610978 30052 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.610998 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.611018 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.611029 30052 net.cpp:165] Memory required for data: 207156220\nI1206 09:01:57.611040 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:01:57.611059 30052 net.cpp:100] Creating Layer 
L1_b2_brc1_bn\nI1206 09:01:57.611071 30052 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:57.611093 30052 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:01:57.611378 30052 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:01:57.611399 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.611409 30052 net.cpp:165] Memory required for data: 229438460\nI1206 09:01:57.611431 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:01:57.611451 30052 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:01:57.611464 30052 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:01:57.611480 30052 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:01:57.611500 30052 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:01:57.611521 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.611531 30052 net.cpp:165] Memory required for data: 251720700\nI1206 09:01:57.611541 30052 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:01:57.611560 30052 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:01:57.611573 30052 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:01:57.611598 30052 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:01:57.612133 30052 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:01:57.612154 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.612164 30052 net.cpp:165] Memory required for data: 262861820\nI1206 09:01:57.612182 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:01:57.612200 30052 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:01:57.612211 30052 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:01:57.612227 30052 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:01:57.612519 30052 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:01:57.612540 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.612550 30052 net.cpp:165] 
Memory required for data: 274002940\nI1206 09:01:57.612573 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:01:57.612596 30052 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:01:57.612609 30052 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:01:57.612624 30052 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:01:57.612643 30052 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:01:57.612658 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.612669 30052 net.cpp:165] Memory required for data: 285144060\nI1206 09:01:57.612679 30052 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:01:57.612711 30052 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:01:57.612725 30052 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:01:57.612747 30052 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:01:57.613088 30052 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:01:57.613109 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.613132 30052 net.cpp:165] Memory required for data: 296285180\nI1206 09:01:57.613152 30052 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:01:57.613169 30052 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:01:57.613181 30052 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:01:57.613198 30052 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:01:57.613502 30052 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:01:57.613521 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.613531 30052 net.cpp:165] Memory required for data: 307426300\nI1206 09:01:57.613554 30052 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:01:57.613575 30052 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:01:57.613587 30052 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:01:57.613602 30052 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:01:57.613621 30052 
net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:01:57.613636 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.613646 30052 net.cpp:165] Memory required for data: 318567420\nI1206 09:01:57.613657 30052 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:01:57.613682 30052 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:01:57.613696 30052 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:01:57.613719 30052 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:01:57.614099 30052 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:01:57.614118 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.614128 30052 net.cpp:165] Memory required for data: 340849660\nI1206 09:01:57.614154 30052 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:01:57.614181 30052 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:01:57.614193 30052 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:01:57.614207 30052 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:57.614223 30052 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1206 09:01:57.614284 30052 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:01:57.614301 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.614311 30052 net.cpp:165] Memory required for data: 363131900\nI1206 09:01:57.614322 30052 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.614337 30052 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.614348 30052 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:01:57.614368 30052 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:57.614389 30052 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:57.614475 30052 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.614498 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.614512 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.614522 30052 net.cpp:165] Memory required for data: 407696380\nI1206 09:01:57.614534 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:01:57.614548 30052 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:01:57.614560 30052 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:57.614581 30052 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:01:57.614879 30052 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:01:57.614898 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.614908 30052 net.cpp:165] Memory required for data: 429978620\nI1206 09:01:57.614930 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:01:57.614954 30052 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:01:57.614966 30052 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 09:01:57.614990 30052 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:01:57.615008 30052 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:01:57.615025 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.615034 30052 net.cpp:165] Memory required for data: 452260860\nI1206 09:01:57.615044 30052 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:01:57.615069 30052 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:01:57.615082 30052 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:01:57.615104 30052 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:01:57.615473 30052 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:01:57.615494 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.615504 30052 net.cpp:165] Memory required 
for data: 463401980\nI1206 09:01:57.615522 30052 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:01:57.615542 30052 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:01:57.615556 30052 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:01:57.615576 30052 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:01:57.615871 30052 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:01:57.615891 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.615901 30052 net.cpp:165] Memory required for data: 474543100\nI1206 09:01:57.615923 30052 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:01:57.615947 30052 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:01:57.615959 30052 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:01:57.615974 30052 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:01:57.615994 30052 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:01:57.616009 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.616020 30052 net.cpp:165] Memory required for data: 485684220\nI1206 09:01:57.616030 30052 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1206 09:01:57.616062 30052 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:01:57.616076 30052 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:01:57.616094 30052 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:01:57.616439 30052 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:01:57.616459 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.616469 30052 net.cpp:165] Memory required for data: 496825340\nI1206 09:01:57.616487 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:01:57.616508 30052 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:01:57.616519 30052 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:01:57.616535 30052 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:01:57.616825 30052 net.cpp:150] Setting up 
L1_b3_brc3_bn\nI1206 09:01:57.616845 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.616855 30052 net.cpp:165] Memory required for data: 507966460\nI1206 09:01:57.616878 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:01:57.616894 30052 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:01:57.616904 30052 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:01:57.616919 30052 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:01:57.616946 30052 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:01:57.616963 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.616973 30052 net.cpp:165] Memory required for data: 519107580\nI1206 09:01:57.616983 30052 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:01:57.617008 30052 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:01:57.617022 30052 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:01:57.617043 30052 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:01:57.617415 30052 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:01:57.617435 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.617445 30052 net.cpp:165] Memory required for data: 541389820\nI1206 09:01:57.617462 30052 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:01:57.617492 30052 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:01:57.617506 30052 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:01:57.617521 30052 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:57.617540 30052 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:01:57.617599 30052 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:01:57.617624 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.617635 30052 net.cpp:165] Memory required for data: 563672060\nI1206 09:01:57.617645 30052 layer_factory.hpp:77] Creating layer 
L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.617660 30052 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.617671 30052 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:01:57.617691 30052 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:57.617712 30052 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:57.617794 30052 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.617817 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.617832 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.617841 30052 net.cpp:165] Memory required for data: 608236540\nI1206 09:01:57.617852 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:01:57.617868 30052 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:01:57.617879 30052 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:57.617900 30052 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:01:57.618186 30052 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:01:57.618206 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.618216 30052 net.cpp:165] Memory required for data: 630518780\nI1206 09:01:57.618238 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:01:57.618259 30052 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:01:57.618271 30052 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:01:57.618288 30052 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:01:57.618306 30052 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:01:57.618321 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.618331 30052 net.cpp:165] Memory required for data: 652801020\nI1206 
09:01:57.618341 30052 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:01:57.618360 30052 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:01:57.618373 30052 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:01:57.618396 30052 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:01:57.618779 30052 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:01:57.618799 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.618809 30052 net.cpp:165] Memory required for data: 663942140\nI1206 09:01:57.618826 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:01:57.618844 30052 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:01:57.618856 30052 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:01:57.618871 30052 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:01:57.619170 30052 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:01:57.619190 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.619200 30052 net.cpp:165] Memory required for data: 675083260\nI1206 09:01:57.619222 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:01:57.619243 30052 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:01:57.619256 30052 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:01:57.619271 30052 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:01:57.619290 30052 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:01:57.619305 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.619324 30052 net.cpp:165] Memory required for data: 686224380\nI1206 09:01:57.619335 30052 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:01:57.619361 30052 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:01:57.619374 30052 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:01:57.619395 30052 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:01:57.619735 30052 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 
09:01:57.619755 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.619765 30052 net.cpp:165] Memory required for data: 697365500\nI1206 09:01:57.619782 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:01:57.619799 30052 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:01:57.619810 30052 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:01:57.619825 30052 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:01:57.620138 30052 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:01:57.620158 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.620168 30052 net.cpp:165] Memory required for data: 708506620\nI1206 09:01:57.620190 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:01:57.620213 30052 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:01:57.620225 30052 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:01:57.620240 30052 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:01:57.620260 30052 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:01:57.620275 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.620283 30052 net.cpp:165] Memory required for data: 719647740\nI1206 09:01:57.620293 30052 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:01:57.620319 30052 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:01:57.620332 30052 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:01:57.620354 30052 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:01:57.620744 30052 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:01:57.620764 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.620774 30052 net.cpp:165] Memory required for data: 741929980\nI1206 09:01:57.620791 30052 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:01:57.620808 30052 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:01:57.620820 30052 net.cpp:434] L1_b4_sum_eltwise <- 
L1_b4_brc3_conv_top\nI1206 09:01:57.620834 30052 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:57.620851 30052 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:01:57.620914 30052 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:01:57.620932 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.620957 30052 net.cpp:165] Memory required for data: 764212220\nI1206 09:01:57.620970 30052 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.620985 30052 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.620996 30052 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:01:57.621017 30052 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:57.621037 30052 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:57.621119 30052 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.621140 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621153 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621162 30052 net.cpp:165] Memory required for data: 808776700\nI1206 09:01:57.621173 30052 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:01:57.621188 30052 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:01:57.621199 30052 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:57.621219 30052 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:01:57.621521 30052 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:01:57.621541 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621551 30052 net.cpp:165] Memory required for data: 831058940\nI1206 09:01:57.621589 30052 layer_factory.hpp:77] 
Creating layer L1_b5_brc1_relu\nI1206 09:01:57.621609 30052 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:01:57.621623 30052 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:01:57.621637 30052 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:01:57.621662 30052 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:01:57.621678 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621687 30052 net.cpp:165] Memory required for data: 853341180\nI1206 09:01:57.621698 30052 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:01:57.621719 30052 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:01:57.621732 30052 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:01:57.621754 30052 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:01:57.622145 30052 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:01:57.622166 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622176 30052 net.cpp:165] Memory required for data: 864482300\nI1206 09:01:57.622195 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:01:57.622212 30052 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 09:01:57.622223 30052 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:01:57.622239 30052 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:01:57.622537 30052 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:01:57.622556 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622566 30052 net.cpp:165] Memory required for data: 875623420\nI1206 09:01:57.622589 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:01:57.622606 30052 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:01:57.622617 30052 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:01:57.622635 30052 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:01:57.622656 30052 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:01:57.622670 30052 net.cpp:157] Top 
shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622681 30052 net.cpp:165] Memory required for data: 886764540\nI1206 09:01:57.622691 30052 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:01:57.622717 30052 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:01:57.622730 30052 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:01:57.622748 30052 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:01:57.623106 30052 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:01:57.623131 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623142 30052 net.cpp:165] Memory required for data: 897905660\nI1206 09:01:57.623160 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:01:57.623178 30052 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:01:57.623189 30052 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:01:57.623205 30052 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:01:57.623495 30052 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:01:57.623514 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623523 30052 net.cpp:165] Memory required for data: 909046780\nI1206 09:01:57.623546 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:01:57.623569 30052 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:01:57.623582 30052 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:01:57.623598 30052 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:01:57.623617 30052 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:01:57.623632 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623643 30052 net.cpp:165] Memory required for data: 920187900\nI1206 09:01:57.623653 30052 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:01:57.623678 30052 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:01:57.623702 30052 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:01:57.623724 30052 net.cpp:408] 
L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:01:57.624110 30052 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:01:57.624130 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624140 30052 net.cpp:165] Memory required for data: 942470140\nI1206 09:01:57.624158 30052 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:01:57.624176 30052 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:01:57.624187 30052 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:01:57.624202 30052 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:57.624217 30052 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:01:57.624276 30052 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:01:57.624294 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624305 30052 net.cpp:165] Memory required for data: 964752380\nI1206 09:01:57.624316 30052 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.624330 30052 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.624342 30052 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:01:57.624361 30052 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:57.624380 30052 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:57.624459 30052 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.624481 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624495 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624505 30052 net.cpp:165] Memory required for data: 1009316860\nI1206 09:01:57.624517 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:01:57.624532 30052 net.cpp:100] 
Creating Layer L1_b6_brc1_bn\nI1206 09:01:57.624544 30052 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:57.624572 30052 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:01:57.624867 30052 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:01:57.624887 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624897 30052 net.cpp:165] Memory required for data: 1031599100\nI1206 09:01:57.624918 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:01:57.624946 30052 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:01:57.624960 30052 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:01:57.624976 30052 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:01:57.624995 30052 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:01:57.625010 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.625020 30052 net.cpp:165] Memory required for data: 1053881340\nI1206 09:01:57.625030 30052 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:01:57.625051 30052 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:01:57.625064 30052 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:01:57.625087 30052 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:01:57.625468 30052 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:01:57.625488 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625497 30052 net.cpp:165] Memory required for data: 1065022460\nI1206 09:01:57.625515 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:01:57.625532 30052 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:01:57.625545 30052 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:01:57.625561 30052 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:01:57.625859 30052 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:01:57.625879 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625888 30052 
net.cpp:165] Memory required for data: 1076163580\nI1206 09:01:57.625921 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:01:57.625959 30052 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:01:57.625973 30052 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:01:57.625989 30052 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:01:57.626009 30052 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:01:57.626024 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.626034 30052 net.cpp:165] Memory required for data: 1087304700\nI1206 09:01:57.626046 30052 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:01:57.626072 30052 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:01:57.626086 30052 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:01:57.626109 30052 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:01:57.626466 30052 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:01:57.626485 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.626495 30052 net.cpp:165] Memory required for data: 1098445820\nI1206 09:01:57.626514 30052 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:01:57.626543 30052 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:01:57.626556 30052 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:01:57.626572 30052 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:01:57.626874 30052 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:01:57.626894 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.626904 30052 net.cpp:165] Memory required for data: 1109586940\nI1206 09:01:57.626925 30052 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:01:57.626948 30052 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:01:57.626962 30052 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:01:57.626977 30052 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 
09:01:57.626996 30052 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:01:57.627012 30052 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.627022 30052 net.cpp:165] Memory required for data: 1120728060\nI1206 09:01:57.627032 30052 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:01:57.627058 30052 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:01:57.627071 30052 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:01:57.627094 30052 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:01:57.627472 30052 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:01:57.627491 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.627501 30052 net.cpp:165] Memory required for data: 1143010300\nI1206 09:01:57.627518 30052 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:01:57.627537 30052 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:01:57.627547 30052 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:01:57.627562 30052 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:57.627585 30052 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1206 09:01:57.627646 30052 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:01:57.627665 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.627674 30052 net.cpp:165] Memory required for data: 1165292540\nI1206 09:01:57.627686 30052 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.627701 30052 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.627712 30052 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:01:57.627732 30052 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:57.627753 30052 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> 
L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:57.627846 30052 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.627874 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.627889 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.627899 30052 net.cpp:165] Memory required for data: 1209857020\nI1206 09:01:57.627910 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:01:57.627930 30052 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:01:57.627951 30052 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:57.627974 30052 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:01:57.628319 30052 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:01:57.628342 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.628352 30052 net.cpp:165] Memory required for data: 1232139260\nI1206 09:01:57.628374 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:01:57.628389 30052 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:01:57.628401 30052 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 09:01:57.628420 30052 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:01:57.628440 30052 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:01:57.628458 30052 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.628468 30052 net.cpp:165] Memory required for data: 1254421500\nI1206 09:01:57.628479 30052 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:01:57.628511 30052 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:01:57.628525 30052 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:01:57.628547 30052 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:01:57.629021 30052 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:01:57.629045 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.629055 30052 net.cpp:165] Memory 
required for data: 1259992060\nI1206 09:01:57.629072 30052 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:01:57.629098 30052 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:01:57.629110 30052 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:01:57.629132 30052 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:01:57.629474 30052 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:01:57.629500 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.629510 30052 net.cpp:165] Memory required for data: 1265562620\nI1206 09:01:57.629534 30052 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:01:57.629545 30052 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:01:57.629554 30052 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:01:57.629566 30052 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:01:57.629578 30052 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:01:57.629585 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.629592 30052 net.cpp:165] Memory required for data: 1271133180\nI1206 09:01:57.629601 30052 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1206 09:01:57.629626 30052 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:01:57.629637 30052 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:01:57.629657 30052 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:01:57.630134 30052 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:01:57.630156 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.630165 30052 net.cpp:165] Memory required for data: 1276703740\nI1206 09:01:57.630183 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:01:57.630203 30052 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:01:57.630216 30052 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:01:57.630244 30052 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:01:57.630578 30052 net.cpp:150] Setting 
up L2_b1_brc3_bn\nI1206 09:01:57.630600 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.630610 30052 net.cpp:165] Memory required for data: 1282274300\nI1206 09:01:57.630631 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:01:57.630647 30052 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:01:57.630671 30052 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:01:57.630697 30052 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:01:57.630719 30052 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:01:57.630738 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.630748 30052 net.cpp:165] Memory required for data: 1287844860\nI1206 09:01:57.630758 30052 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:01:57.630782 30052 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:01:57.630795 30052 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:01:57.630820 30052 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:01:57.631330 30052 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:01:57.631350 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.631361 30052 net.cpp:165] Memory required for data: 1298985980\nI1206 09:01:57.631377 30052 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:01:57.631402 30052 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:01:57.631417 30052 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:57.631438 30052 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:01:57.631966 30052 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:01:57.631988 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.631997 30052 net.cpp:165] Memory required for data: 1310127100\nI1206 09:01:57.632014 30052 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:01:57.632051 30052 net.cpp:100] Creating Layer 
L2_b1_sum_eltwise\nI1206 09:01:57.632066 30052 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:01:57.632079 30052 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:01:57.632097 30052 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:01:57.632143 30052 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:01:57.632159 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.632175 30052 net.cpp:165] Memory required for data: 1321268220\nI1206 09:01:57.632186 30052 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.632200 30052 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.632210 30052 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:01:57.632225 30052 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:57.632244 30052 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:57.632325 30052 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.632344 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.632356 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.632365 30052 net.cpp:165] Memory required for data: 1343550460\nI1206 09:01:57.632376 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:01:57.632396 30052 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:01:57.632408 30052 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:57.632423 30052 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:01:57.632709 30052 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:01:57.632728 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.632737 30052 net.cpp:165] Memory required for data: 
1354691580\nI1206 09:01:57.632757 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:01:57.632781 30052 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:01:57.632794 30052 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:01:57.632807 30052 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:01:57.632827 30052 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:01:57.632840 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.632858 30052 net.cpp:165] Memory required for data: 1365832700\nI1206 09:01:57.632869 30052 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:01:57.632889 30052 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:01:57.632901 30052 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:01:57.632923 30052 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:01:57.633427 30052 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:01:57.633447 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.633456 30052 net.cpp:165] Memory required for data: 1371403260\nI1206 09:01:57.633474 30052 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1206 09:01:57.633491 30052 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:01:57.633502 30052 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:01:57.633522 30052 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:01:57.633827 30052 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:01:57.633846 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.633857 30052 net.cpp:165] Memory required for data: 1376973820\nI1206 09:01:57.633877 30052 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:01:57.633898 30052 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:01:57.633911 30052 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:01:57.633926 30052 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:01:57.633944 30052 net.cpp:150] Setting 
up L2_b2_brc2_relu\nI1206 09:01:57.633958 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.633968 30052 net.cpp:165] Memory required for data: 1382544380\nI1206 09:01:57.633978 30052 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:01:57.633997 30052 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:01:57.634009 30052 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:01:57.634040 30052 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:01:57.634418 30052 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:01:57.634436 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.634445 30052 net.cpp:165] Memory required for data: 1388114940\nI1206 09:01:57.634462 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:01:57.634479 30052 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:01:57.634490 30052 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:01:57.634505 30052 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:01:57.634805 30052 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:01:57.634825 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.634835 30052 net.cpp:165] Memory required for data: 1393685500\nI1206 09:01:57.634855 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:01:57.634871 30052 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:01:57.634882 30052 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:01:57.634902 30052 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:01:57.634923 30052 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:01:57.634938 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.634946 30052 net.cpp:165] Memory required for data: 1399256060\nI1206 09:01:57.634956 30052 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:01:57.634981 30052 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:01:57.634994 30052 net.cpp:434] L2_b2_brc3_conv 
<- L2_b2_brc3_bn_top\nI1206 09:01:57.635011 30052 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:01:57.636188 30052 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:01:57.636212 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636224 30052 net.cpp:165] Memory required for data: 1410397180\nI1206 09:01:57.636240 30052 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:01:57.636258 30052 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:01:57.636271 30052 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:01:57.636284 30052 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:57.636318 30052 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:01:57.636368 30052 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:01:57.636386 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636396 30052 net.cpp:165] Memory required for data: 1421538300\nI1206 09:01:57.636407 30052 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.636427 30052 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.636440 30052 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:01:57.636454 30052 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:57.636474 30052 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:57.636564 30052 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.636584 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636598 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636607 30052 net.cpp:165] Memory required for data: 1443820540\nI1206 09:01:57.636618 30052 layer_factory.hpp:77] 
Creating layer L2_b3_brc1_bn\nI1206 09:01:57.636637 30052 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:01:57.636651 30052 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:57.636667 30052 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:01:57.636955 30052 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:01:57.636975 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636984 30052 net.cpp:165] Memory required for data: 1454961660\nI1206 09:01:57.637037 30052 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:01:57.637060 30052 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:01:57.637073 30052 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:01:57.637089 30052 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:01:57.637109 30052 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:01:57.637123 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637133 30052 net.cpp:165] Memory required for data: 1466102780\nI1206 09:01:57.637145 30052 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 09:01:57.637169 30052 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:01:57.637182 30052 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:01:57.637200 30052 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:01:57.637714 30052 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:01:57.637734 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.637743 30052 net.cpp:165] Memory required for data: 1471673340\nI1206 09:01:57.637763 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:01:57.637784 30052 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:01:57.637797 30052 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:01:57.637814 30052 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:01:57.638098 30052 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:01:57.638120 30052 
net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.638130 30052 net.cpp:165] Memory required for data: 1477243900\nI1206 09:01:57.638154 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:01:57.638170 30052 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:01:57.638181 30052 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:01:57.638196 30052 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:01:57.638216 30052 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:01:57.638229 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.638239 30052 net.cpp:165] Memory required for data: 1482814460\nI1206 09:01:57.638249 30052 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:01:57.638269 30052 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:01:57.638291 30052 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:01:57.638314 30052 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:01:57.638682 30052 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:01:57.638708 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.638717 30052 net.cpp:165] Memory required for data: 1488385020\nI1206 09:01:57.638736 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:01:57.638757 30052 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:01:57.638769 30052 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:01:57.638787 30052 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:01:57.639078 30052 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:01:57.639097 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639106 30052 net.cpp:165] Memory required for data: 1493955580\nI1206 09:01:57.639128 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:01:57.639150 30052 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:01:57.639163 30052 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:01:57.639178 
30052 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:01:57.639197 30052 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:01:57.639212 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639221 30052 net.cpp:165] Memory required for data: 1499526140\nI1206 09:01:57.639231 30052 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:01:57.639252 30052 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:01:57.639266 30052 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:01:57.639287 30052 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:01:57.639788 30052 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:01:57.639808 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.639818 30052 net.cpp:165] Memory required for data: 1510667260\nI1206 09:01:57.639837 30052 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:01:57.639853 30052 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:01:57.639864 30052 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:01:57.639878 30052 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:57.639894 30052 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:01:57.639943 30052 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:01:57.639966 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.639976 30052 net.cpp:165] Memory required for data: 1521808380\nI1206 09:01:57.639986 30052 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.640002 30052 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.640012 30052 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:01:57.640033 30052 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:57.640053 
30052 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:57.640136 30052 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.640159 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640173 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640182 30052 net.cpp:165] Memory required for data: 1544090620\nI1206 09:01:57.640193 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:01:57.640208 30052 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:01:57.640219 30052 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:57.640239 30052 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:01:57.640525 30052 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:01:57.640544 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640564 30052 net.cpp:165] Memory required for data: 1555231740\nI1206 09:01:57.640586 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:01:57.640602 30052 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1206 09:01:57.640614 30052 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:01:57.640636 30052 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:01:57.640656 30052 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:01:57.640671 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640681 30052 net.cpp:165] Memory required for data: 1566372860\nI1206 09:01:57.640700 30052 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:01:57.640722 30052 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:01:57.640734 30052 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:01:57.640753 30052 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:01:57.641260 30052 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:01:57.641279 30052 net.cpp:157] Top shape: 85 
64 16 16 (1392640)\nI1206 09:01:57.641289 30052 net.cpp:165] Memory required for data: 1571943420\nI1206 09:01:57.641307 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:01:57.641329 30052 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:01:57.641342 30052 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:01:57.641357 30052 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:01:57.641656 30052 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:01:57.641675 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.641691 30052 net.cpp:165] Memory required for data: 1577513980\nI1206 09:01:57.641716 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:01:57.641732 30052 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:01:57.641746 30052 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:01:57.641765 30052 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:01:57.641784 30052 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:01:57.641799 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.641808 30052 net.cpp:165] Memory required for data: 1583084540\nI1206 09:01:57.641819 30052 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:01:57.641844 30052 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:01:57.641856 30052 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:01:57.641873 30052 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:01:57.642236 30052 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:01:57.642256 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642266 30052 net.cpp:165] Memory required for data: 1588655100\nI1206 09:01:57.642282 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:01:57.642304 30052 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:01:57.642316 30052 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:01:57.642333 30052 net.cpp:408] L2_b4_brc3_bn -> 
L2_b4_brc3_bn_top\nI1206 09:01:57.642619 30052 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:01:57.642637 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642647 30052 net.cpp:165] Memory required for data: 1594225660\nI1206 09:01:57.642669 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:01:57.642691 30052 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:01:57.642705 30052 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:01:57.642721 30052 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:01:57.642740 30052 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:01:57.642756 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642766 30052 net.cpp:165] Memory required for data: 1599796220\nI1206 09:01:57.642776 30052 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:01:57.642801 30052 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:01:57.642814 30052 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:01:57.642837 30052 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:01:57.643339 30052 net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:01:57.643358 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643368 30052 net.cpp:165] Memory required for data: 1610937340\nI1206 09:01:57.643386 30052 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:01:57.643407 30052 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:01:57.643420 30052 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:01:57.643435 30052 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:57.643455 30052 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:01:57.643501 30052 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:01:57.643519 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643529 30052 net.cpp:165] Memory required for data: 1622078460\nI1206 
09:01:57.643540 30052 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.643554 30052 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.643566 30052 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:01:57.643590 30052 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:57.643611 30052 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:57.643710 30052 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.643735 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643749 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643759 30052 net.cpp:165] Memory required for data: 1644360700\nI1206 09:01:57.643770 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:01:57.643785 30052 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:01:57.643797 30052 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:57.643818 30052 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:01:57.644105 30052 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:01:57.644124 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.644134 30052 net.cpp:165] Memory required for data: 1655501820\nI1206 09:01:57.644156 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:01:57.644171 30052 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:01:57.644183 30052 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:01:57.644197 30052 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:01:57.644217 30052 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:01:57.644232 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.644243 
30052 net.cpp:165] Memory required for data: 1666642940\nI1206 09:01:57.644253 30052 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:01:57.644278 30052 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:01:57.644291 30052 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:01:57.644314 30052 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:01:57.644827 30052 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:01:57.644847 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.644857 30052 net.cpp:165] Memory required for data: 1672213500\nI1206 09:01:57.644876 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:01:57.644897 30052 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:01:57.644908 30052 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:01:57.644925 30052 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:01:57.645215 30052 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:01:57.645237 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.645248 30052 net.cpp:165] Memory required for data: 1677784060\nI1206 09:01:57.645270 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:01:57.645287 30052 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:01:57.645308 30052 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:01:57.645325 30052 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:01:57.645344 30052 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:01:57.645359 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.645370 30052 net.cpp:165] Memory required for data: 1683354620\nI1206 09:01:57.645380 30052 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:01:57.645411 30052 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:01:57.645423 30052 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:01:57.645442 30052 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 
09:01:57.645810 30052 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:01:57.645830 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.645840 30052 net.cpp:165] Memory required for data: 1688925180\nI1206 09:01:57.645859 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:01:57.645884 30052 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:01:57.645898 30052 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:01:57.645915 30052 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:01:57.646220 30052 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:01:57.646242 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.646252 30052 net.cpp:165] Memory required for data: 1694495740\nI1206 09:01:57.646275 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:01:57.646291 30052 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:01:57.646303 30052 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:01:57.646317 30052 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:01:57.646337 30052 net.cpp:150] Setting up L2_b5_brc3_relu\nI1206 09:01:57.646351 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.646363 30052 net.cpp:165] Memory required for data: 1700066300\nI1206 09:01:57.646373 30052 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:01:57.646392 30052 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:01:57.646405 30052 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:01:57.646428 30052 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:01:57.646939 30052 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:01:57.646957 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.646967 30052 net.cpp:165] Memory required for data: 1711207420\nI1206 09:01:57.646986 30052 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:01:57.647003 30052 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 
09:01:57.647016 30052 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:01:57.647029 30052 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:57.647047 30052 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:01:57.647096 30052 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:01:57.647114 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.647125 30052 net.cpp:165] Memory required for data: 1722348540\nI1206 09:01:57.647136 30052 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.647150 30052 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.647161 30052 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:01:57.647181 30052 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:57.647200 30052 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:57.647286 30052 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.647305 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.647318 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.647328 30052 net.cpp:165] Memory required for data: 1744630780\nI1206 09:01:57.647347 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:01:57.647362 30052 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:01:57.647374 30052 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:57.647395 30052 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:01:57.647696 30052 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:01:57.647716 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.647725 30052 net.cpp:165] Memory required for data: 
1755771900\nI1206 09:01:57.647747 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:01:57.647781 30052 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:01:57.647796 30052 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:01:57.647811 30052 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:01:57.647831 30052 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:01:57.647846 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.647855 30052 net.cpp:165] Memory required for data: 1766913020\nI1206 09:01:57.647866 30052 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:01:57.647891 30052 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:01:57.647904 30052 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:01:57.647927 30052 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:01:57.648453 30052 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:01:57.648473 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.648483 30052 net.cpp:165] Memory required for data: 1772483580\nI1206 09:01:57.648501 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:01:57.648517 30052 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:01:57.648530 30052 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:01:57.648550 30052 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:01:57.648846 30052 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:01:57.648866 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.648875 30052 net.cpp:165] Memory required for data: 1778054140\nI1206 09:01:57.648897 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:01:57.648913 30052 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:01:57.648926 30052 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:01:57.648941 30052 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:01:57.648960 30052 net.cpp:150] Setting 
up L2_b6_brc2_relu\nI1206 09:01:57.648974 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.648984 30052 net.cpp:165] Memory required for data: 1783624700\nI1206 09:01:57.648994 30052 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:01:57.649019 30052 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:01:57.649032 30052 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:01:57.649055 30052 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:01:57.649428 30052 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:01:57.649447 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649456 30052 net.cpp:165] Memory required for data: 1789195260\nI1206 09:01:57.649474 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:01:57.649495 30052 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:01:57.649508 30052 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:01:57.649528 30052 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:01:57.649827 30052 net.cpp:150] Setting up L2_b6_brc3_bn\nI1206 09:01:57.649847 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649855 30052 net.cpp:165] Memory required for data: 1794765820\nI1206 09:01:57.649878 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:01:57.649893 30052 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:01:57.649905 30052 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:01:57.649919 30052 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:01:57.649948 30052 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:01:57.649965 30052 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649974 30052 net.cpp:165] Memory required for data: 1800336380\nI1206 09:01:57.649984 30052 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:01:57.650009 30052 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:01:57.650023 30052 net.cpp:434] L2_b6_brc3_conv 
<- L2_b6_brc3_bn_top\nI1206 09:01:57.650045 30052 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:01:57.650548 30052 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:01:57.650568 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.650576 30052 net.cpp:165] Memory required for data: 1811477500\nI1206 09:01:57.650593 30052 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:01:57.650610 30052 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:01:57.650622 30052 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:01:57.650640 30052 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:57.650658 30052 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:01:57.650713 30052 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:01:57.650739 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.650751 30052 net.cpp:165] Memory required for data: 1822618620\nI1206 09:01:57.650761 30052 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.650776 30052 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.650787 30052 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:01:57.650804 30052 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:57.650831 30052 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:57.650921 30052 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.650940 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.650952 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.650962 30052 net.cpp:165] Memory required for data: 1844900860\nI1206 09:01:57.650972 30052 layer_factory.hpp:77] 
Creating layer L3_b1_brc1_bn\nI1206 09:01:57.650992 30052 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:01:57.651005 30052 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:57.651022 30052 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:01:57.651296 30052 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:01:57.651319 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651329 30052 net.cpp:165] Memory required for data: 1856041980\nI1206 09:01:57.651350 30052 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:01:57.651365 30052 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:01:57.651377 30052 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:01:57.651392 30052 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:01:57.651412 30052 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:01:57.651427 30052 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651438 30052 net.cpp:165] Memory required for data: 1867183100\nI1206 09:01:57.651448 30052 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:01:57.651468 30052 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:01:57.651480 30052 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:01:57.651502 30052 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:01:57.652178 30052 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:01:57.652197 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652207 30052 net.cpp:165] Memory required for data: 1869968380\nI1206 09:01:57.652225 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:01:57.652241 30052 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:01:57.652271 30052 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:01:57.652292 30052 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:01:57.652592 30052 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:01:57.652611 30052 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652621 30052 net.cpp:165] Memory required for data: 1872753660\nI1206 09:01:57.652642 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:01:57.652663 30052 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:01:57.652676 30052 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:01:57.652699 30052 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:01:57.652720 30052 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:01:57.652735 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652745 30052 net.cpp:165] Memory required for data: 1875538940\nI1206 09:01:57.652755 30052 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:01:57.652776 30052 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:01:57.652788 30052 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:01:57.652811 30052 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:01:57.653260 30052 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 09:01:57.653280 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653290 30052 net.cpp:165] Memory required for data: 1878324220\nI1206 09:01:57.653307 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:01:57.653323 30052 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:01:57.653336 30052 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:01:57.653352 30052 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:01:57.653645 30052 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:01:57.653664 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653673 30052 net.cpp:165] Memory required for data: 1881109500\nI1206 09:01:57.653704 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:01:57.653722 30052 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:01:57.653733 30052 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:01:57.653748 30052 
net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:01:57.653767 30052 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:01:57.653782 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653792 30052 net.cpp:165] Memory required for data: 1883894780\nI1206 09:01:57.653803 30052 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:01:57.653828 30052 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:01:57.653842 30052 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:01:57.653864 30052 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:01:57.654862 30052 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:01:57.654882 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.654892 30052 net.cpp:165] Memory required for data: 1889465340\nI1206 09:01:57.654911 30052 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:01:57.654942 30052 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:01:57.654955 30052 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:57.654974 30052 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:01:57.655973 30052 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:01:57.655993 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656003 30052 net.cpp:165] Memory required for data: 1895035900\nI1206 09:01:57.656020 30052 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:01:57.656038 30052 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:01:57.656049 30052 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:01:57.656064 30052 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:01:57.656085 30052 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:01:57.656141 30052 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:01:57.656168 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656179 30052 
net.cpp:165] Memory required for data: 1900606460\nI1206 09:01:57.656189 30052 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.656209 30052 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.656222 30052 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:01:57.656237 30052 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:57.656262 30052 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:57.656347 30052 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.656368 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656381 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656390 30052 net.cpp:165] Memory required for data: 1911747580\nI1206 09:01:57.656401 30052 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1206 09:01:57.656421 30052 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:01:57.656435 30052 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:57.656451 30052 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:01:57.656752 30052 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:01:57.656772 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656781 30052 net.cpp:165] Memory required for data: 1917318140\nI1206 09:01:57.656805 30052 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:01:57.656821 30052 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:01:57.656831 30052 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:01:57.656850 30052 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:01:57.656872 30052 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:01:57.656886 30052 net.cpp:157] Top 
shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656896 30052 net.cpp:165] Memory required for data: 1922888700\nI1206 09:01:57.656906 30052 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:01:57.656930 30052 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:01:57.656944 30052 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:01:57.656963 30052 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:01:57.657991 30052 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:01:57.658015 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.658026 30052 net.cpp:165] Memory required for data: 1925673980\nI1206 09:01:57.658044 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:01:57.658066 30052 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:01:57.658078 30052 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:01:57.658095 30052 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:01:57.658475 30052 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:01:57.658498 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.658506 30052 net.cpp:165] Memory required for data: 1928459260\nI1206 09:01:57.658525 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:01:57.658537 30052 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:01:57.658548 30052 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:01:57.658561 30052 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:01:57.658578 30052 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:01:57.658587 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.658592 30052 net.cpp:165] Memory required for data: 1931244540\nI1206 09:01:57.658597 30052 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:01:57.658613 30052 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:01:57.658619 30052 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:01:57.658632 30052 net.cpp:408] 
L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:01:57.660157 30052 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:01:57.660182 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.660192 30052 net.cpp:165] Memory required for data: 1934029820\nI1206 09:01:57.660209 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:01:57.660231 30052 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:01:57.660244 30052 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:01:57.660259 30052 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:01:57.660563 30052 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:01:57.660583 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.660593 30052 net.cpp:165] Memory required for data: 1936815100\nI1206 09:01:57.660614 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:01:57.660629 30052 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:01:57.660641 30052 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:01:57.660665 30052 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:01:57.660693 30052 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:01:57.660709 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.660719 30052 net.cpp:165] Memory required for data: 1939600380\nI1206 09:01:57.660730 30052 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:01:57.660758 30052 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:01:57.660771 30052 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:01:57.660789 30052 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:01:57.661783 30052 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:01:57.661803 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.661813 30052 net.cpp:165] Memory required for data: 1945170940\nI1206 09:01:57.661831 30052 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:01:57.661856 30052 net.cpp:100] 
Creating Layer L3_b2_sum_eltwise\nI1206 09:01:57.661870 30052 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:01:57.661883 30052 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:57.661900 30052 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:01:57.661962 30052 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:01:57.661981 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.661991 30052 net.cpp:165] Memory required for data: 1950741500\nI1206 09:01:57.662001 30052 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.662015 30052 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.662027 30052 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:01:57.662047 30052 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:57.662070 30052 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:57.662151 30052 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.662173 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.662187 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.662197 30052 net.cpp:165] Memory required for data: 1961882620\nI1206 09:01:57.662209 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:01:57.662223 30052 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:01:57.662235 30052 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:57.662256 30052 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:01:57.662549 30052 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:01:57.662567 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.662578 30052 
net.cpp:165] Memory required for data: 1967453180\nI1206 09:01:57.662600 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:01:57.662616 30052 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:01:57.662637 30052 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:01:57.662654 30052 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:01:57.662673 30052 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:01:57.662696 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.662708 30052 net.cpp:165] Memory required for data: 1973023740\nI1206 09:01:57.662719 30052 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:01:57.662744 30052 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:01:57.662757 30052 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:01:57.662780 30052 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:01:57.664769 30052 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:01:57.664795 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.664805 30052 net.cpp:165] Memory required for data: 1975809020\nI1206 09:01:57.664824 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:01:57.664841 30052 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:01:57.664854 30052 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:01:57.664870 30052 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:01:57.665180 30052 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:01:57.665201 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.665211 30052 net.cpp:165] Memory required for data: 1978594300\nI1206 09:01:57.665232 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:01:57.665248 30052 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:01:57.665259 30052 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:01:57.665279 30052 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 
09:01:57.665300 30052 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:01:57.665315 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.665325 30052 net.cpp:165] Memory required for data: 1981379580\nI1206 09:01:57.665336 30052 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:01:57.665361 30052 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:01:57.665374 30052 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:01:57.665391 30052 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:01:57.665833 30052 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:01:57.665853 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.665863 30052 net.cpp:165] Memory required for data: 1984164860\nI1206 09:01:57.665880 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:01:57.665901 30052 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:01:57.665913 30052 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:01:57.665930 30052 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1206 09:01:57.666224 30052 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:01:57.666242 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.666252 30052 net.cpp:165] Memory required for data: 1986950140\nI1206 09:01:57.666273 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:01:57.666290 30052 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:01:57.666301 30052 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:01:57.666316 30052 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:01:57.666337 30052 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:01:57.666350 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.666360 30052 net.cpp:165] Memory required for data: 1989735420\nI1206 09:01:57.666371 30052 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:01:57.666395 30052 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:01:57.666409 
30052 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:01:57.666434 30052 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:01:57.667423 30052 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:01:57.667443 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.667453 30052 net.cpp:165] Memory required for data: 1995305980\nI1206 09:01:57.667481 30052 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:01:57.667505 30052 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:01:57.667517 30052 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:01:57.667532 30052 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:57.667547 30052 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:01:57.667609 30052 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:01:57.667628 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.667637 30052 net.cpp:165] Memory required for data: 2000876540\nI1206 09:01:57.667649 30052 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.667663 30052 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.667675 30052 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:01:57.667702 30052 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:57.667726 30052 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:57.667819 30052 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.667839 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.667852 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.667862 30052 net.cpp:165] Memory required for data: 2012017660\nI1206 09:01:57.667873 30052 
layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:01:57.667892 30052 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:01:57.667906 30052 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:57.667922 30052 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:01:57.668221 30052 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:01:57.668241 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.668249 30052 net.cpp:165] Memory required for data: 2017588220\nI1206 09:01:57.668270 30052 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:01:57.668287 30052 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:01:57.668298 30052 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:01:57.668313 30052 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:01:57.668331 30052 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:01:57.668345 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.668355 30052 net.cpp:165] Memory required for data: 2023158780\nI1206 09:01:57.668366 30052 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:01:57.668390 30052 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:01:57.668404 30052 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:01:57.668427 30052 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:01:57.669420 30052 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:01:57.669440 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.669450 30052 net.cpp:165] Memory required for data: 2025944060\nI1206 09:01:57.669468 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:01:57.669489 30052 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:01:57.669502 30052 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:01:57.669523 30052 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:01:57.669829 30052 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 
09:01:57.669848 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.669858 30052 net.cpp:165] Memory required for data: 2028729340\nI1206 09:01:57.669880 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:01:57.669896 30052 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:01:57.669908 30052 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:01:57.669922 30052 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:01:57.669941 30052 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:01:57.669966 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.669977 30052 net.cpp:165] Memory required for data: 2031514620\nI1206 09:01:57.669987 30052 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:01:57.670012 30052 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:01:57.670027 30052 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:01:57.670048 30052 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:01:57.670477 30052 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:01:57.670496 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.670506 30052 net.cpp:165] Memory required for data: 2034299900\nI1206 09:01:57.670523 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:01:57.670544 30052 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:01:57.670557 30052 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:01:57.670572 30052 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:01:57.670884 30052 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:01:57.670904 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.670913 30052 net.cpp:165] Memory required for data: 2037085180\nI1206 09:01:57.670934 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:01:57.670956 30052 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:01:57.670969 30052 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 
09:01:57.670984 30052 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:01:57.671005 30052 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:01:57.671018 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.671027 30052 net.cpp:165] Memory required for data: 2039870460\nI1206 09:01:57.671038 30052 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:01:57.671058 30052 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:01:57.671072 30052 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:01:57.671093 30052 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:01:57.672093 30052 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:01:57.672113 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.672123 30052 net.cpp:165] Memory required for data: 2045441020\nI1206 09:01:57.672142 30052 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:01:57.672158 30052 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:01:57.672170 30052 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:01:57.672183 30052 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:57.672200 30052 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:01:57.672264 30052 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:01:57.672283 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.672292 30052 net.cpp:165] Memory required for data: 2051011580\nI1206 09:01:57.672303 30052 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.672322 30052 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.672334 30052 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:01:57.672350 30052 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 
09:01:57.672370 30052 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:57.672461 30052 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.672482 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.672494 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.672504 30052 net.cpp:165] Memory required for data: 2062152700\nI1206 09:01:57.672516 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:01:57.672535 30052 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:01:57.672549 30052 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:57.672580 30052 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:01:57.672878 30052 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:01:57.672899 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.672909 30052 net.cpp:165] Memory required for data: 2067723260\nI1206 09:01:57.672931 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:01:57.672952 30052 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:01:57.672965 30052 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:01:57.672979 30052 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:01:57.672999 30052 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:01:57.673013 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673022 30052 net.cpp:165] Memory required for data: 2073293820\nI1206 09:01:57.673033 30052 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:01:57.673053 30052 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:01:57.673065 30052 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:01:57.673087 30052 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:01:57.674075 30052 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:01:57.674095 30052 net.cpp:157] Top 
shape: 85 128 8 8 (696320)\nI1206 09:01:57.674105 30052 net.cpp:165] Memory required for data: 2076079100\nI1206 09:01:57.674124 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:01:57.674140 30052 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:01:57.674152 30052 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:01:57.674172 30052 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:01:57.674466 30052 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:01:57.674485 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.674494 30052 net.cpp:165] Memory required for data: 2078864380\nI1206 09:01:57.674516 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:01:57.674532 30052 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:01:57.674543 30052 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:01:57.674562 30052 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:01:57.674583 30052 net.cpp:150] Setting up L3_b5_brc2_relu\nI1206 09:01:57.674597 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.674607 30052 net.cpp:165] Memory required for data: 2081649660\nI1206 09:01:57.674618 30052 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:01:57.674638 30052 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:01:57.674651 30052 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:01:57.674669 30052 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:01:57.675106 30052 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:01:57.675124 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675134 30052 net.cpp:165] Memory required for data: 2084434940\nI1206 09:01:57.675209 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:01:57.675231 30052 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:01:57.675245 30052 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:01:57.675261 30052 net.cpp:408] L3_b5_brc3_bn 
-> L3_b5_brc3_bn_top\nI1206 09:01:57.675561 30052 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:01:57.675583 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675595 30052 net.cpp:165] Memory required for data: 2087220220\nI1206 09:01:57.675616 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:01:57.675632 30052 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:01:57.675644 30052 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:01:57.675659 30052 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:01:57.675679 30052 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:01:57.675704 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675714 30052 net.cpp:165] Memory required for data: 2090005500\nI1206 09:01:57.675724 30052 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:01:57.675753 30052 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:01:57.675767 30052 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1206 09:01:57.675791 30052 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:01:57.676770 30052 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:01:57.676790 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.676800 30052 net.cpp:165] Memory required for data: 2095576060\nI1206 09:01:57.676818 30052 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:01:57.676836 30052 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:01:57.676847 30052 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:01:57.677045 30052 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:57.677068 30052 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:01:57.677129 30052 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:01:57.677150 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677160 30052 net.cpp:165] Memory required for data: 2101146620\nI1206 
09:01:57.677170 30052 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.677191 30052 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.677203 30052 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:01:57.677219 30052 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:57.677239 30052 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:57.677327 30052 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.677346 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677359 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677369 30052 net.cpp:165] Memory required for data: 2112287740\nI1206 09:01:57.677381 30052 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1206 09:01:57.677400 30052 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:01:57.677413 30052 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:57.677430 30052 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:01:57.677737 30052 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:01:57.677760 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677772 30052 net.cpp:165] Memory required for data: 2117858300\nI1206 09:01:57.677794 30052 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:01:57.677810 30052 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:01:57.677822 30052 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:01:57.677837 30052 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:01:57.677858 30052 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:01:57.677872 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677883 30052 
net.cpp:165] Memory required for data: 2123428860\nI1206 09:01:57.677893 30052 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:01:57.677912 30052 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:01:57.677927 30052 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:01:57.677949 30052 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:01:57.678944 30052 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:01:57.678964 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.678973 30052 net.cpp:165] Memory required for data: 2126214140\nI1206 09:01:57.678992 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:01:57.679008 30052 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:01:57.679020 30052 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:01:57.679042 30052 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:01:57.679358 30052 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:01:57.679378 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.679395 30052 net.cpp:165] Memory required for data: 2128999420\nI1206 09:01:57.679419 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:01:57.679438 30052 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:01:57.679451 30052 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:01:57.679467 30052 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:01:57.679486 30052 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:01:57.679502 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.679512 30052 net.cpp:165] Memory required for data: 2131784700\nI1206 09:01:57.679522 30052 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:01:57.679543 30052 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:01:57.679555 30052 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:01:57.679576 30052 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:01:57.680032 
30052 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:01:57.680052 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.680063 30052 net.cpp:165] Memory required for data: 2134569980\nI1206 09:01:57.680080 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:01:57.680097 30052 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:01:57.680109 30052 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:01:57.680125 30052 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:01:57.680416 30052 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:01:57.680435 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.680444 30052 net.cpp:165] Memory required for data: 2137355260\nI1206 09:01:57.680465 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:01:57.680480 30052 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:01:57.680491 30052 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:01:57.680511 30052 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1206 09:01:57.680532 30052 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:01:57.680546 30052 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.680557 30052 net.cpp:165] Memory required for data: 2140140540\nI1206 09:01:57.680567 30052 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:01:57.680586 30052 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:01:57.680610 30052 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:01:57.680627 30052 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:01:57.681605 30052 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:01:57.681625 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.681635 30052 net.cpp:165] Memory required for data: 2145711100\nI1206 09:01:57.681653 30052 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:01:57.681675 30052 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:01:57.681694 30052 
net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:01:57.681711 30052 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:57.681733 30052 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:01:57.681790 30052 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:01:57.681809 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.681818 30052 net.cpp:165] Memory required for data: 2151281660\nI1206 09:01:57.681829 30052 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:01:57.681852 30052 net.cpp:100] Creating Layer post_bn\nI1206 09:01:57.681866 30052 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:01:57.681886 30052 net.cpp:408] post_bn -> post_bn_top\nI1206 09:01:57.682173 30052 net.cpp:150] Setting up post_bn\nI1206 09:01:57.682191 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.682201 30052 net.cpp:165] Memory required for data: 2156852220\nI1206 09:01:57.682224 30052 layer_factory.hpp:77] Creating layer post_relu\nI1206 09:01:57.682240 30052 net.cpp:100] Creating Layer post_relu\nI1206 09:01:57.682250 30052 net.cpp:434] post_relu <- post_bn_top\nI1206 09:01:57.682276 30052 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:01:57.682296 30052 net.cpp:150] Setting up post_relu\nI1206 09:01:57.682310 30052 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.682320 30052 net.cpp:165] Memory required for data: 2162422780\nI1206 09:01:57.682330 30052 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:01:57.682348 30052 net.cpp:100] Creating Layer post_pool\nI1206 09:01:57.682359 30052 net.cpp:434] post_pool <- post_bn_top\nI1206 09:01:57.682380 30052 net.cpp:408] post_pool -> post_pool\nI1206 09:01:57.682440 30052 net.cpp:150] Setting up post_pool\nI1206 09:01:57.682463 30052 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:01:57.682473 30052 net.cpp:165] Memory required for data: 2162509820\nI1206 09:01:57.682484 30052 
layer_factory.hpp:77] Creating layer post_FC\nI1206 09:01:57.682504 30052 net.cpp:100] Creating Layer post_FC\nI1206 09:01:57.682516 30052 net.cpp:434] post_FC <- post_pool\nI1206 09:01:57.682538 30052 net.cpp:408] post_FC -> post_FC_top\nI1206 09:01:57.682783 30052 net.cpp:150] Setting up post_FC\nI1206 09:01:57.682802 30052 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.682812 30052 net.cpp:165] Memory required for data: 2162513220\nI1206 09:01:57.682831 30052 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:01:57.682850 30052 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:01:57.682863 30052 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:01:57.682880 30052 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:01:57.682900 30052 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:01:57.682991 30052 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:01:57.683012 30052 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.683025 30052 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.683035 30052 net.cpp:165] Memory required for data: 2162520020\nI1206 09:01:57.683045 30052 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:01:57.683060 30052 net.cpp:100] Creating Layer accuracy\nI1206 09:01:57.683073 30052 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:01:57.683086 30052 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:01:57.683102 30052 net.cpp:408] accuracy -> accuracy\nI1206 09:01:57.683126 30052 net.cpp:150] Setting up accuracy\nI1206 09:01:57.683142 30052 net.cpp:157] Top shape: (1)\nI1206 09:01:57.683151 30052 net.cpp:165] Memory required for data: 2162520024\nI1206 09:01:57.683161 30052 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:57.683181 30052 net.cpp:100] Creating Layer loss\nI1206 09:01:57.683192 30052 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 
09:01:57.683205 30052 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:01:57.683220 30052 net.cpp:408] loss -> loss\nI1206 09:01:57.683243 30052 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:57.683398 30052 net.cpp:150] Setting up loss\nI1206 09:01:57.683414 30052 net.cpp:157] Top shape: (1)\nI1206 09:01:57.683424 30052 net.cpp:160]     with loss weight 1\nI1206 09:01:57.683449 30052 net.cpp:165] Memory required for data: 2162520028\nI1206 09:01:57.683461 30052 net.cpp:226] loss needs backward computation.\nI1206 09:01:57.683472 30052 net.cpp:228] accuracy does not need backward computation.\nI1206 09:01:57.683485 30052 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:01:57.683495 30052 net.cpp:226] post_FC needs backward computation.\nI1206 09:01:57.683506 30052 net.cpp:226] post_pool needs backward computation.\nI1206 09:01:57.683516 30052 net.cpp:226] post_relu needs backward computation.\nI1206 09:01:57.683524 30052 net.cpp:226] post_bn needs backward computation.\nI1206 09:01:57.683535 30052 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.683547 30052 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:01:57.683557 30052 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:01:57.683565 30052 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:01:57.683585 30052 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:01:57.683596 30052 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:01:57.683605 30052 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:01:57.683616 30052 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:01:57.683627 30052 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:01:57.683637 30052 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:01:57.683648 30052 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward 
computation.\nI1206 09:01:57.683660 30052 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:01:57.683681 30052 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:01:57.683702 30052 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:01:57.683714 30052 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:01:57.683724 30052 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:01:57.683737 30052 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:01:57.683745 30052 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:01:57.683756 30052 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:01:57.683766 30052 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:01:57.683776 30052 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:01:57.683786 30052 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.683799 30052 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.683809 30052 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:01:57.683820 30052 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:01:57.683830 30052 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:01:57.683841 30052 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:01:57.683852 30052 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:01:57.683862 30052 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:01:57.683873 30052 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:01:57.683883 30052 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:01:57.683893 30052 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:01:57.683905 30052 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.683915 30052 
net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:01:57.683926 30052 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:01:57.683938 30052 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:01:57.683948 30052 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:01:57.683959 30052 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:01:57.683970 30052 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:01:57.683981 30052 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:01:57.683992 30052 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:01:57.684002 30052 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:01:57.684012 30052 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:01:57.684023 30052 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684034 30052 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.684046 30052 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:01:57.684056 30052 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:01:57.684068 30052 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:01:57.684078 30052 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:01:57.684088 30052 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:01:57.684099 30052 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:01:57.684119 30052 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:01:57.684131 30052 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:01:57.684142 30052 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:01:57.684154 30052 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684165 30052 net.cpp:226] L3_b1_sum_eltwise needs backward 
computation.\nI1206 09:01:57.684175 30052 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:01:57.684195 30052 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:01:57.684206 30052 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:01:57.684216 30052 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:01:57.684227 30052 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:01:57.684238 30052 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:01:57.684248 30052 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:01:57.684259 30052 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:01:57.684269 30052 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:01:57.684280 30052 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:01:57.684290 30052 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684301 30052 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.684314 30052 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:01:57.684324 30052 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:01:57.684335 30052 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:01:57.684346 30052 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:01:57.684356 30052 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:01:57.684367 30052 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:01:57.684378 30052 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:01:57.684388 30052 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:01:57.684399 30052 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:01:57.684411 30052 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684422 30052 
net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:01:57.684432 30052 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:01:57.684444 30052 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:01:57.684454 30052 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:01:57.684465 30052 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:01:57.684475 30052 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:01:57.684486 30052 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:01:57.684497 30052 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:01:57.684509 30052 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:01:57.684518 30052 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:01:57.684530 30052 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684540 30052 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.684551 30052 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:01:57.684562 30052 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:01:57.684572 30052 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:01:57.684583 30052 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:01:57.684594 30052 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:01:57.684605 30052 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:01:57.684617 30052 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:01:57.684635 30052 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:01:57.684646 30052 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:01:57.684659 30052 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684669 30052 net.cpp:226] L2_b3_sum_eltwise needs backward 
computation.\nI1206 09:01:57.684680 30052 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:01:57.684700 30052 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:01:57.684711 30052 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:01:57.684722 30052 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:01:57.684734 30052 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:01:57.684744 30052 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:01:57.684756 30052 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:01:57.684765 30052 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:01:57.684777 30052 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:01:57.684787 30052 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684798 30052 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.684809 30052 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:01:57.684821 30052 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:01:57.684831 30052 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:01:57.684844 30052 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:01:57.684860 30052 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:01:57.684871 30052 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:01:57.684882 30052 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:01:57.684895 30052 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:01:57.684904 30052 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:01:57.684914 30052 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.684926 30052 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:01:57.684938 30052 
net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:01:57.684949 30052 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:01:57.684959 30052 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:01:57.684970 30052 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:01:57.684983 30052 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:01:57.684993 30052 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:01:57.685003 30052 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:01:57.685014 30052 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:01:57.685024 30052 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:01:57.685034 30052 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:01:57.685045 30052 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685056 30052 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.685068 30052 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:01:57.685079 30052 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:01:57.685091 30052 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:01:57.685101 30052 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:01:57.685112 30052 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:01:57.685123 30052 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:01:57.685134 30052 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:01:57.685145 30052 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:01:57.685155 30052 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:01:57.685178 30052 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685190 30052 net.cpp:226] L1_b5_sum_eltwise needs backward 
computation.\nI1206 09:01:57.685201 30052 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:01:57.685212 30052 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:01:57.685223 30052 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:01:57.685235 30052 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:01:57.685246 30052 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:01:57.685257 30052 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:01:57.685269 30052 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:01:57.685281 30052 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:01:57.685292 30052 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:01:57.685303 30052 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685314 30052 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.685326 30052 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:01:57.685338 30052 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:01:57.685348 30052 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:01:57.685360 30052 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:01:57.685371 30052 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:01:57.685382 30052 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:01:57.685392 30052 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:01:57.685405 30052 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:01:57.685415 30052 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:01:57.685426 30052 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685437 30052 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:01:57.685451 30052 
net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:01:57.685461 30052 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:01:57.685472 30052 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:01:57.685483 30052 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:01:57.685494 30052 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:01:57.685504 30052 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:01:57.685516 30052 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:01:57.685528 30052 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:01:57.685539 30052 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:01:57.685550 30052 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685561 30052 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.685575 30052 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1206 09:01:57.685585 30052 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:01:57.685595 30052 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:01:57.685606 30052 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:01:57.685617 30052 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:01:57.685628 30052 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:01:57.685639 30052 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:01:57.685650 30052 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:01:57.685660 30052 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:01:57.685672 30052 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.685684 30052 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:01:57.685704 30052 net.cpp:226] L1_b1_chanInc_conv needs backward 
computation.\nI1206 09:01:57.685726 30052 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:01:57.685739 30052 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:01:57.685750 30052 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:01:57.685760 30052 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:01:57.685772 30052 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:01:57.685782 30052 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:01:57.685793 30052 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:01:57.685806 30052 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:01:57.685817 30052 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:01:57.685827 30052 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:01:57.685839 30052 net.cpp:226] pre_conv needs backward computation.\nI1206 09:01:57.685853 30052 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1206 09:01:57.685865 30052 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:01:57.685874 30052 net.cpp:270] This network produces output accuracy\nI1206 09:01:57.685886 30052 net.cpp:270] This network produces output loss\nI1206 09:01:57.686167 30052 net.cpp:283] Network initialization done.\nI1206 09:01:57.686820 30052 solver.cpp:60] Solver scaffolding done.\nI1206 09:01:57.902051 30052 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1206 09:01:58.228759 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:58.228814 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:58.234598 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:58.880620 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated 
params: examples/sc/architectures/arch.prototxt\nI1206 09:01:58.880689 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:58.887567 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:59.621412 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:59.621482 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:59.629375 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:59.992461 30052 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1206 09:02:00.443823 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:00.443872 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:00.452127 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:01.344419 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:01.344491 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:01.353847 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:02.387068 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:02.387122 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:02.399019 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:03.540320 30052 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:03.540388 30052 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI1206 09:02:03.551712 30052 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:03.606909 30067 blocking_queue.cpp:50] Waiting for data\nI1206 09:02:04.161134 30052 parallel.cpp:425] Starting Optimization\nI1206 09:02:04.162478 30052 solver.cpp:279] Solving Cifar-ResNeXt\nI1206 09:02:04.162504 30052 solver.cpp:280] Learning Rate Policy: triangular\nI1206 09:02:04.166438 30052 solver.cpp:337] Iteration 0, Testing net (#0)\nI1206 09:04:42.645661 30052 solver.cpp:404]     Test net output #0: accuracy = 0.107118\nI1206 09:04:42.645956 30052 solver.cpp:404]     Test net output #1: loss = 2.39492 (* 1 = 2.39492 loss)\nI1206 09:04:48.107677 30052 solver.cpp:228] Iteration 0, loss = 2.29761\nI1206 09:04:48.107733 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 09:04:48.107748 30052 solver.cpp:244]     Train net output #1: loss = 2.29761 (* 1 = 2.29761 loss)\nI1206 09:04:48.172040 30052 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1206 09:11:39.000715 30052 solver.cpp:337] Iteration 100, Testing net (#0)\nI1206 09:14:18.259156 30052 solver.cpp:404]     Test net output #0: accuracy = 0.283588\nI1206 09:14:18.259423 30052 solver.cpp:404]     Test net output #1: loss = 2.0154 (* 1 = 2.0154 loss)\nI1206 09:14:22.238236 30052 solver.cpp:228] Iteration 100, loss = 2.12794\nI1206 09:14:22.238277 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 09:14:22.238293 30052 solver.cpp:244]     Train net output #1: loss = 2.12794 (* 1 = 2.12794 loss)\nI1206 09:14:22.499716 30052 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1206 09:21:13.497244 30052 solver.cpp:337] Iteration 200, Testing net (#0)\nI1206 09:23:52.423033 30052 solver.cpp:404]     Test net output #0: accuracy = 0.309353\nI1206 09:23:52.423307 30052 solver.cpp:404]     Test net output #1: loss = 1.9117 (* 1 = 1.9117 loss)\nI1206 09:23:56.403549 30052 solver.cpp:228] Iteration 200, loss = 2.04422\nI1206 09:23:56.403599 30052 solver.cpp:244]     Train net output #0: 
accuracy = 0.329412\nI1206 09:23:56.403616 30052 solver.cpp:244]     Train net output #1: loss = 2.04422 (* 1 = 2.04422 loss)\nI1206 09:23:56.588284 30052 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1206 09:30:50.061231 30052 solver.cpp:337] Iteration 300, Testing net (#0)\nI1206 09:33:28.804064 30052 solver.cpp:404]     Test net output #0: accuracy = 0.333117\nI1206 09:33:28.804288 30052 solver.cpp:404]     Test net output #1: loss = 1.83902 (* 1 = 1.83902 loss)\nI1206 09:33:32.783094 30052 solver.cpp:228] Iteration 300, loss = 1.83397\nI1206 09:33:32.783148 30052 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 09:33:32.783164 30052 solver.cpp:244]     Train net output #1: loss = 1.83397 (* 1 = 1.83397 loss)\nI1206 09:33:32.973197 30052 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1206 09:40:26.369559 30052 solver.cpp:337] Iteration 400, Testing net (#0)\nI1206 09:43:03.999284 30052 solver.cpp:404]     Test net output #0: accuracy = 0.35153\nI1206 09:43:03.999507 30052 solver.cpp:404]     Test net output #1: loss = 1.77891 (* 1 = 1.77891 loss)\nI1206 09:43:07.977948 30052 solver.cpp:228] Iteration 400, loss = 1.79143\nI1206 09:43:07.977993 30052 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 09:43:07.978008 30052 solver.cpp:244]     Train net output #1: loss = 1.79143 (* 1 = 1.79143 loss)\nI1206 09:43:08.168926 30052 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1206 09:50:01.892235 30052 solver.cpp:337] Iteration 500, Testing net (#0)\nI1206 09:52:40.838358 30052 solver.cpp:404]     Test net output #0: accuracy = 0.370588\nI1206 09:52:40.838598 30052 solver.cpp:404]     Test net output #1: loss = 1.74204 (* 1 = 1.74204 loss)\nI1206 09:52:44.818789 30052 solver.cpp:228] Iteration 500, loss = 1.69666\nI1206 09:52:44.818831 30052 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 09:52:44.818847 30052 solver.cpp:244]     Train net output #1: loss = 1.69666 (* 1 = 1.69666 loss)\nI1206 
09:52:45.006247 30052 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1206 09:59:38.692500 30052 solver.cpp:337] Iteration 600, Testing net (#0)\nI1206 10:02:17.711758 30052 solver.cpp:404]     Test net output #0: accuracy = 0.367647\nI1206 10:02:17.712016 30052 solver.cpp:404]     Test net output #1: loss = 1.73196 (* 1 = 1.73196 loss)\nI1206 10:02:21.693876 30052 solver.cpp:228] Iteration 600, loss = 1.61659\nI1206 10:02:21.693920 30052 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1206 10:02:21.693943 30052 solver.cpp:244]     Train net output #1: loss = 1.61659 (* 1 = 1.61659 loss)\nI1206 10:02:21.878744 30052 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1206 10:09:15.581826 30052 solver.cpp:337] Iteration 700, Testing net (#0)\nI1206 10:11:54.767410 30052 solver.cpp:404]     Test net output #0: accuracy = 0.382118\nI1206 10:11:54.767663 30052 solver.cpp:404]     Test net output #1: loss = 1.69784 (* 1 = 1.69784 loss)\nI1206 10:11:58.748255 30052 solver.cpp:228] Iteration 700, loss = 1.58971\nI1206 10:11:58.748301 30052 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 10:11:58.748324 30052 solver.cpp:244]     Train net output #1: loss = 1.58971 (* 1 = 1.58971 loss)\nI1206 10:11:58.934504 30052 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1206 10:18:52.419904 30052 solver.cpp:337] Iteration 800, Testing net (#0)\nI1206 10:21:31.663827 30052 solver.cpp:404]     Test net output #0: accuracy = 0.408177\nI1206 10:21:31.664094 30052 solver.cpp:404]     Test net output #1: loss = 1.65132 (* 1 = 1.65132 loss)\nI1206 10:21:35.646963 30052 solver.cpp:228] Iteration 800, loss = 1.76782\nI1206 10:21:35.647009 30052 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 10:21:35.647032 30052 solver.cpp:244]     Train net output #1: loss = 1.76782 (* 1 = 1.76782 loss)\nI1206 10:21:35.834993 30052 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1206 10:28:29.619946 30052 solver.cpp:337] Iteration 900, Testing net 
(#0)\nI1206 10:31:07.697551 30052 solver.cpp:404]     Test net output #0: accuracy = 0.426471\nI1206 10:31:07.697808 30052 solver.cpp:404]     Test net output #1: loss = 1.60697 (* 1 = 1.60697 loss)\nI1206 10:31:11.679191 30052 solver.cpp:228] Iteration 900, loss = 1.62484\nI1206 10:31:11.679235 30052 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 10:31:11.679257 30052 solver.cpp:244]     Train net output #1: loss = 1.62484 (* 1 = 1.62484 loss)\nI1206 10:31:11.867130 30052 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1206 10:38:05.310047 30052 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1206 10:40:44.264523 30052 solver.cpp:404]     Test net output #0: accuracy = 0.409883\nI1206 10:40:44.264787 30052 solver.cpp:404]     Test net output #1: loss = 1.65043 (* 1 = 1.65043 loss)\nI1206 10:40:48.348686 30052 solver.cpp:228] Iteration 1000, loss = 1.77964\nI1206 10:40:48.348733 30052 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 10:40:48.348749 30052 solver.cpp:244]     Train net output #1: loss = 1.77964 (* 1 = 1.77964 loss)\nI1206 10:40:48.543378 30052 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1206 10:47:42.627142 30052 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1206 10:50:22.138752 30052 solver.cpp:404]     Test net output #0: accuracy = 0.418118\nI1206 10:50:22.138983 30052 solver.cpp:404]     Test net output #1: loss = 1.62052 (* 1 = 1.62052 loss)\nI1206 10:50:26.118896 30052 solver.cpp:228] Iteration 1100, loss = 1.58645\nI1206 10:50:26.118949 30052 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 10:50:26.118966 30052 solver.cpp:244]     Train net output #1: loss = 1.58645 (* 1 = 1.58645 loss)\nI1206 10:50:26.306249 30052 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1206 10:57:19.663306 30052 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1206 10:59:58.568176 30052 solver.cpp:404]     Test net output #0: accuracy = 0.406706\nI1206 10:59:58.568454 30052 solver.cpp:404]     
Test net output #1: loss = 1.6236 (* 1 = 1.6236 loss)\nI1206 11:00:02.548395 30052 solver.cpp:228] Iteration 1200, loss = 1.70497\nI1206 11:00:02.548434 30052 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 11:00:02.548450 30052 solver.cpp:244]     Train net output #1: loss = 1.70497 (* 1 = 1.70497 loss)\nI1206 11:00:02.734534 30052 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1206 11:06:56.179580 30052 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1206 11:09:33.910069 30052 solver.cpp:404]     Test net output #0: accuracy = 0.421412\nI1206 11:09:33.910320 30052 solver.cpp:404]     Test net output #1: loss = 1.58283 (* 1 = 1.58283 loss)\nI1206 11:09:37.890722 30052 solver.cpp:228] Iteration 1300, loss = 1.40878\nI1206 11:09:37.890772 30052 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 11:09:37.890789 30052 solver.cpp:244]     Train net output #1: loss = 1.40878 (* 1 = 1.40878 loss)\nI1206 11:09:38.078086 30052 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1206 11:16:31.671644 30052 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1206 11:19:09.537205 30052 solver.cpp:404]     Test net output #0: accuracy = 0.422824\nI1206 11:19:09.537408 30052 solver.cpp:404]     Test net output #1: loss = 1.55909 (* 1 = 1.55909 loss)\nI1206 11:19:13.475653 30052 solver.cpp:228] Iteration 1400, loss = 1.36252\nI1206 11:19:13.475704 30052 solver.cpp:244]     Train net output #0: accuracy = 0.470588\nI1206 11:19:13.475721 30052 solver.cpp:244]     Train net output #1: loss = 1.36252 (* 1 = 1.36252 loss)\nI1206 11:19:13.705839 30052 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1206 11:26:07.122917 30052 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1206 11:28:44.671087 30052 solver.cpp:404]     Test net output #0: accuracy = 0.414589\nI1206 11:28:44.671289 30052 solver.cpp:404]     Test net output #1: loss = 1.61958 (* 1 = 1.61958 loss)\nI1206 11:28:48.611757 30052 solver.cpp:228] Iteration 1500, loss = 1.60766\nI1206 
11:28:48.611806 30052 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 11:28:48.611824 30052 solver.cpp:244]     Train net output #1: loss = 1.60766 (* 1 = 1.60766 loss)\nI1206 11:28:48.841822 30052 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1206 11:35:42.311974 30052 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1206 11:38:19.963068 30052 solver.cpp:404]     Test net output #0: accuracy = 0.428765\nI1206 11:38:19.963312 30052 solver.cpp:404]     Test net output #1: loss = 1.57535 (* 1 = 1.57535 loss)\nI1206 11:38:23.902079 30052 solver.cpp:228] Iteration 1600, loss = 1.70419\nI1206 11:38:23.902122 30052 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 11:38:23.902137 30052 solver.cpp:244]     Train net output #1: loss = 1.70419 (* 1 = 1.70419 loss)\nI1206 11:38:24.134215 30052 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1206 11:45:18.218684 30052 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1206 11:47:55.871240 30052 solver.cpp:404]     Test net output #0: accuracy = 0.444471\nI1206 11:47:55.871484 30052 solver.cpp:404]     Test net output #1: loss = 1.5351 (* 1 = 1.5351 loss)\nI1206 11:47:59.809952 30052 solver.cpp:228] Iteration 1700, loss = 1.44069\nI1206 11:47:59.809996 30052 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1206 11:47:59.810014 30052 solver.cpp:244]     Train net output #1: loss = 1.44069 (* 1 = 1.44069 loss)\nI1206 11:48:00.118463 30052 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1206 11:54:54.898797 30052 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1206 11:57:32.731488 30052 solver.cpp:404]     Test net output #0: accuracy = 0.399412\nI1206 11:57:32.731704 30052 solver.cpp:404]     Test net output #1: loss = 1.67109 (* 1 = 1.67109 loss)\nI1206 11:57:36.669483 30052 solver.cpp:228] Iteration 1800, loss = 1.40594\nI1206 11:57:36.669528 30052 solver.cpp:244]     Train net output #0: accuracy = 0.541176\nI1206 11:57:36.669543 30052 solver.cpp:244]     Train net 
output #1: loss = 1.40594 (* 1 = 1.40594 loss)\nI1206 11:57:36.913802 30052 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1206 12:04:31.795003 30052 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1206 12:07:09.918411 30052 solver.cpp:404]     Test net output #0: accuracy = 0.420883\nI1206 12:07:09.918596 30052 solver.cpp:404]     Test net output #1: loss = 1.5689 (* 1 = 1.5689 loss)\nI1206 12:07:13.856734 30052 solver.cpp:228] Iteration 1900, loss = 1.61175\nI1206 12:07:13.856778 30052 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 12:07:13.856794 30052 solver.cpp:244]     Train net output #1: loss = 1.61175 (* 1 = 1.61175 loss)\nI1206 12:07:14.098929 30052 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1206 12:14:08.756554 30052 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1206 12:16:46.622339 30052 solver.cpp:404]     Test net output #0: accuracy = 0.449295\nI1206 12:16:46.622617 30052 solver.cpp:404]     Test net output #1: loss = 1.52851 (* 1 = 1.52851 loss)\nI1206 12:16:50.561990 30052 solver.cpp:228] Iteration 2000, loss = 1.62506\nI1206 12:16:50.562031 30052 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 12:16:50.562047 30052 solver.cpp:244]     Train net output #1: loss = 1.62506 (* 1 = 1.62506 loss)\nI1206 12:16:50.808071 30052 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1206 12:23:45.529664 30052 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1206 12:26:23.635180 30052 solver.cpp:404]     Test net output #0: accuracy = 0.328529\nI1206 12:26:23.635395 30052 solver.cpp:404]     Test net output #1: loss = 1.90139 (* 1 = 1.90139 loss)\nI1206 12:26:27.574249 30052 solver.cpp:228] Iteration 2100, loss = 1.82662\nI1206 12:26:27.574292 30052 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 12:26:27.574307 30052 solver.cpp:244]     Train net output #1: loss = 1.82662 (* 1 = 1.82662 loss)\nI1206 12:26:27.909590 30052 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1206 
12:33:22.807772 30052 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1206 12:36:00.940610 30052 solver.cpp:404]     Test net output #0: accuracy = 0.452412\nI1206 12:36:00.940850 30052 solver.cpp:404]     Test net output #1: loss = 1.49955 (* 1 = 1.49955 loss)\nI1206 12:36:04.878747 30052 solver.cpp:228] Iteration 2200, loss = 1.51563\nI1206 12:36:04.878789 30052 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 12:36:04.878804 30052 solver.cpp:244]     Train net output #1: loss = 1.51563 (* 1 = 1.51563 loss)\nI1206 12:36:05.126547 30052 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1206 12:42:59.979908 30052 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1206 12:45:38.032213 30052 solver.cpp:404]     Test net output #0: accuracy = 0.447471\nI1206 12:45:38.032444 30052 solver.cpp:404]     Test net output #1: loss = 1.52557 (* 1 = 1.52557 loss)\nI1206 12:45:41.969902 30052 solver.cpp:228] Iteration 2300, loss = 1.47701\nI1206 12:45:41.969944 30052 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1206 12:45:41.969960 30052 solver.cpp:244]     Train net output #1: loss = 1.47701 (* 1 = 1.47701 loss)\nI1206 12:45:42.212627 30052 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1206 12:52:36.842715 30052 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1206 12:55:14.060811 30052 solver.cpp:404]     Test net output #0: accuracy = 0.41\nI1206 12:55:14.061084 30052 solver.cpp:404]     Test net output #1: loss = 1.64097 (* 1 = 1.64097 loss)\nI1206 12:55:17.999022 30052 solver.cpp:228] Iteration 2400, loss = 1.61477\nI1206 12:55:17.999063 30052 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 12:55:17.999079 30052 solver.cpp:244]     Train net output #1: loss = 1.61477 (* 1 = 1.61477 loss)\nI1206 12:55:18.244984 30052 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1206 13:02:13.010697 30052 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1206 13:04:50.751312 30052 solver.cpp:404]     Test net output #0: 
accuracy = 0.434648\nI1206 13:04:50.751557 30052 solver.cpp:404]     Test net output #1: loss = 1.54876 (* 1 = 1.54876 loss)\nI1206 13:04:54.690065 30052 solver.cpp:228] Iteration 2500, loss = 1.37537\nI1206 13:04:54.690107 30052 solver.cpp:244]     Train net output #0: accuracy = 0.482353\nI1206 13:04:54.690124 30052 solver.cpp:244]     Train net output #1: loss = 1.37537 (* 1 = 1.37537 loss)\nI1206 13:04:54.929355 30052 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1206 13:11:49.574621 30052 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1206 13:14:27.693084 30052 solver.cpp:404]     Test net output #0: accuracy = 0.411177\nI1206 13:14:27.693325 30052 solver.cpp:404]     Test net output #1: loss = 1.59029 (* 1 = 1.59029 loss)\nI1206 13:14:31.630748 30052 solver.cpp:228] Iteration 2600, loss = 1.78535\nI1206 13:14:31.630795 30052 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 13:14:31.630812 30052 solver.cpp:244]     Train net output #1: loss = 1.78535 (* 1 = 1.78535 loss)\nI1206 13:14:31.873126 30052 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1206 13:21:26.389554 30052 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1206 13:23:59.724347 30052 solver.cpp:404]     Test net output #0: accuracy = 0.459118\nI1206 13:23:59.724598 30052 solver.cpp:404]     Test net output #1: loss = 1.49109 (* 1 = 1.49109 loss)\nI1206 13:24:03.642560 30052 solver.cpp:228] Iteration 2700, loss = 1.51085\nI1206 13:24:03.642599 30052 solver.cpp:244]     Train net output #0: accuracy = 0.494118\nI1206 13:24:03.642616 30052 solver.cpp:244]     Train net output #1: loss = 1.51085 (* 1 = 1.51085 loss)\nI1206 13:24:03.905668 30052 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1206 13:30:58.473707 30052 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1206 13:33:31.818388 30052 solver.cpp:404]     Test net output #0: accuracy = 0.436589\nI1206 13:33:31.818641 30052 solver.cpp:404]     Test net output #1: loss = 1.53071 (* 1 = 1.53071 loss)\nI1206 
13:33:35.736698 30052 solver.cpp:228] Iteration 2800, loss = 1.45047\nI1206 13:33:35.736742 30052 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1206 13:33:35.736768 30052 solver.cpp:244]     Train net output #1: loss = 1.45047 (* 1 = 1.45047 loss)\nI1206 13:33:36.004225 30052 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1206 13:40:30.616940 30052 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1206 13:43:03.812361 30052 solver.cpp:404]     Test net output #0: accuracy = 0.42953\nI1206 13:43:03.812619 30052 solver.cpp:404]     Test net output #1: loss = 1.59002 (* 1 = 1.59002 loss)\nI1206 13:43:07.728088 30052 solver.cpp:228] Iteration 2900, loss = 1.57584\nI1206 13:43:07.728129 30052 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 13:43:07.728147 30052 solver.cpp:244]     Train net output #1: loss = 1.57584 (* 1 = 1.57584 loss)\nI1206 13:43:07.994392 30052 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1206 13:50:02.555436 30052 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1206 13:52:35.734637 30052 solver.cpp:404]     Test net output #0: accuracy = 0.431354\nI1206 13:52:35.734890 30052 solver.cpp:404]     Test net output #1: loss = 1.55531 (* 1 = 1.55531 loss)\nI1206 13:52:39.649040 30052 solver.cpp:228] Iteration 3000, loss = 1.56779\nI1206 13:52:39.649080 30052 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1206 13:52:39.649096 30052 solver.cpp:244]     Train net output #1: loss = 1.56779 (* 1 = 1.56779 loss)\nI1206 13:52:39.912696 30052 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1206 13:59:34.317862 30052 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1206 14:02:07.474539 30052 solver.cpp:404]     Test net output #0: accuracy = 0.409589\nI1206 14:02:07.474781 30052 solver.cpp:404]     Test net output #1: loss = 1.61435 (* 1 = 1.61435 loss)\nI1206 14:02:11.390313 30052 solver.cpp:228] Iteration 3100, loss = 1.51867\nI1206 14:02:11.390355 30052 solver.cpp:244]     Train net output #0: 
accuracy = 0.364706\nI1206 14:02:11.390372 30052 solver.cpp:244]     Train net output #1: loss = 1.51867 (* 1 = 1.51867 loss)\nI1206 14:02:11.650842 30052 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1206 14:09:05.929935 30052 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1206 14:11:39.114615 30052 solver.cpp:404]     Test net output #0: accuracy = 0.198647\nI1206 14:11:39.114859 30052 solver.cpp:404]     Test net output #1: loss = 5.70506 (* 1 = 5.70506 loss)\nI1206 14:11:43.029743 30052 solver.cpp:228] Iteration 3200, loss = 4.97321\nI1206 14:11:43.029785 30052 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 14:11:43.029803 30052 solver.cpp:244]     Train net output #1: loss = 4.97321 (* 1 = 4.97321 loss)\nI1206 14:11:43.293850 30052 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1206 14:18:37.696907 30052 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1206 14:21:10.871260 30052 solver.cpp:404]     Test net output #0: accuracy = 0.19153\nI1206 14:21:10.871506 30052 solver.cpp:404]     Test net output #1: loss = 2.66048 (* 1 = 2.66048 loss)\nI1206 14:21:14.787207 30052 solver.cpp:228] Iteration 3300, loss = 2.94446\nI1206 14:21:14.787248 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 14:21:14.787264 30052 solver.cpp:244]     Train net output #1: loss = 2.94446 (* 1 = 2.94446 loss)\nI1206 14:21:15.049480 30052 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1206 14:28:09.391729 30052 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1206 14:30:42.531949 30052 solver.cpp:404]     Test net output #0: accuracy = 0.227059\nI1206 14:30:42.532204 30052 solver.cpp:404]     Test net output #1: loss = 2.58599 (* 1 = 2.58599 loss)\nI1206 14:30:46.446131 30052 solver.cpp:228] Iteration 3400, loss = 2.89317\nI1206 14:30:46.446173 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 14:30:46.446189 30052 solver.cpp:244]     Train net output #1: loss = 2.89317 (* 1 = 2.89317 loss)\nI1206 
14:30:46.710165 30052 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1206 14:37:41.120615 30052 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1206 14:40:14.253180 30052 solver.cpp:404]     Test net output #0: accuracy = 0.281823\nI1206 14:40:14.253428 30052 solver.cpp:404]     Test net output #1: loss = 2.0255 (* 1 = 2.0255 loss)\nI1206 14:40:18.167206 30052 solver.cpp:228] Iteration 3500, loss = 2.02873\nI1206 14:40:18.167244 30052 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 14:40:18.167260 30052 solver.cpp:244]     Train net output #1: loss = 2.02873 (* 1 = 2.02873 loss)\nI1206 14:40:18.435597 30052 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1206 14:47:11.935596 30052 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1206 14:49:45.079104 30052 solver.cpp:404]     Test net output #0: accuracy = 0.286059\nI1206 14:49:45.079356 30052 solver.cpp:404]     Test net output #1: loss = 1.93383 (* 1 = 1.93383 loss)\nI1206 14:49:48.994957 30052 solver.cpp:228] Iteration 3600, loss = 1.88746\nI1206 14:49:48.994997 30052 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1206 14:49:48.995013 30052 solver.cpp:244]     Train net output #1: loss = 1.88746 (* 1 = 1.88746 loss)\nI1206 14:49:49.245997 30052 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1206 14:56:42.324120 30052 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1206 14:59:15.448984 30052 solver.cpp:404]     Test net output #0: accuracy = 0.270353\nI1206 14:59:15.449216 30052 solver.cpp:404]     Test net output #1: loss = 2.17343 (* 1 = 2.17343 loss)\nI1206 14:59:19.364080 30052 solver.cpp:228] Iteration 3700, loss = 2.3246\nI1206 14:59:19.364121 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 14:59:19.364140 30052 solver.cpp:244]     Train net output #1: loss = 2.3246 (* 1 = 2.3246 loss)\nI1206 14:59:19.616824 30052 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1206 15:06:12.907197 30052 solver.cpp:337] Iteration 3800, Testing net 
(#0)\nI1206 15:08:46.041445 30052 solver.cpp:404]     Test net output #0: accuracy = 0.273941\nI1206 15:08:46.041697 30052 solver.cpp:404]     Test net output #1: loss = 1.9539 (* 1 = 1.9539 loss)\nI1206 15:08:49.955205 30052 solver.cpp:228] Iteration 3800, loss = 1.94716\nI1206 15:08:49.955245 30052 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 15:08:49.955261 30052 solver.cpp:244]     Train net output #1: loss = 1.94716 (* 1 = 1.94716 loss)\nI1206 15:08:50.208961 30052 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI1206 15:15:43.584327 30052 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1206 15:18:16.724818 30052 solver.cpp:404]     Test net output #0: accuracy = 0.292706\nI1206 15:18:16.725136 30052 solver.cpp:404]     Test net output #1: loss = 1.98948 (* 1 = 1.98948 loss)\nI1206 15:18:20.640251 30052 solver.cpp:228] Iteration 3900, loss = 1.89111\nI1206 15:18:20.640291 30052 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1206 15:18:20.640306 30052 solver.cpp:244]     Train net output #1: loss = 1.89111 (* 1 = 1.89111 loss)\nI1206 15:18:20.893779 30052 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1206 15:25:14.250193 30052 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1206 15:27:47.392447 30052 solver.cpp:404]     Test net output #0: accuracy = 0.325765\nI1206 15:27:47.392699 30052 solver.cpp:404]     Test net output #1: loss = 1.81864 (* 1 = 1.81864 loss)\nI1206 15:27:51.307312 30052 solver.cpp:228] Iteration 4000, loss = 1.74628\nI1206 15:27:51.307353 30052 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 15:27:51.307369 30052 solver.cpp:244]     Train net output #1: loss = 1.74628 (* 1 = 1.74628 loss)\nI1206 15:27:51.560989 30052 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1206 15:34:44.769863 30052 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1206 15:37:17.925132 30052 solver.cpp:404]     Test net output #0: accuracy = 0.281059\nI1206 15:37:17.925362 30052 solver.cpp:404]     
Test net output #1: loss = 1.98541 (* 1 = 1.98541 loss)\nI1206 15:37:21.840052 30052 solver.cpp:228] Iteration 4100, loss = 2.14597\nI1206 15:37:21.840092 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 15:37:21.840109 30052 solver.cpp:244]     Train net output #1: loss = 2.14597 (* 1 = 2.14597 loss)\nI1206 15:37:22.092694 30052 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1206 15:44:15.164146 30052 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1206 15:46:48.333575 30052 solver.cpp:404]     Test net output #0: accuracy = 0.235412\nI1206 15:46:48.333803 30052 solver.cpp:404]     Test net output #1: loss = 2.91404 (* 1 = 2.91404 loss)\nI1206 15:46:52.248270 30052 solver.cpp:228] Iteration 4200, loss = 2.93828\nI1206 15:46:52.248311 30052 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1206 15:46:52.248327 30052 solver.cpp:244]     Train net output #1: loss = 2.93828 (* 1 = 2.93828 loss)\nI1206 15:46:52.501741 30052 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1206 15:53:46.005694 30052 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1206 15:56:19.152979 30052 solver.cpp:404]     Test net output #0: accuracy = 0.264176\nI1206 15:56:19.153228 30052 solver.cpp:404]     Test net output #1: loss = 2.43376 (* 1 = 2.43376 loss)\nI1206 15:56:23.068794 30052 solver.cpp:228] Iteration 4300, loss = 2.48924\nI1206 15:56:23.068835 30052 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 15:56:23.068852 30052 solver.cpp:244]     Train net output #1: loss = 2.48924 (* 1 = 2.48924 loss)\nI1206 15:56:23.323612 30052 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1206 16:03:16.743294 30052 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1206 16:05:49.894996 30052 solver.cpp:404]     Test net output #0: accuracy = 0.299706\nI1206 16:05:49.895249 30052 solver.cpp:404]     Test net output #1: loss = 1.86766 (* 1 = 1.86766 loss)\nI1206 16:05:53.809603 30052 solver.cpp:228] Iteration 4400, loss = 1.88724\nI1206 
16:05:53.809643 30052 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1206 16:05:53.809660 30052 solver.cpp:244]     Train net output #1: loss = 1.88724 (* 1 = 1.88724 loss)\nI1206 16:05:54.060750 30052 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1206 16:12:47.248754 30052 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1206 16:15:20.419412 30052 solver.cpp:404]     Test net output #0: accuracy = 0.309765\nI1206 16:15:20.419633 30052 solver.cpp:404]     Test net output #1: loss = 2.0242 (* 1 = 2.0242 loss)\nI1206 16:15:24.334606 30052 solver.cpp:228] Iteration 4500, loss = 2.16848\nI1206 16:15:24.334648 30052 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1206 16:15:24.334664 30052 solver.cpp:244]     Train net output #1: loss = 2.16848 (* 1 = 2.16848 loss)\nI1206 16:15:24.586431 30052 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1206 16:22:17.772024 30052 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1206 16:24:50.896971 30052 solver.cpp:404]     Test net output #0: accuracy = 0.172177\nI1206 16:24:50.897228 30052 solver.cpp:404]     Test net output #1: loss = 6.62276 (* 1 = 6.62276 loss)\nI1206 16:24:54.810766 30052 solver.cpp:228] Iteration 4600, loss = 6.77301\nI1206 16:24:54.810808 30052 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 16:24:54.810825 30052 solver.cpp:244]     Train net output #1: loss = 6.77301 (* 1 = 6.77301 loss)\nI1206 16:24:55.061892 30052 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1206 16:31:48.175853 30052 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1206 16:34:21.306969 30052 solver.cpp:404]     Test net output #0: accuracy = 0.157294\nI1206 16:34:21.307236 30052 solver.cpp:404]     Test net output #1: loss = 8.44181 (* 1 = 8.44181 loss)\nI1206 16:34:25.222280 30052 solver.cpp:228] Iteration 4700, loss = 7.20341\nI1206 16:34:25.222321 30052 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 16:34:25.222337 30052 solver.cpp:244]     Train net 
output #1: loss = 7.20341 (* 1 = 7.20341 loss)\nI1206 16:34:25.472535 30052 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1206 16:41:18.668200 30052 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1206 16:43:51.761930 30052 solver.cpp:404]     Test net output #0: accuracy = 0.264647\nI1206 16:43:51.762186 30052 solver.cpp:404]     Test net output #1: loss = 2.10712 (* 1 = 2.10712 loss)\nI1206 16:43:55.675319 30052 solver.cpp:228] Iteration 4800, loss = 2.03697\nI1206 16:43:55.675359 30052 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 16:43:55.675374 30052 solver.cpp:244]     Train net output #1: loss = 2.03697 (* 1 = 2.03697 loss)\nI1206 16:43:55.932361 30052 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1206 16:50:49.346386 30052 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1206 16:53:22.436513 30052 solver.cpp:404]     Test net output #0: accuracy = 0.172529\nI1206 16:53:22.436766 30052 solver.cpp:404]     Test net output #1: loss = 4.31307 (* 1 = 4.31307 loss)\nI1206 16:53:26.350003 30052 solver.cpp:228] Iteration 4900, loss = 3.76302\nI1206 16:53:26.350047 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 16:53:26.350064 30052 solver.cpp:244]     Train net output #1: loss = 3.76302 (* 1 = 3.76302 loss)\nI1206 16:53:26.605918 30052 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1206 17:00:19.624282 30052 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1206 17:02:52.693692 30052 solver.cpp:404]     Test net output #0: accuracy = 0.224177\nI1206 17:02:52.693943 30052 solver.cpp:404]     Test net output #1: loss = 5.26652 (* 1 = 5.26652 loss)\nI1206 17:02:56.608211 30052 solver.cpp:228] Iteration 5000, loss = 4.4046\nI1206 17:02:56.608252 30052 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 17:02:56.608268 30052 solver.cpp:244]     Train net output #1: loss = 4.4046 (* 1 = 4.4046 loss)\nI1206 17:02:56.865031 30052 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1206 
17:09:50.304819 30052 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1206 17:12:23.371712 30052 solver.cpp:404]     Test net output #0: accuracy = 0.178882\nI1206 17:12:23.371950 30052 solver.cpp:404]     Test net output #1: loss = 4.78595 (* 1 = 4.78595 loss)\nI1206 17:12:27.285600 30052 solver.cpp:228] Iteration 5100, loss = 5.42798\nI1206 17:12:27.285639 30052 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1206 17:12:27.285655 30052 solver.cpp:244]     Train net output #1: loss = 5.42798 (* 1 = 5.42798 loss)\nI1206 17:12:27.543236 30052 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1206 17:19:20.990423 30052 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1206 17:21:54.043058 30052 solver.cpp:404]     Test net output #0: accuracy = 0.239\nI1206 17:21:54.043300 30052 solver.cpp:404]     Test net output #1: loss = 2.76902 (* 1 = 2.76902 loss)\nI1206 17:21:57.959826 30052 solver.cpp:228] Iteration 5200, loss = 3.25567\nI1206 17:21:57.959864 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 17:21:57.959879 30052 solver.cpp:244]     Train net output #1: loss = 3.25567 (* 1 = 3.25567 loss)\nI1206 17:21:58.207381 30052 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1206 17:28:51.405133 30052 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1206 17:31:24.500493 30052 solver.cpp:404]     Test net output #0: accuracy = 0.209\nI1206 17:31:24.500747 30052 solver.cpp:404]     Test net output #1: loss = 4.31669 (* 1 = 4.31669 loss)\nI1206 17:31:28.414376 30052 solver.cpp:228] Iteration 5300, loss = 4.53178\nI1206 17:31:28.414412 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 17:31:28.414429 30052 solver.cpp:244]     Train net output #1: loss = 4.53178 (* 1 = 4.53178 loss)\nI1206 17:31:28.670513 30052 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1206 17:38:21.785415 30052 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1206 17:40:54.835957 30052 solver.cpp:404]     Test net output #0: accuracy = 
0.166706\nI1206 17:40:54.836221 30052 solver.cpp:404]     Test net output #1: loss = 7.41613 (* 1 = 7.41613 loss)\nI1206 17:40:58.751765 30052 solver.cpp:228] Iteration 5400, loss = 7.09262\nI1206 17:40:58.751804 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 17:40:58.751821 30052 solver.cpp:244]     Train net output #1: loss = 7.09262 (* 1 = 7.09262 loss)\nI1206 17:40:59.006790 30052 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1206 17:47:52.265624 30052 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1206 17:50:25.330440 30052 solver.cpp:404]     Test net output #0: accuracy = 0.166824\nI1206 17:50:25.330677 30052 solver.cpp:404]     Test net output #1: loss = 4.25176 (* 1 = 4.25176 loss)\nI1206 17:50:29.244315 30052 solver.cpp:228] Iteration 5500, loss = 4.23121\nI1206 17:50:29.244354 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 17:50:29.244369 30052 solver.cpp:244]     Train net output #1: loss = 4.23121 (* 1 = 4.23121 loss)\nI1206 17:50:29.498915 30052 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1206 17:57:22.887486 30052 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1206 17:59:55.958811 30052 solver.cpp:404]     Test net output #0: accuracy = 0.248059\nI1206 17:59:55.959069 30052 solver.cpp:404]     Test net output #1: loss = 3.85775 (* 1 = 3.85775 loss)\nI1206 17:59:59.872496 30052 solver.cpp:228] Iteration 5600, loss = 4.00249\nI1206 17:59:59.872535 30052 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1206 17:59:59.872552 30052 solver.cpp:244]     Train net output #1: loss = 4.00249 (* 1 = 4.00249 loss)\nI1206 18:00:00.128010 30052 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1206 18:06:53.562325 30052 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1206 18:09:26.608086 30052 solver.cpp:404]     Test net output #0: accuracy = 0.172882\nI1206 18:09:26.608327 30052 solver.cpp:404]     Test net output #1: loss = 5.26849 (* 1 = 5.26849 loss)\nI1206 18:09:30.523674 30052 
solver.cpp:228] Iteration 5700, loss = 4.83831\nI1206 18:09:30.523715 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 18:09:30.523730 30052 solver.cpp:244]     Train net output #1: loss = 4.83831 (* 1 = 4.83831 loss)\nI1206 18:09:30.779120 30052 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1206 18:16:24.115422 30052 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1206 18:18:57.160006 30052 solver.cpp:404]     Test net output #0: accuracy = 0.211118\nI1206 18:18:57.160254 30052 solver.cpp:404]     Test net output #1: loss = 5.67728 (* 1 = 5.67728 loss)\nI1206 18:19:01.073943 30052 solver.cpp:228] Iteration 5800, loss = 5.86557\nI1206 18:19:01.073983 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 18:19:01.073999 30052 solver.cpp:244]     Train net output #1: loss = 5.86557 (* 1 = 5.86557 loss)\nI1206 18:19:01.333576 30052 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1206 18:25:54.654052 30052 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1206 18:28:27.660043 30052 solver.cpp:404]     Test net output #0: accuracy = 0.211824\nI1206 18:28:27.660285 30052 solver.cpp:404]     Test net output #1: loss = 3.95524 (* 1 = 3.95524 loss)\nI1206 18:28:31.575469 30052 solver.cpp:228] Iteration 5900, loss = 3.29636\nI1206 18:28:31.575506 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 18:28:31.575522 30052 solver.cpp:244]     Train net output #1: loss = 3.29636 (* 1 = 3.29636 loss)\nI1206 18:28:31.824934 30052 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1206 18:35:24.973505 30052 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1206 18:37:57.951629 30052 solver.cpp:404]     Test net output #0: accuracy = 0.183647\nI1206 18:37:57.951875 30052 solver.cpp:404]     Test net output #1: loss = 10.6099 (* 1 = 10.6099 loss)\nI1206 18:38:01.868201 30052 solver.cpp:228] Iteration 6000, loss = 9.19019\nI1206 18:38:01.868239 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 
18:38:01.868255 30052 solver.cpp:244]     Train net output #1: loss = 9.19019 (* 1 = 9.19019 loss)\nI1206 18:38:02.125872 30052 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1206 18:44:55.458112 30052 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1206 18:47:28.466979 30052 solver.cpp:404]     Test net output #0: accuracy = 0.179824\nI1206 18:47:28.467231 30052 solver.cpp:404]     Test net output #1: loss = 5.55316 (* 1 = 5.55316 loss)\nI1206 18:47:32.381959 30052 solver.cpp:228] Iteration 6100, loss = 5.38344\nI1206 18:47:32.381999 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 18:47:32.382014 30052 solver.cpp:244]     Train net output #1: loss = 5.38344 (* 1 = 5.38344 loss)\nI1206 18:47:32.638583 30052 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1206 18:54:25.794644 30052 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1206 18:56:58.833986 30052 solver.cpp:404]     Test net output #0: accuracy = 0.172\nI1206 18:56:58.834244 30052 solver.cpp:404]     Test net output #1: loss = 7.51208 (* 1 = 7.51208 loss)\nI1206 18:57:02.748796 30052 solver.cpp:228] Iteration 6200, loss = 7.10659\nI1206 18:57:02.748834 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 18:57:02.748852 30052 solver.cpp:244]     Train net output #1: loss = 7.10659 (* 1 = 7.10659 loss)\nI1206 18:57:03.001288 30052 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1206 19:03:56.160673 30052 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1206 19:06:29.185411 30052 solver.cpp:404]     Test net output #0: accuracy = 0.225059\nI1206 19:06:29.185648 30052 solver.cpp:404]     Test net output #1: loss = 4.32798 (* 1 = 4.32798 loss)\nI1206 19:06:33.099045 30052 solver.cpp:228] Iteration 6300, loss = 4.29086\nI1206 19:06:33.099083 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 19:06:33.099099 30052 solver.cpp:244]     Train net output #1: loss = 4.29086 (* 1 = 4.29086 loss)\nI1206 19:06:33.353427 30052 
sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1206 19:13:26.315094 30052 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1206 19:15:59.368909 30052 solver.cpp:404]     Test net output #0: accuracy = 0.222647\nI1206 19:15:59.369137 30052 solver.cpp:404]     Test net output #1: loss = 5.27249 (* 1 = 5.27249 loss)\nI1206 19:16:03.285488 30052 solver.cpp:228] Iteration 6400, loss = 5.26644\nI1206 19:16:03.285527 30052 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1206 19:16:03.285544 30052 solver.cpp:244]     Train net output #1: loss = 5.26644 (* 1 = 5.26644 loss)\nI1206 19:16:03.539069 30052 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1206 19:22:56.988153 30052 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1206 19:25:30.020153 30052 solver.cpp:404]     Test net output #0: accuracy = 0.227353\nI1206 19:25:30.020354 30052 solver.cpp:404]     Test net output #1: loss = 4.45647 (* 1 = 4.45647 loss)\nI1206 19:25:33.935122 30052 solver.cpp:228] Iteration 6500, loss = 4.83641\nI1206 19:25:33.935158 30052 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 19:25:33.935174 30052 solver.cpp:244]     Train net output #1: loss = 4.83641 (* 1 = 4.83641 loss)\nI1206 19:25:34.191632 30052 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1206 19:32:27.534364 30052 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1206 19:35:00.544961 30052 solver.cpp:404]     Test net output #0: accuracy = 0.198353\nI1206 19:35:00.545210 30052 solver.cpp:404]     Test net output #1: loss = 4.75282 (* 1 = 4.75282 loss)\nI1206 19:35:04.459378 30052 solver.cpp:228] Iteration 6600, loss = 5.2905\nI1206 19:35:04.459417 30052 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1206 19:35:04.459434 30052 solver.cpp:244]     Train net output #1: loss = 5.2905 (* 1 = 5.2905 loss)\nI1206 19:35:04.719068 30052 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1206 19:41:58.025084 30052 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1206 
19:44:31.078990 30052 solver.cpp:404]     Test net output #0: accuracy = 0.227588\nI1206 19:44:31.079238 30052 solver.cpp:404]     Test net output #1: loss = 7.8338 (* 1 = 7.8338 loss)\nI1206 19:44:34.992902 30052 solver.cpp:228] Iteration 6700, loss = 8.36179\nI1206 19:44:34.992943 30052 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 19:44:34.992959 30052 solver.cpp:244]     Train net output #1: loss = 8.36179 (* 1 = 8.36179 loss)\nI1206 19:44:35.250494 30052 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1206 19:51:28.715987 30052 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1206 19:54:01.772847 30052 solver.cpp:404]     Test net output #0: accuracy = 0.132765\nI1206 19:54:01.773094 30052 solver.cpp:404]     Test net output #1: loss = 10.8238 (* 1 = 10.8238 loss)\nI1206 19:54:05.687316 30052 solver.cpp:228] Iteration 6800, loss = 9.72472\nI1206 19:54:05.687356 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 19:54:05.687373 30052 solver.cpp:244]     Train net output #1: loss = 9.72472 (* 1 = 9.72472 loss)\nI1206 19:54:05.943814 30052 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1206 20:00:59.196925 30052 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1206 20:03:32.256016 30052 solver.cpp:404]     Test net output #0: accuracy = 0.176882\nI1206 20:03:32.256255 30052 solver.cpp:404]     Test net output #1: loss = 6.7471 (* 1 = 6.7471 loss)\nI1206 20:03:36.170539 30052 solver.cpp:228] Iteration 6900, loss = 6.8525\nI1206 20:03:36.170578 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 20:03:36.170594 30052 solver.cpp:244]     Train net output #1: loss = 6.8525 (* 1 = 6.8525 loss)\nI1206 20:03:36.430146 30052 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1206 20:10:29.988286 30052 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1206 20:13:03.024072 30052 solver.cpp:404]     Test net output #0: accuracy = 0.217294\nI1206 20:13:03.024309 30052 solver.cpp:404]     Test net output 
#1: loss = 6.85272 (* 1 = 6.85272 loss)\nI1206 20:13:06.937582 30052 solver.cpp:228] Iteration 7000, loss = 7.89111\nI1206 20:13:06.937623 30052 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 20:13:06.937639 30052 solver.cpp:244]     Train net output #1: loss = 7.89111 (* 1 = 7.89111 loss)\nI1206 20:13:07.202105 30052 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1206 20:20:01.616385 30052 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1206 20:22:34.679574 30052 solver.cpp:404]     Test net output #0: accuracy = 0.191941\nI1206 20:22:34.679805 30052 solver.cpp:404]     Test net output #1: loss = 11.2723 (* 1 = 11.2723 loss)\nI1206 20:22:38.595203 30052 solver.cpp:228] Iteration 7100, loss = 11.0078\nI1206 20:22:38.595240 30052 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1206 20:22:38.595257 30052 solver.cpp:244]     Train net output #1: loss = 11.0078 (* 1 = 11.0078 loss)\nI1206 20:22:38.861476 30052 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1206 20:29:33.371650 30052 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1206 20:32:06.401239 30052 solver.cpp:404]     Test net output #0: accuracy = 0.188235\nI1206 20:32:06.401475 30052 solver.cpp:404]     Test net output #1: loss = 8.3505 (* 1 = 8.3505 loss)\nI1206 20:32:10.315362 30052 solver.cpp:228] Iteration 7200, loss = 8.43489\nI1206 20:32:10.315402 30052 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 20:32:10.315416 30052 solver.cpp:244]     Train net output #1: loss = 8.43489 (* 1 = 8.43489 loss)\nI1206 20:32:10.584323 30052 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1206 20:39:05.100466 30052 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1206 20:41:38.133337 30052 solver.cpp:404]     Test net output #0: accuracy = 0.196706\nI1206 20:41:38.133559 30052 solver.cpp:404]     Test net output #1: loss = 8.16757 (* 1 = 8.16757 loss)\nI1206 20:41:42.049060 30052 solver.cpp:228] Iteration 7300, loss = 9.30326\nI1206 20:41:42.049101 
30052 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 20:41:42.049118 30052 solver.cpp:244]     Train net output #1: loss = 9.30326 (* 1 = 9.30326 loss)\nI1206 20:41:42.312983 30052 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1206 20:48:36.718070 30052 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1206 20:51:09.749704 30052 solver.cpp:404]     Test net output #0: accuracy = 0.219353\nI1206 20:51:09.749945 30052 solver.cpp:404]     Test net output #1: loss = 6.40003 (* 1 = 6.40003 loss)\nI1206 20:51:13.663547 30052 solver.cpp:228] Iteration 7400, loss = 7.49557\nI1206 20:51:13.663584 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 20:51:13.663600 30052 solver.cpp:244]     Train net output #1: loss = 7.49557 (* 1 = 7.49557 loss)\nI1206 20:51:13.926920 30052 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1206 20:58:08.307508 30052 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1206 21:00:41.344962 30052 solver.cpp:404]     Test net output #0: accuracy = 0.207471\nI1206 21:00:41.345207 30052 solver.cpp:404]     Test net output #1: loss = 6.71007 (* 1 = 6.71007 loss)\nI1206 21:00:45.261477 30052 solver.cpp:228] Iteration 7500, loss = 7.4936\nI1206 21:00:45.261515 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 21:00:45.261531 30052 solver.cpp:244]     Train net output #1: loss = 7.4936 (* 1 = 7.4936 loss)\nI1206 21:00:45.524086 30052 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1206 21:07:40.098603 30052 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1206 21:10:13.161119 30052 solver.cpp:404]     Test net output #0: accuracy = 0.249235\nI1206 21:10:13.161350 30052 solver.cpp:404]     Test net output #1: loss = 5.6363 (* 1 = 5.6363 loss)\nI1206 21:10:17.078526 30052 solver.cpp:228] Iteration 7600, loss = 7.14086\nI1206 21:10:17.078563 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 21:10:17.078580 30052 solver.cpp:244]     Train net output #1: loss = 
7.14086 (* 1 = 7.14086 loss)\nI1206 21:10:17.340718 30052 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1206 21:17:12.072698 30052 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1206 21:19:45.081892 30052 solver.cpp:404]     Test net output #0: accuracy = 0.181471\nI1206 21:19:45.082129 30052 solver.cpp:404]     Test net output #1: loss = 11.722 (* 1 = 11.722 loss)\nI1206 21:19:48.995946 30052 solver.cpp:228] Iteration 7700, loss = 15.1486\nI1206 21:19:48.995985 30052 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 21:19:48.996001 30052 solver.cpp:244]     Train net output #1: loss = 15.1486 (* 1 = 15.1486 loss)\nI1206 21:19:49.264678 30052 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1206 21:26:43.833448 30052 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1206 21:29:16.831324 30052 solver.cpp:404]     Test net output #0: accuracy = 0.180647\nI1206 21:29:16.831548 30052 solver.cpp:404]     Test net output #1: loss = 8.13314 (* 1 = 8.13314 loss)\nI1206 21:29:20.746654 30052 solver.cpp:228] Iteration 7800, loss = 8.42751\nI1206 21:29:20.746685 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 21:29:20.746700 30052 solver.cpp:244]     Train net output #1: loss = 8.42751 (* 1 = 8.42751 loss)\nI1206 21:29:21.013922 30052 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1206 21:36:15.571612 30052 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1206 21:38:48.602991 30052 solver.cpp:404]     Test net output #0: accuracy = 0.228706\nI1206 21:38:48.603250 30052 solver.cpp:404]     Test net output #1: loss = 6.32946 (* 1 = 6.32946 loss)\nI1206 21:38:52.517529 30052 solver.cpp:228] Iteration 7900, loss = 6.5575\nI1206 21:38:52.517567 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 21:38:52.517585 30052 solver.cpp:244]     Train net output #1: loss = 6.5575 (* 1 = 6.5575 loss)\nI1206 21:38:52.781236 30052 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1206 21:45:47.127362 30052 solver.cpp:337] 
Iteration 8000, Testing net (#0)\nI1206 21:48:20.165073 30052 solver.cpp:404]     Test net output #0: accuracy = 0.206118\nI1206 21:48:20.165309 30052 solver.cpp:404]     Test net output #1: loss = 6.52829 (* 1 = 6.52829 loss)\nI1206 21:48:24.080142 30052 solver.cpp:228] Iteration 8000, loss = 6.98582\nI1206 21:48:24.080178 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 21:48:24.080195 30052 solver.cpp:244]     Train net output #1: loss = 6.98582 (* 1 = 6.98582 loss)\nI1206 21:48:24.345006 30052 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1206 21:55:18.833706 30052 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1206 21:57:51.663411 30052 solver.cpp:404]     Test net output #0: accuracy = 0.186529\nI1206 21:57:51.663651 30052 solver.cpp:404]     Test net output #1: loss = 10.806 (* 1 = 10.806 loss)\nI1206 21:57:55.579637 30052 solver.cpp:228] Iteration 8100, loss = 11.9406\nI1206 21:57:55.579676 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 21:57:55.579694 30052 solver.cpp:244]     Train net output #1: loss = 11.9406 (* 1 = 11.9406 loss)\nI1206 21:57:55.845368 30052 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1206 22:04:50.368504 30052 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1206 22:07:23.219200 30052 solver.cpp:404]     Test net output #0: accuracy = 0.187941\nI1206 22:07:23.219442 30052 solver.cpp:404]     Test net output #1: loss = 16.1111 (* 1 = 16.1111 loss)\nI1206 22:07:27.135659 30052 solver.cpp:228] Iteration 8200, loss = 15.9826\nI1206 22:07:27.135699 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 22:07:27.135716 30052 solver.cpp:244]     Train net output #1: loss = 15.9826 (* 1 = 15.9826 loss)\nI1206 22:07:27.399677 30052 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1206 22:14:22.065578 30052 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1206 22:16:54.938576 30052 solver.cpp:404]     Test net output #0: accuracy = 0.151294\nI1206 22:16:54.938822 
30052 solver.cpp:404]     Test net output #1: loss = 17.1577 (* 1 = 17.1577 loss)\nI1206 22:16:58.854749 30052 solver.cpp:228] Iteration 8300, loss = 17.5578\nI1206 22:16:58.854789 30052 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 22:16:58.854805 30052 solver.cpp:244]     Train net output #1: loss = 17.5578 (* 1 = 17.5578 loss)\nI1206 22:16:59.115628 30052 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1206 22:23:53.408776 30052 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1206 22:26:26.270648 30052 solver.cpp:404]     Test net output #0: accuracy = 0.217824\nI1206 22:26:26.270853 30052 solver.cpp:404]     Test net output #1: loss = 8.8401 (* 1 = 8.8401 loss)\nI1206 22:26:30.184871 30052 solver.cpp:228] Iteration 8400, loss = 8.6074\nI1206 22:26:30.184911 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 22:26:30.184926 30052 solver.cpp:244]     Train net output #1: loss = 8.6074 (* 1 = 8.6074 loss)\nI1206 22:26:30.450793 30052 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1206 22:33:25.000269 30052 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1206 22:35:57.958266 30052 solver.cpp:404]     Test net output #0: accuracy = 0.192118\nI1206 22:35:57.958511 30052 solver.cpp:404]     Test net output #1: loss = 12.5768 (* 1 = 12.5768 loss)\nI1206 22:36:01.873535 30052 solver.cpp:228] Iteration 8500, loss = 13.3111\nI1206 22:36:01.873566 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 22:36:01.873582 30052 solver.cpp:244]     Train net output #1: loss = 13.3111 (* 1 = 13.3111 loss)\nI1206 22:36:02.137900 30052 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1206 22:42:56.724414 30052 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1206 22:45:29.756284 30052 solver.cpp:404]     Test net output #0: accuracy = 0.217824\nI1206 22:45:29.756531 30052 solver.cpp:404]     Test net output #1: loss = 15.5997 (* 1 = 15.5997 loss)\nI1206 22:45:33.672067 30052 solver.cpp:228] Iteration 8600, 
loss = 16.8094\nI1206 22:45:33.672103 30052 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 22:45:33.672121 30052 solver.cpp:244]     Train net output #1: loss = 16.8094 (* 1 = 16.8094 loss)\nI1206 22:45:33.955400 30052 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1206 22:52:28.683954 30052 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1206 22:55:01.706182 30052 solver.cpp:404]     Test net output #0: accuracy = 0.168294\nI1206 22:55:01.706423 30052 solver.cpp:404]     Test net output #1: loss = 9.99879 (* 1 = 9.99879 loss)\nI1206 22:55:05.622300 30052 solver.cpp:228] Iteration 8700, loss = 11.3403\nI1206 22:55:05.622339 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 22:55:05.622355 30052 solver.cpp:244]     Train net output #1: loss = 11.3403 (* 1 = 11.3403 loss)\nI1206 22:55:05.889118 30052 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1206 23:02:00.483304 30052 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1206 23:04:33.478904 30052 solver.cpp:404]     Test net output #0: accuracy = 0.155118\nI1206 23:04:33.479146 30052 solver.cpp:404]     Test net output #1: loss = 15.0483 (* 1 = 15.0483 loss)\nI1206 23:04:37.392700 30052 solver.cpp:228] Iteration 8800, loss = 15.0595\nI1206 23:04:37.392740 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 23:04:37.392757 30052 solver.cpp:244]     Train net output #1: loss = 15.0595 (* 1 = 15.0595 loss)\nI1206 23:04:37.656668 30052 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1206 23:11:32.165769 30052 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1206 23:14:05.329038 30052 solver.cpp:404]     Test net output #0: accuracy = 0.197235\nI1206 23:14:05.329282 30052 solver.cpp:404]     Test net output #1: loss = 13.7607 (* 1 = 13.7607 loss)\nI1206 23:14:09.245901 30052 solver.cpp:228] Iteration 8900, loss = 14.4306\nI1206 23:14:09.245941 30052 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 23:14:09.245959 30052 
solver.cpp:244]     Train net output #1: loss = 14.4306 (* 1 = 14.4306 loss)\nI1206 23:14:09.505822 30052 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1206 23:21:03.933943 30052 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1206 23:23:36.917776 30052 solver.cpp:404]     Test net output #0: accuracy = 0.220706\nI1206 23:23:36.917978 30052 solver.cpp:404]     Test net output #1: loss = 7.58464 (* 1 = 7.58464 loss)\nI1206 23:23:40.834841 30052 solver.cpp:228] Iteration 9000, loss = 6.81356\nI1206 23:23:40.834873 30052 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 23:23:40.834888 30052 solver.cpp:244]     Train net output #1: loss = 6.81356 (* 1 = 6.81356 loss)\nI1206 23:23:41.097962 30052 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1206 23:30:35.524600 30052 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1206 23:33:08.490834 30052 solver.cpp:404]     Test net output #0: accuracy = 0.204059\nI1206 23:33:08.491034 30052 solver.cpp:404]     Test net output #1: loss = 7.74105 (* 1 = 7.74105 loss)\nI1206 23:33:12.405434 30052 solver.cpp:228] Iteration 9100, loss = 8.11601\nI1206 23:33:12.405468 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 23:33:12.405484 30052 solver.cpp:244]     Train net output #1: loss = 8.116 (* 1 = 8.116 loss)\nI1206 23:33:12.668175 30052 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1206 23:40:07.161470 30052 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1206 23:42:40.214922 30052 solver.cpp:404]     Test net output #0: accuracy = 0.21553\nI1206 23:42:40.215164 30052 solver.cpp:404]     Test net output #1: loss = 14.2309 (* 1 = 14.2309 loss)\nI1206 23:42:44.129954 30052 solver.cpp:228] Iteration 9200, loss = 15.4289\nI1206 23:42:44.129992 30052 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 23:42:44.130007 30052 solver.cpp:244]     Train net output #1: loss = 15.4289 (* 1 = 15.4289 loss)\nI1206 23:42:44.394295 30052 sgd_solver.cpp:166] Iteration 9200, lr 
= 1.38\nI1206 23:49:38.977228 30052 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1206 23:52:12.086014 30052 solver.cpp:404]     Test net output #0: accuracy = 0.221588\nI1206 23:52:12.086241 30052 solver.cpp:404]     Test net output #1: loss = 16.1552 (* 1 = 16.1552 loss)\nI1206 23:52:16.002625 30052 solver.cpp:228] Iteration 9300, loss = 13.0715\nI1206 23:52:16.002670 30052 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 23:52:16.002686 30052 solver.cpp:244]     Train net output #1: loss = 13.0715 (* 1 = 13.0715 loss)\nI1206 23:52:16.264081 30052 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1206 23:59:10.798514 30052 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1207 00:01:43.987077 30052 solver.cpp:404]     Test net output #0: accuracy = 0.189\nI1207 00:01:43.987340 30052 solver.cpp:404]     Test net output #1: loss = 11.333 (* 1 = 11.333 loss)\nI1207 00:01:47.901634 30052 solver.cpp:228] Iteration 9400, loss = 10.6152\nI1207 00:01:47.901679 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 00:01:47.901695 30052 solver.cpp:244]     Train net output #1: loss = 10.6152 (* 1 = 10.6152 loss)\nI1207 00:01:48.164786 30052 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1207 00:08:42.624922 30052 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1207 00:11:15.799334 30052 solver.cpp:404]     Test net output #0: accuracy = 0.202118\nI1207 00:11:15.799592 30052 solver.cpp:404]     Test net output #1: loss = 13.4802 (* 1 = 13.4802 loss)\nI1207 00:11:19.715435 30052 solver.cpp:228] Iteration 9500, loss = 13.9987\nI1207 00:11:19.715476 30052 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 00:11:19.715493 30052 solver.cpp:244]     Train net output #1: loss = 13.9987 (* 1 = 13.9987 loss)\nI1207 00:11:19.980352 30052 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1207 00:18:14.557189 30052 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1207 00:20:47.657758 30052 solver.cpp:404]     Test net 
output #0: accuracy = 0.199353\nI1207 00:20:47.658026 30052 solver.cpp:404]     Test net output #1: loss = 12.7969 (* 1 = 12.7969 loss)\nI1207 00:20:51.572480 30052 solver.cpp:228] Iteration 9600, loss = 10.7926\nI1207 00:20:51.572522 30052 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 00:20:51.572540 30052 solver.cpp:244]     Train net output #1: loss = 10.7926 (* 1 = 10.7926 loss)\nI1207 00:20:51.837723 30052 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1207 00:27:46.163192 30052 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1207 00:30:19.292579 30052 solver.cpp:404]     Test net output #0: accuracy = 0.226\nI1207 00:30:19.292822 30052 solver.cpp:404]     Test net output #1: loss = 13.2722 (* 1 = 13.2722 loss)\nI1207 00:30:23.208493 30052 solver.cpp:228] Iteration 9700, loss = 11.6179\nI1207 00:30:23.208536 30052 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 00:30:23.208554 30052 solver.cpp:244]     Train net output #1: loss = 11.6179 (* 1 = 11.6179 loss)\nI1207 00:30:23.472985 30052 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1207 00:37:17.943433 30052 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1207 00:39:51.059516 30052 solver.cpp:404]     Test net output #0: accuracy = 0.219235\nI1207 00:39:51.059787 30052 solver.cpp:404]     Test net output #1: loss = 13.5208 (* 1 = 13.5208 loss)\nI1207 00:39:54.974694 30052 solver.cpp:228] Iteration 9800, loss = 13.2825\nI1207 00:39:54.974737 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 00:39:54.974755 30052 solver.cpp:244]     Train net output #1: loss = 13.2825 (* 1 = 13.2825 loss)\nI1207 00:39:55.237009 30052 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1207 00:46:48.792896 30052 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1207 00:49:21.901598 30052 solver.cpp:404]     Test net output #0: accuracy = 0.164706\nI1207 00:49:21.901824 30052 solver.cpp:404]     Test net output #1: loss = 13.6791 (* 1 = 13.6791 loss)\nI1207 
00:49:25.818053 30052 solver.cpp:228] Iteration 9900, loss = 15.0373\nI1207 00:49:25.818095 30052 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 00:49:25.818112 30052 solver.cpp:244]     Train net output #1: loss = 15.0373 (* 1 = 15.0373 loss)\nI1207 00:49:26.075827 30052 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1207 00:56:19.461021 30052 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1207 00:58:56.581750 30052 solver.cpp:404]     Test net output #0: accuracy = 0.233\nI1207 00:58:56.582044 30052 solver.cpp:404]     Test net output #1: loss = 14.1226 (* 1 = 14.1226 loss)\nI1207 00:59:00.520313 30052 solver.cpp:228] Iteration 10000, loss = 11.7705\nI1207 00:59:00.520356 30052 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 00:59:00.520380 30052 solver.cpp:244]     Train net output #1: loss = 11.7705 (* 1 = 11.7705 loss)\nI1207 00:59:00.749397 30052 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1207 01:05:54.229117 30052 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1207 01:08:32.617750 30052 solver.cpp:404]     Test net output #0: accuracy = 0.18653\nI1207 01:08:32.618026 30052 solver.cpp:404]     Test net output #1: loss = 15.9431 (* 1 = 15.9431 loss)\nI1207 01:08:36.557502 30052 solver.cpp:228] Iteration 10100, loss = 20.2332\nI1207 01:08:36.557545 30052 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 01:08:36.557570 30052 solver.cpp:244]     Train net output #1: loss = 20.2332 (* 1 = 20.2332 loss)\nI1207 01:08:36.784332 30052 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1207 01:15:30.147523 30052 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1207 01:18:08.535727 30052 solver.cpp:404]     Test net output #0: accuracy = 0.156941\nI1207 01:18:08.536036 30052 solver.cpp:404]     Test net output #1: loss = 13.1779 (* 1 = 13.1779 loss)\nI1207 01:18:12.478214 30052 solver.cpp:228] Iteration 10200, loss = 12.8627\nI1207 01:18:12.478255 30052 solver.cpp:244]     Train net output 
#0: accuracy = 0.164706\nI1207 01:18:12.478279 30052 solver.cpp:244]     Train net output #1: loss = 12.8627 (* 1 = 12.8627 loss)\nI1207 01:18:12.702718 30052 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1207 01:25:06.246603 30052 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1207 01:27:44.706593 30052 solver.cpp:404]     Test net output #0: accuracy = 0.179471\nI1207 01:27:44.706898 30052 solver.cpp:404]     Test net output #1: loss = 15.5991 (* 1 = 15.5991 loss)\nI1207 01:27:48.646191 30052 solver.cpp:228] Iteration 10300, loss = 15.3671\nI1207 01:27:48.646234 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 01:27:48.646257 30052 solver.cpp:244]     Train net output #1: loss = 15.3671 (* 1 = 15.3671 loss)\nI1207 01:27:48.873412 30052 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1207 01:34:42.368819 30052 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1207 01:37:20.683305 30052 solver.cpp:404]     Test net output #0: accuracy = 0.217059\nI1207 01:37:20.683612 30052 solver.cpp:404]     Test net output #1: loss = 10.1804 (* 1 = 10.1804 loss)\nI1207 01:37:24.626937 30052 solver.cpp:228] Iteration 10400, loss = 10.5444\nI1207 01:37:24.626977 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 01:37:24.626993 30052 solver.cpp:244]     Train net output #1: loss = 10.5444 (* 1 = 10.5444 loss)\nI1207 01:37:24.855957 30052 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1207 01:44:18.645223 30052 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1207 01:46:56.910112 30052 solver.cpp:404]     Test net output #0: accuracy = 0.175765\nI1207 01:46:56.910389 30052 solver.cpp:404]     Test net output #1: loss = 19.121 (* 1 = 19.121 loss)\nI1207 01:47:00.853873 30052 solver.cpp:228] Iteration 10500, loss = 18.0175\nI1207 01:47:00.853912 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 01:47:00.853929 30052 solver.cpp:244]     Train net output #1: loss = 18.0175 (* 1 = 18.0175 loss)\nI1207 
01:47:01.082094 30052 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1207 01:53:54.756058 30052 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1207 01:56:33.056951 30052 solver.cpp:404]     Test net output #0: accuracy = 0.189353\nI1207 01:56:33.057230 30052 solver.cpp:404]     Test net output #1: loss = 25.1853 (* 1 = 25.1853 loss)\nI1207 01:56:36.999176 30052 solver.cpp:228] Iteration 10600, loss = 25.208\nI1207 01:56:36.999210 30052 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 01:56:36.999227 30052 solver.cpp:244]     Train net output #1: loss = 25.208 (* 1 = 25.208 loss)\nI1207 01:56:37.225410 30052 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1207 02:03:30.744426 30052 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1207 02:06:08.864181 30052 solver.cpp:404]     Test net output #0: accuracy = 0.175\nI1207 02:06:08.864485 30052 solver.cpp:404]     Test net output #1: loss = 10.2651 (* 1 = 10.2651 loss)\nI1207 02:06:12.806937 30052 solver.cpp:228] Iteration 10700, loss = 9.67295\nI1207 02:06:12.806970 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 02:06:12.806985 30052 solver.cpp:244]     Train net output #1: loss = 9.67295 (* 1 = 9.67295 loss)\nI1207 02:06:13.033188 30052 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1207 02:13:06.427865 30052 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1207 02:15:44.617476 30052 solver.cpp:404]     Test net output #0: accuracy = 0.206\nI1207 02:15:44.617794 30052 solver.cpp:404]     Test net output #1: loss = 11.6664 (* 1 = 11.6664 loss)\nI1207 02:15:48.561527 30052 solver.cpp:228] Iteration 10800, loss = 11.5574\nI1207 02:15:48.561560 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1207 02:15:48.561575 30052 solver.cpp:244]     Train net output #1: loss = 11.5574 (* 1 = 11.5574 loss)\nI1207 02:15:48.783079 30052 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1207 02:22:42.192446 30052 solver.cpp:337] Iteration 10900, Testing net 
(#0)\nI1207 02:25:20.442098 30052 solver.cpp:404]     Test net output #0: accuracy = 0.172588\nI1207 02:25:20.442378 30052 solver.cpp:404]     Test net output #1: loss = 10.7573 (* 1 = 10.7573 loss)\nI1207 02:25:24.383747 30052 solver.cpp:228] Iteration 10900, loss = 11.0432\nI1207 02:25:24.383780 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 02:25:24.383796 30052 solver.cpp:244]     Train net output #1: loss = 11.0432 (* 1 = 11.0432 loss)\nI1207 02:25:24.610141 30052 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1207 02:32:18.085516 30052 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1207 02:34:56.346299 30052 solver.cpp:404]     Test net output #0: accuracy = 0.192588\nI1207 02:34:56.346604 30052 solver.cpp:404]     Test net output #1: loss = 13.5867 (* 1 = 13.5867 loss)\nI1207 02:35:00.289261 30052 solver.cpp:228] Iteration 11000, loss = 13.6818\nI1207 02:35:00.289296 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 02:35:00.289311 30052 solver.cpp:244]     Train net output #1: loss = 13.6818 (* 1 = 13.6818 loss)\nI1207 02:35:00.517293 30052 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1207 02:41:54.199791 30052 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1207 02:44:32.401413 30052 solver.cpp:404]     Test net output #0: accuracy = 0.236294\nI1207 02:44:32.401716 30052 solver.cpp:404]     Test net output #1: loss = 19.6847 (* 1 = 19.6847 loss)\nI1207 02:44:36.345329 30052 solver.cpp:228] Iteration 11100, loss = 18.5998\nI1207 02:44:36.345363 30052 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1207 02:44:36.345378 30052 solver.cpp:244]     Train net output #1: loss = 18.5998 (* 1 = 18.5998 loss)\nI1207 02:44:36.570969 30052 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1207 02:51:30.177750 30052 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1207 02:54:08.341879 30052 solver.cpp:404]     Test net output #0: accuracy = 0.205118\nI1207 02:54:08.342162 30052 
solver.cpp:404]     Test net output #1: loss = 14.1973 (* 1 = 14.1973 loss)\nI1207 02:54:12.283768 30052 solver.cpp:228] Iteration 11200, loss = 14.2411\nI1207 02:54:12.283807 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 02:54:12.283823 30052 solver.cpp:244]     Train net output #1: loss = 14.2411 (* 1 = 14.2411 loss)\nI1207 02:54:12.508816 30052 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1207 03:01:06.116420 30052 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1207 03:03:44.424468 30052 solver.cpp:404]     Test net output #0: accuracy = 0.194412\nI1207 03:03:44.424772 30052 solver.cpp:404]     Test net output #1: loss = 23.514 (* 1 = 23.514 loss)\nI1207 03:03:48.472726 30052 solver.cpp:228] Iteration 11300, loss = 24.3223\nI1207 03:03:48.472767 30052 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 03:03:48.472785 30052 solver.cpp:244]     Train net output #1: loss = 24.3223 (* 1 = 24.3223 loss)\nI1207 03:03:48.592016 30052 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1207 03:10:42.032299 30052 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1207 03:13:20.379555 30052 solver.cpp:404]     Test net output #0: accuracy = 0.210883\nI1207 03:13:20.379828 30052 solver.cpp:404]     Test net output #1: loss = 11.5105 (* 1 = 11.5105 loss)\nI1207 03:13:24.322896 30052 solver.cpp:228] Iteration 11400, loss = 11.5128\nI1207 03:13:24.322942 30052 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1207 03:13:24.322957 30052 solver.cpp:244]     Train net output #1: loss = 11.5128 (* 1 = 11.5128 loss)\nI1207 03:13:24.550652 30052 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1207 03:20:18.087234 30052 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1207 03:22:56.414597 30052 solver.cpp:404]     Test net output #0: accuracy = 0.173294\nI1207 03:22:56.414885 30052 solver.cpp:404]     Test net output #1: loss = 22.2582 (* 1 = 22.2582 loss)\nI1207 03:23:00.356753 30052 solver.cpp:228] Iteration 
11500, loss = 25.5334\nI1207 03:23:00.356792 30052 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 03:23:00.356807 30052 solver.cpp:244]     Train net output #1: loss = 25.5334 (* 1 = 25.5334 loss)\nI1207 03:23:00.581070 30052 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1207 03:29:54.023025 30052 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1207 03:32:32.324453 30052 solver.cpp:404]     Test net output #0: accuracy = 0.186412\nI1207 03:32:32.324723 30052 solver.cpp:404]     Test net output #1: loss = 27.8013 (* 1 = 27.8013 loss)\nI1207 03:32:36.267855 30052 solver.cpp:228] Iteration 11600, loss = 30.9782\nI1207 03:32:36.267895 30052 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 03:32:36.267911 30052 solver.cpp:244]     Train net output #1: loss = 30.9782 (* 1 = 30.9782 loss)\nI1207 03:32:36.491883 30052 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1207 03:39:29.978348 30052 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1207 03:42:08.270717 30052 solver.cpp:404]     Test net output #0: accuracy = 0.164\nI1207 03:42:08.270998 30052 solver.cpp:404]     Test net output #1: loss = 12.8209 (* 1 = 12.8209 loss)\nI1207 03:42:12.211580 30052 solver.cpp:228] Iteration 11700, loss = 13.0717\nI1207 03:42:12.211619 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 03:42:12.211635 30052 solver.cpp:244]     Train net output #1: loss = 13.0717 (* 1 = 13.0717 loss)\nI1207 03:42:12.439257 30052 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1207 03:49:05.850863 30052 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1207 03:51:44.150838 30052 solver.cpp:404]     Test net output #0: accuracy = 0.117588\nI1207 03:51:44.151142 30052 solver.cpp:404]     Test net output #1: loss = 22.4845 (* 1 = 22.4845 loss)\nI1207 03:51:48.091259 30052 solver.cpp:228] Iteration 11800, loss = 23.1197\nI1207 03:51:48.091306 30052 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1207 
03:51:48.091323 30052 solver.cpp:244]     Train net output #1: loss = 23.1197 (* 1 = 23.1197 loss)\nI1207 03:51:48.427379 30052 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1207 03:58:42.261746 30052 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1207 04:01:20.525908 30052 solver.cpp:404]     Test net output #0: accuracy = 0.120941\nI1207 04:01:20.526196 30052 solver.cpp:404]     Test net output #1: loss = 27.7149 (* 1 = 27.7149 loss)\nI1207 04:01:24.467672 30052 solver.cpp:228] Iteration 11900, loss = 27.4544\nI1207 04:01:24.467710 30052 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 04:01:24.467726 30052 solver.cpp:244]     Train net output #1: loss = 27.4544 (* 1 = 27.4544 loss)\nI1207 04:01:24.692030 30052 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1207 04:08:18.144973 30052 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1207 04:10:56.444398 30052 solver.cpp:404]     Test net output #0: accuracy = 0.164294\nI1207 04:10:56.444689 30052 solver.cpp:404]     Test net output #1: loss = 16.9291 (* 1 = 16.9291 loss)\nI1207 04:11:00.385962 30052 solver.cpp:228] Iteration 12000, loss = 17.8209\nI1207 04:11:00.386000 30052 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 04:11:00.386015 30052 solver.cpp:244]     Train net output #1: loss = 17.8209 (* 1 = 17.8209 loss)\nI1207 04:11:00.610796 30052 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1207 04:17:54.276583 30052 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1207 04:20:32.643482 30052 solver.cpp:404]     Test net output #0: accuracy = 0.193412\nI1207 04:20:32.643776 30052 solver.cpp:404]     Test net output #1: loss = 12.0679 (* 1 = 12.0679 loss)\nI1207 04:20:36.586515 30052 solver.cpp:228] Iteration 12100, loss = 10.6651\nI1207 04:20:36.586555 30052 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 04:20:36.586571 30052 solver.cpp:244]     Train net output #1: loss = 10.6651 (* 1 = 10.6651 loss)\nI1207 04:20:36.811265 30052 
sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1207 04:27:30.588038 30052 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1207 04:30:08.946524 30052 solver.cpp:404]     Test net output #0: accuracy = 0.213236\nI1207 04:30:08.946812 30052 solver.cpp:404]     Test net output #1: loss = 13.6172 (* 1 = 13.6172 loss)\nI1207 04:30:12.888461 30052 solver.cpp:228] Iteration 12200, loss = 12.2947\nI1207 04:30:12.888504 30052 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 04:30:12.888519 30052 solver.cpp:244]     Train net output #1: loss = 12.2947 (* 1 = 12.2947 loss)\nI1207 04:30:13.117686 30052 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1207 04:37:06.608743 30052 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1207 04:39:45.002498 30052 solver.cpp:404]     Test net output #0: accuracy = 0.201059\nI1207 04:39:45.002813 30052 solver.cpp:404]     Test net output #1: loss = 20.7337 (* 1 = 20.7337 loss)\nI1207 04:39:48.945504 30052 solver.cpp:228] Iteration 12300, loss = 25.3433\nI1207 04:39:48.945544 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 04:39:48.945561 30052 solver.cpp:244]     Train net output #1: loss = 25.3433 (* 1 = 25.3433 loss)\nI1207 04:39:49.168973 30052 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1207 04:46:42.641753 30052 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1207 04:49:21.045044 30052 solver.cpp:404]     Test net output #0: accuracy = 0.132882\nI1207 04:49:21.045346 30052 solver.cpp:404]     Test net output #1: loss = 17.5426 (* 1 = 17.5426 loss)\nI1207 04:49:24.989713 30052 solver.cpp:228] Iteration 12400, loss = 18.4223\nI1207 04:49:24.989753 30052 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 04:49:24.989769 30052 solver.cpp:244]     Train net output #1: loss = 18.4223 (* 1 = 18.4223 loss)\nI1207 04:49:25.208413 30052 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1207 04:56:18.620591 30052 solver.cpp:337] Iteration 12500, Testing net 
(#0)\nI1207 04:58:56.985404 30052 solver.cpp:404]     Test net output #0: accuracy = 0.121706\nI1207 04:58:56.985688 30052 solver.cpp:404]     Test net output #1: loss = 23.4429 (* 1 = 23.4429 loss)\nI1207 04:59:00.928591 30052 solver.cpp:228] Iteration 12500, loss = 21.8404\nI1207 04:59:00.928630 30052 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 04:59:00.928647 30052 solver.cpp:244]     Train net output #1: loss = 21.8404 (* 1 = 21.8404 loss)\nI1207 04:59:01.153142 30052 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1207 05:05:54.611122 30052 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1207 05:08:32.926013 30052 solver.cpp:404]     Test net output #0: accuracy = 0.167529\nI1207 05:08:32.926297 30052 solver.cpp:404]     Test net output #1: loss = 23.8083 (* 1 = 23.8083 loss)\nI1207 05:08:36.869240 30052 solver.cpp:228] Iteration 12600, loss = 27.6247\nI1207 05:08:36.869278 30052 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1207 05:08:36.869294 30052 solver.cpp:244]     Train net output #1: loss = 27.6247 (* 1 = 27.6247 loss)\nI1207 05:08:37.093304 30052 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1207 05:15:30.801656 30052 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1207 05:18:09.148147 30052 solver.cpp:404]     Test net output #0: accuracy = 0.161471\nI1207 05:18:09.148427 30052 solver.cpp:404]     Test net output #1: loss = 19.684 (* 1 = 19.684 loss)\nI1207 05:18:13.091024 30052 solver.cpp:228] Iteration 12700, loss = 22.7334\nI1207 05:18:13.091064 30052 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 05:18:13.091080 30052 solver.cpp:244]     Train net output #1: loss = 22.7334 (* 1 = 22.7334 loss)\nI1207 05:18:13.314947 30052 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1207 05:25:07.081300 30052 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1207 05:27:45.578995 30052 solver.cpp:404]     Test net output #0: accuracy = 0.173353\nI1207 05:27:45.579313 30052 
solver.cpp:404]     Test net output #1: loss = 13.9124 (* 1 = 13.9124 loss)\nI1207 05:27:49.522670 30052 solver.cpp:228] Iteration 12800, loss = 13.2937\nI1207 05:27:49.522706 30052 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 05:27:49.522722 30052 solver.cpp:244]     Train net output #1: loss = 13.2937 (* 1 = 13.2937 loss)\nI1207 05:27:49.748776 30052 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI1207 05:34:43.187788 30052 solver.cpp:337] Iteration 12900, Testing net (#0)\n"
  },
  {
    "path": "Results/lrRange3SS520kClip10Fig12b",
    "content": "I1206 09:01:22.728986 28553 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1206 09:01:22.732164 28553 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1206 09:01:22.733727 28553 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1206 09:01:22.734956 28553 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1206 09:01:22.736178 28553 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1206 09:01:22.737421 28553 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1206 09:01:22.739032 28553 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1206 09:01:22.740276 28553 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1206 09:01:22.741519 28553 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1206 09:01:23.171736 28553 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 10\nmax_lr: 3\nI1206 09:01:38.233484 28553 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1206 09:01:38.240586 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:38.240654 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:38.241539 28553 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1206 09:01:38.243172 28553 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    
mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n  
  }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n  
  pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 
1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stri\nI1206 09:01:38.244825 28553 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:01:38.247292 28553 net.cpp:100] Creating Layer dataLayer\nI1206 09:01:38.247375 28553 net.cpp:408] dataLayer -> data_top\nI1206 09:01:38.247601 28553 net.cpp:408] dataLayer -> label\nI1206 09:01:38.247735 28553 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:01:47.582607 28561 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1206 09:01:47.589159 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:47.594386 28553 net.cpp:150] Setting up dataLayer\nI1206 09:01:47.594465 28553 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:01:47.594488 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.594496 28553 net.cpp:165] Memory required for data: 1044820\nI1206 09:01:47.594513 28553 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:01:47.594533 28553 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:01:47.594542 28553 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:01:47.594561 28553 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:01:47.594584 28553 
net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:01:47.594718 28553 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:01:47.594732 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.594739 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:47.594744 28553 net.cpp:165] Memory required for data: 1045500\nI1206 09:01:47.594750 28553 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:01:47.594823 28553 net.cpp:100] Creating Layer pre_conv\nI1206 09:01:47.594836 28553 net.cpp:434] pre_conv <- data_top\nI1206 09:01:47.594849 28553 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:01:47.596770 28553 net.cpp:150] Setting up pre_conv\nI1206 09:01:47.596789 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.596796 28553 net.cpp:165] Memory required for data: 6616060\nI1206 09:01:47.596850 28553 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:01:47.596868 28553 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:01:47.596873 28553 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:01:47.596881 28553 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:01:47.596891 28553 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:01:47.596942 28553 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:01:47.596956 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.596962 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.596967 28553 net.cpp:165] Memory required for data: 17757180\nI1206 09:01:47.596972 28553 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:01:47.597048 28553 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:01:47.597060 28553 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:01:47.597072 28553 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:01:47.597780 28553 net.cpp:150] Setting up 
L1_b1_brc1_bn\nI1206 09:01:47.597800 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.597806 28553 net.cpp:165] Memory required for data: 23327740\nI1206 09:01:47.597825 28553 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:01:47.597849 28562 blocking_queue.cpp:50] Waiting for data\nI1206 09:01:47.597877 28553 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:01:47.597887 28553 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:01:47.597896 28553 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:01:47.597908 28553 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:01:47.597915 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:47.597920 28553 net.cpp:165] Memory required for data: 28898300\nI1206 09:01:47.597925 28553 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1206 09:01:47.597937 28553 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:01:47.597944 28553 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:01:47.597957 28553 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:01:47.598229 28553 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:01:47.598244 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.598249 28553 net.cpp:165] Memory required for data: 40039420\nI1206 09:01:47.598259 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:01:47.598268 28553 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:01:47.598274 28553 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:01:47.598284 28553 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:01:47.598528 28553 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:01:47.598546 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.598551 28553 net.cpp:165] Memory required for data: 51180540\nI1206 09:01:47.598565 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:01:47.598575 28553 net.cpp:100] Creating Layer 
L1_b1_brc2_relu\nI1206 09:01:47.598582 28553 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:01:47.598589 28553 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:01:47.598598 28553 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:01:47.598613 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.598619 28553 net.cpp:165] Memory required for data: 62321660\nI1206 09:01:47.598624 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:01:47.598641 28553 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:01:47.598647 28553 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:01:47.598659 28553 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:01:47.598923 28553 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:01:47.598937 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.598942 28553 net.cpp:165] Memory required for data: 73462780\nI1206 09:01:47.598951 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:01:47.598968 28553 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:01:47.598973 28553 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:01:47.598981 28553 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:01:47.599203 28553 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:01:47.599216 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.599221 28553 net.cpp:165] Memory required for data: 84603900\nI1206 09:01:47.599232 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:01:47.599241 28553 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:01:47.599246 28553 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:01:47.599253 28553 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:01:47.599262 28553 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:01:47.599269 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.599273 28553 net.cpp:165] Memory required for 
data: 95745020\nI1206 09:01:47.599278 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:01:47.599292 28553 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:01:47.599298 28553 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:01:47.599308 28553 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:01:47.599829 28553 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:01:47.599845 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.599850 28553 net.cpp:165] Memory required for data: 118027260\nI1206 09:01:47.599865 28553 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:01:47.599880 28553 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:01:47.599886 28553 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:01:47.599895 28553 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:01:47.600179 28553 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:01:47.600193 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600198 28553 net.cpp:165] Memory required for data: 140309500\nI1206 09:01:47.600208 28553 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:01:47.600265 28553 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:01:47.600277 28553 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:01:47.600286 28553 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:01:47.600297 28553 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:01:47.600373 28553 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:01:47.600389 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600394 28553 net.cpp:165] Memory required for data: 162591740\nI1206 09:01:47.600399 28553 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.600411 28553 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.600417 
28553 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:01:47.600425 28553 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:47.600437 28553 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:47.600512 28553 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:47.600525 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600535 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600539 28553 net.cpp:165] Memory required for data: 207156220\nI1206 09:01:47.600544 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:01:47.600555 28553 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1206 09:01:47.600561 28553 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:47.600572 28553 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:01:47.600790 28553 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:01:47.600803 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600808 28553 net.cpp:165] Memory required for data: 229438460\nI1206 09:01:47.600821 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:01:47.600831 28553 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:01:47.600837 28553 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:01:47.600844 28553 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:01:47.600853 28553 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:01:47.600860 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.600865 28553 net.cpp:165] Memory required for data: 251720700\nI1206 09:01:47.600870 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:01:47.600881 28553 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:01:47.600886 28553 
net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:01:47.600898 28553 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:01:47.601193 28553 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:01:47.601207 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.601212 28553 net.cpp:165] Memory required for data: 262861820\nI1206 09:01:47.601220 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:01:47.601229 28553 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:01:47.601235 28553 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:01:47.601243 28553 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:01:47.601475 28553 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:01:47.601488 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.601493 28553 net.cpp:165] Memory required for data: 274002940\nI1206 09:01:47.601503 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:01:47.601514 28553 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:01:47.601521 28553 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:01:47.601527 28553 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:01:47.601537 28553 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:01:47.601544 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.601548 28553 net.cpp:165] Memory required for data: 285144060\nI1206 09:01:47.601553 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:01:47.601567 28553 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:01:47.601573 28553 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:01:47.601583 28553 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:01:47.601853 28553 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:01:47.601866 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.601872 28553 net.cpp:165] Memory required for data: 296285180\nI1206 09:01:47.601881 28553 
layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:01:47.601889 28553 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:01:47.601894 28553 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:01:47.601914 28553 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:01:47.602146 28553 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:01:47.602159 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.602164 28553 net.cpp:165] Memory required for data: 307426300\nI1206 09:01:47.602174 28553 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:01:47.602191 28553 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:01:47.602197 28553 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:01:47.602205 28553 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:01:47.602214 28553 net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:01:47.602221 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.602226 28553 net.cpp:165] Memory required for data: 318567420\nI1206 09:01:47.602231 28553 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:01:47.602241 28553 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:01:47.602246 28553 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:01:47.602257 28553 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:01:47.602557 28553 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:01:47.602572 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.602577 28553 net.cpp:165] Memory required for data: 340849660\nI1206 09:01:47.602591 28553 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:01:47.602605 28553 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:01:47.602612 28553 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:01:47.602618 28553 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:47.602627 28553 net.cpp:408] L1_b2_sum_eltwise -> 
L1_b2_sum_eltwise_top\nI1206 09:01:47.602659 28553 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:01:47.602671 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.602676 28553 net.cpp:165] Memory required for data: 363131900\nI1206 09:01:47.602681 28553 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.602689 28553 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.602695 28553 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:01:47.602705 28553 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:47.602715 28553 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:47.602756 28553 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:47.602771 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.602777 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.602782 28553 net.cpp:165] Memory required for data: 407696380\nI1206 09:01:47.602787 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:01:47.602795 28553 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:01:47.602800 28553 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:47.602810 28553 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:01:47.603027 28553 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:01:47.603039 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.603044 28553 net.cpp:165] Memory required for data: 429978620\nI1206 09:01:47.603055 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:01:47.603062 28553 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:01:47.603068 28553 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 
09:01:47.603075 28553 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:01:47.603085 28553 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:01:47.603091 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.603096 28553 net.cpp:165] Memory required for data: 452260860\nI1206 09:01:47.603101 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:01:47.603113 28553 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:01:47.603121 28553 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:01:47.603130 28553 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:01:47.603431 28553 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:01:47.603451 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.603456 28553 net.cpp:165] Memory required for data: 463401980\nI1206 09:01:47.603471 28553 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:01:47.603483 28553 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:01:47.603490 28553 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:01:47.603500 28553 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:01:47.603725 28553 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:01:47.603739 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.603744 28553 net.cpp:165] Memory required for data: 474543100\nI1206 09:01:47.603754 28553 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:01:47.603761 28553 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:01:47.603766 28553 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:01:47.603773 28553 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:01:47.603782 28553 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:01:47.603790 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.603793 28553 net.cpp:165] Memory required for data: 485684220\nI1206 09:01:47.603798 28553 layer_factory.hpp:77] Creating layer 
L1_b3_brc2_conv\nI1206 09:01:47.603817 28553 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:01:47.603823 28553 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:01:47.603832 28553 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:01:47.604097 28553 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:01:47.604111 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.604116 28553 net.cpp:165] Memory required for data: 496825340\nI1206 09:01:47.604125 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:01:47.604135 28553 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:01:47.604142 28553 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:01:47.604149 28553 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:01:47.604379 28553 net.cpp:150] Setting up L1_b3_brc3_bn\nI1206 09:01:47.604393 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.604398 28553 net.cpp:165] Memory required for data: 507966460\nI1206 09:01:47.604408 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:01:47.604415 28553 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:01:47.604421 28553 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:01:47.604434 28553 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:01:47.604444 28553 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:01:47.604449 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.604454 28553 net.cpp:165] Memory required for data: 519107580\nI1206 09:01:47.604460 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:01:47.604478 28553 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:01:47.604485 28553 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:01:47.604495 28553 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:01:47.604799 28553 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:01:47.604813 28553 net.cpp:157] Top shape: 85 64 32 32 
(5570560)\nI1206 09:01:47.604818 28553 net.cpp:165] Memory required for data: 541389820\nI1206 09:01:47.604826 28553 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:01:47.604835 28553 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:01:47.604841 28553 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:01:47.604848 28553 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:47.604858 28553 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:01:47.604894 28553 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:01:47.604907 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.604912 28553 net.cpp:165] Memory required for data: 563672060\nI1206 09:01:47.604917 28553 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.604931 28553 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.604938 28553 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:01:47.604948 28553 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:47.604956 28553 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:47.605000 28553 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:47.605013 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.605020 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.605024 28553 net.cpp:165] Memory required for data: 608236540\nI1206 09:01:47.605029 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:01:47.605037 28553 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:01:47.605043 28553 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:47.605056 28553 
net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:01:47.605275 28553 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:01:47.605288 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.605293 28553 net.cpp:165] Memory required for data: 630518780\nI1206 09:01:47.605304 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:01:47.605314 28553 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:01:47.605320 28553 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:01:47.605327 28553 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:01:47.605336 28553 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:01:47.605343 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.605348 28553 net.cpp:165] Memory required for data: 652801020\nI1206 09:01:47.605352 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:01:47.605363 28553 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:01:47.605368 28553 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:01:47.605379 28553 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:01:47.605698 28553 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:01:47.605712 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.605717 28553 net.cpp:165] Memory required for data: 663942140\nI1206 09:01:47.605726 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:01:47.605736 28553 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:01:47.605741 28553 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:01:47.605754 28553 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:01:47.605990 28553 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:01:47.606003 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.606009 28553 net.cpp:165] Memory required for data: 675083260\nI1206 09:01:47.606019 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:01:47.606030 28553 
net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:01:47.606036 28553 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:01:47.606043 28553 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:01:47.606052 28553 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:01:47.606060 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.606065 28553 net.cpp:165] Memory required for data: 686224380\nI1206 09:01:47.606068 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:01:47.606079 28553 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:01:47.606084 28553 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:01:47.606096 28553 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:01:47.606364 28553 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 09:01:47.606379 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.606384 28553 net.cpp:165] Memory required for data: 697365500\nI1206 09:01:47.606391 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:01:47.606406 28553 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:01:47.606413 28553 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:01:47.606425 28553 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:01:47.606664 28553 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:01:47.606678 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.606683 28553 net.cpp:165] Memory required for data: 708506620\nI1206 09:01:47.606693 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:01:47.606704 28553 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:01:47.606710 28553 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:01:47.606717 28553 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:01:47.606727 28553 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:01:47.606734 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.606739 28553 
net.cpp:165] Memory required for data: 719647740\nI1206 09:01:47.606743 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:01:47.606753 28553 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:01:47.606760 28553 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:01:47.606770 28553 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:01:47.607082 28553 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:01:47.607096 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607101 28553 net.cpp:165] Memory required for data: 741929980\nI1206 09:01:47.607110 28553 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:01:47.607118 28553 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:01:47.607125 28553 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1206 09:01:47.607131 28553 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:47.607139 28553 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:01:47.607172 28553 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:01:47.607182 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607187 28553 net.cpp:165] Memory required for data: 764212220\nI1206 09:01:47.607192 28553 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.607201 28553 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.607208 28553 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:01:47.607215 28553 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:47.607224 28553 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:47.607269 28553 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:47.607280 28553 
net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607286 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607291 28553 net.cpp:165] Memory required for data: 808776700\nI1206 09:01:47.607296 28553 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:01:47.607305 28553 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:01:47.607309 28553 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:47.607321 28553 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:01:47.607549 28553 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:01:47.607564 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607570 28553 net.cpp:165] Memory required for data: 831058940\nI1206 09:01:47.607592 28553 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1206 09:01:47.607601 28553 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:01:47.607607 28553 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:01:47.607617 28553 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:01:47.607627 28553 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:01:47.607635 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.607645 28553 net.cpp:165] Memory required for data: 853341180\nI1206 09:01:47.607650 28553 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:01:47.607661 28553 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:01:47.607667 28553 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:01:47.607678 28553 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:01:47.607980 28553 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:01:47.607993 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.607998 28553 net.cpp:165] Memory required for data: 864482300\nI1206 09:01:47.608007 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:01:47.608016 28553 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 
09:01:47.608021 28553 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:01:47.608029 28553 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:01:47.608259 28553 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:01:47.608273 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.608278 28553 net.cpp:165] Memory required for data: 875623420\nI1206 09:01:47.608288 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:01:47.608299 28553 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:01:47.608304 28553 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:01:47.608311 28553 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:01:47.608321 28553 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:01:47.608328 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.608332 28553 net.cpp:165] Memory required for data: 886764540\nI1206 09:01:47.608337 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:01:47.608350 28553 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:01:47.608356 28553 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:01:47.608371 28553 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:01:47.608645 28553 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:01:47.608660 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.608665 28553 net.cpp:165] Memory required for data: 897905660\nI1206 09:01:47.608674 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:01:47.608682 28553 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:01:47.608688 28553 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:01:47.608700 28553 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:01:47.608937 28553 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:01:47.608950 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.608955 28553 net.cpp:165] Memory required for data: 909046780\nI1206 
09:01:47.608965 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:01:47.608976 28553 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:01:47.608983 28553 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:01:47.608989 28553 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:01:47.608999 28553 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:01:47.609006 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.609010 28553 net.cpp:165] Memory required for data: 920187900\nI1206 09:01:47.609015 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:01:47.609025 28553 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:01:47.609030 28553 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:01:47.609041 28553 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:01:47.609347 28553 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:01:47.609360 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609365 28553 net.cpp:165] Memory required for data: 942470140\nI1206 09:01:47.609375 28553 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:01:47.609383 28553 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:01:47.609390 28553 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:01:47.609396 28553 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:47.609411 28553 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:01:47.609447 28553 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:01:47.609457 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609462 28553 net.cpp:165] Memory required for data: 964752380\nI1206 09:01:47.609472 28553 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.609480 28553 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.609485 28553 
net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:01:47.609496 28553 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:47.609506 28553 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:47.609555 28553 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:47.609567 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609575 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609580 28553 net.cpp:165] Memory required for data: 1009316860\nI1206 09:01:47.609585 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:01:47.609592 28553 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1206 09:01:47.609598 28553 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:47.609608 28553 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:01:47.609838 28553 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:01:47.609853 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609858 28553 net.cpp:165] Memory required for data: 1031599100\nI1206 09:01:47.609869 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:01:47.609876 28553 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:01:47.609882 28553 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:01:47.609889 28553 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:01:47.609899 28553 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:01:47.609905 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.609910 28553 net.cpp:165] Memory required for data: 1053881340\nI1206 09:01:47.609915 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:01:47.609925 28553 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:01:47.609930 28553 
net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:01:47.609941 28553 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:01:47.610244 28553 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:01:47.610258 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.610263 28553 net.cpp:165] Memory required for data: 1065022460\nI1206 09:01:47.610271 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:01:47.610280 28553 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:01:47.610286 28553 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:01:47.610296 28553 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:01:47.610538 28553 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:01:47.610550 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.610555 28553 net.cpp:165] Memory required for data: 1076163580\nI1206 09:01:47.610565 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:01:47.610581 28553 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:01:47.610587 28553 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:01:47.610595 28553 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:01:47.610605 28553 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:01:47.610611 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.610616 28553 net.cpp:165] Memory required for data: 1087304700\nI1206 09:01:47.610621 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:01:47.610641 28553 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:01:47.610648 28553 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:01:47.610659 28553 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:01:47.610935 28553 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:01:47.610949 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.610954 28553 net.cpp:165] Memory required for data: 1098445820\nI1206 09:01:47.610962 28553 
layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:01:47.610973 28553 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:01:47.610980 28553 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:01:47.610991 28553 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:01:47.611227 28553 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:01:47.611240 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.611245 28553 net.cpp:165] Memory required for data: 1109586940\nI1206 09:01:47.611255 28553 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:01:47.611263 28553 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:01:47.611269 28553 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:01:47.611276 28553 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 09:01:47.611285 28553 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:01:47.611292 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:47.611296 28553 net.cpp:165] Memory required for data: 1120728060\nI1206 09:01:47.611301 28553 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:01:47.611315 28553 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:01:47.611320 28553 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:01:47.611331 28553 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:01:47.611646 28553 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:01:47.611660 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.611665 28553 net.cpp:165] Memory required for data: 1143010300\nI1206 09:01:47.611675 28553 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:01:47.611686 28553 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:01:47.611692 28553 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:01:47.611699 28553 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:47.611707 28553 net.cpp:408] L1_b6_sum_eltwise 
-> L1_b6_sum_eltwise_top\nI1206 09:01:47.611739 28553 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:01:47.611749 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.611754 28553 net.cpp:165] Memory required for data: 1165292540\nI1206 09:01:47.611759 28553 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.611766 28553 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.611771 28553 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:01:47.611783 28553 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:47.611793 28553 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:47.611835 28553 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:47.611850 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.611856 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.611860 28553 net.cpp:165] Memory required for data: 1209857020\nI1206 09:01:47.611865 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:01:47.611873 28553 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:01:47.611879 28553 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:47.611889 28553 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:01:47.612112 28553 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:01:47.612124 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.612138 28553 net.cpp:165] Memory required for data: 1232139260\nI1206 09:01:47.612149 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:01:47.612159 28553 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:01:47.612164 28553 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 
09:01:47.612170 28553 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:01:47.612180 28553 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:01:47.612186 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:47.612191 28553 net.cpp:165] Memory required for data: 1254421500\nI1206 09:01:47.612196 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:01:47.612210 28553 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:01:47.612215 28553 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:01:47.612226 28553 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:01:47.612591 28553 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:01:47.612606 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.612610 28553 net.cpp:165] Memory required for data: 1259992060\nI1206 09:01:47.612619 28553 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:01:47.612628 28553 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:01:47.612637 28553 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:01:47.612644 28553 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:01:47.612884 28553 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:01:47.612896 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.612901 28553 net.cpp:165] Memory required for data: 1265562620\nI1206 09:01:47.612911 28553 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:01:47.612920 28553 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:01:47.612926 28553 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:01:47.612936 28553 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:01:47.612946 28553 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:01:47.612952 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.612957 28553 net.cpp:165] Memory required for data: 1271133180\nI1206 09:01:47.612962 28553 layer_factory.hpp:77] Creating layer 
L2_b1_brc2_conv\nI1206 09:01:47.612975 28553 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:01:47.612982 28553 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:01:47.612993 28553 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:01:47.613282 28553 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:01:47.613296 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.613301 28553 net.cpp:165] Memory required for data: 1276703740\nI1206 09:01:47.613309 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:01:47.613317 28553 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:01:47.613323 28553 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:01:47.613332 28553 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:01:47.613572 28553 net.cpp:150] Setting up L2_b1_brc3_bn\nI1206 09:01:47.613586 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.613591 28553 net.cpp:165] Memory required for data: 1282274300\nI1206 09:01:47.613601 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:01:47.613610 28553 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:01:47.613615 28553 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:01:47.613625 28553 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:01:47.613634 28553 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:01:47.613641 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.613646 28553 net.cpp:165] Memory required for data: 1287844860\nI1206 09:01:47.613651 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:01:47.613664 28553 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:01:47.613670 28553 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:01:47.613678 28553 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:01:47.615170 28553 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:01:47.615187 28553 net.cpp:157] Top shape: 85 128 16 16 
(2785280)\nI1206 09:01:47.615193 28553 net.cpp:165] Memory required for data: 1298985980\nI1206 09:01:47.615202 28553 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:01:47.615219 28553 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:01:47.615226 28553 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:47.615239 28553 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:01:47.615667 28553 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:01:47.615682 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.615687 28553 net.cpp:165] Memory required for data: 1310127100\nI1206 09:01:47.615696 28553 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:01:47.615705 28553 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1206 09:01:47.615711 28553 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:01:47.615718 28553 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:01:47.615730 28553 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:01:47.615756 28553 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:01:47.615768 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.615773 28553 net.cpp:165] Memory required for data: 1321268220\nI1206 09:01:47.615778 28553 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.615787 28553 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.615792 28553 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:01:47.615799 28553 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:47.615808 28553 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:47.615857 28553 net.cpp:150] Setting up 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:47.615869 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.615875 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.615880 28553 net.cpp:165] Memory required for data: 1343550460\nI1206 09:01:47.615885 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:01:47.615896 28553 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:01:47.615902 28553 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:47.615909 28553 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:01:47.616127 28553 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:01:47.616139 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.616144 28553 net.cpp:165] Memory required for data: 1354691580\nI1206 09:01:47.616155 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:01:47.616166 28553 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:01:47.616173 28553 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:01:47.616179 28553 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:01:47.616189 28553 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:01:47.616196 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.616200 28553 net.cpp:165] Memory required for data: 1365832700\nI1206 09:01:47.616205 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:01:47.616215 28553 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:01:47.616221 28553 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:01:47.616236 28553 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:01:47.616683 28553 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:01:47.616698 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.616703 28553 net.cpp:165] Memory required for data: 1371403260\nI1206 09:01:47.616711 28553 layer_factory.hpp:77] Creating layer 
L2_b2_brc2_bn\nI1206 09:01:47.616729 28553 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:01:47.616735 28553 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:01:47.616746 28553 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:01:47.616982 28553 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:01:47.616994 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.616999 28553 net.cpp:165] Memory required for data: 1376973820\nI1206 09:01:47.617009 28553 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:01:47.617022 28553 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:01:47.617029 28553 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:01:47.617036 28553 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:01:47.617045 28553 net.cpp:150] Setting up L2_b2_brc2_relu\nI1206 09:01:47.617053 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.617058 28553 net.cpp:165] Memory required for data: 1382544380\nI1206 09:01:47.617063 28553 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:01:47.617072 28553 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:01:47.617079 28553 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:01:47.617089 28553 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:01:47.617394 28553 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:01:47.617408 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.617413 28553 net.cpp:165] Memory required for data: 1388114940\nI1206 09:01:47.617421 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:01:47.617429 28553 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:01:47.617435 28553 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:01:47.617446 28553 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:01:47.617694 28553 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:01:47.617708 28553 net.cpp:157] Top shape: 85 64 16 16 
(1392640)\nI1206 09:01:47.617713 28553 net.cpp:165] Memory required for data: 1393685500\nI1206 09:01:47.617723 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:01:47.617732 28553 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:01:47.617738 28553 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:01:47.617748 28553 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:01:47.617758 28553 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:01:47.617764 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.617769 28553 net.cpp:165] Memory required for data: 1399256060\nI1206 09:01:47.617774 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:01:47.617784 28553 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:01:47.617789 28553 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1206 09:01:47.617797 28553 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:01:47.618227 28553 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:01:47.618239 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618245 28553 net.cpp:165] Memory required for data: 1410397180\nI1206 09:01:47.618253 28553 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:01:47.618265 28553 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:01:47.618271 28553 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:01:47.618278 28553 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:47.618286 28553 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:01:47.618311 28553 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:01:47.618320 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618325 28553 net.cpp:165] Memory required for data: 1421538300\nI1206 09:01:47.618330 28553 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.618340 28553 net.cpp:100] 
Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.618346 28553 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:01:47.618361 28553 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:47.618374 28553 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:47.618420 28553 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:47.618430 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618438 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618441 28553 net.cpp:165] Memory required for data: 1443820540\nI1206 09:01:47.618446 28553 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1206 09:01:47.618458 28553 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:01:47.618463 28553 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:47.618477 28553 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:01:47.618770 28553 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:01:47.618793 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618803 28553 net.cpp:165] Memory required for data: 1454961660\nI1206 09:01:47.618844 28553 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:01:47.618860 28553 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:01:47.618866 28553 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:01:47.618875 28553 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:01:47.618885 28553 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:01:47.618891 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.618896 28553 net.cpp:165] Memory required for data: 1466102780\nI1206 09:01:47.618901 28553 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 
09:01:47.618916 28553 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:01:47.618921 28553 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:01:47.618930 28553 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:01:47.619361 28553 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:01:47.619376 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.619381 28553 net.cpp:165] Memory required for data: 1471673340\nI1206 09:01:47.619390 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:01:47.619401 28553 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:01:47.619407 28553 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:01:47.619415 28553 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:01:47.619663 28553 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:01:47.619678 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.619683 28553 net.cpp:165] Memory required for data: 1477243900\nI1206 09:01:47.619694 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:01:47.619702 28553 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:01:47.619709 28553 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:01:47.619715 28553 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:01:47.619725 28553 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:01:47.619731 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.619736 28553 net.cpp:165] Memory required for data: 1482814460\nI1206 09:01:47.619741 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:01:47.619751 28553 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:01:47.619757 28553 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:01:47.619768 28553 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:01:47.620069 28553 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:01:47.620082 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 
09:01:47.620088 28553 net.cpp:165] Memory required for data: 1488385020\nI1206 09:01:47.620097 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:01:47.620108 28553 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:01:47.620115 28553 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:01:47.620122 28553 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:01:47.620371 28553 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:01:47.620388 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.620393 28553 net.cpp:165] Memory required for data: 1493955580\nI1206 09:01:47.620404 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:01:47.620411 28553 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:01:47.620417 28553 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:01:47.620424 28553 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:01:47.620434 28553 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:01:47.620440 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.620445 28553 net.cpp:165] Memory required for data: 1499526140\nI1206 09:01:47.620450 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:01:47.620460 28553 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:01:47.620471 28553 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:01:47.620483 28553 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:01:47.620915 28553 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:01:47.620929 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.620934 28553 net.cpp:165] Memory required for data: 1510667260\nI1206 09:01:47.620944 28553 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:01:47.620952 28553 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:01:47.620959 28553 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:01:47.620965 28553 net.cpp:434] L2_b3_sum_eltwise <- 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:47.620973 28553 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:01:47.621006 28553 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:01:47.621016 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.621021 28553 net.cpp:165] Memory required for data: 1521808380\nI1206 09:01:47.621026 28553 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.621034 28553 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.621039 28553 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:01:47.621050 28553 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:47.621059 28553 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:47.621106 28553 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:47.621117 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.621124 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.621129 28553 net.cpp:165] Memory required for data: 1544090620\nI1206 09:01:47.621134 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:01:47.621141 28553 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:01:47.621147 28553 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:47.621158 28553 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:01:47.621381 28553 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:01:47.621394 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.621399 28553 net.cpp:165] Memory required for data: 1555231740\nI1206 09:01:47.621409 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:01:47.621417 28553 net.cpp:100] 
Creating Layer L2_b4_brc1_relu\nI1206 09:01:47.621423 28553 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:01:47.621433 28553 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:01:47.621443 28553 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:01:47.621449 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.621454 28553 net.cpp:165] Memory required for data: 1566372860\nI1206 09:01:47.621459 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:01:47.621482 28553 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:01:47.621490 28553 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:01:47.621498 28553 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:01:47.621945 28553 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:01:47.621959 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.621964 28553 net.cpp:165] Memory required for data: 1571943420\nI1206 09:01:47.621973 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:01:47.621984 28553 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:01:47.621990 28553 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:01:47.621999 28553 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:01:47.622239 28553 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:01:47.622251 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.622257 28553 net.cpp:165] Memory required for data: 1577513980\nI1206 09:01:47.622267 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:01:47.622275 28553 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:01:47.622282 28553 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:01:47.622292 28553 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:01:47.622301 28553 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:01:47.622308 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.622313 28553 net.cpp:165] 
Memory required for data: 1583084540\nI1206 09:01:47.622318 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:01:47.622331 28553 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:01:47.622337 28553 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:01:47.622345 28553 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:01:47.622661 28553 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:01:47.622675 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.622681 28553 net.cpp:165] Memory required for data: 1588655100\nI1206 09:01:47.622689 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:01:47.622701 28553 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:01:47.622707 28553 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:01:47.622715 28553 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1206 09:01:47.622954 28553 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:01:47.622967 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.622972 28553 net.cpp:165] Memory required for data: 1594225660\nI1206 09:01:47.622982 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:01:47.622989 28553 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:01:47.622995 28553 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:01:47.623003 28553 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:01:47.623013 28553 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:01:47.623018 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.623023 28553 net.cpp:165] Memory required for data: 1599796220\nI1206 09:01:47.623028 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:01:47.623044 28553 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:01:47.623050 28553 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:01:47.623061 28553 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:01:47.623499 28553 
net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:01:47.623513 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.623519 28553 net.cpp:165] Memory required for data: 1610937340\nI1206 09:01:47.623528 28553 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:01:47.623539 28553 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:01:47.623546 28553 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:01:47.623553 28553 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:47.623564 28553 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:01:47.623589 28553 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:01:47.623605 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.623611 28553 net.cpp:165] Memory required for data: 1622078460\nI1206 09:01:47.623616 28553 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.623625 28553 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.623630 28553 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:01:47.623641 28553 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:47.623649 28553 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:47.623694 28553 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:47.623709 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.623716 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.623721 28553 net.cpp:165] Memory required for data: 1644360700\nI1206 09:01:47.623726 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:01:47.623733 28553 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:01:47.623739 28553 
net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:47.623750 28553 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:01:47.623982 28553 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:01:47.623994 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.623999 28553 net.cpp:165] Memory required for data: 1655501820\nI1206 09:01:47.624011 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:01:47.624018 28553 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:01:47.624024 28553 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:01:47.624032 28553 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:01:47.624042 28553 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:01:47.624048 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.624053 28553 net.cpp:165] Memory required for data: 1666642940\nI1206 09:01:47.624058 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:01:47.624075 28553 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:01:47.624083 28553 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:01:47.624094 28553 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:01:47.624536 28553 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:01:47.624550 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.624555 28553 net.cpp:165] Memory required for data: 1672213500\nI1206 09:01:47.624564 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:01:47.624575 28553 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:01:47.624583 28553 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:01:47.624590 28553 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:01:47.624833 28553 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:01:47.624845 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.624850 28553 net.cpp:165] Memory required for data: 1677784060\nI1206 
09:01:47.624861 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:01:47.624869 28553 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:01:47.624874 28553 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:01:47.624882 28553 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:01:47.624891 28553 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:01:47.624898 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.624902 28553 net.cpp:165] Memory required for data: 1683354620\nI1206 09:01:47.624907 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:01:47.624922 28553 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:01:47.624927 28553 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:01:47.624935 28553 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 09:01:47.625244 28553 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:01:47.625258 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.625263 28553 net.cpp:165] Memory required for data: 1688925180\nI1206 09:01:47.625272 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:01:47.625285 28553 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:01:47.625291 28553 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:01:47.625299 28553 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:01:47.625550 28553 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:01:47.625566 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.625571 28553 net.cpp:165] Memory required for data: 1694495740\nI1206 09:01:47.625581 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:01:47.625589 28553 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:01:47.625596 28553 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:01:47.625602 28553 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:01:47.625612 28553 net.cpp:150] Setting up 
L2_b5_brc3_relu\nI1206 09:01:47.625618 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.625623 28553 net.cpp:165] Memory required for data: 1700066300\nI1206 09:01:47.625627 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:01:47.625638 28553 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:01:47.625643 28553 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:01:47.625656 28553 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:01:47.626090 28553 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:01:47.626103 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626108 28553 net.cpp:165] Memory required for data: 1711207420\nI1206 09:01:47.626117 28553 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:01:47.626127 28553 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 09:01:47.626132 28553 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:01:47.626139 28553 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:47.626149 28553 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:01:47.626175 28553 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:01:47.626184 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626189 28553 net.cpp:165] Memory required for data: 1722348540\nI1206 09:01:47.626194 28553 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.626205 28553 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.626211 28553 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:01:47.626219 28553 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:47.626227 28553 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:47.626276 28553 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:47.626286 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626293 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626298 28553 net.cpp:165] Memory required for data: 1744630780\nI1206 09:01:47.626303 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:01:47.626312 28553 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:01:47.626317 28553 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:47.626327 28553 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:01:47.626562 28553 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:01:47.626575 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626580 28553 net.cpp:165] Memory required for data: 1755771900\nI1206 09:01:47.626590 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:01:47.626619 28553 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:01:47.626627 28553 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:01:47.626634 28553 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:01:47.626644 28553 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:01:47.626652 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.626655 28553 net.cpp:165] Memory required for data: 1766913020\nI1206 09:01:47.626660 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:01:47.626672 28553 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:01:47.626683 28553 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:01:47.626693 28553 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:01:47.627132 28553 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:01:47.627146 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.627151 28553 net.cpp:165] Memory 
required for data: 1772483580\nI1206 09:01:47.627161 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:01:47.627171 28553 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:01:47.627178 28553 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:01:47.627187 28553 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:01:47.627431 28553 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:01:47.627444 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.627449 28553 net.cpp:165] Memory required for data: 1778054140\nI1206 09:01:47.627460 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:01:47.627473 28553 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:01:47.627480 28553 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:01:47.627487 28553 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:01:47.627497 28553 net.cpp:150] Setting up L2_b6_brc2_relu\nI1206 09:01:47.627504 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.627508 28553 net.cpp:165] Memory required for data: 1783624700\nI1206 09:01:47.627513 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:01:47.627529 28553 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:01:47.627535 28553 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:01:47.627547 28553 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:01:47.627848 28553 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:01:47.627861 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.627866 28553 net.cpp:165] Memory required for data: 1789195260\nI1206 09:01:47.627876 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:01:47.627887 28553 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:01:47.627893 28553 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:01:47.627904 28553 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:01:47.628142 28553 net.cpp:150] Setting 
up L2_b6_brc3_bn\nI1206 09:01:47.628154 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.628159 28553 net.cpp:165] Memory required for data: 1794765820\nI1206 09:01:47.628170 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:01:47.628177 28553 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:01:47.628183 28553 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:01:47.628190 28553 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:01:47.628201 28553 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:01:47.628206 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:47.628211 28553 net.cpp:165] Memory required for data: 1800336380\nI1206 09:01:47.628216 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:01:47.628229 28553 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:01:47.628235 28553 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1206 09:01:47.628247 28553 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:01:47.628681 28553 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:01:47.628696 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.628707 28553 net.cpp:165] Memory required for data: 1811477500\nI1206 09:01:47.628716 28553 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:01:47.628728 28553 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:01:47.628734 28553 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:01:47.628742 28553 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:47.628749 28553 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:01:47.628775 28553 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:01:47.628795 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.628800 28553 net.cpp:165] Memory required for data: 1822618620\nI1206 09:01:47.628805 28553 layer_factory.hpp:77] Creating layer 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.628813 28553 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.628818 28553 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:01:47.628826 28553 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:47.628839 28553 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:47.628883 28553 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:47.628895 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.628901 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.628906 28553 net.cpp:165] Memory required for data: 1844900860\nI1206 09:01:47.628911 28553 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1206 09:01:47.628924 28553 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:01:47.628931 28553 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:47.628942 28553 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:01:47.629168 28553 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:01:47.629182 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.629189 28553 net.cpp:165] Memory required for data: 1856041980\nI1206 09:01:47.629199 28553 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:01:47.629206 28553 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:01:47.629212 28553 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:01:47.629220 28553 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:01:47.629230 28553 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:01:47.629236 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:47.629240 28553 net.cpp:165] Memory required for data: 1867183100\nI1206 
09:01:47.629245 28553 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:01:47.629256 28553 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:01:47.629261 28553 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:01:47.629272 28553 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:01:47.629899 28553 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:01:47.629914 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.629920 28553 net.cpp:165] Memory required for data: 1869968380\nI1206 09:01:47.629928 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:01:47.629937 28553 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:01:47.629943 28553 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:01:47.629956 28553 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:01:47.630197 28553 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:01:47.630208 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.630213 28553 net.cpp:165] Memory required for data: 1872753660\nI1206 09:01:47.630224 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:01:47.630235 28553 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:01:47.630241 28553 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:01:47.630249 28553 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:01:47.630265 28553 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:01:47.630273 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.630278 28553 net.cpp:165] Memory required for data: 1875538940\nI1206 09:01:47.630282 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:01:47.630293 28553 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:01:47.630298 28553 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:01:47.630311 28553 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:01:47.630687 28553 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 
09:01:47.630702 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.630707 28553 net.cpp:165] Memory required for data: 1878324220\nI1206 09:01:47.630715 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:01:47.630723 28553 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:01:47.630729 28553 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:01:47.630738 28553 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:01:47.630972 28553 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:01:47.630985 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.630990 28553 net.cpp:165] Memory required for data: 1881109500\nI1206 09:01:47.631000 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:01:47.631007 28553 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:01:47.631013 28553 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:01:47.631026 28553 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:01:47.631036 28553 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:01:47.631043 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.631048 28553 net.cpp:165] Memory required for data: 1883894780\nI1206 09:01:47.631053 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:01:47.631063 28553 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:01:47.631072 28553 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:01:47.631080 28553 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:01:47.633090 28553 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:01:47.633108 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.633114 28553 net.cpp:165] Memory required for data: 1889465340\nI1206 09:01:47.633123 28553 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:01:47.633138 28553 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:01:47.633147 28553 net.cpp:434] L3_b1_chanInc_conv <- 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:47.633158 28553 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:01:47.634093 28553 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:01:47.634109 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634114 28553 net.cpp:165] Memory required for data: 1895035900\nI1206 09:01:47.634122 28553 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:01:47.634131 28553 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:01:47.634137 28553 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:01:47.634145 28553 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:01:47.634155 28553 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:01:47.634191 28553 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:01:47.634202 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634207 28553 net.cpp:165] Memory required for data: 1900606460\nI1206 09:01:47.634212 28553 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.634220 28553 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.634227 28553 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:01:47.634239 28553 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:47.634249 28553 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:47.634305 28553 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:47.634316 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634323 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634327 28553 net.cpp:165] Memory required for data: 1911747580\nI1206 09:01:47.634332 28553 layer_factory.hpp:77] 
Creating layer L3_b2_brc1_bn\nI1206 09:01:47.634344 28553 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:01:47.634351 28553 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:47.634361 28553 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:01:47.634598 28553 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:01:47.634611 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634616 28553 net.cpp:165] Memory required for data: 1917318140\nI1206 09:01:47.634626 28553 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:01:47.634635 28553 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:01:47.634641 28553 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:01:47.634649 28553 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:01:47.634658 28553 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:01:47.634665 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.634670 28553 net.cpp:165] Memory required for data: 1922888700\nI1206 09:01:47.634673 28553 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:01:47.634690 28553 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:01:47.634696 28553 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:01:47.634707 28553 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:01:47.635646 28553 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:01:47.635665 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.635670 28553 net.cpp:165] Memory required for data: 1925673980\nI1206 09:01:47.635679 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:01:47.635691 28553 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:01:47.635697 28553 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:01:47.635706 28553 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:01:47.635951 28553 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:01:47.635964 28553 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.635969 28553 net.cpp:165] Memory required for data: 1928459260\nI1206 09:01:47.635980 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:01:47.635992 28553 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:01:47.635998 28553 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:01:47.636004 28553 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:01:47.636014 28553 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:01:47.636021 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.636025 28553 net.cpp:165] Memory required for data: 1931244540\nI1206 09:01:47.636030 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:01:47.636041 28553 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:01:47.636046 28553 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:01:47.636059 28553 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:01:47.636435 28553 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:01:47.636448 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.636453 28553 net.cpp:165] Memory required for data: 1934029820\nI1206 09:01:47.636462 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:01:47.636477 28553 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:01:47.636483 28553 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:01:47.636492 28553 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:01:47.636744 28553 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:01:47.636756 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.636761 28553 net.cpp:165] Memory required for data: 1936815100\nI1206 09:01:47.636778 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:01:47.636787 28553 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:01:47.636793 28553 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:01:47.636803 28553 
net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:01:47.636813 28553 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:01:47.636821 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.636824 28553 net.cpp:165] Memory required for data: 1939600380\nI1206 09:01:47.636829 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:01:47.636843 28553 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:01:47.636849 28553 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:01:47.636857 28553 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:01:47.637792 28553 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:01:47.637806 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.637812 28553 net.cpp:165] Memory required for data: 1945170940\nI1206 09:01:47.637820 28553 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:01:47.637830 28553 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1206 09:01:47.637836 28553 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:01:47.637843 28553 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:47.637854 28553 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:01:47.637886 28553 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:01:47.637894 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.637899 28553 net.cpp:165] Memory required for data: 1950741500\nI1206 09:01:47.637904 28553 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.637918 28553 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.637925 28553 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:01:47.637933 28553 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:47.637945 28553 
net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:47.637991 28553 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:47.638003 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.638010 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.638015 28553 net.cpp:165] Memory required for data: 1961882620\nI1206 09:01:47.638020 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:01:47.638031 28553 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:01:47.638036 28553 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:47.638044 28553 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:01:47.638281 28553 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:01:47.638294 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.638299 28553 net.cpp:165] Memory required for data: 1967453180\nI1206 09:01:47.638309 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:01:47.638317 28553 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:01:47.638324 28553 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:01:47.638334 28553 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:01:47.638344 28553 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:01:47.638350 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.638355 28553 net.cpp:165] Memory required for data: 1973023740\nI1206 09:01:47.638360 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:01:47.638373 28553 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:01:47.638380 28553 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:01:47.638387 28553 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:01:47.639323 28553 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:01:47.639338 28553 net.cpp:157] Top shape: 85 128 8 8 
(696320)\nI1206 09:01:47.639343 28553 net.cpp:165] Memory required for data: 1975809020\nI1206 09:01:47.639353 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:01:47.639360 28553 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:01:47.639370 28553 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:01:47.639379 28553 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:01:47.639623 28553 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:01:47.639637 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.639642 28553 net.cpp:165] Memory required for data: 1978594300\nI1206 09:01:47.639652 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:01:47.639660 28553 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:01:47.639667 28553 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:01:47.639673 28553 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 09:01:47.639683 28553 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:01:47.639690 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.639694 28553 net.cpp:165] Memory required for data: 1981379580\nI1206 09:01:47.639699 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:01:47.639714 28553 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:01:47.639719 28553 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:01:47.639730 28553 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:01:47.640100 28553 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:01:47.640115 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.640120 28553 net.cpp:165] Memory required for data: 1984164860\nI1206 09:01:47.640127 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:01:47.640141 28553 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:01:47.640147 28553 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:01:47.640156 28553 net.cpp:408] L3_b3_brc3_bn -> 
L3_b3_brc3_bn_top\nI1206 09:01:47.640403 28553 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:01:47.640419 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.640424 28553 net.cpp:165] Memory required for data: 1986950140\nI1206 09:01:47.640434 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:01:47.640442 28553 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:01:47.640449 28553 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:01:47.640455 28553 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:01:47.640470 28553 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:01:47.640477 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.640482 28553 net.cpp:165] Memory required for data: 1989735420\nI1206 09:01:47.640487 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:01:47.640501 28553 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:01:47.640507 28553 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:01:47.640516 28553 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:01:47.641436 28553 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:01:47.641450 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.641455 28553 net.cpp:165] Memory required for data: 1995305980\nI1206 09:01:47.641464 28553 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:01:47.641479 28553 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:01:47.641485 28553 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:01:47.641492 28553 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:47.641505 28553 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:01:47.641540 28553 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:01:47.641558 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.641563 28553 net.cpp:165] Memory required for data: 2000876540\nI1206 
09:01:47.641568 28553 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.641583 28553 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.641589 28553 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:01:47.641597 28553 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:47.641609 28553 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:47.641659 28553 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:47.641670 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.641677 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.641682 28553 net.cpp:165] Memory required for data: 2012017660\nI1206 09:01:47.641687 28553 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:01:47.641698 28553 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:01:47.641705 28553 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:47.641712 28553 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:01:47.641947 28553 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:01:47.641961 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.641966 28553 net.cpp:165] Memory required for data: 2017588220\nI1206 09:01:47.641978 28553 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:01:47.641985 28553 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:01:47.641991 28553 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:01:47.641999 28553 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:01:47.642007 28553 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:01:47.642014 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.642019 28553 
net.cpp:165] Memory required for data: 2023158780\nI1206 09:01:47.642024 28553 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:01:47.642037 28553 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:01:47.642043 28553 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:01:47.642051 28553 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:01:47.642980 28553 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:01:47.642994 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.643000 28553 net.cpp:165] Memory required for data: 2025944060\nI1206 09:01:47.643008 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:01:47.643018 28553 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:01:47.643023 28553 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:01:47.643034 28553 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:01:47.643273 28553 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 09:01:47.643286 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.643291 28553 net.cpp:165] Memory required for data: 2028729340\nI1206 09:01:47.643301 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:01:47.643312 28553 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:01:47.643319 28553 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:01:47.643326 28553 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:01:47.643335 28553 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:01:47.643342 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.643347 28553 net.cpp:165] Memory required for data: 2031514620\nI1206 09:01:47.643352 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:01:47.643362 28553 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:01:47.643368 28553 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:01:47.643378 28553 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:01:47.643760 
28553 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:01:47.643774 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.643779 28553 net.cpp:165] Memory required for data: 2034299900\nI1206 09:01:47.643795 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:01:47.643805 28553 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:01:47.643810 28553 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:01:47.643818 28553 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:01:47.644057 28553 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:01:47.644070 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.644075 28553 net.cpp:165] Memory required for data: 2037085180\nI1206 09:01:47.644085 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:01:47.644093 28553 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:01:47.644099 28553 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 09:01:47.644109 28553 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:01:47.644119 28553 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:01:47.644126 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.644131 28553 net.cpp:165] Memory required for data: 2039870460\nI1206 09:01:47.644135 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:01:47.644151 28553 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:01:47.644157 28553 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:01:47.644166 28553 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:01:47.645102 28553 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:01:47.645117 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645123 28553 net.cpp:165] Memory required for data: 2045441020\nI1206 09:01:47.645130 28553 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:01:47.645139 28553 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:01:47.645146 28553 
net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:01:47.645153 28553 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:47.645164 28553 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:01:47.645196 28553 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:01:47.645205 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645210 28553 net.cpp:165] Memory required for data: 2051011580\nI1206 09:01:47.645215 28553 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.645228 28553 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.645234 28553 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:01:47.645242 28553 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:47.645256 28553 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:47.645300 28553 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:47.645313 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645318 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645323 28553 net.cpp:165] Memory required for data: 2062152700\nI1206 09:01:47.645328 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:01:47.645339 28553 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:01:47.645345 28553 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:47.645354 28553 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:01:47.645589 28553 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:01:47.645602 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645608 28553 net.cpp:165] Memory required for data: 2067723260\nI1206 
09:01:47.645618 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:01:47.645627 28553 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:01:47.645632 28553 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:01:47.645642 28553 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:01:47.645653 28553 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:01:47.645666 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.645671 28553 net.cpp:165] Memory required for data: 2073293820\nI1206 09:01:47.645676 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:01:47.645690 28553 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:01:47.645696 28553 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:01:47.645705 28553 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:01:47.647686 28553 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:01:47.647704 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.647709 28553 net.cpp:165] Memory required for data: 2076079100\nI1206 09:01:47.647719 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:01:47.647728 28553 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:01:47.647735 28553 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:01:47.647752 28553 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:01:47.647992 28553 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:01:47.648005 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.648010 28553 net.cpp:165] Memory required for data: 2078864380\nI1206 09:01:47.648021 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:01:47.648032 28553 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:01:47.648039 28553 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:01:47.648046 28553 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:01:47.648056 28553 net.cpp:150] Setting up 
L3_b5_brc2_relu\nI1206 09:01:47.648063 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.648068 28553 net.cpp:165] Memory required for data: 2081649660\nI1206 09:01:47.648072 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:01:47.648083 28553 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:01:47.648089 28553 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:01:47.648102 28553 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:01:47.648483 28553 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:01:47.648499 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.648504 28553 net.cpp:165] Memory required for data: 2084434940\nI1206 09:01:47.648545 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:01:47.648561 28553 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:01:47.648568 28553 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:01:47.648576 28553 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1206 09:01:47.648825 28553 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:01:47.648841 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.648847 28553 net.cpp:165] Memory required for data: 2087220220\nI1206 09:01:47.648857 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:01:47.648866 28553 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:01:47.648872 28553 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:01:47.648880 28553 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:01:47.648890 28553 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:01:47.648896 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.648901 28553 net.cpp:165] Memory required for data: 2090005500\nI1206 09:01:47.648905 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:01:47.648916 28553 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:01:47.648921 28553 net.cpp:434] L3_b5_brc3_conv <- 
L3_b5_brc3_bn_top\nI1206 09:01:47.648932 28553 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:01:47.649883 28553 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:01:47.649897 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.649902 28553 net.cpp:165] Memory required for data: 2095576060\nI1206 09:01:47.649910 28553 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:01:47.649920 28553 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:01:47.649926 28553 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:01:47.649941 28553 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:47.649953 28553 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:01:47.649987 28553 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:01:47.650001 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.650005 28553 net.cpp:165] Memory required for data: 2101146620\nI1206 09:01:47.650010 28553 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.650018 28553 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.650024 28553 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:01:47.650032 28553 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:47.650041 28553 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:47.650092 28553 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:47.650104 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.650111 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.650116 28553 net.cpp:165] Memory required for data: 2112287740\nI1206 09:01:47.650121 28553 layer_factory.hpp:77] Creating layer 
L3_b6_brc1_bn\nI1206 09:01:47.650131 28553 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:01:47.650137 28553 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:47.650146 28553 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:01:47.650380 28553 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:01:47.650396 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.650401 28553 net.cpp:165] Memory required for data: 2117858300\nI1206 09:01:47.650413 28553 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:01:47.650420 28553 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:01:47.650426 28553 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:01:47.650434 28553 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:01:47.650442 28553 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:01:47.650449 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.650454 28553 net.cpp:165] Memory required for data: 2123428860\nI1206 09:01:47.650458 28553 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:01:47.650475 28553 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:01:47.650482 28553 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:01:47.650493 28553 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:01:47.651422 28553 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:01:47.651437 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.651443 28553 net.cpp:165] Memory required for data: 2126214140\nI1206 09:01:47.651450 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:01:47.651459 28553 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:01:47.651471 28553 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:01:47.651484 28553 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:01:47.651758 28553 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:01:47.651772 28553 net.cpp:157] Top shape: 
85 128 8 8 (696320)\nI1206 09:01:47.651777 28553 net.cpp:165] Memory required for data: 2128999420\nI1206 09:01:47.651788 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:01:47.651804 28553 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:01:47.651810 28553 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:01:47.651818 28553 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:01:47.651828 28553 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:01:47.651834 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.651839 28553 net.cpp:165] Memory required for data: 2131784700\nI1206 09:01:47.651844 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:01:47.651862 28553 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:01:47.651868 28553 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:01:47.651880 28553 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:01:47.652266 28553 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:01:47.652281 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.652285 28553 net.cpp:165] Memory required for data: 2134569980\nI1206 09:01:47.652293 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:01:47.652302 28553 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:01:47.652308 28553 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:01:47.652317 28553 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:01:47.652627 28553 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:01:47.652649 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.652657 28553 net.cpp:165] Memory required for data: 2137355260\nI1206 09:01:47.652667 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:01:47.652675 28553 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:01:47.652683 28553 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:01:47.652693 28553 net.cpp:395] L3_b6_brc3_relu 
-> L3_b6_brc3_bn_top (in-place)\nI1206 09:01:47.652704 28553 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:01:47.652711 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:47.652715 28553 net.cpp:165] Memory required for data: 2140140540\nI1206 09:01:47.652720 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:01:47.652734 28553 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:01:47.652740 28553 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:01:47.652750 28553 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:01:47.653707 28553 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:01:47.653723 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.653728 28553 net.cpp:165] Memory required for data: 2145711100\nI1206 09:01:47.653735 28553 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:01:47.653744 28553 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:01:47.653750 28553 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:01:47.653758 28553 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:47.653769 28553 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:01:47.653801 28553 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:01:47.653810 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.653815 28553 net.cpp:165] Memory required for data: 2151281660\nI1206 09:01:47.653821 28553 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:01:47.653834 28553 net.cpp:100] Creating Layer post_bn\nI1206 09:01:47.653841 28553 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:01:47.653852 28553 net.cpp:408] post_bn -> post_bn_top\nI1206 09:01:47.654096 28553 net.cpp:150] Setting up post_bn\nI1206 09:01:47.654109 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.654114 28553 net.cpp:165] Memory required for data: 2156852220\nI1206 09:01:47.654124 28553 layer_factory.hpp:77] 
Creating layer post_relu\nI1206 09:01:47.654134 28553 net.cpp:100] Creating Layer post_relu\nI1206 09:01:47.654139 28553 net.cpp:434] post_relu <- post_bn_top\nI1206 09:01:47.654146 28553 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:01:47.654155 28553 net.cpp:150] Setting up post_relu\nI1206 09:01:47.654162 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:47.654167 28553 net.cpp:165] Memory required for data: 2162422780\nI1206 09:01:47.654171 28553 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:01:47.654186 28553 net.cpp:100] Creating Layer post_pool\nI1206 09:01:47.654192 28553 net.cpp:434] post_pool <- post_bn_top\nI1206 09:01:47.654201 28553 net.cpp:408] post_pool -> post_pool\nI1206 09:01:47.654306 28553 net.cpp:150] Setting up post_pool\nI1206 09:01:47.654327 28553 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:01:47.654340 28553 net.cpp:165] Memory required for data: 2162509820\nI1206 09:01:47.654346 28553 layer_factory.hpp:77] Creating layer post_FC\nI1206 09:01:47.654423 28553 net.cpp:100] Creating Layer post_FC\nI1206 09:01:47.654435 28553 net.cpp:434] post_FC <- post_pool\nI1206 09:01:47.654449 28553 net.cpp:408] post_FC -> post_FC_top\nI1206 09:01:47.654728 28553 net.cpp:150] Setting up post_FC\nI1206 09:01:47.654745 28553 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.654750 28553 net.cpp:165] Memory required for data: 2162513220\nI1206 09:01:47.654760 28553 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:01:47.654772 28553 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:01:47.654778 28553 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:01:47.654786 28553 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:01:47.654799 28553 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:01:47.654850 28553 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:01:47.654861 28553 
net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.654867 28553 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:47.654872 28553 net.cpp:165] Memory required for data: 2162520020\nI1206 09:01:47.654877 28553 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:01:47.654927 28553 net.cpp:100] Creating Layer accuracy\nI1206 09:01:47.654939 28553 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:01:47.654947 28553 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:01:47.654955 28553 net.cpp:408] accuracy -> accuracy\nI1206 09:01:47.655004 28553 net.cpp:150] Setting up accuracy\nI1206 09:01:47.655019 28553 net.cpp:157] Top shape: (1)\nI1206 09:01:47.655023 28553 net.cpp:165] Memory required for data: 2162520024\nI1206 09:01:47.655028 28553 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:47.655041 28553 net.cpp:100] Creating Layer loss\nI1206 09:01:47.655047 28553 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 09:01:47.655055 28553 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:01:47.655062 28553 net.cpp:408] loss -> loss\nI1206 09:01:47.656080 28553 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:47.657351 28553 net.cpp:150] Setting up loss\nI1206 09:01:47.657378 28553 net.cpp:157] Top shape: (1)\nI1206 09:01:47.657384 28553 net.cpp:160]     with loss weight 1\nI1206 09:01:47.657482 28553 net.cpp:165] Memory required for data: 2162520028\nI1206 09:01:47.657491 28553 net.cpp:226] loss needs backward computation.\nI1206 09:01:47.657498 28553 net.cpp:228] accuracy does not need backward computation.\nI1206 09:01:47.657505 28553 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:01:47.657510 28553 net.cpp:226] post_FC needs backward computation.\nI1206 09:01:47.657516 28553 net.cpp:226] post_pool needs backward computation.\nI1206 09:01:47.657521 28553 net.cpp:226] post_relu needs backward computation.\nI1206 09:01:47.657526 28553 net.cpp:226] post_bn needs backward 
computation.\nI1206 09:01:47.657529 28553 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:01:47.657536 28553 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:01:47.657541 28553 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:01:47.657546 28553 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:01:47.657551 28553 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:01:47.657555 28553 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:01:47.657560 28553 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:01:47.657565 28553 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:01:47.657570 28553 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:01:47.657574 28553 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:01:47.657579 28553 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657594 28553 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.657600 28553 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:01:47.657605 28553 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:01:47.657610 28553 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:01:47.657616 28553 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:01:47.657621 28553 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:01:47.657626 28553 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:01:47.657631 28553 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:01:47.657636 28553 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:01:47.657641 28553 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:01:47.657646 28553 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657651 28553 
net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:01:47.657657 28553 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:01:47.657662 28553 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:01:47.657667 28553 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:01:47.657672 28553 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:01:47.657677 28553 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:01:47.657682 28553 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:01:47.657687 28553 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:01:47.657692 28553 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:01:47.657697 28553 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:01:47.657703 28553 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657708 28553 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.657713 28553 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:01:47.657719 28553 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:01:47.657723 28553 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:01:47.657729 28553 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:01:47.657734 28553 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:01:47.657739 28553 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:01:47.657744 28553 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:01:47.657749 28553 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:01:47.657754 28553 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:01:47.657759 28553 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657764 28553 net.cpp:226] L3_b2_sum_eltwise needs backward 
computation.\nI1206 09:01:47.657770 28553 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:01:47.657775 28553 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:01:47.657786 28553 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:01:47.657791 28553 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:01:47.657796 28553 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:01:47.657801 28553 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:01:47.657807 28553 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:01:47.657812 28553 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:01:47.657817 28553 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:01:47.657822 28553 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657829 28553 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.657835 28553 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.657840 28553 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:01:47.657850 28553 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:01:47.657855 28553 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:01:47.657860 28553 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:01:47.657866 28553 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:01:47.657871 28553 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:01:47.657876 28553 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:01:47.657881 28553 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:01:47.657886 28553 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:01:47.657891 28553 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657896 28553 
net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:01:47.657902 28553 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:01:47.657907 28553 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:01:47.657912 28553 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:01:47.657917 28553 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:01:47.657922 28553 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:01:47.657927 28553 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:01:47.657932 28553 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:01:47.657938 28553 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:01:47.657943 28553 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:01:47.657948 28553 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.657953 28553 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.657958 28553 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:01:47.657964 28553 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:01:47.657968 28553 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:01:47.657974 28553 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:01:47.657979 28553 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:01:47.657984 28553 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:01:47.657989 28553 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:01:47.657994 28553 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:01:47.657999 28553 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:01:47.658005 28553 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658010 28553 net.cpp:226] L2_b4_sum_eltwise needs backward 
computation.\nI1206 09:01:47.658016 28553 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:01:47.658021 28553 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:01:47.658026 28553 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:01:47.658031 28553 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:01:47.658036 28553 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:01:47.658041 28553 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:01:47.658046 28553 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:01:47.658051 28553 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:01:47.658056 28553 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:01:47.658062 28553 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658067 28553 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.658073 28553 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:01:47.658078 28553 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:01:47.658083 28553 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:01:47.658088 28553 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:01:47.658098 28553 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:01:47.658104 28553 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:01:47.658115 28553 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:01:47.658121 28553 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:01:47.658126 28553 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:01:47.658133 28553 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658138 28553 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:01:47.658143 28553 
net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:01:47.658149 28553 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:01:47.658154 28553 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:01:47.658159 28553 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:01:47.658165 28553 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:01:47.658170 28553 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:01:47.658175 28553 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:01:47.658181 28553 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:01:47.658186 28553 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:01:47.658191 28553 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658197 28553 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.658203 28553 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.658208 28553 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:01:47.658215 28553 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:01:47.658219 28553 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:01:47.658224 28553 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:01:47.658231 28553 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:01:47.658236 28553 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:01:47.658241 28553 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:01:47.658246 28553 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:01:47.658252 28553 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:01:47.658257 28553 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658262 28553 net.cpp:226] L1_b6_sum_eltwise needs backward 
computation.\nI1206 09:01:47.658267 28553 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:01:47.658273 28553 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:01:47.658278 28553 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:01:47.658283 28553 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:01:47.658289 28553 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:01:47.658294 28553 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:01:47.658299 28553 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:01:47.658305 28553 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:01:47.658310 28553 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:01:47.658315 28553 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658321 28553 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1206 09:01:47.658327 28553 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:01:47.658332 28553 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:01:47.658337 28553 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:01:47.658342 28553 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:01:47.658349 28553 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:01:47.658354 28553 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:01:47.658365 28553 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:01:47.658370 28553 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:01:47.658375 28553 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:01:47.658381 28553 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658386 28553 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:01:47.658392 28553 
net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:01:47.658398 28553 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:01:47.658403 28553 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:01:47.658409 28553 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:01:47.658416 28553 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:01:47.658421 28553 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:01:47.658426 28553 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:01:47.658432 28553 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:01:47.658437 28553 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:01:47.658442 28553 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658447 28553 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:01:47.658453 28553 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:01:47.658458 28553 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:01:47.658464 28553 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:01:47.658476 28553 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:01:47.658483 28553 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:01:47.658488 28553 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:01:47.658493 28553 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:01:47.658499 28553 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:01:47.658504 28553 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:01:47.658509 28553 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658515 28553 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:01:47.658521 28553 net.cpp:226] L1_b2_brc3_conv needs backward 
computation.\nI1206 09:01:47.658526 28553 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:01:47.658531 28553 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:01:47.658537 28553 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:01:47.658542 28553 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:01:47.658547 28553 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:01:47.658553 28553 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:01:47.658558 28553 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:01:47.658563 28553 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:01:47.658569 28553 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:47.658576 28553 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:01:47.658581 28553 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1206 09:01:47.658586 28553 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:01:47.658593 28553 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:01:47.658598 28553 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:01:47.658604 28553 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:01:47.658609 28553 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:01:47.658615 28553 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:01:47.658620 28553 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:01:47.658625 28553 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:01:47.658637 28553 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:01:47.658643 28553 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:01:47.658648 28553 net.cpp:226] pre_conv needs backward computation.\nI1206 09:01:47.658654 28553 net.cpp:228] 
label_dataLayer_1_split does not need backward computation.\nI1206 09:01:47.658661 28553 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:01:47.658665 28553 net.cpp:270] This network produces output accuracy\nI1206 09:01:47.658673 28553 net.cpp:270] This network produces output loss\nI1206 09:01:47.658968 28553 net.cpp:283] Network initialization done.\nI1206 09:01:47.664954 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:47.664988 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:47.665046 28553 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1206 09:01:47.665297 28553 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1206 09:01:47.666771 28553 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: 
\"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: 
\"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: 
\"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: 
\"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  
name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\nI1206 09:01:47.667970 28553 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:01:47.668728 28553 net.cpp:100] Creating Layer dataLayer\nI1206 09:01:47.668751 28553 net.cpp:408] dataLayer -> data_top\nI1206 09:01:47.668768 28553 net.cpp:408] dataLayer -> label\nI1206 09:01:47.668781 28553 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:01:57.605829 28563 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1206 09:01:57.609325 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:57.615787 28553 net.cpp:150] Setting up dataLayer\nI1206 09:01:57.615825 28553 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:01:57.615834 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.615840 28553 net.cpp:165] Memory required for data: 1044820\nI1206 09:01:57.615850 28553 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:01:57.615869 28553 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:01:57.615876 28553 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:01:57.615895 28553 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:01:57.615914 28553 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:01:57.616061 28553 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:01:57.616075 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.616086 28553 net.cpp:157] Top shape: 85 (85)\nI1206 09:01:57.616091 28553 net.cpp:165] Memory required for data: 1045500\nI1206 09:01:57.616096 28553 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:01:57.616122 28553 net.cpp:100] Creating Layer pre_conv\nI1206 09:01:57.616129 28553 net.cpp:434] pre_conv <- data_top\nI1206 09:01:57.616139 28553 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:01:57.616595 28553 net.cpp:150] Setting up pre_conv\nI1206 09:01:57.616616 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.616622 
28553 net.cpp:165] Memory required for data: 6616060\nI1206 09:01:57.616644 28553 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:01:57.616654 28553 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:01:57.616660 28553 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:01:57.616667 28553 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:01:57.616680 28553 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:01:57.616735 28553 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:01:57.616747 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.616757 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.616762 28553 net.cpp:165] Memory required for data: 17757180\nI1206 09:01:57.616767 28553 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:01:57.616799 28553 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:01:57.616807 28553 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:01:57.616816 28553 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:01:57.617167 28553 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:01:57.617184 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.617192 28553 net.cpp:165] Memory required for data: 23327740\nI1206 09:01:57.617210 28553 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:01:57.617220 28553 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:01:57.617226 28553 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:01:57.617234 28553 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:01:57.617244 28553 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:01:57.617251 28553 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:01:57.617256 28553 net.cpp:165] Memory required for data: 28898300\nI1206 09:01:57.617264 28553 layer_factory.hpp:77] Creating layer 
L1_b1_brc1_conv\nI1206 09:01:57.617280 28553 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:01:57.617286 28553 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:01:57.617295 28553 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:01:57.617714 28553 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:01:57.617729 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.617734 28553 net.cpp:165] Memory required for data: 40039420\nI1206 09:01:57.617744 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:01:57.617756 28553 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:01:57.617766 28553 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:01:57.617784 28553 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:01:57.618113 28553 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:01:57.618129 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.618134 28553 net.cpp:165] Memory required for data: 51180540\nI1206 09:01:57.618149 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:01:57.618160 28553 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1206 09:01:57.618165 28553 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:01:57.618172 28553 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:01:57.618187 28553 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:01:57.618196 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.618199 28553 net.cpp:165] Memory required for data: 62321660\nI1206 09:01:57.618206 28553 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:01:57.618219 28553 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:01:57.618225 28553 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:01:57.618242 28553 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:01:57.619945 28553 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:01:57.619963 28553 net.cpp:157] Top shape: 85 32 32 32 
(2785280)\nI1206 09:01:57.619969 28553 net.cpp:165] Memory required for data: 73462780\nI1206 09:01:57.619981 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:01:57.619997 28553 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:01:57.620003 28553 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:01:57.620013 28553 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:01:57.620296 28553 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:01:57.620308 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.620313 28553 net.cpp:165] Memory required for data: 84603900\nI1206 09:01:57.620328 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:01:57.620337 28553 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:01:57.620343 28553 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:01:57.620359 28553 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:01:57.620371 28553 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:01:57.620379 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.620384 28553 net.cpp:165] Memory required for data: 95745020\nI1206 09:01:57.620389 28553 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:01:57.620405 28553 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:01:57.620411 28553 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:01:57.620424 28553 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:01:57.620813 28553 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:01:57.620829 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.620834 28553 net.cpp:165] Memory required for data: 118027260\nI1206 09:01:57.620849 28553 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:01:57.620868 28553 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:01:57.620877 28553 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:01:57.620887 28553 net.cpp:408] 
L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:01:57.621253 28553 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:01:57.621268 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621273 28553 net.cpp:165] Memory required for data: 140309500\nI1206 09:01:57.621284 28553 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:01:57.621295 28553 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:01:57.621301 28553 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:01:57.621309 28553 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:01:57.621320 28553 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:01:57.621362 28553 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:01:57.621378 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621393 28553 net.cpp:165] Memory required for data: 162591740\nI1206 09:01:57.621400 28553 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.621409 28553 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.621415 28553 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:01:57.621425 28553 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:57.621436 28553 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:57.621510 28553 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:01:57.621526 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621533 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.621538 28553 net.cpp:165] Memory required for data: 207156220\nI1206 09:01:57.621544 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:01:57.621557 28553 net.cpp:100] Creating Layer 
L1_b2_brc1_bn\nI1206 09:01:57.621567 28553 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:01:57.621575 28553 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:01:57.622004 28553 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:01:57.622017 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.622022 28553 net.cpp:165] Memory required for data: 229438460\nI1206 09:01:57.622035 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:01:57.622043 28553 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:01:57.622050 28553 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:01:57.622056 28553 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:01:57.622066 28553 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:01:57.622073 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.622078 28553 net.cpp:165] Memory required for data: 251720700\nI1206 09:01:57.622083 28553 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:01:57.622097 28553 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:01:57.622103 28553 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:01:57.622112 28553 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:01:57.622447 28553 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:01:57.622462 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622473 28553 net.cpp:165] Memory required for data: 262861820\nI1206 09:01:57.622483 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:01:57.622493 28553 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:01:57.622501 28553 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:01:57.622512 28553 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:01:57.622778 28553 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:01:57.622797 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622802 28553 net.cpp:165] 
Memory required for data: 274002940\nI1206 09:01:57.622813 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:01:57.622822 28553 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:01:57.622828 28553 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:01:57.622836 28553 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:01:57.622846 28553 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:01:57.622853 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.622859 28553 net.cpp:165] Memory required for data: 285144060\nI1206 09:01:57.622862 28553 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:01:57.622874 28553 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:01:57.622880 28553 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:01:57.622891 28553 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:01:57.623185 28553 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:01:57.623199 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623211 28553 net.cpp:165] Memory required for data: 296285180\nI1206 09:01:57.623221 28553 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:01:57.623232 28553 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:01:57.623239 28553 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:01:57.623247 28553 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:01:57.623540 28553 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:01:57.623558 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623564 28553 net.cpp:165] Memory required for data: 307426300\nI1206 09:01:57.623575 28553 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:01:57.623584 28553 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:01:57.623589 28553 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:01:57.623596 28553 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:01:57.623606 28553 
net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:01:57.623613 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.623618 28553 net.cpp:165] Memory required for data: 318567420\nI1206 09:01:57.623623 28553 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:01:57.623634 28553 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:01:57.623639 28553 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:01:57.623654 28553 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:01:57.623980 28553 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:01:57.623993 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.623998 28553 net.cpp:165] Memory required for data: 340849660\nI1206 09:01:57.624016 28553 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:01:57.624027 28553 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:01:57.624032 28553 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:01:57.624039 28553 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:01:57.624050 28553 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1206 09:01:57.624083 28553 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:01:57.624094 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624099 28553 net.cpp:165] Memory required for data: 363131900\nI1206 09:01:57.624104 28553 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.624115 28553 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.624120 28553 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:01:57.624128 28553 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:57.624138 28553 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:57.624187 28553 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:01:57.624198 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624205 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624210 28553 net.cpp:165] Memory required for data: 407696380\nI1206 09:01:57.624215 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:01:57.624227 28553 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:01:57.624233 28553 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:01:57.624241 28553 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:01:57.624488 28553 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:01:57.624501 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624507 28553 net.cpp:165] Memory required for data: 429978620\nI1206 09:01:57.624519 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:01:57.624527 28553 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:01:57.624533 28553 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 09:01:57.624550 28553 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:01:57.624562 28553 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:01:57.624569 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.624575 28553 net.cpp:165] Memory required for data: 452260860\nI1206 09:01:57.624580 28553 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:01:57.624594 28553 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:01:57.624600 28553 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:01:57.624609 28553 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:01:57.624939 28553 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:01:57.624953 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.624958 28553 net.cpp:165] Memory required 
for data: 463401980\nI1206 09:01:57.624969 28553 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:01:57.624981 28553 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:01:57.624987 28553 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:01:57.624995 28553 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:01:57.625247 28553 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:01:57.625260 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625265 28553 net.cpp:165] Memory required for data: 474543100\nI1206 09:01:57.625277 28553 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:01:57.625284 28553 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:01:57.625290 28553 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:01:57.625300 28553 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:01:57.625310 28553 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:01:57.625318 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625321 28553 net.cpp:165] Memory required for data: 485684220\nI1206 09:01:57.625326 28553 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1206 09:01:57.625344 28553 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:01:57.625349 28553 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:01:57.625360 28553 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:01:57.625663 28553 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:01:57.625677 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625682 28553 net.cpp:165] Memory required for data: 496825340\nI1206 09:01:57.625691 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:01:57.625700 28553 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:01:57.625705 28553 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:01:57.625713 28553 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:01:57.625960 28553 net.cpp:150] Setting up 
L1_b3_brc3_bn\nI1206 09:01:57.625973 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.625978 28553 net.cpp:165] Memory required for data: 507966460\nI1206 09:01:57.625988 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:01:57.625999 28553 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:01:57.626005 28553 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:01:57.626013 28553 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:01:57.626022 28553 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:01:57.626029 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.626034 28553 net.cpp:165] Memory required for data: 519107580\nI1206 09:01:57.626039 28553 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:01:57.626052 28553 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:01:57.626058 28553 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:01:57.626070 28553 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:01:57.626397 28553 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:01:57.626411 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626416 28553 net.cpp:165] Memory required for data: 541389820\nI1206 09:01:57.626425 28553 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:01:57.626441 28553 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:01:57.626448 28553 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:01:57.626456 28553 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:01:57.626463 28553 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:01:57.626512 28553 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:01:57.626528 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626533 28553 net.cpp:165] Memory required for data: 563672060\nI1206 09:01:57.626538 28553 layer_factory.hpp:77] Creating layer 
L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.626546 28553 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.626551 28553 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:01:57.626559 28553 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:57.626569 28553 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:57.626621 28553 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:01:57.626631 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626636 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626641 28553 net.cpp:165] Memory required for data: 608236540\nI1206 09:01:57.626646 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:01:57.626657 28553 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:01:57.626663 28553 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:01:57.626672 28553 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:01:57.626915 28553 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:01:57.626929 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626933 28553 net.cpp:165] Memory required for data: 630518780\nI1206 09:01:57.626945 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:01:57.626952 28553 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:01:57.626958 28553 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:01:57.626965 28553 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:01:57.626976 28553 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:01:57.626981 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.626986 28553 net.cpp:165] Memory required for data: 652801020\nI1206 
09:01:57.626991 28553 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:01:57.627005 28553 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:01:57.627012 28553 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:01:57.627020 28553 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:01:57.627346 28553 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:01:57.627359 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.627364 28553 net.cpp:165] Memory required for data: 663942140\nI1206 09:01:57.627373 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:01:57.627382 28553 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:01:57.627388 28553 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:01:57.627398 28553 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:01:57.627653 28553 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:01:57.627671 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.627676 28553 net.cpp:165] Memory required for data: 675083260\nI1206 09:01:57.627686 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:01:57.627694 28553 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:01:57.627701 28553 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:01:57.627707 28553 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:01:57.627717 28553 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:01:57.627723 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.627735 28553 net.cpp:165] Memory required for data: 686224380\nI1206 09:01:57.627740 28553 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:01:57.627751 28553 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:01:57.627758 28553 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:01:57.627768 28553 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:01:57.628072 28553 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 
09:01:57.628087 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.628092 28553 net.cpp:165] Memory required for data: 697365500\nI1206 09:01:57.628100 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:01:57.628111 28553 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:01:57.628118 28553 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:01:57.628126 28553 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:01:57.628381 28553 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:01:57.628396 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.628401 28553 net.cpp:165] Memory required for data: 708506620\nI1206 09:01:57.628412 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:01:57.628420 28553 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:01:57.628427 28553 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:01:57.628433 28553 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:01:57.628443 28553 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:01:57.628449 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.628454 28553 net.cpp:165] Memory required for data: 719647740\nI1206 09:01:57.628459 28553 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:01:57.628479 28553 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:01:57.628485 28553 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:01:57.628494 28553 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:01:57.628820 28553 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:01:57.628834 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.628839 28553 net.cpp:165] Memory required for data: 741929980\nI1206 09:01:57.628847 28553 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:01:57.628856 28553 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:01:57.628862 28553 net.cpp:434] L1_b4_sum_eltwise <- 
L1_b4_brc3_conv_top\nI1206 09:01:57.628870 28553 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:01:57.628880 28553 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:01:57.628911 28553 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:01:57.628926 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.628931 28553 net.cpp:165] Memory required for data: 764212220\nI1206 09:01:57.628937 28553 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.628943 28553 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.628949 28553 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:01:57.628957 28553 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:57.628965 28553 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:57.629014 28553 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:01:57.629025 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.629031 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.629036 28553 net.cpp:165] Memory required for data: 808776700\nI1206 09:01:57.629041 28553 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:01:57.629051 28553 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:01:57.629057 28553 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:01:57.629065 28553 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:01:57.629317 28553 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:01:57.629331 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.629336 28553 net.cpp:165] Memory required for data: 831058940\nI1206 09:01:57.629359 28553 layer_factory.hpp:77] 
Creating layer L1_b5_brc1_relu\nI1206 09:01:57.629374 28553 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:01:57.629380 28553 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:01:57.629387 28553 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:01:57.629397 28553 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:01:57.629405 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.629410 28553 net.cpp:165] Memory required for data: 853341180\nI1206 09:01:57.629413 28553 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:01:57.629427 28553 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:01:57.629434 28553 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:01:57.629442 28553 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:01:57.629787 28553 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:01:57.629802 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.629807 28553 net.cpp:165] Memory required for data: 864482300\nI1206 09:01:57.629817 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:01:57.629825 28553 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 09:01:57.629832 28553 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:01:57.629842 28553 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:01:57.630095 28553 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:01:57.630111 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.630116 28553 net.cpp:165] Memory required for data: 875623420\nI1206 09:01:57.630127 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:01:57.630136 28553 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:01:57.630141 28553 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:01:57.630148 28553 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:01:57.630157 28553 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:01:57.630164 28553 net.cpp:157] Top 
shape: 85 32 32 32 (2785280)\nI1206 09:01:57.630168 28553 net.cpp:165] Memory required for data: 886764540\nI1206 09:01:57.630173 28553 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:01:57.630184 28553 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:01:57.630189 28553 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:01:57.630201 28553 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:01:57.630547 28553 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:01:57.630561 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.630566 28553 net.cpp:165] Memory required for data: 897905660\nI1206 09:01:57.630575 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:01:57.630584 28553 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:01:57.630590 28553 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:01:57.630601 28553 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:01:57.630854 28553 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:01:57.630875 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.630880 28553 net.cpp:165] Memory required for data: 909046780\nI1206 09:01:57.630892 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:01:57.630899 28553 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:01:57.630905 28553 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:01:57.630913 28553 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:01:57.630921 28553 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:01:57.630928 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.630934 28553 net.cpp:165] Memory required for data: 920187900\nI1206 09:01:57.630941 28553 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:01:57.630952 28553 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:01:57.630965 28553 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:01:57.630977 28553 net.cpp:408] 
L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:01:57.631409 28553 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:01:57.631428 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.631433 28553 net.cpp:165] Memory required for data: 942470140\nI1206 09:01:57.631443 28553 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:01:57.631454 28553 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:01:57.631460 28553 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:01:57.631474 28553 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:01:57.631485 28553 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:01:57.631570 28553 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:01:57.631584 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.631589 28553 net.cpp:165] Memory required for data: 964752380\nI1206 09:01:57.631595 28553 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.631609 28553 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.631616 28553 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:01:57.631624 28553 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:57.631634 28553 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:57.631690 28553 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:01:57.631705 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.631711 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.631716 28553 net.cpp:165] Memory required for data: 1009316860\nI1206 09:01:57.631721 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:01:57.631729 28553 net.cpp:100] 
Creating Layer L1_b6_brc1_bn\nI1206 09:01:57.631738 28553 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:01:57.631750 28553 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:01:57.632028 28553 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:01:57.632046 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.632052 28553 net.cpp:165] Memory required for data: 1031599100\nI1206 09:01:57.632063 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:01:57.632074 28553 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:01:57.632081 28553 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:01:57.632087 28553 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:01:57.632097 28553 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:01:57.632104 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.632109 28553 net.cpp:165] Memory required for data: 1053881340\nI1206 09:01:57.632113 28553 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:01:57.632124 28553 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:01:57.632133 28553 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:01:57.632145 28553 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:01:57.632524 28553 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:01:57.632537 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.632544 28553 net.cpp:165] Memory required for data: 1065022460\nI1206 09:01:57.632552 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:01:57.632565 28553 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:01:57.632570 28553 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:01:57.632582 28553 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:01:57.632879 28553 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:01:57.632899 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.632903 28553 
net.cpp:165] Memory required for data: 1076163580\nI1206 09:01:57.632925 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:01:57.632938 28553 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:01:57.632946 28553 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:01:57.632952 28553 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:01:57.632962 28553 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:01:57.632969 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.632974 28553 net.cpp:165] Memory required for data: 1087304700\nI1206 09:01:57.632982 28553 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:01:57.632997 28553 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:01:57.633004 28553 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:01:57.633018 28553 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:01:57.633394 28553 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:01:57.633409 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.633414 28553 net.cpp:165] Memory required for data: 1098445820\nI1206 09:01:57.633426 28553 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:01:57.633441 28553 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:01:57.633448 28553 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:01:57.633456 28553 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:01:57.633757 28553 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:01:57.633771 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.633780 28553 net.cpp:165] Memory required for data: 1109586940\nI1206 09:01:57.633790 28553 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:01:57.633798 28553 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:01:57.633805 28553 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:01:57.633812 28553 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 
09:01:57.633821 28553 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:01:57.633828 28553 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:01:57.633833 28553 net.cpp:165] Memory required for data: 1120728060\nI1206 09:01:57.633837 28553 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:01:57.633852 28553 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:01:57.633857 28553 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:01:57.633872 28553 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:01:57.634217 28553 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:01:57.634230 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634235 28553 net.cpp:165] Memory required for data: 1143010300\nI1206 09:01:57.634244 28553 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:01:57.634253 28553 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:01:57.634259 28553 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:01:57.634269 28553 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:01:57.634277 28553 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1206 09:01:57.634312 28553 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:01:57.634321 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634326 28553 net.cpp:165] Memory required for data: 1165292540\nI1206 09:01:57.634331 28553 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.634340 28553 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.634344 28553 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:01:57.634359 28553 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:57.634368 28553 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> 
L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:57.634415 28553 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:01:57.634436 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634443 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634449 28553 net.cpp:165] Memory required for data: 1209857020\nI1206 09:01:57.634454 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:01:57.634461 28553 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:01:57.634474 28553 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:01:57.634485 28553 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:01:57.634732 28553 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:01:57.634747 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634752 28553 net.cpp:165] Memory required for data: 1232139260\nI1206 09:01:57.634762 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:01:57.634769 28553 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:01:57.634775 28553 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 09:01:57.634783 28553 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:01:57.634793 28553 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:01:57.634799 28553 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:01:57.634804 28553 net.cpp:165] Memory required for data: 1254421500\nI1206 09:01:57.634809 28553 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:01:57.634822 28553 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:01:57.634829 28553 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:01:57.634840 28553 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:01:57.635260 28553 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:01:57.635277 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.635282 28553 net.cpp:165] Memory 
required for data: 1259992060\nI1206 09:01:57.635290 28553 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:01:57.635303 28553 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:01:57.635309 28553 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:01:57.635320 28553 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:01:57.635579 28553 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:01:57.635593 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.635598 28553 net.cpp:165] Memory required for data: 1265562620\nI1206 09:01:57.635609 28553 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:01:57.635617 28553 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:01:57.635623 28553 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:01:57.635635 28553 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:01:57.635645 28553 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:01:57.635653 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.635658 28553 net.cpp:165] Memory required for data: 1271133180\nI1206 09:01:57.635663 28553 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1206 09:01:57.635676 28553 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:01:57.635682 28553 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:01:57.635691 28553 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:01:57.636010 28553 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:01:57.636023 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.636029 28553 net.cpp:165] Memory required for data: 1276703740\nI1206 09:01:57.636037 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:01:57.636049 28553 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:01:57.636056 28553 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:01:57.636063 28553 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:01:57.636312 28553 net.cpp:150] Setting 
up L2_b1_brc3_bn\nI1206 09:01:57.636323 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.636328 28553 net.cpp:165] Memory required for data: 1282274300\nI1206 09:01:57.636338 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:01:57.636346 28553 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:01:57.636361 28553 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:01:57.636368 28553 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:01:57.636379 28553 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:01:57.636385 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.636390 28553 net.cpp:165] Memory required for data: 1287844860\nI1206 09:01:57.636395 28553 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:01:57.636409 28553 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:01:57.636415 28553 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:01:57.636426 28553 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:01:57.636886 28553 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:01:57.636900 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.636906 28553 net.cpp:165] Memory required for data: 1298985980\nI1206 09:01:57.636914 28553 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:01:57.636940 28553 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:01:57.636947 28553 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:01:57.636957 28553 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:01:57.637401 28553 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:01:57.637415 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637421 28553 net.cpp:165] Memory required for data: 1310127100\nI1206 09:01:57.637429 28553 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:01:57.637442 28553 net.cpp:100] Creating Layer 
L2_b1_sum_eltwise\nI1206 09:01:57.637449 28553 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:01:57.637455 28553 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:01:57.637471 28553 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:01:57.637500 28553 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:01:57.637508 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637513 28553 net.cpp:165] Memory required for data: 1321268220\nI1206 09:01:57.637518 28553 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.637526 28553 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.637531 28553 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:01:57.637542 28553 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:57.637552 28553 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:57.637598 28553 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:01:57.637617 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637624 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637629 28553 net.cpp:165] Memory required for data: 1343550460\nI1206 09:01:57.637634 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:01:57.637641 28553 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:01:57.637647 28553 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:01:57.637658 28553 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:01:57.637889 28553 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:01:57.637902 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637907 28553 net.cpp:165] Memory required for data: 
1354691580\nI1206 09:01:57.637917 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:01:57.637925 28553 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:01:57.637931 28553 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:01:57.637938 28553 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:01:57.637948 28553 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:01:57.637955 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.637966 28553 net.cpp:165] Memory required for data: 1365832700\nI1206 09:01:57.637972 28553 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:01:57.637986 28553 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:01:57.637992 28553 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:01:57.638005 28553 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:01:57.638454 28553 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:01:57.638474 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.638479 28553 net.cpp:165] Memory required for data: 1371403260\nI1206 09:01:57.638489 28553 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1206 09:01:57.638500 28553 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:01:57.638507 28553 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:01:57.638515 28553 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:01:57.638758 28553 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:01:57.638988 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.638996 28553 net.cpp:165] Memory required for data: 1376973820\nI1206 09:01:57.639008 28553 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:01:57.639016 28553 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:01:57.639022 28553 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:01:57.639029 28553 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:01:57.639040 28553 net.cpp:150] Setting 
up L2_b2_brc2_relu\nI1206 09:01:57.639047 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639052 28553 net.cpp:165] Memory required for data: 1382544380\nI1206 09:01:57.639065 28553 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:01:57.639080 28553 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:01:57.639086 28553 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:01:57.639094 28553 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:01:57.639415 28553 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:01:57.639428 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639433 28553 net.cpp:165] Memory required for data: 1388114940\nI1206 09:01:57.639441 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:01:57.639457 28553 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:01:57.639464 28553 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:01:57.639479 28553 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:01:57.639721 28553 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:01:57.639737 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639742 28553 net.cpp:165] Memory required for data: 1393685500\nI1206 09:01:57.639753 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:01:57.639761 28553 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:01:57.639767 28553 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:01:57.639773 28553 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:01:57.639783 28553 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:01:57.639791 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.639794 28553 net.cpp:165] Memory required for data: 1399256060\nI1206 09:01:57.639799 28553 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:01:57.639811 28553 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:01:57.639816 28553 net.cpp:434] L2_b2_brc3_conv 
<- L2_b2_brc3_bn_top\nI1206 09:01:57.639827 28553 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:01:57.640271 28553 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:01:57.640285 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640290 28553 net.cpp:165] Memory required for data: 1410397180\nI1206 09:01:57.640298 28553 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:01:57.640308 28553 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:01:57.640314 28553 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:01:57.640321 28553 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:01:57.640339 28553 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:01:57.640367 28553 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:01:57.640379 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640384 28553 net.cpp:165] Memory required for data: 1421538300\nI1206 09:01:57.640389 28553 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.640396 28553 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.640403 28553 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:01:57.640413 28553 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:57.640424 28553 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:57.640481 28553 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:01:57.640492 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640499 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640504 28553 net.cpp:165] Memory required for data: 1443820540\nI1206 09:01:57.640509 28553 layer_factory.hpp:77] 
Creating layer L2_b3_brc1_bn\nI1206 09:01:57.640517 28553 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:01:57.640523 28553 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:01:57.640533 28553 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:01:57.640770 28553 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:01:57.640782 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640789 28553 net.cpp:165] Memory required for data: 1454961660\nI1206 09:01:57.640817 28553 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:01:57.640826 28553 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:01:57.640832 28553 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:01:57.640842 28553 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:01:57.640853 28553 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:01:57.640859 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.640864 28553 net.cpp:165] Memory required for data: 1466102780\nI1206 09:01:57.640869 28553 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 09:01:57.640880 28553 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:01:57.640885 28553 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:01:57.640897 28553 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:01:57.641366 28553 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:01:57.641379 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.641386 28553 net.cpp:165] Memory required for data: 1471673340\nI1206 09:01:57.641393 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:01:57.641402 28553 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:01:57.641408 28553 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:01:57.641417 28553 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:01:57.641669 28553 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:01:57.641681 28553 
net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.641687 28553 net.cpp:165] Memory required for data: 1477243900\nI1206 09:01:57.641697 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:01:57.641705 28553 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:01:57.641711 28553 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:01:57.641718 28553 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:01:57.641728 28553 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:01:57.641736 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.641739 28553 net.cpp:165] Memory required for data: 1482814460\nI1206 09:01:57.641744 28553 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:01:57.641758 28553 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:01:57.641770 28553 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:01:57.641782 28553 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:01:57.642105 28553 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:01:57.642119 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642124 28553 net.cpp:165] Memory required for data: 1488385020\nI1206 09:01:57.642132 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:01:57.642145 28553 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:01:57.642151 28553 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:01:57.642163 28553 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:01:57.642408 28553 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:01:57.642421 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642426 28553 net.cpp:165] Memory required for data: 1493955580\nI1206 09:01:57.642436 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:01:57.642446 28553 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:01:57.642452 28553 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:01:57.642458 
28553 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:01:57.642472 28553 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:01:57.642480 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.642485 28553 net.cpp:165] Memory required for data: 1499526140\nI1206 09:01:57.642490 28553 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:01:57.642504 28553 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:01:57.642510 28553 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:01:57.642526 28553 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:01:57.642976 28553 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:01:57.642990 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.642995 28553 net.cpp:165] Memory required for data: 1510667260\nI1206 09:01:57.643004 28553 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:01:57.643013 28553 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:01:57.643019 28553 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:01:57.643029 28553 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:01:57.643038 28553 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:01:57.643064 28553 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:01:57.643075 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643080 28553 net.cpp:165] Memory required for data: 1521808380\nI1206 09:01:57.643085 28553 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.643093 28553 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.643098 28553 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:01:57.643106 28553 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:57.643117 
28553 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:57.643164 28553 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:01:57.643175 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643182 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643187 28553 net.cpp:165] Memory required for data: 1544090620\nI1206 09:01:57.643193 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:01:57.643203 28553 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:01:57.643209 28553 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:01:57.643216 28553 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:01:57.643450 28553 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:01:57.643465 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643482 28553 net.cpp:165] Memory required for data: 1555231740\nI1206 09:01:57.643494 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:01:57.643502 28553 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1206 09:01:57.643508 28553 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:01:57.643517 28553 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:01:57.643525 28553 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:01:57.643532 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.643537 28553 net.cpp:165] Memory required for data: 1566372860\nI1206 09:01:57.643541 28553 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:01:57.643551 28553 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:01:57.643558 28553 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:01:57.643568 28553 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:01:57.644026 28553 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:01:57.644040 28553 net.cpp:157] Top shape: 85 
64 16 16 (1392640)\nI1206 09:01:57.644045 28553 net.cpp:165] Memory required for data: 1571943420\nI1206 09:01:57.644054 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:01:57.644063 28553 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:01:57.644069 28553 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:01:57.644080 28553 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:01:57.644333 28553 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:01:57.644346 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.644351 28553 net.cpp:165] Memory required for data: 1577513980\nI1206 09:01:57.644362 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:01:57.644373 28553 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:01:57.644381 28553 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:01:57.644387 28553 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:01:57.644397 28553 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:01:57.644403 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.644407 28553 net.cpp:165] Memory required for data: 1583084540\nI1206 09:01:57.644412 28553 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:01:57.644423 28553 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:01:57.644428 28553 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:01:57.644440 28553 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:01:57.644764 28553 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:01:57.644778 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.644783 28553 net.cpp:165] Memory required for data: 1588655100\nI1206 09:01:57.644793 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:01:57.644800 28553 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:01:57.644806 28553 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:01:57.644817 28553 net.cpp:408] L2_b4_brc3_bn -> 
L2_b4_brc3_bn_top\nI1206 09:01:57.645067 28553 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:01:57.645081 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.645084 28553 net.cpp:165] Memory required for data: 1594225660\nI1206 09:01:57.645095 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:01:57.645107 28553 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:01:57.645112 28553 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:01:57.645119 28553 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:01:57.645129 28553 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:01:57.645136 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.645140 28553 net.cpp:165] Memory required for data: 1599796220\nI1206 09:01:57.645145 28553 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:01:57.645155 28553 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:01:57.645161 28553 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:01:57.645172 28553 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:01:57.645638 28553 net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:01:57.645653 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.645658 28553 net.cpp:165] Memory required for data: 1610937340\nI1206 09:01:57.645668 28553 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:01:57.645676 28553 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:01:57.645683 28553 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:01:57.645689 28553 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:01:57.645697 28553 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:01:57.645723 28553 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:01:57.645732 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.645737 28553 net.cpp:165] Memory required for data: 1622078460\nI1206 
09:01:57.645743 28553 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.645754 28553 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.645761 28553 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:01:57.645771 28553 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:57.645781 28553 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:57.645826 28553 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:01:57.645838 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.645845 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.645849 28553 net.cpp:165] Memory required for data: 1644360700\nI1206 09:01:57.645854 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:01:57.645869 28553 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:01:57.645876 28553 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:01:57.645884 28553 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:01:57.646124 28553 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:01:57.646137 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.646142 28553 net.cpp:165] Memory required for data: 1655501820\nI1206 09:01:57.646153 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:01:57.646162 28553 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:01:57.646167 28553 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:01:57.646174 28553 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:01:57.646183 28553 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:01:57.646190 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.646195 
28553 net.cpp:165] Memory required for data: 1666642940\nI1206 09:01:57.646199 28553 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:01:57.646212 28553 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:01:57.646219 28553 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:01:57.646230 28553 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:01:57.646701 28553 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:01:57.646715 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.646721 28553 net.cpp:165] Memory required for data: 1672213500\nI1206 09:01:57.646729 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:01:57.646741 28553 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:01:57.646749 28553 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:01:57.646756 28553 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:01:57.647002 28553 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:01:57.647016 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.647020 28553 net.cpp:165] Memory required for data: 1677784060\nI1206 09:01:57.647030 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:01:57.647038 28553 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:01:57.647052 28553 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:01:57.647058 28553 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:01:57.647068 28553 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:01:57.647075 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.647080 28553 net.cpp:165] Memory required for data: 1683354620\nI1206 09:01:57.647085 28553 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:01:57.647099 28553 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:01:57.647105 28553 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:01:57.647116 28553 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 
09:01:57.647446 28553 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:01:57.647460 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.647469 28553 net.cpp:165] Memory required for data: 1688925180\nI1206 09:01:57.647480 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:01:57.647488 28553 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:01:57.647495 28553 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:01:57.647505 28553 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:01:57.647756 28553 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:01:57.647769 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.647774 28553 net.cpp:165] Memory required for data: 1694495740\nI1206 09:01:57.647784 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:01:57.647792 28553 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:01:57.647799 28553 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:01:57.647805 28553 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:01:57.647814 28553 net.cpp:150] Setting up L2_b5_brc3_relu\nI1206 09:01:57.647821 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.647825 28553 net.cpp:165] Memory required for data: 1700066300\nI1206 09:01:57.647830 28553 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:01:57.647847 28553 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:01:57.647855 28553 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:01:57.647866 28553 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:01:57.648319 28553 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:01:57.648334 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648339 28553 net.cpp:165] Memory required for data: 1711207420\nI1206 09:01:57.648346 28553 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:01:57.648358 28553 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 
09:01:57.648365 28553 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:01:57.648372 28553 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:01:57.648380 28553 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:01:57.648409 28553 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:01:57.648418 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648423 28553 net.cpp:165] Memory required for data: 1722348540\nI1206 09:01:57.648429 28553 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.648437 28553 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.648442 28553 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:01:57.648449 28553 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:57.648461 28553 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:57.648516 28553 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:01:57.648528 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648535 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648540 28553 net.cpp:165] Memory required for data: 1744630780\nI1206 09:01:57.648551 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:01:57.648563 28553 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:01:57.648571 28553 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:01:57.648581 28553 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:01:57.648814 28553 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:01:57.648830 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648835 28553 net.cpp:165] Memory required for data: 
1755771900\nI1206 09:01:57.648845 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:01:57.648865 28553 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:01:57.648872 28553 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:01:57.648880 28553 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:01:57.648890 28553 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:01:57.648897 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.648902 28553 net.cpp:165] Memory required for data: 1766913020\nI1206 09:01:57.648907 28553 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:01:57.648921 28553 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:01:57.648927 28553 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:01:57.648938 28553 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:01:57.649405 28553 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:01:57.649418 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649423 28553 net.cpp:165] Memory required for data: 1772483580\nI1206 09:01:57.649432 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:01:57.649441 28553 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:01:57.649447 28553 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:01:57.649456 28553 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:01:57.649715 28553 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:01:57.649729 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649734 28553 net.cpp:165] Memory required for data: 1778054140\nI1206 09:01:57.649744 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:01:57.649752 28553 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:01:57.649758 28553 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:01:57.649768 28553 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:01:57.649780 28553 net.cpp:150] Setting 
up L2_b6_brc2_relu\nI1206 09:01:57.649785 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.649791 28553 net.cpp:165] Memory required for data: 1783624700\nI1206 09:01:57.649796 28553 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:01:57.649809 28553 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:01:57.649816 28553 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:01:57.649823 28553 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:01:57.650147 28553 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:01:57.650161 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.650166 28553 net.cpp:165] Memory required for data: 1789195260\nI1206 09:01:57.650174 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:01:57.650187 28553 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:01:57.650194 28553 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:01:57.650202 28553 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:01:57.650452 28553 net.cpp:150] Setting up L2_b6_brc3_bn\nI1206 09:01:57.650465 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.650476 28553 net.cpp:165] Memory required for data: 1794765820\nI1206 09:01:57.650487 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:01:57.650496 28553 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:01:57.650502 28553 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:01:57.650512 28553 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:01:57.650529 28553 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:01:57.650537 28553 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:01:57.650542 28553 net.cpp:165] Memory required for data: 1800336380\nI1206 09:01:57.650547 28553 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:01:57.650559 28553 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:01:57.650565 28553 net.cpp:434] L2_b6_brc3_conv 
<- L2_b6_brc3_bn_top\nI1206 09:01:57.650574 28553 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:01:57.651041 28553 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:01:57.651055 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651060 28553 net.cpp:165] Memory required for data: 1811477500\nI1206 09:01:57.651069 28553 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:01:57.651078 28553 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:01:57.651084 28553 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:01:57.651091 28553 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:01:57.651103 28553 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:01:57.651129 28553 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:01:57.651137 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651142 28553 net.cpp:165] Memory required for data: 1822618620\nI1206 09:01:57.651147 28553 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.651154 28553 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.651160 28553 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:01:57.651171 28553 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:57.651180 28553 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:57.651231 28553 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:01:57.651242 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651248 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651253 28553 net.cpp:165] Memory required for data: 1844900860\nI1206 09:01:57.651258 28553 layer_factory.hpp:77] 
Creating layer L3_b1_brc1_bn\nI1206 09:01:57.651269 28553 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:01:57.651275 28553 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:01:57.651283 28553 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:01:57.651526 28553 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:01:57.651540 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651545 28553 net.cpp:165] Memory required for data: 1856041980\nI1206 09:01:57.651556 28553 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:01:57.651563 28553 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:01:57.651569 28553 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:01:57.651576 28553 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:01:57.651587 28553 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:01:57.651593 28553 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:01:57.651597 28553 net.cpp:165] Memory required for data: 1867183100\nI1206 09:01:57.651602 28553 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:01:57.651615 28553 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:01:57.651621 28553 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:01:57.651633 28553 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:01:57.652283 28553 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:01:57.652298 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652303 28553 net.cpp:165] Memory required for data: 1869968380\nI1206 09:01:57.652312 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:01:57.652324 28553 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:01:57.652338 28553 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:01:57.652346 28553 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:01:57.652612 28553 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:01:57.652631 28553 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652637 28553 net.cpp:165] Memory required for data: 1872753660\nI1206 09:01:57.652647 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:01:57.652657 28553 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:01:57.652662 28553 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:01:57.652669 28553 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:01:57.652678 28553 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:01:57.652685 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.652690 28553 net.cpp:165] Memory required for data: 1875538940\nI1206 09:01:57.652695 28553 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:01:57.652705 28553 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:01:57.652711 28553 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:01:57.652722 28553 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:01:57.653113 28553 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 09:01:57.653127 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653132 28553 net.cpp:165] Memory required for data: 1878324220\nI1206 09:01:57.653141 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:01:57.653149 28553 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:01:57.653156 28553 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:01:57.653167 28553 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:01:57.653420 28553 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:01:57.653434 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653439 28553 net.cpp:165] Memory required for data: 1881109500\nI1206 09:01:57.653448 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:01:57.653460 28553 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:01:57.653471 28553 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:01:57.653479 28553 
net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:01:57.653488 28553 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:01:57.653496 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.653501 28553 net.cpp:165] Memory required for data: 1883894780\nI1206 09:01:57.653506 28553 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:01:57.653515 28553 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:01:57.653522 28553 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:01:57.653534 28553 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:01:57.654486 28553 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:01:57.654501 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.654507 28553 net.cpp:165] Memory required for data: 1889465340\nI1206 09:01:57.654516 28553 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:01:57.654527 28553 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:01:57.654534 28553 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:01:57.654546 28553 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:01:57.655514 28553 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:01:57.655527 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.655532 28553 net.cpp:165] Memory required for data: 1895035900\nI1206 09:01:57.655541 28553 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:01:57.655551 28553 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:01:57.655557 28553 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:01:57.655565 28553 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:01:57.655572 28553 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:01:57.655607 28553 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:01:57.655625 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.655632 28553 
net.cpp:165] Memory required for data: 1900606460\nI1206 09:01:57.655637 28553 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.655647 28553 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.655653 28553 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:01:57.655660 28553 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:57.655670 28553 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:57.655720 28553 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:01:57.655732 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.655738 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.655743 28553 net.cpp:165] Memory required for data: 1911747580\nI1206 09:01:57.655748 28553 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1206 09:01:57.655756 28553 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:01:57.655762 28553 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:01:57.655773 28553 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:01:57.656070 28553 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:01:57.656090 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656100 28553 net.cpp:165] Memory required for data: 1917318140\nI1206 09:01:57.656111 28553 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:01:57.656126 28553 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:01:57.656132 28553 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:01:57.656141 28553 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:01:57.656173 28553 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:01:57.656183 28553 net.cpp:157] Top 
shape: 85 256 8 8 (1392640)\nI1206 09:01:57.656188 28553 net.cpp:165] Memory required for data: 1922888700\nI1206 09:01:57.656193 28553 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:01:57.656204 28553 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:01:57.656210 28553 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:01:57.656222 28553 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:01:57.657197 28553 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:01:57.657217 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.657222 28553 net.cpp:165] Memory required for data: 1925673980\nI1206 09:01:57.657230 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:01:57.657239 28553 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:01:57.657245 28553 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:01:57.657253 28553 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:01:57.657518 28553 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:01:57.657531 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.657536 28553 net.cpp:165] Memory required for data: 1928459260\nI1206 09:01:57.657547 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:01:57.657555 28553 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:01:57.657562 28553 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:01:57.657572 28553 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:01:57.657582 28553 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:01:57.657589 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.657593 28553 net.cpp:165] Memory required for data: 1931244540\nI1206 09:01:57.657598 28553 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:01:57.657616 28553 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:01:57.657624 28553 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:01:57.657631 28553 net.cpp:408] 
L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:01:57.659093 28553 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:01:57.659111 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.659116 28553 net.cpp:165] Memory required for data: 1934029820\nI1206 09:01:57.659126 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:01:57.659135 28553 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:01:57.659142 28553 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:01:57.659153 28553 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:01:57.659405 28553 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:01:57.659418 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.659423 28553 net.cpp:165] Memory required for data: 1936815100\nI1206 09:01:57.659433 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:01:57.659446 28553 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:01:57.659451 28553 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:01:57.659458 28553 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:01:57.659474 28553 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:01:57.659482 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.659487 28553 net.cpp:165] Memory required for data: 1939600380\nI1206 09:01:57.659492 28553 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:01:57.659503 28553 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:01:57.659509 28553 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:01:57.659521 28553 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:01:57.660461 28553 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:01:57.660482 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.660487 28553 net.cpp:165] Memory required for data: 1945170940\nI1206 09:01:57.660496 28553 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:01:57.660506 28553 net.cpp:100] 
Creating Layer L3_b2_sum_eltwise\nI1206 09:01:57.660513 28553 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:01:57.660521 28553 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:01:57.660528 28553 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:01:57.660562 28553 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:01:57.660573 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.660578 28553 net.cpp:165] Memory required for data: 1950741500\nI1206 09:01:57.660583 28553 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.660598 28553 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.660604 28553 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:01:57.660614 28553 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:57.660624 28553 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:57.660672 28553 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:01:57.660686 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.660693 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.660698 28553 net.cpp:165] Memory required for data: 1961882620\nI1206 09:01:57.660703 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:01:57.660712 28553 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:01:57.660717 28553 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:01:57.660727 28553 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:01:57.660981 28553 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:01:57.660995 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.661000 28553 
net.cpp:165] Memory required for data: 1967453180\nI1206 09:01:57.661010 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:01:57.661017 28553 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:01:57.661031 28553 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:01:57.661042 28553 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:01:57.661052 28553 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:01:57.661059 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.661063 28553 net.cpp:165] Memory required for data: 1973023740\nI1206 09:01:57.661068 28553 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:01:57.661079 28553 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:01:57.661084 28553 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:01:57.661093 28553 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:01:57.663369 28553 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:01:57.663388 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.663393 28553 net.cpp:165] Memory required for data: 1975809020\nI1206 09:01:57.663403 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:01:57.663415 28553 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:01:57.663422 28553 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:01:57.663431 28553 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:01:57.663699 28553 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:01:57.663717 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.663722 28553 net.cpp:165] Memory required for data: 1978594300\nI1206 09:01:57.663733 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:01:57.663740 28553 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:01:57.663746 28553 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:01:57.663753 28553 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 
09:01:57.663763 28553 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:01:57.663770 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.663775 28553 net.cpp:165] Memory required for data: 1981379580\nI1206 09:01:57.663780 28553 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:01:57.663791 28553 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:01:57.663796 28553 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:01:57.663808 28553 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:01:57.664199 28553 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:01:57.664213 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.664218 28553 net.cpp:165] Memory required for data: 1984164860\nI1206 09:01:57.664227 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:01:57.664237 28553 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:01:57.664242 28553 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:01:57.664253 28553 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1206 09:01:57.664515 28553 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:01:57.664527 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.664532 28553 net.cpp:165] Memory required for data: 1986950140\nI1206 09:01:57.664543 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:01:57.664556 28553 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:01:57.664562 28553 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:01:57.664569 28553 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:01:57.664580 28553 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:01:57.664587 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.664592 28553 net.cpp:165] Memory required for data: 1989735420\nI1206 09:01:57.664597 28553 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:01:57.664608 28553 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:01:57.664613 
28553 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:01:57.664624 28553 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:01:57.665554 28553 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:01:57.665568 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.665573 28553 net.cpp:165] Memory required for data: 1995305980\nI1206 09:01:57.665590 28553 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:01:57.665601 28553 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:01:57.665606 28553 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:01:57.665614 28553 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:01:57.665621 28553 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:01:57.665664 28553 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:01:57.665678 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.665683 28553 net.cpp:165] Memory required for data: 2000876540\nI1206 09:01:57.665688 28553 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.665695 28553 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.665700 28553 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:01:57.665711 28553 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:57.665721 28553 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:57.665771 28553 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:01:57.665784 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.665791 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.665796 28553 net.cpp:165] Memory required for data: 2012017660\nI1206 09:01:57.665801 28553 
layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:01:57.665808 28553 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:01:57.665814 28553 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:01:57.665825 28553 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:01:57.666077 28553 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:01:57.666090 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.666095 28553 net.cpp:165] Memory required for data: 2017588220\nI1206 09:01:57.666106 28553 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:01:57.666117 28553 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:01:57.666123 28553 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:01:57.666131 28553 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:01:57.666141 28553 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:01:57.666147 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.666152 28553 net.cpp:165] Memory required for data: 2023158780\nI1206 09:01:57.666157 28553 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:01:57.666167 28553 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:01:57.666173 28553 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:01:57.666184 28553 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:01:57.667124 28553 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:01:57.667140 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.667145 28553 net.cpp:165] Memory required for data: 2025944060\nI1206 09:01:57.667153 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:01:57.667162 28553 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:01:57.667168 28553 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:01:57.667176 28553 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:01:57.667429 28553 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 
09:01:57.667443 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.667448 28553 net.cpp:165] Memory required for data: 2028729340\nI1206 09:01:57.667457 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:01:57.667470 28553 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:01:57.667477 28553 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:01:57.667485 28553 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:01:57.667495 28553 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:01:57.667510 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.667515 28553 net.cpp:165] Memory required for data: 2031514620\nI1206 09:01:57.667520 28553 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:01:57.667533 28553 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:01:57.667541 28553 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:01:57.667551 28553 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:01:57.667939 28553 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:01:57.667953 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.667958 28553 net.cpp:165] Memory required for data: 2034299900\nI1206 09:01:57.667968 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:01:57.667979 28553 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:01:57.667986 28553 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:01:57.667999 28553 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:01:57.668247 28553 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:01:57.668259 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.668264 28553 net.cpp:165] Memory required for data: 2037085180\nI1206 09:01:57.668274 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:01:57.668282 28553 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:01:57.668288 28553 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 
09:01:57.668295 28553 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:01:57.668305 28553 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:01:57.668313 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.668316 28553 net.cpp:165] Memory required for data: 2039870460\nI1206 09:01:57.668321 28553 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:01:57.668334 28553 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:01:57.668340 28553 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:01:57.668352 28553 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:01:57.669296 28553 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:01:57.669309 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669315 28553 net.cpp:165] Memory required for data: 2045441020\nI1206 09:01:57.669324 28553 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:01:57.669337 28553 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:01:57.669343 28553 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:01:57.669350 28553 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:01:57.669358 28553 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:01:57.669395 28553 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:01:57.669406 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669411 28553 net.cpp:165] Memory required for data: 2051011580\nI1206 09:01:57.669416 28553 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.669425 28553 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.669430 28553 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:01:57.669443 28553 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 
09:01:57.669453 28553 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:57.669508 28553 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:01:57.669524 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669531 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669535 28553 net.cpp:165] Memory required for data: 2062152700\nI1206 09:01:57.669540 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:01:57.669549 28553 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:01:57.669555 28553 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:01:57.669572 28553 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:01:57.669816 28553 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:01:57.669829 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669834 28553 net.cpp:165] Memory required for data: 2067723260\nI1206 09:01:57.669844 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:01:57.669852 28553 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:01:57.669858 28553 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:01:57.669865 28553 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:01:57.669875 28553 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:01:57.669883 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.669886 28553 net.cpp:165] Memory required for data: 2073293820\nI1206 09:01:57.669891 28553 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:01:57.669905 28553 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:01:57.669911 28553 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:01:57.669927 28553 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:01:57.670864 28553 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:01:57.670879 28553 net.cpp:157] Top 
shape: 85 128 8 8 (696320)\nI1206 09:01:57.670884 28553 net.cpp:165] Memory required for data: 2076079100\nI1206 09:01:57.670893 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:01:57.670905 28553 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:01:57.670912 28553 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:01:57.670920 28553 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:01:57.671170 28553 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:01:57.671185 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.671191 28553 net.cpp:165] Memory required for data: 2078864380\nI1206 09:01:57.671201 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:01:57.671210 28553 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:01:57.671216 28553 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:01:57.671222 28553 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:01:57.671232 28553 net.cpp:150] Setting up L3_b5_brc2_relu\nI1206 09:01:57.671239 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.671244 28553 net.cpp:165] Memory required for data: 2081649660\nI1206 09:01:57.671248 28553 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:01:57.671259 28553 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:01:57.671264 28553 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:01:57.671277 28553 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:01:57.671663 28553 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:01:57.671677 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.671684 28553 net.cpp:165] Memory required for data: 2084434940\nI1206 09:01:57.671731 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:01:57.671747 28553 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:01:57.671754 28553 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:01:57.671762 28553 net.cpp:408] L3_b5_brc3_bn 
-> L3_b5_brc3_bn_top\nI1206 09:01:57.672017 28553 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:01:57.672030 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.672035 28553 net.cpp:165] Memory required for data: 2087220220\nI1206 09:01:57.672046 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:01:57.672055 28553 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:01:57.672060 28553 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:01:57.672067 28553 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:01:57.672077 28553 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:01:57.672085 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.672089 28553 net.cpp:165] Memory required for data: 2090005500\nI1206 09:01:57.672093 28553 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:01:57.672113 28553 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:01:57.672122 28553 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1206 09:01:57.672129 28553 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:01:57.673068 28553 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:01:57.673082 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673087 28553 net.cpp:165] Memory required for data: 2095576060\nI1206 09:01:57.673096 28553 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:01:57.673108 28553 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:01:57.673115 28553 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:01:57.673122 28553 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:01:57.673130 28553 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:01:57.673166 28553 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:01:57.673176 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673180 28553 net.cpp:165] Memory required for data: 2101146620\nI1206 
09:01:57.673185 28553 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.673193 28553 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.673198 28553 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:01:57.673211 28553 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:57.673221 28553 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:57.673269 28553 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:01:57.673282 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673290 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673295 28553 net.cpp:165] Memory required for data: 2112287740\nI1206 09:01:57.673300 28553 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1206 09:01:57.673307 28553 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:01:57.673313 28553 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:01:57.673323 28553 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:01:57.673568 28553 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:01:57.673583 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673588 28553 net.cpp:165] Memory required for data: 2117858300\nI1206 09:01:57.673597 28553 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:01:57.673604 28553 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:01:57.673610 28553 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:01:57.673617 28553 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:01:57.673627 28553 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:01:57.673635 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.673638 28553 
net.cpp:165] Memory required for data: 2123428860\nI1206 09:01:57.673643 28553 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:01:57.673657 28553 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:01:57.673663 28553 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:01:57.673674 28553 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:01:57.674613 28553 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:01:57.674628 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.674633 28553 net.cpp:165] Memory required for data: 2126214140\nI1206 09:01:57.674643 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:01:57.674654 28553 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:01:57.674660 28553 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:01:57.674669 28553 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:01:57.674922 28553 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:01:57.674937 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.674949 28553 net.cpp:165] Memory required for data: 2128999420\nI1206 09:01:57.674960 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:01:57.674968 28553 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:01:57.674974 28553 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:01:57.674981 28553 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:01:57.674991 28553 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:01:57.674998 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675002 28553 net.cpp:165] Memory required for data: 2131784700\nI1206 09:01:57.675007 28553 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:01:57.675019 28553 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:01:57.675024 28553 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:01:57.675035 28553 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:01:57.675423 
28553 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:01:57.675436 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675442 28553 net.cpp:165] Memory required for data: 2134569980\nI1206 09:01:57.675451 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:01:57.675458 28553 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:01:57.675470 28553 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:01:57.675482 28553 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:01:57.675732 28553 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:01:57.675745 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675750 28553 net.cpp:165] Memory required for data: 2137355260\nI1206 09:01:57.675760 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:01:57.675771 28553 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:01:57.675777 28553 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:01:57.675784 28553 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1206 09:01:57.675794 28553 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:01:57.675801 28553 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:01:57.675806 28553 net.cpp:165] Memory required for data: 2140140540\nI1206 09:01:57.675810 28553 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:01:57.675820 28553 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:01:57.675827 28553 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:01:57.675837 28553 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:01:57.676777 28553 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:01:57.676792 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.676797 28553 net.cpp:165] Memory required for data: 2145711100\nI1206 09:01:57.676806 28553 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:01:57.676815 28553 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:01:57.676821 28553 
net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:01:57.676829 28553 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:01:57.676836 28553 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:01:57.676875 28553 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:01:57.676887 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.676892 28553 net.cpp:165] Memory required for data: 2151281660\nI1206 09:01:57.676898 28553 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:01:57.676908 28553 net.cpp:100] Creating Layer post_bn\nI1206 09:01:57.676914 28553 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:01:57.676923 28553 net.cpp:408] post_bn -> post_bn_top\nI1206 09:01:57.677165 28553 net.cpp:150] Setting up post_bn\nI1206 09:01:57.677197 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677202 28553 net.cpp:165] Memory required for data: 2156852220\nI1206 09:01:57.677213 28553 layer_factory.hpp:77] Creating layer post_relu\nI1206 09:01:57.677222 28553 net.cpp:100] Creating Layer post_relu\nI1206 09:01:57.677227 28553 net.cpp:434] post_relu <- post_bn_top\nI1206 09:01:57.677242 28553 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:01:57.677253 28553 net.cpp:150] Setting up post_relu\nI1206 09:01:57.677260 28553 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:01:57.677264 28553 net.cpp:165] Memory required for data: 2162422780\nI1206 09:01:57.677269 28553 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:01:57.677284 28553 net.cpp:100] Creating Layer post_pool\nI1206 09:01:57.677289 28553 net.cpp:434] post_pool <- post_bn_top\nI1206 09:01:57.677296 28553 net.cpp:408] post_pool -> post_pool\nI1206 09:01:57.677338 28553 net.cpp:150] Setting up post_pool\nI1206 09:01:57.677350 28553 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:01:57.677354 28553 net.cpp:165] Memory required for data: 2162509820\nI1206 09:01:57.677359 28553 
layer_factory.hpp:77] Creating layer post_FC\nI1206 09:01:57.677374 28553 net.cpp:100] Creating Layer post_FC\nI1206 09:01:57.677381 28553 net.cpp:434] post_FC <- post_pool\nI1206 09:01:57.677389 28553 net.cpp:408] post_FC -> post_FC_top\nI1206 09:01:57.677574 28553 net.cpp:150] Setting up post_FC\nI1206 09:01:57.677588 28553 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.677594 28553 net.cpp:165] Memory required for data: 2162513220\nI1206 09:01:57.677603 28553 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:01:57.677610 28553 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:01:57.677616 28553 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:01:57.677628 28553 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:01:57.677637 28553 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:01:57.677686 28553 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:01:57.677700 28553 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.677707 28553 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:01:57.677712 28553 net.cpp:165] Memory required for data: 2162520020\nI1206 09:01:57.677717 28553 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:01:57.677724 28553 net.cpp:100] Creating Layer accuracy\nI1206 09:01:57.677731 28553 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:01:57.677737 28553 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:01:57.677744 28553 net.cpp:408] accuracy -> accuracy\nI1206 09:01:57.677759 28553 net.cpp:150] Setting up accuracy\nI1206 09:01:57.677767 28553 net.cpp:157] Top shape: (1)\nI1206 09:01:57.677772 28553 net.cpp:165] Memory required for data: 2162520024\nI1206 09:01:57.677776 28553 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:57.677784 28553 net.cpp:100] Creating Layer loss\nI1206 09:01:57.677789 28553 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 
09:01:57.677796 28553 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:01:57.677803 28553 net.cpp:408] loss -> loss\nI1206 09:01:57.677814 28553 layer_factory.hpp:77] Creating layer loss\nI1206 09:01:57.677929 28553 net.cpp:150] Setting up loss\nI1206 09:01:57.677942 28553 net.cpp:157] Top shape: (1)\nI1206 09:01:57.677947 28553 net.cpp:160]     with loss weight 1\nI1206 09:01:57.677963 28553 net.cpp:165] Memory required for data: 2162520028\nI1206 09:01:57.677969 28553 net.cpp:226] loss needs backward computation.\nI1206 09:01:57.677975 28553 net.cpp:228] accuracy does not need backward computation.\nI1206 09:01:57.677981 28553 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:01:57.677986 28553 net.cpp:226] post_FC needs backward computation.\nI1206 09:01:57.677991 28553 net.cpp:226] post_pool needs backward computation.\nI1206 09:01:57.677996 28553 net.cpp:226] post_relu needs backward computation.\nI1206 09:01:57.678001 28553 net.cpp:226] post_bn needs backward computation.\nI1206 09:01:57.678006 28553 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.678011 28553 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:01:57.678016 28553 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:01:57.678021 28553 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:01:57.678033 28553 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:01:57.678040 28553 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:01:57.678043 28553 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:01:57.678048 28553 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:01:57.678055 28553 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:01:57.678059 28553 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:01:57.678064 28553 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward 
computation.\nI1206 09:01:57.678069 28553 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:01:57.678074 28553 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:01:57.678081 28553 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:01:57.678084 28553 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:01:57.678089 28553 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:01:57.678095 28553 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:01:57.678099 28553 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:01:57.678104 28553 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:01:57.678109 28553 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:01:57.678114 28553 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:01:57.678119 28553 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678124 28553 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.678129 28553 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:01:57.678139 28553 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:01:57.678144 28553 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:01:57.678149 28553 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:01:57.678154 28553 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:01:57.678159 28553 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:01:57.678164 28553 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:01:57.678169 28553 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:01:57.678174 28553 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:01:57.678179 28553 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678184 28553 
net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:01:57.678190 28553 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:01:57.678195 28553 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:01:57.678200 28553 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:01:57.678205 28553 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:01:57.678210 28553 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:01:57.678215 28553 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:01:57.678220 28553 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:01:57.678225 28553 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:01:57.678231 28553 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:01:57.678236 28553 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678241 28553 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.678246 28553 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:01:57.678251 28553 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:01:57.678256 28553 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:01:57.678261 28553 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:01:57.678267 28553 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:01:57.678272 28553 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:01:57.678282 28553 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:01:57.678287 28553 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:01:57.678292 28553 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:01:57.678297 28553 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678303 28553 net.cpp:226] L3_b1_sum_eltwise needs backward 
computation.\nI1206 09:01:57.678308 28553 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:01:57.678314 28553 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:01:57.678319 28553 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:01:57.678324 28553 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:01:57.678329 28553 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:01:57.678334 28553 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:01:57.678339 28553 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:01:57.678344 28553 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:01:57.678349 28553 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:01:57.678354 28553 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:01:57.678359 28553 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678365 28553 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.678370 28553 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:01:57.678375 28553 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:01:57.678380 28553 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:01:57.678385 28553 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:01:57.678390 28553 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:01:57.678395 28553 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:01:57.678400 28553 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:01:57.678406 28553 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:01:57.678411 28553 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:01:57.678416 28553 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678421 28553 
net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:01:57.678427 28553 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:01:57.678432 28553 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:01:57.678437 28553 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:01:57.678442 28553 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:01:57.678450 28553 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:01:57.678457 28553 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:01:57.678462 28553 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:01:57.678472 28553 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:01:57.678478 28553 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:01:57.678484 28553 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678490 28553 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.678496 28553 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:01:57.678501 28553 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:01:57.678506 28553 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:01:57.678511 28553 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:01:57.678517 28553 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:01:57.678522 28553 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:01:57.678527 28553 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:01:57.678539 28553 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:01:57.678544 28553 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:01:57.678550 28553 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678555 28553 net.cpp:226] L2_b3_sum_eltwise needs backward 
computation.\nI1206 09:01:57.678560 28553 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:01:57.678566 28553 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:01:57.678571 28553 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:01:57.678576 28553 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:01:57.678582 28553 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:01:57.678587 28553 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:01:57.678592 28553 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:01:57.678597 28553 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:01:57.678602 28553 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:01:57.678608 28553 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678613 28553 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.678619 28553 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:01:57.678625 28553 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:01:57.678630 28553 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:01:57.678635 28553 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:01:57.678640 28553 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:01:57.678645 28553 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:01:57.678652 28553 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:01:57.678658 28553 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:01:57.678663 28553 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:01:57.678668 28553 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678673 28553 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:01:57.678679 28553 
net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:01:57.678684 28553 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:01:57.678689 28553 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:01:57.678694 28553 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:01:57.678700 28553 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:01:57.678705 28553 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:01:57.678710 28553 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:01:57.678716 28553 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:01:57.678721 28553 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:01:57.678726 28553 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:01:57.678732 28553 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678738 28553 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1206 09:01:57.678745 28553 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:01:57.678750 28553 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:01:57.678755 28553 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:01:57.678761 28553 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:01:57.678766 28553 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:01:57.678771 28553 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:01:57.678776 28553 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:01:57.678781 28553 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:01:57.678786 28553 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:01:57.678799 28553 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678805 28553 net.cpp:226] L1_b5_sum_eltwise needs backward 
computation.\nI1206 09:01:57.678812 28553 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:01:57.678817 28553 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:01:57.678822 28553 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:01:57.678828 28553 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:01:57.678833 28553 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:01:57.678839 28553 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:01:57.678845 28553 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:01:57.678850 28553 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:01:57.678856 28553 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:01:57.678861 28553 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678867 28553 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:01:57.678874 28553 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:01:57.678879 28553 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:01:57.678884 28553 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:01:57.678890 28553 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:01:57.678895 28553 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:01:57.678900 28553 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:01:57.678906 28553 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:01:57.678911 28553 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:01:57.678917 28553 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:01:57.678923 28553 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678928 28553 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:01:57.678936 28553 
net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:01:57.678941 28553 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:01:57.678946 28553 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:01:57.678951 28553 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:01:57.678956 28553 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:01:57.678962 28553 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:01:57.678967 28553 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:01:57.678973 28553 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:01:57.678978 28553 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:01:57.678984 28553 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.678989 28553 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:01:57.678995 28553 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1206 09:01:57.679002 28553 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:01:57.679008 28553 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:01:57.679013 28553 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:01:57.679018 28553 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:01:57.679023 28553 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:01:57.679029 28553 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:01:57.679034 28553 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:01:57.679039 28553 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:01:57.679049 28553 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:01:57.679055 28553 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:01:57.679061 28553 net.cpp:226] L1_b1_chanInc_conv needs backward 
computation.\nI1206 09:01:57.679076 28553 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:01:57.679082 28553 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:01:57.679088 28553 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:01:57.679095 28553 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:01:57.679100 28553 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:01:57.679105 28553 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:01:57.679111 28553 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:01:57.679116 28553 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:01:57.679121 28553 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:01:57.679127 28553 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:01:57.679132 28553 net.cpp:226] pre_conv needs backward computation.\nI1206 09:01:57.679139 28553 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1206 09:01:57.679145 28553 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:01:57.679150 28553 net.cpp:270] This network produces output accuracy\nI1206 09:01:57.679157 28553 net.cpp:270] This network produces output loss\nI1206 09:01:57.679405 28553 net.cpp:283] Network initialization done.\nI1206 09:01:57.680042 28553 solver.cpp:60] Solver scaffolding done.\nI1206 09:01:57.899430 28553 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1206 09:01:58.230334 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:58.230386 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:58.236255 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:58.887320 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated 
params: examples/sc/architectures/arch.prototxt\nI1206 09:01:58.887370 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:58.894131 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:01:59.632721 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:01:59.632791 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:01:59.640599 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:00.000138 28553 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1206 09:02:00.445526 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:00.445572 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:00.454555 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:01.347995 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:01.348065 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:01.357398 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:02.320595 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:02.320652 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:02:02.330526 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:03.453994 28553 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:02:03.454040 28553 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI1206 09:02:03.465229 28553 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:02:03.588160 28579 blocking_queue.cpp:50] Waiting for data\nI1206 09:02:04.087388 28553 parallel.cpp:425] Starting Optimization\nI1206 09:02:04.089952 28553 solver.cpp:279] Solving Cifar-ResNeXt\nI1206 09:02:04.089968 28553 solver.cpp:280] Learning Rate Policy: triangular\nI1206 09:02:04.093880 28553 solver.cpp:337] Iteration 0, Testing net (#0)\nI1206 09:04:39.496155 28553 solver.cpp:404]     Test net output #0: accuracy = 0.114706\nI1206 09:04:39.496398 28553 solver.cpp:404]     Test net output #1: loss = 2.34593 (* 1 = 2.34593 loss)\nI1206 09:04:44.827812 28553 solver.cpp:228] Iteration 0, loss = 2.31809\nI1206 09:04:44.827854 28553 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 09:04:44.827870 28553 solver.cpp:244]     Train net output #1: loss = 2.31809 (* 1 = 2.31809 loss)\nI1206 09:04:44.888826 28553 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1206 09:11:32.635984 28553 solver.cpp:337] Iteration 100, Testing net (#0)\nI1206 09:14:09.187556 28553 solver.cpp:404]     Test net output #0: accuracy = 0.279706\nI1206 09:14:09.187801 28553 solver.cpp:404]     Test net output #1: loss = 2.00816 (* 1 = 2.00816 loss)\nI1206 09:14:13.237257 28553 solver.cpp:228] Iteration 100, loss = 2.11263\nI1206 09:14:13.237290 28553 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 09:14:13.237305 28553 solver.cpp:244]     Train net output #1: loss = 2.11263 (* 1 = 2.11263 loss)\nI1206 09:14:13.293424 28553 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1206 09:21:01.118271 28553 solver.cpp:337] Iteration 200, Testing net (#0)\nI1206 09:23:37.672017 28553 solver.cpp:404]     Test net output #0: accuracy = 0.312941\nI1206 09:23:37.672257 28553 solver.cpp:404]     Test net output #1: loss = 1.9089 (* 1 = 1.9089 loss)\nI1206 09:23:41.719830 28553 solver.cpp:228] Iteration 200, loss = 2.03565\nI1206 09:23:41.719864 28553 solver.cpp:244]     Train net 
output #0: accuracy = 0.317647\nI1206 09:23:41.719879 28553 solver.cpp:244]     Train net output #1: loss = 2.03565 (* 1 = 2.03565 loss)\nI1206 09:23:41.782055 28553 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1206 09:30:30.888425 28553 solver.cpp:337] Iteration 300, Testing net (#0)\nI1206 09:33:07.428323 28553 solver.cpp:404]     Test net output #0: accuracy = 0.342118\nI1206 09:33:07.428562 28553 solver.cpp:404]     Test net output #1: loss = 1.83339 (* 1 = 1.83339 loss)\nI1206 09:33:11.478416 28553 solver.cpp:228] Iteration 300, loss = 1.83938\nI1206 09:33:11.478449 28553 solver.cpp:244]     Train net output #0: accuracy = 0.352941\nI1206 09:33:11.478466 28553 solver.cpp:244]     Train net output #1: loss = 1.83938 (* 1 = 1.83938 loss)\nI1206 09:33:11.541211 28553 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1206 09:40:01.178915 28553 solver.cpp:337] Iteration 400, Testing net (#0)\nI1206 09:42:44.667567 28553 solver.cpp:404]     Test net output #0: accuracy = 0.365882\nI1206 09:42:44.667853 28553 solver.cpp:404]     Test net output #1: loss = 1.77244 (* 1 = 1.77244 loss)\nI1206 09:42:48.751891 28553 solver.cpp:228] Iteration 400, loss = 1.75176\nI1206 09:42:48.751930 28553 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 09:42:48.751946 28553 solver.cpp:244]     Train net output #1: loss = 1.75176 (* 1 = 1.75176 loss)\nI1206 09:42:48.799937 28553 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1206 09:49:39.120213 28553 solver.cpp:337] Iteration 500, Testing net (#0)\nI1206 09:52:22.624351 28553 solver.cpp:404]     Test net output #0: accuracy = 0.378765\nI1206 09:52:22.624604 28553 solver.cpp:404]     Test net output #1: loss = 1.7344 (* 1 = 1.7344 loss)\nI1206 09:52:26.697310 28553 solver.cpp:228] Iteration 500, loss = 1.73803\nI1206 09:52:26.697348 28553 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 09:52:26.697365 28553 solver.cpp:244]     Train net output #1: loss = 1.73803 (* 1 = 1.73803 loss)\nI1206 
09:52:26.745739 28553 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1206 09:59:17.122275 28553 solver.cpp:337] Iteration 600, Testing net (#0)\nI1206 10:02:00.639772 28553 solver.cpp:404]     Test net output #0: accuracy = 0.379647\nI1206 10:02:00.640056 28553 solver.cpp:404]     Test net output #1: loss = 1.71376 (* 1 = 1.71376 loss)\nI1206 10:02:04.714108 28553 solver.cpp:228] Iteration 600, loss = 1.61271\nI1206 10:02:04.714149 28553 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 10:02:04.714166 28553 solver.cpp:244]     Train net output #1: loss = 1.61271 (* 1 = 1.61271 loss)\nI1206 10:02:04.762322 28553 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1206 10:08:54.804945 28553 solver.cpp:337] Iteration 700, Testing net (#0)\nI1206 10:11:37.907330 28553 solver.cpp:404]     Test net output #0: accuracy = 0.394883\nI1206 10:11:37.907640 28553 solver.cpp:404]     Test net output #1: loss = 1.68356 (* 1 = 1.68356 loss)\nI1206 10:11:41.981139 28553 solver.cpp:228] Iteration 700, loss = 1.65101\nI1206 10:11:41.981174 28553 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 10:11:41.981189 28553 solver.cpp:244]     Train net output #1: loss = 1.65101 (* 1 = 1.65101 loss)\nI1206 10:11:42.029583 28553 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1206 10:18:32.033195 28553 solver.cpp:337] Iteration 800, Testing net (#0)\nI1206 10:21:15.566180 28553 solver.cpp:404]     Test net output #0: accuracy = 0.410001\nI1206 10:21:15.566448 28553 solver.cpp:404]     Test net output #1: loss = 1.65706 (* 1 = 1.65706 loss)\nI1206 10:21:19.641108 28553 solver.cpp:228] Iteration 800, loss = 1.85284\nI1206 10:21:19.641146 28553 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 10:21:19.641162 28553 solver.cpp:244]     Train net output #1: loss = 1.85284 (* 1 = 1.85284 loss)\nI1206 10:21:19.689550 28553 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1206 10:28:09.712142 28553 solver.cpp:337] Iteration 900, Testing net 
(#0)\nI1206 10:30:53.271162 28553 solver.cpp:404]     Test net output #0: accuracy = 0.413001\nI1206 10:30:53.271450 28553 solver.cpp:404]     Test net output #1: loss = 1.63105 (* 1 = 1.63105 loss)\nI1206 10:30:57.348645 28553 solver.cpp:228] Iteration 900, loss = 1.63252\nI1206 10:30:57.348680 28553 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 10:30:57.348696 28553 solver.cpp:244]     Train net output #1: loss = 1.63252 (* 1 = 1.63252 loss)\nI1206 10:30:57.396841 28553 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1206 10:37:47.520742 28553 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1206 10:40:31.058907 28553 solver.cpp:404]     Test net output #0: accuracy = 0.410824\nI1206 10:40:31.059170 28553 solver.cpp:404]     Test net output #1: loss = 1.6471 (* 1 = 1.6471 loss)\nI1206 10:40:35.133971 28553 solver.cpp:228] Iteration 1000, loss = 1.75248\nI1206 10:40:35.134006 28553 solver.cpp:244]     Train net output #0: accuracy = 0.352941\nI1206 10:40:35.134021 28553 solver.cpp:244]     Train net output #1: loss = 1.75248 (* 1 = 1.75248 loss)\nI1206 10:40:35.182184 28553 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1206 10:47:25.246724 28553 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1206 10:50:08.794225 28553 solver.cpp:404]     Test net output #0: accuracy = 0.417059\nI1206 10:50:08.794507 28553 solver.cpp:404]     Test net output #1: loss = 1.61475 (* 1 = 1.61475 loss)\nI1206 10:50:12.868201 28553 solver.cpp:228] Iteration 1100, loss = 1.57634\nI1206 10:50:12.868238 28553 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 10:50:12.868253 28553 solver.cpp:244]     Train net output #1: loss = 1.57634 (* 1 = 1.57634 loss)\nI1206 10:50:12.916430 28553 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1206 10:57:02.950318 28553 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1206 10:59:46.526541 28553 solver.cpp:404]     Test net output #0: accuracy = 0.424589\nI1206 10:59:46.526844 28553 solver.cpp:404]     Test 
net output #1: loss = 1.60638 (* 1 = 1.60638 loss)\nI1206 10:59:50.600941 28553 solver.cpp:228] Iteration 1200, loss = 1.6718\nI1206 10:59:50.600977 28553 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 10:59:50.600993 28553 solver.cpp:244]     Train net output #1: loss = 1.6718 (* 1 = 1.6718 loss)\nI1206 10:59:50.649116 28553 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1206 11:06:40.716284 28553 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1206 11:09:24.292266 28553 solver.cpp:404]     Test net output #0: accuracy = 0.406177\nI1206 11:09:24.292559 28553 solver.cpp:404]     Test net output #1: loss = 1.64282 (* 1 = 1.64282 loss)\nI1206 11:09:28.365562 28553 solver.cpp:228] Iteration 1300, loss = 1.50506\nI1206 11:09:28.365600 28553 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 11:09:28.365617 28553 solver.cpp:244]     Train net output #1: loss = 1.50506 (* 1 = 1.50506 loss)\nI1206 11:09:28.414824 28553 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1206 11:16:18.501464 28553 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1206 11:19:02.085664 28553 solver.cpp:404]     Test net output #0: accuracy = 0.428647\nI1206 11:19:02.085938 28553 solver.cpp:404]     Test net output #1: loss = 1.58745 (* 1 = 1.58745 loss)\nI1206 11:19:06.160615 28553 solver.cpp:228] Iteration 1400, loss = 1.40843\nI1206 11:19:06.160657 28553 solver.cpp:244]     Train net output #0: accuracy = 0.505882\nI1206 11:19:06.160673 28553 solver.cpp:244]     Train net output #1: loss = 1.40843 (* 1 = 1.40843 loss)\nI1206 11:19:06.208961 28553 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1206 11:25:56.377091 28553 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1206 11:28:39.881935 28553 solver.cpp:404]     Test net output #0: accuracy = 0.392942\nI1206 11:28:39.882236 28553 solver.cpp:404]     Test net output #1: loss = 1.6666 (* 1 = 1.6666 loss)\nI1206 11:28:43.956091 28553 solver.cpp:228] Iteration 1500, loss = 1.58189\nI1206 
11:28:43.956126 28553 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1206 11:28:43.956142 28553 solver.cpp:244]     Train net output #1: loss = 1.58189 (* 1 = 1.58189 loss)\nI1206 11:28:44.004416 28553 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1206 11:35:34.078492 28553 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1206 11:38:17.618401 28553 solver.cpp:404]     Test net output #0: accuracy = 0.422471\nI1206 11:38:17.618674 28553 solver.cpp:404]     Test net output #1: loss = 1.59116 (* 1 = 1.59116 loss)\nI1206 11:38:21.693073 28553 solver.cpp:228] Iteration 1600, loss = 1.72497\nI1206 11:38:21.693110 28553 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 11:38:21.693126 28553 solver.cpp:244]     Train net output #1: loss = 1.72497 (* 1 = 1.72497 loss)\nI1206 11:38:21.741420 28553 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1206 11:45:11.839443 28553 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1206 11:47:55.394073 28553 solver.cpp:404]     Test net output #0: accuracy = 0.35153\nI1206 11:47:55.394372 28553 solver.cpp:404]     Test net output #1: loss = 1.78222 (* 1 = 1.78222 loss)\nI1206 11:47:59.470345 28553 solver.cpp:228] Iteration 1700, loss = 1.69722\nI1206 11:47:59.470382 28553 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 11:47:59.470398 28553 solver.cpp:244]     Train net output #1: loss = 1.69722 (* 1 = 1.69722 loss)\nI1206 11:47:59.518790 28553 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1206 11:54:49.547196 28553 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1206 11:57:33.072877 28553 solver.cpp:404]     Test net output #0: accuracy = 0.421589\nI1206 11:57:33.073180 28553 solver.cpp:404]     Test net output #1: loss = 1.64392 (* 1 = 1.64392 loss)\nI1206 11:57:37.148633 28553 solver.cpp:228] Iteration 1800, loss = 1.42446\nI1206 11:57:37.148671 28553 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 11:57:37.148687 28553 solver.cpp:244]     Train net 
output #1: loss = 1.42446 (* 1 = 1.42446 loss)\nI1206 11:57:37.197023 28553 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1206 12:04:27.258909 28553 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1206 12:07:10.752007 28553 solver.cpp:404]     Test net output #0: accuracy = 0.415295\nI1206 12:07:10.752297 28553 solver.cpp:404]     Test net output #1: loss = 1.61792 (* 1 = 1.61792 loss)\nI1206 12:07:14.828752 28553 solver.cpp:228] Iteration 1900, loss = 1.70114\nI1206 12:07:14.828786 28553 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 12:07:14.828801 28553 solver.cpp:244]     Train net output #1: loss = 1.70114 (* 1 = 1.70114 loss)\nI1206 12:07:14.877094 28553 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1206 12:14:04.954497 28553 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1206 12:16:48.456619 28553 solver.cpp:404]     Test net output #0: accuracy = 0.42553\nI1206 12:16:48.456920 28553 solver.cpp:404]     Test net output #1: loss = 1.59689 (* 1 = 1.59689 loss)\nI1206 12:16:52.530203 28553 solver.cpp:228] Iteration 2000, loss = 1.71046\nI1206 12:16:52.530241 28553 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 12:16:52.530256 28553 solver.cpp:244]     Train net output #1: loss = 1.71046 (* 1 = 1.71046 loss)\nI1206 12:16:52.579720 28553 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1206 12:23:42.638382 28553 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1206 12:26:26.129932 28553 solver.cpp:404]     Test net output #0: accuracy = 0.427\nI1206 12:26:26.130210 28553 solver.cpp:404]     Test net output #1: loss = 1.59087 (* 1 = 1.59087 loss)\nI1206 12:26:30.202919 28553 solver.cpp:228] Iteration 2100, loss = 1.54125\nI1206 12:26:30.202953 28553 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 12:26:30.202970 28553 solver.cpp:244]     Train net output #1: loss = 1.54125 (* 1 = 1.54125 loss)\nI1206 12:26:30.251308 28553 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1206 12:33:20.185864 
28553 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1206 12:36:03.653097 28553 solver.cpp:404]     Test net output #0: accuracy = 0.405589\nI1206 12:36:03.653396 28553 solver.cpp:404]     Test net output #1: loss = 1.63164 (* 1 = 1.63164 loss)\nI1206 12:36:07.726336 28553 solver.cpp:228] Iteration 2200, loss = 1.67956\nI1206 12:36:07.726369 28553 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1206 12:36:07.726385 28553 solver.cpp:244]     Train net output #1: loss = 1.67956 (* 1 = 1.67956 loss)\nI1206 12:36:07.774694 28553 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1206 12:42:57.804453 28553 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1206 12:45:41.290693 28553 solver.cpp:404]     Test net output #0: accuracy = 0.425177\nI1206 12:45:41.290977 28553 solver.cpp:404]     Test net output #1: loss = 1.56067 (* 1 = 1.56067 loss)\nI1206 12:45:45.364243 28553 solver.cpp:228] Iteration 2300, loss = 1.55674\nI1206 12:45:45.364282 28553 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 12:45:45.364298 28553 solver.cpp:244]     Train net output #1: loss = 1.55674 (* 1 = 1.55674 loss)\nI1206 12:45:45.412712 28553 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1206 12:52:35.518206 28553 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1206 12:55:19.035944 28553 solver.cpp:404]     Test net output #0: accuracy = 0.39953\nI1206 12:55:19.036236 28553 solver.cpp:404]     Test net output #1: loss = 1.62581 (* 1 = 1.62581 loss)\nI1206 12:55:23.109710 28553 solver.cpp:228] Iteration 2400, loss = 1.59992\nI1206 12:55:23.109750 28553 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 12:55:23.109766 28553 solver.cpp:244]     Train net output #1: loss = 1.59992 (* 1 = 1.59992 loss)\nI1206 12:55:23.157990 28553 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1206 13:02:13.294101 28553 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1206 13:04:56.782984 28553 solver.cpp:404]     Test net output #0: accuracy = 
0.415883\nI1206 13:04:56.783262 28553 solver.cpp:404]     Test net output #1: loss = 1.59382 (* 1 = 1.59382 loss)\nI1206 13:05:00.857566 28553 solver.cpp:228] Iteration 2500, loss = 1.40303\nI1206 13:05:00.857609 28553 solver.cpp:244]     Train net output #0: accuracy = 0.529412\nI1206 13:05:00.857625 28553 solver.cpp:244]     Train net output #1: loss = 1.40303 (* 1 = 1.40303 loss)\nI1206 13:05:00.905961 28553 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1206 13:11:50.996007 28553 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1206 13:14:34.473927 28553 solver.cpp:404]     Test net output #0: accuracy = 0.431059\nI1206 13:14:34.474225 28553 solver.cpp:404]     Test net output #1: loss = 1.54485 (* 1 = 1.54485 loss)\nI1206 13:14:38.547924 28553 solver.cpp:228] Iteration 2600, loss = 1.72511\nI1206 13:14:38.547966 28553 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 13:14:38.547981 28553 solver.cpp:244]     Train net output #1: loss = 1.72511 (* 1 = 1.72511 loss)\nI1206 13:14:38.596258 28553 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1206 13:21:28.632227 28553 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1206 13:24:12.109541 28553 solver.cpp:404]     Test net output #0: accuracy = 0.434177\nI1206 13:24:12.109836 28553 solver.cpp:404]     Test net output #1: loss = 1.54408 (* 1 = 1.54408 loss)\nI1206 13:24:16.182118 28553 solver.cpp:228] Iteration 2700, loss = 1.59506\nI1206 13:24:16.182160 28553 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 13:24:16.182176 28553 solver.cpp:244]     Train net output #1: loss = 1.59506 (* 1 = 1.59506 loss)\nI1206 13:24:16.230515 28553 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1206 13:31:06.257782 28553 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1206 13:33:49.733705 28553 solver.cpp:404]     Test net output #0: accuracy = 0.340471\nI1206 13:33:49.733960 28553 solver.cpp:404]     Test net output #1: loss = 1.84986 (* 1 = 1.84986 loss)\nI1206 13:33:53.808940 
28553 solver.cpp:228] Iteration 2800, loss = 1.79189\nI1206 13:33:53.808980 28553 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 13:33:53.808996 28553 solver.cpp:244]     Train net output #1: loss = 1.79189 (* 1 = 1.79189 loss)\nI1206 13:33:53.857352 28553 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1206 13:40:43.895509 28553 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1206 13:43:27.298015 28553 solver.cpp:404]     Test net output #0: accuracy = 0.373588\nI1206 13:43:27.298290 28553 solver.cpp:404]     Test net output #1: loss = 1.82142 (* 1 = 1.82142 loss)\nI1206 13:43:31.372958 28553 solver.cpp:228] Iteration 2900, loss = 1.7625\nI1206 13:43:31.372999 28553 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 13:43:31.373015 28553 solver.cpp:244]     Train net output #1: loss = 1.7625 (* 1 = 1.7625 loss)\nI1206 13:43:31.421239 28553 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1206 13:50:21.440129 28553 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1206 13:53:04.836956 28553 solver.cpp:404]     Test net output #0: accuracy = 0.376059\nI1206 13:53:04.837254 28553 solver.cpp:404]     Test net output #1: loss = 1.73023 (* 1 = 1.73023 loss)\nI1206 13:53:08.911208 28553 solver.cpp:228] Iteration 3000, loss = 1.73741\nI1206 13:53:08.911247 28553 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 13:53:08.911263 28553 solver.cpp:244]     Train net output #1: loss = 1.73741 (* 1 = 1.73741 loss)\nI1206 13:53:08.959681 28553 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1206 13:59:58.990442 28553 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1206 14:02:42.339046 28553 solver.cpp:404]     Test net output #0: accuracy = 0.391706\nI1206 14:02:42.339319 28553 solver.cpp:404]     Test net output #1: loss = 1.67585 (* 1 = 1.67585 loss)\nI1206 14:02:46.412348 28553 solver.cpp:228] Iteration 3100, loss = 1.6389\nI1206 14:02:46.412390 28553 solver.cpp:244]     Train net output #0: accuracy = 
0.376471\nI1206 14:02:46.412406 28553 solver.cpp:244]     Train net output #1: loss = 1.6389 (* 1 = 1.6389 loss)\nI1206 14:02:46.460822 28553 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1206 14:09:36.490463 28553 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1206 14:12:18.030931 28553 solver.cpp:404]     Test net output #0: accuracy = 0.398883\nI1206 14:12:18.031208 28553 solver.cpp:404]     Test net output #1: loss = 1.66581 (* 1 = 1.66581 loss)\nI1206 14:12:22.106170 28553 solver.cpp:228] Iteration 3200, loss = 1.56063\nI1206 14:12:22.106209 28553 solver.cpp:244]     Train net output #0: accuracy = 0.470588\nI1206 14:12:22.106225 28553 solver.cpp:244]     Train net output #1: loss = 1.56063 (* 1 = 1.56063 loss)\nI1206 14:12:22.154322 28553 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1206 14:19:11.288646 28553 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1206 14:21:53.087855 28553 solver.cpp:404]     Test net output #0: accuracy = 0.422412\nI1206 14:21:53.088117 28553 solver.cpp:404]     Test net output #1: loss = 1.62031 (* 1 = 1.62031 loss)\nI1206 14:21:57.130264 28553 solver.cpp:228] Iteration 3300, loss = 1.76\nI1206 14:21:57.130303 28553 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 14:21:57.130319 28553 solver.cpp:244]     Train net output #1: loss = 1.76 (* 1 = 1.76 loss)\nI1206 14:21:57.207998 28553 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1206 14:28:45.823351 28553 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1206 14:31:27.742776 28553 solver.cpp:404]     Test net output #0: accuracy = 0.401001\nI1206 14:31:27.743043 28553 solver.cpp:404]     Test net output #1: loss = 1.63024 (* 1 = 1.63024 loss)\nI1206 14:31:31.786526 28553 solver.cpp:228] Iteration 3400, loss = 1.63598\nI1206 14:31:31.786568 28553 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 14:31:31.786582 28553 solver.cpp:244]     Train net output #1: loss = 1.63598 (* 1 = 1.63598 loss)\nI1206 14:31:31.853209 28553 
sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1206 14:38:20.454376 28553 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1206 14:41:02.651243 28553 solver.cpp:404]     Test net output #0: accuracy = 0.375\nI1206 14:41:02.651522 28553 solver.cpp:404]     Test net output #1: loss = 1.76283 (* 1 = 1.76283 loss)\nI1206 14:41:06.695919 28553 solver.cpp:228] Iteration 3500, loss = 1.80148\nI1206 14:41:06.695960 28553 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1206 14:41:06.695976 28553 solver.cpp:244]     Train net output #1: loss = 1.80148 (* 1 = 1.80148 loss)\nI1206 14:41:06.761458 28553 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1206 14:47:55.406574 28553 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1206 14:50:36.513852 28553 solver.cpp:404]     Test net output #0: accuracy = 0.374589\nI1206 14:50:36.514111 28553 solver.cpp:404]     Test net output #1: loss = 1.71507 (* 1 = 1.71507 loss)\nI1206 14:50:40.558358 28553 solver.cpp:228] Iteration 3600, loss = 1.74716\nI1206 14:50:40.558398 28553 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1206 14:50:40.558415 28553 solver.cpp:244]     Train net output #1: loss = 1.74716 (* 1 = 1.74716 loss)\nI1206 14:50:40.625651 28553 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1206 14:57:29.216595 28553 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1206 15:00:10.297418 28553 solver.cpp:404]     Test net output #0: accuracy = 0.38853\nI1206 15:00:10.297650 28553 solver.cpp:404]     Test net output #1: loss = 1.72941 (* 1 = 1.72941 loss)\nI1206 15:00:14.341097 28553 solver.cpp:228] Iteration 3700, loss = 1.79065\nI1206 15:00:14.341137 28553 solver.cpp:244]     Train net output #0: accuracy = 0.352941\nI1206 15:00:14.341153 28553 solver.cpp:244]     Train net output #1: loss = 1.79065 (* 1 = 1.79065 loss)\nI1206 15:00:14.407140 28553 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1206 15:07:03.081645 28553 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1206 
15:09:44.231611 28553 solver.cpp:404]     Test net output #0: accuracy = 0.371177\nI1206 15:09:44.231845 28553 solver.cpp:404]     Test net output #1: loss = 1.72599 (* 1 = 1.72599 loss)\nI1206 15:09:48.274036 28553 solver.cpp:228] Iteration 3800, loss = 1.56942\nI1206 15:09:48.274076 28553 solver.cpp:244]     Train net output #0: accuracy = 0.470588\nI1206 15:09:48.274092 28553 solver.cpp:244]     Train net output #1: loss = 1.56942 (* 1 = 1.56942 loss)\nI1206 15:09:48.340771 28553 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI1206 15:16:36.952221 28553 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1206 15:19:18.828219 28553 solver.cpp:404]     Test net output #0: accuracy = 0.410471\nI1206 15:19:18.828433 28553 solver.cpp:404]     Test net output #1: loss = 1.59809 (* 1 = 1.59809 loss)\nI1206 15:19:22.870651 28553 solver.cpp:228] Iteration 3900, loss = 1.41434\nI1206 15:19:22.870692 28553 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 15:19:22.870708 28553 solver.cpp:244]     Train net output #1: loss = 1.41434 (* 1 = 1.41434 loss)\nI1206 15:19:22.939693 28553 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1206 15:26:11.901362 28553 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1206 15:28:53.135972 28553 solver.cpp:404]     Test net output #0: accuracy = 0.411471\nI1206 15:28:53.136210 28553 solver.cpp:404]     Test net output #1: loss = 1.60353 (* 1 = 1.60353 loss)\nI1206 15:28:57.179177 28553 solver.cpp:228] Iteration 4000, loss = 1.56427\nI1206 15:28:57.179219 28553 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 15:28:57.179234 28553 solver.cpp:244]     Train net output #1: loss = 1.56427 (* 1 = 1.56427 loss)\nI1206 15:28:57.245678 28553 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1206 15:35:45.837808 28553 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1206 15:38:26.674887 28553 solver.cpp:404]     Test net output #0: accuracy = 0.234412\nI1206 15:38:26.675127 28553 solver.cpp:404]     Test net 
output #1: loss = 2.83475 (* 1 = 2.83475 loss)\nI1206 15:38:30.718093 28553 solver.cpp:228] Iteration 4100, loss = 2.75474\nI1206 15:38:30.718134 28553 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 15:38:30.718150 28553 solver.cpp:244]     Train net output #1: loss = 2.75474 (* 1 = 2.75474 loss)\nI1206 15:38:30.785832 28553 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1206 15:45:19.273564 28553 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1206 15:48:01.219733 28553 solver.cpp:404]     Test net output #0: accuracy = 0.137353\nI1206 15:48:01.219965 28553 solver.cpp:404]     Test net output #1: loss = 8.17109 (* 1 = 8.17109 loss)\nI1206 15:48:05.265058 28553 solver.cpp:228] Iteration 4200, loss = 7.96218\nI1206 15:48:05.265101 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 15:48:05.265118 28553 solver.cpp:244]     Train net output #1: loss = 7.96218 (* 1 = 7.96218 loss)\nI1206 15:48:05.334497 28553 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1206 15:54:53.905916 28553 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1206 15:57:36.182524 28553 solver.cpp:404]     Test net output #0: accuracy = 0.177647\nI1206 15:57:36.182740 28553 solver.cpp:404]     Test net output #1: loss = 4.04505 (* 1 = 4.04505 loss)\nI1206 15:57:40.225977 28553 solver.cpp:228] Iteration 4300, loss = 4.06959\nI1206 15:57:40.226020 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 15:57:40.226037 28553 solver.cpp:244]     Train net output #1: loss = 4.06959 (* 1 = 4.06959 loss)\nI1206 15:57:40.292515 28553 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1206 16:04:28.971038 28553 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1206 16:07:11.379528 28553 solver.cpp:404]     Test net output #0: accuracy = 0.176412\nI1206 16:07:11.379765 28553 solver.cpp:404]     Test net output #1: loss = 6.25005 (* 1 = 6.25005 loss)\nI1206 16:07:15.423614 28553 solver.cpp:228] Iteration 4400, loss = 6.07726\nI1206 
16:07:15.423655 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 16:07:15.423672 28553 solver.cpp:244]     Train net output #1: loss = 6.07726 (* 1 = 6.07726 loss)\nI1206 16:07:15.491217 28553 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1206 16:14:04.305251 28553 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1206 16:16:46.712947 28553 solver.cpp:404]     Test net output #0: accuracy = 0.124\nI1206 16:16:46.713183 28553 solver.cpp:404]     Test net output #1: loss = 7.52663 (* 1 = 7.52663 loss)\nI1206 16:16:50.757730 28553 solver.cpp:228] Iteration 4500, loss = 6.93608\nI1206 16:16:50.757771 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 16:16:50.757787 28553 solver.cpp:244]     Train net output #1: loss = 6.93608 (* 1 = 6.93608 loss)\nI1206 16:16:50.830660 28553 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1206 16:23:39.619777 28553 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1206 16:26:22.145735 28553 solver.cpp:404]     Test net output #0: accuracy = 0.163882\nI1206 16:26:22.145952 28553 solver.cpp:404]     Test net output #1: loss = 7.80215 (* 1 = 7.80215 loss)\nI1206 16:26:26.189985 28553 solver.cpp:228] Iteration 4600, loss = 9.04537\nI1206 16:26:26.190026 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 16:26:26.190042 28553 solver.cpp:244]     Train net output #1: loss = 9.04537 (* 1 = 9.04537 loss)\nI1206 16:26:26.253774 28553 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1206 16:33:14.841965 28553 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1206 16:35:57.176707 28553 solver.cpp:404]     Test net output #0: accuracy = 0.207118\nI1206 16:35:57.176947 28553 solver.cpp:404]     Test net output #1: loss = 8.14453 (* 1 = 8.14453 loss)\nI1206 16:36:01.221102 28553 solver.cpp:228] Iteration 4700, loss = 8.27502\nI1206 16:36:01.221143 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 16:36:01.221160 28553 solver.cpp:244]     Train net output 
#1: loss = 8.27502 (* 1 = 8.27502 loss)\nI1206 16:36:01.288415 28553 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1206 16:42:49.782063 28553 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1206 16:45:32.096393 28553 solver.cpp:404]     Test net output #0: accuracy = 0.190471\nI1206 16:45:32.096645 28553 solver.cpp:404]     Test net output #1: loss = 4.53687 (* 1 = 4.53687 loss)\nI1206 16:45:36.139390 28553 solver.cpp:228] Iteration 4800, loss = 5.21352\nI1206 16:45:36.139433 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 16:45:36.139449 28553 solver.cpp:244]     Train net output #1: loss = 5.21352 (* 1 = 5.21352 loss)\nI1206 16:45:36.208561 28553 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1206 16:52:25.110657 28553 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1206 16:55:07.453506 28553 solver.cpp:404]     Test net output #0: accuracy = 0.164235\nI1206 16:55:07.453747 28553 solver.cpp:404]     Test net output #1: loss = 5.32121 (* 1 = 5.32121 loss)\nI1206 16:55:11.497381 28553 solver.cpp:228] Iteration 4900, loss = 5.69146\nI1206 16:55:11.497423 28553 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1206 16:55:11.497440 28553 solver.cpp:244]     Train net output #1: loss = 5.69146 (* 1 = 5.69146 loss)\nI1206 16:55:11.566809 28553 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1206 17:02:00.063275 28553 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1206 17:04:42.185856 28553 solver.cpp:404]     Test net output #0: accuracy = 0.245941\nI1206 17:04:42.186148 28553 solver.cpp:404]     Test net output #1: loss = 3.73961 (* 1 = 3.73961 loss)\nI1206 17:04:46.229812 28553 solver.cpp:228] Iteration 5000, loss = 3.93189\nI1206 17:04:46.229856 28553 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1206 17:04:46.229871 28553 solver.cpp:244]     Train net output #1: loss = 3.93189 (* 1 = 3.93189 loss)\nI1206 17:04:46.296432 28553 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1206 17:11:34.947935 
28553 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1206 17:14:16.860455 28553 solver.cpp:404]     Test net output #0: accuracy = 0.234765\nI1206 17:14:16.860694 28553 solver.cpp:404]     Test net output #1: loss = 5.28346 (* 1 = 5.28346 loss)\nI1206 17:14:20.902885 28553 solver.cpp:228] Iteration 5100, loss = 5.24149\nI1206 17:14:20.902927 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 17:14:20.902943 28553 solver.cpp:244]     Train net output #1: loss = 5.24149 (* 1 = 5.24149 loss)\nI1206 17:14:20.968861 28553 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1206 17:21:09.852947 28553 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1206 17:23:51.198848 28553 solver.cpp:404]     Test net output #0: accuracy = 0.24\nI1206 17:23:51.199122 28553 solver.cpp:404]     Test net output #1: loss = 3.82381 (* 1 = 3.82381 loss)\nI1206 17:23:55.240825 28553 solver.cpp:228] Iteration 5200, loss = 4.2174\nI1206 17:23:55.240867 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 17:23:55.240882 28553 solver.cpp:244]     Train net output #1: loss = 4.2174 (* 1 = 4.2174 loss)\nI1206 17:23:55.310881 28553 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1206 17:30:44.138988 28553 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1206 17:33:24.913511 28553 solver.cpp:404]     Test net output #0: accuracy = 0.168353\nI1206 17:33:24.913769 28553 solver.cpp:404]     Test net output #1: loss = 6.64113 (* 1 = 6.64113 loss)\nI1206 17:33:28.955893 28553 solver.cpp:228] Iteration 5300, loss = 5.94659\nI1206 17:33:28.955934 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 17:33:28.955950 28553 solver.cpp:244]     Train net output #1: loss = 5.94659 (* 1 = 5.94659 loss)\nI1206 17:33:29.020714 28553 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1206 17:40:17.481380 28553 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1206 17:42:58.808233 28553 solver.cpp:404]     Test net output #0: accuracy = 0.21553\nI1206 
17:42:58.808485 28553 solver.cpp:404]     Test net output #1: loss = 6.84565 (* 1 = 6.84565 loss)\nI1206 17:43:02.850431 28553 solver.cpp:228] Iteration 5400, loss = 6.46888\nI1206 17:43:02.850473 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 17:43:02.850488 28553 solver.cpp:244]     Train net output #1: loss = 6.46888 (* 1 = 6.46888 loss)\nI1206 17:43:02.919176 28553 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1206 17:49:51.524139 28553 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1206 17:52:32.871659 28553 solver.cpp:404]     Test net output #0: accuracy = 0.166941\nI1206 17:52:32.871945 28553 solver.cpp:404]     Test net output #1: loss = 9.85514 (* 1 = 9.85514 loss)\nI1206 17:52:36.914710 28553 solver.cpp:228] Iteration 5500, loss = 9.78519\nI1206 17:52:36.914752 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 17:52:36.914768 28553 solver.cpp:244]     Train net output #1: loss = 9.78519 (* 1 = 9.78519 loss)\nI1206 17:52:36.983983 28553 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1206 17:59:25.526435 28553 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1206 18:02:07.635217 28553 solver.cpp:404]     Test net output #0: accuracy = 0.223765\nI1206 18:02:07.635490 28553 solver.cpp:404]     Test net output #1: loss = 9.73767 (* 1 = 9.73767 loss)\nI1206 18:02:11.679028 28553 solver.cpp:228] Iteration 5600, loss = 9.69549\nI1206 18:02:11.679071 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 18:02:11.679087 28553 solver.cpp:244]     Train net output #1: loss = 9.69549 (* 1 = 9.69549 loss)\nI1206 18:02:11.750710 28553 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1206 18:09:00.473007 28553 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1206 18:11:42.719422 28553 solver.cpp:404]     Test net output #0: accuracy = 0.22653\nI1206 18:11:42.719694 28553 solver.cpp:404]     Test net output #1: loss = 7.66816 (* 1 = 7.66816 loss)\nI1206 18:11:46.763712 28553 solver.cpp:228] 
Iteration 5700, loss = 7.1393\nI1206 18:11:46.763754 28553 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 18:11:46.763770 28553 solver.cpp:244]     Train net output #1: loss = 7.1393 (* 1 = 7.1393 loss)\nI1206 18:11:46.826532 28553 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1206 18:18:35.583330 28553 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1206 18:21:16.550266 28553 solver.cpp:404]     Test net output #0: accuracy = 0.199647\nI1206 18:21:16.550504 28553 solver.cpp:404]     Test net output #1: loss = 13.3736 (* 1 = 13.3736 loss)\nI1206 18:21:20.593006 28553 solver.cpp:228] Iteration 5800, loss = 13.0669\nI1206 18:21:20.593047 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 18:21:20.593065 28553 solver.cpp:244]     Train net output #1: loss = 13.0669 (* 1 = 13.0669 loss)\nI1206 18:21:20.661511 28553 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1206 18:28:09.236743 28553 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1206 18:30:50.384214 28553 solver.cpp:404]     Test net output #0: accuracy = 0.237941\nI1206 18:30:50.384454 28553 solver.cpp:404]     Test net output #1: loss = 8.10804 (* 1 = 8.10804 loss)\nI1206 18:30:54.420538 28553 solver.cpp:228] Iteration 5900, loss = 7.94927\nI1206 18:30:54.420578 28553 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 18:30:54.420594 28553 solver.cpp:244]     Train net output #1: loss = 7.94927 (* 1 = 7.94927 loss)\nI1206 18:30:54.493376 28553 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1206 18:37:42.299347 28553 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1206 18:40:24.565371 28553 solver.cpp:404]     Test net output #0: accuracy = 0.243765\nI1206 18:40:24.565625 28553 solver.cpp:404]     Test net output #1: loss = 3.93237 (* 1 = 3.93237 loss)\nI1206 18:40:28.601310 28553 solver.cpp:228] Iteration 6000, loss = 4.04885\nI1206 18:40:28.601351 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 18:40:28.601367 
28553 solver.cpp:244]     Train net output #1: loss = 4.04885 (* 1 = 4.04885 loss)\nI1206 18:40:28.674619 28553 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1206 18:47:16.778800 28553 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1206 18:49:59.065970 28553 solver.cpp:404]     Test net output #0: accuracy = 0.194059\nI1206 18:49:59.066216 28553 solver.cpp:404]     Test net output #1: loss = 10.6582 (* 1 = 10.6582 loss)\nI1206 18:50:03.103459 28553 solver.cpp:228] Iteration 6100, loss = 10.9616\nI1206 18:50:03.103502 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 18:50:03.103516 28553 solver.cpp:244]     Train net output #1: loss = 10.9616 (* 1 = 10.9616 loss)\nI1206 18:50:03.180150 28553 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1206 18:56:51.308792 28553 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1206 18:59:33.435874 28553 solver.cpp:404]     Test net output #0: accuracy = 0.156\nI1206 18:59:33.436110 28553 solver.cpp:404]     Test net output #1: loss = 9.50661 (* 1 = 9.50661 loss)\nI1206 18:59:37.471982 28553 solver.cpp:228] Iteration 6200, loss = 12.5183\nI1206 18:59:37.472021 28553 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 18:59:37.472038 28553 solver.cpp:244]     Train net output #1: loss = 12.5183 (* 1 = 12.5183 loss)\nI1206 18:59:37.550537 28553 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1206 19:06:25.646916 28553 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1206 19:09:08.007217 28553 solver.cpp:404]     Test net output #0: accuracy = 0.137706\nI1206 19:09:08.007504 28553 solver.cpp:404]     Test net output #1: loss = 14.0967 (* 1 = 14.0967 loss)\nI1206 19:09:12.044024 28553 solver.cpp:228] Iteration 6300, loss = 15.1722\nI1206 19:09:12.044062 28553 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1206 19:09:12.044080 28553 solver.cpp:244]     Train net output #1: loss = 15.1722 (* 1 = 15.1722 loss)\nI1206 19:09:12.117347 28553 sgd_solver.cpp:166] Iteration 
6300, lr = 0.945\nI1206 19:16:00.124953 28553 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1206 19:18:42.566061 28553 solver.cpp:404]     Test net output #0: accuracy = 0.130647\nI1206 19:18:42.566349 28553 solver.cpp:404]     Test net output #1: loss = 17.269 (* 1 = 17.269 loss)\nI1206 19:18:46.602221 28553 solver.cpp:228] Iteration 6400, loss = 14.5289\nI1206 19:18:46.602260 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 19:18:46.602277 28553 solver.cpp:244]     Train net output #1: loss = 14.5289 (* 1 = 14.5289 loss)\nI1206 19:18:46.678824 28553 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1206 19:25:34.988466 28553 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1206 19:28:16.537161 28553 solver.cpp:404]     Test net output #0: accuracy = 0.195412\nI1206 19:28:16.537441 28553 solver.cpp:404]     Test net output #1: loss = 6.99536 (* 1 = 6.99536 loss)\nI1206 19:28:20.573393 28553 solver.cpp:228] Iteration 6500, loss = 7.63285\nI1206 19:28:20.573431 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 19:28:20.573447 28553 solver.cpp:244]     Train net output #1: loss = 7.63285 (* 1 = 7.63285 loss)\nI1206 19:28:20.646838 28553 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1206 19:35:08.724153 28553 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1206 19:37:51.154808 28553 solver.cpp:404]     Test net output #0: accuracy = 0.157529\nI1206 19:37:51.155113 28553 solver.cpp:404]     Test net output #1: loss = 14.1563 (* 1 = 14.1563 loss)\nI1206 19:37:55.191009 28553 solver.cpp:228] Iteration 6600, loss = 15.4931\nI1206 19:37:55.191048 28553 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1206 19:37:55.191066 28553 solver.cpp:244]     Train net output #1: loss = 15.4931 (* 1 = 15.4931 loss)\nI1206 19:37:55.261297 28553 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1206 19:44:43.223853 28553 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1206 19:47:25.893056 28553 solver.cpp:404]    
 Test net output #0: accuracy = 0.155647\nI1206 19:47:25.893342 28553 solver.cpp:404]     Test net output #1: loss = 18.742 (* 1 = 18.742 loss)\nI1206 19:47:29.928699 28553 solver.cpp:228] Iteration 6700, loss = 19.8025\nI1206 19:47:29.928740 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 19:47:29.928757 28553 solver.cpp:244]     Train net output #1: loss = 19.8025 (* 1 = 19.8025 loss)\nI1206 19:47:30.008236 28553 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1206 19:54:18.197311 28553 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1206 19:57:00.852555 28553 solver.cpp:404]     Test net output #0: accuracy = 0.233471\nI1206 19:57:00.852877 28553 solver.cpp:404]     Test net output #1: loss = 7.66114 (* 1 = 7.66114 loss)\nI1206 19:57:04.890096 28553 solver.cpp:228] Iteration 6800, loss = 6.18227\nI1206 19:57:04.890136 28553 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 19:57:04.890153 28553 solver.cpp:244]     Train net output #1: loss = 6.18227 (* 1 = 6.18227 loss)\nI1206 19:57:04.960250 28553 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1206 20:03:53.048555 28553 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1206 20:06:35.736696 28553 solver.cpp:404]     Test net output #0: accuracy = 0.184765\nI1206 20:06:35.736989 28553 solver.cpp:404]     Test net output #1: loss = 14.5604 (* 1 = 14.5604 loss)\nI1206 20:06:39.774653 28553 solver.cpp:228] Iteration 6900, loss = 16.0615\nI1206 20:06:39.774708 28553 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 20:06:39.774736 28553 solver.cpp:244]     Train net output #1: loss = 16.0615 (* 1 = 16.0615 loss)\nI1206 20:06:39.921046 28553 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1206 20:13:27.877959 28553 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1206 20:16:10.515596 28553 solver.cpp:404]     Test net output #0: accuracy = 0.210706\nI1206 20:16:10.515913 28553 solver.cpp:404]     Test net output #1: loss = 6.14641 (* 1 = 6.14641 
loss)\nI1206 20:16:14.553477 28553 solver.cpp:228] Iteration 7000, loss = 7.35378\nI1206 20:16:14.553515 28553 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 20:16:14.553532 28553 solver.cpp:244]     Train net output #1: loss = 7.35379 (* 1 = 7.35379 loss)\nI1206 20:16:14.627991 28553 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1206 20:23:02.602407 28553 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1206 20:25:45.217500 28553 solver.cpp:404]     Test net output #0: accuracy = 0.139824\nI1206 20:25:45.217811 28553 solver.cpp:404]     Test net output #1: loss = 9.70508 (* 1 = 9.70508 loss)\nI1206 20:25:49.251794 28553 solver.cpp:228] Iteration 7100, loss = 9.6637\nI1206 20:25:49.251832 28553 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 20:25:49.251848 28553 solver.cpp:244]     Train net output #1: loss = 9.6637 (* 1 = 9.6637 loss)\nI1206 20:25:49.331467 28553 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1206 20:32:37.446576 28553 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1206 20:35:20.045511 28553 solver.cpp:404]     Test net output #0: accuracy = 0.19\nI1206 20:35:20.045805 28553 solver.cpp:404]     Test net output #1: loss = 12.5237 (* 1 = 12.5237 loss)\nI1206 20:35:24.080417 28553 solver.cpp:228] Iteration 7200, loss = 13.561\nI1206 20:35:24.080456 28553 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1206 20:35:24.080472 28553 solver.cpp:244]     Train net output #1: loss = 13.561 (* 1 = 13.561 loss)\nI1206 20:35:24.149245 28553 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1206 20:42:12.184532 28553 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1206 20:44:54.779453 28553 solver.cpp:404]     Test net output #0: accuracy = 0.167882\nI1206 20:44:54.779757 28553 solver.cpp:404]     Test net output #1: loss = 10.7489 (* 1 = 10.7489 loss)\nI1206 20:44:58.816229 28553 solver.cpp:228] Iteration 7300, loss = 11.223\nI1206 20:44:58.816268 28553 solver.cpp:244]     Train net output #0: 
accuracy = 0.164706\nI1206 20:44:58.816285 28553 solver.cpp:244]     Train net output #1: loss = 11.223 (* 1 = 11.223 loss)\nI1206 20:44:58.891770 28553 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1206 20:51:47.086442 28553 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1206 20:54:29.572518 28553 solver.cpp:404]     Test net output #0: accuracy = 0.212647\nI1206 20:54:29.572827 28553 solver.cpp:404]     Test net output #1: loss = 10.9182 (* 1 = 10.9182 loss)\nI1206 20:54:33.607713 28553 solver.cpp:228] Iteration 7400, loss = 11.4345\nI1206 20:54:33.607750 28553 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 20:54:33.607767 28553 solver.cpp:244]     Train net output #1: loss = 11.4345 (* 1 = 11.4345 loss)\nI1206 20:54:33.678427 28553 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1206 21:01:21.778322 28553 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1206 21:04:04.301801 28553 solver.cpp:404]     Test net output #0: accuracy = 0.214471\nI1206 21:04:04.302083 28553 solver.cpp:404]     Test net output #1: loss = 8.77901 (* 1 = 8.77901 loss)\nI1206 21:04:08.336338 28553 solver.cpp:228] Iteration 7500, loss = 7.38219\nI1206 21:04:08.336375 28553 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 21:04:08.336391 28553 solver.cpp:244]     Train net output #1: loss = 7.38219 (* 1 = 7.38219 loss)\nI1206 21:04:08.405536 28553 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1206 21:10:56.267763 28553 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1206 21:13:38.800149 28553 solver.cpp:404]     Test net output #0: accuracy = 0.141353\nI1206 21:13:38.800432 28553 solver.cpp:404]     Test net output #1: loss = 15.1129 (* 1 = 15.1129 loss)\nI1206 21:13:42.834888 28553 solver.cpp:228] Iteration 7600, loss = 17.089\nI1206 21:13:42.834926 28553 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1206 21:13:42.834944 28553 solver.cpp:244]     Train net output #1: loss = 17.089 (* 1 = 17.089 loss)\nI1206 
21:13:42.908728 28553 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1206 21:20:30.962574 28553 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1206 21:23:13.470619 28553 solver.cpp:404]     Test net output #0: accuracy = 0.223471\nI1206 21:23:13.470922 28553 solver.cpp:404]     Test net output #1: loss = 7.43273 (* 1 = 7.43273 loss)\nI1206 21:23:17.504866 28553 solver.cpp:228] Iteration 7700, loss = 8.42106\nI1206 21:23:17.504902 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 21:23:17.504918 28553 solver.cpp:244]     Train net output #1: loss = 8.42106 (* 1 = 8.42106 loss)\nI1206 21:23:17.579731 28553 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1206 21:30:05.704392 28553 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1206 21:32:48.165488 28553 solver.cpp:404]     Test net output #0: accuracy = 0.197059\nI1206 21:32:48.165853 28553 solver.cpp:404]     Test net output #1: loss = 11.3025 (* 1 = 11.3025 loss)\nI1206 21:32:52.201787 28553 solver.cpp:228] Iteration 7800, loss = 10.1646\nI1206 21:32:52.201828 28553 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 21:32:52.201844 28553 solver.cpp:244]     Train net output #1: loss = 10.1646 (* 1 = 10.1646 loss)\nI1206 21:32:52.273809 28553 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1206 21:39:40.188802 28553 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1206 21:42:22.641964 28553 solver.cpp:404]     Test net output #0: accuracy = 0.150118\nI1206 21:42:22.642204 28553 solver.cpp:404]     Test net output #1: loss = 22.5667 (* 1 = 22.5667 loss)\nI1206 21:42:26.677558 28553 solver.cpp:228] Iteration 7900, loss = 21.4098\nI1206 21:42:26.677593 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 21:42:26.677609 28553 solver.cpp:244]     Train net output #1: loss = 21.4098 (* 1 = 21.4098 loss)\nI1206 21:42:26.754151 28553 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1206 21:49:14.644271 28553 solver.cpp:337] Iteration 8000, Testing net 
(#0)\nI1206 21:51:57.129216 28553 solver.cpp:404]     Test net output #0: accuracy = 0.149529\nI1206 21:51:57.129513 28553 solver.cpp:404]     Test net output #1: loss = 21.8588 (* 1 = 21.8588 loss)\nI1206 21:52:01.163425 28553 solver.cpp:228] Iteration 8000, loss = 21.0519\nI1206 21:52:01.163460 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 21:52:01.163476 28553 solver.cpp:244]     Train net output #1: loss = 21.0519 (* 1 = 21.0519 loss)\nI1206 21:52:01.233186 28553 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1206 21:58:49.100332 28553 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1206 22:01:31.616163 28553 solver.cpp:404]     Test net output #0: accuracy = 0.145294\nI1206 22:01:31.616463 28553 solver.cpp:404]     Test net output #1: loss = 17.6587 (* 1 = 17.6587 loss)\nI1206 22:01:35.652334 28553 solver.cpp:228] Iteration 8100, loss = 17.5769\nI1206 22:01:35.652369 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 22:01:35.652385 28553 solver.cpp:244]     Train net output #1: loss = 17.5769 (* 1 = 17.5769 loss)\nI1206 22:01:35.726366 28553 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1206 22:08:23.873893 28553 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1206 22:11:06.409869 28553 solver.cpp:404]     Test net output #0: accuracy = 0.167059\nI1206 22:11:06.410166 28553 solver.cpp:404]     Test net output #1: loss = 14.3262 (* 1 = 14.3262 loss)\nI1206 22:11:10.445467 28553 solver.cpp:228] Iteration 8200, loss = 15.9711\nI1206 22:11:10.445502 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 22:11:10.445518 28553 solver.cpp:244]     Train net output #1: loss = 15.9711 (* 1 = 15.9711 loss)\nI1206 22:11:10.514987 28553 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1206 22:17:58.445819 28553 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1206 22:20:40.971164 28553 solver.cpp:404]     Test net output #0: accuracy = 0.264941\nI1206 22:20:40.971462 28553 solver.cpp:404]     Test 
net output #1: loss = 5.91328 (* 1 = 5.91328 loss)\nI1206 22:20:45.005744 28553 solver.cpp:228] Iteration 8300, loss = 5.69437\nI1206 22:20:45.005780 28553 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 22:20:45.005796 28553 solver.cpp:244]     Train net output #1: loss = 5.69437 (* 1 = 5.69437 loss)\nI1206 22:20:45.075886 28553 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1206 22:27:33.113651 28553 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1206 22:30:15.635237 28553 solver.cpp:404]     Test net output #0: accuracy = 0.249765\nI1206 22:30:15.635504 28553 solver.cpp:404]     Test net output #1: loss = 6.99495 (* 1 = 6.99495 loss)\nI1206 22:30:19.672122 28553 solver.cpp:228] Iteration 8400, loss = 5.7353\nI1206 22:30:19.672154 28553 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1206 22:30:19.672170 28553 solver.cpp:244]     Train net output #1: loss = 5.7353 (* 1 = 5.7353 loss)\nI1206 22:30:19.741413 28553 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1206 22:37:07.720247 28553 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1206 22:39:50.294582 28553 solver.cpp:404]     Test net output #0: accuracy = 0.182765\nI1206 22:39:50.294879 28553 solver.cpp:404]     Test net output #1: loss = 10.74 (* 1 = 10.74 loss)\nI1206 22:39:54.329682 28553 solver.cpp:228] Iteration 8500, loss = 11.0131\nI1206 22:39:54.329715 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 22:39:54.329731 28553 solver.cpp:244]     Train net output #1: loss = 11.0131 (* 1 = 11.0131 loss)\nI1206 22:39:54.400068 28553 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1206 22:46:42.203044 28553 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1206 22:49:24.784859 28553 solver.cpp:404]     Test net output #0: accuracy = 0.205706\nI1206 22:49:24.785162 28553 solver.cpp:404]     Test net output #1: loss = 23.6662 (* 1 = 23.6662 loss)\nI1206 22:49:28.818341 28553 solver.cpp:228] Iteration 8600, loss = 25.9246\nI1206 22:49:28.818374 
28553 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 22:49:28.818392 28553 solver.cpp:244]     Train net output #1: loss = 25.9246 (* 1 = 25.9246 loss)\nI1206 22:49:28.899293 28553 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1206 22:56:17.965873 28553 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1206 22:59:00.503442 28553 solver.cpp:404]     Test net output #0: accuracy = 0.206177\nI1206 22:59:00.503743 28553 solver.cpp:404]     Test net output #1: loss = 12.4935 (* 1 = 12.4935 loss)\nI1206 22:59:04.537055 28553 solver.cpp:228] Iteration 8700, loss = 14.016\nI1206 22:59:04.537088 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 22:59:04.537103 28553 solver.cpp:244]     Train net output #1: loss = 14.0161 (* 1 = 14.0161 loss)\nI1206 22:59:04.641232 28553 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1206 23:05:54.360834 28553 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1206 23:08:36.898375 28553 solver.cpp:404]     Test net output #0: accuracy = 0.156\nI1206 23:08:36.898669 28553 solver.cpp:404]     Test net output #1: loss = 14.1897 (* 1 = 14.1897 loss)\nI1206 23:08:40.934922 28553 solver.cpp:228] Iteration 8800, loss = 14.0112\nI1206 23:08:40.934957 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 23:08:40.934973 28553 solver.cpp:244]     Train net output #1: loss = 14.0112 (* 1 = 14.0112 loss)\nI1206 23:08:41.039477 28553 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1206 23:15:30.990459 28553 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1206 23:18:13.498375 28553 solver.cpp:404]     Test net output #0: accuracy = 0.201294\nI1206 23:18:13.498661 28553 solver.cpp:404]     Test net output #1: loss = 12.8578 (* 1 = 12.8578 loss)\nI1206 23:18:17.532490 28553 solver.cpp:228] Iteration 8900, loss = 14.0857\nI1206 23:18:17.532524 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 23:18:17.532541 28553 solver.cpp:244]     Train net output #1: loss = 
14.0857 (* 1 = 14.0857 loss)\nI1206 23:18:17.713851 28553 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1206 23:25:07.620642 28553 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1206 23:27:50.136965 28553 solver.cpp:404]     Test net output #0: accuracy = 0.159\nI1206 23:27:50.137274 28553 solver.cpp:404]     Test net output #1: loss = 12.8145 (* 1 = 12.8145 loss)\nI1206 23:27:54.171551 28553 solver.cpp:228] Iteration 9000, loss = 11.4432\nI1206 23:27:54.171589 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 23:27:54.171607 28553 solver.cpp:244]     Train net output #1: loss = 11.4432 (* 1 = 11.4432 loss)\nI1206 23:27:54.279952 28553 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1206 23:34:44.471940 28553 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1206 23:37:27.026765 28553 solver.cpp:404]     Test net output #0: accuracy = 0.189118\nI1206 23:37:27.027050 28553 solver.cpp:404]     Test net output #1: loss = 12.2223 (* 1 = 12.2223 loss)\nI1206 23:37:31.061307 28553 solver.cpp:228] Iteration 9100, loss = 10.4893\nI1206 23:37:31.061343 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 23:37:31.061359 28553 solver.cpp:244]     Train net output #1: loss = 10.4893 (* 1 = 10.4893 loss)\nI1206 23:37:31.168010 28553 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1206 23:44:20.848590 28553 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1206 23:47:03.419939 28553 solver.cpp:404]     Test net output #0: accuracy = 0.129235\nI1206 23:47:03.420238 28553 solver.cpp:404]     Test net output #1: loss = 15.4517 (* 1 = 15.4517 loss)\nI1206 23:47:07.455670 28553 solver.cpp:228] Iteration 9200, loss = 15.2103\nI1206 23:47:07.455708 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 23:47:07.455724 28553 solver.cpp:244]     Train net output #1: loss = 15.2103 (* 1 = 15.2103 loss)\nI1206 23:47:07.558907 28553 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1206 23:53:57.496244 28553 solver.cpp:337] 
Iteration 9300, Testing net (#0)\nI1206 23:56:40.044747 28553 solver.cpp:404]     Test net output #0: accuracy = 0.188412\nI1206 23:56:40.045055 28553 solver.cpp:404]     Test net output #1: loss = 17.9405 (* 1 = 17.9405 loss)\nI1206 23:56:44.081961 28553 solver.cpp:228] Iteration 9300, loss = 15.0996\nI1206 23:56:44.081997 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 23:56:44.082015 28553 solver.cpp:244]     Train net output #1: loss = 15.0996 (* 1 = 15.0996 loss)\nI1206 23:56:44.187237 28553 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1207 00:03:34.144445 28553 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1207 00:06:16.697517 28553 solver.cpp:404]     Test net output #0: accuracy = 0.157941\nI1207 00:06:16.697804 28553 solver.cpp:404]     Test net output #1: loss = 11.3582 (* 1 = 11.3582 loss)\nI1207 00:06:20.731715 28553 solver.cpp:228] Iteration 9400, loss = 13.0679\nI1207 00:06:20.731752 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 00:06:20.731770 28553 solver.cpp:244]     Train net output #1: loss = 13.0679 (* 1 = 13.0679 loss)\nI1207 00:06:20.836316 28553 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1207 00:13:10.931524 28553 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1207 00:15:53.446291 28553 solver.cpp:404]     Test net output #0: accuracy = 0.187412\nI1207 00:15:53.446609 28553 solver.cpp:404]     Test net output #1: loss = 17.5821 (* 1 = 17.5821 loss)\nI1207 00:15:57.480985 28553 solver.cpp:228] Iteration 9500, loss = 20.3237\nI1207 00:15:57.481024 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 00:15:57.481040 28553 solver.cpp:244]     Train net output #1: loss = 20.3237 (* 1 = 20.3237 loss)\nI1207 00:15:57.587052 28553 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1207 00:22:47.333034 28553 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1207 00:25:29.901976 28553 solver.cpp:404]     Test net output #0: accuracy = 0.215235\nI1207 
00:25:29.902261 28553 solver.cpp:404]     Test net output #1: loss = 8.64859 (* 1 = 8.64859 loss)\nI1207 00:25:33.938293 28553 solver.cpp:228] Iteration 9600, loss = 8.53451\nI1207 00:25:33.938328 28553 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 00:25:33.938344 28553 solver.cpp:244]     Train net output #1: loss = 8.53451 (* 1 = 8.53451 loss)\nI1207 00:25:34.036455 28553 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1207 00:32:23.765669 28553 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1207 00:35:06.306217 28553 solver.cpp:404]     Test net output #0: accuracy = 0.12053\nI1207 00:35:06.306529 28553 solver.cpp:404]     Test net output #1: loss = 21.4968 (* 1 = 21.4968 loss)\nI1207 00:35:10.341528 28553 solver.cpp:228] Iteration 9700, loss = 23.5091\nI1207 00:35:10.341565 28553 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 00:35:10.341581 28553 solver.cpp:244]     Train net output #1: loss = 23.5091 (* 1 = 23.5091 loss)\nI1207 00:35:10.444144 28553 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1207 00:42:00.370772 28553 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1207 00:44:39.882954 28553 solver.cpp:404]     Test net output #0: accuracy = 0.205059\nI1207 00:44:39.883224 28553 solver.cpp:404]     Test net output #1: loss = 8.52191 (* 1 = 8.52191 loss)\nI1207 00:44:43.917275 28553 solver.cpp:228] Iteration 9800, loss = 9.29096\nI1207 00:44:43.917313 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 00:44:43.917330 28553 solver.cpp:244]     Train net output #1: loss = 9.29096 (* 1 = 9.29096 loss)\nI1207 00:44:44.067728 28553 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1207 00:51:35.112128 28553 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1207 00:54:15.601593 28553 solver.cpp:404]     Test net output #0: accuracy = 0.219177\nI1207 00:54:15.601896 28553 solver.cpp:404]     Test net output #1: loss = 14.8115 (* 1 = 14.8115 loss)\nI1207 00:54:19.635460 28553 solver.cpp:228] 
Iteration 9900, loss = 18.2153\nI1207 00:54:19.635499 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 00:54:19.635514 28553 solver.cpp:244]     Train net output #1: loss = 18.2153 (* 1 = 18.2153 loss)\nI1207 00:54:19.742079 28553 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1207 01:01:10.497768 28553 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1207 01:03:53.003388 28553 solver.cpp:404]     Test net output #0: accuracy = 0.171706\nI1207 01:03:53.003684 28553 solver.cpp:404]     Test net output #1: loss = 17.4187 (* 1 = 17.4187 loss)\nI1207 01:03:57.038513 28553 solver.cpp:228] Iteration 10000, loss = 21.3335\nI1207 01:03:57.038552 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 01:03:57.038568 28553 solver.cpp:244]     Train net output #1: loss = 21.3336 (* 1 = 21.3336 loss)\nI1207 01:03:57.143079 28553 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1207 01:10:48.034988 28553 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1207 01:13:30.536937 28553 solver.cpp:404]     Test net output #0: accuracy = 0.169941\nI1207 01:13:30.537220 28553 solver.cpp:404]     Test net output #1: loss = 19.5151 (* 1 = 19.5151 loss)\nI1207 01:13:34.570022 28553 solver.cpp:228] Iteration 10100, loss = 15.4662\nI1207 01:13:34.570060 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 01:13:34.570076 28553 solver.cpp:244]     Train net output #1: loss = 15.4662 (* 1 = 15.4662 loss)\nI1207 01:13:34.679167 28553 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1207 01:20:25.606566 28553 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1207 01:23:08.107648 28553 solver.cpp:404]     Test net output #0: accuracy = 0.221765\nI1207 01:23:08.107951 28553 solver.cpp:404]     Test net output #1: loss = 16.0454 (* 1 = 16.0454 loss)\nI1207 01:23:12.141239 28553 solver.cpp:228] Iteration 10200, loss = 13.472\nI1207 01:23:12.141278 28553 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 
01:23:12.141294 28553 solver.cpp:244]     Train net output #1: loss = 13.472 (* 1 = 13.472 loss)\nI1207 01:23:12.250543 28553 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1207 01:30:03.236785 28553 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1207 01:32:45.726332 28553 solver.cpp:404]     Test net output #0: accuracy = 0.103118\nI1207 01:32:45.726625 28553 solver.cpp:404]     Test net output #1: loss = 23.4233 (* 1 = 23.4233 loss)\nI1207 01:32:49.759980 28553 solver.cpp:228] Iteration 10300, loss = 22.2966\nI1207 01:32:49.760017 28553 solver.cpp:244]     Train net output #0: accuracy = 0.0823529\nI1207 01:32:49.760035 28553 solver.cpp:244]     Train net output #1: loss = 22.2966 (* 1 = 22.2966 loss)\nI1207 01:32:49.863508 28553 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1207 01:39:40.594944 28553 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1207 01:42:23.104882 28553 solver.cpp:404]     Test net output #0: accuracy = 0.179471\nI1207 01:42:23.105173 28553 solver.cpp:404]     Test net output #1: loss = 19.0782 (* 1 = 19.0782 loss)\nI1207 01:42:27.139858 28553 solver.cpp:228] Iteration 10400, loss = 20.3485\nI1207 01:42:27.139891 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 01:42:27.139907 28553 solver.cpp:244]     Train net output #1: loss = 20.3485 (* 1 = 20.3485 loss)\nI1207 01:42:27.245180 28553 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1207 01:49:18.156066 28553 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1207 01:52:00.679435 28553 solver.cpp:404]     Test net output #0: accuracy = 0.151824\nI1207 01:52:00.679741 28553 solver.cpp:404]     Test net output #1: loss = 24.0568 (* 1 = 24.0568 loss)\nI1207 01:52:04.715317 28553 solver.cpp:228] Iteration 10500, loss = 21.8537\nI1207 01:52:04.715359 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 01:52:04.715376 28553 solver.cpp:244]     Train net output #1: loss = 21.8537 (* 1 = 21.8537 loss)\nI1207 01:52:04.821774 28553 
sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1207 01:58:55.803987 28553 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1207 02:01:38.338588 28553 solver.cpp:404]     Test net output #0: accuracy = 0.17553\nI1207 02:01:38.338892 28553 solver.cpp:404]     Test net output #1: loss = 15.4075 (* 1 = 15.4075 loss)\nI1207 02:01:42.375387 28553 solver.cpp:228] Iteration 10600, loss = 17.2769\nI1207 02:01:42.375423 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 02:01:42.375440 28553 solver.cpp:244]     Train net output #1: loss = 17.2769 (* 1 = 17.2769 loss)\nI1207 02:01:42.482075 28553 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1207 02:08:33.616444 28553 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1207 02:11:16.107297 28553 solver.cpp:404]     Test net output #0: accuracy = 0.164471\nI1207 02:11:16.107575 28553 solver.cpp:404]     Test net output #1: loss = 20.7522 (* 1 = 20.7522 loss)\nI1207 02:11:20.141683 28553 solver.cpp:228] Iteration 10700, loss = 19.55\nI1207 02:11:20.141718 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 02:11:20.141734 28553 solver.cpp:244]     Train net output #1: loss = 19.55 (* 1 = 19.55 loss)\nI1207 02:11:20.251579 28553 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1207 02:18:11.185537 28553 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1207 02:20:53.648090 28553 solver.cpp:404]     Test net output #0: accuracy = 0.129765\nI1207 02:20:53.648386 28553 solver.cpp:404]     Test net output #1: loss = 29.9178 (* 1 = 29.9178 loss)\nI1207 02:20:57.682929 28553 solver.cpp:228] Iteration 10800, loss = 32.817\nI1207 02:20:57.682962 28553 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 02:20:57.682978 28553 solver.cpp:244]     Train net output #1: loss = 32.817 (* 1 = 32.817 loss)\nI1207 02:20:57.791965 28553 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1207 02:27:48.702169 28553 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1207 
02:30:31.197952 28553 solver.cpp:404]     Test net output #0: accuracy = 0.138706\nI1207 02:30:31.198253 28553 solver.cpp:404]     Test net output #1: loss = 26.5049 (* 1 = 26.5049 loss)\nI1207 02:30:35.233024 28553 solver.cpp:228] Iteration 10900, loss = 26.4111\nI1207 02:30:35.233057 28553 solver.cpp:244]     Train net output #0: accuracy = 0.0705882\nI1207 02:30:35.233074 28553 solver.cpp:244]     Train net output #1: loss = 26.4111 (* 1 = 26.4111 loss)\nI1207 02:30:35.339355 28553 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1207 02:37:26.248630 28553 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1207 02:40:08.765540 28553 solver.cpp:404]     Test net output #0: accuracy = 0.199235\nI1207 02:40:08.765841 28553 solver.cpp:404]     Test net output #1: loss = 15.6814 (* 1 = 15.6814 loss)\nI1207 02:40:12.799785 28553 solver.cpp:228] Iteration 11000, loss = 15.7545\nI1207 02:40:12.799820 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 02:40:12.799836 28553 solver.cpp:244]     Train net output #1: loss = 15.7545 (* 1 = 15.7545 loss)\nI1207 02:40:12.903626 28553 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1207 02:47:03.730950 28553 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1207 02:49:46.266507 28553 solver.cpp:404]     Test net output #0: accuracy = 0.232353\nI1207 02:49:46.266808 28553 solver.cpp:404]     Test net output #1: loss = 17.3865 (* 1 = 17.3865 loss)\nI1207 02:49:50.302062 28553 solver.cpp:228] Iteration 11100, loss = 15.8404\nI1207 02:49:50.302096 28553 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1207 02:49:50.302112 28553 solver.cpp:244]     Train net output #1: loss = 15.8404 (* 1 = 15.8404 loss)\nI1207 02:49:50.409292 28553 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1207 02:56:41.351210 28553 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1207 02:59:23.966584 28553 solver.cpp:404]     Test net output #0: accuracy = 0.151529\nI1207 02:59:23.966912 28553 solver.cpp:404]     
Test net output #1: loss = 24.5957 (* 1 = 24.5957 loss)\nI1207 02:59:28.002871 28553 solver.cpp:228] Iteration 11200, loss = 23.6305\nI1207 02:59:28.002903 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 02:59:28.002920 28553 solver.cpp:244]     Train net output #1: loss = 23.6305 (* 1 = 23.6305 loss)\nI1207 02:59:28.107815 28553 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1207 03:06:19.103026 28553 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1207 03:09:01.713613 28553 solver.cpp:404]     Test net output #0: accuracy = 0.175765\nI1207 03:09:01.713901 28553 solver.cpp:404]     Test net output #1: loss = 17.6601 (* 1 = 17.6601 loss)\nI1207 03:09:05.750506 28553 solver.cpp:228] Iteration 11300, loss = 17.446\nI1207 03:09:05.750540 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 03:09:05.750556 28553 solver.cpp:244]     Train net output #1: loss = 17.446 (* 1 = 17.446 loss)\nI1207 03:09:05.848402 28553 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1207 03:15:56.574779 28553 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1207 03:18:39.305788 28553 solver.cpp:404]     Test net output #0: accuracy = 0.164177\nI1207 03:18:39.306073 28553 solver.cpp:404]     Test net output #1: loss = 15.1862 (* 1 = 15.1862 loss)\nI1207 03:18:43.342411 28553 solver.cpp:228] Iteration 11400, loss = 13.2035\nI1207 03:18:43.342447 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 03:18:43.342463 28553 solver.cpp:244]     Train net output #1: loss = 13.2035 (* 1 = 13.2035 loss)\nI1207 03:18:43.440266 28553 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1207 03:25:34.200273 28553 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1207 03:28:16.904124 28553 solver.cpp:404]     Test net output #0: accuracy = 0.220706\nI1207 03:28:16.904414 28553 solver.cpp:404]     Test net output #1: loss = 18.2425 (* 1 = 18.2425 loss)\nI1207 03:28:20.939997 28553 solver.cpp:228] Iteration 11500, loss = 
14.9788\nI1207 03:28:20.940032 28553 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 03:28:20.940047 28553 solver.cpp:244]     Train net output #1: loss = 14.9788 (* 1 = 14.9788 loss)\nI1207 03:28:21.038954 28553 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1207 03:35:11.846308 28553 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1207 03:37:54.568131 28553 solver.cpp:404]     Test net output #0: accuracy = 0.175882\nI1207 03:37:54.568442 28553 solver.cpp:404]     Test net output #1: loss = 24.0036 (* 1 = 24.0036 loss)\nI1207 03:37:58.605692 28553 solver.cpp:228] Iteration 11600, loss = 23.001\nI1207 03:37:58.605728 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 03:37:58.605744 28553 solver.cpp:244]     Train net output #1: loss = 23.001 (* 1 = 23.001 loss)\nI1207 03:37:58.708389 28553 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1207 03:44:49.690153 28553 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1207 03:47:32.373100 28553 solver.cpp:404]     Test net output #0: accuracy = 0.186941\nI1207 03:47:32.373386 28553 solver.cpp:404]     Test net output #1: loss = 14.7206 (* 1 = 14.7206 loss)\nI1207 03:47:36.409726 28553 solver.cpp:228] Iteration 11700, loss = 15.4387\nI1207 03:47:36.409763 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 03:47:36.409780 28553 solver.cpp:244]     Train net output #1: loss = 15.4387 (* 1 = 15.4387 loss)\nI1207 03:47:36.509462 28553 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1207 03:54:27.396309 28553 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1207 03:57:10.091425 28553 solver.cpp:404]     Test net output #0: accuracy = 0.173353\nI1207 03:57:10.091730 28553 solver.cpp:404]     Test net output #1: loss = 27.1074 (* 1 = 27.1074 loss)\nI1207 03:57:14.128729 28553 solver.cpp:228] Iteration 11800, loss = 25.832\nI1207 03:57:14.128765 28553 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 03:57:14.128782 28553 
solver.cpp:244]     Train net output #1: loss = 25.832 (* 1 = 25.832 loss)\nI1207 03:57:14.226956 28553 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1207 04:04:04.957383 28553 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1207 04:06:47.669920 28553 solver.cpp:404]     Test net output #0: accuracy = 0.149529\nI1207 04:06:47.670233 28553 solver.cpp:404]     Test net output #1: loss = 21.6364 (* 1 = 21.6364 loss)\nI1207 04:06:51.705893 28553 solver.cpp:228] Iteration 11900, loss = 24.3277\nI1207 04:06:51.705929 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 04:06:51.705945 28553 solver.cpp:244]     Train net output #1: loss = 24.3277 (* 1 = 24.3277 loss)\nI1207 04:06:51.812443 28553 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1207 04:13:42.745110 28553 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1207 04:16:25.443203 28553 solver.cpp:404]     Test net output #0: accuracy = 0.135235\nI1207 04:16:25.443509 28553 solver.cpp:404]     Test net output #1: loss = 45.9344 (* 1 = 45.9344 loss)\nI1207 04:16:29.479638 28553 solver.cpp:228] Iteration 12000, loss = 44.2155\nI1207 04:16:29.479678 28553 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 04:16:29.479696 28553 solver.cpp:244]     Train net output #1: loss = 44.2155 (* 1 = 44.2155 loss)\nI1207 04:16:29.582473 28553 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1207 04:23:20.528985 28553 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1207 04:26:03.212551 28553 solver.cpp:404]     Test net output #0: accuracy = 0.192471\nI1207 04:26:03.212842 28553 solver.cpp:404]     Test net output #1: loss = 27.909 (* 1 = 27.909 loss)\nI1207 04:26:07.249354 28553 solver.cpp:228] Iteration 12100, loss = 25.6379\nI1207 04:26:07.249395 28553 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 04:26:07.249411 28553 solver.cpp:244]     Train net output #1: loss = 25.6379 (* 1 = 25.6379 loss)\nI1207 04:26:07.351305 28553 sgd_solver.cpp:166] Iteration 12100, 
lr = 1.815\nI1207 04:32:58.424337 28553 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1207 04:35:41.156580 28553 solver.cpp:404]     Test net output #0: accuracy = 0.180294\nI1207 04:35:41.156873 28553 solver.cpp:404]     Test net output #1: loss = 15.1238 (* 1 = 15.1238 loss)\nI1207 04:35:45.194108 28553 solver.cpp:228] Iteration 12200, loss = 16.8196\nI1207 04:35:45.194147 28553 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 04:35:45.194164 28553 solver.cpp:244]     Train net output #1: loss = 16.8196 (* 1 = 16.8196 loss)\nI1207 04:35:45.291417 28553 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1207 04:42:36.049602 28553 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1207 04:45:18.830655 28553 solver.cpp:404]     Test net output #0: accuracy = 0.166059\nI1207 04:45:18.830970 28553 solver.cpp:404]     Test net output #1: loss = 26.3825 (* 1 = 26.3825 loss)\nI1207 04:45:22.867802 28553 solver.cpp:228] Iteration 12300, loss = 26.0079\nI1207 04:45:22.867843 28553 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 04:45:22.867861 28553 solver.cpp:244]     Train net output #1: loss = 26.0079 (* 1 = 26.0079 loss)\nI1207 04:45:22.955932 28553 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1207 04:52:12.344214 28553 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1207 04:54:55.144250 28553 solver.cpp:404]     Test net output #0: accuracy = 0.171706\nI1207 04:54:55.144562 28553 solver.cpp:404]     Test net output #1: loss = 24.8711 (* 1 = 24.8711 loss)\nI1207 04:54:59.178701 28553 solver.cpp:228] Iteration 12400, loss = 19.9918\nI1207 04:54:59.178743 28553 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 04:54:59.178761 28553 solver.cpp:244]     Train net output #1: loss = 19.9918 (* 1 = 19.9918 loss)\nI1207 04:54:59.254042 28553 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1207 05:01:47.865744 28553 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1207 05:04:30.653781 28553 
solver.cpp:404]     Test net output #0: accuracy = 0.193588\nI1207 05:04:30.654109 28553 solver.cpp:404]     Test net output #1: loss = 23.6643 (* 1 = 23.6643 loss)\nI1207 05:04:34.689446 28553 solver.cpp:228] Iteration 12500, loss = 20.6535\nI1207 05:04:34.689487 28553 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 05:04:34.689504 28553 solver.cpp:244]     Train net output #1: loss = 20.6535 (* 1 = 20.6535 loss)\nI1207 05:04:34.765363 28553 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1207 05:11:23.358141 28553 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1207 05:14:06.172274 28553 solver.cpp:404]     Test net output #0: accuracy = 0.133882\nI1207 05:14:06.172575 28553 solver.cpp:404]     Test net output #1: loss = 35.1364 (* 1 = 35.1364 loss)\nI1207 05:14:10.208287 28553 solver.cpp:228] Iteration 12600, loss = 34.7426\nI1207 05:14:10.208328 28553 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 05:14:10.208345 28553 solver.cpp:244]     Train net output #1: loss = 34.7426 (* 1 = 34.7426 loss)\nI1207 05:14:10.287653 28553 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1207 05:20:59.125895 28553 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1207 05:23:41.852573 28553 solver.cpp:404]     Test net output #0: accuracy = 0.169765\nI1207 05:23:41.852886 28553 solver.cpp:404]     Test net output #1: loss = 31.2586 (* 1 = 31.2586 loss)\nI1207 05:23:45.887009 28553 solver.cpp:228] Iteration 12700, loss = 29.5879\nI1207 05:23:45.887049 28553 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 05:23:45.887068 28553 solver.cpp:244]     Train net output #1: loss = 29.5879 (* 1 = 29.5879 loss)\nI1207 05:23:46.003914 28553 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1207 05:30:34.640947 28553 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1207 05:33:17.362170 28553 solver.cpp:404]     Test net output #0: accuracy = 0.102706\nI1207 05:33:17.362476 28553 solver.cpp:404]     Test net output #1: 
loss = 47.5376 (* 1 = 47.5376 loss)\nI1207 05:33:21.397840 28553 solver.cpp:228] Iteration 12800, loss = 48.299\nI1207 05:33:21.397881 28553 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 05:33:21.397898 28553 solver.cpp:244]     Train net output #1: loss = 48.299 (* 1 = 48.299 loss)\nI1207 05:33:21.472098 28553 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\n"
  },
  {
    "path": "Results/lrRange3SS520kClip2Fig12a",
    "content": "I1207 05:37:08.502259   369 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1207 05:37:08.504585   369 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1207 05:37:08.506026   369 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1207 05:37:08.507241   369 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1207 05:37:08.508471   369 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1207 05:37:08.510004   369 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1207 05:37:08.511234   369 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1207 05:37:08.512471   369 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1207 05:37:08.513700   369 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1207 05:37:08.948482   369 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 2\nmax_lr: 3\nI1207 05:37:08.951611   369 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1207 05:37:08.996284   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:08.996351   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:08.997186   369 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1207 05:37:08.998819   369 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: 
true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  
bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param 
{\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    
kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: 
\"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: 
\"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: 
\"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3\nI1207 05:37:09.000437   369 layer_factory.hpp:77] Creating layer dataLayer\nI1207 05:37:09.001612   369 net.cpp:100] Creating Layer dataLayer\nI1207 05:37:09.001691   369 net.cpp:408] dataLayer -> data_top\nI1207 05:37:09.001888   369 net.cpp:408] dataLayer -> label\nI1207 05:37:09.002009   369 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1207 05:37:09.025980   377 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1207 05:37:09.030030   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:09.036255   369 net.cpp:150] Setting up dataLayer\nI1207 05:37:09.036315   369 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI1207 05:37:09.036327   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.036334   369 net.cpp:165] Memory required for data: 1229200\nI1207 05:37:09.036348   369 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1207 05:37:09.036363   369 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1207 05:37:09.036371   369 net.cpp:434] label_dataLayer_1_split <- label\nI1207 05:37:09.036391   369 net.cpp:408] label_dataLayer_1_split -> 
label_dataLayer_1_split_0\nI1207 05:37:09.036408   369 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1207 05:37:09.036495   369 net.cpp:150] Setting up label_dataLayer_1_split\nI1207 05:37:09.036514   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.036521   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.036526   369 net.cpp:165] Memory required for data: 1230000\nI1207 05:37:09.036532   369 layer_factory.hpp:77] Creating layer pre_conv\nI1207 05:37:09.036594   369 net.cpp:100] Creating Layer pre_conv\nI1207 05:37:09.036607   369 net.cpp:434] pre_conv <- data_top\nI1207 05:37:09.036617   369 net.cpp:408] pre_conv -> pre_conv_top\nI1207 05:37:09.038322   369 net.cpp:150] Setting up pre_conv\nI1207 05:37:09.038341   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.038347   369 net.cpp:165] Memory required for data: 7783600\nI1207 05:37:09.038401   369 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1207 05:37:09.038414   369 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1207 05:37:09.038420   369 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1207 05:37:09.038430   369 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1207 05:37:09.038441   369 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1207 05:37:09.038535   369 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1207 05:37:09.038549   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.038556   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.038561   369 net.cpp:165] Memory required for data: 20890800\nI1207 05:37:09.038568   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1207 05:37:09.038640   369 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1207 05:37:09.038652   369 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1207 05:37:09.038661   369 net.cpp:408] L1_b1_brc1_bn -> 
L1_b1_brc1_bn_top\nI1207 05:37:09.038784   378 blocking_queue.cpp:50] Waiting for data\nI1207 05:37:09.038969   369 net.cpp:150] Setting up L1_b1_brc1_bn\nI1207 05:37:09.038986   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.038992   369 net.cpp:165] Memory required for data: 27444400\nI1207 05:37:09.039010   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1207 05:37:09.039057   369 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1207 05:37:09.039067   369 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1207 05:37:09.039075   369 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1207 05:37:09.039086   369 net.cpp:150] Setting up L1_b1_brc1_relu\nI1207 05:37:09.039094   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.039099   369 net.cpp:165] Memory required for data: 33998000\nI1207 05:37:09.039104   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1207 05:37:09.039119   369 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1207 05:37:09.039125   369 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1207 05:37:09.039139   369 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1207 05:37:09.039408   369 net.cpp:150] Setting up L1_b1_brc1_conv\nI1207 05:37:09.039423   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.039428   369 net.cpp:165] Memory required for data: 40551600\nI1207 05:37:09.039438   369 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1207 05:37:09.039448   369 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1207 05:37:09.039453   369 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1207 05:37:09.039463   369 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1207 05:37:09.039700   369 net.cpp:150] Setting up L1_b1_brc2_bn\nI1207 05:37:09.039714   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.039721   369 net.cpp:165] Memory required for data: 47105200\nI1207 05:37:09.039734   369 layer_factory.hpp:77] Creating layer 
L1_b1_brc2_relu\nI1207 05:37:09.039747   369 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1207 05:37:09.039753   369 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1207 05:37:09.039762   369 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1207 05:37:09.039772   369 net.cpp:150] Setting up L1_b1_brc2_relu\nI1207 05:37:09.039788   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.039793   369 net.cpp:165] Memory required for data: 53658800\nI1207 05:37:09.039798   369 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1207 05:37:09.039809   369 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1207 05:37:09.039814   369 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1207 05:37:09.039826   369 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1207 05:37:09.040150   369 net.cpp:150] Setting up L1_b1_brc2_conv\nI1207 05:37:09.040165   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.040170   369 net.cpp:165] Memory required for data: 60212400\nI1207 05:37:09.040179   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1207 05:37:09.040189   369 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1207 05:37:09.040194   369 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1207 05:37:09.040205   369 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1207 05:37:09.040436   369 net.cpp:150] Setting up L1_b1_brc3_bn\nI1207 05:37:09.040449   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.040454   369 net.cpp:165] Memory required for data: 66766000\nI1207 05:37:09.040472   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1207 05:37:09.040484   369 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1207 05:37:09.040490   369 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1207 05:37:09.040498   369 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1207 05:37:09.040508   369 net.cpp:150] Setting up L1_b1_brc3_relu\nI1207 05:37:09.040514   369 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI1207 05:37:09.040519   369 net.cpp:165] Memory required for data: 73319600\nI1207 05:37:09.040524   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1207 05:37:09.040535   369 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1207 05:37:09.040540   369 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1207 05:37:09.040551   369 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1207 05:37:09.040834   369 net.cpp:150] Setting up L1_b1_brc3_conv\nI1207 05:37:09.040848   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.040853   369 net.cpp:165] Memory required for data: 99534000\nI1207 05:37:09.040866   369 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1207 05:37:09.040881   369 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1207 05:37:09.040887   369 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1207 05:37:09.040899   369 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1207 05:37:09.041187   369 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1207 05:37:09.041200   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041205   369 net.cpp:165] Memory required for data: 125748400\nI1207 05:37:09.041214   369 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1207 05:37:09.041265   369 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1207 05:37:09.041275   369 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1207 05:37:09.041281   369 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1207 05:37:09.041293   369 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1207 05:37:09.041366   369 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1207 05:37:09.041381   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041386   369 net.cpp:165] Memory required for data: 151962800\nI1207 05:37:09.041393   369 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.041400   369 net.cpp:100] Creating 
Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.041406   369 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1207 05:37:09.041417   369 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:37:09.041427   369 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:37:09.041489   369 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.041503   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041512   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041515   369 net.cpp:165] Memory required for data: 204391600\nI1207 05:37:09.041522   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1207 05:37:09.041534   369 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1207 05:37:09.041540   369 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:37:09.041549   369 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1207 05:37:09.041774   369 net.cpp:150] Setting up L1_b2_brc1_bn\nI1207 05:37:09.041788   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041793   369 net.cpp:165] Memory required for data: 230606000\nI1207 05:37:09.041805   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1207 05:37:09.041813   369 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1207 05:37:09.041818   369 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1207 05:37:09.041833   369 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1207 05:37:09.041843   369 net.cpp:150] Setting up L1_b2_brc1_relu\nI1207 05:37:09.041851   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.041857   369 net.cpp:165] Memory required for data: 256820400\nI1207 05:37:09.041862   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1207 05:37:09.041875   
369 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1207 05:37:09.041882   369 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1207 05:37:09.041890   369 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1207 05:37:09.042183   369 net.cpp:150] Setting up L1_b2_brc1_conv\nI1207 05:37:09.042197   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.042202   369 net.cpp:165] Memory required for data: 263374000\nI1207 05:37:09.042212   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1207 05:37:09.042219   369 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1207 05:37:09.042228   369 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1207 05:37:09.042237   369 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1207 05:37:09.042471   369 net.cpp:150] Setting up L1_b2_brc2_bn\nI1207 05:37:09.042485   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.042491   369 net.cpp:165] Memory required for data: 269927600\nI1207 05:37:09.042501   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1207 05:37:09.042510   369 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1207 05:37:09.042515   369 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1207 05:37:09.042522   369 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1207 05:37:09.042531   369 net.cpp:150] Setting up L1_b2_brc2_relu\nI1207 05:37:09.042538   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.042543   369 net.cpp:165] Memory required for data: 276481200\nI1207 05:37:09.042548   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1207 05:37:09.042565   369 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1207 05:37:09.042572   369 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1207 05:37:09.042582   369 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1207 05:37:09.042889   369 net.cpp:150] Setting up L1_b2_brc2_conv\nI1207 05:37:09.042903   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.042908   369 
net.cpp:165] Memory required for data: 283034800\nI1207 05:37:09.042917   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1207 05:37:09.042929   369 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1207 05:37:09.042935   369 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1207 05:37:09.042946   369 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1207 05:37:09.043179   369 net.cpp:150] Setting up L1_b2_brc3_bn\nI1207 05:37:09.043192   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.043197   369 net.cpp:165] Memory required for data: 289588400\nI1207 05:37:09.043207   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1207 05:37:09.043223   369 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1207 05:37:09.043231   369 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1207 05:37:09.043237   369 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1207 05:37:09.043247   369 net.cpp:150] Setting up L1_b2_brc3_relu\nI1207 05:37:09.043254   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.043259   369 net.cpp:165] Memory required for data: 296142000\nI1207 05:37:09.043263   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1207 05:37:09.043277   369 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1207 05:37:09.043283   369 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1207 05:37:09.043294   369 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1207 05:37:09.043577   369 net.cpp:150] Setting up L1_b2_brc3_conv\nI1207 05:37:09.043591   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.043597   369 net.cpp:165] Memory required for data: 322356400\nI1207 05:37:09.043617   369 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1207 05:37:09.043627   369 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1207 05:37:09.043633   369 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1207 05:37:09.043640   369 net.cpp:434] L1_b2_sum_eltwise <- 
L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:37:09.043648   369 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1207 05:37:09.043682   369 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1207 05:37:09.043692   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.043697   369 net.cpp:165] Memory required for data: 348570800\nI1207 05:37:09.043702   369 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.043709   369 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.043715   369 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1207 05:37:09.043725   369 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:37:09.043735   369 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:37:09.043779   369 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.043790   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.043797   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.043802   369 net.cpp:165] Memory required for data: 400999600\nI1207 05:37:09.043807   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1207 05:37:09.043815   369 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1207 05:37:09.043822   369 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:37:09.043831   369 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1207 05:37:09.044051   369 net.cpp:150] Setting up L1_b3_brc1_bn\nI1207 05:37:09.044066   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.044072   369 net.cpp:165] Memory required for data: 427214000\nI1207 05:37:09.044082   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1207 05:37:09.044090   369 net.cpp:100] 
Creating Layer L1_b3_brc1_relu\nI1207 05:37:09.044096   369 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1207 05:37:09.044103   369 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1207 05:37:09.044112   369 net.cpp:150] Setting up L1_b3_brc1_relu\nI1207 05:37:09.044119   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.044124   369 net.cpp:165] Memory required for data: 453428400\nI1207 05:37:09.044128   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1207 05:37:09.044139   369 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1207 05:37:09.044144   369 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1207 05:37:09.044155   369 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1207 05:37:09.044440   369 net.cpp:150] Setting up L1_b3_brc1_conv\nI1207 05:37:09.044461   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.044474   369 net.cpp:165] Memory required for data: 459982000\nI1207 05:37:09.044484   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1207 05:37:09.044493   369 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1207 05:37:09.044499   369 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1207 05:37:09.044510   369 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1207 05:37:09.044741   369 net.cpp:150] Setting up L1_b3_brc2_bn\nI1207 05:37:09.044755   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.044760   369 net.cpp:165] Memory required for data: 466535600\nI1207 05:37:09.044770   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1207 05:37:09.044782   369 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1207 05:37:09.044788   369 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1207 05:37:09.044795   369 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1207 05:37:09.044806   369 net.cpp:150] Setting up L1_b3_brc2_relu\nI1207 05:37:09.044812   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.044816   369 net.cpp:165] 
Memory required for data: 473089200\nI1207 05:37:09.044821   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1207 05:37:09.044837   369 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1207 05:37:09.044844   369 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1207 05:37:09.044852   369 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1207 05:37:09.045156   369 net.cpp:150] Setting up L1_b3_brc2_conv\nI1207 05:37:09.045169   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.045174   369 net.cpp:165] Memory required for data: 479642800\nI1207 05:37:09.045183   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1207 05:37:09.045192   369 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1207 05:37:09.045197   369 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1207 05:37:09.045208   369 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1207 05:37:09.045442   369 net.cpp:150] Setting up L1_b3_brc3_bn\nI1207 05:37:09.045460   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.045478   369 net.cpp:165] Memory required for data: 486196400\nI1207 05:37:09.045490   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1207 05:37:09.045497   369 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1207 05:37:09.045503   369 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1207 05:37:09.045511   369 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1207 05:37:09.045521   369 net.cpp:150] Setting up L1_b3_brc3_relu\nI1207 05:37:09.045527   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.045532   369 net.cpp:165] Memory required for data: 492750000\nI1207 05:37:09.045537   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1207 05:37:09.045547   369 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1207 05:37:09.045552   369 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1207 05:37:09.045564   369 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1207 05:37:09.045848   369 
net.cpp:150] Setting up L1_b3_brc3_conv\nI1207 05:37:09.045861   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.045867   369 net.cpp:165] Memory required for data: 518964400\nI1207 05:37:09.045876   369 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1207 05:37:09.045884   369 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1207 05:37:09.045892   369 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1207 05:37:09.045898   369 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:37:09.045907   369 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1207 05:37:09.045949   369 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1207 05:37:09.045963   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.045967   369 net.cpp:165] Memory required for data: 545178800\nI1207 05:37:09.045974   369 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.045987   369 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.045994   369 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1207 05:37:09.046005   369 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:37:09.046015   369 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:37:09.046061   369 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.046072   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.046079   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.046084   369 net.cpp:165] Memory required for data: 597607600\nI1207 05:37:09.046089   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1207 05:37:09.046100   369 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1207 05:37:09.046108   369 
net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:37:09.046118   369 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1207 05:37:09.046342   369 net.cpp:150] Setting up L1_b4_brc1_bn\nI1207 05:37:09.046355   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.046360   369 net.cpp:165] Memory required for data: 623822000\nI1207 05:37:09.046371   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1207 05:37:09.046380   369 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1207 05:37:09.046386   369 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1207 05:37:09.046392   369 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1207 05:37:09.046401   369 net.cpp:150] Setting up L1_b4_brc1_relu\nI1207 05:37:09.046408   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.046413   369 net.cpp:165] Memory required for data: 650036400\nI1207 05:37:09.046417   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1207 05:37:09.046432   369 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1207 05:37:09.046437   369 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1207 05:37:09.046448   369 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1207 05:37:09.046742   369 net.cpp:150] Setting up L1_b4_brc1_conv\nI1207 05:37:09.046756   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.046762   369 net.cpp:165] Memory required for data: 656590000\nI1207 05:37:09.046771   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1207 05:37:09.046782   369 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1207 05:37:09.046788   369 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1207 05:37:09.046797   369 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1207 05:37:09.047030   369 net.cpp:150] Setting up L1_b4_brc2_bn\nI1207 05:37:09.047044   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.047049   369 net.cpp:165] Memory required for data: 663143600\nI1207 
05:37:09.047058   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1207 05:37:09.047067   369 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1207 05:37:09.047073   369 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1207 05:37:09.047080   369 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1207 05:37:09.047089   369 net.cpp:150] Setting up L1_b4_brc2_relu\nI1207 05:37:09.047096   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.047101   369 net.cpp:165] Memory required for data: 669697200\nI1207 05:37:09.047106   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1207 05:37:09.047118   369 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1207 05:37:09.047125   369 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1207 05:37:09.047135   369 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1207 05:37:09.047451   369 net.cpp:150] Setting up L1_b4_brc2_conv\nI1207 05:37:09.047471   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.047477   369 net.cpp:165] Memory required for data: 676250800\nI1207 05:37:09.047487   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1207 05:37:09.047505   369 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1207 05:37:09.047513   369 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1207 05:37:09.047521   369 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1207 05:37:09.047758   369 net.cpp:150] Setting up L1_b4_brc3_bn\nI1207 05:37:09.047771   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.047776   369 net.cpp:165] Memory required for data: 682804400\nI1207 05:37:09.047787   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1207 05:37:09.047796   369 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1207 05:37:09.047801   369 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1207 05:37:09.047807   369 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1207 05:37:09.047817   369 net.cpp:150] Setting up 
L1_b4_brc3_relu\nI1207 05:37:09.047824   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.047828   369 net.cpp:165] Memory required for data: 689358000\nI1207 05:37:09.047834   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1207 05:37:09.047847   369 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1207 05:37:09.047853   369 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1207 05:37:09.047861   369 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1207 05:37:09.048146   369 net.cpp:150] Setting up L1_b4_brc3_conv\nI1207 05:37:09.048161   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048166   369 net.cpp:165] Memory required for data: 715572400\nI1207 05:37:09.048174   369 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1207 05:37:09.048183   369 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1207 05:37:09.048189   369 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1207 05:37:09.048197   369 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:37:09.048207   369 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1207 05:37:09.048238   369 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1207 05:37:09.048249   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048254   369 net.cpp:165] Memory required for data: 741786800\nI1207 05:37:09.048260   369 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.048267   369 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.048274   369 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1207 05:37:09.048280   369 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:37:09.048291   369 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 
05:37:09.048336   369 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.048346   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048353   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048357   369 net.cpp:165] Memory required for data: 794215600\nI1207 05:37:09.048363   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1207 05:37:09.048374   369 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1207 05:37:09.048380   369 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:37:09.048388   369 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1207 05:37:09.048619   369 net.cpp:150] Setting up L1_b5_brc1_bn\nI1207 05:37:09.048632   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048637   369 net.cpp:165] Memory required for data: 820430000\nI1207 05:37:09.048666   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1207 05:37:09.048676   369 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1207 05:37:09.048681   369 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1207 05:37:09.048689   369 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1207 05:37:09.048698   369 net.cpp:150] Setting up L1_b5_brc1_relu\nI1207 05:37:09.048713   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.048718   369 net.cpp:165] Memory required for data: 846644400\nI1207 05:37:09.048723   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1207 05:37:09.048739   369 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1207 05:37:09.048745   369 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1207 05:37:09.048754   369 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1207 05:37:09.049041   369 net.cpp:150] Setting up L1_b5_brc1_conv\nI1207 05:37:09.049054   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.049060   369 net.cpp:165] Memory required for data: 853198000\nI1207 05:37:09.049068   369 
layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1207 05:37:09.049077   369 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1207 05:37:09.049083   369 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1207 05:37:09.049094   369 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1207 05:37:09.049327   369 net.cpp:150] Setting up L1_b5_brc2_bn\nI1207 05:37:09.049343   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.049348   369 net.cpp:165] Memory required for data: 859751600\nI1207 05:37:09.049360   369 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1207 05:37:09.049368   369 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1207 05:37:09.049374   369 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1207 05:37:09.049381   369 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1207 05:37:09.049391   369 net.cpp:150] Setting up L1_b5_brc2_relu\nI1207 05:37:09.049398   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.049402   369 net.cpp:165] Memory required for data: 866305200\nI1207 05:37:09.049408   369 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1207 05:37:09.049418   369 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1207 05:37:09.049423   369 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1207 05:37:09.049435   369 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1207 05:37:09.049752   369 net.cpp:150] Setting up L1_b5_brc2_conv\nI1207 05:37:09.049767   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.049772   369 net.cpp:165] Memory required for data: 872858800\nI1207 05:37:09.049782   369 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1207 05:37:09.049789   369 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1207 05:37:09.049795   369 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1207 05:37:09.049806   369 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1207 05:37:09.050050   369 net.cpp:150] Setting up L1_b5_brc3_bn\nI1207 05:37:09.050062   369 net.cpp:157] 
Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.050067   369 net.cpp:165] Memory required for data: 879412400\nI1207 05:37:09.050077   369 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1207 05:37:09.050089   369 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1207 05:37:09.050096   369 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1207 05:37:09.050102   369 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1207 05:37:09.050113   369 net.cpp:150] Setting up L1_b5_brc3_relu\nI1207 05:37:09.050120   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.050124   369 net.cpp:165] Memory required for data: 885966000\nI1207 05:37:09.050130   369 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1207 05:37:09.050140   369 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1207 05:37:09.050146   369 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1207 05:37:09.050158   369 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1207 05:37:09.050443   369 net.cpp:150] Setting up L1_b5_brc3_conv\nI1207 05:37:09.050457   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.050462   369 net.cpp:165] Memory required for data: 912180400\nI1207 05:37:09.050477   369 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1207 05:37:09.050487   369 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1207 05:37:09.050493   369 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1207 05:37:09.050508   369 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:37:09.050515   369 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1207 05:37:09.050552   369 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1207 05:37:09.050564   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.050568   369 net.cpp:165] Memory required for data: 938394800\nI1207 05:37:09.050575   369 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.050585  
 369 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.050590   369 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1207 05:37:09.050597   369 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:37:09.050607   369 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:37:09.050655   369 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.050667   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.050673   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.050678   369 net.cpp:165] Memory required for data: 990823600\nI1207 05:37:09.050683   369 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1207 05:37:09.050691   369 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1207 05:37:09.050698   369 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:37:09.050707   369 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1207 05:37:09.050936   369 net.cpp:150] Setting up L1_b6_brc1_bn\nI1207 05:37:09.050951   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.050956   369 net.cpp:165] Memory required for data: 1017038000\nI1207 05:37:09.050967   369 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1207 05:37:09.050976   369 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1207 05:37:09.050981   369 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1207 05:37:09.050988   369 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1207 05:37:09.050998   369 net.cpp:150] Setting up L1_b6_brc1_relu\nI1207 05:37:09.051005   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.051009   369 net.cpp:165] Memory required for data: 1043252400\nI1207 05:37:09.051014   369 layer_factory.hpp:77] Creating layer 
L1_b6_brc1_conv\nI1207 05:37:09.051028   369 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1207 05:37:09.051034   369 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1207 05:37:09.051043   369 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1207 05:37:09.051331   369 net.cpp:150] Setting up L1_b6_brc1_conv\nI1207 05:37:09.051343   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.051348   369 net.cpp:165] Memory required for data: 1049806000\nI1207 05:37:09.051357   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1207 05:37:09.051367   369 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1207 05:37:09.051373   369 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1207 05:37:09.051383   369 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1207 05:37:09.051630   369 net.cpp:150] Setting up L1_b6_brc2_bn\nI1207 05:37:09.051647   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.051652   369 net.cpp:165] Memory required for data: 1056359600\nI1207 05:37:09.051663   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1207 05:37:09.051676   369 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1207 05:37:09.051682   369 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1207 05:37:09.051689   369 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1207 05:37:09.051699   369 net.cpp:150] Setting up L1_b6_brc2_relu\nI1207 05:37:09.051705   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.051710   369 net.cpp:165] Memory required for data: 1062913200\nI1207 05:37:09.051715   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1207 05:37:09.051736   369 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1207 05:37:09.051743   369 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1207 05:37:09.051754   369 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1207 05:37:09.052073   369 net.cpp:150] Setting up L1_b6_brc2_conv\nI1207 05:37:09.052086   369 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI1207 05:37:09.052093   369 net.cpp:165] Memory required for data: 1069466800\nI1207 05:37:09.052100   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1207 05:37:09.052112   369 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1207 05:37:09.052119   369 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1207 05:37:09.052126   369 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1207 05:37:09.052364   369 net.cpp:150] Setting up L1_b6_brc3_bn\nI1207 05:37:09.052377   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.052382   369 net.cpp:165] Memory required for data: 1076020400\nI1207 05:37:09.052392   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1207 05:37:09.052402   369 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1207 05:37:09.052407   369 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1207 05:37:09.052413   369 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1207 05:37:09.052423   369 net.cpp:150] Setting up L1_b6_brc3_relu\nI1207 05:37:09.052430   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.052434   369 net.cpp:165] Memory required for data: 1082574000\nI1207 05:37:09.052439   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1207 05:37:09.052453   369 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1207 05:37:09.052459   369 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1207 05:37:09.052475   369 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1207 05:37:09.052779   369 net.cpp:150] Setting up L1_b6_brc3_conv\nI1207 05:37:09.052793   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.052798   369 net.cpp:165] Memory required for data: 1108788400\nI1207 05:37:09.052808   369 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1207 05:37:09.052819   369 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1207 05:37:09.052825   369 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1207 05:37:09.052834   369 net.cpp:434] 
L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:37:09.052840   369 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1207 05:37:09.052875   369 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1207 05:37:09.052884   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.052888   369 net.cpp:165] Memory required for data: 1135002800\nI1207 05:37:09.052894   369 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.052901   369 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.052907   369 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1207 05:37:09.052917   369 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:37:09.052927   369 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:37:09.052970   369 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.052985   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.052992   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.052997   369 net.cpp:165] Memory required for data: 1187431600\nI1207 05:37:09.053002   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1207 05:37:09.053011   369 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1207 05:37:09.053016   369 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:37:09.053027   369 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1207 05:37:09.053253   369 net.cpp:150] Setting up L2_b1_brc1_bn\nI1207 05:37:09.053274   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.053280   369 net.cpp:165] Memory required for data: 1213646000\nI1207 05:37:09.053290   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1207 05:37:09.053298   
369 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1207 05:37:09.053304   369 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1207 05:37:09.053311   369 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1207 05:37:09.053321   369 net.cpp:150] Setting up L2_b1_brc1_relu\nI1207 05:37:09.053328   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.053333   369 net.cpp:165] Memory required for data: 1239860400\nI1207 05:37:09.053337   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1207 05:37:09.053351   369 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1207 05:37:09.053357   369 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1207 05:37:09.053369   369 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1207 05:37:09.053695   369 net.cpp:150] Setting up L2_b1_brc1_conv\nI1207 05:37:09.053710   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.053715   369 net.cpp:165] Memory required for data: 1243137200\nI1207 05:37:09.053725   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1207 05:37:09.053733   369 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1207 05:37:09.053740   369 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1207 05:37:09.053750   369 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1207 05:37:09.053993   369 net.cpp:150] Setting up L2_b1_brc2_bn\nI1207 05:37:09.054008   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.054013   369 net.cpp:165] Memory required for data: 1246414000\nI1207 05:37:09.054023   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1207 05:37:09.054031   369 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1207 05:37:09.054038   369 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1207 05:37:09.054047   369 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1207 05:37:09.054056   369 net.cpp:150] Setting up L2_b1_brc2_relu\nI1207 05:37:09.054064   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.054069   
369 net.cpp:165] Memory required for data: 1249690800\nI1207 05:37:09.054074   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1207 05:37:09.054087   369 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1207 05:37:09.054093   369 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1207 05:37:09.054105   369 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1207 05:37:09.055619   369 net.cpp:150] Setting up L2_b1_brc2_conv\nI1207 05:37:09.055637   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.055642   369 net.cpp:165] Memory required for data: 1252967600\nI1207 05:37:09.055652   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1207 05:37:09.055665   369 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1207 05:37:09.055671   369 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1207 05:37:09.055682   369 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1207 05:37:09.055919   369 net.cpp:150] Setting up L2_b1_brc3_bn\nI1207 05:37:09.055932   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.055938   369 net.cpp:165] Memory required for data: 1256244400\nI1207 05:37:09.055948   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1207 05:37:09.055956   369 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1207 05:37:09.055963   369 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1207 05:37:09.055970   369 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1207 05:37:09.055979   369 net.cpp:150] Setting up L2_b1_brc3_relu\nI1207 05:37:09.055986   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.055991   369 net.cpp:165] Memory required for data: 1259521200\nI1207 05:37:09.055996   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1207 05:37:09.056010   369 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1207 05:37:09.056016   369 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1207 05:37:09.056036   369 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1207 
05:37:09.056392   369 net.cpp:150] Setting up L2_b1_brc3_conv\nI1207 05:37:09.056406   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.056411   369 net.cpp:165] Memory required for data: 1272628400\nI1207 05:37:09.056421   369 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1207 05:37:09.056442   369 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1207 05:37:09.056449   369 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:37:09.056462   369 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1207 05:37:09.056896   369 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1207 05:37:09.056910   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.056915   369 net.cpp:165] Memory required for data: 1285735600\nI1207 05:37:09.056924   369 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1207 05:37:09.056934   369 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1207 05:37:09.056941   369 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1207 05:37:09.056947   369 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1207 05:37:09.056958   369 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1207 05:37:09.056985   369 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1207 05:37:09.056994   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.056999   369 net.cpp:165] Memory required for data: 1298842800\nI1207 05:37:09.057004   369 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.057018   369 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.057024   369 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1207 05:37:09.057030   369 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:37:09.057040   369 net.cpp:408] 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:37:09.057086   369 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.057098   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.057106   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.057111   369 net.cpp:165] Memory required for data: 1325057200\nI1207 05:37:09.057116   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1207 05:37:09.057126   369 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1207 05:37:09.057132   369 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:37:09.057140   369 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1207 05:37:09.057363   369 net.cpp:150] Setting up L2_b2_brc1_bn\nI1207 05:37:09.057375   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.057380   369 net.cpp:165] Memory required for data: 1338164400\nI1207 05:37:09.057391   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1207 05:37:09.057402   369 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1207 05:37:09.057409   369 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1207 05:37:09.057416   369 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1207 05:37:09.057426   369 net.cpp:150] Setting up L2_b2_brc1_relu\nI1207 05:37:09.057433   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.057438   369 net.cpp:165] Memory required for data: 1351271600\nI1207 05:37:09.057443   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1207 05:37:09.057453   369 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1207 05:37:09.057459   369 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1207 05:37:09.057476   369 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1207 05:37:09.057873   369 net.cpp:150] Setting up L2_b2_brc1_conv\nI1207 05:37:09.057888   369 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI1207 05:37:09.057893   369 net.cpp:165] Memory required for data: 1354548400\nI1207 05:37:09.057910   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1207 05:37:09.057920   369 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1207 05:37:09.057926   369 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1207 05:37:09.057937   369 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1207 05:37:09.058182   369 net.cpp:150] Setting up L2_b2_brc2_bn\nI1207 05:37:09.058194   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.058199   369 net.cpp:165] Memory required for data: 1357825200\nI1207 05:37:09.058209   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1207 05:37:09.058223   369 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1207 05:37:09.058230   369 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1207 05:37:09.058238   369 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1207 05:37:09.058248   369 net.cpp:150] Setting up L2_b2_brc2_relu\nI1207 05:37:09.058254   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.058259   369 net.cpp:165] Memory required for data: 1361102000\nI1207 05:37:09.058264   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1207 05:37:09.058274   369 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1207 05:37:09.058279   369 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1207 05:37:09.058291   369 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1207 05:37:09.058759   369 net.cpp:150] Setting up L2_b2_brc2_conv\nI1207 05:37:09.058774   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.058779   369 net.cpp:165] Memory required for data: 1364378800\nI1207 05:37:09.058789   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1207 05:37:09.058797   369 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1207 05:37:09.058804   369 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1207 05:37:09.058811   369 net.cpp:408] L2_b2_brc3_bn -> 
L2_b2_brc3_bn_top\nI1207 05:37:09.059046   369 net.cpp:150] Setting up L2_b2_brc3_bn\nI1207 05:37:09.059058   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.059063   369 net.cpp:165] Memory required for data: 1367655600\nI1207 05:37:09.059073   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1207 05:37:09.059082   369 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1207 05:37:09.059087   369 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1207 05:37:09.059098   369 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1207 05:37:09.059108   369 net.cpp:150] Setting up L2_b2_brc3_relu\nI1207 05:37:09.059115   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.059119   369 net.cpp:165] Memory required for data: 1370932400\nI1207 05:37:09.059124   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1207 05:37:09.059139   369 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1207 05:37:09.059144   369 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1207 05:37:09.059152   369 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1207 05:37:09.059518   369 net.cpp:150] Setting up L2_b2_brc3_conv\nI1207 05:37:09.059532   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.059538   369 net.cpp:165] Memory required for data: 1384039600\nI1207 05:37:09.059547   369 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1207 05:37:09.059556   369 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1207 05:37:09.059562   369 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1207 05:37:09.059571   369 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:37:09.059581   369 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1207 05:37:09.059607   369 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1207 05:37:09.059617   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.059622   369 net.cpp:165] Memory required for data: 1397146800\nI1207 
05:37:09.059628   369 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.059638   369 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.059644   369 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1207 05:37:09.059659   369 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:37:09.059669   369 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:37:09.059722   369 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.059736   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.059741   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.059746   369 net.cpp:165] Memory required for data: 1423361200\nI1207 05:37:09.059752   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1207 05:37:09.059763   369 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1207 05:37:09.059769   369 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:37:09.059777   369 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1207 05:37:09.060005   369 net.cpp:150] Setting up L2_b3_brc1_bn\nI1207 05:37:09.060019   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.060024   369 net.cpp:165] Memory required for data: 1436468400\nI1207 05:37:09.060055   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1207 05:37:09.060066   369 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1207 05:37:09.060073   369 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1207 05:37:09.060081   369 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1207 05:37:09.060091   369 net.cpp:150] Setting up L2_b3_brc1_relu\nI1207 05:37:09.060097   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.060102 
  369 net.cpp:165] Memory required for data: 1449575600\nI1207 05:37:09.060107   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1207 05:37:09.060120   369 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1207 05:37:09.060127   369 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1207 05:37:09.060135   369 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1207 05:37:09.060501   369 net.cpp:150] Setting up L2_b3_brc1_conv\nI1207 05:37:09.060516   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.060521   369 net.cpp:165] Memory required for data: 1452852400\nI1207 05:37:09.060530   369 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1207 05:37:09.060542   369 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1207 05:37:09.060549   369 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1207 05:37:09.060557   369 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1207 05:37:09.060793   369 net.cpp:150] Setting up L2_b3_brc2_bn\nI1207 05:37:09.060809   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.060814   369 net.cpp:165] Memory required for data: 1456129200\nI1207 05:37:09.060824   369 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1207 05:37:09.060832   369 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1207 05:37:09.060838   369 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1207 05:37:09.060845   369 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1207 05:37:09.060855   369 net.cpp:150] Setting up L2_b3_brc2_relu\nI1207 05:37:09.060863   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.060866   369 net.cpp:165] Memory required for data: 1459406000\nI1207 05:37:09.060871   369 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1207 05:37:09.060883   369 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1207 05:37:09.060887   369 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1207 05:37:09.060899   369 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1207 
05:37:09.061362   369 net.cpp:150] Setting up L2_b3_brc2_conv\nI1207 05:37:09.061375   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.061380   369 net.cpp:165] Memory required for data: 1462682800\nI1207 05:37:09.061389   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1207 05:37:09.061403   369 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1207 05:37:09.061408   369 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1207 05:37:09.061424   369 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1207 05:37:09.061671   369 net.cpp:150] Setting up L2_b3_brc3_bn\nI1207 05:37:09.061686   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.061691   369 net.cpp:165] Memory required for data: 1465959600\nI1207 05:37:09.061702   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1207 05:37:09.061712   369 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1207 05:37:09.061719   369 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1207 05:37:09.061727   369 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1207 05:37:09.061736   369 net.cpp:150] Setting up L2_b3_brc3_relu\nI1207 05:37:09.061743   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.061748   369 net.cpp:165] Memory required for data: 1469236400\nI1207 05:37:09.061753   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1207 05:37:09.061764   369 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1207 05:37:09.061769   369 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1207 05:37:09.061780   369 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1207 05:37:09.062135   369 net.cpp:150] Setting up L2_b3_brc3_conv\nI1207 05:37:09.062150   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062155   369 net.cpp:165] Memory required for data: 1482343600\nI1207 05:37:09.062163   369 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1207 05:37:09.062172   369 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1207 
05:37:09.062180   369 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1207 05:37:09.062186   369 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:37:09.062193   369 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1207 05:37:09.062222   369 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1207 05:37:09.062232   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062238   369 net.cpp:165] Memory required for data: 1495450800\nI1207 05:37:09.062243   369 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.062252   369 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.062256   369 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1207 05:37:09.062266   369 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:37:09.062276   369 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:37:09.062320   369 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.062335   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062341   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062346   369 net.cpp:165] Memory required for data: 1521665200\nI1207 05:37:09.062351   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1207 05:37:09.062360   369 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1207 05:37:09.062364   369 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:37:09.062376   369 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1207 05:37:09.062610   369 net.cpp:150] Setting up L2_b4_brc1_bn\nI1207 05:37:09.062623   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062628   369 net.cpp:165] Memory required for 
data: 1534772400\nI1207 05:37:09.062639   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1207 05:37:09.062647   369 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1207 05:37:09.062654   369 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1207 05:37:09.062664   369 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1207 05:37:09.062674   369 net.cpp:150] Setting up L2_b4_brc1_relu\nI1207 05:37:09.062681   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.062685   369 net.cpp:165] Memory required for data: 1547879600\nI1207 05:37:09.062697   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1207 05:37:09.062708   369 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1207 05:37:09.062714   369 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1207 05:37:09.062723   369 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1207 05:37:09.063094   369 net.cpp:150] Setting up L2_b4_brc1_conv\nI1207 05:37:09.063107   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.063113   369 net.cpp:165] Memory required for data: 1551156400\nI1207 05:37:09.063122   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1207 05:37:09.063133   369 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1207 05:37:09.063139   369 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1207 05:37:09.063148   369 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1207 05:37:09.063391   369 net.cpp:150] Setting up L2_b4_brc2_bn\nI1207 05:37:09.063405   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.063410   369 net.cpp:165] Memory required for data: 1554433200\nI1207 05:37:09.063421   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1207 05:37:09.063428   369 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1207 05:37:09.063434   369 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1207 05:37:09.063444   369 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1207 05:37:09.063454   369 net.cpp:150] 
Setting up L2_b4_brc2_relu\nI1207 05:37:09.063462   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.063472   369 net.cpp:165] Memory required for data: 1557710000\nI1207 05:37:09.063478   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1207 05:37:09.063493   369 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1207 05:37:09.063500   369 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1207 05:37:09.063508   369 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1207 05:37:09.063971   369 net.cpp:150] Setting up L2_b4_brc2_conv\nI1207 05:37:09.063984   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.063989   369 net.cpp:165] Memory required for data: 1560986800\nI1207 05:37:09.063997   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1207 05:37:09.064009   369 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1207 05:37:09.064015   369 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1207 05:37:09.064023   369 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1207 05:37:09.064265   369 net.cpp:150] Setting up L2_b4_brc3_bn\nI1207 05:37:09.064277   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.064282   369 net.cpp:165] Memory required for data: 1564263600\nI1207 05:37:09.064292   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1207 05:37:09.064301   369 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1207 05:37:09.064306   369 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1207 05:37:09.064313   369 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1207 05:37:09.064322   369 net.cpp:150] Setting up L2_b4_brc3_relu\nI1207 05:37:09.064329   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.064334   369 net.cpp:165] Memory required for data: 1567540400\nI1207 05:37:09.064339   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1207 05:37:09.064352   369 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1207 05:37:09.064358   369 net.cpp:434] 
L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1207 05:37:09.064374   369 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1207 05:37:09.064738   369 net.cpp:150] Setting up L2_b4_brc3_conv\nI1207 05:37:09.064754   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.064759   369 net.cpp:165] Memory required for data: 1580647600\nI1207 05:37:09.064767   369 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1207 05:37:09.064779   369 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1207 05:37:09.064786   369 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1207 05:37:09.064793   369 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:37:09.064810   369 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1207 05:37:09.064838   369 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1207 05:37:09.064848   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.064853   369 net.cpp:165] Memory required for data: 1593754800\nI1207 05:37:09.064858   369 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.064865   369 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.064872   369 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1207 05:37:09.064883   369 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:37:09.064893   369 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:37:09.064939   369 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.064954   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.064960   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.064965   369 net.cpp:165] Memory required for data: 1619969200\nI1207 05:37:09.064970   369 
layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1207 05:37:09.064978   369 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1207 05:37:09.064985   369 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:37:09.064996   369 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1207 05:37:09.065227   369 net.cpp:150] Setting up L2_b5_brc1_bn\nI1207 05:37:09.065240   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.065245   369 net.cpp:165] Memory required for data: 1633076400\nI1207 05:37:09.065256   369 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1207 05:37:09.065264   369 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1207 05:37:09.065270   369 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1207 05:37:09.065277   369 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1207 05:37:09.065287   369 net.cpp:150] Setting up L2_b5_brc1_relu\nI1207 05:37:09.065294   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.065299   369 net.cpp:165] Memory required for data: 1646183600\nI1207 05:37:09.065304   369 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1207 05:37:09.065317   369 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1207 05:37:09.065323   369 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1207 05:37:09.065335   369 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1207 05:37:09.065773   369 net.cpp:150] Setting up L2_b5_brc1_conv\nI1207 05:37:09.065788   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.065794   369 net.cpp:165] Memory required for data: 1649460400\nI1207 05:37:09.065804   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1207 05:37:09.065815   369 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1207 05:37:09.065822   369 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1207 05:37:09.065830   369 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1207 05:37:09.066082   369 net.cpp:150] Setting up L2_b5_brc2_bn\nI1207 
05:37:09.066097   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.066102   369 net.cpp:165] Memory required for data: 1652737200\nI1207 05:37:09.066112   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1207 05:37:09.066121   369 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1207 05:37:09.066128   369 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1207 05:37:09.066134   369 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1207 05:37:09.066144   369 net.cpp:150] Setting up L2_b5_brc2_relu\nI1207 05:37:09.066151   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.066156   369 net.cpp:165] Memory required for data: 1656014000\nI1207 05:37:09.066160   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1207 05:37:09.066174   369 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1207 05:37:09.066187   369 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1207 05:37:09.066197   369 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1207 05:37:09.066674   369 net.cpp:150] Setting up L2_b5_brc2_conv\nI1207 05:37:09.066689   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.066694   369 net.cpp:165] Memory required for data: 1659290800\nI1207 05:37:09.066704   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1207 05:37:09.066715   369 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1207 05:37:09.066721   369 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1207 05:37:09.066730   369 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1207 05:37:09.066972   369 net.cpp:150] Setting up L2_b5_brc3_bn\nI1207 05:37:09.066987   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.066992   369 net.cpp:165] Memory required for data: 1662567600\nI1207 05:37:09.067003   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1207 05:37:09.067010   369 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1207 05:37:09.067018   369 net.cpp:434] L2_b5_brc3_relu <- 
L2_b5_brc3_bn_top\nI1207 05:37:09.067024   369 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1207 05:37:09.067034   369 net.cpp:150] Setting up L2_b5_brc3_relu\nI1207 05:37:09.067040   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.067045   369 net.cpp:165] Memory required for data: 1665844400\nI1207 05:37:09.067049   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1207 05:37:09.067059   369 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1207 05:37:09.067065   369 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1207 05:37:09.067080   369 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1207 05:37:09.067445   369 net.cpp:150] Setting up L2_b5_brc3_conv\nI1207 05:37:09.067458   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.067463   369 net.cpp:165] Memory required for data: 1678951600\nI1207 05:37:09.067479   369 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1207 05:37:09.067488   369 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1207 05:37:09.067495   369 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1207 05:37:09.067502   369 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:37:09.067510   369 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1207 05:37:09.067540   369 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1207 05:37:09.067550   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.067555   369 net.cpp:165] Memory required for data: 1692058800\nI1207 05:37:09.067562   369 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.067570   369 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.067576   369 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1207 05:37:09.067586   369 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:37:09.067596   369 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:37:09.067648   369 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.067664   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.067672   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.067677   369 net.cpp:165] Memory required for data: 1718273200\nI1207 05:37:09.067682   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1207 05:37:09.067689   369 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1207 05:37:09.067695   369 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:37:09.067706   369 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1207 05:37:09.067937   369 net.cpp:150] Setting up L2_b6_brc1_bn\nI1207 05:37:09.067950   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.067955   369 net.cpp:165] Memory required for data: 1731380400\nI1207 05:37:09.067972   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1207 05:37:09.067994   369 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1207 05:37:09.068001   369 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1207 05:37:09.068008   369 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1207 05:37:09.068018   369 net.cpp:150] Setting up L2_b6_brc1_relu\nI1207 05:37:09.068025   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.068030   369 net.cpp:165] Memory required for data: 1744487600\nI1207 05:37:09.068035   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1207 05:37:09.068051   369 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1207 05:37:09.068058   369 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1207 05:37:09.068068   369 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1207 05:37:09.068459   369 net.cpp:150] 
Setting up L2_b6_brc1_conv\nI1207 05:37:09.068480   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.068486   369 net.cpp:165] Memory required for data: 1747764400\nI1207 05:37:09.068495   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1207 05:37:09.068505   369 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1207 05:37:09.068514   369 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1207 05:37:09.068522   369 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1207 05:37:09.068773   369 net.cpp:150] Setting up L2_b6_brc2_bn\nI1207 05:37:09.068786   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.068791   369 net.cpp:165] Memory required for data: 1751041200\nI1207 05:37:09.068802   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1207 05:37:09.068810   369 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1207 05:37:09.068816   369 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1207 05:37:09.068823   369 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1207 05:37:09.068833   369 net.cpp:150] Setting up L2_b6_brc2_relu\nI1207 05:37:09.068840   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.068845   369 net.cpp:165] Memory required for data: 1754318000\nI1207 05:37:09.068850   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1207 05:37:09.068863   369 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1207 05:37:09.068871   369 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1207 05:37:09.068881   369 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1207 05:37:09.069351   369 net.cpp:150] Setting up L2_b6_brc2_conv\nI1207 05:37:09.069365   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.069370   369 net.cpp:165] Memory required for data: 1757594800\nI1207 05:37:09.069378   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1207 05:37:09.069391   369 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1207 05:37:09.069397   369 net.cpp:434] 
L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1207 05:37:09.069407   369 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1207 05:37:09.069660   369 net.cpp:150] Setting up L2_b6_brc3_bn\nI1207 05:37:09.069674   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.069679   369 net.cpp:165] Memory required for data: 1760871600\nI1207 05:37:09.069690   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1207 05:37:09.069699   369 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1207 05:37:09.069705   369 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1207 05:37:09.069711   369 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1207 05:37:09.069721   369 net.cpp:150] Setting up L2_b6_brc3_relu\nI1207 05:37:09.069728   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.069733   369 net.cpp:165] Memory required for data: 1764148400\nI1207 05:37:09.069737   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1207 05:37:09.069751   369 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1207 05:37:09.069758   369 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1207 05:37:09.069769   369 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1207 05:37:09.070138   369 net.cpp:150] Setting up L2_b6_brc3_conv\nI1207 05:37:09.070159   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.070164   369 net.cpp:165] Memory required for data: 1777255600\nI1207 05:37:09.070173   369 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1207 05:37:09.070183   369 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1207 05:37:09.070189   369 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1207 05:37:09.070197   369 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:37:09.070207   369 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1207 05:37:09.070235   369 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1207 05:37:09.070248   369 net.cpp:157] Top shape: 100 128 16 16 
(3276800)\nI1207 05:37:09.070253   369 net.cpp:165] Memory required for data: 1790362800\nI1207 05:37:09.070258   369 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.070266   369 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.070271   369 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1207 05:37:09.070279   369 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:37:09.070291   369 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:37:09.070341   369 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.070353   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.070360   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.070365   369 net.cpp:165] Memory required for data: 1816577200\nI1207 05:37:09.070370   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1207 05:37:09.070381   369 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1207 05:37:09.070387   369 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:37:09.070395   369 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1207 05:37:09.070638   369 net.cpp:150] Setting up L3_b1_brc1_bn\nI1207 05:37:09.070654   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.070660   369 net.cpp:165] Memory required for data: 1829684400\nI1207 05:37:09.070670   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1207 05:37:09.070679   369 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1207 05:37:09.070685   369 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1207 05:37:09.070693   369 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1207 05:37:09.070703   369 net.cpp:150] Setting up 
L3_b1_brc1_relu\nI1207 05:37:09.070709   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.070714   369 net.cpp:165] Memory required for data: 1842791600\nI1207 05:37:09.070719   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1207 05:37:09.070729   369 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1207 05:37:09.070735   369 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1207 05:37:09.070746   369 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1207 05:37:09.071202   369 net.cpp:150] Setting up L3_b1_brc1_conv\nI1207 05:37:09.071215   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.071220   369 net.cpp:165] Memory required for data: 1844430000\nI1207 05:37:09.071230   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1207 05:37:09.071238   369 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1207 05:37:09.071244   369 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1207 05:37:09.071255   369 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1207 05:37:09.071521   369 net.cpp:150] Setting up L3_b1_brc2_bn\nI1207 05:37:09.071534   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.071539   369 net.cpp:165] Memory required for data: 1846068400\nI1207 05:37:09.071549   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1207 05:37:09.071558   369 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1207 05:37:09.071573   369 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1207 05:37:09.071580   369 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1207 05:37:09.071590   369 net.cpp:150] Setting up L3_b1_brc2_relu\nI1207 05:37:09.071599   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.071602   369 net.cpp:165] Memory required for data: 1847706800\nI1207 05:37:09.071609   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1207 05:37:09.071621   369 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1207 05:37:09.071627   369 net.cpp:434] L3_b1_brc2_conv <- 
L3_b1_brc2_bn_top\nI1207 05:37:09.071635   369 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1207 05:37:09.073679   369 net.cpp:150] Setting up L3_b1_brc2_conv\nI1207 05:37:09.073696   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.073703   369 net.cpp:165] Memory required for data: 1849345200\nI1207 05:37:09.073711   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1207 05:37:09.073724   369 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1207 05:37:09.073731   369 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1207 05:37:09.073740   369 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1207 05:37:09.074002   369 net.cpp:150] Setting up L3_b1_brc3_bn\nI1207 05:37:09.074017   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.074021   369 net.cpp:165] Memory required for data: 1850983600\nI1207 05:37:09.074033   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1207 05:37:09.074043   369 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1207 05:37:09.074049   369 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1207 05:37:09.074057   369 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1207 05:37:09.074067   369 net.cpp:150] Setting up L3_b1_brc3_relu\nI1207 05:37:09.074074   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.074079   369 net.cpp:165] Memory required for data: 1852622000\nI1207 05:37:09.074084   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1207 05:37:09.074098   369 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1207 05:37:09.074105   369 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1207 05:37:09.074116   369 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1207 05:37:09.074731   369 net.cpp:150] Setting up L3_b1_brc3_conv\nI1207 05:37:09.074746   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.074753   369 net.cpp:165] Memory required for data: 1859175600\nI1207 05:37:09.074761   369 layer_factory.hpp:77] Creating layer 
L3_b1_chanInc_conv\nI1207 05:37:09.074774   369 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1207 05:37:09.074779   369 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:37:09.074792   369 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1207 05:37:09.075729   369 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1207 05:37:09.075744   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.075749   369 net.cpp:165] Memory required for data: 1865729200\nI1207 05:37:09.075758   369 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1207 05:37:09.075768   369 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1207 05:37:09.075774   369 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1207 05:37:09.075781   369 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1207 05:37:09.075789   369 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1207 05:37:09.075826   369 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1207 05:37:09.075839   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.075844   369 net.cpp:165] Memory required for data: 1872282800\nI1207 05:37:09.075850   369 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.075860   369 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.075866   369 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1207 05:37:09.075875   369 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:37:09.075891   369 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:37:09.075943   369 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.075955   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.075963   369 net.cpp:157] 
Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.075968   369 net.cpp:165] Memory required for data: 1885390000\nI1207 05:37:09.075973   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1207 05:37:09.075984   369 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1207 05:37:09.075990   369 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:37:09.075999   369 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1207 05:37:09.076241   369 net.cpp:150] Setting up L3_b2_brc1_bn\nI1207 05:37:09.076256   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.076262   369 net.cpp:165] Memory required for data: 1891943600\nI1207 05:37:09.076273   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1207 05:37:09.076282   369 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1207 05:37:09.076288   369 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1207 05:37:09.076295   369 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1207 05:37:09.076306   369 net.cpp:150] Setting up L3_b2_brc1_relu\nI1207 05:37:09.076313   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.076318   369 net.cpp:165] Memory required for data: 1898497200\nI1207 05:37:09.076323   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1207 05:37:09.076333   369 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1207 05:37:09.076339   369 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1207 05:37:09.076350   369 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1207 05:37:09.076990   369 net.cpp:150] Setting up L3_b2_brc1_conv\nI1207 05:37:09.077009   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.077014   369 net.cpp:165] Memory required for data: 1900135600\nI1207 05:37:09.077023   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1207 05:37:09.077033   369 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1207 05:37:09.077039   369 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1207 
05:37:09.077050   369 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1207 05:37:09.077306   369 net.cpp:150] Setting up L3_b2_brc2_bn\nI1207 05:37:09.077322   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.077328   369 net.cpp:165] Memory required for data: 1901774000\nI1207 05:37:09.077338   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1207 05:37:09.077347   369 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1207 05:37:09.077353   369 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1207 05:37:09.077360   369 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1207 05:37:09.077370   369 net.cpp:150] Setting up L3_b2_brc2_relu\nI1207 05:37:09.077378   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.077381   369 net.cpp:165] Memory required for data: 1903412400\nI1207 05:37:09.077386   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1207 05:37:09.077397   369 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1207 05:37:09.077404   369 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1207 05:37:09.077416   369 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1207 05:37:09.078426   369 net.cpp:150] Setting up L3_b2_brc2_conv\nI1207 05:37:09.078441   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.078447   369 net.cpp:165] Memory required for data: 1905050800\nI1207 05:37:09.078455   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1207 05:37:09.078472   369 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1207 05:37:09.078480   369 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1207 05:37:09.078488   369 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1207 05:37:09.078764   369 net.cpp:150] Setting up L3_b2_brc3_bn\nI1207 05:37:09.078785   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.078790   369 net.cpp:165] Memory required for data: 1906689200\nI1207 05:37:09.078801   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1207 
05:37:09.078810   369 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1207 05:37:09.078816   369 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1207 05:37:09.078824   369 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1207 05:37:09.078833   369 net.cpp:150] Setting up L3_b2_brc3_relu\nI1207 05:37:09.078840   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.078845   369 net.cpp:165] Memory required for data: 1908327600\nI1207 05:37:09.078850   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1207 05:37:09.078863   369 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1207 05:37:09.078871   369 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1207 05:37:09.078881   369 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1207 05:37:09.079496   369 net.cpp:150] Setting up L3_b2_brc3_conv\nI1207 05:37:09.079511   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.079516   369 net.cpp:165] Memory required for data: 1914881200\nI1207 05:37:09.079525   369 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1207 05:37:09.079535   369 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1207 05:37:09.079541   369 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1207 05:37:09.079548   369 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:37:09.079561   369 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1207 05:37:09.079598   369 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1207 05:37:09.079610   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.079615   369 net.cpp:165] Memory required for data: 1921434800\nI1207 05:37:09.079622   369 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.079628   369 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.079634   369 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1207 
05:37:09.079645   369 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:37:09.079655   369 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:37:09.079702   369 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.079716   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.079723   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.079728   369 net.cpp:165] Memory required for data: 1934542000\nI1207 05:37:09.079733   369 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1207 05:37:09.079741   369 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1207 05:37:09.079747   369 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:37:09.079759   369 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1207 05:37:09.079998   369 net.cpp:150] Setting up L3_b3_brc1_bn\nI1207 05:37:09.080009   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.080014   369 net.cpp:165] Memory required for data: 1941095600\nI1207 05:37:09.080025   369 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1207 05:37:09.080034   369 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1207 05:37:09.080040   369 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1207 05:37:09.080047   369 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1207 05:37:09.080057   369 net.cpp:150] Setting up L3_b3_brc1_relu\nI1207 05:37:09.080065   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.080068   369 net.cpp:165] Memory required for data: 1947649200\nI1207 05:37:09.080073   369 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1207 05:37:09.080087   369 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1207 05:37:09.080101   369 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1207 05:37:09.080113   369 net.cpp:408] 
L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1207 05:37:09.080735   369 net.cpp:150] Setting up L3_b3_brc1_conv\nI1207 05:37:09.080750   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.080756   369 net.cpp:165] Memory required for data: 1949287600\nI1207 05:37:09.080765   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1207 05:37:09.080776   369 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1207 05:37:09.080783   369 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1207 05:37:09.080796   369 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1207 05:37:09.081048   369 net.cpp:150] Setting up L3_b3_brc2_bn\nI1207 05:37:09.081060   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.081065   369 net.cpp:165] Memory required for data: 1950926000\nI1207 05:37:09.081075   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1207 05:37:09.081084   369 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1207 05:37:09.081089   369 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1207 05:37:09.081096   369 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1207 05:37:09.081106   369 net.cpp:150] Setting up L3_b3_brc2_relu\nI1207 05:37:09.081113   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.081118   369 net.cpp:165] Memory required for data: 1952564400\nI1207 05:37:09.081123   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1207 05:37:09.081136   369 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1207 05:37:09.081143   369 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1207 05:37:09.081154   369 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1207 05:37:09.082170   369 net.cpp:150] Setting up L3_b3_brc2_conv\nI1207 05:37:09.082185   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.082190   369 net.cpp:165] Memory required for data: 1954202800\nI1207 05:37:09.082200   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1207 05:37:09.082208   369 net.cpp:100] Creating 
Layer L3_b3_brc3_bn\nI1207 05:37:09.082214   369 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1207 05:37:09.082226   369 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1207 05:37:09.082489   369 net.cpp:150] Setting up L3_b3_brc3_bn\nI1207 05:37:09.082504   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.082509   369 net.cpp:165] Memory required for data: 1955841200\nI1207 05:37:09.082518   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1207 05:37:09.082526   369 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1207 05:37:09.082533   369 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1207 05:37:09.082543   369 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1207 05:37:09.082554   369 net.cpp:150] Setting up L3_b3_brc3_relu\nI1207 05:37:09.082561   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.082566   369 net.cpp:165] Memory required for data: 1957479600\nI1207 05:37:09.082571   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1207 05:37:09.082583   369 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1207 05:37:09.082590   369 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1207 05:37:09.082598   369 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1207 05:37:09.083209   369 net.cpp:150] Setting up L3_b3_brc3_conv\nI1207 05:37:09.083225   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083230   369 net.cpp:165] Memory required for data: 1964033200\nI1207 05:37:09.083238   369 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1207 05:37:09.083247   369 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1207 05:37:09.083254   369 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1207 05:37:09.083261   369 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:37:09.083272   369 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1207 05:37:09.083305   369 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1207 
05:37:09.083315   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083328   369 net.cpp:165] Memory required for data: 1970586800\nI1207 05:37:09.083333   369 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.083343   369 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.083350   369 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1207 05:37:09.083359   369 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:37:09.083370   369 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:37:09.083418   369 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.083431   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083437   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083442   369 net.cpp:165] Memory required for data: 1983694000\nI1207 05:37:09.083447   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1207 05:37:09.083458   369 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1207 05:37:09.083464   369 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:37:09.083479   369 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1207 05:37:09.083722   369 net.cpp:150] Setting up L3_b4_brc1_bn\nI1207 05:37:09.083735   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083740   369 net.cpp:165] Memory required for data: 1990247600\nI1207 05:37:09.083751   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1207 05:37:09.083760   369 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1207 05:37:09.083765   369 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1207 05:37:09.083775   369 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1207 
05:37:09.083786   369 net.cpp:150] Setting up L3_b4_brc1_relu\nI1207 05:37:09.083792   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.083797   369 net.cpp:165] Memory required for data: 1996801200\nI1207 05:37:09.083802   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1207 05:37:09.083817   369 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1207 05:37:09.083823   369 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1207 05:37:09.083832   369 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1207 05:37:09.084445   369 net.cpp:150] Setting up L3_b4_brc1_conv\nI1207 05:37:09.084460   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.084465   369 net.cpp:165] Memory required for data: 1998439600\nI1207 05:37:09.084481   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1207 05:37:09.084492   369 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1207 05:37:09.084499   369 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1207 05:37:09.084507   369 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1207 05:37:09.084769   369 net.cpp:150] Setting up L3_b4_brc2_bn\nI1207 05:37:09.084784   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.084789   369 net.cpp:165] Memory required for data: 2000078000\nI1207 05:37:09.084798   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1207 05:37:09.084815   369 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1207 05:37:09.084821   369 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1207 05:37:09.084828   369 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1207 05:37:09.084838   369 net.cpp:150] Setting up L3_b4_brc2_relu\nI1207 05:37:09.084846   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.084849   369 net.cpp:165] Memory required for data: 2001716400\nI1207 05:37:09.084854   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1207 05:37:09.084867   369 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1207 
05:37:09.084873   369 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1207 05:37:09.084884   369 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1207 05:37:09.085901   369 net.cpp:150] Setting up L3_b4_brc2_conv\nI1207 05:37:09.085922   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.085928   369 net.cpp:165] Memory required for data: 2003354800\nI1207 05:37:09.085937   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1207 05:37:09.085947   369 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1207 05:37:09.085953   369 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1207 05:37:09.085964   369 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1207 05:37:09.086230   369 net.cpp:150] Setting up L3_b4_brc3_bn\nI1207 05:37:09.086246   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.086251   369 net.cpp:165] Memory required for data: 2004993200\nI1207 05:37:09.086261   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1207 05:37:09.086269   369 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1207 05:37:09.086277   369 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1207 05:37:09.086283   369 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1207 05:37:09.086292   369 net.cpp:150] Setting up L3_b4_brc3_relu\nI1207 05:37:09.086299   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.086304   369 net.cpp:165] Memory required for data: 2006631600\nI1207 05:37:09.086309   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1207 05:37:09.086319   369 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1207 05:37:09.086325   369 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1207 05:37:09.086336   369 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1207 05:37:09.086948   369 net.cpp:150] Setting up L3_b4_brc3_conv\nI1207 05:37:09.086964   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.086969   369 net.cpp:165] Memory required for data: 2013185200\nI1207 
05:37:09.086978   369 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1207 05:37:09.086987   369 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1207 05:37:09.086994   369 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1207 05:37:09.087002   369 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:37:09.087009   369 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1207 05:37:09.087047   369 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1207 05:37:09.087060   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.087065   369 net.cpp:165] Memory required for data: 2019738800\nI1207 05:37:09.087070   369 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.087080   369 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.087087   369 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1207 05:37:09.087095   369 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:37:09.087105   369 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:37:09.087157   369 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.087169   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.087175   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.087180   369 net.cpp:165] Memory required for data: 2032846000\nI1207 05:37:09.087185   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1207 05:37:09.087196   369 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1207 05:37:09.087203   369 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:37:09.087211   369 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1207 05:37:09.087457   369 net.cpp:150] Setting up 
L3_b5_brc1_bn\nI1207 05:37:09.087479   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.087486   369 net.cpp:165] Memory required for data: 2039399600\nI1207 05:37:09.087496   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1207 05:37:09.087504   369 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1207 05:37:09.087510   369 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1207 05:37:09.087524   369 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1207 05:37:09.087535   369 net.cpp:150] Setting up L3_b5_brc1_relu\nI1207 05:37:09.087543   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.087548   369 net.cpp:165] Memory required for data: 2045953200\nI1207 05:37:09.087553   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1207 05:37:09.087563   369 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1207 05:37:09.087568   369 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1207 05:37:09.087580   369 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1207 05:37:09.088198   369 net.cpp:150] Setting up L3_b5_brc1_conv\nI1207 05:37:09.088213   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.088218   369 net.cpp:165] Memory required for data: 2047591600\nI1207 05:37:09.088227   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1207 05:37:09.088235   369 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1207 05:37:09.088243   369 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1207 05:37:09.088253   369 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1207 05:37:09.088524   369 net.cpp:150] Setting up L3_b5_brc2_bn\nI1207 05:37:09.088538   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.088543   369 net.cpp:165] Memory required for data: 2049230000\nI1207 05:37:09.088554   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1207 05:37:09.088562   369 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1207 05:37:09.088568   369 net.cpp:434] L3_b5_brc2_relu <- 
L3_b5_brc2_bn_top\nI1207 05:37:09.088575   369 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1207 05:37:09.088585   369 net.cpp:150] Setting up L3_b5_brc2_relu\nI1207 05:37:09.088593   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.088596   369 net.cpp:165] Memory required for data: 2050868400\nI1207 05:37:09.088601   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1207 05:37:09.088615   369 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1207 05:37:09.088621   369 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1207 05:37:09.088630   369 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1207 05:37:09.089651   369 net.cpp:150] Setting up L3_b5_brc2_conv\nI1207 05:37:09.089666   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.089671   369 net.cpp:165] Memory required for data: 2052506800\nI1207 05:37:09.089721   369 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1207 05:37:09.089738   369 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1207 05:37:09.089745   369 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1207 05:37:09.089754   369 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1207 05:37:09.090024   369 net.cpp:150] Setting up L3_b5_brc3_bn\nI1207 05:37:09.090037   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.090042   369 net.cpp:165] Memory required for data: 2054145200\nI1207 05:37:09.090052   369 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1207 05:37:09.090065   369 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1207 05:37:09.090070   369 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1207 05:37:09.090078   369 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1207 05:37:09.090087   369 net.cpp:150] Setting up L3_b5_brc3_relu\nI1207 05:37:09.090095   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.090100   369 net.cpp:165] Memory required for data: 2055783600\nI1207 05:37:09.090104   369 layer_factory.hpp:77] Creating 
layer L3_b5_brc3_conv\nI1207 05:37:09.090116   369 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1207 05:37:09.090121   369 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1207 05:37:09.090132   369 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1207 05:37:09.090756   369 net.cpp:150] Setting up L3_b5_brc3_conv\nI1207 05:37:09.090771   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.090776   369 net.cpp:165] Memory required for data: 2062337200\nI1207 05:37:09.090785   369 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1207 05:37:09.090801   369 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1207 05:37:09.090808   369 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1207 05:37:09.090816   369 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:37:09.090823   369 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1207 05:37:09.090862   369 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1207 05:37:09.090873   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.090878   369 net.cpp:165] Memory required for data: 2068890800\nI1207 05:37:09.090884   369 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.090893   369 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.090901   369 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1207 05:37:09.090909   369 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:37:09.090919   369 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:37:09.090968   369 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.090981   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.090987   369 net.cpp:157] Top shape: 100 256 
8 8 (1638400)\nI1207 05:37:09.090992   369 net.cpp:165] Memory required for data: 2081998000\nI1207 05:37:09.090997   369 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1207 05:37:09.091006   369 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1207 05:37:09.091012   369 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:37:09.091024   369 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1207 05:37:09.091266   369 net.cpp:150] Setting up L3_b6_brc1_bn\nI1207 05:37:09.091279   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.091284   369 net.cpp:165] Memory required for data: 2088551600\nI1207 05:37:09.091295   369 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1207 05:37:09.091305   369 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1207 05:37:09.091312   369 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1207 05:37:09.091320   369 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1207 05:37:09.091331   369 net.cpp:150] Setting up L3_b6_brc1_relu\nI1207 05:37:09.091337   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.091342   369 net.cpp:165] Memory required for data: 2095105200\nI1207 05:37:09.091346   369 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1207 05:37:09.091357   369 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1207 05:37:09.091363   369 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1207 05:37:09.091379   369 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1207 05:37:09.092020   369 net.cpp:150] Setting up L3_b6_brc1_conv\nI1207 05:37:09.092036   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.092041   369 net.cpp:165] Memory required for data: 2096743600\nI1207 05:37:09.092049   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1207 05:37:09.092058   369 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1207 05:37:09.092064   369 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1207 05:37:09.092077   369 
net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1207 05:37:09.092342   369 net.cpp:150] Setting up L3_b6_brc2_bn\nI1207 05:37:09.092358   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.092365   369 net.cpp:165] Memory required for data: 2098382000\nI1207 05:37:09.092375   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1207 05:37:09.092383   369 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1207 05:37:09.092389   369 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1207 05:37:09.092396   369 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1207 05:37:09.092406   369 net.cpp:150] Setting up L3_b6_brc2_relu\nI1207 05:37:09.092413   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.092424   369 net.cpp:165] Memory required for data: 2100020400\nI1207 05:37:09.092430   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1207 05:37:09.092440   369 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1207 05:37:09.092447   369 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1207 05:37:09.092458   369 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1207 05:37:09.093500   369 net.cpp:150] Setting up L3_b6_brc2_conv\nI1207 05:37:09.093515   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.093521   369 net.cpp:165] Memory required for data: 2101658800\nI1207 05:37:09.093529   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1207 05:37:09.093541   369 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1207 05:37:09.093549   369 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1207 05:37:09.093556   369 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1207 05:37:09.093827   369 net.cpp:150] Setting up L3_b6_brc3_bn\nI1207 05:37:09.093840   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.093845   369 net.cpp:165] Memory required for data: 2103297200\nI1207 05:37:09.093857   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1207 05:37:09.093865   369 net.cpp:100] 
Creating Layer L3_b6_brc3_relu\nI1207 05:37:09.093871   369 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1207 05:37:09.093878   369 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1207 05:37:09.093888   369 net.cpp:150] Setting up L3_b6_brc3_relu\nI1207 05:37:09.093894   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.093899   369 net.cpp:165] Memory required for data: 2104935600\nI1207 05:37:09.093904   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1207 05:37:09.093919   369 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1207 05:37:09.093925   369 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1207 05:37:09.093936   369 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1207 05:37:09.094561   369 net.cpp:150] Setting up L3_b6_brc3_conv\nI1207 05:37:09.094576   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.094581   369 net.cpp:165] Memory required for data: 2111489200\nI1207 05:37:09.094590   369 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1207 05:37:09.094599   369 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1207 05:37:09.094605   369 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1207 05:37:09.094612   369 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:37:09.094625   369 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1207 05:37:09.094663   369 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1207 05:37:09.094676   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.094681   369 net.cpp:165] Memory required for data: 2118042800\nI1207 05:37:09.094686   369 layer_factory.hpp:77] Creating layer post_bn\nI1207 05:37:09.094694   369 net.cpp:100] Creating Layer post_bn\nI1207 05:37:09.094700   369 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1207 05:37:09.094712   369 net.cpp:408] post_bn -> post_bn_top\nI1207 05:37:09.094959   369 net.cpp:150] Setting up post_bn\nI1207 05:37:09.094971   369 
net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.094976   369 net.cpp:165] Memory required for data: 2124596400\nI1207 05:37:09.094987   369 layer_factory.hpp:77] Creating layer post_relu\nI1207 05:37:09.094998   369 net.cpp:100] Creating Layer post_relu\nI1207 05:37:09.095005   369 net.cpp:434] post_relu <- post_bn_top\nI1207 05:37:09.095012   369 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1207 05:37:09.095021   369 net.cpp:150] Setting up post_relu\nI1207 05:37:09.095028   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.095033   369 net.cpp:165] Memory required for data: 2131150000\nI1207 05:37:09.095038   369 layer_factory.hpp:77] Creating layer post_pool\nI1207 05:37:09.095048   369 net.cpp:100] Creating Layer post_pool\nI1207 05:37:09.095054   369 net.cpp:434] post_pool <- post_bn_top\nI1207 05:37:09.095069   369 net.cpp:408] post_pool -> post_pool\nI1207 05:37:09.095173   369 net.cpp:150] Setting up post_pool\nI1207 05:37:09.095190   369 net.cpp:157] Top shape: 100 256 1 1 (25600)\nI1207 05:37:09.095196   369 net.cpp:165] Memory required for data: 2131252400\nI1207 05:37:09.095201   369 layer_factory.hpp:77] Creating layer post_FC\nI1207 05:37:09.095271   369 net.cpp:100] Creating Layer post_FC\nI1207 05:37:09.095284   369 net.cpp:434] post_FC <- post_pool\nI1207 05:37:09.095294   369 net.cpp:408] post_FC -> post_FC_top\nI1207 05:37:09.095568   369 net.cpp:150] Setting up post_FC\nI1207 05:37:09.095584   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.095589   369 net.cpp:165] Memory required for data: 2131256400\nI1207 05:37:09.095599   369 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1207 05:37:09.095607   369 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1207 05:37:09.095613   369 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1207 05:37:09.095625   369 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1207 05:37:09.095636   369 
net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1207 05:37:09.095687   369 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1207 05:37:09.095700   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.095705   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.095710   369 net.cpp:165] Memory required for data: 2131264400\nI1207 05:37:09.095715   369 layer_factory.hpp:77] Creating layer accuracy\nI1207 05:37:09.095762   369 net.cpp:100] Creating Layer accuracy\nI1207 05:37:09.095774   369 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1207 05:37:09.095782   369 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1207 05:37:09.095790   369 net.cpp:408] accuracy -> accuracy\nI1207 05:37:09.095834   369 net.cpp:150] Setting up accuracy\nI1207 05:37:09.095846   369 net.cpp:157] Top shape: (1)\nI1207 05:37:09.095851   369 net.cpp:165] Memory required for data: 2131264404\nI1207 05:37:09.095857   369 layer_factory.hpp:77] Creating layer loss\nI1207 05:37:09.095865   369 net.cpp:100] Creating Layer loss\nI1207 05:37:09.095871   369 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1207 05:37:09.095878   369 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1207 05:37:09.095885   369 net.cpp:408] loss -> loss\nI1207 05:37:09.095932   369 layer_factory.hpp:77] Creating layer loss\nI1207 05:37:09.096088   369 net.cpp:150] Setting up loss\nI1207 05:37:09.096103   369 net.cpp:157] Top shape: (1)\nI1207 05:37:09.096108   369 net.cpp:160]     with loss weight 1\nI1207 05:37:09.096187   369 net.cpp:165] Memory required for data: 2131264408\nI1207 05:37:09.096196   369 net.cpp:226] loss needs backward computation.\nI1207 05:37:09.096202   369 net.cpp:228] accuracy does not need backward computation.\nI1207 05:37:09.096209   369 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1207 05:37:09.096215   369 net.cpp:226] post_FC needs backward computation.\nI1207 05:37:09.096220   369 net.cpp:226] post_pool 
needs backward computation.\nI1207 05:37:09.096225   369 net.cpp:226] post_relu needs backward computation.\nI1207 05:37:09.096230   369 net.cpp:226] post_bn needs backward computation.\nI1207 05:37:09.096235   369 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.096240   369 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1207 05:37:09.096245   369 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1207 05:37:09.096249   369 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1207 05:37:09.096254   369 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1207 05:37:09.096259   369 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1207 05:37:09.096264   369 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1207 05:37:09.096269   369 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1207 05:37:09.096274   369 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1207 05:37:09.096279   369 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1207 05:37:09.096300   369 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096307   369 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.096312   369 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1207 05:37:09.096318   369 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1207 05:37:09.096323   369 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1207 05:37:09.096328   369 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1207 05:37:09.096333   369 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1207 05:37:09.096338   369 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1207 05:37:09.096343   369 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1207 05:37:09.096348   369 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1207 05:37:09.096352   369 net.cpp:226] L3_b5_brc1_bn needs 
backward computation.\nI1207 05:37:09.096357   369 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096362   369 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.096369   369 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1207 05:37:09.096374   369 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1207 05:37:09.096379   369 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1207 05:37:09.096384   369 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1207 05:37:09.096388   369 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1207 05:37:09.096393   369 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1207 05:37:09.096400   369 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1207 05:37:09.096405   369 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1207 05:37:09.096410   369 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1207 05:37:09.096415   369 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096420   369 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.096426   369 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1207 05:37:09.096431   369 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1207 05:37:09.096436   369 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1207 05:37:09.096441   369 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1207 05:37:09.096446   369 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1207 05:37:09.096451   369 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1207 05:37:09.096460   369 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1207 05:37:09.096472   369 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1207 05:37:09.096478   369 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1207 05:37:09.096484   369 
net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096489   369 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.096495   369 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1207 05:37:09.096501   369 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1207 05:37:09.096506   369 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1207 05:37:09.096511   369 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1207 05:37:09.096516   369 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1207 05:37:09.096523   369 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1207 05:37:09.096527   369 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1207 05:37:09.096532   369 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1207 05:37:09.096537   369 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1207 05:37:09.096544   369 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096549   369 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.096560   369 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.096566   369 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1207 05:37:09.096572   369 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1207 05:37:09.096577   369 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1207 05:37:09.096582   369 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1207 05:37:09.096588   369 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1207 05:37:09.096593   369 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1207 05:37:09.096598   369 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1207 05:37:09.096604   369 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1207 05:37:09.096609   369 net.cpp:226] L3_b1_brc1_bn needs backward 
computation.\nI1207 05:37:09.096614   369 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096621   369 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.096626   369 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1207 05:37:09.096632   369 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1207 05:37:09.096637   369 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1207 05:37:09.096642   369 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1207 05:37:09.096647   369 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1207 05:37:09.096652   369 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1207 05:37:09.096657   369 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1207 05:37:09.096662   369 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1207 05:37:09.096668   369 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1207 05:37:09.096673   369 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096678   369 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.096684   369 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1207 05:37:09.096690   369 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1207 05:37:09.096695   369 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1207 05:37:09.096700   369 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1207 05:37:09.096705   369 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1207 05:37:09.096715   369 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1207 05:37:09.096721   369 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1207 05:37:09.096727   369 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1207 05:37:09.096732   369 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1207 05:37:09.096738   369 
net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096743   369 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.096750   369 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1207 05:37:09.096755   369 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1207 05:37:09.096760   369 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1207 05:37:09.096765   369 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1207 05:37:09.096771   369 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1207 05:37:09.096776   369 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1207 05:37:09.096781   369 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1207 05:37:09.096786   369 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1207 05:37:09.096792   369 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1207 05:37:09.096797   369 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096803   369 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.096808   369 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1207 05:37:09.096814   369 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1207 05:37:09.096824   369 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1207 05:37:09.096830   369 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1207 05:37:09.096837   369 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1207 05:37:09.096842   369 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1207 05:37:09.096846   369 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1207 05:37:09.096853   369 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1207 05:37:09.096858   369 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1207 05:37:09.096863   369 net.cpp:226] 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096869   369 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.096875   369 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1207 05:37:09.096880   369 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1207 05:37:09.096885   369 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1207 05:37:09.096891   369 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1207 05:37:09.096896   369 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1207 05:37:09.096901   369 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1207 05:37:09.096907   369 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1207 05:37:09.096912   369 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1207 05:37:09.096918   369 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1207 05:37:09.096923   369 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096930   369 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.096935   369 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.096940   369 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1207 05:37:09.096946   369 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1207 05:37:09.096951   369 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1207 05:37:09.096956   369 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1207 05:37:09.096962   369 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1207 05:37:09.096967   369 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1207 05:37:09.096972   369 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1207 05:37:09.096978   369 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1207 05:37:09.096983   369 net.cpp:226] L2_b1_brc1_bn needs backward 
computation.\nI1207 05:37:09.096988   369 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.096994   369 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.097000   369 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1207 05:37:09.097005   369 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1207 05:37:09.097010   369 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1207 05:37:09.097017   369 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1207 05:37:09.097021   369 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1207 05:37:09.097026   369 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1207 05:37:09.097033   369 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1207 05:37:09.097038   369 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1207 05:37:09.097043   369 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1207 05:37:09.097051   369 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.097057   369 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.097064   369 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1207 05:37:09.097069   369 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1207 05:37:09.097074   369 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1207 05:37:09.097079   369 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1207 05:37:09.097090   369 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1207 05:37:09.097096   369 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1207 05:37:09.097101   369 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1207 05:37:09.097107   369 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1207 05:37:09.097112   369 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1207 05:37:09.097118   369 
net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.097123   369 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.097129   369 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1207 05:37:09.097136   369 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1207 05:37:09.097141   369 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1207 05:37:09.097146   369 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1207 05:37:09.097151   369 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1207 05:37:09.097157   369 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1207 05:37:09.097162   369 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1207 05:37:09.097167   369 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1207 05:37:09.097173   369 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1207 05:37:09.097178   369 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.097184   369 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.097190   369 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1207 05:37:09.097196   369 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1207 05:37:09.097201   369 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1207 05:37:09.097206   369 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1207 05:37:09.097213   369 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1207 05:37:09.097218   369 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1207 05:37:09.097223   369 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1207 05:37:09.097229   369 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1207 05:37:09.097234   369 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1207 05:37:09.097240   369 net.cpp:226] 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.097246   369 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.097252   369 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1207 05:37:09.097257   369 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1207 05:37:09.097263   369 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1207 05:37:09.097268   369 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1207 05:37:09.097275   369 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1207 05:37:09.097280   369 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1207 05:37:09.097285   369 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1207 05:37:09.097290   369 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1207 05:37:09.097295   369 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1207 05:37:09.097301   369 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.097306   369 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.097313   369 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.097318   369 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1207 05:37:09.097324   369 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1207 05:37:09.097329   369 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1207 05:37:09.097335   369 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1207 05:37:09.097340   369 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1207 05:37:09.097347   369 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1207 05:37:09.097357   369 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1207 05:37:09.097362   369 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1207 05:37:09.097368   369 net.cpp:226] L1_b1_brc1_bn needs backward 
computation.\nI1207 05:37:09.097373   369 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1207 05:37:09.097379   369 net.cpp:226] pre_conv needs backward computation.\nI1207 05:37:09.097386   369 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1207 05:37:09.097393   369 net.cpp:228] dataLayer does not need backward computation.\nI1207 05:37:09.097396   369 net.cpp:270] This network produces output accuracy\nI1207 05:37:09.097404   369 net.cpp:270] This network produces output loss\nI1207 05:37:09.097697   369 net.cpp:283] Network initialization done.\nI1207 05:37:09.103493   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:09.103524   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:09.103574   369 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1207 05:37:09.103821   369 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1207 05:37:09.105290   369 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  
bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: 
\"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  
type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  
}\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  
name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    
kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_brc1_conv_top\"\n  top: \nI1207 05:37:09.106506   369 layer_factory.hpp:77] Creating layer dataLayer\nI1207 05:37:09.106731   369 net.cpp:100] Creating Layer dataLayer\nI1207 05:37:09.106755   369 net.cpp:408] dataLayer -> data_top\nI1207 05:37:09.106770   369 net.cpp:408] dataLayer -> label\nI1207 05:37:09.106782   369 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1207 05:37:09.129729   379 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1207 05:37:09.130018   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:09.137473   369 net.cpp:150] Setting up dataLayer\nI1207 05:37:09.137497   369 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI1207 05:37:09.137506   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.137511   369 net.cpp:165] Memory required for data: 1229200\nI1207 05:37:09.137518   369 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1207 05:37:09.137528   369 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1207 05:37:09.137534   369 net.cpp:434] label_dataLayer_1_split <- label\nI1207 05:37:09.137543   369 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1207 05:37:09.137555   369 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1207 05:37:09.137691   369 net.cpp:150] Setting up label_dataLayer_1_split\nI1207 05:37:09.137706   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.137722   369 net.cpp:157] Top shape: 100 (100)\nI1207 05:37:09.137728   369 net.cpp:165] Memory required for data: 1230000\nI1207 05:37:09.137737   369 layer_factory.hpp:77] Creating layer pre_conv\nI1207 05:37:09.137754   369 net.cpp:100] Creating Layer pre_conv\nI1207 05:37:09.137760   369 net.cpp:434] pre_conv <- data_top\nI1207 
05:37:09.137775   369 net.cpp:408] pre_conv -> pre_conv_top\nI1207 05:37:09.138195   369 net.cpp:150] Setting up pre_conv\nI1207 05:37:09.138214   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.138219   369 net.cpp:165] Memory required for data: 7783600\nI1207 05:37:09.138234   369 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1207 05:37:09.138245   369 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1207 05:37:09.138252   369 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1207 05:37:09.138273   369 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1207 05:37:09.138285   369 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1207 05:37:09.138346   369 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1207 05:37:09.138361   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.138368   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.138373   369 net.cpp:165] Memory required for data: 20890800\nI1207 05:37:09.138378   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1207 05:37:09.138391   369 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1207 05:37:09.138397   369 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1207 05:37:09.138408   369 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1207 05:37:09.138774   369 net.cpp:150] Setting up L1_b1_brc1_bn\nI1207 05:37:09.138792   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.138797   369 net.cpp:165] Memory required for data: 27444400\nI1207 05:37:09.138813   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1207 05:37:09.138825   369 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1207 05:37:09.138834   369 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1207 05:37:09.138844   369 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1207 05:37:09.138852   369 net.cpp:150] Setting up 
L1_b1_brc1_relu\nI1207 05:37:09.138860   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.138864   369 net.cpp:165] Memory required for data: 33998000\nI1207 05:37:09.138870   369 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1207 05:37:09.138880   369 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1207 05:37:09.138885   369 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1207 05:37:09.138897   369 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1207 05:37:09.139292   369 net.cpp:150] Setting up L1_b1_brc1_conv\nI1207 05:37:09.139310   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.139315   369 net.cpp:165] Memory required for data: 40551600\nI1207 05:37:09.139325   369 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1207 05:37:09.139334   369 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1207 05:37:09.139343   369 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1207 05:37:09.139358   369 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1207 05:37:09.140766   369 net.cpp:150] Setting up L1_b1_brc2_bn\nI1207 05:37:09.140784   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.140790   369 net.cpp:165] Memory required for data: 47105200\nI1207 05:37:09.140806   369 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1207 05:37:09.140817   369 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1207 05:37:09.140826   369 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1207 05:37:09.140838   369 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1207 05:37:09.140849   369 net.cpp:150] Setting up L1_b1_brc2_relu\nI1207 05:37:09.140861   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.140866   369 net.cpp:165] Memory required for data: 53658800\nI1207 05:37:09.140879   369 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1207 05:37:09.140894   369 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1207 05:37:09.140903   369 net.cpp:434] L1_b1_brc2_conv <- 
L1_b1_brc2_bn_top\nI1207 05:37:09.140913   369 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1207 05:37:09.141391   369 net.cpp:150] Setting up L1_b1_brc2_conv\nI1207 05:37:09.141408   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.141414   369 net.cpp:165] Memory required for data: 60212400\nI1207 05:37:09.141423   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1207 05:37:09.141436   369 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1207 05:37:09.141443   369 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1207 05:37:09.141453   369 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1207 05:37:09.141768   369 net.cpp:150] Setting up L1_b1_brc3_bn\nI1207 05:37:09.141782   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.141788   369 net.cpp:165] Memory required for data: 66766000\nI1207 05:37:09.141801   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1207 05:37:09.141810   369 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1207 05:37:09.141816   369 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1207 05:37:09.141824   369 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1207 05:37:09.141834   369 net.cpp:150] Setting up L1_b1_brc3_relu\nI1207 05:37:09.141840   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.141844   369 net.cpp:165] Memory required for data: 73319600\nI1207 05:37:09.141849   369 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1207 05:37:09.141867   369 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1207 05:37:09.141873   369 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1207 05:37:09.141885   369 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1207 05:37:09.142241   369 net.cpp:150] Setting up L1_b1_brc3_conv\nI1207 05:37:09.142258   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.142263   369 net.cpp:165] Memory required for data: 99534000\nI1207 05:37:09.142280   369 layer_factory.hpp:77] Creating layer 
L1_b1_chanInc_conv\nI1207 05:37:09.142302   369 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1207 05:37:09.142310   369 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1207 05:37:09.142320   369 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1207 05:37:09.142691   369 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1207 05:37:09.142709   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.142714   369 net.cpp:165] Memory required for data: 125748400\nI1207 05:37:09.142724   369 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1207 05:37:09.142743   369 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1207 05:37:09.142750   369 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1207 05:37:09.142757   369 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1207 05:37:09.142767   369 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1207 05:37:09.142809   369 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1207 05:37:09.142819   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.142824   369 net.cpp:165] Memory required for data: 151962800\nI1207 05:37:09.142830   369 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.142841   369 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.142850   369 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1207 05:37:09.142858   369 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:37:09.142868   369 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:37:09.142926   369 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:37:09.142937   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.142958   369 net.cpp:157] Top shape: 100 64 
32 32 (6553600)\nI1207 05:37:09.142964   369 net.cpp:165] Memory required for data: 204391600\nI1207 05:37:09.142969   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1207 05:37:09.142980   369 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1207 05:37:09.142988   369 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:37:09.143000   369 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1207 05:37:09.143328   369 net.cpp:150] Setting up L1_b2_brc1_bn\nI1207 05:37:09.143345   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.143352   369 net.cpp:165] Memory required for data: 230606000\nI1207 05:37:09.143362   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1207 05:37:09.143376   369 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1207 05:37:09.143386   369 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1207 05:37:09.143394   369 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1207 05:37:09.143404   369 net.cpp:150] Setting up L1_b2_brc1_relu\nI1207 05:37:09.143411   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.143416   369 net.cpp:165] Memory required for data: 256820400\nI1207 05:37:09.143420   369 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1207 05:37:09.143431   369 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1207 05:37:09.143436   369 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1207 05:37:09.143445   369 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1207 05:37:09.143893   369 net.cpp:150] Setting up L1_b2_brc1_conv\nI1207 05:37:09.143909   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.143914   369 net.cpp:165] Memory required for data: 263374000\nI1207 05:37:09.143923   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1207 05:37:09.143946   369 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1207 05:37:09.143954   369 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1207 05:37:09.143962   369 
net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1207 05:37:09.144258   369 net.cpp:150] Setting up L1_b2_brc2_bn\nI1207 05:37:09.144273   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.144279   369 net.cpp:165] Memory required for data: 269927600\nI1207 05:37:09.144289   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1207 05:37:09.144297   369 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1207 05:37:09.144304   369 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1207 05:37:09.144317   369 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1207 05:37:09.144327   369 net.cpp:150] Setting up L1_b2_brc2_relu\nI1207 05:37:09.144335   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.144341   369 net.cpp:165] Memory required for data: 276481200\nI1207 05:37:09.144347   369 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1207 05:37:09.144361   369 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1207 05:37:09.144367   369 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1207 05:37:09.144376   369 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1207 05:37:09.144789   369 net.cpp:150] Setting up L1_b2_brc2_conv\nI1207 05:37:09.144806   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.144812   369 net.cpp:165] Memory required for data: 283034800\nI1207 05:37:09.144822   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1207 05:37:09.144834   369 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1207 05:37:09.144842   369 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1207 05:37:09.144852   369 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1207 05:37:09.145145   369 net.cpp:150] Setting up L1_b2_brc3_bn\nI1207 05:37:09.145159   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.145164   369 net.cpp:165] Memory required for data: 289588400\nI1207 05:37:09.145175   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1207 05:37:09.145182   369 
net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1207 05:37:09.145191   369 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1207 05:37:09.145215   369 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1207 05:37:09.145227   369 net.cpp:150] Setting up L1_b2_brc3_relu\nI1207 05:37:09.145236   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.145241   369 net.cpp:165] Memory required for data: 296142000\nI1207 05:37:09.145246   369 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1207 05:37:09.145261   369 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1207 05:37:09.145270   369 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1207 05:37:09.145280   369 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1207 05:37:09.145663   369 net.cpp:150] Setting up L1_b2_brc3_conv\nI1207 05:37:09.145679   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.145685   369 net.cpp:165] Memory required for data: 322356400\nI1207 05:37:09.145707   369 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1207 05:37:09.145717   369 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1207 05:37:09.145723   369 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1207 05:37:09.145730   369 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:37:09.145738   369 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1207 05:37:09.145783   369 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1207 05:37:09.145793   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.145799   369 net.cpp:165] Memory required for data: 348570800\nI1207 05:37:09.145805   369 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.145812   369 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.145818   369 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1207 05:37:09.145828   
369 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:37:09.145838   369 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:37:09.145995   369 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:37:09.146008   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.146015   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.146020   369 net.cpp:165] Memory required for data: 400999600\nI1207 05:37:09.146025   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1207 05:37:09.146034   369 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1207 05:37:09.146040   369 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:37:09.146050   369 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1207 05:37:09.146301   369 net.cpp:150] Setting up L1_b3_brc1_bn\nI1207 05:37:09.146317   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.146322   369 net.cpp:165] Memory required for data: 427214000\nI1207 05:37:09.146333   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1207 05:37:09.146340   369 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1207 05:37:09.146347   369 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1207 05:37:09.146355   369 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1207 05:37:09.146364   369 net.cpp:150] Setting up L1_b3_brc1_relu\nI1207 05:37:09.146371   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.146376   369 net.cpp:165] Memory required for data: 453428400\nI1207 05:37:09.146381   369 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1207 05:37:09.146391   369 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1207 05:37:09.146397   369 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1207 05:37:09.146409   369 net.cpp:408] L1_b3_brc1_conv -> 
L1_b3_brc1_conv_top\nI1207 05:37:09.146745   369 net.cpp:150] Setting up L1_b3_brc1_conv\nI1207 05:37:09.146761   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.146766   369 net.cpp:165] Memory required for data: 459982000\nI1207 05:37:09.146775   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1207 05:37:09.146790   369 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1207 05:37:09.146797   369 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1207 05:37:09.146808   369 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1207 05:37:09.147073   369 net.cpp:150] Setting up L1_b3_brc2_bn\nI1207 05:37:09.147087   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.147094   369 net.cpp:165] Memory required for data: 466535600\nI1207 05:37:09.147104   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1207 05:37:09.147114   369 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1207 05:37:09.147120   369 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1207 05:37:09.147127   369 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1207 05:37:09.147137   369 net.cpp:150] Setting up L1_b3_brc2_relu\nI1207 05:37:09.147145   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.147148   369 net.cpp:165] Memory required for data: 473089200\nI1207 05:37:09.147153   369 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1207 05:37:09.147166   369 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1207 05:37:09.147172   369 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1207 05:37:09.147186   369 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1207 05:37:09.147536   369 net.cpp:150] Setting up L1_b3_brc2_conv\nI1207 05:37:09.147550   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.147555   369 net.cpp:165] Memory required for data: 479642800\nI1207 05:37:09.147564   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1207 05:37:09.147573   369 net.cpp:100] Creating Layer 
L1_b3_brc3_bn\nI1207 05:37:09.147579   369 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1207 05:37:09.147590   369 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1207 05:37:09.147851   369 net.cpp:150] Setting up L1_b3_brc3_bn\nI1207 05:37:09.147864   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.147869   369 net.cpp:165] Memory required for data: 486196400\nI1207 05:37:09.147881   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1207 05:37:09.147892   369 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1207 05:37:09.147899   369 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1207 05:37:09.147907   369 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1207 05:37:09.147917   369 net.cpp:150] Setting up L1_b3_brc3_relu\nI1207 05:37:09.147923   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.147927   369 net.cpp:165] Memory required for data: 492750000\nI1207 05:37:09.147933   369 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1207 05:37:09.147943   369 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1207 05:37:09.147948   369 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1207 05:37:09.147959   369 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1207 05:37:09.148296   369 net.cpp:150] Setting up L1_b3_brc3_conv\nI1207 05:37:09.148310   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148315   369 net.cpp:165] Memory required for data: 518964400\nI1207 05:37:09.148324   369 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1207 05:37:09.148334   369 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1207 05:37:09.148339   369 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1207 05:37:09.148346   369 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:37:09.148355   369 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1207 05:37:09.148396   369 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1207 
05:37:09.148411   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148416   369 net.cpp:165] Memory required for data: 545178800\nI1207 05:37:09.148421   369 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.148427   369 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.148433   369 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1207 05:37:09.148452   369 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:37:09.148463   369 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:37:09.148524   369 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:37:09.148536   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148543   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148547   369 net.cpp:165] Memory required for data: 597607600\nI1207 05:37:09.148553   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1207 05:37:09.148564   369 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1207 05:37:09.148571   369 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:37:09.148578   369 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1207 05:37:09.148835   369 net.cpp:150] Setting up L1_b4_brc1_bn\nI1207 05:37:09.148849   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148854   369 net.cpp:165] Memory required for data: 623822000\nI1207 05:37:09.148864   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1207 05:37:09.148872   369 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1207 05:37:09.148877   369 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1207 05:37:09.148885   369 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1207 
05:37:09.148895   369 net.cpp:150] Setting up L1_b4_brc1_relu\nI1207 05:37:09.148901   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.148905   369 net.cpp:165] Memory required for data: 650036400\nI1207 05:37:09.148910   369 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1207 05:37:09.148924   369 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1207 05:37:09.148931   369 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1207 05:37:09.148942   369 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1207 05:37:09.149255   369 net.cpp:150] Setting up L1_b4_brc1_conv\nI1207 05:37:09.149268   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.149273   369 net.cpp:165] Memory required for data: 656590000\nI1207 05:37:09.149281   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1207 05:37:09.149293   369 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1207 05:37:09.149299   369 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1207 05:37:09.149307   369 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1207 05:37:09.149576   369 net.cpp:150] Setting up L1_b4_brc2_bn\nI1207 05:37:09.149593   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.149600   369 net.cpp:165] Memory required for data: 663143600\nI1207 05:37:09.149610   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1207 05:37:09.149617   369 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1207 05:37:09.149623   369 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1207 05:37:09.149631   369 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1207 05:37:09.149639   369 net.cpp:150] Setting up L1_b4_brc2_relu\nI1207 05:37:09.149646   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.149652   369 net.cpp:165] Memory required for data: 669697200\nI1207 05:37:09.149657   369 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1207 05:37:09.149669   369 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1207 
05:37:09.149675   369 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1207 05:37:09.149683   369 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1207 05:37:09.150050   369 net.cpp:150] Setting up L1_b4_brc2_conv\nI1207 05:37:09.150066   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.150071   369 net.cpp:165] Memory required for data: 676250800\nI1207 05:37:09.150080   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1207 05:37:09.150089   369 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1207 05:37:09.150094   369 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1207 05:37:09.150113   369 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1207 05:37:09.150383   369 net.cpp:150] Setting up L1_b4_brc3_bn\nI1207 05:37:09.150400   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.150405   369 net.cpp:165] Memory required for data: 682804400\nI1207 05:37:09.150416   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1207 05:37:09.150424   369 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1207 05:37:09.150430   369 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1207 05:37:09.150437   369 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1207 05:37:09.150447   369 net.cpp:150] Setting up L1_b4_brc3_relu\nI1207 05:37:09.150454   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.150458   369 net.cpp:165] Memory required for data: 689358000\nI1207 05:37:09.150463   369 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1207 05:37:09.150481   369 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1207 05:37:09.150487   369 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1207 05:37:09.150498   369 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1207 05:37:09.150820   369 net.cpp:150] Setting up L1_b4_brc3_conv\nI1207 05:37:09.150835   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.150840   369 net.cpp:165] Memory required for data: 715572400\nI1207 
05:37:09.150848   369 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1207 05:37:09.150857   369 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1207 05:37:09.150863   369 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1207 05:37:09.150871   369 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:37:09.150882   369 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1207 05:37:09.150913   369 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1207 05:37:09.150925   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.150930   369 net.cpp:165] Memory required for data: 741786800\nI1207 05:37:09.150935   369 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.150946   369 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.150952   369 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1207 05:37:09.150960   369 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:37:09.150969   369 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:37:09.151020   369 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:37:09.151031   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.151037   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.151042   369 net.cpp:165] Memory required for data: 794215600\nI1207 05:37:09.151047   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1207 05:37:09.151058   369 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1207 05:37:09.151064   369 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:37:09.151072   369 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1207 05:37:09.151322   369 net.cpp:150] Setting up 
L1_b5_brc1_bn\nI1207 05:37:09.151336   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.151341   369 net.cpp:165] Memory required for data: 820430000\nI1207 05:37:09.151387   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1207 05:37:09.151397   369 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1207 05:37:09.151412   369 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1207 05:37:09.151419   369 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1207 05:37:09.151429   369 net.cpp:150] Setting up L1_b5_brc1_relu\nI1207 05:37:09.151437   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.151441   369 net.cpp:165] Memory required for data: 846644400\nI1207 05:37:09.151446   369 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1207 05:37:09.151464   369 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1207 05:37:09.151486   369 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1207 05:37:09.151499   369 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1207 05:37:09.151837   369 net.cpp:150] Setting up L1_b5_brc1_conv\nI1207 05:37:09.151854   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.151859   369 net.cpp:165] Memory required for data: 853198000\nI1207 05:37:09.151867   369 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1207 05:37:09.151877   369 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1207 05:37:09.151883   369 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1207 05:37:09.151893   369 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1207 05:37:09.152153   369 net.cpp:150] Setting up L1_b5_brc2_bn\nI1207 05:37:09.152166   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.152171   369 net.cpp:165] Memory required for data: 859751600\nI1207 05:37:09.152181   369 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1207 05:37:09.152194   369 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1207 05:37:09.152199   369 net.cpp:434] L1_b5_brc2_relu <- 
L1_b5_brc2_bn_top\nI1207 05:37:09.152206   369 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1207 05:37:09.152216   369 net.cpp:150] Setting up L1_b5_brc2_relu\nI1207 05:37:09.152223   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.152228   369 net.cpp:165] Memory required for data: 866305200\nI1207 05:37:09.152232   369 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1207 05:37:09.152243   369 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1207 05:37:09.152248   369 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1207 05:37:09.152261   369 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1207 05:37:09.152613   369 net.cpp:150] Setting up L1_b5_brc2_conv\nI1207 05:37:09.152628   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.152633   369 net.cpp:165] Memory required for data: 872858800\nI1207 05:37:09.152642   369 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1207 05:37:09.152652   369 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1207 05:37:09.152657   369 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1207 05:37:09.152668   369 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1207 05:37:09.152936   369 net.cpp:150] Setting up L1_b5_brc3_bn\nI1207 05:37:09.152951   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.152956   369 net.cpp:165] Memory required for data: 879412400\nI1207 05:37:09.152966   369 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1207 05:37:09.152977   369 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1207 05:37:09.152983   369 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1207 05:37:09.152992   369 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1207 05:37:09.153000   369 net.cpp:150] Setting up L1_b5_brc3_relu\nI1207 05:37:09.153007   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.153012   369 net.cpp:165] Memory required for data: 885966000\nI1207 05:37:09.153017   369 layer_factory.hpp:77] 
Creating layer L1_b5_brc3_conv\nI1207 05:37:09.153028   369 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1207 05:37:09.153033   369 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1207 05:37:09.153044   369 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1207 05:37:09.153363   369 net.cpp:150] Setting up L1_b5_brc3_conv\nI1207 05:37:09.153378   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153383   369 net.cpp:165] Memory required for data: 912180400\nI1207 05:37:09.153393   369 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1207 05:37:09.153401   369 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1207 05:37:09.153408   369 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1207 05:37:09.153414   369 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:37:09.153421   369 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1207 05:37:09.153475   369 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1207 05:37:09.153486   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153491   369 net.cpp:165] Memory required for data: 938394800\nI1207 05:37:09.153497   369 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.153506   369 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.153512   369 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1207 05:37:09.153522   369 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:37:09.153532   369 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:37:09.153584   369 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:37:09.153596   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153604   369 net.cpp:157] Top 
shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153609   369 net.cpp:165] Memory required for data: 990823600\nI1207 05:37:09.153614   369 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1207 05:37:09.153621   369 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1207 05:37:09.153626   369 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:37:09.153637   369 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1207 05:37:09.153892   369 net.cpp:150] Setting up L1_b6_brc1_bn\nI1207 05:37:09.153908   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153913   369 net.cpp:165] Memory required for data: 1017038000\nI1207 05:37:09.153923   369 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1207 05:37:09.153931   369 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1207 05:37:09.153937   369 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1207 05:37:09.153944   369 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1207 05:37:09.153954   369 net.cpp:150] Setting up L1_b6_brc1_relu\nI1207 05:37:09.153960   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.153965   369 net.cpp:165] Memory required for data: 1043252400\nI1207 05:37:09.153970   369 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1207 05:37:09.153980   369 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1207 05:37:09.153986   369 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1207 05:37:09.153997   369 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1207 05:37:09.154317   369 net.cpp:150] Setting up L1_b6_brc1_conv\nI1207 05:37:09.154331   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.154336   369 net.cpp:165] Memory required for data: 1049806000\nI1207 05:37:09.154345   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1207 05:37:09.154353   369 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1207 05:37:09.154361   369 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1207 
05:37:09.154371   369 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1207 05:37:09.154644   369 net.cpp:150] Setting up L1_b6_brc2_bn\nI1207 05:37:09.154657   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.154662   369 net.cpp:165] Memory required for data: 1056359600\nI1207 05:37:09.154672   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1207 05:37:09.154688   369 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1207 05:37:09.154695   369 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1207 05:37:09.154702   369 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1207 05:37:09.154712   369 net.cpp:150] Setting up L1_b6_brc2_relu\nI1207 05:37:09.154719   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.154724   369 net.cpp:165] Memory required for data: 1062913200\nI1207 05:37:09.154729   369 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1207 05:37:09.154742   369 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1207 05:37:09.154748   369 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1207 05:37:09.154767   369 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1207 05:37:09.155115   369 net.cpp:150] Setting up L1_b6_brc2_conv\nI1207 05:37:09.155129   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.155134   369 net.cpp:165] Memory required for data: 1069466800\nI1207 05:37:09.155143   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1207 05:37:09.155154   369 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1207 05:37:09.155161   369 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1207 05:37:09.155169   369 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1207 05:37:09.155431   369 net.cpp:150] Setting up L1_b6_brc3_bn\nI1207 05:37:09.155444   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.155449   369 net.cpp:165] Memory required for data: 1076020400\nI1207 05:37:09.155459   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1207 
05:37:09.155474   369 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1207 05:37:09.155481   369 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1207 05:37:09.155488   369 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1207 05:37:09.155498   369 net.cpp:150] Setting up L1_b6_brc3_relu\nI1207 05:37:09.155505   369 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1207 05:37:09.155510   369 net.cpp:165] Memory required for data: 1082574000\nI1207 05:37:09.155515   369 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1207 05:37:09.155529   369 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1207 05:37:09.155535   369 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1207 05:37:09.155544   369 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1207 05:37:09.155863   369 net.cpp:150] Setting up L1_b6_brc3_conv\nI1207 05:37:09.155876   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.155882   369 net.cpp:165] Memory required for data: 1108788400\nI1207 05:37:09.155890   369 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1207 05:37:09.155900   369 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1207 05:37:09.155906   369 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1207 05:37:09.155913   369 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:37:09.155923   369 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1207 05:37:09.155956   369 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1207 05:37:09.155977   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.155982   369 net.cpp:165] Memory required for data: 1135002800\nI1207 05:37:09.155987   369 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.155994   369 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.156000   369 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- 
L1_b6_sum_eltwise_top\nI1207 05:37:09.156008   369 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:37:09.156019   369 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:37:09.156067   369 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:37:09.156078   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.156085   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.156090   369 net.cpp:165] Memory required for data: 1187431600\nI1207 05:37:09.156095   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1207 05:37:09.156106   369 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1207 05:37:09.156112   369 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:37:09.156123   369 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1207 05:37:09.156378   369 net.cpp:150] Setting up L2_b1_brc1_bn\nI1207 05:37:09.156391   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.156396   369 net.cpp:165] Memory required for data: 1213646000\nI1207 05:37:09.156407   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1207 05:37:09.156424   369 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1207 05:37:09.156430   369 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1207 05:37:09.156438   369 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1207 05:37:09.156447   369 net.cpp:150] Setting up L2_b1_brc1_relu\nI1207 05:37:09.156455   369 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1207 05:37:09.156460   369 net.cpp:165] Memory required for data: 1239860400\nI1207 05:37:09.156471   369 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1207 05:37:09.156486   369 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1207 05:37:09.156491   369 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1207 
05:37:09.156503   369 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1207 05:37:09.156848   369 net.cpp:150] Setting up L2_b1_brc1_conv\nI1207 05:37:09.156862   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.156868   369 net.cpp:165] Memory required for data: 1243137200\nI1207 05:37:09.156877   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1207 05:37:09.156888   369 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1207 05:37:09.156895   369 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1207 05:37:09.156905   369 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1207 05:37:09.157162   369 net.cpp:150] Setting up L2_b1_brc2_bn\nI1207 05:37:09.157176   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.157181   369 net.cpp:165] Memory required for data: 1246414000\nI1207 05:37:09.157192   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1207 05:37:09.157200   369 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1207 05:37:09.157207   369 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1207 05:37:09.157217   369 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1207 05:37:09.157227   369 net.cpp:150] Setting up L2_b1_brc2_relu\nI1207 05:37:09.157233   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.157238   369 net.cpp:165] Memory required for data: 1249690800\nI1207 05:37:09.157243   369 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1207 05:37:09.157256   369 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1207 05:37:09.157263   369 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1207 05:37:09.157271   369 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1207 05:37:09.157764   369 net.cpp:150] Setting up L2_b1_brc2_conv\nI1207 05:37:09.157779   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.157784   369 net.cpp:165] Memory required for data: 1252967600\nI1207 05:37:09.157794   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1207 
05:37:09.157805   369 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1207 05:37:09.157812   369 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1207 05:37:09.157820   369 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1207 05:37:09.158076   369 net.cpp:150] Setting up L2_b1_brc3_bn\nI1207 05:37:09.158088   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.158094   369 net.cpp:165] Memory required for data: 1256244400\nI1207 05:37:09.158104   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1207 05:37:09.158113   369 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1207 05:37:09.158118   369 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1207 05:37:09.158128   369 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1207 05:37:09.158139   369 net.cpp:150] Setting up L2_b1_brc3_relu\nI1207 05:37:09.158146   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.158150   369 net.cpp:165] Memory required for data: 1259521200\nI1207 05:37:09.158155   369 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1207 05:37:09.158166   369 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1207 05:37:09.158175   369 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1207 05:37:09.158185   369 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1207 05:37:09.158562   369 net.cpp:150] Setting up L2_b1_brc3_conv\nI1207 05:37:09.158577   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.158589   369 net.cpp:165] Memory required for data: 1272628400\nI1207 05:37:09.158599   369 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1207 05:37:09.158620   369 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1207 05:37:09.158628   369 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:37:09.158638   369 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1207 05:37:09.160115   369 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1207 05:37:09.160132   369 
net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160137   369 net.cpp:165] Memory required for data: 1285735600\nI1207 05:37:09.160147   369 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1207 05:37:09.160157   369 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1207 05:37:09.160163   369 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1207 05:37:09.160171   369 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1207 05:37:09.160182   369 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1207 05:37:09.160210   369 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1207 05:37:09.160220   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160224   369 net.cpp:165] Memory required for data: 1298842800\nI1207 05:37:09.160230   369 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.160241   369 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.160248   369 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1207 05:37:09.160255   369 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:37:09.160264   369 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:37:09.160315   369 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:37:09.160326   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160333   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160338   369 net.cpp:165] Memory required for data: 1325057200\nI1207 05:37:09.160343   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1207 05:37:09.160351   369 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1207 05:37:09.160357   369 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 
05:37:09.160367   369 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1207 05:37:09.160612   369 net.cpp:150] Setting up L2_b2_brc1_bn\nI1207 05:37:09.160626   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160631   369 net.cpp:165] Memory required for data: 1338164400\nI1207 05:37:09.160642   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1207 05:37:09.160655   369 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1207 05:37:09.160661   369 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1207 05:37:09.160670   369 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1207 05:37:09.160679   369 net.cpp:150] Setting up L2_b2_brc1_relu\nI1207 05:37:09.160686   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.160691   369 net.cpp:165] Memory required for data: 1351271600\nI1207 05:37:09.160696   369 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1207 05:37:09.160706   369 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1207 05:37:09.160712   369 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1207 05:37:09.160723   369 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1207 05:37:09.161106   369 net.cpp:150] Setting up L2_b2_brc1_conv\nI1207 05:37:09.161120   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.161125   369 net.cpp:165] Memory required for data: 1354548400\nI1207 05:37:09.161134   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1207 05:37:09.161144   369 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1207 05:37:09.161149   369 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1207 05:37:09.161166   369 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1207 05:37:09.161424   369 net.cpp:150] Setting up L2_b2_brc2_bn\nI1207 05:37:09.161438   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.161443   369 net.cpp:165] Memory required for data: 1357825200\nI1207 05:37:09.161454   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1207 
05:37:09.161463   369 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1207 05:37:09.161475   369 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1207 05:37:09.161486   369 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1207 05:37:09.161497   369 net.cpp:150] Setting up L2_b2_brc2_relu\nI1207 05:37:09.161504   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.161509   369 net.cpp:165] Memory required for data: 1361102000\nI1207 05:37:09.161514   369 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1207 05:37:09.161528   369 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1207 05:37:09.161535   369 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1207 05:37:09.161543   369 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1207 05:37:09.162021   369 net.cpp:150] Setting up L2_b2_brc2_conv\nI1207 05:37:09.162035   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.162040   369 net.cpp:165] Memory required for data: 1364378800\nI1207 05:37:09.162050   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1207 05:37:09.162061   369 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1207 05:37:09.162067   369 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1207 05:37:09.162075   369 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1207 05:37:09.162336   369 net.cpp:150] Setting up L2_b2_brc3_bn\nI1207 05:37:09.162349   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.162354   369 net.cpp:165] Memory required for data: 1367655600\nI1207 05:37:09.162365   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1207 05:37:09.162372   369 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1207 05:37:09.162379   369 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1207 05:37:09.162389   369 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1207 05:37:09.162400   369 net.cpp:150] Setting up L2_b2_brc3_relu\nI1207 05:37:09.162406   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 
05:37:09.162411   369 net.cpp:165] Memory required for data: 1370932400\nI1207 05:37:09.162416   369 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1207 05:37:09.162430   369 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1207 05:37:09.162436   369 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1207 05:37:09.162444   369 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1207 05:37:09.162832   369 net.cpp:150] Setting up L2_b2_brc3_conv\nI1207 05:37:09.162847   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.162853   369 net.cpp:165] Memory required for data: 1384039600\nI1207 05:37:09.162861   369 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1207 05:37:09.162870   369 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1207 05:37:09.162878   369 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1207 05:37:09.162884   369 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:37:09.162895   369 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1207 05:37:09.162922   369 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1207 05:37:09.162932   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.162937   369 net.cpp:165] Memory required for data: 1397146800\nI1207 05:37:09.162942   369 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.162950   369 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.162955   369 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1207 05:37:09.162966   369 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:37:09.162983   369 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:37:09.163038   369 net.cpp:150] Setting up 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:37:09.163049   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.163056   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.163061   369 net.cpp:165] Memory required for data: 1423361200\nI1207 05:37:09.163066   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1207 05:37:09.163082   369 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1207 05:37:09.163089   369 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:37:09.163096   369 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1207 05:37:09.163338   369 net.cpp:150] Setting up L2_b3_brc1_bn\nI1207 05:37:09.163352   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.163357   369 net.cpp:165] Memory required for data: 1436468400\nI1207 05:37:09.163388   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1207 05:37:09.163396   369 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1207 05:37:09.163403   369 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1207 05:37:09.163413   369 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1207 05:37:09.163424   369 net.cpp:150] Setting up L2_b3_brc1_relu\nI1207 05:37:09.163430   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.163435   369 net.cpp:165] Memory required for data: 1449575600\nI1207 05:37:09.163440   369 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1207 05:37:09.163450   369 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1207 05:37:09.163456   369 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1207 05:37:09.163475   369 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1207 05:37:09.163863   369 net.cpp:150] Setting up L2_b3_brc1_conv\nI1207 05:37:09.163877   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.163883   369 net.cpp:165] Memory required for data: 1452852400\nI1207 05:37:09.163892   369 layer_factory.hpp:77] Creating layer 
L2_b3_brc2_bn\nI1207 05:37:09.163900   369 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1207 05:37:09.163906   369 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1207 05:37:09.163918   369 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1207 05:37:09.164173   369 net.cpp:150] Setting up L2_b3_brc2_bn\nI1207 05:37:09.164187   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.164192   369 net.cpp:165] Memory required for data: 1456129200\nI1207 05:37:09.164202   369 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1207 05:37:09.164214   369 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1207 05:37:09.164221   369 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1207 05:37:09.164227   369 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1207 05:37:09.164237   369 net.cpp:150] Setting up L2_b3_brc2_relu\nI1207 05:37:09.164244   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.164249   369 net.cpp:165] Memory required for data: 1459406000\nI1207 05:37:09.164254   369 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1207 05:37:09.164264   369 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1207 05:37:09.164270   369 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1207 05:37:09.164281   369 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1207 05:37:09.164775   369 net.cpp:150] Setting up L2_b3_brc2_conv\nI1207 05:37:09.164790   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.164795   369 net.cpp:165] Memory required for data: 1462682800\nI1207 05:37:09.164804   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1207 05:37:09.164813   369 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1207 05:37:09.164819   369 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1207 05:37:09.164835   369 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1207 05:37:09.165089   369 net.cpp:150] Setting up L2_b3_brc3_bn\nI1207 05:37:09.165110   369 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI1207 05:37:09.165117   369 net.cpp:165] Memory required for data: 1465959600\nI1207 05:37:09.165127   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1207 05:37:09.165138   369 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1207 05:37:09.165143   369 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1207 05:37:09.165151   369 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1207 05:37:09.165161   369 net.cpp:150] Setting up L2_b3_brc3_relu\nI1207 05:37:09.165169   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.165172   369 net.cpp:165] Memory required for data: 1469236400\nI1207 05:37:09.165177   369 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1207 05:37:09.165189   369 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1207 05:37:09.165194   369 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1207 05:37:09.165206   369 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1207 05:37:09.165593   369 net.cpp:150] Setting up L2_b3_brc3_conv\nI1207 05:37:09.165608   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.165614   369 net.cpp:165] Memory required for data: 1482343600\nI1207 05:37:09.165623   369 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1207 05:37:09.165632   369 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1207 05:37:09.165638   369 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1207 05:37:09.165645   369 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:37:09.165653   369 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1207 05:37:09.165679   369 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1207 05:37:09.165689   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.165694   369 net.cpp:165] Memory required for data: 1495450800\nI1207 05:37:09.165699   369 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.165709   369 net.cpp:100] 
Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.165716   369 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1207 05:37:09.165726   369 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:37:09.165736   369 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:37:09.165783   369 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:37:09.165797   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.165804   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.165809   369 net.cpp:165] Memory required for data: 1521665200\nI1207 05:37:09.165814   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1207 05:37:09.165822   369 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1207 05:37:09.165828   369 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:37:09.165837   369 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1207 05:37:09.166075   369 net.cpp:150] Setting up L2_b4_brc1_bn\nI1207 05:37:09.166088   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.166092   369 net.cpp:165] Memory required for data: 1534772400\nI1207 05:37:09.166103   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1207 05:37:09.166111   369 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1207 05:37:09.166117   369 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1207 05:37:09.166127   369 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1207 05:37:09.166138   369 net.cpp:150] Setting up L2_b4_brc1_relu\nI1207 05:37:09.166146   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.166151   369 net.cpp:165] Memory required for data: 1547879600\nI1207 05:37:09.166155   369 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1207 
05:37:09.166168   369 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1207 05:37:09.166182   369 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1207 05:37:09.166191   369 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1207 05:37:09.166584   369 net.cpp:150] Setting up L2_b4_brc1_conv\nI1207 05:37:09.166599   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.166604   369 net.cpp:165] Memory required for data: 1551156400\nI1207 05:37:09.166613   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1207 05:37:09.166625   369 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1207 05:37:09.166631   369 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1207 05:37:09.166640   369 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1207 05:37:09.166895   369 net.cpp:150] Setting up L2_b4_brc2_bn\nI1207 05:37:09.166908   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.166913   369 net.cpp:165] Memory required for data: 1554433200\nI1207 05:37:09.166923   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1207 05:37:09.166932   369 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1207 05:37:09.166939   369 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1207 05:37:09.166945   369 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1207 05:37:09.166954   369 net.cpp:150] Setting up L2_b4_brc2_relu\nI1207 05:37:09.166961   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.166966   369 net.cpp:165] Memory required for data: 1557710000\nI1207 05:37:09.166971   369 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1207 05:37:09.166985   369 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1207 05:37:09.166990   369 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1207 05:37:09.167001   369 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1207 05:37:09.167492   369 net.cpp:150] Setting up L2_b4_brc2_conv\nI1207 05:37:09.167506   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 
05:37:09.167511   369 net.cpp:165] Memory required for data: 1560986800\nI1207 05:37:09.167520   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1207 05:37:09.167529   369 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1207 05:37:09.167538   369 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1207 05:37:09.167546   369 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1207 05:37:09.167798   369 net.cpp:150] Setting up L2_b4_brc3_bn\nI1207 05:37:09.167810   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.167815   369 net.cpp:165] Memory required for data: 1564263600\nI1207 05:37:09.167825   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1207 05:37:09.167834   369 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1207 05:37:09.167840   369 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1207 05:37:09.167847   369 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1207 05:37:09.167856   369 net.cpp:150] Setting up L2_b4_brc3_relu\nI1207 05:37:09.167863   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.167868   369 net.cpp:165] Memory required for data: 1567540400\nI1207 05:37:09.167873   369 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1207 05:37:09.167886   369 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1207 05:37:09.167892   369 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1207 05:37:09.167903   369 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1207 05:37:09.168287   369 net.cpp:150] Setting up L2_b4_brc3_conv\nI1207 05:37:09.168299   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168305   369 net.cpp:165] Memory required for data: 1580647600\nI1207 05:37:09.168313   369 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1207 05:37:09.168328   369 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1207 05:37:09.168334   369 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1207 05:37:09.168341   369 net.cpp:434] L2_b4_sum_eltwise <- 
L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:37:09.168349   369 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1207 05:37:09.168380   369 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1207 05:37:09.168388   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168401   369 net.cpp:165] Memory required for data: 1593754800\nI1207 05:37:09.168406   369 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.168414   369 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.168421   369 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1207 05:37:09.168431   369 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:37:09.168440   369 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:37:09.168498   369 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:37:09.168511   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168517   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168522   369 net.cpp:165] Memory required for data: 1619969200\nI1207 05:37:09.168527   369 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1207 05:37:09.168539   369 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1207 05:37:09.168545   369 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:37:09.168555   369 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1207 05:37:09.168795   369 net.cpp:150] Setting up L2_b5_brc1_bn\nI1207 05:37:09.168812   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168817   369 net.cpp:165] Memory required for data: 1633076400\nI1207 05:37:09.168826   369 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1207 05:37:09.168834   369 net.cpp:100] 
Creating Layer L2_b5_brc1_relu\nI1207 05:37:09.168840   369 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1207 05:37:09.168848   369 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1207 05:37:09.168857   369 net.cpp:150] Setting up L2_b5_brc1_relu\nI1207 05:37:09.168864   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.168869   369 net.cpp:165] Memory required for data: 1646183600\nI1207 05:37:09.168874   369 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1207 05:37:09.168884   369 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1207 05:37:09.168889   369 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1207 05:37:09.168900   369 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1207 05:37:09.169288   369 net.cpp:150] Setting up L2_b5_brc1_conv\nI1207 05:37:09.169302   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.169307   369 net.cpp:165] Memory required for data: 1649460400\nI1207 05:37:09.169317   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1207 05:37:09.169327   369 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1207 05:37:09.169334   369 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1207 05:37:09.169342   369 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1207 05:37:09.169608   369 net.cpp:150] Setting up L2_b5_brc2_bn\nI1207 05:37:09.169625   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.169631   369 net.cpp:165] Memory required for data: 1652737200\nI1207 05:37:09.169641   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1207 05:37:09.169648   369 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1207 05:37:09.169654   369 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1207 05:37:09.169661   369 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1207 05:37:09.169672   369 net.cpp:150] Setting up L2_b5_brc2_relu\nI1207 05:37:09.169678   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.169683   369 
net.cpp:165] Memory required for data: 1656014000\nI1207 05:37:09.169687   369 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1207 05:37:09.169697   369 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1207 05:37:09.169703   369 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1207 05:37:09.169714   369 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1207 05:37:09.170195   369 net.cpp:150] Setting up L2_b5_brc2_conv\nI1207 05:37:09.170217   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.170222   369 net.cpp:165] Memory required for data: 1659290800\nI1207 05:37:09.170231   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1207 05:37:09.170240   369 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1207 05:37:09.170246   369 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1207 05:37:09.170258   369 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1207 05:37:09.170518   369 net.cpp:150] Setting up L2_b5_brc3_bn\nI1207 05:37:09.170532   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.170537   369 net.cpp:165] Memory required for data: 1662567600\nI1207 05:37:09.170548   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1207 05:37:09.170562   369 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1207 05:37:09.170569   369 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1207 05:37:09.170577   369 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1207 05:37:09.170586   369 net.cpp:150] Setting up L2_b5_brc3_relu\nI1207 05:37:09.170593   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.170598   369 net.cpp:165] Memory required for data: 1665844400\nI1207 05:37:09.170603   369 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1207 05:37:09.170614   369 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1207 05:37:09.170619   369 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1207 05:37:09.170630   369 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1207 
05:37:09.171005   369 net.cpp:150] Setting up L2_b5_brc3_conv\nI1207 05:37:09.171018   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171023   369 net.cpp:165] Memory required for data: 1678951600\nI1207 05:37:09.171032   369 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1207 05:37:09.171041   369 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1207 05:37:09.171048   369 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1207 05:37:09.171056   369 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:37:09.171062   369 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1207 05:37:09.171092   369 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1207 05:37:09.171103   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171108   369 net.cpp:165] Memory required for data: 1692058800\nI1207 05:37:09.171113   369 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.171120   369 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.171126   369 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1207 05:37:09.171136   369 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:37:09.171146   369 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:37:09.171195   369 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:37:09.171210   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171216   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171221   369 net.cpp:165] Memory required for data: 1718273200\nI1207 05:37:09.171226   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1207 05:37:09.171234   369 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1207 
05:37:09.171241   369 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:37:09.171250   369 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1207 05:37:09.171504   369 net.cpp:150] Setting up L2_b6_brc1_bn\nI1207 05:37:09.171517   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171522   369 net.cpp:165] Memory required for data: 1731380400\nI1207 05:37:09.171533   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1207 05:37:09.171555   369 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1207 05:37:09.171568   369 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1207 05:37:09.171576   369 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1207 05:37:09.171586   369 net.cpp:150] Setting up L2_b6_brc1_relu\nI1207 05:37:09.171593   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.171598   369 net.cpp:165] Memory required for data: 1744487600\nI1207 05:37:09.171604   369 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1207 05:37:09.171617   369 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1207 05:37:09.171623   369 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1207 05:37:09.171635   369 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1207 05:37:09.172017   369 net.cpp:150] Setting up L2_b6_brc1_conv\nI1207 05:37:09.172031   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.172036   369 net.cpp:165] Memory required for data: 1747764400\nI1207 05:37:09.172045   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1207 05:37:09.172056   369 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1207 05:37:09.172063   369 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1207 05:37:09.172075   369 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1207 05:37:09.172333   369 net.cpp:150] Setting up L2_b6_brc2_bn\nI1207 05:37:09.172346   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.172351   369 net.cpp:165] Memory required for 
data: 1751041200\nI1207 05:37:09.172361   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1207 05:37:09.172369   369 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1207 05:37:09.172376   369 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1207 05:37:09.172384   369 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1207 05:37:09.172392   369 net.cpp:150] Setting up L2_b6_brc2_relu\nI1207 05:37:09.172399   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.172405   369 net.cpp:165] Memory required for data: 1754318000\nI1207 05:37:09.172410   369 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1207 05:37:09.172422   369 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1207 05:37:09.172428   369 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1207 05:37:09.172439   369 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1207 05:37:09.172929   369 net.cpp:150] Setting up L2_b6_brc2_conv\nI1207 05:37:09.172943   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.172948   369 net.cpp:165] Memory required for data: 1757594800\nI1207 05:37:09.172957   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1207 05:37:09.172971   369 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1207 05:37:09.172977   369 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1207 05:37:09.172992   369 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1207 05:37:09.173249   369 net.cpp:150] Setting up L2_b6_brc3_bn\nI1207 05:37:09.173264   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.173269   369 net.cpp:165] Memory required for data: 1760871600\nI1207 05:37:09.173279   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1207 05:37:09.173286   369 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1207 05:37:09.173292   369 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1207 05:37:09.173300   369 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1207 05:37:09.173308   369 net.cpp:150] 
Setting up L2_b6_brc3_relu\nI1207 05:37:09.173316   369 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1207 05:37:09.173321   369 net.cpp:165] Memory required for data: 1764148400\nI1207 05:37:09.173324   369 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1207 05:37:09.173337   369 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1207 05:37:09.173343   369 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1207 05:37:09.173352   369 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1207 05:37:09.173743   369 net.cpp:150] Setting up L2_b6_brc3_conv\nI1207 05:37:09.173758   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.173763   369 net.cpp:165] Memory required for data: 1777255600\nI1207 05:37:09.173779   369 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1207 05:37:09.173789   369 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1207 05:37:09.173794   369 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1207 05:37:09.173801   369 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:37:09.173812   369 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1207 05:37:09.173840   369 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1207 05:37:09.173851   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.173856   369 net.cpp:165] Memory required for data: 1790362800\nI1207 05:37:09.173861   369 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.173872   369 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.173878   369 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1207 05:37:09.173885   369 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:37:09.173895   369 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:37:09.173946   369 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:37:09.173959   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.173965   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.173969   369 net.cpp:165] Memory required for data: 1816577200\nI1207 05:37:09.173975   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1207 05:37:09.173985   369 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1207 05:37:09.173992   369 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:37:09.174000   369 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1207 05:37:09.174242   369 net.cpp:150] Setting up L3_b1_brc1_bn\nI1207 05:37:09.174255   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.174260   369 net.cpp:165] Memory required for data: 1829684400\nI1207 05:37:09.174270   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1207 05:37:09.174281   369 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1207 05:37:09.174288   369 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1207 05:37:09.174295   369 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1207 05:37:09.174304   369 net.cpp:150] Setting up L3_b1_brc1_relu\nI1207 05:37:09.174311   369 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1207 05:37:09.174316   369 net.cpp:165] Memory required for data: 1842791600\nI1207 05:37:09.174321   369 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1207 05:37:09.174331   369 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1207 05:37:09.174337   369 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1207 05:37:09.174348   369 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1207 05:37:09.174829   369 net.cpp:150] Setting up L3_b1_brc1_conv\nI1207 05:37:09.174844   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.174849   369 net.cpp:165] Memory 
required for data: 1844430000\nI1207 05:37:09.174857   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1207 05:37:09.174865   369 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1207 05:37:09.174872   369 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1207 05:37:09.174882   369 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1207 05:37:09.175148   369 net.cpp:150] Setting up L3_b1_brc2_bn\nI1207 05:37:09.175165   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.175170   369 net.cpp:165] Memory required for data: 1846068400\nI1207 05:37:09.175180   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1207 05:37:09.175189   369 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1207 05:37:09.175195   369 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1207 05:37:09.175202   369 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1207 05:37:09.175211   369 net.cpp:150] Setting up L3_b1_brc2_relu\nI1207 05:37:09.175225   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.175230   369 net.cpp:165] Memory required for data: 1847706800\nI1207 05:37:09.175235   369 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1207 05:37:09.175246   369 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1207 05:37:09.175252   369 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1207 05:37:09.175263   369 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1207 05:37:09.177311   369 net.cpp:150] Setting up L3_b1_brc2_conv\nI1207 05:37:09.177328   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.177335   369 net.cpp:165] Memory required for data: 1849345200\nI1207 05:37:09.177343   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1207 05:37:09.177357   369 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1207 05:37:09.177364   369 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1207 05:37:09.177373   369 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1207 05:37:09.177661   369 net.cpp:150] Setting up 
L3_b1_brc3_bn\nI1207 05:37:09.177676   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.177681   369 net.cpp:165] Memory required for data: 1850983600\nI1207 05:37:09.177692   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1207 05:37:09.177703   369 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1207 05:37:09.177711   369 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1207 05:37:09.177717   369 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1207 05:37:09.177727   369 net.cpp:150] Setting up L3_b1_brc3_relu\nI1207 05:37:09.177734   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.177739   369 net.cpp:165] Memory required for data: 1852622000\nI1207 05:37:09.177744   369 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1207 05:37:09.177757   369 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1207 05:37:09.177764   369 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1207 05:37:09.177775   369 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1207 05:37:09.178422   369 net.cpp:150] Setting up L3_b1_brc3_conv\nI1207 05:37:09.178437   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.178442   369 net.cpp:165] Memory required for data: 1859175600\nI1207 05:37:09.178452   369 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1207 05:37:09.178463   369 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1207 05:37:09.178477   369 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:37:09.178489   369 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1207 05:37:09.179428   369 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1207 05:37:09.179443   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.179448   369 net.cpp:165] Memory required for data: 1865729200\nI1207 05:37:09.179457   369 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1207 05:37:09.179472   369 net.cpp:100] Creating Layer 
L3_b1_sum_eltwise\nI1207 05:37:09.179479   369 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1207 05:37:09.179487   369 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1207 05:37:09.179496   369 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1207 05:37:09.179533   369 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1207 05:37:09.179546   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.179551   369 net.cpp:165] Memory required for data: 1872282800\nI1207 05:37:09.179556   369 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.179564   369 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.179574   369 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1207 05:37:09.179581   369 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:37:09.179591   369 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:37:09.179651   369 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:37:09.179664   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.179671   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.179675   369 net.cpp:165] Memory required for data: 1885390000\nI1207 05:37:09.179682   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1207 05:37:09.179689   369 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1207 05:37:09.179695   369 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:37:09.179707   369 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1207 05:37:09.179958   369 net.cpp:150] Setting up L3_b2_brc1_bn\nI1207 05:37:09.179971   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.179976   369 net.cpp:165] Memory required for data: 
1891943600\nI1207 05:37:09.179987   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1207 05:37:09.179998   369 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1207 05:37:09.180006   369 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1207 05:37:09.180012   369 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1207 05:37:09.180022   369 net.cpp:150] Setting up L3_b2_brc1_relu\nI1207 05:37:09.180029   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.180033   369 net.cpp:165] Memory required for data: 1898497200\nI1207 05:37:09.180038   369 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1207 05:37:09.180049   369 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1207 05:37:09.180055   369 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1207 05:37:09.180066   369 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1207 05:37:09.180718   369 net.cpp:150] Setting up L3_b2_brc1_conv\nI1207 05:37:09.180737   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.180742   369 net.cpp:165] Memory required for data: 1900135600\nI1207 05:37:09.180752   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1207 05:37:09.180760   369 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1207 05:37:09.180766   369 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1207 05:37:09.180778   369 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1207 05:37:09.181046   369 net.cpp:150] Setting up L3_b2_brc2_bn\nI1207 05:37:09.181059   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.181066   369 net.cpp:165] Memory required for data: 1901774000\nI1207 05:37:09.181076   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1207 05:37:09.181087   369 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1207 05:37:09.181094   369 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1207 05:37:09.181102   369 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1207 05:37:09.181111   369 net.cpp:150] Setting up 
L3_b2_brc2_relu\nI1207 05:37:09.181118   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.181123   369 net.cpp:165] Memory required for data: 1903412400\nI1207 05:37:09.181128   369 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1207 05:37:09.181138   369 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1207 05:37:09.181143   369 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1207 05:37:09.181156   369 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1207 05:37:09.182179   369 net.cpp:150] Setting up L3_b2_brc2_conv\nI1207 05:37:09.182195   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.182200   369 net.cpp:165] Memory required for data: 1905050800\nI1207 05:37:09.182209   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1207 05:37:09.182219   369 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1207 05:37:09.182224   369 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1207 05:37:09.182235   369 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1207 05:37:09.182518   369 net.cpp:150] Setting up L3_b2_brc3_bn\nI1207 05:37:09.182531   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.182536   369 net.cpp:165] Memory required for data: 1906689200\nI1207 05:37:09.182555   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1207 05:37:09.182564   369 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1207 05:37:09.182571   369 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1207 05:37:09.182579   369 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1207 05:37:09.182588   369 net.cpp:150] Setting up L3_b2_brc3_relu\nI1207 05:37:09.182595   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.182600   369 net.cpp:165] Memory required for data: 1908327600\nI1207 05:37:09.182605   369 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1207 05:37:09.182618   369 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1207 05:37:09.182624   369 net.cpp:434] L3_b2_brc3_conv <- 
L3_b2_brc3_bn_top\nI1207 05:37:09.182633   369 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1207 05:37:09.183253   369 net.cpp:150] Setting up L3_b2_brc3_conv\nI1207 05:37:09.183267   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183272   369 net.cpp:165] Memory required for data: 1914881200\nI1207 05:37:09.183281   369 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1207 05:37:09.183290   369 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1207 05:37:09.183296   369 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1207 05:37:09.183305   369 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:37:09.183317   369 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1207 05:37:09.183351   369 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1207 05:37:09.183367   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183372   369 net.cpp:165] Memory required for data: 1921434800\nI1207 05:37:09.183377   369 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.183384   369 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.183390   369 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1207 05:37:09.183398   369 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:37:09.183409   369 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:37:09.183460   369 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:37:09.183478   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183485   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183490   369 net.cpp:165] Memory required for data: 1934542000\nI1207 05:37:09.183495   369 layer_factory.hpp:77] Creating 
layer L3_b3_brc1_bn\nI1207 05:37:09.183506   369 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1207 05:37:09.183513   369 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:37:09.183521   369 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1207 05:37:09.183773   369 net.cpp:150] Setting up L3_b3_brc1_bn\nI1207 05:37:09.183785   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183790   369 net.cpp:165] Memory required for data: 1941095600\nI1207 05:37:09.183801   369 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1207 05:37:09.183809   369 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1207 05:37:09.183815   369 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1207 05:37:09.183822   369 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1207 05:37:09.183832   369 net.cpp:150] Setting up L3_b3_brc1_relu\nI1207 05:37:09.183840   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.183843   369 net.cpp:165] Memory required for data: 1947649200\nI1207 05:37:09.183848   369 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1207 05:37:09.183861   369 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1207 05:37:09.183868   369 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1207 05:37:09.183876   369 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1207 05:37:09.184511   369 net.cpp:150] Setting up L3_b3_brc1_conv\nI1207 05:37:09.184538   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.184543   369 net.cpp:165] Memory required for data: 1949287600\nI1207 05:37:09.184552   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1207 05:37:09.184564   369 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1207 05:37:09.184571   369 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1207 05:37:09.184579   369 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1207 05:37:09.184846   369 net.cpp:150] Setting up L3_b3_brc2_bn\nI1207 05:37:09.184860   369 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.184865   369 net.cpp:165] Memory required for data: 1950926000\nI1207 05:37:09.184875   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1207 05:37:09.184883   369 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1207 05:37:09.184890   369 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1207 05:37:09.184896   369 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1207 05:37:09.184906   369 net.cpp:150] Setting up L3_b3_brc2_relu\nI1207 05:37:09.184913   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.184917   369 net.cpp:165] Memory required for data: 1952564400\nI1207 05:37:09.184922   369 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1207 05:37:09.184936   369 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1207 05:37:09.184942   369 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1207 05:37:09.184953   369 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1207 05:37:09.185981   369 net.cpp:150] Setting up L3_b3_brc2_conv\nI1207 05:37:09.185997   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.186002   369 net.cpp:165] Memory required for data: 1954202800\nI1207 05:37:09.186010   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1207 05:37:09.186022   369 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1207 05:37:09.186028   369 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1207 05:37:09.186039   369 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1207 05:37:09.186303   369 net.cpp:150] Setting up L3_b3_brc3_bn\nI1207 05:37:09.186316   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.186321   369 net.cpp:165] Memory required for data: 1955841200\nI1207 05:37:09.186331   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1207 05:37:09.186341   369 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1207 05:37:09.186347   369 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1207 05:37:09.186353   369 net.cpp:395] 
L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1207 05:37:09.186362   369 net.cpp:150] Setting up L3_b3_brc3_relu\nI1207 05:37:09.186369   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.186374   369 net.cpp:165] Memory required for data: 1957479600\nI1207 05:37:09.186379   369 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1207 05:37:09.186393   369 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1207 05:37:09.186399   369 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1207 05:37:09.186410   369 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1207 05:37:09.187047   369 net.cpp:150] Setting up L3_b3_brc3_conv\nI1207 05:37:09.187062   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187067   369 net.cpp:165] Memory required for data: 1964033200\nI1207 05:37:09.187077   369 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1207 05:37:09.187089   369 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1207 05:37:09.187096   369 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1207 05:37:09.187103   369 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:37:09.187114   369 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1207 05:37:09.187149   369 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1207 05:37:09.187160   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187165   369 net.cpp:165] Memory required for data: 1970586800\nI1207 05:37:09.187170   369 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.187188   369 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.187196   369 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1207 05:37:09.187203   369 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:37:09.187212   369 net.cpp:408] 
L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:37:09.187264   369 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:37:09.187276   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187283   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187288   369 net.cpp:165] Memory required for data: 1983694000\nI1207 05:37:09.187294   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1207 05:37:09.187304   369 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1207 05:37:09.187310   369 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:37:09.187319   369 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1207 05:37:09.187573   369 net.cpp:150] Setting up L3_b4_brc1_bn\nI1207 05:37:09.187587   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187592   369 net.cpp:165] Memory required for data: 1990247600\nI1207 05:37:09.187603   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1207 05:37:09.187611   369 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1207 05:37:09.187618   369 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1207 05:37:09.187624   369 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1207 05:37:09.187634   369 net.cpp:150] Setting up L3_b4_brc1_relu\nI1207 05:37:09.187641   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.187645   369 net.cpp:165] Memory required for data: 1996801200\nI1207 05:37:09.187650   369 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1207 05:37:09.187664   369 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1207 05:37:09.187671   369 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1207 05:37:09.187683   369 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1207 05:37:09.188309   369 net.cpp:150] Setting up L3_b4_brc1_conv\nI1207 05:37:09.188323   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 
05:37:09.188328   369 net.cpp:165] Memory required for data: 1998439600\nI1207 05:37:09.188338   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1207 05:37:09.188349   369 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1207 05:37:09.188355   369 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1207 05:37:09.188364   369 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1207 05:37:09.188637   369 net.cpp:150] Setting up L3_b4_brc2_bn\nI1207 05:37:09.188650   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.188655   369 net.cpp:165] Memory required for data: 2000078000\nI1207 05:37:09.188666   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1207 05:37:09.188674   369 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1207 05:37:09.188680   369 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1207 05:37:09.188690   369 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1207 05:37:09.188700   369 net.cpp:150] Setting up L3_b4_brc2_relu\nI1207 05:37:09.188707   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.188712   369 net.cpp:165] Memory required for data: 2001716400\nI1207 05:37:09.188717   369 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1207 05:37:09.188730   369 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1207 05:37:09.188736   369 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1207 05:37:09.188745   369 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1207 05:37:09.189776   369 net.cpp:150] Setting up L3_b4_brc2_conv\nI1207 05:37:09.189792   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.189797   369 net.cpp:165] Memory required for data: 2003354800\nI1207 05:37:09.189806   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1207 05:37:09.189826   369 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1207 05:37:09.189832   369 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1207 05:37:09.189841   369 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1207 
05:37:09.190109   369 net.cpp:150] Setting up L3_b4_brc3_bn\nI1207 05:37:09.190124   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.190129   369 net.cpp:165] Memory required for data: 2004993200\nI1207 05:37:09.190138   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1207 05:37:09.190150   369 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1207 05:37:09.190156   369 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1207 05:37:09.190163   369 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1207 05:37:09.190173   369 net.cpp:150] Setting up L3_b4_brc3_relu\nI1207 05:37:09.190181   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.190186   369 net.cpp:165] Memory required for data: 2006631600\nI1207 05:37:09.190191   369 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1207 05:37:09.190203   369 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1207 05:37:09.190210   369 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1207 05:37:09.190220   369 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1207 05:37:09.190842   369 net.cpp:150] Setting up L3_b4_brc3_conv\nI1207 05:37:09.190857   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.190862   369 net.cpp:165] Memory required for data: 2013185200\nI1207 05:37:09.190871   369 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1207 05:37:09.190881   369 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1207 05:37:09.190887   369 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1207 05:37:09.190894   369 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:37:09.190902   369 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1207 05:37:09.190942   369 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1207 05:37:09.190953   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.190958   369 net.cpp:165] Memory required for data: 2019738800\nI1207 05:37:09.190963   369 
layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.190971   369 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.190978   369 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1207 05:37:09.190987   369 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:37:09.190997   369 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:37:09.191049   369 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:37:09.191061   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.191067   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.191072   369 net.cpp:165] Memory required for data: 2032846000\nI1207 05:37:09.191077   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1207 05:37:09.191085   369 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1207 05:37:09.191092   369 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:37:09.191102   369 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1207 05:37:09.191354   369 net.cpp:150] Setting up L3_b5_brc1_bn\nI1207 05:37:09.191368   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.191373   369 net.cpp:165] Memory required for data: 2039399600\nI1207 05:37:09.191383   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1207 05:37:09.191395   369 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1207 05:37:09.191401   369 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1207 05:37:09.191408   369 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1207 05:37:09.191418   369 net.cpp:150] Setting up L3_b5_brc1_relu\nI1207 05:37:09.191432   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.191438   369 net.cpp:165] Memory 
required for data: 2045953200\nI1207 05:37:09.191443   369 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1207 05:37:09.191453   369 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1207 05:37:09.191459   369 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1207 05:37:09.191478   369 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1207 05:37:09.192104   369 net.cpp:150] Setting up L3_b5_brc1_conv\nI1207 05:37:09.192119   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.192124   369 net.cpp:165] Memory required for data: 2047591600\nI1207 05:37:09.192133   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1207 05:37:09.192142   369 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1207 05:37:09.192148   369 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1207 05:37:09.192159   369 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1207 05:37:09.192422   369 net.cpp:150] Setting up L3_b5_brc2_bn\nI1207 05:37:09.192438   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.192443   369 net.cpp:165] Memory required for data: 2049230000\nI1207 05:37:09.192453   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1207 05:37:09.192462   369 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1207 05:37:09.192476   369 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1207 05:37:09.192482   369 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1207 05:37:09.192493   369 net.cpp:150] Setting up L3_b5_brc2_relu\nI1207 05:37:09.192499   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.192504   369 net.cpp:165] Memory required for data: 2050868400\nI1207 05:37:09.192509   369 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1207 05:37:09.192519   369 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1207 05:37:09.192525   369 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1207 05:37:09.192538   369 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1207 05:37:09.193569   369 net.cpp:150] 
Setting up L3_b5_brc2_conv\nI1207 05:37:09.193584   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.193589   369 net.cpp:165] Memory required for data: 2052506800\nI1207 05:37:09.193637   369 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1207 05:37:09.193653   369 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1207 05:37:09.193660   369 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1207 05:37:09.193668   369 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1207 05:37:09.193938   369 net.cpp:150] Setting up L3_b5_brc3_bn\nI1207 05:37:09.193953   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.193958   369 net.cpp:165] Memory required for data: 2054145200\nI1207 05:37:09.193967   369 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1207 05:37:09.193979   369 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1207 05:37:09.193985   369 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1207 05:37:09.193992   369 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1207 05:37:09.194002   369 net.cpp:150] Setting up L3_b5_brc3_relu\nI1207 05:37:09.194010   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.194015   369 net.cpp:165] Memory required for data: 2055783600\nI1207 05:37:09.194020   369 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1207 05:37:09.194030   369 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1207 05:37:09.194036   369 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1207 05:37:09.194046   369 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1207 05:37:09.194676   369 net.cpp:150] Setting up L3_b5_brc3_conv\nI1207 05:37:09.194691   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.194696   369 net.cpp:165] Memory required for data: 2062337200\nI1207 05:37:09.194705   369 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1207 05:37:09.194715   369 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1207 05:37:09.194721   369 net.cpp:434] 
L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1207 05:37:09.194736   369 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:37:09.194744   369 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1207 05:37:09.194783   369 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1207 05:37:09.194793   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.194798   369 net.cpp:165] Memory required for data: 2068890800\nI1207 05:37:09.194804   369 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.194811   369 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.194818   369 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1207 05:37:09.194828   369 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:37:09.194838   369 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:37:09.194887   369 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:37:09.194902   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.194910   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.194914   369 net.cpp:165] Memory required for data: 2081998000\nI1207 05:37:09.194919   369 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1207 05:37:09.194927   369 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1207 05:37:09.194933   369 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:37:09.194946   369 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1207 05:37:09.195199   369 net.cpp:150] Setting up L3_b6_brc1_bn\nI1207 05:37:09.195212   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.195217   369 net.cpp:165] Memory required for data: 2088551600\nI1207 05:37:09.195228   
369 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1207 05:37:09.195238   369 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1207 05:37:09.195245   369 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1207 05:37:09.195252   369 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1207 05:37:09.195262   369 net.cpp:150] Setting up L3_b6_brc1_relu\nI1207 05:37:09.195269   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.195274   369 net.cpp:165] Memory required for data: 2095105200\nI1207 05:37:09.195279   369 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1207 05:37:09.195291   369 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1207 05:37:09.195296   369 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1207 05:37:09.195307   369 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1207 05:37:09.195955   369 net.cpp:150] Setting up L3_b6_brc1_conv\nI1207 05:37:09.195971   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.195976   369 net.cpp:165] Memory required for data: 2096743600\nI1207 05:37:09.195984   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1207 05:37:09.195993   369 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1207 05:37:09.196000   369 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1207 05:37:09.196008   369 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1207 05:37:09.196280   369 net.cpp:150] Setting up L3_b6_brc2_bn\nI1207 05:37:09.196293   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.196298   369 net.cpp:165] Memory required for data: 2098382000\nI1207 05:37:09.196308   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1207 05:37:09.196319   369 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1207 05:37:09.196326   369 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1207 05:37:09.196333   369 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1207 05:37:09.196344   369 net.cpp:150] Setting up L3_b6_brc2_relu\nI1207 05:37:09.196350 
  369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.196355   369 net.cpp:165] Memory required for data: 2100020400\nI1207 05:37:09.196360   369 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1207 05:37:09.196382   369 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1207 05:37:09.196388   369 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1207 05:37:09.196399   369 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1207 05:37:09.197438   369 net.cpp:150] Setting up L3_b6_brc2_conv\nI1207 05:37:09.197453   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.197458   369 net.cpp:165] Memory required for data: 2101658800\nI1207 05:37:09.197473   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1207 05:37:09.197482   369 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1207 05:37:09.197489   369 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1207 05:37:09.197500   369 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1207 05:37:09.197777   369 net.cpp:150] Setting up L3_b6_brc3_bn\nI1207 05:37:09.197790   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.197795   369 net.cpp:165] Memory required for data: 2103297200\nI1207 05:37:09.197806   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1207 05:37:09.197814   369 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1207 05:37:09.197820   369 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1207 05:37:09.197827   369 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1207 05:37:09.197837   369 net.cpp:150] Setting up L3_b6_brc3_relu\nI1207 05:37:09.197844   369 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1207 05:37:09.197849   369 net.cpp:165] Memory required for data: 2104935600\nI1207 05:37:09.197854   369 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1207 05:37:09.197872   369 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1207 05:37:09.197878   369 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1207 05:37:09.197887   369 
net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1207 05:37:09.198516   369 net.cpp:150] Setting up L3_b6_brc3_conv\nI1207 05:37:09.198531   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.198536   369 net.cpp:165] Memory required for data: 2111489200\nI1207 05:37:09.198545   369 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1207 05:37:09.198554   369 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1207 05:37:09.198561   369 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1207 05:37:09.198568   369 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:37:09.198581   369 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1207 05:37:09.198616   369 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1207 05:37:09.198632   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.198637   369 net.cpp:165] Memory required for data: 2118042800\nI1207 05:37:09.198642   369 layer_factory.hpp:77] Creating layer post_bn\nI1207 05:37:09.198650   369 net.cpp:100] Creating Layer post_bn\nI1207 05:37:09.198657   369 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1207 05:37:09.198668   369 net.cpp:408] post_bn -> post_bn_top\nI1207 05:37:09.198922   369 net.cpp:150] Setting up post_bn\nI1207 05:37:09.198935   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.198940   369 net.cpp:165] Memory required for data: 2124596400\nI1207 05:37:09.198951   369 layer_factory.hpp:77] Creating layer post_relu\nI1207 05:37:09.198961   369 net.cpp:100] Creating Layer post_relu\nI1207 05:37:09.198968   369 net.cpp:434] post_relu <- post_bn_top\nI1207 05:37:09.198976   369 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1207 05:37:09.198984   369 net.cpp:150] Setting up post_relu\nI1207 05:37:09.198992   369 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1207 05:37:09.198997   369 net.cpp:165] Memory required for data: 2131150000\nI1207 05:37:09.199002   369 layer_factory.hpp:77] Creating 
layer post_pool\nI1207 05:37:09.199010   369 net.cpp:100] Creating Layer post_pool\nI1207 05:37:09.199015   369 net.cpp:434] post_pool <- post_bn_top\nI1207 05:37:09.199023   369 net.cpp:408] post_pool -> post_pool\nI1207 05:37:09.199059   369 net.cpp:150] Setting up post_pool\nI1207 05:37:09.199074   369 net.cpp:157] Top shape: 100 256 1 1 (25600)\nI1207 05:37:09.199085   369 net.cpp:165] Memory required for data: 2131252400\nI1207 05:37:09.199091   369 layer_factory.hpp:77] Creating layer post_FC\nI1207 05:37:09.199103   369 net.cpp:100] Creating Layer post_FC\nI1207 05:37:09.199110   369 net.cpp:434] post_FC <- post_pool\nI1207 05:37:09.199120   369 net.cpp:408] post_FC -> post_FC_top\nI1207 05:37:09.199301   369 net.cpp:150] Setting up post_FC\nI1207 05:37:09.199317   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.199322   369 net.cpp:165] Memory required for data: 2131256400\nI1207 05:37:09.199331   369 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1207 05:37:09.199339   369 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1207 05:37:09.199345   369 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1207 05:37:09.199353   369 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1207 05:37:09.199363   369 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1207 05:37:09.199417   369 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1207 05:37:09.199429   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.199436   369 net.cpp:157] Top shape: 100 10 (1000)\nI1207 05:37:09.199440   369 net.cpp:165] Memory required for data: 2131264400\nI1207 05:37:09.199445   369 layer_factory.hpp:77] Creating layer accuracy\nI1207 05:37:09.199457   369 net.cpp:100] Creating Layer accuracy\nI1207 05:37:09.199463   369 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1207 05:37:09.199476   369 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1207 05:37:09.199484   
369 net.cpp:408] accuracy -> accuracy\nI1207 05:37:09.199497   369 net.cpp:150] Setting up accuracy\nI1207 05:37:09.199506   369 net.cpp:157] Top shape: (1)\nI1207 05:37:09.199510   369 net.cpp:165] Memory required for data: 2131264404\nI1207 05:37:09.199515   369 layer_factory.hpp:77] Creating layer loss\nI1207 05:37:09.199522   369 net.cpp:100] Creating Layer loss\nI1207 05:37:09.199527   369 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1207 05:37:09.199534   369 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1207 05:37:09.199542   369 net.cpp:408] loss -> loss\nI1207 05:37:09.199553   369 layer_factory.hpp:77] Creating layer loss\nI1207 05:37:09.199669   369 net.cpp:150] Setting up loss\nI1207 05:37:09.199686   369 net.cpp:157] Top shape: (1)\nI1207 05:37:09.199690   369 net.cpp:160]     with loss weight 1\nI1207 05:37:09.199707   369 net.cpp:165] Memory required for data: 2131264408\nI1207 05:37:09.199712   369 net.cpp:226] loss needs backward computation.\nI1207 05:37:09.199718   369 net.cpp:228] accuracy does not need backward computation.\nI1207 05:37:09.199724   369 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1207 05:37:09.199729   369 net.cpp:226] post_FC needs backward computation.\nI1207 05:37:09.199734   369 net.cpp:226] post_pool needs backward computation.\nI1207 05:37:09.199739   369 net.cpp:226] post_relu needs backward computation.\nI1207 05:37:09.199744   369 net.cpp:226] post_bn needs backward computation.\nI1207 05:37:09.199749   369 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.199755   369 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1207 05:37:09.199760   369 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1207 05:37:09.199764   369 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1207 05:37:09.199769   369 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1207 05:37:09.199774   369 net.cpp:226] L3_b6_brc2_relu needs backward 
computation.\nI1207 05:37:09.199779   369 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1207 05:37:09.199784   369 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1207 05:37:09.199790   369 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1207 05:37:09.199795   369 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1207 05:37:09.199800   369 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.199805   369 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.199820   369 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1207 05:37:09.199826   369 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1207 05:37:09.199831   369 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1207 05:37:09.199836   369 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1207 05:37:09.199841   369 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1207 05:37:09.199846   369 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1207 05:37:09.199851   369 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1207 05:37:09.199856   369 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1207 05:37:09.199862   369 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1207 05:37:09.199867   369 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.199872   369 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.199877   369 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1207 05:37:09.199882   369 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1207 05:37:09.199887   369 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1207 05:37:09.199892   369 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1207 05:37:09.199898   369 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1207 05:37:09.199903   369 
net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1207 05:37:09.199908   369 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1207 05:37:09.199913   369 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1207 05:37:09.199918   369 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1207 05:37:09.199923   369 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.199928   369 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.199934   369 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1207 05:37:09.199939   369 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1207 05:37:09.199944   369 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1207 05:37:09.199950   369 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1207 05:37:09.199955   369 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1207 05:37:09.199960   369 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1207 05:37:09.199965   369 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1207 05:37:09.199970   369 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1207 05:37:09.199975   369 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1207 05:37:09.199981   369 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.199986   369 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.199992   369 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1207 05:37:09.199997   369 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1207 05:37:09.200002   369 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1207 05:37:09.200007   369 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1207 05:37:09.200012   369 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1207 05:37:09.200017   369 net.cpp:226] L3_b2_brc2_bn needs backward 
computation.\nI1207 05:37:09.200026   369 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1207 05:37:09.200031   369 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1207 05:37:09.200037   369 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1207 05:37:09.200042   369 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200047   369 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.200053   369 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.200059   369 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1207 05:37:09.200064   369 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1207 05:37:09.200080   369 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1207 05:37:09.200085   369 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1207 05:37:09.200091   369 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1207 05:37:09.200096   369 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1207 05:37:09.200101   369 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1207 05:37:09.200107   369 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1207 05:37:09.200112   369 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1207 05:37:09.200119   369 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200124   369 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.200129   369 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1207 05:37:09.200134   369 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1207 05:37:09.200139   369 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1207 05:37:09.200145   369 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1207 05:37:09.200150   369 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1207 05:37:09.200155   369 
net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1207 05:37:09.200160   369 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1207 05:37:09.200166   369 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1207 05:37:09.200171   369 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1207 05:37:09.200176   369 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200182   369 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.200188   369 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1207 05:37:09.200193   369 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1207 05:37:09.200198   369 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1207 05:37:09.200203   369 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1207 05:37:09.200209   369 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1207 05:37:09.200214   369 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1207 05:37:09.200220   369 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1207 05:37:09.200225   369 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1207 05:37:09.200230   369 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1207 05:37:09.200237   369 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200242   369 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.200248   369 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1207 05:37:09.200253   369 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1207 05:37:09.200258   369 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1207 05:37:09.200263   369 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1207 05:37:09.200268   369 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1207 05:37:09.200273   369 net.cpp:226] L2_b4_brc2_bn needs backward 
computation.\nI1207 05:37:09.200279   369 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1207 05:37:09.200289   369 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1207 05:37:09.200294   369 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1207 05:37:09.200299   369 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200304   369 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.200310   369 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1207 05:37:09.200315   369 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1207 05:37:09.200320   369 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1207 05:37:09.200326   369 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1207 05:37:09.200336   369 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1207 05:37:09.200342   369 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1207 05:37:09.200348   369 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1207 05:37:09.200353   369 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1207 05:37:09.200358   369 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1207 05:37:09.200364   369 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200371   369 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.200376   369 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1207 05:37:09.200381   369 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1207 05:37:09.200387   369 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1207 05:37:09.200392   369 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1207 05:37:09.200397   369 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1207 05:37:09.200403   369 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1207 05:37:09.200408   369 
net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1207 05:37:09.200413   369 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1207 05:37:09.200419   369 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1207 05:37:09.200424   369 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200429   369 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.200436   369 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.200441   369 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1207 05:37:09.200446   369 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1207 05:37:09.200451   369 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1207 05:37:09.200458   369 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1207 05:37:09.200462   369 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1207 05:37:09.200474   369 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1207 05:37:09.200480   369 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1207 05:37:09.200486   369 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1207 05:37:09.200491   369 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1207 05:37:09.200496   369 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200502   369 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1207 05:37:09.200508   369 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1207 05:37:09.200515   369 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1207 05:37:09.200520   369 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1207 05:37:09.200525   369 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1207 05:37:09.200531   369 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1207 05:37:09.200536   369 net.cpp:226] L1_b6_brc2_bn needs backward 
computation.\nI1207 05:37:09.200541   369 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1207 05:37:09.200547   369 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1207 05:37:09.200552   369 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1207 05:37:09.200557   369 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200563   369 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1207 05:37:09.200569   369 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1207 05:37:09.200574   369 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1207 05:37:09.200580   369 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1207 05:37:09.200585   369 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1207 05:37:09.200592   369 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1207 05:37:09.200597   369 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1207 05:37:09.200608   369 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1207 05:37:09.200614   369 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1207 05:37:09.200619   369 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1207 05:37:09.200625   369 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200634   369 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1207 05:37:09.200641   369 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1207 05:37:09.200646   369 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1207 05:37:09.200652   369 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1207 05:37:09.200659   369 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1207 05:37:09.200664   369 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1207 05:37:09.200670   369 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1207 05:37:09.200675   369 
net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1207 05:37:09.200681   369 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1207 05:37:09.200686   369 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1207 05:37:09.200692   369 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200697   369 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1207 05:37:09.200704   369 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1207 05:37:09.200709   369 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1207 05:37:09.200716   369 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1207 05:37:09.200721   369 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1207 05:37:09.200726   369 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1207 05:37:09.200731   369 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1207 05:37:09.200737   369 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1207 05:37:09.200742   369 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1207 05:37:09.200748   369 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1207 05:37:09.200754   369 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200759   369 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1207 05:37:09.200767   369 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1207 05:37:09.200773   369 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1207 05:37:09.200778   369 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1207 05:37:09.200783   369 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1207 05:37:09.200788   369 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1207 05:37:09.200794   369 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1207 05:37:09.200800   369 net.cpp:226] L1_b2_brc1_conv needs backward 
computation.\nI1207 05:37:09.200805   369 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1207 05:37:09.200810   369 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1207 05:37:09.200816   369 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:37:09.200822   369 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1207 05:37:09.200829   369 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1207 05:37:09.200834   369 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1207 05:37:09.200840   369 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1207 05:37:09.200845   369 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1207 05:37:09.200851   369 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1207 05:37:09.200857   369 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1207 05:37:09.200862   369 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1207 05:37:09.200868   369 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1207 05:37:09.200875   369 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1207 05:37:09.200884   369 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1207 05:37:09.200891   369 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1207 05:37:09.200896   369 net.cpp:226] pre_conv needs backward computation.\nI1207 05:37:09.200903   369 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1207 05:37:09.200911   369 net.cpp:228] dataLayer does not need backward computation.\nI1207 05:37:09.200914   369 net.cpp:270] This network produces output accuracy\nI1207 05:37:09.200922   369 net.cpp:270] This network produces output loss\nI1207 05:37:09.201174   369 net.cpp:283] Network initialization done.\nI1207 05:37:09.201771   369 solver.cpp:60] Solver scaffolding done.\nI1207 05:37:09.421263   369 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 
0:4\nI1207 05:37:09.771769   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:09.771818   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:09.777562   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:10.431900   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:10.431969   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:10.439298   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:11.175454   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:11.175526   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:11.183271   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:11.549769   369 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1207 05:37:11.996196   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:11.996261   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:12.004724   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:12.913663   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:12.913735   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:12.923285   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:13.916440   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: 
examples/sc/architectures/arch.prototxt\nI1207 05:37:13.916512   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:13.926714   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:15.015717   369 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:37:15.015767   369 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:37:15.026773   369 data_layer.cpp:41] output data size: 100,3,32,32\nI1207 05:37:15.071105   398 blocking_queue.cpp:50] Waiting for data\nI1207 05:37:15.124532   395 blocking_queue.cpp:50] Waiting for data\nI1207 05:37:15.627777   369 parallel.cpp:425] Starting Optimization\nI1207 05:37:15.629091   369 solver.cpp:279] Solving Cifar-Resnet\nI1207 05:37:15.629110   369 solver.cpp:280] Learning Rate Policy: triangular\nI1207 05:37:15.632608   369 solver.cpp:337] Iteration 0, Testing net (#0)\nI1207 05:38:08.220774   369 solver.cpp:404]     Test net output #0: accuracy = 0.09775\nI1207 05:38:08.221101   369 solver.cpp:404]     Test net output #1: loss = 2.45742 (* 1 = 2.45742 loss)\nI1207 05:38:10.952322   369 solver.cpp:228] Iteration 0, loss = 2.47378\nI1207 05:38:10.952364   369 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 05:38:10.952381   369 solver.cpp:244]     Train net output #1: loss = 2.47378 (* 1 = 2.47378 loss)\nI1207 05:38:11.000061   369 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1207 05:39:43.539911   369 solver.cpp:337] Iteration 100, Testing net (#0)\nI1207 05:40:36.444826   369 solver.cpp:404]     Test net output #0: accuracy = 0.2738\nI1207 05:40:36.445112   369 solver.cpp:404]     Test net output #1: loss = 2.01514 (* 1 = 2.01514 loss)\nI1207 05:40:37.317829   369 solver.cpp:228] Iteration 100, loss = 2.02566\nI1207 05:40:37.317881   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 05:40:37.317899   369 
solver.cpp:244]     Train net output #1: loss = 2.02566 (* 1 = 2.02566 loss)\nI1207 05:40:37.389235   369 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1207 05:42:10.313926   369 solver.cpp:337] Iteration 200, Testing net (#0)\nI1207 05:43:03.224663   369 solver.cpp:404]     Test net output #0: accuracy = 0.3059\nI1207 05:43:03.224958   369 solver.cpp:404]     Test net output #1: loss = 1.92391 (* 1 = 1.92391 loss)\nI1207 05:43:04.097414   369 solver.cpp:228] Iteration 200, loss = 1.93177\nI1207 05:43:04.097465   369 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1207 05:43:04.097484   369 solver.cpp:244]     Train net output #1: loss = 1.93177 (* 1 = 1.93177 loss)\nI1207 05:43:04.176160   369 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1207 05:44:37.155108   369 solver.cpp:337] Iteration 300, Testing net (#0)\nI1207 05:45:30.084540   369 solver.cpp:404]     Test net output #0: accuracy = 0.3382\nI1207 05:45:30.084825   369 solver.cpp:404]     Test net output #1: loss = 1.84043 (* 1 = 1.84043 loss)\nI1207 05:45:30.957430   369 solver.cpp:228] Iteration 300, loss = 1.80525\nI1207 05:45:30.957479   369 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI1207 05:45:30.957495   369 solver.cpp:244]     Train net output #1: loss = 1.80525 (* 1 = 1.80525 loss)\nI1207 05:45:31.027868   369 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1207 05:47:04.000329   369 solver.cpp:337] Iteration 400, Testing net (#0)\nI1207 05:47:56.945699   369 solver.cpp:404]     Test net output #0: accuracy = 0.3577\nI1207 05:47:56.945967   369 solver.cpp:404]     Test net output #1: loss = 1.78424 (* 1 = 1.78424 loss)\nI1207 05:47:57.819098   369 solver.cpp:228] Iteration 400, loss = 1.75644\nI1207 05:47:57.819154   369 solver.cpp:244]     Train net output #0: accuracy = 0.33\nI1207 05:47:57.819172   369 solver.cpp:244]     Train net output #1: loss = 1.75644 (* 1 = 1.75644 loss)\nI1207 05:47:57.893234   369 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1207 
05:49:30.916252   369 solver.cpp:337] Iteration 500, Testing net (#0)\nI1207 05:50:23.875748   369 solver.cpp:404]     Test net output #0: accuracy = 0.38705\nI1207 05:50:23.876026   369 solver.cpp:404]     Test net output #1: loss = 1.72083 (* 1 = 1.72083 loss)\nI1207 05:50:24.749431   369 solver.cpp:228] Iteration 500, loss = 1.70761\nI1207 05:50:24.749481   369 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1207 05:50:24.749500   369 solver.cpp:244]     Train net output #1: loss = 1.70761 (* 1 = 1.70761 loss)\nI1207 05:50:24.822966   369 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1207 05:51:57.820849   369 solver.cpp:337] Iteration 600, Testing net (#0)\nI1207 05:52:50.768782   369 solver.cpp:404]     Test net output #0: accuracy = 0.39965\nI1207 05:52:50.769064   369 solver.cpp:404]     Test net output #1: loss = 1.67321 (* 1 = 1.67321 loss)\nI1207 05:52:51.642107   369 solver.cpp:228] Iteration 600, loss = 1.75469\nI1207 05:52:51.642154   369 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 05:52:51.642172   369 solver.cpp:244]     Train net output #1: loss = 1.75469 (* 1 = 1.75469 loss)\nI1207 05:52:51.716331   369 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1207 05:54:24.713359   369 solver.cpp:337] Iteration 700, Testing net (#0)\nI1207 05:55:17.680402   369 solver.cpp:404]     Test net output #0: accuracy = 0.40955\nI1207 05:55:17.680670   369 solver.cpp:404]     Test net output #1: loss = 1.64519 (* 1 = 1.64519 loss)\nI1207 05:55:18.553750   369 solver.cpp:228] Iteration 700, loss = 1.75484\nI1207 05:55:18.553799   369 solver.cpp:244]     Train net output #0: accuracy = 0.35\nI1207 05:55:18.553817   369 solver.cpp:244]     Train net output #1: loss = 1.75484 (* 1 = 1.75484 loss)\nI1207 05:55:18.629246   369 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1207 05:56:51.665659   369 solver.cpp:337] Iteration 800, Testing net (#0)\nI1207 05:57:44.631422   369 solver.cpp:404]     Test net output #0: accuracy = 
0.41435\nI1207 05:57:44.631716   369 solver.cpp:404]     Test net output #1: loss = 1.61079 (* 1 = 1.61079 loss)\nI1207 05:57:45.504654   369 solver.cpp:228] Iteration 800, loss = 1.59335\nI1207 05:57:45.504704   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 05:57:45.504721   369 solver.cpp:244]     Train net output #1: loss = 1.59335 (* 1 = 1.59335 loss)\nI1207 05:57:45.584867   369 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1207 05:58:50.378731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07415 > 2) by scale factor 0.964251\nI1207 05:59:18.594264   369 solver.cpp:337] Iteration 900, Testing net (#0)\nI1207 06:00:11.558646   369 solver.cpp:404]     Test net output #0: accuracy = 0.4125\nI1207 06:00:11.558930   369 solver.cpp:404]     Test net output #1: loss = 1.65007 (* 1 = 1.65007 loss)\nI1207 06:00:12.431761   369 solver.cpp:228] Iteration 900, loss = 1.56428\nI1207 06:00:12.431808   369 solver.cpp:244]     Train net output #0: accuracy = 0.36\nI1207 06:00:12.431825   369 solver.cpp:244]     Train net output #1: loss = 1.56428 (* 1 = 1.56428 loss)\nI1207 06:00:12.510174   369 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1207 06:00:12.519954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23881 > 2) by scale factor 0.893334\nI1207 06:01:45.537068   369 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1207 06:02:38.497556   369 solver.cpp:404]     Test net output #0: accuracy = 0.43195\nI1207 06:02:38.497861   369 solver.cpp:404]     Test net output #1: loss = 1.56942 (* 1 = 1.56942 loss)\nI1207 06:02:39.371332   369 solver.cpp:228] Iteration 1000, loss = 1.42361\nI1207 06:02:39.371381   369 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1207 06:02:39.371398   369 solver.cpp:244]     Train net output #1: loss = 1.42361 (* 1 = 1.42361 loss)\nI1207 06:02:39.447249   369 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1207 06:04:12.519198   369 solver.cpp:337] 
Iteration 1100, Testing net (#0)\nI1207 06:05:05.484213   369 solver.cpp:404]     Test net output #0: accuracy = 0.44365\nI1207 06:05:05.484477   369 solver.cpp:404]     Test net output #1: loss = 1.5389 (* 1 = 1.5389 loss)\nI1207 06:05:06.357363   369 solver.cpp:228] Iteration 1100, loss = 1.51772\nI1207 06:05:06.357419   369 solver.cpp:244]     Train net output #0: accuracy = 0.47\nI1207 06:05:06.357437   369 solver.cpp:244]     Train net output #1: loss = 1.51772 (* 1 = 1.51772 loss)\nI1207 06:05:06.433176   369 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1207 06:05:21.446313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12464 > 2) by scale factor 0.941335\nI1207 06:05:22.386493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26392 > 2) by scale factor 0.883425\nI1207 06:05:24.263854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05095 > 2) by scale factor 0.975157\nI1207 06:05:27.078902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13991 > 2) by scale factor 0.93462\nI1207 06:05:46.791534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29814 > 2) by scale factor 0.870269\nI1207 06:06:39.450119   369 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1207 06:07:32.418998   369 solver.cpp:404]     Test net output #0: accuracy = 0.4546\nI1207 06:07:32.419298   369 solver.cpp:404]     Test net output #1: loss = 1.50277 (* 1 = 1.50277 loss)\nI1207 06:07:33.292330   369 solver.cpp:228] Iteration 1200, loss = 1.5714\nI1207 06:07:33.292378   369 solver.cpp:244]     Train net output #0: accuracy = 0.41\nI1207 06:07:33.292397   369 solver.cpp:244]     Train net output #1: loss = 1.5714 (* 1 = 1.5714 loss)\nI1207 06:07:33.362458   369 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1207 06:09:06.393383   369 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1207 06:09:59.364718   369 solver.cpp:404]     Test 
net output #0: accuracy = 0.44605\nI1207 06:09:59.364974   369 solver.cpp:404]     Test net output #1: loss = 1.50613 (* 1 = 1.50613 loss)\nI1207 06:10:00.238059   369 solver.cpp:228] Iteration 1300, loss = 1.35871\nI1207 06:10:00.238107   369 solver.cpp:244]     Train net output #0: accuracy = 0.51\nI1207 06:10:00.238126   369 solver.cpp:244]     Train net output #1: loss = 1.35871 (* 1 = 1.35871 loss)\nI1207 06:10:00.317222   369 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1207 06:11:33.343633   369 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1207 06:12:26.308118   369 solver.cpp:404]     Test net output #0: accuracy = 0.4295\nI1207 06:12:26.308380   369 solver.cpp:404]     Test net output #1: loss = 1.55943 (* 1 = 1.55943 loss)\nI1207 06:12:27.181219   369 solver.cpp:228] Iteration 1400, loss = 1.55634\nI1207 06:12:27.181275   369 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI1207 06:12:27.181294   369 solver.cpp:244]     Train net output #1: loss = 1.55634 (* 1 = 1.55634 loss)\nI1207 06:12:27.257194   369 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1207 06:12:52.579288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23005 > 2) by scale factor 0.896841\nI1207 06:13:55.568015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64504 > 2) by scale factor 0.756131\nI1207 06:14:00.281275   369 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1207 06:14:53.259666   369 solver.cpp:404]     Test net output #0: accuracy = 0.43485\nI1207 06:14:53.259966   369 solver.cpp:404]     Test net output #1: loss = 1.54571 (* 1 = 1.54571 loss)\nI1207 06:14:54.132973   369 solver.cpp:228] Iteration 1500, loss = 1.42475\nI1207 06:14:54.133031   369 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1207 06:14:54.133050   369 solver.cpp:244]     Train net output #1: loss = 1.42475 (* 1 = 1.42475 loss)\nI1207 06:14:54.206899   369 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1207 
06:15:03.592249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22295 > 2) by scale factor 0.899706\nI1207 06:15:06.407658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21006 > 2) by scale factor 0.904952\nI1207 06:16:27.219300   369 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1207 06:17:20.203043   369 solver.cpp:404]     Test net output #0: accuracy = 0.45435\nI1207 06:17:20.203337   369 solver.cpp:404]     Test net output #1: loss = 1.49059 (* 1 = 1.49059 loss)\nI1207 06:17:21.076462   369 solver.cpp:228] Iteration 1600, loss = 1.47932\nI1207 06:17:21.076511   369 solver.cpp:244]     Train net output #0: accuracy = 0.47\nI1207 06:17:21.076529   369 solver.cpp:244]     Train net output #1: loss = 1.47932 (* 1 = 1.47932 loss)\nI1207 06:17:21.147933   369 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1207 06:17:43.656931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50745 > 2) by scale factor 0.797622\nI1207 06:18:43.844257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10588 > 2) by scale factor 0.949722\nI1207 06:18:44.787426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41323 > 2) by scale factor 0.828763\nI1207 06:18:54.205796   369 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1207 06:19:47.179448   369 solver.cpp:404]     Test net output #0: accuracy = 0.4295\nI1207 06:19:47.179736   369 solver.cpp:404]     Test net output #1: loss = 1.56161 (* 1 = 1.56161 loss)\nI1207 06:19:48.053182   369 solver.cpp:228] Iteration 1700, loss = 1.71158\nI1207 06:19:48.053238   369 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1207 06:19:48.053256   369 solver.cpp:244]     Train net output #1: loss = 1.71158 (* 1 = 1.71158 loss)\nI1207 06:19:48.129667   369 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1207 06:21:21.173233   369 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1207 
06:22:14.147485   369 solver.cpp:404]     Test net output #0: accuracy = 0.46545\nI1207 06:22:14.147758   369 solver.cpp:404]     Test net output #1: loss = 1.47578 (* 1 = 1.47578 loss)\nI1207 06:22:15.020665   369 solver.cpp:228] Iteration 1800, loss = 1.35821\nI1207 06:22:15.020726   369 solver.cpp:244]     Train net output #0: accuracy = 0.5\nI1207 06:22:15.020745   369 solver.cpp:244]     Train net output #1: loss = 1.35821 (* 1 = 1.35821 loss)\nI1207 06:22:15.093142   369 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1207 06:23:48.132215   369 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1207 06:24:41.114792   369 solver.cpp:404]     Test net output #0: accuracy = 0.46285\nI1207 06:24:41.115059   369 solver.cpp:404]     Test net output #1: loss = 1.46444 (* 1 = 1.46444 loss)\nI1207 06:24:41.988188   369 solver.cpp:228] Iteration 1900, loss = 1.37191\nI1207 06:24:41.988237   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 06:24:41.988255   369 solver.cpp:244]     Train net output #1: loss = 1.37191 (* 1 = 1.37191 loss)\nI1207 06:24:42.069891   369 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1207 06:24:49.577605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31609 > 2) by scale factor 0.603121\nI1207 06:24:54.266284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95449 > 2) by scale factor 0.676935\nI1207 06:24:55.205824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00629 > 2) by scale factor 0.996866\nI1207 06:24:59.894505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88125 > 2) by scale factor 0.409731\nI1207 06:26:08.508723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00001 > 2) by scale factor 0.999994\nI1207 06:26:15.102341   369 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1207 06:27:08.090389   369 solver.cpp:404]     Test net output #0: accuracy = 
0.42075\nI1207 06:27:08.090656   369 solver.cpp:404]     Test net output #1: loss = 1.54713 (* 1 = 1.54713 loss)\nI1207 06:27:08.963898   369 solver.cpp:228] Iteration 2000, loss = 1.44357\nI1207 06:27:08.963954   369 solver.cpp:244]     Train net output #0: accuracy = 0.42\nI1207 06:27:08.963974   369 solver.cpp:244]     Train net output #1: loss = 1.44357 (* 1 = 1.44357 loss)\nI1207 06:27:09.038434   369 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1207 06:28:42.083675   369 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1207 06:29:35.071936   369 solver.cpp:404]     Test net output #0: accuracy = 0.476\nI1207 06:29:35.072201   369 solver.cpp:404]     Test net output #1: loss = 1.45054 (* 1 = 1.45054 loss)\nI1207 06:29:35.944718   369 solver.cpp:228] Iteration 2100, loss = 1.4589\nI1207 06:29:35.944769   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 06:29:35.944787   369 solver.cpp:244]     Train net output #1: loss = 1.4589 (* 1 = 1.4589 loss)\nI1207 06:29:36.016381   369 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1207 06:29:47.279160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33594 > 2) by scale factor 0.856185\nI1207 06:29:50.093842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31219 > 2) by scale factor 0.86498\nI1207 06:29:54.782824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02654 > 2) by scale factor 0.986905\nI1207 06:30:56.842999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2492 > 2) by scale factor 0.615537\nI1207 06:31:00.608626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0705 > 2) by scale factor 0.965952\nI1207 06:31:09.087365   369 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1207 06:32:02.065282   369 solver.cpp:404]     Test net output #0: accuracy = 0.44065\nI1207 06:32:02.065515   369 solver.cpp:404]     Test net output #1: loss = 
1.52232 (* 1 = 1.52232 loss)\nI1207 06:32:02.938526   369 solver.cpp:228] Iteration 2200, loss = 1.51638\nI1207 06:32:02.938576   369 solver.cpp:244]     Train net output #0: accuracy = 0.47\nI1207 06:32:02.938594   369 solver.cpp:244]     Train net output #1: loss = 1.51638 (* 1 = 1.51638 loss)\nI1207 06:32:03.013465   369 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1207 06:33:03.122174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28198 > 2) by scale factor 0.876432\nI1207 06:33:09.709919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99321 > 2) by scale factor 0.668179\nI1207 06:33:15.354775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17775 > 2) by scale factor 0.918378\nI1207 06:33:16.297727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69064 > 2) by scale factor 0.541911\nI1207 06:33:36.065526   369 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1207 06:34:29.052814   369 solver.cpp:404]     Test net output #0: accuracy = 0.4374\nI1207 06:34:29.053079   369 solver.cpp:404]     Test net output #1: loss = 1.52325 (* 1 = 1.52325 loss)\nI1207 06:34:29.926308   369 solver.cpp:228] Iteration 2300, loss = 1.367\nI1207 06:34:29.926359   369 solver.cpp:244]     Train net output #0: accuracy = 0.51\nI1207 06:34:29.926378   369 solver.cpp:244]     Train net output #1: loss = 1.367 (* 1 = 1.367 loss)\nI1207 06:34:29.997598   369 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1207 06:34:59.075686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46936 > 2) by scale factor 0.809928\nI1207 06:35:01.896576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19413 > 2) by scale factor 0.911524\nI1207 06:35:02.838757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43528 > 2) by scale factor 0.821262\nI1207 06:35:04.721879   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.59712 > 2) by scale factor 0.770083\nI1207 06:35:06.604667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16736 > 2) by scale factor 0.922781\nI1207 06:35:26.357228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02851 > 2) by scale factor 0.985946\nI1207 06:36:03.040041   369 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1207 06:36:56.029666   369 solver.cpp:404]     Test net output #0: accuracy = 0.46695\nI1207 06:36:56.029940   369 solver.cpp:404]     Test net output #1: loss = 1.46391 (* 1 = 1.46391 loss)\nI1207 06:36:56.902859   369 solver.cpp:228] Iteration 2400, loss = 1.45618\nI1207 06:36:56.902910   369 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1207 06:36:56.902930   369 solver.cpp:244]     Train net output #1: loss = 1.45618 (* 1 = 1.45618 loss)\nI1207 06:36:56.973376   369 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1207 06:38:29.968771   369 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1207 06:39:22.952760   369 solver.cpp:404]     Test net output #0: accuracy = 0.46645\nI1207 06:39:22.953027   369 solver.cpp:404]     Test net output #1: loss = 1.47588 (* 1 = 1.47588 loss)\nI1207 06:39:23.827163   369 solver.cpp:228] Iteration 2500, loss = 1.281\nI1207 06:39:23.827213   369 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI1207 06:39:23.827231   369 solver.cpp:244]     Train net output #1: loss = 1.281 (* 1 = 1.281 loss)\nI1207 06:39:23.896136   369 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1207 06:40:56.917894   369 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1207 06:41:49.894758   369 solver.cpp:404]     Test net output #0: accuracy = 0.4766\nI1207 06:41:49.895042   369 solver.cpp:404]     Test net output #1: loss = 1.44247 (* 1 = 1.44247 loss)\nI1207 06:41:50.768345   369 solver.cpp:228] Iteration 2600, loss = 1.42688\nI1207 06:41:50.768395   369 solver.cpp:244]     Train net output 
#0: accuracy = 0.52\nI1207 06:41:50.768414   369 solver.cpp:244]     Train net output #1: loss = 1.42688 (* 1 = 1.42688 loss)\nI1207 06:41:50.848644   369 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1207 06:42:04.923573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68639 > 2) by scale factor 0.744495\nI1207 06:42:08.677007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8008 > 2) by scale factor 0.714082\nI1207 06:42:09.616791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8704 > 2) by scale factor 0.410644\nI1207 06:42:11.493985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9459 > 2) by scale factor 0.678909\nI1207 06:42:16.184761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12978 > 2) by scale factor 0.939066\nI1207 06:42:19.000607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52626 > 2) by scale factor 0.791684\nI1207 06:43:23.889364   369 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1207 06:44:16.871109   369 solver.cpp:404]     Test net output #0: accuracy = 0.4433\nI1207 06:44:16.871433   369 solver.cpp:404]     Test net output #1: loss = 1.52486 (* 1 = 1.52486 loss)\nI1207 06:44:17.745251   369 solver.cpp:228] Iteration 2700, loss = 1.61501\nI1207 06:44:17.745303   369 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 06:44:17.745322   369 solver.cpp:244]     Train net output #1: loss = 1.61501 (* 1 = 1.61501 loss)\nI1207 06:44:17.817576   369 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1207 06:45:50.866281   369 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1207 06:46:43.855778   369 solver.cpp:404]     Test net output #0: accuracy = 0.50525\nI1207 06:46:43.856045   369 solver.cpp:404]     Test net output #1: loss = 1.35175 (* 1 = 1.35175 loss)\nI1207 06:46:44.729106   369 solver.cpp:228] Iteration 2800, loss = 1.19692\nI1207 
06:46:44.729156   369 solver.cpp:244]     Train net output #0: accuracy = 0.54\nI1207 06:46:44.729176   369 solver.cpp:244]     Train net output #1: loss = 1.19692 (* 1 = 1.19692 loss)\nI1207 06:46:44.808903   369 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1207 06:48:17.839625   369 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1207 06:49:10.821451   369 solver.cpp:404]     Test net output #0: accuracy = 0.5169\nI1207 06:49:10.821743   369 solver.cpp:404]     Test net output #1: loss = 1.33906 (* 1 = 1.33906 loss)\nI1207 06:49:11.694821   369 solver.cpp:228] Iteration 2900, loss = 1.3492\nI1207 06:49:11.694871   369 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1207 06:49:11.694888   369 solver.cpp:244]     Train net output #1: loss = 1.3492 (* 1 = 1.3492 loss)\nI1207 06:49:11.775506   369 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1207 06:49:32.414402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2659 > 2) by scale factor 0.88265\nI1207 06:49:38.044278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06897 > 2) by scale factor 0.651685\nI1207 06:49:40.862748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08363 > 2) by scale factor 0.959862\nI1207 06:49:42.747280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71424 > 2) by scale factor 0.736854\nI1207 06:49:43.690299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59564 > 2) by scale factor 0.770523\nI1207 06:49:44.633579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16637 > 2) by scale factor 0.631639\nI1207 06:49:45.576424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00084 > 2) by scale factor 0.999582\nI1207 06:49:47.459553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06445 > 2) by scale factor 0.968782\nI1207 06:49:50.283759   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24264 > 2) by scale factor 0.616781\nI1207 06:49:51.226800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36538 > 2) by scale factor 0.845531\nI1207 06:49:52.169414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02901 > 2) by scale factor 0.985703\nI1207 06:49:53.112504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92128 > 2) by scale factor 0.684632\nI1207 06:49:54.055212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55248 > 2) by scale factor 0.783551\nI1207 06:49:57.817723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51462 > 2) by scale factor 0.79535\nI1207 06:50:01.580999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00931 > 2) by scale factor 0.995369\nI1207 06:50:03.463954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11843 > 2) by scale factor 0.944096\nI1207 06:50:05.347164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08017 > 2) by scale factor 0.96146\nI1207 06:50:10.050601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25917 > 2) by scale factor 0.88528\nI1207 06:50:44.851400   369 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1207 06:51:37.841920   369 solver.cpp:404]     Test net output #0: accuracy = 0.43935\nI1207 06:51:37.842206   369 solver.cpp:404]     Test net output #1: loss = 1.52074 (* 1 = 1.52074 loss)\nI1207 06:51:38.716079   369 solver.cpp:228] Iteration 3000, loss = 1.33664\nI1207 06:51:38.716130   369 solver.cpp:244]     Train net output #0: accuracy = 0.54\nI1207 06:51:38.716146   369 solver.cpp:244]     Train net output #1: loss = 1.33664 (* 1 = 1.33664 loss)\nI1207 06:51:38.788692   369 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1207 06:53:11.857682   369 
solver.cpp:337] Iteration 3100, Testing net (#0)\nI1207 06:54:04.840945   369 solver.cpp:404]     Test net output #0: accuracy = 0.47105\nI1207 06:54:04.841215   369 solver.cpp:404]     Test net output #1: loss = 1.48101 (* 1 = 1.48101 loss)\nI1207 06:54:05.714623   369 solver.cpp:228] Iteration 3100, loss = 1.44938\nI1207 06:54:05.714676   369 solver.cpp:244]     Train net output #0: accuracy = 0.47\nI1207 06:54:05.714700   369 solver.cpp:244]     Train net output #1: loss = 1.44938 (* 1 = 1.44938 loss)\nI1207 06:54:05.789172   369 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1207 06:55:38.821048   369 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1207 06:56:31.794623   369 solver.cpp:404]     Test net output #0: accuracy = 0.4249\nI1207 06:56:31.794916   369 solver.cpp:404]     Test net output #1: loss = 1.59736 (* 1 = 1.59736 loss)\nI1207 06:56:32.668774   369 solver.cpp:228] Iteration 3200, loss = 1.80896\nI1207 06:56:32.668825   369 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 06:56:32.668843   369 solver.cpp:244]     Train net output #1: loss = 1.80896 (* 1 = 1.80896 loss)\nI1207 06:56:32.747555   369 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1207 06:57:03.706135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12557 > 2) by scale factor 0.484782\nI1207 06:57:04.649513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16843 > 2) by scale factor 0.922328\nI1207 06:57:12.177724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05229 > 2) by scale factor 0.655247\nI1207 06:57:18.766036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53756 > 2) by scale factor 0.788158\nI1207 06:57:19.709192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23846 > 2) by scale factor 0.893472\nI1207 06:58:05.808732   369 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1207 06:58:58.795930   369 
solver.cpp:404]     Test net output #0: accuracy = 0.43905\nI1207 06:58:58.796222   369 solver.cpp:404]     Test net output #1: loss = 1.51824 (* 1 = 1.51824 loss)\nI1207 06:58:59.669905   369 solver.cpp:228] Iteration 3300, loss = 1.43927\nI1207 06:58:59.669957   369 solver.cpp:244]     Train net output #0: accuracy = 0.43\nI1207 06:58:59.669976   369 solver.cpp:244]     Train net output #1: loss = 1.43927 (* 1 = 1.43927 loss)\nI1207 06:58:59.748562   369 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1207 07:00:32.747210   369 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1207 07:01:25.730648   369 solver.cpp:404]     Test net output #0: accuracy = 0.49385\nI1207 07:01:25.730914   369 solver.cpp:404]     Test net output #1: loss = 1.39551 (* 1 = 1.39551 loss)\nI1207 07:01:26.604233   369 solver.cpp:228] Iteration 3400, loss = 1.39407\nI1207 07:01:26.604282   369 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1207 07:01:26.604300   369 solver.cpp:244]     Train net output #1: loss = 1.39407 (* 1 = 1.39407 loss)\nI1207 07:01:26.686226   369 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1207 07:01:36.073434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51791 > 2) by scale factor 0.794311\nI1207 07:01:38.889114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38093 > 2) by scale factor 0.840008\nI1207 07:01:40.766549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00067 > 2) by scale factor 0.999663\nI1207 07:01:48.271642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24315 > 2) by scale factor 0.891603\nI1207 07:01:49.212126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20936 > 2) by scale factor 0.905241\nI1207 07:01:52.028040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89065 > 2) by scale factor 0.691886\nI1207 07:01:54.843644   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.06169 > 2) by scale factor 0.970079\nI1207 07:01:55.783366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19453 > 2) by scale factor 0.911358\nI1207 07:01:56.723191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13093 > 2) by scale factor 0.938558\nI1207 07:02:01.420348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80727 > 2) by scale factor 0.712436\nI1207 07:02:02.363371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24817 > 2) by scale factor 0.889611\nI1207 07:02:59.775758   369 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1207 07:03:52.761114   369 solver.cpp:404]     Test net output #0: accuracy = 0.4893\nI1207 07:03:52.761405   369 solver.cpp:404]     Test net output #1: loss = 1.41502 (* 1 = 1.41502 loss)\nI1207 07:03:53.634506   369 solver.cpp:228] Iteration 3500, loss = 1.30009\nI1207 07:03:53.634554   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 07:03:53.634572   369 solver.cpp:244]     Train net output #1: loss = 1.30009 (* 1 = 1.30009 loss)\nI1207 07:03:53.704897   369 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1207 07:04:03.087226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47623 > 2) by scale factor 0.80768\nI1207 07:04:04.027000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05607 > 2) by scale factor 0.654436\nI1207 07:04:04.967054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04864 > 2) by scale factor 0.976258\nI1207 07:05:26.747901   369 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1207 07:06:19.725083   369 solver.cpp:404]     Test net output #0: accuracy = 0.49195\nI1207 07:06:19.725371   369 solver.cpp:404]     Test net output #1: loss = 1.40371 (* 1 = 1.40371 loss)\nI1207 07:06:20.598263   369 solver.cpp:228] Iteration 3600, 
loss = 1.35842\nI1207 07:06:20.598310   369 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI1207 07:06:20.598328   369 solver.cpp:244]     Train net output #1: loss = 1.35842 (* 1 = 1.35842 loss)\nI1207 07:06:20.677045   369 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1207 07:06:50.681736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69183 > 2) by scale factor 0.742988\nI1207 07:06:53.504971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24849 > 2) by scale factor 0.889488\nI1207 07:06:55.388059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25465 > 2) by scale factor 0.887054\nI1207 07:06:58.211161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19195 > 2) by scale factor 0.91243\nI1207 07:07:01.972841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77408 > 2) by scale factor 0.720961\nI1207 07:07:07.613911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2778 > 2) by scale factor 0.878042\nI1207 07:07:13.256785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4359 > 2) by scale factor 0.82105\nI1207 07:07:16.079035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12776 > 2) by scale factor 0.939955\nI1207 07:07:29.245378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01475 > 2) by scale factor 0.992677\nI1207 07:07:30.188463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05953 > 2) by scale factor 0.971093\nI1207 07:07:34.890981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06554 > 2) by scale factor 0.96827\nI1207 07:07:39.594346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0278 > 2) by scale factor 0.986293\nI1207 07:07:40.537255   369 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 2.16838 > 2) by scale factor 0.922346\nI1207 07:07:46.181900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77629 > 2) by scale factor 0.720386\nI1207 07:07:50.885155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17867 > 2) by scale factor 0.917992\nI1207 07:07:53.718166   369 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1207 07:08:46.691982   369 solver.cpp:404]     Test net output #0: accuracy = 0.3178\nI1207 07:08:46.692250   369 solver.cpp:404]     Test net output #1: loss = 2.03076 (* 1 = 2.03076 loss)\nI1207 07:08:47.565222   369 solver.cpp:228] Iteration 3700, loss = 2.19587\nI1207 07:08:47.565265   369 solver.cpp:244]     Train net output #0: accuracy = 0.31\nI1207 07:08:47.565282   369 solver.cpp:244]     Train net output #1: loss = 2.19587 (* 1 = 2.19587 loss)\nI1207 07:08:47.641737   369 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1207 07:09:03.591388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02229 > 2) by scale factor 0.988977\nI1207 07:10:20.650996   369 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1207 07:11:13.644589   369 solver.cpp:404]     Test net output #0: accuracy = 0.4469\nI1207 07:11:13.644893   369 solver.cpp:404]     Test net output #1: loss = 1.51436 (* 1 = 1.51436 loss)\nI1207 07:11:14.517462   369 solver.cpp:228] Iteration 3800, loss = 1.42384\nI1207 07:11:14.517509   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 07:11:14.517527   369 solver.cpp:244]     Train net output #1: loss = 1.42384 (* 1 = 1.42384 loss)\nI1207 07:11:14.591827   369 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI1207 07:12:10.947715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02448 > 2) by scale factor 0.987907\nI1207 07:12:47.629664   369 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1207 07:13:40.611383   369 solver.cpp:404]     Test net output #0: 
accuracy = 0.43855\nI1207 07:13:40.611652   369 solver.cpp:404]     Test net output #1: loss = 1.51158 (* 1 = 1.51158 loss)\nI1207 07:13:41.484398   369 solver.cpp:228] Iteration 3900, loss = 1.45755\nI1207 07:13:41.484447   369 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI1207 07:13:41.484463   369 solver.cpp:244]     Train net output #1: loss = 1.45755 (* 1 = 1.45755 loss)\nI1207 07:13:41.564635   369 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1207 07:15:14.599721   369 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1207 07:16:07.589324   369 solver.cpp:404]     Test net output #0: accuracy = 0.48085\nI1207 07:16:07.589613   369 solver.cpp:404]     Test net output #1: loss = 1.42163 (* 1 = 1.42163 loss)\nI1207 07:16:08.462733   369 solver.cpp:228] Iteration 4000, loss = 1.2643\nI1207 07:16:08.462777   369 solver.cpp:244]     Train net output #0: accuracy = 0.55\nI1207 07:16:08.462793   369 solver.cpp:244]     Train net output #1: loss = 1.2643 (* 1 = 1.2643 loss)\nI1207 07:16:08.536833   369 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1207 07:16:24.485502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55372 > 2) by scale factor 0.783173\nI1207 07:16:31.988802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18672 > 2) by scale factor 0.91461\nI1207 07:16:32.928823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52622 > 2) by scale factor 0.791697\nI1207 07:16:36.681061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11933 > 2) by scale factor 0.943696\nI1207 07:16:37.621234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07817 > 2) by scale factor 0.649736\nI1207 07:16:38.561159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12668 > 2) by scale factor 0.940432\nI1207 07:16:40.443966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.68561 > 2) by scale factor 0.74471\nI1207 07:16:41.386272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4005 > 2) by scale factor 0.833159\nI1207 07:16:42.328912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32837 > 2) by scale factor 0.85897\nI1207 07:16:45.151814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05972 > 2) by scale factor 0.971005\nI1207 07:16:47.973613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38645 > 2) by scale factor 0.590589\nI1207 07:16:50.796159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37504 > 2) by scale factor 0.842091\nI1207 07:16:51.738524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23932 > 2) by scale factor 0.893128\nI1207 07:16:52.680881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31233 > 2) by scale factor 0.864928\nI1207 07:16:53.623726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16555 > 2) by scale factor 0.923551\nI1207 07:16:56.445895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69319 > 2) by scale factor 0.742614\nI1207 07:16:57.388281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65626 > 2) by scale factor 0.752938\nI1207 07:16:58.330401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07625 > 2) by scale factor 0.963273\nI1207 07:16:59.272637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40326 > 2) by scale factor 0.832204\nI1207 07:17:02.095024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63554 > 2) by scale factor 0.758859\nI1207 07:17:41.574396   369 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1207 07:18:34.561444   369 solver.cpp:404]     Test net output #0: accuracy = 
0.3468\nI1207 07:18:34.561738   369 solver.cpp:404]     Test net output #1: loss = 1.73438 (* 1 = 1.73438 loss)\nI1207 07:18:35.435022   369 solver.cpp:228] Iteration 4100, loss = 1.76959\nI1207 07:18:35.435071   369 solver.cpp:244]     Train net output #0: accuracy = 0.3\nI1207 07:18:35.435089   369 solver.cpp:244]     Train net output #1: loss = 1.76959 (* 1 = 1.76959 loss)\nI1207 07:18:35.505273   369 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1207 07:20:08.546097   369 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1207 07:21:01.530556   369 solver.cpp:404]     Test net output #0: accuracy = 0.42\nI1207 07:21:01.530838   369 solver.cpp:404]     Test net output #1: loss = 1.59275 (* 1 = 1.59275 loss)\nI1207 07:21:02.404028   369 solver.cpp:228] Iteration 4200, loss = 1.75524\nI1207 07:21:02.404070   369 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI1207 07:21:02.404088   369 solver.cpp:244]     Train net output #1: loss = 1.75524 (* 1 = 1.75524 loss)\nI1207 07:21:02.481431   369 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1207 07:22:35.510970   369 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1207 07:23:28.497438   369 solver.cpp:404]     Test net output #0: accuracy = 0.43205\nI1207 07:23:28.497711   369 solver.cpp:404]     Test net output #1: loss = 1.51088 (* 1 = 1.51088 loss)\nI1207 07:23:29.370642   369 solver.cpp:228] Iteration 4300, loss = 1.39254\nI1207 07:23:29.370692   369 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1207 07:23:29.370714   369 solver.cpp:244]     Train net output #1: loss = 1.39254 (* 1 = 1.39254 loss)\nI1207 07:23:29.443400   369 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1207 07:24:12.624253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40282 > 2) by scale factor 0.832355\nI1207 07:24:14.506917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03489 > 2) by scale factor 0.982854\nI1207 07:24:15.449486   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72484 > 2) by scale factor 0.733989\nI1207 07:24:16.391640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77566 > 2) by scale factor 0.720549\nI1207 07:24:17.334089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11034 > 2) by scale factor 0.947715\nI1207 07:24:18.277144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03777 > 2) by scale factor 0.981464\nI1207 07:24:19.219949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72331 > 2) by scale factor 0.734401\nI1207 07:24:21.103605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07681 > 2) by scale factor 0.963013\nI1207 07:24:22.046872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59211 > 2) by scale factor 0.771572\nI1207 07:24:22.990003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32106 > 2) by scale factor 0.861676\nI1207 07:24:24.873703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1304 > 2) by scale factor 0.938792\nI1207 07:24:25.816774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85989 > 2) by scale factor 0.699327\nI1207 07:24:26.759436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70335 > 2) by scale factor 0.739823\nI1207 07:24:30.523814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12019 > 2) by scale factor 0.640987\nI1207 07:24:31.466483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37176 > 2) by scale factor 0.843254\nI1207 07:24:33.350070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11358 > 2) by scale factor 0.946262\nI1207 07:24:36.174135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.83188 > 2) by scale factor 0.706246\nI1207 07:24:37.116204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24071 > 2) by scale factor 0.617149\nI1207 07:24:39.938997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79501 > 2) by scale factor 0.715562\nI1207 07:24:40.880795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18615 > 2) by scale factor 0.627716\nI1207 07:25:02.516140   369 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1207 07:25:55.499680   369 solver.cpp:404]     Test net output #0: accuracy = 0.30025\nI1207 07:25:55.499974   369 solver.cpp:404]     Test net output #1: loss = 2.0164 (* 1 = 2.0164 loss)\nI1207 07:25:56.373065   369 solver.cpp:228] Iteration 4400, loss = 1.81395\nI1207 07:25:56.373112   369 solver.cpp:244]     Train net output #0: accuracy = 0.33\nI1207 07:25:56.373128   369 solver.cpp:244]     Train net output #1: loss = 1.81395 (* 1 = 1.81395 loss)\nI1207 07:25:56.449417   369 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1207 07:27:29.473381   369 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1207 07:28:22.464042   369 solver.cpp:404]     Test net output #0: accuracy = 0.46895\nI1207 07:28:22.464311   369 solver.cpp:404]     Test net output #1: loss = 1.46292 (* 1 = 1.46292 loss)\nI1207 07:28:23.337333   369 solver.cpp:228] Iteration 4500, loss = 1.35441\nI1207 07:28:23.337376   369 solver.cpp:244]     Train net output #0: accuracy = 0.46\nI1207 07:28:23.337394   369 solver.cpp:244]     Train net output #1: loss = 1.35441 (* 1 = 1.35441 loss)\nI1207 07:28:23.409809   369 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1207 07:29:14.119546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36939 > 2) by scale factor 0.593579\nI1207 07:29:15.062392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02372 > 2) by scale factor 0.988279\nI1207 07:29:16.004876   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59915 > 2) by scale factor 0.769482\nI1207 07:29:20.708780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54962 > 2) by scale factor 0.784431\nI1207 07:29:23.530433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21431 > 2) by scale factor 0.903217\nI1207 07:29:25.411456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08033 > 2) by scale factor 0.961386\nI1207 07:29:31.051453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53292 > 2) by scale factor 0.789603\nI1207 07:29:31.993224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69533 > 2) by scale factor 0.742023\nI1207 07:29:32.935009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49744 > 2) by scale factor 0.800821\nI1207 07:29:33.876708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93512 > 2) by scale factor 0.681404\nI1207 07:29:34.818614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86875 > 2) by scale factor 0.697167\nI1207 07:29:35.760555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69343 > 2) by scale factor 0.742548\nI1207 07:29:36.702262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07126 > 2) by scale factor 0.965594\nI1207 07:29:37.644222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45346 > 2) by scale factor 0.579129\nI1207 07:29:38.585417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50908 > 2) by scale factor 0.797105\nI1207 07:29:39.527343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95969 > 2) by scale factor 0.675747\nI1207 07:29:40.469086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.42847 > 2) by scale factor 0.583351\nI1207 07:29:41.411306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25627 > 2) by scale factor 0.886418\nI1207 07:29:42.353793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22085 > 2) by scale factor 0.900558\nI1207 07:29:43.295763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15257 > 2) by scale factor 0.481629\nI1207 07:29:44.237987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53562 > 2) by scale factor 0.788761\nI1207 07:29:45.180698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77348 > 2) by scale factor 0.721116\nI1207 07:29:46.123304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68695 > 2) by scale factor 0.744337\nI1207 07:29:47.065596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18144 > 2) by scale factor 0.916824\nI1207 07:29:48.947227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31927 > 2) by scale factor 0.862341\nI1207 07:29:49.889014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50817 > 2) by scale factor 0.797393\nI1207 07:29:52.709673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04421 > 2) by scale factor 0.656985\nI1207 07:29:56.480437   369 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1207 07:30:49.471763   369 solver.cpp:404]     Test net output #0: accuracy = 0.2845\nI1207 07:30:49.472046   369 solver.cpp:404]     Test net output #1: loss = 2.5392 (* 1 = 2.5392 loss)\nI1207 07:30:50.345314   369 solver.cpp:228] Iteration 4600, loss = 2.41021\nI1207 07:30:50.345357   369 solver.cpp:244]     Train net output #0: accuracy = 0.35\nI1207 07:30:50.345374   369 solver.cpp:244]     Train net output #1: loss = 2.41021 (* 1 = 2.41021 loss)\nI1207 07:30:50.424564   369 
sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1207 07:30:51.371840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24357 > 2) by scale factor 0.891437\nI1207 07:30:52.312381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33307 > 2) by scale factor 0.600047\nI1207 07:30:55.127290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13214 > 2) by scale factor 0.938024\nI1207 07:30:56.066902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31621 > 2) by scale factor 0.863481\nI1207 07:30:57.006297   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74098 > 2) by scale factor 0.729665\nI1207 07:31:01.695186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35601 > 2) by scale factor 0.848893\nI1207 07:31:06.384465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94527 > 2) by scale factor 0.679056\nI1207 07:31:07.324237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82804 > 2) by scale factor 0.707204\nI1207 07:31:09.200925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14944 > 2) by scale factor 0.930477\nI1207 07:31:12.016008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03251 > 2) by scale factor 0.984004\nI1207 07:31:20.456954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06208 > 2) by scale factor 0.969897\nI1207 07:31:39.264586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36349 > 2) by scale factor 0.846207\nI1207 07:32:23.476282   369 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1207 07:33:16.468186   369 solver.cpp:404]     Test net output #0: accuracy = 0.4121\nI1207 07:33:16.468484   369 solver.cpp:404]     Test net output #1: loss = 1.60597 (* 1 = 1.60597 loss)\nI1207 07:33:17.341608   
369 solver.cpp:228] Iteration 4700, loss = 1.69067\nI1207 07:33:17.341655   369 solver.cpp:244]     Train net output #0: accuracy = 0.39\nI1207 07:33:17.341672   369 solver.cpp:244]     Train net output #1: loss = 1.69067 (* 1 = 1.69067 loss)\nI1207 07:33:17.413107   369 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1207 07:34:36.328008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27052 > 2) by scale factor 0.880854\nI1207 07:34:37.270244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33425 > 2) by scale factor 0.856805\nI1207 07:34:41.033565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07691 > 2) by scale factor 0.962969\nI1207 07:34:45.736505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85865 > 2) by scale factor 0.699631\nI1207 07:34:46.679447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24596 > 2) by scale factor 0.890489\nI1207 07:34:47.621955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03733 > 2) by scale factor 0.981677\nI1207 07:34:49.503509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51038 > 2) by scale factor 0.796691\nI1207 07:34:50.445523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92148 > 2) by scale factor 0.684585\nI1207 07:34:50.456771   369 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1207 07:35:43.440492   369 solver.cpp:404]     Test net output #0: accuracy = 0.27115\nI1207 07:35:43.440805   369 solver.cpp:404]     Test net output #1: loss = 2.52435 (* 1 = 2.52435 loss)\nI1207 07:35:44.313941   369 solver.cpp:228] Iteration 4800, loss = 2.38624\nI1207 07:35:44.313983   369 solver.cpp:244]     Train net output #0: accuracy = 0.31\nI1207 07:35:44.314000   369 solver.cpp:244]     Train net output #1: loss = 2.38624 (* 1 = 2.38624 loss)\nI1207 07:35:44.391010   369 
sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1207 07:35:44.401171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42942 > 2) by scale factor 0.82324\nI1207 07:35:45.341395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62708 > 2) by scale factor 0.761302\nI1207 07:35:47.218827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28167 > 2) by scale factor 0.876552\nI1207 07:35:49.095155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32496 > 2) by scale factor 0.860231\nI1207 07:35:50.971324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02501 > 2) by scale factor 0.987648\nI1207 07:35:51.911458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39133 > 2) by scale factor 0.836354\nI1207 07:35:52.851367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12916 > 2) by scale factor 0.639149\nI1207 07:35:53.790666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2496 > 2) by scale factor 0.889045\nI1207 07:35:54.730136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84553 > 2) by scale factor 0.702857\nI1207 07:35:57.545243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84931 > 2) by scale factor 0.701924\nI1207 07:35:58.484799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21898 > 2) by scale factor 0.901316\nI1207 07:36:00.361989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15697 > 2) by scale factor 0.927226\nI1207 07:36:08.798897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34203 > 2) by scale factor 0.598438\nI1207 07:36:09.738270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37157 > 2) by scale factor 0.843324\nI1207 07:36:13.492003  
 369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33204 > 2) by scale factor 0.600233\nI1207 07:36:14.434384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21933 > 2) by scale factor 0.901175\nI1207 07:36:15.376252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08473 > 2) by scale factor 0.648355\nI1207 07:36:18.198413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52989 > 2) by scale factor 0.79055\nI1207 07:36:20.080163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7896 > 2) by scale factor 0.716949\nI1207 07:36:21.021950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30752 > 2) by scale factor 0.866731\nI1207 07:36:22.904222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21478 > 2) by scale factor 0.622127\nI1207 07:36:23.846287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82275 > 2) by scale factor 0.70853\nI1207 07:36:24.788631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31382 > 2) by scale factor 0.86437\nI1207 07:36:25.731346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26577 > 2) by scale factor 0.468848\nI1207 07:36:26.673480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34849 > 2) by scale factor 0.851612\nI1207 07:36:28.555603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31047 > 2) by scale factor 0.865623\nI1207 07:36:29.498006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3686 > 2) by scale factor 0.844381\nI1207 07:36:30.440680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16593 > 2) by scale factor 0.631726\nI1207 07:36:31.383010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.09506 > 2) by scale factor 0.954624\nI1207 07:36:32.325667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95642 > 2) by scale factor 0.676495\nI1207 07:36:33.268065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54825 > 2) by scale factor 0.784853\nI1207 07:36:34.210171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14311 > 2) by scale factor 0.933222\nI1207 07:36:39.851693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77009 > 2) by scale factor 0.530492\nI1207 07:36:41.734150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2316 > 2) by scale factor 0.618889\nI1207 07:36:42.676218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43247 > 2) by scale factor 0.822209\nI1207 07:36:44.558379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50027 > 2) by scale factor 0.799915\nI1207 07:36:45.501371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36995 > 2) by scale factor 0.843901\nI1207 07:36:46.443655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96921 > 2) by scale factor 0.67358\nI1207 07:36:47.386023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24207 > 2) by scale factor 0.892031\nI1207 07:36:49.268762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26307 > 2) by scale factor 0.883755\nI1207 07:36:50.211277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0854 > 2) by scale factor 0.959051\nI1207 07:36:52.094593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03782 > 2) by scale factor 0.981439\nI1207 07:36:53.037158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33022 > 2) by scale factor 0.600561\nI1207 07:36:53.979461   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28502 > 2) by scale factor 0.875264\nI1207 07:36:55.861901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11864 > 2) by scale factor 0.944\nI1207 07:36:56.804217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87851 > 2) by scale factor 0.694804\nI1207 07:36:57.746891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75542 > 2) by scale factor 0.725843\nI1207 07:37:01.509382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30978 > 2) by scale factor 0.865884\nI1207 07:37:04.332209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25277 > 2) by scale factor 0.887795\nI1207 07:37:07.155571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11458 > 2) by scale factor 0.945813\nI1207 07:37:08.097813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30867 > 2) by scale factor 0.866301\nI1207 07:37:10.920212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40183 > 2) by scale factor 0.832699\nI1207 07:37:11.862566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01982 > 2) by scale factor 0.662291\nI1207 07:37:12.804903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6903 > 2) by scale factor 0.743412\nI1207 07:37:15.628094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42831 > 2) by scale factor 0.823619\nI1207 07:37:16.570453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16176 > 2) by scale factor 0.925171\nI1207 07:37:17.512740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09786 > 2) by scale factor 0.953352\nI1207 07:37:17.524639   369 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1207 
07:38:10.499554   369 solver.cpp:404]     Test net output #0: accuracy = 0.2338\nI1207 07:38:10.499856   369 solver.cpp:404]     Test net output #1: loss = 2.62744 (* 1 = 2.62744 loss)\nI1207 07:38:11.373389   369 solver.cpp:228] Iteration 4900, loss = 3.0619\nI1207 07:38:11.373430   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 07:38:11.373446   369 solver.cpp:244]     Train net output #1: loss = 3.0619 (* 1 = 3.0619 loss)\nI1207 07:38:11.454033   369 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1207 07:38:12.402142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68784 > 2) by scale factor 0.744092\nI1207 07:38:14.279428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11288 > 2) by scale factor 0.946573\nI1207 07:38:15.218564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47739 > 2) by scale factor 0.8073\nI1207 07:38:18.034318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26607 > 2) by scale factor 0.882584\nI1207 07:38:19.911279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17018 > 2) by scale factor 0.630879\nI1207 07:38:20.850888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00714 > 2) by scale factor 0.996444\nI1207 07:38:22.727304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26617 > 2) by scale factor 0.882547\nI1207 07:38:23.666954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40701 > 2) by scale factor 0.830908\nI1207 07:38:24.606758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03834 > 2) by scale factor 0.658254\nI1207 07:38:25.546926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9668 > 2) by scale factor 0.674127\nI1207 07:38:26.486654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.61309 > 2) by scale factor 0.765378\nI1207 07:38:27.426069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16498 > 2) by scale factor 0.923795\nI1207 07:38:29.302928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6096 > 2) by scale factor 0.766401\nI1207 07:38:30.242776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64076 > 2) by scale factor 0.549336\nI1207 07:38:31.182018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69822 > 2) by scale factor 0.74123\nI1207 07:38:32.122004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60187 > 2) by scale factor 0.768678\nI1207 07:38:33.061259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20027 > 2) by scale factor 0.90898\nI1207 07:38:34.000934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45311 > 2) by scale factor 0.579188\nI1207 07:38:34.939658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11113 > 2) by scale factor 0.947362\nI1207 07:38:35.879961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36855 > 2) by scale factor 0.844399\nI1207 07:38:36.820019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47771 > 2) by scale factor 0.807197\nI1207 07:38:37.759376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4041 > 2) by scale factor 0.587527\nI1207 07:38:38.698662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02652 > 2) by scale factor 0.986915\nI1207 07:38:58.455266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03777 > 2) by scale factor 0.981464\nI1207 07:39:00.341181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10476 > 2) by scale factor 0.950228\nI1207 07:39:05.048862   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31014 > 2) by scale factor 0.865747\nI1207 07:39:05.992588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53442 > 2) by scale factor 0.789134\nI1207 07:39:06.935545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32701 > 2) by scale factor 0.859473\nI1207 07:39:11.643406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16467 > 2) by scale factor 0.92393\nI1207 07:39:14.468040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05843 > 2) by scale factor 0.971612\nI1207 07:39:20.116331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62821 > 2) by scale factor 0.551236\nI1207 07:39:21.059492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56173 > 2) by scale factor 0.780723\nI1207 07:39:25.767525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63738 > 2) by scale factor 0.549847\nI1207 07:39:26.710748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44014 > 2) by scale factor 0.819626\nI1207 07:39:29.536594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31579 > 2) by scale factor 0.863636\nI1207 07:39:30.479915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41709 > 2) by scale factor 0.827442\nI1207 07:39:31.422857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0094 > 2) by scale factor 0.664585\nI1207 07:39:32.366125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25155 > 2) by scale factor 0.888276\nI1207 07:39:33.309048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16756 > 2) by scale factor 0.922698\nI1207 07:39:34.253029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.5599 > 2) by scale factor 0.781281\nI1207 07:39:35.197147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43742 > 2) by scale factor 0.820541\nI1207 07:39:36.140748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26125 > 2) by scale factor 0.613261\nI1207 07:39:37.084417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8163 > 2) by scale factor 0.710151\nI1207 07:39:39.909554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41853 > 2) by scale factor 0.82695\nI1207 07:39:44.625661   369 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1207 07:40:37.610641   369 solver.cpp:404]     Test net output #0: accuracy = 0.27115\nI1207 07:40:37.610915   369 solver.cpp:404]     Test net output #1: loss = 2.48621 (* 1 = 2.48621 loss)\nI1207 07:40:38.484297   369 solver.cpp:228] Iteration 5000, loss = 2.28102\nI1207 07:40:38.484333   369 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1207 07:40:38.484349   369 solver.cpp:244]     Train net output #1: loss = 2.28102 (* 1 = 2.28102 loss)\nI1207 07:40:38.564985   369 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1207 07:40:41.387110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02527 > 2) by scale factor 0.987524\nI1207 07:40:43.264039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21655 > 2) by scale factor 0.902302\nI1207 07:40:46.078306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21968 > 2) by scale factor 0.901032\nI1207 07:40:47.018059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06963 > 2) by scale factor 0.651543\nI1207 07:40:51.706004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91013 > 2) by scale factor 0.687254\nI1207 07:40:52.645383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.10793 > 2) by scale factor 0.948797\nI1207 07:40:55.458518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18249 > 2) by scale factor 0.916385\nI1207 07:40:58.273053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55907 > 2) by scale factor 0.561945\nI1207 07:41:01.087716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74949 > 2) by scale factor 0.727408\nI1207 07:41:02.964618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54125 > 2) by scale factor 0.564773\nI1207 07:41:03.904088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16155 > 2) by scale factor 0.925264\nI1207 07:41:06.717506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00602 > 2) by scale factor 0.996998\nI1207 07:41:08.597471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45044 > 2) by scale factor 0.816179\nI1207 07:41:09.541250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76592 > 2) by scale factor 0.723088\nI1207 07:41:11.425530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.444 > 2) by scale factor 0.818332\nI1207 07:41:15.191998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52742 > 2) by scale factor 0.791321\nI1207 07:41:16.135506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26243 > 2) by scale factor 0.884004\nI1207 07:41:17.078878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42688 > 2) by scale factor 0.824105\nI1207 07:41:19.904978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10928 > 2) by scale factor 0.643236\nI1207 07:41:22.731423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21741 > 2) by scale factor 0.901953\nI1207 07:41:25.558188   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5905 > 2) by scale factor 0.772053\nI1207 07:41:29.325090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17408 > 2) by scale factor 0.919928\nI1207 07:41:30.268569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53804 > 2) by scale factor 0.788009\nI1207 07:41:31.211398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11153 > 2) by scale factor 0.947183\nI1207 07:41:32.154983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34169 > 2) by scale factor 0.598499\nI1207 07:41:35.921435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20306 > 2) by scale factor 0.907829\nI1207 07:41:37.806666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81706 > 2) by scale factor 0.70996\nI1207 07:41:39.690219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44336 > 2) by scale factor 0.818547\nI1207 07:41:42.516659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04287 > 2) by scale factor 0.979015\nI1207 07:41:44.400238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8899 > 2) by scale factor 0.692065\nI1207 07:41:45.343154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75622 > 2) by scale factor 0.72563\nI1207 07:41:46.286590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44778 > 2) by scale factor 0.817067\nI1207 07:41:49.111891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29477 > 2) by scale factor 0.607023\nI1207 07:41:53.818583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19795 > 2) by scale factor 0.909939\nI1207 07:41:57.585273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.11081 > 2) by scale factor 0.947504\nI1207 07:41:58.527843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62515 > 2) by scale factor 0.761862\nI1207 07:42:02.294936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62435 > 2) by scale factor 0.762092\nI1207 07:42:11.713318   369 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1207 07:43:04.703214   369 solver.cpp:404]     Test net output #0: accuracy = 0.3017\nI1207 07:43:04.703510   369 solver.cpp:404]     Test net output #1: loss = 2.17884 (* 1 = 2.17884 loss)\nI1207 07:43:05.576445   369 solver.cpp:228] Iteration 5100, loss = 2.05181\nI1207 07:43:05.576484   369 solver.cpp:244]     Train net output #0: accuracy = 0.35\nI1207 07:43:05.576501   369 solver.cpp:244]     Train net output #1: loss = 2.05181 (* 1 = 2.05181 loss)\nI1207 07:43:05.655323   369 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1207 07:43:13.164923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61822 > 2) by scale factor 0.552758\nI1207 07:43:14.104594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26323 > 2) by scale factor 0.883691\nI1207 07:43:15.043982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00253 > 2) by scale factor 0.998737\nI1207 07:43:15.984066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44329 > 2) by scale factor 0.81857\nI1207 07:43:17.862176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41994 > 2) by scale factor 0.826467\nI1207 07:43:19.739475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50683 > 2) by scale factor 0.797821\nI1207 07:43:20.678678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28393 > 2) by scale factor 0.875682\nI1207 07:43:22.555613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.54811 > 2) by scale factor 0.784896\nI1207 07:43:30.056085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26805 > 2) by scale factor 0.881813\nI1207 07:43:30.995443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73272 > 2) by scale factor 0.535803\nI1207 07:43:31.934936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43539 > 2) by scale factor 0.821224\nI1207 07:43:35.686617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13977 > 2) by scale factor 0.934679\nI1207 07:43:36.626243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56307 > 2) by scale factor 0.780315\nI1207 07:43:37.565685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02405 > 2) by scale factor 0.988118\nI1207 07:43:38.504786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63417 > 2) by scale factor 0.759252\nI1207 07:43:39.443873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10805 > 2) by scale factor 0.948746\nI1207 07:43:40.385314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03279 > 2) by scale factor 0.983871\nI1207 07:43:41.328969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86339 > 2) by scale factor 0.698473\nI1207 07:43:42.272114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69914 > 2) by scale factor 0.540666\nI1207 07:43:43.214874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48252 > 2) by scale factor 0.805634\nI1207 07:43:44.157789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24952 > 2) by scale factor 0.615476\nI1207 07:43:45.100193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2455 > 2) by scale factor 0.890671\nI1207 07:43:46.042913   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40134 > 2) by scale factor 0.588004\nI1207 07:43:46.985401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83807 > 2) by scale factor 0.704704\nI1207 07:43:47.928529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00606 > 2) by scale factor 0.665322\nI1207 07:43:48.871381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40015 > 2) by scale factor 0.833281\nI1207 07:43:49.814018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50788 > 2) by scale factor 0.570145\nI1207 07:43:50.756813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63912 > 2) by scale factor 0.75783\nI1207 07:43:51.700253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77492 > 2) by scale factor 0.720741\nI1207 07:43:56.404959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14201 > 2) by scale factor 0.933703\nI1207 07:43:58.288209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17378 > 2) by scale factor 0.630164\nI1207 07:44:03.935390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59153 > 2) by scale factor 0.771743\nI1207 07:44:04.878062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29046 > 2) by scale factor 0.873187\nI1207 07:44:05.821382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13309 > 2) by scale factor 0.937605\nI1207 07:44:06.765239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37767 > 2) by scale factor 0.84116\nI1207 07:44:09.589279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72767 > 2) by scale factor 0.733226\nI1207 07:44:10.531839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.6679 > 2) by scale factor 0.749655\nI1207 07:44:11.475508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28839 > 2) by scale factor 0.873975\nI1207 07:44:12.418418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42271 > 2) by scale factor 0.584332\nI1207 07:44:13.360924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29079 > 2) by scale factor 0.87306\nI1207 07:44:14.303788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10016 > 2) by scale factor 0.952307\nI1207 07:44:16.187790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19577 > 2) by scale factor 0.910841\nI1207 07:44:17.131091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5368 > 2) by scale factor 0.788396\nI1207 07:44:18.073999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70579 > 2) by scale factor 0.739155\nI1207 07:44:19.957237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07714 > 2) by scale factor 0.962861\nI1207 07:44:21.840301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73764 > 2) by scale factor 0.730556\nI1207 07:44:22.782482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64079 > 2) by scale factor 0.549331\nI1207 07:44:23.725370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33961 > 2) by scale factor 0.854843\nI1207 07:44:25.609602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83209 > 2) by scale factor 0.706191\nI1207 07:44:26.552654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08391 > 2) by scale factor 0.959734\nI1207 07:44:27.495586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37662 > 2) by scale factor 0.84153\nI1207 07:44:28.437911   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32981 > 2) by scale factor 0.600635\nI1207 07:44:29.380276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34124 > 2) by scale factor 0.854247\nI1207 07:44:30.323835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02275 > 2) by scale factor 0.661649\nI1207 07:44:31.266841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47852 > 2) by scale factor 0.806934\nI1207 07:44:33.150032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43978 > 2) by scale factor 0.581432\nI1207 07:44:34.092701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80585 > 2) by scale factor 0.416159\nI1207 07:44:35.035043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7767 > 2) by scale factor 0.720279\nI1207 07:44:35.978615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54348 > 2) by scale factor 0.786324\nI1207 07:44:36.922302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15298 > 2) by scale factor 0.928946\nI1207 07:44:37.865864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74324 > 2) by scale factor 0.729066\nI1207 07:44:38.808593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92462 > 2) by scale factor 0.509604\nI1207 07:44:38.820528   369 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1207 07:45:31.795356   369 solver.cpp:404]     Test net output #0: accuracy = 0.2413\nI1207 07:45:31.795670   369 solver.cpp:404]     Test net output #1: loss = 4.30236 (* 1 = 4.30236 loss)\nI1207 07:45:32.668731   369 solver.cpp:228] Iteration 5200, loss = 4.28916\nI1207 07:45:32.668769   369 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1207 07:45:32.668787   369 solver.cpp:244]     Train net output #1: loss = 
4.28916 (* 1 = 4.28916 loss)\nI1207 07:45:32.745350   369 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1207 07:45:32.755477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27798 > 2) by scale factor 0.87797\nI1207 07:45:33.695281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2634 > 2) by scale factor 0.612858\nI1207 07:45:34.634369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11085 > 2) by scale factor 0.947487\nI1207 07:45:35.574180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03255 > 2) by scale factor 0.983987\nI1207 07:45:36.512923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3634 > 2) by scale factor 0.84624\nI1207 07:45:37.452127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40283 > 2) by scale factor 0.832353\nI1207 07:45:38.391894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17327 > 2) by scale factor 0.630266\nI1207 07:45:39.331157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20815 > 2) by scale factor 0.905736\nI1207 07:45:41.207540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79426 > 2) by scale factor 0.715752\nI1207 07:45:42.147810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03155 > 2) by scale factor 0.98447\nI1207 07:45:43.087471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42223 > 2) by scale factor 0.825685\nI1207 07:45:44.964057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65704 > 2) by scale factor 0.752717\nI1207 07:45:45.903522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62976 > 2) by scale factor 0.760526\nI1207 07:45:48.717490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.16908 > 2) by scale factor 0.922051\nI1207 07:45:50.594215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55844 > 2) by scale factor 0.781727\nI1207 07:45:53.409641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7264 > 2) by scale factor 0.733567\nI1207 07:45:59.034255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21566 > 2) by scale factor 0.621956\nI1207 07:45:59.973505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26446 > 2) by scale factor 0.883213\nI1207 07:46:01.850423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1288 > 2) by scale factor 0.939496\nI1207 07:46:03.727579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12291 > 2) by scale factor 0.942102\nI1207 07:46:04.666525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01405 > 2) by scale factor 0.993022\nI1207 07:46:05.606381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08097 > 2) by scale factor 0.961093\nI1207 07:46:07.487376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89025 > 2) by scale factor 0.514105\nI1207 07:46:09.369890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18572 > 2) by scale factor 0.915031\nI1207 07:46:11.253057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07531 > 2) by scale factor 0.650341\nI1207 07:46:12.195010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44952 > 2) by scale factor 0.816485\nI1207 07:46:14.077396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30447 > 2) by scale factor 0.867879\nI1207 07:46:16.900347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18736 > 2) by scale factor 0.914343\nI1207 07:46:17.842888   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3937 > 2) by scale factor 0.835527\nI1207 07:46:19.725427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56321 > 2) by scale factor 0.561292\nI1207 07:46:20.668069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30739 > 2) by scale factor 0.866782\nI1207 07:46:22.551703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86667 > 2) by scale factor 0.697674\nI1207 07:46:23.494338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8804 > 2) by scale factor 0.694347\nI1207 07:46:24.436913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42247 > 2) by scale factor 0.825603\nI1207 07:46:25.379894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14757 > 2) by scale factor 0.635411\nI1207 07:46:26.322439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63272 > 2) by scale factor 0.75967\nI1207 07:46:28.204715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33991 > 2) by scale factor 0.854732\nI1207 07:46:29.147594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52111 > 2) by scale factor 0.7933\nI1207 07:46:30.089702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95888 > 2) by scale factor 0.675931\nI1207 07:46:33.852028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14335 > 2) by scale factor 0.933118\nI1207 07:46:34.794955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59752 > 2) by scale factor 0.769964\nI1207 07:46:36.678191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46372 > 2) by scale factor 0.811782\nI1207 07:46:37.620440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.67815 > 2) by scale factor 0.746784\nI1207 07:46:38.562196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99501 > 2) by scale factor 0.500624\nI1207 07:46:39.504081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0114 > 2) by scale factor 0.664144\nI1207 07:46:41.387253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42416 > 2) by scale factor 0.825029\nI1207 07:46:43.269503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1618 > 2) by scale factor 0.925153\nI1207 07:46:44.211931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56962 > 2) by scale factor 0.778324\nI1207 07:46:45.154770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18194 > 2) by scale factor 0.628548\nI1207 07:46:47.037876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46175 > 2) by scale factor 0.812429\nI1207 07:46:48.921043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70076 > 2) by scale factor 0.740532\nI1207 07:46:51.742988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83618 > 2) by scale factor 0.705175\nI1207 07:46:58.326541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15642 > 2) by scale factor 0.927461\nI1207 07:47:01.148702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12953 > 2) by scale factor 0.939174\nI1207 07:47:05.851292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20549 > 2) by scale factor 0.906828\nI1207 07:47:05.863488   369 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1207 07:47:58.846488   369 solver.cpp:404]     Test net output #0: accuracy = 0.27565\nI1207 07:47:58.846770   369 solver.cpp:404]     Test net output #1: loss = 2.18175 (* 1 = 2.18175 loss)\nI1207 07:47:59.720558   369 
solver.cpp:228] Iteration 5300, loss = 1.9618\nI1207 07:47:59.720602   369 solver.cpp:244]     Train net output #0: accuracy = 0.3\nI1207 07:47:59.720620   369 solver.cpp:244]     Train net output #1: loss = 1.9618 (* 1 = 1.9618 loss)\nI1207 07:47:59.798425   369 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1207 07:48:09.182006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06418 > 2) by scale factor 0.968906\nI1207 07:48:11.997048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3859 > 2) by scale factor 0.838259\nI1207 07:48:14.811000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2365 > 2) by scale factor 0.617952\nI1207 07:48:15.750834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2892 > 2) by scale factor 0.873669\nI1207 07:48:16.690680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19595 > 2) by scale factor 0.910767\nI1207 07:48:21.378240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07182 > 2) by scale factor 0.965336\nI1207 07:48:22.318085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56288 > 2) by scale factor 0.561344\nI1207 07:48:23.258371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46092 > 2) by scale factor 0.812705\nI1207 07:48:24.197616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87524 > 2) by scale factor 0.695595\nI1207 07:48:25.137053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41146 > 2) by scale factor 0.829374\nI1207 07:48:26.076730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39625 > 2) by scale factor 0.834636\nI1207 07:48:27.015630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44334 > 2) by scale factor 0.580832\nI1207 07:48:27.955643   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4105 > 2) by scale factor 0.829705\nI1207 07:48:28.896206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5628 > 2) by scale factor 0.561357\nI1207 07:48:29.838333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12548 > 2) by scale factor 0.639901\nI1207 07:48:32.659215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12724 > 2) by scale factor 0.940184\nI1207 07:48:33.600831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00095 > 2) by scale factor 0.999525\nI1207 07:48:34.542562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39447 > 2) by scale factor 0.835259\nI1207 07:48:35.484935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71351 > 2) by scale factor 0.737053\nI1207 07:48:36.427227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82727 > 2) by scale factor 0.707395\nI1207 07:48:37.369210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11523 > 2) by scale factor 0.945523\nI1207 07:48:38.311247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27788 > 2) by scale factor 0.878008\nI1207 07:48:40.193311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63462 > 2) by scale factor 0.759123\nI1207 07:48:42.075362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68322 > 2) by scale factor 0.745372\nI1207 07:48:43.017741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18863 > 2) by scale factor 0.913813\nI1207 07:48:44.898674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45677 > 2) by scale factor 0.814078\nI1207 07:48:46.782169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.60603 > 2) by scale factor 0.76745\nI1207 07:48:47.724455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41181 > 2) by scale factor 0.829253\nI1207 07:48:48.666342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94415 > 2) by scale factor 0.679312\nI1207 07:48:49.608649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55541 > 2) by scale factor 0.782653\nI1207 07:48:50.550438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22945 > 2) by scale factor 0.619301\nI1207 07:48:51.492460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50487 > 2) by scale factor 0.798444\nI1207 07:48:52.435021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12578 > 2) by scale factor 0.940833\nI1207 07:48:53.377646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08756 > 2) by scale factor 0.958054\nI1207 07:48:54.319496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0308 > 2) by scale factor 0.984831\nI1207 07:48:55.261497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10962 > 2) by scale factor 0.948039\nI1207 07:48:56.203521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95992 > 2) by scale factor 0.675695\nI1207 07:48:57.145144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13713 > 2) by scale factor 0.935836\nI1207 07:48:58.087074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50546 > 2) by scale factor 0.798257\nI1207 07:49:02.788048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11919 > 2) by scale factor 0.943756\nI1207 07:49:03.730826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3769 > 2) by scale factor 0.841431\nI1207 07:49:04.672626   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25472 > 2) by scale factor 0.614492\nI1207 07:49:05.614702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38867 > 2) by scale factor 0.837284\nI1207 07:49:06.556944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45531 > 2) by scale factor 0.814562\nI1207 07:49:07.498194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38883 > 2) by scale factor 0.83723\nI1207 07:49:08.440389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2198 > 2) by scale factor 0.621156\nI1207 07:49:09.381721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39286 > 2) by scale factor 0.835822\nI1207 07:49:10.323472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2927 > 2) by scale factor 0.872333\nI1207 07:49:11.265985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11714 > 2) by scale factor 0.944673\nI1207 07:49:12.207952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18084 > 2) by scale factor 0.917078\nI1207 07:49:13.149412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32669 > 2) by scale factor 0.601199\nI1207 07:49:14.090852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5761 > 2) by scale factor 0.776367\nI1207 07:49:15.032338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10993 > 2) by scale factor 0.643102\nI1207 07:49:16.914005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20393 > 2) by scale factor 0.907468\nI1207 07:49:18.795233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01113 > 2) by scale factor 0.994465\nI1207 07:49:19.737293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.01389 > 2) by scale factor 0.663594\nI1207 07:49:20.678751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07534 > 2) by scale factor 0.650335\nI1207 07:49:21.620090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80978 > 2) by scale factor 0.7118\nI1207 07:49:22.561578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90591 > 2) by scale factor 0.688253\nI1207 07:49:23.503470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74954 > 2) by scale factor 0.727395\nI1207 07:49:24.445410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50118 > 2) by scale factor 0.799624\nI1207 07:49:25.387653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97103 > 2) by scale factor 0.673167\nI1207 07:49:26.329358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30539 > 2) by scale factor 0.605072\nI1207 07:49:27.271301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99564 > 2) by scale factor 0.667636\nI1207 07:49:28.213742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75882 > 2) by scale factor 0.532081\nI1207 07:49:29.155928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73609 > 2) by scale factor 0.730969\nI1207 07:49:30.097348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61124 > 2) by scale factor 0.76592\nI1207 07:49:31.039676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37406 > 2) by scale factor 0.592758\nI1207 07:49:31.981907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45341 > 2) by scale factor 0.579137\nI1207 07:49:32.923269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49109 > 2) by scale factor 0.802863\nI1207 07:49:32.935534   
369 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1207 07:50:25.904741   369 solver.cpp:404]     Test net output #0: accuracy = 0.2115\nI1207 07:50:25.905017   369 solver.cpp:404]     Test net output #1: loss = 4.4383 (* 1 = 4.4383 loss)\nI1207 07:50:26.778117   369 solver.cpp:228] Iteration 5400, loss = 4.24261\nI1207 07:50:26.778162   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 07:50:26.778178   369 solver.cpp:244]     Train net output #1: loss = 4.24261 (* 1 = 4.24261 loss)\nI1207 07:50:26.857322   369 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1207 07:50:26.867431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28541 > 2) by scale factor 0.875115\nI1207 07:50:27.807420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79863 > 2) by scale factor 0.714636\nI1207 07:50:28.746858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28362 > 2) by scale factor 0.875801\nI1207 07:50:29.686620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16747 > 2) by scale factor 0.631418\nI1207 07:50:30.625751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8728 > 2) by scale factor 0.696184\nI1207 07:50:31.565212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18978 > 2) by scale factor 0.627003\nI1207 07:50:32.504372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70848 > 2) by scale factor 0.738421\nI1207 07:50:34.380558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08288 > 2) by scale factor 0.960207\nI1207 07:50:35.320480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04706 > 2) by scale factor 0.977011\nI1207 07:50:36.260932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32867 > 2) by scale factor 0.600841\nI1207 07:50:37.200984   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05276 > 2) by scale factor 0.655145\nI1207 07:50:38.140925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61736 > 2) by scale factor 0.764129\nI1207 07:50:39.080909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12528 > 2) by scale factor 0.639943\nI1207 07:50:40.020818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69994 > 2) by scale factor 0.740758\nI1207 07:50:40.960685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51931 > 2) by scale factor 0.793867\nI1207 07:50:41.900586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8595 > 2) by scale factor 0.699424\nI1207 07:50:42.840962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33167 > 2) by scale factor 0.600299\nI1207 07:50:43.780725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51532 > 2) by scale factor 0.795129\nI1207 07:50:44.720546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3392 > 2) by scale factor 0.460914\nI1207 07:50:45.660533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28662 > 2) by scale factor 0.874653\nI1207 07:50:46.600595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17029 > 2) by scale factor 0.921538\nI1207 07:50:47.540407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23066 > 2) by scale factor 0.896596\nI1207 07:50:48.480255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05714 > 2) by scale factor 0.654205\nI1207 07:50:49.419746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04807 > 2) by scale factor 0.976529\nI1207 07:50:50.359678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.17776 > 2) by scale factor 0.918376\nI1207 07:50:52.237383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66937 > 2) by scale factor 0.74924\nI1207 07:50:53.177366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31241 > 2) by scale factor 0.463778\nI1207 07:50:54.117211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35535 > 2) by scale factor 0.596062\nI1207 07:50:55.057348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5189 > 2) by scale factor 0.793996\nI1207 07:50:55.996824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23223 > 2) by scale factor 0.618768\nI1207 07:50:56.937129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18615 > 2) by scale factor 0.914849\nI1207 07:50:57.877130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36902 > 2) by scale factor 0.844231\nI1207 07:50:58.817138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00398 > 2) by scale factor 0.998014\nI1207 07:51:00.700923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36581 > 2) by scale factor 0.845376\nI1207 07:51:04.466380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66651 > 2) by scale factor 0.750045\nI1207 07:51:05.409302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0517 > 2) by scale factor 0.974801\nI1207 07:51:06.352268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38144 > 2) by scale factor 0.839829\nI1207 07:51:07.295397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56315 > 2) by scale factor 0.78029\nI1207 07:51:09.178648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17155 > 2) by scale factor 0.920999\nI1207 07:51:10.121263   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82958 > 2) by scale factor 0.70682\nI1207 07:51:13.885058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0887 > 2) by scale factor 0.957535\nI1207 07:51:16.708400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3129 > 2) by scale factor 0.864716\nI1207 07:51:20.471105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98838 > 2) by scale factor 0.66926\nI1207 07:51:22.354027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77785 > 2) by scale factor 0.719981\nI1207 07:51:26.116086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35135 > 2) by scale factor 0.596775\nI1207 07:51:27.999436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47436 > 2) by scale factor 0.808291\nI1207 07:51:29.882553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10206 > 2) by scale factor 0.644734\nI1207 07:51:30.825206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88121 > 2) by scale factor 0.694153\nI1207 07:51:33.649381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61186 > 2) by scale factor 0.765737\nI1207 07:51:34.592041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11293 > 2) by scale factor 0.642481\nI1207 07:51:35.534615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50371 > 2) by scale factor 0.798816\nI1207 07:51:36.477108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73334 > 2) by scale factor 0.731706\nI1207 07:51:37.419131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12074 > 2) by scale factor 0.943069\nI1207 07:51:39.302289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.68325 > 2) by scale factor 0.745364\nI1207 07:51:41.185173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2258 > 2) by scale factor 0.898555\nI1207 07:51:43.068326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59703 > 2) by scale factor 0.77011\nI1207 07:51:44.010886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95603 > 2) by scale factor 0.676584\nI1207 07:51:46.834446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88229 > 2) by scale factor 0.693893\nI1207 07:51:47.776629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49683 > 2) by scale factor 0.571947\nI1207 07:51:49.658740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04732 > 2) by scale factor 0.976887\nI1207 07:51:51.540899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17812 > 2) by scale factor 0.918223\nI1207 07:51:52.483611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76642 > 2) by scale factor 0.722955\nI1207 07:51:54.366783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05321 > 2) by scale factor 0.974084\nI1207 07:51:55.308938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51442 > 2) by scale factor 0.795412\nI1207 07:51:56.251245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57105 > 2) by scale factor 0.777894\nI1207 07:51:58.134743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5428 > 2) by scale factor 0.786536\nI1207 07:51:59.077533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49456 > 2) by scale factor 0.801744\nI1207 07:52:00.020018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0485 > 2) by scale factor 0.656061\nI1207 07:52:00.031298   369 
solver.cpp:337] Iteration 5500, Testing net (#0)\nI1207 07:52:52.990451   369 solver.cpp:404]     Test net output #0: accuracy = 0.19435\nI1207 07:52:52.990749   369 solver.cpp:404]     Test net output #1: loss = 4.74619 (* 1 = 4.74619 loss)\nI1207 07:52:53.864120   369 solver.cpp:228] Iteration 5500, loss = 5.33624\nI1207 07:52:53.864164   369 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 07:52:53.864181   369 solver.cpp:244]     Train net output #1: loss = 5.33624 (* 1 = 5.33624 loss)\nI1207 07:52:53.943387   369 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1207 07:52:53.953150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54364 > 2) by scale factor 0.786274\nI1207 07:52:55.830791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89024 > 2) by scale factor 0.691983\nI1207 07:52:56.770990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44348 > 2) by scale factor 0.450098\nI1207 07:52:59.585471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70331 > 2) by scale factor 0.739835\nI1207 07:53:00.524014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74205 > 2) by scale factor 0.729381\nI1207 07:53:01.463367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10805 > 2) by scale factor 0.948742\nI1207 07:53:03.340966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92612 > 2) by scale factor 0.6835\nI1207 07:53:05.218602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16312 > 2) by scale factor 0.92459\nI1207 07:53:06.158959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12975 > 2) by scale factor 0.939078\nI1207 07:53:08.036770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43402 > 2) by scale factor 0.582408\nI1207 07:53:08.976203   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48864 > 2) by scale factor 0.803652\nI1207 07:53:09.916090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09626 > 2) by scale factor 0.954081\nI1207 07:53:10.855314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00989 > 2) by scale factor 0.995079\nI1207 07:53:11.795398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.683 > 2) by scale factor 0.745435\nI1207 07:53:15.549568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22742 > 2) by scale factor 0.897901\nI1207 07:53:17.426427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28307 > 2) by scale factor 0.609185\nI1207 07:53:18.366381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73979 > 2) by scale factor 0.729982\nI1207 07:53:19.305954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45641 > 2) by scale factor 0.814197\nI1207 07:53:20.246152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05164 > 2) by scale factor 0.974831\nI1207 07:53:21.186379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36448 > 2) by scale factor 0.845853\nI1207 07:53:22.126551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6763 > 2) by scale factor 0.747301\nI1207 07:53:23.066262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94456 > 2) by scale factor 0.507028\nI1207 07:53:24.006283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4365 > 2) by scale factor 0.450806\nI1207 07:53:24.946269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13702 > 2) by scale factor 0.935884\nI1207 07:53:25.887424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.52742 > 2) by scale factor 0.791322\nI1207 07:53:26.830037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09993 > 2) by scale factor 0.952411\nI1207 07:53:27.772961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33546 > 2) by scale factor 0.856363\nI1207 07:53:28.715888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2864 > 2) by scale factor 0.874737\nI1207 07:53:29.659219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95425 > 2) by scale factor 0.67699\nI1207 07:53:30.602033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90367 > 2) by scale factor 0.688783\nI1207 07:53:31.544798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41081 > 2) by scale factor 0.829598\nI1207 07:53:32.487428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04345 > 2) by scale factor 0.978737\nI1207 07:53:33.430357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54478 > 2) by scale factor 0.56421\nI1207 07:53:34.373281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9649 > 2) by scale factor 0.674559\nI1207 07:53:35.316354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89368 > 2) by scale factor 0.691161\nI1207 07:53:36.260712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19985 > 2) by scale factor 0.625029\nI1207 07:53:37.204222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96947 > 2) by scale factor 0.67352\nI1207 07:53:38.147300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72692 > 2) by scale factor 0.536636\nI1207 07:53:39.090330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2221 > 2) by scale factor 0.473698\nI1207 07:53:40.033268   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33621 > 2) by scale factor 0.856089\nI1207 07:53:40.976074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30271 > 2) by scale factor 0.868542\nI1207 07:53:41.918416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06014 > 2) by scale factor 0.653565\nI1207 07:53:42.860507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37434 > 2) by scale factor 0.84234\nI1207 07:53:43.803308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67556 > 2) by scale factor 0.747508\nI1207 07:53:44.745481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02743 > 2) by scale factor 0.660625\nI1207 07:53:46.628306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4666 > 2) by scale factor 0.810834\nI1207 07:53:47.570955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72414 > 2) by scale factor 0.734177\nI1207 07:53:48.513747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35386 > 2) by scale factor 0.596327\nI1207 07:53:49.456068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5449 > 2) by scale factor 0.564191\nI1207 07:53:50.398486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42532 > 2) by scale factor 0.583888\nI1207 07:53:51.341210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6866 > 2) by scale factor 0.744436\nI1207 07:53:52.284097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19159 > 2) by scale factor 0.91258\nI1207 07:53:53.227206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03562 > 2) by scale factor 0.982501\nI1207 07:53:54.169800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.32381 > 2) by scale factor 0.860655\nI1207 07:53:56.052175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55947 > 2) by scale factor 0.781413\nI1207 07:53:56.994308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28736 > 2) by scale factor 0.874371\nI1207 07:53:57.937160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98777 > 2) by scale factor 0.669395\nI1207 07:53:58.880084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80839 > 2) by scale factor 0.712152\nI1207 07:53:59.822479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13543 > 2) by scale factor 0.936581\nI1207 07:54:01.705150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79259 > 2) by scale factor 0.716181\nI1207 07:54:02.647975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58183 > 2) by scale factor 0.774644\nI1207 07:54:03.591204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53504 > 2) by scale factor 0.788942\nI1207 07:54:06.415030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88052 > 2) by scale factor 0.515395\nI1207 07:54:10.178582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92346 > 2) by scale factor 0.68412\nI1207 07:54:11.121477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26151 > 2) by scale factor 0.613212\nI1207 07:54:13.004218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01559 > 2) by scale factor 0.992266\nI1207 07:54:13.946560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79786 > 2) by scale factor 0.714831\nI1207 07:54:14.888651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97927 > 2) by scale factor 0.671305\nI1207 07:54:15.831667   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65483 > 2) by scale factor 0.547221\nI1207 07:54:17.715292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14412 > 2) by scale factor 0.932784\nI1207 07:54:18.658252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84993 > 2) by scale factor 0.701772\nI1207 07:54:19.601243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37994 > 2) by scale factor 0.840358\nI1207 07:54:20.544680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72092 > 2) by scale factor 0.735046\nI1207 07:54:21.487581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37471 > 2) by scale factor 0.842209\nI1207 07:54:22.430418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33125 > 2) by scale factor 0.857909\nI1207 07:54:23.373698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56907 > 2) by scale factor 0.778491\nI1207 07:54:24.316915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15145 > 2) by scale factor 0.634628\nI1207 07:54:25.260047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35423 > 2) by scale factor 0.596262\nI1207 07:54:26.203171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21279 > 2) by scale factor 0.903837\nI1207 07:54:27.145851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57709 > 2) by scale factor 0.559113\nI1207 07:54:27.157733   369 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1207 07:55:20.138008   369 solver.cpp:404]     Test net output #0: accuracy = 0.1816\nI1207 07:55:20.138320   369 solver.cpp:404]     Test net output #1: loss = 5.09818 (* 1 = 5.09818 loss)\nI1207 07:55:21.011378   369 solver.cpp:228] Iteration 5600, loss = 4.21979\nI1207 07:55:21.011426   
369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 07:55:21.011445   369 solver.cpp:244]     Train net output #1: loss = 4.21979 (* 1 = 4.21979 loss)\nI1207 07:55:21.083456   369 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1207 07:55:21.093601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57388 > 2) by scale factor 0.559616\nI1207 07:55:22.972183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45542 > 2) by scale factor 0.814524\nI1207 07:55:23.912499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92894 > 2) by scale factor 0.682841\nI1207 07:55:24.852615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39329 > 2) by scale factor 0.83567\nI1207 07:55:25.792846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13016 > 2) by scale factor 0.938895\nI1207 07:55:26.733129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.461 > 2) by scale factor 0.577868\nI1207 07:55:27.673151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9014 > 2) by scale factor 0.689322\nI1207 07:55:28.613538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02482 > 2) by scale factor 0.98774\nI1207 07:55:29.553603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99734 > 2) by scale factor 0.667259\nI1207 07:55:30.493504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79064 > 2) by scale factor 0.716682\nI1207 07:55:31.433145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30745 > 2) by scale factor 0.866758\nI1207 07:55:32.373358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55811 > 2) by scale factor 0.781828\nI1207 07:55:33.313275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.00827 > 2) by scale factor 0.664833\nI1207 07:55:35.190805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02287 > 2) by scale factor 0.988693\nI1207 07:55:37.069092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92966 > 2) by scale factor 0.682674\nI1207 07:55:38.008724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38796 > 2) by scale factor 0.837536\nI1207 07:55:40.824059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11046 > 2) by scale factor 0.642992\nI1207 07:55:41.763837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59236 > 2) by scale factor 0.771498\nI1207 07:55:42.704082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74469 > 2) by scale factor 0.728681\nI1207 07:55:43.644230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14655 > 2) by scale factor 0.931727\nI1207 07:55:44.584262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35155 > 2) by scale factor 0.850502\nI1207 07:55:46.462237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06418 > 2) by scale factor 0.968909\nI1207 07:55:48.340287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16339 > 2) by scale factor 0.924476\nI1207 07:55:49.280802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81505 > 2) by scale factor 0.710468\nI1207 07:55:53.039583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26222 > 2) by scale factor 0.884086\nI1207 07:55:55.863616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05414 > 2) by scale factor 0.973642\nI1207 07:55:56.806344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45666 > 2) by scale factor 0.814113\nI1207 07:55:58.689610   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80866 > 2) by scale factor 0.712082\nI1207 07:56:01.512802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51344 > 2) by scale factor 0.795724\nI1207 07:56:02.455288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14642 > 2) by scale factor 0.931785\nI1207 07:56:04.338538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68949 > 2) by scale factor 0.743634\nI1207 07:56:05.281093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29553 > 2) by scale factor 0.871257\nI1207 07:56:06.223544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35643 > 2) by scale factor 0.595872\nI1207 07:56:07.165966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.001 > 2) by scale factor 0.9995\nI1207 07:56:08.107817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49558 > 2) by scale factor 0.801417\nI1207 07:56:09.990005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14551 > 2) by scale factor 0.932178\nI1207 07:56:10.932289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19583 > 2) by scale factor 0.625816\nI1207 07:56:11.874353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06546 > 2) by scale factor 0.968309\nI1207 07:56:12.816890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01357 > 2) by scale factor 0.993259\nI1207 07:56:14.698292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68729 > 2) by scale factor 0.744245\nI1207 07:56:16.581823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37594 > 2) by scale factor 0.841771\nI1207 07:56:17.524101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.57824 > 2) by scale factor 0.775724\nI1207 07:56:19.406096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31388 > 2) by scale factor 0.864347\nI1207 07:56:21.289314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64508 > 2) by scale factor 0.756121\nI1207 07:56:22.232084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69213 > 2) by scale factor 0.742905\nI1207 07:56:23.174818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1261 > 2) by scale factor 0.639774\nI1207 07:56:24.117405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25975 > 2) by scale factor 0.885054\nI1207 07:56:25.059954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31978 > 2) by scale factor 0.862149\nI1207 07:56:26.002609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26896 > 2) by scale factor 0.88146\nI1207 07:56:26.945593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59755 > 2) by scale factor 0.555934\nI1207 07:56:27.888121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62965 > 2) by scale factor 0.760558\nI1207 07:56:28.830687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4059 > 2) by scale factor 0.83129\nI1207 07:56:30.713673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13257 > 2) by scale factor 0.638454\nI1207 07:56:31.656327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08256 > 2) by scale factor 0.648812\nI1207 07:56:34.479485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3275 > 2) by scale factor 0.85929\nI1207 07:56:35.422268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74192 > 2) by scale factor 0.729416\nI1207 07:56:36.364708   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51242 > 2) by scale factor 0.796046\nI1207 07:56:37.307651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43753 > 2) by scale factor 0.820504\nI1207 07:56:38.250401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92057 > 2) by scale factor 0.684798\nI1207 07:56:39.193243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31334 > 2) by scale factor 0.86455\nI1207 07:56:40.135890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91644 > 2) by scale factor 0.510668\nI1207 07:56:41.078279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72161 > 2) by scale factor 0.734859\nI1207 07:56:42.021050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41447 > 2) by scale factor 0.585742\nI1207 07:56:42.963871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01625 > 2) by scale factor 0.663075\nI1207 07:56:43.906051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28726 > 2) by scale factor 0.874409\nI1207 07:56:44.848399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30805 > 2) by scale factor 0.604585\nI1207 07:56:45.790663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68166 > 2) by scale factor 0.745807\nI1207 07:56:46.732956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56609 > 2) by scale factor 0.779396\nI1207 07:56:49.556001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60644 > 2) by scale factor 0.76733\nI1207 07:56:50.498903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16135 > 2) by scale factor 0.632642\nI1207 07:56:51.441371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.32941 > 2) by scale factor 0.461956\nI1207 07:56:52.384065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44829 > 2) by scale factor 0.816895\nI1207 07:56:53.326434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13875 > 2) by scale factor 0.637195\nI1207 07:56:54.269347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51679 > 2) by scale factor 0.794664\nI1207 07:56:54.281321   369 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1207 07:57:47.263351   369 solver.cpp:404]     Test net output #0: accuracy = 0.189\nI1207 07:57:47.263659   369 solver.cpp:404]     Test net output #1: loss = 5.62404 (* 1 = 5.62404 loss)\nI1207 07:57:48.136898   369 solver.cpp:228] Iteration 5700, loss = 5.72452\nI1207 07:57:48.136942   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 07:57:48.136961   369 solver.cpp:244]     Train net output #1: loss = 5.72452 (* 1 = 5.72452 loss)\nI1207 07:57:48.205962   369 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1207 07:57:49.153448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08186 > 2) by scale factor 0.960679\nI1207 07:57:50.093502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26453 > 2) by scale factor 0.612646\nI1207 07:57:51.032392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23154 > 2) by scale factor 0.472641\nI1207 07:57:51.972182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72894 > 2) by scale factor 0.732884\nI1207 07:57:52.912271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84421 > 2) by scale factor 0.520263\nI1207 07:57:53.852147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98672 > 2) by scale factor 0.669631\nI1207 07:57:54.791604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.88707 > 2) by scale factor 0.692744\nI1207 07:57:55.731642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73978 > 2) by scale factor 0.729987\nI1207 07:57:56.671732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25704 > 2) by scale factor 0.886116\nI1207 07:57:57.611233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61762 > 2) by scale factor 0.764052\nI1207 07:57:58.550424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01609 > 2) by scale factor 0.99202\nI1207 07:57:59.490269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09721 > 2) by scale factor 0.953646\nI1207 07:58:00.430680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6366 > 2) by scale factor 0.758553\nI1207 07:58:01.370751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99578 > 2) by scale factor 0.667605\nI1207 07:58:02.310585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55031 > 2) by scale factor 0.784218\nI1207 07:58:03.250646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48086 > 2) by scale factor 0.806172\nI1207 07:58:04.190596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22251 > 2) by scale factor 0.620634\nI1207 07:58:05.130314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53998 > 2) by scale factor 0.564975\nI1207 07:58:07.944867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67637 > 2) by scale factor 0.74728\nI1207 07:58:08.884714   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11524 > 2) by scale factor 0.642005\nI1207 07:58:09.824833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25717 > 2) by scale factor 0.886066\nI1207 07:58:10.764595   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45221 > 2) by scale factor 0.815591\nI1207 07:58:11.704562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21209 > 2) by scale factor 0.904124\nI1207 07:58:12.644444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79327 > 2) by scale factor 0.716006\nI1207 07:58:13.584291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6744 > 2) by scale factor 0.747831\nI1207 07:58:14.524405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67355 > 2) by scale factor 0.748068\nI1207 07:58:15.464315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1079 > 2) by scale factor 0.948812\nI1207 07:58:16.404654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54299 > 2) by scale factor 0.786477\nI1207 07:58:17.344746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07356 > 2) by scale factor 0.650711\nI1207 07:58:18.285099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44144 > 2) by scale factor 0.581153\nI1207 07:58:19.225730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50504 > 2) by scale factor 0.798391\nI1207 07:58:20.167744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00686 > 2) by scale factor 0.665146\nI1207 07:58:21.110450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4143 > 2) by scale factor 0.828397\nI1207 07:58:22.052569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12398 > 2) by scale factor 0.941627\nI1207 07:58:22.994807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.906 > 2) by scale factor 0.688232\nI1207 07:58:23.937080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.96228 > 2) by scale factor 0.675157\nI1207 07:58:24.879295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46516 > 2) by scale factor 0.811308\nI1207 07:58:25.821352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82545 > 2) by scale factor 0.522814\nI1207 07:58:26.763453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86964 > 2) by scale factor 0.696951\nI1207 07:58:27.705823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09732 > 2) by scale factor 0.64572\nI1207 07:58:29.587863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3377 > 2) by scale factor 0.599214\nI1207 07:58:30.530447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2694 > 2) by scale factor 0.881291\nI1207 07:58:31.472708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16441 > 2) by scale factor 0.632029\nI1207 07:58:32.415563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16825 > 2) by scale factor 0.631263\nI1207 07:58:33.357852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28101 > 2) by scale factor 0.609568\nI1207 07:58:35.240005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23258 > 2) by scale factor 0.895826\nI1207 07:58:36.182555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96022 > 2) by scale factor 0.675625\nI1207 07:58:38.064404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.589 > 2) by scale factor 0.772498\nI1207 07:58:39.006355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11054 > 2) by scale factor 0.947625\nI1207 07:58:40.888728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3416 > 2) by scale factor 0.854117\nI1207 07:58:41.831071   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50481 > 2) by scale factor 0.798464\nI1207 07:58:47.474165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19106 > 2) by scale factor 0.62675\nI1207 07:58:49.357626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49965 > 2) by scale factor 0.800111\nI1207 07:58:50.300285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53352 > 2) by scale factor 0.789415\nI1207 07:58:51.242636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34744 > 2) by scale factor 0.851991\nI1207 07:58:53.125355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18266 > 2) by scale factor 0.916314\nI1207 07:58:54.067363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34849 > 2) by scale factor 0.851612\nI1207 07:58:55.009372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91621 > 2) by scale factor 0.685821\nI1207 07:58:57.832228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38384 > 2) by scale factor 0.838983\nI1207 07:58:58.774072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35858 > 2) by scale factor 0.847969\nI1207 07:59:00.657147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19926 > 2) by scale factor 0.909395\nI1207 07:59:02.539880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9572 > 2) by scale factor 0.676316\nI1207 07:59:03.482358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13458 > 2) by scale factor 0.936954\nI1207 07:59:04.424679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33514 > 2) by scale factor 0.856478\nI1207 07:59:06.307845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.67731 > 2) by scale factor 0.543875\nI1207 07:59:08.189649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03557 > 2) by scale factor 0.982524\nI1207 07:59:09.131923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44396 > 2) by scale factor 0.818345\nI1207 07:59:11.014297   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64336 > 2) by scale factor 0.756612\nI1207 07:59:12.896121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19585 > 2) by scale factor 0.910808\nI1207 07:59:13.838637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46672 > 2) by scale factor 0.810794\nI1207 07:59:14.780848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4654 > 2) by scale factor 0.811227\nI1207 07:59:15.723352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47333 > 2) by scale factor 0.808627\nI1207 07:59:16.665325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25819 > 2) by scale factor 0.885665\nI1207 07:59:17.608220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38986 > 2) by scale factor 0.836869\nI1207 07:59:19.490484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58759 > 2) by scale factor 0.772919\nI1207 07:59:20.432467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64971 > 2) by scale factor 0.7548\nI1207 07:59:21.384317   369 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1207 08:00:14.362388   369 solver.cpp:404]     Test net output #0: accuracy = 0.2768\nI1207 08:00:14.362684   369 solver.cpp:404]     Test net output #1: loss = 3.23505 (* 1 = 3.23505 loss)\nI1207 08:00:15.235970   369 solver.cpp:228] Iteration 5800, loss = 2.96495\nI1207 08:00:15.236012   369 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI1207 
08:00:15.236030   369 solver.cpp:244]     Train net output #1: loss = 2.96495 (* 1 = 2.96495 loss)\nI1207 08:00:15.317646   369 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1207 08:00:16.265982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31623 > 2) by scale factor 0.863473\nI1207 08:00:19.081012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20707 > 2) by scale factor 0.906181\nI1207 08:00:20.021047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61714 > 2) by scale factor 0.764193\nI1207 08:00:22.836472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66546 > 2) by scale factor 0.750341\nI1207 08:00:23.776871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24816 > 2) by scale factor 0.889616\nI1207 08:00:24.716840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55792 > 2) by scale factor 0.781886\nI1207 08:00:25.656188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26186 > 2) by scale factor 0.613147\nI1207 08:00:26.596587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23053 > 2) by scale factor 0.896648\nI1207 08:00:28.474272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18593 > 2) by scale factor 0.914944\nI1207 08:00:29.414223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25587 > 2) by scale factor 0.614275\nI1207 08:00:30.354415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84298 > 2) by scale factor 0.703487\nI1207 08:00:32.231700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16163 > 2) by scale factor 0.925226\nI1207 08:00:34.109588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7171 > 2) by scale factor 0.423989\nI1207 08:00:35.049747   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66197 > 2) by scale factor 0.546155\nI1207 08:00:35.989334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26207 > 2) by scale factor 0.469256\nI1207 08:00:36.929395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75347 > 2) by scale factor 0.726356\nI1207 08:00:37.869177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47383 > 2) by scale factor 0.808462\nI1207 08:00:38.809154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17852 > 2) by scale factor 0.629224\nI1207 08:00:39.749287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93945 > 2) by scale factor 0.680399\nI1207 08:00:40.689504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59105 > 2) by scale factor 0.556941\nI1207 08:00:41.629758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8154 > 2) by scale factor 0.710378\nI1207 08:00:42.569708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28028 > 2) by scale factor 0.609704\nI1207 08:00:44.447468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75443 > 2) by scale factor 0.726104\nI1207 08:00:46.331990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40865 > 2) by scale factor 0.830342\nI1207 08:00:47.275286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7875 > 2) by scale factor 0.71749\nI1207 08:00:48.218354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1406 > 2) by scale factor 0.636822\nI1207 08:00:49.161594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0961 > 2) by scale factor 0.48827\nI1207 08:00:51.045979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.1018 > 2) by scale factor 0.951567\nI1207 08:00:51.989460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33871 > 2) by scale factor 0.855171\nI1207 08:00:52.932677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21386 > 2) by scale factor 0.903399\nI1207 08:00:54.816298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11781 > 2) by scale factor 0.944373\nI1207 08:00:55.759318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27912 > 2) by scale factor 0.877533\nI1207 08:00:56.702536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74725 > 2) by scale factor 0.728\nI1207 08:00:58.586629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54865 > 2) by scale factor 0.784728\nI1207 08:00:59.529939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.109 > 2) by scale factor 0.948315\nI1207 08:01:00.473212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71788 > 2) by scale factor 0.735867\nI1207 08:01:01.416827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65243 > 2) by scale factor 0.754026\nI1207 08:01:03.301503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11049 > 2) by scale factor 0.642986\nI1207 08:01:04.244725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92191 > 2) by scale factor 0.684484\nI1207 08:01:05.188093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27951 > 2) by scale factor 0.87738\nI1207 08:01:06.131655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46426 > 2) by scale factor 0.448002\nI1207 08:01:07.074980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5273 > 2) by scale factor 0.791357\nI1207 08:01:08.017944   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00852 > 2) by scale factor 0.664779\nI1207 08:01:08.961143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09042 > 2) by scale factor 0.647162\nI1207 08:01:09.904611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23615 > 2) by scale factor 0.894394\nI1207 08:01:10.847805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43345 > 2) by scale factor 0.582504\nI1207 08:01:12.731740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32491 > 2) by scale factor 0.860247\nI1207 08:01:13.674500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29634 > 2) by scale factor 0.606733\nI1207 08:01:14.617621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24958 > 2) by scale factor 0.615465\nI1207 08:01:15.561105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00087 > 2) by scale factor 0.499892\nI1207 08:01:16.504292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03268 > 2) by scale factor 0.983923\nI1207 08:01:17.447134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84262 > 2) by scale factor 0.703575\nI1207 08:01:18.390143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79109 > 2) by scale factor 0.716565\nI1207 08:01:19.333464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52089 > 2) by scale factor 0.79337\nI1207 08:01:20.276795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59253 > 2) by scale factor 0.771447\nI1207 08:01:21.219702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7493 > 2) by scale factor 0.727457\nI1207 08:01:22.162508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.33013 > 2) by scale factor 0.600577\nI1207 08:01:23.105612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13168 > 2) by scale factor 0.938228\nI1207 08:01:24.048434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33196 > 2) by scale factor 0.857648\nI1207 08:01:24.991482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74195 > 2) by scale factor 0.729407\nI1207 08:01:25.934350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20214 > 2) by scale factor 0.624583\nI1207 08:01:26.877060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17726 > 2) by scale factor 0.629473\nI1207 08:01:27.820376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02608 > 2) by scale factor 0.987128\nI1207 08:01:28.763327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37846 > 2) by scale factor 0.591986\nI1207 08:01:29.706621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75138 > 2) by scale factor 0.726907\nI1207 08:01:30.649380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18721 > 2) by scale factor 0.477645\nI1207 08:01:31.591912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40869 > 2) by scale factor 0.830328\nI1207 08:01:37.238298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60314 > 2) by scale factor 0.768302\nI1207 08:01:38.181084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18485 > 2) by scale factor 0.627973\nI1207 08:01:39.123780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03826 > 2) by scale factor 0.981229\nI1207 08:01:41.007262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59912 > 2) by scale factor 0.769493\nI1207 08:01:41.950018   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28497 > 2) by scale factor 0.875285\nI1207 08:01:42.892987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37815 > 2) by scale factor 0.592041\nI1207 08:01:43.835772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15132 > 2) by scale factor 0.634654\nI1207 08:01:44.778903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06565 > 2) by scale factor 0.96822\nI1207 08:01:45.722064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83922 > 2) by scale factor 0.704418\nI1207 08:01:46.664957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90252 > 2) by scale factor 0.689055\nI1207 08:01:47.607550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36225 > 2) by scale factor 0.846651\nI1207 08:01:48.559685   369 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1207 08:02:41.528635   369 solver.cpp:404]     Test net output #0: accuracy = 0.22555\nI1207 08:02:41.528962   369 solver.cpp:404]     Test net output #1: loss = 4.94948 (* 1 = 4.94948 loss)\nI1207 08:02:42.402369   369 solver.cpp:228] Iteration 5900, loss = 6.16906\nI1207 08:02:42.402420   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 08:02:42.402439   369 solver.cpp:244]     Train net output #1: loss = 6.16906 (* 1 = 6.16906 loss)\nI1207 08:02:42.474675   369 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1207 08:02:43.422739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30619 > 2) by scale factor 0.604926\nI1207 08:02:44.363219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07848 > 2) by scale factor 0.962243\nI1207 08:02:45.303551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80047 > 2) by scale factor 0.714167\nI1207 08:02:46.243899   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62033 > 2) by scale factor 0.763262\nI1207 08:02:47.183938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26128 > 2) by scale factor 0.884454\nI1207 08:02:48.123646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28647 > 2) by scale factor 0.87471\nI1207 08:02:49.063385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21256 > 2) by scale factor 0.622557\nI1207 08:02:50.941411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77765 > 2) by scale factor 0.720032\nI1207 08:02:51.881680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6144 > 2) by scale factor 0.553342\nI1207 08:02:52.821797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07543 > 2) by scale factor 0.963656\nI1207 08:02:54.699679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10919 > 2) by scale factor 0.486714\nI1207 08:02:55.639631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71506 > 2) by scale factor 0.736632\nI1207 08:02:56.579406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74012 > 2) by scale factor 0.729895\nI1207 08:02:57.519536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96809 > 2) by scale factor 0.673834\nI1207 08:02:58.459805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22427 > 2) by scale factor 0.899171\nI1207 08:03:00.337209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14231 > 2) by scale factor 0.636475\nI1207 08:03:01.277623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05748 > 2) by scale factor 0.972063\nI1207 08:03:02.218199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.44899 > 2) by scale factor 0.816665\nI1207 08:03:04.095715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2283 > 2) by scale factor 0.897546\nI1207 08:03:05.035315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36882 > 2) by scale factor 0.457789\nI1207 08:03:05.975178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19263 > 2) by scale factor 0.477027\nI1207 08:03:06.914572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62175 > 2) by scale factor 0.762848\nI1207 08:03:07.854732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49225 > 2) by scale factor 0.802487\nI1207 08:03:08.794389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4442 > 2) by scale factor 0.818263\nI1207 08:03:10.673804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04438 > 2) by scale factor 0.656949\nI1207 08:03:11.616253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80856 > 2) by scale factor 0.712108\nI1207 08:03:12.559144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44427 > 2) by scale factor 0.818241\nI1207 08:03:14.441550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91629 > 2) by scale factor 0.685804\nI1207 08:03:17.264580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02506 > 2) by scale factor 0.987625\nI1207 08:03:18.206979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84393 > 2) by scale factor 0.703252\nI1207 08:03:19.149690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03469 > 2) by scale factor 0.982951\nI1207 08:03:20.091998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68763 > 2) by scale factor 0.744151\nI1207 08:03:21.033859   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66958 > 2) by scale factor 0.749182\nI1207 08:03:21.976194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25177 > 2) by scale factor 0.888192\nI1207 08:03:22.918380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20177 > 2) by scale factor 0.908359\nI1207 08:03:23.860759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94692 > 2) by scale factor 0.678675\nI1207 08:03:24.803344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23153 > 2) by scale factor 0.618903\nI1207 08:03:25.745872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98466 > 2) by scale factor 0.501925\nI1207 08:03:26.688235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88324 > 2) by scale factor 0.693665\nI1207 08:03:27.630949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00038 > 2) by scale factor 0.666582\nI1207 08:03:28.573295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31777 > 2) by scale factor 0.862899\nI1207 08:03:30.455570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02078 > 2) by scale factor 0.662081\nI1207 08:03:31.397819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07115 > 2) by scale factor 0.965645\nI1207 08:03:32.339871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21116 > 2) by scale factor 0.622828\nI1207 08:03:34.222335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45021 > 2) by scale factor 0.816256\nI1207 08:03:35.164491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.552 > 2) by scale factor 0.783699\nI1207 08:03:36.106824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.29949 > 2) by scale factor 0.869759\nI1207 08:03:37.049506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99182 > 2) by scale factor 0.668489\nI1207 08:03:37.991343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.815 > 2) by scale factor 0.710479\nI1207 08:03:38.932734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2222 > 2) by scale factor 0.620693\nI1207 08:03:39.875370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25521 > 2) by scale factor 0.6144\nI1207 08:03:41.757781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14134 > 2) by scale factor 0.933994\nI1207 08:03:44.579131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41486 > 2) by scale factor 0.585676\nI1207 08:03:45.520424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09312 > 2) by scale factor 0.955509\nI1207 08:03:48.342258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12808 > 2) by scale factor 0.939815\nI1207 08:03:49.284359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24971 > 2) by scale factor 0.889002\nI1207 08:03:50.226526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51612 > 2) by scale factor 0.794875\nI1207 08:03:51.168592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45351 > 2) by scale factor 0.815159\nI1207 08:03:53.050910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09588 > 2) by scale factor 0.64602\nI1207 08:03:54.932494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52425 > 2) by scale factor 0.792314\nI1207 08:03:56.813722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68953 > 2) by scale factor 0.743625\nI1207 08:03:57.755655   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31384 > 2) by scale factor 0.864365\nI1207 08:03:58.697423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33204 > 2) by scale factor 0.857616\nI1207 08:03:59.639426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26618 > 2) by scale factor 0.612336\nI1207 08:04:00.580715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49389 > 2) by scale factor 0.801961\nI1207 08:04:01.522058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69167 > 2) by scale factor 0.743033\nI1207 08:04:02.463651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98633 > 2) by scale factor 0.669719\nI1207 08:04:03.405333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76995 > 2) by scale factor 0.722034\nI1207 08:04:04.347786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92772 > 2) by scale factor 0.683125\nI1207 08:04:05.289917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63217 > 2) by scale factor 0.759829\nI1207 08:04:06.231760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34123 > 2) by scale factor 0.598582\nI1207 08:04:07.173543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17226 > 2) by scale factor 0.630465\nI1207 08:04:08.115535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19728 > 2) by scale factor 0.910215\nI1207 08:04:09.056982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73163 > 2) by scale factor 0.732163\nI1207 08:04:10.938511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59749 > 2) by scale factor 0.769975\nI1207 08:04:11.880872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.21603 > 2) by scale factor 0.621885\nI1207 08:04:12.823724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47934 > 2) by scale factor 0.806666\nI1207 08:04:14.705107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65332 > 2) by scale factor 0.753772\nI1207 08:04:15.647531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99556 > 2) by scale factor 0.667655\nI1207 08:04:15.659505   369 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1207 08:05:08.642532   369 solver.cpp:404]     Test net output #0: accuracy = 0.1807\nI1207 08:05:08.642858   369 solver.cpp:404]     Test net output #1: loss = 4.71755 (* 1 = 4.71755 loss)\nI1207 08:05:09.516464   369 solver.cpp:228] Iteration 6000, loss = 5.09218\nI1207 08:05:09.516510   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 08:05:09.516527   369 solver.cpp:244]     Train net output #1: loss = 5.09218 (* 1 = 5.09218 loss)\nI1207 08:05:09.587757   369 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1207 08:05:09.597867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88547 > 2) by scale factor 0.693129\nI1207 08:05:11.476047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01213 > 2) by scale factor 0.99397\nI1207 08:05:13.354024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61272 > 2) by scale factor 0.765485\nI1207 08:05:15.230890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37883 > 2) by scale factor 0.840749\nI1207 08:05:16.170231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08917 > 2) by scale factor 0.95732\nI1207 08:05:17.110186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63443 > 2) by scale factor 0.759178\nI1207 08:05:18.049486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.17428 > 2) by scale factor 0.919846\nI1207 08:05:18.989063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21833 > 2) by scale factor 0.901581\nI1207 08:05:19.928958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34025 > 2) by scale factor 0.85461\nI1207 08:05:20.869210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14331 > 2) by scale factor 0.933136\nI1207 08:05:22.746155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33261 > 2) by scale factor 0.857409\nI1207 08:05:23.686368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3217 > 2) by scale factor 0.602101\nI1207 08:05:24.626116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22779 > 2) by scale factor 0.897753\nI1207 08:05:26.503010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66158 > 2) by scale factor 0.751432\nI1207 08:05:27.442373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20307 > 2) by scale factor 0.907825\nI1207 08:05:29.319631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34501 > 2) by scale factor 0.852876\nI1207 08:05:30.259460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0967 > 2) by scale factor 0.953878\nI1207 08:05:31.199221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90795 > 2) by scale factor 0.511777\nI1207 08:05:32.138239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54304 > 2) by scale factor 0.786459\nI1207 08:05:33.077936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71425 > 2) by scale factor 0.736852\nI1207 08:05:34.017478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1428 > 2) by scale factor 0.933358\nI1207 08:05:34.957471   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00661 > 2) by scale factor 0.665202\nI1207 08:05:35.897014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03014 > 2) by scale factor 0.49626\nI1207 08:05:36.836385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16963 > 2) by scale factor 0.921816\nI1207 08:05:37.775796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4672 > 2) by scale factor 0.576834\nI1207 08:05:38.715174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11423 > 2) by scale factor 0.945972\nI1207 08:05:39.655572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25054 > 2) by scale factor 0.888676\nI1207 08:05:40.595120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0518 > 2) by scale factor 0.655351\nI1207 08:05:41.536653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91509 > 2) by scale factor 0.686085\nI1207 08:05:42.478741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31819 > 2) by scale factor 0.862741\nI1207 08:05:43.421022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15488 > 2) by scale factor 0.928126\nI1207 08:05:44.362998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26604 > 2) by scale factor 0.882599\nI1207 08:05:46.245427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52049 > 2) by scale factor 0.793496\nI1207 08:05:47.187657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43235 > 2) by scale factor 0.82225\nI1207 08:05:49.071050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00704 > 2) by scale factor 0.996495\nI1207 08:05:50.013895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.4552 > 2) by scale factor 0.814597\nI1207 08:05:50.956315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18311 > 2) by scale factor 0.628317\nI1207 08:05:52.839467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50904 > 2) by scale factor 0.797119\nI1207 08:05:54.722756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57279 > 2) by scale factor 0.777367\nI1207 08:05:55.665179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98961 > 2) by scale factor 0.668983\nI1207 08:05:56.607789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03455 > 2) by scale factor 0.983019\nI1207 08:05:57.550148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62173 > 2) by scale factor 0.762854\nI1207 08:05:58.492601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29835 > 2) by scale factor 0.870188\nI1207 08:05:59.434793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28898 > 2) by scale factor 0.608092\nI1207 08:06:00.376416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07091 > 2) by scale factor 0.491291\nI1207 08:06:01.319138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37548 > 2) by scale factor 0.592509\nI1207 08:06:02.260891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07976 > 2) by scale factor 0.649401\nI1207 08:06:03.202885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19391 > 2) by scale factor 0.911614\nI1207 08:06:04.144850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29468 > 2) by scale factor 0.871579\nI1207 08:06:05.087275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34764 > 2) by scale factor 0.851918\nI1207 08:06:06.029788   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51164 > 2) by scale factor 0.796292\nI1207 08:06:07.911428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05757 > 2) by scale factor 0.654114\nI1207 08:06:09.793375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56544 > 2) by scale factor 0.779592\nI1207 08:06:13.556319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04585 > 2) by scale factor 0.977588\nI1207 08:06:15.438017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25813 > 2) by scale factor 0.885688\nI1207 08:06:18.259109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47317 > 2) by scale factor 0.808679\nI1207 08:06:19.201339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11844 > 2) by scale factor 0.641346\nI1207 08:06:20.143612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09753 > 2) by scale factor 0.953502\nI1207 08:06:21.085783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68219 > 2) by scale factor 0.74566\nI1207 08:06:22.029028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58585 > 2) by scale factor 0.773441\nI1207 08:06:22.971451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02786 > 2) by scale factor 0.986261\nI1207 08:06:23.913940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47758 > 2) by scale factor 0.807239\nI1207 08:06:24.856554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15065 > 2) by scale factor 0.929952\nI1207 08:06:25.799079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23429 > 2) by scale factor 0.89514\nI1207 08:06:26.741469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.7832 > 2) by scale factor 0.718598\nI1207 08:06:27.683951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8678 > 2) by scale factor 0.697399\nI1207 08:06:28.626301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78187 > 2) by scale factor 0.71894\nI1207 08:06:29.569003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59888 > 2) by scale factor 0.769564\nI1207 08:06:30.511276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29457 > 2) by scale factor 0.60706\nI1207 08:06:31.453693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35747 > 2) by scale factor 0.848366\nI1207 08:06:32.395674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60399 > 2) by scale factor 0.768052\nI1207 08:06:33.338313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0066 > 2) by scale factor 0.665203\nI1207 08:06:34.280903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1297 > 2) by scale factor 0.639038\nI1207 08:06:35.223423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23685 > 2) by scale factor 0.894115\nI1207 08:06:36.165712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29852 > 2) by scale factor 0.606332\nI1207 08:06:38.048030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19857 > 2) by scale factor 0.909682\nI1207 08:06:38.990030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16351 > 2) by scale factor 0.924424\nI1207 08:06:39.932271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10978 > 2) by scale factor 0.643132\nI1207 08:06:41.815469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14229 > 2) by scale factor 0.933578\nI1207 08:06:42.758527   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27259 > 2) by scale factor 0.880053\nI1207 08:06:42.770449   369 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1207 08:07:35.755853   369 solver.cpp:404]     Test net output #0: accuracy = 0.2596\nI1207 08:07:35.756181   369 solver.cpp:404]     Test net output #1: loss = 4.09157 (* 1 = 4.09157 loss)\nI1207 08:07:36.629611   369 solver.cpp:228] Iteration 6100, loss = 4.53027\nI1207 08:07:36.629665   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 08:07:36.629685   369 solver.cpp:244]     Train net output #1: loss = 4.53027 (* 1 = 4.53027 loss)\nI1207 08:07:36.707129   369 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1207 08:07:36.717257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05109 > 2) by scale factor 0.975092\nI1207 08:07:38.595633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99942 > 2) by scale factor 0.666795\nI1207 08:07:39.535734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09323 > 2) by scale factor 0.646574\nI1207 08:07:41.413769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07023 > 2) by scale factor 0.966077\nI1207 08:07:42.354121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02903 > 2) by scale factor 0.985693\nI1207 08:07:43.294266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46721 > 2) by scale factor 0.810631\nI1207 08:07:44.234371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20051 > 2) by scale factor 0.6249\nI1207 08:07:45.174604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23366 > 2) by scale factor 0.895392\nI1207 08:07:46.114398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48095 > 2) by scale factor 0.806143\nI1207 08:07:47.054563   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2131 > 2) by scale factor 0.622453\nI1207 08:07:47.994524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28036 > 2) by scale factor 0.877055\nI1207 08:07:49.872189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86503 > 2) by scale factor 0.698072\nI1207 08:07:50.811295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69351 > 2) by scale factor 0.54149\nI1207 08:07:51.751358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30636 > 2) by scale factor 0.867169\nI1207 08:07:52.691485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55547 > 2) by scale factor 0.782634\nI1207 08:07:54.568686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19681 > 2) by scale factor 0.625624\nI1207 08:07:55.508720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0712 > 2) by scale factor 0.965625\nI1207 08:07:56.448818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35056 > 2) by scale factor 0.85086\nI1207 08:07:58.326679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93202 > 2) by scale factor 0.682123\nI1207 08:07:59.266520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14929 > 2) by scale factor 0.930539\nI1207 08:08:00.206873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41622 > 2) by scale factor 0.82774\nI1207 08:08:01.146553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28389 > 2) by scale factor 0.875698\nI1207 08:08:02.086982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03077 > 2) by scale factor 0.659898\nI1207 08:08:03.027120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.11564 > 2) by scale factor 0.945339\nI1207 08:08:03.967001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28331 > 2) by scale factor 0.875921\nI1207 08:08:04.907605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16554 > 2) by scale factor 0.923556\nI1207 08:08:05.850790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36575 > 2) by scale factor 0.845399\nI1207 08:08:07.735172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33555 > 2) by scale factor 0.856329\nI1207 08:08:08.678247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08 > 2) by scale factor 0.649351\nI1207 08:08:12.444219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40227 > 2) by scale factor 0.832546\nI1207 08:08:14.327610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95681 > 2) by scale factor 0.676405\nI1207 08:08:16.211647   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30267 > 2) by scale factor 0.868555\nI1207 08:08:17.154608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19273 > 2) by scale factor 0.912104\nI1207 08:08:18.097658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05593 > 2) by scale factor 0.493105\nI1207 08:08:19.040865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51088 > 2) by scale factor 0.796533\nI1207 08:08:19.984395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50816 > 2) by scale factor 0.570099\nI1207 08:08:20.928143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85653 > 2) by scale factor 0.5186\nI1207 08:08:22.812556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00724 > 2) by scale factor 0.996391\nI1207 08:08:23.755995   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60185 > 2) by scale factor 0.768685\nI1207 08:08:24.698918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67009 > 2) by scale factor 0.749039\nI1207 08:08:26.582931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28994 > 2) by scale factor 0.873387\nI1207 08:08:27.526237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52501 > 2) by scale factor 0.792076\nI1207 08:08:28.469537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29412 > 2) by scale factor 0.871793\nI1207 08:08:30.353471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8694 > 2) by scale factor 0.516876\nI1207 08:08:35.060488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10408 > 2) by scale factor 0.644314\nI1207 08:08:36.003721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94568 > 2) by scale factor 0.67896\nI1207 08:08:38.829032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32681 > 2) by scale factor 0.859545\nI1207 08:08:39.772447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18137 > 2) by scale factor 0.916855\nI1207 08:08:40.715956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86095 > 2) by scale factor 0.699068\nI1207 08:08:41.659112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29615 > 2) by scale factor 0.606768\nI1207 08:08:42.602115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03352 > 2) by scale factor 0.495845\nI1207 08:08:43.544910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82467 > 2) by scale factor 0.708048\nI1207 08:08:44.487916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.231 > 2) by scale factor 0.896458\nI1207 08:08:45.430586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00521 > 2) by scale factor 0.665512\nI1207 08:08:46.373150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29825 > 2) by scale factor 0.870227\nI1207 08:08:47.316279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05042 > 2) by scale factor 0.975409\nI1207 08:08:49.200368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71226 > 2) by scale factor 0.737392\nI1207 08:08:50.143363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10682 > 2) by scale factor 0.9493\nI1207 08:08:51.086289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04289 > 2) by scale factor 0.979003\nI1207 08:08:52.029592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23199 > 2) by scale factor 0.896063\nI1207 08:08:54.854434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17921 > 2) by scale factor 0.917762\nI1207 08:08:56.737975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77204 > 2) by scale factor 0.721489\nI1207 08:08:57.681128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76787 > 2) by scale factor 0.722577\nI1207 08:08:58.624405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6151 > 2) by scale factor 0.764788\nI1207 08:08:59.567620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41582 > 2) by scale factor 0.827876\nI1207 08:09:00.510922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89558 > 2) by scale factor 0.690709\nI1207 08:09:01.454006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03644 > 2) by scale factor 0.658667\nI1207 08:09:02.397006   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59611 > 2) by scale factor 0.556157\nI1207 08:09:03.340070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86498 > 2) by scale factor 0.698085\nI1207 08:09:04.282915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93381 > 2) by scale factor 0.681708\nI1207 08:09:08.047633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73058 > 2) by scale factor 0.732446\nI1207 08:09:08.990850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29275 > 2) by scale factor 0.607396\nI1207 08:09:09.933738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01199 > 2) by scale factor 0.994043\nI1207 08:09:09.945791   369 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1207 08:10:02.931610   369 solver.cpp:404]     Test net output #0: accuracy = 0.23495\nI1207 08:10:02.931922   369 solver.cpp:404]     Test net output #1: loss = 3.85524 (* 1 = 3.85524 loss)\nI1207 08:10:03.805136   369 solver.cpp:228] Iteration 6200, loss = 4.04827\nI1207 08:10:03.805191   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 08:10:03.805210   369 solver.cpp:244]     Train net output #1: loss = 4.04827 (* 1 = 4.04827 loss)\nI1207 08:10:03.876752   369 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1207 08:10:04.825076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76037 > 2) by scale factor 0.72454\nI1207 08:10:06.703312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76467 > 2) by scale factor 0.723414\nI1207 08:10:07.643610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7482 > 2) by scale factor 0.533589\nI1207 08:10:08.583765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74331 > 2) by scale factor 0.534287\nI1207 08:10:09.523969   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48682 > 2) by scale factor 0.804241\nI1207 08:10:10.464099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41327 > 2) by scale factor 0.828751\nI1207 08:10:11.404112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62668 > 2) by scale factor 0.761417\nI1207 08:10:12.343776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90891 > 2) by scale factor 0.687542\nI1207 08:10:15.158869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31168 > 2) by scale factor 0.603924\nI1207 08:10:16.099496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38805 > 2) by scale factor 0.837504\nI1207 08:10:17.039654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13522 > 2) by scale factor 0.93667\nI1207 08:10:17.979279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53589 > 2) by scale factor 0.788678\nI1207 08:10:18.918491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70213 > 2) by scale factor 0.740156\nI1207 08:10:19.858357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21933 > 2) by scale factor 0.621247\nI1207 08:10:20.797675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71873 > 2) by scale factor 0.735637\nI1207 08:10:22.675128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14205 > 2) by scale factor 0.933686\nI1207 08:10:23.614899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24052 > 2) by scale factor 0.892651\nI1207 08:10:24.554921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29347 > 2) by scale factor 0.607263\nI1207 08:10:25.494829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.57029 > 2) by scale factor 0.778121\nI1207 08:10:26.434589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24235 > 2) by scale factor 0.89192\nI1207 08:10:27.373585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97232 > 2) by scale factor 0.672876\nI1207 08:10:28.313827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10147 > 2) by scale factor 0.644856\nI1207 08:10:29.253295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5804 > 2) by scale factor 0.775074\nI1207 08:10:32.067955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53452 > 2) by scale factor 0.789104\nI1207 08:10:33.946301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07138 > 2) by scale factor 0.965541\nI1207 08:10:34.886942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82332 > 2) by scale factor 0.523106\nI1207 08:10:35.827159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41224 > 2) by scale factor 0.829107\nI1207 08:10:36.767020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52053 > 2) by scale factor 0.568096\nI1207 08:10:38.650063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71496 > 2) by scale factor 0.736658\nI1207 08:10:39.592900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95011 > 2) by scale factor 0.67794\nI1207 08:10:40.535634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93871 > 2) by scale factor 0.68057\nI1207 08:10:41.478489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37948 > 2) by scale factor 0.840521\nI1207 08:10:42.421277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53605 > 2) by scale factor 0.788628\nI1207 08:10:43.363888   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37413 > 2) by scale factor 0.842414\nI1207 08:10:44.306802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05532 > 2) by scale factor 0.973085\nI1207 08:10:45.249663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68179 > 2) by scale factor 0.745771\nI1207 08:10:46.192291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78453 > 2) by scale factor 0.718254\nI1207 08:10:47.134655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7713 > 2) by scale factor 0.721683\nI1207 08:10:48.077002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77758 > 2) by scale factor 0.52944\nI1207 08:10:49.019019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23622 > 2) by scale factor 0.618005\nI1207 08:10:49.961827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36971 > 2) by scale factor 0.843986\nI1207 08:10:50.904758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47519 > 2) by scale factor 0.575508\nI1207 08:10:51.847614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75485 > 2) by scale factor 0.532645\nI1207 08:10:52.790345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76567 > 2) by scale factor 0.723151\nI1207 08:10:53.733572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28719 > 2) by scale factor 0.874435\nI1207 08:10:54.676199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38582 > 2) by scale factor 0.838286\nI1207 08:10:55.618985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65982 > 2) by scale factor 0.546475\nI1207 08:10:56.561498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.63585 > 2) by scale factor 0.758767\nI1207 08:10:57.504434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48634 > 2) by scale factor 0.573668\nI1207 08:10:58.447635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26058 > 2) by scale factor 0.613388\nI1207 08:10:59.389708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25137 > 2) by scale factor 0.615125\nI1207 08:11:00.332533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01584 > 2) by scale factor 0.663165\nI1207 08:11:01.275207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3026 > 2) by scale factor 0.868582\nI1207 08:11:02.217999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38943 > 2) by scale factor 0.83702\nI1207 08:11:03.160681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9072 > 2) by scale factor 0.687948\nI1207 08:11:04.103322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87112 > 2) by scale factor 0.696593\nI1207 08:11:05.046474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74167 > 2) by scale factor 0.729483\nI1207 08:11:05.989589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59009 > 2) by scale factor 0.557088\nI1207 08:11:06.932265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03223 > 2) by scale factor 0.659581\nI1207 08:11:07.874876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28508 > 2) by scale factor 0.608813\nI1207 08:11:08.817239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59475 > 2) by scale factor 0.770786\nI1207 08:11:09.759932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66713 > 2) by scale factor 0.74987\nI1207 08:11:10.702430   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29694 > 2) by scale factor 0.870725\nI1207 08:11:11.645351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64205 > 2) by scale factor 0.756988\nI1207 08:11:12.588002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75791 > 2) by scale factor 0.725188\nI1207 08:11:13.530405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30079 > 2) by scale factor 0.869266\nI1207 08:11:14.472060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54748 > 2) by scale factor 0.78509\nI1207 08:11:15.414466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36409 > 2) by scale factor 0.594515\nI1207 08:11:16.357075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25609 > 2) by scale factor 0.469914\nI1207 08:11:17.300150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03041 > 2) by scale factor 0.659976\nI1207 08:11:18.241883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12129 > 2) by scale factor 0.942821\nI1207 08:11:19.184247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9665 > 2) by scale factor 0.674195\nI1207 08:11:20.126960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36651 > 2) by scale factor 0.845125\nI1207 08:11:21.069299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36481 > 2) by scale factor 0.845734\nI1207 08:11:22.011788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45149 > 2) by scale factor 0.815831\nI1207 08:11:22.954350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4879 > 2) by scale factor 0.803892\nI1207 08:11:23.897227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.43689 > 2) by scale factor 0.820719\nI1207 08:11:26.720309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45785 > 2) by scale factor 0.813718\nI1207 08:11:27.662539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90674 > 2) by scale factor 0.688056\nI1207 08:11:28.604791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70874 > 2) by scale factor 0.738352\nI1207 08:11:29.547078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07221 > 2) by scale factor 0.965153\nI1207 08:11:30.488451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0956 > 2) by scale factor 0.954379\nI1207 08:11:31.430333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34761 > 2) by scale factor 0.851929\nI1207 08:11:33.311554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65725 > 2) by scale factor 0.752657\nI1207 08:11:34.253937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67074 > 2) by scale factor 0.748857\nI1207 08:11:36.137006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31285 > 2) by scale factor 0.864733\nI1207 08:11:37.079671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79504 > 2) by scale factor 0.715554\nI1207 08:11:37.091003   369 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1207 08:12:30.066737   369 solver.cpp:404]     Test net output #0: accuracy = 0.143\nI1207 08:12:30.067034   369 solver.cpp:404]     Test net output #1: loss = 4.97987 (* 1 = 4.97987 loss)\nI1207 08:12:30.939800   369 solver.cpp:228] Iteration 6300, loss = 4.95924\nI1207 08:12:30.939855   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 08:12:30.939873   369 solver.cpp:244]     Train net output #1: loss = 4.95924 (* 1 = 4.95924 loss)\nI1207 08:12:31.018296   369 
sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1207 08:12:31.028417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13385 > 2) by scale factor 0.483811\nI1207 08:12:31.968945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24621 > 2) by scale factor 0.890388\nI1207 08:12:32.909008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19469 > 2) by scale factor 0.911291\nI1207 08:12:33.849164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24279 > 2) by scale factor 0.471388\nI1207 08:12:34.789508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67969 > 2) by scale factor 0.746356\nI1207 08:12:35.729794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23605 > 2) by scale factor 0.894433\nI1207 08:12:37.607902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62322 > 2) by scale factor 0.762423\nI1207 08:12:38.548347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19178 > 2) by scale factor 0.912501\nI1207 08:12:39.489013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72395 > 2) by scale factor 0.734229\nI1207 08:12:41.367491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44713 > 2) by scale factor 0.817285\nI1207 08:12:42.307704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50351 > 2) by scale factor 0.798877\nI1207 08:12:45.123739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14628 > 2) by scale factor 0.931844\nI1207 08:12:46.063891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56597 > 2) by scale factor 0.560857\nI1207 08:12:47.004144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07524 > 2) by scale factor 0.963745\nI1207 
08:12:47.944386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81701 > 2) by scale factor 0.709973\nI1207 08:12:48.884378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71437 > 2) by scale factor 0.73682\nI1207 08:12:49.824739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05607 > 2) by scale factor 0.972728\nI1207 08:12:50.764102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25746 > 2) by scale factor 0.469764\nI1207 08:12:51.704466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9869 > 2) by scale factor 0.66959\nI1207 08:12:52.644965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89992 > 2) by scale factor 0.689675\nI1207 08:12:53.585288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59193 > 2) by scale factor 0.771625\nI1207 08:12:54.524592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30022 > 2) by scale factor 0.869483\nI1207 08:12:56.402076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15668 > 2) by scale factor 0.633577\nI1207 08:12:57.342535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33631 > 2) by scale factor 0.599465\nI1207 08:12:58.282842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35761 > 2) by scale factor 0.848318\nI1207 08:12:59.223207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98443 > 2) by scale factor 0.670144\nI1207 08:13:00.164764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55966 > 2) by scale factor 0.781353\nI1207 08:13:01.107674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69821 > 2) by scale factor 0.741232\nI1207 08:13:02.050559   369 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.33741 > 2) by scale factor 0.855647\nI1207 08:13:02.993460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12094 > 2) by scale factor 0.640833\nI1207 08:13:03.936246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38502 > 2) by scale factor 0.590839\nI1207 08:13:04.878690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1045 > 2) by scale factor 0.950346\nI1207 08:13:05.821672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03647 > 2) by scale factor 0.982091\nI1207 08:13:07.704773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61488 > 2) by scale factor 0.764854\nI1207 08:13:08.647083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30202 > 2) by scale factor 0.868803\nI1207 08:13:09.589692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30048 > 2) by scale factor 0.605973\nI1207 08:13:10.532205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88168 > 2) by scale factor 0.694041\nI1207 08:13:11.474967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62812 > 2) by scale factor 0.55125\nI1207 08:13:16.180495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46524 > 2) by scale factor 0.811279\nI1207 08:13:17.122993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11052 > 2) by scale factor 0.64298\nI1207 08:13:18.065598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40479 > 2) by scale factor 0.831675\nI1207 08:13:21.830137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43498 > 2) by scale factor 0.821361\nI1207 08:13:22.772656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09639 > 2) by scale factor 0.954023\nI1207 
08:13:23.715922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.741 > 2) by scale factor 0.729661\nI1207 08:13:25.599217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60465 > 2) by scale factor 0.767858\nI1207 08:13:26.541718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25572 > 2) by scale factor 0.614303\nI1207 08:13:27.484922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04647 > 2) by scale factor 0.977292\nI1207 08:13:29.368749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74958 > 2) by scale factor 0.727384\nI1207 08:13:30.311753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1047 > 2) by scale factor 0.644185\nI1207 08:13:31.255131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93495 > 2) by scale factor 0.681442\nI1207 08:13:32.198148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07384 > 2) by scale factor 0.964393\nI1207 08:13:34.081513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54051 > 2) by scale factor 0.787242\nI1207 08:13:35.024336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3768 > 2) by scale factor 0.592277\nI1207 08:13:35.967289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0674 > 2) by scale factor 0.967397\nI1207 08:13:36.910290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9798 > 2) by scale factor 0.671186\nI1207 08:13:37.853154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31172 > 2) by scale factor 0.865157\nI1207 08:13:38.795676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18632 > 2) by scale factor 0.914781\nI1207 08:13:39.738554   369 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.52301 > 2) by scale factor 0.567696\nI1207 08:13:40.681708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0891 > 2) by scale factor 0.647437\nI1207 08:13:42.565405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33765 > 2) by scale factor 0.599224\nI1207 08:13:43.508349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02287 > 2) by scale factor 0.661623\nI1207 08:13:45.392292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49944 > 2) by scale factor 0.80018\nI1207 08:13:47.277334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75693 > 2) by scale factor 0.725445\nI1207 08:13:48.219951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15475 > 2) by scale factor 0.633965\nI1207 08:13:49.163175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12102 > 2) by scale factor 0.942944\nI1207 08:13:50.106060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31796 > 2) by scale factor 0.862827\nI1207 08:13:51.989290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46101 > 2) by scale factor 0.812675\nI1207 08:13:53.873533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56558 > 2) by scale factor 0.779551\nI1207 08:13:54.816467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17748 > 2) by scale factor 0.918493\nI1207 08:13:55.759372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63838 > 2) by scale factor 0.758041\nI1207 08:13:56.702239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49412 > 2) by scale factor 0.57239\nI1207 08:13:57.645196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5337 > 2) by scale factor 0.789359\nI1207 
08:13:59.528380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44345 > 2) by scale factor 0.818516\nI1207 08:14:03.292145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94509 > 2) by scale factor 0.679096\nI1207 08:14:04.234918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33269 > 2) by scale factor 0.857378\nI1207 08:14:04.246912   369 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1207 08:14:57.242900   369 solver.cpp:404]     Test net output #0: accuracy = 0.19955\nI1207 08:14:57.243227   369 solver.cpp:404]     Test net output #1: loss = 4.78439 (* 1 = 4.78439 loss)\nI1207 08:14:58.116607   369 solver.cpp:228] Iteration 6400, loss = 4.61239\nI1207 08:14:58.116658   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 08:14:58.116677   369 solver.cpp:244]     Train net output #1: loss = 4.61239 (* 1 = 4.61239 loss)\nI1207 08:14:58.186815   369 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1207 08:14:58.196933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78066 > 2) by scale factor 0.719253\nI1207 08:14:59.137280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9461 > 2) by scale factor 0.506829\nI1207 08:15:00.077610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05265 > 2) by scale factor 0.974352\nI1207 08:15:01.017262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77595 > 2) by scale factor 0.720473\nI1207 08:15:01.957605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55772 > 2) by scale factor 0.781945\nI1207 08:15:02.897575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11318 > 2) by scale factor 0.946439\nI1207 08:15:03.837754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30476 > 2) by scale factor 0.605187\nI1207 
08:15:04.777865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46106 > 2) by scale factor 0.577858\nI1207 08:15:05.717933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21473 > 2) by scale factor 0.622137\nI1207 08:15:06.658246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98642 > 2) by scale factor 0.501703\nI1207 08:15:07.598577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97797 > 2) by scale factor 0.502769\nI1207 08:15:08.538756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45148 > 2) by scale factor 0.579462\nI1207 08:15:09.478646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13413 > 2) by scale factor 0.638135\nI1207 08:15:11.357074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.005 > 2) by scale factor 0.665558\nI1207 08:15:12.297051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33976 > 2) by scale factor 0.854789\nI1207 08:15:13.237169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88932 > 2) by scale factor 0.514229\nI1207 08:15:14.177424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61197 > 2) by scale factor 0.433655\nI1207 08:15:15.117612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6439 > 2) by scale factor 0.756458\nI1207 08:15:16.057104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53414 > 2) by scale factor 0.565909\nI1207 08:15:17.934756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48011 > 2) by scale factor 0.806415\nI1207 08:15:18.875097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65517 > 2) by scale factor 0.753248\nI1207 08:15:20.752744   369 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.55285 > 2) by scale factor 0.783437\nI1207 08:15:21.692769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54985 > 2) by scale factor 0.784359\nI1207 08:15:22.632473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53709 > 2) by scale factor 0.788306\nI1207 08:15:23.573132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14431 > 2) by scale factor 0.63607\nI1207 08:15:25.451478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58859 > 2) by scale factor 0.557322\nI1207 08:15:26.391361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42537 > 2) by scale factor 0.583879\nI1207 08:15:27.331842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1529 > 2) by scale factor 0.928981\nI1207 08:15:28.274317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31764 > 2) by scale factor 0.463216\nI1207 08:15:30.156721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08931 > 2) by scale factor 0.647393\nI1207 08:15:31.099202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2963 > 2) by scale factor 0.870968\nI1207 08:15:32.041771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42842 > 2) by scale factor 0.823581\nI1207 08:15:32.984072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32361 > 2) by scale factor 0.86073\nI1207 08:15:33.926045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63666 > 2) by scale factor 0.758536\nI1207 08:15:34.868844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08173 > 2) by scale factor 0.648987\nI1207 08:15:36.751531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11376 > 2) by scale factor 0.946183\nI1207 
08:15:37.694063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68792 > 2) by scale factor 0.74407\nI1207 08:15:38.636684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2091 > 2) by scale factor 0.623228\nI1207 08:15:41.459426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98515 > 2) by scale factor 0.669983\nI1207 08:15:42.401922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39326 > 2) by scale factor 0.835679\nI1207 08:15:45.224969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84984 > 2) by scale factor 0.519502\nI1207 08:15:46.167194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31173 > 2) by scale factor 0.603913\nI1207 08:15:47.110023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2992 > 2) by scale factor 0.869867\nI1207 08:15:48.052647   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41338 > 2) by scale factor 0.828714\nI1207 08:15:49.935148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80824 > 2) by scale factor 0.712189\nI1207 08:15:50.877815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04291 > 2) by scale factor 0.657266\nI1207 08:15:51.820315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32983 > 2) by scale factor 0.600631\nI1207 08:15:52.763079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29358 > 2) by scale factor 0.607242\nI1207 08:15:53.705435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21202 > 2) by scale factor 0.904151\nI1207 08:15:54.648159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13283 > 2) by scale factor 0.937722\nI1207 08:15:56.530544   369 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.14507 > 2) by scale factor 0.93237\nI1207 08:15:57.472241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55962 > 2) by scale factor 0.561858\nI1207 08:15:58.414789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69685 > 2) by scale factor 0.541001\nI1207 08:16:00.296946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35436 > 2) by scale factor 0.596239\nI1207 08:16:01.238492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78131 > 2) by scale factor 0.719087\nI1207 08:16:02.180941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82599 > 2) by scale factor 0.707718\nI1207 08:16:03.123143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12845 > 2) by scale factor 0.939651\nI1207 08:16:04.065549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02643 > 2) by scale factor 0.660844\nI1207 08:16:05.007684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77451 > 2) by scale factor 0.720848\nI1207 08:16:05.949048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98652 > 2) by scale factor 0.669677\nI1207 08:16:06.890930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1545 > 2) by scale factor 0.928291\nI1207 08:16:07.833312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56504 > 2) by scale factor 0.561004\nI1207 08:16:08.775701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20915 > 2) by scale factor 0.623218\nI1207 08:16:09.718077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13625 > 2) by scale factor 0.637704\nI1207 08:16:10.660146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75831 > 2) by scale factor 0.725081\nI1207 
08:16:11.602468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0373 > 2) by scale factor 0.981691\nI1207 08:16:12.544870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81866 > 2) by scale factor 0.523744\nI1207 08:16:13.487079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3402 > 2) by scale factor 0.854627\nI1207 08:16:14.429559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89701 > 2) by scale factor 0.690367\nI1207 08:16:15.371465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56256 > 2) by scale factor 0.561394\nI1207 08:16:16.313819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12654 > 2) by scale factor 0.639685\nI1207 08:16:17.255903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22042 > 2) by scale factor 0.90073\nI1207 08:16:20.077493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16833 > 2) by scale factor 0.922371\nI1207 08:16:21.018817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14194 > 2) by scale factor 0.933735\nI1207 08:16:21.960893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57214 > 2) by scale factor 0.559889\nI1207 08:16:22.902465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04512 > 2) by scale factor 0.656788\nI1207 08:16:23.844671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08766 > 2) by scale factor 0.64774\nI1207 08:16:24.786990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19579 > 2) by scale factor 0.910833\nI1207 08:16:25.729091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6883 > 2) by scale factor 0.743965\nI1207 08:16:26.671473   369 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.15247 > 2) by scale factor 0.634423\nI1207 08:16:27.613042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4466 > 2) by scale factor 0.580283\nI1207 08:16:28.555932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60504 > 2) by scale factor 0.434307\nI1207 08:16:29.498003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11734 > 2) by scale factor 0.94458\nI1207 08:16:30.440680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9515 > 2) by scale factor 0.677622\nI1207 08:16:31.382773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20699 > 2) by scale factor 0.623638\nI1207 08:16:31.394757   369 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1207 08:17:24.381043   369 solver.cpp:404]     Test net output #0: accuracy = 0.2603\nI1207 08:17:24.381371   369 solver.cpp:404]     Test net output #1: loss = 5.75149 (* 1 = 5.75149 loss)\nI1207 08:17:25.254484   369 solver.cpp:228] Iteration 6500, loss = 4.82565\nI1207 08:17:25.254539   369 solver.cpp:244]     Train net output #0: accuracy = 0.31\nI1207 08:17:25.254559   369 solver.cpp:244]     Train net output #1: loss = 4.82565 (* 1 = 4.82565 loss)\nI1207 08:17:25.330628   369 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1207 08:17:25.340749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08566 > 2) by scale factor 0.95893\nI1207 08:17:27.218009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9144 > 2) by scale factor 0.686247\nI1207 08:17:28.157972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51774 > 2) by scale factor 0.794364\nI1207 08:17:29.097995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70814 > 2) by scale factor 0.539354\nI1207 08:17:30.038326   369 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.05908 > 2) by scale factor 0.971308\nI1207 08:17:33.794795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42699 > 2) by scale factor 0.824068\nI1207 08:17:35.674422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90949 > 2) by scale factor 0.511576\nI1207 08:17:37.554757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25228 > 2) by scale factor 0.887989\nI1207 08:17:39.434183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5043 > 2) by scale factor 0.798627\nI1207 08:17:40.375694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17716 > 2) by scale factor 0.629494\nI1207 08:17:41.316790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93752 > 2) by scale factor 0.680848\nI1207 08:17:42.257565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01612 > 2) by scale factor 0.992002\nI1207 08:17:43.198397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07428 > 2) by scale factor 0.650559\nI1207 08:17:44.139437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57236 > 2) by scale factor 0.777497\nI1207 08:17:45.080376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64093 > 2) by scale factor 0.757309\nI1207 08:17:46.021277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13304 > 2) by scale factor 0.937627\nI1207 08:17:46.961978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65168 > 2) by scale factor 0.75424\nI1207 08:17:47.903350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41893 > 2) by scale factor 0.826812\nI1207 08:17:49.783141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65699 > 2) by scale factor 0.752731\nI1207 
08:17:50.724377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09311 > 2) by scale factor 0.955516\nI1207 08:17:51.665434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28576 > 2) by scale factor 0.874981\nI1207 08:17:53.545358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32732 > 2) by scale factor 0.601085\nI1207 08:17:54.486464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03382 > 2) by scale factor 0.659235\nI1207 08:17:55.427834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29382 > 2) by scale factor 0.871909\nI1207 08:17:56.368909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25489 > 2) by scale factor 0.886959\nI1207 08:17:57.309808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27963 > 2) by scale factor 0.877335\nI1207 08:17:59.191695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56626 > 2) by scale factor 0.779345\nI1207 08:18:00.134829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31983 > 2) by scale factor 0.862132\nI1207 08:18:01.077631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94702 > 2) by scale factor 0.678651\nI1207 08:18:02.021015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47651 > 2) by scale factor 0.575289\nI1207 08:18:03.905419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76311 > 2) by scale factor 0.723822\nI1207 08:18:04.848071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10878 > 2) by scale factor 0.643339\nI1207 08:18:05.790983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35584 > 2) by scale factor 0.848955\nI1207 08:18:06.733803   369 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 2.88945 > 2) by scale factor 0.692173\nI1207 08:18:07.677269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81705 > 2) by scale factor 0.709962\nI1207 08:18:08.620811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37119 > 2) by scale factor 0.593262\nI1207 08:18:09.564074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32037 > 2) by scale factor 0.861932\nI1207 08:18:11.447950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26273 > 2) by scale factor 0.883889\nI1207 08:18:12.391932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42691 > 2) by scale factor 0.824093\nI1207 08:18:13.335002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51138 > 2) by scale factor 0.796374\nI1207 08:18:14.278126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34389 > 2) by scale factor 0.598107\nI1207 08:18:15.221715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65424 > 2) by scale factor 0.753511\nI1207 08:18:16.165562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39782 > 2) by scale factor 0.834092\nI1207 08:18:17.108577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40266 > 2) by scale factor 0.83241\nI1207 08:18:18.052037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67307 > 2) by scale factor 0.748204\nI1207 08:18:18.994410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36274 > 2) by scale factor 0.594754\nI1207 08:18:19.937957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36057 > 2) by scale factor 0.847254\nI1207 08:18:20.882252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64929 > 2) by scale factor 
0.754919\nI1207 08:18:21.826417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72089 > 2) by scale factor 0.735054\nI1207 08:18:22.769577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43754 > 2) by scale factor 0.820499\nI1207 08:18:24.653867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96815 > 2) by scale factor 0.673819\nI1207 08:18:25.598340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18168 > 2) by scale factor 0.628598\nI1207 08:18:26.542284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28243 > 2) by scale factor 0.87626\nI1207 08:18:27.486378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4616 > 2) by scale factor 0.577768\nI1207 08:18:28.430691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38905 > 2) by scale factor 0.837152\nI1207 08:18:30.316953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11187 > 2) by scale factor 0.947026\nI1207 08:18:31.261328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01846 > 2) by scale factor 0.662589\nI1207 08:18:32.205844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61121 > 2) by scale factor 0.765929\nI1207 08:18:33.150068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67218 > 2) by scale factor 0.748452\nI1207 08:18:37.860846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52648 > 2) by scale factor 0.791615\nI1207 08:18:38.805049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7342 > 2) by scale factor 0.731475\nI1207 08:18:39.749238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96218 > 2) by scale factor 0.504773\nI1207 08:18:40.692957   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.31038 > 2) by scale factor 0.865658\nI1207 08:18:41.637316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20644 > 2) by scale factor 0.906438\nI1207 08:18:42.581164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12396 > 2) by scale factor 0.640213\nI1207 08:18:43.524931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71274 > 2) by scale factor 0.737262\nI1207 08:18:44.469558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44873 > 2) by scale factor 0.81675\nI1207 08:18:46.355468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24731 > 2) by scale factor 0.615895\nI1207 08:18:47.299396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51679 > 2) by scale factor 0.794663\nI1207 08:18:48.243150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0555 > 2) by scale factor 0.654558\nI1207 08:18:49.187150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19254 > 2) by scale factor 0.62646\nI1207 08:18:50.131453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20421 > 2) by scale factor 0.624179\nI1207 08:18:52.018033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24339 > 2) by scale factor 0.616639\nI1207 08:18:52.962170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1658 > 2) by scale factor 0.923445\nI1207 08:18:54.848361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50246 > 2) by scale factor 0.571027\nI1207 08:18:55.793174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87635 > 2) by scale factor 0.695327\nI1207 08:18:56.736873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1126 > 2) by scale factor 
0.946701\nI1207 08:18:57.680964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42292 > 2) by scale factor 0.82545\nI1207 08:18:58.624789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57854 > 2) by scale factor 0.558887\nI1207 08:18:58.636791   369 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1207 08:19:51.628051   369 solver.cpp:404]     Test net output #0: accuracy = 0.15655\nI1207 08:19:51.628391   369 solver.cpp:404]     Test net output #1: loss = 6.42374 (* 1 = 6.42374 loss)\nI1207 08:19:52.502197   369 solver.cpp:228] Iteration 6600, loss = 5.97236\nI1207 08:19:52.502251   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 08:19:52.502271   369 solver.cpp:244]     Train net output #1: loss = 5.97236 (* 1 = 5.97236 loss)\nI1207 08:19:52.580318   369 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1207 08:19:52.590502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90167 > 2) by scale factor 0.689258\nI1207 08:19:53.531428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3709 > 2) by scale factor 0.593314\nI1207 08:19:54.472218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2183 > 2) by scale factor 0.901592\nI1207 08:19:55.412971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72388 > 2) by scale factor 0.734245\nI1207 08:19:56.354379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00734 > 2) by scale factor 0.996344\nI1207 08:19:57.295485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84108 > 2) by scale factor 0.703957\nI1207 08:19:58.236812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85927 > 2) by scale factor 0.518233\nI1207 08:19:59.177979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08876 > 2) by scale factor 
0.647509\nI1207 08:20:00.119345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87547 > 2) by scale factor 0.516066\nI1207 08:20:01.060338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3658 > 2) by scale factor 0.458106\nI1207 08:20:02.940785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64217 > 2) by scale factor 0.549123\nI1207 08:20:03.881788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34343 > 2) by scale factor 0.853448\nI1207 08:20:05.761698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11968 > 2) by scale factor 0.641092\nI1207 08:20:06.702764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1478 > 2) by scale factor 0.635364\nI1207 08:20:07.643841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26917 > 2) by scale factor 0.881381\nI1207 08:20:08.585201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46617 > 2) by scale factor 0.810973\nI1207 08:20:09.526244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61072 > 2) by scale factor 0.766073\nI1207 08:20:10.467097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00196 > 2) by scale factor 0.999023\nI1207 08:20:11.408656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11052 > 2) by scale factor 0.947636\nI1207 08:20:12.349265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10377 > 2) by scale factor 0.950675\nI1207 08:20:13.289518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5629 > 2) by scale factor 0.780366\nI1207 08:20:14.230378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15059 > 2) by scale factor 0.481859\nI1207 08:20:15.170720   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.58797 > 2) by scale factor 0.557419\nI1207 08:20:16.112382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11866 > 2) by scale factor 0.943995\nI1207 08:20:17.053336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22776 > 2) by scale factor 0.897764\nI1207 08:20:17.994107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34928 > 2) by scale factor 0.851325\nI1207 08:20:18.934813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67917 > 2) by scale factor 0.5436\nI1207 08:20:19.876108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14798 > 2) by scale factor 0.931107\nI1207 08:20:20.818625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14245 > 2) by scale factor 0.933509\nI1207 08:20:23.641779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03356 > 2) by scale factor 0.659292\nI1207 08:20:24.584270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49323 > 2) by scale factor 0.572536\nI1207 08:20:25.526700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96696 > 2) by scale factor 0.504165\nI1207 08:20:29.291044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90034 > 2) by scale factor 0.689574\nI1207 08:20:30.233978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59239 > 2) by scale factor 0.77149\nI1207 08:20:31.176805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32532 > 2) by scale factor 0.601445\nI1207 08:20:33.060119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30877 > 2) by scale factor 0.866263\nI1207 08:20:34.002640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57685 > 2) by scale factor 
0.77614\nI1207 08:20:34.945410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31986 > 2) by scale factor 0.602434\nI1207 08:20:37.768952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9235 > 2) by scale factor 0.684112\nI1207 08:20:38.711612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50515 > 2) by scale factor 0.798354\nI1207 08:20:39.654292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17015 > 2) by scale factor 0.479599\nI1207 08:20:40.597054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01075 > 2) by scale factor 0.664285\nI1207 08:20:42.479188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39031 > 2) by scale factor 0.836712\nI1207 08:20:43.422134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56889 > 2) by scale factor 0.778545\nI1207 08:20:44.364989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07734 > 2) by scale factor 0.962771\nI1207 08:20:45.307633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67223 > 2) by scale factor 0.748438\nI1207 08:20:46.250213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05684 > 2) by scale factor 0.972364\nI1207 08:20:47.192775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38129 > 2) by scale factor 0.839882\nI1207 08:20:49.075958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0343 > 2) by scale factor 0.659131\nI1207 08:20:50.018553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27579 > 2) by scale factor 0.610539\nI1207 08:20:50.961004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0234 > 2) by scale factor 0.988437\nI1207 08:20:51.903825   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.05214 > 2) by scale factor 0.655278\nI1207 08:20:52.846572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69005 > 2) by scale factor 0.74348\nI1207 08:20:54.730291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22433 > 2) by scale factor 0.620283\nI1207 08:20:55.673000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80782 > 2) by scale factor 0.712295\nI1207 08:20:56.615134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80416 > 2) by scale factor 0.713225\nI1207 08:20:57.557885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05168 > 2) by scale factor 0.655377\nI1207 08:20:58.500578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96124 > 2) by scale factor 0.675393\nI1207 08:20:59.443420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36707 > 2) by scale factor 0.844927\nI1207 08:21:00.385943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27906 > 2) by scale factor 0.609931\nI1207 08:21:01.328254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09116 > 2) by scale factor 0.956405\nI1207 08:21:03.211143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25005 > 2) by scale factor 0.888868\nI1207 08:21:06.034457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40116 > 2) by scale factor 0.588035\nI1207 08:21:06.977201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89371 > 2) by scale factor 0.691154\nI1207 08:21:07.919703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49629 > 2) by scale factor 0.801188\nI1207 08:21:08.862427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21923 > 2) by scale 
factor 0.901213\nI1207 08:21:09.805186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10639 > 2) by scale factor 0.949493\nI1207 08:21:10.747810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09933 > 2) by scale factor 0.952686\nI1207 08:21:11.690832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9762 > 2) by scale factor 0.671998\nI1207 08:21:12.633380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68187 > 2) by scale factor 0.745749\nI1207 08:21:13.575850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20416 > 2) by scale factor 0.907373\nI1207 08:21:14.518432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61406 > 2) by scale factor 0.765093\nI1207 08:21:15.460949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48181 > 2) by scale factor 0.805865\nI1207 08:21:16.403422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74722 > 2) by scale factor 0.728008\nI1207 08:21:18.285993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44561 > 2) by scale factor 0.817793\nI1207 08:21:19.228981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02616 > 2) by scale factor 0.987087\nI1207 08:21:20.171288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83011 > 2) by scale factor 0.522179\nI1207 08:21:21.113711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8263 > 2) by scale factor 0.70764\nI1207 08:21:22.055799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61289 > 2) by scale factor 0.765436\nI1207 08:21:22.997756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38905 > 2) by scale factor 0.837153\nI1207 08:21:23.940470   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.83069 > 2) by scale factor 0.522099\nI1207 08:21:24.882786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50983 > 2) by scale factor 0.796866\nI1207 08:21:25.825459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28665 > 2) by scale factor 0.608523\nI1207 08:21:25.837426   369 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1207 08:22:18.814837   369 solver.cpp:404]     Test net output #0: accuracy = 0.17945\nI1207 08:22:18.815177   369 solver.cpp:404]     Test net output #1: loss = 6.4515 (* 1 = 6.4515 loss)\nI1207 08:22:19.688935   369 solver.cpp:228] Iteration 6700, loss = 5.97655\nI1207 08:22:19.688987   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 08:22:19.689007   369 solver.cpp:244]     Train net output #1: loss = 5.97655 (* 1 = 5.97655 loss)\nI1207 08:22:19.768682   369 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1207 08:22:19.778503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64807 > 2) by scale factor 0.755267\nI1207 08:22:20.718888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78134 > 2) by scale factor 0.528913\nI1207 08:22:21.659770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63684 > 2) by scale factor 0.549929\nI1207 08:22:22.600286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98414 > 2) by scale factor 0.670209\nI1207 08:22:23.541357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35478 > 2) by scale factor 0.596165\nI1207 08:22:24.481308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48309 > 2) by scale factor 0.574203\nI1207 08:22:25.422266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03977 > 2) by scale factor 0.657945\nI1207 08:22:26.363034   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.47089 > 2) by scale factor 0.57622\nI1207 08:22:27.303638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0802 > 2) by scale factor 0.961446\nI1207 08:22:28.244621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71663 > 2) by scale factor 0.736206\nI1207 08:22:30.124680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4858 > 2) by scale factor 0.573756\nI1207 08:22:31.065515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1008 > 2) by scale factor 0.48771\nI1207 08:22:32.006613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72046 > 2) by scale factor 0.73517\nI1207 08:22:32.947700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61724 > 2) by scale factor 0.764164\nI1207 08:22:33.888418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83313 > 2) by scale factor 0.705933\nI1207 08:22:35.768246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14096 > 2) by scale factor 0.93416\nI1207 08:22:36.709359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6018 > 2) by scale factor 0.555278\nI1207 08:22:37.650458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66366 > 2) by scale factor 0.545903\nI1207 08:22:38.591368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22982 > 2) by scale factor 0.896931\nI1207 08:22:39.532483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48166 > 2) by scale factor 0.805911\nI1207 08:22:40.473551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59895 > 2) by scale factor 0.769543\nI1207 08:22:41.414706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16545 > 2) by scale 
factor 0.631822\nI1207 08:22:42.355594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59915 > 2) by scale factor 0.555687\nI1207 08:22:43.296551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30564 > 2) by scale factor 0.867437\nI1207 08:22:44.237658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12408 > 2) by scale factor 0.941583\nI1207 08:22:45.178644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42012 > 2) by scale factor 0.826406\nI1207 08:22:46.119910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12694 > 2) by scale factor 0.940319\nI1207 08:22:47.060927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59825 > 2) by scale factor 0.76975\nI1207 08:22:48.940769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74855 > 2) by scale factor 0.727656\nI1207 08:22:49.884060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07896 > 2) by scale factor 0.962018\nI1207 08:22:50.827306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50828 > 2) by scale factor 0.797359\nI1207 08:22:51.770174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58142 > 2) by scale factor 0.774767\nI1207 08:22:53.653687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28009 > 2) by scale factor 0.877159\nI1207 08:22:54.596669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23263 > 2) by scale factor 0.618691\nI1207 08:22:55.539397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02629 > 2) by scale factor 0.987025\nI1207 08:22:56.481951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71235 > 2) by scale factor 0.737369\nI1207 08:22:58.365821   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.65175 > 2) by scale factor 0.754218\nI1207 08:23:00.249228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75876 > 2) by scale factor 0.724962\nI1207 08:23:02.133541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05397 > 2) by scale factor 0.973722\nI1207 08:23:03.077430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11295 > 2) by scale factor 0.946545\nI1207 08:23:04.021498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19646 > 2) by scale factor 0.910556\nI1207 08:23:06.849648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33886 > 2) by scale factor 0.855116\nI1207 08:23:07.793694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67071 > 2) by scale factor 0.544853\nI1207 08:23:08.737143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02616 > 2) by scale factor 0.496751\nI1207 08:23:09.681432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5734 > 2) by scale factor 0.559691\nI1207 08:23:10.625421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23231 > 2) by scale factor 0.618753\nI1207 08:23:11.569326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90936 > 2) by scale factor 0.687437\nI1207 08:23:12.513484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14893 > 2) by scale factor 0.930694\nI1207 08:23:13.457257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89656 > 2) by scale factor 0.690473\nI1207 08:23:14.401278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27847 > 2) by scale factor 0.877783\nI1207 08:23:15.345762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28344 > 2) by 
scale factor 0.87587\nI1207 08:23:16.289769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04545 > 2) by scale factor 0.656717\nI1207 08:23:17.234318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31448 > 2) by scale factor 0.864125\nI1207 08:23:19.120069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24338 > 2) by scale factor 0.891514\nI1207 08:23:20.064357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10267 > 2) by scale factor 0.487488\nI1207 08:23:21.949072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19588 > 2) by scale factor 0.910797\nI1207 08:23:22.893128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13823 > 2) by scale factor 0.935354\nI1207 08:23:23.837352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71518 > 2) by scale factor 0.736599\nI1207 08:23:24.781574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67342 > 2) by scale factor 0.748105\nI1207 08:23:25.725872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06743 > 2) by scale factor 0.967383\nI1207 08:23:26.670001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11881 > 2) by scale factor 0.943925\nI1207 08:23:27.613765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8406 > 2) by scale factor 0.520752\nI1207 08:23:28.557649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17969 > 2) by scale factor 0.628991\nI1207 08:23:29.501539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74173 > 2) by scale factor 0.534513\nI1207 08:23:30.445276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87437 > 2) by scale factor 0.516213\nI1207 08:23:31.389001   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62508 > 2) by scale factor 0.761881\nI1207 08:23:32.332751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9673 > 2) by scale factor 0.674012\nI1207 08:23:35.160411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46646 > 2) by scale factor 0.810878\nI1207 08:23:37.045671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4422 > 2) by scale factor 0.818935\nI1207 08:23:38.931565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58525 > 2) by scale factor 0.77362\nI1207 08:23:39.875730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03759 > 2) by scale factor 0.981553\nI1207 08:23:41.761557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5797 > 2) by scale factor 0.775282\nI1207 08:23:42.705442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71538 > 2) by scale factor 0.736545\nI1207 08:23:43.649147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.353 > 2) by scale factor 0.59648\nI1207 08:23:44.593181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28755 > 2) by scale factor 0.874296\nI1207 08:23:45.537540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36853 > 2) by scale factor 0.844404\nI1207 08:23:46.481356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48488 > 2) by scale factor 0.804867\nI1207 08:23:47.425451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10842 > 2) by scale factor 0.643413\nI1207 08:23:48.369454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16302 > 2) by scale factor 0.632307\nI1207 08:23:49.313683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.21199 > 2) by scale factor 0.904163\nI1207 08:23:50.258653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64592 > 2) by scale factor 0.755882\nI1207 08:23:51.202852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49744 > 2) by scale factor 0.571847\nI1207 08:23:52.146915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16236 > 2) by scale factor 0.924917\nI1207 08:23:53.090498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16595 > 2) by scale factor 0.92338\nI1207 08:23:53.101791   369 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1207 08:24:46.097062   369 solver.cpp:404]     Test net output #0: accuracy = 0.16955\nI1207 08:24:46.097394   369 solver.cpp:404]     Test net output #1: loss = 5.00866 (* 1 = 5.00866 loss)\nI1207 08:24:46.971235   369 solver.cpp:228] Iteration 6800, loss = 5.12636\nI1207 08:24:46.971284   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 08:24:46.971303   369 solver.cpp:244]     Train net output #1: loss = 5.12636 (* 1 = 5.12636 loss)\nI1207 08:24:47.052702   369 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1207 08:24:47.062791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08586 > 2) by scale factor 0.648117\nI1207 08:24:48.003890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32556 > 2) by scale factor 0.860007\nI1207 08:24:49.883504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54601 > 2) by scale factor 0.785542\nI1207 08:24:50.825006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48967 > 2) by scale factor 0.80332\nI1207 08:24:54.581960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85217 > 2) by scale factor 0.701221\nI1207 08:24:57.400542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83798 
> 2) by scale factor 0.704726\nI1207 08:24:58.341565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80284 > 2) by scale factor 0.525923\nI1207 08:24:59.282928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14754 > 2) by scale factor 0.931298\nI1207 08:25:00.223933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25679 > 2) by scale factor 0.886216\nI1207 08:25:01.165251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29486 > 2) by scale factor 0.871512\nI1207 08:25:03.045660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05463 > 2) by scale factor 0.654745\nI1207 08:25:04.924818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24966 > 2) by scale factor 0.889023\nI1207 08:25:05.865638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35841 > 2) by scale factor 0.848029\nI1207 08:25:06.806864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31884 > 2) by scale factor 0.8625\nI1207 08:25:08.687208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73276 > 2) by scale factor 0.535796\nI1207 08:25:10.567134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.226 > 2) by scale factor 0.898471\nI1207 08:25:16.203436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2336 > 2) by scale factor 0.472411\nI1207 08:25:18.087606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4977 > 2) by scale factor 0.800736\nI1207 08:25:19.971875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34219 > 2) by scale factor 0.59841\nI1207 08:25:20.914578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00736 > 2) by scale factor 0.996335\nI1207 08:25:21.857410   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43352 > 2) by scale factor 0.821853\nI1207 08:25:22.800609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13518 > 2) by scale factor 0.93669\nI1207 08:25:23.743952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59766 > 2) by scale factor 0.769924\nI1207 08:25:25.628077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41278 > 2) by scale factor 0.586033\nI1207 08:25:26.571408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49908 > 2) by scale factor 0.571578\nI1207 08:25:27.514355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54947 > 2) by scale factor 0.563464\nI1207 08:25:28.457579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73467 > 2) by scale factor 0.731348\nI1207 08:25:29.400766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14081 > 2) by scale factor 0.934228\nI1207 08:25:30.343848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5485 > 2) by scale factor 0.784774\nI1207 08:25:31.287236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36376 > 2) by scale factor 0.594573\nI1207 08:25:32.229991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50709 > 2) by scale factor 0.797737\nI1207 08:25:34.113991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88398 > 2) by scale factor 0.693486\nI1207 08:25:35.056807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71201 > 2) by scale factor 0.737461\nI1207 08:25:35.999811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64582 > 2) by scale factor 0.755911\nI1207 08:25:37.883469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.39011 > 2) by scale factor 0.836783\nI1207 08:25:38.826444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34008 > 2) by scale factor 0.598787\nI1207 08:25:39.769109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40329 > 2) by scale factor 0.832193\nI1207 08:25:41.653337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67089 > 2) by scale factor 0.748814\nI1207 08:25:44.478096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15605 > 2) by scale factor 0.927624\nI1207 08:25:47.302927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89849 > 2) by scale factor 0.690016\nI1207 08:25:48.246249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34394 > 2) by scale factor 0.853264\nI1207 08:25:49.189324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88389 > 2) by scale factor 0.693508\nI1207 08:25:50.132134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88801 > 2) by scale factor 0.692519\nI1207 08:25:52.015480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15199 > 2) by scale factor 0.634519\nI1207 08:25:52.957968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73499 > 2) by scale factor 0.731265\nI1207 08:25:53.901129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3993 > 2) by scale factor 0.833576\nI1207 08:25:55.785142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99359 > 2) by scale factor 0.668094\nI1207 08:25:56.727977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15492 > 2) by scale factor 0.928109\nI1207 08:25:57.671550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0476 > 2) by scale factor 0.49412\nI1207 08:25:58.614281   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93084 > 2) by scale factor 0.682398\nI1207 08:26:00.498355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58041 > 2) by scale factor 0.775071\nI1207 08:26:01.441325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34753 > 2) by scale factor 0.85196\nI1207 08:26:02.384158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01631 > 2) by scale factor 0.99191\nI1207 08:26:04.266916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07889 > 2) by scale factor 0.962053\nI1207 08:26:05.209884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20213 > 2) by scale factor 0.908214\nI1207 08:26:06.152730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11794 > 2) by scale factor 0.944315\nI1207 08:26:07.095888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66435 > 2) by scale factor 0.750652\nI1207 08:26:09.919811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07393 > 2) by scale factor 0.650633\nI1207 08:26:10.862915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48852 > 2) by scale factor 0.803689\nI1207 08:26:12.746742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39077 > 2) by scale factor 0.836551\nI1207 08:26:14.630393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01315 > 2) by scale factor 0.663756\nI1207 08:26:15.573299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42635 > 2) by scale factor 0.824282\nI1207 08:26:16.516038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64845 > 2) by scale factor 0.755159\nI1207 08:26:17.459038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.46376 > 2) by scale factor 0.811768\nI1207 08:26:18.402420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66967 > 2) by scale factor 0.749157\nI1207 08:26:19.345042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82593 > 2) by scale factor 0.707731\nI1207 08:26:20.287762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23737 > 2) by scale factor 0.893905\nI1207 08:26:20.299724   369 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1207 08:27:13.291699   369 solver.cpp:404]     Test net output #0: accuracy = 0.2099\nI1207 08:27:13.292027   369 solver.cpp:404]     Test net output #1: loss = 5.50342 (* 1 = 5.50342 loss)\nI1207 08:27:14.166218   369 solver.cpp:228] Iteration 6900, loss = 4.84645\nI1207 08:27:14.166265   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 08:27:14.166285   369 solver.cpp:244]     Train net output #1: loss = 4.84645 (* 1 = 4.84645 loss)\nI1207 08:27:14.235632   369 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1207 08:27:15.184239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67666 > 2) by scale factor 0.747199\nI1207 08:27:16.125283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29332 > 2) by scale factor 0.872097\nI1207 08:27:18.004675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09979 > 2) by scale factor 0.952474\nI1207 08:27:18.945598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58515 > 2) by scale factor 0.773651\nI1207 08:27:20.825055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66058 > 2) by scale factor 0.546361\nI1207 08:27:21.766037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45419 > 2) by scale factor 0.814934\nI1207 08:27:22.706987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.13818 > 2) by scale factor 0.637312\nI1207 08:27:23.647541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55405 > 2) by scale factor 0.783069\nI1207 08:27:24.588518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15577 > 2) by scale factor 0.633761\nI1207 08:27:25.529742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8176 > 2) by scale factor 0.709825\nI1207 08:27:26.470379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59889 > 2) by scale factor 0.76956\nI1207 08:27:27.410789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26728 > 2) by scale factor 0.882114\nI1207 08:27:28.351431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12837 > 2) by scale factor 0.939685\nI1207 08:27:29.292023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2371 > 2) by scale factor 0.617837\nI1207 08:27:30.232410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43892 > 2) by scale factor 0.820037\nI1207 08:27:31.173321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50246 > 2) by scale factor 0.799212\nI1207 08:27:32.114354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36222 > 2) by scale factor 0.594845\nI1207 08:27:33.055199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50802 > 2) by scale factor 0.797441\nI1207 08:27:34.933681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53075 > 2) by scale factor 0.790279\nI1207 08:27:35.874646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40841 > 2) by scale factor 0.586784\nI1207 08:27:37.754132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09862 > 2) by scale factor 0.645449\nI1207 08:27:39.633915   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62195 > 2) by scale factor 0.762792\nI1207 08:27:40.574496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6285 > 2) by scale factor 0.760889\nI1207 08:27:41.515557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17076 > 2) by scale factor 0.921337\nI1207 08:27:42.456059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22519 > 2) by scale factor 0.898801\nI1207 08:27:43.396802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80535 > 2) by scale factor 0.712923\nI1207 08:27:44.337970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81558 > 2) by scale factor 0.710334\nI1207 08:27:45.277686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54985 > 2) by scale factor 0.784359\nI1207 08:27:46.217922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09686 > 2) by scale factor 0.645816\nI1207 08:27:47.160485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04046 > 2) by scale factor 0.657796\nI1207 08:27:49.042454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53655 > 2) by scale factor 0.788471\nI1207 08:27:49.984313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40433 > 2) by scale factor 0.831832\nI1207 08:27:50.926108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38735 > 2) by scale factor 0.837748\nI1207 08:27:51.869072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1425 > 2) by scale factor 0.636436\nI1207 08:27:52.811638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8294 > 2) by scale factor 0.522274\nI1207 08:27:53.754314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.9139 > 2) by scale factor 0.686365\nI1207 08:27:54.696821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0147 > 2) by scale factor 0.992702\nI1207 08:27:55.639634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26175 > 2) by scale factor 0.884272\nI1207 08:27:56.583166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69411 > 2) by scale factor 0.742361\nI1207 08:27:57.525468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2764 > 2) by scale factor 0.878582\nI1207 08:27:58.468492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24085 > 2) by scale factor 0.617122\nI1207 08:27:59.411712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49285 > 2) by scale factor 0.802294\nI1207 08:28:00.355312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46376 > 2) by scale factor 0.811767\nI1207 08:28:01.298696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25104 > 2) by scale factor 0.888479\nI1207 08:28:02.241183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1748 > 2) by scale factor 0.919623\nI1207 08:28:03.184562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93746 > 2) by scale factor 0.507942\nI1207 08:28:05.068586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7474 > 2) by scale factor 0.421283\nI1207 08:28:06.011456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22363 > 2) by scale factor 0.89943\nI1207 08:28:06.953995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67114 > 2) by scale factor 0.748743\nI1207 08:28:07.898167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9528 > 2) by scale factor 0.505971\nI1207 08:28:08.840873   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26349 > 2) by scale factor 0.88359\nI1207 08:28:09.784749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16441 > 2) by scale factor 0.632029\nI1207 08:28:11.667557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38483 > 2) by scale factor 0.838635\nI1207 08:28:12.611784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30746 > 2) by scale factor 0.604693\nI1207 08:28:15.436081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48629 > 2) by scale factor 0.573676\nI1207 08:28:16.379536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90828 > 2) by scale factor 0.687691\nI1207 08:28:17.322773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04986 > 2) by scale factor 0.975675\nI1207 08:28:18.266930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31882 > 2) by scale factor 0.862508\nI1207 08:28:19.209745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84975 > 2) by scale factor 0.701815\nI1207 08:28:20.153255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04751 > 2) by scale factor 0.656273\nI1207 08:28:21.095695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30095 > 2) by scale factor 0.465014\nI1207 08:28:22.038496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26602 > 2) by scale factor 0.882605\nI1207 08:28:22.982681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58872 > 2) by scale factor 0.772584\nI1207 08:28:23.925892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93591 > 2) by scale factor 0.681221\nI1207 08:28:24.870101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.11292 > 2) by scale factor 0.946556\nI1207 08:28:26.753917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06145 > 2) by scale factor 0.970192\nI1207 08:28:28.638229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09741 > 2) by scale factor 0.953558\nI1207 08:28:29.581637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18275 > 2) by scale factor 0.916274\nI1207 08:28:30.524667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10488 > 2) by scale factor 0.644147\nI1207 08:28:31.468833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07332 > 2) by scale factor 0.964638\nI1207 08:28:32.412801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47097 > 2) by scale factor 0.809399\nI1207 08:28:33.355921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62917 > 2) by scale factor 0.760695\nI1207 08:28:34.299057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22735 > 2) by scale factor 0.897928\nI1207 08:28:35.242846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33483 > 2) by scale factor 0.599732\nI1207 08:28:36.186628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55616 > 2) by scale factor 0.562404\nI1207 08:28:37.130508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81842 > 2) by scale factor 0.709617\nI1207 08:28:38.073158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70702 > 2) by scale factor 0.738819\nI1207 08:28:39.957356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30898 > 2) by scale factor 0.866183\nI1207 08:28:40.901213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23018 > 2) by scale factor 0.896788\nI1207 08:28:41.844588   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84797 > 2) by scale factor 0.702255\nI1207 08:28:42.787174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27656 > 2) by scale factor 0.878518\nI1207 08:28:43.730370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52742 > 2) by scale factor 0.79132\nI1207 08:28:44.673889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47543 > 2) by scale factor 0.80794\nI1207 08:28:45.617995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59016 > 2) by scale factor 0.772154\nI1207 08:28:46.561746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1356 > 2) by scale factor 0.637836\nI1207 08:28:47.504108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24911 > 2) by scale factor 0.889239\nI1207 08:28:47.516086   369 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1207 08:29:40.500090   369 solver.cpp:404]     Test net output #0: accuracy = 0.18605\nI1207 08:29:40.500434   369 solver.cpp:404]     Test net output #1: loss = 5.78435 (* 1 = 5.78435 loss)\nI1207 08:29:41.374004   369 solver.cpp:228] Iteration 7000, loss = 5.37155\nI1207 08:29:41.374054   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 08:29:41.374073   369 solver.cpp:244]     Train net output #1: loss = 5.37155 (* 1 = 5.37155 loss)\nI1207 08:29:41.446301   369 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1207 08:29:41.456431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03745 > 2) by scale factor 0.981618\nI1207 08:29:42.396769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14401 > 2) by scale factor 0.482624\nI1207 08:29:43.337411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45511 > 2) by scale factor 0.814628\nI1207 08:29:44.278508   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46802 > 2) by scale factor 0.447625\nI1207 08:29:45.219503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20169 > 2) by scale factor 0.475998\nI1207 08:29:46.160425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45864 > 2) by scale factor 0.813459\nI1207 08:29:48.039845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17407 > 2) by scale factor 0.919932\nI1207 08:29:48.980728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2559 > 2) by scale factor 0.886565\nI1207 08:29:50.860146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69933 > 2) by scale factor 0.540639\nI1207 08:29:51.801148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23799 > 2) by scale factor 0.893659\nI1207 08:29:52.742027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81756 > 2) by scale factor 0.523895\nI1207 08:29:53.682921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0007 > 2) by scale factor 0.666512\nI1207 08:29:54.623451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32669 > 2) by scale factor 0.859592\nI1207 08:29:55.564695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69284 > 2) by scale factor 0.74271\nI1207 08:29:56.505918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15755 > 2) by scale factor 0.926977\nI1207 08:29:57.446892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78733 > 2) by scale factor 0.717531\nI1207 08:29:58.387747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11033 > 2) by scale factor 0.643019\nI1207 08:30:01.205080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.60078 > 2) by scale factor 0.768999\nI1207 08:30:03.084000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61609 > 2) by scale factor 0.764501\nI1207 08:30:04.025234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72998 > 2) by scale factor 0.732605\nI1207 08:30:04.965921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27638 > 2) by scale factor 0.610429\nI1207 08:30:05.906702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86928 > 2) by scale factor 0.697039\nI1207 08:30:06.848201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69507 > 2) by scale factor 0.742097\nI1207 08:30:07.789202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5887 > 2) by scale factor 0.772588\nI1207 08:30:08.730249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47908 > 2) by scale factor 0.80675\nI1207 08:30:09.671205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62906 > 2) by scale factor 0.760728\nI1207 08:30:10.613750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7164 > 2) by scale factor 0.538155\nI1207 08:30:11.557234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63791 > 2) by scale factor 0.758175\nI1207 08:30:12.499855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20167 > 2) by scale factor 0.9084\nI1207 08:30:13.442664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32115 > 2) by scale factor 0.861641\nI1207 08:30:14.385843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29385 > 2) by scale factor 0.871898\nI1207 08:30:16.269057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79805 > 2) by scale factor 0.526586\nI1207 08:30:17.212036   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13893 > 2) by scale factor 0.935047\nI1207 08:30:20.036111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49924 > 2) by scale factor 0.571553\nI1207 08:30:20.978986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82295 > 2) by scale factor 0.708478\nI1207 08:30:21.921705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29679 > 2) by scale factor 0.870782\nI1207 08:30:22.864466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51161 > 2) by scale factor 0.796303\nI1207 08:30:23.806741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39336 > 2) by scale factor 0.835645\nI1207 08:30:24.749682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41466 > 2) by scale factor 0.828274\nI1207 08:30:26.634687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86871 > 2) by scale factor 0.516968\nI1207 08:30:28.520090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2581 > 2) by scale factor 0.885701\nI1207 08:30:29.463811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44943 > 2) by scale factor 0.579805\nI1207 08:30:30.406380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51158 > 2) by scale factor 0.796311\nI1207 08:30:32.292002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4072 > 2) by scale factor 0.830841\nI1207 08:30:34.178139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81526 > 2) by scale factor 0.710415\nI1207 08:30:35.121878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57484 > 2) by scale factor 0.776747\nI1207 08:30:36.065095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.15126 > 2) by scale factor 0.929686\nI1207 08:30:37.009248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49128 > 2) by scale factor 0.802801\nI1207 08:30:37.953135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1714 > 2) by scale factor 0.921065\nI1207 08:30:38.896728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53858 > 2) by scale factor 0.787841\nI1207 08:30:39.840327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21172 > 2) by scale factor 0.904274\nI1207 08:30:40.783360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70029 > 2) by scale factor 0.540498\nI1207 08:30:41.727648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77554 > 2) by scale factor 0.72058\nI1207 08:30:42.672041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24481 > 2) by scale factor 0.890946\nI1207 08:30:43.615756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09413 > 2) by scale factor 0.955051\nI1207 08:30:45.500396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99631 > 2) by scale factor 0.500461\nI1207 08:30:46.444321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11203 > 2) by scale factor 0.486377\nI1207 08:30:47.387724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22728 > 2) by scale factor 0.619716\nI1207 08:30:48.331642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64626 > 2) by scale factor 0.548508\nI1207 08:30:49.275431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11118 > 2) by scale factor 0.642843\nI1207 08:30:50.219300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81104 > 2) by scale factor 0.71148\nI1207 08:30:51.163451   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45035 > 2) by scale factor 0.81621\nI1207 08:30:52.107573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15902 > 2) by scale factor 0.926345\nI1207 08:30:53.051614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21642 > 2) by scale factor 0.902355\nI1207 08:30:53.994444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84942 > 2) by scale factor 0.519559\nI1207 08:30:54.937829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70817 > 2) by scale factor 0.539349\nI1207 08:30:55.880535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95147 > 2) by scale factor 0.50614\nI1207 08:30:56.822990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18356 > 2) by scale factor 0.628228\nI1207 08:30:57.766181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7734 > 2) by scale factor 0.530025\nI1207 08:30:59.649544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53397 > 2) by scale factor 0.789277\nI1207 08:31:01.534922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45973 > 2) by scale factor 0.813096\nI1207 08:31:02.478672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23723 > 2) by scale factor 0.893963\nI1207 08:31:03.422116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57217 > 2) by scale factor 0.777555\nI1207 08:31:04.364872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29632 > 2) by scale factor 0.870959\nI1207 08:31:05.308543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92225 > 2) by scale factor 0.684405\nI1207 08:31:06.252308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.29039 > 2) by scale factor 0.873215\nI1207 08:31:07.196144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87211 > 2) by scale factor 0.696352\nI1207 08:31:08.139633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76603 > 2) by scale factor 0.723059\nI1207 08:31:09.083183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35337 > 2) by scale factor 0.596415\nI1207 08:31:10.969113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91427 > 2) by scale factor 0.686278\nI1207 08:31:11.913643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67471 > 2) by scale factor 0.54426\nI1207 08:31:12.857468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36838 > 2) by scale factor 0.593758\nI1207 08:31:13.800956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76786 > 2) by scale factor 0.722579\nI1207 08:31:14.744359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22534 > 2) by scale factor 0.898741\nI1207 08:31:14.756326   369 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1207 08:32:07.742955   369 solver.cpp:404]     Test net output #0: accuracy = 0.15435\nI1207 08:32:07.743268   369 solver.cpp:404]     Test net output #1: loss = 5.76804 (* 1 = 5.76804 loss)\nI1207 08:32:08.617468   369 solver.cpp:228] Iteration 7100, loss = 6.31033\nI1207 08:32:08.617518   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 08:32:08.617537   369 solver.cpp:244]     Train net output #1: loss = 6.31033 (* 1 = 6.31033 loss)\nI1207 08:32:08.690440   369 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1207 08:32:08.700567   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19403 > 2) by scale factor 0.626168\nI1207 08:32:09.641538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.05395 > 2) by scale factor 0.493345\nI1207 08:32:10.582540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5946 > 2) by scale factor 0.770832\nI1207 08:32:11.523674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43081 > 2) by scale factor 0.582952\nI1207 08:32:12.464797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16532 > 2) by scale factor 0.923653\nI1207 08:32:13.405680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86182 > 2) by scale factor 0.51789\nI1207 08:32:14.346752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68918 > 2) by scale factor 0.743722\nI1207 08:32:15.287848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97175 > 2) by scale factor 0.673005\nI1207 08:32:16.228874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57939 > 2) by scale factor 0.775378\nI1207 08:32:17.170140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15988 > 2) by scale factor 0.925976\nI1207 08:32:18.110862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09517 > 2) by scale factor 0.48838\nI1207 08:32:19.051455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87295 > 2) by scale factor 0.516403\nI1207 08:32:19.992234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11701 > 2) by scale factor 0.944728\nI1207 08:32:20.933223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15857 > 2) by scale factor 0.926538\nI1207 08:32:21.874348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74479 > 2) by scale factor 0.728655\nI1207 08:32:22.815752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15611 > 2) by scale factor 0.927598\nI1207 08:32:23.756690   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47186 > 2) by scale factor 0.809108\nI1207 08:32:25.636807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64933 > 2) by scale factor 0.754909\nI1207 08:32:27.517001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38256 > 2) by scale factor 0.839435\nI1207 08:32:31.274329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19777 > 2) by scale factor 0.910013\nI1207 08:32:32.215644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52331 > 2) by scale factor 0.792609\nI1207 08:32:35.972635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03297 > 2) by scale factor 0.983783\nI1207 08:32:39.732509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78849 > 2) by scale factor 0.717234\nI1207 08:32:40.675843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29114 > 2) by scale factor 0.872927\nI1207 08:32:41.618373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09 > 2) by scale factor 0.956938\nI1207 08:32:42.561970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12994 > 2) by scale factor 0.938993\nI1207 08:32:43.504843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28921 > 2) by scale factor 0.873665\nI1207 08:32:44.448009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31518 > 2) by scale factor 0.863864\nI1207 08:32:45.391129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67166 > 2) by scale factor 0.748599\nI1207 08:32:48.215066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35412 > 2) by scale factor 0.849574\nI1207 08:32:49.157697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.14962 > 2) by scale factor 0.930397\nI1207 08:32:50.100193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13335 > 2) by scale factor 0.638294\nI1207 08:32:51.042523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24704 > 2) by scale factor 0.615946\nI1207 08:32:51.985399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96474 > 2) by scale factor 0.674595\nI1207 08:32:52.928228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01948 > 2) by scale factor 0.990356\nI1207 08:32:54.811898   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00863 > 2) by scale factor 0.664754\nI1207 08:32:55.754956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35402 > 2) by scale factor 0.5963\nI1207 08:32:56.697760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19614 > 2) by scale factor 0.625754\nI1207 08:32:57.640429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27238 > 2) by scale factor 0.880134\nI1207 08:32:58.583019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86661 > 2) by scale factor 0.697688\nI1207 08:32:59.525847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14807 > 2) by scale factor 0.931066\nI1207 08:33:00.468858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33946 > 2) by scale factor 0.598899\nI1207 08:33:01.411772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52124 > 2) by scale factor 0.442357\nI1207 08:33:02.354631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01032 > 2) by scale factor 0.664381\nI1207 08:33:03.297183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83612 > 2) by scale factor 0.70519\nI1207 08:33:04.239904   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97954 > 2) by scale factor 0.671245\nI1207 08:33:05.182554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38462 > 2) by scale factor 0.590908\nI1207 08:33:06.125005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62567 > 2) by scale factor 0.761709\nI1207 08:33:07.067811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35873 > 2) by scale factor 0.847914\nI1207 08:33:08.010505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00764 > 2) by scale factor 0.664974\nI1207 08:33:08.953301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13605 > 2) by scale factor 0.483553\nI1207 08:33:09.895965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04718 > 2) by scale factor 0.494171\nI1207 08:33:10.839584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96436 > 2) by scale factor 0.674682\nI1207 08:33:11.782168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23296 > 2) by scale factor 0.895673\nI1207 08:33:12.724895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21623 > 2) by scale factor 0.621845\nI1207 08:33:13.667848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77333 > 2) by scale factor 0.721156\nI1207 08:33:14.610853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92257 > 2) by scale factor 0.684329\nI1207 08:33:15.553556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13212 > 2) by scale factor 0.938035\nI1207 08:33:16.496139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16275 > 2) by scale factor 0.924748\nI1207 08:33:17.438894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.21808 > 2) by scale factor 0.901682\nI1207 08:33:18.381677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97585 > 2) by scale factor 0.672077\nI1207 08:33:19.324039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7069 > 2) by scale factor 0.738851\nI1207 08:33:20.266042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23409 > 2) by scale factor 0.618412\nI1207 08:33:21.208644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23043 > 2) by scale factor 0.619113\nI1207 08:33:23.091799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85197 > 2) by scale factor 0.70127\nI1207 08:33:24.034368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11518 > 2) by scale factor 0.642018\nI1207 08:33:25.917565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19023 > 2) by scale factor 0.626914\nI1207 08:33:27.800876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4489 > 2) by scale factor 0.816694\nI1207 08:33:28.743454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51309 > 2) by scale factor 0.795833\nI1207 08:33:29.686600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61061 > 2) by scale factor 0.766105\nI1207 08:33:31.569416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64524 > 2) by scale factor 0.54866\nI1207 08:33:32.512046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28781 > 2) by scale factor 0.8742\nI1207 08:33:33.454640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13986 > 2) by scale factor 0.934642\nI1207 08:33:34.397186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23831 > 2) by scale factor 0.893531\nI1207 08:33:35.340353   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76381 > 2) by scale factor 0.723639\nI1207 08:33:36.282883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80396 > 2) by scale factor 0.713276\nI1207 08:33:37.225617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09829 > 2) by scale factor 0.645517\nI1207 08:33:38.168231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35338 > 2) by scale factor 0.849841\nI1207 08:33:39.111114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48073 > 2) by scale factor 0.806214\nI1207 08:33:40.054144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62182 > 2) by scale factor 0.76283\nI1207 08:33:40.996858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16822 > 2) by scale factor 0.922414\nI1207 08:33:41.949316   369 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1207 08:34:34.953877   369 solver.cpp:404]     Test net output #0: accuracy = 0.1729\nI1207 08:34:34.954210   369 solver.cpp:404]     Test net output #1: loss = 4.70913 (* 1 = 4.70913 loss)\nI1207 08:34:35.828541   369 solver.cpp:228] Iteration 7200, loss = 4.99132\nI1207 08:34:35.828590   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 08:34:35.828609   369 solver.cpp:244]     Train net output #1: loss = 4.99132 (* 1 = 4.99132 loss)\nI1207 08:34:35.903287   369 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1207 08:34:35.913378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22992 > 2) by scale factor 0.896894\nI1207 08:34:36.854573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73727 > 2) by scale factor 0.730654\nI1207 08:34:37.794929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73508 > 2) by scale factor 0.731241\nI1207 08:34:38.735540   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59985 > 2) by scale factor 0.769276\nI1207 08:34:39.676635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22295 > 2) by scale factor 0.899707\nI1207 08:34:40.617095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28275 > 2) by scale factor 0.46699\nI1207 08:34:41.558101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90536 > 2) by scale factor 0.688384\nI1207 08:34:42.498535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02696 > 2) by scale factor 0.496652\nI1207 08:34:43.439255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31251 > 2) by scale factor 0.603772\nI1207 08:34:44.379891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60489 > 2) by scale factor 0.767788\nI1207 08:34:45.319860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72987 > 2) by scale factor 0.536211\nI1207 08:34:47.200139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85533 > 2) by scale factor 0.700444\nI1207 08:34:48.140658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4748 > 2) by scale factor 0.808146\nI1207 08:34:49.081197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08789 > 2) by scale factor 0.647691\nI1207 08:34:50.022195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41272 > 2) by scale factor 0.586043\nI1207 08:34:50.962852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51302 > 2) by scale factor 0.795857\nI1207 08:34:51.903939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18521 > 2) by scale factor 0.385713\nI1207 08:34:52.844725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.8866 > 2) by scale factor 0.692856\nI1207 08:34:53.785830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58518 > 2) by scale factor 0.773641\nI1207 08:34:54.726150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01079 > 2) by scale factor 0.664278\nI1207 08:34:55.667526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0101 > 2) by scale factor 0.994977\nI1207 08:34:56.608980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89843 > 2) by scale factor 0.690028\nI1207 08:34:57.550082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62478 > 2) by scale factor 0.551757\nI1207 08:34:58.491008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99436 > 2) by scale factor 0.667923\nI1207 08:34:59.431969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86347 > 2) by scale factor 0.698453\nI1207 08:35:00.372485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40114 > 2) by scale factor 0.588038\nI1207 08:35:01.312969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53287 > 2) by scale factor 0.789619\nI1207 08:35:02.254073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22288 > 2) by scale factor 0.620562\nI1207 08:35:03.194820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24446 > 2) by scale factor 0.616435\nI1207 08:35:04.135766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9405 > 2) by scale factor 0.680157\nI1207 08:35:05.078646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89722 > 2) by scale factor 0.690316\nI1207 08:35:06.961849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38637 > 2) by scale factor 0.838093\nI1207 08:35:07.904412   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22441 > 2) by scale factor 0.620269\nI1207 08:35:08.846820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16455 > 2) by scale factor 0.480244\nI1207 08:35:09.789361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26844 > 2) by scale factor 0.881664\nI1207 08:35:10.731847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07601 > 2) by scale factor 0.650192\nI1207 08:35:14.494668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02758 > 2) by scale factor 0.660594\nI1207 08:35:16.377804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46228 > 2) by scale factor 0.812256\nI1207 08:35:17.320842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37976 > 2) by scale factor 0.840419\nI1207 08:35:19.203744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17555 > 2) by scale factor 0.629812\nI1207 08:35:20.146708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52757 > 2) by scale factor 0.791274\nI1207 08:35:21.089056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13731 > 2) by scale factor 0.935757\nI1207 08:35:22.048303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81458 > 2) by scale factor 0.710585\nI1207 08:35:22.990222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98729 > 2) by scale factor 0.669503\nI1207 08:35:23.933176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78956 > 2) by scale factor 0.527765\nI1207 08:35:24.876283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58089 > 2) by scale factor 0.55852\nI1207 08:35:28.641880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.56868 > 2) by scale factor 0.778609\nI1207 08:35:29.584722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88519 > 2) by scale factor 0.693195\nI1207 08:35:30.526790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2738 > 2) by scale factor 0.879583\nI1207 08:35:32.410619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01374 > 2) by scale factor 0.993176\nI1207 08:35:34.293169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47561 > 2) by scale factor 0.575439\nI1207 08:35:35.235960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1311 > 2) by scale factor 0.638753\nI1207 08:35:36.178786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34702 > 2) by scale factor 0.597547\nI1207 08:35:37.121063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93473 > 2) by scale factor 0.681493\nI1207 08:35:39.004498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22756 > 2) by scale factor 0.619662\nI1207 08:35:39.946933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26701 > 2) by scale factor 0.61218\nI1207 08:35:40.889727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55388 > 2) by scale factor 0.562765\nI1207 08:35:41.832221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29145 > 2) by scale factor 0.872809\nI1207 08:35:42.774569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07305 > 2) by scale factor 0.964763\nI1207 08:35:43.717237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68712 > 2) by scale factor 0.744291\nI1207 08:35:44.659551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43427 > 2) by scale factor 0.582365\nI1207 08:35:45.602361  
 369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87806 > 2) by scale factor 0.515722\nI1207 08:35:46.545294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73631 > 2) by scale factor 0.422269\nI1207 08:35:47.487862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23097 > 2) by scale factor 0.896473\nI1207 08:35:48.430514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16987 > 2) by scale factor 0.479632\nI1207 08:35:49.372596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85664 > 2) by scale factor 0.518586\nI1207 08:35:50.315644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00009 > 2) by scale factor 0.666648\nI1207 08:35:51.257452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86559 > 2) by scale factor 0.517385\nI1207 08:35:52.200253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20249 > 2) by scale factor 0.475908\nI1207 08:35:53.143055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69081 > 2) by scale factor 0.541886\nI1207 08:35:55.025533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6468 > 2) by scale factor 0.75563\nI1207 08:35:55.967882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40724 > 2) by scale factor 0.830826\nI1207 08:35:56.909960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71466 > 2) by scale factor 0.538407\nI1207 08:35:57.852423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54213 > 2) by scale factor 0.564633\nI1207 08:35:59.735244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24351 > 2) by scale factor 0.89146\nI1207 08:36:00.677414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.51572 > 2) by scale factor 0.568873\nI1207 08:36:02.560756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5007 > 2) by scale factor 0.571314\nI1207 08:36:03.504142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34645 > 2) by scale factor 0.597648\nI1207 08:36:04.446702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06265 > 2) by scale factor 0.969628\nI1207 08:36:06.330143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95069 > 2) by scale factor 0.677807\nI1207 08:36:07.273715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80001 > 2) by scale factor 0.714284\nI1207 08:36:08.216338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0361 > 2) by scale factor 0.982271\nI1207 08:36:09.168357   369 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1207 08:37:02.164840   369 solver.cpp:404]     Test net output #0: accuracy = 0.16905\nI1207 08:37:02.165172   369 solver.cpp:404]     Test net output #1: loss = 5.9257 (* 1 = 5.9257 loss)\nI1207 08:37:03.039049   369 solver.cpp:228] Iteration 7300, loss = 5.73109\nI1207 08:37:03.039106   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 08:37:03.039125   369 solver.cpp:244]     Train net output #1: loss = 5.73109 (* 1 = 5.73109 loss)\nI1207 08:37:03.116708   369 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1207 08:37:03.126803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35418 > 2) by scale factor 0.596271\nI1207 08:37:04.068058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24961 > 2) by scale factor 0.615459\nI1207 08:37:05.009443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94583 > 2) by scale factor 0.678926\nI1207 08:37:05.950099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.09801 > 2) by scale factor 0.953282\nI1207 08:37:06.890287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18042 > 2) by scale factor 0.917253\nI1207 08:37:07.830942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82102 > 2) by scale factor 0.52342\nI1207 08:37:08.771389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50434 > 2) by scale factor 0.798612\nI1207 08:37:09.712151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4365 > 2) by scale factor 0.820848\nI1207 08:37:10.653002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34444 > 2) by scale factor 0.598007\nI1207 08:37:11.594008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83603 > 2) by scale factor 0.705211\nI1207 08:37:13.473845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96641 > 2) by scale factor 0.674215\nI1207 08:37:14.414983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29085 > 2) by scale factor 0.873038\nI1207 08:37:15.355775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68381 > 2) by scale factor 0.745208\nI1207 08:37:16.296731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47878 > 2) by scale factor 0.806849\nI1207 08:37:18.176528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60704 > 2) by scale factor 0.554471\nI1207 08:37:19.117146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14988 > 2) by scale factor 0.634944\nI1207 08:37:20.057925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76114 > 2) by scale factor 0.724339\nI1207 08:37:20.998504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56783 > 2) by scale factor 0.778866\nI1207 08:37:21.939600   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73899 > 2) by scale factor 0.730195\nI1207 08:37:23.819308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6968 > 2) by scale factor 0.541008\nI1207 08:37:24.760243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09088 > 2) by scale factor 0.488892\nI1207 08:37:25.700866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78177 > 2) by scale factor 0.718966\nI1207 08:37:26.641892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91201 > 2) by scale factor 0.686811\nI1207 08:37:27.582968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80918 > 2) by scale factor 0.711952\nI1207 08:37:28.523984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22604 > 2) by scale factor 0.898455\nI1207 08:37:29.465206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26964 > 2) by scale factor 0.881197\nI1207 08:37:30.406029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0509 > 2) by scale factor 0.655544\nI1207 08:37:31.346926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44098 > 2) by scale factor 0.58123\nI1207 08:37:32.288116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39656 > 2) by scale factor 0.588831\nI1207 08:37:33.229846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68139 > 2) by scale factor 0.745883\nI1207 08:37:34.172765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41155 > 2) by scale factor 0.453355\nI1207 08:37:35.115185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41978 > 2) by scale factor 0.584832\nI1207 08:37:36.057199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.22801 > 2) by scale factor 0.897663\nI1207 08:37:37.938957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44777 > 2) by scale factor 0.817071\nI1207 08:37:39.821612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16137 > 2) by scale factor 0.480611\nI1207 08:37:40.764217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82628 > 2) by scale factor 0.707644\nI1207 08:37:43.587461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76058 > 2) by scale factor 0.531833\nI1207 08:37:44.530230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25088 > 2) by scale factor 0.615218\nI1207 08:37:45.473290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64242 > 2) by scale factor 0.549086\nI1207 08:37:46.416328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7357 > 2) by scale factor 0.731075\nI1207 08:37:47.359004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30896 > 2) by scale factor 0.866192\nI1207 08:37:48.301239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50983 > 2) by scale factor 0.796866\nI1207 08:37:49.245031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72209 > 2) by scale factor 0.734731\nI1207 08:37:50.187851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59225 > 2) by scale factor 0.771531\nI1207 08:37:51.130695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02716 > 2) by scale factor 0.660686\nI1207 08:37:52.073704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07946 > 2) by scale factor 0.490262\nI1207 08:37:53.016611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02635 > 2) by scale factor 0.660861\nI1207 08:37:53.959719   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29065 > 2) by scale factor 0.607782\nI1207 08:37:54.902452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71599 > 2) by scale factor 0.73638\nI1207 08:37:55.845109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05178 > 2) by scale factor 0.974765\nI1207 08:37:56.788136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74434 > 2) by scale factor 0.534139\nI1207 08:37:57.731650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81359 > 2) by scale factor 0.710837\nI1207 08:37:58.675173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13665 > 2) by scale factor 0.936045\nI1207 08:38:00.558926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65096 > 2) by scale factor 0.547801\nI1207 08:38:01.501773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88173 > 2) by scale factor 0.694028\nI1207 08:38:02.444417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61977 > 2) by scale factor 0.552521\nI1207 08:38:03.387192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31245 > 2) by scale factor 0.864882\nI1207 08:38:04.330468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83216 > 2) by scale factor 0.706176\nI1207 08:38:05.273416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14983 > 2) by scale factor 0.930306\nI1207 08:38:06.215849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32622 > 2) by scale factor 0.859764\nI1207 08:38:08.098213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79319 > 2) by scale factor 0.716028\nI1207 08:38:09.040825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.55817 > 2) by scale factor 0.781809\nI1207 08:38:09.983358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93757 > 2) by scale factor 0.680835\nI1207 08:38:10.927670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33388 > 2) by scale factor 0.856943\nI1207 08:38:11.870357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58992 > 2) by scale factor 0.772224\nI1207 08:38:12.814172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54156 > 2) by scale factor 0.786919\nI1207 08:38:13.757594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58172 > 2) by scale factor 0.55839\nI1207 08:38:14.700701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02315 > 2) by scale factor 0.497123\nI1207 08:38:15.643770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69662 > 2) by scale factor 0.74167\nI1207 08:38:16.586498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09178 > 2) by scale factor 0.956122\nI1207 08:38:17.530069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48509 > 2) by scale factor 0.8048\nI1207 08:38:18.472875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16267 > 2) by scale factor 0.924783\nI1207 08:38:19.415366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80735 > 2) by scale factor 0.712415\nI1207 08:38:20.359387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3591 > 2) by scale factor 0.847782\nI1207 08:38:21.302073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35836 > 2) by scale factor 0.595528\nI1207 08:38:22.245043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46905 > 2) by scale factor 0.576527\nI1207 08:38:23.187891   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70376 > 2) by scale factor 0.73971\nI1207 08:38:24.129917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12364 > 2) by scale factor 0.485009\nI1207 08:38:25.073289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2155 > 2) by scale factor 0.47444\nI1207 08:38:27.897212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21546 > 2) by scale factor 0.902747\nI1207 08:38:28.840397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90671 > 2) by scale factor 0.688062\nI1207 08:38:29.783871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5173 > 2) by scale factor 0.568617\nI1207 08:38:30.727355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65251 > 2) by scale factor 0.547568\nI1207 08:38:31.670430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71952 > 2) by scale factor 0.735423\nI1207 08:38:32.612746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54254 > 2) by scale factor 0.786616\nI1207 08:38:34.495848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56285 > 2) by scale factor 0.780382\nI1207 08:38:35.439000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25929 > 2) by scale factor 0.885234\nI1207 08:38:36.382915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71828 > 2) by scale factor 0.537883\nI1207 08:38:36.394884   369 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1207 08:39:29.388129   369 solver.cpp:404]     Test net output #0: accuracy = 0.13505\nI1207 08:39:29.388484   369 solver.cpp:404]     Test net output #1: loss = 8.6723 (* 1 = 8.6723 loss)\nI1207 08:39:30.262428   369 solver.cpp:228] Iteration 7400, loss = 8.45131\nI1207 08:39:30.262485   369 
solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 08:39:30.262504   369 solver.cpp:244]     Train net output #1: loss = 8.45131 (* 1 = 8.45131 loss)\nI1207 08:39:30.340749   369 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1207 08:39:30.350422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00062 > 2) by scale factor 0.499922\nI1207 08:39:31.290813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39633 > 2) by scale factor 0.588871\nI1207 08:39:32.231137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76312 > 2) by scale factor 0.723818\nI1207 08:39:33.171401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11168 > 2) by scale factor 0.642739\nI1207 08:39:34.112267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44992 > 2) by scale factor 0.579724\nI1207 08:39:35.053258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75497 > 2) by scale factor 0.725962\nI1207 08:39:35.994233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66566 > 2) by scale factor 0.750284\nI1207 08:39:36.934382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21868 > 2) by scale factor 0.621373\nI1207 08:39:37.875023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27849 > 2) by scale factor 0.610037\nI1207 08:39:38.815346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1105 > 2) by scale factor 0.642984\nI1207 08:39:39.756438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94814 > 2) by scale factor 0.678393\nI1207 08:39:40.697314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59881 > 2) by scale factor 0.769584\nI1207 08:39:41.638619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95697 
> 2) by scale factor 0.676368\nI1207 08:39:42.579310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50317 > 2) by scale factor 0.798988\nI1207 08:39:43.519970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72869 > 2) by scale factor 0.536381\nI1207 08:39:44.460526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42219 > 2) by scale factor 0.8257\nI1207 08:39:46.340243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32542 > 2) by scale factor 0.86006\nI1207 08:39:47.281370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67609 > 2) by scale factor 0.544057\nI1207 08:39:48.221966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72188 > 2) by scale factor 0.734787\nI1207 08:39:49.163204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59106 > 2) by scale factor 0.771885\nI1207 08:39:50.103950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79697 > 2) by scale factor 0.71506\nI1207 08:39:51.044137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49469 > 2) by scale factor 0.801702\nI1207 08:39:52.923918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05372 > 2) by scale factor 0.973843\nI1207 08:39:54.803583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13197 > 2) by scale factor 0.638576\nI1207 08:39:55.744266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09007 > 2) by scale factor 0.647235\nI1207 08:39:56.685626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31728 > 2) by scale factor 0.86308\nI1207 08:39:59.504371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18181 > 2) by scale factor 0.478262\nI1207 08:40:00.445601   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25337 > 2) by scale factor 0.88756\nI1207 08:40:01.388626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92049 > 2) by scale factor 0.684815\nI1207 08:40:02.331646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27147 > 2) by scale factor 0.880488\nI1207 08:40:03.274147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99407 > 2) by scale factor 0.667987\nI1207 08:40:04.216909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40163 > 2) by scale factor 0.832768\nI1207 08:40:05.159693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28409 > 2) by scale factor 0.875621\nI1207 08:40:06.102406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96858 > 2) by scale factor 0.673722\nI1207 08:40:07.044515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78066 > 2) by scale factor 0.529008\nI1207 08:40:08.927893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36558 > 2) by scale factor 0.845458\nI1207 08:40:10.811777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83753 > 2) by scale factor 0.521169\nI1207 08:40:11.754595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13098 > 2) by scale factor 0.638777\nI1207 08:40:12.697440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26419 > 2) by scale factor 0.612709\nI1207 08:40:14.582057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16307 > 2) by scale factor 0.92461\nI1207 08:40:15.525406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53809 > 2) by scale factor 0.565276\nI1207 08:40:17.410871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.63507 > 2) by scale factor 0.550196\nI1207 08:40:18.354713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87931 > 2) by scale factor 0.69461\nI1207 08:40:19.298635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34903 > 2) by scale factor 0.851415\nI1207 08:40:20.241925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25784 > 2) by scale factor 0.885803\nI1207 08:40:21.186617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83161 > 2) by scale factor 0.706311\nI1207 08:40:22.130604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86022 > 2) by scale factor 0.518105\nI1207 08:40:23.074455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08733 > 2) by scale factor 0.95816\nI1207 08:40:24.017699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72555 > 2) by scale factor 0.733797\nI1207 08:40:24.961320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37874 > 2) by scale factor 0.840783\nI1207 08:40:26.846762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6335 > 2) by scale factor 0.550433\nI1207 08:40:27.791071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72796 > 2) by scale factor 0.733148\nI1207 08:40:28.734419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84245 > 2) by scale factor 0.520501\nI1207 08:40:29.677640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28254 > 2) by scale factor 0.609284\nI1207 08:40:30.621312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45598 > 2) by scale factor 0.578706\nI1207 08:40:31.565212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00814 > 2) by scale factor 0.664862\nI1207 08:40:32.508735   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47443 > 2) by scale factor 0.575634\nI1207 08:40:33.452814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46023 > 2) by scale factor 0.448407\nI1207 08:40:34.396334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94367 > 2) by scale factor 0.679425\nI1207 08:40:35.339694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48077 > 2) by scale factor 0.574585\nI1207 08:40:36.283123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30618 > 2) by scale factor 0.464449\nI1207 08:40:37.226744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38917 > 2) by scale factor 0.83711\nI1207 08:40:38.170711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07403 > 2) by scale factor 0.964304\nI1207 08:40:39.114595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16987 > 2) by scale factor 0.630941\nI1207 08:40:40.057876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73439 > 2) by scale factor 0.535562\nI1207 08:40:41.001179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54212 > 2) by scale factor 0.786744\nI1207 08:40:41.945227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04994 > 2) by scale factor 0.975639\nI1207 08:40:42.888546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43068 > 2) by scale factor 0.822816\nI1207 08:40:43.831640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66918 > 2) by scale factor 0.749293\nI1207 08:40:44.775179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6323 > 2) by scale factor 0.550616\nI1207 08:40:45.718847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.268 > 2) by scale factor 0.881834\nI1207 08:40:46.662556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33274 > 2) by scale factor 0.857362\nI1207 08:40:47.606451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72118 > 2) by scale factor 0.734976\nI1207 08:40:48.549446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45444 > 2) by scale factor 0.81485\nI1207 08:40:49.492725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84577 > 2) by scale factor 0.702797\nI1207 08:40:50.435690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21841 > 2) by scale factor 0.621424\nI1207 08:40:51.379693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62121 > 2) by scale factor 0.763005\nI1207 08:40:52.323293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29221 > 2) by scale factor 0.607495\nI1207 08:40:53.266459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4267 > 2) by scale factor 0.824163\nI1207 08:40:54.209789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03113 > 2) by scale factor 0.984671\nI1207 08:40:55.153462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56006 > 2) by scale factor 0.561788\nI1207 08:40:56.097784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12453 > 2) by scale factor 0.640096\nI1207 08:40:57.040938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5941 > 2) by scale factor 0.770982\nI1207 08:40:57.984292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25349 > 2) by scale factor 0.887511\nI1207 08:40:58.928076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89432 > 2) by scale factor 0.691008\nI1207 08:40:59.871826   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88221 > 2) by scale factor 0.693913\nI1207 08:41:00.816383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90531 > 2) by scale factor 0.688395\nI1207 08:41:01.760248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08229 > 2) by scale factor 0.648868\nI1207 08:41:02.704135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22215 > 2) by scale factor 0.900029\nI1207 08:41:03.647547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12941 > 2) by scale factor 0.639098\nI1207 08:41:03.659550   369 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1207 08:41:56.656054   369 solver.cpp:404]     Test net output #0: accuracy = 0.17275\nI1207 08:41:56.656420   369 solver.cpp:404]     Test net output #1: loss = 7.52307 (* 1 = 7.52307 loss)\nI1207 08:41:57.530660   369 solver.cpp:228] Iteration 7500, loss = 7.26347\nI1207 08:41:57.530712   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 08:41:57.530730   369 solver.cpp:244]     Train net output #1: loss = 7.26347 (* 1 = 7.26347 loss)\nI1207 08:41:57.609922   369 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1207 08:41:57.619783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24035 > 2) by scale factor 0.617218\nI1207 08:41:58.560214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58105 > 2) by scale factor 0.774878\nI1207 08:41:59.501200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31114 > 2) by scale factor 0.865373\nI1207 08:42:00.442103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17167 > 2) by scale factor 0.630583\nI1207 08:42:02.321522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60963 > 2) by scale factor 0.766393\nI1207 08:42:03.261517   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46318 > 2) by scale factor 0.81196\nI1207 08:42:04.201692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67228 > 2) by scale factor 0.748426\nI1207 08:42:05.142781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16377 > 2) by scale factor 0.924314\nI1207 08:42:06.083261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6505 > 2) by scale factor 0.754575\nI1207 08:42:07.023174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87476 > 2) by scale factor 0.69571\nI1207 08:42:09.840584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03005 > 2) by scale factor 0.985196\nI1207 08:42:10.781530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34425 > 2) by scale factor 0.85315\nI1207 08:42:11.721892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33939 > 2) by scale factor 0.854926\nI1207 08:42:12.662822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30534 > 2) by scale factor 0.605082\nI1207 08:42:13.603624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43671 > 2) by scale factor 0.450785\nI1207 08:42:14.544356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87942 > 2) by scale factor 0.694585\nI1207 08:42:15.484964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77017 > 2) by scale factor 0.721977\nI1207 08:42:16.425823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58401 > 2) by scale factor 0.77399\nI1207 08:42:17.366871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55662 > 2) by scale factor 0.782283\nI1207 08:42:18.307544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.46775 > 2) by scale factor 0.810456\nI1207 08:42:19.248020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38764 > 2) by scale factor 0.837646\nI1207 08:42:20.188685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66133 > 2) by scale factor 0.546249\nI1207 08:42:22.067680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52688 > 2) by scale factor 0.79149\nI1207 08:42:23.946476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95471 > 2) by scale factor 0.676885\nI1207 08:42:24.887290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29636 > 2) by scale factor 0.60673\nI1207 08:42:25.827940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51128 > 2) by scale factor 0.796405\nI1207 08:42:26.768853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15748 > 2) by scale factor 0.927006\nI1207 08:42:28.652443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58586 > 2) by scale factor 0.773438\nI1207 08:42:29.595594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80622 > 2) by scale factor 0.712701\nI1207 08:42:30.538102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52385 > 2) by scale factor 0.792439\nI1207 08:42:31.481250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41896 > 2) by scale factor 0.584974\nI1207 08:42:32.424371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8511 > 2) by scale factor 0.701483\nI1207 08:42:33.367269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25188 > 2) by scale factor 0.615028\nI1207 08:42:34.310027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80243 > 2) by scale factor 0.713666\nI1207 08:42:35.252606   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24317 > 2) by scale factor 0.61668\nI1207 08:42:36.195741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45582 > 2) by scale factor 0.81439\nI1207 08:42:37.138664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84889 > 2) by scale factor 0.702028\nI1207 08:42:38.081727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47086 > 2) by scale factor 0.576226\nI1207 08:42:39.024772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08383 > 2) by scale factor 0.95977\nI1207 08:42:39.967381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07376 > 2) by scale factor 0.964434\nI1207 08:42:40.910303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9751 > 2) by scale factor 0.672246\nI1207 08:42:41.854477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08045 > 2) by scale factor 0.490142\nI1207 08:42:42.797756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18588 > 2) by scale factor 0.627771\nI1207 08:42:43.741426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72727 > 2) by scale factor 0.733333\nI1207 08:42:44.685472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11332 > 2) by scale factor 0.642401\nI1207 08:42:45.629418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05517 > 2) by scale factor 0.654628\nI1207 08:42:46.573062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63843 > 2) by scale factor 0.758026\nI1207 08:42:47.516108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21372 > 2) by scale factor 0.622331\nI1207 08:42:48.459491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.99408 > 2) by scale factor 0.500741\nI1207 08:42:49.403645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5052 > 2) by scale factor 0.798339\nI1207 08:42:50.347518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24628 > 2) by scale factor 0.471001\nI1207 08:42:51.291456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17081 > 2) by scale factor 0.630753\nI1207 08:42:52.234392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89893 > 2) by scale factor 0.68991\nI1207 08:42:53.177707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15032 > 2) by scale factor 0.634855\nI1207 08:42:54.121644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78113 > 2) by scale factor 0.719133\nI1207 08:42:55.065575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51715 > 2) by scale factor 0.568642\nI1207 08:42:56.009512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4039 > 2) by scale factor 0.831982\nI1207 08:42:57.894399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46009 > 2) by scale factor 0.812977\nI1207 08:42:58.838819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27249 > 2) by scale factor 0.880092\nI1207 08:42:59.783001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49092 > 2) by scale factor 0.802915\nI1207 08:43:00.727200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60827 > 2) by scale factor 0.766791\nI1207 08:43:01.670269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03376 > 2) by scale factor 0.659248\nI1207 08:43:02.614090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65286 > 2) by scale factor 0.753902\nI1207 08:43:03.557922   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33379 > 2) by scale factor 0.856974\nI1207 08:43:04.501700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68678 > 2) by scale factor 0.744385\nI1207 08:43:05.445636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.187 > 2) by scale factor 0.914496\nI1207 08:43:09.212970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29374 > 2) by scale factor 0.87194\nI1207 08:43:10.156913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55563 > 2) by scale factor 0.562489\nI1207 08:43:11.100142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04485 > 2) by scale factor 0.656848\nI1207 08:43:12.043126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02903 > 2) by scale factor 0.985693\nI1207 08:43:12.987107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31428 > 2) by scale factor 0.8642\nI1207 08:43:13.931177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71385 > 2) by scale factor 0.736961\nI1207 08:43:14.874996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37039 > 2) by scale factor 0.593403\nI1207 08:43:15.818177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98208 > 2) by scale factor 0.670674\nI1207 08:43:16.760773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30256 > 2) by scale factor 0.868599\nI1207 08:43:17.703889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48274 > 2) by scale factor 0.80556\nI1207 08:43:18.648133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54763 > 2) by scale factor 0.563757\nI1207 08:43:19.591987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.39179 > 2) by scale factor 0.58966\nI1207 08:43:20.535554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64148 > 2) by scale factor 0.549227\nI1207 08:43:21.477746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47204 > 2) by scale factor 0.80905\nI1207 08:43:22.420891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35335 > 2) by scale factor 0.596418\nI1207 08:43:23.364487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55339 > 2) by scale factor 0.783274\nI1207 08:43:24.307823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63396 > 2) by scale factor 0.550364\nI1207 08:43:25.251535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6959 > 2) by scale factor 0.741869\nI1207 08:43:26.195072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13602 > 2) by scale factor 0.936323\nI1207 08:43:27.138547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64135 > 2) by scale factor 0.75719\nI1207 08:43:28.081765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69114 > 2) by scale factor 0.74318\nI1207 08:43:29.025717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26798 > 2) by scale factor 0.468606\nI1207 08:43:29.969099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31366 > 2) by scale factor 0.603562\nI1207 08:43:30.911870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5954 > 2) by scale factor 0.770595\nI1207 08:43:30.923197   369 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1207 08:44:23.900080   369 solver.cpp:404]     Test net output #0: accuracy = 0.15805\nI1207 08:44:23.900418   369 solver.cpp:404]     Test net output #1: loss = 8.64987 (* 1 = 8.64987 loss)\nI1207 08:44:24.774778   369 
solver.cpp:228] Iteration 7600, loss = 8.64371\nI1207 08:44:24.774832   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 08:44:24.774852   369 solver.cpp:244]     Train net output #1: loss = 8.64371 (* 1 = 8.64371 loss)\nI1207 08:44:24.852003   369 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1207 08:44:24.861811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54892 > 2) by scale factor 0.563552\nI1207 08:44:25.801888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47079 > 2) by scale factor 0.809456\nI1207 08:44:26.742200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43964 > 2) by scale factor 0.819794\nI1207 08:44:27.682303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35763 > 2) by scale factor 0.848309\nI1207 08:44:28.623011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71988 > 2) by scale factor 0.735327\nI1207 08:44:29.563248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51104 > 2) by scale factor 0.796484\nI1207 08:44:31.442072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03009 > 2) by scale factor 0.660047\nI1207 08:44:32.382108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34302 > 2) by scale factor 0.460509\nI1207 08:44:33.322283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81229 > 2) by scale factor 0.711164\nI1207 08:44:34.262590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24028 > 2) by scale factor 0.61723\nI1207 08:44:35.203492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37092 > 2) by scale factor 0.593309\nI1207 08:44:36.144153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20934 > 2) by scale factor 0.623181\nI1207 08:44:37.084522   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43855 > 2) by scale factor 0.820158\nI1207 08:44:38.024705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33642 > 2) by scale factor 0.599446\nI1207 08:44:38.965149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0358 > 2) by scale factor 0.982417\nI1207 08:44:39.905431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76117 > 2) by scale factor 0.531749\nI1207 08:44:40.845403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53251 > 2) by scale factor 0.441256\nI1207 08:44:41.786073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43757 > 2) by scale factor 0.82049\nI1207 08:44:42.726578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85719 > 2) by scale factor 0.518512\nI1207 08:44:43.666965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72058 > 2) by scale factor 0.53755\nI1207 08:44:44.607296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09491 > 2) by scale factor 0.646223\nI1207 08:44:45.547811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40772 > 2) by scale factor 0.830662\nI1207 08:44:46.488667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27117 > 2) by scale factor 0.611402\nI1207 08:44:47.429071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19989 > 2) by scale factor 0.625022\nI1207 08:44:48.369307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90594 > 2) by scale factor 0.688245\nI1207 08:44:49.309826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90487 > 2) by scale factor 0.512181\nI1207 08:44:50.249924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.72626 > 2) by scale factor 0.536732\nI1207 08:44:51.190578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50412 > 2) by scale factor 0.570756\nI1207 08:44:52.130923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34357 > 2) by scale factor 0.598162\nI1207 08:44:53.071570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46807 > 2) by scale factor 0.810349\nI1207 08:44:54.012131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10496 > 2) by scale factor 0.64413\nI1207 08:44:54.954977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63829 > 2) by scale factor 0.758067\nI1207 08:44:56.837381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51786 > 2) by scale factor 0.794325\nI1207 08:44:58.720319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52087 > 2) by scale factor 0.793378\nI1207 08:44:59.663107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03178 > 2) by scale factor 0.984357\nI1207 08:45:00.605438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8019 > 2) by scale factor 0.7138\nI1207 08:45:01.547880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.419 > 2) by scale factor 0.826787\nI1207 08:45:02.490129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65174 > 2) by scale factor 0.547685\nI1207 08:45:03.432389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34654 > 2) by scale factor 0.852319\nI1207 08:45:04.374646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83475 > 2) by scale factor 0.521547\nI1207 08:45:05.317265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06523 > 2) by scale factor 0.65248\nI1207 08:45:06.259714   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21155 > 2) by scale factor 0.904343\nI1207 08:45:07.202492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28089 > 2) by scale factor 0.876852\nI1207 08:45:08.145251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43708 > 2) by scale factor 0.58189\nI1207 08:45:09.088196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35965 > 2) by scale factor 0.5953\nI1207 08:45:10.030297   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79757 > 2) by scale factor 0.526652\nI1207 08:45:10.973469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51447 > 2) by scale factor 0.795396\nI1207 08:45:11.916527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39124 > 2) by scale factor 0.836387\nI1207 08:45:13.798631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34373 > 2) by scale factor 0.598134\nI1207 08:45:15.681082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37092 > 2) by scale factor 0.843554\nI1207 08:45:16.623733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03672 > 2) by scale factor 0.495452\nI1207 08:45:17.566426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72993 > 2) by scale factor 0.73262\nI1207 08:45:18.508992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44621 > 2) by scale factor 0.817591\nI1207 08:45:19.450212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53711 > 2) by scale factor 0.565434\nI1207 08:45:20.392237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34545 > 2) by scale factor 0.597826\nI1207 08:45:21.334393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.82292 > 2) by scale factor 0.523161\nI1207 08:45:22.275766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15744 > 2) by scale factor 0.927023\nI1207 08:45:24.157605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51494 > 2) by scale factor 0.795249\nI1207 08:45:25.100541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3405 > 2) by scale factor 0.598714\nI1207 08:45:26.043283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29141 > 2) by scale factor 0.466048\nI1207 08:45:26.985380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6152 > 2) by scale factor 0.764759\nI1207 08:45:27.927337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74618 > 2) by scale factor 0.728284\nI1207 08:45:28.869613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2269 > 2) by scale factor 0.61979\nI1207 08:45:29.811226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37837 > 2) by scale factor 0.592002\nI1207 08:45:30.753758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35986 > 2) by scale factor 0.595263\nI1207 08:45:31.695605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84157 > 2) by scale factor 0.703835\nI1207 08:45:32.638067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3505 > 2) by scale factor 0.596926\nI1207 08:45:33.579882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85658 > 2) by scale factor 0.700138\nI1207 08:45:34.522296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75111 > 2) by scale factor 0.726979\nI1207 08:45:35.464953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46077 > 2) by scale factor 0.577906\nI1207 08:45:36.407510   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30606 > 2) by scale factor 0.604951\nI1207 08:45:37.350189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41267 > 2) by scale factor 0.586052\nI1207 08:45:39.232190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87675 > 2) by scale factor 0.515896\nI1207 08:45:40.174391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83871 > 2) by scale factor 0.704546\nI1207 08:45:41.116271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05688 > 2) by scale factor 0.654261\nI1207 08:45:42.058451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41944 > 2) by scale factor 0.826637\nI1207 08:45:43.001030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60713 > 2) by scale factor 0.554458\nI1207 08:45:43.942719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68327 > 2) by scale factor 0.74536\nI1207 08:45:44.885151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36562 > 2) by scale factor 0.845445\nI1207 08:45:45.827021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53291 > 2) by scale factor 0.789605\nI1207 08:45:46.769330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77674 > 2) by scale factor 0.720269\nI1207 08:45:47.712076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49279 > 2) by scale factor 0.802314\nI1207 08:45:48.653995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56593 > 2) by scale factor 0.779445\nI1207 08:45:49.596086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03548 > 2) by scale factor 0.658875\nI1207 08:45:50.538558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.40732 > 2) by scale factor 0.453791\nI1207 08:45:51.479754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56967 > 2) by scale factor 0.560276\nI1207 08:45:52.421699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02767 > 2) by scale factor 0.986355\nI1207 08:45:53.364364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27939 > 2) by scale factor 0.877429\nI1207 08:45:56.186770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21754 > 2) by scale factor 0.621594\nI1207 08:45:57.129442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55912 > 2) by scale factor 0.78152\nI1207 08:45:58.071411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1249 > 2) by scale factor 0.941219\nI1207 08:45:58.083374   369 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1207 08:46:51.061844   369 solver.cpp:404]     Test net output #0: accuracy = 0.1979\nI1207 08:46:51.062212   369 solver.cpp:404]     Test net output #1: loss = 4.5205 (* 1 = 4.5205 loss)\nI1207 08:46:51.936537   369 solver.cpp:228] Iteration 7700, loss = 5.28285\nI1207 08:46:51.936583   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 08:46:51.936600   369 solver.cpp:244]     Train net output #1: loss = 5.28285 (* 1 = 5.28285 loss)\nI1207 08:46:52.007078   369 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1207 08:46:52.017215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50217 > 2) by scale factor 0.799307\nI1207 08:46:52.957963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75118 > 2) by scale factor 0.726961\nI1207 08:46:53.898447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34799 > 2) by scale factor 0.597374\nI1207 08:46:54.839166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.13353 > 2) by scale factor 0.638257\nI1207 08:46:55.779320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05884 > 2) by scale factor 0.971421\nI1207 08:46:56.719487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09641 > 2) by scale factor 0.64591\nI1207 08:46:57.660465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73386 > 2) by scale factor 0.731566\nI1207 08:46:58.601238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40929 > 2) by scale factor 0.586632\nI1207 08:46:59.541642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40902 > 2) by scale factor 0.830213\nI1207 08:47:00.481611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40169 > 2) by scale factor 0.587943\nI1207 08:47:01.422068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80412 > 2) by scale factor 0.713236\nI1207 08:47:02.362500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23715 > 2) by scale factor 0.617828\nI1207 08:47:03.302784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20778 > 2) by scale factor 0.905888\nI1207 08:47:04.243023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17894 > 2) by scale factor 0.62914\nI1207 08:47:06.122238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2789 > 2) by scale factor 0.877616\nI1207 08:47:07.062405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23049 > 2) by scale factor 0.619101\nI1207 08:47:08.002769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67991 > 2) by scale factor 0.746293\nI1207 08:47:08.943415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76725 > 2) by scale factor 0.530891\nI1207 08:47:09.883733   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22328 > 2) by scale factor 0.620486\nI1207 08:47:10.824266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37185 > 2) by scale factor 0.593145\nI1207 08:47:11.764922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96311 > 2) by scale factor 0.674966\nI1207 08:47:12.705145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07703 > 2) by scale factor 0.962915\nI1207 08:47:13.645550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74675 > 2) by scale factor 0.533796\nI1207 08:47:14.585733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1522 > 2) by scale factor 0.929283\nI1207 08:47:17.401482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78613 > 2) by scale factor 0.717843\nI1207 08:47:18.341648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06356 > 2) by scale factor 0.49218\nI1207 08:47:19.282294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69339 > 2) by scale factor 0.742559\nI1207 08:47:20.222137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22822 > 2) by scale factor 0.897576\nI1207 08:47:21.162797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88932 > 2) by scale factor 0.514229\nI1207 08:47:22.103996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51615 > 2) by scale factor 0.442855\nI1207 08:47:23.044816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57145 > 2) by scale factor 0.777772\nI1207 08:47:23.986232   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54429 > 2) by scale factor 0.786074\nI1207 08:47:24.928755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.47617 > 2) by scale factor 0.8077\nI1207 08:47:25.870841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36063 > 2) by scale factor 0.847231\nI1207 08:47:27.753360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03315 > 2) by scale factor 0.65938\nI1207 08:47:28.696398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27171 > 2) by scale factor 0.880396\nI1207 08:47:29.638690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35035 > 2) by scale factor 0.850937\nI1207 08:47:30.581338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12854 > 2) by scale factor 0.939612\nI1207 08:47:31.523430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24242 > 2) by scale factor 0.891892\nI1207 08:47:32.465603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77515 > 2) by scale factor 0.720682\nI1207 08:47:34.348748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09287 > 2) by scale factor 0.955626\nI1207 08:47:35.291693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73519 > 2) by scale factor 0.73121\nI1207 08:47:37.174907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93357 > 2) by scale factor 0.681764\nI1207 08:47:38.117535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97019 > 2) by scale factor 0.673358\nI1207 08:47:39.060353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00719 > 2) by scale factor 0.665072\nI1207 08:47:40.003689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74716 > 2) by scale factor 0.728025\nI1207 08:47:40.946599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13496 > 2) by scale factor 0.936787\nI1207 08:47:41.889483   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19832 > 2) by scale factor 0.625328\nI1207 08:47:42.832320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86604 > 2) by scale factor 0.697826\nI1207 08:47:43.774729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03048 > 2) by scale factor 0.984986\nI1207 08:47:44.717257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40894 > 2) by scale factor 0.830242\nI1207 08:47:45.659759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52339 > 2) by scale factor 0.792583\nI1207 08:47:46.602943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42779 > 2) by scale factor 0.583466\nI1207 08:47:47.545735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87 > 2) by scale factor 0.516796\nI1207 08:47:48.487664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92943 > 2) by scale factor 0.50898\nI1207 08:47:49.429311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18106 > 2) by scale factor 0.628722\nI1207 08:47:50.371948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12204 > 2) by scale factor 0.640608\nI1207 08:47:51.314817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67816 > 2) by scale factor 0.746782\nI1207 08:47:52.258265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56741 > 2) by scale factor 0.560631\nI1207 08:47:53.201107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18592 > 2) by scale factor 0.477792\nI1207 08:47:54.143805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45282 > 2) by scale factor 0.579237\nI1207 08:47:56.026374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.54966 > 2) by scale factor 0.784418\nI1207 08:47:56.968540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30134 > 2) by scale factor 0.869057\nI1207 08:47:57.910760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09742 > 2) by scale factor 0.6457\nI1207 08:47:58.853904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36433 > 2) by scale factor 0.594471\nI1207 08:47:59.797104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04038 > 2) by scale factor 0.657813\nI1207 08:48:00.739907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6919 > 2) by scale factor 0.742968\nI1207 08:48:01.682888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4753 > 2) by scale factor 0.807984\nI1207 08:48:02.625751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22225 > 2) by scale factor 0.620684\nI1207 08:48:03.569017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91445 > 2) by scale factor 0.686237\nI1207 08:48:04.511865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88345 > 2) by scale factor 0.693613\nI1207 08:48:05.454589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46586 > 2) by scale factor 0.811077\nI1207 08:48:07.339097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10956 > 2) by scale factor 0.948065\nI1207 08:48:09.223526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53946 > 2) by scale factor 0.565058\nI1207 08:48:11.106812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71755 > 2) by scale factor 0.735956\nI1207 08:48:12.049835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07187 > 2) by scale factor 0.965311\nI1207 08:48:12.993208   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39415 > 2) by scale factor 0.835369\nI1207 08:48:13.936276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91886 > 2) by scale factor 0.685198\nI1207 08:48:14.879487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36789 > 2) by scale factor 0.844634\nI1207 08:48:15.822564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3634 > 2) by scale factor 0.594637\nI1207 08:48:17.706359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20959 > 2) by scale factor 0.905147\nI1207 08:48:18.649174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25165 > 2) by scale factor 0.888238\nI1207 08:48:19.592028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1113 > 2) by scale factor 0.642817\nI1207 08:48:20.534853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25587 > 2) by scale factor 0.886577\nI1207 08:48:21.478001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58273 > 2) by scale factor 0.558234\nI1207 08:48:22.421133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02073 > 2) by scale factor 0.662091\nI1207 08:48:23.363543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2965 > 2) by scale factor 0.606704\nI1207 08:48:24.305455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37281 > 2) by scale factor 0.842883\nI1207 08:48:25.248510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07009 > 2) by scale factor 0.651447\nI1207 08:48:25.260462   369 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1207 08:49:18.244906   369 solver.cpp:404]     Test net output #0: accuracy = 0.2108\nI1207 08:49:18.245252   369 solver.cpp:404]     Test net output #1: 
loss = 5.98884 (* 1 = 5.98884 loss)\nI1207 08:49:19.119042   369 solver.cpp:228] Iteration 7800, loss = 4.92496\nI1207 08:49:19.119091   369 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1207 08:49:19.119108   369 solver.cpp:244]     Train net output #1: loss = 4.92496 (* 1 = 4.92496 loss)\nI1207 08:49:19.196877   369 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1207 08:49:19.206921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58649 > 2) by scale factor 0.77325\nI1207 08:49:20.147826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63785 > 2) by scale factor 0.549775\nI1207 08:49:21.089076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28221 > 2) by scale factor 0.609346\nI1207 08:49:22.029705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32655 > 2) by scale factor 0.601224\nI1207 08:49:22.970392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40056 > 2) by scale factor 0.83314\nI1207 08:49:23.911324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19177 > 2) by scale factor 0.626612\nI1207 08:49:24.852180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15014 > 2) by scale factor 0.930174\nI1207 08:49:25.792876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84294 > 2) by scale factor 0.703497\nI1207 08:49:26.733813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16927 > 2) by scale factor 0.631061\nI1207 08:49:27.674736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95698 > 2) by scale factor 0.676366\nI1207 08:49:28.615667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51639 > 2) by scale factor 0.568766\nI1207 08:49:29.556540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.66107 > 2) by scale factor 0.751579\nI1207 08:49:30.497499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78064 > 2) by scale factor 0.71926\nI1207 08:49:31.438452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8226 > 2) by scale factor 0.708566\nI1207 08:49:33.317734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54328 > 2) by scale factor 0.786385\nI1207 08:49:34.258157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26083 > 2) by scale factor 0.884632\nI1207 08:49:35.199321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05229 > 2) by scale factor 0.655246\nI1207 08:49:36.140136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09662 > 2) by scale factor 0.953919\nI1207 08:49:37.080229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17596 > 2) by scale factor 0.919135\nI1207 08:49:38.020745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31856 > 2) by scale factor 0.862604\nI1207 08:49:38.961599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84194 > 2) by scale factor 0.703745\nI1207 08:49:39.902357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62293 > 2) by scale factor 0.762506\nI1207 08:49:42.720379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67477 > 2) by scale factor 0.747727\nI1207 08:49:43.661207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35981 > 2) by scale factor 0.595272\nI1207 08:49:44.601882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11049 > 2) by scale factor 0.642985\nI1207 08:49:45.542734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57748 > 2) by scale factor 0.775952\nI1207 08:49:46.483554   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12621 > 2) by scale factor 0.940643\nI1207 08:49:47.424412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06872 > 2) by scale factor 0.651738\nI1207 08:49:48.365165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76453 > 2) by scale factor 0.72345\nI1207 08:49:49.308109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74032 > 2) by scale factor 0.534713\nI1207 08:49:50.251085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07308 > 2) by scale factor 0.96475\nI1207 08:49:51.194061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93023 > 2) by scale factor 0.682539\nI1207 08:49:54.959188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54786 > 2) by scale factor 0.784973\nI1207 08:49:55.902385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52625 > 2) by scale factor 0.791686\nI1207 08:49:57.786092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51559 > 2) by scale factor 0.795042\nI1207 08:49:58.729550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68502 > 2) by scale factor 0.744873\nI1207 08:49:59.672732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87105 > 2) by scale factor 0.516656\nI1207 08:50:00.615674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1229 > 2) by scale factor 0.942106\nI1207 08:50:01.558606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89317 > 2) by scale factor 0.691284\nI1207 08:50:02.501457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79382 > 2) by scale factor 0.715865\nI1207 08:50:03.444201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.20076 > 2) by scale factor 0.624852\nI1207 08:50:05.327664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86914 > 2) by scale factor 0.697074\nI1207 08:50:06.270692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25637 > 2) by scale factor 0.886381\nI1207 08:50:07.214494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05593 > 2) by scale factor 0.972793\nI1207 08:50:08.158134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17082 > 2) by scale factor 0.630752\nI1207 08:50:09.101352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47385 > 2) by scale factor 0.575729\nI1207 08:50:10.985718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73328 > 2) by scale factor 0.535721\nI1207 08:50:11.930080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14468 > 2) by scale factor 0.635996\nI1207 08:50:12.874110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2802 > 2) by scale factor 0.877118\nI1207 08:50:13.818414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04183 > 2) by scale factor 0.979512\nI1207 08:50:14.761651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1078 > 2) by scale factor 0.948856\nI1207 08:50:15.705401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16425 > 2) by scale factor 0.924108\nI1207 08:50:16.649145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13038 > 2) by scale factor 0.484217\nI1207 08:50:17.593075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37482 > 2) by scale factor 0.457162\nI1207 08:50:18.536870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60568 > 2) by scale factor 0.434246\nI1207 08:50:19.481737   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19478 > 2) by scale factor 0.626022\nI1207 08:50:20.424937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01781 > 2) by scale factor 0.991173\nI1207 08:50:21.369385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6849 > 2) by scale factor 0.542755\nI1207 08:50:22.313990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11199 > 2) by scale factor 0.642675\nI1207 08:50:23.257396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17313 > 2) by scale factor 0.630292\nI1207 08:50:24.201462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07121 > 2) by scale factor 0.651209\nI1207 08:50:25.146008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18453 > 2) by scale factor 0.628036\nI1207 08:50:26.089421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32105 > 2) by scale factor 0.86168\nI1207 08:50:27.033460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48637 > 2) by scale factor 0.804384\nI1207 08:50:27.977789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76578 > 2) by scale factor 0.723124\nI1207 08:50:28.921646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35419 > 2) by scale factor 0.459328\nI1207 08:50:29.880641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37735 > 2) by scale factor 0.59218\nI1207 08:50:30.840059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36936 > 2) by scale factor 0.593584\nI1207 08:50:31.783042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40799 > 2) by scale factor 0.586856\nI1207 08:50:32.725705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.49868 > 2) by scale factor 0.800424\nI1207 08:50:33.668653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95501 > 2) by scale factor 0.676817\nI1207 08:50:34.611595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80375 > 2) by scale factor 0.713332\nI1207 08:50:35.554633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38581 > 2) by scale factor 0.5907\nI1207 08:50:36.497450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61564 > 2) by scale factor 0.553152\nI1207 08:50:37.440280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75314 > 2) by scale factor 0.420774\nI1207 08:50:38.383033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58869 > 2) by scale factor 0.772592\nI1207 08:50:39.325855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50667 > 2) by scale factor 0.79787\nI1207 08:50:40.267865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69822 > 2) by scale factor 0.741229\nI1207 08:50:41.210202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35505 > 2) by scale factor 0.849239\nI1207 08:50:42.152956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92733 > 2) by scale factor 0.509252\nI1207 08:50:43.095691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21457 > 2) by scale factor 0.622168\nI1207 08:50:44.038223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75734 > 2) by scale factor 0.725338\nI1207 08:50:44.980944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78039 > 2) by scale factor 0.719324\nI1207 08:50:45.923501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35036 > 2) by scale factor 0.850933\nI1207 08:50:46.866103   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5958 > 2) by scale factor 0.43518\nI1207 08:50:47.809032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08534 > 2) by scale factor 0.648227\nI1207 08:50:48.752064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93461 > 2) by scale factor 0.681521\nI1207 08:50:49.695127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.074 > 2) by scale factor 0.650618\nI1207 08:50:50.637513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00225 > 2) by scale factor 0.666168\nI1207 08:50:51.580044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48746 > 2) by scale factor 0.804033\nI1207 08:50:52.523120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16124 > 2) by scale factor 0.480626\nI1207 08:50:52.535097   369 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1207 08:51:45.513065   369 solver.cpp:404]     Test net output #0: accuracy = 0.15705\nI1207 08:51:45.513409   369 solver.cpp:404]     Test net output #1: loss = 7.69842 (* 1 = 7.69842 loss)\nI1207 08:51:46.386318   369 solver.cpp:228] Iteration 7900, loss = 8.60089\nI1207 08:51:46.386363   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 08:51:46.386380   369 solver.cpp:244]     Train net output #1: loss = 8.60089 (* 1 = 8.60089 loss)\nI1207 08:51:46.461475   369 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1207 08:51:46.471611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84704 > 2) by scale factor 0.702483\nI1207 08:51:47.411550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51989 > 2) by scale factor 0.793685\nI1207 08:51:48.351550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81381 > 2) by scale factor 0.71078\nI1207 08:51:49.291631   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02236 > 2) by scale factor 0.497221\nI1207 08:51:50.231608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13901 > 2) by scale factor 0.935013\nI1207 08:51:51.171491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61318 > 2) by scale factor 0.55353\nI1207 08:51:52.111152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76611 > 2) by scale factor 0.723038\nI1207 08:51:53.051312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54879 > 2) by scale factor 0.563572\nI1207 08:51:53.991056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36946 > 2) by scale factor 0.457723\nI1207 08:51:54.931090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0476 > 2) by scale factor 0.656254\nI1207 08:51:55.870904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46127 > 2) by scale factor 0.577822\nI1207 08:51:56.810868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18317 > 2) by scale factor 0.628304\nI1207 08:51:57.750252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90359 > 2) by scale factor 0.688802\nI1207 08:51:58.690341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49227 > 2) by scale factor 0.445209\nI1207 08:51:59.630565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13075 > 2) by scale factor 0.389806\nI1207 08:52:00.570315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24131 > 2) by scale factor 0.471552\nI1207 08:52:01.510036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65902 > 2) by scale factor 0.429275\nI1207 08:52:02.448866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.38886 > 2) by scale factor 0.837221\nI1207 08:52:03.387811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33893 > 2) by scale factor 0.855091\nI1207 08:52:04.327600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6629 > 2) by scale factor 0.75106\nI1207 08:52:05.268013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76424 > 2) by scale factor 0.723527\nI1207 08:52:06.207978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59723 > 2) by scale factor 0.555984\nI1207 08:52:07.147651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78898 > 2) by scale factor 0.527846\nI1207 08:52:09.025141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00556 > 2) by scale factor 0.997229\nI1207 08:52:09.965342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88713 > 2) by scale factor 0.69273\nI1207 08:52:12.780704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08006 > 2) by scale factor 0.961511\nI1207 08:52:13.720237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10585 > 2) by scale factor 0.487109\nI1207 08:52:14.660089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03572 > 2) by scale factor 0.982454\nI1207 08:52:15.600005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24296 > 2) by scale factor 0.891679\nI1207 08:52:16.542939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51097 > 2) by scale factor 0.796504\nI1207 08:52:17.485368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31881 > 2) by scale factor 0.862511\nI1207 08:52:18.427932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.60773 > 2) by scale factor 0.356651\nI1207 08:52:19.370513   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34768 > 2) by scale factor 0.597428\nI1207 08:52:20.313673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35194 > 2) by scale factor 0.59667\nI1207 08:52:21.256538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6562 > 2) by scale factor 0.752954\nI1207 08:52:22.198734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65386 > 2) by scale factor 0.753619\nI1207 08:52:23.141409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16548 > 2) by scale factor 0.631815\nI1207 08:52:25.024348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66105 > 2) by scale factor 0.546292\nI1207 08:52:25.967136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03171 > 2) by scale factor 0.496068\nI1207 08:52:27.848783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04376 > 2) by scale factor 0.97859\nI1207 08:52:28.791054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09694 > 2) by scale factor 0.953771\nI1207 08:52:29.733016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57337 > 2) by scale factor 0.777192\nI1207 08:52:30.675936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60564 > 2) by scale factor 0.767566\nI1207 08:52:31.618266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53067 > 2) by scale factor 0.566465\nI1207 08:52:32.560834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35475 > 2) by scale factor 0.59617\nI1207 08:52:33.502843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76198 > 2) by scale factor 0.531635\nI1207 08:52:34.444034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.54707 > 2) by scale factor 0.563845\nI1207 08:52:35.386112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71048 > 2) by scale factor 0.539014\nI1207 08:52:36.328100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49123 > 2) by scale factor 0.445313\nI1207 08:52:37.270941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09153 > 2) by scale factor 0.488815\nI1207 08:52:38.212625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36813 > 2) by scale factor 0.84455\nI1207 08:52:39.154826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13423 > 2) by scale factor 0.937107\nI1207 08:52:40.096623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36996 > 2) by scale factor 0.843898\nI1207 08:52:41.039178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.805 > 2) by scale factor 0.713014\nI1207 08:52:41.981652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19239 > 2) by scale factor 0.477054\nI1207 08:52:42.924135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3803 > 2) by scale factor 0.840229\nI1207 08:52:43.866921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45974 > 2) by scale factor 0.448456\nI1207 08:52:44.809238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06165 > 2) by scale factor 0.970099\nI1207 08:52:45.751129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26439 > 2) by scale factor 0.883239\nI1207 08:52:46.693969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45277 > 2) by scale factor 0.579244\nI1207 08:52:47.636507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51142 > 2) by scale factor 0.56957\nI1207 08:52:48.579288   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53194 > 2) by scale factor 0.441312\nI1207 08:52:49.522040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83987 > 2) by scale factor 0.704257\nI1207 08:52:50.464664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56112 > 2) by scale factor 0.561621\nI1207 08:52:51.406541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91536 > 2) by scale factor 0.686022\nI1207 08:52:52.349411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7703 > 2) by scale factor 0.721944\nI1207 08:52:53.291931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80465 > 2) by scale factor 0.525672\nI1207 08:52:54.234133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15358 > 2) by scale factor 0.928685\nI1207 08:52:55.176329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27262 > 2) by scale factor 0.468097\nI1207 08:52:56.118590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01307 > 2) by scale factor 0.663774\nI1207 08:52:57.060912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80401 > 2) by scale factor 0.713263\nI1207 08:52:58.003127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53643 > 2) by scale factor 0.565542\nI1207 08:52:58.945003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07341 > 2) by scale factor 0.650743\nI1207 08:52:59.887215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53326 > 2) by scale factor 0.566049\nI1207 08:53:00.829551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67292 > 2) by scale factor 0.544526\nI1207 08:53:01.771546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.8018 > 2) by scale factor 0.713827\nI1207 08:53:02.714063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19343 > 2) by scale factor 0.626287\nI1207 08:53:03.655992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14867 > 2) by scale factor 0.635188\nI1207 08:53:04.598618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15347 > 2) by scale factor 0.634222\nI1207 08:53:06.480672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67804 > 2) by scale factor 0.746816\nI1207 08:53:07.422986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4051 > 2) by scale factor 0.831566\nI1207 08:53:08.364799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19365 > 2) by scale factor 0.626243\nI1207 08:53:09.307638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65757 > 2) by scale factor 0.429408\nI1207 08:53:10.249814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86488 > 2) by scale factor 0.698109\nI1207 08:53:11.192242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16857 > 2) by scale factor 0.631199\nI1207 08:53:12.134335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26456 > 2) by scale factor 0.612641\nI1207 08:53:13.076858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50826 > 2) by scale factor 0.44363\nI1207 08:53:14.019537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39399 > 2) by scale factor 0.455168\nI1207 08:53:14.961874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00489 > 2) by scale factor 0.499389\nI1207 08:53:15.903837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87978 > 2) by scale factor 0.694498\nI1207 08:53:16.846395   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10045 > 2) by scale factor 0.645067\nI1207 08:53:17.788513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92001 > 2) by scale factor 0.684929\nI1207 08:53:18.730417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25354 > 2) by scale factor 0.614715\nI1207 08:53:19.672667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17129 > 2) by scale factor 0.479468\nI1207 08:53:19.684615   369 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1207 08:54:12.649560   369 solver.cpp:404]     Test net output #0: accuracy = 0.1841\nI1207 08:54:12.649950   369 solver.cpp:404]     Test net output #1: loss = 7.86279 (* 1 = 7.86279 loss)\nI1207 08:54:13.522727   369 solver.cpp:228] Iteration 8000, loss = 7.00127\nI1207 08:54:13.522768   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 08:54:13.522785   369 solver.cpp:244]     Train net output #1: loss = 7.00127 (* 1 = 7.00127 loss)\nI1207 08:54:13.592706   369 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1207 08:54:13.602807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67748 > 2) by scale factor 0.746971\nI1207 08:54:14.543130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9301 > 2) by scale factor 0.682571\nI1207 08:54:15.482920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51591 > 2) by scale factor 0.794942\nI1207 08:54:16.422683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52252 > 2) by scale factor 0.567776\nI1207 08:54:17.362686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42647 > 2) by scale factor 0.824243\nI1207 08:54:18.302386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2475 > 2) by scale factor 0.889876\nI1207 08:54:19.241842   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27301 > 2) by scale factor 0.468054\nI1207 08:54:20.182112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51984 > 2) by scale factor 0.793701\nI1207 08:54:21.122098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57764 > 2) by scale factor 0.775902\nI1207 08:54:22.061558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75098 > 2) by scale factor 0.727012\nI1207 08:54:23.001065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94189 > 2) by scale factor 0.507371\nI1207 08:54:23.941401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70204 > 2) by scale factor 0.540242\nI1207 08:54:24.881373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86119 > 2) by scale factor 0.699009\nI1207 08:54:25.821414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25973 > 2) by scale factor 0.613548\nI1207 08:54:26.761265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16788 > 2) by scale factor 0.631337\nI1207 08:54:27.701161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26514 > 2) by scale factor 0.882947\nI1207 08:54:29.578300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43576 > 2) by scale factor 0.582113\nI1207 08:54:30.518539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97797 > 2) by scale factor 0.671598\nI1207 08:54:31.458411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37958 > 2) by scale factor 0.840484\nI1207 08:54:32.397639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14571 > 2) by scale factor 0.932094\nI1207 08:54:33.337450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.74914 > 2) by scale factor 0.7275\nI1207 08:54:34.277593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79475 > 2) by scale factor 0.715629\nI1207 08:54:35.217370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34542 > 2) by scale factor 0.597833\nI1207 08:54:36.157614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44155 > 2) by scale factor 0.819152\nI1207 08:54:38.034819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68088 > 2) by scale factor 0.746024\nI1207 08:54:38.974624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15112 > 2) by scale factor 0.929747\nI1207 08:54:39.914587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31904 > 2) by scale factor 0.602585\nI1207 08:54:40.854877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90666 > 2) by scale factor 0.511946\nI1207 08:54:41.794780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3053 > 2) by scale factor 0.867567\nI1207 08:54:42.736007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6159 > 2) by scale factor 0.764556\nI1207 08:54:43.680217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64961 > 2) by scale factor 0.754827\nI1207 08:54:45.565259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90428 > 2) by scale factor 0.688638\nI1207 08:54:46.508515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38599 > 2) by scale factor 0.838228\nI1207 08:54:47.452023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9422 > 2) by scale factor 0.679764\nI1207 08:54:48.396064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21903 > 2) by scale factor 0.901294\nI1207 08:54:49.339663   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44437 > 2) by scale factor 0.818208\nI1207 08:54:50.283490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58081 > 2) by scale factor 0.558533\nI1207 08:54:51.227573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58812 > 2) by scale factor 0.557394\nI1207 08:54:52.171505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71565 > 2) by scale factor 0.538264\nI1207 08:54:53.115314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27979 > 2) by scale factor 0.609795\nI1207 08:54:54.058696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20031 > 2) by scale factor 0.624939\nI1207 08:54:55.002410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00885 > 2) by scale factor 0.498896\nI1207 08:54:56.887218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45108 > 2) by scale factor 0.815967\nI1207 08:54:57.830911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74099 > 2) by scale factor 0.729663\nI1207 08:54:58.774086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66052 > 2) by scale factor 0.751733\nI1207 08:55:00.658660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18329 > 2) by scale factor 0.916051\nI1207 08:55:01.602380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47025 > 2) by scale factor 0.809634\nI1207 08:55:03.487032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55507 > 2) by scale factor 0.782756\nI1207 08:55:05.371985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23031 > 2) by scale factor 0.896736\nI1207 08:55:08.197715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.08096 > 2) by scale factor 0.649148\nI1207 08:55:09.140883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34288 > 2) by scale factor 0.853649\nI1207 08:55:11.025944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92264 > 2) by scale factor 0.684312\nI1207 08:55:11.969671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67976 > 2) by scale factor 0.746335\nI1207 08:55:12.913305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26862 > 2) by scale factor 0.881591\nI1207 08:55:14.798347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47749 > 2) by scale factor 0.575128\nI1207 08:55:15.742267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90611 > 2) by scale factor 0.688205\nI1207 08:55:16.686139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26534 > 2) by scale factor 0.882871\nI1207 08:55:19.511517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04413 > 2) by scale factor 0.97841\nI1207 08:55:20.455006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39089 > 2) by scale factor 0.589816\nI1207 08:55:21.398066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73045 > 2) by scale factor 0.732481\nI1207 08:55:22.341740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17193 > 2) by scale factor 0.920841\nI1207 08:55:23.285255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20585 > 2) by scale factor 0.90668\nI1207 08:55:24.229022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04155 > 2) by scale factor 0.979647\nI1207 08:55:25.172709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6804 > 2) by scale factor 0.746158\nI1207 08:55:26.116000   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59076 > 2) by scale factor 0.771974\nI1207 08:55:27.059415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6507 > 2) by scale factor 0.754518\nI1207 08:55:28.002059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65864 > 2) by scale factor 0.752264\nI1207 08:55:28.945569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24376 > 2) by scale factor 0.89136\nI1207 08:55:30.829563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31428 > 2) by scale factor 0.60345\nI1207 08:55:31.773114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3742 > 2) by scale factor 0.842389\nI1207 08:55:32.717017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64034 > 2) by scale factor 0.757477\nI1207 08:55:33.660502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38586 > 2) by scale factor 0.590692\nI1207 08:55:34.604159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93164 > 2) by scale factor 0.682211\nI1207 08:55:35.546996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36268 > 2) by scale factor 0.846496\nI1207 08:55:36.490602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41548 > 2) by scale factor 0.585569\nI1207 08:55:37.434154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04899 > 2) by scale factor 0.97609\nI1207 08:55:39.319360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82937 > 2) by scale factor 0.706872\nI1207 08:55:40.262627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46549 > 2) by scale factor 0.577118\nI1207 08:55:42.147640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.16739 > 2) by scale factor 0.631435\nI1207 08:55:44.974356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81516 > 2) by scale factor 0.524224\nI1207 08:55:46.860625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14721 > 2) by scale factor 0.635484\nI1207 08:55:46.872565   369 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1207 08:56:39.856436   369 solver.cpp:404]     Test net output #0: accuracy = 0.22925\nI1207 08:56:39.856820   369 solver.cpp:404]     Test net output #1: loss = 6.83587 (* 1 = 6.83587 loss)\nI1207 08:56:40.729437   369 solver.cpp:228] Iteration 8100, loss = 6.89028\nI1207 08:56:40.729482   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 08:56:40.729501   369 solver.cpp:244]     Train net output #1: loss = 6.89028 (* 1 = 6.89028 loss)\nI1207 08:56:40.804577   369 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1207 08:56:41.752585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70974 > 2) by scale factor 0.738077\nI1207 08:56:42.692309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64462 > 2) by scale factor 0.756252\nI1207 08:56:43.632305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40279 > 2) by scale factor 0.587752\nI1207 08:56:44.572531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29178 > 2) by scale factor 0.872684\nI1207 08:56:45.512835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23514 > 2) by scale factor 0.618211\nI1207 08:56:46.452795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73422 > 2) by scale factor 0.731471\nI1207 08:56:47.392870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12886 > 2) by scale factor 0.939469\nI1207 08:56:48.332823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.73317 > 2) by scale factor 0.535738\nI1207 08:56:49.272923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94196 > 2) by scale factor 0.507362\nI1207 08:56:50.212885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40433 > 2) by scale factor 0.454099\nI1207 08:56:51.153071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11544 > 2) by scale factor 0.945429\nI1207 08:56:52.093024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12903 > 2) by scale factor 0.639176\nI1207 08:56:53.032688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06366 > 2) by scale factor 0.492167\nI1207 08:56:53.972692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03681 > 2) by scale factor 0.658587\nI1207 08:56:54.913115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89926 > 2) by scale factor 0.689832\nI1207 08:56:55.853205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50811 > 2) by scale factor 0.797412\nI1207 08:56:56.793118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67364 > 2) by scale factor 0.748044\nI1207 08:56:57.732509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84086 > 2) by scale factor 0.704011\nI1207 08:56:58.672741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37336 > 2) by scale factor 0.842687\nI1207 08:56:59.612989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54144 > 2) by scale factor 0.564742\nI1207 08:57:00.552592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93911 > 2) by scale factor 0.507729\nI1207 08:57:01.492557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05844 > 2) by scale factor 0.97161\nI1207 08:57:02.432687   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53621 > 2) by scale factor 0.788578\nI1207 08:57:03.372622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27437 > 2) by scale factor 0.610805\nI1207 08:57:04.312479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95506 > 2) by scale factor 0.676806\nI1207 08:57:05.252156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43456 > 2) by scale factor 0.582317\nI1207 08:57:06.192162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3916 > 2) by scale factor 0.836261\nI1207 08:57:07.132205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10905 > 2) by scale factor 0.948294\nI1207 08:57:08.071934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02016 > 2) by scale factor 0.990023\nI1207 08:57:09.011778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62722 > 2) by scale factor 0.551387\nI1207 08:57:09.951681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53361 > 2) by scale factor 0.565993\nI1207 08:57:10.894881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31942 > 2) by scale factor 0.862283\nI1207 08:57:11.837971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13347 > 2) by scale factor 0.63827\nI1207 08:57:12.780812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2736 > 2) by scale factor 0.879661\nI1207 08:57:13.724318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83873 > 2) by scale factor 0.521005\nI1207 08:57:14.667527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10319 > 2) by scale factor 0.950937\nI1207 08:57:15.610718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.9944 > 2) by scale factor 0.500701\nI1207 08:57:16.553457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53898 > 2) by scale factor 0.787718\nI1207 08:57:19.378408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00846 > 2) by scale factor 0.664792\nI1207 08:57:20.322031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66033 > 2) by scale factor 0.751788\nI1207 08:57:21.265252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10319 > 2) by scale factor 0.644499\nI1207 08:57:22.208082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8093 > 2) by scale factor 0.711921\nI1207 08:57:23.151607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01485 > 2) by scale factor 0.992628\nI1207 08:57:24.094743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56741 > 2) by scale factor 0.56063\nI1207 08:57:25.037720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28724 > 2) by scale factor 0.608414\nI1207 08:57:25.980933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77982 > 2) by scale factor 0.71947\nI1207 08:57:26.924132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18513 > 2) by scale factor 0.627918\nI1207 08:57:27.867352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13384 > 2) by scale factor 0.93728\nI1207 08:57:28.810034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70356 > 2) by scale factor 0.739765\nI1207 08:57:29.753008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70869 > 2) by scale factor 0.738365\nI1207 08:57:30.695924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97086 > 2) by scale factor 0.673206\nI1207 08:57:31.639268   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42563 > 2) by scale factor 0.824529\nI1207 08:57:32.582886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27552 > 2) by scale factor 0.610591\nI1207 08:57:33.525879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06025 > 2) by scale factor 0.653541\nI1207 08:57:37.291200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51278 > 2) by scale factor 0.79593\nI1207 08:57:38.233299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73762 > 2) by scale factor 0.730562\nI1207 08:57:39.176234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32657 > 2) by scale factor 0.601219\nI1207 08:57:40.119429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90043 > 2) by scale factor 0.512764\nI1207 08:57:41.062815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78099 > 2) by scale factor 0.719169\nI1207 08:57:42.005506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27955 > 2) by scale factor 0.609839\nI1207 08:57:42.948088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02696 > 2) by scale factor 0.660729\nI1207 08:57:44.831212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02203 > 2) by scale factor 0.989104\nI1207 08:57:45.774271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7939 > 2) by scale factor 0.715846\nI1207 08:57:46.717208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06292 > 2) by scale factor 0.969499\nI1207 08:57:47.659797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83529 > 2) by scale factor 0.705394\nI1207 08:57:48.602329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.79193 > 2) by scale factor 0.527436\nI1207 08:57:49.544989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34415 > 2) by scale factor 0.598059\nI1207 08:57:50.487339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52354 > 2) by scale factor 0.567611\nI1207 08:57:51.430074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18932 > 2) by scale factor 0.627093\nI1207 08:57:52.372359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75439 > 2) by scale factor 0.53271\nI1207 08:57:53.315470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27787 > 2) by scale factor 0.610152\nI1207 08:57:54.258399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61334 > 2) by scale factor 0.553504\nI1207 08:57:55.201573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92988 > 2) by scale factor 0.682622\nI1207 08:57:56.144158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22173 > 2) by scale factor 0.900199\nI1207 08:57:57.086241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4668 > 2) by scale factor 0.576901\nI1207 08:57:58.029175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62654 > 2) by scale factor 0.55149\nI1207 08:57:58.971870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83935 > 2) by scale factor 0.704386\nI1207 08:57:59.914748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51712 > 2) by scale factor 0.44276\nI1207 08:58:00.857801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49926 > 2) by scale factor 0.571549\nI1207 08:58:01.800493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44694 > 2) by scale factor 0.449748\nI1207 08:58:02.743625   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57478 > 2) by scale factor 0.559474\nI1207 08:58:03.686453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81559 > 2) by scale factor 0.524166\nI1207 08:58:04.629364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82433 > 2) by scale factor 0.708133\nI1207 08:58:06.513284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36735 > 2) by scale factor 0.593939\nI1207 08:58:07.456457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00701 > 2) by scale factor 0.665112\nI1207 08:58:08.399147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92863 > 2) by scale factor 0.682914\nI1207 08:58:09.341609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23583 > 2) by scale factor 0.894522\nI1207 08:58:10.284193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78844 > 2) by scale factor 0.717246\nI1207 08:58:11.227905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42086 > 2) by scale factor 0.584649\nI1207 08:58:12.171042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89307 > 2) by scale factor 0.691308\nI1207 08:58:13.114087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44878 > 2) by scale factor 0.816733\nI1207 08:58:14.056949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38866 > 2) by scale factor 0.83729\nI1207 08:58:14.068855   369 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1207 08:59:07.046156   369 solver.cpp:404]     Test net output #0: accuracy = 0.21015\nI1207 08:59:07.046538   369 solver.cpp:404]     Test net output #1: loss = 5.30297 (* 1 = 5.30297 loss)\nI1207 08:59:07.919646   369 solver.cpp:228] Iteration 8200, loss = 4.54061\nI1207 08:59:07.919694   
369 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1207 08:59:07.919713   369 solver.cpp:244]     Train net output #1: loss = 4.54061 (* 1 = 4.54061 loss)\nI1207 08:59:07.999018   369 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1207 08:59:08.946871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28838 > 2) by scale factor 0.608203\nI1207 08:59:09.886389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58229 > 2) by scale factor 0.774507\nI1207 08:59:10.826014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33138 > 2) by scale factor 0.600352\nI1207 08:59:11.766211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30259 > 2) by scale factor 0.868586\nI1207 08:59:12.705742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00257 > 2) by scale factor 0.666097\nI1207 08:59:13.645539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16427 > 2) by scale factor 0.632057\nI1207 08:59:14.585711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28273 > 2) by scale factor 0.609249\nI1207 08:59:15.525568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15694 > 2) by scale factor 0.633524\nI1207 08:59:16.465492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41912 > 2) by scale factor 0.452579\nI1207 08:59:17.405315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35755 > 2) by scale factor 0.458973\nI1207 08:59:18.345512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06811 > 2) by scale factor 0.394624\nI1207 08:59:19.285308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58488 > 2) by scale factor 0.773729\nI1207 08:59:20.224840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.53647 > 2) by scale factor 0.565536\nI1207 08:59:21.164877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8355 > 2) by scale factor 0.705342\nI1207 08:59:22.104811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92363 > 2) by scale factor 0.684081\nI1207 08:59:23.044450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02921 > 2) by scale factor 0.496375\nI1207 08:59:23.984716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12497 > 2) by scale factor 0.484852\nI1207 08:59:24.925171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53156 > 2) by scale factor 0.566322\nI1207 08:59:25.865468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05726 > 2) by scale factor 0.654181\nI1207 08:59:26.805608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.526 > 2) by scale factor 0.791767\nI1207 08:59:27.745831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19375 > 2) by scale factor 0.911679\nI1207 08:59:28.686074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60166 > 2) by scale factor 0.76874\nI1207 08:59:29.626612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65872 > 2) by scale factor 0.54664\nI1207 08:59:30.566975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87345 > 2) by scale factor 0.696027\nI1207 08:59:31.507388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31776 > 2) by scale factor 0.463203\nI1207 08:59:32.448490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1421 > 2) by scale factor 0.636518\nI1207 08:59:33.389070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53485 > 2) by scale factor 0.565794\nI1207 08:59:34.330220   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26309 > 2) by scale factor 0.469143\nI1207 08:59:35.271452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73201 > 2) by scale factor 0.535904\nI1207 08:59:36.212770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16528 > 2) by scale factor 0.923667\nI1207 08:59:37.154140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26812 > 2) by scale factor 0.611973\nI1207 08:59:38.095710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57332 > 2) by scale factor 0.559703\nI1207 08:59:39.036872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49978 > 2) by scale factor 0.800071\nI1207 08:59:39.978756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14544 > 2) by scale factor 0.93221\nI1207 08:59:40.921016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92503 > 2) by scale factor 0.683755\nI1207 08:59:41.864166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14666 > 2) by scale factor 0.931679\nI1207 08:59:42.807013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55367 > 2) by scale factor 0.783185\nI1207 08:59:43.749691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3128 > 2) by scale factor 0.603718\nI1207 08:59:44.692423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41787 > 2) by scale factor 0.58516\nI1207 08:59:45.634811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34131 > 2) by scale factor 0.598568\nI1207 08:59:46.577452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42154 > 2) by scale factor 0.584533\nI1207 08:59:47.519927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.57714 > 2) by scale factor 0.776053\nI1207 08:59:48.462360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48181 > 2) by scale factor 0.805863\nI1207 08:59:50.345129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33854 > 2) by scale factor 0.855234\nI1207 08:59:52.227756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58948 > 2) by scale factor 0.772355\nI1207 08:59:53.170893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00524 > 2) by scale factor 0.665503\nI1207 08:59:54.113180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.50631 > 2) by scale factor 0.307394\nI1207 08:59:55.055392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21954 > 2) by scale factor 0.473985\nI1207 08:59:55.997485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81758 > 2) by scale factor 0.709828\nI1207 08:59:56.939978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84803 > 2) by scale factor 0.519747\nI1207 08:59:57.882370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44255 > 2) by scale factor 0.580964\nI1207 08:59:58.824941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74288 > 2) by scale factor 0.72916\nI1207 08:59:59.767776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6212 > 2) by scale factor 0.552303\nI1207 09:00:00.710176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79458 > 2) by scale factor 0.715672\nI1207 09:00:01.653216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58113 > 2) by scale factor 0.558482\nI1207 09:00:02.596572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21172 > 2) by scale factor 0.474865\nI1207 09:00:03.539614   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71078 > 2) by scale factor 0.53897\nI1207 09:00:04.482779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44904 > 2) by scale factor 0.816648\nI1207 09:00:05.425511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99463 > 2) by scale factor 0.667863\nI1207 09:00:06.368162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19801 > 2) by scale factor 0.909914\nI1207 09:00:07.310833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25381 > 2) by scale factor 0.614665\nI1207 09:00:08.254099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14429 > 2) by scale factor 0.482592\nI1207 09:00:09.196532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2846 > 2) by scale factor 0.875428\nI1207 09:00:10.138988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69025 > 2) by scale factor 0.743425\nI1207 09:00:11.081169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62909 > 2) by scale factor 0.76072\nI1207 09:00:12.024122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1803 > 2) by scale factor 0.628871\nI1207 09:00:12.967106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64541 > 2) by scale factor 0.756027\nI1207 09:00:14.849951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38509 > 2) by scale factor 0.590826\nI1207 09:00:15.792619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95948 > 2) by scale factor 0.675795\nI1207 09:00:16.735298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63506 > 2) by scale factor 0.758997\nI1207 09:00:17.678143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.76513 > 2) by scale factor 0.723292\nI1207 09:00:18.621070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4347 > 2) by scale factor 0.582293\nI1207 09:00:19.563715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.112 > 2) by scale factor 0.94697\nI1207 09:00:20.506302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0731 > 2) by scale factor 0.650808\nI1207 09:00:21.449321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76625 > 2) by scale factor 0.723001\nI1207 09:00:22.391919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29537 > 2) by scale factor 0.871319\nI1207 09:00:24.274230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45961 > 2) by scale factor 0.5781\nI1207 09:00:25.216990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10848 > 2) by scale factor 0.643402\nI1207 09:00:26.159473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69183 > 2) by scale factor 0.541737\nI1207 09:00:27.102270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41362 > 2) by scale factor 0.585888\nI1207 09:00:28.044495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77106 > 2) by scale factor 0.721745\nI1207 09:00:28.986971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54706 > 2) by scale factor 0.785218\nI1207 09:00:29.929464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69566 > 2) by scale factor 0.741932\nI1207 09:00:30.872141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72295 > 2) by scale factor 0.734498\nI1207 09:00:31.813963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36364 > 2) by scale factor 0.846152\nI1207 09:00:32.756681   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76436 > 2) by scale factor 0.723494\nI1207 09:00:34.639600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43639 > 2) by scale factor 0.450817\nI1207 09:00:35.582139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77243 > 2) by scale factor 0.721389\nI1207 09:00:36.524596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35528 > 2) by scale factor 0.849155\nI1207 09:00:37.467085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76363 > 2) by scale factor 0.723685\nI1207 09:00:38.410280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60921 > 2) by scale factor 0.766514\nI1207 09:00:39.352844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98799 > 2) by scale factor 0.669345\nI1207 09:00:40.294778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08076 > 2) by scale factor 0.64919\nI1207 09:00:41.236611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82116 > 2) by scale factor 0.708928\nI1207 09:00:41.248589   369 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1207 09:01:34.237928   369 solver.cpp:404]     Test net output #0: accuracy = 0.1671\nI1207 09:01:34.238296   369 solver.cpp:404]     Test net output #1: loss = 9.4215 (* 1 = 9.4215 loss)\nI1207 09:01:35.112220   369 solver.cpp:228] Iteration 8300, loss = 8.96746\nI1207 09:01:35.112267   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 09:01:35.112284   369 solver.cpp:244]     Train net output #1: loss = 8.96746 (* 1 = 8.96746 loss)\nI1207 09:01:35.183534   369 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1207 09:01:35.193660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2382 > 2) by scale factor 0.617628\nI1207 09:01:36.134810   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25028 > 2) by scale factor 0.615332\nI1207 09:01:37.075204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24968 > 2) by scale factor 0.470623\nI1207 09:01:38.015547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33045 > 2) by scale factor 0.600519\nI1207 09:01:38.955832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91001 > 2) by scale factor 0.687283\nI1207 09:01:39.896620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98556 > 2) by scale factor 0.501812\nI1207 09:01:40.837229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68354 > 2) by scale factor 0.542956\nI1207 09:01:41.777510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74633 > 2) by scale factor 0.533856\nI1207 09:01:42.718001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49715 > 2) by scale factor 0.800915\nI1207 09:01:43.659276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32443 > 2) by scale factor 0.860426\nI1207 09:01:44.599895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.685 > 2) by scale factor 0.54274\nI1207 09:01:45.540920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37686 > 2) by scale factor 0.841447\nI1207 09:01:46.481819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00488 > 2) by scale factor 0.499391\nI1207 09:01:47.422997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99251 > 2) by scale factor 0.668335\nI1207 09:01:48.363919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82336 > 2) by scale factor 0.708377\nI1207 09:01:49.304064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.50821 > 2) by scale factor 0.570091\nI1207 09:01:50.244776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8967 > 2) by scale factor 0.690441\nI1207 09:01:51.185322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31635 > 2) by scale factor 0.863427\nI1207 09:01:52.125946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13338 > 2) by scale factor 0.937479\nI1207 09:01:53.066102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93654 > 2) by scale factor 0.681075\nI1207 09:01:54.006712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8364 > 2) by scale factor 0.521322\nI1207 09:01:54.947271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69902 > 2) by scale factor 0.540683\nI1207 09:01:55.888450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51788 > 2) by scale factor 0.568524\nI1207 09:01:56.829712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56903 > 2) by scale factor 0.560376\nI1207 09:01:57.770223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28691 > 2) by scale factor 0.874541\nI1207 09:01:59.648887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39918 > 2) by scale factor 0.83362\nI1207 09:02:00.588826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64322 > 2) by scale factor 0.548966\nI1207 09:02:01.529454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08376 > 2) by scale factor 0.648559\nI1207 09:02:02.469303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67889 > 2) by scale factor 0.746577\nI1207 09:02:03.409576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11749 > 2) by scale factor 0.641542\nI1207 09:02:04.350949   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62026 > 2) by scale factor 0.763284\nI1207 09:02:05.293634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0823 > 2) by scale factor 0.960477\nI1207 09:02:06.236145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16737 > 2) by scale factor 0.922779\nI1207 09:02:07.178493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51018 > 2) by scale factor 0.569771\nI1207 09:02:08.120744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9402 > 2) by scale factor 0.680226\nI1207 09:02:09.063288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03833 > 2) by scale factor 0.981196\nI1207 09:02:10.006224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70583 > 2) by scale factor 0.739144\nI1207 09:02:10.948230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4978 > 2) by scale factor 0.571788\nI1207 09:02:11.890713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82752 > 2) by scale factor 0.707333\nI1207 09:02:12.832492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05296 > 2) by scale factor 0.655101\nI1207 09:02:13.775321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44103 > 2) by scale factor 0.819326\nI1207 09:02:14.717706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80547 > 2) by scale factor 0.712893\nI1207 09:02:15.660465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78199 > 2) by scale factor 0.418236\nI1207 09:02:16.602923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91049 > 2) by scale factor 0.687171\nI1207 09:02:17.545565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.32221 > 2) by scale factor 0.602009\nI1207 09:02:18.487413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65928 > 2) by scale factor 0.752084\nI1207 09:02:19.430424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10775 > 2) by scale factor 0.948878\nI1207 09:02:20.373757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11762 > 2) by scale factor 0.944455\nI1207 09:02:21.316519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59588 > 2) by scale factor 0.556192\nI1207 09:02:22.260248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98691 > 2) by scale factor 0.669588\nI1207 09:02:23.204071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19417 > 2) by scale factor 0.911506\nI1207 09:02:24.147475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22038 > 2) by scale factor 0.900747\nI1207 09:02:26.032804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22035 > 2) by scale factor 0.90076\nI1207 09:02:26.975786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2711 > 2) by scale factor 0.611414\nI1207 09:02:27.919852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55298 > 2) by scale factor 0.783399\nI1207 09:02:28.863266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01099 > 2) by scale factor 0.664233\nI1207 09:02:29.806780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95409 > 2) by scale factor 0.677027\nI1207 09:02:30.749940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9432 > 2) by scale factor 0.679532\nI1207 09:02:31.693471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86197 > 2) by scale factor 0.698819\nI1207 09:02:32.636984   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73115 > 2) by scale factor 0.732291\nI1207 09:02:33.580711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77412 > 2) by scale factor 0.72095\nI1207 09:02:34.523324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61834 > 2) by scale factor 0.763844\nI1207 09:02:35.468049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91981 > 2) by scale factor 0.684976\nI1207 09:02:36.411942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10661 > 2) by scale factor 0.949394\nI1207 09:02:37.355664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19053 > 2) by scale factor 0.626856\nI1207 09:02:38.299373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81403 > 2) by scale factor 0.710725\nI1207 09:02:39.242321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19716 > 2) by scale factor 0.625555\nI1207 09:02:40.185557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58254 > 2) by scale factor 0.558264\nI1207 09:02:41.129623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85158 > 2) by scale factor 0.701365\nI1207 09:02:42.073319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84091 > 2) by scale factor 0.703999\nI1207 09:02:43.016939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59304 > 2) by scale factor 0.771295\nI1207 09:02:43.960250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.74179 > 2) by scale factor 0.296657\nI1207 09:02:44.903636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 7.24599 > 2) by scale factor 0.276015\nI1207 09:02:45.846746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.20007 > 2) by scale factor 0.624986\nI1207 09:02:46.789880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86978 > 2) by scale factor 0.696917\nI1207 09:02:47.733253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57961 > 2) by scale factor 0.775312\nI1207 09:02:48.675442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70119 > 2) by scale factor 0.425424\nI1207 09:02:49.617872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89008 > 2) by scale factor 0.514128\nI1207 09:02:50.559947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08649 > 2) by scale factor 0.647986\nI1207 09:02:51.502558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46487 > 2) by scale factor 0.577222\nI1207 09:02:52.444910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99313 > 2) by scale factor 0.668196\nI1207 09:02:53.387439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43275 > 2) by scale factor 0.822114\nI1207 09:02:54.329974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57874 > 2) by scale factor 0.558857\nI1207 09:02:55.272812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11249 > 2) by scale factor 0.486323\nI1207 09:02:56.215926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66924 > 2) by scale factor 0.545072\nI1207 09:02:57.159636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3578 > 2) by scale factor 0.84825\nI1207 09:02:58.103688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72807 > 2) by scale factor 0.733118\nI1207 09:02:59.047127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39182 > 2) by scale factor 0.589653\nI1207 09:02:59.990262   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48187 > 2) by scale factor 0.574403\nI1207 09:03:00.933322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11296 > 2) by scale factor 0.946542\nI1207 09:03:01.876648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27192 > 2) by scale factor 0.611262\nI1207 09:03:02.820358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79097 > 2) by scale factor 0.716596\nI1207 09:03:03.763715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39522 > 2) by scale factor 0.834996\nI1207 09:03:05.647037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62467 > 2) by scale factor 0.762001\nI1207 09:03:06.591965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21386 > 2) by scale factor 0.622304\nI1207 09:03:07.534903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14991 > 2) by scale factor 0.634938\nI1207 09:03:08.477880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05872 > 2) by scale factor 0.653868\nI1207 09:03:08.489778   369 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1207 09:04:01.462390   369 solver.cpp:404]     Test net output #0: accuracy = 0.1555\nI1207 09:04:01.462775   369 solver.cpp:404]     Test net output #1: loss = 5.54109 (* 1 = 5.54109 loss)\nI1207 09:04:02.336967   369 solver.cpp:228] Iteration 8400, loss = 6.05697\nI1207 09:04:02.337015   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 09:04:02.337033   369 solver.cpp:244]     Train net output #1: loss = 6.05697 (* 1 = 6.05697 loss)\nI1207 09:04:02.407840   369 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1207 09:04:02.417906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64579 > 2) by scale factor 0.755917\nI1207 09:04:03.359025   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54015 > 2) by scale factor 0.787354\nI1207 09:04:04.300117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97277 > 2) by scale factor 0.503427\nI1207 09:04:05.241510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09292 > 2) by scale factor 0.488649\nI1207 09:04:06.182497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01424 > 2) by scale factor 0.498226\nI1207 09:04:07.123980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68234 > 2) by scale factor 0.745619\nI1207 09:04:08.064752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79667 > 2) by scale factor 0.715135\nI1207 09:04:09.005561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6391 > 2) by scale factor 0.549586\nI1207 09:04:09.946584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43045 > 2) by scale factor 0.583014\nI1207 09:04:10.887821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93672 > 2) by scale factor 0.681032\nI1207 09:04:11.828909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20396 > 2) by scale factor 0.624227\nI1207 09:04:12.769887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53021 > 2) by scale factor 0.566539\nI1207 09:04:13.710523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80238 > 2) by scale factor 0.713679\nI1207 09:04:14.651221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52179 > 2) by scale factor 0.567893\nI1207 09:04:15.592430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10542 > 2) by scale factor 0.487161\nI1207 09:04:16.533671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.34176 > 2) by scale factor 0.460643\nI1207 09:04:17.475069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51951 > 2) by scale factor 0.793804\nI1207 09:04:18.416052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22883 > 2) by scale factor 0.619419\nI1207 09:04:19.356436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03947 > 2) by scale factor 0.65801\nI1207 09:04:20.297664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5761 > 2) by scale factor 0.776368\nI1207 09:04:21.238766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26392 > 2) by scale factor 0.469052\nI1207 09:04:22.179630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47357 > 2) by scale factor 0.808548\nI1207 09:04:23.120756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77161 > 2) by scale factor 0.721603\nI1207 09:04:24.061622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82349 > 2) by scale factor 0.708343\nI1207 09:04:25.001914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72786 > 2) by scale factor 0.733174\nI1207 09:04:26.881719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05362 > 2) by scale factor 0.65496\nI1207 09:04:27.823015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12191 > 2) by scale factor 0.640634\nI1207 09:04:28.764101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23545 > 2) by scale factor 0.618152\nI1207 09:04:29.705206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23119 > 2) by scale factor 0.896381\nI1207 09:04:30.645970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24268 > 2) by scale factor 0.89179\nI1207 09:04:31.588235   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04186 > 2) by scale factor 0.979498\nI1207 09:04:33.469722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31962 > 2) by scale factor 0.463004\nI1207 09:04:34.412297   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72109 > 2) by scale factor 0.537477\nI1207 09:04:35.355340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63066 > 2) by scale factor 0.760267\nI1207 09:04:36.298562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06438 > 2) by scale factor 0.65266\nI1207 09:04:37.241477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83461 > 2) by scale factor 0.705564\nI1207 09:04:38.184408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17611 > 2) by scale factor 0.629701\nI1207 09:04:39.127180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40138 > 2) by scale factor 0.832855\nI1207 09:04:40.070544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08578 > 2) by scale factor 0.489502\nI1207 09:04:41.954046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89745 > 2) by scale factor 0.690263\nI1207 09:04:42.896981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88265 > 2) by scale factor 0.693805\nI1207 09:04:43.839669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94341 > 2) by scale factor 0.507176\nI1207 09:04:44.782711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16515 > 2) by scale factor 0.480175\nI1207 09:04:45.725234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52446 > 2) by scale factor 0.567463\nI1207 09:04:46.667981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.32534 > 2) by scale factor 0.462392\nI1207 09:04:47.611075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97074 > 2) by scale factor 0.503685\nI1207 09:04:48.553666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74154 > 2) by scale factor 0.534539\nI1207 09:04:49.496328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05841 > 2) by scale factor 0.395381\nI1207 09:04:50.438932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40528 > 2) by scale factor 0.454\nI1207 09:04:51.382200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54644 > 2) by scale factor 0.785411\nI1207 09:04:52.325279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51658 > 2) by scale factor 0.794731\nI1207 09:04:53.268020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93902 > 2) by scale factor 0.50774\nI1207 09:04:54.210620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.33173 > 2) by scale factor 0.375113\nI1207 09:04:55.153271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69777 > 2) by scale factor 0.741353\nI1207 09:04:56.096068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91018 > 2) by scale factor 0.511485\nI1207 09:04:57.038503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67959 > 2) by scale factor 0.746383\nI1207 09:04:57.981318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60536 > 2) by scale factor 0.767647\nI1207 09:04:58.923729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41963 > 2) by scale factor 0.826572\nI1207 09:04:59.866529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2405 > 2) by scale factor 0.617189\nI1207 09:05:00.809695   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86551 > 2) by scale factor 0.517396\nI1207 09:05:01.752398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9235 > 2) by scale factor 0.684111\nI1207 09:05:02.695804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9421 > 2) by scale factor 0.679787\nI1207 09:05:03.638695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68208 > 2) by scale factor 0.543171\nI1207 09:05:04.581615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82265 > 2) by scale factor 0.523197\nI1207 09:05:05.524530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1775 > 2) by scale factor 0.629426\nI1207 09:05:06.467301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48185 > 2) by scale factor 0.574408\nI1207 09:05:07.410313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08964 > 2) by scale factor 0.647325\nI1207 09:05:08.353219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1448 > 2) by scale factor 0.932489\nI1207 09:05:09.295876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32881 > 2) by scale factor 0.600815\nI1207 09:05:10.238520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62751 > 2) by scale factor 0.551343\nI1207 09:05:11.181330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2951 > 2) by scale factor 0.465646\nI1207 09:05:12.123536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02423 > 2) by scale factor 0.988031\nI1207 09:05:13.065486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75616 > 2) by scale factor 0.725647\nI1207 09:05:14.008736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.17422 > 2) by scale factor 0.919869\nI1207 09:05:14.951568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03769 > 2) by scale factor 0.981505\nI1207 09:05:15.894817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38654 > 2) by scale factor 0.838033\nI1207 09:05:16.837764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65082 > 2) by scale factor 0.547822\nI1207 09:05:18.721107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78454 > 2) by scale factor 0.718252\nI1207 09:05:19.664104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68887 > 2) by scale factor 0.743808\nI1207 09:05:20.606873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09427 > 2) by scale factor 0.954987\nI1207 09:05:21.549671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26722 > 2) by scale factor 0.612142\nI1207 09:05:22.492480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64748 > 2) by scale factor 0.755436\nI1207 09:05:23.435796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64651 > 2) by scale factor 0.755711\nI1207 09:05:24.378360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15757 > 2) by scale factor 0.633399\nI1207 09:05:25.320607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04583 > 2) by scale factor 0.656634\nI1207 09:05:26.263445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15996 > 2) by scale factor 0.63292\nI1207 09:05:27.206317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14732 > 2) by scale factor 0.635461\nI1207 09:05:28.149083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94725 > 2) by scale factor 0.678599\nI1207 09:05:29.091926   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80128 > 2) by scale factor 0.526139\nI1207 09:05:30.034219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86742 > 2) by scale factor 0.51714\nI1207 09:05:30.976809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74577 > 2) by scale factor 0.533935\nI1207 09:05:31.919517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27657 > 2) by scale factor 0.878515\nI1207 09:05:32.863034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41683 > 2) by scale factor 0.82753\nI1207 09:05:33.805989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37736 > 2) by scale factor 0.841271\nI1207 09:05:34.749065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45143 > 2) by scale factor 0.57947\nI1207 09:05:35.691802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60794 > 2) by scale factor 0.766888\nI1207 09:05:35.703801   369 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1207 09:06:28.687752   369 solver.cpp:404]     Test net output #0: accuracy = 0.1588\nI1207 09:06:28.688122   369 solver.cpp:404]     Test net output #1: loss = 6.14848 (* 1 = 6.14848 loss)\nI1207 09:06:29.561231   369 solver.cpp:228] Iteration 8500, loss = 5.53222\nI1207 09:06:29.561280   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 09:06:29.561297   369 solver.cpp:244]     Train net output #1: loss = 5.53222 (* 1 = 5.53222 loss)\nI1207 09:06:29.635001   369 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1207 09:06:29.645195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02735 > 2) by scale factor 0.660644\nI1207 09:06:30.585353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24326 > 2) by scale factor 0.471336\nI1207 09:06:31.524821   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24792 > 2) by scale factor 0.615779\nI1207 09:06:32.464334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04047 > 2) by scale factor 0.657793\nI1207 09:06:33.403828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97702 > 2) by scale factor 0.671813\nI1207 09:06:34.344010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61 > 2) by scale factor 0.766282\nI1207 09:06:35.284219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37128 > 2) by scale factor 0.593247\nI1207 09:06:36.223855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73659 > 2) by scale factor 0.730837\nI1207 09:06:37.163715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48366 > 2) by scale factor 0.574108\nI1207 09:06:38.103977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26229 > 2) by scale factor 0.613065\nI1207 09:06:39.044116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56112 > 2) by scale factor 0.561621\nI1207 09:06:39.984196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47809 > 2) by scale factor 0.575028\nI1207 09:06:40.924517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28157 > 2) by scale factor 0.876589\nI1207 09:06:42.802614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13142 > 2) by scale factor 0.938341\nI1207 09:06:44.679723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89186 > 2) by scale factor 0.513893\nI1207 09:06:45.619278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86906 > 2) by scale factor 0.697093\nI1207 09:06:46.558655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.4665 > 2) by scale factor 0.810864\nI1207 09:06:47.498948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25018 > 2) by scale factor 0.888816\nI1207 09:06:48.439280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18844 > 2) by scale factor 0.627266\nI1207 09:06:49.378630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64456 > 2) by scale factor 0.548764\nI1207 09:06:50.318576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79261 > 2) by scale factor 0.716175\nI1207 09:06:51.258112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66636 > 2) by scale factor 0.750087\nI1207 09:06:52.198180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66105 > 2) by scale factor 0.751584\nI1207 09:06:53.138051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24487 > 2) by scale factor 0.890921\nI1207 09:06:54.077850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18584 > 2) by scale factor 0.914982\nI1207 09:06:55.017369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43177 > 2) by scale factor 0.822446\nI1207 09:06:55.957446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48738 > 2) by scale factor 0.804058\nI1207 09:06:56.896692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3363 > 2) by scale factor 0.856055\nI1207 09:06:57.836730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17061 > 2) by scale factor 0.630794\nI1207 09:06:58.776542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06929 > 2) by scale factor 0.966516\nI1207 09:06:59.716394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29527 > 2) by scale factor 0.871359\nI1207 09:07:00.658076   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96604 > 2) by scale factor 0.674301\nI1207 09:07:01.601056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61607 > 2) by scale factor 0.553086\nI1207 09:07:02.543287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34391 > 2) by scale factor 0.853274\nI1207 09:07:03.486096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95999 > 2) by scale factor 0.675679\nI1207 09:07:04.429229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0382 > 2) by scale factor 0.49527\nI1207 09:07:05.372206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04655 > 2) by scale factor 0.977253\nI1207 09:07:06.315224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64688 > 2) by scale factor 0.755607\nI1207 09:07:07.257629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17582 > 2) by scale factor 0.919192\nI1207 09:07:08.200371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03485 > 2) by scale factor 0.659012\nI1207 09:07:09.142874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55813 > 2) by scale factor 0.438776\nI1207 09:07:10.085749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50655 > 2) by scale factor 0.570362\nI1207 09:07:11.028455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47274 > 2) by scale factor 0.575915\nI1207 09:07:11.970969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07803 > 2) by scale factor 0.962448\nI1207 09:07:12.913158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56658 > 2) by scale factor 0.560761\nI1207 09:07:13.855725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.56502 > 2) by scale factor 0.561006\nI1207 09:07:15.739184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31378 > 2) by scale factor 0.603541\nI1207 09:07:16.682147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81545 > 2) by scale factor 0.710367\nI1207 09:07:17.624778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87529 > 2) by scale factor 0.51609\nI1207 09:07:18.566637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34742 > 2) by scale factor 0.597474\nI1207 09:07:19.509207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83078 > 2) by scale factor 0.70652\nI1207 09:07:20.451973   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6951 > 2) by scale factor 0.425976\nI1207 09:07:21.394559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83732 > 2) by scale factor 0.70489\nI1207 09:07:22.336959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03929 > 2) by scale factor 0.980735\nI1207 09:07:23.279850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09572 > 2) by scale factor 0.646053\nI1207 09:07:24.222704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02362 > 2) by scale factor 0.661459\nI1207 09:07:25.165035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20802 > 2) by scale factor 0.623437\nI1207 09:07:26.107544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66345 > 2) by scale factor 0.750905\nI1207 09:07:27.049609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55877 > 2) by scale factor 0.438715\nI1207 09:07:27.993031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52146 > 2) by scale factor 0.79319\nI1207 09:07:28.936239   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64497 > 2) by scale factor 0.756151\nI1207 09:07:29.879403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40769 > 2) by scale factor 0.586908\nI1207 09:07:30.821920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99619 > 2) by scale factor 0.667513\nI1207 09:07:31.765092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10234 > 2) by scale factor 0.644674\nI1207 09:07:32.707824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83941 > 2) by scale factor 0.704372\nI1207 09:07:33.650915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12173 > 2) by scale factor 0.64067\nI1207 09:07:34.592739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43125 > 2) by scale factor 0.822621\nI1207 09:07:35.535809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12426 > 2) by scale factor 0.941506\nI1207 09:07:37.419806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39561 > 2) by scale factor 0.588995\nI1207 09:07:38.362720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55349 > 2) by scale factor 0.783241\nI1207 09:07:39.305579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41924 > 2) by scale factor 0.584925\nI1207 09:07:40.248536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07935 > 2) by scale factor 0.490274\nI1207 09:07:41.191771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07124 > 2) by scale factor 0.651204\nI1207 09:07:42.134783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91315 > 2) by scale factor 0.686543\nI1207 09:07:44.018476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.5459 > 2) by scale factor 0.564032\nI1207 09:07:44.961067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7055 > 2) by scale factor 0.739236\nI1207 09:07:45.904192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67189 > 2) by scale factor 0.544679\nI1207 09:07:46.847262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7167 > 2) by scale factor 0.538112\nI1207 09:07:47.790217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15954 > 2) by scale factor 0.926122\nI1207 09:07:48.733289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3084 > 2) by scale factor 0.604522\nI1207 09:07:49.676486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87049 > 2) by scale factor 0.696744\nI1207 09:07:50.619817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73167 > 2) by scale factor 0.732153\nI1207 09:07:51.562466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58947 > 2) by scale factor 0.557185\nI1207 09:07:52.505339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26646 > 2) by scale factor 0.612284\nI1207 09:07:53.447895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23382 > 2) by scale factor 0.895328\nI1207 09:07:54.391245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19317 > 2) by scale factor 0.911922\nI1207 09:07:55.334380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05485 > 2) by scale factor 0.654697\nI1207 09:07:56.277446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62572 > 2) by scale factor 0.761696\nI1207 09:07:58.160738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41793 > 2) by scale factor 0.585149\nI1207 09:07:59.103638   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.46615 > 2) by scale factor 0.365888\nI1207 09:08:00.046757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93396 > 2) by scale factor 0.681673\nI1207 09:08:00.989866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13569 > 2) by scale factor 0.637819\nI1207 09:08:01.932533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63414 > 2) by scale factor 0.550337\nI1207 09:08:02.875139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57668 > 2) by scale factor 0.559178\nI1207 09:08:02.887162   369 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1207 09:08:55.868036   369 solver.cpp:404]     Test net output #0: accuracy = 0.176\nI1207 09:08:55.868401   369 solver.cpp:404]     Test net output #1: loss = 9.01658 (* 1 = 9.01658 loss)\nI1207 09:08:56.741755   369 solver.cpp:228] Iteration 8600, loss = 9.95329\nI1207 09:08:56.741812   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 09:08:56.741832   369 solver.cpp:244]     Train net output #1: loss = 9.95329 (* 1 = 9.95329 loss)\nI1207 09:08:56.819245   369 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1207 09:08:56.829424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42428 > 2) by scale factor 0.584065\nI1207 09:08:57.769001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25957 > 2) by scale factor 0.885125\nI1207 09:08:58.708405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09371 > 2) by scale factor 0.955244\nI1207 09:08:59.648458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62062 > 2) by scale factor 0.763177\nI1207 09:09:00.588062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45741 > 2) by scale factor 0.813867\nI1207 09:09:01.527153   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20151 > 2) by scale factor 0.624705\nI1207 09:09:02.466634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68787 > 2) by scale factor 0.426633\nI1207 09:09:03.406778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04516 > 2) by scale factor 0.494417\nI1207 09:09:04.346894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77685 > 2) by scale factor 0.72024\nI1207 09:09:05.286970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99262 > 2) by scale factor 0.668311\nI1207 09:09:06.227386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46202 > 2) by scale factor 0.577697\nI1207 09:09:07.168023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75841 > 2) by scale factor 0.725055\nI1207 09:09:08.108351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20783 > 2) by scale factor 0.475304\nI1207 09:09:09.048118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24335 > 2) by scale factor 0.471325\nI1207 09:09:09.988309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19648 > 2) by scale factor 0.47659\nI1207 09:09:10.928267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16304 > 2) by scale factor 0.632302\nI1207 09:09:11.868360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44871 > 2) by scale factor 0.816757\nI1207 09:09:12.808717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10829 > 2) by scale factor 0.643441\nI1207 09:09:13.748584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93329 > 2) by scale factor 0.681827\nI1207 09:09:14.688951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.01569 > 2) by scale factor 0.498046\nI1207 09:09:15.628834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02403 > 2) by scale factor 0.661369\nI1207 09:09:16.569034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58815 > 2) by scale factor 0.772753\nI1207 09:09:17.509008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60391 > 2) by scale factor 0.554952\nI1207 09:09:18.449322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43407 > 2) by scale factor 0.451053\nI1207 09:09:19.389108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23919 > 2) by scale factor 0.617438\nI1207 09:09:20.329301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13046 > 2) by scale factor 0.938765\nI1207 09:09:21.269456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54862 > 2) by scale factor 0.5636\nI1207 09:09:22.209525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77071 > 2) by scale factor 0.530403\nI1207 09:09:23.149477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11793 > 2) by scale factor 0.641452\nI1207 09:09:24.089776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01005 > 2) by scale factor 0.498746\nI1207 09:09:25.029425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93427 > 2) by scale factor 0.681601\nI1207 09:09:25.969633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65938 > 2) by scale factor 0.54654\nI1207 09:09:26.909528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31185 > 2) by scale factor 0.865106\nI1207 09:09:27.849486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15265 > 2) by scale factor 0.48162\nI1207 09:09:28.789791   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26604 > 2) by scale factor 0.612363\nI1207 09:09:29.730446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07018 > 2) by scale factor 0.491378\nI1207 09:09:30.673338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51071 > 2) by scale factor 0.796586\nI1207 09:09:31.616420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70105 > 2) by scale factor 0.425436\nI1207 09:09:32.559016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47351 > 2) by scale factor 0.575787\nI1207 09:09:33.502070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85261 > 2) by scale factor 0.519129\nI1207 09:09:34.445194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25897 > 2) by scale factor 0.61369\nI1207 09:09:36.328471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39685 > 2) by scale factor 0.58878\nI1207 09:09:37.271220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66236 > 2) by scale factor 0.751212\nI1207 09:09:38.214190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18859 > 2) by scale factor 0.627237\nI1207 09:09:39.156767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75481 > 2) by scale factor 0.726004\nI1207 09:09:40.099102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47451 > 2) by scale factor 0.808241\nI1207 09:09:41.041607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34278 > 2) by scale factor 0.598304\nI1207 09:09:41.984496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94625 > 2) by scale factor 0.506811\nI1207 09:09:42.927182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.77115 > 2) by scale factor 0.721722\nI1207 09:09:43.869746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52255 > 2) by scale factor 0.567771\nI1207 09:09:44.812603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01135 > 2) by scale factor 0.498585\nI1207 09:09:45.755456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9198 > 2) by scale factor 0.684978\nI1207 09:09:46.698138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41141 > 2) by scale factor 0.586267\nI1207 09:09:47.641053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12942 > 2) by scale factor 0.484329\nI1207 09:09:48.583808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90545 > 2) by scale factor 0.688361\nI1207 09:09:49.526422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52173 > 2) by scale factor 0.567903\nI1207 09:09:50.468827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05166 > 2) by scale factor 0.655381\nI1207 09:09:51.411790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55444 > 2) by scale factor 0.562676\nI1207 09:09:52.354475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26365 > 2) by scale factor 0.612812\nI1207 09:09:53.297101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00932 > 2) by scale factor 0.995363\nI1207 09:09:54.239650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93182 > 2) by scale factor 0.68217\nI1207 09:09:55.182142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13177 > 2) by scale factor 0.938186\nI1207 09:09:56.125006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6904 > 2) by scale factor 0.743383\nI1207 09:09:57.068148   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06891 > 2) by scale factor 0.966692\nI1207 09:09:58.010773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54805 > 2) by scale factor 0.784913\nI1207 09:09:58.953124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71858 > 2) by scale factor 0.735677\nI1207 09:09:59.895854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42026 > 2) by scale factor 0.826358\nI1207 09:10:00.838327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3194 > 2) by scale factor 0.862291\nI1207 09:10:01.780956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1476 > 2) by scale factor 0.931271\nI1207 09:10:02.723075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45659 > 2) by scale factor 0.578605\nI1207 09:10:03.665554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49905 > 2) by scale factor 0.800303\nI1207 09:10:04.608157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69844 > 2) by scale factor 0.741168\nI1207 09:10:05.551246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64843 > 2) by scale factor 0.755163\nI1207 09:10:06.493860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62525 > 2) by scale factor 0.761832\nI1207 09:10:07.436720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53894 > 2) by scale factor 0.56514\nI1207 09:10:08.380053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18235 > 2) by scale factor 0.916442\nI1207 09:10:09.323140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31112 > 2) by scale factor 0.604025\nI1207 09:10:10.266304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.32387 > 2) by scale factor 0.860635\nI1207 09:10:11.209007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18434 > 2) by scale factor 0.628074\nI1207 09:10:12.151828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53986 > 2) by scale factor 0.564994\nI1207 09:10:13.094899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37894 > 2) by scale factor 0.591901\nI1207 09:10:14.037752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50797 > 2) by scale factor 0.57013\nI1207 09:10:15.921149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1014 > 2) by scale factor 0.64487\nI1207 09:10:16.864320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89302 > 2) by scale factor 0.691319\nI1207 09:10:17.807200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27945 > 2) by scale factor 0.877404\nI1207 09:10:18.750221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75246 > 2) by scale factor 0.726623\nI1207 09:10:19.692595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6586 > 2) by scale factor 0.752275\nI1207 09:10:20.635391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1546 > 2) by scale factor 0.481394\nI1207 09:10:21.578186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21066 > 2) by scale factor 0.622925\nI1207 09:10:22.521293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79621 > 2) by scale factor 0.715255\nI1207 09:10:23.463733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39287 > 2) by scale factor 0.589471\nI1207 09:10:24.406020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18431 > 2) by scale factor 0.477976\nI1207 09:10:25.348923   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29777 > 2) by scale factor 0.870408\nI1207 09:10:26.291862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93025 > 2) by scale factor 0.682535\nI1207 09:10:27.235173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49174 > 2) by scale factor 0.802652\nI1207 09:10:28.178148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1586 > 2) by scale factor 0.633191\nI1207 09:10:29.120813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57111 > 2) by scale factor 0.777874\nI1207 09:10:30.063372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90858 > 2) by scale factor 0.511695\nI1207 09:10:30.075323   369 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1207 09:11:23.053784   369 solver.cpp:404]     Test net output #0: accuracy = 0.15135\nI1207 09:11:23.054322   369 solver.cpp:404]     Test net output #1: loss = 10.5608 (* 1 = 10.5608 loss)\nI1207 09:11:23.927443   369 solver.cpp:228] Iteration 8700, loss = 10.1037\nI1207 09:11:23.927490   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 09:11:23.927510   369 solver.cpp:244]     Train net output #1: loss = 10.1037 (* 1 = 10.1037 loss)\nI1207 09:11:23.996412   369 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1207 09:11:24.006579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36441 > 2) by scale factor 0.458252\nI1207 09:11:25.884485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89016 > 2) by scale factor 0.692004\nI1207 09:11:26.824268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77012 > 2) by scale factor 0.72199\nI1207 09:11:27.764317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41296 > 2) by scale factor 0.828859\nI1207 09:11:28.704344   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82631 > 2) by scale factor 0.522696\nI1207 09:11:29.644145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34826 > 2) by scale factor 0.851693\nI1207 09:11:30.584022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80079 > 2) by scale factor 0.714085\nI1207 09:11:32.461756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58053 > 2) by scale factor 0.558576\nI1207 09:11:34.339712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76898 > 2) by scale factor 0.722287\nI1207 09:11:35.280091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35947 > 2) by scale factor 0.847649\nI1207 09:11:36.220191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19944 > 2) by scale factor 0.909323\nI1207 09:11:38.097995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28324 > 2) by scale factor 0.609155\nI1207 09:11:39.038117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73728 > 2) by scale factor 0.730653\nI1207 09:11:39.977815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.821 > 2) by scale factor 0.708969\nI1207 09:11:40.918216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44557 > 2) by scale factor 0.817805\nI1207 09:11:41.858191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3388 > 2) by scale factor 0.599017\nI1207 09:11:42.798105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47538 > 2) by scale factor 0.807956\nI1207 09:11:44.676079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13557 > 2) by scale factor 0.936517\nI1207 09:11:45.616416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.80687 > 2) by scale factor 0.525365\nI1207 09:11:46.556601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44979 > 2) by scale factor 0.816395\nI1207 09:11:47.496579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33956 > 2) by scale factor 0.854863\nI1207 09:11:48.436681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75799 > 2) by scale factor 0.725165\nI1207 09:11:49.376674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96691 > 2) by scale factor 0.504171\nI1207 09:11:50.316753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80251 > 2) by scale factor 0.713646\nI1207 09:11:51.256815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70887 > 2) by scale factor 0.539248\nI1207 09:11:52.196929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69094 > 2) by scale factor 0.743235\nI1207 09:11:53.136488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04365 > 2) by scale factor 0.657106\nI1207 09:11:54.076758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16803 > 2) by scale factor 0.631307\nI1207 09:11:55.016930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05517 > 2) by scale factor 0.973156\nI1207 09:11:57.839412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10177 > 2) by scale factor 0.644792\nI1207 09:11:58.781903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46879 > 2) by scale factor 0.447548\nI1207 09:11:59.724272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5754 > 2) by scale factor 0.77658\nI1207 09:12:00.666779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3593 > 2) by scale factor 0.45879\nI1207 09:12:01.609122   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43589 > 2) by scale factor 0.821055\nI1207 09:12:02.550693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28147 > 2) by scale factor 0.609483\nI1207 09:12:03.492499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82454 > 2) by scale factor 0.708079\nI1207 09:12:04.434092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84232 > 2) by scale factor 0.520519\nI1207 09:12:05.376242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82539 > 2) by scale factor 0.707866\nI1207 09:12:06.318348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74715 > 2) by scale factor 0.533739\nI1207 09:12:07.260716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11002 > 2) by scale factor 0.947857\nI1207 09:12:08.203052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18845 > 2) by scale factor 0.627264\nI1207 09:12:09.145649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83165 > 2) by scale factor 0.706302\nI1207 09:12:10.087839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8458 > 2) by scale factor 0.70279\nI1207 09:12:11.029994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70621 > 2) by scale factor 0.539634\nI1207 09:12:11.972247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44132 > 2) by scale factor 0.81923\nI1207 09:12:12.914901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91492 > 2) by scale factor 0.686125\nI1207 09:12:13.857317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13186 > 2) by scale factor 0.638598\nI1207 09:12:14.799806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.66639 > 2) by scale factor 0.545495\nI1207 09:12:15.741891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99746 > 2) by scale factor 0.667231\nI1207 09:12:16.683399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00297 > 2) by scale factor 0.998515\nI1207 09:12:17.625517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05766 > 2) by scale factor 0.971979\nI1207 09:12:18.568099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84694 > 2) by scale factor 0.70251\nI1207 09:12:19.510212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26423 > 2) by scale factor 0.883301\nI1207 09:12:20.452616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76739 > 2) by scale factor 0.722702\nI1207 09:12:21.395041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04527 > 2) by scale factor 0.656757\nI1207 09:12:22.337306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32694 > 2) by scale factor 0.601153\nI1207 09:12:23.279481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.103 > 2) by scale factor 0.644538\nI1207 09:12:24.222002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37463 > 2) by scale factor 0.842236\nI1207 09:12:25.164753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37634 > 2) by scale factor 0.592357\nI1207 09:12:26.106969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27466 > 2) by scale factor 0.61075\nI1207 09:12:27.048925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96642 > 2) by scale factor 0.504233\nI1207 09:12:27.991065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85097 > 2) by scale factor 0.701517\nI1207 09:12:28.932739   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96399 > 2) by scale factor 0.504542\nI1207 09:12:29.874687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19651 > 2) by scale factor 0.476587\nI1207 09:12:30.816141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29055 > 2) by scale factor 0.6078\nI1207 09:12:31.758548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12526 > 2) by scale factor 0.639946\nI1207 09:12:32.700834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31292 > 2) by scale factor 0.603698\nI1207 09:12:33.643122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34943 > 2) by scale factor 0.85127\nI1207 09:12:34.585785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27916 > 2) by scale factor 0.609912\nI1207 09:12:35.528244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53545 > 2) by scale factor 0.788815\nI1207 09:12:36.470362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16793 > 2) by scale factor 0.479854\nI1207 09:12:37.412751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57521 > 2) by scale factor 0.776636\nI1207 09:12:38.355412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27664 > 2) by scale factor 0.878488\nI1207 09:12:39.297631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76479 > 2) by scale factor 0.723381\nI1207 09:12:40.239837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67365 > 2) by scale factor 0.748042\nI1207 09:12:41.182349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60876 > 2) by scale factor 0.766647\nI1207 09:12:42.124498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.08971 > 2) by scale factor 0.957069\nI1207 09:12:43.067081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67841 > 2) by scale factor 0.543713\nI1207 09:12:44.009299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19925 > 2) by scale factor 0.909403\nI1207 09:12:44.951803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12096 > 2) by scale factor 0.942971\nI1207 09:12:46.834659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00834 > 2) by scale factor 0.49896\nI1207 09:12:47.776912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28639 > 2) by scale factor 0.608571\nI1207 09:12:48.718783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41416 > 2) by scale factor 0.585795\nI1207 09:12:49.661459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00284 > 2) by scale factor 0.499645\nI1207 09:12:50.603971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78694 > 2) by scale factor 0.52813\nI1207 09:12:51.546442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25466 > 2) by scale factor 0.887053\nI1207 09:12:52.488663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80804 > 2) by scale factor 0.71224\nI1207 09:12:53.430713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09178 > 2) by scale factor 0.488785\nI1207 09:12:54.373502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00791 > 2) by scale factor 0.664913\nI1207 09:12:55.315876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20382 > 2) by scale factor 0.624256\nI1207 09:12:56.257242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1947 > 2) by scale factor 0.626036\nI1207 09:12:57.199226   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09562 > 2) by scale factor 0.954374\nI1207 09:12:57.211108   369 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1207 09:13:50.179893   369 solver.cpp:404]     Test net output #0: accuracy = 0.21735\nI1207 09:13:50.180305   369 solver.cpp:404]     Test net output #1: loss = 5.62303 (* 1 = 5.62303 loss)\nI1207 09:13:51.053365   369 solver.cpp:228] Iteration 8800, loss = 5.87214\nI1207 09:13:51.053413   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 09:13:51.053431   369 solver.cpp:244]     Train net output #1: loss = 5.87214 (* 1 = 5.87214 loss)\nI1207 09:13:51.129890   369 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1207 09:13:51.139945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65568 > 2) by scale factor 0.753102\nI1207 09:13:52.079962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25128 > 2) by scale factor 0.888382\nI1207 09:13:53.019587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76736 > 2) by scale factor 0.72271\nI1207 09:13:53.959189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2686 > 2) by scale factor 0.611882\nI1207 09:13:54.898273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56368 > 2) by scale factor 0.780127\nI1207 09:13:55.837625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22825 > 2) by scale factor 0.897566\nI1207 09:13:57.714049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63805 > 2) by scale factor 0.758136\nI1207 09:13:58.653408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02524 > 2) by scale factor 0.987537\nI1207 09:13:59.593161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39642 > 2) by scale factor 0.588856\nI1207 09:14:00.532388   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42445 > 2) by scale factor 0.82493\nI1207 09:14:01.471662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43236 > 2) by scale factor 0.822247\nI1207 09:14:03.348572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62369 > 2) by scale factor 0.551924\nI1207 09:14:04.288750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43703 > 2) by scale factor 0.820671\nI1207 09:14:06.167040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16285 > 2) by scale factor 0.924708\nI1207 09:14:07.107098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22137 > 2) by scale factor 0.900345\nI1207 09:14:08.047338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80904 > 2) by scale factor 0.711988\nI1207 09:14:08.987393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67179 > 2) by scale factor 0.748563\nI1207 09:14:10.865025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51141 > 2) by scale factor 0.796366\nI1207 09:14:11.805150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32223 > 2) by scale factor 0.602006\nI1207 09:14:12.745337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60998 > 2) by scale factor 0.55402\nI1207 09:14:13.685356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40197 > 2) by scale factor 0.832651\nI1207 09:14:14.625090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08411 > 2) by scale factor 0.959641\nI1207 09:14:15.564903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06437 > 2) by scale factor 0.492081\nI1207 09:14:16.504842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.25886 > 2) by scale factor 0.613711\nI1207 09:14:17.444589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6551 > 2) by scale factor 0.753268\nI1207 09:14:19.321665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70894 > 2) by scale factor 0.738296\nI1207 09:14:20.262033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54975 > 2) by scale factor 0.56342\nI1207 09:14:21.202111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18786 > 2) by scale factor 0.914137\nI1207 09:14:22.142125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58043 > 2) by scale factor 0.775065\nI1207 09:14:23.081625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40965 > 2) by scale factor 0.453551\nI1207 09:14:24.021217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76566 > 2) by scale factor 0.723155\nI1207 09:14:24.964339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13417 > 2) by scale factor 0.937135\nI1207 09:14:25.908177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49012 > 2) by scale factor 0.445422\nI1207 09:14:26.851389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87164 > 2) by scale factor 0.696467\nI1207 09:14:27.794801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82991 > 2) by scale factor 0.522205\nI1207 09:14:28.737814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22707 > 2) by scale factor 0.619757\nI1207 09:14:29.681529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27767 > 2) by scale factor 0.61019\nI1207 09:14:30.624790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1394 > 2) by scale factor 0.934841\nI1207 09:14:31.568359   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03682 > 2) by scale factor 0.658583\nI1207 09:14:32.511404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37819 > 2) by scale factor 0.45681\nI1207 09:14:33.454939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86891 > 2) by scale factor 0.697128\nI1207 09:14:34.398509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36355 > 2) by scale factor 0.846185\nI1207 09:14:35.341924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06443 > 2) by scale factor 0.65265\nI1207 09:14:36.285660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30579 > 2) by scale factor 0.867382\nI1207 09:14:37.229013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28844 > 2) by scale factor 0.60819\nI1207 09:14:38.172683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07928 > 2) by scale factor 0.649503\nI1207 09:14:39.115998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12137 > 2) by scale factor 0.942786\nI1207 09:14:40.059386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6633 > 2) by scale factor 0.750948\nI1207 09:14:41.003450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39198 > 2) by scale factor 0.836128\nI1207 09:14:41.946725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88321 > 2) by scale factor 0.515038\nI1207 09:14:42.889783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25922 > 2) by scale factor 0.885261\nI1207 09:14:43.832742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85187 > 2) by scale factor 0.701294\nI1207 09:14:44.775746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.98373 > 2) by scale factor 0.502042\nI1207 09:14:47.601800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13941 > 2) by scale factor 0.48316\nI1207 09:14:48.544294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73553 > 2) by scale factor 0.535399\nI1207 09:14:49.487143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75385 > 2) by scale factor 0.726256\nI1207 09:14:50.430578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28738 > 2) by scale factor 0.874361\nI1207 09:14:51.374739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79476 > 2) by scale factor 0.715624\nI1207 09:14:52.317981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29677 > 2) by scale factor 0.870787\nI1207 09:14:53.261667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70367 > 2) by scale factor 0.739734\nI1207 09:14:55.146245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89087 > 2) by scale factor 0.691834\nI1207 09:14:57.030490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46529 > 2) by scale factor 0.811265\nI1207 09:14:57.973624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03254 > 2) by scale factor 0.659513\nI1207 09:14:58.916618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34587 > 2) by scale factor 0.852562\nI1207 09:14:59.859920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06808 > 2) by scale factor 0.967079\nI1207 09:15:00.803026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02757 > 2) by scale factor 0.986401\nI1207 09:15:01.746152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01104 > 2) by scale factor 0.664222\nI1207 09:15:02.688894   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66263 > 2) by scale factor 0.751136\nI1207 09:15:04.574023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4372 > 2) by scale factor 0.820613\nI1207 09:15:05.517146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99285 > 2) by scale factor 0.66826\nI1207 09:15:06.460569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31871 > 2) by scale factor 0.862547\nI1207 09:15:07.404176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83489 > 2) by scale factor 0.705496\nI1207 09:15:08.347893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41239 > 2) by scale factor 0.453269\nI1207 09:15:09.291414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73014 > 2) by scale factor 0.732563\nI1207 09:15:10.235137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76228 > 2) by scale factor 0.531593\nI1207 09:15:11.178853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50813 > 2) by scale factor 0.570104\nI1207 09:15:12.122282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1971 > 2) by scale factor 0.910289\nI1207 09:15:13.066071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02824 > 2) by scale factor 0.660449\nI1207 09:15:14.009677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67485 > 2) by scale factor 0.747706\nI1207 09:15:14.953233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1307 > 2) by scale factor 0.638836\nI1207 09:15:15.896282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07187 > 2) by scale factor 0.491175\nI1207 09:15:16.839395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.16932 > 2) by scale factor 0.631051\nI1207 09:15:17.782855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86239 > 2) by scale factor 0.698716\nI1207 09:15:18.726260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12101 > 2) by scale factor 0.485317\nI1207 09:15:19.669661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37824 > 2) by scale factor 0.592023\nI1207 09:15:20.613018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8379 > 2) by scale factor 0.704746\nI1207 09:15:22.498064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86703 > 2) by scale factor 0.697586\nI1207 09:15:23.441453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46745 > 2) by scale factor 0.447682\nI1207 09:15:24.385109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77266 > 2) by scale factor 0.721328\nI1207 09:15:24.397032   369 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1207 09:16:17.366302   369 solver.cpp:404]     Test net output #0: accuracy = 0.2048\nI1207 09:16:17.366653   369 solver.cpp:404]     Test net output #1: loss = 4.96677 (* 1 = 4.96677 loss)\nI1207 09:16:18.239408   369 solver.cpp:228] Iteration 8900, loss = 4.42507\nI1207 09:16:18.239452   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 09:16:18.239470   369 solver.cpp:244]     Train net output #1: loss = 4.42507 (* 1 = 4.42507 loss)\nI1207 09:16:18.311282   369 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1207 09:16:18.321110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0747 > 2) by scale factor 0.963994\nI1207 09:16:19.261106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24609 > 2) by scale factor 0.616126\nI1207 09:16:20.201092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37112 
> 2) by scale factor 0.843483\nI1207 09:16:21.141165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6159 > 2) by scale factor 0.553113\nI1207 09:16:22.081341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14623 > 2) by scale factor 0.482366\nI1207 09:16:23.021680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07515 > 2) by scale factor 0.650376\nI1207 09:16:23.961900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85325 > 2) by scale factor 0.700954\nI1207 09:16:24.902132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40316 > 2) by scale factor 0.58769\nI1207 09:16:25.842419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54385 > 2) by scale factor 0.564357\nI1207 09:16:26.782759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94224 > 2) by scale factor 0.507326\nI1207 09:16:27.722455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36795 > 2) by scale factor 0.844613\nI1207 09:16:28.662716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52855 > 2) by scale factor 0.566804\nI1207 09:16:29.602797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5172 > 2) by scale factor 0.568634\nI1207 09:16:30.542882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65553 > 2) by scale factor 0.547116\nI1207 09:16:31.483162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87137 > 2) by scale factor 0.516613\nI1207 09:16:32.423346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6504 > 2) by scale factor 0.547885\nI1207 09:16:33.363440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58946 > 2) by scale factor 0.772362\nI1207 09:16:34.303861   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13362 > 2) by scale factor 0.937373\nI1207 09:16:35.243993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3403 > 2) by scale factor 0.460798\nI1207 09:16:36.184031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83361 > 2) by scale factor 0.521702\nI1207 09:16:37.123814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09871 > 2) by scale factor 0.64543\nI1207 09:16:38.064121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50204 > 2) by scale factor 0.571095\nI1207 09:16:39.004788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24004 > 2) by scale factor 0.617276\nI1207 09:16:39.944960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47046 > 2) by scale factor 0.576292\nI1207 09:16:40.885036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98236 > 2) by scale factor 0.670609\nI1207 09:16:41.825199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64911 > 2) by scale factor 0.754971\nI1207 09:16:42.765367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5729 > 2) by scale factor 0.777333\nI1207 09:16:43.705977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39581 > 2) by scale factor 0.588961\nI1207 09:16:44.646061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95205 > 2) by scale factor 0.677496\nI1207 09:16:45.585743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56742 > 2) by scale factor 0.778991\nI1207 09:16:46.525864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98136 > 2) by scale factor 0.670836\nI1207 09:16:47.465950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.10522 > 2) by scale factor 0.644078\nI1207 09:16:48.406292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17928 > 2) by scale factor 0.917732\nI1207 09:16:49.346216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39369 > 2) by scale factor 0.589329\nI1207 09:16:50.290122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77767 > 2) by scale factor 0.720027\nI1207 09:16:51.234375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47134 > 2) by scale factor 0.576146\nI1207 09:16:52.178488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74791 > 2) by scale factor 0.533631\nI1207 09:16:53.122208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63986 > 2) by scale factor 0.757615\nI1207 09:16:54.065958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99861 > 2) by scale factor 0.500174\nI1207 09:16:55.009678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64413 > 2) by scale factor 0.756393\nI1207 09:16:55.953426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50909 > 2) by scale factor 0.569949\nI1207 09:16:56.897348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52339 > 2) by scale factor 0.567636\nI1207 09:16:57.841303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95854 > 2) by scale factor 0.505236\nI1207 09:16:58.785843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57298 > 2) by scale factor 0.777308\nI1207 09:16:59.728974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77903 > 2) by scale factor 0.529236\nI1207 09:17:00.672103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49553 > 2) by scale factor 0.801432\nI1207 09:17:01.615279   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28223 > 2) by scale factor 0.876335\nI1207 09:17:02.557881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02014 > 2) by scale factor 0.497495\nI1207 09:17:03.500439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21975 > 2) by scale factor 0.621167\nI1207 09:17:04.443114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29319 > 2) by scale factor 0.607314\nI1207 09:17:05.386029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64179 > 2) by scale factor 0.757062\nI1207 09:17:06.328830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08775 > 2) by scale factor 0.647722\nI1207 09:17:07.270681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3176 > 2) by scale factor 0.602845\nI1207 09:17:08.213243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81136 > 2) by scale factor 0.711399\nI1207 09:17:09.155439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2661 > 2) by scale factor 0.882574\nI1207 09:17:10.098630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41983 > 2) by scale factor 0.826503\nI1207 09:17:11.042035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81291 > 2) by scale factor 0.711007\nI1207 09:17:11.985460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13571 > 2) by scale factor 0.637814\nI1207 09:17:12.928522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35226 > 2) by scale factor 0.850245\nI1207 09:17:13.871881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9995 > 2) by scale factor 0.666777\nI1207 09:17:14.815176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.30815 > 2) by scale factor 0.866496\nI1207 09:17:15.758054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01647 > 2) by scale factor 0.497949\nI1207 09:17:16.701572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79955 > 2) by scale factor 0.714401\nI1207 09:17:17.644985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27246 > 2) by scale factor 0.880104\nI1207 09:17:18.588656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50349 > 2) by scale factor 0.4441\nI1207 09:17:19.531657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97233 > 2) by scale factor 0.672873\nI1207 09:17:20.474710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38657 > 2) by scale factor 0.838022\nI1207 09:17:21.417659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48824 > 2) by scale factor 0.80378\nI1207 09:17:22.360352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07578 > 2) by scale factor 0.650242\nI1207 09:17:23.303282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47907 > 2) by scale factor 0.806753\nI1207 09:17:24.246584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06525 > 2) by scale factor 0.652476\nI1207 09:17:25.189730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84084 > 2) by scale factor 0.520719\nI1207 09:17:26.133067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37841 > 2) by scale factor 0.591994\nI1207 09:17:27.076187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0567 > 2) by scale factor 0.972433\nI1207 09:17:28.018970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93385 > 2) by scale factor 0.681698\nI1207 09:17:28.962433   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40173 > 2) by scale factor 0.832731\nI1207 09:17:29.905535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90353 > 2) by scale factor 0.512357\nI1207 09:17:30.848335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63334 > 2) by scale factor 0.759491\nI1207 09:17:31.791667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42524 > 2) by scale factor 0.824659\nI1207 09:17:32.734627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21471 > 2) by scale factor 0.903052\nI1207 09:17:33.677575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70786 > 2) by scale factor 0.73859\nI1207 09:17:34.620412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68549 > 2) by scale factor 0.744744\nI1207 09:17:36.504050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76602 > 2) by scale factor 0.723062\nI1207 09:17:37.447397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05117 > 2) by scale factor 0.975053\nI1207 09:17:38.390584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97764 > 2) by scale factor 0.671674\nI1207 09:17:39.333744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78057 > 2) by scale factor 0.719277\nI1207 09:17:40.276831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12356 > 2) by scale factor 0.941814\nI1207 09:17:41.219416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76605 > 2) by scale factor 0.723053\nI1207 09:17:42.162830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59248 > 2) by scale factor 0.771462\nI1207 09:17:43.105895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.71652 > 2) by scale factor 0.736237\nI1207 09:17:44.049082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63475 > 2) by scale factor 0.759084\nI1207 09:17:44.991998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90253 > 2) by scale factor 0.512489\nI1207 09:17:45.934877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22363 > 2) by scale factor 0.620418\nI1207 09:17:46.878134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18653 > 2) by scale factor 0.627642\nI1207 09:17:47.821002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51284 > 2) by scale factor 0.569339\nI1207 09:17:48.764394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14238 > 2) by scale factor 0.636459\nI1207 09:17:49.707494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73516 > 2) by scale factor 0.731217\nI1207 09:17:50.650578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06886 > 2) by scale factor 0.651708\nI1207 09:17:51.593505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75135 > 2) by scale factor 0.533141\nI1207 09:17:51.605443   369 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1207 09:18:44.554802   369 solver.cpp:404]     Test net output #0: accuracy = 0.2082\nI1207 09:18:44.555140   369 solver.cpp:404]     Test net output #1: loss = 9.4215 (* 1 = 9.4215 loss)\nI1207 09:18:45.428586   369 solver.cpp:228] Iteration 9000, loss = 9.38121\nI1207 09:18:45.428628   369 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 09:18:45.428647   369 solver.cpp:244]     Train net output #1: loss = 9.38121 (* 1 = 9.38121 loss)\nI1207 09:18:45.502804   369 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1207 09:18:45.512912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.60381 > 2) by scale factor 0.768106\nI1207 09:18:46.452955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39896 > 2) by scale factor 0.833693\nI1207 09:18:47.393674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16605 > 2) by scale factor 0.631701\nI1207 09:18:48.333659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77075 > 2) by scale factor 0.721826\nI1207 09:18:49.274184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77658 > 2) by scale factor 0.52958\nI1207 09:18:50.214347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59971 > 2) by scale factor 0.769318\nI1207 09:18:51.154682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77716 > 2) by scale factor 0.529498\nI1207 09:18:52.094748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52608 > 2) by scale factor 0.791741\nI1207 09:18:53.034490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.382 > 2) by scale factor 0.591366\nI1207 09:18:53.974735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58129 > 2) by scale factor 0.774807\nI1207 09:18:54.914796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3044 > 2) by scale factor 0.605254\nI1207 09:18:55.854130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54352 > 2) by scale factor 0.786313\nI1207 09:18:56.793766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57983 > 2) by scale factor 0.775244\nI1207 09:18:57.733147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76066 > 2) by scale factor 0.531821\nI1207 09:18:58.673300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16363 > 2) by scale factor 0.632186\nI1207 09:18:59.613492   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99876 > 2) by scale factor 0.666941\nI1207 09:19:00.553002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78946 > 2) by scale factor 0.52778\nI1207 09:19:01.492385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63267 > 2) by scale factor 0.759685\nI1207 09:19:02.431679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55964 > 2) by scale factor 0.561855\nI1207 09:19:03.371042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92516 > 2) by scale factor 0.509534\nI1207 09:19:04.310523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35198 > 2) by scale factor 0.596663\nI1207 09:19:05.250880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90378 > 2) by scale factor 0.512324\nI1207 09:19:06.190770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94935 > 2) by scale factor 0.678115\nI1207 09:19:07.130513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50648 > 2) by scale factor 0.797933\nI1207 09:19:08.069669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90888 > 2) by scale factor 0.687549\nI1207 09:19:09.008381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90991 > 2) by scale factor 0.687307\nI1207 09:19:09.947620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95542 > 2) by scale factor 0.676722\nI1207 09:19:10.887166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77615 > 2) by scale factor 0.529641\nI1207 09:19:11.826957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.066 > 2) by scale factor 0.652316\nI1207 09:19:12.766526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.39897 > 2) by scale factor 0.83369\nI1207 09:19:13.705756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7897 > 2) by scale factor 0.716923\nI1207 09:19:14.645764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97325 > 2) by scale factor 0.672664\nI1207 09:19:15.588963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56724 > 2) by scale factor 0.779047\nI1207 09:19:16.531608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08393 > 2) by scale factor 0.648524\nI1207 09:19:17.474380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32932 > 2) by scale factor 0.461967\nI1207 09:19:18.417567   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12978 > 2) by scale factor 0.639022\nI1207 09:19:19.360353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61803 > 2) by scale factor 0.763933\nI1207 09:19:20.303035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54255 > 2) by scale factor 0.786612\nI1207 09:19:21.245652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15092 > 2) by scale factor 0.634736\nI1207 09:19:22.188640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2974 > 2) by scale factor 0.606538\nI1207 09:19:23.131511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47936 > 2) by scale factor 0.806659\nI1207 09:19:24.074089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12435 > 2) by scale factor 0.484925\nI1207 09:19:25.015950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79976 > 2) by scale factor 0.714347\nI1207 09:19:25.958317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98744 > 2) by scale factor 0.669469\nI1207 09:19:26.901105   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60119 > 2) by scale factor 0.555372\nI1207 09:19:27.843513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10489 > 2) by scale factor 0.644146\nI1207 09:19:28.786414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17551 > 2) by scale factor 0.919323\nI1207 09:19:29.729111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32007 > 2) by scale factor 0.602396\nI1207 09:19:30.671862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03018 > 2) by scale factor 0.985134\nI1207 09:19:31.614509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66262 > 2) by scale factor 0.751139\nI1207 09:19:32.556628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15014 > 2) by scale factor 0.93017\nI1207 09:19:33.498946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2361 > 2) by scale factor 0.472132\nI1207 09:19:34.441284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54929 > 2) by scale factor 0.563494\nI1207 09:19:35.383746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29169 > 2) by scale factor 0.466017\nI1207 09:19:36.325870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80313 > 2) by scale factor 0.713488\nI1207 09:19:37.268380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11059 > 2) by scale factor 0.486548\nI1207 09:19:38.209838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56733 > 2) by scale factor 0.77902\nI1207 09:19:39.152067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14897 > 2) by scale factor 0.482047\nI1207 09:19:40.094130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.568 > 2) by scale factor 0.560538\nI1207 09:19:41.036343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22285 > 2) by scale factor 0.620569\nI1207 09:19:41.979254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86883 > 2) by scale factor 0.516952\nI1207 09:19:42.921252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64722 > 2) by scale factor 0.75551\nI1207 09:19:43.863770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42334 > 2) by scale factor 0.584224\nI1207 09:19:45.745470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01425 > 2) by scale factor 0.663514\nI1207 09:19:46.688500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01076 > 2) by scale factor 0.399141\nI1207 09:19:47.631012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.571 > 2) by scale factor 0.777907\nI1207 09:19:48.573173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22084 > 2) by scale factor 0.620956\nI1207 09:19:49.514153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22009 > 2) by scale factor 0.621101\nI1207 09:19:50.455744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64139 > 2) by scale factor 0.549241\nI1207 09:19:51.397562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28055 > 2) by scale factor 0.609654\nI1207 09:19:52.339402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70723 > 2) by scale factor 0.738764\nI1207 09:19:53.281996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65854 > 2) by scale factor 0.546666\nI1207 09:19:54.224509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73027 > 2) by scale factor 0.73253\nI1207 09:19:55.166494   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60618 > 2) by scale factor 0.767405\nI1207 09:19:56.108175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37096 > 2) by scale factor 0.593304\nI1207 09:19:57.049716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89171 > 2) by scale factor 0.513912\nI1207 09:19:57.992019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23734 > 2) by scale factor 0.89392\nI1207 09:19:59.874698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13108 > 2) by scale factor 0.938492\nI1207 09:20:00.817312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72999 > 2) by scale factor 0.732603\nI1207 09:20:01.759305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25438 > 2) by scale factor 0.470104\nI1207 09:20:02.700975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48832 > 2) by scale factor 0.573341\nI1207 09:20:03.642777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02817 > 2) by scale factor 0.660465\nI1207 09:20:04.585410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45126 > 2) by scale factor 0.815906\nI1207 09:20:05.527705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56513 > 2) by scale factor 0.779688\nI1207 09:20:06.470036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20489 > 2) by scale factor 0.907075\nI1207 09:20:07.412475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14977 > 2) by scale factor 0.930334\nI1207 09:20:08.354837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80879 > 2) by scale factor 0.712049\nI1207 09:20:09.297966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.81935 > 2) by scale factor 0.709384\nI1207 09:20:10.240856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6523 > 2) by scale factor 0.754062\nI1207 09:20:11.183465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12789 > 2) by scale factor 0.639408\nI1207 09:20:12.126178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66153 > 2) by scale factor 0.54622\nI1207 09:20:13.067955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8047 > 2) by scale factor 0.525665\nI1207 09:20:14.009690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2094 > 2) by scale factor 0.62317\nI1207 09:20:14.952087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64239 > 2) by scale factor 0.54909\nI1207 09:20:15.894402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11939 > 2) by scale factor 0.943666\nI1207 09:20:16.837482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15873 > 2) by scale factor 0.633165\nI1207 09:20:17.780016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97079 > 2) by scale factor 0.673222\nI1207 09:20:18.722532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00018 > 2) by scale factor 0.666627\nI1207 09:20:18.734592   369 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1207 09:21:11.682137   369 solver.cpp:404]     Test net output #0: accuracy = 0.18125\nI1207 09:21:11.682500   369 solver.cpp:404]     Test net output #1: loss = 8.07821 (* 1 = 8.07821 loss)\nI1207 09:21:12.555232   369 solver.cpp:228] Iteration 9100, loss = 9.55662\nI1207 09:21:12.555274   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 09:21:12.555291   369 solver.cpp:244]     Train net output #1: loss = 9.55662 (* 1 = 9.55662 loss)\nI1207 09:21:12.627830   369 
sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1207 09:21:12.637976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13535 > 2) by scale factor 0.936614\nI1207 09:21:13.577713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62584 > 2) by scale factor 0.761662\nI1207 09:21:14.517246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51154 > 2) by scale factor 0.796324\nI1207 09:21:15.457286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26858 > 2) by scale factor 0.611887\nI1207 09:21:16.397478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3069 > 2) by scale factor 0.604796\nI1207 09:21:17.337558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08961 > 2) by scale factor 0.489044\nI1207 09:21:18.277607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34129 > 2) by scale factor 0.598571\nI1207 09:21:19.217205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39882 > 2) by scale factor 0.833744\nI1207 09:21:20.156975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18733 > 2) by scale factor 0.627485\nI1207 09:21:21.096345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98748 > 2) by scale factor 0.66946\nI1207 09:21:22.035770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82493 > 2) by scale factor 0.707983\nI1207 09:21:22.975674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71696 > 2) by scale factor 0.736118\nI1207 09:21:24.853595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50339 > 2) by scale factor 0.570876\nI1207 09:21:25.793763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35308 > 2) by scale factor 0.596466\nI1207 09:21:26.733614 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18284 > 2) by scale factor 0.916238\nI1207 09:21:27.673519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22581 > 2) by scale factor 0.62\nI1207 09:21:28.613389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60859 > 2) by scale factor 0.766696\nI1207 09:21:29.552891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15803 > 2) by scale factor 0.633306\nI1207 09:21:30.492775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9979 > 2) by scale factor 0.500262\nI1207 09:21:31.432464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78729 > 2) by scale factor 0.717542\nI1207 09:21:32.372061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38097 > 2) by scale factor 0.839994\nI1207 09:21:33.312266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55908 > 2) by scale factor 0.78153\nI1207 09:21:35.189813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0061 > 2) by scale factor 0.499239\nI1207 09:21:36.130074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07852 > 2) by scale factor 0.649663\nI1207 09:21:37.069744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09309 > 2) by scale factor 0.955524\nI1207 09:21:38.008632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31378 > 2) by scale factor 0.864385\nI1207 09:21:38.948804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95239 > 2) by scale factor 0.677418\nI1207 09:21:39.888715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04996 > 2) by scale factor 0.97563\nI1207 09:21:40.828869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.16926 > 2) by scale factor 0.921973\nI1207 09:21:42.709100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5172 > 2) by scale factor 0.568633\nI1207 09:21:43.652624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09066 > 2) by scale factor 0.64711\nI1207 09:21:44.595525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3649 > 2) by scale factor 0.594371\nI1207 09:21:45.538658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29233 > 2) by scale factor 0.872475\nI1207 09:21:47.422250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47947 > 2) by scale factor 0.806625\nI1207 09:21:48.364878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40394 > 2) by scale factor 0.587554\nI1207 09:21:50.249270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64069 > 2) by scale factor 0.549346\nI1207 09:21:51.192569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5162 > 2) by scale factor 0.794848\nI1207 09:21:52.135928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23755 > 2) by scale factor 0.893836\nI1207 09:21:53.078996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10521 > 2) by scale factor 0.644079\nI1207 09:21:54.022001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59252 > 2) by scale factor 0.771451\nI1207 09:21:54.965034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07881 > 2) by scale factor 0.962089\nI1207 09:21:55.907850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91183 > 2) by scale factor 0.686854\nI1207 09:21:56.850785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92121 > 2) by scale factor 0.684649\nI1207 09:21:57.793859   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27574 > 2) by scale factor 0.878833\nI1207 09:21:58.737231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4631 > 2) by scale factor 0.811986\nI1207 09:21:59.680567   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46545 > 2) by scale factor 0.81121\nI1207 09:22:00.623667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6899 > 2) by scale factor 0.743523\nI1207 09:22:01.566398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85822 > 2) by scale factor 0.699737\nI1207 09:22:02.509481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18811 > 2) by scale factor 0.477543\nI1207 09:22:03.452462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26375 > 2) by scale factor 0.883491\nI1207 09:22:04.394672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95061 > 2) by scale factor 0.506251\nI1207 09:22:05.337872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31415 > 2) by scale factor 0.864248\nI1207 09:22:07.221459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22591 > 2) by scale factor 0.898509\nI1207 09:22:08.164700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27197 > 2) by scale factor 0.880292\nI1207 09:22:09.107919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16189 > 2) by scale factor 0.632533\nI1207 09:22:10.050762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88423 > 2) by scale factor 0.693426\nI1207 09:22:10.994060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1774 > 2) by scale factor 0.918525\nI1207 09:22:11.937144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.43881 > 2) by scale factor 0.820072\nI1207 09:22:12.880581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66857 > 2) by scale factor 0.749464\nI1207 09:22:13.823832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44797 > 2) by scale factor 0.580051\nI1207 09:22:14.766788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26345 > 2) by scale factor 0.883607\nI1207 09:22:15.709722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65211 > 2) by scale factor 0.547628\nI1207 09:22:16.652467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27851 > 2) by scale factor 0.467452\nI1207 09:22:17.594774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9563 > 2) by scale factor 0.505523\nI1207 09:22:18.537348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47132 > 2) by scale factor 0.57615\nI1207 09:22:19.480046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75483 > 2) by scale factor 0.725997\nI1207 09:22:20.422894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22366 > 2) by scale factor 0.620413\nI1207 09:22:21.365859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04027 > 2) by scale factor 0.657837\nI1207 09:22:22.308760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36766 > 2) by scale factor 0.593885\nI1207 09:22:23.251713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24981 > 2) by scale factor 0.888965\nI1207 09:22:24.194648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69055 > 2) by scale factor 0.743343\nI1207 09:22:25.137536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52176 > 2) by scale factor 0.793096\nI1207 09:22:26.080682   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66485 > 2) by scale factor 0.750513\nI1207 09:22:27.023464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78764 > 2) by scale factor 0.717452\nI1207 09:22:27.966133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92479 > 2) by scale factor 0.683811\nI1207 09:22:28.908694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68402 > 2) by scale factor 0.745151\nI1207 09:22:29.851629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88326 > 2) by scale factor 0.515031\nI1207 09:22:31.734632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1872 > 2) by scale factor 0.91441\nI1207 09:22:32.677644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03279 > 2) by scale factor 0.983869\nI1207 09:22:33.620371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1807 > 2) by scale factor 0.628792\nI1207 09:22:35.503139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51819 > 2) by scale factor 0.794221\nI1207 09:22:36.445508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07172 > 2) by scale factor 0.965381\nI1207 09:22:37.387854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23738 > 2) by scale factor 0.617784\nI1207 09:22:38.330575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62441 > 2) by scale factor 0.762076\nI1207 09:22:39.273381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45479 > 2) by scale factor 0.578906\nI1207 09:22:40.216254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31493 > 2) by scale factor 0.603331\nI1207 09:22:41.158905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.17465 > 2) by scale factor 0.62999\nI1207 09:22:42.101377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95542 > 2) by scale factor 0.676722\nI1207 09:22:43.043972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88636 > 2) by scale factor 0.692914\nI1207 09:22:43.987263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4863 > 2) by scale factor 0.445802\nI1207 09:22:44.930085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62865 > 2) by scale factor 0.760846\nI1207 09:22:45.872478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13361 > 2) by scale factor 0.937381\nI1207 09:22:45.884409   369 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1207 09:23:38.850692   369 solver.cpp:404]     Test net output #0: accuracy = 0.15335\nI1207 09:23:38.851037   369 solver.cpp:404]     Test net output #1: loss = 5.86096 (* 1 = 5.86096 loss)\nI1207 09:23:39.723978   369 solver.cpp:228] Iteration 9200, loss = 5.25963\nI1207 09:23:39.724028   369 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 09:23:39.724046   369 solver.cpp:244]     Train net output #1: loss = 5.25963 (* 1 = 5.25963 loss)\nI1207 09:23:39.799072   369 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1207 09:23:39.809190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70656 > 2) by scale factor 0.738944\nI1207 09:23:40.749053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26077 > 2) by scale factor 0.613352\nI1207 09:23:41.689110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7806 > 2) by scale factor 0.719269\nI1207 09:23:42.628969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56823 > 2) by scale factor 0.778746\nI1207 09:23:43.568261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97723 > 
2) by scale factor 0.502863\nI1207 09:23:44.508141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86835 > 2) by scale factor 0.697264\nI1207 09:23:45.447839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32408 > 2) by scale factor 0.860557\nI1207 09:23:46.388037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22987 > 2) by scale factor 0.472827\nI1207 09:23:47.328135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13381 > 2) by scale factor 0.6382\nI1207 09:23:48.267748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43557 > 2) by scale factor 0.821162\nI1207 09:23:49.207777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16265 > 2) by scale factor 0.63238\nI1207 09:23:50.147997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72338 > 2) by scale factor 0.73438\nI1207 09:23:51.088003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0379 > 2) by scale factor 0.981403\nI1207 09:23:52.028211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39733 > 2) by scale factor 0.588697\nI1207 09:23:52.967804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63953 > 2) by scale factor 0.549522\nI1207 09:23:53.907887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39658 > 2) by scale factor 0.588828\nI1207 09:23:54.847636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07858 > 2) by scale factor 0.64965\nI1207 09:23:55.786914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60169 > 2) by scale factor 0.768731\nI1207 09:23:56.726794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33728 > 2) by scale factor 0.599291\nI1207 09:23:57.666858   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43121 > 2) by scale factor 0.582885\nI1207 09:23:58.606633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78806 > 2) by scale factor 0.717344\nI1207 09:23:59.546459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73116 > 2) by scale factor 0.732291\nI1207 09:24:00.486318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26867 > 2) by scale factor 0.46853\nI1207 09:24:01.426246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99122 > 2) by scale factor 0.668624\nI1207 09:24:02.366025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78292 > 2) by scale factor 0.718671\nI1207 09:24:03.305796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00313 > 2) by scale factor 0.665973\nI1207 09:24:04.246073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51788 > 2) by scale factor 0.568525\nI1207 09:24:05.186110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83793 > 2) by scale factor 0.704738\nI1207 09:24:06.125964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41829 > 2) by scale factor 0.827032\nI1207 09:24:07.065954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00397 > 2) by scale factor 0.499504\nI1207 09:24:08.006223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33081 > 2) by scale factor 0.858072\nI1207 09:24:08.946178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85093 > 2) by scale factor 0.701524\nI1207 09:24:09.886447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00321 > 2) by scale factor 0.499599\nI1207 09:24:10.827445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.38923 > 2) by scale factor 0.590105\nI1207 09:24:11.770359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48379 > 2) by scale factor 0.574087\nI1207 09:24:12.712782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00411 > 2) by scale factor 0.665755\nI1207 09:24:13.655684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07538 > 2) by scale factor 0.650327\nI1207 09:24:14.598675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93006 > 2) by scale factor 0.68258\nI1207 09:24:15.541255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40338 > 2) by scale factor 0.587651\nI1207 09:24:17.424670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60646 > 2) by scale factor 0.767323\nI1207 09:24:18.367388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44412 > 2) by scale factor 0.81829\nI1207 09:24:19.310060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33429 > 2) by scale factor 0.856792\nI1207 09:24:20.252491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79654 > 2) by scale factor 0.526795\nI1207 09:24:21.195238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67199 > 2) by scale factor 0.544663\nI1207 09:24:22.137434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14278 > 2) by scale factor 0.482768\nI1207 09:24:23.080026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22542 > 2) by scale factor 0.620075\nI1207 09:24:24.022596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76283 > 2) by scale factor 0.723895\nI1207 09:24:24.965178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10701 > 2) by scale factor 0.643705\nI1207 09:24:25.907871   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66217 > 2) by scale factor 0.751266\nI1207 09:24:26.850631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67899 > 2) by scale factor 0.746549\nI1207 09:24:27.792979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94842 > 2) by scale factor 0.67833\nI1207 09:24:28.735196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08892 > 2) by scale factor 0.957434\nI1207 09:24:29.677870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23584 > 2) by scale factor 0.472162\nI1207 09:24:30.620404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.35488 > 2) by scale factor 0.373491\nI1207 09:24:31.563561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19183 > 2) by scale factor 0.477119\nI1207 09:24:32.505833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26653 > 2) by scale factor 0.468765\nI1207 09:24:33.448379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36317 > 2) by scale factor 0.594677\nI1207 09:24:34.391043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7897 > 2) by scale factor 0.716923\nI1207 09:24:35.333364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26265 > 2) by scale factor 0.883917\nI1207 09:24:36.275452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33953 > 2) by scale factor 0.598886\nI1207 09:24:37.217919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42347 > 2) by scale factor 0.584203\nI1207 09:24:38.160392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29649 > 2) by scale factor 0.606705\nI1207 09:24:39.102859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.18084 > 2) by scale factor 0.478373\nI1207 09:24:40.046052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74029 > 2) by scale factor 0.421915\nI1207 09:24:40.988348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43351 > 2) by scale factor 0.582493\nI1207 09:24:41.930403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11527 > 2) by scale factor 0.945507\nI1207 09:24:42.873174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70157 > 2) by scale factor 0.540312\nI1207 09:24:43.815773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18794 > 2) by scale factor 0.627364\nI1207 09:24:45.698519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90997 > 2) by scale factor 0.687291\nI1207 09:24:46.641288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04882 > 2) by scale factor 0.655992\nI1207 09:24:47.583946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42694 > 2) by scale factor 0.824082\nI1207 09:24:48.526715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5806 > 2) by scale factor 0.775015\nI1207 09:24:49.469154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27504 > 2) by scale factor 0.879105\nI1207 09:24:50.411751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84913 > 2) by scale factor 0.701968\nI1207 09:24:51.354581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13712 > 2) by scale factor 0.935839\nI1207 09:24:52.297355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38721 > 2) by scale factor 0.837796\nI1207 09:24:53.239744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40709 > 2) by scale factor 0.587011\nI1207 09:24:54.182008   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17106 > 2) by scale factor 0.479494\nI1207 09:24:55.124251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22475 > 2) by scale factor 0.898978\nI1207 09:24:56.066891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76267 > 2) by scale factor 0.723937\nI1207 09:24:57.009577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34752 > 2) by scale factor 0.851961\nI1207 09:24:57.952359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31806 > 2) by scale factor 0.463171\nI1207 09:24:58.894662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53541 > 2) by scale factor 0.565705\nI1207 09:24:59.837257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36374 > 2) by scale factor 0.594576\nI1207 09:25:00.779867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38461 > 2) by scale factor 0.838713\nI1207 09:25:01.722355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.349 > 2) by scale factor 0.597192\nI1207 09:25:02.664752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03783 > 2) by scale factor 0.658364\nI1207 09:25:03.606889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54638 > 2) by scale factor 0.785428\nI1207 09:25:04.549562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89778 > 2) by scale factor 0.513112\nI1207 09:25:05.491909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63721 > 2) by scale factor 0.549872\nI1207 09:25:06.434387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9583 > 2) by scale factor 0.676064\nI1207 09:25:07.376490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.20682 > 2) by scale factor 0.623671\nI1207 09:25:08.319140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27988 > 2) by scale factor 0.877239\nI1207 09:25:09.261724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78521 > 2) by scale factor 0.528373\nI1207 09:25:10.204417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68588 > 2) by scale factor 0.542611\nI1207 09:25:11.146656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81946 > 2) by scale factor 0.414984\nI1207 09:25:12.088850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13179 > 2) by scale factor 0.484052\nI1207 09:25:13.031180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75785 > 2) by scale factor 0.725203\nI1207 09:25:13.043077   369 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1207 09:26:06.002660   369 solver.cpp:404]     Test net output #0: accuracy = 0.20295\nI1207 09:26:06.003005   369 solver.cpp:404]     Test net output #1: loss = 7.17863 (* 1 = 7.17863 loss)\nI1207 09:26:06.876662   369 solver.cpp:228] Iteration 9300, loss = 6.4108\nI1207 09:26:06.876710   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 09:26:06.876729   369 solver.cpp:244]     Train net output #1: loss = 6.4108 (* 1 = 6.4108 loss)\nI1207 09:26:06.947311   369 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1207 09:26:07.895052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08507 > 2) by scale factor 0.648284\nI1207 09:26:08.834250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21036 > 2) by scale factor 0.904832\nI1207 09:26:10.710965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45773 > 2) by scale factor 0.578415\nI1207 09:26:11.650435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.4513 > 2) by scale factor 0.815893\nI1207 09:26:12.589848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4188 > 2) by scale factor 0.585\nI1207 09:26:13.528918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00455 > 2) by scale factor 0.665658\nI1207 09:26:14.468339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05553 > 2) by scale factor 0.493153\nI1207 09:26:15.408712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01036 > 2) by scale factor 0.664373\nI1207 09:26:16.348750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33676 > 2) by scale factor 0.855886\nI1207 09:26:17.289002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8561 > 2) by scale factor 0.700257\nI1207 09:26:18.228988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48515 > 2) by scale factor 0.804781\nI1207 09:26:19.168607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41667 > 2) by scale factor 0.585365\nI1207 09:26:20.108090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68791 > 2) by scale factor 0.744073\nI1207 09:26:21.047966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64819 > 2) by scale factor 0.755234\nI1207 09:26:21.987226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40336 > 2) by scale factor 0.454199\nI1207 09:26:22.927455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82524 > 2) by scale factor 0.707904\nI1207 09:26:23.867535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02739 > 2) by scale factor 0.98649\nI1207 09:26:24.807662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20699 > 2) by scale factor 0.906211\nI1207 09:26:25.747640   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17778 > 2) by scale factor 0.62937\nI1207 09:26:26.687500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88457 > 2) by scale factor 0.693345\nI1207 09:26:27.626420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29449 > 2) by scale factor 0.607073\nI1207 09:26:28.566248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96296 > 2) by scale factor 0.504673\nI1207 09:26:29.505404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23689 > 2) by scale factor 0.617876\nI1207 09:26:30.445112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02515 > 2) by scale factor 0.661125\nI1207 09:26:31.384820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95184 > 2) by scale factor 0.506093\nI1207 09:26:32.324105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45287 > 2) by scale factor 0.579228\nI1207 09:26:33.263988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9867 > 2) by scale factor 0.669636\nI1207 09:26:34.203092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56714 > 2) by scale factor 0.779077\nI1207 09:26:35.142596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05664 > 2) by scale factor 0.654314\nI1207 09:26:36.081707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72659 > 2) by scale factor 0.733517\nI1207 09:26:37.022439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90097 > 2) by scale factor 0.689425\nI1207 09:26:37.965051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1512 > 2) by scale factor 0.63468\nI1207 09:26:38.907397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.81564 > 2) by scale factor 0.524158\nI1207 09:26:39.848919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53747 > 2) by scale factor 0.565376\nI1207 09:26:40.791313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71131 > 2) by scale factor 0.737652\nI1207 09:26:41.733852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36202 > 2) by scale factor 0.59488\nI1207 09:26:42.676368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49924 > 2) by scale factor 0.800243\nI1207 09:26:43.618415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29612 > 2) by scale factor 0.606775\nI1207 09:26:44.561069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58501 > 2) by scale factor 0.77369\nI1207 09:26:45.503520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12194 > 2) by scale factor 0.485209\nI1207 09:26:46.445802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8798 > 2) by scale factor 0.694492\nI1207 09:26:47.388362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91358 > 2) by scale factor 0.686441\nI1207 09:26:48.330893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76411 > 2) by scale factor 0.723561\nI1207 09:26:49.273712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38343 > 2) by scale factor 0.839127\nI1207 09:26:50.216256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36128 > 2) by scale factor 0.595011\nI1207 09:26:51.158789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79224 > 2) by scale factor 0.527393\nI1207 09:26:52.101202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68915 > 2) by scale factor 0.54213\nI1207 09:26:53.044623   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18835 > 2) by scale factor 0.477516\nI1207 09:26:53.987579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62416 > 2) by scale factor 0.762148\nI1207 09:26:54.930029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70255 > 2) by scale factor 0.540168\nI1207 09:26:55.873522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21582 > 2) by scale factor 0.621926\nI1207 09:26:56.816491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82426 > 2) by scale factor 0.522978\nI1207 09:26:57.759876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31561 > 2) by scale factor 0.863702\nI1207 09:26:59.643846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78526 > 2) by scale factor 0.718067\nI1207 09:27:00.587165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94632 > 2) by scale factor 0.678813\nI1207 09:27:01.530755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47108 > 2) by scale factor 0.809364\nI1207 09:27:03.414716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54967 > 2) by scale factor 0.784415\nI1207 09:27:04.357857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69898 > 2) by scale factor 0.741022\nI1207 09:27:05.301347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82038 > 2) by scale factor 0.709124\nI1207 09:27:06.244791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8904 > 2) by scale factor 0.691947\nI1207 09:27:07.188369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57944 > 2) by scale factor 0.558747\nI1207 09:27:08.132033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.51434 > 2) by scale factor 0.795436\nI1207 09:27:09.074625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08081 > 2) by scale factor 0.961163\nI1207 09:27:10.017174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22004 > 2) by scale factor 0.900887\nI1207 09:27:10.960664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79748 > 2) by scale factor 0.714928\nI1207 09:27:11.903296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31626 > 2) by scale factor 0.863459\nI1207 09:27:12.846369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40571 > 2) by scale factor 0.831354\nI1207 09:27:13.789608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63271 > 2) by scale factor 0.759675\nI1207 09:27:14.732743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09821 > 2) by scale factor 0.953194\nI1207 09:27:15.675813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70479 > 2) by scale factor 0.539842\nI1207 09:27:16.618980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21035 > 2) by scale factor 0.47502\nI1207 09:27:17.561739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01748 > 2) by scale factor 0.991337\nI1207 09:27:18.505082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74849 > 2) by scale factor 0.727673\nI1207 09:27:19.448375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96075 > 2) by scale factor 0.675506\nI1207 09:27:20.391628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46712 > 2) by scale factor 0.810662\nI1207 09:27:21.334942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66358 > 2) by scale factor 0.428855\nI1207 09:27:22.278244   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32009 > 2) by scale factor 0.862036\nI1207 09:27:23.221319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41385 > 2) by scale factor 0.585848\nI1207 09:27:24.163944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57629 > 2) by scale factor 0.77631\nI1207 09:27:25.107455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61386 > 2) by scale factor 0.765152\nI1207 09:27:27.933291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75631 > 2) by scale factor 0.532438\nI1207 09:27:28.876737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43874 > 2) by scale factor 0.581609\nI1207 09:27:29.819859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99053 > 2) by scale factor 0.668778\nI1207 09:27:30.762223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87866 > 2) by scale factor 0.694768\nI1207 09:27:31.705370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38827 > 2) by scale factor 0.837426\nI1207 09:27:32.648502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.249 > 2) by scale factor 0.889285\nI1207 09:27:33.591364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54774 > 2) by scale factor 0.563738\nI1207 09:27:34.534807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75998 > 2) by scale factor 0.724644\nI1207 09:27:35.477855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15142 > 2) by scale factor 0.929619\nI1207 09:27:36.420684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29877 > 2) by scale factor 0.606286\nI1207 09:27:37.364493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.04938 > 2) by scale factor 0.655872\nI1207 09:27:38.307742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24104 > 2) by scale factor 0.892444\nI1207 09:27:39.250876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19741 > 2) by scale factor 0.625507\nI1207 09:27:40.193919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4162 > 2) by scale factor 0.452878\nI1207 09:27:40.205823   369 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1207 09:28:33.172153   369 solver.cpp:404]     Test net output #0: accuracy = 0.1909\nI1207 09:28:33.172502   369 solver.cpp:404]     Test net output #1: loss = 8.20227 (* 1 = 8.20227 loss)\nI1207 09:28:34.045630   369 solver.cpp:228] Iteration 9400, loss = 7.70403\nI1207 09:28:34.045677   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 09:28:34.045699   369 solver.cpp:244]     Train net output #1: loss = 7.70403 (* 1 = 7.70403 loss)\nI1207 09:28:34.119736   369 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1207 09:28:34.129861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14829 > 2) by scale factor 0.635265\nI1207 09:28:35.070045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45493 > 2) by scale factor 0.578882\nI1207 09:28:36.009989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06639 > 2) by scale factor 0.967874\nI1207 09:28:36.949888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34497 > 2) by scale factor 0.852888\nI1207 09:28:37.889691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73092 > 2) by scale factor 0.536061\nI1207 09:28:38.829650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93572 > 2) by scale factor 0.681263\nI1207 09:28:39.769671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.0926 > 2) by scale factor 0.488687\nI1207 09:28:40.709470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28686 > 2) by scale factor 0.608483\nI1207 09:28:41.649668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78412 > 2) by scale factor 0.71836\nI1207 09:28:42.589510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59959 > 2) by scale factor 0.769353\nI1207 09:28:43.529517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55125 > 2) by scale factor 0.783928\nI1207 09:28:44.469454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92448 > 2) by scale factor 0.683883\nI1207 09:28:45.409600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8597 > 2) by scale factor 0.518175\nI1207 09:28:46.349205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78341 > 2) by scale factor 0.718543\nI1207 09:28:47.288892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50069 > 2) by scale factor 0.444376\nI1207 09:28:48.229079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96746 > 2) by scale factor 0.673978\nI1207 09:28:49.168923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45288 > 2) by scale factor 0.815369\nI1207 09:28:50.108901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44532 > 2) by scale factor 0.580497\nI1207 09:28:51.048913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82385 > 2) by scale factor 0.708253\nI1207 09:28:51.989240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7497 > 2) by scale factor 0.727351\nI1207 09:28:52.928833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.587 > 2) by scale factor 0.773095\nI1207 09:28:53.868705   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46848 > 2) by scale factor 0.810216\nI1207 09:28:54.808293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02964 > 2) by scale factor 0.496323\nI1207 09:28:55.747542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34931 > 2) by scale factor 0.851313\nI1207 09:28:56.687819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98969 > 2) by scale factor 0.668965\nI1207 09:28:57.627758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58081 > 2) by scale factor 0.774951\nI1207 09:28:58.567936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59613 > 2) by scale factor 0.770376\nI1207 09:28:59.507675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80815 > 2) by scale factor 0.712212\nI1207 09:29:00.447387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36253 > 2) by scale factor 0.45845\nI1207 09:29:01.387276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84487 > 2) by scale factor 0.520174\nI1207 09:29:02.327147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2661 > 2) by scale factor 0.882572\nI1207 09:29:03.267082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.632 > 2) by scale factor 0.75988\nI1207 09:29:04.209636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28378 > 2) by scale factor 0.609055\nI1207 09:29:05.153766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30892 > 2) by scale factor 0.866207\nI1207 09:29:06.097903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4994 > 2) by scale factor 0.571527\nI1207 09:29:07.041903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.53018 > 2) by scale factor 0.790458\nI1207 09:29:07.986268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43141 > 2) by scale factor 0.582851\nI1207 09:29:08.930250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81382 > 2) by scale factor 0.524409\nI1207 09:29:09.874649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09778 > 2) by scale factor 0.953391\nI1207 09:29:10.818894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89474 > 2) by scale factor 0.690909\nI1207 09:29:11.762866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40031 > 2) by scale factor 0.833226\nI1207 09:29:12.707346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1426 > 2) by scale factor 0.933446\nI1207 09:29:13.651520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89446 > 2) by scale factor 0.690974\nI1207 09:29:14.596045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80032 > 2) by scale factor 0.526271\nI1207 09:29:15.540455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12182 > 2) by scale factor 0.485223\nI1207 09:29:16.483633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62186 > 2) by scale factor 0.552203\nI1207 09:29:17.427033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6822 > 2) by scale factor 0.745656\nI1207 09:29:18.370301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34213 > 2) by scale factor 0.460604\nI1207 09:29:19.313707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43056 > 2) by scale factor 0.582996\nI1207 09:29:20.256956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85221 > 2) by scale factor 0.70121\nI1207 09:29:21.200167   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18004 > 2) by scale factor 0.628923\nI1207 09:29:22.143303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49663 > 2) by scale factor 0.80108\nI1207 09:29:23.086012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02583 > 2) by scale factor 0.98725\nI1207 09:29:24.028646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72496 > 2) by scale factor 0.423284\nI1207 09:29:24.972116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06176 > 2) by scale factor 0.492397\nI1207 09:29:25.914691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28746 > 2) by scale factor 0.608373\nI1207 09:29:26.857954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17787 > 2) by scale factor 0.918328\nI1207 09:29:27.801146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99646 > 2) by scale factor 0.667455\nI1207 09:29:28.744405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36682 > 2) by scale factor 0.845015\nI1207 09:29:29.687243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39172 > 2) by scale factor 0.589672\nI1207 09:29:30.630192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70557 > 2) by scale factor 0.539728\nI1207 09:29:31.573777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6881 > 2) by scale factor 0.542284\nI1207 09:29:32.517221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22297 > 2) by scale factor 0.899696\nI1207 09:29:33.460546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10072 > 2) by scale factor 0.952053\nI1207 09:29:34.404139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.41093 > 2) by scale factor 0.829556\nI1207 09:29:35.346740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64595 > 2) by scale factor 0.755873\nI1207 09:29:36.289984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56882 > 2) by scale factor 0.778569\nI1207 09:29:37.233160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82432 > 2) by scale factor 0.708136\nI1207 09:29:38.176383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65262 > 2) by scale factor 0.547553\nI1207 09:29:39.119114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58968 > 2) by scale factor 0.557152\nI1207 09:29:40.061609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27911 > 2) by scale factor 0.609922\nI1207 09:29:41.005304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22227 > 2) by scale factor 0.899981\nI1207 09:29:41.948740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20671 > 2) by scale factor 0.623691\nI1207 09:29:42.891929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57024 > 2) by scale factor 0.778138\nI1207 09:29:43.835290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09442 > 2) by scale factor 0.646326\nI1207 09:29:44.777928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61537 > 2) by scale factor 0.553194\nI1207 09:29:45.721245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85375 > 2) by scale factor 0.700831\nI1207 09:29:46.664310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2191 > 2) by scale factor 0.621292\nI1207 09:29:47.607242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27431 > 2) by scale factor 0.879388\nI1207 09:29:48.550374   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05668 > 2) by scale factor 0.972439\nI1207 09:29:49.494365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07256 > 2) by scale factor 0.491092\nI1207 09:29:50.438467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18485 > 2) by scale factor 0.627973\nI1207 09:29:51.382167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74348 > 2) by scale factor 0.534263\nI1207 09:29:52.326372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7614 > 2) by scale factor 0.531717\nI1207 09:29:53.270248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70729 > 2) by scale factor 0.424873\nI1207 09:29:54.213500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85082 > 2) by scale factor 0.412301\nI1207 09:29:55.157492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3811 > 2) by scale factor 0.456506\nI1207 09:29:56.101205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28145 > 2) by scale factor 0.467131\nI1207 09:29:57.045224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26422 > 2) by scale factor 0.612705\nI1207 09:29:57.989238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89097 > 2) by scale factor 0.514011\nI1207 09:29:58.932595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5389 > 2) by scale factor 0.440636\nI1207 09:29:59.876701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98943 > 2) by scale factor 0.669025\nI1207 09:30:00.820246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45076 > 2) by scale factor 0.579583\nI1207 09:30:01.764011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.18482 > 2) by scale factor 0.477918\nI1207 09:30:02.707949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4042 > 2) by scale factor 0.454111\nI1207 09:30:03.652149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10475 > 2) by scale factor 0.644174\nI1207 09:30:04.596457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7121 > 2) by scale factor 0.538778\nI1207 09:30:05.540766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99386 > 2) by scale factor 0.500768\nI1207 09:30:06.484891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44346 > 2) by scale factor 0.818511\nI1207 09:30:07.428990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03582 > 2) by scale factor 0.982405\nI1207 09:30:07.440914   369 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1207 09:31:00.406086   369 solver.cpp:404]     Test net output #0: accuracy = 0.1497\nI1207 09:31:00.406479   369 solver.cpp:404]     Test net output #1: loss = 8.9828 (* 1 = 8.9828 loss)\nI1207 09:31:01.279742   369 solver.cpp:228] Iteration 9500, loss = 10.8238\nI1207 09:31:01.279791   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 09:31:01.279809   369 solver.cpp:244]     Train net output #1: loss = 10.8238 (* 1 = 10.8238 loss)\nI1207 09:31:01.353119   369 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1207 09:31:01.363204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81483 > 2) by scale factor 0.710521\nI1207 09:31:02.303232   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05474 > 2) by scale factor 0.49325\nI1207 09:31:03.243219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28443 > 2) by scale factor 0.875493\nI1207 09:31:04.183229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06751 > 
2) by scale factor 0.967348\nI1207 09:31:05.122956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68187 > 2) by scale factor 0.543202\nI1207 09:31:06.062356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21267 > 2) by scale factor 0.622536\nI1207 09:31:07.001785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45071 > 2) by scale factor 0.579592\nI1207 09:31:07.941483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57692 > 2) by scale factor 0.776122\nI1207 09:31:08.881361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73899 > 2) by scale factor 0.730196\nI1207 09:31:09.821152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34429 > 2) by scale factor 0.598034\nI1207 09:31:10.761203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18612 > 2) by scale factor 0.477769\nI1207 09:31:11.701333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19858 > 2) by scale factor 0.476352\nI1207 09:31:12.641247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77858 > 2) by scale factor 0.719793\nI1207 09:31:13.581015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36227 > 2) by scale factor 0.846642\nI1207 09:31:14.520948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11834 > 2) by scale factor 0.641368\nI1207 09:31:15.460850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23384 > 2) by scale factor 0.895318\nI1207 09:31:16.400960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87432 > 2) by scale factor 0.695816\nI1207 09:31:17.341323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32143 > 2) by scale factor 0.861539\nI1207 09:31:18.281767   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53949 > 2) by scale factor 0.565054\nI1207 09:31:21.096603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19192 > 2) by scale factor 0.912443\nI1207 09:31:22.035708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56558 > 2) by scale factor 0.77955\nI1207 09:31:22.975919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68435 > 2) by scale factor 0.745059\nI1207 09:31:23.915740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14223 > 2) by scale factor 0.933607\nI1207 09:31:24.855572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28653 > 2) by scale factor 0.466577\nI1207 09:31:25.795552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75067 > 2) by scale factor 0.727096\nI1207 09:31:26.734668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33379 > 2) by scale factor 0.46149\nI1207 09:31:27.674358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90287 > 2) by scale factor 0.688974\nI1207 09:31:28.614145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0049 > 2) by scale factor 0.499389\nI1207 09:31:29.553493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21461 > 2) by scale factor 0.62216\nI1207 09:31:30.493227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36637 > 2) by scale factor 0.458046\nI1207 09:31:31.433560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31327 > 2) by scale factor 0.603632\nI1207 09:31:32.372540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42661 > 2) by scale factor 0.583666\nI1207 09:31:33.316565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.66402 > 2) by scale factor 0.750746\nI1207 09:31:34.259879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1894 > 2) by scale factor 0.627077\nI1207 09:31:35.203758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37093 > 2) by scale factor 0.593308\nI1207 09:31:36.147279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11548 > 2) by scale factor 0.48597\nI1207 09:31:37.090847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54276 > 2) by scale factor 0.786547\nI1207 09:31:38.034309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98556 > 2) by scale factor 0.501811\nI1207 09:31:38.978035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16131 > 2) by scale factor 0.632648\nI1207 09:31:39.921298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87766 > 2) by scale factor 0.515776\nI1207 09:31:40.865139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0578 > 2) by scale factor 0.654065\nI1207 09:31:41.808853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22573 > 2) by scale factor 0.620014\nI1207 09:31:42.752768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3826 > 2) by scale factor 0.59126\nI1207 09:31:43.696997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12618 > 2) by scale factor 0.639758\nI1207 09:31:44.640540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12662 > 2) by scale factor 0.940459\nI1207 09:31:45.583559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45724 > 2) by scale factor 0.578496\nI1207 09:31:46.527521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14636 > 2) by scale factor 0.482351\nI1207 09:31:47.471179   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58017 > 2) by scale factor 0.558632\nI1207 09:31:48.414779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78461 > 2) by scale factor 0.718233\nI1207 09:31:49.358584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45405 > 2) by scale factor 0.579031\nI1207 09:31:51.243825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27105 > 2) by scale factor 0.611425\nI1207 09:31:52.187286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27293 > 2) by scale factor 0.879923\nI1207 09:31:53.131553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33353 > 2) by scale factor 0.857072\nI1207 09:31:54.075243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0308 > 2) by scale factor 0.659892\nI1207 09:31:55.018237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41898 > 2) by scale factor 0.826796\nI1207 09:31:55.961558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24912 > 2) by scale factor 0.615552\nI1207 09:31:56.905557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65703 > 2) by scale factor 0.546891\nI1207 09:31:57.848929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78802 > 2) by scale factor 0.717354\nI1207 09:31:58.793020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2185 > 2) by scale factor 0.474103\nI1207 09:31:59.736855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48638 > 2) by scale factor 0.804382\nI1207 09:32:00.680176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72594 > 2) by scale factor 0.733692\nI1207 09:32:01.624358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.18186 > 2) by scale factor 0.916648\nI1207 09:32:02.567553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15204 > 2) by scale factor 0.929352\nI1207 09:32:03.511219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11347 > 2) by scale factor 0.94631\nI1207 09:32:04.455278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34079 > 2) by scale factor 0.854411\nI1207 09:32:05.399013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25747 > 2) by scale factor 0.613974\nI1207 09:32:06.343070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7326 > 2) by scale factor 0.731903\nI1207 09:32:07.287209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91305 > 2) by scale factor 0.511111\nI1207 09:32:08.230226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64646 > 2) by scale factor 0.548477\nI1207 09:32:09.173844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64304 > 2) by scale factor 0.548991\nI1207 09:32:10.117491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99341 > 2) by scale factor 0.668134\nI1207 09:32:11.060861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87268 > 2) by scale factor 0.516438\nI1207 09:32:12.003983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62874 > 2) by scale factor 0.76082\nI1207 09:32:12.947672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22371 > 2) by scale factor 0.8994\nI1207 09:32:13.890686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03998 > 2) by scale factor 0.657899\nI1207 09:32:14.834235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05878 > 2) by scale factor 0.971449\nI1207 09:32:15.777984   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23667 > 2) by scale factor 0.61792\nI1207 09:32:16.721777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48402 > 2) by scale factor 0.574049\nI1207 09:32:17.665007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3893 > 2) by scale factor 0.837064\nI1207 09:32:18.608100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05939 > 2) by scale factor 0.492684\nI1207 09:32:19.551499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22746 > 2) by scale factor 0.619682\nI1207 09:32:20.494359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26812 > 2) by scale factor 0.611973\nI1207 09:32:21.437718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06966 > 2) by scale factor 0.651538\nI1207 09:32:23.323732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60164 > 2) by scale factor 0.768746\nI1207 09:32:24.267371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39647 > 2) by scale factor 0.454911\nI1207 09:32:25.210341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89682 > 2) by scale factor 0.513239\nI1207 09:32:27.094610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98237 > 2) by scale factor 0.502214\nI1207 09:32:28.037588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90479 > 2) by scale factor 0.512191\nI1207 09:32:28.981137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35892 > 2) by scale factor 0.59543\nI1207 09:32:29.924422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58644 > 2) by scale factor 0.773265\nI1207 09:32:30.867869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.48417 > 2) by scale factor 0.805099\nI1207 09:32:31.811945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89466 > 2) by scale factor 0.690926\nI1207 09:32:32.755439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37761 > 2) by scale factor 0.592135\nI1207 09:32:33.698844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2945 > 2) by scale factor 0.607073\nI1207 09:32:34.642251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71231 > 2) by scale factor 0.42442\nI1207 09:32:34.654237   369 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1207 09:33:27.607004   369 solver.cpp:404]     Test net output #0: accuracy = 0.18585\nI1207 09:33:27.607355   369 solver.cpp:404]     Test net output #1: loss = 10.632 (* 1 = 10.632 loss)\nI1207 09:33:28.480432   369 solver.cpp:228] Iteration 9600, loss = 12.2298\nI1207 09:33:28.480484   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 09:33:28.480501   369 solver.cpp:244]     Train net output #1: loss = 12.2298 (* 1 = 12.2298 loss)\nI1207 09:33:28.560015   369 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1207 09:33:28.570134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2974 > 2) by scale factor 0.870549\nI1207 09:33:29.509963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38458 > 2) by scale factor 0.838721\nI1207 09:33:30.450104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17814 > 2) by scale factor 0.6293\nI1207 09:33:31.390321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88015 > 2) by scale factor 0.694408\nI1207 09:33:32.330366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76782 > 2) by scale factor 0.419479\nI1207 09:33:33.270534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55642 > 2) 
by scale factor 0.562363\nI1207 09:33:34.210634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11103 > 2) by scale factor 0.642874\nI1207 09:33:35.170018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84739 > 2) by scale factor 0.702399\nI1207 09:33:36.109894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70963 > 2) by scale factor 0.539138\nI1207 09:33:37.049506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47643 > 2) by scale factor 0.807616\nI1207 09:33:37.988483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95322 > 2) by scale factor 0.403777\nI1207 09:33:38.928213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0974 > 2) by scale factor 0.645703\nI1207 09:33:39.868551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35393 > 2) by scale factor 0.596316\nI1207 09:33:40.808713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70316 > 2) by scale factor 0.739875\nI1207 09:33:41.748605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68775 > 2) by scale factor 0.542336\nI1207 09:33:42.688577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93745 > 2) by scale factor 0.405068\nI1207 09:33:43.628587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46686 > 2) by scale factor 0.447742\nI1207 09:33:44.568675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5312 > 2) by scale factor 0.790139\nI1207 09:33:45.508410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78973 > 2) by scale factor 0.527741\nI1207 09:33:46.448483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9568 > 2) by scale factor 0.505459\nI1207 09:33:47.388414   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89689 > 2) by scale factor 0.51323\nI1207 09:33:48.328173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40909 > 2) by scale factor 0.830189\nI1207 09:33:49.267789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93804 > 2) by scale factor 0.507867\nI1207 09:33:50.207573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05304 > 2) by scale factor 0.493457\nI1207 09:33:51.147263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21864 > 2) by scale factor 0.901454\nI1207 09:33:52.087210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6527 > 2) by scale factor 0.753949\nI1207 09:33:53.026841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37234 > 2) by scale factor 0.59306\nI1207 09:33:53.966806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82383 > 2) by scale factor 0.708258\nI1207 09:33:54.906450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52308 > 2) by scale factor 0.567685\nI1207 09:33:55.845783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.929 > 2) by scale factor 0.682828\nI1207 09:33:56.785501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99708 > 2) by scale factor 0.500365\nI1207 09:33:57.727036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89324 > 2) by scale factor 0.691266\nI1207 09:33:58.671787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5716 > 2) by scale factor 0.559974\nI1207 09:33:59.616122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69013 > 2) by scale factor 0.541987\nI1207 09:34:00.559978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.02452 > 2) by scale factor 0.987889\nI1207 09:34:01.503288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47767 > 2) by scale factor 0.575098\nI1207 09:34:02.446561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55795 > 2) by scale factor 0.562121\nI1207 09:34:03.390557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23673 > 2) by scale factor 0.472062\nI1207 09:34:04.333582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60033 > 2) by scale factor 0.555504\nI1207 09:34:05.277799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37126 > 2) by scale factor 0.593249\nI1207 09:34:06.221585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56405 > 2) by scale factor 0.561159\nI1207 09:34:07.165529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99246 > 2) by scale factor 0.668346\nI1207 09:34:08.109655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09508 > 2) by scale factor 0.954618\nI1207 09:34:09.053421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76646 > 2) by scale factor 0.722946\nI1207 09:34:09.996788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52204 > 2) by scale factor 0.442278\nI1207 09:34:10.940491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54906 > 2) by scale factor 0.56353\nI1207 09:34:11.884445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50993 > 2) by scale factor 0.796833\nI1207 09:34:12.828140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11975 > 2) by scale factor 0.943509\nI1207 09:34:13.771793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34424 > 2) by scale factor 0.853154\nI1207 09:34:14.715534   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84201 > 2) by scale factor 0.703728\nI1207 09:34:16.599175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70724 > 2) by scale factor 0.73876\nI1207 09:34:17.542342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65681 > 2) by scale factor 0.752783\nI1207 09:34:18.484961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95401 > 2) by scale factor 0.677046\nI1207 09:34:19.427966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02147 > 2) by scale factor 0.661929\nI1207 09:34:20.370115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79491 > 2) by scale factor 0.715586\nI1207 09:34:21.312978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55079 > 2) by scale factor 0.563255\nI1207 09:34:22.256184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43365 > 2) by scale factor 0.58247\nI1207 09:34:23.198951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23671 > 2) by scale factor 0.617911\nI1207 09:34:24.141167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06995 > 2) by scale factor 0.651477\nI1207 09:34:25.083479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46666 > 2) by scale factor 0.810812\nI1207 09:34:26.025893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6953 > 2) by scale factor 0.742032\nI1207 09:34:26.968737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8838 > 2) by scale factor 0.69353\nI1207 09:34:27.911273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49029 > 2) by scale factor 0.573018\nI1207 09:34:28.854835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.91717 > 2) by scale factor 0.510573\nI1207 09:34:29.797559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03381 > 2) by scale factor 0.49581\nI1207 09:34:30.740275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61187 > 2) by scale factor 0.765734\nI1207 09:34:31.682739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36112 > 2) by scale factor 0.847056\nI1207 09:34:32.625563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39067 > 2) by scale factor 0.836584\nI1207 09:34:33.567791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85533 > 2) by scale factor 0.700444\nI1207 09:34:34.510176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11008 > 2) by scale factor 0.947832\nI1207 09:34:35.452767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28127 > 2) by scale factor 0.609519\nI1207 09:34:36.395129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51553 > 2) by scale factor 0.568905\nI1207 09:34:37.337440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3726 > 2) by scale factor 0.457393\nI1207 09:34:38.280480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81477 > 2) by scale factor 0.524278\nI1207 09:34:39.223383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76393 > 2) by scale factor 0.723607\nI1207 09:34:40.166517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39361 > 2) by scale factor 0.835557\nI1207 09:34:41.109650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43884 > 2) by scale factor 0.581592\nI1207 09:34:42.052557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80527 > 2) by scale factor 0.712943\nI1207 09:34:42.995543   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88214 > 2) by scale factor 0.693929\nI1207 09:34:43.939013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65546 > 2) by scale factor 0.753166\nI1207 09:34:44.882158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44079 > 2) by scale factor 0.581261\nI1207 09:34:45.825095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69976 > 2) by scale factor 0.740806\nI1207 09:34:46.768117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61565 > 2) by scale factor 0.553151\nI1207 09:34:47.711048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26941 > 2) by scale factor 0.379549\nI1207 09:34:48.653766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90401 > 2) by scale factor 0.688704\nI1207 09:34:49.595916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16985 > 2) by scale factor 0.630945\nI1207 09:34:50.538589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42434 > 2) by scale factor 0.824967\nI1207 09:34:51.480846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9824 > 2) by scale factor 0.50221\nI1207 09:34:52.424074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92189 > 2) by scale factor 0.684489\nI1207 09:34:53.367487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15495 > 2) by scale factor 0.928095\nI1207 09:34:54.310494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05803 > 2) by scale factor 0.654016\nI1207 09:34:55.253448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66078 > 2) by scale factor 0.75166\nI1207 09:34:56.196261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.41807 > 2) by scale factor 0.827106\nI1207 09:34:57.139299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21726 > 2) by scale factor 0.621647\nI1207 09:34:58.082341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17025 > 2) by scale factor 0.479587\nI1207 09:34:59.025629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04363 > 2) by scale factor 0.494605\nI1207 09:34:59.968528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89026 > 2) by scale factor 0.514104\nI1207 09:35:00.910333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01764 > 2) by scale factor 0.66277\nI1207 09:35:01.853044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6016 > 2) by scale factor 0.555309\nI1207 09:35:01.864377   369 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1207 09:35:54.768070   369 solver.cpp:404]     Test net output #0: accuracy = 0.16535\nI1207 09:35:54.768447   369 solver.cpp:404]     Test net output #1: loss = 9.92332 (* 1 = 9.92332 loss)\nI1207 09:35:55.641685   369 solver.cpp:228] Iteration 9700, loss = 10.7951\nI1207 09:35:55.641739   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 09:35:55.641758   369 solver.cpp:244]     Train net output #1: loss = 10.7951 (* 1 = 10.7951 loss)\nI1207 09:35:55.713939   369 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1207 09:35:55.723757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47888 > 2) by scale factor 0.574897\nI1207 09:35:56.663257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82599 > 2) by scale factor 0.707716\nI1207 09:35:57.602186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12989 > 2) by scale factor 0.638999\nI1207 09:35:58.541621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.09642 > 2) by scale factor 0.488232\nI1207 09:35:59.481616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25767 > 2) by scale factor 0.613935\nI1207 09:36:00.421217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85566 > 2) by scale factor 0.700364\nI1207 09:36:01.360666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16051 > 2) by scale factor 0.632808\nI1207 09:36:02.300751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24979 > 2) by scale factor 0.615424\nI1207 09:36:03.240829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31787 > 2) by scale factor 0.602797\nI1207 09:36:04.181098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25821 > 2) by scale factor 0.613833\nI1207 09:36:05.121197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81433 > 2) by scale factor 0.71065\nI1207 09:36:06.060514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69592 > 2) by scale factor 0.425902\nI1207 09:36:06.999863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8156 > 2) by scale factor 0.710327\nI1207 09:36:07.939776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79104 > 2) by scale factor 0.716578\nI1207 09:36:08.879142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2833 > 2) by scale factor 0.466929\nI1207 09:36:09.819000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28953 > 2) by scale factor 0.873543\nI1207 09:36:10.759073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24315 > 2) by scale factor 0.891603\nI1207 09:36:11.698982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04966 > 2) by scale factor 0.655812\nI1207 09:36:12.639315   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40613 > 2) by scale factor 0.587176\nI1207 09:36:13.578831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86774 > 2) by scale factor 0.697414\nI1207 09:36:14.518246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77288 > 2) by scale factor 0.721271\nI1207 09:36:15.458102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84768 > 2) by scale factor 0.702327\nI1207 09:36:16.398221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99044 > 2) by scale factor 0.668797\nI1207 09:36:17.338071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34446 > 2) by scale factor 0.853076\nI1207 09:36:18.278030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34061 > 2) by scale factor 0.460765\nI1207 09:36:19.217777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60727 > 2) by scale factor 0.767087\nI1207 09:36:20.157282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78709 > 2) by scale factor 0.717594\nI1207 09:36:21.097337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16288 > 2) by scale factor 0.924692\nI1207 09:36:22.037552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81078 > 2) by scale factor 0.711546\nI1207 09:36:23.919075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74114 > 2) by scale factor 0.729624\nI1207 09:36:24.862381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12175 > 2) by scale factor 0.942617\nI1207 09:36:25.806176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08787 > 2) by scale factor 0.647695\nI1207 09:36:26.749037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.43656 > 2) by scale factor 0.581977\nI1207 09:36:27.691967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95649 > 2) by scale factor 0.676477\nI1207 09:36:28.635360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60685 > 2) by scale factor 0.767208\nI1207 09:36:29.578933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36635 > 2) by scale factor 0.594115\nI1207 09:36:30.522352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43017 > 2) by scale factor 0.822987\nI1207 09:36:31.465520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9316 > 2) by scale factor 0.682222\nI1207 09:36:32.409137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61331 > 2) by scale factor 0.765312\nI1207 09:36:33.352751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91435 > 2) by scale factor 0.686259\nI1207 09:36:34.296067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85577 > 2) by scale factor 0.700338\nI1207 09:36:35.239856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5258 > 2) by scale factor 0.791829\nI1207 09:36:36.183307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00298 > 2) by scale factor 0.998512\nI1207 09:36:38.067945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04251 > 2) by scale factor 0.657351\nI1207 09:36:39.010535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63499 > 2) by scale factor 0.759015\nI1207 09:36:39.953603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53501 > 2) by scale factor 0.788953\nI1207 09:36:40.896669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27648 > 2) by scale factor 0.61041\nI1207 09:36:41.840498   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27145 > 2) by scale factor 0.611349\nI1207 09:36:42.783911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66987 > 2) by scale factor 0.544979\nI1207 09:36:43.727609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49954 > 2) by scale factor 0.800147\nI1207 09:36:44.670680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94482 > 2) by scale factor 0.679158\nI1207 09:36:45.613229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82192 > 2) by scale factor 0.708737\nI1207 09:36:46.556910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56792 > 2) by scale factor 0.560551\nI1207 09:36:48.440197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52343 > 2) by scale factor 0.792573\nI1207 09:36:49.383275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46788 > 2) by scale factor 0.576721\nI1207 09:36:50.326676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7911 > 2) by scale factor 0.716563\nI1207 09:36:51.270380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47912 > 2) by scale factor 0.806739\nI1207 09:36:52.214006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30635 > 2) by scale factor 0.604897\nI1207 09:36:53.157215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62847 > 2) by scale factor 0.760899\nI1207 09:36:54.100910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72761 > 2) by scale factor 0.733243\nI1207 09:36:55.044419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24029 > 2) by scale factor 0.892741\nI1207 09:36:55.988412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.65957 > 2) by scale factor 0.752\nI1207 09:36:56.931931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06499 > 2) by scale factor 0.652532\nI1207 09:36:57.875483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79078 > 2) by scale factor 0.527596\nI1207 09:36:58.818822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65676 > 2) by scale factor 0.546932\nI1207 09:36:59.762459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08009 > 2) by scale factor 0.649331\nI1207 09:37:00.705920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30179 > 2) by scale factor 0.605731\nI1207 09:37:01.649242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03863 > 2) by scale factor 0.658192\nI1207 09:37:02.592269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24075 > 2) by scale factor 0.892559\nI1207 09:37:03.535934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77464 > 2) by scale factor 0.720815\nI1207 09:37:04.478876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0394 > 2) by scale factor 0.980679\nI1207 09:37:05.421684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40804 > 2) by scale factor 0.586847\nI1207 09:37:06.364967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67345 > 2) by scale factor 0.748096\nI1207 09:37:07.308115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50034 > 2) by scale factor 0.799892\nI1207 09:37:08.251469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66228 > 2) by scale factor 0.751236\nI1207 09:37:09.194675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30946 > 2) by scale factor 0.866003\nI1207 09:37:10.137851   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64564 > 2) by scale factor 0.755961\nI1207 09:37:11.081012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92305 > 2) by scale factor 0.509807\nI1207 09:37:12.024184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91794 > 2) by scale factor 0.685415\nI1207 09:37:12.967665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09316 > 2) by scale factor 0.955495\nI1207 09:37:13.910908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42832 > 2) by scale factor 0.823616\nI1207 09:37:14.853886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92333 > 2) by scale factor 0.684152\nI1207 09:37:15.797019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69603 > 2) by scale factor 0.541121\nI1207 09:37:16.740345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55536 > 2) by scale factor 0.78267\nI1207 09:37:17.683804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17615 > 2) by scale factor 0.629693\nI1207 09:37:18.627096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15952 > 2) by scale factor 0.633007\nI1207 09:37:19.570158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47285 > 2) by scale factor 0.575896\nI1207 09:37:20.513597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75712 > 2) by scale factor 0.532322\nI1207 09:37:21.457206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40473 > 2) by scale factor 0.831694\nI1207 09:37:22.400526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32337 > 2) by scale factor 0.601798\nI1207 09:37:23.343592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.67966 > 2) by scale factor 0.746363\nI1207 09:37:24.286568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12995 > 2) by scale factor 0.638988\nI1207 09:37:25.229606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62497 > 2) by scale factor 0.761913\nI1207 09:37:26.173374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88456 > 2) by scale factor 0.514858\nI1207 09:37:27.116293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33842 > 2) by scale factor 0.599085\nI1207 09:37:28.059603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33865 > 2) by scale factor 0.599044\nI1207 09:37:29.002512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.261 > 2) by scale factor 0.469373\nI1207 09:37:29.014569   369 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1207 09:38:21.973029   369 solver.cpp:404]     Test net output #0: accuracy = 0.11245\nI1207 09:38:21.973386   369 solver.cpp:404]     Test net output #1: loss = 12.8686 (* 1 = 12.8686 loss)\nI1207 09:38:22.846226   369 solver.cpp:228] Iteration 9800, loss = 13.1417\nI1207 09:38:22.846282   369 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 09:38:22.846300   369 solver.cpp:244]     Train net output #1: loss = 13.1417 (* 1 = 13.1417 loss)\nI1207 09:38:22.925981   369 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1207 09:38:22.936149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95058 > 2) by scale factor 0.506255\nI1207 09:38:23.876061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06987 > 2) by scale factor 0.491416\nI1207 09:38:24.816606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32516 > 2) by scale factor 0.601476\nI1207 09:38:25.756862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.92996 > 2) by scale factor 0.508911\nI1207 09:38:26.697039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97989 > 2) by scale factor 0.671165\nI1207 09:38:27.637279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97775 > 2) by scale factor 0.671648\nI1207 09:38:28.577409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35704 > 2) by scale factor 0.595764\nI1207 09:38:29.517709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28103 > 2) by scale factor 0.609565\nI1207 09:38:30.457832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26787 > 2) by scale factor 0.612019\nI1207 09:38:31.398110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68287 > 2) by scale factor 0.543055\nI1207 09:38:32.338408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28007 > 2) by scale factor 0.609743\nI1207 09:38:33.279016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24051 > 2) by scale factor 0.617187\nI1207 09:38:34.218842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38603 > 2) by scale factor 0.455993\nI1207 09:38:35.158824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3603 > 2) by scale factor 0.595186\nI1207 09:38:36.098703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74103 > 2) by scale factor 0.534612\nI1207 09:38:37.038053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2505 > 2) by scale factor 0.888692\nI1207 09:38:37.977680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68268 > 2) by scale factor 0.745524\nI1207 09:38:38.917963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73737 > 2) by scale factor 0.730627\nI1207 09:38:39.857970   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14396 > 2) by scale factor 0.932852\nI1207 09:38:40.797845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2499 > 2) by scale factor 0.888929\nI1207 09:38:41.737953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06449 > 2) by scale factor 0.652637\nI1207 09:38:42.678164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53881 > 2) by scale factor 0.565161\nI1207 09:38:43.617822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07319 > 2) by scale factor 0.491016\nI1207 09:38:44.557767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22349 > 2) by scale factor 0.620445\nI1207 09:38:45.497330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87516 > 2) by scale factor 0.695613\nI1207 09:38:46.436617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75105 > 2) by scale factor 0.726995\nI1207 09:38:47.376520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16603 > 2) by scale factor 0.92335\nI1207 09:38:48.316416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63455 > 2) by scale factor 0.759144\nI1207 09:38:49.256561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22454 > 2) by scale factor 0.620244\nI1207 09:38:50.196818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89306 > 2) by scale factor 0.513734\nI1207 09:38:51.136312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82923 > 2) by scale factor 0.706906\nI1207 09:38:52.076310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27841 > 2) by scale factor 0.877806\nI1207 09:38:53.016858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.51032 > 2) by scale factor 0.79671\nI1207 09:38:53.956444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5833 > 2) by scale factor 0.558145\nI1207 09:38:54.896728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94865 > 2) by scale factor 0.678277\nI1207 09:38:55.840201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08766 > 2) by scale factor 0.958011\nI1207 09:38:56.783041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57943 > 2) by scale factor 0.775364\nI1207 09:38:57.725901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67522 > 2) by scale factor 0.544185\nI1207 09:38:58.669448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58305 > 2) by scale factor 0.558184\nI1207 09:38:59.612143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93251 > 2) by scale factor 0.68201\nI1207 09:39:00.555171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51038 > 2) by scale factor 0.796692\nI1207 09:39:01.498519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1464 > 2) by scale factor 0.635646\nI1207 09:39:02.442112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46462 > 2) by scale factor 0.811484\nI1207 09:39:03.384665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71469 > 2) by scale factor 0.538404\nI1207 09:39:04.327672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99143 > 2) by scale factor 0.668576\nI1207 09:39:05.271066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99231 > 2) by scale factor 0.500963\nI1207 09:39:06.213788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26563 > 2) by scale factor 0.882756\nI1207 09:39:07.155966   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25822 > 2) by scale factor 0.885653\nI1207 09:39:08.098439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67056 > 2) by scale factor 0.748905\nI1207 09:39:09.040738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06155 > 2) by scale factor 0.492423\nI1207 09:39:09.983552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2603 > 2) by scale factor 0.884838\nI1207 09:39:10.926434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17827 > 2) by scale factor 0.918162\nI1207 09:39:11.868851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20854 > 2) by scale factor 0.905577\nI1207 09:39:12.810920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79264 > 2) by scale factor 0.716169\nI1207 09:39:13.753741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51825 > 2) by scale factor 0.568464\nI1207 09:39:14.696208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80099 > 2) by scale factor 0.714033\nI1207 09:39:15.639123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52274 > 2) by scale factor 0.56774\nI1207 09:39:16.581845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0236 > 2) by scale factor 0.497067\nI1207 09:39:17.524682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16659 > 2) by scale factor 0.631594\nI1207 09:39:18.467599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17367 > 2) by scale factor 0.630186\nI1207 09:39:19.410502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98524 > 2) by scale factor 0.669962\nI1207 09:39:20.353521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.48519 > 2) by scale factor 0.573857\nI1207 09:39:21.296677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01604 > 2) by scale factor 0.663121\nI1207 09:39:22.239559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09839 > 2) by scale factor 0.953113\nI1207 09:39:23.182793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77368 > 2) by scale factor 0.529986\nI1207 09:39:24.125718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35945 > 2) by scale factor 0.458773\nI1207 09:39:25.068630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35733 > 2) by scale factor 0.595711\nI1207 09:39:26.011281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86029 > 2) by scale factor 0.69923\nI1207 09:39:26.954095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63708 > 2) by scale factor 0.549891\nI1207 09:39:27.896792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36405 > 2) by scale factor 0.846005\nI1207 09:39:28.839655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22813 > 2) by scale factor 0.619554\nI1207 09:39:29.781911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61643 > 2) by scale factor 0.764401\nI1207 09:39:30.724575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16261 > 2) by scale factor 0.924809\nI1207 09:39:31.667428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35854 > 2) by scale factor 0.847983\nI1207 09:39:32.609799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22232 > 2) by scale factor 0.899961\nI1207 09:39:33.552644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28262 > 2) by scale factor 0.876185\nI1207 09:39:34.495209   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79198 > 2) by scale factor 0.716338\nI1207 09:39:35.437678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22806 > 2) by scale factor 0.619567\nI1207 09:39:36.380247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51094 > 2) by scale factor 0.569647\nI1207 09:39:37.323269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78276 > 2) by scale factor 0.71871\nI1207 09:39:39.204664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86971 > 2) by scale factor 0.696934\nI1207 09:39:40.147003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29151 > 2) by scale factor 0.872786\nI1207 09:39:41.089488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10468 > 2) by scale factor 0.64419\nI1207 09:39:42.032213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76886 > 2) by scale factor 0.722319\nI1207 09:39:42.974894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23778 > 2) by scale factor 0.471946\nI1207 09:39:43.917346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2615 > 2) by scale factor 0.884369\nI1207 09:39:44.859932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21461 > 2) by scale factor 0.622159\nI1207 09:39:45.802666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65917 > 2) by scale factor 0.546573\nI1207 09:39:46.745507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25173 > 2) by scale factor 0.470397\nI1207 09:39:47.688525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03108 > 2) by scale factor 0.659831\nI1207 09:39:48.631475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.33667 > 2) by scale factor 0.85592\nI1207 09:39:50.515168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03455 > 2) by scale factor 0.659075\nI1207 09:39:52.398267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41368 > 2) by scale factor 0.453136\nI1207 09:39:53.341562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6062 > 2) by scale factor 0.7674\nI1207 09:39:54.283994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4895 > 2) by scale factor 0.803373\nI1207 09:39:55.226742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74448 > 2) by scale factor 0.534119\nI1207 09:39:56.169529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8581 > 2) by scale factor 0.699766\nI1207 09:39:56.181443   369 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1207 09:40:49.124487   369 solver.cpp:404]     Test net output #0: accuracy = 0.1367\nI1207 09:40:49.124872   369 solver.cpp:404]     Test net output #1: loss = 10.6402 (* 1 = 10.6402 loss)\nI1207 09:40:49.997943   369 solver.cpp:228] Iteration 9900, loss = 10.169\nI1207 09:40:49.998001   369 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 09:40:49.998019   369 solver.cpp:244]     Train net output #1: loss = 10.169 (* 1 = 10.169 loss)\nI1207 09:40:50.073179   369 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1207 09:40:50.082994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26856 > 2) by scale factor 0.611891\nI1207 09:40:51.021872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78409 > 2) by scale factor 0.718368\nI1207 09:40:51.961374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36889 > 2) by scale factor 0.593668\nI1207 09:40:52.901576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87459 > 2) by 
scale factor 0.695752\nI1207 09:40:54.779168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50851 > 2) by scale factor 0.797286\nI1207 09:40:55.719434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65107 > 2) by scale factor 0.547784\nI1207 09:40:58.535346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30356 > 2) by scale factor 0.605408\nI1207 09:40:59.475383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25403 > 2) by scale factor 0.614622\nI1207 09:41:00.415500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03852 > 2) by scale factor 0.658214\nI1207 09:41:01.355535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10964 > 2) by scale factor 0.643161\nI1207 09:41:02.295800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46009 > 2) by scale factor 0.81298\nI1207 09:41:03.235998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48736 > 2) by scale factor 0.804066\nI1207 09:41:04.176590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6036 > 2) by scale factor 0.555001\nI1207 09:41:06.054550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17147 > 2) by scale factor 0.630623\nI1207 09:41:06.993628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13277 > 2) by scale factor 0.638412\nI1207 09:41:07.933614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17279 > 2) by scale factor 0.63036\nI1207 09:41:08.873550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43207 > 2) by scale factor 0.582738\nI1207 09:41:09.813374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42025 > 2) by scale factor 0.452463\nI1207 09:41:10.753427   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.30051 > 2) by scale factor 0.869371\nI1207 09:41:11.693459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48916 > 2) by scale factor 0.573203\nI1207 09:41:12.633828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34657 > 2) by scale factor 0.597628\nI1207 09:41:13.573499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48086 > 2) by scale factor 0.574571\nI1207 09:41:14.513365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5784 > 2) by scale factor 0.775675\nI1207 09:41:16.391028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53438 > 2) by scale factor 0.789147\nI1207 09:41:17.330567   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42166 > 2) by scale factor 0.452319\nI1207 09:41:18.270767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38154 > 2) by scale factor 0.591446\nI1207 09:41:19.210541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61011 > 2) by scale factor 0.554\nI1207 09:41:20.150969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63992 > 2) by scale factor 0.431042\nI1207 09:41:21.094208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47889 > 2) by scale factor 0.574895\nI1207 09:41:22.037273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48788 > 2) by scale factor 0.573414\nI1207 09:41:22.980206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30492 > 2) by scale factor 0.605159\nI1207 09:41:23.923043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89618 > 2) by scale factor 0.513323\nI1207 09:41:24.866155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93699 > 2) by 
scale factor 0.508002\nI1207 09:41:25.809176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7714 > 2) by scale factor 0.721657\nI1207 09:41:26.752560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67467 > 2) by scale factor 0.427837\nI1207 09:41:27.695284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34307 > 2) by scale factor 0.598252\nI1207 09:41:28.637897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40162 > 2) by scale factor 0.83277\nI1207 09:41:29.580926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20967 > 2) by scale factor 0.623117\nI1207 09:41:30.523653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29169 > 2) by scale factor 0.466017\nI1207 09:41:31.466563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83346 > 2) by scale factor 0.521721\nI1207 09:41:32.409495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60141 > 2) by scale factor 0.555338\nI1207 09:41:33.352025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67059 > 2) by scale factor 0.544872\nI1207 09:41:34.293992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56598 > 2) by scale factor 0.560856\nI1207 09:41:35.237145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08311 > 2) by scale factor 0.960101\nI1207 09:41:36.180362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69718 > 2) by scale factor 0.540953\nI1207 09:41:37.123883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81769 > 2) by scale factor 0.7098\nI1207 09:41:38.066180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92871 > 2) by scale factor 0.682896\nI1207 09:41:39.008394   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.54736 > 2) by scale factor 0.785128\nI1207 09:41:39.951637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22217 > 2) by scale factor 0.900021\nI1207 09:41:40.894807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76245 > 2) by scale factor 0.419952\nI1207 09:41:41.838006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98763 > 2) by scale factor 0.400992\nI1207 09:41:42.780704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2279 > 2) by scale factor 0.619599\nI1207 09:41:43.723350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65047 > 2) by scale factor 0.547874\nI1207 09:41:44.666440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70552 > 2) by scale factor 0.539735\nI1207 09:41:45.609503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97177 > 2) by scale factor 0.503554\nI1207 09:41:46.552511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03476 > 2) by scale factor 0.659031\nI1207 09:41:47.495069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95755 > 2) by scale factor 0.505364\nI1207 09:41:48.437782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90246 > 2) by scale factor 0.689071\nI1207 09:41:49.380980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98385 > 2) by scale factor 0.670274\nI1207 09:41:50.324237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01339 > 2) by scale factor 0.498332\nI1207 09:41:51.267351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47388 > 2) by scale factor 0.447039\nI1207 09:41:52.210309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89251 > 2) by 
scale factor 0.691442\nI1207 09:41:53.153173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98898 > 2) by scale factor 0.669126\nI1207 09:41:54.095958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70955 > 2) by scale factor 0.539149\nI1207 09:41:55.038192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49039 > 2) by scale factor 0.573002\nI1207 09:41:55.981279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96719 > 2) by scale factor 0.504135\nI1207 09:41:56.924073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44013 > 2) by scale factor 0.819628\nI1207 09:41:57.866914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2872 > 2) by scale factor 0.874432\nI1207 09:41:58.809885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01014 > 2) by scale factor 0.664422\nI1207 09:41:59.752734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52178 > 2) by scale factor 0.79309\nI1207 09:42:00.695734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05058 > 2) by scale factor 0.655613\nI1207 09:42:01.638059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24913 > 2) by scale factor 0.615549\nI1207 09:42:02.580047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25399 > 2) by scale factor 0.887317\nI1207 09:42:03.522861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74675 > 2) by scale factor 0.728132\nI1207 09:42:04.465719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76634 > 2) by scale factor 0.722977\nI1207 09:42:05.408354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17305 > 2) by scale factor 0.630308\nI1207 09:42:06.351173   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15109 > 2) by scale factor 0.481801\nI1207 09:42:07.294122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85258 > 2) by scale factor 0.701119\nI1207 09:42:08.236752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71515 > 2) by scale factor 0.736608\nI1207 09:42:09.179353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43397 > 2) by scale factor 0.582416\nI1207 09:42:10.122028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52537 > 2) by scale factor 0.791963\nI1207 09:42:11.065176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06885 > 2) by scale factor 0.65171\nI1207 09:42:12.008234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89213 > 2) by scale factor 0.513857\nI1207 09:42:12.951038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81004 > 2) by scale factor 0.711734\nI1207 09:42:13.893961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65587 > 2) by scale factor 0.753048\nI1207 09:42:14.836832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99613 > 2) by scale factor 0.500484\nI1207 09:42:15.779819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21282 > 2) by scale factor 0.622507\nI1207 09:42:16.722892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44437 > 2) by scale factor 0.818206\nI1207 09:42:17.665768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57819 > 2) by scale factor 0.775738\nI1207 09:42:18.608588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0923 > 2) by scale factor 0.646767\nI1207 09:42:19.551389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.77637 > 2) by scale factor 0.720366\nI1207 09:42:21.435000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45975 > 2) by scale factor 0.813092\nI1207 09:42:22.376786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66431 > 2) by scale factor 0.750664\nI1207 09:42:23.318243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58081 > 2) by scale factor 0.77495\nI1207 09:42:23.329576   369 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1207 09:43:16.276379   369 solver.cpp:404]     Test net output #0: accuracy = 0.2118\nI1207 09:43:16.276772   369 solver.cpp:404]     Test net output #1: loss = 9.26745 (* 1 = 9.26745 loss)\nI1207 09:43:17.149436   369 solver.cpp:228] Iteration 10000, loss = 9.69229\nI1207 09:43:17.149488   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 09:43:17.149505   369 solver.cpp:244]     Train net output #1: loss = 9.69229 (* 1 = 9.69229 loss)\nI1207 09:43:17.230962   369 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1207 09:43:17.240993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59533 > 2) by scale factor 0.770614\nI1207 09:43:18.181169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8254 > 2) by scale factor 0.707865\nI1207 09:43:19.121359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48066 > 2) by scale factor 0.446363\nI1207 09:43:20.061257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88853 > 2) by scale factor 0.692393\nI1207 09:43:21.000916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77434 > 2) by scale factor 0.418906\nI1207 09:43:21.941001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01506 > 2) by scale factor 0.663336\nI1207 09:43:22.881341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.4919 > 2) by scale factor 0.572754\nI1207 09:43:23.821110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63336 > 2) by scale factor 0.759487\nI1207 09:43:24.761142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57819 > 2) by scale factor 0.775738\nI1207 09:43:25.701131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66028 > 2) by scale factor 0.546406\nI1207 09:43:26.641083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34462 > 2) by scale factor 0.460339\nI1207 09:43:27.580863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64847 > 2) by scale factor 0.755153\nI1207 09:43:28.520925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31679 > 2) by scale factor 0.602992\nI1207 09:43:29.460813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79465 > 2) by scale factor 0.527058\nI1207 09:43:30.400897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76586 > 2) by scale factor 0.723101\nI1207 09:43:31.341022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72731 > 2) by scale factor 0.53658\nI1207 09:43:32.281396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2337 > 2) by scale factor 0.895377\nI1207 09:43:33.221318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94782 > 2) by scale factor 0.678467\nI1207 09:43:34.161479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55274 > 2) by scale factor 0.562946\nI1207 09:43:35.101359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80942 > 2) by scale factor 0.415851\nI1207 09:43:36.041465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30909 > 2) by scale factor 0.464135\nI1207 09:43:36.981353   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06729 > 2) by scale factor 0.652042\nI1207 09:43:37.921540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53092 > 2) by scale factor 0.566424\nI1207 09:43:38.861893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00483 > 2) by scale factor 0.665596\nI1207 09:43:39.801846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74142 > 2) by scale factor 0.534556\nI1207 09:43:40.741940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16472 > 2) by scale factor 0.631967\nI1207 09:43:41.681565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1225 > 2) by scale factor 0.640512\nI1207 09:43:42.621587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78938 > 2) by scale factor 0.527791\nI1207 09:43:44.499971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26748 > 2) by scale factor 0.612092\nI1207 09:43:45.439865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7854 > 2) by scale factor 0.718029\nI1207 09:43:46.382525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86621 > 2) by scale factor 0.517303\nI1207 09:43:47.325670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14618 > 2) by scale factor 0.635692\nI1207 09:43:48.268302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14328 > 2) by scale factor 0.636279\nI1207 09:43:49.211165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05109 > 2) by scale factor 0.97509\nI1207 09:43:50.153666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88448 > 2) by scale factor 0.51487\nI1207 09:43:51.096487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.07432 > 2) by scale factor 0.650551\nI1207 09:43:52.039191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84696 > 2) by scale factor 0.519891\nI1207 09:43:52.981600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18178 > 2) by scale factor 0.916684\nI1207 09:43:53.924547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09867 > 2) by scale factor 0.952982\nI1207 09:43:54.867185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09711 > 2) by scale factor 0.953694\nI1207 09:43:55.809708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83185 > 2) by scale factor 0.706252\nI1207 09:43:56.752274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22964 > 2) by scale factor 0.619265\nI1207 09:43:57.694916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66057 > 2) by scale factor 0.546364\nI1207 09:43:58.637588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82785 > 2) by scale factor 0.522486\nI1207 09:43:59.580241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10953 > 2) by scale factor 0.94808\nI1207 09:44:00.522893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76551 > 2) by scale factor 0.723193\nI1207 09:44:01.465421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59037 > 2) by scale factor 0.557045\nI1207 09:44:02.408115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71556 > 2) by scale factor 0.736496\nI1207 09:44:03.350987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74959 > 2) by scale factor 0.72738\nI1207 09:44:04.293210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31087 > 2) by scale factor 0.865474\nI1207 09:44:05.235435   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76635 > 2) by scale factor 0.722975\nI1207 09:44:06.178107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33208 > 2) by scale factor 0.600225\nI1207 09:44:07.120790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23399 > 2) by scale factor 0.618431\nI1207 09:44:08.063449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67549 > 2) by scale factor 0.544145\nI1207 09:44:09.005692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18731 > 2) by scale factor 0.627489\nI1207 09:44:09.947557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85143 > 2) by scale factor 0.701403\nI1207 09:44:10.890158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03467 > 2) by scale factor 0.659051\nI1207 09:44:11.833158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48807 > 2) by scale factor 0.573383\nI1207 09:44:12.775889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32334 > 2) by scale factor 0.601805\nI1207 09:44:13.717734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55343 > 2) by scale factor 0.562836\nI1207 09:44:14.660464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46574 > 2) by scale factor 0.577077\nI1207 09:44:15.603415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83281 > 2) by scale factor 0.706013\nI1207 09:44:16.546331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18304 > 2) by scale factor 0.916154\nI1207 09:44:17.489382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27703 > 2) by scale factor 0.610308\nI1207 09:44:18.431758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.78814 > 2) by scale factor 0.717325\nI1207 09:44:19.374065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90756 > 2) by scale factor 0.687863\nI1207 09:44:20.316951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24523 > 2) by scale factor 0.890778\nI1207 09:44:21.258965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01438 > 2) by scale factor 0.663487\nI1207 09:44:22.200709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74314 > 2) by scale factor 0.729091\nI1207 09:44:23.143318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23826 > 2) by scale factor 0.617615\nI1207 09:44:24.085613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86108 > 2) by scale factor 0.699038\nI1207 09:44:25.027755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.184 > 2) by scale factor 0.478011\nI1207 09:44:25.970274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15225 > 2) by scale factor 0.481667\nI1207 09:44:26.912760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56603 > 2) by scale factor 0.560847\nI1207 09:44:27.855398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00603 > 2) by scale factor 0.66533\nI1207 09:44:28.798077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68923 > 2) by scale factor 0.743707\nI1207 09:44:29.740684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98076 > 2) by scale factor 0.670969\nI1207 09:44:30.683897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64111 > 2) by scale factor 0.757258\nI1207 09:44:31.626343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96199 > 2) by scale factor 0.504797\nI1207 09:44:32.569205   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53129 > 2) by scale factor 0.566365\nI1207 09:44:33.511662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9843 > 2) by scale factor 0.670174\nI1207 09:44:34.453508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9235 > 2) by scale factor 0.509749\nI1207 09:44:35.396185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99398 > 2) by scale factor 0.668008\nI1207 09:44:36.338791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63207 > 2) by scale factor 0.759858\nI1207 09:44:37.281512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90482 > 2) by scale factor 0.512187\nI1207 09:44:38.224215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0995 > 2) by scale factor 0.645266\nI1207 09:44:39.166684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98586 > 2) by scale factor 0.501774\nI1207 09:44:40.109293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63866 > 2) by scale factor 0.757961\nI1207 09:44:41.051759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04684 > 2) by scale factor 0.656418\nI1207 09:44:41.994794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30529 > 2) by scale factor 0.605091\nI1207 09:44:42.937007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6013 > 2) by scale factor 0.555355\nI1207 09:44:43.879539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68597 > 2) by scale factor 0.744611\nI1207 09:44:44.822104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05691 > 2) by scale factor 0.654255\nI1207 09:44:45.764786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.50388 > 2) by scale factor 0.798761\nI1207 09:44:46.707350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66683 > 2) by scale factor 0.54543\nI1207 09:44:47.650357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92611 > 2) by scale factor 0.509411\nI1207 09:44:48.593035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11211 > 2) by scale factor 0.642651\nI1207 09:44:49.535535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44016 > 2) by scale factor 0.81962\nI1207 09:44:50.478080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8067 > 2) by scale factor 0.71258\nI1207 09:44:50.490041   369 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1207 09:45:43.434676   369 solver.cpp:404]     Test net output #0: accuracy = 0.19585\nI1207 09:45:43.435050   369 solver.cpp:404]     Test net output #1: loss = 9.69663 (* 1 = 9.69663 loss)\nI1207 09:45:44.308615   369 solver.cpp:228] Iteration 10100, loss = 9.10593\nI1207 09:45:44.308677   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 09:45:44.308701   369 solver.cpp:244]     Train net output #1: loss = 9.10593 (* 1 = 9.10593 loss)\nI1207 09:45:44.384245   369 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1207 09:45:44.394407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68906 > 2) by scale factor 0.743754\nI1207 09:45:45.334506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61359 > 2) by scale factor 0.553466\nI1207 09:45:46.274590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80908 > 2) by scale factor 0.711977\nI1207 09:45:47.214925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75671 > 2) by scale factor 0.725504\nI1207 09:45:48.155251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.35232 > 2) by scale factor 0.459525\nI1207 09:45:49.095480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96719 > 2) by scale factor 0.674038\nI1207 09:45:50.035394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49301 > 2) by scale factor 0.445136\nI1207 09:45:50.974012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6227 > 2) by scale factor 0.552074\nI1207 09:45:51.913686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07996 > 2) by scale factor 0.64936\nI1207 09:45:52.853878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99874 > 2) by scale factor 0.666947\nI1207 09:45:53.793928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13106 > 2) by scale factor 0.638762\nI1207 09:45:54.733747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71601 > 2) by scale factor 0.736375\nI1207 09:45:55.674250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84155 > 2) by scale factor 0.520623\nI1207 09:45:56.614521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0075 > 2) by scale factor 0.665003\nI1207 09:45:57.554921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45311 > 2) by scale factor 0.579188\nI1207 09:45:58.495074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36268 > 2) by scale factor 0.594764\nI1207 09:45:59.435423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13877 > 2) by scale factor 0.637192\nI1207 09:46:00.375712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97078 > 2) by scale factor 0.503679\nI1207 09:46:01.315640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2207 > 2) by scale factor 0.620984\nI1207 09:46:02.255969   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32097 > 2) by scale factor 0.602233\nI1207 09:46:03.196092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68065 > 2) by scale factor 0.543382\nI1207 09:46:04.136447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49857 > 2) by scale factor 0.571661\nI1207 09:46:05.076174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96014 > 2) by scale factor 0.675645\nI1207 09:46:06.016307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01669 > 2) by scale factor 0.662978\nI1207 09:46:06.956663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46336 > 2) by scale factor 0.811899\nI1207 09:46:07.896575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00785 > 2) by scale factor 0.664926\nI1207 09:46:08.836210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04384 > 2) by scale factor 0.657065\nI1207 09:46:09.776235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85654 > 2) by scale factor 0.700149\nI1207 09:46:10.716320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06663 > 2) by scale factor 0.491808\nI1207 09:46:11.656185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89356 > 2) by scale factor 0.691189\nI1207 09:46:12.596372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99712 > 2) by scale factor 0.400231\nI1207 09:46:13.538537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99361 > 2) by scale factor 0.5008\nI1207 09:46:14.482089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81098 > 2) by scale factor 0.711495\nI1207 09:46:15.425240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.5523 > 2) by scale factor 0.563015\nI1207 09:46:16.368145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72069 > 2) by scale factor 0.423667\nI1207 09:46:17.311148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72496 > 2) by scale factor 0.536919\nI1207 09:46:18.254242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4307 > 2) by scale factor 0.822808\nI1207 09:46:19.197468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2019 > 2) by scale factor 0.62463\nI1207 09:46:20.140758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81785 > 2) by scale factor 0.70976\nI1207 09:46:21.083869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63404 > 2) by scale factor 0.550352\nI1207 09:46:22.026180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99884 > 2) by scale factor 0.500145\nI1207 09:46:22.969476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11784 > 2) by scale factor 0.485692\nI1207 09:46:23.912650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16763 > 2) by scale factor 0.479889\nI1207 09:46:24.855754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95984 > 2) by scale factor 0.505071\nI1207 09:46:25.799100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67931 > 2) by scale factor 0.746461\nI1207 09:46:26.742179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21015 > 2) by scale factor 0.623025\nI1207 09:46:27.685895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67169 > 2) by scale factor 0.74859\nI1207 09:46:28.629000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63391 > 2) by scale factor 0.759327\nI1207 09:46:29.571926   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81282 > 2) by scale factor 0.524547\nI1207 09:46:30.514967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50301 > 2) by scale factor 0.570937\nI1207 09:46:31.458076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57296 > 2) by scale factor 0.437354\nI1207 09:46:32.401465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83587 > 2) by scale factor 0.705252\nI1207 09:46:33.344164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5579 > 2) by scale factor 0.781892\nI1207 09:46:34.287842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77846 > 2) by scale factor 0.719823\nI1207 09:46:35.230249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25685 > 2) by scale factor 0.61409\nI1207 09:46:36.172610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01586 > 2) by scale factor 0.66316\nI1207 09:46:37.116173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92897 > 2) by scale factor 0.509039\nI1207 09:46:38.058888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48166 > 2) by scale factor 0.805914\nI1207 09:46:39.000457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09987 > 2) by scale factor 0.645189\nI1207 09:46:39.942656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50211 > 2) by scale factor 0.799327\nI1207 09:46:40.885926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30708 > 2) by scale factor 0.464352\nI1207 09:46:41.829216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14245 > 2) by scale factor 0.933513\nI1207 09:46:42.772158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.60083 > 2) by scale factor 0.768984\nI1207 09:46:43.715121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10604 > 2) by scale factor 0.643907\nI1207 09:46:44.658413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87059 > 2) by scale factor 0.696722\nI1207 09:46:45.600677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68827 > 2) by scale factor 0.743973\nI1207 09:46:46.543515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40027 > 2) by scale factor 0.588188\nI1207 09:46:47.486747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92785 > 2) by scale factor 0.683094\nI1207 09:46:48.430078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02907 > 2) by scale factor 0.660268\nI1207 09:46:49.372653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80601 > 2) by scale factor 0.712755\nI1207 09:46:50.315281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73465 > 2) by scale factor 0.731354\nI1207 09:46:51.258414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93288 > 2) by scale factor 0.681923\nI1207 09:46:52.201464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45673 > 2) by scale factor 0.81409\nI1207 09:46:53.144212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4359 > 2) by scale factor 0.450867\nI1207 09:46:54.087080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63597 > 2) by scale factor 0.550059\nI1207 09:46:55.029894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22718 > 2) by scale factor 0.619736\nI1207 09:46:55.973250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17469 > 2) by scale factor 0.629983\nI1207 09:46:56.916421   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81894 > 2) by scale factor 0.709485\nI1207 09:46:57.859643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06961 > 2) by scale factor 0.966365\nI1207 09:46:58.802686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05144 > 2) by scale factor 0.655428\nI1207 09:47:00.687075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81843 > 2) by scale factor 0.709616\nI1207 09:47:01.630122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67193 > 2) by scale factor 0.748523\nI1207 09:47:02.573444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9089 > 2) by scale factor 0.687544\nI1207 09:47:03.516418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2408 > 2) by scale factor 0.617131\nI1207 09:47:04.459554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49676 > 2) by scale factor 0.801037\nI1207 09:47:05.402370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82618 > 2) by scale factor 0.522714\nI1207 09:47:06.345386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95477 > 2) by scale factor 0.676872\nI1207 09:47:07.288072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62898 > 2) by scale factor 0.760752\nI1207 09:47:08.231340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62018 > 2) by scale factor 0.763305\nI1207 09:47:09.174515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7157 > 2) by scale factor 0.736459\nI1207 09:47:10.117657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49456 > 2) by scale factor 0.444982\nI1207 09:47:11.060467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.22305 > 2) by scale factor 0.620529\nI1207 09:47:12.003206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74708 > 2) by scale factor 0.728045\nI1207 09:47:12.946437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16942 > 2) by scale factor 0.921905\nI1207 09:47:13.889165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85391 > 2) by scale factor 0.700794\nI1207 09:47:14.832443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30142 > 2) by scale factor 0.869028\nI1207 09:47:15.775382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32676 > 2) by scale factor 0.601185\nI1207 09:47:16.718608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07642 > 2) by scale factor 0.650106\nI1207 09:47:17.661278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15815 > 2) by scale factor 0.633283\nI1207 09:47:17.673219   369 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1207 09:48:10.620854   369 solver.cpp:404]     Test net output #0: accuracy = 0.1709\nI1207 09:48:10.621224   369 solver.cpp:404]     Test net output #1: loss = 8.0625 (* 1 = 8.0625 loss)\nI1207 09:48:11.494748   369 solver.cpp:228] Iteration 10200, loss = 8.34492\nI1207 09:48:11.494799   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 09:48:11.494818   369 solver.cpp:244]     Train net output #1: loss = 8.34492 (* 1 = 8.34492 loss)\nI1207 09:48:11.573331   369 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1207 09:48:11.583144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.076 > 2) by scale factor 0.650194\nI1207 09:48:12.522438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25311 > 2) by scale factor 0.614796\nI1207 09:48:13.462221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3638 
> 2) by scale factor 0.846095\nI1207 09:48:14.402384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66238 > 2) by scale factor 0.751207\nI1207 09:48:15.341693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04289 > 2) by scale factor 0.65727\nI1207 09:48:16.281519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55038 > 2) by scale factor 0.784197\nI1207 09:48:17.221812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4727 > 2) by scale factor 0.57592\nI1207 09:48:18.161885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3253 > 2) by scale factor 0.860104\nI1207 09:48:19.100759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0841 > 2) by scale factor 0.648487\nI1207 09:48:20.040658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96568 > 2) by scale factor 0.504328\nI1207 09:48:20.980002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0597 > 2) by scale factor 0.971014\nI1207 09:48:21.919749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99808 > 2) by scale factor 0.50024\nI1207 09:48:22.859936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67784 > 2) by scale factor 0.427547\nI1207 09:48:23.800511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45313 > 2) by scale factor 0.815284\nI1207 09:48:24.740830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45833 > 2) by scale factor 0.448599\nI1207 09:48:25.681078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13171 > 2) by scale factor 0.63863\nI1207 09:48:26.621348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96719 > 2) by scale factor 0.674037\nI1207 09:48:28.498731   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49586 > 2) by scale factor 0.572106\nI1207 09:48:29.438976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00278 > 2) by scale factor 0.666049\nI1207 09:48:30.378623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9137 > 2) by scale factor 0.686413\nI1207 09:48:32.255244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16532 > 2) by scale factor 0.923649\nI1207 09:48:33.194772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12918 > 2) by scale factor 0.484358\nI1207 09:48:34.134718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80756 > 2) by scale factor 0.712361\nI1207 09:48:35.074991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06681 > 2) by scale factor 0.652144\nI1207 09:48:36.014807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35779 > 2) by scale factor 0.84825\nI1207 09:48:36.954630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25887 > 2) by scale factor 0.885399\nI1207 09:48:37.894197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07315 > 2) by scale factor 0.650799\nI1207 09:48:38.833927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72055 > 2) by scale factor 0.735146\nI1207 09:48:39.775960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61314 > 2) by scale factor 0.553536\nI1207 09:48:40.719372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22577 > 2) by scale factor 0.473287\nI1207 09:48:41.662780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62618 > 2) by scale factor 0.551545\nI1207 09:48:42.604756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.90983 > 2) by scale factor 0.511531\nI1207 09:48:43.547652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23232 > 2) by scale factor 0.472554\nI1207 09:48:44.491358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2801 > 2) by scale factor 0.609738\nI1207 09:48:45.434568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17736 > 2) by scale factor 0.478771\nI1207 09:48:46.377568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73522 > 2) by scale factor 0.731203\nI1207 09:48:47.320684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27688 > 2) by scale factor 0.46763\nI1207 09:48:48.263444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97873 > 2) by scale factor 0.502673\nI1207 09:48:49.205909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68674 > 2) by scale factor 0.744396\nI1207 09:48:50.148809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26073 > 2) by scale factor 0.884671\nI1207 09:48:51.091480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62672 > 2) by scale factor 0.761405\nI1207 09:48:52.034500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82365 > 2) by scale factor 0.52306\nI1207 09:48:52.977552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5702 > 2) by scale factor 0.778149\nI1207 09:48:54.860805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86061 > 2) by scale factor 0.699151\nI1207 09:48:55.803827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54347 > 2) by scale factor 0.786326\nI1207 09:48:56.746932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98087 > 2) by scale factor 0.670945\nI1207 09:48:57.690093   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96369 > 2) by scale factor 0.674835\nI1207 09:48:58.632576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70273 > 2) by scale factor 0.739993\nI1207 09:48:59.575667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22761 > 2) by scale factor 0.897822\nI1207 09:49:01.458606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0432 > 2) by scale factor 0.494657\nI1207 09:49:02.402256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27859 > 2) by scale factor 0.610019\nI1207 09:49:03.345537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19523 > 2) by scale factor 0.625934\nI1207 09:49:04.289224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09748 > 2) by scale factor 0.953524\nI1207 09:49:05.232389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17429 > 2) by scale factor 0.630063\nI1207 09:49:06.175577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17308 > 2) by scale factor 0.920354\nI1207 09:49:07.118365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54695 > 2) by scale factor 0.563864\nI1207 09:49:09.001252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65806 > 2) by scale factor 0.546737\nI1207 09:49:09.943908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10425 > 2) by scale factor 0.4873\nI1207 09:49:10.886732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54042 > 2) by scale factor 0.564905\nI1207 09:49:11.829876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91702 > 2) by scale factor 0.685632\nI1207 09:49:12.772109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.11915 > 2) by scale factor 0.641201\nI1207 09:49:13.715065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30619 > 2) by scale factor 0.867231\nI1207 09:49:14.657548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94552 > 2) by scale factor 0.678997\nI1207 09:49:15.600723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85281 > 2) by scale factor 0.519102\nI1207 09:49:16.543653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41074 > 2) by scale factor 0.829622\nI1207 09:49:17.486909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23955 > 2) by scale factor 0.617369\nI1207 09:49:18.429394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7849 > 2) by scale factor 0.718159\nI1207 09:49:19.372381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37226 > 2) by scale factor 0.843078\nI1207 09:49:20.315320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02848 > 2) by scale factor 0.985959\nI1207 09:49:21.258249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45567 > 2) by scale factor 0.578759\nI1207 09:49:22.201169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57995 > 2) by scale factor 0.775209\nI1207 09:49:23.144008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9905 > 2) by scale factor 0.400762\nI1207 09:49:24.086948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73171 > 2) by scale factor 0.732142\nI1207 09:49:25.029434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74441 > 2) by scale factor 0.728755\nI1207 09:49:25.972542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78367 > 2) by scale factor 0.718476\nI1207 09:49:26.915246   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55228 > 2) by scale factor 0.783612\nI1207 09:49:27.858242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25774 > 2) by scale factor 0.885843\nI1207 09:49:28.801429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23486 > 2) by scale factor 0.47227\nI1207 09:49:29.744302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8547 > 2) by scale factor 0.700598\nI1207 09:49:30.687096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97851 > 2) by scale factor 0.671476\nI1207 09:49:31.629815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16768 > 2) by scale factor 0.479883\nI1207 09:49:32.572593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89709 > 2) by scale factor 0.690349\nI1207 09:49:33.515612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57371 > 2) by scale factor 0.559642\nI1207 09:49:34.458514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54135 > 2) by scale factor 0.786982\nI1207 09:49:35.401484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85564 > 2) by scale factor 0.700369\nI1207 09:49:36.344125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81572 > 2) by scale factor 0.524147\nI1207 09:49:37.286428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64378 > 2) by scale factor 0.756492\nI1207 09:49:38.229734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00511 > 2) by scale factor 0.665533\nI1207 09:49:39.172571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28104 > 2) by scale factor 0.609563\nI1207 09:49:40.115651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.60253 > 2) by scale factor 0.768483\nI1207 09:49:41.058701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1739 > 2) by scale factor 0.63014\nI1207 09:49:42.001504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49524 > 2) by scale factor 0.801527\nI1207 09:49:42.944313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0707 > 2) by scale factor 0.965855\nI1207 09:49:43.887249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48688 > 2) by scale factor 0.573578\nI1207 09:49:44.830299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24485 > 2) by scale factor 0.616361\nI1207 09:49:44.842283   369 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1207 09:50:37.780637   369 solver.cpp:404]     Test net output #0: accuracy = 0.1442\nI1207 09:50:37.781000   369 solver.cpp:404]     Test net output #1: loss = 8.58722 (* 1 = 8.58722 loss)\nI1207 09:50:38.654616   369 solver.cpp:228] Iteration 10300, loss = 7.83043\nI1207 09:50:38.654670   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 09:50:38.654687   369 solver.cpp:244]     Train net output #1: loss = 7.83043 (* 1 = 7.83043 loss)\nI1207 09:50:38.728366   369 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1207 09:50:38.738526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38449 > 2) by scale factor 0.590932\nI1207 09:50:39.678500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49466 > 2) by scale factor 0.801713\nI1207 09:50:40.618099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98086 > 2) by scale factor 0.670948\nI1207 09:50:41.558045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60842 > 2) by scale factor 0.766749\nI1207 09:50:42.497452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.73875 > 2) by scale factor 0.534938\nI1207 09:50:43.436861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47363 > 2) by scale factor 0.447065\nI1207 09:50:44.376461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3719 > 2) by scale factor 0.593137\nI1207 09:50:45.316035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45966 > 2) by scale factor 0.81312\nI1207 09:50:46.256122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64432 > 2) by scale factor 0.756337\nI1207 09:50:47.196002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18921 > 2) by scale factor 0.913574\nI1207 09:50:48.136098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09015 > 2) by scale factor 0.48898\nI1207 09:50:49.076755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11816 > 2) by scale factor 0.944217\nI1207 09:50:50.954722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65481 > 2) by scale factor 0.547223\nI1207 09:50:51.895134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26155 > 2) by scale factor 0.469313\nI1207 09:50:52.835564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06836 > 2) by scale factor 0.651815\nI1207 09:50:53.775632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11381 > 2) by scale factor 0.946159\nI1207 09:50:54.715806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28316 > 2) by scale factor 0.609169\nI1207 09:50:55.656261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26409 > 2) by scale factor 0.883357\nI1207 09:50:56.596515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54584 > 2) by scale factor 0.564041\nI1207 09:50:57.536499   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78758 > 2) by scale factor 0.528042\nI1207 09:50:58.476631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96767 > 2) by scale factor 0.673929\nI1207 09:51:00.354107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40013 > 2) by scale factor 0.83329\nI1207 09:51:01.294399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99339 > 2) by scale factor 0.500827\nI1207 09:51:02.234760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52676 > 2) by scale factor 0.567093\nI1207 09:51:03.174883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80542 > 2) by scale factor 0.712905\nI1207 09:51:04.114928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47887 > 2) by scale factor 0.806818\nI1207 09:51:05.054947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71419 > 2) by scale factor 0.736869\nI1207 09:51:05.994896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01873 > 2) by scale factor 0.662531\nI1207 09:51:06.934998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07589 > 2) by scale factor 0.650218\nI1207 09:51:07.874722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81741 > 2) by scale factor 0.709871\nI1207 09:51:08.815485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16049 > 2) by scale factor 0.632813\nI1207 09:51:09.756747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27685 > 2) by scale factor 0.610343\nI1207 09:51:10.700146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27266 > 2) by scale factor 0.468092\nI1207 09:51:11.643795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.1371 > 2) by scale factor 0.637531\nI1207 09:51:12.586918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87833 > 2) by scale factor 0.515686\nI1207 09:51:13.530349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94941 > 2) by scale factor 0.678103\nI1207 09:51:14.473386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4365 > 2) by scale factor 0.581987\nI1207 09:51:15.416852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2009 > 2) by scale factor 0.624824\nI1207 09:51:16.360208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43283 > 2) by scale factor 0.58261\nI1207 09:51:17.303262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00606 > 2) by scale factor 0.499244\nI1207 09:51:18.246428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69618 > 2) by scale factor 0.741791\nI1207 09:51:19.189555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64767 > 2) by scale factor 0.75538\nI1207 09:51:20.132608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96508 > 2) by scale factor 0.504403\nI1207 09:51:21.076102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29992 > 2) by scale factor 0.606075\nI1207 09:51:22.019328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13255 > 2) by scale factor 0.638458\nI1207 09:51:22.962608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88401 > 2) by scale factor 0.514932\nI1207 09:51:23.905712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54846 > 2) by scale factor 0.784787\nI1207 09:51:24.849076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25264 > 2) by scale factor 0.614886\nI1207 09:51:25.792129   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21047 > 2) by scale factor 0.622962\nI1207 09:51:26.735270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31118 > 2) by scale factor 0.46391\nI1207 09:51:27.678341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28736 > 2) by scale factor 0.87437\nI1207 09:51:28.621459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57241 > 2) by scale factor 0.559847\nI1207 09:51:29.564527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58565 > 2) by scale factor 0.557779\nI1207 09:51:30.507560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2036 > 2) by scale factor 0.624298\nI1207 09:51:31.450553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03638 > 2) by scale factor 0.495494\nI1207 09:51:32.393792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32348 > 2) by scale factor 0.860777\nI1207 09:51:33.336843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00174 > 2) by scale factor 0.499783\nI1207 09:51:34.280148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98319 > 2) by scale factor 0.50211\nI1207 09:51:35.223390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.793 > 2) by scale factor 0.716076\nI1207 09:51:36.166573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46216 > 2) by scale factor 0.577674\nI1207 09:51:37.109736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75958 > 2) by scale factor 0.724749\nI1207 09:51:38.052531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36036 > 2) by scale factor 0.458678\nI1207 09:51:38.996845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.71652 > 2) by scale factor 0.424042\nI1207 09:51:39.939934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1056 > 2) by scale factor 0.643998\nI1207 09:51:40.883360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74449 > 2) by scale factor 0.728732\nI1207 09:51:41.826397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6712 > 2) by scale factor 0.54478\nI1207 09:51:42.769076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36319 > 2) by scale factor 0.594674\nI1207 09:51:43.712491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90909 > 2) by scale factor 0.511628\nI1207 09:51:44.655514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28997 > 2) by scale factor 0.607907\nI1207 09:51:45.599004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52559 > 2) by scale factor 0.791894\nI1207 09:51:46.542240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1701 > 2) by scale factor 0.630895\nI1207 09:51:47.485321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09114 > 2) by scale factor 0.647011\nI1207 09:51:48.428850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04516 > 2) by scale factor 0.65678\nI1207 09:51:49.371865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5821 > 2) by scale factor 0.558332\nI1207 09:51:50.315459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56931 > 2) by scale factor 0.778419\nI1207 09:51:51.258821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60528 > 2) by scale factor 0.554742\nI1207 09:51:52.201864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94963 > 2) by scale factor 0.404071\nI1207 09:51:53.145256   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8747 > 2) by scale factor 0.51617\nI1207 09:51:54.088635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96996 > 2) by scale factor 0.673409\nI1207 09:51:55.032048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0678 > 2) by scale factor 0.491667\nI1207 09:51:55.975340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28847 > 2) by scale factor 0.466367\nI1207 09:51:56.918496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66022 > 2) by scale factor 0.751816\nI1207 09:51:57.861718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9499 > 2) by scale factor 0.677989\nI1207 09:51:58.805094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56957 > 2) by scale factor 0.437678\nI1207 09:51:59.748499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10561 > 2) by scale factor 0.391726\nI1207 09:52:00.691972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2329 > 2) by scale factor 0.895698\nI1207 09:52:01.634958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51511 > 2) by scale factor 0.442957\nI1207 09:52:02.578614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6806 > 2) by scale factor 0.54339\nI1207 09:52:03.521708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83333 > 2) by scale factor 0.705883\nI1207 09:52:04.464421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54463 > 2) by scale factor 0.78597\nI1207 09:52:05.407435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81944 > 2) by scale factor 0.523637\nI1207 09:52:06.350312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.71656 > 2) by scale factor 0.538132\nI1207 09:52:07.293561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67198 > 2) by scale factor 0.428084\nI1207 09:52:08.236873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55506 > 2) by scale factor 0.439072\nI1207 09:52:10.120658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74325 > 2) by scale factor 0.729062\nI1207 09:52:11.064018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86902 > 2) by scale factor 0.697102\nI1207 09:52:12.007133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0645 > 2) by scale factor 0.968758\nI1207 09:52:12.019148   369 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1207 09:53:04.965546   369 solver.cpp:404]     Test net output #0: accuracy = 0.1516\nI1207 09:53:04.965953   369 solver.cpp:404]     Test net output #1: loss = 8.94246 (* 1 = 8.94246 loss)\nI1207 09:53:05.839376   369 solver.cpp:228] Iteration 10400, loss = 9.75605\nI1207 09:53:05.839434   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 09:53:05.839453   369 solver.cpp:244]     Train net output #1: loss = 9.75605 (* 1 = 9.75605 loss)\nI1207 09:53:05.918794   369 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1207 09:53:05.928990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29328 > 2) by scale factor 0.607296\nI1207 09:53:06.870283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72929 > 2) by scale factor 0.536295\nI1207 09:53:07.811322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70194 > 2) by scale factor 0.540257\nI1207 09:53:08.752182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15735 > 2) by scale factor 0.633443\nI1207 09:53:09.693235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.02224 > 2) by scale factor 0.497235\nI1207 09:53:10.634184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31472 > 2) by scale factor 0.60337\nI1207 09:53:11.574934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44736 > 2) by scale factor 0.817207\nI1207 09:53:12.515626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50121 > 2) by scale factor 0.799613\nI1207 09:53:13.456681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86475 > 2) by scale factor 0.698141\nI1207 09:53:14.397547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94473 > 2) by scale factor 0.67918\nI1207 09:53:15.338471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68759 > 2) by scale factor 0.54236\nI1207 09:53:17.217878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27304 > 2) by scale factor 0.879878\nI1207 09:53:18.158715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91282 > 2) by scale factor 0.68662\nI1207 09:53:19.099599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03732 > 2) by scale factor 0.495379\nI1207 09:53:20.040679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38525 > 2) by scale factor 0.456074\nI1207 09:53:20.981655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23633 > 2) by scale factor 0.894321\nI1207 09:53:21.922906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74777 > 2) by scale factor 0.533651\nI1207 09:53:22.863776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31936 > 2) by scale factor 0.602526\nI1207 09:53:23.804616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.568 > 2) by scale factor 0.437829\nI1207 09:53:24.745568   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33602 > 2) by scale factor 0.599516\nI1207 09:53:25.686645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15925 > 2) by scale factor 0.480856\nI1207 09:53:26.627517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45828 > 2) by scale factor 0.578323\nI1207 09:53:27.568276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1439 > 2) by scale factor 0.636153\nI1207 09:53:28.509038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2954 > 2) by scale factor 0.465614\nI1207 09:53:29.450187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98901 > 2) by scale factor 0.669119\nI1207 09:53:30.390970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70376 > 2) by scale factor 0.739711\nI1207 09:53:31.331511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4369 > 2) by scale factor 0.58192\nI1207 09:53:32.272686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99095 > 2) by scale factor 0.668684\nI1207 09:53:33.213788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6274 > 2) by scale factor 0.551359\nI1207 09:53:34.154896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46126 > 2) by scale factor 0.577824\nI1207 09:53:35.095541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9325 > 2) by scale factor 0.508582\nI1207 09:53:36.037057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2305 > 2) by scale factor 0.619098\nI1207 09:53:36.977896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21289 > 2) by scale factor 0.622492\nI1207 09:53:37.919757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.60249 > 2) by scale factor 0.555172\nI1207 09:53:38.862880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92457 > 2) by scale factor 0.683862\nI1207 09:53:39.805588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03777 > 2) by scale factor 0.495323\nI1207 09:53:40.747998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15746 > 2) by scale factor 0.481063\nI1207 09:53:41.691028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0701 > 2) by scale factor 0.491388\nI1207 09:53:42.633592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63713 > 2) by scale factor 0.549884\nI1207 09:53:43.576483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94794 > 2) by scale factor 0.678441\nI1207 09:53:44.519398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09603 > 2) by scale factor 0.645988\nI1207 09:53:45.462249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78306 > 2) by scale factor 0.528673\nI1207 09:53:46.405035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85064 > 2) by scale factor 0.701598\nI1207 09:53:47.347916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43854 > 2) by scale factor 0.820163\nI1207 09:53:48.290848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27787 > 2) by scale factor 0.878015\nI1207 09:53:49.233794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37573 > 2) by scale factor 0.592465\nI1207 09:53:50.176966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59938 > 2) by scale factor 0.769413\nI1207 09:53:51.119220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43816 > 2) by scale factor 0.820292\nI1207 09:53:53.001752   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46369 > 2) by scale factor 0.81179\nI1207 09:53:53.944007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31073 > 2) by scale factor 0.604096\nI1207 09:53:54.886965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22478 > 2) by scale factor 0.473397\nI1207 09:53:55.829604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62715 > 2) by scale factor 0.761281\nI1207 09:53:56.772847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54236 > 2) by scale factor 0.786669\nI1207 09:53:57.715363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59372 > 2) by scale factor 0.556526\nI1207 09:53:58.658393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98409 > 2) by scale factor 0.670221\nI1207 09:53:59.601675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32319 > 2) by scale factor 0.601832\nI1207 09:54:00.544759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32736 > 2) by scale factor 0.462175\nI1207 09:54:01.487612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49084 > 2) by scale factor 0.80294\nI1207 09:54:02.430493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74023 > 2) by scale factor 0.42192\nI1207 09:54:03.373267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13305 > 2) by scale factor 0.638356\nI1207 09:54:04.316165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91128 > 2) by scale factor 0.686983\nI1207 09:54:05.259165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50666 > 2) by scale factor 0.797874\nI1207 09:54:06.202548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.48829 > 2) by scale factor 0.573347\nI1207 09:54:08.086103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05724 > 2) by scale factor 0.654185\nI1207 09:54:09.029287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85342 > 2) by scale factor 0.519019\nI1207 09:54:09.971961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43651 > 2) by scale factor 0.820847\nI1207 09:54:10.914480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56704 > 2) by scale factor 0.779108\nI1207 09:54:11.856886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56942 > 2) by scale factor 0.560314\nI1207 09:54:12.799739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33857 > 2) by scale factor 0.460981\nI1207 09:54:13.742417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83014 > 2) by scale factor 0.522174\nI1207 09:54:14.685510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71227 > 2) by scale factor 0.538754\nI1207 09:54:15.627836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5371 > 2) by scale factor 0.788302\nI1207 09:54:16.570916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39028 > 2) by scale factor 0.836722\nI1207 09:54:17.513679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49766 > 2) by scale factor 0.800748\nI1207 09:54:18.456290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87387 > 2) by scale factor 0.695925\nI1207 09:54:19.398782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00032 > 2) by scale factor 0.49996\nI1207 09:54:20.341272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71489 > 2) by scale factor 0.736677\nI1207 09:54:21.284005   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8188 > 2) by scale factor 0.709522\nI1207 09:54:23.167667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45433 > 2) by scale factor 0.814885\nI1207 09:54:24.110669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14908 > 2) by scale factor 0.93063\nI1207 09:54:25.052922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49609 > 2) by scale factor 0.444831\nI1207 09:54:25.995571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92264 > 2) by scale factor 0.50986\nI1207 09:54:26.938498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62819 > 2) by scale factor 0.551239\nI1207 09:54:27.880970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06174 > 2) by scale factor 0.970054\nI1207 09:54:28.824041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.035 > 2) by scale factor 0.658979\nI1207 09:54:29.766458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44473 > 2) by scale factor 0.580598\nI1207 09:54:30.708717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04786 > 2) by scale factor 0.396208\nI1207 09:54:31.651015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67785 > 2) by scale factor 0.746867\nI1207 09:54:32.593538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58472 > 2) by scale factor 0.773777\nI1207 09:54:33.535866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41002 > 2) by scale factor 0.586506\nI1207 09:54:34.478636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2828 > 2) by scale factor 0.609236\nI1207 09:54:35.421643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.24859 > 2) by scale factor 0.889447\nI1207 09:54:36.365007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93249 > 2) by scale factor 0.682015\nI1207 09:54:37.307519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2392 > 2) by scale factor 0.617437\nI1207 09:54:38.249644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59367 > 2) by scale factor 0.556534\nI1207 09:54:39.192452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85406 > 2) by scale factor 0.700756\nI1207 09:54:39.204461   369 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1207 09:55:31.959339   369 solver.cpp:404]     Test net output #0: accuracy = 0.17885\nI1207 09:55:31.959743   369 solver.cpp:404]     Test net output #1: loss = 6.2049 (* 1 = 6.2049 loss)\nI1207 09:55:32.833861   369 solver.cpp:228] Iteration 10500, loss = 6.96589\nI1207 09:55:32.833914   369 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 09:55:32.833932   369 solver.cpp:244]     Train net output #1: loss = 6.96589 (* 1 = 6.96589 loss)\nI1207 09:55:32.904634   369 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1207 09:55:32.914775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66619 > 2) by scale factor 0.750135\nI1207 09:55:33.855861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01754 > 2) by scale factor 0.497817\nI1207 09:55:34.796777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85904 > 2) by scale factor 0.699535\nI1207 09:55:35.737568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62074 > 2) by scale factor 0.763143\nI1207 09:55:36.678524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85509 > 2) by scale factor 0.518795\nI1207 09:55:37.619221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.18638 > 2) by scale factor 0.627672\nI1207 09:55:38.560739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23691 > 2) by scale factor 0.472042\nI1207 09:55:39.501482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1171 > 2) by scale factor 0.641622\nI1207 09:55:40.442312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00457 > 2) by scale factor 0.997722\nI1207 09:55:41.383278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68876 > 2) by scale factor 0.542188\nI1207 09:55:42.324388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18923 > 2) by scale factor 0.477415\nI1207 09:55:43.265435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34225 > 2) by scale factor 0.598399\nI1207 09:55:44.206279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79476 > 2) by scale factor 0.715624\nI1207 09:55:45.146934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91609 > 2) by scale factor 0.685849\nI1207 09:55:46.088184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18897 > 2) by scale factor 0.477444\nI1207 09:55:47.028862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85855 > 2) by scale factor 0.51833\nI1207 09:55:47.969887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37195 > 2) by scale factor 0.593128\nI1207 09:55:48.910914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98047 > 2) by scale factor 0.671034\nI1207 09:55:49.852421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16728 > 2) by scale factor 0.631457\nI1207 09:55:50.793351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74638 > 2) by scale factor 0.728231\nI1207 09:55:51.734300   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36826 > 2) by scale factor 0.593778\nI1207 09:55:52.674842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13713 > 2) by scale factor 0.483427\nI1207 09:55:53.616113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.497 > 2) by scale factor 0.800962\nI1207 09:55:54.557036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42483 > 2) by scale factor 0.824799\nI1207 09:55:55.497895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76672 > 2) by scale factor 0.530966\nI1207 09:55:56.438937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41624 > 2) by scale factor 0.827731\nI1207 09:55:57.380352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7542 > 2) by scale factor 0.726165\nI1207 09:55:58.321485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00808 > 2) by scale factor 0.498991\nI1207 09:55:59.261930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0084 > 2) by scale factor 0.664806\nI1207 09:56:00.202340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94421 > 2) by scale factor 0.679299\nI1207 09:56:01.143692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29058 > 2) by scale factor 0.607795\nI1207 09:56:02.084955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19744 > 2) by scale factor 0.6255\nI1207 09:56:03.026638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36967 > 2) by scale factor 0.843999\nI1207 09:56:03.969300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66787 > 2) by scale factor 0.428461\nI1207 09:56:04.911823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.05406 > 2) by scale factor 0.654866\nI1207 09:56:05.854476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63502 > 2) by scale factor 0.759009\nI1207 09:56:06.797216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76075 > 2) by scale factor 0.724442\nI1207 09:56:07.740248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39574 > 2) by scale factor 0.834817\nI1207 09:56:08.682864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19226 > 2) by scale factor 0.912299\nI1207 09:56:09.625191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54659 > 2) by scale factor 0.785365\nI1207 09:56:10.567780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03203 > 2) by scale factor 0.659625\nI1207 09:56:11.510408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51122 > 2) by scale factor 0.796425\nI1207 09:56:12.452864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65654 > 2) by scale factor 0.752858\nI1207 09:56:13.395067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41167 > 2) by scale factor 0.8293\nI1207 09:56:14.337227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58573 > 2) by scale factor 0.557767\nI1207 09:56:15.279937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45206 > 2) by scale factor 0.44923\nI1207 09:56:16.222936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6138 > 2) by scale factor 0.553435\nI1207 09:56:17.165899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86394 > 2) by scale factor 0.698339\nI1207 09:56:18.108502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20841 > 2) by scale factor 0.905627\nI1207 09:56:19.050482   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32642 > 2) by scale factor 0.859689\nI1207 09:56:19.992600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69467 > 2) by scale factor 0.742205\nI1207 09:56:20.935364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55696 > 2) by scale factor 0.562278\nI1207 09:56:21.877722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43509 > 2) by scale factor 0.821324\nI1207 09:56:22.819744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71448 > 2) by scale factor 0.736789\nI1207 09:56:23.762424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28533 > 2) by scale factor 0.608767\nI1207 09:56:24.705170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39602 > 2) by scale factor 0.834719\nI1207 09:56:25.647531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76517 > 2) by scale factor 0.531185\nI1207 09:56:26.590188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59529 > 2) by scale factor 0.556284\nI1207 09:56:27.533109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10879 > 2) by scale factor 0.643337\nI1207 09:56:28.475595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3825 > 2) by scale factor 0.839455\nI1207 09:56:29.418393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97403 > 2) by scale factor 0.503267\nI1207 09:56:30.361027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49735 > 2) by scale factor 0.571861\nI1207 09:56:31.303634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50705 > 2) by scale factor 0.443749\nI1207 09:56:32.246385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.90817 > 2) by scale factor 0.687718\nI1207 09:56:33.189079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39111 > 2) by scale factor 0.836432\nI1207 09:56:34.131997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82328 > 2) by scale factor 0.708396\nI1207 09:56:35.074447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12665 > 2) by scale factor 0.639663\nI1207 09:56:36.016474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74569 > 2) by scale factor 0.728413\nI1207 09:56:36.958025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63603 > 2) by scale factor 0.55005\nI1207 09:56:37.900281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21207 > 2) by scale factor 0.904132\nI1207 09:56:38.841770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01789 > 2) by scale factor 0.662715\nI1207 09:56:39.784204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25254 > 2) by scale factor 0.614905\nI1207 09:56:40.726541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41802 > 2) by scale factor 0.585134\nI1207 09:56:41.668893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81492 > 2) by scale factor 0.524257\nI1207 09:56:42.611790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63121 > 2) by scale factor 0.55078\nI1207 09:56:43.554219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2114 > 2) by scale factor 0.622781\nI1207 09:56:44.496151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15362 > 2) by scale factor 0.634192\nI1207 09:56:45.438592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06415 > 2) by scale factor 0.492108\nI1207 09:56:46.381150   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53599 > 2) by scale factor 0.565613\nI1207 09:56:47.323783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75571 > 2) by scale factor 0.532523\nI1207 09:56:48.266357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86179 > 2) by scale factor 0.517895\nI1207 09:56:49.209224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08347 > 2) by scale factor 0.648619\nI1207 09:56:50.151628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07188 > 2) by scale factor 0.651067\nI1207 09:56:51.094151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24221 > 2) by scale factor 0.891977\nI1207 09:56:52.036721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31087 > 2) by scale factor 0.865475\nI1207 09:56:52.979270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07785 > 2) by scale factor 0.962534\nI1207 09:56:54.861363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95803 > 2) by scale factor 0.676126\nI1207 09:56:55.803705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46374 > 2) by scale factor 0.811774\nI1207 09:56:56.745981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45853 > 2) by scale factor 0.813496\nI1207 09:56:58.628568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36514 > 2) by scale factor 0.845616\nI1207 09:56:59.570670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07143 > 2) by scale factor 0.651163\nI1207 09:57:00.513034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67692 > 2) by scale factor 0.543934\nI1207 09:57:01.455253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.52533 > 2) by scale factor 0.567322\nI1207 09:57:02.397817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09072 > 2) by scale factor 0.488912\nI1207 09:57:03.340579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34319 > 2) by scale factor 0.853537\nI1207 09:57:04.283041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85236 > 2) by scale factor 0.701174\nI1207 09:57:05.225594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08622 > 2) by scale factor 0.648041\nI1207 09:57:06.167752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6679 > 2) by scale factor 0.545271\nI1207 09:57:06.179729   369 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1207 09:57:59.034333   369 solver.cpp:404]     Test net output #0: accuracy = 0.12755\nI1207 09:57:59.034744   369 solver.cpp:404]     Test net output #1: loss = 15.3956 (* 1 = 15.3956 loss)\nI1207 09:57:59.908659   369 solver.cpp:228] Iteration 10600, loss = 16.0279\nI1207 09:57:59.908717   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 09:57:59.908735   369 solver.cpp:244]     Train net output #1: loss = 16.0279 (* 1 = 16.0279 loss)\nI1207 09:57:59.979305   369 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1207 09:57:59.989452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81857 > 2) by scale factor 0.523757\nI1207 09:58:00.929867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31451 > 2) by scale factor 0.463552\nI1207 09:58:01.871016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22708 > 2) by scale factor 0.898038\nI1207 09:58:02.812091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78717 > 2) by scale factor 0.528099\nI1207 09:58:03.753145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.05565 > 2) by scale factor 0.654526\nI1207 09:58:04.694223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11051 > 2) by scale factor 0.642982\nI1207 09:58:05.635067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52985 > 2) by scale factor 0.790562\nI1207 09:58:06.576171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2599 > 2) by scale factor 0.613516\nI1207 09:58:07.517313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29967 > 2) by scale factor 0.465152\nI1207 09:58:08.458333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3813 > 2) by scale factor 0.591489\nI1207 09:58:09.398865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96585 > 2) by scale factor 0.674343\nI1207 09:58:10.340039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10601 > 2) by scale factor 0.487091\nI1207 09:58:11.281211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96862 > 2) by scale factor 0.503954\nI1207 09:58:12.222005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30627 > 2) by scale factor 0.8672\nI1207 09:58:13.162873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02767 > 2) by scale factor 0.660573\nI1207 09:58:14.104180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71275 > 2) by scale factor 0.73726\nI1207 09:58:15.045132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5375 > 2) by scale factor 0.565371\nI1207 09:58:15.986491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11697 > 2) by scale factor 0.944745\nI1207 09:58:16.927757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04428 > 2) by scale factor 0.656971\nI1207 09:58:17.868793   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96771 > 2) by scale factor 0.504069\nI1207 09:58:18.810030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19658 > 2) by scale factor 0.910508\nI1207 09:58:19.751201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07565 > 2) by scale factor 0.650269\nI1207 09:58:21.630916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65204 > 2) by scale factor 0.547639\nI1207 09:58:22.571606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71855 > 2) by scale factor 0.537844\nI1207 09:58:23.512621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96467 > 2) by scale factor 0.674612\nI1207 09:58:24.453225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88789 > 2) by scale factor 0.514418\nI1207 09:58:25.393496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1226 > 2) by scale factor 0.942239\nI1207 09:58:26.334494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85646 > 2) by scale factor 0.700168\nI1207 09:58:27.275431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09468 > 2) by scale factor 0.954801\nI1207 09:58:28.216684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10702 > 2) by scale factor 0.486971\nI1207 09:58:29.158002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35498 > 2) by scale factor 0.849263\nI1207 09:58:30.099613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81457 > 2) by scale factor 0.710589\nI1207 09:58:31.040813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45636 > 2) by scale factor 0.814214\nI1207 09:58:31.984429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.99234 > 2) by scale factor 0.668372\nI1207 09:58:32.928009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82813 > 2) by scale factor 0.70718\nI1207 09:58:33.871642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81736 > 2) by scale factor 0.709885\nI1207 09:58:34.815718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81227 > 2) by scale factor 0.711168\nI1207 09:58:35.759706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42968 > 2) by scale factor 0.823154\nI1207 09:58:36.703578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18313 > 2) by scale factor 0.478111\nI1207 09:58:37.647294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31831 > 2) by scale factor 0.602716\nI1207 09:58:38.591358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09377 > 2) by scale factor 0.646461\nI1207 09:58:41.418130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29306 > 2) by scale factor 0.872198\nI1207 09:58:42.361357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19271 > 2) by scale factor 0.912113\nI1207 09:58:43.304673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67566 > 2) by scale factor 0.54412\nI1207 09:58:44.248235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3826 > 2) by scale factor 0.83942\nI1207 09:58:45.192080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30902 > 2) by scale factor 0.866168\nI1207 09:58:46.135582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00489 > 2) by scale factor 0.49939\nI1207 09:58:47.078857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55298 > 2) by scale factor 0.783398\nI1207 09:58:48.022207   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03124 > 2) by scale factor 0.659796\nI1207 09:58:48.965896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42174 > 2) by scale factor 0.584498\nI1207 09:58:49.909535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21653 > 2) by scale factor 0.621788\nI1207 09:58:50.853019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32781 > 2) by scale factor 0.462128\nI1207 09:58:51.796315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11273 > 2) by scale factor 0.486295\nI1207 09:58:52.740087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33757 > 2) by scale factor 0.855591\nI1207 09:58:53.683683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75753 > 2) by scale factor 0.725286\nI1207 09:58:54.627351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39454 > 2) by scale factor 0.835234\nI1207 09:58:55.570667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35974 > 2) by scale factor 0.847551\nI1207 09:58:56.514086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65654 > 2) by scale factor 0.546965\nI1207 09:58:57.457830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70279 > 2) by scale factor 0.540133\nI1207 09:58:58.401517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42312 > 2) by scale factor 0.452169\nI1207 09:58:59.345396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52675 > 2) by scale factor 0.791529\nI1207 09:59:00.289680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87395 > 2) by scale factor 0.695907\nI1207 09:59:01.233355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.24975 > 2) by scale factor 0.615433\nI1207 09:59:02.176882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36346 > 2) by scale factor 0.846216\nI1207 09:59:03.120476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14229 > 2) by scale factor 0.636479\nI1207 09:59:04.064285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71788 > 2) by scale factor 0.735868\nI1207 09:59:05.007972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13575 > 2) by scale factor 0.936439\nI1207 09:59:05.951721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02129 > 2) by scale factor 0.497353\nI1207 09:59:06.895267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32217 > 2) by scale factor 0.602016\nI1207 09:59:07.838574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24109 > 2) by scale factor 0.617076\nI1207 09:59:08.782923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18468 > 2) by scale factor 0.628006\nI1207 09:59:09.726598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81089 > 2) by scale factor 0.524812\nI1207 09:59:10.670161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30142 > 2) by scale factor 0.464962\nI1207 09:59:11.614163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36556 > 2) by scale factor 0.594254\nI1207 09:59:12.557524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41088 > 2) by scale factor 0.829573\nI1207 09:59:13.501118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25268 > 2) by scale factor 0.887831\nI1207 09:59:14.445118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19979 > 2) by scale factor 0.625041\nI1207 09:59:16.330127   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65771 > 2) by scale factor 0.752526\nI1207 09:59:17.273200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66306 > 2) by scale factor 0.545991\nI1207 09:59:18.217031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22016 > 2) by scale factor 0.621088\nI1207 09:59:19.160930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72216 > 2) by scale factor 0.734711\nI1207 09:59:20.104339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37211 > 2) by scale factor 0.843132\nI1207 09:59:21.047303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64984 > 2) by scale factor 0.754764\nI1207 09:59:21.990869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78939 > 2) by scale factor 0.52779\nI1207 09:59:22.934240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4356 > 2) by scale factor 0.582141\nI1207 09:59:23.877888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44535 > 2) by scale factor 0.580493\nI1207 09:59:25.762866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48767 > 2) by scale factor 0.573449\nI1207 09:59:26.706233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23223 > 2) by scale factor 0.472564\nI1207 09:59:27.650153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19004 > 2) by scale factor 0.626951\nI1207 09:59:28.593806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26447 > 2) by scale factor 0.612657\nI1207 09:59:29.537436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87973 > 2) by scale factor 0.69451\nI1207 09:59:30.481506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.36308 > 2) by scale factor 0.846354\nI1207 09:59:31.424986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51384 > 2) by scale factor 0.795595\nI1207 09:59:32.368491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15333 > 2) by scale factor 0.634249\nI1207 09:59:33.312644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83454 > 2) by scale factor 0.705581\nI1207 09:59:33.324663   369 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1207 10:00:26.254230   369 solver.cpp:404]     Test net output #0: accuracy = 0.1427\nI1207 10:00:26.254621   369 solver.cpp:404]     Test net output #1: loss = 8.60713 (* 1 = 8.60713 loss)\nI1207 10:00:27.128566   369 solver.cpp:228] Iteration 10700, loss = 8.18756\nI1207 10:00:27.128625   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 10:00:27.128645   369 solver.cpp:244]     Train net output #1: loss = 8.18756 (* 1 = 8.18756 loss)\nI1207 10:00:27.201580   369 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1207 10:00:27.211735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68509 > 2) by scale factor 0.542728\nI1207 10:00:28.152720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27374 > 2) by scale factor 0.879607\nI1207 10:00:29.093674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.759 > 2) by scale factor 0.7249\nI1207 10:00:30.034221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1592 > 2) by scale factor 0.633071\nI1207 10:00:30.975349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60321 > 2) by scale factor 0.555061\nI1207 10:00:31.916354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32877 > 2) by scale factor 0.600822\nI1207 10:00:32.857003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.40832 > 2) by scale factor 0.830454\nI1207 10:00:33.797734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38427 > 2) by scale factor 0.590969\nI1207 10:00:34.738800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56397 > 2) by scale factor 0.78004\nI1207 10:00:36.618515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05329 > 2) by scale factor 0.655032\nI1207 10:00:37.559131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69966 > 2) by scale factor 0.740834\nI1207 10:00:38.500334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23986 > 2) by scale factor 0.892912\nI1207 10:00:39.441220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74966 > 2) by scale factor 0.533381\nI1207 10:00:40.382062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02191 > 2) by scale factor 0.989164\nI1207 10:00:41.323009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30881 > 2) by scale factor 0.604447\nI1207 10:00:42.263927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9737 > 2) by scale factor 0.503309\nI1207 10:00:43.204876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28287 > 2) by scale factor 0.87609\nI1207 10:00:44.145563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08051 > 2) by scale factor 0.9613\nI1207 10:00:45.086526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71916 > 2) by scale factor 0.537756\nI1207 10:00:46.027570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08868 > 2) by scale factor 0.489155\nI1207 10:00:46.968720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88682 > 2) by scale factor 0.692804\nI1207 10:00:47.909610   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65815 > 2) by scale factor 0.546724\nI1207 10:00:48.850688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45654 > 2) by scale factor 0.814154\nI1207 10:00:49.792042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87976 > 2) by scale factor 0.409856\nI1207 10:00:50.733026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06375 > 2) by scale factor 0.492157\nI1207 10:00:51.673856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91923 > 2) by scale factor 0.685113\nI1207 10:00:52.614437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58927 > 2) by scale factor 0.772419\nI1207 10:00:54.493619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53648 > 2) by scale factor 0.788494\nI1207 10:00:55.433732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13341 > 2) by scale factor 0.638282\nI1207 10:00:56.374574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.061 > 2) by scale factor 0.653381\nI1207 10:00:58.258378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10882 > 2) by scale factor 0.64333\nI1207 10:00:59.200881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39014 > 2) by scale factor 0.455566\nI1207 10:01:00.143564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00207 > 2) by scale factor 0.998967\nI1207 10:01:01.086503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6509 > 2) by scale factor 0.754459\nI1207 10:01:02.029532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44417 > 2) by scale factor 0.580692\nI1207 10:01:02.972784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.34846 > 2) by scale factor 0.851622\nI1207 10:01:03.915359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07033 > 2) by scale factor 0.491361\nI1207 10:01:04.857465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60221 > 2) by scale factor 0.555215\nI1207 10:01:05.800431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65715 > 2) by scale factor 0.546874\nI1207 10:01:06.743649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45571 > 2) by scale factor 0.448862\nI1207 10:01:07.686512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39317 > 2) by scale factor 0.835711\nI1207 10:01:08.629469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98267 > 2) by scale factor 0.67054\nI1207 10:01:09.572114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19287 > 2) by scale factor 0.626396\nI1207 10:01:10.514760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54584 > 2) by scale factor 0.785597\nI1207 10:01:11.457341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33663 > 2) by scale factor 0.855933\nI1207 10:01:12.400192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97979 > 2) by scale factor 0.671189\nI1207 10:01:13.343006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43868 > 2) by scale factor 0.581619\nI1207 10:01:14.285598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80235 > 2) by scale factor 0.713686\nI1207 10:01:15.228482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60824 > 2) by scale factor 0.554287\nI1207 10:01:16.171641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31015 > 2) by scale factor 0.865744\nI1207 10:01:17.114637   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85367 > 2) by scale factor 0.518985\nI1207 10:01:18.057066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69876 > 2) by scale factor 0.741082\nI1207 10:01:18.999096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19327 > 2) by scale factor 0.476955\nI1207 10:01:20.882221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95456 > 2) by scale factor 0.505746\nI1207 10:01:21.825065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46657 > 2) by scale factor 0.576939\nI1207 10:01:22.767719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07071 > 2) by scale factor 0.651315\nI1207 10:01:23.710363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11867 > 2) by scale factor 0.641298\nI1207 10:01:24.653467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63512 > 2) by scale factor 0.550188\nI1207 10:01:25.596463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41988 > 2) by scale factor 0.584817\nI1207 10:01:27.480152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2854 > 2) by scale factor 0.608753\nI1207 10:01:28.423475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14773 > 2) by scale factor 0.482191\nI1207 10:01:29.366518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80527 > 2) by scale factor 0.525588\nI1207 10:01:30.309160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34777 > 2) by scale factor 0.597413\nI1207 10:01:31.252048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55884 > 2) by scale factor 0.56198\nI1207 10:01:32.194947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.24921 > 2) by scale factor 0.615533\nI1207 10:01:33.137905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8485 > 2) by scale factor 0.519683\nI1207 10:01:34.080348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50454 > 2) by scale factor 0.570688\nI1207 10:01:35.022675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76145 > 2) by scale factor 0.724256\nI1207 10:01:35.965461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08867 > 2) by scale factor 0.957546\nI1207 10:01:36.907959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58344 > 2) by scale factor 0.558123\nI1207 10:01:37.850911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24696 > 2) by scale factor 0.890091\nI1207 10:01:38.793866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04286 > 2) by scale factor 0.494699\nI1207 10:01:39.736531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6093 > 2) by scale factor 0.554124\nI1207 10:01:40.679052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0215 > 2) by scale factor 0.661923\nI1207 10:01:41.621698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18869 > 2) by scale factor 0.477476\nI1207 10:01:42.564812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51131 > 2) by scale factor 0.44333\nI1207 10:01:43.507416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3284 > 2) by scale factor 0.60089\nI1207 10:01:44.450067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26787 > 2) by scale factor 0.61202\nI1207 10:01:45.392446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31335 > 2) by scale factor 0.463676\nI1207 10:01:46.335561   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52141 > 2) by scale factor 0.793208\nI1207 10:01:47.278328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95202 > 2) by scale factor 0.677503\nI1207 10:01:48.221323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2914 > 2) by scale factor 0.607644\nI1207 10:01:49.164376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90402 > 2) by scale factor 0.512293\nI1207 10:01:50.106640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08135 > 2) by scale factor 0.393596\nI1207 10:01:51.048899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89864 > 2) by scale factor 0.408277\nI1207 10:01:51.991629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3033 > 2) by scale factor 0.868319\nI1207 10:01:52.934337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65204 > 2) by scale factor 0.429919\nI1207 10:01:53.876844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4576 > 2) by scale factor 0.813803\nI1207 10:01:54.819025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09449 > 2) by scale factor 0.488461\nI1207 10:01:55.761703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.274 > 2) by scale factor 0.379219\nI1207 10:01:56.704339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66301 > 2) by scale factor 0.428907\nI1207 10:01:57.646494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72622 > 2) by scale factor 0.423171\nI1207 10:01:58.589246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53439 > 2) by scale factor 0.441073\nI1207 10:01:59.531922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.69445 > 2) by scale factor 0.426035\nI1207 10:02:00.474488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32706 > 2) by scale factor 0.601132\nI1207 10:02:00.486490   369 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1207 10:02:53.392688   369 solver.cpp:404]     Test net output #0: accuracy = 0.1586\nI1207 10:02:53.393101   369 solver.cpp:404]     Test net output #1: loss = 17.708 (* 1 = 17.708 loss)\nI1207 10:02:54.267447   369 solver.cpp:228] Iteration 10800, loss = 15.9763\nI1207 10:02:54.267499   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 10:02:54.267518   369 solver.cpp:244]     Train net output #1: loss = 15.9763 (* 1 = 15.9763 loss)\nI1207 10:02:54.338383   369 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1207 10:02:54.348523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14673 > 2) by scale factor 0.482308\nI1207 10:02:55.289041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45417 > 2) by scale factor 0.57901\nI1207 10:02:56.230319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22295 > 2) by scale factor 0.620549\nI1207 10:02:57.170861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80259 > 2) by scale factor 0.713626\nI1207 10:02:58.111609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91185 > 2) by scale factor 0.686848\nI1207 10:02:59.052572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71292 > 2) by scale factor 0.424365\nI1207 10:02:59.993669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28696 > 2) by scale factor 0.608466\nI1207 10:03:00.934732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77179 > 2) by scale factor 0.530253\nI1207 10:03:01.875818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.00814 > 2) by scale factor 0.664862\nI1207 10:03:02.816573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09837 > 2) by scale factor 0.645501\nI1207 10:03:03.757544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20871 > 2) by scale factor 0.905508\nI1207 10:03:04.698221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43216 > 2) by scale factor 0.451248\nI1207 10:03:05.639076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95947 > 2) by scale factor 0.403269\nI1207 10:03:06.579970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52549 > 2) by scale factor 0.567296\nI1207 10:03:07.520743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65421 > 2) by scale factor 0.547314\nI1207 10:03:08.461599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44992 > 2) by scale factor 0.449446\nI1207 10:03:09.401769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3125 > 2) by scale factor 0.463768\nI1207 10:03:10.342890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39605 > 2) by scale factor 0.588919\nI1207 10:03:11.283939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81865 > 2) by scale factor 0.523745\nI1207 10:03:12.224818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27973 > 2) by scale factor 0.877296\nI1207 10:03:13.165576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87375 > 2) by scale factor 0.695955\nI1207 10:03:14.106576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64705 > 2) by scale factor 0.755557\nI1207 10:03:15.047358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77346 > 2) by scale factor 0.72112\nI1207 10:03:15.988529   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06122 > 2) by scale factor 0.653335\nI1207 10:03:16.929195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57603 > 2) by scale factor 0.55928\nI1207 10:03:17.870156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96769 > 2) by scale factor 0.673925\nI1207 10:03:18.810969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04294 > 2) by scale factor 0.65726\nI1207 10:03:19.752112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66671 > 2) by scale factor 0.749987\nI1207 10:03:20.693214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20344 > 2) by scale factor 0.62433\nI1207 10:03:21.634097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24295 > 2) by scale factor 0.616722\nI1207 10:03:22.575184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88697 > 2) by scale factor 0.692768\nI1207 10:03:23.516448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10731 > 2) by scale factor 0.643644\nI1207 10:03:25.396672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17188 > 2) by scale factor 0.63054\nI1207 10:03:26.339432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97256 > 2) by scale factor 0.503453\nI1207 10:03:27.282284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15194 > 2) by scale factor 0.481703\nI1207 10:03:28.224859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43458 > 2) by scale factor 0.582312\nI1207 10:03:29.167881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75411 > 2) by scale factor 0.726187\nI1207 10:03:30.110875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.72681 > 2) by scale factor 0.733457\nI1207 10:03:31.053822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03496 > 2) by scale factor 0.658987\nI1207 10:03:31.996865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95518 > 2) by scale factor 0.676779\nI1207 10:03:32.939685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2711 > 2) by scale factor 0.468263\nI1207 10:03:33.882800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71565 > 2) by scale factor 0.736472\nI1207 10:03:34.825955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17923 > 2) by scale factor 0.629083\nI1207 10:03:35.768712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54461 > 2) by scale factor 0.564238\nI1207 10:03:36.711339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60331 > 2) by scale factor 0.555046\nI1207 10:03:37.654034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74029 > 2) by scale factor 0.72985\nI1207 10:03:38.597121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11831 > 2) by scale factor 0.944147\nI1207 10:03:39.539526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23121 > 2) by scale factor 0.896373\nI1207 10:03:41.422531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47137 > 2) by scale factor 0.809267\nI1207 10:03:42.365331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63391 > 2) by scale factor 0.550371\nI1207 10:03:43.308044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35111 > 2) by scale factor 0.459653\nI1207 10:03:44.251006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4528 > 2) by scale factor 0.579241\nI1207 10:03:45.193625   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98342 > 2) by scale factor 0.502081\nI1207 10:03:46.136095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83297 > 2) by scale factor 0.705974\nI1207 10:03:47.078706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65117 > 2) by scale factor 0.754384\nI1207 10:03:48.021368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25486 > 2) by scale factor 0.886972\nI1207 10:03:48.963901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29795 > 2) by scale factor 0.465338\nI1207 10:03:49.906730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18858 > 2) by scale factor 0.477488\nI1207 10:03:50.849339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20252 > 2) by scale factor 0.624509\nI1207 10:03:51.792541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39283 > 2) by scale factor 0.83583\nI1207 10:03:52.735410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49964 > 2) by scale factor 0.44448\nI1207 10:03:53.677845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99928 > 2) by scale factor 0.666828\nI1207 10:03:54.620564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19142 > 2) by scale factor 0.62668\nI1207 10:03:55.562991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89918 > 2) by scale factor 0.512928\nI1207 10:03:56.505776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32716 > 2) by scale factor 0.601113\nI1207 10:03:57.448611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2076 > 2) by scale factor 0.905961\nI1207 10:03:58.390542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.98316 > 2) by scale factor 0.670431\nI1207 10:03:59.332806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25771 > 2) by scale factor 0.469736\nI1207 10:04:00.275324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29236 > 2) by scale factor 0.872463\nI1207 10:04:01.218086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02223 > 2) by scale factor 0.497237\nI1207 10:04:02.160699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42952 > 2) by scale factor 0.583173\nI1207 10:04:03.103428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48177 > 2) by scale factor 0.446252\nI1207 10:04:04.045855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99208 > 2) by scale factor 0.668431\nI1207 10:04:04.988338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15778 > 2) by scale factor 0.926878\nI1207 10:04:05.931072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20179 > 2) by scale factor 0.908352\nI1207 10:04:06.873646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03687 > 2) by scale factor 0.658572\nI1207 10:04:07.816139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91788 > 2) by scale factor 0.68543\nI1207 10:04:09.699259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44286 > 2) by scale factor 0.450161\nI1207 10:04:10.641926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15829 > 2) by scale factor 0.480967\nI1207 10:04:11.584714   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89225 > 2) by scale factor 0.691502\nI1207 10:04:12.527210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73649 > 2) by scale factor 0.730863\nI1207 10:04:13.469794   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66795 > 2) by scale factor 0.545263\nI1207 10:04:14.412317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11181 > 2) by scale factor 0.642712\nI1207 10:04:15.354827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45194 > 2) by scale factor 0.579384\nI1207 10:04:16.296952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13776 > 2) by scale factor 0.935557\nI1207 10:04:17.239115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19235 > 2) by scale factor 0.626498\nI1207 10:04:18.181087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27625 > 2) by scale factor 0.610455\nI1207 10:04:19.123332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31172 > 2) by scale factor 0.865158\nI1207 10:04:20.065353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73814 > 2) by scale factor 0.730424\nI1207 10:04:21.007875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2266 > 2) by scale factor 0.89823\nI1207 10:04:21.950356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61652 > 2) by scale factor 0.553017\nI1207 10:04:22.892771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21725 > 2) by scale factor 0.902017\nI1207 10:04:23.835402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34995 > 2) by scale factor 0.597024\nI1207 10:04:24.778028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11068 > 2) by scale factor 0.486537\nI1207 10:04:25.720546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86164 > 2) by scale factor 0.698899\nI1207 10:04:26.663817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.21526 > 2) by scale factor 0.474466\nI1207 10:04:27.606585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16446 > 2) by scale factor 0.632019\nI1207 10:04:27.618592   369 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1207 10:05:20.565816   369 solver.cpp:404]     Test net output #0: accuracy = 0.1941\nI1207 10:05:20.566201   369 solver.cpp:404]     Test net output #1: loss = 9.27421 (* 1 = 9.27421 loss)\nI1207 10:05:21.443125   369 solver.cpp:228] Iteration 10900, loss = 9.07058\nI1207 10:05:21.443179   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 10:05:21.443198   369 solver.cpp:244]     Train net output #1: loss = 9.07058 (* 1 = 9.07058 loss)\nI1207 10:05:21.515434   369 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1207 10:05:21.525537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48953 > 2) by scale factor 0.803363\nI1207 10:05:22.466971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38577 > 2) by scale factor 0.590708\nI1207 10:05:23.408308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42442 > 2) by scale factor 0.824941\nI1207 10:05:24.349633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55051 > 2) by scale factor 0.439512\nI1207 10:05:25.290796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50215 > 2) by scale factor 0.444232\nI1207 10:05:26.232502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07831 > 2) by scale factor 0.490399\nI1207 10:05:27.173732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56709 > 2) by scale factor 0.437916\nI1207 10:05:28.115264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89085 > 2) by scale factor 0.691839\nI1207 10:05:29.056599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.40423 > 2) by scale factor 0.587504\nI1207 10:05:29.997845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67613 > 2) by scale factor 0.747349\nI1207 10:05:30.939126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69435 > 2) by scale factor 0.742294\nI1207 10:05:31.880360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03411 > 2) by scale factor 0.659172\nI1207 10:05:32.821604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16831 > 2) by scale factor 0.631252\nI1207 10:05:33.763164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56454 > 2) by scale factor 0.43816\nI1207 10:05:34.704928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51048 > 2) by scale factor 0.796662\nI1207 10:05:35.646628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91529 > 2) by scale factor 0.510818\nI1207 10:05:36.588166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49324 > 2) by scale factor 0.80217\nI1207 10:05:37.529408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40408 > 2) by scale factor 0.83192\nI1207 10:05:38.471035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87942 > 2) by scale factor 0.694585\nI1207 10:05:39.412179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83799 > 2) by scale factor 0.704724\nI1207 10:05:40.353564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12078 > 2) by scale factor 0.943048\nI1207 10:05:42.233353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41289 > 2) by scale factor 0.828881\nI1207 10:05:43.173872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80442 > 2) by scale factor 0.713159\nI1207 10:05:44.114528   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4651 > 2) by scale factor 0.811327\nI1207 10:05:45.055622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38152 > 2) by scale factor 0.456462\nI1207 10:05:45.996026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76177 > 2) by scale factor 0.531665\nI1207 10:05:46.937006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6061 > 2) by scale factor 0.767431\nI1207 10:05:47.878229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06906 > 2) by scale factor 0.651666\nI1207 10:05:48.819283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45635 > 2) by scale factor 0.814215\nI1207 10:05:49.760025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98143 > 2) by scale factor 0.670819\nI1207 10:05:50.702734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93795 > 2) by scale factor 0.680746\nI1207 10:05:51.646450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95583 > 2) by scale factor 0.676629\nI1207 10:05:52.588961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11846 > 2) by scale factor 0.944081\nI1207 10:05:53.531230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70632 > 2) by scale factor 0.739012\nI1207 10:05:54.473927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28658 > 2) by scale factor 0.608536\nI1207 10:05:55.416445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3124 > 2) by scale factor 0.463779\nI1207 10:05:56.359098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23538 > 2) by scale factor 0.894704\nI1207 10:05:57.301962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.33903 > 2) by scale factor 0.855054\nI1207 10:05:58.244561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81616 > 2) by scale factor 0.710186\nI1207 10:05:59.187396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53497 > 2) by scale factor 0.788963\nI1207 10:06:00.130478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5286 > 2) by scale factor 0.566798\nI1207 10:06:01.072913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09906 > 2) by scale factor 0.952806\nI1207 10:06:02.014876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51651 > 2) by scale factor 0.794751\nI1207 10:06:02.957482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90497 > 2) by scale factor 0.512168\nI1207 10:06:03.900692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95671 > 2) by scale factor 0.676428\nI1207 10:06:04.843412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05706 > 2) by scale factor 0.654223\nI1207 10:06:05.788976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85457 > 2) by scale factor 0.700631\nI1207 10:06:06.733314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88932 > 2) by scale factor 0.692204\nI1207 10:06:07.678149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68328 > 2) by scale factor 0.542995\nI1207 10:06:08.621724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81678 > 2) by scale factor 0.524001\nI1207 10:06:09.565678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6513 > 2) by scale factor 0.547749\nI1207 10:06:10.509280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43842 > 2) by scale factor 0.581663\nI1207 10:06:11.453733   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44785 > 2) by scale factor 0.817043\nI1207 10:06:12.396755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50642 > 2) by scale factor 0.570382\nI1207 10:06:13.341107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83375 > 2) by scale factor 0.705778\nI1207 10:06:14.285393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01564 > 2) by scale factor 0.498053\nI1207 10:06:15.228301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12022 > 2) by scale factor 0.943299\nI1207 10:06:16.173256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10793 > 2) by scale factor 0.643515\nI1207 10:06:17.117797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15184 > 2) by scale factor 0.481714\nI1207 10:06:18.062327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01906 > 2) by scale factor 0.662458\nI1207 10:06:19.005782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20404 > 2) by scale factor 0.624213\nI1207 10:06:19.948755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29779 > 2) by scale factor 0.606466\nI1207 10:06:20.893081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26374 > 2) by scale factor 0.612794\nI1207 10:06:21.837582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34454 > 2) by scale factor 0.853046\nI1207 10:06:22.781438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97091 > 2) by scale factor 0.673195\nI1207 10:06:23.725239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83124 > 2) by scale factor 0.522024\nI1207 10:06:24.668462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.0998 > 2) by scale factor 0.645203\nI1207 10:06:25.612208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8525 > 2) by scale factor 0.519143\nI1207 10:06:26.555042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82725 > 2) by scale factor 0.522569\nI1207 10:06:27.497985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49184 > 2) by scale factor 0.572764\nI1207 10:06:28.440501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74226 > 2) by scale factor 0.729327\nI1207 10:06:29.383405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96582 > 2) by scale factor 0.674349\nI1207 10:06:30.326921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58116 > 2) by scale factor 0.774845\nI1207 10:06:31.270217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26022 > 2) by scale factor 0.613455\nI1207 10:06:32.213071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55251 > 2) by scale factor 0.562982\nI1207 10:06:33.155469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75598 > 2) by scale factor 0.532484\nI1207 10:06:34.097841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22591 > 2) by scale factor 0.619981\nI1207 10:06:35.041226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68378 > 2) by scale factor 0.542921\nI1207 10:06:35.984674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85377 > 2) by scale factor 0.518972\nI1207 10:06:36.928716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27842 > 2) by scale factor 0.877801\nI1207 10:06:37.871296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.961 > 2) by scale factor 0.675447\nI1207 10:06:38.814047   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26697 > 2) by scale factor 0.882236\nI1207 10:06:39.756054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33659 > 2) by scale factor 0.599415\nI1207 10:06:40.699350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3066 > 2) by scale factor 0.464403\nI1207 10:06:41.642237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84122 > 2) by scale factor 0.520668\nI1207 10:06:42.584501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73055 > 2) by scale factor 0.536114\nI1207 10:06:43.526067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84741 > 2) by scale factor 0.702393\nI1207 10:06:44.469344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39244 > 2) by scale factor 0.835966\nI1207 10:06:45.412248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15115 > 2) by scale factor 0.481794\nI1207 10:06:46.355379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00575 > 2) by scale factor 0.997131\nI1207 10:06:47.298363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46568 > 2) by scale factor 0.811134\nI1207 10:06:48.240700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39553 > 2) by scale factor 0.589009\nI1207 10:06:49.184939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77363 > 2) by scale factor 0.721078\nI1207 10:06:50.128473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49489 > 2) by scale factor 0.801637\nI1207 10:06:51.071065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67965 > 2) by scale factor 0.54353\nI1207 10:06:52.014626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.51056 > 2) by scale factor 0.569709\nI1207 10:06:52.957311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43588 > 2) by scale factor 0.821058\nI1207 10:06:53.900115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17118 > 2) by scale factor 0.63068\nI1207 10:06:54.842813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66444 > 2) by scale factor 0.545787\nI1207 10:06:54.854883   369 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1207 10:07:47.824956   369 solver.cpp:404]     Test net output #0: accuracy = 0.2455\nI1207 10:07:47.825376   369 solver.cpp:404]     Test net output #1: loss = 7.04206 (* 1 = 7.04206 loss)\nI1207 10:07:48.699353   369 solver.cpp:228] Iteration 11000, loss = 5.51978\nI1207 10:07:48.699405   369 solver.cpp:244]     Train net output #0: accuracy = 0.35\nI1207 10:07:48.699424   369 solver.cpp:244]     Train net output #1: loss = 5.51978 (* 1 = 5.51978 loss)\nI1207 10:07:48.774184   369 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1207 10:07:49.723378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67407 > 2) by scale factor 0.747924\nI1207 10:07:50.664503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8596 > 2) by scale factor 0.699398\nI1207 10:07:51.605520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56401 > 2) by scale factor 0.780029\nI1207 10:07:52.546174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23391 > 2) by scale factor 0.472376\nI1207 10:07:53.487687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2954 > 2) by scale factor 0.87131\nI1207 10:07:54.428411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58687 > 2) by scale factor 0.773135\nI1207 10:07:55.369359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.07518 > 2) by scale factor 0.650369\nI1207 10:07:56.309967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52551 > 2) by scale factor 0.79192\nI1207 10:07:57.250504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47729 > 2) by scale factor 0.57516\nI1207 10:07:58.191552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24523 > 2) by scale factor 0.471116\nI1207 10:07:59.133173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30475 > 2) by scale factor 0.867773\nI1207 10:08:00.074259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57573 > 2) by scale factor 0.77648\nI1207 10:08:01.015017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12742 > 2) by scale factor 0.639505\nI1207 10:08:01.956060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73752 > 2) by scale factor 0.730589\nI1207 10:08:02.897562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83763 > 2) by scale factor 0.704813\nI1207 10:08:03.839170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71834 > 2) by scale factor 0.537874\nI1207 10:08:04.780007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89374 > 2) by scale factor 0.691147\nI1207 10:08:05.720917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58087 > 2) by scale factor 0.774933\nI1207 10:08:06.661818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41508 > 2) by scale factor 0.585637\nI1207 10:08:07.603247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25773 > 2) by scale factor 0.613925\nI1207 10:08:08.544209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71281 > 2) by scale factor 0.737244\nI1207 10:08:09.485134   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3309 > 2) by scale factor 0.858036\nI1207 10:08:10.425925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20596 > 2) by scale factor 0.475515\nI1207 10:08:11.367069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5871 > 2) by scale factor 0.557554\nI1207 10:08:12.308027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78464 > 2) by scale factor 0.718226\nI1207 10:08:13.248896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63798 > 2) by scale factor 0.549755\nI1207 10:08:14.190214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13028 > 2) by scale factor 0.63892\nI1207 10:08:15.131155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30738 > 2) by scale factor 0.866783\nI1207 10:08:16.072130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58987 > 2) by scale factor 0.77224\nI1207 10:08:17.013468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7421 > 2) by scale factor 0.729368\nI1207 10:08:17.956833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35796 > 2) by scale factor 0.595599\nI1207 10:08:18.900884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22856 > 2) by scale factor 0.619471\nI1207 10:08:19.844161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98382 > 2) by scale factor 0.502031\nI1207 10:08:20.787474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62956 > 2) by scale factor 0.551031\nI1207 10:08:21.730698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19734 > 2) by scale factor 0.625519\nI1207 10:08:22.674099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.6524 > 2) by scale factor 0.547585\nI1207 10:08:23.616976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6302 > 2) by scale factor 0.550934\nI1207 10:08:24.560370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99291 > 2) by scale factor 0.668245\nI1207 10:08:25.503715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87079 > 2) by scale factor 0.696672\nI1207 10:08:26.447115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82186 > 2) by scale factor 0.708753\nI1207 10:08:27.390192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33675 > 2) by scale factor 0.599386\nI1207 10:08:28.333303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09025 > 2) by scale factor 0.647196\nI1207 10:08:29.276309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69904 > 2) by scale factor 0.540681\nI1207 10:08:30.219522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5867 > 2) by scale factor 0.436043\nI1207 10:08:31.162775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.09434 > 2) by scale factor 0.392593\nI1207 10:08:32.106148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98943 > 2) by scale factor 0.669024\nI1207 10:08:33.049242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84115 > 2) by scale factor 0.70394\nI1207 10:08:33.992480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50579 > 2) by scale factor 0.798152\nI1207 10:08:34.935405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75068 > 2) by scale factor 0.533237\nI1207 10:08:35.878731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18035 > 2) by scale factor 0.628861\nI1207 10:08:36.821794   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44362 > 2) by scale factor 0.818457\nI1207 10:08:37.764829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36746 > 2) by scale factor 0.59392\nI1207 10:08:38.708066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94978 > 2) by scale factor 0.678017\nI1207 10:08:39.651657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14283 > 2) by scale factor 0.63637\nI1207 10:08:40.594925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43923 > 2) by scale factor 0.450528\nI1207 10:08:41.538100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89827 > 2) by scale factor 0.513048\nI1207 10:08:42.481267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3919 > 2) by scale factor 0.58964\nI1207 10:08:43.424589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34943 > 2) by scale factor 0.851271\nI1207 10:08:44.367909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3885 > 2) by scale factor 0.837347\nI1207 10:08:45.311030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29294 > 2) by scale factor 0.60736\nI1207 10:08:46.253654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66895 > 2) by scale factor 0.428362\nI1207 10:08:47.196679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86611 > 2) by scale factor 0.517316\nI1207 10:08:48.139701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8903 > 2) by scale factor 0.514099\nI1207 10:08:49.083283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45452 > 2) by scale factor 0.578952\nI1207 10:08:50.026523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.20725 > 2) by scale factor 0.623588\nI1207 10:08:50.969308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16025 > 2) by scale factor 0.480741\nI1207 10:08:51.912565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9906 > 2) by scale factor 0.501177\nI1207 10:08:52.855657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11228 > 2) by scale factor 0.642616\nI1207 10:08:53.799445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69052 > 2) by scale factor 0.541929\nI1207 10:08:54.742525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20246 > 2) by scale factor 0.62452\nI1207 10:08:55.685595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87005 > 2) by scale factor 0.696852\nI1207 10:08:56.628504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47523 > 2) by scale factor 0.575501\nI1207 10:08:57.571801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92196 > 2) by scale factor 0.684472\nI1207 10:08:58.515173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39909 > 2) by scale factor 0.588393\nI1207 10:08:59.458329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52307 > 2) by scale factor 0.567687\nI1207 10:09:00.401263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02424 > 2) by scale factor 0.496988\nI1207 10:09:01.344317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96287 > 2) by scale factor 0.67502\nI1207 10:09:02.287485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39438 > 2) by scale factor 0.835291\nI1207 10:09:03.230926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00057 > 2) by scale factor 0.66654\nI1207 10:09:05.114264   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69255 > 2) by scale factor 0.541631\nI1207 10:09:06.056905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32178 > 2) by scale factor 0.602087\nI1207 10:09:07.000080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56353 > 2) by scale factor 0.780175\nI1207 10:09:07.943217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53605 > 2) by scale factor 0.565604\nI1207 10:09:08.886649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12072 > 2) by scale factor 0.943075\nI1207 10:09:09.829773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65819 > 2) by scale factor 0.752391\nI1207 10:09:10.772889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2249 > 2) by scale factor 0.620175\nI1207 10:09:11.716099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4746 > 2) by scale factor 0.808211\nI1207 10:09:12.659421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38041 > 2) by scale factor 0.591644\nI1207 10:09:13.602546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19981 > 2) by scale factor 0.625036\nI1207 10:09:14.545547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50546 > 2) by scale factor 0.443906\nI1207 10:09:15.488468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49845 > 2) by scale factor 0.800495\nI1207 10:09:16.431759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82583 > 2) by scale factor 0.707756\nI1207 10:09:17.374732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91764 > 2) by scale factor 0.685485\nI1207 10:09:18.317672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.22442 > 2) by scale factor 0.620267\nI1207 10:09:19.260994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7772 > 2) by scale factor 0.720149\nI1207 10:09:20.204064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05923 > 2) by scale factor 0.492704\nI1207 10:09:21.147173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73025 > 2) by scale factor 0.732533\nI1207 10:09:22.089645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84384 > 2) by scale factor 0.520313\nI1207 10:09:22.101688   369 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1207 10:10:15.109457   369 solver.cpp:404]     Test net output #0: accuracy = 0.20015\nI1207 10:10:15.109927   369 solver.cpp:404]     Test net output #1: loss = 11.9044 (* 1 = 11.9044 loss)\nI1207 10:10:15.984653   369 solver.cpp:228] Iteration 11100, loss = 12.4081\nI1207 10:10:15.984706   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 10:10:15.984725   369 solver.cpp:244]     Train net output #1: loss = 12.4081 (* 1 = 12.4081 loss)\nI1207 10:10:16.054636   369 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1207 10:10:16.064748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19702 > 2) by scale factor 0.625582\nI1207 10:10:17.005226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14463 > 2) by scale factor 0.636005\nI1207 10:10:17.945513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47049 > 2) by scale factor 0.576288\nI1207 10:10:18.885973   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47993 > 2) by scale factor 0.574723\nI1207 10:10:19.826401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4634 > 2) by scale factor 0.811886\nI1207 10:10:20.766670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.70168 > 2) by scale factor 0.425379\nI1207 10:10:21.707301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76132 > 2) by scale factor 0.724292\nI1207 10:10:22.648267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31162 > 2) by scale factor 0.603934\nI1207 10:10:23.589323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48089 > 2) by scale factor 0.574566\nI1207 10:10:24.529992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31068 > 2) by scale factor 0.463964\nI1207 10:10:25.471230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24918 > 2) by scale factor 0.47068\nI1207 10:10:26.412153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44993 > 2) by scale factor 0.81635\nI1207 10:10:27.353435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92213 > 2) by scale factor 0.509927\nI1207 10:10:28.294576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51944 > 2) by scale factor 0.442532\nI1207 10:10:29.235743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42556 > 2) by scale factor 0.583847\nI1207 10:10:30.176640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46719 > 2) by scale factor 0.576837\nI1207 10:10:31.117879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9434 > 2) by scale factor 0.40458\nI1207 10:10:32.058727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40381 > 2) by scale factor 0.454152\nI1207 10:10:32.999797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19212 > 2) by scale factor 0.626543\nI1207 10:10:33.941189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48366 > 2) by scale factor 0.446064\nI1207 10:10:34.881732   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5209 > 2) by scale factor 0.568036\nI1207 10:10:35.822634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30395 > 2) by scale factor 0.605336\nI1207 10:10:36.763960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93939 > 2) by scale factor 0.680414\nI1207 10:10:37.704746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01906 > 2) by scale factor 0.990558\nI1207 10:10:38.644634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9685 > 2) by scale factor 0.503969\nI1207 10:10:39.585552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64843 > 2) by scale factor 0.548181\nI1207 10:10:40.526016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79033 > 2) by scale factor 0.527659\nI1207 10:10:41.467490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66777 > 2) by scale factor 0.749689\nI1207 10:10:42.408088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72519 > 2) by scale factor 0.536885\nI1207 10:10:43.348305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8436 > 2) by scale factor 0.520346\nI1207 10:10:44.288822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16055 > 2) by scale factor 0.632801\nI1207 10:10:45.228744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16338 > 2) by scale factor 0.632235\nI1207 10:10:46.169823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42068 > 2) by scale factor 0.826213\nI1207 10:10:47.111083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30595 > 2) by scale factor 0.867323\nI1207 10:10:48.052101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.42102 > 2) by scale factor 0.584621\nI1207 10:10:48.992671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94578 > 2) by scale factor 0.678938\nI1207 10:10:49.935397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5548 > 2) by scale factor 0.78284\nI1207 10:10:50.878159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29613 > 2) by scale factor 0.465535\nI1207 10:10:51.821094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73593 > 2) by scale factor 0.731014\nI1207 10:10:52.763628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33892 > 2) by scale factor 0.855096\nI1207 10:10:53.706368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51299 > 2) by scale factor 0.443165\nI1207 10:10:54.648769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66903 > 2) by scale factor 0.749337\nI1207 10:10:55.592371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76002 > 2) by scale factor 0.724632\nI1207 10:10:56.535526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33217 > 2) by scale factor 0.60021\nI1207 10:10:57.478826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22101 > 2) by scale factor 0.473821\nI1207 10:10:58.421960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96061 > 2) by scale factor 0.504972\nI1207 10:10:59.365304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16237 > 2) by scale factor 0.632436\nI1207 10:11:00.309296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53281 > 2) by scale factor 0.789638\nI1207 10:11:01.252158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5195 > 2) by scale factor 0.793807\nI1207 10:11:02.195478   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87576 > 2) by scale factor 0.695469\nI1207 10:11:03.138924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35562 > 2) by scale factor 0.596016\nI1207 10:11:04.082077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15347 > 2) by scale factor 0.634222\nI1207 10:11:05.024734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49302 > 2) by scale factor 0.572571\nI1207 10:11:05.968031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60953 > 2) by scale factor 0.766423\nI1207 10:11:06.910924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21063 > 2) by scale factor 0.622931\nI1207 10:11:08.795013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99583 > 2) by scale factor 0.667594\nI1207 10:11:09.738052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00471 > 2) by scale factor 0.665621\nI1207 10:11:10.681365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66477 > 2) by scale factor 0.428746\nI1207 10:11:11.624388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60974 > 2) by scale factor 0.554057\nI1207 10:11:12.567339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08062 > 2) by scale factor 0.64922\nI1207 10:11:13.510669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06037 > 2) by scale factor 0.653515\nI1207 10:11:14.453917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72759 > 2) by scale factor 0.733248\nI1207 10:11:15.396885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19233 > 2) by scale factor 0.626501\nI1207 10:11:16.340436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.68709 > 2) by scale factor 0.744301\nI1207 10:11:17.282846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91359 > 2) by scale factor 0.686438\nI1207 10:11:18.225733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01396 > 2) by scale factor 0.663579\nI1207 10:11:19.168889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52462 > 2) by scale factor 0.442026\nI1207 10:11:20.111789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99126 > 2) by scale factor 0.668616\nI1207 10:11:21.054390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03807 > 2) by scale factor 0.658313\nI1207 10:11:21.997340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30065 > 2) by scale factor 0.869319\nI1207 10:11:22.939960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47945 > 2) by scale factor 0.80663\nI1207 10:11:23.882805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98662 > 2) by scale factor 0.669653\nI1207 10:11:24.825820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08648 > 2) by scale factor 0.647987\nI1207 10:11:25.768687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99875 > 2) by scale factor 0.666944\nI1207 10:11:26.711606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95751 > 2) by scale factor 0.676245\nI1207 10:11:27.654580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6128 > 2) by scale factor 0.765461\nI1207 10:11:28.597268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01142 > 2) by scale factor 0.664138\nI1207 10:11:29.539932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27 > 2) by scale factor 0.61162\nI1207 10:11:30.482635   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80273 > 2) by scale factor 0.713589\nI1207 10:11:31.425467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20403 > 2) by scale factor 0.624215\nI1207 10:11:32.368551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34848 > 2) by scale factor 0.459931\nI1207 10:11:33.311175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3777 > 2) by scale factor 0.841148\nI1207 10:11:34.254225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33035 > 2) by scale factor 0.858241\nI1207 10:11:35.197365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17179 > 2) by scale factor 0.47941\nI1207 10:11:36.140465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41392 > 2) by scale factor 0.453112\nI1207 10:11:37.083331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22315 > 2) by scale factor 0.899623\nI1207 10:11:38.026958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11243 > 2) by scale factor 0.946776\nI1207 10:11:38.969609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49265 > 2) by scale factor 0.80236\nI1207 10:11:39.912742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76727 > 2) by scale factor 0.530889\nI1207 10:11:40.855665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81332 > 2) by scale factor 0.524478\nI1207 10:11:41.798378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89222 > 2) by scale factor 0.69151\nI1207 10:11:42.741753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82601 > 2) by scale factor 0.707711\nI1207 10:11:43.684670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.29101 > 2) by scale factor 0.607716\nI1207 10:11:44.628130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86238 > 2) by scale factor 0.69872\nI1207 10:11:45.571305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24859 > 2) by scale factor 0.615652\nI1207 10:11:46.515066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67133 > 2) by scale factor 0.428144\nI1207 10:11:47.458323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06947 > 2) by scale factor 0.651578\nI1207 10:11:48.401266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47121 > 2) by scale factor 0.576167\nI1207 10:11:49.344420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44528 > 2) by scale factor 0.817904\nI1207 10:11:49.356492   369 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1207 10:12:42.413069   369 solver.cpp:404]     Test net output #0: accuracy = 0.1822\nI1207 10:12:42.413533   369 solver.cpp:404]     Test net output #1: loss = 11.2642 (* 1 = 11.2642 loss)\nI1207 10:12:43.288045   369 solver.cpp:228] Iteration 11200, loss = 10.719\nI1207 10:12:43.288099   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 10:12:43.288117   369 solver.cpp:244]     Train net output #1: loss = 10.719 (* 1 = 10.719 loss)\nI1207 10:12:43.364991   369 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1207 10:12:43.375169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75237 > 2) by scale factor 0.726646\nI1207 10:12:44.316589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42325 > 2) by scale factor 0.58424\nI1207 10:12:45.257829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32148 > 2) by scale factor 0.602141\nI1207 10:12:46.198894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24417 
> 2) by scale factor 0.471234\nI1207 10:12:47.139906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79092 > 2) by scale factor 0.716609\nI1207 10:12:48.080848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84434 > 2) by scale factor 0.703152\nI1207 10:12:49.021347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17429 > 2) by scale factor 0.479123\nI1207 10:12:49.962491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96857 > 2) by scale factor 0.50396\nI1207 10:12:50.902972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45011 > 2) by scale factor 0.449427\nI1207 10:12:51.843283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90239 > 2) by scale factor 0.689088\nI1207 10:12:52.783998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28948 > 2) by scale factor 0.873562\nI1207 10:12:53.724581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1368 > 2) by scale factor 0.637593\nI1207 10:12:54.664872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16721 > 2) by scale factor 0.631472\nI1207 10:12:55.606356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25671 > 2) by scale factor 0.614116\nI1207 10:12:56.546944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3229 > 2) by scale factor 0.860993\nI1207 10:12:57.487705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45571 > 2) by scale factor 0.814428\nI1207 10:12:58.428961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19835 > 2) by scale factor 0.625323\nI1207 10:12:59.370575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1329 > 2) by scale factor 0.93769\nI1207 10:13:00.311560   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88174 > 2) by scale factor 0.515233\nI1207 10:13:01.252645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85034 > 2) by scale factor 0.519435\nI1207 10:13:02.193601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21843 > 2) by scale factor 0.47411\nI1207 10:13:03.134977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47948 > 2) by scale factor 0.446481\nI1207 10:13:04.076103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19919 > 2) by scale factor 0.625159\nI1207 10:13:05.016626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3117 > 2) by scale factor 0.603919\nI1207 10:13:05.957646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0123 > 2) by scale factor 0.663943\nI1207 10:13:06.898602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38641 > 2) by scale factor 0.590596\nI1207 10:13:07.839442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57153 > 2) by scale factor 0.777748\nI1207 10:13:08.780539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70098 > 2) by scale factor 0.740471\nI1207 10:13:09.721243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94097 > 2) by scale factor 0.50749\nI1207 10:13:10.662369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16038 > 2) by scale factor 0.632835\nI1207 10:13:11.603514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41237 > 2) by scale factor 0.586102\nI1207 10:13:12.544426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76948 > 2) by scale factor 0.722157\nI1207 10:13:13.485792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.27775 > 2) by scale factor 0.610174\nI1207 10:13:14.426750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76818 > 2) by scale factor 0.53076\nI1207 10:13:15.368055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44036 > 2) by scale factor 0.581335\nI1207 10:13:16.311844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54074 > 2) by scale factor 0.787173\nI1207 10:13:17.255182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23969 > 2) by scale factor 0.617344\nI1207 10:13:18.198521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47291 > 2) by scale factor 0.575886\nI1207 10:13:19.142138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88378 > 2) by scale factor 0.693535\nI1207 10:13:20.085849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68645 > 2) by scale factor 0.744478\nI1207 10:13:21.029579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32108 > 2) by scale factor 0.861668\nI1207 10:13:21.973021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74997 > 2) by scale factor 0.727282\nI1207 10:13:22.916873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51591 > 2) by scale factor 0.79494\nI1207 10:13:23.860049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73604 > 2) by scale factor 0.535326\nI1207 10:13:24.803632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06477 > 2) by scale factor 0.652578\nI1207 10:13:25.747421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89166 > 2) by scale factor 0.691645\nI1207 10:13:26.690927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6957 > 2) by scale factor 0.541169\nI1207 10:13:27.634325   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54929 > 2) by scale factor 0.563493\nI1207 10:13:28.577920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66343 > 2) by scale factor 0.545937\nI1207 10:13:29.521661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62487 > 2) by scale factor 0.761942\nI1207 10:13:30.465229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84172 > 2) by scale factor 0.703798\nI1207 10:13:31.408637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95716 > 2) by scale factor 0.676324\nI1207 10:13:32.351876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57875 > 2) by scale factor 0.558854\nI1207 10:13:33.295083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62161 > 2) by scale factor 0.55224\nI1207 10:13:34.238941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07571 > 2) by scale factor 0.650257\nI1207 10:13:35.182695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73333 > 2) by scale factor 0.535715\nI1207 10:13:36.126070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21918 > 2) by scale factor 0.621276\nI1207 10:13:37.069138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94279 > 2) by scale factor 0.679626\nI1207 10:13:38.012749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54228 > 2) by scale factor 0.564608\nI1207 10:13:38.956559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68708 > 2) by scale factor 0.542435\nI1207 10:13:39.900244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44382 > 2) by scale factor 0.818391\nI1207 10:13:40.843786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.06512 > 2) by scale factor 0.652502\nI1207 10:13:41.786988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25496 > 2) by scale factor 0.614448\nI1207 10:13:42.730409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86395 > 2) by scale factor 0.698337\nI1207 10:13:43.674432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56508 > 2) by scale factor 0.560997\nI1207 10:13:44.617566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69292 > 2) by scale factor 0.541576\nI1207 10:13:45.561375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09273 > 2) by scale factor 0.646677\nI1207 10:13:46.504938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00947 > 2) by scale factor 0.664568\nI1207 10:13:47.448313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09825 > 2) by scale factor 0.645526\nI1207 10:13:48.391501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95812 > 2) by scale factor 0.676104\nI1207 10:13:49.334591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19617 > 2) by scale factor 0.476625\nI1207 10:13:50.278234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40372 > 2) by scale factor 0.587592\nI1207 10:13:51.221886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19633 > 2) by scale factor 0.910609\nI1207 10:13:52.165563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05836 > 2) by scale factor 0.971646\nI1207 10:13:53.109174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53848 > 2) by scale factor 0.565215\nI1207 10:13:54.052314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82482 > 2) by scale factor 0.708009\nI1207 10:13:54.995471   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27665 > 2) by scale factor 0.467656\nI1207 10:13:55.939081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69706 > 2) by scale factor 0.741548\nI1207 10:13:56.882879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27293 > 2) by scale factor 0.611073\nI1207 10:13:57.826627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95061 > 2) by scale factor 0.677827\nI1207 10:13:58.770426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10534 > 2) by scale factor 0.949966\nI1207 10:13:59.714350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02099 > 2) by scale factor 0.49739\nI1207 10:14:00.657950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71845 > 2) by scale factor 0.537859\nI1207 10:14:01.601330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47089 > 2) by scale factor 0.57622\nI1207 10:14:02.545085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95459 > 2) by scale factor 0.676914\nI1207 10:14:03.488494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76082 > 2) by scale factor 0.724423\nI1207 10:14:04.432268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30288 > 2) by scale factor 0.464805\nI1207 10:14:05.375540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61491 > 2) by scale factor 0.764845\nI1207 10:14:06.318709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92675 > 2) by scale factor 0.683351\nI1207 10:14:07.262401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70865 > 2) by scale factor 0.738376\nI1207 10:14:08.205970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.40279 > 2) by scale factor 0.832367\nI1207 10:14:09.149595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54051 > 2) by scale factor 0.56489\nI1207 10:14:10.092326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40032 > 2) by scale factor 0.833223\nI1207 10:14:11.035544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75327 > 2) by scale factor 0.726408\nI1207 10:14:11.979388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68546 > 2) by scale factor 0.744753\nI1207 10:14:12.922809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01035 > 2) by scale factor 0.664375\nI1207 10:14:13.866827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79879 > 2) by scale factor 0.714595\nI1207 10:14:14.809876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07773 > 2) by scale factor 0.64983\nI1207 10:14:15.753276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25462 > 2) by scale factor 0.887067\nI1207 10:14:16.696640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45989 > 2) by scale factor 0.578053\nI1207 10:14:16.708711   369 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1207 10:15:09.778146   369 solver.cpp:404]     Test net output #0: accuracy = 0.14265\nI1207 10:15:09.778611   369 solver.cpp:404]     Test net output #1: loss = 16.1406 (* 1 = 16.1406 loss)\nI1207 10:15:10.651820   369 solver.cpp:228] Iteration 11300, loss = 16.9782\nI1207 10:15:10.651876   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 10:15:10.651896   369 solver.cpp:244]     Train net output #1: loss = 16.9782 (* 1 = 16.9782 loss)\nI1207 10:15:10.724920   369 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1207 10:15:10.735103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.10137 > 2) by scale factor 0.487643\nI1207 10:15:11.675951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10828 > 2) by scale factor 0.643443\nI1207 10:15:12.615540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66417 > 2) by scale factor 0.750703\nI1207 10:15:13.556674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86965 > 2) by scale factor 0.696949\nI1207 10:15:14.497373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13977 > 2) by scale factor 0.483118\nI1207 10:15:15.437989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17611 > 2) by scale factor 0.919071\nI1207 10:15:16.379107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17583 > 2) by scale factor 0.919189\nI1207 10:15:17.320924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28346 > 2) by scale factor 0.609113\nI1207 10:15:18.261615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41886 > 2) by scale factor 0.584991\nI1207 10:15:20.141392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41172 > 2) by scale factor 0.586214\nI1207 10:15:21.081928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58948 > 2) by scale factor 0.772355\nI1207 10:15:22.022606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47475 > 2) by scale factor 0.808162\nI1207 10:15:22.963279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17276 > 2) by scale factor 0.920487\nI1207 10:15:23.904496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61715 > 2) by scale factor 0.764189\nI1207 10:15:24.845048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23835 > 2) by scale factor 0.893515\nI1207 10:15:25.786128   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3479 > 2) by scale factor 0.459992\nI1207 10:15:26.727087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26178 > 2) by scale factor 0.884259\nI1207 10:15:27.667948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63145 > 2) by scale factor 0.760036\nI1207 10:15:28.608443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6638 > 2) by scale factor 0.750807\nI1207 10:15:30.488749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81306 > 2) by scale factor 0.710968\nI1207 10:15:32.369436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71444 > 2) by scale factor 0.736801\nI1207 10:15:34.249925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65232 > 2) by scale factor 0.754056\nI1207 10:15:35.191117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54342 > 2) by scale factor 0.564427\nI1207 10:15:36.132720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00465 > 2) by scale factor 0.665635\nI1207 10:15:37.073806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69121 > 2) by scale factor 0.743161\nI1207 10:15:38.014863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21768 > 2) by scale factor 0.474195\nI1207 10:15:38.956079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13529 > 2) by scale factor 0.6379\nI1207 10:15:39.897408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34296 > 2) by scale factor 0.85362\nI1207 10:15:40.841672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79996 > 2) by scale factor 0.714296\nI1207 10:15:41.786552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.21456 > 2) by scale factor 0.622169\nI1207 10:15:42.730612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70907 > 2) by scale factor 0.73826\nI1207 10:15:43.674903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62817 > 2) by scale factor 0.551243\nI1207 10:15:44.619314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54831 > 2) by scale factor 0.563649\nI1207 10:15:45.563087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45468 > 2) by scale factor 0.578925\nI1207 10:15:46.507745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18143 > 2) by scale factor 0.478305\nI1207 10:15:47.451820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8254 > 2) by scale factor 0.522821\nI1207 10:15:48.396442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.989 > 2) by scale factor 0.669119\nI1207 10:15:49.340250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56823 > 2) by scale factor 0.560502\nI1207 10:15:50.284713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08737 > 2) by scale factor 0.6478\nI1207 10:15:51.228617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36338 > 2) by scale factor 0.45836\nI1207 10:15:52.172845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85292 > 2) by scale factor 0.701037\nI1207 10:15:53.117208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43648 > 2) by scale factor 0.581991\nI1207 10:15:54.060865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4312 > 2) by scale factor 0.582886\nI1207 10:15:55.004556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14697 > 2) by scale factor 0.931546\nI1207 10:15:55.948421   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85584 > 2) by scale factor 0.518694\nI1207 10:15:57.834034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5218 > 2) by scale factor 0.793085\nI1207 10:15:58.777762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50509 > 2) by scale factor 0.570599\nI1207 10:15:59.721670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66552 > 2) by scale factor 0.545626\nI1207 10:16:00.665684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16475 > 2) by scale factor 0.923895\nI1207 10:16:01.609935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5877 > 2) by scale factor 0.772888\nI1207 10:16:02.553653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58656 > 2) by scale factor 0.557638\nI1207 10:16:03.497789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31095 > 2) by scale factor 0.604056\nI1207 10:16:04.441534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55903 > 2) by scale factor 0.781545\nI1207 10:16:05.385345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64948 > 2) by scale factor 0.754866\nI1207 10:16:06.329284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88343 > 2) by scale factor 0.693619\nI1207 10:16:07.273074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93413 > 2) by scale factor 0.681633\nI1207 10:16:08.216821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19514 > 2) by scale factor 0.62595\nI1207 10:16:09.160708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80961 > 2) by scale factor 0.711842\nI1207 10:16:10.104534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.09635 > 2) by scale factor 0.645922\nI1207 10:16:11.049371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4562 > 2) by scale factor 0.814266\nI1207 10:16:11.993252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45884 > 2) by scale factor 0.813392\nI1207 10:16:12.937413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77696 > 2) by scale factor 0.720211\nI1207 10:16:13.881475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47452 > 2) by scale factor 0.57562\nI1207 10:16:14.825256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43436 > 2) by scale factor 0.582351\nI1207 10:16:15.768779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68348 > 2) by scale factor 0.427033\nI1207 10:16:16.713328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80924 > 2) by scale factor 0.525039\nI1207 10:16:17.657356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68094 > 2) by scale factor 0.543339\nI1207 10:16:18.601642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18933 > 2) by scale factor 0.62709\nI1207 10:16:20.487273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22629 > 2) by scale factor 0.619907\nI1207 10:16:21.431502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49814 > 2) by scale factor 0.800595\nI1207 10:16:22.375450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6327 > 2) by scale factor 0.759676\nI1207 10:16:23.319603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10848 > 2) by scale factor 0.643401\nI1207 10:16:24.263725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98247 > 2) by scale factor 0.670586\nI1207 10:16:25.207758   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6595 > 2) by scale factor 0.752022\nI1207 10:16:26.151542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33459 > 2) by scale factor 0.599774\nI1207 10:16:27.095484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27482 > 2) by scale factor 0.610721\nI1207 10:16:28.039247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69626 > 2) by scale factor 0.741769\nI1207 10:16:28.982975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54045 > 2) by scale factor 0.787261\nI1207 10:16:29.926511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41729 > 2) by scale factor 0.452766\nI1207 10:16:31.811491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34128 > 2) by scale factor 0.598574\nI1207 10:16:32.755707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10745 > 2) by scale factor 0.486921\nI1207 10:16:33.699412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85939 > 2) by scale factor 0.699449\nI1207 10:16:34.643137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74095 > 2) by scale factor 0.729673\nI1207 10:16:35.586812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86866 > 2) by scale factor 0.697191\nI1207 10:16:36.530673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26562 > 2) by scale factor 0.612442\nI1207 10:16:37.474779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07122 > 2) by scale factor 0.651206\nI1207 10:16:38.418819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70473 > 2) by scale factor 0.539851\nI1207 10:16:39.362571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.71758 > 2) by scale factor 0.735948\nI1207 10:16:40.306366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01171 > 2) by scale factor 0.664074\nI1207 10:16:41.250387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48884 > 2) by scale factor 0.803588\nI1207 10:16:42.194586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6817 > 2) by scale factor 0.543228\nI1207 10:16:43.138067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01579 > 2) by scale factor 0.663176\nI1207 10:16:44.081382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03398 > 2) by scale factor 0.6592\nI1207 10:16:44.092906   369 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1207 10:17:37.153633   369 solver.cpp:404]     Test net output #0: accuracy = 0.20365\nI1207 10:17:37.154104   369 solver.cpp:404]     Test net output #1: loss = 9.2734 (* 1 = 9.2734 loss)\nI1207 10:17:38.028244   369 solver.cpp:228] Iteration 11400, loss = 7.72183\nI1207 10:17:38.028295   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 10:17:38.028312   369 solver.cpp:244]     Train net output #1: loss = 7.72183 (* 1 = 7.72183 loss)\nI1207 10:17:38.099870   369 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1207 10:17:38.109701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76139 > 2) by scale factor 0.724274\nI1207 10:17:39.050149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02158 > 2) by scale factor 0.497317\nI1207 10:17:39.991469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9235 > 2) by scale factor 0.68411\nI1207 10:17:40.932423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59817 > 2) by scale factor 0.769774\nI1207 10:17:41.873725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.20334 > 2) by scale factor 0.475812\nI1207 10:17:42.814303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34082 > 2) by scale factor 0.854403\nI1207 10:17:43.755556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73426 > 2) by scale factor 0.731459\nI1207 10:17:44.696738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52194 > 2) by scale factor 0.793042\nI1207 10:17:45.637837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73102 > 2) by scale factor 0.536047\nI1207 10:17:46.578910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55816 > 2) by scale factor 0.781813\nI1207 10:17:47.519984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77617 > 2) by scale factor 0.720417\nI1207 10:17:48.461235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59379 > 2) by scale factor 0.771074\nI1207 10:17:49.402173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10052 > 2) by scale factor 0.952144\nI1207 10:17:50.343219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09207 > 2) by scale factor 0.646816\nI1207 10:17:51.284152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26711 > 2) by scale factor 0.882182\nI1207 10:17:52.225368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49017 > 2) by scale factor 0.573038\nI1207 10:17:53.165954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87384 > 2) by scale factor 0.516284\nI1207 10:17:54.106948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87861 > 2) by scale factor 0.694779\nI1207 10:17:55.048228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42804 > 2) by scale factor 0.583424\nI1207 10:17:55.989491   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92054 > 2) by scale factor 0.684805\nI1207 10:17:56.930361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62528 > 2) by scale factor 0.761823\nI1207 10:17:58.809885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38732 > 2) by scale factor 0.83776\nI1207 10:17:59.751148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07564 > 2) by scale factor 0.490721\nI1207 10:18:00.692163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94842 > 2) by scale factor 0.506532\nI1207 10:18:01.633128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49942 > 2) by scale factor 0.800187\nI1207 10:18:02.573837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49681 > 2) by scale factor 0.801023\nI1207 10:18:03.515033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55738 > 2) by scale factor 0.782051\nI1207 10:18:04.456251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17932 > 2) by scale factor 0.917717\nI1207 10:18:05.397509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39951 > 2) by scale factor 0.588321\nI1207 10:18:06.338062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22707 > 2) by scale factor 0.619757\nI1207 10:18:07.279491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86508 > 2) by scale factor 0.69806\nI1207 10:18:08.221316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89444 > 2) by scale factor 0.690981\nI1207 10:18:09.162070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32611 > 2) by scale factor 0.859805\nI1207 10:18:11.044490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.29695 > 2) by scale factor 0.870721\nI1207 10:18:11.988811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10614 > 2) by scale factor 0.949604\nI1207 10:18:12.932592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96004 > 2) by scale factor 0.675666\nI1207 10:18:13.876768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29845 > 2) by scale factor 0.870153\nI1207 10:18:14.820525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42044 > 2) by scale factor 0.584719\nI1207 10:18:15.764055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1186 > 2) by scale factor 0.485602\nI1207 10:18:16.707803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26454 > 2) by scale factor 0.612643\nI1207 10:18:17.651494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5108 > 2) by scale factor 0.56967\nI1207 10:18:18.595636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12994 > 2) by scale factor 0.63899\nI1207 10:18:19.540067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16278 > 2) by scale factor 0.924736\nI1207 10:18:20.483538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46905 > 2) by scale factor 0.810029\nI1207 10:18:21.427309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5344 > 2) by scale factor 0.78914\nI1207 10:18:23.312518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8454 > 2) by scale factor 0.702888\nI1207 10:18:24.255856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32825 > 2) by scale factor 0.46208\nI1207 10:18:25.199756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79582 > 2) by scale factor 0.526896\nI1207 10:18:26.143267   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51898 > 2) by scale factor 0.793972\nI1207 10:18:27.086741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22184 > 2) by scale factor 0.900155\nI1207 10:18:28.030535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33149 > 2) by scale factor 0.600332\nI1207 10:18:28.974267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70168 > 2) by scale factor 0.540295\nI1207 10:18:29.917811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37663 > 2) by scale factor 0.456972\nI1207 10:18:30.861464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22022 > 2) by scale factor 0.621075\nI1207 10:18:31.804491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67346 > 2) by scale factor 0.748094\nI1207 10:18:32.748245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2534 > 2) by scale factor 0.887549\nI1207 10:18:33.692009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02501 > 2) by scale factor 0.496894\nI1207 10:18:34.635653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67553 > 2) by scale factor 0.747517\nI1207 10:18:35.578944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68063 > 2) by scale factor 0.543386\nI1207 10:18:36.522490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87089 > 2) by scale factor 0.696648\nI1207 10:18:37.466424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61181 > 2) by scale factor 0.765754\nI1207 10:18:38.411111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35067 > 2) by scale factor 0.850823\nI1207 10:18:39.355300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.27101 > 2) by scale factor 0.611432\nI1207 10:18:40.298915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18744 > 2) by scale factor 0.627462\nI1207 10:18:41.242857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78343 > 2) by scale factor 0.718538\nI1207 10:18:42.186684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54921 > 2) by scale factor 0.784556\nI1207 10:18:43.130803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76516 > 2) by scale factor 0.723286\nI1207 10:18:44.074779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5351 > 2) by scale factor 0.788924\nI1207 10:18:45.019184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26734 > 2) by scale factor 0.88209\nI1207 10:18:45.963446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53145 > 2) by scale factor 0.790062\nI1207 10:18:46.906939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19791 > 2) by scale factor 0.625408\nI1207 10:18:47.850013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3026 > 2) by scale factor 0.464836\nI1207 10:18:48.793015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08973 > 2) by scale factor 0.957061\nI1207 10:18:49.735946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18168 > 2) by scale factor 0.478277\nI1207 10:18:50.678531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54532 > 2) by scale factor 0.785757\nI1207 10:18:51.621258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18994 > 2) by scale factor 0.913266\nI1207 10:18:52.564162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28584 > 2) by scale factor 0.874954\nI1207 10:18:53.507429   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60844 > 2) by scale factor 0.554257\nI1207 10:18:54.450268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60737 > 2) by scale factor 0.55442\nI1207 10:18:55.392953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7888 > 2) by scale factor 0.717154\nI1207 10:18:56.335885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49129 > 2) by scale factor 0.572854\nI1207 10:18:57.278841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89594 > 2) by scale factor 0.513355\nI1207 10:18:58.222290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72547 > 2) by scale factor 0.423239\nI1207 10:18:59.165330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64945 > 2) by scale factor 0.430158\nI1207 10:19:00.107908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45167 > 2) by scale factor 0.579429\nI1207 10:19:01.049778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45245 > 2) by scale factor 0.449191\nI1207 10:19:01.993903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76637 > 2) by scale factor 0.722969\nI1207 10:19:02.937741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96858 > 2) by scale factor 0.673722\nI1207 10:19:03.881569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23956 > 2) by scale factor 0.617368\nI1207 10:19:04.825850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72219 > 2) by scale factor 0.537319\nI1207 10:19:05.769348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43935 > 2) by scale factor 0.450516\nI1207 10:19:06.712911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.77297 > 2) by scale factor 0.419027\nI1207 10:19:07.656138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98734 > 2) by scale factor 0.501588\nI1207 10:19:08.599489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53021 > 2) by scale factor 0.44148\nI1207 10:19:09.541934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61122 > 2) by scale factor 0.553829\nI1207 10:19:10.484812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26759 > 2) by scale factor 0.612071\nI1207 10:19:11.428108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1306 > 2) by scale factor 0.638855\nI1207 10:19:11.440165   369 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1207 10:20:04.576916   369 solver.cpp:404]     Test net output #0: accuracy = 0.201\nI1207 10:20:04.577380   369 solver.cpp:404]     Test net output #1: loss = 14.1826 (* 1 = 14.1826 loss)\nI1207 10:20:05.451767   369 solver.cpp:228] Iteration 11500, loss = 16.7583\nI1207 10:20:05.451819   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 10:20:05.451838   369 solver.cpp:244]     Train net output #1: loss = 16.7583 (* 1 = 16.7583 loss)\nI1207 10:20:05.522943   369 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1207 10:20:05.533089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96732 > 2) by scale factor 0.674009\nI1207 10:20:06.474301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96194 > 2) by scale factor 0.675232\nI1207 10:20:07.415539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.036 > 2) by scale factor 0.658761\nI1207 10:20:08.356482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12376 > 2) by scale factor 0.640253\nI1207 10:20:09.297950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.83079 > 2) by scale factor 0.706516\nI1207 10:20:10.239265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79461 > 2) by scale factor 0.527064\nI1207 10:20:11.180120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83918 > 2) by scale factor 0.70443\nI1207 10:20:12.121325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08333 > 2) by scale factor 0.96\nI1207 10:20:13.062829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34252 > 2) by scale factor 0.598352\nI1207 10:20:14.004276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57583 > 2) by scale factor 0.77645\nI1207 10:20:14.945309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52424 > 2) by scale factor 0.567498\nI1207 10:20:15.885941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72606 > 2) by scale factor 0.733659\nI1207 10:20:16.826998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24669 > 2) by scale factor 0.616012\nI1207 10:20:17.768326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04295 > 2) by scale factor 0.657257\nI1207 10:20:18.709568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66119 > 2) by scale factor 0.751543\nI1207 10:20:19.650701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53646 > 2) by scale factor 0.565537\nI1207 10:20:20.591811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78677 > 2) by scale factor 0.717678\nI1207 10:20:21.532876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07104 > 2) by scale factor 0.651246\nI1207 10:20:22.474036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3914 > 2) by scale factor 0.455435\nI1207 10:20:23.415278   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43336 > 2) by scale factor 0.821908\nI1207 10:20:24.356608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90783 > 2) by scale factor 0.687799\nI1207 10:20:25.297719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3767 > 2) by scale factor 0.592295\nI1207 10:20:26.238646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15884 > 2) by scale factor 0.633144\nI1207 10:20:27.180104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45677 > 2) by scale factor 0.578576\nI1207 10:20:28.121299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37444 > 2) by scale factor 0.842304\nI1207 10:20:29.062525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45994 > 2) by scale factor 0.448437\nI1207 10:20:30.003479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56794 > 2) by scale factor 0.560548\nI1207 10:20:30.944458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26571 > 2) by scale factor 0.468856\nI1207 10:20:31.885535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0285 > 2) by scale factor 0.496462\nI1207 10:20:32.826922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6909 > 2) by scale factor 0.541874\nI1207 10:20:33.768095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53942 > 2) by scale factor 0.787581\nI1207 10:20:34.709152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09638 > 2) by scale factor 0.645915\nI1207 10:20:35.650591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62874 > 2) by scale factor 0.551156\nI1207 10:20:36.591558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.16872 > 2) by scale factor 0.63117\nI1207 10:20:37.532217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13853 > 2) by scale factor 0.93522\nI1207 10:20:38.473276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8208 > 2) by scale factor 0.52345\nI1207 10:20:40.356925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32208 > 2) by scale factor 0.861297\nI1207 10:20:41.300200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79008 > 2) by scale factor 0.716824\nI1207 10:20:42.243026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54696 > 2) by scale factor 0.439855\nI1207 10:20:43.185736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91023 > 2) by scale factor 0.511479\nI1207 10:20:44.128710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63234 > 2) by scale factor 0.550609\nI1207 10:20:45.071405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51567 > 2) by scale factor 0.568881\nI1207 10:20:46.014436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06345 > 2) by scale factor 0.652859\nI1207 10:20:46.956996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95739 > 2) by scale factor 0.676272\nI1207 10:20:47.899479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26075 > 2) by scale factor 0.613355\nI1207 10:20:48.842308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44904 > 2) by scale factor 0.579871\nI1207 10:20:49.785066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5934 > 2) by scale factor 0.77119\nI1207 10:20:50.727776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27336 > 2) by scale factor 0.468016\nI1207 10:20:51.670951   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01654 > 2) by scale factor 0.663012\nI1207 10:20:52.613873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26708 > 2) by scale factor 0.882192\nI1207 10:20:53.557896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02347 > 2) by scale factor 0.497084\nI1207 10:20:54.502143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55585 > 2) by scale factor 0.562453\nI1207 10:20:56.385560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24318 > 2) by scale factor 0.89159\nI1207 10:20:57.328352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19553 > 2) by scale factor 0.625874\nI1207 10:20:58.272073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47319 > 2) by scale factor 0.575839\nI1207 10:20:59.215003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1942 > 2) by scale factor 0.626136\nI1207 10:21:00.158093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03804 > 2) by scale factor 0.65832\nI1207 10:21:01.100925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62514 > 2) by scale factor 0.761865\nI1207 10:21:02.044306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62396 > 2) by scale factor 0.551883\nI1207 10:21:02.986933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14483 > 2) by scale factor 0.932474\nI1207 10:21:03.929890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92655 > 2) by scale factor 0.683399\nI1207 10:21:04.872262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97103 > 2) by scale factor 0.673167\nI1207 10:21:05.815737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.10303 > 2) by scale factor 0.644531\nI1207 10:21:06.758579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26305 > 2) by scale factor 0.883762\nI1207 10:21:08.641455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10018 > 2) by scale factor 0.645124\nI1207 10:21:09.584446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50687 > 2) by scale factor 0.57031\nI1207 10:21:10.527772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57626 > 2) by scale factor 0.559244\nI1207 10:21:11.470801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80512 > 2) by scale factor 0.712983\nI1207 10:21:13.354689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50924 > 2) by scale factor 0.797054\nI1207 10:21:14.297629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39348 > 2) by scale factor 0.835603\nI1207 10:21:15.240562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98422 > 2) by scale factor 0.50198\nI1207 10:21:16.183396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97085 > 2) by scale factor 0.673209\nI1207 10:21:17.126646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21141 > 2) by scale factor 0.622779\nI1207 10:21:18.069473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10098 > 2) by scale factor 0.392082\nI1207 10:21:19.012902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44475 > 2) by scale factor 0.818081\nI1207 10:21:19.956481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63707 > 2) by scale factor 0.431307\nI1207 10:21:20.900749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93705 > 2) by scale factor 0.507995\nI1207 10:21:21.843899   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84263 > 2) by scale factor 0.703574\nI1207 10:21:22.787586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70744 > 2) by scale factor 0.539456\nI1207 10:21:23.730527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4452 > 2) by scale factor 0.817928\nI1207 10:21:24.673463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27326 > 2) by scale factor 0.468026\nI1207 10:21:25.616997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82591 > 2) by scale factor 0.522751\nI1207 10:21:26.560271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66874 > 2) by scale factor 0.545147\nI1207 10:21:27.503054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72247 > 2) by scale factor 0.537277\nI1207 10:21:28.446596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.20608 > 2) by scale factor 0.384166\nI1207 10:21:29.389556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68425 > 2) by scale factor 0.426963\nI1207 10:21:30.332718   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08674 > 2) by scale factor 0.647933\nI1207 10:21:31.275820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67741 > 2) by scale factor 0.74699\nI1207 10:21:32.219049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75709 > 2) by scale factor 0.532327\nI1207 10:21:33.162799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9626 > 2) by scale factor 0.675083\nI1207 10:21:34.105726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24314 > 2) by scale factor 0.616685\nI1207 10:21:35.048764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.34267 > 2) by scale factor 0.598324\nI1207 10:21:35.992619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59699 > 2) by scale factor 0.556021\nI1207 10:21:36.935551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26972 > 2) by scale factor 0.611674\nI1207 10:21:37.879084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60267 > 2) by scale factor 0.555143\nI1207 10:21:38.822528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07392 > 2) by scale factor 0.964356\nI1207 10:21:38.834506   369 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1207 10:22:31.888092   369 solver.cpp:404]     Test net output #0: accuracy = 0.12515\nI1207 10:22:31.888553   369 solver.cpp:404]     Test net output #1: loss = 14.0792 (* 1 = 14.0792 loss)\nI1207 10:22:32.762373   369 solver.cpp:228] Iteration 11600, loss = 15.9329\nI1207 10:22:32.762425   369 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1207 10:22:32.762444   369 solver.cpp:244]     Train net output #1: loss = 15.9329 (* 1 = 15.9329 loss)\nI1207 10:22:32.840040   369 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1207 10:22:32.850076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42149 > 2) by scale factor 0.452336\nI1207 10:22:33.791028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45588 > 2) by scale factor 0.448845\nI1207 10:22:34.732339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8174 > 2) by scale factor 0.709875\nI1207 10:22:35.673254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67296 > 2) by scale factor 0.544519\nI1207 10:22:36.614228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32755 > 2) by scale factor 0.601043\nI1207 10:22:37.555258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.21231 > 2) by scale factor 0.622604\nI1207 10:22:38.496487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67595 > 2) by scale factor 0.747397\nI1207 10:22:39.437484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92294 > 2) by scale factor 0.509822\nI1207 10:22:40.378129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75086 > 2) by scale factor 0.727046\nI1207 10:22:41.319423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44909 > 2) by scale factor 0.579864\nI1207 10:22:42.260620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02337 > 2) by scale factor 0.988448\nI1207 10:22:43.201407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19197 > 2) by scale factor 0.626572\nI1207 10:22:44.142125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6841 > 2) by scale factor 0.542874\nI1207 10:22:45.083416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7285 > 2) by scale factor 0.536408\nI1207 10:22:46.024579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04365 > 2) by scale factor 0.657105\nI1207 10:22:46.965925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93642 > 2) by scale factor 0.681101\nI1207 10:22:47.906734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08712 > 2) by scale factor 0.489342\nI1207 10:22:48.847753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05879 > 2) by scale factor 0.653853\nI1207 10:22:49.788812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10408 > 2) by scale factor 0.644313\nI1207 10:22:50.729758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57677 > 2) by scale factor 0.43699\nI1207 10:22:51.670408   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73984 > 2) by scale factor 0.534782\nI1207 10:22:52.611276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0622 > 2) by scale factor 0.492344\nI1207 10:22:53.552124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48169 > 2) by scale factor 0.574433\nI1207 10:22:54.493207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94509 > 2) by scale factor 0.506959\nI1207 10:22:55.434427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26623 > 2) by scale factor 0.612327\nI1207 10:22:56.375895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65294 > 2) by scale factor 0.75388\nI1207 10:22:57.317281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9072 > 2) by scale factor 0.687946\nI1207 10:22:58.258316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75528 > 2) by scale factor 0.725879\nI1207 10:22:59.199388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65425 > 2) by scale factor 0.753509\nI1207 10:23:00.140199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68411 > 2) by scale factor 0.542872\nI1207 10:23:01.081161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46999 > 2) by scale factor 0.80972\nI1207 10:23:02.022691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30217 > 2) by scale factor 0.464881\nI1207 10:23:02.964323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4288 > 2) by scale factor 0.583294\nI1207 10:23:03.908160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51003 > 2) by scale factor 0.569796\nI1207 10:23:04.851176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.07612 > 2) by scale factor 0.65017\nI1207 10:23:05.794584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15361 > 2) by scale factor 0.928672\nI1207 10:23:06.737975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19977 > 2) by scale factor 0.625045\nI1207 10:23:07.681020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96494 > 2) by scale factor 0.67455\nI1207 10:23:08.624294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49581 > 2) by scale factor 0.572114\nI1207 10:23:09.567880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82349 > 2) by scale factor 0.708343\nI1207 10:23:10.511237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87945 > 2) by scale factor 0.694577\nI1207 10:23:11.454780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78574 > 2) by scale factor 0.717942\nI1207 10:23:12.397982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00044 > 2) by scale factor 0.666569\nI1207 10:23:13.341351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65255 > 2) by scale factor 0.429872\nI1207 10:23:14.284808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16264 > 2) by scale factor 0.632383\nI1207 10:23:15.228183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41291 > 2) by scale factor 0.453215\nI1207 10:23:16.171633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35923 > 2) by scale factor 0.595375\nI1207 10:23:17.114431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79445 > 2) by scale factor 0.527085\nI1207 10:23:18.057112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00412 > 2) by scale factor 0.499486\nI1207 10:23:19.000473   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88331 > 2) by scale factor 0.515024\nI1207 10:23:19.943779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56082 > 2) by scale factor 0.780999\nI1207 10:23:20.887320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37664 > 2) by scale factor 0.456971\nI1207 10:23:21.830659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83858 > 2) by scale factor 0.521026\nI1207 10:23:22.773706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05679 > 2) by scale factor 0.493001\nI1207 10:23:23.717335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7804 > 2) by scale factor 0.719321\nI1207 10:23:24.660748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75845 > 2) by scale factor 0.532135\nI1207 10:23:25.603886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96045 > 2) by scale factor 0.675574\nI1207 10:23:26.547005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52995 > 2) by scale factor 0.790528\nI1207 10:23:27.490244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67426 > 2) by scale factor 0.544327\nI1207 10:23:28.433743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2309 > 2) by scale factor 0.472712\nI1207 10:23:29.377316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96719 > 2) by scale factor 0.504135\nI1207 10:23:30.320667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37609 > 2) by scale factor 0.592402\nI1207 10:23:31.263700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43843 > 2) by scale factor 0.581661\nI1207 10:23:32.206920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.15189 > 2) by scale factor 0.634541\nI1207 10:23:33.150977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69882 > 2) by scale factor 0.741063\nI1207 10:23:34.094461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73312 > 2) by scale factor 0.535744\nI1207 10:23:35.038403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58279 > 2) by scale factor 0.436415\nI1207 10:23:35.981554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81957 > 2) by scale factor 0.709328\nI1207 10:23:36.924702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5663 > 2) by scale factor 0.779334\nI1207 10:23:37.868067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30136 > 2) by scale factor 0.869052\nI1207 10:23:38.811342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48493 > 2) by scale factor 0.804852\nI1207 10:23:39.754760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57075 > 2) by scale factor 0.560106\nI1207 10:23:40.698319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57503 > 2) by scale factor 0.559436\nI1207 10:23:41.641430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01797 > 2) by scale factor 0.662697\nI1207 10:23:42.584620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39456 > 2) by scale factor 0.835225\nI1207 10:23:43.528161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83673 > 2) by scale factor 0.705038\nI1207 10:23:44.471598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90952 > 2) by scale factor 0.511572\nI1207 10:23:45.414921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46595 > 2) by scale factor 0.577043\nI1207 10:23:46.358184   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32076 > 2) by scale factor 0.462881\nI1207 10:23:47.301553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5578 > 2) by scale factor 0.562145\nI1207 10:23:48.244912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61976 > 2) by scale factor 0.763429\nI1207 10:23:49.188740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37654 > 2) by scale factor 0.592323\nI1207 10:23:50.131935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47388 > 2) by scale factor 0.575726\nI1207 10:23:51.075201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10305 > 2) by scale factor 0.487442\nI1207 10:23:52.018409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14218 > 2) by scale factor 0.482837\nI1207 10:23:52.961607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57235 > 2) by scale factor 0.559855\nI1207 10:23:53.905251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90653 > 2) by scale factor 0.688107\nI1207 10:23:54.848685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73125 > 2) by scale factor 0.536013\nI1207 10:23:55.791959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57499 > 2) by scale factor 0.776703\nI1207 10:23:56.735299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47021 > 2) by scale factor 0.809648\nI1207 10:23:57.678345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38624 > 2) by scale factor 0.838137\nI1207 10:23:58.621408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83404 > 2) by scale factor 0.521642\nI1207 10:23:59.564219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.36049 > 2) by scale factor 0.595152\nI1207 10:24:00.507477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0708 > 2) by scale factor 0.491304\nI1207 10:24:01.450770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71203 > 2) by scale factor 0.737456\nI1207 10:24:02.394372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6281 > 2) by scale factor 0.551252\nI1207 10:24:03.338289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69787 > 2) by scale factor 0.540852\nI1207 10:24:04.281538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87245 > 2) by scale factor 0.696269\nI1207 10:24:05.224648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18182 > 2) by scale factor 0.916664\nI1207 10:24:06.167985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14417 > 2) by scale factor 0.93276\nI1207 10:24:06.180024   369 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1207 10:24:59.297659   369 solver.cpp:404]     Test net output #0: accuracy = 0.2128\nI1207 10:24:59.298154   369 solver.cpp:404]     Test net output #1: loss = 10.846 (* 1 = 10.846 loss)\nI1207 10:25:00.172495   369 solver.cpp:228] Iteration 11700, loss = 10.7652\nI1207 10:25:00.172546   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 10:25:00.172565   369 solver.cpp:244]     Train net output #1: loss = 10.7652 (* 1 = 10.7652 loss)\nI1207 10:25:00.248786   369 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1207 10:25:00.258822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06358 > 2) by scale factor 0.652832\nI1207 10:25:01.200131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24913 > 2) by scale factor 0.61555\nI1207 10:25:02.140786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.43017 > 2) by scale factor 0.45145\nI1207 10:25:03.081527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65366 > 2) by scale factor 0.753676\nI1207 10:25:04.022415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03489 > 2) by scale factor 0.659002\nI1207 10:25:04.963456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5493 > 2) by scale factor 0.563491\nI1207 10:25:05.904595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67931 > 2) by scale factor 0.54358\nI1207 10:25:06.845366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82886 > 2) by scale factor 0.706999\nI1207 10:25:07.785528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46453 > 2) by scale factor 0.811513\nI1207 10:25:08.726543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11779 > 2) by scale factor 0.485697\nI1207 10:25:09.667444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74929 > 2) by scale factor 0.533434\nI1207 10:25:10.608515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29431 > 2) by scale factor 0.465733\nI1207 10:25:11.549470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8871 > 2) by scale factor 0.514522\nI1207 10:25:12.490707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52128 > 2) by scale factor 0.567976\nI1207 10:25:13.431738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38845 > 2) by scale factor 0.59024\nI1207 10:25:14.372828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07079 > 2) by scale factor 0.965817\nI1207 10:25:15.313432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05814 > 2) by scale factor 0.653992\nI1207 10:25:16.254663   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67627 > 2) by scale factor 0.74731\nI1207 10:25:17.195667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72161 > 2) by scale factor 0.734859\nI1207 10:25:18.136381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77948 > 2) by scale factor 0.719558\nI1207 10:25:20.955102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01427 > 2) by scale factor 0.66351\nI1207 10:25:21.895879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24277 > 2) by scale factor 0.891755\nI1207 10:25:22.836470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24178 > 2) by scale factor 0.4715\nI1207 10:25:23.777454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62599 > 2) by scale factor 0.761619\nI1207 10:25:24.718302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57172 > 2) by scale factor 0.777688\nI1207 10:25:25.659582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50386 > 2) by scale factor 0.798766\nI1207 10:25:26.600488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68437 > 2) by scale factor 0.745054\nI1207 10:25:27.542058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41644 > 2) by scale factor 0.585404\nI1207 10:25:28.483480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78414 > 2) by scale factor 0.718355\nI1207 10:25:29.426730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24159 > 2) by scale factor 0.892222\nI1207 10:25:30.369617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06053 > 2) by scale factor 0.653482\nI1207 10:25:31.312211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.28007 > 2) by scale factor 0.609744\nI1207 10:25:32.255228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04176 > 2) by scale factor 0.657513\nI1207 10:25:33.197957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97755 > 2) by scale factor 0.671693\nI1207 10:25:34.140660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92708 > 2) by scale factor 0.683276\nI1207 10:25:35.083447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19531 > 2) by scale factor 0.476723\nI1207 10:25:36.026088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11551 > 2) by scale factor 0.64195\nI1207 10:25:36.968742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7506 > 2) by scale factor 0.727115\nI1207 10:25:37.911280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28067 > 2) by scale factor 0.609631\nI1207 10:25:38.854019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12214 > 2) by scale factor 0.485185\nI1207 10:25:39.796147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9984 > 2) by scale factor 0.667023\nI1207 10:25:40.738811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71579 > 2) by scale factor 0.538243\nI1207 10:25:41.681409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51018 > 2) by scale factor 0.569772\nI1207 10:25:42.624130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83357 > 2) by scale factor 0.521707\nI1207 10:25:43.566815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72188 > 2) by scale factor 0.537363\nI1207 10:25:44.509310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53659 > 2) by scale factor 0.565517\nI1207 10:25:45.452131   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74335 > 2) by scale factor 0.534281\nI1207 10:25:46.395279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76609 > 2) by scale factor 0.419631\nI1207 10:25:47.338495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13791 > 2) by scale factor 0.483335\nI1207 10:25:48.281819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73349 > 2) by scale factor 0.535692\nI1207 10:25:49.224432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70969 > 2) by scale factor 0.738091\nI1207 10:25:50.167196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87561 > 2) by scale factor 0.695504\nI1207 10:25:51.109797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43999 > 2) by scale factor 0.450451\nI1207 10:25:52.052441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85389 > 2) by scale factor 0.518957\nI1207 10:25:52.995404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93731 > 2) by scale factor 0.680896\nI1207 10:25:53.938119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35938 > 2) by scale factor 0.595348\nI1207 10:25:54.881048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32274 > 2) by scale factor 0.601914\nI1207 10:25:55.823896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65877 > 2) by scale factor 0.752228\nI1207 10:25:56.766715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63131 > 2) by scale factor 0.760077\nI1207 10:25:57.709381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63452 > 2) by scale factor 0.550279\nI1207 10:25:58.651949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.70421 > 2) by scale factor 0.739587\nI1207 10:25:59.594466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06826 > 2) by scale factor 0.651836\nI1207 10:26:00.537839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90194 > 2) by scale factor 0.512565\nI1207 10:26:01.480525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88107 > 2) by scale factor 0.515321\nI1207 10:26:03.363492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24557 > 2) by scale factor 0.890641\nI1207 10:26:04.306447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1988 > 2) by scale factor 0.909585\nI1207 10:26:05.249209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8422 > 2) by scale factor 0.70368\nI1207 10:26:06.192020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87551 > 2) by scale factor 0.695529\nI1207 10:26:07.134940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75506 > 2) by scale factor 0.532615\nI1207 10:26:08.078016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36838 > 2) by scale factor 0.457836\nI1207 10:26:09.021014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02254 > 2) by scale factor 0.497198\nI1207 10:26:09.963768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76449 > 2) by scale factor 0.723462\nI1207 10:26:10.906551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09749 > 2) by scale factor 0.645685\nI1207 10:26:11.849309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89754 > 2) by scale factor 0.69024\nI1207 10:26:12.792171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42859 > 2) by scale factor 0.823525\nI1207 10:26:13.734900   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04718 > 2) by scale factor 0.656343\nI1207 10:26:15.617615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68794 > 2) by scale factor 0.744065\nI1207 10:26:16.560485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7559 > 2) by scale factor 0.532496\nI1207 10:26:17.503258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76966 > 2) by scale factor 0.530552\nI1207 10:26:18.445605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89204 > 2) by scale factor 0.691553\nI1207 10:26:19.388312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90087 > 2) by scale factor 0.512706\nI1207 10:26:22.212035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23353 > 2) by scale factor 0.895442\nI1207 10:26:23.154624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36385 > 2) by scale factor 0.846079\nI1207 10:26:24.097771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19152 > 2) by scale factor 0.626661\nI1207 10:26:25.040540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52557 > 2) by scale factor 0.7919\nI1207 10:26:26.923303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02668 > 2) by scale factor 0.496687\nI1207 10:26:27.866077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55588 > 2) by scale factor 0.562449\nI1207 10:26:28.808605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80134 > 2) by scale factor 0.416551\nI1207 10:26:30.691745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26494 > 2) by scale factor 0.612568\nI1207 10:26:31.635061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.34178 > 2) by scale factor 0.598484\nI1207 10:26:32.577059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50405 > 2) by scale factor 0.444045\nI1207 10:26:33.519866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01152 > 2) by scale factor 0.498564\nI1207 10:26:33.531970   369 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1207 10:27:26.629806   369 solver.cpp:404]     Test net output #0: accuracy = 0.2076\nI1207 10:27:26.630267   369 solver.cpp:404]     Test net output #1: loss = 8.79183 (* 1 = 8.79183 loss)\nI1207 10:27:27.504256   369 solver.cpp:228] Iteration 11800, loss = 9.23541\nI1207 10:27:27.504307   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 10:27:27.504325   369 solver.cpp:244]     Train net output #1: loss = 9.23541 (* 1 = 9.23541 loss)\nI1207 10:27:27.576066   369 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1207 10:27:28.524927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61641 > 2) by scale factor 0.764407\nI1207 10:27:29.465834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70024 > 2) by scale factor 0.540506\nI1207 10:27:30.407255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07702 > 2) by scale factor 0.649979\nI1207 10:27:31.348568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97597 > 2) by scale factor 0.503022\nI1207 10:27:32.289525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92468 > 2) by scale factor 0.683834\nI1207 10:27:33.230145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57141 > 2) by scale factor 0.777784\nI1207 10:27:34.171262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98118 > 2) by scale factor 0.670876\nI1207 10:27:35.112116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.32529 > 2) by scale factor 0.601451\nI1207 10:27:36.053155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28763 > 2) by scale factor 0.874267\nI1207 10:27:36.993991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86924 > 2) by scale factor 0.697048\nI1207 10:27:37.935312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03467 > 2) by scale factor 0.982961\nI1207 10:27:38.876510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34665 > 2) by scale factor 0.460125\nI1207 10:27:39.817443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76705 > 2) by scale factor 0.419547\nI1207 10:27:40.758281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79958 > 2) by scale factor 0.714393\nI1207 10:27:41.699267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51198 > 2) by scale factor 0.796186\nI1207 10:27:42.640118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10212 > 2) by scale factor 0.644721\nI1207 10:27:43.580886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84168 > 2) by scale factor 0.70381\nI1207 10:27:44.521720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81564 > 2) by scale factor 0.524159\nI1207 10:27:45.463186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75807 > 2) by scale factor 0.725145\nI1207 10:27:46.403844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96241 > 2) by scale factor 0.675126\nI1207 10:27:47.345049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97716 > 2) by scale factor 0.671781\nI1207 10:27:48.286083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68335 > 2) by scale factor 0.745337\nI1207 10:27:49.227046   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12157 > 2) by scale factor 0.942698\nI1207 10:27:51.106994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28169 > 2) by scale factor 0.876544\nI1207 10:27:52.047737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67094 > 2) by scale factor 0.748801\nI1207 10:27:52.988647   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96848 > 2) by scale factor 0.503972\nI1207 10:27:53.929496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15032 > 2) by scale factor 0.930095\nI1207 10:27:54.869410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83321 > 2) by scale factor 0.705913\nI1207 10:27:55.810886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17276 > 2) by scale factor 0.630365\nI1207 10:27:56.752277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81467 > 2) by scale factor 0.710563\nI1207 10:27:57.695653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61109 > 2) by scale factor 0.553849\nI1207 10:27:58.638473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41063 > 2) by scale factor 0.45345\nI1207 10:27:59.581648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05505 > 2) by scale factor 0.493213\nI1207 10:28:00.525019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63235 > 2) by scale factor 0.550607\nI1207 10:28:01.468240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31283 > 2) by scale factor 0.603714\nI1207 10:28:02.411348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43877 > 2) by scale factor 0.820086\nI1207 10:28:03.355051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.50067 > 2) by scale factor 0.571319\nI1207 10:28:04.298434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05422 > 2) by scale factor 0.654831\nI1207 10:28:05.241870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80516 > 2) by scale factor 0.525603\nI1207 10:28:06.184885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85222 > 2) by scale factor 0.519181\nI1207 10:28:07.127799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.09284 > 2) by scale factor 0.392708\nI1207 10:28:08.071079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93497 > 2) by scale factor 0.508263\nI1207 10:28:09.014544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43964 > 2) by scale factor 0.581456\nI1207 10:28:09.957406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5965 > 2) by scale factor 0.435114\nI1207 10:28:10.899989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28972 > 2) by scale factor 0.607954\nI1207 10:28:11.842958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26928 > 2) by scale factor 0.468463\nI1207 10:28:12.786231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83162 > 2) by scale factor 0.41394\nI1207 10:28:13.729770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31222 > 2) by scale factor 0.603824\nI1207 10:28:14.673056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3703 > 2) by scale factor 0.59342\nI1207 10:28:15.616348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16186 > 2) by scale factor 0.632538\nI1207 10:28:16.559454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69569 > 2) by scale factor 0.741925\nI1207 10:28:17.502667   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19575 > 2) by scale factor 0.910852\nI1207 10:28:18.446002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0713 > 2) by scale factor 0.651191\nI1207 10:28:19.388352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48526 > 2) by scale factor 0.804746\nI1207 10:28:20.331655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14643 > 2) by scale factor 0.635641\nI1207 10:28:21.275049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94237 > 2) by scale factor 0.679724\nI1207 10:28:22.218189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01086 > 2) by scale factor 0.498647\nI1207 10:28:23.161424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13536 > 2) by scale factor 0.483634\nI1207 10:28:24.104681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37032 > 2) by scale factor 0.593416\nI1207 10:28:25.047747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80527 > 2) by scale factor 0.525586\nI1207 10:28:25.990682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57336 > 2) by scale factor 0.777195\nI1207 10:28:26.933866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42911 > 2) by scale factor 0.823349\nI1207 10:28:28.818277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01664 > 2) by scale factor 0.662989\nI1207 10:28:29.761461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60212 > 2) by scale factor 0.555228\nI1207 10:28:30.704944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76109 > 2) by scale factor 0.531761\nI1207 10:28:31.648008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.9894 > 2) by scale factor 0.501329\nI1207 10:28:32.591181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86197 > 2) by scale factor 0.698819\nI1207 10:28:33.534588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10211 > 2) by scale factor 0.644721\nI1207 10:28:34.477823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66817 > 2) by scale factor 0.749576\nI1207 10:28:35.421264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4763 > 2) by scale factor 0.807656\nI1207 10:28:36.364612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7218 > 2) by scale factor 0.734808\nI1207 10:28:37.307754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47882 > 2) by scale factor 0.574907\nI1207 10:28:39.190790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40402 > 2) by scale factor 0.45413\nI1207 10:28:40.134171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32934 > 2) by scale factor 0.858613\nI1207 10:28:41.077337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38692 > 2) by scale factor 0.837899\nI1207 10:28:42.020660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32502 > 2) by scale factor 0.6015\nI1207 10:28:42.963508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79903 > 2) by scale factor 0.526451\nI1207 10:28:43.906538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56323 > 2) by scale factor 0.780265\nI1207 10:28:44.849566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70467 > 2) by scale factor 0.739462\nI1207 10:28:45.792938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71507 > 2) by scale factor 0.73663\nI1207 10:28:46.735796   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48345 > 2) by scale factor 0.805331\nI1207 10:28:47.678644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15125 > 2) by scale factor 0.929691\nI1207 10:28:49.562422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4026 > 2) by scale factor 0.587785\nI1207 10:28:50.505151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4363 > 2) by scale factor 0.582022\nI1207 10:28:51.447930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13986 > 2) by scale factor 0.636971\nI1207 10:28:52.390290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85089 > 2) by scale factor 0.51936\nI1207 10:28:53.333111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93434 > 2) by scale factor 0.681583\nI1207 10:28:54.275460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3303 > 2) by scale factor 0.858259\nI1207 10:28:55.218039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06782 > 2) by scale factor 0.651929\nI1207 10:28:56.160527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81903 > 2) by scale factor 0.709465\nI1207 10:28:57.102988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20261 > 2) by scale factor 0.62449\nI1207 10:28:58.046226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26155 > 2) by scale factor 0.88435\nI1207 10:28:58.989449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85123 > 2) by scale factor 0.519315\nI1207 10:28:59.932193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10323 > 2) by scale factor 0.48742\nI1207 10:29:00.874908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.09673 > 2) by scale factor 0.645843\nI1207 10:29:00.886278   369 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1207 10:29:53.906540   369 solver.cpp:404]     Test net output #0: accuracy = 0.1795\nI1207 10:29:53.907032   369 solver.cpp:404]     Test net output #1: loss = 10.6482 (* 1 = 10.6482 loss)\nI1207 10:29:54.780197   369 solver.cpp:228] Iteration 11900, loss = 14.1291\nI1207 10:29:54.780249   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 10:29:54.780268   369 solver.cpp:244]     Train net output #1: loss = 14.1291 (* 1 = 14.1291 loss)\nI1207 10:29:54.853291   369 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1207 10:29:54.863509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41294 > 2) by scale factor 0.828863\nI1207 10:29:55.803870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52301 > 2) by scale factor 0.792702\nI1207 10:29:56.744525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28248 > 2) by scale factor 0.467019\nI1207 10:29:57.684742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80245 > 2) by scale factor 0.713662\nI1207 10:29:58.625433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09529 > 2) by scale factor 0.488366\nI1207 10:29:59.566313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53111 > 2) by scale factor 0.566394\nI1207 10:30:00.506419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43523 > 2) by scale factor 0.582203\nI1207 10:30:01.446682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41893 > 2) by scale factor 0.584978\nI1207 10:30:02.386873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6258 > 2) by scale factor 0.551603\nI1207 10:30:03.326691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.36461 > 2) by scale factor 0.845805\nI1207 10:30:04.267030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5064 > 2) by scale factor 0.797957\nI1207 10:30:05.207458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2481 > 2) by scale factor 0.470799\nI1207 10:30:06.147989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79738 > 2) by scale factor 0.526679\nI1207 10:30:07.088809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62974 > 2) by scale factor 0.551004\nI1207 10:30:08.029307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30406 > 2) by scale factor 0.605317\nI1207 10:30:08.970088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96152 > 2) by scale factor 0.504857\nI1207 10:30:09.910390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34595 > 2) by scale factor 0.852535\nI1207 10:30:10.851253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90012 > 2) by scale factor 0.512805\nI1207 10:30:12.730489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43334 > 2) by scale factor 0.821916\nI1207 10:30:13.671237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16392 > 2) by scale factor 0.924248\nI1207 10:30:14.612171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01447 > 2) by scale factor 0.992815\nI1207 10:30:15.552470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31378 > 2) by scale factor 0.60354\nI1207 10:30:16.493387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60711 > 2) by scale factor 0.55446\nI1207 10:30:17.434341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72924 > 2) by scale factor 0.732804\nI1207 10:30:18.374763   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1576 > 2) by scale factor 0.633392\nI1207 10:30:19.315637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3404 > 2) by scale factor 0.59873\nI1207 10:30:20.256242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78598 > 2) by scale factor 0.528265\nI1207 10:30:21.197630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13274 > 2) by scale factor 0.638418\nI1207 10:30:22.137923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91383 > 2) by scale factor 0.686381\nI1207 10:30:23.079887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28495 > 2) by scale factor 0.46675\nI1207 10:30:24.023283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19935 > 2) by scale factor 0.476265\nI1207 10:30:24.966990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28925 > 2) by scale factor 0.608041\nI1207 10:30:25.909263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11702 > 2) by scale factor 0.485788\nI1207 10:30:26.851843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07814 > 2) by scale factor 0.649743\nI1207 10:30:27.793618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88788 > 2) by scale factor 0.69255\nI1207 10:30:28.736647   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44886 > 2) by scale factor 0.579902\nI1207 10:30:29.679692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88118 > 2) by scale factor 0.69416\nI1207 10:30:30.622596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08195 > 2) by scale factor 0.489962\nI1207 10:30:31.565592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.25936 > 2) by scale factor 0.613618\nI1207 10:30:32.507931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75618 > 2) by scale factor 0.532456\nI1207 10:30:33.450804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99879 > 2) by scale factor 0.500152\nI1207 10:30:34.393589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26505 > 2) by scale factor 0.468928\nI1207 10:30:35.336570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17577 > 2) by scale factor 0.478953\nI1207 10:30:36.279191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4918 > 2) by scale factor 0.802633\nI1207 10:30:37.222072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24691 > 2) by scale factor 0.890113\nI1207 10:30:38.164867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72577 > 2) by scale factor 0.536802\nI1207 10:30:39.107753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73348 > 2) by scale factor 0.535693\nI1207 10:30:40.049705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00142 > 2) by scale factor 0.666351\nI1207 10:30:40.992688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0623 > 2) by scale factor 0.969791\nI1207 10:30:41.936172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31044 > 2) by scale factor 0.865638\nI1207 10:30:42.878811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66323 > 2) by scale factor 0.750967\nI1207 10:30:43.820983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88188 > 2) by scale factor 0.515214\nI1207 10:30:44.763653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10008 > 2) by scale factor 0.645145\nI1207 10:30:45.705709   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06482 > 2) by scale factor 0.652566\nI1207 10:30:46.648561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47619 > 2) by scale factor 0.807691\nI1207 10:30:47.591383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45488 > 2) by scale factor 0.578891\nI1207 10:30:48.532954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5622 > 2) by scale factor 0.78058\nI1207 10:30:49.475121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33281 > 2) by scale factor 0.600094\nI1207 10:30:50.417443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44654 > 2) by scale factor 0.580292\nI1207 10:30:51.359689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37526 > 2) by scale factor 0.592547\nI1207 10:30:52.302055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61831 > 2) by scale factor 0.763852\nI1207 10:30:53.244822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38395 > 2) by scale factor 0.838943\nI1207 10:30:54.186657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71937 > 2) by scale factor 0.537726\nI1207 10:30:55.129755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37029 > 2) by scale factor 0.59342\nI1207 10:30:56.071981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76206 > 2) by scale factor 0.531624\nI1207 10:30:57.014040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41449 > 2) by scale factor 0.585739\nI1207 10:30:57.956522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07506 > 2) by scale factor 0.650394\nI1207 10:30:58.899847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.73162 > 2) by scale factor 0.422688\nI1207 10:30:59.842887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58061 > 2) by scale factor 0.775011\nI1207 10:31:00.785768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95804 > 2) by scale factor 0.676123\nI1207 10:31:01.728705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40162 > 2) by scale factor 0.83277\nI1207 10:31:02.671104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94517 > 2) by scale factor 0.679079\nI1207 10:31:03.613777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6404 > 2) by scale factor 0.757462\nI1207 10:31:04.556468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00568 > 2) by scale factor 0.665406\nI1207 10:31:05.499174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12875 > 2) by scale factor 0.639232\nI1207 10:31:06.440971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08023 > 2) by scale factor 0.649303\nI1207 10:31:07.383303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01589 > 2) by scale factor 0.663153\nI1207 10:31:08.325626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05153 > 2) by scale factor 0.493641\nI1207 10:31:09.268728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41833 > 2) by scale factor 0.585081\nI1207 10:31:10.211341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03327 > 2) by scale factor 0.495876\nI1207 10:31:11.154410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32906 > 2) by scale factor 0.60077\nI1207 10:31:12.096524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50669 > 2) by scale factor 0.570339\nI1207 10:31:13.038530   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69721 > 2) by scale factor 0.540948\nI1207 10:31:13.980954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42833 > 2) by scale factor 0.583374\nI1207 10:31:14.923737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62703 > 2) by scale factor 0.551416\nI1207 10:31:15.865918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76463 > 2) by scale factor 0.723423\nI1207 10:31:16.808962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49915 > 2) by scale factor 0.571568\nI1207 10:31:17.751799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16531 > 2) by scale factor 0.923655\nI1207 10:31:18.694489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99966 > 2) by scale factor 0.666743\nI1207 10:31:19.636770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85808 > 2) by scale factor 0.69977\nI1207 10:31:20.579275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70449 > 2) by scale factor 0.73951\nI1207 10:31:21.521338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54242 > 2) by scale factor 0.564586\nI1207 10:31:22.463620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13774 > 2) by scale factor 0.93557\nI1207 10:31:23.406450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30412 > 2) by scale factor 0.605305\nI1207 10:31:24.349098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75591 > 2) by scale factor 0.725713\nI1207 10:31:26.233211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96175 > 2) by scale factor 0.675276\nI1207 10:31:27.176170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.5549 > 2) by scale factor 0.78281\nI1207 10:31:28.118659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09071 > 2) by scale factor 0.647101\nI1207 10:31:28.130738   369 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1207 10:32:21.217566   369 solver.cpp:404]     Test net output #0: accuracy = 0.16605\nI1207 10:32:21.218050   369 solver.cpp:404]     Test net output #1: loss = 14.0892 (* 1 = 14.0892 loss)\nI1207 10:32:22.092393   369 solver.cpp:228] Iteration 12000, loss = 12.0707\nI1207 10:32:22.092454   369 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1207 10:32:22.092483   369 solver.cpp:244]     Train net output #1: loss = 12.0707 (* 1 = 12.0707 loss)\nI1207 10:32:22.167942   369 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1207 10:32:22.178063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82047 > 2) by scale factor 0.523495\nI1207 10:32:23.119455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56419 > 2) by scale factor 0.561137\nI1207 10:32:24.060441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18081 > 2) by scale factor 0.628771\nI1207 10:32:25.001767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72966 > 2) by scale factor 0.536242\nI1207 10:32:25.942814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55117 > 2) by scale factor 0.783954\nI1207 10:32:26.884194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80416 > 2) by scale factor 0.713226\nI1207 10:32:27.825253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42843 > 2) by scale factor 0.451627\nI1207 10:32:28.766803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65623 > 2) by scale factor 0.429532\nI1207 10:32:29.707494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.42644 > 2) by scale factor 0.583697\nI1207 10:32:30.648811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83583 > 2) by scale factor 0.413579\nI1207 10:32:31.589958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3177 > 2) by scale factor 0.46321\nI1207 10:32:32.531270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80237 > 2) by scale factor 0.525988\nI1207 10:32:33.472383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86888 > 2) by scale factor 0.516946\nI1207 10:32:34.413622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4705 > 2) by scale factor 0.576287\nI1207 10:32:35.354547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9049 > 2) by scale factor 0.688491\nI1207 10:32:36.295465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65603 > 2) by scale factor 0.753004\nI1207 10:32:37.236691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57135 > 2) by scale factor 0.560012\nI1207 10:32:38.177819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58423 > 2) by scale factor 0.557999\nI1207 10:32:39.118577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92384 > 2) by scale factor 0.509705\nI1207 10:32:40.059202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39712 > 2) by scale factor 0.454843\nI1207 10:32:41.000177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73231 > 2) by scale factor 0.535861\nI1207 10:32:41.940665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69458 > 2) by scale factor 0.541333\nI1207 10:32:42.880674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18151 > 2) by scale factor 0.478297\nI1207 10:32:43.820195   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43896 > 2) by scale factor 0.82002\nI1207 10:32:44.760149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15352 > 2) by scale factor 0.634212\nI1207 10:32:45.700188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87458 > 2) by scale factor 0.516185\nI1207 10:32:46.640422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11129 > 2) by scale factor 0.642821\nI1207 10:32:47.580727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98132 > 2) by scale factor 0.670845\nI1207 10:32:48.520643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66386 > 2) by scale factor 0.545873\nI1207 10:32:49.459825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1941 > 2) by scale factor 0.626155\nI1207 10:32:50.399919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70895 > 2) by scale factor 0.738293\nI1207 10:32:51.342705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0684 > 2) by scale factor 0.491594\nI1207 10:32:52.283421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78154 > 2) by scale factor 0.719027\nI1207 10:32:53.223768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49855 > 2) by scale factor 0.800465\nI1207 10:32:54.163398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7198 > 2) by scale factor 0.537664\nI1207 10:32:55.105533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66735 > 2) by scale factor 0.749808\nI1207 10:32:56.048149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8066 > 2) by scale factor 0.712606\nI1207 10:32:56.990983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.96101 > 2) by scale factor 0.675444\nI1207 10:32:57.934031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90429 > 2) by scale factor 0.688636\nI1207 10:32:58.876785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36491 > 2) by scale factor 0.4582\nI1207 10:32:59.819399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63342 > 2) by scale factor 0.550446\nI1207 10:33:00.761595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54728 > 2) by scale factor 0.785153\nI1207 10:33:01.704035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38398 > 2) by scale factor 0.838932\nI1207 10:33:02.646777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06756 > 2) by scale factor 0.651984\nI1207 10:33:03.589231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1772 > 2) by scale factor 0.91861\nI1207 10:33:04.532037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7279 > 2) by scale factor 0.733166\nI1207 10:33:07.357259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6554 > 2) by scale factor 0.753182\nI1207 10:33:08.300941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5574 > 2) by scale factor 0.782044\nI1207 10:33:09.244688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55492 > 2) by scale factor 0.782805\nI1207 10:33:10.187889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70696 > 2) by scale factor 0.738835\nI1207 10:33:11.132268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65917 > 2) by scale factor 0.429261\nI1207 10:33:12.076273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24366 > 2) by scale factor 0.471291\nI1207 10:33:13.019656   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13504 > 2) by scale factor 0.637951\nI1207 10:33:13.963273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1043 > 2) by scale factor 0.644269\nI1207 10:33:14.906554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17917 > 2) by scale factor 0.917782\nI1207 10:33:15.849730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32518 > 2) by scale factor 0.601472\nI1207 10:33:16.792696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65425 > 2) by scale factor 0.753508\nI1207 10:33:17.735896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93194 > 2) by scale factor 0.682142\nI1207 10:33:18.679183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62437 > 2) by scale factor 0.551821\nI1207 10:33:19.622963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00467 > 2) by scale factor 0.499417\nI1207 10:33:20.566187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99557 > 2) by scale factor 0.667652\nI1207 10:33:21.509002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5732 > 2) by scale factor 0.777244\nI1207 10:33:22.453021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01961 > 2) by scale factor 0.662337\nI1207 10:33:23.396559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62207 > 2) by scale factor 0.762757\nI1207 10:33:24.340410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1982 > 2) by scale factor 0.909836\nI1207 10:33:25.284340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92958 > 2) by scale factor 0.682693\nI1207 10:33:26.227432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.89686 > 2) by scale factor 0.690403\nI1207 10:33:27.170874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42398 > 2) by scale factor 0.584116\nI1207 10:33:28.114257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7248 > 2) by scale factor 0.536942\nI1207 10:33:29.057638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51562 > 2) by scale factor 0.795034\nI1207 10:33:30.000571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4066 > 2) by scale factor 0.587095\nI1207 10:33:30.944516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50603 > 2) by scale factor 0.798075\nI1207 10:33:31.888711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94282 > 2) by scale factor 0.679621\nI1207 10:33:32.832242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03971 > 2) by scale factor 0.657957\nI1207 10:33:33.775161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95449 > 2) by scale factor 0.676937\nI1207 10:33:34.719323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54102 > 2) by scale factor 0.564809\nI1207 10:33:35.663254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98717 > 2) by scale factor 0.66953\nI1207 10:33:36.606765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98152 > 2) by scale factor 0.502321\nI1207 10:33:38.492476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79047 > 2) by scale factor 0.527638\nI1207 10:33:39.436789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10444 > 2) by scale factor 0.487277\nI1207 10:33:40.379456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40233 > 2) by scale factor 0.587833\nI1207 10:33:41.323012   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21195 > 2) by scale factor 0.622676\nI1207 10:33:42.265651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32307 > 2) by scale factor 0.601853\nI1207 10:33:43.209421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85353 > 2) by scale factor 0.700886\nI1207 10:33:44.153198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3557 > 2) by scale factor 0.596\nI1207 10:33:45.096086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16737 > 2) by scale factor 0.631439\nI1207 10:33:46.039033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29266 > 2) by scale factor 0.465911\nI1207 10:33:46.981499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16148 > 2) by scale factor 0.480599\nI1207 10:33:47.924242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51327 > 2) by scale factor 0.795776\nI1207 10:33:48.866868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11733 > 2) by scale factor 0.944584\nI1207 10:33:49.809296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27148 > 2) by scale factor 0.468222\nI1207 10:33:50.752163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4303 > 2) by scale factor 0.451436\nI1207 10:33:51.694989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02253 > 2) by scale factor 0.497199\nI1207 10:33:52.637475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55492 > 2) by scale factor 0.439086\nI1207 10:33:53.579556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16925 > 2) by scale factor 0.631064\nI1207 10:33:54.521877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.7365 > 2) by scale factor 0.730861\nI1207 10:33:55.463438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76429 > 2) by scale factor 0.723513\nI1207 10:33:55.474776   369 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1207 10:34:48.517491   369 solver.cpp:404]     Test net output #0: accuracy = 0.1757\nI1207 10:34:48.517983   369 solver.cpp:404]     Test net output #1: loss = 8.26731 (* 1 = 8.26731 loss)\nI1207 10:34:49.391090   369 solver.cpp:228] Iteration 12100, loss = 8.18056\nI1207 10:34:49.391139   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 10:34:49.391166   369 solver.cpp:244]     Train net output #1: loss = 8.18056 (* 1 = 8.18056 loss)\nI1207 10:34:49.462920   369 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1207 10:34:49.472735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99055 > 2) by scale factor 0.668774\nI1207 10:34:50.412209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27852 > 2) by scale factor 0.610031\nI1207 10:34:51.351696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58046 > 2) by scale factor 0.558587\nI1207 10:34:52.292121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4366 > 2) by scale factor 0.820817\nI1207 10:34:53.232806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01877 > 2) by scale factor 0.662521\nI1207 10:34:54.173198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11195 > 2) by scale factor 0.642684\nI1207 10:34:55.113935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91327 > 2) by scale factor 0.511082\nI1207 10:34:56.054404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59202 > 2) by scale factor 0.771599\nI1207 10:34:56.995043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.48564 > 2) by scale factor 0.804622\nI1207 10:34:57.935447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72991 > 2) by scale factor 0.536205\nI1207 10:34:58.875982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23919 > 2) by scale factor 0.617439\nI1207 10:34:59.817036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36395 > 2) by scale factor 0.846042\nI1207 10:35:00.757619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17301 > 2) by scale factor 0.479271\nI1207 10:35:01.697748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9921 > 2) by scale factor 0.668427\nI1207 10:35:02.638476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93565 > 2) by scale factor 0.68128\nI1207 10:35:03.578918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72093 > 2) by scale factor 0.735043\nI1207 10:35:04.519377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01599 > 2) by scale factor 0.663131\nI1207 10:35:05.459283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07015 > 2) by scale factor 0.651434\nI1207 10:35:06.399602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65964 > 2) by scale factor 0.75198\nI1207 10:35:07.340325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46092 > 2) by scale factor 0.812703\nI1207 10:35:08.280858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26766 > 2) by scale factor 0.881968\nI1207 10:35:09.221591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82254 > 2) by scale factor 0.708581\nI1207 10:35:10.162055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79148 > 2) by scale factor 0.716467\nI1207 10:35:11.103127   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62002 > 2) by scale factor 0.552483\nI1207 10:35:12.044431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16236 > 2) by scale factor 0.63244\nI1207 10:35:12.984905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3349 > 2) by scale factor 0.599719\nI1207 10:35:13.925096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48804 > 2) by scale factor 0.573389\nI1207 10:35:14.865371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01802 > 2) by scale factor 0.497757\nI1207 10:35:15.805521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57514 > 2) by scale factor 0.776658\nI1207 10:35:16.746356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20405 > 2) by scale factor 0.62421\nI1207 10:35:17.685773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46662 > 2) by scale factor 0.576931\nI1207 10:35:18.625977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54418 > 2) by scale factor 0.440124\nI1207 10:35:19.569808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19612 > 2) by scale factor 0.625758\nI1207 10:35:20.513044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43163 > 2) by scale factor 0.582813\nI1207 10:35:21.455557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02589 > 2) by scale factor 0.660962\nI1207 10:35:22.398365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17378 > 2) by scale factor 0.630163\nI1207 10:35:23.341423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18046 > 2) by scale factor 0.91724\nI1207 10:35:24.283856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.44319 > 2) by scale factor 0.818602\nI1207 10:35:25.226672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61403 > 2) by scale factor 0.765104\nI1207 10:35:26.169168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01482 > 2) by scale factor 0.992645\nI1207 10:35:27.112260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3671 > 2) by scale factor 0.593984\nI1207 10:35:28.054978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74393 > 2) by scale factor 0.728882\nI1207 10:35:28.998239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49083 > 2) by scale factor 0.445352\nI1207 10:35:29.940651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7285 > 2) by scale factor 0.536408\nI1207 10:35:31.823410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57891 > 2) by scale factor 0.775522\nI1207 10:35:32.766152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04673 > 2) by scale factor 0.656442\nI1207 10:35:33.709039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12991 > 2) by scale factor 0.638997\nI1207 10:35:34.652176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5193 > 2) by scale factor 0.793873\nI1207 10:35:35.596155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40083 > 2) by scale factor 0.588091\nI1207 10:35:36.540143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05465 > 2) by scale factor 0.654739\nI1207 10:35:37.483619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5249 > 2) by scale factor 0.792111\nI1207 10:35:38.427726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79037 > 2) by scale factor 0.71675\nI1207 10:35:39.371526   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3461 > 2) by scale factor 0.852479\nI1207 10:35:40.314975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26803 > 2) by scale factor 0.881822\nI1207 10:35:41.258739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03396 > 2) by scale factor 0.495791\nI1207 10:35:42.202587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35623 > 2) by scale factor 0.595907\nI1207 10:35:43.146493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5633 > 2) by scale factor 0.561277\nI1207 10:35:44.090301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21384 > 2) by scale factor 0.622308\nI1207 10:35:45.033809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88856 > 2) by scale factor 0.692386\nI1207 10:35:45.977145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93784 > 2) by scale factor 0.680771\nI1207 10:35:46.921245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27188 > 2) by scale factor 0.611269\nI1207 10:35:47.865465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99624 > 2) by scale factor 0.400301\nI1207 10:35:48.809650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1285 > 2) by scale factor 0.484437\nI1207 10:35:49.754262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63648 > 2) by scale factor 0.549982\nI1207 10:35:50.698169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26127 > 2) by scale factor 0.613258\nI1207 10:35:51.642020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62976 > 2) by scale factor 0.760525\nI1207 10:35:52.586053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.60436 > 2) by scale factor 0.767943\nI1207 10:35:53.529148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50375 > 2) by scale factor 0.798803\nI1207 10:35:54.472642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28931 > 2) by scale factor 0.608029\nI1207 10:35:55.416576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68857 > 2) by scale factor 0.743889\nI1207 10:35:56.360564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03335 > 2) by scale factor 0.659337\nI1207 10:35:57.304545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99095 > 2) by scale factor 0.668685\nI1207 10:35:58.248211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60005 > 2) by scale factor 0.769217\nI1207 10:35:59.191821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62393 > 2) by scale factor 0.551888\nI1207 10:36:00.134850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34767 > 2) by scale factor 0.59743\nI1207 10:36:01.078894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01225 > 2) by scale factor 0.993911\nI1207 10:36:02.022143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77115 > 2) by scale factor 0.721723\nI1207 10:36:02.966229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09279 > 2) by scale factor 0.488664\nI1207 10:36:03.909572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36099 > 2) by scale factor 0.595062\nI1207 10:36:04.853433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77595 > 2) by scale factor 0.529669\nI1207 10:36:05.796764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71724 > 2) by scale factor 0.538034\nI1207 10:36:06.740088   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16188 > 2) by scale factor 0.480552\nI1207 10:36:07.682797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39879 > 2) by scale factor 0.588445\nI1207 10:36:08.625779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14304 > 2) by scale factor 0.482737\nI1207 10:36:09.568820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36202 > 2) by scale factor 0.458504\nI1207 10:36:10.512234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31786 > 2) by scale factor 0.463192\nI1207 10:36:11.456609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04608 > 2) by scale factor 0.656581\nI1207 10:36:12.399513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40343 > 2) by scale factor 0.587642\nI1207 10:36:13.342789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19576 > 2) by scale factor 0.476672\nI1207 10:36:14.286016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87831 > 2) by scale factor 0.409978\nI1207 10:36:15.229403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80878 > 2) by scale factor 0.712053\nI1207 10:36:16.173683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19507 > 2) by scale factor 0.625965\nI1207 10:36:17.117610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4395 > 2) by scale factor 0.450501\nI1207 10:36:18.060964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42364 > 2) by scale factor 0.452116\nI1207 10:36:19.004786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80946 > 2) by scale factor 0.525008\nI1207 10:36:19.949156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.40719 > 2) by scale factor 0.586994\nI1207 10:36:20.893026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11518 > 2) by scale factor 0.486005\nI1207 10:36:21.836392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80853 > 2) by scale factor 0.712117\nI1207 10:36:22.780783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62015 > 2) by scale factor 0.763316\nI1207 10:36:22.792726   369 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1207 10:37:15.847514   369 solver.cpp:404]     Test net output #0: accuracy = 0.1833\nI1207 10:37:15.848008   369 solver.cpp:404]     Test net output #1: loss = 16.7725 (* 1 = 16.7725 loss)\nI1207 10:37:16.722333   369 solver.cpp:228] Iteration 12200, loss = 19.4061\nI1207 10:37:16.722379   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 10:37:16.722405   369 solver.cpp:244]     Train net output #1: loss = 19.4061 (* 1 = 19.4061 loss)\nI1207 10:37:16.801517   369 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1207 10:37:16.811710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32343 > 2) by scale factor 0.601789\nI1207 10:37:17.752221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7118 > 2) by scale factor 0.538822\nI1207 10:37:18.692037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55796 > 2) by scale factor 0.781873\nI1207 10:37:19.633093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92337 > 2) by scale factor 0.509765\nI1207 10:37:20.572937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31117 > 2) by scale factor 0.604016\nI1207 10:37:21.513602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48041 > 2) by scale factor 0.574646\nI1207 10:37:22.454954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.55817 > 2) by scale factor 0.562086\nI1207 10:37:23.395923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98713 > 2) by scale factor 0.669538\nI1207 10:37:24.336596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32478 > 2) by scale factor 0.601543\nI1207 10:37:25.277117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04069 > 2) by scale factor 0.657746\nI1207 10:37:26.217612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53411 > 2) by scale factor 0.565914\nI1207 10:37:27.158017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55425 > 2) by scale factor 0.78301\nI1207 10:37:28.098795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86481 > 2) by scale factor 0.51749\nI1207 10:37:29.039947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64588 > 2) by scale factor 0.548564\nI1207 10:37:29.980547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94228 > 2) by scale factor 0.50732\nI1207 10:37:30.920910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05746 > 2) by scale factor 0.395455\nI1207 10:37:31.861332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66973 > 2) by scale factor 0.545\nI1207 10:37:32.802372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12358 > 2) by scale factor 0.485016\nI1207 10:37:33.742626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30881 > 2) by scale factor 0.604446\nI1207 10:37:34.683902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61134 > 2) by scale factor 0.55381\nI1207 10:37:35.624476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77284 > 2) by scale factor 0.419038\nI1207 10:37:36.565625   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03564 > 2) by scale factor 0.397169\nI1207 10:37:37.505272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76888 > 2) by scale factor 0.722313\nI1207 10:37:38.444460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12576 > 2) by scale factor 0.639844\nI1207 10:37:39.384407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51991 > 2) by scale factor 0.793679\nI1207 10:37:40.323925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81324 > 2) by scale factor 0.524489\nI1207 10:37:41.263809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16311 > 2) by scale factor 0.632289\nI1207 10:37:42.203449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72019 > 2) by scale factor 0.537607\nI1207 10:37:43.142722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11444 > 2) by scale factor 0.642169\nI1207 10:37:44.081944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95562 > 2) by scale factor 0.676678\nI1207 10:37:45.021432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.17882 > 2) by scale factor 0.386188\nI1207 10:37:45.960798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20004 > 2) by scale factor 0.624993\nI1207 10:37:46.901433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99022 > 2) by scale factor 0.668846\nI1207 10:37:47.844827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38359 > 2) by scale factor 0.591088\nI1207 10:37:48.788228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19714 > 2) by scale factor 0.625559\nI1207 10:37:49.731241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.64384 > 2) by scale factor 0.430678\nI1207 10:37:50.674039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0483 > 2) by scale factor 0.656103\nI1207 10:37:51.616051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38429 > 2) by scale factor 0.838826\nI1207 10:37:52.559314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0195 > 2) by scale factor 0.66236\nI1207 10:37:53.501601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15488 > 2) by scale factor 0.633938\nI1207 10:37:54.444146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49965 > 2) by scale factor 0.800111\nI1207 10:37:55.387586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29022 > 2) by scale factor 0.466176\nI1207 10:37:56.330484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.16595 > 2) by scale factor 0.38715\nI1207 10:37:57.273561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08228 > 2) by scale factor 0.489922\nI1207 10:37:58.216223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29424 > 2) by scale factor 0.46574\nI1207 10:37:59.159559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30619 > 2) by scale factor 0.604927\nI1207 10:38:00.102613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70345 > 2) by scale factor 0.42522\nI1207 10:38:01.044909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75734 > 2) by scale factor 0.725336\nI1207 10:38:01.988150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28496 > 2) by scale factor 0.608835\nI1207 10:38:02.931329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50352 > 2) by scale factor 0.798874\nI1207 10:38:03.873931   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34671 > 2) by scale factor 0.852256\nI1207 10:38:04.816911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25814 > 2) by scale factor 0.469688\nI1207 10:38:05.759734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35438 > 2) by scale factor 0.849482\nI1207 10:38:06.703095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76366 > 2) by scale factor 0.723677\nI1207 10:38:07.646044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73602 > 2) by scale factor 0.730989\nI1207 10:38:08.589520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56041 > 2) by scale factor 0.561733\nI1207 10:38:09.532163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66147 > 2) by scale factor 0.429049\nI1207 10:38:11.415999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57887 > 2) by scale factor 0.558836\nI1207 10:38:12.358621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43768 > 2) by scale factor 0.581789\nI1207 10:38:13.301702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00311 > 2) by scale factor 0.665977\nI1207 10:38:14.244827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8506 > 2) by scale factor 0.701607\nI1207 10:38:15.187891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40748 > 2) by scale factor 0.586944\nI1207 10:38:16.130934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51667 > 2) by scale factor 0.56872\nI1207 10:38:17.074071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28365 > 2) by scale factor 0.466892\nI1207 10:38:18.016682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.09411 > 2) by scale factor 0.488507\nI1207 10:38:18.959779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1598 > 2) by scale factor 0.480792\nI1207 10:38:19.902582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74145 > 2) by scale factor 0.534552\nI1207 10:38:20.845548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4424 > 2) by scale factor 0.818868\nI1207 10:38:21.788182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63304 > 2) by scale factor 0.759578\nI1207 10:38:22.731583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59151 > 2) by scale factor 0.77175\nI1207 10:38:23.674757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78238 > 2) by scale factor 0.528768\nI1207 10:38:24.618358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34054 > 2) by scale factor 0.598706\nI1207 10:38:25.562041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81865 > 2) by scale factor 0.523745\nI1207 10:38:26.505587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61391 > 2) by scale factor 0.765139\nI1207 10:38:27.449396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07894 > 2) by scale factor 0.490323\nI1207 10:38:28.392868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57273 > 2) by scale factor 0.777384\nI1207 10:38:30.277487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85274 > 2) by scale factor 0.519111\nI1207 10:38:31.221030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73074 > 2) by scale factor 0.732402\nI1207 10:38:32.164891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52369 > 2) by scale factor 0.792491\nI1207 10:38:33.108019   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53923 > 2) by scale factor 0.440603\nI1207 10:38:34.051100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88976 > 2) by scale factor 0.51417\nI1207 10:38:34.994750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85354 > 2) by scale factor 0.519003\nI1207 10:38:35.938396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83124 > 2) by scale factor 0.706404\nI1207 10:38:36.881661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44073 > 2) by scale factor 0.581272\nI1207 10:38:37.825484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06655 > 2) by scale factor 0.491817\nI1207 10:38:38.768672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6179 > 2) by scale factor 0.552807\nI1207 10:38:39.711833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64699 > 2) by scale factor 0.430386\nI1207 10:38:40.655155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7986 > 2) by scale factor 0.52651\nI1207 10:38:41.598433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48649 > 2) by scale factor 0.445782\nI1207 10:38:43.482689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75437 > 2) by scale factor 0.726118\nI1207 10:38:44.425616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06835 > 2) by scale factor 0.651817\nI1207 10:38:45.368850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52364 > 2) by scale factor 0.792507\nI1207 10:38:46.312155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21907 > 2) by scale factor 0.474038\nI1207 10:38:47.255986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.89494 > 2) by scale factor 0.513487\nI1207 10:38:48.199636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6096 > 2) by scale factor 0.554078\nI1207 10:38:49.142794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19346 > 2) by scale factor 0.911802\nI1207 10:38:50.086032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72153 > 2) by scale factor 0.537414\nI1207 10:38:50.098006   369 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1207 10:39:43.137987   369 solver.cpp:404]     Test net output #0: accuracy = 0.12995\nI1207 10:39:43.138465   369 solver.cpp:404]     Test net output #1: loss = 18.8336 (* 1 = 18.8336 loss)\nI1207 10:39:44.011607   369 solver.cpp:228] Iteration 12300, loss = 17.7245\nI1207 10:39:44.011656   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 10:39:44.011682   369 solver.cpp:244]     Train net output #1: loss = 17.7245 (* 1 = 17.7245 loss)\nI1207 10:39:44.085999   369 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1207 10:39:44.096196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62256 > 2) by scale factor 0.432661\nI1207 10:39:45.036191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36406 > 2) by scale factor 0.594519\nI1207 10:39:45.976033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51884 > 2) by scale factor 0.56837\nI1207 10:39:46.916709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08869 > 2) by scale factor 0.647524\nI1207 10:39:47.857496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69527 > 2) by scale factor 0.541233\nI1207 10:39:48.797415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.575 > 2) by scale factor 0.55944\nI1207 10:39:49.737716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.93389 > 2) by scale factor 0.681688\nI1207 10:39:50.677455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10756 > 2) by scale factor 0.486907\nI1207 10:39:51.617368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70631 > 2) by scale factor 0.739014\nI1207 10:39:52.557148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90974 > 2) by scale factor 0.407353\nI1207 10:39:53.497079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51246 > 2) by scale factor 0.569401\nI1207 10:39:54.437552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37146 > 2) by scale factor 0.457513\nI1207 10:39:55.377724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01779 > 2) by scale factor 0.497786\nI1207 10:39:56.317234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53424 > 2) by scale factor 0.789192\nI1207 10:39:57.256736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4279 > 2) by scale factor 0.583447\nI1207 10:39:58.197046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74659 > 2) by scale factor 0.728176\nI1207 10:39:59.136996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13716 > 2) by scale factor 0.637519\nI1207 10:40:00.077095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0095 > 2) by scale factor 0.664562\nI1207 10:40:01.017180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98522 > 2) by scale factor 0.669967\nI1207 10:40:01.957238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52954 > 2) by scale factor 0.566646\nI1207 10:40:02.897198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88764 > 2) by scale factor 0.514451\nI1207 10:40:03.836932   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38322 > 2) by scale factor 0.591153\nI1207 10:40:04.777340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74622 > 2) by scale factor 0.421388\nI1207 10:40:05.717372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85711 > 2) by scale factor 0.700008\nI1207 10:40:06.657057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5701 > 2) by scale factor 0.560208\nI1207 10:40:07.596951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21891 > 2) by scale factor 0.621329\nI1207 10:40:08.537041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07435 > 2) by scale factor 0.490876\nI1207 10:40:09.477026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76993 > 2) by scale factor 0.419293\nI1207 10:40:10.417376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80325 > 2) by scale factor 0.416384\nI1207 10:40:11.357404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81897 > 2) by scale factor 0.709479\nI1207 10:40:12.297998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53161 > 2) by scale factor 0.790012\nI1207 10:40:13.238219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11023 > 2) by scale factor 0.486591\nI1207 10:40:14.178450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93767 > 2) by scale factor 0.507915\nI1207 10:40:15.118623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53427 > 2) by scale factor 0.441085\nI1207 10:40:16.060004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92942 > 2) by scale factor 0.682729\nI1207 10:40:17.003372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.12596 > 2) by scale factor 0.639804\nI1207 10:40:17.946600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59473 > 2) by scale factor 0.55637\nI1207 10:40:18.890365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77005 > 2) by scale factor 0.530496\nI1207 10:40:20.774592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32978 > 2) by scale factor 0.60064\nI1207 10:40:21.718220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38686 > 2) by scale factor 0.455906\nI1207 10:40:22.661929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37961 > 2) by scale factor 0.840472\nI1207 10:40:23.605684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12282 > 2) by scale factor 0.485105\nI1207 10:40:24.549031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48174 > 2) by scale factor 0.805885\nI1207 10:40:25.492336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60155 > 2) by scale factor 0.768773\nI1207 10:40:26.436041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36116 > 2) by scale factor 0.595032\nI1207 10:40:27.379765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00087 > 2) by scale factor 0.666473\nI1207 10:40:28.323618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29425 > 2) by scale factor 0.871744\nI1207 10:40:29.266752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27458 > 2) by scale factor 0.879282\nI1207 10:40:30.210255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71826 > 2) by scale factor 0.735766\nI1207 10:40:31.153933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31353 > 2) by scale factor 0.864481\nI1207 10:40:32.097484   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8487 > 2) by scale factor 0.702075\nI1207 10:40:33.040714   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11099 > 2) by scale factor 0.947422\nI1207 10:40:33.984225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24674 > 2) by scale factor 0.616003\nI1207 10:40:34.926518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93117 > 2) by scale factor 0.682321\nI1207 10:40:35.869361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89686 > 2) by scale factor 0.690402\nI1207 10:40:36.811995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36452 > 2) by scale factor 0.845837\nI1207 10:40:37.754803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95161 > 2) by scale factor 0.506122\nI1207 10:40:38.697556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00076 > 2) by scale factor 0.666497\nI1207 10:40:40.580524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50907 > 2) by scale factor 0.569951\nI1207 10:40:41.522694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06096 > 2) by scale factor 0.970421\nI1207 10:40:42.464627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6271 > 2) by scale factor 0.761295\nI1207 10:40:43.406237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15006 > 2) by scale factor 0.634909\nI1207 10:40:44.349050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43716 > 2) by scale factor 0.581875\nI1207 10:40:45.290693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66021 > 2) by scale factor 0.546417\nI1207 10:40:46.233222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.42328 > 2) by scale factor 0.452154\nI1207 10:40:47.175676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96481 > 2) by scale factor 0.504438\nI1207 10:40:48.118369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95147 > 2) by scale factor 0.506141\nI1207 10:40:49.060843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16662 > 2) by scale factor 0.631589\nI1207 10:40:50.003556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02187 > 2) by scale factor 0.661843\nI1207 10:40:50.945948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03824 > 2) by scale factor 0.658275\nI1207 10:40:51.888288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99083 > 2) by scale factor 0.668711\nI1207 10:40:52.830907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19844 > 2) by scale factor 0.909735\nI1207 10:40:53.773418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86263 > 2) by scale factor 0.517782\nI1207 10:40:54.715826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2564 > 2) by scale factor 0.469881\nI1207 10:40:55.657972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65297 > 2) by scale factor 0.547499\nI1207 10:40:56.600061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13379 > 2) by scale factor 0.638206\nI1207 10:40:57.542966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62484 > 2) by scale factor 0.761952\nI1207 10:40:58.485513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06304 > 2) by scale factor 0.492242\nI1207 10:40:59.428025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24184 > 2) by scale factor 0.471493\nI1207 10:41:00.370318   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14318 > 2) by scale factor 0.933192\nI1207 10:41:01.312755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2423 > 2) by scale factor 0.616846\nI1207 10:41:02.255430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30055 > 2) by scale factor 0.869358\nI1207 10:41:03.197880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27511 > 2) by scale factor 0.879077\nI1207 10:41:04.139780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57719 > 2) by scale factor 0.77604\nI1207 10:41:05.081904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9219 > 2) by scale factor 0.684486\nI1207 10:41:06.023881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54685 > 2) by scale factor 0.785283\nI1207 10:41:06.965564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14082 > 2) by scale factor 0.934222\nI1207 10:41:07.907860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71227 > 2) by scale factor 0.737389\nI1207 10:41:08.850119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68435 > 2) by scale factor 0.542837\nI1207 10:41:09.792337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14902 > 2) by scale factor 0.482042\nI1207 10:41:10.735177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33542 > 2) by scale factor 0.856377\nI1207 10:41:11.677325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64028 > 2) by scale factor 0.757494\nI1207 10:41:12.618302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13987 > 2) by scale factor 0.636968\nI1207 10:41:13.560345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.9052 > 2) by scale factor 0.688421\nI1207 10:41:14.503211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07878 > 2) by scale factor 0.649609\nI1207 10:41:15.445391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42486 > 2) by scale factor 0.583966\nI1207 10:41:16.388154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27372 > 2) by scale factor 0.879615\nI1207 10:41:17.330554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52573 > 2) by scale factor 0.791851\nI1207 10:41:17.342604   369 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1207 10:42:10.412945   369 solver.cpp:404]     Test net output #0: accuracy = 0.12375\nI1207 10:42:10.413427   369 solver.cpp:404]     Test net output #1: loss = 21.0111 (* 1 = 21.0111 loss)\nI1207 10:42:11.287662   369 solver.cpp:228] Iteration 12400, loss = 21.0121\nI1207 10:42:11.287715   369 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1207 10:42:11.287742   369 solver.cpp:244]     Train net output #1: loss = 21.0121 (* 1 = 21.0121 loss)\nI1207 10:42:11.364837   369 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1207 10:42:11.374927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68911 > 2) by scale factor 0.42652\nI1207 10:42:12.315518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42002 > 2) by scale factor 0.584792\nI1207 10:42:13.255498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29054 > 2) by scale factor 0.607803\nI1207 10:42:14.195469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16978 > 2) by scale factor 0.479642\nI1207 10:42:15.136535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04343 > 2) by scale factor 0.657154\nI1207 10:42:16.077523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.17231 > 2) by scale factor 0.630456\nI1207 10:42:17.018784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07065 > 2) by scale factor 0.491322\nI1207 10:42:17.959560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71996 > 2) by scale factor 0.735306\nI1207 10:42:18.899376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42935 > 2) by scale factor 0.583201\nI1207 10:42:19.840729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52057 > 2) by scale factor 0.442422\nI1207 10:42:20.781798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94853 > 2) by scale factor 0.678304\nI1207 10:42:21.722892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77304 > 2) by scale factor 0.530077\nI1207 10:42:22.663094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66051 > 2) by scale factor 0.751735\nI1207 10:42:23.604451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86156 > 2) by scale factor 0.69892\nI1207 10:42:24.544360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26336 > 2) by scale factor 0.469114\nI1207 10:42:25.485471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23871 > 2) by scale factor 0.61753\nI1207 10:42:26.425446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16599 > 2) by scale factor 0.631713\nI1207 10:42:27.366592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97394 > 2) by scale factor 0.503279\nI1207 10:42:28.307013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84404 > 2) by scale factor 0.703226\nI1207 10:42:29.247833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35385 > 2) by scale factor 0.596329\nI1207 10:42:30.188341   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24257 > 2) by scale factor 0.616795\nI1207 10:42:31.128942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2504 > 2) by scale factor 0.615309\nI1207 10:42:32.069592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91128 > 2) by scale factor 0.511341\nI1207 10:42:33.010154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42337 > 2) by scale factor 0.825298\nI1207 10:42:33.950620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28651 > 2) by scale factor 0.46658\nI1207 10:42:34.891245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97553 > 2) by scale factor 0.67215\nI1207 10:42:35.831949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44223 > 2) by scale factor 0.581019\nI1207 10:42:36.773378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35599 > 2) by scale factor 0.595949\nI1207 10:42:37.714833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32078 > 2) by scale factor 0.602269\nI1207 10:42:38.656123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11828 > 2) by scale factor 0.641379\nI1207 10:42:39.597733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40439 > 2) by scale factor 0.831813\nI1207 10:42:40.541055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45245 > 2) by scale factor 0.815511\nI1207 10:42:41.484823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29685 > 2) by scale factor 0.60664\nI1207 10:42:42.428093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51399 > 2) by scale factor 0.795547\nI1207 10:42:43.371748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.18412 > 2) by scale factor 0.9157\nI1207 10:42:44.314906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37372 > 2) by scale factor 0.592818\nI1207 10:42:45.258482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31184 > 2) by scale factor 0.603894\nI1207 10:42:46.201397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0018 > 2) by scale factor 0.999101\nI1207 10:42:47.145051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86618 > 2) by scale factor 0.697794\nI1207 10:42:48.088140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62951 > 2) by scale factor 0.551038\nI1207 10:42:49.030992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77782 > 2) by scale factor 0.719989\nI1207 10:42:49.973963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2723 > 2) by scale factor 0.611191\nI1207 10:42:50.916354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65728 > 2) by scale factor 0.752648\nI1207 10:42:51.859664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99448 > 2) by scale factor 0.667895\nI1207 10:42:52.803087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28977 > 2) by scale factor 0.607946\nI1207 10:42:53.745723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3899 > 2) by scale factor 0.836854\nI1207 10:42:54.688014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79852 > 2) by scale factor 0.714664\nI1207 10:42:55.631114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8201 > 2) by scale factor 0.709195\nI1207 10:42:57.515633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45246 > 2) by scale factor 0.579298\nI1207 10:42:58.459040   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68065 > 2) by scale factor 0.746087\nI1207 10:42:59.402185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61534 > 2) by scale factor 0.764718\nI1207 10:43:00.344470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01225 > 2) by scale factor 0.663955\nI1207 10:43:01.286873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78407 > 2) by scale factor 0.718374\nI1207 10:43:02.229822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03954 > 2) by scale factor 0.980614\nI1207 10:43:03.172328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6451 > 2) by scale factor 0.548682\nI1207 10:43:04.114624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3919 > 2) by scale factor 0.589641\nI1207 10:43:05.057648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25582 > 2) by scale factor 0.469945\nI1207 10:43:06.000520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26757 > 2) by scale factor 0.468651\nI1207 10:43:06.943047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42682 > 2) by scale factor 0.451791\nI1207 10:43:07.885630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44897 > 2) by scale factor 0.816669\nI1207 10:43:08.828135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8581 > 2) by scale factor 0.699766\nI1207 10:43:09.771025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14532 > 2) by scale factor 0.93226\nI1207 10:43:10.714435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77134 > 2) by scale factor 0.721672\nI1207 10:43:11.657953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.26842 > 2) by scale factor 0.611917\nI1207 10:43:12.600908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94086 > 2) by scale factor 0.507503\nI1207 10:43:13.544234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22083 > 2) by scale factor 0.620957\nI1207 10:43:14.487123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50367 > 2) by scale factor 0.570829\nI1207 10:43:15.429666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20682 > 2) by scale factor 0.623672\nI1207 10:43:16.374045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89226 > 2) by scale factor 0.691501\nI1207 10:43:17.317965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92425 > 2) by scale factor 0.509651\nI1207 10:43:18.261337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03238 > 2) by scale factor 0.659547\nI1207 10:43:19.205055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83787 > 2) by scale factor 0.704753\nI1207 10:43:20.148991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95183 > 2) by scale factor 0.677545\nI1207 10:43:21.092821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04795 > 2) by scale factor 0.656178\nI1207 10:43:22.036743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8449 > 2) by scale factor 0.52017\nI1207 10:43:22.981276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2854 > 2) by scale factor 0.875118\nI1207 10:43:23.924407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02631 > 2) by scale factor 0.987014\nI1207 10:43:24.868417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74917 > 2) by scale factor 0.727494\nI1207 10:43:25.811730   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68425 > 2) by scale factor 0.542852\nI1207 10:43:26.755364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2495 > 2) by scale factor 0.470644\nI1207 10:43:27.699522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50143 > 2) by scale factor 0.444303\nI1207 10:43:28.643200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07878 > 2) by scale factor 0.649607\nI1207 10:43:29.587486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83651 > 2) by scale factor 0.705092\nI1207 10:43:30.531469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2049 > 2) by scale factor 0.907071\nI1207 10:43:31.475438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83069 > 2) by scale factor 0.522099\nI1207 10:43:32.419170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71919 > 2) by scale factor 0.537752\nI1207 10:43:33.362118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86411 > 2) by scale factor 0.698298\nI1207 10:43:34.305274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94848 > 2) by scale factor 0.678315\nI1207 10:43:35.248282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18791 > 2) by scale factor 0.627371\nI1207 10:43:36.190598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18531 > 2) by scale factor 0.627882\nI1207 10:43:37.133229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41245 > 2) by scale factor 0.58609\nI1207 10:43:38.075588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88403 > 2) by scale factor 0.514929\nI1207 10:43:39.017907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.24971 > 2) by scale factor 0.47062\nI1207 10:43:39.960844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25443 > 2) by scale factor 0.470098\nI1207 10:43:40.903551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68805 > 2) by scale factor 0.744033\nI1207 10:43:41.846806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79636 > 2) by scale factor 0.715216\nI1207 10:43:42.789552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61603 > 2) by scale factor 0.764518\nI1207 10:43:43.732678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66789 > 2) by scale factor 0.749656\nI1207 10:43:44.675667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.709 > 2) by scale factor 0.53923\nI1207 10:43:44.687655   369 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1207 10:44:37.746024   369 solver.cpp:404]     Test net output #0: accuracy = 0.13915\nI1207 10:44:37.746537   369 solver.cpp:404]     Test net output #1: loss = 16.4432 (* 1 = 16.4432 loss)\nI1207 10:44:38.620064   369 solver.cpp:228] Iteration 12500, loss = 19.3504\nI1207 10:44:38.620113   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 10:44:38.620139   369 solver.cpp:244]     Train net output #1: loss = 19.3504 (* 1 = 19.3504 loss)\nI1207 10:44:38.696532   369 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1207 10:44:38.706687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91431 > 2) by scale factor 0.510945\nI1207 10:44:39.647640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72795 > 2) by scale factor 0.733151\nI1207 10:44:40.587992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80493 > 2) by scale factor 0.713029\nI1207 10:44:41.527473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.12105 > 2) by scale factor 0.485313\nI1207 10:44:42.466810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32042 > 2) by scale factor 0.462918\nI1207 10:44:43.406157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13203 > 2) by scale factor 0.938073\nI1207 10:44:44.345948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29642 > 2) by scale factor 0.87092\nI1207 10:44:45.285225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5039 > 2) by scale factor 0.570793\nI1207 10:44:46.225399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14653 > 2) by scale factor 0.635622\nI1207 10:44:47.165202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29868 > 2) by scale factor 0.606303\nI1207 10:44:48.105206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08385 > 2) by scale factor 0.489734\nI1207 10:44:49.045261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72465 > 2) by scale factor 0.423312\nI1207 10:44:49.984927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77681 > 2) by scale factor 0.529547\nI1207 10:44:50.923710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84309 > 2) by scale factor 0.70346\nI1207 10:44:51.863765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25735 > 2) by scale factor 0.885995\nI1207 10:44:52.804455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39084 > 2) by scale factor 0.836527\nI1207 10:44:54.682867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20233 > 2) by scale factor 0.624545\nI1207 10:44:55.623533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46222 > 2) by scale factor 0.577663\nI1207 10:44:56.563583   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21299 > 2) by scale factor 0.474723\nI1207 10:44:57.503695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67317 > 2) by scale factor 0.544489\nI1207 10:44:58.444283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94289 > 2) by scale factor 0.679604\nI1207 10:44:59.384707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18154 > 2) by scale factor 0.916783\nI1207 10:45:01.263100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83563 > 2) by scale factor 0.521427\nI1207 10:45:02.203408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45837 > 2) by scale factor 0.578307\nI1207 10:45:03.143604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13063 > 2) by scale factor 0.484187\nI1207 10:45:04.084607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06655 > 2) by scale factor 0.652199\nI1207 10:45:05.025629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0791 > 2) by scale factor 0.649541\nI1207 10:45:05.966248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06256 > 2) by scale factor 0.653048\nI1207 10:45:06.906791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19894 > 2) by scale factor 0.625206\nI1207 10:45:07.847247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50004 > 2) by scale factor 0.571421\nI1207 10:45:08.788481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94177 > 2) by scale factor 0.679863\nI1207 10:45:09.729302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69807 > 2) by scale factor 0.74127\nI1207 10:45:10.671826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.81877 > 2) by scale factor 0.523728\nI1207 10:45:11.614367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58242 > 2) by scale factor 0.774469\nI1207 10:45:12.556677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92193 > 2) by scale factor 0.509954\nI1207 10:45:13.499111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8332 > 2) by scale factor 0.521757\nI1207 10:45:14.441789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48604 > 2) by scale factor 0.445827\nI1207 10:45:15.384263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39729 > 2) by scale factor 0.834275\nI1207 10:45:16.327723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80793 > 2) by scale factor 0.712269\nI1207 10:45:17.270558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53015 > 2) by scale factor 0.566548\nI1207 10:45:18.214128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48303 > 2) by scale factor 0.574213\nI1207 10:45:19.158335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41491 > 2) by scale factor 0.585666\nI1207 10:45:20.102090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21756 > 2) by scale factor 0.62159\nI1207 10:45:21.045811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65867 > 2) by scale factor 0.752255\nI1207 10:45:21.989672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58029 > 2) by scale factor 0.775107\nI1207 10:45:22.933296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12534 > 2) by scale factor 0.484808\nI1207 10:45:23.877024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25809 > 2) by scale factor 0.613857\nI1207 10:45:24.820978   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80538 > 2) by scale factor 0.712917\nI1207 10:45:25.764765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56141 > 2) by scale factor 0.561576\nI1207 10:45:26.708873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50562 > 2) by scale factor 0.44389\nI1207 10:45:27.652896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11881 > 2) by scale factor 0.390716\nI1207 10:45:28.596858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21668 > 2) by scale factor 0.621758\nI1207 10:45:29.540478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12176 > 2) by scale factor 0.640664\nI1207 10:45:30.483352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20918 > 2) by scale factor 0.475152\nI1207 10:45:31.426720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29925 > 2) by scale factor 0.86985\nI1207 10:45:32.369997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70326 > 2) by scale factor 0.540064\nI1207 10:45:33.312942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38531 > 2) by scale factor 0.590789\nI1207 10:45:34.256137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35333 > 2) by scale factor 0.596422\nI1207 10:45:35.198792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52161 > 2) by scale factor 0.567922\nI1207 10:45:36.141619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13177 > 2) by scale factor 0.389729\nI1207 10:45:37.084806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73529 > 2) by scale factor 0.422361\nI1207 10:45:38.028072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.05093 > 2) by scale factor 0.655538\nI1207 10:45:38.971781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0872 > 2) by scale factor 0.489332\nI1207 10:45:39.915187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42219 > 2) by scale factor 0.584422\nI1207 10:45:40.858515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57988 > 2) by scale factor 0.436693\nI1207 10:45:41.801745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30901 > 2) by scale factor 0.60441\nI1207 10:45:42.744634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76288 > 2) by scale factor 0.419914\nI1207 10:45:43.687490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27525 > 2) by scale factor 0.61064\nI1207 10:45:44.630170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06193 > 2) by scale factor 0.492377\nI1207 10:45:45.573377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4143 > 2) by scale factor 0.828396\nI1207 10:45:46.516366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.39917 > 2) by scale factor 0.370427\nI1207 10:45:47.459256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38945 > 2) by scale factor 0.590066\nI1207 10:45:48.402003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15967 > 2) by scale factor 0.926067\nI1207 10:45:49.344727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75533 > 2) by scale factor 0.532577\nI1207 10:45:50.287801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45346 > 2) by scale factor 0.57913\nI1207 10:45:51.230744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12173 > 2) by scale factor 0.390493\nI1207 10:45:52.173303   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21787 > 2) by scale factor 0.474173\nI1207 10:45:53.116333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45186 > 2) by scale factor 0.579398\nI1207 10:45:54.058820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01389 > 2) by scale factor 0.663595\nI1207 10:45:55.000977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89091 > 2) by scale factor 0.691824\nI1207 10:45:55.944393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97717 > 2) by scale factor 0.67178\nI1207 10:45:56.886806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22615 > 2) by scale factor 0.619933\nI1207 10:45:57.829465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27205 > 2) by scale factor 0.880261\nI1207 10:45:58.772552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77808 > 2) by scale factor 0.71992\nI1207 10:45:59.715844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78106 > 2) by scale factor 0.418317\nI1207 10:46:00.659070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60398 > 2) by scale factor 0.768054\nI1207 10:46:01.602073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04991 > 2) by scale factor 0.493838\nI1207 10:46:02.544813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49445 > 2) by scale factor 0.444994\nI1207 10:46:03.488198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41773 > 2) by scale factor 0.827221\nI1207 10:46:04.431455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24779 > 2) by scale factor 0.615803\nI1207 10:46:05.374459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.2448 > 2) by scale factor 0.890949\nI1207 10:46:06.317229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19983 > 2) by scale factor 0.909163\nI1207 10:46:07.260423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7573 > 2) by scale factor 0.532297\nI1207 10:46:08.204010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98942 > 2) by scale factor 0.669026\nI1207 10:46:09.147184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47795 > 2) by scale factor 0.575051\nI1207 10:46:10.089951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82379 > 2) by scale factor 0.708268\nI1207 10:46:11.033078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95134 > 2) by scale factor 0.677659\nI1207 10:46:11.976162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21384 > 2) by scale factor 0.474626\nI1207 10:46:11.988139   369 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1207 10:47:05.039057   369 solver.cpp:404]     Test net output #0: accuracy = 0.12095\nI1207 10:47:05.039542   369 solver.cpp:404]     Test net output #1: loss = 18.4485 (* 1 = 18.4485 loss)\nI1207 10:47:05.913640   369 solver.cpp:228] Iteration 12600, loss = 20.0543\nI1207 10:47:05.913689   369 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI1207 10:47:05.913714   369 solver.cpp:244]     Train net output #1: loss = 20.0543 (* 1 = 20.0543 loss)\nI1207 10:47:05.986686   369 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1207 10:47:05.996847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93301 > 2) by scale factor 0.405432\nI1207 10:47:06.937158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01443 > 2) by scale factor 0.498203\nI1207 10:47:07.877540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.58081 > 2) by scale factor 0.558533\nI1207 10:47:08.818117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31529 > 2) by scale factor 0.863821\nI1207 10:47:09.758685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03248 > 2) by scale factor 0.659526\nI1207 10:47:10.699026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6569 > 2) by scale factor 0.546911\nI1207 10:47:11.639156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02642 > 2) by scale factor 0.660846\nI1207 10:47:12.579339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24353 > 2) by scale factor 0.471306\nI1207 10:47:13.519457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4494 > 2) by scale factor 0.579811\nI1207 10:47:14.458855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58028 > 2) by scale factor 0.77511\nI1207 10:47:15.399669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43375 > 2) by scale factor 0.821778\nI1207 10:47:16.340595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06416 > 2) by scale factor 0.492107\nI1207 10:47:17.281440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54447 > 2) by scale factor 0.56426\nI1207 10:47:18.222342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71089 > 2) by scale factor 0.424548\nI1207 10:47:19.163309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72777 > 2) by scale factor 0.423032\nI1207 10:47:20.104086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58708 > 2) by scale factor 0.773073\nI1207 10:47:21.044317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76795 > 2) by scale factor 0.722557\nI1207 10:47:21.984925   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03675 > 2) by scale factor 0.658598\nI1207 10:47:22.925284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22053 > 2) by scale factor 0.621016\nI1207 10:47:23.865984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98665 > 2) by scale factor 0.669646\nI1207 10:47:24.805811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79514 > 2) by scale factor 0.715528\nI1207 10:47:25.746551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68519 > 2) by scale factor 0.542712\nI1207 10:47:26.686386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87995 > 2) by scale factor 0.51547\nI1207 10:47:27.626914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27608 > 2) by scale factor 0.610487\nI1207 10:47:28.567139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98105 > 2) by scale factor 0.50238\nI1207 10:47:29.508015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58909 > 2) by scale factor 0.557245\nI1207 10:47:30.448627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69448 > 2) by scale factor 0.742259\nI1207 10:47:31.388963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16084 > 2) by scale factor 0.632742\nI1207 10:47:32.329324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62741 > 2) by scale factor 0.761206\nI1207 10:47:33.269910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88844 > 2) by scale factor 0.514345\nI1207 10:47:34.210974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46987 > 2) by scale factor 0.57639\nI1207 10:47:35.154816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.05375 > 2) by scale factor 0.49337\nI1207 10:47:36.099009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64717 > 2) by scale factor 0.430369\nI1207 10:47:37.042125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45687 > 2) by scale factor 0.578558\nI1207 10:47:37.985890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54754 > 2) by scale factor 0.563771\nI1207 10:47:38.929879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33113 > 2) by scale factor 0.600397\nI1207 10:47:39.873615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52617 > 2) by scale factor 0.441874\nI1207 10:47:40.817528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91841 > 2) by scale factor 0.510411\nI1207 10:47:41.761188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68637 > 2) by scale factor 0.744498\nI1207 10:47:42.704854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30545 > 2) by scale factor 0.605062\nI1207 10:47:43.648778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76588 > 2) by scale factor 0.723098\nI1207 10:47:44.592663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0691 > 2) by scale factor 0.966605\nI1207 10:47:45.536617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91816 > 2) by scale factor 0.510444\nI1207 10:47:46.480741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1628 > 2) by scale factor 0.924729\nI1207 10:47:47.424660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34027 > 2) by scale factor 0.598754\nI1207 10:47:48.368479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15215 > 2) by scale factor 0.634487\nI1207 10:47:49.312428   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16886 > 2) by scale factor 0.631141\nI1207 10:47:50.256167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70934 > 2) by scale factor 0.738188\nI1207 10:47:51.200155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70362 > 2) by scale factor 0.739748\nI1207 10:47:52.144263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71823 > 2) by scale factor 0.53789\nI1207 10:47:53.088344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34787 > 2) by scale factor 0.597394\nI1207 10:47:54.032114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29621 > 2) by scale factor 0.871\nI1207 10:47:54.976234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36541 > 2) by scale factor 0.594281\nI1207 10:47:55.920250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44461 > 2) by scale factor 0.580617\nI1207 10:47:56.864584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82117 > 2) by scale factor 0.708926\nI1207 10:47:57.808676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78205 > 2) by scale factor 0.718895\nI1207 10:47:58.752557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37824 > 2) by scale factor 0.84096\nI1207 10:47:59.696359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25094 > 2) by scale factor 0.615207\nI1207 10:48:00.641098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1401 > 2) by scale factor 0.636922\nI1207 10:48:01.585110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06799 > 2) by scale factor 0.651892\nI1207 10:48:02.529093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.47851 > 2) by scale factor 0.574959\nI1207 10:48:03.473480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51277 > 2) by scale factor 0.569352\nI1207 10:48:04.417585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19289 > 2) by scale factor 0.626393\nI1207 10:48:05.361583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42829 > 2) by scale factor 0.451642\nI1207 10:48:06.305719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98559 > 2) by scale factor 0.501808\nI1207 10:48:07.248839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10063 > 2) by scale factor 0.48773\nI1207 10:48:08.192673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73259 > 2) by scale factor 0.731905\nI1207 10:48:09.136034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33689 > 2) by scale factor 0.59936\nI1207 10:48:10.079614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53724 > 2) by scale factor 0.788257\nI1207 10:48:11.022761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32481 > 2) by scale factor 0.860284\nI1207 10:48:11.966397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91297 > 2) by scale factor 0.511121\nI1207 10:48:12.909755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85773 > 2) by scale factor 0.699856\nI1207 10:48:13.853312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8596 > 2) by scale factor 0.518189\nI1207 10:48:14.796424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38538 > 2) by scale factor 0.590775\nI1207 10:48:15.740216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33956 > 2) by scale factor 0.598882\nI1207 10:48:16.684073   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59516 > 2) by scale factor 0.770666\nI1207 10:48:17.627854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64544 > 2) by scale factor 0.756019\nI1207 10:48:18.570711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30448 > 2) by scale factor 0.464633\nI1207 10:48:19.513770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09806 > 2) by scale factor 0.645566\nI1207 10:48:20.457208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55636 > 2) by scale factor 0.438947\nI1207 10:48:22.341832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12838 > 2) by scale factor 0.939683\nI1207 10:48:23.285524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81903 > 2) by scale factor 0.709465\nI1207 10:48:24.229189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71576 > 2) by scale factor 0.736442\nI1207 10:48:25.173203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51606 > 2) by scale factor 0.794894\nI1207 10:48:26.117074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49794 > 2) by scale factor 0.800659\nI1207 10:48:27.060272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05732 > 2) by scale factor 0.654167\nI1207 10:48:28.003512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05838 > 2) by scale factor 0.65394\nI1207 10:48:28.946869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40466 > 2) by scale factor 0.454064\nI1207 10:48:30.832378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66278 > 2) by scale factor 0.751096\nI1207 10:48:31.776273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.5221 > 2) by scale factor 0.567844\nI1207 10:48:32.720000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7988 > 2) by scale factor 0.526481\nI1207 10:48:33.663410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3332 > 2) by scale factor 0.461553\nI1207 10:48:34.606909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07532 > 2) by scale factor 0.963706\nI1207 10:48:35.551054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34425 > 2) by scale factor 0.598041\nI1207 10:48:36.495242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51359 > 2) by scale factor 0.795676\nI1207 10:48:37.439321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8992 > 2) by scale factor 0.689845\nI1207 10:48:38.382787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1243 > 2) by scale factor 0.484931\nI1207 10:48:39.326632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7536 > 2) by scale factor 0.726323\nI1207 10:48:39.338618   369 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1207 10:49:32.449522   369 solver.cpp:404]     Test net output #0: accuracy = 0.1433\nI1207 10:49:32.450018   369 solver.cpp:404]     Test net output #1: loss = 10.3565 (* 1 = 10.3565 loss)\nI1207 10:49:33.324223   369 solver.cpp:228] Iteration 12700, loss = 10.0514\nI1207 10:49:33.324266   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 10:49:33.324291   369 solver.cpp:244]     Train net output #1: loss = 10.0514 (* 1 = 10.0514 loss)\nI1207 10:49:33.396020   369 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1207 10:49:33.406118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22192 > 2) by scale factor 0.620748\nI1207 10:49:34.347023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.02823 > 2) by scale factor 0.98608\nI1207 10:49:35.287984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62328 > 2) by scale factor 0.551986\nI1207 10:49:36.228770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99304 > 2) by scale factor 0.400557\nI1207 10:49:37.169052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82578 > 2) by scale factor 0.522769\nI1207 10:49:38.110446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32545 > 2) by scale factor 0.46238\nI1207 10:49:39.051741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57933 > 2) by scale factor 0.775395\nI1207 10:49:39.992882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9019 > 2) by scale factor 0.689204\nI1207 10:49:40.933935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79658 > 2) by scale factor 0.52679\nI1207 10:49:41.875162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32752 > 2) by scale factor 0.859282\nI1207 10:49:42.816148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72188 > 2) by scale factor 0.734785\nI1207 10:49:43.757537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34768 > 2) by scale factor 0.597429\nI1207 10:49:44.698202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26527 > 2) by scale factor 0.612507\nI1207 10:49:45.639442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66011 > 2) by scale factor 0.751848\nI1207 10:49:46.580904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31011 > 2) by scale factor 0.604209\nI1207 10:49:47.522091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4652 > 2) by scale factor 0.577167\nI1207 10:49:48.462920   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98983 > 2) by scale factor 0.668935\nI1207 10:49:49.403645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69508 > 2) by scale factor 0.742094\nI1207 10:49:50.344983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2437 > 2) by scale factor 0.61658\nI1207 10:49:51.285840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05916 > 2) by scale factor 0.653774\nI1207 10:49:52.226732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01299 > 2) by scale factor 0.663792\nI1207 10:49:53.167496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20666 > 2) by scale factor 0.623701\nI1207 10:49:54.108645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10871 > 2) by scale factor 0.643354\nI1207 10:49:55.049803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19426 > 2) by scale factor 0.91147\nI1207 10:49:55.990979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46179 > 2) by scale factor 0.812417\nI1207 10:49:56.931653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19564 > 2) by scale factor 0.625852\nI1207 10:49:57.872838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45855 > 2) by scale factor 0.578278\nI1207 10:49:58.813920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27489 > 2) by scale factor 0.879164\nI1207 10:49:59.754354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60529 > 2) by scale factor 0.76767\nI1207 10:50:00.695310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91562 > 2) by scale factor 0.685962\nI1207 10:50:01.636411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.23562 > 2) by scale factor 0.472186\nI1207 10:50:02.580093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97585 > 2) by scale factor 0.672077\nI1207 10:50:03.524436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27183 > 2) by scale factor 0.880349\nI1207 10:50:04.468287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75988 > 2) by scale factor 0.531931\nI1207 10:50:05.412464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46306 > 2) by scale factor 0.811998\nI1207 10:50:06.356398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45961 > 2) by scale factor 0.813137\nI1207 10:50:07.300539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69801 > 2) by scale factor 0.540832\nI1207 10:50:08.244513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04745 > 2) by scale factor 0.656286\nI1207 10:50:09.188658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78958 > 2) by scale factor 0.527764\nI1207 10:50:10.132872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71052 > 2) by scale factor 0.737866\nI1207 10:50:11.076912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36117 > 2) by scale factor 0.847037\nI1207 10:50:12.020751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58683 > 2) by scale factor 0.773148\nI1207 10:50:12.964998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86583 > 2) by scale factor 0.697877\nI1207 10:50:13.908794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76271 > 2) by scale factor 0.723926\nI1207 10:50:14.852538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78648 > 2) by scale factor 0.528196\nI1207 10:50:15.796701   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76694 > 2) by scale factor 0.419557\nI1207 10:50:16.740599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79066 > 2) by scale factor 0.527613\nI1207 10:50:17.684741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29794 > 2) by scale factor 0.377505\nI1207 10:50:18.629083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02822 > 2) by scale factor 0.496497\nI1207 10:50:19.572934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87009 > 2) by scale factor 0.696843\nI1207 10:50:20.517321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85913 > 2) by scale factor 0.699513\nI1207 10:50:21.461277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25476 > 2) by scale factor 0.470062\nI1207 10:50:22.405247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02351 > 2) by scale factor 0.398128\nI1207 10:50:23.349457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30466 > 2) by scale factor 0.605206\nI1207 10:50:24.293704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01179 > 2) by scale factor 0.664058\nI1207 10:50:25.237515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.551 > 2) by scale factor 0.563222\nI1207 10:50:26.181465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8908 > 2) by scale factor 0.514033\nI1207 10:50:27.125512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19356 > 2) by scale factor 0.626259\nI1207 10:50:29.011256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80026 > 2) by scale factor 0.71422\nI1207 10:50:29.954637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.03696 > 2) by scale factor 0.658554\nI1207 10:50:30.898768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2967 > 2) by scale factor 0.465474\nI1207 10:50:31.842471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80324 > 2) by scale factor 0.713459\nI1207 10:50:32.786547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9215 > 2) by scale factor 0.510009\nI1207 10:50:33.730789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92764 > 2) by scale factor 0.683144\nI1207 10:50:34.674751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16771 > 2) by scale factor 0.922631\nI1207 10:50:35.618643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94955 > 2) by scale factor 0.678069\nI1207 10:50:36.562224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10874 > 2) by scale factor 0.948434\nI1207 10:50:38.448086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24064 > 2) by scale factor 0.617163\nI1207 10:50:39.391952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94959 > 2) by scale factor 0.678061\nI1207 10:50:40.336084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53866 > 2) by scale factor 0.787816\nI1207 10:50:41.280202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11444 > 2) by scale factor 0.945876\nI1207 10:50:42.224432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70952 > 2) by scale factor 0.738139\nI1207 10:50:43.168231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07728 > 2) by scale factor 0.962795\nI1207 10:50:44.111434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31108 > 2) by scale factor 0.865397\nI1207 10:50:45.055794   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20718 > 2) by scale factor 0.6236\nI1207 10:50:45.999721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00799 > 2) by scale factor 0.499003\nI1207 10:50:46.943627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60762 > 2) by scale factor 0.766982\nI1207 10:50:47.887445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03406 > 2) by scale factor 0.659183\nI1207 10:50:48.831290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55918 > 2) by scale factor 0.781502\nI1207 10:50:49.774643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06482 > 2) by scale factor 0.968607\nI1207 10:50:50.718459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79076 > 2) by scale factor 0.527599\nI1207 10:50:51.662066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61288 > 2) by scale factor 0.553575\nI1207 10:50:52.605967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30314 > 2) by scale factor 0.605484\nI1207 10:50:53.550235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17162 > 2) by scale factor 0.920971\nI1207 10:50:54.494127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38976 > 2) by scale factor 0.836903\nI1207 10:50:55.438413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45288 > 2) by scale factor 0.579227\nI1207 10:50:56.383021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59342 > 2) by scale factor 0.771183\nI1207 10:50:58.268710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19671 > 2) by scale factor 0.910451\nI1207 10:50:59.212792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.75155 > 2) by scale factor 0.726862\nI1207 10:51:00.156967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25666 > 2) by scale factor 0.469852\nI1207 10:51:01.101199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65815 > 2) by scale factor 0.546724\nI1207 10:51:02.045230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10517 > 2) by scale factor 0.644088\nI1207 10:51:02.989262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44763 > 2) by scale factor 0.817118\nI1207 10:51:03.934046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60747 > 2) by scale factor 0.767026\nI1207 10:51:04.877768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45744 > 2) by scale factor 0.578462\nI1207 10:51:05.821986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95507 > 2) by scale factor 0.50568\nI1207 10:51:06.765926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75895 > 2) by scale factor 0.532063\nI1207 10:51:06.777832   369 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1207 10:51:59.822968   369 solver.cpp:404]     Test net output #0: accuracy = 0.11705\nI1207 10:51:59.823478   369 solver.cpp:404]     Test net output #1: loss = 19.9941 (* 1 = 19.9941 loss)\nI1207 10:52:00.697470   369 solver.cpp:228] Iteration 12800, loss = 20.9854\nI1207 10:52:00.697517   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 10:52:00.697544   369 solver.cpp:244]     Train net output #1: loss = 20.9854 (* 1 = 20.9854 loss)\nI1207 10:52:00.772644   369 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI1207 10:52:00.782785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29586 > 2) by scale factor 0.465565\nI1207 10:52:01.722970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.38499 > 2) by scale factor 0.456101\nI1207 10:52:02.663720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39851 > 2) by scale factor 0.4547\nI1207 10:52:03.605377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79204 > 2) by scale factor 0.527421\nI1207 10:52:04.546330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53632 > 2) by scale factor 0.788544\nI1207 10:52:05.487032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48515 > 2) by scale factor 0.573863\nI1207 10:52:06.427358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51904 > 2) by scale factor 0.793952\nI1207 10:52:07.368351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54435 > 2) by scale factor 0.564278\nI1207 10:52:08.309196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49374 > 2) by scale factor 0.572452\nI1207 10:52:09.249547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7577 > 2) by scale factor 0.53224\nI1207 10:52:10.189942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79558 > 2) by scale factor 0.715414\nI1207 10:52:11.130417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48617 > 2) by scale factor 0.573695\nI1207 10:52:12.070571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20949 > 2) by scale factor 0.475117\nI1207 10:52:13.010751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10664 > 2) by scale factor 0.94938\nI1207 10:52:13.951429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42355 > 2) by scale factor 0.825236\nI1207 10:52:14.892328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4169 > 2) by scale factor 0.827507\nI1207 10:52:15.833194   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46055 > 2) by scale factor 0.812825\nI1207 10:52:16.773900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5951 > 2) by scale factor 0.435246\nI1207 10:52:17.714290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4039 > 2) by scale factor 0.454143\nI1207 10:52:18.655185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21749 > 2) by scale factor 0.474215\nI1207 10:52:19.595613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00107 > 2) by scale factor 0.499866\nI1207 10:52:20.535446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76285 > 2) by scale factor 0.531512\nI1207 10:52:21.475844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66992 > 2) by scale factor 0.428272\nI1207 10:52:22.416775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16602 > 2) by scale factor 0.631708\nI1207 10:52:23.357919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65397 > 2) by scale factor 0.429741\nI1207 10:52:24.299037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70022 > 2) by scale factor 0.540509\nI1207 10:52:25.239493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29283 > 2) by scale factor 0.607381\nI1207 10:52:26.180487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8648 > 2) by scale factor 0.698129\nI1207 10:52:27.121237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69014 > 2) by scale factor 0.743455\nI1207 10:52:28.061928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85162 > 2) by scale factor 0.519262\nI1207 10:52:29.002318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.67228 > 2) by scale factor 0.544621\nI1207 10:52:29.943699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9304 > 2) by scale factor 0.508855\nI1207 10:52:30.885227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87541 > 2) by scale factor 0.695554\nI1207 10:52:31.826086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37285 > 2) by scale factor 0.59297\nI1207 10:52:32.767992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25556 > 2) by scale factor 0.469973\nI1207 10:52:33.712359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45748 > 2) by scale factor 0.813842\nI1207 10:52:34.656553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66485 > 2) by scale factor 0.545725\nI1207 10:52:35.600731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2899 > 2) by scale factor 0.607922\nI1207 10:52:36.545068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6406 > 2) by scale factor 0.757405\nI1207 10:52:37.489274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12259 > 2) by scale factor 0.485132\nI1207 10:52:38.433398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38173 > 2) by scale factor 0.839725\nI1207 10:52:39.377676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6201 > 2) by scale factor 0.763331\nI1207 10:52:40.321624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13355 > 2) by scale factor 0.638254\nI1207 10:52:41.265908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99738 > 2) by scale factor 0.500328\nI1207 10:52:42.210438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34564 > 2) by scale factor 0.460231\nI1207 10:52:43.154769   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87126 > 2) by scale factor 0.516628\nI1207 10:52:44.098466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44641 > 2) by scale factor 0.449801\nI1207 10:52:45.042341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86154 > 2) by scale factor 0.698925\nI1207 10:52:45.986671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65753 > 2) by scale factor 0.752578\nI1207 10:52:46.930866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88319 > 2) by scale factor 0.51504\nI1207 10:52:47.875310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00503 > 2) by scale factor 0.499372\nI1207 10:52:48.819677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08528 > 2) by scale factor 0.489563\nI1207 10:52:49.764251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16376 > 2) by scale factor 0.63216\nI1207 10:52:50.708019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04462 > 2) by scale factor 0.494484\nI1207 10:52:51.652500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97072 > 2) by scale factor 0.503687\nI1207 10:52:52.597090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47722 > 2) by scale factor 0.575172\nI1207 10:52:53.541618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42592 > 2) by scale factor 0.583785\nI1207 10:52:54.486069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43718 > 2) by scale factor 0.581873\nI1207 10:52:55.430408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72698 > 2) by scale factor 0.536627\nI1207 10:52:56.374681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.14759 > 2) by scale factor 0.482208\nI1207 10:52:57.319039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1163 > 2) by scale factor 0.641787\nI1207 10:52:58.263463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34576 > 2) by scale factor 0.597771\nI1207 10:52:59.207808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84572 > 2) by scale factor 0.70281\nI1207 10:53:00.151998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36584 > 2) by scale factor 0.594206\nI1207 10:53:01.096024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26056 > 2) by scale factor 0.469422\nI1207 10:53:02.040294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60351 > 2) by scale factor 0.768194\nI1207 10:53:02.985047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58487 > 2) by scale factor 0.773733\nI1207 10:53:03.928990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92102 > 2) by scale factor 0.510072\nI1207 10:53:04.872997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51925 > 2) by scale factor 0.442551\nI1207 10:53:05.817566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9635 > 2) by scale factor 0.674877\nI1207 10:53:06.761865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26565 > 2) by scale factor 0.468862\nI1207 10:53:07.705937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98943 > 2) by scale factor 0.669024\nI1207 10:53:08.650270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39169 > 2) by scale factor 0.836228\nI1207 10:53:09.595110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35505 > 2) by scale factor 0.849238\nI1207 10:53:10.539054   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18544 > 2) by scale factor 0.915146\nI1207 10:53:11.483474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9597 > 2) by scale factor 0.675744\nI1207 10:53:12.427654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56516 > 2) by scale factor 0.560985\nI1207 10:53:13.371794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47189 > 2) by scale factor 0.576055\nI1207 10:53:14.315944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21292 > 2) by scale factor 0.47473\nI1207 10:53:15.260052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21753 > 2) by scale factor 0.474211\nI1207 10:53:16.204004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05682 > 2) by scale factor 0.654274\nI1207 10:53:17.147956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39644 > 2) by scale factor 0.588852\nI1207 10:53:18.091593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24033 > 2) by scale factor 0.617222\nI1207 10:53:19.035023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75998 > 2) by scale factor 0.724643\nI1207 10:53:19.979369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30685 > 2) by scale factor 0.866983\nI1207 10:53:20.923385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70439 > 2) by scale factor 0.739539\nI1207 10:53:21.867724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89227 > 2) by scale factor 0.513839\nI1207 10:53:22.812010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14712 > 2) by scale factor 0.482263\nI1207 10:53:23.756072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.23568 > 2) by scale factor 0.894582\nI1207 10:53:24.700125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60698 > 2) by scale factor 0.55448\nI1207 10:53:25.644443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54394 > 2) by scale factor 0.564343\nI1207 10:53:26.588582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96285 > 2) by scale factor 0.675026\nI1207 10:53:27.532605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18496 > 2) by scale factor 0.62795\nI1207 10:53:28.477447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44218 > 2) by scale factor 0.581026\nI1207 10:53:29.421983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25688 > 2) by scale factor 0.614084\nI1207 10:53:30.366593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37978 > 2) by scale factor 0.840414\nI1207 10:53:31.311321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14262 > 2) by scale factor 0.933435\nI1207 10:53:32.256019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17411 > 2) by scale factor 0.630098\nI1207 10:53:33.200578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90408 > 2) by scale factor 0.688687\nI1207 10:53:34.145169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70102 > 2) by scale factor 0.540392\nI1207 10:53:34.157179   369 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1207 10:54:27.271528   369 solver.cpp:404]     Test net output #0: accuracy = 0.14605\nI1207 10:54:27.272025   369 solver.cpp:404]     Test net output #1: loss = 21.5666 (* 1 = 21.5666 loss)\nI1207 10:54:28.146178   369 solver.cpp:228] Iteration 12900, loss = 23.1555\nI1207 10:54:28.146225   369 solver.cpp:244]     Train net output #0: accuracy = 
0.12\nI1207 10:54:28.146251   369 solver.cpp:244]     Train net output #1: loss = 23.1555 (* 1 = 23.1555 loss)\nI1207 10:54:28.218310   369 sgd_solver.cpp:166] Iteration 12900, lr = 1.935\nI1207 10:54:28.228582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45142 > 2) by scale factor 0.449295\nI1207 10:54:29.169093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30097 > 2) by scale factor 0.869198\nI1207 10:54:30.110106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01216 > 2) by scale factor 0.663975\nI1207 10:54:31.051188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44588 > 2) by scale factor 0.817702\nI1207 10:54:31.991747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32521 > 2) by scale factor 0.462405\nI1207 10:54:32.931803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01247 > 2) by scale factor 0.663907\nI1207 10:54:33.872936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56377 > 2) by scale factor 0.780101\nI1207 10:54:34.813992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.967 > 2) by scale factor 0.674082\nI1207 10:54:35.755008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32978 > 2) by scale factor 0.60064\nI1207 10:54:36.695086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31698 > 2) by scale factor 0.863191\nI1207 10:54:37.634860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6491 > 2) by scale factor 0.754973\nI1207 10:54:38.575011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11352 > 2) by scale factor 0.64236\nI1207 10:54:39.516202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7883 > 2) by scale factor 0.527941\nI1207 10:54:40.456954  
 369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81695 > 2) by scale factor 0.709987\nI1207 10:54:41.398219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73261 > 2) by scale factor 0.731901\nI1207 10:54:42.339419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87649 > 2) by scale factor 0.695292\nI1207 10:54:43.280330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12483 > 2) by scale factor 0.640035\nI1207 10:54:44.220481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6648 > 2) by scale factor 0.545732\nI1207 10:54:45.161234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36207 > 2) by scale factor 0.846715\nI1207 10:54:46.101663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50927 > 2) by scale factor 0.797045\nI1207 10:54:47.042539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26298 > 2) by scale factor 0.883792\nI1207 10:54:47.982982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79634 > 2) by scale factor 0.526824\nI1207 10:54:48.923308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58089 > 2) by scale factor 0.558521\nI1207 10:54:49.864053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51016 > 2) by scale factor 0.569774\nI1207 10:54:50.805090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47503 > 2) by scale factor 0.446924\nI1207 10:54:51.745656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78831 > 2) by scale factor 0.717281\nI1207 10:54:52.686482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17236 > 2) by scale factor 0.630446\nI1207 10:54:53.627221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.21452 > 2) by scale factor 0.622178\nI1207 10:54:54.567963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42224 > 2) by scale factor 0.825682\nI1207 10:54:55.508891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7799 > 2) by scale factor 0.529115\nI1207 10:54:56.449823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40813 > 2) by scale factor 0.586833\nI1207 10:54:57.390635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.596 > 2) by scale factor 0.556174\nI1207 10:54:58.334549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28055 > 2) by scale factor 0.46723\nI1207 10:54:59.279189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79685 > 2) by scale factor 0.526752\nI1207 10:55:00.223034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13892 > 2) by scale factor 0.935051\nI1207 10:55:01.166832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67865 > 2) by scale factor 0.746646\nI1207 10:55:02.110815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62783 > 2) by scale factor 0.551294\nI1207 10:55:03.055024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49687 > 2) by scale factor 0.571941\nI1207 10:55:03.999109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18192 > 2) by scale factor 0.916624\nI1207 10:55:04.943079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94153 > 2) by scale factor 0.679917\nI1207 10:55:05.887269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24653 > 2) by scale factor 0.616042\nI1207 10:55:06.831553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0275 > 2) by scale factor 0.660611\nI1207 10:55:07.775547   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08015 > 2) by scale factor 0.64932\nI1207 10:55:08.719595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60897 > 2) by scale factor 0.766586\nI1207 10:55:10.605269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9613 > 2) by scale factor 0.67538\nI1207 10:55:11.549240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12464 > 2) by scale factor 0.640073\nI1207 10:55:12.493521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73307 > 2) by scale factor 0.731778\nI1207 10:55:13.437471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15287 > 2) by scale factor 0.634342\nI1207 10:55:14.381453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73836 > 2) by scale factor 0.730365\nI1207 10:55:15.325222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2316 > 2) by scale factor 0.896218\nI1207 10:55:16.269520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47732 > 2) by scale factor 0.807323\nI1207 10:55:17.213450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90597 > 2) by scale factor 0.688239\nI1207 10:55:18.157459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91479 > 2) by scale factor 0.510883\nI1207 10:55:19.101613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39116 > 2) by scale factor 0.45546\nI1207 10:55:20.045363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89282 > 2) by scale factor 0.691366\nI1207 10:55:20.989451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18102 > 2) by scale factor 0.628729\nI1207 10:55:21.933336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.75517 > 2) by scale factor 0.725908\nI1207 10:55:22.877681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74846 > 2) by scale factor 0.533552\nI1207 10:55:23.821642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88632 > 2) by scale factor 0.514626\nI1207 10:55:24.765789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56495 > 2) by scale factor 0.779743\nI1207 10:55:25.710080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39906 > 2) by scale factor 0.454642\nI1207 10:55:26.654381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52317 > 2) by scale factor 0.56767\nI1207 10:55:27.598266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87613 > 2) by scale factor 0.69538\nI1207 10:55:28.542696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05141 > 2) by scale factor 0.974941\nI1207 10:55:29.486240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2042 > 2) by scale factor 0.624181\nI1207 10:55:30.430645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53753 > 2) by scale factor 0.788169\nI1207 10:55:31.375005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8976 > 2) by scale factor 0.690226\nI1207 10:55:32.318461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62401 > 2) by scale factor 0.762191\nI1207 10:55:33.262533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01636 > 2) by scale factor 0.497963\nI1207 10:55:34.206763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01297 > 2) by scale factor 0.663797\nI1207 10:55:35.150931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95419 > 2) by scale factor 0.505792\nI1207 10:55:36.094455   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81625 > 2) by scale factor 0.710163\nI1207 10:55:37.039238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75485 > 2) by scale factor 0.725992\nI1207 10:55:37.983134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92611 > 2) by scale factor 0.50941\nI1207 10:55:38.927063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39366 > 2) by scale factor 0.589335\nI1207 10:55:39.870491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65622 > 2) by scale factor 0.547012\nI1207 10:55:40.813494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24581 > 2) by scale factor 0.616179\nI1207 10:55:41.757930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24804 > 2) by scale factor 0.615755\nI1207 10:55:42.701654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82714 > 2) by scale factor 0.707428\nI1207 10:55:43.645577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33341 > 2) by scale factor 0.46153\nI1207 10:55:44.589644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59648 > 2) by scale factor 0.435115\nI1207 10:55:45.533273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39688 > 2) by scale factor 0.588776\nI1207 10:55:46.477641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45193 > 2) by scale factor 0.815685\nI1207 10:55:47.420642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35883 > 2) by scale factor 0.595445\nI1207 10:55:48.364627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63591 > 2) by scale factor 0.550069\nI1207 10:55:49.308356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.97856 > 2) by scale factor 0.502695\nI1207 10:55:50.251408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67705 > 2) by scale factor 0.543914\nI1207 10:55:51.195657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75847 > 2) by scale factor 0.532132\nI1207 10:55:52.138440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56054 > 2) by scale factor 0.561712\nI1207 10:55:53.082422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67691 > 2) by scale factor 0.427633\nI1207 10:55:54.025269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09429 > 2) by scale factor 0.646352\nI1207 10:55:54.968356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77 > 2) by scale factor 0.530503\nI1207 10:55:55.912117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3164 > 2) by scale factor 0.863407\nI1207 10:55:56.855499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08746 > 2) by scale factor 0.647782\nI1207 10:55:57.799588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61509 > 2) by scale factor 0.553237\nI1207 10:55:58.743448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82789 > 2) by scale factor 0.707242\nI1207 10:55:59.686621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.216 > 2) by scale factor 0.902526\nI1207 10:56:00.630795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96892 > 2) by scale factor 0.673647\nI1207 10:56:01.573606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34156 > 2) by scale factor 0.460664\nI1207 10:56:01.585531   369 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1207 10:56:54.360776   369 solver.cpp:404]     Test net output #0: accuracy = 
0.1718\nI1207 10:56:54.361269   369 solver.cpp:404]     Test net output #1: loss = 15.1133 (* 1 = 15.1133 loss)\nI1207 10:56:55.235855   369 solver.cpp:228] Iteration 13000, loss = 14.276\nI1207 10:56:55.235913   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 10:56:55.235939   369 solver.cpp:244]     Train net output #1: loss = 14.276 (* 1 = 14.276 loss)\nI1207 10:56:55.304947   369 sgd_solver.cpp:166] Iteration 13000, lr = 1.95\nI1207 10:56:55.315119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8548 > 2) by scale factor 0.700574\nI1207 10:56:56.256446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58593 > 2) by scale factor 0.773416\nI1207 10:56:57.197579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87248 > 2) by scale factor 0.516465\nI1207 10:56:58.138520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92293 > 2) by scale factor 0.684245\nI1207 10:56:59.079409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51333 > 2) by scale factor 0.795756\nI1207 10:57:00.019953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16911 > 2) by scale factor 0.479719\nI1207 10:57:00.960286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50017 > 2) by scale factor 0.799946\nI1207 10:57:01.900076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60599 > 2) by scale factor 0.767464\nI1207 10:57:02.840590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35842 > 2) by scale factor 0.595519\nI1207 10:57:03.781600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99439 > 2) by scale factor 0.500702\nI1207 10:57:04.722757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45436 > 2) by scale factor 0.814875\nI1207 10:57:05.663478   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51969 > 2) by scale factor 0.568232\nI1207 10:57:06.604357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88037 > 2) by scale factor 0.694356\nI1207 10:57:07.545238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57703 > 2) by scale factor 0.559124\nI1207 10:57:08.485553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49049 > 2) by scale factor 0.445386\nI1207 10:57:09.425482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75906 > 2) by scale factor 0.724886\nI1207 10:57:10.366797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86954 > 2) by scale factor 0.696976\nI1207 10:57:11.307518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47778 > 2) by scale factor 0.807173\nI1207 10:57:12.248808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27194 > 2) by scale factor 0.611259\nI1207 10:57:13.189616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16754 > 2) by scale factor 0.922705\nI1207 10:57:15.069622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00196 > 2) by scale factor 0.666232\nI1207 10:57:16.010531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49208 > 2) by scale factor 0.802542\nI1207 10:57:17.890353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33905 > 2) by scale factor 0.598972\nI1207 10:57:18.831523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49402 > 2) by scale factor 0.801919\nI1207 10:57:19.772639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45018 > 2) by scale factor 0.57968\nI1207 10:57:20.713443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.97164 > 2) by scale factor 0.673029\nI1207 10:57:21.654307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90612 > 2) by scale factor 0.688204\nI1207 10:57:22.595273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9923 > 2) by scale factor 0.668383\nI1207 10:57:23.536095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06744 > 2) by scale factor 0.491709\nI1207 10:57:24.476784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27718 > 2) by scale factor 0.878278\nI1207 10:57:25.418848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13862 > 2) by scale factor 0.637222\nI1207 10:57:26.360337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94451 > 2) by scale factor 0.507034\nI1207 10:57:27.301673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94831 > 2) by scale factor 0.506546\nI1207 10:57:28.244808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03947 > 2) by scale factor 0.495114\nI1207 10:57:29.188134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23629 > 2) by scale factor 0.617991\nI1207 10:57:30.131392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11462 > 2) by scale factor 0.945797\nI1207 10:57:31.074689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36488 > 2) by scale factor 0.84571\nI1207 10:57:32.017951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20384 > 2) by scale factor 0.624251\nI1207 10:57:32.960415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06216 > 2) by scale factor 0.969858\nI1207 10:57:33.903548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14334 > 2) by scale factor 0.933124\nI1207 10:57:34.845835   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06969 > 2) by scale factor 0.651531\nI1207 10:57:35.788394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26925 > 2) by scale factor 0.611762\nI1207 10:57:36.731006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37898 > 2) by scale factor 0.456728\nI1207 10:57:37.673820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2308 > 2) by scale factor 0.896539\nI1207 10:57:38.617739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8156 > 2) by scale factor 0.524164\nI1207 10:57:39.561187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96674 > 2) by scale factor 0.674141\nI1207 10:57:40.503463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7624 > 2) by scale factor 0.724008\nI1207 10:57:41.447039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69944 > 2) by scale factor 0.540622\nI1207 10:57:42.389770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9288 > 2) by scale factor 0.509061\nI1207 10:57:43.332499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60669 > 2) by scale factor 0.554525\nI1207 10:57:44.275310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65417 > 2) by scale factor 0.547319\nI1207 10:57:45.217825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31862 > 2) by scale factor 0.602659\nI1207 10:57:46.160933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20339 > 2) by scale factor 0.62434\nI1207 10:57:47.103132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38077 > 2) by scale factor 0.840066\nI1207 10:57:48.046476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.11699 > 2) by scale factor 0.944739\nI1207 10:57:48.989388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12377 > 2) by scale factor 0.484994\nI1207 10:57:49.931949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86359 > 2) by scale factor 0.517654\nI1207 10:57:50.875144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49084 > 2) by scale factor 0.572927\nI1207 10:57:51.818519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14534 > 2) by scale factor 0.482469\nI1207 10:57:52.762085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56282 > 2) by scale factor 0.780391\nI1207 10:57:53.705245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39581 > 2) by scale factor 0.83479\nI1207 10:57:54.647893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0381 > 2) by scale factor 0.658306\nI1207 10:57:55.591063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50267 > 2) by scale factor 0.444181\nI1207 10:57:56.535352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9842 > 2) by scale factor 0.501983\nI1207 10:57:57.479702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82265 > 2) by scale factor 0.523197\nI1207 10:57:58.422142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94815 > 2) by scale factor 0.678392\nI1207 10:57:59.365222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41786 > 2) by scale factor 0.452708\nI1207 10:58:00.307641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01613 > 2) by scale factor 0.497992\nI1207 10:58:01.251153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81288 > 2) by scale factor 0.711014\nI1207 10:58:02.193747   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14343 > 2) by scale factor 0.482692\nI1207 10:58:03.137027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32614 > 2) by scale factor 0.462306\nI1207 10:58:04.079617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20153 > 2) by scale factor 0.624701\nI1207 10:58:05.021981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38451 > 2) by scale factor 0.590928\nI1207 10:58:05.964484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29982 > 2) by scale factor 0.606094\nI1207 10:58:06.906955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13031 > 2) by scale factor 0.638915\nI1207 10:58:07.850159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55658 > 2) by scale factor 0.782295\nI1207 10:58:08.793292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62407 > 2) by scale factor 0.762174\nI1207 10:58:09.735906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21721 > 2) by scale factor 0.902034\nI1207 10:58:10.679817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46861 > 2) by scale factor 0.810173\nI1207 10:58:11.622346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39718 > 2) by scale factor 0.834314\nI1207 10:58:12.566303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11592 > 2) by scale factor 0.641865\nI1207 10:58:13.508930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40457 > 2) by scale factor 0.587446\nI1207 10:58:14.451354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84832 > 2) by scale factor 0.702168\nI1207 10:58:15.394060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.6721 > 2) by scale factor 0.544647\nI1207 10:58:16.336627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01547 > 2) by scale factor 0.498074\nI1207 10:58:17.280177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81714 > 2) by scale factor 0.709939\nI1207 10:58:18.223675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26358 > 2) by scale factor 0.612823\nI1207 10:58:19.166537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06609 > 2) by scale factor 0.652297\nI1207 10:58:20.109902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76064 > 2) by scale factor 0.420111\nI1207 10:58:21.052558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79368 > 2) by scale factor 0.417216\nI1207 10:58:21.996600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73956 > 2) by scale factor 0.730044\nI1207 10:58:22.940235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96016 > 2) by scale factor 0.67564\nI1207 10:58:23.883002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85756 > 2) by scale factor 0.699897\nI1207 10:58:24.826380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80339 > 2) by scale factor 0.713423\nI1207 10:58:25.769523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15286 > 2) by scale factor 0.481595\nI1207 10:58:26.713086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5051 > 2) by scale factor 0.798373\nI1207 10:58:27.656757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61672 > 2) by scale factor 0.764316\nI1207 10:58:28.599062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85356 > 2) by scale factor 0.700879\nI1207 10:58:28.611021  
 369 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1207 10:59:21.336217   369 solver.cpp:404]     Test net output #0: accuracy = 0.2135\nI1207 10:59:21.336714   369 solver.cpp:404]     Test net output #1: loss = 7.78627 (* 1 = 7.78627 loss)\nI1207 10:59:22.211714   369 solver.cpp:228] Iteration 13100, loss = 8.41381\nI1207 10:59:22.211767   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 10:59:22.211793   369 solver.cpp:244]     Train net output #1: loss = 8.41381 (* 1 = 8.41381 loss)\nI1207 10:59:22.293669   369 sgd_solver.cpp:166] Iteration 13100, lr = 1.965\nI1207 10:59:22.303778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32278 > 2) by scale factor 0.861036\nI1207 10:59:23.244734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66885 > 2) by scale factor 0.428371\nI1207 10:59:24.185361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87362 > 2) by scale factor 0.695987\nI1207 10:59:25.126758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80653 > 2) by scale factor 0.525412\nI1207 10:59:26.067713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69786 > 2) by scale factor 0.540853\nI1207 10:59:27.008616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91225 > 2) by scale factor 0.511215\nI1207 10:59:27.949672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52623 > 2) by scale factor 0.791694\nI1207 10:59:28.890060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75831 > 2) by scale factor 0.725082\nI1207 10:59:29.829799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01331 > 2) by scale factor 0.993391\nI1207 10:59:30.770390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60975 > 2) by scale factor 0.554056\nI1207 10:59:31.711086   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22594 > 2) by scale factor 0.619974\nI1207 10:59:32.651842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64171 > 2) by scale factor 0.757084\nI1207 10:59:33.592883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50489 > 2) by scale factor 0.570631\nI1207 10:59:34.533350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31108 > 2) by scale factor 0.604032\nI1207 10:59:35.474591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70404 > 2) by scale factor 0.539951\nI1207 10:59:36.415464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6481 > 2) by scale factor 0.54823\nI1207 10:59:37.356003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10288 > 2) by scale factor 0.487462\nI1207 10:59:38.296746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54021 > 2) by scale factor 0.564939\nI1207 10:59:39.236467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54879 > 2) by scale factor 0.563573\nI1207 10:59:40.177037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94667 > 2) by scale factor 0.506757\nI1207 10:59:41.118002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1888 > 2) by scale factor 0.477463\nI1207 10:59:42.058724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54866 > 2) by scale factor 0.563593\nI1207 10:59:42.998631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.745 > 2) by scale factor 0.728597\nI1207 10:59:43.939848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76981 > 2) by scale factor 0.722071\nI1207 10:59:44.881019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.65888 > 2) by scale factor 0.546615\nI1207 10:59:45.821871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11222 > 2) by scale factor 0.642628\nI1207 10:59:46.763077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86621 > 2) by scale factor 0.517303\nI1207 10:59:47.703955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63821 > 2) by scale factor 0.54972\nI1207 10:59:48.644991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87703 > 2) by scale factor 0.695161\nI1207 10:59:49.585991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13058 > 2) by scale factor 0.638858\nI1207 10:59:50.527110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35021 > 2) by scale factor 0.596977\nI1207 10:59:51.469106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55129 > 2) by scale factor 0.439436\nI1207 10:59:52.412307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52592 > 2) by scale factor 0.441899\nI1207 10:59:53.354882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3216 > 2) by scale factor 0.60212\nI1207 10:59:54.297708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58454 > 2) by scale factor 0.557952\nI1207 10:59:55.240618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40073 > 2) by scale factor 0.588108\nI1207 10:59:56.183262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94366 > 2) by scale factor 0.507144\nI1207 10:59:57.125830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74082 > 2) by scale factor 0.72971\nI1207 10:59:58.068353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41672 > 2) by scale factor 0.452824\nI1207 10:59:59.010455   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01504 > 2) by scale factor 0.498127\nI1207 10:59:59.953725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06412 > 2) by scale factor 0.492111\nI1207 11:00:00.896611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73764 > 2) by scale factor 0.535097\nI1207 11:00:01.839193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74824 > 2) by scale factor 0.533584\nI1207 11:00:02.782402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55538 > 2) by scale factor 0.562528\nI1207 11:00:03.725278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72226 > 2) by scale factor 0.537308\nI1207 11:00:04.668565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09025 > 2) by scale factor 0.647198\nI1207 11:00:06.553506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3206 > 2) by scale factor 0.6023\nI1207 11:00:07.497251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38024 > 2) by scale factor 0.591674\nI1207 11:00:08.440918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85424 > 2) by scale factor 0.700711\nI1207 11:00:09.385234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7715 > 2) by scale factor 0.530293\nI1207 11:00:10.328558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40302 > 2) by scale factor 0.587713\nI1207 11:00:11.272167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03295 > 2) by scale factor 0.495915\nI1207 11:00:12.215735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84126 > 2) by scale factor 0.520663\nI1207 11:00:13.159273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.88579 > 2) by scale factor 0.693051\nI1207 11:00:14.103389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25951 > 2) by scale factor 0.469537\nI1207 11:00:15.046756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0982 > 2) by scale factor 0.645536\nI1207 11:00:15.990728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35397 > 2) by scale factor 0.596309\nI1207 11:00:16.934535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8329 > 2) by scale factor 0.521798\nI1207 11:00:17.878402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98516 > 2) by scale factor 0.501862\nI1207 11:00:18.822301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15945 > 2) by scale factor 0.633022\nI1207 11:00:19.766466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15808 > 2) by scale factor 0.480991\nI1207 11:00:20.709194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95367 > 2) by scale factor 0.505859\nI1207 11:00:21.652441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83007 > 2) by scale factor 0.706697\nI1207 11:00:22.595916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66049 > 2) by scale factor 0.751741\nI1207 11:00:23.539414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37918 > 2) by scale factor 0.591859\nI1207 11:00:24.483659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94 > 2) by scale factor 0.680273\nI1207 11:00:25.427346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57316 > 2) by scale factor 0.777255\nI1207 11:00:27.312428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54318 > 2) by scale factor 0.564465\nI1207 11:00:28.256074   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75357 > 2) by scale factor 0.420737\nI1207 11:00:29.200084   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84726 > 2) by scale factor 0.519851\nI1207 11:00:30.144204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24867 > 2) by scale factor 0.615636\nI1207 11:00:31.088366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00059 > 2) by scale factor 0.666535\nI1207 11:00:32.032366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15128 > 2) by scale factor 0.634662\nI1207 11:00:32.977022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87404 > 2) by scale factor 0.695886\nI1207 11:00:33.920591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12914 > 2) by scale factor 0.639153\nI1207 11:00:34.864948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30214 > 2) by scale factor 0.464884\nI1207 11:00:35.809092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25868 > 2) by scale factor 0.885474\nI1207 11:00:36.753404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51087 > 2) by scale factor 0.796536\nI1207 11:00:37.697518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20024 > 2) by scale factor 0.908992\nI1207 11:00:38.641393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56709 > 2) by scale factor 0.437916\nI1207 11:00:39.585741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04357 > 2) by scale factor 0.494612\nI1207 11:00:40.529675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33701 > 2) by scale factor 0.855793\nI1207 11:00:41.473399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.63743 > 2) by scale factor 0.758313\nI1207 11:00:42.417481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92775 > 2) by scale factor 0.683119\nI1207 11:00:43.361416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94602 > 2) by scale factor 0.678883\nI1207 11:00:44.305579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42348 > 2) by scale factor 0.82526\nI1207 11:00:45.249845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57098 > 2) by scale factor 0.560071\nI1207 11:00:46.192733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02624 > 2) by scale factor 0.660887\nI1207 11:00:47.135184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63672 > 2) by scale factor 0.758518\nI1207 11:00:48.077817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17413 > 2) by scale factor 0.919908\nI1207 11:00:49.019675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70079 > 2) by scale factor 0.740524\nI1207 11:00:49.962617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9616 > 2) by scale factor 0.504846\nI1207 11:00:50.905396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52641 > 2) by scale factor 0.56715\nI1207 11:00:52.789495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56186 > 2) by scale factor 0.780683\nI1207 11:00:53.733414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66248 > 2) by scale factor 0.751179\nI1207 11:00:54.676728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67979 > 2) by scale factor 0.543509\nI1207 11:00:55.619519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53782 > 2) by scale factor 0.56532\nI1207 11:00:55.631536   369 
solver.cpp:337] Iteration 13200, Testing net (#0)\nI1207 11:01:48.567986   369 solver.cpp:404]     Test net output #0: accuracy = 0.1785\nI1207 11:01:48.568462   369 solver.cpp:404]     Test net output #1: loss = 11.9658 (* 1 = 11.9658 loss)\nI1207 11:01:49.442562   369 solver.cpp:228] Iteration 13200, loss = 12.7017\nI1207 11:01:49.442615   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 11:01:49.442642   369 solver.cpp:244]     Train net output #1: loss = 12.7017 (* 1 = 12.7017 loss)\nI1207 11:01:49.512338   369 sgd_solver.cpp:166] Iteration 13200, lr = 1.98\nI1207 11:01:49.522477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30435 > 2) by scale factor 0.867924\nI1207 11:01:50.463526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68001 > 2) by scale factor 0.543477\nI1207 11:01:51.404093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66858 > 2) by scale factor 0.545171\nI1207 11:01:52.345134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24446 > 2) by scale factor 0.471202\nI1207 11:01:53.286362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41842 > 2) by scale factor 0.45265\nI1207 11:01:54.227160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17101 > 2) by scale factor 0.92123\nI1207 11:01:55.167866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92573 > 2) by scale factor 0.509459\nI1207 11:01:56.109047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04705 > 2) by scale factor 0.977017\nI1207 11:01:57.048727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2905 > 2) by scale factor 0.873172\nI1207 11:01:57.988595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87501 > 2) by scale factor 0.516128\nI1207 11:01:59.865203   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22322 > 2) by scale factor 0.899598\nI1207 11:02:00.805208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15407 > 2) by scale factor 0.6341\nI1207 11:02:01.745134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36019 > 2) by scale factor 0.847391\nI1207 11:02:02.685115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42873 > 2) by scale factor 0.583306\nI1207 11:02:03.624554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12504 > 2) by scale factor 0.484843\nI1207 11:02:04.564141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7912 > 2) by scale factor 0.716538\nI1207 11:02:05.503787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92149 > 2) by scale factor 0.406381\nI1207 11:02:06.443666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94513 > 2) by scale factor 0.506954\nI1207 11:02:07.384760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64223 > 2) by scale factor 0.549114\nI1207 11:02:08.325193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40833 > 2) by scale factor 0.586797\nI1207 11:02:09.265653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07335 > 2) by scale factor 0.490997\nI1207 11:02:10.205729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75974 > 2) by scale factor 0.531952\nI1207 11:02:12.085389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44508 > 2) by scale factor 0.817968\nI1207 11:02:13.026440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66341 > 2) by scale factor 0.750916\nI1207 11:02:13.966583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.28506 > 2) by scale factor 0.608816\nI1207 11:02:14.907640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02991 > 2) by scale factor 0.985265\nI1207 11:02:15.848196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24852 > 2) by scale factor 0.615664\nI1207 11:02:16.789397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96035 > 2) by scale factor 0.675595\nI1207 11:02:17.729312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06945 > 2) by scale factor 0.491467\nI1207 11:02:18.670359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71894 > 2) by scale factor 0.537787\nI1207 11:02:19.612112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24126 > 2) by scale factor 0.471558\nI1207 11:02:20.552968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27369 > 2) by scale factor 0.610932\nI1207 11:02:21.492931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26441 > 2) by scale factor 0.612668\nI1207 11:02:22.433498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17467 > 2) by scale factor 0.919682\nI1207 11:02:23.374794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82101 > 2) by scale factor 0.708967\nI1207 11:02:24.318270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60953 > 2) by scale factor 0.433884\nI1207 11:02:25.261330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88244 > 2) by scale factor 0.51514\nI1207 11:02:26.204779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00244 > 2) by scale factor 0.499696\nI1207 11:02:27.147874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48538 > 2) by scale factor 0.804705\nI1207 11:02:28.091328   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07353 > 2) by scale factor 0.650718\nI1207 11:02:29.035014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4979 > 2) by scale factor 0.800674\nI1207 11:02:29.977697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85371 > 2) by scale factor 0.700842\nI1207 11:02:30.920779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54125 > 2) by scale factor 0.787014\nI1207 11:02:31.863860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41897 > 2) by scale factor 0.826799\nI1207 11:02:32.806668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30518 > 2) by scale factor 0.86761\nI1207 11:02:33.749243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37526 > 2) by scale factor 0.592548\nI1207 11:02:34.691701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65761 > 2) by scale factor 0.752555\nI1207 11:02:35.634939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15558 > 2) by scale factor 0.927824\nI1207 11:02:36.578874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1372 > 2) by scale factor 0.935804\nI1207 11:02:37.522711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11635 > 2) by scale factor 0.945021\nI1207 11:02:38.466128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93445 > 2) by scale factor 0.681558\nI1207 11:02:39.409680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24265 > 2) by scale factor 0.61678\nI1207 11:02:40.352946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95783 > 2) by scale factor 0.505327\nI1207 11:02:41.296393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.80198 > 2) by scale factor 0.526042\nI1207 11:02:42.238847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94057 > 2) by scale factor 0.68014\nI1207 11:02:43.182155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78632 > 2) by scale factor 0.528217\nI1207 11:02:44.124697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64113 > 2) by scale factor 0.43093\nI1207 11:02:46.008994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17872 > 2) by scale factor 0.629185\nI1207 11:02:46.952798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99521 > 2) by scale factor 0.667733\nI1207 11:02:47.896220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78731 > 2) by scale factor 0.52808\nI1207 11:02:48.839411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80415 > 2) by scale factor 0.713229\nI1207 11:02:49.783223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23662 > 2) by scale factor 0.617928\nI1207 11:02:50.726768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02234 > 2) by scale factor 0.66174\nI1207 11:02:51.669689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75931 > 2) by scale factor 0.532013\nI1207 11:02:52.612964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22473 > 2) by scale factor 0.473403\nI1207 11:02:53.556233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26601 > 2) by scale factor 0.88261\nI1207 11:02:54.499480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42112 > 2) by scale factor 0.584604\nI1207 11:02:55.442108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14741 > 2) by scale factor 0.482228\nI1207 11:02:56.385815   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64231 > 2) by scale factor 0.549103\nI1207 11:02:57.329258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97465 > 2) by scale factor 0.672348\nI1207 11:02:58.272080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77937 > 2) by scale factor 0.529188\nI1207 11:02:59.215471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06637 > 2) by scale factor 0.652237\nI1207 11:03:00.159183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47546 > 2) by scale factor 0.446882\nI1207 11:03:01.102627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39524 > 2) by scale factor 0.455038\nI1207 11:03:02.045542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28726 > 2) by scale factor 0.466499\nI1207 11:03:02.988884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87076 > 2) by scale factor 0.410614\nI1207 11:03:03.932056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89632 > 2) by scale factor 0.690531\nI1207 11:03:04.875239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61774 > 2) by scale factor 0.764017\nI1207 11:03:05.818039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63009 > 2) by scale factor 0.760429\nI1207 11:03:06.761112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70331 > 2) by scale factor 0.739835\nI1207 11:03:07.703881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65712 > 2) by scale factor 0.546879\nI1207 11:03:08.647379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39246 > 2) by scale factor 0.589543\nI1207 11:03:09.590713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.68323 > 2) by scale factor 0.543002\nI1207 11:03:10.534093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09354 > 2) by scale factor 0.646508\nI1207 11:03:11.477187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31852 > 2) by scale factor 0.602678\nI1207 11:03:12.420197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02535 > 2) by scale factor 0.66108\nI1207 11:03:13.363587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8329 > 2) by scale factor 0.70599\nI1207 11:03:14.306376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50021 > 2) by scale factor 0.799933\nI1207 11:03:15.249979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9658 > 2) by scale factor 0.674355\nI1207 11:03:16.193523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05167 > 2) by scale factor 0.974818\nI1207 11:03:17.136523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61112 > 2) by scale factor 0.765956\nI1207 11:03:18.079244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58165 > 2) by scale factor 0.558402\nI1207 11:03:19.961993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20513 > 2) by scale factor 0.623999\nI1207 11:03:20.906163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27398 > 2) by scale factor 0.467948\nI1207 11:03:21.849442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28006 > 2) by scale factor 0.467284\nI1207 11:03:22.792713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58838 > 2) by scale factor 0.772685\nI1207 11:03:22.804746   369 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1207 11:04:15.653133   369 solver.cpp:404]     Test net output #0: accuracy = 
0.1532\nI1207 11:04:15.653586   369 solver.cpp:404]     Test net output #1: loss = 12.9872 (* 1 = 12.9872 loss)\nI1207 11:04:16.528720   369 solver.cpp:228] Iteration 13300, loss = 12.9954\nI1207 11:04:16.528770   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 11:04:16.528796   369 solver.cpp:244]     Train net output #1: loss = 12.9954 (* 1 = 12.9954 loss)\nI1207 11:04:16.598732   369 sgd_solver.cpp:166] Iteration 13300, lr = 1.995\nI1207 11:04:16.608871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66392 > 2) by scale factor 0.545863\nI1207 11:04:17.548580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39346 > 2) by scale factor 0.83561\nI1207 11:04:18.487825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79509 > 2) by scale factor 0.715541\nI1207 11:04:19.426957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58365 > 2) by scale factor 0.558091\nI1207 11:04:20.366662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76581 > 2) by scale factor 0.531094\nI1207 11:04:21.306325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15257 > 2) by scale factor 0.634403\nI1207 11:04:22.246408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41392 > 2) by scale factor 0.585836\nI1207 11:04:23.187151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0195 > 2) by scale factor 0.662361\nI1207 11:04:24.128208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39002 > 2) by scale factor 0.589967\nI1207 11:04:25.068701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83884 > 2) by scale factor 0.704514\nI1207 11:04:26.008630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86682 > 2) by scale factor 0.517221\nI1207 11:04:26.949234   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53612 > 2) by scale factor 0.788605\nI1207 11:04:27.890234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57649 > 2) by scale factor 0.437016\nI1207 11:04:28.830860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71504 > 2) by scale factor 0.736636\nI1207 11:04:29.771911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22658 > 2) by scale factor 0.619851\nI1207 11:04:30.712649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68592 > 2) by scale factor 0.542605\nI1207 11:04:31.653443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20914 > 2) by scale factor 0.623221\nI1207 11:04:32.594172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71888 > 2) by scale factor 0.423829\nI1207 11:04:33.534045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00955 > 2) by scale factor 0.664551\nI1207 11:04:34.474424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04808 > 2) by scale factor 0.656151\nI1207 11:04:35.414799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75115 > 2) by scale factor 0.53317\nI1207 11:04:36.355123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28018 > 2) by scale factor 0.877125\nI1207 11:04:37.296455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4688 > 2) by scale factor 0.576568\nI1207 11:04:38.237690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44973 > 2) by scale factor 0.579756\nI1207 11:04:39.177525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31554 > 2) by scale factor 0.60322\nI1207 11:04:40.116741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.64222 > 2) by scale factor 0.549116\nI1207 11:04:41.056746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9817 > 2) by scale factor 0.502298\nI1207 11:04:41.996736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11888 > 2) by scale factor 0.485569\nI1207 11:04:42.936944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02525 > 2) by scale factor 0.661102\nI1207 11:04:43.876840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09028 > 2) by scale factor 0.647191\nI1207 11:04:44.817131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25526 > 2) by scale factor 0.886816\nI1207 11:04:45.757230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59307 > 2) by scale factor 0.771285\nI1207 11:04:46.697520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83756 > 2) by scale factor 0.70483\nI1207 11:04:47.641343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94678 > 2) by scale factor 0.678707\nI1207 11:04:48.584991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85866 > 2) by scale factor 0.699629\nI1207 11:04:49.528605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92909 > 2) by scale factor 0.509023\nI1207 11:04:50.471866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79883 > 2) by scale factor 0.714583\nI1207 11:04:51.415165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08091 > 2) by scale factor 0.649158\nI1207 11:04:52.358645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60982 > 2) by scale factor 0.766337\nI1207 11:04:53.302283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65991 > 2) by scale factor 0.546462\nI1207 11:04:54.245616   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12775 > 2) by scale factor 0.484525\nI1207 11:04:55.188905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94354 > 2) by scale factor 0.679454\nI1207 11:04:56.132459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95633 > 2) by scale factor 0.676514\nI1207 11:04:58.016881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15353 > 2) by scale factor 0.634211\nI1207 11:04:58.960062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96512 > 2) by scale factor 0.504399\nI1207 11:04:59.903560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49325 > 2) by scale factor 0.802164\nI1207 11:05:00.846565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31282 > 2) by scale factor 0.603715\nI1207 11:05:01.788805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26527 > 2) by scale factor 0.468904\nI1207 11:05:02.731163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5075 > 2) by scale factor 0.797607\nI1207 11:05:03.673595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0157 > 2) by scale factor 0.663195\nI1207 11:05:05.555781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86776 > 2) by scale factor 0.697408\nI1207 11:05:06.498967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30553 > 2) by scale factor 0.605046\nI1207 11:05:07.441311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1028 > 2) by scale factor 0.487472\nI1207 11:05:08.384660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31188 > 2) by scale factor 0.603887\nI1207 11:05:09.327211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.37303 > 2) by scale factor 0.842805\nI1207 11:05:10.269450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22924 > 2) by scale factor 0.619341\nI1207 11:05:11.211539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48041 > 2) by scale factor 0.574645\nI1207 11:05:12.153645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27632 > 2) by scale factor 0.467692\nI1207 11:05:13.096019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87121 > 2) by scale factor 0.516634\nI1207 11:05:14.037781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28889 > 2) by scale factor 0.466321\nI1207 11:05:14.979531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50894 > 2) by scale factor 0.569973\nI1207 11:05:15.921161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11486 > 2) by scale factor 0.486044\nI1207 11:05:16.863850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40273 > 2) by scale factor 0.587763\nI1207 11:05:17.806257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62303 > 2) by scale factor 0.762478\nI1207 11:05:18.749732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55795 > 2) by scale factor 0.438794\nI1207 11:05:19.692221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16125 > 2) by scale factor 0.480624\nI1207 11:05:20.635258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86751 > 2) by scale factor 0.69747\nI1207 11:05:21.578758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79784 > 2) by scale factor 0.526616\nI1207 11:05:22.521944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81836 > 2) by scale factor 0.523785\nI1207 11:05:23.465656   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48491 > 2) by scale factor 0.804857\nI1207 11:05:24.408661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72769 > 2) by scale factor 0.73322\nI1207 11:05:25.352021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63823 > 2) by scale factor 0.758084\nI1207 11:05:26.295166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00656 > 2) by scale factor 0.665212\nI1207 11:05:27.239028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6523 > 2) by scale factor 0.547601\nI1207 11:05:28.182101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42884 > 2) by scale factor 0.823437\nI1207 11:05:29.125609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91 > 2) by scale factor 0.687286\nI1207 11:05:30.068435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29939 > 2) by scale factor 0.606172\nI1207 11:05:31.012449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62679 > 2) by scale factor 0.761386\nI1207 11:05:31.956753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53339 > 2) by scale factor 0.566029\nI1207 11:05:32.899873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74521 > 2) by scale factor 0.728541\nI1207 11:05:33.843627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.652 > 2) by scale factor 0.754146\nI1207 11:05:34.786540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5815 > 2) by scale factor 0.774743\nI1207 11:05:35.730687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17467 > 2) by scale factor 0.919682\nI1207 11:05:36.674537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.22788 > 2) by scale factor 0.619601\nI1207 11:05:37.617780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00271 > 2) by scale factor 0.666065\nI1207 11:05:38.561700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3738 > 2) by scale factor 0.842531\nI1207 11:05:39.505465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3189 > 2) by scale factor 0.463081\nI1207 11:05:40.448034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48162 > 2) by scale factor 0.805925\nI1207 11:05:41.390347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07389 > 2) by scale factor 0.964371\nI1207 11:05:42.333072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31607 > 2) by scale factor 0.863533\nI1207 11:05:43.276185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53525 > 2) by scale factor 0.788878\nI1207 11:05:44.218737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00799 > 2) by scale factor 0.664896\nI1207 11:05:45.161658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43032 > 2) by scale factor 0.822938\nI1207 11:05:46.104951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14688 > 2) by scale factor 0.48229\nI1207 11:05:47.049309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76272 > 2) by scale factor 0.531531\nI1207 11:05:47.993302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11902 > 2) by scale factor 0.641226\nI1207 11:05:48.935698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50261 > 2) by scale factor 0.571002\nI1207 11:05:49.888075   369 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1207 11:06:42.766621   369 solver.cpp:404]     Test net output #0: accuracy = 
0.181\nI1207 11:06:42.767132   369 solver.cpp:404]     Test net output #1: loss = 11.3832 (* 1 = 11.3832 loss)\nI1207 11:06:43.654455   369 solver.cpp:228] Iteration 13400, loss = 10.3455\nI1207 11:06:43.654498   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 11:06:43.654523   369 solver.cpp:244]     Train net output #1: loss = 10.3455 (* 1 = 10.3455 loss)\nI1207 11:06:43.710283   369 sgd_solver.cpp:166] Iteration 13400, lr = 2.01\nI1207 11:06:43.720420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97623 > 2) by scale factor 0.671992\nI1207 11:06:44.661262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80732 > 2) by scale factor 0.712423\nI1207 11:06:45.602442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10655 > 2) by scale factor 0.6438\nI1207 11:06:46.543540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34514 > 2) by scale factor 0.597883\nI1207 11:06:47.484098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24511 > 2) by scale factor 0.47113\nI1207 11:06:48.424284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67694 > 2) by scale factor 0.747123\nI1207 11:06:49.364194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29503 > 2) by scale factor 0.606975\nI1207 11:06:50.304656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56511 > 2) by scale factor 0.779695\nI1207 11:06:51.244557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97515 > 2) by scale factor 0.672234\nI1207 11:06:52.183771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99511 > 2) by scale factor 0.667754\nI1207 11:06:53.124107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17957 > 2) by scale factor 0.629016\nI1207 11:06:54.064520   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84026 > 2) by scale factor 0.70416\nI1207 11:06:55.004452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13469 > 2) by scale factor 0.638023\nI1207 11:06:55.944733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18145 > 2) by scale factor 0.478303\nI1207 11:06:56.884541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01072 > 2) by scale factor 0.664292\nI1207 11:06:57.824476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34214 > 2) by scale factor 0.598419\nI1207 11:06:58.764852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42537 > 2) by scale factor 0.824618\nI1207 11:06:59.705024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00814 > 2) by scale factor 0.498984\nI1207 11:07:00.645460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12021 > 2) by scale factor 0.943302\nI1207 11:07:01.585661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78902 > 2) by scale factor 0.717099\nI1207 11:07:02.525238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33523 > 2) by scale factor 0.856446\nI1207 11:07:03.465410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70306 > 2) by scale factor 0.739902\nI1207 11:07:04.405522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66882 > 2) by scale factor 0.428374\nI1207 11:07:05.345765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79857 > 2) by scale factor 0.71465\nI1207 11:07:06.286046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42581 > 2) by scale factor 0.451895\nI1207 11:07:07.225713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.5891 > 2) by scale factor 0.772471\nI1207 11:07:08.165518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10722 > 2) by scale factor 0.643661\nI1207 11:07:09.105830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50735 > 2) by scale factor 0.797654\nI1207 11:07:10.046145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58967 > 2) by scale factor 0.7723\nI1207 11:07:10.986307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45097 > 2) by scale factor 0.816002\nI1207 11:07:11.926383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65667 > 2) by scale factor 0.752823\nI1207 11:07:12.866506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58171 > 2) by scale factor 0.77468\nI1207 11:07:13.807974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51632 > 2) by scale factor 0.794811\nI1207 11:07:14.748489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07967 > 2) by scale factor 0.64942\nI1207 11:07:15.688796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51389 > 2) by scale factor 0.79558\nI1207 11:07:16.632212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8233 > 2) by scale factor 0.708391\nI1207 11:07:17.575392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13087 > 2) by scale factor 0.938583\nI1207 11:07:18.518895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58161 > 2) by scale factor 0.774709\nI1207 11:07:19.461989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5337 > 2) by scale factor 0.789361\nI1207 11:07:20.405242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62208 > 2) by scale factor 0.55217\nI1207 11:07:21.348387   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62133 > 2) by scale factor 0.552283\nI1207 11:07:22.291183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48035 > 2) by scale factor 0.806338\nI1207 11:07:23.235852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71168 > 2) by scale factor 0.737549\nI1207 11:07:24.179090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47568 > 2) by scale factor 0.575427\nI1207 11:07:25.121991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14998 > 2) by scale factor 0.930241\nI1207 11:07:27.005997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92156 > 2) by scale factor 0.510001\nI1207 11:07:27.949151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68081 > 2) by scale factor 0.427277\nI1207 11:07:28.892328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99642 > 2) by scale factor 0.500448\nI1207 11:07:29.835654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84466 > 2) by scale factor 0.520202\nI1207 11:07:30.779067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21441 > 2) by scale factor 0.622197\nI1207 11:07:31.722452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19824 > 2) by scale factor 0.625344\nI1207 11:07:32.665518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04353 > 2) by scale factor 0.657132\nI1207 11:07:33.608553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88322 > 2) by scale factor 0.69367\nI1207 11:07:34.551611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51027 > 2) by scale factor 0.569756\nI1207 11:07:35.494807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.05118 > 2) by scale factor 0.655484\nI1207 11:07:36.438621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35094 > 2) by scale factor 0.850725\nI1207 11:07:37.382050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04997 > 2) by scale factor 0.975623\nI1207 11:07:38.324836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60099 > 2) by scale factor 0.555403\nI1207 11:07:39.267627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08787 > 2) by scale factor 0.489253\nI1207 11:07:40.210700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96544 > 2) by scale factor 0.674436\nI1207 11:07:41.153942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91105 > 2) by scale factor 0.687038\nI1207 11:07:42.096727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15496 > 2) by scale factor 0.481352\nI1207 11:07:43.039722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19203 > 2) by scale factor 0.477096\nI1207 11:07:43.982985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55306 > 2) by scale factor 0.562895\nI1207 11:07:44.926336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78992 > 2) by scale factor 0.527715\nI1207 11:07:45.869545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42274 > 2) by scale factor 0.452209\nI1207 11:07:46.812919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90792 > 2) by scale factor 0.687778\nI1207 11:07:47.757326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02538 > 2) by scale factor 0.496848\nI1207 11:07:48.701400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67718 > 2) by scale factor 0.543895\nI1207 11:07:49.644703   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42949 > 2) by scale factor 0.583176\nI1207 11:07:50.587952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03189 > 2) by scale factor 0.659656\nI1207 11:07:51.531266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29619 > 2) by scale factor 0.465529\nI1207 11:07:52.474802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21874 > 2) by scale factor 0.901411\nI1207 11:07:53.418069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65174 > 2) by scale factor 0.547683\nI1207 11:07:54.361127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90144 > 2) by scale factor 0.689313\nI1207 11:07:56.244618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68431 > 2) by scale factor 0.542842\nI1207 11:07:57.187742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43119 > 2) by scale factor 0.582888\nI1207 11:07:58.130894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44614 > 2) by scale factor 0.58036\nI1207 11:07:59.073581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48988 > 2) by scale factor 0.573086\nI1207 11:08:00.016922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81757 > 2) by scale factor 0.523893\nI1207 11:08:00.960009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72187 > 2) by scale factor 0.734789\nI1207 11:08:01.903611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35444 > 2) by scale factor 0.459301\nI1207 11:08:02.846827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1158 > 2) by scale factor 0.641889\nI1207 11:08:03.790277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.77792 > 2) by scale factor 0.418592\nI1207 11:08:04.732959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59644 > 2) by scale factor 0.556105\nI1207 11:08:05.675592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74031 > 2) by scale factor 0.729843\nI1207 11:08:06.618690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25308 > 2) by scale factor 0.470247\nI1207 11:08:07.561343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69521 > 2) by scale factor 0.742058\nI1207 11:08:08.503777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30647 > 2) by scale factor 0.867126\nI1207 11:08:09.446038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72471 > 2) by scale factor 0.423307\nI1207 11:08:10.388955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31681 > 2) by scale factor 0.602989\nI1207 11:08:11.332006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06864 > 2) by scale factor 0.651754\nI1207 11:08:12.275189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00656 > 2) by scale factor 0.665212\nI1207 11:08:13.217646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79492 > 2) by scale factor 0.715585\nI1207 11:08:14.160854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93224 > 2) by scale factor 0.682073\nI1207 11:08:15.104395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12887 > 2) by scale factor 0.639209\nI1207 11:08:16.046511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55536 > 2) by scale factor 0.78267\nI1207 11:08:16.989872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03057 > 2) by scale factor 0.659941\nI1207 11:08:17.001827   
369 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1207 11:09:09.920245   369 solver.cpp:404]     Test net output #0: accuracy = 0.12765\nI1207 11:09:09.920730   369 solver.cpp:404]     Test net output #1: loss = 18.5175 (* 1 = 18.5175 loss)\nI1207 11:09:10.794730   369 solver.cpp:228] Iteration 13500, loss = 16.0394\nI1207 11:09:10.794788   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 11:09:10.794816   369 solver.cpp:244]     Train net output #1: loss = 16.0394 (* 1 = 16.0394 loss)\nI1207 11:09:10.870714   369 sgd_solver.cpp:166] Iteration 13500, lr = 2.025\nI1207 11:09:10.880935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20444 > 2) by scale factor 0.475688\nI1207 11:09:11.821831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60783 > 2) by scale factor 0.55435\nI1207 11:09:12.762369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79555 > 2) by scale factor 0.417054\nI1207 11:09:13.702359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34061 > 2) by scale factor 0.460765\nI1207 11:09:14.642566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78066 > 2) by scale factor 0.418352\nI1207 11:09:15.582983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07115 > 2) by scale factor 0.651222\nI1207 11:09:16.523843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40125 > 2) by scale factor 0.832899\nI1207 11:09:17.464939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91197 > 2) by scale factor 0.686821\nI1207 11:09:18.405545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95882 > 2) by scale factor 0.675946\nI1207 11:09:19.346391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55744 > 2) by scale factor 0.782033\nI1207 11:09:20.287246   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43973 > 2) by scale factor 0.819762\nI1207 11:09:21.228261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79604 > 2) by scale factor 0.715298\nI1207 11:09:22.169235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51026 > 2) by scale factor 0.569758\nI1207 11:09:23.109884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33856 > 2) by scale factor 0.460982\nI1207 11:09:24.988922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46211 > 2) by scale factor 0.812313\nI1207 11:09:25.930023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59133 > 2) by scale factor 0.556897\nI1207 11:09:26.870761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68648 > 2) by scale factor 0.542523\nI1207 11:09:27.810798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85727 > 2) by scale factor 0.518501\nI1207 11:09:28.751348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07578 > 2) by scale factor 0.963493\nI1207 11:09:29.692190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22699 > 2) by scale factor 0.619772\nI1207 11:09:30.632913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61727 > 2) by scale factor 0.552903\nI1207 11:09:31.573758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93781 > 2) by scale factor 0.68078\nI1207 11:09:32.514521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29715 > 2) by scale factor 0.606585\nI1207 11:09:33.455317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21715 > 2) by scale factor 0.902059\nI1207 11:09:34.395830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.57579 > 2) by scale factor 0.437083\nI1207 11:09:35.336388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12148 > 2) by scale factor 0.640723\nI1207 11:09:36.276624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50758 > 2) by scale factor 0.79758\nI1207 11:09:37.217757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80735 > 2) by scale factor 0.712416\nI1207 11:09:39.097406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21777 > 2) by scale factor 0.901805\nI1207 11:09:40.038372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39659 > 2) by scale factor 0.83452\nI1207 11:09:40.982017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39011 > 2) by scale factor 0.836781\nI1207 11:09:41.924851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66868 > 2) by scale factor 0.545156\nI1207 11:09:42.868162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36274 > 2) by scale factor 0.594752\nI1207 11:09:43.811940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02756 > 2) by scale factor 0.986405\nI1207 11:09:44.755573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77542 > 2) by scale factor 0.720612\nI1207 11:09:45.699091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48827 > 2) by scale factor 0.80377\nI1207 11:09:46.642611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44976 > 2) by scale factor 0.816408\nI1207 11:09:47.586055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81994 > 2) by scale factor 0.523568\nI1207 11:09:48.529160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85176 > 2) by scale factor 0.701322\nI1207 11:09:49.472586  
 369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84744 > 2) by scale factor 0.519826\nI1207 11:09:50.415678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01119 > 2) by scale factor 0.664189\nI1207 11:09:51.359382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13505 > 2) by scale factor 0.637949\nI1207 11:09:52.302464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78723 > 2) by scale factor 0.717558\nI1207 11:09:53.245623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22306 > 2) by scale factor 0.89966\nI1207 11:09:54.189199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32124 > 2) by scale factor 0.861608\nI1207 11:09:56.073266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0073 > 2) by scale factor 0.665048\nI1207 11:09:57.016348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62024 > 2) by scale factor 0.763288\nI1207 11:09:57.959538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23493 > 2) by scale factor 0.472262\nI1207 11:09:58.902709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76601 > 2) by scale factor 0.531066\nI1207 11:09:59.846195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51804 > 2) by scale factor 0.794268\nI1207 11:10:00.789636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6957 > 2) by scale factor 0.741921\nI1207 11:10:01.733157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61676 > 2) by scale factor 0.552981\nI1207 11:10:02.676508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00484 > 2) by scale factor 0.499395\nI1207 11:10:03.618679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.03056 > 2) by scale factor 0.496209\nI1207 11:10:04.561976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36157 > 2) by scale factor 0.59496\nI1207 11:10:05.505219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46491 > 2) by scale factor 0.447938\nI1207 11:10:06.447638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75843 > 2) by scale factor 0.725049\nI1207 11:10:07.390651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11922 > 2) by scale factor 0.943745\nI1207 11:10:08.333526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5409 > 2) by scale factor 0.440441\nI1207 11:10:09.276883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11816 > 2) by scale factor 0.641403\nI1207 11:10:10.220556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83305 > 2) by scale factor 0.521777\nI1207 11:10:11.164628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07275 > 2) by scale factor 0.491069\nI1207 11:10:12.107765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60365 > 2) by scale factor 0.554993\nI1207 11:10:13.050890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0489 > 2) by scale factor 0.655973\nI1207 11:10:13.994303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92915 > 2) by scale factor 0.509016\nI1207 11:10:14.938387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52893 > 2) by scale factor 0.566744\nI1207 11:10:15.882760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85145 > 2) by scale factor 0.701397\nI1207 11:10:16.826074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74758 > 2) by scale factor 0.533677\nI1207 11:10:17.769208   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59173 > 2) by scale factor 0.556834\nI1207 11:10:18.713013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36883 > 2) by scale factor 0.457789\nI1207 11:10:19.656605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76513 > 2) by scale factor 0.53119\nI1207 11:10:20.600204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0524 > 2) by scale factor 0.655223\nI1207 11:10:21.543258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89969 > 2) by scale factor 0.512861\nI1207 11:10:22.486582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39705 > 2) by scale factor 0.588746\nI1207 11:10:23.429919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45096 > 2) by scale factor 0.816006\nI1207 11:10:24.373000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23003 > 2) by scale factor 0.619188\nI1207 11:10:25.316876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42579 > 2) by scale factor 0.824473\nI1207 11:10:26.260602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50941 > 2) by scale factor 0.797001\nI1207 11:10:27.204105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32471 > 2) by scale factor 0.860323\nI1207 11:10:28.147559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74488 > 2) by scale factor 0.728629\nI1207 11:10:29.091809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77297 > 2) by scale factor 0.721249\nI1207 11:10:30.035249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27986 > 2) by scale factor 0.877249\nI1207 11:10:30.979117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.31949 > 2) by scale factor 0.463017\nI1207 11:10:31.921953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54043 > 2) by scale factor 0.787269\nI1207 11:10:32.865497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.783 > 2) by scale factor 0.528681\nI1207 11:10:33.808782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2991 > 2) by scale factor 0.606226\nI1207 11:10:34.752933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68913 > 2) by scale factor 0.542134\nI1207 11:10:35.697254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47017 > 2) by scale factor 0.576341\nI1207 11:10:36.641445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04859 > 2) by scale factor 0.494\nI1207 11:10:37.585876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26741 > 2) by scale factor 0.612105\nI1207 11:10:38.529903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49151 > 2) by scale factor 0.445285\nI1207 11:10:39.473803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6285 > 2) by scale factor 0.760889\nI1207 11:10:40.417835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41192 > 2) by scale factor 0.58618\nI1207 11:10:41.361644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51253 > 2) by scale factor 0.79601\nI1207 11:10:42.304440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97871 > 2) by scale factor 0.502676\nI1207 11:10:43.248797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28082 > 2) by scale factor 0.609603\nI1207 11:10:44.192874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36926 > 2) by scale factor 0.593603\nI1207 11:10:44.204843   369 
solver.cpp:337] Iteration 13600, Testing net (#0)\nI1207 11:11:37.030216   369 solver.cpp:404]     Test net output #0: accuracy = 0.15625\nI1207 11:11:37.030738   369 solver.cpp:404]     Test net output #1: loss = 13.9063 (* 1 = 13.9063 loss)\nI1207 11:11:37.905117   369 solver.cpp:228] Iteration 13600, loss = 12.9938\nI1207 11:11:37.905165   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 11:11:37.905192   369 solver.cpp:244]     Train net output #1: loss = 12.9938 (* 1 = 12.9938 loss)\nI1207 11:11:37.978067   369 sgd_solver.cpp:166] Iteration 13600, lr = 2.04\nI1207 11:11:37.988286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53947 > 2) by scale factor 0.565057\nI1207 11:11:38.928362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18185 > 2) by scale factor 0.628564\nI1207 11:11:39.868523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98885 > 2) by scale factor 0.669153\nI1207 11:11:40.808912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01044 > 2) by scale factor 0.664355\nI1207 11:11:41.748543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00646 > 2) by scale factor 0.665234\nI1207 11:11:42.689108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77179 > 2) by scale factor 0.530252\nI1207 11:11:43.629279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56508 > 2) by scale factor 0.438108\nI1207 11:11:44.570394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53915 > 2) by scale factor 0.565108\nI1207 11:11:45.510639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5422 > 2) by scale factor 0.440315\nI1207 11:11:46.450855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7199 > 2) by scale factor 0.423738\nI1207 11:11:47.391463   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94358 > 2) by scale factor 0.507153\nI1207 11:11:48.331892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95958 > 2) by scale factor 0.40326\nI1207 11:11:49.272500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62302 > 2) by scale factor 0.552026\nI1207 11:11:50.212952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48391 > 2) by scale factor 0.446039\nI1207 11:11:51.153800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37942 > 2) by scale factor 0.84054\nI1207 11:11:52.094079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94531 > 2) by scale factor 0.404424\nI1207 11:11:53.034853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76458 > 2) by scale factor 0.419764\nI1207 11:11:53.975226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23597 > 2) by scale factor 0.894465\nI1207 11:11:54.915874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2548 > 2) by scale factor 0.614478\nI1207 11:11:55.856067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23283 > 2) by scale factor 0.472497\nI1207 11:11:56.796723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44246 > 2) by scale factor 0.450201\nI1207 11:11:57.736950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06453 > 2) by scale factor 0.652628\nI1207 11:11:58.677999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64855 > 2) by scale factor 0.430241\nI1207 11:11:59.618986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45816 > 2) by scale factor 0.448615\nI1207 11:12:00.559623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
5.01575 > 2) by scale factor 0.398744\nI1207 11:12:01.500035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65588 > 2) by scale factor 0.753047\nI1207 11:12:02.440452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02278 > 2) by scale factor 0.661643\nI1207 11:12:03.380924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63093 > 2) by scale factor 0.550822\nI1207 11:12:04.322063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25661 > 2) by scale factor 0.469858\nI1207 11:12:05.262725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30157 > 2) by scale factor 0.605773\nI1207 11:12:06.202822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03207 > 2) by scale factor 0.496023\nI1207 11:12:07.143203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81829 > 2) by scale factor 0.523795\nI1207 11:12:08.084684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21261 > 2) by scale factor 0.383685\nI1207 11:12:09.027381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28783 > 2) by scale factor 0.608303\nI1207 11:12:09.970093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7539 > 2) by scale factor 0.53278\nI1207 11:12:10.913058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59599 > 2) by scale factor 0.556175\nI1207 11:12:11.856019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30352 > 2) by scale factor 0.605416\nI1207 11:12:12.798632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17079 > 2) by scale factor 0.630758\nI1207 11:12:13.741230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00893 > 2) by scale factor 0.664687\nI1207 11:12:14.683827   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40873 > 2) by scale factor 0.453645\nI1207 11:12:15.626070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3033 > 2) by scale factor 0.605455\nI1207 11:12:16.568984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81077 > 2) by scale factor 0.524828\nI1207 11:12:17.511191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52748 > 2) by scale factor 0.791301\nI1207 11:12:18.453351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76968 > 2) by scale factor 0.722105\nI1207 11:12:19.397460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2952 > 2) by scale factor 0.465636\nI1207 11:12:20.340880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63153 > 2) by scale factor 0.431823\nI1207 11:12:21.283756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95185 > 2) by scale factor 0.506092\nI1207 11:12:22.226544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19901 > 2) by scale factor 0.476303\nI1207 11:12:23.169631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48234 > 2) by scale factor 0.574327\nI1207 11:12:24.112413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81737 > 2) by scale factor 0.709882\nI1207 11:12:25.054929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68783 > 2) by scale factor 0.542325\nI1207 11:12:25.997695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60601 > 2) by scale factor 0.767458\nI1207 11:12:26.940496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49863 > 2) by scale factor 0.571652\nI1207 11:12:27.882880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.07027 > 2) by scale factor 0.651408\nI1207 11:12:28.826443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96164 > 2) by scale factor 0.675302\nI1207 11:12:29.768771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45005 > 2) by scale factor 0.579701\nI1207 11:12:30.711702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.265 > 2) by scale factor 0.883004\nI1207 11:12:31.653731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48792 > 2) by scale factor 0.803884\nI1207 11:12:32.596173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93481 > 2) by scale factor 0.681476\nI1207 11:12:33.538326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57997 > 2) by scale factor 0.775202\nI1207 11:12:34.480609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96998 > 2) by scale factor 0.503781\nI1207 11:12:35.424306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34409 > 2) by scale factor 0.460395\nI1207 11:12:36.367045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01327 > 2) by scale factor 0.663731\nI1207 11:12:37.310693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05716 > 2) by scale factor 0.654202\nI1207 11:12:38.254941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57296 > 2) by scale factor 0.559761\nI1207 11:12:39.197978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70033 > 2) by scale factor 0.540493\nI1207 11:12:40.141010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95274 > 2) by scale factor 0.505978\nI1207 11:12:41.083880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58669 > 2) by scale factor 0.77319\nI1207 11:12:42.027513   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39173 > 2) by scale factor 0.836214\nI1207 11:12:42.970767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33593 > 2) by scale factor 0.85619\nI1207 11:12:43.913303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11223 > 2) by scale factor 0.642627\nI1207 11:12:44.856595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59806 > 2) by scale factor 0.769806\nI1207 11:12:45.799062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47993 > 2) by scale factor 0.806475\nI1207 11:12:46.743329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48749 > 2) by scale factor 0.445683\nI1207 11:12:47.687453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33451 > 2) by scale factor 0.599788\nI1207 11:12:48.630048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99152 > 2) by scale factor 0.501062\nI1207 11:12:49.572613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.559 > 2) by scale factor 0.561955\nI1207 11:12:50.516032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61882 > 2) by scale factor 0.763704\nI1207 11:12:51.459372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48669 > 2) by scale factor 0.804282\nI1207 11:12:52.402880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59854 > 2) by scale factor 0.769663\nI1207 11:12:53.346019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95057 > 2) by scale factor 0.506256\nI1207 11:12:54.288321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54753 > 2) by scale factor 0.785074\nI1207 11:12:55.230979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.09768 > 2) by scale factor 0.953436\nI1207 11:12:56.173048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9035 > 2) by scale factor 0.688824\nI1207 11:12:57.115651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15401 > 2) by scale factor 0.481463\nI1207 11:12:58.057804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47362 > 2) by scale factor 0.80853\nI1207 11:12:59.000294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70632 > 2) by scale factor 0.73901\nI1207 11:12:59.942721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62305 > 2) by scale factor 0.76247\nI1207 11:13:00.885857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05535 > 2) by scale factor 0.65459\nI1207 11:13:01.828573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30604 > 2) by scale factor 0.604953\nI1207 11:13:02.771505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48026 > 2) by scale factor 0.574669\nI1207 11:13:03.713752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06872 > 2) by scale factor 0.651737\nI1207 11:13:04.656612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17733 > 2) by scale factor 0.629459\nI1207 11:13:05.598968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4336 > 2) by scale factor 0.821827\nI1207 11:13:06.541482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64342 > 2) by scale factor 0.756595\nI1207 11:13:07.483212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77006 > 2) by scale factor 0.530496\nI1207 11:13:08.426280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42172 > 2) by scale factor 0.825859\nI1207 11:13:09.368717   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84281 > 2) by scale factor 0.70353\nI1207 11:13:10.311159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05516 > 2) by scale factor 0.493199\nI1207 11:13:11.262943   369 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1207 11:14:04.221266   369 solver.cpp:404]     Test net output #0: accuracy = 0.195\nI1207 11:14:04.221787   369 solver.cpp:404]     Test net output #1: loss = 8.92668 (* 1 = 8.92668 loss)\nI1207 11:14:05.095630   369 solver.cpp:228] Iteration 13700, loss = 9.08158\nI1207 11:14:05.095685   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 11:14:05.095713   369 solver.cpp:244]     Train net output #1: loss = 9.08158 (* 1 = 9.08158 loss)\nI1207 11:14:05.171382   369 sgd_solver.cpp:166] Iteration 13700, lr = 2.055\nI1207 11:14:05.181581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90444 > 2) by scale factor 0.6886\nI1207 11:14:06.122414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67048 > 2) by scale factor 0.748929\nI1207 11:14:08.001457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44394 > 2) by scale factor 0.58073\nI1207 11:14:08.942021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31702 > 2) by scale factor 0.463282\nI1207 11:14:09.882051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02158 > 2) by scale factor 0.661905\nI1207 11:14:10.822904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77212 > 2) by scale factor 0.721469\nI1207 11:14:11.763751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43333 > 2) by scale factor 0.582524\nI1207 11:14:12.704892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77697 > 2) by scale factor 0.529524\nI1207 11:14:13.645829   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60817 > 2) by scale factor 0.554297\nI1207 11:14:14.586730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73738 > 2) by scale factor 0.535134\nI1207 11:14:15.527130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06872 > 2) by scale factor 0.491555\nI1207 11:14:16.467829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96656 > 2) by scale factor 0.504215\nI1207 11:14:17.407934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1734 > 2) by scale factor 0.630239\nI1207 11:14:18.348765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05166 > 2) by scale factor 0.655381\nI1207 11:14:19.289398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19432 > 2) by scale factor 0.626112\nI1207 11:14:20.230043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78792 > 2) by scale factor 0.417718\nI1207 11:14:21.170487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0762 > 2) by scale factor 0.650152\nI1207 11:14:22.111238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12552 > 2) by scale factor 0.484787\nI1207 11:14:23.052280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63396 > 2) by scale factor 0.759314\nI1207 11:14:23.993520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96902 > 2) by scale factor 0.673623\nI1207 11:14:24.933948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10461 > 2) by scale factor 0.644202\nI1207 11:14:25.874445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87295 > 2) by scale factor 0.696148\nI1207 11:14:26.815202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.28547 > 2) by scale factor 0.875094\nI1207 11:14:27.755669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35483 > 2) by scale factor 0.849318\nI1207 11:14:28.696529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39726 > 2) by scale factor 0.834287\nI1207 11:14:29.637217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98606 > 2) by scale factor 0.669779\nI1207 11:14:30.577759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75848 > 2) by scale factor 0.420302\nI1207 11:14:31.517897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82187 > 2) by scale factor 0.523304\nI1207 11:14:32.458106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11581 > 2) by scale factor 0.641887\nI1207 11:14:33.398530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87469 > 2) by scale factor 0.695727\nI1207 11:14:34.338584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69379 > 2) by scale factor 0.541449\nI1207 11:14:35.279610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6872 > 2) by scale factor 0.542417\nI1207 11:14:36.220957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75351 > 2) by scale factor 0.726345\nI1207 11:14:37.164078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14008 > 2) by scale factor 0.483082\nI1207 11:14:38.106570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41566 > 2) by scale factor 0.585538\nI1207 11:14:39.049257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45982 > 2) by scale factor 0.578065\nI1207 11:14:39.991910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45664 > 2) by scale factor 0.814122\nI1207 11:14:40.934741   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38843 > 2) by scale factor 0.83737\nI1207 11:14:41.877034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71758 > 2) by scale factor 0.423946\nI1207 11:14:42.819931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17332 > 2) by scale factor 0.479235\nI1207 11:14:43.762490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39141 > 2) by scale factor 0.589725\nI1207 11:14:44.705762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64159 > 2) by scale factor 0.549211\nI1207 11:14:45.648924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14957 > 2) by scale factor 0.635008\nI1207 11:14:46.592280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09105 > 2) by scale factor 0.64703\nI1207 11:14:47.535141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.069 > 2) by scale factor 0.966649\nI1207 11:14:48.477830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56692 > 2) by scale factor 0.560708\nI1207 11:14:49.420382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53768 > 2) by scale factor 0.565342\nI1207 11:14:50.362740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39218 > 2) by scale factor 0.589592\nI1207 11:14:51.305115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22162 > 2) by scale factor 0.473751\nI1207 11:14:52.247205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86881 > 2) by scale factor 0.697154\nI1207 11:14:53.190269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40226 > 2) by scale factor 0.587845\nI1207 11:14:54.132601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.85898 > 2) by scale factor 0.411609\nI1207 11:14:55.075934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52704 > 2) by scale factor 0.44179\nI1207 11:14:56.019356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26691 > 2) by scale factor 0.612199\nI1207 11:14:56.962168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98911 > 2) by scale factor 0.669096\nI1207 11:14:57.905396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25901 > 2) by scale factor 0.613684\nI1207 11:14:58.848125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28199 > 2) by scale factor 0.876427\nI1207 11:14:59.791067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9835 > 2) by scale factor 0.670353\nI1207 11:15:00.733501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71262 > 2) by scale factor 0.538703\nI1207 11:15:01.675938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18769 > 2) by scale factor 0.627415\nI1207 11:15:02.618800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66484 > 2) by scale factor 0.750513\nI1207 11:15:03.561238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1465 > 2) by scale factor 0.635626\nI1207 11:15:04.503787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59581 > 2) by scale factor 0.556203\nI1207 11:15:05.447033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23656 > 2) by scale factor 0.472081\nI1207 11:15:06.389564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45347 > 2) by scale factor 0.815172\nI1207 11:15:07.332859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42786 > 2) by scale factor 0.583454\nI1207 11:15:08.275413   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65592 > 2) by scale factor 0.547058\nI1207 11:15:09.218472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25285 > 2) by scale factor 0.614845\nI1207 11:15:10.161681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2831 > 2) by scale factor 0.876\nI1207 11:15:11.104813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09732 > 2) by scale factor 0.645719\nI1207 11:15:12.048004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60584 > 2) by scale factor 0.767506\nI1207 11:15:12.990936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06297 > 2) by scale factor 0.49225\nI1207 11:15:13.932780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36039 > 2) by scale factor 0.847317\nI1207 11:15:14.875449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18767 > 2) by scale factor 0.627418\nI1207 11:15:15.817947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1447 > 2) by scale factor 0.482544\nI1207 11:15:16.760644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65102 > 2) by scale factor 0.754427\nI1207 11:15:17.703369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71147 > 2) by scale factor 0.737607\nI1207 11:15:18.645834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89128 > 2) by scale factor 0.51397\nI1207 11:15:19.588268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26092 > 2) by scale factor 0.469383\nI1207 11:15:20.530858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64486 > 2) by scale factor 0.430583\nI1207 11:15:21.472901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.19056 > 2) by scale factor 0.626849\nI1207 11:15:22.415132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68057 > 2) by scale factor 0.746109\nI1207 11:15:23.357954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01287 > 2) by scale factor 0.993608\nI1207 11:15:24.301257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59027 > 2) by scale factor 0.557061\nI1207 11:15:25.243919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24589 > 2) by scale factor 0.890515\nI1207 11:15:26.186089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71015 > 2) by scale factor 0.539062\nI1207 11:15:27.129246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37441 > 2) by scale factor 0.592695\nI1207 11:15:28.072540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54824 > 2) by scale factor 0.563659\nI1207 11:15:29.013556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4302 > 2) by scale factor 0.583057\nI1207 11:15:29.954883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95424 > 2) by scale factor 0.676994\nI1207 11:15:31.839133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15561 > 2) by scale factor 0.633791\nI1207 11:15:32.782783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28891 > 2) by scale factor 0.608105\nI1207 11:15:33.726080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39667 > 2) by scale factor 0.834492\nI1207 11:15:34.670323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72608 > 2) by scale factor 0.733653\nI1207 11:15:35.614187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75936 > 2) by scale factor 0.724807\nI1207 11:15:36.557844   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49438 > 2) by scale factor 0.572347\nI1207 11:15:37.500759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85897 > 2) by scale factor 0.699553\nI1207 11:15:38.454169   369 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1207 11:16:31.402961   369 solver.cpp:404]     Test net output #0: accuracy = 0.17485\nI1207 11:16:31.403466   369 solver.cpp:404]     Test net output #1: loss = 15.4524 (* 1 = 15.4524 loss)\nI1207 11:16:32.277017   369 solver.cpp:228] Iteration 13800, loss = 15.4265\nI1207 11:16:32.277070   369 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 11:16:32.277096   369 solver.cpp:244]     Train net output #1: loss = 15.4265 (* 1 = 15.4265 loss)\nI1207 11:16:32.348510   369 sgd_solver.cpp:166] Iteration 13800, lr = 2.07\nI1207 11:16:32.358656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99086 > 2) by scale factor 0.501145\nI1207 11:16:33.298971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60964 > 2) by scale factor 0.554072\nI1207 11:16:34.239150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86188 > 2) by scale factor 0.517883\nI1207 11:16:35.180120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13454 > 2) by scale factor 0.638052\nI1207 11:16:36.120270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68909 > 2) by scale factor 0.743746\nI1207 11:16:37.060740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63067 > 2) by scale factor 0.550862\nI1207 11:16:38.001065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93747 > 2) by scale factor 0.680859\nI1207 11:16:38.941831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25468 > 2) by scale factor 0.47007\nI1207 11:16:39.882166   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93061 > 2) by scale factor 0.508827\nI1207 11:16:40.822612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50669 > 2) by scale factor 0.570339\nI1207 11:16:41.762456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50197 > 2) by scale factor 0.44425\nI1207 11:16:42.702747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93156 > 2) by scale factor 0.508704\nI1207 11:16:43.642911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13285 > 2) by scale factor 0.638396\nI1207 11:16:44.583827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31646 > 2) by scale factor 0.863387\nI1207 11:16:45.523557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56798 > 2) by scale factor 0.778823\nI1207 11:16:46.463937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46421 > 2) by scale factor 0.577332\nI1207 11:16:47.404315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36143 > 2) by scale factor 0.846944\nI1207 11:16:48.344735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44227 > 2) by scale factor 0.581012\nI1207 11:16:49.285359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94971 > 2) by scale factor 0.678033\nI1207 11:16:50.225723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81241 > 2) by scale factor 0.711134\nI1207 11:16:51.166471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89043 > 2) by scale factor 0.691938\nI1207 11:16:52.106501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8464 > 2) by scale factor 0.702642\nI1207 11:16:53.047160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.68158 > 2) by scale factor 0.745828\nI1207 11:16:53.987555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6673 > 2) by scale factor 0.428513\nI1207 11:16:54.927914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45932 > 2) by scale factor 0.813232\nI1207 11:16:56.807124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45642 > 2) by scale factor 0.578633\nI1207 11:16:57.747316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77185 > 2) by scale factor 0.530244\nI1207 11:16:58.688017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47401 > 2) by scale factor 0.575704\nI1207 11:16:59.629227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98733 > 2) by scale factor 0.669494\nI1207 11:17:00.570088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43941 > 2) by scale factor 0.581495\nI1207 11:17:01.510036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80821 > 2) by scale factor 0.712197\nI1207 11:17:02.451380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63246 > 2) by scale factor 0.759745\nI1207 11:17:03.394227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44697 > 2) by scale factor 0.817337\nI1207 11:17:04.337406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78702 > 2) by scale factor 0.717614\nI1207 11:17:05.280679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61897 > 2) by scale factor 0.763658\nI1207 11:17:06.223852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46726 > 2) by scale factor 0.810614\nI1207 11:17:07.167222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25206 > 2) by scale factor 0.614995\nI1207 11:17:08.110543   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87937 > 2) by scale factor 0.515548\nI1207 11:17:09.994868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59605 > 2) by scale factor 0.556165\nI1207 11:17:10.938117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05972 > 2) by scale factor 0.492645\nI1207 11:17:11.881907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58598 > 2) by scale factor 0.557727\nI1207 11:17:12.824996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00187 > 2) by scale factor 0.499766\nI1207 11:17:13.768008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86917 > 2) by scale factor 0.697065\nI1207 11:17:14.711585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29835 > 2) by scale factor 0.870189\nI1207 11:17:15.655292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7097 > 2) by scale factor 0.73809\nI1207 11:17:16.599181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79935 > 2) by scale factor 0.714451\nI1207 11:17:17.542819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.677 > 2) by scale factor 0.543922\nI1207 11:17:18.485447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45667 > 2) by scale factor 0.448766\nI1207 11:17:19.428227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17356 > 2) by scale factor 0.479207\nI1207 11:17:20.371013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63773 > 2) by scale factor 0.758228\nI1207 11:17:21.314343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87349 > 2) by scale factor 0.51633\nI1207 11:17:22.257345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.38411 > 2) by scale factor 0.456193\nI1207 11:17:23.200938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16392 > 2) by scale factor 0.480316\nI1207 11:17:24.143451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07954 > 2) by scale factor 0.490252\nI1207 11:17:25.086331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32411 > 2) by scale factor 0.860545\nI1207 11:17:26.029223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21978 > 2) by scale factor 0.621161\nI1207 11:17:27.913143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20733 > 2) by scale factor 0.906071\nI1207 11:17:28.856590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72185 > 2) by scale factor 0.734794\nI1207 11:17:29.799268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42418 > 2) by scale factor 0.825022\nI1207 11:17:30.741952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5686 > 2) by scale factor 0.778633\nI1207 11:17:31.684530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65637 > 2) by scale factor 0.752907\nI1207 11:17:32.628154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87242 > 2) by scale factor 0.410473\nI1207 11:17:33.570616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10401 > 2) by scale factor 0.487328\nI1207 11:17:34.513479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94343 > 2) by scale factor 0.507172\nI1207 11:17:35.456846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01776 > 2) by scale factor 0.398585\nI1207 11:17:36.398949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66942 > 2) by scale factor 0.545046\nI1207 11:17:37.341648   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70077 > 2) by scale factor 0.740528\nI1207 11:17:38.284404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80465 > 2) by scale factor 0.525672\nI1207 11:17:39.227394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11271 > 2) by scale factor 0.486298\nI1207 11:17:40.170282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94572 > 2) by scale factor 0.40439\nI1207 11:17:41.113818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57289 > 2) by scale factor 0.559771\nI1207 11:17:42.056339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86902 > 2) by scale factor 0.41076\nI1207 11:17:42.999404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20524 > 2) by scale factor 0.623977\nI1207 11:17:43.941797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60398 > 2) by scale factor 0.768054\nI1207 11:17:44.884081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35123 > 2) by scale factor 0.596796\nI1207 11:17:45.827373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90156 > 2) by scale factor 0.689285\nI1207 11:17:46.770735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39984 > 2) by scale factor 0.588262\nI1207 11:17:47.714284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38644 > 2) by scale factor 0.590591\nI1207 11:17:48.657171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54322 > 2) by scale factor 0.786406\nI1207 11:17:49.599719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38992 > 2) by scale factor 0.589984\nI1207 11:17:50.541975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.41899 > 2) by scale factor 0.452592\nI1207 11:17:51.484412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93037 > 2) by scale factor 0.682507\nI1207 11:17:52.427312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94031 > 2) by scale factor 0.680199\nI1207 11:17:53.370754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09978 > 2) by scale factor 0.952481\nI1207 11:17:54.312906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17857 > 2) by scale factor 0.629215\nI1207 11:17:55.255518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37342 > 2) by scale factor 0.842666\nI1207 11:17:56.198082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65045 > 2) by scale factor 0.75459\nI1207 11:17:57.141392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15618 > 2) by scale factor 0.481211\nI1207 11:17:58.083628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20145 > 2) by scale factor 0.476027\nI1207 11:17:59.026967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52583 > 2) by scale factor 0.441908\nI1207 11:17:59.970288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37928 > 2) by scale factor 0.591843\nI1207 11:18:00.913148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98025 > 2) by scale factor 0.502481\nI1207 11:18:01.855909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29196 > 2) by scale factor 0.465988\nI1207 11:18:02.799541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24296 > 2) by scale factor 0.61672\nI1207 11:18:03.742444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9807 > 2) by scale factor 0.502424\nI1207 11:18:04.685514   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.211 > 2) by scale factor 0.622858\nI1207 11:18:05.628947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51837 > 2) by scale factor 0.794164\nI1207 11:18:05.640898   369 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1207 11:18:58.611194   369 solver.cpp:404]     Test net output #0: accuracy = 0.16465\nI1207 11:18:58.611786   369 solver.cpp:404]     Test net output #1: loss = 13.8685 (* 1 = 13.8685 loss)\nI1207 11:18:59.485584   369 solver.cpp:228] Iteration 13900, loss = 12.9093\nI1207 11:18:59.485636   369 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1207 11:18:59.485662   369 solver.cpp:244]     Train net output #1: loss = 12.9093 (* 1 = 12.9093 loss)\nI1207 11:18:59.557796   369 sgd_solver.cpp:166] Iteration 13900, lr = 2.085\nI1207 11:18:59.567979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56522 > 2) by scale factor 0.560975\nI1207 11:19:00.508659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08195 > 2) by scale factor 0.648939\nI1207 11:19:01.448806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31016 > 2) by scale factor 0.865742\nI1207 11:19:02.389874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1684 > 2) by scale factor 0.479801\nI1207 11:19:03.330806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57777 > 2) by scale factor 0.559008\nI1207 11:19:04.271591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05704 > 2) by scale factor 0.49297\nI1207 11:19:05.212205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49428 > 2) by scale factor 0.801834\nI1207 11:19:06.152606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08349 > 2) by scale factor 0.648617\nI1207 11:19:07.093179   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57639 > 2) by scale factor 0.776279\nI1207 11:19:08.033411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73928 > 2) by scale factor 0.534862\nI1207 11:19:08.973799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13217 > 2) by scale factor 0.638534\nI1207 11:19:09.913976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84503 > 2) by scale factor 0.520152\nI1207 11:19:10.854904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60155 > 2) by scale factor 0.768773\nI1207 11:19:11.795951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29282 > 2) by scale factor 0.607382\nI1207 11:19:12.736788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37058 > 2) by scale factor 0.843677\nI1207 11:19:13.677343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40895 > 2) by scale factor 0.830237\nI1207 11:19:14.617986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40138 > 2) by scale factor 0.832856\nI1207 11:19:15.558305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08838 > 2) by scale factor 0.957678\nI1207 11:19:16.498939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30631 > 2) by scale factor 0.867187\nI1207 11:19:17.439680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45642 > 2) by scale factor 0.578633\nI1207 11:19:18.380092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32023 > 2) by scale factor 0.861983\nI1207 11:19:19.320976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3705 > 2) by scale factor 0.843705\nI1207 11:19:20.261198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.61827 > 2) by scale factor 0.763863\nI1207 11:19:21.202087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81243 > 2) by scale factor 0.5246\nI1207 11:19:22.142163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59019 > 2) by scale factor 0.557074\nI1207 11:19:23.083132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35073 > 2) by scale factor 0.596885\nI1207 11:19:24.024406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22321 > 2) by scale factor 0.620499\nI1207 11:19:24.965337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81596 > 2) by scale factor 0.710237\nI1207 11:19:25.905977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48269 > 2) by scale factor 0.805578\nI1207 11:19:26.846395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77892 > 2) by scale factor 0.719705\nI1207 11:19:27.786335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40798 > 2) by scale factor 0.586857\nI1207 11:19:28.727062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70962 > 2) by scale factor 0.539139\nI1207 11:19:29.668716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6198 > 2) by scale factor 0.552517\nI1207 11:19:30.609076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12334 > 2) by scale factor 0.485044\nI1207 11:19:31.551757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05728 > 2) by scale factor 0.654176\nI1207 11:19:32.494648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41074 > 2) by scale factor 0.586382\nI1207 11:19:33.437597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27659 > 2) by scale factor 0.610391\nI1207 11:19:34.380795   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32664 > 2) by scale factor 0.601207\nI1207 11:19:35.324107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94451 > 2) by scale factor 0.679229\nI1207 11:19:36.267573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09468 > 2) by scale factor 0.64627\nI1207 11:19:37.210434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66319 > 2) by scale factor 0.75098\nI1207 11:19:38.153112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27989 > 2) by scale factor 0.609777\nI1207 11:19:39.096510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97532 > 2) by scale factor 0.672197\nI1207 11:19:40.040159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75941 > 2) by scale factor 0.724794\nI1207 11:19:40.983323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52321 > 2) by scale factor 0.792642\nI1207 11:19:41.926343   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4455 > 2) by scale factor 0.580468\nI1207 11:19:42.869066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0453 > 2) by scale factor 0.656749\nI1207 11:19:43.812423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6903 > 2) by scale factor 0.541961\nI1207 11:19:44.756269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35954 > 2) by scale factor 0.595319\nI1207 11:19:45.699062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84467 > 2) by scale factor 0.520201\nI1207 11:19:46.642704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04765 > 2) by scale factor 0.656244\nI1207 11:19:47.586473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.67258 > 2) by scale factor 0.74834\nI1207 11:19:48.529395   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65748 > 2) by scale factor 0.429417\nI1207 11:19:49.471853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31985 > 2) by scale factor 0.602436\nI1207 11:19:50.415096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76624 > 2) by scale factor 0.723004\nI1207 11:19:51.358001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22285 > 2) by scale factor 0.620569\nI1207 11:19:52.301245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61537 > 2) by scale factor 0.433335\nI1207 11:19:53.244323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0668 > 2) by scale factor 0.652146\nI1207 11:19:54.187417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17717 > 2) by scale factor 0.478793\nI1207 11:19:55.130820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83738 > 2) by scale factor 0.521188\nI1207 11:19:56.074133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82462 > 2) by scale factor 0.522928\nI1207 11:19:57.017740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64869 > 2) by scale factor 0.75509\nI1207 11:19:57.961103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49468 > 2) by scale factor 0.801708\nI1207 11:19:58.904572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29847 > 2) by scale factor 0.606342\nI1207 11:19:59.848754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.328 > 2) by scale factor 0.462107\nI1207 11:20:00.792368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40712 > 2) by scale factor 0.453811\nI1207 11:20:01.736135   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53683 > 2) by scale factor 0.440837\nI1207 11:20:02.679366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65141 > 2) by scale factor 0.547734\nI1207 11:20:03.622112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02223 > 2) by scale factor 0.497237\nI1207 11:20:04.565764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54564 > 2) by scale factor 0.785656\nI1207 11:20:05.508630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5282 > 2) by scale factor 0.791076\nI1207 11:20:06.452121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74649 > 2) by scale factor 0.728201\nI1207 11:20:07.395772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.812 > 2) by scale factor 0.711239\nI1207 11:20:08.339468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23666 > 2) by scale factor 0.47207\nI1207 11:20:09.283429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76193 > 2) by scale factor 0.72413\nI1207 11:20:10.226580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08934 > 2) by scale factor 0.647388\nI1207 11:20:11.170680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41293 > 2) by scale factor 0.586006\nI1207 11:20:12.114060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56152 > 2) by scale factor 0.561558\nI1207 11:20:13.057773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49163 > 2) by scale factor 0.572798\nI1207 11:20:14.001229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0535 > 2) by scale factor 0.654985\nI1207 11:20:14.945055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.8405 > 2) by scale factor 0.520765\nI1207 11:20:15.888523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30518 > 2) by scale factor 0.464556\nI1207 11:20:16.833322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78314 > 2) by scale factor 0.528661\nI1207 11:20:17.776692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85579 > 2) by scale factor 0.700332\nI1207 11:20:18.720402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4971 > 2) by scale factor 0.571903\nI1207 11:20:19.663682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76284 > 2) by scale factor 0.531513\nI1207 11:20:20.607332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61871 > 2) by scale factor 0.763735\nI1207 11:20:23.432451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28431 > 2) by scale factor 0.608957\nI1207 11:20:25.317030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84192 > 2) by scale factor 0.703749\nI1207 11:20:26.261222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45026 > 2) by scale factor 0.579667\nI1207 11:20:27.204785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53389 > 2) by scale factor 0.565949\nI1207 11:20:28.148025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69406 > 2) by scale factor 0.54141\nI1207 11:20:29.092312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53255 > 2) by scale factor 0.441253\nI1207 11:20:30.037071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5244 > 2) by scale factor 0.567472\nI1207 11:20:30.980696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29963 > 2) by scale factor 0.869705\nI1207 11:20:31.924446   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96909 > 2) by scale factor 0.673607\nI1207 11:20:32.868259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14975 > 2) by scale factor 0.481957\nI1207 11:20:32.880246   369 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1207 11:21:25.895221   369 solver.cpp:404]     Test net output #0: accuracy = 0.17715\nI1207 11:21:25.895751   369 solver.cpp:404]     Test net output #1: loss = 17.1075 (* 1 = 17.1075 loss)\nI1207 11:21:26.769613   369 solver.cpp:228] Iteration 14000, loss = 15.0364\nI1207 11:21:26.769659   369 solver.cpp:244]     Train net output #0: accuracy = 0.27\nI1207 11:21:26.769676   369 solver.cpp:244]     Train net output #1: loss = 15.0364 (* 1 = 15.0364 loss)\nI1207 11:21:26.842644   369 sgd_solver.cpp:166] Iteration 14000, lr = 2.1\nI1207 11:21:26.852465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.272 > 2) by scale factor 0.611246\nI1207 11:21:27.791254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52266 > 2) by scale factor 0.567752\nI1207 11:21:28.731195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29344 > 2) by scale factor 0.872051\nI1207 11:21:29.670843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14308 > 2) by scale factor 0.636319\nI1207 11:21:30.610296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25287 > 2) by scale factor 0.887755\nI1207 11:21:31.549341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76609 > 2) by scale factor 0.723042\nI1207 11:21:32.488234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27772 > 2) by scale factor 0.467539\nI1207 11:21:33.426842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18904 > 2) by scale factor 0.913644\nI1207 11:21:34.366649   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00704 > 2) by scale factor 0.665107\nI1207 11:21:35.306493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16763 > 2) by scale factor 0.479889\nI1207 11:21:36.245658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56956 > 2) by scale factor 0.778345\nI1207 11:21:37.184947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14883 > 2) by scale factor 0.635157\nI1207 11:21:38.124212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90318 > 2) by scale factor 0.512403\nI1207 11:21:39.063844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55419 > 2) by scale factor 0.562716\nI1207 11:21:40.003296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59335 > 2) by scale factor 0.771202\nI1207 11:21:40.942566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23991 > 2) by scale factor 0.617302\nI1207 11:21:41.882936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35607 > 2) by scale factor 0.459129\nI1207 11:21:42.822388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97347 > 2) by scale factor 0.672615\nI1207 11:21:43.761648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69519 > 2) by scale factor 0.425968\nI1207 11:21:44.701201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5889 > 2) by scale factor 0.77253\nI1207 11:21:45.640707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40082 > 2) by scale factor 0.83305\nI1207 11:21:46.580767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96054 > 2) by scale factor 0.675552\nI1207 11:21:47.520035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.143 > 2) by scale factor 0.636335\nI1207 11:21:48.459231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2886 > 2) by scale factor 0.466353\nI1207 11:21:49.398625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78127 > 2) by scale factor 0.719095\nI1207 11:21:51.274632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96627 > 2) by scale factor 0.674248\nI1207 11:21:52.213973   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25019 > 2) by scale factor 0.470567\nI1207 11:21:53.154306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65474 > 2) by scale factor 0.429669\nI1207 11:21:54.093672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32602 > 2) by scale factor 0.859838\nI1207 11:21:55.033839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28236 > 2) by scale factor 0.876287\nI1207 11:21:55.973707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71838 > 2) by scale factor 0.735733\nI1207 11:21:56.913933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77833 > 2) by scale factor 0.529334\nI1207 11:21:57.855057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15414 > 2) by scale factor 0.634088\nI1207 11:21:58.798837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41495 > 2) by scale factor 0.58566\nI1207 11:21:59.742503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79107 > 2) by scale factor 0.417443\nI1207 11:22:00.685609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65105 > 2) by scale factor 0.754417\nI1207 11:22:01.627940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07255 > 2) by scale factor 0.491092\nI1207 11:22:02.581876   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33762 > 2) by scale factor 0.461083\nI1207 11:22:03.533195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69242 > 2) by scale factor 0.426219\nI1207 11:22:04.476130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63303 > 2) by scale factor 0.431683\nI1207 11:22:05.417910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.31571 > 2) by scale factor 0.376243\nI1207 11:22:06.359828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0604 > 2) by scale factor 0.492562\nI1207 11:22:07.301447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.37837 > 2) by scale factor 0.37186\nI1207 11:22:08.244015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81608 > 2) by scale factor 0.710206\nI1207 11:22:09.186446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45798 > 2) by scale factor 0.578373\nI1207 11:22:10.129031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25397 > 2) by scale factor 0.614634\nI1207 11:22:11.071379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62223 > 2) by scale factor 0.762709\nI1207 11:22:12.014143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10032 > 2) by scale factor 0.645095\nI1207 11:22:12.957469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86726 > 2) by scale factor 0.517162\nI1207 11:22:13.899827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67575 > 2) by scale factor 0.427739\nI1207 11:22:14.841953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53602 > 2) by scale factor 0.788637\nI1207 11:22:15.783380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.99694 > 2) by scale factor 0.667346\nI1207 11:22:16.726019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62617 > 2) by scale factor 0.761565\nI1207 11:22:17.668148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21359 > 2) by scale factor 0.622357\nI1207 11:22:18.609910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87117 > 2) by scale factor 0.69658\nI1207 11:22:19.551863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47998 > 2) by scale factor 0.806457\nI1207 11:22:20.493532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77169 > 2) by scale factor 0.72158\nI1207 11:22:21.435549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74206 > 2) by scale factor 0.534465\nI1207 11:22:22.377727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46981 > 2) by scale factor 0.809778\nI1207 11:22:23.320302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23781 > 2) by scale factor 0.471942\nI1207 11:22:24.262382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21869 > 2) by scale factor 0.474081\nI1207 11:22:25.204533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4221 > 2) by scale factor 0.452274\nI1207 11:22:26.147225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54627 > 2) by scale factor 0.785463\nI1207 11:22:27.090348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28626 > 2) by scale factor 0.466608\nI1207 11:22:28.032172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.441 > 2) by scale factor 0.581226\nI1207 11:22:28.974423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17877 > 2) by scale factor 0.917949\nI1207 11:22:29.916774   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55692 > 2) by scale factor 0.562284\nI1207 11:22:30.858361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67295 > 2) by scale factor 0.544522\nI1207 11:22:31.801064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9392 > 2) by scale factor 0.680458\nI1207 11:22:32.743232   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41235 > 2) by scale factor 0.829069\nI1207 11:22:33.684993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0415 > 2) by scale factor 0.494866\nI1207 11:22:34.627830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24077 > 2) by scale factor 0.617138\nI1207 11:22:35.570382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96441 > 2) by scale factor 0.504489\nI1207 11:22:36.512967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0474 > 2) by scale factor 0.494144\nI1207 11:22:37.455399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52285 > 2) by scale factor 0.792755\nI1207 11:22:38.398155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25025 > 2) by scale factor 0.888791\nI1207 11:22:39.340847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37472 > 2) by scale factor 0.842203\nI1207 11:22:40.283602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62422 > 2) by scale factor 0.551842\nI1207 11:22:41.225961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80356 > 2) by scale factor 0.713378\nI1207 11:22:43.108043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62274 > 2) by scale factor 0.552068\nI1207 11:22:44.050037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.26355 > 2) by scale factor 0.61283\nI1207 11:22:45.934136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52207 > 2) by scale factor 0.792999\nI1207 11:22:46.877837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89845 > 2) by scale factor 0.690024\nI1207 11:22:47.821148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61161 > 2) by scale factor 0.765812\nI1207 11:22:49.704346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52235 > 2) by scale factor 0.792912\nI1207 11:22:50.646708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60279 > 2) by scale factor 0.768406\nI1207 11:22:51.588536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59167 > 2) by scale factor 0.556845\nI1207 11:22:52.531414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18685 > 2) by scale factor 0.477686\nI1207 11:22:53.473848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89277 > 2) by scale factor 0.691379\nI1207 11:22:54.416322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07644 > 2) by scale factor 0.650103\nI1207 11:22:55.359133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06899 > 2) by scale factor 0.966655\nI1207 11:22:56.301741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93972 > 2) by scale factor 0.50765\nI1207 11:22:57.245020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34051 > 2) by scale factor 0.598711\nI1207 11:22:58.187386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16228 > 2) by scale factor 0.632454\nI1207 11:23:00.068996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74651 > 2) by scale factor 0.728197\nI1207 11:23:00.080900   369 
solver.cpp:337] Iteration 14100, Testing net (#0)\nI1207 11:23:53.072572   369 solver.cpp:404]     Test net output #0: accuracy = 0.2117\nI1207 11:23:53.073096   369 solver.cpp:404]     Test net output #1: loss = 11.38 (* 1 = 11.38 loss)\nI1207 11:23:53.946135   369 solver.cpp:228] Iteration 14100, loss = 11.4927\nI1207 11:23:53.946182   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 11:23:53.946198   369 solver.cpp:244]     Train net output #1: loss = 11.4927 (* 1 = 11.4927 loss)\nI1207 11:23:54.022843   369 sgd_solver.cpp:166] Iteration 14100, lr = 2.115\nI1207 11:23:54.033011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68368 > 2) by scale factor 0.745244\nI1207 11:23:54.973203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50779 > 2) by scale factor 0.570159\nI1207 11:23:55.913410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35489 > 2) by scale factor 0.596145\nI1207 11:23:56.853606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26699 > 2) by scale factor 0.882227\nI1207 11:23:57.793968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88718 > 2) by scale factor 0.692716\nI1207 11:23:58.733728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44171 > 2) by scale factor 0.819098\nI1207 11:23:59.673825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96054 > 2) by scale factor 0.675553\nI1207 11:24:00.614069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7458 > 2) by scale factor 0.728386\nI1207 11:24:01.553747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93137 > 2) by scale factor 0.508728\nI1207 11:24:02.494098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68996 > 2) by scale factor 0.542012\nI1207 11:24:03.434149   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83967 > 2) by scale factor 0.704308\nI1207 11:24:04.374333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82153 > 2) by scale factor 0.523351\nI1207 11:24:05.314452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16676 > 2) by scale factor 0.63156\nI1207 11:24:07.192183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34706 > 2) by scale factor 0.597539\nI1207 11:24:08.132536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34218 > 2) by scale factor 0.598412\nI1207 11:24:09.072257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60468 > 2) by scale factor 0.554834\nI1207 11:24:10.011816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32127 > 2) by scale factor 0.462827\nI1207 11:24:10.951671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60332 > 2) by scale factor 0.555044\nI1207 11:24:11.891947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46789 > 2) by scale factor 0.57672\nI1207 11:24:12.832200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27776 > 2) by scale factor 0.610173\nI1207 11:24:13.772692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05885 > 2) by scale factor 0.49275\nI1207 11:24:14.712764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80346 > 2) by scale factor 0.713403\nI1207 11:24:15.652578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05831 > 2) by scale factor 0.492815\nI1207 11:24:16.592556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37961 > 2) by scale factor 0.456661\nI1207 11:24:17.532621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.50433 > 2) by scale factor 0.570723\nI1207 11:24:18.472425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56529 > 2) by scale factor 0.77964\nI1207 11:24:19.411499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3552 > 2) by scale factor 0.59609\nI1207 11:24:20.351372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2115 > 2) by scale factor 0.47489\nI1207 11:24:21.291080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85547 > 2) by scale factor 0.700409\nI1207 11:24:22.230340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70027 > 2) by scale factor 0.540501\nI1207 11:24:23.170617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23732 > 2) by scale factor 0.893925\nI1207 11:24:24.111238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3013 > 2) by scale factor 0.605823\nI1207 11:24:25.051283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52509 > 2) by scale factor 0.792051\nI1207 11:24:25.993333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7042 > 2) by scale factor 0.539927\nI1207 11:24:26.935917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76156 > 2) by scale factor 0.724227\nI1207 11:24:27.878548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92741 > 2) by scale factor 0.683197\nI1207 11:24:28.820624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97965 > 2) by scale factor 0.671219\nI1207 11:24:29.761983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87322 > 2) by scale factor 0.516366\nI1207 11:24:30.704913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38377 > 2) by scale factor 0.591057\nI1207 11:24:31.647331   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73731 > 2) by scale factor 0.535145\nI1207 11:24:32.589613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27354 > 2) by scale factor 0.467997\nI1207 11:24:33.531960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0912 > 2) by scale factor 0.646999\nI1207 11:24:34.474438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19377 > 2) by scale factor 0.62622\nI1207 11:24:35.416599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50719 > 2) by scale factor 0.570257\nI1207 11:24:36.358283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22564 > 2) by scale factor 0.898618\nI1207 11:24:37.300246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55715 > 2) by scale factor 0.782121\nI1207 11:24:38.241863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38471 > 2) by scale factor 0.838678\nI1207 11:24:39.184341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6905 > 2) by scale factor 0.426394\nI1207 11:24:40.126817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62786 > 2) by scale factor 0.551289\nI1207 11:24:41.070322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51262 > 2) by scale factor 0.443201\nI1207 11:24:42.013972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90422 > 2) by scale factor 0.512266\nI1207 11:24:42.957247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64797 > 2) by scale factor 0.548249\nI1207 11:24:43.900665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42803 > 2) by scale factor 0.823713\nI1207 11:24:44.844034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.5087 > 2) by scale factor 0.570011\nI1207 11:24:45.787529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48966 > 2) by scale factor 0.573122\nI1207 11:24:46.730849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50714 > 2) by scale factor 0.797721\nI1207 11:24:47.674573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63429 > 2) by scale factor 0.550313\nI1207 11:24:48.618062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07843 > 2) by scale factor 0.490385\nI1207 11:24:49.561425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26673 > 2) by scale factor 0.379742\nI1207 11:24:50.504854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5051 > 2) by scale factor 0.570597\nI1207 11:24:51.448374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16921 > 2) by scale factor 0.631073\nI1207 11:24:52.391993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48799 > 2) by scale factor 0.573396\nI1207 11:24:53.335881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98594 > 2) by scale factor 0.501764\nI1207 11:24:54.280031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51381 > 2) by scale factor 0.443085\nI1207 11:24:55.223260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74813 > 2) by scale factor 0.727767\nI1207 11:24:56.166172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.042 > 2) by scale factor 0.657462\nI1207 11:24:57.109069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66921 > 2) by scale factor 0.545076\nI1207 11:24:58.051854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29641 > 2) by scale factor 0.606721\nI1207 11:24:58.995429   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47092 > 2) by scale factor 0.576217\nI1207 11:24:59.938570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56056 > 2) by scale factor 0.438543\nI1207 11:25:00.880808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41728 > 2) by scale factor 0.58526\nI1207 11:25:01.823982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81101 > 2) by scale factor 0.711488\nI1207 11:25:02.767349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58357 > 2) by scale factor 0.436341\nI1207 11:25:03.710325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10076 > 2) by scale factor 0.645003\nI1207 11:25:04.653316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70826 > 2) by scale factor 0.738481\nI1207 11:25:05.596138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61317 > 2) by scale factor 0.433541\nI1207 11:25:06.539927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79419 > 2) by scale factor 0.417172\nI1207 11:25:07.482959   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00229 > 2) by scale factor 0.499714\nI1207 11:25:08.426450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23609 > 2) by scale factor 0.618029\nI1207 11:25:09.369870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94764 > 2) by scale factor 0.404233\nI1207 11:25:10.312611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85242 > 2) by scale factor 0.412166\nI1207 11:25:11.255342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21014 > 2) by scale factor 0.623025\nI1207 11:25:12.197937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.94938 > 2) by scale factor 0.678108\nI1207 11:25:13.141033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39573 > 2) by scale factor 0.588975\nI1207 11:25:14.084074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36641 > 2) by scale factor 0.594104\nI1207 11:25:15.026957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7006 > 2) by scale factor 0.540453\nI1207 11:25:15.969292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67925 > 2) by scale factor 0.427419\nI1207 11:25:16.912451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82417 > 2) by scale factor 0.52299\nI1207 11:25:17.856065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90816 > 2) by scale factor 0.51175\nI1207 11:25:18.800300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32733 > 2) by scale factor 0.601082\nI1207 11:25:19.742548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82578 > 2) by scale factor 0.522769\nI1207 11:25:20.685580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67902 > 2) by scale factor 0.746542\nI1207 11:25:21.627790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72306 > 2) by scale factor 0.537193\nI1207 11:25:22.570443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68775 > 2) by scale factor 0.744117\nI1207 11:25:23.513101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31532 > 2) by scale factor 0.60326\nI1207 11:25:24.456353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95097 > 2) by scale factor 0.677744\nI1207 11:25:25.400904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69976 > 2) by scale factor 0.740805\nI1207 11:25:26.343423   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78209 > 2) by scale factor 0.718885\nI1207 11:25:27.285255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05575 > 2) by scale factor 0.654504\nI1207 11:25:27.296532   369 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1207 11:26:20.279569   369 solver.cpp:404]     Test net output #0: accuracy = 0.15465\nI1207 11:26:20.280112   369 solver.cpp:404]     Test net output #1: loss = 14.703 (* 1 = 14.703 loss)\nI1207 11:26:21.154237   369 solver.cpp:228] Iteration 14200, loss = 14.1729\nI1207 11:26:21.154287   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 11:26:21.154306   369 solver.cpp:244]     Train net output #1: loss = 14.1729 (* 1 = 14.1729 loss)\nI1207 11:26:21.232611   369 sgd_solver.cpp:166] Iteration 14200, lr = 2.13\nI1207 11:26:21.242764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33684 > 2) by scale factor 0.599369\nI1207 11:26:22.183410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21663 > 2) by scale factor 0.902271\nI1207 11:26:23.123847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62794 > 2) by scale factor 0.551278\nI1207 11:26:24.063887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0234 > 2) by scale factor 0.661508\nI1207 11:26:25.004462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60414 > 2) by scale factor 0.768009\nI1207 11:26:25.944782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6047 > 2) by scale factor 0.767843\nI1207 11:26:26.885924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31982 > 2) by scale factor 0.602442\nI1207 11:26:27.826099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78265 > 2) by scale factor 0.52873\nI1207 11:26:28.767050   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12802 > 2) by scale factor 0.484494\nI1207 11:26:29.707975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4204 > 2) by scale factor 0.826311\nI1207 11:26:31.586035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87879 > 2) by scale factor 0.694737\nI1207 11:26:32.526633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64216 > 2) by scale factor 0.756958\nI1207 11:26:33.467092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04944 > 2) by scale factor 0.655858\nI1207 11:26:34.407711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81543 > 2) by scale factor 0.710371\nI1207 11:26:35.348227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39371 > 2) by scale factor 0.589326\nI1207 11:26:36.288034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23787 > 2) by scale factor 0.471935\nI1207 11:26:37.228396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01657 > 2) by scale factor 0.663005\nI1207 11:26:38.168545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06746 > 2) by scale factor 0.652004\nI1207 11:26:39.109513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11831 > 2) by scale factor 0.485636\nI1207 11:26:40.049758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59129 > 2) by scale factor 0.556903\nI1207 11:26:40.990299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55441 > 2) by scale factor 0.562681\nI1207 11:26:41.930649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42833 > 2) by scale factor 0.583374\nI1207 11:26:42.871737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.25529 > 2) by scale factor 0.614384\nI1207 11:26:43.812228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32847 > 2) by scale factor 0.858935\nI1207 11:26:44.752943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26579 > 2) by scale factor 0.468847\nI1207 11:26:46.631243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5558 > 2) by scale factor 0.439\nI1207 11:26:47.571373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16609 > 2) by scale factor 0.631695\nI1207 11:26:48.510941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65378 > 2) by scale factor 0.753643\nI1207 11:26:49.450770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38149 > 2) by scale factor 0.591455\nI1207 11:26:50.390877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87763 > 2) by scale factor 0.515779\nI1207 11:26:51.330889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17182 > 2) by scale factor 0.630554\nI1207 11:26:52.270520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49217 > 2) by scale factor 0.572709\nI1207 11:26:53.210822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0608 > 2) by scale factor 0.653424\nI1207 11:26:54.152652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48604 > 2) by scale factor 0.573716\nI1207 11:26:55.096964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54496 > 2) by scale factor 0.785868\nI1207 11:26:56.041302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45442 > 2) by scale factor 0.578968\nI1207 11:26:56.985105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2828 > 2) by scale factor 0.609236\nI1207 11:26:57.928405   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94106 > 2) by scale factor 0.680026\nI1207 11:26:58.872460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62865 > 2) by scale factor 0.55117\nI1207 11:26:59.816577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1138 > 2) by scale factor 0.486168\nI1207 11:27:00.760664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01723 > 2) by scale factor 0.497856\nI1207 11:27:01.704839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65077 > 2) by scale factor 0.754496\nI1207 11:27:02.648401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97123 > 2) by scale factor 0.673121\nI1207 11:27:03.591236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03783 > 2) by scale factor 0.658364\nI1207 11:27:04.535061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09717 > 2) by scale factor 0.953665\nI1207 11:27:05.478444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92783 > 2) by scale factor 0.6831\nI1207 11:27:06.439244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22329 > 2) by scale factor 0.620484\nI1207 11:27:07.382275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.791 > 2) by scale factor 0.716589\nI1207 11:27:08.325398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75675 > 2) by scale factor 0.420455\nI1207 11:27:09.269067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74515 > 2) by scale factor 0.728557\nI1207 11:27:10.212924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89058 > 2) by scale factor 0.691903\nI1207 11:27:11.156950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.19355 > 2) by scale factor 0.476923\nI1207 11:27:12.100684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18405 > 2) by scale factor 0.628132\nI1207 11:27:13.044374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77455 > 2) by scale factor 0.720837\nI1207 11:27:13.987781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24841 > 2) by scale factor 0.470764\nI1207 11:27:14.930825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40233 > 2) by scale factor 0.832525\nI1207 11:27:15.874214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66978 > 2) by scale factor 0.544991\nI1207 11:27:16.817354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8057 > 2) by scale factor 0.525528\nI1207 11:27:17.760666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36483 > 2) by scale factor 0.845725\nI1207 11:27:18.703925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13801 > 2) by scale factor 0.637348\nI1207 11:27:19.646915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92171 > 2) by scale factor 0.684532\nI1207 11:27:20.589723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0867 > 2) by scale factor 0.958453\nI1207 11:27:21.532958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38623 > 2) by scale factor 0.590627\nI1207 11:27:22.475302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87617 > 2) by scale factor 0.515973\nI1207 11:27:23.418329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07159 > 2) by scale factor 0.491209\nI1207 11:27:24.361378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06591 > 2) by scale factor 0.491894\nI1207 11:27:25.305514   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94842 > 2) by scale factor 0.506532\nI1207 11:27:26.248240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1013 > 2) by scale factor 0.64489\nI1207 11:27:27.190891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49067 > 2) by scale factor 0.802997\nI1207 11:27:28.133697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29544 > 2) by scale factor 0.606899\nI1207 11:27:29.077651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56483 > 2) by scale factor 0.561037\nI1207 11:27:30.020314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16703 > 2) by scale factor 0.479958\nI1207 11:27:30.962595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08973 > 2) by scale factor 0.48903\nI1207 11:27:31.904345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86619 > 2) by scale factor 0.697791\nI1207 11:27:32.847054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48178 > 2) by scale factor 0.805873\nI1207 11:27:33.789633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80337 > 2) by scale factor 0.525849\nI1207 11:27:34.732590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43903 > 2) by scale factor 0.58156\nI1207 11:27:35.675730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82 > 2) by scale factor 0.709219\nI1207 11:27:36.618029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71125 > 2) by scale factor 0.538903\nI1207 11:27:37.560842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03869 > 2) by scale factor 0.981025\nI1207 11:27:38.503674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.45479 > 2) by scale factor 0.814735\nI1207 11:27:39.446897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51454 > 2) by scale factor 0.569064\nI1207 11:27:40.389993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39336 > 2) by scale factor 0.455232\nI1207 11:27:41.332891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48741 > 2) by scale factor 0.804049\nI1207 11:27:42.275687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89421 > 2) by scale factor 0.691034\nI1207 11:27:43.218914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65431 > 2) by scale factor 0.547298\nI1207 11:27:44.161947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61038 > 2) by scale factor 0.766171\nI1207 11:27:45.104856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86036 > 2) by scale factor 0.699214\nI1207 11:27:46.047746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98632 > 2) by scale factor 0.669721\nI1207 11:27:46.991169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87553 > 2) by scale factor 0.516058\nI1207 11:27:47.935206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73645 > 2) by scale factor 0.535267\nI1207 11:27:48.878860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98458 > 2) by scale factor 0.67011\nI1207 11:27:49.822703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67348 > 2) by scale factor 0.748089\nI1207 11:27:50.765672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98601 > 2) by scale factor 0.669791\nI1207 11:27:51.709731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36029 > 2) by scale factor 0.458685\nI1207 11:27:52.653491   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44525 > 2) by scale factor 0.580509\nI1207 11:27:53.596099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7815 > 2) by scale factor 0.418279\nI1207 11:27:54.539449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31438 > 2) by scale factor 0.463566\nI1207 11:27:54.550788   369 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1207 11:28:47.530001   369 solver.cpp:404]     Test net output #0: accuracy = 0.1457\nI1207 11:28:47.530527   369 solver.cpp:404]     Test net output #1: loss = 14.8754 (* 1 = 14.8754 loss)\nI1207 11:28:48.403399   369 solver.cpp:228] Iteration 14300, loss = 14.3743\nI1207 11:28:48.403442   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 11:28:48.403460   369 solver.cpp:244]     Train net output #1: loss = 14.3743 (* 1 = 14.3743 loss)\nI1207 11:28:48.483402   369 sgd_solver.cpp:166] Iteration 14300, lr = 2.145\nI1207 11:28:48.493541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58059 > 2) by scale factor 0.558567\nI1207 11:28:49.433105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50414 > 2) by scale factor 0.570754\nI1207 11:28:50.372287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80377 > 2) by scale factor 0.713324\nI1207 11:28:51.312130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92191 > 2) by scale factor 0.684485\nI1207 11:28:52.251868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74676 > 2) by scale factor 0.533795\nI1207 11:28:53.192591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58255 > 2) by scale factor 0.77443\nI1207 11:28:54.133416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86746 > 2) by scale factor 0.697481\nI1207 11:28:56.012166   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88914 > 2) by scale factor 0.692247\nI1207 11:28:56.952755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08115 > 2) by scale factor 0.649109\nI1207 11:28:57.893019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91353 > 2) by scale factor 0.686452\nI1207 11:28:58.833673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06296 > 2) by scale factor 0.492251\nI1207 11:28:59.774642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6517 > 2) by scale factor 0.547689\nI1207 11:29:00.714947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03988 > 2) by scale factor 0.657921\nI1207 11:29:01.655181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82172 > 2) by scale factor 0.708788\nI1207 11:29:02.596009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0545 > 2) by scale factor 0.654771\nI1207 11:29:03.536412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22648 > 2) by scale factor 0.473207\nI1207 11:29:04.477370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39183 > 2) by scale factor 0.836181\nI1207 11:29:05.417747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91216 > 2) by scale factor 0.511226\nI1207 11:29:06.358405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99514 > 2) by scale factor 0.667747\nI1207 11:29:07.299260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89544 > 2) by scale factor 0.690741\nI1207 11:29:08.239336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63006 > 2) by scale factor 0.550954\nI1207 11:29:09.180153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.70631 > 2) by scale factor 0.739013\nI1207 11:29:10.120175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23544 > 2) by scale factor 0.894679\nI1207 11:29:11.061146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81002 > 2) by scale factor 0.711739\nI1207 11:29:12.001742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94113 > 2) by scale factor 0.507469\nI1207 11:29:12.942456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87547 > 2) by scale factor 0.69554\nI1207 11:29:13.882669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69482 > 2) by scale factor 0.541298\nI1207 11:29:14.823242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8562 > 2) by scale factor 0.70023\nI1207 11:29:15.763479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40689 > 2) by scale factor 0.830947\nI1207 11:29:16.704120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29501 > 2) by scale factor 0.871457\nI1207 11:29:17.644174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36748 > 2) by scale factor 0.593917\nI1207 11:29:18.585674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62613 > 2) by scale factor 0.761577\nI1207 11:29:19.525367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61518 > 2) by scale factor 0.764766\nI1207 11:29:20.465400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57001 > 2) by scale factor 0.437636\nI1207 11:29:21.405905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01452 > 2) by scale factor 0.663455\nI1207 11:29:22.347815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36186 > 2) by scale factor 0.45852\nI1207 11:29:23.290390   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82386 > 2) by scale factor 0.708251\nI1207 11:29:24.232426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90388 > 2) by scale factor 0.688734\nI1207 11:29:25.174660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28459 > 2) by scale factor 0.608905\nI1207 11:29:26.116370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09725 > 2) by scale factor 0.645734\nI1207 11:29:27.058006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73431 > 2) by scale factor 0.731446\nI1207 11:29:27.999871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22816 > 2) by scale factor 0.619548\nI1207 11:29:28.942775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59867 > 2) by scale factor 0.769624\nI1207 11:29:29.885540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16355 > 2) by scale factor 0.632202\nI1207 11:29:30.828881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58307 > 2) by scale factor 0.436389\nI1207 11:29:31.771450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25382 > 2) by scale factor 0.614662\nI1207 11:29:32.714185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86315 > 2) by scale factor 0.698532\nI1207 11:29:33.657635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63833 > 2) by scale factor 0.549703\nI1207 11:29:34.600739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88843 > 2) by scale factor 0.692419\nI1207 11:29:35.544373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31259 > 2) by scale factor 0.463758\nI1207 11:29:36.487135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.99892 > 2) by scale factor 0.500136\nI1207 11:29:37.430245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09066 > 2) by scale factor 0.647111\nI1207 11:29:38.373602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9987 > 2) by scale factor 0.400104\nI1207 11:29:39.317152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99903 > 2) by scale factor 0.500121\nI1207 11:29:40.261024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76073 > 2) by scale factor 0.531812\nI1207 11:29:41.204412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84579 > 2) by scale factor 0.520049\nI1207 11:29:42.148011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59107 > 2) by scale factor 0.771881\nI1207 11:29:43.091239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75733 > 2) by scale factor 0.532293\nI1207 11:29:44.033804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72106 > 2) by scale factor 0.537482\nI1207 11:29:44.977154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.28658 > 2) by scale factor 0.378317\nI1207 11:29:45.920073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.6077 > 2) by scale factor 0.356652\nI1207 11:29:46.863657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.74264 > 2) by scale factor 0.348272\nI1207 11:29:47.807499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57755 > 2) by scale factor 0.559042\nI1207 11:29:48.751461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29923 > 2) by scale factor 0.869855\nI1207 11:29:49.694133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64011 > 2) by scale factor 0.549433\nI1207 11:29:50.637316   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7368 > 2) by scale factor 0.73078\nI1207 11:29:51.580148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4572 > 2) by scale factor 0.813934\nI1207 11:29:52.523385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35212 > 2) by scale factor 0.459546\nI1207 11:29:53.466564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05823 > 2) by scale factor 0.492826\nI1207 11:29:54.410584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18258 > 2) by scale factor 0.628421\nI1207 11:29:55.353776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87866 > 2) by scale factor 0.694769\nI1207 11:29:56.296491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55366 > 2) by scale factor 0.562801\nI1207 11:29:57.240114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6074 > 2) by scale factor 0.554416\nI1207 11:29:58.183123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49656 > 2) by scale factor 0.444784\nI1207 11:29:59.126894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69413 > 2) by scale factor 0.541399\nI1207 11:30:00.069996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79245 > 2) by scale factor 0.716217\nI1207 11:30:01.013429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34209 > 2) by scale factor 0.853937\nI1207 11:30:01.956476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99089 > 2) by scale factor 0.501141\nI1207 11:30:02.900058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06425 > 2) by scale factor 0.652687\nI1207 11:30:03.843055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.44615 > 2) by scale factor 0.817611\nI1207 11:30:04.786303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95676 > 2) by scale factor 0.676417\nI1207 11:30:05.729892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09343 > 2) by scale factor 0.646532\nI1207 11:30:06.673017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56835 > 2) by scale factor 0.77871\nI1207 11:30:07.614935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37887 > 2) by scale factor 0.591914\nI1207 11:30:08.557296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01816 > 2) by scale factor 0.49774\nI1207 11:30:09.498821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24645 > 2) by scale factor 0.616058\nI1207 11:30:10.441444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67126 > 2) by scale factor 0.42815\nI1207 11:30:11.383893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04877 > 2) by scale factor 0.656003\nI1207 11:30:12.326133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4607 > 2) by scale factor 0.577918\nI1207 11:30:13.268808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5082 > 2) by scale factor 0.570093\nI1207 11:30:14.210882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11734 > 2) by scale factor 0.48575\nI1207 11:30:15.153323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86692 > 2) by scale factor 0.697613\nI1207 11:30:16.095474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38806 > 2) by scale factor 0.455782\nI1207 11:30:17.038511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86247 > 2) by scale factor 0.698696\nI1207 11:30:17.981046   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3174 > 2) by scale factor 0.602881\nI1207 11:30:18.923642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35842 > 2) by scale factor 0.595518\nI1207 11:30:19.866096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38477 > 2) by scale factor 0.456124\nI1207 11:30:20.809268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2872 > 2) by scale factor 0.466505\nI1207 11:30:21.750892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85869 > 2) by scale factor 0.518311\nI1207 11:30:21.762874   369 solver.cpp:337] Iteration 14400, Testing net (#0)\nI1207 11:31:14.750072   369 solver.cpp:404]     Test net output #0: accuracy = 0.134\nI1207 11:31:14.750620   369 solver.cpp:404]     Test net output #1: loss = 23.7376 (* 1 = 23.7376 loss)\nI1207 11:31:15.624568   369 solver.cpp:228] Iteration 14400, loss = 21.1494\nI1207 11:31:15.624619   369 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 11:31:15.624639   369 solver.cpp:244]     Train net output #1: loss = 21.1494 (* 1 = 21.1494 loss)\nI1207 11:31:15.700664   369 sgd_solver.cpp:166] Iteration 14400, lr = 2.16\nI1207 11:31:15.710732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03915 > 2) by scale factor 0.495154\nI1207 11:31:16.650977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3934 > 2) by scale factor 0.455228\nI1207 11:31:17.590577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87857 > 2) by scale factor 0.69479\nI1207 11:31:18.530261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03449 > 2) by scale factor 0.495725\nI1207 11:31:19.470216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28099 > 2) by scale factor 0.876811\nI1207 11:31:20.410528   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75419 > 2) by scale factor 0.726167\nI1207 11:31:21.351214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2345 > 2) by scale factor 0.895056\nI1207 11:31:22.291467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60295 > 2) by scale factor 0.434504\nI1207 11:31:23.232159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46063 > 2) by scale factor 0.812799\nI1207 11:31:25.110739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14711 > 2) by scale factor 0.482263\nI1207 11:31:26.050547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47099 > 2) by scale factor 0.576205\nI1207 11:31:26.990346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84097 > 2) by scale factor 0.703986\nI1207 11:31:27.930157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40355 > 2) by scale factor 0.454179\nI1207 11:31:28.871017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26603 > 2) by scale factor 0.8826\nI1207 11:31:29.811815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96849 > 2) by scale factor 0.673743\nI1207 11:31:30.752583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7709 > 2) by scale factor 0.721788\nI1207 11:31:31.692853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71071 > 2) by scale factor 0.538981\nI1207 11:31:32.632941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76994 > 2) by scale factor 0.530512\nI1207 11:31:33.573076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04548 > 2) by scale factor 0.494378\nI1207 11:31:34.512851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.61967 > 2) by scale factor 0.552537\nI1207 11:31:35.453351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12005 > 2) by scale factor 0.943375\nI1207 11:31:36.393674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38393 > 2) by scale factor 0.456212\nI1207 11:31:37.334337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9523 > 2) by scale factor 0.677438\nI1207 11:31:38.274535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8358 > 2) by scale factor 0.705268\nI1207 11:31:39.215077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77843 > 2) by scale factor 0.719832\nI1207 11:31:40.154893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83956 > 2) by scale factor 0.520893\nI1207 11:31:41.095863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82554 > 2) by scale factor 0.522802\nI1207 11:31:42.036983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59834 > 2) by scale factor 0.43494\nI1207 11:31:42.977562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63173 > 2) by scale factor 0.759955\nI1207 11:31:43.917780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0241 > 2) by scale factor 0.497005\nI1207 11:31:44.858804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53953 > 2) by scale factor 0.787546\nI1207 11:31:45.801143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69786 > 2) by scale factor 0.741329\nI1207 11:31:46.743955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27938 > 2) by scale factor 0.609871\nI1207 11:31:47.686512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83195 > 2) by scale factor 0.706226\nI1207 11:31:48.628787   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7502 > 2) by scale factor 0.533305\nI1207 11:31:49.571503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91201 > 2) by scale factor 0.68681\nI1207 11:31:50.515489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20313 > 2) by scale factor 0.62439\nI1207 11:31:51.459139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.746 > 2) by scale factor 0.533902\nI1207 11:31:52.402743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93441 > 2) by scale factor 0.681569\nI1207 11:31:53.346199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02212 > 2) by scale factor 0.661786\nI1207 11:31:54.290287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38283 > 2) by scale factor 0.456326\nI1207 11:31:55.234421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98393 > 2) by scale factor 0.502017\nI1207 11:31:56.177806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03541 > 2) by scale factor 0.495612\nI1207 11:31:57.121309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0165 > 2) by scale factor 0.497946\nI1207 11:31:58.064271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94679 > 2) by scale factor 0.678704\nI1207 11:31:59.007093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17295 > 2) by scale factor 0.479277\nI1207 11:31:59.950528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18105 > 2) by scale factor 0.628724\nI1207 11:32:00.894332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48872 > 2) by scale factor 0.445561\nI1207 11:32:01.837752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.26586 > 2) by scale factor 0.468839\nI1207 11:32:02.781234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65467 > 2) by scale factor 0.753389\nI1207 11:32:03.724892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69594 > 2) by scale factor 0.4259\nI1207 11:32:04.667629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15951 > 2) by scale factor 0.387633\nI1207 11:32:05.610437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15299 > 2) by scale factor 0.481581\nI1207 11:32:06.552925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.50502 > 2) by scale factor 0.363304\nI1207 11:32:07.495976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01957 > 2) by scale factor 0.39844\nI1207 11:32:08.438977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59487 > 2) by scale factor 0.435268\nI1207 11:32:09.382349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82031 > 2) by scale factor 0.523517\nI1207 11:32:10.325075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85914 > 2) by scale factor 0.51825\nI1207 11:32:11.267858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58401 > 2) by scale factor 0.558034\nI1207 11:32:12.211071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01773 > 2) by scale factor 0.66275\nI1207 11:32:13.154953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8706 > 2) by scale factor 0.696719\nI1207 11:32:14.098795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66231 > 2) by scale factor 0.751228\nI1207 11:32:15.040411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50871 > 2) by scale factor 0.443586\nI1207 11:32:15.983386   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64391 > 2) by scale factor 0.430671\nI1207 11:32:16.927124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23741 > 2) by scale factor 0.893891\nI1207 11:32:17.871067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18524 > 2) by scale factor 0.627896\nI1207 11:32:18.815336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1601 > 2) by scale factor 0.925881\nI1207 11:32:19.758150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85738 > 2) by scale factor 0.699941\nI1207 11:32:20.701563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40167 > 2) by scale factor 0.587946\nI1207 11:32:21.644551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04116 > 2) by scale factor 0.657644\nI1207 11:32:22.588744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12219 > 2) by scale factor 0.485179\nI1207 11:32:23.532100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91905 > 2) by scale factor 0.685153\nI1207 11:32:24.475004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28213 > 2) by scale factor 0.876375\nI1207 11:32:25.418478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14345 > 2) by scale factor 0.636243\nI1207 11:32:26.362344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05431 > 2) by scale factor 0.493302\nI1207 11:32:27.306675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23407 > 2) by scale factor 0.895229\nI1207 11:32:28.250557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62811 > 2) by scale factor 0.761004\nI1207 11:32:29.193187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.04373 > 2) by scale factor 0.657088\nI1207 11:32:30.137451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06937 > 2) by scale factor 0.491476\nI1207 11:32:31.080448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10389 > 2) by scale factor 0.950618\nI1207 11:32:32.024713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41184 > 2) by scale factor 0.829242\nI1207 11:32:32.969645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6142 > 2) by scale factor 0.553372\nI1207 11:32:33.912742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83399 > 2) by scale factor 0.705719\nI1207 11:32:34.855949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13969 > 2) by scale factor 0.483128\nI1207 11:32:35.799350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95462 > 2) by scale factor 0.676906\nI1207 11:32:36.742671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61125 > 2) by scale factor 0.553825\nI1207 11:32:37.685189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42073 > 2) by scale factor 0.584671\nI1207 11:32:38.627564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25685 > 2) by scale factor 0.61409\nI1207 11:32:39.571115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71178 > 2) by scale factor 0.737523\nI1207 11:32:40.513773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1928 > 2) by scale factor 0.62641\nI1207 11:32:41.456898   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5346 > 2) by scale factor 0.441053\nI1207 11:32:42.400184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90517 > 2) by scale factor 0.688427\nI1207 11:32:43.342339   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75746 > 2) by scale factor 0.725306\nI1207 11:32:44.285375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84302 > 2) by scale factor 0.520424\nI1207 11:32:45.229404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25536 > 2) by scale factor 0.614371\nI1207 11:32:46.173712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.986 > 2) by scale factor 0.669793\nI1207 11:32:47.117838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47219 > 2) by scale factor 0.576006\nI1207 11:32:48.060580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37566 > 2) by scale factor 0.841871\nI1207 11:32:49.002841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38541 > 2) by scale factor 0.59077\nI1207 11:32:49.014811   369 solver.cpp:337] Iteration 14500, Testing net (#0)\nI1207 11:33:41.995856   369 solver.cpp:404]     Test net output #0: accuracy = 0.1628\nI1207 11:33:41.996387   369 solver.cpp:404]     Test net output #1: loss = 15.1916 (* 1 = 15.1916 loss)\nI1207 11:33:42.870384   369 solver.cpp:228] Iteration 14500, loss = 14.9433\nI1207 11:33:42.870434   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 11:33:42.870453   369 solver.cpp:244]     Train net output #1: loss = 14.9433 (* 1 = 14.9433 loss)\nI1207 11:33:42.940671   369 sgd_solver.cpp:166] Iteration 14500, lr = 2.175\nI1207 11:33:42.950793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7142 > 2) by scale factor 0.538474\nI1207 11:33:43.891330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72223 > 2) by scale factor 0.734693\nI1207 11:33:44.831657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79872 > 2) by scale factor 0.714613\nI1207 11:33:45.772418   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20232 > 2) by scale factor 0.624548\nI1207 11:33:46.713150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3662 > 2) by scale factor 0.845238\nI1207 11:33:48.590971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14588 > 2) by scale factor 0.635752\nI1207 11:33:49.531996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08172 > 2) by scale factor 0.48999\nI1207 11:33:50.472453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62101 > 2) by scale factor 0.552333\nI1207 11:33:51.412225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79303 > 2) by scale factor 0.417272\nI1207 11:33:52.352851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13206 > 2) by scale factor 0.638557\nI1207 11:33:53.293437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59107 > 2) by scale factor 0.556938\nI1207 11:33:54.233891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75206 > 2) by scale factor 0.53304\nI1207 11:33:55.174219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73814 > 2) by scale factor 0.730422\nI1207 11:33:56.115036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06928 > 2) by scale factor 0.651618\nI1207 11:33:57.055143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06914 > 2) by scale factor 0.651649\nI1207 11:33:57.995682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26136 > 2) by scale factor 0.61324\nI1207 11:33:58.935516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2696 > 2) by scale factor 0.611696\nI1207 11:33:59.876356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.90639 > 2) by scale factor 0.688139\nI1207 11:34:00.816696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3172 > 2) by scale factor 0.863111\nI1207 11:34:01.757258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48498 > 2) by scale factor 0.573891\nI1207 11:34:02.698173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45794 > 2) by scale factor 0.448638\nI1207 11:34:03.639037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91562 > 2) by scale factor 0.510775\nI1207 11:34:04.579174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65334 > 2) by scale factor 0.547444\nI1207 11:34:05.520051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.091 > 2) by scale factor 0.488878\nI1207 11:34:06.460253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79721 > 2) by scale factor 0.714999\nI1207 11:34:07.400457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20347 > 2) by scale factor 0.624322\nI1207 11:34:08.341181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7796 > 2) by scale factor 0.719529\nI1207 11:34:09.281085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7485 > 2) by scale factor 0.727671\nI1207 11:34:10.221993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41532 > 2) by scale factor 0.452969\nI1207 11:34:11.162879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6705 > 2) by scale factor 0.748923\nI1207 11:34:12.103703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56824 > 2) by scale factor 0.5605\nI1207 11:34:13.044889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2709 > 2) by scale factor 0.611452\nI1207 11:34:13.985381   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99158 > 2) by scale factor 0.501055\nI1207 11:34:14.925915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60247 > 2) by scale factor 0.768502\nI1207 11:34:15.876593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80681 > 2) by scale factor 0.712553\nI1207 11:34:17.759503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81245 > 2) by scale factor 0.711125\nI1207 11:34:18.701933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77565 > 2) by scale factor 0.52971\nI1207 11:34:19.644552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36706 > 2) by scale factor 0.457974\nI1207 11:34:20.586946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28565 > 2) by scale factor 0.608708\nI1207 11:34:21.529780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02407 > 2) by scale factor 0.66136\nI1207 11:34:22.472676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85817 > 2) by scale factor 0.699749\nI1207 11:34:23.415457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34086 > 2) by scale factor 0.854388\nI1207 11:34:24.357851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7928 > 2) by scale factor 0.527315\nI1207 11:34:25.300360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31944 > 2) by scale factor 0.463023\nI1207 11:34:26.242828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15288 > 2) by scale factor 0.634341\nI1207 11:34:27.185361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30649 > 2) by scale factor 0.604872\nI1207 11:34:28.127403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.57023 > 2) by scale factor 0.560187\nI1207 11:34:29.070111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35548 > 2) by scale factor 0.596039\nI1207 11:34:30.012866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59143 > 2) by scale factor 0.771774\nI1207 11:34:30.955538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95202 > 2) by scale factor 0.677503\nI1207 11:34:31.898294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9226 > 2) by scale factor 0.684323\nI1207 11:34:32.840732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79796 > 2) by scale factor 0.526598\nI1207 11:34:33.783185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90552 > 2) by scale factor 0.512096\nI1207 11:34:34.725591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09215 > 2) by scale factor 0.488741\nI1207 11:34:35.668081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6147 > 2) by scale factor 0.764905\nI1207 11:34:36.610668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70219 > 2) by scale factor 0.740142\nI1207 11:34:37.553073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54844 > 2) by scale factor 0.784793\nI1207 11:34:38.495398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21519 > 2) by scale factor 0.622048\nI1207 11:34:39.437700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18586 > 2) by scale factor 0.627774\nI1207 11:34:40.379999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62998 > 2) by scale factor 0.760462\nI1207 11:34:41.322293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55357 > 2) by scale factor 0.783217\nI1207 11:34:42.264478   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4269 > 2) by scale factor 0.583617\nI1207 11:34:43.207880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70261 > 2) by scale factor 0.540159\nI1207 11:34:44.150375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63665 > 2) by scale factor 0.758538\nI1207 11:34:45.092633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22528 > 2) by scale factor 0.620101\nI1207 11:34:46.035221   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90847 > 2) by scale factor 0.51171\nI1207 11:34:46.977726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56153 > 2) by scale factor 0.561556\nI1207 11:34:47.920572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.624 > 2) by scale factor 0.762196\nI1207 11:34:48.863176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06171 > 2) by scale factor 0.492404\nI1207 11:34:49.805702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11766 > 2) by scale factor 0.485713\nI1207 11:34:50.748150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26366 > 2) by scale factor 0.883525\nI1207 11:34:51.690722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45127 > 2) by scale factor 0.815903\nI1207 11:34:52.633194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69475 > 2) by scale factor 0.541309\nI1207 11:34:54.515494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53089 > 2) by scale factor 0.790235\nI1207 11:34:55.457795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38393 > 2) by scale factor 0.456212\nI1207 11:34:56.400923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.59468 > 2) by scale factor 0.770807\nI1207 11:34:57.343883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25845 > 2) by scale factor 0.613789\nI1207 11:34:58.286675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50443 > 2) by scale factor 0.798584\nI1207 11:34:59.228993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2245 > 2) by scale factor 0.620251\nI1207 11:35:00.171308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64983 > 2) by scale factor 0.754764\nI1207 11:35:01.114115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12813 > 2) by scale factor 0.939792\nI1207 11:35:02.056879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39367 > 2) by scale factor 0.835538\nI1207 11:35:02.998677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60423 > 2) by scale factor 0.554903\nI1207 11:35:03.940783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.778 > 2) by scale factor 0.719943\nI1207 11:35:04.884047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49688 > 2) by scale factor 0.800999\nI1207 11:35:06.766466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82111 > 2) by scale factor 0.708942\nI1207 11:35:07.708678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94978 > 2) by scale factor 0.678018\nI1207 11:35:08.651077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15872 > 2) by scale factor 0.926474\nI1207 11:35:09.593430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34363 > 2) by scale factor 0.460445\nI1207 11:35:10.535784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80893 > 2) by scale factor 0.525082\nI1207 11:35:11.477726   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58269 > 2) by scale factor 0.55824\nI1207 11:35:12.420217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12817 > 2) by scale factor 0.939776\nI1207 11:35:13.363585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17417 > 2) by scale factor 0.630087\nI1207 11:35:14.305696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85665 > 2) by scale factor 0.700121\nI1207 11:35:15.248247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43394 > 2) by scale factor 0.821713\nI1207 11:35:16.190721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78775 > 2) by scale factor 0.528019\nI1207 11:35:16.202648   369 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1207 11:36:09.051486   369 solver.cpp:404]     Test net output #0: accuracy = 0.13735\nI1207 11:36:09.052022   369 solver.cpp:404]     Test net output #1: loss = 12.7213 (* 1 = 12.7213 loss)\nI1207 11:36:09.926340   369 solver.cpp:228] Iteration 14600, loss = 10.7487\nI1207 11:36:09.926394   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 11:36:09.926412   369 solver.cpp:244]     Train net output #1: loss = 10.7487 (* 1 = 10.7487 loss)\nI1207 11:36:09.996268   369 sgd_solver.cpp:166] Iteration 14600, lr = 2.19\nI1207 11:36:10.006413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85768 > 2) by scale factor 0.518446\nI1207 11:36:10.947311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70794 > 2) by scale factor 0.539383\nI1207 11:36:11.888186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59405 > 2) by scale factor 0.770994\nI1207 11:36:12.828326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44111 > 2) by scale factor 0.581207\nI1207 11:36:13.769409   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95755 > 2) by scale factor 0.505363\nI1207 11:36:14.710412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7077 > 2) by scale factor 0.539419\nI1207 11:36:15.651434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00386 > 2) by scale factor 0.499518\nI1207 11:36:17.531205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19239 > 2) by scale factor 0.477054\nI1207 11:36:18.471773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89499 > 2) by scale factor 0.51348\nI1207 11:36:19.413156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2384 > 2) by scale factor 0.893494\nI1207 11:36:20.354125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76029 > 2) by scale factor 0.724561\nI1207 11:36:21.295066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84416 > 2) by scale factor 0.52027\nI1207 11:36:22.236220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61472 > 2) by scale factor 0.553293\nI1207 11:36:23.177420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25743 > 2) by scale factor 0.885964\nI1207 11:36:24.117856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18383 > 2) by scale factor 0.628175\nI1207 11:36:25.058346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48293 > 2) by scale factor 0.574229\nI1207 11:36:25.998819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37002 > 2) by scale factor 0.593468\nI1207 11:36:26.939635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73796 > 2) by scale factor 0.535052\nI1207 11:36:27.880503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.43876 > 2) by scale factor 0.450577\nI1207 11:36:28.821393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25515 > 2) by scale factor 0.470018\nI1207 11:36:29.762100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23653 > 2) by scale factor 0.894242\nI1207 11:36:30.702972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00896 > 2) by scale factor 0.664682\nI1207 11:36:31.643560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10652 > 2) by scale factor 0.949434\nI1207 11:36:32.584514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4574 > 2) by scale factor 0.57847\nI1207 11:36:33.525353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91232 > 2) by scale factor 0.511205\nI1207 11:36:34.466217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21787 > 2) by scale factor 0.621528\nI1207 11:36:35.406777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94084 > 2) by scale factor 0.507506\nI1207 11:36:36.347460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49382 > 2) by scale factor 0.572439\nI1207 11:36:37.288321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78003 > 2) by scale factor 0.418407\nI1207 11:36:38.229573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3691 > 2) by scale factor 0.59363\nI1207 11:36:39.170457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41495 > 2) by scale factor 0.58566\nI1207 11:36:40.111943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07328 > 2) by scale factor 0.491005\nI1207 11:36:41.052873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26975 > 2) by scale factor 0.611667\nI1207 11:36:41.995766   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72355 > 2) by scale factor 0.734335\nI1207 11:36:42.937825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02598 > 2) by scale factor 0.987178\nI1207 11:36:43.879925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94866 > 2) by scale factor 0.506501\nI1207 11:36:44.822839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36664 > 2) by scale factor 0.845079\nI1207 11:36:45.764994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89023 > 2) by scale factor 0.514108\nI1207 11:36:46.707828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98151 > 2) by scale factor 0.502323\nI1207 11:36:47.650705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44829 > 2) by scale factor 0.579998\nI1207 11:36:48.593484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11366 > 2) by scale factor 0.642331\nI1207 11:36:49.535423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12004 > 2) by scale factor 0.390622\nI1207 11:36:50.477880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16872 > 2) by scale factor 0.631169\nI1207 11:36:51.420922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63125 > 2) by scale factor 0.550775\nI1207 11:36:52.363198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78637 > 2) by scale factor 0.528211\nI1207 11:36:53.306877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49013 > 2) by scale factor 0.573045\nI1207 11:36:54.249716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56933 > 2) by scale factor 0.437701\nI1207 11:36:55.193142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.29928 > 2) by scale factor 0.465194\nI1207 11:36:56.136337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04361 > 2) by scale factor 0.396541\nI1207 11:36:57.078685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91032 > 2) by scale factor 0.511467\nI1207 11:36:58.021622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9914 > 2) by scale factor 0.40069\nI1207 11:36:58.964493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75681 > 2) by scale factor 0.532366\nI1207 11:36:59.908174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17874 > 2) by scale factor 0.478613\nI1207 11:37:00.850731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39123 > 2) by scale factor 0.455453\nI1207 11:37:01.793304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7567 > 2) by scale factor 0.725505\nI1207 11:37:02.736168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41318 > 2) by scale factor 0.453188\nI1207 11:37:03.677819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48808 > 2) by scale factor 0.573381\nI1207 11:37:04.619895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91414 > 2) by scale factor 0.406989\nI1207 11:37:05.561488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53158 > 2) by scale factor 0.79002\nI1207 11:37:06.504128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42765 > 2) by scale factor 0.451707\nI1207 11:37:07.446732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91134 > 2) by scale factor 0.686968\nI1207 11:37:08.388710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40366 > 2) by scale factor 0.587603\nI1207 11:37:09.331234   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30004 > 2) by scale factor 0.86955\nI1207 11:37:10.274250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61548 > 2) by scale factor 0.764677\nI1207 11:37:11.217124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41441 > 2) by scale factor 0.585753\nI1207 11:37:12.159889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9597 > 2) by scale factor 0.675745\nI1207 11:37:13.102572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4949 > 2) by scale factor 0.801636\nI1207 11:37:14.045529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95745 > 2) by scale factor 0.676258\nI1207 11:37:14.988648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58137 > 2) by scale factor 0.558445\nI1207 11:37:15.931716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0844 > 2) by scale factor 0.489668\nI1207 11:37:16.874430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8676 > 2) by scale factor 0.697448\nI1207 11:37:17.817368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28445 > 2) by scale factor 0.608931\nI1207 11:37:18.760828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4783 > 2) by scale factor 0.807005\nI1207 11:37:19.703949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83086 > 2) by scale factor 0.706498\nI1207 11:37:20.647001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1397 > 2) by scale factor 0.483127\nI1207 11:37:21.589891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60108 > 2) by scale factor 0.434681\nI1207 11:37:22.532933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.51166 > 2) by scale factor 0.443296\nI1207 11:37:23.476279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07404 > 2) by scale factor 0.65061\nI1207 11:37:24.420069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80231 > 2) by scale factor 0.713697\nI1207 11:37:25.363493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55525 > 2) by scale factor 0.562548\nI1207 11:37:26.307618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29846 > 2) by scale factor 0.465283\nI1207 11:37:27.251207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99012 > 2) by scale factor 0.668869\nI1207 11:37:28.194316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02712 > 2) by scale factor 0.660693\nI1207 11:37:29.138213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43641 > 2) by scale factor 0.820881\nI1207 11:37:30.081852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94116 > 2) by scale factor 0.680003\nI1207 11:37:31.024585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59772 > 2) by scale factor 0.555907\nI1207 11:37:31.968075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19662 > 2) by scale factor 0.62566\nI1207 11:37:32.912032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40894 > 2) by scale factor 0.453623\nI1207 11:37:33.856068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10482 > 2) by scale factor 0.487233\nI1207 11:37:34.798892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47976 > 2) by scale factor 0.806529\nI1207 11:37:35.742316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46441 > 2) by scale factor 0.577298\nI1207 11:37:36.686254   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56616 > 2) by scale factor 0.779375\nI1207 11:37:37.629585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70017 > 2) by scale factor 0.740695\nI1207 11:37:38.573676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5891 > 2) by scale factor 0.77247\nI1207 11:37:39.517555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36313 > 2) by scale factor 0.458386\nI1207 11:37:40.461352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7041 > 2) by scale factor 0.425161\nI1207 11:37:41.404491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20663 > 2) by scale factor 0.623708\nI1207 11:37:42.347595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43727 > 2) by scale factor 0.581857\nI1207 11:37:43.291532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13379 > 2) by scale factor 0.483817\nI1207 11:37:43.303513   369 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1207 11:38:35.954200   369 solver.cpp:404]     Test net output #0: accuracy = 0.1695\nI1207 11:38:35.954676   369 solver.cpp:404]     Test net output #1: loss = 17.2942 (* 1 = 17.2942 loss)\nI1207 11:38:36.828594   369 solver.cpp:228] Iteration 14700, loss = 18.5246\nI1207 11:38:36.828649   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 11:38:36.828673   369 solver.cpp:244]     Train net output #1: loss = 18.5246 (* 1 = 18.5246 loss)\nI1207 11:38:36.899299   369 sgd_solver.cpp:166] Iteration 14700, lr = 2.205\nI1207 11:38:36.909370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80649 > 2) by scale factor 0.525419\nI1207 11:38:37.850247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26167 > 2) by scale factor 0.469299\nI1207 11:38:38.790854   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51314 > 2) by scale factor 0.569291\nI1207 11:38:39.731432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38799 > 2) by scale factor 0.59032\nI1207 11:38:40.671603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59621 > 2) by scale factor 0.770352\nI1207 11:38:41.612223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8052 > 2) by scale factor 0.712962\nI1207 11:38:42.551709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21951 > 2) by scale factor 0.621212\nI1207 11:38:43.491708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17826 > 2) by scale factor 0.629274\nI1207 11:38:44.432003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78117 > 2) by scale factor 0.719121\nI1207 11:38:45.372509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67926 > 2) by scale factor 0.543588\nI1207 11:38:46.313349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19301 > 2) by scale factor 0.626369\nI1207 11:38:47.254385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4281 > 2) by scale factor 0.583413\nI1207 11:38:48.194766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16342 > 2) by scale factor 0.92446\nI1207 11:38:49.134941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71868 > 2) by scale factor 0.537825\nI1207 11:38:50.075721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24274 > 2) by scale factor 0.616762\nI1207 11:38:51.016137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18223 > 2) by scale factor 0.628491\nI1207 11:38:51.956919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.10311 > 2) by scale factor 0.487435\nI1207 11:38:52.897861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20464 > 2) by scale factor 0.624095\nI1207 11:38:53.838462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30386 > 2) by scale factor 0.868108\nI1207 11:38:54.779007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81015 > 2) by scale factor 0.711706\nI1207 11:38:55.719712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96075 > 2) by scale factor 0.675504\nI1207 11:38:56.660667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59083 > 2) by scale factor 0.556974\nI1207 11:38:57.601152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84376 > 2) by scale factor 0.703294\nI1207 11:38:58.541597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82638 > 2) by scale factor 0.707619\nI1207 11:38:59.482460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13139 > 2) by scale factor 0.638694\nI1207 11:39:00.423431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42275 > 2) by scale factor 0.584326\nI1207 11:39:01.363600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61943 > 2) by scale factor 0.763526\nI1207 11:39:02.303896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08343 > 2) by scale factor 0.648629\nI1207 11:39:03.243947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48136 > 2) by scale factor 0.80601\nI1207 11:39:04.185076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98911 > 2) by scale factor 0.669095\nI1207 11:39:05.126376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57586 > 2) by scale factor 0.776438\nI1207 11:39:06.066280   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00383 > 2) by scale factor 0.499522\nI1207 11:39:07.007490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44872 > 2) by scale factor 0.449568\nI1207 11:39:07.950901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90896 > 2) by scale factor 0.687531\nI1207 11:39:09.835378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8475 > 2) by scale factor 0.70237\nI1207 11:39:10.778318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75079 > 2) by scale factor 0.727063\nI1207 11:39:11.721946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87067 > 2) by scale factor 0.696701\nI1207 11:39:12.665621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29392 > 2) by scale factor 0.871869\nI1207 11:39:13.608727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32078 > 2) by scale factor 0.46288\nI1207 11:39:14.551077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81623 > 2) by scale factor 0.415262\nI1207 11:39:15.494712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42855 > 2) by scale factor 0.583336\nI1207 11:39:16.438241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54039 > 2) by scale factor 0.787279\nI1207 11:39:17.381999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15397 > 2) by scale factor 0.481467\nI1207 11:39:18.325234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28369 > 2) by scale factor 0.466888\nI1207 11:39:19.268432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36247 > 2) by scale factor 0.846571\nI1207 11:39:20.212450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.03301 > 2) by scale factor 0.495908\nI1207 11:39:21.156162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97569 > 2) by scale factor 0.503057\nI1207 11:39:22.099678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07205 > 2) by scale factor 0.651032\nI1207 11:39:23.043073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47993 > 2) by scale factor 0.574724\nI1207 11:39:23.985869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60243 > 2) by scale factor 0.555181\nI1207 11:39:24.929409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13111 > 2) by scale factor 0.63875\nI1207 11:39:25.873494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85507 > 2) by scale factor 0.518797\nI1207 11:39:26.817204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13401 > 2) by scale factor 0.937203\nI1207 11:39:27.760576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29555 > 2) by scale factor 0.87125\nI1207 11:39:28.703845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76116 > 2) by scale factor 0.724334\nI1207 11:39:29.647024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32199 > 2) by scale factor 0.602049\nI1207 11:39:30.590049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64528 > 2) by scale factor 0.756063\nI1207 11:39:31.533334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91229 > 2) by scale factor 0.686746\nI1207 11:39:32.475821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64532 > 2) by scale factor 0.548649\nI1207 11:39:33.418462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07856 > 2) by scale factor 0.490369\nI1207 11:39:34.362874   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51464 > 2) by scale factor 0.795342\nI1207 11:39:35.306958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98057 > 2) by scale factor 0.671014\nI1207 11:39:36.250530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10263 > 2) by scale factor 0.644614\nI1207 11:39:37.194404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34292 > 2) by scale factor 0.460519\nI1207 11:39:38.137187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2144 > 2) by scale factor 0.6222\nI1207 11:39:39.080422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16617 > 2) by scale factor 0.480057\nI1207 11:39:40.023885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05938 > 2) by scale factor 0.653728\nI1207 11:39:40.967643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53597 > 2) by scale factor 0.788653\nI1207 11:39:41.911036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53212 > 2) by scale factor 0.566232\nI1207 11:39:42.854387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64238 > 2) by scale factor 0.549092\nI1207 11:39:43.798352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46014 > 2) by scale factor 0.812963\nI1207 11:39:44.742117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39647 > 2) by scale factor 0.588847\nI1207 11:39:45.685516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18376 > 2) by scale factor 0.915853\nI1207 11:39:46.629285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11629 > 2) by scale factor 0.945051\nI1207 11:39:47.572890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.97178 > 2) by scale factor 0.672998\nI1207 11:39:48.516468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6868 > 2) by scale factor 0.42673\nI1207 11:39:49.460731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21837 > 2) by scale factor 0.474116\nI1207 11:39:50.404559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22524 > 2) by scale factor 0.620109\nI1207 11:39:51.348547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09353 > 2) by scale factor 0.64651\nI1207 11:39:52.292346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80197 > 2) by scale factor 0.713784\nI1207 11:39:53.236408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24683 > 2) by scale factor 0.890144\nI1207 11:39:54.180181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44477 > 2) by scale factor 0.818072\nI1207 11:39:55.124222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0188 > 2) by scale factor 0.497661\nI1207 11:39:56.067674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36682 > 2) by scale factor 0.845016\nI1207 11:39:57.010521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49082 > 2) by scale factor 0.572932\nI1207 11:39:57.954154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01627 > 2) by scale factor 0.497974\nI1207 11:39:58.897820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37494 > 2) by scale factor 0.592603\nI1207 11:39:59.841327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50595 > 2) by scale factor 0.570458\nI1207 11:40:00.785326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6737 > 2) by scale factor 0.748027\nI1207 11:40:01.728883   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88728 > 2) by scale factor 0.514499\nI1207 11:40:02.672344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87754 > 2) by scale factor 0.410043\nI1207 11:40:03.615957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03971 > 2) by scale factor 0.495085\nI1207 11:40:04.559698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38359 > 2) by scale factor 0.591087\nI1207 11:40:05.503018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55285 > 2) by scale factor 0.783439\nI1207 11:40:06.446101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34503 > 2) by scale factor 0.852867\nI1207 11:40:07.390172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34106 > 2) by scale factor 0.460717\nI1207 11:40:08.334085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0665 > 2) by scale factor 0.39475\nI1207 11:40:09.277570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33651 > 2) by scale factor 0.461201\nI1207 11:40:10.221019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26931 > 2) by scale factor 0.379557\nI1207 11:40:10.232983   369 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1207 11:41:02.879474   369 solver.cpp:404]     Test net output #0: accuracy = 0.1111\nI1207 11:41:02.879962   369 solver.cpp:404]     Test net output #1: loss = 27.2026 (* 1 = 27.2026 loss)\nI1207 11:41:03.754065   369 solver.cpp:228] Iteration 14800, loss = 27.7704\nI1207 11:41:03.754120   369 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1207 11:41:03.754139   369 solver.cpp:244]     Train net output #1: loss = 27.7704 (* 1 = 27.7704 loss)\nI1207 11:41:03.825387   369 sgd_solver.cpp:166] Iteration 14800, lr = 2.22\nI1207 11:41:03.835564   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.33599 > 2) by scale factor 0.374813\nI1207 11:41:04.776489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03156 > 2) by scale factor 0.496085\nI1207 11:41:05.717394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55714 > 2) by scale factor 0.782124\nI1207 11:41:06.658390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30221 > 2) by scale factor 0.605654\nI1207 11:41:07.599267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2417 > 2) by scale factor 0.61696\nI1207 11:41:08.540192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58803 > 2) by scale factor 0.557408\nI1207 11:41:09.481101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0491 > 2) by scale factor 0.655931\nI1207 11:41:10.422225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38821 > 2) by scale factor 0.837446\nI1207 11:41:11.363183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12383 > 2) by scale factor 0.64024\nI1207 11:41:12.304051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21451 > 2) by scale factor 0.474551\nI1207 11:41:13.244623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63321 > 2) by scale factor 0.431666\nI1207 11:41:14.185974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17829 > 2) by scale factor 0.478664\nI1207 11:41:15.126929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99692 > 2) by scale factor 0.667351\nI1207 11:41:16.067538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64686 > 2) by scale factor 0.548417\nI1207 11:41:17.008420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.77718 > 2) by scale factor 0.720154\nI1207 11:41:17.949514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5197 > 2) by scale factor 0.56823\nI1207 11:41:18.890067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47713 > 2) by scale factor 0.575188\nI1207 11:41:19.830883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18403 > 2) by scale factor 0.628135\nI1207 11:41:20.771498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44806 > 2) by scale factor 0.580037\nI1207 11:41:21.712836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50357 > 2) by scale factor 0.570846\nI1207 11:41:22.653666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46533 > 2) by scale factor 0.577145\nI1207 11:41:23.594228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3613 > 2) by scale factor 0.458579\nI1207 11:41:24.535003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42155 > 2) by scale factor 0.825917\nI1207 11:41:25.476059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51085 > 2) by scale factor 0.569662\nI1207 11:41:26.416443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85424 > 2) by scale factor 0.700712\nI1207 11:41:27.357139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20995 > 2) by scale factor 0.623063\nI1207 11:41:28.297924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45162 > 2) by scale factor 0.815786\nI1207 11:41:29.238421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49735 > 2) by scale factor 0.444707\nI1207 11:41:30.179409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81293 > 2) by scale factor 0.524531\nI1207 11:41:31.120049   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94747 > 2) by scale factor 0.678549\nI1207 11:41:32.060950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1224 > 2) by scale factor 0.485154\nI1207 11:41:33.003605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02839 > 2) by scale factor 0.660418\nI1207 11:41:33.946336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51827 > 2) by scale factor 0.794196\nI1207 11:41:34.889096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33461 > 2) by scale factor 0.461402\nI1207 11:41:35.831925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17335 > 2) by scale factor 0.630249\nI1207 11:41:36.774526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3202 > 2) by scale factor 0.602373\nI1207 11:41:37.717133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.218 > 2) by scale factor 0.901712\nI1207 11:41:38.660208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31644 > 2) by scale factor 0.863394\nI1207 11:41:39.602996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66224 > 2) by scale factor 0.428978\nI1207 11:41:40.545457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.099 > 2) by scale factor 0.487923\nI1207 11:41:41.487774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77079 > 2) by scale factor 0.530393\nI1207 11:41:42.430323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96526 > 2) by scale factor 0.674478\nI1207 11:41:43.373178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1235 > 2) by scale factor 0.485025\nI1207 11:41:44.316846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.10047 > 2) by scale factor 0.95217\nI1207 11:41:45.259330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0875 > 2) by scale factor 0.647773\nI1207 11:41:46.201723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82615 > 2) by scale factor 0.707677\nI1207 11:41:47.144551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79102 > 2) by scale factor 0.716585\nI1207 11:41:48.087883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64996 > 2) by scale factor 0.547952\nI1207 11:41:49.030649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10287 > 2) by scale factor 0.951083\nI1207 11:41:49.972962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94894 > 2) by scale factor 0.678211\nI1207 11:41:50.915938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62668 > 2) by scale factor 0.432275\nI1207 11:41:51.858907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04465 > 2) by scale factor 0.656889\nI1207 11:41:52.801795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42657 > 2) by scale factor 0.583675\nI1207 11:41:53.744168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75952 > 2) by scale factor 0.724763\nI1207 11:41:54.686712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24979 > 2) by scale factor 0.615425\nI1207 11:41:55.630764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40528 > 2) by scale factor 0.831505\nI1207 11:41:56.573511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77912 > 2) by scale factor 0.529223\nI1207 11:41:57.516331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63225 > 2) by scale factor 0.759807\nI1207 11:41:58.459154   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80844 > 2) by scale factor 0.712139\nI1207 11:41:59.402384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30125 > 2) by scale factor 0.869091\nI1207 11:42:00.345620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22792 > 2) by scale factor 0.619595\nI1207 11:42:01.289211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63028 > 2) by scale factor 0.760377\nI1207 11:42:02.231758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27451 > 2) by scale factor 0.610779\nI1207 11:42:03.174597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28607 > 2) by scale factor 0.466628\nI1207 11:42:04.118479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7498 > 2) by scale factor 0.533362\nI1207 11:42:05.061892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97348 > 2) by scale factor 0.672612\nI1207 11:42:06.005741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96133 > 2) by scale factor 0.504881\nI1207 11:42:06.949170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38444 > 2) by scale factor 0.59094\nI1207 11:42:07.892315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11106 > 2) by scale factor 0.486492\nI1207 11:42:08.835371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42326 > 2) by scale factor 0.825335\nI1207 11:42:09.778311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72971 > 2) by scale factor 0.422859\nI1207 11:42:10.721741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32493 > 2) by scale factor 0.601516\nI1207 11:42:11.665089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.6036 > 2) by scale factor 0.434442\nI1207 11:42:12.608652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62705 > 2) by scale factor 0.551412\nI1207 11:42:13.551993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23354 > 2) by scale factor 0.472418\nI1207 11:42:14.494454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7815 > 2) by scale factor 0.418278\nI1207 11:42:15.437103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8446 > 2) by scale factor 0.703087\nI1207 11:42:16.380441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81042 > 2) by scale factor 0.711638\nI1207 11:42:17.324138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16077 > 2) by scale factor 0.48068\nI1207 11:42:18.267474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51634 > 2) by scale factor 0.794804\nI1207 11:42:19.210142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44629 > 2) by scale factor 0.817565\nI1207 11:42:20.153210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24351 > 2) by scale factor 0.616615\nI1207 11:42:21.095911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33049 > 2) by scale factor 0.461842\nI1207 11:42:22.039204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83855 > 2) by scale factor 0.704585\nI1207 11:42:22.981844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36706 > 2) by scale factor 0.593989\nI1207 11:42:23.924945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73454 > 2) by scale factor 0.731384\nI1207 11:42:24.867805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06895 > 2) by scale factor 0.651689\nI1207 11:42:25.810937   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54116 > 2) by scale factor 0.564787\nI1207 11:42:26.754185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81907 > 2) by scale factor 0.415018\nI1207 11:42:27.697237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7932 > 2) by scale factor 0.527259\nI1207 11:42:28.639961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70936 > 2) by scale factor 0.424686\nI1207 11:42:29.583511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13534 > 2) by scale factor 0.483636\nI1207 11:42:30.526377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47254 > 2) by scale factor 0.575947\nI1207 11:42:31.468479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82659 > 2) by scale factor 0.707567\nI1207 11:42:32.411326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38758 > 2) by scale factor 0.455833\nI1207 11:42:33.354430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75959 > 2) by scale factor 0.724744\nI1207 11:42:34.298516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29389 > 2) by scale factor 0.465778\nI1207 11:42:35.241709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21463 > 2) by scale factor 0.622156\nI1207 11:42:36.185089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08524 > 2) by scale factor 0.648247\nI1207 11:42:37.128346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25306 > 2) by scale factor 0.887682\nI1207 11:42:37.140265   369 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1207 11:43:30.002835   369 solver.cpp:404]     Test net output #0: accuracy = 0.20165\nI1207 11:43:30.003340   369 solver.cpp:404]     Test net output #1: 
loss = 11.2161 (* 1 = 11.2161 loss)\nI1207 11:43:30.876150   369 solver.cpp:228] Iteration 14900, loss = 9.73639\nI1207 11:43:30.876201   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 11:43:30.876219   369 solver.cpp:244]     Train net output #1: loss = 9.73639 (* 1 = 9.73639 loss)\nI1207 11:43:30.955153   369 sgd_solver.cpp:166] Iteration 14900, lr = 2.235\nI1207 11:43:30.965162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62828 > 2) by scale factor 0.760955\nI1207 11:43:31.905423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81499 > 2) by scale factor 0.710482\nI1207 11:43:32.845512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00637 > 2) by scale factor 0.665253\nI1207 11:43:33.785670   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05339 > 2) by scale factor 0.973997\nI1207 11:43:34.725226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28705 > 2) by scale factor 0.466521\nI1207 11:43:35.666179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63008 > 2) by scale factor 0.550951\nI1207 11:43:36.607030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12143 > 2) by scale factor 0.485269\nI1207 11:43:37.547721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32457 > 2) by scale factor 0.462474\nI1207 11:43:38.487951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46572 > 2) by scale factor 0.57708\nI1207 11:43:39.428225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96582 > 2) by scale factor 0.674351\nI1207 11:43:40.369491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35906 > 2) by scale factor 0.458815\nI1207 11:43:41.310169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.28031 > 2) by scale factor 0.467256\nI1207 11:43:42.251139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55589 > 2) by scale factor 0.782505\nI1207 11:43:43.192303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12021 > 2) by scale factor 0.485412\nI1207 11:43:44.132778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31987 > 2) by scale factor 0.462976\nI1207 11:43:45.073031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36106 > 2) by scale factor 0.595051\nI1207 11:43:46.014099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08953 > 2) by scale factor 0.647347\nI1207 11:43:46.955411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05319 > 2) by scale factor 0.493438\nI1207 11:43:47.896394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42162 > 2) by scale factor 0.584518\nI1207 11:43:48.836452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44168 > 2) by scale factor 0.581112\nI1207 11:43:49.777253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22536 > 2) by scale factor 0.620085\nI1207 11:43:50.718530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51456 > 2) by scale factor 0.795366\nI1207 11:43:51.659183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50402 > 2) by scale factor 0.798716\nI1207 11:43:52.599905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45815 > 2) by scale factor 0.448617\nI1207 11:43:53.540530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20426 > 2) by scale factor 0.624169\nI1207 11:43:54.481243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59731 > 2) by scale factor 0.435037\nI1207 11:43:55.421805   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81271 > 2) by scale factor 0.415566\nI1207 11:43:56.362880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52576 > 2) by scale factor 0.791842\nI1207 11:43:57.303977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03914 > 2) by scale factor 0.658081\nI1207 11:43:58.245123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67127 > 2) by scale factor 0.748708\nI1207 11:43:59.185814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.874 > 2) by scale factor 0.516262\nI1207 11:44:00.126363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58726 > 2) by scale factor 0.43599\nI1207 11:44:01.067621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38459 > 2) by scale factor 0.590914\nI1207 11:44:02.008627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04077 > 2) by scale factor 0.494955\nI1207 11:44:02.949398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03146 > 2) by scale factor 0.659748\nI1207 11:44:03.891541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46651 > 2) by scale factor 0.576949\nI1207 11:44:04.834045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91394 > 2) by scale factor 0.686355\nI1207 11:44:05.776579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20446 > 2) by scale factor 0.475686\nI1207 11:44:06.718942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02613 > 2) by scale factor 0.496755\nI1207 11:44:07.661569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37012 > 2) by scale factor 0.59345\nI1207 11:44:08.603592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.48353 > 2) by scale factor 0.805307\nI1207 11:44:09.545737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49877 > 2) by scale factor 0.571629\nI1207 11:44:10.487766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60045 > 2) by scale factor 0.555486\nI1207 11:44:11.430271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55077 > 2) by scale factor 0.563258\nI1207 11:44:12.372874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66891 > 2) by scale factor 0.74937\nI1207 11:44:13.314765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78601 > 2) by scale factor 0.528261\nI1207 11:44:14.258750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16767 > 2) by scale factor 0.631379\nI1207 11:44:15.201645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77402 > 2) by scale factor 0.720976\nI1207 11:44:16.144021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07063 > 2) by scale factor 0.651333\nI1207 11:44:17.086544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4332 > 2) by scale factor 0.582547\nI1207 11:44:18.029471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58275 > 2) by scale factor 0.558231\nI1207 11:44:18.972779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7992 > 2) by scale factor 0.526427\nI1207 11:44:19.915776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51226 > 2) by scale factor 0.443237\nI1207 11:44:20.858534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86713 > 2) by scale factor 0.697562\nI1207 11:44:21.801621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15641 > 2) by scale factor 0.633632\nI1207 11:44:22.744735   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83333 > 2) by scale factor 0.705884\nI1207 11:44:23.688809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13205 > 2) by scale factor 0.63856\nI1207 11:44:24.631273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25809 > 2) by scale factor 0.613858\nI1207 11:44:25.573688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9376 > 2) by scale factor 0.507924\nI1207 11:44:26.516067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71268 > 2) by scale factor 0.737279\nI1207 11:44:27.458226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51653 > 2) by scale factor 0.568742\nI1207 11:44:28.400918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70973 > 2) by scale factor 0.738082\nI1207 11:44:29.343382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76142 > 2) by scale factor 0.724266\nI1207 11:44:30.286244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80308 > 2) by scale factor 0.7135\nI1207 11:44:31.229118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19904 > 2) by scale factor 0.625188\nI1207 11:44:32.171546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08134 > 2) by scale factor 0.490035\nI1207 11:44:33.114150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42298 > 2) by scale factor 0.452184\nI1207 11:44:34.057253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90876 > 2) by scale factor 0.687577\nI1207 11:44:34.999133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60661 > 2) by scale factor 0.76728\nI1207 11:44:35.942227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.15896 > 2) by scale factor 0.48089\nI1207 11:44:36.885170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40645 > 2) by scale factor 0.587121\nI1207 11:44:37.828500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46835 > 2) by scale factor 0.576643\nI1207 11:44:38.771842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88735 > 2) by scale factor 0.692677\nI1207 11:44:39.715093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17597 > 2) by scale factor 0.629729\nI1207 11:44:40.658094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62662 > 2) by scale factor 0.761434\nI1207 11:44:41.601876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7348 > 2) by scale factor 0.535504\nI1207 11:44:42.545173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83781 > 2) by scale factor 0.704768\nI1207 11:44:43.488386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49799 > 2) by scale factor 0.800644\nI1207 11:44:44.431784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5339 > 2) by scale factor 0.789296\nI1207 11:44:45.375820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58782 > 2) by scale factor 0.557442\nI1207 11:44:46.319226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52012 > 2) by scale factor 0.442466\nI1207 11:44:47.262938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30788 > 2) by scale factor 0.604617\nI1207 11:44:48.206240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73077 > 2) by scale factor 0.536083\nI1207 11:44:49.148649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54893 > 2) by scale factor 0.784643\nI1207 11:44:50.091621   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63204 > 2) by scale factor 0.759867\nI1207 11:44:51.035156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98153 > 2) by scale factor 0.670797\nI1207 11:44:51.978117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63742 > 2) by scale factor 0.54984\nI1207 11:44:52.921489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02883 > 2) by scale factor 0.496422\nI1207 11:44:53.864969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71772 > 2) by scale factor 0.537965\nI1207 11:44:54.808987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39647 > 2) by scale factor 0.588847\nI1207 11:44:55.752271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29749 > 2) by scale factor 0.606522\nI1207 11:44:56.696485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86129 > 2) by scale factor 0.698986\nI1207 11:44:57.639418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82047 > 2) by scale factor 0.523496\nI1207 11:44:58.583087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36732 > 2) by scale factor 0.593945\nI1207 11:44:59.526392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73817 > 2) by scale factor 0.535021\nI1207 11:45:00.470665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88176 > 2) by scale factor 0.694021\nI1207 11:45:01.414356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89243 > 2) by scale factor 0.691459\nI1207 11:45:02.357969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73538 > 2) by scale factor 0.535421\nI1207 11:45:03.301779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.97121 > 2) by scale factor 0.503625\nI1207 11:45:04.245296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84411 > 2) by scale factor 0.703207\nI1207 11:45:04.257273   369 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1207 11:45:57.171736   369 solver.cpp:404]     Test net output #0: accuracy = 0.2008\nI1207 11:45:57.172231   369 solver.cpp:404]     Test net output #1: loss = 14.6918 (* 1 = 14.6918 loss)\nI1207 11:45:58.046407   369 solver.cpp:228] Iteration 15000, loss = 14.6323\nI1207 11:45:58.046458   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 11:45:58.046476   369 solver.cpp:244]     Train net output #1: loss = 14.6322 (* 1 = 14.6322 loss)\nI1207 11:45:58.118957   369 sgd_solver.cpp:166] Iteration 15000, lr = 2.25\nI1207 11:45:58.129120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39598 > 2) by scale factor 0.834731\nI1207 11:45:59.070418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89992 > 2) by scale factor 0.512831\nI1207 11:46:00.011248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28555 > 2) by scale factor 0.608727\nI1207 11:46:00.951763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57048 > 2) by scale factor 0.560149\nI1207 11:46:01.892915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65549 > 2) by scale factor 0.753158\nI1207 11:46:02.834367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84839 > 2) by scale factor 0.519698\nI1207 11:46:03.775255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03167 > 2) by scale factor 0.496073\nI1207 11:46:04.716384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14581 > 2) by scale factor 0.482415\nI1207 11:46:05.657646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.7609 > 2) by scale factor 0.531788\nI1207 11:46:06.598619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40455 > 2) by scale factor 0.58745\nI1207 11:46:07.540093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77035 > 2) by scale factor 0.530455\nI1207 11:46:08.481149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12894 > 2) by scale factor 0.639195\nI1207 11:46:09.421942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75481 > 2) by scale factor 0.726003\nI1207 11:46:10.363452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19442 > 2) by scale factor 0.626091\nI1207 11:46:11.304394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05797 > 2) by scale factor 0.492858\nI1207 11:46:12.245620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05263 > 2) by scale factor 0.974362\nI1207 11:46:13.186872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2528 > 2) by scale factor 0.614855\nI1207 11:46:14.127400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88859 > 2) by scale factor 0.514325\nI1207 11:46:15.067778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7247 > 2) by scale factor 0.536956\nI1207 11:46:16.008870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42586 > 2) by scale factor 0.583795\nI1207 11:46:16.949940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70381 > 2) by scale factor 0.539984\nI1207 11:46:17.890893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93037 > 2) by scale factor 0.682507\nI1207 11:46:18.831871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88674 > 2) by scale factor 0.692823\nI1207 11:46:20.711607   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39643 > 2) by scale factor 0.834574\nI1207 11:46:21.652478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48131 > 2) by scale factor 0.806025\nI1207 11:46:22.593713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81736 > 2) by scale factor 0.709883\nI1207 11:46:23.534406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81465 > 2) by scale factor 0.524295\nI1207 11:46:24.475335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24993 > 2) by scale factor 0.615397\nI1207 11:46:25.416079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45911 > 2) by scale factor 0.44852\nI1207 11:46:26.357116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3124 > 2) by scale factor 0.603791\nI1207 11:46:27.298108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70172 > 2) by scale factor 0.54029\nI1207 11:46:28.239429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63337 > 2) by scale factor 0.550454\nI1207 11:46:29.182226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70979 > 2) by scale factor 0.738064\nI1207 11:46:30.124191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66348 > 2) by scale factor 0.750898\nI1207 11:46:31.067000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71923 > 2) by scale factor 0.537746\nI1207 11:46:32.009979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87314 > 2) by scale factor 0.516377\nI1207 11:46:32.952821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18614 > 2) by scale factor 0.62772\nI1207 11:46:33.896009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.10732 > 2) by scale factor 0.643641\nI1207 11:46:34.838393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78069 > 2) by scale factor 0.529004\nI1207 11:46:35.781322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36238 > 2) by scale factor 0.594817\nI1207 11:46:36.723896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45244 > 2) by scale factor 0.579301\nI1207 11:46:37.666721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7773 > 2) by scale factor 0.720123\nI1207 11:46:38.609681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11199 > 2) by scale factor 0.642675\nI1207 11:46:39.552278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4786 > 2) by scale factor 0.574943\nI1207 11:46:40.494870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6973 > 2) by scale factor 0.425777\nI1207 11:46:41.438415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0544 > 2) by scale factor 0.654794\nI1207 11:46:42.381227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65151 > 2) by scale factor 0.429968\nI1207 11:46:43.324038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12228 > 2) by scale factor 0.485168\nI1207 11:46:44.267213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02461 > 2) by scale factor 0.496943\nI1207 11:46:45.210674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92844 > 2) by scale factor 0.405808\nI1207 11:46:46.154275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52164 > 2) by scale factor 0.442317\nI1207 11:46:47.097975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64999 > 2) by scale factor 0.430109\nI1207 11:46:48.041477   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63473 > 2) by scale factor 0.431524\nI1207 11:46:48.985080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05414 > 2) by scale factor 0.654848\nI1207 11:46:49.928791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13752 > 2) by scale factor 0.637447\nI1207 11:46:50.872290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59176 > 2) by scale factor 0.771678\nI1207 11:46:51.816133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50439 > 2) by scale factor 0.570712\nI1207 11:46:52.759521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65597 > 2) by scale factor 0.54705\nI1207 11:46:53.703081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77825 > 2) by scale factor 0.529345\nI1207 11:46:54.646662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94526 > 2) by scale factor 0.506938\nI1207 11:46:55.590153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14299 > 2) by scale factor 0.482743\nI1207 11:46:56.533866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35506 > 2) by scale factor 0.459236\nI1207 11:46:57.477681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14172 > 2) by scale factor 0.482891\nI1207 11:46:58.422013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1559 > 2) by scale factor 0.633734\nI1207 11:46:59.365672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68756 > 2) by scale factor 0.426661\nI1207 11:47:00.309401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77375 > 2) by scale factor 0.721047\nI1207 11:47:01.252878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.92447 > 2) by scale factor 0.509623\nI1207 11:47:02.196699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0409 > 2) by scale factor 0.494939\nI1207 11:47:03.140157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17699 > 2) by scale factor 0.9187\nI1207 11:47:04.084066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20039 > 2) by scale factor 0.476146\nI1207 11:47:05.027739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19713 > 2) by scale factor 0.476517\nI1207 11:47:05.970681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10102 > 2) by scale factor 0.487684\nI1207 11:47:06.914317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89003 > 2) by scale factor 0.692034\nI1207 11:47:07.857818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.616 > 2) by scale factor 0.764526\nI1207 11:47:08.801582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02923 > 2) by scale factor 0.660233\nI1207 11:47:09.745146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3545 > 2) by scale factor 0.596213\nI1207 11:47:10.687863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23996 > 2) by scale factor 0.617292\nI1207 11:47:11.631423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96968 > 2) by scale factor 0.673474\nI1207 11:47:12.575191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62094 > 2) by scale factor 0.432812\nI1207 11:47:13.518587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46913 > 2) by scale factor 0.576513\nI1207 11:47:14.462157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52606 > 2) by scale factor 0.567206\nI1207 11:47:15.405218   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61061 > 2) by scale factor 0.766105\nI1207 11:47:16.349062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33079 > 2) by scale factor 0.600457\nI1207 11:47:17.292549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13367 > 2) by scale factor 0.937353\nI1207 11:47:18.253767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80144 > 2) by scale factor 0.713917\nI1207 11:47:19.215514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41178 > 2) by scale factor 0.586204\nI1207 11:47:20.158991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02371 > 2) by scale factor 0.661438\nI1207 11:47:21.102838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30241 > 2) by scale factor 0.605617\nI1207 11:47:22.046648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16408 > 2) by scale factor 0.632094\nI1207 11:47:22.990356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60161 > 2) by scale factor 0.555307\nI1207 11:47:23.933778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66447 > 2) by scale factor 0.428774\nI1207 11:47:24.877370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63324 > 2) by scale factor 0.550473\nI1207 11:47:25.820691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35756 > 2) by scale factor 0.458973\nI1207 11:47:26.764511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18689 > 2) by scale factor 0.627571\nI1207 11:47:27.708297   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21138 > 2) by scale factor 0.474904\nI1207 11:47:28.652034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.52122 > 2) by scale factor 0.793268\nI1207 11:47:29.595914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47877 > 2) by scale factor 0.806853\nI1207 11:47:30.539505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36334 > 2) by scale factor 0.594648\nI1207 11:47:31.483001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45787 > 2) by scale factor 0.448645\nI1207 11:47:31.494933   369 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1207 11:48:24.164340   369 solver.cpp:404]     Test net output #0: accuracy = 0.1743\nI1207 11:48:24.164903   369 solver.cpp:404]     Test net output #1: loss = 14.8492 (* 1 = 14.8492 loss)\nI1207 11:48:25.038868   369 solver.cpp:228] Iteration 15100, loss = 15.4769\nI1207 11:48:25.038921   369 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1207 11:48:25.038939   369 solver.cpp:244]     Train net output #1: loss = 15.4769 (* 1 = 15.4769 loss)\nI1207 11:48:25.121492   369 sgd_solver.cpp:166] Iteration 15100, lr = 2.265\nI1207 11:48:25.131638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71984 > 2) by scale factor 0.735336\nI1207 11:48:26.072538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1391 > 2) by scale factor 0.637124\nI1207 11:48:27.012686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61867 > 2) by scale factor 0.552689\nI1207 11:48:27.953723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85122 > 2) by scale factor 0.701453\nI1207 11:48:28.894556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94153 > 2) by scale factor 0.507417\nI1207 11:48:29.835768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37801 > 2) by scale factor 0.592065\nI1207 11:48:30.777227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.37637 > 2) by scale factor 0.592352\nI1207 11:48:31.718030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25733 > 2) by scale factor 0.886004\nI1207 11:48:32.658898   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00408 > 2) by scale factor 0.997962\nI1207 11:48:33.600153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9791 > 2) by scale factor 0.671344\nI1207 11:48:34.540729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30739 > 2) by scale factor 0.866779\nI1207 11:48:35.480873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11171 > 2) by scale factor 0.642733\nI1207 11:48:36.421319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75624 > 2) by scale factor 0.725625\nI1207 11:48:37.361485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78354 > 2) by scale factor 0.528605\nI1207 11:48:38.301606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71924 > 2) by scale factor 0.735501\nI1207 11:48:39.241506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27089 > 2) by scale factor 0.611454\nI1207 11:48:40.181686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22483 > 2) by scale factor 0.620187\nI1207 11:48:41.121770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16537 > 2) by scale factor 0.48015\nI1207 11:48:42.061276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95053 > 2) by scale factor 0.677845\nI1207 11:48:43.000761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08444 > 2) by scale factor 0.648415\nI1207 11:48:43.940901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25421 > 2) by scale factor 0.614589\nI1207 11:48:44.881155   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33547 > 2) by scale factor 0.461311\nI1207 11:48:45.821475   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52874 > 2) by scale factor 0.566775\nI1207 11:48:46.761390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8427 > 2) by scale factor 0.703556\nI1207 11:48:47.701609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88626 > 2) by scale factor 0.514634\nI1207 11:48:49.579167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29095 > 2) by scale factor 0.607726\nI1207 11:48:50.519181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09257 > 2) by scale factor 0.646711\nI1207 11:48:52.397321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4844 > 2) by scale factor 0.573987\nI1207 11:48:53.337414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54013 > 2) by scale factor 0.78736\nI1207 11:48:54.277575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73873 > 2) by scale factor 0.534941\nI1207 11:48:55.218423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08599 > 2) by scale factor 0.489478\nI1207 11:48:56.158501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07502 > 2) by scale factor 0.650402\nI1207 11:48:57.101444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04122 > 2) by scale factor 0.657631\nI1207 11:48:58.045047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24151 > 2) by scale factor 0.616996\nI1207 11:48:58.989507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51944 > 2) by scale factor 0.793827\nI1207 11:48:59.933524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.77059 > 2) by scale factor 0.419235\nI1207 11:49:00.877154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96633 > 2) by scale factor 0.674233\nI1207 11:49:01.820901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81917 > 2) by scale factor 0.523673\nI1207 11:49:02.764441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30139 > 2) by scale factor 0.605805\nI1207 11:49:03.708489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00402 > 2) by scale factor 0.499498\nI1207 11:49:04.652139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6938 > 2) by scale factor 0.541448\nI1207 11:49:05.595939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66299 > 2) by scale factor 0.751035\nI1207 11:49:06.539283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9224 > 2) by scale factor 0.509892\nI1207 11:49:07.481261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64316 > 2) by scale factor 0.548975\nI1207 11:49:08.424181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99748 > 2) by scale factor 0.667226\nI1207 11:49:09.367313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.416 > 2) by scale factor 0.452899\nI1207 11:49:10.310199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93856 > 2) by scale factor 0.680605\nI1207 11:49:11.253687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70199 > 2) by scale factor 0.54025\nI1207 11:49:12.196961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1222 > 2) by scale factor 0.485178\nI1207 11:49:13.140259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43474 > 2) by scale factor 0.450985\nI1207 11:49:14.082834   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37043 > 2) by scale factor 0.45762\nI1207 11:49:15.026355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10283 > 2) by scale factor 0.644573\nI1207 11:49:15.970103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81493 > 2) by scale factor 0.710497\nI1207 11:49:16.912807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77127 > 2) by scale factor 0.72169\nI1207 11:49:17.855494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7346 > 2) by scale factor 0.731369\nI1207 11:49:18.798943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57318 > 2) by scale factor 0.559726\nI1207 11:49:19.741853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34003 > 2) by scale factor 0.598797\nI1207 11:49:20.685153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80065 > 2) by scale factor 0.526226\nI1207 11:49:21.628141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91846 > 2) by scale factor 0.685293\nI1207 11:49:22.571987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4538 > 2) by scale factor 0.815061\nI1207 11:49:23.515220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26328 > 2) by scale factor 0.612881\nI1207 11:49:24.457716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58071 > 2) by scale factor 0.558549\nI1207 11:49:25.401715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90244 > 2) by scale factor 0.5125\nI1207 11:49:26.345010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45032 > 2) by scale factor 0.81622\nI1207 11:49:27.288252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.68671 > 2) by scale factor 0.744404\nI1207 11:49:28.230998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94313 > 2) by scale factor 0.679549\nI1207 11:49:29.174378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72299 > 2) by scale factor 0.734486\nI1207 11:49:30.117585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68305 > 2) by scale factor 0.74542\nI1207 11:49:31.059979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98264 > 2) by scale factor 0.670547\nI1207 11:49:32.002971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01308 > 2) by scale factor 0.663773\nI1207 11:49:32.946494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.722 > 2) by scale factor 0.537345\nI1207 11:49:33.889793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5525 > 2) by scale factor 0.562983\nI1207 11:49:34.832679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05503 > 2) by scale factor 0.654659\nI1207 11:49:35.776325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5105 > 2) by scale factor 0.44341\nI1207 11:49:36.719700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0757 > 2) by scale factor 0.490713\nI1207 11:49:37.663218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02479 > 2) by scale factor 0.49692\nI1207 11:49:38.606560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88129 > 2) by scale factor 0.694133\nI1207 11:49:39.550056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03504 > 2) by scale factor 0.65897\nI1207 11:49:40.493450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47435 > 2) by scale factor 0.808293\nI1207 11:49:41.436717   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97049 > 2) by scale factor 0.673291\nI1207 11:49:42.380110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37869 > 2) by scale factor 0.591946\nI1207 11:49:43.323619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08022 > 2) by scale factor 0.649304\nI1207 11:49:44.266603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84717 > 2) by scale factor 0.702452\nI1207 11:49:45.209139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08255 > 2) by scale factor 0.648814\nI1207 11:49:46.152559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9724 > 2) by scale factor 0.503474\nI1207 11:49:47.096418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19865 > 2) by scale factor 0.625263\nI1207 11:49:48.039268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91001 > 2) by scale factor 0.687283\nI1207 11:49:48.981295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90378 > 2) by scale factor 0.688758\nI1207 11:49:49.923708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61196 > 2) by scale factor 0.765708\nI1207 11:49:50.866144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09435 > 2) by scale factor 0.95495\nI1207 11:49:51.808373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17869 > 2) by scale factor 0.478619\nI1207 11:49:52.751243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42172 > 2) by scale factor 0.82586\nI1207 11:49:53.693639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81819 > 2) by scale factor 0.709676\nI1207 11:49:54.636689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.33346 > 2) by scale factor 0.857097\nI1207 11:49:55.580620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42723 > 2) by scale factor 0.823986\nI1207 11:49:56.524106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56497 > 2) by scale factor 0.561015\nI1207 11:49:57.466157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93107 > 2) by scale factor 0.682344\nI1207 11:49:58.408970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76551 > 2) by scale factor 0.531137\nI1207 11:49:58.420866   369 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1207 11:50:51.304217   369 solver.cpp:404]     Test net output #0: accuracy = 0.16525\nI1207 11:50:51.304739   369 solver.cpp:404]     Test net output #1: loss = 12.5452 (* 1 = 12.5452 loss)\nI1207 11:50:52.179011   369 solver.cpp:228] Iteration 15200, loss = 13.7164\nI1207 11:50:52.179056   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 11:50:52.179074   369 solver.cpp:244]     Train net output #1: loss = 13.7164 (* 1 = 13.7164 loss)\nI1207 11:50:52.250499   369 sgd_solver.cpp:166] Iteration 15200, lr = 2.28\nI1207 11:50:52.260645   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27711 > 2) by scale factor 0.610294\nI1207 11:50:53.201812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27567 > 2) by scale factor 0.878862\nI1207 11:50:54.142585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91694 > 2) by scale factor 0.68565\nI1207 11:50:55.083021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04217 > 2) by scale factor 0.494784\nI1207 11:50:56.023577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10039 > 2) by scale factor 0.645079\nI1207 11:50:56.964012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.49192 > 2) by scale factor 0.57275\nI1207 11:50:57.904839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24878 > 2) by scale factor 0.615615\nI1207 11:50:58.845377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41739 > 2) by scale factor 0.452756\nI1207 11:50:59.785838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92889 > 2) by scale factor 0.50905\nI1207 11:51:00.726184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40922 > 2) by scale factor 0.586644\nI1207 11:51:01.667040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85593 > 2) by scale factor 0.411868\nI1207 11:51:02.607975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90017 > 2) by scale factor 0.408149\nI1207 11:51:03.548141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.45607 > 2) by scale factor 0.366564\nI1207 11:51:04.488489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12699 > 2) by scale factor 0.390093\nI1207 11:51:05.429679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41974 > 2) by scale factor 0.58484\nI1207 11:51:06.370368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64214 > 2) by scale factor 0.756962\nI1207 11:51:07.310569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18512 > 2) by scale factor 0.627919\nI1207 11:51:08.250829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64438 > 2) by scale factor 0.430628\nI1207 11:51:09.191401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59109 > 2) by scale factor 0.435627\nI1207 11:51:10.132089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32489 > 2) by scale factor 0.601523\nI1207 11:51:11.073096   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5661 > 2) by scale factor 0.560836\nI1207 11:51:12.013741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96883 > 2) by scale factor 0.673665\nI1207 11:51:12.953806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5812 > 2) by scale factor 0.774832\nI1207 11:51:14.832906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04654 > 2) by scale factor 0.494249\nI1207 11:51:15.773537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66881 > 2) by scale factor 0.749398\nI1207 11:51:16.714205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04342 > 2) by scale factor 0.657155\nI1207 11:51:17.654824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65907 > 2) by scale factor 0.752144\nI1207 11:51:18.595551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92892 > 2) by scale factor 0.682846\nI1207 11:51:19.536620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16159 > 2) by scale factor 0.925244\nI1207 11:51:20.477520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08431 > 2) by scale factor 0.648443\nI1207 11:51:21.418275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24833 > 2) by scale factor 0.470773\nI1207 11:51:22.359673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53879 > 2) by scale factor 0.565164\nI1207 11:51:23.300662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92507 > 2) by scale factor 0.509546\nI1207 11:51:24.241461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34442 > 2) by scale factor 0.46036\nI1207 11:51:25.182365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.39576 > 2) by scale factor 0.58897\nI1207 11:51:26.124413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42538 > 2) by scale factor 0.583876\nI1207 11:51:27.067057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91874 > 2) by scale factor 0.685227\nI1207 11:51:28.009954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92032 > 2) by scale factor 0.510162\nI1207 11:51:28.952935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95744 > 2) by scale factor 0.67626\nI1207 11:51:29.895822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60884 > 2) by scale factor 0.554195\nI1207 11:51:30.839010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31871 > 2) by scale factor 0.463101\nI1207 11:51:31.781685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55919 > 2) by scale factor 0.438674\nI1207 11:51:32.724902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6247 > 2) by scale factor 0.761991\nI1207 11:51:33.668016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93798 > 2) by scale factor 0.680739\nI1207 11:51:34.611001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19536 > 2) by scale factor 0.476717\nI1207 11:51:35.554149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19609 > 2) by scale factor 0.476635\nI1207 11:51:36.497814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20365 > 2) by scale factor 0.475777\nI1207 11:51:37.441606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78014 > 2) by scale factor 0.52908\nI1207 11:51:38.385540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89719 > 2) by scale factor 0.51319\nI1207 11:51:39.328909   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21955 > 2) by scale factor 0.621205\nI1207 11:51:40.273046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80522 > 2) by scale factor 0.525594\nI1207 11:51:41.217097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95168 > 2) by scale factor 0.677581\nI1207 11:51:42.160575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67614 > 2) by scale factor 0.427703\nI1207 11:51:43.104357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45755 > 2) by scale factor 0.813818\nI1207 11:51:44.047646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82384 > 2) by scale factor 0.523034\nI1207 11:51:44.991706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16895 > 2) by scale factor 0.631124\nI1207 11:51:45.935461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6714 > 2) by scale factor 0.748672\nI1207 11:51:46.878923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55695 > 2) by scale factor 0.56228\nI1207 11:51:47.822664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2689 > 2) by scale factor 0.881486\nI1207 11:51:48.766108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60372 > 2) by scale factor 0.768131\nI1207 11:51:49.709910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54705 > 2) by scale factor 0.563848\nI1207 11:51:50.653558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02422 > 2) by scale factor 0.49699\nI1207 11:51:51.597329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86725 > 2) by scale factor 0.697534\nI1207 11:51:52.541641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.9506 > 2) by scale factor 0.506252\nI1207 11:51:53.485332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07292 > 2) by scale factor 0.491048\nI1207 11:51:54.428529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92473 > 2) by scale factor 0.683824\nI1207 11:51:55.372673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05522 > 2) by scale factor 0.493192\nI1207 11:51:56.316396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10066 > 2) by scale factor 0.645025\nI1207 11:51:57.259953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80955 > 2) by scale factor 0.711859\nI1207 11:51:58.203866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22395 > 2) by scale factor 0.620356\nI1207 11:51:59.147457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40982 > 2) by scale factor 0.829938\nI1207 11:52:00.091037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57743 > 2) by scale factor 0.775968\nI1207 11:52:01.034108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19147 > 2) by scale factor 0.62667\nI1207 11:52:01.977704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38748 > 2) by scale factor 0.590409\nI1207 11:52:02.920282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22919 > 2) by scale factor 0.472904\nI1207 11:52:03.863608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75776 > 2) by scale factor 0.532232\nI1207 11:52:04.807144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65452 > 2) by scale factor 0.547267\nI1207 11:52:05.750126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88157 > 2) by scale factor 0.515255\nI1207 11:52:06.693374   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9724 > 2) by scale factor 0.503473\nI1207 11:52:07.636209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07725 > 2) by scale factor 0.490527\nI1207 11:52:08.578836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00468 > 2) by scale factor 0.399626\nI1207 11:52:09.521611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90106 > 2) by scale factor 0.408075\nI1207 11:52:10.464408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73714 > 2) by scale factor 0.422196\nI1207 11:52:11.407595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73312 > 2) by scale factor 0.731765\nI1207 11:52:12.350364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08271 > 2) by scale factor 0.48987\nI1207 11:52:13.293520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52427 > 2) by scale factor 0.567493\nI1207 11:52:14.236147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79789 > 2) by scale factor 0.41685\nI1207 11:52:15.179857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13255 > 2) by scale factor 0.483962\nI1207 11:52:16.123715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.49519 > 2) by scale factor 0.363954\nI1207 11:52:17.067152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95168 > 2) by scale factor 0.403904\nI1207 11:52:18.011029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00247 > 2) by scale factor 0.666117\nI1207 11:52:18.954957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39096 > 2) by scale factor 0.455481\nI1207 11:52:19.898507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.43643 > 2) by scale factor 0.820872\nI1207 11:52:20.842101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02179 > 2) by scale factor 0.661859\nI1207 11:52:21.785761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16763 > 2) by scale factor 0.479889\nI1207 11:52:22.729818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93647 > 2) by scale factor 0.508069\nI1207 11:52:23.673874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59236 > 2) by scale factor 0.556737\nI1207 11:52:24.617182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26648 > 2) by scale factor 0.612281\nI1207 11:52:25.560935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39503 > 2) by scale factor 0.45506\nI1207 11:52:25.573024   369 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1207 11:53:18.235935   369 solver.cpp:404]     Test net output #0: accuracy = 0.1428\nI1207 11:53:18.236420   369 solver.cpp:404]     Test net output #1: loss = 26.3628 (* 1 = 26.3628 loss)\nI1207 11:53:19.110343   369 solver.cpp:228] Iteration 15300, loss = 24.106\nI1207 11:53:19.110389   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 11:53:19.110406   369 solver.cpp:244]     Train net output #1: loss = 24.106 (* 1 = 24.106 loss)\nI1207 11:53:19.183814   369 sgd_solver.cpp:166] Iteration 15300, lr = 2.295\nI1207 11:53:19.193696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30324 > 2) by scale factor 0.464766\nI1207 11:53:20.134838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89207 > 2) by scale factor 0.513866\nI1207 11:53:21.075913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19574 > 2) by scale factor 0.625833\nI1207 11:53:22.016412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.66088 > 2) by scale factor 0.546316\nI1207 11:53:22.956454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02095 > 2) by scale factor 0.497394\nI1207 11:53:23.896625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93082 > 2) by scale factor 0.508799\nI1207 11:53:24.837097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40966 > 2) by scale factor 0.45355\nI1207 11:53:25.778000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69847 > 2) by scale factor 0.425671\nI1207 11:53:26.718243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59227 > 2) by scale factor 0.771526\nI1207 11:53:27.659277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3061 > 2) by scale factor 0.464457\nI1207 11:53:28.600076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74093 > 2) by scale factor 0.72968\nI1207 11:53:29.541052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68365 > 2) by scale factor 0.745254\nI1207 11:53:30.481700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57903 > 2) by scale factor 0.55881\nI1207 11:53:31.421756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01212 > 2) by scale factor 0.663984\nI1207 11:53:32.362591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80362 > 2) by scale factor 0.713363\nI1207 11:53:33.303974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98099 > 2) by scale factor 0.670917\nI1207 11:53:34.244710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0458 > 2) by scale factor 0.977614\nI1207 11:53:35.185732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63581 > 2) by scale factor 0.431424\nI1207 11:53:36.126338   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31885 > 2) by scale factor 0.602619\nI1207 11:53:37.067194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57604 > 2) by scale factor 0.559278\nI1207 11:53:38.007695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68541 > 2) by scale factor 0.542681\nI1207 11:53:38.948011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1427 > 2) by scale factor 0.636395\nI1207 11:53:39.888408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77256 > 2) by scale factor 0.419062\nI1207 11:53:40.828783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09568 > 2) by scale factor 0.488319\nI1207 11:53:41.769353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79954 > 2) by scale factor 0.52638\nI1207 11:53:42.709616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43243 > 2) by scale factor 0.451219\nI1207 11:53:43.650390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90542 > 2) by scale factor 0.512109\nI1207 11:53:44.591459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03228 > 2) by scale factor 0.659569\nI1207 11:53:45.532196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78561 > 2) by scale factor 0.528317\nI1207 11:53:46.472772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11636 > 2) by scale factor 0.485866\nI1207 11:53:47.413359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00224 > 2) by scale factor 0.66617\nI1207 11:53:48.353514   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69076 > 2) by scale factor 0.743284\nI1207 11:53:49.294876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.37525 > 2) by scale factor 0.592548\nI1207 11:53:50.235760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40582 > 2) by scale factor 0.453945\nI1207 11:53:51.176507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02536 > 2) by scale factor 0.49685\nI1207 11:53:52.119427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24464 > 2) by scale factor 0.891013\nI1207 11:53:53.062734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1279 > 2) by scale factor 0.939893\nI1207 11:53:54.006552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18994 > 2) by scale factor 0.913265\nI1207 11:53:54.950137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34303 > 2) by scale factor 0.59826\nI1207 11:53:55.893508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26314 > 2) by scale factor 0.612906\nI1207 11:53:56.836810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92395 > 2) by scale factor 0.684005\nI1207 11:53:57.780675   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78209 > 2) by scale factor 0.528809\nI1207 11:53:58.724325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18014 > 2) by scale factor 0.628903\nI1207 11:53:59.667721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81424 > 2) by scale factor 0.710671\nI1207 11:54:00.611523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55312 > 2) by scale factor 0.562886\nI1207 11:54:01.554963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31377 > 2) by scale factor 0.603543\nI1207 11:54:02.499011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07002 > 2) by scale factor 0.491399\nI1207 11:54:03.442615   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43878 > 2) by scale factor 0.581601\nI1207 11:54:04.385819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36138 > 2) by scale factor 0.594994\nI1207 11:54:05.329547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30163 > 2) by scale factor 0.605761\nI1207 11:54:06.272817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35955 > 2) by scale factor 0.595317\nI1207 11:54:07.217078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12941 > 2) by scale factor 0.639098\nI1207 11:54:08.161044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87886 > 2) by scale factor 0.515615\nI1207 11:54:09.104979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56046 > 2) by scale factor 0.561726\nI1207 11:54:10.048319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73694 > 2) by scale factor 0.535197\nI1207 11:54:10.991256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82595 > 2) by scale factor 0.707727\nI1207 11:54:11.936007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6549 > 2) by scale factor 0.753323\nI1207 11:54:12.879709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47451 > 2) by scale factor 0.446977\nI1207 11:54:13.823961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80803 > 2) by scale factor 0.525206\nI1207 11:54:14.767546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49964 > 2) by scale factor 0.44448\nI1207 11:54:15.711429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13263 > 2) by scale factor 0.638441\nI1207 11:54:16.655746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.84032 > 2) by scale factor 0.52079\nI1207 11:54:17.600255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65701 > 2) by scale factor 0.752725\nI1207 11:54:18.544106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15346 > 2) by scale factor 0.92874\nI1207 11:54:19.488045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67877 > 2) by scale factor 0.543661\nI1207 11:54:20.431838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90359 > 2) by scale factor 0.512348\nI1207 11:54:21.376082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91773 > 2) by scale factor 0.510499\nI1207 11:54:22.320127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48598 > 2) by scale factor 0.445833\nI1207 11:54:23.263664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4444 > 2) by scale factor 0.818198\nI1207 11:54:24.207459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5364 > 2) by scale factor 0.565547\nI1207 11:54:25.150651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16142 > 2) by scale factor 0.480605\nI1207 11:54:26.094074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60839 > 2) by scale factor 0.554263\nI1207 11:54:27.037825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48956 > 2) by scale factor 0.573138\nI1207 11:54:27.981628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09542 > 2) by scale factor 0.954461\nI1207 11:54:28.925359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08213 > 2) by scale factor 0.648902\nI1207 11:54:29.868892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75354 > 2) by scale factor 0.53283\nI1207 11:54:30.811764   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53979 > 2) by scale factor 0.565005\nI1207 11:54:31.754520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43756 > 2) by scale factor 0.450698\nI1207 11:54:32.697993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20225 > 2) by scale factor 0.62456\nI1207 11:54:33.641424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74665 > 2) by scale factor 0.72816\nI1207 11:54:34.584250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10246 > 2) by scale factor 0.487513\nI1207 11:54:35.527628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71889 > 2) by scale factor 0.735594\nI1207 11:54:36.470595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13842 > 2) by scale factor 0.637264\nI1207 11:54:37.414166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9991 > 2) by scale factor 0.500112\nI1207 11:54:38.357600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35933 > 2) by scale factor 0.595356\nI1207 11:54:39.299777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2589 > 2) by scale factor 0.469605\nI1207 11:54:40.243223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65275 > 2) by scale factor 0.753934\nI1207 11:54:41.186715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20112 > 2) by scale factor 0.62478\nI1207 11:54:42.130501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68878 > 2) by scale factor 0.426551\nI1207 11:54:43.074473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93016 > 2) by scale factor 0.682556\nI1207 11:54:44.017640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.1284 > 2) by scale factor 0.639304\nI1207 11:54:44.960777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50403 > 2) by scale factor 0.444046\nI1207 11:54:45.903559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03845 > 2) by scale factor 0.495239\nI1207 11:54:46.847100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98514 > 2) by scale factor 0.401192\nI1207 11:54:47.790282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83032 > 2) by scale factor 0.52215\nI1207 11:54:48.733551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60721 > 2) by scale factor 0.434102\nI1207 11:54:49.677439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41904 > 2) by scale factor 0.584959\nI1207 11:54:50.620509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74044 > 2) by scale factor 0.534697\nI1207 11:54:51.564368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5739 > 2) by scale factor 0.559612\nI1207 11:54:52.508473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34708 > 2) by scale factor 0.597535\nI1207 11:54:52.520428   369 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1207 11:55:45.145167   369 solver.cpp:404]     Test net output #0: accuracy = 0.1498\nI1207 11:55:45.145648   369 solver.cpp:404]     Test net output #1: loss = 26.4636 (* 1 = 26.4636 loss)\nI1207 11:55:46.019683   369 solver.cpp:228] Iteration 15400, loss = 26.2262\nI1207 11:55:46.019729   369 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1207 11:55:46.019747   369 solver.cpp:244]     Train net output #1: loss = 26.2262 (* 1 = 26.2262 loss)\nI1207 11:55:46.089927   369 sgd_solver.cpp:166] Iteration 15400, lr = 2.31\nI1207 11:55:46.099781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.31454 > 2) by scale factor 0.603402\nI1207 11:55:47.040616   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66092 > 2) by scale factor 0.4291\nI1207 11:55:47.981305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.388 > 2) by scale factor 0.371195\nI1207 11:55:48.921855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37134 > 2) by scale factor 0.457526\nI1207 11:55:49.862474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81563 > 2) by scale factor 0.415314\nI1207 11:55:50.802829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38882 > 2) by scale factor 0.590177\nI1207 11:55:51.743149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.84192 > 2) by scale factor 0.41306\nI1207 11:55:52.683688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85577 > 2) by scale factor 0.700336\nI1207 11:55:53.624079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52479 > 2) by scale factor 0.44201\nI1207 11:55:54.564262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68017 > 2) by scale factor 0.543454\nI1207 11:55:55.504245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98233 > 2) by scale factor 0.401419\nI1207 11:55:56.441431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60278 > 2) by scale factor 0.555127\nI1207 11:55:57.377857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84855 > 2) by scale factor 0.519676\nI1207 11:55:58.314188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04675 > 2) by scale factor 0.494223\nI1207 11:55:59.250478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70352 > 2) by scale factor 0.425214\nI1207 11:56:00.186759   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45621 > 2) by scale factor 0.578668\nI1207 11:56:01.123260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12877 > 2) by scale factor 0.484405\nI1207 11:56:02.059830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94689 > 2) by scale factor 0.404294\nI1207 11:56:02.995998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42144 > 2) by scale factor 0.58455\nI1207 11:56:03.932368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35563 > 2) by scale factor 0.596013\nI1207 11:56:04.868894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07755 > 2) by scale factor 0.649868\nI1207 11:56:05.805264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83953 > 2) by scale factor 0.520898\nI1207 11:56:06.741793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37335 > 2) by scale factor 0.457316\nI1207 11:56:07.678190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96946 > 2) by scale factor 0.503847\nI1207 11:56:08.614715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95224 > 2) by scale factor 0.677452\nI1207 11:56:09.551082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69862 > 2) by scale factor 0.540743\nI1207 11:56:10.487730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54532 > 2) by scale factor 0.440013\nI1207 11:56:11.424142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07849 > 2) by scale factor 0.649668\nI1207 11:56:12.360647   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4 > 2) by scale factor 0.833334\nI1207 11:56:13.296965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.48089 > 2) by scale factor 0.806164\nI1207 11:56:14.233402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65108 > 2) by scale factor 0.547783\nI1207 11:56:15.169699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60257 > 2) by scale factor 0.768472\nI1207 11:56:16.106865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01292 > 2) by scale factor 0.49839\nI1207 11:56:17.043421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17113 > 2) by scale factor 0.921178\nI1207 11:56:17.980952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2948 > 2) by scale factor 0.465679\nI1207 11:56:18.919559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74193 > 2) by scale factor 0.729412\nI1207 11:56:19.858639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06146 > 2) by scale factor 0.653283\nI1207 11:56:21.735050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58456 > 2) by scale factor 0.773825\nI1207 11:56:22.674170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3486 > 2) by scale factor 0.597265\nI1207 11:56:23.613296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48723 > 2) by scale factor 0.57352\nI1207 11:56:24.552644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43265 > 2) by scale factor 0.822149\nI1207 11:56:25.491936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35197 > 2) by scale factor 0.459562\nI1207 11:56:26.431370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73342 > 2) by scale factor 0.535702\nI1207 11:56:27.370612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03058 > 2) by scale factor 0.659939\nI1207 11:56:28.309933   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39658 > 2) by scale factor 0.588827\nI1207 11:56:29.249089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80277 > 2) by scale factor 0.525933\nI1207 11:56:30.188477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31661 > 2) by scale factor 0.463326\nI1207 11:56:31.127420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79762 > 2) by scale factor 0.714894\nI1207 11:56:32.066359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77731 > 2) by scale factor 0.529477\nI1207 11:56:33.005550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04519 > 2) by scale factor 0.656775\nI1207 11:56:33.944716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80487 > 2) by scale factor 0.525642\nI1207 11:56:34.883586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18299 > 2) by scale factor 0.916173\nI1207 11:56:35.822731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68452 > 2) by scale factor 0.542812\nI1207 11:56:36.762336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75867 > 2) by scale factor 0.724987\nI1207 11:56:37.701634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86615 > 2) by scale factor 0.51731\nI1207 11:56:38.640493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40528 > 2) by scale factor 0.454001\nI1207 11:56:39.579947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57959 > 2) by scale factor 0.775318\nI1207 11:56:40.519083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21968 > 2) by scale factor 0.62118\nI1207 11:56:41.458197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.95136 > 2) by scale factor 0.677654\nI1207 11:56:42.397708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70362 > 2) by scale factor 0.540012\nI1207 11:56:43.336582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19343 > 2) by scale factor 0.476936\nI1207 11:56:44.275648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34488 > 2) by scale factor 0.460312\nI1207 11:56:45.214431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84411 > 2) by scale factor 0.703207\nI1207 11:56:46.153890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86207 > 2) by scale factor 0.517857\nI1207 11:56:47.092824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39506 > 2) by scale factor 0.589092\nI1207 11:56:48.031572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89882 > 2) by scale factor 0.512975\nI1207 11:56:48.970502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17797 > 2) by scale factor 0.478702\nI1207 11:56:49.909979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6588 > 2) by scale factor 0.75222\nI1207 11:56:50.849246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73201 > 2) by scale factor 0.732063\nI1207 11:56:51.788377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93292 > 2) by scale factor 0.681913\nI1207 11:56:52.727653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17616 > 2) by scale factor 0.629691\nI1207 11:56:53.666995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11066 > 2) by scale factor 0.64295\nI1207 11:56:54.606185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09049 > 2) by scale factor 0.488939\nI1207 11:56:55.544936   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.934 > 2) by scale factor 0.681663\nI1207 11:56:56.483569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04251 > 2) by scale factor 0.657352\nI1207 11:56:57.422327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2312 > 2) by scale factor 0.47268\nI1207 11:56:58.361124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32384 > 2) by scale factor 0.601713\nI1207 11:56:59.300313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36177 > 2) by scale factor 0.594925\nI1207 11:57:00.239274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35389 > 2) by scale factor 0.596322\nI1207 11:57:01.177827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2242 > 2) by scale factor 0.62031\nI1207 11:57:02.116607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57491 > 2) by scale factor 0.559455\nI1207 11:57:03.055547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34412 > 2) by scale factor 0.598065\nI1207 11:57:03.994576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74345 > 2) by scale factor 0.729009\nI1207 11:57:04.933751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54525 > 2) by scale factor 0.785778\nI1207 11:57:05.872880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06865 > 2) by scale factor 0.651752\nI1207 11:57:06.811549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64061 > 2) by scale factor 0.7574\nI1207 11:57:07.750109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54737 > 2) by scale factor 0.785124\nI1207 11:57:08.689317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.12816 > 2) by scale factor 0.484477\nI1207 11:57:09.628502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47283 > 2) by scale factor 0.5759\nI1207 11:57:10.567360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59075 > 2) by scale factor 0.771976\nI1207 11:57:11.505815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93369 > 2) by scale factor 0.508428\nI1207 11:57:12.444597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7579 > 2) by scale factor 0.420354\nI1207 11:57:13.383430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07937 > 2) by scale factor 0.649484\nI1207 11:57:14.322540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77321 > 2) by scale factor 0.530053\nI1207 11:57:15.261680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29847 > 2) by scale factor 0.465282\nI1207 11:57:16.200767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40701 > 2) by scale factor 0.587024\nI1207 11:57:17.139271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32036 > 2) by scale factor 0.861935\nI1207 11:57:18.077754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07902 > 2) by scale factor 0.649558\nI1207 11:57:19.016706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99811 > 2) by scale factor 0.667087\nI1207 11:57:19.026885   369 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1207 11:58:11.445137   369 solver.cpp:404]     Test net output #0: accuracy = 0.16015\nI1207 11:58:11.445763   369 solver.cpp:404]     Test net output #1: loss = 16.8792 (* 1 = 16.8792 loss)\nI1207 11:58:12.318498   369 solver.cpp:228] Iteration 15500, loss = 13.2604\nI1207 11:58:12.318541   369 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1207 
11:58:12.318558   369 solver.cpp:244]     Train net output #1: loss = 13.2604 (* 1 = 13.2604 loss)\nI1207 11:58:12.398188   369 sgd_solver.cpp:166] Iteration 15500, lr = 2.325\nI1207 11:58:12.407387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73571 > 2) by scale factor 0.731072\nI1207 11:58:13.343747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27592 > 2) by scale factor 0.610516\nI1207 11:58:14.279721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95334 > 2) by scale factor 0.505901\nI1207 11:58:15.216148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23162 > 2) by scale factor 0.472632\nI1207 11:58:16.152482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12419 > 2) by scale factor 0.484943\nI1207 11:58:17.089090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49991 > 2) by scale factor 0.444453\nI1207 11:58:18.025669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56022 > 2) by scale factor 0.781184\nI1207 11:58:18.962205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59128 > 2) by scale factor 0.556904\nI1207 11:58:19.898721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84707 > 2) by scale factor 0.702476\nI1207 11:58:20.835407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.244 > 2) by scale factor 0.471254\nI1207 11:58:21.771883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88835 > 2) by scale factor 0.692438\nI1207 11:58:22.708596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6834 > 2) by scale factor 0.745322\nI1207 11:58:23.644919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6932 > 2) by scale factor 0.742612\nI1207 11:58:24.581728   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84544 > 2) by scale factor 0.520097\nI1207 11:58:25.518432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16184 > 2) by scale factor 0.480556\nI1207 11:58:26.454857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05479 > 2) by scale factor 0.65471\nI1207 11:58:27.391166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44643 > 2) by scale factor 0.817519\nI1207 11:58:28.327955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63249 > 2) by scale factor 0.759737\nI1207 11:58:29.264690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01434 > 2) by scale factor 0.992883\nI1207 11:58:30.200999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6188 > 2) by scale factor 0.763709\nI1207 11:58:31.137284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19267 > 2) by scale factor 0.912129\nI1207 11:58:32.073643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54961 > 2) by scale factor 0.784435\nI1207 11:58:33.010331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03896 > 2) by scale factor 0.65812\nI1207 11:58:33.947113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9554 > 2) by scale factor 0.676727\nI1207 11:58:34.883323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6037 > 2) by scale factor 0.554985\nI1207 11:58:35.819985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09559 > 2) by scale factor 0.48833\nI1207 11:58:36.756417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27218 > 2) by scale factor 0.468145\nI1207 11:58:37.692986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.79495 > 2) by scale factor 0.715578\nI1207 11:58:38.629392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06949 > 2) by scale factor 0.491462\nI1207 11:58:39.565990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61198 > 2) by scale factor 0.765703\nI1207 11:58:40.502766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67002 > 2) by scale factor 0.749058\nI1207 11:58:41.439064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25458 > 2) by scale factor 0.887084\nI1207 11:58:42.375643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08027 > 2) by scale factor 0.649295\nI1207 11:58:43.314646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28395 > 2) by scale factor 0.609023\nI1207 11:58:44.253520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94729 > 2) by scale factor 0.506677\nI1207 11:58:45.192454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77349 > 2) by scale factor 0.721113\nI1207 11:58:46.130897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04917 > 2) by scale factor 0.493928\nI1207 11:58:47.069591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69851 > 2) by scale factor 0.741149\nI1207 11:58:48.008762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79049 > 2) by scale factor 0.527637\nI1207 11:58:48.947166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42997 > 2) by scale factor 0.583095\nI1207 11:58:49.885881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87054 > 2) by scale factor 0.516724\nI1207 11:58:50.824702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10854 > 2) by scale factor 0.643389\nI1207 11:58:51.763191   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85783 > 2) by scale factor 0.518427\nI1207 11:58:52.700453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02013 > 2) by scale factor 0.497497\nI1207 11:58:53.638449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70289 > 2) by scale factor 0.425271\nI1207 11:58:54.576035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77771 > 2) by scale factor 0.720017\nI1207 11:58:55.514014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38344 > 2) by scale factor 0.591115\nI1207 11:58:56.451669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31588 > 2) by scale factor 0.603157\nI1207 11:58:57.389533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52191 > 2) by scale factor 0.567873\nI1207 11:58:58.328094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91942 > 2) by scale factor 0.51028\nI1207 11:58:59.265326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24323 > 2) by scale factor 0.89157\nI1207 11:59:00.203083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94967 > 2) by scale factor 0.678043\nI1207 11:59:01.140806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80101 > 2) by scale factor 0.714027\nI1207 11:59:02.078028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67144 > 2) by scale factor 0.748661\nI1207 11:59:03.015996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.279 > 2) by scale factor 0.467399\nI1207 11:59:03.953944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64274 > 2) by scale factor 0.549037\nI1207 11:59:04.891829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.65461 > 2) by scale factor 0.753407\nI1207 11:59:05.829638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38322 > 2) by scale factor 0.456286\nI1207 11:59:06.767421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58467 > 2) by scale factor 0.436236\nI1207 11:59:07.704610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46581 > 2) by scale factor 0.811091\nI1207 11:59:08.642834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99704 > 2) by scale factor 0.50037\nI1207 11:59:09.581382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47043 > 2) by scale factor 0.447384\nI1207 11:59:10.519954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.619 > 2) by scale factor 0.763651\nI1207 11:59:11.458060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87414 > 2) by scale factor 0.695861\nI1207 11:59:12.396934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99489 > 2) by scale factor 0.667804\nI1207 11:59:13.336028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89636 > 2) by scale factor 0.690522\nI1207 11:59:14.274121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8822 > 2) by scale factor 0.693915\nI1207 11:59:15.212337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2912 > 2) by scale factor 0.872906\nI1207 11:59:16.150122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15006 > 2) by scale factor 0.634908\nI1207 11:59:17.088435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21456 > 2) by scale factor 0.474546\nI1207 11:59:18.026872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81095 > 2) by scale factor 0.524804\nI1207 11:59:18.965342   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49625 > 2) by scale factor 0.572042\nI1207 11:59:19.903723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49374 > 2) by scale factor 0.572453\nI1207 11:59:20.841820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15227 > 2) by scale factor 0.481664\nI1207 11:59:21.780138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62901 > 2) by scale factor 0.551115\nI1207 11:59:22.718353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42513 > 2) by scale factor 0.451964\nI1207 11:59:23.656905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07298 > 2) by scale factor 0.49104\nI1207 11:59:24.595639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6734 > 2) by scale factor 0.748112\nI1207 11:59:25.533987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53186 > 2) by scale factor 0.789934\nI1207 11:59:26.471997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64669 > 2) by scale factor 0.75566\nI1207 11:59:27.409785   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19976 > 2) by scale factor 0.909191\nI1207 11:59:28.348265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99065 > 2) by scale factor 0.668752\nI1207 11:59:29.286541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28017 > 2) by scale factor 0.609725\nI1207 11:59:30.224576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.739 > 2) by scale factor 0.730194\nI1207 11:59:31.162775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11884 > 2) by scale factor 0.390714\nI1207 11:59:32.100967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.83843 > 2) by scale factor 0.704614\nI1207 11:59:33.038902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0492 > 2) by scale factor 0.493925\nI1207 11:59:34.913389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62138 > 2) by scale factor 0.762957\nI1207 11:59:35.851130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40365 > 2) by scale factor 0.832067\nI1207 11:59:36.789752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93674 > 2) by scale factor 0.681027\nI1207 11:59:37.727694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11284 > 2) by scale factor 0.642499\nI1207 11:59:38.666390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93463 > 2) by scale factor 0.681516\nI1207 11:59:39.604668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17982 > 2) by scale factor 0.917508\nI1207 11:59:40.542641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42271 > 2) by scale factor 0.825521\nI1207 11:59:42.417229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52787 > 2) by scale factor 0.441709\nI1207 11:59:43.356014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8801 > 2) by scale factor 0.694421\nI1207 11:59:44.294464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78872 > 2) by scale factor 0.527883\nI1207 11:59:45.232563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72146 > 2) by scale factor 0.734901\nI1207 11:59:45.242794   369 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1207 12:00:37.570416   369 solver.cpp:404]     Test net output #0: accuracy = 0.17025\nI1207 12:00:37.571115   369 solver.cpp:404]     Test net output #1: loss = 13.898 (* 1 = 13.898 loss)\nI1207 12:00:38.443711   369 
solver.cpp:228] Iteration 15600, loss = 12.6928\nI1207 12:00:38.443749   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 12:00:38.443774   369 solver.cpp:244]     Train net output #1: loss = 12.6928 (* 1 = 12.6928 loss)\nI1207 12:00:38.518472   369 sgd_solver.cpp:166] Iteration 15600, lr = 2.34\nI1207 12:00:38.527700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43601 > 2) by scale factor 0.821016\nI1207 12:00:39.464166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77872 > 2) by scale factor 0.52928\nI1207 12:00:40.400635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49084 > 2) by scale factor 0.445351\nI1207 12:00:41.337322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17302 > 2) by scale factor 0.479269\nI1207 12:00:42.273669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62936 > 2) by scale factor 0.760642\nI1207 12:00:43.210245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55947 > 2) by scale factor 0.561882\nI1207 12:00:44.146692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83419 > 2) by scale factor 0.705669\nI1207 12:00:45.082855   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28318 > 2) by scale factor 0.875973\nI1207 12:00:46.018970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96475 > 2) by scale factor 0.674593\nI1207 12:00:46.955293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33732 > 2) by scale factor 0.461115\nI1207 12:00:47.891295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73351 > 2) by scale factor 0.535689\nI1207 12:00:48.827636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95338 > 2) by scale factor 0.677191\nI1207 12:00:49.764539   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96203 > 2) by scale factor 0.675213\nI1207 12:00:50.700570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12017 > 2) by scale factor 0.64099\nI1207 12:00:51.636865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68254 > 2) by scale factor 0.745563\nI1207 12:00:52.573351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61064 > 2) by scale factor 0.553919\nI1207 12:00:53.509537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39235 > 2) by scale factor 0.835998\nI1207 12:00:54.445664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81746 > 2) by scale factor 0.523908\nI1207 12:00:55.382275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61094 > 2) by scale factor 0.553872\nI1207 12:00:56.318333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45171 > 2) by scale factor 0.449266\nI1207 12:00:57.254590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38476 > 2) by scale factor 0.838659\nI1207 12:00:58.191298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97005 > 2) by scale factor 0.503772\nI1207 12:00:59.127362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0247 > 2) by scale factor 0.496932\nI1207 12:01:00.063773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06574 > 2) by scale factor 0.65237\nI1207 12:01:01.000017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8501 > 2) by scale factor 0.519468\nI1207 12:01:01.936326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03876 > 2) by scale factor 0.658164\nI1207 12:01:02.872866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.0496 > 2) by scale factor 0.493876\nI1207 12:01:03.808964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82224 > 2) by scale factor 0.523253\nI1207 12:01:04.745564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38588 > 2) by scale factor 0.590688\nI1207 12:01:05.681988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49341 > 2) by scale factor 0.802114\nI1207 12:01:06.618407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72451 > 2) by scale factor 0.734076\nI1207 12:01:07.554731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69516 > 2) by scale factor 0.541249\nI1207 12:01:08.490851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71486 > 2) by scale factor 0.736687\nI1207 12:01:09.429656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55834 > 2) by scale factor 0.781756\nI1207 12:01:10.367877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24185 > 2) by scale factor 0.471492\nI1207 12:01:11.305832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52481 > 2) by scale factor 0.442008\nI1207 12:01:12.243760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92027 > 2) by scale factor 0.510169\nI1207 12:01:13.181903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15944 > 2) by scale factor 0.480834\nI1207 12:01:14.120271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31606 > 2) by scale factor 0.603125\nI1207 12:01:15.057972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5263 > 2) by scale factor 0.791672\nI1207 12:01:15.996398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96575 > 2) by scale factor 0.402759\nI1207 12:01:16.935127   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04991 > 2) by scale factor 0.493839\nI1207 12:01:17.873697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68082 > 2) by scale factor 0.543357\nI1207 12:01:18.812490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65199 > 2) by scale factor 0.547647\nI1207 12:01:19.751111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98801 > 2) by scale factor 0.669342\nI1207 12:01:20.689074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20595 > 2) by scale factor 0.623841\nI1207 12:01:21.627534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85289 > 2) by scale factor 0.519091\nI1207 12:01:22.565984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46398 > 2) by scale factor 0.44803\nI1207 12:01:23.504570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03039 > 2) by scale factor 0.49623\nI1207 12:01:24.443058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7799 > 2) by scale factor 0.418419\nI1207 12:01:25.381920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44886 > 2) by scale factor 0.816707\nI1207 12:01:26.320372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96057 > 2) by scale factor 0.504978\nI1207 12:01:27.258932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65973 > 2) by scale factor 0.546489\nI1207 12:01:29.133782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23019 > 2) by scale factor 0.619158\nI1207 12:01:30.072468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39581 > 2) by scale factor 0.454979\nI1207 12:01:31.010871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.56139 > 2) by scale factor 0.438463\nI1207 12:01:31.949447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77081 > 2) by scale factor 0.721809\nI1207 12:01:32.888231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68108 > 2) by scale factor 0.745968\nI1207 12:01:33.826808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.423 > 2) by scale factor 0.584283\nI1207 12:01:34.765292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05741 > 2) by scale factor 0.654148\nI1207 12:01:35.703729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58205 > 2) by scale factor 0.436486\nI1207 12:01:36.641573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60616 > 2) by scale factor 0.554606\nI1207 12:01:37.579684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39976 > 2) by scale factor 0.588276\nI1207 12:01:38.517813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6937 > 2) by scale factor 0.426103\nI1207 12:01:39.456651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.871 > 2) by scale factor 0.516662\nI1207 12:01:40.395334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26041 > 2) by scale factor 0.613419\nI1207 12:01:41.333854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02981 > 2) by scale factor 0.397629\nI1207 12:01:42.271955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81769 > 2) by scale factor 0.415137\nI1207 12:01:43.209811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.67979 > 2) by scale factor 0.352126\nI1207 12:01:44.148061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53785 > 2) by scale factor 0.440738\nI1207 12:01:45.086607   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05464 > 2) by scale factor 0.493263\nI1207 12:01:46.025224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61972 > 2) by scale factor 0.432926\nI1207 12:01:46.963434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95878 > 2) by scale factor 0.675954\nI1207 12:01:47.901592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7655 > 2) by scale factor 0.419684\nI1207 12:01:48.839812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.353 > 2) by scale factor 0.849979\nI1207 12:01:49.777393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94561 > 2) by scale factor 0.506893\nI1207 12:01:50.715683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46442 > 2) by scale factor 0.577298\nI1207 12:01:51.654314   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92435 > 2) by scale factor 0.509639\nI1207 12:01:52.593030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56499 > 2) by scale factor 0.561011\nI1207 12:01:53.531515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59202 > 2) by scale factor 0.771598\nI1207 12:01:54.469962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6042 > 2) by scale factor 0.554909\nI1207 12:01:55.408018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82676 > 2) by scale factor 0.414357\nI1207 12:01:56.346103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81618 > 2) by scale factor 0.415267\nI1207 12:01:57.284571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89815 > 2) by scale factor 0.513064\nI1207 12:01:58.222561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.70883 > 2) by scale factor 0.539253\nI1207 12:01:59.160846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73843 > 2) by scale factor 0.534984\nI1207 12:02:00.099293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53779 > 2) by scale factor 0.565325\nI1207 12:02:01.037104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72013 > 2) by scale factor 0.735258\nI1207 12:02:01.975579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91156 > 2) by scale factor 0.686917\nI1207 12:02:02.913992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69613 > 2) by scale factor 0.425883\nI1207 12:02:03.852751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82103 > 2) by scale factor 0.708962\nI1207 12:02:04.791064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79624 > 2) by scale factor 0.526837\nI1207 12:02:05.729504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27588 > 2) by scale factor 0.610524\nI1207 12:02:06.667984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63722 > 2) by scale factor 0.758375\nI1207 12:02:07.606431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21423 > 2) by scale factor 0.622234\nI1207 12:02:08.544210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00693 > 2) by scale factor 0.665131\nI1207 12:02:09.483364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33507 > 2) by scale factor 0.599688\nI1207 12:02:10.421335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45431 > 2) by scale factor 0.814893\nI1207 12:02:11.368042   369 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1207 12:03:03.709681   369 solver.cpp:404]     Test net output #0: accuracy = 
0.19215\nI1207 12:03:03.710281   369 solver.cpp:404]     Test net output #1: loss = 11.5217 (* 1 = 11.5217 loss)\nI1207 12:03:04.582844   369 solver.cpp:228] Iteration 15700, loss = 13.2864\nI1207 12:03:04.582890   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 12:03:04.582914   369 solver.cpp:244]     Train net output #1: loss = 13.2864 (* 1 = 13.2864 loss)\nI1207 12:03:04.652722   369 sgd_solver.cpp:166] Iteration 15700, lr = 2.355\nI1207 12:03:04.661950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24662 > 2) by scale factor 0.890226\nI1207 12:03:05.597717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3633 > 2) by scale factor 0.846275\nI1207 12:03:06.533419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66214 > 2) by scale factor 0.751276\nI1207 12:03:07.470180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17489 > 2) by scale factor 0.919588\nI1207 12:03:08.406566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38135 > 2) by scale factor 0.45648\nI1207 12:03:09.342725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17729 > 2) by scale factor 0.478779\nI1207 12:03:10.279131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49616 > 2) by scale factor 0.572057\nI1207 12:03:11.215515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18568 > 2) by scale factor 0.62781\nI1207 12:03:12.151482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87752 > 2) by scale factor 0.515793\nI1207 12:03:13.087762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34313 > 2) by scale factor 0.598242\nI1207 12:03:14.024549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86079 > 2) by scale factor 0.518029\nI1207 12:03:14.961043   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10646 > 2) by scale factor 0.643819\nI1207 12:03:15.897331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73645 > 2) by scale factor 0.535268\nI1207 12:03:16.833780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73707 > 2) by scale factor 0.535179\nI1207 12:03:17.770074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20226 > 2) by scale factor 0.475935\nI1207 12:03:18.706277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29531 > 2) by scale factor 0.606922\nI1207 12:03:19.642531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09288 > 2) by scale factor 0.646647\nI1207 12:03:20.579030   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15784 > 2) by scale factor 0.633344\nI1207 12:03:21.515440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77918 > 2) by scale factor 0.719636\nI1207 12:03:22.451828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48673 > 2) by scale factor 0.804269\nI1207 12:03:23.388348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34112 > 2) by scale factor 0.598602\nI1207 12:03:24.324748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96618 > 2) by scale factor 0.504263\nI1207 12:03:25.260897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75434 > 2) by scale factor 0.726126\nI1207 12:03:26.197728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93247 > 2) by scale factor 0.508586\nI1207 12:03:27.133994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54869 > 2) by scale factor 0.563589\nI1207 12:03:28.070433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.14007 > 2) by scale factor 0.483084\nI1207 12:03:29.007046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27207 > 2) by scale factor 0.611234\nI1207 12:03:29.943449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39312 > 2) by scale factor 0.83573\nI1207 12:03:30.879777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75914 > 2) by scale factor 0.724863\nI1207 12:03:31.816145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05692 > 2) by scale factor 0.492985\nI1207 12:03:32.753036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17757 > 2) by scale factor 0.629411\nI1207 12:03:33.689285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97048 > 2) by scale factor 0.673291\nI1207 12:03:34.627624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03434 > 2) by scale factor 0.659121\nI1207 12:03:35.566993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72402 > 2) by scale factor 0.423369\nI1207 12:03:36.505690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35571 > 2) by scale factor 0.849001\nI1207 12:03:37.443941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41549 > 2) by scale factor 0.585568\nI1207 12:03:38.382632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92763 > 2) by scale factor 0.683147\nI1207 12:03:39.321372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25208 > 2) by scale factor 0.614991\nI1207 12:03:40.260136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28188 > 2) by scale factor 0.467085\nI1207 12:03:41.198859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90386 > 2) by scale factor 0.512314\nI1207 
12:03:42.137848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09442 > 2) by scale factor 0.646325\nI1207 12:03:43.076723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11579 > 2) by scale factor 0.641891\nI1207 12:03:44.015594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28906 > 2) by scale factor 0.608077\nI1207 12:03:44.954370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18719 > 2) by scale factor 0.627513\nI1207 12:03:45.893229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7595 > 2) by scale factor 0.72477\nI1207 12:03:46.831794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49235 > 2) by scale factor 0.57268\nI1207 12:03:47.770783   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53067 > 2) by scale factor 0.441436\nI1207 12:03:48.709158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96781 > 2) by scale factor 0.504057\nI1207 12:03:49.647192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35668 > 2) by scale factor 0.848651\nI1207 12:03:50.586064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87747 > 2) by scale factor 0.695054\nI1207 12:03:51.524895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49584 > 2) by scale factor 0.801335\nI1207 12:03:52.463151   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43167 > 2) by scale factor 0.582806\nI1207 12:03:53.401572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05317 > 2) by scale factor 0.493441\nI1207 12:03:54.339905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04612 > 2) by scale factor 0.396344\nI1207 12:03:55.278419   369 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.63544 > 2) by scale factor 0.431458\nI1207 12:03:56.216915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93303 > 2) by scale factor 0.681889\nI1207 12:03:57.154692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9888 > 2) by scale factor 0.669165\nI1207 12:03:58.092236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66598 > 2) by scale factor 0.545556\nI1207 12:03:59.029908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76728 > 2) by scale factor 0.419526\nI1207 12:03:59.967028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62025 > 2) by scale factor 0.552448\nI1207 12:04:00.904902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18774 > 2) by scale factor 0.627404\nI1207 12:04:01.843053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17516 > 2) by scale factor 0.629889\nI1207 12:04:02.781082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41394 > 2) by scale factor 0.585834\nI1207 12:04:03.719092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73302 > 2) by scale factor 0.731791\nI1207 12:04:04.656831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37825 > 2) by scale factor 0.456803\nI1207 12:04:05.595229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08832 > 2) by scale factor 0.647601\nI1207 12:04:06.532858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42266 > 2) by scale factor 0.584341\nI1207 12:04:07.470561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65981 > 2) by scale factor 0.751934\nI1207 12:04:08.408627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07435 > 2) by scale factor 0.964157\nI1207 
12:04:09.346282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76395 > 2) by scale factor 0.531357\nI1207 12:04:10.284291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13933 > 2) by scale factor 0.483169\nI1207 12:04:11.222143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37347 > 2) by scale factor 0.592862\nI1207 12:04:12.159878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02437 > 2) by scale factor 0.661294\nI1207 12:04:13.097793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95235 > 2) by scale factor 0.677426\nI1207 12:04:14.035838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63296 > 2) by scale factor 0.550515\nI1207 12:04:14.973695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85595 > 2) by scale factor 0.700294\nI1207 12:04:15.911527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52877 > 2) by scale factor 0.790898\nI1207 12:04:16.848808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89202 > 2) by scale factor 0.513871\nI1207 12:04:17.786717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28602 > 2) by scale factor 0.466633\nI1207 12:04:18.724541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91381 > 2) by scale factor 0.511011\nI1207 12:04:19.662374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19986 > 2) by scale factor 0.476207\nI1207 12:04:20.600709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16494 > 2) by scale factor 0.631923\nI1207 12:04:21.539039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43735 > 2) by scale factor 0.820565\nI1207 12:04:22.477794   369 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 3.7069 > 2) by scale factor 0.539534\nI1207 12:04:23.416198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6121 > 2) by scale factor 0.553694\nI1207 12:04:24.354534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47522 > 2) by scale factor 0.575503\nI1207 12:04:25.293083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4905 > 2) by scale factor 0.445384\nI1207 12:04:26.231070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55874 > 2) by scale factor 0.561997\nI1207 12:04:27.169203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83498 > 2) by scale factor 0.705472\nI1207 12:04:28.107342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59696 > 2) by scale factor 0.770132\nI1207 12:04:29.045732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14296 > 2) by scale factor 0.636342\nI1207 12:04:29.984267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07574 > 2) by scale factor 0.490708\nI1207 12:04:30.922943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8439 > 2) by scale factor 0.520305\nI1207 12:04:31.861050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83119 > 2) by scale factor 0.706416\nI1207 12:04:32.799731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25869 > 2) by scale factor 0.469628\nI1207 12:04:33.738106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63288 > 2) by scale factor 0.550528\nI1207 12:04:34.676870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10965 > 2) by scale factor 0.643158\nI1207 12:04:35.616210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48499 > 2) by scale factor 
0.445932\nI1207 12:04:36.554564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71616 > 2) by scale factor 0.53819\nI1207 12:04:37.493224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5436 > 2) by scale factor 0.440179\nI1207 12:04:37.503535   369 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1207 12:05:29.848592   369 solver.cpp:404]     Test net output #0: accuracy = 0.10125\nI1207 12:05:29.849200   369 solver.cpp:404]     Test net output #1: loss = 31.3363 (* 1 = 31.3363 loss)\nI1207 12:05:30.721112   369 solver.cpp:228] Iteration 15800, loss = 32.2854\nI1207 12:05:30.721156   369 solver.cpp:244]     Train net output #0: accuracy = 0.06\nI1207 12:05:30.721181   369 solver.cpp:244]     Train net output #1: loss = 32.2854 (* 1 = 32.2854 loss)\nI1207 12:05:30.795653   369 sgd_solver.cpp:166] Iteration 15800, lr = 2.37\nI1207 12:05:30.804832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56273 > 2) by scale factor 0.438334\nI1207 12:05:31.741322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36047 > 2) by scale factor 0.595154\nI1207 12:05:32.677963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18359 > 2) by scale factor 0.478059\nI1207 12:05:33.613759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06236 > 2) by scale factor 0.65309\nI1207 12:05:34.549700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83154 > 2) by scale factor 0.521984\nI1207 12:05:35.485502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86107 > 2) by scale factor 0.517991\nI1207 12:05:36.420996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94244 > 2) by scale factor 0.404658\nI1207 12:05:37.356765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3911 > 2) by scale factor 
0.455467\nI1207 12:05:38.292541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59838 > 2) by scale factor 0.555806\nI1207 12:05:39.228150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47587 > 2) by scale factor 0.807797\nI1207 12:05:40.163969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63918 > 2) by scale factor 0.757811\nI1207 12:05:41.099433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24787 > 2) by scale factor 0.470824\nI1207 12:05:42.034935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26289 > 2) by scale factor 0.469165\nI1207 12:05:42.970556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40048 > 2) by scale factor 0.454495\nI1207 12:05:43.906365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40421 > 2) by scale factor 0.587508\nI1207 12:05:44.841814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98476 > 2) by scale factor 0.501913\nI1207 12:05:45.777323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88262 > 2) by scale factor 0.515116\nI1207 12:05:46.712934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01484 > 2) by scale factor 0.398816\nI1207 12:05:47.648761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40335 > 2) by scale factor 0.454199\nI1207 12:05:48.584321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08441 > 2) by scale factor 0.393359\nI1207 12:05:49.520119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.14903 > 2) by scale factor 0.388423\nI1207 12:05:50.455940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28286 > 2) by scale factor 0.609225\nI1207 12:05:51.392066   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.99904 > 2) by scale factor 0.400077\nI1207 12:05:52.327714   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98384 > 2) by scale factor 0.401297\nI1207 12:05:53.263334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32468 > 2) by scale factor 0.601562\nI1207 12:05:54.199218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14034 > 2) by scale factor 0.636873\nI1207 12:05:55.135471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05408 > 2) by scale factor 0.654863\nI1207 12:05:56.072320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20884 > 2) by scale factor 0.475191\nI1207 12:05:57.008738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98767 > 2) by scale factor 0.400989\nI1207 12:05:57.944777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49129 > 2) by scale factor 0.572854\nI1207 12:05:58.881541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91805 > 2) by scale factor 0.68539\nI1207 12:05:59.817941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56733 > 2) by scale factor 0.560643\nI1207 12:06:00.754320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14608 > 2) by scale factor 0.635712\nI1207 12:06:01.691340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37401 > 2) by scale factor 0.457246\nI1207 12:06:02.630403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66532 > 2) by scale factor 0.750378\nI1207 12:06:03.569156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03675 > 2) by scale factor 0.658599\nI1207 12:06:04.508152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60958 > 2) by scale 
factor 0.766408\nI1207 12:06:05.447163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83024 > 2) by scale factor 0.706655\nI1207 12:06:06.385869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11771 > 2) by scale factor 0.641497\nI1207 12:06:07.323734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9722 > 2) by scale factor 0.5035\nI1207 12:06:08.262095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18893 > 2) by scale factor 0.627169\nI1207 12:06:09.200531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17475 > 2) by scale factor 0.62997\nI1207 12:06:10.138851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69521 > 2) by scale factor 0.541241\nI1207 12:06:11.076717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1563 > 2) by scale factor 0.481198\nI1207 12:06:12.015403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28678 > 2) by scale factor 0.608499\nI1207 12:06:12.953217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52975 > 2) by scale factor 0.790591\nI1207 12:06:13.891448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82224 > 2) by scale factor 0.708656\nI1207 12:06:14.829891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57277 > 2) by scale factor 0.777373\nI1207 12:06:15.767773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35244 > 2) by scale factor 0.459512\nI1207 12:06:16.705739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3451 > 2) by scale factor 0.597889\nI1207 12:06:17.643880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32234 > 2) by scale factor 0.601986\nI1207 12:06:18.581903   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.86035 > 2) by scale factor 0.411493\nI1207 12:06:19.519507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4003 > 2) by scale factor 0.588184\nI1207 12:06:20.457826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76468 > 2) by scale factor 0.72341\nI1207 12:06:21.396183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24668 > 2) by scale factor 0.616013\nI1207 12:06:22.334538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96862 > 2) by scale factor 0.673713\nI1207 12:06:23.272440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07923 > 2) by scale factor 0.649513\nI1207 12:06:24.210939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54877 > 2) by scale factor 0.784693\nI1207 12:06:25.149111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80293 > 2) by scale factor 0.71354\nI1207 12:06:26.087110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93555 > 2) by scale factor 0.508188\nI1207 12:06:27.025573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48871 > 2) by scale factor 0.445562\nI1207 12:06:27.963731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62872 > 2) by scale factor 0.551159\nI1207 12:06:28.902042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87363 > 2) by scale factor 0.516312\nI1207 12:06:29.839928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16914 > 2) by scale factor 0.631085\nI1207 12:06:30.778501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05921 > 2) by scale factor 0.653764\nI1207 12:06:31.717231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74024 > 2) by scale factor 
0.729862\nI1207 12:06:32.655149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57564 > 2) by scale factor 0.55934\nI1207 12:06:33.593411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42522 > 2) by scale factor 0.583904\nI1207 12:06:34.531790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00174 > 2) by scale factor 0.666281\nI1207 12:06:35.469709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34699 > 2) by scale factor 0.597552\nI1207 12:06:36.408159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39278 > 2) by scale factor 0.589487\nI1207 12:06:37.346324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22827 > 2) by scale factor 0.473007\nI1207 12:06:38.284400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70718 > 2) by scale factor 0.738776\nI1207 12:06:39.222764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2192 > 2) by scale factor 0.621272\nI1207 12:06:40.161098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87393 > 2) by scale factor 0.516272\nI1207 12:06:41.099036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88142 > 2) by scale factor 0.515275\nI1207 12:06:42.037096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68848 > 2) by scale factor 0.542229\nI1207 12:06:42.975169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5611 > 2) by scale factor 0.561624\nI1207 12:06:43.913147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57225 > 2) by scale factor 0.559871\nI1207 12:06:44.851126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60798 > 2) by scale factor 0.43403\nI1207 12:06:45.789125   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.94732 > 2) by scale factor 0.404259\nI1207 12:06:46.726938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57564 > 2) by scale factor 0.559341\nI1207 12:06:47.664793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17212 > 2) by scale factor 0.479372\nI1207 12:06:48.602979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07098 > 2) by scale factor 0.651258\nI1207 12:06:49.541132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3097 > 2) by scale factor 0.604284\nI1207 12:06:50.479439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08195 > 2) by scale factor 0.489962\nI1207 12:06:51.417305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54653 > 2) by scale factor 0.785381\nI1207 12:06:52.355602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33916 > 2) by scale factor 0.598952\nI1207 12:06:53.293566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35001 > 2) by scale factor 0.459769\nI1207 12:06:54.231761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63372 > 2) by scale factor 0.431619\nI1207 12:06:55.169872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02838 > 2) by scale factor 0.660419\nI1207 12:06:56.108275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6711 > 2) by scale factor 0.428164\nI1207 12:06:57.045990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99139 > 2) by scale factor 0.501079\nI1207 12:06:57.984313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42832 > 2) by scale factor 0.583376\nI1207 12:06:58.922315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06438 > 2) by scale 
factor 0.65266\nI1207 12:06:59.861016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00613 > 2) by scale factor 0.665307\nI1207 12:07:00.799192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62792 > 2) by scale factor 0.55128\nI1207 12:07:01.737933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81979 > 2) by scale factor 0.709272\nI1207 12:07:03.611856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86464 > 2) by scale factor 0.517513\nI1207 12:07:03.622006   369 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1207 12:07:55.963243   369 solver.cpp:404]     Test net output #0: accuracy = 0.1649\nI1207 12:07:55.963841   369 solver.cpp:404]     Test net output #1: loss = 21.7161 (* 1 = 21.7161 loss)\nI1207 12:07:56.835155   369 solver.cpp:228] Iteration 15900, loss = 19.3479\nI1207 12:07:56.835198   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 12:07:56.835222   369 solver.cpp:244]     Train net output #1: loss = 19.3479 (* 1 = 19.3479 loss)\nI1207 12:07:56.907810   369 sgd_solver.cpp:166] Iteration 15900, lr = 2.385\nI1207 12:07:56.917011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32298 > 2) by scale factor 0.60187\nI1207 12:07:57.852900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24811 > 2) by scale factor 0.615744\nI1207 12:07:58.788813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25642 > 2) by scale factor 0.614172\nI1207 12:07:59.724505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07645 > 2) by scale factor 0.6501\nI1207 12:08:00.660404   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03691 > 2) by scale factor 0.658565\nI1207 12:08:01.595966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90233 > 2) by scale factor 
0.512515\nI1207 12:08:02.531370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33746 > 2) by scale factor 0.855628\nI1207 12:08:03.467500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14449 > 2) by scale factor 0.636033\nI1207 12:08:04.403327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11955 > 2) by scale factor 0.641119\nI1207 12:08:05.338661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73401 > 2) by scale factor 0.731525\nI1207 12:08:06.274345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7771 > 2) by scale factor 0.720175\nI1207 12:08:07.210306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18891 > 2) by scale factor 0.627173\nI1207 12:08:08.145500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73799 > 2) by scale factor 0.730464\nI1207 12:08:09.081210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66941 > 2) by scale factor 0.749229\nI1207 12:08:10.016695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25353 > 2) by scale factor 0.614717\nI1207 12:08:10.952500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37327 > 2) by scale factor 0.592897\nI1207 12:08:11.888180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5542 > 2) by scale factor 0.783025\nI1207 12:08:12.823603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63737 > 2) by scale factor 0.758331\nI1207 12:08:13.758949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02921 > 2) by scale factor 0.496375\nI1207 12:08:14.695049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05186 > 2) by scale factor 0.493601\nI1207 12:08:15.630964   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.75257 > 2) by scale factor 0.532968\nI1207 12:08:16.566548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71425 > 2) by scale factor 0.736852\nI1207 12:08:18.435683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67633 > 2) by scale factor 0.747293\nI1207 12:08:19.371234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13603 > 2) by scale factor 0.637749\nI1207 12:08:20.307385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61847 > 2) by scale factor 0.433044\nI1207 12:08:21.243033   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2739 > 2) by scale factor 0.467957\nI1207 12:08:22.178812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42277 > 2) by scale factor 0.584322\nI1207 12:08:23.114536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24187 > 2) by scale factor 0.47149\nI1207 12:08:24.050076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32124 > 2) by scale factor 0.602185\nI1207 12:08:24.986080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92443 > 2) by scale factor 0.509629\nI1207 12:08:25.921950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1954 > 2) by scale factor 0.910994\nI1207 12:08:26.857880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25085 > 2) by scale factor 0.615223\nI1207 12:08:27.794098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0089 > 2) by scale factor 0.664694\nI1207 12:08:28.731050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98097 > 2) by scale factor 0.50239\nI1207 12:08:29.669406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48381 > 2) by scale factor 
0.805214\nI1207 12:08:30.608101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41238 > 2) by scale factor 0.829056\nI1207 12:08:31.546728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77003 > 2) by scale factor 0.722014\nI1207 12:08:32.486268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71913 > 2) by scale factor 0.73553\nI1207 12:08:33.425047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77427 > 2) by scale factor 0.72091\nI1207 12:08:34.364075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39089 > 2) by scale factor 0.455489\nI1207 12:08:35.303222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67267 > 2) by scale factor 0.428021\nI1207 12:08:36.241727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70804 > 2) by scale factor 0.539369\nI1207 12:08:37.180382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94673 > 2) by scale factor 0.506749\nI1207 12:08:38.119287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62037 > 2) by scale factor 0.76325\nI1207 12:08:39.058172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56642 > 2) by scale factor 0.560787\nI1207 12:08:39.996798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4225 > 2) by scale factor 0.452233\nI1207 12:08:40.935556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66881 > 2) by scale factor 0.545136\nI1207 12:08:41.874333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62353 > 2) by scale factor 0.762332\nI1207 12:08:42.813359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13124 > 2) by scale factor 0.484116\nI1207 12:08:43.751227   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.73118 > 2) by scale factor 0.732283\nI1207 12:08:44.689635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31788 > 2) by scale factor 0.602794\nI1207 12:08:45.627198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69765 > 2) by scale factor 0.540884\nI1207 12:08:46.565879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97953 > 2) by scale factor 0.671246\nI1207 12:08:47.504302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23633 > 2) by scale factor 0.894322\nI1207 12:08:48.443198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19507 > 2) by scale factor 0.47675\nI1207 12:08:49.381954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10558 > 2) by scale factor 0.949858\nI1207 12:08:50.320483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09501 > 2) by scale factor 0.646201\nI1207 12:08:51.259560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54172 > 2) by scale factor 0.564697\nI1207 12:08:52.198179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36702 > 2) by scale factor 0.593996\nI1207 12:08:53.136461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75139 > 2) by scale factor 0.726904\nI1207 12:08:54.075026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91294 > 2) by scale factor 0.511125\nI1207 12:08:55.014281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87888 > 2) by scale factor 0.515613\nI1207 12:08:55.953227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65997 > 2) by scale factor 0.546453\nI1207 12:08:56.891710   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0582 > 2) by scale 
factor 0.492829\nI1207 12:08:57.831053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04495 > 2) by scale factor 0.656824\nI1207 12:08:58.769790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88082 > 2) by scale factor 0.515355\nI1207 12:08:59.708365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50509 > 2) by scale factor 0.5706\nI1207 12:09:00.647325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93821 > 2) by scale factor 0.507845\nI1207 12:09:01.586087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28628 > 2) by scale factor 0.608591\nI1207 12:09:02.525465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80254 > 2) by scale factor 0.713639\nI1207 12:09:03.463621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24223 > 2) by scale factor 0.616859\nI1207 12:09:04.401769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50057 > 2) by scale factor 0.571335\nI1207 12:09:05.339674   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94475 > 2) by scale factor 0.507003\nI1207 12:09:06.277611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38542 > 2) by scale factor 0.590769\nI1207 12:09:07.216136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13014 > 2) by scale factor 0.638949\nI1207 12:09:08.155076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57863 > 2) by scale factor 0.775607\nI1207 12:09:09.093842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83677 > 2) by scale factor 0.705028\nI1207 12:09:10.032897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72533 > 2) by scale factor 0.536866\nI1207 12:09:10.971858   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.47984 > 2) by scale factor 0.574739\nI1207 12:09:11.910491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25166 > 2) by scale factor 0.888232\nI1207 12:09:12.849057   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2712 > 2) by scale factor 0.880591\nI1207 12:09:13.788038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19759 > 2) by scale factor 0.625471\nI1207 12:09:14.726966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39358 > 2) by scale factor 0.455209\nI1207 12:09:15.665818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76207 > 2) by scale factor 0.531623\nI1207 12:09:16.604387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04661 > 2) by scale factor 0.656468\nI1207 12:09:17.543233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07554 > 2) by scale factor 0.650292\nI1207 12:09:18.482659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55363 > 2) by scale factor 0.7832\nI1207 12:09:19.421731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71935 > 2) by scale factor 0.73547\nI1207 12:09:20.360406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8505 > 2) by scale factor 0.701631\nI1207 12:09:21.299082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77664 > 2) by scale factor 0.529571\nI1207 12:09:22.237921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06506 > 2) by scale factor 0.491998\nI1207 12:09:23.176795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86837 > 2) by scale factor 0.697261\nI1207 12:09:24.115948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15829 > 2) by scale 
factor 0.480967\nI1207 12:09:25.054821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80366 > 2) by scale factor 0.713354\nI1207 12:09:25.993222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3556 > 2) by scale factor 0.596018\nI1207 12:09:26.932245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53086 > 2) by scale factor 0.566434\nI1207 12:09:27.871755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91241 > 2) by scale factor 0.686716\nI1207 12:09:28.810617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62388 > 2) by scale factor 0.551895\nI1207 12:09:29.749500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62045 > 2) by scale factor 0.432858\nI1207 12:09:29.759656   369 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1207 12:10:22.098482   369 solver.cpp:404]     Test net output #0: accuracy = 0.203\nI1207 12:10:22.099120   369 solver.cpp:404]     Test net output #1: loss = 8.78481 (* 1 = 8.78481 loss)\nI1207 12:10:22.970952   369 solver.cpp:228] Iteration 16000, loss = 10.1833\nI1207 12:10:22.970985   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 12:10:22.971002   369 solver.cpp:244]     Train net output #1: loss = 10.1833 (* 1 = 10.1833 loss)\nI1207 12:10:23.052970   369 sgd_solver.cpp:166] Iteration 16000, lr = 2.4\nI1207 12:10:23.995858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73987 > 2) by scale factor 0.729963\nI1207 12:10:24.931810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91018 > 2) by scale factor 0.511485\nI1207 12:10:25.867729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37865 > 2) by scale factor 0.840812\nI1207 12:10:26.803712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61261 > 2) by scale factor 
0.765518\nI1207 12:10:27.739465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73037 > 2) by scale factor 0.732502\nI1207 12:10:28.674456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5263 > 2) by scale factor 0.567166\nI1207 12:10:29.610132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82919 > 2) by scale factor 0.706917\nI1207 12:10:30.545215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56575 > 2) by scale factor 0.7795\nI1207 12:10:31.480773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43594 > 2) by scale factor 0.821038\nI1207 12:10:32.416478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81421 > 2) by scale factor 0.524355\nI1207 12:10:33.351795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57544 > 2) by scale factor 0.559372\nI1207 12:10:34.287143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06825 > 2) by scale factor 0.491612\nI1207 12:10:35.222565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97639 > 2) by scale factor 0.671956\nI1207 12:10:36.157747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38137 > 2) by scale factor 0.456478\nI1207 12:10:37.093430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11921 > 2) by scale factor 0.641188\nI1207 12:10:38.028784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66545 > 2) by scale factor 0.750341\nI1207 12:10:38.963388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19766 > 2) by scale factor 0.476456\nI1207 12:10:39.898659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87293 > 2) by scale factor 0.410431\nI1207 12:10:40.834401   369 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.01028 > 2) by scale factor 0.498718\nI1207 12:10:41.769826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34181 > 2) by scale factor 0.854041\nI1207 12:10:42.705127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4168 > 2) by scale factor 0.452816\nI1207 12:10:43.640729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23888 > 2) by scale factor 0.471823\nI1207 12:10:44.575876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42939 > 2) by scale factor 0.823253\nI1207 12:10:45.511559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50809 > 2) by scale factor 0.797419\nI1207 12:10:46.446918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57728 > 2) by scale factor 0.559084\nI1207 12:10:47.382863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65389 > 2) by scale factor 0.547362\nI1207 12:10:48.318225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53861 > 2) by scale factor 0.787834\nI1207 12:10:49.253571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22533 > 2) by scale factor 0.898743\nI1207 12:10:50.189599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63005 > 2) by scale factor 0.550956\nI1207 12:10:51.125145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67798 > 2) by scale factor 0.543777\nI1207 12:10:52.060806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22693 > 2) by scale factor 0.619785\nI1207 12:10:52.996527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9916 > 2) by scale factor 0.668539\nI1207 12:10:53.932953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92779 > 2) by scale 
factor 0.68311\nI1207 12:10:54.868585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32343 > 2) by scale factor 0.601788\nI1207 12:10:55.807559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78401 > 2) by scale factor 0.528539\nI1207 12:10:56.746480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85008 > 2) by scale factor 0.701735\nI1207 12:10:57.685286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31811 > 2) by scale factor 0.602753\nI1207 12:10:58.624402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54727 > 2) by scale factor 0.563814\nI1207 12:10:59.563311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09898 > 2) by scale factor 0.645374\nI1207 12:11:00.502578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71088 > 2) by scale factor 0.737768\nI1207 12:11:01.441505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15759 > 2) by scale factor 0.633395\nI1207 12:11:02.380676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39018 > 2) by scale factor 0.455563\nI1207 12:11:03.319484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29888 > 2) by scale factor 0.606266\nI1207 12:11:04.258044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5098 > 2) by scale factor 0.569833\nI1207 12:11:05.196876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31751 > 2) by scale factor 0.46323\nI1207 12:11:06.135833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23776 > 2) by scale factor 0.471948\nI1207 12:11:07.074803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88965 > 2) by scale factor 0.692125\nI1207 12:11:08.013890   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.74676 > 2) by scale factor 0.728132\nI1207 12:11:08.952492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60681 > 2) by scale factor 0.767221\nI1207 12:11:09.891541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51155 > 2) by scale factor 0.569549\nI1207 12:11:10.830610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08982 > 2) by scale factor 0.647287\nI1207 12:11:11.769562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89258 > 2) by scale factor 0.513798\nI1207 12:11:12.708220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72642 > 2) by scale factor 0.536709\nI1207 12:11:13.647367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00208 > 2) by scale factor 0.49974\nI1207 12:11:14.585891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0179 > 2) by scale factor 0.497773\nI1207 12:11:15.524703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92509 > 2) by scale factor 0.683739\nI1207 12:11:16.463619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78422 > 2) by scale factor 0.528511\nI1207 12:11:17.402539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97949 > 2) by scale factor 0.671257\nI1207 12:11:18.341459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82733 > 2) by scale factor 0.522557\nI1207 12:11:19.280342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98075 > 2) by scale factor 0.670972\nI1207 12:11:20.218987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08736 > 2) by scale factor 0.489314\nI1207 12:11:21.158051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09743 > 2) by 
scale factor 0.488111\nI1207 12:11:22.096846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40551 > 2) by scale factor 0.831426\nI1207 12:11:23.036082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28948 > 2) by scale factor 0.607998\nI1207 12:11:23.975586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49389 > 2) by scale factor 0.572429\nI1207 12:11:25.851548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86142 > 2) by scale factor 0.698953\nI1207 12:11:27.727311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06211 > 2) by scale factor 0.492355\nI1207 12:11:28.666209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0992 > 2) by scale factor 0.4879\nI1207 12:11:29.605001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13525 > 2) by scale factor 0.637907\nI1207 12:11:30.543817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82 > 2) by scale factor 0.52356\nI1207 12:11:31.483077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04097 > 2) by scale factor 0.494931\nI1207 12:11:32.422077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28339 > 2) by scale factor 0.609127\nI1207 12:11:33.360826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75089 > 2) by scale factor 0.727039\nI1207 12:11:34.299902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06762 > 2) by scale factor 0.651971\nI1207 12:11:35.238970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68514 > 2) by scale factor 0.542721\nI1207 12:11:36.177454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02264 > 2) by scale factor 0.497186\nI1207 12:11:37.116622   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.03803 > 2) by scale factor 0.981337\nI1207 12:11:38.055819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67109 > 2) by scale factor 0.544797\nI1207 12:11:38.994557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75517 > 2) by scale factor 0.725907\nI1207 12:11:39.933364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50468 > 2) by scale factor 0.798504\nI1207 12:11:40.872205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46129 > 2) by scale factor 0.57782\nI1207 12:11:41.810905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73749 > 2) by scale factor 0.422164\nI1207 12:11:42.749665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12603 > 2) by scale factor 0.484728\nI1207 12:11:43.687961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85326 > 2) by scale factor 0.519041\nI1207 12:11:44.626601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55441 > 2) by scale factor 0.562681\nI1207 12:11:45.565528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15076 > 2) by scale factor 0.634766\nI1207 12:11:46.504206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33378 > 2) by scale factor 0.461491\nI1207 12:11:47.443375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96285 > 2) by scale factor 0.402994\nI1207 12:11:48.381849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05724 > 2) by scale factor 0.492946\nI1207 12:11:49.320680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.43139 > 2) by scale factor 0.36823\nI1207 12:11:50.259536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15935 > 2) by 
scale factor 0.387646\nI1207 12:11:51.198125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89314 > 2) by scale factor 0.513724\nI1207 12:11:52.136890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.484 > 2) by scale factor 0.44603\nI1207 12:11:53.075053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57323 > 2) by scale factor 0.777233\nI1207 12:11:54.014600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37729 > 2) by scale factor 0.456903\nI1207 12:11:54.953191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7598 > 2) by scale factor 0.531943\nI1207 12:11:55.892287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38005 > 2) by scale factor 0.840317\nI1207 12:11:55.902402   369 solver.cpp:337] Iteration 16100, Testing net (#0)\nI1207 12:12:48.234449   369 solver.cpp:404]     Test net output #0: accuracy = 0.18045\nI1207 12:12:48.235067   369 solver.cpp:404]     Test net output #1: loss = 15.7984 (* 1 = 15.7984 loss)\nI1207 12:12:49.106217   369 solver.cpp:228] Iteration 16100, loss = 15.0826\nI1207 12:12:49.106250   369 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1207 12:12:49.106266   369 solver.cpp:244]     Train net output #1: loss = 15.0826 (* 1 = 15.0826 loss)\nI1207 12:12:49.180224   369 sgd_solver.cpp:166] Iteration 16100, lr = 2.415\nI1207 12:12:49.189492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88496 > 2) by scale factor 0.514805\nI1207 12:12:50.124677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15358 > 2) by scale factor 0.6342\nI1207 12:12:51.060165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16397 > 2) by scale factor 0.632117\nI1207 12:12:51.996170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56215 > 2) by scale 
factor 0.561459\nI1207 12:12:52.931797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43196 > 2) by scale factor 0.582757\nI1207 12:12:53.867117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55203 > 2) by scale factor 0.563058\nI1207 12:12:54.802558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29349 > 2) by scale factor 0.465822\nI1207 12:12:55.737833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79528 > 2) by scale factor 0.526971\nI1207 12:12:56.673274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87672 > 2) by scale factor 0.515899\nI1207 12:12:57.608955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00854 > 2) by scale factor 0.498935\nI1207 12:12:58.544277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04874 > 2) by scale factor 0.396139\nI1207 12:12:59.479791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52692 > 2) by scale factor 0.441801\nI1207 12:13:00.415402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94112 > 2) by scale factor 0.404767\nI1207 12:13:01.350878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06756 > 2) by scale factor 0.651985\nI1207 12:13:02.286224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5329 > 2) by scale factor 0.78961\nI1207 12:13:03.221874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03309 > 2) by scale factor 0.659394\nI1207 12:13:04.157160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48028 > 2) by scale factor 0.4464\nI1207 12:13:05.093283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72468 > 2) by scale factor 0.536958\nI1207 12:13:06.028741   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.49278 > 2) by scale factor 0.57261\nI1207 12:13:06.964272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47597 > 2) by scale factor 0.575379\nI1207 12:13:07.900077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24031 > 2) by scale factor 0.617225\nI1207 12:13:08.835655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6727 > 2) by scale factor 0.428018\nI1207 12:13:09.771301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63732 > 2) by scale factor 0.431283\nI1207 12:13:10.706683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86052 > 2) by scale factor 0.699174\nI1207 12:13:11.642241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49075 > 2) by scale factor 0.802972\nI1207 12:13:12.577504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36427 > 2) by scale factor 0.845928\nI1207 12:13:13.513244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63961 > 2) by scale factor 0.54951\nI1207 12:13:14.448707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32136 > 2) by scale factor 0.602163\nI1207 12:13:15.384449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07689 > 2) by scale factor 0.962979\nI1207 12:13:16.319746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43046 > 2) by scale factor 0.82289\nI1207 12:13:17.261418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73301 > 2) by scale factor 0.731793\nI1207 12:13:18.197700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69974 > 2) by scale factor 0.740813\nI1207 12:13:20.068049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48719 > 2) by 
scale factor 0.80412\nI1207 12:13:21.005066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37316 > 2) by scale factor 0.592917\nI1207 12:13:21.941148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65663 > 2) by scale factor 0.752832\nI1207 12:13:22.879103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00377 > 2) by scale factor 0.66583\nI1207 12:13:23.817132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61939 > 2) by scale factor 0.552579\nI1207 12:13:24.755306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75661 > 2) by scale factor 0.725529\nI1207 12:13:25.693176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62779 > 2) by scale factor 0.5513\nI1207 12:13:26.631245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10014 > 2) by scale factor 0.487789\nI1207 12:13:27.569313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7872 > 2) by scale factor 0.41778\nI1207 12:13:28.507222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70334 > 2) by scale factor 0.425229\nI1207 12:13:29.444876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16136 > 2) by scale factor 0.925343\nI1207 12:13:30.382757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67301 > 2) by scale factor 0.544513\nI1207 12:13:31.320924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58624 > 2) by scale factor 0.557687\nI1207 12:13:32.258548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98958 > 2) by scale factor 0.66899\nI1207 12:13:33.196285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89167 > 2) by scale factor 0.691641\nI1207 12:13:34.134330   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.68555 > 2) by scale factor 0.744727\nI1207 12:13:35.072167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48629 > 2) by scale factor 0.445802\nI1207 12:13:36.009891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49115 > 2) by scale factor 0.572878\nI1207 12:13:36.948047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20849 > 2) by scale factor 0.623346\nI1207 12:13:37.886020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55954 > 2) by scale factor 0.56187\nI1207 12:13:38.824116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.286 > 2) by scale factor 0.466636\nI1207 12:13:39.762564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49564 > 2) by scale factor 0.572142\nI1207 12:13:40.700546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57005 > 2) by scale factor 0.560216\nI1207 12:13:41.638758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92765 > 2) by scale factor 0.683142\nI1207 12:13:42.576952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25325 > 2) by scale factor 0.470229\nI1207 12:13:43.514631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42027 > 2) by scale factor 0.826355\nI1207 12:13:44.452411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91392 > 2) by scale factor 0.686361\nI1207 12:13:45.390115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25133 > 2) by scale factor 0.470441\nI1207 12:13:46.328456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4623 > 2) by scale factor 0.448199\nI1207 12:13:47.266391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06756 > 2) by 
scale factor 0.651984\nI1207 12:13:48.204689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44077 > 2) by scale factor 0.450373\nI1207 12:13:49.142966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88434 > 2) by scale factor 0.514888\nI1207 12:13:50.081774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88152 > 2) by scale factor 0.694078\nI1207 12:13:51.019861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09024 > 2) by scale factor 0.488968\nI1207 12:13:51.957633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58968 > 2) by scale factor 0.557152\nI1207 12:13:52.896287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41579 > 2) by scale factor 0.585515\nI1207 12:13:53.834993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5682 > 2) by scale factor 0.778756\nI1207 12:13:54.773402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25088 > 2) by scale factor 0.47049\nI1207 12:13:55.711758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28672 > 2) by scale factor 0.466557\nI1207 12:13:56.649987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90538 > 2) by scale factor 0.512114\nI1207 12:13:57.588701   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84647 > 2) by scale factor 0.702624\nI1207 12:13:58.526731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1042 > 2) by scale factor 0.487306\nI1207 12:13:59.465194   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18387 > 2) by scale factor 0.478027\nI1207 12:14:00.402817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60002 > 2) by scale factor 0.434781\nI1207 12:14:01.340860   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.17789 > 2) by scale factor 0.629348\nI1207 12:14:02.279211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61061 > 2) by scale factor 0.433782\nI1207 12:14:03.217334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65788 > 2) by scale factor 0.546764\nI1207 12:14:04.155848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72907 > 2) by scale factor 0.422916\nI1207 12:14:05.093789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40391 > 2) by scale factor 0.587559\nI1207 12:14:06.031730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78216 > 2) by scale factor 0.528799\nI1207 12:14:06.969491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84239 > 2) by scale factor 0.52051\nI1207 12:14:07.907296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68105 > 2) by scale factor 0.543324\nI1207 12:14:08.844955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7767 > 2) by scale factor 0.529562\nI1207 12:14:09.782729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86248 > 2) by scale factor 0.517802\nI1207 12:14:10.721067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03576 > 2) by scale factor 0.397159\nI1207 12:14:11.658823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72892 > 2) by scale factor 0.42293\nI1207 12:14:12.596318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83622 > 2) by scale factor 0.413546\nI1207 12:14:13.534312   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21407 > 2) by scale factor 0.4746\nI1207 12:14:14.472679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68011 > 2) by scale 
factor 0.746239\nI1207 12:14:15.411298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37628 > 2) by scale factor 0.592367\nI1207 12:14:16.349702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52141 > 2) by scale factor 0.793208\nI1207 12:14:17.288087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65648 > 2) by scale factor 0.429509\nI1207 12:14:18.226452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67734 > 2) by scale factor 0.543872\nI1207 12:14:19.164795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69456 > 2) by scale factor 0.541336\nI1207 12:14:20.103478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85939 > 2) by scale factor 0.518217\nI1207 12:14:21.042594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64328 > 2) by scale factor 0.43073\nI1207 12:14:21.980669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95308 > 2) by scale factor 0.677258\nI1207 12:14:21.990922   369 solver.cpp:337] Iteration 16200, Testing net (#0)\nI1207 12:15:14.324182   369 solver.cpp:404]     Test net output #0: accuracy = 0.19705\nI1207 12:15:14.324782   369 solver.cpp:404]     Test net output #1: loss = 11.8462 (* 1 = 11.8462 loss)\nI1207 12:15:15.197365   369 solver.cpp:228] Iteration 16200, loss = 13.6519\nI1207 12:15:15.197397   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 12:15:15.197414   369 solver.cpp:244]     Train net output #1: loss = 13.6519 (* 1 = 13.6519 loss)\nI1207 12:15:15.270668   369 sgd_solver.cpp:166] Iteration 16200, lr = 2.43\nI1207 12:15:15.279836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40524 > 2) by scale factor 0.58733\nI1207 12:15:16.216346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44065 > 2) by scale 
factor 0.450385\nI1207 12:15:17.153417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37598 > 2) by scale factor 0.841757\nI1207 12:15:18.089974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14441 > 2) by scale factor 0.63605\nI1207 12:15:19.026334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05145 > 2) by scale factor 0.655427\nI1207 12:15:19.962594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46943 > 2) by scale factor 0.447484\nI1207 12:15:20.898365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96841 > 2) by scale factor 0.673762\nI1207 12:15:21.833891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16497 > 2) by scale factor 0.631917\nI1207 12:15:22.770539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.334 > 2) by scale factor 0.599881\nI1207 12:15:23.707259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5761 > 2) by scale factor 0.437053\nI1207 12:15:24.643244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78562 > 2) by scale factor 0.417918\nI1207 12:15:25.579607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63405 > 2) by scale factor 0.550351\nI1207 12:15:26.515851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18765 > 2) by scale factor 0.477595\nI1207 12:15:27.452085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40017 > 2) by scale factor 0.454528\nI1207 12:15:28.388288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11522 > 2) by scale factor 0.486001\nI1207 12:15:29.324828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66878 > 2) by scale factor 0.54514\nI1207 12:15:30.260839   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.28165 > 2) by scale factor 0.467109\nI1207 12:15:31.197204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59891 > 2) by scale factor 0.555723\nI1207 12:15:32.133677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64698 > 2) by scale factor 0.548399\nI1207 12:15:33.070006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14444 > 2) by scale factor 0.482574\nI1207 12:15:34.006191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09993 > 2) by scale factor 0.487814\nI1207 12:15:34.942453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44479 > 2) by scale factor 0.449965\nI1207 12:15:35.878628   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85526 > 2) by scale factor 0.518772\nI1207 12:15:36.814724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26952 > 2) by scale factor 0.468437\nI1207 12:15:37.750802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31552 > 2) by scale factor 0.463443\nI1207 12:15:38.687162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87785 > 2) by scale factor 0.694964\nI1207 12:15:39.623452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44129 > 2) by scale factor 0.581177\nI1207 12:15:40.559885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40254 > 2) by scale factor 0.587797\nI1207 12:15:41.496551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29277 > 2) by scale factor 0.607392\nI1207 12:15:42.432844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58107 > 2) by scale factor 0.436579\nI1207 12:15:43.369027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62324 > 2) by 
scale factor 0.551992\nI1207 12:15:44.305260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98304 > 2) by scale factor 0.502129\nI1207 12:15:45.241425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23795 > 2) by scale factor 0.617675\nI1207 12:15:46.178288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74524 > 2) by scale factor 0.534011\nI1207 12:15:47.114634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23879 > 2) by scale factor 0.617514\nI1207 12:15:48.051066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53204 > 2) by scale factor 0.566245\nI1207 12:15:48.987051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36348 > 2) by scale factor 0.594623\nI1207 12:15:49.924965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05417 > 2) by scale factor 0.493319\nI1207 12:15:50.862820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6926 > 2) by scale factor 0.541623\nI1207 12:15:51.800818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10659 > 2) by scale factor 0.487023\nI1207 12:15:52.738322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43975 > 2) by scale factor 0.581438\nI1207 12:15:53.675905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69219 > 2) by scale factor 0.74289\nI1207 12:15:54.613392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32957 > 2) by scale factor 0.46194\nI1207 12:15:55.551017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35448 > 2) by scale factor 0.596217\nI1207 12:15:56.488890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01827 > 2) by scale factor 0.398544\nI1207 12:15:57.427204   369 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.96855 > 2) by scale factor 0.503962\nI1207 12:15:58.365149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50429 > 2) by scale factor 0.444021\nI1207 12:15:59.302842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91481 > 2) by scale factor 0.51088\nI1207 12:16:00.240744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94928 > 2) by scale factor 0.506422\nI1207 12:16:01.178638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09549 > 2) by scale factor 0.954431\nI1207 12:16:02.116062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87642 > 2) by scale factor 0.695308\nI1207 12:16:03.053637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72094 > 2) by scale factor 0.537498\nI1207 12:16:03.991447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16128 > 2) by scale factor 0.480622\nI1207 12:16:04.929204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22176 > 2) by scale factor 0.473736\nI1207 12:16:05.866329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5774 > 2) by scale factor 0.559066\nI1207 12:16:06.803650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57234 > 2) by scale factor 0.559856\nI1207 12:16:07.741732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05754 > 2) by scale factor 0.65412\nI1207 12:16:08.678954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92363 > 2) by scale factor 0.406204\nI1207 12:16:09.616758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03076 > 2) by scale factor 0.496185\nI1207 12:16:10.554774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43672 > 2) by 
scale factor 0.450783\nI1207 12:16:11.492698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77008 > 2) by scale factor 0.530493\nI1207 12:16:12.430188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51763 > 2) by scale factor 0.44271\nI1207 12:16:13.367709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88366 > 2) by scale factor 0.514978\nI1207 12:16:14.305575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38537 > 2) by scale factor 0.590777\nI1207 12:16:15.243593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71977 > 2) by scale factor 0.537668\nI1207 12:16:16.182353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99865 > 2) by scale factor 0.666966\nI1207 12:16:17.120513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64534 > 2) by scale factor 0.548646\nI1207 12:16:18.058321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16922 > 2) by scale factor 0.631069\nI1207 12:16:18.997023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96496 > 2) by scale factor 0.504419\nI1207 12:16:19.935219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17377 > 2) by scale factor 0.630166\nI1207 12:16:20.873327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98742 > 2) by scale factor 0.669474\nI1207 12:16:21.811300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12529 > 2) by scale factor 0.63994\nI1207 12:16:22.749729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83303 > 2) by scale factor 0.705958\nI1207 12:16:23.688143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77427 > 2) by scale factor 0.720909\nI1207 12:16:24.626101   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7211 > 2) by scale factor 0.537476\nI1207 12:16:25.564826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19572 > 2) by scale factor 0.476676\nI1207 12:16:26.502851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86885 > 2) by scale factor 0.51695\nI1207 12:16:27.440845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78116 > 2) by scale factor 0.719125\nI1207 12:16:28.379179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29944 > 2) by scale factor 0.606164\nI1207 12:16:29.317356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38263 > 2) by scale factor 0.591256\nI1207 12:16:30.255439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55698 > 2) by scale factor 0.782172\nI1207 12:16:31.193852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77353 > 2) by scale factor 0.721104\nI1207 12:16:32.132438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33783 > 2) by scale factor 0.46106\nI1207 12:16:33.071040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67976 > 2) by scale factor 0.543514\nI1207 12:16:34.008818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47768 > 2) by scale factor 0.807208\nI1207 12:16:34.947157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21612 > 2) by scale factor 0.621868\nI1207 12:16:36.821506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72752 > 2) by scale factor 0.423055\nI1207 12:16:37.760107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26766 > 2) by scale factor 0.612059\nI1207 12:16:38.698325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.74797 > 2) by scale factor 0.533622\nI1207 12:16:39.636646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36624 > 2) by scale factor 0.594134\nI1207 12:16:40.575038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98655 > 2) by scale factor 0.501688\nI1207 12:16:41.513746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0982 > 2) by scale factor 0.645535\nI1207 12:16:42.451915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04613 > 2) by scale factor 0.494299\nI1207 12:16:43.389772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28146 > 2) by scale factor 0.46713\nI1207 12:16:44.328099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29775 > 2) by scale factor 0.606473\nI1207 12:16:45.266149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63359 > 2) by scale factor 0.759421\nI1207 12:16:46.205128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49525 > 2) by scale factor 0.572206\nI1207 12:16:47.143129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03966 > 2) by scale factor 0.657968\nI1207 12:16:48.081256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86336 > 2) by scale factor 0.517684\nI1207 12:16:48.091434   369 solver.cpp:337] Iteration 16300, Testing net (#0)\nI1207 12:17:40.428799   369 solver.cpp:404]     Test net output #0: accuracy = 0.14905\nI1207 12:17:40.429421   369 solver.cpp:404]     Test net output #1: loss = 19.1043 (* 1 = 19.1043 loss)\nI1207 12:17:41.301803   369 solver.cpp:228] Iteration 16300, loss = 19.3224\nI1207 12:17:41.301846   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 12:17:41.301863   369 solver.cpp:244]     Train net output #1: loss = 19.3224 (* 1 = 19.3224 loss)\nI1207 12:17:41.372109   369 
sgd_solver.cpp:166] Iteration 16300, lr = 2.445\nI1207 12:17:41.381319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32991 > 2) by scale factor 0.461903\nI1207 12:17:42.317330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03032 > 2) by scale factor 0.496238\nI1207 12:17:43.253952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32016 > 2) by scale factor 0.602381\nI1207 12:17:44.190179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5412 > 2) by scale factor 0.78703\nI1207 12:17:45.126296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31894 > 2) by scale factor 0.602602\nI1207 12:17:46.062561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12894 > 2) by scale factor 0.484386\nI1207 12:17:46.999604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64692 > 2) by scale factor 0.548408\nI1207 12:17:47.935561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89616 > 2) by scale factor 0.690569\nI1207 12:17:48.871840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70856 > 2) by scale factor 0.7384\nI1207 12:17:49.808347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46049 > 2) by scale factor 0.577953\nI1207 12:17:50.744400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10767 > 2) by scale factor 0.486894\nI1207 12:17:51.680829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70089 > 2) by scale factor 0.54041\nI1207 12:17:52.617303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29861 > 2) by scale factor 0.606317\nI1207 12:17:53.553584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45617 > 2) by scale factor 0.578676\nI1207 12:17:54.489861   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95937 > 2) by scale factor 0.675819\nI1207 12:17:55.426275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34124 > 2) by scale factor 0.854247\nI1207 12:17:56.362560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70141 > 2) by scale factor 0.540335\nI1207 12:17:57.299196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30023 > 2) by scale factor 0.606017\nI1207 12:17:58.235615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31192 > 2) by scale factor 0.603878\nI1207 12:17:59.172003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74552 > 2) by scale factor 0.42145\nI1207 12:18:00.108250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95408 > 2) by scale factor 0.505807\nI1207 12:18:01.044572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77864 > 2) by scale factor 0.529291\nI1207 12:18:01.981132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43347 > 2) by scale factor 0.582501\nI1207 12:18:02.917739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23713 > 2) by scale factor 0.617832\nI1207 12:18:03.853940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20031 > 2) by scale factor 0.624939\nI1207 12:18:04.790174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52452 > 2) by scale factor 0.792231\nI1207 12:18:05.726570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99107 > 2) by scale factor 0.668656\nI1207 12:18:06.662398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30501 > 2) by scale factor 0.464575\nI1207 12:18:07.598835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.60977 > 2) by scale factor 0.766351\nI1207 12:18:08.534791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43093 > 2) by scale factor 0.582933\nI1207 12:18:09.471451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7687 > 2) by scale factor 0.722362\nI1207 12:18:10.407805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01619 > 2) by scale factor 0.663088\nI1207 12:18:11.344348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65353 > 2) by scale factor 0.429781\nI1207 12:18:12.281185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21262 > 2) by scale factor 0.622546\nI1207 12:18:13.217618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93784 > 2) by scale factor 0.507893\nI1207 12:18:14.153843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43308 > 2) by scale factor 0.582568\nI1207 12:18:15.091276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30552 > 2) by scale factor 0.46452\nI1207 12:18:16.030040   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57237 > 2) by scale factor 0.559853\nI1207 12:18:16.968662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73199 > 2) by scale factor 0.732066\nI1207 12:18:17.907652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75632 > 2) by scale factor 0.532436\nI1207 12:18:18.846572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00367 > 2) by scale factor 0.665853\nI1207 12:18:19.785516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08415 > 2) by scale factor 0.648477\nI1207 12:18:20.723968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69052 > 2) by scale factor 0.743351\nI1207 12:18:21.662895 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54372 > 2) by scale factor 0.78625\nI1207 12:18:22.601807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21366 > 2) by scale factor 0.622343\nI1207 12:18:23.540859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14205 > 2) by scale factor 0.933683\nI1207 12:18:24.479854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98734 > 2) by scale factor 0.669492\nI1207 12:18:25.418615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22937 > 2) by scale factor 0.897115\nI1207 12:18:26.357190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66368 > 2) by scale factor 0.545899\nI1207 12:18:27.295893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40739 > 2) by scale factor 0.453784\nI1207 12:18:28.234889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79987 > 2) by scale factor 0.526334\nI1207 12:18:29.173610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13245 > 2) by scale factor 0.638477\nI1207 12:18:30.112372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50545 > 2) by scale factor 0.798258\nI1207 12:18:31.051291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58043 > 2) by scale factor 0.775064\nI1207 12:18:31.989955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71986 > 2) by scale factor 0.735331\nI1207 12:18:32.928845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20481 > 2) by scale factor 0.475646\nI1207 12:18:33.867827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88506 > 2) by scale factor 0.693228\nI1207 12:18:35.743192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.69994 > 2) by scale factor 0.540549\nI1207 12:18:36.681591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75953 > 2) by scale factor 0.724761\nI1207 12:18:37.620427   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75236 > 2) by scale factor 0.726648\nI1207 12:18:38.559505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60692 > 2) by scale factor 0.76719\nI1207 12:18:39.498126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80721 > 2) by scale factor 0.525319\nI1207 12:18:40.437373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9925 > 2) by scale factor 0.50094\nI1207 12:18:41.376274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03446 > 2) by scale factor 0.49573\nI1207 12:18:42.315521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22986 > 2) by scale factor 0.619223\nI1207 12:18:43.254029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9196 > 2) by scale factor 0.685026\nI1207 12:18:44.192930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47774 > 2) by scale factor 0.807188\nI1207 12:18:45.131589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80014 > 2) by scale factor 0.526296\nI1207 12:18:46.070050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59589 > 2) by scale factor 0.556191\nI1207 12:18:47.009073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59539 > 2) by scale factor 0.556268\nI1207 12:18:47.948096   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43899 > 2) by scale factor 0.581567\nI1207 12:18:48.886920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32353 > 2) by scale factor 0.601769\nI1207 12:18:49.825919   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51807 > 2) by scale factor 0.794258\nI1207 12:18:50.764384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31992 > 2) by scale factor 0.602423\nI1207 12:18:51.703387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29141 > 2) by scale factor 0.607642\nI1207 12:18:52.641455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94567 > 2) by scale factor 0.678963\nI1207 12:18:53.580539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70569 > 2) by scale factor 0.739183\nI1207 12:18:54.519106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97902 > 2) by scale factor 0.671363\nI1207 12:18:55.458053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82005 > 2) by scale factor 0.709208\nI1207 12:18:56.396739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83083 > 2) by scale factor 0.52208\nI1207 12:18:57.335517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62766 > 2) by scale factor 0.761132\nI1207 12:18:58.274204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81313 > 2) by scale factor 0.524503\nI1207 12:18:59.212601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41473 > 2) by scale factor 0.585698\nI1207 12:19:00.151271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05073 > 2) by scale factor 0.655581\nI1207 12:19:01.089956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63823 > 2) by scale factor 0.431199\nI1207 12:19:02.028823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23542 > 2) by scale factor 0.618157\nI1207 12:19:02.967303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.99227 > 2) by scale factor 0.400619\nI1207 12:19:03.906117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65723 > 2) by scale factor 0.546862\nI1207 12:19:04.844595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09052 > 2) by scale factor 0.488936\nI1207 12:19:05.783233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53876 > 2) by scale factor 0.440649\nI1207 12:19:06.721792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40719 > 2) by scale factor 0.586993\nI1207 12:19:07.660336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09934 > 2) by scale factor 0.487884\nI1207 12:19:08.598839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80995 > 2) by scale factor 0.415805\nI1207 12:19:09.537629   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01641 > 2) by scale factor 0.497957\nI1207 12:19:10.476369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86283 > 2) by scale factor 0.517755\nI1207 12:19:11.414723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86247 > 2) by scale factor 0.517804\nI1207 12:19:12.353951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93165 > 2) by scale factor 0.508692\nI1207 12:19:13.292327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31831 > 2) by scale factor 0.602717\nI1207 12:19:14.230715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47421 > 2) by scale factor 0.447007\nI1207 12:19:14.240902   369 solver.cpp:337] Iteration 16400, Testing net (#0)\nI1207 12:20:06.511135   369 solver.cpp:404]     Test net output #0: accuracy = 0.175\nI1207 12:20:06.511750   369 solver.cpp:404]     Test net output #1: loss = 14.616 (* 1 = 14.616 loss)\nI1207 12:20:07.384037   369 
solver.cpp:228] Iteration 16400, loss = 14.9318\nI1207 12:20:07.384071   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 12:20:07.384088   369 solver.cpp:244]     Train net output #1: loss = 14.9318 (* 1 = 14.9318 loss)\nI1207 12:20:07.461272   369 sgd_solver.cpp:166] Iteration 16400, lr = 2.46\nI1207 12:20:07.470463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97492 > 2) by scale factor 0.672287\nI1207 12:20:08.406908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73632 > 2) by scale factor 0.535287\nI1207 12:20:09.343189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88215 > 2) by scale factor 0.515179\nI1207 12:20:10.279104   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42629 > 2) by scale factor 0.583721\nI1207 12:20:11.215399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07283 > 2) by scale factor 0.650866\nI1207 12:20:12.151644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97803 > 2) by scale factor 0.671585\nI1207 12:20:13.088212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21453 > 2) by scale factor 0.903128\nI1207 12:20:14.024539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95228 > 2) by scale factor 0.403855\nI1207 12:20:14.960753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80141 > 2) by scale factor 0.526121\nI1207 12:20:15.897063   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70306 > 2) by scale factor 0.425255\nI1207 12:20:16.833824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55838 > 2) by scale factor 0.438753\nI1207 12:20:17.769892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71235 > 2) by scale factor 0.424417\nI1207 12:20:18.706342   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46536 > 2) by scale factor 0.577141\nI1207 12:20:19.642709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54454 > 2) by scale factor 0.564248\nI1207 12:20:20.579110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23714 > 2) by scale factor 0.61783\nI1207 12:20:21.515341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45105 > 2) by scale factor 0.815977\nI1207 12:20:22.451856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12222 > 2) by scale factor 0.485175\nI1207 12:20:24.322239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69364 > 2) by scale factor 0.74249\nI1207 12:20:25.258700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04226 > 2) by scale factor 0.657406\nI1207 12:20:26.195464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44015 > 2) by scale factor 0.819622\nI1207 12:20:27.131362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10298 > 2) by scale factor 0.487451\nI1207 12:20:28.067836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67838 > 2) by scale factor 0.543717\nI1207 12:20:29.003921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16387 > 2) by scale factor 0.480322\nI1207 12:20:29.939749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75448 > 2) by scale factor 0.420656\nI1207 12:20:30.875964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90803 > 2) by scale factor 0.68775\nI1207 12:20:31.812175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22994 > 2) by scale factor 0.472819\nI1207 12:20:32.748517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.48646 > 2) by scale factor 0.573648\nI1207 12:20:33.684954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02616 > 2) by scale factor 0.496751\nI1207 12:20:34.621565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42803 > 2) by scale factor 0.823713\nI1207 12:20:35.557905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47318 > 2) by scale factor 0.447109\nI1207 12:20:36.494117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.082 > 2) by scale factor 0.489956\nI1207 12:20:37.430796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4642 > 2) by scale factor 0.448009\nI1207 12:20:38.367568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53047 > 2) by scale factor 0.790366\nI1207 12:20:39.303901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70733 > 2) by scale factor 0.738736\nI1207 12:20:40.240346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2044 > 2) by scale factor 0.907275\nI1207 12:20:41.178803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25392 > 2) by scale factor 0.614643\nI1207 12:20:42.117555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22297 > 2) by scale factor 0.4736\nI1207 12:20:43.993113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08517 > 2) by scale factor 0.648262\nI1207 12:20:44.932041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11283 > 2) by scale factor 0.946596\nI1207 12:20:45.870877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0553 > 2) by scale factor 0.654601\nI1207 12:20:46.809797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00813 > 2) by scale factor 0.498986\nI1207 12:20:47.748893   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48279 > 2) by scale factor 0.805546\nI1207 12:20:48.687830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23165 > 2) by scale factor 0.472629\nI1207 12:20:49.626755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81905 > 2) by scale factor 0.709459\nI1207 12:20:50.565315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67494 > 2) by scale factor 0.544226\nI1207 12:20:51.504396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48458 > 2) by scale factor 0.445973\nI1207 12:20:52.443553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38079 > 2) by scale factor 0.591578\nI1207 12:20:53.382230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08121 > 2) by scale factor 0.649096\nI1207 12:20:54.320590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65471 > 2) by scale factor 0.753378\nI1207 12:20:55.258939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4985 > 2) by scale factor 0.800479\nI1207 12:20:56.197274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53331 > 2) by scale factor 0.441179\nI1207 12:20:57.135155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95805 > 2) by scale factor 0.676121\nI1207 12:20:58.072664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25584 > 2) by scale factor 0.61428\nI1207 12:20:59.010432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05031 > 2) by scale factor 0.655672\nI1207 12:20:59.948563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02132 > 2) by scale factor 0.661962\nI1207 12:21:00.886899   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.38385 > 2) by scale factor 0.45622\nI1207 12:21:01.824592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58861 > 2) by scale factor 0.557319\nI1207 12:21:02.762774   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59932 > 2) by scale factor 0.769432\nI1207 12:21:03.701241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64271 > 2) by scale factor 0.756799\nI1207 12:21:04.639322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24876 > 2) by scale factor 0.470725\nI1207 12:21:05.577605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97191 > 2) by scale factor 0.40226\nI1207 12:21:06.515591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69103 > 2) by scale factor 0.541854\nI1207 12:21:07.453495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43739 > 2) by scale factor 0.82055\nI1207 12:21:08.392210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52011 > 2) by scale factor 0.568163\nI1207 12:21:09.329836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95318 > 2) by scale factor 0.505922\nI1207 12:21:10.267820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54449 > 2) by scale factor 0.440093\nI1207 12:21:11.205613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41447 > 2) by scale factor 0.453056\nI1207 12:21:12.143893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5103 > 2) by scale factor 0.796716\nI1207 12:21:13.082224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82222 > 2) by scale factor 0.708661\nI1207 12:21:14.020328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87333 > 2) by scale factor 0.696058\nI1207 12:21:14.958431   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4313 > 2) by scale factor 0.582869\nI1207 12:21:15.896036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75555 > 2) by scale factor 0.725809\nI1207 12:21:16.833865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40614 > 2) by scale factor 0.587174\nI1207 12:21:17.771729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69613 > 2) by scale factor 0.741803\nI1207 12:21:18.709341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99464 > 2) by scale factor 0.500671\nI1207 12:21:19.647061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87284 > 2) by scale factor 0.696176\nI1207 12:21:20.584957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51819 > 2) by scale factor 0.568474\nI1207 12:21:21.522936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9115 > 2) by scale factor 0.686931\nI1207 12:21:22.460924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71123 > 2) by scale factor 0.737671\nI1207 12:21:23.398746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82412 > 2) by scale factor 0.708186\nI1207 12:21:24.336622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91664 > 2) by scale factor 0.685721\nI1207 12:21:25.274562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58816 > 2) by scale factor 0.557389\nI1207 12:21:26.212280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03415 > 2) by scale factor 0.495767\nI1207 12:21:27.150331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74408 > 2) by scale factor 0.534176\nI1207 12:21:28.088446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.91827 > 2) by scale factor 0.510429\nI1207 12:21:29.026335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27243 > 2) by scale factor 0.880116\nI1207 12:21:29.964229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78261 > 2) by scale factor 0.528736\nI1207 12:21:30.902119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14943 > 2) by scale factor 0.481993\nI1207 12:21:31.839555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0555 > 2) by scale factor 0.654558\nI1207 12:21:32.777189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89986 > 2) by scale factor 0.512839\nI1207 12:21:33.714886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49389 > 2) by scale factor 0.801959\nI1207 12:21:34.653064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96351 > 2) by scale factor 0.674875\nI1207 12:21:35.590493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73404 > 2) by scale factor 0.535613\nI1207 12:21:36.528581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83971 > 2) by scale factor 0.520872\nI1207 12:21:37.466768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65157 > 2) by scale factor 0.54771\nI1207 12:21:38.405046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2918 > 2) by scale factor 0.466004\nI1207 12:21:39.343056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3042 > 2) by scale factor 0.60529\nI1207 12:21:40.280402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22758 > 2) by scale factor 0.619659\nI1207 12:21:40.290583   369 solver.cpp:337] Iteration 16500, Testing net (#0)\nI1207 12:22:32.556983   369 solver.cpp:404]     Test net output #0: accuracy = 
0.198\nI1207 12:22:32.557613   369 solver.cpp:404]     Test net output #1: loss = 14.8153 (* 1 = 14.8153 loss)\nI1207 12:22:33.430258   369 solver.cpp:228] Iteration 16500, loss = 11.9029\nI1207 12:22:33.430301   369 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1207 12:22:33.430320   369 solver.cpp:244]     Train net output #1: loss = 11.9029 (* 1 = 11.9029 loss)\nI1207 12:22:33.503438   369 sgd_solver.cpp:166] Iteration 16500, lr = 2.475\nI1207 12:22:33.512660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46014 > 2) by scale factor 0.812963\nI1207 12:22:34.449301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20215 > 2) by scale factor 0.908202\nI1207 12:22:35.385589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80339 > 2) by scale factor 0.713421\nI1207 12:22:36.321835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68018 > 2) by scale factor 0.543452\nI1207 12:22:37.258178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5789 > 2) by scale factor 0.775525\nI1207 12:22:38.194435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48693 > 2) by scale factor 0.573571\nI1207 12:22:39.130937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36364 > 2) by scale factor 0.846154\nI1207 12:22:40.067303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44215 > 2) by scale factor 0.81895\nI1207 12:22:41.003947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31005 > 2) by scale factor 0.60422\nI1207 12:22:41.940264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09351 > 2) by scale factor 0.646514\nI1207 12:22:42.876245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62469 > 2) by scale factor 0.551772\nI1207 12:22:43.812234   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.425 > 2) by scale factor 0.824742\nI1207 12:22:44.748742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14024 > 2) by scale factor 0.636894\nI1207 12:22:45.684902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10717 > 2) by scale factor 0.643672\nI1207 12:22:46.621359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77131 > 2) by scale factor 0.53032\nI1207 12:22:47.558058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05875 > 2) by scale factor 0.653861\nI1207 12:22:48.494694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03812 > 2) by scale factor 0.658303\nI1207 12:22:49.431118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22432 > 2) by scale factor 0.473448\nI1207 12:22:50.367777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12384 > 2) by scale factor 0.941691\nI1207 12:22:51.304283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8614 > 2) by scale factor 0.69896\nI1207 12:22:52.240499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25346 > 2) by scale factor 0.470205\nI1207 12:22:54.111135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28693 > 2) by scale factor 0.60847\nI1207 12:22:55.047169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32307 > 2) by scale factor 0.601853\nI1207 12:22:55.983816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40173 > 2) by scale factor 0.587937\nI1207 12:22:56.920162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74661 > 2) by scale factor 0.421353\nI1207 12:22:57.856052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.88889 > 2) by scale factor 0.40909\nI1207 12:22:58.792804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62405 > 2) by scale factor 0.551868\nI1207 12:22:59.729246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13762 > 2) by scale factor 0.48337\nI1207 12:23:00.664799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71928 > 2) by scale factor 0.423793\nI1207 12:23:01.601078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45343 > 2) by scale factor 0.449092\nI1207 12:23:02.537412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01369 > 2) by scale factor 0.498294\nI1207 12:23:03.473467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52404 > 2) by scale factor 0.442083\nI1207 12:23:04.411308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52576 > 2) by scale factor 0.567253\nI1207 12:23:05.349731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31429 > 2) by scale factor 0.463575\nI1207 12:23:06.300626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25382 > 2) by scale factor 0.470165\nI1207 12:23:07.238842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85814 > 2) by scale factor 0.518385\nI1207 12:23:08.177539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28866 > 2) by scale factor 0.466346\nI1207 12:23:09.115695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.35314 > 2) by scale factor 0.373613\nI1207 12:23:10.054049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89517 > 2) by scale factor 0.513456\nI1207 12:23:10.992660   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19927 > 2) by scale factor 0.476273\nI1207 12:23:11.931524   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67767 > 2) by scale factor 0.427563\nI1207 12:23:12.870036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50108 > 2) by scale factor 0.571253\nI1207 12:23:13.808411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12877 > 2) by scale factor 0.484406\nI1207 12:23:14.747270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2094 > 2) by scale factor 0.623169\nI1207 12:23:15.685590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97666 > 2) by scale factor 0.502934\nI1207 12:23:16.623940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91569 > 2) by scale factor 0.685944\nI1207 12:23:17.562818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53763 > 2) by scale factor 0.56535\nI1207 12:23:18.501130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59749 > 2) by scale factor 0.555943\nI1207 12:23:19.439625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39792 > 2) by scale factor 0.588595\nI1207 12:23:20.378161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71716 > 2) by scale factor 0.538045\nI1207 12:23:21.316226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76794 > 2) by scale factor 0.419469\nI1207 12:23:22.254576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72417 > 2) by scale factor 0.537033\nI1207 12:23:23.192369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78075 > 2) by scale factor 0.528996\nI1207 12:23:24.130676   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41475 > 2) by scale factor 0.453027\nI1207 12:23:25.068984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.2842 > 2) by scale factor 0.608976\nI1207 12:23:26.007274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90114 > 2) by scale factor 0.512671\nI1207 12:23:26.945722   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8331 > 2) by scale factor 0.705939\nI1207 12:23:27.883836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08194 > 2) by scale factor 0.648942\nI1207 12:23:28.822010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41029 > 2) by scale factor 0.58646\nI1207 12:23:29.760170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03404 > 2) by scale factor 0.659188\nI1207 12:23:30.698129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98247 > 2) by scale factor 0.670584\nI1207 12:23:31.636656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72752 > 2) by scale factor 0.733267\nI1207 12:23:32.574990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16441 > 2) by scale factor 0.632029\nI1207 12:23:33.513826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45058 > 2) by scale factor 0.816135\nI1207 12:23:34.452950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96013 > 2) by scale factor 0.675645\nI1207 12:23:35.390944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59112 > 2) by scale factor 0.771868\nI1207 12:23:36.329666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95629 > 2) by scale factor 0.676523\nI1207 12:23:37.268115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54197 > 2) by scale factor 0.564658\nI1207 12:23:38.206317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19969 > 2) by scale factor 0.62506\nI1207 12:23:39.144889   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59156 > 2) by scale factor 0.556861\nI1207 12:23:40.082902   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54116 > 2) by scale factor 0.440416\nI1207 12:23:41.021131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10954 > 2) by scale factor 0.643182\nI1207 12:23:41.959353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67458 > 2) by scale factor 0.427846\nI1207 12:23:42.898012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01586 > 2) by scale factor 0.66316\nI1207 12:23:43.836762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44682 > 2) by scale factor 0.817387\nI1207 12:23:44.774874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83901 > 2) by scale factor 0.704471\nI1207 12:23:45.713313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18368 > 2) by scale factor 0.478048\nI1207 12:23:46.651584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20245 > 2) by scale factor 0.624522\nI1207 12:23:47.589749   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10505 > 2) by scale factor 0.950096\nI1207 12:23:48.528045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8431 > 2) by scale factor 0.520414\nI1207 12:23:49.466651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86891 > 2) by scale factor 0.69713\nI1207 12:23:50.404866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53586 > 2) by scale factor 0.565633\nI1207 12:23:51.342998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93722 > 2) by scale factor 0.507973\nI1207 12:23:52.281632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.33845 > 2) by scale factor 0.460994\nI1207 12:23:53.219944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03905 > 2) by scale factor 0.98085\nI1207 12:23:54.157359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32631 > 2) by scale factor 0.462288\nI1207 12:23:55.094907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12768 > 2) by scale factor 0.93999\nI1207 12:23:56.032512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48085 > 2) by scale factor 0.574572\nI1207 12:23:56.969888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78977 > 2) by scale factor 0.716905\nI1207 12:23:57.907482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20411 > 2) by scale factor 0.624198\nI1207 12:23:58.845326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85533 > 2) by scale factor 0.700445\nI1207 12:23:59.782980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86617 > 2) by scale factor 0.517307\nI1207 12:24:00.720512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45482 > 2) by scale factor 0.814724\nI1207 12:24:01.658149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08784 > 2) by scale factor 0.647703\nI1207 12:24:02.596031   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61411 > 2) by scale factor 0.553387\nI1207 12:24:03.533700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53685 > 2) by scale factor 0.565475\nI1207 12:24:04.471974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72411 > 2) by scale factor 0.537041\nI1207 12:24:05.409621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67234 > 2) by scale factor 0.748408\nI1207 12:24:06.346998   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04075 > 2) by scale factor 0.980032\nI1207 12:24:06.357142   369 solver.cpp:337] Iteration 16600, Testing net (#0)\nI1207 12:24:58.625109   369 solver.cpp:404]     Test net output #0: accuracy = 0.1939\nI1207 12:24:58.625731   369 solver.cpp:404]     Test net output #1: loss = 20.1524 (* 1 = 20.1524 loss)\nI1207 12:24:59.497280   369 solver.cpp:228] Iteration 16600, loss = 21.4198\nI1207 12:24:59.497323   369 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1207 12:24:59.497340   369 solver.cpp:244]     Train net output #1: loss = 21.4198 (* 1 = 21.4198 loss)\nI1207 12:24:59.576575   369 sgd_solver.cpp:166] Iteration 16600, lr = 2.49\nI1207 12:24:59.585798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30796 > 2) by scale factor 0.604602\nI1207 12:25:00.521906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39256 > 2) by scale factor 0.589525\nI1207 12:25:01.458117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59752 > 2) by scale factor 0.769966\nI1207 12:25:02.393846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91483 > 2) by scale factor 0.510878\nI1207 12:25:03.329988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48652 > 2) by scale factor 0.573637\nI1207 12:25:04.266227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38678 > 2) by scale factor 0.455916\nI1207 12:25:05.201409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56463 > 2) by scale factor 0.561068\nI1207 12:25:06.137529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.584 > 2) by scale factor 0.773995\nI1207 12:25:07.073101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87957 > 2) by scale factor 0.694549\nI1207 12:25:08.008177   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2679 > 2) by scale factor 0.612013\nI1207 12:25:08.943241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05215 > 2) by scale factor 0.655275\nI1207 12:25:09.878754   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89263 > 2) by scale factor 0.691412\nI1207 12:25:10.814211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69081 > 2) by scale factor 0.541887\nI1207 12:25:11.749706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46545 > 2) by scale factor 0.577125\nI1207 12:25:12.684964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13632 > 2) by scale factor 0.637689\nI1207 12:25:13.620498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59907 > 2) by scale factor 0.769506\nI1207 12:25:14.556115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69294 > 2) by scale factor 0.742683\nI1207 12:25:15.489012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04594 > 2) by scale factor 0.494322\nI1207 12:25:16.421308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4482 > 2) by scale factor 0.44962\nI1207 12:25:17.354442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91376 > 2) by scale factor 0.511017\nI1207 12:25:18.287256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16713 > 2) by scale factor 0.479947\nI1207 12:25:19.219576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01981 > 2) by scale factor 0.662293\nI1207 12:25:20.151911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09188 > 2) by scale factor 0.646856\nI1207 12:25:21.084229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.7315 > 2) by scale factor 0.732199\nI1207 12:25:22.016602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49172 > 2) by scale factor 0.572783\nI1207 12:25:22.949242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44339 > 2) by scale factor 0.818536\nI1207 12:25:23.881729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81183 > 2) by scale factor 0.524682\nI1207 12:25:24.814180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5419 > 2) by scale factor 0.564669\nI1207 12:25:25.746644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26552 > 2) by scale factor 0.612459\nI1207 12:25:26.679250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27996 > 2) by scale factor 0.467294\nI1207 12:25:27.611999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90377 > 2) by scale factor 0.512325\nI1207 12:25:28.544656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34866 > 2) by scale factor 0.459912\nI1207 12:25:29.477213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30166 > 2) by scale factor 0.464936\nI1207 12:25:30.410528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50184 > 2) by scale factor 0.444262\nI1207 12:25:31.343255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94884 > 2) by scale factor 0.506477\nI1207 12:25:32.280623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32465 > 2) by scale factor 0.462466\nI1207 12:25:33.219869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29054 > 2) by scale factor 0.873157\nI1207 12:25:34.159242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33658 > 2) by scale factor 0.461193\nI1207 12:25:35.098477   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48378 > 2) by scale factor 0.805224\nI1207 12:25:36.037865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.28077 > 2) by scale factor 0.378733\nI1207 12:25:36.976362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59786 > 2) by scale factor 0.555886\nI1207 12:25:37.915282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55265 > 2) by scale factor 0.7835\nI1207 12:25:38.853765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14101 > 2) by scale factor 0.636737\nI1207 12:25:39.792248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76481 > 2) by scale factor 0.723377\nI1207 12:25:40.731003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23024 > 2) by scale factor 0.619149\nI1207 12:25:41.669589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71206 > 2) by scale factor 0.538785\nI1207 12:25:42.608264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90134 > 2) by scale factor 0.689336\nI1207 12:25:43.547245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71722 > 2) by scale factor 0.736047\nI1207 12:25:44.485887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13168 > 2) by scale factor 0.638635\nI1207 12:25:45.424347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35836 > 2) by scale factor 0.595529\nI1207 12:25:46.363008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33198 > 2) by scale factor 0.600243\nI1207 12:25:47.301336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08513 > 2) by scale factor 0.489581\nI1207 12:25:48.240214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.08505 > 2) by scale factor 0.648288\nI1207 12:25:49.178720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18571 > 2) by scale factor 0.915036\nI1207 12:25:50.117794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13637 > 2) by scale factor 0.483516\nI1207 12:25:51.056601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12989 > 2) by scale factor 0.484274\nI1207 12:25:51.995134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91572 > 2) by scale factor 0.685936\nI1207 12:25:52.934273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41431 > 2) by scale factor 0.828394\nI1207 12:25:53.872483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49562 > 2) by scale factor 0.572144\nI1207 12:25:54.810729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26534 > 2) by scale factor 0.468895\nI1207 12:25:55.749224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20728 > 2) by scale factor 0.475366\nI1207 12:25:56.687480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48125 > 2) by scale factor 0.806046\nI1207 12:25:57.626178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01437 > 2) by scale factor 0.49821\nI1207 12:25:58.574847   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50771 > 2) by scale factor 0.443684\nI1207 12:25:59.523213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36519 > 2) by scale factor 0.59432\nI1207 12:26:00.462512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56767 > 2) by scale factor 0.778917\nI1207 12:26:01.404016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95773 > 2) by scale factor 0.50534\nI1207 12:26:02.343350   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59963 > 2) by scale factor 0.555613\nI1207 12:26:03.283017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93682 > 2) by scale factor 0.508024\nI1207 12:26:04.222043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74029 > 2) by scale factor 0.534719\nI1207 12:26:05.161341   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00952 > 2) by scale factor 0.664558\nI1207 12:26:06.100420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04928 > 2) by scale factor 0.655892\nI1207 12:26:07.039955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8377 > 2) by scale factor 0.704797\nI1207 12:26:07.979167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57658 > 2) by scale factor 0.776223\nI1207 12:26:08.918216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34424 > 2) by scale factor 0.598043\nI1207 12:26:09.857264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00109 > 2) by scale factor 0.499864\nI1207 12:26:10.796303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57524 > 2) by scale factor 0.437136\nI1207 12:26:11.735558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90349 > 2) by scale factor 0.688826\nI1207 12:26:12.674852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1027 > 2) by scale factor 0.487484\nI1207 12:26:13.613704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32056 > 2) by scale factor 0.602308\nI1207 12:26:14.552966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16581 > 2) by scale factor 0.923441\nI1207 12:26:15.492202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.2334 > 2) by scale factor 0.618544\nI1207 12:26:16.431339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2706 > 2) by scale factor 0.611508\nI1207 12:26:17.370729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35438 > 2) by scale factor 0.84948\nI1207 12:26:18.309568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18215 > 2) by scale factor 0.628507\nI1207 12:26:19.248693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35427 > 2) by scale factor 0.596255\nI1207 12:26:20.188426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36986 > 2) by scale factor 0.593497\nI1207 12:26:21.127912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22429 > 2) by scale factor 0.473452\nI1207 12:26:22.067265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06899 > 2) by scale factor 0.491523\nI1207 12:26:23.006501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40604 > 2) by scale factor 0.453923\nI1207 12:26:23.945546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42272 > 2) by scale factor 0.584331\nI1207 12:26:24.884546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94525 > 2) by scale factor 0.506939\nI1207 12:26:25.823818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59665 > 2) by scale factor 0.770222\nI1207 12:26:26.762971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58733 > 2) by scale factor 0.772996\nI1207 12:26:27.702042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15241 > 2) by scale factor 0.481648\nI1207 12:26:28.641311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7022 > 2) by scale factor 0.425333\nI1207 12:26:29.580751   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02079 > 2) by scale factor 0.398344\nI1207 12:26:30.520620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08383 > 2) by scale factor 0.489736\nI1207 12:26:31.459825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21627 > 2) by scale factor 0.621838\nI1207 12:26:32.398439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20622 > 2) by scale factor 0.623787\nI1207 12:26:32.408628   369 solver.cpp:337] Iteration 16700, Testing net (#0)\nI1207 12:27:24.656342   369 solver.cpp:404]     Test net output #0: accuracy = 0.15925\nI1207 12:27:24.656986   369 solver.cpp:404]     Test net output #1: loss = 16.4227 (* 1 = 16.4227 loss)\nI1207 12:27:25.524822   369 solver.cpp:228] Iteration 16700, loss = 19.6195\nI1207 12:27:25.524863   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 12:27:25.524880   369 solver.cpp:244]     Train net output #1: loss = 19.6195 (* 1 = 19.6195 loss)\nI1207 12:27:25.606160   369 sgd_solver.cpp:166] Iteration 16700, lr = 2.505\nI1207 12:27:25.615352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2293 > 2) by scale factor 0.472891\nI1207 12:27:26.548586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84503 > 2) by scale factor 0.702981\nI1207 12:27:27.481668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78629 > 2) by scale factor 0.7178\nI1207 12:27:28.415313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16072 > 2) by scale factor 0.632767\nI1207 12:27:29.348815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3191 > 2) by scale factor 0.602573\nI1207 12:27:30.282521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02009 > 2) by scale factor 0.662233\nI1207 12:27:31.216015   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79936 > 2) by scale factor 0.526405\nI1207 12:27:32.149080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07263 > 2) by scale factor 0.491083\nI1207 12:27:33.082123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.20731 > 2) by scale factor 0.384075\nI1207 12:27:34.014947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11454 > 2) by scale factor 0.391042\nI1207 12:27:34.948405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19258 > 2) by scale factor 0.477034\nI1207 12:27:35.881337   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95449 > 2) by scale factor 0.505754\nI1207 12:27:36.814755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9678 > 2) by scale factor 0.402592\nI1207 12:27:37.747776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29958 > 2) by scale factor 0.465162\nI1207 12:27:38.680797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65891 > 2) by scale factor 0.429285\nI1207 12:27:39.613914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67781 > 2) by scale factor 0.543801\nI1207 12:27:40.547379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30572 > 2) by scale factor 0.605012\nI1207 12:27:41.480257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.14043 > 2) by scale factor 0.389072\nI1207 12:27:42.413197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7204 > 2) by scale factor 0.537577\nI1207 12:27:43.345921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57764 > 2) by scale factor 0.559028\nI1207 12:27:44.279184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.80164 > 2) by scale factor 0.526089\nI1207 12:27:45.212294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50149 > 2) by scale factor 0.799524\nI1207 12:27:46.145300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08521 > 2) by scale factor 0.648254\nI1207 12:27:48.009795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29111 > 2) by scale factor 0.607697\nI1207 12:27:48.943161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46476 > 2) by scale factor 0.577241\nI1207 12:27:49.875830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53523 > 2) by scale factor 0.788882\nI1207 12:27:50.808878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52746 > 2) by scale factor 0.791308\nI1207 12:27:51.742051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27868 > 2) by scale factor 0.610002\nI1207 12:27:52.674576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55426 > 2) by scale factor 0.562705\nI1207 12:27:53.607425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78158 > 2) by scale factor 0.418272\nI1207 12:27:54.540468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16358 > 2) by scale factor 0.480355\nI1207 12:27:55.473891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89924 > 2) by scale factor 0.689835\nI1207 12:27:56.407667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6103 > 2) by scale factor 0.433811\nI1207 12:27:57.340723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26595 > 2) by scale factor 0.468828\nI1207 12:27:58.273963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83223 > 2) by scale factor 0.52189\nI1207 12:27:59.211068   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40584 > 2) by scale factor 0.587226\nI1207 12:28:00.149791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37534 > 2) by scale factor 0.841984\nI1207 12:28:01.088376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07246 > 2) by scale factor 0.491103\nI1207 12:28:02.027403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48977 > 2) by scale factor 0.573103\nI1207 12:28:02.966127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3142 > 2) by scale factor 0.603464\nI1207 12:28:03.904661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18653 > 2) by scale factor 0.477722\nI1207 12:28:04.842953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5015 > 2) by scale factor 0.571183\nI1207 12:28:05.781594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44835 > 2) by scale factor 0.579988\nI1207 12:28:06.719987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58711 > 2) by scale factor 0.557552\nI1207 12:28:07.658123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61689 > 2) by scale factor 0.764266\nI1207 12:28:08.596350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23711 > 2) by scale factor 0.617835\nI1207 12:28:09.534759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28808 > 2) by scale factor 0.608258\nI1207 12:28:10.473228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10273 > 2) by scale factor 0.951144\nI1207 12:28:11.411793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92921 > 2) by scale factor 0.509008\nI1207 12:28:12.349802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.807 > 2) by scale factor 0.712505\nI1207 12:28:13.288007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60238 > 2) by scale factor 0.555189\nI1207 12:28:14.226168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98969 > 2) by scale factor 0.668966\nI1207 12:28:15.164425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93583 > 2) by scale factor 0.681238\nI1207 12:28:16.102618   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79026 > 2) by scale factor 0.527668\nI1207 12:28:17.041095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59367 > 2) by scale factor 0.556534\nI1207 12:28:17.979728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79473 > 2) by scale factor 0.527047\nI1207 12:28:18.918244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54201 > 2) by scale factor 0.440334\nI1207 12:28:19.856590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.56747 > 2) by scale factor 0.35923\nI1207 12:28:20.795050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04907 > 2) by scale factor 0.49394\nI1207 12:28:21.733624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98865 > 2) by scale factor 0.501423\nI1207 12:28:22.671728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88827 > 2) by scale factor 0.514368\nI1207 12:28:23.610129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69042 > 2) by scale factor 0.426401\nI1207 12:28:24.548889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39701 > 2) by scale factor 0.588753\nI1207 12:28:25.486992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54801 > 2) by scale factor 0.563697\nI1207 12:28:26.425418   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81387 > 2) by scale factor 0.415466\nI1207 12:28:27.363912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71563 > 2) by scale factor 0.424122\nI1207 12:28:28.302258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87268 > 2) by scale factor 0.696214\nI1207 12:28:29.240056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14627 > 2) by scale factor 0.635672\nI1207 12:28:30.177976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22604 > 2) by scale factor 0.619954\nI1207 12:28:31.115394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60402 > 2) by scale factor 0.554935\nI1207 12:28:32.052901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07721 > 2) by scale factor 0.490531\nI1207 12:28:32.990483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91896 > 2) by scale factor 0.510339\nI1207 12:28:33.927995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26264 > 2) by scale factor 0.613001\nI1207 12:28:34.865417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00957 > 2) by scale factor 0.664547\nI1207 12:28:35.802850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21468 > 2) by scale factor 0.622145\nI1207 12:28:36.740762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32727 > 2) by scale factor 0.859375\nI1207 12:28:37.677934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26211 > 2) by scale factor 0.469252\nI1207 12:28:38.615419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87497 > 2) by scale factor 0.516134\nI1207 12:28:39.553241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.22048 > 2) by scale factor 0.621026\nI1207 12:28:40.490761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04385 > 2) by scale factor 0.494578\nI1207 12:28:41.428128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.767 > 2) by scale factor 0.419551\nI1207 12:28:42.366214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9907 > 2) by scale factor 0.668741\nI1207 12:28:43.303972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37318 > 2) by scale factor 0.457333\nI1207 12:28:44.241569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81444 > 2) by scale factor 0.415417\nI1207 12:28:45.179293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54523 > 2) by scale factor 0.785782\nI1207 12:28:46.117827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44529 > 2) by scale factor 0.8179\nI1207 12:28:47.056069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15066 > 2) by scale factor 0.634788\nI1207 12:28:47.994438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89467 > 2) by scale factor 0.513522\nI1207 12:28:48.932451   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13356 > 2) by scale factor 0.638253\nI1207 12:28:49.870683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42249 > 2) by scale factor 0.452234\nI1207 12:28:50.808729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14035 > 2) by scale factor 0.483052\nI1207 12:28:51.747170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8245 > 2) by scale factor 0.70809\nI1207 12:28:52.685173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97486 > 2) by scale factor 0.503162\nI1207 12:28:53.623034   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79928 > 2) by scale factor 0.714469\nI1207 12:28:54.561518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57461 > 2) by scale factor 0.776817\nI1207 12:28:55.499673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.319 > 2) by scale factor 0.86244\nI1207 12:28:56.438593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29394 > 2) by scale factor 0.607175\nI1207 12:28:57.376570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73632 > 2) by scale factor 0.535286\nI1207 12:28:58.314640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13354 > 2) by scale factor 0.638256\nI1207 12:28:58.324842   369 solver.cpp:337] Iteration 16800, Testing net (#0)\nI1207 12:29:50.593773   369 solver.cpp:404]     Test net output #0: accuracy = 0.1755\nI1207 12:29:50.594409   369 solver.cpp:404]     Test net output #1: loss = 9.53445 (* 1 = 9.53445 loss)\nI1207 12:29:51.462951   369 solver.cpp:228] Iteration 16800, loss = 8.48691\nI1207 12:29:51.462991   369 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1207 12:29:51.463008   369 solver.cpp:244]     Train net output #1: loss = 8.4869 (* 1 = 8.4869 loss)\nI1207 12:29:51.541451   369 sgd_solver.cpp:166] Iteration 16800, lr = 2.52\nI1207 12:29:51.550632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62846 > 2) by scale factor 0.760901\nI1207 12:29:52.483515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63611 > 2) by scale factor 0.758694\nI1207 12:29:53.416716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60568 > 2) by scale factor 0.767553\nI1207 12:29:54.350082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44524 > 2) by scale factor 0.817914\nI1207 12:29:55.283265   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39116 > 2) by scale factor 0.589768\nI1207 12:29:56.216447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6956 > 2) by scale factor 0.541185\nI1207 12:29:57.149324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22111 > 2) by scale factor 0.473809\nI1207 12:29:58.082793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26017 > 2) by scale factor 0.469465\nI1207 12:29:59.016034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51132 > 2) by scale factor 0.44333\nI1207 12:29:59.949400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16527 > 2) by scale factor 0.480161\nI1207 12:30:00.882761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22471 > 2) by scale factor 0.620211\nI1207 12:30:01.815907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11773 > 2) by scale factor 0.641492\nI1207 12:30:02.749112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28381 > 2) by scale factor 0.609049\nI1207 12:30:03.682384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2286 > 2) by scale factor 0.619464\nI1207 12:30:04.615706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0524 > 2) by scale factor 0.493535\nI1207 12:30:05.549037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94499 > 2) by scale factor 0.506972\nI1207 12:30:06.482098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03946 > 2) by scale factor 0.980652\nI1207 12:30:07.415539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09657 > 2) by scale factor 0.488213\nI1207 12:30:08.348448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.2448 > 2) by scale factor 0.471165\nI1207 12:30:09.280971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05846 > 2) by scale factor 0.653923\nI1207 12:30:10.214028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05346 > 2) by scale factor 0.654994\nI1207 12:30:11.146878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70265 > 2) by scale factor 0.740013\nI1207 12:30:12.080126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75212 > 2) by scale factor 0.533032\nI1207 12:30:13.013370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54276 > 2) by scale factor 0.440261\nI1207 12:30:13.946159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47567 > 2) by scale factor 0.575428\nI1207 12:30:14.879078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52605 > 2) by scale factor 0.567208\nI1207 12:30:15.811820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43806 > 2) by scale factor 0.450647\nI1207 12:30:16.744818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46791 > 2) by scale factor 0.576716\nI1207 12:30:17.677717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78299 > 2) by scale factor 0.528682\nI1207 12:30:18.611052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96334 > 2) by scale factor 0.674913\nI1207 12:30:19.544054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13454 > 2) by scale factor 0.48373\nI1207 12:30:20.476778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74321 > 2) by scale factor 0.534301\nI1207 12:30:21.409755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97492 > 2) by scale factor 0.503155\nI1207 12:30:22.343055   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39794 > 2) by scale factor 0.588592\nI1207 12:30:23.276048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5408 > 2) by scale factor 0.564844\nI1207 12:30:24.209113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56475 > 2) by scale factor 0.561049\nI1207 12:30:25.147935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65299 > 2) by scale factor 0.547496\nI1207 12:30:26.086326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01822 > 2) by scale factor 0.497732\nI1207 12:30:27.024715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25642 > 2) by scale factor 0.614172\nI1207 12:30:27.963098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49825 > 2) by scale factor 0.444618\nI1207 12:30:28.901890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9873 > 2) by scale factor 0.669501\nI1207 12:30:29.840526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53311 > 2) by scale factor 0.566073\nI1207 12:30:30.779345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40122 > 2) by scale factor 0.588024\nI1207 12:30:31.717525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94755 > 2) by scale factor 0.678531\nI1207 12:30:32.656318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15444 > 2) by scale factor 0.928316\nI1207 12:30:33.595211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92429 > 2) by scale factor 0.683927\nI1207 12:30:34.533656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9329 > 2) by scale factor 0.681918\nI1207 12:30:35.472311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.96657 > 2) by scale factor 0.504214\nI1207 12:30:36.410706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52317 > 2) by scale factor 0.442168\nI1207 12:30:37.349603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54604 > 2) by scale factor 0.564009\nI1207 12:30:38.287637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13431 > 2) by scale factor 0.638099\nI1207 12:30:39.226205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66749 > 2) by scale factor 0.545332\nI1207 12:30:40.164782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10951 > 2) by scale factor 0.486676\nI1207 12:30:41.103093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66961 > 2) by scale factor 0.749173\nI1207 12:30:42.042120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97421 > 2) by scale factor 0.672447\nI1207 12:30:42.980146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22115 > 2) by scale factor 0.473804\nI1207 12:30:43.918527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50824 > 2) by scale factor 0.443632\nI1207 12:30:44.856890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56073 > 2) by scale factor 0.781028\nI1207 12:30:45.795446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40931 > 2) by scale factor 0.586629\nI1207 12:30:46.734036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45743 > 2) by scale factor 0.448689\nI1207 12:30:47.672425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40076 > 2) by scale factor 0.83307\nI1207 12:30:48.610621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76823 > 2) by scale factor 0.722484\nI1207 12:30:49.549335   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91221 > 2) by scale factor 0.511221\nI1207 12:30:50.487901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74738 > 2) by scale factor 0.727965\nI1207 12:30:51.426287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12571 > 2) by scale factor 0.639856\nI1207 12:30:52.365234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71591 > 2) by scale factor 0.736401\nI1207 12:30:53.303879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13845 > 2) by scale factor 0.483273\nI1207 12:30:54.242394   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49867 > 2) by scale factor 0.444576\nI1207 12:30:55.180790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20127 > 2) by scale factor 0.476047\nI1207 12:30:56.119493   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9814 > 2) by scale factor 0.670826\nI1207 12:30:57.057838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88391 > 2) by scale factor 0.514945\nI1207 12:30:57.996280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17766 > 2) by scale factor 0.629393\nI1207 12:30:58.934528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61813 > 2) by scale factor 0.552772\nI1207 12:30:59.872972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0425 > 2) by scale factor 0.657355\nI1207 12:31:00.810817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42953 > 2) by scale factor 0.58317\nI1207 12:31:01.748883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85542 > 2) by scale factor 0.51875\nI1207 12:31:02.687129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.87265 > 2) by scale factor 0.696221\nI1207 12:31:03.625500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06321 > 2) by scale factor 0.652911\nI1207 12:31:04.563524   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71034 > 2) by scale factor 0.424598\nI1207 12:31:05.502290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03109 > 2) by scale factor 0.496144\nI1207 12:31:06.440522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11257 > 2) by scale factor 0.391192\nI1207 12:31:07.379076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97595 > 2) by scale factor 0.503024\nI1207 12:31:08.317564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95525 > 2) by scale factor 0.505657\nI1207 12:31:10.192260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28067 > 2) by scale factor 0.876936\nI1207 12:31:11.130856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72807 > 2) by scale factor 0.733119\nI1207 12:31:12.069094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34345 > 2) by scale factor 0.853444\nI1207 12:31:13.006940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6812 > 2) by scale factor 0.745936\nI1207 12:31:13.944968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44076 > 2) by scale factor 0.581267\nI1207 12:31:14.883448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06444 > 2) by scale factor 0.968784\nI1207 12:31:15.821771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62207 > 2) by scale factor 0.55217\nI1207 12:31:16.759791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43705 > 2) by scale factor 0.45075\nI1207 12:31:17.698240   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89995 > 2) by scale factor 0.689668\nI1207 12:31:18.636744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42243 > 2) by scale factor 0.58438\nI1207 12:31:19.575188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70493 > 2) by scale factor 0.539821\nI1207 12:31:20.513609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90626 > 2) by scale factor 0.688171\nI1207 12:31:21.451856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14039 > 2) by scale factor 0.636863\nI1207 12:31:22.390408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52583 > 2) by scale factor 0.79182\nI1207 12:31:23.328517   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46405 > 2) by scale factor 0.57736\nI1207 12:31:24.266698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9281 > 2) by scale factor 0.683036\nI1207 12:31:24.276891   369 solver.cpp:337] Iteration 16900, Testing net (#0)\nI1207 12:32:16.717221   369 solver.cpp:404]     Test net output #0: accuracy = 0.1293\nI1207 12:32:16.717808   369 solver.cpp:404]     Test net output #1: loss = 23.2013 (* 1 = 23.2013 loss)\nI1207 12:32:17.604370   369 solver.cpp:228] Iteration 16900, loss = 22.7614\nI1207 12:32:17.604413   369 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1207 12:32:17.604429   369 solver.cpp:244]     Train net output #1: loss = 22.7614 (* 1 = 22.7614 loss)\nI1207 12:32:17.663715   369 sgd_solver.cpp:166] Iteration 16900, lr = 2.535\nI1207 12:32:17.673919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0109 > 2) by scale factor 0.498641\nI1207 12:32:18.610834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68697 > 2) by scale factor 0.426715\nI1207 12:32:19.546628   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64524 > 2) by scale factor 0.548661\nI1207 12:32:20.483192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86083 > 2) by scale factor 0.518024\nI1207 12:32:21.420292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76447 > 2) by scale factor 0.531283\nI1207 12:32:22.357468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32013 > 2) by scale factor 0.462949\nI1207 12:32:23.294549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09468 > 2) by scale factor 0.64627\nI1207 12:32:24.231468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27933 > 2) by scale factor 0.60988\nI1207 12:32:25.168073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33647 > 2) by scale factor 0.599437\nI1207 12:32:26.105087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99385 > 2) by scale factor 0.50077\nI1207 12:32:27.041627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27261 > 2) by scale factor 0.468098\nI1207 12:32:27.978344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84131 > 2) by scale factor 0.7039\nI1207 12:32:28.916023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36771 > 2) by scale factor 0.593875\nI1207 12:32:29.854048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72226 > 2) by scale factor 0.734684\nI1207 12:32:30.790961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31969 > 2) by scale factor 0.862185\nI1207 12:32:31.727792   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84246 > 2) by scale factor 0.703617\nI1207 12:32:32.664253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.70669 > 2) by scale factor 0.539565\nI1207 12:32:33.600425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45754 > 2) by scale factor 0.578446\nI1207 12:32:34.537922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31014 > 2) by scale factor 0.604203\nI1207 12:32:35.474918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95646 > 2) by scale factor 0.676484\nI1207 12:32:36.411975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2486 > 2) by scale factor 0.615649\nI1207 12:32:37.348829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74326 > 2) by scale factor 0.72906\nI1207 12:32:38.285560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19793 > 2) by scale factor 0.909946\nI1207 12:32:39.223768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83584 > 2) by scale factor 0.705258\nI1207 12:32:40.162073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28748 > 2) by scale factor 0.608368\nI1207 12:32:41.099532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5558 > 2) by scale factor 0.562461\nI1207 12:32:42.036948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17845 > 2) by scale factor 0.918082\nI1207 12:32:42.974545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64103 > 2) by scale factor 0.549295\nI1207 12:32:43.912930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98008 > 2) by scale factor 0.671123\nI1207 12:32:44.850945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99334 > 2) by scale factor 0.500834\nI1207 12:32:45.788884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69845 > 2) by scale factor 0.741166\nI1207 12:32:46.727099   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05188 > 2) by scale factor 0.493597\nI1207 12:32:47.666052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16077 > 2) by scale factor 0.632757\nI1207 12:32:48.609596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64142 > 2) by scale factor 0.549236\nI1207 12:32:49.552872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51518 > 2) by scale factor 0.568961\nI1207 12:32:50.495123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52844 > 2) by scale factor 0.566823\nI1207 12:32:51.437994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07037 > 2) by scale factor 0.491356\nI1207 12:32:52.380719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98723 > 2) by scale factor 0.501601\nI1207 12:32:53.323633   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87796 > 2) by scale factor 0.694936\nI1207 12:32:54.266300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94441 > 2) by scale factor 0.507047\nI1207 12:32:55.209048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9524 > 2) by scale factor 0.677414\nI1207 12:32:56.152294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92109 > 2) by scale factor 0.684676\nI1207 12:32:57.094571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07912 > 2) by scale factor 0.649536\nI1207 12:32:58.037062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54846 > 2) by scale factor 0.784788\nI1207 12:32:58.980273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65232 > 2) by scale factor 0.547597\nI1207 12:32:59.922735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.20649 > 2) by scale factor 0.906416\nI1207 12:33:00.865279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82462 > 2) by scale factor 0.708059\nI1207 12:33:01.808367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03518 > 2) by scale factor 0.658939\nI1207 12:33:02.751204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29602 > 2) by scale factor 0.606792\nI1207 12:33:03.694172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60521 > 2) by scale factor 0.767694\nI1207 12:33:04.637136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35956 > 2) by scale factor 0.595316\nI1207 12:33:05.580466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73495 > 2) by scale factor 0.535482\nI1207 12:33:06.523272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02448 > 2) by scale factor 0.66127\nI1207 12:33:07.466076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28225 > 2) by scale factor 0.609339\nI1207 12:33:08.408787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28808 > 2) by scale factor 0.608257\nI1207 12:33:09.351737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90329 > 2) by scale factor 0.40789\nI1207 12:33:10.294713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95536 > 2) by scale factor 0.403603\nI1207 12:33:11.237532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33654 > 2) by scale factor 0.599424\nI1207 12:33:12.180155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9542 > 2) by scale factor 0.505791\nI1207 12:33:13.122450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38141 > 2) by scale factor 0.456474\nI1207 12:33:14.065592   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35545 > 2) by scale factor 0.596045\nI1207 12:33:15.008308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87687 > 2) by scale factor 0.51588\nI1207 12:33:15.951082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37617 > 2) by scale factor 0.592387\nI1207 12:33:16.893764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08265 > 2) by scale factor 0.489878\nI1207 12:33:17.837039   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.867 > 2) by scale factor 0.517197\nI1207 12:33:18.780146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04554 > 2) by scale factor 0.494372\nI1207 12:33:19.722887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11798 > 2) by scale factor 0.485675\nI1207 12:33:20.665521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79176 > 2) by scale factor 0.52746\nI1207 12:33:21.608053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01376 > 2) by scale factor 0.663624\nI1207 12:33:22.550884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05441 > 2) by scale factor 0.49329\nI1207 12:33:23.493824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8145 > 2) by scale factor 0.524315\nI1207 12:33:24.436688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61221 > 2) by scale factor 0.433632\nI1207 12:33:25.379303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93785 > 2) by scale factor 0.680771\nI1207 12:33:26.321936   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54159 > 2) by scale factor 0.564719\nI1207 12:33:27.265130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.12955 > 2) by scale factor 0.484315\nI1207 12:33:28.207453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27755 > 2) by scale factor 0.467557\nI1207 12:33:29.149361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75926 > 2) by scale factor 0.724831\nI1207 12:33:30.091845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88053 > 2) by scale factor 0.694316\nI1207 12:33:31.034627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61221 > 2) by scale factor 0.765634\nI1207 12:33:31.977416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20399 > 2) by scale factor 0.907446\nI1207 12:33:32.920017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41652 > 2) by scale factor 0.827637\nI1207 12:33:33.862942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9616 > 2) by scale factor 0.675312\nI1207 12:33:34.805905   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6612 > 2) by scale factor 0.75154\nI1207 12:33:35.748957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33794 > 2) by scale factor 0.461048\nI1207 12:33:36.691223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36644 > 2) by scale factor 0.458039\nI1207 12:33:37.633360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80906 > 2) by scale factor 0.415881\nI1207 12:33:38.575820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22614 > 2) by scale factor 0.473245\nI1207 12:33:39.518227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65839 > 2) by scale factor 0.752335\nI1207 12:33:40.460533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09931 > 2) by scale factor 0.487887\nI1207 12:33:41.402936   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44362 > 2) by scale factor 0.580785\nI1207 12:33:42.345520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99167 > 2) by scale factor 0.501043\nI1207 12:33:43.288345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83331 > 2) by scale factor 0.521743\nI1207 12:33:44.231045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62725 > 2) by scale factor 0.551382\nI1207 12:33:45.173310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87093 > 2) by scale factor 0.410599\nI1207 12:33:46.115882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90403 > 2) by scale factor 0.407828\nI1207 12:33:47.058224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38585 > 2) by scale factor 0.590694\nI1207 12:33:48.001062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52759 > 2) by scale factor 0.56696\nI1207 12:33:48.943991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53535 > 2) by scale factor 0.565715\nI1207 12:33:49.886946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21145 > 2) by scale factor 0.622772\nI1207 12:33:50.829813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82093 > 2) by scale factor 0.523433\nI1207 12:33:50.841800   369 solver.cpp:337] Iteration 17000, Testing net (#0)\nI1207 12:34:43.792948   369 solver.cpp:404]     Test net output #0: accuracy = 0.1769\nI1207 12:34:43.793576   369 solver.cpp:404]     Test net output #1: loss = 19.1955 (* 1 = 19.1955 loss)\nI1207 12:34:44.664791   369 solver.cpp:228] Iteration 17000, loss = 18.8909\nI1207 12:34:44.664844   369 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1207 12:34:44.664875   369 solver.cpp:244]     Train net output #1: 
loss = 18.8909 (* 1 = 18.8909 loss)\nI1207 12:34:44.741274   369 sgd_solver.cpp:166] Iteration 17000, lr = 2.55\nI1207 12:34:44.751499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91665 > 2) by scale factor 0.685717\nI1207 12:34:45.689446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70576 > 2) by scale factor 0.739163\nI1207 12:34:46.627699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46501 > 2) by scale factor 0.5772\nI1207 12:34:47.565620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82523 > 2) by scale factor 0.707908\nI1207 12:34:48.503053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31155 > 2) by scale factor 0.603947\nI1207 12:34:49.441234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89824 > 2) by scale factor 0.690073\nI1207 12:34:50.379259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09938 > 2) by scale factor 0.64529\nI1207 12:34:51.317023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54521 > 2) by scale factor 0.78579\nI1207 12:34:52.255340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09295 > 2) by scale factor 0.646633\nI1207 12:34:53.193342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60508 > 2) by scale factor 0.554773\nI1207 12:34:54.131075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70887 > 2) by scale factor 0.738316\nI1207 12:34:55.068770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43306 > 2) by scale factor 0.82201\nI1207 12:34:56.006418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57734 > 2) by scale factor 0.775995\nI1207 12:34:56.944339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.92625 > 2) by scale factor 0.683468\nI1207 12:34:57.882184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05987 > 2) by scale factor 0.492626\nI1207 12:34:58.820165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76578 > 2) by scale factor 0.419659\nI1207 12:34:59.758131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98351 > 2) by scale factor 0.670351\nI1207 12:35:00.695948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32496 > 2) by scale factor 0.462432\nI1207 12:35:01.634088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30998 > 2) by scale factor 0.604233\nI1207 12:35:02.571506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03168 > 2) by scale factor 0.6597\nI1207 12:35:03.509838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87484 > 2) by scale factor 0.695692\nI1207 12:35:04.447638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11862 > 2) by scale factor 0.4856\nI1207 12:35:05.385736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1921 > 2) by scale factor 0.477088\nI1207 12:35:06.324053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33618 > 2) by scale factor 0.461236\nI1207 12:35:07.262522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61972 > 2) by scale factor 0.552529\nI1207 12:35:08.200551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0498 > 2) by scale factor 0.655781\nI1207 12:35:09.138515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01722 > 2) by scale factor 0.497857\nI1207 12:35:10.076751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23868 > 2) by scale factor 0.617535\nI1207 12:35:11.015240   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17715 > 2) by scale factor 0.629494\nI1207 12:35:11.953303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43914 > 2) by scale factor 0.581541\nI1207 12:35:12.891461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54085 > 2) by scale factor 0.564837\nI1207 12:35:13.829279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10686 > 2) by scale factor 0.486989\nI1207 12:35:14.768074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00642 > 2) by scale factor 0.499198\nI1207 12:35:15.711004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71645 > 2) by scale factor 0.736255\nI1207 12:35:16.654587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85473 > 2) by scale factor 0.518844\nI1207 12:35:17.597703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66836 > 2) by scale factor 0.749523\nI1207 12:35:18.540300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1563 > 2) by scale factor 0.481198\nI1207 12:35:20.424338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92568 > 2) by scale factor 0.683601\nI1207 12:35:21.367254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76908 > 2) by scale factor 0.722261\nI1207 12:35:22.310160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13436 > 2) by scale factor 0.638089\nI1207 12:35:23.253881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70833 > 2) by scale factor 0.738462\nI1207 12:35:24.197686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14522 > 2) by scale factor 0.635886\nI1207 12:35:25.141619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.72635 > 2) by scale factor 0.536718\nI1207 12:35:26.085583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68321 > 2) by scale factor 0.745377\nI1207 12:35:27.029172   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46485 > 2) by scale factor 0.811409\nI1207 12:35:27.972504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91873 > 2) by scale factor 0.685229\nI1207 12:35:28.915504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65152 > 2) by scale factor 0.547717\nI1207 12:35:29.858672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81641 > 2) by scale factor 0.524053\nI1207 12:35:30.801584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09567 > 2) by scale factor 0.488321\nI1207 12:35:31.744601   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60325 > 2) by scale factor 0.768269\nI1207 12:35:32.687845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60732 > 2) by scale factor 0.767071\nI1207 12:35:33.630728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37063 > 2) by scale factor 0.4576\nI1207 12:35:34.573789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37022 > 2) by scale factor 0.593433\nI1207 12:35:35.516651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06329 > 2) by scale factor 0.652894\nI1207 12:35:36.459662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66134 > 2) by scale factor 0.751501\nI1207 12:35:37.402923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94281 > 2) by scale factor 0.679623\nI1207 12:35:38.346014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46451 > 2) by scale factor 0.81152\nI1207 12:35:39.288875   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30999 > 2) by scale factor 0.464038\nI1207 12:35:40.231981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11427 > 2) by scale factor 0.486113\nI1207 12:35:41.174679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68204 > 2) by scale factor 0.543178\nI1207 12:35:42.117125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53418 > 2) by scale factor 0.565901\nI1207 12:35:43.059434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02657 > 2) by scale factor 0.496701\nI1207 12:35:44.001786   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15739 > 2) by scale factor 0.481071\nI1207 12:35:44.945320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45237 > 2) by scale factor 0.449198\nI1207 12:35:45.888027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31639 > 2) by scale factor 0.863414\nI1207 12:35:46.830984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86521 > 2) by scale factor 0.698029\nI1207 12:35:47.774075   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71171 > 2) by scale factor 0.538835\nI1207 12:35:48.716753   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44383 > 2) by scale factor 0.818386\nI1207 12:35:49.659545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73542 > 2) by scale factor 0.731149\nI1207 12:35:50.601991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15772 > 2) by scale factor 0.633369\nI1207 12:35:51.545166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90494 > 2) by scale factor 0.512172\nI1207 12:35:52.488657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.44718 > 2) by scale factor 0.580185\nI1207 12:35:53.431952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89622 > 2) by scale factor 0.513318\nI1207 12:35:54.374900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49629 > 2) by scale factor 0.444811\nI1207 12:35:55.317977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65393 > 2) by scale factor 0.547355\nI1207 12:35:56.260875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33776 > 2) by scale factor 0.461068\nI1207 12:35:57.203850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04377 > 2) by scale factor 0.65708\nI1207 12:35:58.146667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95749 > 2) by scale factor 0.67625\nI1207 12:35:59.089035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02409 > 2) by scale factor 0.661355\nI1207 12:36:00.030937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27756 > 2) by scale factor 0.61021\nI1207 12:36:00.974120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02981 > 2) by scale factor 0.496302\nI1207 12:36:01.916724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67873 > 2) by scale factor 0.543665\nI1207 12:36:02.859560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40586 > 2) by scale factor 0.587224\nI1207 12:36:03.802379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54831 > 2) by scale factor 0.563649\nI1207 12:36:04.745110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07544 > 2) by scale factor 0.650313\nI1207 12:36:05.688274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36146 > 2) by scale factor 0.59498\nI1207 12:36:06.631084   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0822 > 2) by scale factor 0.648888\nI1207 12:36:07.573791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02056 > 2) by scale factor 0.662128\nI1207 12:36:08.516631   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80239 > 2) by scale factor 0.713676\nI1207 12:36:09.460680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48159 > 2) by scale factor 0.44627\nI1207 12:36:10.404215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53002 > 2) by scale factor 0.566569\nI1207 12:36:11.347946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4893 > 2) by scale factor 0.445504\nI1207 12:36:12.291520   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14479 > 2) by scale factor 0.482533\nI1207 12:36:13.235236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3496 > 2) by scale factor 0.851209\nI1207 12:36:14.178988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16137 > 2) by scale factor 0.925337\nI1207 12:36:15.122750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54625 > 2) by scale factor 0.785469\nI1207 12:36:17.007608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46173 > 2) by scale factor 0.448257\nI1207 12:36:17.951527   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95848 > 2) by scale factor 0.505244\nI1207 12:36:17.963552   369 solver.cpp:337] Iteration 17100, Testing net (#0)\nI1207 12:37:10.914724   369 solver.cpp:404]     Test net output #0: accuracy = 0.10785\nI1207 12:37:10.915374   369 solver.cpp:404]     Test net output #1: loss = 26.132 (* 1 = 26.132 loss)\nI1207 12:37:11.785961   369 solver.cpp:228] Iteration 17100, loss = 28.3941\nI1207 12:37:11.786005   
369 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI1207 12:37:11.786031   369 solver.cpp:244]     Train net output #1: loss = 28.3941 (* 1 = 28.3941 loss)\nI1207 12:37:11.859535   369 sgd_solver.cpp:166] Iteration 17100, lr = 2.565\nI1207 12:37:11.869727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67422 > 2) by scale factor 0.427879\nI1207 12:37:12.807696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52585 > 2) by scale factor 0.441906\nI1207 12:37:13.745637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21091 > 2) by scale factor 0.474957\nI1207 12:37:14.684061   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74074 > 2) by scale factor 0.534653\nI1207 12:37:15.622046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07504 > 2) by scale factor 0.490793\nI1207 12:37:16.559851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59679 > 2) by scale factor 0.770182\nI1207 12:37:17.496894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18165 > 2) by scale factor 0.47828\nI1207 12:37:18.434114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02552 > 2) by scale factor 0.49683\nI1207 12:37:19.371661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49164 > 2) by scale factor 0.572796\nI1207 12:37:20.308833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51288 > 2) by scale factor 0.569334\nI1207 12:37:21.246702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93948 > 2) by scale factor 0.507681\nI1207 12:37:22.184554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.071 > 2) by scale factor 0.651254\nI1207 12:37:23.122707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.09366 > 2) by scale factor 0.646483\nI1207 12:37:24.060178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14559 > 2) by scale factor 0.482441\nI1207 12:37:24.997859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73802 > 2) by scale factor 0.730455\nI1207 12:37:25.935243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06763 > 2) by scale factor 0.651969\nI1207 12:37:26.872220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32129 > 2) by scale factor 0.602176\nI1207 12:37:27.809752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80212 > 2) by scale factor 0.713746\nI1207 12:37:28.747179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80812 > 2) by scale factor 0.712219\nI1207 12:37:29.684381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87716 > 2) by scale factor 0.695129\nI1207 12:37:30.622081   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2184 > 2) by scale factor 0.474114\nI1207 12:37:31.558658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43826 > 2) by scale factor 0.820256\nI1207 12:37:32.495609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.689 > 2) by scale factor 0.743771\nI1207 12:37:33.432837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7191 > 2) by scale factor 0.735539\nI1207 12:37:34.371007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31973 > 2) by scale factor 0.602459\nI1207 12:37:35.308436   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62501 > 2) by scale factor 0.551722\nI1207 12:37:37.181366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00125 > 2) by scale factor 0.66639\nI1207 12:37:38.118728   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25889 > 2) by scale factor 0.613705\nI1207 12:37:39.056138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3035 > 2) by scale factor 0.605419\nI1207 12:37:39.993736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60283 > 2) by scale factor 0.55512\nI1207 12:37:40.931318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26318 > 2) by scale factor 0.612899\nI1207 12:37:41.869510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41958 > 2) by scale factor 0.826591\nI1207 12:37:42.806771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17218 > 2) by scale factor 0.630482\nI1207 12:37:43.744226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93141 > 2) by scale factor 0.508724\nI1207 12:37:44.684573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3377 > 2) by scale factor 0.599215\nI1207 12:37:45.628111   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66615 > 2) by scale factor 0.545531\nI1207 12:37:46.572139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13425 > 2) by scale factor 0.937097\nI1207 12:37:47.516238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51417 > 2) by scale factor 0.569124\nI1207 12:37:48.459919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88232 > 2) by scale factor 0.515156\nI1207 12:37:49.403764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95133 > 2) by scale factor 0.506158\nI1207 12:37:50.347582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06912 > 2) by scale factor 0.491507\nI1207 12:37:51.291335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.01562 > 2) by scale factor 0.663213\nI1207 12:37:52.234820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43315 > 2) by scale factor 0.821979\nI1207 12:37:53.178417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02912 > 2) by scale factor 0.660257\nI1207 12:37:54.121745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72263 > 2) by scale factor 0.734583\nI1207 12:37:55.065556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25488 > 2) by scale factor 0.886966\nI1207 12:37:56.009021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51615 > 2) by scale factor 0.442855\nI1207 12:37:56.953274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66272 > 2) by scale factor 0.546042\nI1207 12:37:57.897498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7236 > 2) by scale factor 0.734323\nI1207 12:37:58.841689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58051 > 2) by scale factor 0.775041\nI1207 12:37:59.785867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67241 > 2) by scale factor 0.748388\nI1207 12:38:00.729758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80741 > 2) by scale factor 0.712399\nI1207 12:38:01.673521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38713 > 2) by scale factor 0.59047\nI1207 12:38:02.617686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64127 > 2) by scale factor 0.549259\nI1207 12:38:03.561697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2405 > 2) by scale factor 0.617188\nI1207 12:38:04.506028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37478 > 2) by scale factor 0.59263\nI1207 12:38:05.450203   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4329 > 2) by scale factor 0.822064\nI1207 12:38:06.394526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68642 > 2) by scale factor 0.744485\nI1207 12:38:07.338773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29174 > 2) by scale factor 0.607582\nI1207 12:38:08.282452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32461 > 2) by scale factor 0.46247\nI1207 12:38:09.226909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61815 > 2) by scale factor 0.552769\nI1207 12:38:10.171097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68252 > 2) by scale factor 0.543107\nI1207 12:38:11.115669   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00026 > 2) by scale factor 0.666609\nI1207 12:38:12.060333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55971 > 2) by scale factor 0.561844\nI1207 12:38:13.004019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96453 > 2) by scale factor 0.674644\nI1207 12:38:13.948216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23763 > 2) by scale factor 0.471962\nI1207 12:38:14.892307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01421 > 2) by scale factor 0.49823\nI1207 12:38:15.836509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07678 > 2) by scale factor 0.65003\nI1207 12:38:16.779912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65266 > 2) by scale factor 0.547546\nI1207 12:38:17.723062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62988 > 2) by scale factor 0.760492\nI1207 12:38:18.666218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.54808 > 2) by scale factor 0.563685\nI1207 12:38:19.608810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15489 > 2) by scale factor 0.928122\nI1207 12:38:20.552428   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85482 > 2) by scale factor 0.700569\nI1207 12:38:21.495586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77015 > 2) by scale factor 0.419274\nI1207 12:38:22.438810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58584 > 2) by scale factor 0.55775\nI1207 12:38:23.382011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87766 > 2) by scale factor 0.515774\nI1207 12:38:24.325242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8081 > 2) by scale factor 0.712226\nI1207 12:38:25.268211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67392 > 2) by scale factor 0.544377\nI1207 12:38:26.211385   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86533 > 2) by scale factor 0.698\nI1207 12:38:27.154667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72103 > 2) by scale factor 0.537486\nI1207 12:38:28.097751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88487 > 2) by scale factor 0.514818\nI1207 12:38:29.040511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30518 > 2) by scale factor 0.464557\nI1207 12:38:29.983829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87626 > 2) by scale factor 0.515962\nI1207 12:38:30.926952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48712 > 2) by scale factor 0.573539\nI1207 12:38:31.870280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08823 > 2) by scale factor 0.48921\nI1207 12:38:32.813292   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35607 > 2) by scale factor 0.595934\nI1207 12:38:33.756372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44819 > 2) by scale factor 0.580015\nI1207 12:38:34.699091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65607 > 2) by scale factor 0.547035\nI1207 12:38:35.642026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70354 > 2) by scale factor 0.540024\nI1207 12:38:36.584661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45315 > 2) by scale factor 0.579181\nI1207 12:38:37.527163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07939 > 2) by scale factor 0.64948\nI1207 12:38:38.470360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78821 > 2) by scale factor 0.717307\nI1207 12:38:39.413686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45806 > 2) by scale factor 0.57836\nI1207 12:38:40.356585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01794 > 2) by scale factor 0.497768\nI1207 12:38:41.299417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62417 > 2) by scale factor 0.43251\nI1207 12:38:42.242866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92062 > 2) by scale factor 0.510123\nI1207 12:38:43.185430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30975 > 2) by scale factor 0.604275\nI1207 12:38:44.127565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30384 > 2) by scale factor 0.464702\nI1207 12:38:45.070483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54491 > 2) by scale factor 0.564189\nI1207 12:38:45.082511   369 solver.cpp:337] Iteration 17200, Testing net (#0)\nI1207 
12:39:38.051599   369 solver.cpp:404]     Test net output #0: accuracy = 0.1918\nI1207 12:39:38.052176   369 solver.cpp:404]     Test net output #1: loss = 18.2515 (* 1 = 18.2515 loss)\nI1207 12:39:38.924487   369 solver.cpp:228] Iteration 17200, loss = 19.7015\nI1207 12:39:38.924531   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 12:39:38.924557   369 solver.cpp:244]     Train net output #1: loss = 19.7015 (* 1 = 19.7015 loss)\nI1207 12:39:39.004698   369 sgd_solver.cpp:166] Iteration 17200, lr = 2.58\nI1207 12:39:39.014886   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77526 > 2) by scale factor 0.720653\nI1207 12:39:39.952872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56715 > 2) by scale factor 0.560671\nI1207 12:39:40.890538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28282 > 2) by scale factor 0.609233\nI1207 12:39:41.829074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15615 > 2) by scale factor 0.481215\nI1207 12:39:42.766521   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63599 > 2) by scale factor 0.758728\nI1207 12:39:43.704412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30264 > 2) by scale factor 0.868567\nI1207 12:39:44.642400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27018 > 2) by scale factor 0.880989\nI1207 12:39:45.580456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10335 > 2) by scale factor 0.644464\nI1207 12:39:46.518635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27176 > 2) by scale factor 0.880373\nI1207 12:39:48.391494   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1263 > 2) by scale factor 0.639734\nI1207 12:39:49.329296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.6643 > 2) by scale factor 0.750667\nI1207 12:39:50.267091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89746 > 2) by scale factor 0.513155\nI1207 12:39:51.204653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43241 > 2) by scale factor 0.58268\nI1207 12:39:52.142817   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72854 > 2) by scale factor 0.732992\nI1207 12:39:53.080667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57404 > 2) by scale factor 0.43725\nI1207 12:39:54.018347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59604 > 2) by scale factor 0.770405\nI1207 12:39:54.955955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96852 > 2) by scale factor 0.673737\nI1207 12:39:55.893052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28734 > 2) by scale factor 0.466489\nI1207 12:39:56.830523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.572 > 2) by scale factor 0.437446\nI1207 12:39:57.767812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2543 > 2) by scale factor 0.470112\nI1207 12:39:58.705435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25847 > 2) by scale factor 0.469652\nI1207 12:40:00.577813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65666 > 2) by scale factor 0.752824\nI1207 12:40:01.514549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32835 > 2) by scale factor 0.858978\nI1207 12:40:02.452143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22096 > 2) by scale factor 0.900512\nI1207 12:40:03.389956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71973 > 2) by scale factor 0.735366\nI1207 12:40:04.327994   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32236 > 2) by scale factor 0.601981\nI1207 12:40:05.265987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41368 > 2) by scale factor 0.828611\nI1207 12:40:06.203826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8813 > 2) by scale factor 0.694131\nI1207 12:40:07.141816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79175 > 2) by scale factor 0.527461\nI1207 12:40:08.079377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08319 > 2) by scale factor 0.489813\nI1207 12:40:09.017951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.224 > 2) by scale factor 0.620347\nI1207 12:40:09.955809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05002 > 2) by scale factor 0.493824\nI1207 12:40:10.897539   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6497 > 2) by scale factor 0.754803\nI1207 12:40:11.841794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18303 > 2) by scale factor 0.916159\nI1207 12:40:12.785949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16474 > 2) by scale factor 0.631964\nI1207 12:40:14.671571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53523 > 2) by scale factor 0.440992\nI1207 12:40:15.615243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4407 > 2) by scale factor 0.819438\nI1207 12:40:16.559140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06414 > 2) by scale factor 0.652711\nI1207 12:40:17.502743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44749 > 2) by scale factor 0.580132\nI1207 12:40:18.446276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.48153 > 2) by scale factor 0.446276\nI1207 12:40:19.389957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87745 > 2) by scale factor 0.695059\nI1207 12:40:20.333577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15858 > 2) by scale factor 0.633196\nI1207 12:40:21.277892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85352 > 2) by scale factor 0.700889\nI1207 12:40:22.221663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29699 > 2) by scale factor 0.870703\nI1207 12:40:23.165299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09799 > 2) by scale factor 0.64558\nI1207 12:40:24.108947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06908 > 2) by scale factor 0.651661\nI1207 12:40:25.052460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85561 > 2) by scale factor 0.700375\nI1207 12:40:25.995548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35146 > 2) by scale factor 0.459616\nI1207 12:40:26.939230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82088 > 2) by scale factor 0.523439\nI1207 12:40:27.883054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66637 > 2) by scale factor 0.750083\nI1207 12:40:28.826954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61495 > 2) by scale factor 0.553258\nI1207 12:40:29.770856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50069 > 2) by scale factor 0.571316\nI1207 12:40:30.714483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30784 > 2) by scale factor 0.464269\nI1207 12:40:31.658543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48984 > 2) by scale factor 0.573091\nI1207 12:40:32.602210   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95632 > 2) by scale factor 0.50552\nI1207 12:40:33.546099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7046 > 2) by scale factor 0.539869\nI1207 12:40:34.489828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27295 > 2) by scale factor 0.46806\nI1207 12:40:35.433761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45703 > 2) by scale factor 0.578531\nI1207 12:40:36.377552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96864 > 2) by scale factor 0.673708\nI1207 12:40:37.321594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05596 > 2) by scale factor 0.493101\nI1207 12:40:38.265564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66741 > 2) by scale factor 0.749792\nI1207 12:40:39.209617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30047 > 2) by scale factor 0.605975\nI1207 12:40:40.152941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1616 > 2) by scale factor 0.632592\nI1207 12:40:41.095906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96973 > 2) by scale factor 0.503812\nI1207 12:40:42.038832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7513 > 2) by scale factor 0.72693\nI1207 12:40:42.982846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35215 > 2) by scale factor 0.596631\nI1207 12:40:43.926484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03984 > 2) by scale factor 0.65793\nI1207 12:40:44.871276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21077 > 2) by scale factor 0.474972\nI1207 12:40:45.814805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.23282 > 2) by scale factor 0.472498\nI1207 12:40:46.758960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41752 > 2) by scale factor 0.58522\nI1207 12:40:47.702584   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83536 > 2) by scale factor 0.521463\nI1207 12:40:48.646056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24899 > 2) by scale factor 0.615575\nI1207 12:40:49.592278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73955 > 2) by scale factor 0.730047\nI1207 12:40:50.535642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56587 > 2) by scale factor 0.560874\nI1207 12:40:51.479368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31482 > 2) by scale factor 0.60335\nI1207 12:40:52.422917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31544 > 2) by scale factor 0.463452\nI1207 12:40:53.366526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6445 > 2) by scale factor 0.548773\nI1207 12:40:54.310119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05935 > 2) by scale factor 0.49269\nI1207 12:40:55.254097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17005 > 2) by scale factor 0.921637\nI1207 12:40:56.197662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41904 > 2) by scale factor 0.58496\nI1207 12:40:57.141466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35431 > 2) by scale factor 0.596248\nI1207 12:40:58.085024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60989 > 2) by scale factor 0.766314\nI1207 12:40:59.028502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08785 > 2) by scale factor 0.6477\nI1207 12:40:59.972301   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54865 > 2) by scale factor 0.439691\nI1207 12:41:00.915809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00505 > 2) by scale factor 0.665546\nI1207 12:41:01.859930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18409 > 2) by scale factor 0.478002\nI1207 12:41:02.803346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30075 > 2) by scale factor 0.465035\nI1207 12:41:03.747076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1109 > 2) by scale factor 0.947461\nI1207 12:41:04.690573   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6279 > 2) by scale factor 0.761065\nI1207 12:41:05.634239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42249 > 2) by scale factor 0.452234\nI1207 12:41:06.577991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9944 > 2) by scale factor 0.500701\nI1207 12:41:07.521271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63827 > 2) by scale factor 0.758072\nI1207 12:41:08.464412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00351 > 2) by scale factor 0.665888\nI1207 12:41:09.408702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25682 > 2) by scale factor 0.886202\nI1207 12:41:10.352286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99038 > 2) by scale factor 0.668812\nI1207 12:41:11.295848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8415 > 2) by scale factor 0.52063\nI1207 12:41:12.239455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12828 > 2) by scale factor 0.484464\nI1207 12:41:12.251504   369 solver.cpp:337] Iteration 17300, Testing net (#0)\nI1207 12:42:05.208887 
  369 solver.cpp:404]     Test net output #0: accuracy = 0.14685\nI1207 12:42:05.209465   369 solver.cpp:404]     Test net output #1: loss = 29.7609 (* 1 = 29.7609 loss)\nI1207 12:42:06.081763   369 solver.cpp:228] Iteration 17300, loss = 29.6913\nI1207 12:42:06.081806   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 12:42:06.081831   369 solver.cpp:244]     Train net output #1: loss = 29.6913 (* 1 = 29.6913 loss)\nI1207 12:42:06.155181   369 sgd_solver.cpp:166] Iteration 17300, lr = 2.595\nI1207 12:42:06.165388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69022 > 2) by scale factor 0.42642\nI1207 12:42:07.102634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56747 > 2) by scale factor 0.560621\nI1207 12:42:08.039942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12002 > 2) by scale factor 0.641022\nI1207 12:42:08.976860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34025 > 2) by scale factor 0.598757\nI1207 12:42:09.914099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57197 > 2) by scale factor 0.559914\nI1207 12:42:10.851505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00917 > 2) by scale factor 0.664635\nI1207 12:42:12.724668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33528 > 2) by scale factor 0.856429\nI1207 12:42:13.662977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49672 > 2) by scale factor 0.571964\nI1207 12:42:14.599994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00337 > 2) by scale factor 0.665918\nI1207 12:42:15.537019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67446 > 2) by scale factor 0.544297\nI1207 12:42:16.474606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.16637 > 2) by scale factor 0.631638\nI1207 12:42:18.348147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17338 > 2) by scale factor 0.479228\nI1207 12:42:19.286319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16065 > 2) by scale factor 0.480694\nI1207 12:42:20.224347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11364 > 2) by scale factor 0.486187\nI1207 12:42:21.162597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37747 > 2) by scale factor 0.592158\nI1207 12:42:22.100518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93605 > 2) by scale factor 0.681186\nI1207 12:42:23.038887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68118 > 2) by scale factor 0.427242\nI1207 12:42:23.977191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52821 > 2) by scale factor 0.441675\nI1207 12:42:24.915400   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70344 > 2) by scale factor 0.42522\nI1207 12:42:25.853771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49315 > 2) by scale factor 0.802198\nI1207 12:42:26.792017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40969 > 2) by scale factor 0.586563\nI1207 12:42:27.730062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58649 > 2) by scale factor 0.557649\nI1207 12:42:28.668076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28763 > 2) by scale factor 0.466458\nI1207 12:42:29.606010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17678 > 2) by scale factor 0.918787\nI1207 12:42:30.543566   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74124 > 2) by scale factor 0.534581\nI1207 12:42:31.480792   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62201 > 2) by scale factor 0.762772\nI1207 12:42:32.418612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7455 > 2) by scale factor 0.533974\nI1207 12:42:33.356864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87383 > 2) by scale factor 0.695935\nI1207 12:42:34.294433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25123 > 2) by scale factor 0.615152\nI1207 12:42:35.232579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66984 > 2) by scale factor 0.544983\nI1207 12:42:36.171170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66978 > 2) by scale factor 0.428286\nI1207 12:42:37.109372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71592 > 2) by scale factor 0.424095\nI1207 12:42:38.052170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14838 > 2) by scale factor 0.635247\nI1207 12:42:38.994819   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23208 > 2) by scale factor 0.618797\nI1207 12:42:39.937525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49227 > 2) by scale factor 0.572693\nI1207 12:42:40.881219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96244 > 2) by scale factor 0.675118\nI1207 12:42:41.824695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49214 > 2) by scale factor 0.572715\nI1207 12:42:42.768134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31334 > 2) by scale factor 0.86455\nI1207 12:42:43.710640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7355 > 2) by scale factor 0.422342\nI1207 12:42:44.653476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.34282 > 2) by scale factor 0.598297\nI1207 12:42:45.596751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21907 > 2) by scale factor 0.474038\nI1207 12:42:46.539955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3688 > 2) by scale factor 0.457791\nI1207 12:42:47.483088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18464 > 2) by scale factor 0.628014\nI1207 12:42:48.425659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88356 > 2) by scale factor 0.693586\nI1207 12:42:49.368559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37482 > 2) by scale factor 0.592624\nI1207 12:42:50.311301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99301 > 2) by scale factor 0.40056\nI1207 12:42:51.253988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21947 > 2) by scale factor 0.383181\nI1207 12:42:52.196626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56394 > 2) by scale factor 0.438218\nI1207 12:42:53.139454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68046 > 2) by scale factor 0.54341\nI1207 12:42:54.082474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86205 > 2) by scale factor 0.6988\nI1207 12:42:55.025496   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09045 > 2) by scale factor 0.647154\nI1207 12:42:55.967829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63573 > 2) by scale factor 0.758803\nI1207 12:42:56.910951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38744 > 2) by scale factor 0.455846\nI1207 12:42:57.853593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94604 > 2) by scale factor 0.678878\nI1207 12:42:58.796741   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51836 > 2) by scale factor 0.568446\nI1207 12:42:59.739820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50386 > 2) by scale factor 0.444063\nI1207 12:43:00.682548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.43836 > 2) by scale factor 0.367758\nI1207 12:43:01.625181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00095 > 2) by scale factor 0.499881\nI1207 12:43:02.567944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18184 > 2) by scale factor 0.478258\nI1207 12:43:03.511198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09995 > 2) by scale factor 0.48781\nI1207 12:43:04.454529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21278 > 2) by scale factor 0.474746\nI1207 12:43:05.397253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13341 > 2) by scale factor 0.638281\nI1207 12:43:06.340708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29278 > 2) by scale factor 0.60739\nI1207 12:43:07.283465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65128 > 2) by scale factor 0.547753\nI1207 12:43:08.226814   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90552 > 2) by scale factor 0.512096\nI1207 12:43:09.169721   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3961 > 2) by scale factor 0.454948\nI1207 12:43:10.112262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34443 > 2) by scale factor 0.59801\nI1207 12:43:11.055212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89893 > 2) by scale factor 0.68991\nI1207 12:43:11.997491   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.87685 > 2) by scale factor 0.695204\nI1207 12:43:12.940435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63026 > 2) by scale factor 0.550925\nI1207 12:43:13.882956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50809 > 2) by scale factor 0.443647\nI1207 12:43:14.825579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39291 > 2) by scale factor 0.589465\nI1207 12:43:15.767976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89856 > 2) by scale factor 0.51301\nI1207 12:43:16.710430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62666 > 2) by scale factor 0.432277\nI1207 12:43:17.653089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21994 > 2) by scale factor 0.621129\nI1207 12:43:18.596189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47839 > 2) by scale factor 0.806974\nI1207 12:43:19.554055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96589 > 2) by scale factor 0.674333\nI1207 12:43:20.496490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89409 > 2) by scale factor 0.513598\nI1207 12:43:21.438510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57969 > 2) by scale factor 0.775287\nI1207 12:43:22.381191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73758 > 2) by scale factor 0.730573\nI1207 12:43:23.325036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93271 > 2) by scale factor 0.508556\nI1207 12:43:24.268100   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76246 > 2) by scale factor 0.723992\nI1207 12:43:25.210986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67636 > 2) by scale factor 0.747285\nI1207 12:43:26.153291   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83944 > 2) by scale factor 0.52091\nI1207 12:43:27.096735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52271 > 2) by scale factor 0.567744\nI1207 12:43:28.039984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25471 > 2) by scale factor 0.614494\nI1207 12:43:28.982954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87078 > 2) by scale factor 0.516691\nI1207 12:43:29.925956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35355 > 2) by scale factor 0.596382\nI1207 12:43:30.868933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00111 > 2) by scale factor 0.66642\nI1207 12:43:31.811769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27964 > 2) by scale factor 0.467329\nI1207 12:43:32.755241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11692 > 2) by scale factor 0.641659\nI1207 12:43:33.697597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90235 > 2) by scale factor 0.512511\nI1207 12:43:34.640563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16933 > 2) by scale factor 0.479693\nI1207 12:43:35.583323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62905 > 2) by scale factor 0.432054\nI1207 12:43:36.526482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0791 > 2) by scale factor 0.649541\nI1207 12:43:37.468461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78592 > 2) by scale factor 0.717895\nI1207 12:43:38.410691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3478 > 2) by scale factor 0.597408\nI1207 12:43:39.353363   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.01607 > 2) by scale factor 0.663115\nI1207 12:43:39.365422   369 solver.cpp:337] Iteration 17400, Testing net (#0)\nI1207 12:44:32.326381   369 solver.cpp:404]     Test net output #0: accuracy = 0.15925\nI1207 12:44:32.326982   369 solver.cpp:404]     Test net output #1: loss = 26.0484 (* 1 = 26.0484 loss)\nI1207 12:44:33.199342   369 solver.cpp:228] Iteration 17400, loss = 32.0197\nI1207 12:44:33.199388   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 12:44:33.199412   369 solver.cpp:244]     Train net output #1: loss = 32.0197 (* 1 = 32.0197 loss)\nI1207 12:44:33.269513   369 sgd_solver.cpp:166] Iteration 17400, lr = 2.61\nI1207 12:44:33.279392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81091 > 2) by scale factor 0.524809\nI1207 12:44:34.216557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58059 > 2) by scale factor 0.775017\nI1207 12:44:35.154391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51894 > 2) by scale factor 0.568352\nI1207 12:44:36.091576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56638 > 2) by scale factor 0.779309\nI1207 12:44:37.029706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67324 > 2) by scale factor 0.748156\nI1207 12:44:37.968228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56931 > 2) by scale factor 0.437703\nI1207 12:44:38.906716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81336 > 2) by scale factor 0.710894\nI1207 12:44:39.844853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63228 > 2) by scale factor 0.431753\nI1207 12:44:40.782732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29553 > 2) by scale factor 0.606883\nI1207 12:44:41.721192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.06321 > 2) by scale factor 0.969363\nI1207 12:44:42.658920   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51317 > 2) by scale factor 0.443147\nI1207 12:44:43.596187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67176 > 2) by scale factor 0.544698\nI1207 12:44:44.533881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86159 > 2) by scale factor 0.698913\nI1207 12:44:45.470532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68942 > 2) by scale factor 0.743654\nI1207 12:44:46.408391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18949 > 2) by scale factor 0.477385\nI1207 12:44:47.346020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30579 > 2) by scale factor 0.605\nI1207 12:44:48.283504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8667 > 2) by scale factor 0.697665\nI1207 12:44:49.220971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52711 > 2) by scale factor 0.791417\nI1207 12:44:50.158372   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83709 > 2) by scale factor 0.521228\nI1207 12:44:51.095980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67394 > 2) by scale factor 0.544375\nI1207 12:44:52.033304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52079 > 2) by scale factor 0.568054\nI1207 12:44:52.971549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98705 > 2) by scale factor 0.669556\nI1207 12:44:53.909653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98916 > 2) by scale factor 0.669085\nI1207 12:44:54.847741   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30993 > 2) by scale factor 0.464045\nI1207 12:44:55.785331   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3525 > 2) by scale factor 0.596571\nI1207 12:44:56.722856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5199 > 2) by scale factor 0.793682\nI1207 12:44:57.660161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08801 > 2) by scale factor 0.489236\nI1207 12:44:58.598115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30058 > 2) by scale factor 0.869344\nI1207 12:44:59.535570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62233 > 2) by scale factor 0.76268\nI1207 12:45:00.472890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08562 > 2) by scale factor 0.648169\nI1207 12:45:01.410408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76642 > 2) by scale factor 0.722956\nI1207 12:45:02.347803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0923 > 2) by scale factor 0.646768\nI1207 12:45:03.289000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73784 > 2) by scale factor 0.535069\nI1207 12:45:04.231910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50196 > 2) by scale factor 0.571109\nI1207 12:45:05.174679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49066 > 2) by scale factor 0.572958\nI1207 12:45:06.117125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8665 > 2) by scale factor 0.517264\nI1207 12:45:07.059773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74884 > 2) by scale factor 0.727581\nI1207 12:45:08.001914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46729 > 2) by scale factor 0.576819\nI1207 12:45:08.944304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.86099 > 2) by scale factor 0.699059\nI1207 12:45:09.886919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18685 > 2) by scale factor 0.627579\nI1207 12:45:10.829296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62689 > 2) by scale factor 0.551436\nI1207 12:45:11.772421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95152 > 2) by scale factor 0.506134\nI1207 12:45:12.715543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.84374 > 2) by scale factor 0.412904\nI1207 12:45:13.658421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37974 > 2) by scale factor 0.456649\nI1207 12:45:14.601272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70062 > 2) by scale factor 0.74057\nI1207 12:45:15.543603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33943 > 2) by scale factor 0.46089\nI1207 12:45:16.485580   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40905 > 2) by scale factor 0.586675\nI1207 12:45:17.428351   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92757 > 2) by scale factor 0.509221\nI1207 12:45:18.371054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61246 > 2) by scale factor 0.433608\nI1207 12:45:19.313560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19384 > 2) by scale factor 0.626206\nI1207 12:45:20.256608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1943 > 2) by scale factor 0.626116\nI1207 12:45:21.198884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59215 > 2) by scale factor 0.55677\nI1207 12:45:22.141415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08618 > 2) by scale factor 0.489454\nI1207 12:45:23.084090   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54613 > 2) by scale factor 0.563995\nI1207 12:45:24.026787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99519 > 2) by scale factor 0.667737\nI1207 12:45:24.969429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82105 > 2) by scale factor 0.523417\nI1207 12:45:25.912024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56442 > 2) by scale factor 0.438172\nI1207 12:45:26.854787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20165 > 2) by scale factor 0.476004\nI1207 12:45:27.797045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3111 > 2) by scale factor 0.60403\nI1207 12:45:28.739994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46024 > 2) by scale factor 0.448407\nI1207 12:45:29.683135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87696 > 2) by scale factor 0.410091\nI1207 12:45:30.626737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01923 > 2) by scale factor 0.497607\nI1207 12:45:31.570086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82551 > 2) by scale factor 0.707837\nI1207 12:45:32.513187   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37487 > 2) by scale factor 0.592615\nI1207 12:45:33.457108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04975 > 2) by scale factor 0.493858\nI1207 12:45:34.399469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63464 > 2) by scale factor 0.550261\nI1207 12:45:35.342972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79847 > 2) by scale factor 0.416799\nI1207 12:45:36.285843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.83814 > 2) by scale factor 0.413382\nI1207 12:45:37.229346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21527 > 2) by scale factor 0.622032\nI1207 12:45:38.172979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31529 > 2) by scale factor 0.863823\nI1207 12:45:39.116183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73098 > 2) by scale factor 0.536052\nI1207 12:45:40.059751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60209 > 2) by scale factor 0.768614\nI1207 12:45:41.003090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54521 > 2) by scale factor 0.564141\nI1207 12:45:41.946290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90216 > 2) by scale factor 0.689141\nI1207 12:45:42.889708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79543 > 2) by scale factor 0.715452\nI1207 12:45:43.833386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14036 > 2) by scale factor 0.63687\nI1207 12:45:44.777448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10171 > 2) by scale factor 0.487602\nI1207 12:45:45.721413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74586 > 2) by scale factor 0.42142\nI1207 12:45:46.665310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90271 > 2) by scale factor 0.689011\nI1207 12:45:47.608760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75478 > 2) by scale factor 0.532655\nI1207 12:45:48.552315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89453 > 2) by scale factor 0.51354\nI1207 12:45:49.496042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28604 > 2) by scale factor 0.466631\nI1207 12:45:50.439287   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53788 > 2) by scale factor 0.440735\nI1207 12:45:51.383160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34819 > 2) by scale factor 0.597338\nI1207 12:45:52.327064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32756 > 2) by scale factor 0.859269\nI1207 12:45:53.270331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90599 > 2) by scale factor 0.688234\nI1207 12:45:54.213452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46247 > 2) by scale factor 0.812191\nI1207 12:45:55.156803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07866 > 2) by scale factor 0.490357\nI1207 12:45:56.099650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25835 > 2) by scale factor 0.469666\nI1207 12:45:57.043050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47559 > 2) by scale factor 0.575442\nI1207 12:45:57.985908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57105 > 2) by scale factor 0.56006\nI1207 12:45:58.929805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74742 > 2) by scale factor 0.533701\nI1207 12:45:59.873180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26839 > 2) by scale factor 0.611921\nI1207 12:46:00.816136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44711 > 2) by scale factor 0.580196\nI1207 12:46:01.759119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93015 > 2) by scale factor 0.508886\nI1207 12:46:02.701875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49467 > 2) by scale factor 0.444972\nI1207 12:46:03.646136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.37573 > 2) by scale factor 0.592465\nI1207 12:46:04.590083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27176 > 2) by scale factor 0.611291\nI1207 12:46:05.533308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56683 > 2) by scale factor 0.560721\nI1207 12:46:06.476577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68899 > 2) by scale factor 0.743774\nI1207 12:46:06.488608   369 solver.cpp:337] Iteration 17500, Testing net (#0)\nI1207 12:46:59.442088   369 solver.cpp:404]     Test net output #0: accuracy = 0.1517\nI1207 12:46:59.442687   369 solver.cpp:404]     Test net output #1: loss = 27.9898 (* 1 = 27.9898 loss)\nI1207 12:47:00.315296   369 solver.cpp:228] Iteration 17500, loss = 27.6591\nI1207 12:47:00.315347   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 12:47:00.315374   369 solver.cpp:244]     Train net output #1: loss = 27.6591 (* 1 = 27.6591 loss)\nI1207 12:47:00.394210   369 sgd_solver.cpp:166] Iteration 17500, lr = 2.625\nI1207 12:47:00.404357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87779 > 2) by scale factor 0.515758\nI1207 12:47:01.342715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70015 > 2) by scale factor 0.540519\nI1207 12:47:02.280690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17487 > 2) by scale factor 0.629946\nI1207 12:47:03.218346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7387 > 2) by scale factor 0.422056\nI1207 12:47:04.155206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09608 > 2) by scale factor 0.488271\nI1207 12:47:05.092598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87151 > 2) by scale factor 0.516595\nI1207 12:47:06.029795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.66601 > 2) by scale factor 0.428632\nI1207 12:47:06.967828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40159 > 2) by scale factor 0.58796\nI1207 12:47:07.906107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77891 > 2) by scale factor 0.719707\nI1207 12:47:08.843811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89885 > 2) by scale factor 0.689929\nI1207 12:47:09.782007   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53854 > 2) by scale factor 0.44067\nI1207 12:47:10.719908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03012 > 2) by scale factor 0.660039\nI1207 12:47:11.657564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34179 > 2) by scale factor 0.598482\nI1207 12:47:12.595950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60508 > 2) by scale factor 0.76773\nI1207 12:47:13.533702   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60308 > 2) by scale factor 0.555081\nI1207 12:47:14.472034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49628 > 2) by scale factor 0.444812\nI1207 12:47:15.409869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7474 > 2) by scale factor 0.421283\nI1207 12:47:16.347637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67866 > 2) by scale factor 0.427473\nI1207 12:47:17.285653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56097 > 2) by scale factor 0.438503\nI1207 12:47:18.223611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07637 > 2) by scale factor 0.650118\nI1207 12:47:19.161744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80422 > 2) by scale factor 0.713212\nI1207 12:47:20.099491   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53291 > 2) by scale factor 0.441218\nI1207 12:47:21.036736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86895 > 2) by scale factor 0.516936\nI1207 12:47:21.974089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34735 > 2) by scale factor 0.597488\nI1207 12:47:22.912026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13349 > 2) by scale factor 0.638267\nI1207 12:47:23.849977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73754 > 2) by scale factor 0.535111\nI1207 12:47:24.788213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01316 > 2) by scale factor 0.498361\nI1207 12:47:25.726145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2357 > 2) by scale factor 0.472177\nI1207 12:47:26.663409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34937 > 2) by scale factor 0.597127\nI1207 12:47:27.600564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53013 > 2) by scale factor 0.790472\nI1207 12:47:28.538424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63685 > 2) by scale factor 0.549927\nI1207 12:47:29.476054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.94403 > 2) by scale factor 0.404528\nI1207 12:47:30.414345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79681 > 2) by scale factor 0.715101\nI1207 12:47:31.356972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02218 > 2) by scale factor 0.661773\nI1207 12:47:32.300052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88517 > 2) by scale factor 0.6932\nI1207 12:47:33.242789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.17139 > 2) by scale factor 0.479456\nI1207 12:47:34.185986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46432 > 2) by scale factor 0.577314\nI1207 12:47:35.128713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82891 > 2) by scale factor 0.706985\nI1207 12:47:36.071403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54462 > 2) by scale factor 0.785973\nI1207 12:47:37.014247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46377 > 2) by scale factor 0.811764\nI1207 12:47:37.957198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51038 > 2) by scale factor 0.796694\nI1207 12:47:38.899919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.451 > 2) by scale factor 0.815995\nI1207 12:47:39.842599   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28527 > 2) by scale factor 0.608778\nI1207 12:47:40.785298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00887 > 2) by scale factor 0.498894\nI1207 12:47:41.728416   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19063 > 2) by scale factor 0.626836\nI1207 12:47:42.670878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75193 > 2) by scale factor 0.53306\nI1207 12:47:43.613415   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8659 > 2) by scale factor 0.411023\nI1207 12:47:44.555794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7398 > 2) by scale factor 0.421959\nI1207 12:47:45.498342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81794 > 2) by scale factor 0.709739\nI1207 12:47:46.440207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47474 > 2) by scale factor 0.446953\nI1207 12:47:47.383677   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31686 > 2) by scale factor 0.463299\nI1207 12:47:48.326709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95367 > 2) by scale factor 0.505859\nI1207 12:47:49.269459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65159 > 2) by scale factor 0.429961\nI1207 12:47:50.212034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31813 > 2) by scale factor 0.463164\nI1207 12:47:51.154191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54289 > 2) by scale factor 0.440248\nI1207 12:47:52.095736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39652 > 2) by scale factor 0.454905\nI1207 12:47:53.037839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04567 > 2) by scale factor 0.656671\nI1207 12:47:53.980008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99979 > 2) by scale factor 0.500026\nI1207 12:47:54.922488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9273 > 2) by scale factor 0.683223\nI1207 12:47:55.865170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48709 > 2) by scale factor 0.573545\nI1207 12:47:57.747148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7484 > 2) by scale factor 0.727696\nI1207 12:47:58.689991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38511 > 2) by scale factor 0.838535\nI1207 12:47:59.632963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78435 > 2) by scale factor 0.528492\nI1207 12:48:00.575575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79433 > 2) by scale factor 0.527102\nI1207 12:48:01.517212   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.7619 > 2) by scale factor 0.531646\nI1207 12:48:02.459278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51355 > 2) by scale factor 0.44311\nI1207 12:48:03.402043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72862 > 2) by scale factor 0.536391\nI1207 12:48:04.344518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32023 > 2) by scale factor 0.462938\nI1207 12:48:05.287009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11348 > 2) by scale factor 0.642368\nI1207 12:48:06.228866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27246 > 2) by scale factor 0.61116\nI1207 12:48:08.110918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91074 > 2) by scale factor 0.511412\nI1207 12:48:09.053012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71871 > 2) by scale factor 0.537822\nI1207 12:48:09.995502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8597 > 2) by scale factor 0.518175\nI1207 12:48:10.938074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78318 > 2) by scale factor 0.528656\nI1207 12:48:11.880437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20295 > 2) by scale factor 0.624425\nI1207 12:48:12.822595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89138 > 2) by scale factor 0.691712\nI1207 12:48:13.764256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82649 > 2) by scale factor 0.522672\nI1207 12:48:14.706463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37728 > 2) by scale factor 0.592193\nI1207 12:48:15.648768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92608 > 2) by scale factor 0.683507\nI1207 12:48:16.591445   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09072 > 2) by scale factor 0.647098\nI1207 12:48:17.533484   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06009 > 2) by scale factor 0.4926\nI1207 12:48:18.475016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49538 > 2) by scale factor 0.80148\nI1207 12:48:19.417223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43287 > 2) by scale factor 0.582603\nI1207 12:48:20.359340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22663 > 2) by scale factor 0.898219\nI1207 12:48:21.302006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72232 > 2) by scale factor 0.423521\nI1207 12:48:22.244276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08654 > 2) by scale factor 0.393194\nI1207 12:48:23.187136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00684 > 2) by scale factor 0.399454\nI1207 12:48:24.129142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34352 > 2) by scale factor 0.598172\nI1207 12:48:25.071322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35111 > 2) by scale factor 0.596816\nI1207 12:48:26.013244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41541 > 2) by scale factor 0.828015\nI1207 12:48:26.955879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68746 > 2) by scale factor 0.744198\nI1207 12:48:27.898375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74284 > 2) by scale factor 0.534353\nI1207 12:48:28.840952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.045 > 2) by scale factor 0.494438\nI1207 12:48:29.783884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.9441 > 2) by scale factor 0.679325\nI1207 12:48:30.726950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41578 > 2) by scale factor 0.827891\nI1207 12:48:31.669448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98003 > 2) by scale factor 0.502509\nI1207 12:48:32.611804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17889 > 2) by scale factor 0.478596\nI1207 12:48:33.554658   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98596 > 2) by scale factor 0.501761\nI1207 12:48:33.566658   369 solver.cpp:337] Iteration 17600, Testing net (#0)\nI1207 12:49:26.520225   369 solver.cpp:404]     Test net output #0: accuracy = 0.1339\nI1207 12:49:26.520807   369 solver.cpp:404]     Test net output #1: loss = 26.9985 (* 1 = 26.9985 loss)\nI1207 12:49:27.392340   369 solver.cpp:228] Iteration 17600, loss = 27.6859\nI1207 12:49:27.392395   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 12:49:27.392421   369 solver.cpp:244]     Train net output #1: loss = 27.6859 (* 1 = 27.6859 loss)\nI1207 12:49:27.467659   369 sgd_solver.cpp:166] Iteration 17600, lr = 2.64\nI1207 12:49:27.477898   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05235 > 2) by scale factor 0.493541\nI1207 12:49:28.415263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39677 > 2) by scale factor 0.45488\nI1207 12:49:29.352283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8986 > 2) by scale factor 0.513005\nI1207 12:49:30.289238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45062 > 2) by scale factor 0.579606\nI1207 12:49:31.226346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91361 > 2) by scale factor 0.511037\nI1207 12:49:32.163167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.62185 > 2) by scale factor 0.432727\nI1207 12:49:33.099961   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27701 > 2) by scale factor 0.610312\nI1207 12:49:34.036607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58978 > 2) by scale factor 0.435751\nI1207 12:49:34.973364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58735 > 2) by scale factor 0.557515\nI1207 12:49:35.910190   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4716 > 2) by scale factor 0.576103\nI1207 12:49:36.847116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75051 > 2) by scale factor 0.727139\nI1207 12:49:37.784267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10449 > 2) by scale factor 0.644228\nI1207 12:49:38.721076   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71367 > 2) by scale factor 0.73701\nI1207 12:49:39.658231   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36975 > 2) by scale factor 0.843973\nI1207 12:49:40.594918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86497 > 2) by scale factor 0.517468\nI1207 12:49:41.531976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33912 > 2) by scale factor 0.59896\nI1207 12:49:42.468994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92553 > 2) by scale factor 0.683637\nI1207 12:49:43.405937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94147 > 2) by scale factor 0.679933\nI1207 12:49:44.343374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60418 > 2) by scale factor 0.554911\nI1207 12:49:45.280046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45884 > 2) by scale factor 0.578228\nI1207 12:49:46.216755   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73606 > 2) by scale factor 0.730979\nI1207 12:49:47.153889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45532 > 2) by scale factor 0.814558\nI1207 12:49:48.090169   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5609 > 2) by scale factor 0.780976\nI1207 12:49:49.026784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55475 > 2) by scale factor 0.562627\nI1207 12:49:49.963929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83626 > 2) by scale factor 0.705154\nI1207 12:49:50.900218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49713 > 2) by scale factor 0.80092\nI1207 12:49:51.837211   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13731 > 2) by scale factor 0.389309\nI1207 12:49:52.773952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7435 > 2) by scale factor 0.728995\nI1207 12:49:53.710913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78614 > 2) by scale factor 0.717838\nI1207 12:49:54.647987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06148 > 2) by scale factor 0.492432\nI1207 12:49:55.584476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16229 > 2) by scale factor 0.632453\nI1207 12:49:56.521164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5009 > 2) by scale factor 0.444355\nI1207 12:49:57.458951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64358 > 2) by scale factor 0.430703\nI1207 12:49:58.395859   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54027 > 2) by scale factor 0.440502\nI1207 12:49:59.332505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.95687 > 2) by scale factor 0.50545\nI1207 12:50:00.269453   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17446 > 2) by scale factor 0.479104\nI1207 12:50:01.209810   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73541 > 2) by scale factor 0.422349\nI1207 12:50:02.151952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88738 > 2) by scale factor 0.409217\nI1207 12:50:03.094614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19603 > 2) by scale factor 0.476641\nI1207 12:50:04.037117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81838 > 2) by scale factor 0.523782\nI1207 12:50:04.980512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71157 > 2) by scale factor 0.538855\nI1207 12:50:05.923101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22232 > 2) by scale factor 0.473673\nI1207 12:50:06.865697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52252 > 2) by scale factor 0.567775\nI1207 12:50:07.808856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11387 > 2) by scale factor 0.48616\nI1207 12:50:08.751737   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84819 > 2) by scale factor 0.519726\nI1207 12:50:09.694586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70163 > 2) by scale factor 0.540303\nI1207 12:50:10.637264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34945 > 2) by scale factor 0.597112\nI1207 12:50:11.579726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14775 > 2) by scale factor 0.635374\nI1207 12:50:12.522550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56812 > 2) by scale factor 0.778779\nI1207 12:50:13.465842   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53203 > 2) by scale factor 0.566246\nI1207 12:50:14.408406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12488 > 2) by scale factor 0.640025\nI1207 12:50:15.350746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39794 > 2) by scale factor 0.83405\nI1207 12:50:16.294126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48231 > 2) by scale factor 0.574332\nI1207 12:50:17.237236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33168 > 2) by scale factor 0.600298\nI1207 12:50:18.179659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99571 > 2) by scale factor 0.500537\nI1207 12:50:19.121795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06178 > 2) by scale factor 0.653215\nI1207 12:50:20.064507   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02833 > 2) by scale factor 0.496483\nI1207 12:50:21.007603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80881 > 2) by scale factor 0.525098\nI1207 12:50:21.949949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29334 > 2) by scale factor 0.607287\nI1207 12:50:22.892918   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57784 > 2) by scale factor 0.436887\nI1207 12:50:23.835523   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69199 > 2) by scale factor 0.742945\nI1207 12:50:24.778339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47604 > 2) by scale factor 0.807741\nI1207 12:50:25.720880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45079 > 2) by scale factor 0.449358\nI1207 12:50:26.663839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.40686 > 2) by scale factor 0.453838\nI1207 12:50:27.606627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50365 > 2) by scale factor 0.570833\nI1207 12:50:28.549263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96849 > 2) by scale factor 0.673744\nI1207 12:50:29.492166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14168 > 2) by scale factor 0.482896\nI1207 12:50:30.434640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08085 > 2) by scale factor 0.649172\nI1207 12:50:31.377349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10626 > 2) by scale factor 0.487061\nI1207 12:50:32.319752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74253 > 2) by scale factor 0.729255\nI1207 12:50:33.262459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45999 > 2) by scale factor 0.578036\nI1207 12:50:34.204841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.488 > 2) by scale factor 0.445633\nI1207 12:50:35.147099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08714 > 2) by scale factor 0.64785\nI1207 12:50:36.089830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5955 > 2) by scale factor 0.556251\nI1207 12:50:37.032444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20098 > 2) by scale factor 0.908688\nI1207 12:50:37.975210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13184 > 2) by scale factor 0.638602\nI1207 12:50:38.918020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50051 > 2) by scale factor 0.571346\nI1207 12:50:39.860988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54675 > 2) by scale factor 0.439875\nI1207 12:50:40.803648   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73353 > 2) by scale factor 0.535686\nI1207 12:50:41.746060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52091 > 2) by scale factor 0.568035\nI1207 12:50:42.688915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05629 > 2) by scale factor 0.493062\nI1207 12:50:43.631696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66388 > 2) by scale factor 0.428828\nI1207 12:50:44.574429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33134 > 2) by scale factor 0.46175\nI1207 12:50:45.516991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59865 > 2) by scale factor 0.43491\nI1207 12:50:46.458726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75527 > 2) by scale factor 0.532585\nI1207 12:50:47.401657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28968 > 2) by scale factor 0.607963\nI1207 12:50:48.344127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04329 > 2) by scale factor 0.657183\nI1207 12:50:49.286308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34091 > 2) by scale factor 0.854369\nI1207 12:50:50.228668   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25592 > 2) by scale factor 0.886554\nI1207 12:50:51.171237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04436 > 2) by scale factor 0.656952\nI1207 12:50:52.113883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73041 > 2) by scale factor 0.732492\nI1207 12:50:53.056322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05163 > 2) by scale factor 0.493628\nI1207 12:50:53.998841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.19234 > 2) by scale factor 0.626499\nI1207 12:50:54.941323   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2945 > 2) by scale factor 0.465712\nI1207 12:50:55.883982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73261 > 2) by scale factor 0.535819\nI1207 12:50:56.826714   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69836 > 2) by scale factor 0.54078\nI1207 12:50:57.769968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85004 > 2) by scale factor 0.701745\nI1207 12:50:58.712682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31724 > 2) by scale factor 0.602911\nI1207 12:50:59.655560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52926 > 2) by scale factor 0.566692\nI1207 12:51:00.598680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47728 > 2) by scale factor 0.575161\nI1207 12:51:00.610632   369 solver.cpp:337] Iteration 17700, Testing net (#0)\nI1207 12:51:53.548243   369 solver.cpp:404]     Test net output #0: accuracy = 0.14285\nI1207 12:51:53.548843   369 solver.cpp:404]     Test net output #1: loss = 21.6388 (* 1 = 21.6388 loss)\nI1207 12:51:54.419571   369 solver.cpp:228] Iteration 17700, loss = 25.9258\nI1207 12:51:54.419610   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 12:51:54.419636   369 solver.cpp:244]     Train net output #1: loss = 25.9258 (* 1 = 25.9258 loss)\nI1207 12:51:54.498042   369 sgd_solver.cpp:166] Iteration 17700, lr = 2.655\nI1207 12:51:54.508195   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06376 > 2) by scale factor 0.492155\nI1207 12:51:55.445371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72832 > 2) by scale factor 0.536434\nI1207 12:51:56.382419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.33375 > 2) by scale factor 0.85699\nI1207 12:51:57.320024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08612 > 2) by scale factor 0.648063\nI1207 12:51:58.256615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19723 > 2) by scale factor 0.910237\nI1207 12:51:59.193615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9675 > 2) by scale factor 0.504096\nI1207 12:52:00.130735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56724 > 2) by scale factor 0.560658\nI1207 12:52:01.067591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76851 > 2) by scale factor 0.530713\nI1207 12:52:02.004101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5358 > 2) by scale factor 0.440936\nI1207 12:52:02.942178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69581 > 2) by scale factor 0.425911\nI1207 12:52:03.879307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91095 > 2) by scale factor 0.407253\nI1207 12:52:04.817149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50599 > 2) by scale factor 0.570452\nI1207 12:52:05.755136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68241 > 2) by scale factor 0.42713\nI1207 12:52:06.692087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99895 > 2) by scale factor 0.400084\nI1207 12:52:07.629106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18297 > 2) by scale factor 0.478129\nI1207 12:52:08.565773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10188 > 2) by scale factor 0.487581\nI1207 12:52:09.501679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40352 > 2) by scale factor 0.454182\nI1207 12:52:10.438707   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08821 > 2) by scale factor 0.647624\nI1207 12:52:11.375919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11432 > 2) by scale factor 0.486107\nI1207 12:52:12.312681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79454 > 2) by scale factor 0.417142\nI1207 12:52:13.249462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88355 > 2) by scale factor 0.514992\nI1207 12:52:14.186622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13165 > 2) by scale factor 0.638641\nI1207 12:52:15.124764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9346 > 2) by scale factor 0.681523\nI1207 12:52:16.062602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26657 > 2) by scale factor 0.46876\nI1207 12:52:16.999099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24504 > 2) by scale factor 0.471138\nI1207 12:52:17.936444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07856 > 2) by scale factor 0.490369\nI1207 12:52:18.873574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16433 > 2) by scale factor 0.632045\nI1207 12:52:19.810465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81436 > 2) by scale factor 0.710642\nI1207 12:52:20.747515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20474 > 2) by scale factor 0.475653\nI1207 12:52:21.685751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4869 > 2) by scale factor 0.573575\nI1207 12:52:22.622833   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51229 > 2) by scale factor 0.569428\nI1207 12:52:23.560703   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.64692 > 2) by scale factor 0.430392\nI1207 12:52:24.498739   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55036 > 2) by scale factor 0.784203\nI1207 12:52:25.436518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27737 > 2) by scale factor 0.878204\nI1207 12:52:26.374480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08185 > 2) by scale factor 0.648962\nI1207 12:52:27.312361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35639 > 2) by scale factor 0.848755\nI1207 12:52:28.255801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5124 > 2) by scale factor 0.569412\nI1207 12:52:29.198700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49105 > 2) by scale factor 0.572894\nI1207 12:52:30.141587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10294 > 2) by scale factor 0.487455\nI1207 12:52:31.085114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55835 > 2) by scale factor 0.438755\nI1207 12:52:32.028560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36941 > 2) by scale factor 0.593576\nI1207 12:52:32.971858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19012 > 2) by scale factor 0.626935\nI1207 12:52:33.915130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73503 > 2) by scale factor 0.731254\nI1207 12:52:34.858569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8063 > 2) by scale factor 0.712682\nI1207 12:52:35.802119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32042 > 2) by scale factor 0.462918\nI1207 12:52:36.745482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01589 > 2) by scale factor 0.663153\nI1207 12:52:37.689185   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93682 > 2) by scale factor 0.508024\nI1207 12:52:38.632124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41295 > 2) by scale factor 0.453212\nI1207 12:52:39.574970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44952 > 2) by scale factor 0.57979\nI1207 12:52:40.518307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32239 > 2) by scale factor 0.601977\nI1207 12:52:41.461608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6965 > 2) by scale factor 0.541052\nI1207 12:52:42.404659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27 > 2) by scale factor 0.468384\nI1207 12:52:43.347952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03709 > 2) by scale factor 0.495407\nI1207 12:52:44.290989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99619 > 2) by scale factor 0.500477\nI1207 12:52:45.234110   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64423 > 2) by scale factor 0.548812\nI1207 12:52:46.177270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95399 > 2) by scale factor 0.403715\nI1207 12:52:47.120556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37009 > 2) by scale factor 0.593457\nI1207 12:52:48.063344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88297 > 2) by scale factor 0.693729\nI1207 12:52:49.005533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75659 > 2) by scale factor 0.532398\nI1207 12:52:49.948652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66115 > 2) by scale factor 0.546276\nI1207 12:52:50.891719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.50969 > 2) by scale factor 0.569851\nI1207 12:52:51.833971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99227 > 2) by scale factor 0.66839\nI1207 12:52:52.776943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30564 > 2) by scale factor 0.464507\nI1207 12:52:53.720026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98416 > 2) by scale factor 0.501988\nI1207 12:52:54.663440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4098 > 2) by scale factor 0.829946\nI1207 12:52:55.606165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3044 > 2) by scale factor 0.464641\nI1207 12:52:56.548835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81679 > 2) by scale factor 0.710029\nI1207 12:52:57.491825   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56874 > 2) by scale factor 0.560421\nI1207 12:52:58.435017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68605 > 2) by scale factor 0.542586\nI1207 12:52:59.377974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69467 > 2) by scale factor 0.54132\nI1207 12:53:00.321336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37514 > 2) by scale factor 0.842057\nI1207 12:53:01.264255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28106 > 2) by scale factor 0.876787\nI1207 12:53:03.148185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67905 > 2) by scale factor 0.746533\nI1207 12:53:04.091729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60202 > 2) by scale factor 0.768633\nI1207 12:53:05.034592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04292 > 2) by scale factor 0.396596\nI1207 12:53:05.977782   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46664 > 2) by scale factor 0.447764\nI1207 12:53:06.921355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82877 > 2) by scale factor 0.522361\nI1207 12:53:07.864364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06151 > 2) by scale factor 0.653273\nI1207 12:53:08.807591   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98764 > 2) by scale factor 0.400992\nI1207 12:53:09.750555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99719 > 2) by scale factor 0.500351\nI1207 12:53:10.693691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84267 > 2) by scale factor 0.520472\nI1207 12:53:11.636342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09867 > 2) by scale factor 0.645438\nI1207 12:53:12.579732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47045 > 2) by scale factor 0.447382\nI1207 12:53:13.522763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36131 > 2) by scale factor 0.595006\nI1207 12:53:14.466229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69638 > 2) by scale factor 0.741735\nI1207 12:53:15.409485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74832 > 2) by scale factor 0.727717\nI1207 12:53:16.352479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98052 > 2) by scale factor 0.671025\nI1207 12:53:17.295927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1491 > 2) by scale factor 0.388418\nI1207 12:53:18.238730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79703 > 2) by scale factor 0.715043\nI1207 12:53:19.181485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.44176 > 2) by scale factor 0.819083\nI1207 12:53:20.124331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34986 > 2) by scale factor 0.851116\nI1207 12:53:21.067203   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99488 > 2) by scale factor 0.667806\nI1207 12:53:22.010048   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29252 > 2) by scale factor 0.607437\nI1207 12:53:22.953266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33898 > 2) by scale factor 0.598985\nI1207 12:53:23.896447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35855 > 2) by scale factor 0.458869\nI1207 12:53:24.839556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96054 > 2) by scale factor 0.504981\nI1207 12:53:25.782835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02051 > 2) by scale factor 0.49745\nI1207 12:53:26.725447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10345 > 2) by scale factor 0.950818\nI1207 12:53:27.668438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15082 > 2) by scale factor 0.481832\nI1207 12:53:27.680411   369 solver.cpp:337] Iteration 17800, Testing net (#0)\nI1207 12:54:20.654144   369 solver.cpp:404]     Test net output #0: accuracy = 0.136\nI1207 12:54:20.654778   369 solver.cpp:404]     Test net output #1: loss = 16.4248 (* 1 = 16.4248 loss)\nI1207 12:54:21.527294   369 solver.cpp:228] Iteration 17800, loss = 16.3034\nI1207 12:54:21.527336   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 12:54:21.527361   369 solver.cpp:244]     Train net output #1: loss = 16.3034 (* 1 = 16.3034 loss)\nI1207 12:54:21.607064   369 sgd_solver.cpp:166] Iteration 17800, lr = 2.67\nI1207 12:54:21.617178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.59288 > 2) by scale factor 0.556657\nI1207 12:54:22.554072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06435 > 2) by scale factor 0.652667\nI1207 12:54:23.491199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5001 > 2) by scale factor 0.444435\nI1207 12:54:24.428158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11809 > 2) by scale factor 0.485662\nI1207 12:54:25.365711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36136 > 2) by scale factor 0.458572\nI1207 12:54:26.302562   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33908 > 2) by scale factor 0.460927\nI1207 12:54:27.239655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20395 > 2) by scale factor 0.475743\nI1207 12:54:28.176782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74641 > 2) by scale factor 0.728224\nI1207 12:54:29.113382   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70142 > 2) by scale factor 0.740351\nI1207 12:54:30.049926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57207 > 2) by scale factor 0.559899\nI1207 12:54:30.985908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73127 > 2) by scale factor 0.536011\nI1207 12:54:31.922665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70276 > 2) by scale factor 0.540138\nI1207 12:54:32.859357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72643 > 2) by scale factor 0.733559\nI1207 12:54:33.795972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06784 > 2) by scale factor 0.491661\nI1207 12:54:34.732951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65711 > 2) by scale factor 0.54688\nI1207 12:54:35.670070   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.4721 > 2) by scale factor 0.365491\nI1207 12:54:36.606914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33284 > 2) by scale factor 0.60009\nI1207 12:54:37.543983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.61538 > 2) by scale factor 0.356165\nI1207 12:54:38.480839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66208 > 2) by scale factor 0.428993\nI1207 12:54:39.417800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87655 > 2) by scale factor 0.695278\nI1207 12:54:40.355226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24453 > 2) by scale factor 0.471194\nI1207 12:54:41.292376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15361 > 2) by scale factor 0.481509\nI1207 12:54:42.230180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89522 > 2) by scale factor 0.408562\nI1207 12:54:43.168112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11897 > 2) by scale factor 0.485558\nI1207 12:54:44.105113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28418 > 2) by scale factor 0.466834\nI1207 12:54:45.042740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94563 > 2) by scale factor 0.678971\nI1207 12:54:45.980682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35044 > 2) by scale factor 0.459724\nI1207 12:54:46.918581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56342 > 2) by scale factor 0.438267\nI1207 12:54:47.856356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69075 > 2) by scale factor 0.541895\nI1207 12:54:48.794095   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.76431 > 2) by scale factor 0.531306\nI1207 12:54:49.732378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61706 > 2) by scale factor 0.552936\nI1207 12:54:50.674893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12319 > 2) by scale factor 0.485062\nI1207 12:54:51.617966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36776 > 2) by scale factor 0.593867\nI1207 12:54:52.560513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47713 > 2) by scale factor 0.446715\nI1207 12:54:53.502924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7616 > 2) by scale factor 0.420027\nI1207 12:54:54.445156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93446 > 2) by scale factor 0.405313\nI1207 12:54:55.387238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63601 > 2) by scale factor 0.758724\nI1207 12:54:56.329450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91898 > 2) by scale factor 0.510337\nI1207 12:54:57.271726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09235 > 2) by scale factor 0.488717\nI1207 12:54:58.214119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94202 > 2) by scale factor 0.679805\nI1207 12:54:59.156486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79666 > 2) by scale factor 0.715139\nI1207 12:55:00.098424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69563 > 2) by scale factor 0.741941\nI1207 12:55:01.040220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04763 > 2) by scale factor 0.494117\nI1207 12:55:01.982873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.84208 > 2) by scale factor 0.413045\nI1207 12:55:02.925369   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.2277 > 2) by scale factor 0.382577\nI1207 12:55:03.867717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10465 > 2) by scale factor 0.3918\nI1207 12:55:04.809988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58209 > 2) by scale factor 0.774565\nI1207 12:55:05.752614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12683 > 2) by scale factor 0.390105\nI1207 12:55:06.694983   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88309 > 2) by scale factor 0.409577\nI1207 12:55:07.637238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95585 > 2) by scale factor 0.403564\nI1207 12:55:08.578881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02211 > 2) by scale factor 0.66179\nI1207 12:55:09.521389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88182 > 2) by scale factor 0.694006\nI1207 12:55:10.464009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82372 > 2) by scale factor 0.414618\nI1207 12:55:11.406373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15655 > 2) by scale factor 0.481168\nI1207 12:55:12.348666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8079 > 2) by scale factor 0.712275\nI1207 12:55:13.290729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04839 > 2) by scale factor 0.494023\nI1207 12:55:14.233005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47421 > 2) by scale factor 0.447006\nI1207 12:55:15.175465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62006 > 2) by scale factor 0.552477\nI1207 12:55:16.117414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.56375 > 2) by scale factor 0.438236\nI1207 12:55:17.059027   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80033 > 2) by scale factor 0.526271\nI1207 12:55:18.001708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8066 > 2) by scale factor 0.712606\nI1207 12:55:18.943778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61331 > 2) by scale factor 0.765313\nI1207 12:55:19.885875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03669 > 2) by scale factor 0.495456\nI1207 12:55:20.827570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96908 > 2) by scale factor 0.402489\nI1207 12:55:21.770035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84211 > 2) by scale factor 0.520547\nI1207 12:55:22.712011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98568 > 2) by scale factor 0.401149\nI1207 12:55:23.653831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64971 > 2) by scale factor 0.547988\nI1207 12:55:24.595803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78458 > 2) by scale factor 0.418009\nI1207 12:55:25.537086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0648 > 2) by scale factor 0.492029\nI1207 12:55:26.478873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10947 > 2) by scale factor 0.486681\nI1207 12:55:27.420609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52004 > 2) by scale factor 0.568175\nI1207 12:55:28.362201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31858 > 2) by scale factor 0.602667\nI1207 12:55:29.303954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59255 > 2) by scale factor 0.556708\nI1207 12:55:30.245929   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92039 > 2) by scale factor 0.510154\nI1207 12:55:31.187929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26114 > 2) by scale factor 0.469358\nI1207 12:55:32.129806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69004 > 2) by scale factor 0.743482\nI1207 12:55:33.071725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6694 > 2) by scale factor 0.749232\nI1207 12:55:34.953487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03346 > 2) by scale factor 0.983543\nI1207 12:55:35.895758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25444 > 2) by scale factor 0.470097\nI1207 12:55:36.837870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98298 > 2) by scale factor 0.670469\nI1207 12:55:37.780273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46281 > 2) by scale factor 0.812082\nI1207 12:55:38.722544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18223 > 2) by scale factor 0.628491\nI1207 12:55:39.663854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03443 > 2) by scale factor 0.659102\nI1207 12:55:40.605649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12737 > 2) by scale factor 0.639515\nI1207 12:55:41.546766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10593 > 2) by scale factor 0.64393\nI1207 12:55:42.488204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98293 > 2) by scale factor 0.502143\nI1207 12:55:43.429692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1931 > 2) by scale factor 0.476975\nI1207 12:55:44.371093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.19745 > 2) by scale factor 0.910146\nI1207 12:55:45.312904   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99437 > 2) by scale factor 0.500704\nI1207 12:55:46.255393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36197 > 2) by scale factor 0.59489\nI1207 12:55:47.197471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85931 > 2) by scale factor 0.518228\nI1207 12:55:48.139873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0991 > 2) by scale factor 0.645348\nI1207 12:55:49.081856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38133 > 2) by scale factor 0.456482\nI1207 12:55:50.023506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25336 > 2) by scale factor 0.887563\nI1207 12:55:50.965555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39816 > 2) by scale factor 0.833972\nI1207 12:55:51.908485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50757 > 2) by scale factor 0.443698\nI1207 12:55:52.850558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10266 > 2) by scale factor 0.487489\nI1207 12:55:53.792073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25449 > 2) by scale factor 0.614536\nI1207 12:55:54.734242   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91442 > 2) by scale factor 0.510931\nI1207 12:55:54.745461   369 solver.cpp:337] Iteration 17900, Testing net (#0)\nI1207 12:56:47.532590   369 solver.cpp:404]     Test net output #0: accuracy = 0.1097\nI1207 12:56:47.533164   369 solver.cpp:404]     Test net output #1: loss = 33.9665 (* 1 = 33.9665 loss)\nI1207 12:56:48.405526   369 solver.cpp:228] Iteration 17900, loss = 32.0917\nI1207 12:56:48.405567   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 
12:56:48.405592   369 solver.cpp:244]     Train net output #1: loss = 32.0917 (* 1 = 32.0917 loss)\nI1207 12:56:48.477522   369 sgd_solver.cpp:166] Iteration 17900, lr = 2.685\nI1207 12:56:48.487424   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68272 > 2) by scale factor 0.427102\nI1207 12:56:49.423455   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06171 > 2) by scale factor 0.492403\nI1207 12:56:50.359694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51839 > 2) by scale factor 0.442636\nI1207 12:56:51.296411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16845 > 2) by scale factor 0.631223\nI1207 12:56:52.233042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22941 > 2) by scale factor 0.472879\nI1207 12:56:53.170236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33138 > 2) by scale factor 0.600352\nI1207 12:56:54.107022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00684 > 2) by scale factor 0.499147\nI1207 12:56:55.044411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00501 > 2) by scale factor 0.665556\nI1207 12:56:55.982125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9161 > 2) by scale factor 0.685848\nI1207 12:56:56.919389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45123 > 2) by scale factor 0.579504\nI1207 12:56:57.856318   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53526 > 2) by scale factor 0.788875\nI1207 12:56:58.793186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87297 > 2) by scale factor 0.5164\nI1207 12:56:59.729804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06351 > 2) by scale factor 0.492185\nI1207 12:57:00.666457   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25796 > 2) by scale factor 0.613881\nI1207 12:57:01.603207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01414 > 2) by scale factor 0.992982\nI1207 12:57:02.540387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01981 > 2) by scale factor 0.662292\nI1207 12:57:03.477295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4326 > 2) by scale factor 0.582649\nI1207 12:57:04.414074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35294 > 2) by scale factor 0.596492\nI1207 12:57:05.350989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07346 > 2) by scale factor 0.650732\nI1207 12:57:06.288266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56119 > 2) by scale factor 0.56161\nI1207 12:57:07.225287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38658 > 2) by scale factor 0.455936\nI1207 12:57:08.162219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73964 > 2) by scale factor 0.730022\nI1207 12:57:09.099440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98822 > 2) by scale factor 0.669294\nI1207 12:57:10.035856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60669 > 2) by scale factor 0.767258\nI1207 12:57:10.972072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70964 > 2) by scale factor 0.738106\nI1207 12:57:11.909268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21057 > 2) by scale factor 0.474996\nI1207 12:57:12.846388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28175 > 2) by scale factor 0.467099\nI1207 12:57:13.783308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.81562 > 2) by scale factor 0.524161\nI1207 12:57:14.720273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56662 > 2) by scale factor 0.43796\nI1207 12:57:15.657438   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.401 > 2) by scale factor 0.832986\nI1207 12:57:16.594480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30005 > 2) by scale factor 0.465111\nI1207 12:57:17.531330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06138 > 2) by scale factor 0.6533\nI1207 12:57:18.468377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91942 > 2) by scale factor 0.685069\nI1207 12:57:19.407457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87758 > 2) by scale factor 0.695028\nI1207 12:57:20.350219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26919 > 2) by scale factor 0.468473\nI1207 12:57:21.292692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00671 > 2) by scale factor 0.665179\nI1207 12:57:22.235368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98629 > 2) by scale factor 0.501719\nI1207 12:57:23.178087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39066 > 2) by scale factor 0.83659\nI1207 12:57:24.120301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39108 > 2) by scale factor 0.836441\nI1207 12:57:25.063019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60346 > 2) by scale factor 0.555022\nI1207 12:57:26.005348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48633 > 2) by scale factor 0.573669\nI1207 12:57:26.947506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00812 > 2) by scale factor 0.498988\nI1207 12:57:27.890336   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86651 > 2) by scale factor 0.697712\nI1207 12:57:28.833168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51761 > 2) by scale factor 0.794403\nI1207 12:57:29.776043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78005 > 2) by scale factor 0.719411\nI1207 12:57:30.718412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12251 > 2) by scale factor 0.485141\nI1207 12:57:31.660677   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88495 > 2) by scale factor 0.514807\nI1207 12:57:32.603201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88163 > 2) by scale factor 0.694052\nI1207 12:57:33.545053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33551 > 2) by scale factor 0.461307\nI1207 12:57:34.486868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91775 > 2) by scale factor 0.68546\nI1207 12:57:35.429563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96149 > 2) by scale factor 0.504861\nI1207 12:57:36.371659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9883 > 2) by scale factor 0.669277\nI1207 12:57:37.313379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51668 > 2) by scale factor 0.794698\nI1207 12:57:39.195276   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0324 > 2) by scale factor 0.984056\nI1207 12:57:40.137338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63326 > 2) by scale factor 0.759513\nI1207 12:57:41.078850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05478 > 2) by scale factor 0.493245\nI1207 12:57:42.021184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.25696 > 2) by scale factor 0.614069\nI1207 12:57:42.962872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79492 > 2) by scale factor 0.715584\nI1207 12:57:43.904726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5015 > 2) by scale factor 0.571184\nI1207 12:57:44.846222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63345 > 2) by scale factor 0.550442\nI1207 12:57:45.788077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5231 > 2) by scale factor 0.567683\nI1207 12:57:46.729625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96259 > 2) by scale factor 0.675085\nI1207 12:57:47.671682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.896 > 2) by scale factor 0.513348\nI1207 12:57:48.612953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63931 > 2) by scale factor 0.549554\nI1207 12:57:49.555570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51819 > 2) by scale factor 0.794221\nI1207 12:57:50.496748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34644 > 2) by scale factor 0.59765\nI1207 12:57:51.438962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16477 > 2) by scale factor 0.923884\nI1207 12:57:52.380486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82318 > 2) by scale factor 0.708422\nI1207 12:57:53.321808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70668 > 2) by scale factor 0.424928\nI1207 12:57:54.263845   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54178 > 2) by scale factor 0.440356\nI1207 12:57:55.205354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14169 > 2) by scale factor 0.482895\nI1207 12:57:56.147253   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6109 > 2) by scale factor 0.553879\nI1207 12:57:57.088685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89269 > 2) by scale factor 0.691397\nI1207 12:57:58.030686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03212 > 2) by scale factor 0.496017\nI1207 12:57:58.972384   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91233 > 2) by scale factor 0.511204\nI1207 12:57:59.914099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89755 > 2) by scale factor 0.690238\nI1207 12:58:00.855666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87924 > 2) by scale factor 0.694628\nI1207 12:58:01.797425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67803 > 2) by scale factor 0.543769\nI1207 12:58:02.739120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60295 > 2) by scale factor 0.768359\nI1207 12:58:03.681099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51195 > 2) by scale factor 0.796193\nI1207 12:58:04.623059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07062 > 2) by scale factor 0.491326\nI1207 12:58:05.564950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28136 > 2) by scale factor 0.467141\nI1207 12:58:06.506733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2666 > 2) by scale factor 0.468757\nI1207 12:58:07.448215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15937 > 2) by scale factor 0.480842\nI1207 12:58:08.390233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56087 > 2) by scale factor 0.780986\nI1207 12:58:09.331981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.33087 > 2) by scale factor 0.461801\nI1207 12:58:10.274286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45703 > 2) by scale factor 0.813992\nI1207 12:58:11.216461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1567 > 2) by scale factor 0.481151\nI1207 12:58:12.158396   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66192 > 2) by scale factor 0.429007\nI1207 12:58:13.099448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62462 > 2) by scale factor 0.432468\nI1207 12:58:14.040750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24688 > 2) by scale factor 0.615976\nI1207 12:58:14.982023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86871 > 2) by scale factor 0.516968\nI1207 12:58:15.923981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0151 > 2) by scale factor 0.49812\nI1207 12:58:16.865429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44963 > 2) by scale factor 0.449476\nI1207 12:58:17.807201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35667 > 2) by scale factor 0.595829\nI1207 12:58:18.748522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46516 > 2) by scale factor 0.577173\nI1207 12:58:19.690379   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30548 > 2) by scale factor 0.464525\nI1207 12:58:20.631945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44798 > 2) by scale factor 0.58005\nI1207 12:58:21.573835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18379 > 2) by scale factor 0.628182\nI1207 12:58:21.585153   369 solver.cpp:337] Iteration 18000, Testing net (#0)\nI1207 12:59:14.348510   369 solver.cpp:404]     Test net output #0: accuracy = 
0.1277\nI1207 12:59:14.349139   369 solver.cpp:404]     Test net output #1: loss = 19.6807 (* 1 = 19.6807 loss)\nI1207 12:59:15.220196   369 solver.cpp:228] Iteration 18000, loss = 17.5729\nI1207 12:59:15.220247   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 12:59:15.220274   369 solver.cpp:244]     Train net output #1: loss = 17.5729 (* 1 = 17.5729 loss)\nI1207 12:59:15.300393   369 sgd_solver.cpp:166] Iteration 18000, lr = 2.7\nI1207 12:59:15.310583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60768 > 2) by scale factor 0.434058\nI1207 12:59:16.248133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.41078 > 2) by scale factor 0.369633\nI1207 12:59:17.185356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.55599 > 2) by scale factor 0.359972\nI1207 12:59:18.122308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05321 > 2) by scale factor 0.395788\nI1207 12:59:19.059747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52796 > 2) by scale factor 0.4417\nI1207 12:59:19.996255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06857 > 2) by scale factor 0.394589\nI1207 12:59:20.933071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06815 > 2) by scale factor 0.651859\nI1207 12:59:21.870373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68231 > 2) by scale factor 0.745625\nI1207 12:59:22.807747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51911 > 2) by scale factor 0.442565\nI1207 12:59:23.744940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.55059 > 2) by scale factor 0.360322\nI1207 12:59:24.681906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0183 > 2) by scale factor 0.398541\nI1207 12:59:25.619480   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65347 > 2) by scale factor 0.753729\nI1207 12:59:26.556406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33923 > 2) by scale factor 0.59894\nI1207 12:59:27.493932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.432 > 2) by scale factor 0.368188\nI1207 12:59:28.430449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89016 > 2) by scale factor 0.408984\nI1207 12:59:29.367815   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92971 > 2) by scale factor 0.508943\nI1207 12:59:30.305481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76982 > 2) by scale factor 0.419303\nI1207 12:59:31.242429   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47377 > 2) by scale factor 0.447051\nI1207 12:59:32.180246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37213 > 2) by scale factor 0.457443\nI1207 12:59:33.117446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.42001 > 2) by scale factor 0.369003\nI1207 12:59:34.054589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52453 > 2) by scale factor 0.567452\nI1207 12:59:34.991958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22002 > 2) by scale factor 0.473931\nI1207 12:59:35.929594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22776 > 2) by scale factor 0.619624\nI1207 12:59:36.866839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29883 > 2) by scale factor 0.606276\nI1207 12:59:37.804812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26386 > 2) by scale factor 0.612771\nI1207 12:59:38.741925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.76317 > 2) by scale factor 0.531467\nI1207 12:59:39.678926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82906 > 2) by scale factor 0.706949\nI1207 12:59:40.616366   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31074 > 2) by scale factor 0.463958\nI1207 12:59:41.553768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70855 > 2) by scale factor 0.738402\nI1207 12:59:42.490831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82984 > 2) by scale factor 0.522214\nI1207 12:59:43.428236   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00236 > 2) by scale factor 0.666143\nI1207 12:59:44.365947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23517 > 2) by scale factor 0.618205\nI1207 12:59:45.308156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75052 > 2) by scale factor 0.53326\nI1207 12:59:46.250882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90241 > 2) by scale factor 0.512504\nI1207 12:59:47.193569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07443 > 2) by scale factor 0.964122\nI1207 12:59:48.135437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4863 > 2) by scale factor 0.573674\nI1207 12:59:50.017298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29589 > 2) by scale factor 0.87112\nI1207 12:59:50.959383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38587 > 2) by scale factor 0.838269\nI1207 12:59:51.901588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4971 > 2) by scale factor 0.571903\nI1207 12:59:52.843613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57048 > 2) by scale factor 0.560149\nI1207 12:59:53.785881   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29116 > 2) by scale factor 0.872921\nI1207 12:59:54.727915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19228 > 2) by scale factor 0.626512\nI1207 12:59:55.670133   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92427 > 2) by scale factor 0.509649\nI1207 12:59:56.612275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27336 > 2) by scale factor 0.468015\nI1207 12:59:57.554540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36848 > 2) by scale factor 0.59374\nI1207 12:59:58.496903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84506 > 2) by scale factor 0.702972\nI1207 12:59:59.439535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26679 > 2) by scale factor 0.612222\nI1207 13:00:00.381458   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92953 > 2) by scale factor 0.508967\nI1207 13:00:01.322821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93461 > 2) by scale factor 0.50831\nI1207 13:00:02.264997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1497 > 2) by scale factor 0.63498\nI1207 13:00:03.207803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86956 > 2) by scale factor 0.696971\nI1207 13:00:04.150085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80222 > 2) by scale factor 0.713719\nI1207 13:00:05.091987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73855 > 2) by scale factor 0.730312\nI1207 13:00:06.034441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18733 > 2) by scale factor 0.627485\nI1207 13:00:06.976794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.01179 > 2) by scale factor 0.498531\nI1207 13:00:07.919315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64691 > 2) by scale factor 0.548409\nI1207 13:00:08.861037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76375 > 2) by scale factor 0.531385\nI1207 13:00:09.803380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03881 > 2) by scale factor 0.495195\nI1207 13:00:10.745901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.84665 > 2) by scale factor 0.412656\nI1207 13:00:11.688174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68288 > 2) by scale factor 0.427088\nI1207 13:00:12.630308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62685 > 2) by scale factor 0.761367\nI1207 13:00:13.573344   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69325 > 2) by scale factor 0.742596\nI1207 13:00:14.516222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71929 > 2) by scale factor 0.735486\nI1207 13:00:15.460044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23342 > 2) by scale factor 0.472431\nI1207 13:00:16.402812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63046 > 2) by scale factor 0.431922\nI1207 13:00:17.345412   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91705 > 2) by scale factor 0.406748\nI1207 13:00:18.286906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78885 > 2) by scale factor 0.527865\nI1207 13:00:19.228950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40617 > 2) by scale factor 0.58717\nI1207 13:00:20.170789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25036 > 2) by scale factor 0.470548\nI1207 13:00:21.113106   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60729 > 2) by scale factor 0.767081\nI1207 13:00:22.055857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72607 > 2) by scale factor 0.536758\nI1207 13:00:22.997757   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55663 > 2) by scale factor 0.782279\nI1207 13:00:23.940345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59155 > 2) by scale factor 0.556863\nI1207 13:00:24.882087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54344 > 2) by scale factor 0.786337\nI1207 13:00:25.823586   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58794 > 2) by scale factor 0.435926\nI1207 13:00:26.765691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80607 > 2) by scale factor 0.525477\nI1207 13:00:27.708598   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19053 > 2) by scale factor 0.477267\nI1207 13:00:28.650271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8552 > 2) by scale factor 0.700477\nI1207 13:00:29.593602   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44198 > 2) by scale factor 0.819007\nI1207 13:00:30.536597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34391 > 2) by scale factor 0.460415\nI1207 13:00:31.479199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70396 > 2) by scale factor 0.539963\nI1207 13:00:32.423019   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9541 > 2) by scale factor 0.505804\nI1207 13:00:33.366442   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83595 > 2) by scale factor 0.521384\nI1207 13:00:34.310510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.67705 > 2) by scale factor 0.42762\nI1207 13:00:35.254065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21054 > 2) by scale factor 0.622948\nI1207 13:00:36.197942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52863 > 2) by scale factor 0.566792\nI1207 13:00:37.141345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05092 > 2) by scale factor 0.655539\nI1207 13:00:38.084693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56378 > 2) by scale factor 0.561202\nI1207 13:00:39.028090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35142 > 2) by scale factor 0.850549\nI1207 13:00:39.971657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89811 > 2) by scale factor 0.690104\nI1207 13:00:40.915350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77333 > 2) by scale factor 0.530036\nI1207 13:00:41.858750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56674 > 2) by scale factor 0.437949\nI1207 13:00:42.802275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05433 > 2) by scale factor 0.973555\nI1207 13:00:43.745934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97256 > 2) by scale factor 0.67282\nI1207 13:00:44.689446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31891 > 2) by scale factor 0.46308\nI1207 13:00:45.633860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65026 > 2) by scale factor 0.430084\nI1207 13:00:46.577600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69221 > 2) by scale factor 0.742885\nI1207 13:00:47.521536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36014 > 2) by scale factor 0.458701\nI1207 13:00:48.465659   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52633 > 2) by scale factor 0.441859\nI1207 13:00:48.477721   369 solver.cpp:337] Iteration 18100, Testing net (#0)\nI1207 13:01:41.353472   369 solver.cpp:404]     Test net output #0: accuracy = 0.1807\nI1207 13:01:41.354110   369 solver.cpp:404]     Test net output #1: loss = 29.722 (* 1 = 29.722 loss)\nI1207 13:01:42.226969   369 solver.cpp:228] Iteration 18100, loss = 31.7561\nI1207 13:01:42.227010   369 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1207 13:01:42.227037   369 solver.cpp:244]     Train net output #1: loss = 31.7561 (* 1 = 31.7561 loss)\nI1207 13:01:42.297720   369 sgd_solver.cpp:166] Iteration 18100, lr = 2.715\nI1207 13:01:42.307850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56774 > 2) by scale factor 0.56058\nI1207 13:01:43.246166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59791 > 2) by scale factor 0.555879\nI1207 13:01:44.184298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47214 > 2) by scale factor 0.447213\nI1207 13:01:45.122391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70173 > 2) by scale factor 0.425375\nI1207 13:01:46.060534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48712 > 2) by scale factor 0.573539\nI1207 13:01:46.998423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67068 > 2) by scale factor 0.544858\nI1207 13:01:47.936209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85148 > 2) by scale factor 0.519281\nI1207 13:01:48.873597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40307 > 2) by scale factor 0.587705\nI1207 13:01:49.810873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79661 > 2) by scale factor 0.526786\nI1207 13:01:50.747890   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00239 > 2) by scale factor 0.499701\nI1207 13:01:51.685284   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17187 > 2) by scale factor 0.479401\nI1207 13:01:52.622166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72353 > 2) by scale factor 0.423412\nI1207 13:01:53.560021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86979 > 2) by scale factor 0.516824\nI1207 13:01:54.497635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91933 > 2) by scale factor 0.685088\nI1207 13:01:55.435755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54059 > 2) by scale factor 0.440471\nI1207 13:01:56.373651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66079 > 2) by scale factor 0.429112\nI1207 13:01:57.312177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7215 > 2) by scale factor 0.537418\nI1207 13:01:58.249843   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14843 > 2) by scale factor 0.635238\nI1207 13:01:59.187723   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75279 > 2) by scale factor 0.726536\nI1207 13:02:00.125988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36186 > 2) by scale factor 0.594909\nI1207 13:02:01.063500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99694 > 2) by scale factor 0.500383\nI1207 13:02:02.001322   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83121 > 2) by scale factor 0.522028\nI1207 13:02:02.938906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09387 > 2) by scale factor 0.488535\nI1207 13:02:03.876309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.1069 > 2) by scale factor 0.486986\nI1207 13:02:04.814432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37808 > 2) by scale factor 0.592052\nI1207 13:02:05.752501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10049 > 2) by scale factor 0.952158\nI1207 13:02:06.690433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0095 > 2) by scale factor 0.498815\nI1207 13:02:07.629009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54602 > 2) by scale factor 0.564013\nI1207 13:02:08.566998   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86318 > 2) by scale factor 0.517708\nI1207 13:02:09.504822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66794 > 2) by scale factor 0.545265\nI1207 13:02:10.443469   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11671 > 2) by scale factor 0.641702\nI1207 13:02:11.381397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5519 > 2) by scale factor 0.439377\nI1207 13:02:12.319875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91259 > 2) by scale factor 0.407117\nI1207 13:02:13.257577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57662 > 2) by scale factor 0.437004\nI1207 13:02:14.195531   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85134 > 2) by scale factor 0.519299\nI1207 13:02:15.136234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79544 > 2) by scale factor 0.417063\nI1207 13:02:16.079468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97069 > 2) by scale factor 0.402358\nI1207 13:02:17.022488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23944 > 2) by scale factor 0.61739\nI1207 13:02:17.965216   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71212 > 2) by scale factor 0.538776\nI1207 13:02:18.908653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79986 > 2) by scale factor 0.526335\nI1207 13:02:19.852102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30502 > 2) by scale factor 0.60514\nI1207 13:02:20.795332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06481 > 2) by scale factor 0.394881\nI1207 13:02:21.738690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37917 > 2) by scale factor 0.456707\nI1207 13:02:22.682258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73799 > 2) by scale factor 0.42212\nI1207 13:02:23.625756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79692 > 2) by scale factor 0.416934\nI1207 13:02:24.568852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71964 > 2) by scale factor 0.423761\nI1207 13:02:25.512367   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31585 > 2) by scale factor 0.463409\nI1207 13:02:26.455510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02076 > 2) by scale factor 0.662085\nI1207 13:02:27.398653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85999 > 2) by scale factor 0.518136\nI1207 13:02:28.342155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02827 > 2) by scale factor 0.660443\nI1207 13:02:29.285173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08704 > 2) by scale factor 0.647869\nI1207 13:02:30.228128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62162 > 2) by scale factor 0.762886\nI1207 13:02:31.171260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.04205 > 2) by scale factor 0.494798\nI1207 13:02:32.114171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27815 > 2) by scale factor 0.467491\nI1207 13:02:33.057483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68213 > 2) by scale factor 0.543163\nI1207 13:02:33.999989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0624 > 2) by scale factor 0.49232\nI1207 13:02:34.942747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06897 > 2) by scale factor 0.491525\nI1207 13:02:35.885576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19439 > 2) by scale factor 0.911416\nI1207 13:02:36.828649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45497 > 2) by scale factor 0.578876\nI1207 13:02:37.771575   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85826 > 2) by scale factor 0.518369\nI1207 13:02:38.714829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00201 > 2) by scale factor 0.499749\nI1207 13:02:39.657986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80849 > 2) by scale factor 0.525142\nI1207 13:02:40.601585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1997 > 2) by scale factor 0.625059\nI1207 13:02:41.544915   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60422 > 2) by scale factor 0.767985\nI1207 13:02:42.488706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32659 > 2) by scale factor 0.859627\nI1207 13:02:43.431620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73609 > 2) by scale factor 0.535319\nI1207 13:02:44.374508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6359 > 2) by scale factor 0.55007\nI1207 13:02:45.317704   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34331 > 2) by scale factor 0.59821\nI1207 13:02:46.260495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70339 > 2) by scale factor 0.540045\nI1207 13:02:47.203213   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63955 > 2) by scale factor 0.757705\nI1207 13:02:48.146077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17105 > 2) by scale factor 0.630705\nI1207 13:02:49.088979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75669 > 2) by scale factor 0.725508\nI1207 13:02:50.031896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08909 > 2) by scale factor 0.489106\nI1207 13:02:50.975320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23172 > 2) by scale factor 0.472621\nI1207 13:02:51.918056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90358 > 2) by scale factor 0.407865\nI1207 13:02:52.860713   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09169 > 2) by scale factor 0.488796\nI1207 13:02:53.804227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77272 > 2) by scale factor 0.530121\nI1207 13:02:54.747234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22088 > 2) by scale factor 0.473835\nI1207 13:02:55.690117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8589 > 2) by scale factor 0.69957\nI1207 13:02:56.632926   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47547 > 2) by scale factor 0.575461\nI1207 13:02:57.575292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07466 > 2) by scale factor 0.490839\nI1207 13:02:58.518065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.43163 > 2) by scale factor 0.582814\nI1207 13:02:59.460991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27058 > 2) by scale factor 0.611513\nI1207 13:03:00.404251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05879 > 2) by scale factor 0.492757\nI1207 13:03:01.347446   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49538 > 2) by scale factor 0.444901\nI1207 13:03:02.290108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72293 > 2) by scale factor 0.423466\nI1207 13:03:03.232610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65842 > 2) by scale factor 0.42933\nI1207 13:03:04.175550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38232 > 2) by scale factor 0.456379\nI1207 13:03:05.118227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6775 > 2) by scale factor 0.427579\nI1207 13:03:06.060791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0073 > 2) by scale factor 0.665048\nI1207 13:03:07.004045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08454 > 2) by scale factor 0.648395\nI1207 13:03:07.947357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89292 > 2) by scale factor 0.691342\nI1207 13:03:08.890277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75355 > 2) by scale factor 0.532829\nI1207 13:03:09.832901   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48249 > 2) by scale factor 0.805643\nI1207 13:03:10.775897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9823 > 2) by scale factor 0.670623\nI1207 13:03:11.718896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.609 > 2) by scale factor 0.554171\nI1207 13:03:12.662140   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.818 > 2) by scale factor 0.523834\nI1207 13:03:13.605134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44838 > 2) by scale factor 0.579982\nI1207 13:03:14.547690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93475 > 2) by scale factor 0.681489\nI1207 13:03:15.490837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99876 > 2) by scale factor 0.500155\nI1207 13:03:15.502777   369 solver.cpp:337] Iteration 18200, Testing net (#0)\nI1207 13:04:08.172370   369 solver.cpp:404]     Test net output #0: accuracy = 0.11315\nI1207 13:04:08.172994   369 solver.cpp:404]     Test net output #1: loss = 22.725 (* 1 = 22.725 loss)\nI1207 13:04:09.044761   369 solver.cpp:228] Iteration 18200, loss = 23.1226\nI1207 13:04:09.044813   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 13:04:09.044833   369 solver.cpp:244]     Train net output #1: loss = 23.1226 (* 1 = 23.1226 loss)\nI1207 13:04:09.120929   369 sgd_solver.cpp:166] Iteration 18200, lr = 2.73\nI1207 13:04:09.131074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55267 > 2) by scale factor 0.439303\nI1207 13:04:10.068923   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2006 > 2) by scale factor 0.908844\nI1207 13:04:11.006846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71896 > 2) by scale factor 0.537785\nI1207 13:04:11.944664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47916 > 2) by scale factor 0.806724\nI1207 13:04:12.881688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25344 > 2) by scale factor 0.470207\nI1207 13:04:13.818951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50771 > 2) by scale factor 0.797541\nI1207 13:04:14.756768   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8043 > 2) by scale factor 0.71319\nI1207 13:04:15.694331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06985 > 2) by scale factor 0.651498\nI1207 13:04:16.631542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71971 > 2) by scale factor 0.537676\nI1207 13:04:17.569140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93904 > 2) by scale factor 0.680494\nI1207 13:04:18.507274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59071 > 2) by scale factor 0.435663\nI1207 13:04:19.445128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26216 > 2) by scale factor 0.380072\nI1207 13:04:20.382571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33637 > 2) by scale factor 0.461215\nI1207 13:04:21.320466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62571 > 2) by scale factor 0.761698\nI1207 13:04:22.257962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03227 > 2) by scale factor 0.659571\nI1207 13:04:23.196142   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89793 > 2) by scale factor 0.690147\nI1207 13:04:24.133978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4135 > 2) by scale factor 0.453155\nI1207 13:04:25.071518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88796 > 2) by scale factor 0.692531\nI1207 13:04:26.009223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45939 > 2) by scale factor 0.578136\nI1207 13:04:26.946768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89588 > 2) by scale factor 0.513362\nI1207 13:04:27.884495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.36075 > 2) by scale factor 0.595105\nI1207 13:04:28.822572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59207 > 2) by scale factor 0.771585\nI1207 13:04:29.760543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51328 > 2) by scale factor 0.569269\nI1207 13:04:30.698173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.464 > 2) by scale factor 0.811687\nI1207 13:04:31.636198   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87535 > 2) by scale factor 0.516083\nI1207 13:04:32.574452   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54904 > 2) by scale factor 0.784609\nI1207 13:04:33.512401   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82389 > 2) by scale factor 0.414603\nI1207 13:04:34.450294   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19894 > 2) by scale factor 0.476311\nI1207 13:04:35.388108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49036 > 2) by scale factor 0.803097\nI1207 13:04:36.326292   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55137 > 2) by scale factor 0.563164\nI1207 13:04:37.264103   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07783 > 2) by scale factor 0.649809\nI1207 13:04:38.202023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14909 > 2) by scale factor 0.635105\nI1207 13:04:39.139986   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35122 > 2) by scale factor 0.596798\nI1207 13:04:40.077850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3103 > 2) by scale factor 0.464005\nI1207 13:04:41.015522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55314 > 2) by scale factor 0.439257\nI1207 13:04:41.953392   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12114 > 2) by scale factor 0.485303\nI1207 13:04:42.894834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30691 > 2) by scale factor 0.866961\nI1207 13:04:43.837740   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79038 > 2) by scale factor 0.527651\nI1207 13:04:44.780510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21313 > 2) by scale factor 0.474706\nI1207 13:04:45.723188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22548 > 2) by scale factor 0.473319\nI1207 13:04:46.665454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94508 > 2) by scale factor 0.506961\nI1207 13:04:47.608255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21549 > 2) by scale factor 0.62199\nI1207 13:04:48.551137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74349 > 2) by scale factor 0.728999\nI1207 13:04:49.494354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41923 > 2) by scale factor 0.584927\nI1207 13:04:50.437466   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94684 > 2) by scale factor 0.506734\nI1207 13:04:51.380463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16329 > 2) by scale factor 0.92452\nI1207 13:04:52.323467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02169 > 2) by scale factor 0.661881\nI1207 13:04:53.266417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02284 > 2) by scale factor 0.661629\nI1207 13:04:54.209265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29537 > 2) by scale factor 0.465617\nI1207 13:04:55.152114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.76801 > 2) by scale factor 0.530784\nI1207 13:04:56.094858   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64529 > 2) by scale factor 0.430544\nI1207 13:04:57.037715   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05308 > 2) by scale factor 0.655076\nI1207 13:04:57.980403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44678 > 2) by scale factor 0.580252\nI1207 13:04:58.922865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95033 > 2) by scale factor 0.506287\nI1207 13:04:59.865803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05862 > 2) by scale factor 0.492778\nI1207 13:05:00.808706   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20247 > 2) by scale factor 0.624518\nI1207 13:05:01.751492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89516 > 2) by scale factor 0.690809\nI1207 13:05:02.693958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81093 > 2) by scale factor 0.524806\nI1207 13:05:03.636934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94475 > 2) by scale factor 0.507003\nI1207 13:05:04.579967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11985 > 2) by scale factor 0.641057\nI1207 13:05:05.522895   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05882 > 2) by scale factor 0.653847\nI1207 13:05:06.465943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52998 > 2) by scale factor 0.79052\nI1207 13:05:07.409102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32356 > 2) by scale factor 0.601764\nI1207 13:05:08.352000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58216 > 2) by scale factor 0.558322\nI1207 13:05:09.295343   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95086 > 2) by scale factor 0.506218\nI1207 13:05:10.238824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.42789 > 2) by scale factor 0.368468\nI1207 13:05:11.182130   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34202 > 2) by scale factor 0.598441\nI1207 13:05:12.124966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.43409 > 2) by scale factor 0.368047\nI1207 13:05:13.067375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.7163 > 2) by scale factor 0.349876\nI1207 13:05:14.009522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9587 > 2) by scale factor 0.505216\nI1207 13:05:14.951946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63389 > 2) by scale factor 0.550375\nI1207 13:05:15.894392   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52419 > 2) by scale factor 0.567507\nI1207 13:05:16.837321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82685 > 2) by scale factor 0.522623\nI1207 13:05:17.779655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86037 > 2) by scale factor 0.411491\nI1207 13:05:18.722781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20539 > 2) by scale factor 0.623949\nI1207 13:05:19.665311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36588 > 2) by scale factor 0.458097\nI1207 13:05:20.608224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06504 > 2) by scale factor 0.652519\nI1207 13:05:21.551116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28623 > 2) by scale factor 0.6086\nI1207 13:05:22.493512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.5135 > 2) by scale factor 0.569233\nI1207 13:05:23.436094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20069 > 2) by scale factor 0.476112\nI1207 13:05:24.378885   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20458 > 2) by scale factor 0.475671\nI1207 13:05:25.321651   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96257 > 2) by scale factor 0.67509\nI1207 13:05:26.264957   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05428 > 2) by scale factor 0.654818\nI1207 13:05:27.207881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11165 > 2) by scale factor 0.391263\nI1207 13:05:28.150846   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62801 > 2) by scale factor 0.432152\nI1207 13:05:29.093250   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58185 > 2) by scale factor 0.436505\nI1207 13:05:30.036113   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17939 > 2) by scale factor 0.478539\nI1207 13:05:30.978860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64118 > 2) by scale factor 0.430925\nI1207 13:05:31.921304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93515 > 2) by scale factor 0.681396\nI1207 13:05:32.864290   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45753 > 2) by scale factor 0.813825\nI1207 13:05:33.806867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75593 > 2) by scale factor 0.532491\nI1207 13:05:34.749545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17981 > 2) by scale factor 0.478491\nI1207 13:05:35.692054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39886 > 2) by scale factor 0.833729\nI1207 13:05:36.634681   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93352 > 2) by scale factor 0.681775\nI1207 13:05:37.577497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11351 > 2) by scale factor 0.486203\nI1207 13:05:38.519951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06332 > 2) by scale factor 0.652885\nI1207 13:05:39.463032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22239 > 2) by scale factor 0.473665\nI1207 13:05:40.405656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52776 > 2) by scale factor 0.566932\nI1207 13:05:41.348546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41285 > 2) by scale factor 0.586021\nI1207 13:05:42.291388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50522 > 2) by scale factor 0.44393\nI1207 13:05:42.303340   369 solver.cpp:337] Iteration 18300, Testing net (#0)\nI1207 13:06:34.921197   369 solver.cpp:404]     Test net output #0: accuracy = 0.1376\nI1207 13:06:34.921778   369 solver.cpp:404]     Test net output #1: loss = 14.974 (* 1 = 14.974 loss)\nI1207 13:06:35.809149   369 solver.cpp:228] Iteration 18300, loss = 16.5836\nI1207 13:06:35.809190   369 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1207 13:06:35.809209   369 solver.cpp:244]     Train net output #1: loss = 16.5836 (* 1 = 16.5836 loss)\nI1207 13:06:35.870127   369 sgd_solver.cpp:166] Iteration 18300, lr = 2.745\nI1207 13:06:35.880239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82063 > 2) by scale factor 0.709062\nI1207 13:06:36.817559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13551 > 2) by scale factor 0.483616\nI1207 13:06:37.754055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37812 > 2) by scale factor 0.592045\nI1207 13:06:38.690395   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15245 > 2) by scale factor 0.634428\nI1207 13:06:39.626890   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72171 > 2) by scale factor 0.734832\nI1207 13:06:40.563045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13554 > 2) by scale factor 0.637848\nI1207 13:06:41.500053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81645 > 2) by scale factor 0.710113\nI1207 13:06:42.436607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07256 > 2) by scale factor 0.491091\nI1207 13:06:43.372768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25906 > 2) by scale factor 0.613673\nI1207 13:06:44.309310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88836 > 2) by scale factor 0.514356\nI1207 13:06:45.246273   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05108 > 2) by scale factor 0.493695\nI1207 13:06:46.183125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14 > 2) by scale factor 0.483092\nI1207 13:06:47.120522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29821 > 2) by scale factor 0.46531\nI1207 13:06:48.057498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12671 > 2) by scale factor 0.484647\nI1207 13:06:48.993595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37836 > 2) by scale factor 0.456792\nI1207 13:06:49.930840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99592 > 2) by scale factor 0.667574\nI1207 13:06:50.867861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23173 > 2) by scale factor 0.47262\nI1207 13:06:51.805074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.21226 > 2) by scale factor 0.904053\nI1207 13:06:52.741982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84488 > 2) by scale factor 0.703017\nI1207 13:06:53.679004   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8096 > 2) by scale factor 0.52499\nI1207 13:06:54.615835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31301 > 2) by scale factor 0.60368\nI1207 13:06:55.552762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41699 > 2) by scale factor 0.452797\nI1207 13:06:56.489681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22999 > 2) by scale factor 0.619198\nI1207 13:06:57.426632   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49001 > 2) by scale factor 0.803209\nI1207 13:06:58.363502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74626 > 2) by scale factor 0.533865\nI1207 13:06:59.300269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5955 > 2) by scale factor 0.55625\nI1207 13:07:00.236613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01242 > 2) by scale factor 0.663918\nI1207 13:07:01.173614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98684 > 2) by scale factor 0.669604\nI1207 13:07:02.109228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67989 > 2) by scale factor 0.427361\nI1207 13:07:03.045308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0125 > 2) by scale factor 0.498442\nI1207 13:07:03.982875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80049 > 2) by scale factor 0.526248\nI1207 13:07:04.919472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43948 > 2) by scale factor 0.450503\nI1207 13:07:05.855901   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07661 > 2) by scale factor 0.490603\nI1207 13:07:06.793422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45621 > 2) by scale factor 0.578669\nI1207 13:07:07.733839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88908 > 2) by scale factor 0.692261\nI1207 13:07:08.677206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84135 > 2) by scale factor 0.703892\nI1207 13:07:09.619933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9122 > 2) by scale factor 0.686765\nI1207 13:07:10.561945   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.001 > 2) by scale factor 0.499875\nI1207 13:07:11.504775   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23212 > 2) by scale factor 0.618788\nI1207 13:07:12.448047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52087 > 2) by scale factor 0.442393\nI1207 13:07:13.391083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70052 > 2) by scale factor 0.740598\nI1207 13:07:14.334017   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19466 > 2) by scale factor 0.476796\nI1207 13:07:15.277227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68292 > 2) by scale factor 0.427084\nI1207 13:07:16.220124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98317 > 2) by scale factor 0.670428\nI1207 13:07:17.162583   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62334 > 2) by scale factor 0.762388\nI1207 13:07:18.104259   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22023 > 2) by scale factor 0.621074\nI1207 13:07:19.045433   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.44516 > 2) by scale factor 0.449928\nI1207 13:07:19.987555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48848 > 2) by scale factor 0.573316\nI1207 13:07:20.930600   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21041 > 2) by scale factor 0.475013\nI1207 13:07:21.873694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33049 > 2) by scale factor 0.85819\nI1207 13:07:22.816510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15908 > 2) by scale factor 0.633096\nI1207 13:07:23.759335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83704 > 2) by scale factor 0.70496\nI1207 13:07:24.701808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5835 > 2) by scale factor 0.774143\nI1207 13:07:25.643908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59569 > 2) by scale factor 0.43519\nI1207 13:07:26.586285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66 > 2) by scale factor 0.429185\nI1207 13:07:27.527729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91387 > 2) by scale factor 0.686372\nI1207 13:07:28.469866   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67276 > 2) by scale factor 0.428013\nI1207 13:07:29.412916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96908 > 2) by scale factor 0.673609\nI1207 13:07:30.355727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89612 > 2) by scale factor 0.513331\nI1207 13:07:31.298681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52661 > 2) by scale factor 0.441832\nI1207 13:07:32.241530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01626 > 2) by scale factor 0.663073\nI1207 13:07:33.184351   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94267 > 2) by scale factor 0.679655\nI1207 13:07:34.127790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52738 > 2) by scale factor 0.441756\nI1207 13:07:35.070641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12613 > 2) by scale factor 0.639769\nI1207 13:07:36.013756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57214 > 2) by scale factor 0.777563\nI1207 13:07:36.957119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12143 > 2) by scale factor 0.485268\nI1207 13:07:37.899798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24128 > 2) by scale factor 0.471555\nI1207 13:07:38.842726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26713 > 2) by scale factor 0.612159\nI1207 13:07:39.785316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53038 > 2) by scale factor 0.790395\nI1207 13:07:40.728459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98115 > 2) by scale factor 0.670883\nI1207 13:07:41.670462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39787 > 2) by scale factor 0.834073\nI1207 13:07:42.613399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71007 > 2) by scale factor 0.539073\nI1207 13:07:43.556784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19667 > 2) by scale factor 0.476568\nI1207 13:07:44.499609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18762 > 2) by scale factor 0.627427\nI1207 13:07:45.442641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32044 > 2) by scale factor 0.602331\nI1207 13:07:46.385592   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.16531 > 2) by scale factor 0.631849\nI1207 13:07:47.328419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7333 > 2) by scale factor 0.422538\nI1207 13:07:48.271482   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52243 > 2) by scale factor 0.44224\nI1207 13:07:49.214762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79002 > 2) by scale factor 0.716842\nI1207 13:07:50.157873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31242 > 2) by scale factor 0.463777\nI1207 13:07:51.100577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05464 > 2) by scale factor 0.493262\nI1207 13:07:52.043228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05536 > 2) by scale factor 0.654588\nI1207 13:07:52.985812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32513 > 2) by scale factor 0.60148\nI1207 13:07:53.928352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54876 > 2) by scale factor 0.563577\nI1207 13:07:54.871486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49494 > 2) by scale factor 0.444944\nI1207 13:07:55.814622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21113 > 2) by scale factor 0.622834\nI1207 13:07:56.757892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48552 > 2) by scale factor 0.573803\nI1207 13:07:57.700664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46995 > 2) by scale factor 0.576377\nI1207 13:07:58.643832   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3915 > 2) by scale factor 0.589709\nI1207 13:07:59.586854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28614 > 2) by scale factor 0.608618\nI1207 13:08:00.529841   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28172 > 2) by scale factor 0.876531\nI1207 13:08:02.413525   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83469 > 2) by scale factor 0.705545\nI1207 13:08:03.356146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75729 > 2) by scale factor 0.725351\nI1207 13:08:04.298650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87679 > 2) by scale factor 0.695219\nI1207 13:08:05.241500   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53232 > 2) by scale factor 0.441275\nI1207 13:08:06.184479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93394 > 2) by scale factor 0.508396\nI1207 13:08:07.127544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79647 > 2) by scale factor 0.715188\nI1207 13:08:08.069764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84576 > 2) by scale factor 0.520053\nI1207 13:08:09.012073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9566 > 2) by scale factor 0.505484\nI1207 13:08:09.023927   369 solver.cpp:337] Iteration 18400, Testing net (#0)\nI1207 13:09:01.771163   369 solver.cpp:404]     Test net output #0: accuracy = 0.14075\nI1207 13:09:01.771792   369 solver.cpp:404]     Test net output #1: loss = 23.9875 (* 1 = 23.9875 loss)\nI1207 13:09:02.642233   369 solver.cpp:228] Iteration 18400, loss = 25.4176\nI1207 13:09:02.642285   369 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1207 13:09:02.642305   369 solver.cpp:244]     Train net output #1: loss = 25.4176 (* 1 = 25.4176 loss)\nI1207 13:09:02.722638   369 sgd_solver.cpp:166] Iteration 18400, lr = 2.76\nI1207 13:09:02.732717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63251 > 2) by scale factor 0.550583\nI1207 13:09:03.669220   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73683 > 2) by scale factor 0.535213\nI1207 13:09:04.606245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24546 > 2) by scale factor 0.471092\nI1207 13:09:05.543256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.34341 > 2) by scale factor 0.374293\nI1207 13:09:06.480064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.57196 > 2) by scale factor 0.35894\nI1207 13:09:07.416764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36271 > 2) by scale factor 0.45843\nI1207 13:09:08.353570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85745 > 2) by scale factor 0.518478\nI1207 13:09:09.290763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61924 > 2) by scale factor 0.552603\nI1207 13:09:10.227368   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09606 > 2) by scale factor 0.645982\nI1207 13:09:11.164108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10087 > 2) by scale factor 0.644979\nI1207 13:09:12.101320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80456 > 2) by scale factor 0.525685\nI1207 13:09:13.038875   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55973 > 2) by scale factor 0.781331\nI1207 13:09:13.975373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99035 > 2) by scale factor 0.668818\nI1207 13:09:14.912313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64302 > 2) by scale factor 0.548995\nI1207 13:09:15.849413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26925 > 2) by scale factor 0.61176\nI1207 13:09:16.786214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.47606 > 2) by scale factor 0.807735\nI1207 13:09:17.723357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01845 > 2) by scale factor 0.497704\nI1207 13:09:19.595016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31699 > 2) by scale factor 0.602956\nI1207 13:09:20.532295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54113 > 2) by scale factor 0.787051\nI1207 13:09:21.469120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56013 > 2) by scale factor 0.438584\nI1207 13:09:22.406062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51202 > 2) by scale factor 0.569473\nI1207 13:09:23.343220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26121 > 2) by scale factor 0.613268\nI1207 13:09:24.279974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61437 > 2) by scale factor 0.765003\nI1207 13:09:25.216614   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88092 > 2) by scale factor 0.515341\nI1207 13:09:26.153488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14296 > 2) by scale factor 0.482746\nI1207 13:09:27.089932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5601 > 2) by scale factor 0.438587\nI1207 13:09:28.025933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50013 > 2) by scale factor 0.444432\nI1207 13:09:28.962704   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74521 > 2) by scale factor 0.421478\nI1207 13:09:29.899260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28369 > 2) by scale factor 0.466887\nI1207 13:09:30.836304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84151 > 2) by scale factor 0.520628\nI1207 13:09:31.773416   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49112 > 2) by scale factor 0.445324\nI1207 13:09:32.711182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02217 > 2) by scale factor 0.398234\nI1207 13:09:33.648219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4778 > 2) by scale factor 0.575077\nI1207 13:09:34.589498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61052 > 2) by scale factor 0.43379\nI1207 13:09:35.533154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67803 > 2) by scale factor 0.427531\nI1207 13:09:36.477012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1831 > 2) by scale factor 0.628319\nI1207 13:09:37.420346   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13074 > 2) by scale factor 0.938642\nI1207 13:09:38.362990   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40012 > 2) by scale factor 0.833292\nI1207 13:09:39.306164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57902 > 2) by scale factor 0.775488\nI1207 13:09:40.248944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66916 > 2) by scale factor 0.545083\nI1207 13:09:41.192272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08422 > 2) by scale factor 0.48969\nI1207 13:09:42.135102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18613 > 2) by scale factor 0.627721\nI1207 13:09:43.078083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42449 > 2) by scale factor 0.584028\nI1207 13:09:44.021787   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01894 > 2) by scale factor 0.662484\nI1207 13:09:44.965459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.86251 > 2) by scale factor 0.698688\nI1207 13:09:45.909291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04572 > 2) by scale factor 0.977653\nI1207 13:09:46.853029   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66292 > 2) by scale factor 0.546013\nI1207 13:09:47.796152   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55808 > 2) by scale factor 0.438781\nI1207 13:09:48.739426   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5189 > 2) by scale factor 0.442585\nI1207 13:09:49.683053   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20089 > 2) by scale factor 0.47609\nI1207 13:09:50.626719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10316 > 2) by scale factor 0.644505\nI1207 13:09:51.570410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51933 > 2) by scale factor 0.793861\nI1207 13:09:52.513681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7007 > 2) by scale factor 0.740548\nI1207 13:09:53.456995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60118 > 2) by scale factor 0.555373\nI1207 13:09:54.400857   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52673 > 2) by scale factor 0.44182\nI1207 13:09:55.344316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50025 > 2) by scale factor 0.571388\nI1207 13:09:56.287834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31112 > 2) by scale factor 0.604026\nI1207 13:09:57.231120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07659 > 2) by scale factor 0.490606\nI1207 13:09:58.174383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23987 > 2) by scale factor 0.617308\nI1207 13:09:59.117946   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46074 > 2) by scale factor 0.812762\nI1207 13:10:00.061823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21858 > 2) by scale factor 0.621393\nI1207 13:10:01.004909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25052 > 2) by scale factor 0.470531\nI1207 13:10:01.947216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39149 > 2) by scale factor 0.455427\nI1207 13:10:02.891324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68198 > 2) by scale factor 0.427169\nI1207 13:10:03.834839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01477 > 2) by scale factor 0.49816\nI1207 13:10:04.778604   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81514 > 2) by scale factor 0.415357\nI1207 13:10:05.722085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83871 > 2) by scale factor 0.413333\nI1207 13:10:06.665179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25902 > 2) by scale factor 0.469592\nI1207 13:10:07.608989   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9788 > 2) by scale factor 0.502664\nI1207 13:10:08.552191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56137 > 2) by scale factor 0.561581\nI1207 13:10:09.495635   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69413 > 2) by scale factor 0.426064\nI1207 13:10:10.438827   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97492 > 2) by scale factor 0.503155\nI1207 13:10:11.382144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97811 > 2) by scale factor 0.401759\nI1207 13:10:12.325361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.34533 > 2) by scale factor 0.460264\nI1207 13:10:13.268522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76852 > 2) by scale factor 0.530713\nI1207 13:10:14.211771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17955 > 2) by scale factor 0.62902\nI1207 13:10:15.155159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70412 > 2) by scale factor 0.539939\nI1207 13:10:16.098456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97713 > 2) by scale factor 0.502875\nI1207 13:10:17.041661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1949 > 2) by scale factor 0.47677\nI1207 13:10:17.984925   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78569 > 2) by scale factor 0.528305\nI1207 13:10:18.927963   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.28407 > 2) by scale factor 0.378496\nI1207 13:10:19.870244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21603 > 2) by scale factor 0.621884\nI1207 13:10:20.813185   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80415 > 2) by scale factor 0.713228\nI1207 13:10:21.756237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01309 > 2) by scale factor 0.498369\nI1207 13:10:22.699777   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14192 > 2) by scale factor 0.482868\nI1207 13:10:23.643160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97203 > 2) by scale factor 0.672942\nI1207 13:10:24.586388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84738 > 2) by scale factor 0.7024\nI1207 13:10:25.529180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83057 > 2) by scale factor 0.41403\nI1207 13:10:26.472221   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04255 > 2) by scale factor 0.494737\nI1207 13:10:27.415606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96066 > 2) by scale factor 0.504967\nI1207 13:10:28.359671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94689 > 2) by scale factor 0.678681\nI1207 13:10:29.303071   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8207 > 2) by scale factor 0.523464\nI1207 13:10:30.246192   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92353 > 2) by scale factor 0.509745\nI1207 13:10:31.189399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4045 > 2) by scale factor 0.454081\nI1207 13:10:32.132788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1892 > 2) by scale factor 0.627116\nI1207 13:10:33.076937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22934 > 2) by scale factor 0.619322\nI1207 13:10:34.019476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24827 > 2) by scale factor 0.615713\nI1207 13:10:34.962440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61897 > 2) by scale factor 0.432997\nI1207 13:10:35.905638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98265 > 2) by scale factor 0.670545\nI1207 13:10:35.917572   369 solver.cpp:337] Iteration 18500, Testing net (#0)\nI1207 13:11:28.614117   369 solver.cpp:404]     Test net output #0: accuracy = 0.1122\nI1207 13:11:28.614740   369 solver.cpp:404]     Test net output #1: loss = 13.5408 (* 1 = 13.5408 loss)\nI1207 13:11:29.485088   369 solver.cpp:228] Iteration 18500, loss = 15.0581\nI1207 13:11:29.485134   369 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1207 13:11:29.485153   369 solver.cpp:244]     Train net output #1: loss = 
15.0581 (* 1 = 15.0581 loss)\nI1207 13:11:29.560794   369 sgd_solver.cpp:166] Iteration 18500, lr = 2.775\nI1207 13:11:29.570956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42077 > 2) by scale factor 0.826184\nI1207 13:11:30.507997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0503 > 2) by scale factor 0.49379\nI1207 13:11:31.444967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02878 > 2) by scale factor 0.496428\nI1207 13:11:32.381988   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87336 > 2) by scale factor 0.696049\nI1207 13:11:33.318569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30852 > 2) by scale factor 0.464197\nI1207 13:11:34.254801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41392 > 2) by scale factor 0.585836\nI1207 13:11:35.191274   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28866 > 2) by scale factor 0.60815\nI1207 13:11:36.127976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62754 > 2) by scale factor 0.432195\nI1207 13:11:37.064545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97342 > 2) by scale factor 0.402138\nI1207 13:11:38.001390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71115 > 2) by scale factor 0.737695\nI1207 13:11:38.938251   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33724 > 2) by scale factor 0.461123\nI1207 13:11:39.874362   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74799 > 2) by scale factor 0.53362\nI1207 13:11:40.811275   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.25457 > 2) by scale factor 0.380621\nI1207 13:11:41.747934   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.99356 > 2) by scale factor 0.400516\nI1207 13:11:42.684528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81332 > 2) by scale factor 0.524477\nI1207 13:11:43.621590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37407 > 2) by scale factor 0.592756\nI1207 13:11:44.558626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39162 > 2) by scale factor 0.589689\nI1207 13:11:45.495425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50649 > 2) by scale factor 0.443804\nI1207 13:11:46.432066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5855 > 2) by scale factor 0.557802\nI1207 13:11:47.368799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66946 > 2) by scale factor 0.749215\nI1207 13:11:48.305512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5842 > 2) by scale factor 0.558004\nI1207 13:11:49.242188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74405 > 2) by scale factor 0.421581\nI1207 13:11:50.178390   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26216 > 2) by scale factor 0.884111\nI1207 13:11:51.114732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63502 > 2) by scale factor 0.550203\nI1207 13:11:52.050849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53843 > 2) by scale factor 0.787888\nI1207 13:11:52.986790   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99977 > 2) by scale factor 0.666717\nI1207 13:11:53.922225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8277 > 2) by scale factor 0.707289\nI1207 13:11:54.859123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70575 > 2) by scale factor 0.539702\nI1207 13:11:55.796083   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43777 > 2) by scale factor 0.450677\nI1207 13:11:56.732548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96102 > 2) by scale factor 0.675444\nI1207 13:11:57.668917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28062 > 2) by scale factor 0.609641\nI1207 13:11:58.605695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96153 > 2) by scale factor 0.675327\nI1207 13:11:59.548391   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23048 > 2) by scale factor 0.619103\nI1207 13:12:00.492969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59307 > 2) by scale factor 0.556626\nI1207 13:12:01.436725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39907 > 2) by scale factor 0.454642\nI1207 13:12:02.380594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33759 > 2) by scale factor 0.461086\nI1207 13:12:03.323360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58794 > 2) by scale factor 0.557422\nI1207 13:12:04.266641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05477 > 2) by scale factor 0.973346\nI1207 13:12:05.210733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32654 > 2) by scale factor 0.859644\nI1207 13:12:06.154345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70678 > 2) by scale factor 0.539551\nI1207 13:12:07.098301   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18791 > 2) by scale factor 0.627371\nI1207 13:12:08.042621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52781 > 2) by scale factor 0.566925\nI1207 13:12:08.986245   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.22918 > 2) by scale factor 0.619352\nI1207 13:12:09.930127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58925 > 2) by scale factor 0.557219\nI1207 13:12:10.873554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15254 > 2) by scale factor 0.929133\nI1207 13:12:11.817692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19202 > 2) by scale factor 0.477097\nI1207 13:12:12.762078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07939 > 2) by scale factor 0.649479\nI1207 13:12:13.706065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91656 > 2) by scale factor 0.510652\nI1207 13:12:14.650511   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14697 > 2) by scale factor 0.48228\nI1207 13:12:15.594607   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35532 > 2) by scale factor 0.596068\nI1207 13:12:16.538465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27002 > 2) by scale factor 0.468381\nI1207 13:12:17.482909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04404 > 2) by scale factor 0.657022\nI1207 13:12:18.426949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45734 > 2) by scale factor 0.57848\nI1207 13:12:19.370954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69624 > 2) by scale factor 0.741774\nI1207 13:12:20.314836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44957 > 2) by scale factor 0.449482\nI1207 13:12:21.258744   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73691 > 2) by scale factor 0.535202\nI1207 13:12:22.202836   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08784 > 2) by scale factor 0.647703\nI1207 13:12:23.146275   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18073 > 2) by scale factor 0.478386\nI1207 13:12:24.089447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45818 > 2) by scale factor 0.81361\nI1207 13:12:25.032619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21532 > 2) by scale factor 0.47446\nI1207 13:12:25.976542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14149 > 2) by scale factor 0.482918\nI1207 13:12:26.920608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40259 > 2) by scale factor 0.587787\nI1207 13:12:27.864652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92594 > 2) by scale factor 0.509433\nI1207 13:12:28.807960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72879 > 2) by scale factor 0.422942\nI1207 13:12:29.751376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67601 > 2) by scale factor 0.747381\nI1207 13:12:30.695894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1822 > 2) by scale factor 0.385936\nI1207 13:12:31.639623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52201 > 2) by scale factor 0.793017\nI1207 13:12:32.583329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39784 > 2) by scale factor 0.58861\nI1207 13:12:33.527282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72748 > 2) by scale factor 0.733278\nI1207 13:12:34.471289   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92839 > 2) by scale factor 0.509115\nI1207 13:12:35.415165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85699 > 2) by scale factor 0.411778\nI1207 13:12:36.358611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.2085 > 2) by scale factor 0.475228\nI1207 13:12:37.302611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.28602 > 2) by scale factor 0.378356\nI1207 13:12:38.246311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67077 > 2) by scale factor 0.544844\nI1207 13:12:39.190083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70019 > 2) by scale factor 0.425514\nI1207 13:12:40.133731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37063 > 2) by scale factor 0.593361\nI1207 13:12:41.077271   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36036 > 2) by scale factor 0.595173\nI1207 13:12:42.021147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94427 > 2) by scale factor 0.507065\nI1207 13:12:42.964887   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49981 > 2) by scale factor 0.444463\nI1207 13:12:43.908655   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63267 > 2) by scale factor 0.759685\nI1207 13:12:44.852418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0231 > 2) by scale factor 0.661573\nI1207 13:12:45.795938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76678 > 2) by scale factor 0.41957\nI1207 13:12:46.739459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20964 > 2) by scale factor 0.623122\nI1207 13:12:47.683441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09783 > 2) by scale factor 0.488063\nI1207 13:12:48.627298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22843 > 2) by scale factor 0.472989\nI1207 13:12:49.571316   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68149 > 2) by scale factor 0.745855\nI1207 13:12:50.515419   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4317 > 2) by scale factor 0.582801\nI1207 13:12:51.459408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74576 > 2) by scale factor 0.728396\nI1207 13:12:52.403347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50296 > 2) by scale factor 0.444152\nI1207 13:12:53.347270   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5725 > 2) by scale factor 0.777453\nI1207 13:12:54.291349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54614 > 2) by scale factor 0.563993\nI1207 13:12:55.234791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52613 > 2) by scale factor 0.567195\nI1207 13:12:56.178246   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35045 > 2) by scale factor 0.596934\nI1207 13:12:57.121968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.431 > 2) by scale factor 0.822707\nI1207 13:12:58.065526   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80631 > 2) by scale factor 0.525443\nI1207 13:12:59.008383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66342 > 2) by scale factor 0.545937\nI1207 13:12:59.951874   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09715 > 2) by scale factor 0.645754\nI1207 13:13:00.895732   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81824 > 2) by scale factor 0.415089\nI1207 13:13:01.839371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.27709 > 2) by scale factor 0.378997\nI1207 13:13:02.782871   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17757 > 2) by scale factor 0.629412\nI1207 13:13:02.794811   369 solver.cpp:337] Iteration 18600, Testing net (#0)\nI1207 
13:13:55.572898   369 solver.cpp:404]     Test net output #0: accuracy = 0.1635\nI1207 13:13:55.573508   369 solver.cpp:404]     Test net output #1: loss = 17.9988 (* 1 = 17.9988 loss)\nI1207 13:13:56.444000   369 solver.cpp:228] Iteration 18600, loss = 18.6238\nI1207 13:13:56.444048   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 13:13:56.444067   369 solver.cpp:244]     Train net output #1: loss = 18.6238 (* 1 = 18.6238 loss)\nI1207 13:13:56.527040   369 sgd_solver.cpp:166] Iteration 18600, lr = 2.79\nI1207 13:13:56.537102   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1617 > 2) by scale factor 0.480573\nI1207 13:13:57.474308   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77897 > 2) by scale factor 0.529244\nI1207 13:13:58.411538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54751 > 2) by scale factor 0.785081\nI1207 13:13:59.348981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34179 > 2) by scale factor 0.460639\nI1207 13:14:00.286253   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61647 > 2) by scale factor 0.553025\nI1207 13:14:01.223611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90417 > 2) by scale factor 0.512273\nI1207 13:14:02.160698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35202 > 2) by scale factor 0.459556\nI1207 13:14:03.097997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90092 > 2) by scale factor 0.689436\nI1207 13:14:04.035488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24317 > 2) by scale factor 0.616681\nI1207 13:14:04.972767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92362 > 2) by scale factor 0.406205\nI1207 13:14:05.909956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.70275 > 2) by scale factor 0.425283\nI1207 13:14:06.846743   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67505 > 2) by scale factor 0.427803\nI1207 13:14:07.783952   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58524 > 2) by scale factor 0.436182\nI1207 13:14:08.721042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03504 > 2) by scale factor 0.658969\nI1207 13:14:09.658166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34819 > 2) by scale factor 0.459962\nI1207 13:14:10.595371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29212 > 2) by scale factor 0.377921\nI1207 13:14:11.532585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13379 > 2) by scale factor 0.483817\nI1207 13:14:12.469971   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0464 > 2) by scale factor 0.494266\nI1207 13:14:13.407014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39412 > 2) by scale factor 0.589254\nI1207 13:14:14.344331   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71599 > 2) by scale factor 0.736379\nI1207 13:14:15.281716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14843 > 2) by scale factor 0.635237\nI1207 13:14:16.218935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51153 > 2) by scale factor 0.443309\nI1207 13:14:17.156188   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1543 > 2) by scale factor 0.481429\nI1207 13:14:18.093443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43417 > 2) by scale factor 0.582382\nI1207 13:14:19.030386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99706 > 2) by scale factor 0.500367\nI1207 13:14:19.967599 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93405 > 2) by scale factor 0.405346\nI1207 13:14:20.904361   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72183 > 2) by scale factor 0.423564\nI1207 13:14:21.841267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67667 > 2) by scale factor 0.427655\nI1207 13:14:22.778306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42594 > 2) by scale factor 0.583781\nI1207 13:14:23.715381   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86242 > 2) by scale factor 0.51781\nI1207 13:14:24.652835   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31965 > 2) by scale factor 0.602474\nI1207 13:14:25.590121   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96937 > 2) by scale factor 0.503859\nI1207 13:14:26.527535   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81532 > 2) by scale factor 0.524203\nI1207 13:14:27.464637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54289 > 2) by scale factor 0.564511\nI1207 13:14:28.401319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6371 > 2) by scale factor 0.549889\nI1207 13:14:29.342620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38735 > 2) by scale factor 0.837749\nI1207 13:14:30.285784   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07644 > 2) by scale factor 0.490624\nI1207 13:14:31.228277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38718 > 2) by scale factor 0.455874\nI1207 13:14:32.170944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.635 > 2) by scale factor 0.4315\nI1207 13:14:33.113545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 5.15488 > 2) by scale factor 0.387982\nI1207 13:14:34.057034   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82057 > 2) by scale factor 0.414889\nI1207 13:14:34.999439   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82147 > 2) by scale factor 0.70885\nI1207 13:14:35.942606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38898 > 2) by scale factor 0.455686\nI1207 13:14:36.885547   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24646 > 2) by scale factor 0.616056\nI1207 13:14:37.828315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24162 > 2) by scale factor 0.471518\nI1207 13:14:38.771054   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27384 > 2) by scale factor 0.610904\nI1207 13:14:39.714087   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09536 > 2) by scale factor 0.488357\nI1207 13:14:40.656968   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21041 > 2) by scale factor 0.904808\nI1207 13:14:41.599797   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28401 > 2) by scale factor 0.609011\nI1207 13:14:42.542719   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17045 > 2) by scale factor 0.630824\nI1207 13:14:43.486058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55056 > 2) by scale factor 0.563291\nI1207 13:14:44.428913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2665 > 2) by scale factor 0.468769\nI1207 13:14:45.371481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90088 > 2) by scale factor 0.512705\nI1207 13:14:46.314280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83154 > 2) by scale factor 0.413947\nI1207 13:14:47.257577   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24505 > 2) by scale factor 0.471137\nI1207 13:14:48.200728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33587 > 2) by scale factor 0.856211\nI1207 13:14:49.143695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34858 > 2) by scale factor 0.45992\nI1207 13:14:50.086762   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83618 > 2) by scale factor 0.705174\nI1207 13:14:51.029464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77109 > 2) by scale factor 0.721738\nI1207 13:14:51.972542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56942 > 2) by scale factor 0.437692\nI1207 13:14:52.915621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9939 > 2) by scale factor 0.400489\nI1207 13:14:53.858425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.51467 > 2) by scale factor 0.362669\nI1207 13:14:54.801216   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40088 > 2) by scale factor 0.454455\nI1207 13:14:55.743418   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05031 > 2) by scale factor 0.49379\nI1207 13:14:56.687181   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58776 > 2) by scale factor 0.435942\nI1207 13:14:57.629725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.33087 > 2) by scale factor 0.375174\nI1207 13:14:58.572656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88383 > 2) by scale factor 0.514955\nI1207 13:14:59.515908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18695 > 2) by scale factor 0.385583\nI1207 13:15:00.458281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.83274 > 2) by scale factor 0.413844\nI1207 13:15:01.401490   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14287 > 2) by scale factor 0.636361\nI1207 13:15:02.343909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93716 > 2) by scale factor 0.507981\nI1207 13:15:03.286981   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06087 > 2) by scale factor 0.492506\nI1207 13:15:04.229617   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69893 > 2) by scale factor 0.540696\nI1207 13:15:05.172369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73963 > 2) by scale factor 0.534812\nI1207 13:15:06.115407   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0098 > 2) by scale factor 0.664495\nI1207 13:15:07.058365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09323 > 2) by scale factor 0.646574\nI1207 13:15:08.000695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02216 > 2) by scale factor 0.497246\nI1207 13:15:08.943300   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28999 > 2) by scale factor 0.607904\nI1207 13:15:09.885577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67111 > 2) by scale factor 0.748753\nI1207 13:15:10.828502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07156 > 2) by scale factor 0.491212\nI1207 13:15:11.771050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84846 > 2) by scale factor 0.519688\nI1207 13:15:12.713492   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49318 > 2) by scale factor 0.572544\nI1207 13:15:13.656069   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18512 > 2) by scale factor 0.62792\nI1207 13:15:14.598631   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36526 > 2) by scale factor 0.594308\nI1207 13:15:15.541265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80248 > 2) by scale factor 0.713654\nI1207 13:15:16.484043   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61631 > 2) by scale factor 0.55305\nI1207 13:15:17.426611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.931 > 2) by scale factor 0.405597\nI1207 13:15:18.369215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13569 > 2) by scale factor 0.637817\nI1207 13:15:19.311800   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58688 > 2) by scale factor 0.436026\nI1207 13:15:20.254659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05243 > 2) by scale factor 0.395849\nI1207 13:15:21.197386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84356 > 2) by scale factor 0.703344\nI1207 13:15:22.139844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87508 > 2) by scale factor 0.695633\nI1207 13:15:23.082638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67858 > 2) by scale factor 0.543687\nI1207 13:15:24.025506   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83931 > 2) by scale factor 0.704398\nI1207 13:15:24.968020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20953 > 2) by scale factor 0.623144\nI1207 13:15:25.910509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80642 > 2) by scale factor 0.712653\nI1207 13:15:26.853603   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3833 > 2) by scale factor 0.591139\nI1207 13:15:27.796561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.60218 > 2) by scale factor 0.434576\nI1207 13:15:28.738979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.898 > 2) by scale factor 0.513084\nI1207 13:15:29.681457   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04182 > 2) by scale factor 0.657501\nI1207 13:15:29.693426   369 solver.cpp:337] Iteration 18700, Testing net (#0)\nI1207 13:16:22.499061   369 solver.cpp:404]     Test net output #0: accuracy = 0.12475\nI1207 13:16:22.499634   369 solver.cpp:404]     Test net output #1: loss = 18.2104 (* 1 = 18.2104 loss)\nI1207 13:16:23.370452   369 solver.cpp:228] Iteration 18700, loss = 19.6853\nI1207 13:16:23.370508   369 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1207 13:16:23.370527   369 solver.cpp:244]     Train net output #1: loss = 19.6853 (* 1 = 19.6853 loss)\nI1207 13:16:23.452518   369 sgd_solver.cpp:166] Iteration 18700, lr = 2.805\nI1207 13:16:23.462656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77641 > 2) by scale factor 0.529603\nI1207 13:16:24.399664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0137 > 2) by scale factor 0.663636\nI1207 13:16:25.336365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86883 > 2) by scale factor 0.697149\nI1207 13:16:26.273608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81257 > 2) by scale factor 0.711094\nI1207 13:16:27.210435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02934 > 2) by scale factor 0.66021\nI1207 13:16:28.147536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31229 > 2) by scale factor 0.603813\nI1207 13:16:29.084656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66913 > 2) by scale factor 0.749309\nI1207 13:16:30.021888   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.31074 > 2) by scale factor 0.604095\nI1207 13:16:30.958572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96047 > 2) by scale factor 0.50499\nI1207 13:16:31.895992   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88751 > 2) by scale factor 0.514468\nI1207 13:16:32.833097   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44191 > 2) by scale factor 0.581073\nI1207 13:16:33.769822   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73169 > 2) by scale factor 0.535951\nI1207 13:16:34.707155   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01836 > 2) by scale factor 0.497716\nI1207 13:16:35.644008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65115 > 2) by scale factor 0.547773\nI1207 13:16:36.581028   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57292 > 2) by scale factor 0.559767\nI1207 13:16:37.518256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3656 > 2) by scale factor 0.845451\nI1207 13:16:38.455410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.957 > 2) by scale factor 0.676362\nI1207 13:16:39.392841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14912 > 2) by scale factor 0.635098\nI1207 13:16:40.329907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03008 > 2) by scale factor 0.660049\nI1207 13:16:41.266860   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09764 > 2) by scale factor 0.645653\nI1207 13:16:42.203943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.302 > 2) by scale factor 0.4649\nI1207 13:16:43.141083   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5318 > 2) by scale factor 0.566284\nI1207 13:16:44.077762   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26446 > 2) by scale factor 0.468993\nI1207 13:16:45.015013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62493 > 2) by scale factor 0.432439\nI1207 13:16:45.952214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33305 > 2) by scale factor 0.600051\nI1207 13:16:46.889091   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3638 > 2) by scale factor 0.458317\nI1207 13:16:47.826498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88246 > 2) by scale factor 0.693853\nI1207 13:16:48.763653   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35526 > 2) by scale factor 0.849162\nI1207 13:16:49.700098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78159 > 2) by scale factor 0.719013\nI1207 13:16:50.637115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20778 > 2) by scale factor 0.905888\nI1207 13:16:51.574214   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03833 > 2) by scale factor 0.658256\nI1207 13:16:52.511227   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23659 > 2) by scale factor 0.617933\nI1207 13:16:53.454928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76967 > 2) by scale factor 0.530551\nI1207 13:16:54.397948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70959 > 2) by scale factor 0.424666\nI1207 13:16:55.341225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18137 > 2) by scale factor 0.478313\nI1207 13:16:56.284051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9148 > 2) by scale factor 0.510882\nI1207 13:16:57.226673   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.28429 > 2) by scale factor 0.608959\nI1207 13:16:58.169243   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21155 > 2) by scale factor 0.904344\nI1207 13:16:59.112298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39969 > 2) by scale factor 0.833443\nI1207 13:17:00.055248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21485 > 2) by scale factor 0.902998\nI1207 13:17:00.998387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04784 > 2) by scale factor 0.976637\nI1207 13:17:01.941779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21482 > 2) by scale factor 0.622119\nI1207 13:17:02.884896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17246 > 2) by scale factor 0.630425\nI1207 13:17:03.828178   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62836 > 2) by scale factor 0.432119\nI1207 13:17:04.770879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01091 > 2) by scale factor 0.49864\nI1207 13:17:05.713980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53557 > 2) by scale factor 0.440959\nI1207 13:17:06.657090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98011 > 2) by scale factor 0.502498\nI1207 13:17:07.599980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71632 > 2) by scale factor 0.538166\nI1207 13:17:08.542842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8328 > 2) by scale factor 0.706015\nI1207 13:17:09.485802   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72224 > 2) by scale factor 0.537311\nI1207 13:17:10.428625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36445 > 2) by scale factor 0.845864\nI1207 13:17:11.371299   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50269 > 2) by scale factor 0.570989\nI1207 13:17:12.314352   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49236 > 2) by scale factor 0.4452\nI1207 13:17:13.256896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54934 > 2) by scale factor 0.563485\nI1207 13:17:14.199877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85723 > 2) by scale factor 0.518507\nI1207 13:17:15.142712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69541 > 2) by scale factor 0.742003\nI1207 13:17:16.085094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04816 > 2) by scale factor 0.976486\nI1207 13:17:17.028023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44276 > 2) by scale factor 0.580929\nI1207 13:17:17.970947   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35878 > 2) by scale factor 0.847897\nI1207 13:17:18.914093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6406 > 2) by scale factor 0.757402\nI1207 13:17:19.857183   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38917 > 2) by scale factor 0.455667\nI1207 13:17:20.800118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4728 > 2) by scale factor 0.447148\nI1207 13:17:21.743265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6247 > 2) by scale factor 0.43246\nI1207 13:17:22.686417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68062 > 2) by scale factor 0.427293\nI1207 13:17:23.630177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10312 > 2) by scale factor 0.487434\nI1207 13:17:24.573137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.1376 > 2) by scale factor 0.63743\nI1207 13:17:25.516036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63613 > 2) by scale factor 0.431394\nI1207 13:17:26.459487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56687 > 2) by scale factor 0.560716\nI1207 13:17:27.402549   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55988 > 2) by scale factor 0.561817\nI1207 13:17:28.345683   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19615 > 2) by scale factor 0.476627\nI1207 13:17:29.288980   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21625 > 2) by scale factor 0.383417\nI1207 13:17:30.231694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12081 > 2) by scale factor 0.640858\nI1207 13:17:31.174448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8506 > 2) by scale factor 0.41232\nI1207 13:17:32.117353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63482 > 2) by scale factor 0.550233\nI1207 13:17:33.060298   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90625 > 2) by scale factor 0.688173\nI1207 13:17:34.003136   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88125 > 2) by scale factor 0.515298\nI1207 13:17:34.945608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65462 > 2) by scale factor 0.753403\nI1207 13:17:35.888849   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43728 > 2) by scale factor 0.581856\nI1207 13:17:36.831564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03235 > 2) by scale factor 0.659555\nI1207 13:17:37.774148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41079 > 2) by scale factor 0.586375\nI1207 13:17:38.716718   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58984 > 2) by scale factor 0.772248\nI1207 13:17:39.658969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13072 > 2) by scale factor 0.389809\nI1207 13:17:40.601850   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55537 > 2) by scale factor 0.56253\nI1207 13:17:41.545051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68482 > 2) by scale factor 0.542768\nI1207 13:17:42.487993   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85971 > 2) by scale factor 0.518173\nI1207 13:17:43.430735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56461 > 2) by scale factor 0.779846\nI1207 13:17:44.373478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63951 > 2) by scale factor 0.757718\nI1207 13:17:45.316345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97039 > 2) by scale factor 0.673312\nI1207 13:17:46.259306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26536 > 2) by scale factor 0.61249\nI1207 13:17:47.201977   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3434 > 2) by scale factor 0.598194\nI1207 13:17:48.144574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06731 > 2) by scale factor 0.652036\nI1207 13:17:49.087666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74001 > 2) by scale factor 0.729925\nI1207 13:17:50.030750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3897 > 2) by scale factor 0.590022\nI1207 13:17:50.973408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83431 > 2) by scale factor 0.521606\nI1207 13:17:51.915853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.08824 > 2) by scale factor 0.489208\nI1207 13:17:52.858906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93991 > 2) by scale factor 0.507626\nI1207 13:17:53.802834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0837 > 2) by scale factor 0.489752\nI1207 13:17:54.745717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48607 > 2) by scale factor 0.445825\nI1207 13:17:55.688657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72598 > 2) by scale factor 0.536771\nI1207 13:17:56.631461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59864 > 2) by scale factor 0.434912\nI1207 13:17:56.643457   369 solver.cpp:337] Iteration 18800, Testing net (#0)\nI1207 13:18:49.423307   369 solver.cpp:404]     Test net output #0: accuracy = 0.1111\nI1207 13:18:49.423902   369 solver.cpp:404]     Test net output #1: loss = 20.1753 (* 1 = 20.1753 loss)\nI1207 13:18:50.294589   369 solver.cpp:228] Iteration 18800, loss = 20.6891\nI1207 13:18:50.294642   369 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1207 13:18:50.294661   369 solver.cpp:244]     Train net output #1: loss = 20.6891 (* 1 = 20.6891 loss)\nI1207 13:18:50.374287   369 sgd_solver.cpp:166] Iteration 18800, lr = 2.82\nI1207 13:18:50.384498   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78476 > 2) by scale factor 0.528435\nI1207 13:18:51.321666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52075 > 2) by scale factor 0.568061\nI1207 13:18:52.258564   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08915 > 2) by scale factor 0.489099\nI1207 13:18:53.195652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95989 > 2) by scale factor 0.505064\nI1207 13:18:54.132503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.65533 > 2) by scale factor 0.753201\nI1207 13:18:55.069589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8535 > 2) by scale factor 0.412074\nI1207 13:18:56.006671   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95673 > 2) by scale factor 0.505467\nI1207 13:18:56.943370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97041 > 2) by scale factor 0.673308\nI1207 13:18:57.880234   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97158 > 2) by scale factor 0.673043\nI1207 13:18:58.817497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65232 > 2) by scale factor 0.547597\nI1207 13:18:59.754307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28215 > 2) by scale factor 0.609357\nI1207 13:19:00.691107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63639 > 2) by scale factor 0.549996\nI1207 13:19:01.628167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39091 > 2) by scale factor 0.455487\nI1207 13:19:02.565079   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91092 > 2) by scale factor 0.687068\nI1207 13:19:03.501889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49422 > 2) by scale factor 0.801855\nI1207 13:19:04.438627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59169 > 2) by scale factor 0.435569\nI1207 13:19:05.375694   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06571 > 2) by scale factor 0.652376\nI1207 13:19:06.312248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12081 > 2) by scale factor 0.64086\nI1207 13:19:07.249534   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22639 > 2) by scale factor 0.619887\nI1207 13:19:08.186358   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47003 > 2) by scale factor 0.576364\nI1207 13:19:09.123286   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60874 > 2) by scale factor 0.554211\nI1207 13:19:10.060153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98365 > 2) by scale factor 0.67032\nI1207 13:19:10.997189   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28756 > 2) by scale factor 0.608354\nI1207 13:19:11.933856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59677 > 2) by scale factor 0.770189\nI1207 13:19:12.870726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01926 > 2) by scale factor 0.662414\nI1207 13:19:13.807313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8478 > 2) by scale factor 0.702296\nI1207 13:19:14.744115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19228 > 2) by scale factor 0.626511\nI1207 13:19:15.680979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3395 > 2) by scale factor 0.460882\nI1207 13:19:16.617823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65883 > 2) by scale factor 0.546623\nI1207 13:19:17.554919   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17042 > 2) by scale factor 0.63083\nI1207 13:19:18.491781   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07824 > 2) by scale factor 0.490407\nI1207 13:19:19.428014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92565 > 2) by scale factor 0.683609\nI1207 13:19:20.365360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80958 > 2) by scale factor 0.524992\nI1207 13:19:21.302510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.77716 > 2) by scale factor 0.720159\nI1207 13:19:22.239627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72852 > 2) by scale factor 0.536406\nI1207 13:19:23.176620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0629 > 2) by scale factor 0.652977\nI1207 13:19:24.118013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12731 > 2) by scale factor 0.639527\nI1207 13:19:25.061417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43678 > 2) by scale factor 0.581941\nI1207 13:19:26.004593   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43556 > 2) by scale factor 0.582146\nI1207 13:19:26.947693   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2099 > 2) by scale factor 0.623072\nI1207 13:19:27.891134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99179 > 2) by scale factor 0.501028\nI1207 13:19:28.834223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55633 > 2) by scale factor 0.43895\nI1207 13:19:29.777707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89734 > 2) by scale factor 0.51317\nI1207 13:19:30.721038   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57921 > 2) by scale factor 0.775433\nI1207 13:19:31.664692   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51723 > 2) by scale factor 0.794523\nI1207 13:19:32.607956   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71024 > 2) by scale factor 0.424607\nI1207 13:19:33.550597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81789 > 2) by scale factor 0.70975\nI1207 13:19:34.493927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87052 > 2) by scale factor 0.696739\nI1207 13:19:35.437662   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38991 > 2) by scale factor 0.589986\nI1207 13:19:36.380789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10776 > 2) by scale factor 0.643551\nI1207 13:19:37.324282   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60541 > 2) by scale factor 0.554721\nI1207 13:19:38.267256   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66254 > 2) by scale factor 0.54607\nI1207 13:19:39.210353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10082 > 2) by scale factor 0.487707\nI1207 13:19:40.153661   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17181 > 2) by scale factor 0.479408\nI1207 13:19:41.097204   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45823 > 2) by scale factor 0.578331\nI1207 13:19:42.040345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14649 > 2) by scale factor 0.482336\nI1207 13:19:42.983041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23926 > 2) by scale factor 0.893153\nI1207 13:19:43.926148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01411 > 2) by scale factor 0.498242\nI1207 13:19:44.869403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71486 > 2) by scale factor 0.736686\nI1207 13:19:45.812840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90338 > 2) by scale factor 0.512376\nI1207 13:19:46.756239   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8698 > 2) by scale factor 0.516822\nI1207 13:19:47.699045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86627 > 2) by scale factor 0.517295\nI1207 13:19:48.642042   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.61367 > 2) by scale factor 0.553454\nI1207 13:19:49.585228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71692 > 2) by scale factor 0.538079\nI1207 13:19:50.528705   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62583 > 2) by scale factor 0.551598\nI1207 13:19:51.472023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60792 > 2) by scale factor 0.434036\nI1207 13:19:52.414922   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4913 > 2) by scale factor 0.802792\nI1207 13:19:53.358148   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76527 > 2) by scale factor 0.723257\nI1207 13:19:54.301561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3687 > 2) by scale factor 0.5937\nI1207 13:19:55.245056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26209 > 2) by scale factor 0.469253\nI1207 13:19:56.188516   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70178 > 2) by scale factor 0.425371\nI1207 13:19:57.131541   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27383 > 2) by scale factor 0.467964\nI1207 13:19:58.074865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94424 > 2) by scale factor 0.507069\nI1207 13:19:59.018153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89994 > 2) by scale factor 0.512828\nI1207 13:19:59.961311   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67748 > 2) by scale factor 0.54385\nI1207 13:20:00.904608   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12602 > 2) by scale factor 0.484728\nI1207 13:20:01.847067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69866 > 2) by scale factor 0.74111\nI1207 13:20:02.790452   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00737 > 2) by scale factor 0.665032\nI1207 13:20:03.733755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24939 > 2) by scale factor 0.615499\nI1207 13:20:04.677167   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51223 > 2) by scale factor 0.796105\nI1207 13:20:05.620610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50443 > 2) by scale factor 0.798585\nI1207 13:20:06.563419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82903 > 2) by scale factor 0.706956\nI1207 13:20:07.506659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12331 > 2) by scale factor 0.640346\nI1207 13:20:08.450044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07623 > 2) by scale factor 0.49065\nI1207 13:20:09.393582   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11417 > 2) by scale factor 0.486125\nI1207 13:20:10.337505   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99012 > 2) by scale factor 0.668868\nI1207 13:20:11.281574   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54827 > 2) by scale factor 0.784845\nI1207 13:20:12.224882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92641 > 2) by scale factor 0.509371\nI1207 13:20:13.168141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8384 > 2) by scale factor 0.521051\nI1207 13:20:14.112119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89832 > 2) by scale factor 0.513041\nI1207 13:20:15.056399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55835 > 2) by scale factor 0.781754\nI1207 13:20:16.000355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.14403 > 2) by scale factor 0.482622\nI1207 13:20:16.943863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96878 > 2) by scale factor 0.673677\nI1207 13:20:17.887996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28377 > 2) by scale factor 0.609056\nI1207 13:20:18.831976   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74155 > 2) by scale factor 0.534538\nI1207 13:20:19.775321   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7995 > 2) by scale factor 0.526385\nI1207 13:20:20.719892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08021 > 2) by scale factor 0.490171\nI1207 13:20:21.663657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14241 > 2) by scale factor 0.636454\nI1207 13:20:22.607666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77907 > 2) by scale factor 0.418492\nI1207 13:20:23.551543   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57341 > 2) by scale factor 0.559689\nI1207 13:20:23.563443   369 solver.cpp:337] Iteration 18900, Testing net (#0)\nI1207 13:21:16.402549   369 solver.cpp:404]     Test net output #0: accuracy = 0.13875\nI1207 13:21:16.403179   369 solver.cpp:404]     Test net output #1: loss = 31.2495 (* 1 = 31.2495 loss)\nI1207 13:21:17.274188   369 solver.cpp:228] Iteration 18900, loss = 34.7567\nI1207 13:21:17.274225   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 13:21:17.274243   369 solver.cpp:244]     Train net output #1: loss = 34.7567 (* 1 = 34.7567 loss)\nI1207 13:21:17.350117   369 sgd_solver.cpp:166] Iteration 18900, lr = 2.835\nI1207 13:21:17.360268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29374 > 2) by scale factor 0.465794\nI1207 13:21:18.297016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.83752 > 2) by scale factor 0.413435\nI1207 13:21:19.234508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29401 > 2) by scale factor 0.871834\nI1207 13:21:20.171356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38053 > 2) by scale factor 0.456566\nI1207 13:21:21.108530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01854 > 2) by scale factor 0.497694\nI1207 13:21:22.045441   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57583 > 2) by scale factor 0.559311\nI1207 13:21:22.981884   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55401 > 2) by scale factor 0.783081\nI1207 13:21:23.918759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81719 > 2) by scale factor 0.709927\nI1207 13:21:24.855471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55327 > 2) by scale factor 0.78331\nI1207 13:21:25.792304   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42408 > 2) by scale factor 0.584099\nI1207 13:21:26.729665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34522 > 2) by scale factor 0.852799\nI1207 13:21:27.666235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73669 > 2) by scale factor 0.535232\nI1207 13:21:28.603166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31024 > 2) by scale factor 0.604187\nI1207 13:21:29.540127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66312 > 2) by scale factor 0.428897\nI1207 13:21:30.476809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38793 > 2) by scale factor 0.455796\nI1207 13:21:31.413892   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91087 > 2) by scale factor 0.511395\nI1207 13:21:32.351140   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74768 > 2) by scale factor 0.533663\nI1207 13:21:33.287883   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70892 > 2) by scale factor 0.424726\nI1207 13:21:34.225013   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06714 > 2) by scale factor 0.652074\nI1207 13:21:35.161991   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1253 > 2) by scale factor 0.484813\nI1207 13:21:36.098269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62881 > 2) by scale factor 0.760799\nI1207 13:21:37.034463   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42596 > 2) by scale factor 0.824416\nI1207 13:21:37.971717   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0774 > 2) by scale factor 0.490509\nI1207 13:21:38.908659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34493 > 2) by scale factor 0.852904\nI1207 13:21:39.845332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80549 > 2) by scale factor 0.525557\nI1207 13:21:40.782588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82251 > 2) by scale factor 0.523217\nI1207 13:21:41.719425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94493 > 2) by scale factor 0.679134\nI1207 13:21:42.656569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58888 > 2) by scale factor 0.557277\nI1207 13:21:43.593611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91984 > 2) by scale factor 0.406517\nI1207 13:21:44.530768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06723 > 2) by scale factor 0.491735\nI1207 13:21:45.467684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.24306 > 2) by scale factor 0.471358\nI1207 13:21:46.404865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16772 > 2) by scale factor 0.479879\nI1207 13:21:47.342844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56914 > 2) by scale factor 0.77847\nI1207 13:21:48.286864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55861 > 2) by scale factor 0.562018\nI1207 13:21:49.230422   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25877 > 2) by scale factor 0.613729\nI1207 13:21:50.174568   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35388 > 2) by scale factor 0.45936\nI1207 13:21:51.118485   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07486 > 2) by scale factor 0.3941\nI1207 13:21:52.062070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50873 > 2) by scale factor 0.443584\nI1207 13:21:53.005758   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05312 > 2) by scale factor 0.493447\nI1207 13:21:53.949654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32787 > 2) by scale factor 0.462121\nI1207 13:21:54.893124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64787 > 2) by scale factor 0.430304\nI1207 13:21:55.836954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52296 > 2) by scale factor 0.442188\nI1207 13:21:56.780414   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.40272 > 2) by scale factor 0.370184\nI1207 13:21:57.724220   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.40343 > 2) by scale factor 0.370135\nI1207 13:21:58.667176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64077 > 2) by scale factor 0.549334\nI1207 13:21:59.610826   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07003 > 2) by scale factor 0.491396\nI1207 13:22:00.554065   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71437 > 2) by scale factor 0.53845\nI1207 13:22:01.497581   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86574 > 2) by scale factor 0.517365\nI1207 13:22:02.441313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38878 > 2) by scale factor 0.590183\nI1207 13:22:03.385107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95502 > 2) by scale factor 0.403631\nI1207 13:22:04.329576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48409 > 2) by scale factor 0.805124\nI1207 13:22:05.273129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70035 > 2) by scale factor 0.4255\nI1207 13:22:06.216778   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83926 > 2) by scale factor 0.413287\nI1207 13:22:07.160238   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19955 > 2) by scale factor 0.476241\nI1207 13:22:08.104001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01765 > 2) by scale factor 0.662768\nI1207 13:22:09.989156   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63357 > 2) by scale factor 0.550423\nI1207 13:22:10.932340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49698 > 2) by scale factor 0.444743\nI1207 13:22:11.875962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32567 > 2) by scale factor 0.601382\nI1207 13:22:12.819839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43116 > 2) by scale factor 0.582892\nI1207 13:22:13.764037   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.54212 > 2) by scale factor 0.440323\nI1207 13:22:14.707828   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83386 > 2) by scale factor 0.413748\nI1207 13:22:15.651932   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1539 > 2) by scale factor 0.634136\nI1207 13:22:16.596047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70187 > 2) by scale factor 0.425362\nI1207 13:22:17.540640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00294 > 2) by scale factor 0.666014\nI1207 13:22:18.484938   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17642 > 2) by scale factor 0.62964\nI1207 13:22:19.429116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64898 > 2) by scale factor 0.548098\nI1207 13:22:20.372912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22077 > 2) by scale factor 0.473847\nI1207 13:22:21.317080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63241 > 2) by scale factor 0.550598\nI1207 13:22:22.261375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61407 > 2) by scale factor 0.76509\nI1207 13:22:23.205595   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3149 > 2) by scale factor 0.863968\nI1207 13:22:24.149468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0527 > 2) by scale factor 0.493498\nI1207 13:22:25.093900   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31258 > 2) by scale factor 0.864837\nI1207 13:22:26.038267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3359 > 2) by scale factor 0.599539\nI1207 13:22:26.982182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21776 > 2) by scale factor 0.62155\nI1207 13:22:27.926478   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16263 > 2) by scale factor 0.480466\nI1207 13:22:28.870652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15164 > 2) by scale factor 0.388226\nI1207 13:22:29.814553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83951 > 2) by scale factor 0.413265\nI1207 13:22:30.758610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92644 > 2) by scale factor 0.509368\nI1207 13:22:31.703055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04699 > 2) by scale factor 0.656386\nI1207 13:22:32.647218   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81116 > 2) by scale factor 0.524774\nI1207 13:22:33.591567   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53876 > 2) by scale factor 0.440649\nI1207 13:22:34.535878   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02938 > 2) by scale factor 0.496354\nI1207 13:22:35.479689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8379 > 2) by scale factor 0.413403\nI1207 13:22:36.423851   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36821 > 2) by scale factor 0.593787\nI1207 13:22:37.368062   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17417 > 2) by scale factor 0.630085\nI1207 13:22:38.312077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85959 > 2) by scale factor 0.699402\nI1207 13:22:39.256306   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77768 > 2) by scale factor 0.720026\nI1207 13:22:40.200225   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29657 > 2) by scale factor 0.465488\nI1207 13:22:41.144588   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.86836 > 2) by scale factor 0.410816\nI1207 13:22:42.088897   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77543 > 2) by scale factor 0.720609\nI1207 13:22:43.033313   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37562 > 2) by scale factor 0.592485\nI1207 13:22:43.977370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35914 > 2) by scale factor 0.458806\nI1207 13:22:44.921643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11017 > 2) by scale factor 0.486598\nI1207 13:22:45.865727   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67459 > 2) by scale factor 0.427845\nI1207 13:22:46.809650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.8334 > 2) by scale factor 0.413787\nI1207 13:22:47.754751   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32588 > 2) by scale factor 0.85989\nI1207 13:22:48.698716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89335 > 2) by scale factor 0.513696\nI1207 13:22:49.642809   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78709 > 2) by scale factor 0.717593\nI1207 13:22:50.586863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77647 > 2) by scale factor 0.529595\nI1207 13:22:50.598889   369 solver.cpp:337] Iteration 19000, Testing net (#0)\nI1207 13:23:43.550318   369 solver.cpp:404]     Test net output #0: accuracy = 0.1244\nI1207 13:23:43.550968   369 solver.cpp:404]     Test net output #1: loss = 31.2972 (* 1 = 31.2972 loss)\nI1207 13:23:44.421962   369 solver.cpp:228] Iteration 19000, loss = 36.4638\nI1207 13:23:44.422015   369 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI1207 13:23:44.422034   369 solver.cpp:244]     Train net output #1: loss = 36.4638 (* 1 = 36.4638 loss)\nI1207 13:23:44.495643   369 
sgd_solver.cpp:166] Iteration 19000, lr = 2.85\nI1207 13:23:44.505894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95474 > 2) by scale factor 0.403654\nI1207 13:23:45.443068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53247 > 2) by scale factor 0.44126\nI1207 13:23:46.379912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0714 > 2) by scale factor 0.491231\nI1207 13:23:47.317230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10665 > 2) by scale factor 0.487015\nI1207 13:23:48.254515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06872 > 2) by scale factor 0.651738\nI1207 13:23:49.191164   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65834 > 2) by scale factor 0.546696\nI1207 13:23:50.128540   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11995 > 2) by scale factor 0.641036\nI1207 13:23:51.065364   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36683 > 2) by scale factor 0.457998\nI1207 13:23:52.002138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21291 > 2) by scale factor 0.622489\nI1207 13:23:52.939106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45684 > 2) by scale factor 0.814054\nI1207 13:23:53.876114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09594 > 2) by scale factor 0.646008\nI1207 13:23:54.812889   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92273 > 2) by scale factor 0.684291\nI1207 13:23:55.749696   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21515 > 2) by scale factor 0.474479\nI1207 13:23:56.686462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68127 > 2) by scale factor 0.543291\nI1207 13:23:57.623802 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46767 > 2) by scale factor 0.44766\nI1207 13:23:58.561317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16494 > 2) by scale factor 0.631923\nI1207 13:23:59.497982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3244 > 2) by scale factor 0.601613\nI1207 13:24:00.435431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41651 > 2) by scale factor 0.585393\nI1207 13:24:01.372748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6954 > 2) by scale factor 0.541213\nI1207 13:24:02.309924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73469 > 2) by scale factor 0.422414\nI1207 13:24:03.247141   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25439 > 2) by scale factor 0.470102\nI1207 13:24:04.184077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35632 > 2) by scale factor 0.595891\nI1207 13:24:05.121402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70224 > 2) by scale factor 0.425329\nI1207 13:24:06.058497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32527 > 2) by scale factor 0.601456\nI1207 13:24:06.995398   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38294 > 2) by scale factor 0.591202\nI1207 13:24:07.932561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43945 > 2) by scale factor 0.581488\nI1207 13:24:08.869340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75382 > 2) by scale factor 0.726263\nI1207 13:24:09.806354   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91658 > 2) by scale factor 0.685735\nI1207 13:24:10.743005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.42911 > 2) by scale factor 0.583242\nI1207 13:24:11.679795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52557 > 2) by scale factor 0.567284\nI1207 13:24:12.616489   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40838 > 2) by scale factor 0.453682\nI1207 13:24:13.559267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67511 > 2) by scale factor 0.747634\nI1207 13:24:14.502869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61083 > 2) by scale factor 0.553889\nI1207 13:24:15.445291   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79102 > 2) by scale factor 0.527562\nI1207 13:24:16.387914   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02107 > 2) by scale factor 0.662017\nI1207 13:24:17.330891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91295 > 2) by scale factor 0.511124\nI1207 13:24:18.273984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64028 > 2) by scale factor 0.431008\nI1207 13:24:19.216869   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33186 > 2) by scale factor 0.461696\nI1207 13:24:20.160092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5053 > 2) by scale factor 0.443921\nI1207 13:24:21.103196   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41684 > 2) by scale factor 0.452813\nI1207 13:24:22.046098   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99864 > 2) by scale factor 0.666969\nI1207 13:24:22.989173   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3865 > 2) by scale factor 0.455944\nI1207 13:24:23.932425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63571 > 2) by scale factor 0.758809\nI1207 13:24:24.875581 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85828 > 2) by scale factor 0.699722\nI1207 13:24:25.818615   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14053 > 2) by scale factor 0.636835\nI1207 13:24:26.761281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44786 > 2) by scale factor 0.58007\nI1207 13:24:27.704960   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72391 > 2) by scale factor 0.423378\nI1207 13:24:28.648942   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44561 > 2) by scale factor 0.449882\nI1207 13:24:29.592761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.961 > 2) by scale factor 0.675448\nI1207 13:24:30.536772   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9285 > 2) by scale factor 0.682943\nI1207 13:24:31.480108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5814 > 2) by scale factor 0.774775\nI1207 13:24:32.423984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81379 > 2) by scale factor 0.524413\nI1207 13:24:33.367924   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56677 > 2) by scale factor 0.779188\nI1207 13:24:34.311928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43733 > 2) by scale factor 0.581846\nI1207 13:24:35.255105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35068 > 2) by scale factor 0.459699\nI1207 13:24:36.197826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7866 > 2) by scale factor 0.717721\nI1207 13:24:37.140985   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8501 > 2) by scale factor 0.519468\nI1207 13:24:38.083844   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.48798 > 2) by scale factor 0.445635\nI1207 13:24:39.026773   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3357 > 2) by scale factor 0.461286\nI1207 13:24:39.969228   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21101 > 2) by scale factor 0.474945\nI1207 13:24:40.911780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55752 > 2) by scale factor 0.562189\nI1207 13:24:41.854650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7875 > 2) by scale factor 0.528053\nI1207 13:24:42.797734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2465 > 2) by scale factor 0.470976\nI1207 13:24:43.740432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73493 > 2) by scale factor 0.535485\nI1207 13:24:44.684445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.752 > 2) by scale factor 0.533049\nI1207 13:24:45.627233   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14668 > 2) by scale factor 0.482314\nI1207 13:24:46.570709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46856 > 2) by scale factor 0.810188\nI1207 13:24:47.513808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52422 > 2) by scale factor 0.792323\nI1207 13:24:48.456946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67586 > 2) by scale factor 0.544091\nI1207 13:24:49.400281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61885 > 2) by scale factor 0.763693\nI1207 13:24:50.342641   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2424 > 2) by scale factor 0.471431\nI1207 13:24:51.285513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13568 > 2) by scale factor 0.483597\nI1207 13:24:52.229216   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32628 > 2) by scale factor 0.462291\nI1207 13:24:53.173070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70627 > 2) by scale factor 0.739024\nI1207 13:24:54.116477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91957 > 2) by scale factor 0.685033\nI1207 13:24:55.059571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38973 > 2) by scale factor 0.836914\nI1207 13:24:56.003624   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29668 > 2) by scale factor 0.870824\nI1207 13:24:56.947336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81543 > 2) by scale factor 0.524188\nI1207 13:24:57.891335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95551 > 2) by scale factor 0.403591\nI1207 13:24:58.834974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50394 > 2) by scale factor 0.570786\nI1207 13:24:59.778682   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62414 > 2) by scale factor 0.432513\nI1207 13:25:00.722108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53962 > 2) by scale factor 0.565033\nI1207 13:25:01.665972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76055 > 2) by scale factor 0.531837\nI1207 13:25:02.609556   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30427 > 2) by scale factor 0.605278\nI1207 13:25:03.552954   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07273 > 2) by scale factor 0.650888\nI1207 13:25:04.496388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01628 > 2) by scale factor 0.497974\nI1207 13:25:05.440356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.3918 > 2) by scale factor 0.455394\nI1207 13:25:06.384459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79017 > 2) by scale factor 0.527681\nI1207 13:25:07.328135   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94072 > 2) by scale factor 0.507522\nI1207 13:25:08.272045   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32482 > 2) by scale factor 0.860282\nI1207 13:25:09.215585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84138 > 2) by scale factor 0.520646\nI1207 13:25:10.159263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19619 > 2) by scale factor 0.625745\nI1207 13:25:11.103210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73147 > 2) by scale factor 0.535982\nI1207 13:25:12.046974   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8616 > 2) by scale factor 0.698909\nI1207 13:25:12.990665   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92815 > 2) by scale factor 0.683025\nI1207 13:25:13.933940   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83883 > 2) by scale factor 0.704517\nI1207 13:25:14.878082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96484 > 2) by scale factor 0.674572\nI1207 13:25:15.821542   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29154 > 2) by scale factor 0.607619\nI1207 13:25:16.765086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32316 > 2) by scale factor 0.462625\nI1207 13:25:17.708813   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4154 > 2) by scale factor 0.45296\nI1207 13:25:17.720791   369 solver.cpp:337] Iteration 19100, Testing net (#0)\nI1207 13:26:10.691807   369 solver.cpp:404]     Test net output #0: accuracy = 
0.10685\nI1207 13:26:10.692430   369 solver.cpp:404]     Test net output #1: loss = 41.3014 (* 1 = 41.3014 loss)\nI1207 13:26:11.562918   369 solver.cpp:228] Iteration 19100, loss = 44.5899\nI1207 13:26:11.562978   369 solver.cpp:244]     Train net output #0: accuracy = 0.05\nI1207 13:26:11.562997   369 solver.cpp:244]     Train net output #1: loss = 44.5899 (* 1 = 44.5899 loss)\nI1207 13:26:11.645246   369 sgd_solver.cpp:166] Iteration 19100, lr = 2.865\nI1207 13:26:11.655374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37512 > 2) by scale factor 0.45713\nI1207 13:26:12.592386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76377 > 2) by scale factor 0.531382\nI1207 13:26:13.529325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65857 > 2) by scale factor 0.546662\nI1207 13:26:14.466332   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35389 > 2) by scale factor 0.459359\nI1207 13:26:15.403066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77322 > 2) by scale factor 0.721183\nI1207 13:26:16.339788   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75159 > 2) by scale factor 0.533108\nI1207 13:26:17.276767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09112 > 2) by scale factor 0.647015\nI1207 13:26:18.213763   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47216 > 2) by scale factor 0.447211\nI1207 13:26:19.150830   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58673 > 2) by scale factor 0.773176\nI1207 13:26:20.087358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96441 > 2) by scale factor 0.504489\nI1207 13:26:21.023450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20425 > 2) by scale factor 0.624171\nI1207 13:26:21.960491  
 369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94987 > 2) by scale factor 0.677996\nI1207 13:26:22.897255   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7053 > 2) by scale factor 0.73929\nI1207 13:26:23.834818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2475 > 2) by scale factor 0.615858\nI1207 13:26:24.771941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68181 > 2) by scale factor 0.427185\nI1207 13:26:25.709435   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96907 > 2) by scale factor 0.673612\nI1207 13:26:26.646448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8683 > 2) by scale factor 0.517023\nI1207 13:26:27.583557   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19352 > 2) by scale factor 0.626268\nI1207 13:26:28.520861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40996 > 2) by scale factor 0.586516\nI1207 13:26:29.457698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60607 > 2) by scale factor 0.767439\nI1207 13:26:30.394515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13849 > 2) by scale factor 0.63725\nI1207 13:26:31.331933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54051 > 2) by scale factor 0.440479\nI1207 13:26:32.269124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43174 > 2) by scale factor 0.45129\nI1207 13:26:33.205698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15982 > 2) by scale factor 0.48079\nI1207 13:26:34.143205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14487 > 2) by scale factor 0.482524\nI1207 13:26:35.080626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.01146 > 2) by scale factor 0.664129\nI1207 13:26:36.017513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04143 > 2) by scale factor 0.657586\nI1207 13:26:36.954782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04755 > 2) by scale factor 0.656265\nI1207 13:26:37.891816   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57914 > 2) by scale factor 0.558794\nI1207 13:26:38.828552   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22793 > 2) by scale factor 0.473044\nI1207 13:26:39.765689   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64263 > 2) by scale factor 0.756821\nI1207 13:26:40.702770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68476 > 2) by scale factor 0.542776\nI1207 13:26:41.640447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26677 > 2) by scale factor 0.882313\nI1207 13:26:42.577921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50506 > 2) by scale factor 0.443946\nI1207 13:26:43.515162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87575 > 2) by scale factor 0.69547\nI1207 13:26:44.456235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39091 > 2) by scale factor 0.455487\nI1207 13:26:45.399622   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13969 > 2) by scale factor 0.483128\nI1207 13:26:46.342808   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48746 > 2) by scale factor 0.573483\nI1207 13:26:47.285873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10532 > 2) by scale factor 0.644055\nI1207 13:26:48.229199   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8889 > 2) by scale factor 0.514284\nI1207 13:26:49.172863   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96877 > 2) by scale factor 0.673679\nI1207 13:26:50.115973   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05853 > 2) by scale factor 0.492789\nI1207 13:26:51.059125   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65819 > 2) by scale factor 0.546718\nI1207 13:26:52.002423   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82395 > 2) by scale factor 0.523019\nI1207 13:26:52.945750   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75157 > 2) by scale factor 0.420914\nI1207 13:26:53.889158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00789 > 2) by scale factor 0.499015\nI1207 13:26:54.832470   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06013 > 2) by scale factor 0.492595\nI1207 13:26:55.776015   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8993 > 2) by scale factor 0.512912\nI1207 13:26:56.719365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25873 > 2) by scale factor 0.469624\nI1207 13:26:57.662643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11231 > 2) by scale factor 0.486345\nI1207 13:26:58.606340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47159 > 2) by scale factor 0.576105\nI1207 13:26:59.550005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87807 > 2) by scale factor 0.51572\nI1207 13:27:00.493646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30851 > 2) by scale factor 0.464198\nI1207 13:27:01.437058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16935 > 2) by scale factor 0.631044\nI1207 13:27:02.380569   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.495 > 2) by scale factor 0.572245\nI1207 13:27:03.324025   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32978 > 2) by scale factor 0.600641\nI1207 13:27:04.267140   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28593 > 2) by scale factor 0.466643\nI1207 13:27:05.210124   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61863 > 2) by scale factor 0.552695\nI1207 13:27:06.152811   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31858 > 2) by scale factor 0.602667\nI1207 13:27:07.096041   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25085 > 2) by scale factor 0.470494\nI1207 13:27:08.039691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41017 > 2) by scale factor 0.829818\nI1207 13:27:08.983230   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46007 > 2) by scale factor 0.812987\nI1207 13:27:09.926565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20395 > 2) by scale factor 0.475743\nI1207 13:27:10.870193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00786 > 2) by scale factor 0.664925\nI1207 13:27:11.814066   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09185 > 2) by scale factor 0.646861\nI1207 13:27:12.757630   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14983 > 2) by scale factor 0.634955\nI1207 13:27:13.701176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24044 > 2) by scale factor 0.892682\nI1207 13:27:14.644536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98096 > 2) by scale factor 0.502392\nI1207 13:27:15.587779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15377 > 2) by scale factor 0.634162\nI1207 13:27:16.530985   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86848 > 2) by scale factor 0.697232\nI1207 13:27:17.474611   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42867 > 2) by scale factor 0.451603\nI1207 13:27:18.417793   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4641 > 2) by scale factor 0.57735\nI1207 13:27:19.360863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09905 > 2) by scale factor 0.645359\nI1207 13:27:20.303870   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12756 > 2) by scale factor 0.639476\nI1207 13:27:21.247472   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21412 > 2) by scale factor 0.474595\nI1207 13:27:22.191085   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39006 > 2) by scale factor 0.58996\nI1207 13:27:23.133929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34692 > 2) by scale factor 0.597564\nI1207 13:27:24.076958   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44806 > 2) by scale factor 0.816972\nI1207 13:27:25.019951   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25814 > 2) by scale factor 0.613847\nI1207 13:27:25.963137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39415 > 2) by scale factor 0.589249\nI1207 13:27:26.906126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.283 > 2) by scale factor 0.609198\nI1207 13:27:27.849333   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87879 > 2) by scale factor 0.515625\nI1207 13:27:28.791908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88319 > 2) by scale factor 0.515041\nI1207 13:27:29.735585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.6129 > 2) by scale factor 0.433567\nI1207 13:27:30.679644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84029 > 2) by scale factor 0.704154\nI1207 13:27:31.623803   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44079 > 2) by scale factor 0.581262\nI1207 13:27:32.567467   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68198 > 2) by scale factor 0.543186\nI1207 13:27:33.511350   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78904 > 2) by scale factor 0.41762\nI1207 13:27:34.454831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07382 > 2) by scale factor 0.650657\nI1207 13:27:35.398413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61976 > 2) by scale factor 0.432923\nI1207 13:27:36.343008   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63498 > 2) by scale factor 0.550209\nI1207 13:27:37.286839   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90463 > 2) by scale factor 0.512212\nI1207 13:27:38.230695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96978 > 2) by scale factor 0.402432\nI1207 13:27:39.174620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12357 > 2) by scale factor 0.485017\nI1207 13:27:40.118605   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43211 > 2) by scale factor 0.582733\nI1207 13:27:41.062149   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.30852 > 2) by scale factor 0.376753\nI1207 13:27:42.006158   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72083 > 2) by scale factor 0.423654\nI1207 13:27:42.949448   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90232 > 2) by scale factor 0.689104\nI1207 13:27:43.892321   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51922 > 2) by scale factor 0.442555\nI1207 13:27:44.835106   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22222 > 2) by scale factor 0.473684\nI1207 13:27:44.847100   369 solver.cpp:337] Iteration 19200, Testing net (#0)\nI1207 13:28:37.777081   369 solver.cpp:404]     Test net output #0: accuracy = 0.1695\nI1207 13:28:37.777740   369 solver.cpp:404]     Test net output #1: loss = 29.4594 (* 1 = 29.4594 loss)\nI1207 13:28:38.648191   369 solver.cpp:228] Iteration 19200, loss = 28.1904\nI1207 13:28:38.648234   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 13:28:38.648252   369 solver.cpp:244]     Train net output #1: loss = 28.1904 (* 1 = 28.1904 loss)\nI1207 13:28:38.728217   369 sgd_solver.cpp:166] Iteration 19200, lr = 2.88\nI1207 13:28:38.738411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25233 > 2) by scale factor 0.614943\nI1207 13:28:39.675459   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56439 > 2) by scale factor 0.561106\nI1207 13:28:40.612488   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97554 > 2) by scale factor 0.503076\nI1207 13:28:41.549577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37401 > 2) by scale factor 0.842455\nI1207 13:28:42.486752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32117 > 2) by scale factor 0.602198\nI1207 13:28:43.424000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14593 > 2) by scale factor 0.4824\nI1207 13:28:44.360764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86862 > 2) by scale factor 0.410794\nI1207 13:28:45.297987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5954 > 2) by scale factor 0.770596\nI1207 13:28:46.235035   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07865 > 2) by scale factor 0.649635\nI1207 13:28:47.171939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51208 > 2) by scale factor 0.569463\nI1207 13:28:48.117637   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44906 > 2) by scale factor 0.816641\nI1207 13:28:49.055893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79804 > 2) by scale factor 0.526588\nI1207 13:28:49.993376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31309 > 2) by scale factor 0.463704\nI1207 13:28:50.930801   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17077 > 2) by scale factor 0.479528\nI1207 13:28:51.868360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06581 > 2) by scale factor 0.491907\nI1207 13:28:52.805794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56366 > 2) by scale factor 0.780135\nI1207 13:28:53.743119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43995 > 2) by scale factor 0.819689\nI1207 13:28:54.680348   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28115 > 2) by scale factor 0.609542\nI1207 13:28:55.617266   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.515 > 2) by scale factor 0.568991\nI1207 13:28:56.554430   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87359 > 2) by scale factor 0.695994\nI1207 13:28:57.491780   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16675 > 2) by scale factor 0.631563\nI1207 13:28:58.428791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9144 > 2) by scale factor 0.510934\nI1207 13:28:59.366055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.86474 > 2) by scale factor 0.517499\nI1207 13:29:00.303387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77937 > 2) by scale factor 0.719587\nI1207 13:29:01.240408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35881 > 2) by scale factor 0.458841\nI1207 13:29:02.177460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13117 > 2) by scale factor 0.638739\nI1207 13:29:03.114285   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77396 > 2) by scale factor 0.529947\nI1207 13:29:04.051789   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93034 > 2) by scale factor 0.682514\nI1207 13:29:04.988906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68355 > 2) by scale factor 0.745281\nI1207 13:29:05.925561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37644 > 2) by scale factor 0.456992\nI1207 13:29:06.863092   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35876 > 2) by scale factor 0.458846\nI1207 13:29:07.799979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98141 > 2) by scale factor 0.502335\nI1207 13:29:08.743522   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42637 > 2) by scale factor 0.583707\nI1207 13:29:09.686820   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18719 > 2) by scale factor 0.627511\nI1207 13:29:10.629982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47328 > 2) by scale factor 0.4471\nI1207 13:29:11.573014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32145 > 2) by scale factor 0.462808\nI1207 13:29:12.516162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01002 > 2) by scale factor 0.664448\nI1207 13:29:13.459060   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33675 > 2) by scale factor 0.599386\nI1207 13:29:14.401896   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40945 > 2) by scale factor 0.586604\nI1207 13:29:15.345257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31054 > 2) by scale factor 0.604131\nI1207 13:29:16.288667   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88821 > 2) by scale factor 0.409147\nI1207 13:29:17.231873   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58826 > 2) by scale factor 0.435895\nI1207 13:29:18.174862   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14551 > 2) by scale factor 0.482449\nI1207 13:29:19.118191   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47344 > 2) by scale factor 0.575799\nI1207 13:29:20.061357   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83001 > 2) by scale factor 0.522191\nI1207 13:29:21.004501   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.585 > 2) by scale factor 0.773693\nI1207 13:29:21.947576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37153 > 2) by scale factor 0.593203\nI1207 13:29:22.890082   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2459 > 2) by scale factor 0.471042\nI1207 13:29:23.832684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18133 > 2) by scale factor 0.628668\nI1207 13:29:24.775413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46969 > 2) by scale factor 0.576421\nI1207 13:29:25.718643   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9049 > 2) by scale factor 0.688492\nI1207 13:29:26.661223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.53581 > 2) by scale factor 0.440936\nI1207 13:29:27.603806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15828 > 2) by scale factor 0.480968\nI1207 13:29:28.546838   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85415 > 2) by scale factor 0.518921\nI1207 13:29:29.489434   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34055 > 2) by scale factor 0.460772\nI1207 13:29:30.432163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50835 > 2) by scale factor 0.570068\nI1207 13:29:31.374807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83823 > 2) by scale factor 0.521073\nI1207 13:29:32.317503   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12421 > 2) by scale factor 0.640161\nI1207 13:29:33.260150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40217 > 2) by scale factor 0.58786\nI1207 13:29:34.202769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51053 > 2) by scale factor 0.569714\nI1207 13:29:35.145359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41956 > 2) by scale factor 0.452533\nI1207 13:29:36.087921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72607 > 2) by scale factor 0.536758\nI1207 13:29:37.031157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90584 > 2) by scale factor 0.512054\nI1207 13:29:37.973893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13767 > 2) by scale factor 0.483363\nI1207 13:29:38.917410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17033 > 2) by scale factor 0.479578\nI1207 13:29:39.860088   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08083 > 2) by scale factor 0.649176\nI1207 13:29:40.803083   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9818 > 2) by scale factor 0.502285\nI1207 13:29:41.745829   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75732 > 2) by scale factor 0.532294\nI1207 13:29:42.688650   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61352 > 2) by scale factor 0.765251\nI1207 13:29:43.631798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94202 > 2) by scale factor 0.679805\nI1207 13:29:44.574497   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8954 > 2) by scale factor 0.513427\nI1207 13:29:45.517293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.498 > 2) by scale factor 0.571756\nI1207 13:29:46.460700   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15203 > 2) by scale factor 0.481692\nI1207 13:29:47.403684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03925 > 2) by scale factor 0.658057\nI1207 13:29:48.346640   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53981 > 2) by scale factor 0.565002\nI1207 13:29:49.289644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5144 > 2) by scale factor 0.443027\nI1207 13:29:50.232419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90238 > 2) by scale factor 0.689089\nI1207 13:29:51.175393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35124 > 2) by scale factor 0.459639\nI1207 13:29:52.118690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87377 > 2) by scale factor 0.516294\nI1207 13:29:53.061805   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83227 > 2) by scale factor 0.521883\nI1207 13:29:54.005012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.77606 > 2) by scale factor 0.529653\nI1207 13:29:54.947685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76754 > 2) by scale factor 0.722663\nI1207 13:29:55.890619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84342 > 2) by scale factor 0.703379\nI1207 13:29:56.833510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66084 > 2) by scale factor 0.751641\nI1207 13:29:57.776371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29697 > 2) by scale factor 0.465444\nI1207 13:29:58.719431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8004 > 2) by scale factor 0.52626\nI1207 13:29:59.662159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39737 > 2) by scale factor 0.588691\nI1207 13:30:00.605175   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8108 > 2) by scale factor 0.524824\nI1207 13:30:01.548405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36393 > 2) by scale factor 0.846047\nI1207 13:30:02.491444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38522 > 2) by scale factor 0.590804\nI1207 13:30:03.434515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33821 > 2) by scale factor 0.599124\nI1207 13:30:04.377383   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3178 > 2) by scale factor 0.463199\nI1207 13:30:05.320684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59266 > 2) by scale factor 0.556691\nI1207 13:30:06.263386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72999 > 2) by scale factor 0.536195\nI1207 13:30:07.206159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87012 > 2) by scale factor 0.696835\nI1207 13:30:08.149235   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0099 > 2) by scale factor 0.664473\nI1207 13:30:09.092933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88084 > 2) by scale factor 0.694241\nI1207 13:30:10.035972   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21122 > 2) by scale factor 0.90448\nI1207 13:30:10.978767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39697 > 2) by scale factor 0.834388\nI1207 13:30:11.921319   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11007 > 2) by scale factor 0.643072\nI1207 13:30:11.933212   369 solver.cpp:337] Iteration 19300, Testing net (#0)\nI1207 13:31:04.908713   369 solver.cpp:404]     Test net output #0: accuracy = 0.16605\nI1207 13:31:04.909358   369 solver.cpp:404]     Test net output #1: loss = 20.8021 (* 1 = 20.8021 loss)\nI1207 13:31:05.779614   369 solver.cpp:228] Iteration 19300, loss = 20.5394\nI1207 13:31:05.779670   369 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1207 13:31:05.779695   369 solver.cpp:244]     Train net output #1: loss = 20.5394 (* 1 = 20.5394 loss)\nI1207 13:31:05.855770   369 sgd_solver.cpp:166] Iteration 19300, lr = 2.895\nI1207 13:31:05.865969   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15975 > 2) by scale factor 0.632962\nI1207 13:31:06.802609   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45094 > 2) by scale factor 0.579552\nI1207 13:31:07.739876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83226 > 2) by scale factor 0.706149\nI1207 13:31:08.676837   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00527 > 2) by scale factor 0.665497\nI1207 13:31:09.613807   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17585 > 2) by scale factor 0.478945\nI1207 13:31:10.550520   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72588 > 2) by scale factor 0.733709\nI1207 13:31:11.487686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53905 > 2) by scale factor 0.440621\nI1207 13:31:12.424639   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07925 > 2) by scale factor 0.649509\nI1207 13:31:13.361099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38186 > 2) by scale factor 0.456427\nI1207 13:31:14.298166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86237 > 2) by scale factor 0.517816\nI1207 13:31:15.235590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85452 > 2) by scale factor 0.700643\nI1207 13:31:16.172219   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55328 > 2) by scale factor 0.439244\nI1207 13:31:17.109413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62296 > 2) by scale factor 0.762498\nI1207 13:31:18.046764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77866 > 2) by scale factor 0.529289\nI1207 13:31:18.983479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85865 > 2) by scale factor 0.518316\nI1207 13:31:19.920621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65607 > 2) by scale factor 0.547035\nI1207 13:31:20.858012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23412 > 2) by scale factor 0.472353\nI1207 13:31:21.795074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.593 > 2) by scale factor 0.435446\nI1207 13:31:22.732405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08333 > 2) by scale factor 0.64865\nI1207 13:31:23.669697   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.29778 > 2) by scale factor 0.465357\nI1207 13:31:24.606663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64697 > 2) by scale factor 0.548401\nI1207 13:31:25.544174   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79183 > 2) by scale factor 0.527449\nI1207 13:31:26.481606   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20137 > 2) by scale factor 0.476035\nI1207 13:31:27.418207   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46902 > 2) by scale factor 0.576532\nI1207 13:31:28.355481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24901 > 2) by scale factor 0.470698\nI1207 13:31:29.292654   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10888 > 2) by scale factor 0.643318\nI1207 13:31:30.229982   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07141 > 2) by scale factor 0.651166\nI1207 13:31:31.166941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95293 > 2) by scale factor 0.505954\nI1207 13:31:32.104012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55696 > 2) by scale factor 0.562278\nI1207 13:31:33.040943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1236 > 2) by scale factor 0.485013\nI1207 13:31:33.978119   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85879 > 2) by scale factor 0.518298\nI1207 13:31:34.920646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51324 > 2) by scale factor 0.569276\nI1207 13:31:35.863953   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59581 > 2) by scale factor 0.435179\nI1207 13:31:36.806910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99623 > 2) by scale factor 0.667505\nI1207 13:31:37.749269   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87596 > 2) by scale factor 0.516002\nI1207 13:31:38.691735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05302 > 2) by scale factor 0.49346\nI1207 13:31:39.634268   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16883 > 2) by scale factor 0.479751\nI1207 13:31:40.577544   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16919 > 2) by scale factor 0.631076\nI1207 13:31:41.520262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19052 > 2) by scale factor 0.626857\nI1207 13:31:42.462903   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26588 > 2) by scale factor 0.468836\nI1207 13:31:43.405735   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89576 > 2) by scale factor 0.513378\nI1207 13:31:44.348508   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91408 > 2) by scale factor 0.686322\nI1207 13:31:45.290997   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12512 > 2) by scale factor 0.639976\nI1207 13:31:46.234036   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71495 > 2) by scale factor 0.538365\nI1207 13:31:47.176826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54164 > 2) by scale factor 0.440369\nI1207 13:31:48.119679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53441 > 2) by scale factor 0.441071\nI1207 13:31:49.062546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8975 > 2) by scale factor 0.513149\nI1207 13:31:50.005686   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41674 > 2) by scale factor 0.452823\nI1207 13:31:50.948375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.00466 > 2) by scale factor 0.665633\nI1207 13:31:51.891147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45045 > 2) by scale factor 0.816176\nI1207 13:31:52.833565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45255 > 2) by scale factor 0.579282\nI1207 13:31:53.775944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80324 > 2) by scale factor 0.416386\nI1207 13:31:54.718528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58199 > 2) by scale factor 0.55835\nI1207 13:31:55.661056   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91007 > 2) by scale factor 0.687268\nI1207 13:31:56.603672   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68887 > 2) by scale factor 0.743806\nI1207 13:31:57.545876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55273 > 2) by scale factor 0.439297\nI1207 13:31:58.488818   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80707 > 2) by scale factor 0.525339\nI1207 13:31:59.431465   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01738 > 2) by scale factor 0.497837\nI1207 13:32:00.374161   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89462 > 2) by scale factor 0.690938\nI1207 13:32:01.316812   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26901 > 2) by scale factor 0.468493\nI1207 13:32:02.259055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13728 > 2) by scale factor 0.483409\nI1207 13:32:03.200917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44831 > 2) by scale factor 0.449609\nI1207 13:32:04.143806   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44839 > 2) by scale factor 0.4496\nI1207 13:32:05.086518   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91523 > 2) by scale factor 0.510826\nI1207 13:32:06.029724   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94062 > 2) by scale factor 0.680129\nI1207 13:32:06.972375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11196 > 2) by scale factor 0.642682\nI1207 13:32:07.914852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71448 > 2) by scale factor 0.538433\nI1207 13:32:08.857861   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21211 > 2) by scale factor 0.622644\nI1207 13:32:09.800299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12199 > 2) by scale factor 0.485203\nI1207 13:32:10.743378   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72012 > 2) by scale factor 0.537618\nI1207 13:32:11.686403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44156 > 2) by scale factor 0.819148\nI1207 13:32:12.629217   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30132 > 2) by scale factor 0.605818\nI1207 13:32:13.572134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14084 > 2) by scale factor 0.636772\nI1207 13:32:14.514571   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91932 > 2) by scale factor 0.510292\nI1207 13:32:15.457325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00198 > 2) by scale factor 0.499752\nI1207 13:32:16.400044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43526 > 2) by scale factor 0.450932\nI1207 13:32:17.342577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3986 > 2) by scale factor 0.588478\nI1207 13:32:18.284939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.18235 > 2) by scale factor 0.478199\nI1207 13:32:19.227656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28762 > 2) by scale factor 0.466459\nI1207 13:32:20.170553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15773 > 2) by scale factor 0.633366\nI1207 13:32:21.112948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91688 > 2) by scale factor 0.685663\nI1207 13:32:22.055554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35781 > 2) by scale factor 0.848245\nI1207 13:32:22.997824   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53386 > 2) by scale factor 0.565954\nI1207 13:32:23.940021   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65308 > 2) by scale factor 0.547483\nI1207 13:32:24.882738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88089 > 2) by scale factor 0.515346\nI1207 13:32:25.825513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16296 > 2) by scale factor 0.480428\nI1207 13:32:26.768074   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51704 > 2) by scale factor 0.442768\nI1207 13:32:27.710481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09583 > 2) by scale factor 0.488301\nI1207 13:32:28.652995   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47603 > 2) by scale factor 0.446824\nI1207 13:32:29.595590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67283 > 2) by scale factor 0.748271\nI1207 13:32:30.537767   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.007 > 2) by scale factor 0.499127\nI1207 13:32:31.480108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74374 > 2) by scale factor 0.534224\nI1207 13:32:32.422258 
  369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2094 > 2) by scale factor 0.475127\nI1207 13:32:33.364553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78978 > 2) by scale factor 0.527735\nI1207 13:32:34.307134   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65855 > 2) by scale factor 0.546665\nI1207 13:32:35.249881   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62532 > 2) by scale factor 0.551676\nI1207 13:32:36.192587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43018 > 2) by scale factor 0.451449\nI1207 13:32:37.135138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71451 > 2) by scale factor 0.424222\nI1207 13:32:38.077621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7353 > 2) by scale factor 0.535432\nI1207 13:32:39.020656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6125 > 2) by scale factor 0.553634\nI1207 13:32:39.032667   369 solver.cpp:337] Iteration 19400, Testing net (#0)\nI1207 13:33:32.002490   369 solver.cpp:404]     Test net output #0: accuracy = 0.1212\nI1207 13:33:32.003150   369 solver.cpp:404]     Test net output #1: loss = 14.9888 (* 1 = 14.9888 loss)\nI1207 13:33:32.873549   369 solver.cpp:228] Iteration 19400, loss = 16.544\nI1207 13:33:32.873606   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 13:33:32.873625   369 solver.cpp:244]     Train net output #1: loss = 16.544 (* 1 = 16.544 loss)\nI1207 13:33:32.950886   369 sgd_solver.cpp:166] Iteration 19400, lr = 2.91\nI1207 13:33:32.961022   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15866 > 2) by scale factor 0.63318\nI1207 13:33:33.898170   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37044 > 2) by scale factor 0.843725\nI1207 13:33:34.835494   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05653 > 2) by scale factor 0.654336\nI1207 13:33:35.773176   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58166 > 2) by scale factor 0.436523\nI1207 13:33:36.710342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46049 > 2) by scale factor 0.577953\nI1207 13:33:37.647826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09046 > 2) by scale factor 0.488942\nI1207 13:33:38.585047   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95139 > 2) by scale factor 0.677646\nI1207 13:33:39.522146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73037 > 2) by scale factor 0.732501\nI1207 13:33:40.459373   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76821 > 2) by scale factor 0.419444\nI1207 13:33:41.396504   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54101 > 2) by scale factor 0.44043\nI1207 13:33:42.333509   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57623 > 2) by scale factor 0.437041\nI1207 13:33:43.270529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02097 > 2) by scale factor 0.497392\nI1207 13:33:44.207342   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25174 > 2) by scale factor 0.615056\nI1207 13:33:45.144114   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06049 > 2) by scale factor 0.653491\nI1207 13:33:46.080927   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14803 > 2) by scale factor 0.931084\nI1207 13:33:47.017949   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94448 > 2) by scale factor 0.507038\nI1207 13:33:47.954962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.5731 > 2) by scale factor 0.559738\nI1207 13:33:48.892279   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06511 > 2) by scale factor 0.652505\nI1207 13:33:49.829179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68441 > 2) by scale factor 0.745042\nI1207 13:33:50.766248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1112 > 2) by scale factor 0.486476\nI1207 13:33:51.703330   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08933 > 2) by scale factor 0.489077\nI1207 13:33:52.640559   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98084 > 2) by scale factor 0.502407\nI1207 13:33:53.577281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94292 > 2) by scale factor 0.507238\nI1207 13:33:54.513943   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31698 > 2) by scale factor 0.463287\nI1207 13:33:55.451444   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34123 > 2) by scale factor 0.460699\nI1207 13:33:56.388738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85217 > 2) by scale factor 0.519188\nI1207 13:33:57.325872   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20751 > 2) by scale factor 0.623538\nI1207 13:33:58.263280   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50594 > 2) by scale factor 0.798104\nI1207 13:33:59.200278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38398 > 2) by scale factor 0.456207\nI1207 13:34:00.137182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4933 > 2) by scale factor 0.572525\nI1207 13:34:01.074153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47326 > 2) by scale factor 0.575827\nI1207 13:34:02.011297   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3408 > 2) by scale factor 0.460745\nI1207 13:34:02.948726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44011 > 2) by scale factor 0.581377\nI1207 13:34:03.889107   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65015 > 2) by scale factor 0.430094\nI1207 13:34:04.831550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69047 > 2) by scale factor 0.426396\nI1207 13:34:05.773725   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55672 > 2) by scale factor 0.562316\nI1207 13:34:06.718224   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24766 > 2) by scale factor 0.470847\nI1207 13:34:07.672761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90023 > 2) by scale factor 0.408144\nI1207 13:34:08.615005   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84431 > 2) by scale factor 0.520249\nI1207 13:34:09.557795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77081 > 2) by scale factor 0.53039\nI1207 13:34:10.500303   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28153 > 2) by scale factor 0.609472\nI1207 13:34:11.443094   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02164 > 2) by scale factor 0.661893\nI1207 13:34:12.385720   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68058 > 2) by scale factor 0.746108\nI1207 13:34:13.328454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33116 > 2) by scale factor 0.857943\nI1207 13:34:14.271143   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68803 > 2) by scale factor 0.542295\nI1207 13:34:15.213738   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.27265 > 2) by scale factor 0.611125\nI1207 13:34:16.156464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63128 > 2) by scale factor 0.760086\nI1207 13:34:17.098930   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6046 > 2) by scale factor 0.434348\nI1207 13:34:18.040966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85328 > 2) by scale factor 0.519038\nI1207 13:34:18.983546   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18548 > 2) by scale factor 0.627848\nI1207 13:34:19.926376   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45654 > 2) by scale factor 0.578613\nI1207 13:34:20.869006   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13731 > 2) by scale factor 0.637488\nI1207 13:34:21.811709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20278 > 2) by scale factor 0.475875\nI1207 13:34:22.754293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37082 > 2) by scale factor 0.45758\nI1207 13:34:23.697055   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70718 > 2) by scale factor 0.539493\nI1207 13:34:24.639519   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79516 > 2) by scale factor 0.417087\nI1207 13:34:25.582265   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24117 > 2) by scale factor 0.617062\nI1207 13:34:26.524576   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61678 > 2) by scale factor 0.433202\nI1207 13:34:27.467000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26918 > 2) by scale factor 0.611775\nI1207 13:34:28.409657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82802 > 2) by scale factor 0.522463\nI1207 13:34:29.352308   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36896 > 2) by scale factor 0.593656\nI1207 13:34:30.294759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63949 > 2) by scale factor 0.757723\nI1207 13:34:31.237248   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48958 > 2) by scale factor 0.573134\nI1207 13:34:32.179325   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91157 > 2) by scale factor 0.686915\nI1207 13:34:33.122771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30851 > 2) by scale factor 0.604502\nI1207 13:34:34.065690   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26232 > 2) by scale factor 0.61306\nI1207 13:34:35.008260   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03297 > 2) by scale factor 0.495912\nI1207 13:34:35.950984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33737 > 2) by scale factor 0.855663\nI1207 13:34:36.893708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10051 > 2) by scale factor 0.95215\nI1207 13:34:37.836257   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32795 > 2) by scale factor 0.462113\nI1207 13:34:38.778627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15432 > 2) by scale factor 0.481427\nI1207 13:34:39.720865   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97231 > 2) by scale factor 0.503485\nI1207 13:34:40.663020   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07821 > 2) by scale factor 0.490411\nI1207 13:34:41.605226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3482 > 2) by scale factor 0.597336\nI1207 13:34:42.547324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.75495 > 2) by scale factor 0.53263\nI1207 13:34:43.490160   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23897 > 2) by scale factor 0.471812\nI1207 13:34:44.432621   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49265 > 2) by scale factor 0.572631\nI1207 13:34:45.374553   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6867 > 2) by scale factor 0.54249\nI1207 13:34:46.316948   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50768 > 2) by scale factor 0.443687\nI1207 13:34:47.259402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07594 > 2) by scale factor 0.650208\nI1207 13:34:48.201755   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47495 > 2) by scale factor 0.575548\nI1207 13:34:49.143841   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39567 > 2) by scale factor 0.588985\nI1207 13:34:50.086179   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37571 > 2) by scale factor 0.592467\nI1207 13:34:51.028659   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08783 > 2) by scale factor 0.647704\nI1207 13:34:51.970917   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62343 > 2) by scale factor 0.762359\nI1207 13:34:52.912734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.232 > 2) by scale factor 0.618813\nI1207 13:34:53.854652   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27807 > 2) by scale factor 0.4675\nI1207 13:34:54.796908   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85483 > 2) by scale factor 0.700567\nI1207 13:34:55.739123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63603 > 2) by scale factor 0.55005\nI1207 13:34:56.681207   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62198 > 2) by scale factor 0.762782\nI1207 13:34:57.623302   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9204 > 2) by scale factor 0.684837\nI1207 13:34:58.565551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18021 > 2) by scale factor 0.62889\nI1207 13:34:59.507728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45765 > 2) by scale factor 0.578428\nI1207 13:35:00.450202   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6986 > 2) by scale factor 0.540746\nI1207 13:35:01.392153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16364 > 2) by scale factor 0.632183\nI1207 13:35:02.333768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94143 > 2) by scale factor 0.50743\nI1207 13:35:03.276691   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14262 > 2) by scale factor 0.636413\nI1207 13:35:04.218513   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90578 > 2) by scale factor 0.512061\nI1207 13:35:05.160967   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76314 > 2) by scale factor 0.531471\nI1207 13:35:06.103070   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56918 > 2) by scale factor 0.778459\nI1207 13:35:06.114986   369 solver.cpp:337] Iteration 19500, Testing net (#0)\nI1207 13:35:59.089994   369 solver.cpp:404]     Test net output #0: accuracy = 0.1733\nI1207 13:35:59.090642   369 solver.cpp:404]     Test net output #1: loss = 13.0061 (* 1 = 13.0061 loss)\nI1207 13:35:59.961467   369 solver.cpp:228] Iteration 19500, loss = 12.8934\nI1207 13:35:59.961525   369 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1207 13:35:59.961545   369 solver.cpp:244]     Train net output #1: loss = 
12.8934 (* 1 = 12.8934 loss)\nI1207 13:36:00.035014   369 sgd_solver.cpp:166] Iteration 19500, lr = 2.925\nI1207 13:36:00.045168   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23615 > 2) by scale factor 0.894396\nI1207 13:36:00.982278   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96807 > 2) by scale factor 0.67384\nI1207 13:36:01.919277   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39243 > 2) by scale factor 0.835972\nI1207 13:36:02.856210   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43861 > 2) by scale factor 0.581631\nI1207 13:36:03.793551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8872 > 2) by scale factor 0.692712\nI1207 13:36:04.730264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11448 > 2) by scale factor 0.486089\nI1207 13:36:05.667387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64805 > 2) by scale factor 0.430288\nI1207 13:36:06.604532   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88081 > 2) by scale factor 0.694248\nI1207 13:36:07.541946   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96251 > 2) by scale factor 0.504731\nI1207 13:36:08.479089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11047 > 2) by scale factor 0.486563\nI1207 13:36:09.416481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21002 > 2) by scale factor 0.623048\nI1207 13:36:10.353695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83403 > 2) by scale factor 0.705708\nI1207 13:36:11.290911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5292 > 2) by scale factor 0.790763\nI1207 13:36:12.228235   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.67161 > 2) by scale factor 0.428118\nI1207 13:36:13.165345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65048 > 2) by scale factor 0.547873\nI1207 13:36:14.102370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93215 > 2) by scale factor 0.508628\nI1207 13:36:15.039499   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.39416 > 2) by scale factor 0.370771\nI1207 13:36:15.976244   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09204 > 2) by scale factor 0.646822\nI1207 13:36:16.913288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25457 > 2) by scale factor 0.61452\nI1207 13:36:17.850016   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08705 > 2) by scale factor 0.393155\nI1207 13:36:18.786742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50531 > 2) by scale factor 0.443921\nI1207 13:36:19.723425   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27521 > 2) by scale factor 0.610647\nI1207 13:36:20.660528   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52004 > 2) by scale factor 0.442474\nI1207 13:36:21.597548   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68024 > 2) by scale factor 0.427329\nI1207 13:36:22.534646   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32307 > 2) by scale factor 0.462634\nI1207 13:36:23.471999   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42007 > 2) by scale factor 0.584783\nI1207 13:36:24.409132   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8832 > 2) by scale factor 0.51504\nI1207 13:36:25.345966   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63874 > 2) by scale factor 0.431152\nI1207 13:36:26.283138   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66318 > 2) by scale factor 0.545974\nI1207 13:36:27.220432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33172 > 2) by scale factor 0.60029\nI1207 13:36:28.157649   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37309 > 2) by scale factor 0.592928\nI1207 13:36:29.094768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32238 > 2) by scale factor 0.462708\nI1207 13:36:30.032495   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98339 > 2) by scale factor 0.670377\nI1207 13:36:30.969287   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54523 > 2) by scale factor 0.564138\nI1207 13:36:33.777262   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56461 > 2) by scale factor 0.779845\nI1207 13:36:34.720077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35562 > 2) by scale factor 0.596016\nI1207 13:36:35.663365   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29382 > 2) by scale factor 0.607197\nI1207 13:36:36.606165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26253 > 2) by scale factor 0.613022\nI1207 13:36:37.549247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39299 > 2) by scale factor 0.45527\nI1207 13:36:38.491834   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95795 > 2) by scale factor 0.676143\nI1207 13:36:39.434931   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68274 > 2) by scale factor 0.543073\nI1207 13:36:40.377852   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3214 > 2) by scale factor 0.462813\nI1207 13:36:41.321146   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.68618 > 2) by scale factor 0.542567\nI1207 13:36:42.264128   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80286 > 2) by scale factor 0.52592\nI1207 13:36:43.206288   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30735 > 2) by scale factor 0.464323\nI1207 13:36:44.149060   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19202 > 2) by scale factor 0.626562\nI1207 13:36:45.091477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77257 > 2) by scale factor 0.721352\nI1207 13:36:46.034049   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3317 > 2) by scale factor 0.461713\nI1207 13:36:46.976868   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99751 > 2) by scale factor 0.66722\nI1207 13:36:47.919515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80619 > 2) by scale factor 0.712711\nI1207 13:36:48.862324   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12333 > 2) by scale factor 0.485045\nI1207 13:36:49.805001   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16203 > 2) by scale factor 0.632504\nI1207 13:36:50.747206   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15749 > 2) by scale factor 0.633414\nI1207 13:36:51.689512   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55534 > 2) by scale factor 0.782675\nI1207 13:36:53.572051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70558 > 2) by scale factor 0.539726\nI1207 13:36:54.514698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68231 > 2) by scale factor 0.745625\nI1207 13:36:55.457353   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84624 > 2) by scale factor 0.702683\nI1207 13:36:56.399786   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42716 > 2) by scale factor 0.824009\nI1207 13:36:57.342360   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34831 > 2) by scale factor 0.597316\nI1207 13:36:58.285127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11115 > 2) by scale factor 0.486482\nI1207 13:36:59.227432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61306 > 2) by scale factor 0.433551\nI1207 13:37:00.170711   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72588 > 2) by scale factor 0.733709\nI1207 13:37:01.113112   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43517 > 2) by scale factor 0.582212\nI1207 13:37:02.055734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06243 > 2) by scale factor 0.653077\nI1207 13:37:02.998733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87863 > 2) by scale factor 0.694776\nI1207 13:37:03.941064   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24803 > 2) by scale factor 0.470806\nI1207 13:37:04.883080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28834 > 2) by scale factor 0.466381\nI1207 13:37:05.825979   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81626 > 2) by scale factor 0.710161\nI1207 13:37:06.768563   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02795 > 2) by scale factor 0.660513\nI1207 13:37:07.711258   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29277 > 2) by scale factor 0.4659\nI1207 13:37:08.653636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57248 > 2) by scale factor 0.77746\nI1207 13:37:09.596046   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.39029 > 2) by scale factor 0.58992\nI1207 13:37:10.538935   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45257 > 2) by scale factor 0.815471\nI1207 13:37:11.481144   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99011 > 2) by scale factor 0.668872\nI1207 13:37:12.423796   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38889 > 2) by scale factor 0.590164\nI1207 13:37:13.366293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16973 > 2) by scale factor 0.921775\nI1207 13:37:14.309101   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97968 > 2) by scale factor 0.671212\nI1207 13:37:15.251742   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71502 > 2) by scale factor 0.538354\nI1207 13:37:16.194864   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15471 > 2) by scale factor 0.633973\nI1207 13:37:17.137315   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5204 > 2) by scale factor 0.568118\nI1207 13:37:18.079768   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76739 > 2) by scale factor 0.530872\nI1207 13:37:19.022589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19571 > 2) by scale factor 0.625839\nI1207 13:37:19.964906   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1931 > 2) by scale factor 0.62635\nI1207 13:37:20.907533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26931 > 2) by scale factor 0.46846\nI1207 13:37:21.850011   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5887 > 2) by scale factor 0.557306\nI1207 13:37:22.792402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36691 > 2) by scale factor 0.45799\nI1207 13:37:23.734807   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30616 > 2) by scale factor 0.464451\nI1207 13:37:24.677263   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69063 > 2) by scale factor 0.541912\nI1207 13:37:25.618965   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15964 > 2) by scale factor 0.632984\nI1207 13:37:26.560474   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46926 > 2) by scale factor 0.447501\nI1207 13:37:27.502876   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35832 > 2) by scale factor 0.458892\nI1207 13:37:28.445222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16975 > 2) by scale factor 0.479645\nI1207 13:37:29.387733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69631 > 2) by scale factor 0.54108\nI1207 13:37:30.330929   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9501 > 2) by scale factor 0.506316\nI1207 13:37:31.273267   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41877 > 2) by scale factor 0.452615\nI1207 13:37:32.215662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56487 > 2) by scale factor 0.56103\nI1207 13:37:33.157464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18919 > 2) by scale factor 0.627119\nI1207 13:37:33.169494   369 solver.cpp:337] Iteration 19600, Testing net (#0)\nI1207 13:38:26.139607   369 solver.cpp:404]     Test net output #0: accuracy = 0.17155\nI1207 13:38:26.140245   369 solver.cpp:404]     Test net output #1: loss = 31.7593 (* 1 = 31.7593 loss)\nI1207 13:38:27.010829   369 solver.cpp:228] Iteration 19600, loss = 31.8054\nI1207 13:38:27.010886   369 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1207 13:38:27.010906   369 solver.cpp:244]     Train net output #1: loss = 
31.8054 (* 1 = 31.8054 loss)\nI1207 13:38:27.087352   369 sgd_solver.cpp:166] Iteration 19600, lr = 2.94\nI1207 13:38:27.097209   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25182 > 2) by scale factor 0.615039\nI1207 13:38:28.033320   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06662 > 2) by scale factor 0.652184\nI1207 13:38:28.970409   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98934 > 2) by scale factor 0.669045\nI1207 13:38:29.907840   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23918 > 2) by scale factor 0.617439\nI1207 13:38:30.844962   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26005 > 2) by scale factor 0.884935\nI1207 13:38:31.781877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00132 > 2) by scale factor 0.499835\nI1207 13:38:32.719305   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93608 > 2) by scale factor 0.508119\nI1207 13:38:33.656460   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11526 > 2) by scale factor 0.485996\nI1207 13:38:34.593402   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63139 > 2) by scale factor 0.760055\nI1207 13:38:35.530200   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13265 > 2) by scale factor 0.483951\nI1207 13:38:36.466771   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95297 > 2) by scale factor 0.505948\nI1207 13:38:37.403551   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17353 > 2) by scale factor 0.630213\nI1207 13:38:38.340397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69164 > 2) by scale factor 0.541764\nI1207 13:38:39.276804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.81828 > 2) by scale factor 0.709654\nI1207 13:38:40.213944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9126 > 2) by scale factor 0.511169\nI1207 13:38:41.151154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63116 > 2) by scale factor 0.550788\nI1207 13:38:42.087996   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36034 > 2) by scale factor 0.595177\nI1207 13:38:43.025338   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15951 > 2) by scale factor 0.480826\nI1207 13:38:43.962456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37693 > 2) by scale factor 0.456941\nI1207 13:38:44.899893   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30636 > 2) by scale factor 0.464429\nI1207 13:38:45.836973   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67122 > 2) by scale factor 0.544777\nI1207 13:38:46.774093   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99929 > 2) by scale factor 0.666825\nI1207 13:38:47.710937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4678 > 2) by scale factor 0.576735\nI1207 13:38:48.647186   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59642 > 2) by scale factor 0.435122\nI1207 13:38:49.584254   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53109 > 2) by scale factor 0.566398\nI1207 13:38:50.521550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9413 > 2) by scale factor 0.679972\nI1207 13:38:51.458597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97709 > 2) by scale factor 0.671798\nI1207 13:38:52.396073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90183 > 2) by scale factor 0.689221\nI1207 13:38:53.333230   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13883 > 2) by scale factor 0.63718\nI1207 13:38:54.270530   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83606 > 2) by scale factor 0.521369\nI1207 13:38:55.207978   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96348 > 2) by scale factor 0.674882\nI1207 13:38:56.145131   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53849 > 2) by scale factor 0.565213\nI1207 13:38:57.083205   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59762 > 2) by scale factor 0.555924\nI1207 13:38:58.019928   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51315 > 2) by scale factor 0.44315\nI1207 13:38:58.957226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46399 > 2) by scale factor 0.57737\nI1207 13:38:59.898478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34803 > 2) by scale factor 0.597367\nI1207 13:39:00.841411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56496 > 2) by scale factor 0.561016\nI1207 13:39:01.783798   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49451 > 2) by scale factor 0.444988\nI1207 13:39:02.725950   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03727 > 2) by scale factor 0.495385\nI1207 13:39:03.668450   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81468 > 2) by scale factor 0.71056\nI1207 13:39:04.610879   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88762 > 2) by scale factor 0.514454\nI1207 13:39:05.553171   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89506 > 2) by scale factor 0.690833\nI1207 13:39:06.495578   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.44344 > 2) by scale factor 0.450102\nI1207 13:39:07.438478   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75003 > 2) by scale factor 0.533329\nI1207 13:39:08.381336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3142 > 2) by scale factor 0.463585\nI1207 13:39:09.323736   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08415 > 2) by scale factor 0.648477\nI1207 13:39:10.265518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18322 > 2) by scale factor 0.628295\nI1207 13:39:11.207625   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78241 > 2) by scale factor 0.528763\nI1207 13:39:12.149916   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35191 > 2) by scale factor 0.459568\nI1207 13:39:13.092118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56964 > 2) by scale factor 0.56028\nI1207 13:39:14.033984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08462 > 2) by scale factor 0.959406\nI1207 13:39:14.975388   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04839 > 2) by scale factor 0.656084\nI1207 13:39:15.917232   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47043 > 2) by scale factor 0.447384\nI1207 13:39:16.859326   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72167 > 2) by scale factor 0.734844\nI1207 13:39:17.801165   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08835 > 2) by scale factor 0.647595\nI1207 13:39:18.743067   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74279 > 2) by scale factor 0.729184\nI1207 13:39:19.684937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3409 > 2) by scale factor 0.460734\nI1207 13:39:20.627120   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6629 > 2) by scale factor 0.751061\nI1207 13:39:21.569561   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28245 > 2) by scale factor 0.609301\nI1207 13:39:22.511340   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62602 > 2) by scale factor 0.761608\nI1207 13:39:23.453456   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12053 > 2) by scale factor 0.640916\nI1207 13:39:24.395483   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71882 > 2) by scale factor 0.423835\nI1207 13:39:25.337410   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89691 > 2) by scale factor 0.513228\nI1207 13:39:26.279518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07501 > 2) by scale factor 0.650404\nI1207 13:39:27.222122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78216 > 2) by scale factor 0.418221\nI1207 13:39:28.164264   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96056 > 2) by scale factor 0.675548\nI1207 13:39:29.106596   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80004 > 2) by scale factor 0.714274\nI1207 13:39:30.048619   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14535 > 2) by scale factor 0.635859\nI1207 13:39:30.989634   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70522 > 2) by scale factor 0.739311\nI1207 13:39:31.931310   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6741 > 2) by scale factor 0.54435\nI1207 13:39:32.873283   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82527 > 2) by scale factor 0.707897\nI1207 13:39:33.815307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.16843 > 2) by scale factor 0.631228\nI1207 13:39:34.757565   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65836 > 2) by scale factor 0.429336\nI1207 13:39:35.699779   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98517 > 2) by scale factor 0.501861\nI1207 13:39:36.642002   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76799 > 2) by scale factor 0.530787\nI1207 13:39:37.583678   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29023 > 2) by scale factor 0.466175\nI1207 13:39:38.525560   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.98247 > 2) by scale factor 0.33431\nI1207 13:39:39.467550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48055 > 2) by scale factor 0.574622\nI1207 13:39:40.409137   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.64724 > 2) by scale factor 0.354155\nI1207 13:39:41.350863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18111 > 2) by scale factor 0.386018\nI1207 13:39:42.292734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7784 > 2) by scale factor 0.529325\nI1207 13:39:43.234479   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83874 > 2) by scale factor 0.521004\nI1207 13:39:44.175854   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.79989 > 2) by scale factor 0.344834\nI1207 13:39:45.117662   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63082 > 2) by scale factor 0.431889\nI1207 13:39:46.059707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49609 > 2) by scale factor 0.444831\nI1207 13:39:47.001688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72801 > 2) by scale factor 0.423011\nI1207 13:39:47.943442   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86394 > 2) by scale factor 0.41119\nI1207 13:39:48.885623   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38817 > 2) by scale factor 0.590288\nI1207 13:39:49.827939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88908 > 2) by scale factor 0.692261\nI1207 13:39:50.769680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00082 > 2) by scale factor 0.499898\nI1207 13:39:51.711405   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16257 > 2) by scale factor 0.480473\nI1207 13:39:52.653208   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58305 > 2) by scale factor 0.558184\nI1207 13:39:53.595480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18063 > 2) by scale factor 0.478396\nI1207 13:39:54.537685   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4415 > 2) by scale factor 0.81917\nI1207 13:39:55.479406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51852 > 2) by scale factor 0.56842\nI1207 13:39:56.420397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.032 > 2) by scale factor 0.397456\nI1207 13:39:57.363431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65341 > 2) by scale factor 0.429792\nI1207 13:39:58.304770   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95467 > 2) by scale factor 0.505731\nI1207 13:39:59.246937   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24157 > 2) by scale factor 0.471524\nI1207 13:40:00.188681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91491 > 2) by scale factor 0.686128\nI1207 13:40:00.200727   369 solver.cpp:337] Iteration 19700, Testing net (#0)\nI1207 13:40:52.981653  
 369 solver.cpp:404]     Test net output #0: accuracy = 0.1223\nI1207 13:40:52.982267   369 solver.cpp:404]     Test net output #1: loss = 27.0254 (* 1 = 27.0254 loss)\nI1207 13:40:53.852751   369 solver.cpp:228] Iteration 19700, loss = 26.1256\nI1207 13:40:53.852797   369 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1207 13:40:53.852814   369 solver.cpp:244]     Train net output #1: loss = 26.1256 (* 1 = 26.1256 loss)\nI1207 13:40:53.932642   369 sgd_solver.cpp:166] Iteration 19700, lr = 2.955\nI1207 13:40:53.942515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69137 > 2) by scale factor 0.426314\nI1207 13:40:54.879223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14633 > 2) by scale factor 0.482354\nI1207 13:40:55.816309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96022 > 2) by scale factor 0.403208\nI1207 13:40:56.753638   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.488 > 2) by scale factor 0.445633\nI1207 13:40:57.690933   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41897 > 2) by scale factor 0.452594\nI1207 13:40:58.628481   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51296 > 2) by scale factor 0.443168\nI1207 13:40:59.565587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82779 > 2) by scale factor 0.707265\nI1207 13:41:00.502712   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34245 > 2) by scale factor 0.598363\nI1207 13:41:01.440587   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27402 > 2) by scale factor 0.61087\nI1207 13:41:02.377709   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62139 > 2) by scale factor 0.43277\nI1207 13:41:03.314882   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89058 > 
2) by scale factor 0.691903\nI1207 13:41:04.252120   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42876 > 2) by scale factor 0.583301\nI1207 13:41:05.189247   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73973 > 2) by scale factor 0.534798\nI1207 13:41:06.126579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11649 > 2) by scale factor 0.48585\nI1207 13:41:07.063345   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3868 > 2) by scale factor 0.455913\nI1207 13:41:07.999826   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41975 > 2) by scale factor 0.826533\nI1207 13:41:08.937180   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89232 > 2) by scale factor 0.691487\nI1207 13:41:09.873909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94822 > 2) by scale factor 0.678375\nI1207 13:41:10.811050   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70132 > 2) by scale factor 0.74038\nI1207 13:41:11.747745   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41263 > 2) by scale factor 0.586059\nI1207 13:41:12.683856   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02293 > 2) by scale factor 0.66161\nI1207 13:41:13.620386   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51094 > 2) by scale factor 0.443366\nI1207 13:41:14.557413   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12311 > 2) by scale factor 0.640388\nI1207 13:41:15.493708   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71125 > 2) by scale factor 0.538902\nI1207 13:41:16.430307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30261 > 2) by scale factor 0.605581\nI1207 13:41:17.366793   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20578 > 2) by scale factor 0.623873\nI1207 13:41:18.303406   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97309 > 2) by scale factor 0.503386\nI1207 13:41:19.240229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91091 > 2) by scale factor 0.687071\nI1207 13:41:20.176445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99101 > 2) by scale factor 0.668671\nI1207 13:41:21.113663   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34404 > 2) by scale factor 0.598078\nI1207 13:41:22.050335   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97968 > 2) by scale factor 0.671213\nI1207 13:41:22.987510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26815 > 2) by scale factor 0.881775\nI1207 13:41:23.924764   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1194 > 2) by scale factor 0.485507\nI1207 13:41:24.862000   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94141 > 2) by scale factor 0.507433\nI1207 13:41:25.806293   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68806 > 2) by scale factor 0.542291\nI1207 13:41:26.751026   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64724 > 2) by scale factor 0.54836\nI1207 13:41:27.695370   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04642 > 2) by scale factor 0.656509\nI1207 13:41:28.639765   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13655 > 2) by scale factor 0.483494\nI1207 13:41:29.584126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39132 > 2) by scale factor 0.455444\nI1207 13:41:30.528389   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.78835 > 2) by scale factor 0.41768\nI1207 13:41:31.472375   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28621 > 2) by scale factor 0.466612\nI1207 13:41:32.416620   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22105 > 2) by scale factor 0.620916\nI1207 13:41:33.361680   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7345 > 2) by scale factor 0.731395\nI1207 13:41:34.306529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57502 > 2) by scale factor 0.437157\nI1207 13:41:35.251538   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19905 > 2) by scale factor 0.625185\nI1207 13:41:36.196440   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62477 > 2) by scale factor 0.551759\nI1207 13:41:37.140941   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74825 > 2) by scale factor 0.421208\nI1207 13:41:38.084975   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35663 > 2) by scale factor 0.595836\nI1207 13:41:39.029537   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24695 > 2) by scale factor 0.615963\nI1207 13:41:39.973987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46641 > 2) by scale factor 0.576966\nI1207 13:41:40.917716   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86856 > 2) by scale factor 0.697214\nI1207 13:41:41.861145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30806 > 2) by scale factor 0.604584\nI1207 13:41:42.805090   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95316 > 2) by scale factor 0.505924\nI1207 13:41:43.749150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82418 > 2) by scale factor 0.522988\nI1207 13:41:44.693030   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17239 > 2) by scale factor 0.630439\nI1207 13:41:45.636468   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29097 > 2) by scale factor 0.872991\nI1207 13:41:46.580731   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43137 > 2) by scale factor 0.582857\nI1207 13:41:47.524613   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4502 > 2) by scale factor 0.81626\nI1207 13:41:48.468590   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5458 > 2) by scale factor 0.439966\nI1207 13:41:49.412585   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13721 > 2) by scale factor 0.483417\nI1207 13:41:50.356431   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56222 > 2) by scale factor 0.438383\nI1207 13:41:51.300626   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63171 > 2) by scale factor 0.550705\nI1207 13:41:52.243970   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04005 > 2) by scale factor 0.657884\nI1207 13:41:53.187912   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39816 > 2) by scale factor 0.588554\nI1207 13:41:54.133003   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89904 > 2) by scale factor 0.689882\nI1207 13:41:55.077374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18141 > 2) by scale factor 0.628651\nI1207 13:41:56.021894   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71715 > 2) by scale factor 0.736065\nI1207 13:41:56.966403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39117 > 2) by scale factor 0.589767\nI1207 13:41:57.910681   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.27302 > 2) by scale factor 0.611056\nI1207 13:41:58.855089   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26486 > 2) by scale factor 0.468948\nI1207 13:41:59.799129   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72009 > 2) by scale factor 0.537621\nI1207 13:42:00.743118   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74578 > 2) by scale factor 0.533934\nI1207 13:42:01.686359   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18706 > 2) by scale factor 0.627537\nI1207 13:42:02.630269   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76132 > 2) by scale factor 0.531729\nI1207 13:42:03.574127   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35701 > 2) by scale factor 0.595768\nI1207 13:42:04.518237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8813 > 2) by scale factor 0.515291\nI1207 13:42:05.461473   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75176 > 2) by scale factor 0.533083\nI1207 13:42:06.405432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34163 > 2) by scale factor 0.854106\nI1207 13:42:07.348909   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41427 > 2) by scale factor 0.585776\nI1207 13:42:08.292443   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18286 > 2) by scale factor 0.628366\nI1207 13:42:09.236397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89726 > 2) by scale factor 0.690307\nI1207 13:42:10.180147   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85098 > 2) by scale factor 0.701514\nI1207 13:42:11.123994   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2874 > 2) by scale factor 0.466484\nI1207 13:42:12.067404   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17691 > 2) by scale factor 0.629542\nI1207 13:42:13.011109   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39135 > 2) by scale factor 0.455441\nI1207 13:42:13.954746   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33682 > 2) by scale factor 0.461168\nI1207 13:42:14.898309   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01242 > 2) by scale factor 0.498452\nI1207 13:42:15.841536   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89523 > 2) by scale factor 0.690793\nI1207 13:42:16.785419   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15853 > 2) by scale factor 0.480939\nI1207 13:42:17.728799   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69307 > 2) by scale factor 0.541554\nI1207 13:42:18.672848   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20728 > 2) by scale factor 0.475367\nI1207 13:42:19.616510   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56529 > 2) by scale factor 0.77964\nI1207 13:42:20.560545   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52738 > 2) by scale factor 0.791333\nI1207 13:42:21.504480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72367 > 2) by scale factor 0.537105\nI1207 13:42:22.448577   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85743 > 2) by scale factor 0.41174\nI1207 13:42:23.393177   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08102 > 2) by scale factor 0.490073\nI1207 13:42:24.337939   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90441 > 2) by scale factor 0.512241\nI1207 13:42:25.282145   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.0664 > 2) by scale factor 0.65223\nI1207 13:42:26.226240   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03238 > 2) by scale factor 0.495986\nI1207 13:42:27.169831   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22409 > 2) by scale factor 0.620331\nI1207 13:42:27.181871   369 solver.cpp:337] Iteration 19800, Testing net (#0)\nI1207 13:43:19.827968   369 solver.cpp:404]     Test net output #0: accuracy = 0.1613\nI1207 13:43:19.828577   369 solver.cpp:404]     Test net output #1: loss = 25.5714 (* 1 = 25.5714 loss)\nI1207 13:43:20.698880   369 solver.cpp:228] Iteration 19800, loss = 25.5661\nI1207 13:43:20.698938   369 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1207 13:43:20.698957   369 solver.cpp:244]     Train net output #1: loss = 25.5661 (* 1 = 25.5661 loss)\nI1207 13:43:20.772595   369 sgd_solver.cpp:166] Iteration 19800, lr = 2.97\nI1207 13:43:20.782794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94946 > 2) by scale factor 0.67809\nI1207 13:43:21.720355   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03433 > 2) by scale factor 0.659125\nI1207 13:43:22.657454   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59406 > 2) by scale factor 0.556474\nI1207 13:43:23.594374   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05992 > 2) by scale factor 0.492621\nI1207 13:43:24.531477   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91135 > 2) by scale factor 0.511333\nI1207 13:43:25.468642   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32002 > 2) by scale factor 0.46296\nI1207 13:43:26.405529   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71237 > 2) by scale factor 0.737364\nI1207 13:43:27.342387   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.91414 > 2) by scale factor 0.510968\nI1207 13:43:28.279572   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83818 > 2) by scale factor 0.704678\nI1207 13:43:29.216339   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61448 > 2) by scale factor 0.76497\nI1207 13:43:30.153679   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86541 > 2) by scale factor 0.697981\nI1207 13:43:31.090688   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85742 > 2) by scale factor 0.518481\nI1207 13:43:32.027644   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7451 > 2) by scale factor 0.53403\nI1207 13:43:32.964733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71116 > 2) by scale factor 0.538915\nI1207 13:43:33.901913   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6988 > 2) by scale factor 0.425641\nI1207 13:43:34.838842   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74569 > 2) by scale factor 0.728415\nI1207 13:43:35.775480   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65169 > 2) by scale factor 0.547692\nI1207 13:43:36.712447   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01286 > 2) by scale factor 0.663821\nI1207 13:43:37.649515   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15501 > 2) by scale factor 0.633913\nI1207 13:43:38.586197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5465 > 2) by scale factor 0.785392\nI1207 13:43:39.523077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08685 > 2) by scale factor 0.489374\nI1207 13:43:40.460261   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24369 > 2) by scale factor 0.471288\nI1207 13:43:41.396975   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52958 > 2) by scale factor 0.441542\nI1207 13:43:43.268987   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96838 > 2) by scale factor 0.673768\nI1207 13:43:44.205863   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91618 > 2) by scale factor 0.510701\nI1207 13:43:45.143296   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09802 > 2) by scale factor 0.645573\nI1207 13:43:46.080327   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17652 > 2) by scale factor 0.62962\nI1207 13:43:47.017222   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51495 > 2) by scale factor 0.795246\nI1207 13:43:47.954699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46367 > 2) by scale factor 0.448062\nI1207 13:43:48.892035   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28582 > 2) by scale factor 0.466655\nI1207 13:43:49.829077   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65553 > 2) by scale factor 0.547116\nI1207 13:43:50.771921   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61149 > 2) by scale factor 0.433699\nI1207 13:43:51.716462   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71847 > 2) by scale factor 0.537856\nI1207 13:43:52.661018   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21389 > 2) by scale factor 0.47462\nI1207 13:43:53.605223   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12784 > 2) by scale factor 0.484514\nI1207 13:43:54.549729   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36592 > 2) by scale factor 0.845338\nI1207 13:43:55.494010   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.14723 > 2) by scale factor 0.63548\nI1207 13:43:56.438380   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54581 > 2) by scale factor 0.785606\nI1207 13:43:57.382791   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17517 > 2) by scale factor 0.919469\nI1207 13:43:58.327122   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6537 > 2) by scale factor 0.54739\nI1207 13:43:59.271347   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06161 > 2) by scale factor 0.492416\nI1207 13:44:00.215734   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34455 > 2) by scale factor 0.460347\nI1207 13:44:01.159612   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68986 > 2) by scale factor 0.542026\nI1207 13:44:02.103747   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76305 > 2) by scale factor 0.419899\nI1207 13:44:03.047420   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71414 > 2) by scale factor 0.736883\nI1207 13:44:03.991648   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99353 > 2) by scale factor 0.668107\nI1207 13:44:04.935730   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59107 > 2) by scale factor 0.556937\nI1207 13:44:05.879752   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11113 > 2) by scale factor 0.94736\nI1207 13:44:06.824105   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97419 > 2) by scale factor 0.672452\nI1207 13:44:07.768157   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1991 > 2) by scale factor 0.476293\nI1207 13:44:08.711944   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74954 > 2) by scale factor 0.533399\nI1207 13:44:09.655592   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04163 > 2) by scale factor 0.49485\nI1207 13:44:10.600123   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86855 > 2) by scale factor 0.697216\nI1207 13:44:11.544080   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85774 > 2) by scale factor 0.411714\nI1207 13:44:12.488241   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96016 > 2) by scale factor 0.675638\nI1207 13:44:13.431877   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5551 > 2) by scale factor 0.439068\nI1207 13:44:14.375699   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07708 > 2) by scale factor 0.649967\nI1207 13:44:15.319550   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16008 > 2) by scale factor 0.632896\nI1207 13:44:16.263558   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53187 > 2) by scale factor 0.789929\nI1207 13:44:17.207237   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21918 > 2) by scale factor 0.383202\nI1207 13:44:18.151108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30265 > 2) by scale factor 0.605575\nI1207 13:44:19.094748   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88226 > 2) by scale factor 0.409646\nI1207 13:44:20.038518   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20673 > 2) by scale factor 0.475429\nI1207 13:44:20.983417   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03181 > 2) by scale factor 0.496055\nI1207 13:44:21.927201   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00363 > 2) by scale factor 0.665861\nI1207 13:44:22.870867   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.08618 > 2) by scale factor 0.64805\nI1207 13:44:23.814636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11726 > 2) by scale factor 0.641589\nI1207 13:44:24.758411   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43024 > 2) by scale factor 0.451443\nI1207 13:44:25.702249   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38425 > 2) by scale factor 0.590974\nI1207 13:44:26.645823   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28572 > 2) by scale factor 0.466666\nI1207 13:44:27.590281   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12183 > 2) by scale factor 0.485222\nI1207 13:44:28.534334   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95695 > 2) by scale factor 0.676373\nI1207 13:44:29.478369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29359 > 2) by scale factor 0.871997\nI1207 13:44:30.422329   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77991 > 2) by scale factor 0.719448\nI1207 13:44:31.366116   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37637 > 2) by scale factor 0.841619\nI1207 13:44:32.310032   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99071 > 2) by scale factor 0.668737\nI1207 13:44:33.253782   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93253 > 2) by scale factor 0.508578\nI1207 13:44:34.197445   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25452 > 2) by scale factor 0.470088\nI1207 13:44:35.141377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80332 > 2) by scale factor 0.525856\nI1207 13:44:36.085295   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44143 > 2) by scale factor 0.450305\nI1207 13:44:37.029237   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33026 > 2) by scale factor 0.600554\nI1207 13:44:37.973044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59619 > 2) by scale factor 0.435143\nI1207 13:44:38.916891   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37879 > 2) by scale factor 0.591928\nI1207 13:44:39.860769   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8096 > 2) by scale factor 0.524989\nI1207 13:44:40.804666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61865 > 2) by scale factor 0.763752\nI1207 13:44:41.748252   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72943 > 2) by scale factor 0.732753\nI1207 13:44:42.692139   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.47892 > 2) by scale factor 0.365035\nI1207 13:44:43.636163   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.23598 > 2) by scale factor 0.381972\nI1207 13:44:44.580154   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62456 > 2) by scale factor 0.762034\nI1207 13:44:45.524358   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42412 > 2) by scale factor 0.584092\nI1207 13:44:46.468072   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16395 > 2) by scale factor 0.632121\nI1207 13:44:47.411821   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51367 > 2) by scale factor 0.569206\nI1207 13:44:48.355756   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63012 > 2) by scale factor 0.550946\nI1207 13:44:49.299226   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40933 > 2) by scale factor 0.453584\nI1207 13:44:50.243461   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.55713 > 2) by scale factor 0.782126\nI1207 13:44:51.188009   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89972 > 2) by scale factor 0.689722\nI1207 13:44:52.131356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24518 > 2) by scale factor 0.471123\nI1207 13:44:53.074898   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94111 > 2) by scale factor 0.507471\nI1207 13:44:54.018555   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8117 > 2) by scale factor 0.711314\nI1207 13:44:54.030452   369 solver.cpp:337] Iteration 19900, Testing net (#0)\nI1207 13:45:47.016793   369 solver.cpp:404]     Test net output #0: accuracy = 0.17575\nI1207 13:45:47.017436   369 solver.cpp:404]     Test net output #1: loss = 19.2373 (* 1 = 19.2373 loss)\nI1207 13:45:47.888013   369 solver.cpp:228] Iteration 19900, loss = 21.816\nI1207 13:45:47.888056   369 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1207 13:45:47.888072   369 solver.cpp:244]     Train net output #1: loss = 21.816 (* 1 = 21.816 loss)\nI1207 13:45:47.963548   369 sgd_solver.cpp:166] Iteration 19900, lr = 2.985\nI1207 13:45:47.973776   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62604 > 2) by scale factor 0.761603\nI1207 13:45:48.910910   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15644 > 2) by scale factor 0.481182\nI1207 13:45:49.847733   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74332 > 2) by scale factor 0.421645\nI1207 13:45:50.785023   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95857 > 2) by scale factor 0.676002\nI1207 13:45:51.722299   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35424 > 2) by scale factor 0.459322\nI1207 13:45:52.659215   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.13947 > 2) by scale factor 0.637051\nI1207 13:45:53.596597   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82403 > 2) by scale factor 0.708208\nI1207 13:45:54.533728   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60974 > 2) by scale factor 0.433864\nI1207 13:45:55.470487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09039 > 2) by scale factor 0.647167\nI1207 13:45:56.407795   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61651 > 2) by scale factor 0.553019\nI1207 13:45:57.345024   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89098 > 2) by scale factor 0.51401\nI1207 13:45:58.282166   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89914 > 2) by scale factor 0.408235\nI1207 13:45:59.219126   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68011 > 2) by scale factor 0.42734\nI1207 13:46:00.156471   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86779 > 2) by scale factor 0.517092\nI1207 13:46:01.093437   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5763 > 2) by scale factor 0.437034\nI1207 13:46:02.029853   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72369 > 2) by scale factor 0.734297\nI1207 13:46:02.967182   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85587 > 2) by scale factor 0.411873\nI1207 13:46:03.904356   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.17987 > 2) by scale factor 0.38611\nI1207 13:46:04.841044   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66 > 2) by scale factor 0.546449\nI1207 13:46:05.778117   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58523 > 2) by scale factor 0.436183\nI1207 13:46:06.715167   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.3196 > 2) by scale factor 0.375968\nI1207 13:46:07.652307   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23805 > 2) by scale factor 0.617655\nI1207 13:46:08.589449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47394 > 2) by scale factor 0.575716\nI1207 13:46:09.526432   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73254 > 2) by scale factor 0.535829\nI1207 13:46:10.463570   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31846 > 2) by scale factor 0.862642\nI1207 13:46:11.401068   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16798 > 2) by scale factor 0.479849\nI1207 13:46:12.338052   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0374 > 2) by scale factor 0.495369\nI1207 13:46:13.274794   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63046 > 2) by scale factor 0.760323\nI1207 13:46:14.211664   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35322 > 2) by scale factor 0.45943\nI1207 13:46:15.148766   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50023 > 2) by scale factor 0.444421\nI1207 13:46:16.085955   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62705 > 2) by scale factor 0.432241\nI1207 13:46:17.029369   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89928 > 2) by scale factor 0.512915\nI1207 13:46:17.976099   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86031 > 2) by scale factor 0.518093\nI1207 13:46:18.922317   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29359 > 2) by scale factor 0.871996\nI1207 13:46:19.868449   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.23628 > 2) by scale factor 0.894343\nI1207 13:46:20.814761   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27703 > 2) by scale factor 0.610308\nI1207 13:46:21.760666   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31308 > 2) by scale factor 0.864649\nI1207 13:46:22.706579   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95289 > 2) by scale factor 0.505959\nI1207 13:46:23.652627   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48923 > 2) by scale factor 0.573192\nI1207 13:46:24.599138   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16017 > 2) by scale factor 0.48075\nI1207 13:46:25.545760   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0896 > 2) by scale factor 0.647333\nI1207 13:46:26.492115   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3468 > 2) by scale factor 0.597586\nI1207 13:46:27.438554   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78027 > 2) by scale factor 0.719356\nI1207 13:46:28.384371   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99363 > 2) by scale factor 0.500798\nI1207 13:46:29.330421   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80378 > 2) by scale factor 0.713323\nI1207 13:46:30.276150   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38461 > 2) by scale factor 0.59091\nI1207 13:46:31.222012   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18649 > 2) by scale factor 0.477727\nI1207 13:46:32.168051   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2297 > 2) by scale factor 0.896981\nI1207 13:46:33.113984   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1379 > 2) by scale factor 0.483337\nI1207 13:46:34.059880   369 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09812 > 2) by scale factor 0.488029\nI1207 13:46:35.005707   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31042 > 2) by scale factor 0.865644\nI1207 13:46:35.951184   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96864 > 2) by scale factor 0.503951\nI1207 13:46:36.896487   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.90781 > 2) by scale factor 0.407514\nI1207 13:46:37.842058   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85085 > 2) by scale factor 0.519366\nI1207 13:46:38.788336   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76084 > 2) by scale factor 0.531795\nI1207 13:46:39.733610   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32239 > 2) by scale factor 0.462707\nI1207 13:46:40.679589   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80346 > 2) by scale factor 0.416367\nI1207 13:46:41.625502   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84411 > 2) by scale factor 0.520277\nI1207 13:46:42.571059   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72781 > 2) by scale factor 0.536508\nI1207 13:46:43.516880   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32366 > 2) by scale factor 0.462571\nI1207 13:46:44.463078   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08664 > 2) by scale factor 0.393187\nI1207 13:46:45.408804   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13191 > 2) by scale factor 0.638589\nI1207 13:46:46.354408   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22982 > 2) by scale factor 0.61923\nI1207 13:46:47.300086   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.97031 > 2) by scale factor 0.503739\nI1207 13:46:48.247014   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84435 > 2) by scale factor 0.703148\nI1207 13:46:49.192695   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2785 > 2) by scale factor 0.877772\nI1207 13:46:50.138911   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8429 > 2) by scale factor 0.703506\nI1207 13:46:51.085193   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43873 > 2) by scale factor 0.450579\nI1207 13:46:52.031229   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91482 > 2) by scale factor 0.686149\nI1207 13:46:52.977399   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41194 > 2) by scale factor 0.453315\nI1207 13:46:53.923403   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38691 > 2) by scale factor 0.590508\nI1207 13:46:54.867964   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12413 > 2) by scale factor 0.640178\nI1207 13:46:55.812656   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96207 > 2) by scale factor 0.675205\nI1207 13:46:56.757594   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84593 > 2) by scale factor 0.702758\nI1207 13:46:57.702349   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51207 > 2) by scale factor 0.569465\nI1207 13:46:58.647486   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70873 > 2) by scale factor 0.539268\nI1207 13:46:59.592377   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30088 > 2) by scale factor 0.465021\nI1207 13:47:00.537073   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88797 > 2) by scale factor 0.514408\nI1207 13:47:01.481981   
369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31628 > 2) by scale factor 0.863455\nI1207 13:47:02.427726   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33461 > 2) by scale factor 0.599771\nI1207 13:47:03.373464   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77595 > 2) by scale factor 0.720474\nI1207 13:47:04.319162   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35824 > 2) by scale factor 0.848089\nI1207 13:47:05.264533   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15737 > 2) by scale factor 0.481073\nI1207 13:47:06.210197   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38736 > 2) by scale factor 0.590431\nI1207 13:47:07.156397   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92605 > 2) by scale factor 0.509418\nI1207 13:47:08.102272   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23564 > 2) by scale factor 0.618115\nI1207 13:47:09.048153   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08961 > 2) by scale factor 0.647331\nI1207 13:47:09.993907   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63424 > 2) by scale factor 0.550322\nI1207 13:47:10.939684   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39528 > 2) by scale factor 0.589052\nI1207 13:47:11.884687   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67064 > 2) by scale factor 0.428207\nI1207 13:47:12.830328   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66915 > 2) by scale factor 0.749303\nI1207 13:47:13.776393   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41884 > 2) by scale factor 0.826843\nI1207 13:47:14.722476   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.5107 > 2) by scale factor 0.569688\nI1207 13:47:15.668159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45154 > 2) by scale factor 0.815814\nI1207 13:47:16.614159   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22429 > 2) by scale factor 0.473453\nI1207 13:47:17.559698   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99853 > 2) by scale factor 0.500184\nI1207 13:47:18.506108   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50749 > 2) by scale factor 0.570208\nI1207 13:47:19.451759   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51145 > 2) by scale factor 0.443316\nI1207 13:47:20.397657   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47215 > 2) by scale factor 0.576012\nI1207 13:47:21.343636   369 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78768 > 2) by scale factor 0.717443\nI1207 13:47:21.353606   369 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/Fig2b_iter_20000.caffemodel\nI1207 13:47:21.490380   369 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/Fig2b_iter_20000.solverstate\nI1207 13:47:21.773103   369 solver.cpp:317] Iteration 20000, loss = 21.4453\nI1207 13:47:21.773150   369 solver.cpp:337] Iteration 20000, Testing net (#0)\nI1207 13:48:14.661320   369 solver.cpp:404]     Test net output #0: accuracy = 0.17355\nI1207 13:48:14.661984   369 solver.cpp:404]     Test net output #1: loss = 23.8841 (* 1 = 23.8841 loss)\nI1207 13:48:14.662003   369 solver.cpp:322] Optimization Done.\nI1207 13:48:18.887544   369 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lrRange3SS520kClip2Fig12b",
    "content": "I1207 05:38:13.785434  1922 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1207 05:38:13.787983  1922 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1207 05:38:13.789178  1922 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1207 05:38:13.790716  1922 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1207 05:38:13.791919  1922 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1207 05:38:13.793903  1922 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1207 05:38:13.795109  1922 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1207 05:38:13.796315  1922 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1207 05:38:13.797526  1922 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1207 05:38:14.232614  1922 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 2\nmax_lr: 3\nI1207 05:38:14.235353  1922 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1207 05:38:14.285473  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:14.285537  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:14.286345  1922 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1207 05:38:14.287935  1922 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: 
true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n  
  }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n  
  pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 
1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stri\nI1207 05:38:14.289438  1922 layer_factory.hpp:77] Creating layer dataLayer\nI1207 05:38:14.290567  1922 net.cpp:100] Creating Layer dataLayer\nI1207 05:38:14.290621  1922 net.cpp:408] dataLayer -> data_top\nI1207 05:38:14.290802  1922 net.cpp:408] dataLayer -> label\nI1207 05:38:14.290895  1922 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1207 05:38:14.296048  1927 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1207 05:38:14.318827  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:14.324509  1922 net.cpp:150] Setting up dataLayer\nI1207 05:38:14.324569  1922 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1207 05:38:14.324581  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.324587  1922 net.cpp:165] Memory required for data: 1044820\nI1207 05:38:14.324604  1922 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1207 05:38:14.324618  1922 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1207 05:38:14.324626  1922 net.cpp:434] label_dataLayer_1_split <- label\nI1207 05:38:14.324645  1922 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1207 05:38:14.324658  1922 
net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1207 05:38:14.324751  1922 net.cpp:150] Setting up label_dataLayer_1_split\nI1207 05:38:14.324765  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.324772  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.324777  1922 net.cpp:165] Memory required for data: 1045500\nI1207 05:38:14.324782  1922 layer_factory.hpp:77] Creating layer pre_conv\nI1207 05:38:14.324846  1922 net.cpp:100] Creating Layer pre_conv\nI1207 05:38:14.324858  1922 net.cpp:434] pre_conv <- data_top\nI1207 05:38:14.324870  1922 net.cpp:408] pre_conv -> pre_conv_top\nI1207 05:38:14.326570  1922 net.cpp:150] Setting up pre_conv\nI1207 05:38:14.326588  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.326593  1922 net.cpp:165] Memory required for data: 6616060\nI1207 05:38:14.326643  1922 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1207 05:38:14.326655  1922 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1207 05:38:14.326661  1922 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1207 05:38:14.326673  1922 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1207 05:38:14.326684  1922 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1207 05:38:14.326766  1922 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1207 05:38:14.326779  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.326786  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.326792  1922 net.cpp:165] Memory required for data: 17757180\nI1207 05:38:14.326797  1922 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1207 05:38:14.326869  1922 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1207 05:38:14.326881  1922 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1207 05:38:14.326897  1922 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1207 05:38:14.327013  1928 blocking_queue.cpp:50] Waiting 
for data\nI1207 05:38:14.327214  1922 net.cpp:150] Setting up L1_b1_brc1_bn\nI1207 05:38:14.327229  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.327235  1922 net.cpp:165] Memory required for data: 23327740\nI1207 05:38:14.327251  1922 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1207 05:38:14.327294  1922 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1207 05:38:14.327303  1922 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1207 05:38:14.327311  1922 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1207 05:38:14.327322  1922 net.cpp:150] Setting up L1_b1_brc1_relu\nI1207 05:38:14.327330  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.327335  1922 net.cpp:165] Memory required for data: 28898300\nI1207 05:38:14.327340  1922 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1207 05:38:14.327355  1922 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1207 05:38:14.327361  1922 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1207 05:38:14.327374  1922 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1207 05:38:14.327653  1922 net.cpp:150] Setting up L1_b1_brc1_conv\nI1207 05:38:14.327668  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.327673  1922 net.cpp:165] Memory required for data: 40039420\nI1207 05:38:14.327682  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1207 05:38:14.327700  1922 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1207 05:38:14.327708  1922 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1207 05:38:14.327718  1922 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1207 05:38:14.327944  1922 net.cpp:150] Setting up L1_b1_brc2_bn\nI1207 05:38:14.327957  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.327962  1922 net.cpp:165] Memory required for data: 51180540\nI1207 05:38:14.327976  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1207 05:38:14.327986  1922 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1207 
05:38:14.327991  1922 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1207 05:38:14.328001  1922 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1207 05:38:14.328011  1922 net.cpp:150] Setting up L1_b1_brc2_relu\nI1207 05:38:14.328027  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.328032  1922 net.cpp:165] Memory required for data: 62321660\nI1207 05:38:14.328037  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1207 05:38:14.328052  1922 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1207 05:38:14.328058  1922 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1207 05:38:14.328065  1922 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1207 05:38:14.328333  1922 net.cpp:150] Setting up L1_b1_brc2_conv\nI1207 05:38:14.328347  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.328352  1922 net.cpp:165] Memory required for data: 73462780\nI1207 05:38:14.328361  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1207 05:38:14.328377  1922 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1207 05:38:14.328383  1922 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1207 05:38:14.328392  1922 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1207 05:38:14.328618  1922 net.cpp:150] Setting up L1_b1_brc3_bn\nI1207 05:38:14.328630  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.328635  1922 net.cpp:165] Memory required for data: 84603900\nI1207 05:38:14.328645  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1207 05:38:14.328653  1922 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1207 05:38:14.328660  1922 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1207 05:38:14.328670  1922 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1207 05:38:14.328680  1922 net.cpp:150] Setting up L1_b1_brc3_relu\nI1207 05:38:14.328691  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.328697  1922 net.cpp:165] Memory required for data: 95745020\nI1207 
05:38:14.328702  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1207 05:38:14.328716  1922 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1207 05:38:14.328722  1922 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1207 05:38:14.328730  1922 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1207 05:38:14.329054  1922 net.cpp:150] Setting up L1_b1_brc3_conv\nI1207 05:38:14.329069  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.329074  1922 net.cpp:165] Memory required for data: 118027260\nI1207 05:38:14.329087  1922 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1207 05:38:14.329102  1922 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1207 05:38:14.329109  1922 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1207 05:38:14.329120  1922 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1207 05:38:14.329401  1922 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1207 05:38:14.329416  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.329421  1922 net.cpp:165] Memory required for data: 140309500\nI1207 05:38:14.329429  1922 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1207 05:38:14.329479  1922 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1207 05:38:14.329488  1922 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1207 05:38:14.329496  1922 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1207 05:38:14.329504  1922 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1207 05:38:14.329576  1922 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1207 05:38:14.329591  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.329596  1922 net.cpp:165] Memory required for data: 162591740\nI1207 05:38:14.329602  1922 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.329612  1922 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.329618  1922 net.cpp:434] 
L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1207 05:38:14.329627  1922 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:38:14.329635  1922 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:38:14.329699  1922 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.329712  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.329720  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.329725  1922 net.cpp:165] Memory required for data: 207156220\nI1207 05:38:14.329730  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1207 05:38:14.329742  1922 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1207 05:38:14.329748  1922 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:38:14.329764  1922 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1207 05:38:14.329988  1922 net.cpp:150] Setting up L1_b2_brc1_bn\nI1207 05:38:14.330000  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.330005  1922 net.cpp:165] Memory required for data: 229438460\nI1207 05:38:14.330016  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1207 05:38:14.330024  1922 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1207 05:38:14.330030  1922 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1207 05:38:14.330037  1922 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1207 05:38:14.330047  1922 net.cpp:150] Setting up L1_b2_brc1_relu\nI1207 05:38:14.330054  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.330058  1922 net.cpp:165] Memory required for data: 251720700\nI1207 05:38:14.330063  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1207 05:38:14.330077  1922 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1207 05:38:14.330083  1922 net.cpp:434] 
L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1207 05:38:14.330096  1922 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1207 05:38:14.330397  1922 net.cpp:150] Setting up L1_b2_brc1_conv\nI1207 05:38:14.330411  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.330416  1922 net.cpp:165] Memory required for data: 262861820\nI1207 05:38:14.330425  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1207 05:38:14.330436  1922 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1207 05:38:14.330442  1922 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1207 05:38:14.330451  1922 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1207 05:38:14.330694  1922 net.cpp:150] Setting up L1_b2_brc2_bn\nI1207 05:38:14.330708  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.330714  1922 net.cpp:165] Memory required for data: 274002940\nI1207 05:38:14.330724  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1207 05:38:14.330732  1922 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1207 05:38:14.330739  1922 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1207 05:38:14.330745  1922 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1207 05:38:14.330755  1922 net.cpp:150] Setting up L1_b2_brc2_relu\nI1207 05:38:14.330762  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.330766  1922 net.cpp:165] Memory required for data: 285144060\nI1207 05:38:14.330771  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1207 05:38:14.330785  1922 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1207 05:38:14.330790  1922 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1207 05:38:14.330801  1922 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1207 05:38:14.331070  1922 net.cpp:150] Setting up L1_b2_brc2_conv\nI1207 05:38:14.331084  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.331089  1922 net.cpp:165] Memory required for data: 296285180\nI1207 05:38:14.331097  1922 
layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1207 05:38:14.331110  1922 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1207 05:38:14.331115  1922 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1207 05:38:14.331123  1922 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1207 05:38:14.331363  1922 net.cpp:150] Setting up L1_b2_brc3_bn\nI1207 05:38:14.331377  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.331382  1922 net.cpp:165] Memory required for data: 307426300\nI1207 05:38:14.331392  1922 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1207 05:38:14.331406  1922 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1207 05:38:14.331413  1922 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1207 05:38:14.331420  1922 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1207 05:38:14.331429  1922 net.cpp:150] Setting up L1_b2_brc3_relu\nI1207 05:38:14.331437  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.331441  1922 net.cpp:165] Memory required for data: 318567420\nI1207 05:38:14.331446  1922 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1207 05:38:14.331460  1922 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1207 05:38:14.331466  1922 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1207 05:38:14.331477  1922 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1207 05:38:14.331786  1922 net.cpp:150] Setting up L1_b2_brc3_conv\nI1207 05:38:14.331800  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.331805  1922 net.cpp:165] Memory required for data: 340849660\nI1207 05:38:14.331823  1922 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1207 05:38:14.331836  1922 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1207 05:38:14.331842  1922 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1207 05:38:14.331849  1922 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:38:14.331857  1922 net.cpp:408] L1_b2_sum_eltwise -> 
L1_b2_sum_eltwise_top\nI1207 05:38:14.331887  1922 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1207 05:38:14.331897  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.331902  1922 net.cpp:165] Memory required for data: 363131900\nI1207 05:38:14.331907  1922 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.331917  1922 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.331923  1922 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1207 05:38:14.331933  1922 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:38:14.331943  1922 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:38:14.331985  1922 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.332000  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.332006  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.332010  1922 net.cpp:165] Memory required for data: 407696380\nI1207 05:38:14.332016  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1207 05:38:14.332023  1922 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1207 05:38:14.332029  1922 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:38:14.332036  1922 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1207 05:38:14.332259  1922 net.cpp:150] Setting up L1_b3_brc1_bn\nI1207 05:38:14.332273  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.332278  1922 net.cpp:165] Memory required for data: 429978620\nI1207 05:38:14.332288  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1207 05:38:14.332299  1922 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1207 05:38:14.332305  1922 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1207 
05:38:14.332314  1922 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1207 05:38:14.332322  1922 net.cpp:150] Setting up L1_b3_brc1_relu\nI1207 05:38:14.332330  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.332334  1922 net.cpp:165] Memory required for data: 452260860\nI1207 05:38:14.332340  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1207 05:38:14.332352  1922 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1207 05:38:14.332358  1922 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1207 05:38:14.332370  1922 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1207 05:38:14.332682  1922 net.cpp:150] Setting up L1_b3_brc1_conv\nI1207 05:38:14.332708  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.332715  1922 net.cpp:165] Memory required for data: 463401980\nI1207 05:38:14.332722  1922 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1207 05:38:14.332731  1922 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1207 05:38:14.332737  1922 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1207 05:38:14.332746  1922 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1207 05:38:14.332978  1922 net.cpp:150] Setting up L1_b3_brc2_bn\nI1207 05:38:14.332991  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.332996  1922 net.cpp:165] Memory required for data: 474543100\nI1207 05:38:14.333006  1922 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1207 05:38:14.333014  1922 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1207 05:38:14.333020  1922 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1207 05:38:14.333029  1922 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1207 05:38:14.333039  1922 net.cpp:150] Setting up L1_b3_brc2_relu\nI1207 05:38:14.333047  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.333051  1922 net.cpp:165] Memory required for data: 485684220\nI1207 05:38:14.333056  1922 layer_factory.hpp:77] Creating layer 
L1_b3_brc2_conv\nI1207 05:38:14.333072  1922 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1207 05:38:14.333078  1922 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1207 05:38:14.333091  1922 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1207 05:38:14.333364  1922 net.cpp:150] Setting up L1_b3_brc2_conv\nI1207 05:38:14.333379  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.333384  1922 net.cpp:165] Memory required for data: 496825340\nI1207 05:38:14.333391  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1207 05:38:14.333400  1922 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1207 05:38:14.333406  1922 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1207 05:38:14.333416  1922 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1207 05:38:14.333649  1922 net.cpp:150] Setting up L1_b3_brc3_bn\nI1207 05:38:14.333662  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.333667  1922 net.cpp:165] Memory required for data: 507966460\nI1207 05:38:14.333678  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1207 05:38:14.333705  1922 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1207 05:38:14.333714  1922 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1207 05:38:14.333721  1922 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1207 05:38:14.333730  1922 net.cpp:150] Setting up L1_b3_brc3_relu\nI1207 05:38:14.333737  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.333742  1922 net.cpp:165] Memory required for data: 519107580\nI1207 05:38:14.333747  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1207 05:38:14.333758  1922 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1207 05:38:14.333763  1922 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1207 05:38:14.333775  1922 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1207 05:38:14.334086  1922 net.cpp:150] Setting up L1_b3_brc3_conv\nI1207 05:38:14.334100  1922 net.cpp:157] Top shape: 85 64 32 32 
(5570560)\nI1207 05:38:14.334105  1922 net.cpp:165] Memory required for data: 541389820\nI1207 05:38:14.334113  1922 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1207 05:38:14.334123  1922 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1207 05:38:14.334130  1922 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1207 05:38:14.334136  1922 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:38:14.334143  1922 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1207 05:38:14.334180  1922 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1207 05:38:14.334193  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.334197  1922 net.cpp:165] Memory required for data: 563672060\nI1207 05:38:14.334203  1922 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.334218  1922 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.334223  1922 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1207 05:38:14.334233  1922 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:38:14.334244  1922 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:38:14.334288  1922 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.334298  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.334305  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.334309  1922 net.cpp:165] Memory required for data: 608236540\nI1207 05:38:14.334314  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1207 05:38:14.334324  1922 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1207 05:38:14.334331  1922 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:38:14.334342  1922 
net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1207 05:38:14.334564  1922 net.cpp:150] Setting up L1_b4_brc1_bn\nI1207 05:38:14.334578  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.334583  1922 net.cpp:165] Memory required for data: 630518780\nI1207 05:38:14.334594  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1207 05:38:14.334601  1922 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1207 05:38:14.334607  1922 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1207 05:38:14.334614  1922 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1207 05:38:14.334623  1922 net.cpp:150] Setting up L1_b4_brc1_relu\nI1207 05:38:14.334630  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.334635  1922 net.cpp:165] Memory required for data: 652801020\nI1207 05:38:14.334640  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1207 05:38:14.334653  1922 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1207 05:38:14.334659  1922 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1207 05:38:14.334671  1922 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1207 05:38:14.334987  1922 net.cpp:150] Setting up L1_b4_brc1_conv\nI1207 05:38:14.335001  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.335006  1922 net.cpp:165] Memory required for data: 663942140\nI1207 05:38:14.335014  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1207 05:38:14.335026  1922 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1207 05:38:14.335032  1922 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1207 05:38:14.335041  1922 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1207 05:38:14.335284  1922 net.cpp:150] Setting up L1_b4_brc2_bn\nI1207 05:38:14.335297  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.335302  1922 net.cpp:165] Memory required for data: 675083260\nI1207 05:38:14.335312  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1207 05:38:14.335321  1922 
net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1207 05:38:14.335327  1922 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1207 05:38:14.335335  1922 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1207 05:38:14.335343  1922 net.cpp:150] Setting up L1_b4_brc2_relu\nI1207 05:38:14.335350  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.335355  1922 net.cpp:165] Memory required for data: 686224380\nI1207 05:38:14.335360  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1207 05:38:14.335372  1922 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1207 05:38:14.335378  1922 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1207 05:38:14.335389  1922 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1207 05:38:14.335664  1922 net.cpp:150] Setting up L1_b4_brc2_conv\nI1207 05:38:14.335677  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.335682  1922 net.cpp:165] Memory required for data: 697365500\nI1207 05:38:14.335698  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1207 05:38:14.335717  1922 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1207 05:38:14.335724  1922 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1207 05:38:14.335732  1922 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1207 05:38:14.335971  1922 net.cpp:150] Setting up L1_b4_brc3_bn\nI1207 05:38:14.335984  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.335989  1922 net.cpp:165] Memory required for data: 708506620\nI1207 05:38:14.335999  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1207 05:38:14.336007  1922 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1207 05:38:14.336014  1922 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1207 05:38:14.336020  1922 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1207 05:38:14.336030  1922 net.cpp:150] Setting up L1_b4_brc3_relu\nI1207 05:38:14.336037  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.336041  1922 
net.cpp:165] Memory required for data: 719647740\nI1207 05:38:14.336046  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1207 05:38:14.336060  1922 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1207 05:38:14.336066  1922 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1207 05:38:14.336076  1922 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1207 05:38:14.336387  1922 net.cpp:150] Setting up L1_b4_brc3_conv\nI1207 05:38:14.336401  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336406  1922 net.cpp:165] Memory required for data: 741929980\nI1207 05:38:14.336416  1922 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1207 05:38:14.336423  1922 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1207 05:38:14.336429  1922 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1207 05:38:14.336439  1922 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:38:14.336447  1922 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1207 05:38:14.336482  1922 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1207 05:38:14.336490  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336495  1922 net.cpp:165] Memory required for data: 764212220\nI1207 05:38:14.336500  1922 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.336508  1922 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.336513  1922 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1207 05:38:14.336524  1922 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:38:14.336534  1922 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:38:14.336577  1922 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.336591  1922 
net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336598  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336603  1922 net.cpp:165] Memory required for data: 808776700\nI1207 05:38:14.336608  1922 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1207 05:38:14.336616  1922 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1207 05:38:14.336621  1922 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:38:14.336637  1922 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1207 05:38:14.336871  1922 net.cpp:150] Setting up L1_b5_brc1_bn\nI1207 05:38:14.336885  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336890  1922 net.cpp:165] Memory required for data: 831058940\nI1207 05:38:14.336916  1922 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1207 05:38:14.336926  1922 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1207 05:38:14.336932  1922 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1207 05:38:14.336940  1922 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1207 05:38:14.336949  1922 net.cpp:150] Setting up L1_b5_brc1_relu\nI1207 05:38:14.336956  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.336967  1922 net.cpp:165] Memory required for data: 853341180\nI1207 05:38:14.336973  1922 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1207 05:38:14.336987  1922 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1207 05:38:14.336993  1922 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1207 05:38:14.337004  1922 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1207 05:38:14.337307  1922 net.cpp:150] Setting up L1_b5_brc1_conv\nI1207 05:38:14.337321  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.337327  1922 net.cpp:165] Memory required for data: 864482300\nI1207 05:38:14.337334  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1207 05:38:14.337345  1922 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1207 
05:38:14.337352  1922 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1207 05:38:14.337360  1922 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1207 05:38:14.337596  1922 net.cpp:150] Setting up L1_b5_brc2_bn\nI1207 05:38:14.337610  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.337615  1922 net.cpp:165] Memory required for data: 875623420\nI1207 05:38:14.337625  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1207 05:38:14.337632  1922 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1207 05:38:14.337638  1922 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1207 05:38:14.337646  1922 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1207 05:38:14.337656  1922 net.cpp:150] Setting up L1_b5_brc2_relu\nI1207 05:38:14.337662  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.337666  1922 net.cpp:165] Memory required for data: 886764540\nI1207 05:38:14.337671  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1207 05:38:14.337689  1922 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1207 05:38:14.337697  1922 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1207 05:38:14.337707  1922 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1207 05:38:14.337981  1922 net.cpp:150] Setting up L1_b5_brc2_conv\nI1207 05:38:14.337996  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.338001  1922 net.cpp:165] Memory required for data: 897905660\nI1207 05:38:14.338008  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1207 05:38:14.338024  1922 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1207 05:38:14.338032  1922 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1207 05:38:14.338039  1922 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1207 05:38:14.338289  1922 net.cpp:150] Setting up L1_b5_brc3_bn\nI1207 05:38:14.338302  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.338307  1922 net.cpp:165] Memory required for data: 909046780\nI1207 
05:38:14.338317  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1207 05:38:14.338325  1922 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1207 05:38:14.338331  1922 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1207 05:38:14.338338  1922 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1207 05:38:14.338348  1922 net.cpp:150] Setting up L1_b5_brc3_relu\nI1207 05:38:14.338356  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.338361  1922 net.cpp:165] Memory required for data: 920187900\nI1207 05:38:14.338364  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1207 05:38:14.338377  1922 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1207 05:38:14.338384  1922 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1207 05:38:14.338395  1922 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1207 05:38:14.338711  1922 net.cpp:150] Setting up L1_b5_brc3_conv\nI1207 05:38:14.338726  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.338731  1922 net.cpp:165] Memory required for data: 942470140\nI1207 05:38:14.338739  1922 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1207 05:38:14.338748  1922 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1207 05:38:14.338754  1922 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1207 05:38:14.338762  1922 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:38:14.338778  1922 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1207 05:38:14.338812  1922 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1207 05:38:14.338822  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.338827  1922 net.cpp:165] Memory required for data: 964752380\nI1207 05:38:14.338832  1922 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.338840  1922 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.338845  1922 
net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1207 05:38:14.338857  1922 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:38:14.338867  1922 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:38:14.338912  1922 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.338922  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.338929  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.338934  1922 net.cpp:165] Memory required for data: 1009316860\nI1207 05:38:14.338939  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1207 05:38:14.338950  1922 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1207 05:38:14.338956  1922 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:38:14.338966  1922 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1207 05:38:14.339191  1922 net.cpp:150] Setting up L1_b6_brc1_bn\nI1207 05:38:14.339205  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.339208  1922 net.cpp:165] Memory required for data: 1031599100\nI1207 05:38:14.339220  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1207 05:38:14.339227  1922 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1207 05:38:14.339233  1922 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1207 05:38:14.339241  1922 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1207 05:38:14.339249  1922 net.cpp:150] Setting up L1_b6_brc1_relu\nI1207 05:38:14.339257  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.339262  1922 net.cpp:165] Memory required for data: 1053881340\nI1207 05:38:14.339265  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1207 05:38:14.339279  1922 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1207 05:38:14.339285  1922 
net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1207 05:38:14.339298  1922 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1207 05:38:14.339612  1922 net.cpp:150] Setting up L1_b6_brc1_conv\nI1207 05:38:14.339625  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.339630  1922 net.cpp:165] Memory required for data: 1065022460\nI1207 05:38:14.339639  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1207 05:38:14.339650  1922 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1207 05:38:14.339656  1922 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1207 05:38:14.339664  1922 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1207 05:38:14.339911  1922 net.cpp:150] Setting up L1_b6_brc2_bn\nI1207 05:38:14.339925  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.339931  1922 net.cpp:165] Memory required for data: 1076163580\nI1207 05:38:14.339941  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1207 05:38:14.339956  1922 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1207 05:38:14.339962  1922 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1207 05:38:14.339969  1922 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1207 05:38:14.339980  1922 net.cpp:150] Setting up L1_b6_brc2_relu\nI1207 05:38:14.339987  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.339992  1922 net.cpp:165] Memory required for data: 1087304700\nI1207 05:38:14.339996  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1207 05:38:14.340018  1922 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1207 05:38:14.340023  1922 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1207 05:38:14.340032  1922 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1207 05:38:14.340315  1922 net.cpp:150] Setting up L1_b6_brc2_conv\nI1207 05:38:14.340329  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.340334  1922 net.cpp:165] Memory required for data: 1098445820\nI1207 05:38:14.340343  1922 
layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1207 05:38:14.340354  1922 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1207 05:38:14.340360  1922 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1207 05:38:14.340368  1922 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1207 05:38:14.340606  1922 net.cpp:150] Setting up L1_b6_brc3_bn\nI1207 05:38:14.340618  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.340623  1922 net.cpp:165] Memory required for data: 1109586940\nI1207 05:38:14.340633  1922 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1207 05:38:14.340642  1922 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1207 05:38:14.340648  1922 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1207 05:38:14.340657  1922 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1207 05:38:14.340668  1922 net.cpp:150] Setting up L1_b6_brc3_relu\nI1207 05:38:14.340675  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.340679  1922 net.cpp:165] Memory required for data: 1120728060\nI1207 05:38:14.340684  1922 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1207 05:38:14.340703  1922 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1207 05:38:14.340710  1922 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1207 05:38:14.340718  1922 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1207 05:38:14.341039  1922 net.cpp:150] Setting up L1_b6_brc3_conv\nI1207 05:38:14.341053  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341058  1922 net.cpp:165] Memory required for data: 1143010300\nI1207 05:38:14.341066  1922 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1207 05:38:14.341075  1922 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1207 05:38:14.341083  1922 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1207 05:38:14.341089  1922 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:38:14.341099  1922 net.cpp:408] L1_b6_sum_eltwise 
-> L1_b6_sum_eltwise_top\nI1207 05:38:14.341130  1922 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1207 05:38:14.341140  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341145  1922 net.cpp:165] Memory required for data: 1165292540\nI1207 05:38:14.341150  1922 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.341159  1922 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.341166  1922 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1207 05:38:14.341176  1922 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:38:14.341186  1922 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:38:14.341230  1922 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.341243  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341251  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341256  1922 net.cpp:165] Memory required for data: 1209857020\nI1207 05:38:14.341261  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1207 05:38:14.341269  1922 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1207 05:38:14.341275  1922 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:38:14.341284  1922 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1207 05:38:14.341517  1922 net.cpp:150] Setting up L2_b1_brc1_bn\nI1207 05:38:14.341531  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341543  1922 net.cpp:165] Memory required for data: 1232139260\nI1207 05:38:14.341554  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1207 05:38:14.341565  1922 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1207 05:38:14.341572  1922 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1207 
05:38:14.341578  1922 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1207 05:38:14.341588  1922 net.cpp:150] Setting up L2_b1_brc1_relu\nI1207 05:38:14.341595  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.341599  1922 net.cpp:165] Memory required for data: 1254421500\nI1207 05:38:14.341604  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1207 05:38:14.341617  1922 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1207 05:38:14.341624  1922 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1207 05:38:14.341634  1922 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1207 05:38:14.342001  1922 net.cpp:150] Setting up L2_b1_brc1_conv\nI1207 05:38:14.342015  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.342020  1922 net.cpp:165] Memory required for data: 1259992060\nI1207 05:38:14.342030  1922 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1207 05:38:14.342037  1922 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1207 05:38:14.342043  1922 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1207 05:38:14.342054  1922 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1207 05:38:14.342305  1922 net.cpp:150] Setting up L2_b1_brc2_bn\nI1207 05:38:14.342321  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.342326  1922 net.cpp:165] Memory required for data: 1265562620\nI1207 05:38:14.342336  1922 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1207 05:38:14.342345  1922 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1207 05:38:14.342351  1922 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1207 05:38:14.342358  1922 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1207 05:38:14.342368  1922 net.cpp:150] Setting up L2_b1_brc2_relu\nI1207 05:38:14.342375  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.342380  1922 net.cpp:165] Memory required for data: 1271133180\nI1207 05:38:14.342384  1922 layer_factory.hpp:77] Creating layer 
L2_b1_brc2_conv\nI1207 05:38:14.342397  1922 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1207 05:38:14.342403  1922 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1207 05:38:14.342412  1922 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1207 05:38:14.342720  1922 net.cpp:150] Setting up L2_b1_brc2_conv\nI1207 05:38:14.342733  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.342738  1922 net.cpp:165] Memory required for data: 1276703740\nI1207 05:38:14.342746  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1207 05:38:14.342758  1922 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1207 05:38:14.342766  1922 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1207 05:38:14.342773  1922 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1207 05:38:14.343014  1922 net.cpp:150] Setting up L2_b1_brc3_bn\nI1207 05:38:14.343030  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.343035  1922 net.cpp:165] Memory required for data: 1282274300\nI1207 05:38:14.343046  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1207 05:38:14.343055  1922 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1207 05:38:14.343060  1922 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1207 05:38:14.343067  1922 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1207 05:38:14.343077  1922 net.cpp:150] Setting up L2_b1_brc3_relu\nI1207 05:38:14.343085  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.343088  1922 net.cpp:165] Memory required for data: 1287844860\nI1207 05:38:14.343093  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1207 05:38:14.343103  1922 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1207 05:38:14.343108  1922 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1207 05:38:14.343119  1922 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1207 05:38:14.344606  1922 net.cpp:150] Setting up L2_b1_brc3_conv\nI1207 05:38:14.344624  1922 net.cpp:157] Top shape: 85 128 16 16 
(2785280)\nI1207 05:38:14.344630  1922 net.cpp:165] Memory required for data: 1298985980\nI1207 05:38:14.344640  1922 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1207 05:38:14.344655  1922 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1207 05:38:14.344661  1922 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:38:14.344671  1922 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1207 05:38:14.345110  1922 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1207 05:38:14.345124  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345130  1922 net.cpp:165] Memory required for data: 1310127100\nI1207 05:38:14.345139  1922 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1207 05:38:14.345149  1922 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1207 05:38:14.345155  1922 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1207 05:38:14.345161  1922 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1207 05:38:14.345172  1922 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1207 05:38:14.345198  1922 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1207 05:38:14.345207  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345212  1922 net.cpp:165] Memory required for data: 1321268220\nI1207 05:38:14.345217  1922 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.345226  1922 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.345232  1922 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1207 05:38:14.345242  1922 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:38:14.345252  1922 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:38:14.345301  1922 net.cpp:150] Setting up 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.345314  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345320  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345324  1922 net.cpp:165] Memory required for data: 1343550460\nI1207 05:38:14.345329  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1207 05:38:14.345341  1922 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1207 05:38:14.345347  1922 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:38:14.345355  1922 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1207 05:38:14.345577  1922 net.cpp:150] Setting up L2_b2_brc1_bn\nI1207 05:38:14.345589  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345594  1922 net.cpp:165] Memory required for data: 1354691580\nI1207 05:38:14.345604  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1207 05:38:14.345613  1922 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1207 05:38:14.345619  1922 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1207 05:38:14.345626  1922 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1207 05:38:14.345636  1922 net.cpp:150] Setting up L2_b2_brc1_relu\nI1207 05:38:14.345644  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.345649  1922 net.cpp:165] Memory required for data: 1365832700\nI1207 05:38:14.345654  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1207 05:38:14.345666  1922 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1207 05:38:14.345672  1922 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1207 05:38:14.345685  1922 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1207 05:38:14.346168  1922 net.cpp:150] Setting up L2_b2_brc1_conv\nI1207 05:38:14.346184  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.346189  1922 net.cpp:165] Memory required for data: 1371403260\nI1207 05:38:14.346199  1922 layer_factory.hpp:77] Creating layer 
L2_b2_brc2_bn\nI1207 05:38:14.346222  1922 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1207 05:38:14.346230  1922 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1207 05:38:14.346240  1922 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1207 05:38:14.346479  1922 net.cpp:150] Setting up L2_b2_brc2_bn\nI1207 05:38:14.346493  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.346498  1922 net.cpp:165] Memory required for data: 1376973820\nI1207 05:38:14.346508  1922 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1207 05:38:14.346515  1922 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1207 05:38:14.346521  1922 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1207 05:38:14.346529  1922 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1207 05:38:14.346539  1922 net.cpp:150] Setting up L2_b2_brc2_relu\nI1207 05:38:14.346545  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.346549  1922 net.cpp:165] Memory required for data: 1382544380\nI1207 05:38:14.346554  1922 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1207 05:38:14.346568  1922 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1207 05:38:14.346575  1922 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1207 05:38:14.346583  1922 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1207 05:38:14.346901  1922 net.cpp:150] Setting up L2_b2_brc2_conv\nI1207 05:38:14.346916  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.346921  1922 net.cpp:165] Memory required for data: 1388114940\nI1207 05:38:14.346930  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1207 05:38:14.346940  1922 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1207 05:38:14.346947  1922 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1207 05:38:14.346956  1922 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1207 05:38:14.347193  1922 net.cpp:150] Setting up L2_b2_brc3_bn\nI1207 05:38:14.347208  1922 net.cpp:157] Top shape: 85 64 16 16 
(1392640)\nI1207 05:38:14.347214  1922 net.cpp:165] Memory required for data: 1393685500\nI1207 05:38:14.347224  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1207 05:38:14.347232  1922 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1207 05:38:14.347239  1922 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1207 05:38:14.347245  1922 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1207 05:38:14.347255  1922 net.cpp:150] Setting up L2_b2_brc3_relu\nI1207 05:38:14.347262  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.347266  1922 net.cpp:165] Memory required for data: 1399256060\nI1207 05:38:14.347271  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1207 05:38:14.347281  1922 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1207 05:38:14.347286  1922 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1207 05:38:14.347298  1922 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1207 05:38:14.347740  1922 net.cpp:150] Setting up L2_b2_brc3_conv\nI1207 05:38:14.347755  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.347760  1922 net.cpp:165] Memory required for data: 1410397180\nI1207 05:38:14.347769  1922 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1207 05:38:14.347779  1922 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1207 05:38:14.347784  1922 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1207 05:38:14.347791  1922 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:38:14.347805  1922 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1207 05:38:14.347831  1922 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1207 05:38:14.347841  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.347846  1922 net.cpp:165] Memory required for data: 1421538300\nI1207 05:38:14.347851  1922 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.347862  1922 net.cpp:100] 
Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.347867  1922 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1207 05:38:14.347882  1922 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:38:14.347892  1922 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:38:14.347940  1922 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.347952  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.347959  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.347965  1922 net.cpp:165] Memory required for data: 1443820540\nI1207 05:38:14.347970  1922 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1207 05:38:14.347977  1922 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1207 05:38:14.347983  1922 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:38:14.347993  1922 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1207 05:38:14.348220  1922 net.cpp:150] Setting up L2_b3_brc1_bn\nI1207 05:38:14.348232  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.348237  1922 net.cpp:165] Memory required for data: 1454961660\nI1207 05:38:14.348265  1922 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1207 05:38:14.348278  1922 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1207 05:38:14.348284  1922 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1207 05:38:14.348291  1922 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1207 05:38:14.348301  1922 net.cpp:150] Setting up L2_b3_brc1_relu\nI1207 05:38:14.348309  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.348312  1922 net.cpp:165] Memory required for data: 1466102780\nI1207 05:38:14.348317  1922 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1207 
05:38:14.348327  1922 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1207 05:38:14.348333  1922 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1207 05:38:14.348345  1922 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1207 05:38:14.348800  1922 net.cpp:150] Setting up L2_b3_brc1_conv\nI1207 05:38:14.348815  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.348820  1922 net.cpp:165] Memory required for data: 1471673340\nI1207 05:38:14.348829  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1207 05:38:14.348837  1922 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1207 05:38:14.348843  1922 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1207 05:38:14.348851  1922 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1207 05:38:14.349088  1922 net.cpp:150] Setting up L2_b3_brc2_bn\nI1207 05:38:14.349102  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.349107  1922 net.cpp:165] Memory required for data: 1477243900\nI1207 05:38:14.349117  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1207 05:38:14.349123  1922 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1207 05:38:14.349129  1922 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1207 05:38:14.349136  1922 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1207 05:38:14.349146  1922 net.cpp:150] Setting up L2_b3_brc2_relu\nI1207 05:38:14.349153  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.349159  1922 net.cpp:165] Memory required for data: 1482814460\nI1207 05:38:14.349164  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1207 05:38:14.349176  1922 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1207 05:38:14.349182  1922 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1207 05:38:14.349198  1922 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1207 05:38:14.349510  1922 net.cpp:150] Setting up L2_b3_brc2_conv\nI1207 05:38:14.349524  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 
05:38:14.349529  1922 net.cpp:165] Memory required for data: 1488385020\nI1207 05:38:14.349539  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1207 05:38:14.349546  1922 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1207 05:38:14.349552  1922 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1207 05:38:14.349563  1922 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1207 05:38:14.349817  1922 net.cpp:150] Setting up L2_b3_brc3_bn\nI1207 05:38:14.349831  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.349836  1922 net.cpp:165] Memory required for data: 1493955580\nI1207 05:38:14.349846  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1207 05:38:14.349854  1922 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1207 05:38:14.349860  1922 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1207 05:38:14.349867  1922 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1207 05:38:14.349877  1922 net.cpp:150] Setting up L2_b3_brc3_relu\nI1207 05:38:14.349884  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.349889  1922 net.cpp:165] Memory required for data: 1499526140\nI1207 05:38:14.349894  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1207 05:38:14.349907  1922 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1207 05:38:14.349913  1922 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1207 05:38:14.349925  1922 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1207 05:38:14.350360  1922 net.cpp:150] Setting up L2_b3_brc3_conv\nI1207 05:38:14.350374  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350379  1922 net.cpp:165] Memory required for data: 1510667260\nI1207 05:38:14.350388  1922 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1207 05:38:14.350401  1922 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1207 05:38:14.350407  1922 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1207 05:38:14.350415  1922 net.cpp:434] L2_b3_sum_eltwise <- 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:38:14.350422  1922 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1207 05:38:14.350452  1922 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1207 05:38:14.350461  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350466  1922 net.cpp:165] Memory required for data: 1521808380\nI1207 05:38:14.350471  1922 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.350478  1922 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.350484  1922 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1207 05:38:14.350494  1922 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:38:14.350504  1922 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:38:14.350549  1922 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.350561  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350567  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350572  1922 net.cpp:165] Memory required for data: 1544090620\nI1207 05:38:14.350577  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1207 05:38:14.350589  1922 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1207 05:38:14.350594  1922 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:38:14.350605  1922 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1207 05:38:14.350841  1922 net.cpp:150] Setting up L2_b4_brc1_bn\nI1207 05:38:14.350857  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350862  1922 net.cpp:165] Memory required for data: 1555231740\nI1207 05:38:14.350872  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1207 05:38:14.350880  1922 net.cpp:100] 
Creating Layer L2_b4_brc1_relu\nI1207 05:38:14.350886  1922 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1207 05:38:14.350893  1922 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1207 05:38:14.350903  1922 net.cpp:150] Setting up L2_b4_brc1_relu\nI1207 05:38:14.350909  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.350914  1922 net.cpp:165] Memory required for data: 1566372860\nI1207 05:38:14.350919  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1207 05:38:14.350936  1922 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1207 05:38:14.350942  1922 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1207 05:38:14.350953  1922 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1207 05:38:14.351397  1922 net.cpp:150] Setting up L2_b4_brc1_conv\nI1207 05:38:14.351411  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.351416  1922 net.cpp:165] Memory required for data: 1571943420\nI1207 05:38:14.351425  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1207 05:38:14.351436  1922 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1207 05:38:14.351444  1922 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1207 05:38:14.351451  1922 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1207 05:38:14.351698  1922 net.cpp:150] Setting up L2_b4_brc2_bn\nI1207 05:38:14.351711  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.351716  1922 net.cpp:165] Memory required for data: 1577513980\nI1207 05:38:14.351727  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1207 05:38:14.351737  1922 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1207 05:38:14.351744  1922 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1207 05:38:14.351752  1922 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1207 05:38:14.351761  1922 net.cpp:150] Setting up L2_b4_brc2_relu\nI1207 05:38:14.351768  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.351773  1922 net.cpp:165] 
Memory required for data: 1583084540\nI1207 05:38:14.351778  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1207 05:38:14.351788  1922 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1207 05:38:14.351794  1922 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1207 05:38:14.351804  1922 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1207 05:38:14.352111  1922 net.cpp:150] Setting up L2_b4_brc2_conv\nI1207 05:38:14.352125  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.352130  1922 net.cpp:165] Memory required for data: 1588655100\nI1207 05:38:14.352138  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1207 05:38:14.352147  1922 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1207 05:38:14.352154  1922 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1207 05:38:14.352164  1922 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1207 05:38:14.352411  1922 net.cpp:150] Setting up L2_b4_brc3_bn\nI1207 05:38:14.352423  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.352428  1922 net.cpp:165] Memory required for data: 1594225660\nI1207 05:38:14.352438  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1207 05:38:14.352452  1922 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1207 05:38:14.352458  1922 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1207 05:38:14.352464  1922 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1207 05:38:14.352474  1922 net.cpp:150] Setting up L2_b4_brc3_relu\nI1207 05:38:14.352481  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.352485  1922 net.cpp:165] Memory required for data: 1599796220\nI1207 05:38:14.352490  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1207 05:38:14.352500  1922 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1207 05:38:14.352506  1922 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1207 05:38:14.352517  1922 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1207 05:38:14.352986  1922 
net.cpp:150] Setting up L2_b4_brc3_conv\nI1207 05:38:14.353001  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353006  1922 net.cpp:165] Memory required for data: 1610937340\nI1207 05:38:14.353014  1922 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1207 05:38:14.353024  1922 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1207 05:38:14.353030  1922 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1207 05:38:14.353037  1922 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:38:14.353045  1922 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1207 05:38:14.353077  1922 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1207 05:38:14.353093  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353098  1922 net.cpp:165] Memory required for data: 1622078460\nI1207 05:38:14.353103  1922 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.353111  1922 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.353117  1922 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1207 05:38:14.353127  1922 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:38:14.353137  1922 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:38:14.353184  1922 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.353199  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353205  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353209  1922 net.cpp:165] Memory required for data: 1644360700\nI1207 05:38:14.353214  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1207 05:38:14.353224  1922 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1207 05:38:14.353229  1922 
net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:38:14.353240  1922 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1207 05:38:14.353499  1922 net.cpp:150] Setting up L2_b5_brc1_bn\nI1207 05:38:14.353513  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353518  1922 net.cpp:165] Memory required for data: 1655501820\nI1207 05:38:14.353528  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1207 05:38:14.353538  1922 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1207 05:38:14.353543  1922 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1207 05:38:14.353554  1922 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1207 05:38:14.353564  1922 net.cpp:150] Setting up L2_b5_brc1_relu\nI1207 05:38:14.353570  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.353575  1922 net.cpp:165] Memory required for data: 1666642940\nI1207 05:38:14.353579  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1207 05:38:14.353590  1922 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1207 05:38:14.353595  1922 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1207 05:38:14.353605  1922 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1207 05:38:14.354058  1922 net.cpp:150] Setting up L2_b5_brc1_conv\nI1207 05:38:14.354073  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.354077  1922 net.cpp:165] Memory required for data: 1672213500\nI1207 05:38:14.354085  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1207 05:38:14.354102  1922 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1207 05:38:14.354109  1922 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1207 05:38:14.354116  1922 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1207 05:38:14.354362  1922 net.cpp:150] Setting up L2_b5_brc2_bn\nI1207 05:38:14.354374  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.354379  1922 net.cpp:165] Memory required for data: 1677784060\nI1207 
05:38:14.354389  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1207 05:38:14.354398  1922 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1207 05:38:14.354403  1922 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1207 05:38:14.354410  1922 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1207 05:38:14.354419  1922 net.cpp:150] Setting up L2_b5_brc2_relu\nI1207 05:38:14.354426  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.354431  1922 net.cpp:165] Memory required for data: 1683354620\nI1207 05:38:14.354435  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1207 05:38:14.354449  1922 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1207 05:38:14.354454  1922 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1207 05:38:14.354465  1922 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1207 05:38:14.354792  1922 net.cpp:150] Setting up L2_b5_brc2_conv\nI1207 05:38:14.354806  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.354811  1922 net.cpp:165] Memory required for data: 1688925180\nI1207 05:38:14.354820  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1207 05:38:14.354833  1922 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1207 05:38:14.354840  1922 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1207 05:38:14.354848  1922 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1207 05:38:14.355088  1922 net.cpp:150] Setting up L2_b5_brc3_bn\nI1207 05:38:14.355100  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.355105  1922 net.cpp:165] Memory required for data: 1694495740\nI1207 05:38:14.355115  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1207 05:38:14.355124  1922 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1207 05:38:14.355130  1922 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1207 05:38:14.355137  1922 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1207 05:38:14.355147  1922 net.cpp:150] Setting up 
L2_b5_brc3_relu\nI1207 05:38:14.355154  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.355159  1922 net.cpp:165] Memory required for data: 1700066300\nI1207 05:38:14.355163  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1207 05:38:14.355176  1922 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1207 05:38:14.355183  1922 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1207 05:38:14.355195  1922 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1207 05:38:14.355636  1922 net.cpp:150] Setting up L2_b5_brc3_conv\nI1207 05:38:14.355650  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.355655  1922 net.cpp:165] Memory required for data: 1711207420\nI1207 05:38:14.355664  1922 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1207 05:38:14.355676  1922 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1207 05:38:14.355682  1922 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1207 05:38:14.355696  1922 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:38:14.355705  1922 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1207 05:38:14.355734  1922 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1207 05:38:14.355744  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.355749  1922 net.cpp:165] Memory required for data: 1722348540\nI1207 05:38:14.355754  1922 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.355762  1922 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.355767  1922 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1207 05:38:14.355778  1922 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:38:14.355788  1922 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:38:14.355834  1922 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.355849  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.355855  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.355860  1922 net.cpp:165] Memory required for data: 1744630780\nI1207 05:38:14.355865  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1207 05:38:14.355873  1922 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1207 05:38:14.355880  1922 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:38:14.355890  1922 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1207 05:38:14.356114  1922 net.cpp:150] Setting up L2_b6_brc1_bn\nI1207 05:38:14.356130  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.356135  1922 net.cpp:165] Memory required for data: 1755771900\nI1207 05:38:14.356145  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1207 05:38:14.356173  1922 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1207 05:38:14.356179  1922 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1207 05:38:14.356186  1922 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1207 05:38:14.356197  1922 net.cpp:150] Setting up L2_b6_brc1_relu\nI1207 05:38:14.356204  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.356209  1922 net.cpp:165] Memory required for data: 1766913020\nI1207 05:38:14.356215  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1207 05:38:14.356227  1922 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1207 05:38:14.356233  1922 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1207 05:38:14.356246  1922 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1207 05:38:14.356695  1922 net.cpp:150] Setting up L2_b6_brc1_conv\nI1207 05:38:14.356709  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.356714  1922 net.cpp:165] Memory 
required for data: 1772483580\nI1207 05:38:14.356724  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1207 05:38:14.356732  1922 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1207 05:38:14.356739  1922 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1207 05:38:14.356750  1922 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1207 05:38:14.356994  1922 net.cpp:150] Setting up L2_b6_brc2_bn\nI1207 05:38:14.357007  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.357012  1922 net.cpp:165] Memory required for data: 1778054140\nI1207 05:38:14.357023  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1207 05:38:14.357034  1922 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1207 05:38:14.357041  1922 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1207 05:38:14.357048  1922 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1207 05:38:14.357059  1922 net.cpp:150] Setting up L2_b6_brc2_relu\nI1207 05:38:14.357065  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.357069  1922 net.cpp:165] Memory required for data: 1783624700\nI1207 05:38:14.357074  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1207 05:38:14.357085  1922 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1207 05:38:14.357091  1922 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1207 05:38:14.357107  1922 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1207 05:38:14.357415  1922 net.cpp:150] Setting up L2_b6_brc2_conv\nI1207 05:38:14.357429  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.357434  1922 net.cpp:165] Memory required for data: 1789195260\nI1207 05:38:14.357442  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1207 05:38:14.357450  1922 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1207 05:38:14.357457  1922 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1207 05:38:14.357465  1922 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1207 05:38:14.357713  1922 net.cpp:150] Setting 
up L2_b6_brc3_bn\nI1207 05:38:14.357726  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.357731  1922 net.cpp:165] Memory required for data: 1794765820\nI1207 05:38:14.357743  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1207 05:38:14.357750  1922 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1207 05:38:14.357756  1922 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1207 05:38:14.357766  1922 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1207 05:38:14.357777  1922 net.cpp:150] Setting up L2_b6_brc3_relu\nI1207 05:38:14.357784  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.357789  1922 net.cpp:165] Memory required for data: 1800336380\nI1207 05:38:14.357794  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1207 05:38:14.357807  1922 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1207 05:38:14.357813  1922 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1207 05:38:14.357821  1922 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1207 05:38:14.358253  1922 net.cpp:150] Setting up L2_b6_brc3_conv\nI1207 05:38:14.358266  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358278  1922 net.cpp:165] Memory required for data: 1811477500\nI1207 05:38:14.358286  1922 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1207 05:38:14.358295  1922 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1207 05:38:14.358301  1922 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1207 05:38:14.358309  1922 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:38:14.358319  1922 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1207 05:38:14.358346  1922 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1207 05:38:14.358357  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358362  1922 net.cpp:165] Memory required for data: 1822618620\nI1207 05:38:14.358367  1922 layer_factory.hpp:77] Creating layer 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.358381  1922 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.358387  1922 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1207 05:38:14.358394  1922 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:38:14.358404  1922 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:38:14.358453  1922 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.358464  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358471  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358475  1922 net.cpp:165] Memory required for data: 1844900860\nI1207 05:38:14.358480  1922 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1207 05:38:14.358492  1922 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1207 05:38:14.358500  1922 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:38:14.358507  1922 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1207 05:38:14.358747  1922 net.cpp:150] Setting up L3_b1_brc1_bn\nI1207 05:38:14.358759  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358764  1922 net.cpp:165] Memory required for data: 1856041980\nI1207 05:38:14.358775  1922 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1207 05:38:14.358783  1922 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1207 05:38:14.358789  1922 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1207 05:38:14.358796  1922 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1207 05:38:14.358806  1922 net.cpp:150] Setting up L3_b1_brc1_relu\nI1207 05:38:14.358814  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.358817  1922 net.cpp:165] Memory required for data: 1867183100\nI1207 
05:38:14.358822  1922 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1207 05:38:14.358835  1922 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1207 05:38:14.358841  1922 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1207 05:38:14.358852  1922 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1207 05:38:14.359453  1922 net.cpp:150] Setting up L3_b1_brc1_conv\nI1207 05:38:14.359468  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.359473  1922 net.cpp:165] Memory required for data: 1869968380\nI1207 05:38:14.359482  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1207 05:38:14.359493  1922 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1207 05:38:14.359499  1922 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1207 05:38:14.359508  1922 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1207 05:38:14.359763  1922 net.cpp:150] Setting up L3_b1_brc2_bn\nI1207 05:38:14.359777  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.359782  1922 net.cpp:165] Memory required for data: 1872753660\nI1207 05:38:14.359792  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1207 05:38:14.359802  1922 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1207 05:38:14.359807  1922 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1207 05:38:14.359814  1922 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1207 05:38:14.359832  1922 net.cpp:150] Setting up L3_b1_brc2_relu\nI1207 05:38:14.359839  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.359843  1922 net.cpp:165] Memory required for data: 1875538940\nI1207 05:38:14.359848  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1207 05:38:14.359863  1922 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1207 05:38:14.359869  1922 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1207 05:38:14.359876  1922 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1207 05:38:14.360249  1922 net.cpp:150] Setting up L3_b1_brc2_conv\nI1207 
05:38:14.360262  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.360267  1922 net.cpp:165] Memory required for data: 1878324220\nI1207 05:38:14.360275  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1207 05:38:14.360285  1922 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1207 05:38:14.360291  1922 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1207 05:38:14.360303  1922 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1207 05:38:14.360548  1922 net.cpp:150] Setting up L3_b1_brc3_bn\nI1207 05:38:14.360561  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.360566  1922 net.cpp:165] Memory required for data: 1881109500\nI1207 05:38:14.360576  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1207 05:38:14.360587  1922 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1207 05:38:14.360594  1922 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1207 05:38:14.360600  1922 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1207 05:38:14.360610  1922 net.cpp:150] Setting up L3_b1_brc3_relu\nI1207 05:38:14.360617  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.360622  1922 net.cpp:165] Memory required for data: 1883894780\nI1207 05:38:14.360626  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1207 05:38:14.360636  1922 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1207 05:38:14.360642  1922 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1207 05:38:14.360653  1922 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1207 05:38:14.362623  1922 net.cpp:150] Setting up L3_b1_brc3_conv\nI1207 05:38:14.362642  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.362646  1922 net.cpp:165] Memory required for data: 1889465340\nI1207 05:38:14.362656  1922 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1207 05:38:14.362673  1922 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1207 05:38:14.362680  1922 net.cpp:434] L3_b1_chanInc_conv <- 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:38:14.362697  1922 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1207 05:38:14.363627  1922 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1207 05:38:14.363641  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.363647  1922 net.cpp:165] Memory required for data: 1895035900\nI1207 05:38:14.363656  1922 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1207 05:38:14.363668  1922 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1207 05:38:14.363674  1922 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1207 05:38:14.363682  1922 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1207 05:38:14.363699  1922 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1207 05:38:14.363734  1922 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1207 05:38:14.363745  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.363750  1922 net.cpp:165] Memory required for data: 1900606460\nI1207 05:38:14.363755  1922 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.363766  1922 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.363773  1922 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1207 05:38:14.363781  1922 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:38:14.363790  1922 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:38:14.363855  1922 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.363868  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.363875  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.363879  1922 net.cpp:165] Memory required for data: 1911747580\nI1207 05:38:14.363885  1922 layer_factory.hpp:77] 
Creating layer L3_b2_brc1_bn\nI1207 05:38:14.363896  1922 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1207 05:38:14.363903  1922 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:38:14.363911  1922 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1207 05:38:14.364148  1922 net.cpp:150] Setting up L3_b2_brc1_bn\nI1207 05:38:14.364161  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.364166  1922 net.cpp:165] Memory required for data: 1917318140\nI1207 05:38:14.364177  1922 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1207 05:38:14.364186  1922 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1207 05:38:14.364192  1922 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1207 05:38:14.364202  1922 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1207 05:38:14.364212  1922 net.cpp:150] Setting up L3_b2_brc1_relu\nI1207 05:38:14.364219  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.364223  1922 net.cpp:165] Memory required for data: 1922888700\nI1207 05:38:14.364228  1922 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1207 05:38:14.364241  1922 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1207 05:38:14.364248  1922 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1207 05:38:14.364256  1922 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1207 05:38:14.365208  1922 net.cpp:150] Setting up L3_b2_brc1_conv\nI1207 05:38:14.365227  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.365232  1922 net.cpp:165] Memory required for data: 1925673980\nI1207 05:38:14.365242  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1207 05:38:14.365250  1922 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1207 05:38:14.365257  1922 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1207 05:38:14.365268  1922 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1207 05:38:14.365512  1922 net.cpp:150] Setting up L3_b2_brc2_bn\nI1207 05:38:14.365525  1922 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.365530  1922 net.cpp:165] Memory required for data: 1928459260\nI1207 05:38:14.365540  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1207 05:38:14.365550  1922 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1207 05:38:14.365556  1922 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1207 05:38:14.365562  1922 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1207 05:38:14.365572  1922 net.cpp:150] Setting up L3_b2_brc2_relu\nI1207 05:38:14.365579  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.365583  1922 net.cpp:165] Memory required for data: 1931244540\nI1207 05:38:14.365588  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1207 05:38:14.365602  1922 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1207 05:38:14.365608  1922 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1207 05:38:14.365619  1922 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1207 05:38:14.366014  1922 net.cpp:150] Setting up L3_b2_brc2_conv\nI1207 05:38:14.366029  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.366034  1922 net.cpp:165] Memory required for data: 1934029820\nI1207 05:38:14.366041  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1207 05:38:14.366053  1922 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1207 05:38:14.366060  1922 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1207 05:38:14.366068  1922 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1207 05:38:14.366313  1922 net.cpp:150] Setting up L3_b2_brc3_bn\nI1207 05:38:14.366327  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.366331  1922 net.cpp:165] Memory required for data: 1936815100\nI1207 05:38:14.366348  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1207 05:38:14.366363  1922 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1207 05:38:14.366369  1922 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1207 05:38:14.366376  1922 
net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1207 05:38:14.366386  1922 net.cpp:150] Setting up L3_b2_brc3_relu\nI1207 05:38:14.366394  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.366399  1922 net.cpp:165] Memory required for data: 1939600380\nI1207 05:38:14.366402  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1207 05:38:14.366413  1922 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1207 05:38:14.366420  1922 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1207 05:38:14.366430  1922 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1207 05:38:14.367367  1922 net.cpp:150] Setting up L3_b2_brc3_conv\nI1207 05:38:14.367382  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367386  1922 net.cpp:165] Memory required for data: 1945170940\nI1207 05:38:14.367395  1922 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1207 05:38:14.367404  1922 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1207 05:38:14.367410  1922 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1207 05:38:14.367417  1922 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:38:14.367425  1922 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1207 05:38:14.367461  1922 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1207 05:38:14.367473  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367478  1922 net.cpp:165] Memory required for data: 1950741500\nI1207 05:38:14.367483  1922 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.367494  1922 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.367501  1922 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1207 05:38:14.367507  1922 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:38:14.367517  1922 
net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:38:14.367566  1922 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.367578  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367585  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367589  1922 net.cpp:165] Memory required for data: 1961882620\nI1207 05:38:14.367594  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1207 05:38:14.367602  1922 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1207 05:38:14.367609  1922 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:38:14.367619  1922 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1207 05:38:14.367866  1922 net.cpp:150] Setting up L3_b3_brc1_bn\nI1207 05:38:14.367879  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367884  1922 net.cpp:165] Memory required for data: 1967453180\nI1207 05:38:14.367894  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1207 05:38:14.367907  1922 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1207 05:38:14.367913  1922 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1207 05:38:14.367919  1922 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1207 05:38:14.367929  1922 net.cpp:150] Setting up L3_b3_brc1_relu\nI1207 05:38:14.367938  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.367941  1922 net.cpp:165] Memory required for data: 1973023740\nI1207 05:38:14.367946  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1207 05:38:14.367956  1922 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1207 05:38:14.367962  1922 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1207 05:38:14.367974  1922 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1207 05:38:14.368919  1922 net.cpp:150] Setting up L3_b3_brc1_conv\nI1207 05:38:14.368934  1922 net.cpp:157] Top shape: 85 128 8 8 
(696320)\nI1207 05:38:14.368939  1922 net.cpp:165] Memory required for data: 1975809020\nI1207 05:38:14.368947  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1207 05:38:14.368957  1922 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1207 05:38:14.368963  1922 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1207 05:38:14.368973  1922 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1207 05:38:14.369246  1922 net.cpp:150] Setting up L3_b3_brc2_bn\nI1207 05:38:14.369267  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.369274  1922 net.cpp:165] Memory required for data: 1978594300\nI1207 05:38:14.369297  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1207 05:38:14.369309  1922 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1207 05:38:14.369319  1922 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1207 05:38:14.369338  1922 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1207 05:38:14.369355  1922 net.cpp:150] Setting up L3_b3_brc2_relu\nI1207 05:38:14.369367  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.369375  1922 net.cpp:165] Memory required for data: 1981379580\nI1207 05:38:14.369385  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1207 05:38:14.369397  1922 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1207 05:38:14.369405  1922 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1207 05:38:14.369412  1922 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1207 05:38:14.369812  1922 net.cpp:150] Setting up L3_b3_brc2_conv\nI1207 05:38:14.369827  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.369832  1922 net.cpp:165] Memory required for data: 1984164860\nI1207 05:38:14.369841  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1207 05:38:14.369853  1922 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1207 05:38:14.369859  1922 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1207 05:38:14.369868  1922 net.cpp:408] L3_b3_brc3_bn -> 
L3_b3_brc3_bn_top\nI1207 05:38:14.370115  1922 net.cpp:150] Setting up L3_b3_brc3_bn\nI1207 05:38:14.370128  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.370133  1922 net.cpp:165] Memory required for data: 1986950140\nI1207 05:38:14.370143  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1207 05:38:14.370151  1922 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1207 05:38:14.370157  1922 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1207 05:38:14.370164  1922 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1207 05:38:14.370174  1922 net.cpp:150] Setting up L3_b3_brc3_relu\nI1207 05:38:14.370182  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.370187  1922 net.cpp:165] Memory required for data: 1989735420\nI1207 05:38:14.370190  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1207 05:38:14.370204  1922 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1207 05:38:14.370210  1922 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1207 05:38:14.370226  1922 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1207 05:38:14.371183  1922 net.cpp:150] Setting up L3_b3_brc3_conv\nI1207 05:38:14.371198  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371204  1922 net.cpp:165] Memory required for data: 1995305980\nI1207 05:38:14.371212  1922 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1207 05:38:14.371224  1922 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1207 05:38:14.371232  1922 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1207 05:38:14.371238  1922 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:38:14.371251  1922 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1207 05:38:14.371285  1922 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1207 05:38:14.371295  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371300  1922 net.cpp:165] Memory required for data: 2000876540\nI1207 
05:38:14.371305  1922 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.371322  1922 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.371330  1922 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1207 05:38:14.371337  1922 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:38:14.371347  1922 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:38:14.371398  1922 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.371410  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371417  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371423  1922 net.cpp:165] Memory required for data: 2012017660\nI1207 05:38:14.371428  1922 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1207 05:38:14.371438  1922 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1207 05:38:14.371444  1922 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:38:14.371453  1922 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1207 05:38:14.371698  1922 net.cpp:150] Setting up L3_b4_brc1_bn\nI1207 05:38:14.371711  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371716  1922 net.cpp:165] Memory required for data: 2017588220\nI1207 05:38:14.371727  1922 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1207 05:38:14.371736  1922 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1207 05:38:14.371742  1922 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1207 05:38:14.371749  1922 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1207 05:38:14.371759  1922 net.cpp:150] Setting up L3_b4_brc1_relu\nI1207 05:38:14.371767  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.371770  1922 
net.cpp:165] Memory required for data: 2023158780\nI1207 05:38:14.371775  1922 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1207 05:38:14.371788  1922 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1207 05:38:14.371795  1922 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1207 05:38:14.371806  1922 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1207 05:38:14.372741  1922 net.cpp:150] Setting up L3_b4_brc1_conv\nI1207 05:38:14.372756  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.372761  1922 net.cpp:165] Memory required for data: 2025944060\nI1207 05:38:14.372769  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1207 05:38:14.372781  1922 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1207 05:38:14.372787  1922 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1207 05:38:14.372798  1922 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1207 05:38:14.373049  1922 net.cpp:150] Setting up L3_b4_brc2_bn\nI1207 05:38:14.373061  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.373066  1922 net.cpp:165] Memory required for data: 2028729340\nI1207 05:38:14.373076  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1207 05:38:14.373085  1922 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1207 05:38:14.373090  1922 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1207 05:38:14.373097  1922 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1207 05:38:14.373107  1922 net.cpp:150] Setting up L3_b4_brc2_relu\nI1207 05:38:14.373114  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.373119  1922 net.cpp:165] Memory required for data: 2031514620\nI1207 05:38:14.373123  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1207 05:38:14.373136  1922 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1207 05:38:14.373143  1922 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1207 05:38:14.373154  1922 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1207 05:38:14.373535  
1922 net.cpp:150] Setting up L3_b4_brc2_conv\nI1207 05:38:14.373549  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.373554  1922 net.cpp:165] Memory required for data: 2034299900\nI1207 05:38:14.373569  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1207 05:38:14.373582  1922 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1207 05:38:14.373589  1922 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1207 05:38:14.373596  1922 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1207 05:38:14.373872  1922 net.cpp:150] Setting up L3_b4_brc3_bn\nI1207 05:38:14.373889  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.373894  1922 net.cpp:165] Memory required for data: 2037085180\nI1207 05:38:14.373904  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1207 05:38:14.373913  1922 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1207 05:38:14.373919  1922 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1207 05:38:14.373926  1922 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1207 05:38:14.373936  1922 net.cpp:150] Setting up L3_b4_brc3_relu\nI1207 05:38:14.373944  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.373947  1922 net.cpp:165] Memory required for data: 2039870460\nI1207 05:38:14.373952  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1207 05:38:14.373962  1922 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1207 05:38:14.373968  1922 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1207 05:38:14.373983  1922 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1207 05:38:14.374922  1922 net.cpp:150] Setting up L3_b4_brc3_conv\nI1207 05:38:14.374936  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.374941  1922 net.cpp:165] Memory required for data: 2045441020\nI1207 05:38:14.374950  1922 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1207 05:38:14.374959  1922 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1207 05:38:14.374966  1922 
net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1207 05:38:14.374974  1922 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:38:14.374981  1922 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1207 05:38:14.375017  1922 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1207 05:38:14.375030  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.375033  1922 net.cpp:165] Memory required for data: 2051011580\nI1207 05:38:14.375039  1922 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.375049  1922 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.375056  1922 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1207 05:38:14.375063  1922 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:38:14.375072  1922 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:38:14.375128  1922 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.375139  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.375145  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.375150  1922 net.cpp:165] Memory required for data: 2062152700\nI1207 05:38:14.375155  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1207 05:38:14.375166  1922 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1207 05:38:14.375172  1922 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:38:14.375180  1922 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1207 05:38:14.375422  1922 net.cpp:150] Setting up L3_b5_brc1_bn\nI1207 05:38:14.375434  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.375439  1922 net.cpp:165] Memory required for data: 2067723260\nI1207 
05:38:14.375449  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1207 05:38:14.375460  1922 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1207 05:38:14.375468  1922 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1207 05:38:14.375474  1922 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1207 05:38:14.375484  1922 net.cpp:150] Setting up L3_b5_brc1_relu\nI1207 05:38:14.375499  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.375504  1922 net.cpp:165] Memory required for data: 2073293820\nI1207 05:38:14.375509  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1207 05:38:14.375519  1922 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1207 05:38:14.375524  1922 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1207 05:38:14.375536  1922 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1207 05:38:14.377507  1922 net.cpp:150] Setting up L3_b5_brc1_conv\nI1207 05:38:14.377526  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.377530  1922 net.cpp:165] Memory required for data: 2076079100\nI1207 05:38:14.377539  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1207 05:38:14.377552  1922 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1207 05:38:14.377558  1922 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1207 05:38:14.377568  1922 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1207 05:38:14.377831  1922 net.cpp:150] Setting up L3_b5_brc2_bn\nI1207 05:38:14.377846  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.377851  1922 net.cpp:165] Memory required for data: 2078864380\nI1207 05:38:14.377861  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1207 05:38:14.377871  1922 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1207 05:38:14.377876  1922 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1207 05:38:14.377883  1922 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1207 05:38:14.377894  1922 net.cpp:150] Setting up 
L3_b5_brc2_relu\nI1207 05:38:14.377902  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.377907  1922 net.cpp:165] Memory required for data: 2081649660\nI1207 05:38:14.377910  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1207 05:38:14.377924  1922 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1207 05:38:14.377931  1922 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1207 05:38:14.377940  1922 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1207 05:38:14.378324  1922 net.cpp:150] Setting up L3_b5_brc2_conv\nI1207 05:38:14.378337  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.378342  1922 net.cpp:165] Memory required for data: 2084434940\nI1207 05:38:14.378386  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1207 05:38:14.378402  1922 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1207 05:38:14.378409  1922 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1207 05:38:14.378417  1922 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1207 05:38:14.378670  1922 net.cpp:150] Setting up L3_b5_brc3_bn\nI1207 05:38:14.378684  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.378695  1922 net.cpp:165] Memory required for data: 2087220220\nI1207 05:38:14.378705  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1207 05:38:14.378713  1922 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1207 05:38:14.378720  1922 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1207 05:38:14.378727  1922 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1207 05:38:14.378737  1922 net.cpp:150] Setting up L3_b5_brc3_relu\nI1207 05:38:14.378744  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.378748  1922 net.cpp:165] Memory required for data: 2090005500\nI1207 05:38:14.378753  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1207 05:38:14.378767  1922 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1207 05:38:14.378773  1922 net.cpp:434] L3_b5_brc3_conv <- 
L3_b5_brc3_bn_top\nI1207 05:38:14.378782  1922 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1207 05:38:14.379722  1922 net.cpp:150] Setting up L3_b5_brc3_conv\nI1207 05:38:14.379737  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.379742  1922 net.cpp:165] Memory required for data: 2095576060\nI1207 05:38:14.379750  1922 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1207 05:38:14.379763  1922 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1207 05:38:14.379770  1922 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1207 05:38:14.379784  1922 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:38:14.379793  1922 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1207 05:38:14.379830  1922 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1207 05:38:14.379840  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.379845  1922 net.cpp:165] Memory required for data: 2101146620\nI1207 05:38:14.379850  1922 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.379858  1922 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.379864  1922 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1207 05:38:14.379879  1922 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:38:14.379889  1922 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:38:14.379941  1922 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.379953  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.379959  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.379964  1922 net.cpp:165] Memory required for data: 2112287740\nI1207 05:38:14.379969  1922 layer_factory.hpp:77] Creating layer 
L3_b6_brc1_bn\nI1207 05:38:14.379981  1922 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1207 05:38:14.379987  1922 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:38:14.379995  1922 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1207 05:38:14.380234  1922 net.cpp:150] Setting up L3_b6_brc1_bn\nI1207 05:38:14.380246  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.380251  1922 net.cpp:165] Memory required for data: 2117858300\nI1207 05:38:14.380261  1922 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1207 05:38:14.380270  1922 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1207 05:38:14.380275  1922 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1207 05:38:14.380282  1922 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1207 05:38:14.380292  1922 net.cpp:150] Setting up L3_b6_brc1_relu\nI1207 05:38:14.380300  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.380304  1922 net.cpp:165] Memory required for data: 2123428860\nI1207 05:38:14.380308  1922 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1207 05:38:14.380322  1922 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1207 05:38:14.380328  1922 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1207 05:38:14.380340  1922 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1207 05:38:14.381278  1922 net.cpp:150] Setting up L3_b6_brc1_conv\nI1207 05:38:14.381292  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.381297  1922 net.cpp:165] Memory required for data: 2126214140\nI1207 05:38:14.381306  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1207 05:38:14.381319  1922 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1207 05:38:14.381325  1922 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1207 05:38:14.381336  1922 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1207 05:38:14.381580  1922 net.cpp:150] Setting up L3_b6_brc2_bn\nI1207 05:38:14.381592  1922 net.cpp:157] Top shape: 
85 128 8 8 (696320)\nI1207 05:38:14.381597  1922 net.cpp:165] Memory required for data: 2128999420\nI1207 05:38:14.381608  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1207 05:38:14.381616  1922 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1207 05:38:14.381623  1922 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1207 05:38:14.381629  1922 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1207 05:38:14.381639  1922 net.cpp:150] Setting up L3_b6_brc2_relu\nI1207 05:38:14.381646  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.381650  1922 net.cpp:165] Memory required for data: 2131784700\nI1207 05:38:14.381655  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1207 05:38:14.381675  1922 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1207 05:38:14.381682  1922 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1207 05:38:14.381697  1922 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1207 05:38:14.382093  1922 net.cpp:150] Setting up L3_b6_brc2_conv\nI1207 05:38:14.382107  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.382112  1922 net.cpp:165] Memory required for data: 2134569980\nI1207 05:38:14.382122  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1207 05:38:14.382133  1922 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1207 05:38:14.382139  1922 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1207 05:38:14.382148  1922 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1207 05:38:14.382395  1922 net.cpp:150] Setting up L3_b6_brc3_bn\nI1207 05:38:14.382407  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.382412  1922 net.cpp:165] Memory required for data: 2137355260\nI1207 05:38:14.382422  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1207 05:38:14.382433  1922 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1207 05:38:14.382439  1922 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1207 05:38:14.382447  1922 net.cpp:395] L3_b6_brc3_relu 
-> L3_b6_brc3_bn_top (in-place)\nI1207 05:38:14.382457  1922 net.cpp:150] Setting up L3_b6_brc3_relu\nI1207 05:38:14.382464  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.382468  1922 net.cpp:165] Memory required for data: 2140140540\nI1207 05:38:14.382473  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1207 05:38:14.382484  1922 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1207 05:38:14.382489  1922 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1207 05:38:14.382503  1922 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1207 05:38:14.383445  1922 net.cpp:150] Setting up L3_b6_brc3_conv\nI1207 05:38:14.383458  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.383464  1922 net.cpp:165] Memory required for data: 2145711100\nI1207 05:38:14.383472  1922 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1207 05:38:14.383481  1922 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1207 05:38:14.383488  1922 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1207 05:38:14.383496  1922 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:38:14.383503  1922 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1207 05:38:14.383539  1922 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1207 05:38:14.383551  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.383556  1922 net.cpp:165] Memory required for data: 2151281660\nI1207 05:38:14.383561  1922 layer_factory.hpp:77] Creating layer post_bn\nI1207 05:38:14.383572  1922 net.cpp:100] Creating Layer post_bn\nI1207 05:38:14.383579  1922 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1207 05:38:14.383587  1922 net.cpp:408] post_bn -> post_bn_top\nI1207 05:38:14.383838  1922 net.cpp:150] Setting up post_bn\nI1207 05:38:14.383852  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.383857  1922 net.cpp:165] Memory required for data: 2156852220\nI1207 05:38:14.383867  1922 layer_factory.hpp:77] 
Creating layer post_relu\nI1207 05:38:14.383875  1922 net.cpp:100] Creating Layer post_relu\nI1207 05:38:14.383882  1922 net.cpp:434] post_relu <- post_bn_top\nI1207 05:38:14.383891  1922 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1207 05:38:14.383903  1922 net.cpp:150] Setting up post_relu\nI1207 05:38:14.383910  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.383914  1922 net.cpp:165] Memory required for data: 2162422780\nI1207 05:38:14.383919  1922 layer_factory.hpp:77] Creating layer post_pool\nI1207 05:38:14.383929  1922 net.cpp:100] Creating Layer post_pool\nI1207 05:38:14.383934  1922 net.cpp:434] post_pool <- post_bn_top\nI1207 05:38:14.383945  1922 net.cpp:408] post_pool -> post_pool\nI1207 05:38:14.384042  1922 net.cpp:150] Setting up post_pool\nI1207 05:38:14.384057  1922 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1207 05:38:14.384070  1922 net.cpp:165] Memory required for data: 2162509820\nI1207 05:38:14.384076  1922 layer_factory.hpp:77] Creating layer post_FC\nI1207 05:38:14.384148  1922 net.cpp:100] Creating Layer post_FC\nI1207 05:38:14.384161  1922 net.cpp:434] post_FC <- post_pool\nI1207 05:38:14.384173  1922 net.cpp:408] post_FC -> post_FC_top\nI1207 05:38:14.384433  1922 net.cpp:150] Setting up post_FC\nI1207 05:38:14.384447  1922 net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.384454  1922 net.cpp:165] Memory required for data: 2162513220\nI1207 05:38:14.384462  1922 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1207 05:38:14.384474  1922 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1207 05:38:14.384480  1922 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1207 05:38:14.384488  1922 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1207 05:38:14.384498  1922 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1207 05:38:14.384551  1922 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1207 05:38:14.384562  1922 
net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.384569  1922 net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.384573  1922 net.cpp:165] Memory required for data: 2162520020\nI1207 05:38:14.384578  1922 layer_factory.hpp:77] Creating layer accuracy\nI1207 05:38:14.384623  1922 net.cpp:100] Creating Layer accuracy\nI1207 05:38:14.384634  1922 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1207 05:38:14.384642  1922 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1207 05:38:14.384654  1922 net.cpp:408] accuracy -> accuracy\nI1207 05:38:14.384704  1922 net.cpp:150] Setting up accuracy\nI1207 05:38:14.384717  1922 net.cpp:157] Top shape: (1)\nI1207 05:38:14.384722  1922 net.cpp:165] Memory required for data: 2162520024\nI1207 05:38:14.384728  1922 layer_factory.hpp:77] Creating layer loss\nI1207 05:38:14.384737  1922 net.cpp:100] Creating Layer loss\nI1207 05:38:14.384743  1922 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1207 05:38:14.384750  1922 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1207 05:38:14.384758  1922 net.cpp:408] loss -> loss\nI1207 05:38:14.384804  1922 layer_factory.hpp:77] Creating layer loss\nI1207 05:38:14.384955  1922 net.cpp:150] Setting up loss\nI1207 05:38:14.384969  1922 net.cpp:157] Top shape: (1)\nI1207 05:38:14.384974  1922 net.cpp:160]     with loss weight 1\nI1207 05:38:14.385051  1922 net.cpp:165] Memory required for data: 2162520028\nI1207 05:38:14.385061  1922 net.cpp:226] loss needs backward computation.\nI1207 05:38:14.385066  1922 net.cpp:228] accuracy does not need backward computation.\nI1207 05:38:14.385072  1922 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1207 05:38:14.385078  1922 net.cpp:226] post_FC needs backward computation.\nI1207 05:38:14.385083  1922 net.cpp:226] post_pool needs backward computation.\nI1207 05:38:14.385088  1922 net.cpp:226] post_relu needs backward computation.\nI1207 05:38:14.385093  1922 net.cpp:226] post_bn needs backward 
computation.\nI1207 05:38:14.385098  1922 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1207 05:38:14.385103  1922 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1207 05:38:14.385108  1922 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1207 05:38:14.385113  1922 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1207 05:38:14.385118  1922 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1207 05:38:14.385123  1922 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1207 05:38:14.385128  1922 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1207 05:38:14.385133  1922 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1207 05:38:14.385138  1922 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1207 05:38:14.385143  1922 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1207 05:38:14.385149  1922 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385164  1922 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1207 05:38:14.385170  1922 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1207 05:38:14.385176  1922 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1207 05:38:14.385180  1922 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1207 05:38:14.385186  1922 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1207 05:38:14.385191  1922 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1207 05:38:14.385196  1922 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1207 05:38:14.385201  1922 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1207 05:38:14.385206  1922 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1207 05:38:14.385211  1922 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1207 05:38:14.385216  1922 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385221  1922 
net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1207 05:38:14.385227  1922 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1207 05:38:14.385232  1922 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1207 05:38:14.385237  1922 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1207 05:38:14.385242  1922 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1207 05:38:14.385248  1922 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1207 05:38:14.385253  1922 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1207 05:38:14.385258  1922 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1207 05:38:14.385263  1922 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1207 05:38:14.385268  1922 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1207 05:38:14.385274  1922 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385279  1922 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1207 05:38:14.385285  1922 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1207 05:38:14.385290  1922 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1207 05:38:14.385295  1922 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1207 05:38:14.385300  1922 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1207 05:38:14.385305  1922 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1207 05:38:14.385310  1922 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1207 05:38:14.385315  1922 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1207 05:38:14.385320  1922 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1207 05:38:14.385325  1922 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1207 05:38:14.385331  1922 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385336  1922 net.cpp:226] L3_b2_sum_eltwise needs backward 
computation.\nI1207 05:38:14.385342  1922 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1207 05:38:14.385347  1922 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1207 05:38:14.385352  1922 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1207 05:38:14.385357  1922 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1207 05:38:14.385362  1922 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1207 05:38:14.385367  1922 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1207 05:38:14.385373  1922 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1207 05:38:14.385378  1922 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1207 05:38:14.385383  1922 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1207 05:38:14.385388  1922 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385395  1922 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1207 05:38:14.385401  1922 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1207 05:38:14.385406  1922 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1207 05:38:14.385416  1922 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1207 05:38:14.385421  1922 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1207 05:38:14.385426  1922 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1207 05:38:14.385432  1922 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1207 05:38:14.385437  1922 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1207 05:38:14.385442  1922 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1207 05:38:14.385447  1922 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1207 05:38:14.385452  1922 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1207 05:38:14.385458  1922 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385463  1922 
net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1207 05:38:14.385468  1922 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1207 05:38:14.385478  1922 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1207 05:38:14.385483  1922 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1207 05:38:14.385488  1922 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1207 05:38:14.385493  1922 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1207 05:38:14.385499  1922 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1207 05:38:14.385504  1922 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1207 05:38:14.385509  1922 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1207 05:38:14.385514  1922 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1207 05:38:14.385520  1922 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385526  1922 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1207 05:38:14.385532  1922 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1207 05:38:14.385537  1922 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1207 05:38:14.385542  1922 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1207 05:38:14.385548  1922 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1207 05:38:14.385553  1922 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1207 05:38:14.385558  1922 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1207 05:38:14.385563  1922 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1207 05:38:14.385570  1922 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1207 05:38:14.385574  1922 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1207 05:38:14.385579  1922 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385586  1922 net.cpp:226] L2_b4_sum_eltwise needs backward 
computation.\nI1207 05:38:14.385591  1922 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1207 05:38:14.385596  1922 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1207 05:38:14.385601  1922 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1207 05:38:14.385607  1922 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1207 05:38:14.385612  1922 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1207 05:38:14.385617  1922 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1207 05:38:14.385622  1922 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1207 05:38:14.385627  1922 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1207 05:38:14.385632  1922 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1207 05:38:14.385637  1922 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385643  1922 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1207 05:38:14.385649  1922 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1207 05:38:14.385654  1922 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1207 05:38:14.385660  1922 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1207 05:38:14.385665  1922 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1207 05:38:14.385676  1922 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1207 05:38:14.385681  1922 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1207 05:38:14.385694  1922 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1207 05:38:14.385699  1922 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1207 05:38:14.385705  1922 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1207 05:38:14.385710  1922 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385716  1922 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1207 05:38:14.385722  1922 
net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1207 05:38:14.385727  1922 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1207 05:38:14.385735  1922 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1207 05:38:14.385741  1922 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1207 05:38:14.385747  1922 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1207 05:38:14.385752  1922 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1207 05:38:14.385757  1922 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1207 05:38:14.385763  1922 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1207 05:38:14.385768  1922 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1207 05:38:14.385773  1922 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385779  1922 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1207 05:38:14.385785  1922 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1207 05:38:14.385790  1922 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1207 05:38:14.385797  1922 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1207 05:38:14.385802  1922 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1207 05:38:14.385807  1922 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1207 05:38:14.385812  1922 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1207 05:38:14.385818  1922 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1207 05:38:14.385823  1922 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1207 05:38:14.385828  1922 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1207 05:38:14.385833  1922 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1207 05:38:14.385838  1922 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385844  1922 net.cpp:226] L1_b6_sum_eltwise needs backward 
computation.\nI1207 05:38:14.385850  1922 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1207 05:38:14.385855  1922 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1207 05:38:14.385860  1922 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1207 05:38:14.385866  1922 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1207 05:38:14.385871  1922 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1207 05:38:14.385876  1922 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1207 05:38:14.385881  1922 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1207 05:38:14.385887  1922 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1207 05:38:14.385892  1922 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1207 05:38:14.385897  1922 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385903  1922 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1207 05:38:14.385910  1922 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1207 05:38:14.385915  1922 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1207 05:38:14.385920  1922 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1207 05:38:14.385926  1922 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1207 05:38:14.385931  1922 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1207 05:38:14.385936  1922 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1207 05:38:14.385946  1922 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1207 05:38:14.385952  1922 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1207 05:38:14.385958  1922 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1207 05:38:14.385964  1922 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.385969  1922 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1207 05:38:14.385975  1922 
net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1207 05:38:14.385982  1922 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1207 05:38:14.385987  1922 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1207 05:38:14.385993  1922 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1207 05:38:14.385998  1922 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1207 05:38:14.386003  1922 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1207 05:38:14.386008  1922 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1207 05:38:14.386013  1922 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1207 05:38:14.386019  1922 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1207 05:38:14.386024  1922 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.386030  1922 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1207 05:38:14.386036  1922 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1207 05:38:14.386042  1922 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1207 05:38:14.386047  1922 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1207 05:38:14.386054  1922 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1207 05:38:14.386059  1922 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1207 05:38:14.386065  1922 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1207 05:38:14.386072  1922 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1207 05:38:14.386078  1922 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1207 05:38:14.386083  1922 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1207 05:38:14.386090  1922 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.386096  1922 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1207 05:38:14.386101  1922 net.cpp:226] L1_b2_brc3_conv needs backward 
computation.\nI1207 05:38:14.386106  1922 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1207 05:38:14.386111  1922 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1207 05:38:14.386117  1922 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1207 05:38:14.386122  1922 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1207 05:38:14.386128  1922 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1207 05:38:14.386133  1922 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1207 05:38:14.386139  1922 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1207 05:38:14.386144  1922 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1207 05:38:14.386150  1922 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.386155  1922 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1207 05:38:14.386162  1922 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1207 05:38:14.386168  1922 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1207 05:38:14.386173  1922 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1207 05:38:14.386178  1922 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1207 05:38:14.386184  1922 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1207 05:38:14.386189  1922 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1207 05:38:14.386194  1922 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1207 05:38:14.386200  1922 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1207 05:38:14.386205  1922 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1207 05:38:14.386216  1922 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1207 05:38:14.386222  1922 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1207 05:38:14.386227  1922 net.cpp:226] pre_conv needs backward computation.\nI1207 05:38:14.386234  1922 net.cpp:228] 
label_dataLayer_1_split does not need backward computation.\nI1207 05:38:14.386240  1922 net.cpp:228] dataLayer does not need backward computation.\nI1207 05:38:14.386245  1922 net.cpp:270] This network produces output accuracy\nI1207 05:38:14.386251  1922 net.cpp:270] This network produces output loss\nI1207 05:38:14.386543  1922 net.cpp:283] Network initialization done.\nI1207 05:38:14.392873  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:14.392907  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:14.392968  1922 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1207 05:38:14.393218  1922 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1207 05:38:14.394691  1922 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: 
\"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: 
\"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: 
\"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: 
\"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  
name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\nI1207 05:38:14.395885  1922 layer_factory.hpp:77] Creating layer dataLayer\nI1207 05:38:14.396628  1922 net.cpp:100] Creating Layer dataLayer\nI1207 05:38:14.396663  1922 net.cpp:408] dataLayer -> data_top\nI1207 05:38:14.396680  1922 net.cpp:408] dataLayer -> label\nI1207 05:38:14.396699  1922 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1207 05:38:14.402348  1929 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1207 05:38:14.402626  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:14.408614  1922 net.cpp:150] Setting up dataLayer\nI1207 05:38:14.408638  1922 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1207 05:38:14.408646  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.408653  1922 net.cpp:165] Memory required for data: 1044820\nI1207 05:38:14.408659  1922 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1207 05:38:14.408720  1922 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1207 05:38:14.408730  1922 net.cpp:434] label_dataLayer_1_split <- label\nI1207 05:38:14.408741  1922 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1207 05:38:14.408753  1922 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1207 05:38:14.408884  1922 net.cpp:150] Setting up label_dataLayer_1_split\nI1207 05:38:14.408897  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.408905  1922 net.cpp:157] Top shape: 85 (85)\nI1207 05:38:14.408911  1922 net.cpp:165] Memory required for data: 1045500\nI1207 05:38:14.408926  1922 layer_factory.hpp:77] Creating layer pre_conv\nI1207 05:38:14.408946  1922 net.cpp:100] Creating Layer pre_conv\nI1207 05:38:14.408953  1922 net.cpp:434] pre_conv <- data_top\nI1207 05:38:14.408965  1922 net.cpp:408] pre_conv -> pre_conv_top\nI1207 05:38:14.409423  1922 net.cpp:150] Setting up pre_conv\nI1207 05:38:14.409441  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.409446  
1922 net.cpp:165] Memory required for data: 6616060\nI1207 05:38:14.409461  1922 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1207 05:38:14.409469  1922 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1207 05:38:14.409476  1922 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1207 05:38:14.409487  1922 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1207 05:38:14.409497  1922 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1207 05:38:14.409559  1922 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1207 05:38:14.409584  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.409593  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.409598  1922 net.cpp:165] Memory required for data: 17757180\nI1207 05:38:14.409603  1922 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1207 05:38:14.409615  1922 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1207 05:38:14.409621  1922 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1207 05:38:14.409651  1922 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1207 05:38:14.410006  1922 net.cpp:150] Setting up L1_b1_brc1_bn\nI1207 05:38:14.410019  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.410025  1922 net.cpp:165] Memory required for data: 23327740\nI1207 05:38:14.410040  1922 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1207 05:38:14.410049  1922 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1207 05:38:14.410058  1922 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1207 05:38:14.410069  1922 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1207 05:38:14.410080  1922 net.cpp:150] Setting up L1_b1_brc1_relu\nI1207 05:38:14.410087  1922 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1207 05:38:14.410092  1922 net.cpp:165] Memory required for data: 28898300\nI1207 05:38:14.410099  1922 layer_factory.hpp:77] Creating layer 
L1_b1_brc1_conv\nI1207 05:38:14.410133  1922 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1207 05:38:14.410140  1922 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1207 05:38:14.410149  1922 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1207 05:38:14.410569  1922 net.cpp:150] Setting up L1_b1_brc1_conv\nI1207 05:38:14.410585  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.410590  1922 net.cpp:165] Memory required for data: 40039420\nI1207 05:38:14.410599  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1207 05:38:14.410612  1922 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1207 05:38:14.410617  1922 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1207 05:38:14.410629  1922 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1207 05:38:14.410959  1922 net.cpp:150] Setting up L1_b1_brc2_bn\nI1207 05:38:14.410974  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.410980  1922 net.cpp:165] Memory required for data: 51180540\nI1207 05:38:14.410995  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1207 05:38:14.411005  1922 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1207 05:38:14.411011  1922 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1207 05:38:14.411022  1922 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1207 05:38:14.411032  1922 net.cpp:150] Setting up L1_b1_brc2_relu\nI1207 05:38:14.411042  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.411047  1922 net.cpp:165] Memory required for data: 62321660\nI1207 05:38:14.411052  1922 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1207 05:38:14.411067  1922 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1207 05:38:14.411075  1922 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1207 05:38:14.411092  1922 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1207 05:38:14.412770  1922 net.cpp:150] Setting up L1_b1_brc2_conv\nI1207 05:38:14.412787  1922 net.cpp:157] Top shape: 85 32 32 32 
(2785280)\nI1207 05:38:14.412793  1922 net.cpp:165] Memory required for data: 73462780\nI1207 05:38:14.412802  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1207 05:38:14.412812  1922 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1207 05:38:14.412820  1922 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1207 05:38:14.412833  1922 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1207 05:38:14.413166  1922 net.cpp:150] Setting up L1_b1_brc3_bn\nI1207 05:38:14.413182  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.413187  1922 net.cpp:165] Memory required for data: 84603900\nI1207 05:38:14.413197  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1207 05:38:14.413208  1922 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1207 05:38:14.413214  1922 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1207 05:38:14.413223  1922 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1207 05:38:14.413235  1922 net.cpp:150] Setting up L1_b1_brc3_relu\nI1207 05:38:14.413244  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.413249  1922 net.cpp:165] Memory required for data: 95745020\nI1207 05:38:14.413254  1922 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1207 05:38:14.413266  1922 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1207 05:38:14.413274  1922 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1207 05:38:14.413286  1922 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1207 05:38:14.413691  1922 net.cpp:150] Setting up L1_b1_brc3_conv\nI1207 05:38:14.413708  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.413715  1922 net.cpp:165] Memory required for data: 118027260\nI1207 05:38:14.413728  1922 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1207 05:38:14.413743  1922 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1207 05:38:14.413749  1922 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1207 05:38:14.413761  1922 net.cpp:408] 
L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1207 05:38:14.414130  1922 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1207 05:38:14.414147  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.414152  1922 net.cpp:165] Memory required for data: 140309500\nI1207 05:38:14.414161  1922 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1207 05:38:14.414170  1922 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1207 05:38:14.414180  1922 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1207 05:38:14.414187  1922 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1207 05:38:14.414198  1922 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1207 05:38:14.414237  1922 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1207 05:38:14.414250  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.414255  1922 net.cpp:165] Memory required for data: 162591740\nI1207 05:38:14.414260  1922 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.414268  1922 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.414276  1922 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1207 05:38:14.414285  1922 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:38:14.414294  1922 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:38:14.414356  1922 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1207 05:38:14.414366  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.414373  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.414378  1922 net.cpp:165] Memory required for data: 207156220\nI1207 05:38:14.414386  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1207 05:38:14.414407  1922 net.cpp:100] Creating Layer 
L1_b2_brc1_bn\nI1207 05:38:14.414414  1922 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1207 05:38:14.414425  1922 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1207 05:38:14.414923  1922 net.cpp:150] Setting up L1_b2_brc1_bn\nI1207 05:38:14.414938  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.414943  1922 net.cpp:165] Memory required for data: 229438460\nI1207 05:38:14.414955  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1207 05:38:14.414965  1922 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1207 05:38:14.414973  1922 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1207 05:38:14.414979  1922 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1207 05:38:14.414989  1922 net.cpp:150] Setting up L1_b2_brc1_relu\nI1207 05:38:14.414999  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.415005  1922 net.cpp:165] Memory required for data: 251720700\nI1207 05:38:14.415010  1922 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1207 05:38:14.415024  1922 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1207 05:38:14.415032  1922 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1207 05:38:14.415048  1922 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1207 05:38:14.415449  1922 net.cpp:150] Setting up L1_b2_brc1_conv\nI1207 05:38:14.415464  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.415470  1922 net.cpp:165] Memory required for data: 262861820\nI1207 05:38:14.415482  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1207 05:38:14.415494  1922 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1207 05:38:14.415500  1922 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1207 05:38:14.415510  1922 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1207 05:38:14.415829  1922 net.cpp:150] Setting up L1_b2_brc2_bn\nI1207 05:38:14.415844  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.415849  1922 net.cpp:165] 
Memory required for data: 274002940\nI1207 05:38:14.415860  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1207 05:38:14.415868  1922 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1207 05:38:14.415874  1922 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1207 05:38:14.415881  1922 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1207 05:38:14.415890  1922 net.cpp:150] Setting up L1_b2_brc2_relu\nI1207 05:38:14.415899  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.415902  1922 net.cpp:165] Memory required for data: 285144060\nI1207 05:38:14.415907  1922 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1207 05:38:14.415920  1922 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1207 05:38:14.415926  1922 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1207 05:38:14.415935  1922 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1207 05:38:14.416239  1922 net.cpp:150] Setting up L1_b2_brc2_conv\nI1207 05:38:14.416254  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.416259  1922 net.cpp:165] Memory required for data: 296285180\nI1207 05:38:14.416267  1922 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1207 05:38:14.416280  1922 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1207 05:38:14.416285  1922 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1207 05:38:14.416293  1922 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1207 05:38:14.416553  1922 net.cpp:150] Setting up L1_b2_brc3_bn\nI1207 05:38:14.416568  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.416573  1922 net.cpp:165] Memory required for data: 307426300\nI1207 05:38:14.416584  1922 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1207 05:38:14.416591  1922 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1207 05:38:14.416596  1922 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1207 05:38:14.416604  1922 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1207 05:38:14.416613  1922 
net.cpp:150] Setting up L1_b2_brc3_relu\nI1207 05:38:14.416620  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.416631  1922 net.cpp:165] Memory required for data: 318567420\nI1207 05:38:14.416636  1922 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1207 05:38:14.416654  1922 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1207 05:38:14.416661  1922 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1207 05:38:14.416672  1922 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1207 05:38:14.417160  1922 net.cpp:150] Setting up L1_b2_brc3_conv\nI1207 05:38:14.417174  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417181  1922 net.cpp:165] Memory required for data: 340849660\nI1207 05:38:14.417197  1922 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1207 05:38:14.417207  1922 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1207 05:38:14.417213  1922 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1207 05:38:14.417220  1922 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1207 05:38:14.417232  1922 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1207 05:38:14.417265  1922 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1207 05:38:14.417275  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417280  1922 net.cpp:165] Memory required for data: 363131900\nI1207 05:38:14.417285  1922 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.417296  1922 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.417302  1922 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1207 05:38:14.417309  1922 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:38:14.417318  1922 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:38:14.417371  1922 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1207 05:38:14.417381  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417387  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417392  1922 net.cpp:165] Memory required for data: 407696380\nI1207 05:38:14.417397  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1207 05:38:14.417407  1922 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1207 05:38:14.417412  1922 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1207 05:38:14.417420  1922 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1207 05:38:14.417696  1922 net.cpp:150] Setting up L1_b3_brc1_bn\nI1207 05:38:14.417711  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417716  1922 net.cpp:165] Memory required for data: 429978620\nI1207 05:38:14.417726  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1207 05:38:14.417734  1922 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1207 05:38:14.417739  1922 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1207 05:38:14.417749  1922 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1207 05:38:14.417760  1922 net.cpp:150] Setting up L1_b3_brc1_relu\nI1207 05:38:14.417767  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.417771  1922 net.cpp:165] Memory required for data: 452260860\nI1207 05:38:14.417776  1922 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1207 05:38:14.417790  1922 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1207 05:38:14.417796  1922 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1207 05:38:14.417804  1922 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1207 05:38:14.418148  1922 net.cpp:150] Setting up L1_b3_brc1_conv\nI1207 05:38:14.418164  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.418169  1922 net.cpp:165] Memory required 
for data: 463401980\nI1207 05:38:14.418176  1922 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1207 05:38:14.418189  1922 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1207 05:38:14.418195  1922 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1207 05:38:14.418210  1922 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1207 05:38:14.418474  1922 net.cpp:150] Setting up L1_b3_brc2_bn\nI1207 05:38:14.418488  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.418493  1922 net.cpp:165] Memory required for data: 474543100\nI1207 05:38:14.418503  1922 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1207 05:38:14.418511  1922 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1207 05:38:14.418517  1922 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1207 05:38:14.418527  1922 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1207 05:38:14.418537  1922 net.cpp:150] Setting up L1_b3_brc2_relu\nI1207 05:38:14.418545  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.418550  1922 net.cpp:165] Memory required for data: 485684220\nI1207 05:38:14.418553  1922 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1207 05:38:14.418570  1922 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1207 05:38:14.418576  1922 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1207 05:38:14.418587  1922 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1207 05:38:14.418936  1922 net.cpp:150] Setting up L1_b3_brc2_conv\nI1207 05:38:14.418951  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.418956  1922 net.cpp:165] Memory required for data: 496825340\nI1207 05:38:14.418965  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1207 05:38:14.418973  1922 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1207 05:38:14.418979  1922 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1207 05:38:14.418987  1922 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1207 05:38:14.419250  1922 net.cpp:150] Setting up 
L1_b3_brc3_bn\nI1207 05:38:14.419262  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.419267  1922 net.cpp:165] Memory required for data: 507966460\nI1207 05:38:14.419277  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1207 05:38:14.419319  1922 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1207 05:38:14.419329  1922 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1207 05:38:14.419337  1922 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1207 05:38:14.419348  1922 net.cpp:150] Setting up L1_b3_brc3_relu\nI1207 05:38:14.419354  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.419359  1922 net.cpp:165] Memory required for data: 519107580\nI1207 05:38:14.419364  1922 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1207 05:38:14.419378  1922 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1207 05:38:14.419384  1922 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1207 05:38:14.419395  1922 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1207 05:38:14.419749  1922 net.cpp:150] Setting up L1_b3_brc3_conv\nI1207 05:38:14.419762  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.419767  1922 net.cpp:165] Memory required for data: 541389820\nI1207 05:38:14.419776  1922 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1207 05:38:14.419785  1922 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1207 05:38:14.419791  1922 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1207 05:38:14.419798  1922 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1207 05:38:14.419806  1922 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1207 05:38:14.419845  1922 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1207 05:38:14.419860  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.419865  1922 net.cpp:165] Memory required for data: 563672060\nI1207 05:38:14.419870  1922 layer_factory.hpp:77] Creating layer 
L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.419878  1922 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.419884  1922 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1207 05:38:14.419891  1922 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:38:14.419909  1922 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:38:14.419965  1922 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1207 05:38:14.419975  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.419981  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.419986  1922 net.cpp:165] Memory required for data: 608236540\nI1207 05:38:14.419991  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1207 05:38:14.420001  1922 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1207 05:38:14.420007  1922 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1207 05:38:14.420016  1922 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1207 05:38:14.420274  1922 net.cpp:150] Setting up L1_b4_brc1_bn\nI1207 05:38:14.420287  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.420292  1922 net.cpp:165] Memory required for data: 630518780\nI1207 05:38:14.420302  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1207 05:38:14.420310  1922 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1207 05:38:14.420316  1922 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1207 05:38:14.420323  1922 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1207 05:38:14.420332  1922 net.cpp:150] Setting up L1_b4_brc1_relu\nI1207 05:38:14.420339  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.420344  1922 net.cpp:165] Memory required for data: 652801020\nI1207 
05:38:14.420348  1922 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1207 05:38:14.420361  1922 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1207 05:38:14.420367  1922 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1207 05:38:14.420375  1922 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1207 05:38:14.420723  1922 net.cpp:150] Setting up L1_b4_brc1_conv\nI1207 05:38:14.420738  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.420743  1922 net.cpp:165] Memory required for data: 663942140\nI1207 05:38:14.420752  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1207 05:38:14.420760  1922 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1207 05:38:14.420766  1922 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1207 05:38:14.420778  1922 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1207 05:38:14.421057  1922 net.cpp:150] Setting up L1_b4_brc2_bn\nI1207 05:38:14.421074  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.421079  1922 net.cpp:165] Memory required for data: 675083260\nI1207 05:38:14.421090  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1207 05:38:14.421098  1922 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1207 05:38:14.421104  1922 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1207 05:38:14.421111  1922 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1207 05:38:14.421121  1922 net.cpp:150] Setting up L1_b4_brc2_relu\nI1207 05:38:14.421128  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.421133  1922 net.cpp:165] Memory required for data: 686224380\nI1207 05:38:14.421138  1922 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1207 05:38:14.421147  1922 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1207 05:38:14.421154  1922 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1207 05:38:14.421164  1922 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1207 05:38:14.421483  1922 net.cpp:150] Setting up L1_b4_brc2_conv\nI1207 
05:38:14.421497  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.421502  1922 net.cpp:165] Memory required for data: 697365500\nI1207 05:38:14.421510  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1207 05:38:14.421522  1922 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1207 05:38:14.421528  1922 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1207 05:38:14.421536  1922 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1207 05:38:14.421809  1922 net.cpp:150] Setting up L1_b4_brc3_bn\nI1207 05:38:14.421825  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.421838  1922 net.cpp:165] Memory required for data: 708506620\nI1207 05:38:14.421847  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1207 05:38:14.421855  1922 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1207 05:38:14.421861  1922 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1207 05:38:14.421869  1922 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1207 05:38:14.421878  1922 net.cpp:150] Setting up L1_b4_brc3_relu\nI1207 05:38:14.421885  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.421890  1922 net.cpp:165] Memory required for data: 719647740\nI1207 05:38:14.421895  1922 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1207 05:38:14.421907  1922 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1207 05:38:14.421913  1922 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1207 05:38:14.421921  1922 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1207 05:38:14.422264  1922 net.cpp:150] Setting up L1_b4_brc3_conv\nI1207 05:38:14.422278  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422283  1922 net.cpp:165] Memory required for data: 741929980\nI1207 05:38:14.422292  1922 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1207 05:38:14.422300  1922 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1207 05:38:14.422307  1922 net.cpp:434] L1_b4_sum_eltwise <- 
L1_b4_brc3_conv_top\nI1207 05:38:14.422313  1922 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1207 05:38:14.422325  1922 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1207 05:38:14.422358  1922 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1207 05:38:14.422374  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422379  1922 net.cpp:165] Memory required for data: 764212220\nI1207 05:38:14.422384  1922 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.422392  1922 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.422397  1922 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1207 05:38:14.422405  1922 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:38:14.422415  1922 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:38:14.422464  1922 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1207 05:38:14.422477  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422483  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422487  1922 net.cpp:165] Memory required for data: 808776700\nI1207 05:38:14.422492  1922 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1207 05:38:14.422503  1922 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1207 05:38:14.422509  1922 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1207 05:38:14.422518  1922 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1207 05:38:14.422783  1922 net.cpp:150] Setting up L1_b5_brc1_bn\nI1207 05:38:14.422797  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422802  1922 net.cpp:165] Memory required for data: 831058940\nI1207 05:38:14.422824  1922 layer_factory.hpp:77] 
Creating layer L1_b5_brc1_relu\nI1207 05:38:14.422835  1922 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1207 05:38:14.422842  1922 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1207 05:38:14.422849  1922 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1207 05:38:14.422858  1922 net.cpp:150] Setting up L1_b5_brc1_relu\nI1207 05:38:14.422865  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.422870  1922 net.cpp:165] Memory required for data: 853341180\nI1207 05:38:14.422874  1922 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1207 05:38:14.422893  1922 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1207 05:38:14.422899  1922 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1207 05:38:14.422907  1922 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1207 05:38:14.423257  1922 net.cpp:150] Setting up L1_b5_brc1_conv\nI1207 05:38:14.423271  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.423276  1922 net.cpp:165] Memory required for data: 864482300\nI1207 05:38:14.423285  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1207 05:38:14.423295  1922 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1207 05:38:14.423300  1922 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1207 05:38:14.423311  1922 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1207 05:38:14.423573  1922 net.cpp:150] Setting up L1_b5_brc2_bn\nI1207 05:38:14.423589  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.423594  1922 net.cpp:165] Memory required for data: 875623420\nI1207 05:38:14.423605  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1207 05:38:14.423612  1922 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1207 05:38:14.423619  1922 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1207 05:38:14.423625  1922 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1207 05:38:14.423635  1922 net.cpp:150] Setting up L1_b5_brc2_relu\nI1207 05:38:14.423642  1922 net.cpp:157] Top 
shape: 85 32 32 32 (2785280)\nI1207 05:38:14.423647  1922 net.cpp:165] Memory required for data: 886764540\nI1207 05:38:14.423651  1922 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1207 05:38:14.423661  1922 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1207 05:38:14.423667  1922 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1207 05:38:14.423681  1922 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1207 05:38:14.423992  1922 net.cpp:150] Setting up L1_b5_brc2_conv\nI1207 05:38:14.424007  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.424012  1922 net.cpp:165] Memory required for data: 897905660\nI1207 05:38:14.424021  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1207 05:38:14.424029  1922 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1207 05:38:14.424036  1922 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1207 05:38:14.424046  1922 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1207 05:38:14.424309  1922 net.cpp:150] Setting up L1_b5_brc3_bn\nI1207 05:38:14.424334  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.424338  1922 net.cpp:165] Memory required for data: 909046780\nI1207 05:38:14.424348  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1207 05:38:14.424357  1922 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1207 05:38:14.424363  1922 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1207 05:38:14.424371  1922 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1207 05:38:14.424381  1922 net.cpp:150] Setting up L1_b5_brc3_relu\nI1207 05:38:14.424387  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.424391  1922 net.cpp:165] Memory required for data: 920187900\nI1207 05:38:14.424396  1922 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1207 05:38:14.424407  1922 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1207 05:38:14.424412  1922 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1207 05:38:14.424423  1922 net.cpp:408] 
L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1207 05:38:14.424778  1922 net.cpp:150] Setting up L1_b5_brc3_conv\nI1207 05:38:14.424793  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.424798  1922 net.cpp:165] Memory required for data: 942470140\nI1207 05:38:14.424806  1922 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1207 05:38:14.424815  1922 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1207 05:38:14.424821  1922 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1207 05:38:14.424829  1922 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1207 05:38:14.424839  1922 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1207 05:38:14.424872  1922 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1207 05:38:14.424885  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.424890  1922 net.cpp:165] Memory required for data: 964752380\nI1207 05:38:14.424901  1922 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.424909  1922 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.424916  1922 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1207 05:38:14.424923  1922 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:38:14.424932  1922 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:38:14.424983  1922 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1207 05:38:14.424995  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.425001  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.425006  1922 net.cpp:165] Memory required for data: 1009316860\nI1207 05:38:14.425011  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1207 05:38:14.425024  1922 net.cpp:100] 
Creating Layer L1_b6_brc1_bn\nI1207 05:38:14.425030  1922 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1207 05:38:14.425038  1922 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1207 05:38:14.425297  1922 net.cpp:150] Setting up L1_b6_brc1_bn\nI1207 05:38:14.425310  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.425315  1922 net.cpp:165] Memory required for data: 1031599100\nI1207 05:38:14.425326  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1207 05:38:14.425333  1922 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1207 05:38:14.425340  1922 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1207 05:38:14.425348  1922 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1207 05:38:14.425356  1922 net.cpp:150] Setting up L1_b6_brc1_relu\nI1207 05:38:14.425364  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.425369  1922 net.cpp:165] Memory required for data: 1053881340\nI1207 05:38:14.425374  1922 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1207 05:38:14.425386  1922 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1207 05:38:14.425393  1922 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1207 05:38:14.425401  1922 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1207 05:38:14.425753  1922 net.cpp:150] Setting up L1_b6_brc1_conv\nI1207 05:38:14.425768  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.425773  1922 net.cpp:165] Memory required for data: 1065022460\nI1207 05:38:14.425781  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1207 05:38:14.425793  1922 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1207 05:38:14.425799  1922 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1207 05:38:14.425807  1922 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1207 05:38:14.426082  1922 net.cpp:150] Setting up L1_b6_brc2_bn\nI1207 05:38:14.426098  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.426103  1922 
net.cpp:165] Memory required for data: 1076163580\nI1207 05:38:14.426115  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1207 05:38:14.426127  1922 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1207 05:38:14.426133  1922 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1207 05:38:14.426144  1922 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1207 05:38:14.426154  1922 net.cpp:150] Setting up L1_b6_brc2_relu\nI1207 05:38:14.426162  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.426167  1922 net.cpp:165] Memory required for data: 1087304700\nI1207 05:38:14.426172  1922 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1207 05:38:14.426182  1922 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1207 05:38:14.426187  1922 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1207 05:38:14.426198  1922 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1207 05:38:14.426515  1922 net.cpp:150] Setting up L1_b6_brc2_conv\nI1207 05:38:14.426529  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.426542  1922 net.cpp:165] Memory required for data: 1098445820\nI1207 05:38:14.426550  1922 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1207 05:38:14.426559  1922 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1207 05:38:14.426568  1922 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1207 05:38:14.426576  1922 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1207 05:38:14.426852  1922 net.cpp:150] Setting up L1_b6_brc3_bn\nI1207 05:38:14.426865  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.426870  1922 net.cpp:165] Memory required for data: 1109586940\nI1207 05:38:14.426880  1922 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1207 05:38:14.426888  1922 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1207 05:38:14.426894  1922 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1207 05:38:14.426901  1922 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1207 
05:38:14.426911  1922 net.cpp:150] Setting up L1_b6_brc3_relu\nI1207 05:38:14.426918  1922 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1207 05:38:14.426923  1922 net.cpp:165] Memory required for data: 1120728060\nI1207 05:38:14.426928  1922 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1207 05:38:14.426940  1922 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1207 05:38:14.426946  1922 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1207 05:38:14.426957  1922 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1207 05:38:14.427311  1922 net.cpp:150] Setting up L1_b6_brc3_conv\nI1207 05:38:14.427325  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427330  1922 net.cpp:165] Memory required for data: 1143010300\nI1207 05:38:14.427338  1922 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1207 05:38:14.427350  1922 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1207 05:38:14.427356  1922 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1207 05:38:14.427363  1922 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1207 05:38:14.427374  1922 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1207 05:38:14.427407  1922 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1207 05:38:14.427418  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427423  1922 net.cpp:165] Memory required for data: 1165292540\nI1207 05:38:14.427428  1922 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.427443  1922 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.427449  1922 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1207 05:38:14.427458  1922 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:38:14.427466  1922 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> 
L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:38:14.427517  1922 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1207 05:38:14.427528  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427536  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427541  1922 net.cpp:165] Memory required for data: 1209857020\nI1207 05:38:14.427544  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1207 05:38:14.427556  1922 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1207 05:38:14.427561  1922 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1207 05:38:14.427568  1922 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1207 05:38:14.427837  1922 net.cpp:150] Setting up L2_b1_brc1_bn\nI1207 05:38:14.427851  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427856  1922 net.cpp:165] Memory required for data: 1232139260\nI1207 05:38:14.427866  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1207 05:38:14.427875  1922 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1207 05:38:14.427881  1922 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1207 05:38:14.427891  1922 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1207 05:38:14.427907  1922 net.cpp:150] Setting up L2_b1_brc1_relu\nI1207 05:38:14.427914  1922 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1207 05:38:14.427919  1922 net.cpp:165] Memory required for data: 1254421500\nI1207 05:38:14.427923  1922 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1207 05:38:14.427938  1922 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1207 05:38:14.427944  1922 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1207 05:38:14.427953  1922 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1207 05:38:14.428342  1922 net.cpp:150] Setting up L2_b1_brc1_conv\nI1207 05:38:14.428356  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.428361  1922 net.cpp:165] Memory 
required for data: 1259992060\nI1207 05:38:14.428369  1922 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1207 05:38:14.428381  1922 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1207 05:38:14.428387  1922 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1207 05:38:14.428395  1922 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1207 05:38:14.428658  1922 net.cpp:150] Setting up L2_b1_brc2_bn\nI1207 05:38:14.428673  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.428678  1922 net.cpp:165] Memory required for data: 1265562620\nI1207 05:38:14.428694  1922 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1207 05:38:14.428705  1922 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1207 05:38:14.428712  1922 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1207 05:38:14.428719  1922 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1207 05:38:14.428731  1922 net.cpp:150] Setting up L2_b1_brc2_relu\nI1207 05:38:14.428740  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.428743  1922 net.cpp:165] Memory required for data: 1271133180\nI1207 05:38:14.428748  1922 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1207 05:38:14.428759  1922 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1207 05:38:14.428764  1922 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1207 05:38:14.428776  1922 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1207 05:38:14.429112  1922 net.cpp:150] Setting up L2_b1_brc2_conv\nI1207 05:38:14.429126  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.429131  1922 net.cpp:165] Memory required for data: 1276703740\nI1207 05:38:14.429139  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1207 05:38:14.429148  1922 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1207 05:38:14.429154  1922 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1207 05:38:14.429165  1922 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1207 05:38:14.429425  1922 net.cpp:150] Setting 
up L2_b1_brc3_bn\nI1207 05:38:14.429438  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.429443  1922 net.cpp:165] Memory required for data: 1282274300\nI1207 05:38:14.429452  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1207 05:38:14.429466  1922 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1207 05:38:14.429473  1922 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1207 05:38:14.429481  1922 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1207 05:38:14.429491  1922 net.cpp:150] Setting up L2_b1_brc3_relu\nI1207 05:38:14.429497  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.429502  1922 net.cpp:165] Memory required for data: 1287844860\nI1207 05:38:14.429507  1922 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1207 05:38:14.429517  1922 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1207 05:38:14.429522  1922 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1207 05:38:14.429533  1922 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1207 05:38:14.430004  1922 net.cpp:150] Setting up L2_b1_brc3_conv\nI1207 05:38:14.430018  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.430023  1922 net.cpp:165] Memory required for data: 1298985980\nI1207 05:38:14.430032  1922 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1207 05:38:14.430043  1922 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1207 05:38:14.430058  1922 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1207 05:38:14.430070  1922 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1207 05:38:14.430528  1922 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1207 05:38:14.430542  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.430548  1922 net.cpp:165] Memory required for data: 1310127100\nI1207 05:38:14.430557  1922 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1207 05:38:14.430565  1922 net.cpp:100] Creating Layer 
L2_b1_sum_eltwise\nI1207 05:38:14.430572  1922 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1207 05:38:14.430579  1922 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1207 05:38:14.430586  1922 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1207 05:38:14.430613  1922 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1207 05:38:14.430621  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.430626  1922 net.cpp:165] Memory required for data: 1321268220\nI1207 05:38:14.430632  1922 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.430642  1922 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.430649  1922 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1207 05:38:14.430658  1922 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:38:14.430668  1922 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:38:14.430723  1922 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1207 05:38:14.430743  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.430750  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.430754  1922 net.cpp:165] Memory required for data: 1343550460\nI1207 05:38:14.430760  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1207 05:38:14.430768  1922 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1207 05:38:14.430774  1922 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1207 05:38:14.430785  1922 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1207 05:38:14.431025  1922 net.cpp:150] Setting up L2_b2_brc1_bn\nI1207 05:38:14.431038  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.431043  1922 net.cpp:165] Memory required for data: 
1354691580\nI1207 05:38:14.431053  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1207 05:38:14.431061  1922 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1207 05:38:14.431067  1922 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1207 05:38:14.431077  1922 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1207 05:38:14.431087  1922 net.cpp:150] Setting up L2_b2_brc1_relu\nI1207 05:38:14.431094  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.431098  1922 net.cpp:165] Memory required for data: 1365832700\nI1207 05:38:14.431103  1922 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1207 05:38:14.431113  1922 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1207 05:38:14.431119  1922 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1207 05:38:14.431128  1922 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1207 05:38:14.431596  1922 net.cpp:150] Setting up L2_b2_brc1_conv\nI1207 05:38:14.431609  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.431614  1922 net.cpp:165] Memory required for data: 1371403260\nI1207 05:38:14.431623  1922 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1207 05:38:14.431634  1922 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1207 05:38:14.431640  1922 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1207 05:38:14.431648  1922 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1207 05:38:14.431910  1922 net.cpp:150] Setting up L2_b2_brc2_bn\nI1207 05:38:14.431922  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.431934  1922 net.cpp:165] Memory required for data: 1376973820\nI1207 05:38:14.431946  1922 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1207 05:38:14.431953  1922 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1207 05:38:14.431959  1922 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1207 05:38:14.431967  1922 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1207 05:38:14.431977  1922 net.cpp:150] Setting 
up L2_b2_brc2_relu\nI1207 05:38:14.431984  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.431988  1922 net.cpp:165] Memory required for data: 1382544380\nI1207 05:38:14.431993  1922 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1207 05:38:14.432006  1922 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1207 05:38:14.432013  1922 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1207 05:38:14.432024  1922 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1207 05:38:14.432354  1922 net.cpp:150] Setting up L2_b2_brc2_conv\nI1207 05:38:14.432368  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.432374  1922 net.cpp:165] Memory required for data: 1388114940\nI1207 05:38:14.432382  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1207 05:38:14.432390  1922 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1207 05:38:14.432400  1922 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1207 05:38:14.432409  1922 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1207 05:38:14.432657  1922 net.cpp:150] Setting up L2_b2_brc3_bn\nI1207 05:38:14.432670  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.432675  1922 net.cpp:165] Memory required for data: 1393685500\nI1207 05:38:14.432690  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1207 05:38:14.432699  1922 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1207 05:38:14.432705  1922 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1207 05:38:14.432713  1922 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1207 05:38:14.432723  1922 net.cpp:150] Setting up L2_b2_brc3_relu\nI1207 05:38:14.432730  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.432734  1922 net.cpp:165] Memory required for data: 1399256060\nI1207 05:38:14.432739  1922 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1207 05:38:14.432752  1922 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1207 05:38:14.432759  1922 net.cpp:434] L2_b2_brc3_conv 
<- L2_b2_brc3_bn_top\nI1207 05:38:14.432770  1922 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1207 05:38:14.433231  1922 net.cpp:150] Setting up L2_b2_brc3_conv\nI1207 05:38:14.433245  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433250  1922 net.cpp:165] Memory required for data: 1410397180\nI1207 05:38:14.433259  1922 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1207 05:38:14.433270  1922 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1207 05:38:14.433276  1922 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1207 05:38:14.433284  1922 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1207 05:38:14.433291  1922 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1207 05:38:14.433323  1922 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1207 05:38:14.433333  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433338  1922 net.cpp:165] Memory required for data: 1421538300\nI1207 05:38:14.433343  1922 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.433351  1922 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.433357  1922 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1207 05:38:14.433369  1922 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:38:14.433380  1922 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:38:14.433430  1922 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1207 05:38:14.433449  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433456  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433460  1922 net.cpp:165] Memory required for data: 1443820540\nI1207 05:38:14.433466  1922 layer_factory.hpp:77] 
Creating layer L2_b3_brc1_bn\nI1207 05:38:14.433477  1922 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1207 05:38:14.433483  1922 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1207 05:38:14.433495  1922 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1207 05:38:14.433743  1922 net.cpp:150] Setting up L2_b3_brc1_bn\nI1207 05:38:14.433760  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433765  1922 net.cpp:165] Memory required for data: 1454961660\nI1207 05:38:14.433796  1922 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1207 05:38:14.433805  1922 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1207 05:38:14.433811  1922 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1207 05:38:14.433818  1922 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1207 05:38:14.433828  1922 net.cpp:150] Setting up L2_b3_brc1_relu\nI1207 05:38:14.433835  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.433840  1922 net.cpp:165] Memory required for data: 1466102780\nI1207 05:38:14.433845  1922 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1207 05:38:14.433862  1922 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1207 05:38:14.433869  1922 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1207 05:38:14.433877  1922 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1207 05:38:14.434351  1922 net.cpp:150] Setting up L2_b3_brc1_conv\nI1207 05:38:14.434365  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.434370  1922 net.cpp:165] Memory required for data: 1471673340\nI1207 05:38:14.434379  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1207 05:38:14.434387  1922 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1207 05:38:14.434394  1922 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1207 05:38:14.434404  1922 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1207 05:38:14.434662  1922 net.cpp:150] Setting up L2_b3_brc2_bn\nI1207 05:38:14.434675  1922 
net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.434680  1922 net.cpp:165] Memory required for data: 1477243900\nI1207 05:38:14.434695  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1207 05:38:14.434705  1922 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1207 05:38:14.434710  1922 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1207 05:38:14.434720  1922 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1207 05:38:14.434731  1922 net.cpp:150] Setting up L2_b3_brc2_relu\nI1207 05:38:14.434738  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.434742  1922 net.cpp:165] Memory required for data: 1482814460\nI1207 05:38:14.434747  1922 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1207 05:38:14.434757  1922 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1207 05:38:14.434762  1922 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1207 05:38:14.434772  1922 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1207 05:38:14.435107  1922 net.cpp:150] Setting up L2_b3_brc2_conv\nI1207 05:38:14.435124  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.435130  1922 net.cpp:165] Memory required for data: 1488385020\nI1207 05:38:14.435138  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1207 05:38:14.435147  1922 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1207 05:38:14.435153  1922 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1207 05:38:14.435160  1922 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1207 05:38:14.435416  1922 net.cpp:150] Setting up L2_b3_brc3_bn\nI1207 05:38:14.435430  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.435434  1922 net.cpp:165] Memory required for data: 1493955580\nI1207 05:38:14.435444  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1207 05:38:14.435452  1922 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1207 05:38:14.435465  1922 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1207 05:38:14.435477  
1922 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1207 05:38:14.435487  1922 net.cpp:150] Setting up L2_b3_brc3_relu\nI1207 05:38:14.435494  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.435499  1922 net.cpp:165] Memory required for data: 1499526140\nI1207 05:38:14.435504  1922 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1207 05:38:14.435521  1922 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1207 05:38:14.435528  1922 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1207 05:38:14.435535  1922 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1207 05:38:14.436007  1922 net.cpp:150] Setting up L2_b3_brc3_conv\nI1207 05:38:14.436022  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436027  1922 net.cpp:165] Memory required for data: 1510667260\nI1207 05:38:14.436034  1922 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1207 05:38:14.436043  1922 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1207 05:38:14.436049  1922 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1207 05:38:14.436056  1922 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1207 05:38:14.436067  1922 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1207 05:38:14.436094  1922 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1207 05:38:14.436103  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436108  1922 net.cpp:165] Memory required for data: 1521808380\nI1207 05:38:14.436113  1922 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.436123  1922 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.436130  1922 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1207 05:38:14.436137  1922 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:38:14.436146  1922 
net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:38:14.436197  1922 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1207 05:38:14.436208  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436215  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436220  1922 net.cpp:165] Memory required for data: 1544090620\nI1207 05:38:14.436224  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1207 05:38:14.436235  1922 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1207 05:38:14.436241  1922 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1207 05:38:14.436249  1922 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1207 05:38:14.436484  1922 net.cpp:150] Setting up L2_b4_brc1_bn\nI1207 05:38:14.436497  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436502  1922 net.cpp:165] Memory required for data: 1555231740\nI1207 05:38:14.436512  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1207 05:38:14.436520  1922 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1207 05:38:14.436527  1922 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1207 05:38:14.436533  1922 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1207 05:38:14.436543  1922 net.cpp:150] Setting up L2_b4_brc1_relu\nI1207 05:38:14.436550  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.436554  1922 net.cpp:165] Memory required for data: 1566372860\nI1207 05:38:14.436559  1922 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1207 05:38:14.436573  1922 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1207 05:38:14.436578  1922 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1207 05:38:14.436589  1922 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1207 05:38:14.437064  1922 net.cpp:150] Setting up L2_b4_brc1_conv\nI1207 05:38:14.437079  1922 net.cpp:157] Top shape: 85 64 16 
16 (1392640)\nI1207 05:38:14.437091  1922 net.cpp:165] Memory required for data: 1571943420\nI1207 05:38:14.437100  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1207 05:38:14.437111  1922 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1207 05:38:14.437119  1922 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1207 05:38:14.437129  1922 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1207 05:38:14.437384  1922 net.cpp:150] Setting up L2_b4_brc2_bn\nI1207 05:38:14.437397  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.437402  1922 net.cpp:165] Memory required for data: 1577513980\nI1207 05:38:14.437412  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1207 05:38:14.437420  1922 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1207 05:38:14.437427  1922 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1207 05:38:14.437433  1922 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1207 05:38:14.437443  1922 net.cpp:150] Setting up L2_b4_brc2_relu\nI1207 05:38:14.437450  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.437455  1922 net.cpp:165] Memory required for data: 1583084540\nI1207 05:38:14.437459  1922 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1207 05:38:14.437474  1922 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1207 05:38:14.437479  1922 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1207 05:38:14.437490  1922 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1207 05:38:14.437827  1922 net.cpp:150] Setting up L2_b4_brc2_conv\nI1207 05:38:14.437841  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.437847  1922 net.cpp:165] Memory required for data: 1588655100\nI1207 05:38:14.437855  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1207 05:38:14.437868  1922 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1207 05:38:14.437875  1922 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1207 05:38:14.437885  1922 net.cpp:408] L2_b4_brc3_bn -> 
L2_b4_brc3_bn_top\nI1207 05:38:14.438143  1922 net.cpp:150] Setting up L2_b4_brc3_bn\nI1207 05:38:14.438155  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.438160  1922 net.cpp:165] Memory required for data: 1594225660\nI1207 05:38:14.438170  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1207 05:38:14.438179  1922 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1207 05:38:14.438184  1922 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1207 05:38:14.438191  1922 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1207 05:38:14.438201  1922 net.cpp:150] Setting up L2_b4_brc3_relu\nI1207 05:38:14.438208  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.438213  1922 net.cpp:165] Memory required for data: 1599796220\nI1207 05:38:14.438218  1922 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1207 05:38:14.438231  1922 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1207 05:38:14.438237  1922 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1207 05:38:14.438246  1922 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1207 05:38:14.438877  1922 net.cpp:150] Setting up L2_b4_brc3_conv\nI1207 05:38:14.438894  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.438899  1922 net.cpp:165] Memory required for data: 1610937340\nI1207 05:38:14.438908  1922 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1207 05:38:14.438918  1922 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1207 05:38:14.438925  1922 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1207 05:38:14.438931  1922 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1207 05:38:14.438944  1922 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1207 05:38:14.438973  1922 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1207 05:38:14.438985  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.438990  1922 net.cpp:165] Memory required for data: 1622078460\nI1207 
05:38:14.438995  1922 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.439007  1922 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.439019  1922 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1207 05:38:14.439028  1922 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:38:14.439038  1922 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:38:14.439095  1922 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1207 05:38:14.439107  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.439113  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.439118  1922 net.cpp:165] Memory required for data: 1644360700\nI1207 05:38:14.439123  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1207 05:38:14.439134  1922 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1207 05:38:14.439141  1922 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1207 05:38:14.439149  1922 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1207 05:38:14.439398  1922 net.cpp:150] Setting up L2_b5_brc1_bn\nI1207 05:38:14.439410  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.439415  1922 net.cpp:165] Memory required for data: 1655501820\nI1207 05:38:14.439425  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1207 05:38:14.439436  1922 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1207 05:38:14.439442  1922 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1207 05:38:14.439450  1922 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1207 05:38:14.439460  1922 net.cpp:150] Setting up L2_b5_brc1_relu\nI1207 05:38:14.439467  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.439471  
1922 net.cpp:165] Memory required for data: 1666642940\nI1207 05:38:14.439476  1922 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1207 05:38:14.439486  1922 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1207 05:38:14.439492  1922 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1207 05:38:14.439504  1922 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1207 05:38:14.439990  1922 net.cpp:150] Setting up L2_b5_brc1_conv\nI1207 05:38:14.440004  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.440009  1922 net.cpp:165] Memory required for data: 1672213500\nI1207 05:38:14.440017  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1207 05:38:14.440026  1922 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1207 05:38:14.440033  1922 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1207 05:38:14.440043  1922 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1207 05:38:14.440304  1922 net.cpp:150] Setting up L2_b5_brc2_bn\nI1207 05:38:14.440316  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.440321  1922 net.cpp:165] Memory required for data: 1677784060\nI1207 05:38:14.440331  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1207 05:38:14.440343  1922 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1207 05:38:14.440351  1922 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1207 05:38:14.440357  1922 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1207 05:38:14.440367  1922 net.cpp:150] Setting up L2_b5_brc2_relu\nI1207 05:38:14.440374  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.440379  1922 net.cpp:165] Memory required for data: 1683354620\nI1207 05:38:14.440383  1922 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1207 05:38:14.440393  1922 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1207 05:38:14.440399  1922 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1207 05:38:14.440410  1922 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1207 
05:38:14.440753  1922 net.cpp:150] Setting up L2_b5_brc2_conv\nI1207 05:38:14.440768  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.440773  1922 net.cpp:165] Memory required for data: 1688925180\nI1207 05:38:14.440781  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1207 05:38:14.440801  1922 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1207 05:38:14.440809  1922 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1207 05:38:14.440816  1922 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1207 05:38:14.441079  1922 net.cpp:150] Setting up L2_b5_brc3_bn\nI1207 05:38:14.441092  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.441097  1922 net.cpp:165] Memory required for data: 1694495740\nI1207 05:38:14.441107  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1207 05:38:14.441115  1922 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1207 05:38:14.441121  1922 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1207 05:38:14.441133  1922 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1207 05:38:14.441143  1922 net.cpp:150] Setting up L2_b5_brc3_relu\nI1207 05:38:14.441149  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.441154  1922 net.cpp:165] Memory required for data: 1700066300\nI1207 05:38:14.441159  1922 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1207 05:38:14.441171  1922 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1207 05:38:14.441177  1922 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1207 05:38:14.441186  1922 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1207 05:38:14.441653  1922 net.cpp:150] Setting up L2_b5_brc3_conv\nI1207 05:38:14.441666  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.441673  1922 net.cpp:165] Memory required for data: 1711207420\nI1207 05:38:14.441680  1922 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1207 05:38:14.441694  1922 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1207 
05:38:14.441702  1922 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1207 05:38:14.441709  1922 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1207 05:38:14.441720  1922 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1207 05:38:14.441747  1922 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1207 05:38:14.441756  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.441761  1922 net.cpp:165] Memory required for data: 1722348540\nI1207 05:38:14.441766  1922 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.441776  1922 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.441783  1922 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1207 05:38:14.441790  1922 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:38:14.441800  1922 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:38:14.441851  1922 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1207 05:38:14.441864  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.441870  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.441874  1922 net.cpp:165] Memory required for data: 1744630780\nI1207 05:38:14.441879  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1207 05:38:14.441890  1922 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1207 05:38:14.441896  1922 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1207 05:38:14.441905  1922 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1207 05:38:14.442150  1922 net.cpp:150] Setting up L2_b6_brc1_bn\nI1207 05:38:14.442162  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.442167  1922 net.cpp:165] Memory required for data: 
1755771900\nI1207 05:38:14.442178  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1207 05:38:14.442200  1922 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1207 05:38:14.442207  1922 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1207 05:38:14.442215  1922 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1207 05:38:14.442224  1922 net.cpp:150] Setting up L2_b6_brc1_relu\nI1207 05:38:14.442232  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.442243  1922 net.cpp:165] Memory required for data: 1766913020\nI1207 05:38:14.442248  1922 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1207 05:38:14.442262  1922 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1207 05:38:14.442270  1922 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1207 05:38:14.442278  1922 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1207 05:38:14.442770  1922 net.cpp:150] Setting up L2_b6_brc1_conv\nI1207 05:38:14.442785  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.442790  1922 net.cpp:165] Memory required for data: 1772483580\nI1207 05:38:14.442798  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1207 05:38:14.442811  1922 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1207 05:38:14.442817  1922 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1207 05:38:14.442826  1922 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1207 05:38:14.443085  1922 net.cpp:150] Setting up L2_b6_brc2_bn\nI1207 05:38:14.443101  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.443106  1922 net.cpp:165] Memory required for data: 1778054140\nI1207 05:38:14.443116  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1207 05:38:14.443123  1922 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1207 05:38:14.443130  1922 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1207 05:38:14.443136  1922 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1207 05:38:14.443146  1922 net.cpp:150] Setting 
up L2_b6_brc2_relu\nI1207 05:38:14.443155  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.443158  1922 net.cpp:165] Memory required for data: 1783624700\nI1207 05:38:14.443163  1922 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1207 05:38:14.443173  1922 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1207 05:38:14.443178  1922 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1207 05:38:14.443190  1922 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1207 05:38:14.443527  1922 net.cpp:150] Setting up L2_b6_brc2_conv\nI1207 05:38:14.443542  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.443547  1922 net.cpp:165] Memory required for data: 1789195260\nI1207 05:38:14.443554  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1207 05:38:14.443567  1922 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1207 05:38:14.443572  1922 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1207 05:38:14.443581  1922 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1207 05:38:14.443846  1922 net.cpp:150] Setting up L2_b6_brc3_bn\nI1207 05:38:14.443859  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.443864  1922 net.cpp:165] Memory required for data: 1794765820\nI1207 05:38:14.443874  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1207 05:38:14.443886  1922 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1207 05:38:14.443892  1922 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1207 05:38:14.443899  1922 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1207 05:38:14.443909  1922 net.cpp:150] Setting up L2_b6_brc3_relu\nI1207 05:38:14.443917  1922 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1207 05:38:14.443920  1922 net.cpp:165] Memory required for data: 1800336380\nI1207 05:38:14.443925  1922 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1207 05:38:14.443934  1922 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1207 05:38:14.443940  1922 net.cpp:434] L2_b6_brc3_conv 
<- L2_b6_brc3_bn_top\nI1207 05:38:14.443953  1922 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1207 05:38:14.444427  1922 net.cpp:150] Setting up L2_b6_brc3_conv\nI1207 05:38:14.444440  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.444445  1922 net.cpp:165] Memory required for data: 1811477500\nI1207 05:38:14.444453  1922 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1207 05:38:14.444463  1922 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1207 05:38:14.444469  1922 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1207 05:38:14.444476  1922 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1207 05:38:14.444491  1922 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1207 05:38:14.444525  1922 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1207 05:38:14.444535  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.444540  1922 net.cpp:165] Memory required for data: 1822618620\nI1207 05:38:14.444545  1922 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.444552  1922 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.444558  1922 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1207 05:38:14.444568  1922 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:38:14.444578  1922 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:38:14.444628  1922 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1207 05:38:14.444644  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.444650  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.444655  1922 net.cpp:165] Memory required for data: 1844900860\nI1207 05:38:14.444660  1922 layer_factory.hpp:77] 
Creating layer L3_b1_brc1_bn\nI1207 05:38:14.444669  1922 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1207 05:38:14.444674  1922 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1207 05:38:14.444690  1922 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1207 05:38:14.444937  1922 net.cpp:150] Setting up L3_b1_brc1_bn\nI1207 05:38:14.444950  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.444955  1922 net.cpp:165] Memory required for data: 1856041980\nI1207 05:38:14.444965  1922 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1207 05:38:14.444973  1922 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1207 05:38:14.444979  1922 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1207 05:38:14.444991  1922 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1207 05:38:14.445001  1922 net.cpp:150] Setting up L3_b1_brc1_relu\nI1207 05:38:14.445008  1922 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1207 05:38:14.445013  1922 net.cpp:165] Memory required for data: 1867183100\nI1207 05:38:14.445019  1922 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1207 05:38:14.445029  1922 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1207 05:38:14.445034  1922 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1207 05:38:14.445042  1922 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1207 05:38:14.445672  1922 net.cpp:150] Setting up L3_b1_brc1_conv\nI1207 05:38:14.445691  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.445698  1922 net.cpp:165] Memory required for data: 1869968380\nI1207 05:38:14.445706  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1207 05:38:14.445719  1922 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1207 05:38:14.445725  1922 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1207 05:38:14.445734  1922 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1207 05:38:14.445992  1922 net.cpp:150] Setting up L3_b1_brc2_bn\nI1207 05:38:14.446005  1922 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.446012  1922 net.cpp:165] Memory required for data: 1872753660\nI1207 05:38:14.446022  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1207 05:38:14.446029  1922 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1207 05:38:14.446035  1922 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1207 05:38:14.446043  1922 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1207 05:38:14.446053  1922 net.cpp:150] Setting up L3_b1_brc2_relu\nI1207 05:38:14.446059  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.446063  1922 net.cpp:165] Memory required for data: 1875538940\nI1207 05:38:14.446069  1922 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1207 05:38:14.446089  1922 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1207 05:38:14.446094  1922 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1207 05:38:14.446105  1922 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1207 05:38:14.446506  1922 net.cpp:150] Setting up L3_b1_brc2_conv\nI1207 05:38:14.446521  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.446526  1922 net.cpp:165] Memory required for data: 1878324220\nI1207 05:38:14.446533  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1207 05:38:14.446547  1922 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1207 05:38:14.446555  1922 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1207 05:38:14.446565  1922 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1207 05:38:14.446835  1922 net.cpp:150] Setting up L3_b1_brc3_bn\nI1207 05:38:14.446847  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.446852  1922 net.cpp:165] Memory required for data: 1881109500\nI1207 05:38:14.446862  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1207 05:38:14.446871  1922 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1207 05:38:14.446877  1922 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1207 05:38:14.446883  1922 
net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1207 05:38:14.446893  1922 net.cpp:150] Setting up L3_b1_brc3_relu\nI1207 05:38:14.446900  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.446905  1922 net.cpp:165] Memory required for data: 1883894780\nI1207 05:38:14.446909  1922 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1207 05:38:14.446923  1922 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1207 05:38:14.446929  1922 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1207 05:38:14.446938  1922 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1207 05:38:14.447895  1922 net.cpp:150] Setting up L3_b1_brc3_conv\nI1207 05:38:14.447909  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.447914  1922 net.cpp:165] Memory required for data: 1889465340\nI1207 05:38:14.447923  1922 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1207 05:38:14.447937  1922 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1207 05:38:14.447944  1922 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1207 05:38:14.447957  1922 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1207 05:38:14.448928  1922 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1207 05:38:14.448943  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.448948  1922 net.cpp:165] Memory required for data: 1895035900\nI1207 05:38:14.448957  1922 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1207 05:38:14.448969  1922 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1207 05:38:14.448976  1922 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1207 05:38:14.448983  1922 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1207 05:38:14.448992  1922 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1207 05:38:14.449028  1922 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1207 05:38:14.449040  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.449045  1922 
net.cpp:165] Memory required for data: 1900606460\nI1207 05:38:14.449050  1922 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.449057  1922 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.449064  1922 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1207 05:38:14.449074  1922 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:38:14.449084  1922 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:38:14.449134  1922 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1207 05:38:14.449148  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.449156  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.449167  1922 net.cpp:165] Memory required for data: 1911747580\nI1207 05:38:14.449172  1922 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1207 05:38:14.449180  1922 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1207 05:38:14.449187  1922 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1207 05:38:14.449198  1922 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1207 05:38:14.449452  1922 net.cpp:150] Setting up L3_b2_brc1_bn\nI1207 05:38:14.449465  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.449470  1922 net.cpp:165] Memory required for data: 1917318140\nI1207 05:38:14.449481  1922 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1207 05:38:14.449489  1922 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1207 05:38:14.449496  1922 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1207 05:38:14.449502  1922 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1207 05:38:14.449512  1922 net.cpp:150] Setting up L3_b2_brc1_relu\nI1207 05:38:14.449519  1922 net.cpp:157] Top 
shape: 85 256 8 8 (1392640)\nI1207 05:38:14.449524  1922 net.cpp:165] Memory required for data: 1922888700\nI1207 05:38:14.449528  1922 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1207 05:38:14.449542  1922 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1207 05:38:14.449548  1922 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1207 05:38:14.449559  1922 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1207 05:38:14.450536  1922 net.cpp:150] Setting up L3_b2_brc1_conv\nI1207 05:38:14.450551  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.450556  1922 net.cpp:165] Memory required for data: 1925673980\nI1207 05:38:14.450564  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1207 05:38:14.450577  1922 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1207 05:38:14.450583  1922 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1207 05:38:14.450592  1922 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1207 05:38:14.450862  1922 net.cpp:150] Setting up L3_b2_brc2_bn\nI1207 05:38:14.450876  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.450881  1922 net.cpp:165] Memory required for data: 1928459260\nI1207 05:38:14.450891  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1207 05:38:14.450906  1922 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1207 05:38:14.450913  1922 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1207 05:38:14.450920  1922 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1207 05:38:14.450930  1922 net.cpp:150] Setting up L3_b2_brc2_relu\nI1207 05:38:14.450937  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.450942  1922 net.cpp:165] Memory required for data: 1931244540\nI1207 05:38:14.450947  1922 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1207 05:38:14.450958  1922 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1207 05:38:14.450963  1922 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1207 05:38:14.450974  1922 net.cpp:408] 
L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1207 05:38:14.452446  1922 net.cpp:150] Setting up L3_b2_brc2_conv\nI1207 05:38:14.452463  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.452469  1922 net.cpp:165] Memory required for data: 1934029820\nI1207 05:38:14.452478  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1207 05:38:14.452491  1922 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1207 05:38:14.452498  1922 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1207 05:38:14.452509  1922 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1207 05:38:14.452811  1922 net.cpp:150] Setting up L3_b2_brc3_bn\nI1207 05:38:14.452826  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.452831  1922 net.cpp:165] Memory required for data: 1936815100\nI1207 05:38:14.452842  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1207 05:38:14.452850  1922 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1207 05:38:14.452857  1922 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1207 05:38:14.452864  1922 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1207 05:38:14.452884  1922 net.cpp:150] Setting up L3_b2_brc3_relu\nI1207 05:38:14.452893  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.452898  1922 net.cpp:165] Memory required for data: 1939600380\nI1207 05:38:14.452903  1922 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1207 05:38:14.452916  1922 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1207 05:38:14.452922  1922 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1207 05:38:14.452934  1922 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1207 05:38:14.453920  1922 net.cpp:150] Setting up L3_b2_brc3_conv\nI1207 05:38:14.453936  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.453941  1922 net.cpp:165] Memory required for data: 1945170940\nI1207 05:38:14.453950  1922 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1207 05:38:14.453959  1922 net.cpp:100] 
Creating Layer L3_b2_sum_eltwise\nI1207 05:38:14.453966  1922 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1207 05:38:14.453974  1922 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1207 05:38:14.453984  1922 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1207 05:38:14.454020  1922 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1207 05:38:14.454035  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.454041  1922 net.cpp:165] Memory required for data: 1950741500\nI1207 05:38:14.454046  1922 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.454053  1922 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.454059  1922 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1207 05:38:14.454066  1922 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:38:14.454077  1922 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:38:14.454128  1922 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1207 05:38:14.454140  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.454147  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.454151  1922 net.cpp:165] Memory required for data: 1961882620\nI1207 05:38:14.454156  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1207 05:38:14.454167  1922 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1207 05:38:14.454174  1922 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1207 05:38:14.454181  1922 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1207 05:38:14.454433  1922 net.cpp:150] Setting up L3_b3_brc1_bn\nI1207 05:38:14.454448  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.454453  1922 
net.cpp:165] Memory required for data: 1967453180\nI1207 05:38:14.454464  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1207 05:38:14.454473  1922 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1207 05:38:14.454478  1922 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1207 05:38:14.454485  1922 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1207 05:38:14.454495  1922 net.cpp:150] Setting up L3_b3_brc1_relu\nI1207 05:38:14.454502  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.454506  1922 net.cpp:165] Memory required for data: 1973023740\nI1207 05:38:14.454511  1922 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1207 05:38:14.454522  1922 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1207 05:38:14.454527  1922 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1207 05:38:14.454540  1922 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1207 05:38:14.456538  1922 net.cpp:150] Setting up L3_b3_brc1_conv\nI1207 05:38:14.456557  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.456562  1922 net.cpp:165] Memory required for data: 1975809020\nI1207 05:38:14.456570  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1207 05:38:14.456580  1922 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1207 05:38:14.456598  1922 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1207 05:38:14.456607  1922 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1207 05:38:14.456882  1922 net.cpp:150] Setting up L3_b3_brc2_bn\nI1207 05:38:14.456897  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.456902  1922 net.cpp:165] Memory required for data: 1978594300\nI1207 05:38:14.456912  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1207 05:38:14.456920  1922 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1207 05:38:14.456926  1922 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1207 05:38:14.456934  1922 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1207 
05:38:14.456944  1922 net.cpp:150] Setting up L3_b3_brc2_relu\nI1207 05:38:14.456951  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.456955  1922 net.cpp:165] Memory required for data: 1981379580\nI1207 05:38:14.456960  1922 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1207 05:38:14.456974  1922 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1207 05:38:14.456980  1922 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1207 05:38:14.456992  1922 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1207 05:38:14.457389  1922 net.cpp:150] Setting up L3_b3_brc2_conv\nI1207 05:38:14.457404  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.457409  1922 net.cpp:165] Memory required for data: 1984164860\nI1207 05:38:14.457417  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1207 05:38:14.457429  1922 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1207 05:38:14.457437  1922 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1207 05:38:14.457444  1922 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1207 05:38:14.457711  1922 net.cpp:150] Setting up L3_b3_brc3_bn\nI1207 05:38:14.457725  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.457729  1922 net.cpp:165] Memory required for data: 1986950140\nI1207 05:38:14.457739  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1207 05:38:14.457747  1922 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1207 05:38:14.457754  1922 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1207 05:38:14.457762  1922 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1207 05:38:14.457772  1922 net.cpp:150] Setting up L3_b3_brc3_relu\nI1207 05:38:14.457779  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.457783  1922 net.cpp:165] Memory required for data: 1989735420\nI1207 05:38:14.457788  1922 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1207 05:38:14.457803  1922 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1207 05:38:14.457809  
1922 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1207 05:38:14.457818  1922 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1207 05:38:14.458775  1922 net.cpp:150] Setting up L3_b3_brc3_conv\nI1207 05:38:14.458789  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.458796  1922 net.cpp:165] Memory required for data: 1995305980\nI1207 05:38:14.458803  1922 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1207 05:38:14.458813  1922 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1207 05:38:14.458820  1922 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1207 05:38:14.458827  1922 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1207 05:38:14.458838  1922 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1207 05:38:14.458873  1922 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1207 05:38:14.458894  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.458899  1922 net.cpp:165] Memory required for data: 2000876540\nI1207 05:38:14.458904  1922 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.458911  1922 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.458917  1922 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1207 05:38:14.458925  1922 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:38:14.458945  1922 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:38:14.458998  1922 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1207 05:38:14.459010  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.459017  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.459022  1922 net.cpp:165] Memory required for data: 2012017660\nI1207 05:38:14.459028  1922 
layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1207 05:38:14.459038  1922 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1207 05:38:14.459044  1922 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1207 05:38:14.459053  1922 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1207 05:38:14.459313  1922 net.cpp:150] Setting up L3_b4_brc1_bn\nI1207 05:38:14.459326  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.459331  1922 net.cpp:165] Memory required for data: 2017588220\nI1207 05:38:14.459342  1922 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1207 05:38:14.459349  1922 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1207 05:38:14.459355  1922 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1207 05:38:14.459362  1922 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1207 05:38:14.459372  1922 net.cpp:150] Setting up L3_b4_brc1_relu\nI1207 05:38:14.459379  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.459384  1922 net.cpp:165] Memory required for data: 2023158780\nI1207 05:38:14.459388  1922 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1207 05:38:14.459403  1922 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1207 05:38:14.459408  1922 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1207 05:38:14.459417  1922 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1207 05:38:14.460368  1922 net.cpp:150] Setting up L3_b4_brc1_conv\nI1207 05:38:14.460383  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.460388  1922 net.cpp:165] Memory required for data: 2025944060\nI1207 05:38:14.460397  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1207 05:38:14.460407  1922 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1207 05:38:14.460412  1922 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1207 05:38:14.460423  1922 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1207 05:38:14.460685  1922 net.cpp:150] Setting up L3_b4_brc2_bn\nI1207 
05:38:14.460703  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.460708  1922 net.cpp:165] Memory required for data: 2028729340\nI1207 05:38:14.460719  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1207 05:38:14.460731  1922 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1207 05:38:14.460737  1922 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1207 05:38:14.460744  1922 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1207 05:38:14.460754  1922 net.cpp:150] Setting up L3_b4_brc2_relu\nI1207 05:38:14.460762  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.460767  1922 net.cpp:165] Memory required for data: 2031514620\nI1207 05:38:14.460770  1922 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1207 05:38:14.460781  1922 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1207 05:38:14.460788  1922 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1207 05:38:14.460798  1922 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1207 05:38:14.461199  1922 net.cpp:150] Setting up L3_b4_brc2_conv\nI1207 05:38:14.461213  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.461218  1922 net.cpp:165] Memory required for data: 2034299900\nI1207 05:38:14.461227  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1207 05:38:14.461236  1922 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1207 05:38:14.461242  1922 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1207 05:38:14.461251  1922 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1207 05:38:14.461511  1922 net.cpp:150] Setting up L3_b4_brc3_bn\nI1207 05:38:14.461531  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.461536  1922 net.cpp:165] Memory required for data: 2037085180\nI1207 05:38:14.461546  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1207 05:38:14.461555  1922 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1207 05:38:14.461561  1922 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1207 
05:38:14.461570  1922 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1207 05:38:14.461581  1922 net.cpp:150] Setting up L3_b4_brc3_relu\nI1207 05:38:14.461588  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.461593  1922 net.cpp:165] Memory required for data: 2039870460\nI1207 05:38:14.461597  1922 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1207 05:38:14.461611  1922 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1207 05:38:14.461617  1922 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1207 05:38:14.461625  1922 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1207 05:38:14.462580  1922 net.cpp:150] Setting up L3_b4_brc3_conv\nI1207 05:38:14.462596  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.462601  1922 net.cpp:165] Memory required for data: 2045441020\nI1207 05:38:14.462610  1922 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1207 05:38:14.462618  1922 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1207 05:38:14.462625  1922 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1207 05:38:14.462632  1922 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1207 05:38:14.462643  1922 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1207 05:38:14.462678  1922 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1207 05:38:14.462695  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.462702  1922 net.cpp:165] Memory required for data: 2051011580\nI1207 05:38:14.462707  1922 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.462716  1922 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.462723  1922 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1207 05:38:14.462733  1922 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 
05:38:14.462743  1922 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:38:14.462795  1922 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1207 05:38:14.462806  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.462813  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.462817  1922 net.cpp:165] Memory required for data: 2062152700\nI1207 05:38:14.462822  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1207 05:38:14.462833  1922 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1207 05:38:14.462841  1922 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1207 05:38:14.462848  1922 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1207 05:38:14.463099  1922 net.cpp:150] Setting up L3_b5_brc1_bn\nI1207 05:38:14.463111  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.463115  1922 net.cpp:165] Memory required for data: 2067723260\nI1207 05:38:14.463126  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1207 05:38:14.463135  1922 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1207 05:38:14.463140  1922 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1207 05:38:14.463150  1922 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1207 05:38:14.463160  1922 net.cpp:150] Setting up L3_b5_brc1_relu\nI1207 05:38:14.463167  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.463171  1922 net.cpp:165] Memory required for data: 2073293820\nI1207 05:38:14.463176  1922 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1207 05:38:14.463191  1922 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1207 05:38:14.463207  1922 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1207 05:38:14.463217  1922 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1207 05:38:14.464172  1922 net.cpp:150] Setting up L3_b5_brc1_conv\nI1207 05:38:14.464187  1922 net.cpp:157] Top 
shape: 85 128 8 8 (696320)\nI1207 05:38:14.464192  1922 net.cpp:165] Memory required for data: 2076079100\nI1207 05:38:14.464201  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1207 05:38:14.464212  1922 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1207 05:38:14.464220  1922 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1207 05:38:14.464228  1922 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1207 05:38:14.464485  1922 net.cpp:150] Setting up L3_b5_brc2_bn\nI1207 05:38:14.464498  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.464503  1922 net.cpp:165] Memory required for data: 2078864380\nI1207 05:38:14.464512  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1207 05:38:14.464521  1922 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1207 05:38:14.464527  1922 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1207 05:38:14.464534  1922 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1207 05:38:14.464545  1922 net.cpp:150] Setting up L3_b5_brc2_relu\nI1207 05:38:14.464551  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.464556  1922 net.cpp:165] Memory required for data: 2081649660\nI1207 05:38:14.464560  1922 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1207 05:38:14.464574  1922 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1207 05:38:14.464581  1922 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1207 05:38:14.464592  1922 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1207 05:38:14.464998  1922 net.cpp:150] Setting up L3_b5_brc2_conv\nI1207 05:38:14.465013  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.465018  1922 net.cpp:165] Memory required for data: 2084434940\nI1207 05:38:14.465066  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1207 05:38:14.465080  1922 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1207 05:38:14.465085  1922 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1207 05:38:14.465093  1922 net.cpp:408] L3_b5_brc3_bn 
-> L3_b5_brc3_bn_top\nI1207 05:38:14.465363  1922 net.cpp:150] Setting up L3_b5_brc3_bn\nI1207 05:38:14.465375  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.465380  1922 net.cpp:165] Memory required for data: 2087220220\nI1207 05:38:14.465390  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1207 05:38:14.465399  1922 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1207 05:38:14.465405  1922 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1207 05:38:14.465415  1922 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1207 05:38:14.465425  1922 net.cpp:150] Setting up L3_b5_brc3_relu\nI1207 05:38:14.465432  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.465437  1922 net.cpp:165] Memory required for data: 2090005500\nI1207 05:38:14.465442  1922 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1207 05:38:14.465452  1922 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1207 05:38:14.465458  1922 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1207 05:38:14.465467  1922 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1207 05:38:14.466415  1922 net.cpp:150] Setting up L3_b5_brc3_conv\nI1207 05:38:14.466430  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.466435  1922 net.cpp:165] Memory required for data: 2095576060\nI1207 05:38:14.466444  1922 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1207 05:38:14.466452  1922 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1207 05:38:14.466459  1922 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1207 05:38:14.466470  1922 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1207 05:38:14.466477  1922 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1207 05:38:14.466511  1922 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1207 05:38:14.466521  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.466532  1922 net.cpp:165] Memory required for data: 2101146620\nI1207 
05:38:14.466538  1922 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.466549  1922 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.466555  1922 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1207 05:38:14.466567  1922 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:38:14.466576  1922 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:38:14.466627  1922 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1207 05:38:14.466642  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.466650  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.466655  1922 net.cpp:165] Memory required for data: 2112287740\nI1207 05:38:14.466660  1922 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1207 05:38:14.466667  1922 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1207 05:38:14.466673  1922 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1207 05:38:14.466684  1922 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1207 05:38:14.466944  1922 net.cpp:150] Setting up L3_b6_brc1_bn\nI1207 05:38:14.466958  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.466962  1922 net.cpp:165] Memory required for data: 2117858300\nI1207 05:38:14.466972  1922 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1207 05:38:14.466980  1922 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1207 05:38:14.466986  1922 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1207 05:38:14.466996  1922 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1207 05:38:14.467007  1922 net.cpp:150] Setting up L3_b6_brc1_relu\nI1207 05:38:14.467015  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.467018  1922 
net.cpp:165] Memory required for data: 2123428860\nI1207 05:38:14.467023  1922 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1207 05:38:14.467034  1922 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1207 05:38:14.467039  1922 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1207 05:38:14.467048  1922 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1207 05:38:14.467999  1922 net.cpp:150] Setting up L3_b6_brc1_conv\nI1207 05:38:14.468014  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.468019  1922 net.cpp:165] Memory required for data: 2126214140\nI1207 05:38:14.468029  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1207 05:38:14.468039  1922 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1207 05:38:14.468046  1922 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1207 05:38:14.468055  1922 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1207 05:38:14.468323  1922 net.cpp:150] Setting up L3_b6_brc2_bn\nI1207 05:38:14.468336  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.468341  1922 net.cpp:165] Memory required for data: 2128999420\nI1207 05:38:14.468351  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1207 05:38:14.468359  1922 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1207 05:38:14.468365  1922 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1207 05:38:14.468372  1922 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1207 05:38:14.468382  1922 net.cpp:150] Setting up L3_b6_brc2_relu\nI1207 05:38:14.468389  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.468394  1922 net.cpp:165] Memory required for data: 2131784700\nI1207 05:38:14.468399  1922 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1207 05:38:14.468412  1922 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1207 05:38:14.468418  1922 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1207 05:38:14.468430  1922 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1207 05:38:14.468842  
1922 net.cpp:150] Setting up L3_b6_brc2_conv\nI1207 05:38:14.468863  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.468868  1922 net.cpp:165] Memory required for data: 2134569980\nI1207 05:38:14.468876  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1207 05:38:14.468888  1922 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1207 05:38:14.468895  1922 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1207 05:38:14.468906  1922 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1207 05:38:14.469172  1922 net.cpp:150] Setting up L3_b6_brc3_bn\nI1207 05:38:14.469184  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.469188  1922 net.cpp:165] Memory required for data: 2137355260\nI1207 05:38:14.469199  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1207 05:38:14.469208  1922 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1207 05:38:14.469213  1922 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1207 05:38:14.469220  1922 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1207 05:38:14.469230  1922 net.cpp:150] Setting up L3_b6_brc3_relu\nI1207 05:38:14.469238  1922 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1207 05:38:14.469243  1922 net.cpp:165] Memory required for data: 2140140540\nI1207 05:38:14.469246  1922 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1207 05:38:14.469265  1922 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1207 05:38:14.469276  1922 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1207 05:38:14.469295  1922 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1207 05:38:14.470305  1922 net.cpp:150] Setting up L3_b6_brc3_conv\nI1207 05:38:14.470321  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.470327  1922 net.cpp:165] Memory required for data: 2145711100\nI1207 05:38:14.470336  1922 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1207 05:38:14.470345  1922 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1207 05:38:14.470352  1922 
net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1207 05:38:14.470360  1922 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1207 05:38:14.470371  1922 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1207 05:38:14.470409  1922 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1207 05:38:14.470422  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.470427  1922 net.cpp:165] Memory required for data: 2151281660\nI1207 05:38:14.470432  1922 layer_factory.hpp:77] Creating layer post_bn\nI1207 05:38:14.470439  1922 net.cpp:100] Creating Layer post_bn\nI1207 05:38:14.470445  1922 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1207 05:38:14.470459  1922 net.cpp:408] post_bn -> post_bn_top\nI1207 05:38:14.470726  1922 net.cpp:150] Setting up post_bn\nI1207 05:38:14.470739  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.470744  1922 net.cpp:165] Memory required for data: 2156852220\nI1207 05:38:14.470755  1922 layer_factory.hpp:77] Creating layer post_relu\nI1207 05:38:14.470767  1922 net.cpp:100] Creating Layer post_relu\nI1207 05:38:14.470774  1922 net.cpp:434] post_relu <- post_bn_top\nI1207 05:38:14.470782  1922 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1207 05:38:14.470791  1922 net.cpp:150] Setting up post_relu\nI1207 05:38:14.470798  1922 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1207 05:38:14.470803  1922 net.cpp:165] Memory required for data: 2162422780\nI1207 05:38:14.470808  1922 layer_factory.hpp:77] Creating layer post_pool\nI1207 05:38:14.470818  1922 net.cpp:100] Creating Layer post_pool\nI1207 05:38:14.470823  1922 net.cpp:434] post_pool <- post_bn_top\nI1207 05:38:14.470830  1922 net.cpp:408] post_pool -> post_pool\nI1207 05:38:14.470867  1922 net.cpp:150] Setting up post_pool\nI1207 05:38:14.470882  1922 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1207 05:38:14.470887  1922 net.cpp:165] Memory required for data: 2162509820\nI1207 05:38:14.470893  1922 
layer_factory.hpp:77] Creating layer post_FC\nI1207 05:38:14.470903  1922 net.cpp:100] Creating Layer post_FC\nI1207 05:38:14.470909  1922 net.cpp:434] post_FC <- post_pool\nI1207 05:38:14.470921  1922 net.cpp:408] post_FC -> post_FC_top\nI1207 05:38:14.471112  1922 net.cpp:150] Setting up post_FC\nI1207 05:38:14.471125  1922 net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.471130  1922 net.cpp:165] Memory required for data: 2162513220\nI1207 05:38:14.471139  1922 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1207 05:38:14.471148  1922 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1207 05:38:14.471153  1922 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1207 05:38:14.471161  1922 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1207 05:38:14.471174  1922 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1207 05:38:14.471226  1922 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1207 05:38:14.471238  1922 net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.471245  1922 net.cpp:157] Top shape: 85 10 (850)\nI1207 05:38:14.471249  1922 net.cpp:165] Memory required for data: 2162520020\nI1207 05:38:14.471254  1922 layer_factory.hpp:77] Creating layer accuracy\nI1207 05:38:14.471266  1922 net.cpp:100] Creating Layer accuracy\nI1207 05:38:14.471271  1922 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1207 05:38:14.471278  1922 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1207 05:38:14.471287  1922 net.cpp:408] accuracy -> accuracy\nI1207 05:38:14.471298  1922 net.cpp:150] Setting up accuracy\nI1207 05:38:14.471307  1922 net.cpp:157] Top shape: (1)\nI1207 05:38:14.471310  1922 net.cpp:165] Memory required for data: 2162520024\nI1207 05:38:14.471315  1922 layer_factory.hpp:77] Creating layer loss\nI1207 05:38:14.471323  1922 net.cpp:100] Creating Layer loss\nI1207 05:38:14.471328  1922 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1207 
05:38:14.471334  1922 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1207 05:38:14.471343  1922 net.cpp:408] loss -> loss\nI1207 05:38:14.471354  1922 layer_factory.hpp:77] Creating layer loss\nI1207 05:38:14.471477  1922 net.cpp:150] Setting up loss\nI1207 05:38:14.471490  1922 net.cpp:157] Top shape: (1)\nI1207 05:38:14.471494  1922 net.cpp:160]     with loss weight 1\nI1207 05:38:14.471509  1922 net.cpp:165] Memory required for data: 2162520028\nI1207 05:38:14.471515  1922 net.cpp:226] loss needs backward computation.\nI1207 05:38:14.471523  1922 net.cpp:228] accuracy does not need backward computation.\nI1207 05:38:14.471529  1922 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1207 05:38:14.471534  1922 net.cpp:226] post_FC needs backward computation.\nI1207 05:38:14.471539  1922 net.cpp:226] post_pool needs backward computation.\nI1207 05:38:14.471544  1922 net.cpp:226] post_relu needs backward computation.\nI1207 05:38:14.471549  1922 net.cpp:226] post_bn needs backward computation.\nI1207 05:38:14.471554  1922 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1207 05:38:14.471559  1922 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1207 05:38:14.471563  1922 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1207 05:38:14.471568  1922 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1207 05:38:14.471573  1922 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1207 05:38:14.471578  1922 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1207 05:38:14.471583  1922 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1207 05:38:14.471588  1922 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1207 05:38:14.471593  1922 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1207 05:38:14.471597  1922 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1207 05:38:14.471603  1922 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward 
computation.\nI1207 05:38:14.471608  1922 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1207 05:38:14.471613  1922 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1207 05:38:14.471618  1922 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1207 05:38:14.471623  1922 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1207 05:38:14.471629  1922 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1207 05:38:14.471640  1922 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1207 05:38:14.471647  1922 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1207 05:38:14.471652  1922 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1207 05:38:14.471657  1922 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1207 05:38:14.471662  1922 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1207 05:38:14.471666  1922 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471673  1922 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1207 05:38:14.471678  1922 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1207 05:38:14.471683  1922 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1207 05:38:14.471693  1922 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1207 05:38:14.471699  1922 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1207 05:38:14.471705  1922 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1207 05:38:14.471710  1922 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1207 05:38:14.471715  1922 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1207 05:38:14.471720  1922 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1207 05:38:14.471725  1922 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1207 05:38:14.471731  1922 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471736  1922 
net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1207 05:38:14.471742  1922 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1207 05:38:14.471747  1922 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1207 05:38:14.471752  1922 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1207 05:38:14.471757  1922 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1207 05:38:14.471763  1922 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1207 05:38:14.471768  1922 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1207 05:38:14.471773  1922 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1207 05:38:14.471778  1922 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1207 05:38:14.471783  1922 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1207 05:38:14.471788  1922 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471794  1922 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1207 05:38:14.471802  1922 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1207 05:38:14.471808  1922 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1207 05:38:14.471813  1922 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1207 05:38:14.471819  1922 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1207 05:38:14.471824  1922 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1207 05:38:14.471829  1922 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1207 05:38:14.471834  1922 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1207 05:38:14.471839  1922 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1207 05:38:14.471845  1922 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1207 05:38:14.471850  1922 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471856  1922 net.cpp:226] L3_b1_sum_eltwise needs backward 
computation.\nI1207 05:38:14.471861  1922 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1207 05:38:14.471868  1922 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1207 05:38:14.471873  1922 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1207 05:38:14.471877  1922 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1207 05:38:14.471882  1922 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1207 05:38:14.471889  1922 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1207 05:38:14.471899  1922 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1207 05:38:14.471904  1922 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1207 05:38:14.471910  1922 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1207 05:38:14.471915  1922 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1207 05:38:14.471920  1922 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471925  1922 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1207 05:38:14.471931  1922 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1207 05:38:14.471936  1922 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1207 05:38:14.471941  1922 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1207 05:38:14.471947  1922 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1207 05:38:14.471952  1922 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1207 05:38:14.471957  1922 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1207 05:38:14.471962  1922 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1207 05:38:14.471968  1922 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1207 05:38:14.471973  1922 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1207 05:38:14.471978  1922 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.471983  1922 
net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1207 05:38:14.471989  1922 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1207 05:38:14.471995  1922 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1207 05:38:14.472000  1922 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1207 05:38:14.472005  1922 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1207 05:38:14.472010  1922 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1207 05:38:14.472015  1922 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1207 05:38:14.472021  1922 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1207 05:38:14.472026  1922 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1207 05:38:14.472031  1922 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1207 05:38:14.472036  1922 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472043  1922 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1207 05:38:14.472051  1922 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1207 05:38:14.472057  1922 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1207 05:38:14.472062  1922 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1207 05:38:14.472067  1922 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1207 05:38:14.472072  1922 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1207 05:38:14.472077  1922 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1207 05:38:14.472084  1922 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1207 05:38:14.472088  1922 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1207 05:38:14.472093  1922 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1207 05:38:14.472100  1922 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472105  1922 net.cpp:226] L2_b3_sum_eltwise needs backward 
computation.\nI1207 05:38:14.472110  1922 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1207 05:38:14.472115  1922 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1207 05:38:14.472121  1922 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1207 05:38:14.472126  1922 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1207 05:38:14.472131  1922 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1207 05:38:14.472137  1922 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1207 05:38:14.472142  1922 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1207 05:38:14.472147  1922 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1207 05:38:14.472159  1922 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1207 05:38:14.472165  1922 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472170  1922 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1207 05:38:14.472177  1922 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1207 05:38:14.472182  1922 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1207 05:38:14.472187  1922 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1207 05:38:14.472193  1922 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1207 05:38:14.472198  1922 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1207 05:38:14.472203  1922 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1207 05:38:14.472209  1922 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1207 05:38:14.472214  1922 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1207 05:38:14.472219  1922 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1207 05:38:14.472225  1922 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472230  1922 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1207 05:38:14.472236  1922 
net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1207 05:38:14.472241  1922 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1207 05:38:14.472247  1922 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1207 05:38:14.472252  1922 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1207 05:38:14.472257  1922 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1207 05:38:14.472263  1922 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1207 05:38:14.472268  1922 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1207 05:38:14.472273  1922 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1207 05:38:14.472280  1922 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1207 05:38:14.472285  1922 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1207 05:38:14.472290  1922 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472295  1922 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1207 05:38:14.472301  1922 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1207 05:38:14.472306  1922 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1207 05:38:14.472311  1922 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1207 05:38:14.472317  1922 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1207 05:38:14.472323  1922 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1207 05:38:14.472328  1922 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1207 05:38:14.472333  1922 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1207 05:38:14.472339  1922 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1207 05:38:14.472344  1922 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1207 05:38:14.472349  1922 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472355  1922 net.cpp:226] L1_b5_sum_eltwise needs backward 
computation.\nI1207 05:38:14.472362  1922 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1207 05:38:14.472371  1922 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1207 05:38:14.472378  1922 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1207 05:38:14.472383  1922 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1207 05:38:14.472388  1922 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1207 05:38:14.472394  1922 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1207 05:38:14.472399  1922 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1207 05:38:14.472405  1922 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1207 05:38:14.472410  1922 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1207 05:38:14.472416  1922 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472427  1922 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1207 05:38:14.472434  1922 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1207 05:38:14.472440  1922 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1207 05:38:14.472445  1922 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1207 05:38:14.472451  1922 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1207 05:38:14.472456  1922 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1207 05:38:14.472462  1922 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1207 05:38:14.472467  1922 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1207 05:38:14.472473  1922 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1207 05:38:14.472478  1922 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1207 05:38:14.472484  1922 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472489  1922 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1207 05:38:14.472496  1922 
net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1207 05:38:14.472501  1922 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1207 05:38:14.472506  1922 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1207 05:38:14.472512  1922 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1207 05:38:14.472517  1922 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1207 05:38:14.472522  1922 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1207 05:38:14.472528  1922 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1207 05:38:14.472533  1922 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1207 05:38:14.472538  1922 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1207 05:38:14.472544  1922 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472550  1922 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1207 05:38:14.472556  1922 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1207 05:38:14.472561  1922 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1207 05:38:14.472566  1922 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1207 05:38:14.472573  1922 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1207 05:38:14.472578  1922 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1207 05:38:14.472582  1922 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1207 05:38:14.472589  1922 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1207 05:38:14.472594  1922 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1207 05:38:14.472599  1922 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1207 05:38:14.472604  1922 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1207 05:38:14.472610  1922 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1207 05:38:14.472616  1922 net.cpp:226] L1_b1_chanInc_conv needs backward 
computation.\nI1207 05:38:14.472622  1922 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1207 05:38:14.472627  1922 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1207 05:38:14.472633  1922 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1207 05:38:14.472638  1922 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1207 05:38:14.472645  1922 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1207 05:38:14.472649  1922 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1207 05:38:14.472656  1922 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1207 05:38:14.472661  1922 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1207 05:38:14.472666  1922 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1207 05:38:14.472671  1922 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1207 05:38:14.472676  1922 net.cpp:226] pre_conv needs backward computation.\nI1207 05:38:14.472683  1922 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1207 05:38:14.472700  1922 net.cpp:228] dataLayer does not need backward computation.\nI1207 05:38:14.472705  1922 net.cpp:270] This network produces output accuracy\nI1207 05:38:14.472712  1922 net.cpp:270] This network produces output loss\nI1207 05:38:14.472968  1922 net.cpp:283] Network initialization done.\nI1207 05:38:14.473569  1922 solver.cpp:60] Solver scaffolding done.\nI1207 05:38:14.692836  1922 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1207 05:38:15.030937  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:15.030985  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:15.036893  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:15.693405  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated 
params: examples/sc/architectures/arch.prototxt\nI1207 05:38:15.693475  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:15.700433  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:16.443009  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:16.443084  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:16.450928  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:16.815299  1922 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1207 05:38:17.269668  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:17.269737  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:17.278762  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:18.200060  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:18.200109  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:18.209499  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:19.208979  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:19.209026  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1207 05:38:19.219234  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:20.301024  1922 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1207 05:38:20.301100  1922 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI1207 05:38:20.312981  1922 data_layer.cpp:41] output data size: 85,3,32,32\nI1207 05:38:20.362080  1948 blocking_queue.cpp:50] Waiting for data\nI1207 05:38:20.943960  1922 parallel.cpp:425] Starting Optimization\nI1207 05:38:20.945907  1922 solver.cpp:279] Solving Cifar-ResNeXt\nI1207 05:38:20.945926  1922 solver.cpp:280] Learning Rate Policy: triangular\nI1207 05:38:20.950016  1922 solver.cpp:337] Iteration 0, Testing net (#0)\nI1207 05:40:59.720090  1922 solver.cpp:404]     Test net output #0: accuracy = 0.112\nI1207 05:40:59.720407  1922 solver.cpp:404]     Test net output #1: loss = 2.39504 (* 1 = 2.39504 loss)\nI1207 05:41:04.951356  1922 solver.cpp:228] Iteration 0, loss = 2.32656\nI1207 05:41:04.951407  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 05:41:04.951426  1922 solver.cpp:244]     Train net output #1: loss = 2.32656 (* 1 = 2.32656 loss)\nI1207 05:41:05.085891  1922 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1207 05:48:00.005970  1922 solver.cpp:337] Iteration 100, Testing net (#0)\nI1207 05:50:38.187328  1922 solver.cpp:404]     Test net output #0: accuracy = 0.274765\nI1207 05:50:38.187604  1922 solver.cpp:404]     Test net output #1: loss = 2.01333 (* 1 = 2.01333 loss)\nI1207 05:50:42.136420  1922 solver.cpp:228] Iteration 100, loss = 2.13525\nI1207 05:50:42.136461  1922 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1207 05:50:42.136477  1922 solver.cpp:244]     Train net output #1: loss = 2.13525 (* 1 = 2.13525 loss)\nI1207 05:50:42.372990  1922 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1207 05:57:37.014508  1922 solver.cpp:337] Iteration 200, Testing net (#0)\nI1207 06:00:15.234287  1922 solver.cpp:404]     Test net output #0: accuracy = 0.310411\nI1207 06:00:15.234547  1922 solver.cpp:404]     Test net output #1: loss = 1.91424 (* 1 = 1.91424 loss)\nI1207 06:00:19.175807  1922 solver.cpp:228] Iteration 200, loss = 2.02312\nI1207 06:00:19.175856  1922 solver.cpp:244]     Train net 
output #0: accuracy = 0.329412\nI1207 06:00:19.175873  1922 solver.cpp:244]     Train net output #1: loss = 2.02312 (* 1 = 2.02312 loss)\nI1207 06:00:19.414407  1922 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1207 06:07:14.023519  1922 solver.cpp:337] Iteration 300, Testing net (#0)\nI1207 06:09:52.346817  1922 solver.cpp:404]     Test net output #0: accuracy = 0.332647\nI1207 06:09:52.347086  1922 solver.cpp:404]     Test net output #1: loss = 1.84207 (* 1 = 1.84207 loss)\nI1207 06:09:56.287678  1922 solver.cpp:228] Iteration 300, loss = 1.83255\nI1207 06:09:56.287726  1922 solver.cpp:244]     Train net output #0: accuracy = 0.352941\nI1207 06:09:56.287744  1922 solver.cpp:244]     Train net output #1: loss = 1.83255 (* 1 = 1.83255 loss)\nI1207 06:09:56.525414  1922 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1207 06:16:51.260555  1922 solver.cpp:337] Iteration 400, Testing net (#0)\nI1207 06:19:29.426304  1922 solver.cpp:404]     Test net output #0: accuracy = 0.357706\nI1207 06:19:29.426575  1922 solver.cpp:404]     Test net output #1: loss = 1.77241 (* 1 = 1.77241 loss)\nI1207 06:19:33.366374  1922 solver.cpp:228] Iteration 400, loss = 1.7737\nI1207 06:19:33.366421  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 06:19:33.366438  1922 solver.cpp:244]     Train net output #1: loss = 1.7737 (* 1 = 1.7737 loss)\nI1207 06:19:33.713539  1922 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1207 06:26:28.364234  1922 solver.cpp:337] Iteration 500, Testing net (#0)\nI1207 06:29:06.587554  1922 solver.cpp:404]     Test net output #0: accuracy = 0.377294\nI1207 06:29:06.587816  1922 solver.cpp:404]     Test net output #1: loss = 1.73559 (* 1 = 1.73559 loss)\nI1207 06:29:10.529541  1922 solver.cpp:228] Iteration 500, loss = 1.72849\nI1207 06:29:10.529583  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 06:29:10.529601  1922 solver.cpp:244]     Train net output #1: loss = 1.72849 (* 1 = 1.72849 loss)\nI1207 
06:29:10.766206  1922 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1207 06:36:05.447110  1922 solver.cpp:337] Iteration 600, Testing net (#0)\nI1207 06:38:43.686641  1922 solver.cpp:404]     Test net output #0: accuracy = 0.362589\nI1207 06:38:43.686931  1922 solver.cpp:404]     Test net output #1: loss = 1.75157 (* 1 = 1.75157 loss)\nI1207 06:38:47.629292  1922 solver.cpp:228] Iteration 600, loss = 1.62574\nI1207 06:38:47.629336  1922 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1207 06:38:47.629353  1922 solver.cpp:244]     Train net output #1: loss = 1.62574 (* 1 = 1.62574 loss)\nI1207 06:38:47.880831  1922 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1207 06:45:42.563418  1922 solver.cpp:337] Iteration 700, Testing net (#0)\nI1207 06:48:20.802810  1922 solver.cpp:404]     Test net output #0: accuracy = 0.387353\nI1207 06:48:20.803107  1922 solver.cpp:404]     Test net output #1: loss = 1.69189 (* 1 = 1.69189 loss)\nI1207 06:48:24.742509  1922 solver.cpp:228] Iteration 700, loss = 1.64496\nI1207 06:48:24.742549  1922 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1207 06:48:24.742566  1922 solver.cpp:244]     Train net output #1: loss = 1.64496 (* 1 = 1.64496 loss)\nI1207 06:48:24.982079  1922 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1207 06:55:19.647358  1922 solver.cpp:337] Iteration 800, Testing net (#0)\nI1207 06:57:57.915130  1922 solver.cpp:404]     Test net output #0: accuracy = 0.409177\nI1207 06:57:57.915398  1922 solver.cpp:404]     Test net output #1: loss = 1.66085 (* 1 = 1.66085 loss)\nI1207 06:58:01.856958  1922 solver.cpp:228] Iteration 800, loss = 1.77355\nI1207 06:58:01.856997  1922 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1207 06:58:01.857012  1922 solver.cpp:244]     Train net output #1: loss = 1.77355 (* 1 = 1.77355 loss)\nI1207 06:58:02.096843  1922 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1207 07:04:56.887424  1922 solver.cpp:337] Iteration 900, Testing net 
(#0)\nI1207 07:07:35.174866  1922 solver.cpp:404]     Test net output #0: accuracy = 0.42153\nI1207 07:07:35.175142  1922 solver.cpp:404]     Test net output #1: loss = 1.60678 (* 1 = 1.60678 loss)\nI1207 07:07:39.116495  1922 solver.cpp:228] Iteration 900, loss = 1.58378\nI1207 07:07:39.116534  1922 solver.cpp:244]     Train net output #0: accuracy = 0.482353\nI1207 07:07:39.116550  1922 solver.cpp:244]     Train net output #1: loss = 1.58378 (* 1 = 1.58378 loss)\nI1207 07:07:39.352471  1922 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1207 07:14:34.066395  1922 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1207 07:17:12.392530  1922 solver.cpp:404]     Test net output #0: accuracy = 0.413295\nI1207 07:17:12.392818  1922 solver.cpp:404]     Test net output #1: loss = 1.63675 (* 1 = 1.63675 loss)\nI1207 07:17:16.335116  1922 solver.cpp:228] Iteration 1000, loss = 1.74607\nI1207 07:17:16.335156  1922 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1207 07:17:16.335172  1922 solver.cpp:244]     Train net output #1: loss = 1.74607 (* 1 = 1.74607 loss)\nI1207 07:17:16.572278  1922 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1207 07:24:11.178329  1922 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1207 07:26:49.510434  1922 solver.cpp:404]     Test net output #0: accuracy = 0.392647\nI1207 07:26:49.510695  1922 solver.cpp:404]     Test net output #1: loss = 1.66679 (* 1 = 1.66679 loss)\nI1207 07:26:53.453266  1922 solver.cpp:228] Iteration 1100, loss = 1.69004\nI1207 07:26:53.453307  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 07:26:53.453325  1922 solver.cpp:244]     Train net output #1: loss = 1.69004 (* 1 = 1.69004 loss)\nI1207 07:26:53.690583  1922 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1207 07:33:48.557497  1922 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1207 07:36:26.885998  1922 solver.cpp:404]     Test net output #0: accuracy = 0.40853\nI1207 07:36:26.886267  1922 solver.cpp:404]     
Test net output #1: loss = 1.63533 (* 1 = 1.63533 loss)\nI1207 07:36:30.828183  1922 solver.cpp:228] Iteration 1200, loss = 1.81917\nI1207 07:36:30.828224  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 07:36:30.828241  1922 solver.cpp:244]     Train net output #1: loss = 1.81917 (* 1 = 1.81917 loss)\nI1207 07:36:31.066504  1922 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1207 07:43:25.712452  1922 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1207 07:46:04.121116  1922 solver.cpp:404]     Test net output #0: accuracy = 0.405236\nI1207 07:46:04.121405  1922 solver.cpp:404]     Test net output #1: loss = 1.61241 (* 1 = 1.61241 loss)\nI1207 07:46:08.063958  1922 solver.cpp:228] Iteration 1300, loss = 1.48452\nI1207 07:46:08.064007  1922 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1207 07:46:08.064030  1922 solver.cpp:244]     Train net output #1: loss = 1.48452 (* 1 = 1.48452 loss)\nI1207 07:46:08.298943  1922 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1207 07:46:29.246857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43275 > 2) by scale factor 0.822116\nI1207 07:46:33.435768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53513 > 2) by scale factor 0.788914\nI1207 07:53:02.865448  1922 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1207 07:55:40.631670  1922 solver.cpp:404]     Test net output #0: accuracy = 0.429294\nI1207 07:55:40.631925  1922 solver.cpp:404]     Test net output #1: loss = 1.56036 (* 1 = 1.56036 loss)\nI1207 07:55:44.573266  1922 solver.cpp:228] Iteration 1400, loss = 1.37082\nI1207 07:55:44.573316  1922 solver.cpp:244]     Train net output #0: accuracy = 0.470588\nI1207 07:55:44.573341  1922 solver.cpp:244]     Train net output #1: loss = 1.37082 (* 1 = 1.37082 loss)\nI1207 07:55:44.810786  1922 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1207 08:02:22.567890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.45587 > 2) by scale factor 0.814376\nI1207 08:02:39.325170  1922 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1207 08:05:17.186653  1922 solver.cpp:404]     Test net output #0: accuracy = 0.408765\nI1207 08:05:17.186925  1922 solver.cpp:404]     Test net output #1: loss = 1.63205 (* 1 = 1.63205 loss)\nI1207 08:05:21.128899  1922 solver.cpp:228] Iteration 1500, loss = 1.60659\nI1207 08:05:21.128952  1922 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1207 08:05:21.128976  1922 solver.cpp:244]     Train net output #1: loss = 1.60659 (* 1 = 1.60659 loss)\nI1207 08:05:21.367398  1922 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1207 08:08:34.052819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84206 > 2) by scale factor 0.703714\nI1207 08:09:20.126221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65865 > 2) by scale factor 0.752262\nI1207 08:09:24.317036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2132 > 2) by scale factor 0.903668\nI1207 08:09:32.695027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14139 > 2) by scale factor 0.933975\nI1207 08:09:53.638871  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14982 > 2) by scale factor 0.930309\nI1207 08:12:16.048620  1922 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1207 08:14:53.966280  1922 solver.cpp:404]     Test net output #0: accuracy = 0.408412\nI1207 08:14:53.966542  1922 solver.cpp:404]     Test net output #1: loss = 1.61482 (* 1 = 1.61482 loss)\nI1207 08:14:57.908215  1922 solver.cpp:228] Iteration 1600, loss = 1.67864\nI1207 08:14:57.908269  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 08:14:57.908294  1922 solver.cpp:244]     Train net output #1: loss = 1.67864 (* 1 = 1.67864 loss)\nI1207 08:14:58.149158  1922 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1207 
08:21:52.637823  1922 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1207 08:24:30.564589  1922 solver.cpp:404]     Test net output #0: accuracy = 0.454295\nI1207 08:24:30.564862  1922 solver.cpp:404]     Test net output #1: loss = 1.51784 (* 1 = 1.51784 loss)\nI1207 08:24:34.506222  1922 solver.cpp:228] Iteration 1700, loss = 1.41263\nI1207 08:24:34.506275  1922 solver.cpp:244]     Train net output #0: accuracy = 0.494118\nI1207 08:24:34.506301  1922 solver.cpp:244]     Train net output #1: loss = 1.41263 (* 1 = 1.41263 loss)\nI1207 08:24:34.748888  1922 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1207 08:31:29.360273  1922 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1207 08:34:06.442000  1922 solver.cpp:404]     Test net output #0: accuracy = 0.44253\nI1207 08:34:06.442250  1922 solver.cpp:404]     Test net output #1: loss = 1.54925 (* 1 = 1.54925 loss)\nI1207 08:34:10.452914  1922 solver.cpp:228] Iteration 1800, loss = 1.25617\nI1207 08:34:10.452951  1922 solver.cpp:244]     Train net output #0: accuracy = 0.517647\nI1207 08:34:10.452967  1922 solver.cpp:244]     Train net output #1: loss = 1.25617 (* 1 = 1.25617 loss)\nI1207 08:34:10.707397  1922 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1207 08:41:05.132328  1922 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1207 08:43:38.571523  1922 solver.cpp:404]     Test net output #0: accuracy = 0.442706\nI1207 08:43:38.571760  1922 solver.cpp:404]     Test net output #1: loss = 1.53512 (* 1 = 1.53512 loss)\nI1207 08:43:42.489538  1922 solver.cpp:228] Iteration 1900, loss = 1.67133\nI1207 08:43:42.489581  1922 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1207 08:43:42.489606  1922 solver.cpp:244]     Train net output #1: loss = 1.67133 (* 1 = 1.67133 loss)\nI1207 08:43:42.748266  1922 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1207 08:50:37.138569  1922 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1207 08:53:12.334240  1922 solver.cpp:404]     Test net output #0: 
accuracy = 0.432648\nI1207 08:53:12.334511  1922 solver.cpp:404]     Test net output #1: loss = 1.5557 (* 1 = 1.5557 loss)\nI1207 08:53:16.280297  1922 solver.cpp:228] Iteration 2000, loss = 1.60622\nI1207 08:53:16.280349  1922 solver.cpp:244]     Train net output #0: accuracy = 0.352941\nI1207 08:53:16.280367  1922 solver.cpp:244]     Train net output #1: loss = 1.60622 (* 1 = 1.60622 loss)\nI1207 08:53:16.514844  1922 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1207 09:00:11.094591  1922 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1207 09:02:48.998184  1922 solver.cpp:404]     Test net output #0: accuracy = 0.439824\nI1207 09:02:48.998407  1922 solver.cpp:404]     Test net output #1: loss = 1.52743 (* 1 = 1.52743 loss)\nI1207 09:02:52.944705  1922 solver.cpp:228] Iteration 2100, loss = 1.54542\nI1207 09:02:52.944757  1922 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1207 09:02:52.944784  1922 solver.cpp:244]     Train net output #1: loss = 1.54542 (* 1 = 1.54542 loss)\nI1207 09:02:53.175452  1922 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1207 09:06:26.834157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05001 > 2) by scale factor 0.975603\nI1207 09:06:31.024682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4866 > 2) by scale factor 0.573625\nI1207 09:09:47.913452  1922 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1207 09:12:26.238137  1922 solver.cpp:404]     Test net output #0: accuracy = 0.451353\nI1207 09:12:26.238402  1922 solver.cpp:404]     Test net output #1: loss = 1.5205 (* 1 = 1.5205 loss)\nI1207 09:12:30.183351  1922 solver.cpp:228] Iteration 2200, loss = 1.55916\nI1207 09:12:30.183401  1922 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1207 09:12:30.183426  1922 solver.cpp:244]     Train net output #1: loss = 1.55916 (* 1 = 1.55916 loss)\nI1207 09:12:30.414763  1922 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1207 
09:19:25.117439  1922 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1207 09:22:02.358814  1922 solver.cpp:404]     Test net output #0: accuracy = 0.41253\nI1207 09:22:02.359032  1922 solver.cpp:404]     Test net output #1: loss = 1.62049 (* 1 = 1.62049 loss)\nI1207 09:22:06.306716  1922 solver.cpp:228] Iteration 2300, loss = 1.56804\nI1207 09:22:06.306777  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 09:22:06.306804  1922 solver.cpp:244]     Train net output #1: loss = 1.56804 (* 1 = 1.56804 loss)\nI1207 09:22:06.540675  1922 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1207 09:27:04.086870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76562 > 2) by scale factor 0.723166\nI1207 09:29:01.431205  1922 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1207 09:31:38.982831  1922 solver.cpp:404]     Test net output #0: accuracy = 0.434236\nI1207 09:31:38.983065  1922 solver.cpp:404]     Test net output #1: loss = 1.55086 (* 1 = 1.55086 loss)\nI1207 09:31:42.938406  1922 solver.cpp:228] Iteration 2400, loss = 1.4999\nI1207 09:31:42.938460  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 09:31:42.938486  1922 solver.cpp:244]     Train net output #1: loss = 1.4999 (* 1 = 1.4999 loss)\nI1207 09:31:43.161806  1922 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1207 09:38:38.019374  1922 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1207 09:41:14.562052  1922 solver.cpp:404]     Test net output #0: accuracy = 0.395471\nI1207 09:41:14.562292  1922 solver.cpp:404]     Test net output #1: loss = 1.66987 (* 1 = 1.66987 loss)\nI1207 09:41:18.509466  1922 solver.cpp:228] Iteration 2500, loss = 1.50653\nI1207 09:41:18.509517  1922 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1207 09:41:18.509542  1922 solver.cpp:244]     Train net output #1: loss = 1.50653 (* 1 = 1.50653 loss)\nI1207 09:41:18.739526  1922 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1207 
09:48:13.361068  1922 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1207 09:50:51.335774  1922 solver.cpp:404]     Test net output #0: accuracy = 0.431765\nI1207 09:50:51.336004  1922 solver.cpp:404]     Test net output #1: loss = 1.54586 (* 1 = 1.54586 loss)\nI1207 09:50:55.283116  1922 solver.cpp:228] Iteration 2600, loss = 1.67663\nI1207 09:50:55.283170  1922 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1207 09:50:55.283195  1922 solver.cpp:244]     Train net output #1: loss = 1.67663 (* 1 = 1.67663 loss)\nI1207 09:50:55.514835  1922 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1207 09:57:50.417523  1922 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1207 10:00:28.228742  1922 solver.cpp:404]     Test net output #0: accuracy = 0.450824\nI1207 10:00:28.228991  1922 solver.cpp:404]     Test net output #1: loss = 1.53084 (* 1 = 1.53084 loss)\nI1207 10:00:32.176277  1922 solver.cpp:228] Iteration 2700, loss = 1.67003\nI1207 10:00:32.176331  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 10:00:32.176355  1922 solver.cpp:244]     Train net output #1: loss = 1.67003 (* 1 = 1.67003 loss)\nI1207 10:00:32.408227  1922 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1207 10:06:11.866554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0056 > 2) by scale factor 0.997209\nI1207 10:06:16.059041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61221 > 2) by scale factor 0.553677\nI1207 10:06:20.251493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06096 > 2) by scale factor 0.970422\nI1207 10:07:27.311843  1922 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1207 10:10:03.857692  1922 solver.cpp:404]     Test net output #0: accuracy = 0.415295\nI1207 10:10:03.857903  1922 solver.cpp:404]     Test net output #1: loss = 1.61899 (* 1 = 1.61899 loss)\nI1207 10:10:07.802845  1922 solver.cpp:228] Iteration 2800, loss = 1.58843\nI1207 
10:10:07.802896  1922 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1207 10:10:07.802919  1922 solver.cpp:244]     Train net output #1: loss = 1.58843 (* 1 = 1.58843 loss)\nI1207 10:10:08.035887  1922 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1207 10:17:02.745601  1922 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1207 10:19:40.012979  1922 solver.cpp:404]     Test net output #0: accuracy = 0.461059\nI1207 10:19:40.013182  1922 solver.cpp:404]     Test net output #1: loss = 1.5033 (* 1 = 1.5033 loss)\nI1207 10:19:43.959044  1922 solver.cpp:228] Iteration 2900, loss = 1.50565\nI1207 10:19:43.959095  1922 solver.cpp:244]     Train net output #0: accuracy = 0.517647\nI1207 10:19:43.959120  1922 solver.cpp:244]     Train net output #1: loss = 1.50565 (* 1 = 1.50565 loss)\nI1207 10:19:44.189286  1922 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1207 10:24:24.903694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85517 > 2) by scale factor 0.518784\nI1207 10:24:29.094828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23358 > 2) by scale factor 0.61851\nI1207 10:26:38.931677  1922 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1207 10:29:16.924960  1922 solver.cpp:404]     Test net output #0: accuracy = 0.415118\nI1207 10:29:16.925166  1922 solver.cpp:404]     Test net output #1: loss = 1.57546 (* 1 = 1.57546 loss)\nI1207 10:29:20.872094  1922 solver.cpp:228] Iteration 3000, loss = 1.52026\nI1207 10:29:20.872145  1922 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1207 10:29:20.872170  1922 solver.cpp:244]     Train net output #1: loss = 1.52026 (* 1 = 1.52026 loss)\nI1207 10:29:21.101487  1922 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1207 10:36:15.606047  1922 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1207 10:38:52.258121  1922 solver.cpp:404]     Test net output #0: accuracy = 0.41953\nI1207 10:38:52.258361  1922 solver.cpp:404]     Test net 
output #1: loss = 1.56468 (* 1 = 1.56468 loss)\nI1207 10:38:56.204463  1922 solver.cpp:228] Iteration 3100, loss = 1.45931\nI1207 10:38:56.204505  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 10:38:56.204521  1922 solver.cpp:244]     Train net output #1: loss = 1.45931 (* 1 = 1.45931 loss)\nI1207 10:38:56.440922  1922 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1207 10:45:51.220602  1922 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1207 10:48:28.217193  1922 solver.cpp:404]     Test net output #0: accuracy = 0.395824\nI1207 10:48:28.217401  1922 solver.cpp:404]     Test net output #1: loss = 1.62963 (* 1 = 1.62963 loss)\nI1207 10:48:32.158031  1922 solver.cpp:228] Iteration 3200, loss = 1.64201\nI1207 10:48:32.158082  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 10:48:32.158100  1922 solver.cpp:244]     Train net output #1: loss = 1.64201 (* 1 = 1.64201 loss)\nI1207 10:48:32.395200  1922 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1207 10:51:28.328235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52225 > 2) by scale factor 0.792943\nI1207 10:54:36.792974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18886 > 2) by scale factor 0.913716\nI1207 10:55:10.300233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65621 > 2) by scale factor 0.547015\nI1207 10:55:27.064139  1922 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1207 10:58:04.937841  1922 solver.cpp:404]     Test net output #0: accuracy = 0.289823\nI1207 10:58:04.938084  1922 solver.cpp:404]     Test net output #1: loss = 2.20266 (* 1 = 2.20266 loss)\nI1207 10:58:08.879524  1922 solver.cpp:228] Iteration 3300, loss = 2.5375\nI1207 10:58:08.879575  1922 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 10:58:08.879592  1922 solver.cpp:244]     Train net output #1: loss = 2.5375 (* 1 = 2.5375 loss)\nI1207 10:58:09.129598  1922 
sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1207 10:58:46.842463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32427 > 2) by scale factor 0.860485\nI1207 10:59:07.792762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48931 > 2) by scale factor 0.803435\nI1207 10:59:24.554198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28696 > 2) by scale factor 0.874523\nI1207 10:59:45.504029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35714 > 2) by scale factor 0.848486\nI1207 11:05:03.870486  1922 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1207 11:07:42.125046  1922 solver.cpp:404]     Test net output #0: accuracy = 0.383765\nI1207 11:07:42.125277  1922 solver.cpp:404]     Test net output #1: loss = 1.67913 (* 1 = 1.67913 loss)\nI1207 11:07:46.102121  1922 solver.cpp:228] Iteration 3400, loss = 1.62933\nI1207 11:07:46.102162  1922 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1207 11:07:46.102180  1922 solver.cpp:244]     Train net output #1: loss = 1.62933 (* 1 = 1.62933 loss)\nI1207 11:07:46.307646  1922 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1207 11:11:11.738301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07093 > 2) by scale factor 0.965751\nI1207 11:11:15.932695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08123 > 2) by scale factor 0.960972\nI1207 11:14:41.354647  1922 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1207 11:17:18.094466  1922 solver.cpp:404]     Test net output #0: accuracy = 0.392471\nI1207 11:17:18.094698  1922 solver.cpp:404]     Test net output #1: loss = 1.70705 (* 1 = 1.70705 loss)\nI1207 11:17:22.033464  1922 solver.cpp:228] Iteration 3500, loss = 1.7372\nI1207 11:17:22.033515  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 11:17:22.033534  1922 solver.cpp:244]     Train net output #1: 
loss = 1.7372 (* 1 = 1.7372 loss)\nI1207 11:17:22.275842  1922 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1207 11:24:16.995517  1922 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1207 11:26:53.902088  1922 solver.cpp:404]     Test net output #0: accuracy = 0.387471\nI1207 11:26:53.902323  1922 solver.cpp:404]     Test net output #1: loss = 1.64871 (* 1 = 1.64871 loss)\nI1207 11:26:57.840806  1922 solver.cpp:228] Iteration 3600, loss = 1.714\nI1207 11:26:57.840855  1922 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1207 11:26:57.840873  1922 solver.cpp:244]     Train net output #1: loss = 1.714 (* 1 = 1.714 loss)\nI1207 11:26:58.082707  1922 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1207 11:33:52.685446  1922 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1207 11:36:30.447039  1922 solver.cpp:404]     Test net output #0: accuracy = 0.422\nI1207 11:36:30.447264  1922 solver.cpp:404]     Test net output #1: loss = 1.58965 (* 1 = 1.58965 loss)\nI1207 11:36:34.392650  1922 solver.cpp:228] Iteration 3700, loss = 1.56516\nI1207 11:36:34.392699  1922 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1207 11:36:34.392716  1922 solver.cpp:244]     Train net output #1: loss = 1.56516 (* 1 = 1.56516 loss)\nI1207 11:36:34.632721  1922 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1207 11:39:22.288628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82252 > 2) by scale factor 0.708586\nI1207 11:39:30.673238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14799 > 2) by scale factor 0.931101\nI1207 11:39:34.866168  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78535 > 2) by scale factor 0.718043\nI1207 11:40:00.013666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03548 > 2) by scale factor 0.982568\nI1207 11:40:04.207635  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11859 > 2) 
by scale factor 0.944023\nI1207 11:40:25.167002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2197 > 2) by scale factor 0.901023\nI1207 11:40:33.550796  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11894 > 2) by scale factor 0.943869\nI1207 11:40:41.933499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49491 > 2) by scale factor 0.801634\nI1207 11:40:46.125839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38396 > 2) by scale factor 0.838939\nI1207 11:40:54.511332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01995 > 2) by scale factor 0.990124\nI1207 11:41:40.614205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21246 > 2) by scale factor 0.903971\nI1207 11:42:09.951789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21364 > 2) by scale factor 0.90349\nI1207 11:42:30.908136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05966 > 2) by scale factor 0.653667\nI1207 11:42:35.101084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04311 > 2) by scale factor 0.978899\nI1207 11:42:51.865890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05925 > 2) by scale factor 0.971225\nI1207 11:43:29.594732  1922 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1207 11:46:07.281929  1922 solver.cpp:404]     Test net output #0: accuracy = 0.281117\nI1207 11:46:07.282138  1922 solver.cpp:404]     Test net output #1: loss = 2.04401 (* 1 = 2.04401 loss)\nI1207 11:46:11.224874  1922 solver.cpp:228] Iteration 3800, loss = 1.95178\nI1207 11:46:11.224928  1922 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1207 11:46:11.224946  1922 solver.cpp:244]     Train net output #1: loss = 1.95178 (* 1 = 1.95178 loss)\nI1207 11:46:11.461405  1922 sgd_solver.cpp:166] 
Iteration 3800, lr = 0.57\nI1207 11:46:32.421980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23208 > 2) by scale factor 0.896023\nI1207 11:48:04.587414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57497 > 2) by scale factor 0.776709\nI1207 11:53:06.347054  1922 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1207 11:55:42.990916  1922 solver.cpp:404]     Test net output #0: accuracy = 0.378706\nI1207 11:55:42.991116  1922 solver.cpp:404]     Test net output #1: loss = 1.67799 (* 1 = 1.67799 loss)\nI1207 11:55:46.933998  1922 solver.cpp:228] Iteration 3900, loss = 1.51728\nI1207 11:55:46.934044  1922 solver.cpp:244]     Train net output #0: accuracy = 0.482353\nI1207 11:55:46.934061  1922 solver.cpp:244]     Train net output #1: loss = 1.51728 (* 1 = 1.51728 loss)\nI1207 11:55:47.169124  1922 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1207 11:57:36.090759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97911 > 2) by scale factor 0.671342\nI1207 12:02:41.869839  1922 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1207 12:05:18.806252  1922 solver.cpp:404]     Test net output #0: accuracy = 0.396295\nI1207 12:05:18.806468  1922 solver.cpp:404]     Test net output #1: loss = 1.60779 (* 1 = 1.60779 loss)\nI1207 12:05:22.749470  1922 solver.cpp:228] Iteration 4000, loss = 1.51558\nI1207 12:05:22.749519  1922 solver.cpp:244]     Train net output #0: accuracy = 0.435294\nI1207 12:05:22.749536  1922 solver.cpp:244]     Train net output #1: loss = 1.51558 (* 1 = 1.51558 loss)\nI1207 12:05:22.989538  1922 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1207 12:10:49.898308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01036 > 2) by scale factor 0.664373\nI1207 12:12:17.923907  1922 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1207 12:14:55.590792  1922 solver.cpp:404]     Test net output #0: accuracy = 0.349059\nI1207 
12:14:55.591017  1922 solver.cpp:404]     Test net output #1: loss = 1.79221 (* 1 = 1.79221 loss)\nI1207 12:14:59.534027  1922 solver.cpp:228] Iteration 4100, loss = 1.85191\nI1207 12:14:59.534067  1922 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1207 12:14:59.534083  1922 solver.cpp:244]     Train net output #1: loss = 1.85191 (* 1 = 1.85191 loss)\nI1207 12:14:59.770305  1922 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1207 12:21:54.718209  1922 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1207 12:24:31.344990  1922 solver.cpp:404]     Test net output #0: accuracy = 0.372765\nI1207 12:24:31.345260  1922 solver.cpp:404]     Test net output #1: loss = 1.81435 (* 1 = 1.81435 loss)\nI1207 12:24:35.287525  1922 solver.cpp:228] Iteration 4200, loss = 1.75979\nI1207 12:24:35.287569  1922 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1207 12:24:35.287585  1922 solver.cpp:244]     Train net output #1: loss = 1.75979 (* 1 = 1.75979 loss)\nI1207 12:24:35.527170  1922 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1207 12:29:11.950541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88087 > 2) by scale factor 0.694236\nI1207 12:31:30.162156  1922 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1207 12:34:07.258106  1922 solver.cpp:404]     Test net output #0: accuracy = 0.378412\nI1207 12:34:07.258381  1922 solver.cpp:404]     Test net output #1: loss = 1.84866 (* 1 = 1.84866 loss)\nI1207 12:34:11.201777  1922 solver.cpp:228] Iteration 4300, loss = 1.546\nI1207 12:34:11.201825  1922 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1207 12:34:11.201843  1922 solver.cpp:244]     Train net output #1: loss = 1.546 (* 1 = 1.546 loss)\nI1207 12:34:11.438947  1922 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1207 12:35:18.472645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31968 > 2) by scale factor 0.862187\nI1207 12:35:35.231192  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17283 > 2) by scale factor 0.920458\nI1207 12:35:39.421975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02635 > 2) by scale factor 0.986995\nI1207 12:35:43.612624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14837 > 2) by scale factor 0.930938\nI1207 12:35:47.803092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0757 > 2) by scale factor 0.96353\nI1207 12:35:56.181731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25548 > 2) by scale factor 0.886729\nI1207 12:36:00.372392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26797 > 2) by scale factor 0.881845\nI1207 12:36:21.316200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53786 > 2) by scale factor 0.565314\nI1207 12:38:14.409994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30749 > 2) by scale factor 0.866741\nI1207 12:38:35.353065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19721 > 2) by scale factor 0.910243\nI1207 12:38:47.920984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07345 > 2) by scale factor 0.964577\nI1207 12:38:52.112107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03791 > 2) by scale factor 0.981396\nI1207 12:38:56.302778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26298 > 2) by scale factor 0.883788\nI1207 12:39:13.056084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00217 > 2) by scale factor 0.998915\nI1207 12:39:25.622397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09753 > 2) by scale factor 0.953503\nI1207 12:39:38.190892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.24154 > 2) by scale factor 0.892245\nI1207 12:39:54.946321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.607 > 2) by scale factor 0.767165\nI1207 12:40:03.324036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32537 > 2) by scale factor 0.860078\nI1207 12:40:11.703351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36586 > 2) by scale factor 0.84536\nI1207 12:40:20.080332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1464 > 2) by scale factor 0.931791\nI1207 12:40:32.647054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54273 > 2) by scale factor 0.786557\nI1207 12:40:36.837623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07372 > 2) by scale factor 0.964452\nI1207 12:40:45.216873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4339 > 2) by scale factor 0.821726\nI1207 12:40:49.407546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74634 > 2) by scale factor 0.728241\nI1207 12:40:53.597180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0525 > 2) by scale factor 0.974421\nI1207 12:40:57.788039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62402 > 2) by scale factor 0.762188\nI1207 12:41:01.977480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02659 > 2) by scale factor 0.660809\nI1207 12:41:06.168364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76739 > 2) by scale factor 0.722702\nI1207 12:41:06.180220  1922 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1207 12:43:42.818503  1922 solver.cpp:404]     Test net output #0: accuracy = 0.267529\nI1207 12:43:42.818775  1922 solver.cpp:404]     Test net output #1: loss = 2.62736 (* 1 = 2.62736 loss)\nI1207 12:43:46.764057  1922 
solver.cpp:228] Iteration 4400, loss = 2.58583\nI1207 12:43:46.764097  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 12:43:46.764114  1922 solver.cpp:244]     Train net output #1: loss = 2.58583 (* 1 = 2.58583 loss)\nI1207 12:43:46.998813  1922 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1207 12:43:51.199832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79693 > 2) by scale factor 0.715071\nI1207 12:44:03.773406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72994 > 2) by scale factor 0.732616\nI1207 12:44:20.536738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02392 > 2) by scale factor 0.988183\nI1207 12:44:33.109202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13324 > 2) by scale factor 0.937542\nI1207 12:45:15.010466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36731 > 2) by scale factor 0.844842\nI1207 12:45:52.655372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09005 > 2) by scale factor 0.956917\nI1207 12:46:00.993482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54527 > 2) by scale factor 0.785773\nI1207 12:46:05.163677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24531 > 2) by scale factor 0.890747\nI1207 12:46:17.672065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59537 > 2) by scale factor 0.770603\nI1207 12:46:21.843080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56024 > 2) by scale factor 0.781176\nI1207 12:46:26.013787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60336 > 2) by scale factor 0.768239\nI1207 12:46:38.520702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98576 > 2) by scale factor 0.669845\nI1207 12:46:42.690346  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02803 > 2) by scale factor 0.660495\nI1207 12:46:46.861366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36876 > 2) by scale factor 0.844324\nI1207 12:47:07.704429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44154 > 2) by scale factor 0.819154\nI1207 12:47:16.041827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35229 > 2) by scale factor 0.596607\nI1207 12:47:24.378525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66772 > 2) by scale factor 0.749703\nI1207 12:47:41.051257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65906 > 2) by scale factor 0.752147\nI1207 12:47:45.220661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7035 > 2) by scale factor 0.739782\nI1207 12:48:10.227735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26667 > 2) by scale factor 0.612244\nI1207 12:48:26.899015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1195 > 2) by scale factor 0.94362\nI1207 12:48:39.405023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71163 > 2) by scale factor 0.538847\nI1207 12:48:43.575880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38684 > 2) by scale factor 0.837929\nI1207 12:48:51.914908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.697 > 2) by scale factor 0.741563\nI1207 12:48:56.084836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62885 > 2) by scale factor 0.760788\nI1207 12:49:04.421628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88414 > 2) by scale factor 0.693448\nI1207 12:49:08.591102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.43427 > 2) by scale factor 0.582366\nI1207 12:49:12.761330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77978 > 2) by scale factor 0.719481\nI1207 12:49:21.096748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85889 > 2) by scale factor 0.699573\nI1207 12:49:25.268594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88403 > 2) by scale factor 0.693473\nI1207 12:49:29.438351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36032 > 2) by scale factor 0.595181\nI1207 12:49:33.607486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01433 > 2) by scale factor 0.992888\nI1207 12:49:37.776279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55389 > 2) by scale factor 0.78312\nI1207 12:49:41.947285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16854 > 2) by scale factor 0.922281\nI1207 12:49:50.282944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50595 > 2) by scale factor 0.7981\nI1207 12:49:54.451874  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28263 > 2) by scale factor 0.609268\nI1207 12:50:06.956987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63398 > 2) by scale factor 0.759308\nI1207 12:50:15.294957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27394 > 2) by scale factor 0.87953\nI1207 12:50:27.799641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20993 > 2) by scale factor 0.905005\nI1207 12:50:40.306557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45723 > 2) by scale factor 0.578499\nI1207 12:50:40.318482  1922 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1207 12:53:17.210405  1922 solver.cpp:404]     Test net output #0: accuracy = 
0.235412\nI1207 12:53:17.210685  1922 solver.cpp:404]     Test net output #1: loss = 3.22005 (* 1 = 3.22005 loss)\nI1207 12:53:21.153972  1922 solver.cpp:228] Iteration 4500, loss = 3.3535\nI1207 12:53:21.154021  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 12:53:21.154039  1922 solver.cpp:244]     Train net output #1: loss = 3.3535 (* 1 = 3.3535 loss)\nI1207 12:53:21.369719  1922 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1207 12:53:21.379812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39645 > 2) by scale factor 0.834569\nI1207 12:53:33.888736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62056 > 2) by scale factor 0.763196\nI1207 12:53:38.061283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22513 > 2) by scale factor 0.898825\nI1207 12:53:42.232110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35963 > 2) by scale factor 0.847592\nI1207 12:53:46.401872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00266 > 2) by scale factor 0.998674\nI1207 12:53:50.572227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82158 > 2) by scale factor 0.708823\nI1207 12:53:58.910604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36685 > 2) by scale factor 0.845004\nI1207 12:54:11.419303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44991 > 2) by scale factor 0.816358\nI1207 12:54:19.760584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51878 > 2) by scale factor 0.794034\nI1207 12:54:36.436779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83427 > 2) by scale factor 0.70565\nI1207 12:55:01.448094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0369 > 2) by scale factor 0.658566\nI1207 12:55:05.619660  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06036 > 2) by scale factor 0.970704\nI1207 12:55:13.960603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31059 > 2) by scale factor 0.865578\nI1207 12:55:18.131096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11591 > 2) by scale factor 0.945219\nI1207 12:55:22.301055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6992 > 2) by scale factor 0.740959\nI1207 12:55:26.470877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0785 > 2) by scale factor 0.649667\nI1207 12:55:43.141943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31956 > 2) by scale factor 0.862233\nI1207 12:58:38.182258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41358 > 2) by scale factor 0.828644\nI1207 12:59:11.524675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03793 > 2) by scale factor 0.981388\nI1207 13:00:05.744989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51677 > 2) by scale factor 0.79467\nI1207 13:00:14.091644  1922 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1207 13:02:51.226644  1922 solver.cpp:404]     Test net output #0: accuracy = 0.310059\nI1207 13:02:51.226912  1922 solver.cpp:404]     Test net output #1: loss = 2.13734 (* 1 = 2.13734 loss)\nI1207 13:02:55.171278  1922 solver.cpp:228] Iteration 4600, loss = 1.95146\nI1207 13:02:55.171326  1922 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1207 13:02:55.171344  1922 solver.cpp:244]     Train net output #1: loss = 1.95146 (* 1 = 1.95146 loss)\nI1207 13:02:55.385359  1922 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1207 13:03:49.543119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14414 > 2) by scale factor 0.932775\nI1207 13:03:57.877024  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06771 > 2) by scale factor 0.967254\nI1207 13:04:06.210171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10544 > 2) by scale factor 0.949919\nI1207 13:04:10.377080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58714 > 2) by scale factor 0.773054\nI1207 13:04:14.544351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01479 > 2) by scale factor 0.992661\nI1207 13:04:27.041913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15982 > 2) by scale factor 0.926005\nI1207 13:04:56.197024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32734 > 2) by scale factor 0.859352\nI1207 13:06:48.644074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00294 > 2) by scale factor 0.666013\nI1207 13:07:13.633934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54072 > 2) by scale factor 0.787178\nI1207 13:07:17.800917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0672 > 2) by scale factor 0.967491\nI1207 13:07:26.133260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2484 > 2) by scale factor 0.889522\nI1207 13:07:38.628057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55079 > 2) by scale factor 0.784072\nI1207 13:07:42.794981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25364 > 2) by scale factor 0.887452\nI1207 13:08:03.617029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14006 > 2) by scale factor 0.934555\nI1207 13:09:06.087254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26366 > 2) by scale factor 0.883523\nI1207 13:09:26.912093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.23422 > 2) by scale factor 0.895168\nI1207 13:09:47.733832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42498 > 2) by scale factor 0.82475\nI1207 13:09:47.745965  1922 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1207 13:12:24.285673  1922 solver.cpp:404]     Test net output #0: accuracy = 0.315176\nI1207 13:12:24.285892  1922 solver.cpp:404]     Test net output #1: loss = 2.43259 (* 1 = 2.43259 loss)\nI1207 13:12:28.228025  1922 solver.cpp:228] Iteration 4700, loss = 2.37381\nI1207 13:12:28.228072  1922 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1207 13:12:28.228090  1922 solver.cpp:244]     Train net output #1: loss = 2.37381 (* 1 = 2.37381 loss)\nI1207 13:12:28.443218  1922 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1207 13:12:53.448504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04805 > 2) by scale factor 0.976538\nI1207 13:12:57.616230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15393 > 2) by scale factor 0.928536\nI1207 13:13:01.784153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33213 > 2) by scale factor 0.857586\nI1207 13:13:10.118377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54842 > 2) by scale factor 0.784798\nI1207 13:13:14.286080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0711 > 2) by scale factor 0.96567\nI1207 13:13:22.619428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00251 > 2) by scale factor 0.998747\nI1207 13:13:43.450592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53171 > 2) by scale factor 0.566298\nI1207 13:13:47.618785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.357 > 2) by scale factor 0.848536\nI1207 13:14:16.780874  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.19192 > 2) by scale factor 0.912441\nI1207 13:14:33.443190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46058 > 2) by scale factor 0.812816\nI1207 13:14:37.609797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62881 > 2) by scale factor 0.551144\nI1207 13:14:45.942313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1524 > 2) by scale factor 0.929197\nI1207 13:14:58.439330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79695 > 2) by scale factor 0.715066\nI1207 13:15:06.770591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28175 > 2) by scale factor 0.876518\nI1207 13:15:10.936863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25698 > 2) by scale factor 0.88614\nI1207 13:15:27.602232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54685 > 2) by scale factor 0.785284\nI1207 13:15:40.099777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79002 > 2) by scale factor 0.716842\nI1207 13:16:13.423048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24947 > 2) by scale factor 0.889097\nI1207 13:19:20.861070  1922 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1207 13:21:57.717780  1922 solver.cpp:404]     Test net output #0: accuracy = 0.365118\nI1207 13:21:57.718030  1922 solver.cpp:404]     Test net output #1: loss = 1.72536 (* 1 = 1.72536 loss)\nI1207 13:22:01.660006  1922 solver.cpp:228] Iteration 4800, loss = 1.71581\nI1207 13:22:01.660053  1922 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1207 13:22:01.660071  1922 solver.cpp:244]     Train net output #1: loss = 1.71581 (* 1 = 1.71581 loss)\nI1207 13:22:01.875779  1922 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1207 13:22:26.961747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.2071 > 2) by scale factor 0.906166\nI1207 13:23:08.635522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16655 > 2) by scale factor 0.923125\nI1207 13:23:54.473574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01444 > 2) by scale factor 0.99283\nI1207 13:24:31.975253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52189 > 2) by scale factor 0.793057\nI1207 13:24:36.143569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09157 > 2) by scale factor 0.956218\nI1207 13:24:52.811494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63428 > 2) by scale factor 0.759222\nI1207 13:25:01.146064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17243 > 2) by scale factor 0.920627\nI1207 13:25:13.646983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52248 > 2) by scale factor 0.79287\nI1207 13:25:21.983206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56272 > 2) by scale factor 0.780422\nI1207 13:25:42.819069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17676 > 2) by scale factor 0.918796\nI1207 13:25:59.487000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89724 > 2) by scale factor 0.690312\nI1207 13:26:03.655486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64422 > 2) by scale factor 0.756367\nI1207 13:26:45.324903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14368 > 2) by scale factor 0.932973\nI1207 13:26:49.494181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17683 > 2) by scale factor 0.918769\nI1207 13:27:18.665735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49723 > 2) by scale factor 0.571881\nI1207 13:27:22.834842  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79232 > 2) by scale factor 0.527381\nI1207 13:27:27.004561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11772 > 2) by scale factor 0.944413\nI1207 13:27:31.172967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92548 > 2) by scale factor 0.683647\nI1207 13:27:35.341456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59924 > 2) by scale factor 0.769455\nI1207 13:27:39.510959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61707 > 2) by scale factor 0.764213\nI1207 13:27:43.680727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32949 > 2) by scale factor 0.858557\nI1207 13:27:52.016019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49137 > 2) by scale factor 0.802771\nI1207 13:27:56.185583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92789 > 2) by scale factor 0.683085\nI1207 13:28:04.520783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34588 > 2) by scale factor 0.852558\nI1207 13:28:12.858026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63529 > 2) by scale factor 0.758929\nI1207 13:28:29.527501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28732 > 2) by scale factor 0.874387\nI1207 13:28:33.696087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88668 > 2) by scale factor 0.692838\nI1207 13:28:46.200790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29124 > 2) by scale factor 0.872891\nI1207 13:28:50.369805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14478 > 2) by scale factor 0.932497\nI1207 13:28:54.549145  1922 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1207 
13:31:31.586853  1922 solver.cpp:404]     Test net output #0: accuracy = 0.269176\nI1207 13:31:31.587131  1922 solver.cpp:404]     Test net output #1: loss = 2.54511 (* 1 = 2.54511 loss)\nI1207 13:31:35.528298  1922 solver.cpp:228] Iteration 4900, loss = 2.54434\nI1207 13:31:35.528342  1922 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1207 13:31:35.528360  1922 solver.cpp:244]     Train net output #1: loss = 2.54434 (* 1 = 2.54434 loss)\nI1207 13:31:35.746886  1922 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1207 13:31:52.426757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16278 > 2) by scale factor 0.924738\nI1207 13:32:04.932781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5532 > 2) by scale factor 0.783332\nI1207 13:32:21.693362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12785 > 2) by scale factor 0.939916\nI1207 13:32:30.075935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36243 > 2) by scale factor 0.594807\nI1207 13:32:34.266870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12389 > 2) by scale factor 0.941666\nI1207 13:32:38.458242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18962 > 2) by scale factor 0.913402\nI1207 13:32:46.838484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68348 > 2) by scale factor 0.7453\nI1207 13:32:51.029886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75479 > 2) by scale factor 0.726008\nI1207 13:32:55.221789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74588 > 2) by scale factor 0.728363\nI1207 13:33:03.603147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28284 > 2) by scale factor 0.876104\nI1207 13:33:07.795936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.17941 > 2) by scale factor 0.917681\nI1207 13:33:11.986016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82456 > 2) by scale factor 0.708075\nI1207 13:33:16.177680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30208 > 2) by scale factor 0.868778\nI1207 13:33:20.369832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10909 > 2) by scale factor 0.643275\nI1207 13:33:32.937923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56284 > 2) by scale factor 0.780384\nI1207 13:33:37.129403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19566 > 2) by scale factor 0.910887\nI1207 13:33:41.319435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25806 > 2) by scale factor 0.885715\nI1207 13:33:45.511952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36017 > 2) by scale factor 0.847398\nI1207 13:33:49.703969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26982 > 2) by scale factor 0.881127\nI1207 13:34:02.275091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70681 > 2) by scale factor 0.738878\nI1207 13:34:14.846274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09854 > 2) by scale factor 0.953044\nI1207 13:34:19.037523  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16683 > 2) by scale factor 0.923006\nI1207 13:34:23.229357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74989 > 2) by scale factor 0.727301\nI1207 13:34:27.419127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28912 > 2) by scale factor 0.873697\nI1207 13:34:35.799780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09401 > 2) by scale factor 0.955105\nI1207 
13:34:44.180506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0024 > 2) by scale factor 0.998801\nI1207 13:34:48.370985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0652 > 2) by scale factor 0.968429\nI1207 13:34:52.560670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93357 > 2) by scale factor 0.681764\nI1207 13:35:05.129750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00757 > 2) by scale factor 0.996228\nI1207 13:35:09.320327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23067 > 2) by scale factor 0.619066\nI1207 13:35:17.699285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67103 > 2) by scale factor 0.748774\nI1207 13:35:26.081356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41475 > 2) by scale factor 0.828245\nI1207 13:35:30.273741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59613 > 2) by scale factor 0.770378\nI1207 13:35:34.466339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07451 > 2) by scale factor 0.650509\nI1207 13:35:38.659273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4236 > 2) by scale factor 0.825219\nI1207 13:35:42.852080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54054 > 2) by scale factor 0.787234\nI1207 13:35:47.043124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14095 > 2) by scale factor 0.934165\nI1207 13:35:59.611724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76545 > 2) by scale factor 0.72321\nI1207 13:36:20.561663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22621 > 2) by scale factor 0.898389\nI1207 13:36:45.698508  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.8212 > 2) by scale factor 0.523396\nI1207 13:36:49.891468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29535 > 2) by scale factor 0.871327\nI1207 13:36:58.272380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26948 > 2) by scale factor 0.881259\nI1207 13:37:02.463181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57086 > 2) by scale factor 0.777951\nI1207 13:37:06.654795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21854 > 2) by scale factor 0.901493\nI1207 13:37:10.846611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18944 > 2) by scale factor 0.913478\nI1207 13:37:19.227975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3648 > 2) by scale factor 0.594389\nI1207 13:37:31.799456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31818 > 2) by scale factor 0.862748\nI1207 13:37:40.180361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0971 > 2) by scale factor 0.9537\nI1207 13:37:44.371608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62714 > 2) by scale factor 0.761285\nI1207 13:37:48.562707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17004 > 2) by scale factor 0.921643\nI1207 13:37:56.942605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80169 > 2) by scale factor 0.713854\nI1207 13:38:01.134191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34396 > 2) by scale factor 0.853256\nI1207 13:38:05.325001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1654 > 2) by scale factor 0.923617\nI1207 13:38:09.514886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20762 > 2) by scale factor 0.905952\nI1207 
13:38:13.706192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39306 > 2) by scale factor 0.835751\nI1207 13:38:22.085839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61247 > 2) by scale factor 0.765559\nI1207 13:38:26.277210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61358 > 2) by scale factor 0.765234\nI1207 13:38:30.478457  1922 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1207 13:41:08.581157  1922 solver.cpp:404]     Test net output #0: accuracy = 0.252235\nI1207 13:41:08.581398  1922 solver.cpp:404]     Test net output #1: loss = 3.35081 (* 1 = 3.35081 loss)\nI1207 13:41:12.524758  1922 solver.cpp:228] Iteration 5000, loss = 2.65676\nI1207 13:41:12.524803  1922 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1207 13:41:12.524821  1922 solver.cpp:244]     Train net output #1: loss = 2.65676 (* 1 = 2.65676 loss)\nI1207 13:41:12.764230  1922 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1207 13:41:12.774291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13124 > 2) by scale factor 0.93842\nI1207 13:41:16.966605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35693 > 2) by scale factor 0.848562\nI1207 13:41:21.158354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71514 > 2) by scale factor 0.73661\nI1207 13:41:25.349864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89621 > 2) by scale factor 0.513319\nI1207 13:41:29.542616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07713 > 2) by scale factor 0.962866\nI1207 13:41:33.734231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27953 > 2) by scale factor 0.877374\nI1207 13:41:42.114708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13764 > 2) by scale factor 0.93561\nI1207 
13:41:46.307080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06189 > 2) by scale factor 0.653192\nI1207 13:41:50.497686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3165 > 2) by scale factor 0.86337\nI1207 13:41:54.687734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72354 > 2) by scale factor 0.734339\nI1207 13:41:58.878571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73113 > 2) by scale factor 0.732298\nI1207 13:42:03.068981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34533 > 2) by scale factor 0.852759\nI1207 13:42:07.259424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26323 > 2) by scale factor 0.612891\nI1207 13:42:11.450851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6241 > 2) by scale factor 0.762167\nI1207 13:42:19.831796  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06562 > 2) by scale factor 0.96823\nI1207 13:42:24.021965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44335 > 2) by scale factor 0.81855\nI1207 13:42:28.213325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19709 > 2) by scale factor 0.910296\nI1207 13:42:36.592779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84539 > 2) by scale factor 0.702891\nI1207 13:42:40.784987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5808 > 2) by scale factor 0.774953\nI1207 13:42:44.976832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58299 > 2) by scale factor 0.558193\nI1207 13:42:49.170095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10524 > 2) by scale factor 0.95001\nI1207 13:42:53.360729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.57435 > 2) by scale factor 0.559542\nI1207 13:42:57.553053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4624 > 2) by scale factor 0.812216\nI1207 13:43:01.744174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84753 > 2) by scale factor 0.702363\nI1207 13:43:05.935957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20163 > 2) by scale factor 0.624682\nI1207 13:43:10.128034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34331 > 2) by scale factor 0.853494\nI1207 13:43:14.320340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81338 > 2) by scale factor 0.710889\nI1207 13:43:18.511795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33256 > 2) by scale factor 0.600139\nI1207 13:43:22.703860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04511 > 2) by scale factor 0.977945\nI1207 13:43:26.894685  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25489 > 2) by scale factor 0.88696\nI1207 13:43:31.086784  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07627 > 2) by scale factor 0.963267\nI1207 13:43:39.468489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75167 > 2) by scale factor 0.533097\nI1207 13:43:47.850090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47628 > 2) by scale factor 0.807662\nI1207 13:43:52.040711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79783 > 2) by scale factor 0.714839\nI1207 13:43:56.232379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10102 > 2) by scale factor 0.951919\nI1207 13:44:00.422639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0476 > 2) by scale factor 0.976754\nI1207 
13:44:08.800704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25315 > 2) by scale factor 0.887646\nI1207 13:44:12.991847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61724 > 2) by scale factor 0.552907\nI1207 13:44:17.184281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87263 > 2) by scale factor 0.696225\nI1207 13:44:21.374776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59539 > 2) by scale factor 0.556268\nI1207 13:44:29.755616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59681 > 2) by scale factor 0.770177\nI1207 13:44:33.947324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62579 > 2) by scale factor 0.551603\nI1207 13:44:38.139186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20917 > 2) by scale factor 0.623214\nI1207 13:44:42.329648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77907 > 2) by scale factor 0.719666\nI1207 13:44:46.520797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17077 > 2) by scale factor 0.921333\nI1207 13:44:54.901041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79193 > 2) by scale factor 0.71635\nI1207 13:45:20.032507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09524 > 2) by scale factor 0.954544\nI1207 13:45:24.224942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04444 > 2) by scale factor 0.656934\nI1207 13:45:28.414816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02891 > 2) by scale factor 0.985752\nI1207 13:45:32.605043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54788 > 2) by scale factor 0.784968\nI1207 13:45:40.985018  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.39944 > 2) by scale factor 0.833529\nI1207 13:45:45.176903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90128 > 2) by scale factor 0.512653\nI1207 13:45:53.556507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35546 > 2) by scale factor 0.849092\nI1207 13:45:57.748656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14415 > 2) by scale factor 0.932771\nI1207 13:46:01.940496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42934 > 2) by scale factor 0.823268\nI1207 13:46:06.131955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38334 > 2) by scale factor 0.591131\nI1207 13:46:10.323807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67482 > 2) by scale factor 0.747714\nI1207 13:46:14.514855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73743 > 2) by scale factor 0.730612\nI1207 13:46:18.705395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05559 > 2) by scale factor 0.654539\nI1207 13:46:22.896479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89904 > 2) by scale factor 0.512947\nI1207 13:46:31.277720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02833 > 2) by scale factor 0.986032\nI1207 13:46:35.469873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12384 > 2) by scale factor 0.941689\nI1207 13:46:39.659667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88352 > 2) by scale factor 0.693597\nI1207 13:46:43.850487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58686 > 2) by scale factor 0.557591\nI1207 13:46:48.039551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36931 > 2) by scale factor 
0.593594\nI1207 13:46:52.230801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76288 > 2) by scale factor 0.723882\nI1207 13:46:56.421500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86489 > 2) by scale factor 0.698108\nI1207 13:47:00.611934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05391 > 2) by scale factor 0.654897\nI1207 13:47:04.803932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.376 > 2) by scale factor 0.841752\nI1207 13:47:08.994360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04872 > 2) by scale factor 0.976218\nI1207 13:47:13.185830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40252 > 2) by scale factor 0.5878\nI1207 13:47:17.375700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07803 > 2) by scale factor 0.649767\nI1207 13:47:21.566186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19908 > 2) by scale factor 0.909472\nI1207 13:47:25.757751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78552 > 2) by scale factor 0.717998\nI1207 13:47:29.948962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96768 > 2) by scale factor 0.673926\nI1207 13:47:34.138928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74573 > 2) by scale factor 0.533942\nI1207 13:47:38.329035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88046 > 2) by scale factor 0.694334\nI1207 13:47:42.519306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19317 > 2) by scale factor 0.911922\nI1207 13:47:46.710466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9355 > 2) by scale factor 0.681315\nI1207 13:47:50.901407  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.11224 > 2) by scale factor 0.946861\nI1207 13:47:55.094163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67466 > 2) by scale factor 0.544268\nI1207 13:47:59.285534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28033 > 2) by scale factor 0.877066\nI1207 13:48:03.477099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06388 > 2) by scale factor 0.492141\nI1207 13:48:07.668215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58728 > 2) by scale factor 0.557526\nI1207 13:48:07.680052  1922 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1207 13:50:45.831604  1922 solver.cpp:404]     Test net output #0: accuracy = 0.181\nI1207 13:50:45.831884  1922 solver.cpp:404]     Test net output #1: loss = 5.56792 (* 1 = 5.56792 loss)\nI1207 13:50:49.774132  1922 solver.cpp:228] Iteration 5100, loss = 5.98411\nI1207 13:50:49.774178  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 13:50:49.774195  1922 solver.cpp:244]     Train net output #1: loss = 5.98411 (* 1 = 5.98411 loss)\nI1207 13:50:50.015509  1922 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1207 13:50:50.025672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89679 > 2) by scale factor 0.690421\nI1207 13:50:58.406860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33072 > 2) by scale factor 0.858103\nI1207 13:51:02.599434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1765 > 2) by scale factor 0.918907\nI1207 13:51:06.792564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28192 > 2) by scale factor 0.876455\nI1207 13:51:10.984619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64632 > 2) by scale factor 0.755766\nI1207 13:51:15.176182  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.447 > 2) by scale factor 0.817327\nI1207 13:51:19.367929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92171 > 2) by scale factor 0.68453\nI1207 13:51:23.559178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96288 > 2) by scale factor 0.67502\nI1207 13:51:27.750926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92053 > 2) by scale factor 0.684808\nI1207 13:51:31.942317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34231 > 2) by scale factor 0.598388\nI1207 13:51:36.133627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08951 > 2) by scale factor 0.95716\nI1207 13:51:40.326433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50358 > 2) by scale factor 0.798857\nI1207 13:51:48.705759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.961 > 2) by scale factor 0.504923\nI1207 13:51:52.896206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81147 > 2) by scale factor 0.711372\nI1207 13:51:57.087846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41858 > 2) by scale factor 0.826933\nI1207 13:52:01.279229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54387 > 2) by scale factor 0.564355\nI1207 13:52:05.470118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19386 > 2) by scale factor 0.626202\nI1207 13:52:09.662436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92053 > 2) by scale factor 0.684808\nI1207 13:52:18.043184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19665 > 2) by scale factor 0.625654\nI1207 13:52:26.422109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65624 > 2) by scale factor 
0.54701\nI1207 13:52:30.613517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78616 > 2) by scale factor 0.52824\nI1207 13:52:34.804358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18829 > 2) by scale factor 0.627296\nI1207 13:52:38.995914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46356 > 2) by scale factor 0.577441\nI1207 13:52:43.186748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51909 > 2) by scale factor 0.568328\nI1207 13:52:47.377223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56813 > 2) by scale factor 0.560518\nI1207 13:52:51.568246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53454 > 2) by scale factor 0.441059\nI1207 13:52:55.758869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35902 > 2) by scale factor 0.84781\nI1207 13:52:59.949066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41021 > 2) by scale factor 0.829803\nI1207 13:53:04.140730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43406 > 2) by scale factor 0.821674\nI1207 13:53:08.332772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22164 > 2) by scale factor 0.620801\nI1207 13:53:12.523212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69829 > 2) by scale factor 0.54079\nI1207 13:53:20.900867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14682 > 2) by scale factor 0.93161\nI1207 13:53:25.090977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25627 > 2) by scale factor 0.886418\nI1207 13:53:29.280851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57738 > 2) by scale factor 0.775981\nI1207 13:53:33.470870  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.30984 > 2) by scale factor 0.464054\nI1207 13:53:37.662132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17172 > 2) by scale factor 0.630573\nI1207 13:53:41.852865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68716 > 2) by scale factor 0.542423\nI1207 13:53:46.043241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83906 > 2) by scale factor 0.704459\nI1207 13:53:50.234241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72001 > 2) by scale factor 0.537633\nI1207 13:53:54.425741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72488 > 2) by scale factor 0.733978\nI1207 13:53:58.616600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90215 > 2) by scale factor 0.689145\nI1207 13:54:02.808233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35006 > 2) by scale factor 0.851043\nI1207 13:54:06.998387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23425 > 2) by scale factor 0.618382\nI1207 13:54:11.189812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26185 > 2) by scale factor 0.613149\nI1207 13:54:15.381263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72646 > 2) by scale factor 0.536702\nI1207 13:54:19.572197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00443 > 2) by scale factor 0.997792\nI1207 13:54:23.762868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03292 > 2) by scale factor 0.983808\nI1207 13:54:32.141806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74546 > 2) by scale factor 0.728474\nI1207 13:54:36.334296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59928 > 2) by scale 
factor 0.555667\nI1207 13:54:40.524793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61652 > 2) by scale factor 0.764375\nI1207 13:54:44.715699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11033 > 2) by scale factor 0.947721\nI1207 13:54:48.905879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61677 > 2) by scale factor 0.764301\nI1207 13:54:53.097097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33672 > 2) by scale factor 0.599391\nI1207 13:54:57.287577  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42296 > 2) by scale factor 0.825438\nI1207 13:55:01.477772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.689 > 2) by scale factor 0.743772\nI1207 13:55:05.668056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84321 > 2) by scale factor 0.703429\nI1207 13:55:09.859289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49385 > 2) by scale factor 0.572435\nI1207 13:55:14.049942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58113 > 2) by scale factor 0.558484\nI1207 13:55:18.240049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83897 > 2) by scale factor 0.70448\nI1207 13:55:22.430694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88186 > 2) by scale factor 0.693997\nI1207 13:55:26.621232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57796 > 2) by scale factor 0.775807\nI1207 13:55:35.000648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2353 > 2) by scale factor 0.894733\nI1207 13:55:43.380275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87969 > 2) by scale factor 0.694519\nI1207 13:55:47.571815  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.09208 > 2) by scale factor 0.955986\nI1207 13:55:51.762159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5057 > 2) by scale factor 0.798181\nI1207 13:55:55.953579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57253 > 2) by scale factor 0.559827\nI1207 13:56:00.144024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29894 > 2) by scale factor 0.869967\nI1207 13:56:04.334633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6109 > 2) by scale factor 0.76602\nI1207 13:56:12.713498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37727 > 2) by scale factor 0.841302\nI1207 13:56:16.904662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40573 > 2) by scale factor 0.831349\nI1207 13:56:21.094741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06211 > 2) by scale factor 0.653144\nI1207 13:56:25.284708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40714 > 2) by scale factor 0.830862\nI1207 13:56:37.850843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04346 > 2) by scale factor 0.657147\nI1207 13:56:42.040999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24243 > 2) by scale factor 0.891889\nI1207 13:56:46.232138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56613 > 2) by scale factor 0.779384\nI1207 13:56:50.422017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30003 > 2) by scale factor 0.869552\nI1207 13:56:58.799801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66464 > 2) by scale factor 0.750571\nI1207 13:57:02.990965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23108 > 2) by 
scale factor 0.896428\nI1207 13:57:11.371707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79592 > 2) by scale factor 0.715329\nI1207 13:57:15.562235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12931 > 2) by scale factor 0.939271\nI1207 13:57:23.940783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26874 > 2) by scale factor 0.881546\nI1207 13:57:32.319383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74314 > 2) by scale factor 0.729092\nI1207 13:57:36.509850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44546 > 2) by scale factor 0.817842\nI1207 13:57:40.699899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66633 > 2) by scale factor 0.750095\nI1207 13:57:44.889715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1232 > 2) by scale factor 0.941975\nI1207 13:57:44.901659  1922 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1207 14:00:22.950991  1922 solver.cpp:404]     Test net output #0: accuracy = 0.196883\nI1207 14:00:22.951258  1922 solver.cpp:404]     Test net output #1: loss = 4.95085 (* 1 = 4.95085 loss)\nI1207 14:00:26.895381  1922 solver.cpp:228] Iteration 5200, loss = 5.00101\nI1207 14:00:26.895429  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 14:00:26.895447  1922 solver.cpp:244]     Train net output #1: loss = 5.00101 (* 1 = 5.00101 loss)\nI1207 14:00:27.132547  1922 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1207 14:00:27.142712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90775 > 2) by scale factor 0.687817\nI1207 14:00:31.334478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67706 > 2) by scale factor 0.543912\nI1207 14:00:35.525535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07028 > 2) by 
scale factor 0.651406\nI1207 14:00:39.716229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09256 > 2) by scale factor 0.955766\nI1207 14:00:52.285984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75711 > 2) by scale factor 0.725397\nI1207 14:00:56.476450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11396 > 2) by scale factor 0.48615\nI1207 14:01:00.669353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29636 > 2) by scale factor 0.870942\nI1207 14:01:04.860287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92908 > 2) by scale factor 0.682807\nI1207 14:01:09.052176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24618 > 2) by scale factor 0.61611\nI1207 14:01:13.243507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32052 > 2) by scale factor 0.861876\nI1207 14:01:17.435997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.378 > 2) by scale factor 0.841044\nI1207 14:01:21.627540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66192 > 2) by scale factor 0.751338\nI1207 14:01:25.819756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12213 > 2) by scale factor 0.640587\nI1207 14:01:34.201557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08423 > 2) by scale factor 0.959586\nI1207 14:01:38.392568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9585 > 2) by scale factor 0.676018\nI1207 14:01:42.584053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99869 > 2) by scale factor 0.666959\nI1207 14:01:46.775549  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4287 > 2) by scale factor 0.583312\nI1207 14:01:50.967049  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.33111 > 2) by scale factor 0.857959\nI1207 14:01:55.159127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13021 > 2) by scale factor 0.938873\nI1207 14:01:59.350675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7336 > 2) by scale factor 0.731635\nI1207 14:02:16.110342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83603 > 2) by scale factor 0.705212\nI1207 14:02:20.301517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26746 > 2) by scale factor 0.882045\nI1207 14:02:32.870992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58608 > 2) by scale factor 0.773372\nI1207 14:02:41.251516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44943 > 2) by scale factor 0.816515\nI1207 14:02:53.820801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51827 > 2) by scale factor 0.794196\nI1207 14:03:06.393100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32469 > 2) by scale factor 0.860329\nI1207 14:03:14.774111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02318 > 2) by scale factor 0.988542\nI1207 14:03:18.965548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01002 > 2) by scale factor 0.664448\nI1207 14:03:23.157547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76282 > 2) by scale factor 0.531516\nI1207 14:03:39.913053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68054 > 2) by scale factor 0.746119\nI1207 14:03:48.292197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02301 > 2) by scale factor 0.988628\nI1207 14:03:52.483800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77348 > 2) by 
scale factor 0.721116\nI1207 14:03:56.674398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47307 > 2) by scale factor 0.808712\nI1207 14:04:00.865325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37232 > 2) by scale factor 0.843056\nI1207 14:04:05.056983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12533 > 2) by scale factor 0.941032\nI1207 14:04:17.625296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85984 > 2) by scale factor 0.69934\nI1207 14:04:26.005072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30013 > 2) by scale factor 0.606036\nI1207 14:04:38.575027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39685 > 2) by scale factor 0.834428\nI1207 14:04:46.955487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52542 > 2) by scale factor 0.791949\nI1207 14:05:03.711597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30508 > 2) by scale factor 0.867648\nI1207 14:05:07.902330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83975 > 2) by scale factor 0.704288\nI1207 14:05:12.092636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05113 > 2) by scale factor 0.975074\nI1207 14:05:16.283756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39365 > 2) by scale factor 0.835543\nI1207 14:05:20.474979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48847 > 2) by scale factor 0.803707\nI1207 14:05:24.665318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5623 > 2) by scale factor 0.561435\nI1207 14:05:28.856632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81253 > 2) by scale factor 0.524587\nI1207 14:05:33.047952  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49565 > 2) by scale factor 0.801395\nI1207 14:05:41.427381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20023 > 2) by scale factor 0.908994\nI1207 14:05:45.618777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42946 > 2) by scale factor 0.823228\nI1207 14:05:49.810472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19833 > 2) by scale factor 0.909781\nI1207 14:05:54.001245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80444 > 2) by scale factor 0.713155\nI1207 14:05:58.192122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97946 > 2) by scale factor 0.671263\nI1207 14:06:02.383591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1963 > 2) by scale factor 0.625723\nI1207 14:06:10.764140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60639 > 2) by scale factor 0.767344\nI1207 14:06:14.954857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42045 > 2) by scale factor 0.826294\nI1207 14:06:19.147240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49984 > 2) by scale factor 0.800052\nI1207 14:06:23.338048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33192 > 2) by scale factor 0.857661\nI1207 14:06:27.529454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13051 > 2) by scale factor 0.638873\nI1207 14:06:31.720212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54562 > 2) by scale factor 0.785663\nI1207 14:06:35.910554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41831 > 2) by scale factor 0.585085\nI1207 14:06:40.101372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.71894 > 2) by scale factor 0.735581\nI1207 14:06:44.292847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9 > 2) by scale factor 0.512821\nI1207 14:06:48.483366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36793 > 2) by scale factor 0.593836\nI1207 14:06:52.673689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6068 > 2) by scale factor 0.554508\nI1207 14:06:56.863263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88255 > 2) by scale factor 0.693831\nI1207 14:07:01.054162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40974 > 2) by scale factor 0.829965\nI1207 14:07:05.244981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57038 > 2) by scale factor 0.778096\nI1207 14:07:09.435439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54702 > 2) by scale factor 0.439849\nI1207 14:07:13.626183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0809 > 2) by scale factor 0.649161\nI1207 14:07:17.817586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35247 > 2) by scale factor 0.85017\nI1207 14:07:22.009565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36507 > 2) by scale factor 0.845641\nI1207 14:07:22.021644  1922 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1207 14:09:59.618077  1922 solver.cpp:404]     Test net output #0: accuracy = 0.200882\nI1207 14:09:59.618305  1922 solver.cpp:404]     Test net output #1: loss = 6.78627 (* 1 = 6.78627 loss)\nI1207 14:10:03.562114  1922 solver.cpp:228] Iteration 5300, loss = 6.45369\nI1207 14:10:03.562155  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 14:10:03.562172  1922 solver.cpp:244]     Train net output #1: loss = 6.45369 (* 1 = 6.45369 loss)\nI1207 14:10:03.800534  1922 
sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1207 14:10:03.810720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01986 > 2) by scale factor 0.990166\nI1207 14:10:08.003160  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66794 > 2) by scale factor 0.749643\nI1207 14:10:12.195160  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67423 > 2) by scale factor 0.747878\nI1207 14:10:20.577142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0409 > 2) by scale factor 0.979961\nI1207 14:10:28.957952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9305 > 2) by scale factor 0.682478\nI1207 14:10:33.149866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21249 > 2) by scale factor 0.903957\nI1207 14:10:37.342478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3792 > 2) by scale factor 0.840619\nI1207 14:10:45.723098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22763 > 2) by scale factor 0.61965\nI1207 14:10:49.915357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69901 > 2) by scale factor 0.540686\nI1207 14:10:54.107455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76764 > 2) by scale factor 0.722637\nI1207 14:10:58.298923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06772 > 2) by scale factor 0.967247\nI1207 14:11:02.489725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68338 > 2) by scale factor 0.745329\nI1207 14:11:10.870636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67957 > 2) by scale factor 0.746389\nI1207 14:11:15.061151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87971 > 2) by scale factor 0.694515\nI1207 14:11:19.252398  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43148 > 2) by scale factor 0.582839\nI1207 14:11:23.444614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23815 > 2) by scale factor 0.893594\nI1207 14:11:27.636075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66228 > 2) by scale factor 0.751237\nI1207 14:11:31.827297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82483 > 2) by scale factor 0.708006\nI1207 14:11:36.019351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51732 > 2) by scale factor 0.794497\nI1207 14:11:40.210654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44459 > 2) by scale factor 0.580621\nI1207 14:11:44.401700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64302 > 2) by scale factor 0.756709\nI1207 14:11:48.594192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11423 > 2) by scale factor 0.642213\nI1207 14:11:52.785640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88021 > 2) by scale factor 0.694393\nI1207 14:11:56.977152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4473 > 2) by scale factor 0.817228\nI1207 14:12:01.167752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79861 > 2) by scale factor 0.71464\nI1207 14:12:05.360314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82551 > 2) by scale factor 0.522806\nI1207 14:12:09.552450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03828 > 2) by scale factor 0.98122\nI1207 14:12:13.742461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07154 > 2) by scale factor 0.965464\nI1207 14:12:17.934984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.33762 > 2) by scale factor 0.855572\nI1207 14:12:22.126583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49139 > 2) by scale factor 0.802764\nI1207 14:12:26.318049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32504 > 2) by scale factor 0.860202\nI1207 14:12:30.509774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39858 > 2) by scale factor 0.833827\nI1207 14:12:34.701182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27165 > 2) by scale factor 0.880416\nI1207 14:12:38.892923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9096 > 2) by scale factor 0.687379\nI1207 14:12:43.085302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29724 > 2) by scale factor 0.606568\nI1207 14:12:47.276865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62855 > 2) by scale factor 0.551185\nI1207 14:12:51.468169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1464 > 2) by scale factor 0.931794\nI1207 14:12:55.660147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25744 > 2) by scale factor 0.885958\nI1207 14:13:08.230195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44111 > 2) by scale factor 0.8193\nI1207 14:13:16.611088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26483 > 2) by scale factor 0.88307\nI1207 14:13:20.802477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18159 > 2) by scale factor 0.628617\nI1207 14:13:24.993643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70718 > 2) by scale factor 0.738777\nI1207 14:13:33.374116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06967 > 2) by scale factor 0.966338\nI1207 14:13:45.944406  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27833 > 2) by scale factor 0.610067\nI1207 14:13:58.516155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04017 > 2) by scale factor 0.980309\nI1207 14:14:02.708708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75889 > 2) by scale factor 0.72493\nI1207 14:14:19.466748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9225 > 2) by scale factor 0.684346\nI1207 14:14:27.847692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29231 > 2) by scale factor 0.607477\nI1207 14:14:44.605792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07715 > 2) by scale factor 0.96286\nI1207 14:14:48.796929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25752 > 2) by scale factor 0.885927\nI1207 14:14:52.988788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14219 > 2) by scale factor 0.6365\nI1207 14:14:57.181679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01907 > 2) by scale factor 0.990556\nI1207 14:15:01.374114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21534 > 2) by scale factor 0.902797\nI1207 14:15:05.564872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5756 > 2) by scale factor 0.776518\nI1207 14:15:09.756887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90787 > 2) by scale factor 0.687788\nI1207 14:15:18.138936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15511 > 2) by scale factor 0.928026\nI1207 14:15:22.329442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70499 > 2) by scale factor 0.739375\nI1207 14:15:26.520709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.26543 > 2) by scale factor 0.882836\nI1207 14:15:30.711876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01012 > 2) by scale factor 0.994966\nI1207 14:15:34.903095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81428 > 2) by scale factor 0.710661\nI1207 14:15:39.094969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41993 > 2) by scale factor 0.826468\nI1207 14:15:43.286437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19761 > 2) by scale factor 0.625467\nI1207 14:15:47.477391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46674 > 2) by scale factor 0.810787\nI1207 14:15:55.858176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05042 > 2) by scale factor 0.97541\nI1207 14:16:04.237715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7228 > 2) by scale factor 0.734537\nI1207 14:16:08.429139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17477 > 2) by scale factor 0.629967\nI1207 14:16:25.186383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60496 > 2) by scale factor 0.554791\nI1207 14:16:29.377020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07764 > 2) by scale factor 0.96263\nI1207 14:16:33.568543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15597 > 2) by scale factor 0.927656\nI1207 14:16:37.759924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36969 > 2) by scale factor 0.843994\nI1207 14:16:41.950356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28328 > 2) by scale factor 0.875934\nI1207 14:16:46.142318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57977 > 2) by scale factor 0.775264\nI1207 14:16:50.332864  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9 > 2) by scale factor 0.689656\nI1207 14:16:54.524044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81835 > 2) by scale factor 0.709636\nI1207 14:16:58.714184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33431 > 2) by scale factor 0.599823\nI1207 14:16:58.726047  1922 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1207 14:19:36.711222  1922 solver.cpp:404]     Test net output #0: accuracy = 0.237118\nI1207 14:19:36.711490  1922 solver.cpp:404]     Test net output #1: loss = 5.83122 (* 1 = 5.83122 loss)\nI1207 14:19:40.652506  1922 solver.cpp:228] Iteration 5400, loss = 5.48363\nI1207 14:19:40.652554  1922 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1207 14:19:40.652571  1922 solver.cpp:244]     Train net output #1: loss = 5.48363 (* 1 = 5.48363 loss)\nI1207 14:19:40.985723  1922 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1207 14:19:40.995915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5199 > 2) by scale factor 0.793681\nI1207 14:19:45.188078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08564 > 2) by scale factor 0.958939\nI1207 14:19:49.381515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34922 > 2) by scale factor 0.851347\nI1207 14:19:53.574806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84681 > 2) by scale factor 0.70254\nI1207 14:19:57.767395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93416 > 2) by scale factor 0.681626\nI1207 14:20:01.960850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25689 > 2) by scale factor 0.886177\nI1207 14:20:06.154228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03913 > 2) by scale factor 0.980809\nI1207 14:20:10.345471  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97408 > 2) by scale factor 0.503261\nI1207 14:20:14.537765  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6111 > 2) by scale factor 0.76596\nI1207 14:20:22.918833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84735 > 2) by scale factor 0.702407\nI1207 14:20:27.110913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34184 > 2) by scale factor 0.854029\nI1207 14:20:31.302292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48095 > 2) by scale factor 0.806142\nI1207 14:20:35.494129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9139 > 2) by scale factor 0.686366\nI1207 14:20:48.063503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46472 > 2) by scale factor 0.577247\nI1207 14:20:52.255489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86891 > 2) by scale factor 0.516941\nI1207 14:20:56.446883  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73663 > 2) by scale factor 0.730826\nI1207 14:21:04.828735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00228 > 2) by scale factor 0.998861\nI1207 14:21:09.021750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18319 > 2) by scale factor 0.916092\nI1207 14:21:13.215064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18216 > 2) by scale factor 0.916523\nI1207 14:21:17.406695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43579 > 2) by scale factor 0.582108\nI1207 14:21:21.597720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24671 > 2) by scale factor 0.890191\nI1207 14:21:25.790902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.32167 > 2) by scale factor 0.861449\nI1207 14:21:29.982484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66322 > 2) by scale factor 0.75097\nI1207 14:21:38.363543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49279 > 2) by scale factor 0.802313\nI1207 14:21:42.555727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37954 > 2) by scale factor 0.591796\nI1207 14:21:55.126854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57478 > 2) by scale factor 0.776765\nI1207 14:21:59.318783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3909 > 2) by scale factor 0.589814\nI1207 14:22:03.509951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65531 > 2) by scale factor 0.429617\nI1207 14:22:07.702376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80647 > 2) by scale factor 0.525421\nI1207 14:22:11.893599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98682 > 2) by scale factor 0.669609\nI1207 14:22:16.085551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73482 > 2) by scale factor 0.731309\nI1207 14:22:20.278215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34292 > 2) by scale factor 0.853636\nI1207 14:22:24.469267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59125 > 2) by scale factor 0.771828\nI1207 14:22:28.660380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02814 > 2) by scale factor 0.660472\nI1207 14:22:32.852007  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10229 > 2) by scale factor 0.951342\nI1207 14:22:41.232728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73982 > 2) by scale factor 0.534785\nI1207 14:22:45.423964  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23356 > 2) by scale factor 0.618513\nI1207 14:22:49.615650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22662 > 2) by scale factor 0.898224\nI1207 14:22:53.806429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37261 > 2) by scale factor 0.842953\nI1207 14:22:57.998159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40013 > 2) by scale factor 0.833288\nI1207 14:23:06.379405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47945 > 2) by scale factor 0.806632\nI1207 14:23:10.572005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27454 > 2) by scale factor 0.879298\nI1207 14:23:14.764101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30167 > 2) by scale factor 0.868936\nI1207 14:23:18.955746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19844 > 2) by scale factor 0.909737\nI1207 14:23:23.146987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30978 > 2) by scale factor 0.865885\nI1207 14:23:27.339067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24657 > 2) by scale factor 0.890245\nI1207 14:23:31.531018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46087 > 2) by scale factor 0.81272\nI1207 14:23:35.723289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50067 > 2) by scale factor 0.57132\nI1207 14:23:39.913879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06169 > 2) by scale factor 0.97008\nI1207 14:23:44.105327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3362 > 2) by scale factor 0.856092\nI1207 14:23:48.296942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.39908 > 2) by scale factor 0.833654\nI1207 14:23:52.487318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33681 > 2) by scale factor 0.855869\nI1207 14:23:56.678138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55438 > 2) by scale factor 0.782969\nI1207 14:24:00.869199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32913 > 2) by scale factor 0.858692\nI1207 14:24:09.251240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46603 > 2) by scale factor 0.811021\nI1207 14:24:26.010442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87065 > 2) by scale factor 0.696707\nI1207 14:24:30.201792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16636 > 2) by scale factor 0.923206\nI1207 14:24:34.393513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6176 > 2) by scale factor 0.552852\nI1207 14:24:38.585521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82773 > 2) by scale factor 0.707282\nI1207 14:24:42.777729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98678 > 2) by scale factor 0.669617\nI1207 14:24:46.969334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81645 > 2) by scale factor 0.710115\nI1207 14:24:55.350797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37807 > 2) by scale factor 0.841018\nI1207 14:24:59.543087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22325 > 2) by scale factor 0.620492\nI1207 14:25:03.734635  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69662 > 2) by scale factor 0.74167\nI1207 14:25:07.925282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79461 > 2) by scale factor 0.715664\nI1207 14:25:12.115854  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15251 > 2) by scale factor 0.929149\nI1207 14:25:20.496644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01834 > 2) by scale factor 0.990915\nI1207 14:25:24.686949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81263 > 2) by scale factor 0.711079\nI1207 14:25:33.067366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60566 > 2) by scale factor 0.767559\nI1207 14:25:41.447861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33632 > 2) by scale factor 0.856047\nI1207 14:25:54.015523  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22339 > 2) by scale factor 0.620465\nI1207 14:26:06.584354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18175 > 2) by scale factor 0.916697\nI1207 14:26:14.963909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2952 > 2) by scale factor 0.871385\nI1207 14:26:19.154973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23685 > 2) by scale factor 0.894113\nI1207 14:26:27.535471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77923 > 2) by scale factor 0.719623\nI1207 14:26:31.725726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77472 > 2) by scale factor 0.720793\nI1207 14:26:35.925976  1922 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1207 14:29:12.825067  1922 solver.cpp:404]     Test net output #0: accuracy = 0.186412\nI1207 14:29:12.825332  1922 solver.cpp:404]     Test net output #1: loss = 4.01539 (* 1 = 4.01539 loss)\nI1207 14:29:16.768003  1922 solver.cpp:228] Iteration 5500, loss = 4.01706\nI1207 14:29:16.768049  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 14:29:16.768065  1922 solver.cpp:244]     Train net output #1: 
loss = 4.01706 (* 1 = 4.01706 loss)\nI1207 14:29:17.003422  1922 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1207 14:29:17.013630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64062 > 2) by scale factor 0.757396\nI1207 14:29:21.205844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67872 > 2) by scale factor 0.746625\nI1207 14:29:25.396908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75055 > 2) by scale factor 0.727128\nI1207 14:29:33.778414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43231 > 2) by scale factor 0.822263\nI1207 14:29:37.969542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38287 > 2) by scale factor 0.839326\nI1207 14:29:42.161550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23623 > 2) by scale factor 0.894361\nI1207 14:29:46.353145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93865 > 2) by scale factor 0.507789\nI1207 14:29:50.545006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5697 > 2) by scale factor 0.560272\nI1207 14:29:54.735512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68416 > 2) by scale factor 0.745112\nI1207 14:29:58.926578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42054 > 2) by scale factor 0.826261\nI1207 14:30:03.117120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94515 > 2) by scale factor 0.679082\nI1207 14:30:07.308841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90162 > 2) by scale factor 0.512608\nI1207 14:30:11.499979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88692 > 2) by scale factor 0.514546\nI1207 14:30:15.690943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.90356 > 2) by scale factor 0.68881\nI1207 14:30:19.882268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5869 > 2) by scale factor 0.773125\nI1207 14:30:24.073742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30605 > 2) by scale factor 0.604952\nI1207 14:30:28.265288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09853 > 2) by scale factor 0.953048\nI1207 14:30:32.456650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6387 > 2) by scale factor 0.757949\nI1207 14:30:36.647279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65432 > 2) by scale factor 0.753489\nI1207 14:30:40.839289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70968 > 2) by scale factor 0.738094\nI1207 14:30:45.031392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61928 > 2) by scale factor 0.763569\nI1207 14:30:57.599788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67261 > 2) by scale factor 0.748332\nI1207 14:31:01.792001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57399 > 2) by scale factor 0.777004\nI1207 14:31:05.984151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35935 > 2) by scale factor 0.84769\nI1207 14:31:10.176220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48451 > 2) by scale factor 0.804987\nI1207 14:31:14.366899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59938 > 2) by scale factor 0.769415\nI1207 14:31:18.558270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40759 > 2) by scale factor 0.586925\nI1207 14:31:22.749671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39319 > 2) by scale factor 0.835705\nI1207 14:31:26.940987  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90904 > 2) by scale factor 0.687513\nI1207 14:31:31.132160  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17609 > 2) by scale factor 0.629705\nI1207 14:31:35.324473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43428 > 2) by scale factor 0.582364\nI1207 14:31:39.515985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76815 > 2) by scale factor 0.722504\nI1207 14:31:43.706981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49023 > 2) by scale factor 0.803139\nI1207 14:31:47.898636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05596 > 2) by scale factor 0.972781\nI1207 14:31:52.091074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54334 > 2) by scale factor 0.786368\nI1207 14:31:56.282830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75857 > 2) by scale factor 0.725014\nI1207 14:32:00.473835  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42959 > 2) by scale factor 0.823185\nI1207 14:32:04.666592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8931 > 2) by scale factor 0.691301\nI1207 14:32:08.857476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00874 > 2) by scale factor 0.66473\nI1207 14:32:13.048552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64269 > 2) by scale factor 0.756804\nI1207 14:32:21.426829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19784 > 2) by scale factor 0.909985\nI1207 14:32:25.617945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64889 > 2) by scale factor 0.755033\nI1207 14:32:29.808827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.12911 > 2) by scale factor 0.93936\nI1207 14:32:34.000598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2355 > 2) by scale factor 0.894653\nI1207 14:32:38.190855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34907 > 2) by scale factor 0.8514\nI1207 14:33:03.323789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1185 > 2) by scale factor 0.944064\nI1207 14:33:11.704113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49399 > 2) by scale factor 0.801927\nI1207 14:33:24.275272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61024 > 2) by scale factor 0.766213\nI1207 14:33:28.466161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04746 > 2) by scale factor 0.656285\nI1207 14:33:36.846451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26159 > 2) by scale factor 0.884334\nI1207 14:33:41.036979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82029 > 2) by scale factor 0.709146\nI1207 14:33:45.227018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00732 > 2) by scale factor 0.996354\nI1207 14:33:49.417943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39319 > 2) by scale factor 0.835704\nI1207 14:33:53.608980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16407 > 2) by scale factor 0.924185\nI1207 14:33:57.800408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22298 > 2) by scale factor 0.899695\nI1207 14:34:14.560809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06845 > 2) by scale factor 0.966906\nI1207 14:34:22.940475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18131 > 2) by scale factor 0.916879\nI1207 14:34:52.265856  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87149 > 2) by scale factor 0.516597\nI1207 14:34:56.457079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91351 > 2) by scale factor 0.686457\nI1207 14:35:13.215953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53028 > 2) by scale factor 0.790425\nI1207 14:35:17.407279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02697 > 2) by scale factor 0.986694\nI1207 14:35:21.598551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14098 > 2) by scale factor 0.934151\nI1207 14:35:29.978713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29766 > 2) by scale factor 0.870452\nI1207 14:35:34.169719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71665 > 2) by scale factor 0.736202\nI1207 14:35:38.361445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13404 > 2) by scale factor 0.638153\nI1207 14:35:42.551733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36031 > 2) by scale factor 0.595183\nI1207 14:35:46.742715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86463 > 2) by scale factor 0.69817\nI1207 14:35:50.933409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90509 > 2) by scale factor 0.688447\nI1207 14:35:55.124788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71282 > 2) by scale factor 0.737239\nI1207 14:36:03.502472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92391 > 2) by scale factor 0.684015\nI1207 14:36:11.881093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76298 > 2) by scale factor 0.723857\nI1207 14:36:11.892940  1922 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1207 
14:38:49.961849  1922 solver.cpp:404]     Test net output #0: accuracy = 0.225588\nI1207 14:38:49.962124  1922 solver.cpp:404]     Test net output #1: loss = 3.66029 (* 1 = 3.66029 loss)\nI1207 14:38:53.906350  1922 solver.cpp:228] Iteration 5600, loss = 3.31258\nI1207 14:38:53.906394  1922 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1207 14:38:53.906411  1922 solver.cpp:244]     Train net output #1: loss = 3.31258 (* 1 = 3.31258 loss)\nI1207 14:38:54.143069  1922 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1207 14:38:54.153105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41545 > 2) by scale factor 0.828003\nI1207 14:39:02.531750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15895 > 2) by scale factor 0.926375\nI1207 14:39:06.722584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48091 > 2) by scale factor 0.806157\nI1207 14:39:15.101982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25686 > 2) by scale factor 0.886187\nI1207 14:39:19.292354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00111 > 2) by scale factor 0.999445\nI1207 14:39:36.047374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05507 > 2) by scale factor 0.49321\nI1207 14:39:40.239598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06568 > 2) by scale factor 0.968205\nI1207 14:39:44.430250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11274 > 2) by scale factor 0.946637\nI1207 14:39:48.620782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91998 > 2) by scale factor 0.684937\nI1207 14:39:56.999644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53347 > 2) by scale factor 0.78943\nI1207 14:40:01.189839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.03212 > 2) by scale factor 0.984195\nI1207 14:40:05.380650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78689 > 2) by scale factor 0.717645\nI1207 14:40:09.570948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48363 > 2) by scale factor 0.805273\nI1207 14:40:17.948781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56954 > 2) by scale factor 0.778348\nI1207 14:40:22.138849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18874 > 2) by scale factor 0.627206\nI1207 14:40:26.329211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96175 > 2) by scale factor 0.675277\nI1207 14:40:34.707303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40996 > 2) by scale factor 0.829889\nI1207 14:40:43.084565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17753 > 2) by scale factor 0.918471\nI1207 14:40:47.275259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16935 > 2) by scale factor 0.631044\nI1207 14:40:55.651134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4395 > 2) by scale factor 0.81984\nI1207 14:41:04.028901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25056 > 2) by scale factor 0.615279\nI1207 14:41:08.217947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30076 > 2) by scale factor 0.86928\nI1207 14:41:12.406527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04685 > 2) by scale factor 0.977112\nI1207 14:41:16.594660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53358 > 2) by scale factor 0.565998\nI1207 14:41:20.785455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30304 > 2) by scale factor 0.868417\nI1207 
14:41:24.975143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37905 > 2) by scale factor 0.840673\nI1207 14:41:29.164335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34908 > 2) by scale factor 0.851397\nI1207 14:41:33.353940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36637 > 2) by scale factor 0.594111\nI1207 14:41:37.544041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68071 > 2) by scale factor 0.74607\nI1207 14:41:41.734349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59688 > 2) by scale factor 0.770156\nI1207 14:41:45.924721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54725 > 2) by scale factor 0.78516\nI1207 14:41:50.114044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25157 > 2) by scale factor 0.888269\nI1207 14:41:54.305138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41335 > 2) by scale factor 0.585935\nI1207 14:41:58.495611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87029 > 2) by scale factor 0.696794\nI1207 14:42:02.687448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48466 > 2) by scale factor 0.804939\nI1207 14:42:06.877552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64699 > 2) by scale factor 0.755576\nI1207 14:42:11.067656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2086 > 2) by scale factor 0.623325\nI1207 14:42:15.258496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82265 > 2) by scale factor 0.708553\nI1207 14:42:19.447557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69317 > 2) by scale factor 0.742619\nI1207 14:42:23.637387  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.19567 > 2) by scale factor 0.910882\nI1207 14:42:27.828855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32472 > 2) by scale factor 0.601554\nI1207 14:42:32.019318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45222 > 2) by scale factor 0.579338\nI1207 14:42:36.208174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87514 > 2) by scale factor 0.695619\nI1207 14:42:40.397747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69596 > 2) by scale factor 0.741852\nI1207 14:42:44.588057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22531 > 2) by scale factor 0.620095\nI1207 14:42:48.777575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59201 > 2) by scale factor 0.556791\nI1207 14:42:52.967398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2955 > 2) by scale factor 0.606887\nI1207 14:42:57.157332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22176 > 2) by scale factor 0.900187\nI1207 14:43:01.347859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88993 > 2) by scale factor 0.692058\nI1207 14:43:05.538245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45785 > 2) by scale factor 0.813719\nI1207 14:43:09.727721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73664 > 2) by scale factor 0.730824\nI1207 14:43:13.918048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32172 > 2) by scale factor 0.861432\nI1207 14:43:18.107914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43052 > 2) by scale factor 0.583002\nI1207 14:43:22.297704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17187 > 2) by scale factor 0.920866\nI1207 
14:43:30.676177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51653 > 2) by scale factor 0.794745\nI1207 14:43:34.866821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63723 > 2) by scale factor 0.549868\nI1207 14:43:43.243860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58962 > 2) by scale factor 0.772313\nI1207 14:43:55.808398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13963 > 2) by scale factor 0.637018\nI1207 14:43:59.998620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72949 > 2) by scale factor 0.732738\nI1207 14:44:04.187974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31573 > 2) by scale factor 0.603186\nI1207 14:44:08.379694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29475 > 2) by scale factor 0.607027\nI1207 14:44:12.568923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23869 > 2) by scale factor 0.617533\nI1207 14:44:16.759491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14005 > 2) by scale factor 0.636933\nI1207 14:44:20.950304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56623 > 2) by scale factor 0.779352\nI1207 14:44:25.141489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30231 > 2) by scale factor 0.605637\nI1207 14:44:29.331459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63392 > 2) by scale factor 0.759323\nI1207 14:44:33.521245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35462 > 2) by scale factor 0.596193\nI1207 14:44:37.711714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93585 > 2) by scale factor 0.681233\nI1207 14:44:41.902300  1922 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 3.40892 > 2) by scale factor 0.586697\nI1207 14:44:46.091912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47407 > 2) by scale factor 0.575693\nI1207 14:44:50.281792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45655 > 2) by scale factor 0.814151\nI1207 14:44:54.470932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11498 > 2) by scale factor 0.945635\nI1207 14:44:58.661343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56478 > 2) by scale factor 0.779795\nI1207 14:45:02.850915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06473 > 2) by scale factor 0.492037\nI1207 14:45:07.040906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1159 > 2) by scale factor 0.641868\nI1207 14:45:11.230306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45216 > 2) by scale factor 0.815607\nI1207 14:45:15.421599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58526 > 2) by scale factor 0.773617\nI1207 14:45:19.611770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20853 > 2) by scale factor 0.905582\nI1207 14:45:23.802256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84123 > 2) by scale factor 0.70392\nI1207 14:45:27.993067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40445 > 2) by scale factor 0.83179\nI1207 14:45:32.183815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07358 > 2) by scale factor 0.964515\nI1207 14:45:40.561738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4709 > 2) by scale factor 0.809422\nI1207 14:45:44.752032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4182 > 2) by scale factor 
0.82706\nI1207 14:45:48.942664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28288 > 2) by scale factor 0.876085\nI1207 14:45:48.954460  1922 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1207 14:48:26.988175  1922 solver.cpp:404]     Test net output #0: accuracy = 0.175177\nI1207 14:48:26.988431  1922 solver.cpp:404]     Test net output #1: loss = 3.90964 (* 1 = 3.90964 loss)\nI1207 14:48:30.930603  1922 solver.cpp:228] Iteration 5700, loss = 4.50849\nI1207 14:48:30.930645  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 14:48:30.930660  1922 solver.cpp:244]     Train net output #1: loss = 4.50849 (* 1 = 4.50849 loss)\nI1207 14:48:31.172855  1922 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1207 14:48:31.182994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58845 > 2) by scale factor 0.772664\nI1207 14:48:35.375713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08106 > 2) by scale factor 0.961047\nI1207 14:48:39.567849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22861 > 2) by scale factor 0.89742\nI1207 14:48:47.949518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7145 > 2) by scale factor 0.736785\nI1207 14:48:52.141643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51643 > 2) by scale factor 0.794775\nI1207 14:49:00.523888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46665 > 2) by scale factor 0.810817\nI1207 14:49:04.716373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88599 > 2) by scale factor 0.693004\nI1207 14:49:08.908419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34454 > 2) by scale factor 0.853046\nI1207 14:49:13.100404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14573 > 2) by scale factor 
0.932086\nI1207 14:49:17.292629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12192 > 2) by scale factor 0.942541\nI1207 14:49:21.484268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89718 > 2) by scale factor 0.690327\nI1207 14:49:25.676975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46659 > 2) by scale factor 0.576936\nI1207 14:49:29.869112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10866 > 2) by scale factor 0.643364\nI1207 14:49:42.440783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26701 > 2) by scale factor 0.612181\nI1207 14:49:46.633051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24222 > 2) by scale factor 0.891972\nI1207 14:49:55.012650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26982 > 2) by scale factor 0.881127\nI1207 14:49:59.204138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29795 > 2) by scale factor 0.87034\nI1207 14:50:15.963547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50997 > 2) by scale factor 0.796821\nI1207 14:50:36.914150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44904 > 2) by scale factor 0.816648\nI1207 14:50:49.483682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37089 > 2) by scale factor 0.843564\nI1207 14:50:57.865046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63913 > 2) by scale factor 0.757827\nI1207 14:51:02.057488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72698 > 2) by scale factor 0.733412\nI1207 14:51:06.249714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13326 > 2) by scale factor 0.937532\nI1207 14:51:10.442142  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.09166 > 2) by scale factor 0.95618\nI1207 14:51:14.633996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19999 > 2) by scale factor 0.909093\nI1207 14:51:18.826427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48453 > 2) by scale factor 0.80498\nI1207 14:51:23.018023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86101 > 2) by scale factor 0.699054\nI1207 14:51:27.209924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66562 > 2) by scale factor 0.750295\nI1207 14:51:35.590622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39266 > 2) by scale factor 0.83589\nI1207 14:51:39.782668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71293 > 2) by scale factor 0.737212\nI1207 14:51:48.164124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43447 > 2) by scale factor 0.582332\nI1207 14:51:52.354988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42152 > 2) by scale factor 0.825927\nI1207 14:51:56.545516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9049 > 2) by scale factor 0.688492\nI1207 14:52:00.737540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15634 > 2) by scale factor 0.927497\nI1207 14:52:04.928967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88359 > 2) by scale factor 0.693581\nI1207 14:52:09.120285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3729 > 2) by scale factor 0.842849\nI1207 14:52:13.313048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8729 > 2) by scale factor 0.69616\nI1207 14:52:17.504637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14708 > 2) by scale factor 
0.931499\nI1207 14:52:21.696841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63151 > 2) by scale factor 0.76002\nI1207 14:52:25.887612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97771 > 2) by scale factor 0.671658\nI1207 14:52:30.078722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3409 > 2) by scale factor 0.854372\nI1207 14:52:34.271595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66325 > 2) by scale factor 0.545963\nI1207 14:52:38.463543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05091 > 2) by scale factor 0.493717\nI1207 14:52:42.654731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41579 > 2) by scale factor 0.585517\nI1207 14:52:46.845700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95677 > 2) by scale factor 0.676413\nI1207 14:52:51.036672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47056 > 2) by scale factor 0.809535\nI1207 14:52:59.417829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77779 > 2) by scale factor 0.719997\nI1207 14:53:03.609266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60662 > 2) by scale factor 0.767277\nI1207 14:53:07.801890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36577 > 2) by scale factor 0.845391\nI1207 14:53:11.992615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12603 > 2) by scale factor 0.94072\nI1207 14:53:16.184650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12185 > 2) by scale factor 0.942572\nI1207 14:53:20.376513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13896 > 2) by scale factor 0.637153\nI1207 14:53:24.568486  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.05895 > 2) by scale factor 0.971371\nI1207 14:53:28.759909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36966 > 2) by scale factor 0.593532\nI1207 14:53:32.952311  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51105 > 2) by scale factor 0.796478\nI1207 14:53:37.144491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00486 > 2) by scale factor 0.665588\nI1207 14:53:41.336653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67285 > 2) by scale factor 0.544536\nI1207 14:53:45.528239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43706 > 2) by scale factor 0.82066\nI1207 14:53:49.719928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02518 > 2) by scale factor 0.987566\nI1207 14:53:53.911018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14121 > 2) by scale factor 0.636698\nI1207 14:53:58.102032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20881 > 2) by scale factor 0.623284\nI1207 14:54:02.293829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58856 > 2) by scale factor 0.557327\nI1207 14:54:06.485554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90117 > 2) by scale factor 0.512666\nI1207 14:54:19.057260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07655 > 2) by scale factor 0.963135\nI1207 14:54:31.628751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45902 > 2) by scale factor 0.813331\nI1207 14:54:44.199710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20755 > 2) by scale factor 0.905981\nI1207 14:54:48.391610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43248 > 2) by scale 
factor 0.822207\nI1207 14:54:52.583173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68261 > 2) by scale factor 0.745543\nI1207 14:54:56.774538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18813 > 2) by scale factor 0.627328\nI1207 14:55:00.966307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95098 > 2) by scale factor 0.506203\nI1207 14:55:05.159142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35177 > 2) by scale factor 0.850422\nI1207 14:55:09.351047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87613 > 2) by scale factor 0.695379\nI1207 14:55:13.542194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47888 > 2) by scale factor 0.574898\nI1207 14:55:17.734050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21044 > 2) by scale factor 0.904797\nI1207 14:55:21.925823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05555 > 2) by scale factor 0.972976\nI1207 14:55:26.118095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59518 > 2) by scale factor 0.770659\nI1207 14:55:26.130195  1922 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1207 14:58:04.193125  1922 solver.cpp:404]     Test net output #0: accuracy = 0.208765\nI1207 14:58:04.193353  1922 solver.cpp:404]     Test net output #1: loss = 5.34595 (* 1 = 5.34595 loss)\nI1207 14:58:08.135007  1922 solver.cpp:228] Iteration 5800, loss = 6.33236\nI1207 14:58:08.135053  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 14:58:08.135069  1922 solver.cpp:244]     Train net output #1: loss = 6.33236 (* 1 = 6.33236 loss)\nI1207 14:58:08.378232  1922 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1207 14:58:08.388399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25445 > 2) by scale 
factor 0.887136\nI1207 14:58:12.581182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52478 > 2) by scale factor 0.792147\nI1207 14:58:16.774015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64366 > 2) by scale factor 0.756526\nI1207 14:58:29.347456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79375 > 2) by scale factor 0.715883\nI1207 14:58:33.537617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36736 > 2) by scale factor 0.593938\nI1207 14:58:37.729220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57806 > 2) by scale factor 0.775777\nI1207 14:58:41.921921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91221 > 2) by scale factor 0.686764\nI1207 14:58:46.114145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07808 > 2) by scale factor 0.649755\nI1207 14:58:50.304270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70217 > 2) by scale factor 0.740145\nI1207 14:58:54.495967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67053 > 2) by scale factor 0.748916\nI1207 14:58:58.687228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25333 > 2) by scale factor 0.887576\nI1207 14:59:07.066990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.711 > 2) by scale factor 0.737734\nI1207 14:59:11.258476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37733 > 2) by scale factor 0.592183\nI1207 14:59:15.450953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05668 > 2) by scale factor 0.97244\nI1207 14:59:19.642262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77935 > 2) by scale factor 0.719593\nI1207 14:59:23.832625  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.73932 > 2) by scale factor 0.534856\nI1207 14:59:36.404625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02242 > 2) by scale factor 0.988915\nI1207 14:59:40.597594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02826 > 2) by scale factor 0.986069\nI1207 14:59:44.789294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54026 > 2) by scale factor 0.78732\nI1207 14:59:48.980022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39277 > 2) by scale factor 0.589488\nI1207 14:59:53.171638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69547 > 2) by scale factor 0.741985\nI1207 14:59:57.362458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61046 > 2) by scale factor 0.766149\nI1207 15:00:01.553510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42106 > 2) by scale factor 0.584615\nI1207 15:00:05.745822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61268 > 2) by scale factor 0.765497\nI1207 15:00:09.936012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52338 > 2) by scale factor 0.792589\nI1207 15:00:14.127173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5521 > 2) by scale factor 0.783667\nI1207 15:00:18.317536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21592 > 2) by scale factor 0.902561\nI1207 15:00:22.508903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37286 > 2) by scale factor 0.842863\nI1207 15:00:26.699139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01054 > 2) by scale factor 0.994757\nI1207 15:00:30.890280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16566 > 2) by 
scale factor 0.923504\nI1207 15:00:35.081297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52469 > 2) by scale factor 0.442019\nI1207 15:00:39.273700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22862 > 2) by scale factor 0.61946\nI1207 15:00:43.464972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24467 > 2) by scale factor 0.616395\nI1207 15:00:47.656733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25561 > 2) by scale factor 0.886678\nI1207 15:00:51.847163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56572 > 2) by scale factor 0.779507\nI1207 15:00:56.039225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62545 > 2) by scale factor 0.761774\nI1207 15:01:00.231124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68288 > 2) by scale factor 0.745467\nI1207 15:01:04.422425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94253 > 2) by scale factor 0.679686\nI1207 15:01:08.612746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08327 > 2) by scale factor 0.960029\nI1207 15:01:16.993700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38622 > 2) by scale factor 0.838147\nI1207 15:01:21.184873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72092 > 2) by scale factor 0.735045\nI1207 15:01:25.376055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.501 > 2) by scale factor 0.571266\nI1207 15:01:29.567909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42488 > 2) by scale factor 0.824783\nI1207 15:01:33.759474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48692 > 2) by scale factor 0.804208\nI1207 15:01:37.950598  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.66727 > 2) by scale factor 0.749831\nI1207 15:01:42.141548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20419 > 2) by scale factor 0.624183\nI1207 15:01:46.332525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14431 > 2) by scale factor 0.48259\nI1207 15:01:50.522754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28326 > 2) by scale factor 0.875941\nI1207 15:02:03.089901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75647 > 2) by scale factor 0.725565\nI1207 15:02:07.281345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48934 > 2) by scale factor 0.573175\nI1207 15:02:15.663489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93184 > 2) by scale factor 0.682165\nI1207 15:02:19.854972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63426 > 2) by scale factor 0.550318\nI1207 15:02:40.803417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66118 > 2) by scale factor 0.751548\nI1207 15:02:44.994946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04748 > 2) by scale factor 0.976809\nI1207 15:03:10.133461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97675 > 2) by scale factor 0.671873\nI1207 15:03:14.324810  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17197 > 2) by scale factor 0.920821\nI1207 15:03:18.516338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78851 > 2) by scale factor 0.717229\nI1207 15:03:31.086890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28779 > 2) by scale factor 0.874207\nI1207 15:03:39.468219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35112 > 2) by 
scale factor 0.850659\nI1207 15:03:43.659193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35145 > 2) by scale factor 0.596756\nI1207 15:03:47.851702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81008 > 2) by scale factor 0.711723\nI1207 15:03:52.044148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61782 > 2) by scale factor 0.763995\nI1207 15:03:56.234169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07138 > 2) by scale factor 0.965542\nI1207 15:04:00.425727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55717 > 2) by scale factor 0.782114\nI1207 15:04:04.617605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64436 > 2) by scale factor 0.756326\nI1207 15:04:12.996455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04206 > 2) by scale factor 0.979401\nI1207 15:04:17.187985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96322 > 2) by scale factor 0.674942\nI1207 15:04:21.379746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3999 > 2) by scale factor 0.588252\nI1207 15:04:25.570870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88701 > 2) by scale factor 0.514534\nI1207 15:04:29.762410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81205 > 2) by scale factor 0.711226\nI1207 15:04:33.953836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04886 > 2) by scale factor 0.655984\nI1207 15:04:38.145874  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10475 > 2) by scale factor 0.644175\nI1207 15:04:42.337079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56545 > 2) by scale factor 0.77959\nI1207 15:04:46.527611  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7768 > 2) by scale factor 0.529549\nI1207 15:04:50.719786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25981 > 2) by scale factor 0.885029\nI1207 15:04:54.909337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26288 > 2) by scale factor 0.883829\nI1207 15:04:59.101265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55255 > 2) by scale factor 0.783529\nI1207 15:05:03.302225  1922 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1207 15:07:40.859360  1922 solver.cpp:404]     Test net output #0: accuracy = 0.218706\nI1207 15:07:40.859628  1922 solver.cpp:404]     Test net output #1: loss = 4.06979 (* 1 = 4.06979 loss)\nI1207 15:07:44.802202  1922 solver.cpp:228] Iteration 5900, loss = 4.05092\nI1207 15:07:44.802248  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 15:07:44.802265  1922 solver.cpp:244]     Train net output #1: loss = 4.05092 (* 1 = 4.05092 loss)\nI1207 15:07:45.039883  1922 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1207 15:07:45.049962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17914 > 2) by scale factor 0.917795\nI1207 15:07:49.243281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83726 > 2) by scale factor 0.521205\nI1207 15:07:53.436790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00257 > 2) by scale factor 0.666096\nI1207 15:07:57.629083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27828 > 2) by scale factor 0.877856\nI1207 15:08:01.822388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31945 > 2) by scale factor 0.862274\nI1207 15:08:06.014716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51919 > 2) by scale factor 0.793904\nI1207 15:08:14.397076  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25813 > 2) by scale factor 0.88569\nI1207 15:08:18.589709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40224 > 2) by scale factor 0.587848\nI1207 15:08:22.781860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54443 > 2) by scale factor 0.564266\nI1207 15:08:26.975217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77795 > 2) by scale factor 0.719954\nI1207 15:08:31.167989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58026 > 2) by scale factor 0.775115\nI1207 15:08:35.360893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20707 > 2) by scale factor 0.906178\nI1207 15:08:43.743475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37036 > 2) by scale factor 0.843754\nI1207 15:08:47.935442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26316 > 2) by scale factor 0.883721\nI1207 15:08:52.128942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15756 > 2) by scale factor 0.6334\nI1207 15:08:56.321419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17578 > 2) by scale factor 0.919212\nI1207 15:09:00.514031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31365 > 2) by scale factor 0.864435\nI1207 15:09:04.706606  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4795 > 2) by scale factor 0.574796\nI1207 15:09:08.898844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00371 > 2) by scale factor 0.998149\nI1207 15:09:13.090756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32719 > 2) by scale factor 0.859405\nI1207 15:09:17.281885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.36881 > 2) by scale factor 0.844304\nI1207 15:09:21.474807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60172 > 2) by scale factor 0.555291\nI1207 15:09:25.666785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14331 > 2) by scale factor 0.933138\nI1207 15:09:29.858234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7252 > 2) by scale factor 0.733892\nI1207 15:09:34.050648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12998 > 2) by scale factor 0.938978\nI1207 15:09:38.242907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78879 > 2) by scale factor 0.717157\nI1207 15:09:42.435091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64907 > 2) by scale factor 0.754983\nI1207 15:09:46.627370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14233 > 2) by scale factor 0.933563\nI1207 15:09:55.010602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01664 > 2) by scale factor 0.991751\nI1207 15:09:59.203579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23027 > 2) by scale factor 0.896752\nI1207 15:10:03.396622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13485 > 2) by scale factor 0.637989\nI1207 15:10:11.778573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30637 > 2) by scale factor 0.867164\nI1207 15:10:15.970302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06634 > 2) by scale factor 0.967897\nI1207 15:10:28.543016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00198 > 2) by scale factor 0.999009\nI1207 15:10:32.734956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13017 > 2) by scale factor 0.938892\nI1207 15:10:41.118108  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61944 > 2) by scale factor 0.763522\nI1207 15:10:45.310840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43919 > 2) by scale factor 0.819944\nI1207 15:10:49.504463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06806 > 2) by scale factor 0.96709\nI1207 15:10:53.696759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54354 > 2) by scale factor 0.786306\nI1207 15:10:57.889379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59678 > 2) by scale factor 0.770184\nI1207 15:11:06.271901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18756 > 2) by scale factor 0.914262\nI1207 15:11:10.464090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20506 > 2) by scale factor 0.907005\nI1207 15:11:23.036751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06382 > 2) by scale factor 0.969077\nI1207 15:11:31.418174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77723 > 2) by scale factor 0.720143\nI1207 15:11:35.610364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09804 > 2) by scale factor 0.95327\nI1207 15:11:39.803722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12351 > 2) by scale factor 0.941839\nI1207 15:11:52.379132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24981 > 2) by scale factor 0.888965\nI1207 15:11:56.571198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86433 > 2) by scale factor 0.698245\nI1207 15:12:00.763573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39061 > 2) by scale factor 0.836608\nI1207 15:12:09.145541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.6985 > 2) by scale factor 0.741154\nI1207 15:12:13.337908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64714 > 2) by scale factor 0.755533\nI1207 15:12:17.530510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16813 > 2) by scale factor 0.922454\nI1207 15:12:21.721555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80146 > 2) by scale factor 0.713914\nI1207 15:12:25.913125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29755 > 2) by scale factor 0.606511\nI1207 15:12:30.105392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27563 > 2) by scale factor 0.878876\nI1207 15:12:34.296808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26327 > 2) by scale factor 0.883676\nI1207 15:12:38.489050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76705 > 2) by scale factor 0.530919\nI1207 15:12:42.680968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15265 > 2) by scale factor 0.634387\nI1207 15:12:46.873651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04019 > 2) by scale factor 0.9803\nI1207 15:12:51.065397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2798 > 2) by scale factor 0.877269\nI1207 15:12:59.448024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65903 > 2) by scale factor 0.752153\nI1207 15:13:07.830468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99454 > 2) by scale factor 0.667882\nI1207 15:13:12.024399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50381 > 2) by scale factor 0.570807\nI1207 15:13:16.215782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3467 > 2) by scale factor 0.85226\nI1207 15:13:20.408673  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92143 > 2) by scale factor 0.684595\nI1207 15:13:24.600886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90194 > 2) by scale factor 0.689194\nI1207 15:13:28.793172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76903 > 2) by scale factor 0.722274\nI1207 15:13:32.986125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71395 > 2) by scale factor 0.736933\nI1207 15:13:37.178375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72426 > 2) by scale factor 0.734143\nI1207 15:13:41.370846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3758 > 2) by scale factor 0.84182\nI1207 15:13:45.562698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44006 > 2) by scale factor 0.819652\nI1207 15:13:49.754020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61539 > 2) by scale factor 0.764704\nI1207 15:13:53.947882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72636 > 2) by scale factor 0.73358\nI1207 15:13:58.141245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76762 > 2) by scale factor 0.722641\nI1207 15:14:02.334137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79583 > 2) by scale factor 0.715351\nI1207 15:14:14.909116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26672 > 2) by scale factor 0.882332\nI1207 15:14:19.102756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03718 > 2) by scale factor 0.981751\nI1207 15:14:23.294526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69831 > 2) by scale factor 0.741206\nI1207 15:14:27.485827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.29766 > 2) by scale factor 0.87045\nI1207 15:14:31.679081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1771 > 2) by scale factor 0.918655\nI1207 15:14:35.871990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19734 > 2) by scale factor 0.910192\nI1207 15:14:40.065126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92322 > 2) by scale factor 0.684177\nI1207 15:14:40.076977  1922 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1207 15:17:17.792646  1922 solver.cpp:404]     Test net output #0: accuracy = 0.216765\nI1207 15:17:17.792912  1922 solver.cpp:404]     Test net output #1: loss = 6.7811 (* 1 = 6.7811 loss)\nI1207 15:17:21.736512  1922 solver.cpp:228] Iteration 6000, loss = 6.96151\nI1207 15:17:21.736551  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 15:17:21.736567  1922 solver.cpp:244]     Train net output #1: loss = 6.96151 (* 1 = 6.96151 loss)\nI1207 15:17:21.972318  1922 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1207 15:17:21.982419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76142 > 2) by scale factor 0.724265\nI1207 15:17:26.175612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77418 > 2) by scale factor 0.720933\nI1207 15:17:30.368265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19754 > 2) by scale factor 0.910109\nI1207 15:17:34.560720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69792 > 2) by scale factor 0.741311\nI1207 15:17:38.753855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22679 > 2) by scale factor 0.898153\nI1207 15:17:42.946579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60642 > 2) by scale factor 0.767336\nI1207 15:17:47.138258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.09393 > 2) by scale factor 0.488528\nI1207 15:17:51.330703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56019 > 2) by scale factor 0.781191\nI1207 15:17:55.523478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26408 > 2) by scale factor 0.883362\nI1207 15:17:59.714282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87309 > 2) by scale factor 0.696115\nI1207 15:18:03.905416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23686 > 2) by scale factor 0.617883\nI1207 15:18:08.097424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58282 > 2) by scale factor 0.774348\nI1207 15:18:12.290040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22078 > 2) by scale factor 0.900585\nI1207 15:18:16.482254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36031 > 2) by scale factor 0.847345\nI1207 15:18:20.673647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10468 > 2) by scale factor 0.950261\nI1207 15:18:24.865743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51625 > 2) by scale factor 0.794832\nI1207 15:18:33.247089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95319 > 2) by scale factor 0.677233\nI1207 15:18:37.439049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78859 > 2) by scale factor 0.717209\nI1207 15:18:50.011318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51146 > 2) by scale factor 0.796349\nI1207 15:19:02.581079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02383 > 2) by scale factor 0.661412\nI1207 15:19:19.342093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01041 > 2) by scale factor 0.664362\nI1207 15:19:36.101056  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78387 > 2) by scale factor 0.718425\nI1207 15:19:44.484303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40807 > 2) by scale factor 0.586842\nI1207 15:19:48.674933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54284 > 2) by scale factor 0.564519\nI1207 15:19:52.867382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11297 > 2) by scale factor 0.946536\nI1207 15:19:57.059223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6655 > 2) by scale factor 0.75033\nI1207 15:20:01.251220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63737 > 2) by scale factor 0.549848\nI1207 15:20:05.444245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75961 > 2) by scale factor 0.53197\nI1207 15:20:09.636374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44928 > 2) by scale factor 0.816566\nI1207 15:20:13.829210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43798 > 2) by scale factor 0.820351\nI1207 15:20:18.021610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29287 > 2) by scale factor 0.872268\nI1207 15:20:22.213685  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67938 > 2) by scale factor 0.746441\nI1207 15:20:26.404902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76865 > 2) by scale factor 0.722374\nI1207 15:20:30.596612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03538 > 2) by scale factor 0.982616\nI1207 15:20:38.978765  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36153 > 2) by scale factor 0.594966\nI1207 15:20:43.169219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.02238 > 2) by scale factor 0.66173\nI1207 15:20:47.361913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35802 > 2) by scale factor 0.595588\nI1207 15:20:51.554203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28906 > 2) by scale factor 0.873721\nI1207 15:20:59.935223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18743 > 2) by scale factor 0.914315\nI1207 15:21:04.127600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96319 > 2) by scale factor 0.674949\nI1207 15:21:12.509443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76836 > 2) by scale factor 0.72245\nI1207 15:21:16.701622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81042 > 2) by scale factor 0.711638\nI1207 15:21:20.894129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79775 > 2) by scale factor 0.71486\nI1207 15:21:25.086001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31649 > 2) by scale factor 0.863374\nI1207 15:21:29.278728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32131 > 2) by scale factor 0.861581\nI1207 15:21:33.470173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99042 > 2) by scale factor 0.668803\nI1207 15:21:37.661689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1334 > 2) by scale factor 0.937472\nI1207 15:21:41.854025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44122 > 2) by scale factor 0.819264\nI1207 15:21:46.045578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50289 > 2) by scale factor 0.799078\nI1207 15:21:50.237723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23457 > 2) by scale factor 0.895027\nI1207 15:21:58.620594  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04691 > 2) by scale factor 0.977082\nI1207 15:22:02.811394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06082 > 2) by scale factor 0.970487\nI1207 15:22:07.004773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95027 > 2) by scale factor 0.677903\nI1207 15:22:11.197150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06604 > 2) by scale factor 0.968035\nI1207 15:22:15.388337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60241 > 2) by scale factor 0.768519\nI1207 15:22:23.768342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36318 > 2) by scale factor 0.846318\nI1207 15:22:27.960220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03774 > 2) by scale factor 0.981478\nI1207 15:22:36.340613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.745 > 2) by scale factor 0.728598\nI1207 15:22:40.533589  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00249 > 2) by scale factor 0.998754\nI1207 15:22:44.724943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70357 > 2) by scale factor 0.54002\nI1207 15:22:48.916329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66903 > 2) by scale factor 0.749337\nI1207 15:22:53.108652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50963 > 2) by scale factor 0.79693\nI1207 15:22:57.300856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37343 > 2) by scale factor 0.842662\nI1207 15:23:01.493248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10976 > 2) by scale factor 0.947976\nI1207 15:23:05.682996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.8046 > 2) by scale factor 0.713113\nI1207 15:23:09.875641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95088 > 2) by scale factor 0.506217\nI1207 15:23:14.067534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80614 > 2) by scale factor 0.525466\nI1207 15:23:18.259546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24608 > 2) by scale factor 0.616127\nI1207 15:23:22.449669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92203 > 2) by scale factor 0.684457\nI1207 15:23:26.641016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39189 > 2) by scale factor 0.83616\nI1207 15:23:30.832619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72394 > 2) by scale factor 0.734229\nI1207 15:23:35.024631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32314 > 2) by scale factor 0.601841\nI1207 15:23:39.215142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40871 > 2) by scale factor 0.586732\nI1207 15:23:43.406801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46276 > 2) by scale factor 0.812098\nI1207 15:23:47.597393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38585 > 2) by scale factor 0.838276\nI1207 15:23:51.789077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82291 > 2) by scale factor 0.70849\nI1207 15:23:55.981904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49924 > 2) by scale factor 0.800243\nI1207 15:24:00.172021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74331 > 2) by scale factor 0.729046\nI1207 15:24:04.364212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92604 > 2) by scale factor 0.683517\nI1207 15:24:08.556690  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42075 > 2) by scale factor 0.82619\nI1207 15:24:12.747864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3782 > 2) by scale factor 0.840973\nI1207 15:24:16.939388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49571 > 2) by scale factor 0.801375\nI1207 15:24:16.951215  1922 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1207 15:26:54.713153  1922 solver.cpp:404]     Test net output #0: accuracy = 0.197294\nI1207 15:26:54.713418  1922 solver.cpp:404]     Test net output #1: loss = 4.99594 (* 1 = 4.99594 loss)\nI1207 15:26:58.656285  1922 solver.cpp:228] Iteration 6100, loss = 5.42131\nI1207 15:26:58.656327  1922 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 15:26:58.656344  1922 solver.cpp:244]     Train net output #1: loss = 5.42131 (* 1 = 5.42131 loss)\nI1207 15:26:58.892889  1922 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1207 15:26:58.902998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78186 > 2) by scale factor 0.718943\nI1207 15:27:03.096266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35996 > 2) by scale factor 0.847471\nI1207 15:27:07.289671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46257 > 2) by scale factor 0.577605\nI1207 15:27:19.864583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74634 > 2) by scale factor 0.728243\nI1207 15:27:24.057376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19254 > 2) by scale factor 0.626461\nI1207 15:27:28.250504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14351 > 2) by scale factor 0.933047\nI1207 15:27:32.443552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36573 > 2) by scale factor 0.845407\nI1207 15:27:36.636642  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25947 > 2) by scale factor 0.885163\nI1207 15:27:40.828444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41804 > 2) by scale factor 0.827117\nI1207 15:27:49.212121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49401 > 2) by scale factor 0.801922\nI1207 15:27:53.406649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68067 > 2) by scale factor 0.543379\nI1207 15:28:01.787667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96094 > 2) by scale factor 0.675461\nI1207 15:28:05.980762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04402 > 2) by scale factor 0.978464\nI1207 15:28:14.362686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16035 > 2) by scale factor 0.925774\nI1207 15:28:18.555317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98512 > 2) by scale factor 0.501867\nI1207 15:28:22.748256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45524 > 2) by scale factor 0.814585\nI1207 15:28:26.941212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51526 > 2) by scale factor 0.568948\nI1207 15:28:31.133952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21545 > 2) by scale factor 0.902753\nI1207 15:28:39.515058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87333 > 2) by scale factor 0.696057\nI1207 15:28:43.707830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00107 > 2) by scale factor 0.999466\nI1207 15:28:47.899401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36426 > 2) by scale factor 0.84593\nI1207 15:28:52.091691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.54803 > 2) by scale factor 0.784921\nI1207 15:28:56.284757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23733 > 2) by scale factor 0.893921\nI1207 15:29:00.478283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13731 > 2) by scale factor 0.935755\nI1207 15:29:04.671231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00992 > 2) by scale factor 0.66447\nI1207 15:29:08.865022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50519 > 2) by scale factor 0.798344\nI1207 15:29:13.057320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35271 > 2) by scale factor 0.850084\nI1207 15:29:17.250917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6534 > 2) by scale factor 0.753749\nI1207 15:29:21.443471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72092 > 2) by scale factor 0.735046\nI1207 15:29:25.634495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98715 > 2) by scale factor 0.669535\nI1207 15:29:29.826834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2184 > 2) by scale factor 0.901551\nI1207 15:29:34.020730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84766 > 2) by scale factor 0.70233\nI1207 15:29:42.405563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0845 > 2) by scale factor 0.959464\nI1207 15:29:46.597215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67502 > 2) by scale factor 0.747657\nI1207 15:29:50.789971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9234 > 2) by scale factor 0.684136\nI1207 15:29:54.983217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63663 > 2) by scale factor 0.758543\nI1207 15:29:59.175544  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50249 > 2) by scale factor 0.571023\nI1207 15:30:03.367621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00532 > 2) by scale factor 0.997346\nI1207 15:30:07.560008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59501 > 2) by scale factor 0.770709\nI1207 15:30:11.751828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11654 > 2) by scale factor 0.944939\nI1207 15:30:15.944576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72783 > 2) by scale factor 0.536505\nI1207 15:30:20.136337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31068 > 2) by scale factor 0.865547\nI1207 15:30:28.518357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52329 > 2) by scale factor 0.792617\nI1207 15:30:32.711658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46576 > 2) by scale factor 0.81111\nI1207 15:30:36.904038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57335 > 2) by scale factor 0.559699\nI1207 15:30:41.096101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3455 > 2) by scale factor 0.852696\nI1207 15:30:45.288055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33734 > 2) by scale factor 0.855674\nI1207 15:30:49.479660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16395 > 2) by scale factor 0.632122\nI1207 15:30:53.672089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4108 > 2) by scale factor 0.8296\nI1207 15:30:57.863317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02454 > 2) by scale factor 0.661258\nI1207 15:31:02.056113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.73442 > 2) by scale factor 0.731418\nI1207 15:31:06.248886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87255 > 2) by scale factor 0.516456\nI1207 15:31:10.442317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39462 > 2) by scale factor 0.835207\nI1207 15:31:14.634923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25057 > 2) by scale factor 0.615277\nI1207 15:31:18.826961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79381 > 2) by scale factor 0.715867\nI1207 15:31:23.018311  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88295 > 2) by scale factor 0.693734\nI1207 15:31:27.211508  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61593 > 2) by scale factor 0.764547\nI1207 15:31:31.404500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50389 > 2) by scale factor 0.570795\nI1207 15:31:35.596899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14771 > 2) by scale factor 0.931224\nI1207 15:31:43.980573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87297 > 2) by scale factor 0.696145\nI1207 15:31:48.174315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73751 > 2) by scale factor 0.73059\nI1207 15:31:52.366750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52535 > 2) by scale factor 0.791968\nI1207 15:31:56.557958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28826 > 2) by scale factor 0.874027\nI1207 15:32:00.749402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0974 > 2) by scale factor 0.645703\nI1207 15:32:09.107866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06211 > 2) by scale factor 0.653145\nI1207 15:32:13.277753  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52778 > 2) by scale factor 0.791209\nI1207 15:32:17.447368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5361 > 2) by scale factor 0.788613\nI1207 15:32:21.616782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05419 > 2) by scale factor 0.97362\nI1207 15:32:25.785868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49058 > 2) by scale factor 0.803026\nI1207 15:32:29.956044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56562 > 2) by scale factor 0.779539\nI1207 15:32:34.127459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96461 > 2) by scale factor 0.674625\nI1207 15:32:38.297866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53656 > 2) by scale factor 0.565522\nI1207 15:32:42.467852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27271 > 2) by scale factor 0.468087\nI1207 15:32:46.637790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2943 > 2) by scale factor 0.607109\nI1207 15:32:50.808624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26099 > 2) by scale factor 0.613312\nI1207 15:32:54.978399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11531 > 2) by scale factor 0.64199\nI1207 15:32:59.148964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38395 > 2) by scale factor 0.591025\nI1207 15:33:07.486609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27568 > 2) by scale factor 0.878859\nI1207 15:33:11.658416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26463 > 2) by scale factor 0.883147\nI1207 15:33:15.829638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.24533 > 2) by scale factor 0.89074\nI1207 15:33:20.000214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24381 > 2) by scale factor 0.616558\nI1207 15:33:24.169814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25655 > 2) by scale factor 0.614146\nI1207 15:33:28.339570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94474 > 2) by scale factor 0.679177\nI1207 15:33:32.508687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22987 > 2) by scale factor 0.896912\nI1207 15:33:36.678548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97962 > 2) by scale factor 0.671226\nI1207 15:33:40.848465  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86925 > 2) by scale factor 0.697045\nI1207 15:33:45.019439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2743 > 2) by scale factor 0.610818\nI1207 15:33:49.190208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78094 > 2) by scale factor 0.528969\nI1207 15:33:53.360052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07527 > 2) by scale factor 0.963732\nI1207 15:33:53.371851  1922 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1207 15:36:31.138829  1922 solver.cpp:404]     Test net output #0: accuracy = 0.141471\nI1207 15:36:31.139103  1922 solver.cpp:404]     Test net output #1: loss = 9.04106 (* 1 = 9.04106 loss)\nI1207 15:36:35.081017  1922 solver.cpp:228] Iteration 6200, loss = 9.03386\nI1207 15:36:35.081058  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 15:36:35.081074  1922 solver.cpp:244]     Train net output #1: loss = 9.03386 (* 1 = 9.03386 loss)\nI1207 15:36:35.301370  1922 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1207 15:36:35.311406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.37208 > 2) by scale factor 0.593106\nI1207 15:36:39.480839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21697 > 2) by scale factor 0.621702\nI1207 15:36:43.650773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62876 > 2) by scale factor 0.760815\nI1207 15:36:47.820929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86931 > 2) by scale factor 0.697033\nI1207 15:36:51.990743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49166 > 2) by scale factor 0.802678\nI1207 15:36:56.160198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53108 > 2) by scale factor 0.441396\nI1207 15:37:00.329931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35697 > 2) by scale factor 0.848547\nI1207 15:37:04.499274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24085 > 2) by scale factor 0.892517\nI1207 15:37:17.002771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74606 > 2) by scale factor 0.728317\nI1207 15:37:21.172480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21519 > 2) by scale factor 0.474475\nI1207 15:37:25.342244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92772 > 2) by scale factor 0.683126\nI1207 15:37:29.510401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03606 > 2) by scale factor 0.982291\nI1207 15:37:33.679993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18126 > 2) by scale factor 0.916901\nI1207 15:37:37.850373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66888 > 2) by scale factor 0.749378\nI1207 15:37:42.018741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14914 > 2) by scale factor 0.930605\nI1207 15:37:46.188256  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38369 > 2) by scale factor 0.839035\nI1207 15:37:50.357353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34982 > 2) by scale factor 0.85113\nI1207 15:37:54.526782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63611 > 2) by scale factor 0.758693\nI1207 15:37:58.695528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92865 > 2) by scale factor 0.682908\nI1207 15:38:07.032122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49952 > 2) by scale factor 0.800153\nI1207 15:38:11.202358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84485 > 2) by scale factor 0.520177\nI1207 15:38:15.371431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07093 > 2) by scale factor 0.965751\nI1207 15:38:23.707229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18432 > 2) by scale factor 0.628077\nI1207 15:38:27.876045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00065 > 2) by scale factor 0.999678\nI1207 15:38:32.045840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11261 > 2) by scale factor 0.946697\nI1207 15:38:36.215970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33386 > 2) by scale factor 0.856949\nI1207 15:38:44.550637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79979 > 2) by scale factor 0.714338\nI1207 15:38:48.719427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16181 > 2) by scale factor 0.632549\nI1207 15:38:52.889019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11668 > 2) by scale factor 0.944875\nI1207 15:38:57.058450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.66618 > 2) by scale factor 0.545526\nI1207 15:39:01.227439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96465 > 2) by scale factor 0.674616\nI1207 15:39:05.396813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38654 > 2) by scale factor 0.838033\nI1207 15:39:09.565788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53517 > 2) by scale factor 0.565744\nI1207 15:39:13.735343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65674 > 2) by scale factor 0.752802\nI1207 15:39:17.904850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95885 > 2) by scale factor 0.675939\nI1207 15:39:26.241418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99031 > 2) by scale factor 0.501214\nI1207 15:39:30.410203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16015 > 2) by scale factor 0.632881\nI1207 15:39:34.578004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55752 > 2) by scale factor 0.782009\nI1207 15:39:38.745882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72358 > 2) by scale factor 0.734326\nI1207 15:39:42.915119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89306 > 2) by scale factor 0.691309\nI1207 15:39:47.084699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27237 > 2) by scale factor 0.88014\nI1207 15:39:51.253911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72421 > 2) by scale factor 0.734157\nI1207 15:39:55.423713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63387 > 2) by scale factor 0.759338\nI1207 15:39:59.593267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53324 > 2) by scale factor 0.789501\nI1207 15:40:07.930498  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72951 > 2) by scale factor 0.732733\nI1207 15:40:12.100287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99912 > 2) by scale factor 0.50011\nI1207 15:40:16.270002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18213 > 2) by scale factor 0.62851\nI1207 15:40:20.438946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27821 > 2) by scale factor 0.877882\nI1207 15:40:24.607823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65241 > 2) by scale factor 0.754031\nI1207 15:40:32.942910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65377 > 2) by scale factor 0.547379\nI1207 15:40:49.613780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86314 > 2) by scale factor 0.698533\nI1207 15:40:53.783120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72988 > 2) by scale factor 0.732632\nI1207 15:40:57.953567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13849 > 2) by scale factor 0.483269\nI1207 15:41:02.123325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30264 > 2) by scale factor 0.605577\nI1207 15:41:06.294126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4658 > 2) by scale factor 0.811094\nI1207 15:41:14.629501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33654 > 2) by scale factor 0.599423\nI1207 15:41:18.798580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75929 > 2) by scale factor 0.532016\nI1207 15:41:22.967710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64783 > 2) by scale factor 0.755335\nI1207 15:41:27.136891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.15574 > 2) by scale factor 0.927757\nI1207 15:41:31.305315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35299 > 2) by scale factor 0.849982\nI1207 15:41:35.474663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24149 > 2) by scale factor 0.892263\nI1207 15:41:39.642897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33698 > 2) by scale factor 0.461151\nI1207 15:41:43.812144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34029 > 2) by scale factor 0.854593\nI1207 15:41:47.980274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95276 > 2) by scale factor 0.677333\nI1207 15:41:52.151348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85064 > 2) by scale factor 0.519395\nI1207 15:41:56.320374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40981 > 2) by scale factor 0.829942\nI1207 15:42:00.489665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03983 > 2) by scale factor 0.980475\nI1207 15:42:04.650095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0953 > 2) by scale factor 0.646141\nI1207 15:42:08.828838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19102 > 2) by scale factor 0.626758\nI1207 15:42:13.008795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81298 > 2) by scale factor 0.710989\nI1207 15:42:17.188284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20052 > 2) by scale factor 0.908874\nI1207 15:42:21.367597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03807 > 2) by scale factor 0.981322\nI1207 15:42:25.547091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91483 > 2) by scale factor 0.686146\nI1207 15:42:29.726224  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14021 > 2) by scale factor 0.636901\nI1207 15:42:33.905854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21465 > 2) by scale factor 0.903076\nI1207 15:42:38.085348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88919 > 2) by scale factor 0.692235\nI1207 15:42:42.264616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37396 > 2) by scale factor 0.842475\nI1207 15:42:46.444562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12691 > 2) by scale factor 0.940332\nI1207 15:42:50.623869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31907 > 2) by scale factor 0.602578\nI1207 15:42:54.803494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26933 > 2) by scale factor 0.881318\nI1207 15:43:07.337756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62032 > 2) by scale factor 0.763266\nI1207 15:43:15.694782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08658 > 2) by scale factor 0.647965\nI1207 15:43:19.873349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69746 > 2) by scale factor 0.741439\nI1207 15:43:24.052321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37304 > 2) by scale factor 0.592938\nI1207 15:43:28.232046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85005 > 2) by scale factor 0.701741\nI1207 15:43:28.243821  1922 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1207 15:46:05.920207  1922 solver.cpp:404]     Test net output #0: accuracy = 0.229471\nI1207 15:46:05.920473  1922 solver.cpp:404]     Test net output #1: loss = 4.48811 (* 1 = 4.48811 loss)\nI1207 15:46:09.860844  1922 solver.cpp:228] Iteration 6300, loss = 4.22288\nI1207 
15:46:09.860882  1922 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 15:46:09.860903  1922 solver.cpp:244]     Train net output #1: loss = 4.22288 (* 1 = 4.22288 loss)\nI1207 15:46:10.089078  1922 sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1207 15:46:14.277686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.368 > 2) by scale factor 0.844593\nI1207 15:46:18.457988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39255 > 2) by scale factor 0.835929\nI1207 15:46:22.638948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53273 > 2) by scale factor 0.789661\nI1207 15:46:30.998281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64014 > 2) by scale factor 0.757537\nI1207 15:46:35.179082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26189 > 2) by scale factor 0.884216\nI1207 15:46:39.359769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3029 > 2) by scale factor 0.868469\nI1207 15:46:43.541362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65183 > 2) by scale factor 0.54767\nI1207 15:46:47.723084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06599 > 2) by scale factor 0.968061\nI1207 15:46:51.903898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59188 > 2) by scale factor 0.556812\nI1207 15:46:56.085085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12539 > 2) by scale factor 0.941003\nI1207 15:47:00.265934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69855 > 2) by scale factor 0.741139\nI1207 15:47:04.447125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39653 > 2) by scale factor 0.588836\nI1207 15:47:08.628285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.9912 > 2) by scale factor 0.668628\nI1207 15:47:12.808614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9851 > 2) by scale factor 0.50187\nI1207 15:47:16.989162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6486 > 2) by scale factor 0.755115\nI1207 15:47:21.171008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14907 > 2) by scale factor 0.635109\nI1207 15:47:29.530467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66739 > 2) by scale factor 0.749796\nI1207 15:47:33.711066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46106 > 2) by scale factor 0.812658\nI1207 15:47:37.891443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42773 > 2) by scale factor 0.823814\nI1207 15:47:42.073482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26243 > 2) by scale factor 0.884007\nI1207 15:47:46.254431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10419 > 2) by scale factor 0.64429\nI1207 15:47:50.435163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04704 > 2) by scale factor 0.656374\nI1207 15:47:54.616022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61366 > 2) by scale factor 0.765211\nI1207 15:47:58.796692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59327 > 2) by scale factor 0.771227\nI1207 15:48:02.977371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74701 > 2) by scale factor 0.728064\nI1207 15:48:07.158632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66156 > 2) by scale factor 0.751439\nI1207 15:48:11.338405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14577 > 2) by scale factor 0.482419\nI1207 
15:48:19.697710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41977 > 2) by scale factor 0.826524\nI1207 15:48:23.877748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14552 > 2) by scale factor 0.932175\nI1207 15:48:32.236876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31467 > 2) by scale factor 0.864055\nI1207 15:48:44.772928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16089 > 2) by scale factor 0.925544\nI1207 15:49:14.026073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35545 > 2) by scale factor 0.849093\nI1207 15:49:22.386277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46931 > 2) by scale factor 0.809941\nI1207 15:49:34.921721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79565 > 2) by scale factor 0.715398\nI1207 15:49:39.102478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60425 > 2) by scale factor 0.554901\nI1207 15:49:51.639374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0697 > 2) by scale factor 0.966322\nI1207 15:49:59.998728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20565 > 2) by scale factor 0.906761\nI1207 15:50:04.180050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4347 > 2) by scale factor 0.821456\nI1207 15:50:08.361058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5869 > 2) by scale factor 0.773126\nI1207 15:50:12.541923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00762 > 2) by scale factor 0.996207\nI1207 15:50:16.723867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04745 > 2) by scale factor 0.976824\nI1207 15:50:20.904101  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.03874 > 2) by scale factor 0.980999\nI1207 15:50:25.085698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34403 > 2) by scale factor 0.85323\nI1207 15:50:29.267725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77894 > 2) by scale factor 0.529249\nI1207 15:50:33.448560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04024 > 2) by scale factor 0.657842\nI1207 15:50:37.629580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52288 > 2) by scale factor 0.792746\nI1207 15:50:41.809597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3378 > 2) by scale factor 0.599196\nI1207 15:50:50.167186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62422 > 2) by scale factor 0.762132\nI1207 15:50:54.349014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1475 > 2) by scale factor 0.635425\nI1207 15:50:58.530273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40463 > 2) by scale factor 0.831727\nI1207 15:51:11.068295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75585 > 2) by scale factor 0.72573\nI1207 15:51:15.248400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87108 > 2) by scale factor 0.696603\nI1207 15:51:27.786556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61533 > 2) by scale factor 0.764721\nI1207 15:51:31.966866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85083 > 2) by scale factor 0.701549\nI1207 15:51:40.327539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01611 > 2) by scale factor 0.992011\nI1207 15:51:48.686374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32067 > 2) by scale factor 0.861821\nI1207 
15:51:52.868234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39962 > 2) by scale factor 0.833464\nI1207 15:51:57.049597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59633 > 2) by scale factor 0.770319\nI1207 15:52:01.231065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72559 > 2) by scale factor 0.733785\nI1207 15:52:05.412200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08762 > 2) by scale factor 0.95803\nI1207 15:52:09.593163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78101 > 2) by scale factor 0.719162\nI1207 15:52:13.774370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36651 > 2) by scale factor 0.594088\nI1207 15:52:17.954912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04783 > 2) by scale factor 0.656206\nI1207 15:52:22.136569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34552 > 2) by scale factor 0.597815\nI1207 15:52:26.318306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80015 > 2) by scale factor 0.714248\nI1207 15:52:30.499228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1577 > 2) by scale factor 0.633372\nI1207 15:52:34.680394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34218 > 2) by scale factor 0.853905\nI1207 15:52:38.860965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78606 > 2) by scale factor 0.717859\nI1207 15:52:43.042850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31445 > 2) by scale factor 0.864138\nI1207 15:52:47.222720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44929 > 2) by scale factor 0.816564\nI1207 15:52:51.404083  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.18091 > 2) by scale factor 0.917048\nI1207 15:52:55.585292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86745 > 2) by scale factor 0.697483\nI1207 15:52:59.765173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18701 > 2) by scale factor 0.627548\nI1207 15:53:03.946096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56034 > 2) by scale factor 0.561745\nI1207 15:53:03.957981  1922 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1207 15:55:41.728665  1922 solver.cpp:404]     Test net output #0: accuracy = 0.207588\nI1207 15:55:41.728960  1922 solver.cpp:404]     Test net output #1: loss = 5.12749 (* 1 = 5.12749 loss)\nI1207 15:55:45.670393  1922 solver.cpp:228] Iteration 6400, loss = 4.04679\nI1207 15:55:45.670434  1922 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1207 15:55:45.670451  1922 solver.cpp:244]     Train net output #1: loss = 4.04679 (* 1 = 4.04679 loss)\nI1207 15:55:45.901572  1922 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1207 15:55:45.911731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06312 > 2) by scale factor 0.652928\nI1207 15:55:50.093120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50711 > 2) by scale factor 0.797732\nI1207 15:55:54.273881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62902 > 2) by scale factor 0.76074\nI1207 15:55:58.455981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0357 > 2) by scale factor 0.982465\nI1207 15:56:02.637218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48971 > 2) by scale factor 0.803306\nI1207 15:56:06.817711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78297 > 2) by scale factor 0.718656\nI1207 15:56:10.997105  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.89777 > 2) by scale factor 0.690186\nI1207 15:56:23.534344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4836 > 2) by scale factor 0.805284\nI1207 15:56:27.716580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16054 > 2) by scale factor 0.925695\nI1207 15:56:36.075978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03674 > 2) by scale factor 0.981964\nI1207 15:56:40.257179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05155 > 2) by scale factor 0.974873\nI1207 15:56:44.438949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44784 > 2) by scale factor 0.817046\nI1207 15:56:48.619179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29441 > 2) by scale factor 0.871682\nI1207 15:56:52.799054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34663 > 2) by scale factor 0.852286\nI1207 15:56:56.979692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4039 > 2) by scale factor 0.831981\nI1207 15:57:01.160174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12494 > 2) by scale factor 0.941202\nI1207 15:57:05.340047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91753 > 2) by scale factor 0.68551\nI1207 15:57:09.522265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12461 > 2) by scale factor 0.94135\nI1207 15:57:17.881206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49778 > 2) by scale factor 0.800711\nI1207 15:57:22.061951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77562 > 2) by scale factor 0.72056\nI1207 15:57:26.243193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83519 > 2) by scale factor 0.705419\nI1207 
15:57:38.781030  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44672 > 2) by scale factor 0.817421\nI1207 15:57:42.961540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82943 > 2) by scale factor 0.706855\nI1207 15:57:51.320549  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14676 > 2) by scale factor 0.635575\nI1207 15:57:55.500659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16849 > 2) by scale factor 0.922301\nI1207 15:57:59.681665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53992 > 2) by scale factor 0.787427\nI1207 15:58:03.862386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8101 > 2) by scale factor 0.711718\nI1207 15:58:08.042073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06183 > 2) by scale factor 0.653205\nI1207 15:58:16.400051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49444 > 2) by scale factor 0.572338\nI1207 15:58:20.581058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70531 > 2) by scale factor 0.539766\nI1207 15:58:24.761624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8846 > 2) by scale factor 0.693336\nI1207 15:58:28.942049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98227 > 2) by scale factor 0.67063\nI1207 15:58:33.123953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97343 > 2) by scale factor 0.672624\nI1207 15:58:37.302564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40447 > 2) by scale factor 0.587463\nI1207 15:58:41.481880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2926 > 2) by scale factor 0.87237\nI1207 15:58:45.662423  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.28966 > 2) by scale factor 0.873491\nI1207 15:58:49.843283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28831 > 2) by scale factor 0.874007\nI1207 15:58:54.023846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98493 > 2) by scale factor 0.670032\nI1207 15:59:02.382293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0169 > 2) by scale factor 0.99162\nI1207 15:59:06.562440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55252 > 2) by scale factor 0.78354\nI1207 15:59:14.920047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27321 > 2) by scale factor 0.879814\nI1207 15:59:23.277863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84414 > 2) by scale factor 0.7032\nI1207 15:59:31.638116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51573 > 2) by scale factor 0.794998\nI1207 15:59:35.818948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81298 > 2) by scale factor 0.71099\nI1207 15:59:39.999068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15668 > 2) by scale factor 0.633577\nI1207 15:59:56.715189  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64901 > 2) by scale factor 0.754999\nI1207 16:00:09.252404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85951 > 2) by scale factor 0.699419\nI1207 16:00:13.432086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64957 > 2) by scale factor 0.75484\nI1207 16:00:17.611800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01382 > 2) by scale factor 0.993135\nI1207 16:00:21.790961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30695 > 2) by scale factor 0.866945\nI1207 
16:00:25.971097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82867 > 2) by scale factor 0.707045\nI1207 16:00:34.329413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70411 > 2) by scale factor 0.739615\nI1207 16:00:42.686166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8987 > 2) by scale factor 0.689965\nI1207 16:00:46.865393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54667 > 2) by scale factor 0.78534\nI1207 16:00:51.046598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94679 > 2) by scale factor 0.678704\nI1207 16:00:55.227483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0854 > 2) by scale factor 0.489548\nI1207 16:00:59.407747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79814 > 2) by scale factor 0.714762\nI1207 16:01:03.587062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5884 > 2) by scale factor 0.772678\nI1207 16:01:11.944859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04324 > 2) by scale factor 0.978836\nI1207 16:01:16.124310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49644 > 2) by scale factor 0.57201\nI1207 16:01:20.303989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27304 > 2) by scale factor 0.879879\nI1207 16:01:28.662454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70882 > 2) by scale factor 0.738328\nI1207 16:01:37.021369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03903 > 2) by scale factor 0.658104\nI1207 16:01:41.202419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12144 > 2) by scale factor 0.640729\nI1207 16:01:45.382711  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.31993 > 2) by scale factor 0.862095\nI1207 16:01:53.740962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11987 > 2) by scale factor 0.641052\nI1207 16:01:57.922070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37115 > 2) by scale factor 0.593269\nI1207 16:02:02.103193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63718 > 2) by scale factor 0.758386\nI1207 16:02:06.283344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79335 > 2) by scale factor 0.715985\nI1207 16:02:10.463711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20397 > 2) by scale factor 0.907453\nI1207 16:02:14.644487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19004 > 2) by scale factor 0.913224\nI1207 16:02:18.825408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3038 > 2) by scale factor 0.868132\nI1207 16:02:23.006139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36193 > 2) by scale factor 0.846764\nI1207 16:02:27.187141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33596 > 2) by scale factor 0.599527\nI1207 16:02:35.544695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2989 > 2) by scale factor 0.86998\nI1207 16:02:39.734863  1922 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1207 16:05:17.619644  1922 solver.cpp:404]     Test net output #0: accuracy = 0.204412\nI1207 16:05:17.619953  1922 solver.cpp:404]     Test net output #1: loss = 4.34604 (* 1 = 4.34604 loss)\nI1207 16:05:21.562381  1922 solver.cpp:228] Iteration 6500, loss = 4.59617\nI1207 16:05:21.562424  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 16:05:21.562439  1922 solver.cpp:244]     Train net output #1: loss = 4.59617 (* 1 = 4.59617 loss)\nI1207 
16:05:21.796797  1922 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1207 16:05:21.806933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80673 > 2) by scale factor 0.712574\nI1207 16:05:25.987821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14714 > 2) by scale factor 0.931472\nI1207 16:05:30.169327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3656 > 2) by scale factor 0.594248\nI1207 16:05:38.530320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48282 > 2) by scale factor 0.574248\nI1207 16:05:42.711460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20668 > 2) by scale factor 0.906338\nI1207 16:05:51.073516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17055 > 2) by scale factor 0.630805\nI1207 16:05:55.254237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58829 > 2) by scale factor 0.557368\nI1207 16:06:03.613637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94677 > 2) by scale factor 0.67871\nI1207 16:06:07.795554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49104 > 2) by scale factor 0.802878\nI1207 16:06:11.976914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0959 > 2) by scale factor 0.954244\nI1207 16:06:16.159139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97678 > 2) by scale factor 0.671867\nI1207 16:06:24.519196  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70845 > 2) by scale factor 0.539309\nI1207 16:06:28.700328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15669 > 2) by scale factor 0.927349\nI1207 16:06:32.882576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20506 > 2) by scale factor 
0.624013\nI1207 16:06:37.063637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28226 > 2) by scale factor 0.609337\nI1207 16:06:41.244138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67842 > 2) by scale factor 0.746709\nI1207 16:06:45.424402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03078 > 2) by scale factor 0.984843\nI1207 16:06:49.606135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31304 > 2) by scale factor 0.864662\nI1207 16:06:57.965999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9817 > 2) by scale factor 0.670759\nI1207 16:07:02.146941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40726 > 2) by scale factor 0.830821\nI1207 16:07:06.327833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73225 > 2) by scale factor 0.731997\nI1207 16:07:10.506170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26596 > 2) by scale factor 0.88263\nI1207 16:07:14.686070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2183 > 2) by scale factor 0.621446\nI1207 16:07:18.866547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33774 > 2) by scale factor 0.599207\nI1207 16:07:23.046546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14864 > 2) by scale factor 0.930822\nI1207 16:07:27.226904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19504 > 2) by scale factor 0.911147\nI1207 16:07:31.406803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15161 > 2) by scale factor 0.634595\nI1207 16:07:35.587455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64982 > 2) by scale factor 0.547972\nI1207 16:07:39.768609  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.61104 > 2) by scale factor 0.765977\nI1207 16:07:43.949075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4631 > 2) by scale factor 0.577517\nI1207 16:07:48.130252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51394 > 2) by scale factor 0.795564\nI1207 16:07:52.312949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78276 > 2) by scale factor 0.71871\nI1207 16:07:56.494403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03898 > 2) by scale factor 0.980884\nI1207 16:08:00.675099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27666 > 2) by scale factor 0.878479\nI1207 16:08:04.855644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90097 > 2) by scale factor 0.689426\nI1207 16:08:09.036725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23047 > 2) by scale factor 0.619106\nI1207 16:08:13.217324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22427 > 2) by scale factor 0.89917\nI1207 16:08:17.398573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71728 > 2) by scale factor 0.736031\nI1207 16:08:21.578966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81209 > 2) by scale factor 0.711214\nI1207 16:08:25.760684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72368 > 2) by scale factor 0.7343\nI1207 16:08:29.941798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22321 > 2) by scale factor 0.899598\nI1207 16:08:34.121852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33881 > 2) by scale factor 0.855135\nI1207 16:08:38.304028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16904 > 2) by scale factor 
0.922068\nI1207 16:08:42.484544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67833 > 2) by scale factor 0.746735\nI1207 16:08:46.665609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34816 > 2) by scale factor 0.851732\nI1207 16:08:50.846211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46688 > 2) by scale factor 0.576888\nI1207 16:09:03.385653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73629 > 2) by scale factor 0.730917\nI1207 16:09:07.567193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87451 > 2) by scale factor 0.695771\nI1207 16:09:11.748323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.107 > 2) by scale factor 0.949218\nI1207 16:09:24.287428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43644 > 2) by scale factor 0.820869\nI1207 16:09:45.181061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73849 > 2) by scale factor 0.73033\nI1207 16:09:53.540071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22282 > 2) by scale factor 0.620574\nI1207 16:09:57.720860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42741 > 2) by scale factor 0.823924\nI1207 16:10:01.902231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64096 > 2) by scale factor 0.757299\nI1207 16:10:06.082263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20556 > 2) by scale factor 0.623916\nI1207 16:10:10.263211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72226 > 2) by scale factor 0.537308\nI1207 16:10:14.445907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05287 > 2) by scale factor 0.974244\nI1207 16:10:18.627151  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.74715 > 2) by scale factor 0.728027\nI1207 16:10:22.808709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75916 > 2) by scale factor 0.724858\nI1207 16:10:26.990002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72554 > 2) by scale factor 0.7338\nI1207 16:10:31.170601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98928 > 2) by scale factor 0.669056\nI1207 16:10:35.351425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37368 > 2) by scale factor 0.842573\nI1207 16:10:39.532686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31857 > 2) by scale factor 0.862599\nI1207 16:10:43.713337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53993 > 2) by scale factor 0.564984\nI1207 16:10:47.893792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72272 > 2) by scale factor 0.537242\nI1207 16:10:52.075121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04865 > 2) by scale factor 0.656029\nI1207 16:10:56.257342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22389 > 2) by scale factor 0.899327\nI1207 16:11:00.436821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88888 > 2) by scale factor 0.692309\nI1207 16:11:04.618170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02132 > 2) by scale factor 0.989451\nI1207 16:11:08.799515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2392 > 2) by scale factor 0.617436\nI1207 16:11:12.979660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09049 > 2) by scale factor 0.488939\nI1207 16:11:17.160887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33215 > 2) by scale factor 
0.461664\nI1207 16:11:21.343149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29014 > 2) by scale factor 0.873311\nI1207 16:11:25.524734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64444 > 2) by scale factor 0.756304\nI1207 16:11:29.707072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03055 > 2) by scale factor 0.984956\nI1207 16:11:33.888130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05467 > 2) by scale factor 0.97339\nI1207 16:11:38.069464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58935 > 2) by scale factor 0.772396\nI1207 16:11:42.250344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64437 > 2) by scale factor 0.756322\nI1207 16:11:50.610031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49721 > 2) by scale factor 0.571885\nI1207 16:11:54.792176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67518 > 2) by scale factor 0.544192\nI1207 16:11:58.973737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03859 > 2) by scale factor 0.981069\nI1207 16:12:07.332862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43973 > 2) by scale factor 0.581441\nI1207 16:12:11.515295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43616 > 2) by scale factor 0.820963\nI1207 16:12:15.695677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19399 > 2) by scale factor 0.91158\nI1207 16:12:15.707579  1922 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1207 16:14:52.348505  1922 solver.cpp:404]     Test net output #0: accuracy = 0.214118\nI1207 16:14:52.348804  1922 solver.cpp:404]     Test net output #1: loss = 6.14739 (* 1 = 6.14739 loss)\nI1207 16:14:56.291210  1922 solver.cpp:228] Iteration 6600, 
loss = 6.45504\nI1207 16:14:56.291260  1922 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 16:14:56.291276  1922 solver.cpp:244]     Train net output #1: loss = 6.45504 (* 1 = 6.45504 loss)\nI1207 16:14:56.520673  1922 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1207 16:14:56.530820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66097 > 2) by scale factor 0.751607\nI1207 16:15:00.710192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29997 > 2) by scale factor 0.869577\nI1207 16:15:09.067700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81803 > 2) by scale factor 0.523831\nI1207 16:15:13.247953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77079 > 2) by scale factor 0.721817\nI1207 16:15:17.427373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18429 > 2) by scale factor 0.477979\nI1207 16:15:21.606405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36539 > 2) by scale factor 0.845528\nI1207 16:15:25.786732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90613 > 2) by scale factor 0.688201\nI1207 16:15:29.965802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73957 > 2) by scale factor 0.730042\nI1207 16:15:34.144799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91369 > 2) by scale factor 0.686414\nI1207 16:15:38.324829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6017 > 2) by scale factor 0.768727\nI1207 16:15:42.505511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81092 > 2) by scale factor 0.71151\nI1207 16:15:46.685185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17909 > 2) by scale factor 0.917813\nI1207 16:15:50.864030  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.63212 > 2) by scale factor 0.759843\nI1207 16:15:59.223337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93752 > 2) by scale factor 0.507935\nI1207 16:16:03.404641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06579 > 2) by scale factor 0.96815\nI1207 16:16:15.939688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1345 > 2) by scale factor 0.936986\nI1207 16:16:20.119498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26148 > 2) by scale factor 0.613218\nI1207 16:16:28.477413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27391 > 2) by scale factor 0.879543\nI1207 16:16:32.656009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06239 > 2) by scale factor 0.969749\nI1207 16:16:36.836124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03993 > 2) by scale factor 0.980424\nI1207 16:16:41.016799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64609 > 2) by scale factor 0.755833\nI1207 16:16:49.373433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59149 > 2) by scale factor 0.556872\nI1207 16:16:53.553735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91858 > 2) by scale factor 0.685265\nI1207 16:17:01.912022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89315 > 2) by scale factor 0.691289\nI1207 16:17:06.092320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19442 > 2) by scale factor 0.911403\nI1207 16:17:10.272581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42527 > 2) by scale factor 0.82465\nI1207 16:17:14.452610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08092 > 2) by scale factor 
0.649156\nI1207 16:17:18.633311  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86663 > 2) by scale factor 0.517246\nI1207 16:17:22.812480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68935 > 2) by scale factor 0.743673\nI1207 16:17:26.990448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83951 > 2) by scale factor 0.704347\nI1207 16:17:31.168412  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55159 > 2) by scale factor 0.563129\nI1207 16:17:35.348681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48136 > 2) by scale factor 0.574489\nI1207 16:17:39.527329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84381 > 2) by scale factor 0.703283\nI1207 16:17:43.708169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50082 > 2) by scale factor 0.799738\nI1207 16:17:47.888402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28876 > 2) by scale factor 0.873835\nI1207 16:17:52.067258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02266 > 2) by scale factor 0.497183\nI1207 16:17:56.245962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69686 > 2) by scale factor 0.741602\nI1207 16:18:00.425998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87186 > 2) by scale factor 0.696413\nI1207 16:18:04.606201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77748 > 2) by scale factor 0.720076\nI1207 16:18:08.785950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03949 > 2) by scale factor 0.980638\nI1207 16:18:12.965837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42289 > 2) by scale factor 0.584301\nI1207 16:18:21.323767  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.94071 > 2) by scale factor 0.680108\nI1207 16:18:25.504436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09752 > 2) by scale factor 0.953506\nI1207 16:18:29.685675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46236 > 2) by scale factor 0.81223\nI1207 16:18:46.397171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32298 > 2) by scale factor 0.60187\nI1207 16:18:50.577440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50156 > 2) by scale factor 0.571174\nI1207 16:18:54.757524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46993 > 2) by scale factor 0.809739\nI1207 16:18:58.937860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68512 > 2) by scale factor 0.744846\nI1207 16:19:03.117926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16888 > 2) by scale factor 0.922136\nI1207 16:19:11.476408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50387 > 2) by scale factor 0.798764\nI1207 16:19:15.655166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01367 > 2) by scale factor 0.663643\nI1207 16:19:24.013664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11511 > 2) by scale factor 0.945577\nI1207 16:19:28.194649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37575 > 2) by scale factor 0.592461\nI1207 16:19:32.375653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65346 > 2) by scale factor 0.547426\nI1207 16:19:36.555616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71155 > 2) by scale factor 0.737584\nI1207 16:19:49.090698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92195 > 2) by scale 
factor 0.684474\nI1207 16:19:53.271237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91983 > 2) by scale factor 0.684971\nI1207 16:19:57.450860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53988 > 2) by scale factor 0.787439\nI1207 16:20:14.163712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50067 > 2) by scale factor 0.571318\nI1207 16:20:22.519696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54517 > 2) by scale factor 0.785801\nI1207 16:20:26.698947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82623 > 2) by scale factor 0.707656\nI1207 16:20:35.056613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29418 > 2) by scale factor 0.60713\nI1207 16:20:39.237814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5535 > 2) by scale factor 0.783238\nI1207 16:20:43.417794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66747 > 2) by scale factor 0.749775\nI1207 16:20:51.774904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10573 > 2) by scale factor 0.949789\nI1207 16:20:55.953114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15675 > 2) by scale factor 0.927323\nI1207 16:21:00.132527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82739 > 2) by scale factor 0.707365\nI1207 16:21:04.311617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85305 > 2) by scale factor 0.519069\nI1207 16:21:08.491667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35318 > 2) by scale factor 0.596449\nI1207 16:21:25.204419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21463 > 2) by scale factor 0.903086\nI1207 16:21:29.384201  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.33158 > 2) by scale factor 0.857786\nI1207 16:21:33.563786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25752 > 2) by scale factor 0.885928\nI1207 16:21:37.744866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7952 > 2) by scale factor 0.715513\nI1207 16:21:50.278301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61493 > 2) by scale factor 0.764838\nI1207 16:21:50.290576  1922 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1207 16:24:28.104849  1922 solver.cpp:404]     Test net output #0: accuracy = 0.197941\nI1207 16:24:28.105166  1922 solver.cpp:404]     Test net output #1: loss = 3.85758 (* 1 = 3.85758 loss)\nI1207 16:24:32.047389  1922 solver.cpp:228] Iteration 6700, loss = 3.53686\nI1207 16:24:32.047437  1922 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 16:24:32.047456  1922 solver.cpp:244]     Train net output #1: loss = 3.53686 (* 1 = 3.53686 loss)\nI1207 16:24:32.276456  1922 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1207 16:24:32.286617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39331 > 2) by scale factor 0.835663\nI1207 16:24:40.647732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24281 > 2) by scale factor 0.891737\nI1207 16:24:44.831029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0888 > 2) by scale factor 0.957487\nI1207 16:24:49.011548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74237 > 2) by scale factor 0.53442\nI1207 16:24:53.192798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16738 > 2) by scale factor 0.922773\nI1207 16:24:57.376015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52184 > 2) by scale factor 0.793071\nI1207 16:25:01.557637  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.39781 > 2) by scale factor 0.834094\nI1207 16:25:05.739015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20297 > 2) by scale factor 0.907864\nI1207 16:25:09.921802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79041 > 2) by scale factor 0.71674\nI1207 16:25:14.104502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62647 > 2) by scale factor 0.761478\nI1207 16:25:22.464550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57722 > 2) by scale factor 0.559093\nI1207 16:25:30.825567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89546 > 2) by scale factor 0.690735\nI1207 16:25:39.186166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71887 > 2) by scale factor 0.735599\nI1207 16:25:43.367405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76597 > 2) by scale factor 0.723073\nI1207 16:25:47.548730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06204 > 2) by scale factor 0.653158\nI1207 16:25:51.730300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11288 > 2) by scale factor 0.946574\nI1207 16:25:55.911772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20104 > 2) by scale factor 0.908661\nI1207 16:26:00.094178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6152 > 2) by scale factor 0.76476\nI1207 16:26:04.275789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94708 > 2) by scale factor 0.678637\nI1207 16:26:08.456374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9234 > 2) by scale factor 0.684134\nI1207 16:26:12.638051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08165 > 2) by 
scale factor 0.960776\nI1207 16:26:16.819430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05145 > 2) by scale factor 0.655425\nI1207 16:26:20.999563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03633 > 2) by scale factor 0.65869\nI1207 16:26:25.180555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82698 > 2) by scale factor 0.707468\nI1207 16:26:29.361369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42109 > 2) by scale factor 0.584609\nI1207 16:26:33.542484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10195 > 2) by scale factor 0.951497\nI1207 16:26:37.722491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69427 > 2) by scale factor 0.742316\nI1207 16:26:41.904542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44878 > 2) by scale factor 0.816733\nI1207 16:26:46.086063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44423 > 2) by scale factor 0.818252\nI1207 16:26:54.447157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15464 > 2) by scale factor 0.92823\nI1207 16:26:58.628226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59139 > 2) by scale factor 0.556888\nI1207 16:27:02.809017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24811 > 2) by scale factor 0.470798\nI1207 16:27:11.171617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20667 > 2) by scale factor 0.906341\nI1207 16:27:15.352015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61672 > 2) by scale factor 0.552988\nI1207 16:27:23.713284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32334 > 2) by scale factor 0.86083\nI1207 16:27:27.894981  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.03351 > 2) by scale factor 0.495846\nI1207 16:27:32.078011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40484 > 2) by scale factor 0.831656\nI1207 16:27:40.440116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69001 > 2) by scale factor 0.743491\nI1207 16:27:48.802420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31018 > 2) by scale factor 0.865735\nI1207 16:27:52.984674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84738 > 2) by scale factor 0.702401\nI1207 16:27:57.167114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74473 > 2) by scale factor 0.534083\nI1207 16:28:01.349256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61892 > 2) by scale factor 0.433002\nI1207 16:28:09.709594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12871 > 2) by scale factor 0.939535\nI1207 16:28:18.070818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80658 > 2) by scale factor 0.712611\nI1207 16:28:22.253000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73812 > 2) by scale factor 0.730429\nI1207 16:28:26.434527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56175 > 2) by scale factor 0.780717\nI1207 16:28:38.977377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60497 > 2) by scale factor 0.767762\nI1207 16:28:43.160620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02121 > 2) by scale factor 0.497362\nI1207 16:28:47.342145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44361 > 2) by scale factor 0.818461\nI1207 16:28:59.882486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83944 > 2) by 
scale factor 0.704364\nI1207 16:29:08.243633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.093 > 2) by scale factor 0.955565\nI1207 16:29:20.785693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44354 > 2) by scale factor 0.818486\nI1207 16:29:33.327520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8181 > 2) by scale factor 0.709697\nI1207 16:29:37.509274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13926 > 2) by scale factor 0.934901\nI1207 16:29:45.870203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10352 > 2) by scale factor 0.950786\nI1207 16:29:50.052776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7625 > 2) by scale factor 0.531562\nI1207 16:29:54.235954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03574 > 2) by scale factor 0.982443\nI1207 16:29:58.417927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22579 > 2) by scale factor 0.620004\nI1207 16:30:02.598772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92187 > 2) by scale factor 0.684493\nI1207 16:30:06.780313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15839 > 2) by scale factor 0.926616\nI1207 16:30:10.962074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1194 > 2) by scale factor 0.943662\nI1207 16:30:15.143575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26429 > 2) by scale factor 0.883277\nI1207 16:30:19.324986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5834 > 2) by scale factor 0.774172\nI1207 16:30:23.506362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08949 > 2) by scale factor 0.957173\nI1207 16:30:27.688724  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.62233 > 2) by scale factor 0.76268\nI1207 16:30:31.870717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02652 > 2) by scale factor 0.660824\nI1207 16:30:36.053709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51407 > 2) by scale factor 0.56914\nI1207 16:30:40.237716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88619 > 2) by scale factor 0.692956\nI1207 16:30:52.780633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48746 > 2) by scale factor 0.804034\nI1207 16:30:56.961854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82657 > 2) by scale factor 0.707573\nI1207 16:31:01.146214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4552 > 2) by scale factor 0.814598\nI1207 16:31:05.328601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10506 > 2) by scale factor 0.644111\nI1207 16:31:09.511242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1334 > 2) by scale factor 0.937471\nI1207 16:31:13.693521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15463 > 2) by scale factor 0.633988\nI1207 16:31:22.056802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22989 > 2) by scale factor 0.896906\nI1207 16:31:26.239187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05899 > 2) by scale factor 0.971348\nI1207 16:31:26.251092  1922 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1207 16:34:03.062754  1922 solver.cpp:404]     Test net output #0: accuracy = 0.225177\nI1207 16:34:03.063093  1922 solver.cpp:404]     Test net output #1: loss = 3.91053 (* 1 = 3.91053 loss)\nI1207 16:34:07.005883  1922 solver.cpp:228] Iteration 6800, loss = 3.7949\nI1207 16:34:07.005939  1922 solver.cpp:244]     
Train net output #0: accuracy = 0.223529\nI1207 16:34:07.005957  1922 solver.cpp:244]     Train net output #1: loss = 3.7949 (* 1 = 3.7949 loss)\nI1207 16:34:07.231875  1922 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1207 16:34:07.241978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06956 > 2) by scale factor 0.966388\nI1207 16:34:11.423799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84714 > 2) by scale factor 0.519867\nI1207 16:34:15.606667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33579 > 2) by scale factor 0.599558\nI1207 16:34:19.789503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02523 > 2) by scale factor 0.987544\nI1207 16:34:23.970012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64697 > 2) by scale factor 0.755581\nI1207 16:34:28.152307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43268 > 2) by scale factor 0.822139\nI1207 16:34:32.334419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32713 > 2) by scale factor 0.859427\nI1207 16:34:36.515669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34648 > 2) by scale factor 0.460143\nI1207 16:34:40.697429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85532 > 2) by scale factor 0.700448\nI1207 16:34:49.058779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01254 > 2) by scale factor 0.663893\nI1207 16:34:53.239930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72281 > 2) by scale factor 0.734536\nI1207 16:34:57.421677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47133 > 2) by scale factor 0.809279\nI1207 16:35:05.782627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07017 > 2) by scale 
factor 0.966106\nI1207 16:35:09.965117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19783 > 2) by scale factor 0.909987\nI1207 16:35:14.147631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36457 > 2) by scale factor 0.845819\nI1207 16:35:18.327217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26611 > 2) by scale factor 0.882571\nI1207 16:35:22.507628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99693 > 2) by scale factor 0.667349\nI1207 16:35:26.687809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22811 > 2) by scale factor 0.897622\nI1207 16:35:35.047785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55587 > 2) by scale factor 0.782514\nI1207 16:35:39.229053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18815 > 2) by scale factor 0.627323\nI1207 16:35:47.589290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35407 > 2) by scale factor 0.849594\nI1207 16:35:51.771168  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75051 > 2) by scale factor 0.727139\nI1207 16:36:00.131247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53278 > 2) by scale factor 0.789647\nI1207 16:36:04.314334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7695 > 2) by scale factor 0.722151\nI1207 16:36:08.496489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32332 > 2) by scale factor 0.860836\nI1207 16:36:12.677502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30013 > 2) by scale factor 0.606037\nI1207 16:36:21.038625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39208 > 2) by scale factor 0.836092\nI1207 16:36:25.219063  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.57124 > 2) by scale factor 0.777833\nI1207 16:36:29.401088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65478 > 2) by scale factor 0.753358\nI1207 16:36:33.582918  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33036 > 2) by scale factor 0.858236\nI1207 16:36:37.763291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66232 > 2) by scale factor 0.751224\nI1207 16:36:41.944154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39035 > 2) by scale factor 0.836698\nI1207 16:36:46.125260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14279 > 2) by scale factor 0.933363\nI1207 16:36:58.664819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.187 > 2) by scale factor 0.627549\nI1207 16:37:15.385869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65077 > 2) by scale factor 0.754499\nI1207 16:37:32.104506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44417 > 2) by scale factor 0.818272\nI1207 16:37:36.285718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2831 > 2) by scale factor 0.60918\nI1207 16:37:40.466132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91522 > 2) by scale factor 0.510828\nI1207 16:37:48.825734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26247 > 2) by scale factor 0.883991\nI1207 16:37:53.007185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22562 > 2) by scale factor 0.620036\nI1207 16:37:57.189805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94825 > 2) by scale factor 0.678368\nI1207 16:38:01.370668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31531 > 2) by 
scale factor 0.863816\nI1207 16:38:05.551944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09177 > 2) by scale factor 0.95613\nI1207 16:38:18.090427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55668 > 2) by scale factor 0.782264\nI1207 16:38:22.271755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08509 > 2) by scale factor 0.648279\nI1207 16:38:34.812382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29463 > 2) by scale factor 0.8716\nI1207 16:38:38.993034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39073 > 2) by scale factor 0.836565\nI1207 16:38:43.174752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29014 > 2) by scale factor 0.873307\nI1207 16:38:47.356956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50397 > 2) by scale factor 0.798732\nI1207 16:38:51.537472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54998 > 2) by scale factor 0.784321\nI1207 16:38:55.718761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76797 > 2) by scale factor 0.722552\nI1207 16:38:59.900251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26844 > 2) by scale factor 0.611914\nI1207 16:39:04.081714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07489 > 2) by scale factor 0.963904\nI1207 16:39:08.262667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26076 > 2) by scale factor 0.884659\nI1207 16:39:12.444298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15212 > 2) by scale factor 0.929314\nI1207 16:39:16.625970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25839 > 2) by scale factor 0.885587\nI1207 16:39:20.807422  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.26186 > 2) by scale factor 0.469279\nI1207 16:39:24.988749  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70097 > 2) by scale factor 0.740475\nI1207 16:39:29.168967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18703 > 2) by scale factor 0.477666\nI1207 16:39:33.350263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00967 > 2) by scale factor 0.995191\nI1207 16:39:37.531338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83802 > 2) by scale factor 0.704717\nI1207 16:39:41.712767  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86188 > 2) by scale factor 0.69884\nI1207 16:39:45.894322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1324 > 2) by scale factor 0.638489\nI1207 16:39:50.076206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83108 > 2) by scale factor 0.522045\nI1207 16:39:54.257371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27178 > 2) by scale factor 0.880366\nI1207 16:39:58.438738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84608 > 2) by scale factor 0.702722\nI1207 16:40:02.619920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3593 > 2) by scale factor 0.84771\nI1207 16:40:06.800951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97693 > 2) by scale factor 0.502901\nI1207 16:40:10.983073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46796 > 2) by scale factor 0.810385\nI1207 16:40:15.165052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03676 > 2) by scale factor 0.658596\nI1207 16:40:19.344849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68666 > 2) by 
scale factor 0.744418\nI1207 16:40:31.884310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22252 > 2) by scale factor 0.899879\nI1207 16:40:36.064815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22117 > 2) by scale factor 0.620892\nI1207 16:40:40.245004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37132 > 2) by scale factor 0.843411\nI1207 16:40:44.425227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7355 > 2) by scale factor 0.535404\nI1207 16:40:48.605165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70335 > 2) by scale factor 0.739823\nI1207 16:40:52.786867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15297 > 2) by scale factor 0.634323\nI1207 16:40:56.967581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25813 > 2) by scale factor 0.885689\nI1207 16:41:01.148417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56313 > 2) by scale factor 0.561304\nI1207 16:41:01.160382  1922 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1207 16:43:37.769979  1922 solver.cpp:404]     Test net output #0: accuracy = 0.194941\nI1207 16:43:37.770310  1922 solver.cpp:404]     Test net output #1: loss = 5.56701 (* 1 = 5.56701 loss)\nI1207 16:43:41.712875  1922 solver.cpp:228] Iteration 6900, loss = 5.12676\nI1207 16:43:41.712930  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 16:43:41.712947  1922 solver.cpp:244]     Train net output #1: loss = 5.12676 (* 1 = 5.12676 loss)\nI1207 16:43:41.935356  1922 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1207 16:43:41.945495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65953 > 2) by scale factor 0.752011\nI1207 16:43:54.478147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46693 > 2) by 
scale factor 0.810724\nI1207 16:43:58.657990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04212 > 2) by scale factor 0.657437\nI1207 16:44:02.837910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34552 > 2) by scale factor 0.852689\nI1207 16:44:07.016106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74183 > 2) by scale factor 0.534497\nI1207 16:44:11.194581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89492 > 2) by scale factor 0.690866\nI1207 16:44:15.373014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77497 > 2) by scale factor 0.720729\nI1207 16:44:19.549334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26081 > 2) by scale factor 0.884639\nI1207 16:44:23.726199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87512 > 2) by scale factor 0.695623\nI1207 16:44:32.079501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53576 > 2) by scale factor 0.788719\nI1207 16:44:36.257272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9183 > 2) by scale factor 0.685331\nI1207 16:44:40.434746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43553 > 2) by scale factor 0.821177\nI1207 16:44:44.611562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54242 > 2) by scale factor 0.786653\nI1207 16:44:48.789716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35151 > 2) by scale factor 0.596746\nI1207 16:44:52.967615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34167 > 2) by scale factor 0.854092\nI1207 16:44:57.146275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10487 > 2) by scale factor 0.950179\nI1207 16:45:01.324491  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00764 > 2) by scale factor 0.664973\nI1207 16:45:05.502763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01194 > 2) by scale factor 0.664024\nI1207 16:45:09.680325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57388 > 2) by scale factor 0.777036\nI1207 16:45:13.857904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65997 > 2) by scale factor 0.751888\nI1207 16:45:18.036814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59218 > 2) by scale factor 0.771551\nI1207 16:45:22.215759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3146 > 2) by scale factor 0.603392\nI1207 16:45:30.567865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87925 > 2) by scale factor 0.694625\nI1207 16:45:38.921900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06953 > 2) by scale factor 0.966404\nI1207 16:45:43.101136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03754 > 2) by scale factor 0.981577\nI1207 16:45:47.279481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19375 > 2) by scale factor 0.626222\nI1207 16:45:51.457478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5635 > 2) by scale factor 0.561246\nI1207 16:45:55.635737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21006 > 2) by scale factor 0.904952\nI1207 16:45:59.812971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12304 > 2) by scale factor 0.640401\nI1207 16:46:03.990908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75642 > 2) by scale factor 0.725579\nI1207 16:46:08.168581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.89116 > 2) by scale factor 0.691764\nI1207 16:46:12.345640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47668 > 2) by scale factor 0.807533\nI1207 16:46:20.697695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38731 > 2) by scale factor 0.837764\nI1207 16:46:24.875190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14289 > 2) by scale factor 0.636357\nI1207 16:46:29.053652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31248 > 2) by scale factor 0.864873\nI1207 16:46:41.583878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87016 > 2) by scale factor 0.516774\nI1207 16:46:45.761215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48877 > 2) by scale factor 0.573268\nI1207 16:46:49.938832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84174 > 2) by scale factor 0.703795\nI1207 16:46:54.117008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45328 > 2) by scale factor 0.815234\nI1207 16:46:58.294801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09078 > 2) by scale factor 0.956582\nI1207 16:47:02.473295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86233 > 2) by scale factor 0.698732\nI1207 16:47:10.828140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69815 > 2) by scale factor 0.741248\nI1207 16:47:15.006968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92235 > 2) by scale factor 0.68438\nI1207 16:47:19.185907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5573 > 2) by scale factor 0.782075\nI1207 16:47:23.364019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44688 > 2) by scale factor 0.580235\nI1207 16:47:27.542670  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11983 > 2) by scale factor 0.485457\nI1207 16:47:31.720742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49359 > 2) by scale factor 0.572476\nI1207 16:47:35.898769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35879 > 2) by scale factor 0.595453\nI1207 16:47:40.075845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45366 > 2) by scale factor 0.815109\nI1207 16:47:48.429368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22801 > 2) by scale factor 0.897664\nI1207 16:47:56.781237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23821 > 2) by scale factor 0.89357\nI1207 16:48:05.135774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91566 > 2) by scale factor 0.685952\nI1207 16:48:13.489570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16061 > 2) by scale factor 0.925663\nI1207 16:48:21.843487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9712 > 2) by scale factor 0.673128\nI1207 16:48:26.019922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84619 > 2) by scale factor 0.702695\nI1207 16:48:30.197546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04589 > 2) by scale factor 0.494329\nI1207 16:48:34.375458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25301 > 2) by scale factor 0.887703\nI1207 16:48:38.552916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13234 > 2) by scale factor 0.638501\nI1207 16:48:42.730873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69793 > 2) by scale factor 0.74131\nI1207 16:48:51.084583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.62329 > 2) by scale factor 0.551984\nI1207 16:48:55.263375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23758 > 2) by scale factor 0.893822\nI1207 16:48:59.441540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30692 > 2) by scale factor 0.866958\nI1207 16:49:03.619197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88093 > 2) by scale factor 0.694221\nI1207 16:49:07.797744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89326 > 2) by scale factor 0.691262\nI1207 16:49:11.976747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00998 > 2) by scale factor 0.664456\nI1207 16:49:16.155179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03918 > 2) by scale factor 0.980784\nI1207 16:49:20.332756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3202 > 2) by scale factor 0.861994\nI1207 16:49:24.511759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43367 > 2) by scale factor 0.582467\nI1207 16:49:28.689851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06061 > 2) by scale factor 0.970587\nI1207 16:49:32.867715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45123 > 2) by scale factor 0.815919\nI1207 16:49:37.046774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16349 > 2) by scale factor 0.924431\nI1207 16:49:41.224335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11149 > 2) by scale factor 0.642779\nI1207 16:49:45.400760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01912 > 2) by scale factor 0.990533\nI1207 16:49:49.578667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37275 > 2) by scale factor 0.592987\nI1207 16:49:53.756654  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83398 > 2) by scale factor 0.705721\nI1207 16:49:57.933928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45364 > 2) by scale factor 0.5791\nI1207 16:50:02.112618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2907 > 2) by scale factor 0.607773\nI1207 16:50:06.290801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01604 > 2) by scale factor 0.663122\nI1207 16:50:10.469166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87435 > 2) by scale factor 0.516216\nI1207 16:50:18.823297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79471 > 2) by scale factor 0.715637\nI1207 16:50:23.001804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83851 > 2) by scale factor 0.704594\nI1207 16:50:27.180614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64479 > 2) by scale factor 0.756202\nI1207 16:50:35.533993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17887 > 2) by scale factor 0.629154\nI1207 16:50:35.545955  1922 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1207 16:53:12.526732  1922 solver.cpp:404]     Test net output #0: accuracy = 0.215118\nI1207 16:53:12.527087  1922 solver.cpp:404]     Test net output #1: loss = 7.63574 (* 1 = 7.63574 loss)\nI1207 16:53:16.470489  1922 solver.cpp:228] Iteration 7000, loss = 7.20032\nI1207 16:53:16.470540  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 16:53:16.470556  1922 solver.cpp:244]     Train net output #1: loss = 7.20032 (* 1 = 7.20032 loss)\nI1207 16:53:16.698529  1922 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1207 16:53:16.708665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40959 > 2) by scale factor 0.830015\nI1207 16:53:20.886749  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34713 > 2) by scale factor 0.852105\nI1207 16:53:29.242417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91416 > 2) by scale factor 0.686305\nI1207 16:53:33.421483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64286 > 2) by scale factor 0.549019\nI1207 16:53:37.600186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12217 > 2) by scale factor 0.64058\nI1207 16:53:41.778028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04427 > 2) by scale factor 0.494527\nI1207 16:53:54.308663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32065 > 2) by scale factor 0.602291\nI1207 16:54:02.660917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4589 > 2) by scale factor 0.813371\nI1207 16:54:06.837610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78611 > 2) by scale factor 0.717845\nI1207 16:54:11.015161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07155 > 2) by scale factor 0.651137\nI1207 16:54:15.193570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11854 > 2) by scale factor 0.944048\nI1207 16:54:19.371767  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73619 > 2) by scale factor 0.535304\nI1207 16:54:23.550853  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35248 > 2) by scale factor 0.596574\nI1207 16:54:27.729092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24466 > 2) by scale factor 0.47118\nI1207 16:54:31.907907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18206 > 2) by scale factor 0.628523\nI1207 16:54:36.085878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.07084 > 2) by scale factor 0.491299\nI1207 16:54:40.264124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.126 > 2) by scale factor 0.484731\nI1207 16:54:44.442461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66434 > 2) by scale factor 0.545801\nI1207 16:54:48.619891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69359 > 2) by scale factor 0.541479\nI1207 16:54:52.799758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53252 > 2) by scale factor 0.789728\nI1207 16:54:56.978019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26221 > 2) by scale factor 0.613081\nI1207 16:55:01.156280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65589 > 2) by scale factor 0.753044\nI1207 16:55:05.335868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74089 > 2) by scale factor 0.729689\nI1207 16:55:09.513950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17486 > 2) by scale factor 0.629949\nI1207 16:55:13.690877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74928 > 2) by scale factor 0.727464\nI1207 16:55:17.869168  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28572 > 2) by scale factor 0.874997\nI1207 16:55:22.047161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29291 > 2) by scale factor 0.872252\nI1207 16:55:26.225813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04972 > 2) by scale factor 0.975744\nI1207 16:55:30.403483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42939 > 2) by scale factor 0.583195\nI1207 16:55:34.581574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99721 > 2) by scale factor 0.667286\nI1207 16:55:38.760068  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26235 > 2) by scale factor 0.884037\nI1207 16:55:42.937721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4169 > 2) by scale factor 0.827508\nI1207 16:55:47.115965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27429 > 2) by scale factor 0.879395\nI1207 16:55:51.293704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55884 > 2) by scale factor 0.561981\nI1207 16:55:55.472645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12066 > 2) by scale factor 0.64089\nI1207 16:56:03.824318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28547 > 2) by scale factor 0.875094\nI1207 16:56:08.002379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71174 > 2) by scale factor 0.737533\nI1207 16:56:12.181002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.572 > 2) by scale factor 0.777604\nI1207 16:56:16.359397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91461 > 2) by scale factor 0.686198\nI1207 16:56:20.536229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63567 > 2) by scale factor 0.758819\nI1207 16:56:24.713860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74366 > 2) by scale factor 0.728953\nI1207 16:56:28.891602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81145 > 2) by scale factor 0.711376\nI1207 16:56:33.068363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.408 > 2) by scale factor 0.830563\nI1207 16:56:37.245971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81247 > 2) by scale factor 0.711119\nI1207 16:56:41.424183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.20615 > 2) by scale factor 0.906558\nI1207 16:56:45.603004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96664 > 2) by scale factor 0.674163\nI1207 16:56:53.958097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38334 > 2) by scale factor 0.591132\nI1207 16:56:58.137146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0896 > 2) by scale factor 0.647334\nI1207 16:57:02.315230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52414 > 2) by scale factor 0.792349\nI1207 16:57:06.492486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3632 > 2) by scale factor 0.84631\nI1207 16:57:10.670384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46317 > 2) by scale factor 0.811963\nI1207 16:57:14.848553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70606 > 2) by scale factor 0.539657\nI1207 16:57:23.204286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15833 > 2) by scale factor 0.926642\nI1207 16:57:27.382604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6849 > 2) by scale factor 0.744907\nI1207 16:57:31.561295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22964 > 2) by scale factor 0.619264\nI1207 16:57:35.739403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98416 > 2) by scale factor 0.501988\nI1207 16:57:56.622457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59354 > 2) by scale factor 0.771148\nI1207 16:58:00.801568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18734 > 2) by scale factor 0.914353\nI1207 16:58:04.979987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3504 > 2) by scale factor 0.850919\nI1207 16:58:09.158640  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31072 > 2) by scale factor 0.604097\nI1207 16:58:13.335903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15113 > 2) by scale factor 0.929745\nI1207 16:58:21.688344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20685 > 2) by scale factor 0.623664\nI1207 16:58:25.866575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00879 > 2) by scale factor 0.498904\nI1207 16:58:30.044919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30762 > 2) by scale factor 0.604664\nI1207 16:58:34.222930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10767 > 2) by scale factor 0.643569\nI1207 16:58:38.401732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5692 > 2) by scale factor 0.778451\nI1207 16:58:46.753479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88584 > 2) by scale factor 0.693039\nI1207 16:58:50.931227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16763 > 2) by scale factor 0.631387\nI1207 16:58:55.109340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44864 > 2) by scale factor 0.579938\nI1207 16:58:59.286348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05199 > 2) by scale factor 0.655309\nI1207 16:59:03.465463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80908 > 2) by scale factor 0.711977\nI1207 16:59:07.644263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50094 > 2) by scale factor 0.799698\nI1207 16:59:11.821066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76253 > 2) by scale factor 0.723974\nI1207 16:59:15.999301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.01305 > 2) by scale factor 0.993519\nI1207 16:59:20.176966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55458 > 2) by scale factor 0.782908\nI1207 16:59:24.354022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21419 > 2) by scale factor 0.903263\nI1207 16:59:28.532063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5684 > 2) by scale factor 0.560475\nI1207 16:59:32.709267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48923 > 2) by scale factor 0.80346\nI1207 16:59:36.886626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60316 > 2) by scale factor 0.768297\nI1207 16:59:45.241268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83969 > 2) by scale factor 0.704303\nI1207 16:59:49.419188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35526 > 2) by scale factor 0.849164\nI1207 16:59:53.597358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38244 > 2) by scale factor 0.591289\nI1207 16:59:57.775606  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09462 > 2) by scale factor 0.954828\nI1207 17:00:01.953488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01228 > 2) by scale factor 0.993896\nI1207 17:00:06.130362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7927 > 2) by scale factor 0.716153\nI1207 17:00:10.306964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00013 > 2) by scale factor 0.999933\nI1207 17:00:10.319176  1922 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1207 17:02:47.858955  1922 solver.cpp:404]     Test net output #0: accuracy = 0.275059\nI1207 17:02:47.859287  1922 solver.cpp:404]     Test net output #1: loss = 3.80167 (* 1 = 3.80167 loss)\nI1207 17:02:51.801870  1922 
solver.cpp:228] Iteration 7100, loss = 3.71705\nI1207 17:02:51.801926  1922 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 17:02:51.801944  1922 solver.cpp:244]     Train net output #1: loss = 3.71705 (* 1 = 3.71705 loss)\nI1207 17:02:52.026311  1922 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1207 17:02:56.213691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31582 > 2) by scale factor 0.60317\nI1207 17:03:00.393183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13181 > 2) by scale factor 0.938169\nI1207 17:03:04.572295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29389 > 2) by scale factor 0.871882\nI1207 17:03:12.928341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70873 > 2) by scale factor 0.738353\nI1207 17:03:17.107142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46217 > 2) by scale factor 0.81229\nI1207 17:03:21.286255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03217 > 2) by scale factor 0.659593\nI1207 17:03:25.465984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75178 > 2) by scale factor 0.726802\nI1207 17:03:29.646266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82737 > 2) by scale factor 0.707372\nI1207 17:03:33.824616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02769 > 2) by scale factor 0.986344\nI1207 17:03:38.003516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43402 > 2) by scale factor 0.582407\nI1207 17:03:42.182268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51719 > 2) by scale factor 0.568636\nI1207 17:03:46.360188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62558 > 2) by scale factor 0.761738\nI1207 17:03:50.538262  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52294 > 2) by scale factor 0.792726\nI1207 17:03:54.717092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80311 > 2) by scale factor 0.713494\nI1207 17:03:58.897800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19796 > 2) by scale factor 0.909936\nI1207 17:04:03.076468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29831 > 2) by scale factor 0.465299\nI1207 17:04:07.254273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5871 > 2) by scale factor 0.773067\nI1207 17:04:11.433202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34696 > 2) by scale factor 0.597557\nI1207 17:04:15.611865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46584 > 2) by scale factor 0.577061\nI1207 17:04:19.790300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58529 > 2) by scale factor 0.557835\nI1207 17:04:23.969238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72173 > 2) by scale factor 0.734827\nI1207 17:04:32.324111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1944 > 2) by scale factor 0.626095\nI1207 17:04:36.502785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66155 > 2) by scale factor 0.546217\nI1207 17:04:44.858816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08686 > 2) by scale factor 0.647908\nI1207 17:04:49.037487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03258 > 2) by scale factor 0.983969\nI1207 17:04:53.216001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69227 > 2) by scale factor 0.742867\nI1207 17:04:57.394760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.91484 > 2) by scale factor 0.686145\nI1207 17:05:01.573720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05673 > 2) by scale factor 0.493008\nI1207 17:05:05.752913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20831 > 2) by scale factor 0.90567\nI1207 17:05:09.932178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65602 > 2) by scale factor 0.753005\nI1207 17:05:14.109261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30064 > 2) by scale factor 0.869322\nI1207 17:05:18.288061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94074 > 2) by scale factor 0.507519\nI1207 17:05:22.465425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05527 > 2) by scale factor 0.654607\nI1207 17:05:26.644335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06945 > 2) by scale factor 0.651582\nI1207 17:05:34.998518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46262 > 2) by scale factor 0.812144\nI1207 17:05:39.177757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79733 > 2) by scale factor 0.714969\nI1207 17:05:43.356854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30043 > 2) by scale factor 0.605982\nI1207 17:05:47.535444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24182 > 2) by scale factor 0.616937\nI1207 17:05:51.713811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42887 > 2) by scale factor 0.823428\nI1207 17:05:55.890869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77411 > 2) by scale factor 0.720952\nI1207 17:06:00.068158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5974 > 2) by scale factor 0.770001\nI1207 17:06:04.245296 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06521 > 2) by scale factor 0.652483\nI1207 17:06:08.422680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85945 > 2) by scale factor 0.518209\nI1207 17:06:12.601339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61998 > 2) by scale factor 0.763364\nI1207 17:06:16.780751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81673 > 2) by scale factor 0.524008\nI1207 17:06:20.959601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49735 > 2) by scale factor 0.800849\nI1207 17:06:25.139356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65151 > 2) by scale factor 0.754287\nI1207 17:06:29.319074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47913 > 2) by scale factor 0.806736\nI1207 17:06:37.673398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03308 > 2) by scale factor 0.659395\nI1207 17:06:41.850033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24404 > 2) by scale factor 0.891248\nI1207 17:06:46.027555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28681 > 2) by scale factor 0.466548\nI1207 17:06:54.383878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49097 > 2) by scale factor 0.802902\nI1207 17:06:58.563596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01463 > 2) by scale factor 0.992739\nI1207 17:07:06.920217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62072 > 2) by scale factor 0.76315\nI1207 17:07:11.100095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0082 > 2) by scale factor 0.995919\nI1207 17:07:15.279477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.00721 > 2) by scale factor 0.665069\nI1207 17:07:19.457973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1874 > 2) by scale factor 0.914326\nI1207 17:07:23.637253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05276 > 2) by scale factor 0.655145\nI1207 17:07:27.816684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4876 > 2) by scale factor 0.57346\nI1207 17:07:31.996454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43297 > 2) by scale factor 0.582585\nI1207 17:07:36.175649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30296 > 2) by scale factor 0.868447\nI1207 17:07:44.531399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00485 > 2) by scale factor 0.66559\nI1207 17:07:48.710202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7195 > 2) by scale factor 0.735428\nI1207 17:07:52.888968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64428 > 2) by scale factor 0.75635\nI1207 17:08:01.245378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14 > 2) by scale factor 0.934578\nI1207 17:08:05.424286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19908 > 2) by scale factor 0.909473\nI1207 17:08:13.779727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.084 > 2) by scale factor 0.648509\nI1207 17:08:17.958115  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4639 > 2) by scale factor 0.811721\nI1207 17:08:22.137485  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44577 > 2) by scale factor 0.817738\nI1207 17:08:26.315904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55755 > 2) by scale factor 0.781999\nI1207 17:08:30.494448  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19962 > 2) by scale factor 0.909247\nI1207 17:08:34.673089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47522 > 2) by scale factor 0.575504\nI1207 17:08:38.851421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7044 > 2) by scale factor 0.539899\nI1207 17:08:47.206641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0077 > 2) by scale factor 0.996166\nI1207 17:08:51.385756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72888 > 2) by scale factor 0.732901\nI1207 17:08:55.563922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33398 > 2) by scale factor 0.599883\nI1207 17:08:59.743068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66951 > 2) by scale factor 0.749202\nI1207 17:09:03.922622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99033 > 2) by scale factor 0.668824\nI1207 17:09:08.101824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8439 > 2) by scale factor 0.70326\nI1207 17:09:12.279929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49405 > 2) by scale factor 0.801909\nI1207 17:09:16.459224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05481 > 2) by scale factor 0.973328\nI1207 17:09:20.638661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49688 > 2) by scale factor 0.801001\nI1207 17:09:24.817199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01298 > 2) by scale factor 0.663794\nI1207 17:09:28.995489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94691 > 2) by scale factor 0.678676\nI1207 17:09:33.174051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.28693 > 2) by scale factor 0.608471\nI1207 17:09:37.353469  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75986 > 2) by scale factor 0.724674\nI1207 17:09:41.532263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30421 > 2) by scale factor 0.867978\nI1207 17:09:45.711318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05191 > 2) by scale factor 0.655327\nI1207 17:09:45.723209  1922 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1207 17:12:22.900665  1922 solver.cpp:404]     Test net output #0: accuracy = 0.198235\nI1207 17:12:22.900993  1922 solver.cpp:404]     Test net output #1: loss = 4.9368 (* 1 = 4.9368 loss)\nI1207 17:12:26.844666  1922 solver.cpp:228] Iteration 7200, loss = 4.37396\nI1207 17:12:26.844710  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 17:12:26.844727  1922 solver.cpp:244]     Train net output #1: loss = 4.37396 (* 1 = 4.37396 loss)\nI1207 17:12:27.065644  1922 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1207 17:12:27.075805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5271 > 2) by scale factor 0.791422\nI1207 17:12:31.253921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45546 > 2) by scale factor 0.814511\nI1207 17:12:35.432339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11286 > 2) by scale factor 0.946583\nI1207 17:12:39.610290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02819 > 2) by scale factor 0.986099\nI1207 17:12:47.965399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01865 > 2) by scale factor 0.497679\nI1207 17:12:52.142781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82348 > 2) by scale factor 0.708345\nI1207 17:12:56.321024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.40548 > 2) by scale factor 0.831434\nI1207 17:13:00.499816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13782 > 2) by scale factor 0.935533\nI1207 17:13:04.678427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23047 > 2) by scale factor 0.89667\nI1207 17:13:08.857162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.689 > 2) by scale factor 0.74377\nI1207 17:13:21.386591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53072 > 2) by scale factor 0.790289\nI1207 17:13:29.740533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03684 > 2) by scale factor 0.981912\nI1207 17:13:33.919147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83627 > 2) by scale factor 0.70515\nI1207 17:13:38.096019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4736 > 2) by scale factor 0.808539\nI1207 17:13:42.272939  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70372 > 2) by scale factor 0.739721\nI1207 17:13:46.449944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26475 > 2) by scale factor 0.612604\nI1207 17:13:50.627130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4728 > 2) by scale factor 0.808799\nI1207 17:13:54.806162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34037 > 2) by scale factor 0.598737\nI1207 17:13:58.983320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64043 > 2) by scale factor 0.549385\nI1207 17:14:03.161129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05781 > 2) by scale factor 0.492876\nI1207 17:14:07.338820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03733 > 2) by scale factor 0.981675\nI1207 17:14:11.515089  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43429 > 2) by scale factor 0.582362\nI1207 17:14:15.692657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82127 > 2) by scale factor 0.7089\nI1207 17:14:19.870518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88568 > 2) by scale factor 0.51471\nI1207 17:14:24.048951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10224 > 2) by scale factor 0.644695\nI1207 17:14:28.227059  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38488 > 2) by scale factor 0.838618\nI1207 17:14:32.405197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77939 > 2) by scale factor 0.719583\nI1207 17:14:36.583128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61463 > 2) by scale factor 0.553307\nI1207 17:14:40.760625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72824 > 2) by scale factor 0.733074\nI1207 17:14:44.937436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94837 > 2) by scale factor 0.678341\nI1207 17:14:49.115757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31902 > 2) by scale factor 0.862432\nI1207 17:14:53.294942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70403 > 2) by scale factor 0.539952\nI1207 17:14:57.473052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91739 > 2) by scale factor 0.685544\nI1207 17:15:01.653340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22692 > 2) by scale factor 0.898102\nI1207 17:15:05.832283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9186 > 2) by scale factor 0.685261\nI1207 17:15:14.187023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.37112 > 2) by scale factor 0.843485\nI1207 17:15:18.365316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39709 > 2) by scale factor 0.834346\nI1207 17:15:22.542346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41559 > 2) by scale factor 0.827957\nI1207 17:15:26.719794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09687 > 2) by scale factor 0.645813\nI1207 17:15:30.897727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93989 > 2) by scale factor 0.680298\nI1207 17:15:35.077258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05631 > 2) by scale factor 0.972615\nI1207 17:15:39.255872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78183 > 2) by scale factor 0.528845\nI1207 17:15:43.434701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46265 > 2) by scale factor 0.812132\nI1207 17:15:47.614220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36301 > 2) by scale factor 0.846377\nI1207 17:15:51.792215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87412 > 2) by scale factor 0.695865\nI1207 17:15:55.969909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81762 > 2) by scale factor 0.709819\nI1207 17:16:00.148566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13226 > 2) by scale factor 0.638517\nI1207 17:16:04.325731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13362 > 2) by scale factor 0.937373\nI1207 17:16:08.503870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87112 > 2) by scale factor 0.696591\nI1207 17:16:12.681794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07552 > 2) by scale factor 0.963615\nI1207 17:16:16.860196  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39966 > 2) by scale factor 0.588295\nI1207 17:16:21.038158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56822 > 2) by scale factor 0.560503\nI1207 17:16:25.216836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87193 > 2) by scale factor 0.516538\nI1207 17:16:29.394596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63086 > 2) by scale factor 0.550834\nI1207 17:16:33.572448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83115 > 2) by scale factor 0.706428\nI1207 17:16:37.750177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75611 > 2) by scale factor 0.725661\nI1207 17:16:46.102622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39268 > 2) by scale factor 0.835883\nI1207 17:16:50.280783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5912 > 2) by scale factor 0.771843\nI1207 17:16:54.458432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73979 > 2) by scale factor 0.729984\nI1207 17:16:58.635643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6821 > 2) by scale factor 0.745685\nI1207 17:17:02.813812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48785 > 2) by scale factor 0.803907\nI1207 17:17:06.991830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58008 > 2) by scale factor 0.77517\nI1207 17:17:11.168875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59917 > 2) by scale factor 0.769477\nI1207 17:17:15.346421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05307 > 2) by scale factor 0.655079\nI1207 17:17:19.523195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.10697 > 2) by scale factor 0.94923\nI1207 17:17:23.701067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55957 > 2) by scale factor 0.781381\nI1207 17:17:27.880949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74369 > 2) by scale factor 0.728947\nI1207 17:17:32.059516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36421 > 2) by scale factor 0.458273\nI1207 17:17:36.237447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96962 > 2) by scale factor 0.503826\nI1207 17:17:48.765318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96491 > 2) by scale factor 0.674556\nI1207 17:17:52.942778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04535 > 2) by scale factor 0.977827\nI1207 17:17:57.121029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10218 > 2) by scale factor 0.951392\nI1207 17:18:05.473858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14743 > 2) by scale factor 0.931347\nI1207 17:18:09.651792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10312 > 2) by scale factor 0.950968\nI1207 17:18:18.006608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68638 > 2) by scale factor 0.744497\nI1207 17:18:22.182916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06253 > 2) by scale factor 0.969684\nI1207 17:18:26.360091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03615 > 2) by scale factor 0.982245\nI1207 17:18:30.536777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50601 > 2) by scale factor 0.798083\nI1207 17:18:34.713627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80056 > 2) by scale factor 0.526238\nI1207 17:18:38.890954  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63956 > 2) by scale factor 0.757701\nI1207 17:18:43.067946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17388 > 2) by scale factor 0.920012\nI1207 17:18:47.246608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50561 > 2) by scale factor 0.798209\nI1207 17:18:51.423841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65236 > 2) by scale factor 0.754046\nI1207 17:18:55.601964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22724 > 2) by scale factor 0.897973\nI1207 17:19:08.129932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12183 > 2) by scale factor 0.485221\nI1207 17:19:12.306689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96832 > 2) by scale factor 0.673781\nI1207 17:19:16.484724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52593 > 2) by scale factor 0.791787\nI1207 17:19:20.663437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05642 > 2) by scale factor 0.654361\nI1207 17:19:20.675302  1922 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1207 17:21:56.956746  1922 solver.cpp:404]     Test net output #0: accuracy = 0.199353\nI1207 17:21:56.957041  1922 solver.cpp:404]     Test net output #1: loss = 6.56806 (* 1 = 6.56806 loss)\nI1207 17:22:00.900918  1922 solver.cpp:228] Iteration 7300, loss = 5.38134\nI1207 17:22:00.900962  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 17:22:00.900980  1922 solver.cpp:244]     Train net output #1: loss = 5.38134 (* 1 = 5.38134 loss)\nI1207 17:22:01.121843  1922 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1207 17:22:01.131974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25186 > 2) by scale factor 0.888154\nI1207 17:22:05.310701  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02457 > 2) by scale factor 0.661252\nI1207 17:22:09.490008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36765 > 2) by scale factor 0.84472\nI1207 17:22:13.667213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63278 > 2) by scale factor 0.759653\nI1207 17:22:17.845491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79133 > 2) by scale factor 0.716505\nI1207 17:22:22.022938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60767 > 2) by scale factor 0.766968\nI1207 17:22:26.201526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29505 > 2) by scale factor 0.871442\nI1207 17:22:30.378777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16435 > 2) by scale factor 0.480267\nI1207 17:22:34.557373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28753 > 2) by scale factor 0.60836\nI1207 17:22:38.734035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9762 > 2) by scale factor 0.671997\nI1207 17:22:47.087241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19258 > 2) by scale factor 0.912166\nI1207 17:22:51.266288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38146 > 2) by scale factor 0.591461\nI1207 17:22:55.444147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30821 > 2) by scale factor 0.866472\nI1207 17:22:59.621659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86603 > 2) by scale factor 0.697828\nI1207 17:23:03.799532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89769 > 2) by scale factor 0.690204\nI1207 17:23:12.152544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.53689 > 2) by scale factor 0.788368\nI1207 17:23:20.504981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80603 > 2) by scale factor 0.712752\nI1207 17:23:24.684123  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89837 > 2) by scale factor 0.408299\nI1207 17:23:28.862721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31085 > 2) by scale factor 0.865483\nI1207 17:23:33.041865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9322 > 2) by scale factor 0.682083\nI1207 17:23:37.219610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00439 > 2) by scale factor 0.997809\nI1207 17:23:41.398128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80353 > 2) by scale factor 0.713388\nI1207 17:23:45.576460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.437 > 2) by scale factor 0.581903\nI1207 17:23:49.753598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20657 > 2) by scale factor 0.906385\nI1207 17:23:53.929986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34571 > 2) by scale factor 0.852619\nI1207 17:23:58.107642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07574 > 2) by scale factor 0.963512\nI1207 17:24:02.285707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29209 > 2) by scale factor 0.465974\nI1207 17:24:06.463502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26075 > 2) by scale factor 0.884663\nI1207 17:24:14.817919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84076 > 2) by scale factor 0.704037\nI1207 17:24:18.995187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89937 > 2) by scale factor 0.689804\nI1207 17:24:23.172785  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75509 > 2) by scale factor 0.725929\nI1207 17:24:27.349329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86553 > 2) by scale factor 0.697951\nI1207 17:24:31.526443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8512 > 2) by scale factor 0.701458\nI1207 17:24:35.704038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02546 > 2) by scale factor 0.661056\nI1207 17:24:39.882565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91462 > 2) by scale factor 0.686196\nI1207 17:24:44.060335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42766 > 2) by scale factor 0.583488\nI1207 17:24:48.237927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77366 > 2) by scale factor 0.72107\nI1207 17:24:56.590301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63101 > 2) by scale factor 0.760164\nI1207 17:25:00.768008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10217 > 2) by scale factor 0.64471\nI1207 17:25:04.945952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16433 > 2) by scale factor 0.632045\nI1207 17:25:13.298521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78474 > 2) by scale factor 0.718201\nI1207 17:25:17.477043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97788 > 2) by scale factor 0.671618\nI1207 17:25:21.654798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54937 > 2) by scale factor 0.784506\nI1207 17:25:25.832449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65244 > 2) by scale factor 0.754024\nI1207 17:25:30.010532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.4192 > 2) by scale factor 0.826719\nI1207 17:25:46.715433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32365 > 2) by scale factor 0.860715\nI1207 17:25:50.895467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50199 > 2) by scale factor 0.799363\nI1207 17:25:55.074856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30478 > 2) by scale factor 0.867763\nI1207 17:26:07.604286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19301 > 2) by scale factor 0.911988\nI1207 17:26:11.781744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42057 > 2) by scale factor 0.584698\nI1207 17:26:15.959280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63249 > 2) by scale factor 0.759737\nI1207 17:26:24.312475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18159 > 2) by scale factor 0.478287\nI1207 17:26:28.490718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12005 > 2) by scale factor 0.943373\nI1207 17:26:32.668371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17484 > 2) by scale factor 0.919609\nI1207 17:26:36.845628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0434 > 2) by scale factor 0.978761\nI1207 17:26:41.023624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15044 > 2) by scale factor 0.634831\nI1207 17:26:45.199528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3531 > 2) by scale factor 0.596464\nI1207 17:26:49.377285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75142 > 2) by scale factor 0.420927\nI1207 17:26:53.555851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35301 > 2) by scale factor 0.849973\nI1207 17:26:57.733513  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32618 > 2) by scale factor 0.85978\nI1207 17:27:01.910763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30306 > 2) by scale factor 0.6055\nI1207 17:27:06.088357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49576 > 2) by scale factor 0.572121\nI1207 17:27:10.265326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00046 > 2) by scale factor 0.666564\nI1207 17:27:14.443481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40013 > 2) by scale factor 0.454532\nI1207 17:27:18.620218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69361 > 2) by scale factor 0.541475\nI1207 17:27:22.797942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55544 > 2) by scale factor 0.562519\nI1207 17:27:26.974396  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27569 > 2) by scale factor 0.878855\nI1207 17:27:31.152622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38471 > 2) by scale factor 0.838676\nI1207 17:27:35.330600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1359 > 2) by scale factor 0.936374\nI1207 17:27:39.508535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33975 > 2) by scale factor 0.598847\nI1207 17:27:43.686774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56081 > 2) by scale factor 0.56167\nI1207 17:27:47.865242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56657 > 2) by scale factor 0.560763\nI1207 17:27:52.043301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87758 > 2) by scale factor 0.515785\nI1207 17:28:04.570736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.95049 > 2) by scale factor 0.506266\nI1207 17:28:08.747656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23614 > 2) by scale factor 0.472128\nI1207 17:28:12.926429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19296 > 2) by scale factor 0.912009\nI1207 17:28:21.280661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3794 > 2) by scale factor 0.591822\nI1207 17:28:25.458505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62604 > 2) by scale factor 0.761604\nI1207 17:28:33.811132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49077 > 2) by scale factor 0.57294\nI1207 17:28:46.340323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50601 > 2) by scale factor 0.570449\nI1207 17:28:50.518314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20233 > 2) by scale factor 0.624545\nI1207 17:28:54.696938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32109 > 2) by scale factor 0.861665\nI1207 17:28:54.708783  1922 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1207 17:31:32.009035  1922 solver.cpp:404]     Test net output #0: accuracy = 0.142824\nI1207 17:31:32.009356  1922 solver.cpp:404]     Test net output #1: loss = 8.41889 (* 1 = 8.41889 loss)\nI1207 17:31:35.951429  1922 solver.cpp:228] Iteration 7400, loss = 9.62065\nI1207 17:31:35.951470  1922 solver.cpp:244]     Train net output #0: accuracy = 0.0823529\nI1207 17:31:35.951486  1922 solver.cpp:244]     Train net output #1: loss = 9.62065 (* 1 = 9.62065 loss)\nI1207 17:31:36.180516  1922 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1207 17:31:36.190282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30248 > 2) by scale factor 0.464848\nI1207 17:31:40.368144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.05303 > 2) by scale factor 0.655088\nI1207 17:31:44.548095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94945 > 2) by scale factor 0.678092\nI1207 17:31:48.727943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3732 > 2) by scale factor 0.592909\nI1207 17:31:52.907444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59689 > 2) by scale factor 0.770151\nI1207 17:31:57.087509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71843 > 2) by scale factor 0.73572\nI1207 17:32:01.266814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14981 > 2) by scale factor 0.930313\nI1207 17:32:05.446645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09566 > 2) by scale factor 0.646066\nI1207 17:32:09.627174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46416 > 2) by scale factor 0.577341\nI1207 17:32:13.806586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56142 > 2) by scale factor 0.780818\nI1207 17:32:22.163306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1675 > 2) by scale factor 0.631412\nI1207 17:32:26.342888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73791 > 2) by scale factor 0.730483\nI1207 17:32:30.522729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75988 > 2) by scale factor 0.72467\nI1207 17:32:34.702615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16313 > 2) by scale factor 0.924588\nI1207 17:32:38.881431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23018 > 2) by scale factor 0.472793\nI1207 17:32:47.235941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98883 > 2) by scale factor 0.669159\nI1207 17:32:55.593107  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5298 > 2) by scale factor 0.790575\nI1207 17:32:59.772483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62912 > 2) by scale factor 0.760711\nI1207 17:33:03.952039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37927 > 2) by scale factor 0.591843\nI1207 17:33:08.130887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8054 > 2) by scale factor 0.712912\nI1207 17:33:12.309756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72249 > 2) by scale factor 0.734622\nI1207 17:33:16.489408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61206 > 2) by scale factor 0.765679\nI1207 17:33:20.668130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21927 > 2) by scale factor 0.901199\nI1207 17:33:24.845762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06725 > 2) by scale factor 0.652051\nI1207 17:33:29.025025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19794 > 2) by scale factor 0.909945\nI1207 17:33:33.204349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67598 > 2) by scale factor 0.74739\nI1207 17:33:37.383278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06498 > 2) by scale factor 0.968534\nI1207 17:33:41.562527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41852 > 2) by scale factor 0.585048\nI1207 17:33:49.918689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45505 > 2) by scale factor 0.814646\nI1207 17:33:54.098287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04572 > 2) by scale factor 0.977651\nI1207 17:34:02.454840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.6023 > 2) by scale factor 0.768552\nI1207 17:34:06.634572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93626 > 2) by scale factor 0.681138\nI1207 17:34:10.813585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54927 > 2) by scale factor 0.784539\nI1207 17:34:14.991837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40856 > 2) by scale factor 0.830372\nI1207 17:34:19.170327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23692 > 2) by scale factor 0.894088\nI1207 17:34:23.351187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91089 > 2) by scale factor 0.687076\nI1207 17:34:27.531430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68245 > 2) by scale factor 0.745588\nI1207 17:34:31.709249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.207 > 2) by scale factor 0.623635\nI1207 17:34:35.888126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03451 > 2) by scale factor 0.983037\nI1207 17:34:40.068327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91573 > 2) by scale factor 0.685935\nI1207 17:34:44.247671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7712 > 2) by scale factor 0.530336\nI1207 17:34:48.426961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98661 > 2) by scale factor 0.669657\nI1207 17:34:52.605851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23619 > 2) by scale factor 0.894379\nI1207 17:34:56.786492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41429 > 2) by scale factor 0.828401\nI1207 17:35:00.966605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28642 > 2) by scale factor 0.874729\nI1207 17:35:05.145186  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43853 > 2) by scale factor 0.820166\nI1207 17:35:09.324772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18647 > 2) by scale factor 0.914718\nI1207 17:35:13.502571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55868 > 2) by scale factor 0.781654\nI1207 17:35:17.681159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49655 > 2) by scale factor 0.801104\nI1207 17:35:26.037225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45628 > 2) by scale factor 0.578658\nI1207 17:35:30.217384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20453 > 2) by scale factor 0.624117\nI1207 17:35:34.396960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77963 > 2) by scale factor 0.71952\nI1207 17:35:38.576607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02071 > 2) by scale factor 0.989753\nI1207 17:35:42.755825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0425 > 2) by scale factor 0.979191\nI1207 17:35:46.935652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98278 > 2) by scale factor 0.502162\nI1207 17:35:51.114321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6533 > 2) by scale factor 0.75378\nI1207 17:35:55.292412  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48081 > 2) by scale factor 0.806189\nI1207 17:35:59.470149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73537 > 2) by scale factor 0.731161\nI1207 17:36:03.649401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4815 > 2) by scale factor 0.805965\nI1207 17:36:07.828368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.57786 > 2) by scale factor 0.775839\nI1207 17:36:16.185497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5017 > 2) by scale factor 0.571151\nI1207 17:36:20.364395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92099 > 2) by scale factor 0.510076\nI1207 17:36:24.543417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18399 > 2) by scale factor 0.915754\nI1207 17:36:28.722947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80693 > 2) by scale factor 0.712522\nI1207 17:36:32.902056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83254 > 2) by scale factor 0.70608\nI1207 17:36:37.080945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77848 > 2) by scale factor 0.719818\nI1207 17:36:41.259923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65816 > 2) by scale factor 0.752401\nI1207 17:36:45.438241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28662 > 2) by scale factor 0.874653\nI1207 17:36:49.617624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58033 > 2) by scale factor 0.775095\nI1207 17:36:53.796159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69755 > 2) by scale factor 0.741413\nI1207 17:36:57.975221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07755 > 2) by scale factor 0.962674\nI1207 17:37:02.155390  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32426 > 2) by scale factor 0.86049\nI1207 17:37:06.335326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03181 > 2) by scale factor 0.659672\nI1207 17:37:10.513523  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43022 > 2) by scale factor 0.583053\nI1207 17:37:14.692791  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33581 > 2) by scale factor 0.856233\nI1207 17:37:27.225705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21504 > 2) by scale factor 0.902919\nI1207 17:37:31.404953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67741 > 2) by scale factor 0.746992\nI1207 17:37:35.583369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66803 > 2) by scale factor 0.749616\nI1207 17:37:39.762933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10206 > 2) by scale factor 0.48756\nI1207 17:37:43.940279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63298 > 2) by scale factor 0.759596\nI1207 17:37:48.119886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28428 > 2) by scale factor 0.875551\nI1207 17:37:56.475653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25523 > 2) by scale factor 0.886829\nI1207 17:38:00.653973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28298 > 2) by scale factor 0.609202\nI1207 17:38:04.833051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2155 > 2) by scale factor 0.902731\nI1207 17:38:09.011348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63892 > 2) by scale factor 0.757886\nI1207 17:38:13.190228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11157 > 2) by scale factor 0.947162\nI1207 17:38:17.369375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31274 > 2) by scale factor 0.60373\nI1207 17:38:21.547997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72486 > 2) by scale factor 0.733983\nI1207 17:38:29.902755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.32099 > 2) by scale factor 0.8617\nI1207 17:38:29.914588  1922 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1207 17:41:08.078779  1922 solver.cpp:404]     Test net output #0: accuracy = 0.220529\nI1207 17:41:08.079126  1922 solver.cpp:404]     Test net output #1: loss = 6.67208 (* 1 = 6.67208 loss)\nI1207 17:41:12.022307  1922 solver.cpp:228] Iteration 7500, loss = 7.31126\nI1207 17:41:12.022343  1922 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 17:41:12.022359  1922 solver.cpp:244]     Train net output #1: loss = 7.31126 (* 1 = 7.31126 loss)\nI1207 17:41:12.243860  1922 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1207 17:41:12.254034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29358 > 2) by scale factor 0.871999\nI1207 17:41:16.433147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54921 > 2) by scale factor 0.784558\nI1207 17:41:20.612665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67916 > 2) by scale factor 0.746502\nI1207 17:41:24.792120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41922 > 2) by scale factor 0.584929\nI1207 17:41:28.972364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03421 > 2) by scale factor 0.49576\nI1207 17:41:33.151867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90178 > 2) by scale factor 0.689232\nI1207 17:41:37.332111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75238 > 2) by scale factor 0.726644\nI1207 17:41:41.513111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5142 > 2) by scale factor 0.795483\nI1207 17:41:45.692948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7149 > 2) by scale factor 0.736676\nI1207 17:41:54.049338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.61305 > 2) by scale factor 0.76539\nI1207 17:41:58.228432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33379 > 2) by scale factor 0.46149\nI1207 17:42:02.407445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95989 > 2) by scale factor 0.505064\nI1207 17:42:06.586922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.529 > 2) by scale factor 0.790827\nI1207 17:42:10.768071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93511 > 2) by scale factor 0.681405\nI1207 17:42:14.946563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97929 > 2) by scale factor 0.671302\nI1207 17:42:19.126480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09294 > 2) by scale factor 0.955595\nI1207 17:42:23.304976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61603 > 2) by scale factor 0.764518\nI1207 17:42:31.661494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08056 > 2) by scale factor 0.961278\nI1207 17:42:35.839974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04142 > 2) by scale factor 0.979708\nI1207 17:42:40.019320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47532 > 2) by scale factor 0.807976\nI1207 17:42:44.199126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57448 > 2) by scale factor 0.559522\nI1207 17:42:48.378324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16519 > 2) by scale factor 0.631874\nI1207 17:42:52.558066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7698 > 2) by scale factor 0.530532\nI1207 17:42:56.737401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1568 > 2) by scale factor 0.927301\nI1207 17:43:05.092875  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50836 > 2) by scale factor 0.797332\nI1207 17:43:09.271162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6986 > 2) by scale factor 0.741124\nI1207 17:43:25.979678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02855 > 2) by scale factor 0.985928\nI1207 17:43:30.160398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21609 > 2) by scale factor 0.621874\nI1207 17:43:34.339509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69667 > 2) by scale factor 0.741655\nI1207 17:43:38.518662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4894 > 2) by scale factor 0.803408\nI1207 17:43:46.873692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16573 > 2) by scale factor 0.923475\nI1207 17:43:51.053058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58466 > 2) by scale factor 0.557934\nI1207 17:44:03.587512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0374 > 2) by scale factor 0.981641\nI1207 17:44:07.765617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4511 > 2) by scale factor 0.815961\nI1207 17:44:16.122136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13488 > 2) by scale factor 0.93682\nI1207 17:44:24.478534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42432 > 2) by scale factor 0.584057\nI1207 17:44:28.657030  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03423 > 2) by scale factor 0.983175\nI1207 17:44:37.012872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68577 > 2) by scale factor 0.744666\nI1207 17:44:41.192587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.08542 > 2) by scale factor 0.648209\nI1207 17:44:45.371842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23118 > 2) by scale factor 0.896388\nI1207 17:44:49.551451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62743 > 2) by scale factor 0.551354\nI1207 17:44:53.730370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26465 > 2) by scale factor 0.883138\nI1207 17:45:02.085881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57863 > 2) by scale factor 0.558874\nI1207 17:45:06.264804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31762 > 2) by scale factor 0.862953\nI1207 17:45:10.442400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24638 > 2) by scale factor 0.61607\nI1207 17:45:18.798058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00346 > 2) by scale factor 0.998275\nI1207 17:45:22.976591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34052 > 2) by scale factor 0.598709\nI1207 17:45:27.155745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40026 > 2) by scale factor 0.833241\nI1207 17:45:31.334364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77208 > 2) by scale factor 0.721479\nI1207 17:45:35.513025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80504 > 2) by scale factor 0.713003\nI1207 17:45:39.690793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23563 > 2) by scale factor 0.618119\nI1207 17:45:43.869891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30801 > 2) by scale factor 0.464251\nI1207 17:45:48.049667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48141 > 2) by scale factor 0.805993\nI1207 17:45:52.228444  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53089 > 2) by scale factor 0.56643\nI1207 17:46:00.584326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93204 > 2) by scale factor 0.68212\nI1207 17:46:04.763617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64411 > 2) by scale factor 0.756398\nI1207 17:46:08.942005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83659 > 2) by scale factor 0.705073\nI1207 17:46:13.119968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95428 > 2) by scale factor 0.676984\nI1207 17:46:17.298959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60857 > 2) by scale factor 0.766705\nI1207 17:46:21.477794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90184 > 2) by scale factor 0.689217\nI1207 17:46:25.655877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45236 > 2) by scale factor 0.579314\nI1207 17:46:29.835300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09944 > 2) by scale factor 0.645279\nI1207 17:46:34.014880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99033 > 2) by scale factor 0.668822\nI1207 17:46:38.193534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6088 > 2) by scale factor 0.766636\nI1207 17:46:42.372475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43111 > 2) by scale factor 0.82267\nI1207 17:46:46.552294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0734 > 2) by scale factor 0.49099\nI1207 17:46:50.730475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22719 > 2) by scale factor 0.897994\nI1207 17:46:54.908454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.35711 > 2) by scale factor 0.848496\nI1207 17:46:59.085218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62423 > 2) by scale factor 0.762129\nI1207 17:47:03.264016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24789 > 2) by scale factor 0.615784\nI1207 17:47:07.443099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76493 > 2) by scale factor 0.723347\nI1207 17:47:11.621539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49911 > 2) by scale factor 0.800285\nI1207 17:47:15.800197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04075 > 2) by scale factor 0.980033\nI1207 17:47:24.156615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84652 > 2) by scale factor 0.51995\nI1207 17:47:28.335433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08557 > 2) by scale factor 0.958972\nI1207 17:47:32.514936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8782 > 2) by scale factor 0.515704\nI1207 17:47:40.869359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33316 > 2) by scale factor 0.857205\nI1207 17:47:49.225061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48774 > 2) by scale factor 0.803941\nI1207 17:47:53.404173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45137 > 2) by scale factor 0.815871\nI1207 17:47:57.583504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27401 > 2) by scale factor 0.610872\nI1207 17:48:01.763618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00193 > 2) by scale factor 0.499759\nI1207 17:48:05.943353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03746 > 2) by scale factor 0.658446\nI1207 17:48:05.955173  1922 
solver.cpp:337] Iteration 7600, Testing net (#0)\nI1207 17:50:44.156385  1922 solver.cpp:404]     Test net output #0: accuracy = 0.221941\nI1207 17:50:44.156714  1922 solver.cpp:404]     Test net output #1: loss = 4.75868 (* 1 = 4.75868 loss)\nI1207 17:50:48.098850  1922 solver.cpp:228] Iteration 7600, loss = 4.97611\nI1207 17:50:48.098887  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 17:50:48.098907  1922 solver.cpp:244]     Train net output #1: loss = 4.97611 (* 1 = 4.97611 loss)\nI1207 17:50:48.329707  1922 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1207 17:50:52.518005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19976 > 2) by scale factor 0.909191\nI1207 17:50:56.699174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37086 > 2) by scale factor 0.843576\nI1207 17:51:00.880398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75668 > 2) by scale factor 0.532386\nI1207 17:51:09.240026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42099 > 2) by scale factor 0.82611\nI1207 17:51:13.420303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60511 > 2) by scale factor 0.767722\nI1207 17:51:17.601222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85029 > 2) by scale factor 0.701684\nI1207 17:51:21.782058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27625 > 2) by scale factor 0.878638\nI1207 17:51:25.962369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78165 > 2) by scale factor 0.718998\nI1207 17:51:30.144625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72286 > 2) by scale factor 0.537222\nI1207 17:51:34.325217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79239 > 2) by scale factor 0.716232\nI1207 17:51:42.684424  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18945 > 2) by scale factor 0.913472\nI1207 17:51:46.864389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44329 > 2) by scale factor 0.580839\nI1207 17:51:51.045647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4025 > 2) by scale factor 0.587803\nI1207 17:51:55.229307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08031 > 2) by scale factor 0.393677\nI1207 17:51:59.412818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13341 > 2) by scale factor 0.638283\nI1207 17:52:03.592880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36651 > 2) by scale factor 0.458032\nI1207 17:52:07.774283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13146 > 2) by scale factor 0.638679\nI1207 17:52:16.133431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83284 > 2) by scale factor 0.706005\nI1207 17:52:20.315330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66102 > 2) by scale factor 0.751593\nI1207 17:52:24.496227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16712 > 2) by scale factor 0.922884\nI1207 17:52:28.677182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45922 > 2) by scale factor 0.813266\nI1207 17:52:32.859452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82614 > 2) by scale factor 0.52272\nI1207 17:52:37.040546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99109 > 2) by scale factor 0.668652\nI1207 17:52:41.221153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68313 > 2) by scale factor 0.745399\nI1207 17:52:45.401808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.54329 > 2) by scale factor 0.786383\nI1207 17:52:49.583753  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68204 > 2) by scale factor 0.543178\nI1207 17:52:53.766476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57923 > 2) by scale factor 0.775425\nI1207 17:52:57.949010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35661 > 2) by scale factor 0.848677\nI1207 17:53:02.130530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29094 > 2) by scale factor 0.873005\nI1207 17:53:06.310771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58383 > 2) by scale factor 0.774045\nI1207 17:53:10.491945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97642 > 2) by scale factor 0.671948\nI1207 17:53:14.675009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55232 > 2) by scale factor 0.783602\nI1207 17:53:18.855180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58005 > 2) by scale factor 0.775177\nI1207 17:53:23.037133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52449 > 2) by scale factor 0.792241\nI1207 17:53:27.219530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1221 > 2) by scale factor 0.942461\nI1207 17:53:35.579015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26788 > 2) by scale factor 0.881881\nI1207 17:53:39.760012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24385 > 2) by scale factor 0.891325\nI1207 17:53:43.940989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86623 > 2) by scale factor 0.697781\nI1207 17:53:52.301514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34171 > 2) by scale factor 0.598497\nI1207 17:53:56.482049  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71332 > 2) by scale factor 0.737105\nI1207 17:54:00.660961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11307 > 2) by scale factor 0.946491\nI1207 17:54:04.841902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35772 > 2) by scale factor 0.848279\nI1207 17:54:09.023228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42129 > 2) by scale factor 0.584574\nI1207 17:54:13.203240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1729 > 2) by scale factor 0.479284\nI1207 17:54:17.384760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29781 > 2) by scale factor 0.606463\nI1207 17:54:21.566103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78058 > 2) by scale factor 0.719274\nI1207 17:54:25.746587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10929 > 2) by scale factor 0.643235\nI1207 17:54:29.928225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33035 > 2) by scale factor 0.85824\nI1207 17:54:38.286907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9274 > 2) by scale factor 0.509242\nI1207 17:54:42.468031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04667 > 2) by scale factor 0.977195\nI1207 17:54:46.649588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98262 > 2) by scale factor 0.670551\nI1207 17:54:50.830087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08804 > 2) by scale factor 0.647659\nI1207 17:54:55.011956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40194 > 2) by scale factor 0.587901\nI1207 17:54:59.193569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.28671 > 2) by scale factor 0.874618\nI1207 17:55:07.553539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9159 > 2) by scale factor 0.685896\nI1207 17:55:11.732887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00085 > 2) by scale factor 0.666477\nI1207 17:55:20.091876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10426 > 2) by scale factor 0.644275\nI1207 17:55:24.274621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8456 > 2) by scale factor 0.702838\nI1207 17:55:28.455338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82921 > 2) by scale factor 0.522301\nI1207 17:55:32.635426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44788 > 2) by scale factor 0.817034\nI1207 17:55:36.816467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97466 > 2) by scale factor 0.672346\nI1207 17:55:40.996517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43332 > 2) by scale factor 0.821923\nI1207 17:55:45.178217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24792 > 2) by scale factor 0.615779\nI1207 17:55:49.360067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38834 > 2) by scale factor 0.8374\nI1207 17:55:53.541590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16325 > 2) by scale factor 0.632261\nI1207 17:55:57.721582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13183 > 2) by scale factor 0.638604\nI1207 17:56:01.902629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0066 > 2) by scale factor 0.996709\nI1207 17:56:10.261144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13181 > 2) by scale factor 0.938171\nI1207 17:56:14.442440  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42489 > 2) by scale factor 0.82478\nI1207 17:56:18.622336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80934 > 2) by scale factor 0.711912\nI1207 17:56:22.803633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80118 > 2) by scale factor 0.526152\nI1207 17:56:35.340910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35371 > 2) by scale factor 0.849724\nI1207 17:56:43.701284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09874 > 2) by scale factor 0.952953\nI1207 17:56:52.061422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65276 > 2) by scale factor 0.753932\nI1207 17:56:56.242722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85261 > 2) by scale factor 0.701111\nI1207 17:57:00.424265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55125 > 2) by scale factor 0.783931\nI1207 17:57:04.604876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8683 > 2) by scale factor 0.697276\nI1207 17:57:08.785799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16289 > 2) by scale factor 0.924689\nI1207 17:57:12.966424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5665 > 2) by scale factor 0.77927\nI1207 17:57:21.324647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10028 > 2) by scale factor 0.645102\nI1207 17:57:25.506232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54468 > 2) by scale factor 0.564226\nI1207 17:57:29.686477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36717 > 2) by scale factor 0.844891\nI1207 17:57:33.867566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.36825 > 2) by scale factor 0.844506\nI1207 17:57:38.047987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44854 > 2) by scale factor 0.449585\nI1207 17:57:42.227926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84045 > 2) by scale factor 0.520772\nI1207 17:57:42.240134  1922 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1207 18:00:20.347415  1922 solver.cpp:404]     Test net output #0: accuracy = 0.169529\nI1207 18:00:20.347731  1922 solver.cpp:404]     Test net output #1: loss = 6.94023 (* 1 = 6.94023 loss)\nI1207 18:00:24.290910  1922 solver.cpp:228] Iteration 7700, loss = 7.56719\nI1207 18:00:24.290961  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 18:00:24.290978  1922 solver.cpp:244]     Train net output #1: loss = 7.56719 (* 1 = 7.56719 loss)\nI1207 18:00:24.516604  1922 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1207 18:00:24.526795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61076 > 2) by scale factor 0.5539\nI1207 18:00:28.707979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46714 > 2) by scale factor 0.576845\nI1207 18:00:32.889770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74118 > 2) by scale factor 0.53459\nI1207 18:00:37.071902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70531 > 2) by scale factor 0.739285\nI1207 18:00:41.253201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21816 > 2) by scale factor 0.901648\nI1207 18:00:45.433811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66815 > 2) by scale factor 0.749583\nI1207 18:00:49.614568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52333 > 2) by scale factor 0.792603\nI1207 18:00:53.795517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.05225 > 2) by scale factor 0.655254\nI1207 18:00:57.977757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38123 > 2) by scale factor 0.839902\nI1207 18:01:02.160254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35782 > 2) by scale factor 0.595625\nI1207 18:01:06.342098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36567 > 2) by scale factor 0.845428\nI1207 18:01:10.523898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10499 > 2) by scale factor 0.644124\nI1207 18:01:14.705090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18619 > 2) by scale factor 0.914835\nI1207 18:01:18.887017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4102 > 2) by scale factor 0.586476\nI1207 18:01:23.068922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77189 > 2) by scale factor 0.72153\nI1207 18:01:27.249208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81892 > 2) by scale factor 0.709492\nI1207 18:01:31.429631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2109 > 2) by scale factor 0.622878\nI1207 18:01:35.610584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29168 > 2) by scale factor 0.872722\nI1207 18:01:39.792162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19809 > 2) by scale factor 0.909881\nI1207 18:01:43.974082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12428 > 2) by scale factor 0.941496\nI1207 18:01:48.155375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29495 > 2) by scale factor 0.87148\nI1207 18:01:52.335487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69089 > 2) by scale factor 0.743249\nI1207 18:01:56.516448  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87901 > 2) by scale factor 0.694682\nI1207 18:02:00.698344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91623 > 2) by scale factor 0.685818\nI1207 18:02:04.879458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25688 > 2) by scale factor 0.886179\nI1207 18:02:09.060717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50408 > 2) by scale factor 0.798695\nI1207 18:02:17.421913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90632 > 2) by scale factor 0.688155\nI1207 18:02:25.782548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44089 > 2) by scale factor 0.819372\nI1207 18:02:29.964031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1034 > 2) by scale factor 0.4874\nI1207 18:02:34.145504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88581 > 2) by scale factor 0.514693\nI1207 18:02:38.326721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42141 > 2) by scale factor 0.825965\nI1207 18:02:42.508708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64651 > 2) by scale factor 0.755711\nI1207 18:02:46.689540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18839 > 2) by scale factor 0.913915\nI1207 18:02:50.870399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21909 > 2) by scale factor 0.621293\nI1207 18:02:55.051614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20725 > 2) by scale factor 0.906104\nI1207 18:02:59.231914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51882 > 2) by scale factor 0.568372\nI1207 18:03:07.592062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.26028 > 2) by scale factor 0.884845\nI1207 18:03:11.773766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04533 > 2) by scale factor 0.656743\nI1207 18:03:15.955077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28736 > 2) by scale factor 0.874371\nI1207 18:03:20.136230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59092 > 2) by scale factor 0.556961\nI1207 18:03:24.317828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30056 > 2) by scale factor 0.605957\nI1207 18:03:28.499634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31354 > 2) by scale factor 0.603585\nI1207 18:03:41.038527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89666 > 2) by scale factor 0.690449\nI1207 18:03:45.220083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56228 > 2) by scale factor 0.780554\nI1207 18:03:49.403326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14803 > 2) by scale factor 0.635318\nI1207 18:03:53.585544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37716 > 2) by scale factor 0.841341\nI1207 18:03:57.767248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8208 > 2) by scale factor 0.709018\nI1207 18:04:01.949862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28325 > 2) by scale factor 0.609152\nI1207 18:04:06.132199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59049 > 2) by scale factor 0.772055\nI1207 18:04:14.494541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68529 > 2) by scale factor 0.744798\nI1207 18:04:18.676652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70379 > 2) by scale factor 0.539987\nI1207 18:04:22.858364  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32727 > 2) by scale factor 0.859375\nI1207 18:04:31.220204  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66651 > 2) by scale factor 0.545477\nI1207 18:04:35.401126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6147 > 2) by scale factor 0.764905\nI1207 18:04:39.583873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83406 > 2) by scale factor 0.52164\nI1207 18:04:43.765278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3995 > 2) by scale factor 0.833508\nI1207 18:04:47.947106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82299 > 2) by scale factor 0.523151\nI1207 18:04:52.129104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39203 > 2) by scale factor 0.836111\nI1207 18:04:56.311961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43041 > 2) by scale factor 0.822908\nI1207 18:05:00.492278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68571 > 2) by scale factor 0.542636\nI1207 18:05:04.673688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19344 > 2) by scale factor 0.626283\nI1207 18:05:08.855897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22485 > 2) by scale factor 0.620183\nI1207 18:05:13.036047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47504 > 2) by scale factor 0.808068\nI1207 18:05:17.217999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23187 > 2) by scale factor 0.896109\nI1207 18:05:21.400560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04074 > 2) by scale factor 0.980037\nI1207 18:05:25.581567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.12311 > 2) by scale factor 0.942016\nI1207 18:05:29.763339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35851 > 2) by scale factor 0.595503\nI1207 18:05:33.945578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1255 > 2) by scale factor 0.639897\nI1207 18:05:38.128731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23355 > 2) by scale factor 0.895435\nI1207 18:05:42.311599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52522 > 2) by scale factor 0.792011\nI1207 18:05:46.493620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27004 > 2) by scale factor 0.88104\nI1207 18:05:50.675467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81973 > 2) by scale factor 0.523597\nI1207 18:05:54.857307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33825 > 2) by scale factor 0.85534\nI1207 18:05:59.038908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40147 > 2) by scale factor 0.587981\nI1207 18:06:03.220940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98934 > 2) by scale factor 0.669044\nI1207 18:06:11.582893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1364 > 2) by scale factor 0.637674\nI1207 18:06:15.765278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44545 > 2) by scale factor 0.817845\nI1207 18:06:19.948421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9046 > 2) by scale factor 0.688563\nI1207 18:06:24.130422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.91227 > 2) by scale factor 0.407144\nI1207 18:06:28.311372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38818 > 2) by scale factor 0.590287\nI1207 18:06:32.494375  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74678 > 2) by scale factor 0.728125\nI1207 18:06:36.675254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47769 > 2) by scale factor 0.807204\nI1207 18:06:40.856144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15021 > 2) by scale factor 0.634879\nI1207 18:06:45.038499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63474 > 2) by scale factor 0.759088\nI1207 18:06:49.221289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59849 > 2) by scale factor 0.769679\nI1207 18:06:53.402580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60517 > 2) by scale factor 0.767705\nI1207 18:06:57.584169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10496 > 2) by scale factor 0.950135\nI1207 18:07:01.767055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22017 > 2) by scale factor 0.900832\nI1207 18:07:05.949300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05878 > 2) by scale factor 0.653856\nI1207 18:07:10.130967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83669 > 2) by scale factor 0.705047\nI1207 18:07:14.312623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10819 > 2) by scale factor 0.486832\nI1207 18:07:18.494276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75864 > 2) by scale factor 0.724994\nI1207 18:07:18.506187  1922 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1207 18:09:56.727874  1922 solver.cpp:404]     Test net output #0: accuracy = 0.174706\nI1207 18:09:56.728214  1922 solver.cpp:404]     Test net output #1: loss = 9.49623 (* 1 = 9.49623 loss)\nI1207 18:10:00.671682  1922 solver.cpp:228] Iteration 7800, loss = 8.68371\nI1207 18:10:00.671733  
1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 18:10:00.671751  1922 solver.cpp:244]     Train net output #1: loss = 8.68371 (* 1 = 8.68371 loss)\nI1207 18:10:00.895660  1922 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1207 18:10:00.905812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75025 > 2) by scale factor 0.533298\nI1207 18:10:05.086376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14685 > 2) by scale factor 0.635556\nI1207 18:10:09.267540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48415 > 2) by scale factor 0.574027\nI1207 18:10:13.448427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78203 > 2) by scale factor 0.718898\nI1207 18:10:17.628931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88753 > 2) by scale factor 0.692633\nI1207 18:10:21.809967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33035 > 2) by scale factor 0.600537\nI1207 18:10:25.989619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35967 > 2) by scale factor 0.595297\nI1207 18:10:30.172050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72047 > 2) by scale factor 0.537567\nI1207 18:10:34.353174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78627 > 2) by scale factor 0.528225\nI1207 18:10:38.534711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63162 > 2) by scale factor 0.759988\nI1207 18:10:42.714772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40197 > 2) by scale factor 0.832651\nI1207 18:10:46.895602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13785 > 2) by scale factor 0.93552\nI1207 18:10:51.076423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.49267 > 2) by scale factor 0.572627\nI1207 18:10:55.257072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92241 > 2) by scale factor 0.509891\nI1207 18:10:59.438403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73772 > 2) by scale factor 0.535085\nI1207 18:11:03.619339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00366 > 2) by scale factor 0.665854\nI1207 18:11:07.800459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06864 > 2) by scale factor 0.651756\nI1207 18:11:11.982605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73767 > 2) by scale factor 0.535093\nI1207 18:11:16.163554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30625 > 2) by scale factor 0.604915\nI1207 18:11:20.344286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43221 > 2) by scale factor 0.582715\nI1207 18:11:24.525149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90897 > 2) by scale factor 0.68753\nI1207 18:11:28.705379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42597 > 2) by scale factor 0.824414\nI1207 18:11:32.886916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99379 > 2) by scale factor 0.668049\nI1207 18:11:49.604971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96644 > 2) by scale factor 0.504231\nI1207 18:11:53.786243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9302 > 2) by scale factor 0.682546\nI1207 18:11:57.967648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27125 > 2) by scale factor 0.880571\nI1207 18:12:02.149283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92686 > 2) by scale factor 0.683326\nI1207 18:12:14.687829  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54683 > 2) by scale factor 0.785291\nI1207 18:12:18.868085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74915 > 2) by scale factor 0.533455\nI1207 18:12:23.048956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06508 > 2) by scale factor 0.968487\nI1207 18:12:27.231555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16623 > 2) by scale factor 0.923262\nI1207 18:12:31.413381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60357 > 2) by scale factor 0.768175\nI1207 18:12:35.594519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91963 > 2) by scale factor 0.685019\nI1207 18:12:39.775722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81769 > 2) by scale factor 0.709803\nI1207 18:12:43.955749  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11325 > 2) by scale factor 0.946409\nI1207 18:12:52.314489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69638 > 2) by scale factor 0.54107\nI1207 18:13:00.672435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7866 > 2) by scale factor 0.717721\nI1207 18:13:04.852943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1628 > 2) by scale factor 0.924726\nI1207 18:13:09.033427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30163 > 2) by scale factor 0.868948\nI1207 18:13:17.392768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1175 > 2) by scale factor 0.641541\nI1207 18:13:21.573781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33447 > 2) by scale factor 0.599796\nI1207 18:13:25.754411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.76086 > 2) by scale factor 0.531793\nI1207 18:13:29.935125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07993 > 2) by scale factor 0.649365\nI1207 18:13:34.116255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34218 > 2) by scale factor 0.853904\nI1207 18:13:38.297940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87763 > 2) by scale factor 0.695017\nI1207 18:13:42.478658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41069 > 2) by scale factor 0.586391\nI1207 18:13:46.659847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49009 > 2) by scale factor 0.445426\nI1207 18:13:50.840612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60635 > 2) by scale factor 0.767356\nI1207 18:13:55.021320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61098 > 2) by scale factor 0.765996\nI1207 18:13:59.203229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64946 > 2) by scale factor 0.754871\nI1207 18:14:03.385103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09479 > 2) by scale factor 0.488425\nI1207 18:14:07.566061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70195 > 2) by scale factor 0.740207\nI1207 18:14:11.747140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82896 > 2) by scale factor 0.706973\nI1207 18:14:15.927995  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85976 > 2) by scale factor 0.518168\nI1207 18:14:20.107384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08669 > 2) by scale factor 0.647943\nI1207 18:14:24.287585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77354 > 2) by scale factor 0.530006\nI1207 18:14:28.469554  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02688 > 2) by scale factor 0.496663\nI1207 18:14:32.649662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24222 > 2) by scale factor 0.616861\nI1207 18:14:36.830299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06289 > 2) by scale factor 0.652979\nI1207 18:14:41.010711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56014 > 2) by scale factor 0.781208\nI1207 18:14:45.192833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27637 > 2) by scale factor 0.878592\nI1207 18:14:49.372723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32165 > 2) by scale factor 0.861457\nI1207 18:14:53.553719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18912 > 2) by scale factor 0.91361\nI1207 18:14:57.734323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06274 > 2) by scale factor 0.969585\nI1207 18:15:01.915269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88394 > 2) by scale factor 0.693495\nI1207 18:15:06.096747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12926 > 2) by scale factor 0.639129\nI1207 18:15:10.277318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80812 > 2) by scale factor 0.71222\nI1207 18:15:14.457737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00257 > 2) by scale factor 0.499679\nI1207 18:15:22.817950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57039 > 2) by scale factor 0.560163\nI1207 18:15:26.997790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09184 > 2) by scale factor 0.956097\nI1207 18:15:35.357100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.02724 > 2) by scale factor 0.660667\nI1207 18:15:39.537464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39337 > 2) by scale factor 0.835641\nI1207 18:15:43.717809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27899 > 2) by scale factor 0.877581\nI1207 18:15:52.076583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23451 > 2) by scale factor 0.895051\nI1207 18:15:56.257124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22117 > 2) by scale factor 0.900425\nI1207 18:16:00.437413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2906 > 2) by scale factor 0.873132\nI1207 18:16:08.794912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4919 > 2) by scale factor 0.802599\nI1207 18:16:12.975869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50274 > 2) by scale factor 0.799123\nI1207 18:16:17.156883  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11558 > 2) by scale factor 0.641936\nI1207 18:16:21.337383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74959 > 2) by scale factor 0.727382\nI1207 18:16:25.518141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33772 > 2) by scale factor 0.855534\nI1207 18:16:29.699522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98841 > 2) by scale factor 0.669251\nI1207 18:16:33.880537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06961 > 2) by scale factor 0.651549\nI1207 18:16:38.061106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71131 > 2) by scale factor 0.737651\nI1207 18:16:42.242171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1696 > 2) by scale factor 0.630995\nI1207 18:16:46.422575  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61027 > 2) by scale factor 0.553975\nI1207 18:16:50.602640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74875 > 2) by scale factor 0.533511\nI1207 18:16:54.783547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34774 > 2) by scale factor 0.460009\nI1207 18:16:54.795536  1922 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1207 18:19:33.052800  1922 solver.cpp:404]     Test net output #0: accuracy = 0.20553\nI1207 18:19:33.053184  1922 solver.cpp:404]     Test net output #1: loss = 6.7499 (* 1 = 6.7499 loss)\nI1207 18:19:36.994369  1922 solver.cpp:228] Iteration 7900, loss = 6.89443\nI1207 18:19:36.994417  1922 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 18:19:36.994434  1922 solver.cpp:244]     Train net output #1: loss = 6.89442 (* 1 = 6.89442 loss)\nI1207 18:19:37.221384  1922 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1207 18:19:37.231490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65471 > 2) by scale factor 0.753379\nI1207 18:19:41.412513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56943 > 2) by scale factor 0.778383\nI1207 18:19:45.593214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87238 > 2) by scale factor 0.696287\nI1207 18:19:49.774787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20902 > 2) by scale factor 0.90538\nI1207 18:19:53.955993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8969 > 2) by scale factor 0.513228\nI1207 18:19:58.138332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65701 > 2) by scale factor 0.752727\nI1207 18:20:02.320840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05506 > 2) by scale factor 0.973207\nI1207 18:20:10.681478  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68965 > 2) by scale factor 0.743591\nI1207 18:20:14.863147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10965 > 2) by scale factor 0.948025\nI1207 18:20:19.043985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28457 > 2) by scale factor 0.875437\nI1207 18:20:23.226303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00365 > 2) by scale factor 0.998177\nI1207 18:20:31.587545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41363 > 2) by scale factor 0.828629\nI1207 18:20:35.767890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85465 > 2) by scale factor 0.700612\nI1207 18:20:44.128032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34764 > 2) by scale factor 0.851918\nI1207 18:20:48.310010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77175 > 2) by scale factor 0.721566\nI1207 18:20:52.490581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18483 > 2) by scale factor 0.915404\nI1207 18:20:56.671483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05897 > 2) by scale factor 0.653814\nI1207 18:21:00.853124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78627 > 2) by scale factor 0.528224\nI1207 18:21:05.033788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53145 > 2) by scale factor 0.56634\nI1207 18:21:09.215430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12866 > 2) by scale factor 0.484419\nI1207 18:21:13.396500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54299 > 2) by scale factor 0.786476\nI1207 18:21:17.576952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.34617 > 2) by scale factor 0.852454\nI1207 18:21:21.759474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59265 > 2) by scale factor 0.771413\nI1207 18:21:25.939718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62732 > 2) by scale factor 0.761231\nI1207 18:21:30.119813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7604 > 2) by scale factor 0.724534\nI1207 18:21:34.302084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49268 > 2) by scale factor 0.572625\nI1207 18:21:38.483361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32719 > 2) by scale factor 0.601108\nI1207 18:21:42.662521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5392 > 2) by scale factor 0.78765\nI1207 18:21:46.844275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30779 > 2) by scale factor 0.604634\nI1207 18:21:51.025955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66648 > 2) by scale factor 0.750053\nI1207 18:21:55.207715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86401 > 2) by scale factor 0.698321\nI1207 18:21:59.389533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16525 > 2) by scale factor 0.480163\nI1207 18:22:03.570533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41182 > 2) by scale factor 0.586197\nI1207 18:22:07.751130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88619 > 2) by scale factor 0.692956\nI1207 18:22:11.933348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77004 > 2) by scale factor 0.530499\nI1207 18:22:16.113662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47261 > 2) by scale factor 0.808861\nI1207 18:22:20.295150  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51866 > 2) by scale factor 0.794073\nI1207 18:22:28.656448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18967 > 2) by scale factor 0.913379\nI1207 18:22:32.838134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66726 > 2) by scale factor 0.749833\nI1207 18:22:37.020126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35175 > 2) by scale factor 0.596703\nI1207 18:22:41.202581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37655 > 2) by scale factor 0.841555\nI1207 18:22:45.384433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68872 > 2) by scale factor 0.743848\nI1207 18:22:49.564502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1037 > 2) by scale factor 0.950704\nI1207 18:23:02.102515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84496 > 2) by scale factor 0.702996\nI1207 18:23:06.285665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3309 > 2) by scale factor 0.600439\nI1207 18:23:10.465664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60722 > 2) by scale factor 0.554444\nI1207 18:23:23.005237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7358 > 2) by scale factor 0.731047\nI1207 18:23:39.722064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7672 > 2) by scale factor 0.722753\nI1207 18:23:43.902251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17574 > 2) by scale factor 0.919227\nI1207 18:23:48.083132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56345 > 2) by scale factor 0.561253\nI1207 18:23:52.264664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.4395 > 2) by scale factor 0.819839\nI1207 18:23:56.446127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96735 > 2) by scale factor 0.674002\nI1207 18:24:00.627890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93124 > 2) by scale factor 0.682306\nI1207 18:24:04.808337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53279 > 2) by scale factor 0.789643\nI1207 18:24:08.988755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73996 > 2) by scale factor 0.729937\nI1207 18:24:13.170140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81895 > 2) by scale factor 0.523704\nI1207 18:24:17.351946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53554 > 2) by scale factor 0.788787\nI1207 18:24:21.533542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04565 > 2) by scale factor 0.494359\nI1207 18:24:25.715651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15402 > 2) by scale factor 0.928495\nI1207 18:24:29.897092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15487 > 2) by scale factor 0.63394\nI1207 18:24:34.078186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01412 > 2) by scale factor 0.992989\nI1207 18:24:38.235221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57035 > 2) by scale factor 0.778105\nI1207 18:24:42.389050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01695 > 2) by scale factor 0.991594\nI1207 18:24:46.543709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77129 > 2) by scale factor 0.721685\nI1207 18:24:54.852522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07593 > 2) by scale factor 0.65021\nI1207 18:24:59.007546  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07181 > 2) by scale factor 0.651081\nI1207 18:25:03.161942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53209 > 2) by scale factor 0.789861\nI1207 18:25:11.468922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69307 > 2) by scale factor 0.742646\nI1207 18:25:15.624130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32811 > 2) by scale factor 0.600942\nI1207 18:25:19.778949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70681 > 2) by scale factor 0.738877\nI1207 18:25:23.933086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79791 > 2) by scale factor 0.714819\nI1207 18:25:28.087791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11185 > 2) by scale factor 0.947039\nI1207 18:25:32.241875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10924 > 2) by scale factor 0.94821\nI1207 18:25:36.397222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30775 > 2) by scale factor 0.866643\nI1207 18:25:40.551686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34856 > 2) by scale factor 0.597271\nI1207 18:25:44.706342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83158 > 2) by scale factor 0.521978\nI1207 18:25:48.860121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17661 > 2) by scale factor 0.91886\nI1207 18:25:53.014842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3996 > 2) by scale factor 0.833473\nI1207 18:25:57.170436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56085 > 2) by scale factor 0.780992\nI1207 18:26:05.478273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.74967 > 2) by scale factor 0.727359\nI1207 18:26:09.633036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31949 > 2) by scale factor 0.862257\nI1207 18:26:22.093955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96933 > 2) by scale factor 0.503863\nI1207 18:26:26.248698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59993 > 2) by scale factor 0.769252\nI1207 18:26:30.404095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29424 > 2) by scale factor 0.871747\nI1207 18:26:30.416023  1922 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1207 18:29:08.516695  1922 solver.cpp:404]     Test net output #0: accuracy = 0.247294\nI1207 18:29:08.517061  1922 solver.cpp:404]     Test net output #1: loss = 5.18614 (* 1 = 5.18614 loss)\nI1207 18:29:12.461436  1922 solver.cpp:228] Iteration 8000, loss = 4.74585\nI1207 18:29:12.461484  1922 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1207 18:29:12.461501  1922 solver.cpp:244]     Train net output #1: loss = 4.74585 (* 1 = 4.74585 loss)\nI1207 18:29:12.659277  1922 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1207 18:29:12.669391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28131 > 2) by scale factor 0.876688\nI1207 18:29:16.822010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03254 > 2) by scale factor 0.983992\nI1207 18:29:20.975180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07805 > 2) by scale factor 0.962442\nI1207 18:29:25.128132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48093 > 2) by scale factor 0.574559\nI1207 18:29:29.281338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09904 > 2) by scale factor 0.952818\nI1207 18:29:33.433924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.37655 > 2) by scale factor 0.59232\nI1207 18:29:37.586433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75787 > 2) by scale factor 0.532217\nI1207 18:29:41.738955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96211 > 2) by scale factor 0.504781\nI1207 18:29:45.892720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95758 > 2) by scale factor 0.67623\nI1207 18:29:50.044530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24726 > 2) by scale factor 0.889975\nI1207 18:29:54.196272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43658 > 2) by scale factor 0.581974\nI1207 18:29:58.349756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7843 > 2) by scale factor 0.718314\nI1207 18:30:02.503181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97572 > 2) by scale factor 0.672107\nI1207 18:30:06.654619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3278 > 2) by scale factor 0.859181\nI1207 18:30:10.807252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5412 > 2) by scale factor 0.787029\nI1207 18:30:14.959604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34348 > 2) by scale factor 0.85343\nI1207 18:30:19.111902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19936 > 2) by scale factor 0.909357\nI1207 18:30:23.265048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25789 > 2) by scale factor 0.613893\nI1207 18:30:27.417368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17749 > 2) by scale factor 0.629428\nI1207 18:30:31.570852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69242 > 2) by scale factor 0.742826\nI1207 18:30:35.724448  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10895 > 2) by scale factor 0.643304\nI1207 18:30:39.875843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68536 > 2) by scale factor 0.74478\nI1207 18:30:48.177898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09774 > 2) by scale factor 0.953408\nI1207 18:30:52.330739  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24116 > 2) by scale factor 0.471569\nI1207 18:30:56.483664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93679 > 2) by scale factor 0.681015\nI1207 18:31:04.787225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78105 > 2) by scale factor 0.719153\nI1207 18:31:08.939203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8781 > 2) by scale factor 0.694904\nI1207 18:31:13.091218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54357 > 2) by scale factor 0.786297\nI1207 18:31:17.242862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84603 > 2) by scale factor 0.702733\nI1207 18:31:21.395472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75383 > 2) by scale factor 0.532789\nI1207 18:31:25.547745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26385 > 2) by scale factor 0.612772\nI1207 18:31:29.700484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90668 > 2) by scale factor 0.511944\nI1207 18:31:33.853492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19415 > 2) by scale factor 0.911515\nI1207 18:31:38.006086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61946 > 2) by scale factor 0.763517\nI1207 18:31:42.158260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.0636 > 2) by scale factor 0.652827\nI1207 18:31:46.309669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54254 > 2) by scale factor 0.440282\nI1207 18:31:54.611286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00475 > 2) by scale factor 0.665612\nI1207 18:32:02.913975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65528 > 2) by scale factor 0.547153\nI1207 18:32:07.065701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48157 > 2) by scale factor 0.805941\nI1207 18:32:11.219949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0632 > 2) by scale factor 0.969367\nI1207 18:32:15.371963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46044 > 2) by scale factor 0.812862\nI1207 18:32:19.524386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92867 > 2) by scale factor 0.682905\nI1207 18:32:23.676964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97621 > 2) by scale factor 0.671996\nI1207 18:32:27.829211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61078 > 2) by scale factor 0.766055\nI1207 18:32:31.981840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48297 > 2) by scale factor 0.805488\nI1207 18:32:36.134299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9908 > 2) by scale factor 0.668717\nI1207 18:32:40.285610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50512 > 2) by scale factor 0.798364\nI1207 18:32:44.438398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17265 > 2) by scale factor 0.920535\nI1207 18:32:48.589886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75214 > 2) by scale factor 0.726708\nI1207 18:32:52.740866  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22721 > 2) by scale factor 0.619731\nI1207 18:32:56.892936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15923 > 2) by scale factor 0.633067\nI1207 18:33:01.044165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86405 > 2) by scale factor 0.517591\nI1207 18:33:05.196368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68541 > 2) by scale factor 0.744766\nI1207 18:33:09.348505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14336 > 2) by scale factor 0.636261\nI1207 18:33:13.500006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46453 > 2) by scale factor 0.577278\nI1207 18:33:17.652542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78748 > 2) by scale factor 0.717494\nI1207 18:33:21.804397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60198 > 2) by scale factor 0.768645\nI1207 18:33:25.956403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22571 > 2) by scale factor 0.898589\nI1207 18:33:30.109277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53072 > 2) by scale factor 0.790288\nI1207 18:33:34.262806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1481 > 2) by scale factor 0.931056\nI1207 18:33:38.415941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54921 > 2) by scale factor 0.784556\nI1207 18:33:42.567881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18883 > 2) by scale factor 0.47746\nI1207 18:33:46.720512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55883 > 2) by scale factor 0.781607\nI1207 18:33:50.872582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.6136 > 2) by scale factor 0.553465\nI1207 18:33:55.024034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87723 > 2) by scale factor 0.695114\nI1207 18:34:03.327947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3295 > 2) by scale factor 0.858555\nI1207 18:34:07.479326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91393 > 2) by scale factor 0.686358\nI1207 18:34:11.631687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22577 > 2) by scale factor 0.620008\nI1207 18:34:15.784605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23725 > 2) by scale factor 0.893953\nI1207 18:34:19.936149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52304 > 2) by scale factor 0.792693\nI1207 18:34:24.087769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9007 > 2) by scale factor 0.68949\nI1207 18:34:28.240406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11416 > 2) by scale factor 0.642228\nI1207 18:34:32.392143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14311 > 2) by scale factor 0.636313\nI1207 18:34:36.543829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14242 > 2) by scale factor 0.933525\nI1207 18:34:40.696271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9173 > 2) by scale factor 0.510556\nI1207 18:34:48.998287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46608 > 2) by scale factor 0.577022\nI1207 18:34:53.150629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12225 > 2) by scale factor 0.640564\nI1207 18:34:57.303858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45594 > 2) by scale factor 0.814353\nI1207 18:35:05.605609  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21955 > 2) by scale factor 0.901084\nI1207 18:35:09.757733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37673 > 2) by scale factor 0.592288\nI1207 18:35:13.910264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67511 > 2) by scale factor 0.747632\nI1207 18:35:18.062702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01809 > 2) by scale factor 0.662671\nI1207 18:35:22.214566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0343 > 2) by scale factor 0.65913\nI1207 18:35:30.517482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12447 > 2) by scale factor 0.941409\nI1207 18:35:34.668040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41106 > 2) by scale factor 0.586328\nI1207 18:35:38.819870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56695 > 2) by scale factor 0.560703\nI1207 18:35:42.972498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61376 > 2) by scale factor 0.765181\nI1207 18:35:47.124634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47761 > 2) by scale factor 0.80723\nI1207 18:35:51.275657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83819 > 2) by scale factor 0.704675\nI1207 18:35:55.426936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06087 > 2) by scale factor 0.970462\nI1207 18:35:59.579239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95329 > 2) by scale factor 0.677211\nI1207 18:36:03.731062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05354 > 2) by scale factor 0.973927\nI1207 18:36:03.742848  1922 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1207 
18:38:41.993943  1922 solver.cpp:404]     Test net output #0: accuracy = 0.150059\nI1207 18:38:41.994318  1922 solver.cpp:404]     Test net output #1: loss = 9.20704 (* 1 = 9.20704 loss)\nI1207 18:38:45.937371  1922 solver.cpp:228] Iteration 8100, loss = 8.72587\nI1207 18:38:45.937412  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 18:38:45.937429  1922 solver.cpp:244]     Train net output #1: loss = 8.72587 (* 1 = 8.72587 loss)\nI1207 18:38:46.136013  1922 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1207 18:38:46.146086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71269 > 2) by scale factor 0.538693\nI1207 18:38:50.300850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37348 > 2) by scale factor 0.59286\nI1207 18:38:54.456022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53416 > 2) by scale factor 0.789217\nI1207 18:39:02.765907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29491 > 2) by scale factor 0.606996\nI1207 18:39:19.379212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68403 > 2) by scale factor 0.745148\nI1207 18:39:23.534096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58999 > 2) by scale factor 0.772204\nI1207 18:39:27.688603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53706 > 2) by scale factor 0.565441\nI1207 18:39:31.844283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90575 > 2) by scale factor 0.512066\nI1207 18:39:36.000067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71466 > 2) by scale factor 0.736741\nI1207 18:39:40.155035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47951 > 2) by scale factor 0.80661\nI1207 18:39:56.770289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.50716 > 2) by scale factor 0.797716\nI1207 18:40:00.926033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42295 > 2) by scale factor 0.825442\nI1207 18:40:05.081001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47717 > 2) by scale factor 0.807373\nI1207 18:40:09.235971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94338 > 2) by scale factor 0.679491\nI1207 18:40:17.545764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03323 > 2) by scale factor 0.659363\nI1207 18:40:21.701575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62819 > 2) by scale factor 0.551238\nI1207 18:40:38.315510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25857 > 2) by scale factor 0.885516\nI1207 18:40:46.625671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6222 > 2) by scale factor 0.552151\nI1207 18:40:50.781517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54086 > 2) by scale factor 0.787134\nI1207 18:40:54.936079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.239 > 2) by scale factor 0.617474\nI1207 18:40:59.092268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16601 > 2) by scale factor 0.63171\nI1207 18:41:03.247220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95921 > 2) by scale factor 0.675856\nI1207 18:41:07.403569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58337 > 2) by scale factor 0.774183\nI1207 18:41:15.713379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20531 > 2) by scale factor 0.906902\nI1207 18:41:19.870410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5576 > 2) by scale factor 0.781985\nI1207 
18:41:24.025746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32097 > 2) by scale factor 0.861709\nI1207 18:41:28.179682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59556 > 2) by scale factor 0.770547\nI1207 18:41:32.334501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06655 > 2) by scale factor 0.967794\nI1207 18:41:36.488613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73687 > 2) by scale factor 0.730761\nI1207 18:41:44.796073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57353 > 2) by scale factor 0.777142\nI1207 18:41:48.951426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85408 > 2) by scale factor 0.700751\nI1207 18:41:53.107039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85922 > 2) by scale factor 0.699492\nI1207 18:41:57.263146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36207 > 2) by scale factor 0.846714\nI1207 18:42:01.417740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62378 > 2) by scale factor 0.76226\nI1207 18:42:05.572190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05998 > 2) by scale factor 0.653598\nI1207 18:42:09.728569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95305 > 2) by scale factor 0.505939\nI1207 18:42:13.884011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44926 > 2) by scale factor 0.816572\nI1207 18:42:18.039022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26208 > 2) by scale factor 0.613106\nI1207 18:42:22.195232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5008 > 2) by scale factor 0.571298\nI1207 18:42:26.351980  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.97216 > 2) by scale factor 0.503504\nI1207 18:42:30.507565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42601 > 2) by scale factor 0.451875\nI1207 18:42:34.663681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19926 > 2) by scale factor 0.909398\nI1207 18:42:38.818933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87274 > 2) by scale factor 0.6962\nI1207 18:42:47.126688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41316 > 2) by scale factor 0.585967\nI1207 18:42:51.282007  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18754 > 2) by scale factor 0.914268\nI1207 18:42:55.436951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41493 > 2) by scale factor 0.82818\nI1207 18:42:59.593065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17415 > 2) by scale factor 0.919901\nI1207 18:43:03.748270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11752 > 2) by scale factor 0.641536\nI1207 18:43:12.056754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83077 > 2) by scale factor 0.706522\nI1207 18:43:16.211532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76637 > 2) by scale factor 0.722969\nI1207 18:43:20.367899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03799 > 2) by scale factor 0.495296\nI1207 18:43:24.524230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27443 > 2) by scale factor 0.879342\nI1207 18:43:28.678337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49743 > 2) by scale factor 0.800824\nI1207 18:43:36.987277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44461 > 2) by scale factor 0.580617\nI1207 
18:43:41.143245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07942 > 2) by scale factor 0.961809\nI1207 18:43:45.298229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24222 > 2) by scale factor 0.616861\nI1207 18:43:49.453127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62376 > 2) by scale factor 0.762264\nI1207 18:43:53.608736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17391 > 2) by scale factor 0.920001\nI1207 18:43:57.763305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80306 > 2) by scale factor 0.713506\nI1207 18:44:01.919055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96963 > 2) by scale factor 0.673484\nI1207 18:44:06.074247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02783 > 2) by scale factor 0.986276\nI1207 18:44:14.383570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49857 > 2) by scale factor 0.800457\nI1207 18:44:18.538851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69482 > 2) by scale factor 0.541299\nI1207 18:44:22.694609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0667 > 2) by scale factor 0.967726\nI1207 18:44:26.849280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78558 > 2) by scale factor 0.717982\nI1207 18:44:31.004669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18054 > 2) by scale factor 0.917206\nI1207 18:44:35.160394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19611 > 2) by scale factor 0.62576\nI1207 18:44:39.316138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62395 > 2) by scale factor 0.762209\nI1207 18:44:43.471130  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.85269 > 2) by scale factor 0.701094\nI1207 18:44:47.625824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08654 > 2) by scale factor 0.489412\nI1207 18:44:51.781131  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32143 > 2) by scale factor 0.602151\nI1207 18:44:55.937340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42649 > 2) by scale factor 0.824237\nI1207 18:45:00.091872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56641 > 2) by scale factor 0.7793\nI1207 18:45:04.247128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77074 > 2) by scale factor 0.5304\nI1207 18:45:08.402314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16255 > 2) by scale factor 0.924835\nI1207 18:45:12.558079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97005 > 2) by scale factor 0.503772\nI1207 18:45:16.713109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78058 > 2) by scale factor 0.529019\nI1207 18:45:20.868376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08366 > 2) by scale factor 0.959847\nI1207 18:45:25.022697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23797 > 2) by scale factor 0.893669\nI1207 18:45:29.177886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9413 > 2) by scale factor 0.679971\nI1207 18:45:33.332290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53333 > 2) by scale factor 0.789474\nI1207 18:45:37.487939  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17202 > 2) by scale factor 0.920803\nI1207 18:45:37.499812  1922 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1207 18:48:15.649933  1922 solver.cpp:404]     Test net output #0: 
accuracy = 0.222941\nI1207 18:48:15.650287  1922 solver.cpp:404]     Test net output #1: loss = 7.16795 (* 1 = 7.16795 loss)\nI1207 18:48:19.595866  1922 solver.cpp:228] Iteration 8200, loss = 6.62362\nI1207 18:48:19.595911  1922 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1207 18:48:19.595929  1922 solver.cpp:244]     Train net output #1: loss = 6.62362 (* 1 = 6.62362 loss)\nI1207 18:48:19.794554  1922 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1207 18:48:19.804692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75572 > 2) by scale factor 0.725763\nI1207 18:48:23.959064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22042 > 2) by scale factor 0.621036\nI1207 18:48:28.113669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12926 > 2) by scale factor 0.939294\nI1207 18:48:32.267442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7868 > 2) by scale factor 0.717669\nI1207 18:48:36.421294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31969 > 2) by scale factor 0.862186\nI1207 18:48:40.574553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76571 > 2) by scale factor 0.531108\nI1207 18:48:44.727723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87462 > 2) by scale factor 0.695744\nI1207 18:48:48.881708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49473 > 2) by scale factor 0.80169\nI1207 18:48:53.036731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16276 > 2) by scale factor 0.924746\nI1207 18:49:01.342121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93937 > 2) by scale factor 0.680417\nI1207 18:49:09.647802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99158 > 2) by scale factor 0.668542\nI1207 
18:49:13.801594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66873 > 2) by scale factor 0.749419\nI1207 18:49:17.955778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54575 > 2) by scale factor 0.785624\nI1207 18:49:22.108867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04459 > 2) by scale factor 0.656903\nI1207 18:49:26.262871  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32505 > 2) by scale factor 0.601495\nI1207 18:49:30.416075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74326 > 2) by scale factor 0.534294\nI1207 18:49:34.568295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60173 > 2) by scale factor 0.555288\nI1207 18:49:38.721469  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49854 > 2) by scale factor 0.571668\nI1207 18:49:42.875313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9951 > 2) by scale factor 0.400392\nI1207 18:49:47.028427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72881 > 2) by scale factor 0.422939\nI1207 18:49:51.181212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87314 > 2) by scale factor 0.696103\nI1207 18:49:55.333499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72122 > 2) by scale factor 0.734965\nI1207 18:49:59.487609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35874 > 2) by scale factor 0.595461\nI1207 18:50:03.640365  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37643 > 2) by scale factor 0.841598\nI1207 18:50:07.793710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68791 > 2) by scale factor 0.744073\nI1207 18:50:11.947139  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.19028 > 2) by scale factor 0.477295\nI1207 18:50:20.251273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58284 > 2) by scale factor 0.774342\nI1207 18:50:24.405041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22641 > 2) by scale factor 0.898308\nI1207 18:50:28.558481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16465 > 2) by scale factor 0.480233\nI1207 18:50:32.712957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21933 > 2) by scale factor 0.901172\nI1207 18:50:36.866041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48457 > 2) by scale factor 0.80497\nI1207 18:50:41.019112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95706 > 2) by scale factor 0.676347\nI1207 18:50:45.171989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1369 > 2) by scale factor 0.935934\nI1207 18:50:49.324216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58085 > 2) by scale factor 0.774939\nI1207 18:50:53.476847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80077 > 2) by scale factor 0.526209\nI1207 18:50:57.630547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82919 > 2) by scale factor 0.522304\nI1207 18:51:01.783067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26793 > 2) by scale factor 0.881862\nI1207 18:51:10.088073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31689 > 2) by scale factor 0.863228\nI1207 18:51:14.241597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57082 > 2) by scale factor 0.777962\nI1207 18:51:18.394790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39245 > 2) by scale factor 0.835965\nI1207 
18:51:22.547577  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45352 > 2) by scale factor 0.57912\nI1207 18:51:26.700119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24174 > 2) by scale factor 0.616952\nI1207 18:51:30.852655  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70917 > 2) by scale factor 0.738234\nI1207 18:51:35.006745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56709 > 2) by scale factor 0.779094\nI1207 18:51:39.160161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44279 > 2) by scale factor 0.818736\nI1207 18:51:43.313472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86921 > 2) by scale factor 0.516902\nI1207 18:51:47.467021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40705 > 2) by scale factor 0.587018\nI1207 18:51:51.620591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77534 > 2) by scale factor 0.529754\nI1207 18:51:55.773622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79413 > 2) by scale factor 0.52713\nI1207 18:51:59.925984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85828 > 2) by scale factor 0.518365\nI1207 18:52:04.078734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33736 > 2) by scale factor 0.599275\nI1207 18:52:08.232095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90214 > 2) by scale factor 0.689147\nI1207 18:52:12.385016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16535 > 2) by scale factor 0.631841\nI1207 18:52:16.537263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08703 > 2) by scale factor 0.647871\nI1207 18:52:20.690997  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.98811 > 2) by scale factor 0.669319\nI1207 18:52:24.844188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83367 > 2) by scale factor 0.705798\nI1207 18:52:28.996714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48542 > 2) by scale factor 0.573819\nI1207 18:52:33.150418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39413 > 2) by scale factor 0.589253\nI1207 18:52:37.304110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0609 > 2) by scale factor 0.653403\nI1207 18:52:41.458153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.652 > 2) by scale factor 0.547645\nI1207 18:52:45.611827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34183 > 2) by scale factor 0.854034\nI1207 18:52:49.764809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79275 > 2) by scale factor 0.71614\nI1207 18:52:53.918614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05425 > 2) by scale factor 0.654825\nI1207 18:52:58.071667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02143 > 2) by scale factor 0.989398\nI1207 18:53:02.225401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32796 > 2) by scale factor 0.85912\nI1207 18:53:06.378945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95124 > 2) by scale factor 0.67768\nI1207 18:53:10.532045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08139 > 2) by scale factor 0.960895\nI1207 18:53:14.684609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14968 > 2) by scale factor 0.930373\nI1207 18:53:18.838089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54383 > 2) by scale factor 0.786216\nI1207 
18:53:22.991430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02491 > 2) by scale factor 0.987699\nI1207 18:53:27.144366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13661 > 2) by scale factor 0.936061\nI1207 18:53:31.297510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51637 > 2) by scale factor 0.794794\nI1207 18:53:39.602362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12789 > 2) by scale factor 0.939897\nI1207 18:53:43.754916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49737 > 2) by scale factor 0.800842\nI1207 18:53:47.907724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68797 > 2) by scale factor 0.744056\nI1207 18:53:52.062050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23314 > 2) by scale factor 0.895601\nI1207 18:54:00.365747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08239 > 2) by scale factor 0.648847\nI1207 18:54:04.518560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60396 > 2) by scale factor 0.768061\nI1207 18:54:12.824662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6446 > 2) by scale factor 0.756259\nI1207 18:54:16.977170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76089 > 2) by scale factor 0.724404\nI1207 18:54:21.130254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10456 > 2) by scale factor 0.950316\nI1207 18:54:25.284500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36004 > 2) by scale factor 0.847444\nI1207 18:54:29.437340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43907 > 2) by scale factor 0.581553\nI1207 18:54:33.590335  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.34235 > 2) by scale factor 0.853842\nI1207 18:54:37.744333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06384 > 2) by scale factor 0.969067\nI1207 18:54:46.048321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35311 > 2) by scale factor 0.596461\nI1207 18:54:50.201452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82584 > 2) by scale factor 0.707755\nI1207 18:54:54.354444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52399 > 2) by scale factor 0.792397\nI1207 18:55:02.659658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33225 > 2) by scale factor 0.857541\nI1207 18:55:06.811941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18486 > 2) by scale factor 0.627972\nI1207 18:55:10.964540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47722 > 2) by scale factor 0.575172\nI1207 18:55:10.976469  1922 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1207 18:57:49.273031  1922 solver.cpp:404]     Test net output #0: accuracy = 0.198883\nI1207 18:57:49.273382  1922 solver.cpp:404]     Test net output #1: loss = 4.94133 (* 1 = 4.94133 loss)\nI1207 18:57:53.218583  1922 solver.cpp:228] Iteration 8300, loss = 5.54948\nI1207 18:57:53.218631  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 18:57:53.218648  1922 solver.cpp:244]     Train net output #1: loss = 5.54948 (* 1 = 5.54948 loss)\nI1207 18:57:53.415967  1922 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1207 18:57:53.426242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54886 > 2) by scale factor 0.784664\nI1207 18:57:57.580466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54991 > 2) by scale factor 0.784342\nI1207 18:58:01.735193  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.88799 > 2) by scale factor 0.692524\nI1207 18:58:05.891767  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94279 > 2) by scale factor 0.679627\nI1207 18:58:10.047281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.993 > 2) by scale factor 0.668225\nI1207 18:58:14.202460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19316 > 2) by scale factor 0.62634\nI1207 18:58:18.357722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37397 > 2) by scale factor 0.842471\nI1207 18:58:22.513795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23907 > 2) by scale factor 0.61746\nI1207 18:58:26.670450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63343 > 2) by scale factor 0.759465\nI1207 18:58:30.826548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4063 > 2) by scale factor 0.831151\nI1207 18:58:34.981976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45347 > 2) by scale factor 0.815172\nI1207 18:58:39.136862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23822 > 2) by scale factor 0.893566\nI1207 18:58:43.292611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81587 > 2) by scale factor 0.71026\nI1207 18:58:51.601127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80478 > 2) by scale factor 0.71307\nI1207 18:58:59.910359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77338 > 2) by scale factor 0.721141\nI1207 18:59:04.065727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59954 > 2) by scale factor 0.769367\nI1207 18:59:08.220999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06422 > 2) by scale factor 0.968888\nI1207 
18:59:12.376904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20992 > 2) by scale factor 0.90501\nI1207 18:59:16.532261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90573 > 2) by scale factor 0.512069\nI1207 18:59:20.687988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63991 > 2) by scale factor 0.7576\nI1207 18:59:24.843581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18448 > 2) by scale factor 0.915549\nI1207 18:59:28.998371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13894 > 2) by scale factor 0.935042\nI1207 18:59:33.153456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45065 > 2) by scale factor 0.816111\nI1207 18:59:37.309208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55918 > 2) by scale factor 0.781501\nI1207 18:59:41.464313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78189 > 2) by scale factor 0.718936\nI1207 18:59:49.774432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47805 > 2) by scale factor 0.807087\nI1207 18:59:53.930452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41563 > 2) by scale factor 0.827943\nI1207 18:59:58.086555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74085 > 2) by scale factor 0.534638\nI1207 19:00:02.242483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38592 > 2) by scale factor 0.590681\nI1207 19:00:06.397732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23409 > 2) by scale factor 0.895218\nI1207 19:00:10.553289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02399 > 2) by scale factor 0.988148\nI1207 19:00:23.016265  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.95272 > 2) by scale factor 0.677341\nI1207 19:00:27.171815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44363 > 2) by scale factor 0.818455\nI1207 19:00:35.479445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78681 > 2) by scale factor 0.528148\nI1207 19:00:39.635915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47503 > 2) by scale factor 0.575535\nI1207 19:00:43.791736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80911 > 2) by scale factor 0.71197\nI1207 19:00:47.946343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30573 > 2) by scale factor 0.605011\nI1207 19:00:52.101492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04059 > 2) by scale factor 0.657767\nI1207 19:00:56.256701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22398 > 2) by scale factor 0.473487\nI1207 19:01:00.413381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10753 > 2) by scale factor 0.643598\nI1207 19:01:04.569124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0236 > 2) by scale factor 0.661464\nI1207 19:01:08.724788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31228 > 2) by scale factor 0.864947\nI1207 19:01:12.880405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7492 > 2) by scale factor 0.727485\nI1207 19:01:17.037251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62341 > 2) by scale factor 0.551966\nI1207 19:01:21.192075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05735 > 2) by scale factor 0.972125\nI1207 19:01:25.347811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19205 > 2) by scale factor 0.912388\nI1207 
19:01:29.503274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03749 > 2) by scale factor 0.495357\nI1207 19:01:33.659075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30123 > 2) by scale factor 0.869099\nI1207 19:01:37.814190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4437 > 2) by scale factor 0.450076\nI1207 19:01:41.970124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75704 > 2) by scale factor 0.725416\nI1207 19:01:46.125988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45072 > 2) by scale factor 0.449365\nI1207 19:01:50.281373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85123 > 2) by scale factor 0.519315\nI1207 19:01:54.436615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78683 > 2) by scale factor 0.528147\nI1207 19:01:58.591743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27217 > 2) by scale factor 0.611215\nI1207 19:02:02.746381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03761 > 2) by scale factor 0.658413\nI1207 19:02:06.901914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41079 > 2) by scale factor 0.829602\nI1207 19:02:11.057847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03466 > 2) by scale factor 0.659052\nI1207 19:02:15.212878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33551 > 2) by scale factor 0.856344\nI1207 19:02:19.367449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64216 > 2) by scale factor 0.756955\nI1207 19:02:23.523058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12063 > 2) by scale factor 0.943116\nI1207 19:02:31.830616  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.23923 > 2) by scale factor 0.893166\nI1207 19:02:40.139008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56507 > 2) by scale factor 0.560999\nI1207 19:02:44.294695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66243 > 2) by scale factor 0.546086\nI1207 19:02:48.449545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09821 > 2) by scale factor 0.953194\nI1207 19:02:52.605041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38403 > 2) by scale factor 0.838917\nI1207 19:02:56.760758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53587 > 2) by scale factor 0.788683\nI1207 19:03:00.916069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17929 > 2) by scale factor 0.629072\nI1207 19:03:05.071696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35353 > 2) by scale factor 0.849786\nI1207 19:03:09.226630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04864 > 2) by scale factor 0.65603\nI1207 19:03:17.536404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06304 > 2) by scale factor 0.969442\nI1207 19:03:21.691298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2377 > 2) by scale factor 0.617723\nI1207 19:03:25.846720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84474 > 2) by scale factor 0.703052\nI1207 19:03:30.002807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18367 > 2) by scale factor 0.915889\nI1207 19:03:34.159015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45338 > 2) by scale factor 0.815202\nI1207 19:03:42.467887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9083 > 2) by scale factor 0.687687\nI1207 
19:03:46.623457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80428 > 2) by scale factor 0.713196\nI1207 19:03:50.779083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22254 > 2) by scale factor 0.620628\nI1207 19:03:54.934514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58055 > 2) by scale factor 0.775029\nI1207 19:03:59.090410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53727 > 2) by scale factor 0.565408\nI1207 19:04:03.245204  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00138 > 2) by scale factor 0.999312\nI1207 19:04:07.400261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78553 > 2) by scale factor 0.717995\nI1207 19:04:11.555776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49306 > 2) by scale factor 0.802226\nI1207 19:04:15.711338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44051 > 2) by scale factor 0.8195\nI1207 19:04:19.867043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84217 > 2) by scale factor 0.703688\nI1207 19:04:24.022300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12603 > 2) by scale factor 0.484728\nI1207 19:04:28.177476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34328 > 2) by scale factor 0.853506\nI1207 19:04:40.639644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50535 > 2) by scale factor 0.570557\nI1207 19:04:44.794598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43089 > 2) by scale factor 0.451377\nI1207 19:04:44.806524  1922 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1207 19:07:23.018434  1922 solver.cpp:404]     Test net output #0: accuracy = 0.157059\nI1207 19:07:23.018808  1922 solver.cpp:404]    
 Test net output #1: loss = 11.041 (* 1 = 11.041 loss)\nI1207 19:07:26.958801  1922 solver.cpp:228] Iteration 8400, loss = 9.62577\nI1207 19:07:26.958853  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 19:07:26.958870  1922 solver.cpp:244]     Train net output #1: loss = 9.62577 (* 1 = 9.62577 loss)\nI1207 19:07:27.178908  1922 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1207 19:07:27.189013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90058 > 2) by scale factor 0.512744\nI1207 19:07:31.344211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0703 > 2) by scale factor 0.491364\nI1207 19:07:35.498176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85546 > 2) by scale factor 0.700412\nI1207 19:07:39.652165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50206 > 2) by scale factor 0.799341\nI1207 19:07:43.805522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89589 > 2) by scale factor 0.690634\nI1207 19:07:47.959637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62776 > 2) by scale factor 0.761105\nI1207 19:07:52.114392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22704 > 2) by scale factor 0.898052\nI1207 19:07:56.268106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6747 > 2) by scale factor 0.747746\nI1207 19:08:00.422380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42619 > 2) by scale factor 0.583738\nI1207 19:08:04.577087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69753 > 2) by scale factor 0.741419\nI1207 19:08:08.731757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10977 > 2) by scale factor 0.947969\nI1207 19:08:17.038813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.43891 > 2) by scale factor 0.820038\nI1207 19:08:21.194484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88117 > 2) by scale factor 0.515309\nI1207 19:08:25.349019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4547 > 2) by scale factor 0.814765\nI1207 19:08:29.502967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72244 > 2) by scale factor 0.734634\nI1207 19:08:33.657933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29274 > 2) by scale factor 0.607396\nI1207 19:08:37.812764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9402 > 2) by scale factor 0.680226\nI1207 19:08:41.968278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60813 > 2) by scale factor 0.554304\nI1207 19:08:46.121848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42774 > 2) by scale factor 0.823812\nI1207 19:08:50.275696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9501 > 2) by scale factor 0.677943\nI1207 19:08:54.428632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40394 > 2) by scale factor 0.831968\nI1207 19:08:58.582842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16653 > 2) by scale factor 0.480015\nI1207 19:09:02.736191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45078 > 2) by scale factor 0.579579\nI1207 19:09:06.890499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15702 > 2) by scale factor 0.633509\nI1207 19:09:11.045660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05796 > 2) by scale factor 0.654031\nI1207 19:09:15.200664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2588 > 2) by scale factor 0.469616\nI1207 
19:09:19.354284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34722 > 2) by scale factor 0.852072\nI1207 19:09:23.508178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67125 > 2) by scale factor 0.748713\nI1207 19:09:27.661285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8033 > 2) by scale factor 0.713444\nI1207 19:09:31.814745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34834 > 2) by scale factor 0.851665\nI1207 19:09:40.121315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53222 > 2) by scale factor 0.78982\nI1207 19:09:44.275775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57408 > 2) by scale factor 0.776976\nI1207 19:09:48.429795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92111 > 2) by scale factor 0.684671\nI1207 19:09:52.584355  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84368 > 2) by scale factor 0.703313\nI1207 19:09:56.738584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9367 > 2) by scale factor 0.50804\nI1207 19:10:00.892374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17663 > 2) by scale factor 0.629598\nI1207 19:10:05.046797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09335 > 2) by scale factor 0.646549\nI1207 19:10:09.200947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54158 > 2) by scale factor 0.440376\nI1207 19:10:13.355494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30642 > 2) by scale factor 0.604883\nI1207 19:10:17.509510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77957 > 2) by scale factor 0.52916\nI1207 19:10:21.663630  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.13815 > 2) by scale factor 0.637317\nI1207 19:10:25.817831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39529 > 2) by scale factor 0.589051\nI1207 19:10:29.971669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83164 > 2) by scale factor 0.706304\nI1207 19:10:34.126888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1892 > 2) by scale factor 0.627117\nI1207 19:10:38.280428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47903 > 2) by scale factor 0.806768\nI1207 19:10:42.434847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29106 > 2) by scale factor 0.872958\nI1207 19:10:46.589107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45741 > 2) by scale factor 0.578468\nI1207 19:10:50.743515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87573 > 2) by scale factor 0.695475\nI1207 19:10:54.897938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59224 > 2) by scale factor 0.556755\nI1207 19:11:03.204366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88569 > 2) by scale factor 0.693075\nI1207 19:11:07.359931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0739 > 2) by scale factor 0.650638\nI1207 19:11:11.514725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20518 > 2) by scale factor 0.906954\nI1207 19:11:19.819774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60293 > 2) by scale factor 0.768366\nI1207 19:11:23.974550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38339 > 2) by scale factor 0.839141\nI1207 19:11:36.433390  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48453 > 2) by scale factor 0.804981\nI1207 
19:11:40.586928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24134 > 2) by scale factor 0.617028\nI1207 19:11:53.046960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12301 > 2) by scale factor 0.942059\nI1207 19:12:01.353621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59011 > 2) by scale factor 0.772169\nI1207 19:12:05.508034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49838 > 2) by scale factor 0.571693\nI1207 19:12:09.663074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9363 > 2) by scale factor 0.681129\nI1207 19:12:13.816510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32247 > 2) by scale factor 0.861153\nI1207 19:12:17.969455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21603 > 2) by scale factor 0.902516\nI1207 19:12:22.124630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09925 > 2) by scale factor 0.487894\nI1207 19:12:26.278558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82641 > 2) by scale factor 0.707612\nI1207 19:12:30.432674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18594 > 2) by scale factor 0.627759\nI1207 19:12:34.586992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28961 > 2) by scale factor 0.607975\nI1207 19:12:38.741021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44425 > 2) by scale factor 0.818248\nI1207 19:12:42.896025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88301 > 2) by scale factor 0.693718\nI1207 19:12:47.049914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4615 > 2) by scale factor 0.44828\nI1207 19:12:51.202000  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.60212 > 2) by scale factor 0.768604\nI1207 19:12:55.354733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72415 > 2) by scale factor 0.734175\nI1207 19:12:59.508776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22738 > 2) by scale factor 0.619697\nI1207 19:13:03.664131  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98683 > 2) by scale factor 0.669606\nI1207 19:13:07.818675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30993 > 2) by scale factor 0.865827\nI1207 19:13:11.972653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43666 > 2) by scale factor 0.581961\nI1207 19:13:16.126881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10762 > 2) by scale factor 0.948937\nI1207 19:13:20.281041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09891 > 2) by scale factor 0.952877\nI1207 19:13:24.435859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91239 > 2) by scale factor 0.686722\nI1207 19:13:28.590641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77819 > 2) by scale factor 0.529355\nI1207 19:13:32.743441  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34257 > 2) by scale factor 0.598343\nI1207 19:13:36.897653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85002 > 2) by scale factor 0.519478\nI1207 19:13:41.051848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30362 > 2) by scale factor 0.464726\nI1207 19:13:45.206113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27574 > 2) by scale factor 0.610549\nI1207 19:13:49.360040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78541 > 2) by scale factor 
0.528345\nI1207 19:13:53.513813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6444 > 2) by scale factor 0.756314\nI1207 19:13:57.667958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32906 > 2) by scale factor 0.60077\nI1207 19:14:01.822474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60289 > 2) by scale factor 0.55511\nI1207 19:14:05.975950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18756 > 2) by scale factor 0.477605\nI1207 19:14:10.130887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07 > 2) by scale factor 0.4914\nI1207 19:14:14.285308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47661 > 2) by scale factor 0.446767\nI1207 19:14:18.439092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81361 > 2) by scale factor 0.524438\nI1207 19:14:18.450955  1922 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1207 19:16:56.725323  1922 solver.cpp:404]     Test net output #0: accuracy = 0.160588\nI1207 19:16:56.725673  1922 solver.cpp:404]     Test net output #1: loss = 9.36626 (* 1 = 9.36626 loss)\nI1207 19:17:00.668309  1922 solver.cpp:228] Iteration 8500, loss = 9.24772\nI1207 19:17:00.668359  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 19:17:00.668375  1922 solver.cpp:244]     Train net output #1: loss = 9.24772 (* 1 = 9.24772 loss)\nI1207 19:17:00.866125  1922 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1207 19:17:00.876168  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27662 > 2) by scale factor 0.610385\nI1207 19:17:05.029690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18381 > 2) by scale factor 0.628178\nI1207 19:17:09.182457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09563 > 2) by scale factor 
0.646073\nI1207 19:17:13.335433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26681 > 2) by scale factor 0.882296\nI1207 19:17:17.489130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68899 > 2) by scale factor 0.743773\nI1207 19:17:21.642196  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77893 > 2) by scale factor 0.719701\nI1207 19:17:25.795542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69611 > 2) by scale factor 0.741809\nI1207 19:17:29.947963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61956 > 2) by scale factor 0.763487\nI1207 19:17:34.100845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19086 > 2) by scale factor 0.912885\nI1207 19:17:38.253892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70871 > 2) by scale factor 0.539271\nI1207 19:17:42.406991  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59884 > 2) by scale factor 0.769575\nI1207 19:17:46.560919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32736 > 2) by scale factor 0.601078\nI1207 19:17:50.713697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5261 > 2) by scale factor 0.567198\nI1207 19:17:54.866077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6322 > 2) by scale factor 0.75982\nI1207 19:17:59.018899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96792 > 2) by scale factor 0.673872\nI1207 19:18:03.172005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2039 > 2) by scale factor 0.907481\nI1207 19:18:07.325172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3622 > 2) by scale factor 0.846667\nI1207 19:18:11.478148  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.42544 > 2) by scale factor 0.451933\nI1207 19:18:15.631229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20262 > 2) by scale factor 0.624489\nI1207 19:18:19.783828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08779 > 2) by scale factor 0.957949\nI1207 19:18:23.937268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06512 > 2) by scale factor 0.49199\nI1207 19:18:28.089861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5195 > 2) by scale factor 0.793809\nI1207 19:18:32.242194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81553 > 2) by scale factor 0.710346\nI1207 19:18:36.394742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28258 > 2) by scale factor 0.609277\nI1207 19:18:40.547708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06753 > 2) by scale factor 0.967338\nI1207 19:18:44.701375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16267 > 2) by scale factor 0.632378\nI1207 19:18:48.854511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28039 > 2) by scale factor 0.877044\nI1207 19:19:01.308408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27848 > 2) by scale factor 0.610039\nI1207 19:19:05.461310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98894 > 2) by scale factor 0.669134\nI1207 19:19:09.613757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46029 > 2) by scale factor 0.812912\nI1207 19:19:13.768944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36601 > 2) by scale factor 0.845303\nI1207 19:19:17.922698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49107 > 2) by scale 
factor 0.802869\nI1207 19:19:22.075564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06671 > 2) by scale factor 0.652164\nI1207 19:19:26.228111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41864 > 2) by scale factor 0.826911\nI1207 19:19:30.382087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.368 > 2) by scale factor 0.844594\nI1207 19:19:34.534984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91576 > 2) by scale factor 0.685928\nI1207 19:19:38.689067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41212 > 2) by scale factor 0.586146\nI1207 19:19:42.843036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75721 > 2) by scale factor 0.72537\nI1207 19:19:46.997040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39456 > 2) by scale factor 0.589178\nI1207 19:19:51.150184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48496 > 2) by scale factor 0.573894\nI1207 19:19:55.303092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06541 > 2) by scale factor 0.652442\nI1207 19:19:59.455374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27685 > 2) by scale factor 0.878405\nI1207 19:20:03.608448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51697 > 2) by scale factor 0.794605\nI1207 19:20:07.762473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43248 > 2) by scale factor 0.58267\nI1207 19:20:11.914901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06507 > 2) by scale factor 0.968491\nI1207 19:20:16.067587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33299 > 2) by scale factor 0.600062\nI1207 19:20:20.221252  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.06757 > 2) by scale factor 0.491694\nI1207 19:20:24.374356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79403 > 2) by scale factor 0.715811\nI1207 19:20:28.526464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20165 > 2) by scale factor 0.90841\nI1207 19:20:32.679241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43468 > 2) by scale factor 0.821462\nI1207 19:20:36.832407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84204 > 2) by scale factor 0.70372\nI1207 19:20:40.985987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93117 > 2) by scale factor 0.682321\nI1207 19:20:45.139014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47651 > 2) by scale factor 0.57529\nI1207 19:20:49.292146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13584 > 2) by scale factor 0.637788\nI1207 19:20:53.445906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7659 > 2) by scale factor 0.419648\nI1207 19:20:57.598217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71987 > 2) by scale factor 0.537653\nI1207 19:21:01.750319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79827 > 2) by scale factor 0.416817\nI1207 19:21:05.902691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92617 > 2) by scale factor 0.683487\nI1207 19:21:18.355717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7954 > 2) by scale factor 0.715461\nI1207 19:21:22.508232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16949 > 2) by scale factor 0.921874\nI1207 19:21:26.661065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35042 > 2) by scale 
factor 0.59694\nI1207 19:21:30.813863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28929 > 2) by scale factor 0.873634\nI1207 19:21:34.966495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11818 > 2) by scale factor 0.944208\nI1207 19:21:39.120126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89466 > 2) by scale factor 0.690927\nI1207 19:21:43.272907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54153 > 2) by scale factor 0.786927\nI1207 19:21:47.425992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11455 > 2) by scale factor 0.945828\nI1207 19:21:51.579907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81221 > 2) by scale factor 0.52463\nI1207 19:21:59.882141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2224 > 2) by scale factor 0.899927\nI1207 19:22:04.035800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42492 > 2) by scale factor 0.824768\nI1207 19:22:08.187399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97546 > 2) by scale factor 0.672164\nI1207 19:22:12.339942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78924 > 2) by scale factor 0.52781\nI1207 19:22:16.492660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9081 > 2) by scale factor 0.687735\nI1207 19:22:20.645062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32568 > 2) by scale factor 0.859962\nI1207 19:22:24.799083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50509 > 2) by scale factor 0.798373\nI1207 19:22:28.951443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60651 > 2) by scale factor 0.767309\nI1207 19:22:33.104332  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.51022 > 2) by scale factor 0.796742\nI1207 19:22:37.258141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87039 > 2) by scale factor 0.69677\nI1207 19:22:41.411137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44052 > 2) by scale factor 0.581307\nI1207 19:22:45.563254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00454 > 2) by scale factor 0.499434\nI1207 19:22:49.715706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21899 > 2) by scale factor 0.901311\nI1207 19:22:53.868430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35358 > 2) by scale factor 0.84977\nI1207 19:22:58.020495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5165 > 2) by scale factor 0.794754\nI1207 19:23:02.171849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52293 > 2) by scale factor 0.567709\nI1207 19:23:06.325395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04131 > 2) by scale factor 0.979765\nI1207 19:23:10.478533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23924 > 2) by scale factor 0.617428\nI1207 19:23:14.632190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96858 > 2) by scale factor 0.673723\nI1207 19:23:18.785024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19904 > 2) by scale factor 0.476299\nI1207 19:23:22.937121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81587 > 2) by scale factor 0.524127\nI1207 19:23:31.240044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44647 > 2) by scale factor 0.817504\nI1207 19:23:39.542217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98063 > 2) by 
scale factor 0.670999\nI1207 19:23:43.695387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42233 > 2) by scale factor 0.584397\nI1207 19:23:47.847606  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08108 > 2) by scale factor 0.649124\nI1207 19:23:52.000735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19467 > 2) by scale factor 0.626043\nI1207 19:23:52.012508  1922 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1207 19:26:30.118150  1922 solver.cpp:404]     Test net output #0: accuracy = 0.187941\nI1207 19:26:30.118528  1922 solver.cpp:404]     Test net output #1: loss = 10.5481 (* 1 = 10.5481 loss)\nI1207 19:26:34.061945  1922 solver.cpp:228] Iteration 8600, loss = 10.8588\nI1207 19:26:34.061996  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 19:26:34.062013  1922 solver.cpp:244]     Train net output #1: loss = 10.8588 (* 1 = 10.8588 loss)\nI1207 19:26:34.263756  1922 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1207 19:26:34.273849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98584 > 2) by scale factor 0.669829\nI1207 19:26:38.428944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12639 > 2) by scale factor 0.639715\nI1207 19:26:42.584988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90013 > 2) by scale factor 0.512804\nI1207 19:26:46.740135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58445 > 2) by scale factor 0.557966\nI1207 19:26:50.894877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24109 > 2) by scale factor 0.892422\nI1207 19:26:55.048714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93877 > 2) by scale factor 0.680556\nI1207 19:26:59.203069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64356 > 2) by 
scale factor 0.548913\nI1207 19:27:03.357321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73045 > 2) by scale factor 0.536128\nI1207 19:27:07.512136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35181 > 2) by scale factor 0.850408\nI1207 19:27:11.665392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64422 > 2) by scale factor 0.756366\nI1207 19:27:15.819326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5659 > 2) by scale factor 0.779453\nI1207 19:27:24.125159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0262 > 2) by scale factor 0.660895\nI1207 19:27:28.279142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72047 > 2) by scale factor 0.735167\nI1207 19:27:32.433233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61774 > 2) by scale factor 0.764018\nI1207 19:27:36.586894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84801 > 2) by scale factor 0.519749\nI1207 19:27:40.742089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64164 > 2) by scale factor 0.757104\nI1207 19:27:44.896891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00827 > 2) by scale factor 0.995884\nI1207 19:27:49.051584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79495 > 2) by scale factor 0.715576\nI1207 19:27:53.205678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02012 > 2) by scale factor 0.662225\nI1207 19:27:57.360623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70794 > 2) by scale factor 0.738568\nI1207 19:28:01.516466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21489 > 2) by scale factor 0.622105\nI1207 19:28:05.671732  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71483 > 2) by scale factor 0.736695\nI1207 19:28:09.826367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92067 > 2) by scale factor 0.684776\nI1207 19:28:13.981809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02974 > 2) by scale factor 0.660122\nI1207 19:28:18.136226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42162 > 2) by scale factor 0.825894\nI1207 19:28:22.290269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01067 > 2) by scale factor 0.664304\nI1207 19:28:26.444615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77954 > 2) by scale factor 0.418451\nI1207 19:28:30.598356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88549 > 2) by scale factor 0.693124\nI1207 19:28:34.752949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15178 > 2) by scale factor 0.634563\nI1207 19:28:38.906877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32847 > 2) by scale factor 0.858932\nI1207 19:28:43.061424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45225 > 2) by scale factor 0.579332\nI1207 19:28:47.216508  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25078 > 2) by scale factor 0.888579\nI1207 19:28:51.370234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7417 > 2) by scale factor 0.534517\nI1207 19:28:55.524713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00402 > 2) by scale factor 0.665774\nI1207 19:29:07.984426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55697 > 2) by scale factor 0.562277\nI1207 19:29:12.138571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.59792 > 2) by scale factor 0.555876\nI1207 19:29:16.293081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2779 > 2) by scale factor 0.610147\nI1207 19:29:20.447288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20395 > 2) by scale factor 0.62423\nI1207 19:29:24.602607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22825 > 2) by scale factor 0.897567\nI1207 19:29:32.908901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84109 > 2) by scale factor 0.520686\nI1207 19:29:37.063737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07117 > 2) by scale factor 0.651218\nI1207 19:29:41.217917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23893 > 2) by scale factor 0.617488\nI1207 19:29:45.371681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86226 > 2) by scale factor 0.698748\nI1207 19:29:49.526330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85226 > 2) by scale factor 0.701198\nI1207 19:29:53.680675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70778 > 2) by scale factor 0.424829\nI1207 19:29:57.834537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90084 > 2) by scale factor 0.689456\nI1207 19:30:01.988780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03316 > 2) by scale factor 0.98369\nI1207 19:30:06.142936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09083 > 2) by scale factor 0.956558\nI1207 19:30:10.297747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13469 > 2) by scale factor 0.936904\nI1207 19:30:14.452241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48319 > 2) by scale factor 0.805415\nI1207 19:30:18.606932  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35788 > 2) by scale factor 0.84822\nI1207 19:30:26.912966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43175 > 2) by scale factor 0.582793\nI1207 19:30:31.067430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6938 > 2) by scale factor 0.541448\nI1207 19:30:35.221899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50918 > 2) by scale factor 0.569935\nI1207 19:30:39.376649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89449 > 2) by scale factor 0.690968\nI1207 19:30:43.532279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53284 > 2) by scale factor 0.789628\nI1207 19:30:47.686735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20631 > 2) by scale factor 0.623769\nI1207 19:30:51.840344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42071 > 2) by scale factor 0.826204\nI1207 19:30:55.994961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18474 > 2) by scale factor 0.477927\nI1207 19:31:00.149344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26304 > 2) by scale factor 0.612926\nI1207 19:31:04.304340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15662 > 2) by scale factor 0.927377\nI1207 19:31:08.458473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89792 > 2) by scale factor 0.69015\nI1207 19:31:16.763804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95613 > 2) by scale factor 0.67656\nI1207 19:31:20.918696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66849 > 2) by scale factor 0.749487\nI1207 19:31:25.073333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.44898 > 2) by scale factor 0.816666\nI1207 19:31:33.379892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17642 > 2) by scale factor 0.478879\nI1207 19:31:37.534093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21509 > 2) by scale factor 0.622066\nI1207 19:31:41.688133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58552 > 2) by scale factor 0.773538\nI1207 19:31:45.842856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56466 > 2) by scale factor 0.77983\nI1207 19:31:49.996778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12749 > 2) by scale factor 0.940077\nI1207 19:31:54.149670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1989 > 2) by scale factor 0.625215\nI1207 19:31:58.303869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08689 > 2) by scale factor 0.647902\nI1207 19:32:02.457583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41871 > 2) by scale factor 0.826886\nI1207 19:32:06.610587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60668 > 2) by scale factor 0.767259\nI1207 19:32:10.764191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4737 > 2) by scale factor 0.808507\nI1207 19:32:14.918108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33392 > 2) by scale factor 0.856926\nI1207 19:32:19.072803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27891 > 2) by scale factor 0.609958\nI1207 19:32:23.227458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42057 > 2) by scale factor 0.826251\nI1207 19:32:27.380550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00931 > 2) by scale factor 0.995365\nI1207 19:32:31.535636  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64196 > 2) by scale factor 0.549155\nI1207 19:32:35.690352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09369 > 2) by scale factor 0.95525\nI1207 19:32:39.844669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10234 > 2) by scale factor 0.644674\nI1207 19:32:43.999989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06285 > 2) by scale factor 0.652988\nI1207 19:32:48.153797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5545 > 2) by scale factor 0.782933\nI1207 19:32:52.307623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19527 > 2) by scale factor 0.625925\nI1207 19:32:56.461637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22756 > 2) by scale factor 0.897843\nI1207 19:33:00.616405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73272 > 2) by scale factor 0.535803\nI1207 19:33:04.770913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07012 > 2) by scale factor 0.491386\nI1207 19:33:08.924110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11667 > 2) by scale factor 0.94488\nI1207 19:33:13.077702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12685 > 2) by scale factor 0.639621\nI1207 19:33:17.232398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0658 > 2) by scale factor 0.968146\nI1207 19:33:21.386672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22455 > 2) by scale factor 0.899057\nI1207 19:33:25.541513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15597 > 2) by scale factor 0.633719\nI1207 19:33:25.553426  1922 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1207 
19:36:03.785162  1922 solver.cpp:404]     Test net output #0: accuracy = 0.179824\nI1207 19:36:03.785567  1922 solver.cpp:404]     Test net output #1: loss = 10.534 (* 1 = 10.534 loss)\nI1207 19:36:07.730376  1922 solver.cpp:228] Iteration 8700, loss = 10.0991\nI1207 19:36:07.730428  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 19:36:07.730445  1922 solver.cpp:244]     Train net output #1: loss = 10.0991 (* 1 = 10.0991 loss)\nI1207 19:36:07.929934  1922 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1207 19:36:07.939971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10743 > 2) by scale factor 0.643619\nI1207 19:36:12.095510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23253 > 2) by scale factor 0.895845\nI1207 19:36:16.251956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97713 > 2) by scale factor 0.671787\nI1207 19:36:24.562518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25717 > 2) by scale factor 0.61403\nI1207 19:36:28.719372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6818 > 2) by scale factor 0.543213\nI1207 19:36:32.875721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37106 > 2) by scale factor 0.593285\nI1207 19:36:37.031931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56524 > 2) by scale factor 0.779654\nI1207 19:36:41.189162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91675 > 2) by scale factor 0.510627\nI1207 19:36:45.347012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70511 > 2) by scale factor 0.73934\nI1207 19:36:49.503357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65958 > 2) by scale factor 0.752\nI1207 19:36:53.658802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.04405 > 2) by scale factor 0.65702\nI1207 19:36:57.813612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38994 > 2) by scale factor 0.589981\nI1207 19:37:01.969800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47054 > 2) by scale factor 0.57628\nI1207 19:37:06.125104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79878 > 2) by scale factor 0.526484\nI1207 19:37:10.280525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61311 > 2) by scale factor 0.55354\nI1207 19:37:14.436703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0407 > 2) by scale factor 0.657743\nI1207 19:37:18.593230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20777 > 2) by scale factor 0.623486\nI1207 19:37:22.748472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42672 > 2) by scale factor 0.583649\nI1207 19:37:26.904603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74128 > 2) by scale factor 0.534577\nI1207 19:37:31.061183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66631 > 2) by scale factor 0.7501\nI1207 19:37:35.217232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37338 > 2) by scale factor 0.84268\nI1207 19:37:39.372920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04993 > 2) by scale factor 0.655753\nI1207 19:37:43.527063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33507 > 2) by scale factor 0.599688\nI1207 19:37:47.683198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26805 > 2) by scale factor 0.611986\nI1207 19:37:51.839054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17056 > 2) by scale factor 0.630803\nI1207 19:37:55.994856  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39984 > 2) by scale factor 0.588263\nI1207 19:38:00.150195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66604 > 2) by scale factor 0.545548\nI1207 19:38:04.305636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8799 > 2) by scale factor 0.515477\nI1207 19:38:08.460506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0227 > 2) by scale factor 0.497178\nI1207 19:38:12.616149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90837 > 2) by scale factor 0.68767\nI1207 19:38:16.772155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97739 > 2) by scale factor 0.671728\nI1207 19:38:20.928306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.338 > 2) by scale factor 0.855433\nI1207 19:38:25.083474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9479 > 2) by scale factor 0.678449\nI1207 19:38:29.239295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45738 > 2) by scale factor 0.813874\nI1207 19:38:33.394529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26532 > 2) by scale factor 0.882876\nI1207 19:38:37.550019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4392 > 2) by scale factor 0.819942\nI1207 19:38:41.705657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48582 > 2) by scale factor 0.804563\nI1207 19:38:45.861203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15518 > 2) by scale factor 0.481327\nI1207 19:38:50.016800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98416 > 2) by scale factor 0.670206\nI1207 19:38:54.173072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.0355 > 2) by scale factor 0.98256\nI1207 19:38:58.328982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3134 > 2) by scale factor 0.864528\nI1207 19:39:02.484802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3867 > 2) by scale factor 0.590545\nI1207 19:39:06.639619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11393 > 2) by scale factor 0.642276\nI1207 19:39:10.794371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51989 > 2) by scale factor 0.793686\nI1207 19:39:14.949585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19053 > 2) by scale factor 0.913021\nI1207 19:39:19.104768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43997 > 2) by scale factor 0.5814\nI1207 19:39:23.262002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18804 > 2) by scale factor 0.91406\nI1207 19:39:27.418339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46836 > 2) by scale factor 0.576642\nI1207 19:39:31.574043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25695 > 2) by scale factor 0.886151\nI1207 19:39:35.729957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72347 > 2) by scale factor 0.734358\nI1207 19:39:39.885349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55245 > 2) by scale factor 0.439324\nI1207 19:39:44.041754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5561 > 2) by scale factor 0.562414\nI1207 19:39:48.197481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22158 > 2) by scale factor 0.900258\nI1207 19:39:52.352483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18148 > 2) by scale factor 0.628639\nI1207 19:39:56.508604  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69514 > 2) by scale factor 0.742075\nI1207 19:40:00.664247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49852 > 2) by scale factor 0.800473\nI1207 19:40:04.820255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89159 > 2) by scale factor 0.691662\nI1207 19:40:08.975687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32218 > 2) by scale factor 0.602014\nI1207 19:40:13.130326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35597 > 2) by scale factor 0.848908\nI1207 19:40:21.439321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29821 > 2) by scale factor 0.606389\nI1207 19:40:25.594641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98514 > 2) by scale factor 0.669985\nI1207 19:40:29.749819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23104 > 2) by scale factor 0.472697\nI1207 19:40:33.905761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25571 > 2) by scale factor 0.614305\nI1207 19:40:38.062898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47159 > 2) by scale factor 0.576105\nI1207 19:40:42.217558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79211 > 2) by scale factor 0.716305\nI1207 19:40:46.372923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99058 > 2) by scale factor 0.668766\nI1207 19:40:50.528532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37611 > 2) by scale factor 0.841712\nI1207 19:40:54.684079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16535 > 2) by scale factor 0.631842\nI1207 19:40:58.839846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.2065 > 2) by scale factor 0.623734\nI1207 19:41:02.994889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54481 > 2) by scale factor 0.564205\nI1207 19:41:07.150338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0918 > 2) by scale factor 0.646872\nI1207 19:41:15.460341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20699 > 2) by scale factor 0.623638\nI1207 19:41:19.615725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72029 > 2) by scale factor 0.735216\nI1207 19:41:23.771898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87292 > 2) by scale factor 0.696155\nI1207 19:41:27.927381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48352 > 2) by scale factor 0.805308\nI1207 19:41:32.082306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50827 > 2) by scale factor 0.443629\nI1207 19:41:36.237138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74774 > 2) by scale factor 0.727872\nI1207 19:41:40.393018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52452 > 2) by scale factor 0.567453\nI1207 19:41:44.549522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2049 > 2) by scale factor 0.624044\nI1207 19:41:48.704427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65505 > 2) by scale factor 0.753282\nI1207 19:41:52.859835  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70125 > 2) by scale factor 0.540358\nI1207 19:41:57.016427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61578 > 2) by scale factor 0.764589\nI1207 19:42:01.171934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71803 > 2) by scale factor 0.735828\nI1207 19:42:05.327226  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0379 > 2) by scale factor 0.981402\nI1207 19:42:09.482703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32766 > 2) by scale factor 0.601022\nI1207 19:42:13.638222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45828 > 2) by scale factor 0.813576\nI1207 19:42:17.794288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93937 > 2) by scale factor 0.507696\nI1207 19:42:21.949295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64024 > 2) by scale factor 0.757506\nI1207 19:42:26.104598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38098 > 2) by scale factor 0.591545\nI1207 19:42:30.259830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35413 > 2) by scale factor 0.84957\nI1207 19:42:34.414222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47312 > 2) by scale factor 0.808695\nI1207 19:42:38.569437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19134 > 2) by scale factor 0.912685\nI1207 19:42:42.724534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65404 > 2) by scale factor 0.753568\nI1207 19:42:46.879806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57534 > 2) by scale factor 0.776595\nI1207 19:42:51.035478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8215 > 2) by scale factor 0.708844\nI1207 19:42:55.190445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41926 > 2) by scale factor 0.584923\nI1207 19:42:59.345110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6676 > 2) by scale factor 0.749739\nI1207 19:42:59.357048  1922 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1207 
19:45:37.618196  1922 solver.cpp:404]     Test net output #0: accuracy = 0.148059\nI1207 19:45:37.618564  1922 solver.cpp:404]     Test net output #1: loss = 7.76542 (* 1 = 7.76542 loss)\nI1207 19:45:41.560659  1922 solver.cpp:228] Iteration 8800, loss = 7.87662\nI1207 19:45:41.560703  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 19:45:41.560720  1922 solver.cpp:244]     Train net output #1: loss = 7.87662 (* 1 = 7.87662 loss)\nI1207 19:45:41.762245  1922 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1207 19:45:41.772313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71835 > 2) by scale factor 0.537873\nI1207 19:45:45.926270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12325 > 2) by scale factor 0.640359\nI1207 19:45:50.080386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13653 > 2) by scale factor 0.637647\nI1207 19:45:54.233676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97816 > 2) by scale factor 0.671555\nI1207 19:45:58.386906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12015 > 2) by scale factor 0.640995\nI1207 19:46:02.540845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69461 > 2) by scale factor 0.54133\nI1207 19:46:06.695072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91229 > 2) by scale factor 0.686746\nI1207 19:46:10.849414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02433 > 2) by scale factor 0.661304\nI1207 19:46:15.002861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42352 > 2) by scale factor 0.584194\nI1207 19:46:19.157430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64906 > 2) by scale factor 0.754985\nI1207 19:46:23.311887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.01829 > 2) by scale factor 0.662628\nI1207 19:46:27.466169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01387 > 2) by scale factor 0.498272\nI1207 19:46:31.619511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4399 > 2) by scale factor 0.450461\nI1207 19:46:35.772846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90637 > 2) by scale factor 0.688143\nI1207 19:46:39.926834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81779 > 2) by scale factor 0.709777\nI1207 19:46:44.079627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51355 > 2) by scale factor 0.569224\nI1207 19:46:48.234057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63433 > 2) by scale factor 0.431562\nI1207 19:46:52.386816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32702 > 2) by scale factor 0.462212\nI1207 19:46:56.539006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17141 > 2) by scale factor 0.630635\nI1207 19:47:00.692915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48231 > 2) by scale factor 0.805701\nI1207 19:47:08.999697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74718 > 2) by scale factor 0.533734\nI1207 19:47:13.153193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64376 > 2) by scale factor 0.756498\nI1207 19:47:17.306555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09088 > 2) by scale factor 0.647064\nI1207 19:47:21.460610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50244 > 2) by scale factor 0.799219\nI1207 19:47:25.613438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4411 > 2) by scale factor 0.819301\nI1207 
19:47:29.766041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33497 > 2) by scale factor 0.856543\nI1207 19:47:33.919306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11272 > 2) by scale factor 0.642526\nI1207 19:47:38.073041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.461 > 2) by scale factor 0.577868\nI1207 19:47:42.225442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04553 > 2) by scale factor 0.977743\nI1207 19:47:58.833298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45397 > 2) by scale factor 0.579044\nI1207 19:48:07.138870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26236 > 2) by scale factor 0.884032\nI1207 19:48:11.292699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3899 > 2) by scale factor 0.836857\nI1207 19:48:15.446367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09934 > 2) by scale factor 0.952682\nI1207 19:48:19.600111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03091 > 2) by scale factor 0.659868\nI1207 19:48:23.753412  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08493 > 2) by scale factor 0.648313\nI1207 19:48:27.907083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42331 > 2) by scale factor 0.825317\nI1207 19:48:32.060555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39693 > 2) by scale factor 0.834401\nI1207 19:48:36.213564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24337 > 2) by scale factor 0.616643\nI1207 19:48:40.366891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7866 > 2) by scale factor 0.528179\nI1207 19:48:44.520390  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.19454 > 2) by scale factor 0.47681\nI1207 19:48:48.673384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09752 > 2) by scale factor 0.645678\nI1207 19:48:52.826428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9593 > 2) by scale factor 0.675836\nI1207 19:48:56.980453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19118 > 2) by scale factor 0.477193\nI1207 19:49:01.133925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59914 > 2) by scale factor 0.769484\nI1207 19:49:05.287449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79221 > 2) by scale factor 0.716278\nI1207 19:49:13.594322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42867 > 2) by scale factor 0.583317\nI1207 19:49:17.747864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34737 > 2) by scale factor 0.852018\nI1207 19:49:21.900555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50821 > 2) by scale factor 0.443635\nI1207 19:49:26.053947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82775 > 2) by scale factor 0.707276\nI1207 19:49:30.207079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45162 > 2) by scale factor 0.579437\nI1207 19:49:34.360338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79318 > 2) by scale factor 0.71603\nI1207 19:49:38.514144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4415 > 2) by scale factor 0.81917\nI1207 19:49:42.668895  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4161 > 2) by scale factor 0.827782\nI1207 19:49:46.821014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12143 > 2) by scale factor 0.640732\nI1207 
19:49:50.973287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41996 > 2) by scale factor 0.584803\nI1207 19:49:55.126135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44799 > 2) by scale factor 0.449641\nI1207 19:49:59.279970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16904 > 2) by scale factor 0.479727\nI1207 19:50:03.434317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74186 > 2) by scale factor 0.729433\nI1207 19:50:07.588966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71871 > 2) by scale factor 0.735642\nI1207 19:50:11.743736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25006 > 2) by scale factor 0.888865\nI1207 19:50:20.049451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2265 > 2) by scale factor 0.619867\nI1207 19:50:24.202061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60105 > 2) by scale factor 0.76892\nI1207 19:50:28.355336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61313 > 2) by scale factor 0.765364\nI1207 19:50:32.508960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47825 > 2) by scale factor 0.807022\nI1207 19:50:36.662689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81463 > 2) by scale factor 0.710572\nI1207 19:50:40.815778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53431 > 2) by scale factor 0.565882\nI1207 19:50:44.969458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50056 > 2) by scale factor 0.571338\nI1207 19:50:49.123877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40367 > 2) by scale factor 0.83206\nI1207 19:50:53.277565  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.19752 > 2) by scale factor 0.910115\nI1207 19:51:09.884479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95995 > 2) by scale factor 0.675688\nI1207 19:51:14.038235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25263 > 2) by scale factor 0.88785\nI1207 19:51:18.191917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27345 > 2) by scale factor 0.879721\nI1207 19:51:22.346498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28943 > 2) by scale factor 0.873579\nI1207 19:51:26.499778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19101 > 2) by scale factor 0.912821\nI1207 19:51:30.654695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68554 > 2) by scale factor 0.744728\nI1207 19:51:34.808179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60745 > 2) by scale factor 0.767033\nI1207 19:51:38.960829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91732 > 2) by scale factor 0.68556\nI1207 19:51:43.114814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01576 > 2) by scale factor 0.663183\nI1207 19:51:47.268549  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75045 > 2) by scale factor 0.727154\nI1207 19:51:51.423888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14792 > 2) by scale factor 0.635341\nI1207 19:51:55.578613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22219 > 2) by scale factor 0.620696\nI1207 19:51:59.731822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35055 > 2) by scale factor 0.596918\nI1207 19:52:03.886142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90264 > 2) by scale factor 0.689028\nI1207 
19:52:08.040568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85197 > 2) by scale factor 0.519215\nI1207 19:52:12.195085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46902 > 2) by scale factor 0.810038\nI1207 19:52:16.348585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02714 > 2) by scale factor 0.986613\nI1207 19:52:20.501161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46273 > 2) by scale factor 0.577578\nI1207 19:52:24.655531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85238 > 2) by scale factor 0.51916\nI1207 19:52:28.809559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34187 > 2) by scale factor 0.854018\nI1207 19:52:32.962355  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20425 > 2) by scale factor 0.907339\nI1207 19:52:32.974242  1922 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1207 19:55:11.240933  1922 solver.cpp:404]     Test net output #0: accuracy = 0.209235\nI1207 19:55:11.241293  1922 solver.cpp:404]     Test net output #1: loss = 7.8074 (* 1 = 7.8074 loss)\nI1207 19:55:15.183989  1922 solver.cpp:228] Iteration 8900, loss = 6.59178\nI1207 19:55:15.184034  1922 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1207 19:55:15.184051  1922 solver.cpp:244]     Train net output #1: loss = 6.59178 (* 1 = 6.59178 loss)\nI1207 19:55:15.380887  1922 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1207 19:55:15.390974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63421 > 2) by scale factor 0.75924\nI1207 19:55:19.544574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50044 > 2) by scale factor 0.799861\nI1207 19:55:23.698038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72694 > 2) by scale factor 0.733424\nI1207 
19:55:27.851207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40819 > 2) by scale factor 0.586823\nI1207 19:55:32.005009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27713 > 2) by scale factor 0.467603\nI1207 19:55:36.158190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7412 > 2) by scale factor 0.729608\nI1207 19:55:40.311830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78105 > 2) by scale factor 0.719153\nI1207 19:55:44.464576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3321 > 2) by scale factor 0.857598\nI1207 19:55:48.618165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07864 > 2) by scale factor 0.962168\nI1207 19:55:52.771265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34492 > 2) by scale factor 0.852908\nI1207 19:55:56.925246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31152 > 2) by scale factor 0.865232\nI1207 19:56:01.078845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59271 > 2) by scale factor 0.556683\nI1207 19:56:05.232406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60015 > 2) by scale factor 0.769186\nI1207 19:56:09.385862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28233 > 2) by scale factor 0.876296\nI1207 19:56:13.539700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01759 > 2) by scale factor 0.662781\nI1207 19:56:17.692775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92565 > 2) by scale factor 0.50947\nI1207 19:56:21.847265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74594 > 2) by scale factor 0.533912\nI1207 19:56:30.151901  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.64135 > 2) by scale factor 0.757188\nI1207 19:56:34.305119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12134 > 2) by scale factor 0.942801\nI1207 19:56:38.458703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57355 > 2) by scale factor 0.777135\nI1207 19:56:42.612403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3444 > 2) by scale factor 0.853098\nI1207 19:56:46.764756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89467 > 2) by scale factor 0.513522\nI1207 19:56:50.918068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0258 > 2) by scale factor 0.660982\nI1207 19:56:55.072909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23909 > 2) by scale factor 0.471799\nI1207 19:56:59.227893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49676 > 2) by scale factor 0.801037\nI1207 19:57:03.381052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93584 > 2) by scale factor 0.681237\nI1207 19:57:07.534723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44775 > 2) by scale factor 0.817076\nI1207 19:57:11.689236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53208 > 2) by scale factor 0.789864\nI1207 19:57:15.842228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83505 > 2) by scale factor 0.705455\nI1207 19:57:24.147289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9736 > 2) by scale factor 0.672585\nI1207 19:57:28.300544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81815 > 2) by scale factor 0.709686\nI1207 19:57:32.454078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41243 > 2) by scale factor 0.586093\nI1207 
19:57:36.607730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10178 > 2) by scale factor 0.644791\nI1207 19:57:40.760432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4753 > 2) by scale factor 0.57549\nI1207 19:57:44.913969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82893 > 2) by scale factor 0.706981\nI1207 19:57:49.068158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8902 > 2) by scale factor 0.691994\nI1207 19:57:53.220600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22107 > 2) by scale factor 0.900466\nI1207 19:57:57.372916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73564 > 2) by scale factor 0.731089\nI1207 19:58:01.526051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73043 > 2) by scale factor 0.536131\nI1207 19:58:09.831858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82118 > 2) by scale factor 0.523398\nI1207 19:58:13.984908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3303 > 2) by scale factor 0.461862\nI1207 19:58:18.137987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44949 > 2) by scale factor 0.579795\nI1207 19:58:22.291537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28546 > 2) by scale factor 0.466695\nI1207 19:58:26.444187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17002 > 2) by scale factor 0.921649\nI1207 19:58:30.597417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93521 > 2) by scale factor 0.681383\nI1207 19:58:34.751060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06922 > 2) by scale factor 0.651632\nI1207 19:58:38.904116  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.86953 > 2) by scale factor 0.696978\nI1207 19:58:43.058153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0725 > 2) by scale factor 0.965017\nI1207 19:58:47.212594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34967 > 2) by scale factor 0.851183\nI1207 19:58:51.365607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63669 > 2) by scale factor 0.549951\nI1207 19:58:55.520197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83139 > 2) by scale factor 0.706366\nI1207 19:59:03.825702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30627 > 2) by scale factor 0.867201\nI1207 19:59:07.979188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9032 > 2) by scale factor 0.688896\nI1207 19:59:12.131790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35937 > 2) by scale factor 0.847684\nI1207 19:59:16.284979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41119 > 2) by scale factor 0.829466\nI1207 19:59:20.438069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77153 > 2) by scale factor 0.721623\nI1207 19:59:24.590946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23768 > 2) by scale factor 0.893784\nI1207 19:59:28.744968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62985 > 2) by scale factor 0.550987\nI1207 19:59:32.898277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92065 > 2) by scale factor 0.684778\nI1207 19:59:37.051733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99856 > 2) by scale factor 0.50018\nI1207 19:59:41.205878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17466 > 2) by scale factor 0.919686\nI1207 
19:59:45.358947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59951 > 2) by scale factor 0.769375\nI1207 19:59:53.664512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49726 > 2) by scale factor 0.571876\nI1207 20:00:01.970912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55011 > 2) by scale factor 0.784281\nI1207 20:00:06.123742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14571 > 2) by scale factor 0.932093\nI1207 20:00:10.277480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60873 > 2) by scale factor 0.766656\nI1207 20:00:14.431421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91387 > 2) by scale factor 0.686374\nI1207 20:00:18.585039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77103 > 2) by scale factor 0.721753\nI1207 20:00:22.738572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79715 > 2) by scale factor 0.526711\nI1207 20:00:26.891569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01668 > 2) by scale factor 0.66298\nI1207 20:00:31.045830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96919 > 2) by scale factor 0.673585\nI1207 20:00:39.350327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36171 > 2) by scale factor 0.846845\nI1207 20:00:43.504106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18964 > 2) by scale factor 0.913391\nI1207 20:00:51.809631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42675 > 2) by scale factor 0.583644\nI1207 20:00:55.963555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59595 > 2) by scale factor 0.77043\nI1207 20:01:00.116278  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.33626 > 2) by scale factor 0.856069\nI1207 20:01:04.269254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26913 > 2) by scale factor 0.611784\nI1207 20:01:08.422262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00043 > 2) by scale factor 0.66657\nI1207 20:01:16.726519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30479 > 2) by scale factor 0.605182\nI1207 20:01:20.880043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68233 > 2) by scale factor 0.74562\nI1207 20:01:29.184633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1693 > 2) by scale factor 0.479697\nI1207 20:01:33.337463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4873 > 2) by scale factor 0.804084\nI1207 20:01:45.791270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60365 > 2) by scale factor 0.434438\nI1207 20:01:49.944316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79452 > 2) by scale factor 0.715686\nI1207 20:02:06.547724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42645 > 2) by scale factor 0.824249\nI1207 20:02:06.559723  1922 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1207 20:04:44.477017  1922 solver.cpp:404]     Test net output #0: accuracy = 0.210059\nI1207 20:04:44.477407  1922 solver.cpp:404]     Test net output #1: loss = 8.40115 (* 1 = 8.40115 loss)\nI1207 20:04:48.418818  1922 solver.cpp:228] Iteration 9000, loss = 7.16143\nI1207 20:04:48.418867  1922 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 20:04:48.418887  1922 solver.cpp:244]     Train net output #1: loss = 7.16142 (* 1 = 7.16142 loss)\nI1207 20:04:48.619616  1922 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1207 20:04:48.629719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.67044 > 2) by scale factor 0.74894\nI1207 20:04:52.782584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33679 > 2) by scale factor 0.599378\nI1207 20:04:56.935761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00884 > 2) by scale factor 0.664707\nI1207 20:05:01.088567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05324 > 2) by scale factor 0.655041\nI1207 20:05:05.241230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89985 > 2) by scale factor 0.68969\nI1207 20:05:09.394660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01369 > 2) by scale factor 0.663638\nI1207 20:05:13.547818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04248 > 2) by scale factor 0.657359\nI1207 20:05:17.701205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55297 > 2) by scale factor 0.7834\nI1207 20:05:21.854431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92076 > 2) by scale factor 0.510105\nI1207 20:05:26.007186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61617 > 2) by scale factor 0.553071\nI1207 20:05:30.160760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63647 > 2) by scale factor 0.758589\nI1207 20:05:34.313729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91266 > 2) by scale factor 0.511161\nI1207 20:05:38.465044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59516 > 2) by scale factor 0.556303\nI1207 20:05:42.616477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52809 > 2) by scale factor 0.566878\nI1207 20:05:46.768674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94475 > 2) by scale factor 0.507003\nI1207 
20:05:50.921993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48071 > 2) by scale factor 0.806221\nI1207 20:05:55.074265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96052 > 2) by scale factor 0.504984\nI1207 20:05:59.227504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52799 > 2) by scale factor 0.791143\nI1207 20:06:03.380748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06517 > 2) by scale factor 0.652492\nI1207 20:06:07.533695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53222 > 2) by scale factor 0.789821\nI1207 20:06:11.685652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36467 > 2) by scale factor 0.845783\nI1207 20:06:15.838615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32646 > 2) by scale factor 0.462272\nI1207 20:06:19.990850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02532 > 2) by scale factor 0.496855\nI1207 20:06:24.142832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34804 > 2) by scale factor 0.851775\nI1207 20:06:28.296522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11446 > 2) by scale factor 0.945867\nI1207 20:06:32.450155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56743 > 2) by scale factor 0.560628\nI1207 20:06:36.603610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45986 > 2) by scale factor 0.578058\nI1207 20:06:40.756942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24864 > 2) by scale factor 0.615643\nI1207 20:06:44.909294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3389 > 2) by scale factor 0.460947\nI1207 20:06:49.061055  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.87045 > 2) by scale factor 0.696755\nI1207 20:06:53.213562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98985 > 2) by scale factor 0.668931\nI1207 20:06:57.365727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0196 > 2) by scale factor 0.662339\nI1207 20:07:01.519275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89394 > 2) by scale factor 0.513619\nI1207 20:07:05.672484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3259 > 2) by scale factor 0.601342\nI1207 20:07:09.823912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17149 > 2) by scale factor 0.630619\nI1207 20:07:13.976075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51763 > 2) by scale factor 0.568565\nI1207 20:07:18.130193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44981 > 2) by scale factor 0.449457\nI1207 20:07:22.282280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80457 > 2) by scale factor 0.525684\nI1207 20:07:26.434670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86716 > 2) by scale factor 0.517175\nI1207 20:07:30.586975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88489 > 2) by scale factor 0.693267\nI1207 20:07:34.739447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00526 > 2) by scale factor 0.665499\nI1207 20:07:43.042752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6724 > 2) by scale factor 0.748391\nI1207 20:07:47.195307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58019 > 2) by scale factor 0.775138\nI1207 20:07:51.348233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95721 > 2) by scale factor 0.676313\nI1207 
20:07:55.499429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90433 > 2) by scale factor 0.512252\nI1207 20:07:59.650650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0836 > 2) by scale factor 0.489764\nI1207 20:08:03.802557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99145 > 2) by scale factor 0.668571\nI1207 20:08:12.105967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95171 > 2) by scale factor 0.677574\nI1207 20:08:16.257580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57468 > 2) by scale factor 0.776795\nI1207 20:08:20.410797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83753 > 2) by scale factor 0.704838\nI1207 20:08:24.563868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29453 > 2) by scale factor 0.871639\nI1207 20:08:28.714391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39154 > 2) by scale factor 0.83628\nI1207 20:08:37.016984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10381 > 2) by scale factor 0.644369\nI1207 20:08:41.170128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02864 > 2) by scale factor 0.660362\nI1207 20:08:45.322083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91871 > 2) by scale factor 0.685234\nI1207 20:08:53.626128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34986 > 2) by scale factor 0.851116\nI1207 20:08:57.779378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35272 > 2) by scale factor 0.85008\nI1207 20:09:01.931769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4367 > 2) by scale factor 0.581953\nI1207 20:09:06.083879  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.22864 > 2) by scale factor 0.897409\nI1207 20:09:10.236282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54923 > 2) by scale factor 0.78455\nI1207 20:09:14.389216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22343 > 2) by scale factor 0.620458\nI1207 20:09:18.540971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36089 > 2) by scale factor 0.595081\nI1207 20:09:22.693351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79163 > 2) by scale factor 0.527478\nI1207 20:09:26.845865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71605 > 2) by scale factor 0.538206\nI1207 20:09:30.998913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89739 > 2) by scale factor 0.513164\nI1207 20:09:35.150095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58249 > 2) by scale factor 0.774448\nI1207 20:09:39.302628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64203 > 2) by scale factor 0.549144\nI1207 20:09:47.606151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10642 > 2) by scale factor 0.487043\nI1207 20:09:51.758728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90923 > 2) by scale factor 0.687468\nI1207 20:09:55.911644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3156 > 2) by scale factor 0.863709\nI1207 20:10:00.064971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92993 > 2) by scale factor 0.682611\nI1207 20:10:04.217815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47114 > 2) by scale factor 0.576179\nI1207 20:10:08.370005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2003 > 2) by scale factor 0.908966\nI1207 
20:10:12.522104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54839 > 2) by scale factor 0.78481\nI1207 20:10:16.675163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91964 > 2) by scale factor 0.510251\nI1207 20:10:20.828392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23698 > 2) by scale factor 0.617861\nI1207 20:10:24.981135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36625 > 2) by scale factor 0.845221\nI1207 20:10:29.133877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21971 > 2) by scale factor 0.901019\nI1207 20:10:33.286671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98362 > 2) by scale factor 0.670326\nI1207 20:10:37.438907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50873 > 2) by scale factor 0.797216\nI1207 20:10:41.589848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07461 > 2) by scale factor 0.65049\nI1207 20:10:45.742614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71038 > 2) by scale factor 0.737905\nI1207 20:10:49.895478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70926 > 2) by scale factor 0.539192\nI1207 20:10:54.047709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28808 > 2) by scale factor 0.466409\nI1207 20:10:58.201973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62812 > 2) by scale factor 0.55125\nI1207 20:11:02.354007  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68933 > 2) by scale factor 0.743678\nI1207 20:11:06.507146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04848 > 2) by scale factor 0.976332\nI1207 20:11:10.660713  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.49945 > 2) by scale factor 0.571518\nI1207 20:11:14.812880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75064 > 2) by scale factor 0.533242\nI1207 20:11:18.965165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86999 > 2) by scale factor 0.516797\nI1207 20:11:23.117496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96009 > 2) by scale factor 0.675656\nI1207 20:11:27.270452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67636 > 2) by scale factor 0.747284\nI1207 20:11:31.423403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46457 > 2) by scale factor 0.811501\nI1207 20:11:35.575100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85648 > 2) by scale factor 0.700163\nI1207 20:11:39.727771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81863 > 2) by scale factor 0.709564\nI1207 20:11:39.739708  1922 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1207 20:14:17.633132  1922 solver.cpp:404]     Test net output #0: accuracy = 0.132882\nI1207 20:14:17.633483  1922 solver.cpp:404]     Test net output #1: loss = 13.837 (* 1 = 13.837 loss)\nI1207 20:14:21.575634  1922 solver.cpp:228] Iteration 9100, loss = 15.0691\nI1207 20:14:21.575683  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 20:14:21.575701  1922 solver.cpp:244]     Train net output #1: loss = 15.0691 (* 1 = 15.0691 loss)\nI1207 20:14:21.778722  1922 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1207 20:14:21.788792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28895 > 2) by scale factor 0.466315\nI1207 20:14:25.943050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44252 > 2) by scale factor 0.450195\nI1207 20:14:30.096840  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.60297 > 2) by scale factor 0.768354\nI1207 20:14:38.402627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02229 > 2) by scale factor 0.661749\nI1207 20:14:46.708518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27522 > 2) by scale factor 0.879037\nI1207 20:14:50.861979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32487 > 2) by scale factor 0.860264\nI1207 20:14:55.014909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71478 > 2) by scale factor 0.736708\nI1207 20:14:59.167843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12227 > 2) by scale factor 0.64056\nI1207 20:15:03.322090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26156 > 2) by scale factor 0.613203\nI1207 20:15:07.476194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60221 > 2) by scale factor 0.768577\nI1207 20:15:11.629930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3032 > 2) by scale factor 0.46477\nI1207 20:15:15.782356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84848 > 2) by scale factor 0.702128\nI1207 20:15:19.935238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09338 > 2) by scale factor 0.955391\nI1207 20:15:28.240285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45195 > 2) by scale factor 0.579382\nI1207 20:15:32.394330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4896 > 2) by scale factor 0.803343\nI1207 20:15:36.547454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57948 > 2) by scale factor 0.775351\nI1207 20:15:40.701119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20999 > 2) by scale factor 0.623056\nI1207 
20:15:44.854888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21938 > 2) by scale factor 0.901153\nI1207 20:15:49.007913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83753 > 2) by scale factor 0.704838\nI1207 20:15:53.161926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39393 > 2) by scale factor 0.455173\nI1207 20:15:57.315906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.105 > 2) by scale factor 0.644122\nI1207 20:16:01.469897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73432 > 2) by scale factor 0.535573\nI1207 20:16:05.623698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26409 > 2) by scale factor 0.883357\nI1207 20:16:09.776792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61549 > 2) by scale factor 0.764676\nI1207 20:16:13.930647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74395 > 2) by scale factor 0.728876\nI1207 20:16:18.084915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21825 > 2) by scale factor 0.901613\nI1207 20:16:22.238548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86264 > 2) by scale factor 0.698657\nI1207 20:16:26.393146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35672 > 2) by scale factor 0.59582\nI1207 20:16:30.545367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76371 > 2) by scale factor 0.723665\nI1207 20:16:34.699373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9781 > 2) by scale factor 0.502752\nI1207 20:16:38.852911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19203 > 2) by scale factor 0.912396\nI1207 20:16:43.005287  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.36839 > 2) by scale factor 0.844455\nI1207 20:16:47.159317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11574 > 2) by scale factor 0.641902\nI1207 20:16:51.313351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40293 > 2) by scale factor 0.832319\nI1207 20:16:55.466629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93004 > 2) by scale factor 0.508901\nI1207 20:16:59.620098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77134 > 2) by scale factor 0.530316\nI1207 20:17:03.773501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67696 > 2) by scale factor 0.747115\nI1207 20:17:07.926728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23207 > 2) by scale factor 0.618798\nI1207 20:17:12.080250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78816 > 2) by scale factor 0.717318\nI1207 20:17:16.234400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12955 > 2) by scale factor 0.484314\nI1207 20:17:20.388257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37155 > 2) by scale factor 0.593199\nI1207 20:17:28.692939  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80714 > 2) by scale factor 0.416048\nI1207 20:17:32.846349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60158 > 2) by scale factor 0.555312\nI1207 20:17:37.001519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88507 > 2) by scale factor 0.693224\nI1207 20:17:41.154814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5521 > 2) by scale factor 0.783669\nI1207 20:17:45.308043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.489 > 2) by scale factor 0.803534\nI1207 
20:17:49.461333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97383 > 2) by scale factor 0.672534\nI1207 20:17:53.614608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5943 > 2) by scale factor 0.77092\nI1207 20:17:57.767558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54801 > 2) by scale factor 0.439753\nI1207 20:18:01.920644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33441 > 2) by scale factor 0.599807\nI1207 20:18:06.074249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91976 > 2) by scale factor 0.684987\nI1207 20:18:10.228185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82517 > 2) by scale factor 0.522853\nI1207 20:18:14.381409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67605 > 2) by scale factor 0.544062\nI1207 20:18:18.533799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6105 > 2) by scale factor 0.766137\nI1207 20:18:22.686341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53731 > 2) by scale factor 0.788237\nI1207 20:18:26.840548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51806 > 2) by scale factor 0.794262\nI1207 20:18:30.993259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21353 > 2) by scale factor 0.474661\nI1207 20:18:35.146292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27707 > 2) by scale factor 0.610301\nI1207 20:18:39.300037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39158 > 2) by scale factor 0.836268\nI1207 20:18:43.453409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12997 > 2) by scale factor 0.938979\nI1207 20:18:47.607311  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.29265 > 2) by scale factor 0.872352\nI1207 20:18:51.760748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55 > 2) by scale factor 0.784312\nI1207 20:18:55.913480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7056 > 2) by scale factor 0.539723\nI1207 20:19:00.067418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66803 > 2) by scale factor 0.545252\nI1207 20:19:04.220659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3741 > 2) by scale factor 0.592751\nI1207 20:19:08.374531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82335 > 2) by scale factor 0.708379\nI1207 20:19:12.527588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24792 > 2) by scale factor 0.615779\nI1207 20:19:16.682039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83733 > 2) by scale factor 0.704887\nI1207 20:19:20.836014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21482 > 2) by scale factor 0.903007\nI1207 20:19:24.990309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76704 > 2) by scale factor 0.722793\nI1207 20:19:29.143708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76912 > 2) by scale factor 0.722251\nI1207 20:19:33.296916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09959 > 2) by scale factor 0.952567\nI1207 20:19:37.450316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59532 > 2) by scale factor 0.770619\nI1207 20:19:41.603430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85358 > 2) by scale factor 0.700874\nI1207 20:19:45.756757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05283 > 2) by scale factor 0.974263\nI1207 
20:19:49.909576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6579 > 2) by scale factor 0.752473\nI1207 20:19:54.062155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16012 > 2) by scale factor 0.925873\nI1207 20:19:58.216356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16595 > 2) by scale factor 0.631722\nI1207 20:20:02.368813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62389 > 2) by scale factor 0.551892\nI1207 20:20:06.521488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45134 > 2) by scale factor 0.81588\nI1207 20:20:10.674579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97162 > 2) by scale factor 0.673034\nI1207 20:20:14.827195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88193 > 2) by scale factor 0.693978\nI1207 20:20:18.981998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99725 > 2) by scale factor 0.500345\nI1207 20:20:23.135535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62653 > 2) by scale factor 0.76146\nI1207 20:20:27.287891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8146 > 2) by scale factor 0.71058\nI1207 20:20:31.441231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30294 > 2) by scale factor 0.60552\nI1207 20:20:35.594696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18322 > 2) by scale factor 0.916079\nI1207 20:20:39.747490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0257 > 2) by scale factor 0.987315\nI1207 20:20:43.900534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15524 > 2) by scale factor 0.633867\nI1207 20:20:48.054456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.91214 > 2) by scale factor 0.511229\nI1207 20:20:52.208209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65469 > 2) by scale factor 0.753385\nI1207 20:20:56.362504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24462 > 2) by scale factor 0.89102\nI1207 20:21:00.515532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67957 > 2) by scale factor 0.543542\nI1207 20:21:08.819093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06529 > 2) by scale factor 0.652467\nI1207 20:21:12.972594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90314 > 2) by scale factor 0.688909\nI1207 20:21:12.984544  1922 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1207 20:23:50.892091  1922 solver.cpp:404]     Test net output #0: accuracy = 0.21353\nI1207 20:23:50.892443  1922 solver.cpp:404]     Test net output #1: loss = 6.9306 (* 1 = 6.9306 loss)\nI1207 20:23:54.835580  1922 solver.cpp:228] Iteration 9200, loss = 7.32568\nI1207 20:23:54.835633  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 20:23:54.835649  1922 solver.cpp:244]     Train net output #1: loss = 7.32568 (* 1 = 7.32568 loss)\nI1207 20:23:55.035418  1922 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1207 20:23:55.045558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9726 > 2) by scale factor 0.672812\nI1207 20:23:59.198024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60213 > 2) by scale factor 0.768602\nI1207 20:24:03.350694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03844 > 2) by scale factor 0.981143\nI1207 20:24:11.653542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36581 > 2) by scale factor 0.845375\nI1207 20:24:19.956305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 2.10795 > 2) by scale factor 0.94879\nI1207 20:24:36.559043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30755 > 2) by scale factor 0.86672\nI1207 20:24:40.712719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16085 > 2) by scale factor 0.925563\nI1207 20:24:44.864456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16715 > 2) by scale factor 0.631483\nI1207 20:24:49.016764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9804 > 2) by scale factor 0.67105\nI1207 20:24:53.169006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97739 > 2) by scale factor 0.671729\nI1207 20:24:57.320220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05998 > 2) by scale factor 0.6536\nI1207 20:25:01.473065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67531 > 2) by scale factor 0.747576\nI1207 20:25:05.624596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2022 > 2) by scale factor 0.624571\nI1207 20:25:13.928550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43629 > 2) by scale factor 0.820921\nI1207 20:25:26.380327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97976 > 2) by scale factor 0.671195\nI1207 20:25:30.532136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02206 > 2) by scale factor 0.98909\nI1207 20:25:34.683991  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85197 > 2) by scale factor 0.70127\nI1207 20:25:38.836946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47202 > 2) by scale factor 0.809055\nI1207 20:25:42.989439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18938 > 2) by scale factor 0.627081\nI1207 
20:25:47.141170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57349 > 2) by scale factor 0.559677\nI1207 20:25:51.293959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17473 > 2) by scale factor 0.919654\nI1207 20:25:55.445917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62838 > 2) by scale factor 0.760924\nI1207 20:25:59.598232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66074 > 2) by scale factor 0.75167\nI1207 20:26:03.750982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85954 > 2) by scale factor 0.518196\nI1207 20:26:07.902070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44875 > 2) by scale factor 0.57992\nI1207 20:26:12.055135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93353 > 2) by scale factor 0.681772\nI1207 20:26:16.208158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3458 > 2) by scale factor 0.852587\nI1207 20:26:20.361248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51038 > 2) by scale factor 0.443422\nI1207 20:26:24.513659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48724 > 2) by scale factor 0.57352\nI1207 20:26:28.666600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86373 > 2) by scale factor 0.69839\nI1207 20:26:32.819247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18261 > 2) by scale factor 0.478171\nI1207 20:26:36.970855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35477 > 2) by scale factor 0.596165\nI1207 20:26:41.123456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25106 > 2) by scale factor 0.615184\nI1207 20:26:45.275571  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.28179 > 2) by scale factor 0.876504\nI1207 20:26:49.428824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.892 > 2) by scale factor 0.691562\nI1207 20:26:53.580309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35297 > 2) by scale factor 0.596486\nI1207 20:26:57.732676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69596 > 2) by scale factor 0.741851\nI1207 20:27:01.884532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80682 > 2) by scale factor 0.712551\nI1207 20:27:06.037041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62844 > 2) by scale factor 0.760908\nI1207 20:27:10.188822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99099 > 2) by scale factor 0.501129\nI1207 20:27:14.340647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15654 > 2) by scale factor 0.633605\nI1207 20:27:18.492740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16067 > 2) by scale factor 0.925637\nI1207 20:27:22.646258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22942 > 2) by scale factor 0.897093\nI1207 20:27:26.798275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53352 > 2) by scale factor 0.789416\nI1207 20:27:35.100406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81766 > 2) by scale factor 0.709809\nI1207 20:27:39.252444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67317 > 2) by scale factor 0.748175\nI1207 20:27:43.404958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92832 > 2) by scale factor 0.682985\nI1207 20:27:47.556310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34054 > 2) by scale factor 0.598706\nI1207 
20:27:51.707908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29415 > 2) by scale factor 0.607137\nI1207 20:27:55.859797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38912 > 2) by scale factor 0.590123\nI1207 20:28:00.011823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01857 > 2) by scale factor 0.497689\nI1207 20:28:08.313557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76951 > 2) by scale factor 0.722149\nI1207 20:28:12.465802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52466 > 2) by scale factor 0.792186\nI1207 20:28:16.617774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55075 > 2) by scale factor 0.563261\nI1207 20:28:20.770520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13578 > 2) by scale factor 0.483584\nI1207 20:28:24.923429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93723 > 2) by scale factor 0.680915\nI1207 20:28:29.076380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35141 > 2) by scale factor 0.596764\nI1207 20:28:33.228624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52404 > 2) by scale factor 0.792381\nI1207 20:28:37.381345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63864 > 2) by scale factor 0.757967\nI1207 20:28:41.533416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58046 > 2) by scale factor 0.775056\nI1207 20:28:45.685024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2961 > 2) by scale factor 0.871042\nI1207 20:28:49.836714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78847 > 2) by scale factor 0.717239\nI1207 20:28:53.988968  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.77674 > 2) by scale factor 0.720268\nI1207 20:28:58.141427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42268 > 2) by scale factor 0.584337\nI1207 20:29:02.293748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5696 > 2) by scale factor 0.77833\nI1207 20:29:06.446180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54027 > 2) by scale factor 0.564929\nI1207 20:29:10.598702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11275 > 2) by scale factor 0.642518\nI1207 20:29:14.751978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89607 > 2) by scale factor 0.513338\nI1207 20:29:18.904110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19569 > 2) by scale factor 0.625844\nI1207 20:29:23.056313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91348 > 2) by scale factor 0.686464\nI1207 20:29:27.208533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35581 > 2) by scale factor 0.595981\nI1207 20:29:35.512305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98539 > 2) by scale factor 0.66993\nI1207 20:29:39.665191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37722 > 2) by scale factor 0.841318\nI1207 20:29:43.816664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72371 > 2) by scale factor 0.734293\nI1207 20:29:47.969830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71549 > 2) by scale factor 0.736517\nI1207 20:29:52.121760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50342 > 2) by scale factor 0.444107\nI1207 20:29:56.274760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99461 > 2) by scale factor 0.667866\nI1207 
20:30:00.426120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53969 > 2) by scale factor 0.565021\nI1207 20:30:04.578706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18529 > 2) by scale factor 0.91521\nI1207 20:30:08.730949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67901 > 2) by scale factor 0.746544\nI1207 20:30:12.882957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72316 > 2) by scale factor 0.73444\nI1207 20:30:17.034754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03885 > 2) by scale factor 0.980947\nI1207 20:30:21.185928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06913 > 2) by scale factor 0.65165\nI1207 20:30:25.338477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99336 > 2) by scale factor 0.668144\nI1207 20:30:33.640861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04648 > 2) by scale factor 0.656496\nI1207 20:30:37.792932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19881 > 2) by scale factor 0.909584\nI1207 20:30:41.945885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85961 > 2) by scale factor 0.518187\nI1207 20:30:46.097707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23265 > 2) by scale factor 0.895797\nI1207 20:30:46.110049  1922 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1207 20:33:22.736917  1922 solver.cpp:404]     Test net output #0: accuracy = 0.206941\nI1207 20:33:22.737284  1922 solver.cpp:404]     Test net output #1: loss = 7.40027 (* 1 = 7.40027 loss)\nI1207 20:33:26.680881  1922 solver.cpp:228] Iteration 9300, loss = 6.88177\nI1207 20:33:26.680930  1922 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 20:33:26.680948  1922 solver.cpp:244]     
Train net output #1: loss = 6.88177 (* 1 = 6.88177 loss)\nI1207 20:33:26.880353  1922 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1207 20:33:26.890456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27157 > 2) by scale factor 0.880449\nI1207 20:33:31.043624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19894 > 2) by scale factor 0.625206\nI1207 20:33:35.197123  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28778 > 2) by scale factor 0.608312\nI1207 20:33:39.350240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37145 > 2) by scale factor 0.843366\nI1207 20:33:43.503597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05141 > 2) by scale factor 0.655436\nI1207 20:33:47.656711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44376 > 2) by scale factor 0.81841\nI1207 20:33:55.961803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25964 > 2) by scale factor 0.885097\nI1207 20:34:00.115348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05304 > 2) by scale factor 0.974165\nI1207 20:34:04.268628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50163 > 2) by scale factor 0.799479\nI1207 20:34:12.572566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4505 > 2) by scale factor 0.816158\nI1207 20:34:16.725720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2215 > 2) by scale factor 0.620829\nI1207 20:34:20.877636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92537 > 2) by scale factor 0.683674\nI1207 20:34:25.030910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68473 > 2) by scale factor 0.744953\nI1207 20:34:29.184340  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.52204 > 2) by scale factor 0.793007\nI1207 20:34:33.336767  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50954 > 2) by scale factor 0.796959\nI1207 20:34:37.489850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90727 > 2) by scale factor 0.511866\nI1207 20:34:41.643213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38802 > 2) by scale factor 0.455786\nI1207 20:34:45.796149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85824 > 2) by scale factor 0.518372\nI1207 20:34:49.950042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9618 > 2) by scale factor 0.504822\nI1207 20:34:54.102807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29629 > 2) by scale factor 0.606742\nI1207 20:34:58.255990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17612 > 2) by scale factor 0.629699\nI1207 20:35:02.408954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54775 > 2) by scale factor 0.785006\nI1207 20:35:06.562502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88354 > 2) by scale factor 0.693592\nI1207 20:35:10.715628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57143 > 2) by scale factor 0.777779\nI1207 20:35:14.869424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15993 > 2) by scale factor 0.925954\nI1207 20:35:19.022507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25635 > 2) by scale factor 0.614185\nI1207 20:35:23.175052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35668 > 2) by scale factor 0.595826\nI1207 20:35:27.327927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50125 > 2) by scale factor 0.571225\nI1207 
20:35:31.479866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43776 > 2) by scale factor 0.581774\nI1207 20:35:35.632838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52894 > 2) by scale factor 0.790845\nI1207 20:35:39.786603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55977 > 2) by scale factor 0.78132\nI1207 20:35:43.940215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12943 > 2) by scale factor 0.639094\nI1207 20:35:48.093669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03434 > 2) by scale factor 0.983117\nI1207 20:35:52.247041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01761 > 2) by scale factor 0.662776\nI1207 20:35:56.400485  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95727 > 2) by scale factor 0.6763\nI1207 20:36:00.552666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7399 > 2) by scale factor 0.729954\nI1207 20:36:04.705662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0114 > 2) by scale factor 0.994332\nI1207 20:36:08.858192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5114 > 2) by scale factor 0.796367\nI1207 20:36:13.012511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12565 > 2) by scale factor 0.484772\nI1207 20:36:17.165388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0155 > 2) by scale factor 0.99231\nI1207 20:36:21.317167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99409 > 2) by scale factor 0.667982\nI1207 20:36:25.469708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91221 > 2) by scale factor 0.686763\nI1207 20:36:29.622200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.30675 > 2) by scale factor 0.604823\nI1207 20:36:33.774180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0758 > 2) by scale factor 0.963484\nI1207 20:36:37.927239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54396 > 2) by scale factor 0.564341\nI1207 20:36:42.079906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20132 > 2) by scale factor 0.908547\nI1207 20:36:46.233206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43524 > 2) by scale factor 0.582201\nI1207 20:36:50.386346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07833 > 2) by scale factor 0.962311\nI1207 20:36:54.538447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52482 > 2) by scale factor 0.792135\nI1207 20:37:02.842356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44385 > 2) by scale factor 0.818379\nI1207 20:37:06.995568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64136 > 2) by scale factor 0.757187\nI1207 20:37:11.148607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94278 > 2) by scale factor 0.679629\nI1207 20:37:15.300763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72852 > 2) by scale factor 0.732998\nI1207 20:37:19.454113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80938 > 2) by scale factor 0.711901\nI1207 20:37:23.606741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53757 > 2) by scale factor 0.788156\nI1207 20:37:31.909616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67201 > 2) by scale factor 0.748501\nI1207 20:37:36.063055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22757 > 2) by scale factor 0.619662\nI1207 
20:37:40.215764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76728 > 2) by scale factor 0.722731\nI1207 20:37:44.368582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63217 > 2) by scale factor 0.759828\nI1207 20:37:48.521664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48882 > 2) by scale factor 0.803593\nI1207 20:37:52.674959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20295 > 2) by scale factor 0.624425\nI1207 20:37:56.828500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72632 > 2) by scale factor 0.733591\nI1207 20:38:00.980386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58453 > 2) by scale factor 0.773836\nI1207 20:38:05.132884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35328 > 2) by scale factor 0.596431\nI1207 20:38:09.285094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06029 > 2) by scale factor 0.653534\nI1207 20:38:13.439054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53195 > 2) by scale factor 0.441311\nI1207 20:38:17.592680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95245 > 2) by scale factor 0.677405\nI1207 20:38:21.745309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02925 > 2) by scale factor 0.66023\nI1207 20:38:25.897843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69836 > 2) by scale factor 0.54078\nI1207 20:38:30.050415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44909 > 2) by scale factor 0.816629\nI1207 20:38:34.203218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86342 > 2) by scale factor 0.698465\nI1207 20:38:38.355881  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.18655 > 2) by scale factor 0.914683\nI1207 20:38:46.658809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53775 > 2) by scale factor 0.565331\nI1207 20:38:50.811640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23573 > 2) by scale factor 0.894564\nI1207 20:38:54.964002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95368 > 2) by scale factor 0.677122\nI1207 20:38:59.116880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88903 > 2) by scale factor 0.692275\nI1207 20:39:03.269441  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22053 > 2) by scale factor 0.900685\nI1207 20:39:07.422497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39677 > 2) by scale factor 0.834457\nI1207 20:39:24.025514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8245 > 2) by scale factor 0.708089\nI1207 20:39:28.178037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82375 > 2) by scale factor 0.708277\nI1207 20:39:32.331403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94359 > 2) by scale factor 0.679443\nI1207 20:39:36.484505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14872 > 2) by scale factor 0.930788\nI1207 20:39:40.636333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53828 > 2) by scale factor 0.787936\nI1207 20:39:44.789703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08997 > 2) by scale factor 0.956952\nI1207 20:39:48.942504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16451 > 2) by scale factor 0.923997\nI1207 20:39:53.095811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91614 > 2) by scale factor 0.685839\nI1207 
20:39:57.248236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36144 > 2) by scale factor 0.594983\nI1207 20:40:01.400457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45615 > 2) by scale factor 0.814283\nI1207 20:40:09.704650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41067 > 2) by scale factor 0.829644\nI1207 20:40:18.017055  1922 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1207 20:42:56.054801  1922 solver.cpp:404]     Test net output #0: accuracy = 0.266706\nI1207 20:42:56.055197  1922 solver.cpp:404]     Test net output #1: loss = 5.94606 (* 1 = 5.94606 loss)\nI1207 20:42:59.999079  1922 solver.cpp:228] Iteration 9400, loss = 5.80558\nI1207 20:42:59.999126  1922 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1207 20:42:59.999143  1922 solver.cpp:244]     Train net output #1: loss = 5.80557 (* 1 = 5.80557 loss)\nI1207 20:43:00.195046  1922 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1207 20:43:04.356789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30098 > 2) by scale factor 0.869193\nI1207 20:43:08.511637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32682 > 2) by scale factor 0.859542\nI1207 20:43:12.665747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8179 > 2) by scale factor 0.709749\nI1207 20:43:16.820827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98458 > 2) by scale factor 0.501935\nI1207 20:43:25.127243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10844 > 2) by scale factor 0.64341\nI1207 20:43:29.282102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10792 > 2) by scale factor 0.948805\nI1207 20:43:33.436969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33179 > 2) by scale factor 0.600278\nI1207 
20:43:37.591295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00583 > 2) by scale factor 0.499272\nI1207 20:43:41.744706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10473 > 2) by scale factor 0.950241\nI1207 20:43:45.898639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6806 > 2) by scale factor 0.746102\nI1207 20:43:50.052793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89361 > 2) by scale factor 0.691177\nI1207 20:43:54.205399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55747 > 2) by scale factor 0.562197\nI1207 20:43:58.359218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87427 > 2) by scale factor 0.516226\nI1207 20:44:02.512660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7146 > 2) by scale factor 0.736757\nI1207 20:44:06.667053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18824 > 2) by scale factor 0.913978\nI1207 20:44:10.820863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19302 > 2) by scale factor 0.476983\nI1207 20:44:14.974020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37462 > 2) by scale factor 0.84224\nI1207 20:44:19.127274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82386 > 2) by scale factor 0.70825\nI1207 20:44:23.281298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61923 > 2) by scale factor 0.763583\nI1207 20:44:27.436607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19943 > 2) by scale factor 0.909325\nI1207 20:44:31.590806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26132 > 2) by scale factor 0.469339\nI1207 20:44:35.744041  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.66086 > 2) by scale factor 0.54632\nI1207 20:44:39.897766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36286 > 2) by scale factor 0.594731\nI1207 20:44:48.204185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19798 > 2) by scale factor 0.909925\nI1207 20:44:56.509682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76049 > 2) by scale factor 0.724508\nI1207 20:45:00.662494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10679 > 2) by scale factor 0.643752\nI1207 20:45:04.816716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56148 > 2) by scale factor 0.780799\nI1207 20:45:08.969981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20443 > 2) by scale factor 0.907263\nI1207 20:45:13.123483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80464 > 2) by scale factor 0.713103\nI1207 20:45:21.429997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26973 > 2) by scale factor 0.611671\nI1207 20:45:25.583199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55239 > 2) by scale factor 0.78358\nI1207 20:45:38.038754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65945 > 2) by scale factor 0.54653\nI1207 20:45:42.192632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21712 > 2) by scale factor 0.621675\nI1207 20:45:46.346243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12689 > 2) by scale factor 0.639613\nI1207 20:45:54.652034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16699 > 2) by scale factor 0.922941\nI1207 20:45:58.805382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73632 > 2) by scale factor 0.73091\nI1207 
20:46:07.108073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18652 > 2) by scale factor 0.627643\nI1207 20:46:15.413231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24206 > 2) by scale factor 0.616891\nI1207 20:46:19.567662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89358 > 2) by scale factor 0.691184\nI1207 20:46:23.721530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63183 > 2) by scale factor 0.759927\nI1207 20:46:27.875545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18364 > 2) by scale factor 0.628212\nI1207 20:46:32.029834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74774 > 2) by scale factor 0.72787\nI1207 20:46:36.182960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8149 > 2) by scale factor 0.524259\nI1207 20:46:40.337427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62948 > 2) by scale factor 0.760607\nI1207 20:46:44.491008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09928 > 2) by scale factor 0.952706\nI1207 20:46:52.796551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55247 > 2) by scale factor 0.783555\nI1207 20:46:56.951074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18931 > 2) by scale factor 0.627094\nI1207 20:47:01.105325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21345 > 2) by scale factor 0.903568\nI1207 20:47:05.258759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76682 > 2) by scale factor 0.722852\nI1207 20:47:09.412986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52539 > 2) by scale factor 0.791958\nI1207 20:47:21.869359  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.85546 > 2) by scale factor 0.700413\nI1207 20:47:26.022948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83956 > 2) by scale factor 0.704334\nI1207 20:47:30.175854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77193 > 2) by scale factor 0.72152\nI1207 20:47:34.329766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72381 > 2) by scale factor 0.734266\nI1207 20:47:38.483306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71862 > 2) by scale factor 0.735667\nI1207 20:47:42.637922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20095 > 2) by scale factor 0.908698\nI1207 20:47:46.790690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21713 > 2) by scale factor 0.902065\nI1207 20:47:55.095559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00816 > 2) by scale factor 0.498981\nI1207 20:47:59.248378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25354 > 2) by scale factor 0.887494\nI1207 20:48:03.402276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07433 > 2) by scale factor 0.490878\nI1207 20:48:07.556521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30552 > 2) by scale factor 0.867482\nI1207 20:48:11.710808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35341 > 2) by scale factor 0.596409\nI1207 20:48:15.863263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13991 > 2) by scale factor 0.636962\nI1207 20:48:24.168179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17398 > 2) by scale factor 0.919972\nI1207 20:48:28.321321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42896 > 2) by scale factor 0.583268\nI1207 
20:48:32.474194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60341 > 2) by scale factor 0.43446\nI1207 20:48:36.628092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52727 > 2) by scale factor 0.441767\nI1207 20:48:40.782281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20532 > 2) by scale factor 0.623963\nI1207 20:48:44.936020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63229 > 2) by scale factor 0.759794\nI1207 20:48:49.089362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41155 > 2) by scale factor 0.829342\nI1207 20:48:53.242938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75325 > 2) by scale factor 0.726415\nI1207 20:48:57.397030  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04241 > 2) by scale factor 0.494754\nI1207 20:49:01.550076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67763 > 2) by scale factor 0.543829\nI1207 20:49:05.703245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75065 > 2) by scale factor 0.53324\nI1207 20:49:09.856428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76823 > 2) by scale factor 0.530753\nI1207 20:49:14.009831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02125 > 2) by scale factor 0.989488\nI1207 20:49:18.163312  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10199 > 2) by scale factor 0.95148\nI1207 20:49:22.315954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16186 > 2) by scale factor 0.925129\nI1207 20:49:26.468961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24662 > 2) by scale factor 0.470963\nI1207 20:49:30.621917  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.45369 > 2) by scale factor 0.815098\nI1207 20:49:34.776120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59916 > 2) by scale factor 0.769478\nI1207 20:49:38.929966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09185 > 2) by scale factor 0.646861\nI1207 20:49:43.082569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46237 > 2) by scale factor 0.577639\nI1207 20:49:47.236155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42844 > 2) by scale factor 0.583356\nI1207 20:49:51.389111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3203 > 2) by scale factor 0.861956\nI1207 20:49:51.401008  1922 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1207 20:52:29.310614  1922 solver.cpp:404]     Test net output #0: accuracy = 0.248765\nI1207 20:52:29.310978  1922 solver.cpp:404]     Test net output #1: loss = 5.94401 (* 1 = 5.94401 loss)\nI1207 20:52:33.252480  1922 solver.cpp:228] Iteration 9500, loss = 6.17735\nI1207 20:52:33.252532  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 20:52:33.252548  1922 solver.cpp:244]     Train net output #1: loss = 6.17735 (* 1 = 6.17735 loss)\nI1207 20:52:33.456751  1922 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1207 20:52:33.466826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1009 > 2) by scale factor 0.951973\nI1207 20:52:37.620159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80318 > 2) by scale factor 0.525876\nI1207 20:52:41.773746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43143 > 2) by scale factor 0.822563\nI1207 20:52:45.928303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74053 > 2) by scale factor 0.729785\nI1207 20:52:50.082396  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.35223 > 2) by scale factor 0.596618\nI1207 20:52:54.234870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54294 > 2) by scale factor 0.78649\nI1207 20:52:58.388571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49687 > 2) by scale factor 0.571939\nI1207 20:53:02.542235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61935 > 2) by scale factor 0.763548\nI1207 20:53:06.696241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61349 > 2) by scale factor 0.553482\nI1207 20:53:10.850332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32352 > 2) by scale factor 0.601772\nI1207 20:53:19.155022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07907 > 2) by scale factor 0.96197\nI1207 20:53:23.309741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53802 > 2) by scale factor 0.788017\nI1207 20:53:27.463230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57633 > 2) by scale factor 0.776299\nI1207 20:53:31.617805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03969 > 2) by scale factor 0.657962\nI1207 20:53:35.771865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23261 > 2) by scale factor 0.472521\nI1207 20:53:39.926326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41129 > 2) by scale factor 0.829433\nI1207 20:53:44.079274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76769 > 2) by scale factor 0.722623\nI1207 20:53:52.383797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79769 > 2) by scale factor 0.714876\nI1207 20:53:56.537346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39378 > 2) by scale factor 0.835497\nI1207 
20:54:00.691705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32232 > 2) by scale factor 0.462715\nI1207 20:54:04.845010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80606 > 2) by scale factor 0.712743\nI1207 20:54:08.997694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06988 > 2) by scale factor 0.96624\nI1207 20:54:13.151024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26162 > 2) by scale factor 0.884323\nI1207 20:54:17.304334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37506 > 2) by scale factor 0.842085\nI1207 20:54:21.457770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07359 > 2) by scale factor 0.650705\nI1207 20:54:25.610807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23049 > 2) by scale factor 0.896663\nI1207 20:54:29.763342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66127 > 2) by scale factor 0.75152\nI1207 20:54:33.916970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83844 > 2) by scale factor 0.521045\nI1207 20:54:38.070240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63499 > 2) by scale factor 0.550208\nI1207 20:54:42.223552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14139 > 2) by scale factor 0.636661\nI1207 20:54:46.377085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84628 > 2) by scale factor 0.702671\nI1207 20:54:54.681699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59597 > 2) by scale factor 0.770424\nI1207 20:54:58.834492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90708 > 2) by scale factor 0.511891\nI1207 20:55:02.987665  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.51212 > 2) by scale factor 0.79614\nI1207 20:55:07.141891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8737 > 2) by scale factor 0.695967\nI1207 20:55:11.295490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16099 > 2) by scale factor 0.632714\nI1207 20:55:15.448493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36712 > 2) by scale factor 0.59398\nI1207 20:55:19.601359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21711 > 2) by scale factor 0.902074\nI1207 20:55:23.754269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62302 > 2) by scale factor 0.552025\nI1207 20:55:27.907810  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16759 > 2) by scale factor 0.631395\nI1207 20:55:32.061444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29802 > 2) by scale factor 0.606424\nI1207 20:55:40.364809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3162 > 2) by scale factor 0.603099\nI1207 20:55:44.517166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66813 > 2) by scale factor 0.545236\nI1207 20:55:48.669805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90459 > 2) by scale factor 0.688565\nI1207 20:55:52.823197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67092 > 2) by scale factor 0.748806\nI1207 20:55:56.976745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05177 > 2) by scale factor 0.974769\nI1207 20:56:01.129638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68619 > 2) by scale factor 0.542565\nI1207 20:56:05.282284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15766 > 2) by scale factor 0.633381\nI1207 
20:56:09.435048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76395 > 2) by scale factor 0.531356\nI1207 20:56:13.588474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2647 > 2) by scale factor 0.883118\nI1207 20:56:17.741475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01184 > 2) by scale factor 0.664045\nI1207 20:56:21.894670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40256 > 2) by scale factor 0.587793\nI1207 20:56:26.048280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56556 > 2) by scale factor 0.560922\nI1207 20:56:30.201346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39448 > 2) by scale factor 0.835255\nI1207 20:56:34.353953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90922 > 2) by scale factor 0.68747\nI1207 20:56:38.506706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8939 > 2) by scale factor 0.691108\nI1207 20:56:42.659759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91791 > 2) by scale factor 0.685422\nI1207 20:56:46.812602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67088 > 2) by scale factor 0.748817\nI1207 20:56:50.965703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81255 > 2) by scale factor 0.711098\nI1207 20:56:59.270671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63231 > 2) by scale factor 0.759789\nI1207 20:57:03.423866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86541 > 2) by scale factor 0.697981\nI1207 20:57:07.577411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85077 > 2) by scale factor 0.701565\nI1207 20:57:11.730190  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.79175 > 2) by scale factor 0.716396\nI1207 20:57:15.883693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01214 > 2) by scale factor 0.66398\nI1207 20:57:20.036916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52095 > 2) by scale factor 0.793352\nI1207 20:57:24.190274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9774 > 2) by scale factor 0.671727\nI1207 20:57:28.343677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64918 > 2) by scale factor 0.754951\nI1207 20:57:32.496536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26313 > 2) by scale factor 0.883733\nI1207 20:57:36.649668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23931 > 2) by scale factor 0.617415\nI1207 20:57:40.802959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89862 > 2) by scale factor 0.513002\nI1207 20:57:44.956864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03508 > 2) by scale factor 0.495654\nI1207 20:57:49.110265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45634 > 2) by scale factor 0.578647\nI1207 20:57:53.263324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32129 > 2) by scale factor 0.602175\nI1207 20:57:57.416446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01726 > 2) by scale factor 0.662852\nI1207 20:58:09.869699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32498 > 2) by scale factor 0.860222\nI1207 20:58:14.023496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8442 > 2) by scale factor 0.703185\nI1207 20:58:22.327337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84897 > 2) by scale factor 0.702008\nI1207 
20:58:26.481186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6057 > 2) by scale factor 0.767549\nI1207 20:58:30.634744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00298 > 2) by scale factor 0.499627\nI1207 20:58:34.787760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63381 > 2) by scale factor 0.759357\nI1207 20:58:43.092690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35522 > 2) by scale factor 0.849178\nI1207 20:58:51.397574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08557 > 2) by scale factor 0.648179\nI1207 20:58:55.551403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91554 > 2) by scale factor 0.685978\nI1207 20:58:59.704357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44251 > 2) by scale factor 0.580972\nI1207 20:59:03.857004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95969 > 2) by scale factor 0.403251\nI1207 20:59:08.010107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97191 > 2) by scale factor 0.503536\nI1207 20:59:12.162920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35468 > 2) by scale factor 0.849374\nI1207 20:59:16.316730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63752 > 2) by scale factor 0.758289\nI1207 20:59:20.469763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78385 > 2) by scale factor 0.718429\nI1207 20:59:24.622663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01255 > 2) by scale factor 0.993763\nI1207 20:59:24.634542  1922 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1207 21:02:02.599334  1922 solver.cpp:404]     Test net output #0: accuracy = 0.167765\nI1207 21:02:02.599658  1922 solver.cpp:404]   
  Test net output #1: loss = 11.1778 (* 1 = 11.1778 loss)\nI1207 21:02:06.542997  1922 solver.cpp:228] Iteration 9600, loss = 12.1976\nI1207 21:02:06.543047  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 21:02:06.543066  1922 solver.cpp:244]     Train net output #1: loss = 12.1976 (* 1 = 12.1976 loss)\nI1207 21:02:06.745316  1922 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1207 21:02:06.755408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2961 > 2) by scale factor 0.606778\nI1207 21:02:10.913897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92313 > 2) by scale factor 0.509797\nI1207 21:02:15.072969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47703 > 2) by scale factor 0.575204\nI1207 21:02:23.387195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22355 > 2) by scale factor 0.899463\nI1207 21:02:27.545953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10756 > 2) by scale factor 0.643592\nI1207 21:02:31.704838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3672 > 2) by scale factor 0.844881\nI1207 21:02:35.862846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24397 > 2) by scale factor 0.891277\nI1207 21:02:40.021723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38207 > 2) by scale factor 0.839605\nI1207 21:02:44.179831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05621 > 2) by scale factor 0.972664\nI1207 21:02:48.338698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44501 > 2) by scale factor 0.580549\nI1207 21:02:52.497346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8559 > 2) by scale factor 0.518685\nI1207 21:02:56.656036  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.00854 > 2) by scale factor 0.498935\nI1207 21:03:00.813299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11552 > 2) by scale factor 0.641947\nI1207 21:03:04.971354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63073 > 2) by scale factor 0.760245\nI1207 21:03:09.129618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27651 > 2) by scale factor 0.610405\nI1207 21:03:13.287652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08914 > 2) by scale factor 0.64743\nI1207 21:03:17.445677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63495 > 2) by scale factor 0.759027\nI1207 21:03:21.604038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38947 > 2) by scale factor 0.590062\nI1207 21:03:25.762668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97033 > 2) by scale factor 0.503736\nI1207 21:03:29.920567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98967 > 2) by scale factor 0.66897\nI1207 21:03:34.077770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75095 > 2) by scale factor 0.727022\nI1207 21:03:38.236150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7301 > 2) by scale factor 0.732573\nI1207 21:03:42.394398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6681 > 2) by scale factor 0.749597\nI1207 21:03:46.552778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17726 > 2) by scale factor 0.629472\nI1207 21:03:50.710266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.051 > 2) by scale factor 0.655523\nI1207 21:03:54.869055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88675 > 2) by scale factor 0.69282\nI1207 
21:03:59.026598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66216 > 2) by scale factor 0.751268\nI1207 21:04:03.183323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03079 > 2) by scale factor 0.659894\nI1207 21:04:07.341598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92503 > 2) by scale factor 0.50955\nI1207 21:04:15.655159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35525 > 2) by scale factor 0.849167\nI1207 21:04:19.813709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18018 > 2) by scale factor 0.478448\nI1207 21:04:23.971837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08207 > 2) by scale factor 0.648914\nI1207 21:04:28.130059  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53344 > 2) by scale factor 0.789442\nI1207 21:04:32.288363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94732 > 2) by scale factor 0.678584\nI1207 21:04:36.446449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10198 > 2) by scale factor 0.951484\nI1207 21:04:40.603598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39663 > 2) by scale factor 0.588819\nI1207 21:04:44.760901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04641 > 2) by scale factor 0.494265\nI1207 21:04:48.919201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70953 > 2) by scale factor 0.738135\nI1207 21:04:53.080615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68196 > 2) by scale factor 0.745725\nI1207 21:04:57.264174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39888 > 2) by scale factor 0.588429\nI1207 21:05:01.446686  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.75386 > 2) by scale factor 0.420711\nI1207 21:05:05.630388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06411 > 2) by scale factor 0.652718\nI1207 21:05:09.812858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8354 > 2) by scale factor 0.521458\nI1207 21:05:13.995429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46258 > 2) by scale factor 0.577604\nI1207 21:05:18.178658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56434 > 2) by scale factor 0.779926\nI1207 21:05:22.360353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23739 > 2) by scale factor 0.893897\nI1207 21:05:26.544040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00587 > 2) by scale factor 0.665364\nI1207 21:05:30.726604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11603 > 2) by scale factor 0.641842\nI1207 21:05:34.908185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93755 > 2) by scale factor 0.68084\nI1207 21:05:39.090229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6444 > 2) by scale factor 0.756316\nI1207 21:05:43.272642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35841 > 2) by scale factor 0.595521\nI1207 21:05:47.455292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31408 > 2) by scale factor 0.463598\nI1207 21:05:51.638065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01615 > 2) by scale factor 0.49799\nI1207 21:05:55.820041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43601 > 2) by scale factor 0.450856\nI1207 21:06:00.004190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63185 > 2) by scale factor 0.550683\nI1207 
21:06:04.187878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01284 > 2) by scale factor 0.663826\nI1207 21:06:08.370334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69571 > 2) by scale factor 0.741919\nI1207 21:06:12.553501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80058 > 2) by scale factor 0.714137\nI1207 21:06:16.736207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36944 > 2) by scale factor 0.844083\nI1207 21:06:20.919935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79033 > 2) by scale factor 0.417508\nI1207 21:06:25.102464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61545 > 2) by scale factor 0.433327\nI1207 21:06:29.285605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76345 > 2) by scale factor 0.723734\nI1207 21:06:33.469135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73536 > 2) by scale factor 0.731166\nI1207 21:06:37.652252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57745 > 2) by scale factor 0.775962\nI1207 21:06:41.834805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79213 > 2) by scale factor 0.716299\nI1207 21:06:46.017804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00835 > 2) by scale factor 0.664817\nI1207 21:06:50.199604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62678 > 2) by scale factor 0.76139\nI1207 21:06:54.381707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69277 > 2) by scale factor 0.426188\nI1207 21:06:58.565439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31696 > 2) by scale factor 0.463289\nI1207 21:07:02.746392  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.93731 > 2) by scale factor 0.507961\nI1207 21:07:06.928941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58875 > 2) by scale factor 0.772575\nI1207 21:07:11.112416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26442 > 2) by scale factor 0.612666\nI1207 21:07:15.294217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77425 > 2) by scale factor 0.720917\nI1207 21:07:19.477107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01271 > 2) by scale factor 0.498417\nI1207 21:07:23.660109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65759 > 2) by scale factor 0.546809\nI1207 21:07:27.842973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09335 > 2) by scale factor 0.955408\nI1207 21:07:32.025650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43852 > 2) by scale factor 0.82017\nI1207 21:07:36.209857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52303 > 2) by scale factor 0.792698\nI1207 21:07:40.392676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16708 > 2) by scale factor 0.631497\nI1207 21:07:44.574551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91653 > 2) by scale factor 0.510656\nI1207 21:07:48.757804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74049 > 2) by scale factor 0.729795\nI1207 21:07:52.940157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54488 > 2) by scale factor 0.564193\nI1207 21:07:57.123016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48961 > 2) by scale factor 0.80334\nI1207 21:08:01.306493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60873 > 2) by scale factor 0.554212\nI1207 
21:08:05.489200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50774 > 2) by scale factor 0.443681\nI1207 21:08:09.672709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47729 > 2) by scale factor 0.575161\nI1207 21:08:13.855391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21728 > 2) by scale factor 0.621643\nI1207 21:08:18.038272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43359 > 2) by scale factor 0.821832\nI1207 21:08:22.220471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03158 > 2) by scale factor 0.659722\nI1207 21:08:26.402331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60252 > 2) by scale factor 0.768487\nI1207 21:08:30.585475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44025 > 2) by scale factor 0.819587\nI1207 21:08:34.767325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61596 > 2) by scale factor 0.764537\nI1207 21:08:38.949623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60054 > 2) by scale factor 0.555472\nI1207 21:08:43.133174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13943 > 2) by scale factor 0.637059\nI1207 21:08:47.315405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45335 > 2) by scale factor 0.815211\nI1207 21:08:51.497149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93737 > 2) by scale factor 0.680881\nI1207 21:08:55.680008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91119 > 2) by scale factor 0.687005\nI1207 21:08:59.862856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73637 > 2) by scale factor 0.730894\nI1207 21:08:59.874816  1922 solver.cpp:337] Iteration 9700, Testing net 
(#0)\nI1207 21:11:37.114037  1922 solver.cpp:404]     Test net output #0: accuracy = 0.118059\nI1207 21:11:37.114431  1922 solver.cpp:404]     Test net output #1: loss = 11.4695 (* 1 = 11.4695 loss)\nI1207 21:11:41.056017  1922 solver.cpp:228] Iteration 9700, loss = 10.7461\nI1207 21:11:41.056068  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 21:11:41.056087  1922 solver.cpp:244]     Train net output #1: loss = 10.7461 (* 1 = 10.7461 loss)\nI1207 21:11:41.284500  1922 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1207 21:11:41.294677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2859 > 2) by scale factor 0.466647\nI1207 21:11:45.474732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61146 > 2) by scale factor 0.765856\nI1207 21:11:49.654661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36764 > 2) by scale factor 0.457914\nI1207 21:11:53.834164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64162 > 2) by scale factor 0.549206\nI1207 21:11:58.014063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18582 > 2) by scale factor 0.477804\nI1207 21:12:02.193799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98211 > 2) by scale factor 0.670667\nI1207 21:12:06.373348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32223 > 2) by scale factor 0.86124\nI1207 21:12:10.552755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3246 > 2) by scale factor 0.601576\nI1207 21:12:18.911641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24623 > 2) by scale factor 0.616099\nI1207 21:12:23.091938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37644 > 2) by scale factor 0.592339\nI1207 21:12:27.272454  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.73683 > 2) by scale factor 0.730773\nI1207 21:12:31.452522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13262 > 2) by scale factor 0.638443\nI1207 21:12:35.632119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86887 > 2) by scale factor 0.516947\nI1207 21:12:43.989420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3241 > 2) by scale factor 0.860549\nI1207 21:12:48.170019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41423 > 2) by scale factor 0.585784\nI1207 21:12:52.349484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93006 > 2) by scale factor 0.68258\nI1207 21:12:56.528348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04491 > 2) by scale factor 0.494449\nI1207 21:13:00.708946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68659 > 2) by scale factor 0.744438\nI1207 21:13:04.889793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34648 > 2) by scale factor 0.852342\nI1207 21:13:09.070093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94709 > 2) by scale factor 0.678636\nI1207 21:13:13.249828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84707 > 2) by scale factor 0.702476\nI1207 21:13:17.429718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50195 > 2) by scale factor 0.799378\nI1207 21:13:21.610124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28094 > 2) by scale factor 0.876832\nI1207 21:13:25.789937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72825 > 2) by scale factor 0.733071\nI1207 21:13:29.969358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51497 > 2) by scale factor 0.442971\nI1207 
21:13:34.149477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43516 > 2) by scale factor 0.821301\nI1207 21:13:38.330214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43624 > 2) by scale factor 0.582032\nI1207 21:13:42.509526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13944 > 2) by scale factor 0.483158\nI1207 21:13:46.689190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46605 > 2) by scale factor 0.577026\nI1207 21:13:50.870191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23868 > 2) by scale factor 0.893383\nI1207 21:13:55.050343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74496 > 2) by scale factor 0.728608\nI1207 21:13:59.229873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51922 > 2) by scale factor 0.793897\nI1207 21:14:03.409687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75098 > 2) by scale factor 0.727013\nI1207 21:14:07.588814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04735 > 2) by scale factor 0.656308\nI1207 21:14:11.767906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62451 > 2) by scale factor 0.762046\nI1207 21:14:15.947039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13887 > 2) by scale factor 0.637173\nI1207 21:14:20.127179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19608 > 2) by scale factor 0.476636\nI1207 21:14:24.306795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55494 > 2) by scale factor 0.782798\nI1207 21:14:28.487051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84351 > 2) by scale factor 0.703356\nI1207 21:14:32.667650  1922 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 3.04666 > 2) by scale factor 0.656458\nI1207 21:14:36.846909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42841 > 2) by scale factor 0.58336\nI1207 21:14:41.026134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26415 > 2) by scale factor 0.612717\nI1207 21:14:45.206320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27675 > 2) by scale factor 0.467645\nI1207 21:14:49.386188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0941 > 2) by scale factor 0.955064\nI1207 21:14:57.743229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23441 > 2) by scale factor 0.895089\nI1207 21:15:01.923570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63997 > 2) by scale factor 0.757585\nI1207 21:15:10.279598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89363 > 2) by scale factor 0.691174\nI1207 21:15:14.458520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20493 > 2) by scale factor 0.475632\nI1207 21:15:18.638440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41282 > 2) by scale factor 0.828906\nI1207 21:15:22.819005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74081 > 2) by scale factor 0.729711\nI1207 21:15:26.997066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15697 > 2) by scale factor 0.927227\nI1207 21:15:31.176647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.804 > 2) by scale factor 0.713266\nI1207 21:15:39.534657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03615 > 2) by scale factor 0.495522\nI1207 21:15:43.714247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2973 > 2) by scale factor 
0.870586\nI1207 21:15:47.893754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68707 > 2) by scale factor 0.744306\nI1207 21:15:52.073348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96641 > 2) by scale factor 0.674215\nI1207 21:15:56.253964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01998 > 2) by scale factor 0.99011\nI1207 21:16:00.434229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29629 > 2) by scale factor 0.606742\nI1207 21:16:04.614212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88143 > 2) by scale factor 0.694101\nI1207 21:16:08.795379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35977 > 2) by scale factor 0.595279\nI1207 21:16:12.975800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72419 > 2) by scale factor 0.734162\nI1207 21:16:17.156296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20778 > 2) by scale factor 0.623485\nI1207 21:16:21.336266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14364 > 2) by scale factor 0.636205\nI1207 21:16:25.516352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26522 > 2) by scale factor 0.882916\nI1207 21:16:29.695719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11977 > 2) by scale factor 0.485464\nI1207 21:16:33.875394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92984 > 2) by scale factor 0.682632\nI1207 21:16:38.055559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80183 > 2) by scale factor 0.713819\nI1207 21:16:42.235451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30564 > 2) by scale factor 0.605026\nI1207 21:16:46.413463  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.10145 > 2) by scale factor 0.644859\nI1207 21:16:50.592986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37657 > 2) by scale factor 0.592317\nI1207 21:16:54.772608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6663 > 2) by scale factor 0.545509\nI1207 21:16:58.952599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48249 > 2) by scale factor 0.805641\nI1207 21:17:07.309785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28401 > 2) by scale factor 0.609011\nI1207 21:17:11.489624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09129 > 2) by scale factor 0.956348\nI1207 21:17:15.670125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70959 > 2) by scale factor 0.73812\nI1207 21:17:24.028615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61909 > 2) by scale factor 0.763625\nI1207 21:17:28.208073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48949 > 2) by scale factor 0.57315\nI1207 21:17:32.388296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47126 > 2) by scale factor 0.57616\nI1207 21:17:36.568975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83176 > 2) by scale factor 0.706275\nI1207 21:17:40.748302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10771 > 2) by scale factor 0.948897\nI1207 21:17:44.928014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65757 > 2) by scale factor 0.546811\nI1207 21:17:49.107511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3479 > 2) by scale factor 0.59739\nI1207 21:17:53.286252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01058 > 2) by scale factor 
0.994736\nI1207 21:17:57.465911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36996 > 2) by scale factor 0.843898\nI1207 21:18:01.645453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13245 > 2) by scale factor 0.937887\nI1207 21:18:05.824770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55194 > 2) by scale factor 0.783716\nI1207 21:18:14.181772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3885 > 2) by scale factor 0.837345\nI1207 21:18:18.362120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27389 > 2) by scale factor 0.467958\nI1207 21:18:22.543300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92898 > 2) by scale factor 0.682832\nI1207 21:18:26.724145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01581 > 2) by scale factor 0.663172\nI1207 21:18:30.904266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.275 > 2) by scale factor 0.879122\nI1207 21:18:35.084868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01235 > 2) by scale factor 0.993865\nI1207 21:18:35.096776  1922 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1207 21:21:12.007252  1922 solver.cpp:404]     Test net output #0: accuracy = 0.18953\nI1207 21:21:12.007638  1922 solver.cpp:404]     Test net output #1: loss = 6.45973 (* 1 = 6.45973 loss)\nI1207 21:21:15.950625  1922 solver.cpp:228] Iteration 9800, loss = 6.169\nI1207 21:21:15.950675  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 21:21:15.950691  1922 solver.cpp:244]     Train net output #1: loss = 6.169 (* 1 = 6.169 loss)\nI1207 21:21:16.177217  1922 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1207 21:21:16.187381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36012 > 2) by scale factor 
0.595216\nI1207 21:21:20.366448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64906 > 2) by scale factor 0.754985\nI1207 21:21:24.546458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36171 > 2) by scale factor 0.846844\nI1207 21:21:28.725134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47677 > 2) by scale factor 0.807505\nI1207 21:21:32.904227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82919 > 2) by scale factor 0.522304\nI1207 21:21:37.083935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28103 > 2) by scale factor 0.467178\nI1207 21:21:41.263156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4789 > 2) by scale factor 0.574894\nI1207 21:21:45.442802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81344 > 2) by scale factor 0.710874\nI1207 21:21:49.622632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7343 > 2) by scale factor 0.731449\nI1207 21:21:53.801362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71851 > 2) by scale factor 0.735698\nI1207 21:21:57.981039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86396 > 2) by scale factor 0.698334\nI1207 21:22:02.160287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79729 > 2) by scale factor 0.714978\nI1207 21:22:06.338646  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84196 > 2) by scale factor 0.703739\nI1207 21:22:10.517874  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11053 > 2) by scale factor 0.486556\nI1207 21:22:14.698233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68227 > 2) by scale factor 0.745636\nI1207 21:22:23.052690  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.96582 > 2) by scale factor 0.67435\nI1207 21:22:27.231484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12162 > 2) by scale factor 0.485246\nI1207 21:22:31.411236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01232 > 2) by scale factor 0.66394\nI1207 21:22:39.766768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24233 > 2) by scale factor 0.891928\nI1207 21:22:43.947273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15635 > 2) by scale factor 0.633644\nI1207 21:22:48.126536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16112 > 2) by scale factor 0.925446\nI1207 21:22:52.305423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17869 > 2) by scale factor 0.62919\nI1207 21:22:56.483678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45681 > 2) by scale factor 0.578567\nI1207 21:23:00.663890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59385 > 2) by scale factor 0.556506\nI1207 21:23:04.842850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77986 > 2) by scale factor 0.71946\nI1207 21:23:09.022156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50323 > 2) by scale factor 0.798968\nI1207 21:23:13.200533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34386 > 2) by scale factor 0.853292\nI1207 21:23:17.378799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24873 > 2) by scale factor 0.889391\nI1207 21:23:21.556792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52821 > 2) by scale factor 0.566859\nI1207 21:23:25.735436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72706 > 2) by scale factor 
0.733389\nI1207 21:23:29.914734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72553 > 2) by scale factor 0.733803\nI1207 21:23:34.093817  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61782 > 2) by scale factor 0.763994\nI1207 21:23:38.271960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53903 > 2) by scale factor 0.787701\nI1207 21:23:42.450614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10592 > 2) by scale factor 0.949702\nI1207 21:23:46.630121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7377 > 2) by scale factor 0.730541\nI1207 21:23:50.808394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22734 > 2) by scale factor 0.619706\nI1207 21:23:54.988263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42133 > 2) by scale factor 0.825991\nI1207 21:23:59.167279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74961 > 2) by scale factor 0.727375\nI1207 21:24:03.345463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42228 > 2) by scale factor 0.825668\nI1207 21:24:07.524235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73626 > 2) by scale factor 0.535295\nI1207 21:24:11.702246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80152 > 2) by scale factor 0.713899\nI1207 21:24:15.881392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41309 > 2) by scale factor 0.58598\nI1207 21:24:20.061151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17688 > 2) by scale factor 0.918746\nI1207 21:24:24.240015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38484 > 2) by scale factor 0.83863\nI1207 21:24:28.419737  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.59807 > 2) by scale factor 0.769802\nI1207 21:24:32.598863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98159 > 2) by scale factor 0.670783\nI1207 21:24:36.776989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72418 > 2) by scale factor 0.537031\nI1207 21:24:40.955999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74728 > 2) by scale factor 0.727992\nI1207 21:24:45.136288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25267 > 2) by scale factor 0.887835\nI1207 21:24:53.489193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95455 > 2) by scale factor 0.676921\nI1207 21:24:57.668033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59476 > 2) by scale factor 0.770785\nI1207 21:25:01.846969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86434 > 2) by scale factor 0.698242\nI1207 21:25:06.025318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61382 > 2) by scale factor 0.765164\nI1207 21:25:10.203876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78351 > 2) by scale factor 0.718517\nI1207 21:25:14.383541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79738 > 2) by scale factor 0.714954\nI1207 21:25:18.562224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29888 > 2) by scale factor 0.869989\nI1207 21:25:22.741890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77768 > 2) by scale factor 0.720024\nI1207 21:25:26.920331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67481 > 2) by scale factor 0.747717\nI1207 21:25:31.099284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56857 > 2) by scale 
factor 0.560449\nI1207 21:25:35.278034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03552 > 2) by scale factor 0.982548\nI1207 21:25:39.456370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29113 > 2) by scale factor 0.87293\nI1207 21:25:43.633689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43411 > 2) by scale factor 0.821657\nI1207 21:25:47.813205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09478 > 2) by scale factor 0.488427\nI1207 21:25:51.991369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54937 > 2) by scale factor 0.784509\nI1207 21:25:56.169488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57702 > 2) by scale factor 0.559124\nI1207 21:26:00.349288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23701 > 2) by scale factor 0.617854\nI1207 21:26:04.527475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71266 > 2) by scale factor 0.538697\nI1207 21:26:08.705644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67147 > 2) by scale factor 0.748653\nI1207 21:26:12.884152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42266 > 2) by scale factor 0.825538\nI1207 21:26:17.062722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76898 > 2) by scale factor 0.722287\nI1207 21:26:21.240234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18896 > 2) by scale factor 0.627165\nI1207 21:26:25.418076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8176 > 2) by scale factor 0.709823\nI1207 21:26:29.595844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78157 > 2) by scale factor 0.528881\nI1207 21:26:33.774690  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.83179 > 2) by scale factor 0.706267\nI1207 21:26:37.952141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27683 > 2) by scale factor 0.467636\nI1207 21:26:42.129972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20585 > 2) by scale factor 0.62386\nI1207 21:26:46.309103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5832 > 2) by scale factor 0.436376\nI1207 21:26:50.488265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75213 > 2) by scale factor 0.420864\nI1207 21:26:54.666535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73932 > 2) by scale factor 0.730108\nI1207 21:26:58.845386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17609 > 2) by scale factor 0.478917\nI1207 21:27:03.024407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49841 > 2) by scale factor 0.571689\nI1207 21:27:07.204012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74998 > 2) by scale factor 0.727278\nI1207 21:27:11.381541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10002 > 2) by scale factor 0.487802\nI1207 21:27:15.558750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9166 > 2) by scale factor 0.510647\nI1207 21:27:19.736069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41309 > 2) by scale factor 0.453197\nI1207 21:27:23.914572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55246 > 2) by scale factor 0.783557\nI1207 21:27:28.094144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96933 > 2) by scale factor 0.503863\nI1207 21:27:32.271814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56477 > 2) by 
scale factor 0.438138\nI1207 21:27:36.449771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06447 > 2) by scale factor 0.652641\nI1207 21:27:40.628274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08371 > 2) by scale factor 0.64857\nI1207 21:27:44.806996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58435 > 2) by scale factor 0.557981\nI1207 21:27:48.986632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82046 > 2) by scale factor 0.709103\nI1207 21:27:53.165280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06915 > 2) by scale factor 0.651646\nI1207 21:27:57.343255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43685 > 2) by scale factor 0.581929\nI1207 21:28:01.521526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59596 > 2) by scale factor 0.55618\nI1207 21:28:05.699869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05554 > 2) by scale factor 0.972981\nI1207 21:28:09.877871  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37329 > 2) by scale factor 0.842712\nI1207 21:28:09.889890  1922 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1207 21:30:48.072386  1922 solver.cpp:404]     Test net output #0: accuracy = 0.228706\nI1207 21:30:48.072764  1922 solver.cpp:404]     Test net output #1: loss = 9.54577 (* 1 = 9.54577 loss)\nI1207 21:30:52.015329  1922 solver.cpp:228] Iteration 9900, loss = 11.0422\nI1207 21:30:52.015381  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 21:30:52.015399  1922 solver.cpp:244]     Train net output #1: loss = 11.0422 (* 1 = 11.0422 loss)\nI1207 21:30:52.238610  1922 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1207 21:30:52.248780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75232 > 2) by 
scale factor 0.72666\nI1207 21:30:56.429345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47094 > 2) by scale factor 0.809409\nI1207 21:31:00.609933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62284 > 2) by scale factor 0.552053\nI1207 21:31:04.790247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24223 > 2) by scale factor 0.89197\nI1207 21:31:08.971565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.094 > 2) by scale factor 0.646413\nI1207 21:31:17.330389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2227 > 2) by scale factor 0.899808\nI1207 21:31:21.511354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20955 > 2) by scale factor 0.62314\nI1207 21:31:29.871551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74257 > 2) by scale factor 0.729244\nI1207 21:31:34.053233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53711 > 2) by scale factor 0.7883\nI1207 21:31:38.234565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40504 > 2) by scale factor 0.587365\nI1207 21:31:42.416553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08584 > 2) by scale factor 0.489495\nI1207 21:31:46.598443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28005 > 2) by scale factor 0.609747\nI1207 21:31:50.779515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11764 > 2) by scale factor 0.641511\nI1207 21:31:59.141254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8626 > 2) by scale factor 0.698666\nI1207 21:32:03.322000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24277 > 2) by scale factor 0.616757\nI1207 21:32:07.503103  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.66481 > 2) by scale factor 0.54573\nI1207 21:32:11.683846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15857 > 2) by scale factor 0.633197\nI1207 21:32:15.864969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.052 > 2) by scale factor 0.97466\nI1207 21:32:20.044683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14156 > 2) by scale factor 0.933901\nI1207 21:32:24.226598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70639 > 2) by scale factor 0.738992\nI1207 21:32:28.407490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93032 > 2) by scale factor 0.682519\nI1207 21:32:32.589262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0286 > 2) by scale factor 0.660371\nI1207 21:32:36.771622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56787 > 2) by scale factor 0.560559\nI1207 21:32:40.952411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03269 > 2) by scale factor 0.65948\nI1207 21:32:45.132923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93857 > 2) by scale factor 0.507798\nI1207 21:32:49.313262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73901 > 2) by scale factor 0.73019\nI1207 21:32:53.495020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60283 > 2) by scale factor 0.555119\nI1207 21:32:57.677130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61649 > 2) by scale factor 0.764382\nI1207 21:33:01.857064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81897 > 2) by scale factor 0.709478\nI1207 21:33:06.037923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79848 > 2) by scale 
factor 0.714674\nI1207 21:33:10.219542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29962 > 2) by scale factor 0.465157\nI1207 21:33:18.579911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51826 > 2) by scale factor 0.794199\nI1207 21:33:26.940150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96073 > 2) by scale factor 0.504957\nI1207 21:33:31.120951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45614 > 2) by scale factor 0.814285\nI1207 21:33:35.301656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57069 > 2) by scale factor 0.560116\nI1207 21:33:39.482653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0302 > 2) by scale factor 0.985124\nI1207 21:33:43.663502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00962 > 2) by scale factor 0.664535\nI1207 21:33:47.842694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56548 > 2) by scale factor 0.779581\nI1207 21:33:52.022524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88177 > 2) by scale factor 0.694019\nI1207 21:33:56.203147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70173 > 2) by scale factor 0.740266\nI1207 21:34:00.382841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91562 > 2) by scale factor 0.510775\nI1207 21:34:04.563073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69993 > 2) by scale factor 0.740759\nI1207 21:34:08.743877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4997 > 2) by scale factor 0.800097\nI1207 21:34:12.924748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79939 > 2) by scale factor 0.71444\nI1207 21:34:17.106047  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.79804 > 2) by scale factor 0.714787\nI1207 21:34:21.287009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77378 > 2) by scale factor 0.529972\nI1207 21:34:25.467778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43966 > 2) by scale factor 0.819788\nI1207 21:34:29.649322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41981 > 2) by scale factor 0.584827\nI1207 21:34:33.829689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6332 > 2) by scale factor 0.759532\nI1207 21:34:38.011148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15073 > 2) by scale factor 0.634773\nI1207 21:34:42.192339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69914 > 2) by scale factor 0.740976\nI1207 21:34:46.373729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43905 > 2) by scale factor 0.819991\nI1207 21:34:50.555590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71742 > 2) by scale factor 0.538007\nI1207 21:34:54.736415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77179 > 2) by scale factor 0.41913\nI1207 21:34:58.917548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46445 > 2) by scale factor 0.577291\nI1207 21:35:03.100111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44792 > 2) by scale factor 0.58006\nI1207 21:35:07.281517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95306 > 2) by scale factor 0.677263\nI1207 21:35:11.461683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26899 > 2) by scale factor 0.88145\nI1207 21:35:19.821928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01696 > 2) by 
scale factor 0.662918\nI1207 21:35:24.004142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83907 > 2) by scale factor 0.704456\nI1207 21:35:28.185075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8984 > 2) by scale factor 0.690037\nI1207 21:35:32.366276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67488 > 2) by scale factor 0.544236\nI1207 21:35:36.547231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53241 > 2) by scale factor 0.566186\nI1207 21:35:40.728066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80033 > 2) by scale factor 0.52627\nI1207 21:35:44.909755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46393 > 2) by scale factor 0.811711\nI1207 21:35:49.089951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31348 > 2) by scale factor 0.8645\nI1207 21:35:53.271899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2085 > 2) by scale factor 0.623345\nI1207 21:35:57.453836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15534 > 2) by scale factor 0.927927\nI1207 21:36:01.633991  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60369 > 2) by scale factor 0.76814\nI1207 21:36:05.814940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50774 > 2) by scale factor 0.570167\nI1207 21:36:09.996294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75645 > 2) by scale factor 0.72557\nI1207 21:36:14.177315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77647 > 2) by scale factor 0.72034\nI1207 21:36:22.536542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93978 > 2) by scale factor 0.680323\nI1207 21:36:26.717499  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.5994 > 2) by scale factor 0.769407\nI1207 21:36:35.077193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27767 > 2) by scale factor 0.878089\nI1207 21:36:39.257830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71956 > 2) by scale factor 0.735414\nI1207 21:36:43.438210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84096 > 2) by scale factor 0.703988\nI1207 21:36:47.617962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3302 > 2) by scale factor 0.858296\nI1207 21:36:51.798487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47981 > 2) by scale factor 0.806515\nI1207 21:36:55.978907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0362 > 2) by scale factor 0.658717\nI1207 21:37:00.158859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02495 > 2) by scale factor 0.661168\nI1207 21:37:04.340430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72353 > 2) by scale factor 0.734342\nI1207 21:37:08.521040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02648 > 2) by scale factor 0.660835\nI1207 21:37:12.701743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03487 > 2) by scale factor 0.659007\nI1207 21:37:16.883163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46462 > 2) by scale factor 0.811483\nI1207 21:37:21.063522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13426 > 2) by scale factor 0.937092\nI1207 21:37:29.423074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24676 > 2) by scale factor 0.89017\nI1207 21:37:33.603742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09898 > 2) by 
scale factor 0.487926\nI1207 21:37:37.784574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11064 > 2) by scale factor 0.391341\nI1207 21:37:41.964460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23849 > 2) by scale factor 0.617572\nI1207 21:37:46.145324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23086 > 2) by scale factor 0.896514\nI1207 21:37:46.157250  1922 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1207 21:40:23.328970  1922 solver.cpp:404]     Test net output #0: accuracy = 0.191941\nI1207 21:40:23.329321  1922 solver.cpp:404]     Test net output #1: loss = 8.13826 (* 1 = 8.13826 loss)\nI1207 21:40:27.271816  1922 solver.cpp:228] Iteration 10000, loss = 7.69682\nI1207 21:40:27.271872  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 21:40:27.271888  1922 solver.cpp:244]     Train net output #1: loss = 7.69682 (* 1 = 7.69682 loss)\nI1207 21:40:27.499722  1922 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1207 21:40:27.509541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11495 > 2) by scale factor 0.94565\nI1207 21:40:31.689332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9906 > 2) by scale factor 0.668761\nI1207 21:40:40.047596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65115 > 2) by scale factor 0.754389\nI1207 21:40:44.227079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65946 > 2) by scale factor 0.752032\nI1207 21:40:48.407194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44098 > 2) by scale factor 0.58123\nI1207 21:40:52.587092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97075 > 2) by scale factor 0.673231\nI1207 21:40:56.767700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36285 > 2) by 
scale factor 0.458416\nI1207 21:41:00.948127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84081 > 2) by scale factor 0.704024\nI1207 21:41:05.126763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82375 > 2) by scale factor 0.523047\nI1207 21:41:09.306764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53463 > 2) by scale factor 0.565831\nI1207 21:41:13.486968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53383 > 2) by scale factor 0.565958\nI1207 21:41:17.667495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45319 > 2) by scale factor 0.815265\nI1207 21:41:21.846913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66269 > 2) by scale factor 0.751119\nI1207 21:41:26.027372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8378 > 2) by scale factor 0.704772\nI1207 21:41:30.206826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14387 > 2) by scale factor 0.932892\nI1207 21:41:34.387771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56042 > 2) by scale factor 0.781122\nI1207 21:41:38.566684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90172 > 2) by scale factor 0.689246\nI1207 21:41:42.745502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69017 > 2) by scale factor 0.743447\nI1207 21:41:46.924826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85959 > 2) by scale factor 0.699401\nI1207 21:41:51.104564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55556 > 2) by scale factor 0.5625\nI1207 21:41:55.283656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29474 > 2) by scale factor 0.871559\nI1207 21:41:59.463039  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.76092 > 2) by scale factor 0.531785\nI1207 21:42:07.820087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27136 > 2) by scale factor 0.880529\nI1207 21:42:11.999994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28366 > 2) by scale factor 0.609076\nI1207 21:42:16.179378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08471 > 2) by scale factor 0.648358\nI1207 21:42:20.359788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64566 > 2) by scale factor 0.548597\nI1207 21:42:24.539798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61065 > 2) by scale factor 0.766094\nI1207 21:42:28.719419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32145 > 2) by scale factor 0.602147\nI1207 21:42:32.898725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93161 > 2) by scale factor 0.508698\nI1207 21:42:37.078805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17571 > 2) by scale factor 0.62978\nI1207 21:42:41.257730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91257 > 2) by scale factor 0.686678\nI1207 21:42:45.438623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03972 > 2) by scale factor 0.980527\nI1207 21:42:49.619138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79644 > 2) by scale factor 0.715195\nI1207 21:42:53.798348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97115 > 2) by scale factor 0.673141\nI1207 21:42:57.978099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57404 > 2) by scale factor 0.776989\nI1207 21:43:06.336014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11557 > 2) by 
scale factor 0.641937\nI1207 21:43:10.515058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5726 > 2) by scale factor 0.777425\nI1207 21:43:18.873077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46126 > 2) by scale factor 0.577824\nI1207 21:43:23.053894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18911 > 2) by scale factor 0.477428\nI1207 21:43:27.232360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83669 > 2) by scale factor 0.705047\nI1207 21:43:35.590263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2381 > 2) by scale factor 0.617646\nI1207 21:43:39.770529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93248 > 2) by scale factor 0.682017\nI1207 21:43:43.951084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63426 > 2) by scale factor 0.550318\nI1207 21:43:48.131007  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85627 > 2) by scale factor 0.700214\nI1207 21:43:52.310883  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35357 > 2) by scale factor 0.596379\nI1207 21:43:56.490896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26217 > 2) by scale factor 0.469244\nI1207 21:44:00.670492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11199 > 2) by scale factor 0.486383\nI1207 21:44:04.850219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01637 > 2) by scale factor 0.663049\nI1207 21:44:09.030458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71381 > 2) by scale factor 0.736971\nI1207 21:44:13.210932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77629 > 2) by scale factor 0.720386\nI1207 21:44:17.388808  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76632 > 2) by scale factor 0.531022\nI1207 21:44:21.568220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78093 > 2) by scale factor 0.719184\nI1207 21:44:25.747807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.469 > 2) by scale factor 0.576535\nI1207 21:44:29.927791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76801 > 2) by scale factor 0.530784\nI1207 21:44:34.107318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11913 > 2) by scale factor 0.641204\nI1207 21:44:38.285956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99396 > 2) by scale factor 0.668012\nI1207 21:44:42.465754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54846 > 2) by scale factor 0.784788\nI1207 21:44:46.645495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50759 > 2) by scale factor 0.797579\nI1207 21:44:50.824270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8756 > 2) by scale factor 0.695508\nI1207 21:44:55.003633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56802 > 2) by scale factor 0.560534\nI1207 21:44:59.184717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50929 > 2) by scale factor 0.569916\nI1207 21:45:03.364373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66639 > 2) by scale factor 0.750077\nI1207 21:45:07.544195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57524 > 2) by scale factor 0.776626\nI1207 21:45:11.724053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77886 > 2) by scale factor 0.719719\nI1207 21:45:15.903301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.88873 > 2) by scale factor 0.514306\nI1207 21:45:20.081820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73612 > 2) by scale factor 0.730962\nI1207 21:45:24.260803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01283 > 2) by scale factor 0.663827\nI1207 21:45:28.440039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4313 > 2) by scale factor 0.582869\nI1207 21:45:32.619513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37041 > 2) by scale factor 0.593399\nI1207 21:45:36.800359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63163 > 2) by scale factor 0.759985\nI1207 21:45:40.979846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14037 > 2) by scale factor 0.636867\nI1207 21:45:45.160527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25616 > 2) by scale factor 0.886463\nI1207 21:45:49.339603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14989 > 2) by scale factor 0.634943\nI1207 21:45:53.520053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64491 > 2) by scale factor 0.54871\nI1207 21:45:57.700073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47904 > 2) by scale factor 0.806764\nI1207 21:46:01.879339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43461 > 2) by scale factor 0.821487\nI1207 21:46:06.058631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67175 > 2) by scale factor 0.748573\nI1207 21:46:10.238986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52646 > 2) by scale factor 0.567141\nI1207 21:46:14.419385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80491 > 2) by scale factor 0.525636\nI1207 21:46:18.598814  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73189 > 2) by scale factor 0.422664\nI1207 21:46:22.780155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16102 > 2) by scale factor 0.632707\nI1207 21:46:26.959465  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56597 > 2) by scale factor 0.560858\nI1207 21:46:31.139214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22914 > 2) by scale factor 0.619361\nI1207 21:46:35.319027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02373 > 2) by scale factor 0.661435\nI1207 21:46:39.498750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45087 > 2) by scale factor 0.579563\nI1207 21:46:43.678575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83408 > 2) by scale factor 0.705697\nI1207 21:46:47.858187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45547 > 2) by scale factor 0.814508\nI1207 21:46:52.037833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72084 > 2) by scale factor 0.735068\nI1207 21:46:56.217983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48972 > 2) by scale factor 0.573111\nI1207 21:47:00.397588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78749 > 2) by scale factor 0.528054\nI1207 21:47:04.577191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01716 > 2) by scale factor 0.662876\nI1207 21:47:08.757586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75101 > 2) by scale factor 0.53319\nI1207 21:47:12.937902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15837 > 2) by scale factor 0.633239\nI1207 21:47:17.117810  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.4866 > 2) by scale factor 0.573624\nI1207 21:47:21.296538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62132 > 2) by scale factor 0.762973\nI1207 21:47:21.308435  1922 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1207 21:49:59.010694  1922 solver.cpp:404]     Test net output #0: accuracy = 0.175059\nI1207 21:49:59.011070  1922 solver.cpp:404]     Test net output #1: loss = 12.5557 (* 1 = 12.5557 loss)\nI1207 21:50:02.953599  1922 solver.cpp:228] Iteration 10100, loss = 13.7341\nI1207 21:50:02.953644  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 21:50:02.953663  1922 solver.cpp:244]     Train net output #1: loss = 13.7341 (* 1 = 13.7341 loss)\nI1207 21:50:03.176095  1922 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1207 21:50:03.186257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62366 > 2) by scale factor 0.762294\nI1207 21:50:07.364688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23033 > 2) by scale factor 0.619131\nI1207 21:50:11.542023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05822 > 2) by scale factor 0.653975\nI1207 21:50:15.719630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94581 > 2) by scale factor 0.67893\nI1207 21:50:19.898737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58931 > 2) by scale factor 0.772406\nI1207 21:50:24.077903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97494 > 2) by scale factor 0.672282\nI1207 21:50:28.255941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58028 > 2) by scale factor 0.77511\nI1207 21:50:32.434402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76509 > 2) by scale factor 0.531196\nI1207 21:50:36.612927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.26982 > 2) by scale factor 0.468404\nI1207 21:50:40.791102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19344 > 2) by scale factor 0.911811\nI1207 21:50:44.969691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39098 > 2) by scale factor 0.836477\nI1207 21:50:49.147207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00596 > 2) by scale factor 0.665346\nI1207 21:50:53.326270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95475 > 2) by scale factor 0.676876\nI1207 21:50:57.503959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64498 > 2) by scale factor 0.756149\nI1207 21:51:01.683080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35768 > 2) by scale factor 0.59565\nI1207 21:51:05.860954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34342 > 2) by scale factor 0.59819\nI1207 21:51:10.039276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20118 > 2) by scale factor 0.908605\nI1207 21:51:14.216399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52009 > 2) by scale factor 0.793622\nI1207 21:51:18.394403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94045 > 2) by scale factor 0.680168\nI1207 21:51:22.573254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43392 > 2) by scale factor 0.82172\nI1207 21:51:26.750458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96248 > 2) by scale factor 0.67511\nI1207 21:51:30.928577  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59712 > 2) by scale factor 0.770083\nI1207 21:51:35.105002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.541 > 2) by scale factor 0.564812\nI1207 21:51:39.283452  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07996 > 2) by scale factor 0.490201\nI1207 21:51:43.461153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11832 > 2) by scale factor 0.944143\nI1207 21:51:47.638681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72769 > 2) by scale factor 0.536526\nI1207 21:51:51.817358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67624 > 2) by scale factor 0.427694\nI1207 21:51:55.996664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23627 > 2) by scale factor 0.472114\nI1207 21:52:00.175298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50374 > 2) by scale factor 0.570819\nI1207 21:52:04.352795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74908 > 2) by scale factor 0.533465\nI1207 21:52:08.531095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51723 > 2) by scale factor 0.794523\nI1207 21:52:12.709445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77563 > 2) by scale factor 0.720557\nI1207 21:52:16.888620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2736 > 2) by scale factor 0.879663\nI1207 21:52:25.243554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30177 > 2) by scale factor 0.868898\nI1207 21:52:33.597411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65443 > 2) by scale factor 0.547282\nI1207 21:52:37.775209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98778 > 2) by scale factor 0.669393\nI1207 21:52:41.954030  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01393 > 2) by scale factor 0.498265\nI1207 21:52:46.132122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.27458 > 2) by scale factor 0.467882\nI1207 21:52:50.309442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78846 > 2) by scale factor 0.717243\nI1207 21:52:54.487102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22275 > 2) by scale factor 0.473625\nI1207 21:52:58.665276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19245 > 2) by scale factor 0.626478\nI1207 21:53:02.843134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40518 > 2) by scale factor 0.83154\nI1207 21:53:07.020015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20545 > 2) by scale factor 0.623937\nI1207 21:53:11.198055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28624 > 2) by scale factor 0.608598\nI1207 21:53:15.375463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55492 > 2) by scale factor 0.782802\nI1207 21:53:19.552495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31359 > 2) by scale factor 0.864459\nI1207 21:53:23.730819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23323 > 2) by scale factor 0.895563\nI1207 21:53:27.909413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20894 > 2) by scale factor 0.475179\nI1207 21:53:32.087996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06492 > 2) by scale factor 0.652545\nI1207 21:53:36.265666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11098 > 2) by scale factor 0.486501\nI1207 21:53:40.442637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34353 > 2) by scale factor 0.460455\nI1207 21:53:44.620581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76071 > 2) by scale factor 0.531815\nI1207 21:53:48.796470  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91239 > 2) by scale factor 0.511196\nI1207 21:53:52.974661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39999 > 2) by scale factor 0.454546\nI1207 21:53:57.152763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55781 > 2) by scale factor 0.781919\nI1207 21:54:01.329140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15322 > 2) by scale factor 0.92884\nI1207 21:54:05.506444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09241 > 2) by scale factor 0.646746\nI1207 21:54:09.683653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66363 > 2) by scale factor 0.750856\nI1207 21:54:13.861841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76693 > 2) by scale factor 0.530936\nI1207 21:54:18.039459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66281 > 2) by scale factor 0.546029\nI1207 21:54:22.216984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16958 > 2) by scale factor 0.921839\nI1207 21:54:26.394637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29722 > 2) by scale factor 0.870617\nI1207 21:54:30.572505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91572 > 2) by scale factor 0.685938\nI1207 21:54:34.750844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79117 > 2) by scale factor 0.527542\nI1207 21:54:38.929077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18467 > 2) by scale factor 0.915469\nI1207 21:54:43.107198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62905 > 2) by scale factor 0.760731\nI1207 21:54:47.284319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.64249 > 2) by scale factor 0.430804\nI1207 21:54:51.461050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71268 > 2) by scale factor 0.737279\nI1207 21:54:55.638875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22428 > 2) by scale factor 0.620294\nI1207 21:54:59.816961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57448 > 2) by scale factor 0.559522\nI1207 21:55:03.995468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81831 > 2) by scale factor 0.709644\nI1207 21:55:08.174192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08723 > 2) by scale factor 0.647831\nI1207 21:55:12.351922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45614 > 2) by scale factor 0.448818\nI1207 21:55:16.529723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20738 > 2) by scale factor 0.623561\nI1207 21:55:20.707721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33811 > 2) by scale factor 0.599142\nI1207 21:55:24.885828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38319 > 2) by scale factor 0.591158\nI1207 21:55:29.063233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03414 > 2) by scale factor 0.659166\nI1207 21:55:33.241641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34931 > 2) by scale factor 0.597138\nI1207 21:55:37.419833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52959 > 2) by scale factor 0.566638\nI1207 21:55:41.598253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52326 > 2) by scale factor 0.567656\nI1207 21:55:45.776259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99364 > 2) by scale factor 0.668082\nI1207 
21:55:49.953568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47197 > 2) by scale factor 0.809071\nI1207 21:55:54.131101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99124 > 2) by scale factor 0.66862\nI1207 21:55:58.310142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64927 > 2) by scale factor 0.754926\nI1207 21:56:02.486651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21368 > 2) by scale factor 0.903473\nI1207 21:56:06.663887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66299 > 2) by scale factor 0.751036\nI1207 21:56:10.841981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.157 > 2) by scale factor 0.633512\nI1207 21:56:15.019496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78301 > 2) by scale factor 0.718648\nI1207 21:56:19.197551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74556 > 2) by scale factor 0.72845\nI1207 21:56:23.374907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62479 > 2) by scale factor 0.551756\nI1207 21:56:27.553493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71431 > 2) by scale factor 0.736836\nI1207 21:56:31.731655  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79602 > 2) by scale factor 0.715302\nI1207 21:56:35.908550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34428 > 2) by scale factor 0.598036\nI1207 21:56:40.086395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24933 > 2) by scale factor 0.615512\nI1207 21:56:44.263896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08966 > 2) by scale factor 0.647321\nI1207 21:56:48.441922  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.63843 > 2) by scale factor 0.758026\nI1207 21:56:52.620055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6023 > 2) by scale factor 0.768551\nI1207 21:56:56.807474  1922 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1207 21:59:33.757969  1922 solver.cpp:404]     Test net output #0: accuracy = 0.192765\nI1207 21:59:33.758342  1922 solver.cpp:404]     Test net output #1: loss = 10.1468 (* 1 = 10.1468 loss)\nI1207 21:59:37.700559  1922 solver.cpp:228] Iteration 10200, loss = 10.6252\nI1207 21:59:37.700613  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 21:59:37.700630  1922 solver.cpp:244]     Train net output #1: loss = 10.6252 (* 1 = 10.6252 loss)\nI1207 21:59:37.924266  1922 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1207 21:59:37.934046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95393 > 2) by scale factor 0.677063\nI1207 21:59:42.113966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77488 > 2) by scale factor 0.529819\nI1207 21:59:46.293395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04909 > 2) by scale factor 0.493939\nI1207 21:59:50.473331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91719 > 2) by scale factor 0.685592\nI1207 21:59:54.653216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10921 > 2) by scale factor 0.64325\nI1207 21:59:58.833031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33739 > 2) by scale factor 0.599271\nI1207 22:00:03.012284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12191 > 2) by scale factor 0.640633\nI1207 22:00:07.192808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84705 > 2) by scale factor 0.702481\nI1207 22:00:11.373505  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.78076 > 2) by scale factor 0.719228\nI1207 22:00:15.553093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02531 > 2) by scale factor 0.661089\nI1207 22:00:19.733145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47928 > 2) by scale factor 0.574832\nI1207 22:00:23.913775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84136 > 2) by scale factor 0.703888\nI1207 22:00:36.447726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98922 > 2) by scale factor 0.669071\nI1207 22:00:40.626674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81421 > 2) by scale factor 0.524355\nI1207 22:00:44.806552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67313 > 2) by scale factor 0.748186\nI1207 22:00:48.985642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.902 > 2) by scale factor 0.512558\nI1207 22:00:53.166424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81804 > 2) by scale factor 0.523829\nI1207 22:00:57.345672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48446 > 2) by scale factor 0.805002\nI1207 22:01:01.525913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3458 > 2) by scale factor 0.597764\nI1207 22:01:05.705812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88921 > 2) by scale factor 0.514243\nI1207 22:01:09.885479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32436 > 2) by scale factor 0.601619\nI1207 22:01:14.065186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91441 > 2) by scale factor 0.686244\nI1207 22:01:22.423503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42894 > 2) by scale factor 0.823405\nI1207 
22:01:26.602455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77516 > 2) by scale factor 0.529778\nI1207 22:01:30.781818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8228 > 2) by scale factor 0.523177\nI1207 22:01:34.961640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19261 > 2) by scale factor 0.626447\nI1207 22:01:39.139415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24714 > 2) by scale factor 0.615927\nI1207 22:01:43.318074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90587 > 2) by scale factor 0.688261\nI1207 22:01:47.496938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81338 > 2) by scale factor 0.524469\nI1207 22:01:51.677389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67969 > 2) by scale factor 0.746355\nI1207 22:01:55.856637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02973 > 2) by scale factor 0.660125\nI1207 22:02:00.035647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56813 > 2) by scale factor 0.778778\nI1207 22:02:04.215133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19271 > 2) by scale factor 0.626428\nI1207 22:02:08.394273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16414 > 2) by scale factor 0.632083\nI1207 22:02:12.572914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90173 > 2) by scale factor 0.512593\nI1207 22:02:16.751829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88055 > 2) by scale factor 0.694311\nI1207 22:02:20.932163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76673 > 2) by scale factor 0.530965\nI1207 22:02:25.111896  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.40071 > 2) by scale factor 0.833086\nI1207 22:02:29.290725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48022 > 2) by scale factor 0.574676\nI1207 22:02:37.646782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75145 > 2) by scale factor 0.726889\nI1207 22:02:41.826316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70003 > 2) by scale factor 0.540537\nI1207 22:02:46.004994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81382 > 2) by scale factor 0.710778\nI1207 22:02:50.183933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07586 > 2) by scale factor 0.490694\nI1207 22:02:54.364545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62843 > 2) by scale factor 0.551202\nI1207 22:02:58.544543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15764 > 2) by scale factor 0.633385\nI1207 22:03:02.724087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44257 > 2) by scale factor 0.818811\nI1207 22:03:06.902627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83436 > 2) by scale factor 0.705626\nI1207 22:03:11.081501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52475 > 2) by scale factor 0.567416\nI1207 22:03:15.260430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88405 > 2) by scale factor 0.693469\nI1207 22:03:19.439877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18547 > 2) by scale factor 0.915135\nI1207 22:03:23.618476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59767 > 2) by scale factor 0.769921\nI1207 22:03:27.798431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06462 > 2) by scale factor 
0.652609\nI1207 22:03:31.979400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51733 > 2) by scale factor 0.568613\nI1207 22:03:36.157138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73739 > 2) by scale factor 0.535133\nI1207 22:03:40.336403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30514 > 2) by scale factor 0.867626\nI1207 22:03:44.516000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16196 > 2) by scale factor 0.925084\nI1207 22:03:52.872795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9436 > 2) by scale factor 0.679439\nI1207 22:03:57.051668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09078 > 2) by scale factor 0.647085\nI1207 22:04:01.230803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08235 > 2) by scale factor 0.960452\nI1207 22:04:05.409957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21168 > 2) by scale factor 0.904288\nI1207 22:04:09.588709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71918 > 2) by scale factor 0.735515\nI1207 22:04:13.768227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5701 > 2) by scale factor 0.778181\nI1207 22:04:17.947638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60281 > 2) by scale factor 0.768401\nI1207 22:04:22.126951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87676 > 2) by scale factor 0.695226\nI1207 22:04:26.305809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67209 > 2) by scale factor 0.748479\nI1207 22:04:30.486017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92109 > 2) by scale factor 0.684676\nI1207 22:04:34.664949  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.49554 > 2) by scale factor 0.801429\nI1207 22:04:38.843816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89404 > 2) by scale factor 0.691076\nI1207 22:04:43.022828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34641 > 2) by scale factor 0.852368\nI1207 22:04:47.200942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58386 > 2) by scale factor 0.558057\nI1207 22:04:51.380090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93597 > 2) by scale factor 0.508134\nI1207 22:04:55.558745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50664 > 2) by scale factor 0.797881\nI1207 22:04:59.738221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03757 > 2) by scale factor 0.495348\nI1207 22:05:03.917618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4702 > 2) by scale factor 0.447407\nI1207 22:05:08.097149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73002 > 2) by scale factor 0.732595\nI1207 22:05:12.277176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4463 > 2) by scale factor 0.81756\nI1207 22:05:24.808568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33563 > 2) by scale factor 0.856298\nI1207 22:05:28.988631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13584 > 2) by scale factor 0.9364\nI1207 22:05:33.167587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05885 > 2) by scale factor 0.65384\nI1207 22:05:37.346463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80242 > 2) by scale factor 0.525981\nI1207 22:05:41.524246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75694 > 2) by scale factor 
0.420439\nI1207 22:05:45.703254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07139 > 2) by scale factor 0.491232\nI1207 22:05:49.882694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55068 > 2) by scale factor 0.784105\nI1207 22:05:54.061738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35522 > 2) by scale factor 0.849178\nI1207 22:05:58.241299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90685 > 2) by scale factor 0.511922\nI1207 22:06:02.421404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13312 > 2) by scale factor 0.937594\nI1207 22:06:06.601095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23584 > 2) by scale factor 0.894517\nI1207 22:06:14.957551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10779 > 2) by scale factor 0.94886\nI1207 22:06:19.136579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09508 > 2) by scale factor 0.954617\nI1207 22:06:23.317546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49273 > 2) by scale factor 0.445164\nI1207 22:06:27.497193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45498 > 2) by scale factor 0.578875\nI1207 22:06:31.676297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19943 > 2) by scale factor 0.625111\nI1207 22:06:31.688246  1922 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1207 22:09:08.356335  1922 solver.cpp:404]     Test net output #0: accuracy = 0.168941\nI1207 22:09:08.356711  1922 solver.cpp:404]     Test net output #1: loss = 9.76665 (* 1 = 9.76665 loss)\nI1207 22:09:12.299554  1922 solver.cpp:228] Iteration 10300, loss = 9.53664\nI1207 22:09:12.299604  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 22:09:12.299623  1922 
solver.cpp:244]     Train net output #1: loss = 9.53663 (* 1 = 9.53663 loss)\nI1207 22:09:12.527845  1922 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1207 22:09:12.537641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59609 > 2) by scale factor 0.55616\nI1207 22:09:16.718964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43004 > 2) by scale factor 0.823032\nI1207 22:09:20.899782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15716 > 2) by scale factor 0.927147\nI1207 22:09:25.081295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01596 > 2) by scale factor 0.992082\nI1207 22:09:29.263047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36207 > 2) by scale factor 0.846714\nI1207 22:09:33.444267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68694 > 2) by scale factor 0.74434\nI1207 22:09:37.625938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18341 > 2) by scale factor 0.915999\nI1207 22:09:41.806604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41407 > 2) by scale factor 0.585811\nI1207 22:09:45.988658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57887 > 2) by scale factor 0.775534\nI1207 22:09:50.171103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04658 > 2) by scale factor 0.656474\nI1207 22:09:54.351860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06368 > 2) by scale factor 0.65281\nI1207 22:09:58.532866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51725 > 2) by scale factor 0.794518\nI1207 22:10:02.712599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27514 > 2) by scale factor 0.879065\nI1207 22:10:06.893712  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.941 > 2) by scale factor 0.507486\nI1207 22:10:11.075013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96593 > 2) by scale factor 0.674324\nI1207 22:10:15.255976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65099 > 2) by scale factor 0.754435\nI1207 22:10:19.437985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41518 > 2) by scale factor 0.585621\nI1207 22:10:23.619297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00212 > 2) by scale factor 0.666196\nI1207 22:10:27.800520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74639 > 2) by scale factor 0.533847\nI1207 22:10:31.981259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22225 > 2) by scale factor 0.620684\nI1207 22:10:36.162557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05935 > 2) by scale factor 0.653733\nI1207 22:10:48.699573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23807 > 2) by scale factor 0.617652\nI1207 22:10:52.881541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23385 > 2) by scale factor 0.895315\nI1207 22:10:57.063040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41969 > 2) by scale factor 0.584848\nI1207 22:11:01.244773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64972 > 2) by scale factor 0.754796\nI1207 22:11:05.425808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76342 > 2) by scale factor 0.72374\nI1207 22:11:09.607302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00517 > 2) by scale factor 0.997423\nI1207 22:11:13.788307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14667 > 2) by 
scale factor 0.931677\nI1207 22:11:17.969625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25727 > 2) by scale factor 0.469785\nI1207 22:11:22.151899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5088 > 2) by scale factor 0.797193\nI1207 22:11:26.334086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24291 > 2) by scale factor 0.61673\nI1207 22:11:30.516296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32723 > 2) by scale factor 0.46219\nI1207 22:11:34.697561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60559 > 2) by scale factor 0.554694\nI1207 22:11:38.879842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72471 > 2) by scale factor 0.734022\nI1207 22:11:43.060915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19661 > 2) by scale factor 0.625663\nI1207 22:11:47.242789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82754 > 2) by scale factor 0.707329\nI1207 22:11:51.422873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43913 > 2) by scale factor 0.819965\nI1207 22:11:59.780169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3701 > 2) by scale factor 0.593455\nI1207 22:12:03.961454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37663 > 2) by scale factor 0.841527\nI1207 22:12:12.320720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22082 > 2) by scale factor 0.90057\nI1207 22:12:16.501916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32145 > 2) by scale factor 0.602147\nI1207 22:12:20.683503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82996 > 2) by scale factor 0.414082\nI1207 22:12:24.865020  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.71617 > 2) by scale factor 0.538189\nI1207 22:12:29.047080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46395 > 2) by scale factor 0.577376\nI1207 22:12:33.228996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35306 > 2) by scale factor 0.59647\nI1207 22:12:37.410925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6536 > 2) by scale factor 0.547405\nI1207 22:12:41.592592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11715 > 2) by scale factor 0.641613\nI1207 22:12:45.773375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77681 > 2) by scale factor 0.720252\nI1207 22:12:49.954740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06612 > 2) by scale factor 0.65229\nI1207 22:12:54.136627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91032 > 2) by scale factor 0.511467\nI1207 22:12:58.317631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78013 > 2) by scale factor 0.719391\nI1207 22:13:02.499294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27751 > 2) by scale factor 0.878153\nI1207 22:13:06.680744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99997 > 2) by scale factor 0.666672\nI1207 22:13:10.862946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03894 > 2) by scale factor 0.658123\nI1207 22:13:15.044541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27005 > 2) by scale factor 0.611612\nI1207 22:13:19.225977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10253 > 2) by scale factor 0.951235\nI1207 22:13:23.406913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37401 > 2) by 
scale factor 0.592766\nI1207 22:13:27.587913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05866 > 2) by scale factor 0.653881\nI1207 22:13:31.768949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81126 > 2) by scale factor 0.711425\nI1207 22:13:35.949589  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21464 > 2) by scale factor 0.90308\nI1207 22:13:40.130771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91934 > 2) by scale factor 0.685086\nI1207 22:13:44.311501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44921 > 2) by scale factor 0.816591\nI1207 22:13:48.492517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08601 > 2) by scale factor 0.648087\nI1207 22:13:52.672590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28023 > 2) by scale factor 0.877104\nI1207 22:13:56.853209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68508 > 2) by scale factor 0.744856\nI1207 22:14:01.033502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22666 > 2) by scale factor 0.473187\nI1207 22:14:05.213234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20904 > 2) by scale factor 0.62324\nI1207 22:14:09.395221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77531 > 2) by scale factor 0.72064\nI1207 22:14:13.575495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21958 > 2) by scale factor 0.90107\nI1207 22:14:21.936503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72299 > 2) by scale factor 0.537203\nI1207 22:14:26.116253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2323 > 2) by scale factor 0.895936\nI1207 22:14:30.296386  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.63241 > 2) by scale factor 0.75976\nI1207 22:14:34.477821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06682 > 2) by scale factor 0.652142\nI1207 22:14:42.837378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24743 > 2) by scale factor 0.615872\nI1207 22:14:47.017082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59231 > 2) by scale factor 0.435511\nI1207 22:14:51.197417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11081 > 2) by scale factor 0.64292\nI1207 22:14:55.378243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38362 > 2) by scale factor 0.839059\nI1207 22:14:59.557819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09297 > 2) by scale factor 0.955578\nI1207 22:15:03.738996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9259 > 2) by scale factor 0.683551\nI1207 22:15:07.919858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80934 > 2) by scale factor 0.525025\nI1207 22:15:12.100528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85176 > 2) by scale factor 0.701322\nI1207 22:15:16.280660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92325 > 2) by scale factor 0.684171\nI1207 22:15:20.460388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80996 > 2) by scale factor 0.52494\nI1207 22:15:24.641914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45184 > 2) by scale factor 0.579401\nI1207 22:15:28.823053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02005 > 2) by scale factor 0.990073\nI1207 22:15:33.003406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39625 > 2) by 
scale factor 0.588885\nI1207 22:15:37.183980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61022 > 2) by scale factor 0.766218\nI1207 22:15:45.544976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45824 > 2) by scale factor 0.578328\nI1207 22:15:49.725473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40463 > 2) by scale factor 0.587436\nI1207 22:15:53.906726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79927 > 2) by scale factor 0.714472\nI1207 22:15:58.089890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6312 > 2) by scale factor 0.760109\nI1207 22:16:02.272507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18011 > 2) by scale factor 0.628908\nI1207 22:16:06.455467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80017 > 2) by scale factor 0.714243\nI1207 22:16:06.467438  1922 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1207 22:18:43.984038  1922 solver.cpp:404]     Test net output #0: accuracy = 0.227588\nI1207 22:18:43.984422  1922 solver.cpp:404]     Test net output #1: loss = 7.98824 (* 1 = 7.98824 loss)\nI1207 22:18:47.927945  1922 solver.cpp:228] Iteration 10400, loss = 8.00917\nI1207 22:18:47.927995  1922 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 22:18:47.928014  1922 solver.cpp:244]     Train net output #1: loss = 8.00916 (* 1 = 8.00916 loss)\nI1207 22:18:48.150236  1922 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1207 22:18:48.160341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31839 > 2) by scale factor 0.862668\nI1207 22:18:52.340821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68679 > 2) by scale factor 0.542477\nI1207 22:18:56.521795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49537 > 2) 
by scale factor 0.801485\nI1207 22:19:00.702028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85129 > 2) by scale factor 0.519306\nI1207 22:19:09.061626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16321 > 2) by scale factor 0.924553\nI1207 22:19:13.241758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79823 > 2) by scale factor 0.714738\nI1207 22:19:17.422930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87859 > 2) by scale factor 0.694784\nI1207 22:19:21.603322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43356 > 2) by scale factor 0.82184\nI1207 22:19:25.783041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61572 > 2) by scale factor 0.553141\nI1207 22:19:29.963245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11744 > 2) by scale factor 0.485739\nI1207 22:19:34.143028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06212 > 2) by scale factor 0.492354\nI1207 22:19:38.323019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71149 > 2) by scale factor 0.737602\nI1207 22:19:42.503538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9086 > 2) by scale factor 0.687615\nI1207 22:19:46.682895  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52677 > 2) by scale factor 0.791525\nI1207 22:19:50.862716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19309 > 2) by scale factor 0.626352\nI1207 22:19:55.043010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16419 > 2) by scale factor 0.924131\nI1207 22:19:59.222515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89934 > 2) by scale factor 0.512908\nI1207 22:20:03.403218  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89989 > 2) by scale factor 0.689681\nI1207 22:20:07.583645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10541 > 2) by scale factor 0.644037\nI1207 22:20:11.763772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54863 > 2) by scale factor 0.784736\nI1207 22:20:15.943217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93427 > 2) by scale factor 0.508353\nI1207 22:20:20.124292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92714 > 2) by scale factor 0.683262\nI1207 22:20:24.305382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.173 > 2) by scale factor 0.920386\nI1207 22:20:28.485756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80759 > 2) by scale factor 0.525267\nI1207 22:20:32.666582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25384 > 2) by scale factor 0.614659\nI1207 22:20:36.846954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26794 > 2) by scale factor 0.612006\nI1207 22:20:41.026530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16702 > 2) by scale factor 0.47996\nI1207 22:20:45.207269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43712 > 2) by scale factor 0.820642\nI1207 22:20:49.386796  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43849 > 2) by scale factor 0.82018\nI1207 22:20:53.567409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17997 > 2) by scale factor 0.478473\nI1207 22:20:57.749521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08757 > 2) by scale factor 0.958052\nI1207 22:21:01.929302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.89266 > 2) by scale factor 0.691406\nI1207 22:21:06.108870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43011 > 2) by scale factor 0.823007\nI1207 22:21:10.289391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19546 > 2) by scale factor 0.910972\nI1207 22:21:14.469553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2635 > 2) by scale factor 0.612839\nI1207 22:21:18.649739  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1254 > 2) by scale factor 0.639918\nI1207 22:21:22.830162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02346 > 2) by scale factor 0.988407\nI1207 22:21:27.009726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23384 > 2) by scale factor 0.472384\nI1207 22:21:31.189602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75988 > 2) by scale factor 0.724668\nI1207 22:21:35.369596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25619 > 2) by scale factor 0.614215\nI1207 22:21:39.550635  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01196 > 2) by scale factor 0.66402\nI1207 22:21:43.731531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12384 > 2) by scale factor 0.484985\nI1207 22:21:47.911510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93808 > 2) by scale factor 0.680717\nI1207 22:21:52.092782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81445 > 2) by scale factor 0.710619\nI1207 22:21:56.273509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35696 > 2) by scale factor 0.848549\nI1207 22:22:00.454423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62425 > 2) by scale factor 0.551838\nI1207 22:22:04.635466  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63688 > 2) by scale factor 0.758473\nI1207 22:22:08.816498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.768 > 2) by scale factor 0.722544\nI1207 22:22:12.996476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69164 > 2) by scale factor 0.541765\nI1207 22:22:17.176512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44239 > 2) by scale factor 0.818869\nI1207 22:22:21.356374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00611 > 2) by scale factor 0.996954\nI1207 22:22:25.537356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58136 > 2) by scale factor 0.774785\nI1207 22:22:29.717535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01884 > 2) by scale factor 0.662505\nI1207 22:22:33.897143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7799 > 2) by scale factor 0.719449\nI1207 22:22:38.076761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65436 > 2) by scale factor 0.547292\nI1207 22:22:42.256970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49473 > 2) by scale factor 0.801689\nI1207 22:22:46.437325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48668 > 2) by scale factor 0.804284\nI1207 22:22:50.617398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67855 > 2) by scale factor 0.746673\nI1207 22:22:54.797492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05556 > 2) by scale factor 0.654544\nI1207 22:22:58.978260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45283 > 2) by scale factor 0.579235\nI1207 22:23:03.157559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.77054 > 2) by scale factor 0.721881\nI1207 22:23:11.514626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97737 > 2) by scale factor 0.671734\nI1207 22:23:15.694944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88221 > 2) by scale factor 0.693912\nI1207 22:23:19.873808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18945 > 2) by scale factor 0.913472\nI1207 22:23:24.052970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48896 > 2) by scale factor 0.803549\nI1207 22:23:28.232473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13998 > 2) by scale factor 0.636948\nI1207 22:23:32.412729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57466 > 2) by scale factor 0.776801\nI1207 22:23:36.591792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0368 > 2) by scale factor 0.981934\nI1207 22:23:44.950043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22166 > 2) by scale factor 0.900227\nI1207 22:23:49.130430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61396 > 2) by scale factor 0.55341\nI1207 22:23:53.310986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85763 > 2) by scale factor 0.518454\nI1207 22:23:57.490749  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47584 > 2) by scale factor 0.807806\nI1207 22:24:01.670648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31273 > 2) by scale factor 0.864779\nI1207 22:24:05.851006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08905 > 2) by scale factor 0.489111\nI1207 22:24:10.032055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96588 > 2) by scale factor 0.674336\nI1207 22:24:14.211172  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96847 > 2) by scale factor 0.673748\nI1207 22:24:18.392112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18082 > 2) by scale factor 0.628768\nI1207 22:24:22.572741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11789 > 2) by scale factor 0.944335\nI1207 22:24:26.752519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79463 > 2) by scale factor 0.527061\nI1207 22:24:30.933746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1041 > 2) by scale factor 0.950526\nI1207 22:24:35.114464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41063 > 2) by scale factor 0.45345\nI1207 22:24:39.293761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45975 > 2) by scale factor 0.578077\nI1207 22:24:43.473755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30959 > 2) by scale factor 0.865953\nI1207 22:24:47.653134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40811 > 2) by scale factor 0.830526\nI1207 22:24:51.832959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26058 > 2) by scale factor 0.46942\nI1207 22:24:56.012876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29933 > 2) by scale factor 0.465189\nI1207 22:25:00.192118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65399 > 2) by scale factor 0.547347\nI1207 22:25:04.372874  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84658 > 2) by scale factor 0.702598\nI1207 22:25:08.552675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84123 > 2) by scale factor 0.703921\nI1207 22:25:12.733335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.45144 > 2) by scale factor 0.579468\nI1207 22:25:16.914155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69501 > 2) by scale factor 0.742112\nI1207 22:25:21.094733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86431 > 2) by scale factor 0.698248\nI1207 22:25:25.274698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94831 > 2) by scale factor 0.678355\nI1207 22:25:33.633342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18089 > 2) by scale factor 0.628756\nI1207 22:25:37.813879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12661 > 2) by scale factor 0.48466\nI1207 22:25:41.994031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88006 > 2) by scale factor 0.694431\nI1207 22:25:42.005964  1922 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1207 22:28:19.657044  1922 solver.cpp:404]     Test net output #0: accuracy = 0.160118\nI1207 22:28:19.657416  1922 solver.cpp:404]     Test net output #1: loss = 11.9163 (* 1 = 11.9163 loss)\nI1207 22:28:23.600880  1922 solver.cpp:228] Iteration 10500, loss = 11.5172\nI1207 22:28:23.600932  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 22:28:23.600950  1922 solver.cpp:244]     Train net output #1: loss = 11.5172 (* 1 = 11.5172 loss)\nI1207 22:28:23.822970  1922 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1207 22:28:23.833220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34013 > 2) by scale factor 0.598779\nI1207 22:28:28.013973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32768 > 2) by scale factor 0.601018\nI1207 22:28:32.194159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4117 > 2) by scale factor 0.586218\nI1207 22:28:36.374105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.3653 > 2) by scale factor 0.5943\nI1207 22:28:40.553997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86523 > 2) by scale factor 0.698025\nI1207 22:28:44.733122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63119 > 2) by scale factor 0.550783\nI1207 22:28:48.912940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62852 > 2) by scale factor 0.760884\nI1207 22:28:53.094736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.449 > 2) by scale factor 0.816659\nI1207 22:28:57.275445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4203 > 2) by scale factor 0.584744\nI1207 22:29:01.455415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67885 > 2) by scale factor 0.74659\nI1207 22:29:05.635992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70855 > 2) by scale factor 0.738402\nI1207 22:29:09.815948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32913 > 2) by scale factor 0.600758\nI1207 22:29:13.995093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40384 > 2) by scale factor 0.454149\nI1207 22:29:18.174875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34825 > 2) by scale factor 0.851699\nI1207 22:29:22.355437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71799 > 2) by scale factor 0.735839\nI1207 22:29:26.534766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26017 > 2) by scale factor 0.613465\nI1207 22:29:30.715035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1003 > 2) by scale factor 0.645099\nI1207 22:29:34.895151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07511 > 2) by scale factor 0.650383\nI1207 22:29:39.074236  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5159 > 2) by scale factor 0.794944\nI1207 22:29:43.254603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47226 > 2) by scale factor 0.808977\nI1207 22:29:47.433971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74232 > 2) by scale factor 0.534427\nI1207 22:29:51.611716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38023 > 2) by scale factor 0.591676\nI1207 22:29:55.790573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21742 > 2) by scale factor 0.621617\nI1207 22:30:04.147698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71949 > 2) by scale factor 0.735431\nI1207 22:30:08.328127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27382 > 2) by scale factor 0.610906\nI1207 22:30:16.686779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55356 > 2) by scale factor 0.562816\nI1207 22:30:20.866266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45036 > 2) by scale factor 0.449401\nI1207 22:30:25.045635  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53408 > 2) by scale factor 0.565918\nI1207 22:30:29.225877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.676 > 2) by scale factor 0.54407\nI1207 22:30:33.406082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85718 > 2) by scale factor 0.699992\nI1207 22:30:37.585248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31118 > 2) by scale factor 0.865361\nI1207 22:30:41.765658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75865 > 2) by scale factor 0.724992\nI1207 22:30:45.944653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.97763 > 2) by scale factor 0.502812\nI1207 22:30:50.124344  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73319 > 2) by scale factor 0.731745\nI1207 22:30:54.305287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72656 > 2) by scale factor 0.423141\nI1207 22:30:58.485150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21623 > 2) by scale factor 0.621845\nI1207 22:31:06.841759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38465 > 2) by scale factor 0.838696\nI1207 22:31:11.020206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50677 > 2) by scale factor 0.570326\nI1207 22:31:15.200409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39112 > 2) by scale factor 0.589776\nI1207 22:31:19.380267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41912 > 2) by scale factor 0.826747\nI1207 22:31:23.559537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19189 > 2) by scale factor 0.477112\nI1207 22:31:27.738831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93101 > 2) by scale factor 0.508775\nI1207 22:31:31.918570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58829 > 2) by scale factor 0.77271\nI1207 22:31:36.098726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95851 > 2) by scale factor 0.505241\nI1207 22:31:40.278246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9794 > 2) by scale factor 0.671276\nI1207 22:31:44.457907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83357 > 2) by scale factor 0.521707\nI1207 22:31:48.636936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07338 > 2) by scale factor 0.650749\nI1207 22:31:52.816458  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73998 > 2) by scale factor 0.729932\nI1207 22:31:56.996117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28265 > 2) by scale factor 0.609264\nI1207 22:32:01.176772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1159 > 2) by scale factor 0.641868\nI1207 22:32:05.356106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03403 > 2) by scale factor 0.495782\nI1207 22:32:09.536315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8655 > 2) by scale factor 0.697958\nI1207 22:32:13.714818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80242 > 2) by scale factor 0.713669\nI1207 22:32:17.893833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4144 > 2) by scale factor 0.453063\nI1207 22:32:22.074609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65898 > 2) by scale factor 0.752167\nI1207 22:32:26.253643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25501 > 2) by scale factor 0.614438\nI1207 22:32:30.432811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74514 > 2) by scale factor 0.534026\nI1207 22:32:34.610894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00783 > 2) by scale factor 0.399374\nI1207 22:32:38.789713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6799 > 2) by scale factor 0.543493\nI1207 22:32:42.971446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29069 > 2) by scale factor 0.873099\nI1207 22:32:47.151175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0712 > 2) by scale factor 0.651211\nI1207 22:32:51.331441  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.67172 > 2) by scale factor 0.544704\nI1207 22:32:55.511528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2367 > 2) by scale factor 0.617913\nI1207 22:33:03.867594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72785 > 2) by scale factor 0.733179\nI1207 22:33:08.047185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2978 > 2) by scale factor 0.870398\nI1207 22:33:12.226105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58363 > 2) by scale factor 0.558093\nI1207 22:33:16.405048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08744 > 2) by scale factor 0.489304\nI1207 22:33:20.584616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4944 > 2) by scale factor 0.801796\nI1207 22:33:24.764273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28514 > 2) by scale factor 0.608802\nI1207 22:33:28.943244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06967 > 2) by scale factor 0.49144\nI1207 22:33:33.123076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17282 > 2) by scale factor 0.920465\nI1207 22:33:37.302968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09924 > 2) by scale factor 0.952725\nI1207 22:33:41.482323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73671 > 2) by scale factor 0.730804\nI1207 22:33:54.018191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86513 > 2) by scale factor 0.517448\nI1207 22:33:58.197897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84368 > 2) by scale factor 0.703313\nI1207 22:34:02.378207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51623 > 2) by scale factor 0.794838\nI1207 22:34:06.558233  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19076 > 2) by scale factor 0.912926\nI1207 22:34:10.737161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42909 > 2) by scale factor 0.823352\nI1207 22:34:14.916849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70344 > 2) by scale factor 0.540038\nI1207 22:34:19.097184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53485 > 2) by scale factor 0.789\nI1207 22:34:23.277456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35152 > 2) by scale factor 0.596744\nI1207 22:34:27.458081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39357 > 2) by scale factor 0.835571\nI1207 22:34:31.637136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68992 > 2) by scale factor 0.743517\nI1207 22:34:39.993846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19236 > 2) by scale factor 0.477058\nI1207 22:34:44.172695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55293 > 2) by scale factor 0.439278\nI1207 22:34:48.353662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19678 > 2) by scale factor 0.910424\nI1207 22:34:52.534337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06994 > 2) by scale factor 0.651478\nI1207 22:34:56.713011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21372 > 2) by scale factor 0.47464\nI1207 22:35:00.894070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71917 > 2) by scale factor 0.423803\nI1207 22:35:05.074529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93984 > 2) by scale factor 0.404871\nI1207 22:35:09.255337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.42019 > 2) by scale factor 0.584763\nI1207 22:35:13.435094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14303 > 2) by scale factor 0.933257\nI1207 22:35:17.616150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92037 > 2) by scale factor 0.510156\nI1207 22:35:17.628041  1922 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1207 22:37:55.362753  1922 solver.cpp:404]     Test net output #0: accuracy = 0.166353\nI1207 22:37:55.363162  1922 solver.cpp:404]     Test net output #1: loss = 10.2298 (* 1 = 10.2298 loss)\nI1207 22:37:59.306036  1922 solver.cpp:228] Iteration 10600, loss = 10.0009\nI1207 22:37:59.306087  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 22:37:59.306104  1922 solver.cpp:244]     Train net output #1: loss = 10.0009 (* 1 = 10.0009 loss)\nI1207 22:37:59.528648  1922 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1207 22:37:59.538795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38252 > 2) by scale factor 0.591275\nI1207 22:38:03.717468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51355 > 2) by scale factor 0.569224\nI1207 22:38:07.897729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37483 > 2) by scale factor 0.457161\nI1207 22:38:12.078122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41815 > 2) by scale factor 0.452678\nI1207 22:38:16.257839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70386 > 2) by scale factor 0.739684\nI1207 22:38:20.437435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77072 > 2) by scale factor 0.721833\nI1207 22:38:32.972755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53947 > 2) by scale factor 0.565056\nI1207 22:38:37.152429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.10593 > 2) by scale factor 0.64393\nI1207 22:38:41.331301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72345 > 2) by scale factor 0.537136\nI1207 22:38:45.511276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55251 > 2) by scale factor 0.783544\nI1207 22:38:49.689770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72652 > 2) by scale factor 0.536694\nI1207 22:38:53.867027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00311 > 2) by scale factor 0.665976\nI1207 22:38:58.046434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56033 > 2) by scale factor 0.561745\nI1207 22:39:02.225574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23884 > 2) by scale factor 0.617506\nI1207 22:39:06.403733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29784 > 2) by scale factor 0.606457\nI1207 22:39:10.582947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56733 > 2) by scale factor 0.779021\nI1207 22:39:14.763136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72122 > 2) by scale factor 0.537458\nI1207 22:39:18.942031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25344 > 2) by scale factor 0.887531\nI1207 22:39:23.121098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49613 > 2) by scale factor 0.801239\nI1207 22:39:27.300510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12869 > 2) by scale factor 0.484415\nI1207 22:39:31.479693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69262 > 2) by scale factor 0.541621\nI1207 22:39:35.659325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36505 > 2) by scale factor 0.458185\nI1207 22:39:39.838014  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19405 > 2) by scale factor 0.476866\nI1207 22:39:44.019539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93953 > 2) by scale factor 0.68038\nI1207 22:39:48.198593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39763 > 2) by scale factor 0.588645\nI1207 22:39:52.377761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91606 > 2) by scale factor 0.510717\nI1207 22:39:56.557407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32549 > 2) by scale factor 0.601414\nI1207 22:40:00.736865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17128 > 2) by scale factor 0.921115\nI1207 22:40:04.916823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67573 > 2) by scale factor 0.54411\nI1207 22:40:09.096019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54098 > 2) by scale factor 0.440433\nI1207 22:40:13.276393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64234 > 2) by scale factor 0.756904\nI1207 22:40:17.455862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24327 > 2) by scale factor 0.616662\nI1207 22:40:21.635676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44769 > 2) by scale factor 0.817098\nI1207 22:40:25.815536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00848 > 2) by scale factor 0.664788\nI1207 22:40:29.995512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65792 > 2) by scale factor 0.752468\nI1207 22:40:34.175680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57302 > 2) by scale factor 0.437348\nI1207 22:40:38.355502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.33098 > 2) by scale factor 0.461789\nI1207 22:40:42.535331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30464 > 2) by scale factor 0.605209\nI1207 22:40:46.713949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01377 > 2) by scale factor 0.663621\nI1207 22:40:50.893682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8224 > 2) by scale factor 0.708618\nI1207 22:40:55.072861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24443 > 2) by scale factor 0.891094\nI1207 22:40:59.251260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58541 > 2) by scale factor 0.77357\nI1207 22:41:03.429687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74632 > 2) by scale factor 0.533858\nI1207 22:41:07.608148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29559 > 2) by scale factor 0.871237\nI1207 22:41:15.963503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24661 > 2) by scale factor 0.616026\nI1207 22:41:20.142804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73817 > 2) by scale factor 0.730415\nI1207 22:41:24.322712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11816 > 2) by scale factor 0.944217\nI1207 22:41:32.680127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94597 > 2) by scale factor 0.678893\nI1207 22:41:41.036725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56816 > 2) by scale factor 0.560513\nI1207 22:41:45.215004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24162 > 2) by scale factor 0.471518\nI1207 22:41:49.394268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46454 > 2) by scale factor 0.577277\nI1207 22:41:53.573194 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9847 > 2) by scale factor 0.50192\nI1207 22:41:57.752431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26796 > 2) by scale factor 0.612002\nI1207 22:42:01.931411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29838 > 2) by scale factor 0.87018\nI1207 22:42:06.109977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43225 > 2) by scale factor 0.822284\nI1207 22:42:10.289605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3293 > 2) by scale factor 0.461969\nI1207 22:42:14.469146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36409 > 2) by scale factor 0.845991\nI1207 22:42:18.648099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87262 > 2) by scale factor 0.696228\nI1207 22:42:22.827282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81183 > 2) by scale factor 0.711281\nI1207 22:42:31.183691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70445 > 2) by scale factor 0.539891\nI1207 22:42:35.364545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31819 > 2) by scale factor 0.463157\nI1207 22:42:39.543352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24127 > 2) by scale factor 0.617042\nI1207 22:42:43.723083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43827 > 2) by scale factor 0.581687\nI1207 22:42:47.902552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22785 > 2) by scale factor 0.619607\nI1207 22:42:52.082813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80616 > 2) by scale factor 0.712717\nI1207 22:42:56.262282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.33863 > 2) by scale factor 0.855203\nI1207 22:43:00.440861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84663 > 2) by scale factor 0.519936\nI1207 22:43:04.620234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02637 > 2) by scale factor 0.496725\nI1207 22:43:08.800285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28958 > 2) by scale factor 0.873523\nI1207 22:43:12.979802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90413 > 2) by scale factor 0.512278\nI1207 22:43:17.158758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28532 > 2) by scale factor 0.608768\nI1207 22:43:21.337303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27422 > 2) by scale factor 0.610833\nI1207 22:43:25.517103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42791 > 2) by scale factor 0.823755\nI1207 22:43:29.695325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11778 > 2) by scale factor 0.641483\nI1207 22:43:33.874539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07003 > 2) by scale factor 0.651459\nI1207 22:43:38.053571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5337 > 2) by scale factor 0.565979\nI1207 22:43:42.232461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7513 > 2) by scale factor 0.533148\nI1207 22:43:46.411156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57426 > 2) by scale factor 0.776923\nI1207 22:43:50.590427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23862 > 2) by scale factor 0.617547\nI1207 22:43:54.770462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6936 > 2) by scale factor 0.7425\nI1207 22:43:58.950192  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1631 > 2) by scale factor 0.924601\nI1207 22:44:03.129144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77873 > 2) by scale factor 0.719753\nI1207 22:44:11.486215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31603 > 2) by scale factor 0.863548\nI1207 22:44:15.665267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06978 > 2) by scale factor 0.966288\nI1207 22:44:19.843250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51573 > 2) by scale factor 0.794999\nI1207 22:44:24.022301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22109 > 2) by scale factor 0.620907\nI1207 22:44:28.201122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84865 > 2) by scale factor 0.702087\nI1207 22:44:32.380764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36635 > 2) by scale factor 0.845183\nI1207 22:44:36.560700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67499 > 2) by scale factor 0.747666\nI1207 22:44:40.740255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94292 > 2) by scale factor 0.679597\nI1207 22:44:44.919908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84582 > 2) by scale factor 0.520045\nI1207 22:44:49.099359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30178 > 2) by scale factor 0.868891\nI1207 22:44:53.288532  1922 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1207 22:47:31.310464  1922 solver.cpp:404]     Test net output #0: accuracy = 0.195177\nI1207 22:47:31.310858  1922 solver.cpp:404]     Test net output #1: loss = 6.08243 (* 1 = 6.08243 loss)\nI1207 22:47:35.253299  1922 solver.cpp:228] Iteration 10700, loss = 6.34541\nI1207 22:47:35.253350  
1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 22:47:35.253367  1922 solver.cpp:244]     Train net output #1: loss = 6.34541 (* 1 = 6.34541 loss)\nI1207 22:47:35.483769  1922 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1207 22:47:35.493983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38296 > 2) by scale factor 0.839292\nI1207 22:47:39.673179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10593 > 2) by scale factor 0.64393\nI1207 22:47:43.854226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35831 > 2) by scale factor 0.848066\nI1207 22:47:48.035774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00636 > 2) by scale factor 0.996829\nI1207 22:47:52.217437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31499 > 2) by scale factor 0.863933\nI1207 22:47:56.397287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85298 > 2) by scale factor 0.519079\nI1207 22:48:00.577524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90782 > 2) by scale factor 0.6878\nI1207 22:48:04.757869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42046 > 2) by scale factor 0.584717\nI1207 22:48:08.939429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78143 > 2) by scale factor 0.528901\nI1207 22:48:13.120759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19204 > 2) by scale factor 0.912392\nI1207 22:48:17.301298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23701 > 2) by scale factor 0.617854\nI1207 22:48:21.482547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11238 > 2) by scale factor 0.9468\nI1207 22:48:25.663692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.00131 > 2) by scale factor 0.499836\nI1207 22:48:29.844193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95036 > 2) by scale factor 0.506283\nI1207 22:48:34.025768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73179 > 2) by scale factor 0.73212\nI1207 22:48:38.208886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79838 > 2) by scale factor 0.714699\nI1207 22:48:42.389245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39968 > 2) by scale factor 0.833445\nI1207 22:48:46.570318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87061 > 2) by scale factor 0.696716\nI1207 22:48:50.751307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68755 > 2) by scale factor 0.542366\nI1207 22:48:54.932515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3005 > 2) by scale factor 0.605968\nI1207 22:48:59.112670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78805 > 2) by scale factor 0.527976\nI1207 22:49:03.293931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60225 > 2) by scale factor 0.768566\nI1207 22:49:07.474815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07629 > 2) by scale factor 0.650135\nI1207 22:49:11.655689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56202 > 2) by scale factor 0.780634\nI1207 22:49:15.836539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80664 > 2) by scale factor 0.712597\nI1207 22:49:20.017213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80077 > 2) by scale factor 0.526209\nI1207 22:49:24.198945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41229 > 2) by scale factor 0.45328\nI1207 22:49:28.379541  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84115 > 2) by scale factor 0.70394\nI1207 22:49:32.561013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83944 > 2) by scale factor 0.413271\nI1207 22:49:36.743297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57637 > 2) by scale factor 0.437027\nI1207 22:49:40.924198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64525 > 2) by scale factor 0.430547\nI1207 22:49:45.105715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16044 > 2) by scale factor 0.632824\nI1207 22:49:49.285914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55603 > 2) by scale factor 0.782464\nI1207 22:49:53.466671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46161 > 2) by scale factor 0.577766\nI1207 22:49:57.647370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19811 > 2) by scale factor 0.476405\nI1207 22:50:01.827836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19711 > 2) by scale factor 0.625566\nI1207 22:50:06.009289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83034 > 2) by scale factor 0.414049\nI1207 22:50:10.189970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25936 > 2) by scale factor 0.613617\nI1207 22:50:14.371325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00673 > 2) by scale factor 0.665175\nI1207 22:50:18.551832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1773 > 2) by scale factor 0.478778\nI1207 22:50:22.734462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60061 > 2) by scale factor 0.769052\nI1207 22:50:26.916093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.97082 > 2) by scale factor 0.503675\nI1207 22:50:31.096191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39573 > 2) by scale factor 0.588974\nI1207 22:50:35.278020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23758 > 2) by scale factor 0.893821\nI1207 22:50:39.459586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31054 > 2) by scale factor 0.604131\nI1207 22:50:43.639670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22096 > 2) by scale factor 0.620933\nI1207 22:50:47.821096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66404 > 2) by scale factor 0.545845\nI1207 22:50:52.002712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34816 > 2) by scale factor 0.597344\nI1207 22:50:56.184609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9295 > 2) by scale factor 0.682711\nI1207 22:51:00.366390  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29376 > 2) by scale factor 0.607208\nI1207 22:51:04.547325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72805 > 2) by scale factor 0.733123\nI1207 22:51:08.727057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96538 > 2) by scale factor 0.67445\nI1207 22:51:12.908098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58101 > 2) by scale factor 0.774892\nI1207 22:51:17.088217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26832 > 2) by scale factor 0.468568\nI1207 22:51:21.268201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8236 > 2) by scale factor 0.523067\nI1207 22:51:25.450575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52672 > 2) by scale factor 0.791541\nI1207 22:51:33.810091  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07739 > 2) by scale factor 0.649901\nI1207 22:51:37.990170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24303 > 2) by scale factor 0.616708\nI1207 22:51:42.171634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3062 > 2) by scale factor 0.604925\nI1207 22:51:50.529966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29337 > 2) by scale factor 0.87208\nI1207 22:51:54.710692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01305 > 2) by scale factor 0.66378\nI1207 22:51:58.891116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10631 > 2) by scale factor 0.643852\nI1207 22:52:03.070920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13053 > 2) by scale factor 0.638868\nI1207 22:52:07.251452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20997 > 2) by scale factor 0.904989\nI1207 22:52:11.432200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23621 > 2) by scale factor 0.618008\nI1207 22:52:15.612607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91098 > 2) by scale factor 0.687053\nI1207 22:52:19.791954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44344 > 2) by scale factor 0.818518\nI1207 22:52:23.972178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84466 > 2) by scale factor 0.703071\nI1207 22:52:28.153703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91738 > 2) by scale factor 0.685548\nI1207 22:52:32.333324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69398 > 2) by scale factor 0.742395\nI1207 22:52:36.514019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.11941 > 2) by scale factor 0.641147\nI1207 22:52:40.694514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20513 > 2) by scale factor 0.624\nI1207 22:52:44.873734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46336 > 2) by scale factor 0.8119\nI1207 22:52:53.232161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43604 > 2) by scale factor 0.821005\nI1207 22:52:57.412153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0194 > 2) by scale factor 0.662383\nI1207 22:53:01.593408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10028 > 2) by scale factor 0.952253\nI1207 22:53:05.774125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69572 > 2) by scale factor 0.741917\nI1207 22:53:09.954516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55041 > 2) by scale factor 0.784187\nI1207 22:53:14.135500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23943 > 2) by scale factor 0.471762\nI1207 22:53:18.316540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09402 > 2) by scale factor 0.646407\nI1207 22:53:22.496222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02839 > 2) by scale factor 0.660416\nI1207 22:53:26.676849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80737 > 2) by scale factor 0.525297\nI1207 22:53:30.857831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04162 > 2) by scale factor 0.657545\nI1207 22:53:35.038355  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83932 > 2) by scale factor 0.704394\nI1207 22:53:39.218652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39548 > 2) by scale factor 0.834904\nI1207 22:53:43.400092  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91766 > 2) by scale factor 0.685481\nI1207 22:53:47.579715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32691 > 2) by scale factor 0.859509\nI1207 22:53:51.759802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70711 > 2) by scale factor 0.539504\nI1207 22:53:55.941246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10322 > 2) by scale factor 0.644492\nI1207 22:54:00.122565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71372 > 2) by scale factor 0.736995\nI1207 22:54:04.304903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18142 > 2) by scale factor 0.916836\nI1207 22:54:08.485654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58486 > 2) by scale factor 0.557902\nI1207 22:54:12.666776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04948 > 2) by scale factor 0.493891\nI1207 22:54:16.847427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36099 > 2) by scale factor 0.847101\nI1207 22:54:21.028678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55739 > 2) by scale factor 0.562211\nI1207 22:54:25.208202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46298 > 2) by scale factor 0.577537\nI1207 22:54:29.388527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92937 > 2) by scale factor 0.68274\nI1207 22:54:29.400486  1922 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1207 22:57:07.418325  1922 solver.cpp:404]     Test net output #0: accuracy = 0.193706\nI1207 22:57:07.418745  1922 solver.cpp:404]     Test net output #1: loss = 12.7648 (* 1 = 12.7648 loss)\nI1207 22:57:11.362579  1922 solver.cpp:228] Iteration 10800, loss = 13.229\nI1207 22:57:11.362629  
1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1207 22:57:11.362648  1922 solver.cpp:244]     Train net output #1: loss = 13.229 (* 1 = 13.229 loss)\nI1207 22:57:11.587641  1922 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1207 22:57:11.597818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66112 > 2) by scale factor 0.751564\nI1207 22:57:15.778164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04051 > 2) by scale factor 0.494987\nI1207 22:57:19.959122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45728 > 2) by scale factor 0.813909\nI1207 22:57:24.139093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66842 > 2) by scale factor 0.545194\nI1207 22:57:28.319666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6691 > 2) by scale factor 0.749317\nI1207 22:57:32.500342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50802 > 2) by scale factor 0.797442\nI1207 22:57:36.680069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63119 > 2) by scale factor 0.760112\nI1207 22:57:40.860424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0958 > 2) by scale factor 0.954291\nI1207 22:57:45.041287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17128 > 2) by scale factor 0.63066\nI1207 22:57:49.221436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46889 > 2) by scale factor 0.576553\nI1207 22:57:53.401950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17227 > 2) by scale factor 0.630463\nI1207 22:57:57.582517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02749 > 2) by scale factor 0.660614\nI1207 22:58:01.762859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.83496 > 2) by scale factor 0.705478\nI1207 22:58:05.942888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17606 > 2) by scale factor 0.629712\nI1207 22:58:10.123595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20123 > 2) by scale factor 0.476051\nI1207 22:58:14.303223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39311 > 2) by scale factor 0.455259\nI1207 22:58:18.483917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08648 > 2) by scale factor 0.647987\nI1207 22:58:22.664876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11721 > 2) by scale factor 0.485766\nI1207 22:58:26.843804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38442 > 2) by scale factor 0.838777\nI1207 22:58:31.022367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55011 > 2) by scale factor 0.563362\nI1207 22:58:35.202611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46149 > 2) by scale factor 0.577786\nI1207 22:58:39.380950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80504 > 2) by scale factor 0.525619\nI1207 22:58:43.560673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93685 > 2) by scale factor 0.681002\nI1207 22:58:47.740164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63711 > 2) by scale factor 0.758406\nI1207 22:58:51.920686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40756 > 2) by scale factor 0.830717\nI1207 22:58:56.100690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87891 > 2) by scale factor 0.515609\nI1207 22:59:00.280390  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2087 > 2) by scale factor 0.623305\nI1207 22:59:04.460891  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03059 > 2) by scale factor 0.659936\nI1207 22:59:08.641360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35329 > 2) by scale factor 0.849873\nI1207 22:59:12.821568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77224 > 2) by scale factor 0.53019\nI1207 22:59:17.001600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00153 > 2) by scale factor 0.499809\nI1207 22:59:21.182193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5879 > 2) by scale factor 0.772829\nI1207 22:59:25.362479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7856 > 2) by scale factor 0.717979\nI1207 22:59:29.542047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20004 > 2) by scale factor 0.624992\nI1207 22:59:33.722128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20939 > 2) by scale factor 0.623171\nI1207 22:59:37.901497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17929 > 2) by scale factor 0.629071\nI1207 22:59:42.081230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43842 > 2) by scale factor 0.820204\nI1207 22:59:46.261982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19358 > 2) by scale factor 0.911754\nI1207 22:59:50.441486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75042 > 2) by scale factor 0.727161\nI1207 22:59:58.797216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45198 > 2) by scale factor 0.579377\nI1207 23:00:02.976122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21439 > 2) by scale factor 0.474564\nI1207 23:00:07.155331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.61598 > 2) by scale factor 0.553101\nI1207 23:00:11.336239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2063 > 2) by scale factor 0.475477\nI1207 23:00:15.516206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49394 > 2) by scale factor 0.801945\nI1207 23:00:19.696113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81595 > 2) by scale factor 0.710239\nI1207 23:00:23.876217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90274 > 2) by scale factor 0.51246\nI1207 23:00:28.056568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53724 > 2) by scale factor 0.565412\nI1207 23:00:32.236910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70121 > 2) by scale factor 0.740409\nI1207 23:00:36.417354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62035 > 2) by scale factor 0.552433\nI1207 23:00:40.597162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78953 > 2) by scale factor 0.716966\nI1207 23:00:44.777631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31013 > 2) by scale factor 0.865751\nI1207 23:00:48.957347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17055 > 2) by scale factor 0.630805\nI1207 23:00:53.137908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2939 > 2) by scale factor 0.607183\nI1207 23:00:57.318537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14262 > 2) by scale factor 0.933435\nI1207 23:01:01.497850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12246 > 2) by scale factor 0.390437\nI1207 23:01:05.677573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51532 > 2) by scale factor 0.568939\nI1207 23:01:09.858491  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76598 > 2) by scale factor 0.723071\nI1207 23:01:14.039707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0132 > 2) by scale factor 0.498355\nI1207 23:01:18.220160  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78702 > 2) by scale factor 0.717613\nI1207 23:01:22.400066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64823 > 2) by scale factor 0.430271\nI1207 23:01:26.578270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13674 > 2) by scale factor 0.389352\nI1207 23:01:30.758494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72685 > 2) by scale factor 0.733448\nI1207 23:01:34.939748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98268 > 2) by scale factor 0.502174\nI1207 23:01:39.119966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06859 > 2) by scale factor 0.394587\nI1207 23:01:43.299690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59034 > 2) by scale factor 0.435697\nI1207 23:01:47.479373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16834 > 2) by scale factor 0.631246\nI1207 23:01:51.659314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21123 > 2) by scale factor 0.904473\nI1207 23:01:55.840205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51259 > 2) by scale factor 0.569381\nI1207 23:02:00.019620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.156 > 2) by scale factor 0.481232\nI1207 23:02:04.200659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73232 > 2) by scale factor 0.73198\nI1207 23:02:08.382097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.36081 > 2) by scale factor 0.595095\nI1207 23:02:12.562276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44415 > 2) by scale factor 0.580694\nI1207 23:02:16.741214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52786 > 2) by scale factor 0.566916\nI1207 23:02:20.920907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34517 > 2) by scale factor 0.597878\nI1207 23:02:25.100392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98436 > 2) by scale factor 0.670161\nI1207 23:02:29.280586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93507 > 2) by scale factor 0.681415\nI1207 23:02:33.460747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67726 > 2) by scale factor 0.747033\nI1207 23:02:37.640357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73257 > 2) by scale factor 0.535823\nI1207 23:02:41.820838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08497 > 2) by scale factor 0.648305\nI1207 23:02:46.000447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2346 > 2) by scale factor 0.895013\nI1207 23:02:50.181206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39383 > 2) by scale factor 0.83548\nI1207 23:02:54.360640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5267 > 2) by scale factor 0.791548\nI1207 23:03:06.895560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22885 > 2) by scale factor 0.619415\nI1207 23:03:11.075031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66583 > 2) by scale factor 0.545578\nI1207 23:03:15.255403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48722 > 2) by scale factor 0.573523\nI1207 23:03:19.435542  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7274 > 2) by scale factor 0.733299\nI1207 23:03:23.616248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57008 > 2) by scale factor 0.778185\nI1207 23:03:27.796550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11098 > 2) by scale factor 0.947426\nI1207 23:03:36.153610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0944 > 2) by scale factor 0.954927\nI1207 23:03:40.333708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38854 > 2) by scale factor 0.590224\nI1207 23:03:44.513921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05031 > 2) by scale factor 0.655671\nI1207 23:03:48.692893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56057 > 2) by scale factor 0.781077\nI1207 23:03:52.873347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16017 > 2) by scale factor 0.632877\nI1207 23:03:57.054760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19405 > 2) by scale factor 0.626165\nI1207 23:04:01.233955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84438 > 2) by scale factor 0.703142\nI1207 23:04:05.414919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70587 > 2) by scale factor 0.739133\nI1207 23:04:05.426764  1922 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1207 23:06:42.764451  1922 solver.cpp:404]     Test net output #0: accuracy = 0.126353\nI1207 23:06:42.764828  1922 solver.cpp:404]     Test net output #1: loss = 12.3099 (* 1 = 12.3099 loss)\nI1207 23:06:46.706022  1922 solver.cpp:228] Iteration 10900, loss = 12.6619\nI1207 23:06:46.706073  1922 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 23:06:46.706090  1922 solver.cpp:244]     Train net output #1: 
loss = 12.6619 (* 1 = 12.6619 loss)\nI1207 23:06:46.933740  1922 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1207 23:06:46.943878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57649 > 2) by scale factor 0.559207\nI1207 23:06:51.124542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29423 > 2) by scale factor 0.465741\nI1207 23:06:55.306084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.869 > 2) by scale factor 0.697108\nI1207 23:06:59.486464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22574 > 2) by scale factor 0.47329\nI1207 23:07:03.668254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32455 > 2) by scale factor 0.601586\nI1207 23:07:07.849400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15018 > 2) by scale factor 0.634884\nI1207 23:07:12.030369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.576 > 2) by scale factor 0.776399\nI1207 23:07:16.210659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91047 > 2) by scale factor 0.687174\nI1207 23:07:20.392010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2528 > 2) by scale factor 0.614854\nI1207 23:07:24.573024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31532 > 2) by scale factor 0.863811\nI1207 23:07:28.753788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64273 > 2) by scale factor 0.756794\nI1207 23:07:32.935309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91931 > 2) by scale factor 0.685094\nI1207 23:07:37.114748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05138 > 2) by scale factor 0.65544\nI1207 23:07:41.295235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.76519 > 2) by scale factor 0.531181\nI1207 23:07:45.477883  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19211 > 2) by scale factor 0.912364\nI1207 23:07:49.660038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64275 > 2) by scale factor 0.756787\nI1207 23:07:53.841109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06906 > 2) by scale factor 0.651665\nI1207 23:07:58.021853  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86309 > 2) by scale factor 0.517721\nI1207 23:08:02.202673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02049 > 2) by scale factor 0.662145\nI1207 23:08:06.383836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18447 > 2) by scale factor 0.915552\nI1207 23:08:10.564628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47022 > 2) by scale factor 0.809643\nI1207 23:08:14.745080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62013 > 2) by scale factor 0.76332\nI1207 23:08:18.925619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3676 > 2) by scale factor 0.844737\nI1207 23:08:23.106942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23331 > 2) by scale factor 0.618561\nI1207 23:08:27.288095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81341 > 2) by scale factor 0.710881\nI1207 23:08:31.468974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70125 > 2) by scale factor 0.540357\nI1207 23:08:35.651023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9274 > 2) by scale factor 0.683201\nI1207 23:08:44.009745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80472 > 2) by scale factor 0.713085\nI1207 23:08:48.191287  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70988 > 2) by scale factor 0.73804\nI1207 23:08:52.371585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38786 > 2) by scale factor 0.590344\nI1207 23:08:56.552763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42067 > 2) by scale factor 0.826216\nI1207 23:09:00.734313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69507 > 2) by scale factor 0.541261\nI1207 23:09:04.914341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1081 > 2) by scale factor 0.64348\nI1207 23:09:09.094630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2015 > 2) by scale factor 0.624708\nI1207 23:09:13.276363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44031 > 2) by scale factor 0.581343\nI1207 23:09:17.457124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6697 > 2) by scale factor 0.749147\nI1207 23:09:21.637238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72968 > 2) by scale factor 0.536239\nI1207 23:09:25.818250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21077 > 2) by scale factor 0.904661\nI1207 23:09:29.997750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93412 > 2) by scale factor 0.681635\nI1207 23:09:34.178236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32839 > 2) by scale factor 0.600892\nI1207 23:09:38.359405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17444 > 2) by scale factor 0.630032\nI1207 23:09:42.540650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22543 > 2) by scale factor 0.620072\nI1207 23:09:46.720743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.87742 > 2) by scale factor 0.695067\nI1207 23:09:50.902164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63121 > 2) by scale factor 0.550781\nI1207 23:09:55.082720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28779 > 2) by scale factor 0.466441\nI1207 23:09:59.262888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50506 > 2) by scale factor 0.443945\nI1207 23:10:03.442795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37478 > 2) by scale factor 0.59263\nI1207 23:10:07.623913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28976 > 2) by scale factor 0.873456\nI1207 23:10:11.804911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27737 > 2) by scale factor 0.610246\nI1207 23:10:15.985481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06538 > 2) by scale factor 0.491958\nI1207 23:10:20.166625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03552 > 2) by scale factor 0.658866\nI1207 23:10:24.347649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10138 > 2) by scale factor 0.951754\nI1207 23:10:28.528357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45719 > 2) by scale factor 0.578505\nI1207 23:10:32.708979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13654 > 2) by scale factor 0.483495\nI1207 23:10:36.889240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12628 > 2) by scale factor 0.639739\nI1207 23:10:41.068684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10102 > 2) by scale factor 0.487684\nI1207 23:10:45.249480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53592 > 2) by scale factor 0.565624\nI1207 23:10:49.430497  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0326 > 2) by scale factor 0.659501\nI1207 23:10:53.610769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60083 > 2) by scale factor 0.768986\nI1207 23:10:57.790740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5774 > 2) by scale factor 0.775977\nI1207 23:11:01.972429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38981 > 2) by scale factor 0.836886\nI1207 23:11:06.152909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04065 > 2) by scale factor 0.657753\nI1207 23:11:10.333122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8551 > 2) by scale factor 0.518794\nI1207 23:11:14.514129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07609 > 2) by scale factor 0.650176\nI1207 23:11:18.695150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52958 > 2) by scale factor 0.790646\nI1207 23:11:22.874189  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27361 > 2) by scale factor 0.879659\nI1207 23:11:27.055409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12826 > 2) by scale factor 0.484465\nI1207 23:11:31.234935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46777 > 2) by scale factor 0.81045\nI1207 23:11:35.415066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89098 > 2) by scale factor 0.691807\nI1207 23:11:39.595734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29235 > 2) by scale factor 0.872469\nI1207 23:11:47.954432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43988 > 2) by scale factor 0.819712\nI1207 23:11:52.134639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.35296 > 2) by scale factor 0.596487\nI1207 23:11:56.314816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39013 > 2) by scale factor 0.836774\nI1207 23:12:00.495542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26512 > 2) by scale factor 0.46892\nI1207 23:12:04.675964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08382 > 2) by scale factor 0.648546\nI1207 23:12:08.863759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53509 > 2) by scale factor 0.565757\nI1207 23:12:13.055938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90134 > 2) by scale factor 0.512644\nI1207 23:12:17.248392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39516 > 2) by scale factor 0.589074\nI1207 23:12:21.440547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06334 > 2) by scale factor 0.652882\nI1207 23:12:25.633306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6994 > 2) by scale factor 0.740904\nI1207 23:12:29.825273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42773 > 2) by scale factor 0.823814\nI1207 23:12:34.018260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97876 > 2) by scale factor 0.67142\nI1207 23:12:38.209514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09395 > 2) by scale factor 0.488526\nI1207 23:12:42.402186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55234 > 2) by scale factor 0.783593\nI1207 23:12:46.594974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77547 > 2) by scale factor 0.720599\nI1207 23:12:50.786598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02898 > 2) by scale factor 0.660288\nI1207 23:12:59.170353  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58634 > 2) by scale factor 0.773292\nI1207 23:13:03.362498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49249 > 2) by scale factor 0.802411\nI1207 23:13:07.556710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16919 > 2) by scale factor 0.922005\nI1207 23:13:11.747903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71338 > 2) by scale factor 0.737087\nI1207 23:13:15.939759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6073 > 2) by scale factor 0.767076\nI1207 23:13:20.132135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54946 > 2) by scale factor 0.784481\nI1207 23:13:24.324097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92833 > 2) by scale factor 0.682982\nI1207 23:13:28.517599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40355 > 2) by scale factor 0.832101\nI1207 23:13:32.711287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1307 > 2) by scale factor 0.484179\nI1207 23:13:36.904417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08235 > 2) by scale factor 0.489914\nI1207 23:13:41.096590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05792 > 2) by scale factor 0.492863\nI1207 23:13:41.108466  1922 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1207 23:16:19.034281  1922 solver.cpp:404]     Test net output #0: accuracy = 0.190882\nI1207 23:16:19.034668  1922 solver.cpp:404]     Test net output #1: loss = 10.8364 (* 1 = 10.8364 loss)\nI1207 23:16:22.972223  1922 solver.cpp:228] Iteration 11000, loss = 11.7939\nI1207 23:16:22.972273  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 23:16:22.972291  1922 solver.cpp:244]     Train net output #1: 
loss = 11.7939 (* 1 = 11.7939 loss)\nI1207 23:16:23.213356  1922 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1207 23:16:23.223598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83706 > 2) by scale factor 0.704955\nI1207 23:16:31.606009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63222 > 2) by scale factor 0.759815\nI1207 23:16:35.796757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5741 > 2) by scale factor 0.437245\nI1207 23:16:39.989454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67425 > 2) by scale factor 0.747872\nI1207 23:16:44.181463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07884 > 2) by scale factor 0.962076\nI1207 23:16:48.372573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05965 > 2) by scale factor 0.65367\nI1207 23:16:52.563480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02968 > 2) by scale factor 0.985375\nI1207 23:16:56.756292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30992 > 2) by scale factor 0.464045\nI1207 23:17:00.948298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28875 > 2) by scale factor 0.608134\nI1207 23:17:05.139127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73735 > 2) by scale factor 0.730634\nI1207 23:17:09.331065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78132 > 2) by scale factor 0.719084\nI1207 23:17:13.523098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5967 > 2) by scale factor 0.770207\nI1207 23:17:21.904731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57881 > 2) by scale factor 0.775553\nI1207 23:17:26.097983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.4124 > 2) by scale factor 0.829052\nI1207 23:17:30.289300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35521 > 2) by scale factor 0.849181\nI1207 23:17:34.481443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74852 > 2) by scale factor 0.727665\nI1207 23:17:38.672791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93584 > 2) by scale factor 0.681236\nI1207 23:17:42.863901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36218 > 2) by scale factor 0.594852\nI1207 23:17:47.054946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07477 > 2) by scale factor 0.490825\nI1207 23:17:51.246125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56244 > 2) by scale factor 0.780506\nI1207 23:17:55.437510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13275 > 2) by scale factor 0.937755\nI1207 23:17:59.628692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2625 > 2) by scale factor 0.883978\nI1207 23:18:03.820381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51638 > 2) by scale factor 0.794792\nI1207 23:18:08.012048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74756 > 2) by scale factor 0.533681\nI1207 23:18:12.204290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20211 > 2) by scale factor 0.908219\nI1207 23:18:20.584980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56023 > 2) by scale factor 0.438575\nI1207 23:18:24.775530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12018 > 2) by scale factor 0.640988\nI1207 23:18:28.966783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17891 > 2) by scale factor 0.478593\nI1207 23:18:33.157363  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30202 > 2) by scale factor 0.605689\nI1207 23:18:37.349128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71734 > 2) by scale factor 0.538019\nI1207 23:18:41.540063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19446 > 2) by scale factor 0.476819\nI1207 23:18:45.731367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90238 > 2) by scale factor 0.689091\nI1207 23:18:49.922374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89666 > 2) by scale factor 0.690449\nI1207 23:18:54.113979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32383 > 2) by scale factor 0.860648\nI1207 23:18:58.304924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63183 > 2) by scale factor 0.759928\nI1207 23:19:02.496244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81569 > 2) by scale factor 0.710304\nI1207 23:19:06.687211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74529 > 2) by scale factor 0.534005\nI1207 23:19:10.878849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02689 > 2) by scale factor 0.986733\nI1207 23:19:15.070847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02694 > 2) by scale factor 0.986709\nI1207 23:19:19.262233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18574 > 2) by scale factor 0.915024\nI1207 23:19:23.452677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03887 > 2) by scale factor 0.495187\nI1207 23:19:27.642710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49233 > 2) by scale factor 0.572684\nI1207 23:19:31.834322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.14215 > 2) by scale factor 0.636508\nI1207 23:19:36.025032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42843 > 2) by scale factor 0.823577\nI1207 23:19:40.216537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64096 > 2) by scale factor 0.757301\nI1207 23:19:44.407908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3263 > 2) by scale factor 0.859736\nI1207 23:19:48.599833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58871 > 2) by scale factor 0.435853\nI1207 23:19:52.790979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75858 > 2) by scale factor 0.72501\nI1207 23:19:56.982542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.424 > 2) by scale factor 0.584113\nI1207 23:20:01.173207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50305 > 2) by scale factor 0.444144\nI1207 23:20:05.364270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97179 > 2) by scale factor 0.672996\nI1207 23:20:09.555701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34884 > 2) by scale factor 0.597221\nI1207 23:20:13.747014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31346 > 2) by scale factor 0.603599\nI1207 23:20:22.128415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77794 > 2) by scale factor 0.719957\nI1207 23:20:26.320901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71527 > 2) by scale factor 0.736576\nI1207 23:20:30.512912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35351 > 2) by scale factor 0.59639\nI1207 23:20:34.703847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88466 > 2) by scale factor 0.514845\nI1207 23:20:38.894737  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5446 > 2) by scale factor 0.564239\nI1207 23:20:43.086127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06372 > 2) by scale factor 0.652801\nI1207 23:20:47.277216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30187 > 2) by scale factor 0.605717\nI1207 23:20:51.469137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45738 > 2) by scale factor 0.578473\nI1207 23:20:55.660423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24042 > 2) by scale factor 0.892689\nI1207 23:20:59.851297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5549 > 2) by scale factor 0.782808\nI1207 23:21:04.042693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66085 > 2) by scale factor 0.546321\nI1207 23:21:08.234943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26244 > 2) by scale factor 0.884002\nI1207 23:21:12.426265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39622 > 2) by scale factor 0.58889\nI1207 23:21:16.618230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45678 > 2) by scale factor 0.814074\nI1207 23:21:20.809784  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79673 > 2) by scale factor 0.715122\nI1207 23:21:25.001626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8142 > 2) by scale factor 0.524357\nI1207 23:21:29.192705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64461 > 2) by scale factor 0.548756\nI1207 23:21:33.384840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51415 > 2) by scale factor 0.795497\nI1207 23:21:37.575958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.19642 > 2) by scale factor 0.910573\nI1207 23:21:41.768963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82351 > 2) by scale factor 0.708338\nI1207 23:21:45.959858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17001 > 2) by scale factor 0.479615\nI1207 23:21:50.150930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18402 > 2) by scale factor 0.915744\nI1207 23:21:54.343857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48818 > 2) by scale factor 0.803799\nI1207 23:21:58.534802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86054 > 2) by scale factor 0.699169\nI1207 23:22:02.726519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19805 > 2) by scale factor 0.476412\nI1207 23:22:06.917845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01775 > 2) by scale factor 0.662744\nI1207 23:22:11.108719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34706 > 2) by scale factor 0.460081\nI1207 23:22:15.300060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28917 > 2) by scale factor 0.46629\nI1207 23:22:19.491950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94316 > 2) by scale factor 0.679541\nI1207 23:22:23.682556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69127 > 2) by scale factor 0.541819\nI1207 23:22:27.873241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18496 > 2) by scale factor 0.477902\nI1207 23:22:32.064741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69142 > 2) by scale factor 0.541796\nI1207 23:22:36.255652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03833 > 2) by scale factor 0.495255\nI1207 23:22:40.446825  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79643 > 2) by scale factor 0.526811\nI1207 23:22:44.637202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02212 > 2) by scale factor 0.661788\nI1207 23:22:48.827878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07079 > 2) by scale factor 0.651299\nI1207 23:22:53.018734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78525 > 2) by scale factor 0.718069\nI1207 23:22:57.210546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53191 > 2) by scale factor 0.441315\nI1207 23:23:01.400493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34529 > 2) by scale factor 0.597856\nI1207 23:23:05.591946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37143 > 2) by scale factor 0.59322\nI1207 23:23:09.783032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81368 > 2) by scale factor 0.710813\nI1207 23:23:13.974385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53812 > 2) by scale factor 0.565272\nI1207 23:23:18.166478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30446 > 2) by scale factor 0.464634\nI1207 23:23:18.178360  1922 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1207 23:25:56.004573  1922 solver.cpp:404]     Test net output #0: accuracy = 0.128588\nI1207 23:25:56.004972  1922 solver.cpp:404]     Test net output #1: loss = 19.8552 (* 1 = 19.8552 loss)\nI1207 23:25:59.942112  1922 solver.cpp:228] Iteration 11100, loss = 21.8465\nI1207 23:25:59.942155  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 23:25:59.942173  1922 solver.cpp:244]     Train net output #1: loss = 21.8465 (* 1 = 21.8465 loss)\nI1207 23:26:00.185911  1922 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1207 
23:26:00.196043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02322 > 2) by scale factor 0.398151\nI1207 23:26:04.388216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92802 > 2) by scale factor 0.683055\nI1207 23:26:08.581225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94608 > 2) by scale factor 0.506832\nI1207 23:26:12.772795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21551 > 2) by scale factor 0.474439\nI1207 23:26:16.964742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30817 > 2) by scale factor 0.464234\nI1207 23:26:21.156152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35505 > 2) by scale factor 0.459237\nI1207 23:26:25.346976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07481 > 2) by scale factor 0.49082\nI1207 23:26:29.540407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34453 > 2) by scale factor 0.59799\nI1207 23:26:33.732831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54494 > 2) by scale factor 0.564184\nI1207 23:26:37.925866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61965 > 2) by scale factor 0.432933\nI1207 23:26:42.117435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19684 > 2) by scale factor 0.476549\nI1207 23:26:46.309693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41403 > 2) by scale factor 0.585818\nI1207 23:26:50.501251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08278 > 2) by scale factor 0.648765\nI1207 23:26:54.693948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64748 > 2) by scale factor 0.548324\nI1207 23:26:58.885749  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.67555 > 2) by scale factor 0.544136\nI1207 23:27:03.077124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97505 > 2) by scale factor 0.503139\nI1207 23:27:07.268754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66208 > 2) by scale factor 0.751292\nI1207 23:27:11.460505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18072 > 2) by scale factor 0.628788\nI1207 23:27:15.652951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73755 > 2) by scale factor 0.73058\nI1207 23:27:19.843557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20274 > 2) by scale factor 0.907962\nI1207 23:27:24.034865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76933 > 2) by scale factor 0.722196\nI1207 23:27:28.225503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01961 > 2) by scale factor 0.49756\nI1207 23:27:32.416240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77153 > 2) by scale factor 0.530289\nI1207 23:27:36.607780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54322 > 2) by scale factor 0.786406\nI1207 23:27:40.800514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44911 > 2) by scale factor 0.816623\nI1207 23:27:44.993093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72194 > 2) by scale factor 0.537355\nI1207 23:27:49.185708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66263 > 2) by scale factor 0.546055\nI1207 23:27:53.378024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03067 > 2) by scale factor 0.496195\nI1207 23:27:57.570082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76543 > 2) by scale factor 0.723215\nI1207 
23:28:05.951669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67404 > 2) by scale factor 0.747931\nI1207 23:28:10.142128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00496 > 2) by scale factor 0.49938\nI1207 23:28:14.334159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21658 > 2) by scale factor 0.90229\nI1207 23:28:18.527195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44729 > 2) by scale factor 0.817232\nI1207 23:28:22.719352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01973 > 2) by scale factor 0.66231\nI1207 23:28:26.911039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19696 > 2) by scale factor 0.625594\nI1207 23:28:31.102749  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37904 > 2) by scale factor 0.591885\nI1207 23:28:35.295047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92053 > 2) by scale factor 0.510136\nI1207 23:28:39.487478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04332 > 2) by scale factor 0.494643\nI1207 23:28:43.679091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94726 > 2) by scale factor 0.50668\nI1207 23:28:47.871510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91899 > 2) by scale factor 0.685169\nI1207 23:28:52.063422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4056 > 2) by scale factor 0.831394\nI1207 23:28:56.254526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43354 > 2) by scale factor 0.821849\nI1207 23:29:00.447353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99024 > 2) by scale factor 0.501222\nI1207 23:29:04.639103  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.24832 > 2) by scale factor 0.889554\nI1207 23:29:08.830297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13734 > 2) by scale factor 0.637483\nI1207 23:29:13.021713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16947 > 2) by scale factor 0.63102\nI1207 23:29:17.213304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39957 > 2) by scale factor 0.58831\nI1207 23:29:21.405966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0152 > 2) by scale factor 0.663305\nI1207 23:29:25.596357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65896 > 2) by scale factor 0.546604\nI1207 23:29:29.788318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95907 > 2) by scale factor 0.505169\nI1207 23:29:33.979590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52642 > 2) by scale factor 0.567147\nI1207 23:29:38.171188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54183 > 2) by scale factor 0.56468\nI1207 23:29:42.364217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70929 > 2) by scale factor 0.7382\nI1207 23:29:46.556304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14731 > 2) by scale factor 0.48224\nI1207 23:29:50.747684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36465 > 2) by scale factor 0.594415\nI1207 23:29:54.941712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6429 > 2) by scale factor 0.756746\nI1207 23:29:59.133333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29781 > 2) by scale factor 0.606462\nI1207 23:30:03.325026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27058 > 2) by scale factor 0.611512\nI1207 
23:30:07.516641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45133 > 2) by scale factor 0.815883\nI1207 23:30:11.708997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49888 > 2) by scale factor 0.800358\nI1207 23:30:15.900928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38255 > 2) by scale factor 0.591269\nI1207 23:30:20.092095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04342 > 2) by scale factor 0.978752\nI1207 23:30:24.283687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90791 > 2) by scale factor 0.511782\nI1207 23:30:28.476657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66769 > 2) by scale factor 0.749712\nI1207 23:30:32.670313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10387 > 2) by scale factor 0.644358\nI1207 23:30:36.861954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4176 > 2) by scale factor 0.827268\nI1207 23:30:41.054283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0925 > 2) by scale factor 0.646725\nI1207 23:30:45.245364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92364 > 2) by scale factor 0.684078\nI1207 23:30:49.436940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06815 > 2) by scale factor 0.96705\nI1207 23:30:53.628479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17355 > 2) by scale factor 0.920155\nI1207 23:30:57.819465  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33217 > 2) by scale factor 0.60021\nI1207 23:31:06.200906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00396 > 2) by scale factor 0.665787\nI1207 23:31:10.393390  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.59707 > 2) by scale factor 0.556008\nI1207 23:31:14.584069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84801 > 2) by scale factor 0.702245\nI1207 23:31:18.776017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15323 > 2) by scale factor 0.634271\nI1207 23:31:22.966902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9237 > 2) by scale factor 0.509723\nI1207 23:31:27.157634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14699 > 2) by scale factor 0.931538\nI1207 23:31:31.348505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98456 > 2) by scale factor 0.670115\nI1207 23:31:35.540863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06443 > 2) by scale factor 0.968788\nI1207 23:31:39.732769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2732 > 2) by scale factor 0.879816\nI1207 23:31:43.924926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13821 > 2) by scale factor 0.483301\nI1207 23:31:48.116519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80278 > 2) by scale factor 0.416425\nI1207 23:31:52.308377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57055 > 2) by scale factor 0.560137\nI1207 23:31:56.498948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5029 > 2) by scale factor 0.570955\nI1207 23:32:00.690621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7059 > 2) by scale factor 0.539679\nI1207 23:32:04.881484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42636 > 2) by scale factor 0.58371\nI1207 23:32:09.073297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84139 > 2) by scale factor 0.520646\nI1207 
23:32:13.265360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57763 > 2) by scale factor 0.436907\nI1207 23:32:17.457406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70216 > 2) by scale factor 0.740149\nI1207 23:32:21.648532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14403 > 2) by scale factor 0.482622\nI1207 23:32:25.838836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44684 > 2) by scale factor 0.580242\nI1207 23:32:30.030555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78477 > 2) by scale factor 0.528434\nI1207 23:32:34.221494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49553 > 2) by scale factor 0.57216\nI1207 23:32:38.412642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92151 > 2) by scale factor 0.510008\nI1207 23:32:42.604270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30706 > 2) by scale factor 0.866905\nI1207 23:32:50.985788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71347 > 2) by scale factor 0.53858\nI1207 23:32:55.177456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81249 > 2) by scale factor 0.711113\nI1207 23:32:55.189328  1922 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1207 23:35:33.121616  1922 solver.cpp:404]     Test net output #0: accuracy = 0.138059\nI1207 23:35:33.122035  1922 solver.cpp:404]     Test net output #1: loss = 11.4764 (* 1 = 11.4764 loss)\nI1207 23:35:37.061286  1922 solver.cpp:228] Iteration 11200, loss = 12.3505\nI1207 23:35:37.061337  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 23:35:37.061363  1922 solver.cpp:244]     Train net output #1: loss = 12.3505 (* 1 = 12.3505 loss)\nI1207 23:35:37.305544  1922 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1207 
23:35:37.315692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14523 > 2) by scale factor 0.635885\nI1207 23:35:41.509397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99835 > 2) by scale factor 0.667033\nI1207 23:35:45.702497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31164 > 2) by scale factor 0.865188\nI1207 23:35:49.895292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40896 > 2) by scale factor 0.58669\nI1207 23:35:54.089058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84192 > 2) by scale factor 0.703751\nI1207 23:35:58.281522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70675 > 2) by scale factor 0.539555\nI1207 23:36:02.474272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6422 > 2) by scale factor 0.549118\nI1207 23:36:06.667362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6573 > 2) by scale factor 0.752643\nI1207 23:36:10.860183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09231 > 2) by scale factor 0.646766\nI1207 23:36:15.053654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77037 > 2) by scale factor 0.721925\nI1207 23:36:19.247067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36397 > 2) by scale factor 0.846034\nI1207 23:36:23.439131  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89647 > 2) by scale factor 0.690497\nI1207 23:36:27.631222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02787 > 2) by scale factor 0.986256\nI1207 23:36:31.825335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33871 > 2) by scale factor 0.855172\nI1207 23:36:36.018247  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.60426 > 2) by scale factor 0.767972\nI1207 23:36:40.210760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99599 > 2) by scale factor 0.500501\nI1207 23:36:44.403564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57873 > 2) by scale factor 0.775576\nI1207 23:36:48.596210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64254 > 2) by scale factor 0.549068\nI1207 23:36:56.979293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21133 > 2) by scale factor 0.622796\nI1207 23:37:05.364017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84853 > 2) by scale factor 0.702115\nI1207 23:37:09.556721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2813 > 2) by scale factor 0.609514\nI1207 23:37:13.748927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77594 > 2) by scale factor 0.720475\nI1207 23:37:17.941459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13774 > 2) by scale factor 0.637401\nI1207 23:37:22.133491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50916 > 2) by scale factor 0.79708\nI1207 23:37:26.326155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65827 > 2) by scale factor 0.752369\nI1207 23:37:30.519343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09426 > 2) by scale factor 0.954993\nI1207 23:37:34.712610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43227 > 2) by scale factor 0.582704\nI1207 23:37:43.094802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16631 > 2) by scale factor 0.631651\nI1207 23:37:47.287955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46904 > 2) by scale factor 0.810032\nI1207 
23:37:51.480814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93753 > 2) by scale factor 0.680845\nI1207 23:37:55.672929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71616 > 2) by scale factor 0.736333\nI1207 23:37:59.866019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26189 > 2) by scale factor 0.613141\nI1207 23:38:04.057647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80544 > 2) by scale factor 0.525563\nI1207 23:38:08.250708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39596 > 2) by scale factor 0.588934\nI1207 23:38:12.442632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44726 > 2) by scale factor 0.449715\nI1207 23:38:16.634529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29499 > 2) by scale factor 0.606981\nI1207 23:38:20.827958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58573 > 2) by scale factor 0.557766\nI1207 23:38:25.021701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94104 > 2) by scale factor 0.680031\nI1207 23:38:29.213148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06415 > 2) by scale factor 0.492108\nI1207 23:38:33.406090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02129 > 2) by scale factor 0.497353\nI1207 23:38:37.598414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75553 > 2) by scale factor 0.725812\nI1207 23:38:41.790323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18295 > 2) by scale factor 0.478131\nI1207 23:38:45.983542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77606 > 2) by scale factor 0.418755\nI1207 23:38:50.175814  1922 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 2.66595 > 2) by scale factor 0.750202\nI1207 23:38:54.366515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35507 > 2) by scale factor 0.596113\nI1207 23:38:58.558094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67883 > 2) by scale factor 0.543651\nI1207 23:39:02.749955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41289 > 2) by scale factor 0.586014\nI1207 23:39:06.942744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47858 > 2) by scale factor 0.574948\nI1207 23:39:11.133569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85949 > 2) by scale factor 0.699424\nI1207 23:39:15.325826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82819 > 2) by scale factor 0.707167\nI1207 23:39:19.518914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28932 > 2) by scale factor 0.873622\nI1207 23:39:23.712630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62168 > 2) by scale factor 0.55223\nI1207 23:39:27.905304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98604 > 2) by scale factor 0.669783\nI1207 23:39:32.099342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27884 > 2) by scale factor 0.609973\nI1207 23:39:36.290673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08447 > 2) by scale factor 0.959477\nI1207 23:39:44.673243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41837 > 2) by scale factor 0.827005\nI1207 23:39:48.865293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92222 > 2) by scale factor 0.509915\nI1207 23:39:53.058600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53609 > 2) by scale factor 
0.565596\nI1207 23:39:57.250769  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49363 > 2) by scale factor 0.802043\nI1207 23:40:01.443258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19237 > 2) by scale factor 0.626494\nI1207 23:40:05.635511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48253 > 2) by scale factor 0.805629\nI1207 23:40:09.828119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87942 > 2) by scale factor 0.515541\nI1207 23:40:14.021140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05261 > 2) by scale factor 0.493509\nI1207 23:40:18.212393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90179 > 2) by scale factor 0.512586\nI1207 23:40:22.405037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86516 > 2) by scale factor 0.517443\nI1207 23:40:26.596686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67867 > 2) by scale factor 0.543674\nI1207 23:40:30.789028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3899 > 2) by scale factor 0.836856\nI1207 23:40:34.982296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68505 > 2) by scale factor 0.744864\nI1207 23:40:39.175108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11961 > 2) by scale factor 0.943572\nI1207 23:40:43.367501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12445 > 2) by scale factor 0.640113\nI1207 23:40:47.560597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24005 > 2) by scale factor 0.892837\nI1207 23:40:51.753788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21044 > 2) by scale factor 0.622968\nI1207 23:40:55.946223  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.24196 > 2) by scale factor 0.892077\nI1207 23:41:00.138236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38463 > 2) by scale factor 0.838705\nI1207 23:41:04.329977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35148 > 2) by scale factor 0.596751\nI1207 23:41:08.523401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15592 > 2) by scale factor 0.633729\nI1207 23:41:12.716332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34296 > 2) by scale factor 0.853621\nI1207 23:41:16.907701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08362 > 2) by scale factor 0.959867\nI1207 23:41:21.099270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44581 > 2) by scale factor 0.580415\nI1207 23:41:25.291594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80387 > 2) by scale factor 0.7133\nI1207 23:41:29.483786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71927 > 2) by scale factor 0.537739\nI1207 23:41:33.676892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59761 > 2) by scale factor 0.769939\nI1207 23:41:37.868821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8123 > 2) by scale factor 0.711161\nI1207 23:41:42.061319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38776 > 2) by scale factor 0.837606\nI1207 23:41:46.253598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57916 > 2) by scale factor 0.558791\nI1207 23:41:50.446090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29916 > 2) by scale factor 0.606215\nI1207 23:41:54.638258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91431 > 2) by scale factor 
0.686269\nI1207 23:41:58.830063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82136 > 2) by scale factor 0.708877\nI1207 23:42:03.021803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1901 > 2) by scale factor 0.477316\nI1207 23:42:07.213258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53618 > 2) by scale factor 0.788587\nI1207 23:42:11.404373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78583 > 2) by scale factor 0.717919\nI1207 23:42:15.597754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84746 > 2) by scale factor 0.702381\nI1207 23:42:19.791261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06988 > 2) by scale factor 0.651492\nI1207 23:42:23.984257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14745 > 2) by scale factor 0.482224\nI1207 23:42:28.178014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.629 > 2) by scale factor 0.760746\nI1207 23:42:32.370036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04718 > 2) by scale factor 0.494171\nI1207 23:42:32.382030  1922 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1207 23:45:10.313813  1922 solver.cpp:404]     Test net output #0: accuracy = 0.119118\nI1207 23:45:10.314225  1922 solver.cpp:404]     Test net output #1: loss = 14.6449 (* 1 = 14.6449 loss)\nI1207 23:45:14.254071  1922 solver.cpp:228] Iteration 11300, loss = 13.8449\nI1207 23:45:14.254122  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 23:45:14.254148  1922 solver.cpp:244]     Train net output #1: loss = 13.8449 (* 1 = 13.8449 loss)\nI1207 23:45:14.493814  1922 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1207 23:45:14.504046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37116 > 2) by scale factor 
0.457544\nI1207 23:45:18.694907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47335 > 2) by scale factor 0.447093\nI1207 23:45:22.885876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74179 > 2) by scale factor 0.534504\nI1207 23:45:27.076020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20295 > 2) by scale factor 0.624425\nI1207 23:45:31.266814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70006 > 2) by scale factor 0.540532\nI1207 23:45:35.457754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7027 > 2) by scale factor 0.740001\nI1207 23:45:43.838704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58636 > 2) by scale factor 0.557668\nI1207 23:45:48.031150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51555 > 2) by scale factor 0.795056\nI1207 23:45:52.222770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03708 > 2) by scale factor 0.495408\nI1207 23:45:56.412550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01566 > 2) by scale factor 0.99223\nI1207 23:46:00.603754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14514 > 2) by scale factor 0.932338\nI1207 23:46:04.794963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12576 > 2) by scale factor 0.484759\nI1207 23:46:08.985694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67013 > 2) by scale factor 0.54494\nI1207 23:46:13.176461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92263 > 2) by scale factor 0.684316\nI1207 23:46:17.367808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85987 > 2) by scale factor 0.518152\nI1207 23:46:25.748323  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.9694 > 2) by scale factor 0.673537\nI1207 23:46:29.938601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69459 > 2) by scale factor 0.541333\nI1207 23:46:34.130362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08835 > 2) by scale factor 0.647595\nI1207 23:46:38.320471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76717 > 2) by scale factor 0.530903\nI1207 23:46:42.511168  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68531 > 2) by scale factor 0.744794\nI1207 23:46:46.702334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93215 > 2) by scale factor 0.682094\nI1207 23:46:50.894052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19328 > 2) by scale factor 0.626315\nI1207 23:46:55.084887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83717 > 2) by scale factor 0.704929\nI1207 23:46:59.277212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85663 > 2) by scale factor 0.700126\nI1207 23:47:03.468154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57248 > 2) by scale factor 0.777459\nI1207 23:47:07.660075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.683 > 2) by scale factor 0.543036\nI1207 23:47:11.850188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82639 > 2) by scale factor 0.707616\nI1207 23:47:16.041891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44837 > 2) by scale factor 0.81687\nI1207 23:47:20.233299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64057 > 2) by scale factor 0.549365\nI1207 23:47:24.424612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31687 > 2) by scale factor 
0.463299\nI1207 23:47:28.616654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62639 > 2) by scale factor 0.432302\nI1207 23:47:32.808943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16714 > 2) by scale factor 0.922877\nI1207 23:47:37.000263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07479 > 2) by scale factor 0.650452\nI1207 23:47:41.189224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38427 > 2) by scale factor 0.590969\nI1207 23:47:45.368297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38412 > 2) by scale factor 0.590995\nI1207 23:47:49.548454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06117 > 2) by scale factor 0.970322\nI1207 23:47:53.728410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12619 > 2) by scale factor 0.639756\nI1207 23:47:57.908452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77414 > 2) by scale factor 0.720944\nI1207 23:48:02.088879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.363 > 2) by scale factor 0.846381\nI1207 23:48:06.268661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06656 > 2) by scale factor 0.652196\nI1207 23:48:10.446935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91134 > 2) by scale factor 0.686969\nI1207 23:48:14.625530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5001 > 2) by scale factor 0.571412\nI1207 23:48:18.804963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1707 > 2) by scale factor 0.630775\nI1207 23:48:22.983872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92006 > 2) by scale factor 0.684917\nI1207 23:48:27.163671  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.29404 > 2) by scale factor 0.465761\nI1207 23:48:31.343106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06379 > 2) by scale factor 0.652787\nI1207 23:48:35.522861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5326 > 2) by scale factor 0.566155\nI1207 23:48:39.702747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95108 > 2) by scale factor 0.677717\nI1207 23:48:43.882009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38529 > 2) by scale factor 0.838472\nI1207 23:48:48.061972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24152 > 2) by scale factor 0.471529\nI1207 23:48:52.240921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0626 > 2) by scale factor 0.492296\nI1207 23:48:56.419598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11969 > 2) by scale factor 0.64109\nI1207 23:49:00.598439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97472 > 2) by scale factor 0.672332\nI1207 23:49:04.777979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87696 > 2) by scale factor 0.695179\nI1207 23:49:08.957530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84992 > 2) by scale factor 0.519491\nI1207 23:49:13.136260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02364 > 2) by scale factor 0.988318\nI1207 23:49:17.314878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22034 > 2) by scale factor 0.621052\nI1207 23:49:21.493167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44562 > 2) by scale factor 0.817787\nI1207 23:49:25.672808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46176 > 2) by scale factor 
0.812427\nI1207 23:49:29.852314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72706 > 2) by scale factor 0.73339\nI1207 23:49:34.032191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93838 > 2) by scale factor 0.680647\nI1207 23:49:38.210893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21017 > 2) by scale factor 0.623021\nI1207 23:49:42.391824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66304 > 2) by scale factor 0.428905\nI1207 23:49:46.570667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05708 > 2) by scale factor 0.492966\nI1207 23:49:50.749608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24885 > 2) by scale factor 0.615603\nI1207 23:49:54.928822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32558 > 2) by scale factor 0.601399\nI1207 23:49:59.108330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71426 > 2) by scale factor 0.73685\nI1207 23:50:07.464223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84522 > 2) by scale factor 0.702933\nI1207 23:50:11.643896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55254 > 2) by scale factor 0.783532\nI1207 23:50:15.823431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53648 > 2) by scale factor 0.788495\nI1207 23:50:20.001833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5669 > 2) by scale factor 0.779149\nI1207 23:50:24.180768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33903 > 2) by scale factor 0.855057\nI1207 23:50:28.360414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16875 > 2) by scale factor 0.92219\nI1207 23:50:32.539192  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.58976 > 2) by scale factor 0.55714\nI1207 23:50:36.718605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71927 > 2) by scale factor 0.53774\nI1207 23:50:40.897644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28161 > 2) by scale factor 0.876576\nI1207 23:50:45.075387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79303 > 2) by scale factor 0.716069\nI1207 23:50:49.255250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14708 > 2) by scale factor 0.931499\nI1207 23:50:53.433212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21142 > 2) by scale factor 0.904397\nI1207 23:50:57.612677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12329 > 2) by scale factor 0.941933\nI1207 23:51:01.791702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61244 > 2) by scale factor 0.553642\nI1207 23:51:05.970703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27957 > 2) by scale factor 0.609835\nI1207 23:51:10.149539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1934 > 2) by scale factor 0.911828\nI1207 23:51:14.329529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86431 > 2) by scale factor 0.698248\nI1207 23:51:18.509032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18063 > 2) by scale factor 0.917167\nI1207 23:51:22.689379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2279 > 2) by scale factor 0.619598\nI1207 23:51:26.868602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70776 > 2) by scale factor 0.539409\nI1207 23:51:31.047516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54345 > 2) by scale factor 
0.786333\nI1207 23:51:39.403398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42439 > 2) by scale factor 0.824949\nI1207 23:51:43.582070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95999 > 2) by scale factor 0.505052\nI1207 23:51:47.760892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70607 > 2) by scale factor 0.539655\nI1207 23:51:51.939599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29574 > 2) by scale factor 0.606844\nI1207 23:51:56.119720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20766 > 2) by scale factor 0.475323\nI1207 23:52:00.297598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04932 > 2) by scale factor 0.655884\nI1207 23:52:04.475247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07551 > 2) by scale factor 0.490736\nI1207 23:52:08.653827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20278 > 2) by scale factor 0.475875\nI1207 23:52:08.665837  1922 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1207 23:54:46.537315  1922 solver.cpp:404]     Test net output #0: accuracy = 0.164647\nI1207 23:54:46.537756  1922 solver.cpp:404]     Test net output #1: loss = 18.0692 (* 1 = 18.0692 loss)\nI1207 23:54:50.477177  1922 solver.cpp:228] Iteration 11400, loss = 16.6468\nI1207 23:54:50.477228  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 23:54:50.477253  1922 solver.cpp:244]     Train net output #1: loss = 16.6468 (* 1 = 16.6468 loss)\nI1207 23:54:50.707701  1922 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1207 23:54:50.717968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02335 > 2) by scale factor 0.497098\nI1207 23:54:54.899907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62095 > 2) by scale 
factor 0.763082\nI1207 23:54:59.082350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97547 > 2) by scale factor 0.672163\nI1207 23:55:03.263509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56629 > 2) by scale factor 0.560807\nI1207 23:55:07.443789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66322 > 2) by scale factor 0.545968\nI1207 23:55:11.626832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80288 > 2) by scale factor 0.525918\nI1207 23:55:15.806880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44931 > 2) by scale factor 0.816556\nI1207 23:55:19.988106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47474 > 2) by scale factor 0.575583\nI1207 23:55:24.170451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00792 > 2) by scale factor 0.499012\nI1207 23:55:28.351342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39892 > 2) by scale factor 0.833709\nI1207 23:55:32.532461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14202 > 2) by scale factor 0.482856\nI1207 23:55:36.714829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54261 > 2) by scale factor 0.786593\nI1207 23:55:40.895982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21361 > 2) by scale factor 0.474653\nI1207 23:55:45.075515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92432 > 2) by scale factor 0.509643\nI1207 23:55:49.256433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38576 > 2) by scale factor 0.838307\nI1207 23:55:53.437021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99043 > 2) by scale factor 0.668801\nI1207 23:55:57.618464  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.8175 > 2) by scale factor 0.70985\nI1207 23:56:01.799516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33078 > 2) by scale factor 0.60046\nI1207 23:56:05.979226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42074 > 2) by scale factor 0.584669\nI1207 23:56:10.158666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49612 > 2) by scale factor 0.572062\nI1207 23:56:14.340657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41126 > 2) by scale factor 0.586294\nI1207 23:56:18.520323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77509 > 2) by scale factor 0.720698\nI1207 23:56:22.699703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61966 > 2) by scale factor 0.763457\nI1207 23:56:26.880491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72637 > 2) by scale factor 0.733576\nI1207 23:56:31.062083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48856 > 2) by scale factor 0.445578\nI1207 23:56:35.242933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93691 > 2) by scale factor 0.680989\nI1207 23:56:39.423346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51814 > 2) by scale factor 0.794239\nI1207 23:56:43.604650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0911 > 2) by scale factor 0.488867\nI1207 23:56:47.785259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02068 > 2) by scale factor 0.662102\nI1207 23:56:51.965436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07253 > 2) by scale factor 0.491096\nI1207 23:56:56.146201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18049 > 2) by 
scale factor 0.628833\nI1207 23:57:00.327518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24373 > 2) by scale factor 0.891374\nI1207 23:57:04.508394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81824 > 2) by scale factor 0.523802\nI1207 23:57:08.689532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97077 > 2) by scale factor 0.503681\nI1207 23:57:12.870210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50842 > 2) by scale factor 0.797315\nI1207 23:57:17.051050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66096 > 2) by scale factor 0.751609\nI1207 23:57:21.232564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35942 > 2) by scale factor 0.847667\nI1207 23:57:25.413494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49292 > 2) by scale factor 0.445144\nI1207 23:57:29.593643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99364 > 2) by scale factor 0.668084\nI1207 23:57:33.774474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0763 > 2) by scale factor 0.490641\nI1207 23:57:37.956092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99266 > 2) by scale factor 0.500919\nI1207 23:57:42.136181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26401 > 2) by scale factor 0.612744\nI1207 23:57:46.317006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71339 > 2) by scale factor 0.538592\nI1207 23:57:50.497478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95357 > 2) by scale factor 0.505872\nI1207 23:57:54.677711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83942 > 2) by scale factor 0.520912\nI1207 23:57:58.858625  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21555 > 2) by scale factor 0.621978\nI1207 23:58:03.040442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15108 > 2) by scale factor 0.634702\nI1207 23:58:07.220794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29903 > 2) by scale factor 0.465221\nI1207 23:58:11.403290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17803 > 2) by scale factor 0.62932\nI1207 23:58:15.585942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93992 > 2) by scale factor 0.507625\nI1207 23:58:19.767449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94182 > 2) by scale factor 0.679851\nI1207 23:58:23.948200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88509 > 2) by scale factor 0.514789\nI1207 23:58:28.129878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67632 > 2) by scale factor 0.544022\nI1207 23:58:32.309662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99711 > 2) by scale factor 0.667309\nI1207 23:58:36.490325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41104 > 2) by scale factor 0.829517\nI1207 23:58:40.671612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13155 > 2) by scale factor 0.48408\nI1207 23:58:44.853489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72811 > 2) by scale factor 0.423002\nI1207 23:58:49.034225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63516 > 2) by scale factor 0.550182\nI1207 23:58:53.215603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89823 > 2) by scale factor 0.408311\nI1207 23:58:57.396314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.54787 > 2) by scale factor 0.78497\nI1207 23:59:01.576911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13912 > 2) by scale factor 0.934965\nI1207 23:59:05.759227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11556 > 2) by scale factor 0.64194\nI1207 23:59:09.938887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95231 > 2) by scale factor 0.677437\nI1207 23:59:14.120364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17437 > 2) by scale factor 0.630046\nI1207 23:59:18.301261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29517 > 2) by scale factor 0.871397\nI1207 23:59:22.481065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95185 > 2) by scale factor 0.677542\nI1207 23:59:26.660785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3217 > 2) by scale factor 0.602102\nI1207 23:59:30.841348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13935 > 2) by scale factor 0.934863\nI1207 23:59:35.022640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55973 > 2) by scale factor 0.561841\nI1207 23:59:39.203184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79037 > 2) by scale factor 0.71675\nI1207 23:59:43.383833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30792 > 2) by scale factor 0.86658\nI1207 23:59:47.564771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56091 > 2) by scale factor 0.561654\nI1207 23:59:51.744884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54585 > 2) by scale factor 0.785593\nI1207 23:59:55.925801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83542 > 2) by scale factor 0.705364\nI1208 00:00:00.108089  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50619 > 2) by scale factor 0.798023\nI1208 00:00:04.288249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75865 > 2) by scale factor 0.532105\nI1208 00:00:08.468565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24022 > 2) by scale factor 0.471674\nI1208 00:00:12.648468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2449 > 2) by scale factor 0.616351\nI1208 00:00:16.829768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33532 > 2) by scale factor 0.599643\nI1208 00:00:21.009003  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59977 > 2) by scale factor 0.555592\nI1208 00:00:25.190840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82225 > 2) by scale factor 0.708654\nI1208 00:00:29.371402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62591 > 2) by scale factor 0.551586\nI1208 00:00:33.552323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55822 > 2) by scale factor 0.781795\nI1208 00:00:37.732673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1841 > 2) by scale factor 0.62812\nI1208 00:00:41.915519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31665 > 2) by scale factor 0.863317\nI1208 00:00:46.096235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93915 > 2) by scale factor 0.680469\nI1208 00:00:50.276556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42196 > 2) by scale factor 0.584461\nI1208 00:00:54.457387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48705 > 2) by scale factor 0.804166\nI1208 00:00:58.638316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.71003 > 2) by scale factor 0.738\nI1208 00:01:02.818466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72194 > 2) by scale factor 0.734771\nI1208 00:01:06.999295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34941 > 2) by scale factor 0.597119\nI1208 00:01:11.179309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73577 > 2) by scale factor 0.731057\nI1208 00:01:15.359822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43715 > 2) by scale factor 0.820632\nI1208 00:01:19.541172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79142 > 2) by scale factor 0.71648\nI1208 00:01:23.720816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54423 > 2) by scale factor 0.440118\nI1208 00:01:27.901953  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61376 > 2) by scale factor 0.765181\nI1208 00:01:32.084097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04857 > 2) by scale factor 0.494001\nI1208 00:01:36.263288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43179 > 2) by scale factor 0.582786\nI1208 00:01:40.443379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8216 > 2) by scale factor 0.708819\nI1208 00:01:44.623909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05029 > 2) by scale factor 0.97547\nI1208 00:01:44.635922  1922 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1208 00:04:22.465767  1922 solver.cpp:404]     Test net output #0: accuracy = 0.20953\nI1208 00:04:22.466199  1922 solver.cpp:404]     Test net output #1: loss = 10.4514 (* 1 = 10.4514 loss)\nI1208 00:04:26.407269  1922 solver.cpp:228] Iteration 11500, loss = 10.6059\nI1208 00:04:26.407315  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1208 
00:04:26.407341  1922 solver.cpp:244]     Train net output #1: loss = 10.6059 (* 1 = 10.6059 loss)\nI1208 00:04:26.636454  1922 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1208 00:04:30.827019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96219 > 2) by scale factor 0.504771\nI1208 00:04:35.008191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67405 > 2) by scale factor 0.747929\nI1208 00:04:39.191102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56171 > 2) by scale factor 0.780729\nI1208 00:04:43.372424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8309 > 2) by scale factor 0.706489\nI1208 00:04:47.554052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35549 > 2) by scale factor 0.459191\nI1208 00:04:51.737190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1945 > 2) by scale factor 0.911371\nI1208 00:04:55.919384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22201 > 2) by scale factor 0.62073\nI1208 00:05:00.101069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43211 > 2) by scale factor 0.822333\nI1208 00:05:04.283246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81837 > 2) by scale factor 0.70963\nI1208 00:05:08.464614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81467 > 2) by scale factor 0.710562\nI1208 00:05:12.646157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14655 > 2) by scale factor 0.931726\nI1208 00:05:16.828779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05072 > 2) by scale factor 0.975269\nI1208 00:05:21.010973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02957 > 2) by scale factor 0.660159\nI1208 00:05:25.192613  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26375 > 2) by scale factor 0.612793\nI1208 00:05:29.375217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07786 > 2) by scale factor 0.490454\nI1208 00:05:33.557188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39356 > 2) by scale factor 0.835576\nI1208 00:05:37.740674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29808 > 2) by scale factor 0.870292\nI1208 00:05:41.922796  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25886 > 2) by scale factor 0.613711\nI1208 00:05:50.284528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3142 > 2) by scale factor 0.864229\nI1208 00:05:54.466552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46653 > 2) by scale factor 0.810856\nI1208 00:05:58.648028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09893 > 2) by scale factor 0.952864\nI1208 00:06:02.830837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32074 > 2) by scale factor 0.861796\nI1208 00:06:07.013005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95835 > 2) by scale factor 0.676053\nI1208 00:06:11.195787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69145 > 2) by scale factor 0.743094\nI1208 00:06:15.376461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87403 > 2) by scale factor 0.695886\nI1208 00:06:19.557296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90434 > 2) by scale factor 0.512251\nI1208 00:06:23.740105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0567 > 2) by scale factor 0.6543\nI1208 00:06:27.922021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.72434 > 2) by scale factor 0.734123\nI1208 00:06:32.103847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54971 > 2) by scale factor 0.784404\nI1208 00:06:36.285620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74295 > 2) by scale factor 0.729143\nI1208 00:06:40.468178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60023 > 2) by scale factor 0.769161\nI1208 00:06:44.649271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49619 > 2) by scale factor 0.80122\nI1208 00:06:48.830006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81721 > 2) by scale factor 0.709922\nI1208 00:06:53.011621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76142 > 2) by scale factor 0.724266\nI1208 00:06:57.193836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34171 > 2) by scale factor 0.854076\nI1208 00:07:01.374924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54551 > 2) by scale factor 0.564094\nI1208 00:07:05.555938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6515 > 2) by scale factor 0.75429\nI1208 00:07:09.737808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47009 > 2) by scale factor 0.809689\nI1208 00:07:13.919587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0832 > 2) by scale factor 0.96006\nI1208 00:07:18.101333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3948 > 2) by scale factor 0.589137\nI1208 00:07:22.282863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86752 > 2) by scale factor 0.517128\nI1208 00:07:26.463793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77584 > 2) by scale factor 0.720502\nI1208 00:07:30.646368  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35853 > 2) by scale factor 0.45887\nI1208 00:07:34.827458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11708 > 2) by scale factor 0.944697\nI1208 00:07:39.008532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9388 > 2) by scale factor 0.507768\nI1208 00:07:43.190201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64113 > 2) by scale factor 0.54928\nI1208 00:07:47.371691  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06965 > 2) by scale factor 0.65154\nI1208 00:07:51.552794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79059 > 2) by scale factor 0.716693\nI1208 00:07:55.734684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77193 > 2) by scale factor 0.530233\nI1208 00:07:59.915771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89147 > 2) by scale factor 0.691691\nI1208 00:08:04.096588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24541 > 2) by scale factor 0.890706\nI1208 00:08:08.277546  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89497 > 2) by scale factor 0.513482\nI1208 00:08:12.458825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65249 > 2) by scale factor 0.547571\nI1208 00:08:16.640547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08977 > 2) by scale factor 0.647298\nI1208 00:08:20.822237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56843 > 2) by scale factor 0.778685\nI1208 00:08:25.005049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53634 > 2) by scale factor 0.565557\nI1208 00:08:29.186434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.81634 > 2) by scale factor 0.524062\nI1208 00:08:33.366513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86536 > 2) by scale factor 0.517417\nI1208 00:08:37.548420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77946 > 2) by scale factor 0.529176\nI1208 00:08:41.729925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60976 > 2) by scale factor 0.554053\nI1208 00:08:45.911553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01533 > 2) by scale factor 0.663277\nI1208 00:08:50.092888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33995 > 2) by scale factor 0.460835\nI1208 00:08:54.274605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88414 > 2) by scale factor 0.693448\nI1208 00:08:58.456297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71477 > 2) by scale factor 0.538391\nI1208 00:09:02.637864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70966 > 2) by scale factor 0.424659\nI1208 00:09:06.819108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08261 > 2) by scale factor 0.960335\nI1208 00:09:11.001040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37403 > 2) by scale factor 0.592763\nI1208 00:09:15.181733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48162 > 2) by scale factor 0.574446\nI1208 00:09:19.363401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80339 > 2) by scale factor 0.713422\nI1208 00:09:23.544338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92285 > 2) by scale factor 0.509834\nI1208 00:09:27.726178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52269 > 2) by scale factor 0.567747\nI1208 00:09:31.906889  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76544 > 2) by scale factor 0.723212\nI1208 00:09:36.088136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82728 > 2) by scale factor 0.522564\nI1208 00:09:40.269755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50568 > 2) by scale factor 0.570503\nI1208 00:09:44.450628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36182 > 2) by scale factor 0.846805\nI1208 00:09:48.631433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33394 > 2) by scale factor 0.599891\nI1208 00:09:52.813292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12215 > 2) by scale factor 0.942441\nI1208 00:09:56.994971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96534 > 2) by scale factor 0.674459\nI1208 00:10:01.177901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07897 > 2) by scale factor 0.649567\nI1208 00:10:05.358777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00199 > 2) by scale factor 0.499751\nI1208 00:10:09.540668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76647 > 2) by scale factor 0.531001\nI1208 00:10:13.721515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44315 > 2) by scale factor 0.580863\nI1208 00:10:17.902220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13619 > 2) by scale factor 0.637715\nI1208 00:10:22.083328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06922 > 2) by scale factor 0.966546\nI1208 00:10:26.264616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08091 > 2) by scale factor 0.490087\nI1208 00:10:30.445287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.06154 > 2) by scale factor 0.492425\nI1208 00:10:34.626288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12374 > 2) by scale factor 0.640259\nI1208 00:10:38.807283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76446 > 2) by scale factor 0.72347\nI1208 00:10:42.988468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88035 > 2) by scale factor 0.515417\nI1208 00:10:47.168833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32639 > 2) by scale factor 0.462279\nI1208 00:10:51.349274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7229 > 2) by scale factor 0.537215\nI1208 00:10:55.530334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15881 > 2) by scale factor 0.480906\nI1208 00:10:59.711102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85438 > 2) by scale factor 0.51889\nI1208 00:11:03.892134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63377 > 2) by scale factor 0.550393\nI1208 00:11:08.073406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03498 > 2) by scale factor 0.658982\nI1208 00:11:12.254474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61603 > 2) by scale factor 0.764516\nI1208 00:11:16.435699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63445 > 2) by scale factor 0.550289\nI1208 00:11:20.616917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69273 > 2) by scale factor 0.541605\nI1208 00:11:20.628856  1922 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1208 00:13:58.448650  1922 solver.cpp:404]     Test net output #0: accuracy = 0.187824\nI1208 00:13:58.449089  1922 solver.cpp:404]     Test net output #1: loss = 11.6873 (* 1 = 11.6873 loss)\nI1208 00:14:02.388490  1922 
solver.cpp:228] Iteration 11600, loss = 11.586\nI1208 00:14:02.388543  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1208 00:14:02.388568  1922 solver.cpp:244]     Train net output #1: loss = 11.586 (* 1 = 11.586 loss)\nI1208 00:14:02.621214  1922 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1208 00:14:02.631371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56865 > 2) by scale factor 0.778619\nI1208 00:14:06.812227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11877 > 2) by scale factor 0.641279\nI1208 00:14:10.992771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7167 > 2) by scale factor 0.736186\nI1208 00:14:15.173171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00789 > 2) by scale factor 0.996071\nI1208 00:14:19.353798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84574 > 2) by scale factor 0.702804\nI1208 00:14:23.534793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86448 > 2) by scale factor 0.517534\nI1208 00:14:27.715924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3732 > 2) by scale factor 0.457331\nI1208 00:14:31.897017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6351 > 2) by scale factor 0.550191\nI1208 00:14:36.078579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01108 > 2) by scale factor 0.664212\nI1208 00:14:40.260021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22881 > 2) by scale factor 0.89734\nI1208 00:14:44.440381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0068 > 2) by scale factor 0.665159\nI1208 00:14:48.621635  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09462 > 2) by scale factor 0.646282\nI1208 00:14:52.802325  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52885 > 2) by scale factor 0.566758\nI1208 00:14:56.983281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70269 > 2) by scale factor 0.740004\nI1208 00:15:01.164687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98317 > 2) by scale factor 0.502113\nI1208 00:15:05.345417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05828 > 2) by scale factor 0.653962\nI1208 00:15:09.526068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67786 > 2) by scale factor 0.746864\nI1208 00:15:13.707132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56796 > 2) by scale factor 0.778829\nI1208 00:15:17.888778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18497 > 2) by scale factor 0.915346\nI1208 00:15:22.069983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30981 > 2) by scale factor 0.86587\nI1208 00:15:26.251833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02357 > 2) by scale factor 0.988355\nI1208 00:15:30.431582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9452 > 2) by scale factor 0.679071\nI1208 00:15:34.611853  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73654 > 2) by scale factor 0.535254\nI1208 00:15:38.793293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22013 > 2) by scale factor 0.621094\nI1208 00:15:47.152962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71378 > 2) by scale factor 0.736978\nI1208 00:15:51.333312  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03929 > 2) by scale factor 0.658049\nI1208 00:15:59.691046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.13241 > 2) by scale factor 0.937907\nI1208 00:16:03.871281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3937 > 2) by scale factor 0.589328\nI1208 00:16:08.051723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01629 > 2) by scale factor 0.663066\nI1208 00:16:12.232436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95766 > 2) by scale factor 0.676209\nI1208 00:16:16.412425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58437 > 2) by scale factor 0.557978\nI1208 00:16:20.592789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2953 > 2) by scale factor 0.606925\nI1208 00:16:24.772795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66839 > 2) by scale factor 0.749516\nI1208 00:16:28.952702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44254 > 2) by scale factor 0.818819\nI1208 00:16:33.133623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17131 > 2) by scale factor 0.630653\nI1208 00:16:37.314258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95393 > 2) by scale factor 0.677064\nI1208 00:16:41.493165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49551 > 2) by scale factor 0.801438\nI1208 00:16:49.850244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69683 > 2) by scale factor 0.541003\nI1208 00:16:54.031179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54458 > 2) by scale factor 0.564242\nI1208 00:16:58.212934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26967 > 2) by scale factor 0.881185\nI1208 00:17:02.393173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61 > 2) by scale factor 0.766284\nI1208 00:17:06.573474  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73037 > 2) by scale factor 0.4228\nI1208 00:17:10.754389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50016 > 2) by scale factor 0.799949\nI1208 00:17:14.934098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11549 > 2) by scale factor 0.945406\nI1208 00:17:19.114346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29386 > 2) by scale factor 0.871891\nI1208 00:17:23.294687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42754 > 2) by scale factor 0.583509\nI1208 00:17:27.475294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16417 > 2) by scale factor 0.632078\nI1208 00:17:31.655236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53034 > 2) by scale factor 0.790408\nI1208 00:17:35.834743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80508 > 2) by scale factor 0.525614\nI1208 00:17:40.014578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19875 > 2) by scale factor 0.625244\nI1208 00:17:44.195369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19441 > 2) by scale factor 0.626093\nI1208 00:17:48.374750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89212 > 2) by scale factor 0.691534\nI1208 00:17:52.553972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36766 > 2) by scale factor 0.593885\nI1208 00:17:56.734477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17041 > 2) by scale factor 0.630833\nI1208 00:18:00.914714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05097 > 2) by scale factor 0.65553\nI1208 00:18:05.095213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.85888 > 2) by scale factor 0.699576\nI1208 00:18:09.276850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28185 > 2) by scale factor 0.876481\nI1208 00:18:13.457330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17135 > 2) by scale factor 0.479462\nI1208 00:18:17.637799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92488 > 2) by scale factor 0.50957\nI1208 00:18:21.819396  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44482 > 2) by scale factor 0.818055\nI1208 00:18:26.000123  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84085 > 2) by scale factor 0.704016\nI1208 00:18:30.179397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67405 > 2) by scale factor 0.544359\nI1208 00:18:34.360884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85653 > 2) by scale factor 0.700149\nI1208 00:18:38.541679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73508 > 2) by scale factor 0.731241\nI1208 00:18:42.722533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53471 > 2) by scale factor 0.565817\nI1208 00:18:46.903085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87348 > 2) by scale factor 0.696019\nI1208 00:18:51.083310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70712 > 2) by scale factor 0.539503\nI1208 00:18:55.262642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06912 > 2) by scale factor 0.651652\nI1208 00:18:59.442648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85195 > 2) by scale factor 0.701275\nI1208 00:19:03.624035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03284 > 2) by scale factor 0.659447\nI1208 00:19:07.804464  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58662 > 2) by scale factor 0.77321\nI1208 00:19:11.984210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77773 > 2) by scale factor 0.529418\nI1208 00:19:16.164644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31842 > 2) by scale factor 0.862657\nI1208 00:19:20.345100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43792 > 2) by scale factor 0.82037\nI1208 00:19:24.524673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30394 > 2) by scale factor 0.605338\nI1208 00:19:28.704744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24456 > 2) by scale factor 0.616416\nI1208 00:19:32.884768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06353 > 2) by scale factor 0.652842\nI1208 00:19:37.064018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38641 > 2) by scale factor 0.83808\nI1208 00:19:41.243423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26703 > 2) by scale factor 0.882212\nI1208 00:19:45.423334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99456 > 2) by scale factor 0.667877\nI1208 00:19:49.602644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55332 > 2) by scale factor 0.783295\nI1208 00:19:53.782574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94589 > 2) by scale factor 0.678911\nI1208 00:19:57.964402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6322 > 2) by scale factor 0.550631\nI1208 00:20:02.144191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15251 > 2) by scale factor 0.634416\nI1208 00:20:06.323945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.15271 > 2) by scale factor 0.634374\nI1208 00:20:10.504560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81517 > 2) by scale factor 0.710436\nI1208 00:20:14.685250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99543 > 2) by scale factor 0.500572\nI1208 00:20:18.866142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18134 > 2) by scale factor 0.628665\nI1208 00:20:23.045887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64015 > 2) by scale factor 0.757534\nI1208 00:20:27.225860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07657 > 2) by scale factor 0.650074\nI1208 00:20:31.406110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68953 > 2) by scale factor 0.743625\nI1208 00:20:35.586479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83174 > 2) by scale factor 0.706279\nI1208 00:20:39.767235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72284 > 2) by scale factor 0.537224\nI1208 00:20:43.946276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33512 > 2) by scale factor 0.461348\nI1208 00:20:48.126677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70535 > 2) by scale factor 0.53976\nI1208 00:20:52.306190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6757 > 2) by scale factor 0.544114\nI1208 00:20:56.486076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45544 > 2) by scale factor 0.814519\nI1208 00:20:56.498085  1922 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1208 00:23:34.333657  1922 solver.cpp:404]     Test net output #0: accuracy = 0.204\nI1208 00:23:34.334094  1922 solver.cpp:404]     Test net output #1: loss = 12.1762 (* 1 = 12.1762 loss)\nI1208 00:23:38.276338  1922 
solver.cpp:228] Iteration 11700, loss = 12.5893\nI1208 00:23:38.276393  1922 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1208 00:23:38.276420  1922 solver.cpp:244]     Train net output #1: loss = 12.5893 (* 1 = 12.5893 loss)\nI1208 00:23:38.500795  1922 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1208 00:23:38.511023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50052 > 2) by scale factor 0.799834\nI1208 00:23:46.863679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21133 > 2) by scale factor 0.622795\nI1208 00:23:51.040904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17673 > 2) by scale factor 0.629578\nI1208 00:23:55.218986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15443 > 2) by scale factor 0.634029\nI1208 00:23:59.397706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77429 > 2) by scale factor 0.720904\nI1208 00:24:03.576337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11423 > 2) by scale factor 0.486118\nI1208 00:24:07.753453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80301 > 2) by scale factor 0.71352\nI1208 00:24:11.930017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.229 > 2) by scale factor 0.897264\nI1208 00:24:16.106896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6804 > 2) by scale factor 0.746157\nI1208 00:24:20.283288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35672 > 2) by scale factor 0.59582\nI1208 00:24:24.459048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61588 > 2) by scale factor 0.553116\nI1208 00:24:28.635792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8829 > 2) by scale factor 0.693746\nI1208 00:24:32.813949  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53965 > 2) by scale factor 0.565027\nI1208 00:24:41.166232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01994 > 2) by scale factor 0.990129\nI1208 00:24:45.343945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25956 > 2) by scale factor 0.885128\nI1208 00:24:49.520889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44111 > 2) by scale factor 0.581208\nI1208 00:24:53.698159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00005 > 2) by scale factor 0.666656\nI1208 00:24:57.875304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77657 > 2) by scale factor 0.720314\nI1208 00:25:02.052075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19951 > 2) by scale factor 0.909294\nI1208 00:25:06.229048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66191 > 2) by scale factor 0.546163\nI1208 00:25:10.406646  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12213 > 2) by scale factor 0.640589\nI1208 00:25:14.583297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95888 > 2) by scale factor 0.505194\nI1208 00:25:18.761106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60518 > 2) by scale factor 0.767701\nI1208 00:25:22.938601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0057 > 2) by scale factor 0.665402\nI1208 00:25:27.115618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30513 > 2) by scale factor 0.867628\nI1208 00:25:35.467880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87067 > 2) by scale factor 0.516707\nI1208 00:25:39.646286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.18025 > 2) by scale factor 0.917325\nI1208 00:25:43.824043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49178 > 2) by scale factor 0.572774\nI1208 00:25:48.000039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05719 > 2) by scale factor 0.654196\nI1208 00:25:52.177649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75718 > 2) by scale factor 0.725379\nI1208 00:25:56.354660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81761 > 2) by scale factor 0.709822\nI1208 00:26:00.532382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83493 > 2) by scale factor 0.705485\nI1208 00:26:04.708653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04599 > 2) by scale factor 0.494317\nI1208 00:26:08.886178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28957 > 2) by scale factor 0.466247\nI1208 00:26:13.063915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32162 > 2) by scale factor 0.861466\nI1208 00:26:17.240707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96924 > 2) by scale factor 0.673573\nI1208 00:26:21.417433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01003 > 2) by scale factor 0.664446\nI1208 00:26:25.594717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36807 > 2) by scale factor 0.844571\nI1208 00:26:29.771384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75442 > 2) by scale factor 0.726106\nI1208 00:26:33.948221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81459 > 2) by scale factor 0.710583\nI1208 00:26:38.124861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52802 > 2) by scale factor 0.791132\nI1208 00:26:42.302067  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15227 > 2) by scale factor 0.929251\nI1208 00:26:46.479491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40954 > 2) by scale factor 0.58659\nI1208 00:26:50.656216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80758 > 2) by scale factor 0.712357\nI1208 00:26:54.832617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4279 > 2) by scale factor 0.823756\nI1208 00:26:59.009810  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92297 > 2) by scale factor 0.684236\nI1208 00:27:03.186803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91374 > 2) by scale factor 0.686403\nI1208 00:27:07.363416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44518 > 2) by scale factor 0.580521\nI1208 00:27:11.539881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05981 > 2) by scale factor 0.653635\nI1208 00:27:15.717430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91139 > 2) by scale factor 0.686957\nI1208 00:27:19.893929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74772 > 2) by scale factor 0.533658\nI1208 00:27:24.069911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0891 > 2) by scale factor 0.489106\nI1208 00:27:28.247180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72447 > 2) by scale factor 0.423328\nI1208 00:27:32.424254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25516 > 2) by scale factor 0.614409\nI1208 00:27:36.601459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02337 > 2) by scale factor 0.988452\nI1208 00:27:40.778211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.38745 > 2) by scale factor 0.590414\nI1208 00:27:44.955869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19251 > 2) by scale factor 0.626467\nI1208 00:27:49.133360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51016 > 2) by scale factor 0.569774\nI1208 00:27:57.485565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39337 > 2) by scale factor 0.835643\nI1208 00:28:05.837249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16822 > 2) by scale factor 0.631269\nI1208 00:28:10.015158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54104 > 2) by scale factor 0.564807\nI1208 00:28:14.192879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03609 > 2) by scale factor 0.982275\nI1208 00:28:18.369175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92822 > 2) by scale factor 0.68301\nI1208 00:28:22.544422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41349 > 2) by scale factor 0.585911\nI1208 00:28:26.720336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8799 > 2) by scale factor 0.694469\nI1208 00:28:30.897944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74321 > 2) by scale factor 0.729073\nI1208 00:28:35.075258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08258 > 2) by scale factor 0.648808\nI1208 00:28:39.252081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66711 > 2) by scale factor 0.545388\nI1208 00:28:43.429405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19899 > 2) by scale factor 0.625197\nI1208 00:28:47.606559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22356 > 2) by scale factor 0.620433\nI1208 00:28:55.957777  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18259 > 2) by scale factor 0.628419\nI1208 00:29:00.135318  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04755 > 2) by scale factor 0.656264\nI1208 00:29:04.312117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97532 > 2) by scale factor 0.672196\nI1208 00:29:08.487926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19908 > 2) by scale factor 0.909472\nI1208 00:29:12.664201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22843 > 2) by scale factor 0.619496\nI1208 00:29:16.840517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91166 > 2) by scale factor 0.511292\nI1208 00:29:21.016595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31576 > 2) by scale factor 0.863647\nI1208 00:29:25.192904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73412 > 2) by scale factor 0.731498\nI1208 00:29:29.369680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06267 > 2) by scale factor 0.492287\nI1208 00:29:33.546732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88571 > 2) by scale factor 0.514706\nI1208 00:29:37.724791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09178 > 2) by scale factor 0.488784\nI1208 00:29:41.901965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02125 > 2) by scale factor 0.497358\nI1208 00:29:46.078052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63567 > 2) by scale factor 0.550105\nI1208 00:29:50.254037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56566 > 2) by scale factor 0.779527\nI1208 00:29:54.429826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.81485 > 2) by scale factor 0.524267\nI1208 00:29:58.607406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94044 > 2) by scale factor 0.68017\nI1208 00:30:02.784610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39374 > 2) by scale factor 0.835511\nI1208 00:30:06.960907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44319 > 2) by scale factor 0.580856\nI1208 00:30:11.137070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97033 > 2) by scale factor 0.673326\nI1208 00:30:15.313113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29282 > 2) by scale factor 0.87229\nI1208 00:30:19.488992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73778 > 2) by scale factor 0.730519\nI1208 00:30:23.666750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24594 > 2) by scale factor 0.890497\nI1208 00:30:27.843930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98263 > 2) by scale factor 0.50218\nI1208 00:30:32.021950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53894 > 2) by scale factor 0.565141\nI1208 00:30:32.033918  1922 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1208 00:33:09.593190  1922 solver.cpp:404]     Test net output #0: accuracy = 0.171177\nI1208 00:33:09.593627  1922 solver.cpp:404]     Test net output #1: loss = 8.10675 (* 1 = 8.10675 loss)\nI1208 00:33:13.534495  1922 solver.cpp:228] Iteration 11800, loss = 7.29803\nI1208 00:33:13.534551  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1208 00:33:13.534574  1922 solver.cpp:244]     Train net output #1: loss = 7.29803 (* 1 = 7.29803 loss)\nI1208 00:33:13.762475  1922 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1208 00:33:13.772675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.98227 > 2) by scale factor 0.67063\nI1208 00:33:17.953831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03017 > 2) by scale factor 0.66003\nI1208 00:33:22.135614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08816 > 2) by scale factor 0.647634\nI1208 00:33:26.316640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69938 > 2) by scale factor 0.740911\nI1208 00:33:30.496932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35392 > 2) by scale factor 0.849648\nI1208 00:33:34.677909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76446 > 2) by scale factor 0.723469\nI1208 00:33:38.859185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5086 > 2) by scale factor 0.570028\nI1208 00:33:43.041419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34159 > 2) by scale factor 0.598518\nI1208 00:33:47.223295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06598 > 2) by scale factor 0.491887\nI1208 00:33:51.403630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39825 > 2) by scale factor 0.833941\nI1208 00:33:55.584578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00693 > 2) by scale factor 0.66513\nI1208 00:33:59.767058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30423 > 2) by scale factor 0.605284\nI1208 00:34:03.948210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51254 > 2) by scale factor 0.569389\nI1208 00:34:08.128350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59522 > 2) by scale factor 0.556294\nI1208 00:34:12.309726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68855 > 2) by scale factor 0.743894\nI1208 00:34:16.491124  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64235 > 2) by scale factor 0.756903\nI1208 00:34:20.672880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75135 > 2) by scale factor 0.420933\nI1208 00:34:24.854332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72262 > 2) by scale factor 0.537256\nI1208 00:34:29.036192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58351 > 2) by scale factor 0.558111\nI1208 00:34:33.217821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95475 > 2) by scale factor 0.676875\nI1208 00:34:37.399780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80627 > 2) by scale factor 0.416123\nI1208 00:34:41.581881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01508 > 2) by scale factor 0.498123\nI1208 00:34:45.763907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1557 > 2) by scale factor 0.633773\nI1208 00:34:49.944447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11771 > 2) by scale factor 0.641496\nI1208 00:34:54.125888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92919 > 2) by scale factor 0.682783\nI1208 00:34:58.306772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06078 > 2) by scale factor 0.492516\nI1208 00:35:02.487639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31675 > 2) by scale factor 0.463312\nI1208 00:35:06.667122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04616 > 2) by scale factor 0.396341\nI1208 00:35:10.849229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99533 > 2) by scale factor 0.667706\nI1208 00:35:15.029712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.35283 > 2) by scale factor 0.459471\nI1208 00:35:19.209748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28798 > 2) by scale factor 0.874135\nI1208 00:35:23.391023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81024 > 2) by scale factor 0.524901\nI1208 00:35:27.572510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40885 > 2) by scale factor 0.453633\nI1208 00:35:31.753667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53243 > 2) by scale factor 0.441264\nI1208 00:35:35.934641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17559 > 2) by scale factor 0.629804\nI1208 00:35:40.115837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31684 > 2) by scale factor 0.863245\nI1208 00:35:44.295749  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23624 > 2) by scale factor 0.472117\nI1208 00:35:48.476094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63989 > 2) by scale factor 0.549468\nI1208 00:35:52.657954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21127 > 2) by scale factor 0.622806\nI1208 00:35:56.838842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80561 > 2) by scale factor 0.525539\nI1208 00:36:01.020645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09379 > 2) by scale factor 0.646457\nI1208 00:36:05.201438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57246 > 2) by scale factor 0.777465\nI1208 00:36:09.381721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66033 > 2) by scale factor 0.751786\nI1208 00:36:13.562881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20337 > 2) by scale factor 0.907699\nI1208 
00:36:17.744312  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69539 > 2) by scale factor 0.742007\nI1208 00:36:21.925302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42753 > 2) by scale factor 0.823884\nI1208 00:36:26.106540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00565 > 2) by scale factor 0.499294\nI1208 00:36:30.288213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42973 > 2) by scale factor 0.823138\nI1208 00:36:34.470005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04075 > 2) by scale factor 0.657732\nI1208 00:36:38.652012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88349 > 2) by scale factor 0.693604\nI1208 00:36:42.835487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60671 > 2) by scale factor 0.43415\nI1208 00:36:47.015792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72922 > 2) by scale factor 0.536306\nI1208 00:36:51.197487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24242 > 2) by scale factor 0.616823\nI1208 00:36:55.380842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87047 > 2) by scale factor 0.696749\nI1208 00:36:59.562721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0207 > 2) by scale factor 0.497426\nI1208 00:37:03.743721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84482 > 2) by scale factor 0.703033\nI1208 00:37:07.926079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5461 > 2) by scale factor 0.785515\nI1208 00:37:12.106854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56623 > 2) by scale factor 0.779355\nI1208 00:37:16.287969  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 5.04748 > 2) by scale factor 0.396237\nI1208 00:37:20.470479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67897 > 2) by scale factor 0.543631\nI1208 00:37:24.652056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2242 > 2) by scale factor 0.620309\nI1208 00:37:28.832708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81099 > 2) by scale factor 0.524798\nI1208 00:37:33.014345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84631 > 2) by scale factor 0.702663\nI1208 00:37:37.195757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7255 > 2) by scale factor 0.733809\nI1208 00:37:41.378492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95135 > 2) by scale factor 0.677657\nI1208 00:37:45.560920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95916 > 2) by scale factor 0.675867\nI1208 00:37:49.741940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38241 > 2) by scale factor 0.591295\nI1208 00:37:53.923808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8018 > 2) by scale factor 0.713826\nI1208 00:37:58.104737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35078 > 2) by scale factor 0.85078\nI1208 00:38:02.285230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95294 > 2) by scale factor 0.677292\nI1208 00:38:06.466994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69037 > 2) by scale factor 0.426406\nI1208 00:38:10.649011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78111 > 2) by scale factor 0.528946\nI1208 00:38:14.830703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79047 > 2) by scale factor 0.716725\nI1208 
00:38:19.011643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32602 > 2) by scale factor 0.60132\nI1208 00:38:23.192832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77495 > 2) by scale factor 0.418853\nI1208 00:38:27.373121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92929 > 2) by scale factor 0.68276\nI1208 00:38:31.552619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37458 > 2) by scale factor 0.842254\nI1208 00:38:35.734105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29147 > 2) by scale factor 0.607631\nI1208 00:38:39.915417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89932 > 2) by scale factor 0.51291\nI1208 00:38:44.097146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59146 > 2) by scale factor 0.556877\nI1208 00:38:48.279162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62562 > 2) by scale factor 0.761726\nI1208 00:38:52.459877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88814 > 2) by scale factor 0.692487\nI1208 00:38:56.640909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74824 > 2) by scale factor 0.727738\nI1208 00:39:00.821377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33787 > 2) by scale factor 0.599184\nI1208 00:39:05.003327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44279 > 2) by scale factor 0.450168\nI1208 00:39:09.185683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1492 > 2) by scale factor 0.482021\nI1208 00:39:13.367027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99406 > 2) by scale factor 0.66799\nI1208 00:39:17.548678  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.70645 > 2) by scale factor 0.738975\nI1208 00:39:21.729408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32952 > 2) by scale factor 0.858547\nI1208 00:39:25.910398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3354 > 2) by scale factor 0.599628\nI1208 00:39:30.092324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91428 > 2) by scale factor 0.686277\nI1208 00:39:34.273331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48474 > 2) by scale factor 0.804915\nI1208 00:39:38.455024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41878 > 2) by scale factor 0.585004\nI1208 00:39:42.636255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44372 > 2) by scale factor 0.818424\nI1208 00:39:46.818768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04803 > 2) by scale factor 0.97655\nI1208 00:39:51.000293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50055 > 2) by scale factor 0.799825\nI1208 00:39:55.183164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00967 > 2) by scale factor 0.399228\nI1208 00:39:59.365622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5525 > 2) by scale factor 0.439319\nI1208 00:40:03.547657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81515 > 2) by scale factor 0.710441\nI1208 00:40:07.729876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68984 > 2) by scale factor 0.743538\nI1208 00:40:07.741817  1922 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1208 00:42:44.711267  1922 solver.cpp:404]     Test net output #0: accuracy = 0.141529\nI1208 00:42:44.711666  1922 solver.cpp:404]     Test net output #1: loss = 18.4792 (* 1 = 18.4792 loss)\nI1208 
00:42:48.651348  1922 solver.cpp:228] Iteration 11900, loss = 17.2757\nI1208 00:42:48.651403  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 00:42:48.651428  1922 solver.cpp:244]     Train net output #1: loss = 17.2757 (* 1 = 17.2757 loss)\nI1208 00:42:48.880573  1922 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1208 00:42:48.890302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01953 > 2) by scale factor 0.398443\nI1208 00:42:53.071669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43213 > 2) by scale factor 0.582729\nI1208 00:42:57.252586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02293 > 2) by scale factor 0.49715\nI1208 00:43:01.434276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29507 > 2) by scale factor 0.606967\nI1208 00:43:05.615417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86687 > 2) by scale factor 0.697625\nI1208 00:43:09.795567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69916 > 2) by scale factor 0.740972\nI1208 00:43:13.975859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42397 > 2) by scale factor 0.584117\nI1208 00:43:18.155620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87423 > 2) by scale factor 0.695839\nI1208 00:43:22.336128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46379 > 2) by scale factor 0.811757\nI1208 00:43:26.515782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72523 > 2) by scale factor 0.733884\nI1208 00:43:30.695598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80396 > 2) by scale factor 0.713278\nI1208 00:43:34.877087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22633 > 2) by scale factor 
0.473224\nI1208 00:43:39.057834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69883 > 2) by scale factor 0.540712\nI1208 00:43:43.237476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30499 > 2) by scale factor 0.464577\nI1208 00:43:47.418084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83151 > 2) by scale factor 0.521988\nI1208 00:43:51.598970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66933 > 2) by scale factor 0.545058\nI1208 00:43:55.780639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23607 > 2) by scale factor 0.472136\nI1208 00:43:59.961284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05751 > 2) by scale factor 0.972047\nI1208 00:44:04.141736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22947 > 2) by scale factor 0.619297\nI1208 00:44:08.323107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43858 > 2) by scale factor 0.581636\nI1208 00:44:12.502272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47463 > 2) by scale factor 0.808202\nI1208 00:44:16.681015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9884 > 2) by scale factor 0.669255\nI1208 00:44:20.861513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.468 > 2) by scale factor 0.810373\nI1208 00:44:25.041986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91176 > 2) by scale factor 0.68687\nI1208 00:44:29.221297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58303 > 2) by scale factor 0.774285\nI1208 00:44:33.399704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94922 > 2) by scale factor 0.678145\nI1208 00:44:37.578464  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.95019 > 2) by scale factor 0.506304\nI1208 00:44:41.756709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48178 > 2) by scale factor 0.574419\nI1208 00:44:45.934934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63847 > 2) by scale factor 0.549681\nI1208 00:44:50.113507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53668 > 2) by scale factor 0.565502\nI1208 00:44:54.291761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22408 > 2) by scale factor 0.473476\nI1208 00:44:58.470207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44276 > 2) by scale factor 0.580929\nI1208 00:45:02.648586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79007 > 2) by scale factor 0.527695\nI1208 00:45:06.827287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11128 > 2) by scale factor 0.947291\nI1208 00:45:11.005621  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9489 > 2) by scale factor 0.50647\nI1208 00:45:15.184872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55532 > 2) by scale factor 0.782681\nI1208 00:45:19.363229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77754 > 2) by scale factor 0.720062\nI1208 00:45:23.541968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09934 > 2) by scale factor 0.487884\nI1208 00:45:27.720065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53047 > 2) by scale factor 0.790367\nI1208 00:45:31.897527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79033 > 2) by scale factor 0.716761\nI1208 00:45:36.076272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88615 > 2) by scale 
factor 0.514648\nI1208 00:45:40.254050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82331 > 2) by scale factor 0.523106\nI1208 00:45:44.431584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56102 > 2) by scale factor 0.561636\nI1208 00:45:48.608815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52786 > 2) by scale factor 0.791183\nI1208 00:45:52.786821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67831 > 2) by scale factor 0.74674\nI1208 00:45:56.965852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99406 > 2) by scale factor 0.500744\nI1208 00:46:01.144281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15901 > 2) by scale factor 0.926351\nI1208 00:46:05.321545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68332 > 2) by scale factor 0.542989\nI1208 00:46:09.500430  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38622 > 2) by scale factor 0.590628\nI1208 00:46:13.678370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65242 > 2) by scale factor 0.75403\nI1208 00:46:17.856813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1916 > 2) by scale factor 0.912575\nI1208 00:46:22.035615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40658 > 2) by scale factor 0.831054\nI1208 00:46:26.214267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87883 > 2) by scale factor 0.694727\nI1208 00:46:30.392580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60418 > 2) by scale factor 0.767995\nI1208 00:46:34.570559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03766 > 2) by scale factor 0.658402\nI1208 00:46:38.748414  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.98582 > 2) by scale factor 0.501779\nI1208 00:46:42.926970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3768 > 2) by scale factor 0.592276\nI1208 00:46:47.105024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84059 > 2) by scale factor 0.520754\nI1208 00:46:51.282156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00525 > 2) by scale factor 0.997382\nI1208 00:46:55.460636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81825 > 2) by scale factor 0.709661\nI1208 00:46:59.638779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12481 > 2) by scale factor 0.94126\nI1208 00:47:03.817297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1692 > 2) by scale factor 0.479709\nI1208 00:47:07.995231  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41105 > 2) by scale factor 0.829513\nI1208 00:47:12.174048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55431 > 2) by scale factor 0.782991\nI1208 00:47:16.352483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10154 > 2) by scale factor 0.487622\nI1208 00:47:20.530331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89267 > 2) by scale factor 0.513786\nI1208 00:47:24.709137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88163 > 2) by scale factor 0.694052\nI1208 00:47:28.888164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09006 > 2) by scale factor 0.956909\nI1208 00:47:33.066328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91944 > 2) by scale factor 0.510276\nI1208 00:47:37.244045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71273 > 2) by 
scale factor 0.538688\nI1208 00:47:41.422693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07973 > 2) by scale factor 0.961663\nI1208 00:47:45.600639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96742 > 2) by scale factor 0.673986\nI1208 00:47:49.779728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32906 > 2) by scale factor 0.858714\nI1208 00:47:53.959444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45938 > 2) by scale factor 0.578139\nI1208 00:47:58.142892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20862 > 2) by scale factor 0.905542\nI1208 00:48:02.320482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28818 > 2) by scale factor 0.874058\nI1208 00:48:06.497902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79588 > 2) by scale factor 0.715338\nI1208 00:48:10.675858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35929 > 2) by scale factor 0.45879\nI1208 00:48:14.853941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95723 > 2) by scale factor 0.676308\nI1208 00:48:19.032400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93701 > 2) by scale factor 0.508\nI1208 00:48:23.210199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3662 > 2) by scale factor 0.594142\nI1208 00:48:27.388626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39838 > 2) by scale factor 0.454713\nI1208 00:48:31.566071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8656 > 2) by scale factor 0.697933\nI1208 00:48:35.744761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78207 > 2) by scale factor 0.71889\nI1208 00:48:39.921375  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.41136 > 2) by scale factor 0.829409\nI1208 00:48:44.100020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35576 > 2) by scale factor 0.848983\nI1208 00:48:48.277781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86734 > 2) by scale factor 0.69751\nI1208 00:48:52.455629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14531 > 2) by scale factor 0.932264\nI1208 00:48:56.634580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35248 > 2) by scale factor 0.459508\nI1208 00:49:00.813463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66396 > 2) by scale factor 0.750763\nI1208 00:49:04.991467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39865 > 2) by scale factor 0.833803\nI1208 00:49:09.168658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08967 > 2) by scale factor 0.647318\nI1208 00:49:13.347172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01493 > 2) by scale factor 0.663366\nI1208 00:49:17.524504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22862 > 2) by scale factor 0.897418\nI1208 00:49:21.702965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39169 > 2) by scale factor 0.589677\nI1208 00:49:25.881021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91844 > 2) by scale factor 0.510407\nI1208 00:49:30.060139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72552 > 2) by scale factor 0.733805\nI1208 00:49:34.238322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30904 > 2) by scale factor 0.604404\nI1208 00:49:38.416187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53376 > 2) by 
scale factor 0.565969\nI1208 00:49:42.593636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57475 > 2) by scale factor 0.559479\nI1208 00:49:42.603772  1922 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1208 00:52:16.593933  1922 solver.cpp:404]     Test net output #0: accuracy = 0.183294\nI1208 00:52:16.594316  1922 solver.cpp:404]     Test net output #1: loss = 13.8415 (* 1 = 13.8415 loss)\nI1208 00:52:20.533885  1922 solver.cpp:228] Iteration 12000, loss = 12.6024\nI1208 00:52:20.533928  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1208 00:52:20.533946  1922 solver.cpp:244]     Train net output #1: loss = 12.6024 (* 1 = 12.6024 loss)\nI1208 00:52:20.759116  1922 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1208 00:52:20.768860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14403 > 2) by scale factor 0.636126\nI1208 00:52:24.946166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94093 > 2) by scale factor 0.507495\nI1208 00:52:29.124299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54219 > 2) by scale factor 0.786722\nI1208 00:52:33.300945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11851 > 2) by scale factor 0.641332\nI1208 00:52:37.477435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25848 > 2) by scale factor 0.613782\nI1208 00:52:41.654109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66912 > 2) by scale factor 0.54509\nI1208 00:52:45.832222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03863 > 2) by scale factor 0.658191\nI1208 00:52:50.009987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60092 > 2) by scale factor 0.555414\nI1208 00:52:54.185901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15183 > 2) by 
scale factor 0.92944\nI1208 00:52:58.361567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53893 > 2) by scale factor 0.565143\nI1208 00:53:02.538733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77625 > 2) by scale factor 0.720396\nI1208 00:53:06.717048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50551 > 2) by scale factor 0.798242\nI1208 00:53:10.895395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.155 > 2) by scale factor 0.633914\nI1208 00:53:15.073899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17537 > 2) by scale factor 0.629849\nI1208 00:53:19.252882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62135 > 2) by scale factor 0.552281\nI1208 00:53:23.432176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42263 > 2) by scale factor 0.825549\nI1208 00:53:27.610734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70685 > 2) by scale factor 0.539542\nI1208 00:53:31.788888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60055 > 2) by scale factor 0.555471\nI1208 00:53:35.966856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67444 > 2) by scale factor 0.5443\nI1208 00:53:40.144191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71475 > 2) by scale factor 0.424201\nI1208 00:53:44.322489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18601 > 2) by scale factor 0.477782\nI1208 00:53:48.500771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69366 > 2) by scale factor 0.742484\nI1208 00:53:52.678793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25264 > 2) by scale factor 0.614885\nI1208 00:53:56.856892  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.70164 > 2) by scale factor 0.74029\nI1208 00:54:01.034744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91718 > 2) by scale factor 0.685593\nI1208 00:54:05.212015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68797 > 2) by scale factor 0.542304\nI1208 00:54:09.389547  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04586 > 2) by scale factor 0.494333\nI1208 00:54:13.566583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49844 > 2) by scale factor 0.800498\nI1208 00:54:17.745338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05561 > 2) by scale factor 0.493145\nI1208 00:54:21.923923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19655 > 2) by scale factor 0.625675\nI1208 00:54:26.102058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13805 > 2) by scale factor 0.637338\nI1208 00:54:30.281335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98221 > 2) by scale factor 0.670643\nI1208 00:54:34.459509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00852 > 2) by scale factor 0.995756\nI1208 00:54:38.637027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41844 > 2) by scale factor 0.826978\nI1208 00:54:42.814368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23641 > 2) by scale factor 0.894291\nI1208 00:54:46.993177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02699 > 2) by scale factor 0.660723\nI1208 00:54:51.171193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91669 > 2) by scale factor 0.685709\nI1208 00:54:55.348408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01334 > 2) by 
scale factor 0.663715\nI1208 00:54:59.526239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95263 > 2) by scale factor 0.505993\nI1208 00:55:03.704049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55807 > 2) by scale factor 0.562103\nI1208 00:55:07.882295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39473 > 2) by scale factor 0.589148\nI1208 00:55:12.059700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39009 > 2) by scale factor 0.589955\nI1208 00:55:16.237740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83817 > 2) by scale factor 0.521081\nI1208 00:55:20.415823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05825 > 2) by scale factor 0.395394\nI1208 00:55:24.593164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15104 > 2) by scale factor 0.63471\nI1208 00:55:28.770771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15384 > 2) by scale factor 0.928575\nI1208 00:55:32.947934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81698 > 2) by scale factor 0.709981\nI1208 00:55:37.124634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2653 > 2) by scale factor 0.882884\nI1208 00:55:45.475313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81134 > 2) by scale factor 0.711404\nI1208 00:55:49.652011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7762 > 2) by scale factor 0.720408\nI1208 00:55:53.830117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24994 > 2) by scale factor 0.888911\nI1208 00:55:58.007571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66079 > 2) by scale factor 0.751657\nI1208 00:56:02.184938  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 3.36629 > 2) by scale factor 0.594126\nI1208 00:56:06.362154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84739 > 2) by scale factor 0.519832\nI1208 00:56:10.539428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1279 > 2) by scale factor 0.639407\nI1208 00:56:14.718117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25339 > 2) by scale factor 0.614743\nI1208 00:56:18.895311  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78589 > 2) by scale factor 0.717902\nI1208 00:56:23.073489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75855 > 2) by scale factor 0.725019\nI1208 00:56:27.251005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71459 > 2) by scale factor 0.736759\nI1208 00:56:31.428660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9957 > 2) by scale factor 0.667625\nI1208 00:56:35.604972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09601 > 2) by scale factor 0.48828\nI1208 00:56:39.781819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.021 > 2) by scale factor 0.497389\nI1208 00:56:43.960304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58617 > 2) by scale factor 0.773343\nI1208 00:56:48.137394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06512 > 2) by scale factor 0.652503\nI1208 00:56:52.315299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60956 > 2) by scale factor 0.554084\nI1208 00:56:56.492601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34043 > 2) by scale factor 0.854542\nI1208 00:57:00.670434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88601 > 2) by scale 
factor 0.514667\nI1208 00:57:04.848143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63416 > 2) by scale factor 0.759256\nI1208 00:57:09.026700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70626 > 2) by scale factor 0.739028\nI1208 00:57:13.204411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52154 > 2) by scale factor 0.567933\nI1208 00:57:17.380921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3318 > 2) by scale factor 0.600277\nI1208 00:57:21.558735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31381 > 2) by scale factor 0.603535\nI1208 00:57:25.735473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22917 > 2) by scale factor 0.897196\nI1208 00:57:29.913447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11435 > 2) by scale factor 0.64219\nI1208 00:57:34.092061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79268 > 2) by scale factor 0.716157\nI1208 00:57:38.269150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92261 > 2) by scale factor 0.68432\nI1208 00:57:42.446233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06306 > 2) by scale factor 0.49224\nI1208 00:57:46.623658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80303 > 2) by scale factor 0.525896\nI1208 00:57:50.800499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76783 > 2) by scale factor 0.53081\nI1208 00:57:54.978919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15567 > 2) by scale factor 0.48127\nI1208 00:57:59.157109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64253 > 2) by scale factor 0.549069\nI1208 00:58:03.334341  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.38665 > 2) by scale factor 0.590553\nI1208 00:58:07.511040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96518 > 2) by scale factor 0.50439\nI1208 00:58:11.688789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40073 > 2) by scale factor 0.45447\nI1208 00:58:15.866235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06887 > 2) by scale factor 0.96671\nI1208 00:58:20.044061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0159 > 2) by scale factor 0.992113\nI1208 00:58:24.221869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80645 > 2) by scale factor 0.712644\nI1208 00:58:28.399919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76926 > 2) by scale factor 0.722215\nI1208 00:58:32.577745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20059 > 2) by scale factor 0.476124\nI1208 00:58:36.755543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19952 > 2) by scale factor 0.625094\nI1208 00:58:40.932962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83021 > 2) by scale factor 0.41406\nI1208 00:58:45.110402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08869 > 2) by scale factor 0.647524\nI1208 00:58:49.287410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0898 > 2) by scale factor 0.957029\nI1208 00:58:53.465643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80639 > 2) by scale factor 0.712659\nI1208 00:58:57.643952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24677 > 2) by scale factor 0.615996\nI1208 00:59:01.821888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09705 > 2) by scale factor 
0.645776\nI1208 00:59:05.999753  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88957 > 2) by scale factor 0.514195\nI1208 00:59:10.178009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34264 > 2) by scale factor 0.46055\nI1208 00:59:14.355031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89998 > 2) by scale factor 0.689661\nI1208 00:59:14.366997  1922 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1208 01:01:50.671133  1922 solver.cpp:404]     Test net output #0: accuracy = 0.124294\nI1208 01:01:50.671521  1922 solver.cpp:404]     Test net output #1: loss = 17.1064 (* 1 = 17.1064 loss)\nI1208 01:01:54.610432  1922 solver.cpp:228] Iteration 12100, loss = 16.2222\nI1208 01:01:54.610474  1922 solver.cpp:244]     Train net output #0: accuracy = 0.0823529\nI1208 01:01:54.610491  1922 solver.cpp:244]     Train net output #1: loss = 16.2222 (* 1 = 16.2222 loss)\nI1208 01:01:54.840335  1922 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1208 01:01:54.850476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68062 > 2) by scale factor 0.543387\nI1208 01:01:59.030328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13538 > 2) by scale factor 0.389455\nI1208 01:02:03.210772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73644 > 2) by scale factor 0.730877\nI1208 01:02:07.390604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91332 > 2) by scale factor 0.686503\nI1208 01:02:11.570997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3588 > 2) by scale factor 0.847889\nI1208 01:02:15.750633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94615 > 2) by scale factor 0.678853\nI1208 01:02:19.930294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92109 > 2) by scale 
factor 0.684677\nI1208 01:02:24.111413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44454 > 2) by scale factor 0.44999\nI1208 01:02:28.291802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2847 > 2) by scale factor 0.608884\nI1208 01:02:32.472179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08024 > 2) by scale factor 0.649301\nI1208 01:02:36.652633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58666 > 2) by scale factor 0.773199\nI1208 01:02:40.832602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33734 > 2) by scale factor 0.59928\nI1208 01:02:45.012473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9434 > 2) by scale factor 0.679486\nI1208 01:02:49.192391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60726 > 2) by scale factor 0.434097\nI1208 01:02:53.372459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9551 > 2) by scale factor 0.676795\nI1208 01:02:57.551976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15025 > 2) by scale factor 0.634871\nI1208 01:03:01.731292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51035 > 2) by scale factor 0.569744\nI1208 01:03:05.912410  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30564 > 2) by scale factor 0.605027\nI1208 01:03:10.091709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79477 > 2) by scale factor 0.715622\nI1208 01:03:14.271905  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65581 > 2) by scale factor 0.547075\nI1208 01:03:18.451968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89586 > 2) by scale factor 0.513365\nI1208 01:03:22.631331  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.95163 > 2) by scale factor 0.677593\nI1208 01:03:26.810801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88503 > 2) by scale factor 0.693235\nI1208 01:03:35.167151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62384 > 2) by scale factor 0.762242\nI1208 01:03:39.347961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3402 > 2) by scale factor 0.598767\nI1208 01:03:43.526373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29758 > 2) by scale factor 0.870483\nI1208 01:03:47.705894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85796 > 2) by scale factor 0.699801\nI1208 01:03:51.886704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45171 > 2) by scale factor 0.579422\nI1208 01:03:56.066310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58004 > 2) by scale factor 0.775183\nI1208 01:04:00.247566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42625 > 2) by scale factor 0.824318\nI1208 01:04:04.427502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04343 > 2) by scale factor 0.494629\nI1208 01:04:08.606990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05925 > 2) by scale factor 0.492702\nI1208 01:04:12.786624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60524 > 2) by scale factor 0.554749\nI1208 01:04:16.967627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56286 > 2) by scale factor 0.561347\nI1208 01:04:21.147217  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36084 > 2) by scale factor 0.59509\nI1208 01:04:25.327260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68659 > 2) by 
scale factor 0.744437\nI1208 01:04:29.507396  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6332 > 2) by scale factor 0.550479\nI1208 01:04:33.686781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74639 > 2) by scale factor 0.533847\nI1208 01:04:37.866210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56913 > 2) by scale factor 0.56036\nI1208 01:04:42.045735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91947 > 2) by scale factor 0.685056\nI1208 01:04:46.225786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64724 > 2) by scale factor 0.430363\nI1208 01:04:50.405426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71322 > 2) by scale factor 0.424338\nI1208 01:04:54.585558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11027 > 2) by scale factor 0.643032\nI1208 01:04:58.764654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09804 > 2) by scale factor 0.645569\nI1208 01:05:02.944485  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.31239 > 2) by scale factor 0.376478\nI1208 01:05:07.124071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53291 > 2) by scale factor 0.441217\nI1208 01:05:11.303305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66405 > 2) by scale factor 0.428812\nI1208 01:05:15.484076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27046 > 2) by scale factor 0.611535\nI1208 01:05:19.665422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61502 > 2) by scale factor 0.553247\nI1208 01:05:23.846316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55148 > 2) by scale factor 0.783859\nI1208 01:05:28.026370  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49581 > 2) by scale factor 0.572114\nI1208 01:05:32.206199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95757 > 2) by scale factor 0.67623\nI1208 01:05:36.386500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62475 > 2) by scale factor 0.761979\nI1208 01:05:40.565618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2476 > 2) by scale factor 0.470854\nI1208 01:05:44.745434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0325 > 2) by scale factor 0.397417\nI1208 01:05:48.925537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69966 > 2) by scale factor 0.54059\nI1208 01:05:53.106629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98491 > 2) by scale factor 0.401211\nI1208 01:05:57.285178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62964 > 2) by scale factor 0.431999\nI1208 01:06:01.465592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41227 > 2) by scale factor 0.829093\nI1208 01:06:05.646528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59719 > 2) by scale factor 0.55599\nI1208 01:06:09.826623  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89838 > 2) by scale factor 0.513033\nI1208 01:06:14.006912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92545 > 2) by scale factor 0.406054\nI1208 01:06:18.187034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32125 > 2) by scale factor 0.861605\nI1208 01:06:22.366305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80193 > 2) by scale factor 0.713795\nI1208 01:06:26.546708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.16843 > 2) by scale factor 0.631228\nI1208 01:06:30.726619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85406 > 2) by scale factor 0.518933\nI1208 01:06:34.906450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59603 > 2) by scale factor 0.770406\nI1208 01:06:39.084930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43199 > 2) by scale factor 0.822372\nI1208 01:06:43.264021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83528 > 2) by scale factor 0.705398\nI1208 01:06:47.445708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20462 > 2) by scale factor 0.6241\nI1208 01:06:51.625214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14754 > 2) by scale factor 0.635416\nI1208 01:06:55.804601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82219 > 2) by scale factor 0.708669\nI1208 01:06:59.985436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53848 > 2) by scale factor 0.565214\nI1208 01:07:04.166038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06712 > 2) by scale factor 0.96753\nI1208 01:07:08.345958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23977 > 2) by scale factor 0.617328\nI1208 01:07:12.525882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94881 > 2) by scale factor 0.67824\nI1208 01:07:16.706743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06766 > 2) by scale factor 0.651963\nI1208 01:07:20.886332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11056 > 2) by scale factor 0.947615\nI1208 01:07:25.067241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20214 > 2) by scale factor 0.624582\nI1208 01:07:29.246968  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93513 > 2) by scale factor 0.681401\nI1208 01:07:33.426102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69401 > 2) by scale factor 0.742386\nI1208 01:07:37.606490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65549 > 2) by scale factor 0.547123\nI1208 01:07:41.786880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66252 > 2) by scale factor 0.751167\nI1208 01:07:45.967437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55302 > 2) by scale factor 0.783385\nI1208 01:07:50.147925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56889 > 2) by scale factor 0.778546\nI1208 01:07:54.328279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48639 > 2) by scale factor 0.573659\nI1208 01:07:58.507930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68787 > 2) by scale factor 0.744084\nI1208 01:08:02.688047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81882 > 2) by scale factor 0.523721\nI1208 01:08:06.869026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01118 > 2) by scale factor 0.664192\nI1208 01:08:11.050011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41365 > 2) by scale factor 0.453139\nI1208 01:08:15.230386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55565 > 2) by scale factor 0.562485\nI1208 01:08:19.409971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30368 > 2) by scale factor 0.605386\nI1208 01:08:23.589762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72043 > 2) by scale factor 0.537572\nI1208 01:08:27.769413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.32678 > 2) by scale factor 0.601183\nI1208 01:08:31.948988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44895 > 2) by scale factor 0.816675\nI1208 01:08:36.129245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52113 > 2) by scale factor 0.442368\nI1208 01:08:40.309603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92954 > 2) by scale factor 0.6827\nI1208 01:08:44.489686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01107 > 2) by scale factor 0.664216\nI1208 01:08:48.669651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09118 > 2) by scale factor 0.647003\nI1208 01:08:48.681740  1922 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1208 01:11:26.930321  1922 solver.cpp:404]     Test net output #0: accuracy = 0.207118\nI1208 01:11:26.930738  1922 solver.cpp:404]     Test net output #1: loss = 16.5352 (* 1 = 16.5352 loss)\nI1208 01:11:30.870774  1922 solver.cpp:228] Iteration 12200, loss = 16.112\nI1208 01:11:30.870821  1922 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1208 01:11:30.870839  1922 solver.cpp:244]     Train net output #1: loss = 16.112 (* 1 = 16.112 loss)\nI1208 01:11:31.101471  1922 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1208 01:11:31.111133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6579 > 2) by scale factor 0.752474\nI1208 01:11:35.289872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56268 > 2) by scale factor 0.780432\nI1208 01:11:39.470165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08991 > 2) by scale factor 0.647269\nI1208 01:11:43.650382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34291 > 2) by scale factor 0.460521\nI1208 01:11:47.829452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.04852 > 2) by scale factor 0.656056\nI1208 01:11:52.011764  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51746 > 2) by scale factor 0.568593\nI1208 01:11:56.192347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11917 > 2) by scale factor 0.641196\nI1208 01:12:00.371700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35347 > 2) by scale factor 0.596397\nI1208 01:12:04.552181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8787 > 2) by scale factor 0.515637\nI1208 01:12:08.732754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34259 > 2) by scale factor 0.598339\nI1208 01:12:12.912619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32934 > 2) by scale factor 0.60072\nI1208 01:12:17.092602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76742 > 2) by scale factor 0.722695\nI1208 01:12:21.272289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70155 > 2) by scale factor 0.740315\nI1208 01:12:25.452603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94247 > 2) by scale factor 0.507296\nI1208 01:12:29.631403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46129 > 2) by scale factor 0.81258\nI1208 01:12:33.812047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33975 > 2) by scale factor 0.854792\nI1208 01:12:37.991343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26031 > 2) by scale factor 0.884836\nI1208 01:12:42.169634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72337 > 2) by scale factor 0.734384\nI1208 01:12:46.349280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78928 > 2) by scale factor 0.71703\nI1208 01:12:50.530467  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23274 > 2) by scale factor 0.89576\nI1208 01:12:54.710790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34638 > 2) by scale factor 0.597661\nI1208 01:12:58.891242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82784 > 2) by scale factor 0.707254\nI1208 01:13:03.071462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47283 > 2) by scale factor 0.5759\nI1208 01:13:07.251467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63721 > 2) by scale factor 0.758378\nI1208 01:13:11.431535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20477 > 2) by scale factor 0.62407\nI1208 01:13:15.611696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82565 > 2) by scale factor 0.522788\nI1208 01:13:19.792387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09462 > 2) by scale factor 0.646282\nI1208 01:13:23.970291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60255 > 2) by scale factor 0.768478\nI1208 01:13:28.150313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30178 > 2) by scale factor 0.605734\nI1208 01:13:32.331356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07196 > 2) by scale factor 0.65105\nI1208 01:13:36.510831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26398 > 2) by scale factor 0.612749\nI1208 01:13:40.690480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03492 > 2) by scale factor 0.495673\nI1208 01:13:44.871104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54945 > 2) by scale factor 0.784481\nI1208 01:13:49.051674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.77317 > 2) by scale factor 0.530059\nI1208 01:13:53.232194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07823 > 2) by scale factor 0.490409\nI1208 01:13:57.413167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89559 > 2) by scale factor 0.690705\nI1208 01:14:01.591928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30035 > 2) by scale factor 0.605997\nI1208 01:14:05.770153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97322 > 2) by scale factor 0.672672\nI1208 01:14:09.949530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25083 > 2) by scale factor 0.615228\nI1208 01:14:14.129138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2865 > 2) by scale factor 0.874698\nI1208 01:14:18.309624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98845 > 2) by scale factor 0.669243\nI1208 01:14:22.488288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24203 > 2) by scale factor 0.471472\nI1208 01:14:26.668751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57838 > 2) by scale factor 0.77568\nI1208 01:14:30.848302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21684 > 2) by scale factor 0.621729\nI1208 01:14:35.027880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7899 > 2) by scale factor 0.527718\nI1208 01:14:39.207298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44147 > 2) by scale factor 0.581147\nI1208 01:14:43.387074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24013 > 2) by scale factor 0.617258\nI1208 01:14:47.566725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76602 > 2) by scale factor 0.531065\nI1208 01:14:51.746947  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06318 > 2) by scale factor 0.652917\nI1208 01:14:55.926926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44828 > 2) by scale factor 0.8169\nI1208 01:15:00.106331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21196 > 2) by scale factor 0.904175\nI1208 01:15:04.285735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81534 > 2) by scale factor 0.710393\nI1208 01:15:08.466331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54492 > 2) by scale factor 0.564187\nI1208 01:15:12.646313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2247 > 2) by scale factor 0.620213\nI1208 01:15:21.004381  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72959 > 2) by scale factor 0.536251\nI1208 01:15:25.183902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56769 > 2) by scale factor 0.778909\nI1208 01:15:29.364063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52424 > 2) by scale factor 0.792316\nI1208 01:15:37.722098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99619 > 2) by scale factor 0.667514\nI1208 01:15:41.901239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45882 > 2) by scale factor 0.8134\nI1208 01:15:46.081872  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38828 > 2) by scale factor 0.837422\nI1208 01:15:50.261401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35309 > 2) by scale factor 0.596465\nI1208 01:15:54.441258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56374 > 2) by scale factor 0.780112\nI1208 01:15:58.621254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.38721 > 2) by scale factor 0.45587\nI1208 01:16:02.801298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25412 > 2) by scale factor 0.887264\nI1208 01:16:06.982100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36561 > 2) by scale factor 0.458126\nI1208 01:16:11.162012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6487 > 2) by scale factor 0.755089\nI1208 01:16:15.341233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90965 > 2) by scale factor 0.687368\nI1208 01:16:19.522145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79595 > 2) by scale factor 0.715321\nI1208 01:16:23.702771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59504 > 2) by scale factor 0.7707\nI1208 01:16:27.883226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91214 > 2) by scale factor 0.686781\nI1208 01:16:32.064548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4146 > 2) by scale factor 0.585719\nI1208 01:16:36.242908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04705 > 2) by scale factor 0.656372\nI1208 01:16:40.422603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45427 > 2) by scale factor 0.578993\nI1208 01:16:44.601434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93417 > 2) by scale factor 0.681623\nI1208 01:16:48.780438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43655 > 2) by scale factor 0.581979\nI1208 01:16:52.960419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3372 > 2) by scale factor 0.461126\nI1208 01:16:57.139670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03356 > 2) by scale factor 0.659292\nI1208 01:17:01.319191  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89045 > 2) by scale factor 0.691933\nI1208 01:17:05.498587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21041 > 2) by scale factor 0.90481\nI1208 01:17:09.677083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57141 > 2) by scale factor 0.777784\nI1208 01:17:13.856339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79081 > 2) by scale factor 0.716638\nI1208 01:17:18.037550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74812 > 2) by scale factor 0.727771\nI1208 01:17:22.216403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19845 > 2) by scale factor 0.625302\nI1208 01:17:26.395382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26247 > 2) by scale factor 0.613033\nI1208 01:17:30.576069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79004 > 2) by scale factor 0.527699\nI1208 01:17:34.755193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30049 > 2) by scale factor 0.869381\nI1208 01:17:38.934762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76444 > 2) by scale factor 0.723474\nI1208 01:17:43.113900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99174 > 2) by scale factor 0.668507\nI1208 01:17:47.292899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56556 > 2) by scale factor 0.560921\nI1208 01:17:51.471495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3759 > 2) by scale factor 0.841786\nI1208 01:17:55.650467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71676 > 2) by scale factor 0.736172\nI1208 01:17:59.830086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.32945 > 2) by scale factor 0.858573\nI1208 01:18:04.009522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95786 > 2) by scale factor 0.676164\nI1208 01:18:08.188884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40896 > 2) by scale factor 0.830233\nI1208 01:18:12.368830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48413 > 2) by scale factor 0.574032\nI1208 01:18:16.548316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70019 > 2) by scale factor 0.740688\nI1208 01:18:20.727146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28625 > 2) by scale factor 0.608597\nI1208 01:18:24.906973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11964 > 2) by scale factor 0.485479\nI1208 01:18:24.918870  1922 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1208 01:21:03.088729  1922 solver.cpp:404]     Test net output #0: accuracy = 0.193353\nI1208 01:21:03.089172  1922 solver.cpp:404]     Test net output #1: loss = 11.3393 (* 1 = 11.3393 loss)\nI1208 01:21:07.027340  1922 solver.cpp:228] Iteration 12300, loss = 12.1543\nI1208 01:21:07.027384  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1208 01:21:07.027400  1922 solver.cpp:244]     Train net output #1: loss = 12.1543 (* 1 = 12.1543 loss)\nI1208 01:21:07.254977  1922 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1208 01:21:07.265143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89216 > 2) by scale factor 0.691524\nI1208 01:21:11.443857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02362 > 2) by scale factor 0.497065\nI1208 01:21:15.623320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69055 > 2) by scale factor 0.743341\nI1208 01:21:19.802196  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.04133 > 2) by scale factor 0.657608\nI1208 01:21:23.981283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4466 > 2) by scale factor 0.580282\nI1208 01:21:28.160945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65001 > 2) by scale factor 0.754715\nI1208 01:21:32.340723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02946 > 2) by scale factor 0.660184\nI1208 01:21:36.520148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27681 > 2) by scale factor 0.878422\nI1208 01:21:40.700045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15393 > 2) by scale factor 0.63413\nI1208 01:21:44.879345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5886 > 2) by scale factor 0.55732\nI1208 01:21:49.058068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11082 > 2) by scale factor 0.642918\nI1208 01:21:53.238782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13663 > 2) by scale factor 0.637627\nI1208 01:21:57.417899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30858 > 2) by scale factor 0.60449\nI1208 01:22:01.596340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06886 > 2) by scale factor 0.651709\nI1208 01:22:05.775319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05827 > 2) by scale factor 0.653964\nI1208 01:22:09.954723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12432 > 2) by scale factor 0.941479\nI1208 01:22:14.134191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93374 > 2) by scale factor 0.508423\nI1208 01:22:18.312970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20274 > 2) by scale factor 0.47588\nI1208 01:22:26.669370  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17303 > 2) by scale factor 0.630312\nI1208 01:22:30.848969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20734 > 2) by scale factor 0.906067\nI1208 01:22:39.205574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31429 > 2) by scale factor 0.864196\nI1208 01:22:47.560860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99444 > 2) by scale factor 0.667905\nI1208 01:22:51.740332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65338 > 2) by scale factor 0.753756\nI1208 01:22:55.919363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7592 > 2) by scale factor 0.724849\nI1208 01:23:00.098521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79861 > 2) by scale factor 0.71464\nI1208 01:23:04.277031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50563 > 2) by scale factor 0.798203\nI1208 01:23:12.633437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02609 > 2) by scale factor 0.660918\nI1208 01:23:16.811686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16374 > 2) by scale factor 0.480338\nI1208 01:23:20.990334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77345 > 2) by scale factor 0.721123\nI1208 01:23:25.170354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16875 > 2) by scale factor 0.922191\nI1208 01:23:29.349532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37426 > 2) by scale factor 0.592723\nI1208 01:23:33.527737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52512 > 2) by scale factor 0.567357\nI1208 01:23:37.707494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.95137 > 2) by scale factor 0.677651\nI1208 01:23:41.885748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30625 > 2) by scale factor 0.604915\nI1208 01:23:46.064291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60352 > 2) by scale factor 0.76819\nI1208 01:23:50.242729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63853 > 2) by scale factor 0.757998\nI1208 01:23:54.421741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93482 > 2) by scale factor 0.681472\nI1208 01:23:58.602108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19578 > 2) by scale factor 0.476669\nI1208 01:24:02.780470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74534 > 2) by scale factor 0.533998\nI1208 01:24:06.959730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01296 > 2) by scale factor 0.498385\nI1208 01:24:11.139298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24227 > 2) by scale factor 0.616852\nI1208 01:24:15.317235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2081 > 2) by scale factor 0.623422\nI1208 01:24:19.495542  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23792 > 2) by scale factor 0.61768\nI1208 01:24:23.675864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31823 > 2) by scale factor 0.463152\nI1208 01:24:27.854894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56779 > 2) by scale factor 0.56057\nI1208 01:24:32.034615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00495 > 2) by scale factor 0.399604\nI1208 01:24:36.213734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12072 > 2) by scale factor 0.640878\nI1208 01:24:40.391885  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03609 > 2) by scale factor 0.495529\nI1208 01:24:44.570587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92057 > 2) by scale factor 0.684799\nI1208 01:24:48.749367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64591 > 2) by scale factor 0.755883\nI1208 01:24:52.928633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75518 > 2) by scale factor 0.532598\nI1208 01:24:57.107224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62023 > 2) by scale factor 0.763291\nI1208 01:25:01.285758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2891 > 2) by scale factor 0.873706\nI1208 01:25:05.465530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27976 > 2) by scale factor 0.6098\nI1208 01:25:09.644786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1497 > 2) by scale factor 0.930363\nI1208 01:25:13.823077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41252 > 2) by scale factor 0.829008\nI1208 01:25:18.002136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60744 > 2) by scale factor 0.554409\nI1208 01:25:22.181154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38617 > 2) by scale factor 0.455978\nI1208 01:25:26.359341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14301 > 2) by scale factor 0.482741\nI1208 01:25:30.537977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42003 > 2) by scale factor 0.452486\nI1208 01:25:34.715875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01599 > 2) by scale factor 0.992066\nI1208 01:25:38.894907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.1566 > 2) by scale factor 0.633593\nI1208 01:25:43.074363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29256 > 2) by scale factor 0.872388\nI1208 01:25:47.253259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63582 > 2) by scale factor 0.550083\nI1208 01:25:51.431622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62874 > 2) by scale factor 0.551155\nI1208 01:25:55.610190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64558 > 2) by scale factor 0.755979\nI1208 01:25:59.789223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69701 > 2) by scale factor 0.741561\nI1208 01:26:03.968008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33084 > 2) by scale factor 0.600449\nI1208 01:26:08.147928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84174 > 2) by scale factor 0.520598\nI1208 01:26:12.326714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57164 > 2) by scale factor 0.777714\nI1208 01:26:16.505300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12025 > 2) by scale factor 0.640974\nI1208 01:26:20.683789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5657 > 2) by scale factor 0.560899\nI1208 01:26:24.862648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97833 > 2) by scale factor 0.671517\nI1208 01:26:29.041074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37985 > 2) by scale factor 0.840389\nI1208 01:26:33.220209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8499 > 2) by scale factor 0.519495\nI1208 01:26:37.398424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65668 > 2) by scale factor 0.752818\nI1208 01:26:45.754937  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04326 > 2) by scale factor 0.65719\nI1208 01:26:49.934882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63538 > 2) by scale factor 0.758902\nI1208 01:26:54.114493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24389 > 2) by scale factor 0.471266\nI1208 01:26:58.293467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19511 > 2) by scale factor 0.476746\nI1208 01:27:02.472224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52566 > 2) by scale factor 0.56727\nI1208 01:27:06.650692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11443 > 2) by scale factor 0.642171\nI1208 01:27:10.829473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46612 > 2) by scale factor 0.447816\nI1208 01:27:15.008746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29085 > 2) by scale factor 0.607745\nI1208 01:27:19.188045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83766 > 2) by scale factor 0.521151\nI1208 01:27:23.367897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92126 > 2) by scale factor 0.51004\nI1208 01:27:27.547046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81652 > 2) by scale factor 0.524038\nI1208 01:27:31.726650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82955 > 2) by scale factor 0.706827\nI1208 01:27:35.907269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69069 > 2) by scale factor 0.743305\nI1208 01:27:40.086524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05939 > 2) by scale factor 0.653724\nI1208 01:27:44.265735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.7965 > 2) by scale factor 0.715179\nI1208 01:27:48.443997  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01079 > 2) by scale factor 0.664279\nI1208 01:27:52.622306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81027 > 2) by scale factor 0.711676\nI1208 01:27:56.801373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62214 > 2) by scale factor 0.552159\nI1208 01:28:00.980156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16288 > 2) by scale factor 0.632335\nI1208 01:28:00.992204  1922 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1208 01:30:38.523249  1922 solver.cpp:404]     Test net output #0: accuracy = 0.192294\nI1208 01:30:38.523625  1922 solver.cpp:404]     Test net output #1: loss = 16.6117 (* 1 = 16.6117 loss)\nI1208 01:30:42.462294  1922 solver.cpp:228] Iteration 12400, loss = 17.6085\nI1208 01:30:42.462337  1922 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1208 01:30:42.462354  1922 solver.cpp:244]     Train net output #1: loss = 17.6085 (* 1 = 17.6085 loss)\nI1208 01:30:42.693856  1922 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1208 01:30:42.703858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03434 > 2) by scale factor 0.659121\nI1208 01:30:46.886559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54712 > 2) by scale factor 0.563838\nI1208 01:30:51.068975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92481 > 2) by scale factor 0.509579\nI1208 01:30:55.252024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83719 > 2) by scale factor 0.521214\nI1208 01:30:59.435343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22416 > 2) by scale factor 0.620317\nI1208 01:31:03.618624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.53194 > 2) by scale factor 0.789909\nI1208 01:31:07.802194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15019 > 2) by scale factor 0.93015\nI1208 01:31:11.984051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26108 > 2) by scale factor 0.613294\nI1208 01:31:16.168298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00404 > 2) by scale factor 0.499495\nI1208 01:31:20.351296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80106 > 2) by scale factor 0.52617\nI1208 01:31:24.533620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33682 > 2) by scale factor 0.599374\nI1208 01:31:28.716372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39085 > 2) by scale factor 0.589822\nI1208 01:31:32.899255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15832 > 2) by scale factor 0.633248\nI1208 01:31:37.082042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03813 > 2) by scale factor 0.495279\nI1208 01:31:41.265683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21492 > 2) by scale factor 0.902967\nI1208 01:31:45.448667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78626 > 2) by scale factor 0.717809\nI1208 01:31:49.631413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46626 > 2) by scale factor 0.576991\nI1208 01:31:53.814618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10187 > 2) by scale factor 0.487583\nI1208 01:31:57.996618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50356 > 2) by scale factor 0.798863\nI1208 01:32:02.178442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07125 > 2) by scale factor 0.965602\nI1208 01:32:06.361701 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74979 > 2) by scale factor 0.727328\nI1208 01:32:10.544762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9313 > 2) by scale factor 0.682292\nI1208 01:32:18.907461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49319 > 2) by scale factor 0.802184\nI1208 01:32:23.090554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78649 > 2) by scale factor 0.717749\nI1208 01:32:27.272783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86407 > 2) by scale factor 0.698308\nI1208 01:32:31.455255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6371 > 2) by scale factor 0.758408\nI1208 01:32:35.637960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54603 > 2) by scale factor 0.785536\nI1208 01:32:39.820132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84924 > 2) by scale factor 0.519582\nI1208 01:32:44.001866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42246 > 2) by scale factor 0.584374\nI1208 01:32:48.184741  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3548 > 2) by scale factor 0.849328\nI1208 01:32:52.367449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21882 > 2) by scale factor 0.621345\nI1208 01:32:56.550498  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71776 > 2) by scale factor 0.7359\nI1208 01:33:00.733852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55803 > 2) by scale factor 0.781851\nI1208 01:33:04.916965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41828 > 2) by scale factor 0.452665\nI1208 01:33:09.100080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.90052 > 2) by scale factor 0.689532\nI1208 01:33:13.281919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28232 > 2) by scale factor 0.467037\nI1208 01:33:17.465639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9576 > 2) by scale factor 0.676223\nI1208 01:33:21.648084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02478 > 2) by scale factor 0.496921\nI1208 01:33:25.830529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70559 > 2) by scale factor 0.739212\nI1208 01:33:34.193320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69956 > 2) by scale factor 0.540604\nI1208 01:33:38.376889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64772 > 2) by scale factor 0.548287\nI1208 01:33:42.558993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44445 > 2) by scale factor 0.449999\nI1208 01:33:50.922206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39008 > 2) by scale factor 0.836794\nI1208 01:33:55.105640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39776 > 2) by scale factor 0.834113\nI1208 01:33:59.287277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49186 > 2) by scale factor 0.802613\nI1208 01:34:03.471262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24167 > 2) by scale factor 0.471513\nI1208 01:34:07.653897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03838 > 2) by scale factor 0.658246\nI1208 01:34:11.836833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95437 > 2) by scale factor 0.676963\nI1208 01:34:16.020313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11351 > 2) by scale factor 0.642361\nI1208 01:34:20.202968  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20996 > 2) by scale factor 0.904995\nI1208 01:34:24.386019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85363 > 2) by scale factor 0.700861\nI1208 01:34:28.568363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78955 > 2) by scale factor 0.716963\nI1208 01:34:32.751111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91563 > 2) by scale factor 0.685958\nI1208 01:34:36.934101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68683 > 2) by scale factor 0.542472\nI1208 01:34:41.116835  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6226 > 2) by scale factor 0.762603\nI1208 01:34:45.299492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66073 > 2) by scale factor 0.546339\nI1208 01:34:49.482450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76842 > 2) by scale factor 0.530727\nI1208 01:34:53.664147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74325 > 2) by scale factor 0.534295\nI1208 01:34:57.846844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4654 > 2) by scale factor 0.577135\nI1208 01:35:02.030697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9868 > 2) by scale factor 0.669612\nI1208 01:35:06.213524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4788 > 2) by scale factor 0.806841\nI1208 01:35:10.396565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6838 > 2) by scale factor 0.745212\nI1208 01:35:14.578914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84944 > 2) by scale factor 0.701891\nI1208 01:35:18.762331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.79256 > 2) by scale factor 0.716189\nI1208 01:35:22.944845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38783 > 2) by scale factor 0.83758\nI1208 01:35:27.128162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4249 > 2) by scale factor 0.824777\nI1208 01:35:31.309900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56836 > 2) by scale factor 0.778708\nI1208 01:35:35.493407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20328 > 2) by scale factor 0.907739\nI1208 01:35:39.675976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88847 > 2) by scale factor 0.692408\nI1208 01:35:43.858507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71888 > 2) by scale factor 0.735598\nI1208 01:35:48.040989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50185 > 2) by scale factor 0.799409\nI1208 01:35:52.224047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13322 > 2) by scale factor 0.937548\nI1208 01:35:56.405238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29923 > 2) by scale factor 0.606203\nI1208 01:36:00.589021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28728 > 2) by scale factor 0.608406\nI1208 01:36:08.951463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86612 > 2) by scale factor 0.697808\nI1208 01:36:13.135262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94156 > 2) by scale factor 0.507413\nI1208 01:36:17.318295  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33773 > 2) by scale factor 0.855531\nI1208 01:36:21.499819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57819 > 2) by scale factor 0.775739\nI1208 01:36:25.682544  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06638 > 2) by scale factor 0.967875\nI1208 01:36:29.864461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39438 > 2) by scale factor 0.83529\nI1208 01:36:34.046958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52683 > 2) by scale factor 0.791506\nI1208 01:36:38.229079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77312 > 2) by scale factor 0.72121\nI1208 01:36:46.590440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19416 > 2) by scale factor 0.911509\nI1208 01:36:50.773345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05584 > 2) by scale factor 0.654485\nI1208 01:36:59.136327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68813 > 2) by scale factor 0.54228\nI1208 01:37:03.320600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75661 > 2) by scale factor 0.725529\nI1208 01:37:07.502846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24996 > 2) by scale factor 0.615392\nI1208 01:37:11.685449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60092 > 2) by scale factor 0.768959\nI1208 01:37:15.867429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02988 > 2) by scale factor 0.660092\nI1208 01:37:20.049540  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79525 > 2) by scale factor 0.715499\nI1208 01:37:24.233705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41576 > 2) by scale factor 0.827898\nI1208 01:37:28.415081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98033 > 2) by scale factor 0.502471\nI1208 01:37:32.596619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.04579 > 2) by scale factor 0.977617\nI1208 01:37:36.778826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77844 > 2) by scale factor 0.719827\nI1208 01:37:36.791002  1922 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1208 01:40:14.242744  1922 solver.cpp:404]     Test net output #0: accuracy = 0.167647\nI1208 01:40:14.243170  1922 solver.cpp:404]     Test net output #1: loss = 18.1615 (* 1 = 18.1615 loss)\nI1208 01:40:18.181865  1922 solver.cpp:228] Iteration 12500, loss = 15.5783\nI1208 01:40:18.181905  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1208 01:40:18.181921  1922 solver.cpp:244]     Train net output #1: loss = 15.5783 (* 1 = 15.5783 loss)\nI1208 01:40:18.409787  1922 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1208 01:40:18.419991  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53996 > 2) by scale factor 0.564979\nI1208 01:40:22.599527  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08213 > 2) by scale factor 0.648902\nI1208 01:40:26.779255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69476 > 2) by scale factor 0.541308\nI1208 01:40:30.958061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33259 > 2) by scale factor 0.600133\nI1208 01:40:35.138599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21032 > 2) by scale factor 0.475023\nI1208 01:40:39.318492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82768 > 2) by scale factor 0.414278\nI1208 01:40:43.498528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.723 > 2) by scale factor 0.537201\nI1208 01:40:47.677718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85041 > 2) by scale factor 0.519426\nI1208 01:40:51.857333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.88925 > 2) by scale factor 0.514238\nI1208 01:40:56.036309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94966 > 2) by scale factor 0.678045\nI1208 01:41:00.215636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85329 > 2) by scale factor 0.700944\nI1208 01:41:04.394127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14909 > 2) by scale factor 0.635103\nI1208 01:41:08.574724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62789 > 2) by scale factor 0.761067\nI1208 01:41:12.754884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54917 > 2) by scale factor 0.784568\nI1208 01:41:16.934927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56353 > 2) by scale factor 0.56124\nI1208 01:41:21.114756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87706 > 2) by scale factor 0.695154\nI1208 01:41:25.295346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10513 > 2) by scale factor 0.487196\nI1208 01:41:29.474833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4829 > 2) by scale factor 0.574235\nI1208 01:41:33.654403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70025 > 2) by scale factor 0.740672\nI1208 01:41:42.013072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35876 > 2) by scale factor 0.847902\nI1208 01:41:46.193608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37805 > 2) by scale factor 0.841026\nI1208 01:41:50.372952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48406 > 2) by scale factor 0.574044\nI1208 01:41:54.552970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19194 > 2) by scale factor 0.626578\nI1208 01:41:58.734004 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93872 > 2) by scale factor 0.680567\nI1208 01:42:02.912675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61566 > 2) by scale factor 0.764626\nI1208 01:42:07.091919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02275 > 2) by scale factor 0.661649\nI1208 01:42:11.272584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86372 > 2) by scale factor 0.517636\nI1208 01:42:15.452337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80608 > 2) by scale factor 0.712738\nI1208 01:42:19.631552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73283 > 2) by scale factor 0.731843\nI1208 01:42:23.811141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53017 > 2) by scale factor 0.566546\nI1208 01:42:27.990521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.352 > 2) by scale factor 0.596658\nI1208 01:42:32.172119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1152 > 2) by scale factor 0.642014\nI1208 01:42:36.352443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8351 > 2) by scale factor 0.705441\nI1208 01:42:40.532264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.355 > 2) by scale factor 0.596125\nI1208 01:42:44.711664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32107 > 2) by scale factor 0.462848\nI1208 01:42:48.890152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38712 > 2) by scale factor 0.837828\nI1208 01:42:53.070271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3987 > 2) by scale factor 0.833786\nI1208 01:42:57.249207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.07027 > 2) by scale factor 0.966058\nI1208 01:43:01.428205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95992 > 2) by scale factor 0.675693\nI1208 01:43:05.608379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18434 > 2) by scale factor 0.477972\nI1208 01:43:09.788241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60172 > 2) by scale factor 0.768721\nI1208 01:43:13.967571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47997 > 2) by scale factor 0.806461\nI1208 01:43:18.146944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59111 > 2) by scale factor 0.55693\nI1208 01:43:22.326270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38337 > 2) by scale factor 0.591126\nI1208 01:43:26.505218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17487 > 2) by scale factor 0.919597\nI1208 01:43:30.683830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48204 > 2) by scale factor 0.574375\nI1208 01:43:34.862350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05387 > 2) by scale factor 0.493355\nI1208 01:43:39.042289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69501 > 2) by scale factor 0.742111\nI1208 01:43:43.221477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9247 > 2) by scale factor 0.683831\nI1208 01:43:47.400434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56886 > 2) by scale factor 0.778557\nI1208 01:43:51.580559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53061 > 2) by scale factor 0.790323\nI1208 01:43:55.759023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54698 > 2) by scale factor 0.56386\nI1208 01:43:59.938247  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10748 > 2) by scale factor 0.643609\nI1208 01:44:04.117420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72048 > 2) by scale factor 0.735165\nI1208 01:44:08.297618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04702 > 2) by scale factor 0.656378\nI1208 01:44:12.477850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20604 > 2) by scale factor 0.906603\nI1208 01:44:16.656802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89795 > 2) by scale factor 0.690142\nI1208 01:44:20.837298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19909 > 2) by scale factor 0.476294\nI1208 01:44:25.016566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5226 > 2) by scale factor 0.792833\nI1208 01:44:29.197402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02631 > 2) by scale factor 0.660871\nI1208 01:44:33.376050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38539 > 2) by scale factor 0.838436\nI1208 01:44:37.555274  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77434 > 2) by scale factor 0.529895\nI1208 01:44:41.734835  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20914 > 2) by scale factor 0.62322\nI1208 01:44:45.915133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61136 > 2) by scale factor 0.553808\nI1208 01:44:50.095439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41619 > 2) by scale factor 0.585447\nI1208 01:44:54.274652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28297 > 2) by scale factor 0.609204\nI1208 01:44:58.454073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.78082 > 2) by scale factor 0.418339\nI1208 01:45:02.634060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12781 > 2) by scale factor 0.484519\nI1208 01:45:06.813194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38727 > 2) by scale factor 0.590446\nI1208 01:45:10.992332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14753 > 2) by scale factor 0.635419\nI1208 01:45:15.172066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72087 > 2) by scale factor 0.735058\nI1208 01:45:19.352507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13432 > 2) by scale factor 0.937069\nI1208 01:45:23.532244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23361 > 2) by scale factor 0.618503\nI1208 01:45:27.711422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98034 > 2) by scale factor 0.671065\nI1208 01:45:31.892040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62484 > 2) by scale factor 0.761951\nI1208 01:45:36.071420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09348 > 2) by scale factor 0.64652\nI1208 01:45:40.249470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06278 > 2) by scale factor 0.653002\nI1208 01:45:44.429205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46556 > 2) by scale factor 0.447872\nI1208 01:45:48.607789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7281 > 2) by scale factor 0.536466\nI1208 01:45:52.786747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15122 > 2) by scale factor 0.481786\nI1208 01:45:56.966118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88817 > 2) by scale factor 0.692481\nI1208 01:46:01.146044 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92921 > 2) by scale factor 0.682778\nI1208 01:46:05.325215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24793 > 2) by scale factor 0.615776\nI1208 01:46:09.504591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47326 > 2) by scale factor 0.808648\nI1208 01:46:13.684497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03031 > 2) by scale factor 0.985072\nI1208 01:46:17.864142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8278 > 2) by scale factor 0.707264\nI1208 01:46:26.220747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35036 > 2) by scale factor 0.850932\nI1208 01:46:38.755913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47208 > 2) by scale factor 0.809034\nI1208 01:46:42.936127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76129 > 2) by scale factor 0.724299\nI1208 01:46:47.114528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4771 > 2) by scale factor 0.575192\nI1208 01:46:51.293808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54428 > 2) by scale factor 0.564289\nI1208 01:46:55.472977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81795 > 2) by scale factor 0.709735\nI1208 01:46:59.652657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0863 > 2) by scale factor 0.648026\nI1208 01:47:03.833238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25599 > 2) by scale factor 0.614253\nI1208 01:47:08.013180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32982 > 2) by scale factor 0.600633\nI1208 01:47:12.192718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.19256 > 2) by scale factor 0.477035\nI1208 01:47:12.204659  1922 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1208 01:49:50.245759  1922 solver.cpp:404]     Test net output #0: accuracy = 0.185588\nI1208 01:49:50.246181  1922 solver.cpp:404]     Test net output #1: loss = 8.33398 (* 1 = 8.33398 loss)\nI1208 01:49:54.185886  1922 solver.cpp:228] Iteration 12600, loss = 9.44614\nI1208 01:49:54.185927  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1208 01:49:54.185945  1922 solver.cpp:244]     Train net output #1: loss = 9.44614 (* 1 = 9.44614 loss)\nI1208 01:49:54.412999  1922 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1208 01:49:54.423225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64531 > 2) by scale factor 0.756056\nI1208 01:49:58.603104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28734 > 2) by scale factor 0.874379\nI1208 01:50:02.782459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25772 > 2) by scale factor 0.885848\nI1208 01:50:06.961146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65731 > 2) by scale factor 0.752642\nI1208 01:50:11.139809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25503 > 2) by scale factor 0.886905\nI1208 01:50:19.495167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27049 > 2) by scale factor 0.611528\nI1208 01:50:23.673995  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51059 > 2) by scale factor 0.443401\nI1208 01:50:27.853762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87613 > 2) by scale factor 0.515979\nI1208 01:50:32.033082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82663 > 2) by scale factor 0.522653\nI1208 01:50:36.211951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.92588 > 2) by scale factor 0.50944\nI1208 01:50:40.390844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03034 > 2) by scale factor 0.496236\nI1208 01:50:44.569200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57868 > 2) by scale factor 0.558865\nI1208 01:50:48.747175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04192 > 2) by scale factor 0.65748\nI1208 01:50:52.925371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16565 > 2) by scale factor 0.923509\nI1208 01:50:57.105391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59608 > 2) by scale factor 0.770393\nI1208 01:51:01.284109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45239 > 2) by scale factor 0.579308\nI1208 01:51:05.461241  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34503 > 2) by scale factor 0.597902\nI1208 01:51:09.639902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90254 > 2) by scale factor 0.512486\nI1208 01:51:13.818848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42313 > 2) by scale factor 0.58426\nI1208 01:51:17.997257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38312 > 2) by scale factor 0.456296\nI1208 01:51:22.174916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77451 > 2) by scale factor 0.52987\nI1208 01:51:26.352147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01432 > 2) by scale factor 0.498216\nI1208 01:51:30.530506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47283 > 2) by scale factor 0.575899\nI1208 01:51:34.708039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30762 > 2) by scale factor 0.464293\nI1208 
01:51:38.885412  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97001 > 2) by scale factor 0.673398\nI1208 01:51:43.063679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23942 > 2) by scale factor 0.617394\nI1208 01:51:47.242254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24683 > 2) by scale factor 0.47094\nI1208 01:51:51.421389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45573 > 2) by scale factor 0.44886\nI1208 01:51:55.600258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94869 > 2) by scale factor 0.506497\nI1208 01:51:59.778650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05984 > 2) by scale factor 0.970948\nI1208 01:52:03.957126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65402 > 2) by scale factor 0.753573\nI1208 01:52:08.136191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29758 > 2) by scale factor 0.465378\nI1208 01:52:12.314743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55296 > 2) by scale factor 0.783404\nI1208 01:52:16.493568  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64031 > 2) by scale factor 0.757488\nI1208 01:52:24.847668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70864 > 2) by scale factor 0.738377\nI1208 01:52:29.026393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69123 > 2) by scale factor 0.743153\nI1208 01:52:33.205039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34902 > 2) by scale factor 0.597189\nI1208 01:52:37.382901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53151 > 2) by scale factor 0.56633\nI1208 01:52:41.561815  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.91078 > 2) by scale factor 0.687101\nI1208 01:52:45.739958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3928 > 2) by scale factor 0.455291\nI1208 01:52:49.919303  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11307 > 2) by scale factor 0.486254\nI1208 01:52:54.097827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84478 > 2) by scale factor 0.703042\nI1208 01:52:58.276448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04218 > 2) by scale factor 0.657424\nI1208 01:53:02.455574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35913 > 2) by scale factor 0.595393\nI1208 01:53:06.633133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28915 > 2) by scale factor 0.60806\nI1208 01:53:10.811854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30547 > 2) by scale factor 0.605057\nI1208 01:53:14.990459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50936 > 2) by scale factor 0.443522\nI1208 01:53:19.169652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65043 > 2) by scale factor 0.430068\nI1208 01:53:23.347806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99684 > 2) by scale factor 0.66737\nI1208 01:53:27.526473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58991 > 2) by scale factor 0.772228\nI1208 01:53:31.705008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60733 > 2) by scale factor 0.554426\nI1208 01:53:35.883956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7633 > 2) by scale factor 0.723773\nI1208 01:53:40.062186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4382 > 2) by scale factor 0.820277\nI1208 
01:53:44.239656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77579 > 2) by scale factor 0.720516\nI1208 01:53:48.419034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71272 > 2) by scale factor 0.737268\nI1208 01:53:52.597461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18057 > 2) by scale factor 0.628819\nI1208 01:53:56.775224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26366 > 2) by scale factor 0.46908\nI1208 01:54:00.953256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74272 > 2) by scale factor 0.53437\nI1208 01:54:05.131697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84175 > 2) by scale factor 0.703793\nI1208 01:54:09.309136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85826 > 2) by scale factor 0.518369\nI1208 01:54:13.487607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97135 > 2) by scale factor 0.673096\nI1208 01:54:17.666352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8507 > 2) by scale factor 0.519387\nI1208 01:54:21.845481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86169 > 2) by scale factor 0.517908\nI1208 01:54:26.023275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06669 > 2) by scale factor 0.4918\nI1208 01:54:30.201086  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0891 > 2) by scale factor 0.647438\nI1208 01:54:34.380210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84717 > 2) by scale factor 0.702452\nI1208 01:54:38.558725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58785 > 2) by scale factor 0.772841\nI1208 01:54:42.736887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.13351 > 2) by scale factor 0.638262\nI1208 01:54:46.914762  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25163 > 2) by scale factor 0.888246\nI1208 01:54:51.094336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12501 > 2) by scale factor 0.639999\nI1208 01:54:55.274013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84882 > 2) by scale factor 0.51964\nI1208 01:54:59.451519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65148 > 2) by scale factor 0.547723\nI1208 01:55:03.628768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19279 > 2) by scale factor 0.626411\nI1208 01:55:07.808394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72519 > 2) by scale factor 0.733894\nI1208 01:55:11.985543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95709 > 2) by scale factor 0.676339\nI1208 01:55:16.163327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77821 > 2) by scale factor 0.719889\nI1208 01:55:20.343057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77083 > 2) by scale factor 0.721804\nI1208 01:55:24.521173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11178 > 2) by scale factor 0.947068\nI1208 01:55:28.700143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30161 > 2) by scale factor 0.605765\nI1208 01:55:32.878458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27684 > 2) by scale factor 0.610344\nI1208 01:55:37.056859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57842 > 2) by scale factor 0.558906\nI1208 01:55:41.235127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66625 > 2) by scale factor 0.750116\nI1208 
01:55:45.412796  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97931 > 2) by scale factor 0.671297\nI1208 01:55:49.591195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64582 > 2) by scale factor 0.755909\nI1208 01:55:53.770139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28267 > 2) by scale factor 0.876168\nI1208 01:55:57.947847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11357 > 2) by scale factor 0.486195\nI1208 01:56:02.125883  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77402 > 2) by scale factor 0.529938\nI1208 01:56:06.304332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55852 > 2) by scale factor 0.781701\nI1208 01:56:10.482197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45601 > 2) by scale factor 0.814327\nI1208 01:56:14.661064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36199 > 2) by scale factor 0.846744\nI1208 01:56:18.838515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71075 > 2) by scale factor 0.737803\nI1208 01:56:23.016775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29644 > 2) by scale factor 0.870912\nI1208 01:56:27.195520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75385 > 2) by scale factor 0.532786\nI1208 01:56:31.373638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35259 > 2) by scale factor 0.459497\nI1208 01:56:35.552315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64682 > 2) by scale factor 0.755624\nI1208 01:56:39.731025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71728 > 2) by scale factor 0.736031\nI1208 01:56:43.907847  1922 sgd_solver.cpp:152] Gradient clipping: 
scaling down gradients (L2 norm 2.88215 > 2) by scale factor 0.693926\nI1208 01:56:48.086328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89242 > 2) by scale factor 0.691463\nI1208 01:56:48.098155  1922 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1208 01:59:26.329995  1922 solver.cpp:404]     Test net output #0: accuracy = 0.18953\nI1208 01:59:26.330446  1922 solver.cpp:404]     Test net output #1: loss = 9.17062 (* 1 = 9.17062 loss)\nI1208 01:59:30.268456  1922 solver.cpp:228] Iteration 12700, loss = 10.2444\nI1208 01:59:30.268494  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1208 01:59:30.268519  1922 solver.cpp:244]     Train net output #1: loss = 10.2444 (* 1 = 10.2444 loss)\nI1208 01:59:30.499030  1922 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1208 01:59:30.509240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09258 > 2) by scale factor 0.955758\nI1208 01:59:38.868721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05568 > 2) by scale factor 0.972916\nI1208 01:59:43.049405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44224 > 2) by scale factor 0.81892\nI1208 01:59:47.230782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26705 > 2) by scale factor 0.612172\nI1208 01:59:51.411373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90684 > 2) by scale factor 0.511923\nI1208 01:59:55.591271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64268 > 2) by scale factor 0.549046\nI1208 01:59:59.771543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82555 > 2) by scale factor 0.707826\nI1208 02:00:03.953495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05242 > 2) by scale factor 0.655218\nI1208 02:00:08.134058  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.73404 > 2) by scale factor 0.535614\nI1208 02:00:12.314920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40886 > 2) by scale factor 0.830267\nI1208 02:00:16.494968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27198 > 2) by scale factor 0.61125\nI1208 02:00:20.676122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36624 > 2) by scale factor 0.845221\nI1208 02:00:24.856994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14959 > 2) by scale factor 0.635003\nI1208 02:00:29.037833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86559 > 2) by scale factor 0.517385\nI1208 02:00:33.217733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50989 > 2) by scale factor 0.569818\nI1208 02:00:37.399364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42701 > 2) by scale factor 0.583598\nI1208 02:00:41.580979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03566 > 2) by scale factor 0.495582\nI1208 02:00:45.761910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84188 > 2) by scale factor 0.703758\nI1208 02:00:49.942649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48476 > 2) by scale factor 0.445955\nI1208 02:00:54.122958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03035 > 2) by scale factor 0.496235\nI1208 02:00:58.302798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3835 > 2) by scale factor 0.839102\nI1208 02:01:02.482774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06977 > 2) by scale factor 0.966291\nI1208 02:01:06.663013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79282 > 2) by scale 
factor 0.716123\nI1208 02:01:10.843670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51318 > 2) by scale factor 0.795803\nI1208 02:01:15.024369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40196 > 2) by scale factor 0.587897\nI1208 02:01:19.204774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89802 > 2) by scale factor 0.690127\nI1208 02:01:23.385509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71988 > 2) by scale factor 0.735326\nI1208 02:01:27.566526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82381 > 2) by scale factor 0.708263\nI1208 02:01:31.747740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50578 > 2) by scale factor 0.570486\nI1208 02:01:35.928292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88088 > 2) by scale factor 0.694233\nI1208 02:01:40.108580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67959 > 2) by scale factor 0.746383\nI1208 02:01:44.290320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4919 > 2) by scale factor 0.802602\nI1208 02:01:48.470937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45768 > 2) by scale factor 0.813776\nI1208 02:01:52.652041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75564 > 2) by scale factor 0.532532\nI1208 02:01:56.832335  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31554 > 2) by scale factor 0.603219\nI1208 02:02:01.012137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35666 > 2) by scale factor 0.848658\nI1208 02:02:05.192689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04102 > 2) by scale factor 0.657674\nI1208 02:02:09.375061  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.37791 > 2) by scale factor 0.841075\nI1208 02:02:13.556394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04718 > 2) by scale factor 0.656344\nI1208 02:02:17.737426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42105 > 2) by scale factor 0.584616\nI1208 02:02:21.919282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07211 > 2) by scale factor 0.491146\nI1208 02:02:26.099511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07527 > 2) by scale factor 0.963729\nI1208 02:02:30.282763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66755 > 2) by scale factor 0.545323\nI1208 02:02:34.463587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32346 > 2) by scale factor 0.860785\nI1208 02:02:38.643077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34222 > 2) by scale factor 0.85389\nI1208 02:02:42.823732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32422 > 2) by scale factor 0.860504\nI1208 02:02:47.004892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65919 > 2) by scale factor 0.546569\nI1208 02:02:51.186308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97085 > 2) by scale factor 0.402346\nI1208 02:02:55.367234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49179 > 2) by scale factor 0.572773\nI1208 02:02:59.548203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82373 > 2) by scale factor 0.52305\nI1208 02:03:03.727963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04786 > 2) by scale factor 0.656199\nI1208 02:03:07.908232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8069 > 2) by 
scale factor 0.525362\nI1208 02:03:12.087960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69069 > 2) by scale factor 0.743303\nI1208 02:03:16.267383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96976 > 2) by scale factor 0.673455\nI1208 02:03:20.447332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46223 > 2) by scale factor 0.577662\nI1208 02:03:24.628075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83514 > 2) by scale factor 0.521494\nI1208 02:03:28.808800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15366 > 2) by scale factor 0.634184\nI1208 02:03:32.988795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0005 > 2) by scale factor 0.666556\nI1208 02:03:37.169431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04608 > 2) by scale factor 0.977481\nI1208 02:03:41.350823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33417 > 2) by scale factor 0.599849\nI1208 02:03:45.531322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92023 > 2) by scale factor 0.684878\nI1208 02:03:49.712153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7051 > 2) by scale factor 0.739344\nI1208 02:03:53.892993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36245 > 2) by scale factor 0.458458\nI1208 02:03:58.074385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42951 > 2) by scale factor 0.451517\nI1208 02:04:02.254935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19901 > 2) by scale factor 0.476303\nI1208 02:04:06.436733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96095 > 2) by scale factor 0.675458\nI1208 02:04:10.618080  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13936 > 2) by scale factor 0.637072\nI1208 02:04:14.798396  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29822 > 2) by scale factor 0.465309\nI1208 02:04:18.978947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17531 > 2) by scale factor 0.62986\nI1208 02:04:23.160667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83727 > 2) by scale factor 0.521204\nI1208 02:04:27.340559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37871 > 2) by scale factor 0.591942\nI1208 02:04:31.521159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67441 > 2) by scale factor 0.747827\nI1208 02:04:39.878967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82519 > 2) by scale factor 0.52285\nI1208 02:04:44.059458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90352 > 2) by scale factor 0.68882\nI1208 02:04:48.240413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16508 > 2) by scale factor 0.631895\nI1208 02:04:52.420758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22027 > 2) by scale factor 0.621067\nI1208 02:04:56.600821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62153 > 2) by scale factor 0.762912\nI1208 02:05:00.781755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8898 > 2) by scale factor 0.692089\nI1208 02:05:04.962702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17248 > 2) by scale factor 0.920607\nI1208 02:05:09.143379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61838 > 2) by scale factor 0.552734\nI1208 02:05:13.324406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.86218 > 2) by scale factor 0.517842\nI1208 02:05:17.504813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4705 > 2) by scale factor 0.809554\nI1208 02:05:21.684406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10629 > 2) by scale factor 0.487058\nI1208 02:05:25.864732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35678 > 2) by scale factor 0.848615\nI1208 02:05:30.044420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14328 > 2) by scale factor 0.933148\nI1208 02:05:34.225695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49241 > 2) by scale factor 0.57267\nI1208 02:05:38.406479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.26193 > 2) by scale factor 0.884201\nI1208 02:05:42.585922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7286 > 2) by scale factor 0.732978\nI1208 02:05:46.765693  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48881 > 2) by scale factor 0.803597\nI1208 02:05:50.946133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65793 > 2) by scale factor 0.752465\nI1208 02:05:55.126669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48259 > 2) by scale factor 0.80561\nI1208 02:05:59.307878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07944 > 2) by scale factor 0.649468\nI1208 02:06:03.487789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05417 > 2) by scale factor 0.493319\nI1208 02:06:07.668265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.79713 > 2) by scale factor 0.416916\nI1208 02:06:11.848593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63092 > 2) by scale factor 0.550825\nI1208 02:06:16.029561  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2232 > 2) by scale factor 0.620501\nI1208 02:06:20.210633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75958 > 2) by scale factor 0.724748\nI1208 02:06:24.390667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46504 > 2) by scale factor 0.811346\nI1208 02:06:24.402606  1922 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1208 02:09:02.453946  1922 solver.cpp:404]     Test net output #0: accuracy = 0.156588\nI1208 02:09:02.454393  1922 solver.cpp:404]     Test net output #1: loss = 14.9497 (* 1 = 14.9497 loss)\nI1208 02:09:06.393596  1922 solver.cpp:228] Iteration 12800, loss = 15.5819\nI1208 02:09:06.393638  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 02:09:06.393662  1922 solver.cpp:244]     Train net output #1: loss = 15.5819 (* 1 = 15.5819 loss)\nI1208 02:09:06.625449  1922 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI1208 02:09:06.635653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05087 > 2) by scale factor 0.493721\nI1208 02:09:10.815906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42417 > 2) by scale factor 0.584083\nI1208 02:09:14.996440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32055 > 2) by scale factor 0.60231\nI1208 02:09:19.176590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33064 > 2) by scale factor 0.461825\nI1208 02:09:23.358165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12575 > 2) by scale factor 0.639847\nI1208 02:09:27.537956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1705 > 2) by scale factor 0.630815\nI1208 02:09:31.719139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96039 > 2) by scale factor 0.675586\nI1208 02:09:35.898998  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59935 > 2) by scale factor 0.555656\nI1208 02:09:40.080482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78836 > 2) by scale factor 0.41768\nI1208 02:09:44.261260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43412 > 2) by scale factor 0.821653\nI1208 02:09:48.441993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93939 > 2) by scale factor 0.404908\nI1208 02:09:52.622530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34477 > 2) by scale factor 0.597949\nI1208 02:09:56.802687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2404 > 2) by scale factor 0.617208\nI1208 02:10:00.983099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.135 > 2) by scale factor 0.637959\nI1208 02:10:05.164590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02124 > 2) by scale factor 0.66198\nI1208 02:10:09.345832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90119 > 2) by scale factor 0.689373\nI1208 02:10:13.527163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75551 > 2) by scale factor 0.725817\nI1208 02:10:17.707962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.479 > 2) by scale factor 0.806776\nI1208 02:10:21.888447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68002 > 2) by scale factor 0.543475\nI1208 02:10:26.069315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74079 > 2) by scale factor 0.729716\nI1208 02:10:30.249475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43754 > 2) by scale factor 0.581812\nI1208 02:10:34.430048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.29638 > 2) by scale factor 0.606725\nI1208 02:10:38.610512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67316 > 2) by scale factor 0.748179\nI1208 02:10:42.790860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25263 > 2) by scale factor 0.614887\nI1208 02:10:46.971252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39205 > 2) by scale factor 0.589614\nI1208 02:10:51.151659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56619 > 2) by scale factor 0.779366\nI1208 02:10:55.332571  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81206 > 2) by scale factor 0.711224\nI1208 02:10:59.513154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01557 > 2) by scale factor 0.663224\nI1208 02:11:03.693949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16063 > 2) by scale factor 0.480696\nI1208 02:11:07.876044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21425 > 2) by scale factor 0.62223\nI1208 02:11:12.056315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71014 > 2) by scale factor 0.737969\nI1208 02:11:16.235915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94184 > 2) by scale factor 0.507378\nI1208 02:11:20.416867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46178 > 2) by scale factor 0.81242\nI1208 02:11:24.596838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61729 > 2) by scale factor 0.76415\nI1208 02:11:28.777387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72332 > 2) by scale factor 0.537156\nI1208 02:11:32.958139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37894 > 2) by scale factor 0.591902\nI1208 02:11:37.138813  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34682 > 2) by scale factor 0.460106\nI1208 02:11:49.676897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34917 > 2) by scale factor 0.851365\nI1208 02:11:53.857291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08497 > 2) by scale factor 0.959246\nI1208 02:11:58.036962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71883 > 2) by scale factor 0.537803\nI1208 02:12:02.216866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21795 > 2) by scale factor 0.621513\nI1208 02:12:10.575633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59132 > 2) by scale factor 0.771806\nI1208 02:12:14.756757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09318 > 2) by scale factor 0.955482\nI1208 02:12:18.936924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48755 > 2) by scale factor 0.573469\nI1208 02:12:23.117056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17052 > 2) by scale factor 0.630811\nI1208 02:12:27.298125  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70033 > 2) by scale factor 0.540493\nI1208 02:12:31.478509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40718 > 2) by scale factor 0.586996\nI1208 02:12:35.658000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74562 > 2) by scale factor 0.728434\nI1208 02:12:39.838562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.23376 > 2) by scale factor 0.895352\nI1208 02:12:44.019338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17439 > 2) by scale factor 0.630042\nI1208 02:12:48.198700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.4132 > 2) by scale factor 0.828774\nI1208 02:12:52.378648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89758 > 2) by scale factor 0.690232\nI1208 02:12:56.558907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39854 > 2) by scale factor 0.588488\nI1208 02:13:00.739317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29014 > 2) by scale factor 0.607878\nI1208 02:13:04.919848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.23307 > 2) by scale factor 0.382185\nI1208 02:13:09.100983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3334 > 2) by scale factor 0.599987\nI1208 02:13:13.281014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21061 > 2) by scale factor 0.904728\nI1208 02:13:17.461176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1797 > 2) by scale factor 0.478503\nI1208 02:13:21.642110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61876 > 2) by scale factor 0.552676\nI1208 02:13:25.821526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06325 > 2) by scale factor 0.492217\nI1208 02:13:30.001408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08273 > 2) by scale factor 0.648777\nI1208 02:13:34.182221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1558 > 2) by scale factor 0.387913\nI1208 02:13:38.362963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71821 > 2) by scale factor 0.735779\nI1208 02:13:42.543339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79888 > 2) by scale factor 0.71457\nI1208 02:13:46.724112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33925 > 2) by scale factor 0.598936\nI1208 02:13:50.904316  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4391 > 2) by scale factor 0.581547\nI1208 02:13:55.083739  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29006 > 2) by scale factor 0.607892\nI1208 02:13:59.265830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89459 > 2) by scale factor 0.690943\nI1208 02:14:03.446415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62815 > 2) by scale factor 0.551246\nI1208 02:14:07.626732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69525 > 2) by scale factor 0.541236\nI1208 02:14:11.806576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67206 > 2) by scale factor 0.748485\nI1208 02:14:15.986116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59769 > 2) by scale factor 0.555912\nI1208 02:14:20.166421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22476 > 2) by scale factor 0.898973\nI1208 02:14:24.345892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03038 > 2) by scale factor 0.496232\nI1208 02:14:28.525732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64804 > 2) by scale factor 0.54824\nI1208 02:14:32.707363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75015 > 2) by scale factor 0.533312\nI1208 02:14:36.887356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71387 > 2) by scale factor 0.736955\nI1208 02:14:41.069110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09545 > 2) by scale factor 0.488347\nI1208 02:14:45.249277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43719 > 2) by scale factor 0.820617\nI1208 02:14:49.428742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.75882 > 2) by scale factor 0.724948\nI1208 02:14:53.608057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30869 > 2) by scale factor 0.604469\nI1208 02:14:57.787642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34904 > 2) by scale factor 0.597186\nI1208 02:15:01.967201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87129 > 2) by scale factor 0.696552\nI1208 02:15:06.147763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88197 > 2) by scale factor 0.693969\nI1208 02:15:10.327497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07627 > 2) by scale factor 0.963265\nI1208 02:15:14.508437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75996 > 2) by scale factor 0.724648\nI1208 02:15:18.687988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35595 > 2) by scale factor 0.595956\nI1208 02:15:22.868825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38827 > 2) by scale factor 0.590271\nI1208 02:15:27.048512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34512 > 2) by scale factor 0.597885\nI1208 02:15:31.227989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45581 > 2) by scale factor 0.578735\nI1208 02:15:35.408777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30144 > 2) by scale factor 0.605796\nI1208 02:15:39.588110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25859 > 2) by scale factor 0.613762\nI1208 02:15:43.767110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21843 > 2) by scale factor 0.621421\nI1208 02:15:47.946699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08196 > 2) by scale factor 0.960633\nI1208 02:15:52.126366  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04236 > 2) by scale factor 0.49476\nI1208 02:15:56.305763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68089 > 2) by scale factor 0.543347\nI1208 02:16:00.484791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31301 > 2) by scale factor 0.60368\nI1208 02:16:00.496678  1922 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1208 02:18:37.583452  1922 solver.cpp:404]     Test net output #0: accuracy = 0.131412\nI1208 02:18:37.583910  1922 solver.cpp:404]     Test net output #1: loss = 22.4743 (* 1 = 22.4743 loss)\nI1208 02:18:41.522641  1922 solver.cpp:228] Iteration 12900, loss = 22.1051\nI1208 02:18:41.522683  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 02:18:41.522707  1922 solver.cpp:244]     Train net output #1: loss = 22.105 (* 1 = 22.105 loss)\nI1208 02:18:41.753993  1922 sgd_solver.cpp:166] Iteration 12900, lr = 1.935\nI1208 02:18:41.764127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31675 > 2) by scale factor 0.463311\nI1208 02:18:45.943877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54329 > 2) by scale factor 0.786384\nI1208 02:18:50.123738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15168 > 2) by scale factor 0.929506\nI1208 02:18:54.302880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80958 > 2) by scale factor 0.524992\nI1208 02:18:58.482836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16795 > 2) by scale factor 0.631323\nI1208 02:19:02.662786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45533 > 2) by scale factor 0.578816\nI1208 02:19:06.841840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66621 > 2) by scale factor 0.545522\nI1208 02:19:11.023129  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17484 > 2) by scale factor 0.919607\nI1208 02:19:15.202759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63361 > 2) by scale factor 0.431629\nI1208 02:19:19.382123  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98 > 2) by scale factor 0.502512\nI1208 02:19:23.561965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32546 > 2) by scale factor 0.860046\nI1208 02:19:27.742012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89613 > 2) by scale factor 0.690577\nI1208 02:19:31.921243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21877 > 2) by scale factor 0.474072\nI1208 02:19:36.100325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20478 > 2) by scale factor 0.475649\nI1208 02:19:40.279922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60824 > 2) by scale factor 0.766799\nI1208 02:19:44.458925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49736 > 2) by scale factor 0.800845\nI1208 02:19:48.637823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97618 > 2) by scale factor 0.672004\nI1208 02:19:52.817473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95413 > 2) by scale factor 0.5058\nI1208 02:19:56.995932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13384 > 2) by scale factor 0.638194\nI1208 02:20:01.175503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95881 > 2) by scale factor 0.505202\nI1208 02:20:05.354454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30663 > 2) by scale factor 0.604846\nI1208 02:20:09.534166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.26799 > 2) by scale factor 0.468605\nI1208 02:20:13.713371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17002 > 2) by scale factor 0.63091\nI1208 02:20:17.892379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73171 > 2) by scale factor 0.732141\nI1208 02:20:22.071977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15202 > 2) by scale factor 0.481693\nI1208 02:20:26.252394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37366 > 2) by scale factor 0.592828\nI1208 02:20:30.431147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30189 > 2) by scale factor 0.605714\nI1208 02:20:34.610435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59546 > 2) by scale factor 0.435212\nI1208 02:20:38.791512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80043 > 2) by scale factor 0.526256\nI1208 02:20:42.971699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95378 > 2) by scale factor 0.677099\nI1208 02:20:47.150820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80106 > 2) by scale factor 0.714015\nI1208 02:20:51.330569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86749 > 2) by scale factor 0.697473\nI1208 02:20:55.510745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71604 > 2) by scale factor 0.736367\nI1208 02:20:59.690326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88274 > 2) by scale factor 0.5151\nI1208 02:21:03.869827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38934 > 2) by scale factor 0.590085\nI1208 02:21:08.049382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02419 > 2) by scale factor 0.661334\nI1208 02:21:12.228093  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00072 > 2) by scale factor 0.49991\nI1208 02:21:16.405927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25183 > 2) by scale factor 0.888168\nI1208 02:21:20.585584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5243 > 2) by scale factor 0.567489\nI1208 02:21:24.765313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18589 > 2) by scale factor 0.477796\nI1208 02:21:28.944727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40383 > 2) by scale factor 0.587573\nI1208 02:21:33.124074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15314 > 2) by scale factor 0.481563\nI1208 02:21:37.303798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31935 > 2) by scale factor 0.602528\nI1208 02:21:41.483290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39718 > 2) by scale factor 0.834315\nI1208 02:21:45.662945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38169 > 2) by scale factor 0.839739\nI1208 02:21:49.841434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88558 > 2) by scale factor 0.693103\nI1208 02:21:54.019789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74413 > 2) by scale factor 0.728829\nI1208 02:21:58.198581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68531 > 2) by scale factor 0.426866\nI1208 02:22:02.377281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96053 > 2) by scale factor 0.504983\nI1208 02:22:06.556622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70727 > 2) by scale factor 0.73875\nI1208 02:22:10.735713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.4383 > 2) by scale factor 0.581682\nI1208 02:22:19.091491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96303 > 2) by scale factor 0.674984\nI1208 02:22:23.272279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38782 > 2) by scale factor 0.59035\nI1208 02:22:27.451023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50929 > 2) by scale factor 0.797038\nI1208 02:22:31.631481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60409 > 2) by scale factor 0.434397\nI1208 02:22:35.811133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30495 > 2) by scale factor 0.605153\nI1208 02:22:39.990403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76025 > 2) by scale factor 0.531879\nI1208 02:22:44.169309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79585 > 2) by scale factor 0.526891\nI1208 02:22:48.348589  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84054 > 2) by scale factor 0.704091\nI1208 02:22:52.526463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66173 > 2) by scale factor 0.54619\nI1208 02:22:56.704387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50123 > 2) by scale factor 0.571228\nI1208 02:23:00.882549  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44891 > 2) by scale factor 0.449548\nI1208 02:23:05.061983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65823 > 2) by scale factor 0.546712\nI1208 02:23:09.241771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21312 > 2) by scale factor 0.622448\nI1208 02:23:13.422387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63126 > 2) by scale factor 0.550773\nI1208 02:23:17.602021  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83592 > 2) by scale factor 0.521388\nI1208 02:23:21.781522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0427 > 2) by scale factor 0.979097\nI1208 02:23:25.960793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92113 > 2) by scale factor 0.684666\nI1208 02:23:30.140283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74474 > 2) by scale factor 0.534082\nI1208 02:23:34.319627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1117 > 2) by scale factor 0.642735\nI1208 02:23:38.499341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9108 > 2) by scale factor 0.687097\nI1208 02:23:42.679342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05057 > 2) by scale factor 0.655616\nI1208 02:23:46.860518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53634 > 2) by scale factor 0.565557\nI1208 02:23:51.039902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08526 > 2) by scale factor 0.648244\nI1208 02:23:55.219619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06626 > 2) by scale factor 0.967934\nI1208 02:24:03.577286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90311 > 2) by scale factor 0.688915\nI1208 02:24:07.757632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21873 > 2) by scale factor 0.901415\nI1208 02:24:16.115159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59393 > 2) by scale factor 0.556494\nI1208 02:24:24.471781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90057 > 2) by scale factor 0.512745\nI1208 02:24:28.651737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.68523 > 2) by scale factor 0.542707\nI1208 02:24:32.832515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63457 > 2) by scale factor 0.550272\nI1208 02:24:37.012238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06899 > 2) by scale factor 0.491523\nI1208 02:24:41.192272  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05106 > 2) by scale factor 0.655511\nI1208 02:24:45.372776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67471 > 2) by scale factor 0.54426\nI1208 02:24:49.552330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79409 > 2) by scale factor 0.527135\nI1208 02:24:53.732589  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36063 > 2) by scale factor 0.458649\nI1208 02:24:57.913089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58387 > 2) by scale factor 0.436313\nI1208 02:25:02.091758  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9033 > 2) by scale factor 0.512387\nI1208 02:25:06.271181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9453 > 2) by scale factor 0.506932\nI1208 02:25:10.449321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03335 > 2) by scale factor 0.659337\nI1208 02:25:14.629246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18343 > 2) by scale factor 0.628252\nI1208 02:25:18.807858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9123 > 2) by scale factor 0.686743\nI1208 02:25:22.986245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68046 > 2) by scale factor 0.74614\nI1208 02:25:27.166828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90402 > 2) by scale factor 0.6887\nI1208 02:25:31.346781  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67745 > 2) by scale factor 0.746978\nI1208 02:25:35.526609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24163 > 2) by scale factor 0.616973\nI1208 02:25:35.538595  1922 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1208 02:28:13.633990  1922 solver.cpp:404]     Test net output #0: accuracy = 0.157353\nI1208 02:28:13.634441  1922 solver.cpp:404]     Test net output #1: loss = 18.0689 (* 1 = 18.0689 loss)\nI1208 02:28:17.576349  1922 solver.cpp:228] Iteration 13000, loss = 17.5321\nI1208 02:28:17.576393  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 02:28:17.576418  1922 solver.cpp:244]     Train net output #1: loss = 17.5321 (* 1 = 17.5321 loss)\nI1208 02:28:17.802392  1922 sgd_solver.cpp:166] Iteration 13000, lr = 1.95\nI1208 02:28:17.812572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53839 > 2) by scale factor 0.440685\nI1208 02:28:21.994364  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66809 > 2) by scale factor 0.7496\nI1208 02:28:26.176163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53605 > 2) by scale factor 0.788627\nI1208 02:28:30.357614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45822 > 2) by scale factor 0.578333\nI1208 02:28:34.538951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65849 > 2) by scale factor 0.752306\nI1208 02:28:38.720079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5081 > 2) by scale factor 0.797417\nI1208 02:28:42.901667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17069 > 2) by scale factor 0.630777\nI1208 02:28:47.082470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01099 > 2) by scale factor 0.664233\nI1208 02:28:51.264142  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7963 > 2) by scale factor 0.526829\nI1208 02:28:55.444463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26561 > 2) by scale factor 0.612444\nI1208 02:28:59.625794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6859 > 2) by scale factor 0.542609\nI1208 02:29:03.807778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54912 > 2) by scale factor 0.784584\nI1208 02:29:07.988682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34676 > 2) by scale factor 0.597593\nI1208 02:29:12.170680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45946 > 2) by scale factor 0.578124\nI1208 02:29:16.352020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49137 > 2) by scale factor 0.802773\nI1208 02:29:20.533195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95071 > 2) by scale factor 0.677803\nI1208 02:29:24.714529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56753 > 2) by scale factor 0.778959\nI1208 02:29:28.895455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55595 > 2) by scale factor 0.438987\nI1208 02:29:33.076262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07378 > 2) by scale factor 0.490945\nI1208 02:29:37.257171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19527 > 2) by scale factor 0.625925\nI1208 02:29:41.439038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.679 > 2) by scale factor 0.543626\nI1208 02:29:45.619513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05573 > 2) by scale factor 0.49313\nI1208 02:29:49.801012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.51612 > 2) by scale factor 0.442858\nI1208 02:29:53.982908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8423 > 2) by scale factor 0.520521\nI1208 02:29:58.164468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19372 > 2) by scale factor 0.476903\nI1208 02:30:02.345899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31845 > 2) by scale factor 0.862646\nI1208 02:30:06.527817  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42758 > 2) by scale factor 0.583502\nI1208 02:30:10.708526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78203 > 2) by scale factor 0.528816\nI1208 02:30:14.890341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06074 > 2) by scale factor 0.653436\nI1208 02:30:19.071846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26606 > 2) by scale factor 0.612359\nI1208 02:30:23.252022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7242 > 2) by scale factor 0.734162\nI1208 02:30:27.433876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60439 > 2) by scale factor 0.554878\nI1208 02:30:31.615072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93792 > 2) by scale factor 0.680753\nI1208 02:30:39.975193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58176 > 2) by scale factor 0.774666\nI1208 02:30:44.156791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38389 > 2) by scale factor 0.456215\nI1208 02:30:48.338454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15843 > 2) by scale factor 0.480951\nI1208 02:30:52.520576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27063 > 2) by scale factor 0.880812\nI1208 02:30:56.702085  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18634 > 2) by scale factor 0.91477\nI1208 02:31:00.883011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91523 > 2) by scale factor 0.686051\nI1208 02:31:05.064831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94755 > 2) by scale factor 0.678528\nI1208 02:31:09.245983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39646 > 2) by scale factor 0.834566\nI1208 02:31:13.427004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43758 > 2) by scale factor 0.820487\nI1208 02:31:17.608932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83696 > 2) by scale factor 0.704981\nI1208 02:31:21.788954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07303 > 2) by scale factor 0.650823\nI1208 02:31:25.969842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45515 > 2) by scale factor 0.578846\nI1208 02:31:30.151093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04406 > 2) by scale factor 0.657018\nI1208 02:31:34.332726  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53092 > 2) by scale factor 0.790227\nI1208 02:31:38.513957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4553 > 2) by scale factor 0.814564\nI1208 02:31:42.695369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73798 > 2) by scale factor 0.535048\nI1208 02:31:46.876680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3172 > 2) by scale factor 0.463263\nI1208 02:31:51.057334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56844 > 2) by scale factor 0.560469\nI1208 02:31:55.238545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.97835 > 2) by scale factor 0.50272\nI1208 02:31:59.419224  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21968 > 2) by scale factor 0.621181\nI1208 02:32:03.599544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4448 > 2) by scale factor 0.580586\nI1208 02:32:07.781230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87229 > 2) by scale factor 0.696309\nI1208 02:32:11.962703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75757 > 2) by scale factor 0.725277\nI1208 02:32:16.144052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54773 > 2) by scale factor 0.785012\nI1208 02:32:20.324669  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2577 > 2) by scale factor 0.61393\nI1208 02:32:24.505802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42055 > 2) by scale factor 0.584702\nI1208 02:32:28.685909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13269 > 2) by scale factor 0.638428\nI1208 02:32:32.867560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4551 > 2) by scale factor 0.578854\nI1208 02:32:37.048990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42866 > 2) by scale factor 0.583319\nI1208 02:32:41.230095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18902 > 2) by scale factor 0.477439\nI1208 02:32:45.411254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56172 > 2) by scale factor 0.780724\nI1208 02:32:49.592865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08779 > 2) by scale factor 0.647713\nI1208 02:32:53.773489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88908 > 2) by scale factor 0.692262\nI1208 02:32:57.954375  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08996 > 2) by scale factor 0.956956\nI1208 02:33:02.135267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00704 > 2) by scale factor 0.665105\nI1208 02:33:06.315949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49421 > 2) by scale factor 0.801857\nI1208 02:33:10.496261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65057 > 2) by scale factor 0.547859\nI1208 02:33:14.676900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33248 > 2) by scale factor 0.461629\nI1208 02:33:18.857600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09989 > 2) by scale factor 0.487818\nI1208 02:33:23.038023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9873 > 2) by scale factor 0.6695\nI1208 02:33:27.217967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40769 > 2) by scale factor 0.586907\nI1208 02:33:31.398000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04421 > 2) by scale factor 0.656984\nI1208 02:33:35.578478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8018 > 2) by scale factor 0.713826\nI1208 02:33:39.759429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22331 > 2) by scale factor 0.62048\nI1208 02:33:43.941444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46345 > 2) by scale factor 0.811868\nI1208 02:33:48.122509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77147 > 2) by scale factor 0.721639\nI1208 02:33:52.303578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89559 > 2) by scale factor 0.690705\nI1208 02:33:56.483674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.34235 > 2) by scale factor 0.598382\nI1208 02:34:00.664562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57679 > 2) by scale factor 0.559161\nI1208 02:34:04.845139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7024 > 2) by scale factor 0.740082\nI1208 02:34:09.025856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72228 > 2) by scale factor 0.734678\nI1208 02:34:13.206913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54148 > 2) by scale factor 0.786942\nI1208 02:34:17.388605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97864 > 2) by scale factor 0.502685\nI1208 02:34:21.569602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48126 > 2) by scale factor 0.806043\nI1208 02:34:25.751179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50477 > 2) by scale factor 0.570651\nI1208 02:34:29.931001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35867 > 2) by scale factor 0.847935\nI1208 02:34:34.112562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14713 > 2) by scale factor 0.482261\nI1208 02:34:38.293678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44398 > 2) by scale factor 0.450047\nI1208 02:34:42.473747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03663 > 2) by scale factor 0.495463\nI1208 02:34:46.654228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79449 > 2) by scale factor 0.715693\nI1208 02:34:50.834676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28812 > 2) by scale factor 0.60825\nI1208 02:34:55.016903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26183 > 2) by scale factor 0.613152\nI1208 02:34:59.198421  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78226 > 2) by scale factor 0.528785\nI1208 02:35:03.379200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3606 > 2) by scale factor 0.595132\nI1208 02:35:07.559674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8474 > 2) by scale factor 0.702395\nI1208 02:35:11.740824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1603 > 2) by scale factor 0.480735\nI1208 02:35:11.752643  1922 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1208 02:37:49.245995  1922 solver.cpp:404]     Test net output #0: accuracy = 0.166882\nI1208 02:37:49.246454  1922 solver.cpp:404]     Test net output #1: loss = 20.0364 (* 1 = 20.0364 loss)\nI1208 02:37:53.185703  1922 solver.cpp:228] Iteration 13100, loss = 20.5052\nI1208 02:37:53.185742  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1208 02:37:53.185768  1922 solver.cpp:244]     Train net output #1: loss = 20.5052 (* 1 = 20.5052 loss)\nI1208 02:37:53.418150  1922 sgd_solver.cpp:166] Iteration 13100, lr = 1.965\nI1208 02:37:53.428216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97157 > 2) by scale factor 0.673044\nI1208 02:37:57.607700  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96363 > 2) by scale factor 0.674847\nI1208 02:38:01.788290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24308 > 2) by scale factor 0.891631\nI1208 02:38:05.968812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65013 > 2) by scale factor 0.547925\nI1208 02:38:10.150602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19229 > 2) by scale factor 0.477066\nI1208 02:38:14.331452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98891 > 2) by scale factor 0.50139\nI1208 02:38:18.511186  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59768 > 2) by scale factor 0.555914\nI1208 02:38:22.691648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54474 > 2) by scale factor 0.785934\nI1208 02:38:26.872545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67904 > 2) by scale factor 0.746536\nI1208 02:38:31.053556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33314 > 2) by scale factor 0.600034\nI1208 02:38:35.233860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13222 > 2) by scale factor 0.638524\nI1208 02:38:39.414423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2864 > 2) by scale factor 0.874738\nI1208 02:38:43.595422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23606 > 2) by scale factor 0.618036\nI1208 02:38:47.775456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8022 > 2) by scale factor 0.713724\nI1208 02:38:51.955782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85065 > 2) by scale factor 0.701595\nI1208 02:38:56.136294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03938 > 2) by scale factor 0.495126\nI1208 02:39:00.316728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22305 > 2) by scale factor 0.620529\nI1208 02:39:04.496896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40286 > 2) by scale factor 0.832342\nI1208 02:39:08.678334  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19016 > 2) by scale factor 0.477309\nI1208 02:39:12.858634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97207 > 2) by scale factor 0.503516\nI1208 02:39:17.038512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.19488 > 2) by scale factor 0.626001\nI1208 02:39:21.219008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84522 > 2) by scale factor 0.702932\nI1208 02:39:25.399933  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25518 > 2) by scale factor 0.614405\nI1208 02:39:29.580665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76741 > 2) by scale factor 0.530869\nI1208 02:39:37.939718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48241 > 2) by scale factor 0.80567\nI1208 02:39:42.120864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59604 > 2) by scale factor 0.556168\nI1208 02:39:46.301919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82766 > 2) by scale factor 0.7073\nI1208 02:39:50.481957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0677 > 2) by scale factor 0.651954\nI1208 02:39:54.662377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53105 > 2) by scale factor 0.566404\nI1208 02:39:58.842538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52414 > 2) by scale factor 0.442073\nI1208 02:40:03.025418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93258 > 2) by scale factor 0.508572\nI1208 02:40:07.205014  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54007 > 2) by scale factor 0.787378\nI1208 02:40:11.385164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92934 > 2) by scale factor 0.682747\nI1208 02:40:15.564198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24877 > 2) by scale factor 0.889376\nI1208 02:40:19.743566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88422 > 2) by scale factor 0.514904\nI1208 02:40:23.924151  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7792 > 2) by scale factor 0.719632\nI1208 02:40:28.103435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74323 > 2) by scale factor 0.729069\nI1208 02:40:32.283918  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96908 > 2) by scale factor 0.673609\nI1208 02:40:36.464001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62482 > 2) by scale factor 0.761956\nI1208 02:40:40.644903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38811 > 2) by scale factor 0.455778\nI1208 02:40:44.824995  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10655 > 2) by scale factor 0.949419\nI1208 02:40:49.004851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88072 > 2) by scale factor 0.694272\nI1208 02:40:53.185056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45695 > 2) by scale factor 0.448737\nI1208 02:40:57.365419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24851 > 2) by scale factor 0.615668\nI1208 02:41:01.545140  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30916 > 2) by scale factor 0.604383\nI1208 02:41:05.725759  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91215 > 2) by scale factor 0.511228\nI1208 02:41:09.906090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86895 > 2) by scale factor 0.516936\nI1208 02:41:14.087939  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97955 > 2) by scale factor 0.50257\nI1208 02:41:18.268146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17496 > 2) by scale factor 0.629929\nI1208 02:41:22.448554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.29265 > 2) by scale factor 0.607413\nI1208 02:41:26.629268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92434 > 2) by scale factor 0.683916\nI1208 02:41:30.808720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50354 > 2) by scale factor 0.79887\nI1208 02:41:34.989471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74662 > 2) by scale factor 0.533814\nI1208 02:41:39.169761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45352 > 2) by scale factor 0.579119\nI1208 02:41:43.349956  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9071 > 2) by scale factor 0.511889\nI1208 02:41:47.530308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48546 > 2) by scale factor 0.573813\nI1208 02:41:51.710599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4592 > 2) by scale factor 0.44851\nI1208 02:41:55.890763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70521 > 2) by scale factor 0.425061\nI1208 02:42:00.070657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77548 > 2) by scale factor 0.720596\nI1208 02:42:04.251253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18044 > 2) by scale factor 0.628844\nI1208 02:42:08.431288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41681 > 2) by scale factor 0.452816\nI1208 02:42:12.611040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55483 > 2) by scale factor 0.782832\nI1208 02:42:16.790470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32754 > 2) by scale factor 0.859275\nI1208 02:42:20.970557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19951 > 2) by scale factor 0.476246\nI1208 02:42:25.149922  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55071 > 2) by scale factor 0.784094\nI1208 02:42:29.329445  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58994 > 2) by scale factor 0.557112\nI1208 02:42:33.509639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41783 > 2) by scale factor 0.827189\nI1208 02:42:37.690098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18541 > 2) by scale factor 0.627863\nI1208 02:42:41.870270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64889 > 2) by scale factor 0.43021\nI1208 02:42:46.050603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85115 > 2) by scale factor 0.519325\nI1208 02:42:50.230968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41901 > 2) by scale factor 0.584964\nI1208 02:42:54.410502  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5462 > 2) by scale factor 0.563984\nI1208 02:42:58.591020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18023 > 2) by scale factor 0.628884\nI1208 02:43:02.770897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69055 > 2) by scale factor 0.743343\nI1208 02:43:06.950558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00268 > 2) by scale factor 0.666072\nI1208 02:43:11.130882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28521 > 2) by scale factor 0.875195\nI1208 02:43:15.310596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6037 > 2) by scale factor 0.554985\nI1208 02:43:19.490062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48185 > 2) by scale factor 0.805852\nI1208 02:43:23.669562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.2367 > 2) by scale factor 0.617913\nI1208 02:43:27.850235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63511 > 2) by scale factor 0.758981\nI1208 02:43:32.029695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05922 > 2) by scale factor 0.97124\nI1208 02:43:36.208984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22733 > 2) by scale factor 0.473112\nI1208 02:43:40.387611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82598 > 2) by scale factor 0.707718\nI1208 02:43:44.567733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05299 > 2) by scale factor 0.493462\nI1208 02:43:48.747447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41417 > 2) by scale factor 0.453086\nI1208 02:43:52.927755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50248 > 2) by scale factor 0.571025\nI1208 02:43:57.107151  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02941 > 2) by scale factor 0.496351\nI1208 02:44:01.287407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24866 > 2) by scale factor 0.889419\nI1208 02:44:05.466629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43086 > 2) by scale factor 0.822756\nI1208 02:44:13.824257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69624 > 2) by scale factor 0.741774\nI1208 02:44:18.003720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50739 > 2) by scale factor 0.797643\nI1208 02:44:22.184258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72683 > 2) by scale factor 0.733452\nI1208 02:44:26.364862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1715 > 2) by scale factor 0.630616\nI1208 02:44:30.544229  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88027 > 2) by scale factor 0.694378\nI1208 02:44:34.725487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16491 > 2) by scale factor 0.631929\nI1208 02:44:38.905699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33523 > 2) by scale factor 0.599659\nI1208 02:44:43.085147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72921 > 2) by scale factor 0.732811\nI1208 02:44:47.264714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35406 > 2) by scale factor 0.596292\nI1208 02:44:47.276652  1922 solver.cpp:337] Iteration 13200, Testing net (#0)\nI1208 02:47:25.349311  1922 solver.cpp:404]     Test net output #0: accuracy = 0.216765\nI1208 02:47:25.349776  1922 solver.cpp:404]     Test net output #1: loss = 11.7065 (* 1 = 11.7065 loss)\nI1208 02:47:29.286003  1922 solver.cpp:228] Iteration 13200, loss = 10.6043\nI1208 02:47:29.286041  1922 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1208 02:47:29.286057  1922 solver.cpp:244]     Train net output #1: loss = 10.6043 (* 1 = 10.6043 loss)\nI1208 02:47:29.518102  1922 sgd_solver.cpp:166] Iteration 13200, lr = 1.98\nI1208 02:47:29.528211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93175 > 2) by scale factor 0.682185\nI1208 02:47:33.706281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72135 > 2) by scale factor 0.734928\nI1208 02:47:37.884618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54949 > 2) by scale factor 0.439609\nI1208 02:47:42.062471  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73334 > 2) by scale factor 0.535713\nI1208 02:47:50.417819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50819 > 2) by scale factor 0.570094\nI1208 02:47:54.596258  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78254 > 2) by scale factor 0.718768\nI1208 02:47:58.774803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21058 > 2) by scale factor 0.90474\nI1208 02:48:02.954264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28018 > 2) by scale factor 0.877123\nI1208 02:48:07.131947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05413 > 2) by scale factor 0.654851\nI1208 02:48:11.309962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98961 > 2) by scale factor 0.501303\nI1208 02:48:15.488631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53317 > 2) by scale factor 0.789526\nI1208 02:48:19.666751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52638 > 2) by scale factor 0.567153\nI1208 02:48:23.845187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47337 > 2) by scale factor 0.447091\nI1208 02:48:28.023826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08886 > 2) by scale factor 0.647488\nI1208 02:48:32.202157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01053 > 2) by scale factor 0.664334\nI1208 02:48:36.381245  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40054 > 2) by scale factor 0.833147\nI1208 02:48:40.559403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63717 > 2) by scale factor 0.549878\nI1208 02:48:44.737892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58665 > 2) by scale factor 0.557623\nI1208 02:48:48.916199  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48607 > 2) by scale factor 0.804482\nI1208 02:48:53.094506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.41173 > 2) by scale factor 0.453337\nI1208 02:48:57.272039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71191 > 2) by scale factor 0.424456\nI1208 02:49:01.449654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54311 > 2) by scale factor 0.786438\nI1208 02:49:05.627444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95903 > 2) by scale factor 0.675896\nI1208 02:49:09.805925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73684 > 2) by scale factor 0.535211\nI1208 02:49:13.984642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12961 > 2) by scale factor 0.939138\nI1208 02:49:18.162349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00342 > 2) by scale factor 0.998292\nI1208 02:49:22.341017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40522 > 2) by scale factor 0.587334\nI1208 02:49:26.518388  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34591 > 2) by scale factor 0.597745\nI1208 02:49:30.697280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81941 > 2) by scale factor 0.709368\nI1208 02:49:34.876278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77354 > 2) by scale factor 0.418977\nI1208 02:49:39.054348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03416 > 2) by scale factor 0.659161\nI1208 02:49:43.232419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52535 > 2) by scale factor 0.56732\nI1208 02:49:47.410374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30437 > 2) by scale factor 0.464644\nI1208 02:49:51.588822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14114 > 2) by scale factor 0.636712\nI1208 
02:49:55.767164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30737 > 2) by scale factor 0.604711\nI1208 02:49:59.945610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33328 > 2) by scale factor 0.461544\nI1208 02:50:04.123342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87171 > 2) by scale factor 0.69645\nI1208 02:50:08.301867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25351 > 2) by scale factor 0.614722\nI1208 02:50:12.480032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19935 > 2) by scale factor 0.909359\nI1208 02:50:16.658385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37459 > 2) by scale factor 0.592664\nI1208 02:50:20.836630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27096 > 2) by scale factor 0.611441\nI1208 02:50:25.014155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91497 > 2) by scale factor 0.686113\nI1208 02:50:29.192567  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92352 > 2) by scale factor 0.684106\nI1208 02:50:33.369731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83816 > 2) by scale factor 0.704682\nI1208 02:50:37.547646  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84238 > 2) by scale factor 0.703636\nI1208 02:50:41.725869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07623 > 2) by scale factor 0.490649\nI1208 02:50:45.903699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5932 > 2) by scale factor 0.435426\nI1208 02:50:50.082648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52378 > 2) by scale factor 0.442108\nI1208 02:50:54.261834  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.91453 > 2) by scale factor 0.510918\nI1208 02:50:58.439955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7055 > 2) by scale factor 0.739234\nI1208 02:51:02.618000  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27747 > 2) by scale factor 0.610227\nI1208 02:51:06.796670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03036 > 2) by scale factor 0.496234\nI1208 02:51:10.974864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32421 > 2) by scale factor 0.860509\nI1208 02:51:15.152132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02465 > 2) by scale factor 0.496938\nI1208 02:51:19.329694  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08551 > 2) by scale factor 0.648192\nI1208 02:51:23.507766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01523 > 2) by scale factor 0.663299\nI1208 02:51:27.686153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46145 > 2) by scale factor 0.577793\nI1208 02:51:31.865149  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29249 > 2) by scale factor 0.607443\nI1208 02:51:36.043104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3267 > 2) by scale factor 0.601197\nI1208 02:51:40.221366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49231 > 2) by scale factor 0.802468\nI1208 02:51:44.398885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.525 > 2) by scale factor 0.567375\nI1208 02:51:48.577049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15855 > 2) by scale factor 0.633201\nI1208 02:51:52.755215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46728 > 2) by scale factor 0.81061\nI1208 
02:51:56.933362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69867 > 2) by scale factor 0.741105\nI1208 02:52:01.110944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38674 > 2) by scale factor 0.837964\nI1208 02:52:05.290048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07212 > 2) by scale factor 0.651017\nI1208 02:52:09.467947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46922 > 2) by scale factor 0.809972\nI1208 02:52:13.646191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03506 > 2) by scale factor 0.982771\nI1208 02:52:26.176535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00491 > 2) by scale factor 0.499387\nI1208 02:52:30.354717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0866 > 2) by scale factor 0.647962\nI1208 02:52:34.532667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94557 > 2) by scale factor 0.678986\nI1208 02:52:38.711714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04963 > 2) by scale factor 0.655818\nI1208 02:52:42.889605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51162 > 2) by scale factor 0.569538\nI1208 02:52:47.067010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42491 > 2) by scale factor 0.583957\nI1208 02:52:51.245371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30255 > 2) by scale factor 0.464841\nI1208 02:52:55.423372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59722 > 2) by scale factor 0.770053\nI1208 02:52:59.601400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29911 > 2) by scale factor 0.465213\nI1208 02:53:03.779741  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.95212 > 2) by scale factor 0.67748\nI1208 02:53:07.957118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80546 > 2) by scale factor 0.712896\nI1208 02:53:12.135715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16832 > 2) by scale factor 0.922375\nI1208 02:53:16.314118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69184 > 2) by scale factor 0.742985\nI1208 02:53:20.492152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98585 > 2) by scale factor 0.669825\nI1208 02:53:24.670531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51867 > 2) by scale factor 0.79407\nI1208 02:53:28.848470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.369 > 2) by scale factor 0.593648\nI1208 02:53:33.026638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79678 > 2) by scale factor 0.526762\nI1208 02:53:37.205415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26667 > 2) by scale factor 0.612244\nI1208 02:53:41.383581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78045 > 2) by scale factor 0.719309\nI1208 02:53:45.561350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0219 > 2) by scale factor 0.661834\nI1208 02:53:49.751039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48575 > 2) by scale factor 0.445856\nI1208 02:53:53.940837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96155 > 2) by scale factor 0.504853\nI1208 02:53:58.131130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68617 > 2) by scale factor 0.542569\nI1208 02:54:02.322262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98347 > 2) by scale factor 0.670361\nI1208 
02:54:06.511613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91932 > 2) by scale factor 0.510293\nI1208 02:54:10.702033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00302 > 2) by scale factor 0.499622\nI1208 02:54:19.079720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56192 > 2) by scale factor 0.561495\nI1208 02:54:23.270110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78678 > 2) by scale factor 0.717675\nI1208 02:54:23.281911  1922 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1208 02:57:01.330175  1922 solver.cpp:404]     Test net output #0: accuracy = 0.204059\nI1208 02:57:01.330627  1922 solver.cpp:404]     Test net output #1: loss = 11.5102 (* 1 = 11.5102 loss)\nI1208 02:57:05.268293  1922 solver.cpp:228] Iteration 13300, loss = 13.0513\nI1208 02:57:05.268335  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1208 02:57:05.268353  1922 solver.cpp:244]     Train net output #1: loss = 13.0513 (* 1 = 13.0513 loss)\nI1208 02:57:05.510653  1922 sgd_solver.cpp:166] Iteration 13300, lr = 1.995\nI1208 02:57:05.520786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.17552 > 2) by scale factor 0.919318\nI1208 02:57:09.710494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00398 > 2) by scale factor 0.665783\nI1208 02:57:13.901175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04597 > 2) by scale factor 0.494319\nI1208 02:57:18.091197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09573 > 2) by scale factor 0.488313\nI1208 02:57:22.280859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51359 > 2) by scale factor 0.569219\nI1208 02:57:26.471163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74701 > 2) by scale factor 
0.728064\nI1208 02:57:30.661291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45773 > 2) by scale factor 0.813758\nI1208 02:57:34.851632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83782 > 2) by scale factor 0.521129\nI1208 02:57:39.042187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81516 > 2) by scale factor 0.710439\nI1208 02:57:43.232779  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44186 > 2) by scale factor 0.819046\nI1208 02:57:47.423024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96243 > 2) by scale factor 0.675121\nI1208 02:57:51.613401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11297 > 2) by scale factor 0.642473\nI1208 02:57:55.803740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84982 > 2) by scale factor 0.7018\nI1208 02:57:59.993731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.30479 > 2) by scale factor 0.377018\nI1208 02:58:04.182937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.49509 > 2) by scale factor 0.363962\nI1208 02:58:08.372251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84756 > 2) by scale factor 0.702355\nI1208 02:58:12.562665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91502 > 2) by scale factor 0.510854\nI1208 02:58:16.752424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49866 > 2) by scale factor 0.444576\nI1208 02:58:20.943029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.96561 > 2) by scale factor 0.402771\nI1208 02:58:25.131279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55756 > 2) by scale factor 0.781997\nI1208 02:58:29.320188  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.81488 > 2) by scale factor 0.524262\nI1208 02:58:33.510298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90617 > 2) by scale factor 0.512011\nI1208 02:58:37.699977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65954 > 2) by scale factor 0.752011\nI1208 02:58:41.889055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65014 > 2) by scale factor 0.547924\nI1208 02:58:46.078979  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34634 > 2) by scale factor 0.85239\nI1208 02:58:50.266515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3606 > 2) by scale factor 0.458652\nI1208 02:58:54.455437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19139 > 2) by scale factor 0.626686\nI1208 02:58:58.644814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6954 > 2) by scale factor 0.541214\nI1208 02:59:07.021034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22456 > 2) by scale factor 0.62024\nI1208 02:59:11.211320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.491 > 2) by scale factor 0.364232\nI1208 02:59:15.401049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99437 > 2) by scale factor 0.500705\nI1208 02:59:19.591464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15397 > 2) by scale factor 0.38805\nI1208 02:59:23.780400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0663 > 2) by scale factor 0.491848\nI1208 02:59:27.969456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24044 > 2) by scale factor 0.6172\nI1208 02:59:32.158668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40683 > 2) by scale factor 
0.587056\nI1208 02:59:36.347553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58704 > 2) by scale factor 0.436011\nI1208 02:59:40.536383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.57954 > 2) by scale factor 0.358453\nI1208 02:59:44.726387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.64399 > 2) by scale factor 0.354359\nI1208 02:59:48.915292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62273 > 2) by scale factor 0.55207\nI1208 02:59:53.105943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57272 > 2) by scale factor 0.437377\nI1208 02:59:57.294525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99416 > 2) by scale factor 0.400468\nI1208 03:00:01.483180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81497 > 2) by scale factor 0.52425\nI1208 03:00:05.673360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.143 > 2) by scale factor 0.482742\nI1208 03:00:09.861903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56033 > 2) by scale factor 0.561745\nI1208 03:00:14.050917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86647 > 2) by scale factor 0.697724\nI1208 03:00:18.240255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62419 > 2) by scale factor 0.551847\nI1208 03:00:22.429425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9844 > 2) by scale factor 0.670151\nI1208 03:00:26.618818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76142 > 2) by scale factor 0.724265\nI1208 03:00:30.808447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53235 > 2) by scale factor 0.566195\nI1208 03:00:34.998138  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.55477 > 2) by scale factor 0.4391\nI1208 03:00:39.187707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30208 > 2) by scale factor 0.605678\nI1208 03:00:43.377889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81151 > 2) by scale factor 0.711361\nI1208 03:00:47.567236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97483 > 2) by scale factor 0.503166\nI1208 03:00:51.755575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79402 > 2) by scale factor 0.527145\nI1208 03:00:55.945575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34913 > 2) by scale factor 0.459862\nI1208 03:01:00.135977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36086 > 2) by scale factor 0.595085\nI1208 03:01:04.325142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38686 > 2) by scale factor 0.590517\nI1208 03:01:08.512698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.653 > 2) by scale factor 0.753864\nI1208 03:01:12.702628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20298 > 2) by scale factor 0.624419\nI1208 03:01:16.891975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80216 > 2) by scale factor 0.526017\nI1208 03:01:21.082777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6072 > 2) by scale factor 0.767107\nI1208 03:01:25.271860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58761 > 2) by scale factor 0.772914\nI1208 03:01:29.461432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26885 > 2) by scale factor 0.611837\nI1208 03:01:33.650251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83501 > 2) by scale factor 
0.705465\nI1208 03:01:37.840804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49378 > 2) by scale factor 0.801995\nI1208 03:01:42.030251  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87547 > 2) by scale factor 0.516067\nI1208 03:01:46.220022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98331 > 2) by scale factor 0.670397\nI1208 03:01:54.597301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91611 > 2) by scale factor 0.685844\nI1208 03:01:58.786970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4725 > 2) by scale factor 0.575953\nI1208 03:02:02.975688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57876 > 2) by scale factor 0.775568\nI1208 03:02:11.352409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56914 > 2) by scale factor 0.778471\nI1208 03:02:15.541541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04845 > 2) by scale factor 0.976346\nI1208 03:02:19.730052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08199 > 2) by scale factor 0.648932\nI1208 03:02:23.918908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24397 > 2) by scale factor 0.471257\nI1208 03:02:28.108507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03833 > 2) by scale factor 0.658257\nI1208 03:02:32.297919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53069 > 2) by scale factor 0.441434\nI1208 03:02:36.487061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55825 > 2) by scale factor 0.781786\nI1208 03:02:40.677731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96694 > 2) by scale factor 0.504166\nI1208 03:02:44.866415  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 4.24116 > 2) by scale factor 0.471569\nI1208 03:02:49.055747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88046 > 2) by scale factor 0.694334\nI1208 03:02:53.244915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60071 > 2) by scale factor 0.434716\nI1208 03:02:57.433215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86503 > 2) by scale factor 0.51746\nI1208 03:03:01.622004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9096 > 2) by scale factor 0.511561\nI1208 03:03:05.811818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54744 > 2) by scale factor 0.785101\nI1208 03:03:10.000573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41683 > 2) by scale factor 0.585337\nI1208 03:03:14.189963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05139 > 2) by scale factor 0.39593\nI1208 03:03:18.379026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89518 > 2) by scale factor 0.690803\nI1208 03:03:22.568687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70035 > 2) by scale factor 0.54049\nI1208 03:03:26.757969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48183 > 2) by scale factor 0.574411\nI1208 03:03:30.947486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40558 > 2) by scale factor 0.587272\nI1208 03:03:35.136394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40468 > 2) by scale factor 0.454062\nI1208 03:03:39.325214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16018 > 2) by scale factor 0.480749\nI1208 03:03:43.513756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07256 > 2) by scale factor 
0.491091\nI1208 03:03:47.703099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38055 > 2) by scale factor 0.59162\nI1208 03:03:51.891566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09786 > 2) by scale factor 0.488059\nI1208 03:03:56.080060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10758 > 2) by scale factor 0.643587\nI1208 03:04:00.269434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9164 > 2) by scale factor 0.685777\nI1208 03:04:00.281328  1922 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1208 03:06:38.578804  1922 solver.cpp:404]     Test net output #0: accuracy = 0.154\nI1208 03:06:38.579298  1922 solver.cpp:404]     Test net output #1: loss = 14.5221 (* 1 = 14.5221 loss)\nI1208 03:06:42.517014  1922 solver.cpp:228] Iteration 13400, loss = 13.7356\nI1208 03:06:42.517055  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1208 03:06:42.517071  1922 solver.cpp:244]     Train net output #1: loss = 13.7356 (* 1 = 13.7356 loss)\nI1208 03:06:42.760565  1922 sgd_solver.cpp:166] Iteration 13400, lr = 2.01\nI1208 03:06:42.770722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39853 > 2) by scale factor 0.454697\nI1208 03:06:46.962683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73652 > 2) by scale factor 0.535257\nI1208 03:06:51.155005  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78342 > 2) by scale factor 0.71854\nI1208 03:06:55.346223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24818 > 2) by scale factor 0.61573\nI1208 03:06:59.538480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13873 > 2) by scale factor 0.6372\nI1208 03:07:03.730165  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44777 > 2) by scale factor 
0.81707\nI1208 03:07:07.921699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12576 > 2) by scale factor 0.484759\nI1208 03:07:12.113214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.30967 > 2) by scale factor 0.376671\nI1208 03:07:16.305548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81406 > 2) by scale factor 0.415449\nI1208 03:07:20.496944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18707 > 2) by scale factor 0.914464\nI1208 03:07:28.878268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22323 > 2) by scale factor 0.620495\nI1208 03:07:33.070925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74149 > 2) by scale factor 0.534546\nI1208 03:07:37.263242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75768 > 2) by scale factor 0.532244\nI1208 03:07:41.456688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72751 > 2) by scale factor 0.733269\nI1208 03:07:45.649616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10865 > 2) by scale factor 0.486777\nI1208 03:07:49.840468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28828 > 2) by scale factor 0.608221\nI1208 03:07:54.033102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13807 > 2) by scale factor 0.637335\nI1208 03:07:58.225267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94402 > 2) by scale factor 0.679344\nI1208 03:08:02.416147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53493 > 2) by scale factor 0.788976\nI1208 03:08:06.608572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57627 > 2) by scale factor 0.559241\nI1208 03:08:10.799237  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.11664 > 2) by scale factor 0.641718\nI1208 03:08:14.990869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90939 > 2) by scale factor 0.687429\nI1208 03:08:19.182531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06996 > 2) by scale factor 0.491405\nI1208 03:08:23.374191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85841 > 2) by scale factor 0.699689\nI1208 03:08:27.566215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12333 > 2) by scale factor 0.941916\nI1208 03:08:31.758329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78626 > 2) by scale factor 0.528226\nI1208 03:08:35.949734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06167 > 2) by scale factor 0.653238\nI1208 03:08:40.142220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47157 > 2) by scale factor 0.809201\nI1208 03:08:44.334708  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83152 > 2) by scale factor 0.706335\nI1208 03:08:48.526095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8557 > 2) by scale factor 0.700353\nI1208 03:08:52.717552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56788 > 2) by scale factor 0.778852\nI1208 03:08:56.909207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48169 > 2) by scale factor 0.805901\nI1208 03:09:01.100529  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52398 > 2) by scale factor 0.792398\nI1208 03:09:05.293082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05708 > 2) by scale factor 0.65422\nI1208 03:09:09.484514  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28999 > 2) by scale 
factor 0.873365\nI1208 03:09:13.674649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89447 > 2) by scale factor 0.690974\nI1208 03:09:17.865844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01878 > 2) by scale factor 0.662519\nI1208 03:09:22.056821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48798 > 2) by scale factor 0.445635\nI1208 03:09:26.248085  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59758 > 2) by scale factor 0.769948\nI1208 03:09:30.439597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80528 > 2) by scale factor 0.712942\nI1208 03:09:34.630539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80604 > 2) by scale factor 0.712747\nI1208 03:09:38.822660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04406 > 2) by scale factor 0.494553\nI1208 03:09:43.014050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18479 > 2) by scale factor 0.627984\nI1208 03:09:47.204748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12109 > 2) by scale factor 0.94291\nI1208 03:09:51.398089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35651 > 2) by scale factor 0.848712\nI1208 03:09:55.588958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21399 > 2) by scale factor 0.622279\nI1208 03:09:59.781849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25095 > 2) by scale factor 0.888515\nI1208 03:10:08.163010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65565 > 2) by scale factor 0.547099\nI1208 03:10:12.354501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04127 > 2) by scale factor 0.494894\nI1208 03:10:16.547245  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.4451 > 2) by scale factor 0.449934\nI1208 03:10:20.738970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55801 > 2) by scale factor 0.781859\nI1208 03:10:24.930155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09776 > 2) by scale factor 0.645628\nI1208 03:10:29.121385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15977 > 2) by scale factor 0.480796\nI1208 03:10:33.312189  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75346 > 2) by scale factor 0.72636\nI1208 03:10:37.503940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59304 > 2) by scale factor 0.771294\nI1208 03:10:41.694649  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88224 > 2) by scale factor 0.693904\nI1208 03:10:45.885895  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34679 > 2) by scale factor 0.597587\nI1208 03:10:50.077803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59831 > 2) by scale factor 0.76973\nI1208 03:10:58.457357  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00712 > 2) by scale factor 0.499111\nI1208 03:11:02.647992  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89307 > 2) by scale factor 0.513733\nI1208 03:11:06.839462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76427 > 2) by scale factor 0.531311\nI1208 03:11:11.032057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63855 > 2) by scale factor 0.757993\nI1208 03:11:15.223173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61339 > 2) by scale factor 0.553497\nI1208 03:11:19.415230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99866 > 2) by 
scale factor 0.666965\nI1208 03:11:23.606528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58376 > 2) by scale factor 0.774065\nI1208 03:11:27.798244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10348 > 2) by scale factor 0.644438\nI1208 03:11:31.988966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35266 > 2) by scale factor 0.596542\nI1208 03:11:36.180594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02979 > 2) by scale factor 0.496303\nI1208 03:11:40.372261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34129 > 2) by scale factor 0.854229\nI1208 03:11:44.563505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03704 > 2) by scale factor 0.658537\nI1208 03:11:48.755110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54595 > 2) by scale factor 0.785562\nI1208 03:11:52.945765  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10588 > 2) by scale factor 0.643941\nI1208 03:11:57.138033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6358 > 2) by scale factor 0.758783\nI1208 03:12:01.329596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68036 > 2) by scale factor 0.746168\nI1208 03:12:05.521061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96318 > 2) by scale factor 0.674951\nI1208 03:12:09.712366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06797 > 2) by scale factor 0.651897\nI1208 03:12:13.904309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32586 > 2) by scale factor 0.601349\nI1208 03:12:18.095734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08493 > 2) by scale factor 0.648313\nI1208 03:12:22.288898  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57461 > 2) by scale factor 0.776816\nI1208 03:12:26.479399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7122 > 2) by scale factor 0.538765\nI1208 03:12:30.670743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24659 > 2) by scale factor 0.616032\nI1208 03:12:34.861832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04259 > 2) by scale factor 0.494733\nI1208 03:12:39.053805  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88115 > 2) by scale factor 0.694168\nI1208 03:12:43.245615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5044 > 2) by scale factor 0.570711\nI1208 03:12:47.436890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00188 > 2) by scale factor 0.666249\nI1208 03:12:51.628718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32365 > 2) by scale factor 0.860713\nI1208 03:12:55.820379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52 > 2) by scale factor 0.793649\nI1208 03:13:00.011791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47847 > 2) by scale factor 0.574966\nI1208 03:13:04.202936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32676 > 2) by scale factor 0.46224\nI1208 03:13:08.395561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57459 > 2) by scale factor 0.776823\nI1208 03:13:12.586200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11889 > 2) by scale factor 0.641253\nI1208 03:13:16.776554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25563 > 2) by scale factor 0.614321\nI1208 03:13:25.156147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.05566 > 2) by scale factor 0.654524\nI1208 03:13:29.348609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31646 > 2) by scale factor 0.603052\nI1208 03:13:33.540572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8774 > 2) by scale factor 0.51581\nI1208 03:13:37.731490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89458 > 2) by scale factor 0.690946\nI1208 03:13:37.743342  1922 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1208 03:16:16.036605  1922 solver.cpp:404]     Test net output #0: accuracy = 0.145235\nI1208 03:16:16.037091  1922 solver.cpp:404]     Test net output #1: loss = 14.3045 (* 1 = 14.3045 loss)\nI1208 03:16:19.975797  1922 solver.cpp:228] Iteration 13500, loss = 14.6988\nI1208 03:16:19.975836  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 03:16:19.975859  1922 solver.cpp:244]     Train net output #1: loss = 14.6988 (* 1 = 14.6988 loss)\nI1208 03:16:20.215659  1922 sgd_solver.cpp:166] Iteration 13500, lr = 2.025\nI1208 03:16:20.225801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28447 > 2) by scale factor 0.608927\nI1208 03:16:24.417291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45778 > 2) by scale factor 0.578406\nI1208 03:16:28.610049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09536 > 2) by scale factor 0.488358\nI1208 03:16:32.801787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69066 > 2) by scale factor 0.541909\nI1208 03:16:36.994949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79174 > 2) by scale factor 0.527462\nI1208 03:16:41.186333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69859 > 2) by scale factor 0.741128\nI1208 03:16:45.378145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.8109 > 2) by scale factor 0.711516\nI1208 03:16:49.569556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73688 > 2) by scale factor 0.73076\nI1208 03:16:53.761682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4615 > 2) by scale factor 0.577785\nI1208 03:16:57.953325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57813 > 2) by scale factor 0.558951\nI1208 03:17:02.144832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05145 > 2) by scale factor 0.655426\nI1208 03:17:06.337047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90971 > 2) by scale factor 0.687355\nI1208 03:17:10.528836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6623 > 2) by scale factor 0.751229\nI1208 03:17:14.718869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38549 > 2) by scale factor 0.838401\nI1208 03:17:18.910176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02639 > 2) by scale factor 0.660852\nI1208 03:17:23.102133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66983 > 2) by scale factor 0.544985\nI1208 03:17:31.483029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51195 > 2) by scale factor 0.569484\nI1208 03:17:35.674015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00297 > 2) by scale factor 0.666006\nI1208 03:17:39.866233  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42637 > 2) by scale factor 0.583708\nI1208 03:17:44.057555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61669 > 2) by scale factor 0.764324\nI1208 03:17:48.249374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12278 > 2) by scale factor 0.640455\nI1208 03:17:52.441244  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65141 > 2) by scale factor 0.547734\nI1208 03:17:56.632977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04224 > 2) by scale factor 0.657411\nI1208 03:18:00.824281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95263 > 2) by scale factor 0.677363\nI1208 03:18:05.015985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65448 > 2) by scale factor 0.753442\nI1208 03:18:09.207068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34831 > 2) by scale factor 0.851677\nI1208 03:18:13.397117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94141 > 2) by scale factor 0.507433\nI1208 03:18:17.588110  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35873 > 2) by scale factor 0.847915\nI1208 03:18:21.779500  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42758 > 2) by scale factor 0.583503\nI1208 03:18:25.971001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45987 > 2) by scale factor 0.813051\nI1208 03:18:30.161710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46525 > 2) by scale factor 0.811275\nI1208 03:18:34.353109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6124 > 2) by scale factor 0.553648\nI1208 03:18:38.544870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99133 > 2) by scale factor 0.6686\nI1208 03:18:42.735625  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30262 > 2) by scale factor 0.464833\nI1208 03:18:46.926081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24462 > 2) by scale factor 0.471184\nI1208 03:18:51.117439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.8549 > 2) by scale factor 0.70055\nI1208 03:18:55.308590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96994 > 2) by scale factor 0.503786\nI1208 03:18:59.500155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3049 > 2) by scale factor 0.605163\nI1208 03:19:03.691015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74859 > 2) by scale factor 0.533535\nI1208 03:19:07.882187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22059 > 2) by scale factor 0.90066\nI1208 03:19:12.073428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.02562 > 2) by scale factor 0.987351\nI1208 03:19:16.265408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72344 > 2) by scale factor 0.734365\nI1208 03:19:20.456348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29655 > 2) by scale factor 0.606695\nI1208 03:19:24.647346  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12094 > 2) by scale factor 0.640833\nI1208 03:19:28.837205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33114 > 2) by scale factor 0.600396\nI1208 03:19:33.027797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38759 > 2) by scale factor 0.455831\nI1208 03:19:37.218814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43719 > 2) by scale factor 0.581871\nI1208 03:19:41.409437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15183 > 2) by scale factor 0.634551\nI1208 03:19:45.600049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8886 > 2) by scale factor 0.514323\nI1208 03:19:49.791031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88599 > 2) by scale factor 0.693002\nI1208 03:19:53.982056  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50949 > 2) by scale factor 0.796975\nI1208 03:19:58.171870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62719 > 2) by scale factor 0.551391\nI1208 03:20:02.363416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91462 > 2) by scale factor 0.510905\nI1208 03:20:06.554909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90526 > 2) by scale factor 0.51213\nI1208 03:20:10.745641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76181 > 2) by scale factor 0.724164\nI1208 03:20:14.936756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02182 > 2) by scale factor 0.661853\nI1208 03:20:19.127899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08945 > 2) by scale factor 0.647365\nI1208 03:20:23.319321  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13362 > 2) by scale factor 0.937374\nI1208 03:20:27.510249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27698 > 2) by scale factor 0.610318\nI1208 03:20:31.700829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44036 > 2) by scale factor 0.81955\nI1208 03:20:35.891901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62806 > 2) by scale factor 0.761016\nI1208 03:20:40.082116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36729 > 2) by scale factor 0.45795\nI1208 03:20:44.274366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19608 > 2) by scale factor 0.625766\nI1208 03:20:48.465888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64366 > 2) by scale factor 0.756528\nI1208 03:20:52.656211  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.09491 > 2) by scale factor 0.646223\nI1208 03:20:56.847899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68268 > 2) by scale factor 0.745522\nI1208 03:21:01.038878  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31427 > 2) by scale factor 0.603452\nI1208 03:21:05.229157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37403 > 2) by scale factor 0.592763\nI1208 03:21:09.419136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94554 > 2) by scale factor 0.678992\nI1208 03:21:13.609437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89 > 2) by scale factor 0.692042\nI1208 03:21:17.801183  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71647 > 2) by scale factor 0.538145\nI1208 03:21:21.992735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18554 > 2) by scale factor 0.627837\nI1208 03:21:26.182942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20612 > 2) by scale factor 0.623807\nI1208 03:21:30.373522  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35957 > 2) by scale factor 0.595315\nI1208 03:21:34.564415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97562 > 2) by scale factor 0.672129\nI1208 03:21:38.756325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34554 > 2) by scale factor 0.460242\nI1208 03:21:42.947058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04714 > 2) by scale factor 0.656353\nI1208 03:21:47.137998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01466 > 2) by scale factor 0.663426\nI1208 03:21:51.328977  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23737 > 2) by scale factor 0.617786\nI1208 03:21:55.520087  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4651 > 2) by scale factor 0.577184\nI1208 03:21:59.710572  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02798 > 2) by scale factor 0.496527\nI1208 03:22:03.901840  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66931 > 2) by scale factor 0.749256\nI1208 03:22:08.092944  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66714 > 2) by scale factor 0.428528\nI1208 03:22:12.283792  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25923 > 2) by scale factor 0.613641\nI1208 03:22:16.474925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98343 > 2) by scale factor 0.40133\nI1208 03:22:20.666281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57092 > 2) by scale factor 0.560079\nI1208 03:22:24.856525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49236 > 2) by scale factor 0.802453\nI1208 03:22:29.048218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4327 > 2) by scale factor 0.822131\nI1208 03:22:33.238631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60103 > 2) by scale factor 0.768927\nI1208 03:22:37.428973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35245 > 2) by scale factor 0.596578\nI1208 03:22:41.619288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98844 > 2) by scale factor 0.669246\nI1208 03:22:45.809518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17865 > 2) by scale factor 0.629198\nI1208 03:22:49.999614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42698 > 2) by scale factor 0.824069\nI1208 03:22:54.191088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.30757 > 2) by scale factor 0.604673\nI1208 03:22:58.382221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32202 > 2) by scale factor 0.861319\nI1208 03:23:02.573443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80948 > 2) by scale factor 0.711875\nI1208 03:23:06.763608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89639 > 2) by scale factor 0.690515\nI1208 03:23:10.954216  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68671 > 2) by scale factor 0.54249\nI1208 03:23:15.145220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16624 > 2) by scale factor 0.480049\nI1208 03:23:15.157162  1922 solver.cpp:337] Iteration 13600, Testing net (#0)\nI1208 03:25:53.540671  1922 solver.cpp:404]     Test net output #0: accuracy = 0.160353\nI1208 03:25:53.541157  1922 solver.cpp:404]     Test net output #1: loss = 18.1819 (* 1 = 18.1819 loss)\nI1208 03:25:57.481637  1922 solver.cpp:228] Iteration 13600, loss = 19.179\nI1208 03:25:57.481681  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 03:25:57.481730  1922 solver.cpp:244]     Train net output #1: loss = 19.179 (* 1 = 19.179 loss)\nI1208 03:25:57.719594  1922 sgd_solver.cpp:166] Iteration 13600, lr = 2.04\nI1208 03:25:57.729755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96257 > 2) by scale factor 0.504722\nI1208 03:26:01.921268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31417 > 2) by scale factor 0.603469\nI1208 03:26:06.112198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8196 > 2) by scale factor 0.523616\nI1208 03:26:10.302722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97573 > 2) by scale factor 0.672105\nI1208 03:26:14.492998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.10144 > 2) by scale factor 0.644862\nI1208 03:26:18.683363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91669 > 2) by scale factor 0.685708\nI1208 03:26:22.874219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77803 > 2) by scale factor 0.719935\nI1208 03:26:27.063091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.749 > 2) by scale factor 0.533476\nI1208 03:26:31.254583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19768 > 2) by scale factor 0.625454\nI1208 03:26:35.444352  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98522 > 2) by scale factor 0.669966\nI1208 03:26:39.634402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78357 > 2) by scale factor 0.528601\nI1208 03:26:43.824626  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22522 > 2) by scale factor 0.620112\nI1208 03:26:48.015799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87725 > 2) by scale factor 0.51583\nI1208 03:26:52.205552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33555 > 2) by scale factor 0.461303\nI1208 03:26:56.396088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22945 > 2) by scale factor 0.897084\nI1208 03:27:00.586302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18633 > 2) by scale factor 0.627681\nI1208 03:27:04.777261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70005 > 2) by scale factor 0.740728\nI1208 03:27:08.968091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56164 > 2) by scale factor 0.438439\nI1208 03:27:13.158102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66945 > 2) by scale factor 0.545041\nI1208 03:27:17.347867  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51062 > 2) by scale factor 0.5697\nI1208 03:27:21.537885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4294 > 2) by scale factor 0.823249\nI1208 03:27:25.727237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.2111 > 2) by scale factor 0.904527\nI1208 03:27:29.916985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10998 > 2) by scale factor 0.643091\nI1208 03:27:34.107465  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41637 > 2) by scale factor 0.585416\nI1208 03:27:38.297794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9557 > 2) by scale factor 0.676659\nI1208 03:27:42.488018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78928 > 2) by scale factor 0.717031\nI1208 03:27:46.677521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56336 > 2) by scale factor 0.561268\nI1208 03:27:50.868100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00929 > 2) by scale factor 0.664607\nI1208 03:27:55.057173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36947 > 2) by scale factor 0.844069\nI1208 03:27:59.246757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04898 > 2) by scale factor 0.976096\nI1208 03:28:03.437341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79196 > 2) by scale factor 0.716344\nI1208 03:28:07.628273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54678 > 2) by scale factor 0.785304\nI1208 03:28:11.818614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56559 > 2) by scale factor 0.779547\nI1208 03:28:16.008687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.83309 > 2) by scale factor 0.705942\nI1208 03:28:20.199347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42341 > 2) by scale factor 0.584212\nI1208 03:28:24.389178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84572 > 2) by scale factor 0.520059\nI1208 03:28:28.578214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41784 > 2) by scale factor 0.827183\nI1208 03:28:32.767873  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04909 > 2) by scale factor 0.655934\nI1208 03:28:36.957768  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04485 > 2) by scale factor 0.494456\nI1208 03:28:41.147438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09275 > 2) by scale factor 0.955682\nI1208 03:28:45.337218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01219 > 2) by scale factor 0.498481\nI1208 03:28:49.526473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25588 > 2) by scale factor 0.614273\nI1208 03:28:53.716454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73485 > 2) by scale factor 0.731302\nI1208 03:28:57.905647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4503 > 2) by scale factor 0.816227\nI1208 03:29:02.096345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93202 > 2) by scale factor 0.508644\nI1208 03:29:06.287096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0784 > 2) by scale factor 0.649688\nI1208 03:29:10.477560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13789 > 2) by scale factor 0.63737\nI1208 03:29:14.666914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99636 > 2) by scale factor 0.500456\nI1208 03:29:18.856463  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5248 > 2) by scale factor 0.442008\nI1208 03:29:23.045613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38428 > 2) by scale factor 0.838829\nI1208 03:29:27.235867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47081 > 2) by scale factor 0.576234\nI1208 03:29:31.425583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54121 > 2) by scale factor 0.440411\nI1208 03:29:35.614755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34041 > 2) by scale factor 0.598728\nI1208 03:29:39.804891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6475 > 2) by scale factor 0.548321\nI1208 03:29:43.993134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05446 > 2) by scale factor 0.65478\nI1208 03:29:48.182643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15578 > 2) by scale factor 0.633757\nI1208 03:29:52.371732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07037 > 2) by scale factor 0.491356\nI1208 03:29:56.561378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86496 > 2) by scale factor 0.51747\nI1208 03:30:00.751097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0458 > 2) by scale factor 0.656642\nI1208 03:30:04.940578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07752 > 2) by scale factor 0.490494\nI1208 03:30:09.129570  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32018 > 2) by scale factor 0.602377\nI1208 03:30:13.319375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08773 > 2) by scale factor 0.647725\nI1208 03:30:17.509176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.45121 > 2) by scale factor 0.579507\nI1208 03:30:25.886039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97937 > 2) by scale factor 0.502593\nI1208 03:30:30.076180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41255 > 2) by scale factor 0.828999\nI1208 03:30:34.266417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97401 > 2) by scale factor 0.50327\nI1208 03:30:38.456372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01903 > 2) by scale factor 0.662464\nI1208 03:30:42.644994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76725 > 2) by scale factor 0.530892\nI1208 03:30:46.833148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35829 > 2) by scale factor 0.595542\nI1208 03:30:51.023619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94447 > 2) by scale factor 0.679239\nI1208 03:30:55.212013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57721 > 2) by scale factor 0.559096\nI1208 03:30:59.402462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66319 > 2) by scale factor 0.545972\nI1208 03:31:03.593035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0978 > 2) by scale factor 0.953378\nI1208 03:31:07.782662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27458 > 2) by scale factor 0.879282\nI1208 03:31:11.971654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57829 > 2) by scale factor 0.558927\nI1208 03:31:16.161062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1308 > 2) by scale factor 0.638815\nI1208 03:31:20.350924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92345 > 2) by scale factor 0.509755\nI1208 03:31:24.539986  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85545 > 2) by scale factor 0.518746\nI1208 03:31:28.729162  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8039 > 2) by scale factor 0.525776\nI1208 03:31:32.919760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.6077 > 2) by scale factor 0.356653\nI1208 03:31:37.109855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17235 > 2) by scale factor 0.479346\nI1208 03:31:41.298712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55569 > 2) by scale factor 0.562479\nI1208 03:31:45.488137  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58182 > 2) by scale factor 0.558376\nI1208 03:31:49.678297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27313 > 2) by scale factor 0.611035\nI1208 03:31:53.868232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96292 > 2) by scale factor 0.67501\nI1208 03:31:58.058109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31741 > 2) by scale factor 0.602879\nI1208 03:32:02.247310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45729 > 2) by scale factor 0.448703\nI1208 03:32:06.437285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52738 > 2) by scale factor 0.566993\nI1208 03:32:10.627670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85965 > 2) by scale factor 0.699387\nI1208 03:32:14.818184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20549 > 2) by scale factor 0.475569\nI1208 03:32:19.008078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18621 > 2) by scale factor 0.627704\nI1208 03:32:23.198660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.36428 > 2) by scale factor 0.458266\nI1208 03:32:27.387789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44761 > 2) by scale factor 0.817124\nI1208 03:32:31.576648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89539 > 2) by scale factor 0.513427\nI1208 03:32:35.766067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01907 > 2) by scale factor 0.662457\nI1208 03:32:39.957988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92554 > 2) by scale factor 0.683636\nI1208 03:32:44.149721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72611 > 2) by scale factor 0.536753\nI1208 03:32:48.338848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91429 > 2) by scale factor 0.510949\nI1208 03:32:52.528127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17384 > 2) by scale factor 0.630152\nI1208 03:32:52.540040  1922 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1208 03:35:30.937775  1922 solver.cpp:404]     Test net output #0: accuracy = 0.196235\nI1208 03:35:30.938273  1922 solver.cpp:404]     Test net output #1: loss = 14.2253 (* 1 = 14.2253 loss)\nI1208 03:35:34.876489  1922 solver.cpp:228] Iteration 13700, loss = 17.6117\nI1208 03:35:34.876533  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 03:35:34.876559  1922 solver.cpp:244]     Train net output #1: loss = 17.6117 (* 1 = 17.6117 loss)\nI1208 03:35:35.116153  1922 sgd_solver.cpp:166] Iteration 13700, lr = 2.055\nI1208 03:35:35.126317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94404 > 2) by scale factor 0.679339\nI1208 03:35:39.316300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84668 > 2) by scale factor 0.702573\nI1208 03:35:43.506613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.36031 > 2) by scale factor 0.595183\nI1208 03:35:47.695924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12824 > 2) by scale factor 0.939742\nI1208 03:35:51.886242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77897 > 2) by scale factor 0.71969\nI1208 03:35:56.076794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15662 > 2) by scale factor 0.633588\nI1208 03:36:00.264935  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31368 > 2) by scale factor 0.864425\nI1208 03:36:04.454371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99975 > 2) by scale factor 0.666722\nI1208 03:36:08.643728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12642 > 2) by scale factor 0.940548\nI1208 03:36:12.834673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99269 > 2) by scale factor 0.668295\nI1208 03:36:17.023464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2048 > 2) by scale factor 0.624064\nI1208 03:36:21.213325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80886 > 2) by scale factor 0.712033\nI1208 03:36:25.402776  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01703 > 2) by scale factor 0.49788\nI1208 03:36:29.592214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09725 > 2) by scale factor 0.645734\nI1208 03:36:33.781703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89303 > 2) by scale factor 0.691316\nI1208 03:36:37.971276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93537 > 2) by scale factor 0.508212\nI1208 03:36:42.160307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2122 > 2) by scale factor 0.622627\nI1208 03:36:46.349521  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48818 > 2) by scale factor 0.803799\nI1208 03:36:50.538836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37169 > 2) by scale factor 0.84328\nI1208 03:36:54.729171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76363 > 2) by scale factor 0.531402\nI1208 03:36:58.918601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40339 > 2) by scale factor 0.587649\nI1208 03:37:03.107689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09482 > 2) by scale factor 0.646242\nI1208 03:37:07.296816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0881 > 2) by scale factor 0.489224\nI1208 03:37:11.486173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53999 > 2) by scale factor 0.787404\nI1208 03:37:15.677116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60757 > 2) by scale factor 0.766998\nI1208 03:37:19.865736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42888 > 2) by scale factor 0.583281\nI1208 03:37:24.055042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06517 > 2) by scale factor 0.652492\nI1208 03:37:28.244411  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52211 > 2) by scale factor 0.567841\nI1208 03:37:32.432852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47005 > 2) by scale factor 0.809702\nI1208 03:37:36.622215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15304 > 2) by scale factor 0.481575\nI1208 03:37:40.811154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52981 > 2) by scale factor 0.566602\nI1208 03:37:45.000172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.7943 > 2) by scale factor 0.527106\nI1208 03:37:49.189723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85933 > 2) by scale factor 0.699464\nI1208 03:37:53.377916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41345 > 2) by scale factor 0.585918\nI1208 03:37:57.566968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79363 > 2) by scale factor 0.5272\nI1208 03:38:01.756180  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83782 > 2) by scale factor 0.521129\nI1208 03:38:05.946264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83549 > 2) by scale factor 0.521446\nI1208 03:38:10.135592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25774 > 2) by scale factor 0.469732\nI1208 03:38:14.324404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47663 > 2) by scale factor 0.80755\nI1208 03:38:18.513422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86735 > 2) by scale factor 0.697509\nI1208 03:38:22.704025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1277 > 2) by scale factor 0.484531\nI1208 03:38:26.892983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21603 > 2) by scale factor 0.902515\nI1208 03:38:31.081393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30741 > 2) by scale factor 0.866771\nI1208 03:38:35.270608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6465 > 2) by scale factor 0.755716\nI1208 03:38:39.460074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47351 > 2) by scale factor 0.447077\nI1208 03:38:43.649523  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42294 > 2) by scale factor 0.584293\nI1208 03:38:47.837640  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89844 > 2) by scale factor 0.513026\nI1208 03:38:52.027025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3755 > 2) by scale factor 0.592505\nI1208 03:38:56.216756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79468 > 2) by scale factor 0.527054\nI1208 03:39:00.406107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92433 > 2) by scale factor 0.683917\nI1208 03:39:04.595252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86383 > 2) by scale factor 0.517621\nI1208 03:39:08.783562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86792 > 2) by scale factor 0.517074\nI1208 03:39:12.973253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93129 > 2) by scale factor 0.508739\nI1208 03:39:17.162611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81623 > 2) by scale factor 0.710168\nI1208 03:39:21.351083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69487 > 2) by scale factor 0.541291\nI1208 03:39:25.539786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02282 > 2) by scale factor 0.661634\nI1208 03:39:29.729657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40597 > 2) by scale factor 0.587205\nI1208 03:39:33.918745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20637 > 2) by scale factor 0.623759\nI1208 03:39:38.107333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43851 > 2) by scale factor 0.450601\nI1208 03:39:42.295743  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19483 > 2) by scale factor 0.626011\nI1208 03:39:46.484673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.47404 > 2) by scale factor 0.808393\nI1208 03:39:50.672518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72647 > 2) by scale factor 0.73355\nI1208 03:39:54.862392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6387 > 2) by scale factor 0.757948\nI1208 03:39:59.051901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88758 > 2) by scale factor 0.514459\nI1208 03:40:03.240587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40569 > 2) by scale factor 0.831364\nI1208 03:40:07.429291  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26129 > 2) by scale factor 0.469342\nI1208 03:40:11.619382  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49146 > 2) by scale factor 0.802742\nI1208 03:40:15.809373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48673 > 2) by scale factor 0.804271\nI1208 03:40:19.997563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30847 > 2) by scale factor 0.604509\nI1208 03:40:24.186558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0886 > 2) by scale factor 0.489165\nI1208 03:40:28.375679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57867 > 2) by scale factor 0.775593\nI1208 03:40:32.564270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66242 > 2) by scale factor 0.751196\nI1208 03:40:36.753206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47036 > 2) by scale factor 0.447391\nI1208 03:40:40.941674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73292 > 2) by scale factor 0.535774\nI1208 03:40:45.130918  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36896 > 2) by scale factor 0.593655\nI1208 03:40:49.323637  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84071 > 2) by scale factor 0.520737\nI1208 03:40:53.513965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81494 > 2) by scale factor 0.710494\nI1208 03:40:57.704921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77454 > 2) by scale factor 0.529866\nI1208 03:41:01.895742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45827 > 2) by scale factor 0.448604\nI1208 03:41:06.086033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46223 > 2) by scale factor 0.577662\nI1208 03:41:10.274152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47871 > 2) by scale factor 0.446558\nI1208 03:41:14.462733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5147 > 2) by scale factor 0.795325\nI1208 03:41:18.651440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19683 > 2) by scale factor 0.625619\nI1208 03:41:22.840670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13347 > 2) by scale factor 0.483855\nI1208 03:41:27.028995  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16563 > 2) by scale factor 0.92352\nI1208 03:41:31.217228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20774 > 2) by scale factor 0.475314\nI1208 03:41:35.406616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40383 > 2) by scale factor 0.45415\nI1208 03:41:39.595818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45125 > 2) by scale factor 0.449312\nI1208 03:41:43.784857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95677 > 2) by scale factor 0.505463\nI1208 03:41:47.974561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.09175 > 2) by scale factor 0.646883\nI1208 03:41:52.163416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04165 > 2) by scale factor 0.657539\nI1208 03:41:56.351505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6597 > 2) by scale factor 0.429212\nI1208 03:42:00.538604  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99803 > 2) by scale factor 0.500246\nI1208 03:42:04.727377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25457 > 2) by scale factor 0.470083\nI1208 03:42:08.910745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67473 > 2) by scale factor 0.747739\nI1208 03:42:13.078688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17287 > 2) by scale factor 0.630343\nI1208 03:42:17.246347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85468 > 2) by scale factor 0.411973\nI1208 03:42:21.413185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99772 > 2) by scale factor 0.500286\nI1208 03:42:25.579744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51657 > 2) by scale factor 0.442814\nI1208 03:42:29.747158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74092 > 2) by scale factor 0.534627\nI1208 03:42:29.759210  1922 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1208 03:45:08.176884  1922 solver.cpp:404]     Test net output #0: accuracy = 0.128941\nI1208 03:45:08.177366  1922 solver.cpp:404]     Test net output #1: loss = 12.4044 (* 1 = 12.4044 loss)\nI1208 03:45:12.115023  1922 solver.cpp:228] Iteration 13800, loss = 12.3944\nI1208 03:45:12.115068  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 03:45:12.115093  1922 solver.cpp:244]     Train net output #1: loss = 12.3944 (* 1 = 12.3944 loss)\nI1208 03:45:12.336617  1922 
sgd_solver.cpp:166] Iteration 13800, lr = 2.07\nI1208 03:45:12.346688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68066 > 2) by scale factor 0.42729\nI1208 03:45:16.516860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27736 > 2) by scale factor 0.878208\nI1208 03:45:20.688127  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40698 > 2) by scale factor 0.453826\nI1208 03:45:24.858096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51592 > 2) by scale factor 0.568841\nI1208 03:45:29.028115  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75574 > 2) by scale factor 0.532519\nI1208 03:45:33.198092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83371 > 2) by scale factor 0.705788\nI1208 03:45:37.368176  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02523 > 2) by scale factor 0.397992\nI1208 03:45:41.538143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42241 > 2) by scale factor 0.825623\nI1208 03:45:45.708139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79275 > 2) by scale factor 0.527322\nI1208 03:45:49.878250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5393 > 2) by scale factor 0.787617\nI1208 03:45:54.047255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33772 > 2) by scale factor 0.599212\nI1208 03:45:58.216667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91104 > 2) by scale factor 0.687039\nI1208 03:46:02.386044  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79936 > 2) by scale factor 0.714449\nI1208 03:46:06.556870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17034 > 2) by scale factor 0.630848\nI1208 03:46:10.726788 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5266 > 2) by scale factor 0.567118\nI1208 03:46:14.896643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1608 > 2) by scale factor 0.387537\nI1208 03:46:19.066375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81236 > 2) by scale factor 0.415597\nI1208 03:46:23.236287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00074 > 2) by scale factor 0.499908\nI1208 03:46:27.405558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46232 > 2) by scale factor 0.577648\nI1208 03:46:31.576247  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20569 > 2) by scale factor 0.906748\nI1208 03:46:39.912019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00529 > 2) by scale factor 0.499339\nI1208 03:46:44.081548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37436 > 2) by scale factor 0.592705\nI1208 03:46:48.251292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65521 > 2) by scale factor 0.547164\nI1208 03:46:52.420367  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82499 > 2) by scale factor 0.522877\nI1208 03:46:56.591186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80862 > 2) by scale factor 0.525125\nI1208 03:47:00.761870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11602 > 2) by scale factor 0.641844\nI1208 03:47:04.932013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62255 > 2) by scale factor 0.762616\nI1208 03:47:09.101606  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6364 > 2) by scale factor 0.549995\nI1208 03:47:13.272027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.88722 > 2) by scale factor 0.692708\nI1208 03:47:17.441735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05817 > 2) by scale factor 0.653985\nI1208 03:47:21.612399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54462 > 2) by scale factor 0.785973\nI1208 03:47:25.781793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3276 > 2) by scale factor 0.601034\nI1208 03:47:29.951727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9914 > 2) by scale factor 0.501077\nI1208 03:47:34.122292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93138 > 2) by scale factor 0.682271\nI1208 03:47:38.291834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80711 > 2) by scale factor 0.712476\nI1208 03:47:42.462172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9621 > 2) by scale factor 0.675196\nI1208 03:47:46.631638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60791 > 2) by scale factor 0.766898\nI1208 03:47:50.801497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83699 > 2) by scale factor 0.704973\nI1208 03:47:54.971240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17344 > 2) by scale factor 0.630231\nI1208 03:47:59.141261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60417 > 2) by scale factor 0.767998\nI1208 03:48:03.310868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97995 > 2) by scale factor 0.502519\nI1208 03:48:07.480423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41592 > 2) by scale factor 0.585493\nI1208 03:48:11.650861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13457 > 2) by scale factor 0.638046\nI1208 03:48:15.820148  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85877 > 2) by scale factor 0.699601\nI1208 03:48:19.991735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98977 > 2) by scale factor 0.668947\nI1208 03:48:24.163105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33683 > 2) by scale factor 0.85586\nI1208 03:48:28.333119  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24165 > 2) by scale factor 0.471514\nI1208 03:48:32.502705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93659 > 2) by scale factor 0.681063\nI1208 03:48:36.672330  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48422 > 2) by scale factor 0.805082\nI1208 03:48:40.841915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45252 > 2) by scale factor 0.815489\nI1208 03:48:45.012037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66664 > 2) by scale factor 0.750007\nI1208 03:48:49.181751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69275 > 2) by scale factor 0.541602\nI1208 03:48:53.352059  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20845 > 2) by scale factor 0.475234\nI1208 03:48:57.523048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32702 > 2) by scale factor 0.859468\nI1208 03:49:01.693431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15868 > 2) by scale factor 0.480922\nI1208 03:49:05.863636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61372 > 2) by scale factor 0.553446\nI1208 03:49:10.032644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43262 > 2) by scale factor 0.582645\nI1208 03:49:14.201706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.24812 > 2) by scale factor 0.889631\nI1208 03:49:18.370803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90689 > 2) by scale factor 0.68802\nI1208 03:49:22.539609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83274 > 2) by scale factor 0.52182\nI1208 03:49:26.709630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54961 > 2) by scale factor 0.784434\nI1208 03:49:30.879258  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82616 > 2) by scale factor 0.707673\nI1208 03:49:35.048558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38 > 2) by scale factor 0.840337\nI1208 03:49:39.218457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12854 > 2) by scale factor 0.939613\nI1208 03:49:43.389066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98242 > 2) by scale factor 0.670597\nI1208 03:49:47.558521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02281 > 2) by scale factor 0.497164\nI1208 03:49:51.727936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57737 > 2) by scale factor 0.559071\nI1208 03:49:55.897428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5257 > 2) by scale factor 0.44192\nI1208 03:50:00.067091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70787 > 2) by scale factor 0.738589\nI1208 03:50:04.236232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36098 > 2) by scale factor 0.595064\nI1208 03:50:08.405891  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62148 > 2) by scale factor 0.762928\nI1208 03:50:12.575155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86806 > 2) by scale factor 0.697336\nI1208 03:50:16.745045  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02504 > 2) by scale factor 0.661148\nI1208 03:50:20.914541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60753 > 2) by scale factor 0.554395\nI1208 03:50:25.084974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3093 > 2) by scale factor 0.866064\nI1208 03:50:29.255177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26014 > 2) by scale factor 0.61347\nI1208 03:50:33.423370  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47691 > 2) by scale factor 0.575224\nI1208 03:50:37.593483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43136 > 2) by scale factor 0.582859\nI1208 03:50:41.762506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57949 > 2) by scale factor 0.775347\nI1208 03:50:45.931838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60176 > 2) by scale factor 0.555284\nI1208 03:50:50.100735  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4704 > 2) by scale factor 0.809584\nI1208 03:50:54.270350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91408 > 2) by scale factor 0.510976\nI1208 03:50:58.438421  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95567 > 2) by scale factor 0.505603\nI1208 03:51:02.607246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14449 > 2) by scale factor 0.932623\nI1208 03:51:06.776825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53227 > 2) by scale factor 0.789805\nI1208 03:51:10.946337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94742 > 2) by scale factor 0.67856\nI1208 03:51:15.117235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.65207 > 2) by scale factor 0.547635\nI1208 03:51:19.287037  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10207 > 2) by scale factor 0.644731\nI1208 03:51:23.455355  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5738 > 2) by scale factor 0.559629\nI1208 03:51:27.624413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83362 > 2) by scale factor 0.70581\nI1208 03:51:31.793620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49096 > 2) by scale factor 0.572908\nI1208 03:51:35.962924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5143 > 2) by scale factor 0.569104\nI1208 03:51:40.131049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01049 > 2) by scale factor 0.994784\nI1208 03:51:44.299034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02284 > 2) by scale factor 0.661629\nI1208 03:51:48.468516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53787 > 2) by scale factor 0.788062\nI1208 03:51:52.638120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35523 > 2) by scale factor 0.849174\nI1208 03:51:56.807551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15162 > 2) by scale factor 0.634595\nI1208 03:52:00.976807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82591 > 2) by scale factor 0.707737\nI1208 03:52:05.145092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14569 > 2) by scale factor 0.63579\nI1208 03:52:05.157223  1922 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1208 03:54:43.521232  1922 solver.cpp:404]     Test net output #0: accuracy = 0.173471\nI1208 03:54:43.521728  1922 solver.cpp:404]     Test net output #1: loss = 16.1517 (* 1 = 16.1517 loss)\nI1208 03:54:47.460086  1922 
solver.cpp:228] Iteration 13900, loss = 15.3188\nI1208 03:54:47.460136  1922 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1208 03:54:47.460162  1922 solver.cpp:244]     Train net output #1: loss = 15.3188 (* 1 = 15.3188 loss)\nI1208 03:54:47.683249  1922 sgd_solver.cpp:166] Iteration 13900, lr = 2.085\nI1208 03:54:47.693377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83819 > 2) by scale factor 0.704675\nI1208 03:54:51.864497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55061 > 2) by scale factor 0.784126\nI1208 03:54:56.036962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58805 > 2) by scale factor 0.557406\nI1208 03:55:00.208253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52083 > 2) by scale factor 0.79339\nI1208 03:55:04.380661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62312 > 2) by scale factor 0.432608\nI1208 03:55:08.552148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06597 > 2) by scale factor 0.652323\nI1208 03:55:12.722832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1816 > 2) by scale factor 0.628614\nI1208 03:55:16.893930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16503 > 2) by scale factor 0.631905\nI1208 03:55:21.066692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05267 > 2) by scale factor 0.655164\nI1208 03:55:25.237453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3345 > 2) by scale factor 0.856714\nI1208 03:55:29.409672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94723 > 2) by scale factor 0.506684\nI1208 03:55:33.582036  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15924 > 2) by scale factor 0.480857\nI1208 03:55:37.752920  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21757 > 2) by scale factor 0.474206\nI1208 03:55:41.924062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69801 > 2) by scale factor 0.741286\nI1208 03:55:46.096451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13897 > 2) by scale factor 0.389183\nI1208 03:55:50.266474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51464 > 2) by scale factor 0.569049\nI1208 03:55:54.438189  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37984 > 2) by scale factor 0.456638\nI1208 03:55:58.608464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09568 > 2) by scale factor 0.646062\nI1208 03:56:02.781538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01581 > 2) by scale factor 0.663171\nI1208 03:56:06.952437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04539 > 2) by scale factor 0.656731\nI1208 03:56:11.124459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24737 > 2) by scale factor 0.615883\nI1208 03:56:15.296078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37554 > 2) by scale factor 0.592498\nI1208 03:56:19.466009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28291 > 2) by scale factor 0.876076\nI1208 03:56:23.637733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23532 > 2) by scale factor 0.618177\nI1208 03:56:27.808941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57789 > 2) by scale factor 0.775828\nI1208 03:56:31.979760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8028 > 2) by scale factor 0.713571\nI1208 03:56:36.151444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.18692 > 2) by scale factor 0.914528\nI1208 03:56:40.322409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21095 > 2) by scale factor 0.622869\nI1208 03:56:44.493206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58133 > 2) by scale factor 0.774794\nI1208 03:56:48.663528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5819 > 2) by scale factor 0.774622\nI1208 03:56:52.834053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32825 > 2) by scale factor 0.859015\nI1208 03:56:57.005404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43386 > 2) by scale factor 0.582435\nI1208 03:57:01.176453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86038 > 2) by scale factor 0.41149\nI1208 03:57:05.346771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61212 > 2) by scale factor 0.553692\nI1208 03:57:09.518642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43097 > 2) by scale factor 0.582926\nI1208 03:57:13.689682  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14743 > 2) by scale factor 0.635439\nI1208 03:57:17.859678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76002 > 2) by scale factor 0.531913\nI1208 03:57:22.031031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98332 > 2) by scale factor 0.670395\nI1208 03:57:26.200598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4838 > 2) by scale factor 0.805216\nI1208 03:57:30.370936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55569 > 2) by scale factor 0.562478\nI1208 03:57:34.541657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37633 > 2) by scale factor 0.592359\nI1208 03:57:38.712266  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55158 > 2) by scale factor 0.783829\nI1208 03:57:42.882491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80245 > 2) by scale factor 0.525977\nI1208 03:57:47.054656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59452 > 2) by scale factor 0.435302\nI1208 03:57:51.225075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04849 > 2) by scale factor 0.656062\nI1208 03:57:55.395931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39851 > 2) by scale factor 0.588493\nI1208 03:57:59.566174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14057 > 2) by scale factor 0.483026\nI1208 03:58:03.736425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26099 > 2) by scale factor 0.613311\nI1208 03:58:07.908583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00159 > 2) by scale factor 0.499801\nI1208 03:58:12.080057  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44607 > 2) by scale factor 0.817639\nI1208 03:58:16.250993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11787 > 2) by scale factor 0.944344\nI1208 03:58:24.591495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70888 > 2) by scale factor 0.738312\nI1208 03:58:28.762193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4111 > 2) by scale factor 0.453402\nI1208 03:58:32.933220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85693 > 2) by scale factor 0.700053\nI1208 03:58:37.103893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36507 > 2) by scale factor 0.458182\nI1208 03:58:41.275080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.95483 > 2) by scale factor 0.676859\nI1208 03:58:45.445147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42816 > 2) by scale factor 0.583404\nI1208 03:58:49.616138  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83339 > 2) by scale factor 0.705869\nI1208 03:58:53.786947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61437 > 2) by scale factor 0.765004\nI1208 03:58:57.957084  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4163 > 2) by scale factor 0.827712\nI1208 03:59:02.128690  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4373 > 2) by scale factor 0.581851\nI1208 03:59:06.299101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75824 > 2) by scale factor 0.725101\nI1208 03:59:10.469815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06582 > 2) by scale factor 0.652354\nI1208 03:59:14.642386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0509 > 2) by scale factor 0.655545\nI1208 03:59:18.812757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95863 > 2) by scale factor 0.675988\nI1208 03:59:22.982671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30083 > 2) by scale factor 0.605909\nI1208 03:59:27.152556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54506 > 2) by scale factor 0.564166\nI1208 03:59:31.323164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68353 > 2) by scale factor 0.542957\nI1208 03:59:35.493489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82818 > 2) by scale factor 0.522442\nI1208 03:59:39.664348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27274 > 2) by scale factor 0.611108\nI1208 03:59:43.835919  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2109 > 2) by scale factor 0.622878\nI1208 03:59:48.006973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87471 > 2) by scale factor 0.695723\nI1208 03:59:52.178042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88667 > 2) by scale factor 0.69284\nI1208 03:59:56.348325  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12048 > 2) by scale factor 0.640928\nI1208 04:00:00.518882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26519 > 2) by scale factor 0.612521\nI1208 04:00:04.690214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36152 > 2) by scale factor 0.594969\nI1208 04:00:08.861425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73316 > 2) by scale factor 0.535739\nI1208 04:00:13.033099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99737 > 2) by scale factor 0.667252\nI1208 04:00:17.203558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7149 > 2) by scale factor 0.736677\nI1208 04:00:25.543359  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66424 > 2) by scale factor 0.750683\nI1208 04:00:29.714787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46659 > 2) by scale factor 0.576936\nI1208 04:00:33.885973  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66889 > 2) by scale factor 0.749376\nI1208 04:00:38.057265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6129 > 2) by scale factor 0.765432\nI1208 04:00:42.229638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57064 > 2) by scale factor 0.778015\nI1208 04:00:46.400173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.84645 > 2) by scale factor 0.51996\nI1208 04:00:50.570914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02094 > 2) by scale factor 0.497396\nI1208 04:00:54.741384  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1036 > 2) by scale factor 0.487377\nI1208 04:00:58.912487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33845 > 2) by scale factor 0.460994\nI1208 04:01:03.083287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76838 > 2) by scale factor 0.722445\nI1208 04:01:07.254097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67151 > 2) by scale factor 0.74864\nI1208 04:01:11.425642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48755 > 2) by scale factor 0.804003\nI1208 04:01:15.598114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47261 > 2) by scale factor 0.808861\nI1208 04:01:19.768561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01596 > 2) by scale factor 0.992085\nI1208 04:01:23.937881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78465 > 2) by scale factor 0.718224\nI1208 04:01:28.108300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98221 > 2) by scale factor 0.670645\nI1208 04:01:32.278786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97523 > 2) by scale factor 0.672217\nI1208 04:01:36.449087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60425 > 2) by scale factor 0.5549\nI1208 04:01:40.619688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51772 > 2) by scale factor 0.794368\nI1208 04:01:40.631542  1922 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1208 04:04:18.985471  1922 solver.cpp:404]     Test net output #0: accuracy = 
0.175353\nI1208 04:04:18.985971  1922 solver.cpp:404]     Test net output #1: loss = 10.6036 (* 1 = 10.6036 loss)\nI1208 04:04:22.924273  1922 solver.cpp:228] Iteration 14000, loss = 10.759\nI1208 04:04:22.924319  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1208 04:04:22.924343  1922 solver.cpp:244]     Train net output #1: loss = 10.759 (* 1 = 10.759 loss)\nI1208 04:04:23.141867  1922 sgd_solver.cpp:166] Iteration 14000, lr = 2.1\nI1208 04:04:23.151976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51634 > 2) by scale factor 0.794805\nI1208 04:04:27.319638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39033 > 2) by scale factor 0.589913\nI1208 04:04:31.486482  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26301 > 2) by scale factor 0.612931\nI1208 04:04:35.652952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8724 > 2) by scale factor 0.696282\nI1208 04:04:39.819201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63979 > 2) by scale factor 0.431054\nI1208 04:04:43.986089  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53973 > 2) by scale factor 0.440555\nI1208 04:04:48.154114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97466 > 2) by scale factor 0.672345\nI1208 04:04:52.321292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74341 > 2) by scale factor 0.421638\nI1208 04:04:56.489958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60597 > 2) by scale factor 0.554635\nI1208 04:05:00.656376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09301 > 2) by scale factor 0.955563\nI1208 04:05:04.823474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67051 > 2) by scale factor 0.544883\nI1208 04:05:08.989984  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71028 > 2) by scale factor 0.539043\nI1208 04:05:13.157771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89939 > 2) by scale factor 0.5129\nI1208 04:05:17.325237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25121 > 2) by scale factor 0.615155\nI1208 04:05:21.492074  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40748 > 2) by scale factor 0.586944\nI1208 04:05:25.659446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19913 > 2) by scale factor 0.62517\nI1208 04:05:29.826499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70076 > 2) by scale factor 0.740532\nI1208 04:05:33.992727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18857 > 2) by scale factor 0.62724\nI1208 04:05:38.160521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19758 > 2) by scale factor 0.625473\nI1208 04:05:42.328187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47108 > 2) by scale factor 0.447319\nI1208 04:05:46.494967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61752 > 2) by scale factor 0.764083\nI1208 04:05:50.661844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77858 > 2) by scale factor 0.529299\nI1208 04:05:54.828016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15408 > 2) by scale factor 0.928469\nI1208 04:05:58.995024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4314 > 2) by scale factor 0.582853\nI1208 04:06:03.161981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4096 > 2) by scale factor 0.586579\nI1208 04:06:07.328407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.40318 > 2) by scale factor 0.832231\nI1208 04:06:11.494916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31554 > 2) by scale factor 0.863729\nI1208 04:06:15.661666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50093 > 2) by scale factor 0.444353\nI1208 04:06:19.828223  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36363 > 2) by scale factor 0.594596\nI1208 04:06:23.995167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0174 > 2) by scale factor 0.662822\nI1208 04:06:28.161837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51955 > 2) by scale factor 0.568255\nI1208 04:06:32.328519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10425 > 2) by scale factor 0.644279\nI1208 04:06:36.496273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63778 > 2) by scale factor 0.431241\nI1208 04:06:40.664204  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68395 > 2) by scale factor 0.42699\nI1208 04:06:44.830116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42877 > 2) by scale factor 0.583299\nI1208 04:06:48.996980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.5339 > 2) by scale factor 0.441122\nI1208 04:06:53.163588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99098 > 2) by scale factor 0.668678\nI1208 04:06:57.330941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59645 > 2) by scale factor 0.556104\nI1208 04:07:01.497789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53418 > 2) by scale factor 0.565903\nI1208 04:07:05.664093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6305 > 2) by scale factor 0.550889\nI1208 04:07:09.831315  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18203 > 2) by scale factor 0.628529\nI1208 04:07:13.997561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97649 > 2) by scale factor 0.502956\nI1208 04:07:18.164618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82396 > 2) by scale factor 0.708225\nI1208 04:07:22.331181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09206 > 2) by scale factor 0.646819\nI1208 04:07:26.497468  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6523 > 2) by scale factor 0.754063\nI1208 04:07:30.664506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60048 > 2) by scale factor 0.555481\nI1208 04:07:34.830627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55995 > 2) by scale factor 0.438602\nI1208 04:07:38.996821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41726 > 2) by scale factor 0.45277\nI1208 04:07:43.163648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12024 > 2) by scale factor 0.485408\nI1208 04:07:47.329727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69722 > 2) by scale factor 0.540947\nI1208 04:07:51.495980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79653 > 2) by scale factor 0.526797\nI1208 04:07:55.662907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01347 > 2) by scale factor 0.663687\nI1208 04:07:59.829088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68654 > 2) by scale factor 0.744453\nI1208 04:08:03.996145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75543 > 2) by scale factor 0.72584\nI1208 04:08:12.327937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.19651 > 2) by scale factor 0.625682\nI1208 04:08:16.493962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38536 > 2) by scale factor 0.838447\nI1208 04:08:20.660094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9211 > 2) by scale factor 0.510061\nI1208 04:08:24.827270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39567 > 2) by scale factor 0.834841\nI1208 04:08:28.993754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75655 > 2) by scale factor 0.725544\nI1208 04:08:33.162369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14343 > 2) by scale factor 0.636248\nI1208 04:08:37.329588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55773 > 2) by scale factor 0.781942\nI1208 04:08:41.496201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74782 > 2) by scale factor 0.72785\nI1208 04:08:45.661916  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04386 > 2) by scale factor 0.494577\nI1208 04:08:49.828955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43748 > 2) by scale factor 0.581821\nI1208 04:08:53.995374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70439 > 2) by scale factor 0.739538\nI1208 04:08:58.162415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61412 > 2) by scale factor 0.553385\nI1208 04:09:02.328770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90196 > 2) by scale factor 0.689189\nI1208 04:09:06.493942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34115 > 2) by scale factor 0.85428\nI1208 04:09:10.659884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62809 > 2) by scale factor 0.432143\nI1208 04:09:14.826405  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42907 > 2) by scale factor 0.583248\nI1208 04:09:18.992660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11087 > 2) by scale factor 0.642906\nI1208 04:09:23.159003  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89748 > 2) by scale factor 0.513153\nI1208 04:09:27.325455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67236 > 2) by scale factor 0.544609\nI1208 04:09:31.491240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14789 > 2) by scale factor 0.482173\nI1208 04:09:35.656826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31349 > 2) by scale factor 0.603593\nI1208 04:09:39.823988  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3052 > 2) by scale factor 0.605107\nI1208 04:09:43.991395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25637 > 2) by scale factor 0.469884\nI1208 04:09:48.157687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10425 > 2) by scale factor 0.644277\nI1208 04:09:52.324210  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63421 > 2) by scale factor 0.75924\nI1208 04:09:56.490361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48914 > 2) by scale factor 0.573206\nI1208 04:10:00.656484  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38424 > 2) by scale factor 0.456179\nI1208 04:10:04.824189  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64255 > 2) by scale factor 0.549066\nI1208 04:10:08.991293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57231 > 2) by scale factor 0.559861\nI1208 04:10:13.159225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.9275 > 2) by scale factor 0.50923\nI1208 04:10:17.327208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71694 > 2) by scale factor 0.736123\nI1208 04:10:21.492732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68199 > 2) by scale factor 0.543184\nI1208 04:10:25.659415  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56893 > 2) by scale factor 0.778536\nI1208 04:10:29.826166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55872 > 2) by scale factor 0.78164\nI1208 04:10:33.993155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78113 > 2) by scale factor 0.719132\nI1208 04:10:38.160213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46114 > 2) by scale factor 0.812631\nI1208 04:10:46.491819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37641 > 2) by scale factor 0.592345\nI1208 04:10:50.658778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10858 > 2) by scale factor 0.643382\nI1208 04:10:54.825788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24656 > 2) by scale factor 0.616037\nI1208 04:10:58.991550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80939 > 2) by scale factor 0.711899\nI1208 04:11:03.157974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33227 > 2) by scale factor 0.600191\nI1208 04:11:07.324734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56463 > 2) by scale factor 0.561068\nI1208 04:11:11.490631  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97783 > 2) by scale factor 0.502787\nI1208 04:11:15.657161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31378 > 2) by scale factor 0.60354\nI1208 04:11:15.669243  1922 
solver.cpp:337] Iteration 14100, Testing net (#0)\nI1208 04:13:54.035300  1922 solver.cpp:404]     Test net output #0: accuracy = 0.196059\nI1208 04:13:54.035825  1922 solver.cpp:404]     Test net output #1: loss = 17.4302 (* 1 = 17.4302 loss)\nI1208 04:13:57.976152  1922 solver.cpp:228] Iteration 14100, loss = 19.0784\nI1208 04:13:57.976202  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1208 04:13:57.976228  1922 solver.cpp:244]     Train net output #1: loss = 19.0784 (* 1 = 19.0784 loss)\nI1208 04:13:58.190398  1922 sgd_solver.cpp:166] Iteration 14100, lr = 2.115\nI1208 04:13:58.200537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98273 > 2) by scale factor 0.670527\nI1208 04:14:02.368551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25565 > 2) by scale factor 0.614317\nI1208 04:14:06.535136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78012 > 2) by scale factor 0.529083\nI1208 04:14:10.702993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35496 > 2) by scale factor 0.84927\nI1208 04:14:14.868937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95345 > 2) by scale factor 0.505887\nI1208 04:14:19.036880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48912 > 2) by scale factor 0.803496\nI1208 04:14:23.203830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84473 > 2) by scale factor 0.703054\nI1208 04:14:27.371232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36364 > 2) by scale factor 0.458333\nI1208 04:14:31.538492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05906 > 2) by scale factor 0.492725\nI1208 04:14:35.705785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63321 > 2) by scale factor 0.75953\nI1208 04:14:39.874019  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3998 > 2) by scale factor 0.833404\nI1208 04:14:44.040843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53611 > 2) by scale factor 0.565593\nI1208 04:14:48.208670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66629 > 2) by scale factor 0.54551\nI1208 04:14:52.375910  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9886 > 2) by scale factor 0.50143\nI1208 04:14:56.543038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32934 > 2) by scale factor 0.858612\nI1208 04:15:00.710081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91418 > 2) by scale factor 0.6863\nI1208 04:15:04.878026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7107 > 2) by scale factor 0.737817\nI1208 04:15:09.044603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.635 > 2) by scale factor 0.550206\nI1208 04:15:13.211772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7078 > 2) by scale factor 0.539404\nI1208 04:15:21.543831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91113 > 2) by scale factor 0.511361\nI1208 04:15:25.710477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39003 > 2) by scale factor 0.589965\nI1208 04:15:29.878002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41145 > 2) by scale factor 0.829377\nI1208 04:15:34.044395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19613 > 2) by scale factor 0.47663\nI1208 04:15:42.374363  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1605 > 2) by scale factor 0.632811\nI1208 04:15:46.540099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.43282 > 2) by scale factor 0.582612\nI1208 04:15:50.706858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1189 > 2) by scale factor 0.641252\nI1208 04:15:54.874008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77258 > 2) by scale factor 0.721349\nI1208 04:15:59.040102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53022 > 2) by scale factor 0.566537\nI1208 04:16:03.206923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18586 > 2) by scale factor 0.627774\nI1208 04:16:07.373343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89825 > 2) by scale factor 0.690073\nI1208 04:16:11.540679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.502 > 2) by scale factor 0.571102\nI1208 04:16:15.707424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52218 > 2) by scale factor 0.56783\nI1208 04:16:19.872912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62266 > 2) by scale factor 0.762584\nI1208 04:16:24.040182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10432 > 2) by scale factor 0.644263\nI1208 04:16:28.207778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16613 > 2) by scale factor 0.923305\nI1208 04:16:32.374218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14447 > 2) by scale factor 0.636038\nI1208 04:16:36.541820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46973 > 2) by scale factor 0.809806\nI1208 04:16:40.708750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09059 > 2) by scale factor 0.647126\nI1208 04:16:44.875535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82777 > 2) by scale factor 0.522497\nI1208 04:16:49.043823  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42149 > 2) by scale factor 0.584541\nI1208 04:16:53.210774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99114 > 2) by scale factor 0.668642\nI1208 04:16:57.377053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02193 > 2) by scale factor 0.661828\nI1208 04:17:01.544040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2868 > 2) by scale factor 0.608494\nI1208 04:17:05.710590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07478 > 2) by scale factor 0.490824\nI1208 04:17:09.878041  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15657 > 2) by scale factor 0.481166\nI1208 04:17:14.044795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33343 > 2) by scale factor 0.599983\nI1208 04:17:18.210925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31637 > 2) by scale factor 0.863421\nI1208 04:17:22.377813  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14133 > 2) by scale factor 0.636673\nI1208 04:17:26.544301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59054 > 2) by scale factor 0.772041\nI1208 04:17:30.710911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76822 > 2) by scale factor 0.722485\nI1208 04:17:34.877858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29412 > 2) by scale factor 0.607142\nI1208 04:17:39.029709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51164 > 2) by scale factor 0.796292\nI1208 04:17:43.181661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17961 > 2) by scale factor 0.629008\nI1208 04:17:47.333945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.12368 > 2) by scale factor 0.485004\nI1208 04:17:51.486133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55636 > 2) by scale factor 0.782363\nI1208 04:17:55.637120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15883 > 2) by scale factor 0.633146\nI1208 04:17:59.789327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70156 > 2) by scale factor 0.540312\nI1208 04:18:03.941260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55624 > 2) by scale factor 0.438959\nI1208 04:18:08.092850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36498 > 2) by scale factor 0.594357\nI1208 04:18:12.244932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59149 > 2) by scale factor 0.556872\nI1208 04:18:16.396129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7142 > 2) by scale factor 0.42425\nI1208 04:18:20.547029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65848 > 2) by scale factor 0.546676\nI1208 04:18:24.699512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39054 > 2) by scale factor 0.455525\nI1208 04:18:28.850589  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99267 > 2) by scale factor 0.500918\nI1208 04:18:33.002727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00501 > 2) by scale factor 0.499374\nI1208 04:18:37.153442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64794 > 2) by scale factor 0.430299\nI1208 04:18:45.453389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70602 > 2) by scale factor 0.739093\nI1208 04:18:49.604907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59452 > 2) by scale factor 0.556403\nI1208 04:18:53.756207  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59015 > 2) by scale factor 0.772156\nI1208 04:18:57.908010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07838 > 2) by scale factor 0.490391\nI1208 04:19:02.058101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57764 > 2) by scale factor 0.436906\nI1208 04:19:06.209107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77403 > 2) by scale factor 0.418933\nI1208 04:19:10.360273  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58183 > 2) by scale factor 0.436507\nI1208 04:19:14.511507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52184 > 2) by scale factor 0.567885\nI1208 04:19:18.662374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52354 > 2) by scale factor 0.56761\nI1208 04:19:22.813951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59363 > 2) by scale factor 0.77112\nI1208 04:19:26.965736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42596 > 2) by scale factor 0.824418\nI1208 04:19:31.117458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26184 > 2) by scale factor 0.61315\nI1208 04:19:39.419055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2964 > 2) by scale factor 0.606723\nI1208 04:19:47.719665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97353 > 2) by scale factor 0.50333\nI1208 04:19:51.870926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5621 > 2) by scale factor 0.78061\nI1208 04:19:56.023602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22981 > 2) by scale factor 0.896936\nI1208 04:20:00.174901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.70076 > 2) by scale factor 0.54043\nI1208 04:20:04.326139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6807 > 2) by scale factor 0.543374\nI1208 04:20:08.478235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6616 > 2) by scale factor 0.751429\nI1208 04:20:12.630462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8791 > 2) by scale factor 0.694661\nI1208 04:20:16.780575  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95868 > 2) by scale factor 0.675977\nI1208 04:20:20.932394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77617 > 2) by scale factor 0.720418\nI1208 04:20:25.083986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24215 > 2) by scale factor 0.471459\nI1208 04:20:29.234843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97343 > 2) by scale factor 0.672623\nI1208 04:20:33.385082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96091 > 2) by scale factor 0.675468\nI1208 04:20:37.536023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68617 > 2) by scale factor 0.744554\nI1208 04:20:41.687696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0046 > 2) by scale factor 0.499426\nI1208 04:20:45.838675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36398 > 2) by scale factor 0.594534\nI1208 04:20:49.989296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88911 > 2) by scale factor 0.692255\nI1208 04:20:50.001144  1922 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1208 04:23:28.210547  1922 solver.cpp:404]     Test net output #0: accuracy = 0.202118\nI1208 04:23:28.211040  1922 solver.cpp:404]     Test net output #1: loss = 15.2191 (* 1 = 15.2191 loss)\nI1208 04:23:32.147449  1922 
solver.cpp:228] Iteration 14200, loss = 15.1156\nI1208 04:23:32.147487  1922 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1208 04:23:32.147503  1922 solver.cpp:244]     Train net output #1: loss = 15.1156 (* 1 = 15.1156 loss)\nI1208 04:23:32.355386  1922 sgd_solver.cpp:166] Iteration 14200, lr = 2.13\nI1208 04:23:32.365449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00042 > 2) by scale factor 0.666573\nI1208 04:23:36.520437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13362 > 2) by scale factor 0.937374\nI1208 04:23:40.676343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24365 > 2) by scale factor 0.61659\nI1208 04:23:44.832442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60309 > 2) by scale factor 0.555079\nI1208 04:23:48.988608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50634 > 2) by scale factor 0.570396\nI1208 04:23:53.143904  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15779 > 2) by scale factor 0.633355\nI1208 04:23:57.299301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78416 > 2) by scale factor 0.528519\nI1208 04:24:01.453789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3494 > 2) by scale factor 0.597122\nI1208 04:24:05.609153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39591 > 2) by scale factor 0.834756\nI1208 04:24:09.764261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03542 > 2) by scale factor 0.658888\nI1208 04:24:13.919787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57778 > 2) by scale factor 0.775861\nI1208 04:24:18.074600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.045 > 2) by scale factor 0.494438\nI1208 04:24:22.230096  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92411 > 2) by scale factor 0.683968\nI1208 04:24:26.386029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72565 > 2) by scale factor 0.423222\nI1208 04:24:30.541342  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49181 > 2) by scale factor 0.572768\nI1208 04:24:34.696362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89098 > 2) by scale factor 0.691808\nI1208 04:24:38.852310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27192 > 2) by scale factor 0.611262\nI1208 04:24:43.006642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95522 > 2) by scale factor 0.505661\nI1208 04:24:47.161067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66064 > 2) by scale factor 0.751698\nI1208 04:24:51.316141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44677 > 2) by scale factor 0.580253\nI1208 04:24:55.471093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91493 > 2) by scale factor 0.510865\nI1208 04:24:59.625804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04869 > 2) by scale factor 0.976233\nI1208 04:25:03.780386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23901 > 2) by scale factor 0.471808\nI1208 04:25:07.935379  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97007 > 2) by scale factor 0.50377\nI1208 04:25:12.090319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40943 > 2) by scale factor 0.453573\nI1208 04:25:16.245093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11654 > 2) by scale factor 0.641737\nI1208 04:25:24.554235  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 3.88289 > 2) by scale factor 0.515081\nI1208 04:25:28.710448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93178 > 2) by scale factor 0.682179\nI1208 04:25:32.867022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0692 > 2) by scale factor 0.651636\nI1208 04:25:37.022054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03358 > 2) by scale factor 0.659286\nI1208 04:25:41.177881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31781 > 2) by scale factor 0.463198\nI1208 04:25:45.333142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47827 > 2) by scale factor 0.807015\nI1208 04:25:49.488431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72254 > 2) by scale factor 0.537268\nI1208 04:25:53.642488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74821 > 2) by scale factor 0.727746\nI1208 04:25:57.797716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43671 > 2) by scale factor 0.820778\nI1208 04:26:01.953505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35285 > 2) by scale factor 0.596507\nI1208 04:26:06.108629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62954 > 2) by scale factor 0.551034\nI1208 04:26:10.264467  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17602 > 2) by scale factor 0.478925\nI1208 04:26:14.419067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85122 > 2) by scale factor 0.701455\nI1208 04:26:18.574475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60622 > 2) by scale factor 0.767396\nI1208 04:26:22.730147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5428 > 2) by scale factor 0.786535\nI1208 04:26:26.885643 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70621 > 2) by scale factor 0.539635\nI1208 04:26:31.041477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41604 > 2) by scale factor 0.585473\nI1208 04:26:35.197054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35859 > 2) by scale factor 0.847965\nI1208 04:26:43.504645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75503 > 2) by scale factor 0.725946\nI1208 04:26:51.812250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97083 > 2) by scale factor 0.503673\nI1208 04:26:55.968425  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15403 > 2) by scale factor 0.634109\nI1208 04:27:00.124300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35593 > 2) by scale factor 0.459144\nI1208 04:27:04.279078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41895 > 2) by scale factor 0.452597\nI1208 04:27:08.433598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06017 > 2) by scale factor 0.49259\nI1208 04:27:12.588111  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67463 > 2) by scale factor 0.747766\nI1208 04:27:16.743808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88269 > 2) by scale factor 0.693797\nI1208 04:27:20.900208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.078 > 2) by scale factor 0.649773\nI1208 04:27:25.055460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14986 > 2) by scale factor 0.634948\nI1208 04:27:29.209426  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42255 > 2) by scale factor 0.58436\nI1208 04:27:33.363839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.6589 > 2) by scale factor 0.752191\nI1208 04:27:37.518594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47866 > 2) by scale factor 0.806889\nI1208 04:27:41.673739  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5033 > 2) by scale factor 0.570891\nI1208 04:27:45.828702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24769 > 2) by scale factor 0.615822\nI1208 04:27:49.984042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27387 > 2) by scale factor 0.610898\nI1208 04:27:54.139982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53417 > 2) by scale factor 0.441095\nI1208 04:27:58.295197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06982 > 2) by scale factor 0.651504\nI1208 04:28:02.449851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.98737 > 2) by scale factor 0.401013\nI1208 04:28:06.605331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63812 > 2) by scale factor 0.758117\nI1208 04:28:10.759724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95179 > 2) by scale factor 0.5061\nI1208 04:28:14.914845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32624 > 2) by scale factor 0.60128\nI1208 04:28:19.069855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03913 > 2) by scale factor 0.658083\nI1208 04:28:23.224577  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56177 > 2) by scale factor 0.78071\nI1208 04:28:27.380194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49262 > 2) by scale factor 0.802367\nI1208 04:28:31.535259  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98691 > 2) by scale factor 0.669587\nI1208 04:28:35.690933  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00218 > 2) by scale factor 0.666183\nI1208 04:28:39.846329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3494 > 2) by scale factor 0.459834\nI1208 04:28:44.001214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80001 > 2) by scale factor 0.526314\nI1208 04:28:48.156865  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.65613 > 2) by scale factor 0.353599\nI1208 04:28:52.311456  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00104 > 2) by scale factor 0.666436\nI1208 04:28:56.466177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33844 > 2) by scale factor 0.599082\nI1208 04:29:00.620237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5489 > 2) by scale factor 0.563554\nI1208 04:29:04.774428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8669 > 2) by scale factor 0.51721\nI1208 04:29:08.929719  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02604 > 2) by scale factor 0.397927\nI1208 04:29:13.084152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65731 > 2) by scale factor 0.752641\nI1208 04:29:17.239651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48102 > 2) by scale factor 0.574545\nI1208 04:29:21.394628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76857 > 2) by scale factor 0.722396\nI1208 04:29:25.548957  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91619 > 2) by scale factor 0.5107\nI1208 04:29:29.704650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9378 > 2) by scale factor 0.680782\nI1208 04:29:33.859308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.60845 > 2) by scale factor 0.433986\nI1208 04:29:38.014714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95488 > 2) by scale factor 0.505704\nI1208 04:29:42.170075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5949 > 2) by scale factor 0.770744\nI1208 04:29:46.324647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45358 > 2) by scale factor 0.815135\nI1208 04:29:50.479815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54788 > 2) by scale factor 0.784967\nI1208 04:29:54.634152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34737 > 2) by scale factor 0.597484\nI1208 04:29:58.789615  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2713 > 2) by scale factor 0.611378\nI1208 04:30:02.945195  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46556 > 2) by scale factor 0.811174\nI1208 04:30:07.100293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.10429 > 2) by scale factor 0.95044\nI1208 04:30:11.255112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.029 > 2) by scale factor 0.660283\nI1208 04:30:15.410922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78386 > 2) by scale factor 0.528561\nI1208 04:30:19.566449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94247 > 2) by scale factor 0.507296\nI1208 04:30:23.722378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32483 > 2) by scale factor 0.601535\nI1208 04:30:23.734123  1922 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1208 04:33:01.903654  1922 solver.cpp:404]     Test net output #0: accuracy = 0.157529\nI1208 04:33:01.904108  1922 solver.cpp:404]     Test net output #1: loss = 21.5628 (* 1 = 21.5628 loss)\nI1208 04:33:05.840781  1922 
solver.cpp:228] Iteration 14300, loss = 20.9871\nI1208 04:33:05.840816  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1208 04:33:05.840832  1922 solver.cpp:244]     Train net output #1: loss = 20.9871 (* 1 = 20.9871 loss)\nI1208 04:33:06.046661  1922 sgd_solver.cpp:166] Iteration 14300, lr = 2.145\nI1208 04:33:06.056737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09762 > 2) by scale factor 0.488088\nI1208 04:33:10.209249  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67454 > 2) by scale factor 0.747793\nI1208 04:33:14.361814  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32936 > 2) by scale factor 0.600716\nI1208 04:33:18.514744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16168 > 2) by scale factor 0.480575\nI1208 04:33:22.666985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60371 > 2) by scale factor 0.768135\nI1208 04:33:26.818636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98824 > 2) by scale factor 0.501475\nI1208 04:33:30.971782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98208 > 2) by scale factor 0.670672\nI1208 04:33:35.123088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91013 > 2) by scale factor 0.511492\nI1208 04:33:39.274817  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38742 > 2) by scale factor 0.455849\nI1208 04:33:43.426136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04713 > 2) by scale factor 0.656355\nI1208 04:33:47.578563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30971 > 2) by scale factor 0.604283\nI1208 04:33:51.731081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12112 > 2) by scale factor 0.485305\nI1208 04:33:55.882346 
 1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03779 > 2) by scale factor 0.397\nI1208 04:34:00.035257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42477 > 2) by scale factor 0.583981\nI1208 04:34:04.187767  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18652 > 2) by scale factor 0.477724\nI1208 04:34:08.338255  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.87702 > 2) by scale factor 0.410086\nI1208 04:34:12.489688  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07718 > 2) by scale factor 0.962843\nI1208 04:34:16.642594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84141 > 2) by scale factor 0.703875\nI1208 04:34:20.794397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23182 > 2) by scale factor 0.618846\nI1208 04:34:24.945755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73959 > 2) by scale factor 0.534819\nI1208 04:34:29.099447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78529 > 2) by scale factor 0.718058\nI1208 04:34:33.251107  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31946 > 2) by scale factor 0.862269\nI1208 04:34:37.403612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28999 > 2) by scale factor 0.466202\nI1208 04:34:41.556012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67107 > 2) by scale factor 0.5448\nI1208 04:34:45.708185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16624 > 2) by scale factor 0.480049\nI1208 04:34:49.859416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76532 > 2) by scale factor 0.531163\nI1208 04:34:54.011643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.21719 > 2) by scale factor 0.902043\nI1208 04:34:58.163466  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79242 > 2) by scale factor 0.527368\nI1208 04:35:02.314746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27456 > 2) by scale factor 0.879291\nI1208 04:35:06.466609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27362 > 2) by scale factor 0.879656\nI1208 04:35:10.618314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12478 > 2) by scale factor 0.640046\nI1208 04:35:14.771003  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97346 > 2) by scale factor 0.672617\nI1208 04:35:18.922392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.1207 > 2) by scale factor 0.943085\nI1208 04:35:23.073302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04809 > 2) by scale factor 0.656148\nI1208 04:35:27.223845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92262 > 2) by scale factor 0.509863\nI1208 04:35:31.374441  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16707 > 2) by scale factor 0.922906\nI1208 04:35:35.526232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45895 > 2) by scale factor 0.813357\nI1208 04:35:39.677911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44629 > 2) by scale factor 0.580334\nI1208 04:35:43.829594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80455 > 2) by scale factor 0.713128\nI1208 04:35:47.980551  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40264 > 2) by scale factor 0.58778\nI1208 04:35:52.132280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5181 > 2) by scale factor 0.568489\nI1208 04:35:56.284502  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49278 > 2) by scale factor 0.802318\nI1208 04:36:00.435503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83854 > 2) by scale factor 0.704588\nI1208 04:36:04.586190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3249 > 2) by scale factor 0.462438\nI1208 04:36:08.737773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03053 > 2) by scale factor 0.65995\nI1208 04:36:12.889724  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21492 > 2) by scale factor 0.622099\nI1208 04:36:17.040554  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40046 > 2) by scale factor 0.454498\nI1208 04:36:21.191996  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0837 > 2) by scale factor 0.959833\nI1208 04:36:25.342942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22476 > 2) by scale factor 0.620201\nI1208 04:36:29.495242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22185 > 2) by scale factor 0.620762\nI1208 04:36:33.645862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09705 > 2) by scale factor 0.488156\nI1208 04:36:37.797266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03972 > 2) by scale factor 0.495083\nI1208 04:36:41.947841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12975 > 2) by scale factor 0.939077\nI1208 04:36:46.099423  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96456 > 2) by scale factor 0.504469\nI1208 04:36:50.249994  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67626 > 2) by scale factor 0.544032\nI1208 04:36:54.401264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.65737 > 2) by scale factor 0.546841\nI1208 04:36:58.553617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34046 > 2) by scale factor 0.460781\nI1208 04:37:02.705701  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.278 > 2) by scale factor 0.877961\nI1208 04:37:06.857519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53641 > 2) by scale factor 0.565546\nI1208 04:37:15.158836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87207 > 2) by scale factor 0.51652\nI1208 04:37:19.309670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32535 > 2) by scale factor 0.860084\nI1208 04:37:23.460613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.86467 > 2) by scale factor 0.411127\nI1208 04:37:27.611583  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35598 > 2) by scale factor 0.595951\nI1208 04:37:31.763336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91624 > 2) by scale factor 0.510694\nI1208 04:37:35.914083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03467 > 2) by scale factor 0.495704\nI1208 04:37:40.065534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40668 > 2) by scale factor 0.831021\nI1208 04:37:44.216495  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99559 > 2) by scale factor 0.667649\nI1208 04:37:48.367723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2966 > 2) by scale factor 0.465484\nI1208 04:37:52.519285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95936 > 2) by scale factor 0.505132\nI1208 04:37:56.669605  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29752 > 2) by scale factor 0.870503\nI1208 04:38:00.821615  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75217 > 2) by scale factor 0.7267\nI1208 04:38:04.972942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54645 > 2) by scale factor 0.785408\nI1208 04:38:09.125046  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.83473 > 2) by scale factor 0.413673\nI1208 04:38:13.276371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68721 > 2) by scale factor 0.744265\nI1208 04:38:17.427462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03582 > 2) by scale factor 0.658801\nI1208 04:38:21.578562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07855 > 2) by scale factor 0.490371\nI1208 04:38:25.728847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42019 > 2) by scale factor 0.826381\nI1208 04:38:29.880532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3441 > 2) by scale factor 0.598067\nI1208 04:38:34.032564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31174 > 2) by scale factor 0.46385\nI1208 04:38:38.184772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59704 > 2) by scale factor 0.770106\nI1208 04:38:42.336239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86005 > 2) by scale factor 0.518128\nI1208 04:38:46.487185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68349 > 2) by scale factor 0.745299\nI1208 04:38:50.638548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99837 > 2) by scale factor 0.667029\nI1208 04:38:54.788838  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55635 > 2) by scale factor 0.562374\nI1208 04:38:58.939929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.12991 > 2) by scale factor 0.939007\nI1208 04:39:03.091559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1911 > 2) by scale factor 0.477202\nI1208 04:39:07.242985  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22279 > 2) by scale factor 0.620581\nI1208 04:39:11.394413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40658 > 2) by scale factor 0.587098\nI1208 04:39:15.546205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.52514 > 2) by scale factor 0.441975\nI1208 04:39:19.698112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32027 > 2) by scale factor 0.602361\nI1208 04:39:23.849637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5009 > 2) by scale factor 0.799713\nI1208 04:39:28.000835  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21662 > 2) by scale factor 0.474313\nI1208 04:39:32.151360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67813 > 2) by scale factor 0.543754\nI1208 04:39:36.302671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76895 > 2) by scale factor 0.530652\nI1208 04:39:40.453536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77167 > 2) by scale factor 0.53027\nI1208 04:39:44.604681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14189 > 2) by scale factor 0.636559\nI1208 04:39:48.755592  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06622 > 2) by scale factor 0.65227\nI1208 04:39:52.908171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39237 > 2) by scale factor 0.83599\nI1208 04:39:57.060842  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00449 > 2) by scale factor 0.66567\nI1208 04:39:57.072643  1922 
solver.cpp:337] Iteration 14400, Testing net (#0)\nI1208 04:42:35.191486  1922 solver.cpp:404]     Test net output #0: accuracy = 0.147765\nI1208 04:42:35.191987  1922 solver.cpp:404]     Test net output #1: loss = 14.9184 (* 1 = 14.9184 loss)\nI1208 04:42:39.128366  1922 solver.cpp:228] Iteration 14400, loss = 16.5605\nI1208 04:42:39.128401  1922 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1208 04:42:39.128418  1922 solver.cpp:244]     Train net output #1: loss = 16.5605 (* 1 = 16.5605 loss)\nI1208 04:42:39.332310  1922 sgd_solver.cpp:166] Iteration 14400, lr = 2.16\nI1208 04:42:39.342396  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66542 > 2) by scale factor 0.428686\nI1208 04:42:43.494593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64525 > 2) by scale factor 0.430547\nI1208 04:42:47.646287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70534 > 2) by scale factor 0.539762\nI1208 04:42:51.797341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47849 > 2) by scale factor 0.574962\nI1208 04:42:55.947968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05091 > 2) by scale factor 0.493716\nI1208 04:43:00.100389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75594 > 2) by scale factor 0.725705\nI1208 04:43:04.252473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28904 > 2) by scale factor 0.466305\nI1208 04:43:08.403705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44139 > 2) by scale factor 0.819204\nI1208 04:43:12.555146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83067 > 2) by scale factor 0.522102\nI1208 04:43:16.707250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40942 > 2) by scale factor 0.58661\nI1208 04:43:20.859050  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84288 > 2) by scale factor 0.703513\nI1208 04:43:25.010121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21314 > 2) by scale factor 0.474705\nI1208 04:43:29.161617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83584 > 2) by scale factor 0.521399\nI1208 04:43:33.312362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42918 > 2) by scale factor 0.58323\nI1208 04:43:37.463356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12678 > 2) by scale factor 0.639636\nI1208 04:43:41.614683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52149 > 2) by scale factor 0.793181\nI1208 04:43:45.767153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7748 > 2) by scale factor 0.720774\nI1208 04:43:49.918253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15499 > 2) by scale factor 0.928078\nI1208 04:43:54.069315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62285 > 2) by scale factor 0.552051\nI1208 04:43:58.220556  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82624 > 2) by scale factor 0.707655\nI1208 04:44:02.372068  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49525 > 2) by scale factor 0.444914\nI1208 04:44:06.523533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12877 > 2) by scale factor 0.484406\nI1208 04:44:10.674875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00301 > 2) by scale factor 0.665998\nI1208 04:44:14.826639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14169 > 2) by scale factor 0.482895\nI1208 04:44:18.977833  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.80458 > 2) by scale factor 0.71312\nI1208 04:44:23.128343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43458 > 2) by scale factor 0.821495\nI1208 04:44:27.280076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11963 > 2) by scale factor 0.641101\nI1208 04:44:31.431391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44157 > 2) by scale factor 0.581129\nI1208 04:44:35.582535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65125 > 2) by scale factor 0.75436\nI1208 04:44:39.733569  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99904 > 2) by scale factor 0.66688\nI1208 04:44:43.883266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28589 > 2) by scale factor 0.608662\nI1208 04:44:48.034427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49381 > 2) by scale factor 0.572441\nI1208 04:44:52.186365  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97033 > 2) by scale factor 0.503737\nI1208 04:44:56.337292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27239 > 2) by scale factor 0.468123\nI1208 04:45:00.487920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55792 > 2) by scale factor 0.781884\nI1208 04:45:04.638901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27959 > 2) by scale factor 0.87735\nI1208 04:45:08.789903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90924 > 2) by scale factor 0.687465\nI1208 04:45:12.941579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42752 > 2) by scale factor 0.583512\nI1208 04:45:17.092887  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35646 > 2) by scale factor 0.848731\nI1208 04:45:21.243820  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35472 > 2) by scale factor 0.849359\nI1208 04:45:25.395720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82774 > 2) by scale factor 0.707278\nI1208 04:45:29.546967  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29841 > 2) by scale factor 0.870165\nI1208 04:45:33.697890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5791 > 2) by scale factor 0.775463\nI1208 04:45:41.998855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.62088 > 2) by scale factor 0.552352\nI1208 04:45:46.150380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90318 > 2) by scale factor 0.512402\nI1208 04:45:50.301156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73607 > 2) by scale factor 0.730975\nI1208 04:45:54.453497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60439 > 2) by scale factor 0.767935\nI1208 04:45:58.604656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50811 > 2) by scale factor 0.570107\nI1208 04:46:02.754827  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53581 > 2) by scale factor 0.788702\nI1208 04:46:06.905063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95582 > 2) by scale factor 0.505584\nI1208 04:46:11.056282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63934 > 2) by scale factor 0.757764\nI1208 04:46:15.207028  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41343 > 2) by scale factor 0.58592\nI1208 04:46:19.358049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.12125 > 2) by scale factor 0.942841\nI1208 04:46:23.509505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.84455 > 2) by scale factor 0.703098\nI1208 04:46:27.660825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53068 > 2) by scale factor 0.7903\nI1208 04:46:31.811373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79709 > 2) by scale factor 0.71503\nI1208 04:46:35.962487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4464 > 2) by scale factor 0.817527\nI1208 04:46:40.114043  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34257 > 2) by scale factor 0.853762\nI1208 04:46:48.413053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15744 > 2) by scale factor 0.633425\nI1208 04:46:52.563521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67147 > 2) by scale factor 0.428131\nI1208 04:46:56.714064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70852 > 2) by scale factor 0.738411\nI1208 04:47:00.865545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20994 > 2) by scale factor 0.905002\nI1208 04:47:05.016984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02866 > 2) by scale factor 0.660358\nI1208 04:47:13.316772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24863 > 2) by scale factor 0.47074\nI1208 04:47:17.468148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61461 > 2) by scale factor 0.433406\nI1208 04:47:21.620612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73422 > 2) by scale factor 0.422456\nI1208 04:47:25.771576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31773 > 2) by scale factor 0.862913\nI1208 04:47:34.071027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01844 > 2) by scale factor 0.990863\nI1208 04:47:38.222432  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69648 > 2) by scale factor 0.541055\nI1208 04:47:42.372938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5059 > 2) by scale factor 0.570467\nI1208 04:47:46.523178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87179 > 2) by scale factor 0.516557\nI1208 04:47:50.673564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2284 > 2) by scale factor 0.619503\nI1208 04:47:54.824689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76168 > 2) by scale factor 0.724196\nI1208 04:47:58.976019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05204 > 2) by scale factor 0.493579\nI1208 04:48:03.126744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46113 > 2) by scale factor 0.577846\nI1208 04:48:07.278118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47982 > 2) by scale factor 0.574742\nI1208 04:48:11.429641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94295 > 2) by scale factor 0.67959\nI1208 04:48:15.580616  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72774 > 2) by scale factor 0.733207\nI1208 04:48:19.731389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45175 > 2) by scale factor 0.449262\nI1208 04:48:23.882360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58431 > 2) by scale factor 0.7739\nI1208 04:48:28.033124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99636 > 2) by scale factor 0.667477\nI1208 04:48:32.183885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4465 > 2) by scale factor 0.449792\nI1208 04:48:36.335351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.68828 > 2) by scale factor 0.426596\nI1208 04:48:40.486052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55552 > 2) by scale factor 0.439028\nI1208 04:48:44.637418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65199 > 2) by scale factor 0.547646\nI1208 04:48:48.788270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05902 > 2) by scale factor 0.653805\nI1208 04:48:52.940444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71168 > 2) by scale factor 0.424477\nI1208 04:48:57.091461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38176 > 2) by scale factor 0.591408\nI1208 04:49:01.242499  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93539 > 2) by scale factor 0.508209\nI1208 04:49:05.394008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67848 > 2) by scale factor 0.543703\nI1208 04:49:09.544309  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11983 > 2) by scale factor 0.64106\nI1208 04:49:13.695323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93513 > 2) by scale factor 0.6814\nI1208 04:49:17.846937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56095 > 2) by scale factor 0.780959\nI1208 04:49:21.998081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79631 > 2) by scale factor 0.526827\nI1208 04:49:26.148995  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.336 > 2) by scale factor 0.599521\nI1208 04:49:30.299531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9644 > 2) by scale factor 0.674673\nI1208 04:49:30.311305  1922 solver.cpp:337] Iteration 14500, Testing net (#0)\nI1208 04:52:08.539304  1922 solver.cpp:404]     Test net output #0: accuracy = 
0.133647\nI1208 04:52:08.539857  1922 solver.cpp:404]     Test net output #1: loss = 17.6108 (* 1 = 17.6108 loss)\nI1208 04:52:12.478088  1922 solver.cpp:228] Iteration 14500, loss = 19.3263\nI1208 04:52:12.478132  1922 solver.cpp:244]     Train net output #0: accuracy = 0.0823529\nI1208 04:52:12.478157  1922 solver.cpp:244]     Train net output #1: loss = 19.3263 (* 1 = 19.3263 loss)\nI1208 04:52:12.681792  1922 sgd_solver.cpp:166] Iteration 14500, lr = 2.175\nI1208 04:52:12.691968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06197 > 2) by scale factor 0.492372\nI1208 04:52:16.844281  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95051 > 2) by scale factor 0.677849\nI1208 04:52:20.996408  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0017 > 2) by scale factor 0.499787\nI1208 04:52:25.149428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81418 > 2) by scale factor 0.524359\nI1208 04:52:29.301148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62767 > 2) by scale factor 0.432183\nI1208 04:52:33.453371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72423 > 2) by scale factor 0.42335\nI1208 04:52:37.605676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4746 > 2) by scale factor 0.808212\nI1208 04:52:41.758020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98788 > 2) by scale factor 0.669371\nI1208 04:52:45.909652  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24094 > 2) by scale factor 0.617104\nI1208 04:52:50.062006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62889 > 2) by scale factor 0.760777\nI1208 04:52:54.214056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56495 > 2) by scale factor 0.438121\nI1208 
04:52:58.366638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20393 > 2) by scale factor 0.624233\nI1208 04:53:02.519526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07452 > 2) by scale factor 0.650507\nI1208 04:53:06.671316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2652 > 2) by scale factor 0.612521\nI1208 04:53:10.824018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86898 > 2) by scale factor 0.516931\nI1208 04:53:14.975706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31238 > 2) by scale factor 0.463781\nI1208 04:53:19.128088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26691 > 2) by scale factor 0.612199\nI1208 04:53:23.280531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58544 > 2) by scale factor 0.436164\nI1208 04:53:27.432169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05447 > 2) by scale factor 0.654778\nI1208 04:53:31.584071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99264 > 2) by scale factor 0.500921\nI1208 04:53:35.736599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48312 > 2) by scale factor 0.574199\nI1208 04:53:39.888156  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.04438 > 2) by scale factor 0.978294\nI1208 04:53:44.041275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6074 > 2) by scale factor 0.767049\nI1208 04:53:48.193186  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98206 > 2) by scale factor 0.670678\nI1208 04:53:52.345139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89119 > 2) by scale factor 0.691756\nI1208 04:53:56.496384  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.03016 > 2) by scale factor 0.660031\nI1208 04:54:00.649029  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13328 > 2) by scale factor 0.638308\nI1208 04:54:04.801141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21908 > 2) by scale factor 0.621295\nI1208 04:54:08.952761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50173 > 2) by scale factor 0.799446\nI1208 04:54:13.104826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82037 > 2) by scale factor 0.709128\nI1208 04:54:17.256316  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78981 > 2) by scale factor 0.527731\nI1208 04:54:21.408862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65762 > 2) by scale factor 0.752554\nI1208 04:54:25.561538  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31965 > 2) by scale factor 0.602474\nI1208 04:54:29.713512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22593 > 2) by scale factor 0.619976\nI1208 04:54:33.866116  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32347 > 2) by scale factor 0.860781\nI1208 04:54:38.018479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82938 > 2) by scale factor 0.70687\nI1208 04:54:42.169807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72112 > 2) by scale factor 0.537473\nI1208 04:54:46.321852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67744 > 2) by scale factor 0.746982\nI1208 04:54:50.472849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78004 > 2) by scale factor 0.719413\nI1208 04:54:54.624658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25411 > 2) by scale factor 0.614607\nI1208 
04:54:58.777479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61391 > 2) by scale factor 0.553417\nI1208 04:55:02.929971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26715 > 2) by scale factor 0.468697\nI1208 04:55:07.081337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13897 > 2) by scale factor 0.935028\nI1208 04:55:11.232894  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7149 > 2) by scale factor 0.736676\nI1208 04:55:15.385535  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62175 > 2) by scale factor 0.762848\nI1208 04:55:19.537748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13644 > 2) by scale factor 0.637666\nI1208 04:55:23.689858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49423 > 2) by scale factor 0.572372\nI1208 04:55:27.841725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85789 > 2) by scale factor 0.518418\nI1208 04:55:31.994201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34906 > 2) by scale factor 0.597182\nI1208 04:55:36.146420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83692 > 2) by scale factor 0.521251\nI1208 04:55:40.298475  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18867 > 2) by scale factor 0.477479\nI1208 04:55:44.450048  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72581 > 2) by scale factor 0.536796\nI1208 04:55:48.602002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1432 > 2) by scale factor 0.482718\nI1208 04:55:52.755122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88681 > 2) by scale factor 0.692807\nI1208 04:55:56.908505  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.33055 > 2) by scale factor 0.600502\nI1208 04:56:01.059937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28235 > 2) by scale factor 0.60932\nI1208 04:56:05.212644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61479 > 2) by scale factor 0.76488\nI1208 04:56:09.365300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79914 > 2) by scale factor 0.714505\nI1208 04:56:13.516490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0029 > 2) by scale factor 0.666023\nI1208 04:56:17.668187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22381 > 2) by scale factor 0.473507\nI1208 04:56:21.819975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28278 > 2) by scale factor 0.876125\nI1208 04:56:25.971976  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48036 > 2) by scale factor 0.574653\nI1208 04:56:30.123716  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32248 > 2) by scale factor 0.86115\nI1208 04:56:34.276012  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31781 > 2) by scale factor 0.862883\nI1208 04:56:38.427907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70144 > 2) by scale factor 0.740345\nI1208 04:56:42.580348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39034 > 2) by scale factor 0.836702\nI1208 04:56:46.731632  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42313 > 2) by scale factor 0.58426\nI1208 04:56:50.882951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85851 > 2) by scale factor 0.518335\nI1208 04:56:55.035487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40294 > 2) by scale factor 0.454242\nI1208 
04:56:59.187209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6198 > 2) by scale factor 0.552516\nI1208 04:57:03.339336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32795 > 2) by scale factor 0.60097\nI1208 04:57:07.491185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50191 > 2) by scale factor 0.571117\nI1208 04:57:11.643790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4186 > 2) by scale factor 0.826926\nI1208 04:57:15.796496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6886 > 2) by scale factor 0.743882\nI1208 04:57:19.948145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25135 > 2) by scale factor 0.888357\nI1208 04:57:24.099282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35822 > 2) by scale factor 0.595554\nI1208 04:57:28.251047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11048 > 2) by scale factor 0.947653\nI1208 04:57:32.402696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97091 > 2) by scale factor 0.673195\nI1208 04:57:36.553858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96199 > 2) by scale factor 0.675222\nI1208 04:57:40.705308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91532 > 2) by scale factor 0.510814\nI1208 04:57:44.857756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59817 > 2) by scale factor 0.555838\nI1208 04:57:49.010378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85101 > 2) by scale factor 0.701506\nI1208 04:57:53.161851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41117 > 2) by scale factor 0.829474\nI1208 04:57:57.312381  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.45364 > 2) by scale factor 0.815115\nI1208 04:58:01.463857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68945 > 2) by scale factor 0.542086\nI1208 04:58:05.616204  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72748 > 2) by scale factor 0.536556\nI1208 04:58:09.767407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4758 > 2) by scale factor 0.446847\nI1208 04:58:13.919564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26179 > 2) by scale factor 0.469287\nI1208 04:58:18.071923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1378 > 2) by scale factor 0.483348\nI1208 04:58:22.224099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22471 > 2) by scale factor 0.620211\nI1208 04:58:26.376634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3025 > 2) by scale factor 0.868623\nI1208 04:58:30.528903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82958 > 2) by scale factor 0.522251\nI1208 04:58:34.680461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42411 > 2) by scale factor 0.584094\nI1208 04:58:38.832228  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28816 > 2) by scale factor 0.874064\nI1208 04:58:42.984194  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18404 > 2) by scale factor 0.628133\nI1208 04:58:47.135982  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88465 > 2) by scale factor 0.693325\nI1208 04:58:51.288442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60859 > 2) by scale factor 0.766698\nI1208 04:58:55.440917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67126 > 2) by scale factor 0.748711\nI1208 
04:58:59.592362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19767 > 2) by scale factor 0.625455\nI1208 04:59:03.744459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76344 > 2) by scale factor 0.723735\nI1208 04:59:03.756356  1922 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1208 05:01:41.972574  1922 solver.cpp:404]     Test net output #0: accuracy = 0.205765\nI1208 05:01:41.973080  1922 solver.cpp:404]     Test net output #1: loss = 11.6663 (* 1 = 11.6663 loss)\nI1208 05:01:45.911669  1922 solver.cpp:228] Iteration 14600, loss = 12.6977\nI1208 05:01:45.911708  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1208 05:01:45.911734  1922 solver.cpp:244]     Train net output #1: loss = 12.6977 (* 1 = 12.6977 loss)\nI1208 05:01:46.110496  1922 sgd_solver.cpp:166] Iteration 14600, lr = 2.19\nI1208 05:01:46.120576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58609 > 2) by scale factor 0.773369\nI1208 05:01:50.271219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68565 > 2) by scale factor 0.744698\nI1208 05:01:54.422284  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48595 > 2) by scale factor 0.573731\nI1208 05:01:58.572962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6723 > 2) by scale factor 0.544618\nI1208 05:02:02.724072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66967 > 2) by scale factor 0.428295\nI1208 05:02:06.875326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65783 > 2) by scale factor 0.546772\nI1208 05:02:11.028065  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07547 > 2) by scale factor 0.650306\nI1208 05:02:15.178843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06468 > 2) by scale factor 
0.652598\nI1208 05:02:19.331054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44792 > 2) by scale factor 0.58006\nI1208 05:02:23.483319  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87823 > 2) by scale factor 0.515699\nI1208 05:02:27.634485  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01178 > 2) by scale factor 0.498532\nI1208 05:02:31.784824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09923 > 2) by scale factor 0.487897\nI1208 05:02:35.936064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05785 > 2) by scale factor 0.654055\nI1208 05:02:40.086581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34343 > 2) by scale factor 0.598187\nI1208 05:02:44.236975  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75987 > 2) by scale factor 0.531933\nI1208 05:02:48.388293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30008 > 2) by scale factor 0.869536\nI1208 05:02:52.540530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66268 > 2) by scale factor 0.751124\nI1208 05:02:56.691789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29676 > 2) by scale factor 0.606656\nI1208 05:03:00.842998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37309 > 2) by scale factor 0.592928\nI1208 05:03:04.993638  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87186 > 2) by scale factor 0.696412\nI1208 05:03:09.144783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08796 > 2) by scale factor 0.647678\nI1208 05:03:13.295265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30599 > 2) by scale factor 0.464469\nI1208 05:03:17.446321  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.56164 > 2) by scale factor 0.56154\nI1208 05:03:21.597558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36788 > 2) by scale factor 0.593845\nI1208 05:03:25.749302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21176 > 2) by scale factor 0.622711\nI1208 05:03:29.899875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.36404 > 2) by scale factor 0.372853\nI1208 05:03:34.050405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32039 > 2) by scale factor 0.602338\nI1208 05:03:38.201093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81002 > 2) by scale factor 0.711739\nI1208 05:03:42.352622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5964 > 2) by scale factor 0.556112\nI1208 05:03:46.502830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29373 > 2) by scale factor 0.607215\nI1208 05:03:50.653296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93979 > 2) by scale factor 0.507641\nI1208 05:03:54.804433  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11968 > 2) by scale factor 0.641092\nI1208 05:03:58.955368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07536 > 2) by scale factor 0.650331\nI1208 05:04:03.106184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04893 > 2) by scale factor 0.655968\nI1208 05:04:07.257323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.026 > 2) by scale factor 0.39793\nI1208 05:04:11.407999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08936 > 2) by scale factor 0.489075\nI1208 05:04:15.558650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21728 > 2) by scale factor 
0.621644\nI1208 05:04:19.710003  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6599 > 2) by scale factor 0.751908\nI1208 05:04:23.860734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56113 > 2) by scale factor 0.561619\nI1208 05:04:28.011154  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.85878 > 2) by scale factor 0.411626\nI1208 05:04:32.162276  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10162 > 2) by scale factor 0.644824\nI1208 05:04:36.313823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35234 > 2) by scale factor 0.459523\nI1208 05:04:40.464761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.05311 > 2) by scale factor 0.974131\nI1208 05:04:48.764678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9994 > 2) by scale factor 0.500075\nI1208 05:04:52.916064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89609 > 2) by scale factor 0.513335\nI1208 05:04:57.066926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61492 > 2) by scale factor 0.553262\nI1208 05:05:01.217658  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6693 > 2) by scale factor 0.545063\nI1208 05:05:05.369315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00818 > 2) by scale factor 0.664853\nI1208 05:05:09.520627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.811 > 2) by scale factor 0.711491\nI1208 05:05:13.672098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66469 > 2) by scale factor 0.750557\nI1208 05:05:17.823098  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49676 > 2) by scale factor 0.80104\nI1208 05:05:21.974354  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.5328 > 2) by scale factor 0.789639\nI1208 05:05:26.125720  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39238 > 2) by scale factor 0.589557\nI1208 05:05:30.276159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06433 > 2) by scale factor 0.652671\nI1208 05:05:34.426908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94546 > 2) by scale factor 0.67901\nI1208 05:05:38.578785  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04777 > 2) by scale factor 0.656217\nI1208 05:05:42.729286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95699 > 2) by scale factor 0.505434\nI1208 05:05:46.880064  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40811 > 2) by scale factor 0.586835\nI1208 05:05:51.030879  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58456 > 2) by scale factor 0.773827\nI1208 05:05:55.182090  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74541 > 2) by scale factor 0.728489\nI1208 05:05:59.332831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75043 > 2) by scale factor 0.727159\nI1208 05:06:03.483191  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41669 > 2) by scale factor 0.585362\nI1208 05:06:07.634240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75554 > 2) by scale factor 0.725811\nI1208 05:06:11.786597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69193 > 2) by scale factor 0.742963\nI1208 05:06:15.938841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62153 > 2) by scale factor 0.762915\nI1208 05:06:20.089763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20684 > 2) by scale 
factor 0.623666\nI1208 05:06:24.241400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37767 > 2) by scale factor 0.456864\nI1208 05:06:28.391912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43778 > 2) by scale factor 0.450676\nI1208 05:06:32.541676  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01921 > 2) by scale factor 0.49761\nI1208 05:06:36.692354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67907 > 2) by scale factor 0.543616\nI1208 05:06:40.843677  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80119 > 2) by scale factor 0.526151\nI1208 05:06:44.994771  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70425 > 2) by scale factor 0.53992\nI1208 05:06:49.145112  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09006 > 2) by scale factor 0.647236\nI1208 05:06:53.295861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4605 > 2) by scale factor 0.44838\nI1208 05:06:57.446662  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02156 > 2) by scale factor 0.497319\nI1208 05:07:01.597952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76309 > 2) by scale factor 0.723828\nI1208 05:07:09.897663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9896 > 2) by scale factor 0.501303\nI1208 05:07:14.048674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36216 > 2) by scale factor 0.846683\nI1208 05:07:18.199780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04208 > 2) by scale factor 0.657445\nI1208 05:07:22.351403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31167 > 2) by scale factor 0.603925\nI1208 05:07:26.502573  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.41145 > 2) by scale factor 0.829376\nI1208 05:07:30.653097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27022 > 2) by scale factor 0.61158\nI1208 05:07:34.803859  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16702 > 2) by scale factor 0.479959\nI1208 05:07:38.955015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93996 > 2) by scale factor 0.507619\nI1208 05:07:43.105927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04508 > 2) by scale factor 0.656798\nI1208 05:07:47.257179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85193 > 2) by scale factor 0.519221\nI1208 05:07:51.408938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03393 > 2) by scale factor 0.397304\nI1208 05:07:55.560919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22312 > 2) by scale factor 0.473583\nI1208 05:07:59.712146  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.75264 > 2) by scale factor 0.420819\nI1208 05:08:03.863250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58267 > 2) by scale factor 0.436427\nI1208 05:08:08.014920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20965 > 2) by scale factor 0.623121\nI1208 05:08:12.166755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29964 > 2) by scale factor 0.606126\nI1208 05:08:16.317153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90919 > 2) by scale factor 0.687476\nI1208 05:08:20.467594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11074 > 2) by scale factor 0.947534\nI1208 05:08:24.618964  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78895 > 2) by 
scale factor 0.717116\nI1208 05:08:28.770294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86992 > 2) by scale factor 0.696883\nI1208 05:08:32.920613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47115 > 2) by scale factor 0.809339\nI1208 05:08:37.080845  1922 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1208 05:11:15.341130  1922 solver.cpp:404]     Test net output #0: accuracy = 0.155588\nI1208 05:11:15.341609  1922 solver.cpp:404]     Test net output #1: loss = 12.5327 (* 1 = 12.5327 loss)\nI1208 05:11:19.280303  1922 solver.cpp:228] Iteration 14700, loss = 11.7356\nI1208 05:11:19.280344  1922 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1208 05:11:19.280367  1922 solver.cpp:244]     Train net output #1: loss = 11.7356 (* 1 = 11.7356 loss)\nI1208 05:11:19.480191  1922 sgd_solver.cpp:166] Iteration 14700, lr = 2.205\nI1208 05:11:19.490293  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09297 > 2) by scale factor 0.646627\nI1208 05:11:23.643440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15258 > 2) by scale factor 0.481628\nI1208 05:11:27.796207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54871 > 2) by scale factor 0.439685\nI1208 05:11:31.948853  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93527 > 2) by scale factor 0.508224\nI1208 05:11:36.101752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28783 > 2) by scale factor 0.874191\nI1208 05:11:40.254307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67316 > 2) by scale factor 0.748179\nI1208 05:11:44.406774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65844 > 2) by scale factor 0.75232\nI1208 05:11:48.559937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49775 > 2) 
by scale factor 0.571797\nI1208 05:11:52.712385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50044 > 2) by scale factor 0.571357\nI1208 05:11:56.864636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46018 > 2) by scale factor 0.578005\nI1208 05:12:01.017033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.32089 > 2) by scale factor 0.462867\nI1208 05:12:05.169695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9368 > 2) by scale factor 0.681014\nI1208 05:12:09.322898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2377 > 2) by scale factor 0.471954\nI1208 05:12:13.475581  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77023 > 2) by scale factor 0.721961\nI1208 05:12:17.628993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80434 > 2) by scale factor 0.525715\nI1208 05:12:21.780905  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79082 > 2) by scale factor 0.527591\nI1208 05:12:25.934047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55779 > 2) by scale factor 0.781924\nI1208 05:12:30.086802  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8458 > 2) by scale factor 0.702789\nI1208 05:12:34.239136  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42141 > 2) by scale factor 0.825966\nI1208 05:12:38.391912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79178 > 2) by scale factor 0.716388\nI1208 05:12:42.545490  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96128 > 2) by scale factor 0.675384\nI1208 05:12:46.698071  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26353 > 2) by scale factor 0.612833\nI1208 05:12:50.850874  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68559 > 2) by scale factor 0.744715\nI1208 05:12:55.003269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30311 > 2) by scale factor 0.868393\nI1208 05:12:59.155834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53919 > 2) by scale factor 0.565101\nI1208 05:13:03.308512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18621 > 2) by scale factor 0.477759\nI1208 05:13:07.461236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28738 > 2) by scale factor 0.874365\nI1208 05:13:11.613729  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57323 > 2) by scale factor 0.559717\nI1208 05:13:15.764986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13638 > 2) by scale factor 0.936164\nI1208 05:13:19.919190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.34492 > 2) by scale factor 0.852909\nI1208 05:13:24.071844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61658 > 2) by scale factor 0.433221\nI1208 05:13:28.223948  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99588 > 2) by scale factor 0.500515\nI1208 05:13:32.378170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91882 > 2) by scale factor 0.685209\nI1208 05:13:36.530823  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10247 > 2) by scale factor 0.487512\nI1208 05:13:40.684087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2267 > 2) by scale factor 0.473183\nI1208 05:13:44.837595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18462 > 2) by scale factor 0.915489\nI1208 05:13:48.991207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 4.44361 > 2) by scale factor 0.450084\nI1208 05:13:53.144230  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75077 > 2) by scale factor 0.533224\nI1208 05:13:57.297504  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96564 > 2) by scale factor 0.674391\nI1208 05:14:01.450047  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33249 > 2) by scale factor 0.461628\nI1208 05:14:05.603880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57969 > 2) by scale factor 0.436711\nI1208 05:14:09.756207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14672 > 2) by scale factor 0.635582\nI1208 05:14:13.909076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11203 > 2) by scale factor 0.486378\nI1208 05:14:18.061601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38453 > 2) by scale factor 0.590925\nI1208 05:14:22.215824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95156 > 2) by scale factor 0.677607\nI1208 05:14:26.367732  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32387 > 2) by scale factor 0.601708\nI1208 05:14:30.521584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08859 > 2) by scale factor 0.489166\nI1208 05:14:34.674674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9007 > 2) by scale factor 0.512728\nI1208 05:14:38.828002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51313 > 2) by scale factor 0.795819\nI1208 05:14:42.981031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31668 > 2) by scale factor 0.603013\nI1208 05:14:47.134105  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83515 > 2) by scale factor 0.521492\nI1208 05:14:51.286099  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50368 > 2) by scale factor 0.570828\nI1208 05:14:55.439242  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31948 > 2) by scale factor 0.602503\nI1208 05:14:59.591641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34564 > 2) by scale factor 0.460231\nI1208 05:15:03.743943  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42499 > 2) by scale factor 0.451978\nI1208 05:15:07.896483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78343 > 2) by scale factor 0.41811\nI1208 05:15:12.050197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68273 > 2) by scale factor 0.543075\nI1208 05:15:16.204620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25191 > 2) by scale factor 0.888134\nI1208 05:15:20.356799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90896 > 2) by scale factor 0.511645\nI1208 05:15:24.509485  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0448 > 2) by scale factor 0.494462\nI1208 05:15:28.662998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3617 > 2) by scale factor 0.594937\nI1208 05:15:32.815079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97075 > 2) by scale factor 0.503683\nI1208 05:15:36.967182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60284 > 2) by scale factor 0.76839\nI1208 05:15:41.120960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33386 > 2) by scale factor 0.856949\nI1208 05:15:45.274277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08526 > 2) by scale factor 0.489565\nI1208 05:15:49.428181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.02631 > 2) by scale factor 0.660871\nI1208 05:15:53.581159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77086 > 2) by scale factor 0.530383\nI1208 05:15:57.733775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97875 > 2) by scale factor 0.50267\nI1208 05:16:01.886782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23303 > 2) by scale factor 0.618614\nI1208 05:16:06.039744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39747 > 2) by scale factor 0.834211\nI1208 05:16:10.193225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33297 > 2) by scale factor 0.600065\nI1208 05:16:14.345613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38945 > 2) by scale factor 0.590066\nI1208 05:16:18.499171  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34453 > 2) by scale factor 0.460349\nI1208 05:16:22.652513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6464 > 2) by scale factor 0.548487\nI1208 05:16:26.805728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81148 > 2) by scale factor 0.524731\nI1208 05:16:30.958360  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89819 > 2) by scale factor 0.690087\nI1208 05:16:35.111024  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48539 > 2) by scale factor 0.445892\nI1208 05:16:39.263875  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15635 > 2) by scale factor 0.633643\nI1208 05:16:43.417260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.72678 > 2) by scale factor 0.423121\nI1208 05:16:47.570497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76501 > 2) by scale factor 0.531207\nI1208 05:16:51.723732  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12361 > 2) by scale factor 0.39035\nI1208 05:16:55.876205  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01771 > 2) by scale factor 0.662755\nI1208 05:17:00.029108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2087 > 2) by scale factor 0.623305\nI1208 05:17:04.182477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93687 > 2) by scale factor 0.508018\nI1208 05:17:08.335515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05274 > 2) by scale factor 0.655149\nI1208 05:17:12.487843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12972 > 2) by scale factor 0.484294\nI1208 05:17:16.641078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68633 > 2) by scale factor 0.426773\nI1208 05:17:20.794867  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64155 > 2) by scale factor 0.430891\nI1208 05:17:24.947947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01446 > 2) by scale factor 0.4982\nI1208 05:17:29.101218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10786 > 2) by scale factor 0.391553\nI1208 05:17:33.253229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.28741 > 2) by scale factor 0.378257\nI1208 05:17:37.406366  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53216 > 2) by scale factor 0.441291\nI1208 05:17:41.558038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6709 > 2) by scale factor 0.428183\nI1208 05:17:45.710328  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73687 > 2) by scale factor 0.730761\nI1208 05:17:49.863893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.63987 > 2) by scale factor 0.54947\nI1208 05:17:54.015799  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3517 > 2) by scale factor 0.459591\nI1208 05:17:58.168403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17564 > 2) by scale factor 0.478969\nI1208 05:18:02.321135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69775 > 2) by scale factor 0.741358\nI1208 05:18:06.473826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44507 > 2) by scale factor 0.58054\nI1208 05:18:10.626447  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4339 > 2) by scale factor 0.821725\nI1208 05:18:10.638305  1922 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1208 05:20:48.894824  1922 solver.cpp:404]     Test net output #0: accuracy = 0.210059\nI1208 05:20:48.895336  1922 solver.cpp:404]     Test net output #1: loss = 15.7593 (* 1 = 15.7593 loss)\nI1208 05:20:52.834374  1922 solver.cpp:228] Iteration 14800, loss = 15.9174\nI1208 05:20:52.834411  1922 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1208 05:20:52.834435  1922 solver.cpp:244]     Train net output #1: loss = 15.9174 (* 1 = 15.9174 loss)\nI1208 05:20:53.035284  1922 sgd_solver.cpp:166] Iteration 14800, lr = 2.22\nI1208 05:20:53.045392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84334 > 2) by scale factor 0.703399\nI1208 05:20:57.199981  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7379 > 2) by scale factor 0.53506\nI1208 05:21:01.353266  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09646 > 2) by scale factor 0.488226\nI1208 05:21:05.507298  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28643 > 2) by scale factor 0.466588\nI1208 05:21:09.660980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.85687 > 2) by scale factor 0.700067\nI1208 05:21:13.814193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03405 > 2) by scale factor 0.659185\nI1208 05:21:17.968422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69999 > 2) by scale factor 0.540542\nI1208 05:21:22.121795  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97631 > 2) by scale factor 0.671972\nI1208 05:21:26.274786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38221 > 2) by scale factor 0.591329\nI1208 05:21:30.427443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59989 > 2) by scale factor 0.434793\nI1208 05:21:34.581076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8688 > 2) by scale factor 0.697157\nI1208 05:21:38.734998  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18627 > 2) by scale factor 0.627693\nI1208 05:21:42.889665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.474 > 2) by scale factor 0.575706\nI1208 05:21:47.043264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8155 > 2) by scale factor 0.710353\nI1208 05:21:51.196818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05952 > 2) by scale factor 0.492669\nI1208 05:21:55.350901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24708 > 2) by scale factor 0.470912\nI1208 05:21:59.505220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21026 > 2) by scale factor 0.623002\nI1208 05:22:03.659261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5916 > 2) by scale factor 0.556855\nI1208 05:22:07.813174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22574 > 2) by scale factor 0.898579\nI1208 05:22:11.966200  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8239 > 2) by scale factor 0.708241\nI1208 05:22:16.119607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74636 > 2) by scale factor 0.533852\nI1208 05:22:20.273479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94144 > 2) by scale factor 0.679939\nI1208 05:22:24.426945  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00176 > 2) by scale factor 0.49978\nI1208 05:22:28.580757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32557 > 2) by scale factor 0.860003\nI1208 05:22:32.735034  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40536 > 2) by scale factor 0.58731\nI1208 05:22:36.888207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4509 > 2) by scale factor 0.449348\nI1208 05:22:41.042202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50942 > 2) by scale factor 0.569895\nI1208 05:22:45.197022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06605 > 2) by scale factor 0.491878\nI1208 05:22:49.351361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15447 > 2) by scale factor 0.634021\nI1208 05:22:53.504573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64058 > 2) by scale factor 0.757411\nI1208 05:22:57.657641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03446 > 2) by scale factor 0.495729\nI1208 05:23:01.810286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48451 > 2) by scale factor 0.44598\nI1208 05:23:05.964983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29264 > 2) by scale factor 0.465914\nI1208 05:23:10.119313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.51074 > 2) by scale factor 0.443386\nI1208 05:23:14.273794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.57337 > 2) by scale factor 0.559696\nI1208 05:23:18.426962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36257 > 2) by scale factor 0.594783\nI1208 05:23:22.579877  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2798 > 2) by scale factor 0.467311\nI1208 05:23:26.733609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27992 > 2) by scale factor 0.877224\nI1208 05:23:30.887292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73609 > 2) by scale factor 0.422289\nI1208 05:23:35.041337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.55462 > 2) by scale factor 0.439115\nI1208 05:23:39.195340  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19303 > 2) by scale factor 0.476982\nI1208 05:23:43.348752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14726 > 2) by scale factor 0.482247\nI1208 05:23:47.501595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15181 > 2) by scale factor 0.481718\nI1208 05:23:51.653707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9856 > 2) by scale factor 0.501807\nI1208 05:23:55.806285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72319 > 2) by scale factor 0.537174\nI1208 05:23:59.958987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07472 > 2) by scale factor 0.963983\nI1208 05:24:04.112301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8433 > 2) by scale factor 0.520386\nI1208 05:24:08.266350  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03947 > 2) by scale factor 0.65801\nI1208 05:24:12.419445  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1564 > 2) by scale factor 0.633633\nI1208 05:24:16.572643  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.57823 > 2) by scale factor 0.43685\nI1208 05:24:20.725931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06357 > 2) by scale factor 0.492178\nI1208 05:24:24.879518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.99557 > 2) by scale factor 0.400355\nI1208 05:24:29.032734  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45903 > 2) by scale factor 0.578196\nI1208 05:24:33.186559  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1043 > 2) by scale factor 0.487294\nI1208 05:24:37.340277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7952 > 2) by scale factor 0.526982\nI1208 05:24:41.494590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42838 > 2) by scale factor 0.583365\nI1208 05:24:45.648038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2372 > 2) by scale factor 0.47201\nI1208 05:24:49.801337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02471 > 2) by scale factor 0.49693\nI1208 05:24:53.955256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01923 > 2) by scale factor 0.66242\nI1208 05:24:58.107751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01456 > 2) by scale factor 0.498186\nI1208 05:25:02.262315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40652 > 2) by scale factor 0.831076\nI1208 05:25:06.416393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7421 > 2) by scale factor 0.729368\nI1208 05:25:10.569380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.47646 > 2) by scale factor 0.575297\nI1208 05:25:14.722965  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9953 > 2) by scale factor 0.667713\nI1208 05:25:18.876911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20958 > 2) by scale factor 0.475107\nI1208 05:25:23.030930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47173 > 2) by scale factor 0.447254\nI1208 05:25:27.182970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81826 > 2) by scale factor 0.523799\nI1208 05:25:31.335414  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51121 > 2) by scale factor 0.79643\nI1208 05:25:35.488924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17152 > 2) by scale factor 0.630612\nI1208 05:25:39.642590  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75884 > 2) by scale factor 0.724943\nI1208 05:25:43.794672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64658 > 2) by scale factor 0.548458\nI1208 05:25:47.949009  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9477 > 2) by scale factor 0.678496\nI1208 05:25:52.102722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11358 > 2) by scale factor 0.642348\nI1208 05:25:56.256747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97209 > 2) by scale factor 0.672927\nI1208 05:26:00.409418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38349 > 2) by scale factor 0.456258\nI1208 05:26:04.563050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03493 > 2) by scale factor 0.658994\nI1208 05:26:08.716689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25644 > 2) by scale factor 0.614168\nI1208 05:26:12.869292  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47483 > 2) by scale factor 0.575568\nI1208 05:26:17.022588  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03414 > 2) by scale factor 0.659166\nI1208 05:26:21.176240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51518 > 2) by scale factor 0.568961\nI1208 05:26:25.329897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03335 > 2) by scale factor 0.495865\nI1208 05:26:29.484244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48621 > 2) by scale factor 0.573689\nI1208 05:26:33.637907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27682 > 2) by scale factor 0.610348\nI1208 05:26:37.790702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17173 > 2) by scale factor 0.630571\nI1208 05:26:41.944144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82768 > 2) by scale factor 0.707295\nI1208 05:26:46.098250  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.683 > 2) by scale factor 0.745434\nI1208 05:26:50.251122  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28485 > 2) by scale factor 0.466761\nI1208 05:26:54.405243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22462 > 2) by scale factor 0.620228\nI1208 05:26:58.558784  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23066 > 2) by scale factor 0.619069\nI1208 05:27:02.712815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99535 > 2) by scale factor 0.500582\nI1208 05:27:06.865924  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36389 > 2) by scale factor 0.59455\nI1208 05:27:11.019399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.08244 > 2) by scale factor 0.960412\nI1208 05:27:15.173236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54766 > 2) by scale factor 0.785034\nI1208 05:27:19.326292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99811 > 2) by scale factor 0.500237\nI1208 05:27:23.479310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7904 > 2) by scale factor 0.716742\nI1208 05:27:27.632288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55097 > 2) by scale factor 0.784014\nI1208 05:27:31.785681  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50408 > 2) by scale factor 0.444042\nI1208 05:27:35.938446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00298 > 2) by scale factor 0.499628\nI1208 05:27:40.092789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00749 > 2) by scale factor 0.665005\nI1208 05:27:44.244931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38307 > 2) by scale factor 0.591178\nI1208 05:27:44.256862  1922 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1208 05:30:22.546321  1922 solver.cpp:404]     Test net output #0: accuracy = 0.18\nI1208 05:30:22.546823  1922 solver.cpp:404]     Test net output #1: loss = 15.6083 (* 1 = 15.6083 loss)\nI1208 05:30:26.487958  1922 solver.cpp:228] Iteration 14900, loss = 17.2619\nI1208 05:30:26.487995  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 05:30:26.488020  1922 solver.cpp:244]     Train net output #1: loss = 17.2619 (* 1 = 17.2619 loss)\nI1208 05:30:26.688570  1922 sgd_solver.cpp:166] Iteration 14900, lr = 2.235\nI1208 05:30:26.698695  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61718 > 2) by scale factor 0.764181\nI1208 05:30:30.852978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.46652 > 2) by scale factor 0.576947\nI1208 05:30:35.007051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87466 > 2) by scale factor 0.695735\nI1208 05:30:39.161170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44711 > 2) by scale factor 0.817292\nI1208 05:30:43.314656  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63631 > 2) by scale factor 0.758637\nI1208 05:30:47.466955  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25725 > 2) by scale factor 0.614016\nI1208 05:30:51.619974  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84997 > 2) by scale factor 0.519485\nI1208 05:30:55.771644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26549 > 2) by scale factor 0.612465\nI1208 05:30:59.925292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27829 > 2) by scale factor 0.610073\nI1208 05:31:04.078402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02862 > 2) by scale factor 0.660366\nI1208 05:31:08.231793  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93356 > 2) by scale factor 0.508445\nI1208 05:31:12.385305  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54227 > 2) by scale factor 0.56461\nI1208 05:31:16.538341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54172 > 2) by scale factor 0.786868\nI1208 05:31:20.689911  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77331 > 2) by scale factor 0.530039\nI1208 05:31:24.843713  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16525 > 2) by scale factor 0.631861\nI1208 05:31:28.996534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65717 > 2) by scale factor 0.752681\nI1208 05:31:33.150949  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84916 > 2) by scale factor 0.519594\nI1208 05:31:37.304308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48109 > 2) by scale factor 0.574533\nI1208 05:31:41.457890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48649 > 2) by scale factor 0.804347\nI1208 05:31:45.611362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83571 > 2) by scale factor 0.521415\nI1208 05:31:49.764513  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0341 > 2) by scale factor 0.495774\nI1208 05:31:53.917505  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10624 > 2) by scale factor 0.643866\nI1208 05:31:58.070806  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59607 > 2) by scale factor 0.556163\nI1208 05:32:02.224578  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98822 > 2) by scale factor 0.669294\nI1208 05:32:06.377419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17753 > 2) by scale factor 0.629419\nI1208 05:32:10.530742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.165 > 2) by scale factor 0.631912\nI1208 05:32:14.684494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35607 > 2) by scale factor 0.848873\nI1208 05:32:18.837270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30949 > 2) by scale factor 0.464092\nI1208 05:32:22.991097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75232 > 2) by scale factor 0.726658\nI1208 05:32:27.144852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93627 > 2) by scale factor 0.508095\nI1208 05:32:31.298470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.22074 > 2) by scale factor 0.620975\nI1208 05:32:35.451339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37743 > 2) by scale factor 0.456889\nI1208 05:32:39.604882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98829 > 2) by scale factor 0.66928\nI1208 05:32:43.757525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66491 > 2) by scale factor 0.545715\nI1208 05:32:47.910202  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27457 > 2) by scale factor 0.467883\nI1208 05:32:52.062798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96631 > 2) by scale factor 0.674239\nI1208 05:32:56.215801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54192 > 2) by scale factor 0.786806\nI1208 05:33:00.369436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42397 > 2) by scale factor 0.825093\nI1208 05:33:04.524042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40816 > 2) by scale factor 0.586827\nI1208 05:33:08.676450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90987 > 2) by scale factor 0.511527\nI1208 05:33:12.830402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52759 > 2) by scale factor 0.56696\nI1208 05:33:16.982923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.16451 > 2) by scale factor 0.923996\nI1208 05:33:21.135937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39583 > 2) by scale factor 0.588958\nI1208 05:33:25.289088  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26161 > 2) by scale factor 0.613195\nI1208 05:33:29.442075  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53882 > 2) by scale factor 0.787767\nI1208 05:33:33.595152  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1054 > 2) by scale factor 0.64404\nI1208 05:33:37.748718  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16878 > 2) by scale factor 0.631157\nI1208 05:33:41.901481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49991 > 2) by scale factor 0.444453\nI1208 05:33:46.054666  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27069 > 2) by scale factor 0.611491\nI1208 05:33:50.207659  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15818 > 2) by scale factor 0.48098\nI1208 05:33:54.361407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45862 > 2) by scale factor 0.448569\nI1208 05:33:58.514727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43643 > 2) by scale factor 0.450813\nI1208 05:34:02.667134  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19792 > 2) by scale factor 0.625407\nI1208 05:34:06.820248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97925 > 2) by scale factor 0.671311\nI1208 05:34:10.973750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88348 > 2) by scale factor 0.409544\nI1208 05:34:15.125489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27826 > 2) by scale factor 0.877861\nI1208 05:34:19.279698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21853 > 2) by scale factor 0.474098\nI1208 05:34:23.433073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8405 > 2) by scale factor 0.520766\nI1208 05:34:27.586161  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64674 > 2) by scale factor 0.548435\nI1208 05:34:31.739825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.21251 > 2) by scale factor 0.622567\nI1208 05:34:35.892628  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93767 > 2) by scale factor 0.405049\nI1208 05:34:40.045290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63066 > 2) by scale factor 0.431904\nI1208 05:34:44.198544  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72308 > 2) by scale factor 0.53719\nI1208 05:34:48.352437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87738 > 2) by scale factor 0.515813\nI1208 05:34:52.506397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52975 > 2) by scale factor 0.790591\nI1208 05:34:56.660169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.69264 > 2) by scale factor 0.4262\nI1208 05:35:00.813614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46432 > 2) by scale factor 0.577313\nI1208 05:35:04.966989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56002 > 2) by scale factor 0.438594\nI1208 05:35:09.120755  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52434 > 2) by scale factor 0.567482\nI1208 05:35:13.274157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56032 > 2) by scale factor 0.561748\nI1208 05:35:17.426192  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24996 > 2) by scale factor 0.615393\nI1208 05:35:21.578440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46712 > 2) by scale factor 0.447715\nI1208 05:35:25.732128  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51714 > 2) by scale factor 0.568644\nI1208 05:35:29.884207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27449 > 2) by scale factor 0.610782\nI1208 05:35:34.037127  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64374 > 2) by scale factor 0.548887\nI1208 05:35:38.189803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38116 > 2) by scale factor 0.591513\nI1208 05:35:42.343526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80113 > 2) by scale factor 0.713997\nI1208 05:35:46.496170  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55763 > 2) by scale factor 0.562173\nI1208 05:35:50.649260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03787 > 2) by scale factor 0.495311\nI1208 05:35:54.801419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46607 > 2) by scale factor 0.577023\nI1208 05:35:58.955648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73392 > 2) by scale factor 0.53563\nI1208 05:36:03.108922  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96113 > 2) by scale factor 0.675418\nI1208 05:36:07.262053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10857 > 2) by scale factor 0.643383\nI1208 05:36:15.566169  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87767 > 2) by scale factor 0.695008\nI1208 05:36:19.719022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01472 > 2) by scale factor 0.663411\nI1208 05:36:23.871398  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06624 > 2) by scale factor 0.652264\nI1208 05:36:28.024617  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94997 > 2) by scale factor 0.677974\nI1208 05:36:32.177959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05346 > 2) by scale factor 0.654995\nI1208 05:36:36.332464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.53367 > 2) by scale factor 0.565984\nI1208 05:36:40.484422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19868 > 2) by scale factor 0.625258\nI1208 05:36:44.637934  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55072 > 2) by scale factor 0.563265\nI1208 05:36:48.790222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4529 > 2) by scale factor 0.579224\nI1208 05:36:52.943789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99404 > 2) by scale factor 0.667994\nI1208 05:36:57.096722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46662 > 2) by scale factor 0.810826\nI1208 05:37:01.249899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12171 > 2) by scale factor 0.485235\nI1208 05:37:05.403692  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13118 > 2) by scale factor 0.638736\nI1208 05:37:09.557026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64046 > 2) by scale factor 0.430992\nI1208 05:37:13.711019  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80732 > 2) by scale factor 0.712424\nI1208 05:37:17.863124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78056 > 2) by scale factor 0.71928\nI1208 05:37:17.875109  1922 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1208 05:39:56.103286  1922 solver.cpp:404]     Test net output #0: accuracy = 0.162118\nI1208 05:39:56.103794  1922 solver.cpp:404]     Test net output #1: loss = 17.8853 (* 1 = 17.8853 loss)\nI1208 05:40:00.043540  1922 solver.cpp:228] Iteration 15000, loss = 18.8342\nI1208 05:40:00.043576  1922 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1208 05:40:00.043599  1922 solver.cpp:244]     Train net output #1: loss = 18.8342 (* 1 = 18.8342 loss)\nI1208 05:40:00.248091  1922 
sgd_solver.cpp:166] Iteration 15000, lr = 2.25\nI1208 05:40:00.258200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75969 > 2) by scale factor 0.531959\nI1208 05:40:04.414896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7421 > 2) by scale factor 0.53446\nI1208 05:40:08.572209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4017 > 2) by scale factor 0.587942\nI1208 05:40:12.729038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65277 > 2) by scale factor 0.429852\nI1208 05:40:16.885118  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73349 > 2) by scale factor 0.422521\nI1208 05:40:21.039901  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00344 > 2) by scale factor 0.665903\nI1208 05:40:25.195710  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80897 > 2) by scale factor 0.525076\nI1208 05:40:29.351748  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6459 > 2) by scale factor 0.755887\nI1208 05:40:33.508462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70419 > 2) by scale factor 0.539929\nI1208 05:40:37.663702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.34336 > 2) by scale factor 0.598201\nI1208 05:40:41.819296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23667 > 2) by scale factor 0.617918\nI1208 05:40:50.129516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6981 > 2) by scale factor 0.540819\nI1208 05:40:54.285006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85095 > 2) by scale factor 0.701522\nI1208 05:40:58.439378  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88883 > 2) by scale factor 0.514293\nI1208 05:41:02.595811  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04713 > 2) by scale factor 0.656355\nI1208 05:41:10.906095  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53122 > 2) by scale factor 0.566377\nI1208 05:41:15.062348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90109 > 2) by scale factor 0.689396\nI1208 05:41:19.217861  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14716 > 2) by scale factor 0.635493\nI1208 05:41:23.374008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86686 > 2) by scale factor 0.517215\nI1208 05:41:27.528591  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13549 > 2) by scale factor 0.637859\nI1208 05:41:31.684193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29822 > 2) by scale factor 0.606387\nI1208 05:41:35.838327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96256 > 2) by scale factor 0.504724\nI1208 05:41:39.993629  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37708 > 2) by scale factor 0.592227\nI1208 05:41:44.150503  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70739 > 2) by scale factor 0.424864\nI1208 05:41:48.305989  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2236 > 2) by scale factor 0.47353\nI1208 05:41:52.461969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30728 > 2) by scale factor 0.604726\nI1208 05:41:56.617431  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20417 > 2) by scale factor 0.475718\nI1208 05:42:00.774093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90332 > 2) by scale factor 0.688867\nI1208 05:42:04.930737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.76946 > 2) by scale factor 0.722162\nI1208 05:42:09.086361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65689 > 2) by scale factor 0.752758\nI1208 05:42:13.242664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21589 > 2) by scale factor 0.621911\nI1208 05:42:17.397668  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88335 > 2) by scale factor 0.693637\nI1208 05:42:21.553432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15303 > 2) by scale factor 0.481576\nI1208 05:42:25.709010  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76242 > 2) by scale factor 0.531573\nI1208 05:42:34.018337  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94149 > 2) by scale factor 0.679928\nI1208 05:42:38.174038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08362 > 2) by scale factor 0.648588\nI1208 05:42:42.329869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.11354 > 2) by scale factor 0.946278\nI1208 05:42:50.639051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54142 > 2) by scale factor 0.564745\nI1208 05:42:54.794847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18385 > 2) by scale factor 0.478029\nI1208 05:42:58.951011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09167 > 2) by scale factor 0.646899\nI1208 05:43:03.105752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66915 > 2) by scale factor 0.545085\nI1208 05:43:07.261236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59337 > 2) by scale factor 0.771197\nI1208 05:43:11.417474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06684 > 2) by scale factor 0.491782\nI1208 
05:43:15.572018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74544 > 2) by scale factor 0.728481\nI1208 05:43:19.726986  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22868 > 2) by scale factor 0.619448\nI1208 05:43:23.882392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92773 > 2) by scale factor 0.683124\nI1208 05:43:28.038208  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.43018 > 2) by scale factor 0.451449\nI1208 05:43:32.193892  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12951 > 2) by scale factor 0.484319\nI1208 05:43:36.350564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28297 > 2) by scale factor 0.609204\nI1208 05:43:40.505971  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.08427 > 2) by scale factor 0.959567\nI1208 05:43:44.661058  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76536 > 2) by scale factor 0.723233\nI1208 05:43:48.815501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03484 > 2) by scale factor 0.659013\nI1208 05:43:52.971016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72715 > 2) by scale factor 0.733365\nI1208 05:43:57.126133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49594 > 2) by scale factor 0.801302\nI1208 05:44:01.281765  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.076 > 2) by scale factor 0.490678\nI1208 05:44:05.437618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46051 > 2) by scale factor 0.57795\nI1208 05:44:09.592345  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25084 > 2) by scale factor 0.615226\nI1208 05:44:13.747542  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.68173 > 2) by scale factor 0.745788\nI1208 05:44:17.902834  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48821 > 2) by scale factor 0.803791\nI1208 05:44:22.059049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64102 > 2) by scale factor 0.549296\nI1208 05:44:26.214371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92972 > 2) by scale factor 0.508942\nI1208 05:44:30.369937  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59662 > 2) by scale factor 0.435103\nI1208 05:44:34.525841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30369 > 2) by scale factor 0.605384\nI1208 05:44:38.682479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77276 > 2) by scale factor 0.530116\nI1208 05:44:42.838373  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18782 > 2) by scale factor 0.627388\nI1208 05:44:46.994056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87436 > 2) by scale factor 0.516214\nI1208 05:44:51.150602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98612 > 2) by scale factor 0.669766\nI1208 05:44:55.305311  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26774 > 2) by scale factor 0.468632\nI1208 05:44:59.461403  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67638 > 2) by scale factor 0.544013\nI1208 05:45:03.616821  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08189 > 2) by scale factor 0.648951\nI1208 05:45:07.772409  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35712 > 2) by scale factor 0.595749\nI1208 05:45:11.927402  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16797 > 2) by scale factor 0.63132\nI1208 
05:45:16.083451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1461 > 2) by scale factor 0.635708\nI1208 05:45:20.238521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.88777 > 2) by scale factor 0.409184\nI1208 05:45:24.393541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74469 > 2) by scale factor 0.421523\nI1208 05:45:28.548462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98332 > 2) by scale factor 0.502093\nI1208 05:45:32.703419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3113 > 2) by scale factor 0.865312\nI1208 05:45:36.859087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58048 > 2) by scale factor 0.775049\nI1208 05:45:41.014573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52794 > 2) by scale factor 0.566903\nI1208 05:45:45.168923  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11951 > 2) by scale factor 0.641127\nI1208 05:45:49.323457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26579 > 2) by scale factor 0.612409\nI1208 05:45:53.478783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94534 > 2) by scale factor 0.679038\nI1208 05:45:57.633781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25585 > 2) by scale factor 0.614278\nI1208 05:46:01.789361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65766 > 2) by scale factor 0.546798\nI1208 05:46:05.945888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10466 > 2) by scale factor 0.644194\nI1208 05:46:10.101738  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2467 > 2) by scale factor 0.616011\nI1208 05:46:14.257753  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.03429 > 2) by scale factor 0.659133\nI1208 05:46:18.413091  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90216 > 2) by scale factor 0.512537\nI1208 05:46:22.567829  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83335 > 2) by scale factor 0.521736\nI1208 05:46:26.723518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0852 > 2) by scale factor 0.648257\nI1208 05:46:30.879030  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9004 > 2) by scale factor 0.512768\nI1208 05:46:35.035619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.32255 > 2) by scale factor 0.861124\nI1208 05:46:39.191947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84153 > 2) by scale factor 0.703847\nI1208 05:46:43.347685  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.53479 > 2) by scale factor 0.789022\nI1208 05:46:47.503644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85465 > 2) by scale factor 0.70061\nI1208 05:46:51.659983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41768 > 2) by scale factor 0.827238\nI1208 05:46:51.671795  1922 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1208 05:49:30.047792  1922 solver.cpp:404]     Test net output #0: accuracy = 0.150059\nI1208 05:49:30.048337  1922 solver.cpp:404]     Test net output #1: loss = 9.33179 (* 1 = 9.33179 loss)\nI1208 05:49:33.990741  1922 solver.cpp:228] Iteration 15100, loss = 9.78419\nI1208 05:49:33.990775  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 05:49:33.990792  1922 solver.cpp:244]     Train net output #1: loss = 9.78419 (* 1 = 9.78419 loss)\nI1208 05:49:34.205376  1922 sgd_solver.cpp:166] Iteration 15100, lr = 2.265\nI1208 05:49:34.215478  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.89202 > 2) by scale factor 0.691558\nI1208 05:49:38.385121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93538 > 2) by scale factor 0.50821\nI1208 05:49:42.554412  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14874 > 2) by scale factor 0.635174\nI1208 05:49:46.723006  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5198 > 2) by scale factor 0.793715\nI1208 05:49:50.891839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61309 > 2) by scale factor 0.765379\nI1208 05:49:55.060946  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72702 > 2) by scale factor 0.536622\nI1208 05:49:59.230492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28457 > 2) by scale factor 0.466791\nI1208 05:50:03.398463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45845 > 2) by scale factor 0.448586\nI1208 05:50:07.568519  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99335 > 2) by scale factor 0.500832\nI1208 05:50:11.736285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1447 > 2) by scale factor 0.635991\nI1208 05:50:15.904510  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30229 > 2) by scale factor 0.605641\nI1208 05:50:20.073532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01337 > 2) by scale factor 0.66371\nI1208 05:50:24.241530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34859 > 2) by scale factor 0.459919\nI1208 05:50:28.410181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89782 > 2) by scale factor 0.513107\nI1208 05:50:32.573055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95005 > 2) by scale factor 0.677954\nI1208 
05:50:36.740782  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55872 > 2) by scale factor 0.781641\nI1208 05:50:40.909126  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74396 > 2) by scale factor 0.728873\nI1208 05:50:45.076596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55815 > 2) by scale factor 0.781816\nI1208 05:50:49.243921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85812 > 2) by scale factor 0.518387\nI1208 05:50:53.411689  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3772 > 2) by scale factor 0.456913\nI1208 05:50:57.579831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97581 > 2) by scale factor 0.503042\nI1208 05:51:01.748106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.97198 > 2) by scale factor 0.402254\nI1208 05:51:05.916537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44907 > 2) by scale factor 0.579867\nI1208 05:51:10.084789  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03163 > 2) by scale factor 0.496078\nI1208 05:51:14.253407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50195 > 2) by scale factor 0.444252\nI1208 05:51:18.421108  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84786 > 2) by scale factor 0.702281\nI1208 05:51:22.588557  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0181 > 2) by scale factor 0.662669\nI1208 05:51:26.756534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73347 > 2) by scale factor 0.535694\nI1208 05:51:30.923961  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85411 > 2) by scale factor 0.700745\nI1208 05:51:35.092381  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.92424 > 2) by scale factor 0.683937\nI1208 05:51:39.259857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39953 > 2) by scale factor 0.833497\nI1208 05:51:43.427619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47674 > 2) by scale factor 0.807513\nI1208 05:51:47.595984  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75748 > 2) by scale factor 0.532271\nI1208 05:51:51.763608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33994 > 2) by scale factor 0.598814\nI1208 05:51:55.931831  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4291 > 2) by scale factor 0.823351\nI1208 05:52:00.101699  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.70333 > 2) by scale factor 0.540055\nI1208 05:52:04.270368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82428 > 2) by scale factor 0.708146\nI1208 05:52:08.438644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20467 > 2) by scale factor 0.624088\nI1208 05:52:12.607451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04248 > 2) by scale factor 0.657359\nI1208 05:52:16.774624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.0368 > 2) by scale factor 0.981934\nI1208 05:52:20.943440  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42611 > 2) by scale factor 0.583752\nI1208 05:52:25.111841  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64085 > 2) by scale factor 0.549322\nI1208 05:52:29.279399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78961 > 2) by scale factor 0.716945\nI1208 05:52:33.447188  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33879 > 2) by scale factor 0.59902\nI1208 
05:52:37.615687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5422 > 2) by scale factor 0.78672\nI1208 05:52:41.782452  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72611 > 2) by scale factor 0.536752\nI1208 05:52:45.951067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81859 > 2) by scale factor 0.523753\nI1208 05:52:50.119487  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27541 > 2) by scale factor 0.878962\nI1208 05:52:54.288326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19228 > 2) by scale factor 0.91229\nI1208 05:52:58.454780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22592 > 2) by scale factor 0.619978\nI1208 05:53:02.622115  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3482 > 2) by scale factor 0.597335\nI1208 05:53:06.789152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92344 > 2) by scale factor 0.509757\nI1208 05:53:10.957141  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39302 > 2) by scale factor 0.589445\nI1208 05:53:15.125177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10419 > 2) by scale factor 0.644291\nI1208 05:53:19.292376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83995 > 2) by scale factor 0.704237\nI1208 05:53:23.460639  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9677 > 2) by scale factor 0.402601\nI1208 05:53:27.628815  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95491 > 2) by scale factor 0.676839\nI1208 05:53:31.796067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5519 > 2) by scale factor 0.563079\nI1208 05:53:35.964711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 3.68489 > 2) by scale factor 0.542758\nI1208 05:53:40.133474  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29059 > 2) by scale factor 0.607794\nI1208 05:53:44.301476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1204 > 2) by scale factor 0.640944\nI1208 05:53:48.469830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06268 > 2) by scale factor 0.653022\nI1208 05:53:52.637385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46228 > 2) by scale factor 0.812256\nI1208 05:53:56.804846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63267 > 2) by scale factor 0.759685\nI1208 05:54:00.973422  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80313 > 2) by scale factor 0.713489\nI1208 05:54:05.141207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91705 > 2) by scale factor 0.685623\nI1208 05:54:09.308650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68063 > 2) by scale factor 0.746095\nI1208 05:54:13.476763  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17648 > 2) by scale factor 0.629628\nI1208 05:54:17.644237  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57641 > 2) by scale factor 0.776275\nI1208 05:54:21.812376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91574 > 2) by scale factor 0.510759\nI1208 05:54:25.981324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31502 > 2) by scale factor 0.863924\nI1208 05:54:30.148864  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84095 > 2) by scale factor 0.70399\nI1208 05:54:34.316902  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35057 > 2) by scale factor 0.45971\nI1208 
05:54:38.485327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43111 > 2) by scale factor 0.82267\nI1208 05:54:42.653045  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27698 > 2) by scale factor 0.878358\nI1208 05:54:46.821213  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02524 > 2) by scale factor 0.661104\nI1208 05:54:50.989679  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03677 > 2) by scale factor 0.658594\nI1208 05:54:55.158383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66279 > 2) by scale factor 0.546031\nI1208 05:54:59.326100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02808 > 2) by scale factor 0.496514\nI1208 05:55:03.494124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07199 > 2) by scale factor 0.491161\nI1208 05:55:07.663018  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85271 > 2) by scale factor 0.701088\nI1208 05:55:11.830737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25194 > 2) by scale factor 0.470374\nI1208 05:55:15.999152  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15384 > 2) by scale factor 0.481482\nI1208 05:55:24.333003  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79469 > 2) by scale factor 0.527052\nI1208 05:55:28.502341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6939 > 2) by scale factor 0.541433\nI1208 05:55:32.670881  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86319 > 2) by scale factor 0.698522\nI1208 05:55:36.839365  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66244 > 2) by scale factor 0.42896\nI1208 05:55:41.007309  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.63803 > 2) by scale factor 0.431217\nI1208 05:55:45.175354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66276 > 2) by scale factor 0.546036\nI1208 05:55:49.343201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95932 > 2) by scale factor 0.403281\nI1208 05:55:53.511651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39733 > 2) by scale factor 0.588697\nI1208 05:55:57.679060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24724 > 2) by scale factor 0.615908\nI1208 05:56:01.846913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01481 > 2) by scale factor 0.663391\nI1208 05:56:06.015928  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.3398 > 2) by scale factor 0.854776\nI1208 05:56:10.183332  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96446 > 2) by scale factor 0.504482\nI1208 05:56:14.382969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78939 > 2) by scale factor 0.717003\nI1208 05:56:18.535109  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45263 > 2) by scale factor 0.579269\nI1208 05:56:22.689196  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35661 > 2) by scale factor 0.595839\nI1208 05:56:26.842593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.26737 > 2) by scale factor 0.468673\nI1208 05:56:26.854480  1922 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1208 05:59:05.063292  1922 solver.cpp:404]     Test net output #0: accuracy = 0.144471\nI1208 05:59:05.063814  1922 solver.cpp:404]     Test net output #1: loss = 18.4447 (* 1 = 18.4447 loss)\nI1208 05:59:09.006783  1922 solver.cpp:228] Iteration 15200, loss = 18.0198\nI1208 05:59:09.006819  1922 solver.cpp:244]     Train net output #0: 
accuracy = 0.152941\nI1208 05:59:09.006835  1922 solver.cpp:244]     Train net output #1: loss = 18.0198 (* 1 = 18.0198 loss)\nI1208 05:59:09.201514  1922 sgd_solver.cpp:166] Iteration 15200, lr = 2.28\nI1208 05:59:09.211640  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2761 > 2) by scale factor 0.610482\nI1208 05:59:13.362633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93177 > 2) by scale factor 0.682182\nI1208 05:59:17.513263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17157 > 2) by scale factor 0.479436\nI1208 05:59:21.663931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.07012 > 2) by scale factor 0.491386\nI1208 05:59:25.815333  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99962 > 2) by scale factor 0.66675\nI1208 05:59:29.966419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97496 > 2) by scale factor 0.503149\nI1208 05:59:34.118072  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38056 > 2) by scale factor 0.456563\nI1208 05:59:38.268473  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58908 > 2) by scale factor 0.557247\nI1208 05:59:42.419469  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92532 > 2) by scale factor 0.509513\nI1208 05:59:46.570737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9235 > 2) by scale factor 0.50975\nI1208 05:59:50.721634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47271 > 2) by scale factor 0.80883\nI1208 05:59:54.872287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94784 > 2) by scale factor 0.678463\nI1208 05:59:59.023025  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59864 > 2) by scale factor 0.769633\nI1208 
06:00:03.174584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01955 > 2) by scale factor 0.66235\nI1208 06:00:07.325073  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08234 > 2) by scale factor 0.489915\nI1208 06:00:11.476730  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53226 > 2) by scale factor 0.566209\nI1208 06:00:15.627496  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76087 > 2) by scale factor 0.72441\nI1208 06:00:19.777601  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22202 > 2) by scale factor 0.620729\nI1208 06:00:23.928684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87259 > 2) by scale factor 0.51645\nI1208 06:00:28.079061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58652 > 2) by scale factor 0.557644\nI1208 06:00:32.230648  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26833 > 2) by scale factor 0.611933\nI1208 06:00:36.382181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97299 > 2) by scale factor 0.672724\nI1208 06:00:40.532770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96372 > 2) by scale factor 0.674828\nI1208 06:00:44.683843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0026 > 2) by scale factor 0.666089\nI1208 06:00:48.834828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18051 > 2) by scale factor 0.478411\nI1208 06:00:52.985781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88548 > 2) by scale factor 0.514736\nI1208 06:00:57.137099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05409 > 2) by scale factor 0.493329\nI1208 06:01:01.289458  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.12485 > 2) by scale factor 0.64003\nI1208 06:01:05.440016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3189 > 2) by scale factor 0.602609\nI1208 06:01:09.591461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97529 > 2) by scale factor 0.672204\nI1208 06:01:13.743261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97613 > 2) by scale factor 0.672014\nI1208 06:01:17.895265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2451 > 2) by scale factor 0.616314\nI1208 06:01:22.047021  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54925 > 2) by scale factor 0.784544\nI1208 06:01:26.197574  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36339 > 2) by scale factor 0.846243\nI1208 06:01:30.349560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42117 > 2) by scale factor 0.452368\nI1208 06:01:34.500234  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95992 > 2) by scale factor 0.403233\nI1208 06:01:38.649744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69542 > 2) by scale factor 0.541211\nI1208 06:01:42.800416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64526 > 2) by scale factor 0.548658\nI1208 06:01:46.950347  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85657 > 2) by scale factor 0.700139\nI1208 06:01:51.100406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29155 > 2) by scale factor 0.466033\nI1208 06:01:55.250491  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48312 > 2) by scale factor 0.446118\nI1208 06:01:59.400238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7034 > 2) by scale factor 0.425224\nI1208 
06:02:03.550918  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6055 > 2) by scale factor 0.434263\nI1208 06:02:07.700320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3951 > 2) by scale factor 0.589084\nI1208 06:02:11.850751  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63056 > 2) by scale factor 0.760295\nI1208 06:02:16.001130  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0531 > 2) by scale factor 0.493449\nI1208 06:02:20.151238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09911 > 2) by scale factor 0.645346\nI1208 06:02:24.302306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5728 > 2) by scale factor 0.559786\nI1208 06:02:28.453387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64583 > 2) by scale factor 0.755907\nI1208 06:02:32.603283  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82871 > 2) by scale factor 0.707035\nI1208 06:02:36.754320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44169 > 2) by scale factor 0.819104\nI1208 06:02:40.904717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64088 > 2) by scale factor 0.757322\nI1208 06:02:45.055672  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51579 > 2) by scale factor 0.568862\nI1208 06:02:49.206826  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32415 > 2) by scale factor 0.601657\nI1208 06:02:53.357653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24161 > 2) by scale factor 0.616978\nI1208 06:02:57.508862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74481 > 2) by scale factor 0.728649\nI1208 06:03:01.659997  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.08727 > 2) by scale factor 0.489324\nI1208 06:03:05.809772  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31773 > 2) by scale factor 0.602821\nI1208 06:03:09.960548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98088 > 2) by scale factor 0.670942\nI1208 06:03:14.111657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91543 > 2) by scale factor 0.5108\nI1208 06:03:18.262778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21248 > 2) by scale factor 0.47478\nI1208 06:03:22.413389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24186 > 2) by scale factor 0.471491\nI1208 06:03:26.564525  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12152 > 2) by scale factor 0.485258\nI1208 06:03:30.714941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29271 > 2) by scale factor 0.465906\nI1208 06:03:34.865697  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63092 > 2) by scale factor 0.550825\nI1208 06:03:39.017133  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43602 > 2) by scale factor 0.821011\nI1208 06:03:43.169080  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19751 > 2) by scale factor 0.91012\nI1208 06:03:47.319798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.6708 > 2) by scale factor 0.748839\nI1208 06:03:51.470407  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36457 > 2) by scale factor 0.594429\nI1208 06:03:55.621142  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98395 > 2) by scale factor 0.502015\nI1208 06:03:59.771261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60108 > 2) by scale factor 0.55539\nI1208 
06:04:03.921644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44635 > 2) by scale factor 0.580325\nI1208 06:04:08.073489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06155 > 2) by scale factor 0.492423\nI1208 06:04:12.224424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68989 > 2) by scale factor 0.542022\nI1208 06:04:16.375386  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00042 > 2) by scale factor 0.666573\nI1208 06:04:20.526304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84601 > 2) by scale factor 0.52002\nI1208 06:04:24.677294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24948 > 2) by scale factor 0.889093\nI1208 06:04:28.827209  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98202 > 2) by scale factor 0.502258\nI1208 06:04:32.977329  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13461 > 2) by scale factor 0.638038\nI1208 06:04:37.129254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62547 > 2) by scale factor 0.761768\nI1208 06:04:41.280807  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41686 > 2) by scale factor 0.82752\nI1208 06:04:45.430586  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26082 > 2) by scale factor 0.613343\nI1208 06:04:49.580657  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6898 > 2) by scale factor 0.542035\nI1208 06:04:53.731132  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31348 > 2) by scale factor 0.463663\nI1208 06:04:57.881158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03227 > 2) by scale factor 0.659573\nI1208 06:05:02.031575  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.19785 > 2) by scale factor 0.909981\nI1208 06:05:06.182083  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2129 > 2) by scale factor 0.622491\nI1208 06:05:10.333354  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18473 > 2) by scale factor 0.915444\nI1208 06:05:14.483711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90155 > 2) by scale factor 0.689286\nI1208 06:05:18.634820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15458 > 2) by scale factor 0.928254\nI1208 06:05:22.786893  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.48102 > 2) by scale factor 0.574545\nI1208 06:05:26.937448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54245 > 2) by scale factor 0.440291\nI1208 06:05:31.088497  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66188 > 2) by scale factor 0.751348\nI1208 06:05:35.239508  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.29296 > 2) by scale factor 0.607357\nI1208 06:05:39.389784  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06263 > 2) by scale factor 0.653034\nI1208 06:05:43.540369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15203 > 2) by scale factor 0.481692\nI1208 06:05:47.691406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95662 > 2) by scale factor 0.505481\nI1208 06:05:51.842417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80168 > 2) by scale factor 0.713856\nI1208 06:05:55.993582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76848 > 2) by scale factor 0.722417\nI1208 06:06:00.144428  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19498 > 2) by scale factor 0.47676\nI1208 
06:06:00.156280  1922 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1208 06:08:38.435430  1922 solver.cpp:404]     Test net output #0: accuracy = 0.120765\nI1208 06:08:38.435969  1922 solver.cpp:404]     Test net output #1: loss = 22.1611 (* 1 = 22.1611 loss)\nI1208 06:08:42.380825  1922 solver.cpp:228] Iteration 15300, loss = 20.4971\nI1208 06:08:42.380861  1922 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1208 06:08:42.380882  1922 solver.cpp:244]     Train net output #1: loss = 20.4971 (* 1 = 20.4971 loss)\nI1208 06:08:42.575755  1922 sgd_solver.cpp:166] Iteration 15300, lr = 2.295\nI1208 06:08:42.585903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.53724 > 2) by scale factor 0.440797\nI1208 06:08:46.736727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79054 > 2) by scale factor 0.527629\nI1208 06:08:50.888645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37249 > 2) by scale factor 0.457405\nI1208 06:08:55.038931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7274 > 2) by scale factor 0.423065\nI1208 06:08:59.189610  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66668 > 2) by scale factor 0.749996\nI1208 06:09:03.340783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92046 > 2) by scale factor 0.510144\nI1208 06:09:07.492435  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35739 > 2) by scale factor 0.595701\nI1208 06:09:11.643683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50651 > 2) by scale factor 0.443803\nI1208 06:09:15.795521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08124 > 2) by scale factor 0.490047\nI1208 06:09:19.946707  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.14482 > 2) by scale factor 
0.932481\nI1208 06:09:24.097434  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4429 > 2) by scale factor 0.8187\nI1208 06:09:28.247942  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80508 > 2) by scale factor 0.712991\nI1208 06:09:32.399443  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1382 > 2) by scale factor 0.483302\nI1208 06:09:36.551158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56948 > 2) by scale factor 0.560306\nI1208 06:09:40.701804  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49387 > 2) by scale factor 0.572431\nI1208 06:09:44.852650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72039 > 2) by scale factor 0.73519\nI1208 06:09:49.004797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07779 > 2) by scale factor 0.962561\nI1208 06:09:53.155313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51267 > 2) by scale factor 0.569368\nI1208 06:09:57.306413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.01098 > 2) by scale factor 0.99454\nI1208 06:10:01.457897  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66156 > 2) by scale factor 0.751439\nI1208 06:10:05.609889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23352 > 2) by scale factor 0.618521\nI1208 06:10:09.761451  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80472 > 2) by scale factor 0.713084\nI1208 06:10:18.061573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66321 > 2) by scale factor 0.750972\nI1208 06:10:22.213227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41962 > 2) by scale factor 0.826577\nI1208 06:10:26.364886  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.37073 > 2) by scale factor 0.593342\nI1208 06:10:30.515492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2804 > 2) by scale factor 0.467246\nI1208 06:10:34.667454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.25824 > 2) by scale factor 0.469677\nI1208 06:10:38.818264  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58574 > 2) by scale factor 0.436134\nI1208 06:10:42.968526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71651 > 2) by scale factor 0.736239\nI1208 06:10:47.119685  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18944 > 2) by scale factor 0.627068\nI1208 06:10:51.271461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00116 > 2) by scale factor 0.499855\nI1208 06:10:55.422637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47033 > 2) by scale factor 0.447394\nI1208 06:10:59.572791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68568 > 2) by scale factor 0.426832\nI1208 06:11:03.723528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03468 > 2) by scale factor 0.495702\nI1208 06:11:07.874647  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08679 > 2) by scale factor 0.489381\nI1208 06:11:12.026310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96633 > 2) by scale factor 0.674235\nI1208 06:11:16.177675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56638 > 2) by scale factor 0.437984\nI1208 06:11:20.329221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27793 > 2) by scale factor 0.467515\nI1208 06:11:24.480226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76002 > 2) by scale 
factor 0.531912\nI1208 06:11:28.631919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19548 > 2) by scale factor 0.910962\nI1208 06:11:32.782413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11379 > 2) by scale factor 0.642304\nI1208 06:11:36.934389  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23409 > 2) by scale factor 0.472356\nI1208 06:11:41.085417  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83282 > 2) by scale factor 0.70601\nI1208 06:11:45.235545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11364 > 2) by scale factor 0.486187\nI1208 06:11:49.386155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68073 > 2) by scale factor 0.746066\nI1208 06:11:53.537930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5101 > 2) by scale factor 0.569785\nI1208 06:12:05.985898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10959 > 2) by scale factor 0.486667\nI1208 06:12:10.137750  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50467 > 2) by scale factor 0.443984\nI1208 06:12:14.288673  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35554 > 2) by scale factor 0.596029\nI1208 06:12:18.439343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78921 > 2) by scale factor 0.717048\nI1208 06:12:22.590339  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81861 > 2) by scale factor 0.709571\nI1208 06:12:26.741987  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.694 > 2) by scale factor 0.74239\nI1208 06:12:30.892798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92161 > 2) by scale factor 0.684554\nI1208 06:12:35.044700  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.17593 > 2) by scale factor 0.478936\nI1208 06:12:39.196313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.93511 > 2) by scale factor 0.40526\nI1208 06:12:43.347555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48763 > 2) by scale factor 0.44567\nI1208 06:12:47.498229  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90011 > 2) by scale factor 0.512806\nI1208 06:12:51.648849  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00674 > 2) by scale factor 0.665171\nI1208 06:12:55.799830  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37119 > 2) by scale factor 0.593262\nI1208 06:12:59.951279  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38592 > 2) by scale factor 0.456005\nI1208 06:13:04.102341  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41808 > 2) by scale factor 0.585124\nI1208 06:13:08.254173  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84204 > 2) by scale factor 0.520557\nI1208 06:13:12.404803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17641 > 2) by scale factor 0.478881\nI1208 06:13:16.555958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41711 > 2) by scale factor 0.585291\nI1208 06:13:20.706822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75664 > 2) by scale factor 0.53239\nI1208 06:13:24.857900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4875 > 2) by scale factor 0.573477\nI1208 06:13:29.008541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62336 > 2) by scale factor 0.762382\nI1208 06:13:33.159212  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00334 > 2) by 
scale factor 0.665925\nI1208 06:13:37.309528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.83716 > 2) by scale factor 0.521219\nI1208 06:13:41.459477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45906 > 2) by scale factor 0.578191\nI1208 06:13:45.610353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97638 > 2) by scale factor 0.671957\nI1208 06:13:49.762069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7871 > 2) by scale factor 0.528109\nI1208 06:13:53.913664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55577 > 2) by scale factor 0.562466\nI1208 06:13:58.064597  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31427 > 2) by scale factor 0.864204\nI1208 06:14:02.214820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90103 > 2) by scale factor 0.512685\nI1208 06:14:06.365862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.648 > 2) by scale factor 0.755287\nI1208 06:14:10.517144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51265 > 2) by scale factor 0.795973\nI1208 06:14:18.815520  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94869 > 2) by scale factor 0.678267\nI1208 06:14:22.966239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99835 > 2) by scale factor 0.667035\nI1208 06:14:27.117436  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49758 > 2) by scale factor 0.444684\nI1208 06:14:31.268301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.22929 > 2) by scale factor 0.897147\nI1208 06:14:35.418501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.07167 > 2) by scale factor 0.651112\nI1208 06:14:39.568905  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 4.10034 > 2) by scale factor 0.487765\nI1208 06:14:43.719715  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89946 > 2) by scale factor 0.512891\nI1208 06:14:47.871153  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82959 > 2) by scale factor 0.414114\nI1208 06:14:52.023023  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05076 > 2) by scale factor 0.655574\nI1208 06:14:56.173890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90836 > 2) by scale factor 0.687672\nI1208 06:15:00.324198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8255 > 2) by scale factor 0.522808\nI1208 06:15:04.475172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95588 > 2) by scale factor 0.676617\nI1208 06:15:08.626261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88391 > 2) by scale factor 0.514945\nI1208 06:15:12.777292  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03529 > 2) by scale factor 0.658915\nI1208 06:15:16.928376  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80187 > 2) by scale factor 0.526057\nI1208 06:15:21.078850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68674 > 2) by scale factor 0.426736\nI1208 06:15:25.230506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7677 > 2) by scale factor 0.530828\nI1208 06:15:29.381853  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20264 > 2) by scale factor 0.475892\nI1208 06:15:33.533424  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70943 > 2) by scale factor 0.738163\nI1208 06:15:33.545348  1922 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1208 06:18:11.814538  1922 
solver.cpp:404]     Test net output #0: accuracy = 0.138471\nI1208 06:18:11.815076  1922 solver.cpp:404]     Test net output #1: loss = 15.264 (* 1 = 15.264 loss)\nI1208 06:18:15.759150  1922 solver.cpp:228] Iteration 15400, loss = 14.9669\nI1208 06:18:15.759191  1922 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1208 06:18:15.759207  1922 solver.cpp:244]     Train net output #1: loss = 14.9669 (* 1 = 14.9669 loss)\nI1208 06:18:15.957396  1922 sgd_solver.cpp:166] Iteration 15400, lr = 2.31\nI1208 06:18:15.967444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.62764 > 2) by scale factor 0.432186\nI1208 06:18:20.120980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45143 > 2) by scale factor 0.57947\nI1208 06:18:24.274314  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67146 > 2) by scale factor 0.544743\nI1208 06:18:28.427472  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.4912 > 2) by scale factor 0.445315\nI1208 06:18:32.580579  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38309 > 2) by scale factor 0.839245\nI1208 06:18:36.733361  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21893 > 2) by scale factor 0.621324\nI1208 06:18:40.887290  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91664 > 2) by scale factor 0.510642\nI1208 06:18:45.039968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.34467 > 2) by scale factor 0.460334\nI1208 06:18:49.193243  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10864 > 2) by scale factor 0.643369\nI1208 06:18:53.345773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35591 > 2) by scale factor 0.595963\nI1208 06:18:57.498980  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74421 > 
2) by scale factor 0.534158\nI1208 06:19:01.653380  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.30175 > 2) by scale factor 0.377234\nI1208 06:19:05.806097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82572 > 2) by scale factor 0.414446\nI1208 06:19:09.958839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75951 > 2) by scale factor 0.724767\nI1208 06:19:14.113265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59591 > 2) by scale factor 0.770443\nI1208 06:19:18.267004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.70974 > 2) by scale factor 0.738078\nI1208 06:19:22.421566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.65486 > 2) by scale factor 0.429659\nI1208 06:19:26.575129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01057 > 2) by scale factor 0.664327\nI1208 06:19:30.728843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49122 > 2) by scale factor 0.802821\nI1208 06:19:34.882167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58583 > 2) by scale factor 0.55775\nI1208 06:19:39.034966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.31824 > 2) by scale factor 0.463152\nI1208 06:19:43.187800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40636 > 2) by scale factor 0.587137\nI1208 06:19:47.342270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21476 > 2) by scale factor 0.622131\nI1208 06:19:51.495049  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81726 > 2) by scale factor 0.415174\nI1208 06:19:55.649391  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76131 > 2) by scale factor 0.531729\nI1208 06:19:59.802464  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98357 > 2) by scale factor 0.502062\nI1208 06:20:03.956248  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02489 > 2) by scale factor 0.661181\nI1208 06:20:08.109100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.59558 > 2) by scale factor 0.556238\nI1208 06:20:12.262419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90992 > 2) by scale factor 0.687303\nI1208 06:20:16.415756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1013 > 2) by scale factor 0.48765\nI1208 06:20:20.568714  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.53644 > 2) by scale factor 0.56554\nI1208 06:20:24.722177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12455 > 2) by scale factor 0.484901\nI1208 06:20:28.875654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42758 > 2) by scale factor 0.451714\nI1208 06:20:33.028573  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79567 > 2) by scale factor 0.526917\nI1208 06:20:37.181413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52073 > 2) by scale factor 0.568064\nI1208 06:20:41.335069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56255 > 2) by scale factor 0.438351\nI1208 06:20:45.487917  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.44264 > 2) by scale factor 0.450183\nI1208 06:20:49.641711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80117 > 2) by scale factor 0.713988\nI1208 06:20:53.794801  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77792 > 2) by scale factor 0.529393\nI1208 06:20:57.946856  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.39112 > 2) by scale factor 0.455465\nI1208 06:21:02.099882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7809 > 2) by scale factor 0.528974\nI1208 06:21:06.252972  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12254 > 2) by scale factor 0.485138\nI1208 06:21:10.406824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48675 > 2) by scale factor 0.445757\nI1208 06:21:14.560181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18424 > 2) by scale factor 0.628093\nI1208 06:21:18.712508  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77523 > 2) by scale factor 0.720661\nI1208 06:21:22.865175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.39059 > 2) by scale factor 0.836615\nI1208 06:21:27.018051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38942 > 2) by scale factor 0.590071\nI1208 06:21:31.170492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92392 > 2) by scale factor 0.684013\nI1208 06:21:35.324765  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91905 > 2) by scale factor 0.510328\nI1208 06:21:39.478404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36576 > 2) by scale factor 0.45811\nI1208 06:21:43.632262  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66352 > 2) by scale factor 0.42886\nI1208 06:21:47.784931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81855 > 2) by scale factor 0.709584\nI1208 06:21:51.938038  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06202 > 2) by scale factor 0.653164\nI1208 06:21:56.091256  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46316 > 2) by scale factor 0.811965\nI1208 06:22:00.244518  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9216 > 2) by scale factor 0.509996\nI1208 06:22:04.397819  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36528 > 2) by scale factor 0.45816\nI1208 06:22:08.551560  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01257 > 2) by scale factor 0.663885\nI1208 06:22:12.704486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66656 > 2) by scale factor 0.428581\nI1208 06:22:16.857753  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86509 > 2) by scale factor 0.517453\nI1208 06:22:21.011607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66895 > 2) by scale factor 0.428362\nI1208 06:22:25.164427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72755 > 2) by scale factor 0.536545\nI1208 06:22:29.317070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68051 > 2) by scale factor 0.543403\nI1208 06:22:33.470489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.89388 > 2) by scale factor 0.408674\nI1208 06:22:37.624528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.09525 > 2) by scale factor 0.48837\nI1208 06:22:41.778278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86929 > 2) by scale factor 0.697037\nI1208 06:22:45.931746  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64509 > 2) by scale factor 0.756117\nI1208 06:22:50.085252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.21124 > 2) by scale factor 0.622813\nI1208 06:22:54.238157  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02456 > 2) by scale factor 0.661253\nI1208 06:22:58.392060  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.35802 > 2) by scale factor 0.595589\nI1208 06:23:02.545226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7746 > 2) by scale factor 0.529857\nI1208 06:23:06.697620  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88978 > 2) by scale factor 0.514168\nI1208 06:23:10.850102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51839 > 2) by scale factor 0.568442\nI1208 06:23:15.003197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66618 > 2) by scale factor 0.750137\nI1208 06:23:19.156461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8042 > 2) by scale factor 0.525735\nI1208 06:23:23.309469  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67589 > 2) by scale factor 0.427726\nI1208 06:23:27.462798  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21594 > 2) by scale factor 0.47439\nI1208 06:23:31.616889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.54483 > 2) by scale factor 0.44006\nI1208 06:23:35.770627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.23516 > 2) by scale factor 0.472237\nI1208 06:23:39.922869  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63281 > 2) by scale factor 0.759644\nI1208 06:23:44.076664  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90746 > 2) by scale factor 0.687885\nI1208 06:23:48.230868  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12305 > 2) by scale factor 0.485077\nI1208 06:23:52.384327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.89829 > 2) by scale factor 0.513045\nI1208 06:23:56.537585  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94255 > 2) by scale factor 0.679683\nI1208 06:24:00.690757  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.92561 > 2) by scale factor 0.406041\nI1208 06:24:04.843297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10756 > 2) by scale factor 0.486908\nI1208 06:24:08.996912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03096 > 2) by scale factor 0.659858\nI1208 06:24:13.149665  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50906 > 2) by scale factor 0.569953\nI1208 06:24:17.302285  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73334 > 2) by scale factor 0.731707\nI1208 06:24:21.456238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08344 > 2) by scale factor 0.489783\nI1208 06:24:25.608780  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33895 > 2) by scale factor 0.855086\nI1208 06:24:29.762020  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35932 > 2) by scale factor 0.458787\nI1208 06:24:33.915026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.45828 > 2) by scale factor 0.578322\nI1208 06:24:38.068322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40196 > 2) by scale factor 0.454343\nI1208 06:24:42.221566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43315 > 2) by scale factor 0.821978\nI1208 06:24:46.373970  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9543 > 2) by scale factor 0.676979\nI1208 06:24:50.526203  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15294 > 2) by scale factor 0.481587\nI1208 06:24:54.679395  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73271 > 2) by scale factor 0.535804\nI1208 06:24:58.832969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 5.59306 > 2) by scale factor 0.357586\nI1208 06:25:02.986124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51836 > 2) by scale factor 0.442638\nI1208 06:25:07.139528  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38992 > 2) by scale factor 0.836847\nI1208 06:25:07.151425  1922 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1208 06:27:45.436502  1922 solver.cpp:404]     Test net output #0: accuracy = 0.167118\nI1208 06:27:45.437039  1922 solver.cpp:404]     Test net output #1: loss = 19.6713 (* 1 = 19.6713 loss)\nI1208 06:27:49.380487  1922 solver.cpp:228] Iteration 15500, loss = 19.0889\nI1208 06:27:49.380524  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1208 06:27:49.380539  1922 solver.cpp:244]     Train net output #1: loss = 19.0889 (* 1 = 19.0889 loss)\nI1208 06:27:49.578125  1922 sgd_solver.cpp:166] Iteration 15500, lr = 2.325\nI1208 06:27:49.588220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00213 > 2) by scale factor 0.666194\nI1208 06:27:53.740052  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92323 > 2) by scale factor 0.684174\nI1208 06:27:57.891674  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03925 > 2) by scale factor 0.495141\nI1208 06:28:02.044816  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73372 > 2) by scale factor 0.731604\nI1208 06:28:06.195871  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4285 > 2) by scale factor 0.583346\nI1208 06:28:10.347744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75093 > 2) by scale factor 0.533201\nI1208 06:28:14.500282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90393 > 2) by scale factor 0.688722\nI1208 06:28:18.652104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.75703 > 2) by scale factor 0.42043\nI1208 06:28:22.803390  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10484 > 2) by scale factor 0.48723\nI1208 06:28:26.954915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17541 > 2) by scale factor 0.629839\nI1208 06:28:35.254150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88523 > 2) by scale factor 0.693186\nI1208 06:28:39.405905  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.82137 > 2) by scale factor 0.41482\nI1208 06:28:43.556999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4989 > 2) by scale factor 0.571608\nI1208 06:28:47.708539  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24411 > 2) by scale factor 0.891223\nI1208 06:28:51.859650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15991 > 2) by scale factor 0.925966\nI1208 06:28:56.012387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89823 > 2) by scale factor 0.690076\nI1208 06:29:00.164952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33182 > 2) by scale factor 0.857701\nI1208 06:29:04.316265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71571 > 2) by scale factor 0.736457\nI1208 06:29:08.467512  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15983 > 2) by scale factor 0.926\nI1208 06:29:12.618896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.346 > 2) by scale factor 0.460194\nI1208 06:29:16.770289  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46926 > 2) by scale factor 0.809959\nI1208 06:29:20.921268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25668 > 2) by scale factor 0.614122\nI1208 06:29:25.073274  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93366 > 2) by scale factor 0.508433\nI1208 06:29:29.225709  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7777 > 2) by scale factor 0.72002\nI1208 06:29:33.377954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10445 > 2) by scale factor 0.644236\nI1208 06:29:37.529650  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64359 > 2) by scale factor 0.756546\nI1208 06:29:41.680770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3343 > 2) by scale factor 0.461436\nI1208 06:29:45.832438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0063 > 2) by scale factor 0.665269\nI1208 06:29:49.985178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50982 > 2) by scale factor 0.443476\nI1208 06:29:54.136353  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76715 > 2) by scale factor 0.530906\nI1208 06:29:58.287286  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88339 > 2) by scale factor 0.515013\nI1208 06:30:02.439051  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81583 > 2) by scale factor 0.710269\nI1208 06:30:06.590683  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08325 > 2) by scale factor 0.489806\nI1208 06:30:10.743011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93759 > 2) by scale factor 0.680829\nI1208 06:30:14.894847  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20158 > 2) by scale factor 0.476011\nI1208 06:30:19.046494  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.15061 > 2) by scale factor 0.929971\nI1208 06:30:23.197372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.95471 > 2) by scale factor 0.676886\nI1208 06:30:27.348899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01624 > 2) by scale factor 0.497979\nI1208 06:30:31.500725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35446 > 2) by scale factor 0.596222\nI1208 06:30:35.652797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15622 > 2) by scale factor 0.481206\nI1208 06:30:39.804306  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11974 > 2) by scale factor 0.641078\nI1208 06:30:43.955458  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69891 > 2) by scale factor 0.5407\nI1208 06:30:48.107702  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38769 > 2) by scale factor 0.590373\nI1208 06:30:52.259387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88579 > 2) by scale factor 0.69305\nI1208 06:30:56.410377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13973 > 2) by scale factor 0.483124\nI1208 06:31:00.561066  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63092 > 2) by scale factor 0.550825\nI1208 06:31:04.712483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37292 > 2) by scale factor 0.592959\nI1208 06:31:08.864181  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32308 > 2) by scale factor 0.601851\nI1208 06:31:13.014932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20301 > 2) by scale factor 0.624413\nI1208 06:31:17.165076  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91437 > 2) by scale factor 0.510938\nI1208 06:31:21.317077  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05946 > 2) by scale factor 0.65371\nI1208 06:31:25.468956  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02017 > 2) by scale factor 0.497491\nI1208 06:31:29.620671  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19348 > 2) by scale factor 0.626275\nI1208 06:31:33.772619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.84448 > 2) by scale factor 0.520227\nI1208 06:31:37.924651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57895 > 2) by scale factor 0.77551\nI1208 06:31:42.076822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88436 > 2) by scale factor 0.693396\nI1208 06:31:46.227593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75336 > 2) by scale factor 0.726384\nI1208 06:31:50.379174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.5375 > 2) by scale factor 0.788177\nI1208 06:31:54.531177  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22987 > 2) by scale factor 0.619221\nI1208 06:31:58.682896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67327 > 2) by scale factor 0.544475\nI1208 06:32:02.834488  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64732 > 2) by scale factor 0.755481\nI1208 06:32:06.986053  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15056 > 2) by scale factor 0.634809\nI1208 06:32:11.138787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9572 > 2) by scale factor 0.676316\nI1208 06:32:15.291533  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78325 > 2) by scale factor 0.718584\nI1208 06:32:19.442164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82424 > 2) by scale factor 0.708155\nI1208 06:32:23.593400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.65606 > 2) by scale factor 0.752994\nI1208 06:32:27.744537  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95454 > 2) by scale factor 0.505748\nI1208 06:32:31.895927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93157 > 2) by scale factor 0.508702\nI1208 06:32:36.047222  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42735 > 2) by scale factor 0.583542\nI1208 06:32:40.198837  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01418 > 2) by scale factor 0.66353\nI1208 06:32:44.351920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25655 > 2) by scale factor 0.614148\nI1208 06:32:48.503377  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56156 > 2) by scale factor 0.780774\nI1208 06:32:52.654863  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48565 > 2) by scale factor 0.445866\nI1208 06:33:00.955420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01435 > 2) by scale factor 0.398855\nI1208 06:33:05.106954  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66283 > 2) by scale factor 0.546026\nI1208 06:33:09.258304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29601 > 2) by scale factor 0.871075\nI1208 06:33:13.409890  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13287 > 2) by scale factor 0.937705\nI1208 06:33:21.710400  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.94849 > 2) by scale factor 0.678314\nI1208 06:33:25.862896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59295 > 2) by scale factor 0.43545\nI1208 06:33:30.014238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85319 > 2) by scale factor 0.700969\nI1208 06:33:34.166383  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2796 > 2) by scale factor 0.609831\nI1208 06:33:38.317442  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.56266 > 2) by scale factor 0.438341\nI1208 06:33:42.468857  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13268 > 2) by scale factor 0.483947\nI1208 06:33:46.619660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66621 > 2) by scale factor 0.545523\nI1208 06:33:50.771438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90367 > 2) by scale factor 0.688783\nI1208 06:33:54.922812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8367 > 2) by scale factor 0.705044\nI1208 06:33:59.074959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38201 > 2) by scale factor 0.839627\nI1208 06:34:03.226351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43442 > 2) by scale factor 0.82155\nI1208 06:34:07.376687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35468 > 2) by scale factor 0.849372\nI1208 06:34:11.527307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66348 > 2) by scale factor 0.545929\nI1208 06:34:15.679093  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.0813 > 2) by scale factor 0.49004\nI1208 06:34:19.830301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91875 > 2) by scale factor 0.510367\nI1208 06:34:23.981120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36761 > 2) by scale factor 0.844734\nI1208 06:34:28.132563  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18159 > 2) by scale factor 0.478287\nI1208 06:34:32.285001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
2.67709 > 2) by scale factor 0.747079\nI1208 06:34:36.436844  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50768 > 2) by scale factor 0.797548\nI1208 06:34:40.587990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00389 > 2) by scale factor 0.665803\nI1208 06:34:40.599869  1922 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1208 06:37:18.738999  1922 solver.cpp:404]     Test net output #0: accuracy = 0.163\nI1208 06:37:18.739545  1922 solver.cpp:404]     Test net output #1: loss = 13.014 (* 1 = 13.014 loss)\nI1208 06:37:22.683172  1922 solver.cpp:228] Iteration 15600, loss = 11.3953\nI1208 06:37:22.683212  1922 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1208 06:37:22.683228  1922 solver.cpp:244]     Train net output #1: loss = 11.3953 (* 1 = 11.3953 loss)\nI1208 06:37:22.879318  1922 sgd_solver.cpp:166] Iteration 15600, lr = 2.34\nI1208 06:37:22.889454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11732 > 2) by scale factor 0.641577\nI1208 06:37:27.041896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06189 > 2) by scale factor 0.969985\nI1208 06:37:31.194413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83805 > 2) by scale factor 0.70471\nI1208 06:37:35.346493  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29423 > 2) by scale factor 0.465741\nI1208 06:37:39.498201  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95372 > 2) by scale factor 0.505853\nI1208 06:37:43.650219  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24951 > 2) by scale factor 0.615478\nI1208 06:37:47.803349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21551 > 2) by scale factor 0.474439\nI1208 06:37:51.955260  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.52387 > 2) by scale factor 0.4421\nI1208 06:37:56.107756  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87848 > 2) by scale factor 0.515666\nI1208 06:38:00.260269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37966 > 2) by scale factor 0.591776\nI1208 06:38:04.413552  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54217 > 2) by scale factor 0.564625\nI1208 06:38:08.565915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76701 > 2) by scale factor 0.722801\nI1208 06:38:12.718394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20676 > 2) by scale factor 0.906308\nI1208 06:38:16.870056  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.29871 > 2) by scale factor 0.870053\nI1208 06:38:21.022737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30687 > 2) by scale factor 0.604801\nI1208 06:38:25.174684  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65333 > 2) by scale factor 0.547446\nI1208 06:38:29.326705  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27476 > 2) by scale factor 0.610733\nI1208 06:38:33.479218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.10958 > 2) by scale factor 0.486668\nI1208 06:38:37.631958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0227 > 2) by scale factor 0.66166\nI1208 06:38:41.783483  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16117 > 2) by scale factor 0.632677\nI1208 06:38:45.936022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67382 > 2) by scale factor 0.427916\nI1208 06:38:50.088722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67812 > 2) by scale factor 0.427522\nI1208 06:38:54.240772  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28166 > 2) by scale factor 0.876556\nI1208 06:38:58.393100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90906 > 2) by scale factor 0.687508\nI1208 06:39:02.545866  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96374 > 2) by scale factor 0.504574\nI1208 06:39:06.698067  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.33678 > 2) by scale factor 0.461171\nI1208 06:39:10.851207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.1274 > 2) by scale factor 0.484566\nI1208 06:39:15.002600  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14673 > 2) by scale factor 0.482308\nI1208 06:39:19.154839  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50612 > 2) by scale factor 0.443841\nI1208 06:39:23.307082  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.36542 > 2) by scale factor 0.845518\nI1208 06:39:27.458855  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.17248 > 2) by scale factor 0.479331\nI1208 06:39:31.611197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63186 > 2) by scale factor 0.75992\nI1208 06:39:35.764031  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00437 > 2) by scale factor 0.997821\nI1208 06:39:39.916144  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.06972 > 2) by scale factor 0.966316\nI1208 06:39:44.068358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15875 > 2) by scale factor 0.633162\nI1208 06:39:48.220613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.74898 > 2) by scale factor 0.727542\nI1208 06:39:52.373015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.57211 > 2) by scale factor 0.77757\nI1208 06:39:56.525429  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48951 > 2) by scale factor 0.445483\nI1208 06:40:00.677124  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72704 > 2) by scale factor 0.536619\nI1208 06:40:04.829797  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50037 > 2) by scale factor 0.571369\nI1208 06:40:08.982653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98742 > 2) by scale factor 0.501577\nI1208 06:40:13.134846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05833 > 2) by scale factor 0.653951\nI1208 06:40:17.287101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90737 > 2) by scale factor 0.687906\nI1208 06:40:21.439723  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03687 > 2) by scale factor 0.658573\nI1208 06:40:25.592280  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80567 > 2) by scale factor 0.525531\nI1208 06:40:29.745069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16907 > 2) by scale factor 0.6311\nI1208 06:40:33.897270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6109 > 2) by scale factor 0.433755\nI1208 06:40:38.048636  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32465 > 2) by scale factor 0.601567\nI1208 06:40:42.201794  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.04026 > 2) by scale factor 0.657838\nI1208 06:40:46.353479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50765 > 2) by scale factor 0.79756\nI1208 06:40:50.506287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10719 > 2) by scale factor 0.643668\nI1208 06:40:54.658112  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65 > 2) by scale factor 0.754717\nI1208 06:40:58.811301  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46036 > 2) by scale factor 0.577974\nI1208 06:41:02.963651  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15022 > 2) by scale factor 0.481902\nI1208 06:41:07.116101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86263 > 2) by scale factor 0.698659\nI1208 06:41:11.268808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40137 > 2) by scale factor 0.587998\nI1208 06:41:15.421022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41397 > 2) by scale factor 0.585828\nI1208 06:41:19.572438  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85519 > 2) by scale factor 0.518781\nI1208 06:41:23.724455  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25836 > 2) by scale factor 0.613806\nI1208 06:41:27.876781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94029 > 2) by scale factor 0.507577\nI1208 06:41:32.029812  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54443 > 2) by scale factor 0.564265\nI1208 06:41:36.182163  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78932 > 2) by scale factor 0.717019\nI1208 06:41:40.333919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91239 > 2) by scale factor 0.686721\nI1208 06:41:44.486078  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.42514 > 2) by scale factor 0.824696\nI1208 06:41:48.638480  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2476 > 2) by scale factor 0.615839\nI1208 06:41:52.790418  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
3.29048 > 2) by scale factor 0.607814\nI1208 06:41:56.942711  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.09786 > 2) by scale factor 0.953354\nI1208 06:42:01.095252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90711 > 2) by scale factor 0.511888\nI1208 06:42:05.247958  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16424 > 2) by scale factor 0.632064\nI1208 06:42:09.400040  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.13917 > 2) by scale factor 0.934944\nI1208 06:42:13.552315  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64864 > 2) by scale factor 0.755104\nI1208 06:42:17.705518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97757 > 2) by scale factor 0.671689\nI1208 06:42:21.858541  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41099 > 2) by scale factor 0.586339\nI1208 06:42:26.010453  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95571 > 2) by scale factor 0.676656\nI1208 06:42:30.162374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.49639 > 2) by scale factor 0.572019\nI1208 06:42:34.315179  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27795 > 2) by scale factor 0.610138\nI1208 06:42:38.468268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57168 > 2) by scale factor 0.777701\nI1208 06:42:42.620167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46152 > 2) by scale factor 0.57778\nI1208 06:42:46.772653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.15383 > 2) by scale factor 0.634149\nI1208 06:42:50.925462  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02866 > 2) by scale factor 0.660358\nI1208 06:42:55.078933  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38676 > 2) by scale factor 0.590535\nI1208 06:42:59.230561  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79589 > 2) by scale factor 0.526886\nI1208 06:43:03.382016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.55995 > 2) by scale factor 0.561805\nI1208 06:43:07.534808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.95981 > 2) by scale factor 0.403241\nI1208 06:43:11.686952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42991 > 2) by scale factor 0.451477\nI1208 06:43:15.838143  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16717 > 2) by scale factor 0.479942\nI1208 06:43:19.989254  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.30978 > 2) by scale factor 0.865882\nI1208 06:43:24.140607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79303 > 2) by scale factor 0.527284\nI1208 06:43:28.293687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00272 > 2) by scale factor 0.666063\nI1208 06:43:32.444507  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03947 > 2) by scale factor 0.396867\nI1208 06:43:36.594092  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.48594 > 2) by scale factor 0.445837\nI1208 06:43:40.743439  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54918 > 2) by scale factor 0.784565\nI1208 06:43:44.892117  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.38144 > 2) by scale factor 0.456471\nI1208 06:43:49.040706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.3776 > 2) by scale factor 0.456871\nI1208 06:43:53.189576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.73971 > 2) by scale factor 0.730004\nI1208 06:43:57.338369  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.48792 > 2) by scale factor 0.803884\nI1208 06:44:01.487097  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79658 > 2) by scale factor 0.71516\nI1208 06:44:05.635550  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55021 > 2) by scale factor 0.784249\nI1208 06:44:09.784526  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66184 > 2) by scale factor 0.75136\nI1208 06:44:13.933121  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92811 > 2) by scale factor 0.509151\nI1208 06:44:13.943364  1922 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1208 06:46:47.243723  1922 solver.cpp:404]     Test net output #0: accuracy = 0.162471\nI1208 06:46:47.244269  1922 solver.cpp:404]     Test net output #1: loss = 18.7772 (* 1 = 18.7772 loss)\nI1208 06:46:51.172439  1922 solver.cpp:228] Iteration 15700, loss = 16.5009\nI1208 06:46:51.172483  1922 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1208 06:46:51.172500  1922 solver.cpp:244]     Train net output #1: loss = 16.5009 (* 1 = 16.5009 loss)\nI1208 06:46:51.387747  1922 sgd_solver.cpp:166] Iteration 15700, lr = 2.355\nI1208 06:46:51.396852  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91864 > 2) by scale factor 0.510381\nI1208 06:46:55.549609  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96299 > 2) by scale factor 0.674994\nI1208 06:46:59.702728  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38946 > 2) by scale factor 0.837009\nI1208 06:47:03.854630  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55858 > 2) by scale factor 0.781685\nI1208 06:47:08.007385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.9875 > 2) by scale factor 0.669455\nI1208 06:47:12.160696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82502 > 2) by scale factor 0.522873\nI1208 06:47:16.314200  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3894 > 2) by scale factor 0.590076\nI1208 06:47:20.466876  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36269 > 2) by scale factor 0.458432\nI1208 06:47:24.619736  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63376 > 2) by scale factor 0.759369\nI1208 06:47:28.772336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.891 > 2) by scale factor 0.691801\nI1208 06:47:32.924624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42937 > 2) by scale factor 0.583198\nI1208 06:47:37.077275  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37911 > 2) by scale factor 0.840651\nI1208 06:47:41.230680  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89252 > 2) by scale factor 0.69144\nI1208 06:47:45.383880  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.46187 > 2) by scale factor 0.812391\nI1208 06:47:49.536698  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.37149 > 2) by scale factor 0.843352\nI1208 06:47:53.689317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.30285 > 2) by scale factor 0.605538\nI1208 06:47:57.841603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79613 > 2) by scale factor 0.526852\nI1208 06:48:01.993737  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4653 > 2) by scale factor 0.57715\nI1208 06:48:06.146239  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22004 > 2) by scale factor 0.473929\nI1208 06:48:10.299693  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39547 > 2) by scale factor 0.58902\nI1208 06:48:14.451941  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19976 > 2) by scale factor 0.625047\nI1208 06:48:18.606011  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17389 > 2) by scale factor 0.630143\nI1208 06:48:22.758781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.85071 > 2) by scale factor 0.519385\nI1208 06:48:26.911214  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61333 > 2) by scale factor 0.765306\nI1208 06:48:31.063460  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88426 > 2) by scale factor 0.514898\nI1208 06:48:35.215612  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86995 > 2) by scale factor 0.696875\nI1208 06:48:39.368516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91398 > 2) by scale factor 0.510988\nI1208 06:48:43.521227  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24743 > 2) by scale factor 0.470873\nI1208 06:48:47.673564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13883 > 2) by scale factor 0.483228\nI1208 06:48:51.825790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.81776 > 2) by scale factor 0.709783\nI1208 06:48:55.978099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3084 > 2) by scale factor 0.604521\nI1208 06:49:00.130470  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58687 > 2) by scale factor 0.557589\nI1208 06:49:04.284013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91341 > 2) by scale factor 0.686482\nI1208 06:49:08.436327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 4.39979 > 2) by scale factor 0.454567\nI1208 06:49:12.589100  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82885 > 2) by scale factor 0.707001\nI1208 06:49:16.742175  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55866 > 2) by scale factor 0.781659\nI1208 06:49:20.894655  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.65204 > 2) by scale factor 0.754135\nI1208 06:49:25.046479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90049 > 2) by scale factor 0.689539\nI1208 06:49:29.198921  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.80014 > 2) by scale factor 0.416655\nI1208 06:49:33.351322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26893 > 2) by scale factor 0.611821\nI1208 06:49:37.503450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46155 > 2) by scale factor 0.577776\nI1208 06:49:41.656406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.41787 > 2) by scale factor 0.452706\nI1208 06:49:45.809351  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26169 > 2) by scale factor 0.61318\nI1208 06:49:49.961580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47858 > 2) by scale factor 0.574947\nI1208 06:49:54.113489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38043 > 2) by scale factor 0.591641\nI1208 06:49:58.265836  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91115 > 2) by scale factor 0.687013\nI1208 06:50:02.418104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13186 > 2) by scale factor 0.389722\nI1208 06:50:06.570271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.73701 > 2) by scale factor 0.422207\nI1208 
06:50:10.723336  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00884 > 2) by scale factor 0.399294\nI1208 06:50:14.875404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06742 > 2) by scale factor 0.491712\nI1208 06:50:19.027663  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54553 > 2) by scale factor 0.785692\nI1208 06:50:23.180932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19213 > 2) by scale factor 0.477084\nI1208 06:50:27.332479  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42329 > 2) by scale factor 0.584234\nI1208 06:50:31.484951  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.51319 > 2) by scale factor 0.443146\nI1208 06:50:35.637511  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.36451 > 2) by scale factor 0.458241\nI1208 06:50:39.789331  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14098 > 2) by scale factor 0.482978\nI1208 06:50:43.941929  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79382 > 2) by scale factor 0.715867\nI1208 06:50:48.094287  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.71258 > 2) by scale factor 0.737305\nI1208 06:50:52.247004  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05094 > 2) by scale factor 0.493712\nI1208 06:50:56.399236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02374 > 2) by scale factor 0.49705\nI1208 06:51:00.551667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54798 > 2) by scale factor 0.563701\nI1208 06:51:04.703653  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25039 > 2) by scale factor 0.888735\nI1208 06:51:08.855726  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 4.95155 > 2) by scale factor 0.403914\nI1208 06:51:13.007489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.49874 > 2) by scale factor 0.444569\nI1208 06:51:17.159773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27695 > 2) by scale factor 0.467622\nI1208 06:51:21.312397  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22484 > 2) by scale factor 0.473391\nI1208 06:51:25.465349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28797 > 2) by scale factor 0.608279\nI1208 06:51:29.617727  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.03793 > 2) by scale factor 0.981387\nI1208 06:51:33.769614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.20476 > 2) by scale factor 0.907129\nI1208 06:51:37.921825  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7015 > 2) by scale factor 0.425396\nI1208 06:51:42.074164  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13949 > 2) by scale factor 0.637047\nI1208 06:51:46.226860  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.6719 > 2) by scale factor 0.428091\nI1208 06:51:50.380039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93537 > 2) by scale factor 0.508212\nI1208 06:51:54.531843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20635 > 2) by scale factor 0.475472\nI1208 06:51:58.684059  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8317 > 2) by scale factor 0.70629\nI1208 06:52:02.841783  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25104 > 2) by scale factor 0.615188\nI1208 06:52:06.999246  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55879 > 2) by scale factor 0.78162\nI1208 
06:52:11.151198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89513 > 2) by scale factor 0.690815\nI1208 06:52:15.303645  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75706 > 2) by scale factor 0.532331\nI1208 06:52:19.455962  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72317 > 2) by scale factor 0.734439\nI1208 06:52:23.608721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40103 > 2) by scale factor 0.588057\nI1208 06:52:27.761576  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77696 > 2) by scale factor 0.720213\nI1208 06:52:31.912885  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23484 > 2) by scale factor 0.618269\nI1208 06:52:36.065516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91644 > 2) by scale factor 0.510668\nI1208 06:52:40.218147  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33151 > 2) by scale factor 0.600328\nI1208 06:52:44.369920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98951 > 2) by scale factor 0.501315\nI1208 06:52:48.522115  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39011 > 2) by scale factor 0.455569\nI1208 06:52:52.674002  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92041 > 2) by scale factor 0.684837\nI1208 06:52:56.827277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24236 > 2) by scale factor 0.616835\nI1208 06:53:00.979131  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.243 > 2) by scale factor 0.616713\nI1208 06:53:05.131114  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08701 > 2) by scale factor 0.489355\nI1208 06:53:09.284085  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.13303 > 2) by scale factor 0.63836\nI1208 06:53:13.435675  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73476 > 2) by scale factor 0.731326\nI1208 06:53:17.587641  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00474 > 2) by scale factor 0.665614\nI1208 06:53:21.740135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03188 > 2) by scale factor 0.659657\nI1208 06:53:25.892282  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.24928 > 2) by scale factor 0.889172\nI1208 06:53:30.045828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50286 > 2) by scale factor 0.799087\nI1208 06:53:34.198312  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78075 > 2) by scale factor 0.719229\nI1208 06:53:38.350594  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.19115 > 2) by scale factor 0.912764\nI1208 06:53:42.502459  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.55195 > 2) by scale factor 0.783715\nI1208 06:53:42.512760  1922 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1208 06:56:15.900213  1922 solver.cpp:404]     Test net output #0: accuracy = 0.162235\nI1208 06:56:15.900765  1922 solver.cpp:404]     Test net output #1: loss = 12.5769 (* 1 = 12.5769 loss)\nI1208 06:56:19.826630  1922 solver.cpp:228] Iteration 15800, loss = 12.8376\nI1208 06:56:19.826675  1922 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1208 06:56:19.826700  1922 solver.cpp:244]     Train net output #1: loss = 12.8376 (* 1 = 12.8376 loss)\nI1208 06:56:20.039664  1922 sgd_solver.cpp:166] Iteration 15800, lr = 2.37\nI1208 06:56:20.048846  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18639 > 2) by scale factor 0.62767\nI1208 06:56:24.197473  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.02197 > 2) by scale factor 0.66182\nI1208 06:56:28.346966  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82824 > 2) by scale factor 0.522433\nI1208 06:56:32.496027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63324 > 2) by scale factor 0.759519\nI1208 06:56:36.644642  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78716 > 2) by scale factor 0.717576\nI1208 06:56:40.794582  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.63647 > 2) by scale factor 0.758591\nI1208 06:56:44.942394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75891 > 2) by scale factor 0.532069\nI1208 06:56:49.090739  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01117 > 2) by scale factor 0.664194\nI1208 06:56:53.241166  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30041 > 2) by scale factor 0.465072\nI1208 06:56:57.389791  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.65375 > 2) by scale factor 0.547382\nI1208 06:57:01.539206  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47147 > 2) by scale factor 0.576124\nI1208 06:57:05.688035  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78498 > 2) by scale factor 0.528404\nI1208 06:57:09.836432  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.01922 > 2) by scale factor 0.662422\nI1208 06:57:13.985416  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79715 > 2) by scale factor 0.526711\nI1208 06:57:18.133803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18025 > 2) by scale factor 0.628882\nI1208 06:57:22.282843  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56674 > 2) by scale factor 0.779199\nI1208 
06:57:26.431733  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58075 > 2) by scale factor 0.558543\nI1208 06:57:30.580787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12924 > 2) by scale factor 0.639133\nI1208 06:57:34.729441  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36994 > 2) by scale factor 0.593482\nI1208 06:57:38.878099  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73523 > 2) by scale factor 0.731201\nI1208 06:57:43.027032  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76331 > 2) by scale factor 0.531447\nI1208 06:57:47.176383  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86773 > 2) by scale factor 0.697415\nI1208 06:57:51.324848  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00477 > 2) by scale factor 0.499404\nI1208 06:57:55.473393  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.79388 > 2) by scale factor 0.715849\nI1208 06:57:59.623509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95813 > 2) by scale factor 0.676102\nI1208 06:58:03.771983  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64324 > 2) by scale factor 0.756646\nI1208 06:58:07.920740  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45378 > 2) by scale factor 0.449057\nI1208 06:58:12.069931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40981 > 2) by scale factor 0.453534\nI1208 06:58:16.218947  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.8656 > 2) by scale factor 0.697935\nI1208 06:58:20.367884  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60393 > 2) by scale factor 0.434411\nI1208 06:58:24.517113  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 3.75501 > 2) by scale factor 0.532622\nI1208 06:58:28.666185  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.63738 > 2) by scale factor 0.431278\nI1208 06:58:32.815106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29631 > 2) by scale factor 0.465516\nI1208 06:58:36.964454  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14916 > 2) by scale factor 0.63509\nI1208 06:58:41.113113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6564 > 2) by scale factor 0.546987\nI1208 06:58:45.261886  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57205 > 2) by scale factor 0.777589\nI1208 06:58:49.410531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.41353 > 2) by scale factor 0.828662\nI1208 06:58:53.559372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68284 > 2) by scale factor 0.543059\nI1208 06:58:57.708094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45852 > 2) by scale factor 0.813498\nI1208 06:59:01.857022  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.98491 > 2) by scale factor 0.670036\nI1208 06:59:06.006198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62353 > 2) by scale factor 0.762331\nI1208 06:59:10.154757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10286 > 2) by scale factor 0.644567\nI1208 06:59:14.303463  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14017 > 2) by scale factor 0.636907\nI1208 06:59:18.452226  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95677 > 2) by scale factor 0.505463\nI1208 06:59:22.600731  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88184 > 2) by scale factor 0.515219\nI1208 
06:59:26.749270  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73649 > 2) by scale factor 0.730862\nI1208 06:59:30.897387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60251 > 2) by scale factor 0.76849\nI1208 06:59:35.045706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44992 > 2) by scale factor 0.579724\nI1208 06:59:39.194747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14997 > 2) by scale factor 0.481931\nI1208 06:59:43.343257  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96198 > 2) by scale factor 0.675224\nI1208 06:59:47.491197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33632 > 2) by scale factor 0.599463\nI1208 06:59:51.639375  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74781 > 2) by scale factor 0.533645\nI1208 06:59:55.788761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61612 > 2) by scale factor 0.76449\nI1208 06:59:59.936822  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.2183 > 2) by scale factor 0.621445\nI1208 07:00:04.084949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11192 > 2) by scale factor 0.642691\nI1208 07:00:08.232920  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82449 > 2) by scale factor 0.522945\nI1208 07:00:12.381899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.78065 > 2) by scale factor 0.52901\nI1208 07:00:16.530532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22424 > 2) by scale factor 0.473458\nI1208 07:00:20.678773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18947 > 2) by scale factor 0.627063\nI1208 07:00:24.827513  1922 sgd_solver.cpp:152] Gradient clipping: scaling 
down gradients (L2 norm 2.03481 > 2) by scale factor 0.982891\nI1208 07:00:28.976832  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03103 > 2) by scale factor 0.659841\nI1208 07:00:33.125752  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96629 > 2) by scale factor 0.50425\nI1208 07:00:37.274900  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28831 > 2) by scale factor 0.608215\nI1208 07:00:41.423565  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29173 > 2) by scale factor 0.466013\nI1208 07:00:45.575026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.67764 > 2) by scale factor 0.427566\nI1208 07:00:49.724870  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.56176 > 2) by scale factor 0.780713\nI1208 07:00:53.873808  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46293 > 2) by scale factor 0.577545\nI1208 07:00:58.022637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87755 > 2) by scale factor 0.51579\nI1208 07:01:02.171963  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60897 > 2) by scale factor 0.766585\nI1208 07:01:06.320888  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0895 > 2) by scale factor 0.647354\nI1208 07:01:10.469862  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36529 > 2) by scale factor 0.594302\nI1208 07:01:14.618913  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25277 > 2) by scale factor 0.614861\nI1208 07:01:18.768712  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15585 > 2) by scale factor 0.481249\nI1208 07:01:22.917240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.7967 > 2) by scale factor 0.715129\nI1208 
07:01:27.066190  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.18 > 2) by scale factor 0.628932\nI1208 07:01:31.215593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12796 > 2) by scale factor 0.639395\nI1208 07:01:35.363811  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96992 > 2) by scale factor 0.67342\nI1208 07:01:39.513198  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35624 > 2) by scale factor 0.459111\nI1208 07:01:43.662288  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33267 > 2) by scale factor 0.600119\nI1208 07:01:47.811033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31441 > 2) by scale factor 0.603426\nI1208 07:01:51.960896  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67991 > 2) by scale factor 0.746295\nI1208 07:01:56.110611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.43511 > 2) by scale factor 0.821319\nI1208 07:02:04.405215  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.0236 > 2) by scale factor 0.661462\nI1208 07:02:08.554644  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86479 > 2) by scale factor 0.517492\nI1208 07:02:12.703584  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00791 > 2) by scale factor 0.664913\nI1208 07:02:16.851960  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27468 > 2) by scale factor 0.610746\nI1208 07:02:21.001587  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49609 > 2) by scale factor 0.801255\nI1208 07:02:25.151062  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9782 > 2) by scale factor 0.502739\nI1208 07:02:29.299803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 4.03507 > 2) by scale factor 0.495654\nI1208 07:02:33.447788  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.57659 > 2) by scale factor 0.77622\nI1208 07:02:37.597553  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28916 > 2) by scale factor 0.466291\nI1208 07:02:41.747103  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.66757 > 2) by scale factor 0.54532\nI1208 07:02:45.895536  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99523 > 2) by scale factor 0.667728\nI1208 07:02:50.043809  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.89096 > 2) by scale factor 0.691812\nI1208 07:02:54.193611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25437 > 2) by scale factor 0.614559\nI1208 07:02:58.344027  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.76606 > 2) by scale factor 0.72305\nI1208 07:03:02.492506  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38378 > 2) by scale factor 0.591055\nI1208 07:03:06.641778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.51339 > 2) by scale factor 0.795737\nI1208 07:03:10.789754  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.35378 > 2) by scale factor 0.849696\nI1208 07:03:10.799852  1922 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1208 07:05:44.123423  1922 solver.cpp:404]     Test net output #0: accuracy = 0.138294\nI1208 07:05:44.124006  1922 solver.cpp:404]     Test net output #1: loss = 18.0927 (* 1 = 18.0927 loss)\nI1208 07:05:48.048287  1922 solver.cpp:228] Iteration 15900, loss = 17.6606\nI1208 07:05:48.048326  1922 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1208 07:05:48.048344  1922 solver.cpp:244]     Train net output #1: loss = 17.6606 (* 1 = 17.6606 loss)\nI1208 
07:05:48.266003  1922 sgd_solver.cpp:166] Iteration 15900, lr = 2.385\nI1208 07:05:48.275104  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.63772 > 2) by scale factor 0.549796\nI1208 07:05:52.425420  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.14001 > 2) by scale factor 0.636941\nI1208 07:05:56.575978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17235 > 2) by scale factor 0.630447\nI1208 07:06:00.726548  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95704 > 2) by scale factor 0.505428\nI1208 07:06:04.877362  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.61563 > 2) by scale factor 0.553154\nI1208 07:06:09.027721  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92486 > 2) by scale factor 0.509572\nI1208 07:06:13.178253  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.87614 > 2) by scale factor 0.515978\nI1208 07:06:17.329033  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27981 > 2) by scale factor 0.46731\nI1208 07:06:21.480324  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74373 > 2) by scale factor 0.534226\nI1208 07:06:25.631670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27854 > 2) by scale factor 0.877756\nI1208 07:06:29.782747  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.77879 > 2) by scale factor 0.719738\nI1208 07:06:33.933158  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39249 > 2) by scale factor 0.589538\nI1208 07:06:38.083343  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41202 > 2) by scale factor 0.586163\nI1208 07:06:42.233464  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36464 > 2) by scale factor 
0.594417\nI1208 07:06:46.383534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9625 > 2) by scale factor 0.675106\nI1208 07:06:50.533015  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46844 > 2) by scale factor 0.576629\nI1208 07:06:54.683845  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.10013 > 2) by scale factor 0.645133\nI1208 07:06:58.833770  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21248 > 2) by scale factor 0.474779\nI1208 07:07:02.984323  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92714 > 2) by scale factor 0.68326\nI1208 07:07:07.134371  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.88706 > 2) by scale factor 0.692746\nI1208 07:07:11.285368  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80857 > 2) by scale factor 0.525131\nI1208 07:07:15.435914  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.11789 > 2) by scale factor 0.64146\nI1208 07:07:19.586299  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93978 > 2) by scale factor 0.507642\nI1208 07:07:23.737145  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74005 > 2) by scale factor 0.534753\nI1208 07:07:27.887614  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.14936 > 2) by scale factor 0.482002\nI1208 07:07:32.037113  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90763 > 2) by scale factor 0.687846\nI1208 07:07:36.186102  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.82401 > 2) by scale factor 0.708212\nI1208 07:07:40.337184  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23222 > 2) by scale factor 0.618769\nI1208 07:07:44.487263  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 3.98366 > 2) by scale factor 0.502051\nI1208 07:07:48.637925  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.45801 > 2) by scale factor 0.448631\nI1208 07:07:52.788518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.05227 > 2) by scale factor 0.655251\nI1208 07:07:56.939106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7003 > 2) by scale factor 0.540496\nI1208 07:08:01.090687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75181 > 2) by scale factor 0.726794\nI1208 07:08:05.242079  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.9125 > 2) by scale factor 0.686695\nI1208 07:08:09.392261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22451 > 2) by scale factor 0.62025\nI1208 07:08:13.542596  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.91051 > 2) by scale factor 0.687164\nI1208 07:08:17.693193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72214 > 2) by scale factor 0.537326\nI1208 07:08:21.844882  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88764 > 2) by scale factor 0.514451\nI1208 07:08:25.995599  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52658 > 2) by scale factor 0.567122\nI1208 07:08:30.146106  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62229 > 2) by scale factor 0.762694\nI1208 07:08:34.297278  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.75846 > 2) by scale factor 0.725043\nI1208 07:08:38.446696  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31719 > 2) by scale factor 0.863114\nI1208 07:08:42.596781  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.99125 > 2) by scale factor 
0.501096\nI1208 07:08:46.746619  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03858 > 2) by scale factor 0.396937\nI1208 07:08:50.896930  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6913 > 2) by scale factor 0.541815\nI1208 07:08:55.047349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.8655 > 2) by scale factor 0.517398\nI1208 07:08:59.197543  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4086 > 2) by scale factor 0.830359\nI1208 07:09:03.347939  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66892 > 2) by scale factor 0.749366\nI1208 07:09:07.500016  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19088 > 2) by scale factor 0.626787\nI1208 07:09:11.650790  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.28414 > 2) by scale factor 0.608987\nI1208 07:09:15.801252  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03801 > 2) by scale factor 0.495293\nI1208 07:09:19.952745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.60323 > 2) by scale factor 0.768276\nI1208 07:09:24.103413  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49957 > 2) by scale factor 0.800138\nI1208 07:09:28.253486  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26285 > 2) by scale factor 0.612961\nI1208 07:09:32.403515  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.51344 > 2) by scale factor 0.569242\nI1208 07:09:36.555197  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.4107 > 2) by scale factor 0.58639\nI1208 07:09:40.706534  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.31579 > 2) by scale factor 0.863636\nI1208 07:09:44.857301  1922 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 2.74693 > 2) by scale factor 0.728084\nI1208 07:09:49.007446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.04017 > 2) by scale factor 0.495029\nI1208 07:09:53.157990  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.20514 > 2) by scale factor 0.475609\nI1208 07:09:57.307919  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4907 > 2) by scale factor 0.802987\nI1208 07:10:01.458858  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.18252 > 2) by scale factor 0.91637\nI1208 07:10:05.609580  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.32119 > 2) by scale factor 0.602193\nI1208 07:10:09.759564  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40322 > 2) by scale factor 0.587679\nI1208 07:10:13.910236  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.95584 > 2) by scale factor 0.505581\nI1208 07:10:18.060446  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.66738 > 2) by scale factor 0.749799\nI1208 07:10:22.210851  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.08872 > 2) by scale factor 0.647517\nI1208 07:10:26.360854  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.16957 > 2) by scale factor 0.631001\nI1208 07:10:30.510269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85094 > 2) by scale factor 0.701524\nI1208 07:10:34.661182  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56951 > 2) by scale factor 0.560301\nI1208 07:10:38.810968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.90761 > 2) by scale factor 0.511821\nI1208 07:10:42.961602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.78743 > 2) by scale 
factor 0.717507\nI1208 07:10:47.112906  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.64093 > 2) by scale factor 0.757308\nI1208 07:10:51.264297  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.94359 > 2) by scale factor 0.507153\nI1208 07:10:55.416081  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.77286 > 2) by scale factor 0.530102\nI1208 07:10:59.567042  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92898 > 2) by scale factor 0.509038\nI1208 07:11:03.718634  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.20275 > 2) by scale factor 0.624464\nI1208 07:11:07.868952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22007 > 2) by scale factor 0.621105\nI1208 07:11:12.018054  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58514 > 2) by scale factor 0.773653\nI1208 07:11:16.168308  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.16448 > 2) by scale factor 0.480252\nI1208 07:11:20.318936  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62617 > 2) by scale factor 0.761565\nI1208 07:11:24.468952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.06729 > 2) by scale factor 0.652042\nI1208 07:11:32.767307  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.81356 > 2) by scale factor 0.524445\nI1208 07:11:36.917320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.50048 > 2) by scale factor 0.57135\nI1208 07:11:41.067267  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.03944 > 2) by scale factor 0.495118\nI1208 07:11:45.217717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.22682 > 2) by scale factor 0.619806\nI1208 07:11:49.367561  1922 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 2.50061 > 2) by scale factor 0.799804\nI1208 07:11:53.518918  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92053 > 2) by scale factor 0.684808\nI1208 07:11:57.668828  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.40493 > 2) by scale factor 0.587383\nI1208 07:12:01.820401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4164 > 2) by scale factor 0.827678\nI1208 07:12:05.972517  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.22649 > 2) by scale factor 0.473205\nI1208 07:12:10.121094  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35406 > 2) by scale factor 0.459341\nI1208 07:12:14.271313  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71694 > 2) by scale factor 0.538077\nI1208 07:12:18.421774  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.66115 > 2) by scale factor 0.429079\nI1208 07:12:22.570926  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.09119 > 2) by scale factor 0.647001\nI1208 07:12:26.722304  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.6679 > 2) by scale factor 0.545271\nI1208 07:12:30.873598  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.31765 > 2) by scale factor 0.602836\nI1208 07:12:35.023722  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02047 > 2) by scale factor 0.497454\nI1208 07:12:39.173478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30285 > 2) by scale factor 0.464808\nI1208 07:12:39.183630  1922 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1208 07:15:12.553731  1922 solver.cpp:404]     Test net output #0: accuracy = 0.148176\nI1208 07:15:12.554306  1922 solver.cpp:404]     Test net output #1: loss = 29.0048 (* 1 
= 29.0048 loss)\nI1208 07:15:16.478744  1922 solver.cpp:228] Iteration 16000, loss = 29.3696\nI1208 07:15:16.478785  1922 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1208 07:15:16.478801  1922 solver.cpp:244]     Train net output #1: loss = 29.3696 (* 1 = 29.3696 loss)\nI1208 07:15:16.700541  1922 sgd_solver.cpp:166] Iteration 16000, lr = 2.4\nI1208 07:15:16.709602  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.75269 > 2) by scale factor 0.53295\nI1208 07:15:20.862800  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18097 > 2) by scale factor 0.478358\nI1208 07:15:25.015820  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52116 > 2) by scale factor 0.567995\nI1208 07:15:29.168687  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.7355 > 2) by scale factor 0.422342\nI1208 07:15:33.322300  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86568 > 2) by scale factor 0.697914\nI1208 07:15:37.474593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39891 > 2) by scale factor 0.454658\nI1208 07:15:41.627704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02559 > 2) by scale factor 0.496822\nI1208 07:15:45.780787  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86259 > 2) by scale factor 0.517787\nI1208 07:15:49.933501  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.01188 > 2) by scale factor 0.498519\nI1208 07:15:54.085660  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07408 > 2) by scale factor 0.39416\nI1208 07:15:58.238566  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42834 > 2) by scale factor 0.583373\nI1208 07:16:02.391069  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50358 > 2) by 
scale factor 0.444091\nI1208 07:16:06.543757  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96577 > 2) by scale factor 0.674362\nI1208 07:16:10.697196  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.92731 > 2) by scale factor 0.509254\nI1208 07:16:14.849949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.05344 > 2) by scale factor 0.493408\nI1208 07:16:19.003039  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.70523 > 2) by scale factor 0.425059\nI1208 07:16:23.157155  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12434 > 2) by scale factor 0.484926\nI1208 07:16:27.309317  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9348 > 2) by scale factor 0.405285\nI1208 07:16:31.462450  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.21842 > 2) by scale factor 0.474111\nI1208 07:16:35.614516  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.78459 > 2) by scale factor 0.418009\nI1208 07:16:39.766261  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.24469 > 2) by scale factor 0.616391\nI1208 07:16:43.918437  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9692 > 2) by scale factor 0.503879\nI1208 07:16:48.071135  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.68713 > 2) by scale factor 0.426701\nI1208 07:16:52.224562  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.35275 > 2) by scale factor 0.596525\nI1208 07:16:56.378338  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.24633 > 2) by scale factor 0.470995\nI1208 07:17:00.530524  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.06842 > 2) by scale factor 0.491592\nI1208 07:17:04.683959  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04982 > 2) by scale factor 0.396053\nI1208 07:17:08.836899  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.35972 > 2) by scale factor 0.458745\nI1208 07:17:12.989387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.73223 > 2) by scale factor 0.535873\nI1208 07:17:17.141670  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25724 > 2) by scale factor 0.614016\nI1208 07:17:21.294477  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.82782 > 2) by scale factor 0.522491\nI1208 07:17:25.446895  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.64979 > 2) by scale factor 0.430127\nI1208 07:17:29.600949  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76751 > 2) by scale factor 0.530855\nI1208 07:17:33.754070  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93328 > 2) by scale factor 0.508481\nI1208 07:17:37.906903  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91905 > 2) by scale factor 0.510328\nI1208 07:17:42.059269  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25985 > 2) by scale factor 0.613526\nI1208 07:17:46.212478  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96622 > 2) by scale factor 0.674258\nI1208 07:17:50.365120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.86749 > 2) by scale factor 0.517132\nI1208 07:17:54.517221  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.08997 > 2) by scale factor 0.489002\nI1208 07:17:58.670637  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.30081 > 2) by scale factor 0.465028\nI1208 07:18:02.824461  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 5.0251 > 2) by scale factor 0.398002\nI1208 07:18:06.976959  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.02291 > 2) by scale factor 0.497153\nI1208 07:18:11.128444  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.68993 > 2) by scale factor 0.542016\nI1208 07:18:15.281898  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93988 > 2) by scale factor 0.507629\nI1208 07:18:19.435271  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.96049 > 2) by scale factor 0.504988\nI1208 07:18:23.586187  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81713 > 2) by scale factor 0.415185\nI1208 07:18:27.738531  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.64285 > 2) by scale factor 0.54902\nI1208 07:18:31.889778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.80661 > 2) by scale factor 0.525402\nI1208 07:18:36.041744  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71971 > 2) by scale factor 0.423755\nI1208 07:18:40.193545  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2324 > 2) by scale factor 0.472545\nI1208 07:18:44.346240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19669 > 2) by scale factor 0.625647\nI1208 07:18:48.498725  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.60132 > 2) by scale factor 0.434658\nI1208 07:18:52.650717  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.50895 > 2) by scale factor 0.797146\nI1208 07:18:56.803457  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.47853 > 2) by scale factor 0.574955\nI1208 07:19:00.956008  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.27352 > 2) by scale factor 0.610963\nI1208 07:19:05.108883  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.07407 > 2) by scale factor 0.964289\nI1208 07:19:09.262265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38807 > 2) by scale factor 0.590306\nI1208 07:19:13.414558  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.91748 > 2) by scale factor 0.510533\nI1208 07:19:17.567392  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.682 > 2) by scale factor 0.543182\nI1208 07:19:21.720489  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.68073 > 2) by scale factor 0.746067\nI1208 07:19:25.872931  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.44404 > 2) by scale factor 0.818318\nI1208 07:19:30.024778  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.67115 > 2) by scale factor 0.748741\nI1208 07:19:34.176678  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.84706 > 2) by scale factor 0.702479\nI1208 07:19:38.328508  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72822 > 2) by scale factor 0.536448\nI1208 07:19:42.481817  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02208 > 2) by scale factor 0.661797\nI1208 07:19:46.634394  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.7847 > 2) by scale factor 0.528444\nI1208 07:19:50.785912  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67933 > 2) by scale factor 0.543578\nI1208 07:19:54.938050  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.47442 > 2) by scale factor 0.446985\nI1208 07:19:59.091775  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.29321 > 2) by scale factor 0.465852\nI1208 07:20:03.244607  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 3.91992 > 2) by scale factor 0.510215\nI1208 07:20:07.396296  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.42839 > 2) by scale factor 0.583364\nI1208 07:20:11.548521  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.98716 > 2) by scale factor 0.50161\nI1208 07:20:15.701824  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.76347 > 2) by scale factor 0.531424\nI1208 07:20:19.853993  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.1834 > 2) by scale factor 0.62826\nI1208 07:20:24.005322  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.67842 > 2) by scale factor 0.543712\nI1208 07:20:28.158509  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.12034 > 2) by scale factor 0.640955\nI1208 07:20:32.311358  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71967 > 2) by scale factor 0.537682\nI1208 07:20:36.464777  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23151 > 2) by scale factor 0.618905\nI1208 07:20:40.616999  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3534 > 2) by scale factor 0.59641\nI1208 07:20:44.769603  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.2924 > 2) by scale factor 0.465939\nI1208 07:20:48.923372  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97024 > 2) by scale factor 0.503748\nI1208 07:20:53.075760  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37694 > 2) by scale factor 0.456941\nI1208 07:20:57.228193  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73993 > 2) by scale factor 0.729945\nI1208 07:21:01.380786  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.50741 > 2) by scale factor 0.443714\nI1208 07:21:05.533186  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.33925 > 2) by scale factor 0.598938\nI1208 07:21:09.685595  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.95836 > 2) by scale factor 0.67605\nI1208 07:21:13.837908  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5544 > 2) by scale factor 0.562682\nI1208 07:21:17.990263  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.39423 > 2) by scale factor 0.589235\nI1208 07:21:22.144017  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.4967 > 2) by scale factor 0.801058\nI1208 07:21:26.297427  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.85972 > 2) by scale factor 0.699368\nI1208 07:21:30.449627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52425 > 2) by scale factor 0.567497\nI1208 07:21:34.601850  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.28614 > 2) by scale factor 0.466621\nI1208 07:21:38.755123  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.59872 > 2) by scale factor 0.434903\nI1208 07:21:42.907150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25628 > 2) by scale factor 0.614198\nI1208 07:21:47.059608  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.03248 > 2) by scale factor 0.659525\nI1208 07:21:51.212492  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.96276 > 2) by scale factor 0.675046\nI1208 07:21:55.366618  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.83582 > 2) by scale factor 0.705263\nI1208 07:21:59.519404  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17695 > 2) by scale factor 0.629535\nI1208 07:22:03.672055  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.74917 > 2) by scale factor 0.421126\nI1208 07:22:07.824745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.13973 > 2) by scale factor 0.636997\nI1208 07:22:07.834792  1922 solver.cpp:337] Iteration 16100, Testing net (#0)\nI1208 07:24:41.142555  1922 solver.cpp:404]     Test net output #0: accuracy = 0.182294\nI1208 07:24:41.143124  1922 solver.cpp:404]     Test net output #1: loss = 19.0908 (* 1 = 19.0908 loss)\nI1208 07:24:45.064673  1922 solver.cpp:228] Iteration 16100, loss = 22.0466\nI1208 07:24:45.064712  1922 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1208 07:24:45.064733  1922 solver.cpp:244]     Train net output #1: loss = 22.0466 (* 1 = 22.0466 loss)\nI1208 07:24:45.283154  1922 sgd_solver.cpp:166] Iteration 16100, lr = 2.415\nI1208 07:24:45.292232  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.73894 > 2) by scale factor 0.730209\nI1208 07:24:49.443096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.33764 > 2) by scale factor 0.855563\nI1208 07:24:53.594766  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.41353 > 2) by scale factor 0.585904\nI1208 07:24:57.745218  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87114 > 2) by scale factor 0.696589\nI1208 07:25:01.896399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.58962 > 2) by scale factor 0.772315\nI1208 07:25:06.047969  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.9188 > 2) by scale factor 0.510361\nI1208 07:25:10.200613  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.80241 > 2) by scale factor 0.713671\nI1208 07:25:14.352952  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.47693 > 2) by scale factor 0.807451\nI1208 07:25:18.506026  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.13915 > 2) by scale factor 0.93495\nI1208 07:25:22.657704  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.61913 > 2) by scale factor 0.763612\nI1208 07:25:26.809063  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.27256 > 2) by scale factor 0.880064\nI1208 07:25:30.960355  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.74466 > 2) by scale factor 0.421526\nI1208 07:25:35.111174  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.52273 > 2) by scale factor 0.792791\nI1208 07:25:39.262406  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.36101 > 2) by scale factor 0.59506\nI1208 07:25:43.414654  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86278 > 2) by scale factor 0.698622\nI1208 07:25:47.565915  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.9924 > 2) by scale factor 0.400609\nI1208 07:25:51.716686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.93985 > 2) by scale factor 0.680307\nI1208 07:25:55.868481  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.93593 > 2) by scale factor 0.508139\nI1208 07:26:00.019661  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5481 > 2) by scale factor 0.563682\nI1208 07:26:04.170302  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3389 > 2) by scale factor 0.599\nI1208 07:26:08.321207  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86768 > 2) by scale factor 0.697428\nI1208 07:26:12.472244  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.40122 > 2) by scale factor 0.45442\nI1208 07:26:16.622745  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.45188 > 2) by scale factor 0.8157\nI1208 07:26:20.774056  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.56644 > 2) by scale factor 0.560783\nI1208 07:26:24.925624  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.62483 > 2) by scale factor 0.761955\nI1208 07:26:29.076087  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11606 > 2) by scale factor 0.485902\nI1208 07:26:33.227172  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.81185 > 2) by scale factor 0.415641\nI1208 07:26:37.377555  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.5913 > 2) by scale factor 0.556901\nI1208 07:26:41.529742  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.59712 > 2) by scale factor 0.770084\nI1208 07:26:45.681327  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.88403 > 2) by scale factor 0.51493\nI1208 07:26:49.832240  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.12276 > 2) by scale factor 0.485112\nI1208 07:26:53.983150  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.87719 > 2) by scale factor 0.695122\nI1208 07:26:58.134238  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.3888 > 2) by scale factor 0.590179\nI1208 07:27:02.284927  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.00245 > 2) by scale factor 0.998776\nI1208 07:27:06.435627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.43465 > 2) by scale factor 0.582301\nI1208 07:27:10.586061  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69338 > 2) by scale factor 0.54151\nI1208 07:27:14.736399  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.42058 > 2) by scale factor 0.452429\nI1208 07:27:18.887773  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 
4.80487 > 2) by scale factor 0.416245\nI1208 07:27:23.039326  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.79245 > 2) by scale factor 0.527363\nI1208 07:27:27.190950  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.90192 > 2) by scale factor 0.689199\nI1208 07:27:31.343101  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.58937 > 2) by scale factor 0.43579\nI1208 07:27:35.493139  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.00763 > 2) by scale factor 0.499048\nI1208 07:27:39.645401  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28247 > 2) by scale factor 0.876245\nI1208 07:27:43.797365  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.28639 > 2) by scale factor 0.874742\nI1208 07:27:47.948449  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.97097 > 2) by scale factor 0.503655\nI1208 07:27:52.099932  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40039 > 2) by scale factor 0.833198\nI1208 07:27:56.250633  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.39309 > 2) by scale factor 0.45526\nI1208 07:28:00.400405  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.44873 > 2) by scale factor 0.579924\nI1208 07:28:04.552268  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.92482 > 2) by scale factor 0.683802\nI1208 07:28:08.702978  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.19286 > 2) by scale factor 0.626399\nI1208 07:28:12.853220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.40405 > 2) by scale factor 0.831929\nI1208 07:28:17.003803  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.77242 > 2) by scale factor 0.419075\nI1208 07:28:21.155212  1922 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.19417 > 2) by scale factor 0.476852\nI1208 07:28:25.307703  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38893 > 2) by scale factor 0.837195\nI1208 07:28:29.458909  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.71433 > 2) by scale factor 0.424239\nI1208 07:28:33.609349  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.46733 > 2) by scale factor 0.447695\nI1208 07:28:37.760627  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60236 > 2) by scale factor 0.555191\nI1208 07:28:41.911265  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.71907 > 2) by scale factor 0.537769\nI1208 07:28:46.062320  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.13415 > 2) by scale factor 0.483776\nI1208 07:28:50.213001  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.38478 > 2) by scale factor 0.838651\nI1208 07:28:54.363907  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.99773 > 2) by scale factor 0.667172\nI1208 07:28:58.515148  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72087 > 2) by scale factor 0.735058\nI1208 07:29:02.666013  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.00519 > 2) by scale factor 0.665515\nI1208 07:29:06.817667  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.52909 > 2) by scale factor 0.566719\nI1208 07:29:15.118096  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.97236 > 2) by scale factor 0.672865\nI1208 07:29:19.269129  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46256 > 2) by scale factor 0.577608\nI1208 07:29:23.419905  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 2.78095 > 2) by scale factor 0.71918\nI1208 07:29:27.570277  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.02082 > 2) by scale factor 0.662072\nI1208 07:29:31.721940  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.15044 > 2) by scale factor 0.481877\nI1208 07:29:35.873374  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.60523 > 2) by scale factor 0.55475\nI1208 07:29:40.024356  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.49644 > 2) by scale factor 0.801141\nI1208 07:29:44.174938  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.26208 > 2) by scale factor 0.613106\nI1208 07:29:48.326611  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.37097 > 2) by scale factor 0.457565\nI1208 07:29:52.477476  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.58068 > 2) by scale factor 0.558554\nI1208 07:29:56.629310  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.38136 > 2) by scale factor 0.591479\nI1208 07:30:00.780622  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.69512 > 2) by scale factor 0.541255\nI1208 07:30:04.931530  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.86078 > 2) by scale factor 0.699109\nI1208 07:30:09.083120  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.21942 > 2) by scale factor 0.901137\nI1208 07:30:13.233294  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.25561 > 2) by scale factor 0.886678\nI1208 07:30:17.385167  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.27136 > 2) by scale factor 0.468235\nI1208 07:30:21.535686  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.46289 > 2) by scale factor 0.577552\nI1208 07:30:25.686615  
1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72799 > 2) by scale factor 0.733141\nI1208 07:30:29.837220  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18878 > 2) by scale factor 0.385447\nI1208 07:30:33.988178  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.11704 > 2) by scale factor 0.485786\nI1208 07:30:38.138761  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.61565 > 2) by scale factor 0.433309\nI1208 07:30:42.290518  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.76193 > 2) by scale factor 0.419997\nI1208 07:30:46.440889  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.69478 > 2) by scale factor 0.742174\nI1208 07:30:50.592159  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.72222 > 2) by scale factor 0.734694\nI1208 07:30:54.743968  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17854 > 2) by scale factor 0.62922\nI1208 07:30:58.894706  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.54847 > 2) by scale factor 0.563623\nI1208 07:31:03.045348  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.74389 > 2) by scale factor 0.534203\nI1208 07:31:07.196419  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.17423 > 2) by scale factor 0.630073\nI1208 07:31:11.348225  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 4.18994 > 2) by scale factor 0.477334\nI1208 07:31:15.499387  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.23556 > 2) by scale factor 0.618132\nI1208 07:31:19.649818  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.25911 > 2) by scale factor 0.613665\nI1208 07:31:23.801532  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients 
(L2 norm 2.71189 > 2) by scale factor 0.737494\nI1208 07:31:27.953385  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.72654 > 2) by scale factor 0.536691\nI1208 07:31:32.103448  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 3.37992 > 2) by scale factor 0.591731\nI1208 07:31:36.254593  1922 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 2.54778 > 2) by scale factor 0.784996\nI1208 07:31:36.264657  1922 solver.cpp:337] Iteration 16200, Testing net (#0)\n"
  },
  {
    "path": "Results/lrRange3SS520kClip5Fig12a",
    "content": "I1206 09:09:45.052772 22755 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1206 09:09:45.055718 22755 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1206 09:09:45.056913 22755 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1206 09:09:45.058100 22755 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1206 09:09:45.059291 22755 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1206 09:09:45.060497 22755 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1206 09:09:45.061703 22755 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1206 09:09:45.063097 22755 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1206 09:09:45.064312 22755 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1206 09:09:45.546123 22755 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 5\nmax_lr: 3\nI1206 09:09:45.548815 22755 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1206 09:09:45.595631 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:45.595705 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:45.596678 22755 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1206 09:09:45.598340 22755 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: 
true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  
bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param 
{\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    
kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: 
\"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: 
\"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: 
\"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3\nI1206 09:09:45.600075 22755 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:09:45.602274 22755 net.cpp:100] Creating Layer dataLayer\nI1206 09:09:45.602360 22755 net.cpp:408] dataLayer -> data_top\nI1206 09:09:45.602586 22755 net.cpp:408] dataLayer -> label\nI1206 09:09:45.602720 22755 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:09:45.612563 22760 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1206 09:09:45.667789 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:45.674774 22755 net.cpp:150] Setting up dataLayer\nI1206 09:09:45.674863 22755 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI1206 09:09:45.674885 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.674896 22755 net.cpp:165] Memory required for data: 1229200\nI1206 09:09:45.674922 22755 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:09:45.674948 22755 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:09:45.674963 22755 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:09:45.675001 22755 net.cpp:408] label_dataLayer_1_split -> 
label_dataLayer_1_split_0\nI1206 09:09:45.675030 22755 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:09:45.675151 22755 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:09:45.675177 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.675192 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.675202 22755 net.cpp:165] Memory required for data: 1230000\nI1206 09:09:45.675213 22755 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:09:45.675308 22755 net.cpp:100] Creating Layer pre_conv\nI1206 09:09:45.675324 22755 net.cpp:434] pre_conv <- data_top\nI1206 09:09:45.675343 22755 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:09:45.677299 22755 net.cpp:150] Setting up pre_conv\nI1206 09:09:45.677322 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.677336 22755 net.cpp:165] Memory required for data: 7783600\nI1206 09:09:45.677426 22755 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:09:45.677448 22755 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:09:45.677459 22755 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:09:45.677481 22755 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:09:45.677501 22755 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:09:45.677752 22755 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:09:45.677772 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.677784 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.677793 22755 net.cpp:165] Memory required for data: 20890800\nI1206 09:09:45.677812 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:09:45.677856 22761 blocking_queue.cpp:50] Waiting for data\nI1206 09:09:45.677932 22755 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:09:45.677949 22755 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 
09:09:45.677964 22755 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:09:45.678515 22755 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:09:45.678536 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.678546 22755 net.cpp:165] Memory required for data: 27444400\nI1206 09:09:45.678576 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:09:45.678654 22755 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:09:45.678669 22755 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:09:45.678684 22755 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:09:45.678704 22755 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:09:45.678719 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.678728 22755 net.cpp:165] Memory required for data: 33998000\nI1206 09:09:45.678742 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1206 09:09:45.678772 22755 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:09:45.678786 22755 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:09:45.678818 22755 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:09:45.679141 22755 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:09:45.679162 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.679170 22755 net.cpp:165] Memory required for data: 40551600\nI1206 09:09:45.679188 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:09:45.679206 22755 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:09:45.679217 22755 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:09:45.679234 22755 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:09:45.679508 22755 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:09:45.679527 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.679536 22755 net.cpp:165] Memory required for data: 47105200\nI1206 09:09:45.679564 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 
09:09:45.679592 22755 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1206 09:09:45.679606 22755 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:09:45.679628 22755 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:09:45.679647 22755 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:09:45.679673 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.679683 22755 net.cpp:165] Memory required for data: 53658800\nI1206 09:09:45.679694 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:09:45.679718 22755 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:09:45.679730 22755 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:09:45.679752 22755 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:09:45.680156 22755 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:09:45.680177 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.680187 22755 net.cpp:165] Memory required for data: 60212400\nI1206 09:09:45.680205 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:09:45.680223 22755 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:09:45.680234 22755 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:09:45.680256 22755 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:09:45.680539 22755 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:09:45.680558 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.680567 22755 net.cpp:165] Memory required for data: 66766000\nI1206 09:09:45.680589 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:09:45.680613 22755 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:09:45.680625 22755 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:09:45.680640 22755 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:09:45.680658 22755 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:09:45.680672 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 
09:09:45.680682 22755 net.cpp:165] Memory required for data: 73319600\nI1206 09:09:45.680691 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:09:45.680714 22755 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:09:45.680725 22755 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:09:45.680748 22755 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:09:45.681102 22755 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:09:45.681120 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.681129 22755 net.cpp:165] Memory required for data: 99534000\nI1206 09:09:45.681154 22755 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:09:45.681185 22755 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:09:45.681197 22755 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:09:45.681221 22755 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:09:45.681560 22755 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:09:45.681578 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.681587 22755 net.cpp:165] Memory required for data: 125748400\nI1206 09:09:45.681604 22755 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:09:45.681681 22755 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:09:45.681696 22755 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:09:45.681710 22755 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:09:45.681732 22755 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:09:45.681843 22755 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:09:45.681862 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.681872 22755 net.cpp:165] Memory required for data: 151962800\nI1206 09:09:45.681885 22755 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.681900 22755 net.cpp:100] Creating Layer 
L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.681912 22755 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:09:45.681933 22755 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:09:45.681954 22755 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:09:45.682055 22755 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.682082 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.682098 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.682107 22755 net.cpp:165] Memory required for data: 204391600\nI1206 09:09:45.682118 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:09:45.682144 22755 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1206 09:09:45.682157 22755 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:09:45.682175 22755 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:09:45.682461 22755 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:09:45.682481 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.682490 22755 net.cpp:165] Memory required for data: 230606000\nI1206 09:09:45.682512 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:09:45.682529 22755 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:09:45.682541 22755 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:09:45.682562 22755 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:09:45.682584 22755 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:09:45.682597 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.682608 22755 net.cpp:165] Memory required for data: 256820400\nI1206 09:09:45.682621 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:09:45.682648 22755 
net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:09:45.682662 22755 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:09:45.682679 22755 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:09:45.683029 22755 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:09:45.683053 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.683063 22755 net.cpp:165] Memory required for data: 263374000\nI1206 09:09:45.683082 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:09:45.683099 22755 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:09:45.683110 22755 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:09:45.683132 22755 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:09:45.683408 22755 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:09:45.683426 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.683434 22755 net.cpp:165] Memory required for data: 269927600\nI1206 09:09:45.683457 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:09:45.683476 22755 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:09:45.683487 22755 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:09:45.683503 22755 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:09:45.683522 22755 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:09:45.683537 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.683547 22755 net.cpp:165] Memory required for data: 276481200\nI1206 09:09:45.683559 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:09:45.683588 22755 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:09:45.683600 22755 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:09:45.683624 22755 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:09:45.683989 22755 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:09:45.684008 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.684017 22755 
net.cpp:165] Memory required for data: 283034800\nI1206 09:09:45.684038 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:09:45.684067 22755 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:09:45.684079 22755 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:09:45.684103 22755 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:09:45.684394 22755 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:09:45.684413 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.684423 22755 net.cpp:165] Memory required for data: 289588400\nI1206 09:09:45.684444 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:09:45.684471 22755 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:09:45.684484 22755 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:09:45.684499 22755 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:09:45.684518 22755 net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:09:45.684533 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.684542 22755 net.cpp:165] Memory required for data: 296142000\nI1206 09:09:45.684552 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:09:45.684581 22755 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:09:45.684593 22755 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:09:45.684617 22755 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:09:45.684948 22755 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:09:45.684968 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.684978 22755 net.cpp:165] Memory required for data: 322356400\nI1206 09:09:45.685019 22755 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:09:45.685037 22755 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:09:45.685056 22755 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:09:45.685070 22755 net.cpp:434] L1_b2_sum_eltwise <- 
L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:09:45.685086 22755 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1206 09:09:45.685147 22755 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:09:45.685168 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.685178 22755 net.cpp:165] Memory required for data: 348570800\nI1206 09:09:45.685189 22755 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.685204 22755 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.685214 22755 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:09:45.685238 22755 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:09:45.685258 22755 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:09:45.685336 22755 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.685361 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.685374 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.685384 22755 net.cpp:165] Memory required for data: 400999600\nI1206 09:09:45.685396 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:09:45.685415 22755 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:09:45.685427 22755 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:09:45.685449 22755 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:09:45.685726 22755 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:09:45.685745 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.685755 22755 net.cpp:165] Memory required for data: 427214000\nI1206 09:09:45.685781 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:09:45.685811 22755 net.cpp:100] 
Creating Layer L1_b3_brc1_relu\nI1206 09:09:45.685827 22755 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 09:09:45.685842 22755 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:09:45.685861 22755 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:09:45.685876 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.685885 22755 net.cpp:165] Memory required for data: 453428400\nI1206 09:09:45.685899 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:09:45.685921 22755 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:09:45.685935 22755 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:09:45.685958 22755 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:09:45.686331 22755 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:09:45.686421 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.686434 22755 net.cpp:165] Memory required for data: 459982000\nI1206 09:09:45.686450 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:09:45.686467 22755 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:09:45.686480 22755 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:09:45.686501 22755 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:09:45.686794 22755 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:09:45.686820 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.686828 22755 net.cpp:165] Memory required for data: 466535600\nI1206 09:09:45.686853 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:09:45.686877 22755 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:09:45.686890 22755 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:09:45.686906 22755 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:09:45.686925 22755 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:09:45.686940 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.686950 22755 net.cpp:165] 
Memory required for data: 473089200\nI1206 09:09:45.686960 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1206 09:09:45.686993 22755 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:09:45.687007 22755 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:09:45.687026 22755 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:09:45.687378 22755 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:09:45.687397 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.687407 22755 net.cpp:165] Memory required for data: 479642800\nI1206 09:09:45.687427 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:09:45.687445 22755 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:09:45.687456 22755 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:09:45.687477 22755 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:09:45.687763 22755 net.cpp:150] Setting up L1_b3_brc3_bn\nI1206 09:09:45.687783 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.687793 22755 net.cpp:165] Memory required for data: 486196400\nI1206 09:09:45.687824 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:09:45.687849 22755 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:09:45.687862 22755 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:09:45.687878 22755 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:09:45.687898 22755 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:09:45.687913 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.687923 22755 net.cpp:165] Memory required for data: 492750000\nI1206 09:09:45.687933 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:09:45.687957 22755 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:09:45.687968 22755 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:09:45.687993 22755 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:09:45.688328 22755 
net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:09:45.688346 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.688355 22755 net.cpp:165] Memory required for data: 518964400\nI1206 09:09:45.688376 22755 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:09:45.688395 22755 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:09:45.688407 22755 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:09:45.688423 22755 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:09:45.688439 22755 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:09:45.688506 22755 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:09:45.688531 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.688541 22755 net.cpp:165] Memory required for data: 545178800\nI1206 09:09:45.688554 22755 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.688580 22755 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.688593 22755 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:09:45.688606 22755 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:09:45.688621 22755 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:09:45.688671 22755 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.688681 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.688688 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.688693 22755 net.cpp:165] Memory required for data: 597607600\nI1206 09:09:45.688697 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:09:45.688711 22755 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:09:45.688717 22755 
net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:09:45.688730 22755 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:09:45.688973 22755 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:09:45.688990 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.688999 22755 net.cpp:165] Memory required for data: 623822000\nI1206 09:09:45.689020 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:09:45.689038 22755 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:09:45.689049 22755 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:09:45.689064 22755 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:09:45.689083 22755 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:09:45.689097 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.689107 22755 net.cpp:165] Memory required for data: 650036400\nI1206 09:09:45.689119 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:09:45.689147 22755 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:09:45.689162 22755 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:09:45.689185 22755 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:09:45.689527 22755 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:09:45.689545 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.689555 22755 net.cpp:165] Memory required for data: 656590000\nI1206 09:09:45.689574 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:09:45.689597 22755 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:09:45.689609 22755 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:09:45.689625 22755 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:09:45.689923 22755 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:09:45.689941 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.689951 22755 net.cpp:165] Memory required for data: 663143600\nI1206 
09:09:45.689975 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:09:45.689991 22755 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:09:45.690003 22755 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:09:45.690019 22755 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:09:45.690039 22755 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:09:45.690054 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.690064 22755 net.cpp:165] Memory required for data: 669697200\nI1206 09:09:45.690076 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:09:45.690112 22755 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:09:45.690125 22755 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:09:45.690150 22755 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:09:45.690529 22755 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 09:09:45.690548 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.690558 22755 net.cpp:165] Memory required for data: 676250800\nI1206 09:09:45.690575 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:09:45.690611 22755 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:09:45.690624 22755 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:09:45.690640 22755 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:09:45.690946 22755 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:09:45.690971 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.690982 22755 net.cpp:165] Memory required for data: 682804400\nI1206 09:09:45.691004 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:09:45.691021 22755 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:09:45.691032 22755 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:09:45.691047 22755 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:09:45.691066 22755 net.cpp:150] Setting up 
L1_b4_brc3_relu\nI1206 09:09:45.691082 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.691092 22755 net.cpp:165] Memory required for data: 689358000\nI1206 09:09:45.691103 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:09:45.691131 22755 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:09:45.691144 22755 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:09:45.691162 22755 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:09:45.691504 22755 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:09:45.691524 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.691532 22755 net.cpp:165] Memory required for data: 715572400\nI1206 09:09:45.691553 22755 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:09:45.691572 22755 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:09:45.691584 22755 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1206 09:09:45.691597 22755 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:09:45.691620 22755 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:09:45.691673 22755 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:09:45.691699 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.691709 22755 net.cpp:165] Memory required for data: 741786800\nI1206 09:09:45.691720 22755 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.691735 22755 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.691746 22755 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:09:45.691761 22755 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:09:45.691787 22755 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 
09:09:45.691877 22755 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.691900 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.691912 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.691922 22755 net.cpp:165] Memory required for data: 794215600\nI1206 09:09:45.691934 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:09:45.691956 22755 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:09:45.691969 22755 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:09:45.691987 22755 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:09:45.692276 22755 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:09:45.692294 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.692303 22755 net.cpp:165] Memory required for data: 820430000\nI1206 09:09:45.692343 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1206 09:09:45.692366 22755 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:09:45.692379 22755 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:09:45.692395 22755 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:09:45.692414 22755 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:09:45.692442 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.692453 22755 net.cpp:165] Memory required for data: 846644400\nI1206 09:09:45.692463 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:09:45.692494 22755 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:09:45.692508 22755 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:09:45.692526 22755 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:09:45.692868 22755 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:09:45.692888 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.692898 22755 net.cpp:165] Memory required for data: 853198000\nI1206 09:09:45.692915 22755 
layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:09:45.692932 22755 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 09:09:45.692945 22755 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:09:45.692965 22755 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:09:45.693253 22755 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:09:45.693279 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.693289 22755 net.cpp:165] Memory required for data: 859751600\nI1206 09:09:45.693310 22755 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:09:45.693330 22755 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:09:45.693341 22755 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:09:45.693356 22755 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:09:45.693374 22755 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:09:45.693388 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.693398 22755 net.cpp:165] Memory required for data: 866305200\nI1206 09:09:45.693413 22755 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:09:45.693434 22755 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:09:45.693447 22755 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:09:45.693470 22755 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:09:45.693842 22755 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:09:45.693862 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.693871 22755 net.cpp:165] Memory required for data: 872858800\nI1206 09:09:45.693892 22755 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:09:45.693909 22755 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:09:45.693922 22755 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:09:45.693945 22755 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:09:45.694252 22755 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:09:45.694272 22755 net.cpp:157] 
Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.694280 22755 net.cpp:165] Memory required for data: 879412400\nI1206 09:09:45.694303 22755 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:09:45.694329 22755 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:09:45.694341 22755 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:09:45.694356 22755 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:09:45.694375 22755 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:09:45.694391 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.694399 22755 net.cpp:165] Memory required for data: 885966000\nI1206 09:09:45.694412 22755 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:09:45.694433 22755 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:09:45.694447 22755 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:09:45.694470 22755 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:09:45.694818 22755 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:09:45.694839 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.694847 22755 net.cpp:165] Memory required for data: 912180400\nI1206 09:09:45.694869 22755 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:09:45.694886 22755 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:09:45.694898 22755 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:09:45.694923 22755 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:09:45.694941 22755 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:09:45.695003 22755 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:09:45.695024 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.695034 22755 net.cpp:165] Memory required for data: 938394800\nI1206 09:09:45.695044 22755 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.695066 
22755 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.695080 22755 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:09:45.695096 22755 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:09:45.695114 22755 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:09:45.695204 22755 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.695222 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.695236 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.695245 22755 net.cpp:165] Memory required for data: 990823600\nI1206 09:09:45.695258 22755 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:09:45.695276 22755 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1206 09:09:45.695287 22755 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:09:45.695310 22755 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:09:45.695597 22755 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:09:45.695621 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.695632 22755 net.cpp:165] Memory required for data: 1017038000\nI1206 09:09:45.695654 22755 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:09:45.695670 22755 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:09:45.695683 22755 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:09:45.695696 22755 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:09:45.695716 22755 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:09:45.695730 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.695740 22755 net.cpp:165] Memory required for data: 1043252400\nI1206 09:09:45.695752 22755 layer_factory.hpp:77] Creating layer 
L1_b6_brc1_conv\nI1206 09:09:45.695775 22755 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:09:45.695788 22755 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:09:45.695818 22755 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:09:45.696167 22755 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:09:45.696187 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.696197 22755 net.cpp:165] Memory required for data: 1049806000\nI1206 09:09:45.696218 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:09:45.696234 22755 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:09:45.696245 22755 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:09:45.696267 22755 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:09:45.696552 22755 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:09:45.696575 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.696586 22755 net.cpp:165] Memory required for data: 1056359600\nI1206 09:09:45.696609 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:09:45.696635 22755 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:09:45.696647 22755 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:09:45.696665 22755 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:09:45.696683 22755 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:09:45.696698 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.696707 22755 net.cpp:165] Memory required for data: 1062913200\nI1206 09:09:45.696718 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:09:45.696756 22755 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:09:45.696770 22755 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:09:45.696794 22755 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:09:45.697168 22755 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:09:45.697188 22755 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI1206 09:09:45.697197 22755 net.cpp:165] Memory required for data: 1069466800\nI1206 09:09:45.697217 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:09:45.697242 22755 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:09:45.697254 22755 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:09:45.697271 22755 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:09:45.697557 22755 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:09:45.697576 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.697584 22755 net.cpp:165] Memory required for data: 1076020400\nI1206 09:09:45.697607 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:09:45.697625 22755 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:09:45.697638 22755 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:09:45.697651 22755 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 09:09:45.697670 22755 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:09:45.697685 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.697695 22755 net.cpp:165] Memory required for data: 1082574000\nI1206 09:09:45.697708 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:09:45.697736 22755 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:09:45.697749 22755 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:09:45.697772 22755 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:09:45.698125 22755 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:09:45.698145 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.698154 22755 net.cpp:165] Memory required for data: 1108788400\nI1206 09:09:45.698173 22755 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:09:45.698191 22755 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:09:45.698204 22755 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:09:45.698217 22755 net.cpp:434] 
L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:09:45.698240 22755 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1206 09:09:45.698302 22755 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:09:45.698319 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.698328 22755 net.cpp:165] Memory required for data: 1135002800\nI1206 09:09:45.698340 22755 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.698357 22755 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.698369 22755 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:09:45.698390 22755 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:09:45.698410 22755 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:09:45.698490 22755 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.698518 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.698532 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.698542 22755 net.cpp:165] Memory required for data: 1187431600\nI1206 09:09:45.698554 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:09:45.698571 22755 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:09:45.698583 22755 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:09:45.698607 22755 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:09:45.698915 22755 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:09:45.698945 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.698954 22755 net.cpp:165] Memory required for data: 1213646000\nI1206 09:09:45.698976 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:09:45.698995 
22755 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:09:45.699007 22755 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 09:09:45.699023 22755 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:09:45.699043 22755 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:09:45.699057 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.699067 22755 net.cpp:165] Memory required for data: 1239860400\nI1206 09:09:45.699080 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:09:45.699110 22755 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:09:45.699123 22755 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:09:45.699146 22755 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:09:45.699517 22755 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:09:45.699537 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.699545 22755 net.cpp:165] Memory required for data: 1243137200\nI1206 09:09:45.699565 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:09:45.699589 22755 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:09:45.699601 22755 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:09:45.699623 22755 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:09:45.699923 22755 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:09:45.699944 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.699954 22755 net.cpp:165] Memory required for data: 1246414000\nI1206 09:09:45.699976 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:09:45.699995 22755 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:09:45.700006 22755 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:09:45.700037 22755 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:09:45.700057 22755 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:09:45.700073 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.700083 
22755 net.cpp:165] Memory required for data: 1249690800\nI1206 09:09:45.700095 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1206 09:09:45.700125 22755 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:09:45.700139 22755 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:09:45.700157 22755 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:09:45.701942 22755 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:09:45.701963 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.701973 22755 net.cpp:165] Memory required for data: 1252967600\nI1206 09:09:45.701994 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:09:45.702018 22755 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:09:45.702031 22755 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:09:45.702054 22755 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:09:45.702343 22755 net.cpp:150] Setting up L2_b1_brc3_bn\nI1206 09:09:45.702363 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.702371 22755 net.cpp:165] Memory required for data: 1256244400\nI1206 09:09:45.702395 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:09:45.702414 22755 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:09:45.702425 22755 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:09:45.702440 22755 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:09:45.702461 22755 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:09:45.702476 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.702484 22755 net.cpp:165] Memory required for data: 1259521200\nI1206 09:09:45.702495 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:09:45.702525 22755 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:09:45.702539 22755 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:09:45.702569 22755 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 
09:09:45.703032 22755 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:09:45.703052 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.703063 22755 net.cpp:165] Memory required for data: 1272628400\nI1206 09:09:45.703083 22755 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:09:45.703112 22755 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:09:45.703127 22755 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:09:45.703152 22755 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:09:45.703639 22755 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:09:45.703658 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.703667 22755 net.cpp:165] Memory required for data: 1285735600\nI1206 09:09:45.703687 22755 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:09:45.703707 22755 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1206 09:09:45.703718 22755 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:09:45.703732 22755 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:09:45.703758 22755 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:09:45.703810 22755 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:09:45.703830 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.703838 22755 net.cpp:165] Memory required for data: 1298842800\nI1206 09:09:45.703850 22755 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.703871 22755 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.703884 22755 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:09:45.703900 22755 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:09:45.703920 22755 net.cpp:408] 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:09:45.704008 22755 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.704030 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.704044 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.704053 22755 net.cpp:165] Memory required for data: 1325057200\nI1206 09:09:45.704064 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:09:45.704087 22755 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:09:45.704102 22755 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:09:45.704118 22755 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:09:45.704401 22755 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:09:45.704419 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.704428 22755 net.cpp:165] Memory required for data: 1338164400\nI1206 09:09:45.704452 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:09:45.704476 22755 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:09:45.704489 22755 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:09:45.704504 22755 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:09:45.704524 22755 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:09:45.704540 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.704550 22755 net.cpp:165] Memory required for data: 1351271600\nI1206 09:09:45.704562 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:09:45.704586 22755 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:09:45.704598 22755 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:09:45.704622 22755 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:09:45.705051 22755 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:09:45.705071 22755 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI1206 09:09:45.705081 22755 net.cpp:165] Memory required for data: 1354548400\nI1206 09:09:45.705111 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1206 09:09:45.705128 22755 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:09:45.705140 22755 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:09:45.705163 22755 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:09:45.705480 22755 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:09:45.705500 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.705509 22755 net.cpp:165] Memory required for data: 1357825200\nI1206 09:09:45.705533 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:09:45.705559 22755 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:09:45.705571 22755 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:09:45.705586 22755 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:09:45.705606 22755 net.cpp:150] Setting up L2_b2_brc2_relu\nI1206 09:09:45.705621 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.705631 22755 net.cpp:165] Memory required for data: 1361102000\nI1206 09:09:45.705642 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:09:45.705664 22755 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:09:45.705677 22755 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:09:45.705700 22755 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:09:45.706223 22755 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:09:45.706243 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.706252 22755 net.cpp:165] Memory required for data: 1364378800\nI1206 09:09:45.706272 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:09:45.706291 22755 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:09:45.706303 22755 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:09:45.706320 22755 net.cpp:408] L2_b2_brc3_bn -> 
L2_b2_brc3_bn_top\nI1206 09:09:45.706609 22755 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:09:45.706629 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.706637 22755 net.cpp:165] Memory required for data: 1367655600\nI1206 09:09:45.706660 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:09:45.706678 22755 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:09:45.706691 22755 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:09:45.706710 22755 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:09:45.706732 22755 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:09:45.706746 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.706756 22755 net.cpp:165] Memory required for data: 1370932400\nI1206 09:09:45.706768 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:09:45.706804 22755 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:09:45.706820 22755 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1206 09:09:45.706837 22755 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:09:45.707260 22755 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:09:45.707280 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.707289 22755 net.cpp:165] Memory required for data: 1384039600\nI1206 09:09:45.707309 22755 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:09:45.707329 22755 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:09:45.707341 22755 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:09:45.707355 22755 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:09:45.707377 22755 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:09:45.707424 22755 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:09:45.707442 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.707453 22755 net.cpp:165] Memory required for data: 1397146800\nI1206 
09:09:45.707465 22755 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.707494 22755 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.707509 22755 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:09:45.707535 22755 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:09:45.707556 22755 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:09:45.707648 22755 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.707667 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.707680 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.707690 22755 net.cpp:165] Memory required for data: 1423361200\nI1206 09:09:45.707705 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1206 09:09:45.707726 22755 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:09:45.707738 22755 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:09:45.707754 22755 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:09:45.708051 22755 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:09:45.708071 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.708081 22755 net.cpp:165] Memory required for data: 1436468400\nI1206 09:09:45.708135 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:09:45.708159 22755 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:09:45.708173 22755 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:09:45.708187 22755 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:09:45.708207 22755 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:09:45.708221 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.708232 
22755 net.cpp:165] Memory required for data: 1449575600\nI1206 09:09:45.708243 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 09:09:45.708272 22755 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:09:45.708286 22755 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:09:45.708304 22755 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:09:45.708724 22755 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:09:45.708744 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.708752 22755 net.cpp:165] Memory required for data: 1452852400\nI1206 09:09:45.708772 22755 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:09:45.708803 22755 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:09:45.708818 22755 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:09:45.708837 22755 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:09:45.709137 22755 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:09:45.709161 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.709172 22755 net.cpp:165] Memory required for data: 1456129200\nI1206 09:09:45.709193 22755 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:09:45.709210 22755 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:09:45.709223 22755 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:09:45.709237 22755 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:09:45.709257 22755 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:09:45.709272 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.709281 22755 net.cpp:165] Memory required for data: 1459406000\nI1206 09:09:45.709295 22755 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:09:45.709317 22755 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:09:45.709331 22755 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:09:45.709353 22755 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 
09:09:45.709887 22755 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:09:45.709906 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.709915 22755 net.cpp:165] Memory required for data: 1462682800\nI1206 09:09:45.709936 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:09:45.709959 22755 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:09:45.709972 22755 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:09:45.710000 22755 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:09:45.710300 22755 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:09:45.710319 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.710330 22755 net.cpp:165] Memory required for data: 1465959600\nI1206 09:09:45.710352 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:09:45.710376 22755 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:09:45.710389 22755 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:09:45.710404 22755 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:09:45.710423 22755 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:09:45.710438 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.710448 22755 net.cpp:165] Memory required for data: 1469236400\nI1206 09:09:45.710458 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:09:45.710481 22755 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:09:45.710494 22755 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:09:45.710518 22755 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:09:45.710942 22755 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:09:45.710961 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.710970 22755 net.cpp:165] Memory required for data: 1482343600\nI1206 09:09:45.710990 22755 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:09:45.711009 22755 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 
09:09:45.711020 22755 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:09:45.711035 22755 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:09:45.711051 22755 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:09:45.711104 22755 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:09:45.711123 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.711133 22755 net.cpp:165] Memory required for data: 1495450800\nI1206 09:09:45.711145 22755 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.711163 22755 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.711174 22755 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:09:45.711195 22755 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:09:45.711216 22755 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:09:45.711298 22755 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.711324 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.711339 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.711349 22755 net.cpp:165] Memory required for data: 1521665200\nI1206 09:09:45.711359 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:09:45.711374 22755 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:09:45.711387 22755 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:09:45.711410 22755 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:09:45.711700 22755 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:09:45.711719 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.711727 22755 net.cpp:165] Memory required for 
data: 1534772400\nI1206 09:09:45.711750 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:09:45.711769 22755 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1206 09:09:45.711781 22755 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:09:45.711808 22755 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:09:45.711830 22755 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:09:45.711845 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.711854 22755 net.cpp:165] Memory required for data: 1547879600\nI1206 09:09:45.711874 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:09:45.711899 22755 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:09:45.711911 22755 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:09:45.711930 22755 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:09:45.712358 22755 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:09:45.712378 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.712388 22755 net.cpp:165] Memory required for data: 1551156400\nI1206 09:09:45.712409 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:09:45.712432 22755 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:09:45.712445 22755 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:09:45.712462 22755 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:09:45.712751 22755 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:09:45.712769 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.712779 22755 net.cpp:165] Memory required for data: 1554433200\nI1206 09:09:45.712810 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:09:45.712831 22755 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:09:45.712842 22755 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:09:45.712865 22755 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:09:45.712887 22755 net.cpp:150] 
Setting up L2_b4_brc2_relu\nI1206 09:09:45.712901 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.712910 22755 net.cpp:165] Memory required for data: 1557710000\nI1206 09:09:45.712921 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:09:45.712949 22755 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:09:45.712962 22755 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:09:45.712980 22755 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:09:45.713490 22755 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:09:45.713510 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.713518 22755 net.cpp:165] Memory required for data: 1560986800\nI1206 09:09:45.713537 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:09:45.713562 22755 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:09:45.713573 22755 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:09:45.713589 22755 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1206 09:09:45.713892 22755 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:09:45.713912 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.713920 22755 net.cpp:165] Memory required for data: 1564263600\nI1206 09:09:45.713943 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:09:45.713959 22755 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:09:45.713970 22755 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:09:45.713985 22755 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:09:45.714005 22755 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:09:45.714020 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.714030 22755 net.cpp:165] Memory required for data: 1567540400\nI1206 09:09:45.714040 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:09:45.714066 22755 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:09:45.714079 22755 net.cpp:434] 
L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:09:45.714102 22755 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:09:45.714511 22755 net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:09:45.714530 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.714540 22755 net.cpp:165] Memory required for data: 1580647600\nI1206 09:09:45.714557 22755 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:09:45.714581 22755 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:09:45.714594 22755 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:09:45.714608 22755 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:09:45.714640 22755 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:09:45.714689 22755 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:09:45.714709 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.714718 22755 net.cpp:165] Memory required for data: 1593754800\nI1206 09:09:45.714730 22755 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.714745 22755 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.714756 22755 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:09:45.714777 22755 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:09:45.714807 22755 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:09:45.714893 22755 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.714920 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.714936 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.714944 22755 net.cpp:165] Memory required for data: 1619969200\nI1206 09:09:45.714956 22755 
layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:09:45.714970 22755 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:09:45.714982 22755 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:09:45.715005 22755 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:09:45.715289 22755 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:09:45.715308 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.715317 22755 net.cpp:165] Memory required for data: 1633076400\nI1206 09:09:45.715338 22755 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:09:45.715355 22755 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:09:45.715366 22755 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:09:45.715381 22755 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:09:45.715401 22755 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:09:45.715415 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.715425 22755 net.cpp:165] Memory required for data: 1646183600\nI1206 09:09:45.715435 22755 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:09:45.715461 22755 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:09:45.715476 22755 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:09:45.715492 22755 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:09:45.715914 22755 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:09:45.715935 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.715945 22755 net.cpp:165] Memory required for data: 1649460400\nI1206 09:09:45.715961 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:09:45.715986 22755 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:09:45.715999 22755 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:09:45.716017 22755 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:09:45.716311 22755 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 
09:09:45.716336 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.716346 22755 net.cpp:165] Memory required for data: 1652737200\nI1206 09:09:45.716367 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:09:45.716384 22755 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:09:45.716395 22755 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:09:45.716410 22755 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:09:45.716429 22755 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:09:45.716444 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.716454 22755 net.cpp:165] Memory required for data: 1656014000\nI1206 09:09:45.716464 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:09:45.716485 22755 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:09:45.716507 22755 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:09:45.716534 22755 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 09:09:45.717075 22755 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:09:45.717097 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.717105 22755 net.cpp:165] Memory required for data: 1659290800\nI1206 09:09:45.717124 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:09:45.717147 22755 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:09:45.717161 22755 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:09:45.717178 22755 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:09:45.717465 22755 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:09:45.717489 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.717499 22755 net.cpp:165] Memory required for data: 1662567600\nI1206 09:09:45.717521 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:09:45.717538 22755 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:09:45.717550 22755 net.cpp:434] L2_b5_brc3_relu <- 
L2_b5_brc3_bn_top\nI1206 09:09:45.717566 22755 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:09:45.717584 22755 net.cpp:150] Setting up L2_b5_brc3_relu\nI1206 09:09:45.717598 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.717608 22755 net.cpp:165] Memory required for data: 1665844400\nI1206 09:09:45.717618 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:09:45.717639 22755 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:09:45.717653 22755 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:09:45.717679 22755 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:09:45.718106 22755 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:09:45.718127 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.718135 22755 net.cpp:165] Memory required for data: 1678951600\nI1206 09:09:45.718153 22755 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:09:45.718173 22755 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 09:09:45.718183 22755 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:09:45.718197 22755 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:09:45.718214 22755 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:09:45.718276 22755 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:09:45.718294 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.718304 22755 net.cpp:165] Memory required for data: 1692058800\nI1206 09:09:45.718314 22755 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.718329 22755 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.718340 22755 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:09:45.718360 22755 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:09:45.718381 22755 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:09:45.718467 22755 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.718489 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.718502 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.718513 22755 net.cpp:165] Memory required for data: 1718273200\nI1206 09:09:45.718523 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:09:45.718538 22755 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:09:45.718550 22755 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:09:45.718572 22755 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:09:45.718937 22755 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:09:45.718957 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.718967 22755 net.cpp:165] Memory required for data: 1731380400\nI1206 09:09:45.719000 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:09:45.719038 22755 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:09:45.719054 22755 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:09:45.719070 22755 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:09:45.719090 22755 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:09:45.719105 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.719115 22755 net.cpp:165] Memory required for data: 1744487600\nI1206 09:09:45.719126 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:09:45.719156 22755 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:09:45.719168 22755 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:09:45.719192 22755 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:09:45.719604 22755 net.cpp:150] 
Setting up L2_b6_brc1_conv\nI1206 09:09:45.719624 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.719632 22755 net.cpp:165] Memory required for data: 1747764400\nI1206 09:09:45.719650 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:09:45.719668 22755 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:09:45.719681 22755 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:09:45.719702 22755 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:09:45.720003 22755 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:09:45.720022 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.720031 22755 net.cpp:165] Memory required for data: 1751041200\nI1206 09:09:45.720053 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:09:45.720070 22755 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:09:45.720082 22755 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:09:45.720096 22755 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:09:45.720118 22755 net.cpp:150] Setting up L2_b6_brc2_relu\nI1206 09:09:45.720131 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.720141 22755 net.cpp:165] Memory required for data: 1754318000\nI1206 09:09:45.720151 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:09:45.720177 22755 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:09:45.720191 22755 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:09:45.720216 22755 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:09:45.720736 22755 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:09:45.720754 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.720762 22755 net.cpp:165] Memory required for data: 1757594800\nI1206 09:09:45.720780 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:09:45.720809 22755 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:09:45.720824 22755 net.cpp:434] 
L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:09:45.720847 22755 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:09:45.721153 22755 net.cpp:150] Setting up L2_b6_brc3_bn\nI1206 09:09:45.721171 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.721180 22755 net.cpp:165] Memory required for data: 1760871600\nI1206 09:09:45.721204 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:09:45.721220 22755 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:09:45.721232 22755 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:09:45.721247 22755 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:09:45.721267 22755 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:09:45.721282 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.721292 22755 net.cpp:165] Memory required for data: 1764148400\nI1206 09:09:45.721302 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:09:45.721328 22755 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:09:45.721343 22755 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1206 09:09:45.721365 22755 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:09:45.721788 22755 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:09:45.721822 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.721832 22755 net.cpp:165] Memory required for data: 1777255600\nI1206 09:09:45.721850 22755 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:09:45.721868 22755 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:09:45.721880 22755 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:09:45.721894 22755 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:09:45.721918 22755 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:09:45.721966 22755 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:09:45.721992 22755 net.cpp:157] Top shape: 100 128 16 16 
(3276800)\nI1206 09:09:45.722002 22755 net.cpp:165] Memory required for data: 1790362800\nI1206 09:09:45.722013 22755 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.722028 22755 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.722040 22755 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:09:45.722055 22755 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:09:45.722075 22755 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:09:45.722169 22755 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.722192 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.722204 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.722213 22755 net.cpp:165] Memory required for data: 1816577200\nI1206 09:09:45.722224 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1206 09:09:45.722246 22755 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:09:45.722259 22755 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:09:45.722276 22755 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:09:45.722568 22755 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:09:45.722586 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.722595 22755 net.cpp:165] Memory required for data: 1829684400\nI1206 09:09:45.722617 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:09:45.722647 22755 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:09:45.722661 22755 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:09:45.722677 22755 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:09:45.722697 22755 net.cpp:150] Setting up 
L3_b1_brc1_relu\nI1206 09:09:45.722710 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.722720 22755 net.cpp:165] Memory required for data: 1842791600\nI1206 09:09:45.722730 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:09:45.722754 22755 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:09:45.722765 22755 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:09:45.722789 22755 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:09:45.723292 22755 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:09:45.723312 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.723321 22755 net.cpp:165] Memory required for data: 1844430000\nI1206 09:09:45.723340 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:09:45.723357 22755 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:09:45.723369 22755 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:09:45.723392 22755 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:09:45.723695 22755 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:09:45.723713 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.723722 22755 net.cpp:165] Memory required for data: 1846068400\nI1206 09:09:45.723744 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:09:45.723760 22755 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:09:45.723783 22755 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:09:45.723805 22755 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:09:45.723829 22755 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:09:45.723843 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.723852 22755 net.cpp:165] Memory required for data: 1847706800\nI1206 09:09:45.723863 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:09:45.723892 22755 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:09:45.723906 22755 net.cpp:434] L3_b1_brc2_conv <- 
L3_b1_brc2_bn_top\nI1206 09:09:45.723925 22755 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:09:45.726176 22755 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 09:09:45.726198 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.726208 22755 net.cpp:165] Memory required for data: 1849345200\nI1206 09:09:45.726227 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:09:45.726249 22755 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:09:45.726263 22755 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:09:45.726279 22755 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:09:45.726579 22755 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:09:45.726599 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.726608 22755 net.cpp:165] Memory required for data: 1850983600\nI1206 09:09:45.726630 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:09:45.726653 22755 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:09:45.726666 22755 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:09:45.726681 22755 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:09:45.726701 22755 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:09:45.726716 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.726727 22755 net.cpp:165] Memory required for data: 1852622000\nI1206 09:09:45.726737 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:09:45.726763 22755 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:09:45.726778 22755 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:09:45.726806 22755 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:09:45.727458 22755 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:09:45.727478 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.727486 22755 net.cpp:165] Memory required for data: 1859175600\nI1206 09:09:45.727504 22755 layer_factory.hpp:77] Creating layer 
L3_b1_chanInc_conv\nI1206 09:09:45.727527 22755 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:09:45.727540 22755 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:09:45.727565 22755 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:09:45.728550 22755 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:09:45.728570 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.728579 22755 net.cpp:165] Memory required for data: 1865729200\nI1206 09:09:45.728597 22755 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:09:45.728615 22755 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:09:45.728627 22755 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:09:45.728641 22755 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:09:45.728657 22755 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:09:45.728720 22755 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:09:45.728739 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.728749 22755 net.cpp:165] Memory required for data: 1872282800\nI1206 09:09:45.728760 22755 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.728780 22755 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.728793 22755 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:09:45.728818 22755 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:09:45.728850 22755 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:09:45.728950 22755 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.728973 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.728987 22755 net.cpp:157] 
Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.728997 22755 net.cpp:165] Memory required for data: 1885390000\nI1206 09:09:45.729008 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1206 09:09:45.729029 22755 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:09:45.729043 22755 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:09:45.729059 22755 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:09:45.729351 22755 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:09:45.729373 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.729384 22755 net.cpp:165] Memory required for data: 1891943600\nI1206 09:09:45.729405 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:09:45.729423 22755 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:09:45.729434 22755 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:09:45.729449 22755 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:09:45.729470 22755 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:09:45.729485 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.729496 22755 net.cpp:165] Memory required for data: 1898497200\nI1206 09:09:45.729506 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:09:45.729526 22755 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:09:45.729540 22755 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:09:45.729564 22755 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:09:45.730252 22755 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:09:45.730278 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.730288 22755 net.cpp:165] Memory required for data: 1900135600\nI1206 09:09:45.730306 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:09:45.730324 22755 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:09:45.730336 22755 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 
09:09:45.730357 22755 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:09:45.730674 22755 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:09:45.730697 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.730708 22755 net.cpp:165] Memory required for data: 1901774000\nI1206 09:09:45.730731 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:09:45.730747 22755 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:09:45.730759 22755 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:09:45.730775 22755 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:09:45.730795 22755 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:09:45.730818 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.730828 22755 net.cpp:165] Memory required for data: 1903412400\nI1206 09:09:45.730837 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:09:45.730859 22755 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:09:45.730872 22755 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:09:45.730898 22755 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:09:45.731957 22755 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:09:45.731978 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.731986 22755 net.cpp:165] Memory required for data: 1905050800\nI1206 09:09:45.732005 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:09:45.732028 22755 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:09:45.732041 22755 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:09:45.732059 22755 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:09:45.732379 22755 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:09:45.732408 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.732417 22755 net.cpp:165] Memory required for data: 1906689200\nI1206 09:09:45.732439 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 
09:09:45.732456 22755 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:09:45.732470 22755 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:09:45.732484 22755 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:09:45.732506 22755 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:09:45.732520 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.732529 22755 net.cpp:165] Memory required for data: 1908327600\nI1206 09:09:45.732540 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:09:45.732568 22755 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:09:45.732583 22755 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:09:45.732606 22755 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:09:45.733264 22755 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:09:45.733284 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.733294 22755 net.cpp:165] Memory required for data: 1914881200\nI1206 09:09:45.733311 22755 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:09:45.733330 22755 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1206 09:09:45.733342 22755 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:09:45.733356 22755 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:09:45.733381 22755 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:09:45.733446 22755 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:09:45.733464 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.733474 22755 net.cpp:165] Memory required for data: 1921434800\nI1206 09:09:45.733484 22755 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.733500 22755 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.733511 22755 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 
09:09:45.733532 22755 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:09:45.733554 22755 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:09:45.733640 22755 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.733661 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.733675 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.733685 22755 net.cpp:165] Memory required for data: 1934542000\nI1206 09:09:45.733695 22755 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:09:45.733716 22755 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:09:45.733729 22755 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:09:45.733755 22755 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:09:45.734113 22755 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:09:45.734135 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.734146 22755 net.cpp:165] Memory required for data: 1941095600\nI1206 09:09:45.734165 22755 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:09:45.734179 22755 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:09:45.734189 22755 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:09:45.734203 22755 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:09:45.734218 22755 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:09:45.734225 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.734230 22755 net.cpp:165] Memory required for data: 1947649200\nI1206 09:09:45.734236 22755 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:09:45.734252 22755 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:09:45.734266 22755 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:09:45.734288 22755 net.cpp:408] 
L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:09:45.734957 22755 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:09:45.734971 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.734975 22755 net.cpp:165] Memory required for data: 1949287600\nI1206 09:09:45.734985 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:09:45.734998 22755 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:09:45.735005 22755 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:09:45.735013 22755 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:09:45.735278 22755 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:09:45.735290 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.735294 22755 net.cpp:165] Memory required for data: 1950926000\nI1206 09:09:45.735304 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:09:45.735313 22755 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:09:45.735319 22755 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:09:45.735327 22755 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 09:09:45.735337 22755 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:09:45.735343 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.735348 22755 net.cpp:165] Memory required for data: 1952564400\nI1206 09:09:45.735353 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:09:45.735368 22755 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:09:45.735375 22755 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:09:45.735388 22755 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:09:45.736505 22755 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:09:45.736526 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.736536 22755 net.cpp:165] Memory required for data: 1954202800\nI1206 09:09:45.736552 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:09:45.736572 22755 net.cpp:100] Creating 
Layer L3_b3_brc3_bn\nI1206 09:09:45.736583 22755 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:09:45.736606 22755 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1206 09:09:45.736924 22755 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:09:45.736944 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.736953 22755 net.cpp:165] Memory required for data: 1955841200\nI1206 09:09:45.736974 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:09:45.736991 22755 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:09:45.737004 22755 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:09:45.737025 22755 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:09:45.737046 22755 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:09:45.737059 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.737069 22755 net.cpp:165] Memory required for data: 1957479600\nI1206 09:09:45.737080 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:09:45.737107 22755 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:09:45.737120 22755 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:09:45.737138 22755 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:09:45.737794 22755 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:09:45.737820 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.737830 22755 net.cpp:165] Memory required for data: 1964033200\nI1206 09:09:45.737848 22755 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:09:45.737867 22755 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:09:45.737880 22755 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:09:45.737895 22755 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:09:45.737917 22755 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:09:45.737974 22755 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 
09:09:45.737993 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.738013 22755 net.cpp:165] Memory required for data: 1970586800\nI1206 09:09:45.738024 22755 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.738045 22755 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.738059 22755 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:09:45.738075 22755 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:09:45.738101 22755 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:09:45.738184 22755 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.738204 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.738215 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.738225 22755 net.cpp:165] Memory required for data: 1983694000\nI1206 09:09:45.738235 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:09:45.738257 22755 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:09:45.738270 22755 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:09:45.738287 22755 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:09:45.738582 22755 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:09:45.738601 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.738610 22755 net.cpp:165] Memory required for data: 1990247600\nI1206 09:09:45.738631 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:09:45.738647 22755 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:09:45.738659 22755 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:09:45.738679 22755 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 
09:09:45.738700 22755 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:09:45.738715 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.738725 22755 net.cpp:165] Memory required for data: 1996801200\nI1206 09:09:45.738735 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:09:45.738765 22755 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:09:45.738778 22755 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:09:45.738796 22755 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:09:45.739460 22755 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:09:45.739480 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.739490 22755 net.cpp:165] Memory required for data: 1998439600\nI1206 09:09:45.739507 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:09:45.739531 22755 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:09:45.739543 22755 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:09:45.739559 22755 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:09:45.739881 22755 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 09:09:45.739900 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.739909 22755 net.cpp:165] Memory required for data: 2000078000\nI1206 09:09:45.739930 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:09:45.739953 22755 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:09:45.739966 22755 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:09:45.739981 22755 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:09:45.740001 22755 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:09:45.740016 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.740025 22755 net.cpp:165] Memory required for data: 2001716400\nI1206 09:09:45.740036 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:09:45.740062 22755 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 
09:09:45.740075 22755 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:09:45.740098 22755 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:09:45.741173 22755 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:09:45.741201 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.741211 22755 net.cpp:165] Memory required for data: 2003354800\nI1206 09:09:45.741230 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:09:45.741248 22755 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:09:45.741261 22755 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:09:45.741277 22755 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:09:45.741585 22755 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:09:45.741603 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.741612 22755 net.cpp:165] Memory required for data: 2004993200\nI1206 09:09:45.741633 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:09:45.741657 22755 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:09:45.741669 22755 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 09:09:45.741685 22755 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:09:45.741710 22755 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:09:45.741726 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.741736 22755 net.cpp:165] Memory required for data: 2006631600\nI1206 09:09:45.741745 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:09:45.741766 22755 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:09:45.741780 22755 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:09:45.741808 22755 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:09:45.742460 22755 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:09:45.742480 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.742489 22755 net.cpp:165] Memory required for data: 2013185200\nI1206 
09:09:45.742508 22755 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:09:45.742527 22755 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:09:45.742537 22755 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:09:45.742552 22755 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:09:45.742568 22755 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:09:45.742640 22755 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:09:45.742658 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.742667 22755 net.cpp:165] Memory required for data: 2019738800\nI1206 09:09:45.742678 22755 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.742699 22755 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.742712 22755 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:09:45.742727 22755 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:09:45.742748 22755 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:09:45.742848 22755 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.742866 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.742879 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.742888 22755 net.cpp:165] Memory required for data: 2032846000\nI1206 09:09:45.742899 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:09:45.742920 22755 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:09:45.742933 22755 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:09:45.742951 22755 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:09:45.743245 22755 net.cpp:150] Setting up 
L3_b5_brc1_bn\nI1206 09:09:45.743264 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.743273 22755 net.cpp:165] Memory required for data: 2039399600\nI1206 09:09:45.743295 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:09:45.743319 22755 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:09:45.743331 22755 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:09:45.743357 22755 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:09:45.743377 22755 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:09:45.743392 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.743402 22755 net.cpp:165] Memory required for data: 2045953200\nI1206 09:09:45.743413 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:09:45.743434 22755 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:09:45.743448 22755 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:09:45.743470 22755 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:09:45.744156 22755 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:09:45.744176 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.744185 22755 net.cpp:165] Memory required for data: 2047591600\nI1206 09:09:45.744204 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:09:45.744221 22755 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:09:45.744232 22755 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:09:45.744253 22755 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:09:45.744557 22755 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:09:45.744576 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.744585 22755 net.cpp:165] Memory required for data: 2049230000\nI1206 09:09:45.744607 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:09:45.744624 22755 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:09:45.744637 22755 net.cpp:434] L3_b5_brc2_relu <- 
L3_b5_brc2_bn_top\nI1206 09:09:45.744650 22755 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:09:45.744670 22755 net.cpp:150] Setting up L3_b5_brc2_relu\nI1206 09:09:45.744684 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.744693 22755 net.cpp:165] Memory required for data: 2050868400\nI1206 09:09:45.744704 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:09:45.744731 22755 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:09:45.744745 22755 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:09:45.744762 22755 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:09:45.745839 22755 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:09:45.745859 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.745868 22755 net.cpp:165] Memory required for data: 2052506800\nI1206 09:09:45.745940 22755 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:09:45.745965 22755 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:09:45.745980 22755 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:09:45.745995 22755 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1206 09:09:45.746315 22755 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:09:45.746335 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.746343 22755 net.cpp:165] Memory required for data: 2054145200\nI1206 09:09:45.746366 22755 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:09:45.746388 22755 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:09:45.746402 22755 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:09:45.746417 22755 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:09:45.746436 22755 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:09:45.746451 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.746461 22755 net.cpp:165] Memory required for data: 2055783600\nI1206 09:09:45.746471 22755 layer_factory.hpp:77] Creating 
layer L3_b5_brc3_conv\nI1206 09:09:45.746491 22755 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:09:45.746505 22755 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1206 09:09:45.746527 22755 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:09:45.747200 22755 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:09:45.747220 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.747229 22755 net.cpp:165] Memory required for data: 2062337200\nI1206 09:09:45.747246 22755 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:09:45.747277 22755 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:09:45.747288 22755 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:09:45.747303 22755 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:09:45.747320 22755 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:09:45.747385 22755 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:09:45.747406 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.747416 22755 net.cpp:165] Memory required for data: 2068890800\nI1206 09:09:45.747426 22755 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.747442 22755 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.747454 22755 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:09:45.747476 22755 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:09:45.747496 22755 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:09:45.747588 22755 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.747608 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.747622 22755 net.cpp:157] Top shape: 100 256 
8 8 (1638400)\nI1206 09:09:45.747632 22755 net.cpp:165] Memory required for data: 2081998000\nI1206 09:09:45.747642 22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1206 09:09:45.747658 22755 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:09:45.747669 22755 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:09:45.747694 22755 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:09:45.748008 22755 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:09:45.748028 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.748036 22755 net.cpp:165] Memory required for data: 2088551600\nI1206 09:09:45.748059 22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:09:45.748081 22755 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:09:45.748095 22755 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:09:45.748111 22755 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:09:45.748131 22755 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:09:45.748144 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.748154 22755 net.cpp:165] Memory required for data: 2095105200\nI1206 09:09:45.748164 22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:09:45.748186 22755 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:09:45.748199 22755 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:09:45.748222 22755 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:09:45.748898 22755 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:09:45.748917 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.748926 22755 net.cpp:165] Memory required for data: 2096743600\nI1206 09:09:45.748944 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:09:45.748962 22755 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:09:45.748973 22755 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:09:45.748999 22755 
net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:09:45.749307 22755 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:09:45.749331 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.749341 22755 net.cpp:165] Memory required for data: 2098382000\nI1206 09:09:45.749363 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:09:45.749380 22755 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:09:45.749392 22755 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:09:45.749408 22755 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:09:45.749428 22755 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:09:45.749442 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.749461 22755 net.cpp:165] Memory required for data: 2100020400\nI1206 09:09:45.749474 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:09:45.749495 22755 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:09:45.749507 22755 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:09:45.749532 22755 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:09:45.750627 22755 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:09:45.750648 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.750656 22755 net.cpp:165] Memory required for data: 2101658800\nI1206 09:09:45.750674 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:09:45.750696 22755 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:09:45.750710 22755 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:09:45.750726 22755 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:09:45.751049 22755 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:09:45.751067 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.751076 22755 net.cpp:165] Memory required for data: 2103297200\nI1206 09:09:45.751098 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:09:45.751114 22755 net.cpp:100] 
Creating Layer L3_b6_brc3_relu\nI1206 09:09:45.751127 22755 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:09:45.751142 22755 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1206 09:09:45.751161 22755 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:09:45.751176 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.751186 22755 net.cpp:165] Memory required for data: 2104935600\nI1206 09:09:45.751196 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:09:45.751224 22755 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:09:45.751237 22755 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:09:45.751260 22755 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:09:45.751929 22755 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:09:45.751948 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.751957 22755 net.cpp:165] Memory required for data: 2111489200\nI1206 09:09:45.751976 22755 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:09:45.751993 22755 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:09:45.752005 22755 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:09:45.752018 22755 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:09:45.752043 22755 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:09:45.752107 22755 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:09:45.752125 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.752135 22755 net.cpp:165] Memory required for data: 2118042800\nI1206 09:09:45.752146 22755 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:09:45.752161 22755 net.cpp:100] Creating Layer post_bn\nI1206 09:09:45.752173 22755 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:09:45.752195 22755 net.cpp:408] post_bn -> post_bn_top\nI1206 09:09:45.752485 22755 net.cpp:150] Setting up post_bn\nI1206 09:09:45.752503 22755 
net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.752512 22755 net.cpp:165] Memory required for data: 2124596400\nI1206 09:09:45.752534 22755 layer_factory.hpp:77] Creating layer post_relu\nI1206 09:09:45.752555 22755 net.cpp:100] Creating Layer post_relu\nI1206 09:09:45.752568 22755 net.cpp:434] post_relu <- post_bn_top\nI1206 09:09:45.752584 22755 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:09:45.752604 22755 net.cpp:150] Setting up post_relu\nI1206 09:09:45.752619 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.752629 22755 net.cpp:165] Memory required for data: 2131150000\nI1206 09:09:45.752638 22755 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:09:45.752655 22755 net.cpp:100] Creating Layer post_pool\nI1206 09:09:45.752666 22755 net.cpp:434] post_pool <- post_bn_top\nI1206 09:09:45.752696 22755 net.cpp:408] post_pool -> post_pool\nI1206 09:09:45.752836 22755 net.cpp:150] Setting up post_pool\nI1206 09:09:45.752856 22755 net.cpp:157] Top shape: 100 256 1 1 (25600)\nI1206 09:09:45.752866 22755 net.cpp:165] Memory required for data: 2131252400\nI1206 09:09:45.752876 22755 layer_factory.hpp:77] Creating layer post_FC\nI1206 09:09:45.752986 22755 net.cpp:100] Creating Layer post_FC\nI1206 09:09:45.753008 22755 net.cpp:434] post_FC <- post_pool\nI1206 09:09:45.753026 22755 net.cpp:408] post_FC -> post_FC_top\nI1206 09:09:45.753335 22755 net.cpp:150] Setting up post_FC\nI1206 09:09:45.753356 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.753365 22755 net.cpp:165] Memory required for data: 2131256400\nI1206 09:09:45.753383 22755 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:09:45.753401 22755 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:09:45.753412 22755 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:09:45.753442 22755 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:09:45.753463 22755 
net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:09:45.753551 22755 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:09:45.753571 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.753583 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.753593 22755 net.cpp:165] Memory required for data: 2131264400\nI1206 09:09:45.753604 22755 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:09:45.753674 22755 net.cpp:100] Creating Layer accuracy\nI1206 09:09:45.753690 22755 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:09:45.753705 22755 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:09:45.753720 22755 net.cpp:408] accuracy -> accuracy\nI1206 09:09:45.753793 22755 net.cpp:150] Setting up accuracy\nI1206 09:09:45.753818 22755 net.cpp:157] Top shape: (1)\nI1206 09:09:45.753828 22755 net.cpp:165] Memory required for data: 2131264404\nI1206 09:09:45.753839 22755 layer_factory.hpp:77] Creating layer loss\nI1206 09:09:45.753854 22755 net.cpp:100] Creating Layer loss\nI1206 09:09:45.753865 22755 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 09:09:45.753878 22755 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:09:45.753895 22755 net.cpp:408] loss -> loss\nI1206 09:09:45.754844 22755 layer_factory.hpp:77] Creating layer loss\nI1206 09:09:45.756047 22755 net.cpp:150] Setting up loss\nI1206 09:09:45.756067 22755 net.cpp:157] Top shape: (1)\nI1206 09:09:45.756076 22755 net.cpp:160]     with loss weight 1\nI1206 09:09:45.756184 22755 net.cpp:165] Memory required for data: 2131264408\nI1206 09:09:45.756198 22755 net.cpp:226] loss needs backward computation.\nI1206 09:09:45.756211 22755 net.cpp:228] accuracy does not need backward computation.\nI1206 09:09:45.756223 22755 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:09:45.756233 22755 net.cpp:226] post_FC needs backward computation.\nI1206 09:09:45.756243 22755 net.cpp:226] post_pool 
needs backward computation.\nI1206 09:09:45.756254 22755 net.cpp:226] post_relu needs backward computation.\nI1206 09:09:45.756264 22755 net.cpp:226] post_bn needs backward computation.\nI1206 09:09:45.756273 22755 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.756284 22755 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:09:45.756295 22755 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:09:45.756305 22755 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:09:45.756315 22755 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:09:45.756325 22755 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:09:45.756336 22755 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:09:45.756346 22755 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:09:45.756356 22755 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:09:45.756366 22755 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:09:45.756389 22755 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.756402 22755 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.756412 22755 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:09:45.756423 22755 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:09:45.756434 22755 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:09:45.756444 22755 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:09:45.756454 22755 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:09:45.756464 22755 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:09:45.756474 22755 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:09:45.756484 22755 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:09:45.756494 22755 net.cpp:226] L3_b5_brc1_bn needs 
backward computation.\nI1206 09:09:45.756505 22755 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.756515 22755 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.756526 22755 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:09:45.756537 22755 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:09:45.756547 22755 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:09:45.756558 22755 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:09:45.756568 22755 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:09:45.756579 22755 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:09:45.756589 22755 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:09:45.756599 22755 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:09:45.756609 22755 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:09:45.756620 22755 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.756631 22755 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.756641 22755 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:09:45.756652 22755 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:09:45.756664 22755 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:09:45.756673 22755 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:09:45.756683 22755 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:09:45.756693 22755 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:09:45.756705 22755 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:09:45.756723 22755 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:09:45.756736 22755 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:09:45.756747 22755 
net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.756757 22755 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.756767 22755 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:09:45.756778 22755 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:09:45.756789 22755 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:09:45.756806 22755 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:09:45.756819 22755 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:09:45.756829 22755 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:09:45.756839 22755 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:09:45.756850 22755 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:09:45.756860 22755 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:09:45.756871 22755 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.756881 22755 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.756904 22755 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.756916 22755 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:09:45.756927 22755 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:09:45.756937 22755 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:09:45.756948 22755 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:09:45.756961 22755 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:09:45.756971 22755 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:09:45.756983 22755 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:09:45.756992 22755 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:09:45.757002 22755 net.cpp:226] L3_b1_brc1_bn needs backward 
computation.\nI1206 09:09:45.757014 22755 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757025 22755 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.757035 22755 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:09:45.757045 22755 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:09:45.757056 22755 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:09:45.757067 22755 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:09:45.757077 22755 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:09:45.757087 22755 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:09:45.757098 22755 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:09:45.757108 22755 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:09:45.757118 22755 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:09:45.757129 22755 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757140 22755 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.757151 22755 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:09:45.757163 22755 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:09:45.757172 22755 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:09:45.757184 22755 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:09:45.757194 22755 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:09:45.757205 22755 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:09:45.757216 22755 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:09:45.757226 22755 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:09:45.757247 22755 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:09:45.757259 22755 
net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757268 22755 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.757280 22755 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:09:45.757292 22755 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:09:45.757302 22755 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:09:45.757313 22755 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:09:45.757324 22755 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:09:45.757334 22755 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:09:45.757345 22755 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:09:45.757355 22755 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:09:45.757365 22755 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:09:45.757377 22755 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757387 22755 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.757400 22755 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:09:45.757411 22755 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:09:45.757431 22755 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:09:45.757443 22755 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:09:45.757454 22755 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:09:45.757467 22755 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:09:45.757477 22755 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:09:45.757488 22755 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:09:45.757498 22755 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:09:45.757508 22755 net.cpp:226] 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757519 22755 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.757531 22755 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:09:45.757541 22755 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:09:45.757552 22755 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:09:45.757565 22755 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:09:45.757575 22755 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:09:45.757586 22755 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:09:45.757596 22755 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:09:45.757607 22755 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:09:45.757617 22755 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:09:45.757628 22755 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757639 22755 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.757652 22755 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.757661 22755 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:09:45.757673 22755 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:09:45.757683 22755 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:09:45.757694 22755 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:09:45.757704 22755 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:09:45.757714 22755 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:09:45.757725 22755 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:09:45.757736 22755 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:09:45.757746 22755 net.cpp:226] L2_b1_brc1_bn needs backward 
computation.\nI1206 09:09:45.757757 22755 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757768 22755 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.757781 22755 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:09:45.757792 22755 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:09:45.757809 22755 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:09:45.757820 22755 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:09:45.757832 22755 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:09:45.757843 22755 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:09:45.757853 22755 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:09:45.757863 22755 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:09:45.757874 22755 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:09:45.757885 22755 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.757896 22755 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.757915 22755 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:09:45.757928 22755 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:09:45.757938 22755 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:09:45.757951 22755 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:09:45.757972 22755 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:09:45.757984 22755 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:09:45.757997 22755 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:09:45.758008 22755 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:09:45.758019 22755 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:09:45.758031 22755 
net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.758041 22755 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.758052 22755 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:09:45.758064 22755 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:09:45.758076 22755 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:09:45.758087 22755 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:09:45.758098 22755 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:09:45.758110 22755 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:09:45.758121 22755 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:09:45.758132 22755 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:09:45.758143 22755 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:09:45.758154 22755 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.758164 22755 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.758178 22755 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:09:45.758190 22755 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:09:45.758201 22755 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:09:45.758214 22755 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:09:45.758225 22755 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:09:45.758236 22755 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:09:45.758247 22755 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:09:45.758260 22755 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:09:45.758270 22755 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:09:45.758280 22755 net.cpp:226] 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.758293 22755 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.758306 22755 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1206 09:09:45.758317 22755 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:09:45.758328 22755 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:09:45.758340 22755 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:09:45.758352 22755 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:09:45.758363 22755 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:09:45.758373 22755 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:09:45.758386 22755 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:09:45.758397 22755 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:09:45.758409 22755 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.758421 22755 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.758433 22755 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.758445 22755 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:09:45.758458 22755 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:09:45.758469 22755 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:09:45.758481 22755 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:09:45.758491 22755 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:09:45.758503 22755 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:09:45.758524 22755 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:09:45.758536 22755 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:09:45.758548 22755 net.cpp:226] L1_b1_brc1_bn needs backward 
computation.\nI1206 09:09:45.758560 22755 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:09:45.758571 22755 net.cpp:226] pre_conv needs backward computation.\nI1206 09:09:45.758584 22755 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1206 09:09:45.758595 22755 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:09:45.758605 22755 net.cpp:270] This network produces output accuracy\nI1206 09:09:45.758618 22755 net.cpp:270] This network produces output loss\nI1206 09:09:45.758945 22755 net.cpp:283] Network initialization done.\nI1206 09:09:45.764714 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:45.764752 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:45.764833 22755 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1206 09:09:45.765116 22755 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1206 09:09:45.766626 22755 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  
bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: 
\"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  
type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  
}\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  
name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    
kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_brc1_conv_top\"\n  top: \nI1206 09:09:45.768043 22755 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:09:45.769134 22755 net.cpp:100] Creating Layer dataLayer\nI1206 09:09:45.769165 22755 net.cpp:408] dataLayer -> data_top\nI1206 09:09:45.769193 22755 net.cpp:408] dataLayer -> label\nI1206 09:09:45.769217 22755 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:09:45.778304 22762 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1206 09:09:45.778533 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:45.785657 22755 net.cpp:150] Setting up dataLayer\nI1206 09:09:45.785681 22755 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI1206 09:09:45.785696 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.785706 22755 net.cpp:165] Memory required for data: 1229200\nI1206 09:09:45.785717 22755 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:09:45.785740 22755 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:09:45.785751 22755 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:09:45.785766 22755 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:09:45.785787 22755 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:09:45.785953 22755 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:09:45.785974 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.786000 22755 net.cpp:157] Top shape: 100 (100)\nI1206 09:09:45.786011 22755 net.cpp:165] Memory required for data: 1230000\nI1206 09:09:45.786022 22755 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:09:45.786067 22755 net.cpp:100] Creating Layer pre_conv\nI1206 09:09:45.786082 22755 net.cpp:434] pre_conv <- data_top\nI1206 
09:09:45.786115 22755 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:09:45.786586 22755 net.cpp:150] Setting up pre_conv\nI1206 09:09:45.786607 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.786617 22755 net.cpp:165] Memory required for data: 7783600\nI1206 09:09:45.786645 22755 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:09:45.786669 22755 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:09:45.786684 22755 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:09:45.786698 22755 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:09:45.786716 22755 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:09:45.786825 22755 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:09:45.786845 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.786862 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.786872 22755 net.cpp:165] Memory required for data: 20890800\nI1206 09:09:45.786882 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:09:45.786901 22755 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:09:45.786913 22755 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:09:45.786936 22755 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:09:45.787309 22755 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:09:45.787335 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.787348 22755 net.cpp:165] Memory required for data: 27444400\nI1206 09:09:45.787381 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:09:45.787397 22755 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:09:45.787410 22755 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:09:45.787425 22755 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:09:45.787443 22755 net.cpp:150] Setting up 
L1_b1_brc1_relu\nI1206 09:09:45.787461 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.787470 22755 net.cpp:165] Memory required for data: 33998000\nI1206 09:09:45.787482 22755 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1206 09:09:45.787519 22755 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:09:45.787533 22755 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:09:45.787551 22755 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:09:45.788058 22755 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:09:45.788080 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.788089 22755 net.cpp:165] Memory required for data: 40551600\nI1206 09:09:45.788111 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:09:45.788130 22755 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:09:45.788143 22755 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:09:45.788166 22755 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:09:45.789811 22755 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:09:45.789834 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.789844 22755 net.cpp:165] Memory required for data: 47105200\nI1206 09:09:45.789873 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:09:45.789896 22755 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1206 09:09:45.789907 22755 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:09:45.789927 22755 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:09:45.789945 22755 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:09:45.789963 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.789973 22755 net.cpp:165] Memory required for data: 53658800\nI1206 09:09:45.789996 22755 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:09:45.790029 22755 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:09:45.790042 22755 net.cpp:434] L1_b1_brc2_conv <- 
L1_b1_brc2_bn_top\nI1206 09:09:45.790066 22755 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:09:45.790581 22755 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:09:45.790603 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.790612 22755 net.cpp:165] Memory required for data: 60212400\nI1206 09:09:45.790635 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:09:45.790658 22755 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:09:45.790669 22755 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:09:45.790690 22755 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:09:45.791008 22755 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:09:45.791028 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.791038 22755 net.cpp:165] Memory required for data: 66766000\nI1206 09:09:45.791057 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:09:45.791074 22755 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:09:45.791085 22755 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:09:45.791100 22755 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:09:45.791119 22755 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:09:45.791133 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.791142 22755 net.cpp:165] Memory required for data: 73319600\nI1206 09:09:45.791152 22755 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:09:45.791178 22755 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:09:45.791190 22755 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:09:45.791213 22755 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:09:45.791601 22755 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:09:45.791625 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.791635 22755 net.cpp:165] Memory required for data: 99534000\nI1206 09:09:45.791663 22755 layer_factory.hpp:77] Creating layer 
L1_b1_chanInc_conv\nI1206 09:09:45.791695 22755 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:09:45.791708 22755 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:09:45.791726 22755 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:09:45.792102 22755 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:09:45.792124 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.792132 22755 net.cpp:165] Memory required for data: 125748400\nI1206 09:09:45.792150 22755 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:09:45.792170 22755 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:09:45.792186 22755 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:09:45.792198 22755 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:09:45.792220 22755 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:09:45.792275 22755 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:09:45.792294 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.792304 22755 net.cpp:165] Memory required for data: 151962800\nI1206 09:09:45.792315 22755 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.792335 22755 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.792351 22755 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:09:45.792371 22755 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:09:45.792392 22755 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:09:45.792479 22755 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:09:45.792505 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.792544 22755 net.cpp:157] Top shape: 100 64 
32 32 (6553600)\nI1206 09:09:45.792556 22755 net.cpp:165] Memory required for data: 204391600\nI1206 09:09:45.792567 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:09:45.792584 22755 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1206 09:09:45.792598 22755 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:09:45.792619 22755 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:09:45.793066 22755 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:09:45.793090 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.793102 22755 net.cpp:165] Memory required for data: 230606000\nI1206 09:09:45.793123 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:09:45.793139 22755 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:09:45.793151 22755 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:09:45.793166 22755 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:09:45.793186 22755 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:09:45.793201 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.793211 22755 net.cpp:165] Memory required for data: 256820400\nI1206 09:09:45.793231 22755 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:09:45.793257 22755 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:09:45.793269 22755 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:09:45.793293 22755 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:09:45.793704 22755 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:09:45.793725 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.793733 22755 net.cpp:165] Memory required for data: 263374000\nI1206 09:09:45.793756 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:09:45.793773 22755 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:09:45.793783 22755 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:09:45.793805 22755 
net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:09:45.794142 22755 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:09:45.794164 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.794174 22755 net.cpp:165] Memory required for data: 269927600\nI1206 09:09:45.794198 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:09:45.794221 22755 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:09:45.794232 22755 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:09:45.794251 22755 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:09:45.794275 22755 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:09:45.794291 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.794299 22755 net.cpp:165] Memory required for data: 276481200\nI1206 09:09:45.794313 22755 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:09:45.794344 22755 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:09:45.794360 22755 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:09:45.794384 22755 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:09:45.794852 22755 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:09:45.794872 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.794883 22755 net.cpp:165] Memory required for data: 283034800\nI1206 09:09:45.794903 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:09:45.794920 22755 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:09:45.794931 22755 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:09:45.794946 22755 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:09:45.795332 22755 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:09:45.795351 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.795361 22755 net.cpp:165] Memory required for data: 289588400\nI1206 09:09:45.795384 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:09:45.795402 22755 
net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:09:45.795413 22755 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:09:45.795446 22755 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:09:45.795466 22755 net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:09:45.795480 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.795493 22755 net.cpp:165] Memory required for data: 296142000\nI1206 09:09:45.795506 22755 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:09:45.795536 22755 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:09:45.795550 22755 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:09:45.795569 22755 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:09:45.795989 22755 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:09:45.796010 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.796020 22755 net.cpp:165] Memory required for data: 322356400\nI1206 09:09:45.796058 22755 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:09:45.796079 22755 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:09:45.796092 22755 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:09:45.796109 22755 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:09:45.796133 22755 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1206 09:09:45.796191 22755 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:09:45.796210 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.796221 22755 net.cpp:165] Memory required for data: 348570800\nI1206 09:09:45.796232 22755 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.796253 22755 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.796268 22755 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:09:45.796284 
22755 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:09:45.796308 22755 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:09:45.796402 22755 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:09:45.796423 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.796437 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.796448 22755 net.cpp:165] Memory required for data: 400999600\nI1206 09:09:45.796459 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:09:45.796480 22755 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:09:45.796492 22755 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:09:45.796509 22755 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:09:45.796860 22755 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:09:45.796887 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.796901 22755 net.cpp:165] Memory required for data: 427214000\nI1206 09:09:45.796923 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:09:45.796941 22755 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:09:45.796954 22755 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 09:09:45.796969 22755 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:09:45.796988 22755 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:09:45.797001 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.797011 22755 net.cpp:165] Memory required for data: 453428400\nI1206 09:09:45.797024 22755 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:09:45.797053 22755 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:09:45.797068 22755 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:09:45.797086 22755 net.cpp:408] L1_b3_brc1_conv -> 
L1_b3_brc1_conv_top\nI1206 09:09:45.797519 22755 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:09:45.797539 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.797554 22755 net.cpp:165] Memory required for data: 459982000\nI1206 09:09:45.797571 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:09:45.797597 22755 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:09:45.797610 22755 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:09:45.797636 22755 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:09:45.797996 22755 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:09:45.798020 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.798032 22755 net.cpp:165] Memory required for data: 466535600\nI1206 09:09:45.798056 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:09:45.798072 22755 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:09:45.798084 22755 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:09:45.798102 22755 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:09:45.798121 22755 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:09:45.798138 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.798148 22755 net.cpp:165] Memory required for data: 473089200\nI1206 09:09:45.798158 22755 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1206 09:09:45.798197 22755 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:09:45.798211 22755 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:09:45.798229 22755 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:09:45.798663 22755 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:09:45.798686 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.798694 22755 net.cpp:165] Memory required for data: 479642800\nI1206 09:09:45.798715 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:09:45.798732 22755 net.cpp:100] Creating Layer 
L1_b3_brc3_bn\nI1206 09:09:45.798745 22755 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:09:45.798768 22755 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:09:45.799078 22755 net.cpp:150] Setting up L1_b3_brc3_bn\nI1206 09:09:45.799103 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.799113 22755 net.cpp:165] Memory required for data: 486196400\nI1206 09:09:45.799134 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:09:45.799149 22755 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:09:45.799160 22755 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:09:45.799175 22755 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:09:45.799193 22755 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:09:45.799207 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.799216 22755 net.cpp:165] Memory required for data: 492750000\nI1206 09:09:45.799226 22755 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:09:45.799247 22755 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:09:45.799258 22755 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:09:45.799280 22755 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:09:45.799646 22755 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:09:45.799665 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.799674 22755 net.cpp:165] Memory required for data: 518964400\nI1206 09:09:45.799691 22755 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:09:45.799708 22755 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:09:45.799720 22755 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:09:45.799732 22755 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:09:45.799747 22755 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:09:45.799836 22755 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 
09:09:45.799860 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.799868 22755 net.cpp:165] Memory required for data: 545178800\nI1206 09:09:45.799880 22755 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.799895 22755 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.799906 22755 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:09:45.799934 22755 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:09:45.799957 22755 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:09:45.800045 22755 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:09:45.800065 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.800078 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.800088 22755 net.cpp:165] Memory required for data: 597607600\nI1206 09:09:45.800099 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:09:45.800122 22755 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:09:45.800134 22755 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:09:45.800151 22755 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:09:45.800452 22755 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:09:45.800472 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.800480 22755 net.cpp:165] Memory required for data: 623822000\nI1206 09:09:45.800501 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:09:45.800518 22755 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:09:45.800529 22755 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:09:45.800542 22755 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 
09:09:45.800560 22755 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:09:45.800575 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.800583 22755 net.cpp:165] Memory required for data: 650036400\nI1206 09:09:45.800595 22755 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:09:45.800621 22755 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:09:45.800633 22755 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:09:45.800657 22755 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:09:45.801019 22755 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:09:45.801039 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.801048 22755 net.cpp:165] Memory required for data: 656590000\nI1206 09:09:45.801064 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:09:45.801086 22755 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:09:45.801100 22755 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:09:45.801115 22755 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:09:45.801424 22755 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:09:45.801446 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.801456 22755 net.cpp:165] Memory required for data: 663143600\nI1206 09:09:45.801477 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:09:45.801492 22755 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:09:45.801503 22755 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:09:45.801517 22755 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:09:45.801535 22755 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:09:45.801549 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.801558 22755 net.cpp:165] Memory required for data: 669697200\nI1206 09:09:45.801568 22755 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:09:45.801594 22755 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 
09:09:45.801609 22755 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:09:45.801625 22755 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:09:45.802654 22755 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 09:09:45.802675 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.802685 22755 net.cpp:165] Memory required for data: 676250800\nI1206 09:09:45.802703 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:09:45.802719 22755 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:09:45.802732 22755 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:09:45.802762 22755 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:09:45.803095 22755 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:09:45.803120 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.803130 22755 net.cpp:165] Memory required for data: 682804400\nI1206 09:09:45.803153 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:09:45.803169 22755 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:09:45.803180 22755 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:09:45.803195 22755 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:09:45.803215 22755 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:09:45.803230 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.803239 22755 net.cpp:165] Memory required for data: 689358000\nI1206 09:09:45.803249 22755 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:09:45.803267 22755 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:09:45.803280 22755 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:09:45.803304 22755 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:09:45.803673 22755 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:09:45.803694 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.803702 22755 net.cpp:165] Memory required for data: 715572400\nI1206 
09:09:45.803720 22755 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:09:45.803736 22755 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:09:45.803748 22755 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1206 09:09:45.803761 22755 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:09:45.803783 22755 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:09:45.803845 22755 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:09:45.803864 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.803872 22755 net.cpp:165] Memory required for data: 741786800\nI1206 09:09:45.803884 22755 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.803903 22755 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.803915 22755 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:09:45.803931 22755 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:09:45.803951 22755 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:09:45.804039 22755 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:09:45.804059 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.804074 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.804083 22755 net.cpp:165] Memory required for data: 794215600\nI1206 09:09:45.804093 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:09:45.804113 22755 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:09:45.804126 22755 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:09:45.804143 22755 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:09:45.804455 22755 net.cpp:150] Setting up 
L1_b5_brc1_bn\nI1206 09:09:45.804474 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.804483 22755 net.cpp:165] Memory required for data: 820430000\nI1206 09:09:45.804525 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1206 09:09:45.804543 22755 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:09:45.804554 22755 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:09:45.804575 22755 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:09:45.804595 22755 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:09:45.804610 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.804618 22755 net.cpp:165] Memory required for data: 846644400\nI1206 09:09:45.804630 22755 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:09:45.804659 22755 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:09:45.804672 22755 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:09:45.804695 22755 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:09:45.805120 22755 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:09:45.805163 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.805173 22755 net.cpp:165] Memory required for data: 853198000\nI1206 09:09:45.805189 22755 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:09:45.805205 22755 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 09:09:45.805217 22755 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:09:45.805238 22755 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:09:45.805609 22755 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:09:45.805629 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.805639 22755 net.cpp:165] Memory required for data: 859751600\nI1206 09:09:45.805660 22755 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:09:45.805680 22755 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:09:45.805693 22755 net.cpp:434] L1_b5_brc2_relu <- 
L1_b5_brc2_bn_top\nI1206 09:09:45.805706 22755 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:09:45.805727 22755 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:09:45.805740 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.805748 22755 net.cpp:165] Memory required for data: 866305200\nI1206 09:09:45.805758 22755 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:09:45.805778 22755 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:09:45.805789 22755 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:09:45.805820 22755 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:09:45.806216 22755 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:09:45.806234 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.806243 22755 net.cpp:165] Memory required for data: 872858800\nI1206 09:09:45.806260 22755 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:09:45.806278 22755 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:09:45.806289 22755 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:09:45.806303 22755 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:09:45.806622 22755 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:09:45.806643 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.806653 22755 net.cpp:165] Memory required for data: 879412400\nI1206 09:09:45.806674 22755 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:09:45.806694 22755 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:09:45.806706 22755 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:09:45.806721 22755 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:09:45.806740 22755 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:09:45.806754 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.806763 22755 net.cpp:165] Memory required for data: 885966000\nI1206 09:09:45.806773 22755 layer_factory.hpp:77] 
Creating layer L1_b5_brc3_conv\nI1206 09:09:45.806805 22755 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:09:45.806818 22755 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:09:45.806843 22755 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:09:45.807204 22755 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:09:45.807224 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.807231 22755 net.cpp:165] Memory required for data: 912180400\nI1206 09:09:45.807248 22755 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:09:45.807265 22755 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:09:45.807277 22755 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:09:45.807289 22755 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:09:45.807305 22755 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:09:45.807415 22755 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:09:45.807435 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.807445 22755 net.cpp:165] Memory required for data: 938394800\nI1206 09:09:45.807454 22755 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.807468 22755 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.807481 22755 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:09:45.807502 22755 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:09:45.807523 22755 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:09:45.807608 22755 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:09:45.807633 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.807647 22755 net.cpp:157] Top 
shape: 100 64 32 32 (6553600)\nI1206 09:09:45.807657 22755 net.cpp:165] Memory required for data: 990823600\nI1206 09:09:45.807667 22755 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:09:45.807680 22755 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1206 09:09:45.807691 22755 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:09:45.807713 22755 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:09:45.808018 22755 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:09:45.808037 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.808046 22755 net.cpp:165] Memory required for data: 1017038000\nI1206 09:09:45.808068 22755 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:09:45.808089 22755 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:09:45.808101 22755 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:09:45.808115 22755 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:09:45.808135 22755 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:09:45.808147 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.808156 22755 net.cpp:165] Memory required for data: 1043252400\nI1206 09:09:45.808166 22755 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:09:45.808187 22755 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:09:45.808198 22755 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:09:45.808220 22755 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:09:45.808600 22755 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:09:45.808619 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.808629 22755 net.cpp:165] Memory required for data: 1049806000\nI1206 09:09:45.808645 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:09:45.808662 22755 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:09:45.808673 22755 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 
09:09:45.808694 22755 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:09:45.809013 22755 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:09:45.809032 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.809041 22755 net.cpp:165] Memory required for data: 1056359600\nI1206 09:09:45.809061 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:09:45.809092 22755 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:09:45.809104 22755 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:09:45.809120 22755 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:09:45.809139 22755 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:09:45.809151 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.809161 22755 net.cpp:165] Memory required for data: 1062913200\nI1206 09:09:45.809172 22755 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:09:45.809200 22755 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:09:45.809211 22755 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:09:45.809245 22755 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:09:45.809689 22755 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:09:45.809708 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.809718 22755 net.cpp:165] Memory required for data: 1069466800\nI1206 09:09:45.809734 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:09:45.809756 22755 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:09:45.809769 22755 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:09:45.809785 22755 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:09:45.810104 22755 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:09:45.810137 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.810147 22755 net.cpp:165] Memory required for data: 1076020400\nI1206 09:09:45.810168 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 
09:09:45.810184 22755 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:09:45.810194 22755 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:09:45.810209 22755 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 09:09:45.810228 22755 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:09:45.810243 22755 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI1206 09:09:45.810253 22755 net.cpp:165] Memory required for data: 1082574000\nI1206 09:09:45.810263 22755 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:09:45.810289 22755 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:09:45.810303 22755 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:09:45.810322 22755 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:09:45.810696 22755 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:09:45.810716 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.810725 22755 net.cpp:165] Memory required for data: 1108788400\nI1206 09:09:45.810745 22755 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:09:45.810763 22755 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:09:45.810775 22755 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:09:45.810788 22755 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:09:45.810818 22755 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1206 09:09:45.810875 22755 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:09:45.810900 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.810910 22755 net.cpp:165] Memory required for data: 1135002800\nI1206 09:09:45.810921 22755 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.810937 22755 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.810948 22755 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- 
L1_b6_sum_eltwise_top\nI1206 09:09:45.810964 22755 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:09:45.810989 22755 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:09:45.811079 22755 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:09:45.811096 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.811110 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.811120 22755 net.cpp:165] Memory required for data: 1187431600\nI1206 09:09:45.811132 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:09:45.811153 22755 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:09:45.811167 22755 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:09:45.811183 22755 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:09:45.811497 22755 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:09:45.811517 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.811525 22755 net.cpp:165] Memory required for data: 1213646000\nI1206 09:09:45.811547 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:09:45.811576 22755 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:09:45.811589 22755 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 09:09:45.811604 22755 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:09:45.811624 22755 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:09:45.811638 22755 net.cpp:157] Top shape: 100 64 32 32 (6553600)\nI1206 09:09:45.811647 22755 net.cpp:165] Memory required for data: 1239860400\nI1206 09:09:45.811658 22755 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:09:45.811687 22755 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:09:45.811700 22755 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 
09:09:45.811724 22755 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:09:45.812125 22755 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:09:45.812145 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.812155 22755 net.cpp:165] Memory required for data: 1243137200\nI1206 09:09:45.812173 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:09:45.812196 22755 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:09:45.812208 22755 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:09:45.812230 22755 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:09:45.812536 22755 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:09:45.812556 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.812564 22755 net.cpp:165] Memory required for data: 1246414000\nI1206 09:09:45.812585 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:09:45.812609 22755 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:09:45.812620 22755 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:09:45.812641 22755 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:09:45.812661 22755 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:09:45.812676 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.812686 22755 net.cpp:165] Memory required for data: 1249690800\nI1206 09:09:45.812700 22755 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1206 09:09:45.812726 22755 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:09:45.812741 22755 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:09:45.812758 22755 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:09:45.813302 22755 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:09:45.813323 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.813331 22755 net.cpp:165] Memory required for data: 1252967600\nI1206 09:09:45.813351 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 
09:09:45.813374 22755 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:09:45.813386 22755 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:09:45.813403 22755 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:09:45.813704 22755 net.cpp:150] Setting up L2_b1_brc3_bn\nI1206 09:09:45.813724 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.813733 22755 net.cpp:165] Memory required for data: 1256244400\nI1206 09:09:45.813755 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:09:45.813771 22755 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:09:45.813782 22755 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:09:45.813804 22755 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:09:45.813827 22755 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:09:45.813841 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.813850 22755 net.cpp:165] Memory required for data: 1259521200\nI1206 09:09:45.813863 22755 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:09:45.813890 22755 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:09:45.813905 22755 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:09:45.813928 22755 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:09:45.814369 22755 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:09:45.814389 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.814411 22755 net.cpp:165] Memory required for data: 1272628400\nI1206 09:09:45.814430 22755 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:09:45.814469 22755 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:09:45.814486 22755 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:09:45.814503 22755 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:09:45.816283 22755 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:09:45.816305 22755 
net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.816315 22755 net.cpp:165] Memory required for data: 1285735600\nI1206 09:09:45.816334 22755 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:09:45.816354 22755 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1206 09:09:45.816366 22755 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:09:45.816381 22755 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:09:45.816406 22755 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:09:45.816452 22755 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:09:45.816470 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.816481 22755 net.cpp:165] Memory required for data: 1298842800\nI1206 09:09:45.816494 22755 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.816509 22755 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.816529 22755 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:09:45.816545 22755 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:09:45.816565 22755 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:09:45.816654 22755 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:09:45.816673 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.816686 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.816696 22755 net.cpp:165] Memory required for data: 1325057200\nI1206 09:09:45.816709 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:09:45.816727 22755 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:09:45.816740 22755 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 
09:09:45.816763 22755 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:09:45.817062 22755 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:09:45.817082 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.817091 22755 net.cpp:165] Memory required for data: 1338164400\nI1206 09:09:45.817116 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:09:45.817133 22755 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:09:45.817145 22755 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:09:45.817170 22755 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:09:45.817191 22755 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:09:45.817207 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.817217 22755 net.cpp:165] Memory required for data: 1351271600\nI1206 09:09:45.817229 22755 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:09:45.817252 22755 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:09:45.817265 22755 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:09:45.817283 22755 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:09:45.817718 22755 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:09:45.817741 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.817751 22755 net.cpp:165] Memory required for data: 1354548400\nI1206 09:09:45.817770 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1206 09:09:45.817788 22755 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:09:45.817807 22755 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:09:45.817847 22755 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:09:45.818174 22755 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:09:45.818194 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.818204 22755 net.cpp:165] Memory required for data: 1357825200\nI1206 09:09:45.818228 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 
09:09:45.818245 22755 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:09:45.818258 22755 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:09:45.818279 22755 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:09:45.818300 22755 net.cpp:150] Setting up L2_b2_brc2_relu\nI1206 09:09:45.818315 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.818325 22755 net.cpp:165] Memory required for data: 1361102000\nI1206 09:09:45.818336 22755 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:09:45.818364 22755 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:09:45.818377 22755 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:09:45.818395 22755 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:09:45.818994 22755 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:09:45.819015 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.819023 22755 net.cpp:165] Memory required for data: 1364378800\nI1206 09:09:45.819041 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:09:45.819067 22755 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:09:45.819079 22755 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:09:45.819097 22755 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:09:45.819406 22755 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:09:45.819423 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.819432 22755 net.cpp:165] Memory required for data: 1367655600\nI1206 09:09:45.819456 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:09:45.819474 22755 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:09:45.819485 22755 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:09:45.819502 22755 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:09:45.819530 22755 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:09:45.819545 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 
09:09:45.819555 22755 net.cpp:165] Memory required for data: 1370932400\nI1206 09:09:45.819566 22755 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:09:45.819588 22755 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:09:45.819608 22755 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1206 09:09:45.819628 22755 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:09:45.820077 22755 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:09:45.820097 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.820106 22755 net.cpp:165] Memory required for data: 1384039600\nI1206 09:09:45.820124 22755 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:09:45.820147 22755 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:09:45.820160 22755 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:09:45.820175 22755 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:09:45.820199 22755 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:09:45.820245 22755 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:09:45.820263 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.820274 22755 net.cpp:165] Memory required for data: 1397146800\nI1206 09:09:45.820286 22755 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.820302 22755 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.820313 22755 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:09:45.820335 22755 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:09:45.820364 22755 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:09:45.820459 22755 net.cpp:150] Setting up 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:09:45.820485 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.820500 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.820510 22755 net.cpp:165] Memory required for data: 1423361200\nI1206 09:09:45.820523 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1206 09:09:45.820538 22755 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:09:45.820550 22755 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:09:45.820574 22755 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:09:45.820883 22755 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:09:45.820902 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.820911 22755 net.cpp:165] Memory required for data: 1436468400\nI1206 09:09:45.820967 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:09:45.820987 22755 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:09:45.821000 22755 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:09:45.821020 22755 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:09:45.821041 22755 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:09:45.821056 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.821066 22755 net.cpp:165] Memory required for data: 1449575600\nI1206 09:09:45.821077 22755 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 09:09:45.821100 22755 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:09:45.821113 22755 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:09:45.821137 22755 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:09:45.821583 22755 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:09:45.821602 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.821611 22755 net.cpp:165] Memory required for data: 1452852400\nI1206 09:09:45.821631 22755 layer_factory.hpp:77] Creating layer 
L2_b3_brc2_bn\nI1206 09:09:45.821650 22755 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:09:45.821661 22755 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:09:45.821684 22755 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:09:45.821998 22755 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:09:45.822017 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.822026 22755 net.cpp:165] Memory required for data: 1456129200\nI1206 09:09:45.822051 22755 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:09:45.822073 22755 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:09:45.822087 22755 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:09:45.822103 22755 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:09:45.822123 22755 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:09:45.822139 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.822147 22755 net.cpp:165] Memory required for data: 1459406000\nI1206 09:09:45.822158 22755 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:09:45.822180 22755 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:09:45.822194 22755 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:09:45.822227 22755 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:09:45.822777 22755 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:09:45.822803 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.822813 22755 net.cpp:165] Memory required for data: 1462682800\nI1206 09:09:45.822836 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:09:45.822852 22755 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:09:45.822865 22755 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:09:45.822887 22755 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:09:45.823230 22755 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:09:45.823257 22755 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI1206 09:09:45.823266 22755 net.cpp:165] Memory required for data: 1465959600\nI1206 09:09:45.823292 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:09:45.823313 22755 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:09:45.823328 22755 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:09:45.823343 22755 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:09:45.823361 22755 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:09:45.823377 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.823386 22755 net.cpp:165] Memory required for data: 1469236400\nI1206 09:09:45.823400 22755 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:09:45.823421 22755 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:09:45.823433 22755 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:09:45.823460 22755 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:09:45.823896 22755 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:09:45.823915 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.823925 22755 net.cpp:165] Memory required for data: 1482343600\nI1206 09:09:45.823945 22755 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:09:45.823964 22755 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:09:45.823976 22755 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:09:45.823992 22755 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:09:45.824007 22755 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:09:45.824054 22755 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:09:45.824071 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.824081 22755 net.cpp:165] Memory required for data: 1495450800\nI1206 09:09:45.824095 22755 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.824116 22755 net.cpp:100] 
Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.824129 22755 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:09:45.824151 22755 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:09:45.824170 22755 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:09:45.824254 22755 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:09:45.824276 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.824290 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.824300 22755 net.cpp:165] Memory required for data: 1521665200\nI1206 09:09:45.824311 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:09:45.824333 22755 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:09:45.824347 22755 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:09:45.824364 22755 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:09:45.824671 22755 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:09:45.824688 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.824697 22755 net.cpp:165] Memory required for data: 1534772400\nI1206 09:09:45.824721 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:09:45.824739 22755 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1206 09:09:45.824750 22755 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:09:45.824766 22755 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:09:45.824785 22755 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:09:45.824806 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.824817 22755 net.cpp:165] Memory required for data: 1547879600\nI1206 09:09:45.824831 22755 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 
09:09:45.824856 22755 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:09:45.824883 22755 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:09:45.824908 22755 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:09:45.825354 22755 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:09:45.825374 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.825383 22755 net.cpp:165] Memory required for data: 1551156400\nI1206 09:09:45.825405 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:09:45.825428 22755 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:09:45.825440 22755 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:09:45.825458 22755 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:09:45.825757 22755 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:09:45.825776 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.825786 22755 net.cpp:165] Memory required for data: 1554433200\nI1206 09:09:45.825814 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:09:45.825834 22755 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:09:45.825845 22755 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:09:45.825860 22755 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:09:45.825881 22755 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:09:45.825896 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.825906 22755 net.cpp:165] Memory required for data: 1557710000\nI1206 09:09:45.825917 22755 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:09:45.825945 22755 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:09:45.825958 22755 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:09:45.825984 22755 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:09:45.826511 22755 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:09:45.826530 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 
09:09:45.826540 22755 net.cpp:165] Memory required for data: 1560986800\nI1206 09:09:45.826560 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:09:45.826577 22755 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:09:45.826591 22755 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:09:45.826612 22755 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1206 09:09:45.826930 22755 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:09:45.826949 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.826959 22755 net.cpp:165] Memory required for data: 1564263600\nI1206 09:09:45.826983 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:09:45.827000 22755 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:09:45.827013 22755 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:09:45.827028 22755 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:09:45.827047 22755 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:09:45.827061 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.827072 22755 net.cpp:165] Memory required for data: 1567540400\nI1206 09:09:45.827085 22755 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:09:45.827111 22755 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:09:45.827126 22755 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:09:45.827149 22755 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:09:45.827579 22755 net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:09:45.827597 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.827607 22755 net.cpp:165] Memory required for data: 1580647600\nI1206 09:09:45.827626 22755 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:09:45.827653 22755 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:09:45.827667 22755 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:09:45.827682 22755 net.cpp:434] L2_b4_sum_eltwise <- 
L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:09:45.827699 22755 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:09:45.827754 22755 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:09:45.827771 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.827792 22755 net.cpp:165] Memory required for data: 1593754800\nI1206 09:09:45.827812 22755 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.827829 22755 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.827841 22755 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:09:45.827864 22755 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:09:45.827885 22755 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:09:45.827971 22755 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:09:45.827989 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.828002 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.828012 22755 net.cpp:165] Memory required for data: 1619969200\nI1206 09:09:45.828022 22755 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:09:45.828044 22755 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:09:45.828058 22755 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:09:45.828080 22755 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:09:45.828378 22755 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:09:45.828402 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.828413 22755 net.cpp:165] Memory required for data: 1633076400\nI1206 09:09:45.828433 22755 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:09:45.828451 22755 net.cpp:100] 
Creating Layer L2_b5_brc1_relu\nI1206 09:09:45.828464 22755 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:09:45.828480 22755 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:09:45.828498 22755 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:09:45.828513 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.828523 22755 net.cpp:165] Memory required for data: 1646183600\nI1206 09:09:45.828536 22755 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:09:45.828557 22755 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:09:45.828570 22755 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:09:45.828594 22755 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:09:45.829047 22755 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:09:45.829068 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.829077 22755 net.cpp:165] Memory required for data: 1649460400\nI1206 09:09:45.829097 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:09:45.829128 22755 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:09:45.829141 22755 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:09:45.829159 22755 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:09:45.829473 22755 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:09:45.829493 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.829501 22755 net.cpp:165] Memory required for data: 1652737200\nI1206 09:09:45.829527 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:09:45.829551 22755 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:09:45.829565 22755 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:09:45.829581 22755 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:09:45.829599 22755 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:09:45.829614 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.829624 22755 
net.cpp:165] Memory required for data: 1656014000\nI1206 09:09:45.829638 22755 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:09:45.829659 22755 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:09:45.829672 22755 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:09:45.829696 22755 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 09:09:45.830260 22755 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:09:45.830288 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.830298 22755 net.cpp:165] Memory required for data: 1659290800\nI1206 09:09:45.830318 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:09:45.830338 22755 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:09:45.830349 22755 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:09:45.830373 22755 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:09:45.830677 22755 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:09:45.830695 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.830704 22755 net.cpp:165] Memory required for data: 1662567600\nI1206 09:09:45.830727 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:09:45.830754 22755 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:09:45.830770 22755 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:09:45.830785 22755 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:09:45.830811 22755 net.cpp:150] Setting up L2_b5_brc3_relu\nI1206 09:09:45.830827 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.830835 22755 net.cpp:165] Memory required for data: 1665844400\nI1206 09:09:45.830848 22755 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:09:45.830869 22755 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:09:45.830883 22755 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:09:45.830907 22755 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 
09:09:45.831364 22755 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:09:45.831383 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.831393 22755 net.cpp:165] Memory required for data: 1678951600\nI1206 09:09:45.831410 22755 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:09:45.831429 22755 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 09:09:45.831440 22755 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:09:45.831454 22755 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:09:45.831470 22755 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:09:45.831517 22755 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:09:45.831535 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.831545 22755 net.cpp:165] Memory required for data: 1692058800\nI1206 09:09:45.831559 22755 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.831581 22755 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.831594 22755 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:09:45.831616 22755 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:09:45.831637 22755 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:09:45.831722 22755 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:09:45.831746 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.831760 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.831770 22755 net.cpp:165] Memory required for data: 1718273200\nI1206 09:09:45.831782 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:09:45.831804 22755 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 
09:09:45.831818 22755 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:09:45.831841 22755 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:09:45.832141 22755 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:09:45.832160 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.832170 22755 net.cpp:165] Memory required for data: 1731380400\nI1206 09:09:45.832195 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:09:45.832231 22755 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:09:45.832257 22755 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:09:45.832273 22755 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:09:45.832293 22755 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:09:45.832309 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.832319 22755 net.cpp:165] Memory required for data: 1744487600\nI1206 09:09:45.832330 22755 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:09:45.832357 22755 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:09:45.832371 22755 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:09:45.832396 22755 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:09:45.832837 22755 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:09:45.832857 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.832867 22755 net.cpp:165] Memory required for data: 1747764400\nI1206 09:09:45.832885 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:09:45.832908 22755 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:09:45.832921 22755 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:09:45.832942 22755 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:09:45.833251 22755 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:09:45.833271 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.833281 22755 net.cpp:165] Memory required for 
data: 1751041200\nI1206 09:09:45.833303 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:09:45.833319 22755 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:09:45.833331 22755 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:09:45.833348 22755 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:09:45.833366 22755 net.cpp:150] Setting up L2_b6_brc2_relu\nI1206 09:09:45.833382 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.833391 22755 net.cpp:165] Memory required for data: 1754318000\nI1206 09:09:45.833405 22755 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:09:45.833431 22755 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:09:45.833446 22755 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:09:45.833480 22755 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:09:45.834038 22755 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:09:45.834058 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.834066 22755 net.cpp:165] Memory required for data: 1757594800\nI1206 09:09:45.834084 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:09:45.834110 22755 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:09:45.834125 22755 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:09:45.834142 22755 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:09:45.834446 22755 net.cpp:150] Setting up L2_b6_brc3_bn\nI1206 09:09:45.834471 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.834481 22755 net.cpp:165] Memory required for data: 1760871600\nI1206 09:09:45.834503 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:09:45.834519 22755 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:09:45.834532 22755 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:09:45.834547 22755 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:09:45.834568 22755 net.cpp:150] 
Setting up L2_b6_brc3_relu\nI1206 09:09:45.834583 22755 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI1206 09:09:45.834592 22755 net.cpp:165] Memory required for data: 1764148400\nI1206 09:09:45.834604 22755 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:09:45.834631 22755 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:09:45.834645 22755 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1206 09:09:45.834663 22755 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:09:45.835117 22755 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:09:45.835137 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.835147 22755 net.cpp:165] Memory required for data: 1777255600\nI1206 09:09:45.835176 22755 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:09:45.835196 22755 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:09:45.835207 22755 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:09:45.835222 22755 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:09:45.835245 22755 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:09:45.835295 22755 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:09:45.835316 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.835325 22755 net.cpp:165] Memory required for data: 1790362800\nI1206 09:09:45.835340 22755 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.835361 22755 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.835374 22755 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:09:45.835391 22755 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:09:45.835410 22755 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:09:45.835505 22755 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:09:45.835522 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.835536 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.835544 22755 net.cpp:165] Memory required for data: 1816577200\nI1206 09:09:45.835556 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1206 09:09:45.835578 22755 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:09:45.835592 22755 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:09:45.835608 22755 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:09:45.835913 22755 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:09:45.835933 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.835942 22755 net.cpp:165] Memory required for data: 1829684400\nI1206 09:09:45.835965 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:09:45.835990 22755 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:09:45.836004 22755 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:09:45.836019 22755 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:09:45.836038 22755 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:09:45.836055 22755 net.cpp:157] Top shape: 100 128 16 16 (3276800)\nI1206 09:09:45.836064 22755 net.cpp:165] Memory required for data: 1842791600\nI1206 09:09:45.836076 22755 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:09:45.836098 22755 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:09:45.836112 22755 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:09:45.836135 22755 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:09:45.836652 22755 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:09:45.836673 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.836681 22755 net.cpp:165] Memory 
required for data: 1844430000\nI1206 09:09:45.836701 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:09:45.836719 22755 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:09:45.836731 22755 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:09:45.836753 22755 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:09:45.837071 22755 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:09:45.837098 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.837108 22755 net.cpp:165] Memory required for data: 1846068400\nI1206 09:09:45.837132 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:09:45.837152 22755 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:09:45.837164 22755 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:09:45.837178 22755 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:09:45.837200 22755 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:09:45.837226 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.837236 22755 net.cpp:165] Memory required for data: 1847706800\nI1206 09:09:45.837246 22755 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:09:45.837272 22755 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:09:45.837285 22755 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:09:45.837311 22755 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:09:45.839653 22755 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 09:09:45.839675 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.839685 22755 net.cpp:165] Memory required for data: 1849345200\nI1206 09:09:45.839709 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:09:45.839732 22755 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:09:45.839745 22755 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:09:45.839763 22755 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:09:45.840090 22755 net.cpp:150] Setting up 
L3_b1_brc3_bn\nI1206 09:09:45.840109 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.840119 22755 net.cpp:165] Memory required for data: 1850983600\nI1206 09:09:45.840142 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:09:45.840160 22755 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:09:45.840173 22755 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:09:45.840194 22755 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:09:45.840216 22755 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:09:45.840230 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.840240 22755 net.cpp:165] Memory required for data: 1852622000\nI1206 09:09:45.840251 22755 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:09:45.840281 22755 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:09:45.840294 22755 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:09:45.840312 22755 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:09:45.840994 22755 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:09:45.841014 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.841023 22755 net.cpp:165] Memory required for data: 1859175600\nI1206 09:09:45.841044 22755 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:09:45.841073 22755 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:09:45.841087 22755 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:09:45.841112 22755 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:09:45.842120 22755 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:09:45.842141 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.842150 22755 net.cpp:165] Memory required for data: 1865729200\nI1206 09:09:45.842170 22755 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:09:45.842190 22755 net.cpp:100] Creating Layer 
L3_b1_sum_eltwise\nI1206 09:09:45.842201 22755 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:09:45.842216 22755 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:09:45.842231 22755 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:09:45.842298 22755 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:09:45.842316 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.842325 22755 net.cpp:165] Memory required for data: 1872282800\nI1206 09:09:45.842339 22755 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.842353 22755 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.842365 22755 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:09:45.842387 22755 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:09:45.842409 22755 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:09:45.842521 22755 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:09:45.842545 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.842558 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.842567 22755 net.cpp:165] Memory required for data: 1885390000\nI1206 09:09:45.842581 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1206 09:09:45.842597 22755 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:09:45.842610 22755 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:09:45.842634 22755 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:09:45.842953 22755 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:09:45.842973 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.842983 22755 net.cpp:165] Memory required for data: 
1891943600\nI1206 09:09:45.843005 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:09:45.843029 22755 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:09:45.843042 22755 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:09:45.843058 22755 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:09:45.843078 22755 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:09:45.843093 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.843102 22755 net.cpp:165] Memory required for data: 1898497200\nI1206 09:09:45.843114 22755 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:09:45.843138 22755 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:09:45.843150 22755 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:09:45.843174 22755 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:09:45.843873 22755 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:09:45.843899 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.843910 22755 net.cpp:165] Memory required for data: 1900135600\nI1206 09:09:45.843930 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:09:45.843948 22755 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:09:45.843961 22755 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:09:45.843984 22755 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:09:45.844321 22755 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:09:45.844339 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.844348 22755 net.cpp:165] Memory required for data: 1901774000\nI1206 09:09:45.844373 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:09:45.844396 22755 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:09:45.844410 22755 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:09:45.844425 22755 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:09:45.844444 22755 net.cpp:150] Setting up 
L3_b2_brc2_relu\nI1206 09:09:45.844460 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.844470 22755 net.cpp:165] Memory required for data: 1903412400\nI1206 09:09:45.844480 22755 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:09:45.844503 22755 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:09:45.844516 22755 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:09:45.844543 22755 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:09:45.845634 22755 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:09:45.845655 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.845664 22755 net.cpp:165] Memory required for data: 1905050800\nI1206 09:09:45.845686 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:09:45.845705 22755 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:09:45.845716 22755 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:09:45.845739 22755 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:09:45.846065 22755 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:09:45.846089 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.846099 22755 net.cpp:165] Memory required for data: 1906689200\nI1206 09:09:45.846132 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:09:45.846150 22755 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:09:45.846163 22755 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:09:45.846179 22755 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:09:45.846199 22755 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:09:45.846212 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.846222 22755 net.cpp:165] Memory required for data: 1908327600\nI1206 09:09:45.846235 22755 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:09:45.846263 22755 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:09:45.846277 22755 net.cpp:434] L3_b2_brc3_conv <- 
L3_b2_brc3_bn_top\nI1206 09:09:45.846295 22755 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:09:45.846987 22755 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:09:45.847007 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847017 22755 net.cpp:165] Memory required for data: 1914881200\nI1206 09:09:45.847035 22755 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:09:45.847054 22755 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1206 09:09:45.847065 22755 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:09:45.847079 22755 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:09:45.847106 22755 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:09:45.847165 22755 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:09:45.847192 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847201 22755 net.cpp:165] Memory required for data: 1921434800\nI1206 09:09:45.847213 22755 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.847229 22755 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.847240 22755 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:09:45.847256 22755 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:09:45.847276 22755 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:09:45.847373 22755 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:09:45.847391 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847405 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847414 22755 net.cpp:165] Memory required for data: 1934542000\nI1206 09:09:45.847426 22755 layer_factory.hpp:77] Creating 
layer L3_b3_brc1_bn\nI1206 09:09:45.847448 22755 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:09:45.847462 22755 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:09:45.847478 22755 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:09:45.847792 22755 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:09:45.847825 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847836 22755 net.cpp:165] Memory required for data: 1941095600\nI1206 09:09:45.847857 22755 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:09:45.847874 22755 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:09:45.847887 22755 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:09:45.847903 22755 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:09:45.847921 22755 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:09:45.847936 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.847946 22755 net.cpp:165] Memory required for data: 1947649200\nI1206 09:09:45.847957 22755 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:09:45.847993 22755 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:09:45.848008 22755 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:09:45.848027 22755 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:09:45.848740 22755 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:09:45.848769 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.848778 22755 net.cpp:165] Memory required for data: 1949287600\nI1206 09:09:45.848804 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:09:45.848830 22755 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:09:45.848843 22755 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:09:45.848861 22755 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:09:45.849198 22755 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:09:45.849217 22755 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.849226 22755 net.cpp:165] Memory required for data: 1950926000\nI1206 09:09:45.849248 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:09:45.849268 22755 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:09:45.849282 22755 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:09:45.849298 22755 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 09:09:45.849316 22755 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:09:45.849330 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.849340 22755 net.cpp:165] Memory required for data: 1952564400\nI1206 09:09:45.849352 22755 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:09:45.849381 22755 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:09:45.849395 22755 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:09:45.849419 22755 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:09:45.850498 22755 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:09:45.850519 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.850528 22755 net.cpp:165] Memory required for data: 1954202800\nI1206 09:09:45.850548 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:09:45.850570 22755 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:09:45.850584 22755 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:09:45.850606 22755 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1206 09:09:45.850942 22755 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:09:45.850961 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.850971 22755 net.cpp:165] Memory required for data: 1955841200\nI1206 09:09:45.850994 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:09:45.851014 22755 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:09:45.851027 22755 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:09:45.851042 22755 net.cpp:395] 
L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:09:45.851063 22755 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:09:45.851078 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.851089 22755 net.cpp:165] Memory required for data: 1957479600\nI1206 09:09:45.851100 22755 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:09:45.851130 22755 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:09:45.851143 22755 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:09:45.851167 22755 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:09:45.851852 22755 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:09:45.851872 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.851881 22755 net.cpp:165] Memory required for data: 1964033200\nI1206 09:09:45.851900 22755 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:09:45.851925 22755 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:09:45.851939 22755 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:09:45.851954 22755 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:09:45.851976 22755 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:09:45.852035 22755 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:09:45.852052 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.852061 22755 net.cpp:165] Memory required for data: 1970586800\nI1206 09:09:45.852072 22755 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.852105 22755 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.852118 22755 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:09:45.852138 22755 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:09:45.852159 22755 net.cpp:408] 
L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:09:45.852252 22755 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:09:45.852274 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.852288 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.852298 22755 net.cpp:165] Memory required for data: 1983694000\nI1206 09:09:45.852309 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:09:45.852330 22755 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:09:45.852344 22755 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:09:45.852360 22755 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:09:45.852664 22755 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:09:45.852684 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.852694 22755 net.cpp:165] Memory required for data: 1990247600\nI1206 09:09:45.852716 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:09:45.852735 22755 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:09:45.852747 22755 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:09:45.852762 22755 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:09:45.852782 22755 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:09:45.852804 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.852815 22755 net.cpp:165] Memory required for data: 1996801200\nI1206 09:09:45.852826 22755 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:09:45.852855 22755 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:09:45.852869 22755 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:09:45.852895 22755 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:09:45.853602 22755 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:09:45.853623 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 
09:09:45.853632 22755 net.cpp:165] Memory required for data: 1998439600\nI1206 09:09:45.853651 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:09:45.853668 22755 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:09:45.853688 22755 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:09:45.853704 22755 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:09:45.854043 22755 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 09:09:45.854061 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.854070 22755 net.cpp:165] Memory required for data: 2000078000\nI1206 09:09:45.854094 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:09:45.854113 22755 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:09:45.854125 22755 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:09:45.854147 22755 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:09:45.854167 22755 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:09:45.854182 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.854192 22755 net.cpp:165] Memory required for data: 2001716400\nI1206 09:09:45.854202 22755 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:09:45.854230 22755 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:09:45.854243 22755 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:09:45.854261 22755 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:09:45.855347 22755 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:09:45.855367 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.855377 22755 net.cpp:165] Memory required for data: 2003354800\nI1206 09:09:45.855394 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:09:45.855427 22755 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:09:45.855440 22755 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:09:45.855458 22755 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 
09:09:45.855790 22755 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:09:45.855815 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.855825 22755 net.cpp:165] Memory required for data: 2004993200\nI1206 09:09:45.855849 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:09:45.855873 22755 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:09:45.855887 22755 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 09:09:45.855902 22755 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:09:45.855921 22755 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:09:45.855937 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.855947 22755 net.cpp:165] Memory required for data: 2006631600\nI1206 09:09:45.855958 22755 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:09:45.855986 22755 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:09:45.856000 22755 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:09:45.856024 22755 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:09:45.856708 22755 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:09:45.856726 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.856735 22755 net.cpp:165] Memory required for data: 2013185200\nI1206 09:09:45.856753 22755 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:09:45.856773 22755 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:09:45.856786 22755 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:09:45.856806 22755 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:09:45.856825 22755 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:09:45.856902 22755 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:09:45.856925 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.856935 22755 net.cpp:165] Memory required for data: 2019738800\nI1206 09:09:45.856946 22755 
layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.856964 22755 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.856976 22755 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:09:45.856999 22755 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:09:45.857020 22755 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:09:45.857111 22755 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:09:45.857133 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.857146 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.857156 22755 net.cpp:165] Memory required for data: 2032846000\nI1206 09:09:45.857169 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:09:45.857185 22755 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:09:45.857197 22755 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:09:45.857223 22755 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:09:45.857534 22755 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:09:45.857553 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.857563 22755 net.cpp:165] Memory required for data: 2039399600\nI1206 09:09:45.857584 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:09:45.857609 22755 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:09:45.857623 22755 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:09:45.857640 22755 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:09:45.857661 22755 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:09:45.857686 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.857697 22755 net.cpp:165] Memory 
required for data: 2045953200\nI1206 09:09:45.857707 22755 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:09:45.857728 22755 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:09:45.857741 22755 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:09:45.857766 22755 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:09:45.858470 22755 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:09:45.858490 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.858500 22755 net.cpp:165] Memory required for data: 2047591600\nI1206 09:09:45.858521 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:09:45.858539 22755 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:09:45.858552 22755 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:09:45.858575 22755 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:09:45.858898 22755 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:09:45.858922 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.858932 22755 net.cpp:165] Memory required for data: 2049230000\nI1206 09:09:45.858954 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:09:45.858973 22755 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:09:45.858984 22755 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:09:45.858999 22755 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:09:45.859019 22755 net.cpp:150] Setting up L3_b5_brc2_relu\nI1206 09:09:45.859035 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.859045 22755 net.cpp:165] Memory required for data: 2050868400\nI1206 09:09:45.859055 22755 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:09:45.859081 22755 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:09:45.859093 22755 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:09:45.859117 22755 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:09:45.860213 22755 net.cpp:150] 
Setting up L3_b5_brc2_conv\nI1206 09:09:45.860232 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.860241 22755 net.cpp:165] Memory required for data: 2052506800\nI1206 09:09:45.860316 22755 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:09:45.860342 22755 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:09:45.860355 22755 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:09:45.860373 22755 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1206 09:09:45.860707 22755 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:09:45.860726 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.860735 22755 net.cpp:165] Memory required for data: 2054145200\nI1206 09:09:45.860759 22755 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:09:45.860777 22755 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:09:45.860790 22755 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:09:45.860817 22755 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:09:45.860839 22755 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:09:45.860854 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.860863 22755 net.cpp:165] Memory required for data: 2055783600\nI1206 09:09:45.860877 22755 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:09:45.860898 22755 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:09:45.860911 22755 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1206 09:09:45.860929 22755 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:09:45.861610 22755 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:09:45.861629 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.861639 22755 net.cpp:165] Memory required for data: 2062337200\nI1206 09:09:45.861659 22755 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:09:45.861685 22755 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:09:45.861698 22755 net.cpp:434] 
L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:09:45.861723 22755 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:09:45.861740 22755 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:09:45.861807 22755 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:09:45.861827 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.861837 22755 net.cpp:165] Memory required for data: 2068890800\nI1206 09:09:45.861850 22755 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.861872 22755 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.861886 22755 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:09:45.861907 22755 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:09:45.861928 22755 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:09:45.862022 22755 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:09:45.862046 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.862061 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.862071 22755 net.cpp:165] Memory required for data: 2081998000\nI1206 09:09:45.862082 22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1206 09:09:45.862100 22755 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:09:45.862113 22755 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:09:45.862140 22755 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:09:45.862467 22755 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:09:45.862485 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.862495 22755 net.cpp:165] Memory required for data: 2088551600\nI1206 09:09:45.862520 
22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:09:45.862537 22755 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:09:45.862550 22755 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:09:45.862571 22755 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:09:45.862592 22755 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:09:45.862607 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.862617 22755 net.cpp:165] Memory required for data: 2095105200\nI1206 09:09:45.862629 22755 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:09:45.862651 22755 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:09:45.862665 22755 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:09:45.862682 22755 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:09:45.863385 22755 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:09:45.863411 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.863421 22755 net.cpp:165] Memory required for data: 2096743600\nI1206 09:09:45.863440 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:09:45.863456 22755 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:09:45.863468 22755 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:09:45.863485 22755 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:09:45.863816 22755 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:09:45.863836 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.863844 22755 net.cpp:165] Memory required for data: 2098382000\nI1206 09:09:45.863868 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:09:45.863891 22755 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:09:45.863905 22755 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:09:45.863921 22755 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:09:45.863941 22755 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 
09:09:45.863956 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.863965 22755 net.cpp:165] Memory required for data: 2100020400\nI1206 09:09:45.863976 22755 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:09:45.864014 22755 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:09:45.864028 22755 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:09:45.864053 22755 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:09:45.865170 22755 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:09:45.865190 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.865200 22755 net.cpp:165] Memory required for data: 2101658800\nI1206 09:09:45.865221 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:09:45.865238 22755 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:09:45.865250 22755 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:09:45.865272 22755 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:09:45.865605 22755 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:09:45.865630 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.865640 22755 net.cpp:165] Memory required for data: 2103297200\nI1206 09:09:45.865662 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:09:45.865680 22755 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:09:45.865692 22755 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:09:45.865708 22755 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1206 09:09:45.865727 22755 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:09:45.865742 22755 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI1206 09:09:45.865752 22755 net.cpp:165] Memory required for data: 2104935600\nI1206 09:09:45.865762 22755 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:09:45.865784 22755 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:09:45.865804 22755 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 
09:09:45.865833 22755 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:09:45.866528 22755 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:09:45.866549 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.866557 22755 net.cpp:165] Memory required for data: 2111489200\nI1206 09:09:45.866577 22755 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:09:45.866596 22755 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:09:45.866608 22755 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:09:45.866622 22755 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:09:45.866648 22755 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:09:45.866706 22755 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:09:45.866734 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.866744 22755 net.cpp:165] Memory required for data: 2118042800\nI1206 09:09:45.866755 22755 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:09:45.866771 22755 net.cpp:100] Creating Layer post_bn\nI1206 09:09:45.866783 22755 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:09:45.866812 22755 net.cpp:408] post_bn -> post_bn_top\nI1206 09:09:45.867135 22755 net.cpp:150] Setting up post_bn\nI1206 09:09:45.867153 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.867162 22755 net.cpp:165] Memory required for data: 2124596400\nI1206 09:09:45.867184 22755 layer_factory.hpp:77] Creating layer post_relu\nI1206 09:09:45.867207 22755 net.cpp:100] Creating Layer post_relu\nI1206 09:09:45.867219 22755 net.cpp:434] post_relu <- post_bn_top\nI1206 09:09:45.867235 22755 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:09:45.867254 22755 net.cpp:150] Setting up post_relu\nI1206 09:09:45.867269 22755 net.cpp:157] Top shape: 100 256 8 8 (1638400)\nI1206 09:09:45.867278 22755 net.cpp:165] Memory required for data: 2131150000\nI1206 09:09:45.867290 22755 
layer_factory.hpp:77] Creating layer post_pool\nI1206 09:09:45.867308 22755 net.cpp:100] Creating Layer post_pool\nI1206 09:09:45.867321 22755 net.cpp:434] post_pool <- post_bn_top\nI1206 09:09:45.867336 22755 net.cpp:408] post_pool -> post_pool\nI1206 09:09:45.867395 22755 net.cpp:150] Setting up post_pool\nI1206 09:09:45.867424 22755 net.cpp:157] Top shape: 100 256 1 1 (25600)\nI1206 09:09:45.867444 22755 net.cpp:165] Memory required for data: 2131252400\nI1206 09:09:45.867461 22755 layer_factory.hpp:77] Creating layer post_FC\nI1206 09:09:45.867481 22755 net.cpp:100] Creating Layer post_FC\nI1206 09:09:45.867494 22755 net.cpp:434] post_FC <- post_pool\nI1206 09:09:45.867516 22755 net.cpp:408] post_FC -> post_FC_top\nI1206 09:09:45.867748 22755 net.cpp:150] Setting up post_FC\nI1206 09:09:45.867774 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.867784 22755 net.cpp:165] Memory required for data: 2131256400\nI1206 09:09:45.867808 22755 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:09:45.867827 22755 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:09:45.867838 22755 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:09:45.867856 22755 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:09:45.867874 22755 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:09:45.867976 22755 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:09:45.867995 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.868007 22755 net.cpp:157] Top shape: 100 10 (1000)\nI1206 09:09:45.868016 22755 net.cpp:165] Memory required for data: 2131264400\nI1206 09:09:45.868028 22755 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:09:45.868044 22755 net.cpp:100] Creating Layer accuracy\nI1206 09:09:45.868057 22755 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:09:45.868075 22755 net.cpp:434] accuracy <- 
label_dataLayer_1_split_0\nI1206 09:09:45.868093 22755 net.cpp:408] accuracy -> accuracy\nI1206 09:09:45.868118 22755 net.cpp:150] Setting up accuracy\nI1206 09:09:45.868132 22755 net.cpp:157] Top shape: (1)\nI1206 09:09:45.868141 22755 net.cpp:165] Memory required for data: 2131264404\nI1206 09:09:45.868154 22755 layer_factory.hpp:77] Creating layer loss\nI1206 09:09:45.868170 22755 net.cpp:100] Creating Layer loss\nI1206 09:09:45.868181 22755 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 09:09:45.868194 22755 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:09:45.868209 22755 net.cpp:408] loss -> loss\nI1206 09:09:45.868232 22755 layer_factory.hpp:77] Creating layer loss\nI1206 09:09:45.868389 22755 net.cpp:150] Setting up loss\nI1206 09:09:45.868413 22755 net.cpp:157] Top shape: (1)\nI1206 09:09:45.868423 22755 net.cpp:160]     with loss weight 1\nI1206 09:09:45.868446 22755 net.cpp:165] Memory required for data: 2131264408\nI1206 09:09:45.868458 22755 net.cpp:226] loss needs backward computation.\nI1206 09:09:45.868469 22755 net.cpp:228] accuracy does not need backward computation.\nI1206 09:09:45.868481 22755 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:09:45.868492 22755 net.cpp:226] post_FC needs backward computation.\nI1206 09:09:45.868501 22755 net.cpp:226] post_pool needs backward computation.\nI1206 09:09:45.868511 22755 net.cpp:226] post_relu needs backward computation.\nI1206 09:09:45.868521 22755 net.cpp:226] post_bn needs backward computation.\nI1206 09:09:45.868532 22755 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.868542 22755 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:09:45.868553 22755 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:09:45.868563 22755 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:09:45.868574 22755 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:09:45.868584 22755 
net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:09:45.868592 22755 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:09:45.868602 22755 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:09:45.868613 22755 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:09:45.868623 22755 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:09:45.868633 22755 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.868643 22755 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.868669 22755 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:09:45.868681 22755 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:09:45.868691 22755 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:09:45.868702 22755 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:09:45.868715 22755 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:09:45.868723 22755 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:09:45.868734 22755 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:09:45.868744 22755 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:09:45.868753 22755 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:09:45.868764 22755 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.868775 22755 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.868788 22755 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:09:45.868805 22755 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:09:45.868818 22755 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:09:45.868829 22755 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:09:45.868839 22755 net.cpp:226] L3_b4_brc2_relu needs backward 
computation.\nI1206 09:09:45.868849 22755 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:09:45.868860 22755 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:09:45.868870 22755 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:09:45.868880 22755 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:09:45.868891 22755 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.868902 22755 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.868913 22755 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:09:45.868924 22755 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:09:45.868934 22755 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:09:45.868945 22755 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:09:45.868955 22755 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:09:45.868966 22755 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:09:45.868978 22755 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:09:45.868988 22755 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:09:45.868998 22755 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:09:45.869009 22755 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869019 22755 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.869030 22755 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:09:45.869040 22755 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:09:45.869051 22755 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:09:45.869062 22755 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:09:45.869072 22755 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:09:45.869082 22755 
net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:09:45.869093 22755 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:09:45.869104 22755 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:09:45.869114 22755 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:09:45.869125 22755 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869135 22755 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.869154 22755 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.869166 22755 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:09:45.869177 22755 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:09:45.869199 22755 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:09:45.869210 22755 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:09:45.869221 22755 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:09:45.869232 22755 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:09:45.869243 22755 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:09:45.869253 22755 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:09:45.869262 22755 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:09:45.869273 22755 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869285 22755 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.869297 22755 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:09:45.869307 22755 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:09:45.869318 22755 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:09:45.869328 22755 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:09:45.869339 22755 net.cpp:226] L2_b6_brc2_relu needs backward 
computation.\nI1206 09:09:45.869349 22755 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:09:45.869360 22755 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:09:45.869371 22755 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:09:45.869381 22755 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:09:45.869391 22755 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869402 22755 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.869415 22755 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:09:45.869424 22755 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:09:45.869436 22755 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:09:45.869447 22755 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:09:45.869457 22755 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:09:45.869468 22755 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:09:45.869479 22755 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:09:45.869490 22755 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:09:45.869499 22755 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:09:45.869509 22755 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869521 22755 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.869534 22755 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:09:45.869544 22755 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:09:45.869554 22755 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:09:45.869565 22755 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:09:45.869576 22755 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:09:45.869585 22755 
net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:09:45.869596 22755 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:09:45.869607 22755 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:09:45.869618 22755 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:09:45.869628 22755 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869647 22755 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.869659 22755 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:09:45.869669 22755 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:09:45.869681 22755 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:09:45.869693 22755 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:09:45.869714 22755 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:09:45.869725 22755 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:09:45.869736 22755 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:09:45.869747 22755 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:09:45.869757 22755 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:09:45.869768 22755 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869781 22755 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.869796 22755 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:09:45.869814 22755 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:09:45.869825 22755 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:09:45.869837 22755 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:09:45.869848 22755 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:09:45.869858 22755 net.cpp:226] L2_b2_brc2_bn needs backward 
computation.\nI1206 09:09:45.869868 22755 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:09:45.869880 22755 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:09:45.869890 22755 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:09:45.869900 22755 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.869911 22755 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.869925 22755 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.869935 22755 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:09:45.869946 22755 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:09:45.869957 22755 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:09:45.869968 22755 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:09:45.869978 22755 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:09:45.869989 22755 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:09:45.870002 22755 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:09:45.870014 22755 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:09:45.870025 22755 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:09:45.870036 22755 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870048 22755 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1206 09:09:45.870059 22755 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:09:45.870069 22755 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:09:45.870080 22755 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:09:45.870091 22755 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:09:45.870103 22755 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:09:45.870113 22755 
net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:09:45.870126 22755 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:09:45.870136 22755 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:09:45.870147 22755 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:09:45.870158 22755 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870169 22755 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1206 09:09:45.870182 22755 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:09:45.870193 22755 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:09:45.870204 22755 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:09:45.870215 22755 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:09:45.870226 22755 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:09:45.870237 22755 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:09:45.870260 22755 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:09:45.870271 22755 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:09:45.870282 22755 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:09:45.870296 22755 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870306 22755 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:09:45.870318 22755 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:09:45.870337 22755 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:09:45.870349 22755 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:09:45.870360 22755 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:09:45.870371 22755 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:09:45.870381 22755 net.cpp:226] L1_b4_brc2_bn needs backward 
computation.\nI1206 09:09:45.870394 22755 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:09:45.870405 22755 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:09:45.870414 22755 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:09:45.870424 22755 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870434 22755 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:09:45.870446 22755 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:09:45.870458 22755 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:09:45.870468 22755 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:09:45.870479 22755 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:09:45.870491 22755 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:09:45.870502 22755 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:09:45.870513 22755 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:09:45.870524 22755 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:09:45.870535 22755 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:09:45.870546 22755 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870558 22755 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:09:45.870571 22755 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1206 09:09:45.870584 22755 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:09:45.870594 22755 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:09:45.870604 22755 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:09:45.870615 22755 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:09:45.870627 22755 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:09:45.870638 22755 
net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:09:45.870648 22755 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:09:45.870661 22755 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:09:45.870672 22755 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:09:45.870683 22755 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:09:45.870695 22755 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1206 09:09:45.870707 22755 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:09:45.870719 22755 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:09:45.870730 22755 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:09:45.870743 22755 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:09:45.870754 22755 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:09:45.870765 22755 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:09:45.870777 22755 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:09:45.870790 22755 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:09:45.870816 22755 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:09:45.870829 22755 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:09:45.870841 22755 net.cpp:226] pre_conv needs backward computation.\nI1206 09:09:45.870854 22755 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1206 09:09:45.870867 22755 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:09:45.870877 22755 net.cpp:270] This network produces output accuracy\nI1206 09:09:45.870888 22755 net.cpp:270] This network produces output loss\nI1206 09:09:45.871170 22755 net.cpp:283] Network initialization done.\nI1206 09:09:45.871927 22755 solver.cpp:60] Solver scaffolding done.\nI1206 09:09:46.091907 22755 
parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1206 09:09:46.456940 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:46.456995 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:46.463591 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:47.135758 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:47.135818 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:47.142539 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:47.901835 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:47.901882 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:47.909947 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:48.269166 22755 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1206 09:09:48.727898 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:48.727946 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:48.737226 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:49.633141 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:49.633211 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:49.642896 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:50.646093 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using 
deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:50.646150 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:50.657600 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:51.731142 22755 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:09:51.731192 22755 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:09:51.742952 22755 data_layer.cpp:41] output data size: 100,3,32,32\nI1206 09:09:51.811034 22772 blocking_queue.cpp:50] Waiting for data\nI1206 09:09:51.887933 22769 blocking_queue.cpp:50] Waiting for data\nI1206 09:09:52.389014 22755 parallel.cpp:425] Starting Optimization\nI1206 09:09:52.391293 22755 solver.cpp:279] Solving Cifar-Resnet\nI1206 09:09:52.391315 22755 solver.cpp:280] Learning Rate Policy: triangular\nI1206 09:09:52.394729 22755 solver.cpp:337] Iteration 0, Testing net (#0)\nI1206 09:10:44.821339 22755 solver.cpp:404]     Test net output #0: accuracy = 0.11895\nI1206 09:10:44.821627 22755 solver.cpp:404]     Test net output #1: loss = 2.34946 (* 1 = 2.34946 loss)\nI1206 09:10:47.469782 22755 solver.cpp:228] Iteration 0, loss = 2.3119\nI1206 09:10:47.470019 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 09:10:47.470129 22755 solver.cpp:244]     Train net output #1: loss = 2.3119 (* 1 = 2.3119 loss)\nI1206 09:10:47.559799 22755 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1206 09:12:20.993270 22755 solver.cpp:337] Iteration 100, Testing net (#0)\nI1206 09:13:13.215795 22755 solver.cpp:404]     Test net output #0: accuracy = 0.28575\nI1206 09:13:13.216068 22755 solver.cpp:404]     Test net output #1: loss = 1.99975 (* 1 = 1.99975 loss)\nI1206 09:13:14.102167 22755 solver.cpp:228] Iteration 100, loss = 1.99676\nI1206 09:13:14.102201 22755 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1206 
09:13:14.102217 22755 solver.cpp:244]     Train net output #1: loss = 1.99676 (* 1 = 1.99676 loss)\nI1206 09:13:14.166127 22755 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1206 09:14:47.494642 22755 solver.cpp:337] Iteration 200, Testing net (#0)\nI1206 09:15:39.981551 22755 solver.cpp:404]     Test net output #0: accuracy = 0.31735\nI1206 09:15:39.981828 22755 solver.cpp:404]     Test net output #1: loss = 1.89742 (* 1 = 1.89742 loss)\nI1206 09:15:40.852957 22755 solver.cpp:228] Iteration 200, loss = 1.92392\nI1206 09:15:40.852990 22755 solver.cpp:244]     Train net output #0: accuracy = 0.3\nI1206 09:15:40.853014 22755 solver.cpp:244]     Train net output #1: loss = 1.92392 (* 1 = 1.92392 loss)\nI1206 09:15:40.923028 22755 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1206 09:17:14.125597 22755 solver.cpp:337] Iteration 300, Testing net (#0)\nI1206 09:18:06.615679 22755 solver.cpp:404]     Test net output #0: accuracy = 0.3538\nI1206 09:18:06.615942 22755 solver.cpp:404]     Test net output #1: loss = 1.81683 (* 1 = 1.81683 loss)\nI1206 09:18:07.487154 22755 solver.cpp:228] Iteration 300, loss = 1.76786\nI1206 09:18:07.487188 22755 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1206 09:18:07.487212 22755 solver.cpp:244]     Train net output #1: loss = 1.76786 (* 1 = 1.76786 loss)\nI1206 09:18:07.564999 22755 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1206 09:19:41.137394 22755 solver.cpp:337] Iteration 400, Testing net (#0)\nI1206 09:20:33.641991 22755 solver.cpp:404]     Test net output #0: accuracy = 0.37335\nI1206 09:20:33.642262 22755 solver.cpp:404]     Test net output #1: loss = 1.75261 (* 1 = 1.75261 loss)\nI1206 09:20:34.513566 22755 solver.cpp:228] Iteration 400, loss = 1.69995\nI1206 09:20:34.513599 22755 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI1206 09:20:34.513623 22755 solver.cpp:244]     Train net output #1: loss = 1.69995 (* 1 = 1.69995 loss)\nI1206 09:20:34.583708 22755 sgd_solver.cpp:166] Iteration 400, lr 
= 0.0599999\nI1206 09:22:08.164870 22755 solver.cpp:337] Iteration 500, Testing net (#0)\nI1206 09:23:00.758530 22755 solver.cpp:404]     Test net output #0: accuracy = 0.3871\nI1206 09:23:00.758807 22755 solver.cpp:404]     Test net output #1: loss = 1.69104 (* 1 = 1.69104 loss)\nI1206 09:23:01.630228 22755 solver.cpp:228] Iteration 500, loss = 1.5803\nI1206 09:23:01.630264 22755 solver.cpp:244]     Train net output #0: accuracy = 0.43\nI1206 09:23:01.630287 22755 solver.cpp:244]     Train net output #1: loss = 1.5803 (* 1 = 1.5803 loss)\nI1206 09:23:01.699549 22755 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1206 09:24:35.265033 22755 solver.cpp:337] Iteration 600, Testing net (#0)\nI1206 09:25:27.829743 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4025\nI1206 09:25:27.830014 22755 solver.cpp:404]     Test net output #1: loss = 1.65427 (* 1 = 1.65427 loss)\nI1206 09:25:28.701617 22755 solver.cpp:228] Iteration 600, loss = 1.69891\nI1206 09:25:28.701653 22755 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1206 09:25:28.701676 22755 solver.cpp:244]     Train net output #1: loss = 1.69891 (* 1 = 1.69891 loss)\nI1206 09:25:28.775966 22755 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1206 09:27:02.361027 22755 solver.cpp:337] Iteration 700, Testing net (#0)\nI1206 09:27:55.027348 22755 solver.cpp:404]     Test net output #0: accuracy = 0.41105\nI1206 09:27:55.027624 22755 solver.cpp:404]     Test net output #1: loss = 1.63278 (* 1 = 1.63278 loss)\nI1206 09:27:55.899844 22755 solver.cpp:228] Iteration 700, loss = 1.69927\nI1206 09:27:55.899878 22755 solver.cpp:244]     Train net output #0: accuracy = 0.39\nI1206 09:27:55.899901 22755 solver.cpp:244]     Train net output #1: loss = 1.69927 (* 1 = 1.69927 loss)\nI1206 09:27:55.971076 22755 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1206 09:29:29.513839 22755 solver.cpp:337] Iteration 800, Testing net (#0)\nI1206 09:30:22.118271 22755 solver.cpp:404]     Test net output #0: 
accuracy = 0.4153\nI1206 09:30:22.118556 22755 solver.cpp:404]     Test net output #1: loss = 1.60634 (* 1 = 1.60634 loss)\nI1206 09:30:22.989862 22755 solver.cpp:228] Iteration 800, loss = 1.52939\nI1206 09:30:22.989897 22755 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI1206 09:30:22.989920 22755 solver.cpp:244]     Train net output #1: loss = 1.52939 (* 1 = 1.52939 loss)\nI1206 09:30:23.062852 22755 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1206 09:31:56.640831 22755 solver.cpp:337] Iteration 900, Testing net (#0)\nI1206 09:32:49.291963 22755 solver.cpp:404]     Test net output #0: accuracy = 0.43585\nI1206 09:32:49.292238 22755 solver.cpp:404]     Test net output #1: loss = 1.54871 (* 1 = 1.54871 loss)\nI1206 09:32:50.163224 22755 solver.cpp:228] Iteration 900, loss = 1.50059\nI1206 09:32:50.163259 22755 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 09:32:50.163283 22755 solver.cpp:244]     Train net output #1: loss = 1.50059 (* 1 = 1.50059 loss)\nI1206 09:32:50.240644 22755 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1206 09:34:23.810046 22755 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1206 09:35:16.386863 22755 solver.cpp:404]     Test net output #0: accuracy = 0.42525\nI1206 09:35:16.387136 22755 solver.cpp:404]     Test net output #1: loss = 1.56532 (* 1 = 1.56532 loss)\nI1206 09:35:17.257541 22755 solver.cpp:228] Iteration 1000, loss = 1.40792\nI1206 09:35:17.257575 22755 solver.cpp:244]     Train net output #0: accuracy = 0.51\nI1206 09:35:17.257601 22755 solver.cpp:244]     Train net output #1: loss = 1.40792 (* 1 = 1.40792 loss)\nI1206 09:35:17.341243 22755 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1206 09:36:50.911496 22755 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1206 09:37:43.479571 22755 solver.cpp:404]     Test net output #0: accuracy = 0.45595\nI1206 09:37:43.479848 22755 solver.cpp:404]     Test net output #1: loss = 1.49723 (* 1 = 1.49723 loss)\nI1206 09:37:44.350998 22755 
solver.cpp:228] Iteration 1100, loss = 1.53133\nI1206 09:37:44.351032 22755 solver.cpp:244]     Train net output #0: accuracy = 0.43\nI1206 09:37:44.351047 22755 solver.cpp:244]     Train net output #1: loss = 1.53133 (* 1 = 1.53133 loss)\nI1206 09:37:44.424340 22755 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1206 09:39:18.026116 22755 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1206 09:40:10.590767 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4391\nI1206 09:40:10.591020 22755 solver.cpp:404]     Test net output #1: loss = 1.53644 (* 1 = 1.53644 loss)\nI1206 09:40:11.461920 22755 solver.cpp:228] Iteration 1200, loss = 1.64636\nI1206 09:40:11.461954 22755 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1206 09:40:11.461971 22755 solver.cpp:244]     Train net output #1: loss = 1.64636 (* 1 = 1.64636 loss)\nI1206 09:40:11.534533 22755 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1206 09:41:45.076362 22755 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1206 09:42:37.635924 22755 solver.cpp:404]     Test net output #0: accuracy = 0.45565\nI1206 09:42:37.636190 22755 solver.cpp:404]     Test net output #1: loss = 1.49828 (* 1 = 1.49828 loss)\nI1206 09:42:38.506778 22755 solver.cpp:228] Iteration 1300, loss = 1.41966\nI1206 09:42:38.506811 22755 solver.cpp:244]     Train net output #0: accuracy = 0.5\nI1206 09:42:38.506827 22755 solver.cpp:244]     Train net output #1: loss = 1.41966 (* 1 = 1.41966 loss)\nI1206 09:42:38.581274 22755 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1206 09:44:12.130704 22755 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1206 09:45:04.698580 22755 solver.cpp:404]     Test net output #0: accuracy = 0.44985\nI1206 09:45:04.698845 22755 solver.cpp:404]     Test net output #1: loss = 1.50691 (* 1 = 1.50691 loss)\nI1206 09:45:05.570231 22755 solver.cpp:228] Iteration 1400, loss = 1.40311\nI1206 09:45:05.570264 22755 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI1206 09:45:05.570281 
22755 solver.cpp:244]     Train net output #1: loss = 1.40311 (* 1 = 1.40311 loss)\nI1206 09:45:05.644083 22755 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1206 09:46:39.259526 22755 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1206 09:47:31.832370 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4625\nI1206 09:47:31.832715 22755 solver.cpp:404]     Test net output #1: loss = 1.46037 (* 1 = 1.46037 loss)\nI1206 09:47:32.703430 22755 solver.cpp:228] Iteration 1500, loss = 1.31548\nI1206 09:47:32.703471 22755 solver.cpp:244]     Train net output #0: accuracy = 0.53\nI1206 09:47:32.703488 22755 solver.cpp:244]     Train net output #1: loss = 1.31548 (* 1 = 1.31548 loss)\nI1206 09:47:32.774595 22755 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1206 09:49:06.366436 22755 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1206 09:49:58.926592 22755 solver.cpp:404]     Test net output #0: accuracy = 0.42455\nI1206 09:49:58.926861 22755 solver.cpp:404]     Test net output #1: loss = 1.58241 (* 1 = 1.58241 loss)\nI1206 09:49:59.798120 22755 solver.cpp:228] Iteration 1600, loss = 1.51324\nI1206 09:49:59.798154 22755 solver.cpp:244]     Train net output #0: accuracy = 0.46\nI1206 09:49:59.798169 22755 solver.cpp:244]     Train net output #1: loss = 1.51324 (* 1 = 1.51324 loss)\nI1206 09:49:59.871505 22755 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1206 09:51:33.419059 22755 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1206 09:52:25.987048 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4739\nI1206 09:52:25.987320 22755 solver.cpp:404]     Test net output #1: loss = 1.43253 (* 1 = 1.43253 loss)\nI1206 09:52:26.858242 22755 solver.cpp:228] Iteration 1700, loss = 1.54855\nI1206 09:52:26.858274 22755 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI1206 09:52:26.858290 22755 solver.cpp:244]     Train net output #1: loss = 1.54855 (* 1 = 1.54855 loss)\nI1206 09:52:26.932137 22755 sgd_solver.cpp:166] Iteration 1700, lr = 
0.255\nI1206 09:54:00.122100 22755 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1206 09:54:52.692359 22755 solver.cpp:404]     Test net output #0: accuracy = 0.35765\nI1206 09:54:52.692627 22755 solver.cpp:404]     Test net output #1: loss = 1.77138 (* 1 = 1.77138 loss)\nI1206 09:54:53.563283 22755 solver.cpp:228] Iteration 1800, loss = 1.64901\nI1206 09:54:53.563316 22755 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI1206 09:54:53.563333 22755 solver.cpp:244]     Train net output #1: loss = 1.64901 (* 1 = 1.64901 loss)\nI1206 09:54:53.635450 22755 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1206 09:56:26.814177 22755 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1206 09:57:19.377910 22755 solver.cpp:404]     Test net output #0: accuracy = 0.43545\nI1206 09:57:19.378180 22755 solver.cpp:404]     Test net output #1: loss = 1.52759 (* 1 = 1.52759 loss)\nI1206 09:57:20.249577 22755 solver.cpp:228] Iteration 1900, loss = 1.48276\nI1206 09:57:20.249610 22755 solver.cpp:244]     Train net output #0: accuracy = 0.42\nI1206 09:57:20.249627 22755 solver.cpp:244]     Train net output #1: loss = 1.48276 (* 1 = 1.48276 loss)\nI1206 09:57:20.325354 22755 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1206 09:58:53.934515 22755 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1206 09:59:46.500690 22755 solver.cpp:404]     Test net output #0: accuracy = 0.46575\nI1206 09:59:46.500960 22755 solver.cpp:404]     Test net output #1: loss = 1.50773 (* 1 = 1.50773 loss)\nI1206 09:59:47.372045 22755 solver.cpp:228] Iteration 2000, loss = 1.38238\nI1206 09:59:47.372079 22755 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI1206 09:59:47.372095 22755 solver.cpp:244]     Train net output #1: loss = 1.38238 (* 1 = 1.38238 loss)\nI1206 09:59:47.444264 22755 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1206 10:01:21.024237 22755 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1206 10:02:13.581398 22755 solver.cpp:404]     Test net output #0: 
accuracy = 0.4804\nI1206 10:02:13.581671 22755 solver.cpp:404]     Test net output #1: loss = 1.44685 (* 1 = 1.44685 loss)\nI1206 10:02:14.452925 22755 solver.cpp:228] Iteration 2100, loss = 1.40503\nI1206 10:02:14.452960 22755 solver.cpp:244]     Train net output #0: accuracy = 0.46\nI1206 10:02:14.452977 22755 solver.cpp:244]     Train net output #1: loss = 1.40503 (* 1 = 1.40503 loss)\nI1206 10:02:14.526880 22755 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1206 10:03:48.110044 22755 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1206 10:04:40.664120 22755 solver.cpp:404]     Test net output #0: accuracy = 0.41485\nI1206 10:04:40.664388 22755 solver.cpp:404]     Test net output #1: loss = 1.6058 (* 1 = 1.6058 loss)\nI1206 10:04:41.536254 22755 solver.cpp:228] Iteration 2200, loss = 1.73069\nI1206 10:04:41.536289 22755 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI1206 10:04:41.536305 22755 solver.cpp:244]     Train net output #1: loss = 1.73069 (* 1 = 1.73069 loss)\nI1206 10:04:41.608891 22755 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1206 10:06:14.830493 22755 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1206 10:07:08.014638 22755 solver.cpp:404]     Test net output #0: accuracy = 0.41565\nI1206 10:07:08.014870 22755 solver.cpp:404]     Test net output #1: loss = 1.60365 (* 1 = 1.60365 loss)\nI1206 10:07:08.888192 22755 solver.cpp:228] Iteration 2300, loss = 1.51472\nI1206 10:07:08.888239 22755 solver.cpp:244]     Train net output #0: accuracy = 0.42\nI1206 10:07:08.888257 22755 solver.cpp:244]     Train net output #1: loss = 1.51472 (* 1 = 1.51472 loss)\nI1206 10:07:08.961076 22755 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1206 10:08:42.384292 22755 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1206 10:09:35.622140 22755 solver.cpp:404]     Test net output #0: accuracy = 0.42315\nI1206 10:09:35.622364 22755 solver.cpp:404]     Test net output #1: loss = 1.56856 (* 1 = 1.56856 loss)\nI1206 10:09:36.495658 22755 
solver.cpp:228] Iteration 2400, loss = 1.49465\nI1206 10:09:36.495699 22755 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1206 10:09:36.495717 22755 solver.cpp:244]     Train net output #1: loss = 1.49465 (* 1 = 1.49465 loss)\nI1206 10:09:36.568917 22755 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1206 10:11:09.996345 22755 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1206 10:12:03.230541 22755 solver.cpp:404]     Test net output #0: accuracy = 0.46995\nI1206 10:12:03.230739 22755 solver.cpp:404]     Test net output #1: loss = 1.46235 (* 1 = 1.46235 loss)\nI1206 10:12:04.103512 22755 solver.cpp:228] Iteration 2500, loss = 1.32973\nI1206 10:12:04.103548 22755 solver.cpp:244]     Train net output #0: accuracy = 0.55\nI1206 10:12:04.103564 22755 solver.cpp:244]     Train net output #1: loss = 1.32973 (* 1 = 1.32973 loss)\nI1206 10:12:04.175137 22755 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1206 10:13:37.406378 22755 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1206 10:14:29.969197 22755 solver.cpp:404]     Test net output #0: accuracy = 0.44795\nI1206 10:14:29.969468 22755 solver.cpp:404]     Test net output #1: loss = 1.5793 (* 1 = 1.5793 loss)\nI1206 10:14:30.840272 22755 solver.cpp:228] Iteration 2600, loss = 1.60154\nI1206 10:14:30.840307 22755 solver.cpp:244]     Train net output #0: accuracy = 0.46\nI1206 10:14:30.840323 22755 solver.cpp:244]     Train net output #1: loss = 1.60154 (* 1 = 1.60154 loss)\nI1206 10:14:30.913573 22755 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1206 10:16:04.188910 22755 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1206 10:16:56.736469 22755 solver.cpp:404]     Test net output #0: accuracy = 0.32695\nI1206 10:16:56.736730 22755 solver.cpp:404]     Test net output #1: loss = 1.90786 (* 1 = 1.90786 loss)\nI1206 10:16:57.608325 22755 solver.cpp:228] Iteration 2700, loss = 2.04629\nI1206 10:16:57.608362 22755 solver.cpp:244]     Train net output #0: accuracy = 0.34\nI1206 10:16:57.608378 
22755 solver.cpp:244]     Train net output #1: loss = 2.04629 (* 1 = 2.04629 loss)\nI1206 10:16:57.679567 22755 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1206 10:18:30.867141 22755 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1206 10:19:23.435648 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4817\nI1206 10:19:23.435927 22755 solver.cpp:404]     Test net output #1: loss = 1.39132 (* 1 = 1.39132 loss)\nI1206 10:19:24.307405 22755 solver.cpp:228] Iteration 2800, loss = 1.29345\nI1206 10:19:24.307441 22755 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1206 10:19:24.307458 22755 solver.cpp:244]     Train net output #1: loss = 1.29345 (* 1 = 1.29345 loss)\nI1206 10:19:24.379315 22755 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1206 10:20:57.596698 22755 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1206 10:21:50.163697 22755 solver.cpp:404]     Test net output #0: accuracy = 0.47655\nI1206 10:21:50.163962 22755 solver.cpp:404]     Test net output #1: loss = 1.44101 (* 1 = 1.44101 loss)\nI1206 10:21:51.035769 22755 solver.cpp:228] Iteration 2900, loss = 1.34097\nI1206 10:21:51.035805 22755 solver.cpp:244]     Train net output #0: accuracy = 0.45\nI1206 10:21:51.035821 22755 solver.cpp:244]     Train net output #1: loss = 1.34097 (* 1 = 1.34097 loss)\nI1206 10:21:51.109757 22755 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1206 10:23:24.680328 22755 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1206 10:24:17.248086 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4162\nI1206 10:24:17.248358 22755 solver.cpp:404]     Test net output #1: loss = 1.60185 (* 1 = 1.60185 loss)\nI1206 10:24:18.119886 22755 solver.cpp:228] Iteration 3000, loss = 1.44503\nI1206 10:24:18.119935 22755 solver.cpp:244]     Train net output #0: accuracy = 0.42\nI1206 10:24:18.119953 22755 solver.cpp:244]     Train net output #1: loss = 1.44503 (* 1 = 1.44503 loss)\nI1206 10:24:18.192766 22755 sgd_solver.cpp:166] Iteration 3000, lr = 
0.45\nI1206 10:25:51.352056 22755 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1206 10:26:43.917090 22755 solver.cpp:404]     Test net output #0: accuracy = 0.44605\nI1206 10:26:43.917353 22755 solver.cpp:404]     Test net output #1: loss = 1.52127 (* 1 = 1.52127 loss)\nI1206 10:26:44.788462 22755 solver.cpp:228] Iteration 3100, loss = 1.54804\nI1206 10:26:44.788496 22755 solver.cpp:244]     Train net output #0: accuracy = 0.42\nI1206 10:26:44.788511 22755 solver.cpp:244]     Train net output #1: loss = 1.54804 (* 1 = 1.54804 loss)\nI1206 10:26:44.864933 22755 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1206 10:28:18.106051 22755 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1206 10:29:10.676196 22755 solver.cpp:404]     Test net output #0: accuracy = 0.4371\nI1206 10:29:10.676467 22755 solver.cpp:404]     Test net output #1: loss = 1.56149 (* 1 = 1.56149 loss)\nI1206 10:29:11.547621 22755 solver.cpp:228] Iteration 3200, loss = 1.71057\nI1206 10:29:11.547653 22755 solver.cpp:244]     Train net output #0: accuracy = 0.43\nI1206 10:29:11.547669 22755 solver.cpp:244]     Train net output #1: loss = 1.71057 (* 1 = 1.71057 loss)\nI1206 10:29:11.622797 22755 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1206 10:30:44.850028 22755 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1206 10:31:37.410794 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2247\nI1206 10:31:37.411070 22755 solver.cpp:404]     Test net output #1: loss = 2.42651 (* 1 = 2.42651 loss)\nI1206 10:31:38.282371 22755 solver.cpp:228] Iteration 3300, loss = 2.34745\nI1206 10:31:38.282403 22755 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1206 10:31:38.282419 22755 solver.cpp:244]     Train net output #1: loss = 2.34745 (* 1 = 2.34745 loss)\nI1206 10:31:38.357162 22755 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1206 10:33:11.572724 22755 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1206 10:34:04.135273 22755 solver.cpp:404]     Test net output #0: 
accuracy = 0.3361\nI1206 10:34:04.135543 22755 solver.cpp:404]     Test net output #1: loss = 1.8044 (* 1 = 1.8044 loss)\nI1206 10:34:05.006563 22755 solver.cpp:228] Iteration 3400, loss = 1.80764\nI1206 10:34:05.006597 22755 solver.cpp:244]     Train net output #0: accuracy = 0.35\nI1206 10:34:05.006613 22755 solver.cpp:244]     Train net output #1: loss = 1.80764 (* 1 = 1.80764 loss)\nI1206 10:34:05.081928 22755 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1206 10:35:38.286497 22755 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1206 10:36:30.852232 22755 solver.cpp:404]     Test net output #0: accuracy = 0.37475\nI1206 10:36:30.852519 22755 solver.cpp:404]     Test net output #1: loss = 1.71344 (* 1 = 1.71344 loss)\nI1206 10:36:31.723345 22755 solver.cpp:228] Iteration 3500, loss = 1.56944\nI1206 10:36:31.723387 22755 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1206 10:36:31.723403 22755 solver.cpp:244]     Train net output #1: loss = 1.56944 (* 1 = 1.56944 loss)\nI1206 10:36:31.812425 22755 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1206 10:38:05.080554 22755 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1206 10:38:57.635002 22755 solver.cpp:404]     Test net output #0: accuracy = 0.37115\nI1206 10:38:57.635278 22755 solver.cpp:404]     Test net output #1: loss = 1.71039 (* 1 = 1.71039 loss)\nI1206 10:38:58.507079 22755 solver.cpp:228] Iteration 3600, loss = 1.83415\nI1206 10:38:58.507115 22755 solver.cpp:244]     Train net output #0: accuracy = 0.38\nI1206 10:38:58.507131 22755 solver.cpp:244]     Train net output #1: loss = 1.83415 (* 1 = 1.83415 loss)\nI1206 10:38:58.582307 22755 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1206 10:40:31.795487 22755 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1206 10:41:24.337724 22755 solver.cpp:404]     Test net output #0: accuracy = 0.39255\nI1206 10:41:24.337998 22755 solver.cpp:404]     Test net output #1: loss = 1.62694 (* 1 = 1.62694 loss)\nI1206 10:41:25.209482 22755 
solver.cpp:228] Iteration 3700, loss = 1.65236\nI1206 10:41:25.209517 22755 solver.cpp:244]     Train net output #0: accuracy = 0.4\nI1206 10:41:25.209533 22755 solver.cpp:244]     Train net output #1: loss = 1.65236 (* 1 = 1.65236 loss)\nI1206 10:41:25.287050 22755 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1206 10:42:58.482136 22755 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1206 10:43:51.028915 22755 solver.cpp:404]     Test net output #0: accuracy = 0.409\nI1206 10:43:51.029192 22755 solver.cpp:404]     Test net output #1: loss = 1.58624 (* 1 = 1.58624 loss)\nI1206 10:43:51.900122 22755 solver.cpp:228] Iteration 3800, loss = 1.4926\nI1206 10:43:51.900167 22755 solver.cpp:244]     Train net output #0: accuracy = 0.43\nI1206 10:43:51.900184 22755 solver.cpp:244]     Train net output #1: loss = 1.4926 (* 1 = 1.4926 loss)\nI1206 10:43:51.971433 22755 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI1206 10:45:25.198807 22755 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1206 10:46:17.734817 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2133\nI1206 10:46:17.735091 22755 solver.cpp:404]     Test net output #1: loss = 7.927 (* 1 = 7.927 loss)\nI1206 10:46:18.606173 22755 solver.cpp:228] Iteration 3900, loss = 8.0306\nI1206 10:46:18.606209 22755 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1206 10:46:18.606225 22755 solver.cpp:244]     Train net output #1: loss = 8.0306 (* 1 = 8.0306 loss)\nI1206 10:46:18.687527 22755 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1206 10:47:18.949306 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.47609 > 5) by scale factor 0.91306\nI1206 10:47:51.900009 22755 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1206 10:48:44.445950 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2065\nI1206 10:48:44.446225 22755 solver.cpp:404]     Test net output #1: loss = 6.04943 (* 1 = 6.04943 loss)\nI1206 10:48:45.316437 22755 solver.cpp:228] Iteration 4000, 
loss = 5.64925\nI1206 10:48:45.316471 22755 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1206 10:48:45.316488 22755 solver.cpp:244]     Train net output #1: loss = 5.64925 (* 1 = 5.64925 loss)\nI1206 10:48:45.392916 22755 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1206 10:50:18.665967 22755 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1206 10:51:11.889312 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17825\nI1206 10:51:11.889575 22755 solver.cpp:404]     Test net output #1: loss = 7.01271 (* 1 = 7.01271 loss)\nI1206 10:51:12.778322 22755 solver.cpp:228] Iteration 4100, loss = 7.04761\nI1206 10:51:12.778367 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 10:51:12.778384 22755 solver.cpp:244]     Train net output #1: loss = 7.04761 (* 1 = 7.04761 loss)\nI1206 10:51:12.843286 22755 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1206 10:52:46.038520 22755 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1206 10:53:38.651150 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2215\nI1206 10:53:38.651432 22755 solver.cpp:404]     Test net output #1: loss = 2.93695 (* 1 = 2.93695 loss)\nI1206 10:53:39.523099 22755 solver.cpp:228] Iteration 4200, loss = 2.93671\nI1206 10:53:39.523134 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 10:53:39.523150 22755 solver.cpp:244]     Train net output #1: loss = 2.93671 (* 1 = 2.93671 loss)\nI1206 10:53:39.595003 22755 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1206 10:55:12.800312 22755 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1206 10:56:05.383277 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1648\nI1206 10:56:05.383553 22755 solver.cpp:404]     Test net output #1: loss = 4.73031 (* 1 = 4.73031 loss)\nI1206 10:56:06.255038 22755 solver.cpp:228] Iteration 4300, loss = 4.51342\nI1206 10:56:06.255074 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 10:56:06.255089 22755 solver.cpp:244]     Train net 
output #1: loss = 4.51342 (* 1 = 4.51342 loss)\nI1206 10:56:06.327016 22755 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1206 10:57:39.552444 22755 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1206 10:58:32.105314 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1988\nI1206 10:58:32.105588 22755 solver.cpp:404]     Test net output #1: loss = 5.88723 (* 1 = 5.88723 loss)\nI1206 10:58:32.976459 22755 solver.cpp:228] Iteration 4400, loss = 5.45512\nI1206 10:58:32.976496 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 10:58:32.976521 22755 solver.cpp:244]     Train net output #1: loss = 5.45512 (* 1 = 5.45512 loss)\nI1206 10:58:33.050004 22755 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1206 11:00:06.245182 22755 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1206 11:00:58.797451 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2627\nI1206 11:00:58.797691 22755 solver.cpp:404]     Test net output #1: loss = 3.09993 (* 1 = 3.09993 loss)\nI1206 11:00:59.669199 22755 solver.cpp:228] Iteration 4500, loss = 3.01745\nI1206 11:00:59.669235 22755 solver.cpp:244]     Train net output #0: accuracy = 0.39\nI1206 11:00:59.669260 22755 solver.cpp:244]     Train net output #1: loss = 3.01745 (* 1 = 3.01745 loss)\nI1206 11:00:59.740736 22755 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1206 11:02:32.958492 22755 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1206 11:03:25.531939 22755 solver.cpp:404]     Test net output #0: accuracy = 0.19685\nI1206 11:03:25.532218 22755 solver.cpp:404]     Test net output #1: loss = 5.50077 (* 1 = 5.50077 loss)\nI1206 11:03:26.404315 22755 solver.cpp:228] Iteration 4600, loss = 6.08606\nI1206 11:03:26.404350 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 11:03:26.404373 22755 solver.cpp:244]     Train net output #1: loss = 6.08606 (* 1 = 6.08606 loss)\nI1206 11:03:26.475512 22755 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1206 11:04:59.699667 22755 
solver.cpp:337] Iteration 4700, Testing net (#0)\nI1206 11:05:52.306607 22755 solver.cpp:404]     Test net output #0: accuracy = 0.23265\nI1206 11:05:52.306885 22755 solver.cpp:404]     Test net output #1: loss = 2.90802 (* 1 = 2.90802 loss)\nI1206 11:05:53.178771 22755 solver.cpp:228] Iteration 4700, loss = 3.03702\nI1206 11:05:53.178805 22755 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1206 11:05:53.178834 22755 solver.cpp:244]     Train net output #1: loss = 3.03702 (* 1 = 3.03702 loss)\nI1206 11:05:53.256083 22755 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1206 11:07:26.489519 22755 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1206 11:08:19.085988 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18025\nI1206 11:08:19.086272 22755 solver.cpp:404]     Test net output #1: loss = 4.98699 (* 1 = 4.98699 loss)\nI1206 11:08:19.958737 22755 solver.cpp:228] Iteration 4800, loss = 4.86022\nI1206 11:08:19.958776 22755 solver.cpp:244]     Train net output #0: accuracy = 0.25\nI1206 11:08:19.958806 22755 solver.cpp:244]     Train net output #1: loss = 4.86022 (* 1 = 4.86022 loss)\nI1206 11:08:20.033381 22755 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1206 11:09:53.243129 22755 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1206 11:10:45.825037 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1964\nI1206 11:10:45.825320 22755 solver.cpp:404]     Test net output #1: loss = 10.4289 (* 1 = 10.4289 loss)\nI1206 11:10:46.697278 22755 solver.cpp:228] Iteration 4900, loss = 10.4336\nI1206 11:10:46.697322 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 11:10:46.697348 22755 solver.cpp:244]     Train net output #1: loss = 10.4336 (* 1 = 10.4336 loss)\nI1206 11:10:46.766638 22755 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1206 11:12:20.013962 22755 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1206 11:13:12.514926 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2102\nI1206 
11:13:12.515175 22755 solver.cpp:404]     Test net output #1: loss = 3.47412 (* 1 = 3.47412 loss)\nI1206 11:13:13.385985 22755 solver.cpp:228] Iteration 5000, loss = 3.42521\nI1206 11:13:13.386018 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 11:13:13.386032 22755 solver.cpp:244]     Train net output #1: loss = 3.42521 (* 1 = 3.42521 loss)\nI1206 11:13:13.458328 22755 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1206 11:14:46.714524 22755 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1206 11:15:39.202389 22755 solver.cpp:404]     Test net output #0: accuracy = 0.24625\nI1206 11:15:39.202663 22755 solver.cpp:404]     Test net output #1: loss = 5.42152 (* 1 = 5.42152 loss)\nI1206 11:15:40.073251 22755 solver.cpp:228] Iteration 5100, loss = 5.33533\nI1206 11:15:40.073285 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 11:15:40.073302 22755 solver.cpp:244]     Train net output #1: loss = 5.33533 (* 1 = 5.33533 loss)\nI1206 11:15:40.149260 22755 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1206 11:17:13.397356 22755 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1206 11:18:05.867568 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18845\nI1206 11:18:05.867862 22755 solver.cpp:404]     Test net output #1: loss = 5.44315 (* 1 = 5.44315 loss)\nI1206 11:18:06.738054 22755 solver.cpp:228] Iteration 5200, loss = 5.04599\nI1206 11:18:06.738087 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 11:18:06.738103 22755 solver.cpp:244]     Train net output #1: loss = 5.04599 (* 1 = 5.04599 loss)\nI1206 11:18:06.813766 22755 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1206 11:19:40.045686 22755 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1206 11:20:32.513442 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17755\nI1206 11:20:32.513703 22755 solver.cpp:404]     Test net output #1: loss = 7.44276 (* 1 = 7.44276 loss)\nI1206 11:20:33.384526 22755 solver.cpp:228] Iteration 
5300, loss = 6.62265\nI1206 11:20:33.384558 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 11:20:33.384573 22755 solver.cpp:244]     Train net output #1: loss = 6.62265 (* 1 = 6.62265 loss)\nI1206 11:20:33.459444 22755 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1206 11:22:06.685420 22755 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1206 11:22:59.176199 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1497\nI1206 11:22:59.176482 22755 solver.cpp:404]     Test net output #1: loss = 11.3327 (* 1 = 11.3327 loss)\nI1206 11:23:00.047044 22755 solver.cpp:228] Iteration 5400, loss = 10.8819\nI1206 11:23:00.047075 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 11:23:00.047093 22755 solver.cpp:244]     Train net output #1: loss = 10.8819 (* 1 = 10.8819 loss)\nI1206 11:23:00.122690 22755 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1206 11:24:33.329797 22755 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1206 11:25:25.815124 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1953\nI1206 11:25:25.815403 22755 solver.cpp:404]     Test net output #1: loss = 10.0381 (* 1 = 10.0381 loss)\nI1206 11:25:26.685875 22755 solver.cpp:228] Iteration 5500, loss = 11.597\nI1206 11:25:26.685910 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 11:25:26.685927 22755 solver.cpp:244]     Train net output #1: loss = 11.597 (* 1 = 11.597 loss)\nI1206 11:25:26.762583 22755 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1206 11:26:59.993240 22755 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1206 11:27:52.486214 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2499\nI1206 11:27:52.486488 22755 solver.cpp:404]     Test net output #1: loss = 7.50176 (* 1 = 7.50176 loss)\nI1206 11:27:53.356986 22755 solver.cpp:228] Iteration 5600, loss = 7.29538\nI1206 11:27:53.357017 22755 solver.cpp:244]     Train net output #0: accuracy = 0.27\nI1206 11:27:53.357033 22755 solver.cpp:244]     Train 
net output #1: loss = 7.29538 (* 1 = 7.29538 loss)\nI1206 11:27:53.431146 22755 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1206 11:29:26.646687 22755 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1206 11:30:19.138229 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14685\nI1206 11:30:19.138490 22755 solver.cpp:404]     Test net output #1: loss = 7.30289 (* 1 = 7.30289 loss)\nI1206 11:30:20.009601 22755 solver.cpp:228] Iteration 5700, loss = 7.65\nI1206 11:30:20.009637 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 11:30:20.009654 22755 solver.cpp:244]     Train net output #1: loss = 7.65 (* 1 = 7.65 loss)\nI1206 11:30:20.084218 22755 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1206 11:31:53.311738 22755 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1206 11:32:45.816725 22755 solver.cpp:404]     Test net output #0: accuracy = 0.22905\nI1206 11:32:45.816956 22755 solver.cpp:404]     Test net output #1: loss = 4.73963 (* 1 = 4.73963 loss)\nI1206 11:32:46.687719 22755 solver.cpp:228] Iteration 5800, loss = 4.21385\nI1206 11:32:46.687753 22755 solver.cpp:244]     Train net output #0: accuracy = 0.34\nI1206 11:32:46.687769 22755 solver.cpp:244]     Train net output #1: loss = 4.21385 (* 1 = 4.21385 loss)\nI1206 11:32:46.761338 22755 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1206 11:34:19.993839 22755 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1206 11:35:12.494150 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15445\nI1206 11:35:12.494395 22755 solver.cpp:404]     Test net output #1: loss = 9.39788 (* 1 = 9.39788 loss)\nI1206 11:35:13.365504 22755 solver.cpp:228] Iteration 5900, loss = 8.49632\nI1206 11:35:13.365545 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 11:35:13.365562 22755 solver.cpp:244]     Train net output #1: loss = 8.49632 (* 1 = 8.49632 loss)\nI1206 11:35:13.438786 22755 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1206 11:36:46.645071 22755 
solver.cpp:337] Iteration 6000, Testing net (#0)\nI1206 11:37:39.147549 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2355\nI1206 11:37:39.147775 22755 solver.cpp:404]     Test net output #1: loss = 2.50508 (* 1 = 2.50508 loss)\nI1206 11:37:40.019331 22755 solver.cpp:228] Iteration 6000, loss = 2.53386\nI1206 11:37:40.019366 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 11:37:40.019382 22755 solver.cpp:244]     Train net output #1: loss = 2.53386 (* 1 = 2.53386 loss)\nI1206 11:37:40.096307 22755 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1206 11:38:59.197077 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05862 > 5) by scale factor 0.988413\nI1206 11:39:02.022809 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10245 > 5) by scale factor 0.979921\nI1206 11:39:13.330694 22755 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1206 11:40:05.822719 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2584\nI1206 11:40:05.822971 22755 solver.cpp:404]     Test net output #1: loss = 8.46363 (* 1 = 8.46363 loss)\nI1206 11:40:06.694084 22755 solver.cpp:228] Iteration 6100, loss = 7.36125\nI1206 11:40:06.694118 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 11:40:06.694134 22755 solver.cpp:244]     Train net output #1: loss = 7.36125 (* 1 = 7.36125 loss)\nI1206 11:40:06.766108 22755 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1206 11:41:40.013871 22755 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1206 11:42:32.516326 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2366\nI1206 11:42:32.516585 22755 solver.cpp:404]     Test net output #1: loss = 6.81485 (* 1 = 6.81485 loss)\nI1206 11:42:33.387465 22755 solver.cpp:228] Iteration 6200, loss = 8.24352\nI1206 11:42:33.387500 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 11:42:33.387517 22755 solver.cpp:244]     Train net output #1: loss = 8.24352 (* 1 = 
8.24352 loss)\nI1206 11:42:33.465445 22755 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1206 11:44:06.704705 22755 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1206 11:44:59.270771 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2012\nI1206 11:44:59.271029 22755 solver.cpp:404]     Test net output #1: loss = 9.28133 (* 1 = 9.28133 loss)\nI1206 11:45:00.141707 22755 solver.cpp:228] Iteration 6300, loss = 8.00116\nI1206 11:45:00.141741 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 11:45:00.141758 22755 solver.cpp:244]     Train net output #1: loss = 8.00116 (* 1 = 8.00116 loss)\nI1206 11:45:00.217633 22755 sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1206 11:46:33.491369 22755 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1206 11:47:26.056891 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1196\nI1206 11:47:26.057093 22755 solver.cpp:404]     Test net output #1: loss = 10.5734 (* 1 = 10.5734 loss)\nI1206 11:47:26.927968 22755 solver.cpp:228] Iteration 6400, loss = 11.8146\nI1206 11:47:26.928011 22755 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI1206 11:47:26.928028 22755 solver.cpp:244]     Train net output #1: loss = 11.8146 (* 1 = 11.8146 loss)\nI1206 11:47:26.999637 22755 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1206 11:49:00.228327 22755 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1206 11:49:52.800812 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1752\nI1206 11:49:52.801023 22755 solver.cpp:404]     Test net output #1: loss = 9.0302 (* 1 = 9.0302 loss)\nI1206 11:49:53.672024 22755 solver.cpp:228] Iteration 6500, loss = 11.4025\nI1206 11:49:53.672067 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 11:49:53.672086 22755 solver.cpp:244]     Train net output #1: loss = 11.4025 (* 1 = 11.4025 loss)\nI1206 11:49:53.743940 22755 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1206 11:51:26.919065 22755 solver.cpp:337] Iteration 6600, Testing 
net (#0)\nI1206 11:52:19.491410 22755 solver.cpp:404]     Test net output #0: accuracy = 0.251\nI1206 11:52:19.491672 22755 solver.cpp:404]     Test net output #1: loss = 10.5434 (* 1 = 10.5434 loss)\nI1206 11:52:20.362994 22755 solver.cpp:228] Iteration 6600, loss = 13.5371\nI1206 11:52:20.363028 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 11:52:20.363044 22755 solver.cpp:244]     Train net output #1: loss = 13.5371 (* 1 = 13.5371 loss)\nI1206 11:52:20.436838 22755 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1206 11:53:53.667840 22755 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1206 11:54:46.222534 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18125\nI1206 11:54:46.222811 22755 solver.cpp:404]     Test net output #1: loss = 11.4273 (* 1 = 11.4273 loss)\nI1206 11:54:47.093684 22755 solver.cpp:228] Iteration 6700, loss = 11.884\nI1206 11:54:47.093716 22755 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1206 11:54:47.093732 22755 solver.cpp:244]     Train net output #1: loss = 11.884 (* 1 = 11.884 loss)\nI1206 11:54:47.169174 22755 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1206 11:56:20.387511 22755 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1206 11:57:12.934362 22755 solver.cpp:404]     Test net output #0: accuracy = 0.25485\nI1206 11:57:12.934619 22755 solver.cpp:404]     Test net output #1: loss = 10.9998 (* 1 = 10.9998 loss)\nI1206 11:57:13.805208 22755 solver.cpp:228] Iteration 6800, loss = 11.7871\nI1206 11:57:13.805239 22755 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1206 11:57:13.805256 22755 solver.cpp:244]     Train net output #1: loss = 11.7871 (* 1 = 11.7871 loss)\nI1206 11:57:13.881927 22755 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1206 11:58:47.080090 22755 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1206 11:59:39.624389 22755 solver.cpp:404]     Test net output #0: accuracy = 0.22505\nI1206 11:59:39.624624 22755 solver.cpp:404]     Test net output 
#1: loss = 8.78776 (* 1 = 8.78776 loss)\nI1206 11:59:40.495409 22755 solver.cpp:228] Iteration 6900, loss = 10.3002\nI1206 11:59:40.495447 22755 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1206 11:59:40.495463 22755 solver.cpp:244]     Train net output #1: loss = 10.3002 (* 1 = 10.3002 loss)\nI1206 11:59:40.570932 22755 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1206 12:00:30.475965 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.66491 > 5) by scale factor 0.882626\nI1206 12:01:13.777956 22755 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1206 12:02:06.333461 22755 solver.cpp:404]     Test net output #0: accuracy = 0.19585\nI1206 12:02:06.333714 22755 solver.cpp:404]     Test net output #1: loss = 7.14628 (* 1 = 7.14628 loss)\nI1206 12:02:07.203708 22755 solver.cpp:228] Iteration 7000, loss = 6.92606\nI1206 12:02:07.203738 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 12:02:07.203754 22755 solver.cpp:244]     Train net output #1: loss = 6.92606 (* 1 = 6.92606 loss)\nI1206 12:02:07.276599 22755 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1206 12:03:40.417286 22755 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1206 12:04:32.969907 22755 solver.cpp:404]     Test net output #0: accuracy = 0.21665\nI1206 12:04:32.970155 22755 solver.cpp:404]     Test net output #1: loss = 7.88734 (* 1 = 7.88734 loss)\nI1206 12:04:33.840843 22755 solver.cpp:228] Iteration 7100, loss = 9.14189\nI1206 12:04:33.840873 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 12:04:33.840889 22755 solver.cpp:244]     Train net output #1: loss = 9.14189 (* 1 = 9.14189 loss)\nI1206 12:04:33.917479 22755 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1206 12:06:07.084017 22755 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1206 12:06:59.643230 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2259\nI1206 12:06:59.643487 22755 solver.cpp:404]     Test net output #1: loss = 5.38847 
(* 1 = 5.38847 loss)\nI1206 12:07:00.514838 22755 solver.cpp:228] Iteration 7200, loss = 5.04742\nI1206 12:07:00.514868 22755 solver.cpp:244]     Train net output #0: accuracy = 0.25\nI1206 12:07:00.514883 22755 solver.cpp:244]     Train net output #1: loss = 5.04742 (* 1 = 5.04742 loss)\nI1206 12:07:00.584229 22755 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1206 12:08:33.603271 22755 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1206 12:09:26.163164 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17455\nI1206 12:09:26.163446 22755 solver.cpp:404]     Test net output #1: loss = 11.5694 (* 1 = 11.5694 loss)\nI1206 12:09:27.034013 22755 solver.cpp:228] Iteration 7300, loss = 11.7397\nI1206 12:09:27.034044 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 12:09:27.034060 22755 solver.cpp:244]     Train net output #1: loss = 11.7397 (* 1 = 11.7397 loss)\nI1206 12:09:27.110709 22755 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1206 12:11:00.145316 22755 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1206 12:11:52.691666 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16395\nI1206 12:11:52.691949 22755 solver.cpp:404]     Test net output #1: loss = 9.54837 (* 1 = 9.54837 loss)\nI1206 12:11:53.562497 22755 solver.cpp:228] Iteration 7400, loss = 9.1573\nI1206 12:11:53.562526 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 12:11:53.562542 22755 solver.cpp:244]     Train net output #1: loss = 9.1573 (* 1 = 9.1573 loss)\nI1206 12:11:53.636085 22755 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1206 12:13:26.700618 22755 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1206 12:14:19.243042 22755 solver.cpp:404]     Test net output #0: accuracy = 0.23615\nI1206 12:14:19.243321 22755 solver.cpp:404]     Test net output #1: loss = 9.54584 (* 1 = 9.54584 loss)\nI1206 12:14:20.113965 22755 solver.cpp:228] Iteration 7500, loss = 7.95737\nI1206 12:14:20.113998 22755 solver.cpp:244]     Train net 
output #0: accuracy = 0.26\nI1206 12:14:20.114014 22755 solver.cpp:244]     Train net output #1: loss = 7.95737 (* 1 = 7.95737 loss)\nI1206 12:14:20.186812 22755 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1206 12:15:53.229288 22755 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1206 12:16:45.765558 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2531\nI1206 12:16:45.765830 22755 solver.cpp:404]     Test net output #1: loss = 8.00327 (* 1 = 8.00327 loss)\nI1206 12:16:46.636145 22755 solver.cpp:228] Iteration 7600, loss = 8.94354\nI1206 12:16:46.636188 22755 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1206 12:16:46.636204 22755 solver.cpp:244]     Train net output #1: loss = 8.94354 (* 1 = 8.94354 loss)\nI1206 12:16:46.711737 22755 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1206 12:17:30.894752 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00507 > 5) by scale factor 0.998986\nI1206 12:17:50.630450 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18395 > 5) by scale factor 0.964515\nI1206 12:18:19.774390 22755 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1206 12:19:12.326169 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16445\nI1206 12:19:12.326444 22755 solver.cpp:404]     Test net output #1: loss = 13.9337 (* 1 = 13.9337 loss)\nI1206 12:19:13.197962 22755 solver.cpp:228] Iteration 7700, loss = 13.9956\nI1206 12:19:13.198007 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 12:19:13.198025 22755 solver.cpp:244]     Train net output #1: loss = 13.9956 (* 1 = 13.9956 loss)\nI1206 12:19:13.273669 22755 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1206 12:20:31.264421 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.25435 > 5) by scale factor 0.951592\nI1206 12:20:43.482558 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10703 > 5) by scale factor 0.979043\nI1206 
12:20:46.311357 22755 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1206 12:21:38.867787 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1948\nI1206 12:21:38.868084 22755 solver.cpp:404]     Test net output #1: loss = 14.6609 (* 1 = 14.6609 loss)\nI1206 12:21:39.739809 22755 solver.cpp:228] Iteration 7800, loss = 13.4453\nI1206 12:21:39.739842 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 12:21:39.739859 22755 solver.cpp:244]     Train net output #1: loss = 13.4453 (* 1 = 13.4453 loss)\nI1206 12:21:39.808497 22755 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1206 12:23:12.822532 22755 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1206 12:24:05.386674 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16455\nI1206 12:24:05.386929 22755 solver.cpp:404]     Test net output #1: loss = 11.7898 (* 1 = 11.7898 loss)\nI1206 12:24:06.257553 22755 solver.cpp:228] Iteration 7900, loss = 10.9259\nI1206 12:24:06.257589 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 12:24:06.257606 22755 solver.cpp:244]     Train net output #1: loss = 10.9259 (* 1 = 10.9259 loss)\nI1206 12:24:06.329113 22755 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1206 12:25:39.394927 22755 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1206 12:26:31.954427 22755 solver.cpp:404]     Test net output #0: accuracy = 0.19035\nI1206 12:26:31.954685 22755 solver.cpp:404]     Test net output #1: loss = 9.66669 (* 1 = 9.66669 loss)\nI1206 12:26:32.825100 22755 solver.cpp:228] Iteration 8000, loss = 9.01369\nI1206 12:26:32.825134 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 12:26:32.825150 22755 solver.cpp:244]     Train net output #1: loss = 9.01369 (* 1 = 9.01369 loss)\nI1206 12:26:32.897192 22755 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1206 12:28:05.966022 22755 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1206 12:28:58.529714 22755 solver.cpp:404]     Test net output #0: accuracy = 
0.1906\nI1206 12:28:58.529989 22755 solver.cpp:404]     Test net output #1: loss = 8.67596 (* 1 = 8.67596 loss)\nI1206 12:28:59.400827 22755 solver.cpp:228] Iteration 8100, loss = 8.69591\nI1206 12:28:59.400861 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 12:28:59.400877 22755 solver.cpp:244]     Train net output #1: loss = 8.69591 (* 1 = 8.69591 loss)\nI1206 12:28:59.472689 22755 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1206 12:30:32.499202 22755 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1206 12:31:25.063872 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1452\nI1206 12:31:25.064155 22755 solver.cpp:404]     Test net output #1: loss = 18.9488 (* 1 = 18.9488 loss)\nI1206 12:31:25.935465 22755 solver.cpp:228] Iteration 8200, loss = 19.1749\nI1206 12:31:25.935500 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 12:31:25.935516 22755 solver.cpp:244]     Train net output #1: loss = 19.1749 (* 1 = 19.1749 loss)\nI1206 12:31:26.010967 22755 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1206 12:32:59.029098 22755 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1206 12:33:51.596949 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2044\nI1206 12:33:51.597235 22755 solver.cpp:404]     Test net output #1: loss = 10.2266 (* 1 = 10.2266 loss)\nI1206 12:33:52.468616 22755 solver.cpp:228] Iteration 8300, loss = 10.4081\nI1206 12:33:52.468652 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 12:33:52.468668 22755 solver.cpp:244]     Train net output #1: loss = 10.4081 (* 1 = 10.4081 loss)\nI1206 12:33:52.543953 22755 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1206 12:34:44.249163 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.45727 > 5) by scale factor 0.916209\nI1206 12:35:25.604786 22755 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1206 12:36:18.164109 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1451\nI1206 
12:36:18.164393 22755 solver.cpp:404]     Test net output #1: loss = 14.4497 (* 1 = 14.4497 loss)\nI1206 12:36:19.035800 22755 solver.cpp:228] Iteration 8400, loss = 14.8916\nI1206 12:36:19.035835 22755 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI1206 12:36:19.035852 22755 solver.cpp:244]     Train net output #1: loss = 14.8916 (* 1 = 14.8916 loss)\nI1206 12:36:19.111790 22755 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1206 12:37:52.110213 22755 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1206 12:38:44.672848 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1473\nI1206 12:38:44.673107 22755 solver.cpp:404]     Test net output #1: loss = 28.9876 (* 1 = 28.9876 loss)\nI1206 12:38:45.544812 22755 solver.cpp:228] Iteration 8500, loss = 31.5695\nI1206 12:38:45.544847 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 12:38:45.544862 22755 solver.cpp:244]     Train net output #1: loss = 31.5695 (* 1 = 31.5695 loss)\nI1206 12:38:45.613593 22755 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1206 12:40:18.669553 22755 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1206 12:41:11.227263 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2005\nI1206 12:41:11.227532 22755 solver.cpp:404]     Test net output #1: loss = 15.0467 (* 1 = 15.0467 loss)\nI1206 12:41:12.098839 22755 solver.cpp:228] Iteration 8600, loss = 15.2021\nI1206 12:41:12.098872 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 12:41:12.098889 22755 solver.cpp:244]     Train net output #1: loss = 15.2021 (* 1 = 15.2021 loss)\nI1206 12:41:12.170035 22755 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1206 12:42:04.787467 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29823 > 5) by scale factor 0.943712\nI1206 12:42:45.201970 22755 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1206 12:43:37.781355 22755 solver.cpp:404]     Test net output #0: accuracy = 0.228\nI1206 12:43:37.781630 22755 
solver.cpp:404]     Test net output #1: loss = 13.0189 (* 1 = 13.0189 loss)\nI1206 12:43:38.652503 22755 solver.cpp:228] Iteration 8700, loss = 16.2123\nI1206 12:43:38.652539 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 12:43:38.652556 22755 solver.cpp:244]     Train net output #1: loss = 16.2123 (* 1 = 16.2123 loss)\nI1206 12:43:38.727710 22755 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1206 12:45:11.790309 22755 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1206 12:46:04.354861 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2093\nI1206 12:46:04.355131 22755 solver.cpp:404]     Test net output #1: loss = 11.6062 (* 1 = 11.6062 loss)\nI1206 12:46:05.226497 22755 solver.cpp:228] Iteration 8800, loss = 10.8573\nI1206 12:46:05.226533 22755 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1206 12:46:05.226549 22755 solver.cpp:244]     Train net output #1: loss = 10.8573 (* 1 = 10.8573 loss)\nI1206 12:46:05.299850 22755 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1206 12:47:38.339313 22755 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1206 12:48:30.900157 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2208\nI1206 12:48:30.900382 22755 solver.cpp:404]     Test net output #1: loss = 10.8774 (* 1 = 10.8774 loss)\nI1206 12:48:31.771564 22755 solver.cpp:228] Iteration 8900, loss = 8.87781\nI1206 12:48:31.771598 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 12:48:31.771620 22755 solver.cpp:244]     Train net output #1: loss = 8.87781 (* 1 = 8.87781 loss)\nI1206 12:48:31.846148 22755 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1206 12:50:04.877509 22755 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1206 12:50:57.442123 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1691\nI1206 12:50:57.442312 22755 solver.cpp:404]     Test net output #1: loss = 20.5945 (* 1 = 20.5945 loss)\nI1206 12:50:58.313768 22755 solver.cpp:228] Iteration 9000, loss = 19.2843\nI1206 
12:50:58.313803 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 12:50:58.313822 22755 solver.cpp:244]     Train net output #1: loss = 19.2843 (* 1 = 19.2843 loss)\nI1206 12:50:58.390168 22755 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1206 12:51:05.919664 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03364 > 5) by scale factor 0.993317\nI1206 12:51:12.500355 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.19764 > 5) by scale factor 0.961974\nI1206 12:51:14.382248 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.37541 > 5) by scale factor 0.930161\nI1206 12:51:22.845038 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.52846 > 5) by scale factor 0.904412\nI1206 12:52:31.480895 22755 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1206 12:53:24.051657 22755 solver.cpp:404]     Test net output #0: accuracy = 0.201\nI1206 12:53:24.051888 22755 solver.cpp:404]     Test net output #1: loss = 18.639 (* 1 = 18.639 loss)\nI1206 12:53:24.923867 22755 solver.cpp:228] Iteration 9100, loss = 16.9218\nI1206 12:53:24.923907 22755 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1206 12:53:24.923930 22755 solver.cpp:244]     Train net output #1: loss = 16.9218 (* 1 = 16.9218 loss)\nI1206 12:53:24.997822 22755 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1206 12:54:58.161296 22755 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1206 12:55:50.738171 22755 solver.cpp:404]     Test net output #0: accuracy = 0.20005\nI1206 12:55:50.738401 22755 solver.cpp:404]     Test net output #1: loss = 22.0146 (* 1 = 22.0146 loss)\nI1206 12:55:51.610440 22755 solver.cpp:228] Iteration 9200, loss = 21.0994\nI1206 12:55:51.610479 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 12:55:51.610504 22755 solver.cpp:244]     Train net output #1: loss = 21.0994 (* 1 = 21.0994 loss)\nI1206 12:55:51.685611 22755 
sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1206 12:56:56.596943 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06672 > 5) by scale factor 0.986831\nI1206 12:57:01.297345 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04298 > 5) by scale factor 0.991478\nI1206 12:57:05.056836 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.66109 > 5) by scale factor 0.883222\nI1206 12:57:24.795706 22755 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1206 12:58:17.369343 22755 solver.cpp:404]     Test net output #0: accuracy = 0.23455\nI1206 12:58:17.369618 22755 solver.cpp:404]     Test net output #1: loss = 7.59166 (* 1 = 7.59166 loss)\nI1206 12:58:18.241334 22755 solver.cpp:228] Iteration 9300, loss = 6.87682\nI1206 12:58:18.241380 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 12:58:18.241405 22755 solver.cpp:244]     Train net output #1: loss = 6.87682 (* 1 = 6.87682 loss)\nI1206 12:58:18.313768 22755 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1206 12:59:51.477387 22755 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1206 13:00:44.057314 22755 solver.cpp:404]     Test net output #0: accuracy = 0.26345\nI1206 13:00:44.057577 22755 solver.cpp:404]     Test net output #1: loss = 17.6029 (* 1 = 17.6029 loss)\nI1206 13:00:44.930008 22755 solver.cpp:228] Iteration 9400, loss = 21.6991\nI1206 13:00:44.930049 22755 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1206 13:00:44.930075 22755 solver.cpp:244]     Train net output #1: loss = 21.6991 (* 1 = 21.6991 loss)\nI1206 13:00:44.998145 22755 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1206 13:02:18.082526 22755 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1206 13:03:10.629371 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1618\nI1206 13:03:10.629607 22755 solver.cpp:404]     Test net output #1: loss = 13.6042 (* 1 = 13.6042 loss)\nI1206 13:03:11.501770 22755 
solver.cpp:228] Iteration 9500, loss = 11.5516\nI1206 13:03:11.501813 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 13:03:11.501838 22755 solver.cpp:244]     Train net output #1: loss = 11.5516 (* 1 = 11.5516 loss)\nI1206 13:03:11.568048 22755 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1206 13:04:44.666949 22755 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1206 13:05:37.205971 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1883\nI1206 13:05:37.206207 22755 solver.cpp:404]     Test net output #1: loss = 11.0978 (* 1 = 11.0978 loss)\nI1206 13:05:38.078022 22755 solver.cpp:228] Iteration 9600, loss = 8.48281\nI1206 13:05:38.078070 22755 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1206 13:05:38.078095 22755 solver.cpp:244]     Train net output #1: loss = 8.48281 (* 1 = 8.48281 loss)\nI1206 13:05:38.149037 22755 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1206 13:05:51.314488 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29741 > 5) by scale factor 0.943857\nI1206 13:07:11.221170 22755 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1206 13:08:03.808050 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1915\nI1206 13:08:03.808282 22755 solver.cpp:404]     Test net output #1: loss = 16.8467 (* 1 = 16.8467 loss)\nI1206 13:08:04.680460 22755 solver.cpp:228] Iteration 9700, loss = 16.2762\nI1206 13:08:04.680500 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 13:08:04.680526 22755 solver.cpp:244]     Train net output #1: loss = 16.2762 (* 1 = 16.2762 loss)\nI1206 13:08:04.758271 22755 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1206 13:08:32.014178 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13704 > 5) by scale factor 0.973323\nI1206 13:09:37.839074 22755 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1206 13:10:30.435272 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18505\nI1206 
13:10:30.435541 22755 solver.cpp:404]     Test net output #1: loss = 11.628 (* 1 = 11.628 loss)\nI1206 13:10:31.307179 22755 solver.cpp:228] Iteration 9800, loss = 11.6821\nI1206 13:10:31.307217 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 13:10:31.307242 22755 solver.cpp:244]     Train net output #1: loss = 11.6821 (* 1 = 11.6821 loss)\nI1206 13:10:31.382886 22755 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1206 13:11:44.711611 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07661 > 5) by scale factor 0.98491\nI1206 13:11:51.292155 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.9092 > 5) by scale factor 0.846138\nI1206 13:12:04.482678 22755 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1206 13:12:57.052871 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1477\nI1206 13:12:57.053139 22755 solver.cpp:404]     Test net output #1: loss = 16.8282 (* 1 = 16.8282 loss)\nI1206 13:12:57.923938 22755 solver.cpp:228] Iteration 9900, loss = 20.6338\nI1206 13:12:57.923971 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 13:12:57.923988 22755 solver.cpp:244]     Train net output #1: loss = 20.6338 (* 1 = 20.6338 loss)\nI1206 13:12:57.995561 22755 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1206 13:14:31.122334 22755 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1206 13:15:23.692603 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2009\nI1206 13:15:23.692826 22755 solver.cpp:404]     Test net output #1: loss = 12.7544 (* 1 = 12.7544 loss)\nI1206 13:15:24.564575 22755 solver.cpp:228] Iteration 10000, loss = 11.3765\nI1206 13:15:24.564615 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 13:15:24.564635 22755 solver.cpp:244]     Train net output #1: loss = 11.3765 (* 1 = 11.3765 loss)\nI1206 13:15:24.640709 22755 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1206 13:15:34.994464 22755 sgd_solver.cpp:152] Gradient 
clipping: scaling down gradients (L2 norm 5.28067 > 5) by scale factor 0.946849\nI1206 13:16:57.726541 22755 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1206 13:17:50.293361 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2059\nI1206 13:17:50.293582 22755 solver.cpp:404]     Test net output #1: loss = 13.0569 (* 1 = 13.0569 loss)\nI1206 13:17:51.165257 22755 solver.cpp:228] Iteration 10100, loss = 11.2403\nI1206 13:17:51.165300 22755 solver.cpp:244]     Train net output #0: accuracy = 0.29\nI1206 13:17:51.165318 22755 solver.cpp:244]     Train net output #1: loss = 11.2403 (* 1 = 11.2403 loss)\nI1206 13:17:51.241149 22755 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1206 13:19:24.301921 22755 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1206 13:20:16.886992 22755 solver.cpp:404]     Test net output #0: accuracy = 0.20595\nI1206 13:20:16.887214 22755 solver.cpp:404]     Test net output #1: loss = 14.3766 (* 1 = 14.3766 loss)\nI1206 13:20:17.759034 22755 solver.cpp:228] Iteration 10200, loss = 14.8112\nI1206 13:20:17.759069 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 13:20:17.759086 22755 solver.cpp:244]     Train net output #1: loss = 14.8112 (* 1 = 14.8112 loss)\nI1206 13:20:17.830674 22755 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1206 13:21:51.175401 22755 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1206 13:22:44.448361 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2289\nI1206 13:22:44.448634 22755 solver.cpp:404]     Test net output #1: loss = 32.3401 (* 1 = 32.3401 loss)\nI1206 13:22:45.320930 22755 solver.cpp:228] Iteration 10300, loss = 30.2972\nI1206 13:22:45.320974 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 13:22:45.320991 22755 solver.cpp:244]     Train net output #1: loss = 30.2972 (* 1 = 30.2972 loss)\nI1206 13:22:45.414655 22755 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1206 13:24:19.087927 22755 solver.cpp:337] Iteration 10400, 
Testing net (#0)\nI1206 13:25:12.374541 22755 solver.cpp:404]     Test net output #0: accuracy = 0.218\nI1206 13:25:12.374812 22755 solver.cpp:404]     Test net output #1: loss = 14.751 (* 1 = 14.751 loss)\nI1206 13:25:13.247663 22755 solver.cpp:228] Iteration 10400, loss = 13.1049\nI1206 13:25:13.247704 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 13:25:13.247720 22755 solver.cpp:244]     Train net output #1: loss = 13.1049 (* 1 = 13.1049 loss)\nI1206 13:25:13.325780 22755 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1206 13:26:15.678678 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1937 > 5) by scale factor 0.962704\nI1206 13:26:46.853418 22755 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1206 13:27:40.132655 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18605\nI1206 13:27:40.132930 22755 solver.cpp:404]     Test net output #1: loss = 21.1548 (* 1 = 21.1548 loss)\nI1206 13:27:41.005254 22755 solver.cpp:228] Iteration 10500, loss = 21.3627\nI1206 13:27:41.005306 22755 solver.cpp:244]     Train net output #0: accuracy = 0.26\nI1206 13:27:41.005323 22755 solver.cpp:244]     Train net output #1: loss = 21.3627 (* 1 = 21.3627 loss)\nI1206 13:27:41.081869 22755 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1206 13:29:14.722836 22755 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1206 13:30:07.993043 22755 solver.cpp:404]     Test net output #0: accuracy = 0.22635\nI1206 13:30:07.993329 22755 solver.cpp:404]     Test net output #1: loss = 8.52555 (* 1 = 8.52555 loss)\nI1206 13:30:08.866103 22755 solver.cpp:228] Iteration 10600, loss = 8.96521\nI1206 13:30:08.866143 22755 solver.cpp:244]     Train net output #0: accuracy = 0.3\nI1206 13:30:08.866160 22755 solver.cpp:244]     Train net output #1: loss = 8.96521 (* 1 = 8.96521 loss)\nI1206 13:30:08.936477 22755 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1206 13:31:42.593376 22755 solver.cpp:337] Iteration 10700, Testing net 
(#0)\nI1206 13:32:35.874631 22755 solver.cpp:404]     Test net output #0: accuracy = 0.19655\nI1206 13:32:35.874915 22755 solver.cpp:404]     Test net output #1: loss = 15.9754 (* 1 = 15.9754 loss)\nI1206 13:32:36.748432 22755 solver.cpp:228] Iteration 10700, loss = 15.2022\nI1206 13:32:36.748476 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 13:32:36.748492 22755 solver.cpp:244]     Train net output #1: loss = 15.2022 (* 1 = 15.2022 loss)\nI1206 13:32:36.820142 22755 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1206 13:34:10.469736 22755 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1206 13:35:03.747674 22755 solver.cpp:404]     Test net output #0: accuracy = 0.20165\nI1206 13:35:03.747965 22755 solver.cpp:404]     Test net output #1: loss = 16.5067 (* 1 = 16.5067 loss)\nI1206 13:35:04.621657 22755 solver.cpp:228] Iteration 10800, loss = 16.2272\nI1206 13:35:04.621713 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 13:35:04.621731 22755 solver.cpp:244]     Train net output #1: loss = 16.2272 (* 1 = 16.2272 loss)\nI1206 13:35:04.692628 22755 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1206 13:35:50.995996 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12015 > 5) by scale factor 0.976534\nI1206 13:36:38.253823 22755 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1206 13:37:31.538427 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15795\nI1206 13:37:31.538727 22755 solver.cpp:404]     Test net output #1: loss = 24.4289 (* 1 = 24.4289 loss)\nI1206 13:37:32.412040 22755 solver.cpp:228] Iteration 10900, loss = 22.4557\nI1206 13:37:32.412098 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 13:37:32.412117 22755 solver.cpp:244]     Train net output #1: loss = 22.4557 (* 1 = 22.4557 loss)\nI1206 13:37:32.480681 22755 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1206 13:39:06.044615 22755 solver.cpp:337] Iteration 11000, Testing net 
(#0)\nI1206 13:39:59.323498 22755 solver.cpp:404]     Test net output #0: accuracy = 0.13145\nI1206 13:39:59.323772 22755 solver.cpp:404]     Test net output #1: loss = 25.2101 (* 1 = 25.2101 loss)\nI1206 13:40:00.196511 22755 solver.cpp:228] Iteration 11000, loss = 26.397\nI1206 13:40:00.196552 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 13:40:00.196568 22755 solver.cpp:244]     Train net output #1: loss = 26.397 (* 1 = 26.397 loss)\nI1206 13:40:00.272914 22755 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1206 13:41:33.817174 22755 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1206 13:42:27.099716 22755 solver.cpp:404]     Test net output #0: accuracy = 0.159\nI1206 13:42:27.100003 22755 solver.cpp:404]     Test net output #1: loss = 23.3583 (* 1 = 23.3583 loss)\nI1206 13:42:27.973302 22755 solver.cpp:228] Iteration 11100, loss = 23.4045\nI1206 13:42:27.973350 22755 solver.cpp:244]     Train net output #0: accuracy = 0.25\nI1206 13:42:27.973374 22755 solver.cpp:244]     Train net output #1: loss = 23.4045 (* 1 = 23.4045 loss)\nI1206 13:42:28.046984 22755 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1206 13:44:01.620122 22755 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1206 13:44:54.896162 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17145\nI1206 13:44:54.896451 22755 solver.cpp:404]     Test net output #1: loss = 14.4267 (* 1 = 14.4267 loss)\nI1206 13:44:55.769963 22755 solver.cpp:228] Iteration 11200, loss = 15.5217\nI1206 13:44:55.770005 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 13:44:55.770023 22755 solver.cpp:244]     Train net output #1: loss = 15.5217 (* 1 = 15.5217 loss)\nI1206 13:44:55.842156 22755 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1206 13:45:41.269856 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.45762 > 5) by scale factor 0.91615\nI1206 13:45:42.218253 22755 sgd_solver.cpp:152] Gradient clipping: scaling down 
gradients (L2 norm 5.06835 > 5) by scale factor 0.986515\nI1206 13:45:47.895768 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.74581 > 5) by scale factor 0.8702\nI1206 13:45:48.843830 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.22484 > 5) by scale factor 0.956968\nI1206 13:45:50.737524 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08657 > 5) by scale factor 0.982981\nI1206 13:46:29.526329 22755 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1206 13:47:22.787470 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15195\nI1206 13:47:22.787752 22755 solver.cpp:404]     Test net output #1: loss = 23.7494 (* 1 = 23.7494 loss)\nI1206 13:47:23.660874 22755 solver.cpp:228] Iteration 11300, loss = 22.3771\nI1206 13:47:23.660918 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 13:47:23.660935 22755 solver.cpp:244]     Train net output #1: loss = 22.3771 (* 1 = 22.3771 loss)\nI1206 13:47:23.732914 22755 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1206 13:48:57.265733 22755 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1206 13:49:50.530092 22755 solver.cpp:404]     Test net output #0: accuracy = 0.19975\nI1206 13:49:50.530360 22755 solver.cpp:404]     Test net output #1: loss = 17.8021 (* 1 = 17.8021 loss)\nI1206 13:49:51.403241 22755 solver.cpp:228] Iteration 11400, loss = 14.8486\nI1206 13:49:51.403287 22755 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1206 13:49:51.403304 22755 solver.cpp:244]     Train net output #1: loss = 14.8486 (* 1 = 14.8486 loss)\nI1206 13:49:51.478706 22755 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1206 13:51:25.107748 22755 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1206 13:52:18.367352 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16435\nI1206 13:52:18.367635 22755 solver.cpp:404]     Test net output #1: loss = 22.1035 (* 1 = 22.1035 loss)\nI1206 13:52:19.241302 
22755 solver.cpp:228] Iteration 11500, loss = 17.0392\nI1206 13:52:19.241354 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 13:52:19.241371 22755 solver.cpp:244]     Train net output #1: loss = 17.0392 (* 1 = 17.0392 loss)\nI1206 13:52:19.312644 22755 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1206 13:53:52.968924 22755 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1206 13:54:45.472769 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1696\nI1206 13:54:45.473052 22755 solver.cpp:404]     Test net output #1: loss = 28.0374 (* 1 = 28.0374 loss)\nI1206 13:54:46.353380 22755 solver.cpp:228] Iteration 11600, loss = 26.5514\nI1206 13:54:46.353418 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 13:54:46.353435 22755 solver.cpp:244]     Train net output #1: loss = 26.5514 (* 1 = 26.5514 loss)\nI1206 13:54:46.430246 22755 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1206 13:56:20.021185 22755 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1206 13:57:13.284312 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1563\nI1206 13:57:13.284582 22755 solver.cpp:404]     Test net output #1: loss = 37.0512 (* 1 = 37.0512 loss)\nI1206 13:57:14.174221 22755 solver.cpp:228] Iteration 11700, loss = 38.7123\nI1206 13:57:14.174264 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 13:57:14.174280 22755 solver.cpp:244]     Train net output #1: loss = 38.7123 (* 1 = 38.7123 loss)\nI1206 13:57:14.230644 22755 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1206 13:57:17.078583 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03615 > 5) by scale factor 0.992822\nI1206 13:58:47.876034 22755 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1206 13:59:41.134282 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16925\nI1206 13:59:41.134551 22755 solver.cpp:404]     Test net output #1: loss = 28.5624 (* 1 = 28.5624 loss)\nI1206 13:59:42.006724 22755 
solver.cpp:228] Iteration 11800, loss = 26.9972\nI1206 13:59:42.006772 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 13:59:42.006788 22755 solver.cpp:244]     Train net output #1: loss = 26.9972 (* 1 = 26.9972 loss)\nI1206 13:59:42.084102 22755 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1206 14:00:56.817755 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21344 > 5) by scale factor 0.959059\nI1206 14:01:15.737609 22755 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1206 14:02:09.011456 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1548\nI1206 14:02:09.011749 22755 solver.cpp:404]     Test net output #1: loss = 22.339 (* 1 = 22.339 loss)\nI1206 14:02:09.884227 22755 solver.cpp:228] Iteration 11900, loss = 20.8624\nI1206 14:02:09.884264 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 14:02:09.884280 22755 solver.cpp:244]     Train net output #1: loss = 20.8624 (* 1 = 20.8624 loss)\nI1206 14:02:09.956919 22755 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1206 14:02:55.359340 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06987 > 5) by scale factor 0.986219\nI1206 14:03:43.589193 22755 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1206 14:04:36.861484 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18015\nI1206 14:04:36.861758 22755 solver.cpp:404]     Test net output #1: loss = 22.6879 (* 1 = 22.6879 loss)\nI1206 14:04:37.734031 22755 solver.cpp:228] Iteration 12000, loss = 24.8812\nI1206 14:04:37.734071 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 14:04:37.734086 22755 solver.cpp:244]     Train net output #1: loss = 24.8812 (* 1 = 24.8812 loss)\nI1206 14:04:37.809471 22755 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1206 14:06:11.444428 22755 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1206 14:07:04.711323 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1454\nI1206 
14:07:04.711596 22755 solver.cpp:404]     Test net output #1: loss = 28.1036 (* 1 = 28.1036 loss)\nI1206 14:07:05.584193 22755 solver.cpp:228] Iteration 12100, loss = 24.8904\nI1206 14:07:05.584237 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 14:07:05.584255 22755 solver.cpp:244]     Train net output #1: loss = 24.8904 (* 1 = 24.8904 loss)\nI1206 14:07:05.660370 22755 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1206 14:07:48.222353 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18527 > 5) by scale factor 0.96427\nI1206 14:07:50.115568 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11004 > 5) by scale factor 0.978467\nI1206 14:08:11.858676 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26498 > 5) by scale factor 0.949671\nI1206 14:08:39.278542 22755 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1206 14:09:32.258612 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1329\nI1206 14:09:32.258883 22755 solver.cpp:404]     Test net output #1: loss = 21.27 (* 1 = 21.27 loss)\nI1206 14:09:33.131489 22755 solver.cpp:228] Iteration 12200, loss = 23.0346\nI1206 14:09:33.131531 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 14:09:33.131549 22755 solver.cpp:244]     Train net output #1: loss = 23.0346 (* 1 = 23.0346 loss)\nI1206 14:09:33.200404 22755 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1206 14:11:06.871858 22755 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1206 14:12:00.147752 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17605\nI1206 14:12:00.148047 22755 solver.cpp:404]     Test net output #1: loss = 24.8438 (* 1 = 24.8438 loss)\nI1206 14:12:01.020503 22755 solver.cpp:228] Iteration 12300, loss = 22.2899\nI1206 14:12:01.020541 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 14:12:01.020558 22755 solver.cpp:244]     Train net output #1: loss = 22.29 (* 1 = 22.29 
loss)\nI1206 14:12:01.092567 22755 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1206 14:13:34.671963 22755 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1206 14:14:27.951614 22755 solver.cpp:404]     Test net output #0: accuracy = 0.2438\nI1206 14:14:27.951910 22755 solver.cpp:404]     Test net output #1: loss = 17.5776 (* 1 = 17.5776 loss)\nI1206 14:14:28.824777 22755 solver.cpp:228] Iteration 12400, loss = 15.6954\nI1206 14:14:28.824828 22755 solver.cpp:244]     Train net output #0: accuracy = 0.28\nI1206 14:14:28.824846 22755 solver.cpp:244]     Train net output #1: loss = 15.6954 (* 1 = 15.6954 loss)\nI1206 14:14:28.900825 22755 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1206 14:15:04.840072 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.11829 > 5) by scale factor 0.976889\nI1206 14:15:15.241751 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03926 > 5) by scale factor 0.992209\nI1206 14:16:02.512903 22755 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1206 14:16:55.791606 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1356\nI1206 14:16:55.791903 22755 solver.cpp:404]     Test net output #1: loss = 30.4865 (* 1 = 30.4865 loss)\nI1206 14:16:56.665185 22755 solver.cpp:228] Iteration 12500, loss = 27.9908\nI1206 14:16:56.665230 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 14:16:56.665246 22755 solver.cpp:244]     Train net output #1: loss = 27.9908 (* 1 = 27.9908 loss)\nI1206 14:16:56.735404 22755 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1206 14:17:24.160755 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.52997 > 5) by scale factor 0.904163\nI1206 14:17:25.108546 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.76993 > 5) by scale factor 0.866562\nI1206 14:18:30.293203 22755 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1206 14:19:23.581864 22755 solver.cpp:404]     
Test net output #0: accuracy = 0.1812\nI1206 14:19:23.582170 22755 solver.cpp:404]     Test net output #1: loss = 13.2617 (* 1 = 13.2617 loss)\nI1206 14:19:24.455437 22755 solver.cpp:228] Iteration 12600, loss = 15.162\nI1206 14:19:24.455480 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 14:19:24.455498 22755 solver.cpp:244]     Train net output #1: loss = 15.162 (* 1 = 15.162 loss)\nI1206 14:19:24.531605 22755 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1206 14:20:58.141033 22755 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1206 14:21:51.419162 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1923\nI1206 14:21:51.419440 22755 solver.cpp:404]     Test net output #1: loss = 14.7146 (* 1 = 14.7146 loss)\nI1206 14:21:52.292843 22755 solver.cpp:228] Iteration 12700, loss = 16.4038\nI1206 14:21:52.292886 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 14:21:52.292903 22755 solver.cpp:244]     Train net output #1: loss = 16.4038 (* 1 = 16.4038 loss)\nI1206 14:21:52.364186 22755 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1206 14:23:00.464087 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0401 > 5) by scale factor 0.992043\nI1206 14:23:26.002948 22755 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1206 14:24:19.266425 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15445\nI1206 14:24:19.266727 22755 solver.cpp:404]     Test net output #1: loss = 16.9912 (* 1 = 16.9912 loss)\nI1206 14:24:20.139256 22755 solver.cpp:228] Iteration 12800, loss = 18.1076\nI1206 14:24:20.139305 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 14:24:20.139324 22755 solver.cpp:244]     Train net output #1: loss = 18.1076 (* 1 = 18.1076 loss)\nI1206 14:24:20.213796 22755 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI1206 14:25:53.786062 22755 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1206 14:26:47.061467 22755 solver.cpp:404]     Test net output 
#0: accuracy = 0.12935\nI1206 14:26:47.061724 22755 solver.cpp:404]     Test net output #1: loss = 31.0254 (* 1 = 31.0254 loss)\nI1206 14:26:47.934438 22755 solver.cpp:228] Iteration 12900, loss = 31.1837\nI1206 14:26:47.934489 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 14:26:47.934509 22755 solver.cpp:244]     Train net output #1: loss = 31.1837 (* 1 = 31.1837 loss)\nI1206 14:26:48.008235 22755 sgd_solver.cpp:166] Iteration 12900, lr = 1.935\nI1206 14:27:04.093065 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.68085 > 5) by scale factor 0.88015\nI1206 14:27:42.856215 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.46526 > 5) by scale factor 0.91487\nI1206 14:28:01.759163 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04792 > 5) by scale factor 0.990506\nI1206 14:28:21.616911 22755 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1206 14:29:14.898012 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17135\nI1206 14:29:14.898283 22755 solver.cpp:404]     Test net output #1: loss = 20.9893 (* 1 = 20.9893 loss)\nI1206 14:29:15.771167 22755 solver.cpp:228] Iteration 13000, loss = 18.1701\nI1206 14:29:15.771209 22755 solver.cpp:244]     Train net output #0: accuracy = 0.22\nI1206 14:29:15.771225 22755 solver.cpp:244]     Train net output #1: loss = 18.1701 (* 1 = 18.1701 loss)\nI1206 14:29:15.849328 22755 sgd_solver.cpp:166] Iteration 13000, lr = 1.95\nI1206 14:30:49.566944 22755 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1206 14:31:42.775996 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15065\nI1206 14:31:42.776288 22755 solver.cpp:404]     Test net output #1: loss = 13.6608 (* 1 = 13.6608 loss)\nI1206 14:31:43.648423 22755 solver.cpp:228] Iteration 13100, loss = 14.2847\nI1206 14:31:43.648465 22755 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1206 14:31:43.648483 22755 solver.cpp:244]     Train net 
output #1: loss = 14.2847 (* 1 = 14.2847 loss)\nI1206 14:31:43.720245 22755 sgd_solver.cpp:166] Iteration 13100, lr = 1.965\nI1206 14:32:05.495622 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.31568 > 5) by scale factor 0.940613\nI1206 14:32:07.390451 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.42445 > 5) by scale factor 0.921753\nI1206 14:33:17.411916 22755 solver.cpp:337] Iteration 13200, Testing net (#0)\nI1206 14:34:10.630970 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1343\nI1206 14:34:10.631279 22755 solver.cpp:404]     Test net output #1: loss = 26.8513 (* 1 = 26.8513 loss)\nI1206 14:34:11.504124 22755 solver.cpp:228] Iteration 13200, loss = 28.5478\nI1206 14:34:11.504176 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 14:34:11.504195 22755 solver.cpp:244]     Train net output #1: loss = 28.5478 (* 1 = 28.5478 loss)\nI1206 14:34:11.579210 22755 sgd_solver.cpp:166] Iteration 13200, lr = 1.98\nI1206 14:35:45.178030 22755 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1206 14:36:38.381891 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18425\nI1206 14:36:38.382184 22755 solver.cpp:404]     Test net output #1: loss = 29.4306 (* 1 = 29.4306 loss)\nI1206 14:36:39.255700 22755 solver.cpp:228] Iteration 13300, loss = 31.561\nI1206 14:36:39.255754 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 14:36:39.255774 22755 solver.cpp:244]     Train net output #1: loss = 31.561 (* 1 = 31.561 loss)\nI1206 14:36:39.329008 22755 sgd_solver.cpp:166] Iteration 13300, lr = 1.995\nI1206 14:38:12.923898 22755 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1206 14:39:06.196887 22755 solver.cpp:404]     Test net output #0: accuracy = 0.20035\nI1206 14:39:06.197157 22755 solver.cpp:404]     Test net output #1: loss = 14.9529 (* 1 = 14.9529 loss)\nI1206 14:39:07.070281 22755 solver.cpp:228] Iteration 13400, loss = 12.1018\nI1206 
14:39:07.070324 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 14:39:07.070343 22755 solver.cpp:244]     Train net output #1: loss = 12.1018 (* 1 = 12.1018 loss)\nI1206 14:39:07.147714 22755 sgd_solver.cpp:166] Iteration 13400, lr = 2.01\nI1206 14:39:31.715862 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26103 > 5) by scale factor 0.950385\nI1206 14:39:59.114866 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01337 > 5) by scale factor 0.997333\nI1206 14:40:04.789918 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.17348 > 5) by scale factor 0.966468\nI1206 14:40:06.681905 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.83286 > 5) by scale factor 0.857213\nI1206 14:40:08.574245 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.34181 > 5) by scale factor 0.936012\nI1206 14:40:10.465796 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21985 > 5) by scale factor 0.957882\nI1206 14:40:40.702325 22755 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1206 14:41:33.977156 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1777\nI1206 14:41:33.977448 22755 solver.cpp:404]     Test net output #1: loss = 29.6283 (* 1 = 29.6283 loss)\nI1206 14:41:34.851438 22755 solver.cpp:228] Iteration 13500, loss = 25.6267\nI1206 14:41:34.851480 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 14:41:34.851498 22755 solver.cpp:244]     Train net output #1: loss = 25.6267 (* 1 = 25.6267 loss)\nI1206 14:41:34.925667 22755 sgd_solver.cpp:166] Iteration 13500, lr = 2.025\nI1206 14:42:22.143108 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.17327 > 5) by scale factor 0.966507\nI1206 14:43:03.674000 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02834 > 5) by scale factor 0.994364\nI1206 
14:43:08.394846 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00627 > 5) by scale factor 0.998748\nI1206 14:43:08.406829 22755 solver.cpp:337] Iteration 13600, Testing net (#0)\nI1206 14:44:01.449514 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15505\nI1206 14:44:01.449810 22755 solver.cpp:404]     Test net output #1: loss = 38.9121 (* 1 = 38.9121 loss)\nI1206 14:44:02.323331 22755 solver.cpp:228] Iteration 13600, loss = 37.4081\nI1206 14:44:02.323384 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 14:44:02.323403 22755 solver.cpp:244]     Train net output #1: loss = 37.4081 (* 1 = 37.4081 loss)\nI1206 14:44:02.397275 22755 sgd_solver.cpp:166] Iteration 13600, lr = 2.04\nI1206 14:44:08.074614 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01133 > 5) by scale factor 0.997739\nI1206 14:45:35.960641 22755 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1206 14:46:29.185565 22755 solver.cpp:404]     Test net output #0: accuracy = 0.205\nI1206 14:46:29.185883 22755 solver.cpp:404]     Test net output #1: loss = 23.9618 (* 1 = 23.9618 loss)\nI1206 14:46:30.060875 22755 solver.cpp:228] Iteration 13700, loss = 21.6014\nI1206 14:46:30.060922 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 14:46:30.060948 22755 solver.cpp:244]     Train net output #1: loss = 21.6014 (* 1 = 21.6014 loss)\nI1206 14:46:30.132611 22755 sgd_solver.cpp:166] Iteration 13700, lr = 2.055\nI1206 14:46:54.732697 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.23782 > 5) by scale factor 0.954596\nI1206 14:47:55.237910 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.8296 > 5) by scale factor 0.857692\nI1206 14:48:03.757261 22755 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1206 14:48:57.023216 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17785\nI1206 14:48:57.023490 22755 solver.cpp:404]     Test 
net output #1: loss = 19.6335 (* 1 = 19.6335 loss)\nI1206 14:48:57.897502 22755 solver.cpp:228] Iteration 13800, loss = 19.2701\nI1206 14:48:57.897552 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 14:48:57.897575 22755 solver.cpp:244]     Train net output #1: loss = 19.2701 (* 1 = 19.2701 loss)\nI1206 14:48:57.972286 22755 sgd_solver.cpp:166] Iteration 13800, lr = 2.07\nI1206 14:50:31.577219 22755 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1206 14:51:24.857002 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1788\nI1206 14:51:24.857290 22755 solver.cpp:404]     Test net output #1: loss = 30.3176 (* 1 = 30.3176 loss)\nI1206 14:51:25.731011 22755 solver.cpp:228] Iteration 13900, loss = 27.5072\nI1206 14:51:25.731066 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 14:51:25.731093 22755 solver.cpp:244]     Train net output #1: loss = 27.5072 (* 1 = 27.5072 loss)\nI1206 14:51:25.811679 22755 sgd_solver.cpp:166] Iteration 13900, lr = 2.085\nI1206 14:52:59.411778 22755 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1206 14:53:52.676049 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16275\nI1206 14:53:52.676321 22755 solver.cpp:404]     Test net output #1: loss = 30.1029 (* 1 = 30.1029 loss)\nI1206 14:53:53.549669 22755 solver.cpp:228] Iteration 14000, loss = 30.3602\nI1206 14:53:53.549727 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 14:53:53.549746 22755 solver.cpp:244]     Train net output #1: loss = 30.3602 (* 1 = 30.3602 loss)\nI1206 14:53:53.624320 22755 sgd_solver.cpp:166] Iteration 14000, lr = 2.1\nI1206 14:54:33.312729 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.18085 > 5) by scale factor 0.965092\nI1206 14:55:15.820575 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02031 > 5) by scale factor 0.995955\nI1206 14:55:27.169721 22755 solver.cpp:337] Iteration 14100, Testing net (#0)\nI1206 
14:56:20.385329 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1774\nI1206 14:56:20.385629 22755 solver.cpp:404]     Test net output #1: loss = 21.6073 (* 1 = 21.6073 loss)\nI1206 14:56:21.258747 22755 solver.cpp:228] Iteration 14100, loss = 22.1242\nI1206 14:56:21.258800 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 14:56:21.258821 22755 solver.cpp:244]     Train net output #1: loss = 22.1242 (* 1 = 22.1242 loss)\nI1206 14:56:21.337137 22755 sgd_solver.cpp:166] Iteration 14100, lr = 2.115\nI1206 14:57:15.215345 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0057 > 5) by scale factor 0.998862\nI1206 14:57:17.108000 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.53329 > 5) by scale factor 0.903622\nI1206 14:57:26.557967 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.55256 > 5) by scale factor 0.900486\nI1206 14:57:27.504772 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.75498 > 5) by scale factor 0.868812\nI1206 14:57:28.452239 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.09214 > 5) by scale factor 0.981906\nI1206 14:57:54.923351 22755 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1206 14:58:48.029558 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1813\nI1206 14:58:48.029863 22755 solver.cpp:404]     Test net output #1: loss = 26.9811 (* 1 = 26.9811 loss)\nI1206 14:58:48.903640 22755 solver.cpp:228] Iteration 14200, loss = 26.874\nI1206 14:58:48.903692 22755 solver.cpp:244]     Train net output #0: accuracy = 0.24\nI1206 14:58:48.903709 22755 solver.cpp:244]     Train net output #1: loss = 26.874 (* 1 = 26.874 loss)\nI1206 14:58:48.978509 22755 sgd_solver.cpp:166] Iteration 14200, lr = 2.13\nI1206 15:00:22.562721 22755 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1206 15:01:15.682081 22755 solver.cpp:404]     Test net output #0: accuracy = 
0.18075\nI1206 15:01:15.682343 22755 solver.cpp:404]     Test net output #1: loss = 33.3317 (* 1 = 33.3317 loss)\nI1206 15:01:16.556107 22755 solver.cpp:228] Iteration 14300, loss = 33.2058\nI1206 15:01:16.556161 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 15:01:16.556180 22755 solver.cpp:244]     Train net output #1: loss = 33.2058 (* 1 = 33.2058 loss)\nI1206 15:01:16.626541 22755 sgd_solver.cpp:166] Iteration 14300, lr = 2.145\nI1206 15:01:24.195574 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10649 > 5) by scale factor 0.979147\nI1206 15:01:26.087575 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.18475 > 5) by scale factor 0.80844\nI1206 15:01:27.033675 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.44096 > 5) by scale factor 0.918956\nI1206 15:01:27.980618 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21573 > 5) by scale factor 0.958638\nI1206 15:02:03.877079 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.22096 > 5) by scale factor 0.957678\nI1206 15:02:05.769057 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.27314 > 5) by scale factor 0.948202\nI1206 15:02:50.168270 22755 solver.cpp:337] Iteration 14400, Testing net (#0)\nI1206 15:03:43.438293 22755 solver.cpp:404]     Test net output #0: accuracy = 0.11515\nI1206 15:03:43.438567 22755 solver.cpp:404]     Test net output #1: loss = 48.2355 (* 1 = 48.2355 loss)\nI1206 15:03:44.311924 22755 solver.cpp:228] Iteration 14400, loss = 50.7212\nI1206 15:03:44.311977 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 15:03:44.311996 22755 solver.cpp:244]     Train net output #1: loss = 50.7212 (* 1 = 50.7212 loss)\nI1206 15:03:44.381937 22755 sgd_solver.cpp:166] Iteration 14400, lr = 2.16\nI1206 15:05:17.898339 22755 solver.cpp:337] Iteration 14500, Testing net (#0)\nI1206 
15:06:10.942862 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1716\nI1206 15:06:10.943130 22755 solver.cpp:404]     Test net output #1: loss = 27.9125 (* 1 = 27.9125 loss)\nI1206 15:06:11.816277 22755 solver.cpp:228] Iteration 14500, loss = 24.4872\nI1206 15:06:11.816332 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 15:06:11.816351 22755 solver.cpp:244]     Train net output #1: loss = 24.4872 (* 1 = 24.4872 loss)\nI1206 15:06:11.891377 22755 sgd_solver.cpp:166] Iteration 14500, lr = 2.175\nI1206 15:07:45.423527 22755 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1206 15:08:38.695559 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15305\nI1206 15:08:38.695837 22755 solver.cpp:404]     Test net output #1: loss = 38.8531 (* 1 = 38.8531 loss)\nI1206 15:08:39.569103 22755 solver.cpp:228] Iteration 14600, loss = 42.5182\nI1206 15:08:39.569156 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 15:08:39.569175 22755 solver.cpp:244]     Train net output #1: loss = 42.5182 (* 1 = 42.5182 loss)\nI1206 15:08:39.642189 22755 sgd_solver.cpp:166] Iteration 14600, lr = 2.19\nI1206 15:10:02.782794 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.33348 > 5) by scale factor 0.937474\nI1206 15:10:13.182238 22755 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1206 15:11:06.456897 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1845\nI1206 15:11:06.457171 22755 solver.cpp:404]     Test net output #1: loss = 19.0092 (* 1 = 19.0092 loss)\nI1206 15:11:07.330740 22755 solver.cpp:228] Iteration 14700, loss = 17.401\nI1206 15:11:07.330795 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 15:11:07.330813 22755 solver.cpp:244]     Train net output #1: loss = 17.401 (* 1 = 17.401 loss)\nI1206 15:11:07.403270 22755 sgd_solver.cpp:166] Iteration 14700, lr = 2.205\nI1206 15:12:41.037765 22755 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1206 
15:13:34.312084 22755 solver.cpp:404]     Test net output #0: accuracy = 0.11525\nI1206 15:13:34.312361 22755 solver.cpp:404]     Test net output #1: loss = 33.0776 (* 1 = 33.0776 loss)\nI1206 15:13:35.185652 22755 solver.cpp:228] Iteration 14800, loss = 33.4799\nI1206 15:13:35.185709 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 15:13:35.185729 22755 solver.cpp:244]     Train net output #1: loss = 33.4799 (* 1 = 33.4799 loss)\nI1206 15:13:35.260761 22755 sgd_solver.cpp:166] Iteration 14800, lr = 2.22\nI1206 15:14:11.183390 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08185 > 5) by scale factor 0.983894\nI1206 15:15:08.821997 22755 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1206 15:16:02.089174 22755 solver.cpp:404]     Test net output #0: accuracy = 0.11885\nI1206 15:16:02.089447 22755 solver.cpp:404]     Test net output #1: loss = 39.9642 (* 1 = 39.9642 loss)\nI1206 15:16:02.962813 22755 solver.cpp:228] Iteration 14900, loss = 37.3754\nI1206 15:16:02.962867 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 15:16:02.962883 22755 solver.cpp:244]     Train net output #1: loss = 37.3754 (* 1 = 37.3754 loss)\nI1206 15:16:03.036255 22755 sgd_solver.cpp:166] Iteration 14900, lr = 2.235\nI1206 15:16:03.992096 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.19733 > 5) by scale factor 0.962032\nI1206 15:17:36.667384 22755 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1206 15:18:29.932535 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18385\nI1206 15:18:29.932833 22755 solver.cpp:404]     Test net output #1: loss = 30.9469 (* 1 = 30.9469 loss)\nI1206 15:18:30.805902 22755 solver.cpp:228] Iteration 15000, loss = 28.5467\nI1206 15:18:30.805954 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 15:18:30.805974 22755 solver.cpp:244]     Train net output #1: loss = 28.5467 (* 1 = 28.5467 loss)\nI1206 15:18:30.883047 22755 
sgd_solver.cpp:166] Iteration 15000, lr = 2.25\nI1206 15:19:30.397619 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12862 > 5) by scale factor 0.974922\nI1206 15:20:04.404887 22755 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1206 15:20:57.682173 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16115\nI1206 15:20:57.682472 22755 solver.cpp:404]     Test net output #1: loss = 35.3499 (* 1 = 35.3499 loss)\nI1206 15:20:58.556206 22755 solver.cpp:228] Iteration 15100, loss = 36.9272\nI1206 15:20:58.556251 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 15:20:58.556268 22755 solver.cpp:244]     Train net output #1: loss = 36.9272 (* 1 = 36.9272 loss)\nI1206 15:20:58.631340 22755 sgd_solver.cpp:166] Iteration 15100, lr = 2.265\nI1206 15:21:27.951030 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07912 > 5) by scale factor 0.984422\nI1206 15:21:44.022147 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.53246 > 5) by scale factor 0.903757\nI1206 15:22:08.591529 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.20205 > 5) by scale factor 0.96116\nI1206 15:22:11.428468 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08537 > 5) by scale factor 0.983212\nI1206 15:22:32.232569 22755 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1206 15:23:25.500789 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14955\nI1206 15:23:25.501096 22755 solver.cpp:404]     Test net output #1: loss = 20.3775 (* 1 = 20.3775 loss)\nI1206 15:23:26.374455 22755 solver.cpp:228] Iteration 15200, loss = 19.3608\nI1206 15:23:26.374507 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 15:23:26.374526 22755 solver.cpp:244]     Train net output #1: loss = 19.3608 (* 1 = 19.3608 loss)\nI1206 15:23:26.452383 22755 sgd_solver.cpp:166] Iteration 15200, lr = 2.28\nI1206 15:23:52.910862 22755 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.19565 > 5) by scale factor 0.962343\nI1206 15:23:59.524087 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.09316 > 5) by scale factor 0.981708\nI1206 15:24:06.138397 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01678 > 5) by scale factor 0.996655\nI1206 15:24:08.974056 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00528 > 5) by scale factor 0.998945\nI1206 15:24:59.978657 22755 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1206 15:25:53.255084 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1427\nI1206 15:25:53.255359 22755 solver.cpp:404]     Test net output #1: loss = 35.2263 (* 1 = 35.2263 loss)\nI1206 15:25:54.128280 22755 solver.cpp:228] Iteration 15300, loss = 33.5728\nI1206 15:25:54.128334 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 15:25:54.128352 22755 solver.cpp:244]     Train net output #1: loss = 33.5728 (* 1 = 33.5728 loss)\nI1206 15:25:54.200476 22755 sgd_solver.cpp:166] Iteration 15300, lr = 2.295\nI1206 15:27:27.733546 22755 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1206 15:28:21.011572 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1329\nI1206 15:28:21.011888 22755 solver.cpp:404]     Test net output #1: loss = 28.3347 (* 1 = 28.3347 loss)\nI1206 15:28:21.885740 22755 solver.cpp:228] Iteration 15400, loss = 28.0506\nI1206 15:28:21.885793 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 15:28:21.885812 22755 solver.cpp:244]     Train net output #1: loss = 28.0506 (* 1 = 28.0506 loss)\nI1206 15:28:21.955325 22755 sgd_solver.cpp:166] Iteration 15400, lr = 2.31\nI1206 15:28:45.584164 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.16877 > 5) by scale factor 0.967348\nI1206 15:28:49.365808 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.37245 
> 5) by scale factor 0.930675\nI1206 15:28:50.312916 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.47209 > 5) by scale factor 0.913728\nI1206 15:28:52.204452 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.30277 > 5) by scale factor 0.942904\nI1206 15:28:55.986493 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00293 > 5) by scale factor 0.999414\nI1206 15:29:55.499397 22755 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1206 15:30:48.770160 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18475\nI1206 15:30:48.770431 22755 solver.cpp:404]     Test net output #1: loss = 27.4183 (* 1 = 27.4183 loss)\nI1206 15:30:49.643260 22755 solver.cpp:228] Iteration 15500, loss = 28.0993\nI1206 15:30:49.643317 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 15:30:49.643337 22755 solver.cpp:244]     Train net output #1: loss = 28.0993 (* 1 = 28.0993 loss)\nI1206 15:30:49.716433 22755 sgd_solver.cpp:166] Iteration 15500, lr = 2.325\nI1206 15:31:25.632858 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13469 > 5) by scale factor 0.973769\nI1206 15:32:23.264051 22755 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1206 15:33:16.373353 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14075\nI1206 15:33:16.373656 22755 solver.cpp:404]     Test net output #1: loss = 37.9519 (* 1 = 37.9519 loss)\nI1206 15:33:17.246383 22755 solver.cpp:228] Iteration 15600, loss = 37.6188\nI1206 15:33:17.246417 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 15:33:17.246433 22755 solver.cpp:244]     Train net output #1: loss = 37.6188 (* 1 = 37.6188 loss)\nI1206 15:33:17.319332 22755 sgd_solver.cpp:166] Iteration 15600, lr = 2.34\nI1206 15:34:34.641386 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.20239 > 5) by scale factor 0.961097\nI1206 15:34:42.185273 22755 
sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07278 > 5) by scale factor 0.985654\nI1206 15:34:50.679622 22755 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1206 15:35:43.234694 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1571\nI1206 15:35:43.234962 22755 solver.cpp:404]     Test net output #1: loss = 30.0697 (* 1 = 30.0697 loss)\nI1206 15:35:44.105630 22755 solver.cpp:228] Iteration 15700, loss = 28.3494\nI1206 15:35:44.105661 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 15:35:44.105677 22755 solver.cpp:244]     Train net output #1: loss = 28.3494 (* 1 = 28.3494 loss)\nI1206 15:35:44.178117 22755 sgd_solver.cpp:166] Iteration 15700, lr = 2.355\nI1206 15:37:05.287083 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05583 > 5) by scale factor 0.988958\nI1206 15:37:07.174643 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.47316 > 5) by scale factor 0.913549\nI1206 15:37:08.119474 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.82106 > 5) by scale factor 0.85895\nI1206 15:37:09.064515 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03133 > 5) by scale factor 0.993774\nI1206 15:37:10.009423 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.88025 > 5) by scale factor 0.850304\nI1206 15:37:17.562299 22755 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1206 15:38:10.091071 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14605\nI1206 15:38:10.091358 22755 solver.cpp:404]     Test net output #1: loss = 35.9871 (* 1 = 35.9871 loss)\nI1206 15:38:10.962743 22755 solver.cpp:228] Iteration 15800, loss = 35.6884\nI1206 15:38:10.962775 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 15:38:10.962792 22755 solver.cpp:244]     Train net output #1: loss = 35.6884 (* 1 = 35.6884 loss)\nI1206 15:38:11.037788 22755 
sgd_solver.cpp:166] Iteration 15800, lr = 2.37\nI1206 15:39:44.392122 22755 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1206 15:40:36.928659 22755 solver.cpp:404]     Test net output #0: accuracy = 0.20635\nI1206 15:40:36.928936 22755 solver.cpp:404]     Test net output #1: loss = 19.8053 (* 1 = 19.8053 loss)\nI1206 15:40:37.799401 22755 solver.cpp:228] Iteration 15900, loss = 15.897\nI1206 15:40:37.799433 22755 solver.cpp:244]     Train net output #0: accuracy = 0.25\nI1206 15:40:37.799450 22755 solver.cpp:244]     Train net output #1: loss = 15.897 (* 1 = 15.897 loss)\nI1206 15:40:37.878767 22755 sgd_solver.cpp:166] Iteration 15900, lr = 2.385\nI1206 15:41:16.576114 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02426 > 5) by scale factor 0.995171\nI1206 15:41:20.352210 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.49523 > 5) by scale factor 0.90988\nI1206 15:41:21.297122 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.48699 > 5) by scale factor 0.911247\nI1206 15:42:11.204289 22755 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1206 15:43:03.701778 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15235\nI1206 15:43:03.701999 22755 solver.cpp:404]     Test net output #1: loss = 29.013 (* 1 = 29.013 loss)\nI1206 15:43:04.572654 22755 solver.cpp:228] Iteration 16000, loss = 29.8769\nI1206 15:43:04.572686 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 15:43:04.572702 22755 solver.cpp:244]     Train net output #1: loss = 29.8769 (* 1 = 29.8769 loss)\nI1206 15:43:04.648813 22755 sgd_solver.cpp:166] Iteration 16000, lr = 2.4\nI1206 15:44:34.981576 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10868 > 5) by scale factor 0.978726\nI1206 15:44:37.819232 22755 solver.cpp:337] Iteration 16100, Testing net (#0)\nI1206 15:45:30.310056 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1625\nI1206 
15:45:30.310343 22755 solver.cpp:404]     Test net output #1: loss = 27.5029 (* 1 = 27.5029 loss)\nI1206 15:45:31.181213 22755 solver.cpp:228] Iteration 16100, loss = 28.6415\nI1206 15:45:31.181244 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 15:45:31.181260 22755 solver.cpp:244]     Train net output #1: loss = 28.6415 (* 1 = 28.6415 loss)\nI1206 15:45:31.254628 22755 sgd_solver.cpp:166] Iteration 16100, lr = 2.415\nI1206 15:45:39.724171 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02448 > 5) by scale factor 0.995127\nI1206 15:46:21.081099 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.22335 > 5) by scale factor 0.95724\nI1206 15:46:30.472086 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.60165 > 5) by scale factor 0.892594\nI1206 15:46:32.351321 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.31618 > 5) by scale factor 0.940525\nI1206 15:46:33.292367 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04324 > 5) by scale factor 0.991425\nI1206 15:46:35.171408 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.60489 > 5) by scale factor 0.892078\nI1206 15:46:37.989694 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.27671 > 5) by scale factor 0.947561\nI1206 15:46:38.930095 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.67587 > 5) by scale factor 0.880922\nI1206 15:46:48.319855 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.69014 > 5) by scale factor 0.878712\nI1206 15:46:49.260488 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.37289 > 5) by scale factor 0.930597\nI1206 15:47:04.318259 22755 solver.cpp:337] Iteration 16200, Testing net (#0)\nI1206 15:47:56.822192 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1683\nI1206 
15:47:56.822463 22755 solver.cpp:404]     Test net output #1: loss = 27.3807 (* 1 = 27.3807 loss)\nI1206 15:47:57.693261 22755 solver.cpp:228] Iteration 16200, loss = 29.5588\nI1206 15:47:57.693291 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 15:47:57.693307 22755 solver.cpp:244]     Train net output #1: loss = 29.5588 (* 1 = 29.5588 loss)\nI1206 15:47:57.769930 22755 sgd_solver.cpp:166] Iteration 16200, lr = 2.43\nI1206 15:49:08.202512 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01433 > 5) by scale factor 0.997142\nI1206 15:49:11.021826 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.43406 > 5) by scale factor 0.920122\nI1206 15:49:16.655480 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.22374 > 5) by scale factor 0.957168\nI1206 15:49:17.598574 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.14822 > 5) by scale factor 0.971209\nI1206 15:49:22.305939 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12919 > 5) by scale factor 0.974813\nI1206 15:49:30.781919 22755 solver.cpp:337] Iteration 16300, Testing net (#0)\nI1206 15:50:23.276767 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1191\nI1206 15:50:23.277050 22755 solver.cpp:404]     Test net output #1: loss = 55.2514 (* 1 = 55.2514 loss)\nI1206 15:50:24.147856 22755 solver.cpp:228] Iteration 16300, loss = 58.416\nI1206 15:50:24.147887 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 15:50:24.147903 22755 solver.cpp:244]     Train net output #1: loss = 58.416 (* 1 = 58.416 loss)\nI1206 15:50:24.223951 22755 sgd_solver.cpp:166] Iteration 16300, lr = 2.445\nI1206 15:50:24.233110 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.23479 > 5) by scale factor 0.955148\nI1206 15:51:57.229985 22755 solver.cpp:337] Iteration 16400, Testing net (#0)\nI1206 15:52:49.734786 22755 
solver.cpp:404]     Test net output #0: accuracy = 0.19745\nI1206 15:52:49.735085 22755 solver.cpp:404]     Test net output #1: loss = 41.5102 (* 1 = 41.5102 loss)\nI1206 15:52:50.605808 22755 solver.cpp:228] Iteration 16400, loss = 46.8048\nI1206 15:52:50.605840 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 15:52:50.605856 22755 solver.cpp:244]     Train net output #1: loss = 46.8048 (* 1 = 46.8048 loss)\nI1206 15:52:50.679288 22755 sgd_solver.cpp:166] Iteration 16400, lr = 2.46\nI1206 15:54:02.131184 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.1252 > 5) by scale factor 0.975572\nI1206 15:54:23.779517 22755 solver.cpp:337] Iteration 16500, Testing net (#0)\nI1206 15:55:16.287454 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1568\nI1206 15:55:16.287760 22755 solver.cpp:404]     Test net output #1: loss = 34.6238 (* 1 = 34.6238 loss)\nI1206 15:55:17.158638 22755 solver.cpp:228] Iteration 16500, loss = 31.2186\nI1206 15:55:17.158676 22755 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1206 15:55:17.158700 22755 solver.cpp:244]     Train net output #1: loss = 31.2186 (* 1 = 31.2186 loss)\nI1206 15:55:17.231794 22755 sgd_solver.cpp:166] Iteration 16500, lr = 2.475\nI1206 15:55:29.477211 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0838 > 5) by scale factor 0.983516\nI1206 15:55:38.889981 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08316 > 5) by scale factor 0.983641\nI1206 15:56:50.429559 22755 solver.cpp:337] Iteration 16600, Testing net (#0)\nI1206 15:57:42.947137 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14985\nI1206 15:57:42.947422 22755 solver.cpp:404]     Test net output #1: loss = 28.0246 (* 1 = 28.0246 loss)\nI1206 15:57:43.819146 22755 solver.cpp:228] Iteration 16600, loss = 24.678\nI1206 15:57:43.819185 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 15:57:43.819209 
22755 solver.cpp:244]     Train net output #1: loss = 24.678 (* 1 = 24.678 loss)\nI1206 15:57:43.894541 22755 sgd_solver.cpp:166] Iteration 16600, lr = 2.49\nI1206 15:58:51.657032 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15893 > 5) by scale factor 0.969193\nI1206 15:59:17.096410 22755 solver.cpp:337] Iteration 16700, Testing net (#0)\nI1206 16:00:09.593343 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16895\nI1206 16:00:09.593626 22755 solver.cpp:404]     Test net output #1: loss = 34.6178 (* 1 = 34.6178 loss)\nI1206 16:00:10.465076 22755 solver.cpp:228] Iteration 16700, loss = 39.0389\nI1206 16:00:10.465112 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 16:00:10.465129 22755 solver.cpp:244]     Train net output #1: loss = 39.0389 (* 1 = 39.0389 loss)\nI1206 16:00:10.540823 22755 sgd_solver.cpp:166] Iteration 16700, lr = 2.505\nI1206 16:01:03.215571 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10801 > 5) by scale factor 0.978854\nI1206 16:01:43.687325 22755 solver.cpp:337] Iteration 16800, Testing net (#0)\nI1206 16:02:36.199687 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1491\nI1206 16:02:36.199975 22755 solver.cpp:404]     Test net output #1: loss = 33.6459 (* 1 = 33.6459 loss)\nI1206 16:02:37.071892 22755 solver.cpp:228] Iteration 16800, loss = 35.4933\nI1206 16:02:37.071928 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 16:02:37.071945 22755 solver.cpp:244]     Train net output #1: loss = 35.4933 (* 1 = 35.4933 loss)\nI1206 16:02:37.146987 22755 sgd_solver.cpp:166] Iteration 16800, lr = 2.52\nI1206 16:04:10.254662 22755 solver.cpp:337] Iteration 16900, Testing net (#0)\nI1206 16:05:02.768621 22755 solver.cpp:404]     Test net output #0: accuracy = 0.179\nI1206 16:05:02.768900 22755 solver.cpp:404]     Test net output #1: loss = 41.5923 (* 1 = 41.5923 loss)\nI1206 16:05:03.640422 22755 solver.cpp:228] Iteration 16900, 
loss = 39.1829\nI1206 16:05:03.640458 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 16:05:03.640475 22755 solver.cpp:244]     Train net output #1: loss = 39.1829 (* 1 = 39.1829 loss)\nI1206 16:05:03.713295 22755 sgd_solver.cpp:166] Iteration 16900, lr = 2.535\nI1206 16:05:37.588614 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.15888 > 5) by scale factor 0.969203\nI1206 16:06:36.857030 22755 solver.cpp:337] Iteration 17000, Testing net (#0)\nI1206 16:07:29.345176 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15375\nI1206 16:07:29.345463 22755 solver.cpp:404]     Test net output #1: loss = 35.5249 (* 1 = 35.5249 loss)\nI1206 16:07:30.216539 22755 solver.cpp:228] Iteration 17000, loss = 38.9652\nI1206 16:07:30.216574 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 16:07:30.216591 22755 solver.cpp:244]     Train net output #1: loss = 38.9652 (* 1 = 38.9652 loss)\nI1206 16:07:30.288601 22755 sgd_solver.cpp:166] Iteration 17000, lr = 2.55\nI1206 16:08:11.666600 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03651 > 5) by scale factor 0.992751\nI1206 16:08:12.608767 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12968 > 5) by scale factor 0.97472\nI1206 16:08:16.370813 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00408 > 5) by scale factor 0.999185\nI1206 16:09:03.398414 22755 solver.cpp:337] Iteration 17100, Testing net (#0)\nI1206 16:09:55.892093 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1501\nI1206 16:09:55.892390 22755 solver.cpp:404]     Test net output #1: loss = 47.907 (* 1 = 47.907 loss)\nI1206 16:09:56.764274 22755 solver.cpp:228] Iteration 17100, loss = 48.3378\nI1206 16:09:56.764309 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 16:09:56.764328 22755 solver.cpp:244]     Train net output #1: loss = 48.3378 (* 1 = 48.3378 loss)\nI1206 
16:09:56.842562 22755 sgd_solver.cpp:166] Iteration 17100, lr = 2.565\nI1206 16:09:58.732595 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.38819 > 5) by scale factor 0.927955\nI1206 16:10:27.879477 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07711 > 5) by scale factor 0.984812\nI1206 16:11:29.931974 22755 solver.cpp:337] Iteration 17200, Testing net (#0)\nI1206 16:12:22.436853 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16995\nI1206 16:12:22.437155 22755 solver.cpp:404]     Test net output #1: loss = 30.5083 (* 1 = 30.5083 loss)\nI1206 16:12:23.308941 22755 solver.cpp:228] Iteration 17200, loss = 27.2328\nI1206 16:12:23.308977 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 16:12:23.308995 22755 solver.cpp:244]     Train net output #1: loss = 27.2329 (* 1 = 27.2329 loss)\nI1206 16:12:23.383535 22755 sgd_solver.cpp:166] Iteration 17200, lr = 2.58\nI1206 16:13:56.512176 22755 solver.cpp:337] Iteration 17300, Testing net (#0)\nI1206 16:14:49.010146 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1787\nI1206 16:14:49.010447 22755 solver.cpp:404]     Test net output #1: loss = 23.341 (* 1 = 23.341 loss)\nI1206 16:14:49.882174 22755 solver.cpp:228] Iteration 17300, loss = 23.4931\nI1206 16:14:49.882210 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 16:14:49.882226 22755 solver.cpp:244]     Train net output #1: loss = 23.4931 (* 1 = 23.4931 loss)\nI1206 16:14:49.954057 22755 sgd_solver.cpp:166] Iteration 17300, lr = 2.595\nI1206 16:15:05.947258 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21108 > 5) by scale factor 0.959493\nI1206 16:15:13.467907 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.09811 > 5) by scale factor 0.819926\nI1206 16:16:23.068310 22755 solver.cpp:337] Iteration 17400, Testing net (#0)\nI1206 16:17:15.600829 22755 solver.cpp:404]     Test net output 
#0: accuracy = 0.11795\nI1206 16:17:15.601136 22755 solver.cpp:404]     Test net output #1: loss = 37.2712 (* 1 = 37.2712 loss)\nI1206 16:17:16.472676 22755 solver.cpp:228] Iteration 17400, loss = 37.7363\nI1206 16:17:16.472713 22755 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1206 16:17:16.472730 22755 solver.cpp:244]     Train net output #1: loss = 37.7363 (* 1 = 37.7363 loss)\nI1206 16:17:16.548033 22755 sgd_solver.cpp:166] Iteration 17400, lr = 2.61\nI1206 16:18:12.948828 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00855 > 5) by scale factor 0.998294\nI1206 16:18:22.352843 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02181 > 5) by scale factor 0.995656\nI1206 16:18:23.294920 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.68112 > 5) by scale factor 0.880109\nI1206 16:18:24.236764 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.598 > 5) by scale factor 0.757805\nI1206 16:18:25.178350 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.04487 > 5) by scale factor 0.827147\nI1206 16:18:26.120791 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13474 > 5) by scale factor 0.973759\nI1206 16:18:27.062999 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.48322 > 5) by scale factor 0.911873\nI1206 16:18:36.464018 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.69586 > 5) by scale factor 0.87783\nI1206 16:18:43.984967 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.39072 > 5) by scale factor 0.92752\nI1206 16:18:49.639472 22755 solver.cpp:337] Iteration 17500, Testing net (#0)\nI1206 16:19:42.135630 22755 solver.cpp:404]     Test net output #0: accuracy = 0.12495\nI1206 16:19:42.135937 22755 solver.cpp:404]     Test net output #1: loss = 42.4716 (* 1 = 42.4716 loss)\nI1206 
16:19:43.007006 22755 solver.cpp:228] Iteration 17500, loss = 39.7763\nI1206 16:19:43.007050 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 16:19:43.007067 22755 solver.cpp:244]     Train net output #1: loss = 39.7763 (* 1 = 39.7763 loss)\nI1206 16:19:43.088472 22755 sgd_solver.cpp:166] Iteration 17500, lr = 2.625\nI1206 16:19:43.097658 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.66406 > 5) by scale factor 0.882759\nI1206 16:19:47.803429 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 6.18074 > 5) by scale factor 0.808964\nI1206 16:19:48.745843 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.8589 > 5) by scale factor 0.853402\nI1206 16:19:56.272387 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.25161 > 5) by scale factor 0.952089\nI1206 16:20:06.621088 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12503 > 5) by scale factor 0.975603\nI1206 16:20:13.206920 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.05717 > 5) by scale factor 0.988695\nI1206 16:20:16.971519 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29388 > 5) by scale factor 0.944487\nI1206 16:20:34.842639 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.32945 > 5) by scale factor 0.938183\nI1206 16:20:36.725181 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.00733 > 5) by scale factor 0.998537\nI1206 16:20:45.189679 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.33368 > 5) by scale factor 0.937438\nI1206 16:21:16.242208 22755 solver.cpp:337] Iteration 17600, Testing net (#0)\nI1206 16:22:08.779261 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1773\nI1206 16:22:08.779551 22755 solver.cpp:404]     Test net output #1: loss = 28.4938 (* 1 = 28.4938 loss)\nI1206 
16:22:09.650521 22755 solver.cpp:228] Iteration 17600, loss = 24.7104\nI1206 16:22:09.650554 22755 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 16:22:09.650570 22755 solver.cpp:244]     Train net output #1: loss = 24.7104 (* 1 = 24.7104 loss)\nI1206 16:22:09.722705 22755 sgd_solver.cpp:166] Iteration 17600, lr = 2.64\nI1206 16:22:15.373628 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.09046 > 5) by scale factor 0.982229\nI1206 16:23:20.230093 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.21087 > 5) by scale factor 0.959532\nI1206 16:23:34.351929 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06386 > 5) by scale factor 0.987389\nI1206 16:23:42.835624 22755 solver.cpp:337] Iteration 17700, Testing net (#0)\nI1206 16:24:35.358400 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1724\nI1206 16:24:35.358701 22755 solver.cpp:404]     Test net output #1: loss = 28.3313 (* 1 = 28.3313 loss)\nI1206 16:24:36.230381 22755 solver.cpp:228] Iteration 17700, loss = 30.891\nI1206 16:24:36.230417 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 16:24:36.230432 22755 solver.cpp:244]     Train net output #1: loss = 30.891 (* 1 = 30.891 loss)\nI1206 16:24:36.302098 22755 sgd_solver.cpp:166] Iteration 17700, lr = 2.655\nI1206 16:24:55.114259 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.41965 > 5) by scale factor 0.922568\nI1206 16:24:56.056637 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.06895 > 5) by scale factor 0.986397\nI1206 16:25:55.235194 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.14878 > 5) by scale factor 0.971104\nI1206 16:25:56.176364 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01077 > 5) by scale factor 0.997851\nI1206 16:26:09.359982 22755 solver.cpp:337] Iteration 17800, Testing net 
(#0)\nI1206 16:27:01.891654 22755 solver.cpp:404]     Test net output #0: accuracy = 0.124\nI1206 16:27:01.891935 22755 solver.cpp:404]     Test net output #1: loss = 36.2047 (* 1 = 36.2047 loss)\nI1206 16:27:02.763830 22755 solver.cpp:228] Iteration 17800, loss = 36.9304\nI1206 16:27:02.763873 22755 solver.cpp:244]     Train net output #0: accuracy = 0.09\nI1206 16:27:02.763896 22755 solver.cpp:244]     Train net output #1: loss = 36.9304 (* 1 = 36.9304 loss)\nI1206 16:27:02.836951 22755 sgd_solver.cpp:166] Iteration 17800, lr = 2.67\nI1206 16:28:35.859697 22755 solver.cpp:337] Iteration 17900, Testing net (#0)\nI1206 16:29:28.367810 22755 solver.cpp:404]     Test net output #0: accuracy = 0.14875\nI1206 16:29:28.368118 22755 solver.cpp:404]     Test net output #1: loss = 34.0575 (* 1 = 34.0575 loss)\nI1206 16:29:29.239619 22755 solver.cpp:228] Iteration 17900, loss = 34.7861\nI1206 16:29:29.239652 22755 solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 16:29:29.239668 22755 solver.cpp:244]     Train net output #1: loss = 34.7861 (* 1 = 34.7861 loss)\nI1206 16:29:29.314386 22755 sgd_solver.cpp:166] Iteration 17900, lr = 2.685\nI1206 16:31:02.465071 22755 solver.cpp:337] Iteration 18000, Testing net (#0)\nI1206 16:31:54.994663 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1168\nI1206 16:31:54.994964 22755 solver.cpp:404]     Test net output #1: loss = 30.2496 (* 1 = 30.2496 loss)\nI1206 16:31:55.866719 22755 solver.cpp:228] Iteration 18000, loss = 25.1347\nI1206 16:31:55.866755 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 16:31:55.866771 22755 solver.cpp:244]     Train net output #1: loss = 25.1347 (* 1 = 25.1347 loss)\nI1206 16:31:55.938879 22755 sgd_solver.cpp:166] Iteration 18000, lr = 2.7\nI1206 16:33:29.101459 22755 solver.cpp:337] Iteration 18100, Testing net (#0)\nI1206 16:34:21.620035 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16495\nI1206 16:34:21.620327 22755 solver.cpp:404]     Test net 
output #1: loss = 33.7326 (* 1 = 33.7326 loss)\nI1206 16:34:22.491987 22755 solver.cpp:228] Iteration 18100, loss = 34.6805\nI1206 16:34:22.492022 22755 solver.cpp:244]     Train net output #0: accuracy = 0.17\nI1206 16:34:22.492038 22755 solver.cpp:244]     Train net output #1: loss = 34.6805 (* 1 = 34.6805 loss)\nI1206 16:34:22.566004 22755 sgd_solver.cpp:166] Iteration 18100, lr = 2.715\nI1206 16:35:55.654237 22755 solver.cpp:337] Iteration 18200, Testing net (#0)\nI1206 16:36:48.149389 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1518\nI1206 16:36:48.149633 22755 solver.cpp:404]     Test net output #1: loss = 26.469 (* 1 = 26.469 loss)\nI1206 16:36:49.021581 22755 solver.cpp:228] Iteration 18200, loss = 26.663\nI1206 16:36:49.021615 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 16:36:49.021631 22755 solver.cpp:244]     Train net output #1: loss = 26.663 (* 1 = 26.663 loss)\nI1206 16:36:49.096374 22755 sgd_solver.cpp:166] Iteration 18200, lr = 2.73\nI1206 16:38:22.118507 22755 solver.cpp:337] Iteration 18300, Testing net (#0)\nI1206 16:39:14.631599 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1518\nI1206 16:39:14.631861 22755 solver.cpp:404]     Test net output #1: loss = 27.876 (* 1 = 27.876 loss)\nI1206 16:39:15.503115 22755 solver.cpp:228] Iteration 18300, loss = 29.1682\nI1206 16:39:15.503149 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 16:39:15.503165 22755 solver.cpp:244]     Train net output #1: loss = 29.1682 (* 1 = 29.1682 loss)\nI1206 16:39:15.579346 22755 sgd_solver.cpp:166] Iteration 18300, lr = 2.745\nI1206 16:40:48.624456 22755 solver.cpp:337] Iteration 18400, Testing net (#0)\nI1206 16:41:41.130542 22755 solver.cpp:404]     Test net output #0: accuracy = 0.17395\nI1206 16:41:41.130828 22755 solver.cpp:404]     Test net output #1: loss = 23.8676 (* 1 = 23.8676 loss)\nI1206 16:41:42.001649 22755 solver.cpp:228] Iteration 18400, loss = 21.7267\nI1206 16:41:42.001684 22755 
solver.cpp:244]     Train net output #0: accuracy = 0.14\nI1206 16:41:42.001701 22755 solver.cpp:244]     Train net output #1: loss = 21.7267 (* 1 = 21.7267 loss)\nI1206 16:41:42.077558 22755 sgd_solver.cpp:166] Iteration 18400, lr = 2.76\nI1206 16:43:15.143144 22755 solver.cpp:337] Iteration 18500, Testing net (#0)\nI1206 16:44:07.632305 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1536\nI1206 16:44:07.632582 22755 solver.cpp:404]     Test net output #1: loss = 34.243 (* 1 = 34.243 loss)\nI1206 16:44:08.503788 22755 solver.cpp:228] Iteration 18500, loss = 31.9394\nI1206 16:44:08.503823 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 16:44:08.503839 22755 solver.cpp:244]     Train net output #1: loss = 31.9394 (* 1 = 31.9394 loss)\nI1206 16:44:08.578696 22755 sgd_solver.cpp:166] Iteration 18500, lr = 2.775\nI1206 16:45:41.751602 22755 solver.cpp:337] Iteration 18600, Testing net (#0)\nI1206 16:46:34.232694 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1356\nI1206 16:46:34.232991 22755 solver.cpp:404]     Test net output #1: loss = 36.1805 (* 1 = 36.1805 loss)\nI1206 16:46:35.103830 22755 solver.cpp:228] Iteration 18600, loss = 40.1457\nI1206 16:46:35.103865 22755 solver.cpp:244]     Train net output #0: accuracy = 0.12\nI1206 16:46:35.103885 22755 solver.cpp:244]     Train net output #1: loss = 40.1457 (* 1 = 40.1457 loss)\nI1206 16:46:35.177661 22755 sgd_solver.cpp:166] Iteration 18600, lr = 2.79\nI1206 16:48:08.312801 22755 solver.cpp:337] Iteration 18700, Testing net (#0)\nI1206 16:49:00.815490 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1828\nI1206 16:49:00.815784 22755 solver.cpp:404]     Test net output #1: loss = 27.6603 (* 1 = 27.6603 loss)\nI1206 16:49:01.687453 22755 solver.cpp:228] Iteration 18700, loss = 25.7949\nI1206 16:49:01.687487 22755 solver.cpp:244]     Train net output #0: accuracy = 0.19\nI1206 16:49:01.687503 22755 solver.cpp:244]     Train net output #1: loss = 25.7949 (* 1 = 
25.7949 loss)\nI1206 16:49:01.758697 22755 sgd_solver.cpp:166] Iteration 18700, lr = 2.805\nI1206 16:49:52.520330 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.43209 > 5) by scale factor 0.920457\nI1206 16:50:02.859817 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.10195 > 5) by scale factor 0.980018\nI1206 16:50:34.848294 22755 solver.cpp:337] Iteration 18800, Testing net (#0)\nI1206 16:51:27.356374 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1487\nI1206 16:51:27.356675 22755 solver.cpp:404]     Test net output #1: loss = 46.1534 (* 1 = 46.1534 loss)\nI1206 16:51:28.227957 22755 solver.cpp:228] Iteration 18800, loss = 45.4397\nI1206 16:51:28.227991 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 16:51:28.228009 22755 solver.cpp:244]     Train net output #1: loss = 45.4397 (* 1 = 45.4397 loss)\nI1206 16:51:28.296567 22755 sgd_solver.cpp:166] Iteration 18800, lr = 2.82\nI1206 16:53:01.392992 22755 solver.cpp:337] Iteration 18900, Testing net (#0)\nI1206 16:53:53.888706 22755 solver.cpp:404]     Test net output #0: accuracy = 0.13735\nI1206 16:53:53.888996 22755 solver.cpp:404]     Test net output #1: loss = 39.4631 (* 1 = 39.4631 loss)\nI1206 16:53:54.760614 22755 solver.cpp:228] Iteration 18900, loss = 37.7515\nI1206 16:53:54.760650 22755 solver.cpp:244]     Train net output #0: accuracy = 0.18\nI1206 16:53:54.760666 22755 solver.cpp:244]     Train net output #1: loss = 37.7515 (* 1 = 37.7515 loss)\nI1206 16:53:54.837859 22755 sgd_solver.cpp:166] Iteration 18900, lr = 2.835\nI1206 16:53:55.786883 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.2408 > 5) by scale factor 0.954053\nI1206 16:53:56.727898 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.39557 > 5) by scale factor 0.926686\nI1206 16:54:19.278331 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.02658 > 5) by scale 
factor 0.994712\nI1206 16:55:27.885442 22755 solver.cpp:337] Iteration 19000, Testing net (#0)\nI1206 16:56:20.392931 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1772\nI1206 16:56:20.393234 22755 solver.cpp:404]     Test net output #1: loss = 25.8323 (* 1 = 25.8323 loss)\nI1206 16:56:21.265354 22755 solver.cpp:228] Iteration 19000, loss = 28.9018\nI1206 16:56:21.265391 22755 solver.cpp:244]     Train net output #0: accuracy = 0.23\nI1206 16:56:21.265408 22755 solver.cpp:244]     Train net output #1: loss = 28.9018 (* 1 = 28.9018 loss)\nI1206 16:56:21.337008 22755 sgd_solver.cpp:166] Iteration 19000, lr = 2.85\nI1206 16:57:54.376852 22755 solver.cpp:337] Iteration 19100, Testing net (#0)\nI1206 16:58:46.876737 22755 solver.cpp:404]     Test net output #0: accuracy = 0.12075\nI1206 16:58:46.877038 22755 solver.cpp:404]     Test net output #1: loss = 32.2917 (* 1 = 32.2917 loss)\nI1206 16:58:47.747624 22755 solver.cpp:228] Iteration 19100, loss = 35.1343\nI1206 16:58:47.747669 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 16:58:47.747687 22755 solver.cpp:244]     Train net output #1: loss = 35.1343 (* 1 = 35.1343 loss)\nI1206 16:58:47.818869 22755 sgd_solver.cpp:166] Iteration 19100, lr = 2.865\nI1206 17:00:20.915866 22755 solver.cpp:337] Iteration 19200, Testing net (#0)\nI1206 17:01:13.405951 22755 solver.cpp:404]     Test net output #0: accuracy = 0.16965\nI1206 17:01:13.406250 22755 solver.cpp:404]     Test net output #1: loss = 37.7649 (* 1 = 37.7649 loss)\nI1206 17:01:14.277214 22755 solver.cpp:228] Iteration 19200, loss = 39.3752\nI1206 17:01:14.277247 22755 solver.cpp:244]     Train net output #0: accuracy = 0.15\nI1206 17:01:14.277264 22755 solver.cpp:244]     Train net output #1: loss = 39.3752 (* 1 = 39.3752 loss)\nI1206 17:01:14.349062 22755 sgd_solver.cpp:166] Iteration 19200, lr = 2.88\nI1206 17:02:30.506997 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.0616 > 5) by scale factor 
0.987829\nI1206 17:02:47.469763 22755 solver.cpp:337] Iteration 19300, Testing net (#0)\nI1206 17:03:39.967140 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1451\nI1206 17:03:39.967442 22755 solver.cpp:404]     Test net output #1: loss = 34.6661 (* 1 = 34.6661 loss)\nI1206 17:03:40.838840 22755 solver.cpp:228] Iteration 19300, loss = 36.764\nI1206 17:03:40.838874 22755 solver.cpp:244]     Train net output #0: accuracy = 0.1\nI1206 17:03:40.838891 22755 solver.cpp:244]     Train net output #1: loss = 36.764 (* 1 = 36.764 loss)\nI1206 17:03:40.913291 22755 sgd_solver.cpp:166] Iteration 19300, lr = 2.895\nI1206 17:05:14.052013 22755 solver.cpp:337] Iteration 19400, Testing net (#0)\nI1206 17:06:06.540652 22755 solver.cpp:404]     Test net output #0: accuracy = 0.18495\nI1206 17:06:06.540948 22755 solver.cpp:404]     Test net output #1: loss = 28.5015 (* 1 = 28.5015 loss)\nI1206 17:06:07.411914 22755 solver.cpp:228] Iteration 19400, loss = 26.1222\nI1206 17:06:07.411949 22755 solver.cpp:244]     Train net output #0: accuracy = 0.21\nI1206 17:06:07.411967 22755 solver.cpp:244]     Train net output #1: loss = 26.1222 (* 1 = 26.1222 loss)\nI1206 17:06:07.487314 22755 sgd_solver.cpp:166] Iteration 19400, lr = 2.91\nI1206 17:07:27.385316 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.03687 > 5) by scale factor 0.99268\nI1206 17:07:40.554961 22755 solver.cpp:337] Iteration 19500, Testing net (#0)\nI1206 17:08:33.047505 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1476\nI1206 17:08:33.047806 22755 solver.cpp:404]     Test net output #1: loss = 34.2029 (* 1 = 34.2029 loss)\nI1206 17:08:33.918758 22755 solver.cpp:228] Iteration 19500, loss = 31.0829\nI1206 17:08:33.918793 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 17:08:33.918812 22755 solver.cpp:244]     Train net output #1: loss = 31.0829 (* 1 = 31.0829 loss)\nI1206 17:08:33.994405 22755 sgd_solver.cpp:166] Iteration 19500, lr = 2.925\nI1206 
17:10:07.070168 22755 solver.cpp:337] Iteration 19600, Testing net (#0)\nI1206 17:10:59.550367 22755 solver.cpp:404]     Test net output #0: accuracy = 0.15635\nI1206 17:10:59.550673 22755 solver.cpp:404]     Test net output #1: loss = 46.7605 (* 1 = 46.7605 loss)\nI1206 17:11:00.421708 22755 solver.cpp:228] Iteration 19600, loss = 44.6313\nI1206 17:11:00.421743 22755 solver.cpp:244]     Train net output #0: accuracy = 0.13\nI1206 17:11:00.421761 22755 solver.cpp:244]     Train net output #1: loss = 44.6313 (* 1 = 44.6313 loss)\nI1206 17:11:00.493566 22755 sgd_solver.cpp:166] Iteration 19600, lr = 2.94\nI1206 17:12:33.593410 22755 solver.cpp:337] Iteration 19700, Testing net (#0)\nI1206 17:13:26.089700 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1672\nI1206 17:13:26.090013 22755 solver.cpp:404]     Test net output #1: loss = 40.0467 (* 1 = 40.0467 loss)\nI1206 17:13:26.961982 22755 solver.cpp:228] Iteration 19700, loss = 37.6402\nI1206 17:13:26.962016 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 17:13:26.962034 22755 solver.cpp:244]     Train net output #1: loss = 37.6402 (* 1 = 37.6402 loss)\nI1206 17:13:27.036144 22755 sgd_solver.cpp:166] Iteration 19700, lr = 2.955\nI1206 17:13:48.663306 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.08951 > 5) by scale factor 0.982413\nI1206 17:15:00.090941 22755 solver.cpp:337] Iteration 19800, Testing net (#0)\nI1206 17:15:52.590651 22755 solver.cpp:404]     Test net output #0: accuracy = 0.11155\nI1206 17:15:52.590950 22755 solver.cpp:404]     Test net output #1: loss = 32.1082 (* 1 = 32.1082 loss)\nI1206 17:15:53.461940 22755 solver.cpp:228] Iteration 19800, loss = 33.3768\nI1206 17:15:53.461977 22755 solver.cpp:244]     Train net output #0: accuracy = 0.11\nI1206 17:15:53.461992 22755 solver.cpp:244]     Train net output #1: loss = 33.3768 (* 1 = 33.3768 loss)\nI1206 17:15:53.536556 22755 sgd_solver.cpp:166] Iteration 19800, lr = 2.97\nI1206 
17:16:55.533145 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.26608 > 5) by scale factor 0.949473\nI1206 17:17:26.554874 22755 solver.cpp:337] Iteration 19900, Testing net (#0)\nI1206 17:18:19.044941 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1663\nI1206 17:18:19.045243 22755 solver.cpp:404]     Test net output #1: loss = 44.9032 (* 1 = 44.9032 loss)\nI1206 17:18:19.916626 22755 solver.cpp:228] Iteration 19900, loss = 42.1288\nI1206 17:18:19.916668 22755 solver.cpp:244]     Train net output #0: accuracy = 0.16\nI1206 17:18:19.916684 22755 solver.cpp:244]     Train net output #1: loss = 42.1288 (* 1 = 42.1288 loss)\nI1206 17:18:19.987969 22755 sgd_solver.cpp:166] Iteration 19900, lr = 2.985\nI1206 17:18:55.733356 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.31215 > 5) by scale factor 0.941239\nI1206 17:18:58.555670 22755 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.04187 > 5) by scale factor 0.991696\nI1206 17:19:53.077396 22755 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/Fig2b_iter_20000.caffemodel\nI1206 17:19:53.212337 22755 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/Fig2b_iter_20000.solverstate\nI1206 17:19:53.491559 22755 solver.cpp:317] Iteration 20000, loss = 26.9828\nI1206 17:19:53.491597 22755 solver.cpp:337] Iteration 20000, Testing net (#0)\nI1206 17:20:45.995242 22755 solver.cpp:404]     Test net output #0: accuracy = 0.1196\nI1206 17:20:45.995553 22755 solver.cpp:404]     Test net output #1: loss = 32.9087 (* 1 = 32.9087 loss)\nI1206 17:20:45.995565 22755 solver.cpp:322] Optimization Done.\nI1206 17:20:50.203152 22755 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/lrRange3SS520kClip5Fig12b",
    "content": "I1206 09:10:59.632695 23310 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1206 09:10:59.635998 23310 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1206 09:10:59.637467 23310 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1206 09:10:59.638751 23310 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1206 09:10:59.640029 23310 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1206 09:10:59.641397 23310 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1206 09:10:59.643079 23310 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1206 09:10:59.644371 23310 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1206 09:10:59.645673 23310 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1206 09:11:00.115253 23310 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nclip_gradients: 5\nmax_lr: 3\nI1206 09:11:00.117662 23310 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1206 09:11:00.186995 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:00.187072 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:00.188133 23310 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1206 09:11:00.189815 23310 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: 
true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: 
\"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n  
  }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n  
  pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 
1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stri\nI1206 09:11:00.191654 23310 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:11:00.195101 23310 net.cpp:100] Creating Layer dataLayer\nI1206 09:11:00.195195 23310 net.cpp:408] dataLayer -> data_top\nI1206 09:11:00.195439 23310 net.cpp:408] dataLayer -> label\nI1206 09:11:00.195605 23310 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:11:00.203526 23315 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1206 09:11:00.264348 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:00.271190 23310 net.cpp:150] Setting up dataLayer\nI1206 09:11:00.271265 23310 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:11:00.271281 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.271286 23310 net.cpp:165] Memory required for data: 1044820\nI1206 09:11:00.271303 23310 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:11:00.271322 23310 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:11:00.271332 23310 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:11:00.271350 23310 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:11:00.271365 23310 
net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:11:00.271453 23310 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:11:00.271467 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.271474 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.271479 23310 net.cpp:165] Memory required for data: 1045500\nI1206 09:11:00.271486 23310 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:11:00.271566 23310 net.cpp:100] Creating Layer pre_conv\nI1206 09:11:00.271579 23310 net.cpp:434] pre_conv <- data_top\nI1206 09:11:00.271592 23310 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:11:00.273730 23310 net.cpp:150] Setting up pre_conv\nI1206 09:11:00.273751 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.273757 23310 net.cpp:165] Memory required for data: 6616060\nI1206 09:11:00.273820 23310 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:11:00.273833 23310 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:11:00.273839 23310 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:11:00.273852 23310 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:11:00.273864 23310 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:11:00.273916 23310 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:11:00.273918 23316 blocking_queue.cpp:50] Waiting for data\nI1206 09:11:00.273936 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.273944 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.273949 23310 net.cpp:165] Memory required for data: 17757180\nI1206 09:11:00.273955 23310 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:11:00.274055 23310 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:11:00.274068 23310 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:11:00.274081 23310 net.cpp:408] L1_b1_brc1_bn -> 
L1_b1_brc1_bn_top\nI1206 09:11:00.274570 23310 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:11:00.274587 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.274593 23310 net.cpp:165] Memory required for data: 23327740\nI1206 09:11:00.274610 23310 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:11:00.274672 23310 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:11:00.274682 23310 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:11:00.274689 23310 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:11:00.274700 23310 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:11:00.274708 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.274721 23310 net.cpp:165] Memory required for data: 28898300\nI1206 09:11:00.274727 23310 layer_factory.hpp:77] Creating layer L1_b1_brc1_conv\nI1206 09:11:00.274741 23310 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:11:00.274746 23310 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:11:00.274760 23310 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:11:00.275056 23310 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:11:00.275072 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.275077 23310 net.cpp:165] Memory required for data: 40039420\nI1206 09:11:00.275087 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:11:00.275097 23310 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:11:00.275104 23310 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:11:00.275115 23310 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:11:00.275349 23310 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:11:00.275363 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.275368 23310 net.cpp:165] Memory required for data: 51180540\nI1206 09:11:00.275387 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:11:00.275398 23310 net.cpp:100] Creating Layer 
L1_b1_brc2_relu\nI1206 09:11:00.275403 23310 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:11:00.275410 23310 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:11:00.275420 23310 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:11:00.275436 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.275441 23310 net.cpp:165] Memory required for data: 62321660\nI1206 09:11:00.275447 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:11:00.275463 23310 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:11:00.275470 23310 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:11:00.275478 23310 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:11:00.275761 23310 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:11:00.275776 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.275782 23310 net.cpp:165] Memory required for data: 73462780\nI1206 09:11:00.275791 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:11:00.275804 23310 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:11:00.275811 23310 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:11:00.275820 23310 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:11:00.276055 23310 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:11:00.276068 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.276074 23310 net.cpp:165] Memory required for data: 84603900\nI1206 09:11:00.276084 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:11:00.276093 23310 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:11:00.276099 23310 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:11:00.276106 23310 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:11:00.276116 23310 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:11:00.276124 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.276129 23310 net.cpp:165] Memory required for 
data: 95745020\nI1206 09:11:00.276134 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:11:00.276147 23310 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:11:00.276154 23310 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:11:00.276162 23310 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:11:00.276465 23310 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:11:00.276479 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.276484 23310 net.cpp:165] Memory required for data: 118027260\nI1206 09:11:00.276497 23310 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:11:00.276515 23310 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:11:00.276520 23310 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:11:00.276536 23310 net.cpp:408] L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:11:00.276834 23310 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:11:00.276849 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.276854 23310 net.cpp:165] Memory required for data: 140309500\nI1206 09:11:00.276863 23310 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:11:00.276932 23310 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:11:00.276943 23310 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:11:00.276950 23310 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:11:00.276962 23310 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:11:00.277043 23310 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:11:00.277060 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.277065 23310 net.cpp:165] Memory required for data: 162591740\nI1206 09:11:00.277070 23310 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.277083 23310 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.277089 
23310 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:11:00.277097 23310 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:11:00.277107 23310 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:11:00.277165 23310 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.277176 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.277186 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.277191 23310 net.cpp:165] Memory required for data: 207156220\nI1206 09:11:00.277197 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:11:00.277209 23310 net.cpp:100] Creating Layer L1_b2_brc1_bn\nI1206 09:11:00.277215 23310 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:11:00.277226 23310 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:11:00.277451 23310 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:11:00.277464 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.277469 23310 net.cpp:165] Memory required for data: 229438460\nI1206 09:11:00.277480 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:11:00.277493 23310 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:11:00.277498 23310 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:11:00.277505 23310 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:11:00.277515 23310 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:11:00.277523 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.277528 23310 net.cpp:165] Memory required for data: 251720700\nI1206 09:11:00.277532 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:11:00.277544 23310 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:11:00.277550 23310 
net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:11:00.277562 23310 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:11:00.277878 23310 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:11:00.277892 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.277899 23310 net.cpp:165] Memory required for data: 262861820\nI1206 09:11:00.277906 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:11:00.277915 23310 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:11:00.277921 23310 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:11:00.277930 23310 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:11:00.278167 23310 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:11:00.278180 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.278185 23310 net.cpp:165] Memory required for data: 274002940\nI1206 09:11:00.278197 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:11:00.278208 23310 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:11:00.278214 23310 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:11:00.278221 23310 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:11:00.278231 23310 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:11:00.278239 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.278244 23310 net.cpp:165] Memory required for data: 285144060\nI1206 09:11:00.278249 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:11:00.278264 23310 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:11:00.278270 23310 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:11:00.278278 23310 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:11:00.278553 23310 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:11:00.278570 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.278575 23310 net.cpp:165] Memory required for data: 296285180\nI1206 09:11:00.278584 23310 
layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:11:00.278594 23310 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:11:00.278599 23310 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:11:00.278607 23310 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:11:00.278852 23310 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:11:00.278867 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.278872 23310 net.cpp:165] Memory required for data: 307426300\nI1206 09:11:00.278882 23310 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:11:00.278903 23310 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:11:00.278908 23310 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:11:00.278915 23310 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:11:00.278925 23310 net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:11:00.278934 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.278937 23310 net.cpp:165] Memory required for data: 318567420\nI1206 09:11:00.278944 23310 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:11:00.278959 23310 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:11:00.278964 23310 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:11:00.278976 23310 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:11:00.279283 23310 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:11:00.279296 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279302 23310 net.cpp:165] Memory required for data: 340849660\nI1206 09:11:00.279316 23310 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:11:00.279332 23310 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:11:00.279340 23310 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:11:00.279346 23310 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:11:00.279355 23310 net.cpp:408] L1_b2_sum_eltwise -> 
L1_b2_sum_eltwise_top\nI1206 09:11:00.279389 23310 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:11:00.279399 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279404 23310 net.cpp:165] Memory required for data: 363131900\nI1206 09:11:00.279409 23310 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.279417 23310 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.279423 23310 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:11:00.279438 23310 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:11:00.279448 23310 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:11:00.279491 23310 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.279505 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279511 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279515 23310 net.cpp:165] Memory required for data: 407696380\nI1206 09:11:00.279521 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:11:00.279533 23310 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:11:00.279539 23310 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:11:00.279551 23310 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:11:00.279781 23310 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:11:00.279795 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279800 23310 net.cpp:165] Memory required for data: 429978620\nI1206 09:11:00.279811 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:11:00.279820 23310 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:11:00.279826 23310 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 
09:11:00.279834 23310 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:11:00.279844 23310 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:11:00.279851 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.279855 23310 net.cpp:165] Memory required for data: 452260860\nI1206 09:11:00.279860 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:11:00.279876 23310 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:11:00.279882 23310 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:11:00.279894 23310 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:11:00.280202 23310 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:11:00.280223 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.280230 23310 net.cpp:165] Memory required for data: 463401980\nI1206 09:11:00.280239 23310 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:11:00.280251 23310 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:11:00.280258 23310 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:11:00.280267 23310 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:11:00.280503 23310 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:11:00.280516 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.280521 23310 net.cpp:165] Memory required for data: 474543100\nI1206 09:11:00.280531 23310 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:11:00.280540 23310 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:11:00.280545 23310 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:11:00.280552 23310 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:11:00.280562 23310 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:11:00.280570 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.280573 23310 net.cpp:165] Memory required for data: 485684220\nI1206 09:11:00.280578 23310 layer_factory.hpp:77] Creating layer 
L1_b3_brc2_conv\nI1206 09:11:00.280596 23310 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:11:00.280602 23310 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:11:00.280614 23310 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:11:00.280911 23310 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:11:00.280926 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.280932 23310 net.cpp:165] Memory required for data: 496825340\nI1206 09:11:00.280941 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:11:00.280951 23310 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:11:00.280961 23310 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:11:00.280968 23310 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:11:00.281203 23310 net.cpp:150] Setting up L1_b3_brc3_bn\nI1206 09:11:00.281216 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.281221 23310 net.cpp:165] Memory required for data: 507966460\nI1206 09:11:00.281231 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:11:00.281240 23310 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:11:00.281246 23310 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:11:00.281253 23310 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:11:00.281262 23310 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:11:00.281270 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.281275 23310 net.cpp:165] Memory required for data: 519107580\nI1206 09:11:00.281280 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:11:00.281296 23310 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:11:00.281301 23310 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:11:00.281313 23310 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:11:00.281625 23310 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:11:00.281639 23310 net.cpp:157] Top shape: 85 64 32 32 
(5570560)\nI1206 09:11:00.281644 23310 net.cpp:165] Memory required for data: 541389820\nI1206 09:11:00.281653 23310 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:11:00.281666 23310 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:11:00.281673 23310 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:11:00.281680 23310 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:11:00.281692 23310 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:11:00.281733 23310 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:11:00.281751 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.281757 23310 net.cpp:165] Memory required for data: 563672060\nI1206 09:11:00.281762 23310 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.281778 23310 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.281785 23310 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:11:00.281795 23310 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:11:00.281805 23310 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:11:00.281852 23310 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.281865 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.281872 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.281877 23310 net.cpp:165] Memory required for data: 608236540\nI1206 09:11:00.281883 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:11:00.281890 23310 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:11:00.281896 23310 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:11:00.281908 23310 
net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:11:00.282136 23310 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:11:00.282150 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.282155 23310 net.cpp:165] Memory required for data: 630518780\nI1206 09:11:00.282165 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:11:00.282177 23310 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:11:00.282183 23310 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:11:00.282191 23310 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:11:00.282199 23310 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:11:00.282207 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.282212 23310 net.cpp:165] Memory required for data: 652801020\nI1206 09:11:00.282217 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:11:00.282228 23310 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:11:00.282233 23310 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:11:00.282249 23310 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:11:00.282568 23310 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:11:00.282582 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.282588 23310 net.cpp:165] Memory required for data: 663942140\nI1206 09:11:00.282596 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:11:00.282605 23310 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:11:00.282611 23310 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:11:00.282619 23310 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:11:00.282867 23310 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:11:00.282881 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.282887 23310 net.cpp:165] Memory required for data: 675083260\nI1206 09:11:00.282897 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:11:00.282910 23310 
net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:11:00.282917 23310 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:11:00.282923 23310 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:11:00.282932 23310 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:11:00.282939 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.282944 23310 net.cpp:165] Memory required for data: 686224380\nI1206 09:11:00.282949 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:11:00.282963 23310 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:11:00.282969 23310 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:11:00.282980 23310 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:11:00.283257 23310 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 09:11:00.283270 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.283277 23310 net.cpp:165] Memory required for data: 697365500\nI1206 09:11:00.283285 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:11:00.283301 23310 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:11:00.283308 23310 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:11:00.283316 23310 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:11:00.283556 23310 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:11:00.283571 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.283576 23310 net.cpp:165] Memory required for data: 708506620\nI1206 09:11:00.283586 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:11:00.283601 23310 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:11:00.283607 23310 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:11:00.283613 23310 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:11:00.283623 23310 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:11:00.283630 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.283635 23310 
net.cpp:165] Memory required for data: 719647740\nI1206 09:11:00.283639 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:11:00.283654 23310 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:11:00.283660 23310 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:11:00.283673 23310 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:11:00.283993 23310 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:11:00.284008 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284013 23310 net.cpp:165] Memory required for data: 741929980\nI1206 09:11:00.284023 23310 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:11:00.284032 23310 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:11:00.284039 23310 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_brc3_conv_top\nI1206 09:11:00.284045 23310 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:11:00.284054 23310 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:11:00.284090 23310 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:11:00.284102 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284107 23310 net.cpp:165] Memory required for data: 764212220\nI1206 09:11:00.284113 23310 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.284121 23310 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.284126 23310 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:11:00.284137 23310 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:11:00.284147 23310 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:11:00.284191 23310 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.284206 23310 
net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284214 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284219 23310 net.cpp:165] Memory required for data: 808776700\nI1206 09:11:00.284224 23310 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:11:00.284232 23310 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:11:00.284238 23310 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:11:00.284250 23310 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:11:00.284478 23310 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:11:00.284492 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284497 23310 net.cpp:165] Memory required for data: 831058940\nI1206 09:11:00.284518 23310 layer_factory.hpp:77] Creating layer L1_b5_brc1_relu\nI1206 09:11:00.284533 23310 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:11:00.284538 23310 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:11:00.284545 23310 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:11:00.284555 23310 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:11:00.284564 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.284575 23310 net.cpp:165] Memory required for data: 853341180\nI1206 09:11:00.284581 23310 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:11:00.284596 23310 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:11:00.284602 23310 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:11:00.284615 23310 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:11:00.284946 23310 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:11:00.284962 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.284968 23310 net.cpp:165] Memory required for data: 864482300\nI1206 09:11:00.284977 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:11:00.284986 23310 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 
09:11:00.284992 23310 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:11:00.285001 23310 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:11:00.285245 23310 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:11:00.285259 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.285264 23310 net.cpp:165] Memory required for data: 875623420\nI1206 09:11:00.285275 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:11:00.285284 23310 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:11:00.285290 23310 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:11:00.285300 23310 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:11:00.285310 23310 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:11:00.285318 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.285322 23310 net.cpp:165] Memory required for data: 886764540\nI1206 09:11:00.285327 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:11:00.285343 23310 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:11:00.285349 23310 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:11:00.285357 23310 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:11:00.285636 23310 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:11:00.285651 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.285656 23310 net.cpp:165] Memory required for data: 897905660\nI1206 09:11:00.285665 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:11:00.285678 23310 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:11:00.285684 23310 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:11:00.285692 23310 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:11:00.285944 23310 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:11:00.285959 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.285964 23310 net.cpp:165] Memory required for data: 909046780\nI1206 
09:11:00.285974 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:11:00.285987 23310 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:11:00.285995 23310 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:11:00.286001 23310 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:11:00.286011 23310 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:11:00.286018 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.286023 23310 net.cpp:165] Memory required for data: 920187900\nI1206 09:11:00.286028 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:11:00.286042 23310 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:11:00.286048 23310 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:11:00.286061 23310 net.cpp:408] L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:11:00.286375 23310 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:11:00.286389 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286394 23310 net.cpp:165] Memory required for data: 942470140\nI1206 09:11:00.286403 23310 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:11:00.286413 23310 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:11:00.286419 23310 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:11:00.286427 23310 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:11:00.286442 23310 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:11:00.286483 23310 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:11:00.286492 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286497 23310 net.cpp:165] Memory required for data: 964752380\nI1206 09:11:00.286502 23310 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.286510 23310 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.286516 23310 
net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:11:00.286527 23310 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:11:00.286537 23310 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:11:00.286583 23310 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.286599 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286607 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286610 23310 net.cpp:165] Memory required for data: 1009316860\nI1206 09:11:00.286617 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:11:00.286625 23310 net.cpp:100] Creating Layer L1_b6_brc1_bn\nI1206 09:11:00.286631 23310 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:11:00.286643 23310 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:11:00.286881 23310 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:11:00.286896 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286901 23310 net.cpp:165] Memory required for data: 1031599100\nI1206 09:11:00.286911 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:11:00.286923 23310 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:11:00.286931 23310 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:11:00.286937 23310 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:11:00.286947 23310 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:11:00.286954 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.286959 23310 net.cpp:165] Memory required for data: 1053881340\nI1206 09:11:00.286964 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:11:00.286975 23310 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:11:00.286980 23310 
net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:11:00.286993 23310 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:11:00.287308 23310 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:11:00.287323 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.287328 23310 net.cpp:165] Memory required for data: 1065022460\nI1206 09:11:00.287336 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:11:00.287345 23310 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:11:00.287351 23310 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:11:00.287359 23310 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:11:00.287600 23310 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:11:00.287612 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.287618 23310 net.cpp:165] Memory required for data: 1076163580\nI1206 09:11:00.287628 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:11:00.287648 23310 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:11:00.287654 23310 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:11:00.287662 23310 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:11:00.287672 23310 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:11:00.287679 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.287684 23310 net.cpp:165] Memory required for data: 1087304700\nI1206 09:11:00.287689 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:11:00.287719 23310 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:11:00.287726 23310 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:11:00.287735 23310 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:11:00.288023 23310 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:11:00.288038 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.288044 23310 net.cpp:165] Memory required for data: 1098445820\nI1206 09:11:00.288053 23310 
layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:11:00.288065 23310 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:11:00.288071 23310 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:11:00.288080 23310 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:11:00.288417 23310 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:11:00.288432 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.288437 23310 net.cpp:165] Memory required for data: 1109586940\nI1206 09:11:00.288450 23310 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:11:00.288458 23310 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:11:00.288465 23310 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:11:00.288471 23310 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 09:11:00.288481 23310 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:11:00.288488 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.288493 23310 net.cpp:165] Memory required for data: 1120728060\nI1206 09:11:00.288498 23310 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:11:00.288625 23310 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:11:00.288632 23310 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:11:00.288646 23310 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:11:00.288971 23310 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:11:00.288986 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.288991 23310 net.cpp:165] Memory required for data: 1143010300\nI1206 09:11:00.289000 23310 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:11:00.289011 23310 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:11:00.289016 23310 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:11:00.289024 23310 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:11:00.289037 23310 net.cpp:408] L1_b6_sum_eltwise 
-> L1_b6_sum_eltwise_top\nI1206 09:11:00.289073 23310 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:11:00.289086 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.289091 23310 net.cpp:165] Memory required for data: 1165292540\nI1206 09:11:00.289096 23310 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.289104 23310 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.289110 23310 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:11:00.289120 23310 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:11:00.289131 23310 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:11:00.289176 23310 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.289188 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.289196 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.289201 23310 net.cpp:165] Memory required for data: 1209857020\nI1206 09:11:00.289206 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:11:00.289217 23310 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:11:00.289223 23310 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:11:00.289234 23310 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:11:00.289463 23310 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:11:00.289476 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.289490 23310 net.cpp:165] Memory required for data: 1232139260\nI1206 09:11:00.289501 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:11:00.289510 23310 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:11:00.289516 23310 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 
09:11:00.289523 23310 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:11:00.289533 23310 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:11:00.289541 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.289546 23310 net.cpp:165] Memory required for data: 1254421500\nI1206 09:11:00.289551 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:11:00.289569 23310 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:11:00.289575 23310 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:11:00.289587 23310 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:11:00.289954 23310 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:11:00.289969 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.289974 23310 net.cpp:165] Memory required for data: 1259992060\nI1206 09:11:00.289984 23310 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:11:00.289997 23310 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:11:00.290004 23310 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:11:00.290015 23310 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:11:00.290261 23310 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:11:00.290274 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.290279 23310 net.cpp:165] Memory required for data: 1265562620\nI1206 09:11:00.290290 23310 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:11:00.290299 23310 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:11:00.290305 23310 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:11:00.290316 23310 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:11:00.290326 23310 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:11:00.290333 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.290338 23310 net.cpp:165] Memory required for data: 1271133180\nI1206 09:11:00.290343 23310 layer_factory.hpp:77] Creating layer 
L2_b1_brc2_conv\nI1206 09:11:00.290359 23310 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:11:00.290365 23310 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:11:00.290374 23310 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:11:00.290678 23310 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:11:00.290693 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.290697 23310 net.cpp:165] Memory required for data: 1276703740\nI1206 09:11:00.290706 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:11:00.290724 23310 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:11:00.290732 23310 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:11:00.290741 23310 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:11:00.290989 23310 net.cpp:150] Setting up L2_b1_brc3_bn\nI1206 09:11:00.291002 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.291007 23310 net.cpp:165] Memory required for data: 1282274300\nI1206 09:11:00.291018 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:11:00.291028 23310 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:11:00.291033 23310 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:11:00.291044 23310 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:11:00.291054 23310 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:11:00.291062 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.291066 23310 net.cpp:165] Memory required for data: 1287844860\nI1206 09:11:00.291071 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:11:00.291086 23310 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:11:00.291092 23310 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:11:00.291101 23310 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:11:00.292860 23310 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:11:00.292878 23310 net.cpp:157] Top shape: 85 128 16 16 
(2785280)\nI1206 09:11:00.292883 23310 net.cpp:165] Memory required for data: 1298985980\nI1206 09:11:00.292893 23310 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:11:00.292910 23310 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:11:00.292917 23310 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:11:00.292930 23310 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:11:00.293364 23310 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:11:00.293378 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293383 23310 net.cpp:165] Memory required for data: 1310127100\nI1206 09:11:00.293392 23310 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:11:00.293402 23310 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1206 09:11:00.293408 23310 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:11:00.293416 23310 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:11:00.293427 23310 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:11:00.293454 23310 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:11:00.293463 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293468 23310 net.cpp:165] Memory required for data: 1321268220\nI1206 09:11:00.293474 23310 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.293486 23310 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.293493 23310 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:11:00.293500 23310 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:11:00.293509 23310 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:11:00.293560 23310 net.cpp:150] Setting up 
L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.293572 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293579 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293584 23310 net.cpp:165] Memory required for data: 1343550460\nI1206 09:11:00.293589 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:11:00.293597 23310 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:11:00.293603 23310 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:11:00.293617 23310 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:11:00.293854 23310 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:11:00.293869 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293874 23310 net.cpp:165] Memory required for data: 1354691580\nI1206 09:11:00.293885 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:11:00.293897 23310 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:11:00.293905 23310 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:11:00.293911 23310 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:11:00.293921 23310 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:11:00.293928 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.293933 23310 net.cpp:165] Memory required for data: 1365832700\nI1206 09:11:00.293938 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:11:00.293949 23310 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:11:00.293956 23310 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:11:00.293967 23310 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:11:00.294414 23310 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:11:00.294427 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.294433 23310 net.cpp:165] Memory required for data: 1371403260\nI1206 09:11:00.294442 23310 layer_factory.hpp:77] Creating layer 
L2_b2_brc2_bn\nI1206 09:11:00.294459 23310 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:11:00.294466 23310 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:11:00.294477 23310 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:11:00.294726 23310 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:11:00.294740 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.294745 23310 net.cpp:165] Memory required for data: 1376973820\nI1206 09:11:00.294756 23310 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:11:00.294765 23310 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:11:00.294771 23310 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:11:00.294781 23310 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:11:00.294792 23310 net.cpp:150] Setting up L2_b2_brc2_relu\nI1206 09:11:00.294800 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.294805 23310 net.cpp:165] Memory required for data: 1382544380\nI1206 09:11:00.294809 23310 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:11:00.294821 23310 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:11:00.294827 23310 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:11:00.294836 23310 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:11:00.295148 23310 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:11:00.295166 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.295171 23310 net.cpp:165] Memory required for data: 1388114940\nI1206 09:11:00.295181 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:11:00.295191 23310 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:11:00.295197 23310 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:11:00.295204 23310 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:11:00.295451 23310 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:11:00.295464 23310 net.cpp:157] Top shape: 85 64 16 16 
(1392640)\nI1206 09:11:00.295470 23310 net.cpp:165] Memory required for data: 1393685500\nI1206 09:11:00.295480 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:11:00.295490 23310 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:11:00.295495 23310 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:11:00.295507 23310 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:11:00.295518 23310 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:11:00.295526 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.295531 23310 net.cpp:165] Memory required for data: 1399256060\nI1206 09:11:00.295536 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:11:00.295550 23310 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:11:00.295557 23310 net.cpp:434] L2_b2_brc3_conv <- L2_b2_brc3_bn_top\nI1206 09:11:00.295565 23310 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:11:00.296010 23310 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:11:00.296025 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296031 23310 net.cpp:165] Memory required for data: 1410397180\nI1206 09:11:00.296041 23310 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:11:00.296051 23310 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:11:00.296056 23310 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:11:00.296063 23310 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:11:00.296075 23310 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:11:00.296103 23310 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:11:00.296111 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296116 23310 net.cpp:165] Memory required for data: 1421538300\nI1206 09:11:00.296121 23310 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.296133 23310 net.cpp:100] 
Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.296139 23310 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:11:00.296154 23310 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:11:00.296164 23310 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:11:00.296214 23310 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.296223 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296231 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296236 23310 net.cpp:165] Memory required for data: 1443820540\nI1206 09:11:00.296241 23310 layer_factory.hpp:77] Creating layer L2_b3_brc1_bn\nI1206 09:11:00.296252 23310 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:11:00.296258 23310 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:11:00.296267 23310 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:11:00.296494 23310 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:11:00.296506 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296511 23310 net.cpp:165] Memory required for data: 1454961660\nI1206 09:11:00.296545 23310 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:11:00.296557 23310 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:11:00.296563 23310 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:11:00.296571 23310 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:11:00.296581 23310 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:11:00.296589 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.296593 23310 net.cpp:165] Memory required for data: 1466102780\nI1206 09:11:00.296598 23310 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 
09:11:00.296610 23310 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:11:00.296617 23310 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:11:00.296628 23310 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:11:00.297085 23310 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:11:00.297099 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.297104 23310 net.cpp:165] Memory required for data: 1471673340\nI1206 09:11:00.297114 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:11:00.297127 23310 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:11:00.297133 23310 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:11:00.297142 23310 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:11:00.297387 23310 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:11:00.297405 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.297410 23310 net.cpp:165] Memory required for data: 1477243900\nI1206 09:11:00.297420 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:11:00.297428 23310 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:11:00.297435 23310 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:11:00.297442 23310 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:11:00.297451 23310 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:11:00.297459 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.297463 23310 net.cpp:165] Memory required for data: 1482814460\nI1206 09:11:00.297468 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:11:00.297480 23310 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:11:00.297487 23310 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:11:00.297498 23310 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:11:00.297818 23310 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:11:00.297833 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 
09:11:00.297839 23310 net.cpp:165] Memory required for data: 1488385020\nI1206 09:11:00.297848 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:11:00.297857 23310 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:11:00.297863 23310 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:11:00.297874 23310 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:11:00.298130 23310 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:11:00.298144 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.298149 23310 net.cpp:165] Memory required for data: 1493955580\nI1206 09:11:00.298159 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:11:00.298172 23310 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:11:00.298178 23310 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:11:00.298185 23310 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:11:00.298195 23310 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:11:00.298202 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.298207 23310 net.cpp:165] Memory required for data: 1499526140\nI1206 09:11:00.298213 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:11:00.298223 23310 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:11:00.298228 23310 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:11:00.298243 23310 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:11:00.298686 23310 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:11:00.298699 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.298705 23310 net.cpp:165] Memory required for data: 1510667260\nI1206 09:11:00.298720 23310 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:11:00.298732 23310 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:11:00.298738 23310 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:11:00.298745 23310 net.cpp:434] L2_b3_sum_eltwise <- 
L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:11:00.298753 23310 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:11:00.298784 23310 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:11:00.298794 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.298799 23310 net.cpp:165] Memory required for data: 1521808380\nI1206 09:11:00.298804 23310 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.298812 23310 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.298817 23310 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:11:00.298830 23310 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:11:00.298838 23310 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:11:00.298884 23310 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.298900 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.298907 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.298913 23310 net.cpp:165] Memory required for data: 1544090620\nI1206 09:11:00.298918 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:11:00.298925 23310 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:11:00.298931 23310 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:11:00.298943 23310 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:11:00.299175 23310 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:11:00.299190 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.299194 23310 net.cpp:165] Memory required for data: 1555231740\nI1206 09:11:00.299204 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:11:00.299213 23310 net.cpp:100] 
Creating Layer L2_b4_brc1_relu\nI1206 09:11:00.299219 23310 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:11:00.299230 23310 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:11:00.299240 23310 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:11:00.299247 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.299252 23310 net.cpp:165] Memory required for data: 1566372860\nI1206 09:11:00.299257 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:11:00.299275 23310 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:11:00.299283 23310 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:11:00.299290 23310 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:11:00.299743 23310 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:11:00.299759 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.299764 23310 net.cpp:165] Memory required for data: 1571943420\nI1206 09:11:00.299773 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:11:00.299785 23310 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:11:00.299793 23310 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:11:00.299800 23310 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:11:00.300050 23310 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:11:00.300062 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.300068 23310 net.cpp:165] Memory required for data: 1577513980\nI1206 09:11:00.300078 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:11:00.300087 23310 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:11:00.300093 23310 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:11:00.300101 23310 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:11:00.300109 23310 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:11:00.300117 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.300122 23310 net.cpp:165] 
Memory required for data: 1583084540\nI1206 09:11:00.300127 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:11:00.300142 23310 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:11:00.300148 23310 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:11:00.300159 23310 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:11:00.300472 23310 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:11:00.300487 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.300492 23310 net.cpp:165] Memory required for data: 1588655100\nI1206 09:11:00.300500 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:11:00.300513 23310 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:11:00.300518 23310 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:11:00.300526 23310 net.cpp:408] L2_b4_brc3_bn -> L2_b4_brc3_bn_top\nI1206 09:11:00.300776 23310 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:11:00.300791 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.300796 23310 net.cpp:165] Memory required for data: 1594225660\nI1206 09:11:00.300806 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:11:00.300814 23310 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:11:00.300820 23310 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:11:00.300827 23310 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:11:00.300837 23310 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:11:00.300844 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.300848 23310 net.cpp:165] Memory required for data: 1599796220\nI1206 09:11:00.300853 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:11:00.300873 23310 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:11:00.300879 23310 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:11:00.300891 23310 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:11:00.301331 23310 
net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:11:00.301344 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301349 23310 net.cpp:165] Memory required for data: 1610937340\nI1206 09:11:00.301358 23310 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:11:00.301371 23310 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:11:00.301378 23310 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:11:00.301385 23310 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:11:00.301393 23310 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:11:00.301424 23310 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:11:00.301440 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301445 23310 net.cpp:165] Memory required for data: 1622078460\nI1206 09:11:00.301451 23310 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.301460 23310 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.301465 23310 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:11:00.301476 23310 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:11:00.301486 23310 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:11:00.301533 23310 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.301553 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301560 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301565 23310 net.cpp:165] Memory required for data: 1644360700\nI1206 09:11:00.301570 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:11:00.301578 23310 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:11:00.301584 23310 
net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:11:00.301596 23310 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:11:00.301836 23310 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:11:00.301853 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301858 23310 net.cpp:165] Memory required for data: 1655501820\nI1206 09:11:00.301869 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:11:00.301878 23310 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:11:00.301884 23310 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:11:00.301892 23310 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:11:00.301901 23310 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:11:00.301908 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.301913 23310 net.cpp:165] Memory required for data: 1666642940\nI1206 09:11:00.301918 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:11:00.301933 23310 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:11:00.301939 23310 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:11:00.301947 23310 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:11:00.302397 23310 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:11:00.302410 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.302415 23310 net.cpp:165] Memory required for data: 1672213500\nI1206 09:11:00.302424 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:11:00.302436 23310 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:11:00.302443 23310 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:11:00.302451 23310 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:11:00.302700 23310 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:11:00.302724 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.302731 23310 net.cpp:165] Memory required for data: 1677784060\nI1206 
09:11:00.302742 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:11:00.302750 23310 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:11:00.302757 23310 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:11:00.302764 23310 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:11:00.302774 23310 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:11:00.302781 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.302786 23310 net.cpp:165] Memory required for data: 1683354620\nI1206 09:11:00.302791 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:11:00.302803 23310 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:11:00.302809 23310 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:11:00.302820 23310 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 09:11:00.303143 23310 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:11:00.303158 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.303162 23310 net.cpp:165] Memory required for data: 1688925180\nI1206 09:11:00.303171 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:11:00.303187 23310 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:11:00.303194 23310 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:11:00.303202 23310 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:11:00.303450 23310 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:11:00.303463 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.303469 23310 net.cpp:165] Memory required for data: 1694495740\nI1206 09:11:00.303479 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:11:00.303491 23310 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:11:00.303498 23310 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:11:00.303505 23310 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:11:00.303515 23310 net.cpp:150] Setting up 
L2_b5_brc3_relu\nI1206 09:11:00.303522 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.303527 23310 net.cpp:165] Memory required for data: 1700066300\nI1206 09:11:00.303532 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:11:00.303544 23310 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:11:00.303548 23310 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:11:00.303560 23310 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:11:00.304011 23310 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:11:00.304028 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304033 23310 net.cpp:165] Memory required for data: 1711207420\nI1206 09:11:00.304041 23310 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:11:00.304050 23310 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 09:11:00.304057 23310 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:11:00.304064 23310 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:11:00.304072 23310 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:11:00.304103 23310 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:11:00.304113 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304117 23310 net.cpp:165] Memory required for data: 1722348540\nI1206 09:11:00.304123 23310 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.304131 23310 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.304136 23310 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:11:00.304147 23310 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:11:00.304157 23310 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> 
L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:11:00.304204 23310 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.304219 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304226 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304231 23310 net.cpp:165] Memory required for data: 1744630780\nI1206 09:11:00.304236 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:11:00.304245 23310 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:11:00.304251 23310 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:11:00.304262 23310 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:11:00.304498 23310 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:11:00.304512 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304517 23310 net.cpp:165] Memory required for data: 1755771900\nI1206 09:11:00.304527 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:11:00.304555 23310 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:11:00.304563 23310 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:11:00.304570 23310 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:11:00.304580 23310 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:11:00.304589 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.304592 23310 net.cpp:165] Memory required for data: 1766913020\nI1206 09:11:00.304599 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:11:00.304613 23310 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:11:00.304620 23310 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:11:00.304633 23310 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:11:00.305088 23310 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:11:00.305102 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.305107 23310 net.cpp:165] Memory 
required for data: 1772483580\nI1206 09:11:00.305117 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:11:00.305130 23310 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:11:00.305136 23310 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:11:00.305147 23310 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:11:00.305393 23310 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:11:00.305407 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.305411 23310 net.cpp:165] Memory required for data: 1778054140\nI1206 09:11:00.305421 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:11:00.305430 23310 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:11:00.305436 23310 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:11:00.305444 23310 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:11:00.305454 23310 net.cpp:150] Setting up L2_b6_brc2_relu\nI1206 09:11:00.305460 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.305464 23310 net.cpp:165] Memory required for data: 1783624700\nI1206 09:11:00.305470 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:11:00.305486 23310 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:11:00.305493 23310 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:11:00.305505 23310 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:11:00.305824 23310 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:11:00.305840 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.305845 23310 net.cpp:165] Memory required for data: 1789195260\nI1206 09:11:00.305853 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:11:00.305866 23310 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:11:00.305872 23310 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:11:00.305884 23310 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:11:00.306131 23310 net.cpp:150] Setting 
up L2_b6_brc3_bn\nI1206 09:11:00.306144 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.306149 23310 net.cpp:165] Memory required for data: 1794765820\nI1206 09:11:00.306160 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:11:00.306169 23310 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:11:00.306175 23310 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:11:00.306182 23310 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:11:00.306192 23310 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:11:00.306200 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.306203 23310 net.cpp:165] Memory required for data: 1800336380\nI1206 09:11:00.306208 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:11:00.306223 23310 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:11:00.306229 23310 net.cpp:434] L2_b6_brc3_conv <- L2_b6_brc3_bn_top\nI1206 09:11:00.306237 23310 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:11:00.306679 23310 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:11:00.306694 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.306707 23310 net.cpp:165] Memory required for data: 1811477500\nI1206 09:11:00.306722 23310 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:11:00.306733 23310 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:11:00.306740 23310 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:11:00.306747 23310 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:11:00.306759 23310 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:11:00.306787 23310 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:11:00.306802 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.306807 23310 net.cpp:165] Memory required for data: 1822618620\nI1206 09:11:00.306813 23310 layer_factory.hpp:77] Creating layer 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.306819 23310 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.306825 23310 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:11:00.306833 23310 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:11:00.306843 23310 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:11:00.306892 23310 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.306905 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.306911 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.306916 23310 net.cpp:165] Memory required for data: 1844900860\nI1206 09:11:00.306921 23310 layer_factory.hpp:77] Creating layer L3_b1_brc1_bn\nI1206 09:11:00.306934 23310 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:11:00.306941 23310 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:11:00.306949 23310 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:11:00.307185 23310 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:11:00.307199 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.307204 23310 net.cpp:165] Memory required for data: 1856041980\nI1206 09:11:00.307214 23310 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:11:00.307225 23310 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:11:00.307232 23310 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:11:00.307240 23310 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:11:00.307250 23310 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:11:00.307256 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.307261 23310 net.cpp:165] Memory required for data: 1867183100\nI1206 
09:11:00.307266 23310 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:11:00.307277 23310 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:11:00.307283 23310 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:11:00.307296 23310 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:11:00.307914 23310 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:11:00.307929 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.307934 23310 net.cpp:165] Memory required for data: 1869968380\nI1206 09:11:00.307943 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:11:00.307952 23310 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:11:00.307958 23310 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:11:00.307966 23310 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:11:00.308219 23310 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:11:00.308233 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.308238 23310 net.cpp:165] Memory required for data: 1872753660\nI1206 09:11:00.308248 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:11:00.308256 23310 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:11:00.308262 23310 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:11:00.308275 23310 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:11:00.308293 23310 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:11:00.308301 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.308305 23310 net.cpp:165] Memory required for data: 1875538940\nI1206 09:11:00.308310 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:11:00.308326 23310 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:11:00.308331 23310 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:11:00.308339 23310 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:11:00.308722 23310 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 
09:11:00.308737 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.308743 23310 net.cpp:165] Memory required for data: 1878324220\nI1206 09:11:00.308751 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:11:00.308764 23310 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:11:00.308771 23310 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:11:00.308779 23310 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:11:00.309027 23310 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:11:00.309041 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.309046 23310 net.cpp:165] Memory required for data: 1881109500\nI1206 09:11:00.309056 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:11:00.309064 23310 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:11:00.309070 23310 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:11:00.309077 23310 net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:11:00.309087 23310 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:11:00.309093 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.309098 23310 net.cpp:165] Memory required for data: 1883894780\nI1206 09:11:00.309103 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:11:00.309118 23310 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:11:00.309124 23310 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:11:00.309136 23310 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:11:00.311367 23310 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:11:00.311388 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.311393 23310 net.cpp:165] Memory required for data: 1889465340\nI1206 09:11:00.311403 23310 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:11:00.311416 23310 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:11:00.311424 23310 net.cpp:434] L3_b1_chanInc_conv <- 
L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:11:00.311436 23310 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:11:00.312381 23310 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:11:00.312397 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312403 23310 net.cpp:165] Memory required for data: 1895035900\nI1206 09:11:00.312412 23310 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:11:00.312422 23310 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:11:00.312428 23310 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:11:00.312435 23310 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:11:00.312448 23310 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:11:00.312481 23310 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:11:00.312497 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312502 23310 net.cpp:165] Memory required for data: 1900606460\nI1206 09:11:00.312508 23310 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.312516 23310 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.312522 23310 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:11:00.312530 23310 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:11:00.312539 23310 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:11:00.312602 23310 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.312614 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312621 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312625 23310 net.cpp:165] Memory required for data: 1911747580\nI1206 09:11:00.312631 23310 layer_factory.hpp:77] 
Creating layer L3_b2_brc1_bn\nI1206 09:11:00.312644 23310 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:11:00.312649 23310 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:11:00.312657 23310 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:11:00.312901 23310 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:11:00.312918 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312924 23310 net.cpp:165] Memory required for data: 1917318140\nI1206 09:11:00.312935 23310 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:11:00.312944 23310 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:11:00.312950 23310 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:11:00.312958 23310 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:11:00.312968 23310 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:11:00.312974 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.312979 23310 net.cpp:165] Memory required for data: 1922888700\nI1206 09:11:00.312984 23310 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:11:00.312995 23310 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:11:00.313001 23310 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:11:00.313014 23310 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:11:00.313963 23310 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:11:00.313983 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.313989 23310 net.cpp:165] Memory required for data: 1925673980\nI1206 09:11:00.313998 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:11:00.314007 23310 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:11:00.314014 23310 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:11:00.314028 23310 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:11:00.314278 23310 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:11:00.314291 23310 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.314296 23310 net.cpp:165] Memory required for data: 1928459260\nI1206 09:11:00.314306 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:11:00.314316 23310 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:11:00.314321 23310 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:11:00.314332 23310 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:11:00.314343 23310 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:11:00.314350 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.314354 23310 net.cpp:165] Memory required for data: 1931244540\nI1206 09:11:00.314360 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:11:00.314371 23310 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:11:00.314378 23310 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:11:00.314385 23310 net.cpp:408] L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:11:00.314781 23310 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:11:00.314796 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.314801 23310 net.cpp:165] Memory required for data: 1934029820\nI1206 09:11:00.314811 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:11:00.314824 23310 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:11:00.314831 23310 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:11:00.314839 23310 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:11:00.315088 23310 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:11:00.315100 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.315105 23310 net.cpp:165] Memory required for data: 1936815100\nI1206 09:11:00.315124 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:11:00.315134 23310 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:11:00.315140 23310 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:11:00.315146 23310 
net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:11:00.315156 23310 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:11:00.315163 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.315168 23310 net.cpp:165] Memory required for data: 1939600380\nI1206 09:11:00.315173 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:11:00.315188 23310 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:11:00.315194 23310 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:11:00.315207 23310 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:11:00.316154 23310 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:11:00.316169 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316175 23310 net.cpp:165] Memory required for data: 1945170940\nI1206 09:11:00.316184 23310 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:11:00.316198 23310 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1206 09:11:00.316205 23310 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:11:00.316212 23310 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:11:00.316224 23310 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:11:00.316257 23310 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:11:00.316269 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316274 23310 net.cpp:165] Memory required for data: 1950741500\nI1206 09:11:00.316279 23310 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.316293 23310 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.316298 23310 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:11:00.316306 23310 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:11:00.316316 23310 
net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:11:00.316366 23310 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.316378 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316385 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316390 23310 net.cpp:165] Memory required for data: 1961882620\nI1206 09:11:00.316395 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:11:00.316407 23310 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:11:00.316413 23310 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:11:00.316421 23310 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:11:00.316659 23310 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:11:00.316673 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316679 23310 net.cpp:165] Memory required for data: 1967453180\nI1206 09:11:00.316689 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:11:00.316697 23310 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:11:00.316704 23310 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:11:00.316710 23310 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:11:00.316727 23310 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:11:00.316735 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.316740 23310 net.cpp:165] Memory required for data: 1973023740\nI1206 09:11:00.316745 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:11:00.316761 23310 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:11:00.316766 23310 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:11:00.316778 23310 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:11:00.317756 23310 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:11:00.317772 23310 net.cpp:157] Top shape: 85 128 8 8 
(696320)\nI1206 09:11:00.317777 23310 net.cpp:165] Memory required for data: 1975809020\nI1206 09:11:00.317787 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:11:00.317800 23310 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:11:00.317806 23310 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:11:00.317818 23310 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:11:00.318064 23310 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:11:00.318078 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.318084 23310 net.cpp:165] Memory required for data: 1978594300\nI1206 09:11:00.318094 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:11:00.318102 23310 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:11:00.318109 23310 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:11:00.318116 23310 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 09:11:00.318126 23310 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:11:00.318133 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.318138 23310 net.cpp:165] Memory required for data: 1981379580\nI1206 09:11:00.318143 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:11:00.318158 23310 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:11:00.318166 23310 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:11:00.318177 23310 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:11:00.318564 23310 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:11:00.318579 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.318584 23310 net.cpp:165] Memory required for data: 1984164860\nI1206 09:11:00.318593 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:11:00.318606 23310 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:11:00.318612 23310 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:11:00.318620 23310 net.cpp:408] L3_b3_brc3_bn -> 
L3_b3_brc3_bn_top\nI1206 09:11:00.318881 23310 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:11:00.318898 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.318904 23310 net.cpp:165] Memory required for data: 1986950140\nI1206 09:11:00.318914 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:11:00.318923 23310 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:11:00.318929 23310 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:11:00.318936 23310 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:11:00.318946 23310 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:11:00.318953 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.318958 23310 net.cpp:165] Memory required for data: 1989735420\nI1206 09:11:00.318964 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:11:00.318974 23310 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:11:00.318980 23310 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:11:00.318992 23310 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:11:00.319929 23310 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:11:00.319946 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.319950 23310 net.cpp:165] Memory required for data: 1995305980\nI1206 09:11:00.319959 23310 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:11:00.319970 23310 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:11:00.319977 23310 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:11:00.319984 23310 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:11:00.319999 23310 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:11:00.320034 23310 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:11:00.320046 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.320051 23310 net.cpp:165] Memory required for data: 2000876540\nI1206 
09:11:00.320056 23310 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.320076 23310 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.320083 23310 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:11:00.320091 23310 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:11:00.320101 23310 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:11:00.320153 23310 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.320166 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.320173 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.320178 23310 net.cpp:165] Memory required for data: 2012017660\nI1206 09:11:00.320183 23310 layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:11:00.320195 23310 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:11:00.320201 23310 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:11:00.320209 23310 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:11:00.320453 23310 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:11:00.320471 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.320475 23310 net.cpp:165] Memory required for data: 2017588220\nI1206 09:11:00.320487 23310 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:11:00.320494 23310 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:11:00.320500 23310 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:11:00.320508 23310 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:11:00.320518 23310 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:11:00.320525 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.320530 23310 
net.cpp:165] Memory required for data: 2023158780\nI1206 09:11:00.320534 23310 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:11:00.320546 23310 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:11:00.320551 23310 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:11:00.320564 23310 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:11:00.321501 23310 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:11:00.321516 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.321521 23310 net.cpp:165] Memory required for data: 2025944060\nI1206 09:11:00.321530 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:11:00.321539 23310 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:11:00.321547 23310 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:11:00.321558 23310 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:11:00.321815 23310 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 09:11:00.321828 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.321835 23310 net.cpp:165] Memory required for data: 2028729340\nI1206 09:11:00.321844 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:11:00.321856 23310 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:11:00.321863 23310 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:11:00.321871 23310 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:11:00.321880 23310 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:11:00.321887 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.321892 23310 net.cpp:165] Memory required for data: 2031514620\nI1206 09:11:00.321897 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:11:00.321908 23310 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:11:00.321914 23310 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:11:00.321926 23310 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:11:00.322314 
23310 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:11:00.322327 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.322333 23310 net.cpp:165] Memory required for data: 2034299900\nI1206 09:11:00.322350 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:11:00.322360 23310 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:11:00.322365 23310 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:11:00.322373 23310 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:11:00.322623 23310 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:11:00.322636 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.322641 23310 net.cpp:165] Memory required for data: 2037085180\nI1206 09:11:00.322652 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:11:00.322660 23310 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:11:00.322666 23310 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 09:11:00.322677 23310 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:11:00.322688 23310 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:11:00.322696 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.322700 23310 net.cpp:165] Memory required for data: 2039870460\nI1206 09:11:00.322706 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:11:00.322723 23310 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:11:00.322736 23310 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:11:00.322746 23310 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:11:00.323678 23310 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:11:00.323693 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.323698 23310 net.cpp:165] Memory required for data: 2045441020\nI1206 09:11:00.323707 23310 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:11:00.323729 23310 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:11:00.323742 23310 
net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:11:00.323753 23310 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:11:00.323766 23310 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:11:00.323802 23310 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:11:00.323815 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.323820 23310 net.cpp:165] Memory required for data: 2051011580\nI1206 09:11:00.323827 23310 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.323837 23310 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.323844 23310 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:11:00.323853 23310 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:11:00.323863 23310 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:11:00.323912 23310 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.323925 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.323931 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.323936 23310 net.cpp:165] Memory required for data: 2062152700\nI1206 09:11:00.323941 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:11:00.323952 23310 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:11:00.323959 23310 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:11:00.323967 23310 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:11:00.324204 23310 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:11:00.324218 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.324223 23310 net.cpp:165] Memory required for data: 2067723260\nI1206 
09:11:00.324234 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:11:00.324242 23310 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:11:00.324249 23310 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:11:00.324255 23310 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:11:00.324265 23310 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:11:00.324280 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.324285 23310 net.cpp:165] Memory required for data: 2073293820\nI1206 09:11:00.324290 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:11:00.324306 23310 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:11:00.324312 23310 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:11:00.324324 23310 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:11:00.326545 23310 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:11:00.326566 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.326572 23310 net.cpp:165] Memory required for data: 2076079100\nI1206 09:11:00.326582 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:11:00.326591 23310 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:11:00.326598 23310 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:11:00.326606 23310 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:11:00.326865 23310 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:11:00.326879 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.326885 23310 net.cpp:165] Memory required for data: 2078864380\nI1206 09:11:00.326896 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:11:00.326905 23310 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:11:00.326911 23310 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:11:00.326921 23310 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:11:00.326933 23310 net.cpp:150] Setting up 
L3_b5_brc2_relu\nI1206 09:11:00.326941 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.326946 23310 net.cpp:165] Memory required for data: 2081649660\nI1206 09:11:00.326951 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:11:00.326967 23310 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:11:00.326972 23310 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:11:00.326982 23310 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:11:00.327365 23310 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:11:00.327380 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.327385 23310 net.cpp:165] Memory required for data: 2084434940\nI1206 09:11:00.327432 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:11:00.327448 23310 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:11:00.327455 23310 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:11:00.327463 23310 net.cpp:408] L3_b5_brc3_bn -> L3_b5_brc3_bn_top\nI1206 09:11:00.327731 23310 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:11:00.327745 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.327750 23310 net.cpp:165] Memory required for data: 2087220220\nI1206 09:11:00.327761 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:11:00.327774 23310 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:11:00.327780 23310 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:11:00.327787 23310 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:11:00.327798 23310 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:11:00.327805 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.327811 23310 net.cpp:165] Memory required for data: 2090005500\nI1206 09:11:00.327816 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:11:00.327826 23310 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:11:00.327832 23310 net.cpp:434] L3_b5_brc3_conv <- 
L3_b5_brc3_bn_top\nI1206 09:11:00.327844 23310 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:11:00.328795 23310 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:11:00.328811 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.328816 23310 net.cpp:165] Memory required for data: 2095576060\nI1206 09:11:00.328825 23310 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:11:00.328835 23310 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:11:00.328842 23310 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:11:00.328858 23310 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:11:00.328867 23310 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:11:00.328907 23310 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:11:00.328917 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.328922 23310 net.cpp:165] Memory required for data: 2101146620\nI1206 09:11:00.328927 23310 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.328938 23310 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.328944 23310 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:11:00.328953 23310 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:11:00.328963 23310 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:11:00.329015 23310 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.329027 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.329035 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.329040 23310 net.cpp:165] Memory required for data: 2112287740\nI1206 09:11:00.329046 23310 layer_factory.hpp:77] Creating layer 
L3_b6_brc1_bn\nI1206 09:11:00.329053 23310 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:11:00.329059 23310 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:11:00.329071 23310 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:11:00.329314 23310 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:11:00.329327 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.329332 23310 net.cpp:165] Memory required for data: 2117858300\nI1206 09:11:00.329344 23310 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:11:00.329355 23310 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:11:00.329362 23310 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:11:00.329370 23310 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:11:00.329380 23310 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:11:00.329386 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.329391 23310 net.cpp:165] Memory required for data: 2123428860\nI1206 09:11:00.329396 23310 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:11:00.329407 23310 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:11:00.329413 23310 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:11:00.329426 23310 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:11:00.330368 23310 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:11:00.330384 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.330389 23310 net.cpp:165] Memory required for data: 2126214140\nI1206 09:11:00.330397 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:11:00.330406 23310 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:11:00.330413 23310 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:11:00.330425 23310 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:11:00.330679 23310 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:11:00.330693 23310 net.cpp:157] Top shape: 
85 128 8 8 (696320)\nI1206 09:11:00.330698 23310 net.cpp:165] Memory required for data: 2128999420\nI1206 09:11:00.330708 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:11:00.330724 23310 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:11:00.330730 23310 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:11:00.330742 23310 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:11:00.330754 23310 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:11:00.330760 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.330766 23310 net.cpp:165] Memory required for data: 2131784700\nI1206 09:11:00.330771 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:11:00.330790 23310 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:11:00.330796 23310 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:11:00.330806 23310 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:11:00.331202 23310 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:11:00.331218 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.331223 23310 net.cpp:165] Memory required for data: 2134569980\nI1206 09:11:00.331231 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:11:00.331244 23310 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:11:00.331250 23310 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:11:00.331259 23310 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:11:00.331508 23310 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:11:00.331521 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.331526 23310 net.cpp:165] Memory required for data: 2137355260\nI1206 09:11:00.331537 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:11:00.331545 23310 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:11:00.331552 23310 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:11:00.331559 23310 net.cpp:395] L3_b6_brc3_relu 
-> L3_b6_brc3_bn_top (in-place)\nI1206 09:11:00.331569 23310 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:11:00.331576 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.331580 23310 net.cpp:165] Memory required for data: 2140140540\nI1206 09:11:00.331585 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:11:00.331601 23310 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:11:00.331607 23310 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:11:00.331619 23310 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:11:00.332564 23310 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:11:00.332581 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.332587 23310 net.cpp:165] Memory required for data: 2145711100\nI1206 09:11:00.332595 23310 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:11:00.332612 23310 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:11:00.332618 23310 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:11:00.332625 23310 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:11:00.332638 23310 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:11:00.332671 23310 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:11:00.332684 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.332689 23310 net.cpp:165] Memory required for data: 2151281660\nI1206 09:11:00.332695 23310 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:11:00.332706 23310 net.cpp:100] Creating Layer post_bn\nI1206 09:11:00.332720 23310 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:11:00.332728 23310 net.cpp:408] post_bn -> post_bn_top\nI1206 09:11:00.332978 23310 net.cpp:150] Setting up post_bn\nI1206 09:11:00.332991 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.332996 23310 net.cpp:165] Memory required for data: 2156852220\nI1206 09:11:00.333007 23310 layer_factory.hpp:77] 
Creating layer post_relu\nI1206 09:11:00.333015 23310 net.cpp:100] Creating Layer post_relu\nI1206 09:11:00.333021 23310 net.cpp:434] post_relu <- post_bn_top\nI1206 09:11:00.333029 23310 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:11:00.333039 23310 net.cpp:150] Setting up post_relu\nI1206 09:11:00.333045 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.333050 23310 net.cpp:165] Memory required for data: 2162422780\nI1206 09:11:00.333055 23310 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:11:00.333066 23310 net.cpp:100] Creating Layer post_pool\nI1206 09:11:00.333071 23310 net.cpp:434] post_pool <- post_bn_top\nI1206 09:11:00.333084 23310 net.cpp:408] post_pool -> post_pool\nI1206 09:11:00.333196 23310 net.cpp:150] Setting up post_pool\nI1206 09:11:00.333212 23310 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:11:00.333226 23310 net.cpp:165] Memory required for data: 2162509820\nI1206 09:11:00.333232 23310 layer_factory.hpp:77] Creating layer post_FC\nI1206 09:11:00.333333 23310 net.cpp:100] Creating Layer post_FC\nI1206 09:11:00.333346 23310 net.cpp:434] post_FC <- post_pool\nI1206 09:11:00.333360 23310 net.cpp:408] post_FC -> post_FC_top\nI1206 09:11:00.333645 23310 net.cpp:150] Setting up post_FC\nI1206 09:11:00.333662 23310 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.333667 23310 net.cpp:165] Memory required for data: 2162513220\nI1206 09:11:00.333675 23310 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:11:00.333688 23310 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:11:00.333695 23310 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:11:00.333703 23310 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:11:00.333719 23310 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:11:00.333777 23310 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:11:00.333791 23310 
net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.333797 23310 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.333801 23310 net.cpp:165] Memory required for data: 2162520020\nI1206 09:11:00.333807 23310 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:11:00.333863 23310 net.cpp:100] Creating Layer accuracy\nI1206 09:11:00.333874 23310 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:11:00.333883 23310 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:11:00.333890 23310 net.cpp:408] accuracy -> accuracy\nI1206 09:11:00.333945 23310 net.cpp:150] Setting up accuracy\nI1206 09:11:00.333958 23310 net.cpp:157] Top shape: (1)\nI1206 09:11:00.333963 23310 net.cpp:165] Memory required for data: 2162520024\nI1206 09:11:00.333969 23310 layer_factory.hpp:77] Creating layer loss\nI1206 09:11:00.333978 23310 net.cpp:100] Creating Layer loss\nI1206 09:11:00.333984 23310 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 09:11:00.333992 23310 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:11:00.334003 23310 net.cpp:408] loss -> loss\nI1206 09:11:00.334942 23310 layer_factory.hpp:77] Creating layer loss\nI1206 09:11:00.336009 23310 net.cpp:150] Setting up loss\nI1206 09:11:00.336025 23310 net.cpp:157] Top shape: (1)\nI1206 09:11:00.336031 23310 net.cpp:160]     with loss weight 1\nI1206 09:11:00.336123 23310 net.cpp:165] Memory required for data: 2162520028\nI1206 09:11:00.336133 23310 net.cpp:226] loss needs backward computation.\nI1206 09:11:00.336139 23310 net.cpp:228] accuracy does not need backward computation.\nI1206 09:11:00.336145 23310 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:11:00.336151 23310 net.cpp:226] post_FC needs backward computation.\nI1206 09:11:00.336156 23310 net.cpp:226] post_pool needs backward computation.\nI1206 09:11:00.336161 23310 net.cpp:226] post_relu needs backward computation.\nI1206 09:11:00.336166 23310 net.cpp:226] post_bn needs backward 
computation.\nI1206 09:11:00.336171 23310 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:11:00.336177 23310 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:11:00.336182 23310 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:11:00.336187 23310 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:11:00.336192 23310 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:11:00.336197 23310 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:11:00.336202 23310 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:11:00.336207 23310 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:11:00.336212 23310 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:11:00.336217 23310 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:11:00.336223 23310 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336237 23310 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:11:00.336243 23310 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:11:00.336253 23310 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:11:00.336258 23310 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:11:00.336264 23310 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:11:00.336269 23310 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:11:00.336274 23310 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:11:00.336279 23310 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:11:00.336284 23310 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:11:00.336289 23310 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:11:00.336295 23310 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336300 23310 
net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:11:00.336307 23310 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:11:00.336311 23310 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:11:00.336316 23310 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:11:00.336321 23310 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:11:00.336326 23310 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:11:00.336331 23310 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:11:00.336336 23310 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:11:00.336343 23310 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:11:00.336347 23310 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:11:00.336354 23310 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336359 23310 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:11:00.336364 23310 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:11:00.336369 23310 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:11:00.336374 23310 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:11:00.336380 23310 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:11:00.336385 23310 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:11:00.336390 23310 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:11:00.336395 23310 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:11:00.336400 23310 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:11:00.336405 23310 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:11:00.336410 23310 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336416 23310 net.cpp:226] L3_b2_sum_eltwise needs backward 
computation.\nI1206 09:11:00.336421 23310 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:11:00.336426 23310 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:11:00.336431 23310 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:11:00.336436 23310 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:11:00.336441 23310 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:11:00.336447 23310 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:11:00.336452 23310 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:11:00.336457 23310 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:11:00.336462 23310 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:11:00.336467 23310 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336473 23310 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1206 09:11:00.336478 23310 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:11:00.336484 23310 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:11:00.336499 23310 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:11:00.336505 23310 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:11:00.336510 23310 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:11:00.336516 23310 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:11:00.336521 23310 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:11:00.336526 23310 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:11:00.336531 23310 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:11:00.336536 23310 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:11:00.336542 23310 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336547 23310 
net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:11:00.336554 23310 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:11:00.336558 23310 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:11:00.336563 23310 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:11:00.336568 23310 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:11:00.336575 23310 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:11:00.336580 23310 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:11:00.336585 23310 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:11:00.336589 23310 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:11:00.336594 23310 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:11:00.336601 23310 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336606 23310 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:11:00.336611 23310 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:11:00.336616 23310 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:11:00.336622 23310 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:11:00.336627 23310 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:11:00.336632 23310 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:11:00.336637 23310 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:11:00.336642 23310 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:11:00.336648 23310 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:11:00.336653 23310 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:11:00.336658 23310 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336664 23310 net.cpp:226] L2_b4_sum_eltwise needs backward 
computation.\nI1206 09:11:00.336669 23310 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:11:00.336675 23310 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:11:00.336680 23310 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:11:00.336685 23310 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:11:00.336690 23310 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:11:00.336696 23310 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:11:00.336701 23310 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:11:00.336706 23310 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:11:00.336711 23310 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:11:00.336726 23310 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336732 23310 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1206 09:11:00.336738 23310 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:11:00.336743 23310 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:11:00.336750 23310 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:11:00.336755 23310 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:11:00.336766 23310 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:11:00.336772 23310 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:11:00.336778 23310 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:11:00.336783 23310 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:11:00.336788 23310 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:11:00.336794 23310 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336799 23310 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:11:00.336805 23310 
net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:11:00.336812 23310 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:11:00.336817 23310 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:11:00.336822 23310 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:11:00.336827 23310 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:11:00.336833 23310 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:11:00.336843 23310 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:11:00.336849 23310 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:11:00.336855 23310 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:11:00.336860 23310 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336866 23310 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:11:00.336872 23310 net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:11:00.336879 23310 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:11:00.336884 23310 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:11:00.336889 23310 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:11:00.336895 23310 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:11:00.336901 23310 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:11:00.336906 23310 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:11:00.336911 23310 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:11:00.336917 23310 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:11:00.336923 23310 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:11:00.336928 23310 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336933 23310 net.cpp:226] L1_b6_sum_eltwise needs backward 
computation.\nI1206 09:11:00.336940 23310 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:11:00.336946 23310 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:11:00.336951 23310 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:11:00.336956 23310 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:11:00.336961 23310 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:11:00.336966 23310 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:11:00.336971 23310 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:11:00.336977 23310 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:11:00.336982 23310 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:11:00.336987 23310 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.336992 23310 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1206 09:11:00.336998 23310 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:11:00.337004 23310 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:11:00.337009 23310 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:11:00.337015 23310 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:11:00.337020 23310 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:11:00.337025 23310 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:11:00.337039 23310 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:11:00.337045 23310 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:11:00.337050 23310 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:11:00.337056 23310 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.337062 23310 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:11:00.337069 23310 
net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:11:00.337074 23310 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:11:00.337080 23310 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:11:00.337085 23310 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:11:00.337090 23310 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:11:00.337095 23310 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:11:00.337101 23310 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:11:00.337106 23310 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:11:00.337112 23310 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:11:00.337118 23310 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.337123 23310 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:11:00.337129 23310 net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:11:00.337136 23310 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:11:00.337141 23310 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:11:00.337146 23310 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:11:00.337152 23310 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:11:00.337157 23310 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:11:00.337162 23310 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:11:00.337168 23310 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:11:00.337174 23310 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:11:00.337180 23310 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.337185 23310 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:11:00.337191 23310 net.cpp:226] L1_b2_brc3_conv needs backward 
computation.\nI1206 09:11:00.337198 23310 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:11:00.337203 23310 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:11:00.337208 23310 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:11:00.337213 23310 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:11:00.337218 23310 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:11:00.337225 23310 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:11:00.337230 23310 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:11:00.337235 23310 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:11:00.337241 23310 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.337247 23310 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:11:00.337254 23310 net.cpp:226] L1_b1_chanInc_conv needs backward computation.\nI1206 09:11:00.337260 23310 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:11:00.337265 23310 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:11:00.337270 23310 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:11:00.337276 23310 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:11:00.337281 23310 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:11:00.337286 23310 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:11:00.337292 23310 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:11:00.337297 23310 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:11:00.337308 23310 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:11:00.337314 23310 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:11:00.337321 23310 net.cpp:226] pre_conv needs backward computation.\nI1206 09:11:00.337327 23310 net.cpp:228] 
label_dataLayer_1_split does not need backward computation.\nI1206 09:11:00.337333 23310 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:11:00.337338 23310 net.cpp:270] This network produces output accuracy\nI1206 09:11:00.337345 23310 net.cpp:270] This network produces output loss\nI1206 09:11:00.337646 23310 net.cpp:283] Network initialization done.\nI1206 09:11:00.343839 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:00.343873 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:00.343951 23310 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1206 09:11:00.344203 23310 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1206 09:11:00.345674 23310 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-ResNeXt\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 
0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: 
\"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: 
\"L1_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: 
\"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: 
\"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer {\n  
name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    group: 32\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer {\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\nI1206 09:11:00.346930 23310 layer_factory.hpp:77] Creating layer dataLayer\nI1206 09:11:00.347131 23310 net.cpp:100] Creating Layer dataLayer\nI1206 09:11:00.347152 23310 net.cpp:408] dataLayer -> data_top\nI1206 09:11:00.347168 23310 net.cpp:408] dataLayer -> label\nI1206 09:11:00.347182 23310 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1206 09:11:00.356024 23317 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1206 09:11:00.356277 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:00.363329 23310 net.cpp:150] Setting up dataLayer\nI1206 09:11:00.363351 23310 net.cpp:157] Top shape: 85 3 32 32 (261120)\nI1206 09:11:00.363386 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.363394 23310 net.cpp:165] Memory required for data: 1044820\nI1206 09:11:00.363401 23310 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1206 09:11:00.363412 23310 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1206 09:11:00.363418 23310 net.cpp:434] label_dataLayer_1_split <- label\nI1206 09:11:00.363430 23310 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1206 09:11:00.363441 23310 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1206 09:11:00.363572 23310 net.cpp:150] Setting up label_dataLayer_1_split\nI1206 09:11:00.363587 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.363595 23310 net.cpp:157] Top shape: 85 (85)\nI1206 09:11:00.363600 23310 net.cpp:165] Memory required for data: 1045500\nI1206 09:11:00.363617 23310 layer_factory.hpp:77] Creating layer pre_conv\nI1206 09:11:00.363636 23310 net.cpp:100] Creating Layer pre_conv\nI1206 09:11:00.363642 23310 net.cpp:434] pre_conv <- data_top\nI1206 09:11:00.363656 23310 net.cpp:408] pre_conv -> pre_conv_top\nI1206 09:11:00.364123 23310 net.cpp:150] Setting up pre_conv\nI1206 09:11:00.364141 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.364147 
23310 net.cpp:165] Memory required for data: 6616060\nI1206 09:11:00.364166 23310 layer_factory.hpp:77] Creating layer pre_conv_top_pre_conv_0_split\nI1206 09:11:00.364178 23310 net.cpp:100] Creating Layer pre_conv_top_pre_conv_0_split\nI1206 09:11:00.364184 23310 net.cpp:434] pre_conv_top_pre_conv_0_split <- pre_conv_top\nI1206 09:11:00.364192 23310 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_0\nI1206 09:11:00.364202 23310 net.cpp:408] pre_conv_top_pre_conv_0_split -> pre_conv_top_pre_conv_0_split_1\nI1206 09:11:00.364266 23310 net.cpp:150] Setting up pre_conv_top_pre_conv_0_split\nI1206 09:11:00.364302 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.364311 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.364316 23310 net.cpp:165] Memory required for data: 17757180\nI1206 09:11:00.364322 23310 layer_factory.hpp:77] Creating layer L1_b1_brc1_bn\nI1206 09:11:00.364336 23310 net.cpp:100] Creating Layer L1_b1_brc1_bn\nI1206 09:11:00.364341 23310 net.cpp:434] L1_b1_brc1_bn <- pre_conv_top_pre_conv_0_split_0\nI1206 09:11:00.364367 23310 net.cpp:408] L1_b1_brc1_bn -> L1_b1_brc1_bn_top\nI1206 09:11:00.364718 23310 net.cpp:150] Setting up L1_b1_brc1_bn\nI1206 09:11:00.364734 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.364742 23310 net.cpp:165] Memory required for data: 23327740\nI1206 09:11:00.364760 23310 layer_factory.hpp:77] Creating layer L1_b1_brc1_relu\nI1206 09:11:00.364799 23310 net.cpp:100] Creating Layer L1_b1_brc1_relu\nI1206 09:11:00.364809 23310 net.cpp:434] L1_b1_brc1_relu <- L1_b1_brc1_bn_top\nI1206 09:11:00.364816 23310 net.cpp:395] L1_b1_brc1_relu -> L1_b1_brc1_bn_top (in-place)\nI1206 09:11:00.364826 23310 net.cpp:150] Setting up L1_b1_brc1_relu\nI1206 09:11:00.364837 23310 net.cpp:157] Top shape: 85 16 32 32 (1392640)\nI1206 09:11:00.364843 23310 net.cpp:165] Memory required for data: 28898300\nI1206 09:11:00.364848 23310 layer_factory.hpp:77] Creating layer 
L1_b1_brc1_conv\nI1206 09:11:00.364859 23310 net.cpp:100] Creating Layer L1_b1_brc1_conv\nI1206 09:11:00.364866 23310 net.cpp:434] L1_b1_brc1_conv <- L1_b1_brc1_bn_top\nI1206 09:11:00.364892 23310 net.cpp:408] L1_b1_brc1_conv -> L1_b1_brc1_conv_top\nI1206 09:11:00.365288 23310 net.cpp:150] Setting up L1_b1_brc1_conv\nI1206 09:11:00.365303 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.365309 23310 net.cpp:165] Memory required for data: 40039420\nI1206 09:11:00.365319 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_bn\nI1206 09:11:00.365326 23310 net.cpp:100] Creating Layer L1_b1_brc2_bn\nI1206 09:11:00.365334 23310 net.cpp:434] L1_b1_brc2_bn <- L1_b1_brc1_conv_top\nI1206 09:11:00.365348 23310 net.cpp:408] L1_b1_brc2_bn -> L1_b1_brc2_bn_top\nI1206 09:11:00.365669 23310 net.cpp:150] Setting up L1_b1_brc2_bn\nI1206 09:11:00.365686 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.365691 23310 net.cpp:165] Memory required for data: 51180540\nI1206 09:11:00.365706 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_relu\nI1206 09:11:00.365728 23310 net.cpp:100] Creating Layer L1_b1_brc2_relu\nI1206 09:11:00.365736 23310 net.cpp:434] L1_b1_brc2_relu <- L1_b1_brc2_bn_top\nI1206 09:11:00.365744 23310 net.cpp:395] L1_b1_brc2_relu -> L1_b1_brc2_bn_top (in-place)\nI1206 09:11:00.365754 23310 net.cpp:150] Setting up L1_b1_brc2_relu\nI1206 09:11:00.365762 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.365767 23310 net.cpp:165] Memory required for data: 62321660\nI1206 09:11:00.365772 23310 layer_factory.hpp:77] Creating layer L1_b1_brc2_conv\nI1206 09:11:00.365782 23310 net.cpp:100] Creating Layer L1_b1_brc2_conv\nI1206 09:11:00.365792 23310 net.cpp:434] L1_b1_brc2_conv <- L1_b1_brc2_bn_top\nI1206 09:11:00.365815 23310 net.cpp:408] L1_b1_brc2_conv -> L1_b1_brc2_conv_top\nI1206 09:11:00.367817 23310 net.cpp:150] Setting up L1_b1_brc2_conv\nI1206 09:11:00.367835 23310 net.cpp:157] Top shape: 85 32 32 32 
(2785280)\nI1206 09:11:00.367841 23310 net.cpp:165] Memory required for data: 73462780\nI1206 09:11:00.367849 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_bn\nI1206 09:11:00.367859 23310 net.cpp:100] Creating Layer L1_b1_brc3_bn\nI1206 09:11:00.367864 23310 net.cpp:434] L1_b1_brc3_bn <- L1_b1_brc2_conv_top\nI1206 09:11:00.367877 23310 net.cpp:408] L1_b1_brc3_bn -> L1_b1_brc3_bn_top\nI1206 09:11:00.368140 23310 net.cpp:150] Setting up L1_b1_brc3_bn\nI1206 09:11:00.368154 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.368160 23310 net.cpp:165] Memory required for data: 84603900\nI1206 09:11:00.368170 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_relu\nI1206 09:11:00.368178 23310 net.cpp:100] Creating Layer L1_b1_brc3_relu\nI1206 09:11:00.368183 23310 net.cpp:434] L1_b1_brc3_relu <- L1_b1_brc3_bn_top\nI1206 09:11:00.368192 23310 net.cpp:395] L1_b1_brc3_relu -> L1_b1_brc3_bn_top (in-place)\nI1206 09:11:00.368202 23310 net.cpp:150] Setting up L1_b1_brc3_relu\nI1206 09:11:00.368209 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.368214 23310 net.cpp:165] Memory required for data: 95745020\nI1206 09:11:00.368219 23310 layer_factory.hpp:77] Creating layer L1_b1_brc3_conv\nI1206 09:11:00.368335 23310 net.cpp:100] Creating Layer L1_b1_brc3_conv\nI1206 09:11:00.368350 23310 net.cpp:434] L1_b1_brc3_conv <- L1_b1_brc3_bn_top\nI1206 09:11:00.368368 23310 net.cpp:408] L1_b1_brc3_conv -> L1_b1_brc3_conv_top\nI1206 09:11:00.368719 23310 net.cpp:150] Setting up L1_b1_brc3_conv\nI1206 09:11:00.368736 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.368741 23310 net.cpp:165] Memory required for data: 118027260\nI1206 09:11:00.368758 23310 layer_factory.hpp:77] Creating layer L1_b1_chanInc_conv\nI1206 09:11:00.368774 23310 net.cpp:100] Creating Layer L1_b1_chanInc_conv\nI1206 09:11:00.368780 23310 net.cpp:434] L1_b1_chanInc_conv <- pre_conv_top_pre_conv_0_split_1\nI1206 09:11:00.368793 23310 net.cpp:408] 
L1_b1_chanInc_conv -> L1_b1_chanInc_conv_top\nI1206 09:11:00.369119 23310 net.cpp:150] Setting up L1_b1_chanInc_conv\nI1206 09:11:00.369135 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369141 23310 net.cpp:165] Memory required for data: 140309500\nI1206 09:11:00.369150 23310 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1206 09:11:00.369159 23310 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1206 09:11:00.369164 23310 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_brc3_conv_top\nI1206 09:11:00.369171 23310 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_chanInc_conv_top\nI1206 09:11:00.369179 23310 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1206 09:11:00.369212 23310 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1206 09:11:00.369222 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369227 23310 net.cpp:165] Memory required for data: 162591740\nI1206 09:11:00.369232 23310 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.369243 23310 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.369249 23310 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split <- L1_b1_sum_eltwise_top\nI1206 09:11:00.369259 23310 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:11:00.369268 23310 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split -> L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:11:00.369318 23310 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split\nI1206 09:11:00.369333 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369339 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369344 23310 net.cpp:165] Memory required for data: 207156220\nI1206 09:11:00.369349 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_bn\nI1206 09:11:00.369371 23310 net.cpp:100] Creating Layer 
L1_b2_brc1_bn\nI1206 09:11:00.369382 23310 net.cpp:434] L1_b2_brc1_bn <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_0\nI1206 09:11:00.369395 23310 net.cpp:408] L1_b2_brc1_bn -> L1_b2_brc1_bn_top\nI1206 09:11:00.369901 23310 net.cpp:150] Setting up L1_b2_brc1_bn\nI1206 09:11:00.369917 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369923 23310 net.cpp:165] Memory required for data: 229438460\nI1206 09:11:00.369937 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_relu\nI1206 09:11:00.369951 23310 net.cpp:100] Creating Layer L1_b2_brc1_relu\nI1206 09:11:00.369957 23310 net.cpp:434] L1_b2_brc1_relu <- L1_b2_brc1_bn_top\nI1206 09:11:00.369964 23310 net.cpp:395] L1_b2_brc1_relu -> L1_b2_brc1_bn_top (in-place)\nI1206 09:11:00.369978 23310 net.cpp:150] Setting up L1_b2_brc1_relu\nI1206 09:11:00.369987 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.369992 23310 net.cpp:165] Memory required for data: 251720700\nI1206 09:11:00.369997 23310 layer_factory.hpp:77] Creating layer L1_b2_brc1_conv\nI1206 09:11:00.370008 23310 net.cpp:100] Creating Layer L1_b2_brc1_conv\nI1206 09:11:00.370016 23310 net.cpp:434] L1_b2_brc1_conv <- L1_b2_brc1_bn_top\nI1206 09:11:00.370030 23310 net.cpp:408] L1_b2_brc1_conv -> L1_b2_brc1_conv_top\nI1206 09:11:00.370421 23310 net.cpp:150] Setting up L1_b2_brc1_conv\nI1206 09:11:00.370435 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.370442 23310 net.cpp:165] Memory required for data: 262861820\nI1206 09:11:00.370455 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_bn\nI1206 09:11:00.370463 23310 net.cpp:100] Creating Layer L1_b2_brc2_bn\nI1206 09:11:00.370470 23310 net.cpp:434] L1_b2_brc2_bn <- L1_b2_brc1_conv_top\nI1206 09:11:00.370477 23310 net.cpp:408] L1_b2_brc2_bn -> L1_b2_brc2_bn_top\nI1206 09:11:00.370784 23310 net.cpp:150] Setting up L1_b2_brc2_bn\nI1206 09:11:00.370798 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.370805 23310 net.cpp:165] 
Memory required for data: 274002940\nI1206 09:11:00.370815 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_relu\nI1206 09:11:00.370827 23310 net.cpp:100] Creating Layer L1_b2_brc2_relu\nI1206 09:11:00.370833 23310 net.cpp:434] L1_b2_brc2_relu <- L1_b2_brc2_bn_top\nI1206 09:11:00.370844 23310 net.cpp:395] L1_b2_brc2_relu -> L1_b2_brc2_bn_top (in-place)\nI1206 09:11:00.370854 23310 net.cpp:150] Setting up L1_b2_brc2_relu\nI1206 09:11:00.370862 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.370867 23310 net.cpp:165] Memory required for data: 285144060\nI1206 09:11:00.370872 23310 layer_factory.hpp:77] Creating layer L1_b2_brc2_conv\nI1206 09:11:00.370895 23310 net.cpp:100] Creating Layer L1_b2_brc2_conv\nI1206 09:11:00.370903 23310 net.cpp:434] L1_b2_brc2_conv <- L1_b2_brc2_bn_top\nI1206 09:11:00.370914 23310 net.cpp:408] L1_b2_brc2_conv -> L1_b2_brc2_conv_top\nI1206 09:11:00.371260 23310 net.cpp:150] Setting up L1_b2_brc2_conv\nI1206 09:11:00.371278 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.371284 23310 net.cpp:165] Memory required for data: 296285180\nI1206 09:11:00.371292 23310 layer_factory.hpp:77] Creating layer L1_b2_brc3_bn\nI1206 09:11:00.371300 23310 net.cpp:100] Creating Layer L1_b2_brc3_bn\nI1206 09:11:00.371309 23310 net.cpp:434] L1_b2_brc3_bn <- L1_b2_brc2_conv_top\nI1206 09:11:00.371317 23310 net.cpp:408] L1_b2_brc3_bn -> L1_b2_brc3_bn_top\nI1206 09:11:00.371655 23310 net.cpp:150] Setting up L1_b2_brc3_bn\nI1206 09:11:00.371670 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.371675 23310 net.cpp:165] Memory required for data: 307426300\nI1206 09:11:00.371686 23310 layer_factory.hpp:77] Creating layer L1_b2_brc3_relu\nI1206 09:11:00.371697 23310 net.cpp:100] Creating Layer L1_b2_brc3_relu\nI1206 09:11:00.371704 23310 net.cpp:434] L1_b2_brc3_relu <- L1_b2_brc3_bn_top\nI1206 09:11:00.371711 23310 net.cpp:395] L1_b2_brc3_relu -> L1_b2_brc3_bn_top (in-place)\nI1206 09:11:00.371731 23310 
net.cpp:150] Setting up L1_b2_brc3_relu\nI1206 09:11:00.371739 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.371754 23310 net.cpp:165] Memory required for data: 318567420\nI1206 09:11:00.371762 23310 layer_factory.hpp:77] Creating layer L1_b2_brc3_conv\nI1206 09:11:00.371778 23310 net.cpp:100] Creating Layer L1_b2_brc3_conv\nI1206 09:11:00.371783 23310 net.cpp:434] L1_b2_brc3_conv <- L1_b2_brc3_bn_top\nI1206 09:11:00.371799 23310 net.cpp:408] L1_b2_brc3_conv -> L1_b2_brc3_conv_top\nI1206 09:11:00.372190 23310 net.cpp:150] Setting up L1_b2_brc3_conv\nI1206 09:11:00.372206 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372213 23310 net.cpp:165] Memory required for data: 340849660\nI1206 09:11:00.372232 23310 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1206 09:11:00.372285 23310 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1206 09:11:00.372295 23310 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_brc3_conv_top\nI1206 09:11:00.372303 23310 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split_1\nI1206 09:11:00.372311 23310 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1206 09:11:00.372349 23310 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1206 09:11:00.372365 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372371 23310 net.cpp:165] Memory required for data: 363131900\nI1206 09:11:00.372376 23310 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.372385 23310 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.372390 23310 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split <- L1_b2_sum_eltwise_top\nI1206 09:11:00.372400 23310 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:11:00.372413 23310 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:11:00.372468 23310 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split\nI1206 09:11:00.372481 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372488 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372493 23310 net.cpp:165] Memory required for data: 407696380\nI1206 09:11:00.372498 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_bn\nI1206 09:11:00.372515 23310 net.cpp:100] Creating Layer L1_b3_brc1_bn\nI1206 09:11:00.372521 23310 net.cpp:434] L1_b3_brc1_bn <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_0\nI1206 09:11:00.372529 23310 net.cpp:408] L1_b3_brc1_bn -> L1_b3_brc1_bn_top\nI1206 09:11:00.372828 23310 net.cpp:150] Setting up L1_b3_brc1_bn\nI1206 09:11:00.372843 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372848 23310 net.cpp:165] Memory required for data: 429978620\nI1206 09:11:00.372859 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_relu\nI1206 09:11:00.372867 23310 net.cpp:100] Creating Layer L1_b3_brc1_relu\nI1206 09:11:00.372874 23310 net.cpp:434] L1_b3_brc1_relu <- L1_b3_brc1_bn_top\nI1206 09:11:00.372884 23310 net.cpp:395] L1_b3_brc1_relu -> L1_b3_brc1_bn_top (in-place)\nI1206 09:11:00.372895 23310 net.cpp:150] Setting up L1_b3_brc1_relu\nI1206 09:11:00.372902 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.372907 23310 net.cpp:165] Memory required for data: 452260860\nI1206 09:11:00.372913 23310 layer_factory.hpp:77] Creating layer L1_b3_brc1_conv\nI1206 09:11:00.372927 23310 net.cpp:100] Creating Layer L1_b3_brc1_conv\nI1206 09:11:00.372933 23310 net.cpp:434] L1_b3_brc1_conv <- L1_b3_brc1_bn_top\nI1206 09:11:00.372946 23310 net.cpp:408] L1_b3_brc1_conv -> L1_b3_brc1_conv_top\nI1206 09:11:00.373286 23310 net.cpp:150] Setting up L1_b3_brc1_conv\nI1206 09:11:00.373301 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.373306 23310 net.cpp:165] Memory required 
for data: 463401980\nI1206 09:11:00.373316 23310 layer_factory.hpp:77] Creating layer L1_b3_brc2_bn\nI1206 09:11:00.373328 23310 net.cpp:100] Creating Layer L1_b3_brc2_bn\nI1206 09:11:00.373335 23310 net.cpp:434] L1_b3_brc2_bn <- L1_b3_brc1_conv_top\nI1206 09:11:00.373351 23310 net.cpp:408] L1_b3_brc2_bn -> L1_b3_brc2_bn_top\nI1206 09:11:00.373620 23310 net.cpp:150] Setting up L1_b3_brc2_bn\nI1206 09:11:00.373632 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.373637 23310 net.cpp:165] Memory required for data: 474543100\nI1206 09:11:00.373647 23310 layer_factory.hpp:77] Creating layer L1_b3_brc2_relu\nI1206 09:11:00.373656 23310 net.cpp:100] Creating Layer L1_b3_brc2_relu\nI1206 09:11:00.373661 23310 net.cpp:434] L1_b3_brc2_relu <- L1_b3_brc2_bn_top\nI1206 09:11:00.373668 23310 net.cpp:395] L1_b3_brc2_relu -> L1_b3_brc2_bn_top (in-place)\nI1206 09:11:00.373678 23310 net.cpp:150] Setting up L1_b3_brc2_relu\nI1206 09:11:00.373685 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.373690 23310 net.cpp:165] Memory required for data: 485684220\nI1206 09:11:00.373694 23310 layer_factory.hpp:77] Creating layer L1_b3_brc2_conv\nI1206 09:11:00.373711 23310 net.cpp:100] Creating Layer L1_b3_brc2_conv\nI1206 09:11:00.373723 23310 net.cpp:434] L1_b3_brc2_conv <- L1_b3_brc2_bn_top\nI1206 09:11:00.373736 23310 net.cpp:408] L1_b3_brc2_conv -> L1_b3_brc2_conv_top\nI1206 09:11:00.374052 23310 net.cpp:150] Setting up L1_b3_brc2_conv\nI1206 09:11:00.374066 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.374071 23310 net.cpp:165] Memory required for data: 496825340\nI1206 09:11:00.374080 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_bn\nI1206 09:11:00.374094 23310 net.cpp:100] Creating Layer L1_b3_brc3_bn\nI1206 09:11:00.374099 23310 net.cpp:434] L1_b3_brc3_bn <- L1_b3_brc2_conv_top\nI1206 09:11:00.374110 23310 net.cpp:408] L1_b3_brc3_bn -> L1_b3_brc3_bn_top\nI1206 09:11:00.374382 23310 net.cpp:150] Setting up 
L1_b3_brc3_bn\nI1206 09:11:00.374395 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.374400 23310 net.cpp:165] Memory required for data: 507966460\nI1206 09:11:00.374411 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_relu\nI1206 09:11:00.374419 23310 net.cpp:100] Creating Layer L1_b3_brc3_relu\nI1206 09:11:00.374425 23310 net.cpp:434] L1_b3_brc3_relu <- L1_b3_brc3_bn_top\nI1206 09:11:00.374433 23310 net.cpp:395] L1_b3_brc3_relu -> L1_b3_brc3_bn_top (in-place)\nI1206 09:11:00.374441 23310 net.cpp:150] Setting up L1_b3_brc3_relu\nI1206 09:11:00.374449 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.374454 23310 net.cpp:165] Memory required for data: 519107580\nI1206 09:11:00.374459 23310 layer_factory.hpp:77] Creating layer L1_b3_brc3_conv\nI1206 09:11:00.374471 23310 net.cpp:100] Creating Layer L1_b3_brc3_conv\nI1206 09:11:00.374477 23310 net.cpp:434] L1_b3_brc3_conv <- L1_b3_brc3_bn_top\nI1206 09:11:00.374490 23310 net.cpp:408] L1_b3_brc3_conv -> L1_b3_brc3_conv_top\nI1206 09:11:00.374840 23310 net.cpp:150] Setting up L1_b3_brc3_conv\nI1206 09:11:00.374855 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.374860 23310 net.cpp:165] Memory required for data: 541389820\nI1206 09:11:00.374868 23310 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1206 09:11:00.374877 23310 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1206 09:11:00.374883 23310 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_brc3_conv_top\nI1206 09:11:00.374894 23310 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split_1\nI1206 09:11:00.374902 23310 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1206 09:11:00.374943 23310 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1206 09:11:00.374953 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.374958 23310 net.cpp:165] Memory required for data: 563672060\nI1206 09:11:00.374963 23310 layer_factory.hpp:77] Creating layer 
L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.374975 23310 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.374981 23310 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split <- L1_b3_sum_eltwise_top\nI1206 09:11:00.374989 23310 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:11:00.375006 23310 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split -> L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:11:00.375058 23310 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split\nI1206 09:11:00.375067 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.375074 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.375079 23310 net.cpp:165] Memory required for data: 608236540\nI1206 09:11:00.375084 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_bn\nI1206 09:11:00.375095 23310 net.cpp:100] Creating Layer L1_b4_brc1_bn\nI1206 09:11:00.375102 23310 net.cpp:434] L1_b4_brc1_bn <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_0\nI1206 09:11:00.375109 23310 net.cpp:408] L1_b4_brc1_bn -> L1_b4_brc1_bn_top\nI1206 09:11:00.375360 23310 net.cpp:150] Setting up L1_b4_brc1_bn\nI1206 09:11:00.375372 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.375377 23310 net.cpp:165] Memory required for data: 630518780\nI1206 09:11:00.375388 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_relu\nI1206 09:11:00.375396 23310 net.cpp:100] Creating Layer L1_b4_brc1_relu\nI1206 09:11:00.375401 23310 net.cpp:434] L1_b4_brc1_relu <- L1_b4_brc1_bn_top\nI1206 09:11:00.375414 23310 net.cpp:395] L1_b4_brc1_relu -> L1_b4_brc1_bn_top (in-place)\nI1206 09:11:00.375424 23310 net.cpp:150] Setting up L1_b4_brc1_relu\nI1206 09:11:00.375432 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.375437 23310 net.cpp:165] Memory required for data: 652801020\nI1206 
09:11:00.375442 23310 layer_factory.hpp:77] Creating layer L1_b4_brc1_conv\nI1206 09:11:00.375454 23310 net.cpp:100] Creating Layer L1_b4_brc1_conv\nI1206 09:11:00.375460 23310 net.cpp:434] L1_b4_brc1_conv <- L1_b4_brc1_bn_top\nI1206 09:11:00.375469 23310 net.cpp:408] L1_b4_brc1_conv -> L1_b4_brc1_conv_top\nI1206 09:11:00.375824 23310 net.cpp:150] Setting up L1_b4_brc1_conv\nI1206 09:11:00.375838 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.375844 23310 net.cpp:165] Memory required for data: 663942140\nI1206 09:11:00.375854 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_bn\nI1206 09:11:00.375865 23310 net.cpp:100] Creating Layer L1_b4_brc2_bn\nI1206 09:11:00.375872 23310 net.cpp:434] L1_b4_brc2_bn <- L1_b4_brc1_conv_top\nI1206 09:11:00.375880 23310 net.cpp:408] L1_b4_brc2_bn -> L1_b4_brc2_bn_top\nI1206 09:11:00.376142 23310 net.cpp:150] Setting up L1_b4_brc2_bn\nI1206 09:11:00.376154 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.376159 23310 net.cpp:165] Memory required for data: 675083260\nI1206 09:11:00.376170 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_relu\nI1206 09:11:00.376178 23310 net.cpp:100] Creating Layer L1_b4_brc2_relu\nI1206 09:11:00.376183 23310 net.cpp:434] L1_b4_brc2_relu <- L1_b4_brc2_bn_top\nI1206 09:11:00.376194 23310 net.cpp:395] L1_b4_brc2_relu -> L1_b4_brc2_bn_top (in-place)\nI1206 09:11:00.376204 23310 net.cpp:150] Setting up L1_b4_brc2_relu\nI1206 09:11:00.376211 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.376216 23310 net.cpp:165] Memory required for data: 686224380\nI1206 09:11:00.376221 23310 layer_factory.hpp:77] Creating layer L1_b4_brc2_conv\nI1206 09:11:00.376231 23310 net.cpp:100] Creating Layer L1_b4_brc2_conv\nI1206 09:11:00.376241 23310 net.cpp:434] L1_b4_brc2_conv <- L1_b4_brc2_bn_top\nI1206 09:11:00.376250 23310 net.cpp:408] L1_b4_brc2_conv -> L1_b4_brc2_conv_top\nI1206 09:11:00.376562 23310 net.cpp:150] Setting up L1_b4_brc2_conv\nI1206 
09:11:00.376577 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.376582 23310 net.cpp:165] Memory required for data: 697365500\nI1206 09:11:00.376591 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_bn\nI1206 09:11:00.376603 23310 net.cpp:100] Creating Layer L1_b4_brc3_bn\nI1206 09:11:00.376610 23310 net.cpp:434] L1_b4_brc3_bn <- L1_b4_brc2_conv_top\nI1206 09:11:00.376617 23310 net.cpp:408] L1_b4_brc3_bn -> L1_b4_brc3_bn_top\nI1206 09:11:00.376889 23310 net.cpp:150] Setting up L1_b4_brc3_bn\nI1206 09:11:00.376904 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.376916 23310 net.cpp:165] Memory required for data: 708506620\nI1206 09:11:00.376927 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_relu\nI1206 09:11:00.376935 23310 net.cpp:100] Creating Layer L1_b4_brc3_relu\nI1206 09:11:00.376941 23310 net.cpp:434] L1_b4_brc3_relu <- L1_b4_brc3_bn_top\nI1206 09:11:00.376951 23310 net.cpp:395] L1_b4_brc3_relu -> L1_b4_brc3_bn_top (in-place)\nI1206 09:11:00.376962 23310 net.cpp:150] Setting up L1_b4_brc3_relu\nI1206 09:11:00.376969 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.376974 23310 net.cpp:165] Memory required for data: 719647740\nI1206 09:11:00.376978 23310 layer_factory.hpp:77] Creating layer L1_b4_brc3_conv\nI1206 09:11:00.376992 23310 net.cpp:100] Creating Layer L1_b4_brc3_conv\nI1206 09:11:00.376998 23310 net.cpp:434] L1_b4_brc3_conv <- L1_b4_brc3_bn_top\nI1206 09:11:00.377007 23310 net.cpp:408] L1_b4_brc3_conv -> L1_b4_brc3_conv_top\nI1206 09:11:00.377355 23310 net.cpp:150] Setting up L1_b4_brc3_conv\nI1206 09:11:00.377370 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377375 23310 net.cpp:165] Memory required for data: 741929980\nI1206 09:11:00.377384 23310 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1206 09:11:00.377393 23310 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1206 09:11:00.377399 23310 net.cpp:434] L1_b4_sum_eltwise <- 
L1_b4_brc3_conv_top\nI1206 09:11:00.377406 23310 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split_1\nI1206 09:11:00.377418 23310 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1206 09:11:00.377452 23310 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1206 09:11:00.377461 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377466 23310 net.cpp:165] Memory required for data: 764212220\nI1206 09:11:00.377471 23310 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.377486 23310 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.377492 23310 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split <- L1_b4_sum_eltwise_top\nI1206 09:11:00.377501 23310 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:11:00.377514 23310 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split -> L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:11:00.377562 23310 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split\nI1206 09:11:00.377574 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377581 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377586 23310 net.cpp:165] Memory required for data: 808776700\nI1206 09:11:00.377591 23310 layer_factory.hpp:77] Creating layer L1_b5_brc1_bn\nI1206 09:11:00.377602 23310 net.cpp:100] Creating Layer L1_b5_brc1_bn\nI1206 09:11:00.377609 23310 net.cpp:434] L1_b5_brc1_bn <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_0\nI1206 09:11:00.377616 23310 net.cpp:408] L1_b5_brc1_bn -> L1_b5_brc1_bn_top\nI1206 09:11:00.377873 23310 net.cpp:150] Setting up L1_b5_brc1_bn\nI1206 09:11:00.377887 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377892 23310 net.cpp:165] Memory required for data: 831058940\nI1206 09:11:00.377918 23310 layer_factory.hpp:77] 
Creating layer L1_b5_brc1_relu\nI1206 09:11:00.377928 23310 net.cpp:100] Creating Layer L1_b5_brc1_relu\nI1206 09:11:00.377933 23310 net.cpp:434] L1_b5_brc1_relu <- L1_b5_brc1_bn_top\nI1206 09:11:00.377944 23310 net.cpp:395] L1_b5_brc1_relu -> L1_b5_brc1_bn_top (in-place)\nI1206 09:11:00.377954 23310 net.cpp:150] Setting up L1_b5_brc1_relu\nI1206 09:11:00.377962 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.377966 23310 net.cpp:165] Memory required for data: 853341180\nI1206 09:11:00.377971 23310 layer_factory.hpp:77] Creating layer L1_b5_brc1_conv\nI1206 09:11:00.377986 23310 net.cpp:100] Creating Layer L1_b5_brc1_conv\nI1206 09:11:00.377993 23310 net.cpp:434] L1_b5_brc1_conv <- L1_b5_brc1_bn_top\nI1206 09:11:00.378001 23310 net.cpp:408] L1_b5_brc1_conv -> L1_b5_brc1_conv_top\nI1206 09:11:00.378355 23310 net.cpp:150] Setting up L1_b5_brc1_conv\nI1206 09:11:00.378371 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.378376 23310 net.cpp:165] Memory required for data: 864482300\nI1206 09:11:00.378384 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_bn\nI1206 09:11:00.378392 23310 net.cpp:100] Creating Layer L1_b5_brc2_bn\nI1206 09:11:00.378399 23310 net.cpp:434] L1_b5_brc2_bn <- L1_b5_brc1_conv_top\nI1206 09:11:00.378412 23310 net.cpp:408] L1_b5_brc2_bn -> L1_b5_brc2_bn_top\nI1206 09:11:00.378676 23310 net.cpp:150] Setting up L1_b5_brc2_bn\nI1206 09:11:00.378690 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.378695 23310 net.cpp:165] Memory required for data: 875623420\nI1206 09:11:00.378705 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_relu\nI1206 09:11:00.378720 23310 net.cpp:100] Creating Layer L1_b5_brc2_relu\nI1206 09:11:00.378727 23310 net.cpp:434] L1_b5_brc2_relu <- L1_b5_brc2_bn_top\nI1206 09:11:00.378736 23310 net.cpp:395] L1_b5_brc2_relu -> L1_b5_brc2_bn_top (in-place)\nI1206 09:11:00.378746 23310 net.cpp:150] Setting up L1_b5_brc2_relu\nI1206 09:11:00.378752 23310 net.cpp:157] Top 
shape: 85 32 32 32 (2785280)\nI1206 09:11:00.378757 23310 net.cpp:165] Memory required for data: 886764540\nI1206 09:11:00.378762 23310 layer_factory.hpp:77] Creating layer L1_b5_brc2_conv\nI1206 09:11:00.378777 23310 net.cpp:100] Creating Layer L1_b5_brc2_conv\nI1206 09:11:00.378783 23310 net.cpp:434] L1_b5_brc2_conv <- L1_b5_brc2_bn_top\nI1206 09:11:00.378794 23310 net.cpp:408] L1_b5_brc2_conv -> L1_b5_brc2_conv_top\nI1206 09:11:00.379117 23310 net.cpp:150] Setting up L1_b5_brc2_conv\nI1206 09:11:00.379130 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.379137 23310 net.cpp:165] Memory required for data: 897905660\nI1206 09:11:00.379145 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_bn\nI1206 09:11:00.379156 23310 net.cpp:100] Creating Layer L1_b5_brc3_bn\nI1206 09:11:00.379163 23310 net.cpp:434] L1_b5_brc3_bn <- L1_b5_brc2_conv_top\nI1206 09:11:00.379171 23310 net.cpp:408] L1_b5_brc3_bn -> L1_b5_brc3_bn_top\nI1206 09:11:00.379431 23310 net.cpp:150] Setting up L1_b5_brc3_bn\nI1206 09:11:00.379443 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.379449 23310 net.cpp:165] Memory required for data: 909046780\nI1206 09:11:00.379459 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_relu\nI1206 09:11:00.379467 23310 net.cpp:100] Creating Layer L1_b5_brc3_relu\nI1206 09:11:00.379473 23310 net.cpp:434] L1_b5_brc3_relu <- L1_b5_brc3_bn_top\nI1206 09:11:00.379480 23310 net.cpp:395] L1_b5_brc3_relu -> L1_b5_brc3_bn_top (in-place)\nI1206 09:11:00.379494 23310 net.cpp:150] Setting up L1_b5_brc3_relu\nI1206 09:11:00.379501 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.379506 23310 net.cpp:165] Memory required for data: 920187900\nI1206 09:11:00.379511 23310 layer_factory.hpp:77] Creating layer L1_b5_brc3_conv\nI1206 09:11:00.379521 23310 net.cpp:100] Creating Layer L1_b5_brc3_conv\nI1206 09:11:00.379532 23310 net.cpp:434] L1_b5_brc3_conv <- L1_b5_brc3_bn_top\nI1206 09:11:00.379541 23310 net.cpp:408] 
L1_b5_brc3_conv -> L1_b5_brc3_conv_top\nI1206 09:11:00.379904 23310 net.cpp:150] Setting up L1_b5_brc3_conv\nI1206 09:11:00.379920 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.379925 23310 net.cpp:165] Memory required for data: 942470140\nI1206 09:11:00.379935 23310 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1206 09:11:00.379946 23310 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1206 09:11:00.379953 23310 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_brc3_conv_top\nI1206 09:11:00.379961 23310 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split_1\nI1206 09:11:00.379971 23310 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1206 09:11:00.380004 23310 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1206 09:11:00.380014 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.380019 23310 net.cpp:165] Memory required for data: 964752380\nI1206 09:11:00.380033 23310 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.380044 23310 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.380050 23310 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split <- L1_b5_sum_eltwise_top\nI1206 09:11:00.380059 23310 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:11:00.380067 23310 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split -> L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:11:00.380120 23310 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split\nI1206 09:11:00.380131 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.380138 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.380143 23310 net.cpp:165] Memory required for data: 1009316860\nI1206 09:11:00.380148 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_bn\nI1206 09:11:00.380163 23310 net.cpp:100] 
Creating Layer L1_b6_brc1_bn\nI1206 09:11:00.380169 23310 net.cpp:434] L1_b6_brc1_bn <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_0\nI1206 09:11:00.380177 23310 net.cpp:408] L1_b6_brc1_bn -> L1_b6_brc1_bn_top\nI1206 09:11:00.380430 23310 net.cpp:150] Setting up L1_b6_brc1_bn\nI1206 09:11:00.380445 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.380450 23310 net.cpp:165] Memory required for data: 1031599100\nI1206 09:11:00.380460 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_relu\nI1206 09:11:00.380467 23310 net.cpp:100] Creating Layer L1_b6_brc1_relu\nI1206 09:11:00.380473 23310 net.cpp:434] L1_b6_brc1_relu <- L1_b6_brc1_bn_top\nI1206 09:11:00.380484 23310 net.cpp:395] L1_b6_brc1_relu -> L1_b6_brc1_bn_top (in-place)\nI1206 09:11:00.380494 23310 net.cpp:150] Setting up L1_b6_brc1_relu\nI1206 09:11:00.380502 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.380506 23310 net.cpp:165] Memory required for data: 1053881340\nI1206 09:11:00.380511 23310 layer_factory.hpp:77] Creating layer L1_b6_brc1_conv\nI1206 09:11:00.380525 23310 net.cpp:100] Creating Layer L1_b6_brc1_conv\nI1206 09:11:00.380532 23310 net.cpp:434] L1_b6_brc1_conv <- L1_b6_brc1_bn_top\nI1206 09:11:00.380539 23310 net.cpp:408] L1_b6_brc1_conv -> L1_b6_brc1_conv_top\nI1206 09:11:00.380895 23310 net.cpp:150] Setting up L1_b6_brc1_conv\nI1206 09:11:00.380911 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.380916 23310 net.cpp:165] Memory required for data: 1065022460\nI1206 09:11:00.380925 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_bn\nI1206 09:11:00.380939 23310 net.cpp:100] Creating Layer L1_b6_brc2_bn\nI1206 09:11:00.380944 23310 net.cpp:434] L1_b6_brc2_bn <- L1_b6_brc1_conv_top\nI1206 09:11:00.380952 23310 net.cpp:408] L1_b6_brc2_bn -> L1_b6_brc2_bn_top\nI1206 09:11:00.381223 23310 net.cpp:150] Setting up L1_b6_brc2_bn\nI1206 09:11:00.381237 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.381242 23310 
net.cpp:165] Memory required for data: 1076163580\nI1206 09:11:00.381253 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_relu\nI1206 09:11:00.381269 23310 net.cpp:100] Creating Layer L1_b6_brc2_relu\nI1206 09:11:00.381276 23310 net.cpp:434] L1_b6_brc2_relu <- L1_b6_brc2_bn_top\nI1206 09:11:00.381287 23310 net.cpp:395] L1_b6_brc2_relu -> L1_b6_brc2_bn_top (in-place)\nI1206 09:11:00.381297 23310 net.cpp:150] Setting up L1_b6_brc2_relu\nI1206 09:11:00.381304 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.381309 23310 net.cpp:165] Memory required for data: 1087304700\nI1206 09:11:00.381314 23310 layer_factory.hpp:77] Creating layer L1_b6_brc2_conv\nI1206 09:11:00.381325 23310 net.cpp:100] Creating Layer L1_b6_brc2_conv\nI1206 09:11:00.381330 23310 net.cpp:434] L1_b6_brc2_conv <- L1_b6_brc2_bn_top\nI1206 09:11:00.381342 23310 net.cpp:408] L1_b6_brc2_conv -> L1_b6_brc2_conv_top\nI1206 09:11:00.381661 23310 net.cpp:150] Setting up L1_b6_brc2_conv\nI1206 09:11:00.381675 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.381688 23310 net.cpp:165] Memory required for data: 1098445820\nI1206 09:11:00.381697 23310 layer_factory.hpp:77] Creating layer L1_b6_brc3_bn\nI1206 09:11:00.381706 23310 net.cpp:100] Creating Layer L1_b6_brc3_bn\nI1206 09:11:00.381718 23310 net.cpp:434] L1_b6_brc3_bn <- L1_b6_brc2_conv_top\nI1206 09:11:00.381736 23310 net.cpp:408] L1_b6_brc3_bn -> L1_b6_brc3_bn_top\nI1206 09:11:00.382007 23310 net.cpp:150] Setting up L1_b6_brc3_bn\nI1206 09:11:00.382025 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.382030 23310 net.cpp:165] Memory required for data: 1109586940\nI1206 09:11:00.382040 23310 layer_factory.hpp:77] Creating layer L1_b6_brc3_relu\nI1206 09:11:00.382048 23310 net.cpp:100] Creating Layer L1_b6_brc3_relu\nI1206 09:11:00.382055 23310 net.cpp:434] L1_b6_brc3_relu <- L1_b6_brc3_bn_top\nI1206 09:11:00.382061 23310 net.cpp:395] L1_b6_brc3_relu -> L1_b6_brc3_bn_top (in-place)\nI1206 
09:11:00.382071 23310 net.cpp:150] Setting up L1_b6_brc3_relu\nI1206 09:11:00.382078 23310 net.cpp:157] Top shape: 85 32 32 32 (2785280)\nI1206 09:11:00.382082 23310 net.cpp:165] Memory required for data: 1120728060\nI1206 09:11:00.382087 23310 layer_factory.hpp:77] Creating layer L1_b6_brc3_conv\nI1206 09:11:00.382097 23310 net.cpp:100] Creating Layer L1_b6_brc3_conv\nI1206 09:11:00.382102 23310 net.cpp:434] L1_b6_brc3_conv <- L1_b6_brc3_bn_top\nI1206 09:11:00.382114 23310 net.cpp:408] L1_b6_brc3_conv -> L1_b6_brc3_conv_top\nI1206 09:11:00.382468 23310 net.cpp:150] Setting up L1_b6_brc3_conv\nI1206 09:11:00.382483 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.382488 23310 net.cpp:165] Memory required for data: 1143010300\nI1206 09:11:00.382496 23310 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1206 09:11:00.382505 23310 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1206 09:11:00.382511 23310 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_brc3_conv_top\nI1206 09:11:00.382519 23310 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split_1\nI1206 09:11:00.382526 23310 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1206 09:11:00.382565 23310 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1206 09:11:00.382578 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.382583 23310 net.cpp:165] Memory required for data: 1165292540\nI1206 09:11:00.382591 23310 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.382601 23310 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.382607 23310 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split <- L1_b6_sum_eltwise_top\nI1206 09:11:00.382616 23310 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:11:00.382624 23310 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split -> 
L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:11:00.382678 23310 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split\nI1206 09:11:00.382689 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.382696 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.382700 23310 net.cpp:165] Memory required for data: 1209857020\nI1206 09:11:00.382706 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_bn\nI1206 09:11:00.382725 23310 net.cpp:100] Creating Layer L2_b1_brc1_bn\nI1206 09:11:00.382731 23310 net.cpp:434] L2_b1_brc1_bn <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_0\nI1206 09:11:00.382740 23310 net.cpp:408] L2_b1_brc1_bn -> L2_b1_brc1_bn_top\nI1206 09:11:00.383002 23310 net.cpp:150] Setting up L2_b1_brc1_bn\nI1206 09:11:00.383018 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.383023 23310 net.cpp:165] Memory required for data: 1232139260\nI1206 09:11:00.383034 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_relu\nI1206 09:11:00.383042 23310 net.cpp:100] Creating Layer L2_b1_brc1_relu\nI1206 09:11:00.383049 23310 net.cpp:434] L2_b1_brc1_relu <- L2_b1_brc1_bn_top\nI1206 09:11:00.383055 23310 net.cpp:395] L2_b1_brc1_relu -> L2_b1_brc1_bn_top (in-place)\nI1206 09:11:00.383074 23310 net.cpp:150] Setting up L2_b1_brc1_relu\nI1206 09:11:00.383081 23310 net.cpp:157] Top shape: 85 64 32 32 (5570560)\nI1206 09:11:00.383086 23310 net.cpp:165] Memory required for data: 1254421500\nI1206 09:11:00.383091 23310 layer_factory.hpp:77] Creating layer L2_b1_brc1_conv\nI1206 09:11:00.383105 23310 net.cpp:100] Creating Layer L2_b1_brc1_conv\nI1206 09:11:00.383111 23310 net.cpp:434] L2_b1_brc1_conv <- L2_b1_brc1_bn_top\nI1206 09:11:00.383119 23310 net.cpp:408] L2_b1_brc1_conv -> L2_b1_brc1_conv_top\nI1206 09:11:00.383515 23310 net.cpp:150] Setting up L2_b1_brc1_conv\nI1206 09:11:00.383530 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.383535 23310 net.cpp:165] Memory 
required for data: 1259992060\nI1206 09:11:00.383544 23310 layer_factory.hpp:77] Creating layer L2_b1_brc2_bn\nI1206 09:11:00.383558 23310 net.cpp:100] Creating Layer L2_b1_brc2_bn\nI1206 09:11:00.383564 23310 net.cpp:434] L2_b1_brc2_bn <- L2_b1_brc1_conv_top\nI1206 09:11:00.383571 23310 net.cpp:408] L2_b1_brc2_bn -> L2_b1_brc2_bn_top\nI1206 09:11:00.383849 23310 net.cpp:150] Setting up L2_b1_brc2_bn\nI1206 09:11:00.383864 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.383870 23310 net.cpp:165] Memory required for data: 1265562620\nI1206 09:11:00.383880 23310 layer_factory.hpp:77] Creating layer L2_b1_brc2_relu\nI1206 09:11:00.383888 23310 net.cpp:100] Creating Layer L2_b1_brc2_relu\nI1206 09:11:00.383894 23310 net.cpp:434] L2_b1_brc2_relu <- L2_b1_brc2_bn_top\nI1206 09:11:00.383901 23310 net.cpp:395] L2_b1_brc2_relu -> L2_b1_brc2_bn_top (in-place)\nI1206 09:11:00.383911 23310 net.cpp:150] Setting up L2_b1_brc2_relu\nI1206 09:11:00.383919 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.383924 23310 net.cpp:165] Memory required for data: 1271133180\nI1206 09:11:00.383929 23310 layer_factory.hpp:77] Creating layer L2_b1_brc2_conv\nI1206 09:11:00.383945 23310 net.cpp:100] Creating Layer L2_b1_brc2_conv\nI1206 09:11:00.383952 23310 net.cpp:434] L2_b1_brc2_conv <- L2_b1_brc2_bn_top\nI1206 09:11:00.383965 23310 net.cpp:408] L2_b1_brc2_conv -> L2_b1_brc2_conv_top\nI1206 09:11:00.384302 23310 net.cpp:150] Setting up L2_b1_brc2_conv\nI1206 09:11:00.384317 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.384322 23310 net.cpp:165] Memory required for data: 1276703740\nI1206 09:11:00.384331 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_bn\nI1206 09:11:00.384341 23310 net.cpp:100] Creating Layer L2_b1_brc3_bn\nI1206 09:11:00.384352 23310 net.cpp:434] L2_b1_brc3_bn <- L2_b1_brc2_conv_top\nI1206 09:11:00.384361 23310 net.cpp:408] L2_b1_brc3_bn -> L2_b1_brc3_bn_top\nI1206 09:11:00.384623 23310 net.cpp:150] Setting 
up L2_b1_brc3_bn\nI1206 09:11:00.384635 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.384640 23310 net.cpp:165] Memory required for data: 1282274300\nI1206 09:11:00.384650 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_relu\nI1206 09:11:00.384660 23310 net.cpp:100] Creating Layer L2_b1_brc3_relu\nI1206 09:11:00.384665 23310 net.cpp:434] L2_b1_brc3_relu <- L2_b1_brc3_bn_top\nI1206 09:11:00.384671 23310 net.cpp:395] L2_b1_brc3_relu -> L2_b1_brc3_bn_top (in-place)\nI1206 09:11:00.384681 23310 net.cpp:150] Setting up L2_b1_brc3_relu\nI1206 09:11:00.384688 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.384693 23310 net.cpp:165] Memory required for data: 1287844860\nI1206 09:11:00.384698 23310 layer_factory.hpp:77] Creating layer L2_b1_brc3_conv\nI1206 09:11:00.384718 23310 net.cpp:100] Creating Layer L2_b1_brc3_conv\nI1206 09:11:00.384727 23310 net.cpp:434] L2_b1_brc3_conv <- L2_b1_brc3_bn_top\nI1206 09:11:00.384739 23310 net.cpp:408] L2_b1_brc3_conv -> L2_b1_brc3_conv_top\nI1206 09:11:00.385217 23310 net.cpp:150] Setting up L2_b1_brc3_conv\nI1206 09:11:00.385232 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.385237 23310 net.cpp:165] Memory required for data: 1298985980\nI1206 09:11:00.385246 23310 layer_factory.hpp:77] Creating layer L2_b1_chanInc_conv\nI1206 09:11:00.385262 23310 net.cpp:100] Creating Layer L2_b1_chanInc_conv\nI1206 09:11:00.385278 23310 net.cpp:434] L2_b1_chanInc_conv <- L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split_1\nI1206 09:11:00.385291 23310 net.cpp:408] L2_b1_chanInc_conv -> L2_b1_chanInc_conv_top\nI1206 09:11:00.385764 23310 net.cpp:150] Setting up L2_b1_chanInc_conv\nI1206 09:11:00.385779 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.385785 23310 net.cpp:165] Memory required for data: 1310127100\nI1206 09:11:00.385794 23310 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1206 09:11:00.385807 23310 net.cpp:100] Creating Layer 
L2_b1_sum_eltwise\nI1206 09:11:00.385814 23310 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_brc3_conv_top\nI1206 09:11:00.385821 23310 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_chanInc_conv_top\nI1206 09:11:00.385829 23310 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1206 09:11:00.385861 23310 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1206 09:11:00.385871 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.385876 23310 net.cpp:165] Memory required for data: 1321268220\nI1206 09:11:00.385881 23310 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.385890 23310 net.cpp:100] Creating Layer L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.385895 23310 net.cpp:434] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split <- L2_b1_sum_eltwise_top\nI1206 09:11:00.385902 23310 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:11:00.385916 23310 net.cpp:408] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split -> L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:11:00.385965 23310 net.cpp:150] Setting up L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split\nI1206 09:11:00.385977 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.385984 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.385989 23310 net.cpp:165] Memory required for data: 1343550460\nI1206 09:11:00.385994 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_bn\nI1206 09:11:00.386006 23310 net.cpp:100] Creating Layer L2_b2_brc1_bn\nI1206 09:11:00.386013 23310 net.cpp:434] L2_b2_brc1_bn <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_0\nI1206 09:11:00.386024 23310 net.cpp:408] L2_b2_brc1_bn -> L2_b2_brc1_bn_top\nI1206 09:11:00.386262 23310 net.cpp:150] Setting up L2_b2_brc1_bn\nI1206 09:11:00.386282 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.386288 23310 net.cpp:165] Memory required for data: 
1354691580\nI1206 09:11:00.386298 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_relu\nI1206 09:11:00.386307 23310 net.cpp:100] Creating Layer L2_b2_brc1_relu\nI1206 09:11:00.386312 23310 net.cpp:434] L2_b2_brc1_relu <- L2_b2_brc1_bn_top\nI1206 09:11:00.386319 23310 net.cpp:395] L2_b2_brc1_relu -> L2_b2_brc1_bn_top (in-place)\nI1206 09:11:00.386329 23310 net.cpp:150] Setting up L2_b2_brc1_relu\nI1206 09:11:00.386337 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.386342 23310 net.cpp:165] Memory required for data: 1365832700\nI1206 09:11:00.386345 23310 layer_factory.hpp:77] Creating layer L2_b2_brc1_conv\nI1206 09:11:00.386358 23310 net.cpp:100] Creating Layer L2_b2_brc1_conv\nI1206 09:11:00.386363 23310 net.cpp:434] L2_b2_brc1_conv <- L2_b2_brc1_bn_top\nI1206 09:11:00.386375 23310 net.cpp:408] L2_b2_brc1_conv -> L2_b2_brc1_conv_top\nI1206 09:11:00.386862 23310 net.cpp:150] Setting up L2_b2_brc1_conv\nI1206 09:11:00.386876 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.386883 23310 net.cpp:165] Memory required for data: 1371403260\nI1206 09:11:00.386891 23310 layer_factory.hpp:77] Creating layer L2_b2_brc2_bn\nI1206 09:11:00.386905 23310 net.cpp:100] Creating Layer L2_b2_brc2_bn\nI1206 09:11:00.386911 23310 net.cpp:434] L2_b2_brc2_bn <- L2_b2_brc1_conv_top\nI1206 09:11:00.386919 23310 net.cpp:408] L2_b2_brc2_bn -> L2_b2_brc2_bn_top\nI1206 09:11:00.387181 23310 net.cpp:150] Setting up L2_b2_brc2_bn\nI1206 09:11:00.387194 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.387207 23310 net.cpp:165] Memory required for data: 1376973820\nI1206 09:11:00.387218 23310 layer_factory.hpp:77] Creating layer L2_b2_brc2_relu\nI1206 09:11:00.387230 23310 net.cpp:100] Creating Layer L2_b2_brc2_relu\nI1206 09:11:00.387238 23310 net.cpp:434] L2_b2_brc2_relu <- L2_b2_brc2_bn_top\nI1206 09:11:00.387244 23310 net.cpp:395] L2_b2_brc2_relu -> L2_b2_brc2_bn_top (in-place)\nI1206 09:11:00.387254 23310 net.cpp:150] Setting 
up L2_b2_brc2_relu\nI1206 09:11:00.387261 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.387266 23310 net.cpp:165] Memory required for data: 1382544380\nI1206 09:11:00.387271 23310 layer_factory.hpp:77] Creating layer L2_b2_brc2_conv\nI1206 09:11:00.387282 23310 net.cpp:100] Creating Layer L2_b2_brc2_conv\nI1206 09:11:00.387289 23310 net.cpp:434] L2_b2_brc2_conv <- L2_b2_brc2_bn_top\nI1206 09:11:00.387301 23310 net.cpp:408] L2_b2_brc2_conv -> L2_b2_brc2_conv_top\nI1206 09:11:00.387635 23310 net.cpp:150] Setting up L2_b2_brc2_conv\nI1206 09:11:00.387650 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.387655 23310 net.cpp:165] Memory required for data: 1388114940\nI1206 09:11:00.387665 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_bn\nI1206 09:11:00.387672 23310 net.cpp:100] Creating Layer L2_b2_brc3_bn\nI1206 09:11:00.387679 23310 net.cpp:434] L2_b2_brc3_bn <- L2_b2_brc2_conv_top\nI1206 09:11:00.387691 23310 net.cpp:408] L2_b2_brc3_bn -> L2_b2_brc3_bn_top\nI1206 09:11:00.387958 23310 net.cpp:150] Setting up L2_b2_brc3_bn\nI1206 09:11:00.387971 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.387976 23310 net.cpp:165] Memory required for data: 1393685500\nI1206 09:11:00.387986 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_relu\nI1206 09:11:00.388000 23310 net.cpp:100] Creating Layer L2_b2_brc3_relu\nI1206 09:11:00.388006 23310 net.cpp:434] L2_b2_brc3_relu <- L2_b2_brc3_bn_top\nI1206 09:11:00.388015 23310 net.cpp:395] L2_b2_brc3_relu -> L2_b2_brc3_bn_top (in-place)\nI1206 09:11:00.388025 23310 net.cpp:150] Setting up L2_b2_brc3_relu\nI1206 09:11:00.388031 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.388036 23310 net.cpp:165] Memory required for data: 1399256060\nI1206 09:11:00.388042 23310 layer_factory.hpp:77] Creating layer L2_b2_brc3_conv\nI1206 09:11:00.388054 23310 net.cpp:100] Creating Layer L2_b2_brc3_conv\nI1206 09:11:00.388059 23310 net.cpp:434] L2_b2_brc3_conv 
<- L2_b2_brc3_bn_top\nI1206 09:11:00.388074 23310 net.cpp:408] L2_b2_brc3_conv -> L2_b2_brc3_conv_top\nI1206 09:11:00.388548 23310 net.cpp:150] Setting up L2_b2_brc3_conv\nI1206 09:11:00.388562 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.388567 23310 net.cpp:165] Memory required for data: 1410397180\nI1206 09:11:00.388576 23310 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1206 09:11:00.388586 23310 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1206 09:11:00.388592 23310 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_brc3_conv_top\nI1206 09:11:00.388598 23310 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split_1\nI1206 09:11:00.388607 23310 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1206 09:11:00.388633 23310 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1206 09:11:00.388645 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.388650 23310 net.cpp:165] Memory required for data: 1421538300\nI1206 09:11:00.388655 23310 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.388667 23310 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.388674 23310 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split <- L2_b2_sum_eltwise_top\nI1206 09:11:00.388685 23310 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:11:00.388695 23310 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split -> L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:11:00.388754 23310 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split\nI1206 09:11:00.388778 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.388787 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.388792 23310 net.cpp:165] Memory required for data: 1443820540\nI1206 09:11:00.388797 23310 layer_factory.hpp:77] 
Creating layer L2_b3_brc1_bn\nI1206 09:11:00.388804 23310 net.cpp:100] Creating Layer L2_b3_brc1_bn\nI1206 09:11:00.388810 23310 net.cpp:434] L2_b3_brc1_bn <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_0\nI1206 09:11:00.388818 23310 net.cpp:408] L2_b3_brc1_bn -> L2_b3_brc1_bn_top\nI1206 09:11:00.389070 23310 net.cpp:150] Setting up L2_b3_brc1_bn\nI1206 09:11:00.389083 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.389088 23310 net.cpp:165] Memory required for data: 1454961660\nI1206 09:11:00.389122 23310 layer_factory.hpp:77] Creating layer L2_b3_brc1_relu\nI1206 09:11:00.389134 23310 net.cpp:100] Creating Layer L2_b3_brc1_relu\nI1206 09:11:00.389142 23310 net.cpp:434] L2_b3_brc1_relu <- L2_b3_brc1_bn_top\nI1206 09:11:00.389148 23310 net.cpp:395] L2_b3_brc1_relu -> L2_b3_brc1_bn_top (in-place)\nI1206 09:11:00.389159 23310 net.cpp:150] Setting up L2_b3_brc1_relu\nI1206 09:11:00.389166 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.389170 23310 net.cpp:165] Memory required for data: 1466102780\nI1206 09:11:00.389175 23310 layer_factory.hpp:77] Creating layer L2_b3_brc1_conv\nI1206 09:11:00.389191 23310 net.cpp:100] Creating Layer L2_b3_brc1_conv\nI1206 09:11:00.389199 23310 net.cpp:434] L2_b3_brc1_conv <- L2_b3_brc1_bn_top\nI1206 09:11:00.389206 23310 net.cpp:408] L2_b3_brc1_conv -> L2_b3_brc1_conv_top\nI1206 09:11:00.389688 23310 net.cpp:150] Setting up L2_b3_brc1_conv\nI1206 09:11:00.389703 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.389708 23310 net.cpp:165] Memory required for data: 1471673340\nI1206 09:11:00.389724 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_bn\nI1206 09:11:00.389739 23310 net.cpp:100] Creating Layer L2_b3_brc2_bn\nI1206 09:11:00.389745 23310 net.cpp:434] L2_b3_brc2_bn <- L2_b3_brc1_conv_top\nI1206 09:11:00.389756 23310 net.cpp:408] L2_b3_brc2_bn -> L2_b3_brc2_bn_top\nI1206 09:11:00.390027 23310 net.cpp:150] Setting up L2_b3_brc2_bn\nI1206 09:11:00.390039 23310 
net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.390045 23310 net.cpp:165] Memory required for data: 1477243900\nI1206 09:11:00.390055 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_relu\nI1206 09:11:00.390065 23310 net.cpp:100] Creating Layer L2_b3_brc2_relu\nI1206 09:11:00.390071 23310 net.cpp:434] L2_b3_brc2_relu <- L2_b3_brc2_bn_top\nI1206 09:11:00.390079 23310 net.cpp:395] L2_b3_brc2_relu -> L2_b3_brc2_bn_top (in-place)\nI1206 09:11:00.390089 23310 net.cpp:150] Setting up L2_b3_brc2_relu\nI1206 09:11:00.390095 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.390100 23310 net.cpp:165] Memory required for data: 1482814460\nI1206 09:11:00.390105 23310 layer_factory.hpp:77] Creating layer L2_b3_brc2_conv\nI1206 09:11:00.390120 23310 net.cpp:100] Creating Layer L2_b3_brc2_conv\nI1206 09:11:00.390126 23310 net.cpp:434] L2_b3_brc2_conv <- L2_b3_brc2_bn_top\nI1206 09:11:00.390139 23310 net.cpp:408] L2_b3_brc2_conv -> L2_b3_brc2_conv_top\nI1206 09:11:00.390476 23310 net.cpp:150] Setting up L2_b3_brc2_conv\nI1206 09:11:00.390491 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.390496 23310 net.cpp:165] Memory required for data: 1488385020\nI1206 09:11:00.390506 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_bn\nI1206 09:11:00.390519 23310 net.cpp:100] Creating Layer L2_b3_brc3_bn\nI1206 09:11:00.390525 23310 net.cpp:434] L2_b3_brc3_bn <- L2_b3_brc2_conv_top\nI1206 09:11:00.390533 23310 net.cpp:408] L2_b3_brc3_bn -> L2_b3_brc3_bn_top\nI1206 09:11:00.390803 23310 net.cpp:150] Setting up L2_b3_brc3_bn\nI1206 09:11:00.390821 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.390826 23310 net.cpp:165] Memory required for data: 1493955580\nI1206 09:11:00.390837 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_relu\nI1206 09:11:00.390846 23310 net.cpp:100] Creating Layer L2_b3_brc3_relu\nI1206 09:11:00.390859 23310 net.cpp:434] L2_b3_brc3_relu <- L2_b3_brc3_bn_top\nI1206 09:11:00.390867 
23310 net.cpp:395] L2_b3_brc3_relu -> L2_b3_brc3_bn_top (in-place)\nI1206 09:11:00.390877 23310 net.cpp:150] Setting up L2_b3_brc3_relu\nI1206 09:11:00.390885 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.390890 23310 net.cpp:165] Memory required for data: 1499526140\nI1206 09:11:00.390894 23310 layer_factory.hpp:77] Creating layer L2_b3_brc3_conv\nI1206 09:11:00.390910 23310 net.cpp:100] Creating Layer L2_b3_brc3_conv\nI1206 09:11:00.390916 23310 net.cpp:434] L2_b3_brc3_conv <- L2_b3_brc3_bn_top\nI1206 09:11:00.390925 23310 net.cpp:408] L2_b3_brc3_conv -> L2_b3_brc3_conv_top\nI1206 09:11:00.391398 23310 net.cpp:150] Setting up L2_b3_brc3_conv\nI1206 09:11:00.391413 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391418 23310 net.cpp:165] Memory required for data: 1510667260\nI1206 09:11:00.391427 23310 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1206 09:11:00.391436 23310 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1206 09:11:00.391443 23310 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_brc3_conv_top\nI1206 09:11:00.391449 23310 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split_1\nI1206 09:11:00.391463 23310 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1206 09:11:00.391490 23310 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1206 09:11:00.391499 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391505 23310 net.cpp:165] Memory required for data: 1521808380\nI1206 09:11:00.391510 23310 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.391522 23310 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.391528 23310 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split <- L2_b3_sum_eltwise_top\nI1206 09:11:00.391535 23310 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:11:00.391546 
23310 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split -> L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:11:00.391598 23310 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split\nI1206 09:11:00.391610 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391618 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391623 23310 net.cpp:165] Memory required for data: 1544090620\nI1206 09:11:00.391628 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_bn\nI1206 09:11:00.391639 23310 net.cpp:100] Creating Layer L2_b4_brc1_bn\nI1206 09:11:00.391645 23310 net.cpp:434] L2_b4_brc1_bn <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_0\nI1206 09:11:00.391654 23310 net.cpp:408] L2_b4_brc1_bn -> L2_b4_brc1_bn_top\nI1206 09:11:00.391917 23310 net.cpp:150] Setting up L2_b4_brc1_bn\nI1206 09:11:00.391932 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391937 23310 net.cpp:165] Memory required for data: 1555231740\nI1206 09:11:00.391948 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_relu\nI1206 09:11:00.391963 23310 net.cpp:100] Creating Layer L2_b4_brc1_relu\nI1206 09:11:00.391968 23310 net.cpp:434] L2_b4_brc1_relu <- L2_b4_brc1_bn_top\nI1206 09:11:00.391976 23310 net.cpp:395] L2_b4_brc1_relu -> L2_b4_brc1_bn_top (in-place)\nI1206 09:11:00.391986 23310 net.cpp:150] Setting up L2_b4_brc1_relu\nI1206 09:11:00.391993 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.391997 23310 net.cpp:165] Memory required for data: 1566372860\nI1206 09:11:00.392002 23310 layer_factory.hpp:77] Creating layer L2_b4_brc1_conv\nI1206 09:11:00.392012 23310 net.cpp:100] Creating Layer L2_b4_brc1_conv\nI1206 09:11:00.392019 23310 net.cpp:434] L2_b4_brc1_conv <- L2_b4_brc1_bn_top\nI1206 09:11:00.392032 23310 net.cpp:408] L2_b4_brc1_conv -> L2_b4_brc1_conv_top\nI1206 09:11:00.392510 23310 net.cpp:150] Setting up L2_b4_brc1_conv\nI1206 09:11:00.392525 23310 net.cpp:157] Top shape: 85 
64 16 16 (1392640)\nI1206 09:11:00.392541 23310 net.cpp:165] Memory required for data: 1571943420\nI1206 09:11:00.392551 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_bn\nI1206 09:11:00.392560 23310 net.cpp:100] Creating Layer L2_b4_brc2_bn\nI1206 09:11:00.392566 23310 net.cpp:434] L2_b4_brc2_bn <- L2_b4_brc1_conv_top\nI1206 09:11:00.392578 23310 net.cpp:408] L2_b4_brc2_bn -> L2_b4_brc2_bn_top\nI1206 09:11:00.392853 23310 net.cpp:150] Setting up L2_b4_brc2_bn\nI1206 09:11:00.392866 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.392871 23310 net.cpp:165] Memory required for data: 1577513980\nI1206 09:11:00.392882 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_relu\nI1206 09:11:00.392890 23310 net.cpp:100] Creating Layer L2_b4_brc2_relu\nI1206 09:11:00.392896 23310 net.cpp:434] L2_b4_brc2_relu <- L2_b4_brc2_bn_top\nI1206 09:11:00.392909 23310 net.cpp:395] L2_b4_brc2_relu -> L2_b4_brc2_bn_top (in-place)\nI1206 09:11:00.392920 23310 net.cpp:150] Setting up L2_b4_brc2_relu\nI1206 09:11:00.392926 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.392931 23310 net.cpp:165] Memory required for data: 1583084540\nI1206 09:11:00.392936 23310 layer_factory.hpp:77] Creating layer L2_b4_brc2_conv\nI1206 09:11:00.392946 23310 net.cpp:100] Creating Layer L2_b4_brc2_conv\nI1206 09:11:00.392952 23310 net.cpp:434] L2_b4_brc2_conv <- L2_b4_brc2_bn_top\nI1206 09:11:00.392961 23310 net.cpp:408] L2_b4_brc2_conv -> L2_b4_brc2_conv_top\nI1206 09:11:00.393302 23310 net.cpp:150] Setting up L2_b4_brc2_conv\nI1206 09:11:00.393316 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.393322 23310 net.cpp:165] Memory required for data: 1588655100\nI1206 09:11:00.393330 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_bn\nI1206 09:11:00.393344 23310 net.cpp:100] Creating Layer L2_b4_brc3_bn\nI1206 09:11:00.393352 23310 net.cpp:434] L2_b4_brc3_bn <- L2_b4_brc2_conv_top\nI1206 09:11:00.393359 23310 net.cpp:408] L2_b4_brc3_bn -> 
L2_b4_brc3_bn_top\nI1206 09:11:00.393627 23310 net.cpp:150] Setting up L2_b4_brc3_bn\nI1206 09:11:00.393640 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.393646 23310 net.cpp:165] Memory required for data: 1594225660\nI1206 09:11:00.393656 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_relu\nI1206 09:11:00.393663 23310 net.cpp:100] Creating Layer L2_b4_brc3_relu\nI1206 09:11:00.393669 23310 net.cpp:434] L2_b4_brc3_relu <- L2_b4_brc3_bn_top\nI1206 09:11:00.393683 23310 net.cpp:395] L2_b4_brc3_relu -> L2_b4_brc3_bn_top (in-place)\nI1206 09:11:00.393693 23310 net.cpp:150] Setting up L2_b4_brc3_relu\nI1206 09:11:00.393702 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.393707 23310 net.cpp:165] Memory required for data: 1599796220\nI1206 09:11:00.393710 23310 layer_factory.hpp:77] Creating layer L2_b4_brc3_conv\nI1206 09:11:00.393735 23310 net.cpp:100] Creating Layer L2_b4_brc3_conv\nI1206 09:11:00.393743 23310 net.cpp:434] L2_b4_brc3_conv <- L2_b4_brc3_bn_top\nI1206 09:11:00.393750 23310 net.cpp:408] L2_b4_brc3_conv -> L2_b4_brc3_conv_top\nI1206 09:11:00.394229 23310 net.cpp:150] Setting up L2_b4_brc3_conv\nI1206 09:11:00.394243 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394249 23310 net.cpp:165] Memory required for data: 1610937340\nI1206 09:11:00.394258 23310 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1206 09:11:00.394266 23310 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1206 09:11:00.394273 23310 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_brc3_conv_top\nI1206 09:11:00.394280 23310 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split_1\nI1206 09:11:00.394291 23310 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1206 09:11:00.394320 23310 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1206 09:11:00.394328 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394333 23310 net.cpp:165] Memory required for data: 1622078460\nI1206 
09:11:00.394338 23310 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.394352 23310 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.394366 23310 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split <- L2_b4_sum_eltwise_top\nI1206 09:11:00.394374 23310 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:11:00.394383 23310 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split -> L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:11:00.394435 23310 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split\nI1206 09:11:00.394448 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394454 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394459 23310 net.cpp:165] Memory required for data: 1644360700\nI1206 09:11:00.394464 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_bn\nI1206 09:11:00.394479 23310 net.cpp:100] Creating Layer L2_b5_brc1_bn\nI1206 09:11:00.394484 23310 net.cpp:434] L2_b5_brc1_bn <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_0\nI1206 09:11:00.394492 23310 net.cpp:408] L2_b5_brc1_bn -> L2_b5_brc1_bn_top\nI1206 09:11:00.394747 23310 net.cpp:150] Setting up L2_b5_brc1_bn\nI1206 09:11:00.394762 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394767 23310 net.cpp:165] Memory required for data: 1655501820\nI1206 09:11:00.394778 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_relu\nI1206 09:11:00.394788 23310 net.cpp:100] Creating Layer L2_b5_brc1_relu\nI1206 09:11:00.394793 23310 net.cpp:434] L2_b5_brc1_relu <- L2_b5_brc1_bn_top\nI1206 09:11:00.394800 23310 net.cpp:395] L2_b5_brc1_relu -> L2_b5_brc1_bn_top (in-place)\nI1206 09:11:00.394810 23310 net.cpp:150] Setting up L2_b5_brc1_relu\nI1206 09:11:00.394817 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.394822 
23310 net.cpp:165] Memory required for data: 1666642940\nI1206 09:11:00.394827 23310 layer_factory.hpp:77] Creating layer L2_b5_brc1_conv\nI1206 09:11:00.394842 23310 net.cpp:100] Creating Layer L2_b5_brc1_conv\nI1206 09:11:00.394850 23310 net.cpp:434] L2_b5_brc1_conv <- L2_b5_brc1_bn_top\nI1206 09:11:00.394860 23310 net.cpp:408] L2_b5_brc1_conv -> L2_b5_brc1_conv_top\nI1206 09:11:00.395349 23310 net.cpp:150] Setting up L2_b5_brc1_conv\nI1206 09:11:00.395364 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.395370 23310 net.cpp:165] Memory required for data: 1672213500\nI1206 09:11:00.395378 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_bn\nI1206 09:11:00.395392 23310 net.cpp:100] Creating Layer L2_b5_brc2_bn\nI1206 09:11:00.395400 23310 net.cpp:434] L2_b5_brc2_bn <- L2_b5_brc1_conv_top\nI1206 09:11:00.395411 23310 net.cpp:408] L2_b5_brc2_bn -> L2_b5_brc2_bn_top\nI1206 09:11:00.395679 23310 net.cpp:150] Setting up L2_b5_brc2_bn\nI1206 09:11:00.395692 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.395697 23310 net.cpp:165] Memory required for data: 1677784060\nI1206 09:11:00.395709 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_relu\nI1206 09:11:00.395725 23310 net.cpp:100] Creating Layer L2_b5_brc2_relu\nI1206 09:11:00.395731 23310 net.cpp:434] L2_b5_brc2_relu <- L2_b5_brc2_bn_top\nI1206 09:11:00.395740 23310 net.cpp:395] L2_b5_brc2_relu -> L2_b5_brc2_bn_top (in-place)\nI1206 09:11:00.395750 23310 net.cpp:150] Setting up L2_b5_brc2_relu\nI1206 09:11:00.395756 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.395761 23310 net.cpp:165] Memory required for data: 1683354620\nI1206 09:11:00.395766 23310 layer_factory.hpp:77] Creating layer L2_b5_brc2_conv\nI1206 09:11:00.395782 23310 net.cpp:100] Creating Layer L2_b5_brc2_conv\nI1206 09:11:00.395789 23310 net.cpp:434] L2_b5_brc2_conv <- L2_b5_brc2_bn_top\nI1206 09:11:00.395802 23310 net.cpp:408] L2_b5_brc2_conv -> L2_b5_brc2_conv_top\nI1206 
09:11:00.396140 23310 net.cpp:150] Setting up L2_b5_brc2_conv\nI1206 09:11:00.396155 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.396160 23310 net.cpp:165] Memory required for data: 1688925180\nI1206 09:11:00.396169 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_bn\nI1206 09:11:00.396190 23310 net.cpp:100] Creating Layer L2_b5_brc3_bn\nI1206 09:11:00.396198 23310 net.cpp:434] L2_b5_brc3_bn <- L2_b5_brc2_conv_top\nI1206 09:11:00.396206 23310 net.cpp:408] L2_b5_brc3_bn -> L2_b5_brc3_bn_top\nI1206 09:11:00.396479 23310 net.cpp:150] Setting up L2_b5_brc3_bn\nI1206 09:11:00.396492 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.396497 23310 net.cpp:165] Memory required for data: 1694495740\nI1206 09:11:00.396508 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_relu\nI1206 09:11:00.396518 23310 net.cpp:100] Creating Layer L2_b5_brc3_relu\nI1206 09:11:00.396524 23310 net.cpp:434] L2_b5_brc3_relu <- L2_b5_brc3_bn_top\nI1206 09:11:00.396531 23310 net.cpp:395] L2_b5_brc3_relu -> L2_b5_brc3_bn_top (in-place)\nI1206 09:11:00.396540 23310 net.cpp:150] Setting up L2_b5_brc3_relu\nI1206 09:11:00.396548 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.396553 23310 net.cpp:165] Memory required for data: 1700066300\nI1206 09:11:00.396558 23310 layer_factory.hpp:77] Creating layer L2_b5_brc3_conv\nI1206 09:11:00.396571 23310 net.cpp:100] Creating Layer L2_b5_brc3_conv\nI1206 09:11:00.396579 23310 net.cpp:434] L2_b5_brc3_conv <- L2_b5_brc3_bn_top\nI1206 09:11:00.396586 23310 net.cpp:408] L2_b5_brc3_conv -> L2_b5_brc3_conv_top\nI1206 09:11:00.397076 23310 net.cpp:150] Setting up L2_b5_brc3_conv\nI1206 09:11:00.397091 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397096 23310 net.cpp:165] Memory required for data: 1711207420\nI1206 09:11:00.397105 23310 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1206 09:11:00.397116 23310 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1206 
09:11:00.397122 23310 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_brc3_conv_top\nI1206 09:11:00.397130 23310 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split_1\nI1206 09:11:00.397142 23310 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1206 09:11:00.397171 23310 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1206 09:11:00.397179 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397184 23310 net.cpp:165] Memory required for data: 1722348540\nI1206 09:11:00.397191 23310 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.397203 23310 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.397209 23310 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split <- L2_b5_sum_eltwise_top\nI1206 09:11:00.397217 23310 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:11:00.397228 23310 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split -> L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:11:00.397281 23310 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split\nI1206 09:11:00.397294 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397300 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397305 23310 net.cpp:165] Memory required for data: 1744630780\nI1206 09:11:00.397310 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_bn\nI1206 09:11:00.397322 23310 net.cpp:100] Creating Layer L2_b6_brc1_bn\nI1206 09:11:00.397330 23310 net.cpp:434] L2_b6_brc1_bn <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_0\nI1206 09:11:00.397337 23310 net.cpp:408] L2_b6_brc1_bn -> L2_b6_brc1_bn_top\nI1206 09:11:00.397588 23310 net.cpp:150] Setting up L2_b6_brc1_bn\nI1206 09:11:00.397601 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397608 23310 net.cpp:165] Memory required for data: 
1755771900\nI1206 09:11:00.397617 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_relu\nI1206 09:11:00.397642 23310 net.cpp:100] Creating Layer L2_b6_brc1_relu\nI1206 09:11:00.397651 23310 net.cpp:434] L2_b6_brc1_relu <- L2_b6_brc1_bn_top\nI1206 09:11:00.397660 23310 net.cpp:395] L2_b6_brc1_relu -> L2_b6_brc1_bn_top (in-place)\nI1206 09:11:00.397670 23310 net.cpp:150] Setting up L2_b6_brc1_relu\nI1206 09:11:00.397676 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.397689 23310 net.cpp:165] Memory required for data: 1766913020\nI1206 09:11:00.397696 23310 layer_factory.hpp:77] Creating layer L2_b6_brc1_conv\nI1206 09:11:00.397709 23310 net.cpp:100] Creating Layer L2_b6_brc1_conv\nI1206 09:11:00.397722 23310 net.cpp:434] L2_b6_brc1_conv <- L2_b6_brc1_bn_top\nI1206 09:11:00.397732 23310 net.cpp:408] L2_b6_brc1_conv -> L2_b6_brc1_conv_top\nI1206 09:11:00.398222 23310 net.cpp:150] Setting up L2_b6_brc1_conv\nI1206 09:11:00.398237 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.398242 23310 net.cpp:165] Memory required for data: 1772483580\nI1206 09:11:00.398252 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_bn\nI1206 09:11:00.398265 23310 net.cpp:100] Creating Layer L2_b6_brc2_bn\nI1206 09:11:00.398272 23310 net.cpp:434] L2_b6_brc2_bn <- L2_b6_brc1_conv_top\nI1206 09:11:00.398280 23310 net.cpp:408] L2_b6_brc2_bn -> L2_b6_brc2_bn_top\nI1206 09:11:00.398550 23310 net.cpp:150] Setting up L2_b6_brc2_bn\nI1206 09:11:00.398563 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.398568 23310 net.cpp:165] Memory required for data: 1778054140\nI1206 09:11:00.398578 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_relu\nI1206 09:11:00.398587 23310 net.cpp:100] Creating Layer L2_b6_brc2_relu\nI1206 09:11:00.398593 23310 net.cpp:434] L2_b6_brc2_relu <- L2_b6_brc2_bn_top\nI1206 09:11:00.398600 23310 net.cpp:395] L2_b6_brc2_relu -> L2_b6_brc2_bn_top (in-place)\nI1206 09:11:00.398610 23310 net.cpp:150] Setting 
up L2_b6_brc2_relu\nI1206 09:11:00.398617 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.398622 23310 net.cpp:165] Memory required for data: 1783624700\nI1206 09:11:00.398627 23310 layer_factory.hpp:77] Creating layer L2_b6_brc2_conv\nI1206 09:11:00.398641 23310 net.cpp:100] Creating Layer L2_b6_brc2_conv\nI1206 09:11:00.398648 23310 net.cpp:434] L2_b6_brc2_conv <- L2_b6_brc2_bn_top\nI1206 09:11:00.398660 23310 net.cpp:408] L2_b6_brc2_conv -> L2_b6_brc2_conv_top\nI1206 09:11:00.399014 23310 net.cpp:150] Setting up L2_b6_brc2_conv\nI1206 09:11:00.399030 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.399035 23310 net.cpp:165] Memory required for data: 1789195260\nI1206 09:11:00.399044 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_bn\nI1206 09:11:00.399058 23310 net.cpp:100] Creating Layer L2_b6_brc3_bn\nI1206 09:11:00.399065 23310 net.cpp:434] L2_b6_brc3_bn <- L2_b6_brc2_conv_top\nI1206 09:11:00.399073 23310 net.cpp:408] L2_b6_brc3_bn -> L2_b6_brc3_bn_top\nI1206 09:11:00.399333 23310 net.cpp:150] Setting up L2_b6_brc3_bn\nI1206 09:11:00.399346 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.399351 23310 net.cpp:165] Memory required for data: 1794765820\nI1206 09:11:00.399363 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_relu\nI1206 09:11:00.399370 23310 net.cpp:100] Creating Layer L2_b6_brc3_relu\nI1206 09:11:00.399376 23310 net.cpp:434] L2_b6_brc3_relu <- L2_b6_brc3_bn_top\nI1206 09:11:00.399384 23310 net.cpp:395] L2_b6_brc3_relu -> L2_b6_brc3_bn_top (in-place)\nI1206 09:11:00.399394 23310 net.cpp:150] Setting up L2_b6_brc3_relu\nI1206 09:11:00.399400 23310 net.cpp:157] Top shape: 85 64 16 16 (1392640)\nI1206 09:11:00.399405 23310 net.cpp:165] Memory required for data: 1800336380\nI1206 09:11:00.399410 23310 layer_factory.hpp:77] Creating layer L2_b6_brc3_conv\nI1206 09:11:00.399423 23310 net.cpp:100] Creating Layer L2_b6_brc3_conv\nI1206 09:11:00.399430 23310 net.cpp:434] L2_b6_brc3_conv 
<- L2_b6_brc3_bn_top\nI1206 09:11:00.399441 23310 net.cpp:408] L2_b6_brc3_conv -> L2_b6_brc3_conv_top\nI1206 09:11:00.399927 23310 net.cpp:150] Setting up L2_b6_brc3_conv\nI1206 09:11:00.399943 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.399948 23310 net.cpp:165] Memory required for data: 1811477500\nI1206 09:11:00.399957 23310 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1206 09:11:00.399971 23310 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1206 09:11:00.399977 23310 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_brc3_conv_top\nI1206 09:11:00.399983 23310 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split_1\nI1206 09:11:00.399999 23310 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1206 09:11:00.400030 23310 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1206 09:11:00.400040 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.400045 23310 net.cpp:165] Memory required for data: 1822618620\nI1206 09:11:00.400050 23310 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.400058 23310 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.400063 23310 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split <- L2_b6_sum_eltwise_top\nI1206 09:11:00.400075 23310 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:11:00.400086 23310 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split -> L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:11:00.400135 23310 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split\nI1206 09:11:00.400151 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.400158 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.400163 23310 net.cpp:165] Memory required for data: 1844900860\nI1206 09:11:00.400169 23310 layer_factory.hpp:77] 
Creating layer L3_b1_brc1_bn\nI1206 09:11:00.400177 23310 net.cpp:100] Creating Layer L3_b1_brc1_bn\nI1206 09:11:00.400183 23310 net.cpp:434] L3_b1_brc1_bn <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_0\nI1206 09:11:00.400194 23310 net.cpp:408] L3_b1_brc1_bn -> L3_b1_brc1_bn_top\nI1206 09:11:00.400440 23310 net.cpp:150] Setting up L3_b1_brc1_bn\nI1206 09:11:00.400456 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.400462 23310 net.cpp:165] Memory required for data: 1856041980\nI1206 09:11:00.400472 23310 layer_factory.hpp:77] Creating layer L3_b1_brc1_relu\nI1206 09:11:00.400481 23310 net.cpp:100] Creating Layer L3_b1_brc1_relu\nI1206 09:11:00.400487 23310 net.cpp:434] L3_b1_brc1_relu <- L3_b1_brc1_bn_top\nI1206 09:11:00.400494 23310 net.cpp:395] L3_b1_brc1_relu -> L3_b1_brc1_bn_top (in-place)\nI1206 09:11:00.400503 23310 net.cpp:150] Setting up L3_b1_brc1_relu\nI1206 09:11:00.400511 23310 net.cpp:157] Top shape: 85 128 16 16 (2785280)\nI1206 09:11:00.400516 23310 net.cpp:165] Memory required for data: 1867183100\nI1206 09:11:00.400521 23310 layer_factory.hpp:77] Creating layer L3_b1_brc1_conv\nI1206 09:11:00.400534 23310 net.cpp:100] Creating Layer L3_b1_brc1_conv\nI1206 09:11:00.400540 23310 net.cpp:434] L3_b1_brc1_conv <- L3_b1_brc1_bn_top\nI1206 09:11:00.400549 23310 net.cpp:408] L3_b1_brc1_conv -> L3_b1_brc1_conv_top\nI1206 09:11:00.401198 23310 net.cpp:150] Setting up L3_b1_brc1_conv\nI1206 09:11:00.401214 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.401219 23310 net.cpp:165] Memory required for data: 1869968380\nI1206 09:11:00.401228 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_bn\nI1206 09:11:00.401237 23310 net.cpp:100] Creating Layer L3_b1_brc2_bn\nI1206 09:11:00.401243 23310 net.cpp:434] L3_b1_brc2_bn <- L3_b1_brc1_conv_top\nI1206 09:11:00.401255 23310 net.cpp:408] L3_b1_brc2_bn -> L3_b1_brc2_bn_top\nI1206 09:11:00.401525 23310 net.cpp:150] Setting up L3_b1_brc2_bn\nI1206 09:11:00.401537 23310 
net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.401542 23310 net.cpp:165] Memory required for data: 1872753660\nI1206 09:11:00.401553 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_relu\nI1206 09:11:00.401566 23310 net.cpp:100] Creating Layer L3_b1_brc2_relu\nI1206 09:11:00.401572 23310 net.cpp:434] L3_b1_brc2_relu <- L3_b1_brc2_bn_top\nI1206 09:11:00.401581 23310 net.cpp:395] L3_b1_brc2_relu -> L3_b1_brc2_bn_top (in-place)\nI1206 09:11:00.401589 23310 net.cpp:150] Setting up L3_b1_brc2_relu\nI1206 09:11:00.401597 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.401602 23310 net.cpp:165] Memory required for data: 1875538940\nI1206 09:11:00.401607 23310 layer_factory.hpp:77] Creating layer L3_b1_brc2_conv\nI1206 09:11:00.401625 23310 net.cpp:100] Creating Layer L3_b1_brc2_conv\nI1206 09:11:00.401631 23310 net.cpp:434] L3_b1_brc2_conv <- L3_b1_brc2_bn_top\nI1206 09:11:00.401643 23310 net.cpp:408] L3_b1_brc2_conv -> L3_b1_brc2_conv_top\nI1206 09:11:00.402057 23310 net.cpp:150] Setting up L3_b1_brc2_conv\nI1206 09:11:00.402072 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.402078 23310 net.cpp:165] Memory required for data: 1878324220\nI1206 09:11:00.402087 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_bn\nI1206 09:11:00.402096 23310 net.cpp:100] Creating Layer L3_b1_brc3_bn\nI1206 09:11:00.402102 23310 net.cpp:434] L3_b1_brc3_bn <- L3_b1_brc2_conv_top\nI1206 09:11:00.402109 23310 net.cpp:408] L3_b1_brc3_bn -> L3_b1_brc3_bn_top\nI1206 09:11:00.402382 23310 net.cpp:150] Setting up L3_b1_brc3_bn\nI1206 09:11:00.402395 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.402401 23310 net.cpp:165] Memory required for data: 1881109500\nI1206 09:11:00.402411 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_relu\nI1206 09:11:00.402418 23310 net.cpp:100] Creating Layer L3_b1_brc3_relu\nI1206 09:11:00.402426 23310 net.cpp:434] L3_b1_brc3_relu <- L3_b1_brc3_bn_top\nI1206 09:11:00.402436 23310 
net.cpp:395] L3_b1_brc3_relu -> L3_b1_brc3_bn_top (in-place)\nI1206 09:11:00.402446 23310 net.cpp:150] Setting up L3_b1_brc3_relu\nI1206 09:11:00.402453 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.402458 23310 net.cpp:165] Memory required for data: 1883894780\nI1206 09:11:00.402463 23310 layer_factory.hpp:77] Creating layer L3_b1_brc3_conv\nI1206 09:11:00.402478 23310 net.cpp:100] Creating Layer L3_b1_brc3_conv\nI1206 09:11:00.402484 23310 net.cpp:434] L3_b1_brc3_conv <- L3_b1_brc3_bn_top\nI1206 09:11:00.402493 23310 net.cpp:408] L3_b1_brc3_conv -> L3_b1_brc3_conv_top\nI1206 09:11:00.403475 23310 net.cpp:150] Setting up L3_b1_brc3_conv\nI1206 09:11:00.403489 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.403496 23310 net.cpp:165] Memory required for data: 1889465340\nI1206 09:11:00.403504 23310 layer_factory.hpp:77] Creating layer L3_b1_chanInc_conv\nI1206 09:11:00.403519 23310 net.cpp:100] Creating Layer L3_b1_chanInc_conv\nI1206 09:11:00.403527 23310 net.cpp:434] L3_b1_chanInc_conv <- L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split_1\nI1206 09:11:00.403538 23310 net.cpp:408] L3_b1_chanInc_conv -> L3_b1_chanInc_conv_top\nI1206 09:11:00.404515 23310 net.cpp:150] Setting up L3_b1_chanInc_conv\nI1206 09:11:00.404531 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.404536 23310 net.cpp:165] Memory required for data: 1895035900\nI1206 09:11:00.404544 23310 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1206 09:11:00.404553 23310 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1206 09:11:00.404559 23310 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_brc3_conv_top\nI1206 09:11:00.404567 23310 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_chanInc_conv_top\nI1206 09:11:00.404574 23310 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1206 09:11:00.404613 23310 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1206 09:11:00.404625 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.404630 23310 
net.cpp:165] Memory required for data: 1900606460\nI1206 09:11:00.404636 23310 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.404644 23310 net.cpp:100] Creating Layer L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.404649 23310 net.cpp:434] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split <- L3_b1_sum_eltwise_top\nI1206 09:11:00.404661 23310 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:11:00.404671 23310 net.cpp:408] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split -> L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:11:00.404728 23310 net.cpp:150] Setting up L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split\nI1206 09:11:00.404744 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.404752 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.404763 23310 net.cpp:165] Memory required for data: 1911747580\nI1206 09:11:00.404770 23310 layer_factory.hpp:77] Creating layer L3_b2_brc1_bn\nI1206 09:11:00.404778 23310 net.cpp:100] Creating Layer L3_b2_brc1_bn\nI1206 09:11:00.404784 23310 net.cpp:434] L3_b2_brc1_bn <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_0\nI1206 09:11:00.404796 23310 net.cpp:408] L3_b2_brc1_bn -> L3_b2_brc1_bn_top\nI1206 09:11:00.405057 23310 net.cpp:150] Setting up L3_b2_brc1_bn\nI1206 09:11:00.405071 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.405076 23310 net.cpp:165] Memory required for data: 1917318140\nI1206 09:11:00.405086 23310 layer_factory.hpp:77] Creating layer L3_b2_brc1_relu\nI1206 09:11:00.405100 23310 net.cpp:100] Creating Layer L3_b2_brc1_relu\nI1206 09:11:00.405107 23310 net.cpp:434] L3_b2_brc1_relu <- L3_b2_brc1_bn_top\nI1206 09:11:00.405113 23310 net.cpp:395] L3_b2_brc1_relu -> L3_b2_brc1_bn_top (in-place)\nI1206 09:11:00.405123 23310 net.cpp:150] Setting up L3_b2_brc1_relu\nI1206 09:11:00.405130 23310 net.cpp:157] Top 
shape: 85 256 8 8 (1392640)\nI1206 09:11:00.405135 23310 net.cpp:165] Memory required for data: 1922888700\nI1206 09:11:00.405140 23310 layer_factory.hpp:77] Creating layer L3_b2_brc1_conv\nI1206 09:11:00.405150 23310 net.cpp:100] Creating Layer L3_b2_brc1_conv\nI1206 09:11:00.405156 23310 net.cpp:434] L3_b2_brc1_conv <- L3_b2_brc1_bn_top\nI1206 09:11:00.405164 23310 net.cpp:408] L3_b2_brc1_conv -> L3_b2_brc1_conv_top\nI1206 09:11:00.406148 23310 net.cpp:150] Setting up L3_b2_brc1_conv\nI1206 09:11:00.406168 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.406173 23310 net.cpp:165] Memory required for data: 1925673980\nI1206 09:11:00.406183 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_bn\nI1206 09:11:00.406194 23310 net.cpp:100] Creating Layer L3_b2_brc2_bn\nI1206 09:11:00.406201 23310 net.cpp:434] L3_b2_brc2_bn <- L3_b2_brc1_conv_top\nI1206 09:11:00.406209 23310 net.cpp:408] L3_b2_brc2_bn -> L3_b2_brc2_bn_top\nI1206 09:11:00.406472 23310 net.cpp:150] Setting up L3_b2_brc2_bn\nI1206 09:11:00.406486 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.406491 23310 net.cpp:165] Memory required for data: 1928459260\nI1206 09:11:00.406500 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_relu\nI1206 09:11:00.406508 23310 net.cpp:100] Creating Layer L3_b2_brc2_relu\nI1206 09:11:00.406514 23310 net.cpp:434] L3_b2_brc2_relu <- L3_b2_brc2_bn_top\nI1206 09:11:00.406522 23310 net.cpp:395] L3_b2_brc2_relu -> L3_b2_brc2_bn_top (in-place)\nI1206 09:11:00.406532 23310 net.cpp:150] Setting up L3_b2_brc2_relu\nI1206 09:11:00.406538 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.406543 23310 net.cpp:165] Memory required for data: 1931244540\nI1206 09:11:00.406548 23310 layer_factory.hpp:77] Creating layer L3_b2_brc2_conv\nI1206 09:11:00.406563 23310 net.cpp:100] Creating Layer L3_b2_brc2_conv\nI1206 09:11:00.406569 23310 net.cpp:434] L3_b2_brc2_conv <- L3_b2_brc2_bn_top\nI1206 09:11:00.406580 23310 net.cpp:408] 
L3_b2_brc2_conv -> L3_b2_brc2_conv_top\nI1206 09:11:00.408298 23310 net.cpp:150] Setting up L3_b2_brc2_conv\nI1206 09:11:00.408319 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.408325 23310 net.cpp:165] Memory required for data: 1934029820\nI1206 09:11:00.408335 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_bn\nI1206 09:11:00.408344 23310 net.cpp:100] Creating Layer L3_b2_brc3_bn\nI1206 09:11:00.408351 23310 net.cpp:434] L3_b2_brc3_bn <- L3_b2_brc2_conv_top\nI1206 09:11:00.408360 23310 net.cpp:408] L3_b2_brc3_bn -> L3_b2_brc3_bn_top\nI1206 09:11:00.408625 23310 net.cpp:150] Setting up L3_b2_brc3_bn\nI1206 09:11:00.408638 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.408643 23310 net.cpp:165] Memory required for data: 1936815100\nI1206 09:11:00.408654 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_relu\nI1206 09:11:00.408663 23310 net.cpp:100] Creating Layer L3_b2_brc3_relu\nI1206 09:11:00.408668 23310 net.cpp:434] L3_b2_brc3_relu <- L3_b2_brc3_bn_top\nI1206 09:11:00.408679 23310 net.cpp:395] L3_b2_brc3_relu -> L3_b2_brc3_bn_top (in-place)\nI1206 09:11:00.408699 23310 net.cpp:150] Setting up L3_b2_brc3_relu\nI1206 09:11:00.408707 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.408712 23310 net.cpp:165] Memory required for data: 1939600380\nI1206 09:11:00.408723 23310 layer_factory.hpp:77] Creating layer L3_b2_brc3_conv\nI1206 09:11:00.408740 23310 net.cpp:100] Creating Layer L3_b2_brc3_conv\nI1206 09:11:00.408746 23310 net.cpp:434] L3_b2_brc3_conv <- L3_b2_brc3_bn_top\nI1206 09:11:00.408754 23310 net.cpp:408] L3_b2_brc3_conv -> L3_b2_brc3_conv_top\nI1206 09:11:00.409719 23310 net.cpp:150] Setting up L3_b2_brc3_conv\nI1206 09:11:00.409735 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.409740 23310 net.cpp:165] Memory required for data: 1945170940\nI1206 09:11:00.409749 23310 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1206 09:11:00.409763 23310 net.cpp:100] 
Creating Layer L3_b2_sum_eltwise\nI1206 09:11:00.409770 23310 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_brc3_conv_top\nI1206 09:11:00.409777 23310 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split_1\nI1206 09:11:00.409788 23310 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1206 09:11:00.409824 23310 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1206 09:11:00.409837 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.409842 23310 net.cpp:165] Memory required for data: 1950741500\nI1206 09:11:00.409847 23310 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.409859 23310 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.409865 23310 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split <- L3_b2_sum_eltwise_top\nI1206 09:11:00.409873 23310 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:11:00.409883 23310 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split -> L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:11:00.409939 23310 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split\nI1206 09:11:00.409950 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.409957 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.409961 23310 net.cpp:165] Memory required for data: 1961882620\nI1206 09:11:00.409967 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_bn\nI1206 09:11:00.409978 23310 net.cpp:100] Creating Layer L3_b3_brc1_bn\nI1206 09:11:00.409984 23310 net.cpp:434] L3_b3_brc1_bn <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_0\nI1206 09:11:00.409992 23310 net.cpp:408] L3_b3_brc1_bn -> L3_b3_brc1_bn_top\nI1206 09:11:00.410243 23310 net.cpp:150] Setting up L3_b3_brc1_bn\nI1206 09:11:00.410256 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.410261 23310 
net.cpp:165] Memory required for data: 1967453180\nI1206 09:11:00.410272 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_relu\nI1206 09:11:00.410280 23310 net.cpp:100] Creating Layer L3_b3_brc1_relu\nI1206 09:11:00.410286 23310 net.cpp:434] L3_b3_brc1_relu <- L3_b3_brc1_bn_top\nI1206 09:11:00.410293 23310 net.cpp:395] L3_b3_brc1_relu -> L3_b3_brc1_bn_top (in-place)\nI1206 09:11:00.410303 23310 net.cpp:150] Setting up L3_b3_brc1_relu\nI1206 09:11:00.410310 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.410315 23310 net.cpp:165] Memory required for data: 1973023740\nI1206 09:11:00.410320 23310 layer_factory.hpp:77] Creating layer L3_b3_brc1_conv\nI1206 09:11:00.410334 23310 net.cpp:100] Creating Layer L3_b3_brc1_conv\nI1206 09:11:00.410341 23310 net.cpp:434] L3_b3_brc1_conv <- L3_b3_brc1_bn_top\nI1206 09:11:00.410352 23310 net.cpp:408] L3_b3_brc1_conv -> L3_b3_brc1_conv_top\nI1206 09:11:00.412607 23310 net.cpp:150] Setting up L3_b3_brc1_conv\nI1206 09:11:00.412626 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.412631 23310 net.cpp:165] Memory required for data: 1975809020\nI1206 09:11:00.412641 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_bn\nI1206 09:11:00.412650 23310 net.cpp:100] Creating Layer L3_b3_brc2_bn\nI1206 09:11:00.412665 23310 net.cpp:434] L3_b3_brc2_bn <- L3_b3_brc1_conv_top\nI1206 09:11:00.412678 23310 net.cpp:408] L3_b3_brc2_bn -> L3_b3_brc2_bn_top\nI1206 09:11:00.412966 23310 net.cpp:150] Setting up L3_b3_brc2_bn\nI1206 09:11:00.412981 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.412986 23310 net.cpp:165] Memory required for data: 1978594300\nI1206 09:11:00.412997 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_relu\nI1206 09:11:00.413009 23310 net.cpp:100] Creating Layer L3_b3_brc2_relu\nI1206 09:11:00.413017 23310 net.cpp:434] L3_b3_brc2_relu <- L3_b3_brc2_bn_top\nI1206 09:11:00.413023 23310 net.cpp:395] L3_b3_brc2_relu -> L3_b3_brc2_bn_top (in-place)\nI1206 
09:11:00.413034 23310 net.cpp:150] Setting up L3_b3_brc2_relu\nI1206 09:11:00.413041 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.413046 23310 net.cpp:165] Memory required for data: 1981379580\nI1206 09:11:00.413051 23310 layer_factory.hpp:77] Creating layer L3_b3_brc2_conv\nI1206 09:11:00.413063 23310 net.cpp:100] Creating Layer L3_b3_brc2_conv\nI1206 09:11:00.413069 23310 net.cpp:434] L3_b3_brc2_conv <- L3_b3_brc2_bn_top\nI1206 09:11:00.413081 23310 net.cpp:408] L3_b3_brc2_conv -> L3_b3_brc2_conv_top\nI1206 09:11:00.413487 23310 net.cpp:150] Setting up L3_b3_brc2_conv\nI1206 09:11:00.413502 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.413507 23310 net.cpp:165] Memory required for data: 1984164860\nI1206 09:11:00.413517 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_bn\nI1206 09:11:00.413527 23310 net.cpp:100] Creating Layer L3_b3_brc3_bn\nI1206 09:11:00.413532 23310 net.cpp:434] L3_b3_brc3_bn <- L3_b3_brc2_conv_top\nI1206 09:11:00.413540 23310 net.cpp:408] L3_b3_brc3_bn -> L3_b3_brc3_bn_top\nI1206 09:11:00.413813 23310 net.cpp:150] Setting up L3_b3_brc3_bn\nI1206 09:11:00.413827 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.413832 23310 net.cpp:165] Memory required for data: 1986950140\nI1206 09:11:00.413843 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_relu\nI1206 09:11:00.413852 23310 net.cpp:100] Creating Layer L3_b3_brc3_relu\nI1206 09:11:00.413858 23310 net.cpp:434] L3_b3_brc3_relu <- L3_b3_brc3_bn_top\nI1206 09:11:00.413869 23310 net.cpp:395] L3_b3_brc3_relu -> L3_b3_brc3_bn_top (in-place)\nI1206 09:11:00.413880 23310 net.cpp:150] Setting up L3_b3_brc3_relu\nI1206 09:11:00.413887 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.413892 23310 net.cpp:165] Memory required for data: 1989735420\nI1206 09:11:00.413897 23310 layer_factory.hpp:77] Creating layer L3_b3_brc3_conv\nI1206 09:11:00.413913 23310 net.cpp:100] Creating Layer L3_b3_brc3_conv\nI1206 09:11:00.413919 
23310 net.cpp:434] L3_b3_brc3_conv <- L3_b3_brc3_bn_top\nI1206 09:11:00.413928 23310 net.cpp:408] L3_b3_brc3_conv -> L3_b3_brc3_conv_top\nI1206 09:11:00.414885 23310 net.cpp:150] Setting up L3_b3_brc3_conv\nI1206 09:11:00.414901 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.414906 23310 net.cpp:165] Memory required for data: 1995305980\nI1206 09:11:00.414916 23310 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1206 09:11:00.414925 23310 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1206 09:11:00.414932 23310 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_brc3_conv_top\nI1206 09:11:00.414938 23310 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split_1\nI1206 09:11:00.414950 23310 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1206 09:11:00.414986 23310 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1206 09:11:00.414999 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.415004 23310 net.cpp:165] Memory required for data: 2000876540\nI1206 09:11:00.415009 23310 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.415024 23310 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.415030 23310 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split <- L3_b3_sum_eltwise_top\nI1206 09:11:00.415037 23310 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:11:00.415060 23310 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split -> L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:11:00.415113 23310 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split\nI1206 09:11:00.415127 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.415132 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.415138 23310 net.cpp:165] Memory required for data: 2012017660\nI1206 09:11:00.415143 23310 
layer_factory.hpp:77] Creating layer L3_b4_brc1_bn\nI1206 09:11:00.415154 23310 net.cpp:100] Creating Layer L3_b4_brc1_bn\nI1206 09:11:00.415161 23310 net.cpp:434] L3_b4_brc1_bn <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_0\nI1206 09:11:00.415169 23310 net.cpp:408] L3_b4_brc1_bn -> L3_b4_brc1_bn_top\nI1206 09:11:00.415427 23310 net.cpp:150] Setting up L3_b4_brc1_bn\nI1206 09:11:00.415441 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.415446 23310 net.cpp:165] Memory required for data: 2017588220\nI1206 09:11:00.415457 23310 layer_factory.hpp:77] Creating layer L3_b4_brc1_relu\nI1206 09:11:00.415465 23310 net.cpp:100] Creating Layer L3_b4_brc1_relu\nI1206 09:11:00.415472 23310 net.cpp:434] L3_b4_brc1_relu <- L3_b4_brc1_bn_top\nI1206 09:11:00.415483 23310 net.cpp:395] L3_b4_brc1_relu -> L3_b4_brc1_bn_top (in-place)\nI1206 09:11:00.415493 23310 net.cpp:150] Setting up L3_b4_brc1_relu\nI1206 09:11:00.415500 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.415505 23310 net.cpp:165] Memory required for data: 2023158780\nI1206 09:11:00.415510 23310 layer_factory.hpp:77] Creating layer L3_b4_brc1_conv\nI1206 09:11:00.415526 23310 net.cpp:100] Creating Layer L3_b4_brc1_conv\nI1206 09:11:00.415532 23310 net.cpp:434] L3_b4_brc1_conv <- L3_b4_brc1_bn_top\nI1206 09:11:00.415540 23310 net.cpp:408] L3_b4_brc1_conv -> L3_b4_brc1_conv_top\nI1206 09:11:00.416497 23310 net.cpp:150] Setting up L3_b4_brc1_conv\nI1206 09:11:00.416513 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.416519 23310 net.cpp:165] Memory required for data: 2025944060\nI1206 09:11:00.416528 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_bn\nI1206 09:11:00.416538 23310 net.cpp:100] Creating Layer L3_b4_brc2_bn\nI1206 09:11:00.416543 23310 net.cpp:434] L3_b4_brc2_bn <- L3_b4_brc1_conv_top\nI1206 09:11:00.416555 23310 net.cpp:408] L3_b4_brc2_bn -> L3_b4_brc2_bn_top\nI1206 09:11:00.416829 23310 net.cpp:150] Setting up L3_b4_brc2_bn\nI1206 
09:11:00.416843 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.416848 23310 net.cpp:165] Memory required for data: 2028729340\nI1206 09:11:00.416859 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_relu\nI1206 09:11:00.416867 23310 net.cpp:100] Creating Layer L3_b4_brc2_relu\nI1206 09:11:00.416874 23310 net.cpp:434] L3_b4_brc2_relu <- L3_b4_brc2_bn_top\nI1206 09:11:00.416882 23310 net.cpp:395] L3_b4_brc2_relu -> L3_b4_brc2_bn_top (in-place)\nI1206 09:11:00.416891 23310 net.cpp:150] Setting up L3_b4_brc2_relu\nI1206 09:11:00.416898 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.416903 23310 net.cpp:165] Memory required for data: 2031514620\nI1206 09:11:00.416908 23310 layer_factory.hpp:77] Creating layer L3_b4_brc2_conv\nI1206 09:11:00.416923 23310 net.cpp:100] Creating Layer L3_b4_brc2_conv\nI1206 09:11:00.416929 23310 net.cpp:434] L3_b4_brc2_conv <- L3_b4_brc2_bn_top\nI1206 09:11:00.416941 23310 net.cpp:408] L3_b4_brc2_conv -> L3_b4_brc2_conv_top\nI1206 09:11:00.417368 23310 net.cpp:150] Setting up L3_b4_brc2_conv\nI1206 09:11:00.417383 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.417389 23310 net.cpp:165] Memory required for data: 2034299900\nI1206 09:11:00.417398 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_bn\nI1206 09:11:00.417412 23310 net.cpp:100] Creating Layer L3_b4_brc3_bn\nI1206 09:11:00.417419 23310 net.cpp:434] L3_b4_brc3_bn <- L3_b4_brc2_conv_top\nI1206 09:11:00.417428 23310 net.cpp:408] L3_b4_brc3_bn -> L3_b4_brc3_bn_top\nI1206 09:11:00.417695 23310 net.cpp:150] Setting up L3_b4_brc3_bn\nI1206 09:11:00.417724 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.417732 23310 net.cpp:165] Memory required for data: 2037085180\nI1206 09:11:00.417742 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_relu\nI1206 09:11:00.417752 23310 net.cpp:100] Creating Layer L3_b4_brc3_relu\nI1206 09:11:00.417757 23310 net.cpp:434] L3_b4_brc3_relu <- L3_b4_brc3_bn_top\nI1206 
09:11:00.417764 23310 net.cpp:395] L3_b4_brc3_relu -> L3_b4_brc3_bn_top (in-place)\nI1206 09:11:00.417774 23310 net.cpp:150] Setting up L3_b4_brc3_relu\nI1206 09:11:00.417783 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.417786 23310 net.cpp:165] Memory required for data: 2039870460\nI1206 09:11:00.417791 23310 layer_factory.hpp:77] Creating layer L3_b4_brc3_conv\nI1206 09:11:00.417805 23310 net.cpp:100] Creating Layer L3_b4_brc3_conv\nI1206 09:11:00.417811 23310 net.cpp:434] L3_b4_brc3_conv <- L3_b4_brc3_bn_top\nI1206 09:11:00.417820 23310 net.cpp:408] L3_b4_brc3_conv -> L3_b4_brc3_conv_top\nI1206 09:11:00.418781 23310 net.cpp:150] Setting up L3_b4_brc3_conv\nI1206 09:11:00.418797 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.418802 23310 net.cpp:165] Memory required for data: 2045441020\nI1206 09:11:00.418812 23310 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1206 09:11:00.418822 23310 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1206 09:11:00.418828 23310 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_brc3_conv_top\nI1206 09:11:00.418835 23310 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split_1\nI1206 09:11:00.418846 23310 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1206 09:11:00.418882 23310 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1206 09:11:00.418898 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.418905 23310 net.cpp:165] Memory required for data: 2051011580\nI1206 09:11:00.418910 23310 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.418917 23310 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.418923 23310 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split <- L3_b4_sum_eltwise_top\nI1206 09:11:00.418931 23310 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 
09:11:00.418941 23310 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split -> L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:11:00.418995 23310 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split\nI1206 09:11:00.419008 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.419013 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.419018 23310 net.cpp:165] Memory required for data: 2062152700\nI1206 09:11:00.419024 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_bn\nI1206 09:11:00.419035 23310 net.cpp:100] Creating Layer L3_b5_brc1_bn\nI1206 09:11:00.419042 23310 net.cpp:434] L3_b5_brc1_bn <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_0\nI1206 09:11:00.419050 23310 net.cpp:408] L3_b5_brc1_bn -> L3_b5_brc1_bn_top\nI1206 09:11:00.419306 23310 net.cpp:150] Setting up L3_b5_brc1_bn\nI1206 09:11:00.419322 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.419328 23310 net.cpp:165] Memory required for data: 2067723260\nI1206 09:11:00.419338 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_relu\nI1206 09:11:00.419348 23310 net.cpp:100] Creating Layer L3_b5_brc1_relu\nI1206 09:11:00.419353 23310 net.cpp:434] L3_b5_brc1_relu <- L3_b5_brc1_bn_top\nI1206 09:11:00.419360 23310 net.cpp:395] L3_b5_brc1_relu -> L3_b5_brc1_bn_top (in-place)\nI1206 09:11:00.419370 23310 net.cpp:150] Setting up L3_b5_brc1_relu\nI1206 09:11:00.419378 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.419383 23310 net.cpp:165] Memory required for data: 2073293820\nI1206 09:11:00.419387 23310 layer_factory.hpp:77] Creating layer L3_b5_brc1_conv\nI1206 09:11:00.419401 23310 net.cpp:100] Creating Layer L3_b5_brc1_conv\nI1206 09:11:00.419414 23310 net.cpp:434] L3_b5_brc1_conv <- L3_b5_brc1_bn_top\nI1206 09:11:00.419425 23310 net.cpp:408] L3_b5_brc1_conv -> L3_b5_brc1_conv_top\nI1206 09:11:00.420380 23310 net.cpp:150] Setting up L3_b5_brc1_conv\nI1206 09:11:00.420395 23310 net.cpp:157] Top 
shape: 85 128 8 8 (696320)\nI1206 09:11:00.420402 23310 net.cpp:165] Memory required for data: 2076079100\nI1206 09:11:00.420410 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_bn\nI1206 09:11:00.420418 23310 net.cpp:100] Creating Layer L3_b5_brc2_bn\nI1206 09:11:00.420425 23310 net.cpp:434] L3_b5_brc2_bn <- L3_b5_brc1_conv_top\nI1206 09:11:00.420437 23310 net.cpp:408] L3_b5_brc2_bn -> L3_b5_brc2_bn_top\nI1206 09:11:00.420702 23310 net.cpp:150] Setting up L3_b5_brc2_bn\nI1206 09:11:00.420722 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.420727 23310 net.cpp:165] Memory required for data: 2078864380\nI1206 09:11:00.420738 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_relu\nI1206 09:11:00.420750 23310 net.cpp:100] Creating Layer L3_b5_brc2_relu\nI1206 09:11:00.420758 23310 net.cpp:434] L3_b5_brc2_relu <- L3_b5_brc2_bn_top\nI1206 09:11:00.420764 23310 net.cpp:395] L3_b5_brc2_relu -> L3_b5_brc2_bn_top (in-place)\nI1206 09:11:00.420774 23310 net.cpp:150] Setting up L3_b5_brc2_relu\nI1206 09:11:00.420783 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.420788 23310 net.cpp:165] Memory required for data: 2081649660\nI1206 09:11:00.420792 23310 layer_factory.hpp:77] Creating layer L3_b5_brc2_conv\nI1206 09:11:00.420802 23310 net.cpp:100] Creating Layer L3_b5_brc2_conv\nI1206 09:11:00.420809 23310 net.cpp:434] L3_b5_brc2_conv <- L3_b5_brc2_bn_top\nI1206 09:11:00.420822 23310 net.cpp:408] L3_b5_brc2_conv -> L3_b5_brc2_conv_top\nI1206 09:11:00.421217 23310 net.cpp:150] Setting up L3_b5_brc2_conv\nI1206 09:11:00.421231 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.421237 23310 net.cpp:165] Memory required for data: 2084434940\nI1206 09:11:00.421285 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_bn\nI1206 09:11:00.421298 23310 net.cpp:100] Creating Layer L3_b5_brc3_bn\nI1206 09:11:00.421306 23310 net.cpp:434] L3_b5_brc3_bn <- L3_b5_brc2_conv_top\nI1206 09:11:00.421314 23310 net.cpp:408] L3_b5_brc3_bn 
-> L3_b5_brc3_bn_top\nI1206 09:11:00.421583 23310 net.cpp:150] Setting up L3_b5_brc3_bn\nI1206 09:11:00.421597 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.421602 23310 net.cpp:165] Memory required for data: 2087220220\nI1206 09:11:00.421612 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_relu\nI1206 09:11:00.421619 23310 net.cpp:100] Creating Layer L3_b5_brc3_relu\nI1206 09:11:00.421627 23310 net.cpp:434] L3_b5_brc3_relu <- L3_b5_brc3_bn_top\nI1206 09:11:00.421633 23310 net.cpp:395] L3_b5_brc3_relu -> L3_b5_brc3_bn_top (in-place)\nI1206 09:11:00.421643 23310 net.cpp:150] Setting up L3_b5_brc3_relu\nI1206 09:11:00.421650 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.421655 23310 net.cpp:165] Memory required for data: 2090005500\nI1206 09:11:00.421659 23310 layer_factory.hpp:77] Creating layer L3_b5_brc3_conv\nI1206 09:11:00.421674 23310 net.cpp:100] Creating Layer L3_b5_brc3_conv\nI1206 09:11:00.421680 23310 net.cpp:434] L3_b5_brc3_conv <- L3_b5_brc3_bn_top\nI1206 09:11:00.421689 23310 net.cpp:408] L3_b5_brc3_conv -> L3_b5_brc3_conv_top\nI1206 09:11:00.422647 23310 net.cpp:150] Setting up L3_b5_brc3_conv\nI1206 09:11:00.422662 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.422668 23310 net.cpp:165] Memory required for data: 2095576060\nI1206 09:11:00.422677 23310 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1206 09:11:00.422686 23310 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1206 09:11:00.422693 23310 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_brc3_conv_top\nI1206 09:11:00.422700 23310 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split_1\nI1206 09:11:00.422716 23310 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1206 09:11:00.422755 23310 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1206 09:11:00.422771 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.422787 23310 net.cpp:165] Memory required for data: 2101146620\nI1206 
09:11:00.422794 23310 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.422802 23310 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.422808 23310 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split <- L3_b5_sum_eltwise_top\nI1206 09:11:00.422816 23310 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:11:00.422832 23310 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split -> L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:11:00.422884 23310 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split\nI1206 09:11:00.422896 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.422904 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.422909 23310 net.cpp:165] Memory required for data: 2112287740\nI1206 09:11:00.422914 23310 layer_factory.hpp:77] Creating layer L3_b6_brc1_bn\nI1206 09:11:00.422925 23310 net.cpp:100] Creating Layer L3_b6_brc1_bn\nI1206 09:11:00.422932 23310 net.cpp:434] L3_b6_brc1_bn <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_0\nI1206 09:11:00.422943 23310 net.cpp:408] L3_b6_brc1_bn -> L3_b6_brc1_bn_top\nI1206 09:11:00.423202 23310 net.cpp:150] Setting up L3_b6_brc1_bn\nI1206 09:11:00.423214 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.423219 23310 net.cpp:165] Memory required for data: 2117858300\nI1206 09:11:00.423230 23310 layer_factory.hpp:77] Creating layer L3_b6_brc1_relu\nI1206 09:11:00.423238 23310 net.cpp:100] Creating Layer L3_b6_brc1_relu\nI1206 09:11:00.423244 23310 net.cpp:434] L3_b6_brc1_relu <- L3_b6_brc1_bn_top\nI1206 09:11:00.423251 23310 net.cpp:395] L3_b6_brc1_relu -> L3_b6_brc1_bn_top (in-place)\nI1206 09:11:00.423261 23310 net.cpp:150] Setting up L3_b6_brc1_relu\nI1206 09:11:00.423269 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.423274 23310 
net.cpp:165] Memory required for data: 2123428860\nI1206 09:11:00.423279 23310 layer_factory.hpp:77] Creating layer L3_b6_brc1_conv\nI1206 09:11:00.423293 23310 net.cpp:100] Creating Layer L3_b6_brc1_conv\nI1206 09:11:00.423300 23310 net.cpp:434] L3_b6_brc1_conv <- L3_b6_brc1_bn_top\nI1206 09:11:00.423308 23310 net.cpp:408] L3_b6_brc1_conv -> L3_b6_brc1_conv_top\nI1206 09:11:00.424266 23310 net.cpp:150] Setting up L3_b6_brc1_conv\nI1206 09:11:00.424283 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.424288 23310 net.cpp:165] Memory required for data: 2126214140\nI1206 09:11:00.424296 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_bn\nI1206 09:11:00.424305 23310 net.cpp:100] Creating Layer L3_b6_brc2_bn\nI1206 09:11:00.424311 23310 net.cpp:434] L3_b6_brc2_bn <- L3_b6_brc1_conv_top\nI1206 09:11:00.424324 23310 net.cpp:408] L3_b6_brc2_bn -> L3_b6_brc2_bn_top\nI1206 09:11:00.424590 23310 net.cpp:150] Setting up L3_b6_brc2_bn\nI1206 09:11:00.424603 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.424608 23310 net.cpp:165] Memory required for data: 2128999420\nI1206 09:11:00.424618 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_relu\nI1206 09:11:00.424630 23310 net.cpp:100] Creating Layer L3_b6_brc2_relu\nI1206 09:11:00.424636 23310 net.cpp:434] L3_b6_brc2_relu <- L3_b6_brc2_bn_top\nI1206 09:11:00.424644 23310 net.cpp:395] L3_b6_brc2_relu -> L3_b6_brc2_bn_top (in-place)\nI1206 09:11:00.424654 23310 net.cpp:150] Setting up L3_b6_brc2_relu\nI1206 09:11:00.424661 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.424665 23310 net.cpp:165] Memory required for data: 2131784700\nI1206 09:11:00.424670 23310 layer_factory.hpp:77] Creating layer L3_b6_brc2_conv\nI1206 09:11:00.424681 23310 net.cpp:100] Creating Layer L3_b6_brc2_conv\nI1206 09:11:00.424687 23310 net.cpp:434] L3_b6_brc2_conv <- L3_b6_brc2_bn_top\nI1206 09:11:00.424700 23310 net.cpp:408] L3_b6_brc2_conv -> L3_b6_brc2_conv_top\nI1206 09:11:00.425117 
23310 net.cpp:150] Setting up L3_b6_brc2_conv\nI1206 09:11:00.425138 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.425144 23310 net.cpp:165] Memory required for data: 2134569980\nI1206 09:11:00.425153 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_bn\nI1206 09:11:00.425163 23310 net.cpp:100] Creating Layer L3_b6_brc3_bn\nI1206 09:11:00.425169 23310 net.cpp:434] L3_b6_brc3_bn <- L3_b6_brc2_conv_top\nI1206 09:11:00.425177 23310 net.cpp:408] L3_b6_brc3_bn -> L3_b6_brc3_bn_top\nI1206 09:11:00.425444 23310 net.cpp:150] Setting up L3_b6_brc3_bn\nI1206 09:11:00.425458 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.425463 23310 net.cpp:165] Memory required for data: 2137355260\nI1206 09:11:00.425473 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_relu\nI1206 09:11:00.425482 23310 net.cpp:100] Creating Layer L3_b6_brc3_relu\nI1206 09:11:00.425488 23310 net.cpp:434] L3_b6_brc3_relu <- L3_b6_brc3_bn_top\nI1206 09:11:00.425499 23310 net.cpp:395] L3_b6_brc3_relu -> L3_b6_brc3_bn_top (in-place)\nI1206 09:11:00.425510 23310 net.cpp:150] Setting up L3_b6_brc3_relu\nI1206 09:11:00.425518 23310 net.cpp:157] Top shape: 85 128 8 8 (696320)\nI1206 09:11:00.425521 23310 net.cpp:165] Memory required for data: 2140140540\nI1206 09:11:00.425526 23310 layer_factory.hpp:77] Creating layer L3_b6_brc3_conv\nI1206 09:11:00.425540 23310 net.cpp:100] Creating Layer L3_b6_brc3_conv\nI1206 09:11:00.425546 23310 net.cpp:434] L3_b6_brc3_conv <- L3_b6_brc3_bn_top\nI1206 09:11:00.425555 23310 net.cpp:408] L3_b6_brc3_conv -> L3_b6_brc3_conv_top\nI1206 09:11:00.426513 23310 net.cpp:150] Setting up L3_b6_brc3_conv\nI1206 09:11:00.426528 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.426534 23310 net.cpp:165] Memory required for data: 2145711100\nI1206 09:11:00.426543 23310 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1206 09:11:00.426553 23310 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1206 09:11:00.426559 23310 
net.cpp:434] L3_b6_sum_eltwise <- L3_b6_brc3_conv_top\nI1206 09:11:00.426566 23310 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split_1\nI1206 09:11:00.426578 23310 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1206 09:11:00.426612 23310 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1206 09:11:00.426625 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.426630 23310 net.cpp:165] Memory required for data: 2151281660\nI1206 09:11:00.426635 23310 layer_factory.hpp:77] Creating layer post_bn\nI1206 09:11:00.426647 23310 net.cpp:100] Creating Layer post_bn\nI1206 09:11:00.426653 23310 net.cpp:434] post_bn <- L3_b6_sum_eltwise_top\nI1206 09:11:00.426664 23310 net.cpp:408] post_bn -> post_bn_top\nI1206 09:11:00.426928 23310 net.cpp:150] Setting up post_bn\nI1206 09:11:00.426944 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.426949 23310 net.cpp:165] Memory required for data: 2156852220\nI1206 09:11:00.426959 23310 layer_factory.hpp:77] Creating layer post_relu\nI1206 09:11:00.426967 23310 net.cpp:100] Creating Layer post_relu\nI1206 09:11:00.426973 23310 net.cpp:434] post_relu <- post_bn_top\nI1206 09:11:00.426981 23310 net.cpp:395] post_relu -> post_bn_top (in-place)\nI1206 09:11:00.426990 23310 net.cpp:150] Setting up post_relu\nI1206 09:11:00.426997 23310 net.cpp:157] Top shape: 85 256 8 8 (1392640)\nI1206 09:11:00.427002 23310 net.cpp:165] Memory required for data: 2162422780\nI1206 09:11:00.427007 23310 layer_factory.hpp:77] Creating layer post_pool\nI1206 09:11:00.427023 23310 net.cpp:100] Creating Layer post_pool\nI1206 09:11:00.427029 23310 net.cpp:434] post_pool <- post_bn_top\nI1206 09:11:00.427037 23310 net.cpp:408] post_pool -> post_pool\nI1206 09:11:00.427078 23310 net.cpp:150] Setting up post_pool\nI1206 09:11:00.427089 23310 net.cpp:157] Top shape: 85 256 1 1 (21760)\nI1206 09:11:00.427095 23310 net.cpp:165] Memory required for data: 2162509820\nI1206 09:11:00.427100 23310 
layer_factory.hpp:77] Creating layer post_FC\nI1206 09:11:00.427111 23310 net.cpp:100] Creating Layer post_FC\nI1206 09:11:00.427117 23310 net.cpp:434] post_FC <- post_pool\nI1206 09:11:00.427129 23310 net.cpp:408] post_FC -> post_FC_top\nI1206 09:11:00.427325 23310 net.cpp:150] Setting up post_FC\nI1206 09:11:00.427338 23310 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.427343 23310 net.cpp:165] Memory required for data: 2162513220\nI1206 09:11:00.427352 23310 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1206 09:11:00.427364 23310 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1206 09:11:00.427371 23310 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1206 09:11:00.427379 23310 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1206 09:11:00.427392 23310 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1206 09:11:00.427444 23310 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1206 09:11:00.427458 23310 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.427464 23310 net.cpp:157] Top shape: 85 10 (850)\nI1206 09:11:00.427469 23310 net.cpp:165] Memory required for data: 2162520020\nI1206 09:11:00.427474 23310 layer_factory.hpp:77] Creating layer accuracy\nI1206 09:11:00.427485 23310 net.cpp:100] Creating Layer accuracy\nI1206 09:11:00.427491 23310 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1206 09:11:00.427498 23310 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1206 09:11:00.427506 23310 net.cpp:408] accuracy -> accuracy\nI1206 09:11:00.427520 23310 net.cpp:150] Setting up accuracy\nI1206 09:11:00.427526 23310 net.cpp:157] Top shape: (1)\nI1206 09:11:00.427531 23310 net.cpp:165] Memory required for data: 2162520024\nI1206 09:11:00.427536 23310 layer_factory.hpp:77] Creating layer loss\nI1206 09:11:00.427547 23310 net.cpp:100] Creating Layer loss\nI1206 09:11:00.427553 23310 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1206 
09:11:00.427561 23310 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1206 09:11:00.427567 23310 net.cpp:408] loss -> loss\nI1206 09:11:00.427579 23310 layer_factory.hpp:77] Creating layer loss\nI1206 09:11:00.427701 23310 net.cpp:150] Setting up loss\nI1206 09:11:00.427723 23310 net.cpp:157] Top shape: (1)\nI1206 09:11:00.427731 23310 net.cpp:160]     with loss weight 1\nI1206 09:11:00.427745 23310 net.cpp:165] Memory required for data: 2162520028\nI1206 09:11:00.427753 23310 net.cpp:226] loss needs backward computation.\nI1206 09:11:00.427758 23310 net.cpp:228] accuracy does not need backward computation.\nI1206 09:11:00.427765 23310 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1206 09:11:00.427770 23310 net.cpp:226] post_FC needs backward computation.\nI1206 09:11:00.427775 23310 net.cpp:226] post_pool needs backward computation.\nI1206 09:11:00.427780 23310 net.cpp:226] post_relu needs backward computation.\nI1206 09:11:00.427785 23310 net.cpp:226] post_bn needs backward computation.\nI1206 09:11:00.427790 23310 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1206 09:11:00.427795 23310 net.cpp:226] L3_b6_brc3_conv needs backward computation.\nI1206 09:11:00.427800 23310 net.cpp:226] L3_b6_brc3_relu needs backward computation.\nI1206 09:11:00.427805 23310 net.cpp:226] L3_b6_brc3_bn needs backward computation.\nI1206 09:11:00.427810 23310 net.cpp:226] L3_b6_brc2_conv needs backward computation.\nI1206 09:11:00.427815 23310 net.cpp:226] L3_b6_brc2_relu needs backward computation.\nI1206 09:11:00.427820 23310 net.cpp:226] L3_b6_brc2_bn needs backward computation.\nI1206 09:11:00.427825 23310 net.cpp:226] L3_b6_brc1_conv needs backward computation.\nI1206 09:11:00.427830 23310 net.cpp:226] L3_b6_brc1_relu needs backward computation.\nI1206 09:11:00.427835 23310 net.cpp:226] L3_b6_brc1_bn needs backward computation.\nI1206 09:11:00.427840 23310 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_sum_eltwise_0_split needs backward 
computation.\nI1206 09:11:00.427845 23310 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1206 09:11:00.427850 23310 net.cpp:226] L3_b5_brc3_conv needs backward computation.\nI1206 09:11:00.427855 23310 net.cpp:226] L3_b5_brc3_relu needs backward computation.\nI1206 09:11:00.427860 23310 net.cpp:226] L3_b5_brc3_bn needs backward computation.\nI1206 09:11:00.427865 23310 net.cpp:226] L3_b5_brc2_conv needs backward computation.\nI1206 09:11:00.427878 23310 net.cpp:226] L3_b5_brc2_relu needs backward computation.\nI1206 09:11:00.427884 23310 net.cpp:226] L3_b5_brc2_bn needs backward computation.\nI1206 09:11:00.427889 23310 net.cpp:226] L3_b5_brc1_conv needs backward computation.\nI1206 09:11:00.427894 23310 net.cpp:226] L3_b5_brc1_relu needs backward computation.\nI1206 09:11:00.427899 23310 net.cpp:226] L3_b5_brc1_bn needs backward computation.\nI1206 09:11:00.427904 23310 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.427909 23310 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1206 09:11:00.427916 23310 net.cpp:226] L3_b4_brc3_conv needs backward computation.\nI1206 09:11:00.427920 23310 net.cpp:226] L3_b4_brc3_relu needs backward computation.\nI1206 09:11:00.427925 23310 net.cpp:226] L3_b4_brc3_bn needs backward computation.\nI1206 09:11:00.427930 23310 net.cpp:226] L3_b4_brc2_conv needs backward computation.\nI1206 09:11:00.427935 23310 net.cpp:226] L3_b4_brc2_relu needs backward computation.\nI1206 09:11:00.427940 23310 net.cpp:226] L3_b4_brc2_bn needs backward computation.\nI1206 09:11:00.427945 23310 net.cpp:226] L3_b4_brc1_conv needs backward computation.\nI1206 09:11:00.427951 23310 net.cpp:226] L3_b4_brc1_relu needs backward computation.\nI1206 09:11:00.427955 23310 net.cpp:226] L3_b4_brc1_bn needs backward computation.\nI1206 09:11:00.427961 23310 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.427966 23310 
net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1206 09:11:00.427973 23310 net.cpp:226] L3_b3_brc3_conv needs backward computation.\nI1206 09:11:00.427978 23310 net.cpp:226] L3_b3_brc3_relu needs backward computation.\nI1206 09:11:00.427983 23310 net.cpp:226] L3_b3_brc3_bn needs backward computation.\nI1206 09:11:00.427987 23310 net.cpp:226] L3_b3_brc2_conv needs backward computation.\nI1206 09:11:00.427992 23310 net.cpp:226] L3_b3_brc2_relu needs backward computation.\nI1206 09:11:00.427997 23310 net.cpp:226] L3_b3_brc2_bn needs backward computation.\nI1206 09:11:00.428002 23310 net.cpp:226] L3_b3_brc1_conv needs backward computation.\nI1206 09:11:00.428009 23310 net.cpp:226] L3_b3_brc1_relu needs backward computation.\nI1206 09:11:00.428019 23310 net.cpp:226] L3_b3_brc1_bn needs backward computation.\nI1206 09:11:00.428025 23310 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428030 23310 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1206 09:11:00.428035 23310 net.cpp:226] L3_b2_brc3_conv needs backward computation.\nI1206 09:11:00.428040 23310 net.cpp:226] L3_b2_brc3_relu needs backward computation.\nI1206 09:11:00.428045 23310 net.cpp:226] L3_b2_brc3_bn needs backward computation.\nI1206 09:11:00.428050 23310 net.cpp:226] L3_b2_brc2_conv needs backward computation.\nI1206 09:11:00.428056 23310 net.cpp:226] L3_b2_brc2_relu needs backward computation.\nI1206 09:11:00.428061 23310 net.cpp:226] L3_b2_brc2_bn needs backward computation.\nI1206 09:11:00.428066 23310 net.cpp:226] L3_b2_brc1_conv needs backward computation.\nI1206 09:11:00.428071 23310 net.cpp:226] L3_b2_brc1_relu needs backward computation.\nI1206 09:11:00.428076 23310 net.cpp:226] L3_b2_brc1_bn needs backward computation.\nI1206 09:11:00.428082 23310 net.cpp:226] L3_b1_sum_eltwise_top_L3_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428087 23310 net.cpp:226] L3_b1_sum_eltwise needs backward 
computation.\nI1206 09:11:00.428093 23310 net.cpp:226] L3_b1_chanInc_conv needs backward computation.\nI1206 09:11:00.428098 23310 net.cpp:226] L3_b1_brc3_conv needs backward computation.\nI1206 09:11:00.428103 23310 net.cpp:226] L3_b1_brc3_relu needs backward computation.\nI1206 09:11:00.428108 23310 net.cpp:226] L3_b1_brc3_bn needs backward computation.\nI1206 09:11:00.428114 23310 net.cpp:226] L3_b1_brc2_conv needs backward computation.\nI1206 09:11:00.428119 23310 net.cpp:226] L3_b1_brc2_relu needs backward computation.\nI1206 09:11:00.428130 23310 net.cpp:226] L3_b1_brc2_bn needs backward computation.\nI1206 09:11:00.428136 23310 net.cpp:226] L3_b1_brc1_conv needs backward computation.\nI1206 09:11:00.428141 23310 net.cpp:226] L3_b1_brc1_relu needs backward computation.\nI1206 09:11:00.428146 23310 net.cpp:226] L3_b1_brc1_bn needs backward computation.\nI1206 09:11:00.428153 23310 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428158 23310 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1206 09:11:00.428164 23310 net.cpp:226] L2_b6_brc3_conv needs backward computation.\nI1206 09:11:00.428169 23310 net.cpp:226] L2_b6_brc3_relu needs backward computation.\nI1206 09:11:00.428174 23310 net.cpp:226] L2_b6_brc3_bn needs backward computation.\nI1206 09:11:00.428179 23310 net.cpp:226] L2_b6_brc2_conv needs backward computation.\nI1206 09:11:00.428184 23310 net.cpp:226] L2_b6_brc2_relu needs backward computation.\nI1206 09:11:00.428189 23310 net.cpp:226] L2_b6_brc2_bn needs backward computation.\nI1206 09:11:00.428194 23310 net.cpp:226] L2_b6_brc1_conv needs backward computation.\nI1206 09:11:00.428200 23310 net.cpp:226] L2_b6_brc1_relu needs backward computation.\nI1206 09:11:00.428205 23310 net.cpp:226] L2_b6_brc1_bn needs backward computation.\nI1206 09:11:00.428210 23310 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428215 23310 
net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1206 09:11:00.428221 23310 net.cpp:226] L2_b5_brc3_conv needs backward computation.\nI1206 09:11:00.428226 23310 net.cpp:226] L2_b5_brc3_relu needs backward computation.\nI1206 09:11:00.428231 23310 net.cpp:226] L2_b5_brc3_bn needs backward computation.\nI1206 09:11:00.428236 23310 net.cpp:226] L2_b5_brc2_conv needs backward computation.\nI1206 09:11:00.428242 23310 net.cpp:226] L2_b5_brc2_relu needs backward computation.\nI1206 09:11:00.428247 23310 net.cpp:226] L2_b5_brc2_bn needs backward computation.\nI1206 09:11:00.428252 23310 net.cpp:226] L2_b5_brc1_conv needs backward computation.\nI1206 09:11:00.428258 23310 net.cpp:226] L2_b5_brc1_relu needs backward computation.\nI1206 09:11:00.428263 23310 net.cpp:226] L2_b5_brc1_bn needs backward computation.\nI1206 09:11:00.428268 23310 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428273 23310 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1206 09:11:00.428279 23310 net.cpp:226] L2_b4_brc3_conv needs backward computation.\nI1206 09:11:00.428285 23310 net.cpp:226] L2_b4_brc3_relu needs backward computation.\nI1206 09:11:00.428290 23310 net.cpp:226] L2_b4_brc3_bn needs backward computation.\nI1206 09:11:00.428295 23310 net.cpp:226] L2_b4_brc2_conv needs backward computation.\nI1206 09:11:00.428300 23310 net.cpp:226] L2_b4_brc2_relu needs backward computation.\nI1206 09:11:00.428305 23310 net.cpp:226] L2_b4_brc2_bn needs backward computation.\nI1206 09:11:00.428310 23310 net.cpp:226] L2_b4_brc1_conv needs backward computation.\nI1206 09:11:00.428316 23310 net.cpp:226] L2_b4_brc1_relu needs backward computation.\nI1206 09:11:00.428321 23310 net.cpp:226] L2_b4_brc1_bn needs backward computation.\nI1206 09:11:00.428326 23310 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428333 23310 net.cpp:226] L2_b3_sum_eltwise needs backward 
computation.\nI1206 09:11:00.428341 23310 net.cpp:226] L2_b3_brc3_conv needs backward computation.\nI1206 09:11:00.428347 23310 net.cpp:226] L2_b3_brc3_relu needs backward computation.\nI1206 09:11:00.428352 23310 net.cpp:226] L2_b3_brc3_bn needs backward computation.\nI1206 09:11:00.428357 23310 net.cpp:226] L2_b3_brc2_conv needs backward computation.\nI1206 09:11:00.428364 23310 net.cpp:226] L2_b3_brc2_relu needs backward computation.\nI1206 09:11:00.428369 23310 net.cpp:226] L2_b3_brc2_bn needs backward computation.\nI1206 09:11:00.428375 23310 net.cpp:226] L2_b3_brc1_conv needs backward computation.\nI1206 09:11:00.428380 23310 net.cpp:226] L2_b3_brc1_relu needs backward computation.\nI1206 09:11:00.428391 23310 net.cpp:226] L2_b3_brc1_bn needs backward computation.\nI1206 09:11:00.428397 23310 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428403 23310 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1206 09:11:00.428409 23310 net.cpp:226] L2_b2_brc3_conv needs backward computation.\nI1206 09:11:00.428416 23310 net.cpp:226] L2_b2_brc3_relu needs backward computation.\nI1206 09:11:00.428421 23310 net.cpp:226] L2_b2_brc3_bn needs backward computation.\nI1206 09:11:00.428426 23310 net.cpp:226] L2_b2_brc2_conv needs backward computation.\nI1206 09:11:00.428431 23310 net.cpp:226] L2_b2_brc2_relu needs backward computation.\nI1206 09:11:00.428436 23310 net.cpp:226] L2_b2_brc2_bn needs backward computation.\nI1206 09:11:00.428442 23310 net.cpp:226] L2_b2_brc1_conv needs backward computation.\nI1206 09:11:00.428447 23310 net.cpp:226] L2_b2_brc1_relu needs backward computation.\nI1206 09:11:00.428452 23310 net.cpp:226] L2_b2_brc1_bn needs backward computation.\nI1206 09:11:00.428458 23310 net.cpp:226] L2_b1_sum_eltwise_top_L2_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428463 23310 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1206 09:11:00.428469 23310 
net.cpp:226] L2_b1_chanInc_conv needs backward computation.\nI1206 09:11:00.428475 23310 net.cpp:226] L2_b1_brc3_conv needs backward computation.\nI1206 09:11:00.428503 23310 net.cpp:226] L2_b1_brc3_relu needs backward computation.\nI1206 09:11:00.428509 23310 net.cpp:226] L2_b1_brc3_bn needs backward computation.\nI1206 09:11:00.428515 23310 net.cpp:226] L2_b1_brc2_conv needs backward computation.\nI1206 09:11:00.428520 23310 net.cpp:226] L2_b1_brc2_relu needs backward computation.\nI1206 09:11:00.428526 23310 net.cpp:226] L2_b1_brc2_bn needs backward computation.\nI1206 09:11:00.428531 23310 net.cpp:226] L2_b1_brc1_conv needs backward computation.\nI1206 09:11:00.428537 23310 net.cpp:226] L2_b1_brc1_relu needs backward computation.\nI1206 09:11:00.428542 23310 net.cpp:226] L2_b1_brc1_bn needs backward computation.\nI1206 09:11:00.428548 23310 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428553 23310 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1206 09:11:00.428560 23310 net.cpp:226] L1_b6_brc3_conv needs backward computation.\nI1206 09:11:00.428566 23310 net.cpp:226] L1_b6_brc3_relu needs backward computation.\nI1206 09:11:00.428571 23310 net.cpp:226] L1_b6_brc3_bn needs backward computation.\nI1206 09:11:00.428577 23310 net.cpp:226] L1_b6_brc2_conv needs backward computation.\nI1206 09:11:00.428582 23310 net.cpp:226] L1_b6_brc2_relu needs backward computation.\nI1206 09:11:00.428588 23310 net.cpp:226] L1_b6_brc2_bn needs backward computation.\nI1206 09:11:00.428593 23310 net.cpp:226] L1_b6_brc1_conv needs backward computation.\nI1206 09:11:00.428599 23310 net.cpp:226] L1_b6_brc1_relu needs backward computation.\nI1206 09:11:00.428604 23310 net.cpp:226] L1_b6_brc1_bn needs backward computation.\nI1206 09:11:00.428611 23310 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428616 23310 net.cpp:226] L1_b5_sum_eltwise needs backward 
computation.\nI1206 09:11:00.428622 23310 net.cpp:226] L1_b5_brc3_conv needs backward computation.\nI1206 09:11:00.428627 23310 net.cpp:226] L1_b5_brc3_relu needs backward computation.\nI1206 09:11:00.428632 23310 net.cpp:226] L1_b5_brc3_bn needs backward computation.\nI1206 09:11:00.428638 23310 net.cpp:226] L1_b5_brc2_conv needs backward computation.\nI1206 09:11:00.428644 23310 net.cpp:226] L1_b5_brc2_relu needs backward computation.\nI1206 09:11:00.428649 23310 net.cpp:226] L1_b5_brc2_bn needs backward computation.\nI1206 09:11:00.428655 23310 net.cpp:226] L1_b5_brc1_conv needs backward computation.\nI1206 09:11:00.428660 23310 net.cpp:226] L1_b5_brc1_relu needs backward computation.\nI1206 09:11:00.428666 23310 net.cpp:226] L1_b5_brc1_bn needs backward computation.\nI1206 09:11:00.428671 23310 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428684 23310 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1206 09:11:00.428691 23310 net.cpp:226] L1_b4_brc3_conv needs backward computation.\nI1206 09:11:00.428697 23310 net.cpp:226] L1_b4_brc3_relu needs backward computation.\nI1206 09:11:00.428704 23310 net.cpp:226] L1_b4_brc3_bn needs backward computation.\nI1206 09:11:00.428709 23310 net.cpp:226] L1_b4_brc2_conv needs backward computation.\nI1206 09:11:00.428720 23310 net.cpp:226] L1_b4_brc2_relu needs backward computation.\nI1206 09:11:00.428727 23310 net.cpp:226] L1_b4_brc2_bn needs backward computation.\nI1206 09:11:00.428733 23310 net.cpp:226] L1_b4_brc1_conv needs backward computation.\nI1206 09:11:00.428740 23310 net.cpp:226] L1_b4_brc1_relu needs backward computation.\nI1206 09:11:00.428745 23310 net.cpp:226] L1_b4_brc1_bn needs backward computation.\nI1206 09:11:00.428750 23310 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428756 23310 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1206 09:11:00.428762 23310 
net.cpp:226] L1_b3_brc3_conv needs backward computation.\nI1206 09:11:00.428768 23310 net.cpp:226] L1_b3_brc3_relu needs backward computation.\nI1206 09:11:00.428773 23310 net.cpp:226] L1_b3_brc3_bn needs backward computation.\nI1206 09:11:00.428779 23310 net.cpp:226] L1_b3_brc2_conv needs backward computation.\nI1206 09:11:00.428786 23310 net.cpp:226] L1_b3_brc2_relu needs backward computation.\nI1206 09:11:00.428791 23310 net.cpp:226] L1_b3_brc2_bn needs backward computation.\nI1206 09:11:00.428797 23310 net.cpp:226] L1_b3_brc1_conv needs backward computation.\nI1206 09:11:00.428802 23310 net.cpp:226] L1_b3_brc1_relu needs backward computation.\nI1206 09:11:00.428807 23310 net.cpp:226] L1_b3_brc1_bn needs backward computation.\nI1206 09:11:00.428812 23310 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428818 23310 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1206 09:11:00.428824 23310 net.cpp:226] L1_b2_brc3_conv needs backward computation.\nI1206 09:11:00.428830 23310 net.cpp:226] L1_b2_brc3_relu needs backward computation.\nI1206 09:11:00.428835 23310 net.cpp:226] L1_b2_brc3_bn needs backward computation.\nI1206 09:11:00.428841 23310 net.cpp:226] L1_b2_brc2_conv needs backward computation.\nI1206 09:11:00.428848 23310 net.cpp:226] L1_b2_brc2_relu needs backward computation.\nI1206 09:11:00.428853 23310 net.cpp:226] L1_b2_brc2_bn needs backward computation.\nI1206 09:11:00.428858 23310 net.cpp:226] L1_b2_brc1_conv needs backward computation.\nI1206 09:11:00.428864 23310 net.cpp:226] L1_b2_brc1_relu needs backward computation.\nI1206 09:11:00.428874 23310 net.cpp:226] L1_b2_brc1_bn needs backward computation.\nI1206 09:11:00.428880 23310 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_sum_eltwise_0_split needs backward computation.\nI1206 09:11:00.428886 23310 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1206 09:11:00.428892 23310 net.cpp:226] L1_b1_chanInc_conv needs backward 
computation.\nI1206 09:11:00.428899 23310 net.cpp:226] L1_b1_brc3_conv needs backward computation.\nI1206 09:11:00.428905 23310 net.cpp:226] L1_b1_brc3_relu needs backward computation.\nI1206 09:11:00.428910 23310 net.cpp:226] L1_b1_brc3_bn needs backward computation.\nI1206 09:11:00.428915 23310 net.cpp:226] L1_b1_brc2_conv needs backward computation.\nI1206 09:11:00.428921 23310 net.cpp:226] L1_b1_brc2_relu needs backward computation.\nI1206 09:11:00.428927 23310 net.cpp:226] L1_b1_brc2_bn needs backward computation.\nI1206 09:11:00.428933 23310 net.cpp:226] L1_b1_brc1_conv needs backward computation.\nI1206 09:11:00.428938 23310 net.cpp:226] L1_b1_brc1_relu needs backward computation.\nI1206 09:11:00.428944 23310 net.cpp:226] L1_b1_brc1_bn needs backward computation.\nI1206 09:11:00.428949 23310 net.cpp:226] pre_conv_top_pre_conv_0_split needs backward computation.\nI1206 09:11:00.428956 23310 net.cpp:226] pre_conv needs backward computation.\nI1206 09:11:00.428961 23310 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1206 09:11:00.428978 23310 net.cpp:228] dataLayer does not need backward computation.\nI1206 09:11:00.428983 23310 net.cpp:270] This network produces output accuracy\nI1206 09:11:00.428990 23310 net.cpp:270] This network produces output loss\nI1206 09:11:00.429244 23310 net.cpp:283] Network initialization done.\nI1206 09:11:00.429955 23310 solver.cpp:60] Solver scaffolding done.\nI1206 09:11:00.660133 23310 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1206 09:11:01.005885 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:01.005936 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:01.012629 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:01.691320 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated 
params: examples/sc/architectures/arch.prototxt\nI1206 09:11:01.691385 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:01.698281 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:02.430146 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:02.430212 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:02.438375 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:02.807389 23310 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1206 09:11:03.256198 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:03.256249 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:03.266088 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:04.183459 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:04.183527 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:04.192731 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:05.203635 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:05.203694 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1206 09:11:05.215448 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:06.340507 23310 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1206 09:11:06.340565 23310 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI1206 09:11:06.353310 23310 data_layer.cpp:41] output data size: 85,3,32,32\nI1206 09:11:06.418817 23316 blocking_queue.cpp:50] Waiting for data\nI1206 09:11:07.000993 23310 parallel.cpp:425] Starting Optimization\nI1206 09:11:07.002635 23310 solver.cpp:279] Solving Cifar-ResNeXt\nI1206 09:11:07.002653 23310 solver.cpp:280] Learning Rate Policy: triangular\nI1206 09:11:07.006574 23310 solver.cpp:337] Iteration 0, Testing net (#0)\nI1206 09:13:47.146050 23310 solver.cpp:404]     Test net output #0: accuracy = 0.0715294\nI1206 09:13:47.146337 23310 solver.cpp:404]     Test net output #1: loss = 2.47178 (* 1 = 2.47178 loss)\nI1206 09:13:52.390269 23310 solver.cpp:228] Iteration 0, loss = 2.41556\nI1206 09:13:52.390313 23310 solver.cpp:244]     Train net output #0: accuracy = 0.0352941\nI1206 09:13:52.390331 23310 solver.cpp:244]     Train net output #1: loss = 2.41556 (* 1 = 2.41556 loss)\nI1206 09:13:52.480248 23310 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1206 09:20:44.276162 23310 solver.cpp:337] Iteration 100, Testing net (#0)\nI1206 09:23:23.794638 23310 solver.cpp:404]     Test net output #0: accuracy = 0.270176\nI1206 09:23:23.794898 23310 solver.cpp:404]     Test net output #1: loss = 2.01344 (* 1 = 2.01344 loss)\nI1206 09:23:27.774777 23310 solver.cpp:228] Iteration 100, loss = 2.11749\nI1206 09:23:27.774812 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 09:23:27.774828 23310 solver.cpp:244]     Train net output #1: loss = 2.11749 (* 1 = 2.11749 loss)\nI1206 09:23:27.944974 23310 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1206 09:30:17.860397 23310 solver.cpp:337] Iteration 200, Testing net (#0)\nI1206 09:32:57.521674 23310 solver.cpp:404]     Test net output #0: accuracy = 0.303941\nI1206 09:32:57.521931 23310 solver.cpp:404]     Test net output #1: loss = 1.91868 (* 1 = 1.91868 loss)\nI1206 09:33:01.502674 23310 solver.cpp:228] Iteration 200, loss = 2.02077\nI1206 09:33:01.502710 23310 solver.cpp:244]     Train net 
output #0: accuracy = 0.341176\nI1206 09:33:01.502727 23310 solver.cpp:244]     Train net output #1: loss = 2.02077 (* 1 = 2.02077 loss)\nI1206 09:33:01.671821 23310 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1206 09:39:51.661667 23310 solver.cpp:337] Iteration 300, Testing net (#0)\nI1206 09:42:31.270326 23310 solver.cpp:404]     Test net output #0: accuracy = 0.331059\nI1206 09:42:31.270582 23310 solver.cpp:404]     Test net output #1: loss = 1.84347 (* 1 = 1.84347 loss)\nI1206 09:42:35.251248 23310 solver.cpp:228] Iteration 300, loss = 1.83176\nI1206 09:42:35.251286 23310 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 09:42:35.251302 23310 solver.cpp:244]     Train net output #1: loss = 1.83176 (* 1 = 1.83176 loss)\nI1206 09:42:35.413503 23310 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1206 09:49:28.515485 23310 solver.cpp:337] Iteration 400, Testing net (#0)\nI1206 09:52:08.972702 23310 solver.cpp:404]     Test net output #0: accuracy = 0.356412\nI1206 09:52:08.972946 23310 solver.cpp:404]     Test net output #1: loss = 1.77902 (* 1 = 1.77902 loss)\nI1206 09:52:12.990684 23310 solver.cpp:228] Iteration 400, loss = 1.79452\nI1206 09:52:12.990720 23310 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 09:52:12.990737 23310 solver.cpp:244]     Train net output #1: loss = 1.79452 (* 1 = 1.79452 loss)\nI1206 09:52:13.120134 23310 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1206 09:59:03.395895 23310 solver.cpp:337] Iteration 500, Testing net (#0)\nI1206 10:01:43.755784 23310 solver.cpp:404]     Test net output #0: accuracy = 0.38253\nI1206 10:01:43.756045 23310 solver.cpp:404]     Test net output #1: loss = 1.73279 (* 1 = 1.73279 loss)\nI1206 10:01:47.772518 23310 solver.cpp:228] Iteration 500, loss = 1.76291\nI1206 10:01:47.772555 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 10:01:47.772572 23310 solver.cpp:244]     Train net output #1: loss = 1.76291 (* 1 = 1.76291 loss)\nI1206 
10:01:47.904585 23310 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1206 10:08:37.794746 23310 solver.cpp:337] Iteration 600, Testing net (#0)\nI1206 10:11:18.182226 23310 solver.cpp:404]     Test net output #0: accuracy = 0.37053\nI1206 10:11:18.182484 23310 solver.cpp:404]     Test net output #1: loss = 1.74372 (* 1 = 1.74372 loss)\nI1206 10:11:22.199300 23310 solver.cpp:228] Iteration 600, loss = 1.71158\nI1206 10:11:22.199335 23310 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1206 10:11:22.199350 23310 solver.cpp:244]     Train net output #1: loss = 1.71158 (* 1 = 1.71158 loss)\nI1206 10:11:22.332166 23310 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1206 10:18:12.181663 23310 solver.cpp:337] Iteration 700, Testing net (#0)\nI1206 10:20:52.489222 23310 solver.cpp:404]     Test net output #0: accuracy = 0.394353\nI1206 10:20:52.489480 23310 solver.cpp:404]     Test net output #1: loss = 1.67234 (* 1 = 1.67234 loss)\nI1206 10:20:56.507510 23310 solver.cpp:228] Iteration 700, loss = 1.61555\nI1206 10:20:56.507544 23310 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 10:20:56.507560 23310 solver.cpp:244]     Train net output #1: loss = 1.61555 (* 1 = 1.61555 loss)\nI1206 10:20:56.642199 23310 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1206 10:27:46.477321 23310 solver.cpp:337] Iteration 800, Testing net (#0)\nI1206 10:30:26.969095 23310 solver.cpp:404]     Test net output #0: accuracy = 0.410589\nI1206 10:30:26.969353 23310 solver.cpp:404]     Test net output #1: loss = 1.65722 (* 1 = 1.65722 loss)\nI1206 10:30:30.987207 23310 solver.cpp:228] Iteration 800, loss = 1.80771\nI1206 10:30:30.987242 23310 solver.cpp:244]     Train net output #0: accuracy = 0.329412\nI1206 10:30:30.987258 23310 solver.cpp:244]     Train net output #1: loss = 1.80771 (* 1 = 1.80771 loss)\nI1206 10:30:31.112915 23310 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1206 10:37:21.199159 23310 solver.cpp:337] Iteration 900, Testing net 
(#0)\nI1206 10:39:59.429239 23310 solver.cpp:404]     Test net output #0: accuracy = 0.406942\nI1206 10:39:59.429499 23310 solver.cpp:404]     Test net output #1: loss = 1.65115 (* 1 = 1.65115 loss)\nI1206 10:40:03.362036 23310 solver.cpp:228] Iteration 900, loss = 1.62681\nI1206 10:40:03.362072 23310 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1206 10:40:03.362089 23310 solver.cpp:244]     Train net output #1: loss = 1.62681 (* 1 = 1.62681 loss)\nI1206 10:40:03.577226 23310 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI1206 10:46:53.145859 23310 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1206 10:49:31.321104 23310 solver.cpp:404]     Test net output #0: accuracy = 0.408942\nI1206 10:49:31.321342 23310 solver.cpp:404]     Test net output #1: loss = 1.66967 (* 1 = 1.66967 loss)\nI1206 10:49:35.254622 23310 solver.cpp:228] Iteration 1000, loss = 1.76952\nI1206 10:49:35.254662 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 10:49:35.254678 23310 solver.cpp:244]     Train net output #1: loss = 1.76952 (* 1 = 1.76952 loss)\nI1206 10:49:35.468070 23310 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1206 10:56:25.325960 23310 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1206 10:59:03.589288 23310 solver.cpp:404]     Test net output #0: accuracy = 0.428706\nI1206 10:59:03.589545 23310 solver.cpp:404]     Test net output #1: loss = 1.61154 (* 1 = 1.61154 loss)\nI1206 10:59:07.522194 23310 solver.cpp:228] Iteration 1100, loss = 1.67737\nI1206 10:59:07.522231 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 10:59:07.522248 23310 solver.cpp:244]     Train net output #1: loss = 1.67737 (* 1 = 1.67737 loss)\nI1206 10:59:07.736239 23310 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1206 11:05:57.373189 23310 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1206 11:08:35.588709 23310 solver.cpp:404]     Test net output #0: accuracy = 0.399589\nI1206 11:08:35.588977 23310 solver.cpp:404]     
Test net output #1: loss = 1.67941 (* 1 = 1.67941 loss)\nI1206 11:08:39.520804 23310 solver.cpp:228] Iteration 1200, loss = 1.71321\nI1206 11:08:39.520841 23310 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 11:08:39.520858 23310 solver.cpp:244]     Train net output #1: loss = 1.71321 (* 1 = 1.71321 loss)\nI1206 11:08:39.735740 23310 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1206 11:15:29.851446 23310 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1206 11:18:08.033895 23310 solver.cpp:404]     Test net output #0: accuracy = 0.416706\nI1206 11:18:08.034152 23310 solver.cpp:404]     Test net output #1: loss = 1.62139 (* 1 = 1.62139 loss)\nI1206 11:18:11.967686 23310 solver.cpp:228] Iteration 1300, loss = 1.44524\nI1206 11:18:11.967723 23310 solver.cpp:244]     Train net output #0: accuracy = 0.494118\nI1206 11:18:11.967739 23310 solver.cpp:244]     Train net output #1: loss = 1.44524 (* 1 = 1.44524 loss)\nI1206 11:18:12.182899 23310 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1206 11:25:02.131934 23310 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1206 11:27:40.263743 23310 solver.cpp:404]     Test net output #0: accuracy = 0.433\nI1206 11:27:40.264070 23310 solver.cpp:404]     Test net output #1: loss = 1.57742 (* 1 = 1.57742 loss)\nI1206 11:27:44.196873 23310 solver.cpp:228] Iteration 1400, loss = 1.36354\nI1206 11:27:44.196909 23310 solver.cpp:244]     Train net output #0: accuracy = 0.505882\nI1206 11:27:44.196925 23310 solver.cpp:244]     Train net output #1: loss = 1.36354 (* 1 = 1.36354 loss)\nI1206 11:27:44.411334 23310 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1206 11:34:34.283516 23310 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1206 11:37:12.385903 23310 solver.cpp:404]     Test net output #0: accuracy = 0.422353\nI1206 11:37:12.386163 23310 solver.cpp:404]     Test net output #1: loss = 1.60763 (* 1 = 1.60763 loss)\nI1206 11:37:16.320169 23310 solver.cpp:228] Iteration 1500, loss = 1.48771\nI1206 
11:37:16.320207 23310 solver.cpp:244]     Train net output #0: accuracy = 0.494118\nI1206 11:37:16.320224 23310 solver.cpp:244]     Train net output #1: loss = 1.48771 (* 1 = 1.48771 loss)\nI1206 11:37:16.537487 23310 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1206 11:44:06.376775 23310 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1206 11:46:44.504190 23310 solver.cpp:404]     Test net output #0: accuracy = 0.423765\nI1206 11:46:44.504449 23310 solver.cpp:404]     Test net output #1: loss = 1.61265 (* 1 = 1.61265 loss)\nI1206 11:46:48.435695 23310 solver.cpp:228] Iteration 1600, loss = 1.7738\nI1206 11:46:48.435734 23310 solver.cpp:244]     Train net output #0: accuracy = 0.364706\nI1206 11:46:48.435748 23310 solver.cpp:244]     Train net output #1: loss = 1.7738 (* 1 = 1.7738 loss)\nI1206 11:46:48.703994 23310 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1206 11:53:38.955339 23310 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1206 11:56:17.029284 23310 solver.cpp:404]     Test net output #0: accuracy = 0.456824\nI1206 11:56:17.029541 23310 solver.cpp:404]     Test net output #1: loss = 1.51931 (* 1 = 1.51931 loss)\nI1206 11:56:20.962296 23310 solver.cpp:228] Iteration 1700, loss = 1.45283\nI1206 11:56:20.962332 23310 solver.cpp:244]     Train net output #0: accuracy = 0.447059\nI1206 11:56:20.962347 23310 solver.cpp:244]     Train net output #1: loss = 1.45283 (* 1 = 1.45283 loss)\nI1206 11:56:21.182821 23310 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1206 12:03:11.460451 23310 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1206 12:05:49.463974 23310 solver.cpp:404]     Test net output #0: accuracy = 0.449706\nI1206 12:05:49.464237 23310 solver.cpp:404]     Test net output #1: loss = 1.56324 (* 1 = 1.56324 loss)\nI1206 12:05:53.396951 23310 solver.cpp:228] Iteration 1800, loss = 1.27782\nI1206 12:05:53.396987 23310 solver.cpp:244]     Train net output #0: accuracy = 0.541176\nI1206 12:05:53.397003 23310 solver.cpp:244]     Train net 
output #1: loss = 1.27782 (* 1 = 1.27782 loss)\nI1206 12:05:53.613664 23310 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1206 12:12:43.544265 23310 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1206 12:15:21.726475 23310 solver.cpp:404]     Test net output #0: accuracy = 0.42153\nI1206 12:15:21.726742 23310 solver.cpp:404]     Test net output #1: loss = 1.60843 (* 1 = 1.60843 loss)\nI1206 12:15:25.659217 23310 solver.cpp:228] Iteration 1900, loss = 1.70596\nI1206 12:15:25.659252 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 12:15:25.659268 23310 solver.cpp:244]     Train net output #1: loss = 1.70596 (* 1 = 1.70596 loss)\nI1206 12:15:25.885033 23310 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1206 12:22:15.961912 23310 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1206 12:24:54.182489 23310 solver.cpp:404]     Test net output #0: accuracy = 0.437412\nI1206 12:24:54.182752 23310 solver.cpp:404]     Test net output #1: loss = 1.56113 (* 1 = 1.56113 loss)\nI1206 12:24:58.114825 23310 solver.cpp:228] Iteration 2000, loss = 1.5938\nI1206 12:24:58.114862 23310 solver.cpp:244]     Train net output #0: accuracy = 0.458824\nI1206 12:24:58.114878 23310 solver.cpp:244]     Train net output #1: loss = 1.5938 (* 1 = 1.5938 loss)\nI1206 12:24:58.361667 23310 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1206 12:31:48.298454 23310 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1206 12:34:26.422979 23310 solver.cpp:404]     Test net output #0: accuracy = 0.417353\nI1206 12:34:26.423256 23310 solver.cpp:404]     Test net output #1: loss = 1.5912 (* 1 = 1.5912 loss)\nI1206 12:34:30.355875 23310 solver.cpp:228] Iteration 2100, loss = 1.47072\nI1206 12:34:30.355914 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 12:34:30.355931 23310 solver.cpp:244]     Train net output #1: loss = 1.47072 (* 1 = 1.47072 loss)\nI1206 12:34:30.570778 23310 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1206 12:41:20.583590 
23310 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1206 12:43:58.701573 23310 solver.cpp:404]     Test net output #0: accuracy = 0.446471\nI1206 12:43:58.701828 23310 solver.cpp:404]     Test net output #1: loss = 1.53265 (* 1 = 1.53265 loss)\nI1206 12:44:02.636821 23310 solver.cpp:228] Iteration 2200, loss = 1.51192\nI1206 12:44:02.636857 23310 solver.cpp:244]     Train net output #0: accuracy = 0.388235\nI1206 12:44:02.636874 23310 solver.cpp:244]     Train net output #1: loss = 1.51192 (* 1 = 1.51192 loss)\nI1206 12:44:02.855737 23310 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1206 12:50:53.129973 23310 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1206 12:53:31.360461 23310 solver.cpp:404]     Test net output #0: accuracy = 0.440412\nI1206 12:53:31.360723 23310 solver.cpp:404]     Test net output #1: loss = 1.55425 (* 1 = 1.55425 loss)\nI1206 12:53:35.293730 23310 solver.cpp:228] Iteration 2300, loss = 1.55433\nI1206 12:53:35.293766 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 12:53:35.293783 23310 solver.cpp:244]     Train net output #1: loss = 1.55433 (* 1 = 1.55433 loss)\nI1206 12:53:35.512909 23310 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1206 13:00:25.582134 23310 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1206 13:03:03.840607 23310 solver.cpp:404]     Test net output #0: accuracy = 0.447883\nI1206 13:03:03.840862 23310 solver.cpp:404]     Test net output #1: loss = 1.49317 (* 1 = 1.49317 loss)\nI1206 13:03:07.773851 23310 solver.cpp:228] Iteration 2400, loss = 1.30561\nI1206 13:03:07.773887 23310 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI1206 13:03:07.773903 23310 solver.cpp:244]     Train net output #1: loss = 1.30561 (* 1 = 1.30561 loss)\nI1206 13:03:07.994338 23310 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1206 13:09:58.118857 23310 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1206 13:12:36.226186 23310 solver.cpp:404]     Test net output #0: accuracy = 
0.270294\nI1206 13:12:36.226431 23310 solver.cpp:404]     Test net output #1: loss = 2.19817 (* 1 = 2.19817 loss)\nI1206 13:12:40.160233 23310 solver.cpp:228] Iteration 2500, loss = 2.11627\nI1206 13:12:40.160269 23310 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1206 13:12:40.160285 23310 solver.cpp:244]     Train net output #1: loss = 2.11627 (* 1 = 2.11627 loss)\nI1206 13:12:40.377547 23310 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1206 13:19:30.444991 23310 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1206 13:22:08.701645 23310 solver.cpp:404]     Test net output #0: accuracy = 0.38653\nI1206 13:22:08.701892 23310 solver.cpp:404]     Test net output #1: loss = 1.6688 (* 1 = 1.6688 loss)\nI1206 13:22:12.634651 23310 solver.cpp:228] Iteration 2600, loss = 1.73141\nI1206 13:22:12.634690 23310 solver.cpp:244]     Train net output #0: accuracy = 0.317647\nI1206 13:22:12.634706 23310 solver.cpp:244]     Train net output #1: loss = 1.73141 (* 1 = 1.73141 loss)\nI1206 13:22:12.851933 23310 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1206 13:29:03.248239 23310 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1206 13:31:41.477847 23310 solver.cpp:404]     Test net output #0: accuracy = 0.462177\nI1206 13:31:41.478108 23310 solver.cpp:404]     Test net output #1: loss = 1.47115 (* 1 = 1.47115 loss)\nI1206 13:31:45.411098 23310 solver.cpp:228] Iteration 2700, loss = 1.39906\nI1206 13:31:45.411134 23310 solver.cpp:244]     Train net output #0: accuracy = 0.552941\nI1206 13:31:45.411150 23310 solver.cpp:244]     Train net output #1: loss = 1.39906 (* 1 = 1.39906 loss)\nI1206 13:31:45.630568 23310 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1206 13:38:35.981238 23310 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1206 13:41:14.255918 23310 solver.cpp:404]     Test net output #0: accuracy = 0.445706\nI1206 13:41:14.256178 23310 solver.cpp:404]     Test net output #1: loss = 1.53487 (* 1 = 1.53487 loss)\nI1206 13:41:18.187652 23310 
solver.cpp:228] Iteration 2800, loss = 1.35612\nI1206 13:41:18.187690 23310 solver.cpp:244]     Train net output #0: accuracy = 0.529412\nI1206 13:41:18.187706 23310 solver.cpp:244]     Train net output #1: loss = 1.35612 (* 1 = 1.35612 loss)\nI1206 13:41:18.411931 23310 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1206 13:48:08.798988 23310 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1206 13:50:47.088207 23310 solver.cpp:404]     Test net output #0: accuracy = 0.409941\nI1206 13:50:47.088462 23310 solver.cpp:404]     Test net output #1: loss = 1.67005 (* 1 = 1.67005 loss)\nI1206 13:50:51.021644 23310 solver.cpp:228] Iteration 2900, loss = 1.64542\nI1206 13:50:51.021682 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 13:50:51.021697 23310 solver.cpp:244]     Train net output #1: loss = 1.64542 (* 1 = 1.64542 loss)\nI1206 13:50:51.236130 23310 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1206 13:57:41.421059 23310 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1206 14:00:19.646350 23310 solver.cpp:404]     Test net output #0: accuracy = 0.437824\nI1206 14:00:19.646613 23310 solver.cpp:404]     Test net output #1: loss = 1.54288 (* 1 = 1.54288 loss)\nI1206 14:00:23.579325 23310 solver.cpp:228] Iteration 3000, loss = 1.51678\nI1206 14:00:23.579360 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 14:00:23.579375 23310 solver.cpp:244]     Train net output #1: loss = 1.51678 (* 1 = 1.51678 loss)\nI1206 14:00:23.798538 23310 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1206 14:07:13.814028 23310 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1206 14:09:52.072927 23310 solver.cpp:404]     Test net output #0: accuracy = 0.432295\nI1206 14:09:52.073177 23310 solver.cpp:404]     Test net output #1: loss = 1.55787 (* 1 = 1.55787 loss)\nI1206 14:09:56.006049 23310 solver.cpp:228] Iteration 3100, loss = 1.50654\nI1206 14:09:56.006084 23310 solver.cpp:244]     Train net output #0: accuracy = 
0.458824\nI1206 14:09:56.006099 23310 solver.cpp:244]     Train net output #1: loss = 1.50654 (* 1 = 1.50654 loss)\nI1206 14:09:56.223448 23310 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1206 14:16:46.000380 23310 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1206 14:19:24.139415 23310 solver.cpp:404]     Test net output #0: accuracy = 0.44353\nI1206 14:19:24.139672 23310 solver.cpp:404]     Test net output #1: loss = 1.5185 (* 1 = 1.5185 loss)\nI1206 14:19:28.072521 23310 solver.cpp:228] Iteration 3200, loss = 1.48311\nI1206 14:19:28.072557 23310 solver.cpp:244]     Train net output #0: accuracy = 0.494118\nI1206 14:19:28.072572 23310 solver.cpp:244]     Train net output #1: loss = 1.48311 (* 1 = 1.48311 loss)\nI1206 14:19:28.285997 23310 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1206 14:26:18.516283 23310 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1206 14:28:53.373589 23310 solver.cpp:404]     Test net output #0: accuracy = 0.364118\nI1206 14:28:53.373798 23310 solver.cpp:404]     Test net output #1: loss = 1.83509 (* 1 = 1.83509 loss)\nI1206 14:28:57.315704 23310 solver.cpp:228] Iteration 3300, loss = 2.05686\nI1206 14:28:57.315752 23310 solver.cpp:244]     Train net output #0: accuracy = 0.305882\nI1206 14:28:57.315769 23310 solver.cpp:244]     Train net output #1: loss = 2.05686 (* 1 = 2.05686 loss)\nI1206 14:28:57.533975 23310 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1206 14:35:48.361781 23310 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1206 14:38:23.022145 23310 solver.cpp:404]     Test net output #0: accuracy = 0.407236\nI1206 14:38:23.022366 23310 solver.cpp:404]     Test net output #1: loss = 1.62168 (* 1 = 1.62168 loss)\nI1206 14:38:26.963554 23310 solver.cpp:228] Iteration 3400, loss = 1.52623\nI1206 14:38:26.963599 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 14:38:26.963616 23310 solver.cpp:244]     Train net output #1: loss = 1.52623 (* 1 = 1.52623 loss)\nI1206 14:38:27.186440 23310 
sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1206 14:45:18.155062 23310 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1206 14:47:52.986445 23310 solver.cpp:404]     Test net output #0: accuracy = 0.449412\nI1206 14:47:52.986637 23310 solver.cpp:404]     Test net output #1: loss = 1.53778 (* 1 = 1.53778 loss)\nI1206 14:47:56.929623 23310 solver.cpp:228] Iteration 3500, loss = 1.62728\nI1206 14:47:56.929673 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 14:47:56.929690 23310 solver.cpp:244]     Train net output #1: loss = 1.62728 (* 1 = 1.62728 loss)\nI1206 14:47:57.156200 23310 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1206 14:54:48.006916 23310 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1206 14:57:22.832312 23310 solver.cpp:404]     Test net output #0: accuracy = 0.301412\nI1206 14:57:22.832509 23310 solver.cpp:404]     Test net output #1: loss = 2.14323 (* 1 = 2.14323 loss)\nI1206 14:57:26.773655 23310 solver.cpp:228] Iteration 3600, loss = 2.03959\nI1206 14:57:26.773705 23310 solver.cpp:244]     Train net output #0: accuracy = 0.376471\nI1206 14:57:26.773722 23310 solver.cpp:244]     Train net output #1: loss = 2.03959 (* 1 = 2.03959 loss)\nI1206 14:57:26.994880 23310 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1206 15:04:18.079506 23310 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1206 15:06:52.724658 23310 solver.cpp:404]     Test net output #0: accuracy = 0.413059\nI1206 15:06:52.724889 23310 solver.cpp:404]     Test net output #1: loss = 1.62047 (* 1 = 1.62047 loss)\nI1206 15:06:56.665849 23310 solver.cpp:228] Iteration 3700, loss = 1.60464\nI1206 15:06:56.665900 23310 solver.cpp:244]     Train net output #0: accuracy = 0.411765\nI1206 15:06:56.665917 23310 solver.cpp:244]     Train net output #1: loss = 1.60464 (* 1 = 1.60464 loss)\nI1206 15:06:56.892478 23310 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1206 15:13:13.542734 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 
norm 5.13019 > 5) by scale factor 0.974623\nI1206 15:13:46.684336 23310 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1206 15:16:24.804406 23310 solver.cpp:404]     Test net output #0: accuracy = 0.168647\nI1206 15:16:24.804664 23310 solver.cpp:404]     Test net output #1: loss = 4.84322 (* 1 = 4.84322 loss)\nI1206 15:16:28.741173 23310 solver.cpp:228] Iteration 3800, loss = 4.90708\nI1206 15:16:28.741206 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 15:16:28.741222 23310 solver.cpp:244]     Train net output #1: loss = 4.90708 (* 1 = 4.90708 loss)\nI1206 15:16:28.953944 23310 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI1206 15:23:18.703142 23310 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1206 15:25:56.823334 23310 solver.cpp:404]     Test net output #0: accuracy = 0.155941\nI1206 15:25:56.823590 23310 solver.cpp:404]     Test net output #1: loss = 4.88429 (* 1 = 4.88429 loss)\nI1206 15:26:00.757467 23310 solver.cpp:228] Iteration 3900, loss = 4.78868\nI1206 15:26:00.757500 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 15:26:00.757515 23310 solver.cpp:244]     Train net output #1: loss = 4.78868 (* 1 = 4.78868 loss)\nI1206 15:26:00.975317 23310 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1206 15:32:51.203085 23310 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1206 15:35:29.406251 23310 solver.cpp:404]     Test net output #0: accuracy = 0.241059\nI1206 15:35:29.406513 23310 solver.cpp:404]     Test net output #1: loss = 2.15562 (* 1 = 2.15562 loss)\nI1206 15:35:33.341258 23310 solver.cpp:228] Iteration 4000, loss = 2.16009\nI1206 15:35:33.341292 23310 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 15:35:33.341308 23310 solver.cpp:244]     Train net output #1: loss = 2.16009 (* 1 = 2.16009 loss)\nI1206 15:35:33.552366 23310 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1206 15:42:23.033519 23310 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1206 
15:45:01.314066 23310 solver.cpp:404]     Test net output #0: accuracy = 0.277412\nI1206 15:45:01.314304 23310 solver.cpp:404]     Test net output #1: loss = 3.13729 (* 1 = 3.13729 loss)\nI1206 15:45:05.249330 23310 solver.cpp:228] Iteration 4100, loss = 3.59157\nI1206 15:45:05.249366 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 15:45:05.249382 23310 solver.cpp:244]     Train net output #1: loss = 3.59157 (* 1 = 3.59157 loss)\nI1206 15:45:05.459825 23310 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1206 15:51:55.757102 23310 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1206 15:54:33.941248 23310 solver.cpp:404]     Test net output #0: accuracy = 0.269412\nI1206 15:54:33.941514 23310 solver.cpp:404]     Test net output #1: loss = 2.19582 (* 1 = 2.19582 loss)\nI1206 15:54:37.875318 23310 solver.cpp:228] Iteration 4200, loss = 2.2215\nI1206 15:54:37.875355 23310 solver.cpp:244]     Train net output #0: accuracy = 0.294118\nI1206 15:54:37.875371 23310 solver.cpp:244]     Train net output #1: loss = 2.2215 (* 1 = 2.2215 loss)\nI1206 15:54:38.089180 23310 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1206 16:01:28.226025 23310 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1206 16:04:03.835705 23310 solver.cpp:404]     Test net output #0: accuracy = 0.311117\nI1206 16:04:03.835958 23310 solver.cpp:404]     Test net output #1: loss = 2.081 (* 1 = 2.081 loss)\nI1206 16:04:07.792467 23310 solver.cpp:228] Iteration 4300, loss = 1.73478\nI1206 16:04:07.792513 23310 solver.cpp:244]     Train net output #0: accuracy = 0.423529\nI1206 16:04:07.792538 23310 solver.cpp:244]     Train net output #1: loss = 1.73478 (* 1 = 1.73478 loss)\nI1206 16:04:08.002029 23310 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1206 16:10:58.745477 23310 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1206 16:13:33.428211 23310 solver.cpp:404]     Test net output #0: accuracy = 0.147765\nI1206 16:13:33.428458 23310 solver.cpp:404]     Test net output 
#1: loss = 7.4093 (* 1 = 7.4093 loss)\nI1206 16:13:37.372187 23310 solver.cpp:228] Iteration 4400, loss = 7.33089\nI1206 16:13:37.372237 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 16:13:37.372264 23310 solver.cpp:244]     Train net output #1: loss = 7.33089 (* 1 = 7.33089 loss)\nI1206 16:13:37.589203 23310 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1206 16:20:28.572064 23310 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1206 16:23:03.503103 23310 solver.cpp:404]     Test net output #0: accuracy = 0.223059\nI1206 16:23:03.503362 23310 solver.cpp:404]     Test net output #1: loss = 3.3553 (* 1 = 3.3553 loss)\nI1206 16:23:07.478328 23310 solver.cpp:228] Iteration 4500, loss = 3.97826\nI1206 16:23:07.478374 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 16:23:07.478397 23310 solver.cpp:244]     Train net output #1: loss = 3.97826 (* 1 = 3.97826 loss)\nI1206 16:23:07.662673 23310 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1206 16:29:58.740150 23310 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1206 16:32:33.525529 23310 solver.cpp:404]     Test net output #0: accuracy = 0.261529\nI1206 16:32:33.525775 23310 solver.cpp:404]     Test net output #1: loss = 3.08125 (* 1 = 3.08125 loss)\nI1206 16:32:37.486225 23310 solver.cpp:228] Iteration 4600, loss = 2.68017\nI1206 16:32:37.486273 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 16:32:37.486297 23310 solver.cpp:244]     Train net output #1: loss = 2.68017 (* 1 = 2.68017 loss)\nI1206 16:32:37.692560 23310 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1206 16:39:28.781376 23310 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1206 16:42:03.740628 23310 solver.cpp:404]     Test net output #0: accuracy = 0.234941\nI1206 16:42:03.740859 23310 solver.cpp:404]     Test net output #1: loss = 3.72081 (* 1 = 3.72081 loss)\nI1206 16:42:07.682611 23310 solver.cpp:228] Iteration 4700, loss = 4.17606\nI1206 16:42:07.682662 23310 
solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 16:42:07.682687 23310 solver.cpp:244]     Train net output #1: loss = 4.17606 (* 1 = 4.17606 loss)\nI1206 16:42:07.903743 23310 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1206 16:48:58.894748 23310 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1206 16:51:33.855165 23310 solver.cpp:404]     Test net output #0: accuracy = 0.237294\nI1206 16:51:33.855401 23310 solver.cpp:404]     Test net output #1: loss = 3.10141 (* 1 = 3.10141 loss)\nI1206 16:51:37.798635 23310 solver.cpp:228] Iteration 4800, loss = 2.9778\nI1206 16:51:37.798686 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 16:51:37.798712 23310 solver.cpp:244]     Train net output #1: loss = 2.9778 (* 1 = 2.9778 loss)\nI1206 16:51:38.020380 23310 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1206 16:58:28.963491 23310 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1206 17:01:03.852417 23310 solver.cpp:404]     Test net output #0: accuracy = 0.18653\nI1206 17:01:03.852658 23310 solver.cpp:404]     Test net output #1: loss = 5.04686 (* 1 = 5.04686 loss)\nI1206 17:01:07.794661 23310 solver.cpp:228] Iteration 4900, loss = 5.33327\nI1206 17:01:07.794714 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 17:01:07.794739 23310 solver.cpp:244]     Train net output #1: loss = 5.33327 (* 1 = 5.33327 loss)\nI1206 17:01:08.014678 23310 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1206 17:07:59.007175 23310 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1206 17:10:33.881151 23310 solver.cpp:404]     Test net output #0: accuracy = 0.284706\nI1206 17:10:33.881372 23310 solver.cpp:404]     Test net output #1: loss = 2.0421 (* 1 = 2.0421 loss)\nI1206 17:10:37.823036 23310 solver.cpp:228] Iteration 5000, loss = 2.05709\nI1206 17:10:37.823076 23310 solver.cpp:244]     Train net output #0: accuracy = 0.341176\nI1206 17:10:37.823091 23310 solver.cpp:244]     Train net output #1: loss = 2.05709 (* 1 
= 2.05709 loss)\nI1206 17:10:38.043732 23310 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1206 17:17:29.018736 23310 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1206 17:20:03.890704 23310 solver.cpp:404]     Test net output #0: accuracy = 0.234294\nI1206 17:20:03.890930 23310 solver.cpp:404]     Test net output #1: loss = 4.76645 (* 1 = 4.76645 loss)\nI1206 17:20:07.833477 23310 solver.cpp:228] Iteration 5100, loss = 5.98568\nI1206 17:20:07.833518 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 17:20:07.833534 23310 solver.cpp:244]     Train net output #1: loss = 5.98568 (* 1 = 5.98568 loss)\nI1206 17:20:08.052951 23310 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1206 17:26:59.270740 23310 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1206 17:29:34.044343 23310 solver.cpp:404]     Test net output #0: accuracy = 0.208294\nI1206 17:29:34.044540 23310 solver.cpp:404]     Test net output #1: loss = 5.88354 (* 1 = 5.88354 loss)\nI1206 17:29:37.987782 23310 solver.cpp:228] Iteration 5200, loss = 6.69097\nI1206 17:29:37.987824 23310 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 17:29:37.987839 23310 solver.cpp:244]     Train net output #1: loss = 6.69097 (* 1 = 6.69097 loss)\nI1206 17:29:38.209981 23310 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1206 17:36:28.908128 23310 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1206 17:39:07.003430 23310 solver.cpp:404]     Test net output #0: accuracy = 0.153765\nI1206 17:39:07.003697 23310 solver.cpp:404]     Test net output #1: loss = 7.31739 (* 1 = 7.31739 loss)\nI1206 17:39:10.937674 23310 solver.cpp:228] Iteration 5300, loss = 7.98248\nI1206 17:39:10.937712 23310 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 17:39:10.937731 23310 solver.cpp:244]     Train net output #1: loss = 7.98248 (* 1 = 7.98248 loss)\nI1206 17:39:11.150015 23310 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1206 17:40:21.614645 23310 sgd_solver.cpp:152] 
Gradient clipping: scaling down gradients (L2 norm 5.08998 > 5) by scale factor 0.982323\nI1206 17:46:01.150759 23310 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1206 17:48:39.324812 23310 solver.cpp:404]     Test net output #0: accuracy = 0.220294\nI1206 17:48:39.325057 23310 solver.cpp:404]     Test net output #1: loss = 4.5609 (* 1 = 4.5609 loss)\nI1206 17:48:43.258021 23310 solver.cpp:228] Iteration 5400, loss = 4.45029\nI1206 17:48:43.258064 23310 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 17:48:43.258081 23310 solver.cpp:244]     Train net output #1: loss = 4.45029 (* 1 = 4.45029 loss)\nI1206 17:48:43.477560 23310 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1206 17:55:33.886912 23310 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1206 17:58:12.098623 23310 solver.cpp:404]     Test net output #0: accuracy = 0.165176\nI1206 17:58:12.098888 23310 solver.cpp:404]     Test net output #1: loss = 4.8267 (* 1 = 4.8267 loss)\nI1206 17:58:16.031939 23310 solver.cpp:228] Iteration 5500, loss = 4.80606\nI1206 17:58:16.031980 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 17:58:16.031996 23310 solver.cpp:244]     Train net output #1: loss = 4.80606 (* 1 = 4.80606 loss)\nI1206 17:58:16.246440 23310 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1206 18:05:06.199750 23310 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1206 18:07:44.428341 23310 solver.cpp:404]     Test net output #0: accuracy = 0.195883\nI1206 18:07:44.428602 23310 solver.cpp:404]     Test net output #1: loss = 4.5371 (* 1 = 4.5371 loss)\nI1206 18:07:48.362177 23310 solver.cpp:228] Iteration 5600, loss = 4.15177\nI1206 18:07:48.362220 23310 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1206 18:07:48.362234 23310 solver.cpp:244]     Train net output #1: loss = 4.15177 (* 1 = 4.15177 loss)\nI1206 18:07:48.578151 23310 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1206 18:14:38.806980 23310 solver.cpp:337] Iteration 5700, 
Testing net (#0)\nI1206 18:17:16.962507 23310 solver.cpp:404]     Test net output #0: accuracy = 0.204824\nI1206 18:17:16.962782 23310 solver.cpp:404]     Test net output #1: loss = 6.3478 (* 1 = 6.3478 loss)\nI1206 18:17:20.895701 23310 solver.cpp:228] Iteration 5700, loss = 6.85015\nI1206 18:17:20.895745 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1206 18:17:20.895766 23310 solver.cpp:244]     Train net output #1: loss = 6.85015 (* 1 = 6.85015 loss)\nI1206 18:17:21.112793 23310 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1206 18:24:11.741611 23310 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1206 18:26:49.854077 23310 solver.cpp:404]     Test net output #0: accuracy = 0.237471\nI1206 18:26:49.854349 23310 solver.cpp:404]     Test net output #1: loss = 7.04074 (* 1 = 7.04074 loss)\nI1206 18:26:53.787704 23310 solver.cpp:228] Iteration 5800, loss = 7.28941\nI1206 18:26:53.787747 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 18:26:53.787770 23310 solver.cpp:244]     Train net output #1: loss = 7.28941 (* 1 = 7.28941 loss)\nI1206 18:26:54.004771 23310 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1206 18:33:44.129056 23310 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1206 18:36:22.287111 23310 solver.cpp:404]     Test net output #0: accuracy = 0.237471\nI1206 18:36:22.287355 23310 solver.cpp:404]     Test net output #1: loss = 5.60798 (* 1 = 5.60798 loss)\nI1206 18:36:26.220999 23310 solver.cpp:228] Iteration 5900, loss = 4.97445\nI1206 18:36:26.221040 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 18:36:26.221057 23310 solver.cpp:244]     Train net output #1: loss = 4.97445 (* 1 = 4.97445 loss)\nI1206 18:36:26.434674 23310 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1206 18:43:17.065870 23310 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1206 18:45:55.303519 23310 solver.cpp:404]     Test net output #0: accuracy = 0.219941\nI1206 18:45:55.303791 23310 
solver.cpp:404]     Test net output #1: loss = 6.19818 (* 1 = 6.19818 loss)\nI1206 18:45:59.236531 23310 solver.cpp:228] Iteration 6000, loss = 6.73547\nI1206 18:45:59.236572 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 18:45:59.236588 23310 solver.cpp:244]     Train net output #1: loss = 6.73547 (* 1 = 6.73547 loss)\nI1206 18:45:59.452134 23310 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1206 18:52:50.297894 23310 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1206 18:55:28.485728 23310 solver.cpp:404]     Test net output #0: accuracy = 0.222\nI1206 18:55:28.486001 23310 solver.cpp:404]     Test net output #1: loss = 7.24756 (* 1 = 7.24756 loss)\nI1206 18:55:32.426884 23310 solver.cpp:228] Iteration 6100, loss = 7.41372\nI1206 18:55:32.426924 23310 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 18:55:32.426940 23310 solver.cpp:244]     Train net output #1: loss = 7.41372 (* 1 = 7.41372 loss)\nI1206 18:55:32.635891 23310 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1206 19:02:22.716840 23310 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1206 19:05:00.833190 23310 solver.cpp:404]     Test net output #0: accuracy = 0.205118\nI1206 19:05:00.833446 23310 solver.cpp:404]     Test net output #1: loss = 6.36988 (* 1 = 6.36988 loss)\nI1206 19:05:04.766681 23310 solver.cpp:228] Iteration 6200, loss = 7.02007\nI1206 19:05:04.766719 23310 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 19:05:04.766736 23310 solver.cpp:244]     Train net output #1: loss = 7.02007 (* 1 = 7.02007 loss)\nI1206 19:05:04.977788 23310 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1206 19:11:55.292964 23310 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1206 19:14:33.433555 23310 solver.cpp:404]     Test net output #0: accuracy = 0.193588\nI1206 19:14:33.433816 23310 solver.cpp:404]     Test net output #1: loss = 3.35264 (* 1 = 3.35264 loss)\nI1206 19:14:37.366058 23310 solver.cpp:228] Iteration 6300, loss = 
3.31069\nI1206 19:14:37.366096 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 19:14:37.366112 23310 solver.cpp:244]     Train net output #1: loss = 3.31069 (* 1 = 3.31069 loss)\nI1206 19:14:37.586343 23310 sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1206 19:21:27.710475 23310 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1206 19:24:05.922052 23310 solver.cpp:404]     Test net output #0: accuracy = 0.204294\nI1206 19:24:05.922318 23310 solver.cpp:404]     Test net output #1: loss = 4.04557 (* 1 = 4.04557 loss)\nI1206 19:24:09.855859 23310 solver.cpp:228] Iteration 6400, loss = 3.83376\nI1206 19:24:09.855896 23310 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 19:24:09.855912 23310 solver.cpp:244]     Train net output #1: loss = 3.83376 (* 1 = 3.83376 loss)\nI1206 19:24:10.077476 23310 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1206 19:31:00.292589 23310 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1206 19:33:38.515631 23310 solver.cpp:404]     Test net output #0: accuracy = 0.251471\nI1206 19:33:38.515900 23310 solver.cpp:404]     Test net output #1: loss = 7.96434 (* 1 = 7.96434 loss)\nI1206 19:33:42.449609 23310 solver.cpp:228] Iteration 6500, loss = 10.2461\nI1206 19:33:42.449651 23310 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 19:33:42.449669 23310 solver.cpp:244]     Train net output #1: loss = 10.2461 (* 1 = 10.2461 loss)\nI1206 19:33:42.677381 23310 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1206 19:40:33.002430 23310 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1206 19:43:11.138265 23310 solver.cpp:404]     Test net output #0: accuracy = 0.192059\nI1206 19:43:11.138528 23310 solver.cpp:404]     Test net output #1: loss = 6.54691 (* 1 = 6.54691 loss)\nI1206 19:43:15.070544 23310 solver.cpp:228] Iteration 6600, loss = 8.25403\nI1206 19:43:15.070588 23310 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 19:43:15.070605 23310 solver.cpp:244]    
 Train net output #1: loss = 8.25403 (* 1 = 8.25403 loss)\nI1206 19:43:15.290834 23310 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1206 19:50:06.019444 23310 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1206 19:52:44.179672 23310 solver.cpp:404]     Test net output #0: accuracy = 0.196235\nI1206 19:52:44.179946 23310 solver.cpp:404]     Test net output #1: loss = 15.4373 (* 1 = 15.4373 loss)\nI1206 19:52:48.113044 23310 solver.cpp:228] Iteration 6700, loss = 14.0375\nI1206 19:52:48.113090 23310 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1206 19:52:48.113107 23310 solver.cpp:244]     Train net output #1: loss = 14.0375 (* 1 = 14.0375 loss)\nI1206 19:52:48.332016 23310 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI1206 19:59:39.125216 23310 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1206 20:02:17.273495 23310 solver.cpp:404]     Test net output #0: accuracy = 0.224882\nI1206 20:02:17.273762 23310 solver.cpp:404]     Test net output #1: loss = 6.3611 (* 1 = 6.3611 loss)\nI1206 20:02:21.206212 23310 solver.cpp:228] Iteration 6800, loss = 6.0421\nI1206 20:02:21.206254 23310 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1206 20:02:21.206269 23310 solver.cpp:244]     Train net output #1: loss = 6.0421 (* 1 = 6.0421 loss)\nI1206 20:02:21.423280 23310 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1206 20:09:12.162178 23310 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1206 20:11:50.294433 23310 solver.cpp:404]     Test net output #0: accuracy = 0.205294\nI1206 20:11:50.294698 23310 solver.cpp:404]     Test net output #1: loss = 5.99873 (* 1 = 5.99873 loss)\nI1206 20:11:54.227826 23310 solver.cpp:228] Iteration 6900, loss = 5.79362\nI1206 20:11:54.227866 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 20:11:54.227882 23310 solver.cpp:244]     Train net output #1: loss = 5.79362 (* 1 = 5.79362 loss)\nI1206 20:11:54.442790 23310 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1206 
20:18:44.567361 23310 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1206 20:21:22.808226 23310 solver.cpp:404]     Test net output #0: accuracy = 0.19953\nI1206 20:21:22.808480 23310 solver.cpp:404]     Test net output #1: loss = 9.97792 (* 1 = 9.97792 loss)\nI1206 20:21:26.741629 23310 solver.cpp:228] Iteration 7000, loss = 12.8158\nI1206 20:21:26.741677 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 20:21:26.741693 23310 solver.cpp:244]     Train net output #1: loss = 12.8158 (* 1 = 12.8158 loss)\nI1206 20:21:26.958809 23310 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1206 20:27:48.242836 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.27342 > 5) by scale factor 0.948152\nI1206 20:28:17.405648 23310 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1206 20:30:55.624366 23310 solver.cpp:404]     Test net output #0: accuracy = 0.175765\nI1206 20:30:55.624637 23310 solver.cpp:404]     Test net output #1: loss = 16.4391 (* 1 = 16.4391 loss)\nI1206 20:30:59.558521 23310 solver.cpp:228] Iteration 7100, loss = 13.4669\nI1206 20:30:59.558568 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 20:30:59.558585 23310 solver.cpp:244]     Train net output #1: loss = 13.4669 (* 1 = 13.4669 loss)\nI1206 20:30:59.772368 23310 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1206 20:37:49.867930 23310 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1206 20:40:27.996680 23310 solver.cpp:404]     Test net output #0: accuracy = 0.219765\nI1206 20:40:27.996938 23310 solver.cpp:404]     Test net output #1: loss = 10.7434 (* 1 = 10.7434 loss)\nI1206 20:40:31.931669 23310 solver.cpp:228] Iteration 7200, loss = 11.1727\nI1206 20:40:31.931715 23310 solver.cpp:244]     Train net output #0: accuracy = 0.2\nI1206 20:40:31.931732 23310 solver.cpp:244]     Train net output #1: loss = 11.1727 (* 1 = 11.1727 loss)\nI1206 20:40:32.141825 23310 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1206 
20:47:22.740358 23310 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1206 20:50:00.890640 23310 solver.cpp:404]     Test net output #0: accuracy = 0.144647\nI1206 20:50:00.890913 23310 solver.cpp:404]     Test net output #1: loss = 8.43764 (* 1 = 8.43764 loss)\nI1206 20:50:04.824786 23310 solver.cpp:228] Iteration 7300, loss = 7.64425\nI1206 20:50:04.824831 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 20:50:04.824848 23310 solver.cpp:244]     Train net output #1: loss = 7.64425 (* 1 = 7.64425 loss)\nI1206 20:50:05.045163 23310 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1206 20:56:55.751248 23310 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1206 20:59:33.917129 23310 solver.cpp:404]     Test net output #0: accuracy = 0.162941\nI1206 20:59:33.917379 23310 solver.cpp:404]     Test net output #1: loss = 9.89468 (* 1 = 9.89468 loss)\nI1206 20:59:37.851542 23310 solver.cpp:228] Iteration 7400, loss = 10.5357\nI1206 20:59:37.851590 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 20:59:37.851609 23310 solver.cpp:244]     Train net output #1: loss = 10.5357 (* 1 = 10.5357 loss)\nI1206 20:59:38.065990 23310 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1206 21:06:28.276422 23310 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1206 21:09:06.386137 23310 solver.cpp:404]     Test net output #0: accuracy = 0.252118\nI1206 21:09:06.386399 23310 solver.cpp:404]     Test net output #1: loss = 12.0512 (* 1 = 12.0512 loss)\nI1206 21:09:10.318778 23310 solver.cpp:228] Iteration 7500, loss = 10.2016\nI1206 21:09:10.318826 23310 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1206 21:09:10.318845 23310 solver.cpp:244]     Train net output #1: loss = 10.2016 (* 1 = 10.2016 loss)\nI1206 21:09:10.542898 23310 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1206 21:16:01.008110 23310 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1206 21:18:39.155735 23310 solver.cpp:404]     Test net output #0: 
accuracy = 0.149941\nI1206 21:18:39.156016 23310 solver.cpp:404]     Test net output #1: loss = 10.2666 (* 1 = 10.2666 loss)\nI1206 21:18:43.091132 23310 solver.cpp:228] Iteration 7600, loss = 12.8307\nI1206 21:18:43.091181 23310 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1206 21:18:43.091198 23310 solver.cpp:244]     Train net output #1: loss = 12.8307 (* 1 = 12.8307 loss)\nI1206 21:18:43.303365 23310 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1206 21:25:34.031034 23310 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1206 21:28:12.261615 23310 solver.cpp:404]     Test net output #0: accuracy = 0.204\nI1206 21:28:12.261884 23310 solver.cpp:404]     Test net output #1: loss = 6.3681 (* 1 = 6.3681 loss)\nI1206 21:28:16.196400 23310 solver.cpp:228] Iteration 7700, loss = 7.89045\nI1206 21:28:16.196449 23310 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1206 21:28:16.196465 23310 solver.cpp:244]     Train net output #1: loss = 7.89045 (* 1 = 7.89045 loss)\nI1206 21:28:16.409242 23310 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1206 21:35:07.172137 23310 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1206 21:37:45.399410 23310 solver.cpp:404]     Test net output #0: accuracy = 0.210706\nI1206 21:37:45.399682 23310 solver.cpp:404]     Test net output #1: loss = 8.26021 (* 1 = 8.26021 loss)\nI1206 21:37:49.331722 23310 solver.cpp:228] Iteration 7800, loss = 7.63912\nI1206 21:37:49.331773 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 21:37:49.331791 23310 solver.cpp:244]     Train net output #1: loss = 7.63912 (* 1 = 7.63912 loss)\nI1206 21:37:49.555217 23310 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1206 21:44:39.639454 23310 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1206 21:47:17.821369 23310 solver.cpp:404]     Test net output #0: accuracy = 0.245118\nI1206 21:47:17.821626 23310 solver.cpp:404]     Test net output #1: loss = 6.99253 (* 1 = 6.99253 loss)\nI1206 
21:47:21.755821 23310 solver.cpp:228] Iteration 7900, loss = 7.10427\nI1206 21:47:21.755868 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 21:47:21.755885 23310 solver.cpp:244]     Train net output #1: loss = 7.10427 (* 1 = 7.10427 loss)\nI1206 21:47:21.971802 23310 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1206 21:54:12.259737 23310 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1206 21:56:50.434731 23310 solver.cpp:404]     Test net output #0: accuracy = 0.139941\nI1206 21:56:50.435015 23310 solver.cpp:404]     Test net output #1: loss = 14.5482 (* 1 = 14.5482 loss)\nI1206 21:56:54.369611 23310 solver.cpp:228] Iteration 8000, loss = 15.604\nI1206 21:56:54.369657 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 21:56:54.369673 23310 solver.cpp:244]     Train net output #1: loss = 15.604 (* 1 = 15.604 loss)\nI1206 21:56:54.583415 23310 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1206 22:03:45.486397 23310 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1206 22:06:23.693109 23310 solver.cpp:404]     Test net output #0: accuracy = 0.216\nI1206 22:06:23.693353 23310 solver.cpp:404]     Test net output #1: loss = 5.8669 (* 1 = 5.8669 loss)\nI1206 22:06:27.628170 23310 solver.cpp:228] Iteration 8100, loss = 5.61133\nI1206 22:06:27.628217 23310 solver.cpp:244]     Train net output #0: accuracy = 0.270588\nI1206 22:06:27.628235 23310 solver.cpp:244]     Train net output #1: loss = 5.61133 (* 1 = 5.61133 loss)\nI1206 22:06:27.842010 23310 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1206 22:13:18.086146 23310 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1206 22:15:56.201064 23310 solver.cpp:404]     Test net output #0: accuracy = 0.23053\nI1206 22:15:56.201325 23310 solver.cpp:404]     Test net output #1: loss = 5.68423 (* 1 = 5.68423 loss)\nI1206 22:16:00.134930 23310 solver.cpp:228] Iteration 8200, loss = 5.41967\nI1206 22:16:00.134975 23310 solver.cpp:244]     Train net output #0: accuracy = 
0.270588\nI1206 22:16:00.134991 23310 solver.cpp:244]     Train net output #1: loss = 5.41967 (* 1 = 5.41967 loss)\nI1206 22:16:00.350198 23310 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1206 22:22:50.417799 23310 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1206 22:25:28.616456 23310 solver.cpp:404]     Test net output #0: accuracy = 0.193765\nI1206 22:25:28.616704 23310 solver.cpp:404]     Test net output #1: loss = 10.617 (* 1 = 10.617 loss)\nI1206 22:25:32.550335 23310 solver.cpp:228] Iteration 8300, loss = 10.6116\nI1206 22:25:32.550382 23310 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1206 22:25:32.550400 23310 solver.cpp:244]     Train net output #1: loss = 10.6116 (* 1 = 10.6116 loss)\nI1206 22:25:32.764776 23310 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1206 22:32:23.646520 23310 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1206 22:35:01.851210 23310 solver.cpp:404]     Test net output #0: accuracy = 0.202765\nI1206 22:35:01.851477 23310 solver.cpp:404]     Test net output #1: loss = 8.86336 (* 1 = 8.86336 loss)\nI1206 22:35:05.785823 23310 solver.cpp:228] Iteration 8400, loss = 8.58122\nI1206 22:35:05.785872 23310 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 22:35:05.785890 23310 solver.cpp:244]     Train net output #1: loss = 8.58122 (* 1 = 8.58122 loss)\nI1206 22:35:06.000350 23310 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1206 22:41:56.232692 23310 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1206 22:44:34.358573 23310 solver.cpp:404]     Test net output #0: accuracy = 0.152353\nI1206 22:44:34.358849 23310 solver.cpp:404]     Test net output #1: loss = 16.3849 (* 1 = 16.3849 loss)\nI1206 22:44:38.290844 23310 solver.cpp:228] Iteration 8500, loss = 15.0523\nI1206 22:44:38.290892 23310 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1206 22:44:38.290910 23310 solver.cpp:244]     Train net output #1: loss = 15.0523 (* 1 = 15.0523 loss)\nI1206 22:44:38.593539 23310 
sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1206 22:51:29.344390 23310 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1206 22:54:07.446702 23310 solver.cpp:404]     Test net output #0: accuracy = 0.226235\nI1206 22:54:07.446985 23310 solver.cpp:404]     Test net output #1: loss = 9.67921 (* 1 = 9.67921 loss)\nI1206 22:54:11.380281 23310 solver.cpp:228] Iteration 8600, loss = 9.25588\nI1206 22:54:11.380326 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1206 22:54:11.380342 23310 solver.cpp:244]     Train net output #1: loss = 9.25588 (* 1 = 9.25588 loss)\nI1206 22:54:11.597822 23310 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1206 23:01:02.429898 23310 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1206 23:03:40.567970 23310 solver.cpp:404]     Test net output #0: accuracy = 0.188177\nI1206 23:03:40.568238 23310 solver.cpp:404]     Test net output #1: loss = 8.15178 (* 1 = 8.15178 loss)\nI1206 23:03:44.501040 23310 solver.cpp:228] Iteration 8700, loss = 8.84648\nI1206 23:03:44.501087 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1206 23:03:44.501104 23310 solver.cpp:244]     Train net output #1: loss = 8.84648 (* 1 = 8.84648 loss)\nI1206 23:03:44.761986 23310 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1206 23:10:35.322935 23310 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1206 23:13:13.471876 23310 solver.cpp:404]     Test net output #0: accuracy = 0.142\nI1206 23:13:13.472146 23310 solver.cpp:404]     Test net output #1: loss = 9.59598 (* 1 = 9.59598 loss)\nI1206 23:13:17.405961 23310 solver.cpp:228] Iteration 8800, loss = 8.92657\nI1206 23:13:17.406010 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1206 23:13:17.406026 23310 solver.cpp:244]     Train net output #1: loss = 8.92657 (* 1 = 8.92657 loss)\nI1206 23:13:17.619812 23310 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1206 23:20:08.335392 23310 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1206 
23:22:46.559250 23310 solver.cpp:404]     Test net output #0: accuracy = 0.222647\nI1206 23:22:46.559486 23310 solver.cpp:404]     Test net output #1: loss = 7.92441 (* 1 = 7.92441 loss)\nI1206 23:22:50.490649 23310 solver.cpp:228] Iteration 8900, loss = 9.09244\nI1206 23:22:50.490694 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1206 23:22:50.490710 23310 solver.cpp:244]     Train net output #1: loss = 9.09244 (* 1 = 9.09244 loss)\nI1206 23:22:50.715154 23310 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1206 23:29:41.694660 23310 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1206 23:32:16.294574 23310 solver.cpp:404]     Test net output #0: accuracy = 0.111176\nI1206 23:32:16.294837 23310 solver.cpp:404]     Test net output #1: loss = 15.1365 (* 1 = 15.1365 loss)\nI1206 23:32:20.236519 23310 solver.cpp:228] Iteration 9000, loss = 14.8373\nI1206 23:32:20.236558 23310 solver.cpp:244]     Train net output #0: accuracy = 0.0823529\nI1206 23:32:20.236575 23310 solver.cpp:244]     Train net output #1: loss = 14.8373 (* 1 = 14.8373 loss)\nI1206 23:32:20.460945 23310 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1206 23:39:11.364223 23310 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1206 23:41:45.998899 23310 solver.cpp:404]     Test net output #0: accuracy = 0.205883\nI1206 23:41:45.999152 23310 solver.cpp:404]     Test net output #1: loss = 7.92198 (* 1 = 7.92198 loss)\nI1206 23:41:49.941762 23310 solver.cpp:228] Iteration 9100, loss = 8.18936\nI1206 23:41:49.941797 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1206 23:41:49.941815 23310 solver.cpp:244]     Train net output #1: loss = 8.18936 (* 1 = 8.18936 loss)\nI1206 23:41:50.162773 23310 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1206 23:48:40.996512 23310 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1206 23:51:15.754716 23310 solver.cpp:404]     Test net output #0: accuracy = 0.192235\nI1206 23:51:15.754940 23310 solver.cpp:404]     Test net 
output #1: loss = 13.4885 (* 1 = 13.4885 loss)\nI1206 23:51:19.695780 23310 solver.cpp:228] Iteration 9200, loss = 14.4126\nI1206 23:51:19.695818 23310 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1206 23:51:19.695835 23310 solver.cpp:244]     Train net output #1: loss = 14.4126 (* 1 = 14.4126 loss)\nI1206 23:51:19.922680 23310 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1206 23:58:10.952849 23310 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1207 00:00:45.684553 23310 solver.cpp:404]     Test net output #0: accuracy = 0.192647\nI1207 00:00:45.684792 23310 solver.cpp:404]     Test net output #1: loss = 7.95462 (* 1 = 7.95462 loss)\nI1207 00:00:49.627598 23310 solver.cpp:228] Iteration 9300, loss = 7.93179\nI1207 00:00:49.627635 23310 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 00:00:49.627651 23310 solver.cpp:244]     Train net output #1: loss = 7.93179 (* 1 = 7.93179 loss)\nI1207 00:00:49.848927 23310 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1207 00:07:41.219856 23310 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1207 00:10:15.907610 23310 solver.cpp:404]     Test net output #0: accuracy = 0.219353\nI1207 00:10:15.907811 23310 solver.cpp:404]     Test net output #1: loss = 12.4048 (* 1 = 12.4048 loss)\nI1207 00:10:19.848109 23310 solver.cpp:228] Iteration 9400, loss = 13.6479\nI1207 00:10:19.848150 23310 solver.cpp:244]     Train net output #0: accuracy = 0.211765\nI1207 00:10:19.848168 23310 solver.cpp:244]     Train net output #1: loss = 13.6479 (* 1 = 13.6479 loss)\nI1207 00:10:20.066707 23310 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1207 00:17:10.878371 23310 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1207 00:19:45.475977 23310 solver.cpp:404]     Test net output #0: accuracy = 0.191647\nI1207 00:19:45.476186 23310 solver.cpp:404]     Test net output #1: loss = 13.2629 (* 1 = 13.2629 loss)\nI1207 00:19:49.417023 23310 solver.cpp:228] Iteration 9500, loss = 14.3537\nI1207 
00:19:49.417062 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 00:19:49.417078 23310 solver.cpp:244]     Train net output #1: loss = 14.3537 (* 1 = 14.3537 loss)\nI1207 00:19:49.634656 23310 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1207 00:26:40.646586 23310 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1207 00:29:15.330135 23310 solver.cpp:404]     Test net output #0: accuracy = 0.169177\nI1207 00:29:15.330363 23310 solver.cpp:404]     Test net output #1: loss = 11.573 (* 1 = 11.573 loss)\nI1207 00:29:19.272001 23310 solver.cpp:228] Iteration 9600, loss = 10.2561\nI1207 00:29:19.272039 23310 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 00:29:19.272056 23310 solver.cpp:244]     Train net output #1: loss = 10.2561 (* 1 = 10.2561 loss)\nI1207 00:29:19.493821 23310 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1207 00:36:10.445475 23310 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1207 00:38:45.380460 23310 solver.cpp:404]     Test net output #0: accuracy = 0.227471\nI1207 00:38:45.380687 23310 solver.cpp:404]     Test net output #1: loss = 11.9273 (* 1 = 11.9273 loss)\nI1207 00:38:49.322299 23310 solver.cpp:228] Iteration 9700, loss = 12.1172\nI1207 00:38:49.322337 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 00:38:49.322353 23310 solver.cpp:244]     Train net output #1: loss = 12.1172 (* 1 = 12.1172 loss)\nI1207 00:38:49.546175 23310 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1207 00:45:41.528174 23310 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1207 00:48:15.737574 23310 solver.cpp:404]     Test net output #0: accuracy = 0.158235\nI1207 00:48:15.737817 23310 solver.cpp:404]     Test net output #1: loss = 11.2199 (* 1 = 11.2199 loss)\nI1207 00:48:19.680011 23310 solver.cpp:228] Iteration 9800, loss = 12.1439\nI1207 00:48:19.680058 23310 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 00:48:19.680074 23310 solver.cpp:244]     Train net 
output #1: loss = 12.1439 (* 1 = 12.1439 loss)\nI1207 00:48:19.898993 23310 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1207 00:55:10.667888 23310 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1207 00:57:48.879072 23310 solver.cpp:404]     Test net output #0: accuracy = 0.134\nI1207 00:57:48.879391 23310 solver.cpp:404]     Test net output #1: loss = 21.4811 (* 1 = 21.4811 loss)\nI1207 00:57:52.816607 23310 solver.cpp:228] Iteration 9900, loss = 23.8806\nI1207 00:57:52.816653 23310 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 00:57:52.816670 23310 solver.cpp:244]     Train net output #1: loss = 23.8806 (* 1 = 23.8806 loss)\nI1207 00:57:53.044688 23310 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1207 01:04:44.884016 23310 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1207 01:07:23.084533 23310 solver.cpp:404]     Test net output #0: accuracy = 0.196706\nI1207 01:07:23.084791 23310 solver.cpp:404]     Test net output #1: loss = 13.231 (* 1 = 13.231 loss)\nI1207 01:07:27.023424 23310 solver.cpp:228] Iteration 10000, loss = 15.8279\nI1207 01:07:27.023465 23310 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 01:07:27.023483 23310 solver.cpp:244]     Train net output #1: loss = 15.8279 (* 1 = 15.8279 loss)\nI1207 01:07:27.246297 23310 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1207 01:13:20.670269 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01578 > 5) by scale factor 0.996854\nI1207 01:14:19.092305 23310 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1207 01:16:57.303671 23310 solver.cpp:404]     Test net output #0: accuracy = 0.207235\nI1207 01:16:57.303957 23310 solver.cpp:404]     Test net output #1: loss = 19.8399 (* 1 = 19.8399 loss)\nI1207 01:17:01.242828 23310 solver.cpp:228] Iteration 10100, loss = 20.8162\nI1207 01:17:01.242871 23310 solver.cpp:244]     Train net output #0: accuracy = 0.141176\nI1207 01:17:01.242889 23310 solver.cpp:244]     Train net 
output #1: loss = 20.8162 (* 1 = 20.8162 loss)\nI1207 01:17:01.465292 23310 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1207 01:23:52.924074 23310 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1207 01:26:31.139277 23310 solver.cpp:404]     Test net output #0: accuracy = 0.184353\nI1207 01:26:31.139561 23310 solver.cpp:404]     Test net output #1: loss = 9.06928 (* 1 = 9.06928 loss)\nI1207 01:26:35.076696 23310 solver.cpp:228] Iteration 10200, loss = 8.68553\nI1207 01:26:35.076740 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 01:26:35.076763 23310 solver.cpp:244]     Train net output #1: loss = 8.68553 (* 1 = 8.68553 loss)\nI1207 01:26:35.307644 23310 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1207 01:33:26.343353 23310 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1207 01:36:04.568068 23310 solver.cpp:404]     Test net output #0: accuracy = 0.197412\nI1207 01:36:04.568346 23310 solver.cpp:404]     Test net output #1: loss = 9.29947 (* 1 = 9.29947 loss)\nI1207 01:36:08.505067 23310 solver.cpp:228] Iteration 10300, loss = 9.02574\nI1207 01:36:08.505110 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 01:36:08.505126 23310 solver.cpp:244]     Train net output #1: loss = 9.02574 (* 1 = 9.02574 loss)\nI1207 01:36:08.735981 23310 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1207 01:39:11.640871 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.12994 > 5) by scale factor 0.97467\nI1207 01:43:00.249289 23310 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1207 01:45:38.375002 23310 solver.cpp:404]     Test net output #0: accuracy = 0.170235\nI1207 01:45:38.375247 23310 solver.cpp:404]     Test net output #1: loss = 17.3396 (* 1 = 17.3396 loss)\nI1207 01:45:42.311473 23310 solver.cpp:228] Iteration 10400, loss = 18.051\nI1207 01:45:42.311518 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 01:45:42.311534 23310 solver.cpp:244]     Train 
net output #1: loss = 18.051 (* 1 = 18.051 loss)\nI1207 01:45:42.532424 23310 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1207 01:52:32.540777 23310 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1207 01:55:10.684346 23310 solver.cpp:404]     Test net output #0: accuracy = 0.131824\nI1207 01:55:10.684623 23310 solver.cpp:404]     Test net output #1: loss = 15.1859 (* 1 = 15.1859 loss)\nI1207 01:55:14.617329 23310 solver.cpp:228] Iteration 10500, loss = 16.0812\nI1207 01:55:14.617374 23310 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 01:55:14.617391 23310 solver.cpp:244]     Train net output #1: loss = 16.0812 (* 1 = 16.0812 loss)\nI1207 01:55:14.871577 23310 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1207 02:02:05.183135 23310 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1207 02:04:43.336349 23310 solver.cpp:404]     Test net output #0: accuracy = 0.155059\nI1207 02:04:43.336616 23310 solver.cpp:404]     Test net output #1: loss = 11.2825 (* 1 = 11.2825 loss)\nI1207 02:04:47.268137 23310 solver.cpp:228] Iteration 10600, loss = 10.0044\nI1207 02:04:47.268180 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 02:04:47.268198 23310 solver.cpp:244]     Train net output #1: loss = 10.0044 (* 1 = 10.0044 loss)\nI1207 02:04:47.489487 23310 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1207 02:11:38.246459 23310 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1207 02:14:16.413497 23310 solver.cpp:404]     Test net output #0: accuracy = 0.205647\nI1207 02:14:16.413781 23310 solver.cpp:404]     Test net output #1: loss = 17.2615 (* 1 = 17.2615 loss)\nI1207 02:14:20.346148 23310 solver.cpp:228] Iteration 10700, loss = 14.8209\nI1207 02:14:20.346190 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 02:14:20.346207 23310 solver.cpp:244]     Train net output #1: loss = 14.8209 (* 1 = 14.8209 loss)\nI1207 02:14:20.567071 23310 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1207 
02:21:11.268695 23310 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1207 02:23:49.433848 23310 solver.cpp:404]     Test net output #0: accuracy = 0.151529\nI1207 02:23:49.434121 23310 solver.cpp:404]     Test net output #1: loss = 13.7759 (* 1 = 13.7759 loss)\nI1207 02:23:53.371157 23310 solver.cpp:228] Iteration 10800, loss = 13.469\nI1207 02:23:53.371202 23310 solver.cpp:244]     Train net output #0: accuracy = 0.188235\nI1207 02:23:53.371218 23310 solver.cpp:244]     Train net output #1: loss = 13.469 (* 1 = 13.469 loss)\nI1207 02:23:53.595371 23310 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1207 02:30:44.807057 23310 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1207 02:33:23.062207 23310 solver.cpp:404]     Test net output #0: accuracy = 0.219647\nI1207 02:33:23.062486 23310 solver.cpp:404]     Test net output #1: loss = 9.0623 (* 1 = 9.0623 loss)\nI1207 02:33:26.998210 23310 solver.cpp:228] Iteration 10900, loss = 8.62484\nI1207 02:33:26.998252 23310 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 02:33:26.998268 23310 solver.cpp:244]     Train net output #1: loss = 8.62484 (* 1 = 8.62484 loss)\nI1207 02:33:27.226692 23310 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1207 02:40:18.836019 23310 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1207 02:42:57.012552 23310 solver.cpp:404]     Test net output #0: accuracy = 0.216765\nI1207 02:42:57.012835 23310 solver.cpp:404]     Test net output #1: loss = 19.6413 (* 1 = 19.6413 loss)\nI1207 02:43:00.949002 23310 solver.cpp:228] Iteration 11000, loss = 20.234\nI1207 02:43:00.949048 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1207 02:43:00.949065 23310 solver.cpp:244]     Train net output #1: loss = 20.234 (* 1 = 20.234 loss)\nI1207 02:43:01.173096 23310 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1207 02:49:52.172128 23310 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1207 02:52:30.371948 23310 solver.cpp:404]     Test net output #0: 
accuracy = 0.159294\nI1207 02:52:30.372220 23310 solver.cpp:404]     Test net output #1: loss = 15.9554 (* 1 = 15.9554 loss)\nI1207 02:52:34.308681 23310 solver.cpp:228] Iteration 11100, loss = 14.1021\nI1207 02:52:34.308728 23310 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 02:52:34.308745 23310 solver.cpp:244]     Train net output #1: loss = 14.1021 (* 1 = 14.1021 loss)\nI1207 02:52:34.533792 23310 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1207 02:59:25.573674 23310 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1207 03:02:03.819268 23310 solver.cpp:404]     Test net output #0: accuracy = 0.160765\nI1207 03:02:03.819516 23310 solver.cpp:404]     Test net output #1: loss = 10.4081 (* 1 = 10.4081 loss)\nI1207 03:02:07.756664 23310 solver.cpp:228] Iteration 11200, loss = 10.3083\nI1207 03:02:07.756711 23310 solver.cpp:244]     Train net output #0: accuracy = 0.129412\nI1207 03:02:07.756726 23310 solver.cpp:244]     Train net output #1: loss = 10.3083 (* 1 = 10.3083 loss)\nI1207 03:02:07.985306 23310 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI1207 03:08:59.104704 23310 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1207 03:11:37.354358 23310 solver.cpp:404]     Test net output #0: accuracy = 0.129059\nI1207 03:11:37.354636 23310 solver.cpp:404]     Test net output #1: loss = 26.8063 (* 1 = 26.8063 loss)\nI1207 03:11:41.291190 23310 solver.cpp:228] Iteration 11300, loss = 23.8434\nI1207 03:11:41.291236 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 03:11:41.291252 23310 solver.cpp:244]     Train net output #1: loss = 23.8434 (* 1 = 23.8434 loss)\nI1207 03:11:41.517709 23310 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1207 03:18:33.347784 23310 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1207 03:21:11.521646 23310 solver.cpp:404]     Test net output #0: accuracy = 0.183059\nI1207 03:21:11.521927 23310 solver.cpp:404]     Test net output #1: loss = 20.4201 (* 1 = 20.4201 loss)\nI1207 
03:21:15.459039 23310 solver.cpp:228] Iteration 11400, loss = 22.342\nI1207 03:21:15.459085 23310 solver.cpp:244]     Train net output #0: accuracy = 0.235294\nI1207 03:21:15.459101 23310 solver.cpp:244]     Train net output #1: loss = 22.342 (* 1 = 22.342 loss)\nI1207 03:21:15.680148 23310 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1207 03:28:07.910475 23310 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1207 03:30:45.985642 23310 solver.cpp:404]     Test net output #0: accuracy = 0.182471\nI1207 03:30:45.985910 23310 solver.cpp:404]     Test net output #1: loss = 27.535 (* 1 = 27.535 loss)\nI1207 03:30:49.921571 23310 solver.cpp:228] Iteration 11500, loss = 24.6093\nI1207 03:30:49.921615 23310 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 03:30:49.921633 23310 solver.cpp:244]     Train net output #1: loss = 24.6093 (* 1 = 24.6093 loss)\nI1207 03:30:50.153594 23310 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1207 03:37:41.287015 23310 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1207 03:40:19.400199 23310 solver.cpp:404]     Test net output #0: accuracy = 0.163118\nI1207 03:40:19.400477 23310 solver.cpp:404]     Test net output #1: loss = 15.3933 (* 1 = 15.3933 loss)\nI1207 03:40:23.337093 23310 solver.cpp:228] Iteration 11600, loss = 16.2677\nI1207 03:40:23.337138 23310 solver.cpp:244]     Train net output #0: accuracy = 0.0941176\nI1207 03:40:23.337157 23310 solver.cpp:244]     Train net output #1: loss = 16.2677 (* 1 = 16.2677 loss)\nI1207 03:40:23.563398 23310 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1207 03:40:36.047610 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.07223 > 5) by scale factor 0.985759\nI1207 03:47:14.609967 23310 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1207 03:49:52.790277 23310 solver.cpp:404]     Test net output #0: accuracy = 0.208706\nI1207 03:49:52.790552 23310 solver.cpp:404]     Test net output #1: loss = 13.262 (* 1 = 13.262 loss)\nI1207 
03:49:56.726384 23310 solver.cpp:228] Iteration 11700, loss = 12.7632\nI1207 03:49:56.726431 23310 solver.cpp:244]     Train net output #0: accuracy = 0.258824\nI1207 03:49:56.726449 23310 solver.cpp:244]     Train net output #1: loss = 12.7632 (* 1 = 12.7632 loss)\nI1207 03:49:56.954475 23310 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1207 03:56:48.870270 23310 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1207 03:59:27.062458 23310 solver.cpp:404]     Test net output #0: accuracy = 0.171471\nI1207 03:59:27.062736 23310 solver.cpp:404]     Test net output #1: loss = 18.5421 (* 1 = 18.5421 loss)\nI1207 03:59:31.000051 23310 solver.cpp:228] Iteration 11800, loss = 15.4415\nI1207 03:59:31.000097 23310 solver.cpp:244]     Train net output #0: accuracy = 0.223529\nI1207 03:59:31.000115 23310 solver.cpp:244]     Train net output #1: loss = 15.4415 (* 1 = 15.4415 loss)\nI1207 03:59:31.224056 23310 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1207 04:06:23.302062 23310 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1207 04:09:01.481961 23310 solver.cpp:404]     Test net output #0: accuracy = 0.184588\nI1207 04:09:01.482244 23310 solver.cpp:404]     Test net output #1: loss = 15.2606 (* 1 = 15.2606 loss)\nI1207 04:09:05.418967 23310 solver.cpp:228] Iteration 11900, loss = 16.0562\nI1207 04:09:05.419014 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 04:09:05.419033 23310 solver.cpp:244]     Train net output #1: loss = 16.0562 (* 1 = 16.0562 loss)\nI1207 04:09:05.642347 23310 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1207 04:15:57.464900 23310 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1207 04:18:35.637308 23310 solver.cpp:404]     Test net output #0: accuracy = 0.128941\nI1207 04:18:35.637586 23310 solver.cpp:404]     Test net output #1: loss = 24.1107 (* 1 = 24.1107 loss)\nI1207 04:18:39.574585 23310 solver.cpp:228] Iteration 12000, loss = 25.8095\nI1207 04:18:39.574633 23310 solver.cpp:244]     Train net 
output #0: accuracy = 0.0941176\nI1207 04:18:39.574651 23310 solver.cpp:244]     Train net output #1: loss = 25.8095 (* 1 = 25.8095 loss)\nI1207 04:18:39.799753 23310 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1207 04:21:42.616828 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.29639 > 5) by scale factor 0.944038\nI1207 04:25:31.485015 23310 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1207 04:28:09.515730 23310 solver.cpp:404]     Test net output #0: accuracy = 0.131353\nI1207 04:28:09.516005 23310 solver.cpp:404]     Test net output #1: loss = 23.3646 (* 1 = 23.3646 loss)\nI1207 04:28:13.452342 23310 solver.cpp:228] Iteration 12100, loss = 19.8366\nI1207 04:28:13.452389 23310 solver.cpp:244]     Train net output #0: accuracy = 0.176471\nI1207 04:28:13.452406 23310 solver.cpp:244]     Train net output #1: loss = 19.8366 (* 1 = 19.8366 loss)\nI1207 04:28:13.678804 23310 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1207 04:35:04.677021 23310 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1207 04:37:42.869256 23310 solver.cpp:404]     Test net output #0: accuracy = 0.149\nI1207 04:37:42.869534 23310 solver.cpp:404]     Test net output #1: loss = 17.6079 (* 1 = 17.6079 loss)\nI1207 04:37:46.806535 23310 solver.cpp:228] Iteration 12200, loss = 17.2631\nI1207 04:37:46.806581 23310 solver.cpp:244]     Train net output #0: accuracy = 0.117647\nI1207 04:37:46.806599 23310 solver.cpp:244]     Train net output #1: loss = 17.2631 (* 1 = 17.2631 loss)\nI1207 04:37:47.033457 23310 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1207 04:44:38.138272 23310 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1207 04:47:16.327692 23310 solver.cpp:404]     Test net output #0: accuracy = 0.210412\nI1207 04:47:16.327981 23310 solver.cpp:404]     Test net output #1: loss = 12.8677 (* 1 = 12.8677 loss)\nI1207 04:47:20.262610 23310 solver.cpp:228] Iteration 12300, loss = 12.0965\nI1207 04:47:20.262656 23310 solver.cpp:244]     Train 
net output #0: accuracy = 0.164706\nI1207 04:47:20.262673 23310 solver.cpp:244]     Train net output #1: loss = 12.0965 (* 1 = 12.0965 loss)\nI1207 04:47:20.490731 23310 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1207 04:54:11.866272 23310 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1207 04:56:50.058907 23310 solver.cpp:404]     Test net output #0: accuracy = 0.172647\nI1207 04:56:50.059168 23310 solver.cpp:404]     Test net output #1: loss = 23.8804 (* 1 = 23.8804 loss)\nI1207 04:56:53.995663 23310 solver.cpp:228] Iteration 12400, loss = 23.4947\nI1207 04:56:53.995708 23310 solver.cpp:244]     Train net output #0: accuracy = 0.247059\nI1207 04:56:53.995726 23310 solver.cpp:244]     Train net output #1: loss = 23.4947 (* 1 = 23.4947 loss)\nI1207 04:56:54.221813 23310 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1207 05:03:46.402721 23310 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1207 05:06:24.532783 23310 solver.cpp:404]     Test net output #0: accuracy = 0.185824\nI1207 05:06:24.533066 23310 solver.cpp:404]     Test net output #1: loss = 15.9333 (* 1 = 15.9333 loss)\nI1207 05:06:28.469774 23310 solver.cpp:228] Iteration 12500, loss = 15.5964\nI1207 05:06:28.469821 23310 solver.cpp:244]     Train net output #0: accuracy = 0.282353\nI1207 05:06:28.469838 23310 solver.cpp:244]     Train net output #1: loss = 15.5964 (* 1 = 15.5964 loss)\nI1207 05:06:28.695122 23310 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1207 05:13:20.507033 23310 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1207 05:15:58.583024 23310 solver.cpp:404]     Test net output #0: accuracy = 0.130941\nI1207 05:15:58.583312 23310 solver.cpp:404]     Test net output #1: loss = 27.9181 (* 1 = 27.9181 loss)\nI1207 05:16:02.519614 23310 solver.cpp:228] Iteration 12600, loss = 27.8782\nI1207 05:16:02.519660 23310 solver.cpp:244]     Train net output #0: accuracy = 0.105882\nI1207 05:16:02.519680 23310 solver.cpp:244]     Train net output #1: loss = 27.8782 (* 1 = 
27.8782 loss)\nI1207 05:16:02.748445 23310 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1207 05:17:25.898172 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.01206 > 5) by scale factor 0.997594\nI1207 05:22:54.122979 23310 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1207 05:25:32.295771 23310 solver.cpp:404]     Test net output #0: accuracy = 0.160118\nI1207 05:25:32.296056 23310 solver.cpp:404]     Test net output #1: loss = 13.5558 (* 1 = 13.5558 loss)\nI1207 05:25:36.232089 23310 solver.cpp:228] Iteration 12700, loss = 16.0898\nI1207 05:25:36.232132 23310 solver.cpp:244]     Train net output #0: accuracy = 0.152941\nI1207 05:25:36.232151 23310 solver.cpp:244]     Train net output #1: loss = 16.0898 (* 1 = 16.0898 loss)\nI1207 05:25:36.461782 23310 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1207 05:25:40.633599 23310 sgd_solver.cpp:152] Gradient clipping: scaling down gradients (L2 norm 5.13981 > 5) by scale factor 0.972799\nI1207 05:32:28.147121 23310 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1207 05:35:06.213176 23310 solver.cpp:404]     Test net output #0: accuracy = 0.21053\nI1207 05:35:06.213456 23310 solver.cpp:404]     Test net output #1: loss = 20.4146 (* 1 = 20.4146 loss)\nI1207 05:35:10.149229 23310 solver.cpp:228] Iteration 12800, loss = 19.4313\nI1207 05:35:10.149273 23310 solver.cpp:244]     Train net output #0: accuracy = 0.164706\nI1207 05:35:10.149291 23310 solver.cpp:244]     Train net output #1: loss = 19.4313 (* 1 = 19.4313 loss)\nI1207 05:35:10.380534 23310 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\n"
  },
  {
    "path": "Results/lrRange3SS520kDropuoutFig10a",
    "content": "I1209 19:32:18.246683  1002 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1209 19:32:18.249079  1002 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1209 19:32:18.250296  1002 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1209 19:32:18.251514  1002 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1209 19:32:18.252728  1002 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1209 19:32:18.253954  1002 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1209 19:32:18.255184  1002 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1209 19:32:18.256410  1002 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1209 19:32:18.257637  1002 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1209 19:32:18.693116  1002 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI1209 19:32:18.696131  1002 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1209 19:32:18.744985  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:18.745071  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:18.746403  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1209 19:32:18.746464  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI1209 19:32:18.746484  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI1209 19:32:18.746511  1002 net.cpp:322] The NetState phase (0) 
differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI1209 19:32:18.746533  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI1209 19:32:18.746554  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI1209 19:32:18.746574  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI1209 19:32:18.746594  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI1209 19:32:18.746615  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI1209 19:32:18.746635  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI1209 19:32:18.746656  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI1209 19:32:18.746676  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI1209 19:32:18.746697  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI1209 19:32:18.746714  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI1209 19:32:18.746737  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI1209 19:32:18.746757  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI1209 19:32:18.746778  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI1209 19:32:18.746796  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI1209 19:32:18.746817  
1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI1209 19:32:18.746836  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI1209 19:32:18.746855  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI1209 19:32:18.746889  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI1209 19:32:18.746917  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI1209 19:32:18.746937  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI1209 19:32:18.746956  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI1209 19:32:18.746984  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI1209 19:32:18.747004  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI1209 19:32:18.747023  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI1209 19:32:18.747045  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI1209 19:32:18.747063  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI1209 19:32:18.747086  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI1209 19:32:18.747103  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI1209 19:32:18.747123  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in 
layer L2_b7_cbr1_bn\nI1209 19:32:18.747143  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI1209 19:32:18.747164  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI1209 19:32:18.747184  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI1209 19:32:18.747203  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI1209 19:32:18.747223  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI1209 19:32:18.747242  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI1209 19:32:18.747261  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI1209 19:32:18.747287  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI1209 19:32:18.747308  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI1209 19:32:18.747326  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI1209 19:32:18.747346  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI1209 19:32:18.747366  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI1209 19:32:18.747386  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI1209 19:32:18.747406  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI1209 19:32:18.747426  1002 net.cpp:322] The NetState phase (0) differed 
from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI1209 19:32:18.747447  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI1209 19:32:18.747465  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI1209 19:32:18.747485  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI1209 19:32:18.747519  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI1209 19:32:18.747537  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI1209 19:32:18.747558  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI1209 19:32:18.747579  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI1209 19:32:18.747598  1002 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI1209 19:32:18.749644  1002 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_dropout\"\n  type: 
\"Dropout\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  
name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  
}\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  
scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: 
\"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n  
  use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n 
 top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  
type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  
top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term:\nI1209 19:32:18.752132  1002 layer_factory.hpp:77] Creating layer dataLayer\nI1209 19:32:18.758045  1002 net.cpp:100] Creating Layer dataLayer\nI1209 19:32:18.758129  1002 net.cpp:408] dataLayer -> data_top\nI1209 19:32:18.758337  1002 net.cpp:408] dataLayer -> label\nI1209 19:32:18.758467  1002 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1209 19:32:18.820346  1008 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1209 19:32:18.996662  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:19.006731  1002 net.cpp:150] Setting up dataLayer\nI1209 19:32:19.006826  1002 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1209 19:32:19.006847  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.006858  1002 net.cpp:165] Memory required for data: 1536500\nI1209 19:32:19.006883  1002 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1209 19:32:19.006908  1002 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1209 19:32:19.006922  1002 net.cpp:434] label_dataLayer_1_split <- label\nI1209 19:32:19.006953  1002 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1209 19:32:19.006986  1002 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1209 19:32:19.007091  1002 net.cpp:150] Setting up label_dataLayer_1_split\nI1209 19:32:19.007115  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.007129  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.007139  1002 net.cpp:165] Memory required for data: 1537500\nI1209 19:32:19.007150  1002 layer_factory.hpp:77] Creating layer pre_conv\nI1209 19:32:19.007252  1002 net.cpp:100] Creating Layer 
pre_conv\nI1209 19:32:19.007268  1002 net.cpp:434] pre_conv <- data_top\nI1209 19:32:19.007284  1002 net.cpp:408] pre_conv -> pre_conv_top\nI1209 19:32:19.009516  1002 net.cpp:150] Setting up pre_conv\nI1209 19:32:19.009549  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.009562  1002 net.cpp:165] Memory required for data: 9729500\nI1209 19:32:19.009654  1002 layer_factory.hpp:77] Creating layer pre_bn\nI1209 19:32:19.009760  1009 blocking_queue.cpp:50] Waiting for data\nI1209 19:32:19.009819  1002 net.cpp:100] Creating Layer pre_bn\nI1209 19:32:19.009836  1002 net.cpp:434] pre_bn <- pre_conv_top\nI1209 19:32:19.009861  1002 net.cpp:408] pre_bn -> pre_bn_top\nI1209 19:32:19.010391  1002 net.cpp:150] Setting up pre_bn\nI1209 19:32:19.010417  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.010429  1002 net.cpp:165] Memory required for data: 17921500\nI1209 19:32:19.010458  1002 layer_factory.hpp:77] Creating layer pre_scale\nI1209 19:32:19.010540  1002 net.cpp:100] Creating Layer pre_scale\nI1209 19:32:19.010555  1002 net.cpp:434] pre_scale <- pre_bn_top\nI1209 19:32:19.010571  1002 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1209 19:32:19.010828  1002 layer_factory.hpp:77] Creating layer pre_scale\nI1209 19:32:19.023093  1002 net.cpp:150] Setting up pre_scale\nI1209 19:32:19.023119  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.023130  1002 net.cpp:165] Memory required for data: 26113500\nI1209 19:32:19.023151  1002 layer_factory.hpp:77] Creating layer pre_relu\nI1209 19:32:19.023241  1002 net.cpp:100] Creating Layer pre_relu\nI1209 19:32:19.023260  1002 net.cpp:434] pre_relu <- pre_bn_top\nI1209 19:32:19.023283  1002 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1209 19:32:19.023303  1002 net.cpp:150] Setting up pre_relu\nI1209 19:32:19.023319  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.023329  1002 net.cpp:165] Memory required for data: 34305500\nI1209 19:32:19.023340  
1002 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1209 19:32:19.023356  1002 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1209 19:32:19.023367  1002 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1209 19:32:19.023382  1002 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1209 19:32:19.023401  1002 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1209 19:32:19.023488  1002 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1209 19:32:19.023509  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.023521  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.023531  1002 net.cpp:165] Memory required for data: 50689500\nI1209 19:32:19.023543  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1209 19:32:19.023571  1002 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1209 19:32:19.023583  1002 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1209 19:32:19.023602  1002 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1209 19:32:19.023972  1002 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1209 19:32:19.023993  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.024004  1002 net.cpp:165] Memory required for data: 58881500\nI1209 19:32:19.024034  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1209 19:32:19.024061  1002 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1209 19:32:19.024073  1002 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1209 19:32:19.024091  1002 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1209 19:32:19.024363  1002 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1209 19:32:19.024381  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.024391  1002 net.cpp:165] Memory required for data: 67073500\nI1209 19:32:19.024413  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1209 19:32:19.024432  1002 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1209 
19:32:19.024444  1002 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1209 19:32:19.024464  1002 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.024556  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1209 19:32:19.024746  1002 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1209 19:32:19.024780  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.024791  1002 net.cpp:165] Memory required for data: 75265500\nI1209 19:32:19.024808  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_dropout\nI1209 19:32:19.024909  1002 net.cpp:100] Creating Layer L1_b1_cbr1_dropout\nI1209 19:32:19.024924  1002 net.cpp:434] L1_b1_cbr1_dropout <- L1_b1_cbr1_bn_top\nI1209 19:32:19.024938  1002 net.cpp:395] L1_b1_cbr1_dropout -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.025009  1002 net.cpp:150] Setting up L1_b1_cbr1_dropout\nI1209 19:32:19.025028  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.025038  1002 net.cpp:165] Memory required for data: 83457500\nI1209 19:32:19.025050  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1209 19:32:19.025064  1002 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1209 19:32:19.025075  1002 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1209 19:32:19.025096  1002 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.025117  1002 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1209 19:32:19.025132  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.025141  1002 net.cpp:165] Memory required for data: 91649500\nI1209 19:32:19.025153  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1209 19:32:19.025182  1002 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1209 19:32:19.025194  1002 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1209 19:32:19.025213  1002 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1209 19:32:19.025571  1002 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1209 19:32:19.025591  1002 
net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.025601  1002 net.cpp:165] Memory required for data: 99841500\nI1209 19:32:19.025619  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1209 19:32:19.025641  1002 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1209 19:32:19.025653  1002 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1209 19:32:19.025669  1002 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1209 19:32:19.025979  1002 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1209 19:32:19.026000  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.026010  1002 net.cpp:165] Memory required for data: 108033500\nI1209 19:32:19.026037  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1209 19:32:19.026057  1002 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1209 19:32:19.026069  1002 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1209 19:32:19.026090  1002 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.026185  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1209 19:32:19.026367  1002 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1209 19:32:19.026386  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.026396  1002 net.cpp:165] Memory required for data: 116225500\nI1209 19:32:19.026414  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_dropout\nI1209 19:32:19.026437  1002 net.cpp:100] Creating Layer L1_b1_cbr2_dropout\nI1209 19:32:19.026448  1002 net.cpp:434] L1_b1_cbr2_dropout <- L1_b1_cbr2_bn_top\nI1209 19:32:19.026463  1002 net.cpp:395] L1_b1_cbr2_dropout -> L1_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.026520  1002 net.cpp:150] Setting up L1_b1_cbr2_dropout\nI1209 19:32:19.026536  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.026546  1002 net.cpp:165] Memory required for data: 124417500\nI1209 19:32:19.026559  1002 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1209 19:32:19.026641  1002 net.cpp:100] Creating Layer 
L1_b1_sum_eltwise\nI1209 19:32:19.026657  1002 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1209 19:32:19.026671  1002 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1209 19:32:19.026687  1002 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1209 19:32:19.026811  1002 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1209 19:32:19.026831  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.026841  1002 net.cpp:165] Memory required for data: 132609500\nI1209 19:32:19.026854  1002 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1209 19:32:19.026870  1002 net.cpp:100] Creating Layer L1_b1_relu\nI1209 19:32:19.026880  1002 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1209 19:32:19.026906  1002 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.026926  1002 net.cpp:150] Setting up L1_b1_relu\nI1209 19:32:19.026943  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.026952  1002 net.cpp:165] Memory required for data: 140801500\nI1209 19:32:19.026970  1002 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.026985  1002 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.026996  1002 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1209 19:32:19.027017  1002 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1209 19:32:19.027037  1002 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1209 19:32:19.027112  1002 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.027132  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.027144  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.027153  1002 net.cpp:165] Memory required for data: 157185500\nI1209 19:32:19.027164  1002 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_conv\nI1209 19:32:19.027196  1002 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1209 19:32:19.027210  1002 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1209 19:32:19.027235  1002 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1209 19:32:19.027608  1002 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1209 19:32:19.027628  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.027638  1002 net.cpp:165] Memory required for data: 165377500\nI1209 19:32:19.027655  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1209 19:32:19.027679  1002 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1209 19:32:19.027691  1002 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1209 19:32:19.027712  1002 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1209 19:32:19.028012  1002 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1209 19:32:19.028033  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.028043  1002 net.cpp:165] Memory required for data: 173569500\nI1209 19:32:19.028064  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1209 19:32:19.028082  1002 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1209 19:32:19.028093  1002 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1209 19:32:19.028112  1002 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.028205  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1209 19:32:19.028388  1002 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1209 19:32:19.028411  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.028422  1002 net.cpp:165] Memory required for data: 181761500\nI1209 19:32:19.028440  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_dropout\nI1209 19:32:19.028457  1002 net.cpp:100] Creating Layer L1_b2_cbr1_dropout\nI1209 19:32:19.028468  1002 net.cpp:434] L1_b2_cbr1_dropout <- L1_b2_cbr1_bn_top\nI1209 19:32:19.028483  1002 net.cpp:395] L1_b2_cbr1_dropout -> L1_b2_cbr1_bn_top 
(in-place)\nI1209 19:32:19.028538  1002 net.cpp:150] Setting up L1_b2_cbr1_dropout\nI1209 19:32:19.028558  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.028568  1002 net.cpp:165] Memory required for data: 189953500\nI1209 19:32:19.028578  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1209 19:32:19.028591  1002 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1209 19:32:19.028602  1002 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1209 19:32:19.028622  1002 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.028640  1002 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1209 19:32:19.028656  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.028664  1002 net.cpp:165] Memory required for data: 198145500\nI1209 19:32:19.028676  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1209 19:32:19.028710  1002 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1209 19:32:19.028723  1002 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1209 19:32:19.028743  1002 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1209 19:32:19.029114  1002 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1209 19:32:19.029134  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.029145  1002 net.cpp:165] Memory required for data: 206337500\nI1209 19:32:19.029163  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1209 19:32:19.029186  1002 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1209 19:32:19.029199  1002 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1209 19:32:19.029215  1002 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1209 19:32:19.029508  1002 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1209 19:32:19.029527  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.029537  1002 net.cpp:165] Memory required for data: 214529500\nI1209 19:32:19.029566  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1209 19:32:19.029590  1002 net.cpp:100] Creating Layer 
L1_b2_cbr2_scale\nI1209 19:32:19.029603  1002 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1209 19:32:19.029618  1002 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.029709  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1209 19:32:19.029896  1002 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1209 19:32:19.029914  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.029924  1002 net.cpp:165] Memory required for data: 222721500\nI1209 19:32:19.029942  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_dropout\nI1209 19:32:19.029966  1002 net.cpp:100] Creating Layer L1_b2_cbr2_dropout\nI1209 19:32:19.029979  1002 net.cpp:434] L1_b2_cbr2_dropout <- L1_b2_cbr2_bn_top\nI1209 19:32:19.029994  1002 net.cpp:395] L1_b2_cbr2_dropout -> L1_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.030052  1002 net.cpp:150] Setting up L1_b2_cbr2_dropout\nI1209 19:32:19.030071  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030081  1002 net.cpp:165] Memory required for data: 230913500\nI1209 19:32:19.030091  1002 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1209 19:32:19.030107  1002 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1209 19:32:19.030117  1002 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1209 19:32:19.030130  1002 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1209 19:32:19.030144  1002 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1209 19:32:19.030200  1002 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1209 19:32:19.030220  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030228  1002 net.cpp:165] Memory required for data: 239105500\nI1209 19:32:19.030239  1002 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1209 19:32:19.030254  1002 net.cpp:100] Creating Layer L1_b2_relu\nI1209 19:32:19.030266  1002 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1209 19:32:19.030279  1002 net.cpp:395] L1_b2_relu -> 
L1_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.030297  1002 net.cpp:150] Setting up L1_b2_relu\nI1209 19:32:19.030313  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030323  1002 net.cpp:165] Memory required for data: 247297500\nI1209 19:32:19.030333  1002 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.030346  1002 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.030356  1002 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1209 19:32:19.030380  1002 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1209 19:32:19.030401  1002 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1209 19:32:19.030473  1002 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.030491  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030504  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030524  1002 net.cpp:165] Memory required for data: 263681500\nI1209 19:32:19.030535  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1209 19:32:19.030561  1002 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1209 19:32:19.030575  1002 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1209 19:32:19.030596  1002 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1209 19:32:19.030956  1002 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1209 19:32:19.030982  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.030992  1002 net.cpp:165] Memory required for data: 271873500\nI1209 19:32:19.031010  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1209 19:32:19.031041  1002 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1209 19:32:19.031054  1002 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1209 19:32:19.031070  1002 net.cpp:408] L1_b3_cbr1_bn -> 
L1_b3_cbr1_bn_top\nI1209 19:32:19.031354  1002 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1209 19:32:19.031373  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.031383  1002 net.cpp:165] Memory required for data: 280065500\nI1209 19:32:19.031404  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1209 19:32:19.031422  1002 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1209 19:32:19.031433  1002 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1209 19:32:19.031455  1002 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.031549  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1209 19:32:19.031733  1002 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1209 19:32:19.031752  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.031761  1002 net.cpp:165] Memory required for data: 288257500\nI1209 19:32:19.031780  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_dropout\nI1209 19:32:19.031796  1002 net.cpp:100] Creating Layer L1_b3_cbr1_dropout\nI1209 19:32:19.031807  1002 net.cpp:434] L1_b3_cbr1_dropout <- L1_b3_cbr1_bn_top\nI1209 19:32:19.031827  1002 net.cpp:395] L1_b3_cbr1_dropout -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.031879  1002 net.cpp:150] Setting up L1_b3_cbr1_dropout\nI1209 19:32:19.031903  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.031915  1002 net.cpp:165] Memory required for data: 296449500\nI1209 19:32:19.031925  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1209 19:32:19.031940  1002 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1209 19:32:19.031952  1002 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1209 19:32:19.031975  1002 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.031994  1002 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1209 19:32:19.032009  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.032019  1002 net.cpp:165] Memory required for data: 304641500\nI1209 
19:32:19.032029  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1209 19:32:19.032055  1002 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1209 19:32:19.032068  1002 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1209 19:32:19.032086  1002 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1209 19:32:19.032455  1002 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1209 19:32:19.032474  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.032485  1002 net.cpp:165] Memory required for data: 312833500\nI1209 19:32:19.032502  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1209 19:32:19.032528  1002 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1209 19:32:19.032541  1002 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1209 19:32:19.032560  1002 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1209 19:32:19.032852  1002 net.cpp:150] Setting up L1_b3_cbr2_bn\nI1209 19:32:19.032876  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.032886  1002 net.cpp:165] Memory required for data: 321025500\nI1209 19:32:19.032907  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1209 19:32:19.032924  1002 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1209 19:32:19.032945  1002 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1209 19:32:19.032969  1002 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.033069  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1209 19:32:19.033253  1002 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1209 19:32:19.033272  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033282  1002 net.cpp:165] Memory required for data: 329217500\nI1209 19:32:19.033300  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_dropout\nI1209 19:32:19.033325  1002 net.cpp:100] Creating Layer L1_b3_cbr2_dropout\nI1209 19:32:19.033337  1002 net.cpp:434] L1_b3_cbr2_dropout <- L1_b3_cbr2_bn_top\nI1209 19:32:19.033352  1002 net.cpp:395] L1_b3_cbr2_dropout 
-> L1_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.033403  1002 net.cpp:150] Setting up L1_b3_cbr2_dropout\nI1209 19:32:19.033427  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033437  1002 net.cpp:165] Memory required for data: 337409500\nI1209 19:32:19.033448  1002 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1209 19:32:19.033464  1002 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1209 19:32:19.033476  1002 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1209 19:32:19.033489  1002 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1209 19:32:19.033509  1002 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1209 19:32:19.033562  1002 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1209 19:32:19.033581  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033591  1002 net.cpp:165] Memory required for data: 345601500\nI1209 19:32:19.033601  1002 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1209 19:32:19.033623  1002 net.cpp:100] Creating Layer L1_b3_relu\nI1209 19:32:19.033637  1002 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1209 19:32:19.033650  1002 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.033668  1002 net.cpp:150] Setting up L1_b3_relu\nI1209 19:32:19.033682  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033691  1002 net.cpp:165] Memory required for data: 353793500\nI1209 19:32:19.033701  1002 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.033715  1002 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.033726  1002 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1209 19:32:19.033741  1002 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1209 19:32:19.033761  1002 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1209 19:32:19.033843  1002 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.033861  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033874  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.033884  1002 net.cpp:165] Memory required for data: 370177500\nI1209 19:32:19.033893  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1209 19:32:19.033921  1002 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1209 19:32:19.033934  1002 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1209 19:32:19.033965  1002 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1209 19:32:19.034338  1002 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1209 19:32:19.034358  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.034366  1002 net.cpp:165] Memory required for data: 378369500\nI1209 19:32:19.034385  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1209 19:32:19.034407  1002 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1209 19:32:19.034420  1002 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1209 19:32:19.034436  1002 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1209 19:32:19.034737  1002 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1209 19:32:19.034755  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.034775  1002 net.cpp:165] Memory required for data: 386561500\nI1209 19:32:19.034797  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1209 19:32:19.034816  1002 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1209 19:32:19.034826  1002 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1209 19:32:19.034842  1002 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.034945  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1209 19:32:19.035131  1002 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1209 19:32:19.035151  1002 net.cpp:157] Top shape: 125 16 
32 32 (2048000)\nI1209 19:32:19.035161  1002 net.cpp:165] Memory required for data: 394753500\nI1209 19:32:19.035178  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_dropout\nI1209 19:32:19.035195  1002 net.cpp:100] Creating Layer L1_b4_cbr1_dropout\nI1209 19:32:19.035207  1002 net.cpp:434] L1_b4_cbr1_dropout <- L1_b4_cbr1_bn_top\nI1209 19:32:19.035228  1002 net.cpp:395] L1_b4_cbr1_dropout -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.035279  1002 net.cpp:150] Setting up L1_b4_cbr1_dropout\nI1209 19:32:19.035297  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.035307  1002 net.cpp:165] Memory required for data: 402945500\nI1209 19:32:19.035317  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1209 19:32:19.035337  1002 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1209 19:32:19.035349  1002 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1209 19:32:19.035363  1002 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.035382  1002 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1209 19:32:19.035395  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.035405  1002 net.cpp:165] Memory required for data: 411137500\nI1209 19:32:19.035415  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1209 19:32:19.035436  1002 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1209 19:32:19.035449  1002 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1209 19:32:19.035473  1002 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1209 19:32:19.035837  1002 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1209 19:32:19.035857  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.035867  1002 net.cpp:165] Memory required for data: 419329500\nI1209 19:32:19.035886  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1209 19:32:19.035903  1002 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1209 19:32:19.035914  1002 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1209 19:32:19.035935  1002 
net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1209 19:32:19.036234  1002 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1209 19:32:19.036253  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.036264  1002 net.cpp:165] Memory required for data: 427521500\nI1209 19:32:19.036290  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1209 19:32:19.036309  1002 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1209 19:32:19.036319  1002 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1209 19:32:19.036334  1002 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.036423  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1209 19:32:19.036609  1002 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1209 19:32:19.036628  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.036638  1002 net.cpp:165] Memory required for data: 435713500\nI1209 19:32:19.036656  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_dropout\nI1209 19:32:19.036679  1002 net.cpp:100] Creating Layer L1_b4_cbr2_dropout\nI1209 19:32:19.036691  1002 net.cpp:434] L1_b4_cbr2_dropout <- L1_b4_cbr2_bn_top\nI1209 19:32:19.036711  1002 net.cpp:395] L1_b4_cbr2_dropout -> L1_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.036764  1002 net.cpp:150] Setting up L1_b4_cbr2_dropout\nI1209 19:32:19.036778  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.036783  1002 net.cpp:165] Memory required for data: 443905500\nI1209 19:32:19.036789  1002 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1209 19:32:19.036810  1002 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1209 19:32:19.036818  1002 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1209 19:32:19.036824  1002 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1209 19:32:19.036833  1002 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1209 19:32:19.036865  1002 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1209 19:32:19.036875  1002 
net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.036880  1002 net.cpp:165] Memory required for data: 452097500\nI1209 19:32:19.036885  1002 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1209 19:32:19.036893  1002 net.cpp:100] Creating Layer L1_b4_relu\nI1209 19:32:19.036898  1002 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1209 19:32:19.036909  1002 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.036919  1002 net.cpp:150] Setting up L1_b4_relu\nI1209 19:32:19.036926  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.036931  1002 net.cpp:165] Memory required for data: 460289500\nI1209 19:32:19.036936  1002 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.036942  1002 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.036947  1002 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1209 19:32:19.036954  1002 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1209 19:32:19.036970  1002 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1209 19:32:19.037017  1002 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.037026  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.037034  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.037037  1002 net.cpp:165] Memory required for data: 476673500\nI1209 19:32:19.037042  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1209 19:32:19.037055  1002 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1209 19:32:19.037061  1002 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1209 19:32:19.037073  1002 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1209 19:32:19.037389  1002 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1209 19:32:19.037407  1002 
net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.037417  1002 net.cpp:165] Memory required for data: 484865500\nI1209 19:32:19.037458  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1209 19:32:19.037482  1002 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1209 19:32:19.037494  1002 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1209 19:32:19.037513  1002 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1209 19:32:19.037796  1002 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1209 19:32:19.037813  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.037823  1002 net.cpp:165] Memory required for data: 493057500\nI1209 19:32:19.037845  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1209 19:32:19.037868  1002 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1209 19:32:19.037880  1002 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1209 19:32:19.037895  1002 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.037994  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1209 19:32:19.038182  1002 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1209 19:32:19.038202  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.038211  1002 net.cpp:165] Memory required for data: 501249500\nI1209 19:32:19.038230  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_dropout\nI1209 19:32:19.038252  1002 net.cpp:100] Creating Layer L1_b5_cbr1_dropout\nI1209 19:32:19.038265  1002 net.cpp:434] L1_b5_cbr1_dropout <- L1_b5_cbr1_bn_top\nI1209 19:32:19.038280  1002 net.cpp:395] L1_b5_cbr1_dropout -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.038339  1002 net.cpp:150] Setting up L1_b5_cbr1_dropout\nI1209 19:32:19.038367  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.038378  1002 net.cpp:165] Memory required for data: 509441500\nI1209 19:32:19.038389  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1209 19:32:19.038403  1002 net.cpp:100] Creating Layer 
L1_b5_cbr1_relu\nI1209 19:32:19.038414  1002 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1209 19:32:19.038434  1002 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.038455  1002 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1209 19:32:19.038470  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.038478  1002 net.cpp:165] Memory required for data: 517633500\nI1209 19:32:19.038488  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1209 19:32:19.038511  1002 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1209 19:32:19.038522  1002 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1209 19:32:19.038539  1002 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1209 19:32:19.038897  1002 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1209 19:32:19.038918  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.038928  1002 net.cpp:165] Memory required for data: 525825500\nI1209 19:32:19.038944  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1209 19:32:19.038975  1002 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1209 19:32:19.038988  1002 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1209 19:32:19.039005  1002 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1209 19:32:19.039291  1002 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1209 19:32:19.039311  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.039321  1002 net.cpp:165] Memory required for data: 534017500\nI1209 19:32:19.039341  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1209 19:32:19.039358  1002 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1209 19:32:19.039371  1002 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1209 19:32:19.039391  1002 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.039482  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1209 19:32:19.039667  1002 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1209 19:32:19.039691  1002 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.039702  1002 net.cpp:165] Memory required for data: 542209500\nI1209 19:32:19.039721  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_dropout\nI1209 19:32:19.039738  1002 net.cpp:100] Creating Layer L1_b5_cbr2_dropout\nI1209 19:32:19.039750  1002 net.cpp:434] L1_b5_cbr2_dropout <- L1_b5_cbr2_bn_top\nI1209 19:32:19.039764  1002 net.cpp:395] L1_b5_cbr2_dropout -> L1_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.039829  1002 net.cpp:150] Setting up L1_b5_cbr2_dropout\nI1209 19:32:19.039847  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.039857  1002 net.cpp:165] Memory required for data: 550401500\nI1209 19:32:19.039867  1002 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1209 19:32:19.039899  1002 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1209 19:32:19.039913  1002 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1209 19:32:19.039927  1002 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1209 19:32:19.039942  1002 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1209 19:32:19.040002  1002 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1209 19:32:19.040020  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.040030  1002 net.cpp:165] Memory required for data: 558593500\nI1209 19:32:19.040041  1002 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1209 19:32:19.040057  1002 net.cpp:100] Creating Layer L1_b5_relu\nI1209 19:32:19.040068  1002 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1209 19:32:19.040082  1002 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.040100  1002 net.cpp:150] Setting up L1_b5_relu\nI1209 19:32:19.040114  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.040124  1002 net.cpp:165] Memory required for data: 566785500\nI1209 19:32:19.040146  1002 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.040166  1002 
net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.040179  1002 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1209 19:32:19.040194  1002 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1209 19:32:19.040213  1002 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1209 19:32:19.040294  1002 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.040313  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.040328  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.040338  1002 net.cpp:165] Memory required for data: 583169500\nI1209 19:32:19.040347  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1209 19:32:19.040376  1002 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1209 19:32:19.040390  1002 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1209 19:32:19.040413  1002 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1209 19:32:19.040774  1002 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1209 19:32:19.040793  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.040803  1002 net.cpp:165] Memory required for data: 591361500\nI1209 19:32:19.040822  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1209 19:32:19.040839  1002 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1209 19:32:19.040851  1002 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1209 19:32:19.040868  1002 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1209 19:32:19.041173  1002 net.cpp:150] Setting up L1_b6_cbr1_bn\nI1209 19:32:19.041193  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.041203  1002 net.cpp:165] Memory required for data: 599553500\nI1209 19:32:19.041224  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1209 19:32:19.041246  1002 net.cpp:100] Creating Layer 
L1_b6_cbr1_scale\nI1209 19:32:19.041259  1002 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1209 19:32:19.041273  1002 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.041373  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1209 19:32:19.041556  1002 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1209 19:32:19.041575  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.041585  1002 net.cpp:165] Memory required for data: 607745500\nI1209 19:32:19.041604  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_dropout\nI1209 19:32:19.041620  1002 net.cpp:100] Creating Layer L1_b6_cbr1_dropout\nI1209 19:32:19.041631  1002 net.cpp:434] L1_b6_cbr1_dropout <- L1_b6_cbr1_bn_top\nI1209 19:32:19.041651  1002 net.cpp:395] L1_b6_cbr1_dropout -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.041704  1002 net.cpp:150] Setting up L1_b6_cbr1_dropout\nI1209 19:32:19.041728  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.041738  1002 net.cpp:165] Memory required for data: 615937500\nI1209 19:32:19.041750  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1209 19:32:19.041765  1002 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1209 19:32:19.041774  1002 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1209 19:32:19.041780  1002 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.041791  1002 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1209 19:32:19.041798  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.041803  1002 net.cpp:165] Memory required for data: 624129500\nI1209 19:32:19.041808  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1209 19:32:19.041826  1002 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1209 19:32:19.041831  1002 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1209 19:32:19.041841  1002 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1209 19:32:19.042171  1002 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1209 
19:32:19.042191  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.042210  1002 net.cpp:165] Memory required for data: 632321500\nI1209 19:32:19.042229  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1209 19:32:19.042253  1002 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1209 19:32:19.042264  1002 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1209 19:32:19.042285  1002 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1209 19:32:19.042570  1002 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1209 19:32:19.042589  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.042599  1002 net.cpp:165] Memory required for data: 640513500\nI1209 19:32:19.042619  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1209 19:32:19.042636  1002 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1209 19:32:19.042647  1002 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1209 19:32:19.042667  1002 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.042762  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1209 19:32:19.043115  1002 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1209 19:32:19.043141  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043153  1002 net.cpp:165] Memory required for data: 648705500\nI1209 19:32:19.043171  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_dropout\nI1209 19:32:19.043189  1002 net.cpp:100] Creating Layer L1_b6_cbr2_dropout\nI1209 19:32:19.043200  1002 net.cpp:434] L1_b6_cbr2_dropout <- L1_b6_cbr2_bn_top\nI1209 19:32:19.043215  1002 net.cpp:395] L1_b6_cbr2_dropout -> L1_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.043272  1002 net.cpp:150] Setting up L1_b6_cbr2_dropout\nI1209 19:32:19.043292  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043301  1002 net.cpp:165] Memory required for data: 656897500\nI1209 19:32:19.043311  1002 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1209 19:32:19.043328  1002 net.cpp:100] 
Creating Layer L1_b6_sum_eltwise\nI1209 19:32:19.043347  1002 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1209 19:32:19.043361  1002 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1209 19:32:19.043376  1002 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1209 19:32:19.043431  1002 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1209 19:32:19.043450  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043459  1002 net.cpp:165] Memory required for data: 665089500\nI1209 19:32:19.043469  1002 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1209 19:32:19.043489  1002 net.cpp:100] Creating Layer L1_b6_relu\nI1209 19:32:19.043503  1002 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1209 19:32:19.043516  1002 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.043535  1002 net.cpp:150] Setting up L1_b6_relu\nI1209 19:32:19.043550  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043560  1002 net.cpp:165] Memory required for data: 673281500\nI1209 19:32:19.043568  1002 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.043582  1002 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.043593  1002 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1209 19:32:19.043608  1002 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1209 19:32:19.043627  1002 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1209 19:32:19.043707  1002 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.043726  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043740  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.043750  1002 net.cpp:165] Memory required for data: 689665500\nI1209 19:32:19.043759  1002 
layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1209 19:32:19.043786  1002 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1209 19:32:19.043797  1002 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1209 19:32:19.043831  1002 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1209 19:32:19.044231  1002 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1209 19:32:19.044251  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.044261  1002 net.cpp:165] Memory required for data: 697857500\nI1209 19:32:19.044278  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1209 19:32:19.044302  1002 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1209 19:32:19.044314  1002 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1209 19:32:19.044335  1002 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI1209 19:32:19.044630  1002 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1209 19:32:19.044648  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.044657  1002 net.cpp:165] Memory required for data: 706049500\nI1209 19:32:19.044678  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1209 19:32:19.044697  1002 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1209 19:32:19.044708  1002 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1209 19:32:19.044723  1002 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.044824  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1209 19:32:19.045017  1002 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1209 19:32:19.045037  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.045047  1002 net.cpp:165] Memory required for data: 714241500\nI1209 19:32:19.045064  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_dropout\nI1209 19:32:19.045086  1002 net.cpp:100] Creating Layer L1_b7_cbr1_dropout\nI1209 19:32:19.045099  1002 net.cpp:434] L1_b7_cbr1_dropout <- L1_b7_cbr1_bn_top\nI1209 19:32:19.045112  1002 net.cpp:395] 
L1_b7_cbr1_dropout -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.045164  1002 net.cpp:150] Setting up L1_b7_cbr1_dropout\nI1209 19:32:19.045188  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.045199  1002 net.cpp:165] Memory required for data: 722433500\nI1209 19:32:19.045209  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1209 19:32:19.045223  1002 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1209 19:32:19.045234  1002 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1209 19:32:19.045254  1002 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.045274  1002 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1209 19:32:19.045289  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.045297  1002 net.cpp:165] Memory required for data: 730625500\nI1209 19:32:19.045307  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1209 19:32:19.045328  1002 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1209 19:32:19.045341  1002 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1209 19:32:19.045363  1002 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1209 19:32:19.045737  1002 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1209 19:32:19.045756  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.045766  1002 net.cpp:165] Memory required for data: 738817500\nI1209 19:32:19.045784  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1209 19:32:19.045804  1002 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1209 19:32:19.045815  1002 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1209 19:32:19.045835  1002 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1209 19:32:19.046136  1002 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1209 19:32:19.046159  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.046170  1002 net.cpp:165] Memory required for data: 747009500\nI1209 19:32:19.046191  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1209 19:32:19.046210  
1002 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1209 19:32:19.046221  1002 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1209 19:32:19.046236  1002 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.046327  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1209 19:32:19.046530  1002 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1209 19:32:19.046558  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.046568  1002 net.cpp:165] Memory required for data: 755201500\nI1209 19:32:19.046586  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_dropout\nI1209 19:32:19.046609  1002 net.cpp:100] Creating Layer L1_b7_cbr2_dropout\nI1209 19:32:19.046622  1002 net.cpp:434] L1_b7_cbr2_dropout <- L1_b7_cbr2_bn_top\nI1209 19:32:19.046636  1002 net.cpp:395] L1_b7_cbr2_dropout -> L1_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.046695  1002 net.cpp:150] Setting up L1_b7_cbr2_dropout\nI1209 19:32:19.046715  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.046725  1002 net.cpp:165] Memory required for data: 763393500\nI1209 19:32:19.046736  1002 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1209 19:32:19.046751  1002 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1209 19:32:19.046761  1002 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1209 19:32:19.046780  1002 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1209 19:32:19.046797  1002 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1209 19:32:19.046850  1002 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1209 19:32:19.046869  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.046880  1002 net.cpp:165] Memory required for data: 771585500\nI1209 19:32:19.046890  1002 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1209 19:32:19.046905  1002 net.cpp:100] Creating Layer L1_b7_relu\nI1209 19:32:19.046916  1002 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI1209 19:32:19.046936  1002 
net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.046954  1002 net.cpp:150] Setting up L1_b7_relu\nI1209 19:32:19.046979  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.046989  1002 net.cpp:165] Memory required for data: 779777500\nI1209 19:32:19.047000  1002 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.047014  1002 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.047025  1002 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1209 19:32:19.047040  1002 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1209 19:32:19.047060  1002 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1209 19:32:19.047139  1002 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.047158  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.047171  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.047181  1002 net.cpp:165] Memory required for data: 796161500\nI1209 19:32:19.047193  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1209 19:32:19.047214  1002 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1209 19:32:19.047226  1002 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1209 19:32:19.047250  1002 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1209 19:32:19.047632  1002 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1209 19:32:19.047653  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.047662  1002 net.cpp:165] Memory required for data: 804353500\nI1209 19:32:19.047682  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1209 19:32:19.047699  1002 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1209 19:32:19.047711  1002 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI1209 19:32:19.047734  1002 
net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1209 19:32:19.048014  1002 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1209 19:32:19.048034  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.048043  1002 net.cpp:165] Memory required for data: 812545500\nI1209 19:32:19.048064  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1209 19:32:19.048086  1002 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1209 19:32:19.048099  1002 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1209 19:32:19.048123  1002 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.048218  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1209 19:32:19.048413  1002 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1209 19:32:19.048432  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.048442  1002 net.cpp:165] Memory required for data: 820737500\nI1209 19:32:19.048461  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_dropout\nI1209 19:32:19.048482  1002 net.cpp:100] Creating Layer L1_b8_cbr1_dropout\nI1209 19:32:19.048496  1002 net.cpp:434] L1_b8_cbr1_dropout <- L1_b8_cbr1_bn_top\nI1209 19:32:19.048511  1002 net.cpp:395] L1_b8_cbr1_dropout -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.048570  1002 net.cpp:150] Setting up L1_b8_cbr1_dropout\nI1209 19:32:19.048588  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.048599  1002 net.cpp:165] Memory required for data: 828929500\nI1209 19:32:19.048609  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1209 19:32:19.048624  1002 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1209 19:32:19.048635  1002 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1209 19:32:19.048655  1002 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.048674  1002 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1209 19:32:19.048689  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.048698  1002 net.cpp:165] Memory required for 
data: 837121500\nI1209 19:32:19.048708  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1209 19:32:19.048730  1002 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1209 19:32:19.048743  1002 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1209 19:32:19.048760  1002 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1209 19:32:19.049151  1002 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1209 19:32:19.049172  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.049182  1002 net.cpp:165] Memory required for data: 845313500\nI1209 19:32:19.049199  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1209 19:32:19.049222  1002 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1209 19:32:19.049235  1002 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1209 19:32:19.049252  1002 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1209 19:32:19.049546  1002 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1209 19:32:19.049563  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.049573  1002 net.cpp:165] Memory required for data: 853505500\nI1209 19:32:19.049594  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1209 19:32:19.049612  1002 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1209 19:32:19.049624  1002 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1209 19:32:19.049644  1002 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.049741  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1209 19:32:19.049929  1002 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1209 19:32:19.049953  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.049970  1002 net.cpp:165] Memory required for data: 861697500\nI1209 19:32:19.049989  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_dropout\nI1209 19:32:19.050006  1002 net.cpp:100] Creating Layer L1_b8_cbr2_dropout\nI1209 19:32:19.050017  1002 net.cpp:434] L1_b8_cbr2_dropout <- L1_b8_cbr2_bn_top\nI1209 19:32:19.050032  1002 
net.cpp:395] L1_b8_cbr2_dropout -> L1_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.050091  1002 net.cpp:150] Setting up L1_b8_cbr2_dropout\nI1209 19:32:19.050109  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.050118  1002 net.cpp:165] Memory required for data: 869889500\nI1209 19:32:19.050129  1002 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1209 19:32:19.050149  1002 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1209 19:32:19.050163  1002 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1209 19:32:19.050175  1002 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1209 19:32:19.050190  1002 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1209 19:32:19.050261  1002 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1209 19:32:19.050281  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.050292  1002 net.cpp:165] Memory required for data: 878081500\nI1209 19:32:19.050302  1002 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1209 19:32:19.050315  1002 net.cpp:100] Creating Layer L1_b8_relu\nI1209 19:32:19.050326  1002 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1209 19:32:19.050340  1002 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.050359  1002 net.cpp:150] Setting up L1_b8_relu\nI1209 19:32:19.050372  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.050381  1002 net.cpp:165] Memory required for data: 886273500\nI1209 19:32:19.050392  1002 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.050406  1002 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.050416  1002 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1209 19:32:19.050437  1002 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1209 19:32:19.050458  1002 net.cpp:408] 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1209 19:32:19.050535  1002 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.050554  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.050566  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.050576  1002 net.cpp:165] Memory required for data: 902657500\nI1209 19:32:19.050586  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1209 19:32:19.050613  1002 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1209 19:32:19.050626  1002 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1209 19:32:19.050648  1002 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1209 19:32:19.051034  1002 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1209 19:32:19.051059  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.051070  1002 net.cpp:165] Memory required for data: 910849500\nI1209 19:32:19.051089  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1209 19:32:19.051106  1002 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI1209 19:32:19.051117  1002 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1209 19:32:19.051138  1002 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1209 19:32:19.051443  1002 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1209 19:32:19.051462  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.051472  1002 net.cpp:165] Memory required for data: 919041500\nI1209 19:32:19.051494  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1209 19:32:19.051517  1002 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1209 19:32:19.051528  1002 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1209 19:32:19.051543  1002 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.051636  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1209 19:32:19.051829  1002 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1209 
19:32:19.051849  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.051859  1002 net.cpp:165] Memory required for data: 927233500\nI1209 19:32:19.051877  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_dropout\nI1209 19:32:19.051900  1002 net.cpp:100] Creating Layer L1_b9_cbr1_dropout\nI1209 19:32:19.051913  1002 net.cpp:434] L1_b9_cbr1_dropout <- L1_b9_cbr1_bn_top\nI1209 19:32:19.051928  1002 net.cpp:395] L1_b9_cbr1_dropout -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.051995  1002 net.cpp:150] Setting up L1_b9_cbr1_dropout\nI1209 19:32:19.052013  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.052022  1002 net.cpp:165] Memory required for data: 935425500\nI1209 19:32:19.052033  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1209 19:32:19.052048  1002 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1209 19:32:19.052070  1002 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1209 19:32:19.052090  1002 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.052109  1002 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1209 19:32:19.052126  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.052135  1002 net.cpp:165] Memory required for data: 943617500\nI1209 19:32:19.052146  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1209 19:32:19.052167  1002 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1209 19:32:19.052179  1002 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1209 19:32:19.052196  1002 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1209 19:32:19.052565  1002 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1209 19:32:19.052585  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.052595  1002 net.cpp:165] Memory required for data: 951809500\nI1209 19:32:19.052613  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1209 19:32:19.052635  1002 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1209 19:32:19.052649  1002 net.cpp:434] 
L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1209 19:32:19.052665  1002 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1209 19:32:19.052970  1002 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1209 19:32:19.052990  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.052999  1002 net.cpp:165] Memory required for data: 960001500\nI1209 19:32:19.053055  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1209 19:32:19.053083  1002 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1209 19:32:19.053097  1002 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1209 19:32:19.053112  1002 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.053207  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1209 19:32:19.053395  1002 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1209 19:32:19.053413  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.053423  1002 net.cpp:165] Memory required for data: 968193500\nI1209 19:32:19.053441  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_dropout\nI1209 19:32:19.053457  1002 net.cpp:100] Creating Layer L1_b9_cbr2_dropout\nI1209 19:32:19.053469  1002 net.cpp:434] L1_b9_cbr2_dropout <- L1_b9_cbr2_bn_top\nI1209 19:32:19.053483  1002 net.cpp:395] L1_b9_cbr2_dropout -> L1_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.053536  1002 net.cpp:150] Setting up L1_b9_cbr2_dropout\nI1209 19:32:19.053560  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.053570  1002 net.cpp:165] Memory required for data: 976385500\nI1209 19:32:19.053581  1002 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1209 19:32:19.053596  1002 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1209 19:32:19.053608  1002 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1209 19:32:19.053622  1002 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1209 19:32:19.053642  1002 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1209 19:32:19.053696  1002 
net.cpp:150] Setting up L1_b9_sum_eltwise\nI1209 19:32:19.053715  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.053726  1002 net.cpp:165] Memory required for data: 984577500\nI1209 19:32:19.053736  1002 layer_factory.hpp:77] Creating layer L1_b9_relu\nI1209 19:32:19.053757  1002 net.cpp:100] Creating Layer L1_b9_relu\nI1209 19:32:19.053769  1002 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1209 19:32:19.053784  1002 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.053803  1002 net.cpp:150] Setting up L1_b9_relu\nI1209 19:32:19.053817  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.053827  1002 net.cpp:165] Memory required for data: 992769500\nI1209 19:32:19.053838  1002 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.053853  1002 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.053864  1002 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1209 19:32:19.053889  1002 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1209 19:32:19.053910  1002 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1209 19:32:19.054004  1002 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.054024  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.054039  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.054049  1002 net.cpp:165] Memory required for data: 1009153500\nI1209 19:32:19.054059  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1209 19:32:19.054085  1002 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1209 19:32:19.054098  1002 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1209 19:32:19.054121  1002 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1209 19:32:19.054487  1002 
net.cpp:150] Setting up L2_b1_cbr1_conv\nI1209 19:32:19.054507  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.054517  1002 net.cpp:165] Memory required for data: 1011201500\nI1209 19:32:19.054535  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI1209 19:32:19.054558  1002 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1209 19:32:19.054571  1002 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1209 19:32:19.054591  1002 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1209 19:32:19.054888  1002 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1209 19:32:19.054906  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.054916  1002 net.cpp:165] Memory required for data: 1013249500\nI1209 19:32:19.054937  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1209 19:32:19.054955  1002 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1209 19:32:19.054975  1002 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1209 19:32:19.054991  1002 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.055088  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1209 19:32:19.055275  1002 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1209 19:32:19.055294  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.055304  1002 net.cpp:165] Memory required for data: 1015297500\nI1209 19:32:19.055321  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_dropout\nI1209 19:32:19.055349  1002 net.cpp:100] Creating Layer L2_b1_cbr1_dropout\nI1209 19:32:19.055363  1002 net.cpp:434] L2_b1_cbr1_dropout <- L2_b1_cbr1_bn_top\nI1209 19:32:19.055378  1002 net.cpp:395] L2_b1_cbr1_dropout -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.055425  1002 net.cpp:150] Setting up L2_b1_cbr1_dropout\nI1209 19:32:19.055444  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.055454  1002 net.cpp:165] Memory required for data: 1017345500\nI1209 19:32:19.055462  1002 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_relu\nI1209 19:32:19.055482  1002 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1209 19:32:19.055495  1002 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1209 19:32:19.055510  1002 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.055527  1002 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1209 19:32:19.055541  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.055552  1002 net.cpp:165] Memory required for data: 1019393500\nI1209 19:32:19.055562  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1209 19:32:19.055582  1002 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1209 19:32:19.055593  1002 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1209 19:32:19.055618  1002 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1209 19:32:19.056012  1002 net.cpp:150] Setting up L2_b1_cbr2_conv\nI1209 19:32:19.056031  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.056041  1002 net.cpp:165] Memory required for data: 1021441500\nI1209 19:32:19.056059  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1209 19:32:19.056077  1002 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1209 19:32:19.056089  1002 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1209 19:32:19.056118  1002 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1209 19:32:19.056429  1002 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1209 19:32:19.056448  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.056459  1002 net.cpp:165] Memory required for data: 1023489500\nI1209 19:32:19.056479  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1209 19:32:19.056501  1002 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1209 19:32:19.056514  1002 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1209 19:32:19.056529  1002 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.056622  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1209 19:32:19.056813  1002 net.cpp:150] 
Setting up L2_b1_cbr2_scale\nI1209 19:32:19.056833  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.056843  1002 net.cpp:165] Memory required for data: 1025537500\nI1209 19:32:19.056860  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_dropout\nI1209 19:32:19.056884  1002 net.cpp:100] Creating Layer L2_b1_cbr2_dropout\nI1209 19:32:19.056896  1002 net.cpp:434] L2_b1_cbr2_dropout <- L2_b1_cbr2_bn_top\nI1209 19:32:19.056911  1002 net.cpp:395] L2_b1_cbr2_dropout -> L2_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.056970  1002 net.cpp:150] Setting up L2_b1_cbr2_dropout\nI1209 19:32:19.056989  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.056999  1002 net.cpp:165] Memory required for data: 1027585500\nI1209 19:32:19.057009  1002 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1209 19:32:19.057027  1002 net.cpp:100] Creating Layer L2_b1_pool\nI1209 19:32:19.057040  1002 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1209 19:32:19.057060  1002 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1209 19:32:19.057188  1002 net.cpp:150] Setting up L2_b1_pool\nI1209 19:32:19.057209  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.057217  1002 net.cpp:165] Memory required for data: 1029633500\nI1209 19:32:19.057229  1002 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1209 19:32:19.057245  1002 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1209 19:32:19.057256  1002 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1209 19:32:19.057270  1002 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1209 19:32:19.057291  1002 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1209 19:32:19.057345  1002 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1209 19:32:19.057363  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.057374  1002 net.cpp:165] Memory required for data: 1031681500\nI1209 19:32:19.057384  1002 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1209 19:32:19.057399  
1002 net.cpp:100] Creating Layer L2_b1_relu\nI1209 19:32:19.057410  1002 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1209 19:32:19.057425  1002 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.057442  1002 net.cpp:150] Setting up L2_b1_relu\nI1209 19:32:19.057456  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.057466  1002 net.cpp:165] Memory required for data: 1033729500\nI1209 19:32:19.057476  1002 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1209 19:32:19.057562  1002 net.cpp:100] Creating Layer L2_b1_zeros\nI1209 19:32:19.057584  1002 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1209 19:32:19.061003  1002 net.cpp:150] Setting up L2_b1_zeros\nI1209 19:32:19.061031  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.061043  1002 net.cpp:165] Memory required for data: 1035777500\nI1209 19:32:19.061053  1002 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1209 19:32:19.061092  1002 net.cpp:100] Creating Layer L2_b1_concat0\nI1209 19:32:19.061106  1002 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1209 19:32:19.061121  1002 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1209 19:32:19.061137  1002 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1209 19:32:19.061259  1002 net.cpp:150] Setting up L2_b1_concat0\nI1209 19:32:19.061288  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.061311  1002 net.cpp:165] Memory required for data: 1039873500\nI1209 19:32:19.061324  1002 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.061341  1002 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.061352  1002 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1209 19:32:19.061367  1002 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1209 19:32:19.061386  1002 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1209 
19:32:19.061475  1002 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.061494  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.061507  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.061517  1002 net.cpp:165] Memory required for data: 1048065500\nI1209 19:32:19.061528  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1209 19:32:19.061549  1002 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1209 19:32:19.061561  1002 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1209 19:32:19.061585  1002 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1209 19:32:19.063380  1002 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1209 19:32:19.063401  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.063411  1002 net.cpp:165] Memory required for data: 1052161500\nI1209 19:32:19.063429  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1209 19:32:19.063452  1002 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1209 19:32:19.063467  1002 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1209 19:32:19.063482  1002 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1209 19:32:19.063783  1002 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1209 19:32:19.063802  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.063812  1002 net.cpp:165] Memory required for data: 1056257500\nI1209 19:32:19.063833  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1209 19:32:19.063851  1002 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1209 19:32:19.063863  1002 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1209 19:32:19.063884  1002 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.063987  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1209 19:32:19.064184  1002 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1209 19:32:19.064203  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.064214  1002 net.cpp:165] 
Memory required for data: 1060353500\nI1209 19:32:19.064231  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_dropout\nI1209 19:32:19.064249  1002 net.cpp:100] Creating Layer L2_b2_cbr1_dropout\nI1209 19:32:19.064260  1002 net.cpp:434] L2_b2_cbr1_dropout <- L2_b2_cbr1_bn_top\nI1209 19:32:19.064280  1002 net.cpp:395] L2_b2_cbr1_dropout -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.064327  1002 net.cpp:150] Setting up L2_b2_cbr1_dropout\nI1209 19:32:19.064343  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.064353  1002 net.cpp:165] Memory required for data: 1064449500\nI1209 19:32:19.064363  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1209 19:32:19.064383  1002 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1209 19:32:19.064395  1002 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1209 19:32:19.064410  1002 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.064429  1002 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1209 19:32:19.064443  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.064453  1002 net.cpp:165] Memory required for data: 1068545500\nI1209 19:32:19.064463  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1209 19:32:19.064489  1002 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1209 19:32:19.064502  1002 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1209 19:32:19.064519  1002 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1209 19:32:19.065069  1002 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1209 19:32:19.065098  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.065109  1002 net.cpp:165] Memory required for data: 1072641500\nI1209 19:32:19.065126  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1209 19:32:19.065150  1002 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1209 19:32:19.065162  1002 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1209 19:32:19.065183  1002 net.cpp:408] L2_b2_cbr2_bn -> 
L2_b2_cbr2_bn_top\nI1209 19:32:19.065485  1002 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1209 19:32:19.065505  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.065515  1002 net.cpp:165] Memory required for data: 1076737500\nI1209 19:32:19.065536  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1209 19:32:19.065553  1002 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1209 19:32:19.065565  1002 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1209 19:32:19.065579  1002 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.065680  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1209 19:32:19.065874  1002 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1209 19:32:19.065893  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.065903  1002 net.cpp:165] Memory required for data: 1080833500\nI1209 19:32:19.065922  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_dropout\nI1209 19:32:19.065944  1002 net.cpp:100] Creating Layer L2_b2_cbr2_dropout\nI1209 19:32:19.065963  1002 net.cpp:434] L2_b2_cbr2_dropout <- L2_b2_cbr2_bn_top\nI1209 19:32:19.065981  1002 net.cpp:395] L2_b2_cbr2_dropout -> L2_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.066027  1002 net.cpp:150] Setting up L2_b2_cbr2_dropout\nI1209 19:32:19.066051  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.066062  1002 net.cpp:165] Memory required for data: 1084929500\nI1209 19:32:19.066072  1002 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1209 19:32:19.066089  1002 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1209 19:32:19.066102  1002 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1209 19:32:19.066115  1002 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1209 19:32:19.066136  1002 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1209 19:32:19.066192  1002 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1209 19:32:19.066210  1002 net.cpp:157] Top shape: 125 32 16 
16 (1024000)\nI1209 19:32:19.066218  1002 net.cpp:165] Memory required for data: 1089025500\nI1209 19:32:19.066229  1002 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1209 19:32:19.066251  1002 net.cpp:100] Creating Layer L2_b2_relu\nI1209 19:32:19.066262  1002 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1209 19:32:19.066277  1002 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.066295  1002 net.cpp:150] Setting up L2_b2_relu\nI1209 19:32:19.066310  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.066320  1002 net.cpp:165] Memory required for data: 1093121500\nI1209 19:32:19.066329  1002 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.066344  1002 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.066355  1002 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1209 19:32:19.066370  1002 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1209 19:32:19.066388  1002 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1209 19:32:19.066473  1002 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.066493  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.066506  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.066515  1002 net.cpp:165] Memory required for data: 1101313500\nI1209 19:32:19.066526  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1209 19:32:19.066552  1002 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1209 19:32:19.066573  1002 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1209 19:32:19.066602  1002 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1209 19:32:19.067126  1002 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1209 19:32:19.067147  1002 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1209 19:32:19.067157  1002 net.cpp:165] Memory required for data: 1105409500\nI1209 19:32:19.067173  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1209 19:32:19.067196  1002 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1209 19:32:19.067209  1002 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1209 19:32:19.067229  1002 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1209 19:32:19.067528  1002 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1209 19:32:19.067548  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.067559  1002 net.cpp:165] Memory required for data: 1109505500\nI1209 19:32:19.067579  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1209 19:32:19.067596  1002 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1209 19:32:19.067608  1002 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1209 19:32:19.067622  1002 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.067720  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1209 19:32:19.067911  1002 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1209 19:32:19.067930  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.067940  1002 net.cpp:165] Memory required for data: 1113601500\nI1209 19:32:19.067965  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_dropout\nI1209 19:32:19.067984  1002 net.cpp:100] Creating Layer L2_b3_cbr1_dropout\nI1209 19:32:19.068002  1002 net.cpp:434] L2_b3_cbr1_dropout <- L2_b3_cbr1_bn_top\nI1209 19:32:19.068017  1002 net.cpp:395] L2_b3_cbr1_dropout -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.068064  1002 net.cpp:150] Setting up L2_b3_cbr1_dropout\nI1209 19:32:19.068083  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.068092  1002 net.cpp:165] Memory required for data: 1117697500\nI1209 19:32:19.068102  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1209 19:32:19.068122  1002 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1209 19:32:19.068135  
1002 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1209 19:32:19.068150  1002 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.068166  1002 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1209 19:32:19.068181  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.068190  1002 net.cpp:165] Memory required for data: 1121793500\nI1209 19:32:19.068200  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1209 19:32:19.068222  1002 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1209 19:32:19.068235  1002 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1209 19:32:19.068259  1002 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1209 19:32:19.068786  1002 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1209 19:32:19.068806  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.068816  1002 net.cpp:165] Memory required for data: 1125889500\nI1209 19:32:19.068832  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1209 19:32:19.068851  1002 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1209 19:32:19.068863  1002 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1209 19:32:19.068886  1002 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1209 19:32:19.069200  1002 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1209 19:32:19.069218  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.069228  1002 net.cpp:165] Memory required for data: 1129985500\nI1209 19:32:19.069249  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1209 19:32:19.069272  1002 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1209 19:32:19.069284  1002 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1209 19:32:19.069299  1002 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.069396  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1209 19:32:19.069602  1002 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1209 19:32:19.069620  1002 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1209 19:32:19.069629  1002 net.cpp:165] Memory required for data: 1134081500\nI1209 19:32:19.069648  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_dropout\nI1209 19:32:19.069669  1002 net.cpp:100] Creating Layer L2_b3_cbr2_dropout\nI1209 19:32:19.069682  1002 net.cpp:434] L2_b3_cbr2_dropout <- L2_b3_cbr2_bn_top\nI1209 19:32:19.069699  1002 net.cpp:395] L2_b3_cbr2_dropout -> L2_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.069751  1002 net.cpp:150] Setting up L2_b3_cbr2_dropout\nI1209 19:32:19.069769  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.069779  1002 net.cpp:165] Memory required for data: 1138177500\nI1209 19:32:19.069790  1002 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1209 19:32:19.069806  1002 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1209 19:32:19.069818  1002 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1209 19:32:19.069831  1002 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1209 19:32:19.069852  1002 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1209 19:32:19.069908  1002 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1209 19:32:19.069926  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.069936  1002 net.cpp:165] Memory required for data: 1142273500\nI1209 19:32:19.069947  1002 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1209 19:32:19.069969  1002 net.cpp:100] Creating Layer L2_b3_relu\nI1209 19:32:19.069983  1002 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1209 19:32:19.069998  1002 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.070016  1002 net.cpp:150] Setting up L2_b3_relu\nI1209 19:32:19.070030  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.070041  1002 net.cpp:165] Memory required for data: 1146369500\nI1209 19:32:19.070051  1002 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.070070  1002 net.cpp:100] Creating 
Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.070081  1002 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1209 19:32:19.070097  1002 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1209 19:32:19.070117  1002 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1209 19:32:19.070200  1002 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.070219  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.070232  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.070241  1002 net.cpp:165] Memory required for data: 1154561500\nI1209 19:32:19.070253  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1209 19:32:19.070274  1002 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1209 19:32:19.070286  1002 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1209 19:32:19.070310  1002 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1209 19:32:19.070812  1002 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1209 19:32:19.070832  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.070842  1002 net.cpp:165] Memory required for data: 1158657500\nI1209 19:32:19.070858  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1209 19:32:19.070876  1002 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1209 19:32:19.070888  1002 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1209 19:32:19.070909  1002 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1209 19:32:19.071219  1002 net.cpp:150] Setting up L2_b4_cbr1_bn\nI1209 19:32:19.071238  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.071249  1002 net.cpp:165] Memory required for data: 1162753500\nI1209 19:32:19.071269  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1209 19:32:19.071291  1002 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1209 
19:32:19.071311  1002 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1209 19:32:19.071327  1002 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.071427  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1209 19:32:19.071620  1002 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1209 19:32:19.071640  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.071650  1002 net.cpp:165] Memory required for data: 1166849500\nI1209 19:32:19.071666  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_dropout\nI1209 19:32:19.071684  1002 net.cpp:100] Creating Layer L2_b4_cbr1_dropout\nI1209 19:32:19.071696  1002 net.cpp:434] L2_b4_cbr1_dropout <- L2_b4_cbr1_bn_top\nI1209 19:32:19.071717  1002 net.cpp:395] L2_b4_cbr1_dropout -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.071764  1002 net.cpp:150] Setting up L2_b4_cbr1_dropout\nI1209 19:32:19.071789  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.071799  1002 net.cpp:165] Memory required for data: 1170945500\nI1209 19:32:19.071810  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1209 19:32:19.071823  1002 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1209 19:32:19.071835  1002 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1209 19:32:19.071849  1002 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.071867  1002 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1209 19:32:19.071882  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.071892  1002 net.cpp:165] Memory required for data: 1175041500\nI1209 19:32:19.071902  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1209 19:32:19.071929  1002 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1209 19:32:19.071941  1002 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1209 19:32:19.071966  1002 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1209 19:32:19.072495  1002 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1209 19:32:19.072515  1002 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.072525  1002 net.cpp:165] Memory required for data: 1179137500\nI1209 19:32:19.072541  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1209 19:32:19.072559  1002 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1209 19:32:19.072578  1002 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1209 19:32:19.072594  1002 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1209 19:32:19.072890  1002 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1209 19:32:19.072908  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.072918  1002 net.cpp:165] Memory required for data: 1183233500\nI1209 19:32:19.072939  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1209 19:32:19.072964  1002 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1209 19:32:19.072978  1002 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1209 19:32:19.072993  1002 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.073092  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1209 19:32:19.073282  1002 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1209 19:32:19.073305  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073316  1002 net.cpp:165] Memory required for data: 1187329500\nI1209 19:32:19.073334  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_dropout\nI1209 19:32:19.073351  1002 net.cpp:100] Creating Layer L2_b4_cbr2_dropout\nI1209 19:32:19.073362  1002 net.cpp:434] L2_b4_cbr2_dropout <- L2_b4_cbr2_bn_top\nI1209 19:32:19.073376  1002 net.cpp:395] L2_b4_cbr2_dropout -> L2_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.073429  1002 net.cpp:150] Setting up L2_b4_cbr2_dropout\nI1209 19:32:19.073448  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073457  1002 net.cpp:165] Memory required for data: 1191425500\nI1209 19:32:19.073467  1002 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1209 19:32:19.073484  1002 net.cpp:100] Creating Layer 
L2_b4_sum_eltwise\nI1209 19:32:19.073495  1002 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1209 19:32:19.073508  1002 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1209 19:32:19.073539  1002 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1209 19:32:19.073596  1002 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1209 19:32:19.073614  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073624  1002 net.cpp:165] Memory required for data: 1195521500\nI1209 19:32:19.073634  1002 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1209 19:32:19.073654  1002 net.cpp:100] Creating Layer L2_b4_relu\nI1209 19:32:19.073667  1002 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1209 19:32:19.073681  1002 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.073700  1002 net.cpp:150] Setting up L2_b4_relu\nI1209 19:32:19.073715  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073724  1002 net.cpp:165] Memory required for data: 1199617500\nI1209 19:32:19.073734  1002 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.073748  1002 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.073760  1002 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1209 19:32:19.073768  1002 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1209 19:32:19.073778  1002 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1209 19:32:19.073833  1002 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.073843  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073849  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.073854  1002 net.cpp:165] Memory required for data: 1207809500\nI1209 19:32:19.073859  1002 layer_factory.hpp:77] 
Creating layer L2_b5_cbr1_conv\nI1209 19:32:19.073878  1002 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1209 19:32:19.073884  1002 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1209 19:32:19.073896  1002 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1209 19:32:19.074376  1002 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1209 19:32:19.074395  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.074405  1002 net.cpp:165] Memory required for data: 1211905500\nI1209 19:32:19.074422  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1209 19:32:19.074445  1002 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1209 19:32:19.074457  1002 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1209 19:32:19.074481  1002 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI1209 19:32:19.074784  1002 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1209 19:32:19.074802  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.074812  1002 net.cpp:165] Memory required for data: 1216001500\nI1209 19:32:19.074832  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1209 19:32:19.074851  1002 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1209 19:32:19.074862  1002 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1209 19:32:19.074877  1002 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.074985  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1209 19:32:19.075176  1002 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1209 19:32:19.075196  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.075206  1002 net.cpp:165] Memory required for data: 1220097500\nI1209 19:32:19.075223  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_dropout\nI1209 19:32:19.075245  1002 net.cpp:100] Creating Layer L2_b5_cbr1_dropout\nI1209 19:32:19.075258  1002 net.cpp:434] L2_b5_cbr1_dropout <- L2_b5_cbr1_bn_top\nI1209 19:32:19.075273  1002 net.cpp:395] L2_b5_cbr1_dropout -> 
L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.075318  1002 net.cpp:150] Setting up L2_b5_cbr1_dropout\nI1209 19:32:19.075345  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.075356  1002 net.cpp:165] Memory required for data: 1224193500\nI1209 19:32:19.075366  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1209 19:32:19.075389  1002 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1209 19:32:19.075402  1002 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1209 19:32:19.075417  1002 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.075441  1002 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1209 19:32:19.075456  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.075467  1002 net.cpp:165] Memory required for data: 1228289500\nI1209 19:32:19.075477  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1209 19:32:19.075500  1002 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1209 19:32:19.075512  1002 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1209 19:32:19.075536  1002 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1209 19:32:19.076052  1002 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1209 19:32:19.076073  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.076083  1002 net.cpp:165] Memory required for data: 1232385500\nI1209 19:32:19.076100  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1209 19:32:19.076118  1002 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1209 19:32:19.076131  1002 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1209 19:32:19.076151  1002 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1209 19:32:19.076462  1002 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1209 19:32:19.076480  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.076490  1002 net.cpp:165] Memory required for data: 1236481500\nI1209 19:32:19.076511  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1209 19:32:19.076534  1002 net.cpp:100] 
Creating Layer L2_b5_cbr2_scale\nI1209 19:32:19.076547  1002 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1209 19:32:19.076562  1002 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.076658  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1209 19:32:19.076858  1002 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1209 19:32:19.076876  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.076886  1002 net.cpp:165] Memory required for data: 1240577500\nI1209 19:32:19.076905  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_dropout\nI1209 19:32:19.076927  1002 net.cpp:100] Creating Layer L2_b5_cbr2_dropout\nI1209 19:32:19.076941  1002 net.cpp:434] L2_b5_cbr2_dropout <- L2_b5_cbr2_bn_top\nI1209 19:32:19.076967  1002 net.cpp:395] L2_b5_cbr2_dropout -> L2_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.077015  1002 net.cpp:150] Setting up L2_b5_cbr2_dropout\nI1209 19:32:19.077033  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.077042  1002 net.cpp:165] Memory required for data: 1244673500\nI1209 19:32:19.077054  1002 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1209 19:32:19.077069  1002 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1209 19:32:19.077081  1002 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1209 19:32:19.077095  1002 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1209 19:32:19.077114  1002 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1209 19:32:19.077170  1002 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1209 19:32:19.077189  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.077199  1002 net.cpp:165] Memory required for data: 1248769500\nI1209 19:32:19.077209  1002 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1209 19:32:19.077224  1002 net.cpp:100] Creating Layer L2_b5_relu\nI1209 19:32:19.077236  1002 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI1209 19:32:19.077251  1002 net.cpp:395] 
L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.077275  1002 net.cpp:150] Setting up L2_b5_relu\nI1209 19:32:19.077291  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.077299  1002 net.cpp:165] Memory required for data: 1252865500\nI1209 19:32:19.077309  1002 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.077323  1002 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.077344  1002 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1209 19:32:19.077361  1002 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1209 19:32:19.077381  1002 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1209 19:32:19.077466  1002 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.077486  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.077499  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.077508  1002 net.cpp:165] Memory required for data: 1261057500\nI1209 19:32:19.077519  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1209 19:32:19.077540  1002 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1209 19:32:19.077553  1002 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1209 19:32:19.077574  1002 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1209 19:32:19.078099  1002 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1209 19:32:19.078119  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.078130  1002 net.cpp:165] Memory required for data: 1265153500\nI1209 19:32:19.078147  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1209 19:32:19.078164  1002 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1209 19:32:19.078176  1002 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI1209 19:32:19.078197  1002 net.cpp:408] 
L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1209 19:32:19.078503  1002 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1209 19:32:19.078522  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.078531  1002 net.cpp:165] Memory required for data: 1269249500\nI1209 19:32:19.078553  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1209 19:32:19.078577  1002 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1209 19:32:19.078588  1002 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1209 19:32:19.078603  1002 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.078701  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1209 19:32:19.078897  1002 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1209 19:32:19.078917  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.078927  1002 net.cpp:165] Memory required for data: 1273345500\nI1209 19:32:19.078943  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_dropout\nI1209 19:32:19.078971  1002 net.cpp:100] Creating Layer L2_b6_cbr1_dropout\nI1209 19:32:19.078985  1002 net.cpp:434] L2_b6_cbr1_dropout <- L2_b6_cbr1_bn_top\nI1209 19:32:19.079000  1002 net.cpp:395] L2_b6_cbr1_dropout -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.079046  1002 net.cpp:150] Setting up L2_b6_cbr1_dropout\nI1209 19:32:19.079071  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.079080  1002 net.cpp:165] Memory required for data: 1277441500\nI1209 19:32:19.079090  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1209 19:32:19.079105  1002 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1209 19:32:19.079116  1002 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1209 19:32:19.079131  1002 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.079154  1002 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1209 19:32:19.079170  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.079180  1002 net.cpp:165] Memory required for data: 
1281537500\nI1209 19:32:19.079190  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1209 19:32:19.079211  1002 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1209 19:32:19.079224  1002 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1209 19:32:19.079241  1002 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1209 19:32:19.079769  1002 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1209 19:32:19.079789  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.079799  1002 net.cpp:165] Memory required for data: 1285633500\nI1209 19:32:19.079816  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1209 19:32:19.079849  1002 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1209 19:32:19.079862  1002 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1209 19:32:19.079879  1002 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1209 19:32:19.080199  1002 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1209 19:32:19.080217  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.080229  1002 net.cpp:165] Memory required for data: 1289729500\nI1209 19:32:19.080250  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1209 19:32:19.080266  1002 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1209 19:32:19.080278  1002 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1209 19:32:19.080299  1002 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.080397  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1209 19:32:19.080592  1002 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1209 19:32:19.080610  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.080621  1002 net.cpp:165] Memory required for data: 1293825500\nI1209 19:32:19.080637  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_dropout\nI1209 19:32:19.080654  1002 net.cpp:100] Creating Layer L2_b6_cbr2_dropout\nI1209 19:32:19.080667  1002 net.cpp:434] L2_b6_cbr2_dropout <- L2_b6_cbr2_bn_top\nI1209 19:32:19.080687  1002 net.cpp:395] 
L2_b6_cbr2_dropout -> L2_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.080734  1002 net.cpp:150] Setting up L2_b6_cbr2_dropout\nI1209 19:32:19.080752  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.080762  1002 net.cpp:165] Memory required for data: 1297921500\nI1209 19:32:19.080772  1002 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1209 19:32:19.080788  1002 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1209 19:32:19.080806  1002 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1209 19:32:19.080821  1002 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1209 19:32:19.080837  1002 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1209 19:32:19.080898  1002 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1209 19:32:19.080914  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.080925  1002 net.cpp:165] Memory required for data: 1302017500\nI1209 19:32:19.080936  1002 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1209 19:32:19.080950  1002 net.cpp:100] Creating Layer L2_b6_relu\nI1209 19:32:19.080971  1002 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1209 19:32:19.080986  1002 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.081004  1002 net.cpp:150] Setting up L2_b6_relu\nI1209 19:32:19.081018  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.081027  1002 net.cpp:165] Memory required for data: 1306113500\nI1209 19:32:19.081037  1002 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.081053  1002 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.081063  1002 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1209 19:32:19.081079  1002 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1209 19:32:19.081097  1002 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1209 19:32:19.081187  1002 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.081208  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.081221  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.081231  1002 net.cpp:165] Memory required for data: 1314305500\nI1209 19:32:19.081243  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1209 19:32:19.081269  1002 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1209 19:32:19.081282  1002 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1209 19:32:19.081306  1002 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1209 19:32:19.081854  1002 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1209 19:32:19.081882  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.081892  1002 net.cpp:165] Memory required for data: 1318401500\nI1209 19:32:19.081910  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1209 19:32:19.081928  1002 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI1209 19:32:19.081940  1002 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1209 19:32:19.081967  1002 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1209 19:32:19.082274  1002 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1209 19:32:19.082293  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.082304  1002 net.cpp:165] Memory required for data: 1322497500\nI1209 19:32:19.082324  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1209 19:32:19.082342  1002 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1209 19:32:19.082355  1002 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1209 19:32:19.082370  1002 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.082473  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1209 19:32:19.082665  1002 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1209 19:32:19.082690  1002 net.cpp:157] Top shape: 125 
32 16 16 (1024000)\nI1209 19:32:19.082700  1002 net.cpp:165] Memory required for data: 1326593500\nI1209 19:32:19.082720  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_dropout\nI1209 19:32:19.082736  1002 net.cpp:100] Creating Layer L2_b7_cbr1_dropout\nI1209 19:32:19.082748  1002 net.cpp:434] L2_b7_cbr1_dropout <- L2_b7_cbr1_bn_top\nI1209 19:32:19.082762  1002 net.cpp:395] L2_b7_cbr1_dropout -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.082809  1002 net.cpp:150] Setting up L2_b7_cbr1_dropout\nI1209 19:32:19.082832  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.082844  1002 net.cpp:165] Memory required for data: 1330689500\nI1209 19:32:19.082854  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1209 19:32:19.082870  1002 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1209 19:32:19.082880  1002 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1209 19:32:19.082901  1002 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.082921  1002 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1209 19:32:19.082934  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.082944  1002 net.cpp:165] Memory required for data: 1334785500\nI1209 19:32:19.082954  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1209 19:32:19.082983  1002 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1209 19:32:19.082996  1002 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1209 19:32:19.083019  1002 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1209 19:32:19.083546  1002 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1209 19:32:19.083566  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.083577  1002 net.cpp:165] Memory required for data: 1338881500\nI1209 19:32:19.083595  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1209 19:32:19.083621  1002 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1209 19:32:19.083632  1002 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1209 
19:32:19.083648  1002 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1209 19:32:19.083964  1002 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1209 19:32:19.083989  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.084000  1002 net.cpp:165] Memory required for data: 1342977500\nI1209 19:32:19.084022  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1209 19:32:19.084039  1002 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1209 19:32:19.084051  1002 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1209 19:32:19.084066  1002 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.084162  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1209 19:32:19.084360  1002 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1209 19:32:19.084379  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.084389  1002 net.cpp:165] Memory required for data: 1347073500\nI1209 19:32:19.084406  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_dropout\nI1209 19:32:19.084436  1002 net.cpp:100] Creating Layer L2_b7_cbr2_dropout\nI1209 19:32:19.084450  1002 net.cpp:434] L2_b7_cbr2_dropout <- L2_b7_cbr2_bn_top\nI1209 19:32:19.084470  1002 net.cpp:395] L2_b7_cbr2_dropout -> L2_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.084517  1002 net.cpp:150] Setting up L2_b7_cbr2_dropout\nI1209 19:32:19.084535  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.084545  1002 net.cpp:165] Memory required for data: 1351169500\nI1209 19:32:19.084555  1002 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1209 19:32:19.084571  1002 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1209 19:32:19.084583  1002 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1209 19:32:19.084601  1002 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1209 19:32:19.084619  1002 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1209 19:32:19.084674  1002 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1209 
19:32:19.084694  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.084704  1002 net.cpp:165] Memory required for data: 1355265500\nI1209 19:32:19.084715  1002 layer_factory.hpp:77] Creating layer L2_b7_relu\nI1209 19:32:19.084731  1002 net.cpp:100] Creating Layer L2_b7_relu\nI1209 19:32:19.084743  1002 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1209 19:32:19.084763  1002 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.084782  1002 net.cpp:150] Setting up L2_b7_relu\nI1209 19:32:19.084797  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.084807  1002 net.cpp:165] Memory required for data: 1359361500\nI1209 19:32:19.084817  1002 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.084831  1002 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.084843  1002 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1209 19:32:19.084857  1002 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1209 19:32:19.084877  1002 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1209 19:32:19.084969  1002 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.084987  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.085000  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.085011  1002 net.cpp:165] Memory required for data: 1367553500\nI1209 19:32:19.085021  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1209 19:32:19.085042  1002 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1209 19:32:19.085055  1002 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1209 19:32:19.085078  1002 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1209 19:32:19.085597  1002 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1209 
19:32:19.085615  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.085625  1002 net.cpp:165] Memory required for data: 1371649500\nI1209 19:32:19.085642  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI1209 19:32:19.085660  1002 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1209 19:32:19.085672  1002 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1209 19:32:19.085693  1002 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1209 19:32:19.086014  1002 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1209 19:32:19.086032  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.086042  1002 net.cpp:165] Memory required for data: 1375745500\nI1209 19:32:19.086063  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1209 19:32:19.086091  1002 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1209 19:32:19.086104  1002 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1209 19:32:19.086120  1002 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.086218  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1209 19:32:19.086428  1002 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1209 19:32:19.086457  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.086467  1002 net.cpp:165] Memory required for data: 1379841500\nI1209 19:32:19.086485  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_dropout\nI1209 19:32:19.086508  1002 net.cpp:100] Creating Layer L2_b8_cbr1_dropout\nI1209 19:32:19.086521  1002 net.cpp:434] L2_b8_cbr1_dropout <- L2_b8_cbr1_bn_top\nI1209 19:32:19.086536  1002 net.cpp:395] L2_b8_cbr1_dropout -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.086589  1002 net.cpp:150] Setting up L2_b8_cbr1_dropout\nI1209 19:32:19.086607  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.086618  1002 net.cpp:165] Memory required for data: 1383937500\nI1209 19:32:19.086628  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1209 19:32:19.086642  1002 net.cpp:100] 
Creating Layer L2_b8_cbr1_relu\nI1209 19:32:19.086655  1002 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1209 19:32:19.086676  1002 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.086695  1002 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1209 19:32:19.086710  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.086720  1002 net.cpp:165] Memory required for data: 1388033500\nI1209 19:32:19.086730  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1209 19:32:19.086751  1002 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1209 19:32:19.086763  1002 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1209 19:32:19.086774  1002 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1209 19:32:19.087266  1002 net.cpp:150] Setting up L2_b8_cbr2_conv\nI1209 19:32:19.087285  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.087296  1002 net.cpp:165] Memory required for data: 1392129500\nI1209 19:32:19.087312  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1209 19:32:19.087333  1002 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1209 19:32:19.087347  1002 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1209 19:32:19.087363  1002 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1209 19:32:19.087676  1002 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1209 19:32:19.087693  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.087703  1002 net.cpp:165] Memory required for data: 1396225500\nI1209 19:32:19.087724  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1209 19:32:19.087741  1002 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1209 19:32:19.087754  1002 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1209 19:32:19.087774  1002 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.087872  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1209 19:32:19.088080  1002 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1209 19:32:19.088099  
1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088109  1002 net.cpp:165] Memory required for data: 1400321500\nI1209 19:32:19.088126  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_dropout\nI1209 19:32:19.088142  1002 net.cpp:100] Creating Layer L2_b8_cbr2_dropout\nI1209 19:32:19.088155  1002 net.cpp:434] L2_b8_cbr2_dropout <- L2_b8_cbr2_bn_top\nI1209 19:32:19.088174  1002 net.cpp:395] L2_b8_cbr2_dropout -> L2_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.088222  1002 net.cpp:150] Setting up L2_b8_cbr2_dropout\nI1209 19:32:19.088239  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088249  1002 net.cpp:165] Memory required for data: 1404417500\nI1209 19:32:19.088260  1002 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1209 19:32:19.088281  1002 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1209 19:32:19.088294  1002 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1209 19:32:19.088306  1002 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1209 19:32:19.088322  1002 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1209 19:32:19.088387  1002 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1209 19:32:19.088405  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088426  1002 net.cpp:165] Memory required for data: 1408513500\nI1209 19:32:19.088438  1002 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1209 19:32:19.088454  1002 net.cpp:100] Creating Layer L2_b8_relu\nI1209 19:32:19.088465  1002 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1209 19:32:19.088480  1002 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.088498  1002 net.cpp:150] Setting up L2_b8_relu\nI1209 19:32:19.088513  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088522  1002 net.cpp:165] Memory required for data: 1412609500\nI1209 19:32:19.088532  1002 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 
19:32:19.088548  1002 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 19:32:19.088559  1002 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1209 19:32:19.088579  1002 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1209 19:32:19.088619  1002 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1209 19:32:19.088711  1002 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 19:32:19.088731  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088743  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.088753  1002 net.cpp:165] Memory required for data: 1420801500\nI1209 19:32:19.088764  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1209 19:32:19.088791  1002 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1209 19:32:19.088804  1002 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1209 19:32:19.088829  1002 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1209 19:32:19.089366  1002 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1209 19:32:19.089386  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.089395  1002 net.cpp:165] Memory required for data: 1424897500\nI1209 19:32:19.089413  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1209 19:32:19.089438  1002 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1209 19:32:19.089452  1002 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1209 19:32:19.089473  1002 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1209 19:32:19.089774  1002 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1209 19:32:19.089793  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.089803  1002 net.cpp:165] Memory required for data: 1428993500\nI1209 19:32:19.089824  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1209 19:32:19.089843  1002 net.cpp:100] 
Creating Layer L2_b9_cbr1_scale\nI1209 19:32:19.089855  1002 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1209 19:32:19.089870  1002 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.089978  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1209 19:32:19.090175  1002 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1209 19:32:19.090201  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.090212  1002 net.cpp:165] Memory required for data: 1433089500\nI1209 19:32:19.090231  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_dropout\nI1209 19:32:19.090247  1002 net.cpp:100] Creating Layer L2_b9_cbr1_dropout\nI1209 19:32:19.090260  1002 net.cpp:434] L2_b9_cbr1_dropout <- L2_b9_cbr1_bn_top\nI1209 19:32:19.090275  1002 net.cpp:395] L2_b9_cbr1_dropout -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.090322  1002 net.cpp:150] Setting up L2_b9_cbr1_dropout\nI1209 19:32:19.090345  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.090356  1002 net.cpp:165] Memory required for data: 1437185500\nI1209 19:32:19.090368  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1209 19:32:19.090381  1002 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1209 19:32:19.090394  1002 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1209 19:32:19.090412  1002 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.090440  1002 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1209 19:32:19.090456  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.090466  1002 net.cpp:165] Memory required for data: 1441281500\nI1209 19:32:19.090476  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1209 19:32:19.090497  1002 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1209 19:32:19.090510  1002 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1209 19:32:19.090534  1002 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1209 19:32:19.091068  1002 net.cpp:150] Setting up 
L2_b9_cbr2_conv\nI1209 19:32:19.091087  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.091097  1002 net.cpp:165] Memory required for data: 1445377500\nI1209 19:32:19.091114  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1209 19:32:19.091137  1002 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1209 19:32:19.091150  1002 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1209 19:32:19.091167  1002 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1209 19:32:19.091480  1002 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1209 19:32:19.091503  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.091514  1002 net.cpp:165] Memory required for data: 1449473500\nI1209 19:32:19.091584  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1209 19:32:19.091610  1002 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1209 19:32:19.091624  1002 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1209 19:32:19.091637  1002 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.091732  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1209 19:32:19.091923  1002 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1209 19:32:19.091941  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.091951  1002 net.cpp:165] Memory required for data: 1453569500\nI1209 19:32:19.091977  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_dropout\nI1209 19:32:19.091995  1002 net.cpp:100] Creating Layer L2_b9_cbr2_dropout\nI1209 19:32:19.092007  1002 net.cpp:434] L2_b9_cbr2_dropout <- L2_b9_cbr2_bn_top\nI1209 19:32:19.092026  1002 net.cpp:395] L2_b9_cbr2_dropout -> L2_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.092075  1002 net.cpp:150] Setting up L2_b9_cbr2_dropout\nI1209 19:32:19.092094  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.092104  1002 net.cpp:165] Memory required for data: 1457665500\nI1209 19:32:19.092114  1002 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1209 
19:32:19.092135  1002 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1209 19:32:19.092149  1002 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1209 19:32:19.092161  1002 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1209 19:32:19.092177  1002 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1209 19:32:19.092238  1002 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1209 19:32:19.092258  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.092267  1002 net.cpp:165] Memory required for data: 1461761500\nI1209 19:32:19.092278  1002 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1209 19:32:19.092293  1002 net.cpp:100] Creating Layer L2_b9_relu\nI1209 19:32:19.092304  1002 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1209 19:32:19.092327  1002 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.092348  1002 net.cpp:150] Setting up L2_b9_relu\nI1209 19:32:19.092363  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.092372  1002 net.cpp:165] Memory required for data: 1465857500\nI1209 19:32:19.092381  1002 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.092397  1002 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.092408  1002 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1209 19:32:19.092429  1002 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1209 19:32:19.092449  1002 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1209 19:32:19.092542  1002 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.092561  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.092574  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.092584  1002 net.cpp:165] Memory required for data: 1474049500\nI1209 
19:32:19.092595  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1209 19:32:19.092623  1002 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1209 19:32:19.092638  1002 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1209 19:32:19.092660  1002 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1209 19:32:19.093211  1002 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1209 19:32:19.093232  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.093242  1002 net.cpp:165] Memory required for data: 1475073500\nI1209 19:32:19.093261  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1209 19:32:19.093282  1002 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1209 19:32:19.093297  1002 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1209 19:32:19.093313  1002 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1209 19:32:19.093636  1002 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1209 19:32:19.093655  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.093664  1002 net.cpp:165] Memory required for data: 1476097500\nI1209 19:32:19.093685  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1209 19:32:19.093708  1002 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1209 19:32:19.093721  1002 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1209 19:32:19.093736  1002 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.093840  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1209 19:32:19.094050  1002 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1209 19:32:19.094069  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.094079  1002 net.cpp:165] Memory required for data: 1477121500\nI1209 19:32:19.094096  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_dropout\nI1209 19:32:19.094118  1002 net.cpp:100] Creating Layer L3_b1_cbr1_dropout\nI1209 19:32:19.094131  1002 net.cpp:434] L3_b1_cbr1_dropout <- L3_b1_cbr1_bn_top\nI1209 19:32:19.094146  1002 net.cpp:395] 
L3_b1_cbr1_dropout -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.094207  1002 net.cpp:150] Setting up L3_b1_cbr1_dropout\nI1209 19:32:19.094224  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.094234  1002 net.cpp:165] Memory required for data: 1478145500\nI1209 19:32:19.094246  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1209 19:32:19.094260  1002 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1209 19:32:19.094274  1002 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1209 19:32:19.094292  1002 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.094311  1002 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1209 19:32:19.094326  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.094336  1002 net.cpp:165] Memory required for data: 1479169500\nI1209 19:32:19.094346  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1209 19:32:19.094368  1002 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1209 19:32:19.094380  1002 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1209 19:32:19.094398  1002 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1209 19:32:19.094924  1002 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1209 19:32:19.094944  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.094954  1002 net.cpp:165] Memory required for data: 1480193500\nI1209 19:32:19.094980  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1209 19:32:19.095003  1002 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1209 19:32:19.095016  1002 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1209 19:32:19.095032  1002 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1209 19:32:19.095362  1002 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1209 19:32:19.095391  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.095401  1002 net.cpp:165] Memory required for data: 1481217500\nI1209 19:32:19.095422  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1209 19:32:19.095443  1002 
net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1209 19:32:19.095456  1002 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1209 19:32:19.095477  1002 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.095577  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1209 19:32:19.095784  1002 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1209 19:32:19.095803  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.095813  1002 net.cpp:165] Memory required for data: 1482241500\nI1209 19:32:19.095831  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_dropout\nI1209 19:32:19.095849  1002 net.cpp:100] Creating Layer L3_b1_cbr2_dropout\nI1209 19:32:19.095860  1002 net.cpp:434] L3_b1_cbr2_dropout <- L3_b1_cbr2_bn_top\nI1209 19:32:19.095881  1002 net.cpp:395] L3_b1_cbr2_dropout -> L3_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.095937  1002 net.cpp:150] Setting up L3_b1_cbr2_dropout\nI1209 19:32:19.095963  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.095973  1002 net.cpp:165] Memory required for data: 1483265500\nI1209 19:32:19.095984  1002 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1209 19:32:19.096005  1002 net.cpp:100] Creating Layer L3_b1_pool\nI1209 19:32:19.096019  1002 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1209 19:32:19.096035  1002 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1209 19:32:19.096093  1002 net.cpp:150] Setting up L3_b1_pool\nI1209 19:32:19.096112  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.096122  1002 net.cpp:165] Memory required for data: 1484289500\nI1209 19:32:19.096132  1002 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1209 19:32:19.096151  1002 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1209 19:32:19.096164  1002 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1209 19:32:19.096176  1002 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1209 19:32:19.096192  1002 net.cpp:408] L3_b1_sum_eltwise -> 
L3_b1_sum_eltwise_top\nI1209 19:32:19.096247  1002 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1209 19:32:19.096266  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.096276  1002 net.cpp:165] Memory required for data: 1485313500\nI1209 19:32:19.096285  1002 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1209 19:32:19.096302  1002 net.cpp:100] Creating Layer L3_b1_relu\nI1209 19:32:19.096312  1002 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1209 19:32:19.096331  1002 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.096350  1002 net.cpp:150] Setting up L3_b1_relu\nI1209 19:32:19.096365  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.096374  1002 net.cpp:165] Memory required for data: 1486337500\nI1209 19:32:19.096385  1002 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1209 19:32:19.096401  1002 net.cpp:100] Creating Layer L3_b1_zeros\nI1209 19:32:19.096417  1002 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1209 19:32:19.097997  1002 net.cpp:150] Setting up L3_b1_zeros\nI1209 19:32:19.098019  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.098031  1002 net.cpp:165] Memory required for data: 1487361500\nI1209 19:32:19.098040  1002 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1209 19:32:19.098065  1002 net.cpp:100] Creating Layer L3_b1_concat0\nI1209 19:32:19.098079  1002 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1209 19:32:19.098093  1002 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1209 19:32:19.098109  1002 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1209 19:32:19.098176  1002 net.cpp:150] Setting up L3_b1_concat0\nI1209 19:32:19.098193  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.098203  1002 net.cpp:165] Memory required for data: 1489409500\nI1209 19:32:19.098214  1002 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.098240  1002 net.cpp:100] Creating Layer 
L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.098253  1002 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1209 19:32:19.098273  1002 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1209 19:32:19.098295  1002 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1209 19:32:19.098377  1002 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.098403  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.098418  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.098428  1002 net.cpp:165] Memory required for data: 1493505500\nI1209 19:32:19.098439  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1209 19:32:19.098459  1002 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1209 19:32:19.098472  1002 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1209 19:32:19.098491  1002 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1209 19:32:19.101073  1002 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1209 19:32:19.101095  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.101105  1002 net.cpp:165] Memory required for data: 1495553500\nI1209 19:32:19.101124  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1209 19:32:19.101148  1002 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1209 19:32:19.101161  1002 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1209 19:32:19.101183  1002 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1209 19:32:19.101507  1002 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1209 19:32:19.101526  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.101536  1002 net.cpp:165] Memory required for data: 1497601500\nI1209 19:32:19.101557  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1209 19:32:19.101577  1002 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1209 19:32:19.101589  1002 net.cpp:434] L3_b2_cbr1_scale <- 
L3_b2_cbr1_bn_top\nI1209 19:32:19.101610  1002 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.101709  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1209 19:32:19.101914  1002 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1209 19:32:19.101933  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.101943  1002 net.cpp:165] Memory required for data: 1499649500\nI1209 19:32:19.101969  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_dropout\nI1209 19:32:19.101986  1002 net.cpp:100] Creating Layer L3_b2_cbr1_dropout\nI1209 19:32:19.101999  1002 net.cpp:434] L3_b2_cbr1_dropout <- L3_b2_cbr1_bn_top\nI1209 19:32:19.102018  1002 net.cpp:395] L3_b2_cbr1_dropout -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.102075  1002 net.cpp:150] Setting up L3_b2_cbr1_dropout\nI1209 19:32:19.102098  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.102109  1002 net.cpp:165] Memory required for data: 1501697500\nI1209 19:32:19.102119  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1209 19:32:19.102134  1002 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1209 19:32:19.102146  1002 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1209 19:32:19.102161  1002 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.102180  1002 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1209 19:32:19.102195  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.102203  1002 net.cpp:165] Memory required for data: 1503745500\nI1209 19:32:19.102213  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1209 19:32:19.102241  1002 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1209 19:32:19.102253  1002 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1209 19:32:19.102272  1002 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1209 19:32:19.103353  1002 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1209 19:32:19.103374  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.103384  
1002 net.cpp:165] Memory required for data: 1505793500\nI1209 19:32:19.103400  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1209 19:32:19.103435  1002 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI1209 19:32:19.103449  1002 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1209 19:32:19.103466  1002 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1209 19:32:19.103799  1002 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1209 19:32:19.103821  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.103830  1002 net.cpp:165] Memory required for data: 1507841500\nI1209 19:32:19.103852  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1209 19:32:19.103873  1002 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1209 19:32:19.103886  1002 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1209 19:32:19.103902  1002 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.104007  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1209 19:32:19.104210  1002 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1209 19:32:19.104230  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104239  1002 net.cpp:165] Memory required for data: 1509889500\nI1209 19:32:19.104257  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_dropout\nI1209 19:32:19.104280  1002 net.cpp:100] Creating Layer L3_b2_cbr2_dropout\nI1209 19:32:19.104293  1002 net.cpp:434] L3_b2_cbr2_dropout <- L3_b2_cbr2_bn_top\nI1209 19:32:19.104308  1002 net.cpp:395] L3_b2_cbr2_dropout -> L3_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.104369  1002 net.cpp:150] Setting up L3_b2_cbr2_dropout\nI1209 19:32:19.104387  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104398  1002 net.cpp:165] Memory required for data: 1511937500\nI1209 19:32:19.104408  1002 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1209 19:32:19.104423  1002 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1209 19:32:19.104436  1002 net.cpp:434] L3_b2_sum_eltwise <- 
L3_b2_cbr2_bn_top\nI1209 19:32:19.104449  1002 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1209 19:32:19.104472  1002 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1209 19:32:19.104531  1002 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1209 19:32:19.104548  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104558  1002 net.cpp:165] Memory required for data: 1513985500\nI1209 19:32:19.104568  1002 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1209 19:32:19.104583  1002 net.cpp:100] Creating Layer L3_b2_relu\nI1209 19:32:19.104595  1002 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1209 19:32:19.104614  1002 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.104635  1002 net.cpp:150] Setting up L3_b2_relu\nI1209 19:32:19.104648  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104658  1002 net.cpp:165] Memory required for data: 1516033500\nI1209 19:32:19.104668  1002 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.104682  1002 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.104694  1002 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1209 19:32:19.104709  1002 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1209 19:32:19.104729  1002 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1209 19:32:19.104799  1002 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.104811  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104818  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.104822  1002 net.cpp:165] Memory required for data: 1520129500\nI1209 19:32:19.104828  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1209 19:32:19.104845  1002 net.cpp:100] Creating Layer 
L3_b3_cbr1_conv\nI1209 19:32:19.104851  1002 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1209 19:32:19.104863  1002 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1209 19:32:19.105885  1002 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1209 19:32:19.105911  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.105921  1002 net.cpp:165] Memory required for data: 1522177500\nI1209 19:32:19.105938  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1209 19:32:19.105955  1002 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1209 19:32:19.105975  1002 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1209 19:32:19.105998  1002 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1209 19:32:19.106330  1002 net.cpp:150] Setting up L3_b3_cbr1_bn\nI1209 19:32:19.106357  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.106369  1002 net.cpp:165] Memory required for data: 1524225500\nI1209 19:32:19.106390  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1209 19:32:19.106407  1002 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1209 19:32:19.106418  1002 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1209 19:32:19.106432  1002 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.106534  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1209 19:32:19.106741  1002 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1209 19:32:19.106760  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.106770  1002 net.cpp:165] Memory required for data: 1526273500\nI1209 19:32:19.106788  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_dropout\nI1209 19:32:19.106838  1002 net.cpp:100] Creating Layer L3_b3_cbr1_dropout\nI1209 19:32:19.106854  1002 net.cpp:434] L3_b3_cbr1_dropout <- L3_b3_cbr1_bn_top\nI1209 19:32:19.106870  1002 net.cpp:395] L3_b3_cbr1_dropout -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.106928  1002 net.cpp:150] Setting up L3_b3_cbr1_dropout\nI1209 
19:32:19.106946  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.106956  1002 net.cpp:165] Memory required for data: 1528321500\nI1209 19:32:19.106974  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1209 19:32:19.106989  1002 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1209 19:32:19.107002  1002 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1209 19:32:19.107020  1002 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.107040  1002 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1209 19:32:19.107055  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.107064  1002 net.cpp:165] Memory required for data: 1530369500\nI1209 19:32:19.107074  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1209 19:32:19.107095  1002 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1209 19:32:19.107110  1002 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1209 19:32:19.107126  1002 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1209 19:32:19.108213  1002 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1209 19:32:19.108233  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.108244  1002 net.cpp:165] Memory required for data: 1532417500\nI1209 19:32:19.108261  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1209 19:32:19.108283  1002 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1209 19:32:19.108296  1002 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1209 19:32:19.108314  1002 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1209 19:32:19.108644  1002 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1209 19:32:19.108664  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.108674  1002 net.cpp:165] Memory required for data: 1534465500\nI1209 19:32:19.108695  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1209 19:32:19.108716  1002 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1209 19:32:19.108731  1002 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1209 
19:32:19.108745  1002 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.108850  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1209 19:32:19.109073  1002 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1209 19:32:19.109092  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109102  1002 net.cpp:165] Memory required for data: 1536513500\nI1209 19:32:19.109120  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_dropout\nI1209 19:32:19.109148  1002 net.cpp:100] Creating Layer L3_b3_cbr2_dropout\nI1209 19:32:19.109160  1002 net.cpp:434] L3_b3_cbr2_dropout <- L3_b3_cbr2_bn_top\nI1209 19:32:19.109179  1002 net.cpp:395] L3_b3_cbr2_dropout -> L3_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.109237  1002 net.cpp:150] Setting up L3_b3_cbr2_dropout\nI1209 19:32:19.109256  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109266  1002 net.cpp:165] Memory required for data: 1538561500\nI1209 19:32:19.109275  1002 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1209 19:32:19.109297  1002 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1209 19:32:19.109309  1002 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1209 19:32:19.109323  1002 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1209 19:32:19.109338  1002 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1209 19:32:19.109395  1002 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1209 19:32:19.109414  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109423  1002 net.cpp:165] Memory required for data: 1540609500\nI1209 19:32:19.109433  1002 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1209 19:32:19.109448  1002 net.cpp:100] Creating Layer L3_b3_relu\nI1209 19:32:19.109460  1002 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1209 19:32:19.109479  1002 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.109499  1002 net.cpp:150] Setting up L3_b3_relu\nI1209 
19:32:19.109513  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109524  1002 net.cpp:165] Memory required for data: 1542657500\nI1209 19:32:19.109534  1002 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.109547  1002 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.109558  1002 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1209 19:32:19.109575  1002 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1209 19:32:19.109593  1002 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1209 19:32:19.109679  1002 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.109699  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109712  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.109721  1002 net.cpp:165] Memory required for data: 1546753500\nI1209 19:32:19.109731  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1209 19:32:19.109752  1002 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1209 19:32:19.109766  1002 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1209 19:32:19.109788  1002 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1209 19:32:19.110882  1002 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1209 19:32:19.110901  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.110913  1002 net.cpp:165] Memory required for data: 1548801500\nI1209 19:32:19.110930  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1209 19:32:19.110955  1002 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1209 19:32:19.110975  1002 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1209 19:32:19.110992  1002 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI1209 19:32:19.111321  1002 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1209 19:32:19.111340  1002 
net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.111351  1002 net.cpp:165] Memory required for data: 1550849500\nI1209 19:32:19.111371  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1209 19:32:19.111388  1002 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1209 19:32:19.111402  1002 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1209 19:32:19.111415  1002 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.111521  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1209 19:32:19.111745  1002 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1209 19:32:19.111773  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.111783  1002 net.cpp:165] Memory required for data: 1552897500\nI1209 19:32:19.111801  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_dropout\nI1209 19:32:19.111819  1002 net.cpp:100] Creating Layer L3_b4_cbr1_dropout\nI1209 19:32:19.111830  1002 net.cpp:434] L3_b4_cbr1_dropout <- L3_b4_cbr1_bn_top\nI1209 19:32:19.111845  1002 net.cpp:395] L3_b4_cbr1_dropout -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.111907  1002 net.cpp:150] Setting up L3_b4_cbr1_dropout\nI1209 19:32:19.111924  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.111934  1002 net.cpp:165] Memory required for data: 1554945500\nI1209 19:32:19.111945  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1209 19:32:19.111973  1002 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1209 19:32:19.111986  1002 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1209 19:32:19.112001  1002 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.112020  1002 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1209 19:32:19.112035  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.112043  1002 net.cpp:165] Memory required for data: 1556993500\nI1209 19:32:19.112054  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1209 19:32:19.112082  1002 net.cpp:100] Creating Layer 
L3_b4_cbr2_conv\nI1209 19:32:19.112094  1002 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1209 19:32:19.112112  1002 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1209 19:32:19.113199  1002 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1209 19:32:19.113219  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.113229  1002 net.cpp:165] Memory required for data: 1559041500\nI1209 19:32:19.113245  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1209 19:32:19.113268  1002 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1209 19:32:19.113281  1002 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1209 19:32:19.113302  1002 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1209 19:32:19.114907  1002 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1209 19:32:19.114929  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.114938  1002 net.cpp:165] Memory required for data: 1561089500\nI1209 19:32:19.114969  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1209 19:32:19.114994  1002 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1209 19:32:19.115006  1002 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1209 19:32:19.115027  1002 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.115129  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1209 19:32:19.115339  1002 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1209 19:32:19.115358  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115368  1002 net.cpp:165] Memory required for data: 1563137500\nI1209 19:32:19.115387  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_dropout\nI1209 19:32:19.115404  1002 net.cpp:100] Creating Layer L3_b4_cbr2_dropout\nI1209 19:32:19.115417  1002 net.cpp:434] L3_b4_cbr2_dropout <- L3_b4_cbr2_bn_top\nI1209 19:32:19.115438  1002 net.cpp:395] L3_b4_cbr2_dropout -> L3_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.115494  1002 net.cpp:150] Setting up L3_b4_cbr2_dropout\nI1209 19:32:19.115514  1002 
net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115522  1002 net.cpp:165] Memory required for data: 1565185500\nI1209 19:32:19.115533  1002 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1209 19:32:19.115555  1002 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1209 19:32:19.115566  1002 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1209 19:32:19.115581  1002 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1209 19:32:19.115597  1002 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1209 19:32:19.115653  1002 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1209 19:32:19.115671  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115681  1002 net.cpp:165] Memory required for data: 1567233500\nI1209 19:32:19.115702  1002 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1209 19:32:19.115720  1002 net.cpp:100] Creating Layer L3_b4_relu\nI1209 19:32:19.115732  1002 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI1209 19:32:19.115751  1002 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.115770  1002 net.cpp:150] Setting up L3_b4_relu\nI1209 19:32:19.115779  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115784  1002 net.cpp:165] Memory required for data: 1569281500\nI1209 19:32:19.115789  1002 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.115797  1002 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.115803  1002 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1209 19:32:19.115809  1002 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1209 19:32:19.115819  1002 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1209 19:32:19.115878  1002 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.115895  1002 
net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115907  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.115916  1002 net.cpp:165] Memory required for data: 1573377500\nI1209 19:32:19.115926  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1209 19:32:19.115952  1002 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1209 19:32:19.116027  1002 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1209 19:32:19.116048  1002 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1209 19:32:19.117122  1002 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1209 19:32:19.117142  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.117152  1002 net.cpp:165] Memory required for data: 1575425500\nI1209 19:32:19.117171  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1209 19:32:19.117193  1002 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1209 19:32:19.117207  1002 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI1209 19:32:19.117223  1002 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1209 19:32:19.117537  1002 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1209 19:32:19.117555  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.117565  1002 net.cpp:165] Memory required for data: 1577473500\nI1209 19:32:19.117588  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1209 19:32:19.117604  1002 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1209 19:32:19.117616  1002 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1209 19:32:19.117630  1002 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.117734  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1209 19:32:19.117933  1002 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1209 19:32:19.117952  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.117969  1002 net.cpp:165] Memory required for data: 1579521500\nI1209 19:32:19.117990  1002 layer_factory.hpp:77] Creating layer 
L3_b5_cbr1_dropout\nI1209 19:32:19.118006  1002 net.cpp:100] Creating Layer L3_b5_cbr1_dropout\nI1209 19:32:19.118018  1002 net.cpp:434] L3_b5_cbr1_dropout <- L3_b5_cbr1_bn_top\nI1209 19:32:19.118038  1002 net.cpp:395] L3_b5_cbr1_dropout -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.118093  1002 net.cpp:150] Setting up L3_b5_cbr1_dropout\nI1209 19:32:19.118110  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.118120  1002 net.cpp:165] Memory required for data: 1581569500\nI1209 19:32:19.118130  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1209 19:32:19.118150  1002 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1209 19:32:19.118162  1002 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1209 19:32:19.118177  1002 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.118196  1002 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1209 19:32:19.118209  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.118228  1002 net.cpp:165] Memory required for data: 1583617500\nI1209 19:32:19.118239  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1209 19:32:19.118266  1002 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1209 19:32:19.118279  1002 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1209 19:32:19.118299  1002 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1209 19:32:19.120666  1002 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1209 19:32:19.120687  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.120697  1002 net.cpp:165] Memory required for data: 1585665500\nI1209 19:32:19.120715  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1209 19:32:19.120733  1002 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1209 19:32:19.120746  1002 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1209 19:32:19.120769  1002 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1209 19:32:19.121098  1002 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1209 19:32:19.121124  1002 net.cpp:157] Top 
shape: 125 64 8 8 (512000)\nI1209 19:32:19.121134  1002 net.cpp:165] Memory required for data: 1587713500\nI1209 19:32:19.121156  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1209 19:32:19.121175  1002 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1209 19:32:19.121186  1002 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1209 19:32:19.121202  1002 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.121296  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1209 19:32:19.121500  1002 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1209 19:32:19.121518  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.121528  1002 net.cpp:165] Memory required for data: 1589761500\nI1209 19:32:19.121547  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_dropout\nI1209 19:32:19.121569  1002 net.cpp:100] Creating Layer L3_b5_cbr2_dropout\nI1209 19:32:19.121582  1002 net.cpp:434] L3_b5_cbr2_dropout <- L3_b5_cbr2_bn_top\nI1209 19:32:19.121598  1002 net.cpp:395] L3_b5_cbr2_dropout -> L3_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.121659  1002 net.cpp:150] Setting up L3_b5_cbr2_dropout\nI1209 19:32:19.121676  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.121685  1002 net.cpp:165] Memory required for data: 1591809500\nI1209 19:32:19.121696  1002 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1209 19:32:19.121713  1002 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1209 19:32:19.121726  1002 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1209 19:32:19.121737  1002 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1209 19:32:19.121753  1002 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1209 19:32:19.121809  1002 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1209 19:32:19.121829  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.121839  1002 net.cpp:165] Memory required for data: 1593857500\nI1209 19:32:19.121850  1002 
layer_factory.hpp:77] Creating layer L3_b5_relu\nI1209 19:32:19.121870  1002 net.cpp:100] Creating Layer L3_b5_relu\nI1209 19:32:19.121882  1002 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1209 19:32:19.121896  1002 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.121914  1002 net.cpp:150] Setting up L3_b5_relu\nI1209 19:32:19.121928  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.121937  1002 net.cpp:165] Memory required for data: 1595905500\nI1209 19:32:19.121948  1002 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.121970  1002 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.121984  1002 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1209 19:32:19.121999  1002 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1209 19:32:19.122020  1002 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1209 19:32:19.122117  1002 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.122138  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.122150  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.122159  1002 net.cpp:165] Memory required for data: 1600001500\nI1209 19:32:19.122169  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1209 19:32:19.122197  1002 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1209 19:32:19.122210  1002 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1209 19:32:19.122236  1002 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1209 19:32:19.123318  1002 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1209 19:32:19.123339  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.123349  1002 net.cpp:165] Memory required for data: 1602049500\nI1209 19:32:19.123368  1002 
layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1209 19:32:19.123395  1002 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1209 19:32:19.123409  1002 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1209 19:32:19.123435  1002 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1209 19:32:19.123738  1002 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1209 19:32:19.123756  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.123766  1002 net.cpp:165] Memory required for data: 1604097500\nI1209 19:32:19.123787  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1209 19:32:19.123805  1002 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1209 19:32:19.123817  1002 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1209 19:32:19.123842  1002 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.123940  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1209 19:32:19.124145  1002 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1209 19:32:19.124164  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.124174  1002 net.cpp:165] Memory required for data: 1606145500\nI1209 19:32:19.124192  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_dropout\nI1209 19:32:19.124210  1002 net.cpp:100] Creating Layer L3_b6_cbr1_dropout\nI1209 19:32:19.124222  1002 net.cpp:434] L3_b6_cbr1_dropout <- L3_b6_cbr1_bn_top\nI1209 19:32:19.124245  1002 net.cpp:395] L3_b6_cbr1_dropout -> L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.124308  1002 net.cpp:150] Setting up L3_b6_cbr1_dropout\nI1209 19:32:19.124325  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.124336  1002 net.cpp:165] Memory required for data: 1608193500\nI1209 19:32:19.124346  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1209 19:32:19.124361  1002 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1209 19:32:19.124372  1002 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1209 19:32:19.124392  1002 net.cpp:395] L3_b6_cbr1_relu -> 
L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.124413  1002 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1209 19:32:19.124426  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.124435  1002 net.cpp:165] Memory required for data: 1610241500\nI1209 19:32:19.124445  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1209 19:32:19.124467  1002 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1209 19:32:19.124480  1002 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1209 19:32:19.124497  1002 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1209 19:32:19.125613  1002 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1209 19:32:19.125633  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.125643  1002 net.cpp:165] Memory required for data: 1612289500\nI1209 19:32:19.125663  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1209 19:32:19.125687  1002 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1209 19:32:19.125700  1002 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1209 19:32:19.125715  1002 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1209 19:32:19.126047  1002 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1209 19:32:19.126067  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.126077  1002 net.cpp:165] Memory required for data: 1614337500\nI1209 19:32:19.126111  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1209 19:32:19.126137  1002 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1209 19:32:19.126152  1002 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1209 19:32:19.126168  1002 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.126272  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1209 19:32:19.126474  1002 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1209 19:32:19.126493  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.126502  1002 net.cpp:165] Memory required for data: 1616385500\nI1209 19:32:19.126521  1002 layer_factory.hpp:77] 
Creating layer L3_b6_cbr2_dropout\nI1209 19:32:19.126543  1002 net.cpp:100] Creating Layer L3_b6_cbr2_dropout\nI1209 19:32:19.126555  1002 net.cpp:434] L3_b6_cbr2_dropout <- L3_b6_cbr2_bn_top\nI1209 19:32:19.126574  1002 net.cpp:395] L3_b6_cbr2_dropout -> L3_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.126631  1002 net.cpp:150] Setting up L3_b6_cbr2_dropout\nI1209 19:32:19.126648  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.126657  1002 net.cpp:165] Memory required for data: 1618433500\nI1209 19:32:19.126669  1002 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1209 19:32:19.126690  1002 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1209 19:32:19.126703  1002 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1209 19:32:19.126716  1002 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1209 19:32:19.126732  1002 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1209 19:32:19.126787  1002 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1209 19:32:19.126806  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.126816  1002 net.cpp:165] Memory required for data: 1620481500\nI1209 19:32:19.126826  1002 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1209 19:32:19.126840  1002 net.cpp:100] Creating Layer L3_b6_relu\nI1209 19:32:19.126852  1002 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1209 19:32:19.126873  1002 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.126893  1002 net.cpp:150] Setting up L3_b6_relu\nI1209 19:32:19.126907  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.126916  1002 net.cpp:165] Memory required for data: 1622529500\nI1209 19:32:19.126926  1002 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.126940  1002 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.126951  1002 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- 
L3_b6_sum_eltwise_top\nI1209 19:32:19.126976  1002 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1209 19:32:19.126996  1002 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1209 19:32:19.127082  1002 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.127102  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.127116  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.127125  1002 net.cpp:165] Memory required for data: 1626625500\nI1209 19:32:19.127135  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1209 19:32:19.127157  1002 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1209 19:32:19.127171  1002 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1209 19:32:19.127197  1002 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1209 19:32:19.128284  1002 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1209 19:32:19.128304  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.128315  1002 net.cpp:165] Memory required for data: 1628673500\nI1209 19:32:19.128334  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1209 19:32:19.128356  1002 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1209 19:32:19.128371  1002 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1209 19:32:19.128387  1002 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1209 19:32:19.128723  1002 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1209 19:32:19.128743  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.128753  1002 net.cpp:165] Memory required for data: 1630721500\nI1209 19:32:19.128774  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1209 19:32:19.128793  1002 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1209 19:32:19.128803  1002 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1209 19:32:19.128818  1002 net.cpp:395] L3_b7_cbr1_scale -> 
L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.128921  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1209 19:32:19.129128  1002 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1209 19:32:19.129153  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.129163  1002 net.cpp:165] Memory required for data: 1632769500\nI1209 19:32:19.129182  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_dropout\nI1209 19:32:19.129199  1002 net.cpp:100] Creating Layer L3_b7_cbr1_dropout\nI1209 19:32:19.129210  1002 net.cpp:434] L3_b7_cbr1_dropout <- L3_b7_cbr1_bn_top\nI1209 19:32:19.129225  1002 net.cpp:395] L3_b7_cbr1_dropout -> L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.129287  1002 net.cpp:150] Setting up L3_b7_cbr1_dropout\nI1209 19:32:19.129304  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.129314  1002 net.cpp:165] Memory required for data: 1634817500\nI1209 19:32:19.129324  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1209 19:32:19.129340  1002 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1209 19:32:19.129351  1002 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1209 19:32:19.129365  1002 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.129384  1002 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1209 19:32:19.129400  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.129407  1002 net.cpp:165] Memory required for data: 1636865500\nI1209 19:32:19.129418  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1209 19:32:19.129446  1002 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1209 19:32:19.129459  1002 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1209 19:32:19.129477  1002 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1209 19:32:19.130538  1002 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1209 19:32:19.130559  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.130569  1002 net.cpp:165] Memory required for data: 1638913500\nI1209 19:32:19.130586  
1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1209 19:32:19.130610  1002 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1209 19:32:19.130622  1002 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1209 19:32:19.130645  1002 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1209 19:32:19.130965  1002 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1209 19:32:19.130985  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.130995  1002 net.cpp:165] Memory required for data: 1640961500\nI1209 19:32:19.131016  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1209 19:32:19.131034  1002 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1209 19:32:19.131047  1002 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1209 19:32:19.131067  1002 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.131165  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1209 19:32:19.131371  1002 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1209 19:32:19.131389  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.131399  1002 net.cpp:165] Memory required for data: 1643009500\nI1209 19:32:19.131417  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_dropout\nI1209 19:32:19.131435  1002 net.cpp:100] Creating Layer L3_b7_cbr2_dropout\nI1209 19:32:19.131448  1002 net.cpp:434] L3_b7_cbr2_dropout <- L3_b7_cbr2_bn_top\nI1209 19:32:19.131469  1002 net.cpp:395] L3_b7_cbr2_dropout -> L3_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.131525  1002 net.cpp:150] Setting up L3_b7_cbr2_dropout\nI1209 19:32:19.131552  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.131572  1002 net.cpp:165] Memory required for data: 1645057500\nI1209 19:32:19.131584  1002 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1209 19:32:19.131602  1002 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1209 19:32:19.131613  1002 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1209 19:32:19.131626  1002 net.cpp:434] L3_b7_sum_eltwise <- 
L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1209 19:32:19.131642  1002 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1209 19:32:19.131705  1002 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1209 19:32:19.131724  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.131734  1002 net.cpp:165] Memory required for data: 1647105500\nI1209 19:32:19.131745  1002 layer_factory.hpp:77] Creating layer L3_b7_relu\nI1209 19:32:19.131759  1002 net.cpp:100] Creating Layer L3_b7_relu\nI1209 19:32:19.131772  1002 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1209 19:32:19.131786  1002 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.131805  1002 net.cpp:150] Setting up L3_b7_relu\nI1209 19:32:19.131819  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.131829  1002 net.cpp:165] Memory required for data: 1649153500\nI1209 19:32:19.131839  1002 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.131857  1002 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.131870  1002 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1209 19:32:19.131886  1002 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1209 19:32:19.131906  1002 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1209 19:32:19.131992  1002 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.132017  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.132032  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.132041  1002 net.cpp:165] Memory required for data: 1653249500\nI1209 19:32:19.132052  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1209 19:32:19.132079  1002 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1209 19:32:19.132093  1002 net.cpp:434] L3_b8_cbr1_conv <- 
L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1209 19:32:19.132112  1002 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1209 19:32:19.133206  1002 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1209 19:32:19.133226  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.133235  1002 net.cpp:165] Memory required for data: 1655297500\nI1209 19:32:19.133253  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1209 19:32:19.133277  1002 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1209 19:32:19.133289  1002 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1209 19:32:19.133306  1002 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1209 19:32:19.133630  1002 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1209 19:32:19.133649  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.133658  1002 net.cpp:165] Memory required for data: 1657345500\nI1209 19:32:19.133680  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1209 19:32:19.133703  1002 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1209 19:32:19.133716  1002 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1209 19:32:19.133738  1002 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.133834  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1209 19:32:19.134049  1002 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1209 19:32:19.134068  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.134078  1002 net.cpp:165] Memory required for data: 1659393500\nI1209 19:32:19.134095  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_dropout\nI1209 19:32:19.134112  1002 net.cpp:100] Creating Layer L3_b8_cbr1_dropout\nI1209 19:32:19.134124  1002 net.cpp:434] L3_b8_cbr1_dropout <- L3_b8_cbr1_bn_top\nI1209 19:32:19.134145  1002 net.cpp:395] L3_b8_cbr1_dropout -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.134212  1002 net.cpp:150] Setting up L3_b8_cbr1_dropout\nI1209 19:32:19.134232  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 
19:32:19.134241  1002 net.cpp:165] Memory required for data: 1661441500\nI1209 19:32:19.134251  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1209 19:32:19.134271  1002 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1209 19:32:19.134284  1002 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1209 19:32:19.134300  1002 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.134320  1002 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1209 19:32:19.134335  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.134344  1002 net.cpp:165] Memory required for data: 1663489500\nI1209 19:32:19.134354  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1209 19:32:19.134376  1002 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1209 19:32:19.134388  1002 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1209 19:32:19.134412  1002 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1209 19:32:19.135488  1002 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1209 19:32:19.135509  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.135519  1002 net.cpp:165] Memory required for data: 1665537500\nI1209 19:32:19.135537  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1209 19:32:19.135555  1002 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1209 19:32:19.135567  1002 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1209 19:32:19.135591  1002 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1209 19:32:19.135918  1002 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1209 19:32:19.135942  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.135953  1002 net.cpp:165] Memory required for data: 1667585500\nI1209 19:32:19.135983  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1209 19:32:19.136000  1002 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1209 19:32:19.136013  1002 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1209 19:32:19.136029  1002 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top 
(in-place)\nI1209 19:32:19.136127  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1209 19:32:19.136332  1002 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1209 19:32:19.136350  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.136359  1002 net.cpp:165] Memory required for data: 1669633500\nI1209 19:32:19.136379  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_dropout\nI1209 19:32:19.136402  1002 net.cpp:100] Creating Layer L3_b8_cbr2_dropout\nI1209 19:32:19.136415  1002 net.cpp:434] L3_b8_cbr2_dropout <- L3_b8_cbr2_bn_top\nI1209 19:32:19.136430  1002 net.cpp:395] L3_b8_cbr2_dropout -> L3_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.136490  1002 net.cpp:150] Setting up L3_b8_cbr2_dropout\nI1209 19:32:19.136508  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.136518  1002 net.cpp:165] Memory required for data: 1671681500\nI1209 19:32:19.136531  1002 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1209 19:32:19.136548  1002 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1209 19:32:19.136559  1002 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1209 19:32:19.136571  1002 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1209 19:32:19.136587  1002 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1209 19:32:19.136644  1002 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1209 19:32:19.136665  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.136677  1002 net.cpp:165] Memory required for data: 1673729500\nI1209 19:32:19.136687  1002 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1209 19:32:19.136708  1002 net.cpp:100] Creating Layer L3_b8_relu\nI1209 19:32:19.136720  1002 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1209 19:32:19.136734  1002 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.136754  1002 net.cpp:150] Setting up L3_b8_relu\nI1209 19:32:19.136770  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 
19:32:19.136788  1002 net.cpp:165] Memory required for data: 1675777500\nI1209 19:32:19.136798  1002 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.136813  1002 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.136826  1002 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1209 19:32:19.136842  1002 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1209 19:32:19.136862  1002 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1209 19:32:19.136951  1002 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.136981  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.136996  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.137006  1002 net.cpp:165] Memory required for data: 1679873500\nI1209 19:32:19.137017  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1209 19:32:19.137043  1002 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1209 19:32:19.137058  1002 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1209 19:32:19.137082  1002 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1209 19:32:19.139601  1002 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1209 19:32:19.139623  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.139633  1002 net.cpp:165] Memory required for data: 1681921500\nI1209 19:32:19.139654  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1209 19:32:19.139673  1002 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1209 19:32:19.139686  1002 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1209 19:32:19.139709  1002 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1209 19:32:19.140061  1002 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1209 19:32:19.140089  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.140100  1002 
net.cpp:165] Memory required for data: 1683969500\nI1209 19:32:19.140125  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1209 19:32:19.140142  1002 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1209 19:32:19.140156  1002 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1209 19:32:19.140172  1002 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.140267  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1209 19:32:19.140476  1002 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1209 19:32:19.140496  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.140504  1002 net.cpp:165] Memory required for data: 1686017500\nI1209 19:32:19.140525  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_dropout\nI1209 19:32:19.140547  1002 net.cpp:100] Creating Layer L3_b9_cbr1_dropout\nI1209 19:32:19.140560  1002 net.cpp:434] L3_b9_cbr1_dropout <- L3_b9_cbr1_bn_top\nI1209 19:32:19.140575  1002 net.cpp:395] L3_b9_cbr1_dropout -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.140640  1002 net.cpp:150] Setting up L3_b9_cbr1_dropout\nI1209 19:32:19.140660  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.140669  1002 net.cpp:165] Memory required for data: 1688065500\nI1209 19:32:19.140681  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1209 19:32:19.140698  1002 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1209 19:32:19.140710  1002 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1209 19:32:19.140724  1002 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.140744  1002 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1209 19:32:19.140759  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.140769  1002 net.cpp:165] Memory required for data: 1690113500\nI1209 19:32:19.140781  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1209 19:32:19.140810  1002 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1209 19:32:19.140825  1002 net.cpp:434] 
L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1209 19:32:19.140842  1002 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1209 19:32:19.141947  1002 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1209 19:32:19.141973  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.141983  1002 net.cpp:165] Memory required for data: 1692161500\nI1209 19:32:19.142004  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1209 19:32:19.142027  1002 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1209 19:32:19.142041  1002 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1209 19:32:19.142058  1002 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1209 19:32:19.142372  1002 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1209 19:32:19.142391  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.142401  1002 net.cpp:165] Memory required for data: 1694209500\nI1209 19:32:19.142423  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1209 19:32:19.142443  1002 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1209 19:32:19.142455  1002 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1209 19:32:19.142470  1002 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.142573  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1209 19:32:19.142774  1002 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1209 19:32:19.142792  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.142990  1002 net.cpp:165] Memory required for data: 1696257500\nI1209 19:32:19.143015  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_dropout\nI1209 19:32:19.143034  1002 net.cpp:100] Creating Layer L3_b9_cbr2_dropout\nI1209 19:32:19.143046  1002 net.cpp:434] L3_b9_cbr2_dropout <- L3_b9_cbr2_bn_top\nI1209 19:32:19.143067  1002 net.cpp:395] L3_b9_cbr2_dropout -> L3_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.143126  1002 net.cpp:150] Setting up L3_b9_cbr2_dropout\nI1209 19:32:19.143144  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 
19:32:19.143153  1002 net.cpp:165] Memory required for data: 1698305500\nI1209 19:32:19.143165  1002 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1209 19:32:19.143188  1002 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1209 19:32:19.143200  1002 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1209 19:32:19.143214  1002 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1209 19:32:19.143230  1002 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1209 19:32:19.143296  1002 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1209 19:32:19.143316  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.143326  1002 net.cpp:165] Memory required for data: 1700353500\nI1209 19:32:19.143337  1002 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1209 19:32:19.143352  1002 net.cpp:100] Creating Layer L3_b9_relu\nI1209 19:32:19.143364  1002 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1209 19:32:19.143379  1002 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.143398  1002 net.cpp:150] Setting up L3_b9_relu\nI1209 19:32:19.143412  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.143422  1002 net.cpp:165] Memory required for data: 1702401500\nI1209 19:32:19.143434  1002 layer_factory.hpp:77] Creating layer post_pool\nI1209 19:32:19.143458  1002 net.cpp:100] Creating Layer post_pool\nI1209 19:32:19.143471  1002 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1209 19:32:19.143487  1002 net.cpp:408] post_pool -> post_pool\nI1209 19:32:19.143545  1002 net.cpp:150] Setting up post_pool\nI1209 19:32:19.143563  1002 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1209 19:32:19.143573  1002 net.cpp:165] Memory required for data: 1702433500\nI1209 19:32:19.143585  1002 layer_factory.hpp:77] Creating layer post_FC\nI1209 19:32:19.143718  1002 net.cpp:100] Creating Layer post_FC\nI1209 19:32:19.143734  1002 net.cpp:434] post_FC <- post_pool\nI1209 19:32:19.143759  1002 net.cpp:408] 
post_FC -> post_FC_top\nI1209 19:32:19.144114  1002 net.cpp:150] Setting up post_FC\nI1209 19:32:19.144134  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.144145  1002 net.cpp:165] Memory required for data: 1702438500\nI1209 19:32:19.144163  1002 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1209 19:32:19.144196  1002 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1209 19:32:19.144207  1002 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1209 19:32:19.144223  1002 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1209 19:32:19.144244  1002 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1209 19:32:19.144331  1002 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1209 19:32:19.144352  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.144366  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.144376  1002 net.cpp:165] Memory required for data: 1702448500\nI1209 19:32:19.144388  1002 layer_factory.hpp:77] Creating layer accuracy\nI1209 19:32:19.144465  1002 net.cpp:100] Creating Layer accuracy\nI1209 19:32:19.144482  1002 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1209 19:32:19.144495  1002 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1209 19:32:19.144511  1002 net.cpp:408] accuracy -> accuracy\nI1209 19:32:19.144593  1002 net.cpp:150] Setting up accuracy\nI1209 19:32:19.144611  1002 net.cpp:157] Top shape: (1)\nI1209 19:32:19.144621  1002 net.cpp:165] Memory required for data: 1702448504\nI1209 19:32:19.144634  1002 layer_factory.hpp:77] Creating layer loss\nI1209 19:32:19.144655  1002 net.cpp:100] Creating Layer loss\nI1209 19:32:19.144668  1002 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1209 19:32:19.144682  1002 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1209 19:32:19.144697  1002 net.cpp:408] loss -> loss\nI1209 19:32:19.148983  1002 layer_factory.hpp:77] Creating layer loss\nI1209 19:32:19.153085  
1002 net.cpp:150] Setting up loss\nI1209 19:32:19.153112  1002 net.cpp:157] Top shape: (1)\nI1209 19:32:19.153123  1002 net.cpp:160]     with loss weight 1\nI1209 19:32:19.153229  1002 net.cpp:165] Memory required for data: 1702448508\nI1209 19:32:19.153244  1002 net.cpp:226] loss needs backward computation.\nI1209 19:32:19.153255  1002 net.cpp:228] accuracy does not need backward computation.\nI1209 19:32:19.153268  1002 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1209 19:32:19.153278  1002 net.cpp:226] post_FC needs backward computation.\nI1209 19:32:19.153287  1002 net.cpp:226] post_pool needs backward computation.\nI1209 19:32:19.153296  1002 net.cpp:226] L3_b9_relu needs backward computation.\nI1209 19:32:19.153306  1002 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.153317  1002 net.cpp:226] L3_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.153326  1002 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.153336  1002 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.153345  1002 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.153355  1002 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.153365  1002 net.cpp:226] L3_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.153374  1002 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.153383  1002 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.153393  1002 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.153404  1002 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1209 19:32:19.153414  1002 net.cpp:226] L3_b8_relu needs backward computation.\nI1209 19:32:19.153424  1002 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.153434  1002 net.cpp:226] L3_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.153445  1002 
net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.153455  1002 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.153465  1002 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.153475  1002 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.153486  1002 net.cpp:226] L3_b8_cbr1_dropout needs backward computation.\nI1209 19:32:19.153494  1002 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.153515  1002 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.153527  1002 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.153537  1002 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1209 19:32:19.153548  1002 net.cpp:226] L3_b7_relu needs backward computation.\nI1209 19:32:19.153556  1002 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1209 19:32:19.153568  1002 net.cpp:226] L3_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.153578  1002 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.153586  1002 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.153597  1002 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.153606  1002 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.153615  1002 net.cpp:226] L3_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.153625  1002 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.153635  1002 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.153645  1002 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.153656  1002 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1209 19:32:19.153666  1002 net.cpp:226] L3_b6_relu needs backward computation.\nI1209 19:32:19.153676  1002 net.cpp:226] L3_b6_sum_eltwise needs backward 
computation.\nI1209 19:32:19.153687  1002 net.cpp:226] L3_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.153697  1002 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1209 19:32:19.153707  1002 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.153717  1002 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.153726  1002 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.153735  1002 net.cpp:226] L3_b6_cbr1_dropout needs backward computation.\nI1209 19:32:19.153746  1002 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.153756  1002 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.153766  1002 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.153776  1002 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1209 19:32:19.153786  1002 net.cpp:226] L3_b5_relu needs backward computation.\nI1209 19:32:19.153796  1002 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.153807  1002 net.cpp:226] L3_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.153817  1002 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.153827  1002 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.153838  1002 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.153848  1002 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.153857  1002 net.cpp:226] L3_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.153867  1002 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.153877  1002 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.153887  1002 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.153898  1002 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1209 19:32:19.153908  1002 
net.cpp:226] L3_b4_relu needs backward computation.\nI1209 19:32:19.153918  1002 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.153928  1002 net.cpp:226] L3_b4_cbr2_dropout needs backward computation.\nI1209 19:32:19.153939  1002 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.153949  1002 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1209 19:32:19.153967  1002 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.153980  1002 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1209 19:32:19.153990  1002 net.cpp:226] L3_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.154011  1002 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1209 19:32:19.154021  1002 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.154031  1002 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.154043  1002 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1209 19:32:19.154060  1002 net.cpp:226] L3_b3_relu needs backward computation.\nI1209 19:32:19.154070  1002 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.154081  1002 net.cpp:226] L3_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.154091  1002 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.154101  1002 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.154112  1002 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.154124  1002 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.154132  1002 net.cpp:226] L3_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.154142  1002 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.154153  1002 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.154163  1002 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.154175  
1002 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1209 19:32:19.154186  1002 net.cpp:226] L3_b2_relu needs backward computation.\nI1209 19:32:19.154194  1002 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.154206  1002 net.cpp:226] L3_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.154215  1002 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1209 19:32:19.154225  1002 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.154235  1002 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.154245  1002 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.154254  1002 net.cpp:226] L3_b2_cbr1_dropout needs backward computation.\nI1209 19:32:19.154264  1002 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.154274  1002 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1209 19:32:19.154285  1002 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.154295  1002 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1209 19:32:19.154306  1002 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1209 19:32:19.154319  1002 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1209 19:32:19.154328  1002 net.cpp:226] L3_b1_relu needs backward computation.\nI1209 19:32:19.154337  1002 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.154348  1002 net.cpp:226] L3_b1_pool needs backward computation.\nI1209 19:32:19.154359  1002 net.cpp:226] L3_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.154371  1002 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.154381  1002 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.154392  1002 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.154402  1002 net.cpp:226] L3_b1_cbr1_relu needs backward 
computation.\nI1209 19:32:19.154412  1002 net.cpp:226] L3_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.154422  1002 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.154431  1002 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.154443  1002 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.154453  1002 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1209 19:32:19.154464  1002 net.cpp:226] L2_b9_relu needs backward computation.\nI1209 19:32:19.154474  1002 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.154485  1002 net.cpp:226] L2_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.154495  1002 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.154517  1002 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.154531  1002 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.154541  1002 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.154551  1002 net.cpp:226] L2_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.154562  1002 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.154573  1002 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.154584  1002 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.154595  1002 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1209 19:32:19.154605  1002 net.cpp:226] L2_b8_relu needs backward computation.\nI1209 19:32:19.154615  1002 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.154626  1002 net.cpp:226] L2_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.154637  1002 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.154647  1002 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.154659  1002 net.cpp:226] 
L2_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.154670  1002 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.154680  1002 net.cpp:226] L2_b8_cbr1_dropout needs backward computation.\nI1209 19:32:19.154690  1002 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.154700  1002 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.154712  1002 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.154723  1002 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1209 19:32:19.154736  1002 net.cpp:226] L2_b7_relu needs backward computation.\nI1209 19:32:19.154747  1002 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1209 19:32:19.154757  1002 net.cpp:226] L2_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.154767  1002 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.154778  1002 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.154790  1002 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.154801  1002 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.154811  1002 net.cpp:226] L2_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.154822  1002 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.154834  1002 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.154844  1002 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.154856  1002 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1209 19:32:19.154867  1002 net.cpp:226] L2_b6_relu needs backward computation.\nI1209 19:32:19.154878  1002 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1209 19:32:19.154897  1002 net.cpp:226] L2_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.154908  1002 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1209 
19:32:19.154919  1002 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.154930  1002 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.154940  1002 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.154952  1002 net.cpp:226] L2_b6_cbr1_dropout needs backward computation.\nI1209 19:32:19.154973  1002 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.154985  1002 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.154995  1002 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.155006  1002 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1209 19:32:19.155016  1002 net.cpp:226] L2_b5_relu needs backward computation.\nI1209 19:32:19.155026  1002 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.155040  1002 net.cpp:226] L2_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.155059  1002 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.155071  1002 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.155081  1002 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.155092  1002 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.155102  1002 net.cpp:226] L2_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.155112  1002 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.155123  1002 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.155134  1002 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.155145  1002 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1209 19:32:19.155156  1002 net.cpp:226] L2_b4_relu needs backward computation.\nI1209 19:32:19.155166  1002 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.155179  1002 net.cpp:226] L2_b4_cbr2_dropout needs 
backward computation.\nI1209 19:32:19.155189  1002 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.155200  1002 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1209 19:32:19.155210  1002 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.155221  1002 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1209 19:32:19.155232  1002 net.cpp:226] L2_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.155242  1002 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1209 19:32:19.155252  1002 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.155263  1002 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.155274  1002 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1209 19:32:19.155285  1002 net.cpp:226] L2_b3_relu needs backward computation.\nI1209 19:32:19.155297  1002 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.155308  1002 net.cpp:226] L2_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.155318  1002 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.155329  1002 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.155340  1002 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.155351  1002 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.155362  1002 net.cpp:226] L2_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.155372  1002 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.155382  1002 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.155393  1002 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.155405  1002 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1209 19:32:19.155417  1002 net.cpp:226] L2_b2_relu needs backward computation.\nI1209 19:32:19.155427  1002 
net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.155439  1002 net.cpp:226] L2_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.155450  1002 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1209 19:32:19.155460  1002 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.155472  1002 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.155483  1002 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.155493  1002 net.cpp:226] L2_b2_cbr1_dropout needs backward computation.\nI1209 19:32:19.155503  1002 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.155513  1002 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1209 19:32:19.155524  1002 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.155535  1002 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1209 19:32:19.155545  1002 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1209 19:32:19.155558  1002 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1209 19:32:19.155578  1002 net.cpp:226] L2_b1_relu needs backward computation.\nI1209 19:32:19.155591  1002 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.155602  1002 net.cpp:226] L2_b1_pool needs backward computation.\nI1209 19:32:19.155614  1002 net.cpp:226] L2_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.155627  1002 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.155637  1002 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.155649  1002 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.155660  1002 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1209 19:32:19.155670  1002 net.cpp:226] L2_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.155683  1002 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.155694  
1002 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.155705  1002 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.155716  1002 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1209 19:32:19.155727  1002 net.cpp:226] L1_b9_relu needs backward computation.\nI1209 19:32:19.155738  1002 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.155750  1002 net.cpp:226] L1_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.155761  1002 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.155771  1002 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.155783  1002 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.155794  1002 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.155805  1002 net.cpp:226] L1_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.155815  1002 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.155825  1002 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.155838  1002 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.155848  1002 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1209 19:32:19.155861  1002 net.cpp:226] L1_b8_relu needs backward computation.\nI1209 19:32:19.155872  1002 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.155885  1002 net.cpp:226] L1_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.155895  1002 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.155905  1002 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.155916  1002 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.155927  1002 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.155938  1002 net.cpp:226] L1_b8_cbr1_dropout needs backward 
computation.\nI1209 19:32:19.155949  1002 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.155968  1002 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.155982  1002 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.155994  1002 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1209 19:32:19.156005  1002 net.cpp:226] L1_b7_relu needs backward computation.\nI1209 19:32:19.156016  1002 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1209 19:32:19.156028  1002 net.cpp:226] L1_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.156039  1002 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.156049  1002 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.156061  1002 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.156074  1002 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.156083  1002 net.cpp:226] L1_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.156093  1002 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.156105  1002 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.156126  1002 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.156139  1002 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1209 19:32:19.156150  1002 net.cpp:226] L1_b6_relu needs backward computation.\nI1209 19:32:19.156162  1002 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1209 19:32:19.156174  1002 net.cpp:226] L1_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.156186  1002 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1209 19:32:19.156196  1002 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.156208  1002 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.156219  1002 net.cpp:226] 
L1_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.156230  1002 net.cpp:226] L1_b6_cbr1_dropout needs backward computation.\nI1209 19:32:19.156240  1002 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.156251  1002 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.156262  1002 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.156275  1002 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1209 19:32:19.156285  1002 net.cpp:226] L1_b5_relu needs backward computation.\nI1209 19:32:19.156296  1002 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.156308  1002 net.cpp:226] L1_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.156319  1002 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.156330  1002 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.156342  1002 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.156354  1002 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.156366  1002 net.cpp:226] L1_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.156376  1002 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.156386  1002 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.156399  1002 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.156410  1002 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1209 19:32:19.156421  1002 net.cpp:226] L1_b4_relu needs backward computation.\nI1209 19:32:19.156432  1002 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.156445  1002 net.cpp:226] L1_b4_cbr2_dropout needs backward computation.\nI1209 19:32:19.156456  1002 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.156466  1002 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1209 
19:32:19.156477  1002 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.156489  1002 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1209 19:32:19.156500  1002 net.cpp:226] L1_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.156510  1002 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1209 19:32:19.156520  1002 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.156532  1002 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.156556  1002 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1209 19:32:19.156570  1002 net.cpp:226] L1_b3_relu needs backward computation.\nI1209 19:32:19.156582  1002 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.156594  1002 net.cpp:226] L1_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.156605  1002 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.156615  1002 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.156628  1002 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.156639  1002 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.156651  1002 net.cpp:226] L1_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.156661  1002 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.156682  1002 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.156694  1002 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.156707  1002 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1209 19:32:19.156718  1002 net.cpp:226] L1_b2_relu needs backward computation.\nI1209 19:32:19.156729  1002 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.156741  1002 net.cpp:226] L1_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.156754  1002 net.cpp:226] L1_b2_cbr2_scale 
needs backward computation.\nI1209 19:32:19.156764  1002 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.156775  1002 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.156787  1002 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.156798  1002 net.cpp:226] L1_b2_cbr1_dropout needs backward computation.\nI1209 19:32:19.156810  1002 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.156821  1002 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1209 19:32:19.156832  1002 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.156844  1002 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1209 19:32:19.156855  1002 net.cpp:226] L1_b1_relu needs backward computation.\nI1209 19:32:19.156867  1002 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.156880  1002 net.cpp:226] L1_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.156891  1002 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.156903  1002 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.156914  1002 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.156925  1002 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1209 19:32:19.156935  1002 net.cpp:226] L1_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.156946  1002 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.156965  1002 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.156977  1002 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.156988  1002 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1209 19:32:19.156999  1002 net.cpp:226] pre_relu needs backward computation.\nI1209 19:32:19.157011  1002 net.cpp:226] pre_scale needs backward computation.\nI1209 19:32:19.157019  1002 net.cpp:226] pre_bn needs 
backward computation.\nI1209 19:32:19.157030  1002 net.cpp:226] pre_conv needs backward computation.\nI1209 19:32:19.157042  1002 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1209 19:32:19.157055  1002 net.cpp:228] dataLayer does not need backward computation.\nI1209 19:32:19.157064  1002 net.cpp:270] This network produces output accuracy\nI1209 19:32:19.157078  1002 net.cpp:270] This network produces output loss\nI1209 19:32:19.157526  1002 net.cpp:283] Network initialization done.\nI1209 19:32:19.167508  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:19.167562  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:19.167640  1002 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1209 19:32:19.168120  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1209 19:32:19.168146  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI1209 19:32:19.168165  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI1209 19:32:19.168186  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI1209 19:32:19.168210  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI1209 19:32:19.168241  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI1209 19:32:19.168267  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI1209 19:32:19.168287  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L1_b3_cbr2_bn\nI1209 19:32:19.168308  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI1209 19:32:19.168328  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI1209 19:32:19.168349  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI1209 19:32:19.168368  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI1209 19:32:19.168390  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI1209 19:32:19.168407  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI1209 19:32:19.168428  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI1209 19:32:19.168449  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI1209 19:32:19.168469  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI1209 19:32:19.168488  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI1209 19:32:19.168509  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI1209 19:32:19.168527  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI1209 19:32:19.168546  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI1209 19:32:19.168565  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI1209 19:32:19.168591  1002 net.cpp:322] The NetState phase (1) differed from 
the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI1209 19:32:19.168612  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI1209 19:32:19.168632  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI1209 19:32:19.168653  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI1209 19:32:19.168673  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI1209 19:32:19.168691  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI1209 19:32:19.168712  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI1209 19:32:19.168731  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI1209 19:32:19.168751  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI1209 19:32:19.168771  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI1209 19:32:19.168792  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI1209 19:32:19.168809  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI1209 19:32:19.168831  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI1209 19:32:19.168859  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI1209 19:32:19.168884  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI1209 19:32:19.168902  1002 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI1209 19:32:19.168922  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI1209 19:32:19.168939  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI1209 19:32:19.168972  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI1209 19:32:19.168994  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI1209 19:32:19.169013  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI1209 19:32:19.169033  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI1209 19:32:19.169052  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI1209 19:32:19.169071  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI1209 19:32:19.169091  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI1209 19:32:19.169111  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI1209 19:32:19.169131  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI1209 19:32:19.169152  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI1209 19:32:19.169172  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI1209 19:32:19.169189  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b7_cbr2_bn\nI1209 19:32:19.169209  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI1209 19:32:19.169226  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI1209 19:32:19.169246  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI1209 19:32:19.169265  1002 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI1209 19:32:19.171242  1002 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n 
 top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  
top: \"L1_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: 
\"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_dropout\"\n  
type: \"Dropout\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  dropout_param {\n    
dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  
top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  
scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n 
   stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: 
\"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  dropout_para\nI1209 19:32:19.173387  1002 layer_factory.hpp:77] Creating layer dataLayer\nI1209 19:32:19.173667  1002 net.cpp:100] Creating Layer dataLayer\nI1209 19:32:19.173696  1002 net.cpp:408] dataLayer -> data_top\nI1209 19:32:19.173722  1002 net.cpp:408] dataLayer -> label\nI1209 
19:32:19.173744  1002 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1209 19:32:19.250725  1010 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1209 19:32:19.250995  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:19.261754  1002 net.cpp:150] Setting up dataLayer\nI1209 19:32:19.261777  1002 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1209 19:32:19.261829  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.261842  1002 net.cpp:165] Memory required for data: 1536500\nI1209 19:32:19.261853  1002 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1209 19:32:19.261869  1002 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1209 19:32:19.261879  1002 net.cpp:434] label_dataLayer_1_split <- label\nI1209 19:32:19.261898  1002 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1209 19:32:19.261924  1002 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1209 19:32:19.262086  1002 net.cpp:150] Setting up label_dataLayer_1_split\nI1209 19:32:19.262109  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.262122  1002 net.cpp:157] Top shape: 125 (125)\nI1209 19:32:19.262135  1002 net.cpp:165] Memory required for data: 1537500\nI1209 19:32:19.262145  1002 layer_factory.hpp:77] Creating layer pre_conv\nI1209 19:32:19.262178  1002 net.cpp:100] Creating Layer pre_conv\nI1209 19:32:19.262195  1002 net.cpp:434] pre_conv <- data_top\nI1209 19:32:19.262219  1002 net.cpp:408] pre_conv -> pre_conv_top\nI1209 19:32:19.262696  1002 net.cpp:150] Setting up pre_conv\nI1209 19:32:19.262718  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.262732  1002 net.cpp:165] Memory required for data: 9729500\nI1209 19:32:19.262763  1002 layer_factory.hpp:77] Creating layer pre_bn\nI1209 19:32:19.262821  1002 net.cpp:100] Creating Layer pre_bn\nI1209 19:32:19.262835  1002 net.cpp:434] pre_bn <- pre_conv_top\nI1209 19:32:19.262856  1002 net.cpp:408] 
pre_bn -> pre_bn_top\nI1209 19:32:19.263222  1002 net.cpp:150] Setting up pre_bn\nI1209 19:32:19.263242  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.263252  1002 net.cpp:165] Memory required for data: 17921500\nI1209 19:32:19.263279  1002 layer_factory.hpp:77] Creating layer pre_scale\nI1209 19:32:19.263299  1002 net.cpp:100] Creating Layer pre_scale\nI1209 19:32:19.263310  1002 net.cpp:434] pre_scale <- pre_bn_top\nI1209 19:32:19.263331  1002 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1209 19:32:19.263928  1002 layer_factory.hpp:77] Creating layer pre_scale\nI1209 19:32:19.264138  1002 net.cpp:150] Setting up pre_scale\nI1209 19:32:19.264158  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.264174  1002 net.cpp:165] Memory required for data: 26113500\nI1209 19:32:19.264192  1002 layer_factory.hpp:77] Creating layer pre_relu\nI1209 19:32:19.264207  1002 net.cpp:100] Creating Layer pre_relu\nI1209 19:32:19.264219  1002 net.cpp:434] pre_relu <- pre_bn_top\nI1209 19:32:19.264241  1002 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1209 19:32:19.264261  1002 net.cpp:150] Setting up pre_relu\nI1209 19:32:19.264276  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.264286  1002 net.cpp:165] Memory required for data: 34305500\nI1209 19:32:19.264294  1002 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1209 19:32:19.264307  1002 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1209 19:32:19.264318  1002 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1209 19:32:19.264338  1002 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1209 19:32:19.264358  1002 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1209 19:32:19.264457  1002 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1209 19:32:19.264477  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.264490  1002 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1209 19:32:19.264499  1002 net.cpp:165] Memory required for data: 50689500\nI1209 19:32:19.264509  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1209 19:32:19.264533  1002 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1209 19:32:19.264545  1002 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1209 19:32:19.264576  1002 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1209 19:32:19.265221  1002 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1209 19:32:19.265244  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.265254  1002 net.cpp:165] Memory required for data: 58881500\nI1209 19:32:19.265275  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1209 19:32:19.265300  1002 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1209 19:32:19.265312  1002 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1209 19:32:19.265328  1002 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1209 19:32:19.265662  1002 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1209 19:32:19.265686  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.265697  1002 net.cpp:165] Memory required for data: 67073500\nI1209 19:32:19.265717  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1209 19:32:19.265734  1002 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1209 19:32:19.265745  1002 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1209 19:32:19.265761  1002 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.265859  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1209 19:32:19.266074  1002 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1209 19:32:19.266094  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.266104  1002 net.cpp:165] Memory required for data: 75265500\nI1209 19:32:19.266121  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_dropout\nI1209 19:32:19.266144  1002 net.cpp:100] Creating Layer L1_b1_cbr1_dropout\nI1209 19:32:19.266155  1002 
net.cpp:434] L1_b1_cbr1_dropout <- L1_b1_cbr1_bn_top\nI1209 19:32:19.266170  1002 net.cpp:395] L1_b1_cbr1_dropout -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.266233  1002 net.cpp:150] Setting up L1_b1_cbr1_dropout\nI1209 19:32:19.266252  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.266263  1002 net.cpp:165] Memory required for data: 83457500\nI1209 19:32:19.266273  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1209 19:32:19.266288  1002 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1209 19:32:19.266299  1002 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1209 19:32:19.266319  1002 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.266338  1002 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1209 19:32:19.266352  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.266361  1002 net.cpp:165] Memory required for data: 91649500\nI1209 19:32:19.266371  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1209 19:32:19.266391  1002 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1209 19:32:19.266402  1002 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1209 19:32:19.266418  1002 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1209 19:32:19.266821  1002 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1209 19:32:19.266841  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.266851  1002 net.cpp:165] Memory required for data: 99841500\nI1209 19:32:19.266870  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1209 19:32:19.266891  1002 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1209 19:32:19.266904  1002 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1209 19:32:19.266919  1002 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1209 19:32:19.267289  1002 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1209 19:32:19.267308  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.267318  1002 net.cpp:165] Memory required for data: 108033500\nI1209 
19:32:19.267356  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1209 19:32:19.267381  1002 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1209 19:32:19.267397  1002 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1209 19:32:19.267421  1002 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.267632  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1209 19:32:19.267853  1002 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1209 19:32:19.267873  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.267882  1002 net.cpp:165] Memory required for data: 116225500\nI1209 19:32:19.267899  1002 layer_factory.hpp:77] Creating layer L1_b1_cbr2_dropout\nI1209 19:32:19.267916  1002 net.cpp:100] Creating Layer L1_b1_cbr2_dropout\nI1209 19:32:19.267927  1002 net.cpp:434] L1_b1_cbr2_dropout <- L1_b1_cbr2_bn_top\nI1209 19:32:19.267947  1002 net.cpp:395] L1_b1_cbr2_dropout -> L1_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.268016  1002 net.cpp:150] Setting up L1_b1_cbr2_dropout\nI1209 19:32:19.268036  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.268051  1002 net.cpp:165] Memory required for data: 124417500\nI1209 19:32:19.268061  1002 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1209 19:32:19.268080  1002 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1209 19:32:19.268093  1002 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1209 19:32:19.268106  1002 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1209 19:32:19.268126  1002 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1209 19:32:19.268182  1002 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1209 19:32:19.268201  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.268211  1002 net.cpp:165] Memory required for data: 132609500\nI1209 19:32:19.268221  1002 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1209 19:32:19.268234  1002 net.cpp:100] Creating Layer L1_b1_relu\nI1209 
19:32:19.268244  1002 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1209 19:32:19.268259  1002 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.268286  1002 net.cpp:150] Setting up L1_b1_relu\nI1209 19:32:19.268302  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.268311  1002 net.cpp:165] Memory required for data: 140801500\nI1209 19:32:19.268321  1002 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.268339  1002 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.268349  1002 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1209 19:32:19.268365  1002 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1209 19:32:19.268388  1002 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1209 19:32:19.268476  1002 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1209 19:32:19.268496  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.268508  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.268517  1002 net.cpp:165] Memory required for data: 157185500\nI1209 19:32:19.268527  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1209 19:32:19.268548  1002 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1209 19:32:19.268559  1002 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1209 19:32:19.268582  1002 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1209 19:32:19.268987  1002 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1209 19:32:19.269006  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.269017  1002 net.cpp:165] Memory required for data: 165377500\nI1209 19:32:19.269035  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1209 19:32:19.269052  1002 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1209 
19:32:19.269064  1002 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1209 19:32:19.269085  1002 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1209 19:32:19.269435  1002 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1209 19:32:19.269456  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.269470  1002 net.cpp:165] Memory required for data: 173569500\nI1209 19:32:19.269492  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1209 19:32:19.269513  1002 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1209 19:32:19.269526  1002 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1209 19:32:19.269542  1002 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.269637  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1209 19:32:19.269850  1002 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1209 19:32:19.269870  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.269881  1002 net.cpp:165] Memory required for data: 181761500\nI1209 19:32:19.269897  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_dropout\nI1209 19:32:19.269918  1002 net.cpp:100] Creating Layer L1_b2_cbr1_dropout\nI1209 19:32:19.269930  1002 net.cpp:434] L1_b2_cbr1_dropout <- L1_b2_cbr1_bn_top\nI1209 19:32:19.269945  1002 net.cpp:395] L1_b2_cbr1_dropout -> L1_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.270016  1002 net.cpp:150] Setting up L1_b2_cbr1_dropout\nI1209 19:32:19.270035  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.270045  1002 net.cpp:165] Memory required for data: 189953500\nI1209 19:32:19.270054  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1209 19:32:19.270067  1002 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1209 19:32:19.270078  1002 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1209 19:32:19.270098  1002 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.270117  1002 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1209 19:32:19.270133  1002 
net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.270141  1002 net.cpp:165] Memory required for data: 198145500\nI1209 19:32:19.270153  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1209 19:32:19.270174  1002 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1209 19:32:19.270185  1002 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1209 19:32:19.270211  1002 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1209 19:32:19.270694  1002 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1209 19:32:19.270714  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.270725  1002 net.cpp:165] Memory required for data: 206337500\nI1209 19:32:19.270741  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1209 19:32:19.270766  1002 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1209 19:32:19.270779  1002 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1209 19:32:19.270800  1002 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1209 19:32:19.271169  1002 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1209 19:32:19.271190  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.271200  1002 net.cpp:165] Memory required for data: 214529500\nI1209 19:32:19.271229  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1209 19:32:19.271255  1002 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1209 19:32:19.271271  1002 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1209 19:32:19.271296  1002 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.271396  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1209 19:32:19.271618  1002 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1209 19:32:19.271639  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.271649  1002 net.cpp:165] Memory required for data: 222721500\nI1209 19:32:19.271670  1002 layer_factory.hpp:77] Creating layer L1_b2_cbr2_dropout\nI1209 19:32:19.271693  1002 net.cpp:100] Creating Layer L1_b2_cbr2_dropout\nI1209 
19:32:19.271705  1002 net.cpp:434] L1_b2_cbr2_dropout <- L1_b2_cbr2_bn_top\nI1209 19:32:19.271724  1002 net.cpp:395] L1_b2_cbr2_dropout -> L1_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.271790  1002 net.cpp:150] Setting up L1_b2_cbr2_dropout\nI1209 19:32:19.271808  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.271831  1002 net.cpp:165] Memory required for data: 230913500\nI1209 19:32:19.271845  1002 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1209 19:32:19.271860  1002 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1209 19:32:19.271872  1002 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1209 19:32:19.271884  1002 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1209 19:32:19.271908  1002 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1209 19:32:19.271977  1002 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1209 19:32:19.271996  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.272006  1002 net.cpp:165] Memory required for data: 239105500\nI1209 19:32:19.272017  1002 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1209 19:32:19.272033  1002 net.cpp:100] Creating Layer L1_b2_relu\nI1209 19:32:19.272045  1002 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1209 19:32:19.272059  1002 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.272081  1002 net.cpp:150] Setting up L1_b2_relu\nI1209 19:32:19.272095  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.272104  1002 net.cpp:165] Memory required for data: 247297500\nI1209 19:32:19.272117  1002 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.272137  1002 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.272148  1002 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1209 19:32:19.272163  1002 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1209 19:32:19.272182  1002 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1209 19:32:19.272284  1002 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1209 19:32:19.272303  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.272316  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.272325  1002 net.cpp:165] Memory required for data: 263681500\nI1209 19:32:19.272336  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1209 19:32:19.272354  1002 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1209 19:32:19.272367  1002 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1209 19:32:19.272392  1002 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1209 19:32:19.272848  1002 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1209 19:32:19.272868  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.272877  1002 net.cpp:165] Memory required for data: 271873500\nI1209 19:32:19.272898  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1209 19:32:19.272919  1002 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1209 19:32:19.272934  1002 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1209 19:32:19.272964  1002 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1209 19:32:19.273336  1002 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1209 19:32:19.273355  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.273365  1002 net.cpp:165] Memory required for data: 280065500\nI1209 19:32:19.273389  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1209 19:32:19.273447  1002 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1209 19:32:19.273463  1002 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1209 19:32:19.273481  1002 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.273586  1002 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_scale\nI1209 19:32:19.273813  1002 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1209 19:32:19.273833  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.273843  1002 net.cpp:165] Memory required for data: 288257500\nI1209 19:32:19.273859  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_dropout\nI1209 19:32:19.273875  1002 net.cpp:100] Creating Layer L1_b3_cbr1_dropout\nI1209 19:32:19.273886  1002 net.cpp:434] L1_b3_cbr1_dropout <- L1_b3_cbr1_bn_top\nI1209 19:32:19.273918  1002 net.cpp:395] L1_b3_cbr1_dropout -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.273995  1002 net.cpp:150] Setting up L1_b3_cbr1_dropout\nI1209 19:32:19.274016  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.274027  1002 net.cpp:165] Memory required for data: 296449500\nI1209 19:32:19.274039  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1209 19:32:19.274054  1002 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1209 19:32:19.274065  1002 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1209 19:32:19.274080  1002 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.274106  1002 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1209 19:32:19.274121  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.274130  1002 net.cpp:165] Memory required for data: 304641500\nI1209 19:32:19.274143  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1209 19:32:19.274163  1002 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1209 19:32:19.274174  1002 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1209 19:32:19.274195  1002 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1209 19:32:19.274632  1002 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1209 19:32:19.274654  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.274664  1002 net.cpp:165] Memory required for data: 312833500\nI1209 19:32:19.274685  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1209 19:32:19.274709  1002 
net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1209 19:32:19.274726  1002 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1209 19:32:19.274747  1002 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1209 19:32:19.275112  1002 net.cpp:150] Setting up L1_b3_cbr2_bn\nI1209 19:32:19.275135  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.275144  1002 net.cpp:165] Memory required for data: 321025500\nI1209 19:32:19.275169  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1209 19:32:19.275192  1002 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1209 19:32:19.275205  1002 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1209 19:32:19.275228  1002 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.275333  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1209 19:32:19.275593  1002 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1209 19:32:19.275622  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.275634  1002 net.cpp:165] Memory required for data: 329217500\nI1209 19:32:19.275652  1002 layer_factory.hpp:77] Creating layer L1_b3_cbr2_dropout\nI1209 19:32:19.275671  1002 net.cpp:100] Creating Layer L1_b3_cbr2_dropout\nI1209 19:32:19.275683  1002 net.cpp:434] L1_b3_cbr2_dropout <- L1_b3_cbr2_bn_top\nI1209 19:32:19.275696  1002 net.cpp:395] L1_b3_cbr2_dropout -> L1_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.275763  1002 net.cpp:150] Setting up L1_b3_cbr2_dropout\nI1209 19:32:19.275784  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.275794  1002 net.cpp:165] Memory required for data: 337409500\nI1209 19:32:19.275805  1002 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1209 19:32:19.275822  1002 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1209 19:32:19.275833  1002 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1209 19:32:19.275849  1002 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1209 19:32:19.275872  1002 
net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1209 19:32:19.275934  1002 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1209 19:32:19.275952  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.275974  1002 net.cpp:165] Memory required for data: 345601500\nI1209 19:32:19.275985  1002 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1209 19:32:19.276016  1002 net.cpp:100] Creating Layer L1_b3_relu\nI1209 19:32:19.276031  1002 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1209 19:32:19.276046  1002 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.276063  1002 net.cpp:150] Setting up L1_b3_relu\nI1209 19:32:19.276091  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.276101  1002 net.cpp:165] Memory required for data: 353793500\nI1209 19:32:19.276110  1002 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.276127  1002 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.276139  1002 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1209 19:32:19.276156  1002 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1209 19:32:19.276176  1002 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1209 19:32:19.276275  1002 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1209 19:32:19.276295  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.276307  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.276319  1002 net.cpp:165] Memory required for data: 370177500\nI1209 19:32:19.276331  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1209 19:32:19.276360  1002 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1209 19:32:19.276371  1002 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1209 19:32:19.276398  1002 
net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1209 19:32:19.276849  1002 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1209 19:32:19.276870  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.276882  1002 net.cpp:165] Memory required for data: 378369500\nI1209 19:32:19.276901  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1209 19:32:19.276927  1002 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1209 19:32:19.276940  1002 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1209 19:32:19.276963  1002 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1209 19:32:19.277338  1002 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1209 19:32:19.277356  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.277369  1002 net.cpp:165] Memory required for data: 386561500\nI1209 19:32:19.277390  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1209 19:32:19.277410  1002 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1209 19:32:19.277420  1002 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1209 19:32:19.277436  1002 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.277561  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1209 19:32:19.277791  1002 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1209 19:32:19.277812  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.277822  1002 net.cpp:165] Memory required for data: 394753500\nI1209 19:32:19.277842  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_dropout\nI1209 19:32:19.277859  1002 net.cpp:100] Creating Layer L1_b4_cbr1_dropout\nI1209 19:32:19.277870  1002 net.cpp:434] L1_b4_cbr1_dropout <- L1_b4_cbr1_bn_top\nI1209 19:32:19.277894  1002 net.cpp:395] L1_b4_cbr1_dropout -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.277952  1002 net.cpp:150] Setting up L1_b4_cbr1_dropout\nI1209 19:32:19.277982  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.277997  1002 net.cpp:165] Memory required for data: 
402945500\nI1209 19:32:19.278007  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1209 19:32:19.278021  1002 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1209 19:32:19.278039  1002 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1209 19:32:19.278054  1002 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.278076  1002 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1209 19:32:19.278090  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.278100  1002 net.cpp:165] Memory required for data: 411137500\nI1209 19:32:19.278112  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1209 19:32:19.278132  1002 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1209 19:32:19.278144  1002 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1209 19:32:19.278187  1002 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1209 19:32:19.278653  1002 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1209 19:32:19.278673  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.278687  1002 net.cpp:165] Memory required for data: 419329500\nI1209 19:32:19.278704  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1209 19:32:19.278724  1002 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1209 19:32:19.278738  1002 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1209 19:32:19.278753  1002 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1209 19:32:19.279145  1002 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1209 19:32:19.279165  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.279175  1002 net.cpp:165] Memory required for data: 427521500\nI1209 19:32:19.279204  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1209 19:32:19.279222  1002 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1209 19:32:19.279233  1002 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1209 19:32:19.279255  1002 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.279363  1002 
layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1209 19:32:19.279613  1002 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1209 19:32:19.279633  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.279642  1002 net.cpp:165] Memory required for data: 435713500\nI1209 19:32:19.279660  1002 layer_factory.hpp:77] Creating layer L1_b4_cbr2_dropout\nI1209 19:32:19.279688  1002 net.cpp:100] Creating Layer L1_b4_cbr2_dropout\nI1209 19:32:19.279706  1002 net.cpp:434] L1_b4_cbr2_dropout <- L1_b4_cbr2_bn_top\nI1209 19:32:19.279721  1002 net.cpp:395] L1_b4_cbr2_dropout -> L1_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.279788  1002 net.cpp:150] Setting up L1_b4_cbr2_dropout\nI1209 19:32:19.279808  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.279817  1002 net.cpp:165] Memory required for data: 443905500\nI1209 19:32:19.279827  1002 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1209 19:32:19.279842  1002 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1209 19:32:19.279857  1002 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1209 19:32:19.279870  1002 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1209 19:32:19.279903  1002 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1209 19:32:19.279974  1002 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1209 19:32:19.279992  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.280001  1002 net.cpp:165] Memory required for data: 452097500\nI1209 19:32:19.280011  1002 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1209 19:32:19.280025  1002 net.cpp:100] Creating Layer L1_b4_relu\nI1209 19:32:19.280035  1002 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1209 19:32:19.280055  1002 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.280073  1002 net.cpp:150] Setting up L1_b4_relu\nI1209 19:32:19.280087  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.280097  1002 net.cpp:165] Memory 
required for data: 460289500\nI1209 19:32:19.280107  1002 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.280120  1002 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.280130  1002 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1209 19:32:19.280145  1002 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1209 19:32:19.280164  1002 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1209 19:32:19.280247  1002 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1209 19:32:19.280267  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.280278  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.280287  1002 net.cpp:165] Memory required for data: 476673500\nI1209 19:32:19.280297  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1209 19:32:19.280328  1002 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1209 19:32:19.280340  1002 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1209 19:32:19.280362  1002 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1209 19:32:19.280786  1002 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1209 19:32:19.280805  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.280815  1002 net.cpp:165] Memory required for data: 484865500\nI1209 19:32:19.280860  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1209 19:32:19.280879  1002 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1209 19:32:19.280892  1002 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1209 19:32:19.280913  1002 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1209 19:32:19.281239  1002 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1209 19:32:19.281258  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.281268  1002 net.cpp:165] Memory required for 
data: 493057500\nI1209 19:32:19.281289  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1209 19:32:19.281312  1002 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1209 19:32:19.281324  1002 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1209 19:32:19.281340  1002 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.281441  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1209 19:32:19.281651  1002 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1209 19:32:19.281671  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.281679  1002 net.cpp:165] Memory required for data: 501249500\nI1209 19:32:19.281697  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_dropout\nI1209 19:32:19.281713  1002 net.cpp:100] Creating Layer L1_b5_cbr1_dropout\nI1209 19:32:19.281725  1002 net.cpp:434] L1_b5_cbr1_dropout <- L1_b5_cbr1_bn_top\nI1209 19:32:19.281746  1002 net.cpp:395] L1_b5_cbr1_dropout -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.281803  1002 net.cpp:150] Setting up L1_b5_cbr1_dropout\nI1209 19:32:19.281826  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.281836  1002 net.cpp:165] Memory required for data: 509441500\nI1209 19:32:19.281847  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1209 19:32:19.281860  1002 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1209 19:32:19.281872  1002 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1209 19:32:19.281885  1002 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.281905  1002 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1209 19:32:19.281919  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.281929  1002 net.cpp:165] Memory required for data: 517633500\nI1209 19:32:19.281939  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1209 19:32:19.281973  1002 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1209 19:32:19.281988  1002 net.cpp:434] L1_b5_cbr2_conv <- 
L1_b5_cbr1_bn_top\nI1209 19:32:19.282006  1002 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1209 19:32:19.282419  1002 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1209 19:32:19.282438  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.282449  1002 net.cpp:165] Memory required for data: 525825500\nI1209 19:32:19.282465  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1209 19:32:19.282486  1002 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1209 19:32:19.282500  1002 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1209 19:32:19.282521  1002 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1209 19:32:19.282853  1002 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1209 19:32:19.282872  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.282882  1002 net.cpp:165] Memory required for data: 534017500\nI1209 19:32:19.282902  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1209 19:32:19.282918  1002 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1209 19:32:19.282929  1002 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1209 19:32:19.282950  1002 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.283077  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1209 19:32:19.283290  1002 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1209 19:32:19.283315  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283325  1002 net.cpp:165] Memory required for data: 542209500\nI1209 19:32:19.283344  1002 layer_factory.hpp:77] Creating layer L1_b5_cbr2_dropout\nI1209 19:32:19.283360  1002 net.cpp:100] Creating Layer L1_b5_cbr2_dropout\nI1209 19:32:19.283372  1002 net.cpp:434] L1_b5_cbr2_dropout <- L1_b5_cbr2_bn_top\nI1209 19:32:19.283386  1002 net.cpp:395] L1_b5_cbr2_dropout -> L1_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.283447  1002 net.cpp:150] Setting up L1_b5_cbr2_dropout\nI1209 19:32:19.283465  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283475  1002 
net.cpp:165] Memory required for data: 550401500\nI1209 19:32:19.283485  1002 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1209 19:32:19.283512  1002 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1209 19:32:19.283525  1002 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1209 19:32:19.283540  1002 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1209 19:32:19.283558  1002 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1209 19:32:19.283617  1002 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1209 19:32:19.283634  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283644  1002 net.cpp:165] Memory required for data: 558593500\nI1209 19:32:19.283655  1002 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1209 19:32:19.283670  1002 net.cpp:100] Creating Layer L1_b5_relu\nI1209 19:32:19.283682  1002 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1209 19:32:19.283696  1002 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.283715  1002 net.cpp:150] Setting up L1_b5_relu\nI1209 19:32:19.283730  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283740  1002 net.cpp:165] Memory required for data: 566785500\nI1209 19:32:19.283749  1002 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.283768  1002 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.283779  1002 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1209 19:32:19.283795  1002 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1209 19:32:19.283814  1002 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1209 19:32:19.283901  1002 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1209 19:32:19.283922  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283936  
1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.283943  1002 net.cpp:165] Memory required for data: 583169500\nI1209 19:32:19.283954  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1209 19:32:19.283988  1002 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1209 19:32:19.284001  1002 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1209 19:32:19.284024  1002 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1209 19:32:19.284430  1002 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1209 19:32:19.284449  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.284459  1002 net.cpp:165] Memory required for data: 591361500\nI1209 19:32:19.284476  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1209 19:32:19.284492  1002 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1209 19:32:19.284505  1002 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1209 19:32:19.284521  1002 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1209 19:32:19.284870  1002 net.cpp:150] Setting up L1_b6_cbr1_bn\nI1209 19:32:19.284889  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.284899  1002 net.cpp:165] Memory required for data: 599553500\nI1209 19:32:19.284919  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1209 19:32:19.284955  1002 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1209 19:32:19.284976  1002 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1209 19:32:19.284994  1002 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.285104  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1209 19:32:19.285326  1002 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1209 19:32:19.285346  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.285356  1002 net.cpp:165] Memory required for data: 607745500\nI1209 19:32:19.285372  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_dropout\nI1209 19:32:19.285389  1002 net.cpp:100] Creating Layer 
L1_b6_cbr1_dropout\nI1209 19:32:19.285401  1002 net.cpp:434] L1_b6_cbr1_dropout <- L1_b6_cbr1_bn_top\nI1209 19:32:19.285420  1002 net.cpp:395] L1_b6_cbr1_dropout -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.285478  1002 net.cpp:150] Setting up L1_b6_cbr1_dropout\nI1209 19:32:19.285502  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.285513  1002 net.cpp:165] Memory required for data: 615937500\nI1209 19:32:19.285523  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1209 19:32:19.285537  1002 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1209 19:32:19.285548  1002 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1209 19:32:19.285562  1002 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.285581  1002 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1209 19:32:19.285595  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.285604  1002 net.cpp:165] Memory required for data: 624129500\nI1209 19:32:19.285614  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1209 19:32:19.285640  1002 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1209 19:32:19.285652  1002 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1209 19:32:19.285670  1002 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1209 19:32:19.286097  1002 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1209 19:32:19.286116  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.286126  1002 net.cpp:165] Memory required for data: 632321500\nI1209 19:32:19.286144  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1209 19:32:19.286166  1002 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1209 19:32:19.286180  1002 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1209 19:32:19.286201  1002 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1209 19:32:19.286537  1002 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1209 19:32:19.286556  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.286566  1002 net.cpp:165] 
Memory required for data: 640513500\nI1209 19:32:19.286586  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1209 19:32:19.286602  1002 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1209 19:32:19.286613  1002 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1209 19:32:19.286628  1002 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.286734  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1209 19:32:19.286942  1002 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1209 19:32:19.286967  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.286978  1002 net.cpp:165] Memory required for data: 648705500\nI1209 19:32:19.286995  1002 layer_factory.hpp:77] Creating layer L1_b6_cbr2_dropout\nI1209 19:32:19.287019  1002 net.cpp:100] Creating Layer L1_b6_cbr2_dropout\nI1209 19:32:19.287031  1002 net.cpp:434] L1_b6_cbr2_dropout <- L1_b6_cbr2_bn_top\nI1209 19:32:19.287046  1002 net.cpp:395] L1_b6_cbr2_dropout -> L1_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.287102  1002 net.cpp:150] Setting up L1_b6_cbr2_dropout\nI1209 19:32:19.287128  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.287138  1002 net.cpp:165] Memory required for data: 656897500\nI1209 19:32:19.287149  1002 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1209 19:32:19.287165  1002 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1209 19:32:19.287178  1002 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1209 19:32:19.287200  1002 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1209 19:32:19.287223  1002 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1209 19:32:19.287283  1002 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1209 19:32:19.287303  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.287313  1002 net.cpp:165] Memory required for data: 665089500\nI1209 19:32:19.287324  1002 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1209 19:32:19.287346  1002 
net.cpp:100] Creating Layer L1_b6_relu\nI1209 19:32:19.287359  1002 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1209 19:32:19.287372  1002 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.287391  1002 net.cpp:150] Setting up L1_b6_relu\nI1209 19:32:19.287406  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.287415  1002 net.cpp:165] Memory required for data: 673281500\nI1209 19:32:19.287427  1002 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.287442  1002 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.287453  1002 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1209 19:32:19.287468  1002 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1209 19:32:19.287487  1002 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1209 19:32:19.287577  1002 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1209 19:32:19.287595  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.287608  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.287618  1002 net.cpp:165] Memory required for data: 689665500\nI1209 19:32:19.287629  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1209 19:32:19.287655  1002 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1209 19:32:19.287668  1002 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1209 19:32:19.287693  1002 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1209 19:32:19.288142  1002 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1209 19:32:19.288162  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.288173  1002 net.cpp:165] Memory required for data: 697857500\nI1209 19:32:19.288190  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1209 19:32:19.288215  1002 
net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1209 19:32:19.288228  1002 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1209 19:32:19.288244  1002 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI1209 19:32:19.288584  1002 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1209 19:32:19.288604  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.288614  1002 net.cpp:165] Memory required for data: 706049500\nI1209 19:32:19.288635  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1209 19:32:19.288652  1002 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1209 19:32:19.288664  1002 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1209 19:32:19.288678  1002 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.288785  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1209 19:32:19.289002  1002 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1209 19:32:19.289021  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.289031  1002 net.cpp:165] Memory required for data: 714241500\nI1209 19:32:19.289049  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_dropout\nI1209 19:32:19.289067  1002 net.cpp:100] Creating Layer L1_b7_cbr1_dropout\nI1209 19:32:19.289079  1002 net.cpp:434] L1_b7_cbr1_dropout <- L1_b7_cbr1_bn_top\nI1209 19:32:19.289100  1002 net.cpp:395] L1_b7_cbr1_dropout -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.289158  1002 net.cpp:150] Setting up L1_b7_cbr1_dropout\nI1209 19:32:19.289183  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.289193  1002 net.cpp:165] Memory required for data: 722433500\nI1209 19:32:19.289212  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1209 19:32:19.289228  1002 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1209 19:32:19.289239  1002 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1209 19:32:19.289254  1002 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.289273  1002 net.cpp:150] Setting up 
L1_b7_cbr1_relu\nI1209 19:32:19.289288  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.289296  1002 net.cpp:165] Memory required for data: 730625500\nI1209 19:32:19.289306  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1209 19:32:19.289328  1002 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1209 19:32:19.289340  1002 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1209 19:32:19.289364  1002 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1209 19:32:19.289777  1002 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1209 19:32:19.289798  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.289808  1002 net.cpp:165] Memory required for data: 738817500\nI1209 19:32:19.289824  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1209 19:32:19.289844  1002 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1209 19:32:19.289855  1002 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1209 19:32:19.289877  1002 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1209 19:32:19.290225  1002 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1209 19:32:19.290243  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.290253  1002 net.cpp:165] Memory required for data: 747009500\nI1209 19:32:19.290274  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1209 19:32:19.290298  1002 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1209 19:32:19.290311  1002 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1209 19:32:19.290326  1002 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.290426  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1209 19:32:19.290640  1002 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1209 19:32:19.290660  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.290670  1002 net.cpp:165] Memory required for data: 755201500\nI1209 19:32:19.290688  1002 layer_factory.hpp:77] Creating layer L1_b7_cbr2_dropout\nI1209 19:32:19.290714  1002 net.cpp:100] 
Creating Layer L1_b7_cbr2_dropout\nI1209 19:32:19.290727  1002 net.cpp:434] L1_b7_cbr2_dropout <- L1_b7_cbr2_bn_top\nI1209 19:32:19.290743  1002 net.cpp:395] L1_b7_cbr2_dropout -> L1_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.290807  1002 net.cpp:150] Setting up L1_b7_cbr2_dropout\nI1209 19:32:19.290824  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.290834  1002 net.cpp:165] Memory required for data: 763393500\nI1209 19:32:19.290844  1002 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1209 19:32:19.290863  1002 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1209 19:32:19.290874  1002 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1209 19:32:19.290886  1002 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1209 19:32:19.290910  1002 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1209 19:32:19.290978  1002 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1209 19:32:19.291000  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.291010  1002 net.cpp:165] Memory required for data: 771585500\nI1209 19:32:19.291020  1002 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1209 19:32:19.291038  1002 net.cpp:100] Creating Layer L1_b7_relu\nI1209 19:32:19.291049  1002 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI1209 19:32:19.291062  1002 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.291081  1002 net.cpp:150] Setting up L1_b7_relu\nI1209 19:32:19.291096  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.291105  1002 net.cpp:165] Memory required for data: 779777500\nI1209 19:32:19.291115  1002 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.291137  1002 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.291159  1002 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1209 19:32:19.291175  1002 net.cpp:408] 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1209 19:32:19.291195  1002 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1209 19:32:19.291287  1002 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1209 19:32:19.291309  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.291323  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.291333  1002 net.cpp:165] Memory required for data: 796161500\nI1209 19:32:19.291343  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1209 19:32:19.291365  1002 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1209 19:32:19.291378  1002 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1209 19:32:19.291400  1002 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1209 19:32:19.291807  1002 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1209 19:32:19.291826  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.291836  1002 net.cpp:165] Memory required for data: 804353500\nI1209 19:32:19.291856  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1209 19:32:19.291872  1002 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1209 19:32:19.291884  1002 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI1209 19:32:19.291900  1002 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1209 19:32:19.292246  1002 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1209 19:32:19.292265  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.292275  1002 net.cpp:165] Memory required for data: 812545500\nI1209 19:32:19.292296  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1209 19:32:19.292323  1002 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1209 19:32:19.292336  1002 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1209 19:32:19.292352  1002 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.292459  1002 
layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1209 19:32:19.292672  1002 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1209 19:32:19.292691  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.292701  1002 net.cpp:165] Memory required for data: 820737500\nI1209 19:32:19.292718  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_dropout\nI1209 19:32:19.292734  1002 net.cpp:100] Creating Layer L1_b8_cbr1_dropout\nI1209 19:32:19.292747  1002 net.cpp:434] L1_b8_cbr1_dropout <- L1_b8_cbr1_bn_top\nI1209 19:32:19.292767  1002 net.cpp:395] L1_b8_cbr1_dropout -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.292825  1002 net.cpp:150] Setting up L1_b8_cbr1_dropout\nI1209 19:32:19.292850  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.292860  1002 net.cpp:165] Memory required for data: 828929500\nI1209 19:32:19.292870  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1209 19:32:19.292886  1002 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1209 19:32:19.292896  1002 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1209 19:32:19.292912  1002 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.292930  1002 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1209 19:32:19.292945  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.292955  1002 net.cpp:165] Memory required for data: 837121500\nI1209 19:32:19.292973  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1209 19:32:19.292999  1002 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1209 19:32:19.293014  1002 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1209 19:32:19.293030  1002 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1209 19:32:19.293450  1002 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1209 19:32:19.293470  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.293481  1002 net.cpp:165] Memory required for data: 845313500\nI1209 19:32:19.293509  1002 layer_factory.hpp:77] Creating layer 
L1_b8_cbr2_bn\nI1209 19:32:19.293534  1002 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1209 19:32:19.293547  1002 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1209 19:32:19.293570  1002 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1209 19:32:19.293910  1002 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1209 19:32:19.293929  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.293939  1002 net.cpp:165] Memory required for data: 853505500\nI1209 19:32:19.293968  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1209 19:32:19.293985  1002 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1209 19:32:19.293998  1002 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1209 19:32:19.294018  1002 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.294121  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1209 19:32:19.294332  1002 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1209 19:32:19.294356  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294368  1002 net.cpp:165] Memory required for data: 861697500\nI1209 19:32:19.294387  1002 layer_factory.hpp:77] Creating layer L1_b8_cbr2_dropout\nI1209 19:32:19.294405  1002 net.cpp:100] Creating Layer L1_b8_cbr2_dropout\nI1209 19:32:19.294416  1002 net.cpp:434] L1_b8_cbr2_dropout <- L1_b8_cbr2_bn_top\nI1209 19:32:19.294430  1002 net.cpp:395] L1_b8_cbr2_dropout -> L1_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.294494  1002 net.cpp:150] Setting up L1_b8_cbr2_dropout\nI1209 19:32:19.294512  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294522  1002 net.cpp:165] Memory required for data: 869889500\nI1209 19:32:19.294531  1002 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1209 19:32:19.294550  1002 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1209 19:32:19.294562  1002 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1209 19:32:19.294580  1002 net.cpp:434] L1_b8_sum_eltwise <- 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1209 19:32:19.294596  1002 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1209 19:32:19.294656  1002 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1209 19:32:19.294674  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294684  1002 net.cpp:165] Memory required for data: 878081500\nI1209 19:32:19.294694  1002 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1209 19:32:19.294718  1002 net.cpp:100] Creating Layer L1_b8_relu\nI1209 19:32:19.294730  1002 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1209 19:32:19.294745  1002 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.294764  1002 net.cpp:150] Setting up L1_b8_relu\nI1209 19:32:19.294777  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294786  1002 net.cpp:165] Memory required for data: 886273500\nI1209 19:32:19.294795  1002 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.294811  1002 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.294822  1002 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1209 19:32:19.294837  1002 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1209 19:32:19.294857  1002 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1209 19:32:19.294947  1002 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1209 19:32:19.294973  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294986  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.294996  1002 net.cpp:165] Memory required for data: 902657500\nI1209 19:32:19.295006  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1209 19:32:19.295034  1002 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1209 19:32:19.295048  1002 net.cpp:434] L1_b9_cbr1_conv <- 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1209 19:32:19.295073  1002 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1209 19:32:19.295528  1002 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1209 19:32:19.295554  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.295565  1002 net.cpp:165] Memory required for data: 910849500\nI1209 19:32:19.295583  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1209 19:32:19.295603  1002 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI1209 19:32:19.295614  1002 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1209 19:32:19.295629  1002 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1209 19:32:19.295974  1002 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1209 19:32:19.295994  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.296003  1002 net.cpp:165] Memory required for data: 919041500\nI1209 19:32:19.296026  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1209 19:32:19.296052  1002 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1209 19:32:19.296066  1002 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1209 19:32:19.296082  1002 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.296188  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1209 19:32:19.296401  1002 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1209 19:32:19.296419  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.296429  1002 net.cpp:165] Memory required for data: 927233500\nI1209 19:32:19.296448  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_dropout\nI1209 19:32:19.296468  1002 net.cpp:100] Creating Layer L1_b9_cbr1_dropout\nI1209 19:32:19.296478  1002 net.cpp:434] L1_b9_cbr1_dropout <- L1_b9_cbr1_bn_top\nI1209 19:32:19.296499  1002 net.cpp:395] L1_b9_cbr1_dropout -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.296556  1002 net.cpp:150] Setting up L1_b9_cbr1_dropout\nI1209 19:32:19.296581  1002 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1209 19:32:19.296591  1002 net.cpp:165] Memory required for data: 935425500\nI1209 19:32:19.296602  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1209 19:32:19.296617  1002 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1209 19:32:19.296627  1002 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1209 19:32:19.296641  1002 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.296659  1002 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1209 19:32:19.296675  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.296684  1002 net.cpp:165] Memory required for data: 943617500\nI1209 19:32:19.296694  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1209 19:32:19.296720  1002 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1209 19:32:19.296735  1002 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1209 19:32:19.296752  1002 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1209 19:32:19.297183  1002 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1209 19:32:19.297204  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.297214  1002 net.cpp:165] Memory required for data: 951809500\nI1209 19:32:19.297231  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1209 19:32:19.297257  1002 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1209 19:32:19.297269  1002 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1209 19:32:19.297291  1002 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1209 19:32:19.297619  1002 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1209 19:32:19.297639  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.297649  1002 net.cpp:165] Memory required for data: 960001500\nI1209 19:32:19.297705  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1209 19:32:19.297729  1002 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1209 19:32:19.297742  1002 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1209 19:32:19.297758  1002 net.cpp:395] L1_b9_cbr2_scale -> 
L1_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.297853  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1209 19:32:19.298076  1002 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1209 19:32:19.298095  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.298105  1002 net.cpp:165] Memory required for data: 968193500\nI1209 19:32:19.298132  1002 layer_factory.hpp:77] Creating layer L1_b9_cbr2_dropout\nI1209 19:32:19.298156  1002 net.cpp:100] Creating Layer L1_b9_cbr2_dropout\nI1209 19:32:19.298167  1002 net.cpp:434] L1_b9_cbr2_dropout <- L1_b9_cbr2_bn_top\nI1209 19:32:19.298182  1002 net.cpp:395] L1_b9_cbr2_dropout -> L1_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.298239  1002 net.cpp:150] Setting up L1_b9_cbr2_dropout\nI1209 19:32:19.298257  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.298267  1002 net.cpp:165] Memory required for data: 976385500\nI1209 19:32:19.298276  1002 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1209 19:32:19.298298  1002 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1209 19:32:19.298310  1002 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1209 19:32:19.298323  1002 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1209 19:32:19.298337  1002 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1209 19:32:19.298403  1002 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1209 19:32:19.298420  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.298429  1002 net.cpp:165] Memory required for data: 984577500\nI1209 19:32:19.298439  1002 layer_factory.hpp:77] Creating layer L1_b9_relu\nI1209 19:32:19.298451  1002 net.cpp:100] Creating Layer L1_b9_relu\nI1209 19:32:19.298465  1002 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1209 19:32:19.298483  1002 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.298501  1002 net.cpp:150] Setting up L1_b9_relu\nI1209 19:32:19.298516  1002 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1209 19:32:19.298526  1002 net.cpp:165] Memory required for data: 992769500\nI1209 19:32:19.298533  1002 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.298547  1002 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.298557  1002 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1209 19:32:19.298573  1002 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1209 19:32:19.298590  1002 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1209 19:32:19.298678  1002 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1209 19:32:19.298696  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.298709  1002 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1209 19:32:19.298719  1002 net.cpp:165] Memory required for data: 1009153500\nI1209 19:32:19.298729  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1209 19:32:19.298749  1002 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1209 19:32:19.298761  1002 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1209 19:32:19.298784  1002 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1209 19:32:19.299230  1002 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1209 19:32:19.299252  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.299262  1002 net.cpp:165] Memory required for data: 1011201500\nI1209 19:32:19.299278  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI1209 19:32:19.299301  1002 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1209 19:32:19.299314  1002 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1209 19:32:19.299329  1002 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1209 19:32:19.299676  1002 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1209 19:32:19.299698  1002 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI1209 19:32:19.299708  1002 net.cpp:165] Memory required for data: 1013249500\nI1209 19:32:19.299729  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1209 19:32:19.299751  1002 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1209 19:32:19.299764  1002 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1209 19:32:19.299780  1002 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.299882  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1209 19:32:19.300120  1002 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1209 19:32:19.300140  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.300149  1002 net.cpp:165] Memory required for data: 1015297500\nI1209 19:32:19.300166  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_dropout\nI1209 19:32:19.300189  1002 net.cpp:100] Creating Layer L2_b1_cbr1_dropout\nI1209 19:32:19.300200  1002 net.cpp:434] L2_b1_cbr1_dropout <- L2_b1_cbr1_bn_top\nI1209 19:32:19.300220  1002 net.cpp:395] L2_b1_cbr1_dropout -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.300269  1002 net.cpp:150] Setting up L2_b1_cbr1_dropout\nI1209 19:32:19.300287  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.300297  1002 net.cpp:165] Memory required for data: 1017345500\nI1209 19:32:19.300307  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1209 19:32:19.300320  1002 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1209 19:32:19.300330  1002 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1209 19:32:19.300350  1002 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.300370  1002 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1209 19:32:19.300385  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.300395  1002 net.cpp:165] Memory required for data: 1019393500\nI1209 19:32:19.300405  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1209 19:32:19.300423  1002 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1209 
19:32:19.300436  1002 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1209 19:32:19.300452  1002 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1209 19:32:19.300869  1002 net.cpp:150] Setting up L2_b1_cbr2_conv\nI1209 19:32:19.300889  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.300899  1002 net.cpp:165] Memory required for data: 1021441500\nI1209 19:32:19.300916  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1209 19:32:19.300937  1002 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1209 19:32:19.300951  1002 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1209 19:32:19.300972  1002 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1209 19:32:19.301311  1002 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1209 19:32:19.301329  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.301338  1002 net.cpp:165] Memory required for data: 1023489500\nI1209 19:32:19.301359  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1209 19:32:19.301375  1002 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1209 19:32:19.301388  1002 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1209 19:32:19.301406  1002 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.301507  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1209 19:32:19.301717  1002 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1209 19:32:19.301736  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.301746  1002 net.cpp:165] Memory required for data: 1025537500\nI1209 19:32:19.301764  1002 layer_factory.hpp:77] Creating layer L2_b1_cbr2_dropout\nI1209 19:32:19.301779  1002 net.cpp:100] Creating Layer L2_b1_cbr2_dropout\nI1209 19:32:19.301791  1002 net.cpp:434] L2_b1_cbr2_dropout <- L2_b1_cbr2_bn_top\nI1209 19:32:19.301811  1002 net.cpp:395] L2_b1_cbr2_dropout -> L2_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.301859  1002 net.cpp:150] Setting up L2_b1_cbr2_dropout\nI1209 19:32:19.301877  1002 net.cpp:157] Top shape: 
125 16 16 16 (512000)\nI1209 19:32:19.301885  1002 net.cpp:165] Memory required for data: 1027585500\nI1209 19:32:19.301895  1002 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1209 19:32:19.301918  1002 net.cpp:100] Creating Layer L2_b1_pool\nI1209 19:32:19.301929  1002 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1209 19:32:19.301945  1002 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1209 19:32:19.302017  1002 net.cpp:150] Setting up L2_b1_pool\nI1209 19:32:19.302037  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.302045  1002 net.cpp:165] Memory required for data: 1029633500\nI1209 19:32:19.302064  1002 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1209 19:32:19.302080  1002 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1209 19:32:19.302093  1002 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1209 19:32:19.302105  1002 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1209 19:32:19.302120  1002 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1209 19:32:19.302187  1002 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1209 19:32:19.302206  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.302214  1002 net.cpp:165] Memory required for data: 1031681500\nI1209 19:32:19.302224  1002 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1209 19:32:19.302237  1002 net.cpp:100] Creating Layer L2_b1_relu\nI1209 19:32:19.302249  1002 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1209 19:32:19.302263  1002 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.302280  1002 net.cpp:150] Setting up L2_b1_relu\nI1209 19:32:19.302295  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.302304  1002 net.cpp:165] Memory required for data: 1033729500\nI1209 19:32:19.302314  1002 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1209 19:32:19.302330  1002 net.cpp:100] Creating Layer L2_b1_zeros\nI1209 19:32:19.302352  1002 net.cpp:408] L2_b1_zeros -> 
L2_b1_zeros\nI1209 19:32:19.305676  1002 net.cpp:150] Setting up L2_b1_zeros\nI1209 19:32:19.305699  1002 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1209 19:32:19.305717  1002 net.cpp:165] Memory required for data: 1035777500\nI1209 19:32:19.305728  1002 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1209 19:32:19.305770  1002 net.cpp:100] Creating Layer L2_b1_concat0\nI1209 19:32:19.305784  1002 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1209 19:32:19.305797  1002 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1209 19:32:19.305814  1002 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1209 19:32:19.305877  1002 net.cpp:150] Setting up L2_b1_concat0\nI1209 19:32:19.305896  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.305905  1002 net.cpp:165] Memory required for data: 1039873500\nI1209 19:32:19.305917  1002 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.305935  1002 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.305948  1002 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1209 19:32:19.305970  1002 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1209 19:32:19.305990  1002 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1209 19:32:19.306082  1002 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1209 19:32:19.306102  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.306115  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.306124  1002 net.cpp:165] Memory required for data: 1048065500\nI1209 19:32:19.306134  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1209 19:32:19.306155  1002 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1209 19:32:19.306167  1002 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1209 19:32:19.306190  1002 net.cpp:408] L2_b2_cbr1_conv -> 
L2_b2_cbr1_conv_top\nI1209 19:32:19.306754  1002 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1209 19:32:19.306776  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.306785  1002 net.cpp:165] Memory required for data: 1052161500\nI1209 19:32:19.306802  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1209 19:32:19.306818  1002 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1209 19:32:19.306831  1002 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1209 19:32:19.306852  1002 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1209 19:32:19.307202  1002 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1209 19:32:19.307221  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.307232  1002 net.cpp:165] Memory required for data: 1056257500\nI1209 19:32:19.307263  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1209 19:32:19.307286  1002 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1209 19:32:19.307298  1002 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1209 19:32:19.307313  1002 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.307416  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1209 19:32:19.307628  1002 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1209 19:32:19.307646  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.307657  1002 net.cpp:165] Memory required for data: 1060353500\nI1209 19:32:19.307674  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_dropout\nI1209 19:32:19.307704  1002 net.cpp:100] Creating Layer L2_b2_cbr1_dropout\nI1209 19:32:19.307718  1002 net.cpp:434] L2_b2_cbr1_dropout <- L2_b2_cbr1_bn_top\nI1209 19:32:19.307734  1002 net.cpp:395] L2_b2_cbr1_dropout -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.307790  1002 net.cpp:150] Setting up L2_b2_cbr1_dropout\nI1209 19:32:19.307807  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.307816  1002 net.cpp:165] Memory required for data: 1064449500\nI1209 19:32:19.307827  
1002 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1209 19:32:19.307842  1002 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1209 19:32:19.307853  1002 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1209 19:32:19.307873  1002 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.307893  1002 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1209 19:32:19.307906  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.307915  1002 net.cpp:165] Memory required for data: 1068545500\nI1209 19:32:19.307924  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1209 19:32:19.307945  1002 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1209 19:32:19.307965  1002 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1209 19:32:19.307983  1002 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1209 19:32:19.308547  1002 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1209 19:32:19.308568  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.308578  1002 net.cpp:165] Memory required for data: 1072641500\nI1209 19:32:19.308594  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1209 19:32:19.308616  1002 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1209 19:32:19.308629  1002 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1209 19:32:19.308646  1002 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1209 19:32:19.308996  1002 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1209 19:32:19.309016  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.309026  1002 net.cpp:165] Memory required for data: 1076737500\nI1209 19:32:19.309047  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1209 19:32:19.309064  1002 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1209 19:32:19.309077  1002 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1209 19:32:19.309096  1002 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.309198  1002 layer_factory.hpp:77] Creating layer 
L2_b2_cbr2_scale\nI1209 19:32:19.309411  1002 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1209 19:32:19.309429  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.309440  1002 net.cpp:165] Memory required for data: 1080833500\nI1209 19:32:19.309458  1002 layer_factory.hpp:77] Creating layer L2_b2_cbr2_dropout\nI1209 19:32:19.309474  1002 net.cpp:100] Creating Layer L2_b2_cbr2_dropout\nI1209 19:32:19.309487  1002 net.cpp:434] L2_b2_cbr2_dropout <- L2_b2_cbr2_bn_top\nI1209 19:32:19.309505  1002 net.cpp:395] L2_b2_cbr2_dropout -> L2_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.309556  1002 net.cpp:150] Setting up L2_b2_cbr2_dropout\nI1209 19:32:19.309573  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.309582  1002 net.cpp:165] Memory required for data: 1084929500\nI1209 19:32:19.309592  1002 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1209 19:32:19.309615  1002 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1209 19:32:19.309636  1002 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1209 19:32:19.309650  1002 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1209 19:32:19.309667  1002 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1209 19:32:19.309731  1002 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1209 19:32:19.309751  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.309761  1002 net.cpp:165] Memory required for data: 1089025500\nI1209 19:32:19.309772  1002 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1209 19:32:19.309787  1002 net.cpp:100] Creating Layer L2_b2_relu\nI1209 19:32:19.309799  1002 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1209 19:32:19.309813  1002 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.309833  1002 net.cpp:150] Setting up L2_b2_relu\nI1209 19:32:19.309847  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.309856  1002 net.cpp:165] Memory required for data: 1093121500\nI1209 
19:32:19.309866  1002 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.309880  1002 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.309891  1002 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1209 19:32:19.309911  1002 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1209 19:32:19.309932  1002 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1209 19:32:19.310026  1002 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1209 19:32:19.310046  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.310058  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.310067  1002 net.cpp:165] Memory required for data: 1101313500\nI1209 19:32:19.310078  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1209 19:32:19.310104  1002 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1209 19:32:19.310117  1002 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1209 19:32:19.310139  1002 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1209 19:32:19.310703  1002 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1209 19:32:19.310726  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.310736  1002 net.cpp:165] Memory required for data: 1105409500\nI1209 19:32:19.310753  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1209 19:32:19.310770  1002 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1209 19:32:19.310788  1002 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1209 19:32:19.310804  1002 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1209 19:32:19.311148  1002 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1209 19:32:19.311168  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.311178  1002 net.cpp:165] Memory required for data: 1109505500\nI1209 
19:32:19.311203  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1209 19:32:19.311219  1002 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1209 19:32:19.311231  1002 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1209 19:32:19.311246  1002 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.311353  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1209 19:32:19.311566  1002 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1209 19:32:19.311590  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.311600  1002 net.cpp:165] Memory required for data: 1113601500\nI1209 19:32:19.311617  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_dropout\nI1209 19:32:19.311635  1002 net.cpp:100] Creating Layer L2_b3_cbr1_dropout\nI1209 19:32:19.311645  1002 net.cpp:434] L2_b3_cbr1_dropout <- L2_b3_cbr1_bn_top\nI1209 19:32:19.311659  1002 net.cpp:395] L2_b3_cbr1_dropout -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.311713  1002 net.cpp:150] Setting up L2_b3_cbr1_dropout\nI1209 19:32:19.311731  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.311749  1002 net.cpp:165] Memory required for data: 1117697500\nI1209 19:32:19.311760  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1209 19:32:19.311774  1002 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1209 19:32:19.311784  1002 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1209 19:32:19.311805  1002 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.311826  1002 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1209 19:32:19.311841  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.311851  1002 net.cpp:165] Memory required for data: 1121793500\nI1209 19:32:19.311861  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1209 19:32:19.311880  1002 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1209 19:32:19.311892  1002 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1209 19:32:19.311915  
1002 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1209 19:32:19.312482  1002 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1209 19:32:19.312502  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.312512  1002 net.cpp:165] Memory required for data: 1125889500\nI1209 19:32:19.312530  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1209 19:32:19.312554  1002 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1209 19:32:19.312567  1002 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1209 19:32:19.312583  1002 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1209 19:32:19.312917  1002 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1209 19:32:19.312947  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.312965  1002 net.cpp:165] Memory required for data: 1129985500\nI1209 19:32:19.312989  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1209 19:32:19.313006  1002 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1209 19:32:19.313019  1002 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1209 19:32:19.313033  1002 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.313133  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1209 19:32:19.313349  1002 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1209 19:32:19.313367  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.313376  1002 net.cpp:165] Memory required for data: 1134081500\nI1209 19:32:19.313395  1002 layer_factory.hpp:77] Creating layer L2_b3_cbr2_dropout\nI1209 19:32:19.313410  1002 net.cpp:100] Creating Layer L2_b3_cbr2_dropout\nI1209 19:32:19.313422  1002 net.cpp:434] L2_b3_cbr2_dropout <- L2_b3_cbr2_bn_top\nI1209 19:32:19.313442  1002 net.cpp:395] L2_b3_cbr2_dropout -> L2_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.313493  1002 net.cpp:150] Setting up L2_b3_cbr2_dropout\nI1209 19:32:19.313509  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.313519  1002 net.cpp:165] Memory required for data: 
1138177500\nI1209 19:32:19.313529  1002 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1209 19:32:19.313549  1002 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1209 19:32:19.313561  1002 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1209 19:32:19.313575  1002 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1209 19:32:19.313591  1002 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1209 19:32:19.313648  1002 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1209 19:32:19.313668  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.313678  1002 net.cpp:165] Memory required for data: 1142273500\nI1209 19:32:19.313688  1002 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1209 19:32:19.313702  1002 net.cpp:100] Creating Layer L2_b3_relu\nI1209 19:32:19.313714  1002 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1209 19:32:19.313735  1002 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.313755  1002 net.cpp:150] Setting up L2_b3_relu\nI1209 19:32:19.313768  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.313777  1002 net.cpp:165] Memory required for data: 1146369500\nI1209 19:32:19.313787  1002 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.313810  1002 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.313822  1002 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1209 19:32:19.313838  1002 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1209 19:32:19.313858  1002 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1209 19:32:19.313951  1002 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1209 19:32:19.313977  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.313989  1002 net.cpp:157] Top shape: 125 32 
16 16 (1024000)\nI1209 19:32:19.313997  1002 net.cpp:165] Memory required for data: 1154561500\nI1209 19:32:19.314008  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1209 19:32:19.314028  1002 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1209 19:32:19.314039  1002 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1209 19:32:19.314062  1002 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1209 19:32:19.314623  1002 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1209 19:32:19.314643  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.314653  1002 net.cpp:165] Memory required for data: 1158657500\nI1209 19:32:19.314671  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1209 19:32:19.314689  1002 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1209 19:32:19.314700  1002 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1209 19:32:19.314721  1002 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1209 19:32:19.315066  1002 net.cpp:150] Setting up L2_b4_cbr1_bn\nI1209 19:32:19.315086  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.315096  1002 net.cpp:165] Memory required for data: 1162753500\nI1209 19:32:19.315117  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1209 19:32:19.315138  1002 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1209 19:32:19.315150  1002 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1209 19:32:19.315165  1002 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.315265  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1209 19:32:19.315477  1002 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1209 19:32:19.315496  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.315506  1002 net.cpp:165] Memory required for data: 1166849500\nI1209 19:32:19.315524  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_dropout\nI1209 19:32:19.315549  1002 net.cpp:100] Creating Layer L2_b4_cbr1_dropout\nI1209 
19:32:19.315562  1002 net.cpp:434] L2_b4_cbr1_dropout <- L2_b4_cbr1_bn_top\nI1209 19:32:19.315582  1002 net.cpp:395] L2_b4_cbr1_dropout -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.315634  1002 net.cpp:150] Setting up L2_b4_cbr1_dropout\nI1209 19:32:19.315651  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.315661  1002 net.cpp:165] Memory required for data: 1170945500\nI1209 19:32:19.315675  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1209 19:32:19.315690  1002 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1209 19:32:19.315702  1002 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1209 19:32:19.315722  1002 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.315743  1002 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1209 19:32:19.315759  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.315768  1002 net.cpp:165] Memory required for data: 1175041500\nI1209 19:32:19.315779  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1209 19:32:19.315801  1002 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1209 19:32:19.315815  1002 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1209 19:32:19.315832  1002 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1209 19:32:19.316398  1002 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1209 19:32:19.316419  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.316442  1002 net.cpp:165] Memory required for data: 1179137500\nI1209 19:32:19.316460  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1209 19:32:19.316484  1002 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1209 19:32:19.316498  1002 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1209 19:32:19.316514  1002 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1209 19:32:19.316851  1002 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1209 19:32:19.316870  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.316881  1002 net.cpp:165] Memory required for data: 
1183233500\nI1209 19:32:19.316903  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1209 19:32:19.316923  1002 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1209 19:32:19.316936  1002 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1209 19:32:19.316963  1002 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.317065  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1209 19:32:19.317276  1002 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1209 19:32:19.317296  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317306  1002 net.cpp:165] Memory required for data: 1187329500\nI1209 19:32:19.317323  1002 layer_factory.hpp:77] Creating layer L2_b4_cbr2_dropout\nI1209 19:32:19.317340  1002 net.cpp:100] Creating Layer L2_b4_cbr2_dropout\nI1209 19:32:19.317353  1002 net.cpp:434] L2_b4_cbr2_dropout <- L2_b4_cbr2_bn_top\nI1209 19:32:19.317373  1002 net.cpp:395] L2_b4_cbr2_dropout -> L2_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.317422  1002 net.cpp:150] Setting up L2_b4_cbr2_dropout\nI1209 19:32:19.317440  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317451  1002 net.cpp:165] Memory required for data: 1191425500\nI1209 19:32:19.317461  1002 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1209 19:32:19.317481  1002 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1209 19:32:19.317494  1002 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1209 19:32:19.317507  1002 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1209 19:32:19.317523  1002 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1209 19:32:19.317589  1002 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1209 19:32:19.317606  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317617  1002 net.cpp:165] Memory required for data: 1195521500\nI1209 19:32:19.317629  1002 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1209 19:32:19.317644  1002 net.cpp:100] Creating 
Layer L2_b4_relu\nI1209 19:32:19.317656  1002 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1209 19:32:19.317670  1002 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.317688  1002 net.cpp:150] Setting up L2_b4_relu\nI1209 19:32:19.317703  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317713  1002 net.cpp:165] Memory required for data: 1199617500\nI1209 19:32:19.317724  1002 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.317739  1002 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.317750  1002 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1209 19:32:19.317771  1002 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1209 19:32:19.317791  1002 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1209 19:32:19.317878  1002 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1209 19:32:19.317898  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317911  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.317921  1002 net.cpp:165] Memory required for data: 1207809500\nI1209 19:32:19.317934  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1209 19:32:19.317970  1002 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1209 19:32:19.317984  1002 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1209 19:32:19.318018  1002 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1209 19:32:19.318604  1002 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1209 19:32:19.318624  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.318635  1002 net.cpp:165] Memory required for data: 1211905500\nI1209 19:32:19.318655  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1209 19:32:19.318680  1002 net.cpp:100] Creating Layer 
L2_b5_cbr1_bn\nI1209 19:32:19.318692  1002 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1209 19:32:19.318708  1002 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI1209 19:32:19.319042  1002 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1209 19:32:19.319061  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.319072  1002 net.cpp:165] Memory required for data: 1216001500\nI1209 19:32:19.319095  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1209 19:32:19.319113  1002 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1209 19:32:19.319124  1002 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1209 19:32:19.319145  1002 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.319248  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1209 19:32:19.319458  1002 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1209 19:32:19.319475  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.319485  1002 net.cpp:165] Memory required for data: 1220097500\nI1209 19:32:19.319505  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_dropout\nI1209 19:32:19.319524  1002 net.cpp:100] Creating Layer L2_b5_cbr1_dropout\nI1209 19:32:19.319535  1002 net.cpp:434] L2_b5_cbr1_dropout <- L2_b5_cbr1_bn_top\nI1209 19:32:19.319555  1002 net.cpp:395] L2_b5_cbr1_dropout -> L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.319604  1002 net.cpp:150] Setting up L2_b5_cbr1_dropout\nI1209 19:32:19.319622  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.319633  1002 net.cpp:165] Memory required for data: 1224193500\nI1209 19:32:19.319643  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1209 19:32:19.319656  1002 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1209 19:32:19.319667  1002 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1209 19:32:19.319687  1002 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.319706  1002 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1209 
19:32:19.319721  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.319731  1002 net.cpp:165] Memory required for data: 1228289500\nI1209 19:32:19.319744  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1209 19:32:19.319772  1002 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1209 19:32:19.319785  1002 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1209 19:32:19.319802  1002 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1209 19:32:19.320384  1002 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1209 19:32:19.320405  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.320415  1002 net.cpp:165] Memory required for data: 1232385500\nI1209 19:32:19.320435  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1209 19:32:19.320458  1002 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1209 19:32:19.320472  1002 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1209 19:32:19.320495  1002 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1209 19:32:19.320816  1002 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1209 19:32:19.320839  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.320850  1002 net.cpp:165] Memory required for data: 1236481500\nI1209 19:32:19.320873  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1209 19:32:19.320889  1002 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1209 19:32:19.320900  1002 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1209 19:32:19.320915  1002 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.321025  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1209 19:32:19.321234  1002 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1209 19:32:19.321254  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321272  1002 net.cpp:165] Memory required for data: 1240577500\nI1209 19:32:19.321292  1002 layer_factory.hpp:77] Creating layer L2_b5_cbr2_dropout\nI1209 19:32:19.321310  1002 net.cpp:100] Creating Layer 
L2_b5_cbr2_dropout\nI1209 19:32:19.321322  1002 net.cpp:434] L2_b5_cbr2_dropout <- L2_b5_cbr2_bn_top\nI1209 19:32:19.321346  1002 net.cpp:395] L2_b5_cbr2_dropout -> L2_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.321394  1002 net.cpp:150] Setting up L2_b5_cbr2_dropout\nI1209 19:32:19.321413  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321422  1002 net.cpp:165] Memory required for data: 1244673500\nI1209 19:32:19.321434  1002 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1209 19:32:19.321456  1002 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1209 19:32:19.321470  1002 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1209 19:32:19.321483  1002 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1209 19:32:19.321499  1002 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1209 19:32:19.321557  1002 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1209 19:32:19.321575  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321586  1002 net.cpp:165] Memory required for data: 1248769500\nI1209 19:32:19.321599  1002 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1209 19:32:19.321614  1002 net.cpp:100] Creating Layer L2_b5_relu\nI1209 19:32:19.321624  1002 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI1209 19:32:19.321646  1002 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.321666  1002 net.cpp:150] Setting up L2_b5_relu\nI1209 19:32:19.321679  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321688  1002 net.cpp:165] Memory required for data: 1252865500\nI1209 19:32:19.321699  1002 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.321715  1002 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.321727  1002 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1209 19:32:19.321743  1002 net.cpp:408] 
L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1209 19:32:19.321763  1002 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1209 19:32:19.321853  1002 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1209 19:32:19.321874  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321888  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.321898  1002 net.cpp:165] Memory required for data: 1261057500\nI1209 19:32:19.321910  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1209 19:32:19.321931  1002 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1209 19:32:19.321943  1002 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1209 19:32:19.321976  1002 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1209 19:32:19.322543  1002 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1209 19:32:19.322563  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.322574  1002 net.cpp:165] Memory required for data: 1265153500\nI1209 19:32:19.322593  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1209 19:32:19.322618  1002 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1209 19:32:19.322630  1002 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI1209 19:32:19.322646  1002 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1209 19:32:19.322990  1002 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1209 19:32:19.323015  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.323027  1002 net.cpp:165] Memory required for data: 1269249500\nI1209 19:32:19.323048  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1209 19:32:19.323065  1002 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1209 19:32:19.323078  1002 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1209 19:32:19.323093  1002 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.323210  1002 
layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1209 19:32:19.323431  1002 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1209 19:32:19.323451  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.323462  1002 net.cpp:165] Memory required for data: 1273345500\nI1209 19:32:19.323479  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_dropout\nI1209 19:32:19.323498  1002 net.cpp:100] Creating Layer L2_b6_cbr1_dropout\nI1209 19:32:19.323511  1002 net.cpp:434] L2_b6_cbr1_dropout <- L2_b6_cbr1_bn_top\nI1209 19:32:19.323534  1002 net.cpp:395] L2_b6_cbr1_dropout -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.323582  1002 net.cpp:150] Setting up L2_b6_cbr1_dropout\nI1209 19:32:19.323601  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.323611  1002 net.cpp:165] Memory required for data: 1277441500\nI1209 19:32:19.323621  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1209 19:32:19.323637  1002 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1209 19:32:19.323649  1002 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1209 19:32:19.323669  1002 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.323689  1002 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1209 19:32:19.323704  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.323714  1002 net.cpp:165] Memory required for data: 1281537500\nI1209 19:32:19.323727  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1209 19:32:19.323750  1002 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1209 19:32:19.323763  1002 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1209 19:32:19.323781  1002 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1209 19:32:19.324353  1002 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1209 19:32:19.324378  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.324388  1002 net.cpp:165] Memory required for data: 1285633500\nI1209 19:32:19.324406  1002 layer_factory.hpp:77] Creating layer 
L2_b6_cbr2_bn\nI1209 19:32:19.324424  1002 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1209 19:32:19.324437  1002 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1209 19:32:19.324458  1002 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1209 19:32:19.324782  1002 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1209 19:32:19.324802  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.324812  1002 net.cpp:165] Memory required for data: 1289729500\nI1209 19:32:19.324831  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1209 19:32:19.324851  1002 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1209 19:32:19.324862  1002 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1209 19:32:19.324877  1002 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.324970  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1209 19:32:19.325209  1002 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1209 19:32:19.325228  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325238  1002 net.cpp:165] Memory required for data: 1293825500\nI1209 19:32:19.325259  1002 layer_factory.hpp:77] Creating layer L2_b6_cbr2_dropout\nI1209 19:32:19.325276  1002 net.cpp:100] Creating Layer L2_b6_cbr2_dropout\nI1209 19:32:19.325289  1002 net.cpp:434] L2_b6_cbr2_dropout <- L2_b6_cbr2_bn_top\nI1209 19:32:19.325309  1002 net.cpp:395] L2_b6_cbr2_dropout -> L2_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.325358  1002 net.cpp:150] Setting up L2_b6_cbr2_dropout\nI1209 19:32:19.325386  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325395  1002 net.cpp:165] Memory required for data: 1297921500\nI1209 19:32:19.325407  1002 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1209 19:32:19.325423  1002 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1209 19:32:19.325435  1002 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1209 19:32:19.325449  1002 net.cpp:434] L2_b6_sum_eltwise <- 
L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1209 19:32:19.325465  1002 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1209 19:32:19.325541  1002 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1209 19:32:19.325561  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325570  1002 net.cpp:165] Memory required for data: 1302017500\nI1209 19:32:19.325582  1002 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1209 19:32:19.325598  1002 net.cpp:100] Creating Layer L2_b6_relu\nI1209 19:32:19.325609  1002 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1209 19:32:19.325624  1002 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.325644  1002 net.cpp:150] Setting up L2_b6_relu\nI1209 19:32:19.325659  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325669  1002 net.cpp:165] Memory required for data: 1306113500\nI1209 19:32:19.325680  1002 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.325695  1002 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.325706  1002 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1209 19:32:19.325727  1002 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1209 19:32:19.325748  1002 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1209 19:32:19.325834  1002 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1209 19:32:19.325860  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325875  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.325886  1002 net.cpp:165] Memory required for data: 1314305500\nI1209 19:32:19.325898  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1209 19:32:19.325923  1002 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1209 19:32:19.325937  1002 net.cpp:434] L2_b7_cbr1_conv <- 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1209 19:32:19.325955  1002 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1209 19:32:19.327883  1002 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1209 19:32:19.327905  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.327915  1002 net.cpp:165] Memory required for data: 1318401500\nI1209 19:32:19.327935  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1209 19:32:19.327968  1002 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI1209 19:32:19.327982  1002 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1209 19:32:19.328006  1002 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1209 19:32:19.328332  1002 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1209 19:32:19.328351  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.328361  1002 net.cpp:165] Memory required for data: 1322497500\nI1209 19:32:19.328384  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1209 19:32:19.328402  1002 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1209 19:32:19.328414  1002 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1209 19:32:19.328429  1002 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.328532  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1209 19:32:19.328738  1002 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1209 19:32:19.328758  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.328768  1002 net.cpp:165] Memory required for data: 1326593500\nI1209 19:32:19.328788  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_dropout\nI1209 19:32:19.328811  1002 net.cpp:100] Creating Layer L2_b7_cbr1_dropout\nI1209 19:32:19.328824  1002 net.cpp:434] L2_b7_cbr1_dropout <- L2_b7_cbr1_bn_top\nI1209 19:32:19.328840  1002 net.cpp:395] L2_b7_cbr1_dropout -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.328887  1002 net.cpp:150] Setting up L2_b7_cbr1_dropout\nI1209 19:32:19.328905  1002 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1209 19:32:19.328914  1002 net.cpp:165] Memory required for data: 1330689500\nI1209 19:32:19.328925  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1209 19:32:19.328943  1002 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1209 19:32:19.328963  1002 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1209 19:32:19.328991  1002 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.329011  1002 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1209 19:32:19.329026  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.329036  1002 net.cpp:165] Memory required for data: 1334785500\nI1209 19:32:19.329047  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1209 19:32:19.329069  1002 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1209 19:32:19.329082  1002 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1209 19:32:19.329105  1002 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1209 19:32:19.329655  1002 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1209 19:32:19.329675  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.329686  1002 net.cpp:165] Memory required for data: 1338881500\nI1209 19:32:19.329706  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1209 19:32:19.329725  1002 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1209 19:32:19.329737  1002 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1209 19:32:19.329761  1002 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1209 19:32:19.330108  1002 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1209 19:32:19.330128  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.330138  1002 net.cpp:165] Memory required for data: 1342977500\nI1209 19:32:19.330159  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1209 19:32:19.330183  1002 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1209 19:32:19.330196  1002 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1209 19:32:19.330212  1002 net.cpp:395] 
L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.330312  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1209 19:32:19.330528  1002 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1209 19:32:19.330548  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.330557  1002 net.cpp:165] Memory required for data: 1347073500\nI1209 19:32:19.330576  1002 layer_factory.hpp:77] Creating layer L2_b7_cbr2_dropout\nI1209 19:32:19.330601  1002 net.cpp:100] Creating Layer L2_b7_cbr2_dropout\nI1209 19:32:19.330615  1002 net.cpp:434] L2_b7_cbr2_dropout <- L2_b7_cbr2_bn_top\nI1209 19:32:19.330629  1002 net.cpp:395] L2_b7_cbr2_dropout -> L2_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.330684  1002 net.cpp:150] Setting up L2_b7_cbr2_dropout\nI1209 19:32:19.330703  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.330713  1002 net.cpp:165] Memory required for data: 1351169500\nI1209 19:32:19.330724  1002 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1209 19:32:19.330741  1002 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1209 19:32:19.330754  1002 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1209 19:32:19.330766  1002 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1209 19:32:19.330790  1002 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1209 19:32:19.330852  1002 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1209 19:32:19.330870  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.330879  1002 net.cpp:165] Memory required for data: 1355265500\nI1209 19:32:19.330893  1002 layer_factory.hpp:77] Creating layer L2_b7_relu\nI1209 19:32:19.330907  1002 net.cpp:100] Creating Layer L2_b7_relu\nI1209 19:32:19.330919  1002 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1209 19:32:19.330932  1002 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.330951  1002 net.cpp:150] Setting up L2_b7_relu\nI1209 19:32:19.330974  1002 net.cpp:157] Top 
shape: 125 32 16 16 (1024000)\nI1209 19:32:19.330984  1002 net.cpp:165] Memory required for data: 1359361500\nI1209 19:32:19.330998  1002 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.331019  1002 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.331032  1002 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1209 19:32:19.331048  1002 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1209 19:32:19.331079  1002 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1209 19:32:19.331176  1002 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1209 19:32:19.331195  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.331209  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.331218  1002 net.cpp:165] Memory required for data: 1367553500\nI1209 19:32:19.331231  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1209 19:32:19.331254  1002 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1209 19:32:19.331266  1002 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1209 19:32:19.331292  1002 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1209 19:32:19.331830  1002 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1209 19:32:19.331851  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.331861  1002 net.cpp:165] Memory required for data: 1371649500\nI1209 19:32:19.331879  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI1209 19:32:19.331898  1002 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1209 19:32:19.331912  1002 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1209 19:32:19.331933  1002 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1209 19:32:19.332279  1002 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1209 19:32:19.332298  1002 net.cpp:157] Top shape: 125 
32 16 16 (1024000)\nI1209 19:32:19.332309  1002 net.cpp:165] Memory required for data: 1375745500\nI1209 19:32:19.332329  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1209 19:32:19.332353  1002 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1209 19:32:19.332365  1002 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1209 19:32:19.332381  1002 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.332486  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1209 19:32:19.332697  1002 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1209 19:32:19.332716  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.332726  1002 net.cpp:165] Memory required for data: 1379841500\nI1209 19:32:19.332746  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_dropout\nI1209 19:32:19.332769  1002 net.cpp:100] Creating Layer L2_b8_cbr1_dropout\nI1209 19:32:19.332782  1002 net.cpp:434] L2_b8_cbr1_dropout <- L2_b8_cbr1_bn_top\nI1209 19:32:19.332797  1002 net.cpp:395] L2_b8_cbr1_dropout -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.332845  1002 net.cpp:150] Setting up L2_b8_cbr1_dropout\nI1209 19:32:19.332868  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.332878  1002 net.cpp:165] Memory required for data: 1383937500\nI1209 19:32:19.332890  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1209 19:32:19.332903  1002 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1209 19:32:19.332916  1002 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1209 19:32:19.332931  1002 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.332949  1002 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1209 19:32:19.332970  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.332981  1002 net.cpp:165] Memory required for data: 1388033500\nI1209 19:32:19.332994  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1209 19:32:19.333022  1002 net.cpp:100] Creating Layer 
L2_b8_cbr2_conv\nI1209 19:32:19.333035  1002 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1209 19:32:19.333053  1002 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1209 19:32:19.333595  1002 net.cpp:150] Setting up L2_b8_cbr2_conv\nI1209 19:32:19.333614  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.333624  1002 net.cpp:165] Memory required for data: 1392129500\nI1209 19:32:19.333644  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1209 19:32:19.333672  1002 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1209 19:32:19.333685  1002 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1209 19:32:19.333717  1002 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1209 19:32:19.335386  1002 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1209 19:32:19.335407  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.335417  1002 net.cpp:165] Memory required for data: 1396225500\nI1209 19:32:19.335441  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1209 19:32:19.335465  1002 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1209 19:32:19.335477  1002 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1209 19:32:19.335494  1002 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.335602  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1209 19:32:19.335811  1002 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1209 19:32:19.335830  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.335840  1002 net.cpp:165] Memory required for data: 1400321500\nI1209 19:32:19.335858  1002 layer_factory.hpp:77] Creating layer L2_b8_cbr2_dropout\nI1209 19:32:19.335881  1002 net.cpp:100] Creating Layer L2_b8_cbr2_dropout\nI1209 19:32:19.335893  1002 net.cpp:434] L2_b8_cbr2_dropout <- L2_b8_cbr2_bn_top\nI1209 19:32:19.335908  1002 net.cpp:395] L2_b8_cbr2_dropout -> L2_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.335955  1002 net.cpp:150] Setting up L2_b8_cbr2_dropout\nI1209 19:32:19.335988  
1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.336000  1002 net.cpp:165] Memory required for data: 1404417500\nI1209 19:32:19.336010  1002 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1209 19:32:19.336026  1002 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1209 19:32:19.336040  1002 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1209 19:32:19.336052  1002 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1209 19:32:19.336074  1002 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1209 19:32:19.336133  1002 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1209 19:32:19.336151  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.336160  1002 net.cpp:165] Memory required for data: 1408513500\nI1209 19:32:19.336174  1002 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1209 19:32:19.336189  1002 net.cpp:100] Creating Layer L2_b8_relu\nI1209 19:32:19.336201  1002 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1209 19:32:19.336215  1002 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.336232  1002 net.cpp:150] Setting up L2_b8_relu\nI1209 19:32:19.336247  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.336256  1002 net.cpp:165] Memory required for data: 1412609500\nI1209 19:32:19.336268  1002 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 19:32:19.336290  1002 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 19:32:19.336302  1002 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1209 19:32:19.336318  1002 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1209 19:32:19.336361  1002 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1209 19:32:19.336449  1002 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1209 
19:32:19.336470  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.336485  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.336494  1002 net.cpp:165] Memory required for data: 1420801500\nI1209 19:32:19.336508  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1209 19:32:19.336539  1002 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1209 19:32:19.336552  1002 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1209 19:32:19.336571  1002 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1209 19:32:19.337129  1002 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1209 19:32:19.337149  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.337159  1002 net.cpp:165] Memory required for data: 1424897500\nI1209 19:32:19.337191  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1209 19:32:19.337215  1002 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1209 19:32:19.337229  1002 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1209 19:32:19.337244  1002 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1209 19:32:19.337579  1002 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1209 19:32:19.337599  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.337610  1002 net.cpp:165] Memory required for data: 1428993500\nI1209 19:32:19.337633  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1209 19:32:19.337652  1002 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1209 19:32:19.337664  1002 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1209 19:32:19.337690  1002 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.337793  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1209 19:32:19.338007  1002 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1209 19:32:19.338027  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.338037  1002 net.cpp:165] Memory required for data: 1433089500\nI1209 19:32:19.338057  1002 
layer_factory.hpp:77] Creating layer L2_b9_cbr1_dropout\nI1209 19:32:19.338074  1002 net.cpp:100] Creating Layer L2_b9_cbr1_dropout\nI1209 19:32:19.338088  1002 net.cpp:434] L2_b9_cbr1_dropout <- L2_b9_cbr1_bn_top\nI1209 19:32:19.338107  1002 net.cpp:395] L2_b9_cbr1_dropout -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.338155  1002 net.cpp:150] Setting up L2_b9_cbr1_dropout\nI1209 19:32:19.338173  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.338183  1002 net.cpp:165] Memory required for data: 1437185500\nI1209 19:32:19.338196  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1209 19:32:19.338210  1002 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1209 19:32:19.338229  1002 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1209 19:32:19.338243  1002 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.338261  1002 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1209 19:32:19.338276  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.338285  1002 net.cpp:165] Memory required for data: 1441281500\nI1209 19:32:19.338296  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1209 19:32:19.338323  1002 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1209 19:32:19.338337  1002 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1209 19:32:19.338353  1002 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1209 19:32:19.338903  1002 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1209 19:32:19.338923  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.338933  1002 net.cpp:165] Memory required for data: 1445377500\nI1209 19:32:19.338953  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1209 19:32:19.338984  1002 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1209 19:32:19.338999  1002 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1209 19:32:19.339021  1002 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1209 19:32:19.339340  1002 net.cpp:150] Setting up 
L2_b9_cbr2_bn\nI1209 19:32:19.339359  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.339370  1002 net.cpp:165] Memory required for data: 1449473500\nI1209 19:32:19.339445  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1209 19:32:19.339473  1002 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1209 19:32:19.339485  1002 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1209 19:32:19.339499  1002 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.339599  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1209 19:32:19.339800  1002 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1209 19:32:19.339819  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.339829  1002 net.cpp:165] Memory required for data: 1453569500\nI1209 19:32:19.339849  1002 layer_factory.hpp:77] Creating layer L2_b9_cbr2_dropout\nI1209 19:32:19.339869  1002 net.cpp:100] Creating Layer L2_b9_cbr2_dropout\nI1209 19:32:19.339891  1002 net.cpp:434] L2_b9_cbr2_dropout <- L2_b9_cbr2_bn_top\nI1209 19:32:19.339913  1002 net.cpp:395] L2_b9_cbr2_dropout -> L2_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.339970  1002 net.cpp:150] Setting up L2_b9_cbr2_dropout\nI1209 19:32:19.339998  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.340008  1002 net.cpp:165] Memory required for data: 1457665500\nI1209 19:32:19.340019  1002 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1209 19:32:19.340035  1002 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1209 19:32:19.340047  1002 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1209 19:32:19.340060  1002 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1209 19:32:19.340075  1002 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1209 19:32:19.340140  1002 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1209 19:32:19.340159  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.340169  1002 net.cpp:165] Memory 
required for data: 1461761500\nI1209 19:32:19.340180  1002 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1209 19:32:19.340200  1002 net.cpp:100] Creating Layer L2_b9_relu\nI1209 19:32:19.340214  1002 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1209 19:32:19.340229  1002 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.340247  1002 net.cpp:150] Setting up L2_b9_relu\nI1209 19:32:19.340262  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.340271  1002 net.cpp:165] Memory required for data: 1465857500\nI1209 19:32:19.340281  1002 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.340307  1002 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.340322  1002 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1209 19:32:19.340337  1002 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1209 19:32:19.340358  1002 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1209 19:32:19.340438  1002 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1209 19:32:19.340464  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.340479  1002 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1209 19:32:19.340489  1002 net.cpp:165] Memory required for data: 1474049500\nI1209 19:32:19.340502  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1209 19:32:19.340524  1002 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1209 19:32:19.340538  1002 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1209 19:32:19.340556  1002 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1209 19:32:19.341136  1002 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1209 19:32:19.341156  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.341166  1002 net.cpp:165] Memory required 
for data: 1475073500\nI1209 19:32:19.341186  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1209 19:32:19.341208  1002 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1209 19:32:19.341222  1002 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1209 19:32:19.341238  1002 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1209 19:32:19.341576  1002 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1209 19:32:19.341595  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.341605  1002 net.cpp:165] Memory required for data: 1476097500\nI1209 19:32:19.341629  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1209 19:32:19.341651  1002 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1209 19:32:19.341665  1002 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1209 19:32:19.341686  1002 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.341783  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1209 19:32:19.342007  1002 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1209 19:32:19.342026  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.342046  1002 net.cpp:165] Memory required for data: 1477121500\nI1209 19:32:19.342066  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_dropout\nI1209 19:32:19.342083  1002 net.cpp:100] Creating Layer L3_b1_cbr1_dropout\nI1209 19:32:19.342097  1002 net.cpp:434] L3_b1_cbr1_dropout <- L3_b1_cbr1_bn_top\nI1209 19:32:19.342116  1002 net.cpp:395] L3_b1_cbr1_dropout -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.342173  1002 net.cpp:150] Setting up L3_b1_cbr1_dropout\nI1209 19:32:19.342191  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.342201  1002 net.cpp:165] Memory required for data: 1478145500\nI1209 19:32:19.342214  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1209 19:32:19.342234  1002 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1209 19:32:19.342247  1002 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1209 19:32:19.342262  
1002 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1209 19:32:19.342280  1002 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1209 19:32:19.342294  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.342305  1002 net.cpp:165] Memory required for data: 1479169500\nI1209 19:32:19.342315  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1209 19:32:19.342337  1002 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1209 19:32:19.342350  1002 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1209 19:32:19.342375  1002 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1209 19:32:19.343107  1002 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1209 19:32:19.343128  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.343138  1002 net.cpp:165] Memory required for data: 1480193500\nI1209 19:32:19.343158  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1209 19:32:19.343176  1002 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1209 19:32:19.343189  1002 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1209 19:32:19.343210  1002 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1209 19:32:19.343554  1002 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1209 19:32:19.343577  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.343588  1002 net.cpp:165] Memory required for data: 1481217500\nI1209 19:32:19.343612  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1209 19:32:19.343631  1002 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1209 19:32:19.343643  1002 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1209 19:32:19.343659  1002 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.343760  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1209 19:32:19.343984  1002 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1209 19:32:19.344003  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.344013  1002 net.cpp:165] Memory required for data: 1482241500\nI1209 
19:32:19.344033  1002 layer_factory.hpp:77] Creating layer L3_b1_cbr2_dropout\nI1209 19:32:19.344058  1002 net.cpp:100] Creating Layer L3_b1_cbr2_dropout\nI1209 19:32:19.344070  1002 net.cpp:434] L3_b1_cbr2_dropout <- L3_b1_cbr2_bn_top\nI1209 19:32:19.344084  1002 net.cpp:395] L3_b1_cbr2_dropout -> L3_b1_cbr2_bn_top (in-place)\nI1209 19:32:19.344149  1002 net.cpp:150] Setting up L3_b1_cbr2_dropout\nI1209 19:32:19.344167  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.344177  1002 net.cpp:165] Memory required for data: 1483265500\nI1209 19:32:19.344190  1002 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1209 19:32:19.344207  1002 net.cpp:100] Creating Layer L3_b1_pool\nI1209 19:32:19.344218  1002 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1209 19:32:19.344234  1002 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1209 19:32:19.344292  1002 net.cpp:150] Setting up L3_b1_pool\nI1209 19:32:19.344310  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.344319  1002 net.cpp:165] Memory required for data: 1484289500\nI1209 19:32:19.344333  1002 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1209 19:32:19.344355  1002 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1209 19:32:19.344368  1002 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1209 19:32:19.344391  1002 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1209 19:32:19.344408  1002 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1209 19:32:19.344465  1002 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1209 19:32:19.344483  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.344493  1002 net.cpp:165] Memory required for data: 1485313500\nI1209 19:32:19.344504  1002 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1209 19:32:19.344524  1002 net.cpp:100] Creating Layer L3_b1_relu\nI1209 19:32:19.344537  1002 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1209 19:32:19.344552  1002 net.cpp:395] L3_b1_relu -> 
L3_b1_sum_eltwise_top (in-place)\nI1209 19:32:19.344570  1002 net.cpp:150] Setting up L3_b1_relu\nI1209 19:32:19.344585  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.344595  1002 net.cpp:165] Memory required for data: 1486337500\nI1209 19:32:19.344606  1002 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1209 19:32:19.344624  1002 net.cpp:100] Creating Layer L3_b1_zeros\nI1209 19:32:19.344638  1002 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1209 19:32:19.346294  1002 net.cpp:150] Setting up L3_b1_zeros\nI1209 19:32:19.346323  1002 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1209 19:32:19.346334  1002 net.cpp:165] Memory required for data: 1487361500\nI1209 19:32:19.346345  1002 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1209 19:32:19.346362  1002 net.cpp:100] Creating Layer L3_b1_concat0\nI1209 19:32:19.346375  1002 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1209 19:32:19.346388  1002 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1209 19:32:19.346410  1002 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1209 19:32:19.346473  1002 net.cpp:150] Setting up L3_b1_concat0\nI1209 19:32:19.346494  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.346504  1002 net.cpp:165] Memory required for data: 1489409500\nI1209 19:32:19.346516  1002 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.346537  1002 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.346549  1002 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1209 19:32:19.346565  1002 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1209 19:32:19.346585  1002 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1209 19:32:19.346676  1002 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1209 19:32:19.346694  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.346709  1002 
net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.346719  1002 net.cpp:165] Memory required for data: 1493505500\nI1209 19:32:19.346730  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1209 19:32:19.346752  1002 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1209 19:32:19.346765  1002 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1209 19:32:19.346787  1002 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1209 19:32:19.347890  1002 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1209 19:32:19.347911  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.347921  1002 net.cpp:165] Memory required for data: 1495553500\nI1209 19:32:19.347942  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1209 19:32:19.347967  1002 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1209 19:32:19.347981  1002 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1209 19:32:19.348003  1002 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1209 19:32:19.348335  1002 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1209 19:32:19.348359  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.348371  1002 net.cpp:165] Memory required for data: 1497601500\nI1209 19:32:19.348394  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1209 19:32:19.348413  1002 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1209 19:32:19.348426  1002 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1209 19:32:19.348453  1002 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.348557  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1209 19:32:19.348775  1002 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1209 19:32:19.348794  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.348804  1002 net.cpp:165] Memory required for data: 1499649500\nI1209 19:32:19.348825  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_dropout\nI1209 19:32:19.348848  1002 net.cpp:100] Creating Layer 
L3_b2_cbr1_dropout\nI1209 19:32:19.348862  1002 net.cpp:434] L3_b2_cbr1_dropout <- L3_b2_cbr1_bn_top\nI1209 19:32:19.348877  1002 net.cpp:395] L3_b2_cbr1_dropout -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.348940  1002 net.cpp:150] Setting up L3_b2_cbr1_dropout\nI1209 19:32:19.348964  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.348975  1002 net.cpp:165] Memory required for data: 1501697500\nI1209 19:32:19.348989  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1209 19:32:19.349004  1002 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1209 19:32:19.349016  1002 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1209 19:32:19.349030  1002 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1209 19:32:19.349050  1002 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1209 19:32:19.349063  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.349072  1002 net.cpp:165] Memory required for data: 1503745500\nI1209 19:32:19.349086  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1209 19:32:19.349107  1002 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1209 19:32:19.349120  1002 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1209 19:32:19.349144  1002 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1209 19:32:19.350260  1002 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1209 19:32:19.350281  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.350291  1002 net.cpp:165] Memory required for data: 1505793500\nI1209 19:32:19.350307  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1209 19:32:19.350332  1002 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI1209 19:32:19.350345  1002 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1209 19:32:19.350363  1002 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1209 19:32:19.350699  1002 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1209 19:32:19.350718  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.350728  1002 net.cpp:165] Memory 
required for data: 1507841500\nI1209 19:32:19.350749  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1209 19:32:19.350766  1002 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1209 19:32:19.350778  1002 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1209 19:32:19.350793  1002 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.350901  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1209 19:32:19.351119  1002 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1209 19:32:19.351138  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351148  1002 net.cpp:165] Memory required for data: 1509889500\nI1209 19:32:19.351168  1002 layer_factory.hpp:77] Creating layer L3_b2_cbr2_dropout\nI1209 19:32:19.351186  1002 net.cpp:100] Creating Layer L3_b2_cbr2_dropout\nI1209 19:32:19.351199  1002 net.cpp:434] L3_b2_cbr2_dropout <- L3_b2_cbr2_bn_top\nI1209 19:32:19.351214  1002 net.cpp:395] L3_b2_cbr2_dropout -> L3_b2_cbr2_bn_top (in-place)\nI1209 19:32:19.351279  1002 net.cpp:150] Setting up L3_b2_cbr2_dropout\nI1209 19:32:19.351296  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351305  1002 net.cpp:165] Memory required for data: 1511937500\nI1209 19:32:19.351318  1002 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1209 19:32:19.351341  1002 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1209 19:32:19.351354  1002 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1209 19:32:19.351367  1002 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1209 19:32:19.351383  1002 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1209 19:32:19.351459  1002 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1209 19:32:19.351477  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351488  1002 net.cpp:165] Memory required for data: 1513985500\nI1209 19:32:19.351497  1002 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1209 19:32:19.351514  1002 net.cpp:100] Creating 
Layer L3_b2_relu\nI1209 19:32:19.351526  1002 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1209 19:32:19.351541  1002 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1209 19:32:19.351559  1002 net.cpp:150] Setting up L3_b2_relu\nI1209 19:32:19.351574  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351583  1002 net.cpp:165] Memory required for data: 1516033500\nI1209 19:32:19.351598  1002 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.351611  1002 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.351624  1002 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1209 19:32:19.351644  1002 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1209 19:32:19.351665  1002 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1209 19:32:19.351747  1002 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1209 19:32:19.351768  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351783  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.351792  1002 net.cpp:165] Memory required for data: 1520129500\nI1209 19:32:19.351805  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1209 19:32:19.351832  1002 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1209 19:32:19.351850  1002 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1209 19:32:19.351869  1002 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1209 19:32:19.353003  1002 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1209 19:32:19.353024  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.353034  1002 net.cpp:165] Memory required for data: 1522177500\nI1209 19:32:19.353054  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1209 19:32:19.353080  1002 net.cpp:100] Creating Layer 
L3_b3_cbr1_bn\nI1209 19:32:19.353093  1002 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1209 19:32:19.353109  1002 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1209 19:32:19.353443  1002 net.cpp:150] Setting up L3_b3_cbr1_bn\nI1209 19:32:19.353462  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.353472  1002 net.cpp:165] Memory required for data: 1524225500\nI1209 19:32:19.353494  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1209 19:32:19.353518  1002 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1209 19:32:19.353530  1002 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1209 19:32:19.353545  1002 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.353652  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1209 19:32:19.353868  1002 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1209 19:32:19.353888  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.353898  1002 net.cpp:165] Memory required for data: 1526273500\nI1209 19:32:19.353917  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_dropout\nI1209 19:32:19.353977  1002 net.cpp:100] Creating Layer L3_b3_cbr1_dropout\nI1209 19:32:19.353994  1002 net.cpp:434] L3_b3_cbr1_dropout <- L3_b3_cbr1_bn_top\nI1209 19:32:19.354010  1002 net.cpp:395] L3_b3_cbr1_dropout -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.354068  1002 net.cpp:150] Setting up L3_b3_cbr1_dropout\nI1209 19:32:19.354087  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.354097  1002 net.cpp:165] Memory required for data: 1528321500\nI1209 19:32:19.354110  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1209 19:32:19.354127  1002 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1209 19:32:19.354140  1002 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1209 19:32:19.354166  1002 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1209 19:32:19.354187  1002 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1209 19:32:19.354202  
1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.354212  1002 net.cpp:165] Memory required for data: 1530369500\nI1209 19:32:19.354221  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1209 19:32:19.354249  1002 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1209 19:32:19.354264  1002 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1209 19:32:19.354281  1002 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1209 19:32:19.355393  1002 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1209 19:32:19.355413  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.355423  1002 net.cpp:165] Memory required for data: 1532417500\nI1209 19:32:19.355442  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1209 19:32:19.355466  1002 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1209 19:32:19.355479  1002 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1209 19:32:19.355501  1002 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1209 19:32:19.355835  1002 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1209 19:32:19.355855  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.355865  1002 net.cpp:165] Memory required for data: 1534465500\nI1209 19:32:19.355888  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1209 19:32:19.355906  1002 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1209 19:32:19.355917  1002 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1209 19:32:19.355937  1002 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.356045  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1209 19:32:19.356264  1002 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1209 19:32:19.356284  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356293  1002 net.cpp:165] Memory required for data: 1536513500\nI1209 19:32:19.356312  1002 layer_factory.hpp:77] Creating layer L3_b3_cbr2_dropout\nI1209 19:32:19.356329  1002 net.cpp:100] Creating Layer L3_b3_cbr2_dropout\nI1209 
19:32:19.356341  1002 net.cpp:434] L3_b3_cbr2_dropout <- L3_b3_cbr2_bn_top\nI1209 19:32:19.356361  1002 net.cpp:395] L3_b3_cbr2_dropout -> L3_b3_cbr2_bn_top (in-place)\nI1209 19:32:19.356418  1002 net.cpp:150] Setting up L3_b3_cbr2_dropout\nI1209 19:32:19.356442  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356452  1002 net.cpp:165] Memory required for data: 1538561500\nI1209 19:32:19.356462  1002 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1209 19:32:19.356479  1002 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1209 19:32:19.356492  1002 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1209 19:32:19.356504  1002 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1209 19:32:19.356525  1002 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1209 19:32:19.356583  1002 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1209 19:32:19.356602  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356611  1002 net.cpp:165] Memory required for data: 1540609500\nI1209 19:32:19.356622  1002 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1209 19:32:19.356640  1002 net.cpp:100] Creating Layer L3_b3_relu\nI1209 19:32:19.356652  1002 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1209 19:32:19.356667  1002 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1209 19:32:19.356688  1002 net.cpp:150] Setting up L3_b3_relu\nI1209 19:32:19.356701  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356710  1002 net.cpp:165] Memory required for data: 1542657500\nI1209 19:32:19.356722  1002 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.356745  1002 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.356757  1002 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1209 19:32:19.356773  1002 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> 
L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1209 19:32:19.356803  1002 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1209 19:32:19.356889  1002 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1209 19:32:19.356919  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356933  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.356945  1002 net.cpp:165] Memory required for data: 1546753500\nI1209 19:32:19.356963  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1209 19:32:19.356993  1002 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1209 19:32:19.357008  1002 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1209 19:32:19.357028  1002 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1209 19:32:19.358144  1002 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1209 19:32:19.358170  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.358180  1002 net.cpp:165] Memory required for data: 1548801500\nI1209 19:32:19.358199  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1209 19:32:19.358217  1002 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1209 19:32:19.358229  1002 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1209 19:32:19.358250  1002 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI1209 19:32:19.358587  1002 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1209 19:32:19.358605  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.358615  1002 net.cpp:165] Memory required for data: 1550849500\nI1209 19:32:19.358639  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1209 19:32:19.358661  1002 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1209 19:32:19.358675  1002 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1209 19:32:19.358690  1002 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.358791  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1209 
19:32:19.359015  1002 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1209 19:32:19.359035  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.359045  1002 net.cpp:165] Memory required for data: 1552897500\nI1209 19:32:19.359066  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_dropout\nI1209 19:32:19.359084  1002 net.cpp:100] Creating Layer L3_b4_cbr1_dropout\nI1209 19:32:19.359097  1002 net.cpp:434] L3_b4_cbr1_dropout <- L3_b4_cbr1_bn_top\nI1209 19:32:19.359118  1002 net.cpp:395] L3_b4_cbr1_dropout -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.359174  1002 net.cpp:150] Setting up L3_b4_cbr1_dropout\nI1209 19:32:19.359200  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.359210  1002 net.cpp:165] Memory required for data: 1554945500\nI1209 19:32:19.359220  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1209 19:32:19.359236  1002 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1209 19:32:19.359249  1002 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1209 19:32:19.359262  1002 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1209 19:32:19.359282  1002 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1209 19:32:19.359295  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.359304  1002 net.cpp:165] Memory required for data: 1556993500\nI1209 19:32:19.359318  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1209 19:32:19.359339  1002 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1209 19:32:19.359351  1002 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1209 19:32:19.359375  1002 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1209 19:32:19.362066  1002 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1209 19:32:19.362087  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.362098  1002 net.cpp:165] Memory required for data: 1559041500\nI1209 19:32:19.362115  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1209 19:32:19.362139  1002 net.cpp:100] Creating Layer 
L3_b4_cbr2_bn\nI1209 19:32:19.362152  1002 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1209 19:32:19.362169  1002 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1209 19:32:19.362522  1002 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1209 19:32:19.362543  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.362553  1002 net.cpp:165] Memory required for data: 1561089500\nI1209 19:32:19.362579  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1209 19:32:19.362602  1002 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1209 19:32:19.362615  1002 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1209 19:32:19.362632  1002 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.362738  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1209 19:32:19.362954  1002 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1209 19:32:19.362979  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.362989  1002 net.cpp:165] Memory required for data: 1563137500\nI1209 19:32:19.363008  1002 layer_factory.hpp:77] Creating layer L3_b4_cbr2_dropout\nI1209 19:32:19.363030  1002 net.cpp:100] Creating Layer L3_b4_cbr2_dropout\nI1209 19:32:19.363044  1002 net.cpp:434] L3_b4_cbr2_dropout <- L3_b4_cbr2_bn_top\nI1209 19:32:19.363059  1002 net.cpp:395] L3_b4_cbr2_dropout -> L3_b4_cbr2_bn_top (in-place)\nI1209 19:32:19.363123  1002 net.cpp:150] Setting up L3_b4_cbr2_dropout\nI1209 19:32:19.363140  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.363149  1002 net.cpp:165] Memory required for data: 1565185500\nI1209 19:32:19.363160  1002 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1209 19:32:19.363178  1002 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1209 19:32:19.363189  1002 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1209 19:32:19.363203  1002 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1209 19:32:19.363224  1002 net.cpp:408] L3_b4_sum_eltwise -> 
L3_b4_sum_eltwise_top\nI1209 19:32:19.363282  1002 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1209 19:32:19.363301  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.363312  1002 net.cpp:165] Memory required for data: 1567233500\nI1209 19:32:19.363322  1002 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1209 19:32:19.363337  1002 net.cpp:100] Creating Layer L3_b4_relu\nI1209 19:32:19.363348  1002 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI1209 19:32:19.363369  1002 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1209 19:32:19.363390  1002 net.cpp:150] Setting up L3_b4_relu\nI1209 19:32:19.363404  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.363414  1002 net.cpp:165] Memory required for data: 1569281500\nI1209 19:32:19.363425  1002 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.363440  1002 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.363451  1002 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1209 19:32:19.363466  1002 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1209 19:32:19.363487  1002 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1209 19:32:19.363572  1002 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1209 19:32:19.363591  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.363605  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.363612  1002 net.cpp:165] Memory required for data: 1573377500\nI1209 19:32:19.363625  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1209 19:32:19.363648  1002 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1209 19:32:19.363660  1002 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1209 19:32:19.363685  1002 net.cpp:408] L3_b5_cbr1_conv -> 
L3_b5_cbr1_conv_top\nI1209 19:32:19.364769  1002 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1209 19:32:19.364790  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.364802  1002 net.cpp:165] Memory required for data: 1575425500\nI1209 19:32:19.364820  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1209 19:32:19.364847  1002 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1209 19:32:19.364861  1002 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI1209 19:32:19.364882  1002 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1209 19:32:19.365247  1002 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1209 19:32:19.365267  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.365276  1002 net.cpp:165] Memory required for data: 1577473500\nI1209 19:32:19.365299  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1209 19:32:19.365319  1002 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1209 19:32:19.365330  1002 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1209 19:32:19.365345  1002 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.365454  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1209 19:32:19.365666  1002 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1209 19:32:19.365692  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.365702  1002 net.cpp:165] Memory required for data: 1579521500\nI1209 19:32:19.365721  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr1_dropout\nI1209 19:32:19.365739  1002 net.cpp:100] Creating Layer L3_b5_cbr1_dropout\nI1209 19:32:19.365751  1002 net.cpp:434] L3_b5_cbr1_dropout <- L3_b5_cbr1_bn_top\nI1209 19:32:19.365766  1002 net.cpp:395] L3_b5_cbr1_dropout -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.365828  1002 net.cpp:150] Setting up L3_b5_cbr1_dropout\nI1209 19:32:19.365846  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.365857  1002 net.cpp:165] Memory required for data: 1581569500\nI1209 19:32:19.365869  1002 
layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1209 19:32:19.365885  1002 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1209 19:32:19.365897  1002 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1209 19:32:19.365911  1002 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1209 19:32:19.365931  1002 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1209 19:32:19.365945  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.365955  1002 net.cpp:165] Memory required for data: 1583617500\nI1209 19:32:19.365977  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1209 19:32:19.366003  1002 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1209 19:32:19.366017  1002 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1209 19:32:19.366035  1002 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1209 19:32:19.367146  1002 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1209 19:32:19.367166  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.367177  1002 net.cpp:165] Memory required for data: 1585665500\nI1209 19:32:19.367194  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1209 19:32:19.367218  1002 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1209 19:32:19.367231  1002 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1209 19:32:19.367249  1002 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1209 19:32:19.367586  1002 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1209 19:32:19.367605  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.367615  1002 net.cpp:165] Memory required for data: 1587713500\nI1209 19:32:19.367637  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1209 19:32:19.367655  1002 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1209 19:32:19.367666  1002 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1209 19:32:19.367682  1002 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.367795  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1209 
19:32:19.368018  1002 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1209 19:32:19.368037  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368048  1002 net.cpp:165] Memory required for data: 1589761500\nI1209 19:32:19.368068  1002 layer_factory.hpp:77] Creating layer L3_b5_cbr2_dropout\nI1209 19:32:19.368088  1002 net.cpp:100] Creating Layer L3_b5_cbr2_dropout\nI1209 19:32:19.368100  1002 net.cpp:434] L3_b5_cbr2_dropout <- L3_b5_cbr2_bn_top\nI1209 19:32:19.368129  1002 net.cpp:395] L3_b5_cbr2_dropout -> L3_b5_cbr2_bn_top (in-place)\nI1209 19:32:19.368188  1002 net.cpp:150] Setting up L3_b5_cbr2_dropout\nI1209 19:32:19.368214  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368224  1002 net.cpp:165] Memory required for data: 1591809500\nI1209 19:32:19.368235  1002 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1209 19:32:19.368252  1002 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1209 19:32:19.368264  1002 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1209 19:32:19.368278  1002 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1209 19:32:19.368293  1002 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1209 19:32:19.368360  1002 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1209 19:32:19.368378  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368387  1002 net.cpp:165] Memory required for data: 1593857500\nI1209 19:32:19.368399  1002 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1209 19:32:19.368417  1002 net.cpp:100] Creating Layer L3_b5_relu\nI1209 19:32:19.368429  1002 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1209 19:32:19.368443  1002 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1209 19:32:19.368463  1002 net.cpp:150] Setting up L3_b5_relu\nI1209 19:32:19.368479  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368489  1002 net.cpp:165] Memory required for data: 1595905500\nI1209 19:32:19.368500  1002 
layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.368515  1002 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.368525  1002 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1209 19:32:19.368546  1002 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1209 19:32:19.368566  1002 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1209 19:32:19.368649  1002 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1209 19:32:19.368676  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368691  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.368701  1002 net.cpp:165] Memory required for data: 1600001500\nI1209 19:32:19.368715  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1209 19:32:19.368741  1002 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1209 19:32:19.368755  1002 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1209 19:32:19.368773  1002 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1209 19:32:19.369880  1002 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1209 19:32:19.369901  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.369911  1002 net.cpp:165] Memory required for data: 1602049500\nI1209 19:32:19.369931  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1209 19:32:19.369956  1002 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1209 19:32:19.369976  1002 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1209 19:32:19.369993  1002 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1209 19:32:19.370334  1002 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1209 19:32:19.370354  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.370363  1002 net.cpp:165] Memory required for data: 1604097500\nI1209 19:32:19.370388  1002 layer_factory.hpp:77] 
Creating layer L3_b6_cbr1_scale\nI1209 19:32:19.370411  1002 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1209 19:32:19.370425  1002 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1209 19:32:19.370440  1002 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.370545  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1209 19:32:19.370762  1002 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1209 19:32:19.370781  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.370791  1002 net.cpp:165] Memory required for data: 1606145500\nI1209 19:32:19.370810  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_dropout\nI1209 19:32:19.370841  1002 net.cpp:100] Creating Layer L3_b6_cbr1_dropout\nI1209 19:32:19.370856  1002 net.cpp:434] L3_b6_cbr1_dropout <- L3_b6_cbr1_bn_top\nI1209 19:32:19.370877  1002 net.cpp:395] L3_b6_cbr1_dropout -> L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.370935  1002 net.cpp:150] Setting up L3_b6_cbr1_dropout\nI1209 19:32:19.370954  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.370971  1002 net.cpp:165] Memory required for data: 1608193500\nI1209 19:32:19.370985  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1209 19:32:19.371006  1002 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1209 19:32:19.371018  1002 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1209 19:32:19.371033  1002 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1209 19:32:19.371053  1002 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1209 19:32:19.371068  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.371078  1002 net.cpp:165] Memory required for data: 1610241500\nI1209 19:32:19.371089  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1209 19:32:19.371112  1002 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1209 19:32:19.371125  1002 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1209 19:32:19.371147  1002 net.cpp:408] L3_b6_cbr2_conv -> 
L3_b6_cbr2_conv_top\nI1209 19:32:19.372232  1002 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1209 19:32:19.372252  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.372262  1002 net.cpp:165] Memory required for data: 1612289500\nI1209 19:32:19.372284  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1209 19:32:19.372301  1002 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1209 19:32:19.372314  1002 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1209 19:32:19.372337  1002 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1209 19:32:19.372673  1002 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1209 19:32:19.372695  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.372706  1002 net.cpp:165] Memory required for data: 1614337500\nI1209 19:32:19.372728  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1209 19:32:19.372746  1002 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1209 19:32:19.372759  1002 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1209 19:32:19.372774  1002 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.372877  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1209 19:32:19.373100  1002 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1209 19:32:19.373121  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373131  1002 net.cpp:165] Memory required for data: 1616385500\nI1209 19:32:19.373150  1002 layer_factory.hpp:77] Creating layer L3_b6_cbr2_dropout\nI1209 19:32:19.373175  1002 net.cpp:100] Creating Layer L3_b6_cbr2_dropout\nI1209 19:32:19.373188  1002 net.cpp:434] L3_b6_cbr2_dropout <- L3_b6_cbr2_bn_top\nI1209 19:32:19.373204  1002 net.cpp:395] L3_b6_cbr2_dropout -> L3_b6_cbr2_bn_top (in-place)\nI1209 19:32:19.373261  1002 net.cpp:150] Setting up L3_b6_cbr2_dropout\nI1209 19:32:19.373289  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373301  1002 net.cpp:165] Memory required for data: 1618433500\nI1209 19:32:19.373311  1002 
layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1209 19:32:19.373327  1002 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1209 19:32:19.373338  1002 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1209 19:32:19.373350  1002 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1209 19:32:19.373366  1002 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1209 19:32:19.373425  1002 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1209 19:32:19.373445  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373456  1002 net.cpp:165] Memory required for data: 1620481500\nI1209 19:32:19.373468  1002 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1209 19:32:19.373489  1002 net.cpp:100] Creating Layer L3_b6_relu\nI1209 19:32:19.373503  1002 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1209 19:32:19.373527  1002 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1209 19:32:19.373548  1002 net.cpp:150] Setting up L3_b6_relu\nI1209 19:32:19.373564  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373572  1002 net.cpp:165] Memory required for data: 1622529500\nI1209 19:32:19.373584  1002 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.373600  1002 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.373611  1002 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1209 19:32:19.373626  1002 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1209 19:32:19.373646  1002 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1209 19:32:19.373736  1002 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1209 19:32:19.373759  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373772  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.373781  1002 
net.cpp:165] Memory required for data: 1626625500\nI1209 19:32:19.373795  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1209 19:32:19.373821  1002 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1209 19:32:19.373836  1002 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1209 19:32:19.373859  1002 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1209 19:32:19.374961  1002 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1209 19:32:19.374981  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.374991  1002 net.cpp:165] Memory required for data: 1628673500\nI1209 19:32:19.375011  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1209 19:32:19.375036  1002 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1209 19:32:19.375048  1002 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1209 19:32:19.375072  1002 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1209 19:32:19.375403  1002 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1209 19:32:19.375422  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.375432  1002 net.cpp:165] Memory required for data: 1630721500\nI1209 19:32:19.375454  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1209 19:32:19.375473  1002 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1209 19:32:19.375484  1002 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1209 19:32:19.375505  1002 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.375608  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1209 19:32:19.375823  1002 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1209 19:32:19.375841  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.375851  1002 net.cpp:165] Memory required for data: 1632769500\nI1209 19:32:19.375871  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_dropout\nI1209 19:32:19.375890  1002 net.cpp:100] Creating Layer L3_b7_cbr1_dropout\nI1209 19:32:19.375902  1002 net.cpp:434] L3_b7_cbr1_dropout <- 
L3_b7_cbr1_bn_top\nI1209 19:32:19.375921  1002 net.cpp:395] L3_b7_cbr1_dropout -> L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.375985  1002 net.cpp:150] Setting up L3_b7_cbr1_dropout\nI1209 19:32:19.376011  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.376022  1002 net.cpp:165] Memory required for data: 1634817500\nI1209 19:32:19.376032  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1209 19:32:19.376049  1002 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1209 19:32:19.376060  1002 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1209 19:32:19.376075  1002 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1209 19:32:19.376094  1002 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1209 19:32:19.376108  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.376117  1002 net.cpp:165] Memory required for data: 1636865500\nI1209 19:32:19.376128  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1209 19:32:19.376171  1002 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1209 19:32:19.376185  1002 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1209 19:32:19.376204  1002 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1209 19:32:19.377308  1002 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1209 19:32:19.377331  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.377341  1002 net.cpp:165] Memory required for data: 1638913500\nI1209 19:32:19.377360  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1209 19:32:19.377385  1002 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1209 19:32:19.377399  1002 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1209 19:32:19.377415  1002 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1209 19:32:19.377750  1002 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1209 19:32:19.377768  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.377779  1002 net.cpp:165] Memory required for data: 1640961500\nI1209 19:32:19.377800  1002 layer_factory.hpp:77] 
Creating layer L3_b7_cbr2_scale\nI1209 19:32:19.377822  1002 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1209 19:32:19.377836  1002 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1209 19:32:19.377851  1002 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.377966  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1209 19:32:19.378186  1002 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1209 19:32:19.378206  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378216  1002 net.cpp:165] Memory required for data: 1643009500\nI1209 19:32:19.378234  1002 layer_factory.hpp:77] Creating layer L3_b7_cbr2_dropout\nI1209 19:32:19.378258  1002 net.cpp:100] Creating Layer L3_b7_cbr2_dropout\nI1209 19:32:19.378273  1002 net.cpp:434] L3_b7_cbr2_dropout <- L3_b7_cbr2_bn_top\nI1209 19:32:19.378288  1002 net.cpp:395] L3_b7_cbr2_dropout -> L3_b7_cbr2_bn_top (in-place)\nI1209 19:32:19.378350  1002 net.cpp:150] Setting up L3_b7_cbr2_dropout\nI1209 19:32:19.378368  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378378  1002 net.cpp:165] Memory required for data: 1645057500\nI1209 19:32:19.378391  1002 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1209 19:32:19.378408  1002 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1209 19:32:19.378420  1002 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1209 19:32:19.378439  1002 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1209 19:32:19.378458  1002 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1209 19:32:19.378515  1002 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1209 19:32:19.378532  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378542  1002 net.cpp:165] Memory required for data: 1647105500\nI1209 19:32:19.378556  1002 layer_factory.hpp:77] Creating layer L3_b7_relu\nI1209 19:32:19.378571  1002 net.cpp:100] Creating Layer L3_b7_relu\nI1209 19:32:19.378582  1002 net.cpp:434] L3_b7_relu <- 
L3_b7_sum_eltwise_top\nI1209 19:32:19.378602  1002 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1209 19:32:19.378623  1002 net.cpp:150] Setting up L3_b7_relu\nI1209 19:32:19.378638  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378648  1002 net.cpp:165] Memory required for data: 1649153500\nI1209 19:32:19.378659  1002 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.378672  1002 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.378684  1002 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1209 19:32:19.378698  1002 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1209 19:32:19.378718  1002 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1209 19:32:19.378808  1002 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1209 19:32:19.378828  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378841  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.378861  1002 net.cpp:165] Memory required for data: 1653249500\nI1209 19:32:19.378873  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1209 19:32:19.378895  1002 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1209 19:32:19.378906  1002 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1209 19:32:19.378931  1002 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1209 19:32:19.381382  1002 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1209 19:32:19.381405  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.381415  1002 net.cpp:165] Memory required for data: 1655297500\nI1209 19:32:19.381436  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1209 19:32:19.381461  1002 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1209 19:32:19.381474  1002 net.cpp:434] L3_b8_cbr1_bn <- 
L3_b8_cbr1_conv_top\nI1209 19:32:19.381490  1002 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1209 19:32:19.381830  1002 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1209 19:32:19.381850  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.381860  1002 net.cpp:165] Memory required for data: 1657345500\nI1209 19:32:19.381882  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1209 19:32:19.381908  1002 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1209 19:32:19.381922  1002 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1209 19:32:19.381938  1002 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.382053  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1209 19:32:19.382272  1002 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1209 19:32:19.382292  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.382302  1002 net.cpp:165] Memory required for data: 1659393500\nI1209 19:32:19.382323  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_dropout\nI1209 19:32:19.382345  1002 net.cpp:100] Creating Layer L3_b8_cbr1_dropout\nI1209 19:32:19.382359  1002 net.cpp:434] L3_b8_cbr1_dropout <- L3_b8_cbr1_bn_top\nI1209 19:32:19.382375  1002 net.cpp:395] L3_b8_cbr1_dropout -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.382438  1002 net.cpp:150] Setting up L3_b8_cbr1_dropout\nI1209 19:32:19.382457  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.382467  1002 net.cpp:165] Memory required for data: 1661441500\nI1209 19:32:19.382477  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1209 19:32:19.382494  1002 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1209 19:32:19.382508  1002 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1209 19:32:19.382527  1002 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1209 19:32:19.382549  1002 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1209 19:32:19.382562  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.382572  
1002 net.cpp:165] Memory required for data: 1663489500\nI1209 19:32:19.382585  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1209 19:32:19.382607  1002 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1209 19:32:19.382619  1002 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1209 19:32:19.382637  1002 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1209 19:32:19.383728  1002 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1209 19:32:19.383756  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.383767  1002 net.cpp:165] Memory required for data: 1665537500\nI1209 19:32:19.383786  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1209 19:32:19.383805  1002 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1209 19:32:19.383817  1002 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1209 19:32:19.383839  1002 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1209 19:32:19.384176  1002 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1209 19:32:19.384196  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.384204  1002 net.cpp:165] Memory required for data: 1667585500\nI1209 19:32:19.384227  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1209 19:32:19.384251  1002 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1209 19:32:19.384274  1002 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1209 19:32:19.384289  1002 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.384397  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1209 19:32:19.384618  1002 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1209 19:32:19.384637  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.384646  1002 net.cpp:165] Memory required for data: 1669633500\nI1209 19:32:19.384667  1002 layer_factory.hpp:77] Creating layer L3_b8_cbr2_dropout\nI1209 19:32:19.384685  1002 net.cpp:100] Creating Layer L3_b8_cbr2_dropout\nI1209 19:32:19.384697  1002 net.cpp:434] L3_b8_cbr2_dropout <- L3_b8_cbr2_bn_top\nI1209 
19:32:19.384719  1002 net.cpp:395] L3_b8_cbr2_dropout -> L3_b8_cbr2_bn_top (in-place)\nI1209 19:32:19.384778  1002 net.cpp:150] Setting up L3_b8_cbr2_dropout\nI1209 19:32:19.384805  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.384815  1002 net.cpp:165] Memory required for data: 1671681500\nI1209 19:32:19.384825  1002 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1209 19:32:19.384841  1002 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1209 19:32:19.384855  1002 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1209 19:32:19.384866  1002 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1209 19:32:19.384882  1002 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1209 19:32:19.384939  1002 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1209 19:32:19.384965  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.384977  1002 net.cpp:165] Memory required for data: 1673729500\nI1209 19:32:19.384990  1002 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1209 19:32:19.385010  1002 net.cpp:100] Creating Layer L3_b8_relu\nI1209 19:32:19.385023  1002 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1209 19:32:19.385038  1002 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1209 19:32:19.385057  1002 net.cpp:150] Setting up L3_b8_relu\nI1209 19:32:19.385071  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.385080  1002 net.cpp:165] Memory required for data: 1675777500\nI1209 19:32:19.385092  1002 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.385107  1002 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.385118  1002 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1209 19:32:19.385133  1002 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1209 19:32:19.385154  1002 net.cpp:408] 
L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1209 19:32:19.385239  1002 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1209 19:32:19.385257  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.385272  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.385280  1002 net.cpp:165] Memory required for data: 1679873500\nI1209 19:32:19.385290  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1209 19:32:19.385318  1002 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1209 19:32:19.385331  1002 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1209 19:32:19.385350  1002 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1209 19:32:19.386451  1002 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1209 19:32:19.386472  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.386482  1002 net.cpp:165] Memory required for data: 1681921500\nI1209 19:32:19.386500  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1209 19:32:19.386525  1002 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1209 19:32:19.386538  1002 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1209 19:32:19.386560  1002 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1209 19:32:19.386903  1002 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1209 19:32:19.386921  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.386931  1002 net.cpp:165] Memory required for data: 1683969500\nI1209 19:32:19.386972  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1209 19:32:19.386992  1002 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1209 19:32:19.387006  1002 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1209 19:32:19.387020  1002 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.387127  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1209 19:32:19.387347  1002 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1209 19:32:19.387367  
1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.387377  1002 net.cpp:165] Memory required for data: 1686017500\nI1209 19:32:19.387396  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_dropout\nI1209 19:32:19.387415  1002 net.cpp:100] Creating Layer L3_b9_cbr1_dropout\nI1209 19:32:19.387428  1002 net.cpp:434] L3_b9_cbr1_dropout <- L3_b9_cbr1_bn_top\nI1209 19:32:19.387449  1002 net.cpp:395] L3_b9_cbr1_dropout -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.387506  1002 net.cpp:150] Setting up L3_b9_cbr1_dropout\nI1209 19:32:19.387532  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.387542  1002 net.cpp:165] Memory required for data: 1688065500\nI1209 19:32:19.387553  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1209 19:32:19.387569  1002 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1209 19:32:19.387583  1002 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1209 19:32:19.387596  1002 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1209 19:32:19.387615  1002 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1209 19:32:19.387630  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.387639  1002 net.cpp:165] Memory required for data: 1690113500\nI1209 19:32:19.387651  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1209 19:32:19.387681  1002 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1209 19:32:19.387693  1002 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1209 19:32:19.387711  1002 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1209 19:32:19.388801  1002 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1209 19:32:19.388823  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.388833  1002 net.cpp:165] Memory required for data: 1692161500\nI1209 19:32:19.388852  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1209 19:32:19.388876  1002 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1209 19:32:19.388890  1002 net.cpp:434] L3_b9_cbr2_bn <- 
L3_b9_cbr2_conv_top\nI1209 19:32:19.388911  1002 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1209 19:32:19.389245  1002 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1209 19:32:19.389266  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.389276  1002 net.cpp:165] Memory required for data: 1694209500\nI1209 19:32:19.389299  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1209 19:32:19.389317  1002 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1209 19:32:19.389328  1002 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1209 19:32:19.389349  1002 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.389451  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1209 19:32:19.389673  1002 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1209 19:32:19.389693  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.389701  1002 net.cpp:165] Memory required for data: 1696257500\nI1209 19:32:19.389722  1002 layer_factory.hpp:77] Creating layer L3_b9_cbr2_dropout\nI1209 19:32:19.389747  1002 net.cpp:100] Creating Layer L3_b9_cbr2_dropout\nI1209 19:32:19.389760  1002 net.cpp:434] L3_b9_cbr2_dropout <- L3_b9_cbr2_bn_top\nI1209 19:32:19.389775  1002 net.cpp:395] L3_b9_cbr2_dropout -> L3_b9_cbr2_bn_top (in-place)\nI1209 19:32:19.389839  1002 net.cpp:150] Setting up L3_b9_cbr2_dropout\nI1209 19:32:19.389858  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.389866  1002 net.cpp:165] Memory required for data: 1698305500\nI1209 19:32:19.389879  1002 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1209 19:32:19.389907  1002 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1209 19:32:19.389921  1002 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1209 19:32:19.389935  1002 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1209 19:32:19.389966  1002 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1209 19:32:19.390029  1002 net.cpp:150] Setting up 
L3_b9_sum_eltwise\nI1209 19:32:19.390049  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.390058  1002 net.cpp:165] Memory required for data: 1700353500\nI1209 19:32:19.390072  1002 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1209 19:32:19.390087  1002 net.cpp:100] Creating Layer L3_b9_relu\nI1209 19:32:19.390100  1002 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1209 19:32:19.390112  1002 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1209 19:32:19.390133  1002 net.cpp:150] Setting up L3_b9_relu\nI1209 19:32:19.390147  1002 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1209 19:32:19.390156  1002 net.cpp:165] Memory required for data: 1702401500\nI1209 19:32:19.390167  1002 layer_factory.hpp:77] Creating layer post_pool\nI1209 19:32:19.390190  1002 net.cpp:100] Creating Layer post_pool\nI1209 19:32:19.390202  1002 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1209 19:32:19.390219  1002 net.cpp:408] post_pool -> post_pool\nI1209 19:32:19.390280  1002 net.cpp:150] Setting up post_pool\nI1209 19:32:19.390297  1002 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1209 19:32:19.390306  1002 net.cpp:165] Memory required for data: 1702433500\nI1209 19:32:19.390319  1002 layer_factory.hpp:77] Creating layer post_FC\nI1209 19:32:19.390347  1002 net.cpp:100] Creating Layer post_FC\nI1209 19:32:19.390358  1002 net.cpp:434] post_FC <- post_pool\nI1209 19:32:19.390375  1002 net.cpp:408] post_FC -> post_FC_top\nI1209 19:32:19.390599  1002 net.cpp:150] Setting up post_FC\nI1209 19:32:19.390617  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.390627  1002 net.cpp:165] Memory required for data: 1702438500\nI1209 19:32:19.390648  1002 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1209 19:32:19.390664  1002 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1209 19:32:19.390676  1002 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1209 19:32:19.390691  1002 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1209 19:32:19.390710  1002 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1209 19:32:19.390801  1002 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1209 19:32:19.390820  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.390832  1002 net.cpp:157] Top shape: 125 10 (1250)\nI1209 19:32:19.390842  1002 net.cpp:165] Memory required for data: 1702448500\nI1209 19:32:19.390853  1002 layer_factory.hpp:77] Creating layer accuracy\nI1209 19:32:19.390871  1002 net.cpp:100] Creating Layer accuracy\nI1209 19:32:19.390882  1002 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1209 19:32:19.390894  1002 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1209 19:32:19.390918  1002 net.cpp:408] accuracy -> accuracy\nI1209 19:32:19.390944  1002 net.cpp:150] Setting up accuracy\nI1209 19:32:19.390965  1002 net.cpp:157] Top shape: (1)\nI1209 19:32:19.390976  1002 net.cpp:165] Memory required for data: 1702448504\nI1209 19:32:19.390987  1002 layer_factory.hpp:77] Creating layer loss\nI1209 19:32:19.391005  1002 net.cpp:100] Creating Layer loss\nI1209 19:32:19.391016  1002 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1209 19:32:19.391029  1002 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1209 19:32:19.391044  1002 net.cpp:408] loss -> loss\nI1209 19:32:19.391069  1002 layer_factory.hpp:77] Creating layer loss\nI1209 19:32:19.391255  1002 net.cpp:150] Setting up loss\nI1209 19:32:19.391273  1002 net.cpp:157] Top shape: (1)\nI1209 19:32:19.391283  1002 net.cpp:160]     with loss weight 1\nI1209 19:32:19.391307  1002 net.cpp:165] Memory required for data: 1702448508\nI1209 19:32:19.391320  1002 net.cpp:226] loss needs backward computation.\nI1209 19:32:19.391331  1002 net.cpp:228] accuracy does not need backward computation.\nI1209 19:32:19.391352  1002 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1209 19:32:19.391363  1002 net.cpp:226] 
post_FC needs backward computation.\nI1209 19:32:19.391373  1002 net.cpp:226] post_pool needs backward computation.\nI1209 19:32:19.391384  1002 net.cpp:226] L3_b9_relu needs backward computation.\nI1209 19:32:19.391393  1002 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.391403  1002 net.cpp:226] L3_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.391412  1002 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.391422  1002 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.391430  1002 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.391440  1002 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.391449  1002 net.cpp:226] L3_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.391458  1002 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.391468  1002 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.391477  1002 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.391487  1002 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1209 19:32:19.391497  1002 net.cpp:226] L3_b8_relu needs backward computation.\nI1209 19:32:19.391507  1002 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.391517  1002 net.cpp:226] L3_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.391527  1002 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.391536  1002 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.391546  1002 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.391556  1002 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.391566  1002 net.cpp:226] L3_b8_cbr1_dropout needs backward computation.\nI1209 19:32:19.391574  1002 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.391584  1002 net.cpp:226] 
L3_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.391594  1002 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.391605  1002 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1209 19:32:19.391614  1002 net.cpp:226] L3_b7_relu needs backward computation.\nI1209 19:32:19.391624  1002 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1209 19:32:19.391635  1002 net.cpp:226] L3_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.391644  1002 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.391654  1002 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.391664  1002 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.391683  1002 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.391693  1002 net.cpp:226] L3_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.391702  1002 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.391711  1002 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.391721  1002 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.391731  1002 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1209 19:32:19.391742  1002 net.cpp:226] L3_b6_relu needs backward computation.\nI1209 19:32:19.391752  1002 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1209 19:32:19.391763  1002 net.cpp:226] L3_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.391772  1002 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1209 19:32:19.391782  1002 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.391793  1002 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.391803  1002 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.391811  1002 net.cpp:226] L3_b6_cbr1_dropout needs backward computation.\nI1209 
19:32:19.391830  1002 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.391840  1002 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.391850  1002 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.391862  1002 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1209 19:32:19.391873  1002 net.cpp:226] L3_b5_relu needs backward computation.\nI1209 19:32:19.391882  1002 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.391893  1002 net.cpp:226] L3_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.391903  1002 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.391912  1002 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.391923  1002 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.391933  1002 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.391943  1002 net.cpp:226] L3_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.391953  1002 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.391971  1002 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.391983  1002 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.391993  1002 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1209 19:32:19.392004  1002 net.cpp:226] L3_b4_relu needs backward computation.\nI1209 19:32:19.392014  1002 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.392024  1002 net.cpp:226] L3_b4_cbr2_dropout needs backward computation.\nI1209 19:32:19.392035  1002 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.392045  1002 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1209 19:32:19.392055  1002 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.392066  1002 net.cpp:226] L3_b4_cbr1_relu needs 
backward computation.\nI1209 19:32:19.392076  1002 net.cpp:226] L3_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.392086  1002 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1209 19:32:19.392094  1002 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.392104  1002 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.392115  1002 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1209 19:32:19.392127  1002 net.cpp:226] L3_b3_relu needs backward computation.\nI1209 19:32:19.392137  1002 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.392148  1002 net.cpp:226] L3_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.392158  1002 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.392168  1002 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.392179  1002 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.392187  1002 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.392197  1002 net.cpp:226] L3_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.392206  1002 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.392216  1002 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.392226  1002 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.392237  1002 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1209 19:32:19.392247  1002 net.cpp:226] L3_b2_relu needs backward computation.\nI1209 19:32:19.392257  1002 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.392268  1002 net.cpp:226] L3_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.392278  1002 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1209 19:32:19.392287  1002 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.392298  1002 
net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.392308  1002 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.392329  1002 net.cpp:226] L3_b2_cbr1_dropout needs backward computation.\nI1209 19:32:19.392341  1002 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.392351  1002 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1209 19:32:19.392361  1002 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.392371  1002 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1209 19:32:19.392382  1002 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1209 19:32:19.392393  1002 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1209 19:32:19.392403  1002 net.cpp:226] L3_b1_relu needs backward computation.\nI1209 19:32:19.392412  1002 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.392423  1002 net.cpp:226] L3_b1_pool needs backward computation.\nI1209 19:32:19.392433  1002 net.cpp:226] L3_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.392444  1002 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.392454  1002 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.392464  1002 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.392475  1002 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1209 19:32:19.392485  1002 net.cpp:226] L3_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.392495  1002 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.392504  1002 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.392518  1002 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.392534  1002 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1209 19:32:19.392546  1002 net.cpp:226] L2_b9_relu needs backward computation.\nI1209 
19:32:19.392556  1002 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.392567  1002 net.cpp:226] L2_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.392577  1002 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.392588  1002 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.392598  1002 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.392609  1002 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.392619  1002 net.cpp:226] L2_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.392629  1002 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.392640  1002 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.392650  1002 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.392660  1002 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1209 19:32:19.392673  1002 net.cpp:226] L2_b8_relu needs backward computation.\nI1209 19:32:19.392683  1002 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.392694  1002 net.cpp:226] L2_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.392704  1002 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.392714  1002 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.392725  1002 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.392736  1002 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.392746  1002 net.cpp:226] L2_b8_cbr1_dropout needs backward computation.\nI1209 19:32:19.392755  1002 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.392765  1002 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.392776  1002 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.392786  1002 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split 
needs backward computation.\nI1209 19:32:19.392797  1002 net.cpp:226] L2_b7_relu needs backward computation.\nI1209 19:32:19.392807  1002 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1209 19:32:19.392819  1002 net.cpp:226] L2_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.392838  1002 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.392849  1002 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.392859  1002 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.392870  1002 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.392880  1002 net.cpp:226] L2_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.392889  1002 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.392899  1002 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.392910  1002 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.392921  1002 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1209 19:32:19.392931  1002 net.cpp:226] L2_b6_relu needs backward computation.\nI1209 19:32:19.392942  1002 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1209 19:32:19.392954  1002 net.cpp:226] L2_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.392973  1002 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1209 19:32:19.392984  1002 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.392997  1002 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.393008  1002 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.393018  1002 net.cpp:226] L2_b6_cbr1_dropout needs backward computation.\nI1209 19:32:19.393029  1002 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.393038  1002 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.393049  1002 net.cpp:226] 
L2_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.393062  1002 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1209 19:32:19.393074  1002 net.cpp:226] L2_b5_relu needs backward computation.\nI1209 19:32:19.393085  1002 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.393098  1002 net.cpp:226] L2_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.393110  1002 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.393120  1002 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.393131  1002 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.393142  1002 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.393153  1002 net.cpp:226] L2_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.393164  1002 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.393175  1002 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.393187  1002 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.393198  1002 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1209 19:32:19.393208  1002 net.cpp:226] L2_b4_relu needs backward computation.\nI1209 19:32:19.393221  1002 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.393239  1002 net.cpp:226] L2_b4_cbr2_dropout needs backward computation.\nI1209 19:32:19.393250  1002 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.393261  1002 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1209 19:32:19.393273  1002 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.393285  1002 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1209 19:32:19.393296  1002 net.cpp:226] L2_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.393307  1002 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1209 
19:32:19.393318  1002 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.393329  1002 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.393340  1002 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1209 19:32:19.393352  1002 net.cpp:226] L2_b3_relu needs backward computation.\nI1209 19:32:19.393362  1002 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.393384  1002 net.cpp:226] L2_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.393396  1002 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.393407  1002 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.393419  1002 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.393431  1002 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.393441  1002 net.cpp:226] L2_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.393451  1002 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.393461  1002 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.393472  1002 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.393483  1002 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1209 19:32:19.393496  1002 net.cpp:226] L2_b2_relu needs backward computation.\nI1209 19:32:19.393506  1002 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.393518  1002 net.cpp:226] L2_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.393529  1002 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1209 19:32:19.393539  1002 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.393550  1002 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.393561  1002 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.393573  1002 net.cpp:226] L2_b2_cbr1_dropout needs 
backward computation.\nI1209 19:32:19.393584  1002 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.393594  1002 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1209 19:32:19.393604  1002 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.393615  1002 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1209 19:32:19.393627  1002 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1209 19:32:19.393640  1002 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1209 19:32:19.393651  1002 net.cpp:226] L2_b1_relu needs backward computation.\nI1209 19:32:19.393661  1002 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.393673  1002 net.cpp:226] L2_b1_pool needs backward computation.\nI1209 19:32:19.393685  1002 net.cpp:226] L2_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.393697  1002 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.393707  1002 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.393718  1002 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.393730  1002 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1209 19:32:19.393740  1002 net.cpp:226] L2_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.393751  1002 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.393761  1002 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.393774  1002 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.393785  1002 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1209 19:32:19.393796  1002 net.cpp:226] L1_b9_relu needs backward computation.\nI1209 19:32:19.393806  1002 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1209 19:32:19.393818  1002 net.cpp:226] L1_b9_cbr2_dropout needs backward computation.\nI1209 19:32:19.393828  1002 net.cpp:226] 
L1_b9_cbr2_scale needs backward computation.\nI1209 19:32:19.393839  1002 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1209 19:32:19.393851  1002 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1209 19:32:19.393862  1002 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1209 19:32:19.393872  1002 net.cpp:226] L1_b9_cbr1_dropout needs backward computation.\nI1209 19:32:19.393883  1002 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1209 19:32:19.393893  1002 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1209 19:32:19.393914  1002 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1209 19:32:19.393929  1002 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1209 19:32:19.393940  1002 net.cpp:226] L1_b8_relu needs backward computation.\nI1209 19:32:19.393951  1002 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1209 19:32:19.393972  1002 net.cpp:226] L1_b8_cbr2_dropout needs backward computation.\nI1209 19:32:19.393985  1002 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1209 19:32:19.393995  1002 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1209 19:32:19.394006  1002 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1209 19:32:19.394017  1002 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1209 19:32:19.394028  1002 net.cpp:226] L1_b8_cbr1_dropout needs backward computation.\nI1209 19:32:19.394038  1002 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1209 19:32:19.394048  1002 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1209 19:32:19.394060  1002 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1209 19:32:19.394073  1002 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1209 19:32:19.394083  1002 net.cpp:226] L1_b7_relu needs backward computation.\nI1209 19:32:19.394094  1002 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1209 
19:32:19.394106  1002 net.cpp:226] L1_b7_cbr2_dropout needs backward computation.\nI1209 19:32:19.394119  1002 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1209 19:32:19.394129  1002 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1209 19:32:19.394140  1002 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1209 19:32:19.394156  1002 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1209 19:32:19.394170  1002 net.cpp:226] L1_b7_cbr1_dropout needs backward computation.\nI1209 19:32:19.394179  1002 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1209 19:32:19.394189  1002 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1209 19:32:19.394201  1002 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1209 19:32:19.394212  1002 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1209 19:32:19.394223  1002 net.cpp:226] L1_b6_relu needs backward computation.\nI1209 19:32:19.394234  1002 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1209 19:32:19.394245  1002 net.cpp:226] L1_b6_cbr2_dropout needs backward computation.\nI1209 19:32:19.394258  1002 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1209 19:32:19.394268  1002 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1209 19:32:19.394280  1002 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1209 19:32:19.394291  1002 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1209 19:32:19.394304  1002 net.cpp:226] L1_b6_cbr1_dropout needs backward computation.\nI1209 19:32:19.394314  1002 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1209 19:32:19.394325  1002 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1209 19:32:19.394335  1002 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1209 19:32:19.394347  1002 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1209 19:32:19.394358  1002 net.cpp:226] L1_b5_relu needs 
backward computation.\nI1209 19:32:19.394369  1002 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1209 19:32:19.394381  1002 net.cpp:226] L1_b5_cbr2_dropout needs backward computation.\nI1209 19:32:19.394392  1002 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1209 19:32:19.394403  1002 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1209 19:32:19.394414  1002 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1209 19:32:19.394426  1002 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1209 19:32:19.394438  1002 net.cpp:226] L1_b5_cbr1_dropout needs backward computation.\nI1209 19:32:19.394448  1002 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1209 19:32:19.394470  1002 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1209 19:32:19.394484  1002 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1209 19:32:19.394495  1002 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1209 19:32:19.394508  1002 net.cpp:226] L1_b4_relu needs backward computation.\nI1209 19:32:19.394520  1002 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1209 19:32:19.394531  1002 net.cpp:226] L1_b4_cbr2_dropout needs backward computation.\nI1209 19:32:19.394542  1002 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1209 19:32:19.394554  1002 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1209 19:32:19.394565  1002 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1209 19:32:19.394577  1002 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1209 19:32:19.394588  1002 net.cpp:226] L1_b4_cbr1_dropout needs backward computation.\nI1209 19:32:19.394599  1002 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1209 19:32:19.394610  1002 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1209 19:32:19.394623  1002 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1209 19:32:19.394634  1002 net.cpp:226] 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1209 19:32:19.394645  1002 net.cpp:226] L1_b3_relu needs backward computation.\nI1209 19:32:19.394656  1002 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1209 19:32:19.394668  1002 net.cpp:226] L1_b3_cbr2_dropout needs backward computation.\nI1209 19:32:19.394680  1002 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1209 19:32:19.394690  1002 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1209 19:32:19.394701  1002 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1209 19:32:19.394712  1002 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1209 19:32:19.394723  1002 net.cpp:226] L1_b3_cbr1_dropout needs backward computation.\nI1209 19:32:19.394734  1002 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1209 19:32:19.394745  1002 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1209 19:32:19.394757  1002 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1209 19:32:19.394768  1002 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1209 19:32:19.394781  1002 net.cpp:226] L1_b2_relu needs backward computation.\nI1209 19:32:19.394793  1002 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1209 19:32:19.394804  1002 net.cpp:226] L1_b2_cbr2_dropout needs backward computation.\nI1209 19:32:19.394815  1002 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1209 19:32:19.394826  1002 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1209 19:32:19.394837  1002 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1209 19:32:19.394850  1002 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1209 19:32:19.394860  1002 net.cpp:226] L1_b2_cbr1_dropout needs backward computation.\nI1209 19:32:19.394872  1002 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1209 19:32:19.394882  1002 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1209 
19:32:19.394894  1002 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1209 19:32:19.394906  1002 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1209 19:32:19.394918  1002 net.cpp:226] L1_b1_relu needs backward computation.\nI1209 19:32:19.394929  1002 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1209 19:32:19.394942  1002 net.cpp:226] L1_b1_cbr2_dropout needs backward computation.\nI1209 19:32:19.394953  1002 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1209 19:32:19.394973  1002 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1209 19:32:19.394985  1002 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1209 19:32:19.394997  1002 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1209 19:32:19.395009  1002 net.cpp:226] L1_b1_cbr1_dropout needs backward computation.\nI1209 19:32:19.395030  1002 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1209 19:32:19.395042  1002 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1209 19:32:19.395053  1002 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1209 19:32:19.395066  1002 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1209 19:32:19.395078  1002 net.cpp:226] pre_relu needs backward computation.\nI1209 19:32:19.395089  1002 net.cpp:226] pre_scale needs backward computation.\nI1209 19:32:19.395100  1002 net.cpp:226] pre_bn needs backward computation.\nI1209 19:32:19.395112  1002 net.cpp:226] pre_conv needs backward computation.\nI1209 19:32:19.395124  1002 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1209 19:32:19.395136  1002 net.cpp:228] dataLayer does not need backward computation.\nI1209 19:32:19.395146  1002 net.cpp:270] This network produces output accuracy\nI1209 19:32:19.395159  1002 net.cpp:270] This network produces output loss\nI1209 19:32:19.395568  1002 net.cpp:283] Network initialization done.\nI1209 19:32:19.397133  1002 
solver.cpp:60] Solver scaffolding done.\nI1209 19:32:19.623332  1002 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1209 19:32:20.015992  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:20.016060  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:20.023928  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:20.255004  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:20.255117  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:20.292281  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:20.292395  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:20.777251  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:20.777323  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:20.786072  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:21.033346  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:21.033485  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:21.088305  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:21.088444  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:21.640427  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:21.640493  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:21.650283  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 
19:32:21.926249  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:21.926379  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:22.001760  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:22.001891  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:22.091006  1002 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1209 19:32:22.608793  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:22.608857  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:22.619496  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:22.917637  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:22.917845  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:23.014569  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:23.014763  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:23.707684  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:23.707756  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:23.719367  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:24.043756  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:24.043972  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:24.162711  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:24.162927  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 
19:32:24.929363  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:24.929436  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:24.941931  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:25.286309  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:25.286556  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:25.425354  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:25.425591  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:26.263645  1002 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1209 19:32:26.263712  1002 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1209 19:32:26.277000  1002 data_layer.cpp:41] output data size: 125,3,32,32\nI1209 19:32:26.369261  1017 blocking_queue.cpp:50] Waiting for data\nI1209 19:32:26.497220  1020 blocking_queue.cpp:50] Waiting for data\nI1209 19:32:26.767079  1002 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1209 19:32:26.767361  1002 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1209 19:32:26.930822  1002 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1209 19:32:26.931113  1002 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1209 19:32:27.114872  1002 parallel.cpp:425] Starting Optimization\nI1209 19:32:27.116605  1002 solver.cpp:279] Solving Cifar-Resnet\nI1209 19:32:27.116632  1002 solver.cpp:280] Learning Rate Policy: triangular\nI1209 19:32:27.120476  1002 solver.cpp:337] Iteration 0, Testing net (#0)\nI1209 19:33:48.430917  1002 solver.cpp:404]     Test net output #0: 
accuracy = 0.10052\nI1209 19:33:48.431262  1002 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI1209 19:33:52.855252  1002 solver.cpp:228] Iteration 0, loss = 3.03112\nI1209 19:33:52.855304  1002 solver.cpp:244]     Train net output #0: accuracy = 0.112\nI1209 19:33:52.855326  1002 solver.cpp:244]     Train net output #1: loss = 3.03112 (* 1 = 3.03112 loss)\nI1209 19:33:52.885442  1002 sgd_solver.cpp:166] Iteration 0, lr = 0\nI1209 19:36:11.376921  1002 solver.cpp:337] Iteration 100, Testing net (#0)\nI1209 19:37:33.054373  1002 solver.cpp:404]     Test net output #0: accuracy = 0.30892\nI1209 19:37:33.054672  1002 solver.cpp:404]     Test net output #1: loss = 1.91102 (* 1 = 1.91102 loss)\nI1209 19:37:34.378327  1002 solver.cpp:228] Iteration 100, loss = 1.70358\nI1209 19:37:34.378383  1002 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI1209 19:37:34.378401  1002 solver.cpp:244]     Train net output #1: loss = 1.70358 (* 1 = 1.70358 loss)\nI1209 19:37:34.480160  1002 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI1209 19:39:53.121690  1002 solver.cpp:337] Iteration 200, Testing net (#0)\nI1209 19:41:14.779201  1002 solver.cpp:404]     Test net output #0: accuracy = 0.35224\nI1209 19:41:14.779503  1002 solver.cpp:404]     Test net output #1: loss = 2.09881 (* 1 = 2.09881 loss)\nI1209 19:41:16.102825  1002 solver.cpp:228] Iteration 200, loss = 1.43102\nI1209 19:41:16.102881  1002 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI1209 19:41:16.102905  1002 solver.cpp:244]     Train net output #1: loss = 1.43102 (* 1 = 1.43102 loss)\nI1209 19:41:16.203382  1002 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI1209 19:43:34.935475  1002 solver.cpp:337] Iteration 300, Testing net (#0)\nI1209 19:44:56.587081  1002 solver.cpp:404]     Test net output #0: accuracy = 0.507\nI1209 19:44:56.587376  1002 solver.cpp:404]     Test net output #1: loss = 1.43858 (* 1 = 1.43858 loss)\nI1209 19:44:57.910207  1002 solver.cpp:228] 
Iteration 300, loss = 1.31638\nI1209 19:44:57.910262  1002 solver.cpp:244]     Train net output #0: accuracy = 0.552\nI1209 19:44:57.910280  1002 solver.cpp:244]     Train net output #1: loss = 1.31638 (* 1 = 1.31638 loss)\nI1209 19:44:58.015215  1002 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI1209 19:47:16.766927  1002 solver.cpp:337] Iteration 400, Testing net (#0)\nI1209 19:48:38.489856  1002 solver.cpp:404]     Test net output #0: accuracy = 0.57504\nI1209 19:48:38.490162  1002 solver.cpp:404]     Test net output #1: loss = 1.34351 (* 1 = 1.34351 loss)\nI1209 19:48:39.813066  1002 solver.cpp:228] Iteration 400, loss = 0.914001\nI1209 19:48:39.813127  1002 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI1209 19:48:39.813144  1002 solver.cpp:244]     Train net output #1: loss = 0.914001 (* 1 = 0.914001 loss)\nI1209 19:48:39.913702  1002 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI1209 19:50:58.562225  1002 solver.cpp:337] Iteration 500, Testing net (#0)\nI1209 19:52:19.687814  1002 solver.cpp:404]     Test net output #0: accuracy = 0.65084\nI1209 19:52:19.688108  1002 solver.cpp:404]     Test net output #1: loss = 1.09406 (* 1 = 1.09406 loss)\nI1209 19:52:21.009030  1002 solver.cpp:228] Iteration 500, loss = 0.886332\nI1209 19:52:21.009078  1002 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI1209 19:52:21.009093  1002 solver.cpp:244]     Train net output #1: loss = 0.886332 (* 1 = 0.886332 loss)\nI1209 19:52:21.109779  1002 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI1209 19:54:39.606307  1002 solver.cpp:337] Iteration 600, Testing net (#0)\nI1209 19:56:00.315304  1002 solver.cpp:404]     Test net output #0: accuracy = 0.68792\nI1209 19:56:00.315585  1002 solver.cpp:404]     Test net output #1: loss = 0.935917 (* 1 = 0.935917 loss)\nI1209 19:56:01.634402  1002 solver.cpp:228] Iteration 600, loss = 0.817234\nI1209 19:56:01.634444  1002 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI1209 19:56:01.634462  
1002 solver.cpp:244]     Train net output #1: loss = 0.817234 (* 1 = 0.817234 loss)\nI1209 19:56:01.737447  1002 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI1209 19:58:20.542487  1002 solver.cpp:337] Iteration 700, Testing net (#0)\nI1209 19:59:41.254058  1002 solver.cpp:404]     Test net output #0: accuracy = 0.70716\nI1209 19:59:41.254341  1002 solver.cpp:404]     Test net output #1: loss = 0.881882 (* 1 = 0.881882 loss)\nI1209 19:59:42.575259  1002 solver.cpp:228] Iteration 700, loss = 0.738353\nI1209 19:59:42.575305  1002 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI1209 19:59:42.575322  1002 solver.cpp:244]     Train net output #1: loss = 0.738353 (* 1 = 0.738353 loss)\nI1209 19:59:42.676110  1002 sgd_solver.cpp:166] Iteration 700, lr = 0.105\nI1209 20:02:01.144474  1002 solver.cpp:337] Iteration 800, Testing net (#0)\nI1209 20:03:21.852316  1002 solver.cpp:404]     Test net output #0: accuracy = 0.74436\nI1209 20:03:21.852613  1002 solver.cpp:404]     Test net output #1: loss = 0.763852 (* 1 = 0.763852 loss)\nI1209 20:03:23.171373  1002 solver.cpp:228] Iteration 800, loss = 0.698862\nI1209 20:03:23.171419  1002 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI1209 20:03:23.171437  1002 solver.cpp:244]     Train net output #1: loss = 0.698862 (* 1 = 0.698862 loss)\nI1209 20:03:23.272204  1002 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI1209 20:05:41.712405  1002 solver.cpp:337] Iteration 900, Testing net (#0)\nI1209 20:07:02.408646  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76592\nI1209 20:07:02.408948  1002 solver.cpp:404]     Test net output #1: loss = 0.694661 (* 1 = 0.694661 loss)\nI1209 20:07:03.728477  1002 solver.cpp:228] Iteration 900, loss = 0.586974\nI1209 20:07:03.728523  1002 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI1209 20:07:03.728539  1002 solver.cpp:244]     Train net output #1: loss = 0.586974 (* 1 = 0.586974 loss)\nI1209 20:07:03.830965  1002 sgd_solver.cpp:166] Iteration 
900, lr = 0.135\nI1209 20:09:22.591922  1002 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1209 20:10:43.278100  1002 solver.cpp:404]     Test net output #0: accuracy = 0.763\nI1209 20:10:43.278440  1002 solver.cpp:404]     Test net output #1: loss = 0.712573 (* 1 = 0.712573 loss)\nI1209 20:10:44.597764  1002 solver.cpp:228] Iteration 1000, loss = 0.569532\nI1209 20:10:44.597810  1002 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI1209 20:10:44.597826  1002 solver.cpp:244]     Train net output #1: loss = 0.569532 (* 1 = 0.569532 loss)\nI1209 20:10:44.703691  1002 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI1209 20:13:03.267601  1002 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1209 20:14:24.015837  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75988\nI1209 20:14:24.016120  1002 solver.cpp:404]     Test net output #1: loss = 0.744009 (* 1 = 0.744009 loss)\nI1209 20:14:25.334684  1002 solver.cpp:228] Iteration 1100, loss = 0.477467\nI1209 20:14:25.334736  1002 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI1209 20:14:25.334754  1002 solver.cpp:244]     Train net output #1: loss = 0.477467 (* 1 = 0.477467 loss)\nI1209 20:14:25.443565  1002 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI1209 20:16:43.548936  1002 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1209 20:18:04.300572  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79156\nI1209 20:18:04.300865  1002 solver.cpp:404]     Test net output #1: loss = 0.643518 (* 1 = 0.643518 loss)\nI1209 20:18:05.620151  1002 solver.cpp:228] Iteration 1200, loss = 0.39362\nI1209 20:18:05.620196  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1209 20:18:05.620213  1002 solver.cpp:244]     Train net output #1: loss = 0.39362 (* 1 = 0.39362 loss)\nI1209 20:18:05.720262  1002 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI1209 20:20:23.850605  1002 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1209 20:21:44.581645  1002 solver.cpp:404]     
Test net output #0: accuracy = 0.75984\nI1209 20:21:44.581919  1002 solver.cpp:404]     Test net output #1: loss = 0.760481 (* 1 = 0.760481 loss)\nI1209 20:21:45.900893  1002 solver.cpp:228] Iteration 1300, loss = 0.375423\nI1209 20:21:45.900940  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 20:21:45.900957  1002 solver.cpp:244]     Train net output #1: loss = 0.375423 (* 1 = 0.375423 loss)\nI1209 20:21:46.003556  1002 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI1209 20:24:04.808396  1002 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1209 20:25:25.501819  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76176\nI1209 20:25:25.502107  1002 solver.cpp:404]     Test net output #1: loss = 0.746531 (* 1 = 0.746531 loss)\nI1209 20:25:26.821266  1002 solver.cpp:228] Iteration 1400, loss = 0.403872\nI1209 20:25:26.821311  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1209 20:25:26.821328  1002 solver.cpp:244]     Train net output #1: loss = 0.403872 (* 1 = 0.403872 loss)\nI1209 20:25:26.926434  1002 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI1209 20:27:45.217017  1002 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1209 20:29:05.911497  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77968\nI1209 20:29:05.911792  1002 solver.cpp:404]     Test net output #1: loss = 0.699187 (* 1 = 0.699187 loss)\nI1209 20:29:07.229996  1002 solver.cpp:228] Iteration 1500, loss = 0.325399\nI1209 20:29:07.230041  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1209 20:29:07.230058  1002 solver.cpp:244]     Train net output #1: loss = 0.325399 (* 1 = 0.325399 loss)\nI1209 20:29:07.334506  1002 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI1209 20:31:26.063935  1002 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1209 20:32:46.767429  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79044\nI1209 20:32:46.767737  1002 solver.cpp:404]     Test net output #1: loss = 0.634348 (* 1 = 
0.634348 loss)\nI1209 20:32:48.086875  1002 solver.cpp:228] Iteration 1600, loss = 0.374587\nI1209 20:32:48.086921  1002 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1209 20:32:48.086938  1002 solver.cpp:244]     Train net output #1: loss = 0.374587 (* 1 = 0.374587 loss)\nI1209 20:32:48.183547  1002 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI1209 20:35:06.692083  1002 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1209 20:36:27.393383  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79516\nI1209 20:36:27.393664  1002 solver.cpp:404]     Test net output #1: loss = 0.625031 (* 1 = 0.625031 loss)\nI1209 20:36:28.712689  1002 solver.cpp:228] Iteration 1700, loss = 0.368086\nI1209 20:36:28.712743  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1209 20:36:28.712760  1002 solver.cpp:244]     Train net output #1: loss = 0.368086 (* 1 = 0.368086 loss)\nI1209 20:36:28.810570  1002 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI1209 20:38:46.820312  1002 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1209 20:40:07.553902  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80824\nI1209 20:40:07.554209  1002 solver.cpp:404]     Test net output #1: loss = 0.624931 (* 1 = 0.624931 loss)\nI1209 20:40:08.873181  1002 solver.cpp:228] Iteration 1800, loss = 0.309863\nI1209 20:40:08.873227  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 20:40:08.873245  1002 solver.cpp:244]     Train net output #1: loss = 0.309863 (* 1 = 0.309863 loss)\nI1209 20:40:08.972275  1002 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI1209 20:42:26.982425  1002 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1209 20:43:47.678944  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81144\nI1209 20:43:47.679250  1002 solver.cpp:404]     Test net output #1: loss = 0.628744 (* 1 = 0.628744 loss)\nI1209 20:43:48.997620  1002 solver.cpp:228] Iteration 1900, loss = 0.357987\nI1209 20:43:48.997666  1002 solver.cpp:244]    
 Train net output #0: accuracy = 0.888\nI1209 20:43:48.997684  1002 solver.cpp:244]     Train net output #1: loss = 0.357986 (* 1 = 0.357986 loss)\nI1209 20:43:49.097353  1002 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI1209 20:46:07.069568  1002 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1209 20:47:27.754801  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76724\nI1209 20:47:27.755095  1002 solver.cpp:404]     Test net output #1: loss = 0.829864 (* 1 = 0.829864 loss)\nI1209 20:47:29.073468  1002 solver.cpp:228] Iteration 2000, loss = 0.28174\nI1209 20:47:29.073514  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1209 20:47:29.073530  1002 solver.cpp:244]     Train net output #1: loss = 0.28174 (* 1 = 0.28174 loss)\nI1209 20:47:29.170500  1002 sgd_solver.cpp:166] Iteration 2000, lr = 0.3\nI1209 20:49:47.169984  1002 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1209 20:51:07.853679  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78228\nI1209 20:51:07.853976  1002 solver.cpp:404]     Test net output #1: loss = 0.664768 (* 1 = 0.664768 loss)\nI1209 20:51:09.172686  1002 solver.cpp:228] Iteration 2100, loss = 0.309516\nI1209 20:51:09.172732  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 20:51:09.172749  1002 solver.cpp:244]     Train net output #1: loss = 0.309516 (* 1 = 0.309516 loss)\nI1209 20:51:09.267967  1002 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI1209 20:53:27.237335  1002 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1209 20:54:47.926535  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80072\nI1209 20:54:47.926831  1002 solver.cpp:404]     Test net output #1: loss = 0.617573 (* 1 = 0.617573 loss)\nI1209 20:54:49.245265  1002 solver.cpp:228] Iteration 2200, loss = 0.277756\nI1209 20:54:49.245311  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 20:54:49.245327  1002 solver.cpp:244]     Train net output #1: loss = 0.277756 (* 1 = 0.277756 
loss)\nI1209 20:54:49.341644  1002 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI1209 20:57:07.315978  1002 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1209 20:58:27.996350  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85016\nI1209 20:58:27.996647  1002 solver.cpp:404]     Test net output #1: loss = 0.468357 (* 1 = 0.468357 loss)\nI1209 20:58:29.316097  1002 solver.cpp:228] Iteration 2300, loss = 0.237677\nI1209 20:58:29.316143  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 20:58:29.316159  1002 solver.cpp:244]     Train net output #1: loss = 0.237677 (* 1 = 0.237677 loss)\nI1209 20:58:29.412714  1002 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI1209 21:00:47.436797  1002 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1209 21:02:08.133509  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7914\nI1209 21:02:08.133800  1002 solver.cpp:404]     Test net output #1: loss = 0.661539 (* 1 = 0.661539 loss)\nI1209 21:02:09.451807  1002 solver.cpp:228] Iteration 2400, loss = 0.236612\nI1209 21:02:09.451854  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 21:02:09.451870  1002 solver.cpp:244]     Train net output #1: loss = 0.236612 (* 1 = 0.236612 loss)\nI1209 21:02:09.555807  1002 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI1209 21:04:27.550835  1002 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1209 21:05:48.235553  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85832\nI1209 21:05:48.235853  1002 solver.cpp:404]     Test net output #1: loss = 0.437521 (* 1 = 0.437521 loss)\nI1209 21:05:49.554266  1002 solver.cpp:228] Iteration 2500, loss = 0.219561\nI1209 21:05:49.554311  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 21:05:49.554327  1002 solver.cpp:244]     Train net output #1: loss = 0.219561 (* 1 = 0.219561 loss)\nI1209 21:05:49.657492  1002 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI1209 21:08:07.628155  1002 solver.cpp:337] Iteration 
2600, Testing net (#0)\nI1209 21:09:28.376046  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8516\nI1209 21:09:28.376344  1002 solver.cpp:404]     Test net output #1: loss = 0.470836 (* 1 = 0.470836 loss)\nI1209 21:09:29.695080  1002 solver.cpp:228] Iteration 2600, loss = 0.277564\nI1209 21:09:29.695125  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 21:09:29.695142  1002 solver.cpp:244]     Train net output #1: loss = 0.277564 (* 1 = 0.277564 loss)\nI1209 21:09:29.795351  1002 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI1209 21:11:47.757397  1002 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1209 21:13:08.440621  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83864\nI1209 21:13:08.440919  1002 solver.cpp:404]     Test net output #1: loss = 0.551367 (* 1 = 0.551367 loss)\nI1209 21:13:09.759645  1002 solver.cpp:228] Iteration 2700, loss = 0.297674\nI1209 21:13:09.759691  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 21:13:09.759707  1002 solver.cpp:244]     Train net output #1: loss = 0.297674 (* 1 = 0.297674 loss)\nI1209 21:13:09.861096  1002 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI1209 21:15:28.349651  1002 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1209 21:16:49.013365  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82424\nI1209 21:16:49.013660  1002 solver.cpp:404]     Test net output #1: loss = 0.584941 (* 1 = 0.584941 loss)\nI1209 21:16:50.332840  1002 solver.cpp:228] Iteration 2800, loss = 0.249495\nI1209 21:16:50.332885  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 21:16:50.332902  1002 solver.cpp:244]     Train net output #1: loss = 0.249495 (* 1 = 0.249495 loss)\nI1209 21:16:50.438465  1002 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI1209 21:19:08.492626  1002 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1209 21:20:29.173748  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8106\nI1209 21:20:29.174052  1002 
solver.cpp:404]     Test net output #1: loss = 0.62497 (* 1 = 0.62497 loss)\nI1209 21:20:30.493293  1002 solver.cpp:228] Iteration 2900, loss = 0.271956\nI1209 21:20:30.493337  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1209 21:20:30.493353  1002 solver.cpp:244]     Train net output #1: loss = 0.271956 (* 1 = 0.271956 loss)\nI1209 21:20:30.590694  1002 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI1209 21:22:48.870646  1002 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1209 21:24:09.533432  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8496\nI1209 21:24:09.533730  1002 solver.cpp:404]     Test net output #1: loss = 0.47084 (* 1 = 0.47084 loss)\nI1209 21:24:10.852041  1002 solver.cpp:228] Iteration 3000, loss = 0.170905\nI1209 21:24:10.852087  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 21:24:10.852102  1002 solver.cpp:244]     Train net output #1: loss = 0.170905 (* 1 = 0.170905 loss)\nI1209 21:24:10.954659  1002 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI1209 21:26:29.732575  1002 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1209 21:27:50.408100  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82304\nI1209 21:27:50.408403  1002 solver.cpp:404]     Test net output #1: loss = 0.652712 (* 1 = 0.652712 loss)\nI1209 21:27:51.728111  1002 solver.cpp:228] Iteration 3100, loss = 0.244624\nI1209 21:27:51.728157  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 21:27:51.728174  1002 solver.cpp:244]     Train net output #1: loss = 0.244624 (* 1 = 0.244624 loss)\nI1209 21:27:51.835337  1002 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI1209 21:30:10.081430  1002 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1209 21:31:30.769686  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83788\nI1209 21:31:30.769989  1002 solver.cpp:404]     Test net output #1: loss = 0.548962 (* 1 = 0.548962 loss)\nI1209 21:31:32.088977  1002 solver.cpp:228] Iteration 3200, 
loss = 0.21115\nI1209 21:31:32.089025  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 21:31:32.089042  1002 solver.cpp:244]     Train net output #1: loss = 0.21115 (* 1 = 0.21115 loss)\nI1209 21:31:32.188278  1002 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI1209 21:33:50.930631  1002 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1209 21:35:11.583578  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84752\nI1209 21:35:11.583876  1002 solver.cpp:404]     Test net output #1: loss = 0.471874 (* 1 = 0.471874 loss)\nI1209 21:35:12.902789  1002 solver.cpp:228] Iteration 3300, loss = 0.177832\nI1209 21:35:12.902834  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 21:35:12.902851  1002 solver.cpp:244]     Train net output #1: loss = 0.177832 (* 1 = 0.177832 loss)\nI1209 21:35:13.004556  1002 sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI1209 21:37:31.313710  1002 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1209 21:38:51.958958  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8138\nI1209 21:38:51.959264  1002 solver.cpp:404]     Test net output #1: loss = 0.588199 (* 1 = 0.588199 loss)\nI1209 21:38:53.277513  1002 solver.cpp:228] Iteration 3400, loss = 0.300176\nI1209 21:38:53.277557  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 21:38:53.277573  1002 solver.cpp:244]     Train net output #1: loss = 0.300176 (* 1 = 0.300176 loss)\nI1209 21:38:53.379088  1002 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI1209 21:41:12.034160  1002 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1209 21:42:32.679493  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85732\nI1209 21:42:32.679800  1002 solver.cpp:404]     Test net output #1: loss = 0.465275 (* 1 = 0.465275 loss)\nI1209 21:42:33.998855  1002 solver.cpp:228] Iteration 3500, loss = 0.265369\nI1209 21:42:33.998900  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 21:42:33.998916  1002 
solver.cpp:244]     Train net output #1: loss = 0.265369 (* 1 = 0.265369 loss)\nI1209 21:42:34.102038  1002 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI1209 21:44:52.879385  1002 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1209 21:46:13.520006  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83444\nI1209 21:46:13.520292  1002 solver.cpp:404]     Test net output #1: loss = 0.563716 (* 1 = 0.563716 loss)\nI1209 21:46:14.838856  1002 solver.cpp:228] Iteration 3600, loss = 0.267721\nI1209 21:46:14.838901  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 21:46:14.838917  1002 solver.cpp:244]     Train net output #1: loss = 0.267721 (* 1 = 0.267721 loss)\nI1209 21:46:14.944800  1002 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI1209 21:48:33.241291  1002 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1209 21:49:53.886586  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8452\nI1209 21:49:53.886884  1002 solver.cpp:404]     Test net output #1: loss = 0.496664 (* 1 = 0.496664 loss)\nI1209 21:49:55.205185  1002 solver.cpp:228] Iteration 3700, loss = 0.169763\nI1209 21:49:55.205230  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 21:49:55.205246  1002 solver.cpp:244]     Train net output #1: loss = 0.169763 (* 1 = 0.169763 loss)\nI1209 21:49:55.308616  1002 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI1209 21:52:13.559391  1002 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1209 21:53:34.241999  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82644\nI1209 21:53:34.242305  1002 solver.cpp:404]     Test net output #1: loss = 0.580333 (* 1 = 0.580333 loss)\nI1209 21:53:35.560380  1002 solver.cpp:228] Iteration 3800, loss = 0.199426\nI1209 21:53:35.560425  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 21:53:35.560441  1002 solver.cpp:244]     Train net output #1: loss = 0.199426 (* 1 = 0.199426 loss)\nI1209 21:53:35.664865  1002 sgd_solver.cpp:166] Iteration 
3800, lr = 0.57\nI1209 21:55:53.941052  1002 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1209 21:57:14.618300  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84488\nI1209 21:57:14.618602  1002 solver.cpp:404]     Test net output #1: loss = 0.498956 (* 1 = 0.498956 loss)\nI1209 21:57:15.937835  1002 solver.cpp:228] Iteration 3900, loss = 0.221293\nI1209 21:57:15.937880  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 21:57:15.937896  1002 solver.cpp:244]     Train net output #1: loss = 0.221293 (* 1 = 0.221293 loss)\nI1209 21:57:16.040669  1002 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI1209 21:59:34.335649  1002 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1209 22:00:55.015547  1002 solver.cpp:404]     Test net output #0: accuracy = 0.858121\nI1209 22:00:55.015847  1002 solver.cpp:404]     Test net output #1: loss = 0.458216 (* 1 = 0.458216 loss)\nI1209 22:00:56.333670  1002 solver.cpp:228] Iteration 4000, loss = 0.249228\nI1209 22:00:56.333716  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1209 22:00:56.333734  1002 solver.cpp:244]     Train net output #1: loss = 0.249228 (* 1 = 0.249228 loss)\nI1209 22:00:56.438391  1002 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI1209 22:03:15.216502  1002 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1209 22:04:35.902427  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8524\nI1209 22:04:35.902732  1002 solver.cpp:404]     Test net output #1: loss = 0.488634 (* 1 = 0.488634 loss)\nI1209 22:04:37.220706  1002 solver.cpp:228] Iteration 4100, loss = 0.227623\nI1209 22:04:37.220752  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 22:04:37.220767  1002 solver.cpp:244]     Train net output #1: loss = 0.227623 (* 1 = 0.227623 loss)\nI1209 22:04:37.327069  1002 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI1209 22:06:56.036924  1002 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1209 22:08:16.715320  1002 solver.cpp:404]  
   Test net output #0: accuracy = 0.8266\nI1209 22:08:16.715580  1002 solver.cpp:404]     Test net output #1: loss = 0.609617 (* 1 = 0.609617 loss)\nI1209 22:08:18.034171  1002 solver.cpp:228] Iteration 4200, loss = 0.149166\nI1209 22:08:18.034217  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 22:08:18.034235  1002 solver.cpp:244]     Train net output #1: loss = 0.149166 (* 1 = 0.149166 loss)\nI1209 22:08:18.146812  1002 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI1209 22:10:36.826318  1002 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1209 22:11:57.507215  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1209 22:11:57.507495  1002 solver.cpp:404]     Test net output #1: loss = 0.685477 (* 1 = 0.685477 loss)\nI1209 22:11:58.825650  1002 solver.cpp:228] Iteration 4300, loss = 0.16858\nI1209 22:11:58.825695  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 22:11:58.825721  1002 solver.cpp:244]     Train net output #1: loss = 0.16858 (* 1 = 0.16858 loss)\nI1209 22:11:58.930109  1002 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI1209 22:14:17.703827  1002 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1209 22:15:38.388548  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83936\nI1209 22:15:38.388839  1002 solver.cpp:404]     Test net output #1: loss = 0.52235 (* 1 = 0.52235 loss)\nI1209 22:15:39.706959  1002 solver.cpp:228] Iteration 4400, loss = 0.16721\nI1209 22:15:39.707003  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 22:15:39.707033  1002 solver.cpp:244]     Train net output #1: loss = 0.16721 (* 1 = 0.16721 loss)\nI1209 22:15:39.811859  1002 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI1209 22:17:58.505729  1002 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1209 22:19:19.179348  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84868\nI1209 22:19:19.179639  1002 solver.cpp:404]     Test net output #1: loss = 0.480869 (* 1 = 0.480869 
loss)\nI1209 22:19:20.497867  1002 solver.cpp:228] Iteration 4500, loss = 0.148081\nI1209 22:19:20.497911  1002 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1209 22:19:20.497936  1002 solver.cpp:244]     Train net output #1: loss = 0.148081 (* 1 = 0.148081 loss)\nI1209 22:19:20.601397  1002 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI1209 22:21:38.857453  1002 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1209 22:22:59.539496  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84492\nI1209 22:22:59.539748  1002 solver.cpp:404]     Test net output #1: loss = 0.5525 (* 1 = 0.5525 loss)\nI1209 22:23:00.858131  1002 solver.cpp:228] Iteration 4600, loss = 0.274866\nI1209 22:23:00.858176  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 22:23:00.858201  1002 solver.cpp:244]     Train net output #1: loss = 0.274866 (* 1 = 0.274866 loss)\nI1209 22:23:00.964331  1002 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI1209 22:25:19.300123  1002 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1209 22:26:39.989477  1002 solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI1209 22:26:39.989745  1002 solver.cpp:404]     Test net output #1: loss = 0.428655 (* 1 = 0.428655 loss)\nI1209 22:26:41.308814  1002 solver.cpp:228] Iteration 4700, loss = 0.20991\nI1209 22:26:41.308858  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 22:26:41.308882  1002 solver.cpp:244]     Train net output #1: loss = 0.20991 (* 1 = 0.20991 loss)\nI1209 22:26:41.409600  1002 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI1209 22:28:59.678290  1002 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1209 22:30:20.394017  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83344\nI1209 22:30:20.394310  1002 solver.cpp:404]     Test net output #1: loss = 0.54488 (* 1 = 0.54488 loss)\nI1209 22:30:21.713060  1002 solver.cpp:228] Iteration 4800, loss = 0.209977\nI1209 22:30:21.713107  1002 solver.cpp:244]     Train net 
output #0: accuracy = 0.912\nI1209 22:30:21.713131  1002 solver.cpp:244]     Train net output #1: loss = 0.209977 (* 1 = 0.209977 loss)\nI1209 22:30:21.814947  1002 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI1209 22:32:40.196231  1002 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1209 22:34:00.874974  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8416\nI1209 22:34:00.875203  1002 solver.cpp:404]     Test net output #1: loss = 0.543927 (* 1 = 0.543927 loss)\nI1209 22:34:02.193105  1002 solver.cpp:228] Iteration 4900, loss = 0.193767\nI1209 22:34:02.193150  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1209 22:34:02.193174  1002 solver.cpp:244]     Train net output #1: loss = 0.193766 (* 1 = 0.193766 loss)\nI1209 22:34:02.296241  1002 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI1209 22:36:20.808786  1002 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1209 22:37:41.517154  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83448\nI1209 22:37:41.517407  1002 solver.cpp:404]     Test net output #1: loss = 0.565799 (* 1 = 0.565799 loss)\nI1209 22:37:42.836908  1002 solver.cpp:228] Iteration 5000, loss = 0.179494\nI1209 22:37:42.836956  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 22:37:42.836980  1002 solver.cpp:244]     Train net output #1: loss = 0.179494 (* 1 = 0.179494 loss)\nI1209 22:37:42.938449  1002 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI1209 22:40:01.304554  1002 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1209 22:41:22.033133  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85288\nI1209 22:41:22.033432  1002 solver.cpp:404]     Test net output #1: loss = 0.47127 (* 1 = 0.47127 loss)\nI1209 22:41:23.353340  1002 solver.cpp:228] Iteration 5100, loss = 0.169983\nI1209 22:41:23.353387  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 22:41:23.353411  1002 solver.cpp:244]     Train net output #1: loss = 0.169983 (* 1 = 0.169983 loss)\nI1209 
22:41:23.459491  1002 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI1209 22:43:41.737562  1002 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1209 22:45:02.424149  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83844\nI1209 22:45:02.424454  1002 solver.cpp:404]     Test net output #1: loss = 0.556574 (* 1 = 0.556574 loss)\nI1209 22:45:03.742489  1002 solver.cpp:228] Iteration 5200, loss = 0.208678\nI1209 22:45:03.742537  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1209 22:45:03.742561  1002 solver.cpp:244]     Train net output #1: loss = 0.208678 (* 1 = 0.208678 loss)\nI1209 22:45:03.842624  1002 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI1209 22:47:22.700542  1002 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1209 22:48:43.386112  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83076\nI1209 22:48:43.386368  1002 solver.cpp:404]     Test net output #1: loss = 0.598937 (* 1 = 0.598937 loss)\nI1209 22:48:44.705622  1002 solver.cpp:228] Iteration 5300, loss = 0.224551\nI1209 22:48:44.705672  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 22:48:44.705698  1002 solver.cpp:244]     Train net output #1: loss = 0.224551 (* 1 = 0.224551 loss)\nI1209 22:48:44.813731  1002 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI1209 22:51:03.167325  1002 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1209 22:52:23.860297  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8308\nI1209 22:52:23.860594  1002 solver.cpp:404]     Test net output #1: loss = 0.557845 (* 1 = 0.557845 loss)\nI1209 22:52:25.179581  1002 solver.cpp:228] Iteration 5400, loss = 0.210916\nI1209 22:52:25.179630  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1209 22:52:25.179656  1002 solver.cpp:244]     Train net output #1: loss = 0.210916 (* 1 = 0.210916 loss)\nI1209 22:52:25.284072  1002 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI1209 22:54:43.655176  1002 solver.cpp:337] Iteration 5500, Testing 
net (#0)\nI1209 22:56:04.336011  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85944\nI1209 22:56:04.336315  1002 solver.cpp:404]     Test net output #1: loss = 0.452964 (* 1 = 0.452964 loss)\nI1209 22:56:05.654980  1002 solver.cpp:228] Iteration 5500, loss = 0.156802\nI1209 22:56:05.655031  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1209 22:56:05.655057  1002 solver.cpp:244]     Train net output #1: loss = 0.156802 (* 1 = 0.156802 loss)\nI1209 22:56:05.758625  1002 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI1209 22:58:24.184010  1002 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1209 22:59:44.861487  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82884\nI1209 22:59:44.861738  1002 solver.cpp:404]     Test net output #1: loss = 0.620684 (* 1 = 0.620684 loss)\nI1209 22:59:46.180882  1002 solver.cpp:228] Iteration 5600, loss = 0.139529\nI1209 22:59:46.180930  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1209 22:59:46.180955  1002 solver.cpp:244]     Train net output #1: loss = 0.139529 (* 1 = 0.139529 loss)\nI1209 22:59:46.285461  1002 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI1209 23:02:04.687921  1002 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1209 23:03:25.381290  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8554\nI1209 23:03:25.381549  1002 solver.cpp:404]     Test net output #1: loss = 0.462972 (* 1 = 0.462972 loss)\nI1209 23:03:26.701220  1002 solver.cpp:228] Iteration 5700, loss = 0.162658\nI1209 23:03:26.701266  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:03:26.701289  1002 solver.cpp:244]     Train net output #1: loss = 0.162658 (* 1 = 0.162658 loss)\nI1209 23:03:26.800242  1002 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI1209 23:05:45.446276  1002 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1209 23:07:06.176919  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83956\nI1209 23:07:06.177222  1002 
solver.cpp:404]     Test net output #1: loss = 0.517751 (* 1 = 0.517751 loss)\nI1209 23:07:07.496248  1002 solver.cpp:228] Iteration 5800, loss = 0.197966\nI1209 23:07:07.496294  1002 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1209 23:07:07.496318  1002 solver.cpp:244]     Train net output #1: loss = 0.197966 (* 1 = 0.197966 loss)\nI1209 23:07:07.603713  1002 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI1209 23:09:25.948159  1002 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1209 23:10:46.657872  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83496\nI1209 23:10:46.658161  1002 solver.cpp:404]     Test net output #1: loss = 0.613079 (* 1 = 0.613079 loss)\nI1209 23:10:47.976317  1002 solver.cpp:228] Iteration 5900, loss = 0.192516\nI1209 23:10:47.976362  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:10:47.976387  1002 solver.cpp:244]     Train net output #1: loss = 0.192516 (* 1 = 0.192516 loss)\nI1209 23:10:48.078871  1002 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI1209 23:13:06.359747  1002 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1209 23:14:27.094007  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82924\nI1209 23:14:27.094272  1002 solver.cpp:404]     Test net output #1: loss = 0.554387 (* 1 = 0.554387 loss)\nI1209 23:14:28.412765  1002 solver.cpp:228] Iteration 6000, loss = 0.202725\nI1209 23:14:28.412809  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 23:14:28.412834  1002 solver.cpp:244]     Train net output #1: loss = 0.202725 (* 1 = 0.202725 loss)\nI1209 23:14:28.515756  1002 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI1209 23:16:47.284248  1002 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1209 23:18:07.961388  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI1209 23:18:07.961686  1002 solver.cpp:404]     Test net output #1: loss = 0.507986 (* 1 = 0.507986 loss)\nI1209 23:18:09.280493  1002 solver.cpp:228] Iteration 6100, 
loss = 0.202391\nI1209 23:18:09.280537  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:18:09.280562  1002 solver.cpp:244]     Train net output #1: loss = 0.202391 (* 1 = 0.202391 loss)\nI1209 23:18:09.384742  1002 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI1209 23:20:27.743412  1002 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1209 23:21:48.424068  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82304\nI1209 23:21:48.424361  1002 solver.cpp:404]     Test net output #1: loss = 0.594914 (* 1 = 0.594914 loss)\nI1209 23:21:49.742733  1002 solver.cpp:228] Iteration 6200, loss = 0.21722\nI1209 23:21:49.742779  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1209 23:21:49.742805  1002 solver.cpp:244]     Train net output #1: loss = 0.217219 (* 1 = 0.217219 loss)\nI1209 23:21:49.842270  1002 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI1209 23:24:08.580298  1002 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1209 23:25:29.270308  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82716\nI1209 23:25:29.270558  1002 solver.cpp:404]     Test net output #1: loss = 0.55869 (* 1 = 0.55869 loss)\nI1209 23:25:30.589586  1002 solver.cpp:228] Iteration 6300, loss = 0.164061\nI1209 23:25:30.589632  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:25:30.589656  1002 solver.cpp:244]     Train net output #1: loss = 0.164061 (* 1 = 0.164061 loss)\nI1209 23:25:30.693624  1002 sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI1209 23:27:49.036042  1002 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1209 23:29:09.752600  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80756\nI1209 23:29:09.752894  1002 solver.cpp:404]     Test net output #1: loss = 0.706282 (* 1 = 0.706282 loss)\nI1209 23:29:11.071476  1002 solver.cpp:228] Iteration 6400, loss = 0.189997\nI1209 23:29:11.071521  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 23:29:11.071547  1002 
solver.cpp:244]     Train net output #1: loss = 0.189997 (* 1 = 0.189997 loss)\nI1209 23:29:11.172719  1002 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI1209 23:31:29.828791  1002 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1209 23:32:50.506896  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79268\nI1209 23:32:50.507199  1002 solver.cpp:404]     Test net output #1: loss = 0.747528 (* 1 = 0.747528 loss)\nI1209 23:32:51.826637  1002 solver.cpp:228] Iteration 6500, loss = 0.243137\nI1209 23:32:51.826681  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 23:32:51.826699  1002 solver.cpp:244]     Train net output #1: loss = 0.243137 (* 1 = 0.243137 loss)\nI1209 23:32:51.925204  1002 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI1209 23:35:10.245127  1002 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1209 23:36:30.913421  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85616\nI1209 23:36:30.913684  1002 solver.cpp:404]     Test net output #1: loss = 0.470701 (* 1 = 0.470701 loss)\nI1209 23:36:32.232058  1002 solver.cpp:228] Iteration 6600, loss = 0.190191\nI1209 23:36:32.232100  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 23:36:32.232116  1002 solver.cpp:244]     Train net output #1: loss = 0.190191 (* 1 = 0.190191 loss)\nI1209 23:36:32.333230  1002 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI1209 23:38:50.670150  1002 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1209 23:40:11.318084  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI1209 23:40:11.318347  1002 solver.cpp:404]     Test net output #1: loss = 0.44911 (* 1 = 0.44911 loss)\nI1209 23:40:12.636704  1002 solver.cpp:228] Iteration 6700, loss = 0.173349\nI1209 23:40:12.636746  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 23:40:12.636762  1002 solver.cpp:244]     Train net output #1: loss = 0.173349 (* 1 = 0.173349 loss)\nI1209 23:40:12.740869  1002 sgd_solver.cpp:166] Iteration 
6700, lr = 1.005\nI1209 23:42:31.559126  1002 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1209 23:43:52.212095  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85356\nI1209 23:43:52.212347  1002 solver.cpp:404]     Test net output #1: loss = 0.461317 (* 1 = 0.461317 loss)\nI1209 23:43:53.531059  1002 solver.cpp:228] Iteration 6800, loss = 0.137719\nI1209 23:43:53.531100  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:43:53.531116  1002 solver.cpp:244]     Train net output #1: loss = 0.137719 (* 1 = 0.137719 loss)\nI1209 23:43:53.635831  1002 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI1209 23:46:12.494472  1002 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1209 23:47:33.141484  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85044\nI1209 23:47:33.141762  1002 solver.cpp:404]     Test net output #1: loss = 0.498495 (* 1 = 0.498495 loss)\nI1209 23:47:34.459606  1002 solver.cpp:228] Iteration 6900, loss = 0.152516\nI1209 23:47:34.459650  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1209 23:47:34.459666  1002 solver.cpp:244]     Train net output #1: loss = 0.152516 (* 1 = 0.152516 loss)\nI1209 23:47:34.565163  1002 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI1209 23:49:52.954123  1002 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1209 23:51:13.609622  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85644\nI1209 23:51:13.609905  1002 solver.cpp:404]     Test net output #1: loss = 0.476766 (* 1 = 0.476766 loss)\nI1209 23:51:14.928011  1002 solver.cpp:228] Iteration 7000, loss = 0.279144\nI1209 23:51:14.928056  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1209 23:51:14.928071  1002 solver.cpp:244]     Train net output #1: loss = 0.279144 (* 1 = 0.279144 loss)\nI1209 23:51:15.028126  1002 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI1209 23:53:33.310693  1002 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1209 23:54:53.957036  1002 
solver.cpp:404]     Test net output #0: accuracy = 0.82612\nI1209 23:54:53.957285  1002 solver.cpp:404]     Test net output #1: loss = 0.598274 (* 1 = 0.598274 loss)\nI1209 23:54:55.275411  1002 solver.cpp:228] Iteration 7100, loss = 0.180564\nI1209 23:54:55.275454  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1209 23:54:55.275470  1002 solver.cpp:244]     Train net output #1: loss = 0.180564 (* 1 = 0.180564 loss)\nI1209 23:54:55.386920  1002 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI1209 23:57:13.650707  1002 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1209 23:58:34.294517  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83644\nI1209 23:58:34.294780  1002 solver.cpp:404]     Test net output #1: loss = 0.577961 (* 1 = 0.577961 loss)\nI1209 23:58:35.613049  1002 solver.cpp:228] Iteration 7200, loss = 0.263713\nI1209 23:58:35.613091  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1209 23:58:35.613108  1002 solver.cpp:244]     Train net output #1: loss = 0.263713 (* 1 = 0.263713 loss)\nI1209 23:58:35.712853  1002 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI1210 00:00:54.009325  1002 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1210 00:02:14.659451  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84552\nI1210 00:02:14.659726  1002 solver.cpp:404]     Test net output #1: loss = 0.519478 (* 1 = 0.519478 loss)\nI1210 00:02:15.977622  1002 solver.cpp:228] Iteration 7300, loss = 0.27212\nI1210 00:02:15.977665  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 00:02:15.977680  1002 solver.cpp:244]     Train net output #1: loss = 0.27212 (* 1 = 0.27212 loss)\nI1210 00:02:16.080255  1002 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI1210 00:04:34.407840  1002 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1210 00:05:55.056975  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78576\nI1210 00:05:55.057245  1002 solver.cpp:404]     Test net output #1: loss = 
0.785015 (* 1 = 0.785015 loss)\nI1210 00:05:56.375442  1002 solver.cpp:228] Iteration 7400, loss = 0.181362\nI1210 00:05:56.375485  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 00:05:56.375501  1002 solver.cpp:244]     Train net output #1: loss = 0.181362 (* 1 = 0.181362 loss)\nI1210 00:05:56.478251  1002 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI1210 00:08:14.798089  1002 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1210 00:09:35.487046  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83048\nI1210 00:09:35.487334  1002 solver.cpp:404]     Test net output #1: loss = 0.553424 (* 1 = 0.553424 loss)\nI1210 00:09:36.805666  1002 solver.cpp:228] Iteration 7500, loss = 0.157202\nI1210 00:09:36.805708  1002 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1210 00:09:36.805724  1002 solver.cpp:244]     Train net output #1: loss = 0.157202 (* 1 = 0.157202 loss)\nI1210 00:09:36.906297  1002 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI1210 00:11:55.704314  1002 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1210 00:13:16.424939  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8522\nI1210 00:13:16.425240  1002 solver.cpp:404]     Test net output #1: loss = 0.460291 (* 1 = 0.460291 loss)\nI1210 00:13:17.744094  1002 solver.cpp:228] Iteration 7600, loss = 0.213683\nI1210 00:13:17.744135  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 00:13:17.744151  1002 solver.cpp:244]     Train net output #1: loss = 0.213683 (* 1 = 0.213683 loss)\nI1210 00:13:17.848449  1002 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI1210 00:15:36.104182  1002 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1210 00:16:56.839591  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82644\nI1210 00:16:56.839848  1002 solver.cpp:404]     Test net output #1: loss = 0.537197 (* 1 = 0.537197 loss)\nI1210 00:16:58.158601  1002 solver.cpp:228] Iteration 7700, loss = 0.268205\nI1210 00:16:58.158641  1002 
solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 00:16:58.158658  1002 solver.cpp:244]     Train net output #1: loss = 0.268205 (* 1 = 0.268205 loss)\nI1210 00:16:58.258440  1002 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI1210 00:19:16.672696  1002 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1210 00:20:37.411859  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82184\nI1210 00:20:37.412106  1002 solver.cpp:404]     Test net output #1: loss = 0.548169 (* 1 = 0.548169 loss)\nI1210 00:20:38.730655  1002 solver.cpp:228] Iteration 7800, loss = 0.238078\nI1210 00:20:38.730695  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 00:20:38.730710  1002 solver.cpp:244]     Train net output #1: loss = 0.238078 (* 1 = 0.238078 loss)\nI1210 00:20:38.826220  1002 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI1210 00:22:57.590237  1002 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1210 00:24:18.334192  1002 solver.cpp:404]     Test net output #0: accuracy = 0.828\nI1210 00:24:18.334455  1002 solver.cpp:404]     Test net output #1: loss = 0.541779 (* 1 = 0.541779 loss)\nI1210 00:24:19.653409  1002 solver.cpp:228] Iteration 7900, loss = 0.255127\nI1210 00:24:19.653450  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 00:24:19.653465  1002 solver.cpp:244]     Train net output #1: loss = 0.255127 (* 1 = 0.255127 loss)\nI1210 00:24:19.756645  1002 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI1210 00:26:38.118362  1002 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1210 00:27:58.855752  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83896\nI1210 00:27:58.856029  1002 solver.cpp:404]     Test net output #1: loss = 0.504729 (* 1 = 0.504729 loss)\nI1210 00:28:00.175142  1002 solver.cpp:228] Iteration 8000, loss = 0.209794\nI1210 00:28:00.175186  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 00:28:00.175202  1002 solver.cpp:244]     Train net output #1: loss = 
0.209794 (* 1 = 0.209794 loss)\nI1210 00:28:00.272174  1002 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI1210 00:30:18.602707  1002 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1210 00:31:39.322930  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77884\nI1210 00:31:39.323175  1002 solver.cpp:404]     Test net output #1: loss = 0.760431 (* 1 = 0.760431 loss)\nI1210 00:31:40.641530  1002 solver.cpp:228] Iteration 8100, loss = 0.2087\nI1210 00:31:40.641577  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 00:31:40.641593  1002 solver.cpp:244]     Train net output #1: loss = 0.2087 (* 1 = 0.2087 loss)\nI1210 00:31:40.740108  1002 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI1210 00:33:59.050626  1002 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1210 00:35:19.737143  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83936\nI1210 00:35:19.737424  1002 solver.cpp:404]     Test net output #1: loss = 0.531708 (* 1 = 0.531708 loss)\nI1210 00:35:21.055996  1002 solver.cpp:228] Iteration 8200, loss = 0.216957\nI1210 00:35:21.056041  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 00:35:21.056057  1002 solver.cpp:244]     Train net output #1: loss = 0.216957 (* 1 = 0.216957 loss)\nI1210 00:35:21.150192  1002 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI1210 00:37:39.453580  1002 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1210 00:39:00.138083  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI1210 00:39:00.138324  1002 solver.cpp:404]     Test net output #1: loss = 0.647505 (* 1 = 0.647505 loss)\nI1210 00:39:01.455925  1002 solver.cpp:228] Iteration 8300, loss = 0.193804\nI1210 00:39:01.455968  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 00:39:01.455986  1002 solver.cpp:244]     Train net output #1: loss = 0.193804 (* 1 = 0.193804 loss)\nI1210 00:39:01.556443  1002 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI1210 00:41:19.926875  1002 
solver.cpp:337] Iteration 8400, Testing net (#0)\nI1210 00:42:40.621274  1002 solver.cpp:404]     Test net output #0: accuracy = 0.849\nI1210 00:42:40.621574  1002 solver.cpp:404]     Test net output #1: loss = 0.47774 (* 1 = 0.47774 loss)\nI1210 00:42:41.939872  1002 solver.cpp:228] Iteration 8400, loss = 0.194457\nI1210 00:42:41.939923  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1210 00:42:41.939939  1002 solver.cpp:244]     Train net output #1: loss = 0.194457 (* 1 = 0.194457 loss)\nI1210 00:42:42.045871  1002 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI1210 00:45:00.560056  1002 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1210 00:46:21.274080  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81472\nI1210 00:46:21.274364  1002 solver.cpp:404]     Test net output #1: loss = 0.625812 (* 1 = 0.625812 loss)\nI1210 00:46:22.593370  1002 solver.cpp:228] Iteration 8500, loss = 0.168492\nI1210 00:46:22.593416  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1210 00:46:22.593433  1002 solver.cpp:244]     Train net output #1: loss = 0.168492 (* 1 = 0.168492 loss)\nI1210 00:46:22.696300  1002 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI1210 00:48:41.057363  1002 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1210 00:50:01.771466  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI1210 00:50:01.771736  1002 solver.cpp:404]     Test net output #1: loss = 0.538404 (* 1 = 0.538404 loss)\nI1210 00:50:03.091248  1002 solver.cpp:228] Iteration 8600, loss = 0.194596\nI1210 00:50:03.091295  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 00:50:03.091312  1002 solver.cpp:244]     Train net output #1: loss = 0.194596 (* 1 = 0.194596 loss)\nI1210 00:50:03.195858  1002 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI1210 00:52:21.579877  1002 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1210 00:53:42.333833  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80924\nI1210 
00:53:42.334130  1002 solver.cpp:404]     Test net output #1: loss = 0.642215 (* 1 = 0.642215 loss)\nI1210 00:53:43.652550  1002 solver.cpp:228] Iteration 8700, loss = 0.207985\nI1210 00:53:43.652592  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 00:53:43.652609  1002 solver.cpp:244]     Train net output #1: loss = 0.207985 (* 1 = 0.207985 loss)\nI1210 00:53:43.759546  1002 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI1210 00:56:02.513272  1002 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1210 00:57:23.253746  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80628\nI1210 00:57:23.254060  1002 solver.cpp:404]     Test net output #1: loss = 0.630314 (* 1 = 0.630314 loss)\nI1210 00:57:24.572681  1002 solver.cpp:228] Iteration 8800, loss = 0.18085\nI1210 00:57:24.572729  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1210 00:57:24.572746  1002 solver.cpp:244]     Train net output #1: loss = 0.18085 (* 1 = 0.18085 loss)\nI1210 00:57:24.675614  1002 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI1210 00:59:43.055083  1002 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1210 01:01:03.735493  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82536\nI1210 01:01:03.735793  1002 solver.cpp:404]     Test net output #1: loss = 0.565204 (* 1 = 0.565204 loss)\nI1210 01:01:05.054702  1002 solver.cpp:228] Iteration 8900, loss = 0.244177\nI1210 01:01:05.054745  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 01:01:05.054762  1002 solver.cpp:244]     Train net output #1: loss = 0.244177 (* 1 = 0.244177 loss)\nI1210 01:01:05.160316  1002 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI1210 01:03:23.578567  1002 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1210 01:04:44.264566  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8288\nI1210 01:04:44.264820  1002 solver.cpp:404]     Test net output #1: loss = 0.567156 (* 1 = 0.567156 loss)\nI1210 01:04:45.584183  1002 
solver.cpp:228] Iteration 9000, loss = 0.18462\nI1210 01:04:45.584228  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 01:04:45.584244  1002 solver.cpp:244]     Train net output #1: loss = 0.18462 (* 1 = 0.18462 loss)\nI1210 01:04:45.686971  1002 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI1210 01:07:04.071887  1002 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1210 01:08:24.753329  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82288\nI1210 01:08:24.753599  1002 solver.cpp:404]     Test net output #1: loss = 0.537358 (* 1 = 0.537358 loss)\nI1210 01:08:26.071849  1002 solver.cpp:228] Iteration 9100, loss = 0.253772\nI1210 01:08:26.071895  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 01:08:26.071913  1002 solver.cpp:244]     Train net output #1: loss = 0.253772 (* 1 = 0.253772 loss)\nI1210 01:08:26.178922  1002 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI1210 01:10:44.583180  1002 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1210 01:12:05.307876  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83304\nI1210 01:12:05.308173  1002 solver.cpp:404]     Test net output #1: loss = 0.554906 (* 1 = 0.554906 loss)\nI1210 01:12:06.627080  1002 solver.cpp:228] Iteration 9200, loss = 0.185142\nI1210 01:12:06.627126  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1210 01:12:06.627143  1002 solver.cpp:244]     Train net output #1: loss = 0.185142 (* 1 = 0.185142 loss)\nI1210 01:12:06.723404  1002 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI1210 01:14:25.071144  1002 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1210 01:15:45.810912  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81632\nI1210 01:15:45.811213  1002 solver.cpp:404]     Test net output #1: loss = 0.624979 (* 1 = 0.624979 loss)\nI1210 01:15:47.129523  1002 solver.cpp:228] Iteration 9300, loss = 0.315188\nI1210 01:15:47.129570  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1210 
01:15:47.129585  1002 solver.cpp:244]     Train net output #1: loss = 0.315188 (* 1 = 0.315188 loss)\nI1210 01:15:47.236945  1002 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI1210 01:18:05.562994  1002 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1210 01:19:26.262500  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI1210 01:19:26.262792  1002 solver.cpp:404]     Test net output #1: loss = 0.455869 (* 1 = 0.455869 loss)\nI1210 01:19:27.581511  1002 solver.cpp:228] Iteration 9400, loss = 0.161942\nI1210 01:19:27.581554  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1210 01:19:27.581571  1002 solver.cpp:244]     Train net output #1: loss = 0.161942 (* 1 = 0.161942 loss)\nI1210 01:19:27.690224  1002 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI1210 01:21:45.978379  1002 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1210 01:23:06.653823  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85336\nI1210 01:23:06.654132  1002 solver.cpp:404]     Test net output #1: loss = 0.442701 (* 1 = 0.442701 loss)\nI1210 01:23:07.972347  1002 solver.cpp:228] Iteration 9500, loss = 0.306674\nI1210 01:23:07.972393  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 01:23:07.972409  1002 solver.cpp:244]     Train net output #1: loss = 0.306674 (* 1 = 0.306674 loss)\nI1210 01:23:08.083117  1002 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI1210 01:25:26.438689  1002 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1210 01:26:47.149190  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82608\nI1210 01:26:47.149452  1002 solver.cpp:404]     Test net output #1: loss = 0.566574 (* 1 = 0.566574 loss)\nI1210 01:26:48.471587  1002 solver.cpp:228] Iteration 9600, loss = 0.214917\nI1210 01:26:48.471634  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 01:26:48.471652  1002 solver.cpp:244]     Train net output #1: loss = 0.214917 (* 1 = 0.214917 loss)\nI1210 01:26:48.568799  1002 
sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI1210 01:29:07.241361  1002 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1210 01:30:27.935344  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78636\nI1210 01:30:27.935593  1002 solver.cpp:404]     Test net output #1: loss = 0.738059 (* 1 = 0.738059 loss)\nI1210 01:30:29.254644  1002 solver.cpp:228] Iteration 9700, loss = 0.185832\nI1210 01:30:29.254689  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1210 01:30:29.254706  1002 solver.cpp:244]     Train net output #1: loss = 0.185832 (* 1 = 0.185832 loss)\nI1210 01:30:29.356881  1002 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI1210 01:32:48.189827  1002 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1210 01:34:08.877620  1002 solver.cpp:404]     Test net output #0: accuracy = 0.74\nI1210 01:34:08.877905  1002 solver.cpp:404]     Test net output #1: loss = 0.883519 (* 1 = 0.883519 loss)\nI1210 01:34:10.197664  1002 solver.cpp:228] Iteration 9800, loss = 0.245123\nI1210 01:34:10.197710  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 01:34:10.197726  1002 solver.cpp:244]     Train net output #1: loss = 0.245123 (* 1 = 0.245123 loss)\nI1210 01:34:10.297700  1002 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI1210 01:36:28.605756  1002 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1210 01:37:49.341568  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81572\nI1210 01:37:49.341866  1002 solver.cpp:404]     Test net output #1: loss = 0.606686 (* 1 = 0.606686 loss)\nI1210 01:37:50.660429  1002 solver.cpp:228] Iteration 9900, loss = 0.230927\nI1210 01:37:50.660476  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 01:37:50.660492  1002 solver.cpp:244]     Train net output #1: loss = 0.230927 (* 1 = 0.230927 loss)\nI1210 01:37:50.757598  1002 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI1210 01:40:09.112718  1002 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1210 
01:41:29.791381  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82624\nI1210 01:41:29.791630  1002 solver.cpp:404]     Test net output #1: loss = 0.526554 (* 1 = 0.526554 loss)\nI1210 01:41:31.109840  1002 solver.cpp:228] Iteration 10000, loss = 0.159358\nI1210 01:41:31.109884  1002 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1210 01:41:31.109901  1002 solver.cpp:244]     Train net output #1: loss = 0.159358 (* 1 = 0.159358 loss)\nI1210 01:41:31.216308  1002 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI1210 01:43:49.597690  1002 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1210 01:45:10.272415  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8428\nI1210 01:45:10.272702  1002 solver.cpp:404]     Test net output #1: loss = 0.475013 (* 1 = 0.475013 loss)\nI1210 01:45:11.592047  1002 solver.cpp:228] Iteration 10100, loss = 0.187771\nI1210 01:45:11.592092  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 01:45:11.592108  1002 solver.cpp:244]     Train net output #1: loss = 0.187771 (* 1 = 0.187771 loss)\nI1210 01:45:11.694001  1002 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI1210 01:47:30.511023  1002 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1210 01:48:51.181191  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8248\nI1210 01:48:51.181455  1002 solver.cpp:404]     Test net output #1: loss = 0.577541 (* 1 = 0.577541 loss)\nI1210 01:48:52.500860  1002 solver.cpp:228] Iteration 10200, loss = 0.271529\nI1210 01:48:52.500905  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 01:48:52.500922  1002 solver.cpp:244]     Train net output #1: loss = 0.271529 (* 1 = 0.271529 loss)\nI1210 01:48:52.602471  1002 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI1210 01:51:11.510095  1002 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1210 01:52:32.169278  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83484\nI1210 01:52:32.169575  1002 solver.cpp:404]     
Test net output #1: loss = 0.552435 (* 1 = 0.552435 loss)\nI1210 01:52:33.489048  1002 solver.cpp:228] Iteration 10300, loss = 0.374332\nI1210 01:52:33.489096  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 01:52:33.489120  1002 solver.cpp:244]     Train net output #1: loss = 0.374332 (* 1 = 0.374332 loss)\nI1210 01:52:33.589320  1002 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI1210 01:54:52.044337  1002 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1210 01:56:12.756229  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78872\nI1210 01:56:12.756520  1002 solver.cpp:404]     Test net output #1: loss = 0.636795 (* 1 = 0.636795 loss)\nI1210 01:56:14.076354  1002 solver.cpp:228] Iteration 10400, loss = 0.300152\nI1210 01:56:14.076401  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 01:56:14.076426  1002 solver.cpp:244]     Train net output #1: loss = 0.300152 (* 1 = 0.300152 loss)\nI1210 01:56:14.176707  1002 sgd_solver.cpp:166] Iteration 10400, lr = 1.56\nI1210 01:58:32.938450  1002 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1210 01:59:53.602082  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI1210 01:59:53.602335  1002 solver.cpp:404]     Test net output #1: loss = 0.824322 (* 1 = 0.824322 loss)\nI1210 01:59:54.921576  1002 solver.cpp:228] Iteration 10500, loss = 0.253491\nI1210 01:59:54.921624  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 01:59:54.921650  1002 solver.cpp:244]     Train net output #1: loss = 0.253491 (* 1 = 0.253491 loss)\nI1210 01:59:55.022701  1002 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI1210 02:02:13.783993  1002 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1210 02:03:34.446928  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83104\nI1210 02:03:34.447234  1002 solver.cpp:404]     Test net output #1: loss = 0.544683 (* 1 = 0.544683 loss)\nI1210 02:03:35.766567  1002 solver.cpp:228] Iteration 10600, loss = 
0.216231\nI1210 02:03:35.766615  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 02:03:35.766640  1002 solver.cpp:244]     Train net output #1: loss = 0.216231 (* 1 = 0.216231 loss)\nI1210 02:03:35.880645  1002 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI1210 02:05:54.753590  1002 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1210 02:07:15.412915  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81796\nI1210 02:07:15.413209  1002 solver.cpp:404]     Test net output #1: loss = 0.55727 (* 1 = 0.55727 loss)\nI1210 02:07:16.733013  1002 solver.cpp:228] Iteration 10700, loss = 0.20222\nI1210 02:07:16.733063  1002 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1210 02:07:16.733089  1002 solver.cpp:244]     Train net output #1: loss = 0.20222 (* 1 = 0.20222 loss)\nI1210 02:07:16.834736  1002 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI1210 02:09:35.207415  1002 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1210 02:10:55.875677  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76896\nI1210 02:10:55.875972  1002 solver.cpp:404]     Test net output #1: loss = 0.810137 (* 1 = 0.810137 loss)\nI1210 02:10:57.196358  1002 solver.cpp:228] Iteration 10800, loss = 0.33106\nI1210 02:10:57.196408  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 02:10:57.196431  1002 solver.cpp:244]     Train net output #1: loss = 0.33106 (* 1 = 0.33106 loss)\nI1210 02:10:57.295853  1002 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI1210 02:13:16.062391  1002 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1210 02:14:36.727082  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85716\nI1210 02:14:36.727360  1002 solver.cpp:404]     Test net output #1: loss = 0.452029 (* 1 = 0.452029 loss)\nI1210 02:14:38.046741  1002 solver.cpp:228] Iteration 10900, loss = 0.254083\nI1210 02:14:38.046788  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 02:14:38.046813  1002 
solver.cpp:244]     Train net output #1: loss = 0.254083 (* 1 = 0.254083 loss)\nI1210 02:14:38.151238  1002 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI1210 02:16:56.669327  1002 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1210 02:18:17.330965  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8292\nI1210 02:18:17.331274  1002 solver.cpp:404]     Test net output #1: loss = 0.502255 (* 1 = 0.502255 loss)\nI1210 02:18:18.650310  1002 solver.cpp:228] Iteration 11000, loss = 0.145563\nI1210 02:18:18.650357  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1210 02:18:18.650380  1002 solver.cpp:244]     Train net output #1: loss = 0.145562 (* 1 = 0.145562 loss)\nI1210 02:18:18.751920  1002 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI1210 02:20:37.473935  1002 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1210 02:21:58.136798  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77804\nI1210 02:21:58.137106  1002 solver.cpp:404]     Test net output #1: loss = 0.792273 (* 1 = 0.792273 loss)\nI1210 02:21:59.456454  1002 solver.cpp:228] Iteration 11100, loss = 0.212178\nI1210 02:21:59.456499  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 02:21:59.456524  1002 solver.cpp:244]     Train net output #1: loss = 0.212178 (* 1 = 0.212178 loss)\nI1210 02:21:59.557327  1002 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI1210 02:24:18.071322  1002 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1210 02:25:38.768728  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81412\nI1210 02:25:38.769004  1002 solver.cpp:404]     Test net output #1: loss = 0.582428 (* 1 = 0.582428 loss)\nI1210 02:25:40.088289  1002 solver.cpp:228] Iteration 11200, loss = 0.260758\nI1210 02:25:40.088332  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 02:25:40.088356  1002 solver.cpp:244]     Train net output #1: loss = 0.260758 (* 1 = 0.260758 loss)\nI1210 02:25:40.196355  1002 sgd_solver.cpp:166] 
Iteration 11200, lr = 1.68\nI1210 02:27:58.748301  1002 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1210 02:29:19.442215  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77568\nI1210 02:29:19.442519  1002 solver.cpp:404]     Test net output #1: loss = 0.772617 (* 1 = 0.772617 loss)\nI1210 02:29:20.761255  1002 solver.cpp:228] Iteration 11300, loss = 0.321215\nI1210 02:29:20.761301  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 02:29:20.761325  1002 solver.cpp:244]     Train net output #1: loss = 0.321215 (* 1 = 0.321215 loss)\nI1210 02:29:20.865844  1002 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI1210 02:31:39.328778  1002 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1210 02:33:00.033926  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78576\nI1210 02:33:00.034205  1002 solver.cpp:404]     Test net output #1: loss = 0.735556 (* 1 = 0.735556 loss)\nI1210 02:33:01.352655  1002 solver.cpp:228] Iteration 11400, loss = 0.252789\nI1210 02:33:01.352701  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 02:33:01.352725  1002 solver.cpp:244]     Train net output #1: loss = 0.252789 (* 1 = 0.252789 loss)\nI1210 02:33:01.456574  1002 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI1210 02:35:19.947155  1002 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1210 02:36:40.679781  1002 solver.cpp:404]     Test net output #0: accuracy = 0.82624\nI1210 02:36:40.680060  1002 solver.cpp:404]     Test net output #1: loss = 0.553222 (* 1 = 0.553222 loss)\nI1210 02:36:41.998512  1002 solver.cpp:228] Iteration 11500, loss = 0.268523\nI1210 02:36:41.998558  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 02:36:41.998582  1002 solver.cpp:244]     Train net output #1: loss = 0.268523 (* 1 = 0.268523 loss)\nI1210 02:36:42.100106  1002 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI1210 02:39:00.859040  1002 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1210 02:40:21.557235  
1002 solver.cpp:404]     Test net output #0: accuracy = 0.8046\nI1210 02:40:21.557487  1002 solver.cpp:404]     Test net output #1: loss = 0.575173 (* 1 = 0.575173 loss)\nI1210 02:40:22.877053  1002 solver.cpp:228] Iteration 11600, loss = 0.192971\nI1210 02:40:22.877102  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 02:40:22.877126  1002 solver.cpp:244]     Train net output #1: loss = 0.192971 (* 1 = 0.192971 loss)\nI1210 02:40:22.979713  1002 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI1210 02:42:41.535766  1002 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1210 02:44:02.240077  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8114\nI1210 02:44:02.240348  1002 solver.cpp:404]     Test net output #1: loss = 0.620058 (* 1 = 0.620058 loss)\nI1210 02:44:03.558696  1002 solver.cpp:228] Iteration 11700, loss = 0.226297\nI1210 02:44:03.558749  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 02:44:03.558773  1002 solver.cpp:244]     Train net output #1: loss = 0.226297 (* 1 = 0.226297 loss)\nI1210 02:44:03.663956  1002 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI1210 02:46:22.162987  1002 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1210 02:47:42.842561  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80492\nI1210 02:47:42.842818  1002 solver.cpp:404]     Test net output #1: loss = 0.620113 (* 1 = 0.620113 loss)\nI1210 02:47:44.162472  1002 solver.cpp:228] Iteration 11800, loss = 0.18321\nI1210 02:47:44.162521  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 02:47:44.162545  1002 solver.cpp:244]     Train net output #1: loss = 0.18321 (* 1 = 0.18321 loss)\nI1210 02:47:44.266193  1002 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI1210 02:50:03.007915  1002 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1210 02:51:23.690368  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80112\nI1210 02:51:23.690657  1002 solver.cpp:404]     Test net output #1: 
loss = 0.656541 (* 1 = 0.656541 loss)\nI1210 02:51:25.010514  1002 solver.cpp:228] Iteration 11900, loss = 0.306094\nI1210 02:51:25.010562  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 02:51:25.010587  1002 solver.cpp:244]     Train net output #1: loss = 0.306094 (* 1 = 0.306094 loss)\nI1210 02:51:25.118703  1002 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI1210 02:53:43.895520  1002 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1210 02:55:04.609454  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7858\nI1210 02:55:04.609683  1002 solver.cpp:404]     Test net output #1: loss = 0.75211 (* 1 = 0.75211 loss)\nI1210 02:55:05.928859  1002 solver.cpp:228] Iteration 12000, loss = 0.28573\nI1210 02:55:05.928908  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 02:55:05.928932  1002 solver.cpp:244]     Train net output #1: loss = 0.28573 (* 1 = 0.28573 loss)\nI1210 02:55:06.036566  1002 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI1210 02:57:24.494998  1002 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1210 02:58:45.236377  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78516\nI1210 02:58:45.236640  1002 solver.cpp:404]     Test net output #1: loss = 0.654946 (* 1 = 0.654946 loss)\nI1210 02:58:46.556474  1002 solver.cpp:228] Iteration 12100, loss = 0.18743\nI1210 02:58:46.556524  1002 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1210 02:58:46.556548  1002 solver.cpp:244]     Train net output #1: loss = 0.18743 (* 1 = 0.18743 loss)\nI1210 02:58:46.660723  1002 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI1210 03:01:05.259346  1002 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1210 03:02:25.960340  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80576\nI1210 03:02:25.960641  1002 solver.cpp:404]     Test net output #1: loss = 0.659069 (* 1 = 0.659069 loss)\nI1210 03:02:27.280275  1002 solver.cpp:228] Iteration 12200, loss = 0.255868\nI1210 03:02:27.280323 
 1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 03:02:27.280339  1002 solver.cpp:244]     Train net output #1: loss = 0.255868 (* 1 = 0.255868 loss)\nI1210 03:02:27.385656  1002 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI1210 03:04:45.856123  1002 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1210 03:06:06.552959  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75096\nI1210 03:06:06.553239  1002 solver.cpp:404]     Test net output #1: loss = 0.806891 (* 1 = 0.806891 loss)\nI1210 03:06:07.871508  1002 solver.cpp:228] Iteration 12300, loss = 0.169258\nI1210 03:06:07.871553  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 03:06:07.871570  1002 solver.cpp:244]     Train net output #1: loss = 0.169258 (* 1 = 0.169258 loss)\nI1210 03:06:07.975853  1002 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI1210 03:08:26.480383  1002 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1210 03:09:47.170579  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79724\nI1210 03:09:47.170837  1002 solver.cpp:404]     Test net output #1: loss = 0.598653 (* 1 = 0.598653 loss)\nI1210 03:09:48.489732  1002 solver.cpp:228] Iteration 12400, loss = 0.309892\nI1210 03:09:48.489778  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 03:09:48.489799  1002 solver.cpp:244]     Train net output #1: loss = 0.309892 (* 1 = 0.309892 loss)\nI1210 03:09:48.594404  1002 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI1210 03:12:07.448472  1002 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1210 03:13:28.142042  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84304\nI1210 03:13:28.142325  1002 solver.cpp:404]     Test net output #1: loss = 0.498633 (* 1 = 0.498633 loss)\nI1210 03:13:29.460706  1002 solver.cpp:228] Iteration 12500, loss = 0.2437\nI1210 03:13:29.460749  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 03:13:29.460765  1002 solver.cpp:244]     Train net output #1: 
loss = 0.2437 (* 1 = 0.2437 loss)\nI1210 03:13:29.563009  1002 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI1210 03:15:47.971974  1002 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1210 03:17:08.671030  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77436\nI1210 03:17:08.671289  1002 solver.cpp:404]     Test net output #1: loss = 0.729008 (* 1 = 0.729008 loss)\nI1210 03:17:09.989512  1002 solver.cpp:228] Iteration 12600, loss = 0.321667\nI1210 03:17:09.989558  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1210 03:17:09.989574  1002 solver.cpp:244]     Train net output #1: loss = 0.321667 (* 1 = 0.321667 loss)\nI1210 03:17:10.094188  1002 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI1210 03:19:28.857019  1002 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1210 03:20:49.551512  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7826\nI1210 03:20:49.551769  1002 solver.cpp:404]     Test net output #1: loss = 0.729461 (* 1 = 0.729461 loss)\nI1210 03:20:50.871402  1002 solver.cpp:228] Iteration 12700, loss = 0.309892\nI1210 03:20:50.871448  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 03:20:50.871464  1002 solver.cpp:244]     Train net output #1: loss = 0.309892 (* 1 = 0.309892 loss)\nI1210 03:20:50.982555  1002 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI1210 03:23:09.485904  1002 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1210 03:24:30.193750  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76968\nI1210 03:24:30.194033  1002 solver.cpp:404]     Test net output #1: loss = 0.773806 (* 1 = 0.773806 loss)\nI1210 03:24:31.512332  1002 solver.cpp:228] Iteration 12800, loss = 0.277217\nI1210 03:24:31.512373  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 03:24:31.512389  1002 solver.cpp:244]     Train net output #1: loss = 0.277217 (* 1 = 0.277217 loss)\nI1210 03:24:31.618938  1002 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI1210 
03:26:50.126185  1002 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1210 03:28:10.832666  1002 solver.cpp:404]     Test net output #0: accuracy = 0.83328\nI1210 03:28:10.832962  1002 solver.cpp:404]     Test net output #1: loss = 0.540432 (* 1 = 0.540432 loss)\nI1210 03:28:12.151129  1002 solver.cpp:228] Iteration 12900, loss = 0.218364\nI1210 03:28:12.151170  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 03:28:12.151187  1002 solver.cpp:244]     Train net output #1: loss = 0.218364 (* 1 = 0.218364 loss)\nI1210 03:28:12.253458  1002 sgd_solver.cpp:166] Iteration 12900, lr = 1.935\nI1210 03:30:30.726238  1002 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1210 03:31:51.429045  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77924\nI1210 03:31:51.429347  1002 solver.cpp:404]     Test net output #1: loss = 0.747813 (* 1 = 0.747813 loss)\nI1210 03:31:52.747495  1002 solver.cpp:228] Iteration 13000, loss = 0.250393\nI1210 03:31:52.747537  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 03:31:52.747555  1002 solver.cpp:244]     Train net output #1: loss = 0.250393 (* 1 = 0.250393 loss)\nI1210 03:31:52.849483  1002 sgd_solver.cpp:166] Iteration 13000, lr = 1.95\nI1210 03:34:11.527056  1002 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1210 03:35:32.231637  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80592\nI1210 03:35:32.231921  1002 solver.cpp:404]     Test net output #1: loss = 0.683488 (* 1 = 0.683488 loss)\nI1210 03:35:33.551534  1002 solver.cpp:228] Iteration 13100, loss = 0.190865\nI1210 03:35:33.551579  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 03:35:33.551604  1002 solver.cpp:244]     Train net output #1: loss = 0.190865 (* 1 = 0.190865 loss)\nI1210 03:35:33.649077  1002 sgd_solver.cpp:166] Iteration 13100, lr = 1.965\nI1210 03:37:52.341032  1002 solver.cpp:337] Iteration 13200, Testing net (#0)\nI1210 03:39:13.039422  1002 solver.cpp:404]     Test net 
output #0: accuracy = 0.77308\nI1210 03:39:13.039702  1002 solver.cpp:404]     Test net output #1: loss = 0.714316 (* 1 = 0.714316 loss)\nI1210 03:39:14.357748  1002 solver.cpp:228] Iteration 13200, loss = 0.38876\nI1210 03:39:14.357794  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 03:39:14.357817  1002 solver.cpp:244]     Train net output #1: loss = 0.38876 (* 1 = 0.38876 loss)\nI1210 03:39:14.459409  1002 sgd_solver.cpp:166] Iteration 13200, lr = 1.98\nI1210 03:41:32.861706  1002 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1210 03:42:53.558790  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8016\nI1210 03:42:53.559103  1002 solver.cpp:404]     Test net output #1: loss = 0.63241 (* 1 = 0.63241 loss)\nI1210 03:42:54.877388  1002 solver.cpp:228] Iteration 13300, loss = 0.260135\nI1210 03:42:54.877432  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 03:42:54.877456  1002 solver.cpp:244]     Train net output #1: loss = 0.260135 (* 1 = 0.260135 loss)\nI1210 03:42:54.981076  1002 sgd_solver.cpp:166] Iteration 13300, lr = 1.995\nI1210 03:45:13.397362  1002 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1210 03:46:34.092813  1002 solver.cpp:404]     Test net output #0: accuracy = 0.73084\nI1210 03:46:34.093137  1002 solver.cpp:404]     Test net output #1: loss = 0.881441 (* 1 = 0.881441 loss)\nI1210 03:46:35.411180  1002 solver.cpp:228] Iteration 13400, loss = 0.233318\nI1210 03:46:35.411228  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 03:46:35.411252  1002 solver.cpp:244]     Train net output #1: loss = 0.233318 (* 1 = 0.233318 loss)\nI1210 03:46:35.516486  1002 sgd_solver.cpp:166] Iteration 13400, lr = 2.01\nI1210 03:48:53.938503  1002 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1210 03:50:14.647191  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80256\nI1210 03:50:14.647490  1002 solver.cpp:404]     Test net output #1: loss = 0.591024 (* 1 = 0.591024 
loss)\nI1210 03:50:15.967016  1002 solver.cpp:228] Iteration 13500, loss = 0.342061\nI1210 03:50:15.967063  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 03:50:15.967088  1002 solver.cpp:244]     Train net output #1: loss = 0.342061 (* 1 = 0.342061 loss)\nI1210 03:50:16.070472  1002 sgd_solver.cpp:166] Iteration 13500, lr = 2.025\nI1210 03:52:34.817473  1002 solver.cpp:337] Iteration 13600, Testing net (#0)\nI1210 03:53:55.527582  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81276\nI1210 03:53:55.527850  1002 solver.cpp:404]     Test net output #1: loss = 0.682834 (* 1 = 0.682834 loss)\nI1210 03:53:56.847090  1002 solver.cpp:228] Iteration 13600, loss = 0.249738\nI1210 03:53:56.847136  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 03:53:56.847159  1002 solver.cpp:244]     Train net output #1: loss = 0.249738 (* 1 = 0.249738 loss)\nI1210 03:53:56.945554  1002 sgd_solver.cpp:166] Iteration 13600, lr = 2.04\nI1210 03:56:15.385363  1002 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1210 03:57:36.083356  1002 solver.cpp:404]     Test net output #0: accuracy = 0.72184\nI1210 03:57:36.083665  1002 solver.cpp:404]     Test net output #1: loss = 1.01519 (* 1 = 1.01519 loss)\nI1210 03:57:37.401813  1002 solver.cpp:228] Iteration 13700, loss = 0.276355\nI1210 03:57:37.401859  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 03:57:37.401882  1002 solver.cpp:244]     Train net output #1: loss = 0.276355 (* 1 = 0.276355 loss)\nI1210 03:57:37.511880  1002 sgd_solver.cpp:166] Iteration 13700, lr = 2.055\nI1210 03:59:56.037029  1002 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1210 04:01:16.730804  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7756\nI1210 04:01:16.731112  1002 solver.cpp:404]     Test net output #1: loss = 0.715688 (* 1 = 0.715688 loss)\nI1210 04:01:18.049698  1002 solver.cpp:228] Iteration 13800, loss = 0.321969\nI1210 04:01:18.049749  1002 solver.cpp:244]    
 Train net output #0: accuracy = 0.896\nI1210 04:01:18.049774  1002 solver.cpp:244]     Train net output #1: loss = 0.321969 (* 1 = 0.321969 loss)\nI1210 04:01:18.150867  1002 sgd_solver.cpp:166] Iteration 13800, lr = 2.07\nI1210 04:03:36.894428  1002 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1210 04:04:57.579226  1002 solver.cpp:404]     Test net output #0: accuracy = 0.85\nI1210 04:04:57.579496  1002 solver.cpp:404]     Test net output #1: loss = 0.469434 (* 1 = 0.469434 loss)\nI1210 04:04:58.898526  1002 solver.cpp:228] Iteration 13900, loss = 0.316645\nI1210 04:04:58.898569  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 04:04:58.898593  1002 solver.cpp:244]     Train net output #1: loss = 0.316645 (* 1 = 0.316645 loss)\nI1210 04:04:59.000710  1002 sgd_solver.cpp:166] Iteration 13900, lr = 2.085\nI1210 04:07:17.689327  1002 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1210 04:08:38.401042  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81264\nI1210 04:08:38.401351  1002 solver.cpp:404]     Test net output #1: loss = 0.594912 (* 1 = 0.594912 loss)\nI1210 04:08:39.720146  1002 solver.cpp:228] Iteration 14000, loss = 0.359595\nI1210 04:08:39.720187  1002 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1210 04:08:39.720213  1002 solver.cpp:244]     Train net output #1: loss = 0.359595 (* 1 = 0.359595 loss)\nI1210 04:08:39.828547  1002 sgd_solver.cpp:166] Iteration 14000, lr = 2.1\nI1210 04:10:58.530225  1002 solver.cpp:337] Iteration 14100, Testing net (#0)\nI1210 04:12:19.199700  1002 solver.cpp:404]     Test net output #0: accuracy = 0.68348\nI1210 04:12:19.200018  1002 solver.cpp:404]     Test net output #1: loss = 1.26946 (* 1 = 1.26946 loss)\nI1210 04:12:20.518177  1002 solver.cpp:228] Iteration 14100, loss = 0.214008\nI1210 04:12:20.518218  1002 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1210 04:12:20.518241  1002 solver.cpp:244]     Train net output #1: loss = 0.214008 (* 1 = 
0.214008 loss)\nI1210 04:12:20.625138  1002 sgd_solver.cpp:166] Iteration 14100, lr = 2.115\nI1210 04:14:39.008711  1002 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1210 04:15:59.666082  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75976\nI1210 04:15:59.666374  1002 solver.cpp:404]     Test net output #1: loss = 0.764812 (* 1 = 0.764812 loss)\nI1210 04:16:00.984755  1002 solver.cpp:228] Iteration 14200, loss = 0.293572\nI1210 04:16:00.984802  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 04:16:00.984825  1002 solver.cpp:244]     Train net output #1: loss = 0.293572 (* 1 = 0.293572 loss)\nI1210 04:16:01.084784  1002 sgd_solver.cpp:166] Iteration 14200, lr = 2.13\nI1210 04:18:19.456179  1002 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1210 04:19:41.116518  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81292\nI1210 04:19:41.116834  1002 solver.cpp:404]     Test net output #1: loss = 0.612298 (* 1 = 0.612298 loss)\nI1210 04:19:42.440544  1002 solver.cpp:228] Iteration 14300, loss = 0.331552\nI1210 04:19:42.440603  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 04:19:42.440620  1002 solver.cpp:244]     Train net output #1: loss = 0.331552 (* 1 = 0.331552 loss)\nI1210 04:19:42.535333  1002 sgd_solver.cpp:166] Iteration 14300, lr = 2.145\nI1210 04:22:01.065075  1002 solver.cpp:337] Iteration 14400, Testing net (#0)\nI1210 04:23:22.737479  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81472\nI1210 04:23:22.737802  1002 solver.cpp:404]     Test net output #1: loss = 0.573606 (* 1 = 0.573606 loss)\nI1210 04:23:24.061269  1002 solver.cpp:228] Iteration 14400, loss = 0.159094\nI1210 04:23:24.061329  1002 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1210 04:23:24.061347  1002 solver.cpp:244]     Train net output #1: loss = 0.159094 (* 1 = 0.159094 loss)\nI1210 04:23:24.169528  1002 sgd_solver.cpp:166] Iteration 14400, lr = 2.16\nI1210 04:25:42.670892  1002 
solver.cpp:337] Iteration 14500, Testing net (#0)\nI1210 04:27:04.344692  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78796\nI1210 04:27:04.345049  1002 solver.cpp:404]     Test net output #1: loss = 0.750177 (* 1 = 0.750177 loss)\nI1210 04:27:05.668470  1002 solver.cpp:228] Iteration 14500, loss = 0.30526\nI1210 04:27:05.668529  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 04:27:05.668547  1002 solver.cpp:244]     Train net output #1: loss = 0.30526 (* 1 = 0.30526 loss)\nI1210 04:27:05.769415  1002 sgd_solver.cpp:166] Iteration 14500, lr = 2.175\nI1210 04:29:24.226850  1002 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1210 04:30:45.890012  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79816\nI1210 04:30:45.890322  1002 solver.cpp:404]     Test net output #1: loss = 0.668495 (* 1 = 0.668495 loss)\nI1210 04:30:47.213551  1002 solver.cpp:228] Iteration 14600, loss = 0.253224\nI1210 04:30:47.213608  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 04:30:47.213626  1002 solver.cpp:244]     Train net output #1: loss = 0.253224 (* 1 = 0.253224 loss)\nI1210 04:30:47.307437  1002 sgd_solver.cpp:166] Iteration 14600, lr = 2.19\nI1210 04:33:05.839712  1002 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1210 04:34:27.503113  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76152\nI1210 04:34:27.503399  1002 solver.cpp:404]     Test net output #1: loss = 0.812022 (* 1 = 0.812022 loss)\nI1210 04:34:28.825698  1002 solver.cpp:228] Iteration 14700, loss = 0.252448\nI1210 04:34:28.825757  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 04:34:28.825774  1002 solver.cpp:244]     Train net output #1: loss = 0.252448 (* 1 = 0.252448 loss)\nI1210 04:34:28.925959  1002 sgd_solver.cpp:166] Iteration 14700, lr = 2.205\nI1210 04:36:47.453333  1002 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1210 04:38:09.118381  1002 solver.cpp:404]     Test net output #0: accuracy = 
0.84004\nI1210 04:38:09.118693  1002 solver.cpp:404]     Test net output #1: loss = 0.491427 (* 1 = 0.491427 loss)\nI1210 04:38:10.440752  1002 solver.cpp:228] Iteration 14800, loss = 0.314315\nI1210 04:38:10.440807  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 04:38:10.440825  1002 solver.cpp:244]     Train net output #1: loss = 0.314315 (* 1 = 0.314315 loss)\nI1210 04:38:10.547513  1002 sgd_solver.cpp:166] Iteration 14800, lr = 2.22\nI1210 04:40:29.468844  1002 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1210 04:41:51.127256  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79976\nI1210 04:41:51.127566  1002 solver.cpp:404]     Test net output #1: loss = 0.689789 (* 1 = 0.689789 loss)\nI1210 04:41:52.450085  1002 solver.cpp:228] Iteration 14900, loss = 0.264648\nI1210 04:41:52.450139  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 04:41:52.450156  1002 solver.cpp:244]     Train net output #1: loss = 0.264648 (* 1 = 0.264648 loss)\nI1210 04:41:52.547205  1002 sgd_solver.cpp:166] Iteration 14900, lr = 2.235\nI1210 04:44:11.120851  1002 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1210 04:45:32.778033  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78048\nI1210 04:45:32.778347  1002 solver.cpp:404]     Test net output #1: loss = 0.695126 (* 1 = 0.695126 loss)\nI1210 04:45:34.100476  1002 solver.cpp:228] Iteration 15000, loss = 0.215539\nI1210 04:45:34.100533  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 04:45:34.100549  1002 solver.cpp:244]     Train net output #1: loss = 0.215539 (* 1 = 0.215539 loss)\nI1210 04:45:34.197005  1002 sgd_solver.cpp:166] Iteration 15000, lr = 2.25\nI1210 04:47:53.086156  1002 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1210 04:49:14.747452  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76824\nI1210 04:49:14.747761  1002 solver.cpp:404]     Test net output #1: loss = 0.768006 (* 1 = 0.768006 loss)\nI1210 
04:49:16.070329  1002 solver.cpp:228] Iteration 15100, loss = 0.286707\nI1210 04:49:16.070385  1002 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1210 04:49:16.070403  1002 solver.cpp:244]     Train net output #1: loss = 0.286707 (* 1 = 0.286707 loss)\nI1210 04:49:16.175571  1002 sgd_solver.cpp:166] Iteration 15100, lr = 2.265\nI1210 04:51:34.764442  1002 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1210 04:52:56.409464  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81712\nI1210 04:52:56.409775  1002 solver.cpp:404]     Test net output #1: loss = 0.557276 (* 1 = 0.557276 loss)\nI1210 04:52:57.731818  1002 solver.cpp:228] Iteration 15200, loss = 0.326146\nI1210 04:52:57.731875  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 04:52:57.731894  1002 solver.cpp:244]     Train net output #1: loss = 0.326146 (* 1 = 0.326146 loss)\nI1210 04:52:57.831565  1002 sgd_solver.cpp:166] Iteration 15200, lr = 2.28\nI1210 04:55:16.817759  1002 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1210 04:56:38.484632  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84108\nI1210 04:56:38.484926  1002 solver.cpp:404]     Test net output #1: loss = 0.509408 (* 1 = 0.509408 loss)\nI1210 04:56:39.807065  1002 solver.cpp:228] Iteration 15300, loss = 0.319905\nI1210 04:56:39.807123  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 04:56:39.807142  1002 solver.cpp:244]     Train net output #1: loss = 0.319905 (* 1 = 0.319905 loss)\nI1210 04:56:39.905982  1002 sgd_solver.cpp:166] Iteration 15300, lr = 2.295\nI1210 04:58:58.836199  1002 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1210 05:00:20.490675  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77528\nI1210 05:00:20.490983  1002 solver.cpp:404]     Test net output #1: loss = 0.755489 (* 1 = 0.755489 loss)\nI1210 05:00:21.813268  1002 solver.cpp:228] Iteration 15400, loss = 0.291583\nI1210 05:00:21.813326  1002 solver.cpp:244]     Train net 
output #0: accuracy = 0.904\nI1210 05:00:21.813344  1002 solver.cpp:244]     Train net output #1: loss = 0.291583 (* 1 = 0.291583 loss)\nI1210 05:00:21.911346  1002 sgd_solver.cpp:166] Iteration 15400, lr = 2.31\nI1210 05:02:40.450043  1002 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1210 05:04:02.109262  1002 solver.cpp:404]     Test net output #0: accuracy = 0.70212\nI1210 05:04:02.109570  1002 solver.cpp:404]     Test net output #1: loss = 0.994086 (* 1 = 0.994086 loss)\nI1210 05:04:03.431707  1002 solver.cpp:228] Iteration 15500, loss = 0.339036\nI1210 05:04:03.431766  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1210 05:04:03.431784  1002 solver.cpp:244]     Train net output #1: loss = 0.339036 (* 1 = 0.339036 loss)\nI1210 05:04:03.530119  1002 sgd_solver.cpp:166] Iteration 15500, lr = 2.325\nI1210 05:06:22.037132  1002 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1210 05:07:43.700034  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80712\nI1210 05:07:43.700330  1002 solver.cpp:404]     Test net output #1: loss = 0.619124 (* 1 = 0.619124 loss)\nI1210 05:07:45.022481  1002 solver.cpp:228] Iteration 15600, loss = 0.340946\nI1210 05:07:45.022539  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 05:07:45.022558  1002 solver.cpp:244]     Train net output #1: loss = 0.340946 (* 1 = 0.340946 loss)\nI1210 05:07:45.117180  1002 sgd_solver.cpp:166] Iteration 15600, lr = 2.34\nI1210 05:10:03.593552  1002 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1210 05:11:25.244168  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI1210 05:11:25.244479  1002 solver.cpp:404]     Test net output #1: loss = 0.603671 (* 1 = 0.603671 loss)\nI1210 05:11:26.566700  1002 solver.cpp:228] Iteration 15700, loss = 0.255376\nI1210 05:11:26.566758  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 05:11:26.566776  1002 solver.cpp:244]     Train net output #1: loss = 0.255375 (* 1 = 0.255375 
loss)\nI1210 05:11:26.666779  1002 sgd_solver.cpp:166] Iteration 15700, lr = 2.355\nI1210 05:13:45.209422  1002 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1210 05:15:06.861531  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7988\nI1210 05:15:06.861843  1002 solver.cpp:404]     Test net output #1: loss = 0.70927 (* 1 = 0.70927 loss)\nI1210 05:15:08.184108  1002 solver.cpp:228] Iteration 15800, loss = 0.371331\nI1210 05:15:08.184165  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 05:15:08.184181  1002 solver.cpp:244]     Train net output #1: loss = 0.371331 (* 1 = 0.371331 loss)\nI1210 05:15:08.282316  1002 sgd_solver.cpp:166] Iteration 15800, lr = 2.37\nI1210 05:17:26.884830  1002 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1210 05:18:48.548285  1002 solver.cpp:404]     Test net output #0: accuracy = 0.73436\nI1210 05:18:48.548576  1002 solver.cpp:404]     Test net output #1: loss = 1.02086 (* 1 = 1.02086 loss)\nI1210 05:18:49.871173  1002 solver.cpp:228] Iteration 15900, loss = 0.338988\nI1210 05:18:49.871232  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 05:18:49.871249  1002 solver.cpp:244]     Train net output #1: loss = 0.338988 (* 1 = 0.338988 loss)\nI1210 05:18:49.972815  1002 sgd_solver.cpp:166] Iteration 15900, lr = 2.385\nI1210 05:21:08.508153  1002 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1210 05:22:30.163975  1002 solver.cpp:404]     Test net output #0: accuracy = 0.815\nI1210 05:22:30.164289  1002 solver.cpp:404]     Test net output #1: loss = 0.588976 (* 1 = 0.588976 loss)\nI1210 05:22:31.486523  1002 solver.cpp:228] Iteration 16000, loss = 0.335245\nI1210 05:22:31.486582  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 05:22:31.486601  1002 solver.cpp:244]     Train net output #1: loss = 0.335245 (* 1 = 0.335245 loss)\nI1210 05:22:31.585984  1002 sgd_solver.cpp:166] Iteration 16000, lr = 2.4\nI1210 05:24:50.572244  1002 solver.cpp:337] Iteration 
16100, Testing net (#0)\nI1210 05:26:12.248610  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8098\nI1210 05:26:12.248903  1002 solver.cpp:404]     Test net output #1: loss = 0.594303 (* 1 = 0.594303 loss)\nI1210 05:26:13.572438  1002 solver.cpp:228] Iteration 16100, loss = 0.311145\nI1210 05:26:13.572494  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 05:26:13.572512  1002 solver.cpp:244]     Train net output #1: loss = 0.311145 (* 1 = 0.311145 loss)\nI1210 05:26:13.670523  1002 sgd_solver.cpp:166] Iteration 16100, lr = 2.415\nI1210 05:28:32.138003  1002 solver.cpp:337] Iteration 16200, Testing net (#0)\nI1210 05:29:53.801973  1002 solver.cpp:404]     Test net output #0: accuracy = 0.6914\nI1210 05:29:53.802263  1002 solver.cpp:404]     Test net output #1: loss = 1.36089 (* 1 = 1.36089 loss)\nI1210 05:29:55.125900  1002 solver.cpp:228] Iteration 16200, loss = 0.353131\nI1210 05:29:55.125962  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1210 05:29:55.125982  1002 solver.cpp:244]     Train net output #1: loss = 0.353131 (* 1 = 0.353131 loss)\nI1210 05:29:55.228571  1002 sgd_solver.cpp:166] Iteration 16200, lr = 2.43\nI1210 05:32:13.743407  1002 solver.cpp:337] Iteration 16300, Testing net (#0)\nI1210 05:33:35.412891  1002 solver.cpp:404]     Test net output #0: accuracy = 0.73404\nI1210 05:33:35.413195  1002 solver.cpp:404]     Test net output #1: loss = 0.915967 (* 1 = 0.915967 loss)\nI1210 05:33:36.735924  1002 solver.cpp:228] Iteration 16300, loss = 0.324726\nI1210 05:33:36.735980  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 05:33:36.735996  1002 solver.cpp:244]     Train net output #1: loss = 0.324726 (* 1 = 0.324726 loss)\nI1210 05:33:36.833501  1002 sgd_solver.cpp:166] Iteration 16300, lr = 2.445\nI1210 05:35:55.352238  1002 solver.cpp:337] Iteration 16400, Testing net (#0)\nI1210 05:37:17.028488  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7992\nI1210 
05:37:17.028813  1002 solver.cpp:404]     Test net output #1: loss = 0.626975 (* 1 = 0.626975 loss)\nI1210 05:37:18.351146  1002 solver.cpp:228] Iteration 16400, loss = 0.357228\nI1210 05:37:18.351200  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1210 05:37:18.351218  1002 solver.cpp:244]     Train net output #1: loss = 0.357227 (* 1 = 0.357227 loss)\nI1210 05:37:18.452006  1002 sgd_solver.cpp:166] Iteration 16400, lr = 2.46\nI1210 05:39:37.464830  1002 solver.cpp:337] Iteration 16500, Testing net (#0)\nI1210 05:40:59.138062  1002 solver.cpp:404]     Test net output #0: accuracy = 0.84864\nI1210 05:40:59.138373  1002 solver.cpp:404]     Test net output #1: loss = 0.47278 (* 1 = 0.47278 loss)\nI1210 05:41:00.460619  1002 solver.cpp:228] Iteration 16500, loss = 0.305999\nI1210 05:41:00.460677  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 05:41:00.460695  1002 solver.cpp:244]     Train net output #1: loss = 0.305999 (* 1 = 0.305999 loss)\nI1210 05:41:00.559376  1002 sgd_solver.cpp:166] Iteration 16500, lr = 2.475\nI1210 05:43:19.499505  1002 solver.cpp:337] Iteration 16600, Testing net (#0)\nI1210 05:44:41.154631  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75248\nI1210 05:44:41.154950  1002 solver.cpp:404]     Test net output #1: loss = 0.796691 (* 1 = 0.796691 loss)\nI1210 05:44:42.477205  1002 solver.cpp:228] Iteration 16600, loss = 0.232566\nI1210 05:44:42.477262  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 05:44:42.477279  1002 solver.cpp:244]     Train net output #1: loss = 0.232565 (* 1 = 0.232565 loss)\nI1210 05:44:42.574170  1002 sgd_solver.cpp:166] Iteration 16600, lr = 2.49\nI1210 05:47:01.114181  1002 solver.cpp:337] Iteration 16700, Testing net (#0)\nI1210 05:48:22.771617  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75836\nI1210 05:48:22.771936  1002 solver.cpp:404]     Test net output #1: loss = 0.848796 (* 1 = 0.848796 loss)\nI1210 05:48:24.093389  1002 
solver.cpp:228] Iteration 16700, loss = 0.372633\nI1210 05:48:24.093443  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 05:48:24.093461  1002 solver.cpp:244]     Train net output #1: loss = 0.372633 (* 1 = 0.372633 loss)\nI1210 05:48:24.198168  1002 sgd_solver.cpp:166] Iteration 16700, lr = 2.505\nI1210 05:50:42.713094  1002 solver.cpp:337] Iteration 16800, Testing net (#0)\nI1210 05:52:04.375844  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78932\nI1210 05:52:04.376148  1002 solver.cpp:404]     Test net output #1: loss = 0.698623 (* 1 = 0.698623 loss)\nI1210 05:52:05.698616  1002 solver.cpp:228] Iteration 16800, loss = 0.305525\nI1210 05:52:05.698670  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 05:52:05.698688  1002 solver.cpp:244]     Train net output #1: loss = 0.305525 (* 1 = 0.305525 loss)\nI1210 05:52:05.800237  1002 sgd_solver.cpp:166] Iteration 16800, lr = 2.52\nI1210 05:54:24.246484  1002 solver.cpp:337] Iteration 16900, Testing net (#0)\nI1210 05:55:45.924904  1002 solver.cpp:404]     Test net output #0: accuracy = 0.74036\nI1210 05:55:45.925223  1002 solver.cpp:404]     Test net output #1: loss = 0.817751 (* 1 = 0.817751 loss)\nI1210 05:55:47.248492  1002 solver.cpp:228] Iteration 16900, loss = 0.412324\nI1210 05:55:47.248546  1002 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI1210 05:55:47.248564  1002 solver.cpp:244]     Train net output #1: loss = 0.412324 (* 1 = 0.412324 loss)\nI1210 05:55:47.352318  1002 sgd_solver.cpp:166] Iteration 16900, lr = 2.535\nI1210 05:58:05.774861  1002 solver.cpp:337] Iteration 17000, Testing net (#0)\nI1210 05:59:27.436992  1002 solver.cpp:404]     Test net output #0: accuracy = 0.73384\nI1210 05:59:27.437314  1002 solver.cpp:404]     Test net output #1: loss = 0.821929 (* 1 = 0.821929 loss)\nI1210 05:59:28.759624  1002 solver.cpp:228] Iteration 17000, loss = 0.329522\nI1210 05:59:28.759677  1002 solver.cpp:244]     Train net output #0: accuracy 
= 0.896\nI1210 05:59:28.759694  1002 solver.cpp:244]     Train net output #1: loss = 0.329521 (* 1 = 0.329521 loss)\nI1210 05:59:28.865758  1002 sgd_solver.cpp:166] Iteration 17000, lr = 2.55\nI1210 06:01:47.879010  1002 solver.cpp:337] Iteration 17100, Testing net (#0)\nI1210 06:03:09.516835  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78488\nI1210 06:03:09.517174  1002 solver.cpp:404]     Test net output #1: loss = 0.660461 (* 1 = 0.660461 loss)\nI1210 06:03:10.839565  1002 solver.cpp:228] Iteration 17100, loss = 0.359302\nI1210 06:03:10.839619  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 06:03:10.839637  1002 solver.cpp:244]     Train net output #1: loss = 0.359302 (* 1 = 0.359302 loss)\nI1210 06:03:10.933156  1002 sgd_solver.cpp:166] Iteration 17100, lr = 2.565\nI1210 06:05:29.451020  1002 solver.cpp:337] Iteration 17200, Testing net (#0)\nI1210 06:06:51.105067  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7636\nI1210 06:06:51.105362  1002 solver.cpp:404]     Test net output #1: loss = 0.812266 (* 1 = 0.812266 loss)\nI1210 06:06:52.427290  1002 solver.cpp:228] Iteration 17200, loss = 0.454227\nI1210 06:06:52.427345  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 06:06:52.427362  1002 solver.cpp:244]     Train net output #1: loss = 0.454227 (* 1 = 0.454227 loss)\nI1210 06:06:52.525324  1002 sgd_solver.cpp:166] Iteration 17200, lr = 2.58\nI1210 06:09:11.066550  1002 solver.cpp:337] Iteration 17300, Testing net (#0)\nI1210 06:10:32.713404  1002 solver.cpp:404]     Test net output #0: accuracy = 0.81284\nI1210 06:10:32.713718  1002 solver.cpp:404]     Test net output #1: loss = 0.598564 (* 1 = 0.598564 loss)\nI1210 06:10:34.035799  1002 solver.cpp:228] Iteration 17300, loss = 0.281961\nI1210 06:10:34.035852  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 06:10:34.035871  1002 solver.cpp:244]     Train net output #1: loss = 0.281961 (* 1 = 0.281961 loss)\nI1210 
06:10:34.132483  1002 sgd_solver.cpp:166] Iteration 17300, lr = 2.595\nI1210 06:12:52.594904  1002 solver.cpp:337] Iteration 17400, Testing net (#0)\nI1210 06:14:14.260900  1002 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI1210 06:14:14.261265  1002 solver.cpp:404]     Test net output #1: loss = 0.588587 (* 1 = 0.588587 loss)\nI1210 06:14:15.583451  1002 solver.cpp:228] Iteration 17400, loss = 0.318887\nI1210 06:14:15.583506  1002 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1210 06:14:15.583523  1002 solver.cpp:244]     Train net output #1: loss = 0.318887 (* 1 = 0.318887 loss)\nI1210 06:14:15.687196  1002 sgd_solver.cpp:166] Iteration 17400, lr = 2.61\nI1210 06:16:34.323962  1002 solver.cpp:337] Iteration 17500, Testing net (#0)\nI1210 06:17:55.962644  1002 solver.cpp:404]     Test net output #0: accuracy = 0.68828\nI1210 06:17:55.962967  1002 solver.cpp:404]     Test net output #1: loss = 1.08773 (* 1 = 1.08773 loss)\nI1210 06:17:57.284852  1002 solver.cpp:228] Iteration 17500, loss = 0.381414\nI1210 06:17:57.284907  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1210 06:17:57.284925  1002 solver.cpp:244]     Train net output #1: loss = 0.381414 (* 1 = 0.381414 loss)\nI1210 06:17:57.385674  1002 sgd_solver.cpp:166] Iteration 17500, lr = 2.625\nI1210 06:20:16.347596  1002 solver.cpp:337] Iteration 17600, Testing net (#0)\nI1210 06:21:37.985280  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76072\nI1210 06:21:37.985576  1002 solver.cpp:404]     Test net output #1: loss = 0.736806 (* 1 = 0.736806 loss)\nI1210 06:21:39.308631  1002 solver.cpp:228] Iteration 17600, loss = 0.295521\nI1210 06:21:39.308687  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 06:21:39.308706  1002 solver.cpp:244]     Train net output #1: loss = 0.29552 (* 1 = 0.29552 loss)\nI1210 06:21:39.406873  1002 sgd_solver.cpp:166] Iteration 17600, lr = 2.64\nI1210 06:23:58.486866  1002 solver.cpp:337] Iteration 17700, 
Testing net (#0)\nI1210 06:25:20.132483  1002 solver.cpp:404]     Test net output #0: accuracy = 0.71484\nI1210 06:25:20.132799  1002 solver.cpp:404]     Test net output #1: loss = 0.982431 (* 1 = 0.982431 loss)\nI1210 06:25:21.456113  1002 solver.cpp:228] Iteration 17700, loss = 0.36737\nI1210 06:25:21.456167  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 06:25:21.456184  1002 solver.cpp:244]     Train net output #1: loss = 0.36737 (* 1 = 0.36737 loss)\nI1210 06:25:21.557447  1002 sgd_solver.cpp:166] Iteration 17700, lr = 2.655\nI1210 06:27:40.052955  1002 solver.cpp:337] Iteration 17800, Testing net (#0)\nI1210 06:29:01.708196  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7634\nI1210 06:29:01.708513  1002 solver.cpp:404]     Test net output #1: loss = 0.728492 (* 1 = 0.728492 loss)\nI1210 06:29:03.031599  1002 solver.cpp:228] Iteration 17800, loss = 0.321966\nI1210 06:29:03.031652  1002 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1210 06:29:03.031669  1002 solver.cpp:244]     Train net output #1: loss = 0.321966 (* 1 = 0.321966 loss)\nI1210 06:29:03.131783  1002 sgd_solver.cpp:166] Iteration 17800, lr = 2.67\nI1210 06:31:22.108526  1002 solver.cpp:337] Iteration 17900, Testing net (#0)\nI1210 06:32:43.789067  1002 solver.cpp:404]     Test net output #0: accuracy = 0.80632\nI1210 06:32:43.789386  1002 solver.cpp:404]     Test net output #1: loss = 0.56662 (* 1 = 0.56662 loss)\nI1210 06:32:45.113042  1002 solver.cpp:228] Iteration 17900, loss = 0.303933\nI1210 06:32:45.113103  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 06:32:45.113121  1002 solver.cpp:244]     Train net output #1: loss = 0.303933 (* 1 = 0.303933 loss)\nI1210 06:32:45.213956  1002 sgd_solver.cpp:166] Iteration 17900, lr = 2.685\nI1210 06:35:03.953938  1002 solver.cpp:337] Iteration 18000, Testing net (#0)\nI1210 06:36:25.628063  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77672\nI1210 06:36:25.628358  1002 
solver.cpp:404]     Test net output #1: loss = 0.667468 (* 1 = 0.667468 loss)\nI1210 06:36:26.951736  1002 solver.cpp:228] Iteration 18000, loss = 0.358418\nI1210 06:36:26.951797  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 06:36:26.951815  1002 solver.cpp:244]     Train net output #1: loss = 0.358418 (* 1 = 0.358418 loss)\nI1210 06:36:27.054546  1002 sgd_solver.cpp:166] Iteration 18000, lr = 2.7\nI1210 06:38:45.790359  1002 solver.cpp:337] Iteration 18100, Testing net (#0)\nI1210 06:40:07.456892  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7502\nI1210 06:40:07.457203  1002 solver.cpp:404]     Test net output #1: loss = 0.726339 (* 1 = 0.726339 loss)\nI1210 06:40:08.780999  1002 solver.cpp:228] Iteration 18100, loss = 0.279316\nI1210 06:40:08.781056  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 06:40:08.781075  1002 solver.cpp:244]     Train net output #1: loss = 0.279315 (* 1 = 0.279315 loss)\nI1210 06:40:08.879485  1002 sgd_solver.cpp:166] Iteration 18100, lr = 2.715\nI1210 06:42:27.778246  1002 solver.cpp:337] Iteration 18200, Testing net (#0)\nI1210 06:43:49.439932  1002 solver.cpp:404]     Test net output #0: accuracy = 0.72592\nI1210 06:43:49.440234  1002 solver.cpp:404]     Test net output #1: loss = 1.05885 (* 1 = 1.05885 loss)\nI1210 06:43:50.763605  1002 solver.cpp:228] Iteration 18200, loss = 0.281217\nI1210 06:43:50.763664  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 06:43:50.763682  1002 solver.cpp:244]     Train net output #1: loss = 0.281217 (* 1 = 0.281217 loss)\nI1210 06:43:50.864200  1002 sgd_solver.cpp:166] Iteration 18200, lr = 2.73\nI1210 06:46:09.632339  1002 solver.cpp:337] Iteration 18300, Testing net (#0)\nI1210 06:47:31.304049  1002 solver.cpp:404]     Test net output #0: accuracy = 0.69056\nI1210 06:47:31.304365  1002 solver.cpp:404]     Test net output #1: loss = 1.03228 (* 1 = 1.03228 loss)\nI1210 06:47:32.626837  1002 solver.cpp:228] Iteration 
18300, loss = 0.336247\nI1210 06:47:32.626894  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 06:47:32.626912  1002 solver.cpp:244]     Train net output #1: loss = 0.336246 (* 1 = 0.336246 loss)\nI1210 06:47:32.732117  1002 sgd_solver.cpp:166] Iteration 18300, lr = 2.745\nI1210 06:49:51.645907  1002 solver.cpp:337] Iteration 18400, Testing net (#0)\nI1210 06:51:13.320806  1002 solver.cpp:404]     Test net output #0: accuracy = 0.76048\nI1210 06:51:13.321127  1002 solver.cpp:404]     Test net output #1: loss = 0.824773 (* 1 = 0.824773 loss)\nI1210 06:51:14.643508  1002 solver.cpp:228] Iteration 18400, loss = 0.386487\nI1210 06:51:14.643568  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 06:51:14.643587  1002 solver.cpp:244]     Train net output #1: loss = 0.386487 (* 1 = 0.386487 loss)\nI1210 06:51:14.746749  1002 sgd_solver.cpp:166] Iteration 18400, lr = 2.76\nI1210 06:53:33.580894  1002 solver.cpp:337] Iteration 18500, Testing net (#0)\nI1210 06:54:55.267891  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79724\nI1210 06:54:55.268204  1002 solver.cpp:404]     Test net output #1: loss = 0.677517 (* 1 = 0.677517 loss)\nI1210 06:54:56.590848  1002 solver.cpp:228] Iteration 18500, loss = 0.306872\nI1210 06:54:56.590906  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 06:54:56.590925  1002 solver.cpp:244]     Train net output #1: loss = 0.306872 (* 1 = 0.306872 loss)\nI1210 06:54:56.693245  1002 sgd_solver.cpp:166] Iteration 18500, lr = 2.775\nI1210 06:57:15.824717  1002 solver.cpp:337] Iteration 18600, Testing net (#0)\nI1210 06:58:37.505180  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79348\nI1210 06:58:37.505476  1002 solver.cpp:404]     Test net output #1: loss = 0.592911 (* 1 = 0.592911 loss)\nI1210 06:58:38.828346  1002 solver.cpp:228] Iteration 18600, loss = 0.417773\nI1210 06:58:38.828407  1002 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI1210 
06:58:38.828424  1002 solver.cpp:244]     Train net output #1: loss = 0.417772 (* 1 = 0.417772 loss)\nI1210 06:58:38.929476  1002 sgd_solver.cpp:166] Iteration 18600, lr = 2.79\nI1210 07:00:57.717253  1002 solver.cpp:337] Iteration 18700, Testing net (#0)\nI1210 07:02:19.396023  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7732\nI1210 07:02:19.396351  1002 solver.cpp:404]     Test net output #1: loss = 0.693996 (* 1 = 0.693996 loss)\nI1210 07:02:20.718569  1002 solver.cpp:228] Iteration 18700, loss = 0.41202\nI1210 07:02:20.718627  1002 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI1210 07:02:20.718644  1002 solver.cpp:244]     Train net output #1: loss = 0.41202 (* 1 = 0.41202 loss)\nI1210 07:02:20.824698  1002 sgd_solver.cpp:166] Iteration 18700, lr = 2.805\nI1210 07:04:40.007439  1002 solver.cpp:337] Iteration 18800, Testing net (#0)\nI1210 07:06:01.756367  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75396\nI1210 07:06:01.756665  1002 solver.cpp:404]     Test net output #1: loss = 0.879841 (* 1 = 0.879841 loss)\nI1210 07:06:03.079757  1002 solver.cpp:228] Iteration 18800, loss = 0.502537\nI1210 07:06:03.079818  1002 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI1210 07:06:03.079835  1002 solver.cpp:244]     Train net output #1: loss = 0.502537 (* 1 = 0.502537 loss)\nI1210 07:06:03.186444  1002 sgd_solver.cpp:166] Iteration 18800, lr = 2.82\nI1210 07:08:22.031204  1002 solver.cpp:337] Iteration 18900, Testing net (#0)\nI1210 07:09:43.690480  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79788\nI1210 07:09:43.690778  1002 solver.cpp:404]     Test net output #1: loss = 0.615147 (* 1 = 0.615147 loss)\nI1210 07:09:45.012989  1002 solver.cpp:228] Iteration 18900, loss = 0.279105\nI1210 07:09:45.013047  1002 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1210 07:09:45.013065  1002 solver.cpp:244]     Train net output #1: loss = 0.279105 (* 1 = 0.279105 loss)\nI1210 07:09:45.120354  1002 
sgd_solver.cpp:166] Iteration 18900, lr = 2.835\nI1210 07:12:03.986310  1002 solver.cpp:337] Iteration 19000, Testing net (#0)\nI1210 07:13:25.652287  1002 solver.cpp:404]     Test net output #0: accuracy = 0.78272\nI1210 07:13:25.652606  1002 solver.cpp:404]     Test net output #1: loss = 0.674972 (* 1 = 0.674972 loss)\nI1210 07:13:26.975850  1002 solver.cpp:228] Iteration 19000, loss = 0.26452\nI1210 07:13:26.975913  1002 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1210 07:13:26.975930  1002 solver.cpp:244]     Train net output #1: loss = 0.26452 (* 1 = 0.26452 loss)\nI1210 07:13:27.080304  1002 sgd_solver.cpp:166] Iteration 19000, lr = 2.85\nI1210 07:15:45.915726  1002 solver.cpp:337] Iteration 19100, Testing net (#0)\nI1210 07:17:07.578305  1002 solver.cpp:404]     Test net output #0: accuracy = 0.774\nI1210 07:17:07.578600  1002 solver.cpp:404]     Test net output #1: loss = 0.672912 (* 1 = 0.672912 loss)\nI1210 07:17:08.902107  1002 solver.cpp:228] Iteration 19100, loss = 0.405474\nI1210 07:17:08.902163  1002 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI1210 07:17:08.902180  1002 solver.cpp:244]     Train net output #1: loss = 0.405474 (* 1 = 0.405474 loss)\nI1210 07:17:09.014987  1002 sgd_solver.cpp:166] Iteration 19100, lr = 2.865\nI1210 07:19:28.276793  1002 solver.cpp:337] Iteration 19200, Testing net (#0)\nI1210 07:20:49.936826  1002 solver.cpp:404]     Test net output #0: accuracy = 0.799\nI1210 07:20:49.937158  1002 solver.cpp:404]     Test net output #1: loss = 0.616376 (* 1 = 0.616376 loss)\nI1210 07:20:51.260712  1002 solver.cpp:228] Iteration 19200, loss = 0.480167\nI1210 07:20:51.260771  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1210 07:20:51.260788  1002 solver.cpp:244]     Train net output #1: loss = 0.480166 (* 1 = 0.480166 loss)\nI1210 07:20:51.366137  1002 sgd_solver.cpp:166] Iteration 19200, lr = 2.88\nI1210 07:23:10.223783  1002 solver.cpp:337] Iteration 19300, Testing net (#0)\nI1210 
07:24:31.891652  1002 solver.cpp:404]     Test net output #0: accuracy = 0.75184\nI1210 07:24:31.891958  1002 solver.cpp:404]     Test net output #1: loss = 0.741659 (* 1 = 0.741659 loss)\nI1210 07:24:33.215376  1002 solver.cpp:228] Iteration 19300, loss = 0.316737\nI1210 07:24:33.215433  1002 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1210 07:24:33.215451  1002 solver.cpp:244]     Train net output #1: loss = 0.316737 (* 1 = 0.316737 loss)\nI1210 07:24:33.319068  1002 sgd_solver.cpp:166] Iteration 19300, lr = 2.895\nI1210 07:26:52.099689  1002 solver.cpp:337] Iteration 19400, Testing net (#0)\nI1210 07:28:13.765668  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77388\nI1210 07:28:13.765995  1002 solver.cpp:404]     Test net output #1: loss = 0.724603 (* 1 = 0.724603 loss)\nI1210 07:28:15.087762  1002 solver.cpp:228] Iteration 19400, loss = 0.38693\nI1210 07:28:15.087821  1002 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1210 07:28:15.087838  1002 solver.cpp:244]     Train net output #1: loss = 0.386929 (* 1 = 0.386929 loss)\nI1210 07:28:15.197129  1002 sgd_solver.cpp:166] Iteration 19400, lr = 2.91\nI1210 07:30:34.102568  1002 solver.cpp:337] Iteration 19500, Testing net (#0)\nI1210 07:31:55.761960  1002 solver.cpp:404]     Test net output #0: accuracy = 0.7844\nI1210 07:31:55.762276  1002 solver.cpp:404]     Test net output #1: loss = 0.648538 (* 1 = 0.648538 loss)\nI1210 07:31:57.085645  1002 solver.cpp:228] Iteration 19500, loss = 0.436843\nI1210 07:31:57.085700  1002 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI1210 07:31:57.085718  1002 solver.cpp:244]     Train net output #1: loss = 0.436842 (* 1 = 0.436842 loss)\nI1210 07:31:57.185803  1002 sgd_solver.cpp:166] Iteration 19500, lr = 2.925\nI1210 07:34:16.026837  1002 solver.cpp:337] Iteration 19600, Testing net (#0)\nI1210 07:35:37.704643  1002 solver.cpp:404]     Test net output #0: accuracy = 0.69352\nI1210 07:35:37.704946  1002 solver.cpp:404]     
Test net output #1: loss = 1.27458 (* 1 = 1.27458 loss)\nI1210 07:35:39.027931  1002 solver.cpp:228] Iteration 19600, loss = 0.342318\nI1210 07:35:39.027983  1002 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1210 07:35:39.028000  1002 solver.cpp:244]     Train net output #1: loss = 0.342317 (* 1 = 0.342317 loss)\nI1210 07:35:39.127224  1002 sgd_solver.cpp:166] Iteration 19600, lr = 2.94\nI1210 07:37:57.916718  1002 solver.cpp:337] Iteration 19700, Testing net (#0)\nI1210 07:39:19.575609  1002 solver.cpp:404]     Test net output #0: accuracy = 0.67476\nI1210 07:39:19.575922  1002 solver.cpp:404]     Test net output #1: loss = 1.13728 (* 1 = 1.13728 loss)\nI1210 07:39:20.897708  1002 solver.cpp:228] Iteration 19700, loss = 0.377581\nI1210 07:39:20.897763  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1210 07:39:20.897781  1002 solver.cpp:244]     Train net output #1: loss = 0.37758 (* 1 = 0.37758 loss)\nI1210 07:39:21.003341  1002 sgd_solver.cpp:166] Iteration 19700, lr = 2.955\nI1210 07:41:39.886981  1002 solver.cpp:337] Iteration 19800, Testing net (#0)\nI1210 07:43:01.531011  1002 solver.cpp:404]     Test net output #0: accuracy = 0.71164\nI1210 07:43:01.531327  1002 solver.cpp:404]     Test net output #1: loss = 1.07777 (* 1 = 1.07777 loss)\nI1210 07:43:02.853294  1002 solver.cpp:228] Iteration 19800, loss = 0.389473\nI1210 07:43:02.853349  1002 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1210 07:43:02.853368  1002 solver.cpp:244]     Train net output #1: loss = 0.389473 (* 1 = 0.389473 loss)\nI1210 07:43:02.957823  1002 sgd_solver.cpp:166] Iteration 19800, lr = 2.97\nI1210 07:45:22.199304  1002 solver.cpp:337] Iteration 19900, Testing net (#0)\nI1210 07:46:43.853442  1002 solver.cpp:404]     Test net output #0: accuracy = 0.79348\nI1210 07:46:43.853732  1002 solver.cpp:404]     Test net output #1: loss = 0.659452 (* 1 = 0.659452 loss)\nI1210 07:46:45.176496  1002 solver.cpp:228] Iteration 19900, loss = 
0.33545\nI1210 07:46:45.176553  1002 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1210 07:46:45.176571  1002 solver.cpp:244]     Train net output #1: loss = 0.33545 (* 1 = 0.33545 loss)\nI1210 07:46:45.274310  1002 sgd_solver.cpp:166] Iteration 19900, lr = 2.985\nI1210 07:49:04.065673  1002 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/Fig2b_iter_20000.caffemodel\nI1210 07:49:04.278822  1002 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/Fig2b_iter_20000.solverstate\nI1210 07:49:04.730283  1002 solver.cpp:317] Iteration 20000, loss = 0.346545\nI1210 07:49:04.730329  1002 solver.cpp:337] Iteration 20000, Testing net (#0)\nI1210 07:50:26.404001  1002 solver.cpp:404]     Test net output #0: accuracy = 0.77872\nI1210 07:50:26.404330  1002 solver.cpp:404]     Test net output #1: loss = 0.650065 (* 1 = 0.650065 loss)\nI1210 07:50:26.404342  1002 solver.cpp:322] Optimization Done.\nI1210 07:50:32.082340  1002 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range2Res110Fig4b",
    "content": "I0818 15:07:31.288087 21769 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0818 15:07:31.290539 21769 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0818 15:07:31.291733 21769 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0818 15:07:31.292937 21769 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0818 15:07:31.294286 21769 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0818 15:07:31.295488 21769 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0818 15:07:31.296689 21769 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0818 15:07:31.297894 21769 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0818 15:07:31.299093 21769 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0818 15:07:31.720741 21769 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 80000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range2Res110Fig4b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 2\nI0818 15:07:31.724416 21769 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0818 15:07:31.750591 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:31.750679 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:31.752303 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0818 15:07:31.752359 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0818 15:07:31.752374 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0818 15:07:31.752384 21769 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0818 15:07:31.752394 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0818 15:07:31.752403 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0818 15:07:31.752413 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0818 15:07:31.752421 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0818 15:07:31.752431 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0818 15:07:31.752440 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0818 15:07:31.752449 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0818 15:07:31.752457 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0818 15:07:31.752466 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0818 15:07:31.752476 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0818 15:07:31.752485 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0818 15:07:31.752495 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0818 15:07:31.752503 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0818 15:07:31.752512 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0818 
15:07:31.752521 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0818 15:07:31.752529 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0818 15:07:31.752552 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr1_bn\nI0818 15:07:31.752562 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b10_cbr2_bn\nI0818 15:07:31.752569 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr1_bn\nI0818 15:07:31.752578 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b11_cbr2_bn\nI0818 15:07:31.752588 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr1_bn\nI0818 15:07:31.752598 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b12_cbr2_bn\nI0818 15:07:31.752606 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr1_bn\nI0818 15:07:31.752614 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b13_cbr2_bn\nI0818 15:07:31.752624 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr1_bn\nI0818 15:07:31.752631 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b14_cbr2_bn\nI0818 15:07:31.752641 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr1_bn\nI0818 15:07:31.752650 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b15_cbr2_bn\nI0818 15:07:31.752660 21769 net.cpp:322] The NetState phase (0) differed from the phase 
(1) specified by a rule in layer L1_b16_cbr1_bn\nI0818 15:07:31.752666 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b16_cbr2_bn\nI0818 15:07:31.752676 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr1_bn\nI0818 15:07:31.752691 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b17_cbr2_bn\nI0818 15:07:31.752702 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr1_bn\nI0818 15:07:31.752710 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b18_cbr2_bn\nI0818 15:07:31.752719 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0818 15:07:31.752728 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0818 15:07:31.752743 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0818 15:07:31.752751 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0818 15:07:31.752759 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0818 15:07:31.752768 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0818 15:07:31.752777 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0818 15:07:31.752786 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0818 15:07:31.752795 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0818 15:07:31.752804 21769 net.cpp:322] 
The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0818 15:07:31.752812 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0818 15:07:31.752821 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0818 15:07:31.752830 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI0818 15:07:31.752846 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0818 15:07:31.752856 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0818 15:07:31.752866 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0818 15:07:31.752874 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0818 15:07:31.752882 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0818 15:07:31.752892 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr1_bn\nI0818 15:07:31.752900 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b10_cbr2_bn\nI0818 15:07:31.752909 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr1_bn\nI0818 15:07:31.752918 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b11_cbr2_bn\nI0818 15:07:31.752928 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b12_cbr1_bn\nI0818 15:07:31.752935 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L2_b12_cbr2_bn\nI0818 15:07:31.752944 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr1_bn\nI0818 15:07:31.752954 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b13_cbr2_bn\nI0818 15:07:31.752962 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr1_bn\nI0818 15:07:31.752970 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b14_cbr2_bn\nI0818 15:07:31.752979 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr1_bn\nI0818 15:07:31.752988 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b15_cbr2_bn\nI0818 15:07:31.752997 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr1_bn\nI0818 15:07:31.753006 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b16_cbr2_bn\nI0818 15:07:31.753015 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr1_bn\nI0818 15:07:31.753023 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b17_cbr2_bn\nI0818 15:07:31.753031 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr1_bn\nI0818 15:07:31.753041 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b18_cbr2_bn\nI0818 15:07:31.753049 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0818 15:07:31.753058 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0818 15:07:31.753070 21769 net.cpp:322] The NetState phase (0) 
differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0818 15:07:31.753080 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0818 15:07:31.753089 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0818 15:07:31.753098 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0818 15:07:31.753106 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0818 15:07:31.753121 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0818 15:07:31.753131 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0818 15:07:31.753140 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0818 15:07:31.753150 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0818 15:07:31.753159 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0818 15:07:31.753166 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0818 15:07:31.753175 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0818 15:07:31.753185 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0818 15:07:31.753192 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0818 15:07:31.753201 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0818 15:07:31.753209 
21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0818 15:07:31.753219 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr1_bn\nI0818 15:07:31.753226 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b10_cbr2_bn\nI0818 15:07:31.753235 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr1_bn\nI0818 15:07:31.753244 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b11_cbr2_bn\nI0818 15:07:31.753253 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr1_bn\nI0818 15:07:31.753262 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b12_cbr2_bn\nI0818 15:07:31.753270 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr1_bn\nI0818 15:07:31.753278 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b13_cbr2_bn\nI0818 15:07:31.753288 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr1_bn\nI0818 15:07:31.753296 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b14_cbr2_bn\nI0818 15:07:31.753305 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr1_bn\nI0818 15:07:31.753314 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b15_cbr2_bn\nI0818 15:07:31.753324 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b16_cbr1_bn\nI0818 15:07:31.753331 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by 
a rule in layer L3_b16_cbr2_bn\nI0818 15:07:31.753340 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr1_bn\nI0818 15:07:31.753350 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b17_cbr2_bn\nI0818 15:07:31.753358 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr1_bn\nI0818 15:07:31.753367 21769 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b18_cbr2_bn\nI0818 15:07:31.756638 21769 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: 
\"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: 
\"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n  
  bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: 
\"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: 
\"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n  
  num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  
}\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\nI0818 15:07:31.760318 21769 layer_factory.hpp:77] Creating layer dataLayer\nI0818 15:07:31.762596 21769 net.cpp:100] Creating Layer dataLayer\nI0818 15:07:31.762657 21769 net.cpp:408] dataLayer -> data_top\nI0818 15:07:31.762866 21769 net.cpp:408] dataLayer -> label\nI0818 15:07:31.762966 21769 
data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 15:07:31.792853 21774 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0818 15:07:31.834955 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:31.840919 21769 net.cpp:150] Setting up dataLayer\nI0818 15:07:31.840986 21769 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0818 15:07:31.840999 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:31.841006 21769 net.cpp:165] Memory required for data: 1229200\nI0818 15:07:31.841022 21769 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 15:07:31.841037 21769 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 15:07:31.841045 21769 net.cpp:434] label_dataLayer_1_split <- label\nI0818 15:07:31.841063 21769 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 15:07:31.841085 21769 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 15:07:31.841156 21769 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 15:07:31.841169 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:31.841176 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:31.841181 21769 net.cpp:165] Memory required for data: 1230000\nI0818 15:07:31.841187 21769 layer_factory.hpp:77] Creating layer pre_conv\nI0818 15:07:31.841254 21769 net.cpp:100] Creating Layer pre_conv\nI0818 15:07:31.841267 21769 net.cpp:434] pre_conv <- data_top\nI0818 15:07:31.841280 21769 net.cpp:408] pre_conv -> pre_conv_top\nI0818 15:07:31.843139 21769 net.cpp:150] Setting up pre_conv\nI0818 15:07:31.843160 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.843166 21769 net.cpp:165] Memory required for data: 7783600\nI0818 15:07:31.843225 21769 layer_factory.hpp:77] Creating layer pre_bn\nI0818 15:07:31.843300 21769 net.cpp:100] Creating Layer pre_bn\nI0818 15:07:31.843313 21769 net.cpp:434] pre_bn <- pre_conv_top\nI0818 15:07:31.843322 21769 net.cpp:408] pre_bn -> 
pre_bn_top\nI0818 15:07:31.843447 21775 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:31.843674 21769 net.cpp:150] Setting up pre_bn\nI0818 15:07:31.843698 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.843704 21769 net.cpp:165] Memory required for data: 14337200\nI0818 15:07:31.843721 21769 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:31.843777 21769 net.cpp:100] Creating Layer pre_scale\nI0818 15:07:31.843788 21769 net.cpp:434] pre_scale <- pre_bn_top\nI0818 15:07:31.843797 21769 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 15:07:31.843981 21769 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:31.846307 21769 net.cpp:150] Setting up pre_scale\nI0818 15:07:31.846324 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.846330 21769 net.cpp:165] Memory required for data: 20890800\nI0818 15:07:31.846343 21769 layer_factory.hpp:77] Creating layer pre_relu\nI0818 15:07:31.846391 21769 net.cpp:100] Creating Layer pre_relu\nI0818 15:07:31.846401 21769 net.cpp:434] pre_relu <- pre_bn_top\nI0818 15:07:31.846413 21769 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 15:07:31.846424 21769 net.cpp:150] Setting up pre_relu\nI0818 15:07:31.846432 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.846437 21769 net.cpp:165] Memory required for data: 27444400\nI0818 15:07:31.846442 21769 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 15:07:31.846453 21769 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 15:07:31.846458 21769 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 15:07:31.846467 21769 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 15:07:31.846475 21769 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 15:07:31.846524 21769 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 15:07:31.846536 21769 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0818 15:07:31.846544 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.846547 21769 net.cpp:165] Memory required for data: 40551600\nI0818 15:07:31.846552 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 15:07:31.846567 21769 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 15:07:31.846573 21769 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 15:07:31.846582 21769 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 15:07:31.846887 21769 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 15:07:31.846904 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.846909 21769 net.cpp:165] Memory required for data: 47105200\nI0818 15:07:31.846921 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 15:07:31.846935 21769 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 15:07:31.846942 21769 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 15:07:31.846953 21769 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 15:07:31.847183 21769 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 15:07:31.847196 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.847201 21769 net.cpp:165] Memory required for data: 53658800\nI0818 15:07:31.847213 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:31.847221 21769 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 15:07:31.847228 21769 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 15:07:31.847235 21769 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.847290 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:31.847424 21769 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 15:07:31.847437 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.847442 21769 net.cpp:165] Memory required for data: 60212400\nI0818 15:07:31.847451 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 15:07:31.847460 21769 net.cpp:100] 
Creating Layer L1_b1_cbr1_relu\nI0818 15:07:31.847465 21769 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 15:07:31.847476 21769 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.847486 21769 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 15:07:31.847492 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.847497 21769 net.cpp:165] Memory required for data: 66766000\nI0818 15:07:31.847502 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 15:07:31.847515 21769 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 15:07:31.847522 21769 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 15:07:31.847530 21769 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 15:07:31.847843 21769 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 15:07:31.847867 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.847873 21769 net.cpp:165] Memory required for data: 73319600\nI0818 15:07:31.847882 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 15:07:31.847895 21769 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 15:07:31.847901 21769 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 15:07:31.847909 21769 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 15:07:31.848140 21769 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 15:07:31.848155 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848160 21769 net.cpp:165] Memory required for data: 79873200\nI0818 15:07:31.848176 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:31.848184 21769 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 15:07:31.848191 21769 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 15:07:31.848198 21769 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 15:07:31.848255 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:31.848392 21769 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 15:07:31.848404 21769 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848409 21769 net.cpp:165] Memory required for data: 86426800\nI0818 15:07:31.848419 21769 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 15:07:31.848479 21769 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 15:07:31.848490 21769 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 15:07:31.848498 21769 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 15:07:31.848506 21769 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 15:07:31.848585 21769 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 15:07:31.848600 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848605 21769 net.cpp:165] Memory required for data: 92980400\nI0818 15:07:31.848611 21769 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 15:07:31.848619 21769 net.cpp:100] Creating Layer L1_b1_relu\nI0818 15:07:31.848625 21769 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 15:07:31.848633 21769 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 15:07:31.848641 21769 net.cpp:150] Setting up L1_b1_relu\nI0818 15:07:31.848649 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848654 21769 net.cpp:165] Memory required for data: 99534000\nI0818 15:07:31.848659 21769 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:31.848667 21769 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:31.848672 21769 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 15:07:31.848688 21769 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:31.848700 21769 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:31.848742 21769 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:31.848753 21769 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848759 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.848764 21769 net.cpp:165] Memory required for data: 112641200\nI0818 15:07:31.848769 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 15:07:31.848784 21769 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 15:07:31.848791 21769 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:31.848800 21769 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 15:07:31.849108 21769 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 15:07:31.849123 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.849128 21769 net.cpp:165] Memory required for data: 119194800\nI0818 15:07:31.849138 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 15:07:31.849149 21769 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 15:07:31.849155 21769 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 15:07:31.849171 21769 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 15:07:31.849407 21769 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 15:07:31.849421 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.849426 21769 net.cpp:165] Memory required for data: 125748400\nI0818 15:07:31.849437 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:31.849447 21769 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 15:07:31.849452 21769 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 15:07:31.849462 21769 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.849516 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:31.849656 21769 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 15:07:31.849670 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.849675 21769 net.cpp:165] Memory required for data: 132302000\nI0818 15:07:31.849691 21769 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_relu\nI0818 15:07:31.849700 21769 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 15:07:31.849705 21769 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 15:07:31.849716 21769 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.849726 21769 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 15:07:31.849733 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.849738 21769 net.cpp:165] Memory required for data: 138855600\nI0818 15:07:31.849743 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 15:07:31.849757 21769 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 15:07:31.849763 21769 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 15:07:31.849771 21769 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 15:07:31.850076 21769 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 15:07:31.850091 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850096 21769 net.cpp:165] Memory required for data: 145409200\nI0818 15:07:31.850105 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 15:07:31.850113 21769 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 15:07:31.850119 21769 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 15:07:31.850131 21769 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 15:07:31.850359 21769 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 15:07:31.850373 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850378 21769 net.cpp:165] Memory required for data: 151962800\nI0818 15:07:31.850396 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:31.850406 21769 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 15:07:31.850412 21769 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 15:07:31.850422 21769 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 15:07:31.850476 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:31.850610 21769 net.cpp:150] 
Setting up L1_b2_cbr2_scale\nI0818 15:07:31.850623 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850628 21769 net.cpp:165] Memory required for data: 158516400\nI0818 15:07:31.850636 21769 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 15:07:31.850649 21769 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 15:07:31.850656 21769 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 15:07:31.850662 21769 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:31.850669 21769 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 15:07:31.850715 21769 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 15:07:31.850728 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850733 21769 net.cpp:165] Memory required for data: 165070000\nI0818 15:07:31.850739 21769 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 15:07:31.850745 21769 net.cpp:100] Creating Layer L1_b2_relu\nI0818 15:07:31.850751 21769 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 15:07:31.850761 21769 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 15:07:31.850778 21769 net.cpp:150] Setting up L1_b2_relu\nI0818 15:07:31.850786 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850790 21769 net.cpp:165] Memory required for data: 171623600\nI0818 15:07:31.850795 21769 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:31.850802 21769 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:31.850807 21769 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 15:07:31.850814 21769 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:31.850824 21769 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:31.850867 21769 net.cpp:150] Setting up 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:31.850879 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850886 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.850890 21769 net.cpp:165] Memory required for data: 184730800\nI0818 15:07:31.850896 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 15:07:31.850908 21769 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 15:07:31.850913 21769 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:31.850924 21769 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 15:07:31.851217 21769 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 15:07:31.851233 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.851238 21769 net.cpp:165] Memory required for data: 191284400\nI0818 15:07:31.851246 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 15:07:31.851255 21769 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 15:07:31.851261 21769 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 15:07:31.851274 21769 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 15:07:31.851508 21769 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 15:07:31.851524 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.851531 21769 net.cpp:165] Memory required for data: 197838000\nI0818 15:07:31.851541 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:31.851549 21769 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 15:07:31.851555 21769 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 15:07:31.851562 21769 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.851613 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:31.851758 21769 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 15:07:31.851773 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.851778 21769 net.cpp:165] Memory required for data: 
204391600\nI0818 15:07:31.851786 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 15:07:31.851797 21769 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 15:07:31.851804 21769 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 15:07:31.851811 21769 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.851820 21769 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 15:07:31.851827 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.851832 21769 net.cpp:165] Memory required for data: 210945200\nI0818 15:07:31.851837 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 15:07:31.851851 21769 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 15:07:31.851857 21769 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 15:07:31.851868 21769 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 15:07:31.852166 21769 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 15:07:31.852180 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852185 21769 net.cpp:165] Memory required for data: 217498800\nI0818 15:07:31.852195 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 15:07:31.852208 21769 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 15:07:31.852214 21769 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 15:07:31.852231 21769 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 15:07:31.852461 21769 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 15:07:31.852478 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852483 21769 net.cpp:165] Memory required for data: 224052400\nI0818 15:07:31.852494 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:31.852504 21769 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 15:07:31.852509 21769 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 15:07:31.852517 21769 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 15:07:31.852568 21769 
layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:31.852710 21769 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 15:07:31.852723 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852728 21769 net.cpp:165] Memory required for data: 230606000\nI0818 15:07:31.852737 21769 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 15:07:31.852754 21769 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 15:07:31.852761 21769 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 15:07:31.852769 21769 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:31.852779 21769 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 15:07:31.852810 21769 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 15:07:31.852821 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852826 21769 net.cpp:165] Memory required for data: 237159600\nI0818 15:07:31.852831 21769 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 15:07:31.852843 21769 net.cpp:100] Creating Layer L1_b3_relu\nI0818 15:07:31.852849 21769 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 15:07:31.852855 21769 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 15:07:31.852864 21769 net.cpp:150] Setting up L1_b3_relu\nI0818 15:07:31.852871 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852875 21769 net.cpp:165] Memory required for data: 243713200\nI0818 15:07:31.852880 21769 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:31.852887 21769 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:31.852893 21769 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 15:07:31.852900 21769 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:31.852910 21769 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:31.852954 21769 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:31.852967 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852972 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.852977 21769 net.cpp:165] Memory required for data: 256820400\nI0818 15:07:31.852982 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 15:07:31.852996 21769 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 15:07:31.853003 21769 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:31.853011 21769 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 15:07:31.853310 21769 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 15:07:31.853323 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.853328 21769 net.cpp:165] Memory required for data: 263374000\nI0818 15:07:31.853338 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 15:07:31.853349 21769 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 15:07:31.853356 21769 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 15:07:31.853364 21769 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 15:07:31.853595 21769 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 15:07:31.853611 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.853617 21769 net.cpp:165] Memory required for data: 269927600\nI0818 15:07:31.853636 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:31.853644 21769 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 15:07:31.853651 21769 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 15:07:31.853658 21769 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.853721 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:31.853860 21769 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 15:07:31.853874 21769 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0818 15:07:31.853879 21769 net.cpp:165] Memory required for data: 276481200\nI0818 15:07:31.853888 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 15:07:31.853895 21769 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 15:07:31.853901 21769 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 15:07:31.853911 21769 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.853921 21769 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 15:07:31.853929 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.853934 21769 net.cpp:165] Memory required for data: 283034800\nI0818 15:07:31.853938 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 15:07:31.853950 21769 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 15:07:31.853955 21769 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 15:07:31.853965 21769 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 15:07:31.854266 21769 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 15:07:31.854280 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.854285 21769 net.cpp:165] Memory required for data: 289588400\nI0818 15:07:31.854295 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 15:07:31.854303 21769 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 15:07:31.854310 21769 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 15:07:31.854321 21769 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 15:07:31.854562 21769 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 15:07:31.854576 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.854583 21769 net.cpp:165] Memory required for data: 296142000\nI0818 15:07:31.854593 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:31.854601 21769 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 15:07:31.854606 21769 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 15:07:31.854614 21769 net.cpp:395] 
L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 15:07:31.854668 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:31.854809 21769 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 15:07:31.854825 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.854830 21769 net.cpp:165] Memory required for data: 302695600\nI0818 15:07:31.854838 21769 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 15:07:31.854847 21769 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 15:07:31.854853 21769 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 15:07:31.854861 21769 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:31.854871 21769 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 15:07:31.854902 21769 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 15:07:31.854912 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.854917 21769 net.cpp:165] Memory required for data: 309249200\nI0818 15:07:31.854921 21769 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 15:07:31.854933 21769 net.cpp:100] Creating Layer L1_b4_relu\nI0818 15:07:31.854938 21769 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 15:07:31.854945 21769 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 15:07:31.854954 21769 net.cpp:150] Setting up L1_b4_relu\nI0818 15:07:31.854961 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.854965 21769 net.cpp:165] Memory required for data: 315802800\nI0818 15:07:31.854970 21769 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:31.854984 21769 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:31.854990 21769 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 15:07:31.854998 21769 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 
15:07:31.855007 21769 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:31.855051 21769 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:31.855063 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.855070 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.855074 21769 net.cpp:165] Memory required for data: 328910000\nI0818 15:07:31.855079 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 15:07:31.855093 21769 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 15:07:31.855100 21769 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:31.855109 21769 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 15:07:31.855412 21769 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 15:07:31.855425 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.855430 21769 net.cpp:165] Memory required for data: 335463600\nI0818 15:07:31.855453 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 15:07:31.855464 21769 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 15:07:31.855471 21769 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 15:07:31.855482 21769 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 15:07:31.855727 21769 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0818 15:07:31.855741 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.855746 21769 net.cpp:165] Memory required for data: 342017200\nI0818 15:07:31.855757 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:31.855767 21769 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 15:07:31.855772 21769 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 15:07:31.855779 21769 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.855834 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:31.855973 21769 net.cpp:150] Setting 
up L1_b5_cbr1_scale\nI0818 15:07:31.855985 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.855990 21769 net.cpp:165] Memory required for data: 348570800\nI0818 15:07:31.855999 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 15:07:31.856007 21769 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 15:07:31.856014 21769 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 15:07:31.856024 21769 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.856034 21769 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 15:07:31.856040 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.856045 21769 net.cpp:165] Memory required for data: 355124400\nI0818 15:07:31.856050 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 15:07:31.856063 21769 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 15:07:31.856070 21769 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 15:07:31.856078 21769 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 15:07:31.856385 21769 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 15:07:31.856400 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.856405 21769 net.cpp:165] Memory required for data: 361678000\nI0818 15:07:31.856413 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 15:07:31.856422 21769 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 15:07:31.856428 21769 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 15:07:31.856439 21769 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 15:07:31.856673 21769 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 15:07:31.856693 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.856699 21769 net.cpp:165] Memory required for data: 368231600\nI0818 15:07:31.856717 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:31.856727 21769 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 15:07:31.856732 21769 net.cpp:434] 
L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 15:07:31.856740 21769 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 15:07:31.856794 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:31.856932 21769 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 15:07:31.856945 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.856950 21769 net.cpp:165] Memory required for data: 374785200\nI0818 15:07:31.856961 21769 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 15:07:31.856972 21769 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 15:07:31.856978 21769 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 15:07:31.856986 21769 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:31.856999 21769 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 15:07:31.857029 21769 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 15:07:31.857038 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857043 21769 net.cpp:165] Memory required for data: 381338800\nI0818 15:07:31.857048 21769 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 15:07:31.857059 21769 net.cpp:100] Creating Layer L1_b5_relu\nI0818 15:07:31.857065 21769 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 15:07:31.857072 21769 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0818 15:07:31.857080 21769 net.cpp:150] Setting up L1_b5_relu\nI0818 15:07:31.857087 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857092 21769 net.cpp:165] Memory required for data: 387892400\nI0818 15:07:31.857097 21769 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:31.857105 21769 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:31.857110 21769 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 15:07:31.857116 21769 net.cpp:408] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:31.857125 21769 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:31.857168 21769 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:31.857180 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857187 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857192 21769 net.cpp:165] Memory required for data: 400999600\nI0818 15:07:31.857197 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 15:07:31.857210 21769 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 15:07:31.857216 21769 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:31.857225 21769 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 15:07:31.857525 21769 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 15:07:31.857539 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857544 21769 net.cpp:165] Memory required for data: 407553200\nI0818 15:07:31.857553 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 15:07:31.857565 21769 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 15:07:31.857571 21769 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 15:07:31.857579 21769 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0818 15:07:31.857852 21769 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 15:07:31.857870 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.857877 21769 net.cpp:165] Memory required for data: 414106800\nI0818 15:07:31.857887 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:31.857897 21769 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 15:07:31.857903 21769 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 15:07:31.857910 21769 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.857971 21769 
layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:31.858114 21769 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0818 15:07:31.858129 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.858134 21769 net.cpp:165] Memory required for data: 420660400\nI0818 15:07:31.858144 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 15:07:31.858155 21769 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 15:07:31.858161 21769 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 15:07:31.858171 21769 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.858181 21769 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 15:07:31.858188 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.858192 21769 net.cpp:165] Memory required for data: 427214000\nI0818 15:07:31.858197 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 15:07:31.858209 21769 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 15:07:31.858214 21769 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 15:07:31.858225 21769 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 15:07:31.858534 21769 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 15:07:31.858548 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.858553 21769 net.cpp:165] Memory required for data: 433767600\nI0818 15:07:31.858562 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 15:07:31.858572 21769 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 15:07:31.858577 21769 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 15:07:31.858588 21769 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 15:07:31.858836 21769 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 15:07:31.858850 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.858855 21769 net.cpp:165] Memory required for data: 440321200\nI0818 15:07:31.858866 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:31.858877 
21769 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 15:07:31.858885 21769 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 15:07:31.858892 21769 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 15:07:31.858944 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:31.859086 21769 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 15:07:31.859098 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859103 21769 net.cpp:165] Memory required for data: 446874800\nI0818 15:07:31.859112 21769 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 15:07:31.859131 21769 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 15:07:31.859138 21769 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 15:07:31.859145 21769 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:31.859153 21769 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 15:07:31.859187 21769 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 15:07:31.859200 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859205 21769 net.cpp:165] Memory required for data: 453428400\nI0818 15:07:31.859210 21769 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 15:07:31.859217 21769 net.cpp:100] Creating Layer L1_b6_relu\nI0818 15:07:31.859222 21769 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0818 15:07:31.859230 21769 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 15:07:31.859238 21769 net.cpp:150] Setting up L1_b6_relu\nI0818 15:07:31.859244 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859249 21769 net.cpp:165] Memory required for data: 459982000\nI0818 15:07:31.859254 21769 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:31.859261 21769 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:31.859266 21769 net.cpp:434] 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 15:07:31.859277 21769 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:31.859295 21769 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:31.859338 21769 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:31.859347 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859354 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859359 21769 net.cpp:165] Memory required for data: 473089200\nI0818 15:07:31.859364 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 15:07:31.859378 21769 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 15:07:31.859385 21769 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:31.859393 21769 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 15:07:31.859705 21769 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 15:07:31.859720 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.859725 21769 net.cpp:165] Memory required for data: 479642800\nI0818 15:07:31.859735 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 15:07:31.859750 21769 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 15:07:31.859756 21769 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0818 15:07:31.859767 21769 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 15:07:31.860004 21769 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 15:07:31.860018 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.860023 21769 net.cpp:165] Memory required for data: 486196400\nI0818 15:07:31.860033 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:31.860043 21769 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 15:07:31.860049 21769 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 15:07:31.860055 
21769 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.860116 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:31.860255 21769 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 15:07:31.860267 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.860272 21769 net.cpp:165] Memory required for data: 492750000\nI0818 15:07:31.860282 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 15:07:31.860292 21769 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 15:07:31.860299 21769 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 15:07:31.860306 21769 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.860316 21769 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 15:07:31.860322 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.860327 21769 net.cpp:165] Memory required for data: 499303600\nI0818 15:07:31.860332 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 15:07:31.860347 21769 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 15:07:31.860352 21769 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 15:07:31.860365 21769 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 15:07:31.860671 21769 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 15:07:31.860692 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.860697 21769 net.cpp:165] Memory required for data: 505857200\nI0818 15:07:31.860705 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 15:07:31.860718 21769 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 15:07:31.860724 21769 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 15:07:31.860735 21769 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 15:07:31.860970 21769 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 15:07:31.860983 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.860987 21769 net.cpp:165] Memory required for data: 512410800\nI0818 
15:07:31.860997 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:31.861006 21769 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 15:07:31.861012 21769 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 15:07:31.861021 21769 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 15:07:31.861085 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:31.861224 21769 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 15:07:31.861238 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861243 21769 net.cpp:165] Memory required for data: 518964400\nI0818 15:07:31.861253 21769 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 15:07:31.861261 21769 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 15:07:31.861270 21769 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 15:07:31.861279 21769 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:31.861285 21769 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 15:07:31.861318 21769 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 15:07:31.861328 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861333 21769 net.cpp:165] Memory required for data: 525518000\nI0818 15:07:31.861338 21769 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 15:07:31.861346 21769 net.cpp:100] Creating Layer L1_b7_relu\nI0818 15:07:31.861351 21769 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 15:07:31.861361 21769 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 15:07:31.861371 21769 net.cpp:150] Setting up L1_b7_relu\nI0818 15:07:31.861377 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861382 21769 net.cpp:165] Memory required for data: 532071600\nI0818 15:07:31.861387 21769 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:31.861394 21769 net.cpp:100] Creating Layer 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:31.861399 21769 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 15:07:31.861407 21769 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:31.861415 21769 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:31.861459 21769 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:31.861471 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861477 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861482 21769 net.cpp:165] Memory required for data: 545178800\nI0818 15:07:31.861487 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 15:07:31.861505 21769 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 15:07:31.861511 21769 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:31.861521 21769 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 15:07:31.861840 21769 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 15:07:31.861855 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.861860 21769 net.cpp:165] Memory required for data: 551732400\nI0818 15:07:31.861868 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 15:07:31.861881 21769 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0818 15:07:31.861888 21769 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 15:07:31.861899 21769 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 15:07:31.862138 21769 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 15:07:31.862150 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.862155 21769 net.cpp:165] Memory required for data: 558286000\nI0818 15:07:31.862165 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:31.862174 21769 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 
15:07:31.862180 21769 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 15:07:31.862187 21769 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:31.862242 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:31.862380 21769 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 15:07:31.862393 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.862398 21769 net.cpp:165] Memory required for data: 564839600\nI0818 15:07:31.862407 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 15:07:31.862426 21769 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 15:07:31.862433 21769 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 15:07:31.862440 21769 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:31.862450 21769 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 15:07:31.862457 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.862462 21769 net.cpp:165] Memory required for data: 571393200\nI0818 15:07:31.862467 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 15:07:31.862480 21769 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 15:07:31.862486 21769 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 15:07:31.862498 21769 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 15:07:31.862819 21769 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 15:07:31.862834 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.862839 21769 net.cpp:165] Memory required for data: 577946800\nI0818 15:07:31.862848 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 15:07:31.862860 21769 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 15:07:31.862866 21769 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 15:07:31.862874 21769 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 15:07:31.863124 21769 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 15:07:31.863138 21769 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0818 15:07:31.863143 21769 net.cpp:165] Memory required for data: 584500400\nI0818 15:07:31.863153 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:31.863162 21769 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 15:07:31.863168 21769 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 15:07:31.863176 21769 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 15:07:31.863232 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:31.863371 21769 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 15:07:31.863385 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.863390 21769 net.cpp:165] Memory required for data: 591054000\nI0818 15:07:31.863399 21769 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 15:07:31.863407 21769 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 15:07:31.863414 21769 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 15:07:31.863420 21769 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:31.863431 21769 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 15:07:31.863462 21769 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 15:07:31.863471 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.863476 21769 net.cpp:165] Memory required for data: 597607600\nI0818 15:07:31.863481 21769 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0818 15:07:31.863492 21769 net.cpp:100] Creating Layer L1_b8_relu\nI0818 15:07:31.863498 21769 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 15:07:31.863505 21769 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 15:07:31.863514 21769 net.cpp:150] Setting up L1_b8_relu\nI0818 15:07:31.863521 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.863525 21769 net.cpp:165] Memory required for data: 604161200\nI0818 15:07:31.863530 21769 layer_factory.hpp:77] Creating layer 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:31.863538 21769 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:31.863543 21769 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 15:07:31.863550 21769 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:31.863559 21769 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:31.863605 21769 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:31.863616 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.863631 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.863636 21769 net.cpp:165] Memory required for data: 617268400\nI0818 15:07:31.863641 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 15:07:31.863654 21769 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 15:07:31.863662 21769 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:31.863670 21769 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 15:07:31.863996 21769 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 15:07:31.864011 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.864015 21769 net.cpp:165] Memory required for data: 623822000\nI0818 15:07:31.864024 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0818 15:07:31.864037 21769 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 15:07:31.864043 21769 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 15:07:31.864051 21769 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 15:07:31.864295 21769 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 15:07:31.864310 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.864315 21769 net.cpp:165] Memory required for data: 630375600\nI0818 15:07:31.864325 21769 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_scale\nI0818 15:07:31.864336 21769 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 15:07:31.864343 21769 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 15:07:31.864351 21769 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.864406 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:31.864554 21769 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 15:07:31.864567 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.864573 21769 net.cpp:165] Memory required for data: 636929200\nI0818 15:07:31.864581 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 15:07:31.864589 21769 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 15:07:31.864595 21769 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 15:07:31.864605 21769 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.864615 21769 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 15:07:31.864622 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.864627 21769 net.cpp:165] Memory required for data: 643482800\nI0818 15:07:31.864632 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 15:07:31.864646 21769 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 15:07:31.864652 21769 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 15:07:31.864660 21769 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 15:07:31.864987 21769 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 15:07:31.865002 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865007 21769 net.cpp:165] Memory required for data: 650036400\nI0818 15:07:31.865016 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 15:07:31.865028 21769 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 15:07:31.865036 21769 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 15:07:31.865043 21769 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 15:07:31.865281 21769 
net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 15:07:31.865294 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865299 21769 net.cpp:165] Memory required for data: 656590000\nI0818 15:07:31.865331 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:31.865344 21769 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 15:07:31.865350 21769 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 15:07:31.865358 21769 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 15:07:31.865414 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:31.865552 21769 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 15:07:31.865566 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865571 21769 net.cpp:165] Memory required for data: 663143600\nI0818 15:07:31.865586 21769 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 15:07:31.865597 21769 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 15:07:31.865602 21769 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 15:07:31.865609 21769 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:31.865617 21769 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 15:07:31.865651 21769 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 15:07:31.865664 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865669 21769 net.cpp:165] Memory required for data: 669697200\nI0818 15:07:31.865674 21769 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 15:07:31.865681 21769 net.cpp:100] Creating Layer L1_b9_relu\nI0818 15:07:31.865694 21769 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 15:07:31.865703 21769 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 15:07:31.865713 21769 net.cpp:150] Setting up L1_b9_relu\nI0818 15:07:31.865720 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865725 21769 net.cpp:165] Memory required for 
data: 676250800\nI0818 15:07:31.865731 21769 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:31.865737 21769 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:31.865742 21769 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 15:07:31.865754 21769 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:31.865764 21769 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:31.865808 21769 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:31.865818 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865825 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.865829 21769 net.cpp:165] Memory required for data: 689358000\nI0818 15:07:31.865835 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0818 15:07:31.865852 21769 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0818 15:07:31.865859 21769 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:31.865869 21769 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0818 15:07:31.866180 21769 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0818 15:07:31.866194 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.866199 21769 net.cpp:165] Memory required for data: 695911600\nI0818 15:07:31.866207 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0818 15:07:31.866219 21769 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0818 15:07:31.866226 21769 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0818 15:07:31.866237 21769 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0818 15:07:31.866477 21769 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0818 15:07:31.866490 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.866495 21769 net.cpp:165] Memory required for 
data: 702465200\nI0818 15:07:31.866504 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0818 15:07:31.866513 21769 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0818 15:07:31.866519 21769 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0818 15:07:31.866529 21769 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.866587 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0818 15:07:31.866734 21769 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0818 15:07:31.866747 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.866752 21769 net.cpp:165] Memory required for data: 709018800\nI0818 15:07:31.866761 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0818 15:07:31.866773 21769 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0818 15:07:31.866780 21769 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0818 15:07:31.866787 21769 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.866803 21769 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0818 15:07:31.866811 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.866816 21769 net.cpp:165] Memory required for data: 715572400\nI0818 15:07:31.866821 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0818 15:07:31.866834 21769 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0818 15:07:31.866840 21769 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0818 15:07:31.866852 21769 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0818 15:07:31.867166 21769 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0818 15:07:31.867179 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.867184 21769 net.cpp:165] Memory required for data: 722126000\nI0818 15:07:31.867193 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0818 15:07:31.867205 21769 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0818 15:07:31.867213 21769 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0818 
15:07:31.867225 21769 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0818 15:07:31.867475 21769 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0818 15:07:31.867487 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.867492 21769 net.cpp:165] Memory required for data: 728679600\nI0818 15:07:31.867502 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0818 15:07:31.867511 21769 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0818 15:07:31.867517 21769 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0818 15:07:31.867525 21769 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0818 15:07:31.867581 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0818 15:07:31.867765 21769 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0818 15:07:31.867787 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.867796 21769 net.cpp:165] Memory required for data: 735233200\nI0818 15:07:31.867812 21769 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0818 15:07:31.867825 21769 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0818 15:07:31.867841 21769 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0818 15:07:31.867852 21769 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:31.867866 21769 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0818 15:07:31.867910 21769 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0818 15:07:31.867921 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.867926 21769 net.cpp:165] Memory required for data: 741786800\nI0818 15:07:31.867933 21769 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0818 15:07:31.867940 21769 net.cpp:100] Creating Layer L1_b10_relu\nI0818 15:07:31.867946 21769 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0818 15:07:31.867955 21769 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top (in-place)\nI0818 15:07:31.867965 21769 net.cpp:150] Setting up L1_b10_relu\nI0818 
15:07:31.867975 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.867983 21769 net.cpp:165] Memory required for data: 748340400\nI0818 15:07:31.867992 21769 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:31.868005 21769 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:31.868016 21769 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0818 15:07:31.868031 21769 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0818 15:07:31.868047 21769 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0818 15:07:31.868110 21769 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:31.868125 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.868131 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.868136 21769 net.cpp:165] Memory required for data: 761447600\nI0818 15:07:31.868142 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0818 15:07:31.868165 21769 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0818 15:07:31.868171 21769 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0818 15:07:31.868181 21769 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0818 15:07:31.868513 21769 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0818 15:07:31.868533 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.868541 21769 net.cpp:165] Memory required for data: 768001200\nI0818 15:07:31.868557 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0818 15:07:31.868577 21769 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0818 15:07:31.868587 21769 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0818 15:07:31.868605 21769 net.cpp:408] L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0818 15:07:31.868876 21769 net.cpp:150] Setting up 
L1_b11_cbr1_bn\nI0818 15:07:31.868892 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.868897 21769 net.cpp:165] Memory required for data: 774554800\nI0818 15:07:31.868908 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0818 15:07:31.868917 21769 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0818 15:07:31.868923 21769 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0818 15:07:31.868932 21769 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.868988 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0818 15:07:31.869133 21769 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0818 15:07:31.869148 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.869153 21769 net.cpp:165] Memory required for data: 781108400\nI0818 15:07:31.869161 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0818 15:07:31.869173 21769 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0818 15:07:31.869179 21769 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0818 15:07:31.869186 21769 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.869196 21769 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0818 15:07:31.869204 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.869207 21769 net.cpp:165] Memory required for data: 787662000\nI0818 15:07:31.869212 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0818 15:07:31.869226 21769 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0818 15:07:31.869233 21769 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0818 15:07:31.869244 21769 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0818 15:07:31.869565 21769 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0818 15:07:31.869578 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.869583 21769 net.cpp:165] Memory required for data: 794215600\nI0818 15:07:31.869592 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0818 
15:07:31.869604 21769 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0818 15:07:31.869611 21769 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0818 15:07:31.869619 21769 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0818 15:07:31.869871 21769 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0818 15:07:31.869886 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.869891 21769 net.cpp:165] Memory required for data: 800769200\nI0818 15:07:31.869901 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0818 15:07:31.869910 21769 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0818 15:07:31.869916 21769 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0818 15:07:31.869925 21769 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0818 15:07:31.869981 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0818 15:07:31.870126 21769 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0818 15:07:31.870139 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870144 21769 net.cpp:165] Memory required for data: 807322800\nI0818 15:07:31.870153 21769 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0818 15:07:31.870162 21769 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0818 15:07:31.870168 21769 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0818 15:07:31.870182 21769 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0818 15:07:31.870194 21769 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0818 15:07:31.870226 21769 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0818 15:07:31.870235 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870240 21769 net.cpp:165] Memory required for data: 813876400\nI0818 15:07:31.870246 21769 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0818 15:07:31.870256 21769 net.cpp:100] Creating Layer L1_b11_relu\nI0818 15:07:31.870262 21769 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0818 
15:07:31.870270 21769 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top (in-place)\nI0818 15:07:31.870280 21769 net.cpp:150] Setting up L1_b11_relu\nI0818 15:07:31.870286 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870291 21769 net.cpp:165] Memory required for data: 820430000\nI0818 15:07:31.870296 21769 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:31.870303 21769 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:31.870308 21769 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0818 15:07:31.870316 21769 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0818 15:07:31.870324 21769 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0818 15:07:31.870369 21769 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:31.870381 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870388 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870393 21769 net.cpp:165] Memory required for data: 833537200\nI0818 15:07:31.870398 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0818 15:07:31.870412 21769 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0818 15:07:31.870419 21769 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0818 15:07:31.870427 21769 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0818 15:07:31.870753 21769 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0818 15:07:31.870767 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.870772 21769 net.cpp:165] Memory required for data: 840090800\nI0818 15:07:31.870781 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0818 15:07:31.870793 21769 net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0818 15:07:31.870800 21769 net.cpp:434] L1_b12_cbr1_bn <- 
L1_b12_cbr1_conv_top\nI0818 15:07:31.870810 21769 net.cpp:408] L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0818 15:07:31.871054 21769 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0818 15:07:31.871068 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.871073 21769 net.cpp:165] Memory required for data: 846644400\nI0818 15:07:31.871083 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0818 15:07:31.871093 21769 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0818 15:07:31.871098 21769 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0818 15:07:31.871106 21769 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.871162 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0818 15:07:31.871302 21769 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0818 15:07:31.871315 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.871320 21769 net.cpp:165] Memory required for data: 853198000\nI0818 15:07:31.871330 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0818 15:07:31.871337 21769 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0818 15:07:31.871345 21769 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0818 15:07:31.871354 21769 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.871363 21769 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0818 15:07:31.871371 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.871376 21769 net.cpp:165] Memory required for data: 859751600\nI0818 15:07:31.871387 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0818 15:07:31.871402 21769 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0818 15:07:31.871407 21769 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0818 15:07:31.871419 21769 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0818 15:07:31.871744 21769 net.cpp:150] Setting up L1_b12_cbr2_conv\nI0818 15:07:31.871758 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 
15:07:31.871763 21769 net.cpp:165] Memory required for data: 866305200\nI0818 15:07:31.871773 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0818 15:07:31.871784 21769 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0818 15:07:31.871791 21769 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0818 15:07:31.871800 21769 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0818 15:07:31.872048 21769 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0818 15:07:31.872062 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872067 21769 net.cpp:165] Memory required for data: 872858800\nI0818 15:07:31.872078 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0818 15:07:31.872087 21769 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0818 15:07:31.872092 21769 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0818 15:07:31.872100 21769 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0818 15:07:31.872155 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0818 15:07:31.872298 21769 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0818 15:07:31.872311 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872316 21769 net.cpp:165] Memory required for data: 879412400\nI0818 15:07:31.872324 21769 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0818 15:07:31.872334 21769 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0818 15:07:31.872340 21769 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0818 15:07:31.872346 21769 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0818 15:07:31.872357 21769 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0818 15:07:31.872390 21769 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0818 15:07:31.872398 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872402 21769 net.cpp:165] Memory required for data: 885966000\nI0818 15:07:31.872408 21769 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0818 
15:07:31.872418 21769 net.cpp:100] Creating Layer L1_b12_relu\nI0818 15:07:31.872426 21769 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0818 15:07:31.872432 21769 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0818 15:07:31.872442 21769 net.cpp:150] Setting up L1_b12_relu\nI0818 15:07:31.872448 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872453 21769 net.cpp:165] Memory required for data: 892519600\nI0818 15:07:31.872457 21769 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:31.872464 21769 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:31.872473 21769 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0818 15:07:31.872484 21769 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0818 15:07:31.872501 21769 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0818 15:07:31.872570 21769 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:31.872588 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872601 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.872611 21769 net.cpp:165] Memory required for data: 905626800\nI0818 15:07:31.872619 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0818 15:07:31.872642 21769 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0818 15:07:31.872653 21769 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0818 15:07:31.872676 21769 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0818 15:07:31.873008 21769 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0818 15:07:31.873023 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.873028 21769 net.cpp:165] Memory required for data: 912180400\nI0818 15:07:31.873037 21769 layer_factory.hpp:77] Creating 
layer L1_b13_cbr1_bn\nI0818 15:07:31.873062 21769 net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0818 15:07:31.873070 21769 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0818 15:07:31.873077 21769 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0818 15:07:31.873324 21769 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0818 15:07:31.873337 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.873342 21769 net.cpp:165] Memory required for data: 918734000\nI0818 15:07:31.873353 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0818 15:07:31.873365 21769 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0818 15:07:31.873373 21769 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0818 15:07:31.873379 21769 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.873435 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0818 15:07:31.873580 21769 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0818 15:07:31.873594 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.873598 21769 net.cpp:165] Memory required for data: 925287600\nI0818 15:07:31.873607 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0818 15:07:31.873620 21769 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0818 15:07:31.873627 21769 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0818 15:07:31.873634 21769 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.873644 21769 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0818 15:07:31.873651 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.873656 21769 net.cpp:165] Memory required for data: 931841200\nI0818 15:07:31.873661 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0818 15:07:31.873674 21769 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0818 15:07:31.873680 21769 net.cpp:434] L1_b13_cbr2_conv <- L1_b13_cbr1_bn_top\nI0818 15:07:31.873698 21769 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0818 
15:07:31.874017 21769 net.cpp:150] Setting up L1_b13_cbr2_conv\nI0818 15:07:31.874032 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874037 21769 net.cpp:165] Memory required for data: 938394800\nI0818 15:07:31.874045 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0818 15:07:31.874055 21769 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0818 15:07:31.874061 21769 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0818 15:07:31.874073 21769 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0818 15:07:31.874318 21769 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0818 15:07:31.874331 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874336 21769 net.cpp:165] Memory required for data: 944948400\nI0818 15:07:31.874346 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0818 15:07:31.874359 21769 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0818 15:07:31.874366 21769 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0818 15:07:31.874374 21769 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0818 15:07:31.874428 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0818 15:07:31.874569 21769 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0818 15:07:31.874583 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874588 21769 net.cpp:165] Memory required for data: 951502000\nI0818 15:07:31.874596 21769 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0818 15:07:31.874606 21769 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0818 15:07:31.874613 21769 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0818 15:07:31.874622 21769 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0818 15:07:31.874637 21769 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0818 15:07:31.874672 21769 net.cpp:150] Setting up L1_b13_sum_eltwise\nI0818 15:07:31.874688 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 
15:07:31.874694 21769 net.cpp:165] Memory required for data: 958055600\nI0818 15:07:31.874699 21769 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0818 15:07:31.874707 21769 net.cpp:100] Creating Layer L1_b13_relu\nI0818 15:07:31.874713 21769 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0818 15:07:31.874723 21769 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0818 15:07:31.874733 21769 net.cpp:150] Setting up L1_b13_relu\nI0818 15:07:31.874740 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874744 21769 net.cpp:165] Memory required for data: 964609200\nI0818 15:07:31.874749 21769 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:31.874756 21769 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:31.874761 21769 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0818 15:07:31.874769 21769 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0818 15:07:31.874778 21769 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0818 15:07:31.874825 21769 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:31.874836 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874843 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.874848 21769 net.cpp:165] Memory required for data: 977716400\nI0818 15:07:31.874853 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0818 15:07:31.874864 21769 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0818 15:07:31.874871 21769 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0818 15:07:31.874882 21769 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0818 15:07:31.875200 21769 net.cpp:150] Setting up L1_b14_cbr1_conv\nI0818 15:07:31.875214 21769 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0818 15:07:31.875219 21769 net.cpp:165] Memory required for data: 984270000\nI0818 15:07:31.875228 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0818 15:07:31.875237 21769 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0818 15:07:31.875243 21769 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0818 15:07:31.875255 21769 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0818 15:07:31.875499 21769 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0818 15:07:31.875511 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.875516 21769 net.cpp:165] Memory required for data: 990823600\nI0818 15:07:31.875526 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0818 15:07:31.875538 21769 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0818 15:07:31.875545 21769 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0818 15:07:31.875552 21769 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.875607 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0818 15:07:31.875759 21769 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0818 15:07:31.875773 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.875778 21769 net.cpp:165] Memory required for data: 997377200\nI0818 15:07:31.875788 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0818 15:07:31.875799 21769 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0818 15:07:31.875807 21769 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0818 15:07:31.875813 21769 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.875823 21769 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0818 15:07:31.875830 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.875834 21769 net.cpp:165] Memory required for data: 1003930800\nI0818 15:07:31.875839 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_conv\nI0818 15:07:31.875860 21769 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0818 
15:07:31.875866 21769 net.cpp:434] L1_b14_cbr2_conv <- L1_b14_cbr1_bn_top\nI0818 15:07:31.875877 21769 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0818 15:07:31.876196 21769 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0818 15:07:31.876210 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.876215 21769 net.cpp:165] Memory required for data: 1010484400\nI0818 15:07:31.876224 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0818 15:07:31.876233 21769 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0818 15:07:31.876240 21769 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0818 15:07:31.876248 21769 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0818 15:07:31.876497 21769 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0818 15:07:31.876510 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.876515 21769 net.cpp:165] Memory required for data: 1017038000\nI0818 15:07:31.876525 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0818 15:07:31.876540 21769 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0818 15:07:31.876547 21769 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0818 15:07:31.876555 21769 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0818 15:07:31.876612 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0818 15:07:31.876763 21769 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0818 15:07:31.876777 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.876782 21769 net.cpp:165] Memory required for data: 1023591600\nI0818 15:07:31.876791 21769 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0818 15:07:31.876801 21769 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0818 15:07:31.876806 21769 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0818 15:07:31.876812 21769 net.cpp:434] L1_b14_sum_eltwise <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0818 15:07:31.876823 21769 net.cpp:408] L1_b14_sum_eltwise -> 
L1_b14_sum_eltwise_top\nI0818 15:07:31.876858 21769 net.cpp:150] Setting up L1_b14_sum_eltwise\nI0818 15:07:31.876868 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.876873 21769 net.cpp:165] Memory required for data: 1030145200\nI0818 15:07:31.876878 21769 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0818 15:07:31.876885 21769 net.cpp:100] Creating Layer L1_b14_relu\nI0818 15:07:31.876890 21769 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0818 15:07:31.876900 21769 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0818 15:07:31.876910 21769 net.cpp:150] Setting up L1_b14_relu\nI0818 15:07:31.876917 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.876922 21769 net.cpp:165] Memory required for data: 1036698800\nI0818 15:07:31.876926 21769 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:31.876935 21769 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:31.876940 21769 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0818 15:07:31.876946 21769 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0818 15:07:31.876955 21769 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0818 15:07:31.877002 21769 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:31.877013 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.877020 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.877024 21769 net.cpp:165] Memory required for data: 1049806000\nI0818 15:07:31.877029 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0818 15:07:31.877040 21769 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0818 15:07:31.877048 21769 net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0818 15:07:31.877058 21769 
net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0818 15:07:31.877377 21769 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0818 15:07:31.877398 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.877403 21769 net.cpp:165] Memory required for data: 1056359600\nI0818 15:07:31.877413 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0818 15:07:31.877421 21769 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0818 15:07:31.877427 21769 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0818 15:07:31.877441 21769 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0818 15:07:31.877697 21769 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0818 15:07:31.877712 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.877717 21769 net.cpp:165] Memory required for data: 1062913200\nI0818 15:07:31.877727 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0818 15:07:31.877739 21769 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0818 15:07:31.877746 21769 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0818 15:07:31.877754 21769 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.877809 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0818 15:07:31.877959 21769 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0818 15:07:31.877971 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.877976 21769 net.cpp:165] Memory required for data: 1069466800\nI0818 15:07:31.877985 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0818 15:07:31.877993 21769 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0818 15:07:31.878000 21769 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0818 15:07:31.878010 21769 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.878021 21769 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0818 15:07:31.878027 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.878032 21769 net.cpp:165] Memory required for 
data: 1076020400\nI0818 15:07:31.878037 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_conv\nI0818 15:07:31.878051 21769 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0818 15:07:31.878057 21769 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0818 15:07:31.878065 21769 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0818 15:07:31.878378 21769 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0818 15:07:31.878392 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.878397 21769 net.cpp:165] Memory required for data: 1082574000\nI0818 15:07:31.878407 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0818 15:07:31.878418 21769 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0818 15:07:31.878425 21769 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0818 15:07:31.878433 21769 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0818 15:07:31.878680 21769 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0818 15:07:31.878700 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.878705 21769 net.cpp:165] Memory required for data: 1089127600\nI0818 15:07:31.878715 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0818 15:07:31.878727 21769 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0818 15:07:31.878733 21769 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0818 15:07:31.878741 21769 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0818 15:07:31.878795 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0818 15:07:31.878942 21769 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0818 15:07:31.878954 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.878959 21769 net.cpp:165] Memory required for data: 1095681200\nI0818 15:07:31.878968 21769 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0818 15:07:31.878978 21769 net.cpp:100] Creating Layer L1_b15_sum_eltwise\nI0818 15:07:31.878983 21769 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0818 
15:07:31.878990 21769 net.cpp:434] L1_b15_sum_eltwise <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0818 15:07:31.879004 21769 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0818 15:07:31.879035 21769 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0818 15:07:31.879055 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879061 21769 net.cpp:165] Memory required for data: 1102234800\nI0818 15:07:31.879066 21769 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0818 15:07:31.879075 21769 net.cpp:100] Creating Layer L1_b15_relu\nI0818 15:07:31.879079 21769 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0818 15:07:31.879086 21769 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0818 15:07:31.879096 21769 net.cpp:150] Setting up L1_b15_relu\nI0818 15:07:31.879102 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879107 21769 net.cpp:165] Memory required for data: 1108788400\nI0818 15:07:31.879112 21769 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:31.879122 21769 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:31.879127 21769 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0818 15:07:31.879135 21769 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0818 15:07:31.879145 21769 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0818 15:07:31.879192 21769 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:31.879204 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879210 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879215 21769 net.cpp:165] Memory required for data: 1121895600\nI0818 15:07:31.879220 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0818 15:07:31.879232 21769 net.cpp:100] 
Creating Layer L1_b16_cbr1_conv\nI0818 15:07:31.879238 21769 net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0818 15:07:31.879251 21769 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0818 15:07:31.879575 21769 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0818 15:07:31.879588 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879593 21769 net.cpp:165] Memory required for data: 1128449200\nI0818 15:07:31.879602 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0818 15:07:31.879611 21769 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0818 15:07:31.879617 21769 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0818 15:07:31.879626 21769 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0818 15:07:31.879885 21769 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0818 15:07:31.879899 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.879904 21769 net.cpp:165] Memory required for data: 1135002800\nI0818 15:07:31.879914 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0818 15:07:31.879926 21769 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0818 15:07:31.879933 21769 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0818 15:07:31.879940 21769 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0818 15:07:31.880028 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0818 15:07:31.880174 21769 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0818 15:07:31.880188 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.880193 21769 net.cpp:165] Memory required for data: 1141556400\nI0818 15:07:31.880203 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0818 15:07:31.880210 21769 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0818 15:07:31.880216 21769 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0818 15:07:31.880226 21769 net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0818 15:07:31.880236 21769 net.cpp:150] 
Setting up L1_b16_cbr1_relu\nI0818 15:07:31.880244 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.880249 21769 net.cpp:165] Memory required for data: 1148110000\nI0818 15:07:31.880259 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0818 15:07:31.880272 21769 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0818 15:07:31.880278 21769 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0818 15:07:31.880295 21769 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0818 15:07:31.880622 21769 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0818 15:07:31.880640 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.880650 21769 net.cpp:165] Memory required for data: 1154663600\nI0818 15:07:31.880664 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0818 15:07:31.880692 21769 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0818 15:07:31.880704 21769 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0818 15:07:31.880723 21769 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0818 15:07:31.881009 21769 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0818 15:07:31.881024 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881029 21769 net.cpp:165] Memory required for data: 1161217200\nI0818 15:07:31.881041 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0818 15:07:31.881049 21769 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0818 15:07:31.881055 21769 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0818 15:07:31.881067 21769 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0818 15:07:31.881124 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0818 15:07:31.881274 21769 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0818 15:07:31.881289 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881294 21769 net.cpp:165] Memory required for data: 1167770800\nI0818 15:07:31.881302 21769 layer_factory.hpp:77] Creating layer 
L1_b16_sum_eltwise\nI0818 15:07:31.881311 21769 net.cpp:100] Creating Layer L1_b16_sum_eltwise\nI0818 15:07:31.881317 21769 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0818 15:07:31.881325 21769 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0818 15:07:31.881335 21769 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0818 15:07:31.881368 21769 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0818 15:07:31.881381 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881386 21769 net.cpp:165] Memory required for data: 1174324400\nI0818 15:07:31.881392 21769 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0818 15:07:31.881398 21769 net.cpp:100] Creating Layer L1_b16_relu\nI0818 15:07:31.881404 21769 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0818 15:07:31.881412 21769 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0818 15:07:31.881420 21769 net.cpp:150] Setting up L1_b16_relu\nI0818 15:07:31.881428 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881431 21769 net.cpp:165] Memory required for data: 1180878000\nI0818 15:07:31.881436 21769 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:31.881448 21769 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:31.881453 21769 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0818 15:07:31.881460 21769 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0818 15:07:31.881470 21769 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0818 15:07:31.881513 21769 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:31.881527 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881536 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881539 
21769 net.cpp:165] Memory required for data: 1193985200\nI0818 15:07:31.881546 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0818 15:07:31.881556 21769 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0818 15:07:31.881562 21769 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0818 15:07:31.881572 21769 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0818 15:07:31.881904 21769 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0818 15:07:31.881919 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.881924 21769 net.cpp:165] Memory required for data: 1200538800\nI0818 15:07:31.881942 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0818 15:07:31.881954 21769 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0818 15:07:31.881961 21769 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0818 15:07:31.881970 21769 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0818 15:07:31.882223 21769 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0818 15:07:31.882237 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.882242 21769 net.cpp:165] Memory required for data: 1207092400\nI0818 15:07:31.882252 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0818 15:07:31.882266 21769 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0818 15:07:31.882272 21769 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0818 15:07:31.882280 21769 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0818 15:07:31.882339 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0818 15:07:31.882484 21769 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0818 15:07:31.882498 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.882503 21769 net.cpp:165] Memory required for data: 1213646000\nI0818 15:07:31.882511 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0818 15:07:31.882519 21769 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0818 15:07:31.882525 21769 
net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0818 15:07:31.882535 21769 net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0818 15:07:31.882545 21769 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0818 15:07:31.882552 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.882557 21769 net.cpp:165] Memory required for data: 1220199600\nI0818 15:07:31.882562 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0818 15:07:31.882580 21769 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0818 15:07:31.882586 21769 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0818 15:07:31.882594 21769 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0818 15:07:31.882932 21769 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0818 15:07:31.882947 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.882952 21769 net.cpp:165] Memory required for data: 1226753200\nI0818 15:07:31.882962 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0818 15:07:31.882975 21769 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0818 15:07:31.882982 21769 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0818 15:07:31.882990 21769 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0818 15:07:31.883244 21769 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0818 15:07:31.883256 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.883261 21769 net.cpp:165] Memory required for data: 1233306800\nI0818 15:07:31.883272 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0818 15:07:31.883281 21769 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0818 15:07:31.883287 21769 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0818 15:07:31.883298 21769 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0818 15:07:31.883354 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0818 15:07:31.883498 21769 net.cpp:150] Setting up L1_b17_cbr2_scale\nI0818 15:07:31.883514 21769 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0818 15:07:31.883520 21769 net.cpp:165] Memory required for data: 1239860400\nI0818 15:07:31.883528 21769 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0818 15:07:31.883538 21769 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0818 15:07:31.883544 21769 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0818 15:07:31.883551 21769 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0818 15:07:31.883558 21769 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0818 15:07:31.883594 21769 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0818 15:07:31.883604 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.883608 21769 net.cpp:165] Memory required for data: 1246414000\nI0818 15:07:31.883620 21769 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0818 15:07:31.883631 21769 net.cpp:100] Creating Layer L1_b17_relu\nI0818 15:07:31.883638 21769 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0818 15:07:31.883646 21769 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0818 15:07:31.883656 21769 net.cpp:150] Setting up L1_b17_relu\nI0818 15:07:31.883662 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.883666 21769 net.cpp:165] Memory required for data: 1252967600\nI0818 15:07:31.883671 21769 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 15:07:31.883682 21769 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 15:07:31.883694 21769 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0818 15:07:31.883702 21769 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0818 15:07:31.883713 21769 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0818 15:07:31.883759 21769 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 
15:07:31.883774 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.883780 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.883785 21769 net.cpp:165] Memory required for data: 1266074800\nI0818 15:07:31.883790 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0818 15:07:31.883802 21769 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0818 15:07:31.883808 21769 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0818 15:07:31.883817 21769 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0818 15:07:31.884140 21769 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0818 15:07:31.884155 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.884160 21769 net.cpp:165] Memory required for data: 1272628400\nI0818 15:07:31.884169 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0818 15:07:31.884181 21769 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0818 15:07:31.884188 21769 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0818 15:07:31.884196 21769 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0818 15:07:31.884464 21769 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0818 15:07:31.884481 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.884487 21769 net.cpp:165] Memory required for data: 1279182000\nI0818 15:07:31.884497 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0818 15:07:31.884506 21769 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0818 15:07:31.884513 21769 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0818 15:07:31.884523 21769 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0818 15:07:31.884579 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0818 15:07:31.884732 21769 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0818 15:07:31.884749 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.884754 21769 net.cpp:165] Memory required for data: 1285735600\nI0818 15:07:31.884763 
21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0818 15:07:31.884771 21769 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0818 15:07:31.884778 21769 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0818 15:07:31.884785 21769 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0818 15:07:31.884794 21769 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0818 15:07:31.884802 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.884806 21769 net.cpp:165] Memory required for data: 1292289200\nI0818 15:07:31.884811 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0818 15:07:31.884824 21769 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0818 15:07:31.884831 21769 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0818 15:07:31.884842 21769 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0818 15:07:31.885169 21769 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0818 15:07:31.885190 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.885195 21769 net.cpp:165] Memory required for data: 1298842800\nI0818 15:07:31.885205 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0818 15:07:31.885216 21769 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0818 15:07:31.885223 21769 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0818 15:07:31.885234 21769 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0818 15:07:31.885483 21769 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0818 15:07:31.885495 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.885500 21769 net.cpp:165] Memory required for data: 1305396400\nI0818 15:07:31.885550 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0818 15:07:31.885561 21769 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0818 15:07:31.885568 21769 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0818 15:07:31.885576 21769 net.cpp:395] L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0818 15:07:31.885634 21769 layer_factory.hpp:77] 
Creating layer L1_b18_cbr2_scale\nI0818 15:07:31.885785 21769 net.cpp:150] Setting up L1_b18_cbr2_scale\nI0818 15:07:31.885799 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.885804 21769 net.cpp:165] Memory required for data: 1311950000\nI0818 15:07:31.885813 21769 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0818 15:07:31.885826 21769 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0818 15:07:31.885833 21769 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0818 15:07:31.885840 21769 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0818 15:07:31.885848 21769 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0818 15:07:31.885884 21769 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0818 15:07:31.885893 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.885898 21769 net.cpp:165] Memory required for data: 1318503600\nI0818 15:07:31.885903 21769 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0818 15:07:31.885910 21769 net.cpp:100] Creating Layer L1_b18_relu\nI0818 15:07:31.885916 21769 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0818 15:07:31.885926 21769 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0818 15:07:31.885936 21769 net.cpp:150] Setting up L1_b18_relu\nI0818 15:07:31.885943 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.885948 21769 net.cpp:165] Memory required for data: 1325057200\nI0818 15:07:31.885952 21769 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:31.885959 21769 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:31.885965 21769 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0818 15:07:31.885975 21769 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0818 15:07:31.885985 21769 net.cpp:408] 
L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0818 15:07:31.886032 21769 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:31.886044 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.886050 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:31.886055 21769 net.cpp:165] Memory required for data: 1338164400\nI0818 15:07:31.886060 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 15:07:31.886071 21769 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 15:07:31.886077 21769 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0818 15:07:31.886090 21769 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 15:07:31.886416 21769 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 15:07:31.886431 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.886436 21769 net.cpp:165] Memory required for data: 1339802800\nI0818 15:07:31.886445 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 15:07:31.886457 21769 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 15:07:31.886471 21769 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 15:07:31.886479 21769 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 15:07:31.886734 21769 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 15:07:31.886749 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.886754 21769 net.cpp:165] Memory required for data: 1341441200\nI0818 15:07:31.886765 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:31.886776 21769 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 15:07:31.886783 21769 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 15:07:31.886791 21769 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.886847 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:31.887001 21769 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 
15:07:31.887014 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.887019 21769 net.cpp:165] Memory required for data: 1343079600\nI0818 15:07:31.887028 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 15:07:31.887039 21769 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 15:07:31.887045 21769 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 15:07:31.887055 21769 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.887065 21769 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 15:07:31.887073 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.887078 21769 net.cpp:165] Memory required for data: 1344718000\nI0818 15:07:31.887082 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 15:07:31.887094 21769 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 15:07:31.887099 21769 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 15:07:31.887109 21769 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 15:07:31.887431 21769 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 15:07:31.887446 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.887451 21769 net.cpp:165] Memory required for data: 1346356400\nI0818 15:07:31.887459 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 15:07:31.887470 21769 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 15:07:31.887475 21769 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 15:07:31.887486 21769 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 15:07:31.887740 21769 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 15:07:31.887754 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.887759 21769 net.cpp:165] Memory required for data: 1347994800\nI0818 15:07:31.887769 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:31.887781 21769 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 15:07:31.887789 21769 net.cpp:434] L2_b1_cbr2_scale <- 
L2_b1_cbr2_bn_top\nI0818 15:07:31.887796 21769 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 15:07:31.887851 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:31.888000 21769 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 15:07:31.888013 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.888018 21769 net.cpp:165] Memory required for data: 1349633200\nI0818 15:07:31.888027 21769 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 15:07:31.888041 21769 net.cpp:100] Creating Layer L2_b1_pool\nI0818 15:07:31.888048 21769 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0818 15:07:31.888056 21769 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 15:07:31.888150 21769 net.cpp:150] Setting up L2_b1_pool\nI0818 15:07:31.888166 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.888171 21769 net.cpp:165] Memory required for data: 1351271600\nI0818 15:07:31.888177 21769 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 15:07:31.888186 21769 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 15:07:31.888192 21769 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 15:07:31.888200 21769 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 15:07:31.888218 21769 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 15:07:31.888254 21769 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 15:07:31.888263 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.888268 21769 net.cpp:165] Memory required for data: 1352910000\nI0818 15:07:31.888273 21769 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 15:07:31.888281 21769 net.cpp:100] Creating Layer L2_b1_relu\nI0818 15:07:31.888288 21769 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 15:07:31.888294 21769 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 15:07:31.888303 21769 net.cpp:150] Setting up L2_b1_relu\nI0818 15:07:31.888310 21769 net.cpp:157] Top shape: 
100 16 16 16 (409600)\nI0818 15:07:31.888314 21769 net.cpp:165] Memory required for data: 1354548400\nI0818 15:07:31.888319 21769 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 15:07:31.888375 21769 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 15:07:31.888391 21769 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 15:07:31.890424 21769 net.cpp:150] Setting up L2_b1_zeros\nI0818 15:07:31.890444 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:31.890450 21769 net.cpp:165] Memory required for data: 1356186800\nI0818 15:07:31.890455 21769 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 15:07:31.890465 21769 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 15:07:31.890472 21769 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 15:07:31.890480 21769 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 15:07:31.890491 21769 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 15:07:31.890573 21769 net.cpp:150] Setting up L2_b1_concat0\nI0818 15:07:31.890592 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.890599 21769 net.cpp:165] Memory required for data: 1359463600\nI0818 15:07:31.890604 21769 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:31.890612 21769 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:31.890619 21769 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 15:07:31.890626 21769 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:31.890636 21769 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:31.890694 21769 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:31.890707 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.890714 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.890719 21769 net.cpp:165] Memory required for data: 1366017200\nI0818 
15:07:31.890724 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 15:07:31.890739 21769 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 15:07:31.890746 21769 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:31.890755 21769 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 15:07:31.892228 21769 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 15:07:31.892246 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.892251 21769 net.cpp:165] Memory required for data: 1369294000\nI0818 15:07:31.892261 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 15:07:31.892274 21769 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 15:07:31.892282 21769 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 15:07:31.892290 21769 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 15:07:31.892544 21769 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 15:07:31.892560 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.892565 21769 net.cpp:165] Memory required for data: 1372570800\nI0818 15:07:31.892575 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:31.892585 21769 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 15:07:31.892591 21769 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 15:07:31.892599 21769 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.892655 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:31.892825 21769 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 15:07:31.892840 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.892845 21769 net.cpp:165] Memory required for data: 1375847600\nI0818 15:07:31.892854 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 15:07:31.892863 21769 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 15:07:31.892869 21769 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 15:07:31.892879 21769 net.cpp:395] 
L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.892910 21769 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 15:07:31.892917 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.892922 21769 net.cpp:165] Memory required for data: 1379124400\nI0818 15:07:31.892927 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 15:07:31.892940 21769 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 15:07:31.892946 21769 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 15:07:31.892956 21769 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 15:07:31.893419 21769 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 15:07:31.893434 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.893438 21769 net.cpp:165] Memory required for data: 1382401200\nI0818 15:07:31.893447 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 15:07:31.893457 21769 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 15:07:31.893463 21769 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 15:07:31.893477 21769 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 15:07:31.893733 21769 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 15:07:31.893748 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.893752 21769 net.cpp:165] Memory required for data: 1385678000\nI0818 15:07:31.893764 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:31.893775 21769 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 15:07:31.893781 21769 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 15:07:31.893790 21769 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 15:07:31.893846 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:31.893996 21769 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 15:07:31.894008 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894013 21769 net.cpp:165] Memory required for data: 1388954800\nI0818 15:07:31.894022 
21769 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 15:07:31.894034 21769 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 15:07:31.894042 21769 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 15:07:31.894048 21769 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:31.894057 21769 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 15:07:31.894086 21769 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 15:07:31.894096 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894100 21769 net.cpp:165] Memory required for data: 1392231600\nI0818 15:07:31.894105 21769 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 15:07:31.894114 21769 net.cpp:100] Creating Layer L2_b2_relu\nI0818 15:07:31.894119 21769 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 15:07:31.894129 21769 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 15:07:31.894138 21769 net.cpp:150] Setting up L2_b2_relu\nI0818 15:07:31.894145 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894150 21769 net.cpp:165] Memory required for data: 1395508400\nI0818 15:07:31.894155 21769 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:31.894161 21769 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:31.894167 21769 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 15:07:31.894174 21769 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:31.894191 21769 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:31.894243 21769 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:31.894253 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894259 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894264 
21769 net.cpp:165] Memory required for data: 1402062000\nI0818 15:07:31.894269 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 15:07:31.894280 21769 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 15:07:31.894286 21769 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:31.894299 21769 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 15:07:31.894763 21769 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 15:07:31.894778 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.894783 21769 net.cpp:165] Memory required for data: 1405338800\nI0818 15:07:31.894793 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 15:07:31.894803 21769 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 15:07:31.894809 21769 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 15:07:31.894819 21769 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 15:07:31.895069 21769 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0818 15:07:31.895083 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.895088 21769 net.cpp:165] Memory required for data: 1408615600\nI0818 15:07:31.895098 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:31.895110 21769 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 15:07:31.895117 21769 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 15:07:31.895124 21769 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.895181 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:31.895330 21769 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 15:07:31.895344 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.895349 21769 net.cpp:165] Memory required for data: 1411892400\nI0818 15:07:31.895359 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 15:07:31.895370 21769 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 15:07:31.895376 21769 net.cpp:434] L2_b3_cbr1_relu <- 
L2_b3_cbr1_bn_top\nI0818 15:07:31.895383 21769 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.895396 21769 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 15:07:31.895403 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.895408 21769 net.cpp:165] Memory required for data: 1415169200\nI0818 15:07:31.895413 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 15:07:31.895424 21769 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 15:07:31.895429 21769 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 15:07:31.895442 21769 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 15:07:31.895911 21769 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 15:07:31.895926 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.895931 21769 net.cpp:165] Memory required for data: 1418446000\nI0818 15:07:31.895941 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 15:07:31.895951 21769 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 15:07:31.895956 21769 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 15:07:31.895967 21769 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 15:07:31.896220 21769 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 15:07:31.896234 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896239 21769 net.cpp:165] Memory required for data: 1421722800\nI0818 15:07:31.896248 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:31.896260 21769 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 15:07:31.896267 21769 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 15:07:31.896275 21769 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 15:07:31.896332 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:31.896488 21769 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 15:07:31.896502 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896507 21769 net.cpp:165] 
Memory required for data: 1424999600\nI0818 15:07:31.896515 21769 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 15:07:31.896528 21769 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 15:07:31.896534 21769 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 15:07:31.896541 21769 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:31.896549 21769 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 15:07:31.896585 21769 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 15:07:31.896595 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896600 21769 net.cpp:165] Memory required for data: 1428276400\nI0818 15:07:31.896605 21769 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 15:07:31.896612 21769 net.cpp:100] Creating Layer L2_b3_relu\nI0818 15:07:31.896618 21769 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 15:07:31.896628 21769 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0818 15:07:31.896638 21769 net.cpp:150] Setting up L2_b3_relu\nI0818 15:07:31.896646 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896651 21769 net.cpp:165] Memory required for data: 1431553200\nI0818 15:07:31.896654 21769 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:31.896662 21769 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:31.896667 21769 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 15:07:31.896675 21769 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:31.896690 21769 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:31.896741 21769 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:31.896754 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896760 21769 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.896765 21769 net.cpp:165] Memory required for data: 1438106800\nI0818 15:07:31.896770 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 15:07:31.896781 21769 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 15:07:31.896788 21769 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:31.896800 21769 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 15:07:31.897270 21769 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 15:07:31.897284 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.897289 21769 net.cpp:165] Memory required for data: 1441383600\nI0818 15:07:31.897298 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 15:07:31.897307 21769 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 15:07:31.897315 21769 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 15:07:31.897327 21769 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0818 15:07:31.897583 21769 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 15:07:31.897596 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.897601 21769 net.cpp:165] Memory required for data: 1444660400\nI0818 15:07:31.897611 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:31.897624 21769 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 15:07:31.897630 21769 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 15:07:31.897639 21769 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.897704 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:31.897855 21769 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 15:07:31.897868 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.897873 21769 net.cpp:165] Memory required for data: 1447937200\nI0818 15:07:31.897882 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 15:07:31.897899 21769 net.cpp:100] Creating Layer 
L2_b4_cbr1_relu\nI0818 15:07:31.897907 21769 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 15:07:31.897914 21769 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.897923 21769 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 15:07:31.897931 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.897935 21769 net.cpp:165] Memory required for data: 1451214000\nI0818 15:07:31.897940 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 15:07:31.897954 21769 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 15:07:31.897960 21769 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 15:07:31.897971 21769 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 15:07:31.898434 21769 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 15:07:31.898449 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.898454 21769 net.cpp:165] Memory required for data: 1454490800\nI0818 15:07:31.898463 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 15:07:31.898473 21769 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 15:07:31.898478 21769 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 15:07:31.898491 21769 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 15:07:31.898756 21769 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 15:07:31.898771 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.898775 21769 net.cpp:165] Memory required for data: 1457767600\nI0818 15:07:31.898785 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:31.898797 21769 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 15:07:31.898804 21769 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 15:07:31.898813 21769 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 15:07:31.898869 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:31.899020 21769 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 15:07:31.899034 21769 net.cpp:157] 
Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899039 21769 net.cpp:165] Memory required for data: 1461044400\nI0818 15:07:31.899049 21769 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 15:07:31.899062 21769 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 15:07:31.899070 21769 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 15:07:31.899077 21769 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:31.899085 21769 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 15:07:31.899111 21769 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 15:07:31.899124 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899129 21769 net.cpp:165] Memory required for data: 1464321200\nI0818 15:07:31.899134 21769 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 15:07:31.899142 21769 net.cpp:100] Creating Layer L2_b4_relu\nI0818 15:07:31.899148 21769 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0818 15:07:31.899155 21769 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 15:07:31.899164 21769 net.cpp:150] Setting up L2_b4_relu\nI0818 15:07:31.899171 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899175 21769 net.cpp:165] Memory required for data: 1467598000\nI0818 15:07:31.899180 21769 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:31.899190 21769 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:31.899196 21769 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 15:07:31.899204 21769 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:31.899214 21769 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:31.899260 21769 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:31.899272 21769 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899279 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899291 21769 net.cpp:165] Memory required for data: 1474151600\nI0818 15:07:31.899296 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 15:07:31.899307 21769 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 15:07:31.899314 21769 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:31.899325 21769 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 15:07:31.899806 21769 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 15:07:31.899821 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.899827 21769 net.cpp:165] Memory required for data: 1477428400\nI0818 15:07:31.899835 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 15:07:31.899844 21769 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 15:07:31.899850 21769 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0818 15:07:31.899862 21769 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 15:07:31.900116 21769 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 15:07:31.900130 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.900135 21769 net.cpp:165] Memory required for data: 1480705200\nI0818 15:07:31.900146 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:31.900157 21769 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 15:07:31.900163 21769 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 15:07:31.900171 21769 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.900230 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:31.900380 21769 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 15:07:31.900393 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.900398 21769 net.cpp:165] Memory required for data: 1483982000\nI0818 15:07:31.900408 21769 layer_factory.hpp:77] Creating layer 
L2_b5_cbr1_relu\nI0818 15:07:31.900418 21769 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 15:07:31.900425 21769 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 15:07:31.900432 21769 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.900441 21769 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 15:07:31.900449 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.900454 21769 net.cpp:165] Memory required for data: 1487258800\nI0818 15:07:31.900459 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 15:07:31.900472 21769 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 15:07:31.900478 21769 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 15:07:31.900490 21769 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 15:07:31.900960 21769 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 15:07:31.900975 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.900980 21769 net.cpp:165] Memory required for data: 1490535600\nI0818 15:07:31.900990 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 15:07:31.900998 21769 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 15:07:31.901005 21769 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 15:07:31.901012 21769 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 15:07:31.901265 21769 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 15:07:31.901278 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901283 21769 net.cpp:165] Memory required for data: 1493812400\nI0818 15:07:31.901294 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:31.901302 21769 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 15:07:31.901309 21769 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 15:07:31.901319 21769 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 15:07:31.901377 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:31.901528 21769 net.cpp:150] 
Setting up L2_b5_cbr2_scale\nI0818 15:07:31.901541 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901546 21769 net.cpp:165] Memory required for data: 1497089200\nI0818 15:07:31.901556 21769 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 15:07:31.901571 21769 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 15:07:31.901578 21769 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 15:07:31.901585 21769 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:31.901595 21769 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 15:07:31.901623 21769 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 15:07:31.901638 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901644 21769 net.cpp:165] Memory required for data: 1500366000\nI0818 15:07:31.901649 21769 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 15:07:31.901657 21769 net.cpp:100] Creating Layer L2_b5_relu\nI0818 15:07:31.901662 21769 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 15:07:31.901669 21769 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 15:07:31.901679 21769 net.cpp:150] Setting up L2_b5_relu\nI0818 15:07:31.901691 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901696 21769 net.cpp:165] Memory required for data: 1503642800\nI0818 15:07:31.901701 21769 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:31.901711 21769 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:31.901717 21769 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 15:07:31.901724 21769 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:31.901734 21769 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:31.901783 21769 net.cpp:150] Setting up 
L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:31.901795 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901801 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.901806 21769 net.cpp:165] Memory required for data: 1510196400\nI0818 15:07:31.901811 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 15:07:31.901823 21769 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 15:07:31.901829 21769 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:31.901844 21769 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 15:07:31.902308 21769 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 15:07:31.902323 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.902328 21769 net.cpp:165] Memory required for data: 1513473200\nI0818 15:07:31.902338 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 15:07:31.902346 21769 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0818 15:07:31.902353 21769 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 15:07:31.902364 21769 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 15:07:31.902616 21769 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 15:07:31.902628 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.902633 21769 net.cpp:165] Memory required for data: 1516750000\nI0818 15:07:31.902643 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:31.902655 21769 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 15:07:31.902662 21769 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 15:07:31.902669 21769 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.902731 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:31.903867 21769 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 15:07:31.903884 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.903890 21769 net.cpp:165] Memory required for data: 
1520026800\nI0818 15:07:31.903900 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 15:07:31.903909 21769 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 15:07:31.903915 21769 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 15:07:31.903926 21769 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.903937 21769 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 15:07:31.903952 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.903957 21769 net.cpp:165] Memory required for data: 1523303600\nI0818 15:07:31.903962 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 15:07:31.903977 21769 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 15:07:31.903985 21769 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 15:07:31.903993 21769 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 15:07:31.904471 21769 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 15:07:31.904486 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.904491 21769 net.cpp:165] Memory required for data: 1526580400\nI0818 15:07:31.904500 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 15:07:31.904513 21769 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 15:07:31.904520 21769 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 15:07:31.904528 21769 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 15:07:31.904785 21769 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 15:07:31.904800 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.904805 21769 net.cpp:165] Memory required for data: 1529857200\nI0818 15:07:31.904815 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:31.904827 21769 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 15:07:31.904834 21769 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 15:07:31.904842 21769 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 15:07:31.904898 21769 
layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:31.905047 21769 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 15:07:31.905061 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905066 21769 net.cpp:165] Memory required for data: 1533134000\nI0818 15:07:31.905074 21769 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 15:07:31.905086 21769 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 15:07:31.905093 21769 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 15:07:31.905100 21769 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:31.905110 21769 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 15:07:31.905138 21769 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 15:07:31.905146 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905151 21769 net.cpp:165] Memory required for data: 1536410800\nI0818 15:07:31.905156 21769 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0818 15:07:31.905164 21769 net.cpp:100] Creating Layer L2_b6_relu\nI0818 15:07:31.905169 21769 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 15:07:31.905179 21769 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 15:07:31.905189 21769 net.cpp:150] Setting up L2_b6_relu\nI0818 15:07:31.905196 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905201 21769 net.cpp:165] Memory required for data: 1539687600\nI0818 15:07:31.905205 21769 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:31.905213 21769 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:31.905218 21769 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 15:07:31.905225 21769 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:31.905236 21769 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:31.905282 21769 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:31.905294 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905302 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905306 21769 net.cpp:165] Memory required for data: 1546241200\nI0818 15:07:31.905311 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 15:07:31.905325 21769 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 15:07:31.905333 21769 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:31.905349 21769 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 15:07:31.905819 21769 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 15:07:31.905834 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.905839 21769 net.cpp:165] Memory required for data: 1549518000\nI0818 15:07:31.905848 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0818 15:07:31.905860 21769 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 15:07:31.905867 21769 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 15:07:31.905875 21769 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 15:07:31.906127 21769 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 15:07:31.906143 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.906148 21769 net.cpp:165] Memory required for data: 1552794800\nI0818 15:07:31.906159 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:31.906168 21769 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 15:07:31.906174 21769 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 15:07:31.906182 21769 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.906237 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:31.906415 21769 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 15:07:31.906430 21769 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0818 15:07:31.906435 21769 net.cpp:165] Memory required for data: 1556071600\nI0818 15:07:31.906445 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 15:07:31.906453 21769 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 15:07:31.906460 21769 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 15:07:31.906469 21769 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.906481 21769 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 15:07:31.906487 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.906492 21769 net.cpp:165] Memory required for data: 1559348400\nI0818 15:07:31.906497 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 15:07:31.906512 21769 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 15:07:31.906517 21769 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 15:07:31.906525 21769 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 15:07:31.906994 21769 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 15:07:31.907009 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907014 21769 net.cpp:165] Memory required for data: 1562625200\nI0818 15:07:31.907022 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 15:07:31.907032 21769 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 15:07:31.907038 21769 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 15:07:31.907049 21769 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 15:07:31.907297 21769 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 15:07:31.907310 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907316 21769 net.cpp:165] Memory required for data: 1565902000\nI0818 15:07:31.907326 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:31.907362 21769 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 15:07:31.907371 21769 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 15:07:31.907380 21769 net.cpp:395] 
L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 15:07:31.907438 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:31.907589 21769 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 15:07:31.907603 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907608 21769 net.cpp:165] Memory required for data: 1569178800\nI0818 15:07:31.907618 21769 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 15:07:31.907627 21769 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 15:07:31.907634 21769 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 15:07:31.907640 21769 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:31.907649 21769 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 15:07:31.907691 21769 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 15:07:31.907702 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907707 21769 net.cpp:165] Memory required for data: 1572455600\nI0818 15:07:31.907712 21769 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 15:07:31.907721 21769 net.cpp:100] Creating Layer L2_b7_relu\nI0818 15:07:31.907727 21769 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 15:07:31.907737 21769 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 15:07:31.907747 21769 net.cpp:150] Setting up L2_b7_relu\nI0818 15:07:31.907753 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907758 21769 net.cpp:165] Memory required for data: 1575732400\nI0818 15:07:31.907763 21769 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:31.907771 21769 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:31.907776 21769 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 15:07:31.907786 21769 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 
15:07:31.907796 21769 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:31.907847 21769 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:31.907860 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907866 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.907871 21769 net.cpp:165] Memory required for data: 1582286000\nI0818 15:07:31.907876 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 15:07:31.907889 21769 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 15:07:31.907896 21769 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:31.907905 21769 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 15:07:31.908380 21769 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 15:07:31.908393 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.908399 21769 net.cpp:165] Memory required for data: 1585562800\nI0818 15:07:31.908407 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 15:07:31.908419 21769 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 15:07:31.908427 21769 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 15:07:31.908434 21769 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 15:07:31.908694 21769 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 15:07:31.908709 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.908713 21769 net.cpp:165] Memory required for data: 1588839600\nI0818 15:07:31.908725 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:31.908732 21769 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 15:07:31.908738 21769 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 15:07:31.908751 21769 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:31.908809 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:31.908959 21769 net.cpp:150] Setting up 
L2_b8_cbr1_scale\nI0818 15:07:31.908972 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.908977 21769 net.cpp:165] Memory required for data: 1592116400\nI0818 15:07:31.908987 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 15:07:31.908994 21769 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 15:07:31.909000 21769 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 15:07:31.909008 21769 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:31.909019 21769 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 15:07:31.909027 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.909031 21769 net.cpp:165] Memory required for data: 1595393200\nI0818 15:07:31.909036 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 15:07:31.909047 21769 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 15:07:31.909056 21769 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 15:07:31.909072 21769 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 15:07:31.909545 21769 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 15:07:31.909559 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.909564 21769 net.cpp:165] Memory required for data: 1598670000\nI0818 15:07:31.909574 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 15:07:31.909585 21769 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 15:07:31.909592 21769 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 15:07:31.909605 21769 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 15:07:31.909862 21769 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 15:07:31.909876 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.909881 21769 net.cpp:165] Memory required for data: 1601946800\nI0818 15:07:31.909911 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:31.909921 21769 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 15:07:31.909929 21769 net.cpp:434] L2_b8_cbr2_scale 
<- L2_b8_cbr2_bn_top\nI0818 15:07:31.909935 21769 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 15:07:31.909997 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:31.910148 21769 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 15:07:31.910164 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910171 21769 net.cpp:165] Memory required for data: 1605223600\nI0818 15:07:31.910179 21769 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 15:07:31.910188 21769 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 15:07:31.910194 21769 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 15:07:31.910202 21769 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:31.910208 21769 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 15:07:31.910239 21769 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 15:07:31.910249 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910254 21769 net.cpp:165] Memory required for data: 1608500400\nI0818 15:07:31.910259 21769 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 15:07:31.910267 21769 net.cpp:100] Creating Layer L2_b8_relu\nI0818 15:07:31.910272 21769 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 15:07:31.910282 21769 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 15:07:31.910292 21769 net.cpp:150] Setting up L2_b8_relu\nI0818 15:07:31.910300 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910303 21769 net.cpp:165] Memory required for data: 1611777200\nI0818 15:07:31.910308 21769 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:31.910315 21769 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:31.910320 21769 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 15:07:31.910331 21769 net.cpp:408] 
L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:31.910341 21769 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:31.910384 21769 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:31.910395 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910403 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910408 21769 net.cpp:165] Memory required for data: 1618330800\nI0818 15:07:31.910413 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 15:07:31.910426 21769 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 15:07:31.910434 21769 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:31.910442 21769 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 15:07:31.910918 21769 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 15:07:31.910933 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.910938 21769 net.cpp:165] Memory required for data: 1621607600\nI0818 15:07:31.910954 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 15:07:31.910967 21769 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 15:07:31.910974 21769 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 15:07:31.910982 21769 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 15:07:31.911234 21769 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 15:07:31.911247 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.911253 21769 net.cpp:165] Memory required for data: 1624884400\nI0818 15:07:31.911263 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:31.911272 21769 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 15:07:31.911278 21769 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 15:07:31.911285 21769 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.911345 21769 
layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:31.911494 21769 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0818 15:07:31.911510 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.911515 21769 net.cpp:165] Memory required for data: 1628161200\nI0818 15:07:31.911525 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 15:07:31.911531 21769 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 15:07:31.911538 21769 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 15:07:31.911545 21769 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.911554 21769 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 15:07:31.911561 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.911566 21769 net.cpp:165] Memory required for data: 1631438000\nI0818 15:07:31.911571 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 15:07:31.911583 21769 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 15:07:31.911590 21769 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 15:07:31.911602 21769 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 15:07:31.912078 21769 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 15:07:31.912093 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912098 21769 net.cpp:165] Memory required for data: 1634714800\nI0818 15:07:31.912107 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 15:07:31.912119 21769 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 15:07:31.912127 21769 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 15:07:31.912137 21769 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 15:07:31.912389 21769 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 15:07:31.912402 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912407 21769 net.cpp:165] Memory required for data: 1637991600\nI0818 15:07:31.912418 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:31.912426 
21769 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 15:07:31.912433 21769 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0818 15:07:31.912441 21769 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 15:07:31.912502 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:31.912650 21769 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 15:07:31.912663 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912668 21769 net.cpp:165] Memory required for data: 1641268400\nI0818 15:07:31.912678 21769 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 15:07:31.912698 21769 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 15:07:31.912706 21769 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 15:07:31.912714 21769 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:31.912721 21769 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 15:07:31.912750 21769 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0818 15:07:31.912757 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912762 21769 net.cpp:165] Memory required for data: 1644545200\nI0818 15:07:31.912767 21769 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 15:07:31.912784 21769 net.cpp:100] Creating Layer L2_b9_relu\nI0818 15:07:31.912792 21769 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 15:07:31.912801 21769 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 15:07:31.912812 21769 net.cpp:150] Setting up L2_b9_relu\nI0818 15:07:31.912819 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912823 21769 net.cpp:165] Memory required for data: 1647822000\nI0818 15:07:31.912828 21769 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:31.912835 21769 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:31.912842 21769 net.cpp:434] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 15:07:31.912848 21769 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:31.912858 21769 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:31.912907 21769 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:31.912919 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912926 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.912930 21769 net.cpp:165] Memory required for data: 1654375600\nI0818 15:07:31.912935 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0818 15:07:31.912950 21769 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0818 15:07:31.912956 21769 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:31.912966 21769 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0818 15:07:31.913434 21769 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0818 15:07:31.913449 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.913453 21769 net.cpp:165] Memory required for data: 1657652400\nI0818 15:07:31.913462 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0818 15:07:31.913475 21769 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0818 15:07:31.913481 21769 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0818 15:07:31.913492 21769 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0818 15:07:31.913774 21769 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0818 15:07:31.913789 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.913795 21769 net.cpp:165] Memory required for data: 1660929200\nI0818 15:07:31.913805 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0818 15:07:31.913815 21769 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0818 15:07:31.913820 21769 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0818 
15:07:31.913828 21769 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.913888 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0818 15:07:31.914033 21769 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0818 15:07:31.914049 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.914054 21769 net.cpp:165] Memory required for data: 1664206000\nI0818 15:07:31.914063 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0818 15:07:31.914072 21769 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0818 15:07:31.914077 21769 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0818 15:07:31.914084 21769 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.914094 21769 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0818 15:07:31.914100 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.914105 21769 net.cpp:165] Memory required for data: 1667482800\nI0818 15:07:31.914110 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0818 15:07:31.914124 21769 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0818 15:07:31.914130 21769 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0818 15:07:31.914141 21769 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0818 15:07:31.914609 21769 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0818 15:07:31.914623 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.914639 21769 net.cpp:165] Memory required for data: 1670759600\nI0818 15:07:31.914649 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0818 15:07:31.914662 21769 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0818 15:07:31.914669 21769 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0818 15:07:31.914680 21769 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0818 15:07:31.914940 21769 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0818 15:07:31.914954 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.914959 21769 net.cpp:165] Memory 
required for data: 1674036400\nI0818 15:07:31.914969 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0818 15:07:31.914978 21769 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0818 15:07:31.914984 21769 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0818 15:07:31.914993 21769 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0818 15:07:31.915051 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0818 15:07:31.915202 21769 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0818 15:07:31.915215 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915220 21769 net.cpp:165] Memory required for data: 1677313200\nI0818 15:07:31.915230 21769 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0818 15:07:31.915241 21769 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0818 15:07:31.915247 21769 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0818 15:07:31.915256 21769 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:31.915263 21769 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0818 15:07:31.915289 21769 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0818 15:07:31.915298 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915302 21769 net.cpp:165] Memory required for data: 1680590000\nI0818 15:07:31.915308 21769 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0818 15:07:31.915320 21769 net.cpp:100] Creating Layer L2_b10_relu\nI0818 15:07:31.915326 21769 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0818 15:07:31.915333 21769 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0818 15:07:31.915343 21769 net.cpp:150] Setting up L2_b10_relu\nI0818 15:07:31.915349 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915354 21769 net.cpp:165] Memory required for data: 1683866800\nI0818 15:07:31.915359 21769 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 
15:07:31.915365 21769 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 15:07:31.915370 21769 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0818 15:07:31.915377 21769 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0818 15:07:31.915386 21769 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0818 15:07:31.915433 21769 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 15:07:31.915446 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915452 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915457 21769 net.cpp:165] Memory required for data: 1690420400\nI0818 15:07:31.915462 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0818 15:07:31.915475 21769 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0818 15:07:31.915482 21769 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0818 15:07:31.915491 21769 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0818 15:07:31.915968 21769 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0818 15:07:31.915983 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.915988 21769 net.cpp:165] Memory required for data: 1693697200\nI0818 15:07:31.915997 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0818 15:07:31.916009 21769 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0818 15:07:31.916016 21769 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0818 15:07:31.916033 21769 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0818 15:07:31.916287 21769 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0818 15:07:31.916301 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.916306 21769 net.cpp:165] Memory required for data: 1696974000\nI0818 15:07:31.916316 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0818 
15:07:31.916326 21769 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0818 15:07:31.916332 21769 net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0818 15:07:31.916339 21769 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.916399 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0818 15:07:31.916550 21769 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0818 15:07:31.916563 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.916569 21769 net.cpp:165] Memory required for data: 1700250800\nI0818 15:07:31.916579 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0818 15:07:31.916589 21769 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0818 15:07:31.916595 21769 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0818 15:07:31.916604 21769 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.916612 21769 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0818 15:07:31.916620 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.916625 21769 net.cpp:165] Memory required for data: 1703527600\nI0818 15:07:31.916628 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0818 15:07:31.916642 21769 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0818 15:07:31.916649 21769 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0818 15:07:31.916661 21769 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0818 15:07:31.917136 21769 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0818 15:07:31.917151 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917156 21769 net.cpp:165] Memory required for data: 1706804400\nI0818 15:07:31.917165 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0818 15:07:31.917177 21769 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0818 15:07:31.917184 21769 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0818 15:07:31.917196 21769 net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0818 15:07:31.917445 21769 
net.cpp:150] Setting up L2_b11_cbr2_bn\nI0818 15:07:31.917461 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917466 21769 net.cpp:165] Memory required for data: 1710081200\nI0818 15:07:31.917477 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0818 15:07:31.917485 21769 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0818 15:07:31.917492 21769 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0818 15:07:31.917500 21769 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0818 15:07:31.917557 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0818 15:07:31.917712 21769 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0818 15:07:31.917726 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917732 21769 net.cpp:165] Memory required for data: 1713358000\nI0818 15:07:31.917740 21769 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0818 15:07:31.917749 21769 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0818 15:07:31.917759 21769 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0818 15:07:31.917767 21769 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0818 15:07:31.917774 21769 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0818 15:07:31.917800 21769 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0818 15:07:31.917809 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917814 21769 net.cpp:165] Memory required for data: 1716634800\nI0818 15:07:31.917819 21769 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0818 15:07:31.917832 21769 net.cpp:100] Creating Layer L2_b11_relu\nI0818 15:07:31.917840 21769 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0818 15:07:31.917846 21769 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0818 15:07:31.917862 21769 net.cpp:150] Setting up L2_b11_relu\nI0818 15:07:31.917870 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917876 21769 
net.cpp:165] Memory required for data: 1719911600\nI0818 15:07:31.917881 21769 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:31.917887 21769 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:31.917892 21769 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0818 15:07:31.917899 21769 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0818 15:07:31.917909 21769 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0818 15:07:31.917958 21769 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:31.917970 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917978 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.917982 21769 net.cpp:165] Memory required for data: 1726465200\nI0818 15:07:31.917987 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0818 15:07:31.918001 21769 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0818 15:07:31.918009 21769 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0818 15:07:31.918017 21769 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0818 15:07:31.918491 21769 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0818 15:07:31.918505 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.918510 21769 net.cpp:165] Memory required for data: 1729742000\nI0818 15:07:31.918519 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0818 15:07:31.918534 21769 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0818 15:07:31.918540 21769 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0818 15:07:31.918550 21769 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0818 15:07:31.918815 21769 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0818 15:07:31.918829 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 
15:07:31.918834 21769 net.cpp:165] Memory required for data: 1733018800\nI0818 15:07:31.918844 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0818 15:07:31.918853 21769 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0818 15:07:31.918860 21769 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0818 15:07:31.918867 21769 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.918927 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0818 15:07:31.919080 21769 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0818 15:07:31.919092 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.919097 21769 net.cpp:165] Memory required for data: 1736295600\nI0818 15:07:31.919106 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0818 15:07:31.919117 21769 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0818 15:07:31.919124 21769 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0818 15:07:31.919131 21769 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.919140 21769 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0818 15:07:31.919147 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.919152 21769 net.cpp:165] Memory required for data: 1739572400\nI0818 15:07:31.919157 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0818 15:07:31.919170 21769 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0818 15:07:31.919178 21769 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0818 15:07:31.919185 21769 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0818 15:07:31.919657 21769 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0818 15:07:31.919672 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.919677 21769 net.cpp:165] Memory required for data: 1742849200\nI0818 15:07:31.919692 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0818 15:07:31.919713 21769 net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0818 15:07:31.919721 21769 
net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0818 15:07:31.919729 21769 net.cpp:408] L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0818 15:07:31.919987 21769 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0818 15:07:31.920003 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920009 21769 net.cpp:165] Memory required for data: 1746126000\nI0818 15:07:31.920019 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0818 15:07:31.920029 21769 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0818 15:07:31.920035 21769 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0818 15:07:31.920042 21769 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0818 15:07:31.920099 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0818 15:07:31.920250 21769 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0818 15:07:31.920264 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920269 21769 net.cpp:165] Memory required for data: 1749402800\nI0818 15:07:31.920279 21769 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0818 15:07:31.920287 21769 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0818 15:07:31.920294 21769 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0818 15:07:31.920300 21769 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0818 15:07:31.920311 21769 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0818 15:07:31.920337 21769 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0818 15:07:31.920346 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920351 21769 net.cpp:165] Memory required for data: 1752679600\nI0818 15:07:31.920356 21769 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0818 15:07:31.920367 21769 net.cpp:100] Creating Layer L2_b12_relu\nI0818 15:07:31.920373 21769 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0818 15:07:31.920382 21769 net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0818 
15:07:31.920390 21769 net.cpp:150] Setting up L2_b12_relu\nI0818 15:07:31.920397 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920402 21769 net.cpp:165] Memory required for data: 1755956400\nI0818 15:07:31.920406 21769 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:31.920413 21769 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:31.920418 21769 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0818 15:07:31.920425 21769 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0818 15:07:31.920434 21769 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0818 15:07:31.920481 21769 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:31.920493 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920500 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.920505 21769 net.cpp:165] Memory required for data: 1762510000\nI0818 15:07:31.920511 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0818 15:07:31.920524 21769 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0818 15:07:31.920531 21769 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0818 15:07:31.920541 21769 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0818 15:07:31.921018 21769 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0818 15:07:31.921033 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.921038 21769 net.cpp:165] Memory required for data: 1765786800\nI0818 15:07:31.921047 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0818 15:07:31.921059 21769 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0818 15:07:31.921066 21769 net.cpp:434] L2_b13_cbr1_bn <- L2_b13_cbr1_conv_top\nI0818 15:07:31.921078 21769 net.cpp:408] L2_b13_cbr1_bn -> 
L2_b13_cbr1_bn_top\nI0818 15:07:31.921331 21769 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0818 15:07:31.921357 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.921363 21769 net.cpp:165] Memory required for data: 1769063600\nI0818 15:07:31.921375 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0818 15:07:31.921383 21769 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0818 15:07:31.921389 21769 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0818 15:07:31.921397 21769 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.921458 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0818 15:07:31.921607 21769 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0818 15:07:31.921620 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.921625 21769 net.cpp:165] Memory required for data: 1772340400\nI0818 15:07:31.921634 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0818 15:07:31.921643 21769 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0818 15:07:31.921648 21769 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0818 15:07:31.921658 21769 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.921669 21769 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0818 15:07:31.921676 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.921680 21769 net.cpp:165] Memory required for data: 1775617200\nI0818 15:07:31.921691 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0818 15:07:31.921706 21769 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0818 15:07:31.921713 21769 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0818 15:07:31.921721 21769 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0818 15:07:31.922186 21769 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0818 15:07:31.922200 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.922206 21769 net.cpp:165] Memory required for data: 1778894000\nI0818 
15:07:31.922215 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0818 15:07:31.922227 21769 net.cpp:100] Creating Layer L2_b13_cbr2_bn\nI0818 15:07:31.922235 21769 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0818 15:07:31.922242 21769 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0818 15:07:31.922497 21769 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0818 15:07:31.922513 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.922518 21769 net.cpp:165] Memory required for data: 1782170800\nI0818 15:07:31.922528 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0818 15:07:31.922538 21769 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0818 15:07:31.922544 21769 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0818 15:07:31.922550 21769 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0818 15:07:31.922607 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0818 15:07:31.922770 21769 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0818 15:07:31.922783 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.922788 21769 net.cpp:165] Memory required for data: 1785447600\nI0818 15:07:31.922797 21769 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0818 15:07:31.922807 21769 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0818 15:07:31.922813 21769 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0818 15:07:31.922821 21769 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0818 15:07:31.922832 21769 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0818 15:07:31.922859 21769 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0818 15:07:31.922868 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.922873 21769 net.cpp:165] Memory required for data: 1788724400\nI0818 15:07:31.922878 21769 layer_factory.hpp:77] Creating layer L2_b13_relu\nI0818 15:07:31.922885 21769 net.cpp:100] Creating Layer L2_b13_relu\nI0818 
15:07:31.922891 21769 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0818 15:07:31.922901 21769 net.cpp:395] L2_b13_relu -> L2_b13_sum_eltwise_top (in-place)\nI0818 15:07:31.922911 21769 net.cpp:150] Setting up L2_b13_relu\nI0818 15:07:31.922919 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.922933 21769 net.cpp:165] Memory required for data: 1792001200\nI0818 15:07:31.922940 21769 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:31.922946 21769 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:31.922952 21769 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0818 15:07:31.922960 21769 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0818 15:07:31.922969 21769 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0818 15:07:31.923018 21769 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:31.923032 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.923038 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.923043 21769 net.cpp:165] Memory required for data: 1798554800\nI0818 15:07:31.923048 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0818 15:07:31.923063 21769 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0818 15:07:31.923070 21769 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0818 15:07:31.923079 21769 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0818 15:07:31.923552 21769 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0818 15:07:31.923566 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.923571 21769 net.cpp:165] Memory required for data: 1801831600\nI0818 15:07:31.923580 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0818 15:07:31.923593 21769 net.cpp:100] Creating 
Layer L2_b14_cbr1_bn\nI0818 15:07:31.923600 21769 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0818 15:07:31.923609 21769 net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0818 15:07:31.923871 21769 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0818 15:07:31.923887 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.923893 21769 net.cpp:165] Memory required for data: 1805108400\nI0818 15:07:31.923903 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0818 15:07:31.923913 21769 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0818 15:07:31.923918 21769 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0818 15:07:31.923926 21769 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.923984 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0818 15:07:31.924139 21769 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0818 15:07:31.924151 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.924156 21769 net.cpp:165] Memory required for data: 1808385200\nI0818 15:07:31.924165 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0818 15:07:31.924173 21769 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0818 15:07:31.924180 21769 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0818 15:07:31.924190 21769 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.924199 21769 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0818 15:07:31.924207 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.924211 21769 net.cpp:165] Memory required for data: 1811662000\nI0818 15:07:31.924216 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0818 15:07:31.924230 21769 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0818 15:07:31.924237 21769 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0818 15:07:31.924245 21769 net.cpp:408] L2_b14_cbr2_conv -> L2_b14_cbr2_conv_top\nI0818 15:07:31.924721 21769 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0818 
15:07:31.924736 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.924741 21769 net.cpp:165] Memory required for data: 1814938800\nI0818 15:07:31.924749 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0818 15:07:31.924758 21769 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0818 15:07:31.924764 21769 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0818 15:07:31.924775 21769 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0818 15:07:31.925043 21769 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0818 15:07:31.925057 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925062 21769 net.cpp:165] Memory required for data: 1818215600\nI0818 15:07:31.925073 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0818 15:07:31.925084 21769 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0818 15:07:31.925091 21769 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0818 15:07:31.925099 21769 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0818 15:07:31.925158 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0818 15:07:31.925309 21769 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0818 15:07:31.925323 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925328 21769 net.cpp:165] Memory required for data: 1821492400\nI0818 15:07:31.925336 21769 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0818 15:07:31.925348 21769 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0818 15:07:31.925355 21769 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0818 15:07:31.925362 21769 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0818 15:07:31.925372 21769 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0818 15:07:31.925400 21769 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0818 15:07:31.925408 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925412 21769 net.cpp:165] Memory required for data: 1824769200\nI0818 
15:07:31.925418 21769 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0818 15:07:31.925426 21769 net.cpp:100] Creating Layer L2_b14_relu\nI0818 15:07:31.925431 21769 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0818 15:07:31.925443 21769 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0818 15:07:31.925453 21769 net.cpp:150] Setting up L2_b14_relu\nI0818 15:07:31.925460 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925464 21769 net.cpp:165] Memory required for data: 1828046000\nI0818 15:07:31.925469 21769 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:31.925477 21769 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:31.925482 21769 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0818 15:07:31.925489 21769 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0818 15:07:31.925499 21769 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0818 15:07:31.925547 21769 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:31.925559 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925566 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.925570 21769 net.cpp:165] Memory required for data: 1834599600\nI0818 15:07:31.925575 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0818 15:07:31.925587 21769 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0818 15:07:31.925593 21769 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0818 15:07:31.925606 21769 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0818 15:07:31.926090 21769 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0818 15:07:31.926105 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.926110 21769 net.cpp:165] Memory required for 
data: 1837876400\nI0818 15:07:31.926120 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0818 15:07:31.926131 21769 net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0818 15:07:31.926138 21769 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0818 15:07:31.926147 21769 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0818 15:07:31.926404 21769 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0818 15:07:31.926422 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.926427 21769 net.cpp:165] Memory required for data: 1841153200\nI0818 15:07:31.926445 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0818 15:07:31.926455 21769 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0818 15:07:31.926460 21769 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0818 15:07:31.926468 21769 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.926525 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0818 15:07:31.926687 21769 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0818 15:07:31.926702 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.926707 21769 net.cpp:165] Memory required for data: 1844430000\nI0818 15:07:31.926715 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0818 15:07:31.926723 21769 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0818 15:07:31.926729 21769 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0818 15:07:31.926740 21769 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.926750 21769 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0818 15:07:31.926758 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.926762 21769 net.cpp:165] Memory required for data: 1847706800\nI0818 15:07:31.926767 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0818 15:07:31.926777 21769 net.cpp:100] Creating Layer L2_b15_cbr2_conv\nI0818 15:07:31.926784 21769 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0818 
15:07:31.926795 21769 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0818 15:07:31.927265 21769 net.cpp:150] Setting up L2_b15_cbr2_conv\nI0818 15:07:31.927279 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.927284 21769 net.cpp:165] Memory required for data: 1850983600\nI0818 15:07:31.927294 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0818 15:07:31.927304 21769 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0818 15:07:31.927309 21769 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0818 15:07:31.927320 21769 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0818 15:07:31.927578 21769 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0818 15:07:31.927592 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.927597 21769 net.cpp:165] Memory required for data: 1854260400\nI0818 15:07:31.927606 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0818 15:07:31.927618 21769 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0818 15:07:31.927626 21769 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0818 15:07:31.927634 21769 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0818 15:07:31.927698 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0818 15:07:31.927852 21769 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0818 15:07:31.927865 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.927870 21769 net.cpp:165] Memory required for data: 1857537200\nI0818 15:07:31.927880 21769 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0818 15:07:31.927892 21769 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0818 15:07:31.927899 21769 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0818 15:07:31.927907 21769 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0818 15:07:31.927914 21769 net.cpp:408] L2_b15_sum_eltwise -> L2_b15_sum_eltwise_top\nI0818 15:07:31.927944 21769 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0818 
15:07:31.927953 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.927958 21769 net.cpp:165] Memory required for data: 1860814000\nI0818 15:07:31.927963 21769 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0818 15:07:31.927970 21769 net.cpp:100] Creating Layer L2_b15_relu\nI0818 15:07:31.927976 21769 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0818 15:07:31.927986 21769 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0818 15:07:31.927996 21769 net.cpp:150] Setting up L2_b15_relu\nI0818 15:07:31.928004 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.928007 21769 net.cpp:165] Memory required for data: 1864090800\nI0818 15:07:31.928012 21769 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:31.928026 21769 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:31.928032 21769 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0818 15:07:31.928040 21769 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0818 15:07:31.928050 21769 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0818 15:07:31.928099 21769 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:31.928112 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.928117 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.928122 21769 net.cpp:165] Memory required for data: 1870644400\nI0818 15:07:31.928128 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0818 15:07:31.928138 21769 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0818 15:07:31.928144 21769 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0818 15:07:31.928160 21769 net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0818 15:07:31.929653 21769 net.cpp:150] Setting up 
L2_b16_cbr1_conv\nI0818 15:07:31.929672 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.929677 21769 net.cpp:165] Memory required for data: 1873921200\nI0818 15:07:31.929692 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0818 15:07:31.929707 21769 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0818 15:07:31.929714 21769 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0818 15:07:31.929723 21769 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0818 15:07:31.929987 21769 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0818 15:07:31.930001 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.930006 21769 net.cpp:165] Memory required for data: 1877198000\nI0818 15:07:31.930016 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0818 15:07:31.930024 21769 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0818 15:07:31.930030 21769 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0818 15:07:31.930042 21769 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0818 15:07:31.930101 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0818 15:07:31.930260 21769 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0818 15:07:31.930274 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.930279 21769 net.cpp:165] Memory required for data: 1880474800\nI0818 15:07:31.930289 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0818 15:07:31.930296 21769 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0818 15:07:31.930304 21769 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0818 15:07:31.930310 21769 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0818 15:07:31.930320 21769 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0818 15:07:31.930326 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.930331 21769 net.cpp:165] Memory required for data: 1883751600\nI0818 15:07:31.930335 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0818 
15:07:31.930351 21769 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0818 15:07:31.930356 21769 net.cpp:434] L2_b16_cbr2_conv <- L2_b16_cbr1_bn_top\nI0818 15:07:31.930368 21769 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0818 15:07:31.930855 21769 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0818 15:07:31.930871 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.930876 21769 net.cpp:165] Memory required for data: 1887028400\nI0818 15:07:31.930884 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0818 15:07:31.930897 21769 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0818 15:07:31.930904 21769 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0818 15:07:31.930915 21769 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0818 15:07:31.931176 21769 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0818 15:07:31.931190 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931203 21769 net.cpp:165] Memory required for data: 1890305200\nI0818 15:07:31.931215 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0818 15:07:31.931222 21769 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0818 15:07:31.931229 21769 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0818 15:07:31.931237 21769 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0818 15:07:31.931298 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0818 15:07:31.931450 21769 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0818 15:07:31.931468 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931473 21769 net.cpp:165] Memory required for data: 1893582000\nI0818 15:07:31.931481 21769 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0818 15:07:31.931490 21769 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0818 15:07:31.931496 21769 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0818 15:07:31.931504 21769 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0818 
15:07:31.931511 21769 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0818 15:07:31.931541 21769 net.cpp:150] Setting up L2_b16_sum_eltwise\nI0818 15:07:31.931551 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931556 21769 net.cpp:165] Memory required for data: 1896858800\nI0818 15:07:31.931561 21769 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0818 15:07:31.931568 21769 net.cpp:100] Creating Layer L2_b16_relu\nI0818 15:07:31.931574 21769 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0818 15:07:31.931584 21769 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0818 15:07:31.931594 21769 net.cpp:150] Setting up L2_b16_relu\nI0818 15:07:31.931602 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931605 21769 net.cpp:165] Memory required for data: 1900135600\nI0818 15:07:31.931610 21769 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:31.931617 21769 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:31.931623 21769 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0818 15:07:31.931633 21769 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0818 15:07:31.931643 21769 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0818 15:07:31.931696 21769 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:31.931710 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931715 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.931720 21769 net.cpp:165] Memory required for data: 1906689200\nI0818 15:07:31.931725 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0818 15:07:31.931740 21769 net.cpp:100] Creating Layer L2_b17_cbr1_conv\nI0818 15:07:31.931747 21769 net.cpp:434] L2_b17_cbr1_conv <- 
L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0818 15:07:31.931756 21769 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0818 15:07:31.932238 21769 net.cpp:150] Setting up L2_b17_cbr1_conv\nI0818 15:07:31.932252 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.932257 21769 net.cpp:165] Memory required for data: 1909966000\nI0818 15:07:31.932266 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0818 15:07:31.932279 21769 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0818 15:07:31.932286 21769 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0818 15:07:31.932294 21769 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0818 15:07:31.932557 21769 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0818 15:07:31.932571 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.932576 21769 net.cpp:165] Memory required for data: 1913242800\nI0818 15:07:31.932587 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0818 15:07:31.932596 21769 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0818 15:07:31.932610 21769 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0818 15:07:31.932617 21769 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0818 15:07:31.932680 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0818 15:07:31.932843 21769 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0818 15:07:31.932860 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.932865 21769 net.cpp:165] Memory required for data: 1916519600\nI0818 15:07:31.932874 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0818 15:07:31.932883 21769 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0818 15:07:31.932888 21769 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0818 15:07:31.932896 21769 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0818 15:07:31.932905 21769 net.cpp:150] Setting up L2_b17_cbr1_relu\nI0818 15:07:31.932912 21769 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0818 15:07:31.932917 21769 net.cpp:165] Memory required for data: 1919796400\nI0818 15:07:31.932922 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_conv\nI0818 15:07:31.932936 21769 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0818 15:07:31.932942 21769 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0818 15:07:31.932955 21769 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0818 15:07:31.933429 21769 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0818 15:07:31.933442 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.933447 21769 net.cpp:165] Memory required for data: 1923073200\nI0818 15:07:31.933456 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0818 15:07:31.933468 21769 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0818 15:07:31.933475 21769 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0818 15:07:31.933486 21769 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0818 15:07:31.933754 21769 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0818 15:07:31.933768 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.933773 21769 net.cpp:165] Memory required for data: 1926350000\nI0818 15:07:31.933784 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0818 15:07:31.933792 21769 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0818 15:07:31.933799 21769 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0818 15:07:31.933806 21769 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0818 15:07:31.933866 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0818 15:07:31.934023 21769 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0818 15:07:31.934036 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934041 21769 net.cpp:165] Memory required for data: 1929626800\nI0818 15:07:31.934051 21769 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0818 15:07:31.934062 21769 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0818 15:07:31.934069 
21769 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0818 15:07:31.934077 21769 net.cpp:434] L2_b17_sum_eltwise <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0818 15:07:31.934084 21769 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0818 15:07:31.934111 21769 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0818 15:07:31.934120 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934125 21769 net.cpp:165] Memory required for data: 1932903600\nI0818 15:07:31.934130 21769 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0818 15:07:31.934140 21769 net.cpp:100] Creating Layer L2_b17_relu\nI0818 15:07:31.934147 21769 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0818 15:07:31.934154 21769 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0818 15:07:31.934164 21769 net.cpp:150] Setting up L2_b17_relu\nI0818 15:07:31.934171 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934175 21769 net.cpp:165] Memory required for data: 1936180400\nI0818 15:07:31.934180 21769 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:31.934187 21769 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:31.934200 21769 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0818 15:07:31.934208 21769 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0818 15:07:31.934217 21769 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0818 15:07:31.934269 21769 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:31.934283 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934289 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934293 21769 net.cpp:165] Memory required for data: 1942734000\nI0818 15:07:31.934298 21769 layer_factory.hpp:77] Creating layer 
L2_b18_cbr1_conv\nI0818 15:07:31.934312 21769 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0818 15:07:31.934319 21769 net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0818 15:07:31.934329 21769 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0818 15:07:31.934859 21769 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0818 15:07:31.934876 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.934881 21769 net.cpp:165] Memory required for data: 1946010800\nI0818 15:07:31.934890 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0818 15:07:31.934903 21769 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0818 15:07:31.934911 21769 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0818 15:07:31.934921 21769 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0818 15:07:31.935185 21769 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0818 15:07:31.935201 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.935206 21769 net.cpp:165] Memory required for data: 1949287600\nI0818 15:07:31.935217 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0818 15:07:31.935226 21769 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0818 15:07:31.935232 21769 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0818 15:07:31.935240 21769 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0818 15:07:31.935300 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0818 15:07:31.935457 21769 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0818 15:07:31.935472 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.935477 21769 net.cpp:165] Memory required for data: 1952564400\nI0818 15:07:31.935485 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0818 15:07:31.935493 21769 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0818 15:07:31.935499 21769 net.cpp:434] L2_b18_cbr1_relu <- L2_b18_cbr1_bn_top\nI0818 15:07:31.935515 21769 net.cpp:395] L2_b18_cbr1_relu -> 
L2_b18_cbr1_bn_top (in-place)\nI0818 15:07:31.935525 21769 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0818 15:07:31.935533 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.935537 21769 net.cpp:165] Memory required for data: 1955841200\nI0818 15:07:31.935542 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0818 15:07:31.935555 21769 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0818 15:07:31.935561 21769 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0818 15:07:31.935570 21769 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0818 15:07:31.936055 21769 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0818 15:07:31.936071 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936076 21769 net.cpp:165] Memory required for data: 1959118000\nI0818 15:07:31.936085 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0818 15:07:31.936097 21769 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0818 15:07:31.936105 21769 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0818 15:07:31.936113 21769 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0818 15:07:31.936378 21769 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0818 15:07:31.936390 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936395 21769 net.cpp:165] Memory required for data: 1962394800\nI0818 15:07:31.936405 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0818 15:07:31.936424 21769 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0818 15:07:31.936431 21769 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0818 15:07:31.936439 21769 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0818 15:07:31.936497 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0818 15:07:31.936658 21769 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0818 15:07:31.936671 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936676 21769 net.cpp:165] Memory required for data: 1965671600\nI0818 15:07:31.936691 
21769 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0818 15:07:31.936704 21769 net.cpp:100] Creating Layer L2_b18_sum_eltwise\nI0818 15:07:31.936712 21769 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0818 15:07:31.936718 21769 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0818 15:07:31.936729 21769 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0818 15:07:31.936758 21769 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0818 15:07:31.936767 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936772 21769 net.cpp:165] Memory required for data: 1968948400\nI0818 15:07:31.936777 21769 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0818 15:07:31.936785 21769 net.cpp:100] Creating Layer L2_b18_relu\nI0818 15:07:31.936790 21769 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0818 15:07:31.936801 21769 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0818 15:07:31.936810 21769 net.cpp:150] Setting up L2_b18_relu\nI0818 15:07:31.936817 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936822 21769 net.cpp:165] Memory required for data: 1972225200\nI0818 15:07:31.936827 21769 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:31.936835 21769 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:31.936839 21769 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0818 15:07:31.936846 21769 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0818 15:07:31.936856 21769 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0818 15:07:31.936908 21769 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:31.936919 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:31.936926 21769 net.cpp:157] Top shape: 100 32 16 
16 (819200)\nI0818 15:07:31.936930 21769 net.cpp:165] Memory required for data: 1978778800\nI0818 15:07:31.936935 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 15:07:31.936947 21769 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 15:07:31.936954 21769 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0818 15:07:31.936965 21769 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 15:07:31.937469 21769 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0818 15:07:31.937484 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.937489 21769 net.cpp:165] Memory required for data: 1979598000\nI0818 15:07:31.937572 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 15:07:31.937587 21769 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 15:07:31.937593 21769 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 15:07:31.937602 21769 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 15:07:31.937888 21769 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 15:07:31.937903 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.937908 21769 net.cpp:165] Memory required for data: 1980417200\nI0818 15:07:31.937918 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:31.937927 21769 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 15:07:31.937933 21769 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 15:07:31.937942 21769 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.938011 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:31.938179 21769 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 15:07:31.938191 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.938196 21769 net.cpp:165] Memory required for data: 1981236400\nI0818 15:07:31.938205 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 15:07:31.938213 21769 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 15:07:31.938220 21769 
net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 15:07:31.938227 21769 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:31.938239 21769 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 15:07:31.938247 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.938251 21769 net.cpp:165] Memory required for data: 1982055600\nI0818 15:07:31.938256 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 15:07:31.938267 21769 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0818 15:07:31.938277 21769 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 15:07:31.938284 21769 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 15:07:31.938786 21769 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 15:07:31.938802 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.938807 21769 net.cpp:165] Memory required for data: 1982874800\nI0818 15:07:31.938815 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 15:07:31.938827 21769 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 15:07:31.938834 21769 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 15:07:31.938843 21769 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 15:07:31.939116 21769 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 15:07:31.939131 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.939136 21769 net.cpp:165] Memory required for data: 1983694000\nI0818 15:07:31.939146 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:31.939160 21769 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 15:07:31.939167 21769 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 15:07:31.939175 21769 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 15:07:31.939235 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:31.939395 21769 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 15:07:31.939409 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.939414 
21769 net.cpp:165] Memory required for data: 1984513200\nI0818 15:07:31.939424 21769 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 15:07:31.939435 21769 net.cpp:100] Creating Layer L3_b1_pool\nI0818 15:07:31.939442 21769 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0818 15:07:31.939455 21769 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 15:07:31.939491 21769 net.cpp:150] Setting up L3_b1_pool\nI0818 15:07:31.939501 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.939505 21769 net.cpp:165] Memory required for data: 1985332400\nI0818 15:07:31.939510 21769 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 15:07:31.939522 21769 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 15:07:31.939528 21769 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 15:07:31.939535 21769 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 15:07:31.939543 21769 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 15:07:31.939576 21769 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 15:07:31.939585 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.939589 21769 net.cpp:165] Memory required for data: 1986151600\nI0818 15:07:31.939594 21769 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 15:07:31.939602 21769 net.cpp:100] Creating Layer L3_b1_relu\nI0818 15:07:31.939607 21769 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 15:07:31.939620 21769 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 15:07:31.939630 21769 net.cpp:150] Setting up L3_b1_relu\nI0818 15:07:31.939636 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.939641 21769 net.cpp:165] Memory required for data: 1986970800\nI0818 15:07:31.939653 21769 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 15:07:31.939663 21769 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 15:07:31.939671 21769 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 15:07:31.940887 21769 net.cpp:150] 
Setting up L3_b1_zeros\nI0818 15:07:31.940907 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:31.940912 21769 net.cpp:165] Memory required for data: 1987790000\nI0818 15:07:31.940917 21769 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 15:07:31.940930 21769 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 15:07:31.940937 21769 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 15:07:31.940945 21769 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 15:07:31.940953 21769 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 15:07:31.940996 21769 net.cpp:150] Setting up L3_b1_concat0\nI0818 15:07:31.941009 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.941015 21769 net.cpp:165] Memory required for data: 1989428400\nI0818 15:07:31.941020 21769 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:31.941027 21769 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:31.941033 21769 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 15:07:31.941043 21769 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:31.941053 21769 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:31.941104 21769 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:31.941119 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.941126 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.941131 21769 net.cpp:165] Memory required for data: 1992705200\nI0818 15:07:31.941136 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 15:07:31.941148 21769 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 15:07:31.941155 21769 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:31.941164 21769 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 15:07:31.943181 21769 net.cpp:150] Setting up 
L3_b2_cbr1_conv\nI0818 15:07:31.943199 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.943204 21769 net.cpp:165] Memory required for data: 1994343600\nI0818 15:07:31.943214 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 15:07:31.943226 21769 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 15:07:31.943234 21769 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 15:07:31.943245 21769 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 15:07:31.943519 21769 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 15:07:31.943533 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.943538 21769 net.cpp:165] Memory required for data: 1995982000\nI0818 15:07:31.943548 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:31.943557 21769 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 15:07:31.943564 21769 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 15:07:31.943575 21769 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.943637 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:31.943806 21769 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 15:07:31.943820 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.943826 21769 net.cpp:165] Memory required for data: 1997620400\nI0818 15:07:31.943835 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 15:07:31.943843 21769 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 15:07:31.943850 21769 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 15:07:31.943861 21769 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:31.943871 21769 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 15:07:31.943877 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.943882 21769 net.cpp:165] Memory required for data: 1999258800\nI0818 15:07:31.943895 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 15:07:31.943912 21769 net.cpp:100] 
Creating Layer L3_b2_cbr2_conv\nI0818 15:07:31.943917 21769 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 15:07:31.943927 21769 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 15:07:31.944959 21769 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 15:07:31.944974 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.944979 21769 net.cpp:165] Memory required for data: 2000897200\nI0818 15:07:31.944989 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0818 15:07:31.945001 21769 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 15:07:31.945008 21769 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 15:07:31.945016 21769 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 15:07:31.945292 21769 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 15:07:31.945307 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945312 21769 net.cpp:165] Memory required for data: 2002535600\nI0818 15:07:31.945322 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:31.945333 21769 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 15:07:31.945341 21769 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 15:07:31.945351 21769 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 15:07:31.945411 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:31.945574 21769 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 15:07:31.945586 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945592 21769 net.cpp:165] Memory required for data: 2004174000\nI0818 15:07:31.945601 21769 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 15:07:31.945611 21769 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 15:07:31.945616 21769 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 15:07:31.945623 21769 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:31.945634 21769 net.cpp:408] L3_b2_sum_eltwise -> 
L3_b2_sum_eltwise_top\nI0818 15:07:31.945668 21769 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 15:07:31.945677 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945688 21769 net.cpp:165] Memory required for data: 2005812400\nI0818 15:07:31.945694 21769 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 15:07:31.945708 21769 net.cpp:100] Creating Layer L3_b2_relu\nI0818 15:07:31.945713 21769 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 15:07:31.945721 21769 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 15:07:31.945730 21769 net.cpp:150] Setting up L3_b2_relu\nI0818 15:07:31.945737 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945742 21769 net.cpp:165] Memory required for data: 2007450800\nI0818 15:07:31.945747 21769 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:31.945755 21769 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:31.945760 21769 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 15:07:31.945767 21769 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:31.945776 21769 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:31.945827 21769 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:31.945838 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945845 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.945850 21769 net.cpp:165] Memory required for data: 2010727600\nI0818 15:07:31.945855 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 15:07:31.945869 21769 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 15:07:31.945876 21769 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:31.945885 21769 net.cpp:408] L3_b3_cbr1_conv -> 
L3_b3_cbr1_conv_top\nI0818 15:07:31.946924 21769 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 15:07:31.946939 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.946944 21769 net.cpp:165] Memory required for data: 2012366000\nI0818 15:07:31.946954 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 15:07:31.946965 21769 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 15:07:31.946972 21769 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 15:07:31.946985 21769 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 15:07:31.947254 21769 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 15:07:31.947268 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.947273 21769 net.cpp:165] Memory required for data: 2014004400\nI0818 15:07:31.947283 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:31.947291 21769 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 15:07:31.947299 21769 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 15:07:31.947310 21769 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.947376 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:31.947538 21769 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 15:07:31.947552 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.947557 21769 net.cpp:165] Memory required for data: 2015642800\nI0818 15:07:31.947566 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 15:07:31.947574 21769 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 15:07:31.947580 21769 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 15:07:31.947590 21769 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:31.947602 21769 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 15:07:31.947608 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.947613 21769 net.cpp:165] Memory required for data: 2017281200\nI0818 15:07:31.947618 21769 layer_factory.hpp:77] 
Creating layer L3_b3_cbr2_conv\nI0818 15:07:31.947633 21769 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 15:07:31.947638 21769 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 15:07:31.947648 21769 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 15:07:31.948837 21769 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 15:07:31.948855 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.948860 21769 net.cpp:165] Memory required for data: 2018919600\nI0818 15:07:31.948869 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 15:07:31.948879 21769 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 15:07:31.948886 21769 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 15:07:31.948899 21769 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 15:07:31.949167 21769 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 15:07:31.949180 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949185 21769 net.cpp:165] Memory required for data: 2020558000\nI0818 15:07:31.949195 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:31.949208 21769 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 15:07:31.949214 21769 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 15:07:31.949223 21769 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 15:07:31.949282 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:31.949450 21769 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 15:07:31.949465 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949470 21769 net.cpp:165] Memory required for data: 2022196400\nI0818 15:07:31.949478 21769 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 15:07:31.949487 21769 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 15:07:31.949494 21769 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 15:07:31.949501 21769 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 
15:07:31.949512 21769 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 15:07:31.949548 21769 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 15:07:31.949556 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949568 21769 net.cpp:165] Memory required for data: 2023834800\nI0818 15:07:31.949574 21769 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 15:07:31.949589 21769 net.cpp:100] Creating Layer L3_b3_relu\nI0818 15:07:31.949594 21769 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0818 15:07:31.949602 21769 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 15:07:31.949611 21769 net.cpp:150] Setting up L3_b3_relu\nI0818 15:07:31.949618 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949623 21769 net.cpp:165] Memory required for data: 2025473200\nI0818 15:07:31.949628 21769 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:31.949635 21769 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:31.949640 21769 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 15:07:31.949648 21769 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:31.949657 21769 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:31.949715 21769 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:31.949729 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949735 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.949740 21769 net.cpp:165] Memory required for data: 2028750000\nI0818 15:07:31.949745 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 15:07:31.949759 21769 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 15:07:31.949766 21769 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 
15:07:31.949775 21769 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 15:07:31.950801 21769 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0818 15:07:31.950816 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.950821 21769 net.cpp:165] Memory required for data: 2030388400\nI0818 15:07:31.950831 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 15:07:31.950844 21769 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 15:07:31.950850 21769 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0818 15:07:31.950861 21769 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 15:07:31.951133 21769 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 15:07:31.951145 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.951150 21769 net.cpp:165] Memory required for data: 2032026800\nI0818 15:07:31.951161 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:31.951170 21769 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 15:07:31.951176 21769 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 15:07:31.951187 21769 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.951248 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:31.951407 21769 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 15:07:31.951421 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.951426 21769 net.cpp:165] Memory required for data: 2033665200\nI0818 15:07:31.951434 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 15:07:31.951442 21769 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 15:07:31.951448 21769 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 15:07:31.951458 21769 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:31.951469 21769 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 15:07:31.951477 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.951480 21769 net.cpp:165] Memory required for data: 
2035303600\nI0818 15:07:31.951485 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 15:07:31.951499 21769 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0818 15:07:31.951506 21769 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 15:07:31.951517 21769 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 15:07:31.952558 21769 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 15:07:31.952579 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.952585 21769 net.cpp:165] Memory required for data: 2036942000\nI0818 15:07:31.952594 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 15:07:31.952603 21769 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 15:07:31.952610 21769 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 15:07:31.952623 21769 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 15:07:31.952908 21769 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 15:07:31.952926 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.952931 21769 net.cpp:165] Memory required for data: 2038580400\nI0818 15:07:31.952942 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:31.952950 21769 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 15:07:31.952957 21769 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 15:07:31.952965 21769 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 15:07:31.953025 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:31.953184 21769 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 15:07:31.953198 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.953203 21769 net.cpp:165] Memory required for data: 2040218800\nI0818 15:07:31.953212 21769 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 15:07:31.953223 21769 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 15:07:31.953230 21769 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 15:07:31.953238 21769 net.cpp:434] 
L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:31.953245 21769 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0818 15:07:31.953281 21769 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 15:07:31.953291 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.953295 21769 net.cpp:165] Memory required for data: 2041857200\nI0818 15:07:31.953301 21769 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 15:07:31.953308 21769 net.cpp:100] Creating Layer L3_b4_relu\nI0818 15:07:31.953315 21769 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 15:07:31.953321 21769 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 15:07:31.953330 21769 net.cpp:150] Setting up L3_b4_relu\nI0818 15:07:31.953337 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.953341 21769 net.cpp:165] Memory required for data: 2043495600\nI0818 15:07:31.953346 21769 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:31.953353 21769 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:31.953359 21769 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 15:07:31.953369 21769 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:31.953379 21769 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:31.953426 21769 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:31.953438 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.953445 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.953449 21769 net.cpp:165] Memory required for data: 2046772400\nI0818 15:07:31.953454 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 15:07:31.953469 21769 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 15:07:31.953475 21769 net.cpp:434] 
L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:31.953485 21769 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0818 15:07:31.954514 21769 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 15:07:31.954529 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.954533 21769 net.cpp:165] Memory required for data: 2048410800\nI0818 15:07:31.954542 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 15:07:31.954552 21769 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 15:07:31.954567 21769 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 15:07:31.954577 21769 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 15:07:31.954859 21769 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 15:07:31.954872 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.954877 21769 net.cpp:165] Memory required for data: 2050049200\nI0818 15:07:31.954888 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:31.954897 21769 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 15:07:31.954903 21769 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 15:07:31.954915 21769 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.954978 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:31.955142 21769 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 15:07:31.955154 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.955159 21769 net.cpp:165] Memory required for data: 2051687600\nI0818 15:07:31.955168 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 15:07:31.955179 21769 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 15:07:31.955186 21769 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 15:07:31.955193 21769 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:31.955204 21769 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 15:07:31.955210 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 
15:07:31.955214 21769 net.cpp:165] Memory required for data: 2053326000\nI0818 15:07:31.955219 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0818 15:07:31.955235 21769 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 15:07:31.955241 21769 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 15:07:31.955252 21769 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 15:07:31.957274 21769 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 15:07:31.957293 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.957298 21769 net.cpp:165] Memory required for data: 2054964400\nI0818 15:07:31.957309 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 15:07:31.957320 21769 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 15:07:31.957329 21769 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 15:07:31.957340 21769 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 15:07:31.957612 21769 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 15:07:31.957624 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.957629 21769 net.cpp:165] Memory required for data: 2056602800\nI0818 15:07:31.957660 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:31.957671 21769 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 15:07:31.957679 21769 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 15:07:31.957695 21769 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 15:07:31.957762 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:31.957928 21769 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 15:07:31.957942 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.957947 21769 net.cpp:165] Memory required for data: 2058241200\nI0818 15:07:31.957957 21769 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 15:07:31.957969 21769 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 15:07:31.957976 21769 net.cpp:434] L3_b5_sum_eltwise <- 
L3_b5_cbr2_bn_top\nI0818 15:07:31.957983 21769 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:31.957991 21769 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 15:07:31.958029 21769 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 15:07:31.958041 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.958046 21769 net.cpp:165] Memory required for data: 2059879600\nI0818 15:07:31.958051 21769 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 15:07:31.958060 21769 net.cpp:100] Creating Layer L3_b5_relu\nI0818 15:07:31.958065 21769 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 15:07:31.958076 21769 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 15:07:31.958093 21769 net.cpp:150] Setting up L3_b5_relu\nI0818 15:07:31.958101 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.958106 21769 net.cpp:165] Memory required for data: 2061518000\nI0818 15:07:31.958111 21769 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:31.958120 21769 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:31.958125 21769 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 15:07:31.958133 21769 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:31.958142 21769 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:31.958192 21769 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:31.958204 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.958211 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.958216 21769 net.cpp:165] Memory required for data: 2064794800\nI0818 15:07:31.958221 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 15:07:31.958232 21769 net.cpp:100] Creating Layer 
L3_b6_cbr1_conv\nI0818 15:07:31.958240 21769 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:31.958251 21769 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 15:07:31.959283 21769 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 15:07:31.959300 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.959305 21769 net.cpp:165] Memory required for data: 2066433200\nI0818 15:07:31.959313 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 15:07:31.959326 21769 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 15:07:31.959332 21769 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 15:07:31.959342 21769 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 15:07:31.959617 21769 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 15:07:31.959631 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.959636 21769 net.cpp:165] Memory required for data: 2068071600\nI0818 15:07:31.959646 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:31.959656 21769 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 15:07:31.959661 21769 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 15:07:31.959669 21769 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.959739 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:31.959902 21769 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 15:07:31.959919 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.959925 21769 net.cpp:165] Memory required for data: 2069710000\nI0818 15:07:31.959934 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 15:07:31.959942 21769 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 15:07:31.959949 21769 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 15:07:31.959956 21769 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:31.959965 21769 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 15:07:31.959972 
21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.959977 21769 net.cpp:165] Memory required for data: 2071348400\nI0818 15:07:31.959982 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 15:07:31.960000 21769 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 15:07:31.960006 21769 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 15:07:31.960016 21769 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 15:07:31.961046 21769 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 15:07:31.961061 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961066 21769 net.cpp:165] Memory required for data: 2072986800\nI0818 15:07:31.961076 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 15:07:31.961087 21769 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 15:07:31.961094 21769 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 15:07:31.961109 21769 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 15:07:31.961382 21769 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 15:07:31.961396 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961401 21769 net.cpp:165] Memory required for data: 2074625200\nI0818 15:07:31.961411 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:31.961421 21769 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 15:07:31.961426 21769 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 15:07:31.961438 21769 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 15:07:31.961500 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:31.961663 21769 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 15:07:31.961676 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961681 21769 net.cpp:165] Memory required for data: 2076263600\nI0818 15:07:31.961696 21769 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 15:07:31.961709 21769 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 
15:07:31.961716 21769 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 15:07:31.961724 21769 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:31.961731 21769 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 15:07:31.961768 21769 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 15:07:31.961781 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961786 21769 net.cpp:165] Memory required for data: 2077902000\nI0818 15:07:31.961791 21769 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 15:07:31.961800 21769 net.cpp:100] Creating Layer L3_b6_relu\nI0818 15:07:31.961805 21769 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 15:07:31.961817 21769 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 15:07:31.961827 21769 net.cpp:150] Setting up L3_b6_relu\nI0818 15:07:31.961833 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961838 21769 net.cpp:165] Memory required for data: 2079540400\nI0818 15:07:31.961843 21769 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:31.961849 21769 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:31.961855 21769 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 15:07:31.961863 21769 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:31.961872 21769 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:31.961921 21769 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:31.961933 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961941 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.961944 21769 net.cpp:165] Memory required for data: 2082817200\nI0818 15:07:31.961949 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 
15:07:31.961961 21769 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 15:07:31.961966 21769 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:31.961978 21769 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 15:07:31.963008 21769 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 15:07:31.963023 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.963028 21769 net.cpp:165] Memory required for data: 2084455600\nI0818 15:07:31.963037 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 15:07:31.963049 21769 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 15:07:31.963057 21769 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 15:07:31.963065 21769 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 15:07:31.963349 21769 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 15:07:31.963362 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.963367 21769 net.cpp:165] Memory required for data: 2086094000\nI0818 15:07:31.963385 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:31.963394 21769 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 15:07:31.963402 21769 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 15:07:31.963408 21769 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.963472 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:31.963639 21769 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 15:07:31.963652 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.963657 21769 net.cpp:165] Memory required for data: 2087732400\nI0818 15:07:31.963666 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 15:07:31.963675 21769 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 15:07:31.963680 21769 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 15:07:31.963695 21769 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:31.963703 21769 net.cpp:150] 
Setting up L3_b7_cbr1_relu\nI0818 15:07:31.963711 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.963716 21769 net.cpp:165] Memory required for data: 2089370800\nI0818 15:07:31.963721 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 15:07:31.963734 21769 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 15:07:31.963742 21769 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 15:07:31.963752 21769 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 15:07:31.964798 21769 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 15:07:31.964813 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.964818 21769 net.cpp:165] Memory required for data: 2091009200\nI0818 15:07:31.964828 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 15:07:31.964840 21769 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 15:07:31.964848 21769 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 15:07:31.964855 21769 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 15:07:31.965127 21769 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 15:07:31.965142 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965147 21769 net.cpp:165] Memory required for data: 2092647600\nI0818 15:07:31.965157 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:31.965168 21769 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 15:07:31.965175 21769 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 15:07:31.965183 21769 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 15:07:31.965250 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:31.965412 21769 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 15:07:31.965426 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965431 21769 net.cpp:165] Memory required for data: 2094286000\nI0818 15:07:31.965440 21769 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 15:07:31.965452 21769 net.cpp:100] 
Creating Layer L3_b7_sum_eltwise\nI0818 15:07:31.965459 21769 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0818 15:07:31.965466 21769 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:31.965474 21769 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 15:07:31.965510 21769 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 15:07:31.965519 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965524 21769 net.cpp:165] Memory required for data: 2095924400\nI0818 15:07:31.965530 21769 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 15:07:31.965538 21769 net.cpp:100] Creating Layer L3_b7_relu\nI0818 15:07:31.965543 21769 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 15:07:31.965553 21769 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 15:07:31.965564 21769 net.cpp:150] Setting up L3_b7_relu\nI0818 15:07:31.965570 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965575 21769 net.cpp:165] Memory required for data: 2097562800\nI0818 15:07:31.965580 21769 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:31.965595 21769 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:31.965600 21769 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 15:07:31.965608 21769 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:31.965618 21769 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:31.965670 21769 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:31.965687 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965695 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.965700 21769 net.cpp:165] Memory required for data: 2100839600\nI0818 15:07:31.965705 21769 layer_factory.hpp:77] 
Creating layer L3_b8_cbr1_conv\nI0818 15:07:31.965719 21769 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0818 15:07:31.965728 21769 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:31.965736 21769 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 15:07:31.966758 21769 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 15:07:31.966773 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.966778 21769 net.cpp:165] Memory required for data: 2102478000\nI0818 15:07:31.966789 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 15:07:31.966800 21769 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 15:07:31.966807 21769 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 15:07:31.966816 21769 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 15:07:31.967089 21769 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 15:07:31.967103 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.967108 21769 net.cpp:165] Memory required for data: 2104116400\nI0818 15:07:31.967118 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:31.967126 21769 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 15:07:31.967133 21769 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 15:07:31.967140 21769 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:31.967206 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:31.967368 21769 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 15:07:31.967382 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.967387 21769 net.cpp:165] Memory required for data: 2105754800\nI0818 15:07:31.967396 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 15:07:31.967404 21769 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 15:07:31.967411 21769 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 15:07:31.967422 21769 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 
15:07:31.967432 21769 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 15:07:31.967438 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.967443 21769 net.cpp:165] Memory required for data: 2107393200\nI0818 15:07:31.967447 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 15:07:31.967465 21769 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 15:07:31.967471 21769 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 15:07:31.967480 21769 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 15:07:31.968513 21769 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 15:07:31.968528 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.968533 21769 net.cpp:165] Memory required for data: 2109031600\nI0818 15:07:31.968541 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 15:07:31.968554 21769 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 15:07:31.968561 21769 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 15:07:31.968569 21769 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 15:07:31.968848 21769 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 15:07:31.968863 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.968868 21769 net.cpp:165] Memory required for data: 2110670000\nI0818 15:07:31.968878 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:31.968896 21769 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 15:07:31.968904 21769 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 15:07:31.968911 21769 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 15:07:31.968974 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:31.969139 21769 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 15:07:31.969152 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.969157 21769 net.cpp:165] Memory required for data: 2112308400\nI0818 15:07:31.969166 21769 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 
15:07:31.969179 21769 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 15:07:31.969187 21769 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 15:07:31.969194 21769 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:31.969205 21769 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 15:07:31.969239 21769 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 15:07:31.969251 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.969255 21769 net.cpp:165] Memory required for data: 2113946800\nI0818 15:07:31.969261 21769 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 15:07:31.969272 21769 net.cpp:100] Creating Layer L3_b8_relu\nI0818 15:07:31.969280 21769 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 15:07:31.969286 21769 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 15:07:31.969295 21769 net.cpp:150] Setting up L3_b8_relu\nI0818 15:07:31.969302 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.969307 21769 net.cpp:165] Memory required for data: 2115585200\nI0818 15:07:31.969312 21769 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:31.969319 21769 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:31.969324 21769 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 15:07:31.969331 21769 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:31.969341 21769 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:31.969391 21769 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:31.969403 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.969409 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.969414 21769 net.cpp:165] Memory required for data: 2118862000\nI0818 
15:07:31.969419 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 15:07:31.969434 21769 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 15:07:31.969440 21769 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:31.969449 21769 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 15:07:31.971485 21769 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 15:07:31.971506 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.971513 21769 net.cpp:165] Memory required for data: 2120500400\nI0818 15:07:31.971521 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 15:07:31.971532 21769 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 15:07:31.971539 21769 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 15:07:31.971550 21769 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 15:07:31.971837 21769 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 15:07:31.971850 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.971855 21769 net.cpp:165] Memory required for data: 2122138800\nI0818 15:07:31.971866 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:31.971879 21769 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 15:07:31.971885 21769 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 15:07:31.971894 21769 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.971956 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:31.972131 21769 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 15:07:31.972146 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.972151 21769 net.cpp:165] Memory required for data: 2123777200\nI0818 15:07:31.972159 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 15:07:31.972167 21769 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 15:07:31.972173 21769 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 15:07:31.972184 21769 net.cpp:395] 
L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:31.972194 21769 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0818 15:07:31.972201 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.972206 21769 net.cpp:165] Memory required for data: 2125415600\nI0818 15:07:31.972211 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 15:07:31.972224 21769 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 15:07:31.972231 21769 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0818 15:07:31.972240 21769 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 15:07:31.973332 21769 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 15:07:31.973348 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.973353 21769 net.cpp:165] Memory required for data: 2127054000\nI0818 15:07:31.973363 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 15:07:31.973376 21769 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 15:07:31.973383 21769 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 15:07:31.973392 21769 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 15:07:31.973670 21769 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 15:07:31.973688 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.973695 21769 net.cpp:165] Memory required for data: 2128692400\nI0818 15:07:31.973704 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:31.973713 21769 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 15:07:31.973721 21769 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 15:07:31.973727 21769 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 15:07:31.973791 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:31.973958 21769 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 15:07:31.973970 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.973976 21769 net.cpp:165] Memory required for data: 2130330800\nI0818 15:07:31.973985 21769 
layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 15:07:31.973994 21769 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0818 15:07:31.974001 21769 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 15:07:31.974009 21769 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:31.974020 21769 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 15:07:31.974054 21769 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 15:07:31.974067 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.974072 21769 net.cpp:165] Memory required for data: 2131969200\nI0818 15:07:31.974077 21769 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 15:07:31.974086 21769 net.cpp:100] Creating Layer L3_b9_relu\nI0818 15:07:31.974092 21769 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 15:07:31.974098 21769 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 15:07:31.974107 21769 net.cpp:150] Setting up L3_b9_relu\nI0818 15:07:31.974114 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.974119 21769 net.cpp:165] Memory required for data: 2133607600\nI0818 15:07:31.974123 21769 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:31.974133 21769 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:31.974140 21769 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0818 15:07:31.974148 21769 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0818 15:07:31.974158 21769 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0818 15:07:31.974218 21769 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:31.974231 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.974237 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.974242 21769 
net.cpp:165] Memory required for data: 2136884400\nI0818 15:07:31.974247 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_conv\nI0818 15:07:31.974258 21769 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0818 15:07:31.974264 21769 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0818 15:07:31.974277 21769 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0818 15:07:31.975304 21769 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0818 15:07:31.975319 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.975324 21769 net.cpp:165] Memory required for data: 2138522800\nI0818 15:07:31.975334 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0818 15:07:31.975343 21769 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0818 15:07:31.975349 21769 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0818 15:07:31.975360 21769 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0818 15:07:31.976649 21769 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0818 15:07:31.976666 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.976671 21769 net.cpp:165] Memory required for data: 2140161200\nI0818 15:07:31.976688 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0818 15:07:31.976699 21769 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0818 15:07:31.976706 21769 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0818 15:07:31.976713 21769 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.976781 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0818 15:07:31.976944 21769 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0818 15:07:31.976958 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.976963 21769 net.cpp:165] Memory required for data: 2141799600\nI0818 15:07:31.976972 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0818 15:07:31.976980 21769 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0818 15:07:31.976986 21769 net.cpp:434] 
L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0818 15:07:31.976994 21769 net.cpp:395] L3_b10_cbr1_relu -> L3_b10_cbr1_bn_top (in-place)\nI0818 15:07:31.977006 21769 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0818 15:07:31.977013 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.977018 21769 net.cpp:165] Memory required for data: 2143438000\nI0818 15:07:31.977022 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0818 15:07:31.977035 21769 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0818 15:07:31.977043 21769 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0818 15:07:31.977052 21769 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0818 15:07:31.978085 21769 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0818 15:07:31.978101 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.978106 21769 net.cpp:165] Memory required for data: 2145076400\nI0818 15:07:31.978114 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0818 15:07:31.978127 21769 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0818 15:07:31.978134 21769 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0818 15:07:31.978142 21769 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0818 15:07:31.978412 21769 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0818 15:07:31.978425 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.978430 21769 net.cpp:165] Memory required for data: 2146714800\nI0818 15:07:31.978441 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0818 15:07:31.978453 21769 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0818 15:07:31.978461 21769 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0818 15:07:31.978468 21769 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0818 15:07:31.978534 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0818 15:07:31.978713 21769 net.cpp:150] Setting up L3_b10_cbr2_scale\nI0818 15:07:31.978727 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 
15:07:31.978734 21769 net.cpp:165] Memory required for data: 2148353200\nI0818 15:07:31.978742 21769 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0818 15:07:31.978754 21769 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0818 15:07:31.978761 21769 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0818 15:07:31.978770 21769 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0818 15:07:31.978780 21769 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0818 15:07:31.978814 21769 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0818 15:07:31.978826 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.978832 21769 net.cpp:165] Memory required for data: 2149991600\nI0818 15:07:31.978837 21769 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0818 15:07:31.978848 21769 net.cpp:100] Creating Layer L3_b10_relu\nI0818 15:07:31.978854 21769 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0818 15:07:31.978862 21769 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0818 15:07:31.978870 21769 net.cpp:150] Setting up L3_b10_relu\nI0818 15:07:31.978878 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.978883 21769 net.cpp:165] Memory required for data: 2151630000\nI0818 15:07:31.978888 21769 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:31.978894 21769 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:31.978899 21769 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0818 15:07:31.978906 21769 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0818 15:07:31.978916 21769 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0818 15:07:31.978966 21769 net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:31.978978 21769 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0818 15:07:31.978984 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.978989 21769 net.cpp:165] Memory required for data: 2154906800\nI0818 15:07:31.978994 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0818 15:07:31.979008 21769 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0818 15:07:31.979014 21769 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0818 15:07:31.979024 21769 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0818 15:07:31.980048 21769 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0818 15:07:31.980063 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.980068 21769 net.cpp:165] Memory required for data: 2156545200\nI0818 15:07:31.980077 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0818 15:07:31.980089 21769 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0818 15:07:31.980096 21769 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0818 15:07:31.980108 21769 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0818 15:07:31.980376 21769 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0818 15:07:31.980389 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.980394 21769 net.cpp:165] Memory required for data: 2158183600\nI0818 15:07:31.980404 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0818 15:07:31.980413 21769 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0818 15:07:31.980419 21769 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0818 15:07:31.980427 21769 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.980489 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0818 15:07:31.980649 21769 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0818 15:07:31.980662 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.980667 21769 net.cpp:165] Memory required for data: 2159822000\nI0818 15:07:31.980676 21769 layer_factory.hpp:77] Creating layer 
L3_b11_cbr1_relu\nI0818 15:07:31.980701 21769 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0818 15:07:31.980708 21769 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0818 15:07:31.980720 21769 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0818 15:07:31.980729 21769 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0818 15:07:31.980737 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.980741 21769 net.cpp:165] Memory required for data: 2161460400\nI0818 15:07:31.980746 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0818 15:07:31.980762 21769 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0818 15:07:31.980769 21769 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0818 15:07:31.980777 21769 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0818 15:07:31.981803 21769 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0818 15:07:31.981818 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.981823 21769 net.cpp:165] Memory required for data: 2163098800\nI0818 15:07:31.981833 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0818 15:07:31.981845 21769 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0818 15:07:31.981853 21769 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0818 15:07:31.981860 21769 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0818 15:07:31.982137 21769 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0818 15:07:31.982151 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982156 21769 net.cpp:165] Memory required for data: 2164737200\nI0818 15:07:31.982165 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0818 15:07:31.982177 21769 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0818 15:07:31.982184 21769 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0818 15:07:31.982192 21769 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top (in-place)\nI0818 15:07:31.982255 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0818 
15:07:31.982414 21769 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0818 15:07:31.982427 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982432 21769 net.cpp:165] Memory required for data: 2166375600\nI0818 15:07:31.982441 21769 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0818 15:07:31.982455 21769 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0818 15:07:31.982461 21769 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0818 15:07:31.982467 21769 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0818 15:07:31.982478 21769 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0818 15:07:31.982512 21769 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0818 15:07:31.982522 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982525 21769 net.cpp:165] Memory required for data: 2168014000\nI0818 15:07:31.982532 21769 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0818 15:07:31.982542 21769 net.cpp:100] Creating Layer L3_b11_relu\nI0818 15:07:31.982547 21769 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0818 15:07:31.982555 21769 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0818 15:07:31.982564 21769 net.cpp:150] Setting up L3_b11_relu\nI0818 15:07:31.982571 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982575 21769 net.cpp:165] Memory required for data: 2169652400\nI0818 15:07:31.982580 21769 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 15:07:31.982587 21769 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 15:07:31.982592 21769 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0818 15:07:31.982599 21769 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0818 15:07:31.982609 21769 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> 
L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0818 15:07:31.982658 21769 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 15:07:31.982671 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982689 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.982695 21769 net.cpp:165] Memory required for data: 2172929200\nI0818 15:07:31.982700 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0818 15:07:31.982714 21769 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0818 15:07:31.982722 21769 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0818 15:07:31.982731 21769 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0818 15:07:31.983764 21769 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0818 15:07:31.983779 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.983784 21769 net.cpp:165] Memory required for data: 2174567600\nI0818 15:07:31.983793 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0818 15:07:31.983808 21769 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0818 15:07:31.983815 21769 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0818 15:07:31.983826 21769 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0818 15:07:31.984097 21769 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0818 15:07:31.984110 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.984115 21769 net.cpp:165] Memory required for data: 2176206000\nI0818 15:07:31.984125 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0818 15:07:31.984134 21769 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0818 15:07:31.984140 21769 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0818 15:07:31.984151 21769 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.984211 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0818 15:07:31.984372 21769 net.cpp:150] Setting up L3_b12_cbr1_scale\nI0818 15:07:31.984385 21769 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.984390 21769 net.cpp:165] Memory required for data: 2177844400\nI0818 15:07:31.984402 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0818 15:07:31.984411 21769 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0818 15:07:31.984417 21769 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0818 15:07:31.984427 21769 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0818 15:07:31.984437 21769 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0818 15:07:31.984444 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.984449 21769 net.cpp:165] Memory required for data: 2179482800\nI0818 15:07:31.984453 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0818 15:07:31.984467 21769 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0818 15:07:31.984474 21769 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0818 15:07:31.984483 21769 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0818 15:07:31.986507 21769 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0818 15:07:31.986526 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.986531 21769 net.cpp:165] Memory required for data: 2181121200\nI0818 15:07:31.986541 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0818 15:07:31.986553 21769 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0818 15:07:31.986560 21769 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0818 15:07:31.986572 21769 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0818 15:07:31.986855 21769 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0818 15:07:31.986870 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.986874 21769 net.cpp:165] Memory required for data: 2182759600\nI0818 15:07:31.986884 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0818 15:07:31.986893 21769 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0818 15:07:31.986901 21769 net.cpp:434] L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0818 
15:07:31.986907 21769 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0818 15:07:31.986970 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0818 15:07:31.987138 21769 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0818 15:07:31.987151 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.987156 21769 net.cpp:165] Memory required for data: 2184398000\nI0818 15:07:31.987174 21769 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0818 15:07:31.987185 21769 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0818 15:07:31.987190 21769 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0818 15:07:31.987198 21769 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0818 15:07:31.987210 21769 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0818 15:07:31.987244 21769 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0818 15:07:31.987259 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.987264 21769 net.cpp:165] Memory required for data: 2186036400\nI0818 15:07:31.987270 21769 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0818 15:07:31.987277 21769 net.cpp:100] Creating Layer L3_b12_relu\nI0818 15:07:31.987283 21769 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0818 15:07:31.987290 21769 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0818 15:07:31.987300 21769 net.cpp:150] Setting up L3_b12_relu\nI0818 15:07:31.987308 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.987311 21769 net.cpp:165] Memory required for data: 2187674800\nI0818 15:07:31.987316 21769 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:31.987326 21769 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:31.987332 21769 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0818 15:07:31.987340 21769 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> 
L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0818 15:07:31.987350 21769 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0818 15:07:31.987401 21769 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:31.987411 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.987418 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.987423 21769 net.cpp:165] Memory required for data: 2190951600\nI0818 15:07:31.987428 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0818 15:07:31.987439 21769 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0818 15:07:31.987447 21769 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0818 15:07:31.987458 21769 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0818 15:07:31.988484 21769 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0818 15:07:31.988499 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.988504 21769 net.cpp:165] Memory required for data: 2192590000\nI0818 15:07:31.988513 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0818 15:07:31.988523 21769 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0818 15:07:31.988529 21769 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0818 15:07:31.988543 21769 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0818 15:07:31.988831 21769 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0818 15:07:31.988847 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.988852 21769 net.cpp:165] Memory required for data: 2194228400\nI0818 15:07:31.988863 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0818 15:07:31.988873 21769 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0818 15:07:31.988878 21769 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0818 15:07:31.988886 21769 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.988947 21769 layer_factory.hpp:77] Creating 
layer L3_b13_cbr1_scale\nI0818 15:07:31.989112 21769 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0818 15:07:31.989126 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.989131 21769 net.cpp:165] Memory required for data: 2195866800\nI0818 15:07:31.989140 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0818 15:07:31.989151 21769 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0818 15:07:31.989158 21769 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0818 15:07:31.989172 21769 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0818 15:07:31.989183 21769 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0818 15:07:31.989190 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.989195 21769 net.cpp:165] Memory required for data: 2197505200\nI0818 15:07:31.989199 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0818 15:07:31.989213 21769 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0818 15:07:31.989220 21769 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0818 15:07:31.989229 21769 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0818 15:07:31.990254 21769 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0818 15:07:31.990269 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.990274 21769 net.cpp:165] Memory required for data: 2199143600\nI0818 15:07:31.990283 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0818 15:07:31.990298 21769 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0818 15:07:31.990304 21769 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0818 15:07:31.990316 21769 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0818 15:07:31.990588 21769 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0818 15:07:31.990602 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.990607 21769 net.cpp:165] Memory required for data: 2200782000\nI0818 15:07:31.990617 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0818 15:07:31.990625 21769 
net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0818 15:07:31.990631 21769 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0818 15:07:31.990643 21769 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0818 15:07:31.990710 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0818 15:07:31.990875 21769 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0818 15:07:31.990890 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.990895 21769 net.cpp:165] Memory required for data: 2202420400\nI0818 15:07:31.990903 21769 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0818 15:07:31.990912 21769 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0818 15:07:31.990918 21769 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0818 15:07:31.990926 21769 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0818 15:07:31.990936 21769 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0818 15:07:31.990974 21769 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0818 15:07:31.990986 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.990991 21769 net.cpp:165] Memory required for data: 2204058800\nI0818 15:07:31.990996 21769 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0818 15:07:31.991005 21769 net.cpp:100] Creating Layer L3_b13_relu\nI0818 15:07:31.991011 21769 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0818 15:07:31.991021 21769 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0818 15:07:31.991031 21769 net.cpp:150] Setting up L3_b13_relu\nI0818 15:07:31.991039 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.991044 21769 net.cpp:165] Memory required for data: 2205697200\nI0818 15:07:31.991047 21769 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:31.991055 21769 net.cpp:100] Creating Layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:31.991060 21769 net.cpp:434] 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0818 15:07:31.991067 21769 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0818 15:07:31.991076 21769 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0818 15:07:31.991127 21769 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:31.991139 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.991145 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.991150 21769 net.cpp:165] Memory required for data: 2208974000\nI0818 15:07:31.991163 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0818 15:07:31.991174 21769 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0818 15:07:31.991181 21769 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0818 15:07:31.991194 21769 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0818 15:07:31.992225 21769 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0818 15:07:31.992241 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.992246 21769 net.cpp:165] Memory required for data: 2210612400\nI0818 15:07:31.992255 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0818 15:07:31.992264 21769 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0818 15:07:31.992271 21769 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0818 15:07:31.992282 21769 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0818 15:07:31.992552 21769 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0818 15:07:31.992568 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.992574 21769 net.cpp:165] Memory required for data: 2212250800\nI0818 15:07:31.992584 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0818 15:07:31.992594 21769 net.cpp:100] Creating Layer L3_b14_cbr1_scale\nI0818 15:07:31.992599 21769 net.cpp:434] L3_b14_cbr1_scale <- 
L3_b14_cbr1_bn_top\nI0818 15:07:31.992607 21769 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.992666 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0818 15:07:31.992836 21769 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0818 15:07:31.992851 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.992856 21769 net.cpp:165] Memory required for data: 2213889200\nI0818 15:07:31.992866 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0818 15:07:31.992877 21769 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0818 15:07:31.992883 21769 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0818 15:07:31.992913 21769 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0818 15:07:31.992925 21769 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0818 15:07:31.992933 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.992938 21769 net.cpp:165] Memory required for data: 2215527600\nI0818 15:07:31.992943 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0818 15:07:31.992956 21769 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0818 15:07:31.992964 21769 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0818 15:07:31.992972 21769 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0818 15:07:31.994004 21769 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0818 15:07:31.994019 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994024 21769 net.cpp:165] Memory required for data: 2217166000\nI0818 15:07:31.994033 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0818 15:07:31.994045 21769 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0818 15:07:31.994052 21769 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0818 15:07:31.994063 21769 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0818 15:07:31.994333 21769 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0818 15:07:31.994345 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994350 21769 
net.cpp:165] Memory required for data: 2218804400\nI0818 15:07:31.994360 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0818 15:07:31.994369 21769 net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0818 15:07:31.994375 21769 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0818 15:07:31.994386 21769 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0818 15:07:31.994447 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0818 15:07:31.994607 21769 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0818 15:07:31.994621 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994626 21769 net.cpp:165] Memory required for data: 2220442800\nI0818 15:07:31.994634 21769 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0818 15:07:31.994643 21769 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0818 15:07:31.994657 21769 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0818 15:07:31.994664 21769 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0818 15:07:31.994675 21769 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0818 15:07:31.994720 21769 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0818 15:07:31.994734 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994738 21769 net.cpp:165] Memory required for data: 2222081200\nI0818 15:07:31.994745 21769 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0818 15:07:31.994752 21769 net.cpp:100] Creating Layer L3_b14_relu\nI0818 15:07:31.994758 21769 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0818 15:07:31.994768 21769 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0818 15:07:31.994778 21769 net.cpp:150] Setting up L3_b14_relu\nI0818 15:07:31.994786 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994791 21769 net.cpp:165] Memory required for data: 2223719600\nI0818 15:07:31.994796 21769 layer_factory.hpp:77] Creating layer 
L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:31.994870 21769 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:31.994881 21769 net.cpp:434] L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0818 15:07:31.994889 21769 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0818 15:07:31.994899 21769 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0818 15:07:31.994951 21769 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:31.994966 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994972 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.994977 21769 net.cpp:165] Memory required for data: 2226996400\nI0818 15:07:31.994983 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0818 15:07:31.994995 21769 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0818 15:07:31.995002 21769 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0818 15:07:31.995013 21769 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0818 15:07:31.996044 21769 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0818 15:07:31.996060 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.996065 21769 net.cpp:165] Memory required for data: 2228634800\nI0818 15:07:31.996074 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0818 15:07:31.996083 21769 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0818 15:07:31.996090 21769 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0818 15:07:31.996101 21769 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0818 15:07:31.996378 21769 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0818 15:07:31.996392 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.996397 21769 net.cpp:165] Memory required for data: 2230273200\nI0818 15:07:31.996407 21769 layer_factory.hpp:77] 
Creating layer L3_b15_cbr1_scale\nI0818 15:07:31.996417 21769 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0818 15:07:31.996423 21769 net.cpp:434] L3_b15_cbr1_scale <- L3_b15_cbr1_bn_top\nI0818 15:07:31.996430 21769 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.996492 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0818 15:07:31.996650 21769 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0818 15:07:31.996666 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.996672 21769 net.cpp:165] Memory required for data: 2231911600\nI0818 15:07:31.996680 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0818 15:07:31.996695 21769 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0818 15:07:31.996701 21769 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0818 15:07:31.996708 21769 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0818 15:07:31.996718 21769 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0818 15:07:31.996726 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.996737 21769 net.cpp:165] Memory required for data: 2233550000\nI0818 15:07:31.996742 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0818 15:07:31.996757 21769 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0818 15:07:31.996763 21769 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0818 15:07:31.996773 21769 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0818 15:07:31.997798 21769 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0818 15:07:31.997813 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.997818 21769 net.cpp:165] Memory required for data: 2235188400\nI0818 15:07:31.997828 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0818 15:07:31.997840 21769 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0818 15:07:31.997848 21769 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0818 15:07:31.997859 21769 net.cpp:408] L3_b15_cbr2_bn -> 
L3_b15_cbr2_bn_top\nI0818 15:07:31.998126 21769 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0818 15:07:31.998139 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.998144 21769 net.cpp:165] Memory required for data: 2236826800\nI0818 15:07:31.998154 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0818 15:07:31.998163 21769 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0818 15:07:31.998169 21769 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0818 15:07:31.998180 21769 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0818 15:07:31.998241 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0818 15:07:31.998410 21769 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0818 15:07:31.998425 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.998430 21769 net.cpp:165] Memory required for data: 2238465200\nI0818 15:07:31.998438 21769 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0818 15:07:31.998450 21769 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0818 15:07:31.998457 21769 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0818 15:07:31.998464 21769 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0818 15:07:31.998472 21769 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0818 15:07:31.998512 21769 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0818 15:07:31.998523 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.998528 21769 net.cpp:165] Memory required for data: 2240103600\nI0818 15:07:31.998533 21769 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0818 15:07:31.998541 21769 net.cpp:100] Creating Layer L3_b15_relu\nI0818 15:07:31.998548 21769 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0818 15:07:31.998558 21769 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0818 15:07:31.998567 21769 net.cpp:150] Setting up L3_b15_relu\nI0818 15:07:31.998574 21769 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0818 15:07:31.998579 21769 net.cpp:165] Memory required for data: 2241742000\nI0818 15:07:31.998584 21769 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:31.998591 21769 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:31.998596 21769 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0818 15:07:31.998605 21769 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0818 15:07:31.998613 21769 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0818 15:07:31.998664 21769 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:31.998677 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.998688 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:31.998693 21769 net.cpp:165] Memory required for data: 2245018800\nI0818 15:07:31.998699 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0818 15:07:31.998711 21769 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0818 15:07:31.998718 21769 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0818 15:07:31.998738 21769 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0818 15:07:32.000771 21769 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0818 15:07:32.000788 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.000793 21769 net.cpp:165] Memory required for data: 2246657200\nI0818 15:07:32.000803 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0818 15:07:32.000816 21769 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0818 15:07:32.000824 21769 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0818 15:07:32.000833 21769 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0818 15:07:32.001119 21769 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0818 15:07:32.001133 21769 net.cpp:157] Top shape: 
100 64 8 8 (409600)\nI0818 15:07:32.001138 21769 net.cpp:165] Memory required for data: 2248295600\nI0818 15:07:32.001148 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0818 15:07:32.001160 21769 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0818 15:07:32.001168 21769 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0818 15:07:32.001176 21769 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.001243 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0818 15:07:32.001410 21769 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0818 15:07:32.001422 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.001428 21769 net.cpp:165] Memory required for data: 2249934000\nI0818 15:07:32.001437 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0818 15:07:32.001448 21769 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0818 15:07:32.001456 21769 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0818 15:07:32.001466 21769 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.001477 21769 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0818 15:07:32.001483 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.001488 21769 net.cpp:165] Memory required for data: 2251572400\nI0818 15:07:32.001493 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0818 15:07:32.001504 21769 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0818 15:07:32.001510 21769 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0818 15:07:32.001521 21769 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0818 15:07:32.002552 21769 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0818 15:07:32.002566 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.002573 21769 net.cpp:165] Memory required for data: 2253210800\nI0818 15:07:32.002581 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0818 15:07:32.002593 21769 net.cpp:100] Creating Layer L3_b16_cbr2_bn\nI0818 
15:07:32.002601 21769 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0818 15:07:32.002609 21769 net.cpp:408] L3_b16_cbr2_bn -> L3_b16_cbr2_bn_top\nI0818 15:07:32.002902 21769 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0818 15:07:32.002915 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.002920 21769 net.cpp:165] Memory required for data: 2254849200\nI0818 15:07:32.002931 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0818 15:07:32.002940 21769 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0818 15:07:32.002946 21769 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0818 15:07:32.002954 21769 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0818 15:07:32.003020 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0818 15:07:32.003177 21769 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0818 15:07:32.003193 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.003199 21769 net.cpp:165] Memory required for data: 2256487600\nI0818 15:07:32.003208 21769 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0818 15:07:32.003217 21769 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0818 15:07:32.003223 21769 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0818 15:07:32.003231 21769 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0818 15:07:32.003247 21769 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0818 15:07:32.003286 21769 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0818 15:07:32.003296 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.003301 21769 net.cpp:165] Memory required for data: 2258126000\nI0818 15:07:32.003307 21769 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0818 15:07:32.003314 21769 net.cpp:100] Creating Layer L3_b16_relu\nI0818 15:07:32.003320 21769 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0818 15:07:32.003327 21769 net.cpp:395] L3_b16_relu -> L3_b16_sum_eltwise_top 
(in-place)\nI0818 15:07:32.003337 21769 net.cpp:150] Setting up L3_b16_relu\nI0818 15:07:32.003345 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.003348 21769 net.cpp:165] Memory required for data: 2259764400\nI0818 15:07:32.003353 21769 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.003363 21769 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.003370 21769 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0818 15:07:32.003377 21769 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0818 15:07:32.003387 21769 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0818 15:07:32.003434 21769 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.003455 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.003463 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.003468 21769 net.cpp:165] Memory required for data: 2263041200\nI0818 15:07:32.003473 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0818 15:07:32.003484 21769 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0818 15:07:32.003490 21769 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0818 15:07:32.003500 21769 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0818 15:07:32.004534 21769 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0818 15:07:32.004549 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.004554 21769 net.cpp:165] Memory required for data: 2264679600\nI0818 15:07:32.004562 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0818 15:07:32.004575 21769 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0818 15:07:32.004581 21769 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0818 15:07:32.004590 21769 net.cpp:408] L3_b17_cbr1_bn -> 
L3_b17_cbr1_bn_top\nI0818 15:07:32.004870 21769 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0818 15:07:32.004884 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.004889 21769 net.cpp:165] Memory required for data: 2266318000\nI0818 15:07:32.004899 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0818 15:07:32.004911 21769 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0818 15:07:32.004918 21769 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0818 15:07:32.004927 21769 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.004995 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0818 15:07:32.005161 21769 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0818 15:07:32.005173 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.005179 21769 net.cpp:165] Memory required for data: 2267956400\nI0818 15:07:32.005187 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0818 15:07:32.005195 21769 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0818 15:07:32.005203 21769 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0818 15:07:32.005213 21769 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.005223 21769 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0818 15:07:32.005230 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.005234 21769 net.cpp:165] Memory required for data: 2269594800\nI0818 15:07:32.005239 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0818 15:07:32.005257 21769 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0818 15:07:32.005264 21769 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0818 15:07:32.005276 21769 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0818 15:07:32.006300 21769 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0818 15:07:32.006316 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.006321 21769 net.cpp:165] Memory required for data: 2271233200\nI0818 15:07:32.006330 
21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0818 15:07:32.006342 21769 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0818 15:07:32.006350 21769 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0818 15:07:32.006357 21769 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0818 15:07:32.006639 21769 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0818 15:07:32.006654 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.006659 21769 net.cpp:165] Memory required for data: 2272871600\nI0818 15:07:32.006669 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0818 15:07:32.006677 21769 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0818 15:07:32.006688 21769 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0818 15:07:32.006697 21769 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0818 15:07:32.006762 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0818 15:07:32.006922 21769 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0818 15:07:32.006938 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.006944 21769 net.cpp:165] Memory required for data: 2274510000\nI0818 15:07:32.006953 21769 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0818 15:07:32.006963 21769 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0818 15:07:32.006969 21769 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0818 15:07:32.006976 21769 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0818 15:07:32.006983 21769 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0818 15:07:32.007022 21769 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0818 15:07:32.007035 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.007040 21769 net.cpp:165] Memory required for data: 2276148400\nI0818 15:07:32.007045 21769 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0818 15:07:32.007055 21769 net.cpp:100] Creating Layer L3_b17_relu\nI0818 15:07:32.007061 21769 
net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0818 15:07:32.007068 21769 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top (in-place)\nI0818 15:07:32.007078 21769 net.cpp:150] Setting up L3_b17_relu\nI0818 15:07:32.007086 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.007089 21769 net.cpp:165] Memory required for data: 2277786800\nI0818 15:07:32.007094 21769 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.007104 21769 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.007110 21769 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0818 15:07:32.007117 21769 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0818 15:07:32.007128 21769 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0818 15:07:32.007174 21769 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.007189 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.007195 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.007200 21769 net.cpp:165] Memory required for data: 2281063600\nI0818 15:07:32.007205 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0818 15:07:32.007220 21769 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0818 15:07:32.007226 21769 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0818 15:07:32.007236 21769 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0818 15:07:32.008270 21769 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0818 15:07:32.008291 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.008297 21769 net.cpp:165] Memory required for data: 2282702000\nI0818 15:07:32.008306 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0818 15:07:32.008319 21769 net.cpp:100] Creating Layer L3_b18_cbr1_bn\nI0818 
15:07:32.008327 21769 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0818 15:07:32.008335 21769 net.cpp:408] L3_b18_cbr1_bn -> L3_b18_cbr1_bn_top\nI0818 15:07:32.008615 21769 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0818 15:07:32.008628 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.008633 21769 net.cpp:165] Memory required for data: 2284340400\nI0818 15:07:32.008644 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0818 15:07:32.008656 21769 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0818 15:07:32.008662 21769 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0818 15:07:32.008677 21769 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.008744 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0818 15:07:32.008908 21769 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0818 15:07:32.008922 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.008927 21769 net.cpp:165] Memory required for data: 2285978800\nI0818 15:07:32.008935 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0818 15:07:32.008944 21769 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0818 15:07:32.008950 21769 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0818 15:07:32.008960 21769 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.008971 21769 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0818 15:07:32.008978 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.008982 21769 net.cpp:165] Memory required for data: 2287617200\nI0818 15:07:32.008987 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0818 15:07:32.009001 21769 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0818 15:07:32.009007 21769 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0818 15:07:32.009016 21769 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0818 15:07:32.010051 21769 net.cpp:150] Setting up L3_b18_cbr2_conv\nI0818 15:07:32.010066 21769 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.010071 21769 net.cpp:165] Memory required for data: 2289255600\nI0818 15:07:32.010080 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0818 15:07:32.010092 21769 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0818 15:07:32.010099 21769 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0818 15:07:32.010108 21769 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0818 15:07:32.010380 21769 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0818 15:07:32.010392 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.010397 21769 net.cpp:165] Memory required for data: 2290894000\nI0818 15:07:32.010407 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0818 15:07:32.010416 21769 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0818 15:07:32.010422 21769 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0818 15:07:32.010430 21769 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0818 15:07:32.010494 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0818 15:07:32.010660 21769 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0818 15:07:32.010673 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.010679 21769 net.cpp:165] Memory required for data: 2292532400\nI0818 15:07:32.010694 21769 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0818 15:07:32.010704 21769 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0818 15:07:32.010710 21769 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0818 15:07:32.010717 21769 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0818 15:07:32.010728 21769 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0818 15:07:32.010764 21769 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0818 15:07:32.010777 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.010788 21769 net.cpp:165] Memory required for data: 2294170800\nI0818 15:07:32.010795 21769 layer_factory.hpp:77] 
Creating layer L3_b18_relu\nI0818 15:07:32.010802 21769 net.cpp:100] Creating Layer L3_b18_relu\nI0818 15:07:32.010808 21769 net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0818 15:07:32.010815 21769 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0818 15:07:32.010825 21769 net.cpp:150] Setting up L3_b18_relu\nI0818 15:07:32.010833 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.010836 21769 net.cpp:165] Memory required for data: 2295809200\nI0818 15:07:32.010841 21769 layer_factory.hpp:77] Creating layer post_pool\nI0818 15:07:32.010855 21769 net.cpp:100] Creating Layer post_pool\nI0818 15:07:32.010862 21769 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0818 15:07:32.010870 21769 net.cpp:408] post_pool -> post_pool\nI0818 15:07:32.010907 21769 net.cpp:150] Setting up post_pool\nI0818 15:07:32.010921 21769 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0818 15:07:32.010924 21769 net.cpp:165] Memory required for data: 2295834800\nI0818 15:07:32.010931 21769 layer_factory.hpp:77] Creating layer post_FC\nI0818 15:07:32.011062 21769 net.cpp:100] Creating Layer post_FC\nI0818 15:07:32.011077 21769 net.cpp:434] post_FC <- post_pool\nI0818 15:07:32.011091 21769 net.cpp:408] post_FC -> post_FC_top\nI0818 15:07:32.011375 21769 net.cpp:150] Setting up post_FC\nI0818 15:07:32.011392 21769 net.cpp:157] Top shape: 100 10 (1000)\nI0818 15:07:32.011397 21769 net.cpp:165] Memory required for data: 2295838800\nI0818 15:07:32.011406 21769 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 15:07:32.011415 21769 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 15:07:32.011421 21769 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 15:07:32.011433 21769 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0818 15:07:32.011443 21769 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 15:07:32.011497 21769 net.cpp:150] Setting up 
post_FC_top_post_FC_0_split\nI0818 15:07:32.011509 21769 net.cpp:157] Top shape: 100 10 (1000)\nI0818 15:07:32.011517 21769 net.cpp:157] Top shape: 100 10 (1000)\nI0818 15:07:32.011520 21769 net.cpp:165] Memory required for data: 2295846800\nI0818 15:07:32.011526 21769 layer_factory.hpp:77] Creating layer accuracy\nI0818 15:07:32.011574 21769 net.cpp:100] Creating Layer accuracy\nI0818 15:07:32.011586 21769 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 15:07:32.011595 21769 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 15:07:32.011602 21769 net.cpp:408] accuracy -> accuracy\nI0818 15:07:32.011649 21769 net.cpp:150] Setting up accuracy\nI0818 15:07:32.011662 21769 net.cpp:157] Top shape: (1)\nI0818 15:07:32.011667 21769 net.cpp:165] Memory required for data: 2295846804\nI0818 15:07:32.011673 21769 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:32.011693 21769 net.cpp:100] Creating Layer loss\nI0818 15:07:32.011701 21769 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 15:07:32.011708 21769 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 15:07:32.011716 21769 net.cpp:408] loss -> loss\nI0818 15:07:32.012629 21769 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:32.013576 21769 net.cpp:150] Setting up loss\nI0818 15:07:32.013593 21769 net.cpp:157] Top shape: (1)\nI0818 15:07:32.013599 21769 net.cpp:160]     with loss weight 1\nI0818 15:07:32.013695 21769 net.cpp:165] Memory required for data: 2295846808\nI0818 15:07:32.013705 21769 net.cpp:226] loss needs backward computation.\nI0818 15:07:32.013730 21769 net.cpp:228] accuracy does not need backward computation.\nI0818 15:07:32.013736 21769 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 15:07:32.013742 21769 net.cpp:226] post_FC needs backward computation.\nI0818 15:07:32.013747 21769 net.cpp:226] post_pool needs backward computation.\nI0818 15:07:32.013752 21769 net.cpp:226] L3_b18_relu needs backward computation.\nI0818 
15:07:32.013757 21769 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.013770 21769 net.cpp:226] L3_b18_cbr2_scale needs backward computation.\nI0818 15:07:32.013777 21769 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.013782 21769 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.013787 21769 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.013792 21769 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.013797 21769 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.013803 21769 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.013808 21769 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0818 15:07:32.013813 21769 net.cpp:226] L3_b17_relu needs backward computation.\nI0818 15:07:32.013818 21769 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.013823 21769 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.013828 21769 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.013833 21769 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.013839 21769 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.013844 21769 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.013847 21769 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.013852 21769 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.013859 21769 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0818 15:07:32.013864 21769 net.cpp:226] L3_b16_relu needs backward computation.\nI0818 15:07:32.013869 21769 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.013873 21769 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.013878 21769 net.cpp:226] 
L3_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.013883 21769 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.013888 21769 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.013893 21769 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.013897 21769 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.013902 21769 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.013908 21769 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0818 15:07:32.013913 21769 net.cpp:226] L3_b15_relu needs backward computation.\nI0818 15:07:32.013918 21769 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.013923 21769 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.013928 21769 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.013933 21769 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.013938 21769 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.013943 21769 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.013948 21769 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.013953 21769 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.013958 21769 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0818 15:07:32.013967 21769 net.cpp:226] L3_b14_relu needs backward computation.\nI0818 15:07:32.013973 21769 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0818 15:07:32.013979 21769 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.013984 21769 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.013989 21769 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0818 15:07:32.013994 21769 net.cpp:226] L3_b14_cbr1_relu needs backward 
computation.\nI0818 15:07:32.013999 21769 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.014004 21769 net.cpp:226] L3_b14_cbr1_bn needs backward computation.\nI0818 15:07:32.014015 21769 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.014021 21769 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0818 15:07:32.014026 21769 net.cpp:226] L3_b13_relu needs backward computation.\nI0818 15:07:32.014031 21769 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.014037 21769 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.014042 21769 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.014047 21769 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.014052 21769 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.014057 21769 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.014062 21769 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.014067 21769 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.014072 21769 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0818 15:07:32.014077 21769 net.cpp:226] L3_b12_relu needs backward computation.\nI0818 15:07:32.014082 21769 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.014088 21769 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0818 15:07:32.014093 21769 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0818 15:07:32.014098 21769 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.014103 21769 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0818 15:07:32.014108 21769 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.014113 21769 net.cpp:226] L3_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.014118 21769 
net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.014123 21769 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward computation.\nI0818 15:07:32.014129 21769 net.cpp:226] L3_b11_relu needs backward computation.\nI0818 15:07:32.014134 21769 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.014139 21769 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.014144 21769 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.014150 21769 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.014155 21769 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.014160 21769 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.014165 21769 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.014170 21769 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.014175 21769 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0818 15:07:32.014180 21769 net.cpp:226] L3_b10_relu needs backward computation.\nI0818 15:07:32.014185 21769 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.014191 21769 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.014196 21769 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.014201 21769 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.014207 21769 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.014212 21769 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.014217 21769 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.014222 21769 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.014228 21769 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0818 15:07:32.014233 21769 net.cpp:226] 
L3_b9_relu needs backward computation.\nI0818 15:07:32.014238 21769 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.014245 21769 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.014250 21769 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.014255 21769 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.014266 21769 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.014271 21769 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.014276 21769 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.014281 21769 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.014287 21769 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 15:07:32.014292 21769 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 15:07:32.014297 21769 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.014303 21769 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.014308 21769 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.014314 21769 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.014319 21769 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.014324 21769 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.014329 21769 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 15:07:32.014334 21769 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 15:07:32.014343 21769 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 15:07:32.014349 21769 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 15:07:32.014355 21769 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0818 15:07:32.014361 21769 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.014366 
21769 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.014372 21769 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 15:07:32.014377 21769 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.014382 21769 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.014387 21769 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.014392 21769 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.014398 21769 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 15:07:32.014403 21769 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 15:07:32.014410 21769 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.014415 21769 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.014420 21769 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.014426 21769 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.014431 21769 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.014436 21769 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.014441 21769 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.014446 21769 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.014452 21769 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 15:07:32.014457 21769 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 15:07:32.014462 21769 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.014468 21769 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.014473 21769 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.014479 21769 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.014484 21769 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 
15:07:32.014490 21769 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.014495 21769 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0818 15:07:32.014500 21769 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.014506 21769 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 15:07:32.014511 21769 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 15:07:32.014518 21769 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.014528 21769 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.014534 21769 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.014540 21769 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.014546 21769 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.014551 21769 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.014556 21769 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.014561 21769 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.014567 21769 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 15:07:32.014572 21769 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 15:07:32.014577 21769 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.014583 21769 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.014588 21769 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.014595 21769 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.014600 21769 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.014605 21769 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.014611 21769 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.014616 21769 net.cpp:226] L3_b3_cbr1_conv needs backward 
computation.\nI0818 15:07:32.014621 21769 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 15:07:32.014626 21769 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 15:07:32.014632 21769 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.014638 21769 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.014643 21769 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.014649 21769 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.014654 21769 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.014659 21769 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.014665 21769 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.014670 21769 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.014677 21769 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 15:07:32.014681 21769 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 15:07:32.014696 21769 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 15:07:32.014701 21769 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 15:07:32.014708 21769 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.014714 21769 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 15:07:32.014720 21769 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.014726 21769 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.014731 21769 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.014737 21769 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 15:07:32.014742 21769 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 15:07:32.014749 21769 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.014753 21769 net.cpp:226] L3_b1_cbr1_conv 
needs backward computation.\nI0818 15:07:32.014760 21769 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0818 15:07:32.014765 21769 net.cpp:226] L2_b18_relu needs backward computation.\nI0818 15:07:32.014770 21769 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.014776 21769 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0818 15:07:32.014781 21769 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.014787 21769 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.014793 21769 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.014804 21769 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.014811 21769 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.014816 21769 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.014822 21769 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0818 15:07:32.014827 21769 net.cpp:226] L2_b17_relu needs backward computation.\nI0818 15:07:32.014832 21769 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.014838 21769 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.014844 21769 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.014849 21769 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.014855 21769 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.014860 21769 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.014865 21769 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.014871 21769 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.014878 21769 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0818 15:07:32.014883 21769 net.cpp:226] L2_b16_relu needs backward 
computation.\nI0818 15:07:32.014888 21769 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.014894 21769 net.cpp:226] L2_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.014899 21769 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.014904 21769 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.014910 21769 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.014915 21769 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.014920 21769 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.014926 21769 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.014931 21769 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0818 15:07:32.014937 21769 net.cpp:226] L2_b15_relu needs backward computation.\nI0818 15:07:32.014942 21769 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.014948 21769 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.014953 21769 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.014960 21769 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.014964 21769 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.014969 21769 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.014976 21769 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.014981 21769 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.014986 21769 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0818 15:07:32.014992 21769 net.cpp:226] L2_b14_relu needs backward computation.\nI0818 15:07:32.014997 21769 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0818 15:07:32.015003 21769 net.cpp:226] L2_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.015008 
21769 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.015018 21769 net.cpp:226] L2_b14_cbr2_conv needs backward computation.\nI0818 15:07:32.015024 21769 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0818 15:07:32.015030 21769 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.015035 21769 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0818 15:07:32.015041 21769 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.015048 21769 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0818 15:07:32.015053 21769 net.cpp:226] L2_b13_relu needs backward computation.\nI0818 15:07:32.015058 21769 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.015071 21769 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.015077 21769 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.015084 21769 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.015089 21769 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.015094 21769 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.015100 21769 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.015106 21769 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.015112 21769 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0818 15:07:32.015118 21769 net.cpp:226] L2_b12_relu needs backward computation.\nI0818 15:07:32.015123 21769 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.015130 21769 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0818 15:07:32.015136 21769 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0818 15:07:32.015142 21769 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.015147 21769 net.cpp:226] L2_b12_cbr1_relu needs 
backward computation.\nI0818 15:07:32.015153 21769 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.015158 21769 net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.015164 21769 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.015171 21769 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0818 15:07:32.015177 21769 net.cpp:226] L2_b11_relu needs backward computation.\nI0818 15:07:32.015182 21769 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.015188 21769 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.015193 21769 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.015199 21769 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.015205 21769 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.015210 21769 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.015216 21769 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.015221 21769 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.015228 21769 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0818 15:07:32.015233 21769 net.cpp:226] L2_b10_relu needs backward computation.\nI0818 15:07:32.015239 21769 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.015245 21769 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.015251 21769 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.015256 21769 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.015262 21769 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.015267 21769 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.015274 21769 net.cpp:226] L2_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.015280 
21769 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.015285 21769 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 15:07:32.015290 21769 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 15:07:32.015296 21769 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.015302 21769 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.015308 21769 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.015314 21769 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.015321 21769 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.015326 21769 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.015331 21769 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.015336 21769 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.015348 21769 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 15:07:32.015355 21769 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 15:07:32.015360 21769 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.015367 21769 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.015373 21769 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.015379 21769 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.015385 21769 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.015390 21769 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.015395 21769 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 15:07:32.015401 21769 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 15:07:32.015408 21769 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 15:07:32.015413 21769 net.cpp:226] L2_b7_relu needs 
backward computation.\nI0818 15:07:32.015419 21769 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0818 15:07:32.015425 21769 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.015431 21769 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.015436 21769 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 15:07:32.015442 21769 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.015449 21769 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.015453 21769 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.015460 21769 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.015465 21769 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 15:07:32.015471 21769 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 15:07:32.015477 21769 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.015483 21769 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.015489 21769 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.015496 21769 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.015501 21769 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.015506 21769 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.015511 21769 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.015517 21769 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.015523 21769 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 15:07:32.015529 21769 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 15:07:32.015535 21769 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.015542 21769 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.015547 21769 net.cpp:226] 
L2_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.015552 21769 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.015558 21769 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 15:07:32.015564 21769 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.015569 21769 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 15:07:32.015575 21769 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.015581 21769 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 15:07:32.015586 21769 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 15:07:32.015592 21769 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.015599 21769 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.015605 21769 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.015610 21769 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.015616 21769 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.015627 21769 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.015633 21769 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.015640 21769 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.015645 21769 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 15:07:32.015651 21769 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 15:07:32.015657 21769 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.015664 21769 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.015669 21769 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.015676 21769 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.015682 21769 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.015694 
21769 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.015699 21769 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.015707 21769 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 15:07:32.015712 21769 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 15:07:32.015718 21769 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 15:07:32.015724 21769 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.015732 21769 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.015736 21769 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.015743 21769 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.015748 21769 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.015754 21769 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.015760 21769 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.015769 21769 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.015776 21769 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 15:07:32.015782 21769 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 15:07:32.015789 21769 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 15:07:32.015794 21769 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 15:07:32.015800 21769 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.015807 21769 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 15:07:32.015815 21769 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.015820 21769 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.015825 21769 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.015831 21769 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 
15:07:32.015836 21769 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 15:07:32.015842 21769 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.015847 21769 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 15:07:32.015853 21769 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0818 15:07:32.015859 21769 net.cpp:226] L1_b18_relu needs backward computation.\nI0818 15:07:32.015866 21769 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.015872 21769 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0818 15:07:32.015877 21769 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.015882 21769 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.015888 21769 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.015893 21769 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.015898 21769 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.015904 21769 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.015911 21769 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0818 15:07:32.015923 21769 net.cpp:226] L1_b17_relu needs backward computation.\nI0818 15:07:32.015928 21769 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.015934 21769 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.015940 21769 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.015946 21769 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.015952 21769 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.015957 21769 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.015962 21769 net.cpp:226] L1_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.015969 21769 net.cpp:226] 
L1_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.015974 21769 net.cpp:226] L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0818 15:07:32.015980 21769 net.cpp:226] L1_b16_relu needs backward computation.\nI0818 15:07:32.015985 21769 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.015991 21769 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.015997 21769 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.016003 21769 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.016010 21769 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.016014 21769 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.016019 21769 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.016026 21769 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.016031 21769 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0818 15:07:32.016037 21769 net.cpp:226] L1_b15_relu needs backward computation.\nI0818 15:07:32.016042 21769 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.016049 21769 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.016054 21769 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.016060 21769 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.016067 21769 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.016072 21769 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.016077 21769 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.016083 21769 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.016088 21769 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward computation.\nI0818 15:07:32.016093 21769 net.cpp:226] L1_b14_relu needs 
backward computation.\nI0818 15:07:32.016099 21769 net.cpp:226] L1_b14_sum_eltwise needs backward computation.\nI0818 15:07:32.016106 21769 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.016111 21769 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.016118 21769 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0818 15:07:32.016122 21769 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0818 15:07:32.016129 21769 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.016134 21769 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0818 15:07:32.016139 21769 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.016145 21769 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0818 15:07:32.016150 21769 net.cpp:226] L1_b13_relu needs backward computation.\nI0818 15:07:32.016156 21769 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.016162 21769 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.016167 21769 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.016173 21769 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.016180 21769 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.016185 21769 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.016196 21769 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.016202 21769 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.016208 21769 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0818 15:07:32.016213 21769 net.cpp:226] L1_b12_relu needs backward computation.\nI0818 15:07:32.016219 21769 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.016225 21769 net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0818 
15:07:32.016232 21769 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0818 15:07:32.016237 21769 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.016242 21769 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0818 15:07:32.016247 21769 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.016253 21769 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.016259 21769 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.016265 21769 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0818 15:07:32.016271 21769 net.cpp:226] L1_b11_relu needs backward computation.\nI0818 15:07:32.016276 21769 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.016283 21769 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.016288 21769 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.016294 21769 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.016300 21769 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.016306 21769 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.016311 21769 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.016317 21769 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.016324 21769 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0818 15:07:32.016329 21769 net.cpp:226] L1_b10_relu needs backward computation.\nI0818 15:07:32.016335 21769 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.016340 21769 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.016346 21769 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.016352 21769 net.cpp:226] L1_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.016358 21769 net.cpp:226] 
L1_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.016363 21769 net.cpp:226] L1_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.016369 21769 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.016376 21769 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.016381 21769 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 15:07:32.016387 21769 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 15:07:32.016393 21769 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.016399 21769 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.016405 21769 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.016410 21769 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.016417 21769 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.016422 21769 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.016427 21769 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.016433 21769 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.016439 21769 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 15:07:32.016445 21769 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 15:07:32.016450 21769 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.016460 21769 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.016472 21769 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.016479 21769 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.016485 21769 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.016490 21769 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.016496 21769 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 
15:07:32.016502 21769 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 15:07:32.016508 21769 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 15:07:32.016515 21769 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 15:07:32.016520 21769 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 15:07:32.016527 21769 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.016533 21769 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.016540 21769 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 15:07:32.016544 21769 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.016551 21769 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.016556 21769 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.016561 21769 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.016568 21769 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 15:07:32.016574 21769 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 15:07:32.016579 21769 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.016587 21769 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.016592 21769 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.016598 21769 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.016604 21769 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.016609 21769 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.016615 21769 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.016621 21769 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.016628 21769 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 15:07:32.016633 21769 net.cpp:226] 
L1_b5_relu needs backward computation.\nI0818 15:07:32.016639 21769 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.016646 21769 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.016652 21769 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.016659 21769 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.016664 21769 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 15:07:32.016669 21769 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.016675 21769 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 15:07:32.016681 21769 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.016692 21769 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 15:07:32.016700 21769 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 15:07:32.016705 21769 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.016712 21769 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.016718 21769 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.016724 21769 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.016731 21769 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.016736 21769 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.016741 21769 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.016747 21769 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.016753 21769 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 15:07:32.016764 21769 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 15:07:32.016772 21769 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.016778 21769 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.016784 
21769 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.016790 21769 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.016796 21769 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.016801 21769 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.016808 21769 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.016813 21769 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 15:07:32.016819 21769 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 15:07:32.016825 21769 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 15:07:32.016831 21769 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.016837 21769 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.016844 21769 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.016850 21769 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.016855 21769 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.016861 21769 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.016867 21769 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.016873 21769 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.016880 21769 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 15:07:32.016886 21769 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 15:07:32.016894 21769 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.016901 21769 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.016907 21769 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.016913 21769 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.016919 21769 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 
15:07:32.016926 21769 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 15:07:32.016932 21769 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.016937 21769 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 15:07:32.016943 21769 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 15:07:32.016948 21769 net.cpp:226] pre_relu needs backward computation.\nI0818 15:07:32.016953 21769 net.cpp:226] pre_scale needs backward computation.\nI0818 15:07:32.016958 21769 net.cpp:226] pre_bn needs backward computation.\nI0818 15:07:32.016964 21769 net.cpp:226] pre_conv needs backward computation.\nI0818 15:07:32.016971 21769 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 15:07:32.016978 21769 net.cpp:228] dataLayer does not need backward computation.\nI0818 15:07:32.016983 21769 net.cpp:270] This network produces output accuracy\nI0818 15:07:32.016989 21769 net.cpp:270] This network produces output loss\nI0818 15:07:32.017735 21769 net.cpp:283] Network initialization done.\nI0818 15:07:32.034482 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:32.034543 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:32.034600 21769 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0818 15:07:32.035318 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0818 15:07:32.035337 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0818 15:07:32.035348 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0818 15:07:32.035367 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr2_bn\nI0818 15:07:32.035377 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0818 15:07:32.035387 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0818 15:07:32.035395 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0818 15:07:32.035404 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0818 15:07:32.035414 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0818 15:07:32.035423 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0818 15:07:32.035432 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0818 15:07:32.035441 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0818 15:07:32.035450 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0818 15:07:32.035459 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0818 15:07:32.035468 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0818 15:07:32.035477 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0818 15:07:32.035485 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0818 15:07:32.035495 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0818 15:07:32.035503 21769 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0818 15:07:32.035511 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0818 15:07:32.035521 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr1_bn\nI0818 15:07:32.035531 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b10_cbr2_bn\nI0818 15:07:32.035538 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr1_bn\nI0818 15:07:32.035547 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b11_cbr2_bn\nI0818 15:07:32.035555 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr1_bn\nI0818 15:07:32.035564 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b12_cbr2_bn\nI0818 15:07:32.035573 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr1_bn\nI0818 15:07:32.035581 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b13_cbr2_bn\nI0818 15:07:32.035589 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr1_bn\nI0818 15:07:32.035598 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b14_cbr2_bn\nI0818 15:07:32.035606 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr1_bn\nI0818 15:07:32.035615 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b15_cbr2_bn\nI0818 15:07:32.035624 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b16_cbr1_bn\nI0818 
15:07:32.035632 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b16_cbr2_bn\nI0818 15:07:32.035648 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr1_bn\nI0818 15:07:32.035658 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b17_cbr2_bn\nI0818 15:07:32.035667 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr1_bn\nI0818 15:07:32.035676 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b18_cbr2_bn\nI0818 15:07:32.035691 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0818 15:07:32.035701 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0818 15:07:32.035713 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0818 15:07:32.035722 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0818 15:07:32.035730 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0818 15:07:32.035739 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0818 15:07:32.035748 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0818 15:07:32.035756 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0818 15:07:32.035765 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0818 15:07:32.035773 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) 
specified by a rule in layer L2_b5_cbr2_bn\nI0818 15:07:32.035782 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0818 15:07:32.035790 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0818 15:07:32.035799 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0818 15:07:32.035807 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0818 15:07:32.035816 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0818 15:07:32.035825 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0818 15:07:32.035835 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0818 15:07:32.035842 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0818 15:07:32.035851 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr1_bn\nI0818 15:07:32.035859 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b10_cbr2_bn\nI0818 15:07:32.035868 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr1_bn\nI0818 15:07:32.035876 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b11_cbr2_bn\nI0818 15:07:32.035886 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b12_cbr1_bn\nI0818 15:07:32.035893 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b12_cbr2_bn\nI0818 15:07:32.035902 21769 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr1_bn\nI0818 15:07:32.035912 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b13_cbr2_bn\nI0818 15:07:32.035928 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr1_bn\nI0818 15:07:32.035938 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b14_cbr2_bn\nI0818 15:07:32.035945 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr1_bn\nI0818 15:07:32.035953 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b15_cbr2_bn\nI0818 15:07:32.035962 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr1_bn\nI0818 15:07:32.035970 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b16_cbr2_bn\nI0818 15:07:32.035979 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr1_bn\nI0818 15:07:32.035987 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b17_cbr2_bn\nI0818 15:07:32.035996 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr1_bn\nI0818 15:07:32.036005 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b18_cbr2_bn\nI0818 15:07:32.036013 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0818 15:07:32.036022 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0818 15:07:32.036034 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b2_cbr1_bn\nI0818 15:07:32.036043 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0818 15:07:32.036052 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0818 15:07:32.036061 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0818 15:07:32.036069 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0818 15:07:32.036077 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0818 15:07:32.036087 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI0818 15:07:32.036094 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0818 15:07:32.036103 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0818 15:07:32.036111 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0818 15:07:32.036119 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0818 15:07:32.036128 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0818 15:07:32.036136 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0818 15:07:32.036145 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0818 15:07:32.036154 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0818 15:07:32.036161 21769 net.cpp:322] The NetState phase (1) differed from 
the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0818 15:07:32.036170 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr1_bn\nI0818 15:07:32.036178 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b10_cbr2_bn\nI0818 15:07:32.036193 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr1_bn\nI0818 15:07:32.036202 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b11_cbr2_bn\nI0818 15:07:32.036211 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr1_bn\nI0818 15:07:32.036221 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b12_cbr2_bn\nI0818 15:07:32.036229 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr1_bn\nI0818 15:07:32.036237 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b13_cbr2_bn\nI0818 15:07:32.036245 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr1_bn\nI0818 15:07:32.036254 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b14_cbr2_bn\nI0818 15:07:32.036263 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr1_bn\nI0818 15:07:32.036272 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b15_cbr2_bn\nI0818 15:07:32.036280 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b16_cbr1_bn\nI0818 15:07:32.036288 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b16_cbr2_bn\nI0818 15:07:32.036298 
21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr1_bn\nI0818 15:07:32.036305 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b17_cbr2_bn\nI0818 15:07:32.036314 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr1_bn\nI0818 15:07:32.036324 21769 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b18_cbr2_bn\nI0818 15:07:32.039508 21769 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: 
\"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: 
\"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: 
\"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: 
\"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n   
 use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer {\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b13_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight\nI0818 15:07:32.042739 21769 layer_factory.hpp:77] Creating layer dataLayer\nI0818 15:07:32.042949 21769 net.cpp:100] Creating Layer dataLayer\nI0818 15:07:32.042968 21769 net.cpp:408] dataLayer -> data_top\nI0818 15:07:32.042986 21769 net.cpp:408] dataLayer -> label\nI0818 15:07:32.042999 21769 data_transformer.cpp:25] 
Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 15:07:32.057493 21776 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0818 15:07:32.057824 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:32.064604 21769 net.cpp:150] Setting up dataLayer\nI0818 15:07:32.064626 21769 net.cpp:157] Top shape: 100 3 32 32 (307200)\nI0818 15:07:32.064635 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:32.064640 21769 net.cpp:165] Memory required for data: 1229200\nI0818 15:07:32.064646 21769 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 15:07:32.064657 21769 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 15:07:32.064663 21769 net.cpp:434] label_dataLayer_1_split <- label\nI0818 15:07:32.064728 21769 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 15:07:32.064746 21769 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 15:07:32.064822 21769 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 15:07:32.064836 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:32.064846 21769 net.cpp:157] Top shape: 100 (100)\nI0818 15:07:32.064851 21769 net.cpp:165] Memory required for data: 1230000\nI0818 15:07:32.064857 21769 layer_factory.hpp:77] Creating layer pre_conv\nI0818 15:07:32.064873 21769 net.cpp:100] Creating Layer pre_conv\nI0818 15:07:32.064879 21769 net.cpp:434] pre_conv <- data_top\nI0818 15:07:32.064891 21769 net.cpp:408] pre_conv -> pre_conv_top\nI0818 15:07:32.065414 21769 net.cpp:150] Setting up pre_conv\nI0818 15:07:32.065431 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.065436 21769 net.cpp:165] Memory required for data: 7783600\nI0818 15:07:32.065451 21769 layer_factory.hpp:77] Creating layer pre_bn\nI0818 15:07:32.065470 21769 net.cpp:100] Creating Layer pre_bn\nI0818 15:07:32.065477 21769 net.cpp:434] pre_bn <- pre_conv_top\nI0818 15:07:32.065486 21769 net.cpp:408] pre_bn -> pre_bn_top\nI0818 15:07:32.065877 21769 
net.cpp:150] Setting up pre_bn\nI0818 15:07:32.065894 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.065899 21769 net.cpp:165] Memory required for data: 14337200\nI0818 15:07:32.065915 21769 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:32.065925 21769 net.cpp:100] Creating Layer pre_scale\nI0818 15:07:32.065932 21769 net.cpp:434] pre_scale <- pre_bn_top\nI0818 15:07:32.065939 21769 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 15:07:32.066010 21769 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:32.066221 21769 net.cpp:150] Setting up pre_scale\nI0818 15:07:32.066238 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.066244 21769 net.cpp:165] Memory required for data: 20890800\nI0818 15:07:32.066254 21769 layer_factory.hpp:77] Creating layer pre_relu\nI0818 15:07:32.066263 21769 net.cpp:100] Creating Layer pre_relu\nI0818 15:07:32.066268 21769 net.cpp:434] pre_relu <- pre_bn_top\nI0818 15:07:32.066294 21769 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 15:07:32.066306 21769 net.cpp:150] Setting up pre_relu\nI0818 15:07:32.066313 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.066318 21769 net.cpp:165] Memory required for data: 27444400\nI0818 15:07:32.066325 21769 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 15:07:32.066334 21769 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 15:07:32.066339 21769 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 15:07:32.066359 21769 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 15:07:32.066370 21769 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 15:07:32.066427 21769 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 15:07:32.066438 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.066444 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.066449 21769 
net.cpp:165] Memory required for data: 40551600\nI0818 15:07:32.066454 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 15:07:32.066493 21769 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 15:07:32.066500 21769 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 15:07:32.066510 21769 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 15:07:32.066963 21769 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 15:07:32.066988 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.066996 21769 net.cpp:165] Memory required for data: 47105200\nI0818 15:07:32.067010 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 15:07:32.067025 21769 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 15:07:32.067031 21769 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 15:07:32.067039 21769 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 15:07:32.067587 21769 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 15:07:32.067601 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.067606 21769 net.cpp:165] Memory required for data: 53658800\nI0818 15:07:32.067618 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:32.067627 21769 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 15:07:32.067633 21769 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 15:07:32.067647 21769 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.067728 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:32.067917 21769 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 15:07:32.067935 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.067940 21769 net.cpp:165] Memory required for data: 60212400\nI0818 15:07:32.067953 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 15:07:32.067962 21769 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 15:07:32.067967 21769 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 
15:07:32.067975 21769 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.067984 21769 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 15:07:32.067993 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.068001 21769 net.cpp:165] Memory required for data: 66766000\nI0818 15:07:32.068006 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 15:07:32.068019 21769 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 15:07:32.068027 21769 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 15:07:32.068040 21769 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 15:07:32.068450 21769 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 15:07:32.068467 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.068473 21769 net.cpp:165] Memory required for data: 73319600\nI0818 15:07:32.068482 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 15:07:32.068498 21769 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 15:07:32.068505 21769 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 15:07:32.068516 21769 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 15:07:32.068845 21769 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 15:07:32.068862 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.068868 21769 net.cpp:165] Memory required for data: 79873200\nI0818 15:07:32.068884 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:32.068897 21769 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 15:07:32.068905 21769 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 15:07:32.068914 21769 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 15:07:32.068984 21769 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:32.069815 21769 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 15:07:32.069835 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.069841 21769 net.cpp:165] Memory required for data: 
86426800\nI0818 15:07:32.069850 21769 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 15:07:32.069860 21769 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 15:07:32.069866 21769 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 15:07:32.069872 21769 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 15:07:32.069880 21769 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 15:07:32.069922 21769 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 15:07:32.069932 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.069944 21769 net.cpp:165] Memory required for data: 92980400\nI0818 15:07:32.069950 21769 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 15:07:32.069960 21769 net.cpp:100] Creating Layer L1_b1_relu\nI0818 15:07:32.069967 21769 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 15:07:32.069973 21769 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 15:07:32.069983 21769 net.cpp:150] Setting up L1_b1_relu\nI0818 15:07:32.069989 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.069994 21769 net.cpp:165] Memory required for data: 99534000\nI0818 15:07:32.069999 21769 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:32.070011 21769 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:32.070016 21769 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 15:07:32.070024 21769 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:32.070032 21769 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:32.070184 21769 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:32.070199 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.070205 21769 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0818 15:07:32.070210 21769 net.cpp:165] Memory required for data: 112641200\nI0818 15:07:32.070215 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 15:07:32.070226 21769 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 15:07:32.070232 21769 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:32.070243 21769 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 15:07:32.070613 21769 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 15:07:32.070627 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.070632 21769 net.cpp:165] Memory required for data: 119194800\nI0818 15:07:32.070641 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 15:07:32.070650 21769 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 15:07:32.070655 21769 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 15:07:32.070663 21769 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 15:07:32.070958 21769 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 15:07:32.070974 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.070979 21769 net.cpp:165] Memory required for data: 125748400\nI0818 15:07:32.070991 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:32.071002 21769 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 15:07:32.071008 21769 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 15:07:32.071015 21769 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.071079 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:32.071247 21769 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 15:07:32.071260 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.071266 21769 net.cpp:165] Memory required for data: 132302000\nI0818 15:07:32.071275 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 15:07:32.071283 21769 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 15:07:32.071288 21769 
net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 15:07:32.071298 21769 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.071308 21769 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 15:07:32.071316 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.071321 21769 net.cpp:165] Memory required for data: 138855600\nI0818 15:07:32.071326 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 15:07:32.071338 21769 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 15:07:32.071344 21769 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 15:07:32.071354 21769 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 15:07:32.071735 21769 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 15:07:32.071759 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.071768 21769 net.cpp:165] Memory required for data: 145409200\nI0818 15:07:32.071777 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 15:07:32.071791 21769 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 15:07:32.071797 21769 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 15:07:32.071810 21769 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 15:07:32.072144 21769 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 15:07:32.072160 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.072166 21769 net.cpp:165] Memory required for data: 151962800\nI0818 15:07:32.072187 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:32.072196 21769 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 15:07:32.072202 21769 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 15:07:32.072217 21769 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 15:07:32.072304 21769 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:32.072691 21769 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 15:07:32.072710 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 
15:07:32.072716 21769 net.cpp:165] Memory required for data: 158516400\nI0818 15:07:32.072726 21769 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 15:07:32.072742 21769 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 15:07:32.072749 21769 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 15:07:32.072757 21769 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:32.072765 21769 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 15:07:32.072809 21769 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 15:07:32.072825 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.072830 21769 net.cpp:165] Memory required for data: 165070000\nI0818 15:07:32.072836 21769 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 15:07:32.072844 21769 net.cpp:100] Creating Layer L1_b2_relu\nI0818 15:07:32.072849 21769 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 15:07:32.072862 21769 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 15:07:32.072873 21769 net.cpp:150] Setting up L1_b2_relu\nI0818 15:07:32.072880 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.072885 21769 net.cpp:165] Memory required for data: 171623600\nI0818 15:07:32.072890 21769 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:32.072897 21769 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:32.072902 21769 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 15:07:32.072909 21769 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:32.072918 21769 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:32.072981 21769 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:32.072991 21769 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0818 15:07:32.072999 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.073004 21769 net.cpp:165] Memory required for data: 184730800\nI0818 15:07:32.073011 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 15:07:32.073025 21769 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 15:07:32.073032 21769 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:32.073041 21769 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 15:07:32.073444 21769 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 15:07:32.073462 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.073467 21769 net.cpp:165] Memory required for data: 191284400\nI0818 15:07:32.073477 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 15:07:32.073487 21769 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 15:07:32.073499 21769 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 15:07:32.073513 21769 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 15:07:32.073848 21769 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 15:07:32.073868 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.073873 21769 net.cpp:165] Memory required for data: 197838000\nI0818 15:07:32.073884 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:32.073896 21769 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 15:07:32.073904 21769 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 15:07:32.073910 21769 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:32.073977 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:32.074167 21769 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 15:07:32.074184 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.074190 21769 net.cpp:165] Memory required for data: 204391600\nI0818 15:07:32.074199 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 15:07:32.074213 
21769 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 15:07:32.074218 21769 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 15:07:32.074229 21769 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:32.074239 21769 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 15:07:32.074246 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.074250 21769 net.cpp:165] Memory required for data: 210945200\nI0818 15:07:32.074255 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 15:07:32.074270 21769 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 15:07:32.074276 21769 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 15:07:32.074287 21769 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 15:07:32.074715 21769 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 15:07:32.074733 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.074738 21769 net.cpp:165] Memory required for data: 217498800\nI0818 15:07:32.074746 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 15:07:32.074761 21769 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 15:07:32.074769 21769 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 15:07:32.074781 21769 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 15:07:32.075139 21769 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 15:07:32.075155 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075161 21769 net.cpp:165] Memory required for data: 224052400\nI0818 15:07:32.075172 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:32.075186 21769 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 15:07:32.075192 21769 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 15:07:32.075201 21769 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 15:07:32.075269 21769 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:32.075454 21769 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 
15:07:32.075469 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075474 21769 net.cpp:165] Memory required for data: 230606000\nI0818 15:07:32.075484 21769 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 15:07:32.075495 21769 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 15:07:32.075505 21769 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 15:07:32.075512 21769 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:32.075520 21769 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 15:07:32.075562 21769 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 15:07:32.075572 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075580 21769 net.cpp:165] Memory required for data: 237159600\nI0818 15:07:32.075587 21769 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 15:07:32.075593 21769 net.cpp:100] Creating Layer L1_b3_relu\nI0818 15:07:32.075599 21769 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 15:07:32.075618 21769 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 15:07:32.075628 21769 net.cpp:150] Setting up L1_b3_relu\nI0818 15:07:32.075635 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075639 21769 net.cpp:165] Memory required for data: 243713200\nI0818 15:07:32.075647 21769 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:32.075655 21769 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:32.075660 21769 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 15:07:32.075669 21769 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:32.075677 21769 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:32.075747 21769 net.cpp:150] Setting up 
L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:32.075762 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075769 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.075774 21769 net.cpp:165] Memory required for data: 256820400\nI0818 15:07:32.075779 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 15:07:32.075794 21769 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 15:07:32.075800 21769 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:32.075810 21769 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 15:07:32.076225 21769 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 15:07:32.076241 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.076246 21769 net.cpp:165] Memory required for data: 263374000\nI0818 15:07:32.076254 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 15:07:32.076267 21769 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 15:07:32.076273 21769 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 15:07:32.076284 21769 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 15:07:32.076617 21769 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 15:07:32.076632 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.076637 21769 net.cpp:165] Memory required for data: 269927600\nI0818 15:07:32.076647 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:32.076665 21769 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 15:07:32.076673 21769 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 15:07:32.076680 21769 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.076761 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:32.076984 21769 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 15:07:32.077000 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.077005 21769 net.cpp:165] Memory required for data: 
276481200\nI0818 15:07:32.077015 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 15:07:32.077026 21769 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 15:07:32.077033 21769 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 15:07:32.077039 21769 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.077049 21769 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 15:07:32.077056 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.077060 21769 net.cpp:165] Memory required for data: 283034800\nI0818 15:07:32.077065 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 15:07:32.077080 21769 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 15:07:32.077085 21769 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 15:07:32.077096 21769 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 15:07:32.077461 21769 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 15:07:32.077476 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.077481 21769 net.cpp:165] Memory required for data: 289588400\nI0818 15:07:32.077489 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 15:07:32.077498 21769 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 15:07:32.077512 21769 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 15:07:32.077520 21769 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 15:07:32.077816 21769 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 15:07:32.077831 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.077836 21769 net.cpp:165] Memory required for data: 296142000\nI0818 15:07:32.077850 21769 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:32.077860 21769 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 15:07:32.077867 21769 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 15:07:32.077877 21769 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 15:07:32.077937 21769 
layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:32.078101 21769 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 15:07:32.078114 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078119 21769 net.cpp:165] Memory required for data: 302695600\nI0818 15:07:32.078128 21769 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 15:07:32.078140 21769 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 15:07:32.078147 21769 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 15:07:32.078155 21769 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:32.078164 21769 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 15:07:32.078200 21769 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 15:07:32.078209 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078214 21769 net.cpp:165] Memory required for data: 309249200\nI0818 15:07:32.078219 21769 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 15:07:32.078227 21769 net.cpp:100] Creating Layer L1_b4_relu\nI0818 15:07:32.078233 21769 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 15:07:32.078245 21769 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 15:07:32.078255 21769 net.cpp:150] Setting up L1_b4_relu\nI0818 15:07:32.078263 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078268 21769 net.cpp:165] Memory required for data: 315802800\nI0818 15:07:32.078272 21769 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:32.078279 21769 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:32.078284 21769 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 15:07:32.078291 21769 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:32.078300 21769 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> 
L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:32.078354 21769 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:32.078366 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078373 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078378 21769 net.cpp:165] Memory required for data: 328910000\nI0818 15:07:32.078383 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 15:07:32.078394 21769 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 15:07:32.078400 21769 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:32.078413 21769 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 15:07:32.078788 21769 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 15:07:32.078804 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.078809 21769 net.cpp:165] Memory required for data: 335463600\nI0818 15:07:32.078845 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 15:07:32.078857 21769 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 15:07:32.078864 21769 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 15:07:32.078876 21769 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 15:07:32.079159 21769 net.cpp:150] Setting up L1_b5_cbr1_bn\nI0818 15:07:32.079174 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.079177 21769 net.cpp:165] Memory required for data: 342017200\nI0818 15:07:32.079196 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:32.079208 21769 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 15:07:32.079216 21769 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 15:07:32.079223 21769 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.079284 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:32.079450 21769 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0818 15:07:32.079463 21769 net.cpp:157] Top shape: 100 16 
32 32 (1638400)\nI0818 15:07:32.079469 21769 net.cpp:165] Memory required for data: 348570800\nI0818 15:07:32.079478 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 15:07:32.079485 21769 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 15:07:32.079491 21769 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 15:07:32.079502 21769 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.079512 21769 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 15:07:32.079519 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.079524 21769 net.cpp:165] Memory required for data: 355124400\nI0818 15:07:32.079529 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 15:07:32.079542 21769 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 15:07:32.079548 21769 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 15:07:32.079560 21769 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 15:07:32.079924 21769 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 15:07:32.079939 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.079944 21769 net.cpp:165] Memory required for data: 361678000\nI0818 15:07:32.079953 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 15:07:32.079962 21769 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 15:07:32.079968 21769 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 15:07:32.079977 21769 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 15:07:32.080293 21769 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 15:07:32.080307 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.080313 21769 net.cpp:165] Memory required for data: 368231600\nI0818 15:07:32.080323 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:32.080335 21769 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 15:07:32.080343 21769 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 15:07:32.080349 21769 net.cpp:395] 
L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 15:07:32.080415 21769 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:32.080663 21769 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 15:07:32.080690 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.080698 21769 net.cpp:165] Memory required for data: 374785200\nI0818 15:07:32.080708 21769 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 15:07:32.080716 21769 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 15:07:32.080723 21769 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 15:07:32.080729 21769 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:32.080745 21769 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 15:07:32.080804 21769 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 15:07:32.080828 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.080837 21769 net.cpp:165] Memory required for data: 381338800\nI0818 15:07:32.080847 21769 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 15:07:32.080859 21769 net.cpp:100] Creating Layer L1_b5_relu\nI0818 15:07:32.080868 21769 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 15:07:32.080883 21769 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI0818 15:07:32.080898 21769 net.cpp:150] Setting up L1_b5_relu\nI0818 15:07:32.080909 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.080916 21769 net.cpp:165] Memory required for data: 387892400\nI0818 15:07:32.080926 21769 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:32.080953 21769 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:32.080961 21769 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 15:07:32.080970 21769 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 
15:07:32.080981 21769 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:32.081037 21769 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:32.081049 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.081056 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.081061 21769 net.cpp:165] Memory required for data: 400999600\nI0818 15:07:32.081066 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 15:07:32.081077 21769 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 15:07:32.081084 21769 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:32.081095 21769 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 15:07:32.081454 21769 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 15:07:32.081470 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.081473 21769 net.cpp:165] Memory required for data: 407553200\nI0818 15:07:32.081483 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 15:07:32.081492 21769 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 15:07:32.081498 21769 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 15:07:32.081506 21769 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI0818 15:07:32.081804 21769 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 15:07:32.081817 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.081822 21769 net.cpp:165] Memory required for data: 414106800\nI0818 15:07:32.081833 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:32.081845 21769 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 15:07:32.081851 21769 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 15:07:32.081859 21769 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.081928 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:32.082125 21769 net.cpp:150] Setting 
up L1_b6_cbr1_scale\nI0818 15:07:32.082140 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.082145 21769 net.cpp:165] Memory required for data: 420660400\nI0818 15:07:32.082154 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 15:07:32.082162 21769 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 15:07:32.082168 21769 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 15:07:32.082180 21769 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.082190 21769 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 15:07:32.082196 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.082201 21769 net.cpp:165] Memory required for data: 427214000\nI0818 15:07:32.082206 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 15:07:32.082226 21769 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 15:07:32.082231 21769 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 15:07:32.082240 21769 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 15:07:32.082607 21769 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 15:07:32.082621 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.082626 21769 net.cpp:165] Memory required for data: 433767600\nI0818 15:07:32.082635 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 15:07:32.082648 21769 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 15:07:32.082654 21769 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 15:07:32.082662 21769 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 15:07:32.082975 21769 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 15:07:32.082990 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.082996 21769 net.cpp:165] Memory required for data: 440321200\nI0818 15:07:32.083015 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:32.083024 21769 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 15:07:32.083031 21769 net.cpp:434] 
L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 15:07:32.083042 21769 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 15:07:32.083107 21769 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:32.083276 21769 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 15:07:32.083289 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083294 21769 net.cpp:165] Memory required for data: 446874800\nI0818 15:07:32.083303 21769 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 15:07:32.083322 21769 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 15:07:32.083328 21769 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 15:07:32.083335 21769 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:32.083348 21769 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 15:07:32.083384 21769 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 15:07:32.083394 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083398 21769 net.cpp:165] Memory required for data: 453428400\nI0818 15:07:32.083403 21769 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 15:07:32.083411 21769 net.cpp:100] Creating Layer L1_b6_relu\nI0818 15:07:32.083416 21769 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI0818 15:07:32.083427 21769 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 15:07:32.083436 21769 net.cpp:150] Setting up L1_b6_relu\nI0818 15:07:32.083443 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083448 21769 net.cpp:165] Memory required for data: 459982000\nI0818 15:07:32.083453 21769 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:32.083461 21769 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:32.083465 21769 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 15:07:32.083472 21769 net.cpp:408] 
L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:32.083482 21769 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:32.083535 21769 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:32.083547 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083554 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083559 21769 net.cpp:165] Memory required for data: 473089200\nI0818 15:07:32.083564 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 15:07:32.083580 21769 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 15:07:32.083587 21769 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:32.083596 21769 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 15:07:32.083968 21769 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 15:07:32.083983 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.083988 21769 net.cpp:165] Memory required for data: 479642800\nI0818 15:07:32.083997 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 15:07:32.084009 21769 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 15:07:32.084017 21769 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI0818 15:07:32.084025 21769 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 15:07:32.084311 21769 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 15:07:32.084327 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.084333 21769 net.cpp:165] Memory required for data: 486196400\nI0818 15:07:32.084343 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:32.084352 21769 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 15:07:32.084358 21769 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 15:07:32.084367 21769 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.084435 21769 
layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:32.084604 21769 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 15:07:32.084616 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.084621 21769 net.cpp:165] Memory required for data: 492750000\nI0818 15:07:32.084630 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 15:07:32.084641 21769 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 15:07:32.084648 21769 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 15:07:32.084656 21769 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.084667 21769 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 15:07:32.084676 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.084679 21769 net.cpp:165] Memory required for data: 499303600\nI0818 15:07:32.084691 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 15:07:32.084702 21769 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 15:07:32.084707 21769 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 15:07:32.084718 21769 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 15:07:32.085108 21769 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 15:07:32.085124 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.085129 21769 net.cpp:165] Memory required for data: 505857200\nI0818 15:07:32.085139 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 15:07:32.085147 21769 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 15:07:32.085153 21769 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 15:07:32.085165 21769 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 15:07:32.085455 21769 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 15:07:32.085469 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.085474 21769 net.cpp:165] Memory required for data: 512410800\nI0818 15:07:32.085485 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:32.085497 
21769 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 15:07:32.085503 21769 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 15:07:32.085511 21769 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 15:07:32.085572 21769 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:32.085747 21769 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 15:07:32.085760 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.085765 21769 net.cpp:165] Memory required for data: 518964400\nI0818 15:07:32.085774 21769 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 15:07:32.085786 21769 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 15:07:32.085793 21769 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 15:07:32.085800 21769 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:32.085808 21769 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 15:07:32.085846 21769 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 15:07:32.085857 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.085862 21769 net.cpp:165] Memory required for data: 525518000\nI0818 15:07:32.085867 21769 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 15:07:32.085875 21769 net.cpp:100] Creating Layer L1_b7_relu\nI0818 15:07:32.085880 21769 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 15:07:32.085891 21769 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 15:07:32.085901 21769 net.cpp:150] Setting up L1_b7_relu\nI0818 15:07:32.085907 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.085912 21769 net.cpp:165] Memory required for data: 532071600\nI0818 15:07:32.085917 21769 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:32.085924 21769 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:32.085929 21769 net.cpp:434] 
L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 15:07:32.085937 21769 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:32.085953 21769 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:32.086006 21769 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:32.086016 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.086024 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.086028 21769 net.cpp:165] Memory required for data: 545178800\nI0818 15:07:32.086033 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 15:07:32.086045 21769 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 15:07:32.086050 21769 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:32.086061 21769 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 15:07:32.086431 21769 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 15:07:32.086446 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.086450 21769 net.cpp:165] Memory required for data: 551732400\nI0818 15:07:32.086459 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 15:07:32.086468 21769 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI0818 15:07:32.086474 21769 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 15:07:32.086485 21769 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 15:07:32.086784 21769 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 15:07:32.086802 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.086807 21769 net.cpp:165] Memory required for data: 558286000\nI0818 15:07:32.086817 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:32.086827 21769 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 15:07:32.086833 21769 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 15:07:32.086839 
21769 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.086900 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:32.087103 21769 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 15:07:32.087118 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.087123 21769 net.cpp:165] Memory required for data: 564839600\nI0818 15:07:32.087133 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 15:07:32.087144 21769 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 15:07:32.087151 21769 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 15:07:32.087158 21769 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.087167 21769 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 15:07:32.087174 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.087179 21769 net.cpp:165] Memory required for data: 571393200\nI0818 15:07:32.087183 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 15:07:32.087198 21769 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 15:07:32.087203 21769 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 15:07:32.087214 21769 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 15:07:32.087580 21769 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 15:07:32.087595 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.087600 21769 net.cpp:165] Memory required for data: 577946800\nI0818 15:07:32.087608 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 15:07:32.087617 21769 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 15:07:32.087625 21769 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 15:07:32.087635 21769 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 15:07:32.087929 21769 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 15:07:32.087944 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.087949 21769 net.cpp:165] Memory required for data: 584500400\nI0818 
15:07:32.087959 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:32.087970 21769 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 15:07:32.087977 21769 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 15:07:32.087985 21769 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 15:07:32.088053 21769 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:32.088222 21769 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 15:07:32.088235 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088240 21769 net.cpp:165] Memory required for data: 591054000\nI0818 15:07:32.088249 21769 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 15:07:32.088261 21769 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 15:07:32.088268 21769 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 15:07:32.088275 21769 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:32.088284 21769 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 15:07:32.088320 21769 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 15:07:32.088332 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088337 21769 net.cpp:165] Memory required for data: 597607600\nI0818 15:07:32.088342 21769 layer_factory.hpp:77] Creating layer L1_b8_relu\nI0818 15:07:32.088351 21769 net.cpp:100] Creating Layer L1_b8_relu\nI0818 15:07:32.088356 21769 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 15:07:32.088366 21769 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 15:07:32.088376 21769 net.cpp:150] Setting up L1_b8_relu\nI0818 15:07:32.088382 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088387 21769 net.cpp:165] Memory required for data: 604161200\nI0818 15:07:32.088392 21769 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:32.088399 21769 net.cpp:100] Creating Layer 
L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:32.088404 21769 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 15:07:32.088412 21769 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:32.088421 21769 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:32.088472 21769 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:32.088485 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088491 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088496 21769 net.cpp:165] Memory required for data: 617268400\nI0818 15:07:32.088501 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 15:07:32.088511 21769 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 15:07:32.088517 21769 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:32.088529 21769 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 15:07:32.088912 21769 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 15:07:32.088927 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.088932 21769 net.cpp:165] Memory required for data: 623822000\nI0818 15:07:32.088942 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI0818 15:07:32.088956 21769 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 15:07:32.088963 21769 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 15:07:32.088974 21769 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 15:07:32.089260 21769 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 15:07:32.089273 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.089278 21769 net.cpp:165] Memory required for data: 630375600\nI0818 15:07:32.089289 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:32.089298 21769 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 
15:07:32.089303 21769 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 15:07:32.089311 21769 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.089375 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:32.089539 21769 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 15:07:32.089551 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.089556 21769 net.cpp:165] Memory required for data: 636929200\nI0818 15:07:32.089573 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 15:07:32.089584 21769 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 15:07:32.089591 21769 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 15:07:32.089598 21769 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.089607 21769 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 15:07:32.089615 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.089619 21769 net.cpp:165] Memory required for data: 643482800\nI0818 15:07:32.089624 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 15:07:32.089638 21769 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 15:07:32.089644 21769 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 15:07:32.089658 21769 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 15:07:32.090032 21769 net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 15:07:32.090047 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090052 21769 net.cpp:165] Memory required for data: 650036400\nI0818 15:07:32.090061 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 15:07:32.090073 21769 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 15:07:32.090080 21769 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 15:07:32.090088 21769 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 15:07:32.090373 21769 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 15:07:32.090385 21769 net.cpp:157] Top shape: 100 16 32 32 
(1638400)\nI0818 15:07:32.090390 21769 net.cpp:165] Memory required for data: 656590000\nI0818 15:07:32.090423 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:32.090433 21769 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 15:07:32.090440 21769 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 15:07:32.090451 21769 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 15:07:32.090510 21769 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:32.090677 21769 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 15:07:32.090697 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090703 21769 net.cpp:165] Memory required for data: 663143600\nI0818 15:07:32.090711 21769 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 15:07:32.090723 21769 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 15:07:32.090729 21769 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 15:07:32.090736 21769 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:32.090744 21769 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 15:07:32.090780 21769 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 15:07:32.090790 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090795 21769 net.cpp:165] Memory required for data: 669697200\nI0818 15:07:32.090800 21769 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 15:07:32.090806 21769 net.cpp:100] Creating Layer L1_b9_relu\nI0818 15:07:32.090812 21769 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 15:07:32.090822 21769 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 15:07:32.090832 21769 net.cpp:150] Setting up L1_b9_relu\nI0818 15:07:32.090839 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090843 21769 net.cpp:165] Memory required for data: 676250800\nI0818 15:07:32.090848 21769 layer_factory.hpp:77] Creating layer 
L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:32.090857 21769 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:32.090863 21769 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 15:07:32.090872 21769 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:32.090880 21769 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:32.090931 21769 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:32.090943 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090956 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.090961 21769 net.cpp:165] Memory required for data: 689358000\nI0818 15:07:32.090967 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_conv\nI0818 15:07:32.090981 21769 net.cpp:100] Creating Layer L1_b10_cbr1_conv\nI0818 15:07:32.090987 21769 net.cpp:434] L1_b10_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:32.090996 21769 net.cpp:408] L1_b10_cbr1_conv -> L1_b10_cbr1_conv_top\nI0818 15:07:32.091356 21769 net.cpp:150] Setting up L1_b10_cbr1_conv\nI0818 15:07:32.091372 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.091377 21769 net.cpp:165] Memory required for data: 695911600\nI0818 15:07:32.091384 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_bn\nI0818 15:07:32.091394 21769 net.cpp:100] Creating Layer L1_b10_cbr1_bn\nI0818 15:07:32.091400 21769 net.cpp:434] L1_b10_cbr1_bn <- L1_b10_cbr1_conv_top\nI0818 15:07:32.091411 21769 net.cpp:408] L1_b10_cbr1_bn -> L1_b10_cbr1_bn_top\nI0818 15:07:32.091701 21769 net.cpp:150] Setting up L1_b10_cbr1_bn\nI0818 15:07:32.091720 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.091727 21769 net.cpp:165] Memory required for data: 702465200\nI0818 15:07:32.091737 21769 layer_factory.hpp:77] Creating layer 
L1_b10_cbr1_scale\nI0818 15:07:32.091745 21769 net.cpp:100] Creating Layer L1_b10_cbr1_scale\nI0818 15:07:32.091751 21769 net.cpp:434] L1_b10_cbr1_scale <- L1_b10_cbr1_bn_top\nI0818 15:07:32.091759 21769 net.cpp:395] L1_b10_cbr1_scale -> L1_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.091822 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_scale\nI0818 15:07:32.091996 21769 net.cpp:150] Setting up L1_b10_cbr1_scale\nI0818 15:07:32.092010 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.092015 21769 net.cpp:165] Memory required for data: 709018800\nI0818 15:07:32.092025 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr1_relu\nI0818 15:07:32.092034 21769 net.cpp:100] Creating Layer L1_b10_cbr1_relu\nI0818 15:07:32.092041 21769 net.cpp:434] L1_b10_cbr1_relu <- L1_b10_cbr1_bn_top\nI0818 15:07:32.092049 21769 net.cpp:395] L1_b10_cbr1_relu -> L1_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.092061 21769 net.cpp:150] Setting up L1_b10_cbr1_relu\nI0818 15:07:32.092068 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.092073 21769 net.cpp:165] Memory required for data: 715572400\nI0818 15:07:32.092078 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_conv\nI0818 15:07:32.092088 21769 net.cpp:100] Creating Layer L1_b10_cbr2_conv\nI0818 15:07:32.092094 21769 net.cpp:434] L1_b10_cbr2_conv <- L1_b10_cbr1_bn_top\nI0818 15:07:32.092105 21769 net.cpp:408] L1_b10_cbr2_conv -> L1_b10_cbr2_conv_top\nI0818 15:07:32.092466 21769 net.cpp:150] Setting up L1_b10_cbr2_conv\nI0818 15:07:32.092480 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.092485 21769 net.cpp:165] Memory required for data: 722126000\nI0818 15:07:32.092494 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_bn\nI0818 15:07:32.092504 21769 net.cpp:100] Creating Layer L1_b10_cbr2_bn\nI0818 15:07:32.092509 21769 net.cpp:434] L1_b10_cbr2_bn <- L1_b10_cbr2_conv_top\nI0818 15:07:32.092522 21769 net.cpp:408] L1_b10_cbr2_bn -> L1_b10_cbr2_bn_top\nI0818 
15:07:32.092831 21769 net.cpp:150] Setting up L1_b10_cbr2_bn\nI0818 15:07:32.092847 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.092852 21769 net.cpp:165] Memory required for data: 728679600\nI0818 15:07:32.092864 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0818 15:07:32.092875 21769 net.cpp:100] Creating Layer L1_b10_cbr2_scale\nI0818 15:07:32.092881 21769 net.cpp:434] L1_b10_cbr2_scale <- L1_b10_cbr2_bn_top\nI0818 15:07:32.092911 21769 net.cpp:395] L1_b10_cbr2_scale -> L1_b10_cbr2_bn_top (in-place)\nI0818 15:07:32.092978 21769 layer_factory.hpp:77] Creating layer L1_b10_cbr2_scale\nI0818 15:07:32.093144 21769 net.cpp:150] Setting up L1_b10_cbr2_scale\nI0818 15:07:32.093158 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.093170 21769 net.cpp:165] Memory required for data: 735233200\nI0818 15:07:32.093180 21769 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise\nI0818 15:07:32.093192 21769 net.cpp:100] Creating Layer L1_b10_sum_eltwise\nI0818 15:07:32.093199 21769 net.cpp:434] L1_b10_sum_eltwise <- L1_b10_cbr2_bn_top\nI0818 15:07:32.093206 21769 net.cpp:434] L1_b10_sum_eltwise <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:32.093214 21769 net.cpp:408] L1_b10_sum_eltwise -> L1_b10_sum_eltwise_top\nI0818 15:07:32.093252 21769 net.cpp:150] Setting up L1_b10_sum_eltwise\nI0818 15:07:32.093264 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.093269 21769 net.cpp:165] Memory required for data: 741786800\nI0818 15:07:32.093274 21769 layer_factory.hpp:77] Creating layer L1_b10_relu\nI0818 15:07:32.093282 21769 net.cpp:100] Creating Layer L1_b10_relu\nI0818 15:07:32.093287 21769 net.cpp:434] L1_b10_relu <- L1_b10_sum_eltwise_top\nI0818 15:07:32.093297 21769 net.cpp:395] L1_b10_relu -> L1_b10_sum_eltwise_top (in-place)\nI0818 15:07:32.093307 21769 net.cpp:150] Setting up L1_b10_relu\nI0818 15:07:32.093314 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 
15:07:32.093319 21769 net.cpp:165] Memory required for data: 748340400\nI0818 15:07:32.093323 21769 layer_factory.hpp:77] Creating layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:32.093330 21769 net.cpp:100] Creating Layer L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:32.093336 21769 net.cpp:434] L1_b10_sum_eltwise_top_L1_b10_relu_0_split <- L1_b10_sum_eltwise_top\nI0818 15:07:32.093343 21769 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0818 15:07:32.093353 21769 net.cpp:408] L1_b10_sum_eltwise_top_L1_b10_relu_0_split -> L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0818 15:07:32.093403 21769 net.cpp:150] Setting up L1_b10_sum_eltwise_top_L1_b10_relu_0_split\nI0818 15:07:32.093415 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.093422 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.093427 21769 net.cpp:165] Memory required for data: 761447600\nI0818 15:07:32.093432 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_conv\nI0818 15:07:32.093443 21769 net.cpp:100] Creating Layer L1_b11_cbr1_conv\nI0818 15:07:32.093449 21769 net.cpp:434] L1_b11_cbr1_conv <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_0\nI0818 15:07:32.093462 21769 net.cpp:408] L1_b11_cbr1_conv -> L1_b11_cbr1_conv_top\nI0818 15:07:32.093838 21769 net.cpp:150] Setting up L1_b11_cbr1_conv\nI0818 15:07:32.093853 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.093858 21769 net.cpp:165] Memory required for data: 768001200\nI0818 15:07:32.093868 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_bn\nI0818 15:07:32.093876 21769 net.cpp:100] Creating Layer L1_b11_cbr1_bn\nI0818 15:07:32.093883 21769 net.cpp:434] L1_b11_cbr1_bn <- L1_b11_cbr1_conv_top\nI0818 15:07:32.093894 21769 net.cpp:408] L1_b11_cbr1_bn -> L1_b11_cbr1_bn_top\nI0818 15:07:32.094182 21769 net.cpp:150] Setting up L1_b11_cbr1_bn\nI0818 15:07:32.094197 21769 net.cpp:157] Top shape: 100 16 32 
32 (1638400)\nI0818 15:07:32.094203 21769 net.cpp:165] Memory required for data: 774554800\nI0818 15:07:32.094213 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0818 15:07:32.094223 21769 net.cpp:100] Creating Layer L1_b11_cbr1_scale\nI0818 15:07:32.094228 21769 net.cpp:434] L1_b11_cbr1_scale <- L1_b11_cbr1_bn_top\nI0818 15:07:32.094235 21769 net.cpp:395] L1_b11_cbr1_scale -> L1_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.094296 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_scale\nI0818 15:07:32.094466 21769 net.cpp:150] Setting up L1_b11_cbr1_scale\nI0818 15:07:32.094480 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.094485 21769 net.cpp:165] Memory required for data: 781108400\nI0818 15:07:32.094496 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr1_relu\nI0818 15:07:32.094506 21769 net.cpp:100] Creating Layer L1_b11_cbr1_relu\nI0818 15:07:32.094512 21769 net.cpp:434] L1_b11_cbr1_relu <- L1_b11_cbr1_bn_top\nI0818 15:07:32.094527 21769 net.cpp:395] L1_b11_cbr1_relu -> L1_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.094537 21769 net.cpp:150] Setting up L1_b11_cbr1_relu\nI0818 15:07:32.094543 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.094548 21769 net.cpp:165] Memory required for data: 787662000\nI0818 15:07:32.094552 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_conv\nI0818 15:07:32.094566 21769 net.cpp:100] Creating Layer L1_b11_cbr2_conv\nI0818 15:07:32.094573 21769 net.cpp:434] L1_b11_cbr2_conv <- L1_b11_cbr1_bn_top\nI0818 15:07:32.094583 21769 net.cpp:408] L1_b11_cbr2_conv -> L1_b11_cbr2_conv_top\nI0818 15:07:32.094950 21769 net.cpp:150] Setting up L1_b11_cbr2_conv\nI0818 15:07:32.094965 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.094970 21769 net.cpp:165] Memory required for data: 794215600\nI0818 15:07:32.094980 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_bn\nI0818 15:07:32.094988 21769 net.cpp:100] Creating Layer L1_b11_cbr2_bn\nI0818 
15:07:32.094995 21769 net.cpp:434] L1_b11_cbr2_bn <- L1_b11_cbr2_conv_top\nI0818 15:07:32.095005 21769 net.cpp:408] L1_b11_cbr2_bn -> L1_b11_cbr2_bn_top\nI0818 15:07:32.095294 21769 net.cpp:150] Setting up L1_b11_cbr2_bn\nI0818 15:07:32.095309 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095314 21769 net.cpp:165] Memory required for data: 800769200\nI0818 15:07:32.095324 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0818 15:07:32.095336 21769 net.cpp:100] Creating Layer L1_b11_cbr2_scale\nI0818 15:07:32.095342 21769 net.cpp:434] L1_b11_cbr2_scale <- L1_b11_cbr2_bn_top\nI0818 15:07:32.095350 21769 net.cpp:395] L1_b11_cbr2_scale -> L1_b11_cbr2_bn_top (in-place)\nI0818 15:07:32.095410 21769 layer_factory.hpp:77] Creating layer L1_b11_cbr2_scale\nI0818 15:07:32.095574 21769 net.cpp:150] Setting up L1_b11_cbr2_scale\nI0818 15:07:32.095588 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095594 21769 net.cpp:165] Memory required for data: 807322800\nI0818 15:07:32.095603 21769 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise\nI0818 15:07:32.095615 21769 net.cpp:100] Creating Layer L1_b11_sum_eltwise\nI0818 15:07:32.095621 21769 net.cpp:434] L1_b11_sum_eltwise <- L1_b11_cbr2_bn_top\nI0818 15:07:32.095628 21769 net.cpp:434] L1_b11_sum_eltwise <- L1_b10_sum_eltwise_top_L1_b10_relu_0_split_1\nI0818 15:07:32.095636 21769 net.cpp:408] L1_b11_sum_eltwise -> L1_b11_sum_eltwise_top\nI0818 15:07:32.095674 21769 net.cpp:150] Setting up L1_b11_sum_eltwise\nI0818 15:07:32.095691 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095697 21769 net.cpp:165] Memory required for data: 813876400\nI0818 15:07:32.095702 21769 layer_factory.hpp:77] Creating layer L1_b11_relu\nI0818 15:07:32.095710 21769 net.cpp:100] Creating Layer L1_b11_relu\nI0818 15:07:32.095715 21769 net.cpp:434] L1_b11_relu <- L1_b11_sum_eltwise_top\nI0818 15:07:32.095727 21769 net.cpp:395] L1_b11_relu -> L1_b11_sum_eltwise_top 
(in-place)\nI0818 15:07:32.095737 21769 net.cpp:150] Setting up L1_b11_relu\nI0818 15:07:32.095746 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095749 21769 net.cpp:165] Memory required for data: 820430000\nI0818 15:07:32.095754 21769 layer_factory.hpp:77] Creating layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:32.095762 21769 net.cpp:100] Creating Layer L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:32.095767 21769 net.cpp:434] L1_b11_sum_eltwise_top_L1_b11_relu_0_split <- L1_b11_sum_eltwise_top\nI0818 15:07:32.095774 21769 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0818 15:07:32.095783 21769 net.cpp:408] L1_b11_sum_eltwise_top_L1_b11_relu_0_split -> L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0818 15:07:32.095836 21769 net.cpp:150] Setting up L1_b11_sum_eltwise_top_L1_b11_relu_0_split\nI0818 15:07:32.095849 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095855 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.095860 21769 net.cpp:165] Memory required for data: 833537200\nI0818 15:07:32.095871 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_conv\nI0818 15:07:32.095883 21769 net.cpp:100] Creating Layer L1_b12_cbr1_conv\nI0818 15:07:32.095890 21769 net.cpp:434] L1_b12_cbr1_conv <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_0\nI0818 15:07:32.095901 21769 net.cpp:408] L1_b12_cbr1_conv -> L1_b12_cbr1_conv_top\nI0818 15:07:32.096261 21769 net.cpp:150] Setting up L1_b12_cbr1_conv\nI0818 15:07:32.096276 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.096280 21769 net.cpp:165] Memory required for data: 840090800\nI0818 15:07:32.096289 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_bn\nI0818 15:07:32.096298 21769 net.cpp:100] Creating Layer L1_b12_cbr1_bn\nI0818 15:07:32.096305 21769 net.cpp:434] L1_b12_cbr1_bn <- L1_b12_cbr1_conv_top\nI0818 15:07:32.096318 21769 net.cpp:408] 
L1_b12_cbr1_bn -> L1_b12_cbr1_bn_top\nI0818 15:07:32.096602 21769 net.cpp:150] Setting up L1_b12_cbr1_bn\nI0818 15:07:32.096616 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.096621 21769 net.cpp:165] Memory required for data: 846644400\nI0818 15:07:32.096632 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0818 15:07:32.096644 21769 net.cpp:100] Creating Layer L1_b12_cbr1_scale\nI0818 15:07:32.096652 21769 net.cpp:434] L1_b12_cbr1_scale <- L1_b12_cbr1_bn_top\nI0818 15:07:32.096659 21769 net.cpp:395] L1_b12_cbr1_scale -> L1_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.096726 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_scale\nI0818 15:07:32.096896 21769 net.cpp:150] Setting up L1_b12_cbr1_scale\nI0818 15:07:32.096910 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.096915 21769 net.cpp:165] Memory required for data: 853198000\nI0818 15:07:32.096925 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr1_relu\nI0818 15:07:32.096935 21769 net.cpp:100] Creating Layer L1_b12_cbr1_relu\nI0818 15:07:32.096941 21769 net.cpp:434] L1_b12_cbr1_relu <- L1_b12_cbr1_bn_top\nI0818 15:07:32.096949 21769 net.cpp:395] L1_b12_cbr1_relu -> L1_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.096958 21769 net.cpp:150] Setting up L1_b12_cbr1_relu\nI0818 15:07:32.096966 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.096971 21769 net.cpp:165] Memory required for data: 859751600\nI0818 15:07:32.096974 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_conv\nI0818 15:07:32.096988 21769 net.cpp:100] Creating Layer L1_b12_cbr2_conv\nI0818 15:07:32.096995 21769 net.cpp:434] L1_b12_cbr2_conv <- L1_b12_cbr1_bn_top\nI0818 15:07:32.097005 21769 net.cpp:408] L1_b12_cbr2_conv -> L1_b12_cbr2_conv_top\nI0818 15:07:32.097364 21769 net.cpp:150] Setting up L1_b12_cbr2_conv\nI0818 15:07:32.097378 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.097383 21769 net.cpp:165] Memory required for data: 
866305200\nI0818 15:07:32.097393 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_bn\nI0818 15:07:32.097401 21769 net.cpp:100] Creating Layer L1_b12_cbr2_bn\nI0818 15:07:32.097407 21769 net.cpp:434] L1_b12_cbr2_bn <- L1_b12_cbr2_conv_top\nI0818 15:07:32.097415 21769 net.cpp:408] L1_b12_cbr2_bn -> L1_b12_cbr2_bn_top\nI0818 15:07:32.097710 21769 net.cpp:150] Setting up L1_b12_cbr2_bn\nI0818 15:07:32.097725 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.097730 21769 net.cpp:165] Memory required for data: 872858800\nI0818 15:07:32.097740 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0818 15:07:32.097753 21769 net.cpp:100] Creating Layer L1_b12_cbr2_scale\nI0818 15:07:32.097759 21769 net.cpp:434] L1_b12_cbr2_scale <- L1_b12_cbr2_bn_top\nI0818 15:07:32.097766 21769 net.cpp:395] L1_b12_cbr2_scale -> L1_b12_cbr2_bn_top (in-place)\nI0818 15:07:32.097829 21769 layer_factory.hpp:77] Creating layer L1_b12_cbr2_scale\nI0818 15:07:32.097998 21769 net.cpp:150] Setting up L1_b12_cbr2_scale\nI0818 15:07:32.098011 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098016 21769 net.cpp:165] Memory required for data: 879412400\nI0818 15:07:32.098026 21769 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise\nI0818 15:07:32.098042 21769 net.cpp:100] Creating Layer L1_b12_sum_eltwise\nI0818 15:07:32.098048 21769 net.cpp:434] L1_b12_sum_eltwise <- L1_b12_cbr2_bn_top\nI0818 15:07:32.098055 21769 net.cpp:434] L1_b12_sum_eltwise <- L1_b11_sum_eltwise_top_L1_b11_relu_0_split_1\nI0818 15:07:32.098069 21769 net.cpp:408] L1_b12_sum_eltwise -> L1_b12_sum_eltwise_top\nI0818 15:07:32.098109 21769 net.cpp:150] Setting up L1_b12_sum_eltwise\nI0818 15:07:32.098120 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098124 21769 net.cpp:165] Memory required for data: 885966000\nI0818 15:07:32.098130 21769 layer_factory.hpp:77] Creating layer L1_b12_relu\nI0818 15:07:32.098139 21769 net.cpp:100] Creating Layer 
L1_b12_relu\nI0818 15:07:32.098145 21769 net.cpp:434] L1_b12_relu <- L1_b12_sum_eltwise_top\nI0818 15:07:32.098155 21769 net.cpp:395] L1_b12_relu -> L1_b12_sum_eltwise_top (in-place)\nI0818 15:07:32.098165 21769 net.cpp:150] Setting up L1_b12_relu\nI0818 15:07:32.098171 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098176 21769 net.cpp:165] Memory required for data: 892519600\nI0818 15:07:32.098181 21769 layer_factory.hpp:77] Creating layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:32.098187 21769 net.cpp:100] Creating Layer L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:32.098192 21769 net.cpp:434] L1_b12_sum_eltwise_top_L1_b12_relu_0_split <- L1_b12_sum_eltwise_top\nI0818 15:07:32.098199 21769 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0818 15:07:32.098208 21769 net.cpp:408] L1_b12_sum_eltwise_top_L1_b12_relu_0_split -> L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0818 15:07:32.098261 21769 net.cpp:150] Setting up L1_b12_sum_eltwise_top_L1_b12_relu_0_split\nI0818 15:07:32.098273 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098280 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098284 21769 net.cpp:165] Memory required for data: 905626800\nI0818 15:07:32.098289 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_conv\nI0818 15:07:32.098300 21769 net.cpp:100] Creating Layer L1_b13_cbr1_conv\nI0818 15:07:32.098306 21769 net.cpp:434] L1_b13_cbr1_conv <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_0\nI0818 15:07:32.098318 21769 net.cpp:408] L1_b13_cbr1_conv -> L1_b13_cbr1_conv_top\nI0818 15:07:32.098687 21769 net.cpp:150] Setting up L1_b13_cbr1_conv\nI0818 15:07:32.098703 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.098708 21769 net.cpp:165] Memory required for data: 912180400\nI0818 15:07:32.098717 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_bn\nI0818 15:07:32.098742 21769 
net.cpp:100] Creating Layer L1_b13_cbr1_bn\nI0818 15:07:32.098749 21769 net.cpp:434] L1_b13_cbr1_bn <- L1_b13_cbr1_conv_top\nI0818 15:07:32.098757 21769 net.cpp:408] L1_b13_cbr1_bn -> L1_b13_cbr1_bn_top\nI0818 15:07:32.099045 21769 net.cpp:150] Setting up L1_b13_cbr1_bn\nI0818 15:07:32.099057 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.099062 21769 net.cpp:165] Memory required for data: 918734000\nI0818 15:07:32.099074 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0818 15:07:32.099082 21769 net.cpp:100] Creating Layer L1_b13_cbr1_scale\nI0818 15:07:32.099088 21769 net.cpp:434] L1_b13_cbr1_scale <- L1_b13_cbr1_bn_top\nI0818 15:07:32.099099 21769 net.cpp:395] L1_b13_cbr1_scale -> L1_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.099160 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_scale\nI0818 15:07:32.099326 21769 net.cpp:150] Setting up L1_b13_cbr1_scale\nI0818 15:07:32.099340 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.099345 21769 net.cpp:165] Memory required for data: 925287600\nI0818 15:07:32.099354 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr1_relu\nI0818 15:07:32.099362 21769 net.cpp:100] Creating Layer L1_b13_cbr1_relu\nI0818 15:07:32.099369 21769 net.cpp:434] L1_b13_cbr1_relu <- L1_b13_cbr1_bn_top\nI0818 15:07:32.099375 21769 net.cpp:395] L1_b13_cbr1_relu -> L1_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.099385 21769 net.cpp:150] Setting up L1_b13_cbr1_relu\nI0818 15:07:32.099398 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.099405 21769 net.cpp:165] Memory required for data: 931841200\nI0818 15:07:32.099408 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_conv\nI0818 15:07:32.099422 21769 net.cpp:100] Creating Layer L1_b13_cbr2_conv\nI0818 15:07:32.099429 21769 net.cpp:434] L1_b13_cbr2_conv <- L1_b13_cbr1_bn_top\nI0818 15:07:32.099440 21769 net.cpp:408] L1_b13_cbr2_conv -> L1_b13_cbr2_conv_top\nI0818 15:07:32.099820 21769 net.cpp:150] Setting up 
L1_b13_cbr2_conv\nI0818 15:07:32.099834 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.099840 21769 net.cpp:165] Memory required for data: 938394800\nI0818 15:07:32.099848 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_bn\nI0818 15:07:32.099869 21769 net.cpp:100] Creating Layer L1_b13_cbr2_bn\nI0818 15:07:32.099876 21769 net.cpp:434] L1_b13_cbr2_bn <- L1_b13_cbr2_conv_top\nI0818 15:07:32.099887 21769 net.cpp:408] L1_b13_cbr2_bn -> L1_b13_cbr2_bn_top\nI0818 15:07:32.101205 21769 net.cpp:150] Setting up L1_b13_cbr2_bn\nI0818 15:07:32.101227 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101233 21769 net.cpp:165] Memory required for data: 944948400\nI0818 15:07:32.101244 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0818 15:07:32.101254 21769 net.cpp:100] Creating Layer L1_b13_cbr2_scale\nI0818 15:07:32.101260 21769 net.cpp:434] L1_b13_cbr2_scale <- L1_b13_cbr2_bn_top\nI0818 15:07:32.101271 21769 net.cpp:395] L1_b13_cbr2_scale -> L1_b13_cbr2_bn_top (in-place)\nI0818 15:07:32.101336 21769 layer_factory.hpp:77] Creating layer L1_b13_cbr2_scale\nI0818 15:07:32.101503 21769 net.cpp:150] Setting up L1_b13_cbr2_scale\nI0818 15:07:32.101516 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101521 21769 net.cpp:165] Memory required for data: 951502000\nI0818 15:07:32.101531 21769 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise\nI0818 15:07:32.101542 21769 net.cpp:100] Creating Layer L1_b13_sum_eltwise\nI0818 15:07:32.101549 21769 net.cpp:434] L1_b13_sum_eltwise <- L1_b13_cbr2_bn_top\nI0818 15:07:32.101557 21769 net.cpp:434] L1_b13_sum_eltwise <- L1_b12_sum_eltwise_top_L1_b12_relu_0_split_1\nI0818 15:07:32.101567 21769 net.cpp:408] L1_b13_sum_eltwise -> L1_b13_sum_eltwise_top\nI0818 15:07:32.101603 21769 net.cpp:150] Setting up L1_b13_sum_eltwise\nI0818 15:07:32.101614 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101619 21769 net.cpp:165] Memory required for 
data: 958055600\nI0818 15:07:32.101624 21769 layer_factory.hpp:77] Creating layer L1_b13_relu\nI0818 15:07:32.101634 21769 net.cpp:100] Creating Layer L1_b13_relu\nI0818 15:07:32.101641 21769 net.cpp:434] L1_b13_relu <- L1_b13_sum_eltwise_top\nI0818 15:07:32.101649 21769 net.cpp:395] L1_b13_relu -> L1_b13_sum_eltwise_top (in-place)\nI0818 15:07:32.101657 21769 net.cpp:150] Setting up L1_b13_relu\nI0818 15:07:32.101665 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101670 21769 net.cpp:165] Memory required for data: 964609200\nI0818 15:07:32.101675 21769 layer_factory.hpp:77] Creating layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:32.101681 21769 net.cpp:100] Creating Layer L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:32.101693 21769 net.cpp:434] L1_b13_sum_eltwise_top_L1_b13_relu_0_split <- L1_b13_sum_eltwise_top\nI0818 15:07:32.101701 21769 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0818 15:07:32.101711 21769 net.cpp:408] L1_b13_sum_eltwise_top_L1_b13_relu_0_split -> L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0818 15:07:32.101764 21769 net.cpp:150] Setting up L1_b13_sum_eltwise_top_L1_b13_relu_0_split\nI0818 15:07:32.101776 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101783 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.101788 21769 net.cpp:165] Memory required for data: 977716400\nI0818 15:07:32.101794 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_conv\nI0818 15:07:32.101809 21769 net.cpp:100] Creating Layer L1_b14_cbr1_conv\nI0818 15:07:32.101824 21769 net.cpp:434] L1_b14_cbr1_conv <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_0\nI0818 15:07:32.101833 21769 net.cpp:408] L1_b14_cbr1_conv -> L1_b14_cbr1_conv_top\nI0818 15:07:32.102191 21769 net.cpp:150] Setting up L1_b14_cbr1_conv\nI0818 15:07:32.102205 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.102210 21769 net.cpp:165] 
Memory required for data: 984270000\nI0818 15:07:32.102219 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_bn\nI0818 15:07:32.102232 21769 net.cpp:100] Creating Layer L1_b14_cbr1_bn\nI0818 15:07:32.102239 21769 net.cpp:434] L1_b14_cbr1_bn <- L1_b14_cbr1_conv_top\nI0818 15:07:32.102247 21769 net.cpp:408] L1_b14_cbr1_bn -> L1_b14_cbr1_bn_top\nI0818 15:07:32.102530 21769 net.cpp:150] Setting up L1_b14_cbr1_bn\nI0818 15:07:32.102545 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.102550 21769 net.cpp:165] Memory required for data: 990823600\nI0818 15:07:32.102560 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0818 15:07:32.102569 21769 net.cpp:100] Creating Layer L1_b14_cbr1_scale\nI0818 15:07:32.102576 21769 net.cpp:434] L1_b14_cbr1_scale <- L1_b14_cbr1_bn_top\nI0818 15:07:32.102582 21769 net.cpp:395] L1_b14_cbr1_scale -> L1_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.102649 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_scale\nI0818 15:07:32.102820 21769 net.cpp:150] Setting up L1_b14_cbr1_scale\nI0818 15:07:32.102834 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.102839 21769 net.cpp:165] Memory required for data: 997377200\nI0818 15:07:32.102849 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr1_relu\nI0818 15:07:32.102857 21769 net.cpp:100] Creating Layer L1_b14_cbr1_relu\nI0818 15:07:32.102864 21769 net.cpp:434] L1_b14_cbr1_relu <- L1_b14_cbr1_bn_top\nI0818 15:07:32.102874 21769 net.cpp:395] L1_b14_cbr1_relu -> L1_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.102883 21769 net.cpp:150] Setting up L1_b14_cbr1_relu\nI0818 15:07:32.102891 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.102895 21769 net.cpp:165] Memory required for data: 1003930800\nI0818 15:07:32.102900 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_conv\nI0818 15:07:32.102911 21769 net.cpp:100] Creating Layer L1_b14_cbr2_conv\nI0818 15:07:32.102917 21769 net.cpp:434] L1_b14_cbr2_conv <- 
L1_b14_cbr1_bn_top\nI0818 15:07:32.102928 21769 net.cpp:408] L1_b14_cbr2_conv -> L1_b14_cbr2_conv_top\nI0818 15:07:32.103291 21769 net.cpp:150] Setting up L1_b14_cbr2_conv\nI0818 15:07:32.103305 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.103310 21769 net.cpp:165] Memory required for data: 1010484400\nI0818 15:07:32.103319 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_bn\nI0818 15:07:32.103329 21769 net.cpp:100] Creating Layer L1_b14_cbr2_bn\nI0818 15:07:32.103335 21769 net.cpp:434] L1_b14_cbr2_bn <- L1_b14_cbr2_conv_top\nI0818 15:07:32.103346 21769 net.cpp:408] L1_b14_cbr2_bn -> L1_b14_cbr2_bn_top\nI0818 15:07:32.103628 21769 net.cpp:150] Setting up L1_b14_cbr2_bn\nI0818 15:07:32.103646 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.103651 21769 net.cpp:165] Memory required for data: 1017038000\nI0818 15:07:32.103662 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0818 15:07:32.103670 21769 net.cpp:100] Creating Layer L1_b14_cbr2_scale\nI0818 15:07:32.103677 21769 net.cpp:434] L1_b14_cbr2_scale <- L1_b14_cbr2_bn_top\nI0818 15:07:32.103689 21769 net.cpp:395] L1_b14_cbr2_scale -> L1_b14_cbr2_bn_top (in-place)\nI0818 15:07:32.103751 21769 layer_factory.hpp:77] Creating layer L1_b14_cbr2_scale\nI0818 15:07:32.103912 21769 net.cpp:150] Setting up L1_b14_cbr2_scale\nI0818 15:07:32.103925 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.103930 21769 net.cpp:165] Memory required for data: 1023591600\nI0818 15:07:32.103940 21769 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise\nI0818 15:07:32.103951 21769 net.cpp:100] Creating Layer L1_b14_sum_eltwise\nI0818 15:07:32.103958 21769 net.cpp:434] L1_b14_sum_eltwise <- L1_b14_cbr2_bn_top\nI0818 15:07:32.103976 21769 net.cpp:434] L1_b14_sum_eltwise <- L1_b13_sum_eltwise_top_L1_b13_relu_0_split_1\nI0818 15:07:32.103984 21769 net.cpp:408] L1_b14_sum_eltwise -> L1_b14_sum_eltwise_top\nI0818 15:07:32.104023 21769 net.cpp:150] Setting up 
L1_b14_sum_eltwise\nI0818 15:07:32.104033 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104038 21769 net.cpp:165] Memory required for data: 1030145200\nI0818 15:07:32.104043 21769 layer_factory.hpp:77] Creating layer L1_b14_relu\nI0818 15:07:32.104050 21769 net.cpp:100] Creating Layer L1_b14_relu\nI0818 15:07:32.104056 21769 net.cpp:434] L1_b14_relu <- L1_b14_sum_eltwise_top\nI0818 15:07:32.104068 21769 net.cpp:395] L1_b14_relu -> L1_b14_sum_eltwise_top (in-place)\nI0818 15:07:32.104077 21769 net.cpp:150] Setting up L1_b14_relu\nI0818 15:07:32.104084 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104089 21769 net.cpp:165] Memory required for data: 1036698800\nI0818 15:07:32.104094 21769 layer_factory.hpp:77] Creating layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:32.104101 21769 net.cpp:100] Creating Layer L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:32.104106 21769 net.cpp:434] L1_b14_sum_eltwise_top_L1_b14_relu_0_split <- L1_b14_sum_eltwise_top\nI0818 15:07:32.104115 21769 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0818 15:07:32.104123 21769 net.cpp:408] L1_b14_sum_eltwise_top_L1_b14_relu_0_split -> L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0818 15:07:32.104176 21769 net.cpp:150] Setting up L1_b14_sum_eltwise_top_L1_b14_relu_0_split\nI0818 15:07:32.104187 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104193 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104198 21769 net.cpp:165] Memory required for data: 1049806000\nI0818 15:07:32.104203 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_conv\nI0818 15:07:32.104218 21769 net.cpp:100] Creating Layer L1_b15_cbr1_conv\nI0818 15:07:32.104224 21769 net.cpp:434] L1_b15_cbr1_conv <- L1_b14_sum_eltwise_top_L1_b14_relu_0_split_0\nI0818 15:07:32.104233 21769 net.cpp:408] L1_b15_cbr1_conv -> L1_b15_cbr1_conv_top\nI0818 15:07:32.104593 
21769 net.cpp:150] Setting up L1_b15_cbr1_conv\nI0818 15:07:32.104609 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104614 21769 net.cpp:165] Memory required for data: 1056359600\nI0818 15:07:32.104622 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_bn\nI0818 15:07:32.104635 21769 net.cpp:100] Creating Layer L1_b15_cbr1_bn\nI0818 15:07:32.104641 21769 net.cpp:434] L1_b15_cbr1_bn <- L1_b15_cbr1_conv_top\nI0818 15:07:32.104650 21769 net.cpp:408] L1_b15_cbr1_bn -> L1_b15_cbr1_bn_top\nI0818 15:07:32.104938 21769 net.cpp:150] Setting up L1_b15_cbr1_bn\nI0818 15:07:32.104956 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.104962 21769 net.cpp:165] Memory required for data: 1062913200\nI0818 15:07:32.104972 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0818 15:07:32.104980 21769 net.cpp:100] Creating Layer L1_b15_cbr1_scale\nI0818 15:07:32.104987 21769 net.cpp:434] L1_b15_cbr1_scale <- L1_b15_cbr1_bn_top\nI0818 15:07:32.104995 21769 net.cpp:395] L1_b15_cbr1_scale -> L1_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.105054 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_scale\nI0818 15:07:32.105216 21769 net.cpp:150] Setting up L1_b15_cbr1_scale\nI0818 15:07:32.105229 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.105234 21769 net.cpp:165] Memory required for data: 1069466800\nI0818 15:07:32.105243 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr1_relu\nI0818 15:07:32.105254 21769 net.cpp:100] Creating Layer L1_b15_cbr1_relu\nI0818 15:07:32.105262 21769 net.cpp:434] L1_b15_cbr1_relu <- L1_b15_cbr1_bn_top\nI0818 15:07:32.105268 21769 net.cpp:395] L1_b15_cbr1_relu -> L1_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.105281 21769 net.cpp:150] Setting up L1_b15_cbr1_relu\nI0818 15:07:32.105288 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.105293 21769 net.cpp:165] Memory required for data: 1076020400\nI0818 15:07:32.105304 21769 layer_factory.hpp:77] Creating 
layer L1_b15_cbr2_conv\nI0818 15:07:32.105316 21769 net.cpp:100] Creating Layer L1_b15_cbr2_conv\nI0818 15:07:32.105322 21769 net.cpp:434] L1_b15_cbr2_conv <- L1_b15_cbr1_bn_top\nI0818 15:07:32.105334 21769 net.cpp:408] L1_b15_cbr2_conv -> L1_b15_cbr2_conv_top\nI0818 15:07:32.105707 21769 net.cpp:150] Setting up L1_b15_cbr2_conv\nI0818 15:07:32.105722 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.105727 21769 net.cpp:165] Memory required for data: 1082574000\nI0818 15:07:32.105736 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_bn\nI0818 15:07:32.105746 21769 net.cpp:100] Creating Layer L1_b15_cbr2_bn\nI0818 15:07:32.105751 21769 net.cpp:434] L1_b15_cbr2_bn <- L1_b15_cbr2_conv_top\nI0818 15:07:32.105762 21769 net.cpp:408] L1_b15_cbr2_bn -> L1_b15_cbr2_bn_top\nI0818 15:07:32.106057 21769 net.cpp:150] Setting up L1_b15_cbr2_bn\nI0818 15:07:32.106070 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.106076 21769 net.cpp:165] Memory required for data: 1089127600\nI0818 15:07:32.106086 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0818 15:07:32.106098 21769 net.cpp:100] Creating Layer L1_b15_cbr2_scale\nI0818 15:07:32.106106 21769 net.cpp:434] L1_b15_cbr2_scale <- L1_b15_cbr2_bn_top\nI0818 15:07:32.106113 21769 net.cpp:395] L1_b15_cbr2_scale -> L1_b15_cbr2_bn_top (in-place)\nI0818 15:07:32.106174 21769 layer_factory.hpp:77] Creating layer L1_b15_cbr2_scale\nI0818 15:07:32.107365 21769 net.cpp:150] Setting up L1_b15_cbr2_scale\nI0818 15:07:32.107383 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.107388 21769 net.cpp:165] Memory required for data: 1095681200\nI0818 15:07:32.107398 21769 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise\nI0818 15:07:32.107411 21769 net.cpp:100] Creating Layer L1_b15_sum_eltwise\nI0818 15:07:32.107419 21769 net.cpp:434] L1_b15_sum_eltwise <- L1_b15_cbr2_bn_top\nI0818 15:07:32.107425 21769 net.cpp:434] L1_b15_sum_eltwise <- 
L1_b14_sum_eltwise_top_L1_b14_relu_0_split_1\nI0818 15:07:32.107434 21769 net.cpp:408] L1_b15_sum_eltwise -> L1_b15_sum_eltwise_top\nI0818 15:07:32.107473 21769 net.cpp:150] Setting up L1_b15_sum_eltwise\nI0818 15:07:32.107486 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.107491 21769 net.cpp:165] Memory required for data: 1102234800\nI0818 15:07:32.107496 21769 layer_factory.hpp:77] Creating layer L1_b15_relu\nI0818 15:07:32.107504 21769 net.cpp:100] Creating Layer L1_b15_relu\nI0818 15:07:32.107511 21769 net.cpp:434] L1_b15_relu <- L1_b15_sum_eltwise_top\nI0818 15:07:32.107517 21769 net.cpp:395] L1_b15_relu -> L1_b15_sum_eltwise_top (in-place)\nI0818 15:07:32.107527 21769 net.cpp:150] Setting up L1_b15_relu\nI0818 15:07:32.107533 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.107538 21769 net.cpp:165] Memory required for data: 1108788400\nI0818 15:07:32.107542 21769 layer_factory.hpp:77] Creating layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:32.107550 21769 net.cpp:100] Creating Layer L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:32.107555 21769 net.cpp:434] L1_b15_sum_eltwise_top_L1_b15_relu_0_split <- L1_b15_sum_eltwise_top\nI0818 15:07:32.107565 21769 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0818 15:07:32.107576 21769 net.cpp:408] L1_b15_sum_eltwise_top_L1_b15_relu_0_split -> L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0818 15:07:32.107625 21769 net.cpp:150] Setting up L1_b15_sum_eltwise_top_L1_b15_relu_0_split\nI0818 15:07:32.107637 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.107643 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.107648 21769 net.cpp:165] Memory required for data: 1121895600\nI0818 15:07:32.107653 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_conv\nI0818 15:07:32.107668 21769 net.cpp:100] Creating Layer L1_b16_cbr1_conv\nI0818 15:07:32.107676 21769 
net.cpp:434] L1_b16_cbr1_conv <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_0\nI0818 15:07:32.107699 21769 net.cpp:408] L1_b16_cbr1_conv -> L1_b16_cbr1_conv_top\nI0818 15:07:32.108073 21769 net.cpp:150] Setting up L1_b16_cbr1_conv\nI0818 15:07:32.108088 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.108093 21769 net.cpp:165] Memory required for data: 1128449200\nI0818 15:07:32.108103 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_bn\nI0818 15:07:32.108120 21769 net.cpp:100] Creating Layer L1_b16_cbr1_bn\nI0818 15:07:32.108127 21769 net.cpp:434] L1_b16_cbr1_bn <- L1_b16_cbr1_conv_top\nI0818 15:07:32.108139 21769 net.cpp:408] L1_b16_cbr1_bn -> L1_b16_cbr1_bn_top\nI0818 15:07:32.108419 21769 net.cpp:150] Setting up L1_b16_cbr1_bn\nI0818 15:07:32.108433 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.108438 21769 net.cpp:165] Memory required for data: 1135002800\nI0818 15:07:32.108448 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0818 15:07:32.108458 21769 net.cpp:100] Creating Layer L1_b16_cbr1_scale\nI0818 15:07:32.108464 21769 net.cpp:434] L1_b16_cbr1_scale <- L1_b16_cbr1_bn_top\nI0818 15:07:32.108470 21769 net.cpp:395] L1_b16_cbr1_scale -> L1_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.108534 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_scale\nI0818 15:07:32.108698 21769 net.cpp:150] Setting up L1_b16_cbr1_scale\nI0818 15:07:32.108712 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.108717 21769 net.cpp:165] Memory required for data: 1141556400\nI0818 15:07:32.108727 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr1_relu\nI0818 15:07:32.108737 21769 net.cpp:100] Creating Layer L1_b16_cbr1_relu\nI0818 15:07:32.108744 21769 net.cpp:434] L1_b16_cbr1_relu <- L1_b16_cbr1_bn_top\nI0818 15:07:32.108752 21769 net.cpp:395] L1_b16_cbr1_relu -> L1_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.108762 21769 net.cpp:150] Setting up L1_b16_cbr1_relu\nI0818 15:07:32.108768 21769 
net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.108773 21769 net.cpp:165] Memory required for data: 1148110000\nI0818 15:07:32.108778 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_conv\nI0818 15:07:32.108791 21769 net.cpp:100] Creating Layer L1_b16_cbr2_conv\nI0818 15:07:32.108798 21769 net.cpp:434] L1_b16_cbr2_conv <- L1_b16_cbr1_bn_top\nI0818 15:07:32.108811 21769 net.cpp:408] L1_b16_cbr2_conv -> L1_b16_cbr2_conv_top\nI0818 15:07:32.109174 21769 net.cpp:150] Setting up L1_b16_cbr2_conv\nI0818 15:07:32.109187 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.109192 21769 net.cpp:165] Memory required for data: 1154663600\nI0818 15:07:32.109201 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_bn\nI0818 15:07:32.109215 21769 net.cpp:100] Creating Layer L1_b16_cbr2_bn\nI0818 15:07:32.109221 21769 net.cpp:434] L1_b16_cbr2_bn <- L1_b16_cbr2_conv_top\nI0818 15:07:32.109230 21769 net.cpp:408] L1_b16_cbr2_bn -> L1_b16_cbr2_bn_top\nI0818 15:07:32.109508 21769 net.cpp:150] Setting up L1_b16_cbr2_bn\nI0818 15:07:32.109521 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.109526 21769 net.cpp:165] Memory required for data: 1161217200\nI0818 15:07:32.109537 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0818 15:07:32.109546 21769 net.cpp:100] Creating Layer L1_b16_cbr2_scale\nI0818 15:07:32.109552 21769 net.cpp:434] L1_b16_cbr2_scale <- L1_b16_cbr2_bn_top\nI0818 15:07:32.109560 21769 net.cpp:395] L1_b16_cbr2_scale -> L1_b16_cbr2_bn_top (in-place)\nI0818 15:07:32.109627 21769 layer_factory.hpp:77] Creating layer L1_b16_cbr2_scale\nI0818 15:07:32.109797 21769 net.cpp:150] Setting up L1_b16_cbr2_scale\nI0818 15:07:32.109812 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.109817 21769 net.cpp:165] Memory required for data: 1167770800\nI0818 15:07:32.109827 21769 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise\nI0818 15:07:32.109835 21769 net.cpp:100] Creating Layer 
L1_b16_sum_eltwise\nI0818 15:07:32.109841 21769 net.cpp:434] L1_b16_sum_eltwise <- L1_b16_cbr2_bn_top\nI0818 15:07:32.109848 21769 net.cpp:434] L1_b16_sum_eltwise <- L1_b15_sum_eltwise_top_L1_b15_relu_0_split_1\nI0818 15:07:32.109858 21769 net.cpp:408] L1_b16_sum_eltwise -> L1_b16_sum_eltwise_top\nI0818 15:07:32.109901 21769 net.cpp:150] Setting up L1_b16_sum_eltwise\nI0818 15:07:32.109911 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.109916 21769 net.cpp:165] Memory required for data: 1174324400\nI0818 15:07:32.109921 21769 layer_factory.hpp:77] Creating layer L1_b16_relu\nI0818 15:07:32.109932 21769 net.cpp:100] Creating Layer L1_b16_relu\nI0818 15:07:32.109939 21769 net.cpp:434] L1_b16_relu <- L1_b16_sum_eltwise_top\nI0818 15:07:32.109946 21769 net.cpp:395] L1_b16_relu -> L1_b16_sum_eltwise_top (in-place)\nI0818 15:07:32.109956 21769 net.cpp:150] Setting up L1_b16_relu\nI0818 15:07:32.109962 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.109967 21769 net.cpp:165] Memory required for data: 1180878000\nI0818 15:07:32.109972 21769 layer_factory.hpp:77] Creating layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:32.109979 21769 net.cpp:100] Creating Layer L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:32.109984 21769 net.cpp:434] L1_b16_sum_eltwise_top_L1_b16_relu_0_split <- L1_b16_sum_eltwise_top\nI0818 15:07:32.109992 21769 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0818 15:07:32.110025 21769 net.cpp:408] L1_b16_sum_eltwise_top_L1_b16_relu_0_split -> L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0818 15:07:32.110080 21769 net.cpp:150] Setting up L1_b16_sum_eltwise_top_L1_b16_relu_0_split\nI0818 15:07:32.110093 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.110100 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.110105 21769 net.cpp:165] Memory required for data: 1193985200\nI0818 15:07:32.110110 
21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_conv\nI0818 15:07:32.110123 21769 net.cpp:100] Creating Layer L1_b17_cbr1_conv\nI0818 15:07:32.110131 21769 net.cpp:434] L1_b17_cbr1_conv <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_0\nI0818 15:07:32.110139 21769 net.cpp:408] L1_b17_cbr1_conv -> L1_b17_cbr1_conv_top\nI0818 15:07:32.110496 21769 net.cpp:150] Setting up L1_b17_cbr1_conv\nI0818 15:07:32.110510 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.110515 21769 net.cpp:165] Memory required for data: 1200538800\nI0818 15:07:32.110523 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_bn\nI0818 15:07:32.110538 21769 net.cpp:100] Creating Layer L1_b17_cbr1_bn\nI0818 15:07:32.110544 21769 net.cpp:434] L1_b17_cbr1_bn <- L1_b17_cbr1_conv_top\nI0818 15:07:32.110555 21769 net.cpp:408] L1_b17_cbr1_bn -> L1_b17_cbr1_bn_top\nI0818 15:07:32.110846 21769 net.cpp:150] Setting up L1_b17_cbr1_bn\nI0818 15:07:32.110859 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.110864 21769 net.cpp:165] Memory required for data: 1207092400\nI0818 15:07:32.110874 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0818 15:07:32.110883 21769 net.cpp:100] Creating Layer L1_b17_cbr1_scale\nI0818 15:07:32.110889 21769 net.cpp:434] L1_b17_cbr1_scale <- L1_b17_cbr1_bn_top\nI0818 15:07:32.110898 21769 net.cpp:395] L1_b17_cbr1_scale -> L1_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.110961 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_scale\nI0818 15:07:32.111119 21769 net.cpp:150] Setting up L1_b17_cbr1_scale\nI0818 15:07:32.111132 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.111137 21769 net.cpp:165] Memory required for data: 1213646000\nI0818 15:07:32.111146 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr1_relu\nI0818 15:07:32.111153 21769 net.cpp:100] Creating Layer L1_b17_cbr1_relu\nI0818 15:07:32.111160 21769 net.cpp:434] L1_b17_cbr1_relu <- L1_b17_cbr1_bn_top\nI0818 15:07:32.111171 21769 
net.cpp:395] L1_b17_cbr1_relu -> L1_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.111181 21769 net.cpp:150] Setting up L1_b17_cbr1_relu\nI0818 15:07:32.111189 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.111193 21769 net.cpp:165] Memory required for data: 1220199600\nI0818 15:07:32.111198 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_conv\nI0818 15:07:32.111219 21769 net.cpp:100] Creating Layer L1_b17_cbr2_conv\nI0818 15:07:32.111227 21769 net.cpp:434] L1_b17_cbr2_conv <- L1_b17_cbr1_bn_top\nI0818 15:07:32.111240 21769 net.cpp:408] L1_b17_cbr2_conv -> L1_b17_cbr2_conv_top\nI0818 15:07:32.111600 21769 net.cpp:150] Setting up L1_b17_cbr2_conv\nI0818 15:07:32.111614 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.111619 21769 net.cpp:165] Memory required for data: 1226753200\nI0818 15:07:32.111629 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_bn\nI0818 15:07:32.111641 21769 net.cpp:100] Creating Layer L1_b17_cbr2_bn\nI0818 15:07:32.111649 21769 net.cpp:434] L1_b17_cbr2_bn <- L1_b17_cbr2_conv_top\nI0818 15:07:32.111656 21769 net.cpp:408] L1_b17_cbr2_bn -> L1_b17_cbr2_bn_top\nI0818 15:07:32.111950 21769 net.cpp:150] Setting up L1_b17_cbr2_bn\nI0818 15:07:32.111965 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.111970 21769 net.cpp:165] Memory required for data: 1233306800\nI0818 15:07:32.111980 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0818 15:07:32.111989 21769 net.cpp:100] Creating Layer L1_b17_cbr2_scale\nI0818 15:07:32.111996 21769 net.cpp:434] L1_b17_cbr2_scale <- L1_b17_cbr2_bn_top\nI0818 15:07:32.112004 21769 net.cpp:395] L1_b17_cbr2_scale -> L1_b17_cbr2_bn_top (in-place)\nI0818 15:07:32.112067 21769 layer_factory.hpp:77] Creating layer L1_b17_cbr2_scale\nI0818 15:07:32.112228 21769 net.cpp:150] Setting up L1_b17_cbr2_scale\nI0818 15:07:32.112242 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.112247 21769 net.cpp:165] Memory required for 
data: 1239860400\nI0818 15:07:32.112257 21769 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise\nI0818 15:07:32.112265 21769 net.cpp:100] Creating Layer L1_b17_sum_eltwise\nI0818 15:07:32.112272 21769 net.cpp:434] L1_b17_sum_eltwise <- L1_b17_cbr2_bn_top\nI0818 15:07:32.112278 21769 net.cpp:434] L1_b17_sum_eltwise <- L1_b16_sum_eltwise_top_L1_b16_relu_0_split_1\nI0818 15:07:32.112289 21769 net.cpp:408] L1_b17_sum_eltwise -> L1_b17_sum_eltwise_top\nI0818 15:07:32.112323 21769 net.cpp:150] Setting up L1_b17_sum_eltwise\nI0818 15:07:32.112332 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.112337 21769 net.cpp:165] Memory required for data: 1246414000\nI0818 15:07:32.112342 21769 layer_factory.hpp:77] Creating layer L1_b17_relu\nI0818 15:07:32.112354 21769 net.cpp:100] Creating Layer L1_b17_relu\nI0818 15:07:32.112360 21769 net.cpp:434] L1_b17_relu <- L1_b17_sum_eltwise_top\nI0818 15:07:32.112366 21769 net.cpp:395] L1_b17_relu -> L1_b17_sum_eltwise_top (in-place)\nI0818 15:07:32.112376 21769 net.cpp:150] Setting up L1_b17_relu\nI0818 15:07:32.112383 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.112387 21769 net.cpp:165] Memory required for data: 1252967600\nI0818 15:07:32.112392 21769 layer_factory.hpp:77] Creating layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 15:07:32.112401 21769 net.cpp:100] Creating Layer L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 15:07:32.112406 21769 net.cpp:434] L1_b17_sum_eltwise_top_L1_b17_relu_0_split <- L1_b17_sum_eltwise_top\nI0818 15:07:32.112412 21769 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0818 15:07:32.112422 21769 net.cpp:408] L1_b17_sum_eltwise_top_L1_b17_relu_0_split -> L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0818 15:07:32.112473 21769 net.cpp:150] Setting up L1_b17_sum_eltwise_top_L1_b17_relu_0_split\nI0818 15:07:32.112484 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 
15:07:32.112491 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.112495 21769 net.cpp:165] Memory required for data: 1266074800\nI0818 15:07:32.112501 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_conv\nI0818 15:07:32.112515 21769 net.cpp:100] Creating Layer L1_b18_cbr1_conv\nI0818 15:07:32.112522 21769 net.cpp:434] L1_b18_cbr1_conv <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_0\nI0818 15:07:32.112531 21769 net.cpp:408] L1_b18_cbr1_conv -> L1_b18_cbr1_conv_top\nI0818 15:07:32.112906 21769 net.cpp:150] Setting up L1_b18_cbr1_conv\nI0818 15:07:32.112927 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.112933 21769 net.cpp:165] Memory required for data: 1272628400\nI0818 15:07:32.112942 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_bn\nI0818 15:07:32.112954 21769 net.cpp:100] Creating Layer L1_b18_cbr1_bn\nI0818 15:07:32.112962 21769 net.cpp:434] L1_b18_cbr1_bn <- L1_b18_cbr1_conv_top\nI0818 15:07:32.112970 21769 net.cpp:408] L1_b18_cbr1_bn -> L1_b18_cbr1_bn_top\nI0818 15:07:32.113267 21769 net.cpp:150] Setting up L1_b18_cbr1_bn\nI0818 15:07:32.113286 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.113291 21769 net.cpp:165] Memory required for data: 1279182000\nI0818 15:07:32.113301 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0818 15:07:32.113310 21769 net.cpp:100] Creating Layer L1_b18_cbr1_scale\nI0818 15:07:32.113319 21769 net.cpp:434] L1_b18_cbr1_scale <- L1_b18_cbr1_bn_top\nI0818 15:07:32.113327 21769 net.cpp:395] L1_b18_cbr1_scale -> L1_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.113389 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_scale\nI0818 15:07:32.113550 21769 net.cpp:150] Setting up L1_b18_cbr1_scale\nI0818 15:07:32.113564 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.113569 21769 net.cpp:165] Memory required for data: 1285735600\nI0818 15:07:32.113579 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr1_relu\nI0818 
15:07:32.113590 21769 net.cpp:100] Creating Layer L1_b18_cbr1_relu\nI0818 15:07:32.113596 21769 net.cpp:434] L1_b18_cbr1_relu <- L1_b18_cbr1_bn_top\nI0818 15:07:32.113606 21769 net.cpp:395] L1_b18_cbr1_relu -> L1_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.113617 21769 net.cpp:150] Setting up L1_b18_cbr1_relu\nI0818 15:07:32.113625 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.113628 21769 net.cpp:165] Memory required for data: 1292289200\nI0818 15:07:32.113633 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_conv\nI0818 15:07:32.113644 21769 net.cpp:100] Creating Layer L1_b18_cbr2_conv\nI0818 15:07:32.113651 21769 net.cpp:434] L1_b18_cbr2_conv <- L1_b18_cbr1_bn_top\nI0818 15:07:32.113662 21769 net.cpp:408] L1_b18_cbr2_conv -> L1_b18_cbr2_conv_top\nI0818 15:07:32.114055 21769 net.cpp:150] Setting up L1_b18_cbr2_conv\nI0818 15:07:32.114071 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.114076 21769 net.cpp:165] Memory required for data: 1298842800\nI0818 15:07:32.114085 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_bn\nI0818 15:07:32.114095 21769 net.cpp:100] Creating Layer L1_b18_cbr2_bn\nI0818 15:07:32.114101 21769 net.cpp:434] L1_b18_cbr2_bn <- L1_b18_cbr2_conv_top\nI0818 15:07:32.114114 21769 net.cpp:408] L1_b18_cbr2_bn -> L1_b18_cbr2_bn_top\nI0818 15:07:32.114395 21769 net.cpp:150] Setting up L1_b18_cbr2_bn\nI0818 15:07:32.114408 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.114413 21769 net.cpp:165] Memory required for data: 1305396400\nI0818 15:07:32.114459 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0818 15:07:32.114471 21769 net.cpp:100] Creating Layer L1_b18_cbr2_scale\nI0818 15:07:32.114478 21769 net.cpp:434] L1_b18_cbr2_scale <- L1_b18_cbr2_bn_top\nI0818 15:07:32.114487 21769 net.cpp:395] L1_b18_cbr2_scale -> L1_b18_cbr2_bn_top (in-place)\nI0818 15:07:32.114548 21769 layer_factory.hpp:77] Creating layer L1_b18_cbr2_scale\nI0818 15:07:32.114711 21769 
net.cpp:150] Setting up L1_b18_cbr2_scale\nI0818 15:07:32.114728 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.114734 21769 net.cpp:165] Memory required for data: 1311950000\nI0818 15:07:32.114743 21769 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise\nI0818 15:07:32.114753 21769 net.cpp:100] Creating Layer L1_b18_sum_eltwise\nI0818 15:07:32.114759 21769 net.cpp:434] L1_b18_sum_eltwise <- L1_b18_cbr2_bn_top\nI0818 15:07:32.114766 21769 net.cpp:434] L1_b18_sum_eltwise <- L1_b17_sum_eltwise_top_L1_b17_relu_0_split_1\nI0818 15:07:32.114773 21769 net.cpp:408] L1_b18_sum_eltwise -> L1_b18_sum_eltwise_top\nI0818 15:07:32.114814 21769 net.cpp:150] Setting up L1_b18_sum_eltwise\nI0818 15:07:32.114830 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.114835 21769 net.cpp:165] Memory required for data: 1318503600\nI0818 15:07:32.114840 21769 layer_factory.hpp:77] Creating layer L1_b18_relu\nI0818 15:07:32.114852 21769 net.cpp:100] Creating Layer L1_b18_relu\nI0818 15:07:32.114859 21769 net.cpp:434] L1_b18_relu <- L1_b18_sum_eltwise_top\nI0818 15:07:32.114866 21769 net.cpp:395] L1_b18_relu -> L1_b18_sum_eltwise_top (in-place)\nI0818 15:07:32.114876 21769 net.cpp:150] Setting up L1_b18_relu\nI0818 15:07:32.114883 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.114887 21769 net.cpp:165] Memory required for data: 1325057200\nI0818 15:07:32.114892 21769 layer_factory.hpp:77] Creating layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:32.114902 21769 net.cpp:100] Creating Layer L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:32.114908 21769 net.cpp:434] L1_b18_sum_eltwise_top_L1_b18_relu_0_split <- L1_b18_sum_eltwise_top\nI0818 15:07:32.114915 21769 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0818 15:07:32.114925 21769 net.cpp:408] L1_b18_sum_eltwise_top_L1_b18_relu_0_split -> L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0818 
15:07:32.114977 21769 net.cpp:150] Setting up L1_b18_sum_eltwise_top_L1_b18_relu_0_split\nI0818 15:07:32.114992 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.115000 21769 net.cpp:157] Top shape: 100 16 32 32 (1638400)\nI0818 15:07:32.115005 21769 net.cpp:165] Memory required for data: 1338164400\nI0818 15:07:32.115010 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 15:07:32.115020 21769 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 15:07:32.115027 21769 net.cpp:434] L2_b1_cbr1_conv <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_0\nI0818 15:07:32.115036 21769 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 15:07:32.115402 21769 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 15:07:32.115417 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.115422 21769 net.cpp:165] Memory required for data: 1339802800\nI0818 15:07:32.115429 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 15:07:32.115442 21769 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 15:07:32.115448 21769 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 15:07:32.115458 21769 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 15:07:32.115741 21769 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 15:07:32.115756 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.115761 21769 net.cpp:165] Memory required for data: 1341441200\nI0818 15:07:32.115772 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:32.115780 21769 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 15:07:32.115787 21769 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 15:07:32.115798 21769 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.115860 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:32.116019 21769 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 15:07:32.116031 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.116036 21769 
net.cpp:165] Memory required for data: 1343079600\nI0818 15:07:32.116045 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 15:07:32.116053 21769 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 15:07:32.116060 21769 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 15:07:32.116075 21769 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.116084 21769 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 15:07:32.116091 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.116096 21769 net.cpp:165] Memory required for data: 1344718000\nI0818 15:07:32.116101 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 15:07:32.116114 21769 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 15:07:32.116120 21769 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 15:07:32.116129 21769 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 15:07:32.116503 21769 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 15:07:32.116518 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.116523 21769 net.cpp:165] Memory required for data: 1346356400\nI0818 15:07:32.116531 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 15:07:32.116544 21769 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 15:07:32.116550 21769 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 15:07:32.116559 21769 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 15:07:32.116838 21769 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 15:07:32.116852 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.116858 21769 net.cpp:165] Memory required for data: 1347994800\nI0818 15:07:32.116868 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:32.116875 21769 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 15:07:32.116883 21769 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 15:07:32.116889 21769 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 
15:07:32.116953 21769 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:32.117116 21769 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 15:07:32.117132 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.117137 21769 net.cpp:165] Memory required for data: 1349633200\nI0818 15:07:32.117146 21769 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 15:07:32.117156 21769 net.cpp:100] Creating Layer L2_b1_pool\nI0818 15:07:32.117163 21769 net.cpp:434] L2_b1_pool <- L1_b18_sum_eltwise_top_L1_b18_relu_0_split_1\nI0818 15:07:32.117174 21769 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 15:07:32.117207 21769 net.cpp:150] Setting up L2_b1_pool\nI0818 15:07:32.117216 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.117221 21769 net.cpp:165] Memory required for data: 1351271600\nI0818 15:07:32.117226 21769 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 15:07:32.117238 21769 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 15:07:32.117244 21769 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 15:07:32.117251 21769 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 15:07:32.117259 21769 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 15:07:32.117295 21769 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 15:07:32.117307 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.117312 21769 net.cpp:165] Memory required for data: 1352910000\nI0818 15:07:32.117317 21769 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 15:07:32.117326 21769 net.cpp:100] Creating Layer L2_b1_relu\nI0818 15:07:32.117331 21769 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 15:07:32.117337 21769 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 15:07:32.117347 21769 net.cpp:150] Setting up L2_b1_relu\nI0818 15:07:32.117353 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.117358 21769 net.cpp:165] Memory required for data: 1354548400\nI0818 
15:07:32.117362 21769 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 15:07:32.117372 21769 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 15:07:32.117383 21769 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 15:07:32.119524 21769 net.cpp:150] Setting up L2_b1_zeros\nI0818 15:07:32.119542 21769 net.cpp:157] Top shape: 100 16 16 16 (409600)\nI0818 15:07:32.119547 21769 net.cpp:165] Memory required for data: 1356186800\nI0818 15:07:32.119554 21769 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 15:07:32.119565 21769 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 15:07:32.119575 21769 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 15:07:32.119582 21769 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 15:07:32.119590 21769 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 15:07:32.119637 21769 net.cpp:150] Setting up L2_b1_concat0\nI0818 15:07:32.119652 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.119657 21769 net.cpp:165] Memory required for data: 1359463600\nI0818 15:07:32.119670 21769 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:32.119679 21769 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:32.119691 21769 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 15:07:32.119702 21769 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:32.119714 21769 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:32.119770 21769 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:32.119784 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.119791 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.119796 21769 net.cpp:165] Memory required for data: 1366017200\nI0818 15:07:32.119801 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 15:07:32.119813 21769 net.cpp:100] 
Creating Layer L2_b2_cbr1_conv\nI0818 15:07:32.119819 21769 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:32.119834 21769 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 15:07:32.120350 21769 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 15:07:32.120364 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.120370 21769 net.cpp:165] Memory required for data: 1369294000\nI0818 15:07:32.120379 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 15:07:32.120391 21769 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 15:07:32.120398 21769 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 15:07:32.120406 21769 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 15:07:32.120692 21769 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 15:07:32.120709 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.120714 21769 net.cpp:165] Memory required for data: 1372570800\nI0818 15:07:32.120725 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:32.120735 21769 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 15:07:32.120741 21769 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 15:07:32.120749 21769 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.120811 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:32.120976 21769 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 15:07:32.120990 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.120995 21769 net.cpp:165] Memory required for data: 1375847600\nI0818 15:07:32.121004 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 15:07:32.121012 21769 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 15:07:32.121018 21769 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 15:07:32.121028 21769 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.121038 21769 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 
15:07:32.121047 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.121050 21769 net.cpp:165] Memory required for data: 1379124400\nI0818 15:07:32.121055 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 15:07:32.121067 21769 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 15:07:32.121073 21769 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 15:07:32.121083 21769 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 15:07:32.121577 21769 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 15:07:32.121592 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.121596 21769 net.cpp:165] Memory required for data: 1382401200\nI0818 15:07:32.121604 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 15:07:32.121614 21769 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 15:07:32.121620 21769 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 15:07:32.121631 21769 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 15:07:32.121913 21769 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 15:07:32.121927 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.121933 21769 net.cpp:165] Memory required for data: 1385678000\nI0818 15:07:32.121950 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:32.121966 21769 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 15:07:32.121973 21769 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 15:07:32.121981 21769 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 15:07:32.122042 21769 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:32.122205 21769 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 15:07:32.122217 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.122222 21769 net.cpp:165] Memory required for data: 1388954800\nI0818 15:07:32.122231 21769 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 15:07:32.122243 21769 net.cpp:100] Creating Layer 
L2_b2_sum_eltwise\nI0818 15:07:32.122251 21769 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 15:07:32.122258 21769 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:32.122265 21769 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 15:07:32.122298 21769 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 15:07:32.122306 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.122311 21769 net.cpp:165] Memory required for data: 1392231600\nI0818 15:07:32.122316 21769 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 15:07:32.122324 21769 net.cpp:100] Creating Layer L2_b2_relu\nI0818 15:07:32.122329 21769 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 15:07:32.122340 21769 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 15:07:32.122349 21769 net.cpp:150] Setting up L2_b2_relu\nI0818 15:07:32.122356 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.122361 21769 net.cpp:165] Memory required for data: 1395508400\nI0818 15:07:32.122366 21769 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:32.122373 21769 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:32.122378 21769 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 15:07:32.122385 21769 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:32.122395 21769 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:32.122447 21769 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:32.122458 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.122465 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.122469 21769 net.cpp:165] Memory required for data: 1402062000\nI0818 15:07:32.122474 21769 layer_factory.hpp:77] Creating layer 
L2_b3_cbr1_conv\nI0818 15:07:32.122485 21769 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 15:07:32.122491 21769 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:32.122503 21769 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 15:07:32.123008 21769 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 15:07:32.123023 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.123028 21769 net.cpp:165] Memory required for data: 1405338800\nI0818 15:07:32.123036 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 15:07:32.123049 21769 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 15:07:32.123055 21769 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 15:07:32.123064 21769 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 15:07:32.123332 21769 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0818 15:07:32.123347 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.123352 21769 net.cpp:165] Memory required for data: 1408615600\nI0818 15:07:32.123361 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:32.123373 21769 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 15:07:32.123380 21769 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 15:07:32.123389 21769 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:32.123456 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:32.123612 21769 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 15:07:32.123626 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.123631 21769 net.cpp:165] Memory required for data: 1411892400\nI0818 15:07:32.123639 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 15:07:32.123653 21769 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 15:07:32.123661 21769 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 15:07:32.123667 21769 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 
15:07:32.123682 21769 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 15:07:32.123697 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.123700 21769 net.cpp:165] Memory required for data: 1415169200\nI0818 15:07:32.123705 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 15:07:32.123716 21769 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 15:07:32.123723 21769 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 15:07:32.123735 21769 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 15:07:32.124229 21769 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 15:07:32.124244 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.124249 21769 net.cpp:165] Memory required for data: 1418446000\nI0818 15:07:32.124258 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 15:07:32.124267 21769 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 15:07:32.124274 21769 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 15:07:32.124285 21769 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 15:07:32.124560 21769 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 15:07:32.124573 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.124578 21769 net.cpp:165] Memory required for data: 1421722800\nI0818 15:07:32.124589 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:32.124600 21769 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 15:07:32.124608 21769 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 15:07:32.124615 21769 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 15:07:32.124675 21769 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:32.124842 21769 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 15:07:32.124856 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.124861 21769 net.cpp:165] Memory required for data: 1424999600\nI0818 15:07:32.124871 21769 layer_factory.hpp:77] Creating layer 
L2_b3_sum_eltwise\nI0818 15:07:32.124882 21769 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 15:07:32.124889 21769 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 15:07:32.124897 21769 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:32.124903 21769 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 15:07:32.124934 21769 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 15:07:32.124944 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.124949 21769 net.cpp:165] Memory required for data: 1428276400\nI0818 15:07:32.124954 21769 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 15:07:32.124963 21769 net.cpp:100] Creating Layer L2_b3_relu\nI0818 15:07:32.124969 21769 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 15:07:32.124979 21769 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0818 15:07:32.124989 21769 net.cpp:150] Setting up L2_b3_relu\nI0818 15:07:32.124995 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.125000 21769 net.cpp:165] Memory required for data: 1431553200\nI0818 15:07:32.125005 21769 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:32.125011 21769 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:32.125017 21769 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 15:07:32.125025 21769 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:32.125041 21769 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:32.125093 21769 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:32.125105 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.125113 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.125118 21769 net.cpp:165] Memory required for 
data: 1438106800\nI0818 15:07:32.125123 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 15:07:32.125133 21769 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 15:07:32.125139 21769 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:32.125152 21769 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 15:07:32.125653 21769 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 15:07:32.125668 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.125672 21769 net.cpp:165] Memory required for data: 1441383600\nI0818 15:07:32.125681 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 15:07:32.125696 21769 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 15:07:32.125704 21769 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 15:07:32.125715 21769 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI0818 15:07:32.125994 21769 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 15:07:32.126008 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.126013 21769 net.cpp:165] Memory required for data: 1444660400\nI0818 15:07:32.126024 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:32.126035 21769 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 15:07:32.126041 21769 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 15:07:32.126050 21769 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.126111 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:32.126272 21769 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 15:07:32.126286 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.126291 21769 net.cpp:165] Memory required for data: 1447937200\nI0818 15:07:32.126301 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 15:07:32.126312 21769 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0818 15:07:32.126319 21769 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 15:07:32.126327 
21769 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.126336 21769 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 15:07:32.126343 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.126348 21769 net.cpp:165] Memory required for data: 1451214000\nI0818 15:07:32.126353 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 15:07:32.126368 21769 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 15:07:32.126374 21769 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 15:07:32.126384 21769 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 15:07:32.126884 21769 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 15:07:32.126899 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.126904 21769 net.cpp:165] Memory required for data: 1454490800\nI0818 15:07:32.126914 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 15:07:32.126922 21769 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 15:07:32.126929 21769 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 15:07:32.126940 21769 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 15:07:32.127207 21769 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 15:07:32.127220 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.127225 21769 net.cpp:165] Memory required for data: 1457767600\nI0818 15:07:32.127235 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:32.127249 21769 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 15:07:32.127255 21769 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 15:07:32.127264 21769 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 15:07:32.127331 21769 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:32.127496 21769 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 15:07:32.127508 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.127513 21769 net.cpp:165] Memory required for data: 1461044400\nI0818 
15:07:32.127522 21769 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 15:07:32.127534 21769 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 15:07:32.127542 21769 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 15:07:32.127548 21769 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:32.127557 21769 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 15:07:32.127583 21769 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 15:07:32.127596 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.127601 21769 net.cpp:165] Memory required for data: 1464321200\nI0818 15:07:32.127607 21769 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 15:07:32.127614 21769 net.cpp:100] Creating Layer L2_b4_relu\nI0818 15:07:32.127619 21769 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI0818 15:07:32.127626 21769 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 15:07:32.127635 21769 net.cpp:150] Setting up L2_b4_relu\nI0818 15:07:32.127642 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.127647 21769 net.cpp:165] Memory required for data: 1467598000\nI0818 15:07:32.127651 21769 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:32.127661 21769 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:32.127667 21769 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 15:07:32.127676 21769 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:32.127691 21769 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:32.127743 21769 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:32.127755 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.127763 21769 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0818 15:07:32.127766 21769 net.cpp:165] Memory required for data: 1474151600\nI0818 15:07:32.127771 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 15:07:32.127784 21769 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 15:07:32.127789 21769 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:32.127801 21769 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 15:07:32.128304 21769 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 15:07:32.128317 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.128322 21769 net.cpp:165] Memory required for data: 1477428400\nI0818 15:07:32.128331 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 15:07:32.128341 21769 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 15:07:32.128347 21769 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI0818 15:07:32.128361 21769 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 15:07:32.128630 21769 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 15:07:32.128643 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.128649 21769 net.cpp:165] Memory required for data: 1480705200\nI0818 15:07:32.128659 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:32.128671 21769 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 15:07:32.128679 21769 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 15:07:32.128691 21769 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.128753 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:32.128916 21769 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 15:07:32.128929 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.128934 21769 net.cpp:165] Memory required for data: 1483982000\nI0818 15:07:32.128952 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0818 15:07:32.128963 21769 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 15:07:32.128970 21769 
net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 15:07:32.128978 21769 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.128988 21769 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 15:07:32.128995 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.129000 21769 net.cpp:165] Memory required for data: 1487258800\nI0818 15:07:32.129004 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 15:07:32.129019 21769 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 15:07:32.129025 21769 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 15:07:32.129040 21769 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 15:07:32.129530 21769 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 15:07:32.129545 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.129550 21769 net.cpp:165] Memory required for data: 1490535600\nI0818 15:07:32.129559 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 15:07:32.129567 21769 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 15:07:32.129575 21769 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 15:07:32.129582 21769 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 15:07:32.129859 21769 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 15:07:32.129873 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.129878 21769 net.cpp:165] Memory required for data: 1493812400\nI0818 15:07:32.129889 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:32.129897 21769 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 15:07:32.129904 21769 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 15:07:32.129914 21769 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 15:07:32.129976 21769 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:32.130133 21769 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0818 15:07:32.130147 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 
15:07:32.130152 21769 net.cpp:165] Memory required for data: 1497089200\nI0818 15:07:32.130162 21769 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 15:07:32.130170 21769 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 15:07:32.130177 21769 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 15:07:32.130183 21769 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:32.130197 21769 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 15:07:32.130225 21769 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 15:07:32.130239 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.130244 21769 net.cpp:165] Memory required for data: 1500366000\nI0818 15:07:32.130249 21769 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 15:07:32.130256 21769 net.cpp:100] Creating Layer L2_b5_relu\nI0818 15:07:32.130262 21769 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 15:07:32.130270 21769 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 15:07:32.130278 21769 net.cpp:150] Setting up L2_b5_relu\nI0818 15:07:32.130286 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.130290 21769 net.cpp:165] Memory required for data: 1503642800\nI0818 15:07:32.130295 21769 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:32.130306 21769 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:32.130311 21769 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 15:07:32.130318 21769 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:32.130328 21769 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:32.130379 21769 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:32.130391 21769 net.cpp:157] Top shape: 100 32 16 16 
(819200)\nI0818 15:07:32.130405 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.130410 21769 net.cpp:165] Memory required for data: 1510196400\nI0818 15:07:32.130416 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 15:07:32.130427 21769 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 15:07:32.130434 21769 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:32.130445 21769 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 15:07:32.130954 21769 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 15:07:32.130970 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.130975 21769 net.cpp:165] Memory required for data: 1513473200\nI0818 15:07:32.130983 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 15:07:32.130992 21769 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI0818 15:07:32.130998 21769 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 15:07:32.131009 21769 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 15:07:32.131284 21769 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 15:07:32.131299 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.131304 21769 net.cpp:165] Memory required for data: 1516750000\nI0818 15:07:32.131314 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:32.131326 21769 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 15:07:32.131333 21769 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 15:07:32.131340 21769 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.131399 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:32.131557 21769 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 15:07:32.131572 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.131577 21769 net.cpp:165] Memory required for data: 1520026800\nI0818 15:07:32.131585 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 15:07:32.131593 21769 
net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 15:07:32.131599 21769 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 15:07:32.131609 21769 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.131619 21769 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 15:07:32.131626 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.131631 21769 net.cpp:165] Memory required for data: 1523303600\nI0818 15:07:32.131635 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 15:07:32.131650 21769 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 15:07:32.131656 21769 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 15:07:32.131667 21769 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 15:07:32.132169 21769 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 15:07:32.132184 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.132189 21769 net.cpp:165] Memory required for data: 1526580400\nI0818 15:07:32.132199 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 15:07:32.132207 21769 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 15:07:32.132215 21769 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 15:07:32.132222 21769 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 15:07:32.132498 21769 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 15:07:32.132513 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.132517 21769 net.cpp:165] Memory required for data: 1529857200\nI0818 15:07:32.132527 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:32.132536 21769 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 15:07:32.132542 21769 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 15:07:32.132552 21769 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 15:07:32.132614 21769 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:32.132786 21769 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 
15:07:32.132799 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.132804 21769 net.cpp:165] Memory required for data: 1533134000\nI0818 15:07:32.132820 21769 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 15:07:32.132830 21769 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 15:07:32.132836 21769 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 15:07:32.132843 21769 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:32.132854 21769 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 15:07:32.132884 21769 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 15:07:32.132895 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.132899 21769 net.cpp:165] Memory required for data: 1536410800\nI0818 15:07:32.132905 21769 layer_factory.hpp:77] Creating layer L2_b6_relu\nI0818 15:07:32.132916 21769 net.cpp:100] Creating Layer L2_b6_relu\nI0818 15:07:32.132923 21769 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 15:07:32.132930 21769 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 15:07:32.132939 21769 net.cpp:150] Setting up L2_b6_relu\nI0818 15:07:32.132946 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.132951 21769 net.cpp:165] Memory required for data: 1539687600\nI0818 15:07:32.132956 21769 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:32.132966 21769 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:32.132972 21769 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 15:07:32.132979 21769 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:32.132989 21769 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:32.133038 21769 net.cpp:150] Setting up 
L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:32.133052 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.133059 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.133064 21769 net.cpp:165] Memory required for data: 1546241200\nI0818 15:07:32.133070 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 15:07:32.133081 21769 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 15:07:32.133088 21769 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:32.133096 21769 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 15:07:32.133599 21769 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 15:07:32.133612 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.133617 21769 net.cpp:165] Memory required for data: 1549518000\nI0818 15:07:32.133626 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI0818 15:07:32.133638 21769 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 15:07:32.133646 21769 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 15:07:32.133653 21769 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 15:07:32.133931 21769 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 15:07:32.133944 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.133949 21769 net.cpp:165] Memory required for data: 1552794800\nI0818 15:07:32.133960 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:32.133968 21769 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 15:07:32.133975 21769 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 15:07:32.133985 21769 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.134047 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:32.134208 21769 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 15:07:32.134222 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.134227 21769 net.cpp:165] Memory required for data: 
1556071600\nI0818 15:07:32.134237 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 15:07:32.134243 21769 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 15:07:32.134250 21769 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 15:07:32.134260 21769 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.134270 21769 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 15:07:32.134284 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.134289 21769 net.cpp:165] Memory required for data: 1559348400\nI0818 15:07:32.134294 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 15:07:32.134308 21769 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 15:07:32.134315 21769 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 15:07:32.134322 21769 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 15:07:32.134835 21769 net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 15:07:32.134850 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.134855 21769 net.cpp:165] Memory required for data: 1562625200\nI0818 15:07:32.134865 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 15:07:32.134877 21769 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 15:07:32.134884 21769 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 15:07:32.134892 21769 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 15:07:32.135162 21769 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 15:07:32.135175 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135180 21769 net.cpp:165] Memory required for data: 1565902000\nI0818 15:07:32.135191 21769 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:32.135229 21769 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 15:07:32.135239 21769 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 15:07:32.135247 21769 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 15:07:32.135310 21769 
layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:32.135473 21769 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 15:07:32.135486 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135493 21769 net.cpp:165] Memory required for data: 1569178800\nI0818 15:07:32.135501 21769 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 15:07:32.135510 21769 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 15:07:32.135516 21769 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 15:07:32.135524 21769 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:32.135531 21769 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 15:07:32.135558 21769 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 15:07:32.135567 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135572 21769 net.cpp:165] Memory required for data: 1572455600\nI0818 15:07:32.135577 21769 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 15:07:32.135584 21769 net.cpp:100] Creating Layer L2_b7_relu\nI0818 15:07:32.135591 21769 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 15:07:32.135601 21769 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 15:07:32.135610 21769 net.cpp:150] Setting up L2_b7_relu\nI0818 15:07:32.135617 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135622 21769 net.cpp:165] Memory required for data: 1575732400\nI0818 15:07:32.135627 21769 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:32.135637 21769 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:32.135643 21769 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 15:07:32.135650 21769 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:32.135659 21769 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:32.135717 21769 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:32.135730 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135736 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.135741 21769 net.cpp:165] Memory required for data: 1582286000\nI0818 15:07:32.135746 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 15:07:32.135761 21769 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 15:07:32.135773 21769 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:32.135783 21769 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 15:07:32.136276 21769 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 15:07:32.136289 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.136296 21769 net.cpp:165] Memory required for data: 1585562800\nI0818 15:07:32.136303 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 15:07:32.136317 21769 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 15:07:32.136323 21769 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 15:07:32.136332 21769 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 15:07:32.136605 21769 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 15:07:32.136620 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.136626 21769 net.cpp:165] Memory required for data: 1588839600\nI0818 15:07:32.136636 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:32.136646 21769 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 15:07:32.136651 21769 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 15:07:32.136659 21769 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.136729 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:32.136891 21769 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0818 15:07:32.136905 21769 net.cpp:157] Top shape: 100 32 
16 16 (819200)\nI0818 15:07:32.136910 21769 net.cpp:165] Memory required for data: 1592116400\nI0818 15:07:32.136919 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 15:07:32.136927 21769 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 15:07:32.136934 21769 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 15:07:32.136943 21769 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.136953 21769 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 15:07:32.136961 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.136965 21769 net.cpp:165] Memory required for data: 1595393200\nI0818 15:07:32.136970 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 15:07:32.136984 21769 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 15:07:32.136991 21769 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 15:07:32.136999 21769 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 15:07:32.137514 21769 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 15:07:32.137529 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.137534 21769 net.cpp:165] Memory required for data: 1598670000\nI0818 15:07:32.137544 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 15:07:32.137557 21769 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 15:07:32.137563 21769 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 15:07:32.137573 21769 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 15:07:32.137851 21769 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 15:07:32.137866 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.137871 21769 net.cpp:165] Memory required for data: 1601946800\nI0818 15:07:32.137881 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:32.137893 21769 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 15:07:32.137900 21769 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0818 15:07:32.137908 21769 net.cpp:395] 
L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 15:07:32.137967 21769 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:32.138134 21769 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 15:07:32.138146 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138151 21769 net.cpp:165] Memory required for data: 1605223600\nI0818 15:07:32.138160 21769 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 15:07:32.138172 21769 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 15:07:32.138180 21769 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 15:07:32.138186 21769 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:32.138203 21769 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 15:07:32.138233 21769 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 15:07:32.138242 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138247 21769 net.cpp:165] Memory required for data: 1608500400\nI0818 15:07:32.138252 21769 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 15:07:32.138259 21769 net.cpp:100] Creating Layer L2_b8_relu\nI0818 15:07:32.138265 21769 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 15:07:32.138275 21769 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 15:07:32.138285 21769 net.cpp:150] Setting up L2_b8_relu\nI0818 15:07:32.138293 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138296 21769 net.cpp:165] Memory required for data: 1611777200\nI0818 15:07:32.138301 21769 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:32.138309 21769 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:32.138314 21769 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 15:07:32.138321 21769 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 
15:07:32.138329 21769 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:32.138381 21769 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:32.138392 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138398 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138403 21769 net.cpp:165] Memory required for data: 1618330800\nI0818 15:07:32.138408 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 15:07:32.138419 21769 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 15:07:32.138427 21769 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:32.138438 21769 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 15:07:32.138948 21769 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 15:07:32.138963 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.138968 21769 net.cpp:165] Memory required for data: 1621607600\nI0818 15:07:32.138978 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 15:07:32.138989 21769 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 15:07:32.138996 21769 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 15:07:32.139004 21769 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 15:07:32.139276 21769 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 15:07:32.139293 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.139298 21769 net.cpp:165] Memory required for data: 1624884400\nI0818 15:07:32.139309 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:32.139318 21769 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 15:07:32.139324 21769 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 15:07:32.139331 21769 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.139392 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:32.139552 21769 net.cpp:150] Setting up 
L2_b9_cbr1_scale\nI0818 15:07:32.139566 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.139570 21769 net.cpp:165] Memory required for data: 1628161200\nI0818 15:07:32.139580 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 15:07:32.139587 21769 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 15:07:32.139593 21769 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 15:07:32.139603 21769 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.139614 21769 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 15:07:32.139621 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.139626 21769 net.cpp:165] Memory required for data: 1631438000\nI0818 15:07:32.139631 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 15:07:32.139642 21769 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 15:07:32.139654 21769 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 15:07:32.139667 21769 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 15:07:32.140171 21769 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 15:07:32.140185 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.140192 21769 net.cpp:165] Memory required for data: 1634714800\nI0818 15:07:32.140200 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 15:07:32.140209 21769 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 15:07:32.140216 21769 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 15:07:32.140228 21769 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 15:07:32.140496 21769 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 15:07:32.140509 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.140514 21769 net.cpp:165] Memory required for data: 1637991600\nI0818 15:07:32.140525 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:32.140537 21769 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 15:07:32.140544 21769 net.cpp:434] L2_b9_cbr2_scale 
<- L2_b9_cbr2_bn_top\nI0818 15:07:32.140552 21769 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 15:07:32.140611 21769 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:32.140784 21769 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 15:07:32.140799 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.140803 21769 net.cpp:165] Memory required for data: 1641268400\nI0818 15:07:32.140812 21769 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 15:07:32.140825 21769 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 15:07:32.140831 21769 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 15:07:32.140838 21769 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:32.140848 21769 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 15:07:32.140877 21769 net.cpp:150] Setting up L2_b9_sum_eltwise\nI0818 15:07:32.140887 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.140892 21769 net.cpp:165] Memory required for data: 1644545200\nI0818 15:07:32.140897 21769 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 15:07:32.140904 21769 net.cpp:100] Creating Layer L2_b9_relu\nI0818 15:07:32.140910 21769 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 15:07:32.140920 21769 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 15:07:32.140930 21769 net.cpp:150] Setting up L2_b9_relu\nI0818 15:07:32.140938 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.140941 21769 net.cpp:165] Memory required for data: 1647822000\nI0818 15:07:32.140946 21769 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:32.140954 21769 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:32.140959 21769 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 15:07:32.140966 21769 net.cpp:408] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:32.140976 21769 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:32.141027 21769 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:32.141039 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.141047 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.141052 21769 net.cpp:165] Memory required for data: 1654375600\nI0818 15:07:32.141057 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_conv\nI0818 15:07:32.141067 21769 net.cpp:100] Creating Layer L2_b10_cbr1_conv\nI0818 15:07:32.141073 21769 net.cpp:434] L2_b10_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:32.141085 21769 net.cpp:408] L2_b10_cbr1_conv -> L2_b10_cbr1_conv_top\nI0818 15:07:32.141578 21769 net.cpp:150] Setting up L2_b10_cbr1_conv\nI0818 15:07:32.141593 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.141604 21769 net.cpp:165] Memory required for data: 1657652400\nI0818 15:07:32.141614 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_bn\nI0818 15:07:32.141626 21769 net.cpp:100] Creating Layer L2_b10_cbr1_bn\nI0818 15:07:32.141633 21769 net.cpp:434] L2_b10_cbr1_bn <- L2_b10_cbr1_conv_top\nI0818 15:07:32.141641 21769 net.cpp:408] L2_b10_cbr1_bn -> L2_b10_cbr1_bn_top\nI0818 15:07:32.141926 21769 net.cpp:150] Setting up L2_b10_cbr1_bn\nI0818 15:07:32.141940 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.141945 21769 net.cpp:165] Memory required for data: 1660929200\nI0818 15:07:32.141955 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0818 15:07:32.141968 21769 net.cpp:100] Creating Layer L2_b10_cbr1_scale\nI0818 15:07:32.141974 21769 net.cpp:434] L2_b10_cbr1_scale <- L2_b10_cbr1_bn_top\nI0818 15:07:32.141983 21769 net.cpp:395] L2_b10_cbr1_scale -> L2_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.142041 
21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_scale\nI0818 15:07:32.142201 21769 net.cpp:150] Setting up L2_b10_cbr1_scale\nI0818 15:07:32.142215 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.142220 21769 net.cpp:165] Memory required for data: 1664206000\nI0818 15:07:32.142230 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr1_relu\nI0818 15:07:32.142241 21769 net.cpp:100] Creating Layer L2_b10_cbr1_relu\nI0818 15:07:32.142247 21769 net.cpp:434] L2_b10_cbr1_relu <- L2_b10_cbr1_bn_top\nI0818 15:07:32.142258 21769 net.cpp:395] L2_b10_cbr1_relu -> L2_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.142268 21769 net.cpp:150] Setting up L2_b10_cbr1_relu\nI0818 15:07:32.142276 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.142280 21769 net.cpp:165] Memory required for data: 1667482800\nI0818 15:07:32.142285 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_conv\nI0818 15:07:32.142295 21769 net.cpp:100] Creating Layer L2_b10_cbr2_conv\nI0818 15:07:32.142302 21769 net.cpp:434] L2_b10_cbr2_conv <- L2_b10_cbr1_bn_top\nI0818 15:07:32.142313 21769 net.cpp:408] L2_b10_cbr2_conv -> L2_b10_cbr2_conv_top\nI0818 15:07:32.142814 21769 net.cpp:150] Setting up L2_b10_cbr2_conv\nI0818 15:07:32.142829 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.142834 21769 net.cpp:165] Memory required for data: 1670759600\nI0818 15:07:32.142843 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_bn\nI0818 15:07:32.142853 21769 net.cpp:100] Creating Layer L2_b10_cbr2_bn\nI0818 15:07:32.142858 21769 net.cpp:434] L2_b10_cbr2_bn <- L2_b10_cbr2_conv_top\nI0818 15:07:32.142873 21769 net.cpp:408] L2_b10_cbr2_bn -> L2_b10_cbr2_bn_top\nI0818 15:07:32.143153 21769 net.cpp:150] Setting up L2_b10_cbr2_bn\nI0818 15:07:32.143167 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143172 21769 net.cpp:165] Memory required for data: 1674036400\nI0818 15:07:32.143182 21769 layer_factory.hpp:77] Creating layer 
L2_b10_cbr2_scale\nI0818 15:07:32.143194 21769 net.cpp:100] Creating Layer L2_b10_cbr2_scale\nI0818 15:07:32.143200 21769 net.cpp:434] L2_b10_cbr2_scale <- L2_b10_cbr2_bn_top\nI0818 15:07:32.143208 21769 net.cpp:395] L2_b10_cbr2_scale -> L2_b10_cbr2_bn_top (in-place)\nI0818 15:07:32.143270 21769 layer_factory.hpp:77] Creating layer L2_b10_cbr2_scale\nI0818 15:07:32.143430 21769 net.cpp:150] Setting up L2_b10_cbr2_scale\nI0818 15:07:32.143443 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143448 21769 net.cpp:165] Memory required for data: 1677313200\nI0818 15:07:32.143457 21769 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise\nI0818 15:07:32.143470 21769 net.cpp:100] Creating Layer L2_b10_sum_eltwise\nI0818 15:07:32.143476 21769 net.cpp:434] L2_b10_sum_eltwise <- L2_b10_cbr2_bn_top\nI0818 15:07:32.143483 21769 net.cpp:434] L2_b10_sum_eltwise <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:32.143491 21769 net.cpp:408] L2_b10_sum_eltwise -> L2_b10_sum_eltwise_top\nI0818 15:07:32.143525 21769 net.cpp:150] Setting up L2_b10_sum_eltwise\nI0818 15:07:32.143534 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143539 21769 net.cpp:165] Memory required for data: 1680590000\nI0818 15:07:32.143551 21769 layer_factory.hpp:77] Creating layer L2_b10_relu\nI0818 15:07:32.143560 21769 net.cpp:100] Creating Layer L2_b10_relu\nI0818 15:07:32.143566 21769 net.cpp:434] L2_b10_relu <- L2_b10_sum_eltwise_top\nI0818 15:07:32.143576 21769 net.cpp:395] L2_b10_relu -> L2_b10_sum_eltwise_top (in-place)\nI0818 15:07:32.143586 21769 net.cpp:150] Setting up L2_b10_relu\nI0818 15:07:32.143594 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143599 21769 net.cpp:165] Memory required for data: 1683866800\nI0818 15:07:32.143602 21769 layer_factory.hpp:77] Creating layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 15:07:32.143610 21769 net.cpp:100] Creating Layer L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 
15:07:32.143615 21769 net.cpp:434] L2_b10_sum_eltwise_top_L2_b10_relu_0_split <- L2_b10_sum_eltwise_top\nI0818 15:07:32.143622 21769 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0818 15:07:32.143631 21769 net.cpp:408] L2_b10_sum_eltwise_top_L2_b10_relu_0_split -> L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0818 15:07:32.143690 21769 net.cpp:150] Setting up L2_b10_sum_eltwise_top_L2_b10_relu_0_split\nI0818 15:07:32.143703 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143710 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.143715 21769 net.cpp:165] Memory required for data: 1690420400\nI0818 15:07:32.143720 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_conv\nI0818 15:07:32.143731 21769 net.cpp:100] Creating Layer L2_b11_cbr1_conv\nI0818 15:07:32.143738 21769 net.cpp:434] L2_b11_cbr1_conv <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_0\nI0818 15:07:32.143750 21769 net.cpp:408] L2_b11_cbr1_conv -> L2_b11_cbr1_conv_top\nI0818 15:07:32.144253 21769 net.cpp:150] Setting up L2_b11_cbr1_conv\nI0818 15:07:32.144266 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.144271 21769 net.cpp:165] Memory required for data: 1693697200\nI0818 15:07:32.144280 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_bn\nI0818 15:07:32.144290 21769 net.cpp:100] Creating Layer L2_b11_cbr1_bn\nI0818 15:07:32.144296 21769 net.cpp:434] L2_b11_cbr1_bn <- L2_b11_cbr1_conv_top\nI0818 15:07:32.144309 21769 net.cpp:408] L2_b11_cbr1_bn -> L2_b11_cbr1_bn_top\nI0818 15:07:32.144582 21769 net.cpp:150] Setting up L2_b11_cbr1_bn\nI0818 15:07:32.144596 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.144601 21769 net.cpp:165] Memory required for data: 1696974000\nI0818 15:07:32.144611 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0818 15:07:32.144623 21769 net.cpp:100] Creating Layer L2_b11_cbr1_scale\nI0818 15:07:32.144629 21769 
net.cpp:434] L2_b11_cbr1_scale <- L2_b11_cbr1_bn_top\nI0818 15:07:32.144637 21769 net.cpp:395] L2_b11_cbr1_scale -> L2_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.144704 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_scale\nI0818 15:07:32.144870 21769 net.cpp:150] Setting up L2_b11_cbr1_scale\nI0818 15:07:32.144884 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.144889 21769 net.cpp:165] Memory required for data: 1700250800\nI0818 15:07:32.144898 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr1_relu\nI0818 15:07:32.144909 21769 net.cpp:100] Creating Layer L2_b11_cbr1_relu\nI0818 15:07:32.144917 21769 net.cpp:434] L2_b11_cbr1_relu <- L2_b11_cbr1_bn_top\nI0818 15:07:32.144924 21769 net.cpp:395] L2_b11_cbr1_relu -> L2_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.144933 21769 net.cpp:150] Setting up L2_b11_cbr1_relu\nI0818 15:07:32.144940 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.144945 21769 net.cpp:165] Memory required for data: 1703527600\nI0818 15:07:32.144950 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_conv\nI0818 15:07:32.144964 21769 net.cpp:100] Creating Layer L2_b11_cbr2_conv\nI0818 15:07:32.144970 21769 net.cpp:434] L2_b11_cbr2_conv <- L2_b11_cbr1_bn_top\nI0818 15:07:32.144981 21769 net.cpp:408] L2_b11_cbr2_conv -> L2_b11_cbr2_conv_top\nI0818 15:07:32.145478 21769 net.cpp:150] Setting up L2_b11_cbr2_conv\nI0818 15:07:32.145499 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.145504 21769 net.cpp:165] Memory required for data: 1706804400\nI0818 15:07:32.145514 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_bn\nI0818 15:07:32.145522 21769 net.cpp:100] Creating Layer L2_b11_cbr2_bn\nI0818 15:07:32.145529 21769 net.cpp:434] L2_b11_cbr2_bn <- L2_b11_cbr2_conv_top\nI0818 15:07:32.145540 21769 net.cpp:408] L2_b11_cbr2_bn -> L2_b11_cbr2_bn_top\nI0818 15:07:32.145830 21769 net.cpp:150] Setting up L2_b11_cbr2_bn\nI0818 15:07:32.145844 21769 net.cpp:157] Top shape: 100 32 16 
16 (819200)\nI0818 15:07:32.145849 21769 net.cpp:165] Memory required for data: 1710081200\nI0818 15:07:32.145860 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0818 15:07:32.145872 21769 net.cpp:100] Creating Layer L2_b11_cbr2_scale\nI0818 15:07:32.145879 21769 net.cpp:434] L2_b11_cbr2_scale <- L2_b11_cbr2_bn_top\nI0818 15:07:32.145887 21769 net.cpp:395] L2_b11_cbr2_scale -> L2_b11_cbr2_bn_top (in-place)\nI0818 15:07:32.145949 21769 layer_factory.hpp:77] Creating layer L2_b11_cbr2_scale\nI0818 15:07:32.146111 21769 net.cpp:150] Setting up L2_b11_cbr2_scale\nI0818 15:07:32.146124 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.146129 21769 net.cpp:165] Memory required for data: 1713358000\nI0818 15:07:32.146138 21769 layer_factory.hpp:77] Creating layer L2_b11_sum_eltwise\nI0818 15:07:32.146150 21769 net.cpp:100] Creating Layer L2_b11_sum_eltwise\nI0818 15:07:32.146157 21769 net.cpp:434] L2_b11_sum_eltwise <- L2_b11_cbr2_bn_top\nI0818 15:07:32.146164 21769 net.cpp:434] L2_b11_sum_eltwise <- L2_b10_sum_eltwise_top_L2_b10_relu_0_split_1\nI0818 15:07:32.146173 21769 net.cpp:408] L2_b11_sum_eltwise -> L2_b11_sum_eltwise_top\nI0818 15:07:32.146203 21769 net.cpp:150] Setting up L2_b11_sum_eltwise\nI0818 15:07:32.146212 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.146217 21769 net.cpp:165] Memory required for data: 1716634800\nI0818 15:07:32.146224 21769 layer_factory.hpp:77] Creating layer L2_b11_relu\nI0818 15:07:32.146230 21769 net.cpp:100] Creating Layer L2_b11_relu\nI0818 15:07:32.146236 21769 net.cpp:434] L2_b11_relu <- L2_b11_sum_eltwise_top\nI0818 15:07:32.146244 21769 net.cpp:395] L2_b11_relu -> L2_b11_sum_eltwise_top (in-place)\nI0818 15:07:32.146252 21769 net.cpp:150] Setting up L2_b11_relu\nI0818 15:07:32.146258 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.146263 21769 net.cpp:165] Memory required for data: 1719911600\nI0818 15:07:32.146268 21769 layer_factory.hpp:77] Creating 
layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:32.146277 21769 net.cpp:100] Creating Layer L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:32.146283 21769 net.cpp:434] L2_b11_sum_eltwise_top_L2_b11_relu_0_split <- L2_b11_sum_eltwise_top\nI0818 15:07:32.146291 21769 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0818 15:07:32.146301 21769 net.cpp:408] L2_b11_sum_eltwise_top_L2_b11_relu_0_split -> L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0818 15:07:32.146353 21769 net.cpp:150] Setting up L2_b11_sum_eltwise_top_L2_b11_relu_0_split\nI0818 15:07:32.146364 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.146371 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.146376 21769 net.cpp:165] Memory required for data: 1726465200\nI0818 15:07:32.146381 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_conv\nI0818 15:07:32.146392 21769 net.cpp:100] Creating Layer L2_b12_cbr1_conv\nI0818 15:07:32.146399 21769 net.cpp:434] L2_b12_cbr1_conv <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_0\nI0818 15:07:32.146410 21769 net.cpp:408] L2_b12_cbr1_conv -> L2_b12_cbr1_conv_top\nI0818 15:07:32.147927 21769 net.cpp:150] Setting up L2_b12_cbr1_conv\nI0818 15:07:32.147944 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.147950 21769 net.cpp:165] Memory required for data: 1729742000\nI0818 15:07:32.147959 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_bn\nI0818 15:07:32.147982 21769 net.cpp:100] Creating Layer L2_b12_cbr1_bn\nI0818 15:07:32.147990 21769 net.cpp:434] L2_b12_cbr1_bn <- L2_b12_cbr1_conv_top\nI0818 15:07:32.148000 21769 net.cpp:408] L2_b12_cbr1_bn -> L2_b12_cbr1_bn_top\nI0818 15:07:32.148274 21769 net.cpp:150] Setting up L2_b12_cbr1_bn\nI0818 15:07:32.148288 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.148293 21769 net.cpp:165] Memory required for data: 1733018800\nI0818 15:07:32.148303 21769 
layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0818 15:07:32.148313 21769 net.cpp:100] Creating Layer L2_b12_cbr1_scale\nI0818 15:07:32.148319 21769 net.cpp:434] L2_b12_cbr1_scale <- L2_b12_cbr1_bn_top\nI0818 15:07:32.148326 21769 net.cpp:395] L2_b12_cbr1_scale -> L2_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.148391 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_scale\nI0818 15:07:32.148555 21769 net.cpp:150] Setting up L2_b12_cbr1_scale\nI0818 15:07:32.148571 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.148576 21769 net.cpp:165] Memory required for data: 1736295600\nI0818 15:07:32.148584 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr1_relu\nI0818 15:07:32.148592 21769 net.cpp:100] Creating Layer L2_b12_cbr1_relu\nI0818 15:07:32.148598 21769 net.cpp:434] L2_b12_cbr1_relu <- L2_b12_cbr1_bn_top\nI0818 15:07:32.148607 21769 net.cpp:395] L2_b12_cbr1_relu -> L2_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.148615 21769 net.cpp:150] Setting up L2_b12_cbr1_relu\nI0818 15:07:32.148622 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.148627 21769 net.cpp:165] Memory required for data: 1739572400\nI0818 15:07:32.148633 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_conv\nI0818 15:07:32.148646 21769 net.cpp:100] Creating Layer L2_b12_cbr2_conv\nI0818 15:07:32.148653 21769 net.cpp:434] L2_b12_cbr2_conv <- L2_b12_cbr1_bn_top\nI0818 15:07:32.148663 21769 net.cpp:408] L2_b12_cbr2_conv -> L2_b12_cbr2_conv_top\nI0818 15:07:32.149163 21769 net.cpp:150] Setting up L2_b12_cbr2_conv\nI0818 15:07:32.149178 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.149183 21769 net.cpp:165] Memory required for data: 1742849200\nI0818 15:07:32.149194 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_bn\nI0818 15:07:32.149205 21769 net.cpp:100] Creating Layer L2_b12_cbr2_bn\nI0818 15:07:32.149212 21769 net.cpp:434] L2_b12_cbr2_bn <- L2_b12_cbr2_conv_top\nI0818 15:07:32.149224 21769 net.cpp:408] 
L2_b12_cbr2_bn -> L2_b12_cbr2_bn_top\nI0818 15:07:32.149502 21769 net.cpp:150] Setting up L2_b12_cbr2_bn\nI0818 15:07:32.149515 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.149520 21769 net.cpp:165] Memory required for data: 1746126000\nI0818 15:07:32.149530 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0818 15:07:32.149539 21769 net.cpp:100] Creating Layer L2_b12_cbr2_scale\nI0818 15:07:32.149545 21769 net.cpp:434] L2_b12_cbr2_scale <- L2_b12_cbr2_bn_top\nI0818 15:07:32.149554 21769 net.cpp:395] L2_b12_cbr2_scale -> L2_b12_cbr2_bn_top (in-place)\nI0818 15:07:32.149617 21769 layer_factory.hpp:77] Creating layer L2_b12_cbr2_scale\nI0818 15:07:32.149793 21769 net.cpp:150] Setting up L2_b12_cbr2_scale\nI0818 15:07:32.149808 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.149813 21769 net.cpp:165] Memory required for data: 1749402800\nI0818 15:07:32.149822 21769 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise\nI0818 15:07:32.149834 21769 net.cpp:100] Creating Layer L2_b12_sum_eltwise\nI0818 15:07:32.149842 21769 net.cpp:434] L2_b12_sum_eltwise <- L2_b12_cbr2_bn_top\nI0818 15:07:32.149848 21769 net.cpp:434] L2_b12_sum_eltwise <- L2_b11_sum_eltwise_top_L2_b11_relu_0_split_1\nI0818 15:07:32.149857 21769 net.cpp:408] L2_b12_sum_eltwise -> L2_b12_sum_eltwise_top\nI0818 15:07:32.149888 21769 net.cpp:150] Setting up L2_b12_sum_eltwise\nI0818 15:07:32.149897 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.149902 21769 net.cpp:165] Memory required for data: 1752679600\nI0818 15:07:32.149907 21769 layer_factory.hpp:77] Creating layer L2_b12_relu\nI0818 15:07:32.149915 21769 net.cpp:100] Creating Layer L2_b12_relu\nI0818 15:07:32.149929 21769 net.cpp:434] L2_b12_relu <- L2_b12_sum_eltwise_top\nI0818 15:07:32.149940 21769 net.cpp:395] L2_b12_relu -> L2_b12_sum_eltwise_top (in-place)\nI0818 15:07:32.149950 21769 net.cpp:150] Setting up L2_b12_relu\nI0818 15:07:32.149956 21769 net.cpp:157] Top 
shape: 100 32 16 16 (819200)\nI0818 15:07:32.149960 21769 net.cpp:165] Memory required for data: 1755956400\nI0818 15:07:32.149966 21769 layer_factory.hpp:77] Creating layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:32.149972 21769 net.cpp:100] Creating Layer L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:32.149978 21769 net.cpp:434] L2_b12_sum_eltwise_top_L2_b12_relu_0_split <- L2_b12_sum_eltwise_top\nI0818 15:07:32.149986 21769 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0818 15:07:32.149996 21769 net.cpp:408] L2_b12_sum_eltwise_top_L2_b12_relu_0_split -> L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0818 15:07:32.150049 21769 net.cpp:150] Setting up L2_b12_sum_eltwise_top_L2_b12_relu_0_split\nI0818 15:07:32.150061 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.150068 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.150074 21769 net.cpp:165] Memory required for data: 1762510000\nI0818 15:07:32.150079 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_conv\nI0818 15:07:32.150092 21769 net.cpp:100] Creating Layer L2_b13_cbr1_conv\nI0818 15:07:32.150099 21769 net.cpp:434] L2_b13_cbr1_conv <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_0\nI0818 15:07:32.150110 21769 net.cpp:408] L2_b13_cbr1_conv -> L2_b13_cbr1_conv_top\nI0818 15:07:32.150616 21769 net.cpp:150] Setting up L2_b13_cbr1_conv\nI0818 15:07:32.150630 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.150635 21769 net.cpp:165] Memory required for data: 1765786800\nI0818 15:07:32.150645 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_bn\nI0818 15:07:32.150653 21769 net.cpp:100] Creating Layer L2_b13_cbr1_bn\nI0818 15:07:32.150660 21769 net.cpp:434] L2_b13_cbr1_bn <- L2_b13_cbr1_conv_top\nI0818 15:07:32.150671 21769 net.cpp:408] L2_b13_cbr1_bn -> L2_b13_cbr1_bn_top\nI0818 15:07:32.150955 21769 net.cpp:150] Setting up L2_b13_cbr1_bn\nI0818 15:07:32.150969 21769 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.150974 21769 net.cpp:165] Memory required for data: 1769063600\nI0818 15:07:32.150985 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0818 15:07:32.150993 21769 net.cpp:100] Creating Layer L2_b13_cbr1_scale\nI0818 15:07:32.151000 21769 net.cpp:434] L2_b13_cbr1_scale <- L2_b13_cbr1_bn_top\nI0818 15:07:32.151007 21769 net.cpp:395] L2_b13_cbr1_scale -> L2_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.151070 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_scale\nI0818 15:07:32.151233 21769 net.cpp:150] Setting up L2_b13_cbr1_scale\nI0818 15:07:32.151249 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.151255 21769 net.cpp:165] Memory required for data: 1772340400\nI0818 15:07:32.151264 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr1_relu\nI0818 15:07:32.151273 21769 net.cpp:100] Creating Layer L2_b13_cbr1_relu\nI0818 15:07:32.151279 21769 net.cpp:434] L2_b13_cbr1_relu <- L2_b13_cbr1_bn_top\nI0818 15:07:32.151286 21769 net.cpp:395] L2_b13_cbr1_relu -> L2_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.151295 21769 net.cpp:150] Setting up L2_b13_cbr1_relu\nI0818 15:07:32.151302 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.151307 21769 net.cpp:165] Memory required for data: 1775617200\nI0818 15:07:32.151311 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_conv\nI0818 15:07:32.151326 21769 net.cpp:100] Creating Layer L2_b13_cbr2_conv\nI0818 15:07:32.151332 21769 net.cpp:434] L2_b13_cbr2_conv <- L2_b13_cbr1_bn_top\nI0818 15:07:32.151343 21769 net.cpp:408] L2_b13_cbr2_conv -> L2_b13_cbr2_conv_top\nI0818 15:07:32.151852 21769 net.cpp:150] Setting up L2_b13_cbr2_conv\nI0818 15:07:32.151867 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.151872 21769 net.cpp:165] Memory required for data: 1778894000\nI0818 15:07:32.151887 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_bn\nI0818 15:07:32.151901 21769 net.cpp:100] Creating 
Layer L2_b13_cbr2_bn\nI0818 15:07:32.151908 21769 net.cpp:434] L2_b13_cbr2_bn <- L2_b13_cbr2_conv_top\nI0818 15:07:32.151919 21769 net.cpp:408] L2_b13_cbr2_bn -> L2_b13_cbr2_bn_top\nI0818 15:07:32.152200 21769 net.cpp:150] Setting up L2_b13_cbr2_bn\nI0818 15:07:32.152215 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152220 21769 net.cpp:165] Memory required for data: 1782170800\nI0818 15:07:32.152230 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0818 15:07:32.152238 21769 net.cpp:100] Creating Layer L2_b13_cbr2_scale\nI0818 15:07:32.152245 21769 net.cpp:434] L2_b13_cbr2_scale <- L2_b13_cbr2_bn_top\nI0818 15:07:32.152252 21769 net.cpp:395] L2_b13_cbr2_scale -> L2_b13_cbr2_bn_top (in-place)\nI0818 15:07:32.152318 21769 layer_factory.hpp:77] Creating layer L2_b13_cbr2_scale\nI0818 15:07:32.152479 21769 net.cpp:150] Setting up L2_b13_cbr2_scale\nI0818 15:07:32.152493 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152498 21769 net.cpp:165] Memory required for data: 1785447600\nI0818 15:07:32.152506 21769 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise\nI0818 15:07:32.152518 21769 net.cpp:100] Creating Layer L2_b13_sum_eltwise\nI0818 15:07:32.152525 21769 net.cpp:434] L2_b13_sum_eltwise <- L2_b13_cbr2_bn_top\nI0818 15:07:32.152532 21769 net.cpp:434] L2_b13_sum_eltwise <- L2_b12_sum_eltwise_top_L2_b12_relu_0_split_1\nI0818 15:07:32.152539 21769 net.cpp:408] L2_b13_sum_eltwise -> L2_b13_sum_eltwise_top\nI0818 15:07:32.152567 21769 net.cpp:150] Setting up L2_b13_sum_eltwise\nI0818 15:07:32.152576 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152581 21769 net.cpp:165] Memory required for data: 1788724400\nI0818 15:07:32.152586 21769 layer_factory.hpp:77] Creating layer L2_b13_relu\nI0818 15:07:32.152597 21769 net.cpp:100] Creating Layer L2_b13_relu\nI0818 15:07:32.152603 21769 net.cpp:434] L2_b13_relu <- L2_b13_sum_eltwise_top\nI0818 15:07:32.152611 21769 net.cpp:395] L2_b13_relu -> 
L2_b13_sum_eltwise_top (in-place)\nI0818 15:07:32.152619 21769 net.cpp:150] Setting up L2_b13_relu\nI0818 15:07:32.152626 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152631 21769 net.cpp:165] Memory required for data: 1792001200\nI0818 15:07:32.152637 21769 layer_factory.hpp:77] Creating layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:32.152643 21769 net.cpp:100] Creating Layer L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:32.152648 21769 net.cpp:434] L2_b13_sum_eltwise_top_L2_b13_relu_0_split <- L2_b13_sum_eltwise_top\nI0818 15:07:32.152657 21769 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0818 15:07:32.152665 21769 net.cpp:408] L2_b13_sum_eltwise_top_L2_b13_relu_0_split -> L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0818 15:07:32.152725 21769 net.cpp:150] Setting up L2_b13_sum_eltwise_top_L2_b13_relu_0_split\nI0818 15:07:32.152739 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152745 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.152750 21769 net.cpp:165] Memory required for data: 1798554800\nI0818 15:07:32.152755 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_conv\nI0818 15:07:32.152770 21769 net.cpp:100] Creating Layer L2_b14_cbr1_conv\nI0818 15:07:32.152777 21769 net.cpp:434] L2_b14_cbr1_conv <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_0\nI0818 15:07:32.152786 21769 net.cpp:408] L2_b14_cbr1_conv -> L2_b14_cbr1_conv_top\nI0818 15:07:32.153286 21769 net.cpp:150] Setting up L2_b14_cbr1_conv\nI0818 15:07:32.153301 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.153306 21769 net.cpp:165] Memory required for data: 1801831600\nI0818 15:07:32.153314 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_bn\nI0818 15:07:32.153326 21769 net.cpp:100] Creating Layer L2_b14_cbr1_bn\nI0818 15:07:32.153333 21769 net.cpp:434] L2_b14_cbr1_bn <- L2_b14_cbr1_conv_top\nI0818 15:07:32.153344 21769 
net.cpp:408] L2_b14_cbr1_bn -> L2_b14_cbr1_bn_top\nI0818 15:07:32.153632 21769 net.cpp:150] Setting up L2_b14_cbr1_bn\nI0818 15:07:32.153646 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.153651 21769 net.cpp:165] Memory required for data: 1805108400\nI0818 15:07:32.153661 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0818 15:07:32.153671 21769 net.cpp:100] Creating Layer L2_b14_cbr1_scale\nI0818 15:07:32.153676 21769 net.cpp:434] L2_b14_cbr1_scale <- L2_b14_cbr1_bn_top\nI0818 15:07:32.153690 21769 net.cpp:395] L2_b14_cbr1_scale -> L2_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.153758 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_scale\nI0818 15:07:32.153918 21769 net.cpp:150] Setting up L2_b14_cbr1_scale\nI0818 15:07:32.153933 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.153937 21769 net.cpp:165] Memory required for data: 1808385200\nI0818 15:07:32.153946 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr1_relu\nI0818 15:07:32.153956 21769 net.cpp:100] Creating Layer L2_b14_cbr1_relu\nI0818 15:07:32.153964 21769 net.cpp:434] L2_b14_cbr1_relu <- L2_b14_cbr1_bn_top\nI0818 15:07:32.153970 21769 net.cpp:395] L2_b14_cbr1_relu -> L2_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.153980 21769 net.cpp:150] Setting up L2_b14_cbr1_relu\nI0818 15:07:32.153987 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.153991 21769 net.cpp:165] Memory required for data: 1811662000\nI0818 15:07:32.153996 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_conv\nI0818 15:07:32.154011 21769 net.cpp:100] Creating Layer L2_b14_cbr2_conv\nI0818 15:07:32.154016 21769 net.cpp:434] L2_b14_cbr2_conv <- L2_b14_cbr1_bn_top\nI0818 15:07:32.154028 21769 net.cpp:408] L2_b14_cbr2_conv -> L2_b14_cbr2_conv_top\nI0818 15:07:32.154518 21769 net.cpp:150] Setting up L2_b14_cbr2_conv\nI0818 15:07:32.154533 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.154538 21769 net.cpp:165] Memory required for 
data: 1814938800\nI0818 15:07:32.154547 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_bn\nI0818 15:07:32.154559 21769 net.cpp:100] Creating Layer L2_b14_cbr2_bn\nI0818 15:07:32.154567 21769 net.cpp:434] L2_b14_cbr2_bn <- L2_b14_cbr2_conv_top\nI0818 15:07:32.154578 21769 net.cpp:408] L2_b14_cbr2_bn -> L2_b14_cbr2_bn_top\nI0818 15:07:32.154856 21769 net.cpp:150] Setting up L2_b14_cbr2_bn\nI0818 15:07:32.154873 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.154878 21769 net.cpp:165] Memory required for data: 1818215600\nI0818 15:07:32.154889 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0818 15:07:32.154898 21769 net.cpp:100] Creating Layer L2_b14_cbr2_scale\nI0818 15:07:32.154904 21769 net.cpp:434] L2_b14_cbr2_scale <- L2_b14_cbr2_bn_top\nI0818 15:07:32.154911 21769 net.cpp:395] L2_b14_cbr2_scale -> L2_b14_cbr2_bn_top (in-place)\nI0818 15:07:32.154973 21769 layer_factory.hpp:77] Creating layer L2_b14_cbr2_scale\nI0818 15:07:32.155141 21769 net.cpp:150] Setting up L2_b14_cbr2_scale\nI0818 15:07:32.155155 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155160 21769 net.cpp:165] Memory required for data: 1821492400\nI0818 15:07:32.155169 21769 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise\nI0818 15:07:32.155177 21769 net.cpp:100] Creating Layer L2_b14_sum_eltwise\nI0818 15:07:32.155189 21769 net.cpp:434] L2_b14_sum_eltwise <- L2_b14_cbr2_bn_top\nI0818 15:07:32.155196 21769 net.cpp:434] L2_b14_sum_eltwise <- L2_b13_sum_eltwise_top_L2_b13_relu_0_split_1\nI0818 15:07:32.155205 21769 net.cpp:408] L2_b14_sum_eltwise -> L2_b14_sum_eltwise_top\nI0818 15:07:32.155231 21769 net.cpp:150] Setting up L2_b14_sum_eltwise\nI0818 15:07:32.155241 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155246 21769 net.cpp:165] Memory required for data: 1824769200\nI0818 15:07:32.155251 21769 layer_factory.hpp:77] Creating layer L2_b14_relu\nI0818 15:07:32.155262 21769 net.cpp:100] Creating Layer 
L2_b14_relu\nI0818 15:07:32.155268 21769 net.cpp:434] L2_b14_relu <- L2_b14_sum_eltwise_top\nI0818 15:07:32.155275 21769 net.cpp:395] L2_b14_relu -> L2_b14_sum_eltwise_top (in-place)\nI0818 15:07:32.155292 21769 net.cpp:150] Setting up L2_b14_relu\nI0818 15:07:32.155299 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155303 21769 net.cpp:165] Memory required for data: 1828046000\nI0818 15:07:32.155308 21769 layer_factory.hpp:77] Creating layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:32.155316 21769 net.cpp:100] Creating Layer L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:32.155321 21769 net.cpp:434] L2_b14_sum_eltwise_top_L2_b14_relu_0_split <- L2_b14_sum_eltwise_top\nI0818 15:07:32.155328 21769 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0818 15:07:32.155339 21769 net.cpp:408] L2_b14_sum_eltwise_top_L2_b14_relu_0_split -> L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0818 15:07:32.155391 21769 net.cpp:150] Setting up L2_b14_sum_eltwise_top_L2_b14_relu_0_split\nI0818 15:07:32.155403 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155411 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155416 21769 net.cpp:165] Memory required for data: 1834599600\nI0818 15:07:32.155421 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_conv\nI0818 15:07:32.155434 21769 net.cpp:100] Creating Layer L2_b15_cbr1_conv\nI0818 15:07:32.155441 21769 net.cpp:434] L2_b15_cbr1_conv <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_0\nI0818 15:07:32.155450 21769 net.cpp:408] L2_b15_cbr1_conv -> L2_b15_cbr1_conv_top\nI0818 15:07:32.155956 21769 net.cpp:150] Setting up L2_b15_cbr1_conv\nI0818 15:07:32.155972 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.155977 21769 net.cpp:165] Memory required for data: 1837876400\nI0818 15:07:32.155985 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_bn\nI0818 15:07:32.155998 21769 
net.cpp:100] Creating Layer L2_b15_cbr1_bn\nI0818 15:07:32.156005 21769 net.cpp:434] L2_b15_cbr1_bn <- L2_b15_cbr1_conv_top\nI0818 15:07:32.156016 21769 net.cpp:408] L2_b15_cbr1_bn -> L2_b15_cbr1_bn_top\nI0818 15:07:32.156293 21769 net.cpp:150] Setting up L2_b15_cbr1_bn\nI0818 15:07:32.156307 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.156312 21769 net.cpp:165] Memory required for data: 1841153200\nI0818 15:07:32.156322 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0818 15:07:32.156332 21769 net.cpp:100] Creating Layer L2_b15_cbr1_scale\nI0818 15:07:32.156338 21769 net.cpp:434] L2_b15_cbr1_scale <- L2_b15_cbr1_bn_top\nI0818 15:07:32.156345 21769 net.cpp:395] L2_b15_cbr1_scale -> L2_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.156409 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_scale\nI0818 15:07:32.156570 21769 net.cpp:150] Setting up L2_b15_cbr1_scale\nI0818 15:07:32.156584 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.156589 21769 net.cpp:165] Memory required for data: 1844430000\nI0818 15:07:32.156599 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr1_relu\nI0818 15:07:32.156610 21769 net.cpp:100] Creating Layer L2_b15_cbr1_relu\nI0818 15:07:32.156616 21769 net.cpp:434] L2_b15_cbr1_relu <- L2_b15_cbr1_bn_top\nI0818 15:07:32.156623 21769 net.cpp:395] L2_b15_cbr1_relu -> L2_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.156633 21769 net.cpp:150] Setting up L2_b15_cbr1_relu\nI0818 15:07:32.156639 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.156644 21769 net.cpp:165] Memory required for data: 1847706800\nI0818 15:07:32.156649 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_conv\nI0818 15:07:32.156663 21769 net.cpp:100] Creating Layer L2_b15_cbr2_conv\nI0818 15:07:32.156669 21769 net.cpp:434] L2_b15_cbr2_conv <- L2_b15_cbr1_bn_top\nI0818 15:07:32.156678 21769 net.cpp:408] L2_b15_cbr2_conv -> L2_b15_cbr2_conv_top\nI0818 15:07:32.157186 21769 net.cpp:150] Setting up 
L2_b15_cbr2_conv\nI0818 15:07:32.157200 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.157205 21769 net.cpp:165] Memory required for data: 1850983600\nI0818 15:07:32.157214 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_bn\nI0818 15:07:32.157227 21769 net.cpp:100] Creating Layer L2_b15_cbr2_bn\nI0818 15:07:32.157240 21769 net.cpp:434] L2_b15_cbr2_bn <- L2_b15_cbr2_conv_top\nI0818 15:07:32.157249 21769 net.cpp:408] L2_b15_cbr2_bn -> L2_b15_cbr2_bn_top\nI0818 15:07:32.157523 21769 net.cpp:150] Setting up L2_b15_cbr2_bn\nI0818 15:07:32.157541 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.157546 21769 net.cpp:165] Memory required for data: 1854260400\nI0818 15:07:32.157557 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0818 15:07:32.157565 21769 net.cpp:100] Creating Layer L2_b15_cbr2_scale\nI0818 15:07:32.157572 21769 net.cpp:434] L2_b15_cbr2_scale <- L2_b15_cbr2_bn_top\nI0818 15:07:32.157578 21769 net.cpp:395] L2_b15_cbr2_scale -> L2_b15_cbr2_bn_top (in-place)\nI0818 15:07:32.157640 21769 layer_factory.hpp:77] Creating layer L2_b15_cbr2_scale\nI0818 15:07:32.157837 21769 net.cpp:150] Setting up L2_b15_cbr2_scale\nI0818 15:07:32.157852 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.157857 21769 net.cpp:165] Memory required for data: 1857537200\nI0818 15:07:32.157866 21769 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise\nI0818 15:07:32.157876 21769 net.cpp:100] Creating Layer L2_b15_sum_eltwise\nI0818 15:07:32.157882 21769 net.cpp:434] L2_b15_sum_eltwise <- L2_b15_cbr2_bn_top\nI0818 15:07:32.157889 21769 net.cpp:434] L2_b15_sum_eltwise <- L2_b14_sum_eltwise_top_L2_b14_relu_0_split_1\nI0818 15:07:32.157901 21769 net.cpp:408] L2_b15_sum_eltwise -> L2_b15_sum_eltwise_top\nI0818 15:07:32.157929 21769 net.cpp:150] Setting up L2_b15_sum_eltwise\nI0818 15:07:32.157938 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.157943 21769 net.cpp:165] Memory required for 
data: 1860814000\nI0818 15:07:32.157948 21769 layer_factory.hpp:77] Creating layer L2_b15_relu\nI0818 15:07:32.157959 21769 net.cpp:100] Creating Layer L2_b15_relu\nI0818 15:07:32.157966 21769 net.cpp:434] L2_b15_relu <- L2_b15_sum_eltwise_top\nI0818 15:07:32.157974 21769 net.cpp:395] L2_b15_relu -> L2_b15_sum_eltwise_top (in-place)\nI0818 15:07:32.157982 21769 net.cpp:150] Setting up L2_b15_relu\nI0818 15:07:32.157989 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.157994 21769 net.cpp:165] Memory required for data: 1864090800\nI0818 15:07:32.157999 21769 layer_factory.hpp:77] Creating layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:32.158005 21769 net.cpp:100] Creating Layer L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:32.158011 21769 net.cpp:434] L2_b15_sum_eltwise_top_L2_b15_relu_0_split <- L2_b15_sum_eltwise_top\nI0818 15:07:32.158018 21769 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0818 15:07:32.158027 21769 net.cpp:408] L2_b15_sum_eltwise_top_L2_b15_relu_0_split -> L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0818 15:07:32.158078 21769 net.cpp:150] Setting up L2_b15_sum_eltwise_top_L2_b15_relu_0_split\nI0818 15:07:32.158090 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.158097 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.158102 21769 net.cpp:165] Memory required for data: 1870644400\nI0818 15:07:32.158107 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_conv\nI0818 15:07:32.158120 21769 net.cpp:100] Creating Layer L2_b16_cbr1_conv\nI0818 15:07:32.158128 21769 net.cpp:434] L2_b16_cbr1_conv <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_0\nI0818 15:07:32.158138 21769 net.cpp:408] L2_b16_cbr1_conv -> L2_b16_cbr1_conv_top\nI0818 15:07:32.158637 21769 net.cpp:150] Setting up L2_b16_cbr1_conv\nI0818 15:07:32.158651 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.158656 21769 net.cpp:165] 
Memory required for data: 1873921200\nI0818 15:07:32.158665 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_bn\nI0818 15:07:32.158677 21769 net.cpp:100] Creating Layer L2_b16_cbr1_bn\nI0818 15:07:32.158691 21769 net.cpp:434] L2_b16_cbr1_bn <- L2_b16_cbr1_conv_top\nI0818 15:07:32.158704 21769 net.cpp:408] L2_b16_cbr1_bn -> L2_b16_cbr1_bn_top\nI0818 15:07:32.158993 21769 net.cpp:150] Setting up L2_b16_cbr1_bn\nI0818 15:07:32.159006 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.159018 21769 net.cpp:165] Memory required for data: 1877198000\nI0818 15:07:32.159029 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0818 15:07:32.159039 21769 net.cpp:100] Creating Layer L2_b16_cbr1_scale\nI0818 15:07:32.159045 21769 net.cpp:434] L2_b16_cbr1_scale <- L2_b16_cbr1_bn_top\nI0818 15:07:32.159054 21769 net.cpp:395] L2_b16_cbr1_scale -> L2_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.159118 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_scale\nI0818 15:07:32.159279 21769 net.cpp:150] Setting up L2_b16_cbr1_scale\nI0818 15:07:32.159292 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.159297 21769 net.cpp:165] Memory required for data: 1880474800\nI0818 15:07:32.159307 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr1_relu\nI0818 15:07:32.159314 21769 net.cpp:100] Creating Layer L2_b16_cbr1_relu\nI0818 15:07:32.159320 21769 net.cpp:434] L2_b16_cbr1_relu <- L2_b16_cbr1_bn_top\nI0818 15:07:32.159330 21769 net.cpp:395] L2_b16_cbr1_relu -> L2_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.159342 21769 net.cpp:150] Setting up L2_b16_cbr1_relu\nI0818 15:07:32.159348 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.159353 21769 net.cpp:165] Memory required for data: 1883751600\nI0818 15:07:32.159358 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_conv\nI0818 15:07:32.159371 21769 net.cpp:100] Creating Layer L2_b16_cbr2_conv\nI0818 15:07:32.159377 21769 net.cpp:434] L2_b16_cbr2_conv <- 
L2_b16_cbr1_bn_top\nI0818 15:07:32.159386 21769 net.cpp:408] L2_b16_cbr2_conv -> L2_b16_cbr2_conv_top\nI0818 15:07:32.159884 21769 net.cpp:150] Setting up L2_b16_cbr2_conv\nI0818 15:07:32.159899 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.159904 21769 net.cpp:165] Memory required for data: 1887028400\nI0818 15:07:32.159912 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_bn\nI0818 15:07:32.159925 21769 net.cpp:100] Creating Layer L2_b16_cbr2_bn\nI0818 15:07:32.159932 21769 net.cpp:434] L2_b16_cbr2_bn <- L2_b16_cbr2_conv_top\nI0818 15:07:32.159941 21769 net.cpp:408] L2_b16_cbr2_bn -> L2_b16_cbr2_bn_top\nI0818 15:07:32.160214 21769 net.cpp:150] Setting up L2_b16_cbr2_bn\nI0818 15:07:32.160230 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160236 21769 net.cpp:165] Memory required for data: 1890305200\nI0818 15:07:32.160246 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0818 15:07:32.160255 21769 net.cpp:100] Creating Layer L2_b16_cbr2_scale\nI0818 15:07:32.160262 21769 net.cpp:434] L2_b16_cbr2_scale <- L2_b16_cbr2_bn_top\nI0818 15:07:32.160269 21769 net.cpp:395] L2_b16_cbr2_scale -> L2_b16_cbr2_bn_top (in-place)\nI0818 15:07:32.160331 21769 layer_factory.hpp:77] Creating layer L2_b16_cbr2_scale\nI0818 15:07:32.160498 21769 net.cpp:150] Setting up L2_b16_cbr2_scale\nI0818 15:07:32.160511 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160516 21769 net.cpp:165] Memory required for data: 1893582000\nI0818 15:07:32.160526 21769 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise\nI0818 15:07:32.160534 21769 net.cpp:100] Creating Layer L2_b16_sum_eltwise\nI0818 15:07:32.160542 21769 net.cpp:434] L2_b16_sum_eltwise <- L2_b16_cbr2_bn_top\nI0818 15:07:32.160548 21769 net.cpp:434] L2_b16_sum_eltwise <- L2_b15_sum_eltwise_top_L2_b15_relu_0_split_1\nI0818 15:07:32.160559 21769 net.cpp:408] L2_b16_sum_eltwise -> L2_b16_sum_eltwise_top\nI0818 15:07:32.160588 21769 net.cpp:150] Setting up 
L2_b16_sum_eltwise\nI0818 15:07:32.160598 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160603 21769 net.cpp:165] Memory required for data: 1896858800\nI0818 15:07:32.160607 21769 layer_factory.hpp:77] Creating layer L2_b16_relu\nI0818 15:07:32.160615 21769 net.cpp:100] Creating Layer L2_b16_relu\nI0818 15:07:32.160621 21769 net.cpp:434] L2_b16_relu <- L2_b16_sum_eltwise_top\nI0818 15:07:32.160631 21769 net.cpp:395] L2_b16_relu -> L2_b16_sum_eltwise_top (in-place)\nI0818 15:07:32.160641 21769 net.cpp:150] Setting up L2_b16_relu\nI0818 15:07:32.160648 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160660 21769 net.cpp:165] Memory required for data: 1900135600\nI0818 15:07:32.160665 21769 layer_factory.hpp:77] Creating layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:32.160673 21769 net.cpp:100] Creating Layer L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:32.160678 21769 net.cpp:434] L2_b16_sum_eltwise_top_L2_b16_relu_0_split <- L2_b16_sum_eltwise_top\nI0818 15:07:32.160691 21769 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0818 15:07:32.160702 21769 net.cpp:408] L2_b16_sum_eltwise_top_L2_b16_relu_0_split -> L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0818 15:07:32.160755 21769 net.cpp:150] Setting up L2_b16_sum_eltwise_top_L2_b16_relu_0_split\nI0818 15:07:32.160768 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160775 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.160779 21769 net.cpp:165] Memory required for data: 1906689200\nI0818 15:07:32.160784 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_conv\nI0818 15:07:32.160799 21769 net.cpp:100] Creating Layer L2_b17_cbr1_conv\nI0818 15:07:32.160805 21769 net.cpp:434] L2_b17_cbr1_conv <- L2_b16_sum_eltwise_top_L2_b16_relu_0_split_0\nI0818 15:07:32.160815 21769 net.cpp:408] L2_b17_cbr1_conv -> L2_b17_cbr1_conv_top\nI0818 15:07:32.161317 21769 
net.cpp:150] Setting up L2_b17_cbr1_conv\nI0818 15:07:32.161332 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.161337 21769 net.cpp:165] Memory required for data: 1909966000\nI0818 15:07:32.161346 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_bn\nI0818 15:07:32.161358 21769 net.cpp:100] Creating Layer L2_b17_cbr1_bn\nI0818 15:07:32.161365 21769 net.cpp:434] L2_b17_cbr1_bn <- L2_b17_cbr1_conv_top\nI0818 15:07:32.161375 21769 net.cpp:408] L2_b17_cbr1_bn -> L2_b17_cbr1_bn_top\nI0818 15:07:32.161653 21769 net.cpp:150] Setting up L2_b17_cbr1_bn\nI0818 15:07:32.161669 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.161674 21769 net.cpp:165] Memory required for data: 1913242800\nI0818 15:07:32.161691 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0818 15:07:32.161701 21769 net.cpp:100] Creating Layer L2_b17_cbr1_scale\nI0818 15:07:32.161707 21769 net.cpp:434] L2_b17_cbr1_scale <- L2_b17_cbr1_bn_top\nI0818 15:07:32.161715 21769 net.cpp:395] L2_b17_cbr1_scale -> L2_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.161777 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_scale\nI0818 15:07:32.161940 21769 net.cpp:150] Setting up L2_b17_cbr1_scale\nI0818 15:07:32.161953 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.161958 21769 net.cpp:165] Memory required for data: 1916519600\nI0818 15:07:32.161967 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr1_relu\nI0818 15:07:32.161975 21769 net.cpp:100] Creating Layer L2_b17_cbr1_relu\nI0818 15:07:32.161981 21769 net.cpp:434] L2_b17_cbr1_relu <- L2_b17_cbr1_bn_top\nI0818 15:07:32.161993 21769 net.cpp:395] L2_b17_cbr1_relu -> L2_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.162003 21769 net.cpp:150] Setting up L2_b17_cbr1_relu\nI0818 15:07:32.162010 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.162014 21769 net.cpp:165] Memory required for data: 1919796400\nI0818 15:07:32.162020 21769 layer_factory.hpp:77] Creating layer 
L2_b17_cbr2_conv\nI0818 15:07:32.162034 21769 net.cpp:100] Creating Layer L2_b17_cbr2_conv\nI0818 15:07:32.162040 21769 net.cpp:434] L2_b17_cbr2_conv <- L2_b17_cbr1_bn_top\nI0818 15:07:32.162050 21769 net.cpp:408] L2_b17_cbr2_conv -> L2_b17_cbr2_conv_top\nI0818 15:07:32.162539 21769 net.cpp:150] Setting up L2_b17_cbr2_conv\nI0818 15:07:32.162554 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.162559 21769 net.cpp:165] Memory required for data: 1923073200\nI0818 15:07:32.162569 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_bn\nI0818 15:07:32.162577 21769 net.cpp:100] Creating Layer L2_b17_cbr2_bn\nI0818 15:07:32.162583 21769 net.cpp:434] L2_b17_cbr2_bn <- L2_b17_cbr2_conv_top\nI0818 15:07:32.162598 21769 net.cpp:408] L2_b17_cbr2_bn -> L2_b17_cbr2_bn_top\nI0818 15:07:32.162883 21769 net.cpp:150] Setting up L2_b17_cbr2_bn\nI0818 15:07:32.162897 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.162902 21769 net.cpp:165] Memory required for data: 1926350000\nI0818 15:07:32.162912 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0818 15:07:32.162925 21769 net.cpp:100] Creating Layer L2_b17_cbr2_scale\nI0818 15:07:32.162931 21769 net.cpp:434] L2_b17_cbr2_scale <- L2_b17_cbr2_bn_top\nI0818 15:07:32.162940 21769 net.cpp:395] L2_b17_cbr2_scale -> L2_b17_cbr2_bn_top (in-place)\nI0818 15:07:32.163003 21769 layer_factory.hpp:77] Creating layer L2_b17_cbr2_scale\nI0818 15:07:32.163170 21769 net.cpp:150] Setting up L2_b17_cbr2_scale\nI0818 15:07:32.163184 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.163189 21769 net.cpp:165] Memory required for data: 1929626800\nI0818 15:07:32.163198 21769 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise\nI0818 15:07:32.163210 21769 net.cpp:100] Creating Layer L2_b17_sum_eltwise\nI0818 15:07:32.163218 21769 net.cpp:434] L2_b17_sum_eltwise <- L2_b17_cbr2_bn_top\nI0818 15:07:32.163224 21769 net.cpp:434] L2_b17_sum_eltwise <- 
L2_b16_sum_eltwise_top_L2_b16_relu_0_split_1\nI0818 15:07:32.163234 21769 net.cpp:408] L2_b17_sum_eltwise -> L2_b17_sum_eltwise_top\nI0818 15:07:32.163264 21769 net.cpp:150] Setting up L2_b17_sum_eltwise\nI0818 15:07:32.163272 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.163277 21769 net.cpp:165] Memory required for data: 1932903600\nI0818 15:07:32.163282 21769 layer_factory.hpp:77] Creating layer L2_b17_relu\nI0818 15:07:32.163290 21769 net.cpp:100] Creating Layer L2_b17_relu\nI0818 15:07:32.163296 21769 net.cpp:434] L2_b17_relu <- L2_b17_sum_eltwise_top\nI0818 15:07:32.163307 21769 net.cpp:395] L2_b17_relu -> L2_b17_sum_eltwise_top (in-place)\nI0818 15:07:32.163317 21769 net.cpp:150] Setting up L2_b17_relu\nI0818 15:07:32.163324 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.163328 21769 net.cpp:165] Memory required for data: 1936180400\nI0818 15:07:32.163333 21769 layer_factory.hpp:77] Creating layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:32.163341 21769 net.cpp:100] Creating Layer L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:32.163345 21769 net.cpp:434] L2_b17_sum_eltwise_top_L2_b17_relu_0_split <- L2_b17_sum_eltwise_top\nI0818 15:07:32.163353 21769 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0818 15:07:32.163363 21769 net.cpp:408] L2_b17_sum_eltwise_top_L2_b17_relu_0_split -> L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0818 15:07:32.163414 21769 net.cpp:150] Setting up L2_b17_sum_eltwise_top_L2_b17_relu_0_split\nI0818 15:07:32.163426 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.163434 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.163437 21769 net.cpp:165] Memory required for data: 1942734000\nI0818 15:07:32.163442 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_conv\nI0818 15:07:32.163455 21769 net.cpp:100] Creating Layer L2_b18_cbr1_conv\nI0818 15:07:32.163460 21769 
net.cpp:434] L2_b18_cbr1_conv <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_0\nI0818 15:07:32.163472 21769 net.cpp:408] L2_b18_cbr1_conv -> L2_b18_cbr1_conv_top\nI0818 15:07:32.164005 21769 net.cpp:150] Setting up L2_b18_cbr1_conv\nI0818 15:07:32.164024 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.164031 21769 net.cpp:165] Memory required for data: 1946010800\nI0818 15:07:32.164039 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_bn\nI0818 15:07:32.164052 21769 net.cpp:100] Creating Layer L2_b18_cbr1_bn\nI0818 15:07:32.164059 21769 net.cpp:434] L2_b18_cbr1_bn <- L2_b18_cbr1_conv_top\nI0818 15:07:32.164067 21769 net.cpp:408] L2_b18_cbr1_bn -> L2_b18_cbr1_bn_top\nI0818 15:07:32.164350 21769 net.cpp:150] Setting up L2_b18_cbr1_bn\nI0818 15:07:32.164363 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.164368 21769 net.cpp:165] Memory required for data: 1949287600\nI0818 15:07:32.164378 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0818 15:07:32.164398 21769 net.cpp:100] Creating Layer L2_b18_cbr1_scale\nI0818 15:07:32.164407 21769 net.cpp:434] L2_b18_cbr1_scale <- L2_b18_cbr1_bn_top\nI0818 15:07:32.164414 21769 net.cpp:395] L2_b18_cbr1_scale -> L2_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.164476 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_scale\nI0818 15:07:32.164641 21769 net.cpp:150] Setting up L2_b18_cbr1_scale\nI0818 15:07:32.164655 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.164660 21769 net.cpp:165] Memory required for data: 1952564400\nI0818 15:07:32.164669 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr1_relu\nI0818 15:07:32.164680 21769 net.cpp:100] Creating Layer L2_b18_cbr1_relu\nI0818 15:07:32.164692 21769 net.cpp:434] L2_b18_cbr1_relu <- L2_b18_cbr1_bn_top\nI0818 15:07:32.164700 21769 net.cpp:395] L2_b18_cbr1_relu -> L2_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.164710 21769 net.cpp:150] Setting up L2_b18_cbr1_relu\nI0818 15:07:32.164717 21769 
net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.164721 21769 net.cpp:165] Memory required for data: 1955841200\nI0818 15:07:32.164726 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_conv\nI0818 15:07:32.164741 21769 net.cpp:100] Creating Layer L2_b18_cbr2_conv\nI0818 15:07:32.164747 21769 net.cpp:434] L2_b18_cbr2_conv <- L2_b18_cbr1_bn_top\nI0818 15:07:32.164759 21769 net.cpp:408] L2_b18_cbr2_conv -> L2_b18_cbr2_conv_top\nI0818 15:07:32.165253 21769 net.cpp:150] Setting up L2_b18_cbr2_conv\nI0818 15:07:32.165268 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.165273 21769 net.cpp:165] Memory required for data: 1959118000\nI0818 15:07:32.165282 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_bn\nI0818 15:07:32.165292 21769 net.cpp:100] Creating Layer L2_b18_cbr2_bn\nI0818 15:07:32.165298 21769 net.cpp:434] L2_b18_cbr2_bn <- L2_b18_cbr2_conv_top\nI0818 15:07:32.165307 21769 net.cpp:408] L2_b18_cbr2_bn -> L2_b18_cbr2_bn_top\nI0818 15:07:32.165581 21769 net.cpp:150] Setting up L2_b18_cbr2_bn\nI0818 15:07:32.165594 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.165601 21769 net.cpp:165] Memory required for data: 1962394800\nI0818 15:07:32.165611 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0818 15:07:32.165619 21769 net.cpp:100] Creating Layer L2_b18_cbr2_scale\nI0818 15:07:32.165626 21769 net.cpp:434] L2_b18_cbr2_scale <- L2_b18_cbr2_bn_top\nI0818 15:07:32.165637 21769 net.cpp:395] L2_b18_cbr2_scale -> L2_b18_cbr2_bn_top (in-place)\nI0818 15:07:32.165704 21769 layer_factory.hpp:77] Creating layer L2_b18_cbr2_scale\nI0818 15:07:32.165866 21769 net.cpp:150] Setting up L2_b18_cbr2_scale\nI0818 15:07:32.165880 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.165884 21769 net.cpp:165] Memory required for data: 1965671600\nI0818 15:07:32.165894 21769 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise\nI0818 15:07:32.165902 21769 net.cpp:100] Creating Layer 
L2_b18_sum_eltwise\nI0818 15:07:32.165910 21769 net.cpp:434] L2_b18_sum_eltwise <- L2_b18_cbr2_bn_top\nI0818 15:07:32.165916 21769 net.cpp:434] L2_b18_sum_eltwise <- L2_b17_sum_eltwise_top_L2_b17_relu_0_split_1\nI0818 15:07:32.165930 21769 net.cpp:408] L2_b18_sum_eltwise -> L2_b18_sum_eltwise_top\nI0818 15:07:32.165959 21769 net.cpp:150] Setting up L2_b18_sum_eltwise\nI0818 15:07:32.165972 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.165977 21769 net.cpp:165] Memory required for data: 1968948400\nI0818 15:07:32.165982 21769 layer_factory.hpp:77] Creating layer L2_b18_relu\nI0818 15:07:32.165990 21769 net.cpp:100] Creating Layer L2_b18_relu\nI0818 15:07:32.165997 21769 net.cpp:434] L2_b18_relu <- L2_b18_sum_eltwise_top\nI0818 15:07:32.166003 21769 net.cpp:395] L2_b18_relu -> L2_b18_sum_eltwise_top (in-place)\nI0818 15:07:32.166013 21769 net.cpp:150] Setting up L2_b18_relu\nI0818 15:07:32.166019 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.166023 21769 net.cpp:165] Memory required for data: 1972225200\nI0818 15:07:32.166028 21769 layer_factory.hpp:77] Creating layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:32.166045 21769 net.cpp:100] Creating Layer L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:32.166052 21769 net.cpp:434] L2_b18_sum_eltwise_top_L2_b18_relu_0_split <- L2_b18_sum_eltwise_top\nI0818 15:07:32.166059 21769 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0818 15:07:32.166069 21769 net.cpp:408] L2_b18_sum_eltwise_top_L2_b18_relu_0_split -> L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0818 15:07:32.166122 21769 net.cpp:150] Setting up L2_b18_sum_eltwise_top_L2_b18_relu_0_split\nI0818 15:07:32.166134 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.166141 21769 net.cpp:157] Top shape: 100 32 16 16 (819200)\nI0818 15:07:32.166146 21769 net.cpp:165] Memory required for data: 1978778800\nI0818 15:07:32.166152 
21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 15:07:32.166162 21769 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 15:07:32.166169 21769 net.cpp:434] L3_b1_cbr1_conv <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_0\nI0818 15:07:32.166182 21769 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 15:07:32.166697 21769 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0818 15:07:32.166712 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.166718 21769 net.cpp:165] Memory required for data: 1979598000\nI0818 15:07:32.166790 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 15:07:32.166805 21769 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 15:07:32.166812 21769 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 15:07:32.166821 21769 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 15:07:32.167129 21769 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 15:07:32.167145 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.167150 21769 net.cpp:165] Memory required for data: 1980417200\nI0818 15:07:32.167161 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:32.167171 21769 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 15:07:32.167176 21769 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 15:07:32.167184 21769 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.167245 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:32.167418 21769 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 15:07:32.167431 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.167436 21769 net.cpp:165] Memory required for data: 1981236400\nI0818 15:07:32.167445 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 15:07:32.167454 21769 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 15:07:32.167459 21769 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 15:07:32.167472 21769 net.cpp:395] L3_b1_cbr1_relu -> 
L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:32.167484 21769 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 15:07:32.167490 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.167495 21769 net.cpp:165] Memory required for data: 1982055600\nI0818 15:07:32.167500 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 15:07:32.167513 21769 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0818 15:07:32.167520 21769 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 15:07:32.167528 21769 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 15:07:32.169041 21769 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 15:07:32.169059 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169065 21769 net.cpp:165] Memory required for data: 1982874800\nI0818 15:07:32.169075 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 15:07:32.169087 21769 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 15:07:32.169095 21769 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 15:07:32.169103 21769 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 15:07:32.169399 21769 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 15:07:32.169411 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169416 21769 net.cpp:165] Memory required for data: 1983694000\nI0818 15:07:32.169435 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:32.169448 21769 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 15:07:32.169456 21769 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 15:07:32.169463 21769 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 15:07:32.169530 21769 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:32.169711 21769 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 15:07:32.169725 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169731 21769 net.cpp:165] Memory required for data: 1984513200\nI0818 15:07:32.169740 21769 layer_factory.hpp:77] 
Creating layer L3_b1_pool\nI0818 15:07:32.169756 21769 net.cpp:100] Creating Layer L3_b1_pool\nI0818 15:07:32.169764 21769 net.cpp:434] L3_b1_pool <- L2_b18_sum_eltwise_top_L2_b18_relu_0_split_1\nI0818 15:07:32.169775 21769 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 15:07:32.169814 21769 net.cpp:150] Setting up L3_b1_pool\nI0818 15:07:32.169826 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169831 21769 net.cpp:165] Memory required for data: 1985332400\nI0818 15:07:32.169836 21769 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 15:07:32.169849 21769 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 15:07:32.169855 21769 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 15:07:32.169862 21769 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 15:07:32.169870 21769 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 15:07:32.169905 21769 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 15:07:32.169915 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169919 21769 net.cpp:165] Memory required for data: 1986151600\nI0818 15:07:32.169924 21769 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 15:07:32.169932 21769 net.cpp:100] Creating Layer L3_b1_relu\nI0818 15:07:32.169939 21769 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 15:07:32.169948 21769 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 15:07:32.169958 21769 net.cpp:150] Setting up L3_b1_relu\nI0818 15:07:32.169965 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 15:07:32.169970 21769 net.cpp:165] Memory required for data: 1986970800\nI0818 15:07:32.169975 21769 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 15:07:32.169983 21769 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 15:07:32.169991 21769 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 15:07:32.171203 21769 net.cpp:150] Setting up L3_b1_zeros\nI0818 15:07:32.171221 21769 net.cpp:157] Top shape: 100 32 8 8 (204800)\nI0818 
15:07:32.171227 21769 net.cpp:165] Memory required for data: 1987790000\nI0818 15:07:32.171233 21769 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 15:07:32.171245 21769 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 15:07:32.171252 21769 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 15:07:32.171260 21769 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 15:07:32.171268 21769 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 15:07:32.171314 21769 net.cpp:150] Setting up L3_b1_concat0\nI0818 15:07:32.171326 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.171331 21769 net.cpp:165] Memory required for data: 1989428400\nI0818 15:07:32.171336 21769 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:32.171344 21769 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:32.171350 21769 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 15:07:32.171363 21769 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:32.171375 21769 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:32.171432 21769 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:32.171445 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.171452 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.171463 21769 net.cpp:165] Memory required for data: 1992705200\nI0818 15:07:32.171469 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 15:07:32.171481 21769 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 15:07:32.171489 21769 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:32.171500 21769 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 15:07:32.172600 21769 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 15:07:32.172621 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 
15:07:32.172631 21769 net.cpp:165] Memory required for data: 1994343600\nI0818 15:07:32.172646 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 15:07:32.172660 21769 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 15:07:32.172672 21769 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 15:07:32.172698 21769 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 15:07:32.172989 21769 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 15:07:32.173003 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.173008 21769 net.cpp:165] Memory required for data: 1995982000\nI0818 15:07:32.173019 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:32.173032 21769 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 15:07:32.173039 21769 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 15:07:32.173048 21769 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.173110 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:32.173283 21769 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 15:07:32.173297 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.173302 21769 net.cpp:165] Memory required for data: 1997620400\nI0818 15:07:32.173311 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 15:07:32.173319 21769 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 15:07:32.173326 21769 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 15:07:32.173336 21769 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:32.173347 21769 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 15:07:32.173354 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.173358 21769 net.cpp:165] Memory required for data: 1999258800\nI0818 15:07:32.173363 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 15:07:32.173377 21769 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 15:07:32.173384 21769 net.cpp:434] L3_b2_cbr2_conv <- 
L3_b2_cbr1_bn_top\nI0818 15:07:32.173393 21769 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 15:07:32.175434 21769 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 15:07:32.175452 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.175458 21769 net.cpp:165] Memory required for data: 2000897200\nI0818 15:07:32.175467 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0818 15:07:32.175480 21769 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 15:07:32.175488 21769 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 15:07:32.175496 21769 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 15:07:32.175793 21769 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 15:07:32.175807 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.175813 21769 net.cpp:165] Memory required for data: 2002535600\nI0818 15:07:32.175823 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:32.175837 21769 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 15:07:32.175843 21769 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 15:07:32.175854 21769 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 15:07:32.175920 21769 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:32.176095 21769 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 15:07:32.176108 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.176113 21769 net.cpp:165] Memory required for data: 2004174000\nI0818 15:07:32.176122 21769 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 15:07:32.176132 21769 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 15:07:32.176146 21769 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 15:07:32.176154 21769 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:32.176165 21769 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 15:07:32.176203 21769 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 15:07:32.176213 
21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.176218 21769 net.cpp:165] Memory required for data: 2005812400\nI0818 15:07:32.176223 21769 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 15:07:32.176234 21769 net.cpp:100] Creating Layer L3_b2_relu\nI0818 15:07:32.176241 21769 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 15:07:32.176249 21769 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 15:07:32.176257 21769 net.cpp:150] Setting up L3_b2_relu\nI0818 15:07:32.176265 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.176270 21769 net.cpp:165] Memory required for data: 2007450800\nI0818 15:07:32.176275 21769 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:32.176281 21769 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:32.176286 21769 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 15:07:32.176293 21769 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:32.176303 21769 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:32.176355 21769 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:32.176367 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.176374 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.176379 21769 net.cpp:165] Memory required for data: 2010727600\nI0818 15:07:32.176384 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 15:07:32.176398 21769 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 15:07:32.176405 21769 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:32.176414 21769 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 15:07:32.177454 21769 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 15:07:32.177470 21769 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.177475 21769 net.cpp:165] Memory required for data: 2012366000\nI0818 15:07:32.177484 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 15:07:32.177498 21769 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 15:07:32.177505 21769 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 15:07:32.177516 21769 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 15:07:32.177806 21769 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 15:07:32.177820 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.177825 21769 net.cpp:165] Memory required for data: 2014004400\nI0818 15:07:32.177835 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:32.177845 21769 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 15:07:32.177850 21769 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 15:07:32.177861 21769 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:32.177924 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:32.178093 21769 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 15:07:32.178107 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.178112 21769 net.cpp:165] Memory required for data: 2015642800\nI0818 15:07:32.178120 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 15:07:32.178128 21769 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 15:07:32.178134 21769 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 15:07:32.178145 21769 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:32.178155 21769 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 15:07:32.178162 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.178167 21769 net.cpp:165] Memory required for data: 2017281200\nI0818 15:07:32.178179 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 15:07:32.178194 21769 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 15:07:32.178200 21769 
net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 15:07:32.178215 21769 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 15:07:32.179257 21769 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 15:07:32.179272 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.179277 21769 net.cpp:165] Memory required for data: 2018919600\nI0818 15:07:32.179286 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 15:07:32.179296 21769 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 15:07:32.179302 21769 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 15:07:32.179314 21769 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 15:07:32.179605 21769 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 15:07:32.179620 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.179626 21769 net.cpp:165] Memory required for data: 2020558000\nI0818 15:07:32.179637 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:32.179647 21769 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 15:07:32.179653 21769 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 15:07:32.179661 21769 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 15:07:32.179734 21769 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:32.179905 21769 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 15:07:32.179919 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.179924 21769 net.cpp:165] Memory required for data: 2022196400\nI0818 15:07:32.179934 21769 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 15:07:32.179942 21769 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 15:07:32.179952 21769 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 15:07:32.179960 21769 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:32.179968 21769 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 15:07:32.180006 21769 net.cpp:150] Setting up 
L3_b3_sum_eltwise\nI0818 15:07:32.180018 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.180023 21769 net.cpp:165] Memory required for data: 2023834800\nI0818 15:07:32.180029 21769 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 15:07:32.180037 21769 net.cpp:100] Creating Layer L3_b3_relu\nI0818 15:07:32.180043 21769 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0818 15:07:32.180050 21769 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 15:07:32.180059 21769 net.cpp:150] Setting up L3_b3_relu\nI0818 15:07:32.180066 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.180070 21769 net.cpp:165] Memory required for data: 2025473200\nI0818 15:07:32.180075 21769 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:32.180083 21769 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:32.180088 21769 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 15:07:32.180095 21769 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:32.180105 21769 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:32.180157 21769 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:32.180169 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.180176 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.180181 21769 net.cpp:165] Memory required for data: 2028750000\nI0818 15:07:32.180186 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 15:07:32.180200 21769 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 15:07:32.180207 21769 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:32.180217 21769 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 15:07:32.181272 21769 net.cpp:150] Setting up 
L3_b4_cbr1_conv\nI0818 15:07:32.181288 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.181293 21769 net.cpp:165] Memory required for data: 2030388400\nI0818 15:07:32.181301 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 15:07:32.181311 21769 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 15:07:32.181318 21769 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI0818 15:07:32.181329 21769 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 15:07:32.181619 21769 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 15:07:32.181633 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.181638 21769 net.cpp:165] Memory required for data: 2032026800\nI0818 15:07:32.181648 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:32.181658 21769 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 15:07:32.181663 21769 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 15:07:32.181674 21769 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.181744 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:32.181916 21769 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 15:07:32.181931 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.181936 21769 net.cpp:165] Memory required for data: 2033665200\nI0818 15:07:32.181944 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 15:07:32.181955 21769 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 15:07:32.181962 21769 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 15:07:32.181970 21769 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:32.181979 21769 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 15:07:32.181987 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.181991 21769 net.cpp:165] Memory required for data: 2035303600\nI0818 15:07:32.181996 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 15:07:32.182010 21769 net.cpp:100] 
Creating Layer L3_b4_cbr2_conv\nI0818 15:07:32.182016 21769 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 15:07:32.182027 21769 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 15:07:32.183075 21769 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 15:07:32.183090 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183095 21769 net.cpp:165] Memory required for data: 2036942000\nI0818 15:07:32.183104 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 15:07:32.183113 21769 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 15:07:32.183120 21769 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 15:07:32.183131 21769 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 15:07:32.183418 21769 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 15:07:32.183434 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183440 21769 net.cpp:165] Memory required for data: 2038580400\nI0818 15:07:32.183450 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:32.183459 21769 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 15:07:32.183465 21769 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 15:07:32.183473 21769 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 15:07:32.183535 21769 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:32.183712 21769 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 15:07:32.183727 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183732 21769 net.cpp:165] Memory required for data: 2040218800\nI0818 15:07:32.183742 21769 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 15:07:32.183753 21769 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 15:07:32.183760 21769 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 15:07:32.183768 21769 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:32.183775 21769 net.cpp:408] L3_b4_sum_eltwise -> 
L3_b4_sum_eltwise_top\nI0818 15:07:32.183815 21769 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 15:07:32.183831 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183836 21769 net.cpp:165] Memory required for data: 2041857200\nI0818 15:07:32.183841 21769 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 15:07:32.183850 21769 net.cpp:100] Creating Layer L3_b4_relu\nI0818 15:07:32.183856 21769 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 15:07:32.183862 21769 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 15:07:32.183871 21769 net.cpp:150] Setting up L3_b4_relu\nI0818 15:07:32.183878 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183882 21769 net.cpp:165] Memory required for data: 2043495600\nI0818 15:07:32.183887 21769 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:32.183894 21769 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:32.183900 21769 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 15:07:32.183912 21769 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:32.183923 21769 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:32.183971 21769 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:32.183984 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183990 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.183995 21769 net.cpp:165] Memory required for data: 2046772400\nI0818 15:07:32.184000 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 15:07:32.184015 21769 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 15:07:32.184020 21769 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:32.184031 21769 net.cpp:408] L3_b5_cbr1_conv -> 
L3_b5_cbr1_conv_top\nI0818 15:07:32.185086 21769 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 15:07:32.185101 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.185106 21769 net.cpp:165] Memory required for data: 2048410800\nI0818 15:07:32.185114 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 15:07:32.185127 21769 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 15:07:32.185133 21769 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 15:07:32.185142 21769 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 15:07:32.185425 21769 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 15:07:32.185439 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.185444 21769 net.cpp:165] Memory required for data: 2050049200\nI0818 15:07:32.185454 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:32.185467 21769 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 15:07:32.185474 21769 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 15:07:32.185482 21769 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.185549 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:32.185725 21769 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 15:07:32.185739 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.185745 21769 net.cpp:165] Memory required for data: 2051687600\nI0818 15:07:32.185753 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 15:07:32.185763 21769 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 15:07:32.185770 21769 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 15:07:32.185778 21769 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:32.185788 21769 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 15:07:32.185796 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.185799 21769 net.cpp:165] Memory required for data: 2053326000\nI0818 15:07:32.185804 21769 layer_factory.hpp:77] 
Creating layer L3_b5_cbr2_conv\nI0818 15:07:32.185818 21769 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 15:07:32.185824 21769 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 15:07:32.185839 21769 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 15:07:32.186885 21769 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 15:07:32.186900 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.186905 21769 net.cpp:165] Memory required for data: 2054964400\nI0818 15:07:32.186914 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 15:07:32.186924 21769 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 15:07:32.186930 21769 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 15:07:32.186944 21769 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 15:07:32.187227 21769 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 15:07:32.187243 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187249 21769 net.cpp:165] Memory required for data: 2056602800\nI0818 15:07:32.187259 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:32.187268 21769 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 15:07:32.187274 21769 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 15:07:32.187283 21769 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 15:07:32.187345 21769 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:32.187515 21769 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 15:07:32.187528 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187533 21769 net.cpp:165] Memory required for data: 2058241200\nI0818 15:07:32.187542 21769 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 15:07:32.187554 21769 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 15:07:32.187562 21769 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0818 15:07:32.187569 21769 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 
15:07:32.187577 21769 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 15:07:32.187616 21769 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 15:07:32.187628 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187633 21769 net.cpp:165] Memory required for data: 2059879600\nI0818 15:07:32.187638 21769 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 15:07:32.187646 21769 net.cpp:100] Creating Layer L3_b5_relu\nI0818 15:07:32.187652 21769 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 15:07:32.187659 21769 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 15:07:32.187669 21769 net.cpp:150] Setting up L3_b5_relu\nI0818 15:07:32.187675 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187680 21769 net.cpp:165] Memory required for data: 2061518000\nI0818 15:07:32.187691 21769 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:32.187700 21769 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:32.187705 21769 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 15:07:32.187716 21769 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:32.187726 21769 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:32.187775 21769 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:32.187786 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187793 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.187798 21769 net.cpp:165] Memory required for data: 2064794800\nI0818 15:07:32.187803 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 15:07:32.187819 21769 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0818 15:07:32.187826 21769 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 
15:07:32.187835 21769 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 15:07:32.189880 21769 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 15:07:32.189898 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.189903 21769 net.cpp:165] Memory required for data: 2066433200\nI0818 15:07:32.189913 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 15:07:32.189926 21769 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 15:07:32.189941 21769 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 15:07:32.189951 21769 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 15:07:32.190237 21769 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 15:07:32.190250 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.190255 21769 net.cpp:165] Memory required for data: 2068071600\nI0818 15:07:32.190266 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:32.190275 21769 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 15:07:32.190281 21769 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 15:07:32.190289 21769 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.190356 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:32.190523 21769 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 15:07:32.190539 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.190544 21769 net.cpp:165] Memory required for data: 2069710000\nI0818 15:07:32.190554 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 15:07:32.190562 21769 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 15:07:32.190568 21769 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 15:07:32.190577 21769 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:32.190585 21769 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 15:07:32.190593 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.190598 21769 net.cpp:165] Memory required for data: 
2071348400\nI0818 15:07:32.190603 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 15:07:32.190616 21769 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 15:07:32.190623 21769 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 15:07:32.190634 21769 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 15:07:32.191689 21769 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 15:07:32.191704 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.191709 21769 net.cpp:165] Memory required for data: 2072986800\nI0818 15:07:32.191720 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 15:07:32.191731 21769 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 15:07:32.191738 21769 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 15:07:32.191747 21769 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 15:07:32.192034 21769 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 15:07:32.192049 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192054 21769 net.cpp:165] Memory required for data: 2074625200\nI0818 15:07:32.192064 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:32.192075 21769 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 15:07:32.192082 21769 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 15:07:32.192090 21769 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 15:07:32.192155 21769 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:32.192327 21769 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 15:07:32.192340 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192345 21769 net.cpp:165] Memory required for data: 2076263600\nI0818 15:07:32.192354 21769 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 15:07:32.192366 21769 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 15:07:32.192373 21769 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 15:07:32.192380 21769 net.cpp:434] 
L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:32.192389 21769 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 15:07:32.192427 21769 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 15:07:32.192438 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192443 21769 net.cpp:165] Memory required for data: 2077902000\nI0818 15:07:32.192448 21769 layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 15:07:32.192456 21769 net.cpp:100] Creating Layer L3_b6_relu\nI0818 15:07:32.192462 21769 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 15:07:32.192474 21769 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 15:07:32.192492 21769 net.cpp:150] Setting up L3_b6_relu\nI0818 15:07:32.192499 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192503 21769 net.cpp:165] Memory required for data: 2079540400\nI0818 15:07:32.192508 21769 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:32.192515 21769 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:32.192522 21769 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 15:07:32.192528 21769 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:32.192538 21769 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:32.192592 21769 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:32.192605 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192610 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.192615 21769 net.cpp:165] Memory required for data: 2082817200\nI0818 15:07:32.192620 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 15:07:32.192636 21769 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 15:07:32.192642 21769 net.cpp:434] 
L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:32.192651 21769 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 15:07:32.193730 21769 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 15:07:32.193745 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.193750 21769 net.cpp:165] Memory required for data: 2084455600\nI0818 15:07:32.193760 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 15:07:32.193773 21769 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 15:07:32.193780 21769 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 15:07:32.193789 21769 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 15:07:32.194080 21769 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 15:07:32.194093 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.194098 21769 net.cpp:165] Memory required for data: 2086094000\nI0818 15:07:32.194109 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:32.194118 21769 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 15:07:32.194124 21769 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 15:07:32.194131 21769 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.194202 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:32.194375 21769 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 15:07:32.194389 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.194394 21769 net.cpp:165] Memory required for data: 2087732400\nI0818 15:07:32.194404 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 15:07:32.194412 21769 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 15:07:32.194419 21769 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 15:07:32.194427 21769 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:32.194439 21769 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0818 15:07:32.194447 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 
15:07:32.194452 21769 net.cpp:165] Memory required for data: 2089370800\nI0818 15:07:32.194456 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 15:07:32.194468 21769 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 15:07:32.194476 21769 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 15:07:32.194485 21769 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 15:07:32.195533 21769 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 15:07:32.195547 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.195552 21769 net.cpp:165] Memory required for data: 2091009200\nI0818 15:07:32.195561 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 15:07:32.195574 21769 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 15:07:32.195581 21769 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 15:07:32.195596 21769 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 15:07:32.195894 21769 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 15:07:32.195909 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.195914 21769 net.cpp:165] Memory required for data: 2092647600\nI0818 15:07:32.195924 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:32.195937 21769 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 15:07:32.195945 21769 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 15:07:32.195952 21769 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 15:07:32.196017 21769 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:32.196188 21769 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 15:07:32.196202 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.196208 21769 net.cpp:165] Memory required for data: 2094286000\nI0818 15:07:32.196216 21769 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 15:07:32.196228 21769 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0818 15:07:32.196235 21769 net.cpp:434] L3_b7_sum_eltwise <- 
L3_b7_cbr2_bn_top\nI0818 15:07:32.196243 21769 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:32.196254 21769 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 15:07:32.196290 21769 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 15:07:32.196300 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.196305 21769 net.cpp:165] Memory required for data: 2095924400\nI0818 15:07:32.196311 21769 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 15:07:32.196326 21769 net.cpp:100] Creating Layer L3_b7_relu\nI0818 15:07:32.196332 21769 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 15:07:32.196339 21769 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 15:07:32.196348 21769 net.cpp:150] Setting up L3_b7_relu\nI0818 15:07:32.196355 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.196360 21769 net.cpp:165] Memory required for data: 2097562800\nI0818 15:07:32.196365 21769 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:32.196372 21769 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:32.196378 21769 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 15:07:32.196385 21769 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:32.196395 21769 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:32.196447 21769 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:32.196460 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.196466 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.196471 21769 net.cpp:165] Memory required for data: 2100839600\nI0818 15:07:32.196476 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0818 15:07:32.196491 21769 net.cpp:100] Creating Layer 
L3_b8_cbr1_conv\nI0818 15:07:32.196498 21769 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:32.196507 21769 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 15:07:32.197551 21769 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 15:07:32.197566 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.197571 21769 net.cpp:165] Memory required for data: 2102478000\nI0818 15:07:32.197579 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 15:07:32.197593 21769 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 15:07:32.197600 21769 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 15:07:32.197613 21769 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 15:07:32.197906 21769 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 15:07:32.197919 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.197924 21769 net.cpp:165] Memory required for data: 2104116400\nI0818 15:07:32.197942 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:32.197952 21769 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 15:07:32.197958 21769 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 15:07:32.197966 21769 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.198034 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:32.198204 21769 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 15:07:32.198216 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.198221 21769 net.cpp:165] Memory required for data: 2105754800\nI0818 15:07:32.198230 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 15:07:32.198238 21769 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 15:07:32.198246 21769 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 15:07:32.198256 21769 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:32.198266 21769 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 15:07:32.198272 
21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.198277 21769 net.cpp:165] Memory required for data: 2107393200\nI0818 15:07:32.198282 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 15:07:32.198297 21769 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 15:07:32.198302 21769 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 15:07:32.198312 21769 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 15:07:32.199360 21769 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 15:07:32.199375 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.199381 21769 net.cpp:165] Memory required for data: 2109031600\nI0818 15:07:32.199390 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 15:07:32.199403 21769 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 15:07:32.199410 21769 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 15:07:32.199419 21769 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 15:07:32.199723 21769 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 15:07:32.199738 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.199743 21769 net.cpp:165] Memory required for data: 2110670000\nI0818 15:07:32.199753 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:32.199764 21769 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 15:07:32.199771 21769 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 15:07:32.199779 21769 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 15:07:32.199846 21769 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:32.200019 21769 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 15:07:32.200033 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.200038 21769 net.cpp:165] Memory required for data: 2112308400\nI0818 15:07:32.200047 21769 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 15:07:32.200059 21769 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 
15:07:32.200067 21769 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 15:07:32.200073 21769 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:32.200084 21769 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 15:07:32.200121 21769 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 15:07:32.200132 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.200137 21769 net.cpp:165] Memory required for data: 2113946800\nI0818 15:07:32.200142 21769 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 15:07:32.200155 21769 net.cpp:100] Creating Layer L3_b8_relu\nI0818 15:07:32.200160 21769 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 15:07:32.200167 21769 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 15:07:32.200177 21769 net.cpp:150] Setting up L3_b8_relu\nI0818 15:07:32.200184 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.200188 21769 net.cpp:165] Memory required for data: 2115585200\nI0818 15:07:32.200193 21769 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:32.200208 21769 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:32.200214 21769 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 15:07:32.200222 21769 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:32.200232 21769 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:32.200285 21769 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:32.200297 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.200304 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.200309 21769 net.cpp:165] Memory required for data: 2118862000\nI0818 15:07:32.200314 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 
15:07:32.200328 21769 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 15:07:32.200335 21769 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:32.200345 21769 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 15:07:32.201388 21769 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 15:07:32.201405 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.201409 21769 net.cpp:165] Memory required for data: 2120500400\nI0818 15:07:32.201417 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 15:07:32.201431 21769 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 15:07:32.201437 21769 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 15:07:32.201448 21769 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 15:07:32.201742 21769 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 15:07:32.201756 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.201761 21769 net.cpp:165] Memory required for data: 2122138800\nI0818 15:07:32.201772 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:32.201781 21769 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 15:07:32.201787 21769 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 15:07:32.201798 21769 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.201865 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:32.202036 21769 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 15:07:32.202050 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.202055 21769 net.cpp:165] Memory required for data: 2123777200\nI0818 15:07:32.202064 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 15:07:32.202072 21769 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 15:07:32.202078 21769 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 15:07:32.202090 21769 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:32.202100 21769 net.cpp:150] 
Setting up L3_b9_cbr1_relu\nI0818 15:07:32.202106 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.202111 21769 net.cpp:165] Memory required for data: 2125415600\nI0818 15:07:32.202116 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 15:07:32.202131 21769 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 15:07:32.202137 21769 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI0818 15:07:32.202145 21769 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 15:07:32.204193 21769 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 15:07:32.204211 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.204216 21769 net.cpp:165] Memory required for data: 2127054000\nI0818 15:07:32.204226 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 15:07:32.204239 21769 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 15:07:32.204246 21769 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 15:07:32.204258 21769 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 15:07:32.204546 21769 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 15:07:32.204560 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.204566 21769 net.cpp:165] Memory required for data: 2128692400\nI0818 15:07:32.204576 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:32.204593 21769 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 15:07:32.204601 21769 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 15:07:32.204608 21769 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 15:07:32.204679 21769 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:32.204859 21769 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 15:07:32.204872 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.204877 21769 net.cpp:165] Memory required for data: 2130330800\nI0818 15:07:32.204886 21769 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 15:07:32.204896 21769 net.cpp:100] 
Creating Layer L3_b9_sum_eltwise\nI0818 15:07:32.204903 21769 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 15:07:32.204910 21769 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:32.204921 21769 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 15:07:32.204958 21769 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 15:07:32.204973 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.204978 21769 net.cpp:165] Memory required for data: 2131969200\nI0818 15:07:32.204984 21769 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 15:07:32.204993 21769 net.cpp:100] Creating Layer L3_b9_relu\nI0818 15:07:32.204998 21769 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 15:07:32.205005 21769 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 15:07:32.205014 21769 net.cpp:150] Setting up L3_b9_relu\nI0818 15:07:32.205021 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.205026 21769 net.cpp:165] Memory required for data: 2133607600\nI0818 15:07:32.205030 21769 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:32.205040 21769 net.cpp:100] Creating Layer L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:32.205046 21769 net.cpp:434] L3_b9_sum_eltwise_top_L3_b9_relu_0_split <- L3_b9_sum_eltwise_top\nI0818 15:07:32.205054 21769 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0818 15:07:32.205065 21769 net.cpp:408] L3_b9_sum_eltwise_top_L3_b9_relu_0_split -> L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0818 15:07:32.205116 21769 net.cpp:150] Setting up L3_b9_sum_eltwise_top_L3_b9_relu_0_split\nI0818 15:07:32.205128 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.205135 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.205139 21769 net.cpp:165] Memory required for data: 2136884400\nI0818 15:07:32.205144 21769 layer_factory.hpp:77] 
Creating layer L3_b10_cbr1_conv\nI0818 15:07:32.205157 21769 net.cpp:100] Creating Layer L3_b10_cbr1_conv\nI0818 15:07:32.205163 21769 net.cpp:434] L3_b10_cbr1_conv <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_0\nI0818 15:07:32.205175 21769 net.cpp:408] L3_b10_cbr1_conv -> L3_b10_cbr1_conv_top\nI0818 15:07:32.206215 21769 net.cpp:150] Setting up L3_b10_cbr1_conv\nI0818 15:07:32.206231 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.206236 21769 net.cpp:165] Memory required for data: 2138522800\nI0818 15:07:32.206245 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_bn\nI0818 15:07:32.206255 21769 net.cpp:100] Creating Layer L3_b10_cbr1_bn\nI0818 15:07:32.206261 21769 net.cpp:434] L3_b10_cbr1_bn <- L3_b10_cbr1_conv_top\nI0818 15:07:32.206274 21769 net.cpp:408] L3_b10_cbr1_bn -> L3_b10_cbr1_bn_top\nI0818 15:07:32.206560 21769 net.cpp:150] Setting up L3_b10_cbr1_bn\nI0818 15:07:32.206576 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.206583 21769 net.cpp:165] Memory required for data: 2140161200\nI0818 15:07:32.206593 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0818 15:07:32.206603 21769 net.cpp:100] Creating Layer L3_b10_cbr1_scale\nI0818 15:07:32.206609 21769 net.cpp:434] L3_b10_cbr1_scale <- L3_b10_cbr1_bn_top\nI0818 15:07:32.206616 21769 net.cpp:395] L3_b10_cbr1_scale -> L3_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.206681 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_scale\nI0818 15:07:32.206867 21769 net.cpp:150] Setting up L3_b10_cbr1_scale\nI0818 15:07:32.206881 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.206887 21769 net.cpp:165] Memory required for data: 2141799600\nI0818 15:07:32.206895 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr1_relu\nI0818 15:07:32.206909 21769 net.cpp:100] Creating Layer L3_b10_cbr1_relu\nI0818 15:07:32.206917 21769 net.cpp:434] L3_b10_cbr1_relu <- L3_b10_cbr1_bn_top\nI0818 15:07:32.206924 21769 net.cpp:395] L3_b10_cbr1_relu -> 
L3_b10_cbr1_bn_top (in-place)\nI0818 15:07:32.206934 21769 net.cpp:150] Setting up L3_b10_cbr1_relu\nI0818 15:07:32.206941 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.206946 21769 net.cpp:165] Memory required for data: 2143438000\nI0818 15:07:32.206951 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_conv\nI0818 15:07:32.206964 21769 net.cpp:100] Creating Layer L3_b10_cbr2_conv\nI0818 15:07:32.206970 21769 net.cpp:434] L3_b10_cbr2_conv <- L3_b10_cbr1_bn_top\nI0818 15:07:32.206979 21769 net.cpp:408] L3_b10_cbr2_conv -> L3_b10_cbr2_conv_top\nI0818 15:07:32.208025 21769 net.cpp:150] Setting up L3_b10_cbr2_conv\nI0818 15:07:32.208040 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208046 21769 net.cpp:165] Memory required for data: 2145076400\nI0818 15:07:32.208055 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_bn\nI0818 15:07:32.208067 21769 net.cpp:100] Creating Layer L3_b10_cbr2_bn\nI0818 15:07:32.208075 21769 net.cpp:434] L3_b10_cbr2_bn <- L3_b10_cbr2_conv_top\nI0818 15:07:32.208086 21769 net.cpp:408] L3_b10_cbr2_bn -> L3_b10_cbr2_bn_top\nI0818 15:07:32.208370 21769 net.cpp:150] Setting up L3_b10_cbr2_bn\nI0818 15:07:32.208384 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208389 21769 net.cpp:165] Memory required for data: 2146714800\nI0818 15:07:32.208400 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0818 15:07:32.208408 21769 net.cpp:100] Creating Layer L3_b10_cbr2_scale\nI0818 15:07:32.208415 21769 net.cpp:434] L3_b10_cbr2_scale <- L3_b10_cbr2_bn_top\nI0818 15:07:32.208425 21769 net.cpp:395] L3_b10_cbr2_scale -> L3_b10_cbr2_bn_top (in-place)\nI0818 15:07:32.208489 21769 layer_factory.hpp:77] Creating layer L3_b10_cbr2_scale\nI0818 15:07:32.208655 21769 net.cpp:150] Setting up L3_b10_cbr2_scale\nI0818 15:07:32.208669 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208673 21769 net.cpp:165] Memory required for data: 2148353200\nI0818 15:07:32.208688 21769 
layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise\nI0818 15:07:32.208698 21769 net.cpp:100] Creating Layer L3_b10_sum_eltwise\nI0818 15:07:32.208705 21769 net.cpp:434] L3_b10_sum_eltwise <- L3_b10_cbr2_bn_top\nI0818 15:07:32.208712 21769 net.cpp:434] L3_b10_sum_eltwise <- L3_b9_sum_eltwise_top_L3_b9_relu_0_split_1\nI0818 15:07:32.208724 21769 net.cpp:408] L3_b10_sum_eltwise -> L3_b10_sum_eltwise_top\nI0818 15:07:32.208765 21769 net.cpp:150] Setting up L3_b10_sum_eltwise\nI0818 15:07:32.208776 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208781 21769 net.cpp:165] Memory required for data: 2149991600\nI0818 15:07:32.208786 21769 layer_factory.hpp:77] Creating layer L3_b10_relu\nI0818 15:07:32.208794 21769 net.cpp:100] Creating Layer L3_b10_relu\nI0818 15:07:32.208799 21769 net.cpp:434] L3_b10_relu <- L3_b10_sum_eltwise_top\nI0818 15:07:32.208811 21769 net.cpp:395] L3_b10_relu -> L3_b10_sum_eltwise_top (in-place)\nI0818 15:07:32.208819 21769 net.cpp:150] Setting up L3_b10_relu\nI0818 15:07:32.208827 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208832 21769 net.cpp:165] Memory required for data: 2151630000\nI0818 15:07:32.208837 21769 layer_factory.hpp:77] Creating layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:32.208843 21769 net.cpp:100] Creating Layer L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:32.208848 21769 net.cpp:434] L3_b10_sum_eltwise_top_L3_b10_relu_0_split <- L3_b10_sum_eltwise_top\nI0818 15:07:32.208855 21769 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0818 15:07:32.208871 21769 net.cpp:408] L3_b10_sum_eltwise_top_L3_b10_relu_0_split -> L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0818 15:07:32.208925 21769 net.cpp:150] Setting up L3_b10_sum_eltwise_top_L3_b10_relu_0_split\nI0818 15:07:32.208936 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.208943 21769 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0818 15:07:32.208948 21769 net.cpp:165] Memory required for data: 2154906800\nI0818 15:07:32.208953 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_conv\nI0818 15:07:32.208964 21769 net.cpp:100] Creating Layer L3_b11_cbr1_conv\nI0818 15:07:32.208971 21769 net.cpp:434] L3_b11_cbr1_conv <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_0\nI0818 15:07:32.208982 21769 net.cpp:408] L3_b11_cbr1_conv -> L3_b11_cbr1_conv_top\nI0818 15:07:32.210034 21769 net.cpp:150] Setting up L3_b11_cbr1_conv\nI0818 15:07:32.210049 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.210054 21769 net.cpp:165] Memory required for data: 2156545200\nI0818 15:07:32.210063 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_bn\nI0818 15:07:32.210073 21769 net.cpp:100] Creating Layer L3_b11_cbr1_bn\nI0818 15:07:32.210079 21769 net.cpp:434] L3_b11_cbr1_bn <- L3_b11_cbr1_conv_top\nI0818 15:07:32.210094 21769 net.cpp:408] L3_b11_cbr1_bn -> L3_b11_cbr1_bn_top\nI0818 15:07:32.210384 21769 net.cpp:150] Setting up L3_b11_cbr1_bn\nI0818 15:07:32.210400 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.210407 21769 net.cpp:165] Memory required for data: 2158183600\nI0818 15:07:32.210417 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0818 15:07:32.210425 21769 net.cpp:100] Creating Layer L3_b11_cbr1_scale\nI0818 15:07:32.210431 21769 net.cpp:434] L3_b11_cbr1_scale <- L3_b11_cbr1_bn_top\nI0818 15:07:32.210439 21769 net.cpp:395] L3_b11_cbr1_scale -> L3_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.210501 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_scale\nI0818 15:07:32.210670 21769 net.cpp:150] Setting up L3_b11_cbr1_scale\nI0818 15:07:32.210690 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.210695 21769 net.cpp:165] Memory required for data: 2159822000\nI0818 15:07:32.210705 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr1_relu\nI0818 15:07:32.210716 21769 net.cpp:100] Creating Layer L3_b11_cbr1_relu\nI0818 
15:07:32.210722 21769 net.cpp:434] L3_b11_cbr1_relu <- L3_b11_cbr1_bn_top\nI0818 15:07:32.210729 21769 net.cpp:395] L3_b11_cbr1_relu -> L3_b11_cbr1_bn_top (in-place)\nI0818 15:07:32.210739 21769 net.cpp:150] Setting up L3_b11_cbr1_relu\nI0818 15:07:32.210747 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.210752 21769 net.cpp:165] Memory required for data: 2161460400\nI0818 15:07:32.210755 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_conv\nI0818 15:07:32.210769 21769 net.cpp:100] Creating Layer L3_b11_cbr2_conv\nI0818 15:07:32.210777 21769 net.cpp:434] L3_b11_cbr2_conv <- L3_b11_cbr1_bn_top\nI0818 15:07:32.210784 21769 net.cpp:408] L3_b11_cbr2_conv -> L3_b11_cbr2_conv_top\nI0818 15:07:32.211861 21769 net.cpp:150] Setting up L3_b11_cbr2_conv\nI0818 15:07:32.211877 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.211882 21769 net.cpp:165] Memory required for data: 2163098800\nI0818 15:07:32.211891 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_bn\nI0818 15:07:32.211905 21769 net.cpp:100] Creating Layer L3_b11_cbr2_bn\nI0818 15:07:32.211911 21769 net.cpp:434] L3_b11_cbr2_bn <- L3_b11_cbr2_conv_top\nI0818 15:07:32.211922 21769 net.cpp:408] L3_b11_cbr2_bn -> L3_b11_cbr2_bn_top\nI0818 15:07:32.212210 21769 net.cpp:150] Setting up L3_b11_cbr2_bn\nI0818 15:07:32.212224 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.212229 21769 net.cpp:165] Memory required for data: 2164737200\nI0818 15:07:32.212239 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0818 15:07:32.212249 21769 net.cpp:100] Creating Layer L3_b11_cbr2_scale\nI0818 15:07:32.212255 21769 net.cpp:434] L3_b11_cbr2_scale <- L3_b11_cbr2_bn_top\nI0818 15:07:32.212265 21769 net.cpp:395] L3_b11_cbr2_scale -> L3_b11_cbr2_bn_top (in-place)\nI0818 15:07:32.212342 21769 layer_factory.hpp:77] Creating layer L3_b11_cbr2_scale\nI0818 15:07:32.212518 21769 net.cpp:150] Setting up L3_b11_cbr2_scale\nI0818 15:07:32.212532 21769 net.cpp:157] Top 
shape: 100 64 8 8 (409600)\nI0818 15:07:32.212538 21769 net.cpp:165] Memory required for data: 2166375600\nI0818 15:07:32.212546 21769 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise\nI0818 15:07:32.212555 21769 net.cpp:100] Creating Layer L3_b11_sum_eltwise\nI0818 15:07:32.212563 21769 net.cpp:434] L3_b11_sum_eltwise <- L3_b11_cbr2_bn_top\nI0818 15:07:32.212569 21769 net.cpp:434] L3_b11_sum_eltwise <- L3_b10_sum_eltwise_top_L3_b10_relu_0_split_1\nI0818 15:07:32.212580 21769 net.cpp:408] L3_b11_sum_eltwise -> L3_b11_sum_eltwise_top\nI0818 15:07:32.212620 21769 net.cpp:150] Setting up L3_b11_sum_eltwise\nI0818 15:07:32.212633 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.212638 21769 net.cpp:165] Memory required for data: 2168014000\nI0818 15:07:32.212643 21769 layer_factory.hpp:77] Creating layer L3_b11_relu\nI0818 15:07:32.212651 21769 net.cpp:100] Creating Layer L3_b11_relu\nI0818 15:07:32.212657 21769 net.cpp:434] L3_b11_relu <- L3_b11_sum_eltwise_top\nI0818 15:07:32.212667 21769 net.cpp:395] L3_b11_relu -> L3_b11_sum_eltwise_top (in-place)\nI0818 15:07:32.212677 21769 net.cpp:150] Setting up L3_b11_relu\nI0818 15:07:32.212690 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.212695 21769 net.cpp:165] Memory required for data: 2169652400\nI0818 15:07:32.212700 21769 layer_factory.hpp:77] Creating layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 15:07:32.212708 21769 net.cpp:100] Creating Layer L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 15:07:32.212713 21769 net.cpp:434] L3_b11_sum_eltwise_top_L3_b11_relu_0_split <- L3_b11_sum_eltwise_top\nI0818 15:07:32.212721 21769 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0818 15:07:32.212731 21769 net.cpp:408] L3_b11_sum_eltwise_top_L3_b11_relu_0_split -> L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0818 15:07:32.212786 21769 net.cpp:150] Setting up L3_b11_sum_eltwise_top_L3_b11_relu_0_split\nI0818 
15:07:32.212798 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.212805 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.212810 21769 net.cpp:165] Memory required for data: 2172929200\nI0818 15:07:32.212815 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_conv\nI0818 15:07:32.212826 21769 net.cpp:100] Creating Layer L3_b12_cbr1_conv\nI0818 15:07:32.212832 21769 net.cpp:434] L3_b12_cbr1_conv <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_0\nI0818 15:07:32.212844 21769 net.cpp:408] L3_b12_cbr1_conv -> L3_b12_cbr1_conv_top\nI0818 15:07:32.213914 21769 net.cpp:150] Setting up L3_b12_cbr1_conv\nI0818 15:07:32.213930 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.213935 21769 net.cpp:165] Memory required for data: 2174567600\nI0818 15:07:32.213944 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_bn\nI0818 15:07:32.213954 21769 net.cpp:100] Creating Layer L3_b12_cbr1_bn\nI0818 15:07:32.213960 21769 net.cpp:434] L3_b12_cbr1_bn <- L3_b12_cbr1_conv_top\nI0818 15:07:32.213973 21769 net.cpp:408] L3_b12_cbr1_bn -> L3_b12_cbr1_bn_top\nI0818 15:07:32.214263 21769 net.cpp:150] Setting up L3_b12_cbr1_bn\nI0818 15:07:32.214277 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.214282 21769 net.cpp:165] Memory required for data: 2176206000\nI0818 15:07:32.214293 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0818 15:07:32.214301 21769 net.cpp:100] Creating Layer L3_b12_cbr1_scale\nI0818 15:07:32.214308 21769 net.cpp:434] L3_b12_cbr1_scale <- L3_b12_cbr1_bn_top\nI0818 15:07:32.214315 21769 net.cpp:395] L3_b12_cbr1_scale -> L3_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.214381 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr1_scale\nI0818 15:07:32.214548 21769 net.cpp:150] Setting up L3_b12_cbr1_scale\nI0818 15:07:32.214565 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.214581 21769 net.cpp:165] Memory required for data: 2177844400\nI0818 15:07:32.214591 21769 
layer_factory.hpp:77] Creating layer L3_b12_cbr1_relu\nI0818 15:07:32.214598 21769 net.cpp:100] Creating Layer L3_b12_cbr1_relu\nI0818 15:07:32.214606 21769 net.cpp:434] L3_b12_cbr1_relu <- L3_b12_cbr1_bn_top\nI0818 15:07:32.214612 21769 net.cpp:395] L3_b12_cbr1_relu -> L3_b12_cbr1_bn_top (in-place)\nI0818 15:07:32.214622 21769 net.cpp:150] Setting up L3_b12_cbr1_relu\nI0818 15:07:32.214629 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.214633 21769 net.cpp:165] Memory required for data: 2179482800\nI0818 15:07:32.214638 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_conv\nI0818 15:07:32.214653 21769 net.cpp:100] Creating Layer L3_b12_cbr2_conv\nI0818 15:07:32.214658 21769 net.cpp:434] L3_b12_cbr2_conv <- L3_b12_cbr1_bn_top\nI0818 15:07:32.214668 21769 net.cpp:408] L3_b12_cbr2_conv -> L3_b12_cbr2_conv_top\nI0818 15:07:32.215720 21769 net.cpp:150] Setting up L3_b12_cbr2_conv\nI0818 15:07:32.215735 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.215740 21769 net.cpp:165] Memory required for data: 2181121200\nI0818 15:07:32.215749 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_bn\nI0818 15:07:32.215764 21769 net.cpp:100] Creating Layer L3_b12_cbr2_bn\nI0818 15:07:32.215771 21769 net.cpp:434] L3_b12_cbr2_bn <- L3_b12_cbr2_conv_top\nI0818 15:07:32.215785 21769 net.cpp:408] L3_b12_cbr2_bn -> L3_b12_cbr2_bn_top\nI0818 15:07:32.216069 21769 net.cpp:150] Setting up L3_b12_cbr2_bn\nI0818 15:07:32.216083 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216087 21769 net.cpp:165] Memory required for data: 2182759600\nI0818 15:07:32.216099 21769 layer_factory.hpp:77] Creating layer L3_b12_cbr2_scale\nI0818 15:07:32.216107 21769 net.cpp:100] Creating Layer L3_b12_cbr2_scale\nI0818 15:07:32.216114 21769 net.cpp:434] L3_b12_cbr2_scale <- L3_b12_cbr2_bn_top\nI0818 15:07:32.216125 21769 net.cpp:395] L3_b12_cbr2_scale -> L3_b12_cbr2_bn_top (in-place)\nI0818 15:07:32.216188 21769 layer_factory.hpp:77] Creating layer 
L3_b12_cbr2_scale\nI0818 15:07:32.216358 21769 net.cpp:150] Setting up L3_b12_cbr2_scale\nI0818 15:07:32.216372 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216377 21769 net.cpp:165] Memory required for data: 2184398000\nI0818 15:07:32.216387 21769 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise\nI0818 15:07:32.216399 21769 net.cpp:100] Creating Layer L3_b12_sum_eltwise\nI0818 15:07:32.216406 21769 net.cpp:434] L3_b12_sum_eltwise <- L3_b12_cbr2_bn_top\nI0818 15:07:32.216413 21769 net.cpp:434] L3_b12_sum_eltwise <- L3_b11_sum_eltwise_top_L3_b11_relu_0_split_1\nI0818 15:07:32.216421 21769 net.cpp:408] L3_b12_sum_eltwise -> L3_b12_sum_eltwise_top\nI0818 15:07:32.216462 21769 net.cpp:150] Setting up L3_b12_sum_eltwise\nI0818 15:07:32.216475 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216480 21769 net.cpp:165] Memory required for data: 2186036400\nI0818 15:07:32.216485 21769 layer_factory.hpp:77] Creating layer L3_b12_relu\nI0818 15:07:32.216491 21769 net.cpp:100] Creating Layer L3_b12_relu\nI0818 15:07:32.216497 21769 net.cpp:434] L3_b12_relu <- L3_b12_sum_eltwise_top\nI0818 15:07:32.216508 21769 net.cpp:395] L3_b12_relu -> L3_b12_sum_eltwise_top (in-place)\nI0818 15:07:32.216518 21769 net.cpp:150] Setting up L3_b12_relu\nI0818 15:07:32.216526 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216529 21769 net.cpp:165] Memory required for data: 2187674800\nI0818 15:07:32.216534 21769 layer_factory.hpp:77] Creating layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:32.216542 21769 net.cpp:100] Creating Layer L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:32.216547 21769 net.cpp:434] L3_b12_sum_eltwise_top_L3_b12_relu_0_split <- L3_b12_sum_eltwise_top\nI0818 15:07:32.216554 21769 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0818 15:07:32.216563 21769 net.cpp:408] L3_b12_sum_eltwise_top_L3_b12_relu_0_split -> 
L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0818 15:07:32.216617 21769 net.cpp:150] Setting up L3_b12_sum_eltwise_top_L3_b12_relu_0_split\nI0818 15:07:32.216635 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216642 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.216647 21769 net.cpp:165] Memory required for data: 2190951600\nI0818 15:07:32.216652 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_conv\nI0818 15:07:32.216663 21769 net.cpp:100] Creating Layer L3_b13_cbr1_conv\nI0818 15:07:32.216670 21769 net.cpp:434] L3_b13_cbr1_conv <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_0\nI0818 15:07:32.216687 21769 net.cpp:408] L3_b13_cbr1_conv -> L3_b13_cbr1_conv_top\nI0818 15:07:32.218746 21769 net.cpp:150] Setting up L3_b13_cbr1_conv\nI0818 15:07:32.218765 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.218770 21769 net.cpp:165] Memory required for data: 2192590000\nI0818 15:07:32.218780 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_bn\nI0818 15:07:32.218792 21769 net.cpp:100] Creating Layer L3_b13_cbr1_bn\nI0818 15:07:32.218799 21769 net.cpp:434] L3_b13_cbr1_bn <- L3_b13_cbr1_conv_top\nI0818 15:07:32.218808 21769 net.cpp:408] L3_b13_cbr1_bn -> L3_b13_cbr1_bn_top\nI0818 15:07:32.219103 21769 net.cpp:150] Setting up L3_b13_cbr1_bn\nI0818 15:07:32.219116 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.219121 21769 net.cpp:165] Memory required for data: 2194228400\nI0818 15:07:32.219132 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0818 15:07:32.219144 21769 net.cpp:100] Creating Layer L3_b13_cbr1_scale\nI0818 15:07:32.219151 21769 net.cpp:434] L3_b13_cbr1_scale <- L3_b13_cbr1_bn_top\nI0818 15:07:32.219159 21769 net.cpp:395] L3_b13_cbr1_scale -> L3_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.219229 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_scale\nI0818 15:07:32.219403 21769 net.cpp:150] Setting up L3_b13_cbr1_scale\nI0818 15:07:32.219418 21769 net.cpp:157] 
Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.219422 21769 net.cpp:165] Memory required for data: 2195866800\nI0818 15:07:32.219431 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr1_relu\nI0818 15:07:32.219442 21769 net.cpp:100] Creating Layer L3_b13_cbr1_relu\nI0818 15:07:32.219450 21769 net.cpp:434] L3_b13_cbr1_relu <- L3_b13_cbr1_bn_top\nI0818 15:07:32.219457 21769 net.cpp:395] L3_b13_cbr1_relu -> L3_b13_cbr1_bn_top (in-place)\nI0818 15:07:32.219467 21769 net.cpp:150] Setting up L3_b13_cbr1_relu\nI0818 15:07:32.219477 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.219482 21769 net.cpp:165] Memory required for data: 2197505200\nI0818 15:07:32.219487 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_conv\nI0818 15:07:32.219498 21769 net.cpp:100] Creating Layer L3_b13_cbr2_conv\nI0818 15:07:32.219504 21769 net.cpp:434] L3_b13_cbr2_conv <- L3_b13_cbr1_bn_top\nI0818 15:07:32.219516 21769 net.cpp:408] L3_b13_cbr2_conv -> L3_b13_cbr2_conv_top\nI0818 15:07:32.220562 21769 net.cpp:150] Setting up L3_b13_cbr2_conv\nI0818 15:07:32.220577 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.220582 21769 net.cpp:165] Memory required for data: 2199143600\nI0818 15:07:32.220592 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_bn\nI0818 15:07:32.220602 21769 net.cpp:100] Creating Layer L3_b13_cbr2_bn\nI0818 15:07:32.220608 21769 net.cpp:434] L3_b13_cbr2_bn <- L3_b13_cbr2_conv_top\nI0818 15:07:32.220619 21769 net.cpp:408] L3_b13_cbr2_bn -> L3_b13_cbr2_bn_top\nI0818 15:07:32.220926 21769 net.cpp:150] Setting up L3_b13_cbr2_bn\nI0818 15:07:32.220939 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.220944 21769 net.cpp:165] Memory required for data: 2200782000\nI0818 15:07:32.220954 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0818 15:07:32.220963 21769 net.cpp:100] Creating Layer L3_b13_cbr2_scale\nI0818 15:07:32.220970 21769 net.cpp:434] L3_b13_cbr2_scale <- L3_b13_cbr2_bn_top\nI0818 
15:07:32.220978 21769 net.cpp:395] L3_b13_cbr2_scale -> L3_b13_cbr2_bn_top (in-place)\nI0818 15:07:32.221045 21769 layer_factory.hpp:77] Creating layer L3_b13_cbr2_scale\nI0818 15:07:32.221210 21769 net.cpp:150] Setting up L3_b13_cbr2_scale\nI0818 15:07:32.221235 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.221240 21769 net.cpp:165] Memory required for data: 2202420400\nI0818 15:07:32.221249 21769 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise\nI0818 15:07:32.221259 21769 net.cpp:100] Creating Layer L3_b13_sum_eltwise\nI0818 15:07:32.221266 21769 net.cpp:434] L3_b13_sum_eltwise <- L3_b13_cbr2_bn_top\nI0818 15:07:32.221272 21769 net.cpp:434] L3_b13_sum_eltwise <- L3_b12_sum_eltwise_top_L3_b12_relu_0_split_1\nI0818 15:07:32.221280 21769 net.cpp:408] L3_b13_sum_eltwise -> L3_b13_sum_eltwise_top\nI0818 15:07:32.221320 21769 net.cpp:150] Setting up L3_b13_sum_eltwise\nI0818 15:07:32.221333 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.221338 21769 net.cpp:165] Memory required for data: 2204058800\nI0818 15:07:32.221343 21769 layer_factory.hpp:77] Creating layer L3_b13_relu\nI0818 15:07:32.221350 21769 net.cpp:100] Creating Layer L3_b13_relu\nI0818 15:07:32.221357 21769 net.cpp:434] L3_b13_relu <- L3_b13_sum_eltwise_top\nI0818 15:07:32.221364 21769 net.cpp:395] L3_b13_relu -> L3_b13_sum_eltwise_top (in-place)\nI0818 15:07:32.221374 21769 net.cpp:150] Setting up L3_b13_relu\nI0818 15:07:32.221380 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.221385 21769 net.cpp:165] Memory required for data: 2205697200\nI0818 15:07:32.221390 21769 layer_factory.hpp:77] Creating layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:32.221396 21769 net.cpp:100] Creating Layer L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:32.221402 21769 net.cpp:434] L3_b13_sum_eltwise_top_L3_b13_relu_0_split <- L3_b13_sum_eltwise_top\nI0818 15:07:32.221412 21769 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> 
L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0818 15:07:32.221422 21769 net.cpp:408] L3_b13_sum_eltwise_top_L3_b13_relu_0_split -> L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0818 15:07:32.221473 21769 net.cpp:150] Setting up L3_b13_sum_eltwise_top_L3_b13_relu_0_split\nI0818 15:07:32.221489 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.221496 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.221500 21769 net.cpp:165] Memory required for data: 2208974000\nI0818 15:07:32.221505 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_conv\nI0818 15:07:32.221518 21769 net.cpp:100] Creating Layer L3_b14_cbr1_conv\nI0818 15:07:32.221524 21769 net.cpp:434] L3_b14_cbr1_conv <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_0\nI0818 15:07:32.221534 21769 net.cpp:408] L3_b14_cbr1_conv -> L3_b14_cbr1_conv_top\nI0818 15:07:32.222579 21769 net.cpp:150] Setting up L3_b14_cbr1_conv\nI0818 15:07:32.222595 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.222600 21769 net.cpp:165] Memory required for data: 2210612400\nI0818 15:07:32.222609 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_bn\nI0818 15:07:32.222622 21769 net.cpp:100] Creating Layer L3_b14_cbr1_bn\nI0818 15:07:32.222630 21769 net.cpp:434] L3_b14_cbr1_bn <- L3_b14_cbr1_conv_top\nI0818 15:07:32.222637 21769 net.cpp:408] L3_b14_cbr1_bn -> L3_b14_cbr1_bn_top\nI0818 15:07:32.222929 21769 net.cpp:150] Setting up L3_b14_cbr1_bn\nI0818 15:07:32.222944 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.222949 21769 net.cpp:165] Memory required for data: 2212250800\nI0818 15:07:32.222959 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_scale\nI0818 15:07:32.222975 21769 net.cpp:100] Creating Layer L3_b14_cbr1_scale\nI0818 15:07:32.222981 21769 net.cpp:434] L3_b14_cbr1_scale <- L3_b14_cbr1_bn_top\nI0818 15:07:32.222990 21769 net.cpp:395] L3_b14_cbr1_scale -> L3_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.223059 21769 layer_factory.hpp:77] Creating 
layer L3_b14_cbr1_scale\nI0818 15:07:32.223230 21769 net.cpp:150] Setting up L3_b14_cbr1_scale\nI0818 15:07:32.223244 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.223249 21769 net.cpp:165] Memory required for data: 2213889200\nI0818 15:07:32.223258 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr1_relu\nI0818 15:07:32.223269 21769 net.cpp:100] Creating Layer L3_b14_cbr1_relu\nI0818 15:07:32.223284 21769 net.cpp:434] L3_b14_cbr1_relu <- L3_b14_cbr1_bn_top\nI0818 15:07:32.223294 21769 net.cpp:395] L3_b14_cbr1_relu -> L3_b14_cbr1_bn_top (in-place)\nI0818 15:07:32.223304 21769 net.cpp:150] Setting up L3_b14_cbr1_relu\nI0818 15:07:32.223311 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.223316 21769 net.cpp:165] Memory required for data: 2215527600\nI0818 15:07:32.223321 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_conv\nI0818 15:07:32.223332 21769 net.cpp:100] Creating Layer L3_b14_cbr2_conv\nI0818 15:07:32.223338 21769 net.cpp:434] L3_b14_cbr2_conv <- L3_b14_cbr1_bn_top\nI0818 15:07:32.223350 21769 net.cpp:408] L3_b14_cbr2_conv -> L3_b14_cbr2_conv_top\nI0818 15:07:32.224400 21769 net.cpp:150] Setting up L3_b14_cbr2_conv\nI0818 15:07:32.224416 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.224421 21769 net.cpp:165] Memory required for data: 2217166000\nI0818 15:07:32.224429 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_bn\nI0818 15:07:32.224442 21769 net.cpp:100] Creating Layer L3_b14_cbr2_bn\nI0818 15:07:32.224449 21769 net.cpp:434] L3_b14_cbr2_bn <- L3_b14_cbr2_conv_top\nI0818 15:07:32.224457 21769 net.cpp:408] L3_b14_cbr2_bn -> L3_b14_cbr2_bn_top\nI0818 15:07:32.224758 21769 net.cpp:150] Setting up L3_b14_cbr2_bn\nI0818 15:07:32.224772 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.224778 21769 net.cpp:165] Memory required for data: 2218804400\nI0818 15:07:32.224788 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0818 15:07:32.224797 21769 
net.cpp:100] Creating Layer L3_b14_cbr2_scale\nI0818 15:07:32.224803 21769 net.cpp:434] L3_b14_cbr2_scale <- L3_b14_cbr2_bn_top\nI0818 15:07:32.224812 21769 net.cpp:395] L3_b14_cbr2_scale -> L3_b14_cbr2_bn_top (in-place)\nI0818 15:07:32.224877 21769 layer_factory.hpp:77] Creating layer L3_b14_cbr2_scale\nI0818 15:07:32.225047 21769 net.cpp:150] Setting up L3_b14_cbr2_scale\nI0818 15:07:32.225065 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.225071 21769 net.cpp:165] Memory required for data: 2220442800\nI0818 15:07:32.225080 21769 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise\nI0818 15:07:32.225090 21769 net.cpp:100] Creating Layer L3_b14_sum_eltwise\nI0818 15:07:32.225095 21769 net.cpp:434] L3_b14_sum_eltwise <- L3_b14_cbr2_bn_top\nI0818 15:07:32.225102 21769 net.cpp:434] L3_b14_sum_eltwise <- L3_b13_sum_eltwise_top_L3_b13_relu_0_split_1\nI0818 15:07:32.225111 21769 net.cpp:408] L3_b14_sum_eltwise -> L3_b14_sum_eltwise_top\nI0818 15:07:32.225153 21769 net.cpp:150] Setting up L3_b14_sum_eltwise\nI0818 15:07:32.225167 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.225172 21769 net.cpp:165] Memory required for data: 2222081200\nI0818 15:07:32.225177 21769 layer_factory.hpp:77] Creating layer L3_b14_relu\nI0818 15:07:32.225184 21769 net.cpp:100] Creating Layer L3_b14_relu\nI0818 15:07:32.225190 21769 net.cpp:434] L3_b14_relu <- L3_b14_sum_eltwise_top\nI0818 15:07:32.225198 21769 net.cpp:395] L3_b14_relu -> L3_b14_sum_eltwise_top (in-place)\nI0818 15:07:32.225208 21769 net.cpp:150] Setting up L3_b14_relu\nI0818 15:07:32.225214 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.225219 21769 net.cpp:165] Memory required for data: 2223719600\nI0818 15:07:32.225224 21769 layer_factory.hpp:77] Creating layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:32.225299 21769 net.cpp:100] Creating Layer L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:32.225309 21769 net.cpp:434] 
L3_b14_sum_eltwise_top_L3_b14_relu_0_split <- L3_b14_sum_eltwise_top\nI0818 15:07:32.225318 21769 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0818 15:07:32.225328 21769 net.cpp:408] L3_b14_sum_eltwise_top_L3_b14_relu_0_split -> L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0818 15:07:32.225383 21769 net.cpp:150] Setting up L3_b14_sum_eltwise_top_L3_b14_relu_0_split\nI0818 15:07:32.225394 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.225400 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.225412 21769 net.cpp:165] Memory required for data: 2226996400\nI0818 15:07:32.225419 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_conv\nI0818 15:07:32.225435 21769 net.cpp:100] Creating Layer L3_b15_cbr1_conv\nI0818 15:07:32.225442 21769 net.cpp:434] L3_b15_cbr1_conv <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_0\nI0818 15:07:32.225451 21769 net.cpp:408] L3_b15_cbr1_conv -> L3_b15_cbr1_conv_top\nI0818 15:07:32.226505 21769 net.cpp:150] Setting up L3_b15_cbr1_conv\nI0818 15:07:32.226521 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.226526 21769 net.cpp:165] Memory required for data: 2228634800\nI0818 15:07:32.226534 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_bn\nI0818 15:07:32.226547 21769 net.cpp:100] Creating Layer L3_b15_cbr1_bn\nI0818 15:07:32.226553 21769 net.cpp:434] L3_b15_cbr1_bn <- L3_b15_cbr1_conv_top\nI0818 15:07:32.226562 21769 net.cpp:408] L3_b15_cbr1_bn -> L3_b15_cbr1_bn_top\nI0818 15:07:32.226863 21769 net.cpp:150] Setting up L3_b15_cbr1_bn\nI0818 15:07:32.226877 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.226882 21769 net.cpp:165] Memory required for data: 2230273200\nI0818 15:07:32.226894 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0818 15:07:32.226907 21769 net.cpp:100] Creating Layer L3_b15_cbr1_scale\nI0818 15:07:32.226913 21769 net.cpp:434] L3_b15_cbr1_scale <- 
L3_b15_cbr1_bn_top\nI0818 15:07:32.226924 21769 net.cpp:395] L3_b15_cbr1_scale -> L3_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.226989 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_scale\nI0818 15:07:32.227164 21769 net.cpp:150] Setting up L3_b15_cbr1_scale\nI0818 15:07:32.227176 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.227182 21769 net.cpp:165] Memory required for data: 2231911600\nI0818 15:07:32.227191 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr1_relu\nI0818 15:07:32.227200 21769 net.cpp:100] Creating Layer L3_b15_cbr1_relu\nI0818 15:07:32.227205 21769 net.cpp:434] L3_b15_cbr1_relu <- L3_b15_cbr1_bn_top\nI0818 15:07:32.227216 21769 net.cpp:395] L3_b15_cbr1_relu -> L3_b15_cbr1_bn_top (in-place)\nI0818 15:07:32.227226 21769 net.cpp:150] Setting up L3_b15_cbr1_relu\nI0818 15:07:32.227233 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.227238 21769 net.cpp:165] Memory required for data: 2233550000\nI0818 15:07:32.227242 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_conv\nI0818 15:07:32.227253 21769 net.cpp:100] Creating Layer L3_b15_cbr2_conv\nI0818 15:07:32.227259 21769 net.cpp:434] L3_b15_cbr2_conv <- L3_b15_cbr1_bn_top\nI0818 15:07:32.227272 21769 net.cpp:408] L3_b15_cbr2_conv -> L3_b15_cbr2_conv_top\nI0818 15:07:32.228325 21769 net.cpp:150] Setting up L3_b15_cbr2_conv\nI0818 15:07:32.228341 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.228346 21769 net.cpp:165] Memory required for data: 2235188400\nI0818 15:07:32.228355 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_bn\nI0818 15:07:32.228368 21769 net.cpp:100] Creating Layer L3_b15_cbr2_bn\nI0818 15:07:32.228374 21769 net.cpp:434] L3_b15_cbr2_bn <- L3_b15_cbr2_conv_top\nI0818 15:07:32.228384 21769 net.cpp:408] L3_b15_cbr2_bn -> L3_b15_cbr2_bn_top\nI0818 15:07:32.228677 21769 net.cpp:150] Setting up L3_b15_cbr2_bn\nI0818 15:07:32.228695 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.228700 21769 
net.cpp:165] Memory required for data: 2236826800\nI0818 15:07:32.228711 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0818 15:07:32.228720 21769 net.cpp:100] Creating Layer L3_b15_cbr2_scale\nI0818 15:07:32.228726 21769 net.cpp:434] L3_b15_cbr2_scale <- L3_b15_cbr2_bn_top\nI0818 15:07:32.228734 21769 net.cpp:395] L3_b15_cbr2_scale -> L3_b15_cbr2_bn_top (in-place)\nI0818 15:07:32.228802 21769 layer_factory.hpp:77] Creating layer L3_b15_cbr2_scale\nI0818 15:07:32.228976 21769 net.cpp:150] Setting up L3_b15_cbr2_scale\nI0818 15:07:32.228989 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.228994 21769 net.cpp:165] Memory required for data: 2238465200\nI0818 15:07:32.229003 21769 layer_factory.hpp:77] Creating layer L3_b15_sum_eltwise\nI0818 15:07:32.229019 21769 net.cpp:100] Creating Layer L3_b15_sum_eltwise\nI0818 15:07:32.229027 21769 net.cpp:434] L3_b15_sum_eltwise <- L3_b15_cbr2_bn_top\nI0818 15:07:32.229034 21769 net.cpp:434] L3_b15_sum_eltwise <- L3_b14_sum_eltwise_top_L3_b14_relu_0_split_1\nI0818 15:07:32.229044 21769 net.cpp:408] L3_b15_sum_eltwise -> L3_b15_sum_eltwise_top\nI0818 15:07:32.229082 21769 net.cpp:150] Setting up L3_b15_sum_eltwise\nI0818 15:07:32.229095 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.229100 21769 net.cpp:165] Memory required for data: 2240103600\nI0818 15:07:32.229105 21769 layer_factory.hpp:77] Creating layer L3_b15_relu\nI0818 15:07:32.229113 21769 net.cpp:100] Creating Layer L3_b15_relu\nI0818 15:07:32.229120 21769 net.cpp:434] L3_b15_relu <- L3_b15_sum_eltwise_top\nI0818 15:07:32.229127 21769 net.cpp:395] L3_b15_relu -> L3_b15_sum_eltwise_top (in-place)\nI0818 15:07:32.229136 21769 net.cpp:150] Setting up L3_b15_relu\nI0818 15:07:32.229143 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.229147 21769 net.cpp:165] Memory required for data: 2241742000\nI0818 15:07:32.229152 21769 layer_factory.hpp:77] Creating layer 
L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:32.229162 21769 net.cpp:100] Creating Layer L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:32.229168 21769 net.cpp:434] L3_b15_sum_eltwise_top_L3_b15_relu_0_split <- L3_b15_sum_eltwise_top\nI0818 15:07:32.229176 21769 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0818 15:07:32.229185 21769 net.cpp:408] L3_b15_sum_eltwise_top_L3_b15_relu_0_split -> L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0818 15:07:32.229235 21769 net.cpp:150] Setting up L3_b15_sum_eltwise_top_L3_b15_relu_0_split\nI0818 15:07:32.229250 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.229257 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.229262 21769 net.cpp:165] Memory required for data: 2245018800\nI0818 15:07:32.229267 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_conv\nI0818 15:07:32.229279 21769 net.cpp:100] Creating Layer L3_b16_cbr1_conv\nI0818 15:07:32.229285 21769 net.cpp:434] L3_b16_cbr1_conv <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_0\nI0818 15:07:32.229295 21769 net.cpp:408] L3_b16_cbr1_conv -> L3_b16_cbr1_conv_top\nI0818 15:07:32.230345 21769 net.cpp:150] Setting up L3_b16_cbr1_conv\nI0818 15:07:32.230362 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.230368 21769 net.cpp:165] Memory required for data: 2246657200\nI0818 15:07:32.230377 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_bn\nI0818 15:07:32.230386 21769 net.cpp:100] Creating Layer L3_b16_cbr1_bn\nI0818 15:07:32.230393 21769 net.cpp:434] L3_b16_cbr1_bn <- L3_b16_cbr1_conv_top\nI0818 15:07:32.230406 21769 net.cpp:408] L3_b16_cbr1_bn -> L3_b16_cbr1_bn_top\nI0818 15:07:32.230711 21769 net.cpp:150] Setting up L3_b16_cbr1_bn\nI0818 15:07:32.230726 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.230731 21769 net.cpp:165] Memory required for data: 2248295600\nI0818 15:07:32.230741 21769 layer_factory.hpp:77] 
Creating layer L3_b16_cbr1_scale\nI0818 15:07:32.230752 21769 net.cpp:100] Creating Layer L3_b16_cbr1_scale\nI0818 15:07:32.230759 21769 net.cpp:434] L3_b16_cbr1_scale <- L3_b16_cbr1_bn_top\nI0818 15:07:32.230767 21769 net.cpp:395] L3_b16_cbr1_scale -> L3_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.230835 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_scale\nI0818 15:07:32.231012 21769 net.cpp:150] Setting up L3_b16_cbr1_scale\nI0818 15:07:32.231025 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.231031 21769 net.cpp:165] Memory required for data: 2249934000\nI0818 15:07:32.231040 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr1_relu\nI0818 15:07:32.231047 21769 net.cpp:100] Creating Layer L3_b16_cbr1_relu\nI0818 15:07:32.231055 21769 net.cpp:434] L3_b16_cbr1_relu <- L3_b16_cbr1_bn_top\nI0818 15:07:32.231065 21769 net.cpp:395] L3_b16_cbr1_relu -> L3_b16_cbr1_bn_top (in-place)\nI0818 15:07:32.231081 21769 net.cpp:150] Setting up L3_b16_cbr1_relu\nI0818 15:07:32.231089 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.231094 21769 net.cpp:165] Memory required for data: 2251572400\nI0818 15:07:32.231099 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_conv\nI0818 15:07:32.231112 21769 net.cpp:100] Creating Layer L3_b16_cbr2_conv\nI0818 15:07:32.231119 21769 net.cpp:434] L3_b16_cbr2_conv <- L3_b16_cbr1_bn_top\nI0818 15:07:32.231128 21769 net.cpp:408] L3_b16_cbr2_conv -> L3_b16_cbr2_conv_top\nI0818 15:07:32.233176 21769 net.cpp:150] Setting up L3_b16_cbr2_conv\nI0818 15:07:32.233194 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.233199 21769 net.cpp:165] Memory required for data: 2253210800\nI0818 15:07:32.233209 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_bn\nI0818 15:07:32.233222 21769 net.cpp:100] Creating Layer L3_b16_cbr2_bn\nI0818 15:07:32.233229 21769 net.cpp:434] L3_b16_cbr2_bn <- L3_b16_cbr2_conv_top\nI0818 15:07:32.233238 21769 net.cpp:408] L3_b16_cbr2_bn -> 
L3_b16_cbr2_bn_top\nI0818 15:07:32.233537 21769 net.cpp:150] Setting up L3_b16_cbr2_bn\nI0818 15:07:32.233551 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.233556 21769 net.cpp:165] Memory required for data: 2254849200\nI0818 15:07:32.233566 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0818 15:07:32.233578 21769 net.cpp:100] Creating Layer L3_b16_cbr2_scale\nI0818 15:07:32.233585 21769 net.cpp:434] L3_b16_cbr2_scale <- L3_b16_cbr2_bn_top\nI0818 15:07:32.233595 21769 net.cpp:395] L3_b16_cbr2_scale -> L3_b16_cbr2_bn_top (in-place)\nI0818 15:07:32.233662 21769 layer_factory.hpp:77] Creating layer L3_b16_cbr2_scale\nI0818 15:07:32.233844 21769 net.cpp:150] Setting up L3_b16_cbr2_scale\nI0818 15:07:32.233858 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.233865 21769 net.cpp:165] Memory required for data: 2256487600\nI0818 15:07:32.233873 21769 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise\nI0818 15:07:32.233882 21769 net.cpp:100] Creating Layer L3_b16_sum_eltwise\nI0818 15:07:32.233889 21769 net.cpp:434] L3_b16_sum_eltwise <- L3_b16_cbr2_bn_top\nI0818 15:07:32.233896 21769 net.cpp:434] L3_b16_sum_eltwise <- L3_b15_sum_eltwise_top_L3_b15_relu_0_split_1\nI0818 15:07:32.233907 21769 net.cpp:408] L3_b16_sum_eltwise -> L3_b16_sum_eltwise_top\nI0818 15:07:32.233944 21769 net.cpp:150] Setting up L3_b16_sum_eltwise\nI0818 15:07:32.233956 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.233961 21769 net.cpp:165] Memory required for data: 2258126000\nI0818 15:07:32.233966 21769 layer_factory.hpp:77] Creating layer L3_b16_relu\nI0818 15:07:32.233978 21769 net.cpp:100] Creating Layer L3_b16_relu\nI0818 15:07:32.233983 21769 net.cpp:434] L3_b16_relu <- L3_b16_sum_eltwise_top\nI0818 15:07:32.233991 21769 net.cpp:395] L3_b16_relu -> L3_b16_sum_eltwise_top (in-place)\nI0818 15:07:32.234000 21769 net.cpp:150] Setting up L3_b16_relu\nI0818 15:07:32.234009 21769 net.cpp:157] Top shape: 100 64 8 8 
(409600)\nI0818 15:07:32.234012 21769 net.cpp:165] Memory required for data: 2259764400\nI0818 15:07:32.234017 21769 layer_factory.hpp:77] Creating layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.234025 21769 net.cpp:100] Creating Layer L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.234030 21769 net.cpp:434] L3_b16_sum_eltwise_top_L3_b16_relu_0_split <- L3_b16_sum_eltwise_top\nI0818 15:07:32.234037 21769 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0818 15:07:32.234046 21769 net.cpp:408] L3_b16_sum_eltwise_top_L3_b16_relu_0_split -> L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0818 15:07:32.234099 21769 net.cpp:150] Setting up L3_b16_sum_eltwise_top_L3_b16_relu_0_split\nI0818 15:07:32.234112 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.234118 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.234123 21769 net.cpp:165] Memory required for data: 2263041200\nI0818 15:07:32.234128 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_conv\nI0818 15:07:32.234143 21769 net.cpp:100] Creating Layer L3_b17_cbr1_conv\nI0818 15:07:32.234156 21769 net.cpp:434] L3_b17_cbr1_conv <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_0\nI0818 15:07:32.234167 21769 net.cpp:408] L3_b17_cbr1_conv -> L3_b17_cbr1_conv_top\nI0818 15:07:32.235216 21769 net.cpp:150] Setting up L3_b17_cbr1_conv\nI0818 15:07:32.235232 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.235237 21769 net.cpp:165] Memory required for data: 2264679600\nI0818 15:07:32.235246 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_bn\nI0818 15:07:32.235260 21769 net.cpp:100] Creating Layer L3_b17_cbr1_bn\nI0818 15:07:32.235266 21769 net.cpp:434] L3_b17_cbr1_bn <- L3_b17_cbr1_conv_top\nI0818 15:07:32.235278 21769 net.cpp:408] L3_b17_cbr1_bn -> L3_b17_cbr1_bn_top\nI0818 15:07:32.235570 21769 net.cpp:150] Setting up L3_b17_cbr1_bn\nI0818 15:07:32.235584 21769 net.cpp:157] Top shape: 
100 64 8 8 (409600)\nI0818 15:07:32.235589 21769 net.cpp:165] Memory required for data: 2266318000\nI0818 15:07:32.235599 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0818 15:07:32.235608 21769 net.cpp:100] Creating Layer L3_b17_cbr1_scale\nI0818 15:07:32.235615 21769 net.cpp:434] L3_b17_cbr1_scale <- L3_b17_cbr1_bn_top\nI0818 15:07:32.235626 21769 net.cpp:395] L3_b17_cbr1_scale -> L3_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.235697 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_scale\nI0818 15:07:32.235877 21769 net.cpp:150] Setting up L3_b17_cbr1_scale\nI0818 15:07:32.235890 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.235895 21769 net.cpp:165] Memory required for data: 2267956400\nI0818 15:07:32.235904 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr1_relu\nI0818 15:07:32.235913 21769 net.cpp:100] Creating Layer L3_b17_cbr1_relu\nI0818 15:07:32.235919 21769 net.cpp:434] L3_b17_cbr1_relu <- L3_b17_cbr1_bn_top\nI0818 15:07:32.235931 21769 net.cpp:395] L3_b17_cbr1_relu -> L3_b17_cbr1_bn_top (in-place)\nI0818 15:07:32.235941 21769 net.cpp:150] Setting up L3_b17_cbr1_relu\nI0818 15:07:32.235949 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.235954 21769 net.cpp:165] Memory required for data: 2269594800\nI0818 15:07:32.235958 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_conv\nI0818 15:07:32.235972 21769 net.cpp:100] Creating Layer L3_b17_cbr2_conv\nI0818 15:07:32.235978 21769 net.cpp:434] L3_b17_cbr2_conv <- L3_b17_cbr1_bn_top\nI0818 15:07:32.235991 21769 net.cpp:408] L3_b17_cbr2_conv -> L3_b17_cbr2_conv_top\nI0818 15:07:32.237041 21769 net.cpp:150] Setting up L3_b17_cbr2_conv\nI0818 15:07:32.237056 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237062 21769 net.cpp:165] Memory required for data: 2271233200\nI0818 15:07:32.237071 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_bn\nI0818 15:07:32.237081 21769 net.cpp:100] Creating Layer L3_b17_cbr2_bn\nI0818 
15:07:32.237087 21769 net.cpp:434] L3_b17_cbr2_bn <- L3_b17_cbr2_conv_top\nI0818 15:07:32.237098 21769 net.cpp:408] L3_b17_cbr2_bn -> L3_b17_cbr2_bn_top\nI0818 15:07:32.237416 21769 net.cpp:150] Setting up L3_b17_cbr2_bn\nI0818 15:07:32.237432 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237437 21769 net.cpp:165] Memory required for data: 2272871600\nI0818 15:07:32.237447 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0818 15:07:32.237459 21769 net.cpp:100] Creating Layer L3_b17_cbr2_scale\nI0818 15:07:32.237467 21769 net.cpp:434] L3_b17_cbr2_scale <- L3_b17_cbr2_bn_top\nI0818 15:07:32.237474 21769 net.cpp:395] L3_b17_cbr2_scale -> L3_b17_cbr2_bn_top (in-place)\nI0818 15:07:32.237540 21769 layer_factory.hpp:77] Creating layer L3_b17_cbr2_scale\nI0818 15:07:32.237718 21769 net.cpp:150] Setting up L3_b17_cbr2_scale\nI0818 15:07:32.237733 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237738 21769 net.cpp:165] Memory required for data: 2274510000\nI0818 15:07:32.237747 21769 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise\nI0818 15:07:32.237756 21769 net.cpp:100] Creating Layer L3_b17_sum_eltwise\nI0818 15:07:32.237763 21769 net.cpp:434] L3_b17_sum_eltwise <- L3_b17_cbr2_bn_top\nI0818 15:07:32.237777 21769 net.cpp:434] L3_b17_sum_eltwise <- L3_b16_sum_eltwise_top_L3_b16_relu_0_split_1\nI0818 15:07:32.237789 21769 net.cpp:408] L3_b17_sum_eltwise -> L3_b17_sum_eltwise_top\nI0818 15:07:32.237828 21769 net.cpp:150] Setting up L3_b17_sum_eltwise\nI0818 15:07:32.237836 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237841 21769 net.cpp:165] Memory required for data: 2276148400\nI0818 15:07:32.237846 21769 layer_factory.hpp:77] Creating layer L3_b17_relu\nI0818 15:07:32.237857 21769 net.cpp:100] Creating Layer L3_b17_relu\nI0818 15:07:32.237864 21769 net.cpp:434] L3_b17_relu <- L3_b17_sum_eltwise_top\nI0818 15:07:32.237870 21769 net.cpp:395] L3_b17_relu -> L3_b17_sum_eltwise_top 
(in-place)\nI0818 15:07:32.237880 21769 net.cpp:150] Setting up L3_b17_relu\nI0818 15:07:32.237887 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237892 21769 net.cpp:165] Memory required for data: 2277786800\nI0818 15:07:32.237897 21769 layer_factory.hpp:77] Creating layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.237905 21769 net.cpp:100] Creating Layer L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.237910 21769 net.cpp:434] L3_b17_sum_eltwise_top_L3_b17_relu_0_split <- L3_b17_sum_eltwise_top\nI0818 15:07:32.237916 21769 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0818 15:07:32.237926 21769 net.cpp:408] L3_b17_sum_eltwise_top_L3_b17_relu_0_split -> L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0818 15:07:32.237980 21769 net.cpp:150] Setting up L3_b17_sum_eltwise_top_L3_b17_relu_0_split\nI0818 15:07:32.237993 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.237999 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.238003 21769 net.cpp:165] Memory required for data: 2281063600\nI0818 15:07:32.238008 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_conv\nI0818 15:07:32.238023 21769 net.cpp:100] Creating Layer L3_b18_cbr1_conv\nI0818 15:07:32.238029 21769 net.cpp:434] L3_b18_cbr1_conv <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_0\nI0818 15:07:32.238039 21769 net.cpp:408] L3_b18_cbr1_conv -> L3_b18_cbr1_conv_top\nI0818 15:07:32.239089 21769 net.cpp:150] Setting up L3_b18_cbr1_conv\nI0818 15:07:32.239104 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.239109 21769 net.cpp:165] Memory required for data: 2282702000\nI0818 15:07:32.239118 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_bn\nI0818 15:07:32.239130 21769 net.cpp:100] Creating Layer L3_b18_cbr1_bn\nI0818 15:07:32.239137 21769 net.cpp:434] L3_b18_cbr1_bn <- L3_b18_cbr1_conv_top\nI0818 15:07:32.239148 21769 net.cpp:408] L3_b18_cbr1_bn -> 
L3_b18_cbr1_bn_top\nI0818 15:07:32.239437 21769 net.cpp:150] Setting up L3_b18_cbr1_bn\nI0818 15:07:32.239450 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.239455 21769 net.cpp:165] Memory required for data: 2284340400\nI0818 15:07:32.239466 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0818 15:07:32.239475 21769 net.cpp:100] Creating Layer L3_b18_cbr1_scale\nI0818 15:07:32.239481 21769 net.cpp:434] L3_b18_cbr1_scale <- L3_b18_cbr1_bn_top\nI0818 15:07:32.239492 21769 net.cpp:395] L3_b18_cbr1_scale -> L3_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.239557 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_scale\nI0818 15:07:32.239734 21769 net.cpp:150] Setting up L3_b18_cbr1_scale\nI0818 15:07:32.239748 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.239753 21769 net.cpp:165] Memory required for data: 2285978800\nI0818 15:07:32.239763 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr1_relu\nI0818 15:07:32.239773 21769 net.cpp:100] Creating Layer L3_b18_cbr1_relu\nI0818 15:07:32.239780 21769 net.cpp:434] L3_b18_cbr1_relu <- L3_b18_cbr1_bn_top\nI0818 15:07:32.239789 21769 net.cpp:395] L3_b18_cbr1_relu -> L3_b18_cbr1_bn_top (in-place)\nI0818 15:07:32.239799 21769 net.cpp:150] Setting up L3_b18_cbr1_relu\nI0818 15:07:32.239805 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.239809 21769 net.cpp:165] Memory required for data: 2287617200\nI0818 15:07:32.239821 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_conv\nI0818 15:07:32.239835 21769 net.cpp:100] Creating Layer L3_b18_cbr2_conv\nI0818 15:07:32.239842 21769 net.cpp:434] L3_b18_cbr2_conv <- L3_b18_cbr1_bn_top\nI0818 15:07:32.239853 21769 net.cpp:408] L3_b18_cbr2_conv -> L3_b18_cbr2_conv_top\nI0818 15:07:32.240902 21769 net.cpp:150] Setting up L3_b18_cbr2_conv\nI0818 15:07:32.240917 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.240922 21769 net.cpp:165] Memory required for data: 2289255600\nI0818 15:07:32.240936 
21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_bn\nI0818 15:07:32.240945 21769 net.cpp:100] Creating Layer L3_b18_cbr2_bn\nI0818 15:07:32.240952 21769 net.cpp:434] L3_b18_cbr2_bn <- L3_b18_cbr2_conv_top\nI0818 15:07:32.240965 21769 net.cpp:408] L3_b18_cbr2_bn -> L3_b18_cbr2_bn_top\nI0818 15:07:32.241266 21769 net.cpp:150] Setting up L3_b18_cbr2_bn\nI0818 15:07:32.241281 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.241287 21769 net.cpp:165] Memory required for data: 2290894000\nI0818 15:07:32.241298 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0818 15:07:32.241307 21769 net.cpp:100] Creating Layer L3_b18_cbr2_scale\nI0818 15:07:32.241313 21769 net.cpp:434] L3_b18_cbr2_scale <- L3_b18_cbr2_bn_top\nI0818 15:07:32.241322 21769 net.cpp:395] L3_b18_cbr2_scale -> L3_b18_cbr2_bn_top (in-place)\nI0818 15:07:32.241386 21769 layer_factory.hpp:77] Creating layer L3_b18_cbr2_scale\nI0818 15:07:32.241557 21769 net.cpp:150] Setting up L3_b18_cbr2_scale\nI0818 15:07:32.241570 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.241575 21769 net.cpp:165] Memory required for data: 2292532400\nI0818 15:07:32.241585 21769 layer_factory.hpp:77] Creating layer L3_b18_sum_eltwise\nI0818 15:07:32.241596 21769 net.cpp:100] Creating Layer L3_b18_sum_eltwise\nI0818 15:07:32.241603 21769 net.cpp:434] L3_b18_sum_eltwise <- L3_b18_cbr2_bn_top\nI0818 15:07:32.241611 21769 net.cpp:434] L3_b18_sum_eltwise <- L3_b17_sum_eltwise_top_L3_b17_relu_0_split_1\nI0818 15:07:32.241619 21769 net.cpp:408] L3_b18_sum_eltwise -> L3_b18_sum_eltwise_top\nI0818 15:07:32.241657 21769 net.cpp:150] Setting up L3_b18_sum_eltwise\nI0818 15:07:32.241669 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.241674 21769 net.cpp:165] Memory required for data: 2294170800\nI0818 15:07:32.241680 21769 layer_factory.hpp:77] Creating layer L3_b18_relu\nI0818 15:07:32.241694 21769 net.cpp:100] Creating Layer L3_b18_relu\nI0818 15:07:32.241700 21769 
net.cpp:434] L3_b18_relu <- L3_b18_sum_eltwise_top\nI0818 15:07:32.241708 21769 net.cpp:395] L3_b18_relu -> L3_b18_sum_eltwise_top (in-place)\nI0818 15:07:32.241717 21769 net.cpp:150] Setting up L3_b18_relu\nI0818 15:07:32.241725 21769 net.cpp:157] Top shape: 100 64 8 8 (409600)\nI0818 15:07:32.241729 21769 net.cpp:165] Memory required for data: 2295809200\nI0818 15:07:32.241734 21769 layer_factory.hpp:77] Creating layer post_pool\nI0818 15:07:32.241742 21769 net.cpp:100] Creating Layer post_pool\nI0818 15:07:32.241749 21769 net.cpp:434] post_pool <- L3_b18_sum_eltwise_top\nI0818 15:07:32.241761 21769 net.cpp:408] post_pool -> post_pool\nI0818 15:07:32.241801 21769 net.cpp:150] Setting up post_pool\nI0818 15:07:32.241812 21769 net.cpp:157] Top shape: 100 64 1 1 (6400)\nI0818 15:07:32.241817 21769 net.cpp:165] Memory required for data: 2295834800\nI0818 15:07:32.241823 21769 layer_factory.hpp:77] Creating layer post_FC\nI0818 15:07:32.241834 21769 net.cpp:100] Creating Layer post_FC\nI0818 15:07:32.241840 21769 net.cpp:434] post_FC <- post_pool\nI0818 15:07:32.241852 21769 net.cpp:408] post_FC -> post_FC_top\nI0818 15:07:32.242025 21769 net.cpp:150] Setting up post_FC\nI0818 15:07:32.242039 21769 net.cpp:157] Top shape: 100 10 (1000)\nI0818 15:07:32.242044 21769 net.cpp:165] Memory required for data: 2295838800\nI0818 15:07:32.242053 21769 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 15:07:32.242061 21769 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 15:07:32.242067 21769 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 15:07:32.242085 21769 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0818 15:07:32.242096 21769 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 15:07:32.242156 21769 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 15:07:32.242167 21769 net.cpp:157] Top shape: 100 10 (1000)\nI0818 15:07:32.242174 21769 net.cpp:157] Top 
shape: 100 10 (1000)\nI0818 15:07:32.242178 21769 net.cpp:165] Memory required for data: 2295846800\nI0818 15:07:32.242184 21769 layer_factory.hpp:77] Creating layer accuracy\nI0818 15:07:32.242192 21769 net.cpp:100] Creating Layer accuracy\nI0818 15:07:32.242198 21769 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 15:07:32.242205 21769 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 15:07:32.242213 21769 net.cpp:408] accuracy -> accuracy\nI0818 15:07:32.242224 21769 net.cpp:150] Setting up accuracy\nI0818 15:07:32.242231 21769 net.cpp:157] Top shape: (1)\nI0818 15:07:32.242236 21769 net.cpp:165] Memory required for data: 2295846804\nI0818 15:07:32.242241 21769 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:32.242249 21769 net.cpp:100] Creating Layer loss\nI0818 15:07:32.242254 21769 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 15:07:32.242260 21769 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 15:07:32.242271 21769 net.cpp:408] loss -> loss\nI0818 15:07:32.242283 21769 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:32.242408 21769 net.cpp:150] Setting up loss\nI0818 15:07:32.242420 21769 net.cpp:157] Top shape: (1)\nI0818 15:07:32.242425 21769 net.cpp:160]     with loss weight 1\nI0818 15:07:32.242442 21769 net.cpp:165] Memory required for data: 2295846808\nI0818 15:07:32.242449 21769 net.cpp:226] loss needs backward computation.\nI0818 15:07:32.242455 21769 net.cpp:228] accuracy does not need backward computation.\nI0818 15:07:32.242460 21769 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 15:07:32.242465 21769 net.cpp:226] post_FC needs backward computation.\nI0818 15:07:32.242471 21769 net.cpp:226] post_pool needs backward computation.\nI0818 15:07:32.242476 21769 net.cpp:226] L3_b18_relu needs backward computation.\nI0818 15:07:32.242480 21769 net.cpp:226] L3_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.242486 21769 net.cpp:226] L3_b18_cbr2_scale needs 
backward computation.\nI0818 15:07:32.242491 21769 net.cpp:226] L3_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.242496 21769 net.cpp:226] L3_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.242501 21769 net.cpp:226] L3_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.242506 21769 net.cpp:226] L3_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.242511 21769 net.cpp:226] L3_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.242516 21769 net.cpp:226] L3_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.242521 21769 net.cpp:226] L3_b17_sum_eltwise_top_L3_b17_relu_0_split needs backward computation.\nI0818 15:07:32.242525 21769 net.cpp:226] L3_b17_relu needs backward computation.\nI0818 15:07:32.242530 21769 net.cpp:226] L3_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.242537 21769 net.cpp:226] L3_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.242542 21769 net.cpp:226] L3_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.242547 21769 net.cpp:226] L3_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.242552 21769 net.cpp:226] L3_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.242557 21769 net.cpp:226] L3_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.242560 21769 net.cpp:226] L3_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.242566 21769 net.cpp:226] L3_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.242571 21769 net.cpp:226] L3_b16_sum_eltwise_top_L3_b16_relu_0_split needs backward computation.\nI0818 15:07:32.242576 21769 net.cpp:226] L3_b16_relu needs backward computation.\nI0818 15:07:32.242581 21769 net.cpp:226] L3_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.242589 21769 net.cpp:226] L3_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.242602 21769 net.cpp:226] L3_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.242609 21769 net.cpp:226] L3_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.242614 
21769 net.cpp:226] L3_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.242619 21769 net.cpp:226] L3_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.242622 21769 net.cpp:226] L3_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.242627 21769 net.cpp:226] L3_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.242633 21769 net.cpp:226] L3_b15_sum_eltwise_top_L3_b15_relu_0_split needs backward computation.\nI0818 15:07:32.242638 21769 net.cpp:226] L3_b15_relu needs backward computation.\nI0818 15:07:32.242643 21769 net.cpp:226] L3_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.242650 21769 net.cpp:226] L3_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.242653 21769 net.cpp:226] L3_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.242660 21769 net.cpp:226] L3_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.242664 21769 net.cpp:226] L3_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.242669 21769 net.cpp:226] L3_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.242674 21769 net.cpp:226] L3_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.242679 21769 net.cpp:226] L3_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.242691 21769 net.cpp:226] L3_b14_sum_eltwise_top_L3_b14_relu_0_split needs backward computation.\nI0818 15:07:32.242697 21769 net.cpp:226] L3_b14_relu needs backward computation.\nI0818 15:07:32.242702 21769 net.cpp:226] L3_b14_sum_eltwise needs backward computation.\nI0818 15:07:32.242707 21769 net.cpp:226] L3_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.242712 21769 net.cpp:226] L3_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.242718 21769 net.cpp:226] L3_b14_cbr2_conv needs backward computation.\nI0818 15:07:32.242723 21769 net.cpp:226] L3_b14_cbr1_relu needs backward computation.\nI0818 15:07:32.242728 21769 net.cpp:226] L3_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.242733 21769 net.cpp:226] L3_b14_cbr1_bn needs 
backward computation.\nI0818 15:07:32.242738 21769 net.cpp:226] L3_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.242744 21769 net.cpp:226] L3_b13_sum_eltwise_top_L3_b13_relu_0_split needs backward computation.\nI0818 15:07:32.242749 21769 net.cpp:226] L3_b13_relu needs backward computation.\nI0818 15:07:32.242754 21769 net.cpp:226] L3_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.242760 21769 net.cpp:226] L3_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.242765 21769 net.cpp:226] L3_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.242770 21769 net.cpp:226] L3_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.242775 21769 net.cpp:226] L3_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.242780 21769 net.cpp:226] L3_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.242785 21769 net.cpp:226] L3_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.242790 21769 net.cpp:226] L3_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.242795 21769 net.cpp:226] L3_b12_sum_eltwise_top_L3_b12_relu_0_split needs backward computation.\nI0818 15:07:32.242800 21769 net.cpp:226] L3_b12_relu needs backward computation.\nI0818 15:07:32.242806 21769 net.cpp:226] L3_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.242811 21769 net.cpp:226] L3_b12_cbr2_scale needs backward computation.\nI0818 15:07:32.242816 21769 net.cpp:226] L3_b12_cbr2_bn needs backward computation.\nI0818 15:07:32.242821 21769 net.cpp:226] L3_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.242826 21769 net.cpp:226] L3_b12_cbr1_relu needs backward computation.\nI0818 15:07:32.242831 21769 net.cpp:226] L3_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.242836 21769 net.cpp:226] L3_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.242841 21769 net.cpp:226] L3_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.242852 21769 net.cpp:226] L3_b11_sum_eltwise_top_L3_b11_relu_0_split needs backward 
computation.\nI0818 15:07:32.242858 21769 net.cpp:226] L3_b11_relu needs backward computation.\nI0818 15:07:32.242863 21769 net.cpp:226] L3_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.242869 21769 net.cpp:226] L3_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.242874 21769 net.cpp:226] L3_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.242880 21769 net.cpp:226] L3_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.242888 21769 net.cpp:226] L3_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.242894 21769 net.cpp:226] L3_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.242899 21769 net.cpp:226] L3_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.242904 21769 net.cpp:226] L3_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.242910 21769 net.cpp:226] L3_b10_sum_eltwise_top_L3_b10_relu_0_split needs backward computation.\nI0818 15:07:32.242915 21769 net.cpp:226] L3_b10_relu needs backward computation.\nI0818 15:07:32.242920 21769 net.cpp:226] L3_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.242926 21769 net.cpp:226] L3_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.242931 21769 net.cpp:226] L3_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.242938 21769 net.cpp:226] L3_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.242943 21769 net.cpp:226] L3_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.242947 21769 net.cpp:226] L3_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.242952 21769 net.cpp:226] L3_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.242957 21769 net.cpp:226] L3_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.242962 21769 net.cpp:226] L3_b9_sum_eltwise_top_L3_b9_relu_0_split needs backward computation.\nI0818 15:07:32.242969 21769 net.cpp:226] L3_b9_relu needs backward computation.\nI0818 15:07:32.242974 21769 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.242980 21769 
net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.242983 21769 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.242988 21769 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.242995 21769 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.242998 21769 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.243003 21769 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.243008 21769 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.243015 21769 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 15:07:32.243019 21769 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 15:07:32.243024 21769 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.243031 21769 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.243036 21769 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.243041 21769 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.243046 21769 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.243050 21769 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.243055 21769 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 15:07:32.243060 21769 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 15:07:32.243065 21769 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 15:07:32.243070 21769 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 15:07:32.243077 21769 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI0818 15:07:32.243082 21769 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.243086 21769 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.243093 21769 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 
15:07:32.243098 21769 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.243108 21769 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.243113 21769 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.243119 21769 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.243124 21769 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 15:07:32.243129 21769 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 15:07:32.243134 21769 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.243140 21769 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.243145 21769 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.243150 21769 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.243156 21769 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.243161 21769 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.243166 21769 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.243171 21769 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.243177 21769 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 15:07:32.243183 21769 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 15:07:32.243188 21769 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.243193 21769 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.243198 21769 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.243204 21769 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.243209 21769 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 15:07:32.243214 21769 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.243219 21769 net.cpp:226] L3_b5_cbr1_bn needs backward 
computation.\nI0818 15:07:32.243224 21769 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.243230 21769 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 15:07:32.243235 21769 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 15:07:32.243240 21769 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.243247 21769 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.243252 21769 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.243260 21769 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.243266 21769 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.243271 21769 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.243276 21769 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.243283 21769 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.243288 21769 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 15:07:32.243294 21769 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 15:07:32.243299 21769 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.243304 21769 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.243310 21769 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.243315 21769 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.243320 21769 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.243325 21769 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.243330 21769 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.243336 21769 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 15:07:32.243342 21769 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 15:07:32.243347 21769 
net.cpp:226] L3_b2_relu needs backward computation.\nI0818 15:07:32.243352 21769 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.243360 21769 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.243369 21769 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.243376 21769 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.243381 21769 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.243386 21769 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.243391 21769 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.243396 21769 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.243402 21769 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 15:07:32.243408 21769 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 15:07:32.243414 21769 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 15:07:32.243419 21769 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 15:07:32.243425 21769 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.243432 21769 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 15:07:32.243438 21769 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.243443 21769 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.243448 21769 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.243453 21769 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 15:07:32.243458 21769 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 15:07:32.243464 21769 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.243470 21769 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 15:07:32.243475 21769 net.cpp:226] L2_b18_sum_eltwise_top_L2_b18_relu_0_split needs backward computation.\nI0818 
15:07:32.243481 21769 net.cpp:226] L2_b18_relu needs backward computation.\nI0818 15:07:32.243487 21769 net.cpp:226] L2_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.243494 21769 net.cpp:226] L2_b18_cbr2_scale needs backward computation.\nI0818 15:07:32.243499 21769 net.cpp:226] L2_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.243504 21769 net.cpp:226] L2_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.243510 21769 net.cpp:226] L2_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.243515 21769 net.cpp:226] L2_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.243520 21769 net.cpp:226] L2_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.243525 21769 net.cpp:226] L2_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.243531 21769 net.cpp:226] L2_b17_sum_eltwise_top_L2_b17_relu_0_split needs backward computation.\nI0818 15:07:32.243537 21769 net.cpp:226] L2_b17_relu needs backward computation.\nI0818 15:07:32.243542 21769 net.cpp:226] L2_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.243548 21769 net.cpp:226] L2_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.243554 21769 net.cpp:226] L2_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.243559 21769 net.cpp:226] L2_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.243566 21769 net.cpp:226] L2_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.243571 21769 net.cpp:226] L2_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.243577 21769 net.cpp:226] L2_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.243582 21769 net.cpp:226] L2_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.243587 21769 net.cpp:226] L2_b16_sum_eltwise_top_L2_b16_relu_0_split needs backward computation.\nI0818 15:07:32.243593 21769 net.cpp:226] L2_b16_relu needs backward computation.\nI0818 15:07:32.243598 21769 net.cpp:226] L2_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.243605 21769 net.cpp:226] 
L2_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.243610 21769 net.cpp:226] L2_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.243616 21769 net.cpp:226] L2_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.243621 21769 net.cpp:226] L2_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.243628 21769 net.cpp:226] L2_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.243640 21769 net.cpp:226] L2_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.243647 21769 net.cpp:226] L2_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.243654 21769 net.cpp:226] L2_b15_sum_eltwise_top_L2_b15_relu_0_split needs backward computation.\nI0818 15:07:32.243659 21769 net.cpp:226] L2_b15_relu needs backward computation.\nI0818 15:07:32.243664 21769 net.cpp:226] L2_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.243670 21769 net.cpp:226] L2_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.243676 21769 net.cpp:226] L2_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.243687 21769 net.cpp:226] L2_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.243695 21769 net.cpp:226] L2_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.243700 21769 net.cpp:226] L2_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.243705 21769 net.cpp:226] L2_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.243711 21769 net.cpp:226] L2_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.243717 21769 net.cpp:226] L2_b14_sum_eltwise_top_L2_b14_relu_0_split needs backward computation.\nI0818 15:07:32.243722 21769 net.cpp:226] L2_b14_relu needs backward computation.\nI0818 15:07:32.243728 21769 net.cpp:226] L2_b14_sum_eltwise needs backward computation.\nI0818 15:07:32.243734 21769 net.cpp:226] L2_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.243739 21769 net.cpp:226] L2_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.243746 21769 net.cpp:226] L2_b14_cbr2_conv needs backward 
computation.\nI0818 15:07:32.243752 21769 net.cpp:226] L2_b14_cbr1_relu needs backward computation.\nI0818 15:07:32.243757 21769 net.cpp:226] L2_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.243762 21769 net.cpp:226] L2_b14_cbr1_bn needs backward computation.\nI0818 15:07:32.243767 21769 net.cpp:226] L2_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.243773 21769 net.cpp:226] L2_b13_sum_eltwise_top_L2_b13_relu_0_split needs backward computation.\nI0818 15:07:32.243779 21769 net.cpp:226] L2_b13_relu needs backward computation.\nI0818 15:07:32.243784 21769 net.cpp:226] L2_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.243790 21769 net.cpp:226] L2_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.243796 21769 net.cpp:226] L2_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.243803 21769 net.cpp:226] L2_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.243808 21769 net.cpp:226] L2_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.243813 21769 net.cpp:226] L2_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.243818 21769 net.cpp:226] L2_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.243824 21769 net.cpp:226] L2_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.243830 21769 net.cpp:226] L2_b12_sum_eltwise_top_L2_b12_relu_0_split needs backward computation.\nI0818 15:07:32.243836 21769 net.cpp:226] L2_b12_relu needs backward computation.\nI0818 15:07:32.243841 21769 net.cpp:226] L2_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.243849 21769 net.cpp:226] L2_b12_cbr2_scale needs backward computation.\nI0818 15:07:32.243854 21769 net.cpp:226] L2_b12_cbr2_bn needs backward computation.\nI0818 15:07:32.243860 21769 net.cpp:226] L2_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.243865 21769 net.cpp:226] L2_b12_cbr1_relu needs backward computation.\nI0818 15:07:32.243871 21769 net.cpp:226] L2_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.243876 21769 
net.cpp:226] L2_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.243882 21769 net.cpp:226] L2_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.243888 21769 net.cpp:226] L2_b11_sum_eltwise_top_L2_b11_relu_0_split needs backward computation.\nI0818 15:07:32.243894 21769 net.cpp:226] L2_b11_relu needs backward computation.\nI0818 15:07:32.243901 21769 net.cpp:226] L2_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.243906 21769 net.cpp:226] L2_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.243919 21769 net.cpp:226] L2_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.243927 21769 net.cpp:226] L2_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.243932 21769 net.cpp:226] L2_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.243937 21769 net.cpp:226] L2_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.243942 21769 net.cpp:226] L2_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.243948 21769 net.cpp:226] L2_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.243954 21769 net.cpp:226] L2_b10_sum_eltwise_top_L2_b10_relu_0_split needs backward computation.\nI0818 15:07:32.243960 21769 net.cpp:226] L2_b10_relu needs backward computation.\nI0818 15:07:32.243966 21769 net.cpp:226] L2_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.243975 21769 net.cpp:226] L2_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.243981 21769 net.cpp:226] L2_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.243988 21769 net.cpp:226] L2_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.243994 21769 net.cpp:226] L2_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.243999 21769 net.cpp:226] L2_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.244005 21769 net.cpp:226] L2_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.244011 21769 net.cpp:226] L2_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.244017 21769 net.cpp:226] 
L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 15:07:32.244024 21769 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 15:07:32.244029 21769 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.244035 21769 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.244041 21769 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.244047 21769 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.244052 21769 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.244058 21769 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.244063 21769 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.244069 21769 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.244076 21769 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 15:07:32.244081 21769 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 15:07:32.244087 21769 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.244093 21769 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.244099 21769 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.244105 21769 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.244112 21769 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.244117 21769 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.244122 21769 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 15:07:32.244128 21769 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 15:07:32.244133 21769 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 15:07:32.244139 21769 net.cpp:226] L2_b7_relu needs backward computation.\nI0818 15:07:32.244145 21769 net.cpp:226] L2_b7_sum_eltwise needs backward 
computation.\nI0818 15:07:32.244153 21769 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.244158 21769 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.244163 21769 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 15:07:32.244169 21769 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.244175 21769 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.244180 21769 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.244186 21769 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.244192 21769 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 15:07:32.244204 21769 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 15:07:32.244210 21769 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.244217 21769 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.244223 21769 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.244230 21769 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.244235 21769 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.244240 21769 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.244246 21769 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.244251 21769 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.244257 21769 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 15:07:32.244263 21769 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 15:07:32.244269 21769 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.244276 21769 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.244282 21769 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.244287 21769 net.cpp:226] 
L2_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.244293 21769 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 15:07:32.244298 21769 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.244304 21769 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 15:07:32.244310 21769 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.244316 21769 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 15:07:32.244321 21769 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 15:07:32.244328 21769 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.244334 21769 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.244339 21769 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.244345 21769 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.244351 21769 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.244357 21769 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.244362 21769 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.244369 21769 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.244374 21769 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 15:07:32.244380 21769 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 15:07:32.244385 21769 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.244392 21769 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.244398 21769 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.244405 21769 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.244410 21769 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.244415 21769 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.244421 
21769 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.244426 21769 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 15:07:32.244433 21769 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 15:07:32.244439 21769 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 15:07:32.244444 21769 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.244451 21769 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.244457 21769 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.244462 21769 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.244467 21769 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.244473 21769 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.244483 21769 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.244490 21769 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.244496 21769 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 15:07:32.244503 21769 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 15:07:32.244508 21769 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 15:07:32.244514 21769 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 15:07:32.244519 21769 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.244526 21769 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 15:07:32.244532 21769 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.244539 21769 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.244544 21769 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.244550 21769 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 15:07:32.244555 21769 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 
15:07:32.244561 21769 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.244567 21769 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 15:07:32.244572 21769 net.cpp:226] L1_b18_sum_eltwise_top_L1_b18_relu_0_split needs backward computation.\nI0818 15:07:32.244578 21769 net.cpp:226] L1_b18_relu needs backward computation.\nI0818 15:07:32.244585 21769 net.cpp:226] L1_b18_sum_eltwise needs backward computation.\nI0818 15:07:32.244590 21769 net.cpp:226] L1_b18_cbr2_scale needs backward computation.\nI0818 15:07:32.244596 21769 net.cpp:226] L1_b18_cbr2_bn needs backward computation.\nI0818 15:07:32.244601 21769 net.cpp:226] L1_b18_cbr2_conv needs backward computation.\nI0818 15:07:32.244607 21769 net.cpp:226] L1_b18_cbr1_relu needs backward computation.\nI0818 15:07:32.244612 21769 net.cpp:226] L1_b18_cbr1_scale needs backward computation.\nI0818 15:07:32.244618 21769 net.cpp:226] L1_b18_cbr1_bn needs backward computation.\nI0818 15:07:32.244624 21769 net.cpp:226] L1_b18_cbr1_conv needs backward computation.\nI0818 15:07:32.244630 21769 net.cpp:226] L1_b17_sum_eltwise_top_L1_b17_relu_0_split needs backward computation.\nI0818 15:07:32.244637 21769 net.cpp:226] L1_b17_relu needs backward computation.\nI0818 15:07:32.244642 21769 net.cpp:226] L1_b17_sum_eltwise needs backward computation.\nI0818 15:07:32.244648 21769 net.cpp:226] L1_b17_cbr2_scale needs backward computation.\nI0818 15:07:32.244654 21769 net.cpp:226] L1_b17_cbr2_bn needs backward computation.\nI0818 15:07:32.244660 21769 net.cpp:226] L1_b17_cbr2_conv needs backward computation.\nI0818 15:07:32.244665 21769 net.cpp:226] L1_b17_cbr1_relu needs backward computation.\nI0818 15:07:32.244671 21769 net.cpp:226] L1_b17_cbr1_scale needs backward computation.\nI0818 15:07:32.244678 21769 net.cpp:226] L1_b17_cbr1_bn needs backward computation.\nI0818 15:07:32.244691 21769 net.cpp:226] L1_b17_cbr1_conv needs backward computation.\nI0818 15:07:32.244699 21769 net.cpp:226] 
L1_b16_sum_eltwise_top_L1_b16_relu_0_split needs backward computation.\nI0818 15:07:32.244706 21769 net.cpp:226] L1_b16_relu needs backward computation.\nI0818 15:07:32.244711 21769 net.cpp:226] L1_b16_sum_eltwise needs backward computation.\nI0818 15:07:32.244719 21769 net.cpp:226] L1_b16_cbr2_scale needs backward computation.\nI0818 15:07:32.244724 21769 net.cpp:226] L1_b16_cbr2_bn needs backward computation.\nI0818 15:07:32.244730 21769 net.cpp:226] L1_b16_cbr2_conv needs backward computation.\nI0818 15:07:32.244736 21769 net.cpp:226] L1_b16_cbr1_relu needs backward computation.\nI0818 15:07:32.244742 21769 net.cpp:226] L1_b16_cbr1_scale needs backward computation.\nI0818 15:07:32.244748 21769 net.cpp:226] L1_b16_cbr1_bn needs backward computation.\nI0818 15:07:32.244755 21769 net.cpp:226] L1_b16_cbr1_conv needs backward computation.\nI0818 15:07:32.244760 21769 net.cpp:226] L1_b15_sum_eltwise_top_L1_b15_relu_0_split needs backward computation.\nI0818 15:07:32.244766 21769 net.cpp:226] L1_b15_relu needs backward computation.\nI0818 15:07:32.244777 21769 net.cpp:226] L1_b15_sum_eltwise needs backward computation.\nI0818 15:07:32.244784 21769 net.cpp:226] L1_b15_cbr2_scale needs backward computation.\nI0818 15:07:32.244791 21769 net.cpp:226] L1_b15_cbr2_bn needs backward computation.\nI0818 15:07:32.244796 21769 net.cpp:226] L1_b15_cbr2_conv needs backward computation.\nI0818 15:07:32.244801 21769 net.cpp:226] L1_b15_cbr1_relu needs backward computation.\nI0818 15:07:32.244807 21769 net.cpp:226] L1_b15_cbr1_scale needs backward computation.\nI0818 15:07:32.244812 21769 net.cpp:226] L1_b15_cbr1_bn needs backward computation.\nI0818 15:07:32.244818 21769 net.cpp:226] L1_b15_cbr1_conv needs backward computation.\nI0818 15:07:32.244824 21769 net.cpp:226] L1_b14_sum_eltwise_top_L1_b14_relu_0_split needs backward computation.\nI0818 15:07:32.244830 21769 net.cpp:226] L1_b14_relu needs backward computation.\nI0818 15:07:32.244835 21769 net.cpp:226] L1_b14_sum_eltwise 
needs backward computation.\nI0818 15:07:32.244843 21769 net.cpp:226] L1_b14_cbr2_scale needs backward computation.\nI0818 15:07:32.244848 21769 net.cpp:226] L1_b14_cbr2_bn needs backward computation.\nI0818 15:07:32.244853 21769 net.cpp:226] L1_b14_cbr2_conv needs backward computation.\nI0818 15:07:32.244859 21769 net.cpp:226] L1_b14_cbr1_relu needs backward computation.\nI0818 15:07:32.244865 21769 net.cpp:226] L1_b14_cbr1_scale needs backward computation.\nI0818 15:07:32.244870 21769 net.cpp:226] L1_b14_cbr1_bn needs backward computation.\nI0818 15:07:32.244876 21769 net.cpp:226] L1_b14_cbr1_conv needs backward computation.\nI0818 15:07:32.244882 21769 net.cpp:226] L1_b13_sum_eltwise_top_L1_b13_relu_0_split needs backward computation.\nI0818 15:07:32.244889 21769 net.cpp:226] L1_b13_relu needs backward computation.\nI0818 15:07:32.244894 21769 net.cpp:226] L1_b13_sum_eltwise needs backward computation.\nI0818 15:07:32.244901 21769 net.cpp:226] L1_b13_cbr2_scale needs backward computation.\nI0818 15:07:32.244906 21769 net.cpp:226] L1_b13_cbr2_bn needs backward computation.\nI0818 15:07:32.244912 21769 net.cpp:226] L1_b13_cbr2_conv needs backward computation.\nI0818 15:07:32.244917 21769 net.cpp:226] L1_b13_cbr1_relu needs backward computation.\nI0818 15:07:32.244923 21769 net.cpp:226] L1_b13_cbr1_scale needs backward computation.\nI0818 15:07:32.244928 21769 net.cpp:226] L1_b13_cbr1_bn needs backward computation.\nI0818 15:07:32.244935 21769 net.cpp:226] L1_b13_cbr1_conv needs backward computation.\nI0818 15:07:32.244940 21769 net.cpp:226] L1_b12_sum_eltwise_top_L1_b12_relu_0_split needs backward computation.\nI0818 15:07:32.244946 21769 net.cpp:226] L1_b12_relu needs backward computation.\nI0818 15:07:32.244952 21769 net.cpp:226] L1_b12_sum_eltwise needs backward computation.\nI0818 15:07:32.244958 21769 net.cpp:226] L1_b12_cbr2_scale needs backward computation.\nI0818 15:07:32.244964 21769 net.cpp:226] L1_b12_cbr2_bn needs backward computation.\nI0818 
15:07:32.244971 21769 net.cpp:226] L1_b12_cbr2_conv needs backward computation.\nI0818 15:07:32.244976 21769 net.cpp:226] L1_b12_cbr1_relu needs backward computation.\nI0818 15:07:32.244982 21769 net.cpp:226] L1_b12_cbr1_scale needs backward computation.\nI0818 15:07:32.244987 21769 net.cpp:226] L1_b12_cbr1_bn needs backward computation.\nI0818 15:07:32.244992 21769 net.cpp:226] L1_b12_cbr1_conv needs backward computation.\nI0818 15:07:32.244998 21769 net.cpp:226] L1_b11_sum_eltwise_top_L1_b11_relu_0_split needs backward computation.\nI0818 15:07:32.245005 21769 net.cpp:226] L1_b11_relu needs backward computation.\nI0818 15:07:32.245010 21769 net.cpp:226] L1_b11_sum_eltwise needs backward computation.\nI0818 15:07:32.245018 21769 net.cpp:226] L1_b11_cbr2_scale needs backward computation.\nI0818 15:07:32.245023 21769 net.cpp:226] L1_b11_cbr2_bn needs backward computation.\nI0818 15:07:32.245029 21769 net.cpp:226] L1_b11_cbr2_conv needs backward computation.\nI0818 15:07:32.245036 21769 net.cpp:226] L1_b11_cbr1_relu needs backward computation.\nI0818 15:07:32.245041 21769 net.cpp:226] L1_b11_cbr1_scale needs backward computation.\nI0818 15:07:32.245048 21769 net.cpp:226] L1_b11_cbr1_bn needs backward computation.\nI0818 15:07:32.245059 21769 net.cpp:226] L1_b11_cbr1_conv needs backward computation.\nI0818 15:07:32.245064 21769 net.cpp:226] L1_b10_sum_eltwise_top_L1_b10_relu_0_split needs backward computation.\nI0818 15:07:32.245070 21769 net.cpp:226] L1_b10_relu needs backward computation.\nI0818 15:07:32.245076 21769 net.cpp:226] L1_b10_sum_eltwise needs backward computation.\nI0818 15:07:32.245084 21769 net.cpp:226] L1_b10_cbr2_scale needs backward computation.\nI0818 15:07:32.245090 21769 net.cpp:226] L1_b10_cbr2_bn needs backward computation.\nI0818 15:07:32.245095 21769 net.cpp:226] L1_b10_cbr2_conv needs backward computation.\nI0818 15:07:32.245101 21769 net.cpp:226] L1_b10_cbr1_relu needs backward computation.\nI0818 15:07:32.245107 21769 net.cpp:226] 
L1_b10_cbr1_scale needs backward computation.\nI0818 15:07:32.245112 21769 net.cpp:226] L1_b10_cbr1_bn needs backward computation.\nI0818 15:07:32.245118 21769 net.cpp:226] L1_b10_cbr1_conv needs backward computation.\nI0818 15:07:32.245124 21769 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 15:07:32.245131 21769 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 15:07:32.245136 21769 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 15:07:32.245142 21769 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 15:07:32.245147 21769 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 15:07:32.245153 21769 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 15:07:32.245159 21769 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 15:07:32.245164 21769 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 15:07:32.245170 21769 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 15:07:32.245177 21769 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 15:07:32.245182 21769 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 15:07:32.245187 21769 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 15:07:32.245193 21769 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 15:07:32.245200 21769 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 15:07:32.245206 21769 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 15:07:32.245213 21769 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 15:07:32.245218 21769 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 15:07:32.245223 21769 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 15:07:32.245229 21769 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 15:07:32.245235 21769 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 
15:07:32.245241 21769 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 15:07:32.245247 21769 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 15:07:32.245254 21769 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 15:07:32.245260 21769 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 15:07:32.245265 21769 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 15:07:32.245271 21769 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 15:07:32.245277 21769 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 15:07:32.245283 21769 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 15:07:32.245290 21769 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 15:07:32.245296 21769 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 15:07:32.245301 21769 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 15:07:32.245307 21769 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 15:07:32.245313 21769 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 15:07:32.245321 21769 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 15:07:32.245326 21769 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 15:07:32.245332 21769 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 15:07:32.245343 21769 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 15:07:32.245349 21769 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 15:07:32.245355 21769 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 15:07:32.245362 21769 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 15:07:32.245368 21769 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 15:07:32.245373 21769 net.cpp:226] L1_b5_relu needs backward computation.\nI0818 15:07:32.245379 21769 net.cpp:226] 
L1_b5_sum_eltwise needs backward computation.\nI0818 15:07:32.245386 21769 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 15:07:32.245391 21769 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 15:07:32.245398 21769 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 15:07:32.245404 21769 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 15:07:32.245409 21769 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 15:07:32.245415 21769 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 15:07:32.245420 21769 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 15:07:32.245429 21769 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 15:07:32.245436 21769 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 15:07:32.245442 21769 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 15:07:32.245450 21769 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 15:07:32.245455 21769 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 15:07:32.245461 21769 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 15:07:32.245467 21769 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 15:07:32.245473 21769 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 15:07:32.245478 21769 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 15:07:32.245484 21769 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 15:07:32.245491 21769 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 15:07:32.245497 21769 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 15:07:32.245502 21769 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0818 15:07:32.245509 21769 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 15:07:32.245515 21769 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 15:07:32.245522 
21769 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 15:07:32.245527 21769 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 15:07:32.245533 21769 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 15:07:32.245538 21769 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 15:07:32.245544 21769 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 15:07:32.245550 21769 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 15:07:32.245556 21769 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 15:07:32.245563 21769 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 15:07:32.245569 21769 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 15:07:32.245575 21769 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 15:07:32.245581 21769 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 15:07:32.245587 21769 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 15:07:32.245594 21769 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 15:07:32.245599 21769 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 15:07:32.245605 21769 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 15:07:32.245611 21769 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 15:07:32.245617 21769 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 15:07:32.245623 21769 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 15:07:32.245635 21769 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 15:07:32.245641 21769 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 15:07:32.245648 21769 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0818 15:07:32.245654 21769 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 15:07:32.245661 21769 net.cpp:226] L1_b1_cbr1_scale needs backward 
computation.\nI0818 15:07:32.245666 21769 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 15:07:32.245671 21769 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 15:07:32.245677 21769 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 15:07:32.245687 21769 net.cpp:226] pre_relu needs backward computation.\nI0818 15:07:32.245693 21769 net.cpp:226] pre_scale needs backward computation.\nI0818 15:07:32.245699 21769 net.cpp:226] pre_bn needs backward computation.\nI0818 15:07:32.245704 21769 net.cpp:226] pre_conv needs backward computation.\nI0818 15:07:32.245712 21769 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 15:07:32.245718 21769 net.cpp:228] dataLayer does not need backward computation.\nI0818 15:07:32.245723 21769 net.cpp:270] This network produces output accuracy\nI0818 15:07:32.245729 21769 net.cpp:270] This network produces output loss\nI0818 15:07:32.246420 21769 net.cpp:283] Network initialization done.\nI0818 15:07:32.248545 21769 solver.cpp:60] Solver scaffolding done.\nI0818 15:07:32.497190 21769 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0818 15:07:32.945776 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:32.945847 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:32.955116 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:33.219341 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:33.219435 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:33.291290 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:33.291374 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:33.899433 21769 upgrade_proto.cpp:77] Attempting to upgrade 
batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:33.899507 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:33.909456 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:34.211346 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:34.211468 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:34.321126 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:34.321244 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:35.052086 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:35.052157 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:35.062963 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:35.409222 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:35.409373 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:35.559150 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:35.559293 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:35.736641 21769 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0818 15:07:36.416509 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:36.416635 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:36.428750 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:36.814373 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:36.814599 21769 net.cpp:143] Created 
top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:37.009599 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:37.009832 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:38.022464 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:38.022537 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:38.035428 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:38.463333 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:38.463536 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:38.702905 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:38.703114 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:39.836012 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:39.836089 21769 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:39.850028 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:40.329919 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:40.330152 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:40.611604 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:40.611840 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:41.860424 21769 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:41.860494 21769 upgrade_proto.cpp:80] Successfully 
upgraded batch norm layers using deprecated params.\nI0818 15:07:41.876111 21769 data_layer.cpp:41] output data size: 100,3,32,32\nI0818 15:07:41.932495 21787 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:41.998463 21787 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:42.473721 21769 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:42.473984 21769 net.cpp:143] Created top blob 0 (shape: 100 16 16 16 (409600)) for shared layer L2_b1_zeros\nI0818 15:07:42.794359 21769 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:42.794615 21769 net.cpp:143] Created top blob 0 (shape: 100 32 8 8 (204800)) for shared layer L3_b1_zeros\nI0818 15:07:43.154911 21769 parallel.cpp:425] Starting Optimization\nI0818 15:07:43.156217 21769 solver.cpp:279] Solving Cifar-Resnet\nI0818 15:07:43.156235 21769 solver.cpp:280] Learning Rate Policy: triangular\nI0818 15:07:43.163954 21769 solver.cpp:337] Iteration 0, Testing net (#0)\nI0818 15:09:55.491309 21769 solver.cpp:404]     Test net output #0: accuracy = 0.1\nI0818 15:09:55.491694 21769 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0818 15:10:02.975296 21769 solver.cpp:228] Iteration 0, loss = 4.48635\nI0818 15:10:02.975337 21769 solver.cpp:244]     Train net output #0: accuracy = 0.07\nI0818 15:10:02.975358 21769 solver.cpp:244]     Train net output #1: loss = 4.48635 (* 1 = 4.48635 loss)\nI0818 15:10:02.975519 21769 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0818 15:13:41.691304 21769 solver.cpp:337] Iteration 100, Testing net (#0)\nI0818 15:15:52.917301 21769 solver.cpp:404]     Test net output #0: accuracy = 0.311\nI0818 15:15:52.917639 21769 solver.cpp:404]     Test net output #1: loss = 1.85333 (* 1 = 1.85333 loss)\nI0818 15:15:55.045675 21769 solver.cpp:228] Iteration 100, loss = 1.88534\nI0818 15:15:55.045722 21769 solver.cpp:244]     Train net output #0: accuracy = 0.33\nI0818 15:15:55.045737 21769 solver.cpp:244]     Train net output #1: loss = 1.88534 (* 1 
= 1.88534 loss)\nI0818 15:15:55.132537 21769 sgd_solver.cpp:166] Iteration 100, lr = 0.00250006\nI0818 15:19:33.547422 21769 solver.cpp:337] Iteration 200, Testing net (#0)\nI0818 15:21:44.779870 21769 solver.cpp:404]     Test net output #0: accuracy = 0.4277\nI0818 15:21:44.780201 21769 solver.cpp:404]     Test net output #1: loss = 1.55435 (* 1 = 1.55435 loss)\nI0818 15:21:46.907881 21769 solver.cpp:228] Iteration 200, loss = 1.58582\nI0818 15:21:46.907927 21769 solver.cpp:244]     Train net output #0: accuracy = 0.37\nI0818 15:21:46.907944 21769 solver.cpp:244]     Train net output #1: loss = 1.58582 (* 1 = 1.58582 loss)\nI0818 15:21:46.993995 21769 sgd_solver.cpp:166] Iteration 200, lr = 0.005\nI0818 15:25:25.347872 21769 solver.cpp:337] Iteration 300, Testing net (#0)\nI0818 15:27:36.569941 21769 solver.cpp:404]     Test net output #0: accuracy = 0.4977\nI0818 15:27:36.570271 21769 solver.cpp:404]     Test net output #1: loss = 1.39253 (* 1 = 1.39253 loss)\nI0818 15:27:38.698106 21769 solver.cpp:228] Iteration 300, loss = 1.25454\nI0818 15:27:38.698150 21769 solver.cpp:244]     Train net output #0: accuracy = 0.52\nI0818 15:27:38.698168 21769 solver.cpp:244]     Train net output #1: loss = 1.25454 (* 1 = 1.25454 loss)\nI0818 15:27:38.791893 21769 sgd_solver.cpp:166] Iteration 300, lr = 0.00750005\nI0818 15:31:17.026438 21769 solver.cpp:337] Iteration 400, Testing net (#0)\nI0818 15:33:28.257326 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5429\nI0818 15:33:28.257668 21769 solver.cpp:404]     Test net output #1: loss = 1.26927 (* 1 = 1.26927 loss)\nI0818 15:33:30.384945 21769 solver.cpp:228] Iteration 400, loss = 1.18482\nI0818 15:33:30.384992 21769 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0818 15:33:30.385009 21769 solver.cpp:244]     Train net output #1: loss = 1.18482 (* 1 = 1.18482 loss)\nI0818 15:33:30.478132 21769 sgd_solver.cpp:166] Iteration 400, lr = 0.00999999\nI0818 15:37:09.128612 21769 solver.cpp:337] Iteration 500, 
Testing net (#0)\nI0818 15:39:20.349997 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5855\nI0818 15:39:20.350333 21769 solver.cpp:404]     Test net output #1: loss = 1.16985 (* 1 = 1.16985 loss)\nI0818 15:39:22.477933 21769 solver.cpp:228] Iteration 500, loss = 0.945325\nI0818 15:39:22.477979 21769 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0818 15:39:22.477996 21769 solver.cpp:244]     Train net output #1: loss = 0.945325 (* 1 = 0.945325 loss)\nI0818 15:39:22.566241 21769 sgd_solver.cpp:166] Iteration 500, lr = 0.0125\nI0818 15:43:00.980356 21769 solver.cpp:337] Iteration 600, Testing net (#0)\nI0818 15:45:12.213690 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5266\nI0818 15:45:12.214030 21769 solver.cpp:404]     Test net output #1: loss = 1.46792 (* 1 = 1.46792 loss)\nI0818 15:45:14.340996 21769 solver.cpp:228] Iteration 600, loss = 1.0015\nI0818 15:45:14.341042 21769 solver.cpp:244]     Train net output #0: accuracy = 0.66\nI0818 15:45:14.341058 21769 solver.cpp:244]     Train net output #1: loss = 1.0015 (* 1 = 1.0015 loss)\nI0818 15:45:14.432023 21769 sgd_solver.cpp:166] Iteration 600, lr = 0.015\nI0818 15:48:52.719002 21769 solver.cpp:337] Iteration 700, Testing net (#0)\nI0818 15:51:03.987419 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6329\nI0818 15:51:03.987759 21769 solver.cpp:404]     Test net output #1: loss = 1.08047 (* 1 = 1.08047 loss)\nI0818 15:51:06.115239 21769 solver.cpp:228] Iteration 700, loss = 0.854677\nI0818 15:51:06.115286 21769 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 15:51:06.115303 21769 solver.cpp:244]     Train net output #1: loss = 0.854677 (* 1 = 0.854677 loss)\nI0818 15:51:06.205148 21769 sgd_solver.cpp:166] Iteration 700, lr = 0.0175\nI0818 15:54:44.746935 21769 solver.cpp:337] Iteration 800, Testing net (#0)\nI0818 15:56:56.016767 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6138\nI0818 15:56:56.017102 21769 solver.cpp:404]     Test net 
output #1: loss = 1.21835 (* 1 = 1.21835 loss)\nI0818 15:56:58.145850 21769 solver.cpp:228] Iteration 800, loss = 0.622386\nI0818 15:56:58.145897 21769 solver.cpp:244]     Train net output #0: accuracy = 0.75\nI0818 15:56:58.145915 21769 solver.cpp:244]     Train net output #1: loss = 0.622386 (* 1 = 0.622386 loss)\nI0818 15:56:58.234328 21769 sgd_solver.cpp:166] Iteration 800, lr = 0.02\nI0818 16:00:36.632473 21769 solver.cpp:337] Iteration 900, Testing net (#0)\nI0818 16:02:47.909387 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6943\nI0818 16:02:47.909737 21769 solver.cpp:404]     Test net output #1: loss = 0.888778 (* 1 = 0.888778 loss)\nI0818 16:02:50.037178 21769 solver.cpp:228] Iteration 900, loss = 0.735382\nI0818 16:02:50.037223 21769 solver.cpp:244]     Train net output #0: accuracy = 0.79\nI0818 16:02:50.037240 21769 solver.cpp:244]     Train net output #1: loss = 0.735382 (* 1 = 0.735382 loss)\nI0818 16:02:50.125882 21769 sgd_solver.cpp:166] Iteration 900, lr = 0.0225\nI0818 16:06:28.518961 21769 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0818 16:08:39.793859 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6549\nI0818 16:08:39.794194 21769 solver.cpp:404]     Test net output #1: loss = 1.09101 (* 1 = 1.09101 loss)\nI0818 16:08:41.922129 21769 solver.cpp:228] Iteration 1000, loss = 0.61764\nI0818 16:08:41.922175 21769 solver.cpp:244]     Train net output #0: accuracy = 0.81\nI0818 16:08:41.922193 21769 solver.cpp:244]     Train net output #1: loss = 0.61764 (* 1 = 0.61764 loss)\nI0818 16:08:42.014272 21769 sgd_solver.cpp:166] Iteration 1000, lr = 0.025\nI0818 16:12:20.715409 21769 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0818 16:14:32.002995 21769 solver.cpp:404]     Test net output #0: accuracy = 0.682\nI0818 16:14:32.003327 21769 solver.cpp:404]     Test net output #1: loss = 0.977936 (* 1 = 0.977936 loss)\nI0818 16:14:34.132076 21769 solver.cpp:228] Iteration 1100, loss = 0.536462\nI0818 16:14:34.132124 21769 
solver.cpp:244]     Train net output #0: accuracy = 0.78\nI0818 16:14:34.132141 21769 solver.cpp:244]     Train net output #1: loss = 0.536462 (* 1 = 0.536462 loss)\nI0818 16:14:34.222800 21769 sgd_solver.cpp:166] Iteration 1100, lr = 0.0275\nI0818 16:18:12.731313 21769 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0818 16:20:24.013602 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7284\nI0818 16:20:24.013941 21769 solver.cpp:404]     Test net output #1: loss = 0.837806 (* 1 = 0.837806 loss)\nI0818 16:20:26.141988 21769 solver.cpp:228] Iteration 1200, loss = 0.589685\nI0818 16:20:26.142033 21769 solver.cpp:244]     Train net output #0: accuracy = 0.82\nI0818 16:20:26.142050 21769 solver.cpp:244]     Train net output #1: loss = 0.589685 (* 1 = 0.589685 loss)\nI0818 16:20:26.231762 21769 sgd_solver.cpp:166] Iteration 1200, lr = 0.03\nI0818 16:24:04.680637 21769 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0818 16:26:15.963891 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6039\nI0818 16:26:15.964221 21769 solver.cpp:404]     Test net output #1: loss = 1.40968 (* 1 = 1.40968 loss)\nI0818 16:26:18.092886 21769 solver.cpp:228] Iteration 1300, loss = 0.528768\nI0818 16:26:18.092932 21769 solver.cpp:244]     Train net output #0: accuracy = 0.77\nI0818 16:26:18.092949 21769 solver.cpp:244]     Train net output #1: loss = 0.528768 (* 1 = 0.528768 loss)\nI0818 16:26:18.178378 21769 sgd_solver.cpp:166] Iteration 1300, lr = 0.0325\nI0818 16:29:56.664809 21769 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0818 16:32:07.974593 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6396\nI0818 16:32:07.974921 21769 solver.cpp:404]     Test net output #1: loss = 1.37391 (* 1 = 1.37391 loss)\nI0818 16:32:10.103683 21769 solver.cpp:228] Iteration 1400, loss = 0.403625\nI0818 16:32:10.103730 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0818 16:32:10.103752 21769 solver.cpp:244]     Train net output #1: loss = 0.403625 (* 
1 = 0.403625 loss)\nI0818 16:32:10.193866 21769 sgd_solver.cpp:166] Iteration 1400, lr = 0.035\nI0818 16:35:48.718755 21769 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0818 16:38:00.009027 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7016\nI0818 16:38:00.009358 21769 solver.cpp:404]     Test net output #1: loss = 0.995763 (* 1 = 0.995763 loss)\nI0818 16:38:02.137157 21769 solver.cpp:228] Iteration 1500, loss = 0.478178\nI0818 16:38:02.137203 21769 solver.cpp:244]     Train net output #0: accuracy = 0.83\nI0818 16:38:02.137220 21769 solver.cpp:244]     Train net output #1: loss = 0.478178 (* 1 = 0.478178 loss)\nI0818 16:38:02.227797 21769 sgd_solver.cpp:166] Iteration 1500, lr = 0.0375\nI0818 16:41:40.585260 21769 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0818 16:43:51.874083 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6542\nI0818 16:43:51.874430 21769 solver.cpp:404]     Test net output #1: loss = 1.33854 (* 1 = 1.33854 loss)\nI0818 16:43:54.001471 21769 solver.cpp:228] Iteration 1600, loss = 0.477855\nI0818 16:43:54.001516 21769 solver.cpp:244]     Train net output #0: accuracy = 0.85\nI0818 16:43:54.001533 21769 solver.cpp:244]     Train net output #1: loss = 0.477855 (* 1 = 0.477855 loss)\nI0818 16:43:54.093340 21769 sgd_solver.cpp:166] Iteration 1600, lr = 0.04\nI0818 16:47:32.493594 21769 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0818 16:49:43.788530 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7119\nI0818 16:49:43.788841 21769 solver.cpp:404]     Test net output #1: loss = 1.00354 (* 1 = 1.00354 loss)\nI0818 16:49:45.917389 21769 solver.cpp:228] Iteration 1700, loss = 0.400931\nI0818 16:49:45.917435 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0818 16:49:45.917453 21769 solver.cpp:244]     Train net output #1: loss = 0.400931 (* 1 = 0.400931 loss)\nI0818 16:49:46.009251 21769 sgd_solver.cpp:166] Iteration 1700, lr = 0.0425\nI0818 16:53:24.571593 21769 solver.cpp:337] 
Iteration 1800, Testing net (#0)\nI0818 16:55:35.863875 21769 solver.cpp:404]     Test net output #0: accuracy = 0.733\nI0818 16:55:35.864210 21769 solver.cpp:404]     Test net output #1: loss = 0.888259 (* 1 = 0.888259 loss)\nI0818 16:55:37.991946 21769 solver.cpp:228] Iteration 1800, loss = 0.454238\nI0818 16:55:37.991992 21769 solver.cpp:244]     Train net output #0: accuracy = 0.85\nI0818 16:55:37.992009 21769 solver.cpp:244]     Train net output #1: loss = 0.454238 (* 1 = 0.454238 loss)\nI0818 16:55:38.084388 21769 sgd_solver.cpp:166] Iteration 1800, lr = 0.045\nI0818 16:59:16.418031 21769 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0818 17:01:27.718817 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6059\nI0818 17:01:27.719156 21769 solver.cpp:404]     Test net output #1: loss = 1.83862 (* 1 = 1.83862 loss)\nI0818 17:01:29.847704 21769 solver.cpp:228] Iteration 1900, loss = 0.322124\nI0818 17:01:29.847754 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0818 17:01:29.847770 21769 solver.cpp:244]     Train net output #1: loss = 0.322124 (* 1 = 0.322124 loss)\nI0818 17:01:29.943249 21769 sgd_solver.cpp:166] Iteration 1900, lr = 0.0475\nI0818 17:05:08.592103 21769 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0818 17:07:19.888314 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6444\nI0818 17:07:19.888650 21769 solver.cpp:404]     Test net output #1: loss = 1.3214 (* 1 = 1.3214 loss)\nI0818 17:07:22.016743 21769 solver.cpp:228] Iteration 2000, loss = 0.230922\nI0818 17:07:22.016796 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0818 17:07:22.016813 21769 solver.cpp:244]     Train net output #1: loss = 0.230922 (* 1 = 0.230922 loss)\nI0818 17:07:22.104115 21769 sgd_solver.cpp:166] Iteration 2000, lr = 0.05\nI0818 17:11:00.685852 21769 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0818 17:13:11.990164 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7371\nI0818 17:13:11.990509 21769 
solver.cpp:404]     Test net output #1: loss = 0.95908 (* 1 = 0.95908 loss)\nI0818 17:13:14.118485 21769 solver.cpp:228] Iteration 2100, loss = 0.280704\nI0818 17:13:14.118532 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0818 17:13:14.118547 21769 solver.cpp:244]     Train net output #1: loss = 0.280704 (* 1 = 0.280704 loss)\nI0818 17:13:14.204005 21769 sgd_solver.cpp:166] Iteration 2100, lr = 0.0525\nI0818 17:16:52.603618 21769 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0818 17:19:03.893755 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6905\nI0818 17:19:03.894086 21769 solver.cpp:404]     Test net output #1: loss = 1.39521 (* 1 = 1.39521 loss)\nI0818 17:19:06.023027 21769 solver.cpp:228] Iteration 2200, loss = 0.222606\nI0818 17:19:06.023075 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0818 17:19:06.023092 21769 solver.cpp:244]     Train net output #1: loss = 0.222606 (* 1 = 0.222606 loss)\nI0818 17:19:06.115170 21769 sgd_solver.cpp:166] Iteration 2200, lr = 0.0549999\nI0818 17:22:44.520328 21769 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0818 17:24:55.790360 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6917\nI0818 17:24:55.790699 21769 solver.cpp:404]     Test net output #1: loss = 1.40053 (* 1 = 1.40053 loss)\nI0818 17:24:57.918437 21769 solver.cpp:228] Iteration 2300, loss = 0.150575\nI0818 17:24:57.918483 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0818 17:24:57.918500 21769 solver.cpp:244]     Train net output #1: loss = 0.150575 (* 1 = 0.150575 loss)\nI0818 17:24:58.010551 21769 sgd_solver.cpp:166] Iteration 2300, lr = 0.0575\nI0818 17:28:36.391012 21769 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0818 17:30:47.678619 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6768\nI0818 17:30:47.678958 21769 solver.cpp:404]     Test net output #1: loss = 1.25486 (* 1 = 1.25486 loss)\nI0818 17:30:49.808195 21769 solver.cpp:228] Iteration 2400, loss 
= 0.145542\nI0818 17:30:49.808243 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:30:49.808259 21769 solver.cpp:244]     Train net output #1: loss = 0.145542 (* 1 = 0.145542 loss)\nI0818 17:30:49.901329 21769 sgd_solver.cpp:166] Iteration 2400, lr = 0.0599999\nI0818 17:34:28.605070 21769 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0818 17:36:39.867655 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6612\nI0818 17:36:39.867995 21769 solver.cpp:404]     Test net output #1: loss = 1.51066 (* 1 = 1.51066 loss)\nI0818 17:36:41.996984 21769 solver.cpp:228] Iteration 2500, loss = 0.135807\nI0818 17:36:41.997030 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 17:36:41.997046 21769 solver.cpp:244]     Train net output #1: loss = 0.135807 (* 1 = 0.135807 loss)\nI0818 17:36:42.091068 21769 sgd_solver.cpp:166] Iteration 2500, lr = 0.0625\nI0818 17:40:20.620796 21769 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0818 17:42:31.869041 21769 solver.cpp:404]     Test net output #0: accuracy = 0.706\nI0818 17:42:31.869385 21769 solver.cpp:404]     Test net output #1: loss = 1.25521 (* 1 = 1.25521 loss)\nI0818 17:42:33.996846 21769 solver.cpp:228] Iteration 2600, loss = 0.20734\nI0818 17:42:33.996894 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0818 17:42:33.996911 21769 solver.cpp:244]     Train net output #1: loss = 0.20734 (* 1 = 0.20734 loss)\nI0818 17:42:34.088726 21769 sgd_solver.cpp:166] Iteration 2600, lr = 0.0650001\nI0818 17:46:12.493310 21769 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0818 17:48:23.737723 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7142\nI0818 17:48:23.738073 21769 solver.cpp:404]     Test net output #1: loss = 1.53362 (* 1 = 1.53362 loss)\nI0818 17:48:25.866777 21769 solver.cpp:228] Iteration 2700, loss = 0.236371\nI0818 17:48:25.866824 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0818 17:48:25.866840 21769 solver.cpp:244]   
  Train net output #1: loss = 0.236371 (* 1 = 0.236371 loss)\nI0818 17:48:25.952600 21769 sgd_solver.cpp:166] Iteration 2700, lr = 0.0675\nI0818 17:52:04.359619 21769 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0818 17:54:15.621091 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7178\nI0818 17:54:15.621438 21769 solver.cpp:404]     Test net output #1: loss = 1.23473 (* 1 = 1.23473 loss)\nI0818 17:54:17.749605 21769 solver.cpp:228] Iteration 2800, loss = 0.165709\nI0818 17:54:17.749651 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0818 17:54:17.749668 21769 solver.cpp:244]     Train net output #1: loss = 0.165709 (* 1 = 0.165709 loss)\nI0818 17:54:17.835216 21769 sgd_solver.cpp:166] Iteration 2800, lr = 0.0700001\nI0818 17:57:56.271833 21769 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0818 18:00:07.522409 21769 solver.cpp:404]     Test net output #0: accuracy = 0.523\nI0818 18:00:07.522732 21769 solver.cpp:404]     Test net output #1: loss = 3.16045 (* 1 = 3.16045 loss)\nI0818 18:00:09.650585 21769 solver.cpp:228] Iteration 2900, loss = 0.178358\nI0818 18:00:09.650631 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0818 18:00:09.650648 21769 solver.cpp:244]     Train net output #1: loss = 0.178358 (* 1 = 0.178358 loss)\nI0818 18:00:09.736389 21769 sgd_solver.cpp:166] Iteration 2900, lr = 0.0725\nI0818 18:03:48.141134 21769 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0818 18:05:59.419739 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6786\nI0818 18:05:59.420089 21769 solver.cpp:404]     Test net output #1: loss = 1.49466 (* 1 = 1.49466 loss)\nI0818 18:06:01.548363 21769 solver.cpp:228] Iteration 3000, loss = 0.130166\nI0818 18:06:01.548411 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:06:01.548427 21769 solver.cpp:244]     Train net output #1: loss = 0.130166 (* 1 = 0.130166 loss)\nI0818 18:06:01.643148 21769 sgd_solver.cpp:166] Iteration 3000, lr = 0.075\nI0818 
18:09:40.238941 21769 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0818 18:11:51.519443 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7297\nI0818 18:11:51.519789 21769 solver.cpp:404]     Test net output #1: loss = 1.38013 (* 1 = 1.38013 loss)\nI0818 18:11:53.648140 21769 solver.cpp:228] Iteration 3100, loss = 0.118724\nI0818 18:11:53.648187 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 18:11:53.648205 21769 solver.cpp:244]     Train net output #1: loss = 0.118724 (* 1 = 0.118724 loss)\nI0818 18:11:53.736368 21769 sgd_solver.cpp:166] Iteration 3100, lr = 0.0775\nI0818 18:15:32.098047 21769 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0818 18:17:43.371062 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7327\nI0818 18:17:43.371384 21769 solver.cpp:404]     Test net output #1: loss = 1.24728 (* 1 = 1.24728 loss)\nI0818 18:17:45.492893 21769 solver.cpp:228] Iteration 3200, loss = 0.115176\nI0818 18:17:45.492940 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0818 18:17:45.492956 21769 solver.cpp:244]     Train net output #1: loss = 0.115176 (* 1 = 0.115176 loss)\nI0818 18:17:45.585352 21769 sgd_solver.cpp:166] Iteration 3200, lr = 0.08\nI0818 18:21:23.885205 21769 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0818 18:23:35.185894 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7101\nI0818 18:23:35.186239 21769 solver.cpp:404]     Test net output #1: loss = 1.26429 (* 1 = 1.26429 loss)\nI0818 18:23:37.308652 21769 solver.cpp:228] Iteration 3300, loss = 0.120716\nI0818 18:23:37.308699 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0818 18:23:37.308717 21769 solver.cpp:244]     Train net output #1: loss = 0.120716 (* 1 = 0.120716 loss)\nI0818 18:23:37.404719 21769 sgd_solver.cpp:166] Iteration 3300, lr = 0.0825\nI0818 18:27:15.824991 21769 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0818 18:29:27.131289 21769 solver.cpp:404]     Test net output #0: 
accuracy = 0.6932\nI0818 18:29:27.131600 21769 solver.cpp:404]     Test net output #1: loss = 1.55565 (* 1 = 1.55565 loss)\nI0818 18:29:29.254823 21769 solver.cpp:228] Iteration 3400, loss = 0.168937\nI0818 18:29:29.254870 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0818 18:29:29.254886 21769 solver.cpp:244]     Train net output #1: loss = 0.168937 (* 1 = 0.168937 loss)\nI0818 18:29:29.346560 21769 sgd_solver.cpp:166] Iteration 3400, lr = 0.085\nI0818 18:33:07.560673 21769 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0818 18:35:18.865140 21769 solver.cpp:404]     Test net output #0: accuracy = 0.66\nI0818 18:35:18.865475 21769 solver.cpp:404]     Test net output #1: loss = 1.59065 (* 1 = 1.59065 loss)\nI0818 18:35:20.987802 21769 solver.cpp:228] Iteration 3500, loss = 0.117315\nI0818 18:35:20.987849 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:35:20.987866 21769 solver.cpp:244]     Train net output #1: loss = 0.117315 (* 1 = 0.117315 loss)\nI0818 18:35:21.070358 21769 sgd_solver.cpp:166] Iteration 3500, lr = 0.0875\nI0818 18:38:58.595806 21769 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0818 18:41:09.911065 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7456\nI0818 18:41:09.911399 21769 solver.cpp:404]     Test net output #1: loss = 1.12204 (* 1 = 1.12204 loss)\nI0818 18:41:12.033745 21769 solver.cpp:228] Iteration 3600, loss = 0.0936244\nI0818 18:41:12.033794 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:41:12.033810 21769 solver.cpp:244]     Train net output #1: loss = 0.0936243 (* 1 = 0.0936243 loss)\nI0818 18:41:12.116281 21769 sgd_solver.cpp:166] Iteration 3600, lr = 0.09\nI0818 18:44:49.749109 21769 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0818 18:47:01.074501 21769 solver.cpp:404]     Test net output #0: accuracy = 0.69\nI0818 18:47:01.074838 21769 solver.cpp:404]     Test net output #1: loss = 1.52695 (* 1 = 1.52695 loss)\nI0818 18:47:03.197576 
21769 solver.cpp:228] Iteration 3700, loss = 0.0773997\nI0818 18:47:03.197612 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 18:47:03.197628 21769 solver.cpp:244]     Train net output #1: loss = 0.0773996 (* 1 = 0.0773996 loss)\nI0818 18:47:03.282765 21769 sgd_solver.cpp:166] Iteration 3700, lr = 0.0925\nI0818 18:50:40.888263 21769 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0818 18:52:52.239761 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7755\nI0818 18:52:52.240144 21769 solver.cpp:404]     Test net output #1: loss = 0.977517 (* 1 = 0.977517 loss)\nI0818 18:52:54.364655 21769 solver.cpp:228] Iteration 3800, loss = 0.057795\nI0818 18:52:54.364709 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:52:54.364733 21769 solver.cpp:244]     Train net output #1: loss = 0.0577949 (* 1 = 0.0577949 loss)\nI0818 18:52:54.452114 21769 sgd_solver.cpp:166] Iteration 3800, lr = 0.095\nI0818 18:56:32.082547 21769 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0818 18:58:43.412572 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7336\nI0818 18:58:43.412889 21769 solver.cpp:404]     Test net output #1: loss = 1.22414 (* 1 = 1.22414 loss)\nI0818 18:58:45.536754 21769 solver.cpp:228] Iteration 3900, loss = 0.183519\nI0818 18:58:45.536794 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:58:45.536816 21769 solver.cpp:244]     Train net output #1: loss = 0.183519 (* 1 = 0.183519 loss)\nI0818 18:58:45.625202 21769 sgd_solver.cpp:166] Iteration 3900, lr = 0.0975\nI0818 19:02:23.375039 21769 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0818 19:04:36.011226 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6981\nI0818 19:04:36.011622 21769 solver.cpp:404]     Test net output #1: loss = 1.63393 (* 1 = 1.63393 loss)\nI0818 19:04:38.141573 21769 solver.cpp:228] Iteration 4000, loss = 0.0902317\nI0818 19:04:38.141618 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.98\nI0818 19:04:38.141634 21769 solver.cpp:244]     Train net output #1: loss = 0.0902316 (* 1 = 0.0902316 loss)\nI0818 19:04:38.228489 21769 sgd_solver.cpp:166] Iteration 4000, lr = 0.1\nI0818 19:08:17.209563 21769 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0818 19:10:29.907743 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8032\nI0818 19:10:29.908125 21769 solver.cpp:404]     Test net output #1: loss = 0.888596 (* 1 = 0.888596 loss)\nI0818 19:10:32.038110 21769 solver.cpp:228] Iteration 4100, loss = 0.0644422\nI0818 19:10:32.038157 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 19:10:32.038174 21769 solver.cpp:244]     Train net output #1: loss = 0.0644421 (* 1 = 0.0644421 loss)\nI0818 19:10:32.129096 21769 sgd_solver.cpp:166] Iteration 4100, lr = 0.1025\nI0818 19:14:11.094290 21769 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0818 19:16:23.802954 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8129\nI0818 19:16:23.803378 21769 solver.cpp:404]     Test net output #1: loss = 0.82339 (* 1 = 0.82339 loss)\nI0818 19:16:25.932896 21769 solver.cpp:228] Iteration 4200, loss = 0.0902042\nI0818 19:16:25.932946 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0818 19:16:25.932971 21769 solver.cpp:244]     Train net output #1: loss = 0.0902041 (* 1 = 0.0902041 loss)\nI0818 19:16:26.018357 21769 sgd_solver.cpp:166] Iteration 4200, lr = 0.105\nI0818 19:20:04.710180 21769 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0818 19:22:17.598711 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7517\nI0818 19:22:17.599067 21769 solver.cpp:404]     Test net output #1: loss = 1.23032 (* 1 = 1.23032 loss)\nI0818 19:22:19.729311 21769 solver.cpp:228] Iteration 4300, loss = 0.0660866\nI0818 19:22:19.729360 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 19:22:19.729385 21769 solver.cpp:244]     Train net output #1: loss = 0.0660865 (* 1 = 0.0660865 loss)\nI0818 19:22:19.818944 
21769 sgd_solver.cpp:166] Iteration 4300, lr = 0.1075\nI0818 19:25:58.707643 21769 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0818 19:28:11.401162 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7833\nI0818 19:28:11.401538 21769 solver.cpp:404]     Test net output #1: loss = 1.07644 (* 1 = 1.07644 loss)\nI0818 19:28:13.531448 21769 solver.cpp:228] Iteration 4400, loss = 0.152946\nI0818 19:28:13.531497 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0818 19:28:13.531520 21769 solver.cpp:244]     Train net output #1: loss = 0.152946 (* 1 = 0.152946 loss)\nI0818 19:28:13.614011 21769 sgd_solver.cpp:166] Iteration 4400, lr = 0.11\nI0818 19:31:52.281599 21769 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0818 19:34:04.944646 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7267\nI0818 19:34:04.945031 21769 solver.cpp:404]     Test net output #1: loss = 1.60687 (* 1 = 1.60687 loss)\nI0818 19:34:07.074511 21769 solver.cpp:228] Iteration 4500, loss = 0.0348216\nI0818 19:34:07.074561 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:34:07.074584 21769 solver.cpp:244]     Train net output #1: loss = 0.0348215 (* 1 = 0.0348215 loss)\nI0818 19:34:07.165194 21769 sgd_solver.cpp:166] Iteration 4500, lr = 0.1125\nI0818 19:37:45.794358 21769 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0818 19:39:58.683246 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7423\nI0818 19:39:58.683636 21769 solver.cpp:404]     Test net output #1: loss = 1.32388 (* 1 = 1.32388 loss)\nI0818 19:40:00.813575 21769 solver.cpp:228] Iteration 4600, loss = 0.0855954\nI0818 19:40:00.813623 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 19:40:00.813647 21769 solver.cpp:244]     Train net output #1: loss = 0.0855953 (* 1 = 0.0855953 loss)\nI0818 19:40:00.898674 21769 sgd_solver.cpp:166] Iteration 4600, lr = 0.115\nI0818 19:43:39.597738 21769 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0818 
19:45:52.496356 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7836\nI0818 19:45:52.496737 21769 solver.cpp:404]     Test net output #1: loss = 1.16204 (* 1 = 1.16204 loss)\nI0818 19:45:54.626718 21769 solver.cpp:228] Iteration 4700, loss = 0.125044\nI0818 19:45:54.626768 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:45:54.626791 21769 solver.cpp:244]     Train net output #1: loss = 0.125044 (* 1 = 0.125044 loss)\nI0818 19:45:54.714376 21769 sgd_solver.cpp:166] Iteration 4700, lr = 0.1175\nI0818 19:49:33.426594 21769 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0818 19:51:46.382333 21769 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0818 19:51:46.382746 21769 solver.cpp:404]     Test net output #1: loss = 1.43271 (* 1 = 1.43271 loss)\nI0818 19:51:48.512454 21769 solver.cpp:228] Iteration 4800, loss = 0.0593828\nI0818 19:51:48.512503 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 19:51:48.512528 21769 solver.cpp:244]     Train net output #1: loss = 0.0593827 (* 1 = 0.0593827 loss)\nI0818 19:51:48.599936 21769 sgd_solver.cpp:166] Iteration 4800, lr = 0.12\nI0818 19:55:27.323793 21769 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0818 19:57:40.275715 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7847\nI0818 19:57:40.276085 21769 solver.cpp:404]     Test net output #1: loss = 1.00716 (* 1 = 1.00716 loss)\nI0818 19:57:42.405918 21769 solver.cpp:228] Iteration 4900, loss = 0.143671\nI0818 19:57:42.405971 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 19:57:42.405993 21769 solver.cpp:244]     Train net output #1: loss = 0.143671 (* 1 = 0.143671 loss)\nI0818 19:57:42.494026 21769 sgd_solver.cpp:166] Iteration 4900, lr = 0.1225\nI0818 20:01:20.895920 21769 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0818 20:03:33.850795 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7791\nI0818 20:03:33.851176 21769 solver.cpp:404]     Test net output 
#1: loss = 1.12346 (* 1 = 1.12346 loss)\nI0818 20:03:35.980631 21769 solver.cpp:228] Iteration 5000, loss = 0.0327776\nI0818 20:03:35.980680 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 20:03:35.980703 21769 solver.cpp:244]     Train net output #1: loss = 0.0327775 (* 1 = 0.0327775 loss)\nI0818 20:03:36.071889 21769 sgd_solver.cpp:166] Iteration 5000, lr = 0.125\nI0818 20:07:14.212901 21769 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0818 20:09:27.146040 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7909\nI0818 20:09:27.146430 21769 solver.cpp:404]     Test net output #1: loss = 1.07729 (* 1 = 1.07729 loss)\nI0818 20:09:29.276201 21769 solver.cpp:228] Iteration 5100, loss = 0.0485026\nI0818 20:09:29.276252 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 20:09:29.276275 21769 solver.cpp:244]     Train net output #1: loss = 0.0485025 (* 1 = 0.0485025 loss)\nI0818 20:09:29.355226 21769 sgd_solver.cpp:166] Iteration 5100, lr = 0.1275\nI0818 20:13:07.525830 21769 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0818 20:15:20.255640 21769 solver.cpp:404]     Test net output #0: accuracy = 0.798\nI0818 20:15:20.256013 21769 solver.cpp:404]     Test net output #1: loss = 0.97931 (* 1 = 0.97931 loss)\nI0818 20:15:22.386204 21769 solver.cpp:228] Iteration 5200, loss = 0.114862\nI0818 20:15:22.386251 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:15:22.386267 21769 solver.cpp:244]     Train net output #1: loss = 0.114861 (* 1 = 0.114861 loss)\nI0818 20:15:22.468449 21769 sgd_solver.cpp:166] Iteration 5200, lr = 0.13\nI0818 20:19:00.438447 21769 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0818 20:21:13.183231 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6499\nI0818 20:21:13.183622 21769 solver.cpp:404]     Test net output #1: loss = 2.20919 (* 1 = 2.20919 loss)\nI0818 20:21:15.313659 21769 solver.cpp:228] Iteration 5300, loss = 0.0665528\nI0818 20:21:15.313706 
21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 20:21:15.313722 21769 solver.cpp:244]     Train net output #1: loss = 0.0665526 (* 1 = 0.0665526 loss)\nI0818 20:21:15.395558 21769 sgd_solver.cpp:166] Iteration 5300, lr = 0.1325\nI0818 20:24:53.648280 21769 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0818 20:27:06.355445 21769 solver.cpp:404]     Test net output #0: accuracy = 0.807\nI0818 20:27:06.355849 21769 solver.cpp:404]     Test net output #1: loss = 0.869726 (* 1 = 0.869726 loss)\nI0818 20:27:08.485230 21769 solver.cpp:228] Iteration 5400, loss = 0.0660709\nI0818 20:27:08.485275 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 20:27:08.485292 21769 solver.cpp:244]     Train net output #1: loss = 0.0660708 (* 1 = 0.0660708 loss)\nI0818 20:27:08.570511 21769 sgd_solver.cpp:166] Iteration 5400, lr = 0.135\nI0818 20:30:46.574748 21769 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0818 20:32:59.280293 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7683\nI0818 20:32:59.280652 21769 solver.cpp:404]     Test net output #1: loss = 1.19829 (* 1 = 1.19829 loss)\nI0818 20:33:01.411012 21769 solver.cpp:228] Iteration 5500, loss = 0.0568397\nI0818 20:33:01.411058 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 20:33:01.411074 21769 solver.cpp:244]     Train net output #1: loss = 0.0568396 (* 1 = 0.0568396 loss)\nI0818 20:33:01.490598 21769 sgd_solver.cpp:166] Iteration 5500, lr = 0.1375\nI0818 20:36:39.484385 21769 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0818 20:38:52.213311 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7731\nI0818 20:38:52.213687 21769 solver.cpp:404]     Test net output #1: loss = 1.25137 (* 1 = 1.25137 loss)\nI0818 20:38:54.343685 21769 solver.cpp:228] Iteration 5600, loss = 0.0438547\nI0818 20:38:54.343734 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 20:38:54.343750 21769 solver.cpp:244]     Train net output #1: 
loss = 0.0438545 (* 1 = 0.0438545 loss)\nI0818 20:38:54.423440 21769 sgd_solver.cpp:166] Iteration 5600, lr = 0.14\nI0818 20:42:32.357707 21769 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0818 20:44:45.071776 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7931\nI0818 20:44:45.072175 21769 solver.cpp:404]     Test net output #1: loss = 1.00122 (* 1 = 1.00122 loss)\nI0818 20:44:47.201534 21769 solver.cpp:228] Iteration 5700, loss = 0.0857031\nI0818 20:44:47.201580 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 20:44:47.201596 21769 solver.cpp:244]     Train net output #1: loss = 0.0857029 (* 1 = 0.0857029 loss)\nI0818 20:44:47.284361 21769 sgd_solver.cpp:166] Iteration 5700, lr = 0.1425\nI0818 20:48:25.319443 21769 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0818 20:50:38.019789 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7561\nI0818 20:50:38.020159 21769 solver.cpp:404]     Test net output #1: loss = 1.28511 (* 1 = 1.28511 loss)\nI0818 20:50:40.149518 21769 solver.cpp:228] Iteration 5800, loss = 0.068255\nI0818 20:50:40.149564 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 20:50:40.149580 21769 solver.cpp:244]     Train net output #1: loss = 0.0682549 (* 1 = 0.0682549 loss)\nI0818 20:50:40.228030 21769 sgd_solver.cpp:166] Iteration 5800, lr = 0.145\nI0818 20:54:18.217345 21769 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0818 20:56:30.898878 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8279\nI0818 20:56:30.899256 21769 solver.cpp:404]     Test net output #1: loss = 0.744719 (* 1 = 0.744719 loss)\nI0818 20:56:33.028970 21769 solver.cpp:228] Iteration 5900, loss = 0.0588717\nI0818 20:56:33.029016 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 20:56:33.029031 21769 solver.cpp:244]     Train net output #1: loss = 0.0588715 (* 1 = 0.0588715 loss)\nI0818 20:56:33.107240 21769 sgd_solver.cpp:166] Iteration 5900, lr = 0.1475\nI0818 
21:00:11.129225 21769 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0818 21:02:23.822320 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7714\nI0818 21:02:23.822705 21769 solver.cpp:404]     Test net output #1: loss = 1.15289 (* 1 = 1.15289 loss)\nI0818 21:02:25.951586 21769 solver.cpp:228] Iteration 6000, loss = 0.0367044\nI0818 21:02:25.951632 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 21:02:25.951649 21769 solver.cpp:244]     Train net output #1: loss = 0.0367042 (* 1 = 0.0367042 loss)\nI0818 21:02:26.029705 21769 sgd_solver.cpp:166] Iteration 6000, lr = 0.15\nI0818 21:06:04.317931 21769 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0818 21:08:17.015269 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8\nI0818 21:08:17.015635 21769 solver.cpp:404]     Test net output #1: loss = 1.01859 (* 1 = 1.01859 loss)\nI0818 21:08:19.145258 21769 solver.cpp:228] Iteration 6100, loss = 0.0490561\nI0818 21:08:19.145304 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 21:08:19.145321 21769 solver.cpp:244]     Train net output #1: loss = 0.0490559 (* 1 = 0.0490559 loss)\nI0818 21:08:19.219874 21769 sgd_solver.cpp:166] Iteration 6100, lr = 0.1525\nI0818 21:11:57.172835 21769 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0818 21:14:09.845912 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0818 21:14:09.846289 21769 solver.cpp:404]     Test net output #1: loss = 0.933105 (* 1 = 0.933105 loss)\nI0818 21:14:11.975105 21769 solver.cpp:228] Iteration 6200, loss = 0.0461279\nI0818 21:14:11.975152 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 21:14:11.975167 21769 solver.cpp:244]     Train net output #1: loss = 0.0461278 (* 1 = 0.0461278 loss)\nI0818 21:14:12.054692 21769 sgd_solver.cpp:166] Iteration 6200, lr = 0.155\nI0818 21:17:50.109122 21769 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0818 21:20:02.791137 21769 solver.cpp:404]     Test net output #0: 
accuracy = 0.8201\nI0818 21:20:02.791501 21769 solver.cpp:404]     Test net output #1: loss = 0.814786 (* 1 = 0.814786 loss)\nI0818 21:20:04.921062 21769 solver.cpp:228] Iteration 6300, loss = 0.102858\nI0818 21:20:04.921110 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0818 21:20:04.921128 21769 solver.cpp:244]     Train net output #1: loss = 0.102858 (* 1 = 0.102858 loss)\nI0818 21:20:04.996789 21769 sgd_solver.cpp:166] Iteration 6300, lr = 0.1575\nI0818 21:23:42.963457 21769 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0818 21:25:55.661422 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7891\nI0818 21:25:55.661798 21769 solver.cpp:404]     Test net output #1: loss = 1.05995 (* 1 = 1.05995 loss)\nI0818 21:25:57.791965 21769 solver.cpp:228] Iteration 6400, loss = 0.05727\nI0818 21:25:57.792009 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 21:25:57.792026 21769 solver.cpp:244]     Train net output #1: loss = 0.05727 (* 1 = 0.05727 loss)\nI0818 21:25:57.868191 21769 sgd_solver.cpp:166] Iteration 6400, lr = 0.16\nI0818 21:29:35.854348 21769 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0818 21:31:48.563529 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7998\nI0818 21:31:48.563917 21769 solver.cpp:404]     Test net output #1: loss = 0.934346 (* 1 = 0.934346 loss)\nI0818 21:31:50.694515 21769 solver.cpp:228] Iteration 6500, loss = 0.0375901\nI0818 21:31:50.694572 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 21:31:50.694591 21769 solver.cpp:244]     Train net output #1: loss = 0.03759 (* 1 = 0.03759 loss)\nI0818 21:31:50.784500 21769 sgd_solver.cpp:166] Iteration 6500, lr = 0.1625\nI0818 21:35:29.499050 21769 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0818 21:37:42.162194 21769 solver.cpp:404]     Test net output #0: accuracy = 0.774\nI0818 21:37:42.162562 21769 solver.cpp:404]     Test net output #1: loss = 1.13438 (* 1 = 1.13438 loss)\nI0818 21:37:44.292827 
21769 solver.cpp:228] Iteration 6600, loss = 0.137025\nI0818 21:37:44.292876 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:37:44.292896 21769 solver.cpp:244]     Train net output #1: loss = 0.137025 (* 1 = 0.137025 loss)\nI0818 21:37:44.383154 21769 sgd_solver.cpp:166] Iteration 6600, lr = 0.165\nI0818 21:41:23.340044 21769 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0818 21:43:36.009037 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7323\nI0818 21:43:36.009421 21769 solver.cpp:404]     Test net output #1: loss = 1.44186 (* 1 = 1.44186 loss)\nI0818 21:43:38.139444 21769 solver.cpp:228] Iteration 6700, loss = 0.0446167\nI0818 21:43:38.139492 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 21:43:38.139508 21769 solver.cpp:244]     Train net output #1: loss = 0.0446166 (* 1 = 0.0446166 loss)\nI0818 21:43:38.224084 21769 sgd_solver.cpp:166] Iteration 6700, lr = 0.1675\nI0818 21:47:17.131348 21769 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0818 21:49:29.833978 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8229\nI0818 21:49:29.834297 21769 solver.cpp:404]     Test net output #1: loss = 0.766262 (* 1 = 0.766262 loss)\nI0818 21:49:31.964731 21769 solver.cpp:228] Iteration 6800, loss = 0.0411815\nI0818 21:49:31.964777 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 21:49:31.964794 21769 solver.cpp:244]     Train net output #1: loss = 0.0411814 (* 1 = 0.0411814 loss)\nI0818 21:49:32.051807 21769 sgd_solver.cpp:166] Iteration 6800, lr = 0.17\nI0818 21:53:10.917217 21769 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0818 21:55:23.589234 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7675\nI0818 21:55:23.589576 21769 solver.cpp:404]     Test net output #1: loss = 1.35065 (* 1 = 1.35065 loss)\nI0818 21:55:25.720019 21769 solver.cpp:228] Iteration 6900, loss = 0.0850995\nI0818 21:55:25.720067 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0818 21:55:25.720082 21769 solver.cpp:244]     Train net output #1: loss = 0.0850994 (* 1 = 0.0850994 loss)\nI0818 21:55:25.805968 21769 sgd_solver.cpp:166] Iteration 6900, lr = 0.1725\nI0818 21:59:04.635677 21769 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0818 22:01:17.263798 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8286\nI0818 22:01:17.264149 21769 solver.cpp:404]     Test net output #1: loss = 0.775613 (* 1 = 0.775613 loss)\nI0818 22:01:19.393261 21769 solver.cpp:228] Iteration 7000, loss = 0.0702126\nI0818 22:01:19.393309 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 22:01:19.393324 21769 solver.cpp:244]     Train net output #1: loss = 0.0702126 (* 1 = 0.0702126 loss)\nI0818 22:01:19.481842 21769 sgd_solver.cpp:166] Iteration 7000, lr = 0.175\nI0818 22:04:58.353749 21769 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0818 22:07:11.027632 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7672\nI0818 22:07:11.027974 21769 solver.cpp:404]     Test net output #1: loss = 1.17352 (* 1 = 1.17352 loss)\nI0818 22:07:13.157177 21769 solver.cpp:228] Iteration 7100, loss = 0.0269215\nI0818 22:07:13.157224 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:07:13.157240 21769 solver.cpp:244]     Train net output #1: loss = 0.0269214 (* 1 = 0.0269214 loss)\nI0818 22:07:13.246690 21769 sgd_solver.cpp:166] Iteration 7100, lr = 0.1775\nI0818 22:10:52.085127 21769 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0818 22:13:04.787122 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7667\nI0818 22:13:04.787514 21769 solver.cpp:404]     Test net output #1: loss = 1.20668 (* 1 = 1.20668 loss)\nI0818 22:13:06.917788 21769 solver.cpp:228] Iteration 7200, loss = 0.0574799\nI0818 22:13:06.917834 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 22:13:06.917850 21769 solver.cpp:244]     Train net output #1: loss = 0.0574797 (* 1 = 0.0574797 loss)\nI0818 22:13:07.002809 
21769 sgd_solver.cpp:166] Iteration 7200, lr = 0.18\nI0818 22:16:45.806345 21769 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0818 22:18:58.488104 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7972\nI0818 22:18:58.488466 21769 solver.cpp:404]     Test net output #1: loss = 0.964087 (* 1 = 0.964087 loss)\nI0818 22:19:00.618114 21769 solver.cpp:228] Iteration 7300, loss = 0.0406174\nI0818 22:19:00.618162 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 22:19:00.618178 21769 solver.cpp:244]     Train net output #1: loss = 0.0406173 (* 1 = 0.0406173 loss)\nI0818 22:19:00.702569 21769 sgd_solver.cpp:166] Iteration 7300, lr = 0.1825\nI0818 22:22:39.439373 21769 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0818 22:24:52.126090 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8245\nI0818 22:24:52.126461 21769 solver.cpp:404]     Test net output #1: loss = 0.817313 (* 1 = 0.817313 loss)\nI0818 22:24:54.256428 21769 solver.cpp:228] Iteration 7400, loss = 0.0343314\nI0818 22:24:54.256475 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 22:24:54.256491 21769 solver.cpp:244]     Train net output #1: loss = 0.0343313 (* 1 = 0.0343313 loss)\nI0818 22:24:54.348377 21769 sgd_solver.cpp:166] Iteration 7400, lr = 0.185\nI0818 22:28:33.296062 21769 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0818 22:30:46.000214 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7949\nI0818 22:30:46.000535 21769 solver.cpp:404]     Test net output #1: loss = 0.980284 (* 1 = 0.980284 loss)\nI0818 22:30:48.129854 21769 solver.cpp:228] Iteration 7500, loss = 0.0655632\nI0818 22:30:48.129901 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 22:30:48.129917 21769 solver.cpp:244]     Train net output #1: loss = 0.0655631 (* 1 = 0.0655631 loss)\nI0818 22:30:48.221952 21769 sgd_solver.cpp:166] Iteration 7500, lr = 0.1875\nI0818 22:34:27.180375 21769 solver.cpp:337] Iteration 7600, Testing net 
(#0)\nI0818 22:36:39.858453 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8108\nI0818 22:36:39.858777 21769 solver.cpp:404]     Test net output #1: loss = 0.832161 (* 1 = 0.832161 loss)\nI0818 22:36:41.989647 21769 solver.cpp:228] Iteration 7600, loss = 0.0353874\nI0818 22:36:41.989694 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 22:36:41.989711 21769 solver.cpp:244]     Train net output #1: loss = 0.0353872 (* 1 = 0.0353872 loss)\nI0818 22:36:42.070464 21769 sgd_solver.cpp:166] Iteration 7600, lr = 0.19\nI0818 22:40:20.787060 21769 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0818 22:42:33.447875 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8076\nI0818 22:42:33.448210 21769 solver.cpp:404]     Test net output #1: loss = 0.910171 (* 1 = 0.910171 loss)\nI0818 22:42:35.578135 21769 solver.cpp:228] Iteration 7700, loss = 0.0352667\nI0818 22:42:35.578184 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 22:42:35.578198 21769 solver.cpp:244]     Train net output #1: loss = 0.0352665 (* 1 = 0.0352665 loss)\nI0818 22:42:35.661510 21769 sgd_solver.cpp:166] Iteration 7700, lr = 0.1925\nI0818 22:46:14.591873 21769 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0818 22:48:27.252303 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8003\nI0818 22:48:27.252614 21769 solver.cpp:404]     Test net output #1: loss = 1.00213 (* 1 = 1.00213 loss)\nI0818 22:48:29.382382 21769 solver.cpp:228] Iteration 7800, loss = 0.0410499\nI0818 22:48:29.382429 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 22:48:29.382446 21769 solver.cpp:244]     Train net output #1: loss = 0.0410497 (* 1 = 0.0410497 loss)\nI0818 22:48:29.470525 21769 sgd_solver.cpp:166] Iteration 7800, lr = 0.195\nI0818 22:52:08.322198 21769 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0818 22:54:20.965584 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8025\nI0818 22:54:20.965940 21769 
solver.cpp:404]     Test net output #1: loss = 0.96273 (* 1 = 0.96273 loss)\nI0818 22:54:23.095199 21769 solver.cpp:228] Iteration 7900, loss = 0.102934\nI0818 22:54:23.095247 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 22:54:23.095263 21769 solver.cpp:244]     Train net output #1: loss = 0.102934 (* 1 = 0.102934 loss)\nI0818 22:54:23.183452 21769 sgd_solver.cpp:166] Iteration 7900, lr = 0.1975\nI0818 22:58:01.993118 21769 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0818 23:00:14.649243 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7469\nI0818 23:00:14.649567 21769 solver.cpp:404]     Test net output #1: loss = 1.31752 (* 1 = 1.31752 loss)\nI0818 23:00:16.779450 21769 solver.cpp:228] Iteration 8000, loss = 0.164637\nI0818 23:00:16.779496 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:00:16.779512 21769 solver.cpp:244]     Train net output #1: loss = 0.164637 (* 1 = 0.164637 loss)\nI0818 23:00:16.872555 21769 sgd_solver.cpp:166] Iteration 8000, lr = 0.2\nI0818 23:03:55.948020 21769 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0818 23:06:08.613795 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7573\nI0818 23:06:08.614131 21769 solver.cpp:404]     Test net output #1: loss = 1.39233 (* 1 = 1.39233 loss)\nI0818 23:06:10.744320 21769 solver.cpp:228] Iteration 8100, loss = 0.0428497\nI0818 23:06:10.744379 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0818 23:06:10.744395 21769 solver.cpp:244]     Train net output #1: loss = 0.0428496 (* 1 = 0.0428496 loss)\nI0818 23:06:10.827505 21769 sgd_solver.cpp:166] Iteration 8100, lr = 0.2025\nI0818 23:09:49.735342 21769 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0818 23:12:02.396983 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8354\nI0818 23:12:02.397303 21769 solver.cpp:404]     Test net output #1: loss = 0.732629 (* 1 = 0.732629 loss)\nI0818 23:12:04.525858 21769 solver.cpp:228] Iteration 8200, loss = 
0.0634206\nI0818 23:12:04.525902 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 23:12:04.525918 21769 solver.cpp:244]     Train net output #1: loss = 0.0634204 (* 1 = 0.0634204 loss)\nI0818 23:12:04.616616 21769 sgd_solver.cpp:166] Iteration 8200, lr = 0.205\nI0818 23:15:43.509420 21769 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0818 23:17:56.153383 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8159\nI0818 23:17:56.153726 21769 solver.cpp:404]     Test net output #1: loss = 0.931317 (* 1 = 0.931317 loss)\nI0818 23:17:58.282685 21769 solver.cpp:228] Iteration 8300, loss = 0.0269011\nI0818 23:17:58.282732 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 23:17:58.282747 21769 solver.cpp:244]     Train net output #1: loss = 0.0269009 (* 1 = 0.0269009 loss)\nI0818 23:17:58.367095 21769 sgd_solver.cpp:166] Iteration 8300, lr = 0.2075\nI0818 23:21:37.372679 21769 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0818 23:23:50.076232 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8267\nI0818 23:23:50.076627 21769 solver.cpp:404]     Test net output #1: loss = 0.695874 (* 1 = 0.695874 loss)\nI0818 23:23:52.207217 21769 solver.cpp:228] Iteration 8400, loss = 0.117849\nI0818 23:23:52.207264 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:23:52.207280 21769 solver.cpp:244]     Train net output #1: loss = 0.117849 (* 1 = 0.117849 loss)\nI0818 23:23:52.300580 21769 sgd_solver.cpp:166] Iteration 8400, lr = 0.21\nI0818 23:27:31.454120 21769 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0818 23:29:44.171579 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8367\nI0818 23:29:44.171936 21769 solver.cpp:404]     Test net output #1: loss = 0.716998 (* 1 = 0.716998 loss)\nI0818 23:29:46.301542 21769 solver.cpp:228] Iteration 8500, loss = 0.101571\nI0818 23:29:46.301590 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 23:29:46.301606 21769 
solver.cpp:244]     Train net output #1: loss = 0.101571 (* 1 = 0.101571 loss)\nI0818 23:29:46.391331 21769 sgd_solver.cpp:166] Iteration 8500, lr = 0.2125\nI0818 23:33:25.609058 21769 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0818 23:35:38.314824 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8214\nI0818 23:35:38.315188 21769 solver.cpp:404]     Test net output #1: loss = 0.797061 (* 1 = 0.797061 loss)\nI0818 23:35:40.445529 21769 solver.cpp:228] Iteration 8600, loss = 0.0566359\nI0818 23:35:40.445577 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 23:35:40.445593 21769 solver.cpp:244]     Train net output #1: loss = 0.0566356 (* 1 = 0.0566356 loss)\nI0818 23:35:40.539491 21769 sgd_solver.cpp:166] Iteration 8600, lr = 0.215\nI0818 23:39:19.823966 21769 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0818 23:41:32.549790 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8312\nI0818 23:41:32.550184 21769 solver.cpp:404]     Test net output #1: loss = 0.728876 (* 1 = 0.728876 loss)\nI0818 23:41:34.680485 21769 solver.cpp:228] Iteration 8700, loss = 0.13228\nI0818 23:41:34.680531 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0818 23:41:34.680548 21769 solver.cpp:244]     Train net output #1: loss = 0.13228 (* 1 = 0.13228 loss)\nI0818 23:41:34.774181 21769 sgd_solver.cpp:166] Iteration 8700, lr = 0.2175\nI0818 23:45:13.622606 21769 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0818 23:47:26.361009 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7718\nI0818 23:47:26.361397 21769 solver.cpp:404]     Test net output #1: loss = 1.10476 (* 1 = 1.10476 loss)\nI0818 23:47:28.491483 21769 solver.cpp:228] Iteration 8800, loss = 0.0324829\nI0818 23:47:28.491530 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0818 23:47:28.491545 21769 solver.cpp:244]     Train net output #1: loss = 0.0324826 (* 1 = 0.0324826 loss)\nI0818 23:47:28.574524 21769 sgd_solver.cpp:166] Iteration 
8800, lr = 0.22\nI0818 23:51:07.159889 21769 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0818 23:53:19.912485 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8232\nI0818 23:53:19.912848 21769 solver.cpp:404]     Test net output #1: loss = 0.81947 (* 1 = 0.81947 loss)\nI0818 23:53:22.043416 21769 solver.cpp:228] Iteration 8900, loss = 0.0979596\nI0818 23:53:22.043464 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0818 23:53:22.043480 21769 solver.cpp:244]     Train net output #1: loss = 0.0979593 (* 1 = 0.0979593 loss)\nI0818 23:53:22.133030 21769 sgd_solver.cpp:166] Iteration 8900, lr = 0.2225\nI0818 23:57:00.739852 21769 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0818 23:59:13.480957 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8396\nI0818 23:59:13.481331 21769 solver.cpp:404]     Test net output #1: loss = 0.710019 (* 1 = 0.710019 loss)\nI0818 23:59:15.611935 21769 solver.cpp:228] Iteration 9000, loss = 0.181899\nI0818 23:59:15.611984 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:59:15.611999 21769 solver.cpp:244]     Train net output #1: loss = 0.181898 (* 1 = 0.181898 loss)\nI0818 23:59:15.694447 21769 sgd_solver.cpp:166] Iteration 9000, lr = 0.225\nI0819 00:02:54.281116 21769 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0819 00:05:07.013577 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7732\nI0819 00:05:07.013958 21769 solver.cpp:404]     Test net output #1: loss = 1.19253 (* 1 = 1.19253 loss)\nI0819 00:05:09.144107 21769 solver.cpp:228] Iteration 9100, loss = 0.0763177\nI0819 00:05:09.144156 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:05:09.144172 21769 solver.cpp:244]     Train net output #1: loss = 0.0763174 (* 1 = 0.0763174 loss)\nI0819 00:05:09.232832 21769 sgd_solver.cpp:166] Iteration 9100, lr = 0.2275\nI0819 00:08:48.112746 21769 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0819 00:11:00.840574 21769 solver.cpp:404] 
    Test net output #0: accuracy = 0.8186\nI0819 00:11:00.840942 21769 solver.cpp:404]     Test net output #1: loss = 0.815154 (* 1 = 0.815154 loss)\nI0819 00:11:02.970788 21769 solver.cpp:228] Iteration 9200, loss = 0.132032\nI0819 00:11:02.970830 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:11:02.970845 21769 solver.cpp:244]     Train net output #1: loss = 0.132032 (* 1 = 0.132032 loss)\nI0819 00:11:03.058668 21769 sgd_solver.cpp:166] Iteration 9200, lr = 0.23\nI0819 00:14:41.687608 21769 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0819 00:16:54.423758 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8418\nI0819 00:16:54.424120 21769 solver.cpp:404]     Test net output #1: loss = 0.661002 (* 1 = 0.661002 loss)\nI0819 00:16:56.554121 21769 solver.cpp:228] Iteration 9300, loss = 0.0517968\nI0819 00:16:56.554167 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 00:16:56.554183 21769 solver.cpp:244]     Train net output #1: loss = 0.0517966 (* 1 = 0.0517966 loss)\nI0819 00:16:56.638598 21769 sgd_solver.cpp:166] Iteration 9300, lr = 0.2325\nI0819 00:20:35.313567 21769 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0819 00:22:48.084533 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8122\nI0819 00:22:48.084935 21769 solver.cpp:404]     Test net output #1: loss = 0.859767 (* 1 = 0.859767 loss)\nI0819 00:22:50.215195 21769 solver.cpp:228] Iteration 9400, loss = 0.0104842\nI0819 00:22:50.215241 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:22:50.215257 21769 solver.cpp:244]     Train net output #1: loss = 0.0104839 (* 1 = 0.0104839 loss)\nI0819 00:22:50.297550 21769 sgd_solver.cpp:166] Iteration 9400, lr = 0.235\nI0819 00:26:28.936480 21769 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0819 00:28:41.700333 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8332\nI0819 00:28:41.700709 21769 solver.cpp:404]     Test net output #1: loss = 0.717489 (* 1 = 
0.717489 loss)\nI0819 00:28:43.830332 21769 solver.cpp:228] Iteration 9500, loss = 0.120126\nI0819 00:28:43.830380 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:28:43.830397 21769 solver.cpp:244]     Train net output #1: loss = 0.120125 (* 1 = 0.120125 loss)\nI0819 00:28:43.922202 21769 sgd_solver.cpp:166] Iteration 9500, lr = 0.2375\nI0819 00:32:22.583487 21769 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0819 00:34:35.360680 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7903\nI0819 00:34:35.361086 21769 solver.cpp:404]     Test net output #1: loss = 0.968206 (* 1 = 0.968206 loss)\nI0819 00:34:37.490344 21769 solver.cpp:228] Iteration 9600, loss = 0.0483699\nI0819 00:34:37.490388 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 00:34:37.490404 21769 solver.cpp:244]     Train net output #1: loss = 0.0483696 (* 1 = 0.0483696 loss)\nI0819 00:34:37.572036 21769 sgd_solver.cpp:166] Iteration 9600, lr = 0.24\nI0819 00:38:16.191296 21769 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0819 00:40:28.960954 21769 solver.cpp:404]     Test net output #0: accuracy = 0.793\nI0819 00:40:28.961318 21769 solver.cpp:404]     Test net output #1: loss = 0.885281 (* 1 = 0.885281 loss)\nI0819 00:40:31.091002 21769 solver.cpp:228] Iteration 9700, loss = 0.120393\nI0819 00:40:31.091048 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:40:31.091064 21769 solver.cpp:244]     Train net output #1: loss = 0.120393 (* 1 = 0.120393 loss)\nI0819 00:40:31.179076 21769 sgd_solver.cpp:166] Iteration 9700, lr = 0.2425\nI0819 00:44:09.675391 21769 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0819 00:46:21.275749 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8314\nI0819 00:46:21.276087 21769 solver.cpp:404]     Test net output #1: loss = 0.72903 (* 1 = 0.72903 loss)\nI0819 00:46:23.405540 21769 solver.cpp:228] Iteration 9800, loss = 0.0788951\nI0819 00:46:23.405586 21769 solver.cpp:244]     
Train net output #0: accuracy = 0.96\nI0819 00:46:23.405603 21769 solver.cpp:244]     Train net output #1: loss = 0.0788949 (* 1 = 0.0788949 loss)\nI0819 00:46:23.493149 21769 sgd_solver.cpp:166] Iteration 9800, lr = 0.245\nI0819 00:50:02.074020 21769 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0819 00:52:13.926787 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7882\nI0819 00:52:13.927140 21769 solver.cpp:404]     Test net output #1: loss = 1.01553 (* 1 = 1.01553 loss)\nI0819 00:52:16.061240 21769 solver.cpp:228] Iteration 9900, loss = 0.0601916\nI0819 00:52:16.061285 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 00:52:16.061301 21769 solver.cpp:244]     Train net output #1: loss = 0.0601914 (* 1 = 0.0601914 loss)\nI0819 00:52:16.139400 21769 sgd_solver.cpp:166] Iteration 9900, lr = 0.2475\nI0819 00:55:54.682338 21769 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0819 00:58:07.396122 21769 solver.cpp:404]     Test net output #0: accuracy = 0.774\nI0819 00:58:07.396502 21769 solver.cpp:404]     Test net output #1: loss = 1.27238 (* 1 = 1.27238 loss)\nI0819 00:58:09.526021 21769 solver.cpp:228] Iteration 10000, loss = 0.0863187\nI0819 00:58:09.526067 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 00:58:09.526083 21769 solver.cpp:244]     Train net output #1: loss = 0.0863184 (* 1 = 0.0863184 loss)\nI0819 00:58:09.615342 21769 sgd_solver.cpp:166] Iteration 10000, lr = 0.25\nI0819 01:01:48.323062 21769 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0819 01:04:01.030213 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7845\nI0819 01:04:01.030620 21769 solver.cpp:404]     Test net output #1: loss = 1.02966 (* 1 = 1.02966 loss)\nI0819 01:04:03.160012 21769 solver.cpp:228] Iteration 10100, loss = 0.0220061\nI0819 01:04:03.160058 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:04:03.160073 21769 solver.cpp:244]     Train net output #1: loss = 0.0220058 (* 1 = 
0.0220058 loss)\nI0819 01:04:03.251626 21769 sgd_solver.cpp:166] Iteration 10100, lr = 0.2525\nI0819 01:07:41.884001 21769 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0819 01:09:54.600261 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8378\nI0819 01:09:54.600641 21769 solver.cpp:404]     Test net output #1: loss = 0.747638 (* 1 = 0.747638 loss)\nI0819 01:09:56.731000 21769 solver.cpp:228] Iteration 10200, loss = 0.0524707\nI0819 01:09:56.731046 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 01:09:56.731062 21769 solver.cpp:244]     Train net output #1: loss = 0.0524705 (* 1 = 0.0524705 loss)\nI0819 01:09:56.812358 21769 sgd_solver.cpp:166] Iteration 10200, lr = 0.255\nI0819 01:13:35.514417 21769 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0819 01:15:48.234797 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8312\nI0819 01:15:48.235180 21769 solver.cpp:404]     Test net output #1: loss = 0.708369 (* 1 = 0.708369 loss)\nI0819 01:15:50.363823 21769 solver.cpp:228] Iteration 10300, loss = 0.0742242\nI0819 01:15:50.363875 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 01:15:50.363893 21769 solver.cpp:244]     Train net output #1: loss = 0.074224 (* 1 = 0.074224 loss)\nI0819 01:15:50.450618 21769 sgd_solver.cpp:166] Iteration 10300, lr = 0.2575\nI0819 01:19:29.035173 21769 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0819 01:21:41.759985 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7447\nI0819 01:21:41.760388 21769 solver.cpp:404]     Test net output #1: loss = 1.22283 (* 1 = 1.22283 loss)\nI0819 01:21:43.890143 21769 solver.cpp:228] Iteration 10400, loss = 0.102158\nI0819 01:21:43.890192 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 01:21:43.890208 21769 solver.cpp:244]     Train net output #1: loss = 0.102158 (* 1 = 0.102158 loss)\nI0819 01:21:43.977840 21769 sgd_solver.cpp:166] Iteration 10400, lr = 0.26\nI0819 01:25:22.651551 21769 
solver.cpp:337] Iteration 10500, Testing net (#0)\nI0819 01:27:35.347535 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7612\nI0819 01:27:35.347947 21769 solver.cpp:404]     Test net output #1: loss = 1.18469 (* 1 = 1.18469 loss)\nI0819 01:27:37.476822 21769 solver.cpp:228] Iteration 10500, loss = 0.0480618\nI0819 01:27:37.476872 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 01:27:37.476889 21769 solver.cpp:244]     Train net output #1: loss = 0.0480615 (* 1 = 0.0480615 loss)\nI0819 01:27:37.561444 21769 sgd_solver.cpp:166] Iteration 10500, lr = 0.2625\nI0819 01:31:16.198031 21769 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0819 01:33:28.891639 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6946\nI0819 01:33:28.892019 21769 solver.cpp:404]     Test net output #1: loss = 1.80931 (* 1 = 1.80931 loss)\nI0819 01:33:31.021901 21769 solver.cpp:228] Iteration 10600, loss = 0.154245\nI0819 01:33:31.021950 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:33:31.021965 21769 solver.cpp:244]     Train net output #1: loss = 0.154245 (* 1 = 0.154245 loss)\nI0819 01:33:31.105517 21769 sgd_solver.cpp:166] Iteration 10600, lr = 0.265\nI0819 01:37:09.594377 21769 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0819 01:39:22.298282 21769 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0819 01:39:22.298689 21769 solver.cpp:404]     Test net output #1: loss = 0.842885 (* 1 = 0.842885 loss)\nI0819 01:39:24.428918 21769 solver.cpp:228] Iteration 10700, loss = 0.0264971\nI0819 01:39:24.428975 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:39:24.428992 21769 solver.cpp:244]     Train net output #1: loss = 0.0264969 (* 1 = 0.0264969 loss)\nI0819 01:39:24.519704 21769 sgd_solver.cpp:166] Iteration 10700, lr = 0.2675\nI0819 01:43:03.103453 21769 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0819 01:45:15.813343 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.8363\nI0819 01:45:15.813763 21769 solver.cpp:404]     Test net output #1: loss = 0.713718 (* 1 = 0.713718 loss)\nI0819 01:45:17.943541 21769 solver.cpp:228] Iteration 10800, loss = 0.0540768\nI0819 01:45:17.943585 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 01:45:17.943603 21769 solver.cpp:244]     Train net output #1: loss = 0.0540766 (* 1 = 0.0540766 loss)\nI0819 01:45:18.027501 21769 sgd_solver.cpp:166] Iteration 10800, lr = 0.27\nI0819 01:48:56.665998 21769 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0819 01:51:09.360935 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8333\nI0819 01:51:09.361302 21769 solver.cpp:404]     Test net output #1: loss = 0.709536 (* 1 = 0.709536 loss)\nI0819 01:51:11.491259 21769 solver.cpp:228] Iteration 10900, loss = 0.0808274\nI0819 01:51:11.491305 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 01:51:11.491322 21769 solver.cpp:244]     Train net output #1: loss = 0.0808272 (* 1 = 0.0808272 loss)\nI0819 01:51:11.574553 21769 sgd_solver.cpp:166] Iteration 10900, lr = 0.2725\nI0819 01:54:50.269598 21769 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0819 01:57:02.975698 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8449\nI0819 01:57:02.976076 21769 solver.cpp:404]     Test net output #1: loss = 0.686546 (* 1 = 0.686546 loss)\nI0819 01:57:05.106633 21769 solver.cpp:228] Iteration 11000, loss = 0.083488\nI0819 01:57:05.106683 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 01:57:05.106698 21769 solver.cpp:244]     Train net output #1: loss = 0.0834877 (* 1 = 0.0834877 loss)\nI0819 01:57:05.195449 21769 sgd_solver.cpp:166] Iteration 11000, lr = 0.275\nI0819 02:00:43.826932 21769 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0819 02:02:56.514828 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8303\nI0819 02:02:56.515240 21769 solver.cpp:404]     Test net output #1: loss = 0.741254 (* 1 = 0.741254 loss)\nI0819 
02:02:58.644692 21769 solver.cpp:228] Iteration 11100, loss = 0.0542352\nI0819 02:02:58.644739 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 02:02:58.644754 21769 solver.cpp:244]     Train net output #1: loss = 0.054235 (* 1 = 0.054235 loss)\nI0819 02:02:58.729290 21769 sgd_solver.cpp:166] Iteration 11100, lr = 0.2775\nI0819 02:06:37.387553 21769 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0819 02:08:50.106705 21769 solver.cpp:404]     Test net output #0: accuracy = 0.829\nI0819 02:08:50.107110 21769 solver.cpp:404]     Test net output #1: loss = 0.80218 (* 1 = 0.80218 loss)\nI0819 02:08:52.238435 21769 solver.cpp:228] Iteration 11200, loss = 0.115261\nI0819 02:08:52.238484 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:08:52.238500 21769 solver.cpp:244]     Train net output #1: loss = 0.115261 (* 1 = 0.115261 loss)\nI0819 02:08:52.317306 21769 sgd_solver.cpp:166] Iteration 11200, lr = 0.28\nI0819 02:12:30.895560 21769 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0819 02:14:42.227012 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8002\nI0819 02:14:42.227341 21769 solver.cpp:404]     Test net output #1: loss = 0.844698 (* 1 = 0.844698 loss)\nI0819 02:14:44.351366 21769 solver.cpp:228] Iteration 11300, loss = 0.0383429\nI0819 02:14:44.351415 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 02:14:44.351430 21769 solver.cpp:244]     Train net output #1: loss = 0.0383427 (* 1 = 0.0383427 loss)\nI0819 02:14:44.450696 21769 sgd_solver.cpp:166] Iteration 11300, lr = 0.2825\nI0819 02:18:22.751394 21769 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0819 02:20:34.072088 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI0819 02:20:34.072415 21769 solver.cpp:404]     Test net output #1: loss = 0.952368 (* 1 = 0.952368 loss)\nI0819 02:20:36.196316 21769 solver.cpp:228] Iteration 11400, loss = 0.0627106\nI0819 02:20:36.196365 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.99\nI0819 02:20:36.196382 21769 solver.cpp:244]     Train net output #1: loss = 0.0627103 (* 1 = 0.0627103 loss)\nI0819 02:20:36.291846 21769 sgd_solver.cpp:166] Iteration 11400, lr = 0.285\nI0819 02:24:14.549068 21769 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0819 02:26:25.859174 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8046\nI0819 02:26:25.859513 21769 solver.cpp:404]     Test net output #1: loss = 0.934792 (* 1 = 0.934792 loss)\nI0819 02:26:27.983029 21769 solver.cpp:228] Iteration 11500, loss = 0.0193404\nI0819 02:26:27.983078 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 02:26:27.983101 21769 solver.cpp:244]     Train net output #1: loss = 0.0193402 (* 1 = 0.0193402 loss)\nI0819 02:26:28.075103 21769 sgd_solver.cpp:166] Iteration 11500, lr = 0.2875\nI0819 02:30:06.427215 21769 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0819 02:32:17.727506 21769 solver.cpp:404]     Test net output #0: accuracy = 0.829\nI0819 02:32:17.727849 21769 solver.cpp:404]     Test net output #1: loss = 0.808501 (* 1 = 0.808501 loss)\nI0819 02:32:19.852025 21769 solver.cpp:228] Iteration 11600, loss = 0.0731544\nI0819 02:32:19.852075 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:32:19.852099 21769 solver.cpp:244]     Train net output #1: loss = 0.0731541 (* 1 = 0.0731541 loss)\nI0819 02:32:19.954746 21769 sgd_solver.cpp:166] Iteration 11600, lr = 0.29\nI0819 02:35:58.388447 21769 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0819 02:38:09.670415 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7899\nI0819 02:38:09.670750 21769 solver.cpp:404]     Test net output #1: loss = 0.940307 (* 1 = 0.940307 loss)\nI0819 02:38:11.794422 21769 solver.cpp:228] Iteration 11700, loss = 0.0894305\nI0819 02:38:11.794471 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:38:11.794487 21769 solver.cpp:244]     Train net output #1: loss = 0.0894302 (* 1 = 
0.0894302 loss)\nI0819 02:38:11.883456 21769 sgd_solver.cpp:166] Iteration 11700, lr = 0.2925\nI0819 02:41:50.174904 21769 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0819 02:44:01.434171 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7701\nI0819 02:44:01.434527 21769 solver.cpp:404]     Test net output #1: loss = 1.06707 (* 1 = 1.06707 loss)\nI0819 02:44:03.558547 21769 solver.cpp:228] Iteration 11800, loss = 0.0311559\nI0819 02:44:03.558594 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 02:44:03.558611 21769 solver.cpp:244]     Train net output #1: loss = 0.0311555 (* 1 = 0.0311555 loss)\nI0819 02:44:03.652400 21769 sgd_solver.cpp:166] Iteration 11800, lr = 0.295\nI0819 02:47:41.850908 21769 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0819 02:49:53.118427 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8308\nI0819 02:49:53.118769 21769 solver.cpp:404]     Test net output #1: loss = 0.717123 (* 1 = 0.717123 loss)\nI0819 02:49:55.241447 21769 solver.cpp:228] Iteration 11900, loss = 0.025588\nI0819 02:49:55.241494 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 02:49:55.241510 21769 solver.cpp:244]     Train net output #1: loss = 0.0255877 (* 1 = 0.0255877 loss)\nI0819 02:49:55.332700 21769 sgd_solver.cpp:166] Iteration 11900, lr = 0.2975\nI0819 02:53:33.597676 21769 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0819 02:55:44.849721 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8493\nI0819 02:55:44.850026 21769 solver.cpp:404]     Test net output #1: loss = 0.629357 (* 1 = 0.629357 loss)\nI0819 02:55:46.973619 21769 solver.cpp:228] Iteration 12000, loss = 0.0920998\nI0819 02:55:46.973665 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 02:55:46.973681 21769 solver.cpp:244]     Train net output #1: loss = 0.0920994 (* 1 = 0.0920994 loss)\nI0819 02:55:47.061494 21769 sgd_solver.cpp:166] Iteration 12000, lr = 0.3\nI0819 02:59:25.309391 21769 
solver.cpp:337] Iteration 12100, Testing net (#0)\nI0819 03:01:36.591929 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7578\nI0819 03:01:36.592267 21769 solver.cpp:404]     Test net output #1: loss = 1.2365 (* 1 = 1.2365 loss)\nI0819 03:01:38.716529 21769 solver.cpp:228] Iteration 12100, loss = 0.112794\nI0819 03:01:38.716579 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 03:01:38.716603 21769 solver.cpp:244]     Train net output #1: loss = 0.112793 (* 1 = 0.112793 loss)\nI0819 03:01:38.812855 21769 sgd_solver.cpp:166] Iteration 12100, lr = 0.3025\nI0819 03:05:17.116138 21769 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0819 03:07:28.394776 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8385\nI0819 03:07:28.395123 21769 solver.cpp:404]     Test net output #1: loss = 0.708124 (* 1 = 0.708124 loss)\nI0819 03:07:30.518903 21769 solver.cpp:228] Iteration 12200, loss = 0.0186508\nI0819 03:07:30.518954 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 03:07:30.518980 21769 solver.cpp:244]     Train net output #1: loss = 0.0186505 (* 1 = 0.0186505 loss)\nI0819 03:07:30.608778 21769 sgd_solver.cpp:166] Iteration 12200, lr = 0.305\nI0819 03:11:08.832885 21769 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0819 03:13:20.151113 21769 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0819 03:13:20.151432 21769 solver.cpp:404]     Test net output #1: loss = 0.990583 (* 1 = 0.990583 loss)\nI0819 03:13:22.274370 21769 solver.cpp:228] Iteration 12300, loss = 0.0206822\nI0819 03:13:22.274416 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:13:22.274441 21769 solver.cpp:244]     Train net output #1: loss = 0.0206818 (* 1 = 0.0206818 loss)\nI0819 03:13:22.362989 21769 sgd_solver.cpp:166] Iteration 12300, lr = 0.3075\nI0819 03:17:00.603277 21769 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0819 03:19:11.914791 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.8424\nI0819 03:19:11.915143 21769 solver.cpp:404]     Test net output #1: loss = 0.674505 (* 1 = 0.674505 loss)\nI0819 03:19:14.038674 21769 solver.cpp:228] Iteration 12400, loss = 0.0234416\nI0819 03:19:14.038731 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 03:19:14.038756 21769 solver.cpp:244]     Train net output #1: loss = 0.0234412 (* 1 = 0.0234412 loss)\nI0819 03:19:14.132534 21769 sgd_solver.cpp:166] Iteration 12400, lr = 0.31\nI0819 03:22:52.481889 21769 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0819 03:25:03.807616 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8152\nI0819 03:25:03.807935 21769 solver.cpp:404]     Test net output #1: loss = 0.804579 (* 1 = 0.804579 loss)\nI0819 03:25:05.931835 21769 solver.cpp:228] Iteration 12500, loss = 0.0950199\nI0819 03:25:05.931885 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 03:25:05.931910 21769 solver.cpp:244]     Train net output #1: loss = 0.0950195 (* 1 = 0.0950195 loss)\nI0819 03:25:06.023061 21769 sgd_solver.cpp:166] Iteration 12500, lr = 0.3125\nI0819 03:28:44.257443 21769 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0819 03:30:55.595620 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI0819 03:30:55.595969 21769 solver.cpp:404]     Test net output #1: loss = 0.802843 (* 1 = 0.802843 loss)\nI0819 03:30:57.719419 21769 solver.cpp:228] Iteration 12600, loss = 0.0497762\nI0819 03:30:57.719465 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 03:30:57.719482 21769 solver.cpp:244]     Train net output #1: loss = 0.0497758 (* 1 = 0.0497758 loss)\nI0819 03:30:57.814872 21769 sgd_solver.cpp:166] Iteration 12600, lr = 0.315\nI0819 03:34:36.149325 21769 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0819 03:36:47.459748 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8379\nI0819 03:36:47.460052 21769 solver.cpp:404]     Test net output #1: loss = 0.740482 (* 1 = 0.740482 loss)\nI0819 
03:36:49.583386 21769 solver.cpp:228] Iteration 12700, loss = 0.0535777\nI0819 03:36:49.583436 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 03:36:49.583461 21769 solver.cpp:244]     Train net output #1: loss = 0.0535773 (* 1 = 0.0535773 loss)\nI0819 03:36:49.677229 21769 sgd_solver.cpp:166] Iteration 12700, lr = 0.3175\nI0819 03:40:27.916682 21769 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0819 03:42:39.221725 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8218\nI0819 03:42:39.222028 21769 solver.cpp:404]     Test net output #1: loss = 0.793346 (* 1 = 0.793346 loss)\nI0819 03:42:41.345836 21769 solver.cpp:228] Iteration 12800, loss = 0.0584911\nI0819 03:42:41.345890 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 03:42:41.345916 21769 solver.cpp:244]     Train net output #1: loss = 0.0584907 (* 1 = 0.0584907 loss)\nI0819 03:42:41.437283 21769 sgd_solver.cpp:166] Iteration 12800, lr = 0.32\nI0819 03:46:19.611346 21769 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0819 03:48:30.914738 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8097\nI0819 03:48:30.915042 21769 solver.cpp:404]     Test net output #1: loss = 0.922918 (* 1 = 0.922918 loss)\nI0819 03:48:33.038635 21769 solver.cpp:228] Iteration 12900, loss = 0.0557625\nI0819 03:48:33.038686 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 03:48:33.038712 21769 solver.cpp:244]     Train net output #1: loss = 0.0557621 (* 1 = 0.0557621 loss)\nI0819 03:48:33.130740 21769 sgd_solver.cpp:166] Iteration 12900, lr = 0.3225\nI0819 03:52:11.302582 21769 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0819 03:54:22.619793 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7785\nI0819 03:54:22.620120 21769 solver.cpp:404]     Test net output #1: loss = 1.03681 (* 1 = 1.03681 loss)\nI0819 03:54:24.743557 21769 solver.cpp:228] Iteration 13000, loss = 0.128942\nI0819 03:54:24.743608 21769 solver.cpp:244]     Train 
net output #0: accuracy = 0.96\nI0819 03:54:24.743633 21769 solver.cpp:244]     Train net output #1: loss = 0.128941 (* 1 = 0.128941 loss)\nI0819 03:54:24.836273 21769 sgd_solver.cpp:166] Iteration 13000, lr = 0.325\nI0819 03:58:03.088827 21769 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0819 04:00:14.394845 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8182\nI0819 04:00:14.395179 21769 solver.cpp:404]     Test net output #1: loss = 0.844355 (* 1 = 0.844355 loss)\nI0819 04:00:16.518905 21769 solver.cpp:228] Iteration 13100, loss = 0.150149\nI0819 04:00:16.518959 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:00:16.518983 21769 solver.cpp:244]     Train net output #1: loss = 0.150149 (* 1 = 0.150149 loss)\nI0819 04:00:16.615895 21769 sgd_solver.cpp:166] Iteration 13100, lr = 0.3275\nI0819 04:03:54.834276 21769 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0819 04:06:06.106586 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8145\nI0819 04:06:06.106920 21769 solver.cpp:404]     Test net output #1: loss = 0.842651 (* 1 = 0.842651 loss)\nI0819 04:06:08.228950 21769 solver.cpp:228] Iteration 13200, loss = 0.0489372\nI0819 04:06:08.228996 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 04:06:08.229013 21769 solver.cpp:244]     Train net output #1: loss = 0.0489368 (* 1 = 0.0489368 loss)\nI0819 04:06:08.319649 21769 sgd_solver.cpp:166] Iteration 13200, lr = 0.33\nI0819 04:09:46.551231 21769 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0819 04:11:57.856024 21769 solver.cpp:404]     Test net output #0: accuracy = 0.737\nI0819 04:11:57.856348 21769 solver.cpp:404]     Test net output #1: loss = 1.20916 (* 1 = 1.20916 loss)\nI0819 04:11:59.978801 21769 solver.cpp:228] Iteration 13300, loss = 0.0502144\nI0819 04:11:59.978847 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 04:11:59.978863 21769 solver.cpp:244]     Train net output #1: loss = 0.050214 (* 1 = 0.050214 
loss)\nI0819 04:12:00.079103 21769 sgd_solver.cpp:166] Iteration 13300, lr = 0.3325\nI0819 04:15:38.613008 21769 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0819 04:17:49.913856 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8284\nI0819 04:17:49.914191 21769 solver.cpp:404]     Test net output #1: loss = 0.758861 (* 1 = 0.758861 loss)\nI0819 04:17:52.037024 21769 solver.cpp:228] Iteration 13400, loss = 0.0168497\nI0819 04:17:52.037070 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:17:52.037086 21769 solver.cpp:244]     Train net output #1: loss = 0.0168493 (* 1 = 0.0168493 loss)\nI0819 04:17:52.131479 21769 sgd_solver.cpp:166] Iteration 13400, lr = 0.335\nI0819 04:21:30.248565 21769 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0819 04:23:41.549696 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI0819 04:23:41.550055 21769 solver.cpp:404]     Test net output #1: loss = 0.789351 (* 1 = 0.789351 loss)\nI0819 04:23:43.675827 21769 solver.cpp:228] Iteration 13500, loss = 0.0377586\nI0819 04:23:43.675876 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 04:23:43.675894 21769 solver.cpp:244]     Train net output #1: loss = 0.0377582 (* 1 = 0.0377582 loss)\nI0819 04:23:43.765342 21769 sgd_solver.cpp:166] Iteration 13500, lr = 0.3375\nI0819 04:27:21.930516 21769 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0819 04:29:33.247544 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7174\nI0819 04:29:33.247872 21769 solver.cpp:404]     Test net output #1: loss = 1.63908 (* 1 = 1.63908 loss)\nI0819 04:29:35.371012 21769 solver.cpp:228] Iteration 13600, loss = 0.0964897\nI0819 04:29:35.371229 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:29:35.371336 21769 solver.cpp:244]     Train net output #1: loss = 0.0964893 (* 1 = 0.0964893 loss)\nI0819 04:29:35.467542 21769 sgd_solver.cpp:166] Iteration 13600, lr = 0.34\nI0819 04:33:13.752077 21769 solver.cpp:337] 
Iteration 13700, Testing net (#0)\nI0819 04:35:25.064898 21769 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0819 04:35:25.065240 21769 solver.cpp:404]     Test net output #1: loss = 0.867665 (* 1 = 0.867665 loss)\nI0819 04:35:27.192590 21769 solver.cpp:228] Iteration 13700, loss = 0.0227638\nI0819 04:35:27.192638 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:35:27.192656 21769 solver.cpp:244]     Train net output #1: loss = 0.0227634 (* 1 = 0.0227634 loss)\nI0819 04:35:27.286960 21769 sgd_solver.cpp:166] Iteration 13700, lr = 0.3425\nI0819 04:39:05.550541 21769 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0819 04:41:16.876567 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 04:41:16.876865 21769 solver.cpp:404]     Test net output #1: loss = 0.741663 (* 1 = 0.741663 loss)\nI0819 04:41:19.001080 21769 solver.cpp:228] Iteration 13800, loss = 0.0253984\nI0819 04:41:19.001130 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 04:41:19.001155 21769 solver.cpp:244]     Train net output #1: loss = 0.025398 (* 1 = 0.025398 loss)\nI0819 04:41:19.090909 21769 sgd_solver.cpp:166] Iteration 13800, lr = 0.345\nI0819 04:44:57.376148 21769 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0819 04:47:08.721642 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8317\nI0819 04:47:08.721982 21769 solver.cpp:404]     Test net output #1: loss = 0.7521 (* 1 = 0.7521 loss)\nI0819 04:47:10.846398 21769 solver.cpp:228] Iteration 13900, loss = 0.142541\nI0819 04:47:10.846448 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 04:47:10.846472 21769 solver.cpp:244]     Train net output #1: loss = 0.142541 (* 1 = 0.142541 loss)\nI0819 04:47:10.937705 21769 sgd_solver.cpp:166] Iteration 13900, lr = 0.3475\nI0819 04:50:49.200053 21769 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0819 04:53:00.554798 21769 solver.cpp:404]     Test net output #0: accuracy = 0.85\nI0819 
04:53:00.555109 21769 solver.cpp:404]     Test net output #1: loss = 0.580618 (* 1 = 0.580618 loss)\nI0819 04:53:02.678711 21769 solver.cpp:228] Iteration 14000, loss = 0.0536721\nI0819 04:53:02.678763 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 04:53:02.678788 21769 solver.cpp:244]     Train net output #1: loss = 0.0536718 (* 1 = 0.0536718 loss)\nI0819 04:53:02.774590 21769 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0819 04:56:40.988179 21769 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0819 04:58:52.264106 21769 solver.cpp:404]     Test net output #0: accuracy = 0.775\nI0819 04:58:52.264444 21769 solver.cpp:404]     Test net output #1: loss = 1.07537 (* 1 = 1.07537 loss)\nI0819 04:58:54.387887 21769 solver.cpp:228] Iteration 14100, loss = 0.0424682\nI0819 04:58:54.387933 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 04:58:54.387948 21769 solver.cpp:244]     Train net output #1: loss = 0.0424678 (* 1 = 0.0424678 loss)\nI0819 04:58:54.481441 21769 sgd_solver.cpp:166] Iteration 14100, lr = 0.3525\nI0819 05:02:32.711467 21769 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0819 05:04:43.976994 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8179\nI0819 05:04:43.977291 21769 solver.cpp:404]     Test net output #1: loss = 0.855143 (* 1 = 0.855143 loss)\nI0819 05:04:46.100445 21769 solver.cpp:228] Iteration 14200, loss = 0.0584471\nI0819 05:04:46.100492 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 05:04:46.100508 21769 solver.cpp:244]     Train net output #1: loss = 0.0584467 (* 1 = 0.0584467 loss)\nI0819 05:04:46.197999 21769 sgd_solver.cpp:166] Iteration 14200, lr = 0.355\nI0819 05:08:24.393160 21769 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0819 05:10:35.663285 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8112\nI0819 05:10:35.663591 21769 solver.cpp:404]     Test net output #1: loss = 0.865455 (* 1 = 0.865455 loss)\nI0819 05:10:37.785912 
21769 solver.cpp:228] Iteration 14300, loss = 0.130907\nI0819 05:10:37.785959 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:10:37.785974 21769 solver.cpp:244]     Train net output #1: loss = 0.130907 (* 1 = 0.130907 loss)\nI0819 05:10:37.878487 21769 sgd_solver.cpp:166] Iteration 14300, lr = 0.3575\nI0819 05:14:16.220044 21769 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0819 05:16:27.500543 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8513\nI0819 05:16:27.500900 21769 solver.cpp:404]     Test net output #1: loss = 0.620977 (* 1 = 0.620977 loss)\nI0819 05:16:29.624727 21769 solver.cpp:228] Iteration 14400, loss = 0.177871\nI0819 05:16:29.624776 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 05:16:29.624801 21769 solver.cpp:244]     Train net output #1: loss = 0.17787 (* 1 = 0.17787 loss)\nI0819 05:16:29.719449 21769 sgd_solver.cpp:166] Iteration 14400, lr = 0.36\nI0819 05:20:08.233739 21769 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0819 05:22:19.531446 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI0819 05:22:19.531790 21769 solver.cpp:404]     Test net output #1: loss = 1.01595 (* 1 = 1.01595 loss)\nI0819 05:22:21.655479 21769 solver.cpp:228] Iteration 14500, loss = 0.0329345\nI0819 05:22:21.655530 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 05:22:21.655555 21769 solver.cpp:244]     Train net output #1: loss = 0.032934 (* 1 = 0.032934 loss)\nI0819 05:22:21.746196 21769 sgd_solver.cpp:166] Iteration 14500, lr = 0.3625\nI0819 05:25:59.924314 21769 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0819 05:28:11.234021 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI0819 05:28:11.234375 21769 solver.cpp:404]     Test net output #1: loss = 0.760755 (* 1 = 0.760755 loss)\nI0819 05:28:13.357728 21769 solver.cpp:228] Iteration 14600, loss = 0.0511574\nI0819 05:28:13.357779 21769 solver.cpp:244]     Train net output #0: accuracy 
= 0.97\nI0819 05:28:13.357805 21769 solver.cpp:244]     Train net output #1: loss = 0.051157 (* 1 = 0.051157 loss)\nI0819 05:28:13.455343 21769 sgd_solver.cpp:166] Iteration 14600, lr = 0.365\nI0819 05:31:51.540796 21769 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0819 05:34:02.877135 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8396\nI0819 05:34:02.877492 21769 solver.cpp:404]     Test net output #1: loss = 0.672492 (* 1 = 0.672492 loss)\nI0819 05:34:05.001754 21769 solver.cpp:228] Iteration 14700, loss = 0.0586982\nI0819 05:34:05.001806 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 05:34:05.001830 21769 solver.cpp:244]     Train net output #1: loss = 0.0586977 (* 1 = 0.0586977 loss)\nI0819 05:34:05.096774 21769 sgd_solver.cpp:166] Iteration 14700, lr = 0.3675\nI0819 05:37:43.445384 21769 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0819 05:39:54.771788 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8136\nI0819 05:39:54.772150 21769 solver.cpp:404]     Test net output #1: loss = 0.911878 (* 1 = 0.911878 loss)\nI0819 05:39:56.895433 21769 solver.cpp:228] Iteration 14800, loss = 0.054087\nI0819 05:39:56.895483 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 05:39:56.895509 21769 solver.cpp:244]     Train net output #1: loss = 0.0540866 (* 1 = 0.0540866 loss)\nI0819 05:39:56.990636 21769 sgd_solver.cpp:166] Iteration 14800, lr = 0.37\nI0819 05:43:35.242895 21769 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0819 05:45:46.561112 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8442\nI0819 05:45:46.561468 21769 solver.cpp:404]     Test net output #1: loss = 0.679336 (* 1 = 0.679336 loss)\nI0819 05:45:48.685389 21769 solver.cpp:228] Iteration 14900, loss = 0.0743994\nI0819 05:45:48.685439 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 05:45:48.685466 21769 solver.cpp:244]     Train net output #1: loss = 0.0743989 (* 1 = 0.0743989 loss)\nI0819 
05:45:48.775456 21769 sgd_solver.cpp:166] Iteration 14900, lr = 0.3725\nI0819 05:49:27.001180 21769 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0819 05:51:38.334692 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8397\nI0819 05:51:38.335042 21769 solver.cpp:404]     Test net output #1: loss = 0.725736 (* 1 = 0.725736 loss)\nI0819 05:51:40.459111 21769 solver.cpp:228] Iteration 15000, loss = 0.0347912\nI0819 05:51:40.459161 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 05:51:40.459187 21769 solver.cpp:244]     Train net output #1: loss = 0.0347907 (* 1 = 0.0347907 loss)\nI0819 05:51:40.561699 21769 sgd_solver.cpp:166] Iteration 15000, lr = 0.375\nI0819 05:55:18.845181 21769 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0819 05:57:30.178947 21769 solver.cpp:404]     Test net output #0: accuracy = 0.791\nI0819 05:57:30.179306 21769 solver.cpp:404]     Test net output #1: loss = 0.961182 (* 1 = 0.961182 loss)\nI0819 05:57:32.302448 21769 solver.cpp:228] Iteration 15100, loss = 0.109936\nI0819 05:57:32.302500 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 05:57:32.302525 21769 solver.cpp:244]     Train net output #1: loss = 0.109935 (* 1 = 0.109935 loss)\nI0819 05:57:32.407716 21769 sgd_solver.cpp:166] Iteration 15100, lr = 0.3775\nI0819 06:01:10.743412 21769 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0819 06:03:22.067967 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8259\nI0819 06:03:22.068330 21769 solver.cpp:404]     Test net output #1: loss = 0.721257 (* 1 = 0.721257 loss)\nI0819 06:03:24.192564 21769 solver.cpp:228] Iteration 15200, loss = 0.0943277\nI0819 06:03:24.192613 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 06:03:24.192638 21769 solver.cpp:244]     Train net output #1: loss = 0.0943273 (* 1 = 0.0943273 loss)\nI0819 06:03:24.292234 21769 sgd_solver.cpp:166] Iteration 15200, lr = 0.38\nI0819 06:07:02.592191 21769 solver.cpp:337] Iteration 
15300, Testing net (#0)\nI0819 06:09:13.929581 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7919\nI0819 06:09:13.929945 21769 solver.cpp:404]     Test net output #1: loss = 0.889699 (* 1 = 0.889699 loss)\nI0819 06:09:16.054313 21769 solver.cpp:228] Iteration 15300, loss = 0.0536032\nI0819 06:09:16.054366 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 06:09:16.054391 21769 solver.cpp:244]     Train net output #1: loss = 0.0536028 (* 1 = 0.0536028 loss)\nI0819 06:09:16.152325 21769 sgd_solver.cpp:166] Iteration 15300, lr = 0.3825\nI0819 06:12:54.651114 21769 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0819 06:15:05.983762 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8564\nI0819 06:15:05.984112 21769 solver.cpp:404]     Test net output #1: loss = 0.581505 (* 1 = 0.581505 loss)\nI0819 06:15:08.108706 21769 solver.cpp:228] Iteration 15400, loss = 0.103742\nI0819 06:15:08.108757 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:15:08.108783 21769 solver.cpp:244]     Train net output #1: loss = 0.103741 (* 1 = 0.103741 loss)\nI0819 06:15:08.206802 21769 sgd_solver.cpp:166] Iteration 15400, lr = 0.385\nI0819 06:18:46.443902 21769 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0819 06:20:57.751610 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8157\nI0819 06:20:57.751917 21769 solver.cpp:404]     Test net output #1: loss = 0.849278 (* 1 = 0.849278 loss)\nI0819 06:20:59.874512 21769 solver.cpp:228] Iteration 15500, loss = 0.0422642\nI0819 06:20:59.874560 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 06:20:59.874577 21769 solver.cpp:244]     Train net output #1: loss = 0.0422638 (* 1 = 0.0422638 loss)\nI0819 06:20:59.970418 21769 sgd_solver.cpp:166] Iteration 15500, lr = 0.3875\nI0819 06:24:38.224047 21769 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0819 06:26:49.548295 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8054\nI0819 
06:26:49.548636 21769 solver.cpp:404]     Test net output #1: loss = 0.88962 (* 1 = 0.88962 loss)\nI0819 06:26:51.672387 21769 solver.cpp:228] Iteration 15600, loss = 0.0775133\nI0819 06:26:51.672438 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:26:51.672463 21769 solver.cpp:244]     Train net output #1: loss = 0.0775128 (* 1 = 0.0775128 loss)\nI0819 06:26:51.759521 21769 sgd_solver.cpp:166] Iteration 15600, lr = 0.39\nI0819 06:30:30.010861 21769 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0819 06:32:41.347620 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7935\nI0819 06:32:41.347993 21769 solver.cpp:404]     Test net output #1: loss = 0.908196 (* 1 = 0.908196 loss)\nI0819 06:32:43.471009 21769 solver.cpp:228] Iteration 15700, loss = 0.043061\nI0819 06:32:43.471060 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 06:32:43.471084 21769 solver.cpp:244]     Train net output #1: loss = 0.0430605 (* 1 = 0.0430605 loss)\nI0819 06:32:43.565928 21769 sgd_solver.cpp:166] Iteration 15700, lr = 0.3925\nI0819 06:36:21.791790 21769 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0819 06:38:33.117372 21769 solver.cpp:404]     Test net output #0: accuracy = 0.864\nI0819 06:38:33.117918 21769 solver.cpp:404]     Test net output #1: loss = 0.537816 (* 1 = 0.537816 loss)\nI0819 06:38:35.240411 21769 solver.cpp:228] Iteration 15800, loss = 0.0405987\nI0819 06:38:35.240458 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 06:38:35.240483 21769 solver.cpp:244]     Train net output #1: loss = 0.0405983 (* 1 = 0.0405983 loss)\nI0819 06:38:35.333895 21769 sgd_solver.cpp:166] Iteration 15800, lr = 0.395\nI0819 06:42:13.613220 21769 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0819 06:44:24.932562 21769 solver.cpp:404]     Test net output #0: accuracy = 0.751799\nI0819 06:44:24.932862 21769 solver.cpp:404]     Test net output #1: loss = 1.13135 (* 1 = 1.13135 loss)\nI0819 06:44:27.056208 21769 
solver.cpp:228] Iteration 15900, loss = 0.0712415\nI0819 06:44:27.056255 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:44:27.056272 21769 solver.cpp:244]     Train net output #1: loss = 0.0712411 (* 1 = 0.0712411 loss)\nI0819 06:44:27.160275 21769 sgd_solver.cpp:166] Iteration 15900, lr = 0.3975\nI0819 06:48:05.702123 21769 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0819 06:50:17.028420 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8565\nI0819 06:50:17.028717 21769 solver.cpp:404]     Test net output #1: loss = 0.58532 (* 1 = 0.58532 loss)\nI0819 06:50:19.152272 21769 solver.cpp:228] Iteration 16000, loss = 0.0359474\nI0819 06:50:19.152319 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 06:50:19.152335 21769 solver.cpp:244]     Train net output #1: loss = 0.0359469 (* 1 = 0.0359469 loss)\nI0819 06:50:19.248978 21769 sgd_solver.cpp:166] Iteration 16000, lr = 0.4\nI0819 06:53:57.435472 21769 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0819 06:56:08.757252 21769 solver.cpp:404]     Test net output #0: accuracy = 0.803\nI0819 06:56:08.757593 21769 solver.cpp:404]     Test net output #1: loss = 0.896264 (* 1 = 0.896264 loss)\nI0819 06:56:10.880810 21769 solver.cpp:228] Iteration 16100, loss = 0.234707\nI0819 06:56:10.880856 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 06:56:10.880872 21769 solver.cpp:244]     Train net output #1: loss = 0.234707 (* 1 = 0.234707 loss)\nI0819 06:56:10.971354 21769 sgd_solver.cpp:166] Iteration 16100, lr = 0.4025\nI0819 06:59:49.180831 21769 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0819 07:02:00.488677 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8073\nI0819 07:02:00.489040 21769 solver.cpp:404]     Test net output #1: loss = 0.909814 (* 1 = 0.909814 loss)\nI0819 07:02:02.612262 21769 solver.cpp:228] Iteration 16200, loss = 0.0187287\nI0819 07:02:02.612313 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.99\nI0819 07:02:02.612329 21769 solver.cpp:244]     Train net output #1: loss = 0.0187283 (* 1 = 0.0187283 loss)\nI0819 07:02:02.706007 21769 sgd_solver.cpp:166] Iteration 16200, lr = 0.405\nI0819 07:05:40.878684 21769 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0819 07:07:52.181969 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7651\nI0819 07:07:52.182353 21769 solver.cpp:404]     Test net output #1: loss = 1.09481 (* 1 = 1.09481 loss)\nI0819 07:07:54.305747 21769 solver.cpp:228] Iteration 16300, loss = 0.0873823\nI0819 07:07:54.305795 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 07:07:54.305812 21769 solver.cpp:244]     Train net output #1: loss = 0.0873818 (* 1 = 0.0873818 loss)\nI0819 07:07:54.397819 21769 sgd_solver.cpp:166] Iteration 16300, lr = 0.4075\nI0819 07:11:32.889226 21769 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0819 07:13:44.147088 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8254\nI0819 07:13:44.147439 21769 solver.cpp:404]     Test net output #1: loss = 0.861741 (* 1 = 0.861741 loss)\nI0819 07:13:46.270277 21769 solver.cpp:228] Iteration 16400, loss = 0.0418902\nI0819 07:13:46.270323 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 07:13:46.270340 21769 solver.cpp:244]     Train net output #1: loss = 0.0418897 (* 1 = 0.0418897 loss)\nI0819 07:13:46.363714 21769 sgd_solver.cpp:166] Iteration 16400, lr = 0.41\nI0819 07:17:24.699944 21769 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0819 07:19:35.953353 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8304\nI0819 07:19:35.953696 21769 solver.cpp:404]     Test net output #1: loss = 0.709152 (* 1 = 0.709152 loss)\nI0819 07:19:38.076393 21769 solver.cpp:228] Iteration 16500, loss = 0.132008\nI0819 07:19:38.076439 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:19:38.076457 21769 solver.cpp:244]     Train net output #1: loss = 0.132007 (* 1 = 0.132007 loss)\nI0819 
07:19:38.168318 21769 sgd_solver.cpp:166] Iteration 16500, lr = 0.4125\nI0819 07:23:16.616372 21769 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0819 07:25:27.882993 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8071\nI0819 07:25:27.883337 21769 solver.cpp:404]     Test net output #1: loss = 0.937883 (* 1 = 0.937883 loss)\nI0819 07:25:30.005933 21769 solver.cpp:228] Iteration 16600, loss = 0.0296704\nI0819 07:25:30.005980 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 07:25:30.005997 21769 solver.cpp:244]     Train net output #1: loss = 0.02967 (* 1 = 0.02967 loss)\nI0819 07:25:30.104285 21769 sgd_solver.cpp:166] Iteration 16600, lr = 0.415\nI0819 07:29:08.352334 21769 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0819 07:31:19.607797 21769 solver.cpp:404]     Test net output #0: accuracy = 0.838\nI0819 07:31:19.608150 21769 solver.cpp:404]     Test net output #1: loss = 0.741366 (* 1 = 0.741366 loss)\nI0819 07:31:21.731423 21769 solver.cpp:228] Iteration 16700, loss = 0.0565306\nI0819 07:31:21.731469 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 07:31:21.731485 21769 solver.cpp:244]     Train net output #1: loss = 0.0565302 (* 1 = 0.0565302 loss)\nI0819 07:31:21.834805 21769 sgd_solver.cpp:166] Iteration 16700, lr = 0.4175\nI0819 07:35:00.036072 21769 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0819 07:37:11.282114 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8489\nI0819 07:37:11.282457 21769 solver.cpp:404]     Test net output #1: loss = 0.592187 (* 1 = 0.592187 loss)\nI0819 07:37:13.405704 21769 solver.cpp:228] Iteration 16800, loss = 0.0413638\nI0819 07:37:13.405751 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 07:37:13.405766 21769 solver.cpp:244]     Train net output #1: loss = 0.0413634 (* 1 = 0.0413634 loss)\nI0819 07:37:13.502301 21769 sgd_solver.cpp:166] Iteration 16800, lr = 0.42\nI0819 07:40:51.703436 21769 solver.cpp:337] Iteration 
16900, Testing net (#0)\nI0819 07:43:02.953379 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8441\nI0819 07:43:02.953737 21769 solver.cpp:404]     Test net output #1: loss = 0.658947 (* 1 = 0.658947 loss)\nI0819 07:43:05.077111 21769 solver.cpp:228] Iteration 16900, loss = 0.0658817\nI0819 07:43:05.077157 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:43:05.077174 21769 solver.cpp:244]     Train net output #1: loss = 0.0658813 (* 1 = 0.0658813 loss)\nI0819 07:43:05.164844 21769 sgd_solver.cpp:166] Iteration 16900, lr = 0.4225\nI0819 07:46:43.347699 21769 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0819 07:48:54.643807 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7575\nI0819 07:48:54.644148 21769 solver.cpp:404]     Test net output #1: loss = 1.24381 (* 1 = 1.24381 loss)\nI0819 07:48:56.767220 21769 solver.cpp:228] Iteration 17000, loss = 0.0754244\nI0819 07:48:56.767263 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 07:48:56.767280 21769 solver.cpp:244]     Train net output #1: loss = 0.075424 (* 1 = 0.075424 loss)\nI0819 07:48:56.860477 21769 sgd_solver.cpp:166] Iteration 17000, lr = 0.425\nI0819 07:52:35.103602 21769 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0819 07:54:46.403990 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8536\nI0819 07:54:46.404335 21769 solver.cpp:404]     Test net output #1: loss = 0.608753 (* 1 = 0.608753 loss)\nI0819 07:54:48.529789 21769 solver.cpp:228] Iteration 17100, loss = 0.0470165\nI0819 07:54:48.529837 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 07:54:48.529855 21769 solver.cpp:244]     Train net output #1: loss = 0.047016 (* 1 = 0.047016 loss)\nI0819 07:54:48.623245 21769 sgd_solver.cpp:166] Iteration 17100, lr = 0.4275\nI0819 07:58:26.842634 21769 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0819 08:00:38.146802 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8221\nI0819 
08:00:38.147150 21769 solver.cpp:404]     Test net output #1: loss = 0.784559 (* 1 = 0.784559 loss)\nI0819 08:00:40.269804 21769 solver.cpp:228] Iteration 17200, loss = 0.0428359\nI0819 08:00:40.269850 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:00:40.269866 21769 solver.cpp:244]     Train net output #1: loss = 0.0428355 (* 1 = 0.0428355 loss)\nI0819 08:00:40.366677 21769 sgd_solver.cpp:166] Iteration 17200, lr = 0.43\nI0819 08:04:18.606863 21769 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0819 08:06:29.892103 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7944\nI0819 08:06:29.892451 21769 solver.cpp:404]     Test net output #1: loss = 0.940334 (* 1 = 0.940334 loss)\nI0819 08:06:32.015123 21769 solver.cpp:228] Iteration 17300, loss = 0.0442727\nI0819 08:06:32.015171 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:06:32.015187 21769 solver.cpp:244]     Train net output #1: loss = 0.0442722 (* 1 = 0.0442722 loss)\nI0819 08:06:32.105665 21769 sgd_solver.cpp:166] Iteration 17300, lr = 0.4325\nI0819 08:10:10.332844 21769 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0819 08:12:21.635648 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8527\nI0819 08:12:21.635998 21769 solver.cpp:404]     Test net output #1: loss = 0.626753 (* 1 = 0.626753 loss)\nI0819 08:12:23.759341 21769 solver.cpp:228] Iteration 17400, loss = 0.0453233\nI0819 08:12:23.759388 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:12:23.759405 21769 solver.cpp:244]     Train net output #1: loss = 0.0453229 (* 1 = 0.0453229 loss)\nI0819 08:12:23.854933 21769 sgd_solver.cpp:166] Iteration 17400, lr = 0.435\nI0819 08:16:02.026309 21769 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0819 08:18:13.340781 21769 solver.cpp:404]     Test net output #0: accuracy = 0.781\nI0819 08:18:13.341115 21769 solver.cpp:404]     Test net output #1: loss = 0.942365 (* 1 = 0.942365 loss)\nI0819 08:18:15.463909 
21769 solver.cpp:228] Iteration 17500, loss = 0.0447788\nI0819 08:18:15.463956 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:18:15.463973 21769 solver.cpp:244]     Train net output #1: loss = 0.0447783 (* 1 = 0.0447783 loss)\nI0819 08:18:15.553227 21769 sgd_solver.cpp:166] Iteration 17500, lr = 0.4375\nI0819 08:21:53.792812 21769 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0819 08:24:05.088448 21769 solver.cpp:404]     Test net output #0: accuracy = 0.831\nI0819 08:24:05.088796 21769 solver.cpp:404]     Test net output #1: loss = 0.779514 (* 1 = 0.779514 loss)\nI0819 08:24:07.212719 21769 solver.cpp:228] Iteration 17600, loss = 0.0537699\nI0819 08:24:07.212769 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 08:24:07.212785 21769 solver.cpp:244]     Train net output #1: loss = 0.0537694 (* 1 = 0.0537694 loss)\nI0819 08:24:07.313799 21769 sgd_solver.cpp:166] Iteration 17600, lr = 0.44\nI0819 08:27:45.498456 21769 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0819 08:29:56.785091 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8074\nI0819 08:29:56.785423 21769 solver.cpp:404]     Test net output #1: loss = 0.824989 (* 1 = 0.824989 loss)\nI0819 08:29:58.907925 21769 solver.cpp:228] Iteration 17700, loss = 0.0616091\nI0819 08:29:58.907974 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:29:58.907990 21769 solver.cpp:244]     Train net output #1: loss = 0.0616086 (* 1 = 0.0616086 loss)\nI0819 08:29:58.996479 21769 sgd_solver.cpp:166] Iteration 17700, lr = 0.4425\nI0819 08:33:37.242691 21769 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0819 08:35:48.502528 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8558\nI0819 08:35:48.502854 21769 solver.cpp:404]     Test net output #1: loss = 0.57144 (* 1 = 0.57144 loss)\nI0819 08:35:50.625434 21769 solver.cpp:228] Iteration 17800, loss = 0.137501\nI0819 08:35:50.625483 21769 solver.cpp:244]     Train net output #0: 
accuracy = 0.96\nI0819 08:35:50.625499 21769 solver.cpp:244]     Train net output #1: loss = 0.1375 (* 1 = 0.1375 loss)\nI0819 08:35:50.715020 21769 sgd_solver.cpp:166] Iteration 17800, lr = 0.445\nI0819 08:39:28.867841 21769 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0819 08:41:40.134742 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8371\nI0819 08:41:40.135072 21769 solver.cpp:404]     Test net output #1: loss = 0.667805 (* 1 = 0.667805 loss)\nI0819 08:41:42.257500 21769 solver.cpp:228] Iteration 17900, loss = 0.149113\nI0819 08:41:42.257545 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 08:41:42.257561 21769 solver.cpp:244]     Train net output #1: loss = 0.149113 (* 1 = 0.149113 loss)\nI0819 08:41:42.353464 21769 sgd_solver.cpp:166] Iteration 17900, lr = 0.4475\nI0819 08:45:20.493348 21769 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0819 08:47:31.763423 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8272\nI0819 08:47:31.763763 21769 solver.cpp:404]     Test net output #1: loss = 0.701972 (* 1 = 0.701972 loss)\nI0819 08:47:33.892452 21769 solver.cpp:228] Iteration 18000, loss = 0.0576021\nI0819 08:47:33.892499 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 08:47:33.892515 21769 solver.cpp:244]     Train net output #1: loss = 0.0576016 (* 1 = 0.0576016 loss)\nI0819 08:47:33.979547 21769 sgd_solver.cpp:166] Iteration 18000, lr = 0.45\nI0819 08:51:12.109498 21769 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0819 08:53:23.370893 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7922\nI0819 08:53:23.371232 21769 solver.cpp:404]     Test net output #1: loss = 1.02267 (* 1 = 1.02267 loss)\nI0819 08:53:25.499670 21769 solver.cpp:228] Iteration 18100, loss = 0.0830858\nI0819 08:53:25.499719 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 08:53:25.499742 21769 solver.cpp:244]     Train net output #1: loss = 0.0830853 (* 1 = 0.0830853 loss)\nI0819 
08:53:25.589241 21769 sgd_solver.cpp:166] Iteration 18100, lr = 0.4525\nI0819 08:57:03.797729 21769 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0819 08:59:15.056103 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8085\nI0819 08:59:15.056421 21769 solver.cpp:404]     Test net output #1: loss = 0.88816 (* 1 = 0.88816 loss)\nI0819 08:59:17.185410 21769 solver.cpp:228] Iteration 18200, loss = 0.0188745\nI0819 08:59:17.185456 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 08:59:17.185473 21769 solver.cpp:244]     Train net output #1: loss = 0.018874 (* 1 = 0.018874 loss)\nI0819 08:59:17.272619 21769 sgd_solver.cpp:166] Iteration 18200, lr = 0.455\nI0819 09:02:55.385583 21769 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0819 09:05:06.649830 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8265\nI0819 09:05:06.650167 21769 solver.cpp:404]     Test net output #1: loss = 0.841236 (* 1 = 0.841236 loss)\nI0819 09:05:08.778847 21769 solver.cpp:228] Iteration 18300, loss = 0.0635417\nI0819 09:05:08.778894 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 09:05:08.778910 21769 solver.cpp:244]     Train net output #1: loss = 0.0635412 (* 1 = 0.0635412 loss)\nI0819 09:05:08.864601 21769 sgd_solver.cpp:166] Iteration 18300, lr = 0.4575\nI0819 09:08:47.142244 21769 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0819 09:10:58.420486 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8241\nI0819 09:10:58.420833 21769 solver.cpp:404]     Test net output #1: loss = 0.828384 (* 1 = 0.828384 loss)\nI0819 09:11:00.550307 21769 solver.cpp:228] Iteration 18400, loss = 0.100829\nI0819 09:11:00.550359 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:11:00.550384 21769 solver.cpp:244]     Train net output #1: loss = 0.100829 (* 1 = 0.100829 loss)\nI0819 09:11:00.641582 21769 sgd_solver.cpp:166] Iteration 18400, lr = 0.46\nI0819 09:14:38.866785 21769 solver.cpp:337] Iteration 
18500, Testing net (#0)\nI0819 09:16:50.133865 21769 solver.cpp:404]     Test net output #0: accuracy = 0.837\nI0819 09:16:50.134217 21769 solver.cpp:404]     Test net output #1: loss = 0.688409 (* 1 = 0.688409 loss)\nI0819 09:16:52.262357 21769 solver.cpp:228] Iteration 18500, loss = 0.0718568\nI0819 09:16:52.262404 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 09:16:52.262421 21769 solver.cpp:244]     Train net output #1: loss = 0.0718564 (* 1 = 0.0718564 loss)\nI0819 09:16:52.351289 21769 sgd_solver.cpp:166] Iteration 18500, lr = 0.4625\nI0819 09:20:30.494979 21769 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0819 09:22:41.754106 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8443\nI0819 09:22:41.754451 21769 solver.cpp:404]     Test net output #1: loss = 0.620872 (* 1 = 0.620872 loss)\nI0819 09:22:43.883505 21769 solver.cpp:228] Iteration 18600, loss = 0.114993\nI0819 09:22:43.883553 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 09:22:43.883569 21769 solver.cpp:244]     Train net output #1: loss = 0.114992 (* 1 = 0.114992 loss)\nI0819 09:22:43.969578 21769 sgd_solver.cpp:166] Iteration 18600, lr = 0.465\nI0819 09:26:22.365072 21769 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0819 09:28:33.590785 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8343\nI0819 09:28:33.591123 21769 solver.cpp:404]     Test net output #1: loss = 0.752426 (* 1 = 0.752426 loss)\nI0819 09:28:35.720150 21769 solver.cpp:228] Iteration 18700, loss = 0.0793059\nI0819 09:28:35.720196 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:28:35.720213 21769 solver.cpp:244]     Train net output #1: loss = 0.0793055 (* 1 = 0.0793055 loss)\nI0819 09:28:35.810199 21769 sgd_solver.cpp:166] Iteration 18700, lr = 0.4675\nI0819 09:32:14.079586 21769 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0819 09:34:25.299049 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8222\nI0819 
09:34:25.299353 21769 solver.cpp:404]     Test net output #1: loss = 0.927124 (* 1 = 0.927124 loss)\nI0819 09:34:27.427755 21769 solver.cpp:228] Iteration 18800, loss = 0.0404818\nI0819 09:34:27.427803 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 09:34:27.427820 21769 solver.cpp:244]     Train net output #1: loss = 0.0404813 (* 1 = 0.0404813 loss)\nI0819 09:34:27.517714 21769 sgd_solver.cpp:166] Iteration 18800, lr = 0.47\nI0819 09:38:05.691114 21769 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0819 09:40:16.888449 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8117\nI0819 09:40:16.888800 21769 solver.cpp:404]     Test net output #1: loss = 0.760681 (* 1 = 0.760681 loss)\nI0819 09:40:19.017040 21769 solver.cpp:228] Iteration 18900, loss = 0.0614844\nI0819 09:40:19.017087 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 09:40:19.017103 21769 solver.cpp:244]     Train net output #1: loss = 0.0614839 (* 1 = 0.0614839 loss)\nI0819 09:40:19.105032 21769 sgd_solver.cpp:166] Iteration 18900, lr = 0.4725\nI0819 09:43:57.293970 21769 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0819 09:46:08.493463 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8065\nI0819 09:46:08.493813 21769 solver.cpp:404]     Test net output #1: loss = 0.878631 (* 1 = 0.878631 loss)\nI0819 09:46:10.622263 21769 solver.cpp:228] Iteration 19000, loss = 0.0487178\nI0819 09:46:10.622311 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 09:46:10.622328 21769 solver.cpp:244]     Train net output #1: loss = 0.0487174 (* 1 = 0.0487174 loss)\nI0819 09:46:10.707687 21769 sgd_solver.cpp:166] Iteration 19000, lr = 0.475\nI0819 09:49:49.006127 21769 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0819 09:52:00.219990 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8304\nI0819 09:52:00.220336 21769 solver.cpp:404]     Test net output #1: loss = 0.730144 (* 1 = 0.730144 loss)\nI0819 09:52:02.348086 
21769 solver.cpp:228] Iteration 19100, loss = 0.0521657\nI0819 09:52:02.348135 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 09:52:02.348151 21769 solver.cpp:244]     Train net output #1: loss = 0.0521653 (* 1 = 0.0521653 loss)\nI0819 09:52:02.441259 21769 sgd_solver.cpp:166] Iteration 19100, lr = 0.4775\nI0819 09:55:40.773069 21769 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0819 09:57:51.987169 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0819 09:57:51.987489 21769 solver.cpp:404]     Test net output #1: loss = 0.497939 (* 1 = 0.497939 loss)\nI0819 09:57:54.116566 21769 solver.cpp:228] Iteration 19200, loss = 0.0980529\nI0819 09:57:54.116613 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 09:57:54.116631 21769 solver.cpp:244]     Train net output #1: loss = 0.0980525 (* 1 = 0.0980525 loss)\nI0819 09:57:54.211261 21769 sgd_solver.cpp:166] Iteration 19200, lr = 0.48\nI0819 10:01:32.332026 21769 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0819 10:03:43.597009 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8329\nI0819 10:03:43.597349 21769 solver.cpp:404]     Test net output #1: loss = 0.767725 (* 1 = 0.767725 loss)\nI0819 10:03:45.726491 21769 solver.cpp:228] Iteration 19300, loss = 0.0747713\nI0819 10:03:45.726538 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 10:03:45.726555 21769 solver.cpp:244]     Train net output #1: loss = 0.0747709 (* 1 = 0.0747709 loss)\nI0819 10:03:45.810524 21769 sgd_solver.cpp:166] Iteration 19300, lr = 0.4825\nI0819 10:07:24.001824 21769 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0819 10:09:35.276166 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8474\nI0819 10:09:35.276496 21769 solver.cpp:404]     Test net output #1: loss = 0.642976 (* 1 = 0.642976 loss)\nI0819 10:09:37.405547 21769 solver.cpp:228] Iteration 19400, loss = 0.0902825\nI0819 10:09:37.405593 21769 solver.cpp:244]     Train net output 
#0: accuracy = 0.96\nI0819 10:09:37.405611 21769 solver.cpp:244]     Train net output #1: loss = 0.090282 (* 1 = 0.090282 loss)\nI0819 10:09:37.493345 21769 sgd_solver.cpp:166] Iteration 19400, lr = 0.485\nI0819 10:13:15.730293 21769 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0819 10:15:26.980517 21769 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0819 10:15:26.980865 21769 solver.cpp:404]     Test net output #1: loss = 0.90893 (* 1 = 0.90893 loss)\nI0819 10:15:29.109115 21769 solver.cpp:228] Iteration 19500, loss = 0.141613\nI0819 10:15:29.109161 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:15:29.109179 21769 solver.cpp:244]     Train net output #1: loss = 0.141613 (* 1 = 0.141613 loss)\nI0819 10:15:29.198004 21769 sgd_solver.cpp:166] Iteration 19500, lr = 0.4875\nI0819 10:19:07.427326 21769 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0819 10:21:18.687896 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8087\nI0819 10:21:18.688189 21769 solver.cpp:404]     Test net output #1: loss = 1.0076 (* 1 = 1.0076 loss)\nI0819 10:21:20.817405 21769 solver.cpp:228] Iteration 19600, loss = 0.0534685\nI0819 10:21:20.817452 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 10:21:20.817468 21769 solver.cpp:244]     Train net output #1: loss = 0.0534681 (* 1 = 0.0534681 loss)\nI0819 10:21:20.901427 21769 sgd_solver.cpp:166] Iteration 19600, lr = 0.49\nI0819 10:24:59.200382 21769 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0819 10:27:10.442365 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8516\nI0819 10:27:10.442672 21769 solver.cpp:404]     Test net output #1: loss = 0.593512 (* 1 = 0.593512 loss)\nI0819 10:27:12.571599 21769 solver.cpp:228] Iteration 19700, loss = 0.0402627\nI0819 10:27:12.571647 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 10:27:12.571663 21769 solver.cpp:244]     Train net output #1: loss = 0.0402623 (* 1 = 0.0402623 loss)\nI0819 
10:27:12.659500 21769 sgd_solver.cpp:166] Iteration 19700, lr = 0.4925\nI0819 10:30:50.888629 21769 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0819 10:33:02.144520 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8391\nI0819 10:33:02.144819 21769 solver.cpp:404]     Test net output #1: loss = 0.666962 (* 1 = 0.666962 loss)\nI0819 10:33:04.274106 21769 solver.cpp:228] Iteration 19800, loss = 0.0377128\nI0819 10:33:04.274153 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 10:33:04.274170 21769 solver.cpp:244]     Train net output #1: loss = 0.0377124 (* 1 = 0.0377124 loss)\nI0819 10:33:04.363518 21769 sgd_solver.cpp:166] Iteration 19800, lr = 0.495\nI0819 10:36:42.718135 21769 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0819 10:38:53.961771 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8305\nI0819 10:38:53.962136 21769 solver.cpp:404]     Test net output #1: loss = 0.738047 (* 1 = 0.738047 loss)\nI0819 10:38:56.092135 21769 solver.cpp:228] Iteration 19900, loss = 0.0796681\nI0819 10:38:56.092183 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:38:56.092209 21769 solver.cpp:244]     Train net output #1: loss = 0.0796677 (* 1 = 0.0796677 loss)\nI0819 10:38:56.177021 21769 sgd_solver.cpp:166] Iteration 19900, lr = 0.4975\nI0819 10:42:34.609185 21769 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0819 10:44:45.859427 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8049\nI0819 10:44:45.859791 21769 solver.cpp:404]     Test net output #1: loss = 0.834396 (* 1 = 0.834396 loss)\nI0819 10:44:47.989408 21769 solver.cpp:228] Iteration 20000, loss = 0.103996\nI0819 10:44:47.989456 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 10:44:47.989481 21769 solver.cpp:244]     Train net output #1: loss = 0.103995 (* 1 = 0.103995 loss)\nI0819 10:44:48.076284 21769 sgd_solver.cpp:166] Iteration 20000, lr = 0.5\nI0819 10:48:26.311439 21769 solver.cpp:337] Iteration 
20100, Testing net (#0)\nI0819 10:50:37.550195 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8222\nI0819 10:50:37.550578 21769 solver.cpp:404]     Test net output #1: loss = 0.787193 (* 1 = 0.787193 loss)\nI0819 10:50:39.673898 21769 solver.cpp:228] Iteration 20100, loss = 0.112033\nI0819 10:50:39.673949 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 10:50:39.673975 21769 solver.cpp:244]     Train net output #1: loss = 0.112032 (* 1 = 0.112032 loss)\nI0819 10:50:39.754097 21769 sgd_solver.cpp:166] Iteration 20100, lr = 0.5025\nI0819 10:54:17.351826 21769 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0819 10:56:28.599900 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8\nI0819 10:56:28.600221 21769 solver.cpp:404]     Test net output #1: loss = 0.909643 (* 1 = 0.909643 loss)\nI0819 10:56:30.723162 21769 solver.cpp:228] Iteration 20200, loss = 0.0360566\nI0819 10:56:30.723212 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 10:56:30.723237 21769 solver.cpp:244]     Train net output #1: loss = 0.0360562 (* 1 = 0.0360562 loss)\nI0819 10:56:30.816601 21769 sgd_solver.cpp:166] Iteration 20200, lr = 0.505\nI0819 11:00:08.640040 21769 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0819 11:02:19.860200 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8413\nI0819 11:02:19.860534 21769 solver.cpp:404]     Test net output #1: loss = 0.62641 (* 1 = 0.62641 loss)\nI0819 11:02:21.983714 21769 solver.cpp:228] Iteration 20300, loss = 0.0558366\nI0819 11:02:21.983763 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 11:02:21.983780 21769 solver.cpp:244]     Train net output #1: loss = 0.0558362 (* 1 = 0.0558362 loss)\nI0819 11:02:22.071113 21769 sgd_solver.cpp:166] Iteration 20300, lr = 0.5075\nI0819 11:05:59.972209 21769 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0819 11:08:12.589761 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8469\nI0819 
11:08:12.590188 21769 solver.cpp:404]     Test net output #1: loss = 0.661378 (* 1 = 0.661378 loss)\nI0819 11:08:14.720005 21769 solver.cpp:228] Iteration 20400, loss = 0.0955859\nI0819 11:08:14.720052 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 11:08:14.720068 21769 solver.cpp:244]     Train net output #1: loss = 0.0955855 (* 1 = 0.0955855 loss)\nI0819 11:08:14.805817 21769 sgd_solver.cpp:166] Iteration 20400, lr = 0.51\nI0819 11:11:53.448802 21769 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0819 11:14:06.050621 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7298\nI0819 11:14:06.051023 21769 solver.cpp:404]     Test net output #1: loss = 1.46293 (* 1 = 1.46293 loss)\nI0819 11:14:08.180593 21769 solver.cpp:228] Iteration 20500, loss = 0.149623\nI0819 11:14:08.180637 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 11:14:08.180654 21769 solver.cpp:244]     Train net output #1: loss = 0.149622 (* 1 = 0.149622 loss)\nI0819 11:14:08.277760 21769 sgd_solver.cpp:166] Iteration 20500, lr = 0.5125\nI0819 11:17:47.028353 21769 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0819 11:19:59.647789 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8481\nI0819 11:19:59.648187 21769 solver.cpp:404]     Test net output #1: loss = 0.61019 (* 1 = 0.61019 loss)\nI0819 11:20:01.777521 21769 solver.cpp:228] Iteration 20600, loss = 0.0940505\nI0819 11:20:01.777570 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 11:20:01.777585 21769 solver.cpp:244]     Train net output #1: loss = 0.0940502 (* 1 = 0.0940502 loss)\nI0819 11:20:01.859735 21769 sgd_solver.cpp:166] Iteration 20600, lr = 0.515\nI0819 11:23:40.446820 21769 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0819 11:25:53.075917 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8338\nI0819 11:25:53.076308 21769 solver.cpp:404]     Test net output #1: loss = 0.64544 (* 1 = 0.64544 loss)\nI0819 11:25:55.206092 21769 
solver.cpp:228] Iteration 20700, loss = 0.085709\nI0819 11:25:55.206138 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 11:25:55.206154 21769 solver.cpp:244]     Train net output #1: loss = 0.0857087 (* 1 = 0.0857087 loss)\nI0819 11:25:55.294786 21769 sgd_solver.cpp:166] Iteration 20700, lr = 0.5175\nI0819 11:29:34.097810 21769 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0819 11:31:46.721259 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8537\nI0819 11:31:46.721675 21769 solver.cpp:404]     Test net output #1: loss = 0.602726 (* 1 = 0.602726 loss)\nI0819 11:31:48.852083 21769 solver.cpp:228] Iteration 20800, loss = 0.0512163\nI0819 11:31:48.852130 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 11:31:48.852147 21769 solver.cpp:244]     Train net output #1: loss = 0.0512159 (* 1 = 0.0512159 loss)\nI0819 11:31:48.933329 21769 sgd_solver.cpp:166] Iteration 20800, lr = 0.52\nI0819 11:35:27.540704 21769 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0819 11:37:40.165694 21769 solver.cpp:404]     Test net output #0: accuracy = 0.844\nI0819 11:37:40.166132 21769 solver.cpp:404]     Test net output #1: loss = 0.671167 (* 1 = 0.671167 loss)\nI0819 11:37:42.294880 21769 solver.cpp:228] Iteration 20900, loss = 0.107682\nI0819 11:37:42.294927 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 11:37:42.294942 21769 solver.cpp:244]     Train net output #1: loss = 0.107681 (* 1 = 0.107681 loss)\nI0819 11:37:42.383612 21769 sgd_solver.cpp:166] Iteration 20900, lr = 0.5225\nI0819 11:41:20.971366 21769 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0819 11:43:33.609386 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8408\nI0819 11:43:33.609793 21769 solver.cpp:404]     Test net output #1: loss = 0.644029 (* 1 = 0.644029 loss)\nI0819 11:43:35.738765 21769 solver.cpp:228] Iteration 21000, loss = 0.0253531\nI0819 11:43:35.738811 21769 solver.cpp:244]     Train net output #0: accuracy 
= 0.99\nI0819 11:43:35.738826 21769 solver.cpp:244]     Train net output #1: loss = 0.0253528 (* 1 = 0.0253528 loss)\nI0819 11:43:35.824569 21769 sgd_solver.cpp:166] Iteration 21000, lr = 0.525\nI0819 11:47:14.311089 21769 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0819 11:49:26.984035 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7403\nI0819 11:49:26.984457 21769 solver.cpp:404]     Test net output #1: loss = 1.20437 (* 1 = 1.20437 loss)\nI0819 11:49:29.113752 21769 solver.cpp:228] Iteration 21100, loss = 0.134157\nI0819 11:49:29.113798 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 11:49:29.113814 21769 solver.cpp:244]     Train net output #1: loss = 0.134157 (* 1 = 0.134157 loss)\nI0819 11:49:29.203454 21769 sgd_solver.cpp:166] Iteration 21100, lr = 0.5275\nI0819 11:53:07.800015 21769 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0819 11:55:20.480235 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7919\nI0819 11:55:20.480624 21769 solver.cpp:404]     Test net output #1: loss = 0.980416 (* 1 = 0.980416 loss)\nI0819 11:55:22.610563 21769 solver.cpp:228] Iteration 21200, loss = 0.0920353\nI0819 11:55:22.610611 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:55:22.610627 21769 solver.cpp:244]     Train net output #1: loss = 0.0920349 (* 1 = 0.0920349 loss)\nI0819 11:55:22.692286 21769 sgd_solver.cpp:166] Iteration 21200, lr = 0.53\nI0819 11:59:01.176370 21769 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0819 12:01:13.865353 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8279\nI0819 12:01:13.865728 21769 solver.cpp:404]     Test net output #1: loss = 0.731054 (* 1 = 0.731054 loss)\nI0819 12:01:15.995388 21769 solver.cpp:228] Iteration 21300, loss = 0.0245931\nI0819 12:01:15.995434 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 12:01:15.995450 21769 solver.cpp:244]     Train net output #1: loss = 0.0245927 (* 1 = 0.0245927 loss)\nI0819 
12:01:16.069844 21769 sgd_solver.cpp:166] Iteration 21300, lr = 0.5325\nI0819 12:04:54.063232 21769 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0819 12:07:06.762672 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8581\nI0819 12:07:06.763068 21769 solver.cpp:404]     Test net output #1: loss = 0.582454 (* 1 = 0.582454 loss)\nI0819 12:07:08.892905 21769 solver.cpp:228] Iteration 21400, loss = 0.0490444\nI0819 12:07:08.892952 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 12:07:08.892969 21769 solver.cpp:244]     Train net output #1: loss = 0.049044 (* 1 = 0.049044 loss)\nI0819 12:07:08.983732 21769 sgd_solver.cpp:166] Iteration 21400, lr = 0.535\nI0819 12:10:46.972457 21769 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0819 12:12:59.665509 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8276\nI0819 12:12:59.665933 21769 solver.cpp:404]     Test net output #1: loss = 0.692252 (* 1 = 0.692252 loss)\nI0819 12:13:01.795729 21769 solver.cpp:228] Iteration 21500, loss = 0.0861135\nI0819 12:13:01.795770 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 12:13:01.795788 21769 solver.cpp:244]     Train net output #1: loss = 0.0861132 (* 1 = 0.0861132 loss)\nI0819 12:13:01.883610 21769 sgd_solver.cpp:166] Iteration 21500, lr = 0.5375\nI0819 12:16:39.883044 21769 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0819 12:18:52.538146 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8266\nI0819 12:18:52.538537 21769 solver.cpp:404]     Test net output #1: loss = 0.739796 (* 1 = 0.739796 loss)\nI0819 12:18:54.667953 21769 solver.cpp:228] Iteration 21600, loss = 0.0650656\nI0819 12:18:54.668000 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 12:18:54.668017 21769 solver.cpp:244]     Train net output #1: loss = 0.0650653 (* 1 = 0.0650653 loss)\nI0819 12:18:54.751008 21769 sgd_solver.cpp:166] Iteration 21600, lr = 0.54\nI0819 12:22:32.850618 21769 solver.cpp:337] Iteration 
21700, Testing net (#0)\nI0819 12:24:45.499092 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8406\nI0819 12:24:45.499485 21769 solver.cpp:404]     Test net output #1: loss = 0.646498 (* 1 = 0.646498 loss)\nI0819 12:24:47.629071 21769 solver.cpp:228] Iteration 21700, loss = 0.129769\nI0819 12:24:47.629127 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0819 12:24:47.629145 21769 solver.cpp:244]     Train net output #1: loss = 0.129769 (* 1 = 0.129769 loss)\nI0819 12:24:47.711460 21769 sgd_solver.cpp:166] Iteration 21700, lr = 0.5425\nI0819 12:28:25.787286 21769 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0819 12:30:38.435703 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7502\nI0819 12:30:38.436111 21769 solver.cpp:404]     Test net output #1: loss = 1.19376 (* 1 = 1.19376 loss)\nI0819 12:30:40.565965 21769 solver.cpp:228] Iteration 21800, loss = 0.0578\nI0819 12:30:40.566013 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 12:30:40.566030 21769 solver.cpp:244]     Train net output #1: loss = 0.0577996 (* 1 = 0.0577996 loss)\nI0819 12:30:40.643062 21769 sgd_solver.cpp:166] Iteration 21800, lr = 0.545\nI0819 12:34:18.405395 21769 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0819 12:36:29.630748 21769 solver.cpp:404]     Test net output #0: accuracy = 0.860301\nI0819 12:36:29.631098 21769 solver.cpp:404]     Test net output #1: loss = 0.528052 (* 1 = 0.528052 loss)\nI0819 12:36:31.753619 21769 solver.cpp:228] Iteration 21900, loss = 0.0877765\nI0819 12:36:31.753655 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 12:36:31.753670 21769 solver.cpp:244]     Train net output #1: loss = 0.0877761 (* 1 = 0.0877761 loss)\nI0819 12:36:31.840256 21769 sgd_solver.cpp:166] Iteration 21900, lr = 0.5475\nI0819 12:40:09.366435 21769 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0819 12:42:20.618578 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8681\nI0819 
12:42:20.618921 21769 solver.cpp:404]     Test net output #1: loss = 0.544765 (* 1 = 0.544765 loss)\nI0819 12:42:22.742069 21769 solver.cpp:228] Iteration 22000, loss = 0.0798212\nI0819 12:42:22.742105 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:42:22.742120 21769 solver.cpp:244]     Train net output #1: loss = 0.0798209 (* 1 = 0.0798209 loss)\nI0819 12:42:22.832197 21769 sgd_solver.cpp:166] Iteration 22000, lr = 0.55\nI0819 12:46:00.541457 21769 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0819 12:48:11.776053 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7842\nI0819 12:48:11.776409 21769 solver.cpp:404]     Test net output #1: loss = 1.08977 (* 1 = 1.08977 loss)\nI0819 12:48:13.899015 21769 solver.cpp:228] Iteration 22100, loss = 0.102667\nI0819 12:48:13.899052 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 12:48:13.899068 21769 solver.cpp:244]     Train net output #1: loss = 0.102667 (* 1 = 0.102667 loss)\nI0819 12:48:13.979585 21769 sgd_solver.cpp:166] Iteration 22100, lr = 0.5525\nI0819 12:51:51.606340 21769 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0819 12:54:02.849541 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8317\nI0819 12:54:02.849905 21769 solver.cpp:404]     Test net output #1: loss = 0.654244 (* 1 = 0.654244 loss)\nI0819 12:54:04.972496 21769 solver.cpp:228] Iteration 22200, loss = 0.116312\nI0819 12:54:04.972533 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 12:54:04.972548 21769 solver.cpp:244]     Train net output #1: loss = 0.116312 (* 1 = 0.116312 loss)\nI0819 12:54:05.064059 21769 sgd_solver.cpp:166] Iteration 22200, lr = 0.555\nI0819 12:57:42.711014 21769 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0819 12:59:53.933912 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7593\nI0819 12:59:53.934260 21769 solver.cpp:404]     Test net output #1: loss = 1.21181 (* 1 = 1.21181 loss)\nI0819 12:59:56.056402 21769 
solver.cpp:228] Iteration 22300, loss = 0.0336329\nI0819 12:59:56.056439 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 12:59:56.056453 21769 solver.cpp:244]     Train net output #1: loss = 0.0336326 (* 1 = 0.0336326 loss)\nI0819 12:59:56.155381 21769 sgd_solver.cpp:166] Iteration 22300, lr = 0.5575\nI0819 13:03:34.114521 21769 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0819 13:05:45.393893 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8239\nI0819 13:05:45.394248 21769 solver.cpp:404]     Test net output #1: loss = 0.708073 (* 1 = 0.708073 loss)\nI0819 13:05:47.517518 21769 solver.cpp:228] Iteration 22400, loss = 0.0782484\nI0819 13:05:47.517558 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 13:05:47.517581 21769 solver.cpp:244]     Train net output #1: loss = 0.078248 (* 1 = 0.078248 loss)\nI0819 13:05:47.603127 21769 sgd_solver.cpp:166] Iteration 22400, lr = 0.56\nI0819 13:09:25.379726 21769 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0819 13:11:36.653259 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8081\nI0819 13:11:36.653583 21769 solver.cpp:404]     Test net output #1: loss = 0.777159 (* 1 = 0.777159 loss)\nI0819 13:11:38.777025 21769 solver.cpp:228] Iteration 22500, loss = 0.106236\nI0819 13:11:38.777066 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 13:11:38.777091 21769 solver.cpp:244]     Train net output #1: loss = 0.106236 (* 1 = 0.106236 loss)\nI0819 13:11:38.857105 21769 sgd_solver.cpp:166] Iteration 22500, lr = 0.5625\nI0819 13:15:16.422397 21769 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0819 13:17:27.681129 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7834\nI0819 13:17:27.681439 21769 solver.cpp:404]     Test net output #1: loss = 1.13252 (* 1 = 1.13252 loss)\nI0819 13:17:29.803874 21769 solver.cpp:228] Iteration 22600, loss = 0.106634\nI0819 13:17:29.803912 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0819 13:17:29.803928 21769 solver.cpp:244]     Train net output #1: loss = 0.106634 (* 1 = 0.106634 loss)\nI0819 13:17:29.887866 21769 sgd_solver.cpp:166] Iteration 22600, lr = 0.565\nI0819 13:21:07.585391 21769 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0819 13:23:18.854207 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8503\nI0819 13:23:18.854571 21769 solver.cpp:404]     Test net output #1: loss = 0.594599 (* 1 = 0.594599 loss)\nI0819 13:23:20.978593 21769 solver.cpp:228] Iteration 22700, loss = 0.110641\nI0819 13:23:20.978636 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 13:23:20.978658 21769 solver.cpp:244]     Train net output #1: loss = 0.11064 (* 1 = 0.11064 loss)\nI0819 13:23:21.059810 21769 sgd_solver.cpp:166] Iteration 22700, lr = 0.5675\nI0819 13:26:58.606946 21769 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0819 13:29:09.871464 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7604\nI0819 13:29:09.871820 21769 solver.cpp:404]     Test net output #1: loss = 1.11076 (* 1 = 1.11076 loss)\nI0819 13:29:11.995689 21769 solver.cpp:228] Iteration 22800, loss = 0.0365221\nI0819 13:29:11.995730 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 13:29:11.995754 21769 solver.cpp:244]     Train net output #1: loss = 0.0365216 (* 1 = 0.0365216 loss)\nI0819 13:29:12.080682 21769 sgd_solver.cpp:166] Iteration 22800, lr = 0.57\nI0819 13:32:49.977577 21769 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0819 13:35:01.258491 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8095\nI0819 13:35:01.258842 21769 solver.cpp:404]     Test net output #1: loss = 0.79742 (* 1 = 0.79742 loss)\nI0819 13:35:03.382510 21769 solver.cpp:228] Iteration 22900, loss = 0.135486\nI0819 13:35:03.382552 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 13:35:03.382575 21769 solver.cpp:244]     Train net output #1: loss = 0.135485 (* 1 = 0.135485 loss)\nI0819 13:35:03.466614 
21769 sgd_solver.cpp:166] Iteration 22900, lr = 0.5725\nI0819 13:38:41.089861 21769 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0819 13:40:52.358392 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8348\nI0819 13:40:52.358763 21769 solver.cpp:404]     Test net output #1: loss = 0.667942 (* 1 = 0.667942 loss)\nI0819 13:40:54.484663 21769 solver.cpp:228] Iteration 23000, loss = 0.0898651\nI0819 13:40:54.484709 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:40:54.484732 21769 solver.cpp:244]     Train net output #1: loss = 0.0898647 (* 1 = 0.0898647 loss)\nI0819 13:40:54.573346 21769 sgd_solver.cpp:166] Iteration 23000, lr = 0.575\nI0819 13:44:32.996040 21769 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0819 13:46:45.625150 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7745\nI0819 13:46:45.625546 21769 solver.cpp:404]     Test net output #1: loss = 0.969877 (* 1 = 0.969877 loss)\nI0819 13:46:47.755411 21769 solver.cpp:228] Iteration 23100, loss = 0.0792379\nI0819 13:46:47.755455 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 13:46:47.755472 21769 solver.cpp:244]     Train net output #1: loss = 0.0792375 (* 1 = 0.0792375 loss)\nI0819 13:46:47.847407 21769 sgd_solver.cpp:166] Iteration 23100, lr = 0.5775\nI0819 13:50:26.296824 21769 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0819 13:52:38.935879 21769 solver.cpp:404]     Test net output #0: accuracy = 0.789\nI0819 13:52:38.936264 21769 solver.cpp:404]     Test net output #1: loss = 1.05024 (* 1 = 1.05024 loss)\nI0819 13:52:41.071107 21769 solver.cpp:228] Iteration 23200, loss = 0.0569968\nI0819 13:52:41.071156 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 13:52:41.071172 21769 solver.cpp:244]     Train net output #1: loss = 0.0569964 (* 1 = 0.0569964 loss)\nI0819 13:52:41.150827 21769 sgd_solver.cpp:166] Iteration 23200, lr = 0.58\nI0819 13:56:19.702455 21769 solver.cpp:337] Iteration 23300, Testing 
net (#0)\nI0819 13:58:32.349489 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8604\nI0819 13:58:32.349887 21769 solver.cpp:404]     Test net output #1: loss = 0.59498 (* 1 = 0.59498 loss)\nI0819 13:58:34.483877 21769 solver.cpp:228] Iteration 23300, loss = 0.087462\nI0819 13:58:34.483925 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:58:34.483942 21769 solver.cpp:244]     Train net output #1: loss = 0.0874616 (* 1 = 0.0874616 loss)\nI0819 13:58:34.562911 21769 sgd_solver.cpp:166] Iteration 23300, lr = 0.5825\nI0819 14:02:12.491149 21769 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0819 14:04:23.731240 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8228\nI0819 14:04:23.731571 21769 solver.cpp:404]     Test net output #1: loss = 0.723365 (* 1 = 0.723365 loss)\nI0819 14:04:25.861171 21769 solver.cpp:228] Iteration 23400, loss = 0.118823\nI0819 14:04:25.861207 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 14:04:25.861222 21769 solver.cpp:244]     Train net output #1: loss = 0.118822 (* 1 = 0.118822 loss)\nI0819 14:04:25.936352 21769 sgd_solver.cpp:166] Iteration 23400, lr = 0.585\nI0819 14:08:03.532917 21769 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0819 14:10:14.738443 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8424\nI0819 14:10:14.738798 21769 solver.cpp:404]     Test net output #1: loss = 0.651631 (* 1 = 0.651631 loss)\nI0819 14:10:16.867297 21769 solver.cpp:228] Iteration 23500, loss = 0.0875705\nI0819 14:10:16.867336 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 14:10:16.867352 21769 solver.cpp:244]     Train net output #1: loss = 0.08757 (* 1 = 0.08757 loss)\nI0819 14:10:16.944780 21769 sgd_solver.cpp:166] Iteration 23500, lr = 0.5875\nI0819 14:13:54.604807 21769 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0819 14:16:05.796882 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8292\nI0819 14:16:05.797193 21769 
solver.cpp:404]     Test net output #1: loss = 0.710666 (* 1 = 0.710666 loss)\nI0819 14:16:07.927754 21769 solver.cpp:228] Iteration 23600, loss = 0.0976342\nI0819 14:16:07.927793 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:16:07.927809 21769 solver.cpp:244]     Train net output #1: loss = 0.0976338 (* 1 = 0.0976338 loss)\nI0819 14:16:08.004003 21769 sgd_solver.cpp:166] Iteration 23600, lr = 0.59\nI0819 14:19:45.823417 21769 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0819 14:21:57.031769 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7823\nI0819 14:21:57.032116 21769 solver.cpp:404]     Test net output #1: loss = 0.978671 (* 1 = 0.978671 loss)\nI0819 14:21:59.160176 21769 solver.cpp:228] Iteration 23700, loss = 0.0689746\nI0819 14:21:59.160215 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 14:21:59.160239 21769 solver.cpp:244]     Train net output #1: loss = 0.0689743 (* 1 = 0.0689743 loss)\nI0819 14:21:59.250620 21769 sgd_solver.cpp:166] Iteration 23700, lr = 0.5925\nI0819 14:25:36.975363 21769 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0819 14:27:48.189528 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8447\nI0819 14:27:48.189880 21769 solver.cpp:404]     Test net output #1: loss = 0.635787 (* 1 = 0.635787 loss)\nI0819 14:27:50.318823 21769 solver.cpp:228] Iteration 23800, loss = 0.0435867\nI0819 14:27:50.318864 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 14:27:50.318889 21769 solver.cpp:244]     Train net output #1: loss = 0.0435864 (* 1 = 0.0435864 loss)\nI0819 14:27:50.396524 21769 sgd_solver.cpp:166] Iteration 23800, lr = 0.595\nI0819 14:31:28.232314 21769 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0819 14:33:39.461901 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8453\nI0819 14:33:39.462445 21769 solver.cpp:404]     Test net output #1: loss = 0.59647 (* 1 = 0.59647 loss)\nI0819 14:33:41.592041 21769 solver.cpp:228] 
Iteration 23900, loss = 0.085581\nI0819 14:33:41.592080 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 14:33:41.592105 21769 solver.cpp:244]     Train net output #1: loss = 0.0855807 (* 1 = 0.0855807 loss)\nI0819 14:33:41.675014 21769 sgd_solver.cpp:166] Iteration 23900, lr = 0.5975\nI0819 14:37:19.342350 21769 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0819 14:39:30.589614 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7831\nI0819 14:39:30.589963 21769 solver.cpp:404]     Test net output #1: loss = 0.919057 (* 1 = 0.919057 loss)\nI0819 14:39:32.717656 21769 solver.cpp:228] Iteration 24000, loss = 0.134035\nI0819 14:39:32.717700 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:39:32.717726 21769 solver.cpp:244]     Train net output #1: loss = 0.134035 (* 1 = 0.134035 loss)\nI0819 14:39:32.796669 21769 sgd_solver.cpp:166] Iteration 24000, lr = 0.6\nI0819 14:43:10.479619 21769 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0819 14:45:21.739331 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8627\nI0819 14:45:21.739692 21769 solver.cpp:404]     Test net output #1: loss = 0.514828 (* 1 = 0.514828 loss)\nI0819 14:45:23.868505 21769 solver.cpp:228] Iteration 24100, loss = 0.0513517\nI0819 14:45:23.868547 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 14:45:23.868572 21769 solver.cpp:244]     Train net output #1: loss = 0.0513514 (* 1 = 0.0513514 loss)\nI0819 14:45:23.954450 21769 sgd_solver.cpp:166] Iteration 24100, lr = 0.6025\nI0819 14:49:01.534310 21769 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0819 14:51:12.774492 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8335\nI0819 14:51:12.774857 21769 solver.cpp:404]     Test net output #1: loss = 0.693547 (* 1 = 0.693547 loss)\nI0819 14:51:14.903349 21769 solver.cpp:228] Iteration 24200, loss = 0.0872395\nI0819 14:51:14.903391 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 
14:51:14.903415 21769 solver.cpp:244]     Train net output #1: loss = 0.0872392 (* 1 = 0.0872392 loss)\nI0819 14:51:14.983184 21769 sgd_solver.cpp:166] Iteration 24200, lr = 0.605\nI0819 14:54:52.621361 21769 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0819 14:57:03.858616 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8456\nI0819 14:57:03.858927 21769 solver.cpp:404]     Test net output #1: loss = 0.600655 (* 1 = 0.600655 loss)\nI0819 14:57:05.987280 21769 solver.cpp:228] Iteration 24300, loss = 0.0748468\nI0819 14:57:05.987323 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 14:57:05.987345 21769 solver.cpp:244]     Train net output #1: loss = 0.0748466 (* 1 = 0.0748466 loss)\nI0819 14:57:06.070354 21769 sgd_solver.cpp:166] Iteration 24300, lr = 0.6075\nI0819 15:00:43.756525 21769 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0819 15:02:55.015272 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8603\nI0819 15:02:55.015594 21769 solver.cpp:404]     Test net output #1: loss = 0.556457 (* 1 = 0.556457 loss)\nI0819 15:02:57.145035 21769 solver.cpp:228] Iteration 24400, loss = 0.0183542\nI0819 15:02:57.145074 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 15:02:57.145097 21769 solver.cpp:244]     Train net output #1: loss = 0.0183539 (* 1 = 0.0183539 loss)\nI0819 15:02:57.224771 21769 sgd_solver.cpp:166] Iteration 24400, lr = 0.61\nI0819 15:06:34.857053 21769 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0819 15:08:46.106945 21769 solver.cpp:404]     Test net output #0: accuracy = 0.844\nI0819 15:08:46.107302 21769 solver.cpp:404]     Test net output #1: loss = 0.63119 (* 1 = 0.63119 loss)\nI0819 15:08:48.236143 21769 solver.cpp:228] Iteration 24500, loss = 0.0576422\nI0819 15:08:48.236182 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 15:08:48.236207 21769 solver.cpp:244]     Train net output #1: loss = 0.0576419 (* 1 = 0.0576419 loss)\nI0819 15:08:48.329861 
21769 sgd_solver.cpp:166] Iteration 24500, lr = 0.6125\nI0819 15:12:26.013293 21769 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0819 15:14:37.265923 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8154\nI0819 15:14:37.266290 21769 solver.cpp:404]     Test net output #1: loss = 0.901736 (* 1 = 0.901736 loss)\nI0819 15:14:39.395094 21769 solver.cpp:228] Iteration 24600, loss = 0.0381905\nI0819 15:14:39.395135 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 15:14:39.395159 21769 solver.cpp:244]     Train net output #1: loss = 0.0381902 (* 1 = 0.0381902 loss)\nI0819 15:14:39.476419 21769 sgd_solver.cpp:166] Iteration 24600, lr = 0.615\nI0819 15:18:17.402107 21769 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0819 15:20:28.646363 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8512\nI0819 15:20:28.646718 21769 solver.cpp:404]     Test net output #1: loss = 0.630985 (* 1 = 0.630985 loss)\nI0819 15:20:30.774822 21769 solver.cpp:228] Iteration 24700, loss = 0.0510689\nI0819 15:20:30.774863 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 15:20:30.774886 21769 solver.cpp:244]     Train net output #1: loss = 0.0510686 (* 1 = 0.0510686 loss)\nI0819 15:20:30.855387 21769 sgd_solver.cpp:166] Iteration 24700, lr = 0.6175\nI0819 15:24:08.425457 21769 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0819 15:26:19.672943 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8234\nI0819 15:26:19.673298 21769 solver.cpp:404]     Test net output #1: loss = 0.657231 (* 1 = 0.657231 loss)\nI0819 15:26:21.802476 21769 solver.cpp:228] Iteration 24800, loss = 0.0360492\nI0819 15:26:21.802512 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 15:26:21.802527 21769 solver.cpp:244]     Train net output #1: loss = 0.0360489 (* 1 = 0.0360489 loss)\nI0819 15:26:21.883237 21769 sgd_solver.cpp:166] Iteration 24800, lr = 0.62\nI0819 15:29:59.511189 21769 solver.cpp:337] Iteration 24900, Testing 
net (#0)\nI0819 15:32:10.753868 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7973\nI0819 15:32:10.754207 21769 solver.cpp:404]     Test net output #1: loss = 1.00904 (* 1 = 1.00904 loss)\nI0819 15:32:12.881793 21769 solver.cpp:228] Iteration 24900, loss = 0.204026\nI0819 15:32:12.881829 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 15:32:12.881844 21769 solver.cpp:244]     Train net output #1: loss = 0.204026 (* 1 = 0.204026 loss)\nI0819 15:32:12.970351 21769 sgd_solver.cpp:166] Iteration 24900, lr = 0.6225\nI0819 15:35:50.759683 21769 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0819 15:38:01.993535 21769 solver.cpp:404]     Test net output #0: accuracy = 0.791\nI0819 15:38:01.993850 21769 solver.cpp:404]     Test net output #1: loss = 1.00416 (* 1 = 1.00416 loss)\nI0819 15:38:04.122197 21769 solver.cpp:228] Iteration 25000, loss = 0.114742\nI0819 15:38:04.122236 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 15:38:04.122251 21769 solver.cpp:244]     Train net output #1: loss = 0.114741 (* 1 = 0.114741 loss)\nI0819 15:38:04.197883 21769 sgd_solver.cpp:166] Iteration 25000, lr = 0.625\nI0819 15:41:41.811136 21769 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0819 15:43:53.056195 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8314\nI0819 15:43:53.056553 21769 solver.cpp:404]     Test net output #1: loss = 0.666911 (* 1 = 0.666911 loss)\nI0819 15:43:55.185938 21769 solver.cpp:228] Iteration 25100, loss = 0.0559721\nI0819 15:43:55.185976 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 15:43:55.185992 21769 solver.cpp:244]     Train net output #1: loss = 0.0559719 (* 1 = 0.0559719 loss)\nI0819 15:43:55.270514 21769 sgd_solver.cpp:166] Iteration 25100, lr = 0.6275\nI0819 15:47:32.924430 21769 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0819 15:49:44.179728 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8452\nI0819 15:49:44.180095 21769 
solver.cpp:404]     Test net output #1: loss = 0.535947 (* 1 = 0.535947 loss)\nI0819 15:49:46.308456 21769 solver.cpp:228] Iteration 25200, loss = 0.0575\nI0819 15:49:46.308495 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 15:49:46.308511 21769 solver.cpp:244]     Train net output #1: loss = 0.0574998 (* 1 = 0.0574998 loss)\nI0819 15:49:46.388242 21769 sgd_solver.cpp:166] Iteration 25200, lr = 0.63\nI0819 15:53:24.110325 21769 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0819 15:55:35.366842 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8033\nI0819 15:55:35.367214 21769 solver.cpp:404]     Test net output #1: loss = 0.938455 (* 1 = 0.938455 loss)\nI0819 15:55:37.496111 21769 solver.cpp:228] Iteration 25300, loss = 0.0324339\nI0819 15:55:37.496147 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 15:55:37.496162 21769 solver.cpp:244]     Train net output #1: loss = 0.0324337 (* 1 = 0.0324337 loss)\nI0819 15:55:37.582213 21769 sgd_solver.cpp:166] Iteration 25300, lr = 0.6325\nI0819 15:59:15.206668 21769 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0819 16:01:26.449147 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0819 16:01:26.449448 21769 solver.cpp:404]     Test net output #1: loss = 0.644024 (* 1 = 0.644024 loss)\nI0819 16:01:28.578012 21769 solver.cpp:228] Iteration 25400, loss = 0.157809\nI0819 16:01:28.578052 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 16:01:28.578068 21769 solver.cpp:244]     Train net output #1: loss = 0.157809 (* 1 = 0.157809 loss)\nI0819 16:01:28.663547 21769 sgd_solver.cpp:166] Iteration 25400, lr = 0.635\nI0819 16:05:06.846094 21769 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0819 16:07:18.100878 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8505\nI0819 16:07:18.101222 21769 solver.cpp:404]     Test net output #1: loss = 0.586858 (* 1 = 0.586858 loss)\nI0819 16:07:20.230257 21769 solver.cpp:228] 
Iteration 25500, loss = 0.0987027\nI0819 16:07:20.230298 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:07:20.230320 21769 solver.cpp:244]     Train net output #1: loss = 0.0987024 (* 1 = 0.0987024 loss)\nI0819 16:07:20.324072 21769 sgd_solver.cpp:166] Iteration 25500, lr = 0.6375\nI0819 16:10:58.882876 21769 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0819 16:13:10.157641 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7512\nI0819 16:13:10.157997 21769 solver.cpp:404]     Test net output #1: loss = 1.17259 (* 1 = 1.17259 loss)\nI0819 16:13:12.287420 21769 solver.cpp:228] Iteration 25600, loss = 0.123276\nI0819 16:13:12.287461 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:13:12.287485 21769 solver.cpp:244]     Train net output #1: loss = 0.123276 (* 1 = 0.123276 loss)\nI0819 16:13:12.377468 21769 sgd_solver.cpp:166] Iteration 25600, lr = 0.64\nI0819 16:16:50.727591 21769 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0819 16:19:01.930363 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7945\nI0819 16:19:01.930750 21769 solver.cpp:404]     Test net output #1: loss = 0.880409 (* 1 = 0.880409 loss)\nI0819 16:19:04.060467 21769 solver.cpp:228] Iteration 25700, loss = 0.070467\nI0819 16:19:04.060506 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:19:04.060530 21769 solver.cpp:244]     Train net output #1: loss = 0.0704667 (* 1 = 0.0704667 loss)\nI0819 16:19:04.149562 21769 sgd_solver.cpp:166] Iteration 25700, lr = 0.6425\nI0819 16:22:42.404106 21769 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0819 16:24:53.630501 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8531\nI0819 16:24:53.630856 21769 solver.cpp:404]     Test net output #1: loss = 0.537723 (* 1 = 0.537723 loss)\nI0819 16:24:55.759675 21769 solver.cpp:228] Iteration 25800, loss = 0.0712822\nI0819 16:24:55.759719 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 
16:24:55.759735 21769 solver.cpp:244]     Train net output #1: loss = 0.0712819 (* 1 = 0.0712819 loss)\nI0819 16:24:55.847126 21769 sgd_solver.cpp:166] Iteration 25800, lr = 0.645\nI0819 16:28:34.083817 21769 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0819 16:30:45.288306 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8233\nI0819 16:30:45.288604 21769 solver.cpp:404]     Test net output #1: loss = 0.748381 (* 1 = 0.748381 loss)\nI0819 16:30:47.417814 21769 solver.cpp:228] Iteration 25900, loss = 0.220647\nI0819 16:30:47.417851 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0819 16:30:47.417867 21769 solver.cpp:244]     Train net output #1: loss = 0.220647 (* 1 = 0.220647 loss)\nI0819 16:30:47.510363 21769 sgd_solver.cpp:166] Iteration 25900, lr = 0.6475\nI0819 16:34:26.098719 21769 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0819 16:36:37.279217 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7783\nI0819 16:36:37.279566 21769 solver.cpp:404]     Test net output #1: loss = 1.05306 (* 1 = 1.05306 loss)\nI0819 16:36:39.408123 21769 solver.cpp:228] Iteration 26000, loss = 0.096976\nI0819 16:36:39.408164 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 16:36:39.408186 21769 solver.cpp:244]     Train net output #1: loss = 0.0969757 (* 1 = 0.0969757 loss)\nI0819 16:36:39.496220 21769 sgd_solver.cpp:166] Iteration 26000, lr = 0.65\nI0819 16:40:17.857620 21769 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0819 16:42:29.028328 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8355\nI0819 16:42:29.028635 21769 solver.cpp:404]     Test net output #1: loss = 0.701776 (* 1 = 0.701776 loss)\nI0819 16:42:31.159392 21769 solver.cpp:228] Iteration 26100, loss = 0.100926\nI0819 16:42:31.159431 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:42:31.159447 21769 solver.cpp:244]     Train net output #1: loss = 0.100925 (* 1 = 0.100925 loss)\nI0819 16:42:31.245443 21769 
sgd_solver.cpp:166] Iteration 26100, lr = 0.6525\nI0819 16:46:09.697726 21769 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0819 16:48:20.900768 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8053\nI0819 16:48:20.901085 21769 solver.cpp:404]     Test net output #1: loss = 0.76288 (* 1 = 0.76288 loss)\nI0819 16:48:23.033818 21769 solver.cpp:228] Iteration 26200, loss = 0.115795\nI0819 16:48:23.033870 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:48:23.033895 21769 solver.cpp:244]     Train net output #1: loss = 0.115795 (* 1 = 0.115795 loss)\nI0819 16:48:23.114260 21769 sgd_solver.cpp:166] Iteration 26200, lr = 0.655\nI0819 16:52:01.342633 21769 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0819 16:54:12.597949 21769 solver.cpp:404]     Test net output #0: accuracy = 0.843\nI0819 16:54:12.598309 21769 solver.cpp:404]     Test net output #1: loss = 0.661181 (* 1 = 0.661181 loss)\nI0819 16:54:14.722973 21769 solver.cpp:228] Iteration 26300, loss = 0.0499465\nI0819 16:54:14.723023 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 16:54:14.723049 21769 solver.cpp:244]     Train net output #1: loss = 0.0499462 (* 1 = 0.0499462 loss)\nI0819 16:54:14.812000 21769 sgd_solver.cpp:166] Iteration 26300, lr = 0.6575\nI0819 16:57:53.005729 21769 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0819 17:00:04.231858 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7972\nI0819 17:00:04.232151 21769 solver.cpp:404]     Test net output #1: loss = 0.867461 (* 1 = 0.867461 loss)\nI0819 17:00:06.354495 21769 solver.cpp:228] Iteration 26400, loss = 0.149219\nI0819 17:00:06.354542 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 17:00:06.354558 21769 solver.cpp:244]     Train net output #1: loss = 0.149219 (* 1 = 0.149219 loss)\nI0819 17:00:06.445281 21769 sgd_solver.cpp:166] Iteration 26400, lr = 0.66\nI0819 17:03:44.737102 21769 solver.cpp:337] Iteration 26500, Testing net 
(#0)\nI0819 17:05:55.975216 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7804\nI0819 17:05:55.975581 21769 solver.cpp:404]     Test net output #1: loss = 1.08501 (* 1 = 1.08501 loss)\nI0819 17:05:58.098095 21769 solver.cpp:228] Iteration 26500, loss = 0.0985427\nI0819 17:05:58.098140 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 17:05:58.098156 21769 solver.cpp:244]     Train net output #1: loss = 0.0985424 (* 1 = 0.0985424 loss)\nI0819 17:05:58.195106 21769 sgd_solver.cpp:166] Iteration 26500, lr = 0.6625\nI0819 17:09:36.504755 21769 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0819 17:11:47.763949 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8315\nI0819 17:11:47.764307 21769 solver.cpp:404]     Test net output #1: loss = 0.689291 (* 1 = 0.689291 loss)\nI0819 17:11:49.887632 21769 solver.cpp:228] Iteration 26600, loss = 0.0928528\nI0819 17:11:49.887681 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 17:11:49.887697 21769 solver.cpp:244]     Train net output #1: loss = 0.0928525 (* 1 = 0.0928525 loss)\nI0819 17:11:49.981632 21769 sgd_solver.cpp:166] Iteration 26600, lr = 0.665\nI0819 17:15:28.356861 21769 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0819 17:17:39.593807 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8129\nI0819 17:17:39.594123 21769 solver.cpp:404]     Test net output #1: loss = 0.806601 (* 1 = 0.806601 loss)\nI0819 17:17:41.717159 21769 solver.cpp:228] Iteration 26700, loss = 0.0928501\nI0819 17:17:41.717206 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 17:17:41.717221 21769 solver.cpp:244]     Train net output #1: loss = 0.0928498 (* 1 = 0.0928498 loss)\nI0819 17:17:41.814301 21769 sgd_solver.cpp:166] Iteration 26700, lr = 0.6675\nI0819 17:21:20.145634 21769 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0819 17:23:31.396981 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI0819 17:23:31.397327 21769 
solver.cpp:404]     Test net output #1: loss = 0.799041 (* 1 = 0.799041 loss)\nI0819 17:23:33.519522 21769 solver.cpp:228] Iteration 26800, loss = 0.138508\nI0819 17:23:33.519568 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:23:33.519585 21769 solver.cpp:244]     Train net output #1: loss = 0.138508 (* 1 = 0.138508 loss)\nI0819 17:23:33.611510 21769 sgd_solver.cpp:166] Iteration 26800, lr = 0.67\nI0819 17:27:11.891155 21769 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0819 17:29:23.129730 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8291\nI0819 17:29:23.130107 21769 solver.cpp:404]     Test net output #1: loss = 0.684895 (* 1 = 0.684895 loss)\nI0819 17:29:25.252514 21769 solver.cpp:228] Iteration 26900, loss = 0.0916023\nI0819 17:29:25.252562 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:29:25.252578 21769 solver.cpp:244]     Train net output #1: loss = 0.0916019 (* 1 = 0.0916019 loss)\nI0819 17:29:25.343722 21769 sgd_solver.cpp:166] Iteration 26900, lr = 0.6725\nI0819 17:33:03.721149 21769 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0819 17:35:14.966593 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7449\nI0819 17:35:14.966960 21769 solver.cpp:404]     Test net output #1: loss = 1.34894 (* 1 = 1.34894 loss)\nI0819 17:35:17.089949 21769 solver.cpp:228] Iteration 27000, loss = 0.107975\nI0819 17:35:17.089995 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 17:35:17.090011 21769 solver.cpp:244]     Train net output #1: loss = 0.107975 (* 1 = 0.107975 loss)\nI0819 17:35:17.183456 21769 sgd_solver.cpp:166] Iteration 27000, lr = 0.675\nI0819 17:38:55.358584 21769 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0819 17:41:06.589705 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8373\nI0819 17:41:06.590075 21769 solver.cpp:404]     Test net output #1: loss = 0.622202 (* 1 = 0.622202 loss)\nI0819 17:41:08.713305 21769 solver.cpp:228] Iteration 
27100, loss = 0.06971\nI0819 17:41:08.713351 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 17:41:08.713368 21769 solver.cpp:244]     Train net output #1: loss = 0.0697096 (* 1 = 0.0697096 loss)\nI0819 17:41:08.808624 21769 sgd_solver.cpp:166] Iteration 27100, lr = 0.6775\nI0819 17:44:47.101025 21769 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0819 17:46:58.341928 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8444\nI0819 17:46:58.342296 21769 solver.cpp:404]     Test net output #1: loss = 0.60937 (* 1 = 0.60937 loss)\nI0819 17:47:00.465493 21769 solver.cpp:228] Iteration 27200, loss = 0.106715\nI0819 17:47:00.465539 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:47:00.465554 21769 solver.cpp:244]     Train net output #1: loss = 0.106715 (* 1 = 0.106715 loss)\nI0819 17:47:00.561012 21769 sgd_solver.cpp:166] Iteration 27200, lr = 0.68\nI0819 17:50:38.803450 21769 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0819 17:52:50.043238 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7896\nI0819 17:52:50.043598 21769 solver.cpp:404]     Test net output #1: loss = 0.935111 (* 1 = 0.935111 loss)\nI0819 17:52:52.166445 21769 solver.cpp:228] Iteration 27300, loss = 0.0347875\nI0819 17:52:52.166493 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 17:52:52.166509 21769 solver.cpp:244]     Train net output #1: loss = 0.0347871 (* 1 = 0.0347871 loss)\nI0819 17:52:52.258152 21769 sgd_solver.cpp:166] Iteration 27300, lr = 0.6825\nI0819 17:56:30.465795 21769 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0819 17:58:41.693032 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8408\nI0819 17:58:41.693403 21769 solver.cpp:404]     Test net output #1: loss = 0.652275 (* 1 = 0.652275 loss)\nI0819 17:58:43.815564 21769 solver.cpp:228] Iteration 27400, loss = 0.0351707\nI0819 17:58:43.815613 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 
17:58:43.815629 21769 solver.cpp:244]     Train net output #1: loss = 0.0351704 (* 1 = 0.0351704 loss)\nI0819 17:58:43.906108 21769 sgd_solver.cpp:166] Iteration 27400, lr = 0.685\nI0819 18:02:22.059187 21769 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0819 18:04:33.275192 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8423\nI0819 18:04:33.275562 21769 solver.cpp:404]     Test net output #1: loss = 0.695978 (* 1 = 0.695978 loss)\nI0819 18:04:35.397254 21769 solver.cpp:228] Iteration 27500, loss = 0.0971883\nI0819 18:04:35.397301 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:04:35.397317 21769 solver.cpp:244]     Train net output #1: loss = 0.0971879 (* 1 = 0.0971879 loss)\nI0819 18:04:35.487450 21769 sgd_solver.cpp:166] Iteration 27500, lr = 0.6875\nI0819 18:08:13.561468 21769 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0819 18:10:24.789964 21769 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI0819 18:10:24.790323 21769 solver.cpp:404]     Test net output #1: loss = 0.776633 (* 1 = 0.776633 loss)\nI0819 18:10:26.913229 21769 solver.cpp:228] Iteration 27600, loss = 0.177718\nI0819 18:10:26.913275 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 18:10:26.913292 21769 solver.cpp:244]     Train net output #1: loss = 0.177718 (* 1 = 0.177718 loss)\nI0819 18:10:27.005825 21769 sgd_solver.cpp:166] Iteration 27600, lr = 0.69\nI0819 18:14:05.284446 21769 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0819 18:16:16.524230 21769 solver.cpp:404]     Test net output #0: accuracy = 0.805\nI0819 18:16:16.524619 21769 solver.cpp:404]     Test net output #1: loss = 0.823412 (* 1 = 0.823412 loss)\nI0819 18:16:18.647717 21769 solver.cpp:228] Iteration 27700, loss = 0.0878971\nI0819 18:16:18.647764 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:16:18.647785 21769 solver.cpp:244]     Train net output #1: loss = 0.0878966 (* 1 = 0.0878966 loss)\nI0819 18:16:18.742236 
21769 sgd_solver.cpp:166] Iteration 27700, lr = 0.6925\nI0819 18:19:56.952188 21769 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0819 18:22:08.196377 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7893\nI0819 18:22:08.196735 21769 solver.cpp:404]     Test net output #1: loss = 0.934465 (* 1 = 0.934465 loss)\nI0819 18:22:10.319213 21769 solver.cpp:228] Iteration 27800, loss = 0.0526875\nI0819 18:22:10.319260 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 18:22:10.319277 21769 solver.cpp:244]     Train net output #1: loss = 0.052687 (* 1 = 0.052687 loss)\nI0819 18:22:10.414384 21769 sgd_solver.cpp:166] Iteration 27800, lr = 0.695\nI0819 18:25:48.695252 21769 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0819 18:27:59.947976 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8423\nI0819 18:27:59.948357 21769 solver.cpp:404]     Test net output #1: loss = 0.613439 (* 1 = 0.613439 loss)\nI0819 18:28:02.071038 21769 solver.cpp:228] Iteration 27900, loss = 0.0695388\nI0819 18:28:02.071085 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 18:28:02.071102 21769 solver.cpp:244]     Train net output #1: loss = 0.0695384 (* 1 = 0.0695384 loss)\nI0819 18:28:02.162751 21769 sgd_solver.cpp:166] Iteration 27900, lr = 0.6975\nI0819 18:31:40.330622 21769 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0819 18:33:51.549347 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8185\nI0819 18:33:51.549713 21769 solver.cpp:404]     Test net output #1: loss = 0.710931 (* 1 = 0.710931 loss)\nI0819 18:33:53.673135 21769 solver.cpp:228] Iteration 28000, loss = 0.0885633\nI0819 18:33:53.673182 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 18:33:53.673199 21769 solver.cpp:244]     Train net output #1: loss = 0.0885628 (* 1 = 0.0885628 loss)\nI0819 18:33:53.770313 21769 sgd_solver.cpp:166] Iteration 28000, lr = 0.7\nI0819 18:37:32.081975 21769 solver.cpp:337] Iteration 28100, Testing 
net (#0)\nI0819 18:39:43.438639 21769 solver.cpp:404]     Test net output #0: accuracy = 0.816\nI0819 18:39:43.439026 21769 solver.cpp:404]     Test net output #1: loss = 0.787674 (* 1 = 0.787674 loss)\nI0819 18:39:45.561939 21769 solver.cpp:228] Iteration 28100, loss = 0.122809\nI0819 18:39:45.561988 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:39:45.562013 21769 solver.cpp:244]     Train net output #1: loss = 0.122808 (* 1 = 0.122808 loss)\nI0819 18:39:45.661454 21769 sgd_solver.cpp:166] Iteration 28100, lr = 0.7025\nI0819 18:43:24.253306 21769 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0819 18:45:35.471127 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8291\nI0819 18:45:35.471508 21769 solver.cpp:404]     Test net output #1: loss = 0.644124 (* 1 = 0.644124 loss)\nI0819 18:45:37.597342 21769 solver.cpp:228] Iteration 28200, loss = 0.05657\nI0819 18:45:37.597389 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 18:45:37.597406 21769 solver.cpp:244]     Train net output #1: loss = 0.0565695 (* 1 = 0.0565695 loss)\nI0819 18:45:37.684840 21769 sgd_solver.cpp:166] Iteration 28200, lr = 0.705\nI0819 18:49:15.864707 21769 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0819 18:51:27.222771 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8418\nI0819 18:51:27.223165 21769 solver.cpp:404]     Test net output #1: loss = 0.642326 (* 1 = 0.642326 loss)\nI0819 18:51:29.348832 21769 solver.cpp:228] Iteration 28300, loss = 0.0301519\nI0819 18:51:29.348881 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 18:51:29.348906 21769 solver.cpp:244]     Train net output #1: loss = 0.0301514 (* 1 = 0.0301514 loss)\nI0819 18:51:29.440690 21769 sgd_solver.cpp:166] Iteration 28300, lr = 0.7075\nI0819 18:55:07.923568 21769 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0819 18:57:19.135215 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7599\nI0819 18:57:19.135586 21769 
solver.cpp:404]     Test net output #1: loss = 1.0538 (* 1 = 1.0538 loss)\nI0819 18:57:21.258632 21769 solver.cpp:228] Iteration 28400, loss = 0.167082\nI0819 18:57:21.258677 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0819 18:57:21.258692 21769 solver.cpp:244]     Train net output #1: loss = 0.167082 (* 1 = 0.167082 loss)\nI0819 18:57:21.356577 21769 sgd_solver.cpp:166] Iteration 28400, lr = 0.71\nI0819 19:00:59.805665 21769 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0819 19:03:11.017962 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8224\nI0819 19:03:11.018342 21769 solver.cpp:404]     Test net output #1: loss = 0.723009 (* 1 = 0.723009 loss)\nI0819 19:03:13.140877 21769 solver.cpp:228] Iteration 28500, loss = 0.0974055\nI0819 19:03:13.140923 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:03:13.140939 21769 solver.cpp:244]     Train net output #1: loss = 0.0974051 (* 1 = 0.0974051 loss)\nI0819 19:03:13.247859 21769 sgd_solver.cpp:166] Iteration 28500, lr = 0.7125\nI0819 19:06:51.599350 21769 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0819 19:09:02.853561 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8342\nI0819 19:09:02.853924 21769 solver.cpp:404]     Test net output #1: loss = 0.667628 (* 1 = 0.667628 loss)\nI0819 19:09:04.976637 21769 solver.cpp:228] Iteration 28600, loss = 0.0913353\nI0819 19:09:04.976685 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 19:09:04.976701 21769 solver.cpp:244]     Train net output #1: loss = 0.0913348 (* 1 = 0.0913348 loss)\nI0819 19:09:05.066736 21769 sgd_solver.cpp:166] Iteration 28600, lr = 0.715\nI0819 19:12:43.348812 21769 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0819 19:14:54.627480 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7994\nI0819 19:14:54.627835 21769 solver.cpp:404]     Test net output #1: loss = 0.877994 (* 1 = 0.877994 loss)\nI0819 19:14:56.751606 21769 solver.cpp:228] 
Iteration 28700, loss = 0.166912\nI0819 19:14:56.751652 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 19:14:56.751667 21769 solver.cpp:244]     Train net output #1: loss = 0.166912 (* 1 = 0.166912 loss)\nI0819 19:14:56.845618 21769 sgd_solver.cpp:166] Iteration 28700, lr = 0.7175\nI0819 19:18:35.209609 21769 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0819 19:20:46.487215 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8118\nI0819 19:20:46.487589 21769 solver.cpp:404]     Test net output #1: loss = 0.833419 (* 1 = 0.833419 loss)\nI0819 19:20:48.609673 21769 solver.cpp:228] Iteration 28800, loss = 0.0406114\nI0819 19:20:48.609719 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 19:20:48.609735 21769 solver.cpp:244]     Train net output #1: loss = 0.0406109 (* 1 = 0.0406109 loss)\nI0819 19:20:48.707238 21769 sgd_solver.cpp:166] Iteration 28800, lr = 0.72\nI0819 19:24:26.845731 21769 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0819 19:26:38.117835 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8003\nI0819 19:26:38.118209 21769 solver.cpp:404]     Test net output #1: loss = 0.900898 (* 1 = 0.900898 loss)\nI0819 19:26:40.240453 21769 solver.cpp:228] Iteration 28900, loss = 0.0586504\nI0819 19:26:40.240499 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 19:26:40.240515 21769 solver.cpp:244]     Train net output #1: loss = 0.0586499 (* 1 = 0.0586499 loss)\nI0819 19:26:40.332721 21769 sgd_solver.cpp:166] Iteration 28900, lr = 0.7225\nI0819 19:30:18.628087 21769 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0819 19:32:29.895735 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7741\nI0819 19:32:29.896092 21769 solver.cpp:404]     Test net output #1: loss = 0.999708 (* 1 = 0.999708 loss)\nI0819 19:32:32.018179 21769 solver.cpp:228] Iteration 29000, loss = 0.122138\nI0819 19:32:32.018226 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 
19:32:32.018242 21769 solver.cpp:244]     Train net output #1: loss = 0.122137 (* 1 = 0.122137 loss)\nI0819 19:32:32.116219 21769 sgd_solver.cpp:166] Iteration 29000, lr = 0.725\nI0819 19:36:10.422888 21769 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0819 19:38:21.706317 21769 solver.cpp:404]     Test net output #0: accuracy = 0.78\nI0819 19:38:21.706686 21769 solver.cpp:404]     Test net output #1: loss = 0.913304 (* 1 = 0.913304 loss)\nI0819 19:38:23.829006 21769 solver.cpp:228] Iteration 29100, loss = 0.10409\nI0819 19:38:23.829051 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 19:38:23.829068 21769 solver.cpp:244]     Train net output #1: loss = 0.10409 (* 1 = 0.10409 loss)\nI0819 19:38:23.917850 21769 sgd_solver.cpp:166] Iteration 29100, lr = 0.7275\nI0819 19:42:02.116102 21769 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0819 19:44:13.396767 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8469\nI0819 19:44:13.397150 21769 solver.cpp:404]     Test net output #1: loss = 0.55583 (* 1 = 0.55583 loss)\nI0819 19:44:15.520050 21769 solver.cpp:228] Iteration 29200, loss = 0.110796\nI0819 19:44:15.520095 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:44:15.520112 21769 solver.cpp:244]     Train net output #1: loss = 0.110796 (* 1 = 0.110796 loss)\nI0819 19:44:15.610563 21769 sgd_solver.cpp:166] Iteration 29200, lr = 0.73\nI0819 19:47:53.879750 21769 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0819 19:50:05.159687 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5948\nI0819 19:50:05.160035 21769 solver.cpp:404]     Test net output #1: loss = 2.53923 (* 1 = 2.53923 loss)\nI0819 19:50:07.282220 21769 solver.cpp:228] Iteration 29300, loss = 0.0610138\nI0819 19:50:07.282264 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 19:50:07.282280 21769 solver.cpp:244]     Train net output #1: loss = 0.0610133 (* 1 = 0.0610133 loss)\nI0819 19:50:07.372350 21769 
sgd_solver.cpp:166] Iteration 29300, lr = 0.7325\nI0819 19:53:45.566937 21769 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0819 19:55:56.842591 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7705\nI0819 19:55:56.842965 21769 solver.cpp:404]     Test net output #1: loss = 1.01576 (* 1 = 1.01576 loss)\nI0819 19:55:58.966478 21769 solver.cpp:228] Iteration 29400, loss = 0.123249\nI0819 19:55:58.966523 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:55:58.966540 21769 solver.cpp:244]     Train net output #1: loss = 0.123249 (* 1 = 0.123249 loss)\nI0819 19:55:59.053680 21769 sgd_solver.cpp:166] Iteration 29400, lr = 0.735\nI0819 19:59:37.259785 21769 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0819 20:01:48.535634 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7776\nI0819 20:01:48.536032 21769 solver.cpp:404]     Test net output #1: loss = 1.07644 (* 1 = 1.07644 loss)\nI0819 20:01:50.658680 21769 solver.cpp:228] Iteration 29500, loss = 0.103501\nI0819 20:01:50.658726 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 20:01:50.658743 21769 solver.cpp:244]     Train net output #1: loss = 0.103501 (* 1 = 0.103501 loss)\nI0819 20:01:50.748494 21769 sgd_solver.cpp:166] Iteration 29500, lr = 0.7375\nI0819 20:05:29.074610 21769 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0819 20:07:40.342411 21769 solver.cpp:404]     Test net output #0: accuracy = 0.86\nI0819 20:07:40.342761 21769 solver.cpp:404]     Test net output #1: loss = 0.511548 (* 1 = 0.511548 loss)\nI0819 20:07:42.465798 21769 solver.cpp:228] Iteration 29600, loss = 0.089054\nI0819 20:07:42.465844 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 20:07:42.465862 21769 solver.cpp:244]     Train net output #1: loss = 0.0890535 (* 1 = 0.0890535 loss)\nI0819 20:07:42.560906 21769 sgd_solver.cpp:166] Iteration 29600, lr = 0.74\nI0819 20:11:21.010558 21769 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0819 
20:13:32.264031 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8391\nI0819 20:13:32.264416 21769 solver.cpp:404]     Test net output #1: loss = 0.656667 (* 1 = 0.656667 loss)\nI0819 20:13:34.386590 21769 solver.cpp:228] Iteration 29700, loss = 0.0260111\nI0819 20:13:34.386638 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 20:13:34.386656 21769 solver.cpp:244]     Train net output #1: loss = 0.0260106 (* 1 = 0.0260106 loss)\nI0819 20:13:34.479192 21769 sgd_solver.cpp:166] Iteration 29700, lr = 0.7425\nI0819 20:17:12.864980 21769 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0819 20:19:24.128445 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7838\nI0819 20:19:24.128813 21769 solver.cpp:404]     Test net output #1: loss = 0.890464 (* 1 = 0.890464 loss)\nI0819 20:19:26.251550 21769 solver.cpp:228] Iteration 29800, loss = 0.0373937\nI0819 20:19:26.251595 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 20:19:26.251611 21769 solver.cpp:244]     Train net output #1: loss = 0.0373932 (* 1 = 0.0373932 loss)\nI0819 20:19:26.343698 21769 sgd_solver.cpp:166] Iteration 29800, lr = 0.745\nI0819 20:23:04.496390 21769 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0819 20:25:15.760151 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7461\nI0819 20:25:15.760530 21769 solver.cpp:404]     Test net output #1: loss = 1.05361 (* 1 = 1.05361 loss)\nI0819 20:25:17.883316 21769 solver.cpp:228] Iteration 29900, loss = 0.117025\nI0819 20:25:17.883361 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 20:25:17.883376 21769 solver.cpp:244]     Train net output #1: loss = 0.117024 (* 1 = 0.117024 loss)\nI0819 20:25:17.979461 21769 sgd_solver.cpp:166] Iteration 29900, lr = 0.7475\nI0819 20:28:56.271495 21769 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0819 20:31:07.543639 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8261\nI0819 20:31:07.543993 21769 solver.cpp:404]    
 Test net output #1: loss = 0.67804 (* 1 = 0.67804 loss)\nI0819 20:31:09.666123 21769 solver.cpp:228] Iteration 30000, loss = 0.12421\nI0819 20:31:09.666169 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 20:31:09.666185 21769 solver.cpp:244]     Train net output #1: loss = 0.124209 (* 1 = 0.124209 loss)\nI0819 20:31:09.755920 21769 sgd_solver.cpp:166] Iteration 30000, lr = 0.75\nI0819 20:34:47.981537 21769 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0819 20:36:59.236974 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8143\nI0819 20:36:59.237326 21769 solver.cpp:404]     Test net output #1: loss = 0.736595 (* 1 = 0.736595 loss)\nI0819 20:37:01.360218 21769 solver.cpp:228] Iteration 30100, loss = 0.107427\nI0819 20:37:01.360263 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:37:01.360280 21769 solver.cpp:244]     Train net output #1: loss = 0.107427 (* 1 = 0.107427 loss)\nI0819 20:37:01.450119 21769 sgd_solver.cpp:166] Iteration 30100, lr = 0.7525\nI0819 20:40:39.635447 21769 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0819 20:42:50.900423 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7896\nI0819 20:42:50.900774 21769 solver.cpp:404]     Test net output #1: loss = 0.927525 (* 1 = 0.927525 loss)\nI0819 20:42:53.023248 21769 solver.cpp:228] Iteration 30200, loss = 0.117051\nI0819 20:42:53.023295 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 20:42:53.023311 21769 solver.cpp:244]     Train net output #1: loss = 0.117051 (* 1 = 0.117051 loss)\nI0819 20:42:53.117079 21769 sgd_solver.cpp:166] Iteration 30200, lr = 0.755\nI0819 20:46:31.382375 21769 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0819 20:48:42.631717 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8428\nI0819 20:48:42.632091 21769 solver.cpp:404]     Test net output #1: loss = 0.600571 (* 1 = 0.600571 loss)\nI0819 20:48:44.754624 21769 solver.cpp:228] Iteration 30300, loss = 
0.0719938\nI0819 20:48:44.754672 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 20:48:44.754688 21769 solver.cpp:244]     Train net output #1: loss = 0.0719934 (* 1 = 0.0719934 loss)\nI0819 20:48:44.846865 21769 sgd_solver.cpp:166] Iteration 30300, lr = 0.7575\nI0819 20:52:22.954128 21769 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0819 20:54:34.167069 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8425\nI0819 20:54:34.167438 21769 solver.cpp:404]     Test net output #1: loss = 0.622812 (* 1 = 0.622812 loss)\nI0819 20:54:36.290838 21769 solver.cpp:228] Iteration 30400, loss = 0.121603\nI0819 20:54:36.290884 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 20:54:36.290900 21769 solver.cpp:244]     Train net output #1: loss = 0.121602 (* 1 = 0.121602 loss)\nI0819 20:54:36.383873 21769 sgd_solver.cpp:166] Iteration 30400, lr = 0.76\nI0819 20:58:14.887555 21769 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0819 21:00:26.103386 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7242\nI0819 21:00:26.103760 21769 solver.cpp:404]     Test net output #1: loss = 1.25526 (* 1 = 1.25526 loss)\nI0819 21:00:28.225848 21769 solver.cpp:228] Iteration 30500, loss = 0.0975781\nI0819 21:00:28.225893 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 21:00:28.225910 21769 solver.cpp:244]     Train net output #1: loss = 0.0975776 (* 1 = 0.0975776 loss)\nI0819 21:00:28.332717 21769 sgd_solver.cpp:166] Iteration 30500, lr = 0.7625\nI0819 21:04:07.518867 21769 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0819 21:06:18.742131 21769 solver.cpp:404]     Test net output #0: accuracy = 0.793\nI0819 21:06:18.742508 21769 solver.cpp:404]     Test net output #1: loss = 0.935274 (* 1 = 0.935274 loss)\nI0819 21:06:20.864766 21769 solver.cpp:228] Iteration 30600, loss = 0.136295\nI0819 21:06:20.864816 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 21:06:20.864833 21769 
solver.cpp:244]     Train net output #1: loss = 0.136294 (* 1 = 0.136294 loss)\nI0819 21:06:20.959167 21769 sgd_solver.cpp:166] Iteration 30600, lr = 0.765\nI0819 21:09:59.809386 21769 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0819 21:12:11.018204 21769 solver.cpp:404]     Test net output #0: accuracy = 0.76\nI0819 21:12:11.018582 21769 solver.cpp:404]     Test net output #1: loss = 1.03459 (* 1 = 1.03459 loss)\nI0819 21:12:13.141242 21769 solver.cpp:228] Iteration 30700, loss = 0.0486039\nI0819 21:12:13.141288 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 21:12:13.141304 21769 solver.cpp:244]     Train net output #1: loss = 0.0486034 (* 1 = 0.0486034 loss)\nI0819 21:12:13.243782 21769 sgd_solver.cpp:166] Iteration 30700, lr = 0.7675\nI0819 21:15:52.170424 21769 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0819 21:18:03.369464 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8016\nI0819 21:18:03.369844 21769 solver.cpp:404]     Test net output #1: loss = 0.831862 (* 1 = 0.831862 loss)\nI0819 21:18:05.493207 21769 solver.cpp:228] Iteration 30800, loss = 0.080469\nI0819 21:18:05.493254 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:18:05.493270 21769 solver.cpp:244]     Train net output #1: loss = 0.0804685 (* 1 = 0.0804685 loss)\nI0819 21:18:05.593827 21769 sgd_solver.cpp:166] Iteration 30800, lr = 0.77\nI0819 21:21:44.513094 21769 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0819 21:23:55.728678 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7534\nI0819 21:23:55.729064 21769 solver.cpp:404]     Test net output #1: loss = 1.10848 (* 1 = 1.10848 loss)\nI0819 21:23:57.852325 21769 solver.cpp:228] Iteration 30900, loss = 0.076624\nI0819 21:23:57.852370 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 21:23:57.852387 21769 solver.cpp:244]     Train net output #1: loss = 0.0766235 (* 1 = 0.0766235 loss)\nI0819 21:23:57.946065 21769 sgd_solver.cpp:166] 
Iteration 30900, lr = 0.7725\nI0819 21:27:36.841645 21769 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0819 21:29:48.095901 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7888\nI0819 21:29:48.096249 21769 solver.cpp:404]     Test net output #1: loss = 0.956406 (* 1 = 0.956406 loss)\nI0819 21:29:50.218780 21769 solver.cpp:228] Iteration 31000, loss = 0.145988\nI0819 21:29:50.218830 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 21:29:50.218847 21769 solver.cpp:244]     Train net output #1: loss = 0.145988 (* 1 = 0.145988 loss)\nI0819 21:29:50.317409 21769 sgd_solver.cpp:166] Iteration 31000, lr = 0.775\nI0819 21:33:29.181401 21769 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0819 21:35:40.433466 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8264\nI0819 21:35:40.433848 21769 solver.cpp:404]     Test net output #1: loss = 0.70609 (* 1 = 0.70609 loss)\nI0819 21:35:42.556329 21769 solver.cpp:228] Iteration 31100, loss = 0.0730504\nI0819 21:35:42.556375 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 21:35:42.556392 21769 solver.cpp:244]     Train net output #1: loss = 0.0730499 (* 1 = 0.0730499 loss)\nI0819 21:35:42.660341 21769 sgd_solver.cpp:166] Iteration 31100, lr = 0.7775\nI0819 21:39:21.690320 21769 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0819 21:41:32.927590 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8439\nI0819 21:41:32.927964 21769 solver.cpp:404]     Test net output #1: loss = 0.607629 (* 1 = 0.607629 loss)\nI0819 21:41:35.051023 21769 solver.cpp:228] Iteration 31200, loss = 0.0412931\nI0819 21:41:35.051071 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 21:41:35.051087 21769 solver.cpp:244]     Train net output #1: loss = 0.0412926 (* 1 = 0.0412926 loss)\nI0819 21:41:35.157785 21769 sgd_solver.cpp:166] Iteration 31200, lr = 0.78\nI0819 21:45:14.265333 21769 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0819 
21:47:25.513754 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8062\nI0819 21:47:25.514129 21769 solver.cpp:404]     Test net output #1: loss = 0.819498 (* 1 = 0.819498 loss)\nI0819 21:47:27.636754 21769 solver.cpp:228] Iteration 31300, loss = 0.0922183\nI0819 21:47:27.636803 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 21:47:27.636821 21769 solver.cpp:244]     Train net output #1: loss = 0.0922178 (* 1 = 0.0922178 loss)\nI0819 21:47:27.735056 21769 sgd_solver.cpp:166] Iteration 31300, lr = 0.7825\nI0819 21:51:06.966488 21769 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0819 21:53:18.208108 21769 solver.cpp:404]     Test net output #0: accuracy = 0.847\nI0819 21:53:18.208488 21769 solver.cpp:404]     Test net output #1: loss = 0.590202 (* 1 = 0.590202 loss)\nI0819 21:53:20.336865 21769 solver.cpp:228] Iteration 31400, loss = 0.142207\nI0819 21:53:20.336912 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 21:53:20.336928 21769 solver.cpp:244]     Train net output #1: loss = 0.142206 (* 1 = 0.142206 loss)\nI0819 21:53:20.431051 21769 sgd_solver.cpp:166] Iteration 31400, lr = 0.785\nI0819 21:56:59.497388 21769 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0819 21:59:10.753510 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8673\nI0819 21:59:10.753855 21769 solver.cpp:404]     Test net output #1: loss = 0.495737 (* 1 = 0.495737 loss)\nI0819 21:59:12.882879 21769 solver.cpp:228] Iteration 31500, loss = 0.137976\nI0819 21:59:12.882925 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 21:59:12.882941 21769 solver.cpp:244]     Train net output #1: loss = 0.137975 (* 1 = 0.137975 loss)\nI0819 21:59:12.974261 21769 sgd_solver.cpp:166] Iteration 31500, lr = 0.7875\nI0819 22:02:51.802978 21769 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0819 22:05:03.066437 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8227\nI0819 22:05:03.066812 21769 solver.cpp:404]     
Test net output #1: loss = 0.646839 (* 1 = 0.646839 loss)\nI0819 22:05:05.196378 21769 solver.cpp:228] Iteration 31600, loss = 0.0747557\nI0819 22:05:05.196427 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 22:05:05.196444 21769 solver.cpp:244]     Train net output #1: loss = 0.0747552 (* 1 = 0.0747552 loss)\nI0819 22:05:05.285280 21769 sgd_solver.cpp:166] Iteration 31600, lr = 0.79\nI0819 22:08:44.220131 21769 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0819 22:10:55.460464 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8198\nI0819 22:10:55.460835 21769 solver.cpp:404]     Test net output #1: loss = 0.738351 (* 1 = 0.738351 loss)\nI0819 22:10:57.591279 21769 solver.cpp:228] Iteration 31700, loss = 0.0521256\nI0819 22:10:57.591327 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 22:10:57.591344 21769 solver.cpp:244]     Train net output #1: loss = 0.0521251 (* 1 = 0.0521251 loss)\nI0819 22:10:57.687599 21769 sgd_solver.cpp:166] Iteration 31700, lr = 0.7925\nI0819 22:14:36.557633 21769 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0819 22:16:47.818382 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7921\nI0819 22:16:47.818763 21769 solver.cpp:404]     Test net output #1: loss = 0.809812 (* 1 = 0.809812 loss)\nI0819 22:16:49.948242 21769 solver.cpp:228] Iteration 31800, loss = 0.182411\nI0819 22:16:49.948289 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0819 22:16:49.948305 21769 solver.cpp:244]     Train net output #1: loss = 0.182411 (* 1 = 0.182411 loss)\nI0819 22:16:50.036265 21769 sgd_solver.cpp:166] Iteration 31800, lr = 0.795\nI0819 22:20:28.840373 21769 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0819 22:22:40.088510 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8425\nI0819 22:22:40.088876 21769 solver.cpp:404]     Test net output #1: loss = 0.613853 (* 1 = 0.613853 loss)\nI0819 22:22:42.218649 21769 solver.cpp:228] Iteration 31900, loss = 
0.115803\nI0819 22:22:42.218704 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 22:22:42.218720 21769 solver.cpp:244]     Train net output #1: loss = 0.115802 (* 1 = 0.115802 loss)\nI0819 22:22:42.315449 21769 sgd_solver.cpp:166] Iteration 31900, lr = 0.7975\nI0819 22:26:21.178227 21769 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0819 22:28:32.440127 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8394\nI0819 22:28:32.440493 21769 solver.cpp:404]     Test net output #1: loss = 0.64586 (* 1 = 0.64586 loss)\nI0819 22:28:34.569404 21769 solver.cpp:228] Iteration 32000, loss = 0.102672\nI0819 22:28:34.569453 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 22:28:34.569470 21769 solver.cpp:244]     Train net output #1: loss = 0.102671 (* 1 = 0.102671 loss)\nI0819 22:28:34.657011 21769 sgd_solver.cpp:166] Iteration 32000, lr = 0.8\nI0819 22:32:13.555972 21769 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0819 22:34:26.132709 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI0819 22:34:26.133141 21769 solver.cpp:404]     Test net output #1: loss = 0.830029 (* 1 = 0.830029 loss)\nI0819 22:34:28.265606 21769 solver.cpp:228] Iteration 32100, loss = 0.109866\nI0819 22:34:28.265655 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 22:34:28.265672 21769 solver.cpp:244]     Train net output #1: loss = 0.109865 (* 1 = 0.109865 loss)\nI0819 22:34:28.351681 21769 sgd_solver.cpp:166] Iteration 32100, lr = 0.8025\nI0819 22:38:07.286103 21769 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0819 22:40:18.558753 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8556\nI0819 22:40:18.559130 21769 solver.cpp:404]     Test net output #1: loss = 0.545815 (* 1 = 0.545815 loss)\nI0819 22:40:20.688393 21769 solver.cpp:228] Iteration 32200, loss = 0.097923\nI0819 22:40:20.688442 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 22:40:20.688459 21769 
solver.cpp:244]     Train net output #1: loss = 0.0979226 (* 1 = 0.0979226 loss)\nI0819 22:40:20.777544 21769 sgd_solver.cpp:166] Iteration 32200, lr = 0.805\nI0819 22:43:59.654350 21769 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0819 22:46:10.918833 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7985\nI0819 22:46:10.919244 21769 solver.cpp:404]     Test net output #1: loss = 0.835611 (* 1 = 0.835611 loss)\nI0819 22:46:13.048952 21769 solver.cpp:228] Iteration 32300, loss = 0.0217648\nI0819 22:46:13.049001 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 22:46:13.049018 21769 solver.cpp:244]     Train net output #1: loss = 0.0217643 (* 1 = 0.0217643 loss)\nI0819 22:46:13.139571 21769 sgd_solver.cpp:166] Iteration 32300, lr = 0.8075\nI0819 22:49:51.919337 21769 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0819 22:52:03.195570 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8551\nI0819 22:52:03.195888 21769 solver.cpp:404]     Test net output #1: loss = 0.50806 (* 1 = 0.50806 loss)\nI0819 22:52:05.324764 21769 solver.cpp:228] Iteration 32400, loss = 0.0580036\nI0819 22:52:05.324811 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0819 22:52:05.324828 21769 solver.cpp:244]     Train net output #1: loss = 0.0580032 (* 1 = 0.0580032 loss)\nI0819 22:52:05.412120 21769 sgd_solver.cpp:166] Iteration 32400, lr = 0.81\nI0819 22:55:44.294939 21769 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0819 22:57:55.547395 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8069\nI0819 22:57:55.547749 21769 solver.cpp:404]     Test net output #1: loss = 0.696452 (* 1 = 0.696452 loss)\nI0819 22:57:57.676129 21769 solver.cpp:228] Iteration 32500, loss = 0.101569\nI0819 22:57:57.676177 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:57:57.676193 21769 solver.cpp:244]     Train net output #1: loss = 0.101568 (* 1 = 0.101568 loss)\nI0819 22:57:57.770447 21769 sgd_solver.cpp:166] 
Iteration 32500, lr = 0.8125\nI0819 23:01:36.944512 21769 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0819 23:03:48.194917 21769 solver.cpp:404]     Test net output #0: accuracy = 0.772\nI0819 23:03:48.195296 21769 solver.cpp:404]     Test net output #1: loss = 1.01087 (* 1 = 1.01087 loss)\nI0819 23:03:50.323381 21769 solver.cpp:228] Iteration 32600, loss = 0.0697143\nI0819 23:03:50.323428 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 23:03:50.323446 21769 solver.cpp:244]     Train net output #1: loss = 0.0697138 (* 1 = 0.0697138 loss)\nI0819 23:03:50.419404 21769 sgd_solver.cpp:166] Iteration 32600, lr = 0.815\nI0819 23:07:29.224846 21769 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0819 23:09:40.421804 21769 solver.cpp:404]     Test net output #0: accuracy = 0.817\nI0819 23:09:40.422168 21769 solver.cpp:404]     Test net output #1: loss = 0.737123 (* 1 = 0.737123 loss)\nI0819 23:09:42.550880 21769 solver.cpp:228] Iteration 32700, loss = 0.0201296\nI0819 23:09:42.550926 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 23:09:42.550942 21769 solver.cpp:244]     Train net output #1: loss = 0.0201291 (* 1 = 0.0201291 loss)\nI0819 23:09:42.645679 21769 sgd_solver.cpp:166] Iteration 32700, lr = 0.8175\nI0819 23:13:21.464798 21769 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0819 23:15:32.682045 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8361\nI0819 23:15:32.682413 21769 solver.cpp:404]     Test net output #1: loss = 0.636679 (* 1 = 0.636679 loss)\nI0819 23:15:34.811311 21769 solver.cpp:228] Iteration 32800, loss = 0.105649\nI0819 23:15:34.811360 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:15:34.811385 21769 solver.cpp:244]     Train net output #1: loss = 0.105648 (* 1 = 0.105648 loss)\nI0819 23:15:34.903977 21769 sgd_solver.cpp:166] Iteration 32800, lr = 0.82\nI0819 23:19:13.716349 21769 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0819 23:21:24.939149 
21769 solver.cpp:404]     Test net output #0: accuracy = 0.8213\nI0819 23:21:24.939515 21769 solver.cpp:404]     Test net output #1: loss = 0.705724 (* 1 = 0.705724 loss)\nI0819 23:21:27.068336 21769 solver.cpp:228] Iteration 32900, loss = 0.179535\nI0819 23:21:27.068383 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0819 23:21:27.068408 21769 solver.cpp:244]     Train net output #1: loss = 0.179535 (* 1 = 0.179535 loss)\nI0819 23:21:27.159612 21769 sgd_solver.cpp:166] Iteration 32900, lr = 0.8225\nI0819 23:25:06.010231 21769 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0819 23:27:17.249231 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8082\nI0819 23:27:17.249627 21769 solver.cpp:404]     Test net output #1: loss = 0.786582 (* 1 = 0.786582 loss)\nI0819 23:27:19.379081 21769 solver.cpp:228] Iteration 33000, loss = 0.154015\nI0819 23:27:19.379132 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0819 23:27:19.379156 21769 solver.cpp:244]     Train net output #1: loss = 0.154015 (* 1 = 0.154015 loss)\nI0819 23:27:19.473445 21769 sgd_solver.cpp:166] Iteration 33000, lr = 0.825\nI0819 23:30:57.804030 21769 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0819 23:33:09.045434 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8258\nI0819 23:33:09.045812 21769 solver.cpp:404]     Test net output #1: loss = 0.679601 (* 1 = 0.679601 loss)\nI0819 23:33:11.169677 21769 solver.cpp:228] Iteration 33100, loss = 0.152713\nI0819 23:33:11.169733 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:33:11.169756 21769 solver.cpp:244]     Train net output #1: loss = 0.152713 (* 1 = 0.152713 loss)\nI0819 23:33:11.260761 21769 sgd_solver.cpp:166] Iteration 33100, lr = 0.8275\nI0819 23:36:49.760332 21769 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0819 23:39:01.009078 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8529\nI0819 23:39:01.009414 21769 solver.cpp:404]     Test net output 
#1: loss = 0.548432 (* 1 = 0.548432 loss)\nI0819 23:39:03.132149 21769 solver.cpp:228] Iteration 33200, loss = 0.0495816\nI0819 23:39:03.132200 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0819 23:39:03.132225 21769 solver.cpp:244]     Train net output #1: loss = 0.0495811 (* 1 = 0.0495811 loss)\nI0819 23:39:03.237205 21769 sgd_solver.cpp:166] Iteration 33200, lr = 0.83\nI0819 23:42:41.911111 21769 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0819 23:44:53.159296 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8039\nI0819 23:44:53.159639 21769 solver.cpp:404]     Test net output #1: loss = 0.827804 (* 1 = 0.827804 loss)\nI0819 23:44:55.283577 21769 solver.cpp:228] Iteration 33300, loss = 0.0937121\nI0819 23:44:55.283627 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0819 23:44:55.283644 21769 solver.cpp:244]     Train net output #1: loss = 0.0937115 (* 1 = 0.0937115 loss)\nI0819 23:44:55.381091 21769 sgd_solver.cpp:166] Iteration 33300, lr = 0.8325\nI0819 23:48:33.925714 21769 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0819 23:50:45.215709 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8279\nI0819 23:50:45.216049 21769 solver.cpp:404]     Test net output #1: loss = 0.623559 (* 1 = 0.623559 loss)\nI0819 23:50:47.340376 21769 solver.cpp:228] Iteration 33400, loss = 0.101434\nI0819 23:50:47.340422 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:50:47.340440 21769 solver.cpp:244]     Train net output #1: loss = 0.101433 (* 1 = 0.101433 loss)\nI0819 23:50:47.428388 21769 sgd_solver.cpp:166] Iteration 33400, lr = 0.835\nI0819 23:54:25.865378 21769 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0819 23:56:37.168222 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8179\nI0819 23:56:37.168570 21769 solver.cpp:404]     Test net output #1: loss = 0.720261 (* 1 = 0.720261 loss)\nI0819 23:56:39.291564 21769 solver.cpp:228] Iteration 33500, loss = 0.189966\nI0819 
23:56:39.291612 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0819 23:56:39.291628 21769 solver.cpp:244]     Train net output #1: loss = 0.189965 (* 1 = 0.189965 loss)\nI0819 23:56:39.388020 21769 sgd_solver.cpp:166] Iteration 33500, lr = 0.8375\nI0820 00:00:17.776330 21769 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0820 00:02:29.063971 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8085\nI0820 00:02:29.064328 21769 solver.cpp:404]     Test net output #1: loss = 0.74036 (* 1 = 0.74036 loss)\nI0820 00:02:31.187235 21769 solver.cpp:228] Iteration 33600, loss = 0.120192\nI0820 00:02:31.187283 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 00:02:31.187300 21769 solver.cpp:244]     Train net output #1: loss = 0.120191 (* 1 = 0.120191 loss)\nI0820 00:02:31.289616 21769 sgd_solver.cpp:166] Iteration 33600, lr = 0.84\nI0820 00:06:09.692421 21769 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0820 00:08:20.970563 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0820 00:08:20.970944 21769 solver.cpp:404]     Test net output #1: loss = 0.607008 (* 1 = 0.607008 loss)\nI0820 00:08:23.093384 21769 solver.cpp:228] Iteration 33700, loss = 0.0855802\nI0820 00:08:23.093430 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 00:08:23.093446 21769 solver.cpp:244]     Train net output #1: loss = 0.0855796 (* 1 = 0.0855796 loss)\nI0820 00:08:23.189584 21769 sgd_solver.cpp:166] Iteration 33700, lr = 0.8425\nI0820 00:12:01.699888 21769 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0820 00:14:12.981586 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7673\nI0820 00:14:12.981969 21769 solver.cpp:404]     Test net output #1: loss = 0.941955 (* 1 = 0.941955 loss)\nI0820 00:14:15.109048 21769 solver.cpp:228] Iteration 33800, loss = 0.0833842\nI0820 00:14:15.109099 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 00:14:15.109123 21769 solver.cpp:244]     
Train net output #1: loss = 0.0833836 (* 1 = 0.0833836 loss)\nI0820 00:14:15.205047 21769 sgd_solver.cpp:166] Iteration 33800, lr = 0.845\nI0820 00:17:53.628520 21769 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0820 00:20:04.908850 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8191\nI0820 00:20:04.909222 21769 solver.cpp:404]     Test net output #1: loss = 0.674351 (* 1 = 0.674351 loss)\nI0820 00:20:07.032021 21769 solver.cpp:228] Iteration 33900, loss = 0.148545\nI0820 00:20:07.032069 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 00:20:07.032094 21769 solver.cpp:244]     Train net output #1: loss = 0.148545 (* 1 = 0.148545 loss)\nI0820 00:20:07.125985 21769 sgd_solver.cpp:166] Iteration 33900, lr = 0.8475\nI0820 00:23:45.556927 21769 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0820 00:25:56.837069 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8096\nI0820 00:25:56.837456 21769 solver.cpp:404]     Test net output #1: loss = 0.773009 (* 1 = 0.773009 loss)\nI0820 00:25:58.960832 21769 solver.cpp:228] Iteration 34000, loss = 0.137365\nI0820 00:25:58.960882 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 00:25:58.960907 21769 solver.cpp:244]     Train net output #1: loss = 0.137365 (* 1 = 0.137365 loss)\nI0820 00:25:59.051367 21769 sgd_solver.cpp:166] Iteration 34000, lr = 0.85\nI0820 00:29:37.504926 21769 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0820 00:31:48.797473 21769 solver.cpp:404]     Test net output #0: accuracy = 0.823\nI0820 00:31:48.797842 21769 solver.cpp:404]     Test net output #1: loss = 0.75784 (* 1 = 0.75784 loss)\nI0820 00:31:50.921094 21769 solver.cpp:228] Iteration 34100, loss = 0.16255\nI0820 00:31:50.921144 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 00:31:50.921169 21769 solver.cpp:244]     Train net output #1: loss = 0.162549 (* 1 = 0.162549 loss)\nI0820 00:31:51.015735 21769 sgd_solver.cpp:166] Iteration 34100, lr = 
0.8525\nI0820 00:35:29.380615 21769 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0820 00:37:40.643025 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8364\nI0820 00:37:40.643398 21769 solver.cpp:404]     Test net output #1: loss = 0.622121 (* 1 = 0.622121 loss)\nI0820 00:37:42.767673 21769 solver.cpp:228] Iteration 34200, loss = 0.0501132\nI0820 00:37:42.767726 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 00:37:42.767743 21769 solver.cpp:244]     Train net output #1: loss = 0.0501127 (* 1 = 0.0501127 loss)\nI0820 00:37:42.861191 21769 sgd_solver.cpp:166] Iteration 34200, lr = 0.855\nI0820 00:41:21.116828 21769 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0820 00:43:33.207451 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8276\nI0820 00:43:33.207839 21769 solver.cpp:404]     Test net output #1: loss = 0.688491 (* 1 = 0.688491 loss)\nI0820 00:43:35.333747 21769 solver.cpp:228] Iteration 34300, loss = 0.209836\nI0820 00:43:35.333798 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 00:43:35.333817 21769 solver.cpp:244]     Train net output #1: loss = 0.209835 (* 1 = 0.209835 loss)\nI0820 00:43:35.426358 21769 sgd_solver.cpp:166] Iteration 34300, lr = 0.8575\nI0820 00:47:13.430610 21769 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0820 00:49:25.071382 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8377\nI0820 00:49:25.071745 21769 solver.cpp:404]     Test net output #1: loss = 0.580025 (* 1 = 0.580025 loss)\nI0820 00:49:27.198534 21769 solver.cpp:228] Iteration 34400, loss = 0.077407\nI0820 00:49:27.198585 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 00:49:27.198603 21769 solver.cpp:244]     Train net output #1: loss = 0.0774064 (* 1 = 0.0774064 loss)\nI0820 00:49:27.279367 21769 sgd_solver.cpp:166] Iteration 34400, lr = 0.86\nI0820 00:53:05.278710 21769 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0820 00:55:16.555269 21769 
solver.cpp:404]     Test net output #0: accuracy = 0.8348\nI0820 00:55:16.555663 21769 solver.cpp:404]     Test net output #1: loss = 0.621354 (* 1 = 0.621354 loss)\nI0820 00:55:18.680773 21769 solver.cpp:228] Iteration 34500, loss = 0.145103\nI0820 00:55:18.680822 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 00:55:18.680847 21769 solver.cpp:244]     Train net output #1: loss = 0.145102 (* 1 = 0.145102 loss)\nI0820 00:55:18.766175 21769 sgd_solver.cpp:166] Iteration 34500, lr = 0.8625\nI0820 00:58:56.708861 21769 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0820 01:01:07.965075 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7563\nI0820 01:01:07.965440 21769 solver.cpp:404]     Test net output #1: loss = 0.984326 (* 1 = 0.984326 loss)\nI0820 01:01:10.089102 21769 solver.cpp:228] Iteration 34600, loss = 0.263367\nI0820 01:01:10.089151 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 01:01:10.089176 21769 solver.cpp:244]     Train net output #1: loss = 0.263367 (* 1 = 0.263367 loss)\nI0820 01:01:10.173022 21769 sgd_solver.cpp:166] Iteration 34600, lr = 0.865\nI0820 01:04:47.870692 21769 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0820 01:06:59.130237 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8659\nI0820 01:06:59.130573 21769 solver.cpp:404]     Test net output #1: loss = 0.547414 (* 1 = 0.547414 loss)\nI0820 01:07:01.252621 21769 solver.cpp:228] Iteration 34700, loss = 0.0814785\nI0820 01:07:01.252670 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 01:07:01.252701 21769 solver.cpp:244]     Train net output #1: loss = 0.0814779 (* 1 = 0.0814779 loss)\nI0820 01:07:01.340064 21769 sgd_solver.cpp:166] Iteration 34700, lr = 0.8675\nI0820 01:10:38.939925 21769 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0820 01:12:50.225989 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8376\nI0820 01:12:50.226374 21769 solver.cpp:404]     Test net output #1: 
loss = 0.652941 (* 1 = 0.652941 loss)\nI0820 01:12:52.350705 21769 solver.cpp:228] Iteration 34800, loss = 0.139412\nI0820 01:12:52.350755 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:12:52.350780 21769 solver.cpp:244]     Train net output #1: loss = 0.139411 (* 1 = 0.139411 loss)\nI0820 01:12:52.433997 21769 sgd_solver.cpp:166] Iteration 34800, lr = 0.87\nI0820 01:16:30.225603 21769 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0820 01:18:42.998447 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7992\nI0820 01:18:42.998983 21769 solver.cpp:404]     Test net output #1: loss = 0.854284 (* 1 = 0.854284 loss)\nI0820 01:18:45.128173 21769 solver.cpp:228] Iteration 34900, loss = 0.102952\nI0820 01:18:45.128221 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 01:18:45.128235 21769 solver.cpp:244]     Train net output #1: loss = 0.102951 (* 1 = 0.102951 loss)\nI0820 01:18:45.214908 21769 sgd_solver.cpp:166] Iteration 34900, lr = 0.8725\nI0820 01:22:23.081526 21769 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0820 01:24:34.296056 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8192\nI0820 01:24:34.296416 21769 solver.cpp:404]     Test net output #1: loss = 0.715726 (* 1 = 0.715726 loss)\nI0820 01:24:36.419756 21769 solver.cpp:228] Iteration 35000, loss = 0.0920195\nI0820 01:24:36.419795 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 01:24:36.419811 21769 solver.cpp:244]     Train net output #1: loss = 0.0920189 (* 1 = 0.0920189 loss)\nI0820 01:24:36.506525 21769 sgd_solver.cpp:166] Iteration 35000, lr = 0.875\nI0820 01:28:14.190562 21769 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0820 01:30:25.411967 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8395\nI0820 01:30:25.412328 21769 solver.cpp:404]     Test net output #1: loss = 0.625469 (* 1 = 0.625469 loss)\nI0820 01:30:27.535148 21769 solver.cpp:228] Iteration 35100, loss = 0.0737267\nI0820 
01:30:27.535195 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 01:30:27.535212 21769 solver.cpp:244]     Train net output #1: loss = 0.0737261 (* 1 = 0.0737261 loss)\nI0820 01:30:27.618654 21769 sgd_solver.cpp:166] Iteration 35100, lr = 0.8775\nI0820 01:34:05.297335 21769 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0820 01:36:16.533532 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8389\nI0820 01:36:16.533910 21769 solver.cpp:404]     Test net output #1: loss = 0.635205 (* 1 = 0.635205 loss)\nI0820 01:36:18.657291 21769 solver.cpp:228] Iteration 35200, loss = 0.106807\nI0820 01:36:18.657331 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:36:18.657356 21769 solver.cpp:244]     Train net output #1: loss = 0.106807 (* 1 = 0.106807 loss)\nI0820 01:36:18.747310 21769 sgd_solver.cpp:166] Iteration 35200, lr = 0.88\nI0820 01:39:56.447863 21769 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0820 01:42:07.686828 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8657\nI0820 01:42:07.687194 21769 solver.cpp:404]     Test net output #1: loss = 0.485738 (* 1 = 0.485738 loss)\nI0820 01:42:09.810652 21769 solver.cpp:228] Iteration 35300, loss = 0.0848582\nI0820 01:42:09.810703 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 01:42:09.810727 21769 solver.cpp:244]     Train net output #1: loss = 0.0848576 (* 1 = 0.0848576 loss)\nI0820 01:42:09.893870 21769 sgd_solver.cpp:166] Iteration 35300, lr = 0.8825\nI0820 01:45:47.534752 21769 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0820 01:47:58.778319 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8295\nI0820 01:47:58.778700 21769 solver.cpp:404]     Test net output #1: loss = 0.61865 (* 1 = 0.61865 loss)\nI0820 01:48:00.902468 21769 solver.cpp:228] Iteration 35400, loss = 0.102953\nI0820 01:48:00.902519 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 01:48:00.902545 21769 solver.cpp:244]     
Train net output #1: loss = 0.102953 (* 1 = 0.102953 loss)\nI0820 01:48:00.986416 21769 sgd_solver.cpp:166] Iteration 35400, lr = 0.885\nI0820 01:51:38.697492 21769 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0820 01:53:49.910917 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7652\nI0820 01:53:49.911252 21769 solver.cpp:404]     Test net output #1: loss = 1.01484 (* 1 = 1.01484 loss)\nI0820 01:53:52.033468 21769 solver.cpp:228] Iteration 35500, loss = 0.141519\nI0820 01:53:52.033504 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 01:53:52.033519 21769 solver.cpp:244]     Train net output #1: loss = 0.141518 (* 1 = 0.141518 loss)\nI0820 01:53:52.124846 21769 sgd_solver.cpp:166] Iteration 35500, lr = 0.8875\nI0820 01:57:29.988428 21769 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0820 01:59:41.228353 21769 solver.cpp:404]     Test net output #0: accuracy = 0.811\nI0820 01:59:41.228691 21769 solver.cpp:404]     Test net output #1: loss = 0.839691 (* 1 = 0.839691 loss)\nI0820 01:59:43.351788 21769 solver.cpp:228] Iteration 35600, loss = 0.136696\nI0820 01:59:43.351833 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 01:59:43.351850 21769 solver.cpp:244]     Train net output #1: loss = 0.136695 (* 1 = 0.136695 loss)\nI0820 01:59:43.441654 21769 sgd_solver.cpp:166] Iteration 35600, lr = 0.89\nI0820 02:03:21.078315 21769 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0820 02:05:32.317919 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7758\nI0820 02:05:32.318305 21769 solver.cpp:404]     Test net output #1: loss = 0.837002 (* 1 = 0.837002 loss)\nI0820 02:05:34.440771 21769 solver.cpp:228] Iteration 35700, loss = 0.0386792\nI0820 02:05:34.440817 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 02:05:34.440834 21769 solver.cpp:244]     Train net output #1: loss = 0.0386785 (* 1 = 0.0386785 loss)\nI0820 02:05:34.529734 21769 sgd_solver.cpp:166] Iteration 35700, lr = 
0.8925\nI0820 02:09:12.123131 21769 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0820 02:11:23.364038 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7718\nI0820 02:11:23.364403 21769 solver.cpp:404]     Test net output #1: loss = 1.00548 (* 1 = 1.00548 loss)\nI0820 02:11:25.487442 21769 solver.cpp:228] Iteration 35800, loss = 0.0693394\nI0820 02:11:25.487488 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 02:11:25.487504 21769 solver.cpp:244]     Train net output #1: loss = 0.0693388 (* 1 = 0.0693388 loss)\nI0820 02:11:25.576215 21769 sgd_solver.cpp:166] Iteration 35800, lr = 0.895\nI0820 02:15:03.192121 21769 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0820 02:17:14.436491 21769 solver.cpp:404]     Test net output #0: accuracy = 0.798\nI0820 02:17:14.436863 21769 solver.cpp:404]     Test net output #1: loss = 0.802074 (* 1 = 0.802074 loss)\nI0820 02:17:16.560052 21769 solver.cpp:228] Iteration 35900, loss = 0.298893\nI0820 02:17:16.560089 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 02:17:16.560106 21769 solver.cpp:244]     Train net output #1: loss = 0.298892 (* 1 = 0.298892 loss)\nI0820 02:17:16.648663 21769 sgd_solver.cpp:166] Iteration 35900, lr = 0.8975\nI0820 02:20:54.282176 21769 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0820 02:23:05.530973 21769 solver.cpp:404]     Test net output #0: accuracy = 0.834\nI0820 02:23:05.531302 21769 solver.cpp:404]     Test net output #1: loss = 0.568683 (* 1 = 0.568683 loss)\nI0820 02:23:07.655067 21769 solver.cpp:228] Iteration 36000, loss = 0.106392\nI0820 02:23:07.655104 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 02:23:07.655118 21769 solver.cpp:244]     Train net output #1: loss = 0.106391 (* 1 = 0.106391 loss)\nI0820 02:23:07.739955 21769 sgd_solver.cpp:166] Iteration 36000, lr = 0.9\nI0820 02:26:45.476318 21769 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0820 02:28:56.713616 21769 solver.cpp:404]     
Test net output #0: accuracy = 0.738\nI0820 02:28:56.713948 21769 solver.cpp:404]     Test net output #1: loss = 1.0278 (* 1 = 1.0278 loss)\nI0820 02:28:58.841930 21769 solver.cpp:228] Iteration 36100, loss = 0.0632009\nI0820 02:28:58.841969 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 02:28:58.841984 21769 solver.cpp:244]     Train net output #1: loss = 0.0632002 (* 1 = 0.0632002 loss)\nI0820 02:28:58.928710 21769 sgd_solver.cpp:166] Iteration 36100, lr = 0.9025\nI0820 02:32:36.583469 21769 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0820 02:34:47.827329 21769 solver.cpp:404]     Test net output #0: accuracy = 0.808\nI0820 02:34:47.827682 21769 solver.cpp:404]     Test net output #1: loss = 0.843908 (* 1 = 0.843908 loss)\nI0820 02:34:49.956286 21769 solver.cpp:228] Iteration 36200, loss = 0.068516\nI0820 02:34:49.956323 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 02:34:49.956338 21769 solver.cpp:244]     Train net output #1: loss = 0.0685153 (* 1 = 0.0685153 loss)\nI0820 02:34:50.042675 21769 sgd_solver.cpp:166] Iteration 36200, lr = 0.905\nI0820 02:38:27.721856 21769 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0820 02:40:38.966650 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7471\nI0820 02:40:38.967025 21769 solver.cpp:404]     Test net output #1: loss = 1.05506 (* 1 = 1.05506 loss)\nI0820 02:40:41.095113 21769 solver.cpp:228] Iteration 36300, loss = 0.0871588\nI0820 02:40:41.095149 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 02:40:41.095165 21769 solver.cpp:244]     Train net output #1: loss = 0.0871581 (* 1 = 0.0871581 loss)\nI0820 02:40:41.176254 21769 sgd_solver.cpp:166] Iteration 36300, lr = 0.9075\nI0820 02:44:18.889129 21769 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0820 02:46:30.137536 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8263\nI0820 02:46:30.137857 21769 solver.cpp:404]     Test net output #1: loss = 0.667668 (* 1 = 
0.667668 loss)\nI0820 02:46:32.266337 21769 solver.cpp:228] Iteration 36400, loss = 0.218922\nI0820 02:46:32.266373 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 02:46:32.266388 21769 solver.cpp:244]     Train net output #1: loss = 0.218922 (* 1 = 0.218922 loss)\nI0820 02:46:32.350505 21769 sgd_solver.cpp:166] Iteration 36400, lr = 0.91\nI0820 02:50:10.040822 21769 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0820 02:52:21.284490 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8134\nI0820 02:52:21.284855 21769 solver.cpp:404]     Test net output #1: loss = 0.770385 (* 1 = 0.770385 loss)\nI0820 02:52:23.412693 21769 solver.cpp:228] Iteration 36500, loss = 0.0301601\nI0820 02:52:23.412729 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 02:52:23.412744 21769 solver.cpp:244]     Train net output #1: loss = 0.0301594 (* 1 = 0.0301594 loss)\nI0820 02:52:23.492583 21769 sgd_solver.cpp:166] Iteration 36500, lr = 0.9125\nI0820 02:56:01.403420 21769 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0820 02:58:12.658560 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7305\nI0820 02:58:12.658931 21769 solver.cpp:404]     Test net output #1: loss = 1.32952 (* 1 = 1.32952 loss)\nI0820 02:58:14.787272 21769 solver.cpp:228] Iteration 36600, loss = 0.156813\nI0820 02:58:14.787308 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 02:58:14.787323 21769 solver.cpp:244]     Train net output #1: loss = 0.156812 (* 1 = 0.156812 loss)\nI0820 02:58:14.868897 21769 sgd_solver.cpp:166] Iteration 36600, lr = 0.915\nI0820 03:01:52.434023 21769 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0820 03:04:03.697273 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8072\nI0820 03:04:03.697646 21769 solver.cpp:404]     Test net output #1: loss = 0.807139 (* 1 = 0.807139 loss)\nI0820 03:04:05.826465 21769 solver.cpp:228] Iteration 36700, loss = 0.0786174\nI0820 03:04:05.826503 21769 
solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 03:04:05.826519 21769 solver.cpp:244]     Train net output #1: loss = 0.0786167 (* 1 = 0.0786167 loss)\nI0820 03:04:05.906728 21769 sgd_solver.cpp:166] Iteration 36700, lr = 0.9175\nI0820 03:07:43.564131 21769 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0820 03:09:54.843791 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8327\nI0820 03:09:54.844180 21769 solver.cpp:404]     Test net output #1: loss = 0.690874 (* 1 = 0.690874 loss)\nI0820 03:09:56.974086 21769 solver.cpp:228] Iteration 36800, loss = 0.107372\nI0820 03:09:56.974126 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:09:56.974151 21769 solver.cpp:244]     Train net output #1: loss = 0.107372 (* 1 = 0.107372 loss)\nI0820 03:09:57.056113 21769 sgd_solver.cpp:166] Iteration 36800, lr = 0.92\nI0820 03:13:34.785470 21769 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0820 03:15:46.078807 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8177\nI0820 03:15:46.079190 21769 solver.cpp:404]     Test net output #1: loss = 0.67931 (* 1 = 0.67931 loss)\nI0820 03:15:48.208218 21769 solver.cpp:228] Iteration 36900, loss = 0.113909\nI0820 03:15:48.208259 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:15:48.208284 21769 solver.cpp:244]     Train net output #1: loss = 0.113908 (* 1 = 0.113908 loss)\nI0820 03:15:48.294245 21769 sgd_solver.cpp:166] Iteration 36900, lr = 0.9225\nI0820 03:19:25.860555 21769 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0820 03:21:37.143872 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8098\nI0820 03:21:37.144244 21769 solver.cpp:404]     Test net output #1: loss = 0.728473 (* 1 = 0.728473 loss)\nI0820 03:21:39.272537 21769 solver.cpp:228] Iteration 37000, loss = 0.152229\nI0820 03:21:39.272579 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 03:21:39.272603 21769 solver.cpp:244]     Train net output #1: loss 
= 0.152228 (* 1 = 0.152228 loss)\nI0820 03:21:39.357354 21769 sgd_solver.cpp:166] Iteration 37000, lr = 0.925\nI0820 03:25:16.985937 21769 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0820 03:27:28.272004 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7911\nI0820 03:27:28.272384 21769 solver.cpp:404]     Test net output #1: loss = 0.793429 (* 1 = 0.793429 loss)\nI0820 03:27:30.402561 21769 solver.cpp:228] Iteration 37100, loss = 0.143685\nI0820 03:27:30.402601 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 03:27:30.402626 21769 solver.cpp:244]     Train net output #1: loss = 0.143684 (* 1 = 0.143684 loss)\nI0820 03:27:30.484518 21769 sgd_solver.cpp:166] Iteration 37100, lr = 0.9275\nI0820 03:31:08.340209 21769 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0820 03:33:19.638022 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5962\nI0820 03:33:19.638392 21769 solver.cpp:404]     Test net output #1: loss = 2.45619 (* 1 = 2.45619 loss)\nI0820 03:33:21.768342 21769 solver.cpp:228] Iteration 37200, loss = 0.0454803\nI0820 03:33:21.768383 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 03:33:21.768406 21769 solver.cpp:244]     Train net output #1: loss = 0.0454795 (* 1 = 0.0454795 loss)\nI0820 03:33:21.856678 21769 sgd_solver.cpp:166] Iteration 37200, lr = 0.93\nI0820 03:36:59.461002 21769 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0820 03:39:10.738101 21769 solver.cpp:404]     Test net output #0: accuracy = 0.784\nI0820 03:39:10.738476 21769 solver.cpp:404]     Test net output #1: loss = 1.11944 (* 1 = 1.11944 loss)\nI0820 03:39:12.867993 21769 solver.cpp:228] Iteration 37300, loss = 0.12236\nI0820 03:39:12.868036 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 03:39:12.868058 21769 solver.cpp:244]     Train net output #1: loss = 0.12236 (* 1 = 0.12236 loss)\nI0820 03:39:12.943236 21769 sgd_solver.cpp:166] Iteration 37300, lr = 0.9325\nI0820 03:42:50.466500 
21769 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0820 03:45:01.700395 21769 solver.cpp:404]     Test net output #0: accuracy = 0.839\nI0820 03:45:01.700783 21769 solver.cpp:404]     Test net output #1: loss = 0.61494 (* 1 = 0.61494 loss)\nI0820 03:45:03.830636 21769 solver.cpp:228] Iteration 37400, loss = 0.0690656\nI0820 03:45:03.830677 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 03:45:03.830703 21769 solver.cpp:244]     Train net output #1: loss = 0.0690649 (* 1 = 0.0690649 loss)\nI0820 03:45:03.911886 21769 sgd_solver.cpp:166] Iteration 37400, lr = 0.935\nI0820 03:48:41.581858 21769 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0820 03:50:52.834388 21769 solver.cpp:404]     Test net output #0: accuracy = 0.847\nI0820 03:50:52.834743 21769 solver.cpp:404]     Test net output #1: loss = 0.565495 (* 1 = 0.565495 loss)\nI0820 03:50:54.966200 21769 solver.cpp:228] Iteration 37500, loss = 0.176137\nI0820 03:50:54.966241 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:50:54.966265 21769 solver.cpp:244]     Train net output #1: loss = 0.176136 (* 1 = 0.176136 loss)\nI0820 03:50:55.051440 21769 sgd_solver.cpp:166] Iteration 37500, lr = 0.9375\nI0820 03:54:32.750237 21769 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0820 03:56:43.989636 21769 solver.cpp:404]     Test net output #0: accuracy = 0.772\nI0820 03:56:43.989969 21769 solver.cpp:404]     Test net output #1: loss = 0.975268 (* 1 = 0.975268 loss)\nI0820 03:56:46.113570 21769 solver.cpp:228] Iteration 37600, loss = 0.169023\nI0820 03:56:46.113610 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 03:56:46.113636 21769 solver.cpp:244]     Train net output #1: loss = 0.169022 (* 1 = 0.169022 loss)\nI0820 03:56:46.205343 21769 sgd_solver.cpp:166] Iteration 37600, lr = 0.94\nI0820 04:00:23.929378 21769 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0820 04:02:35.202455 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.8546\nI0820 04:02:35.202822 21769 solver.cpp:404]     Test net output #1: loss = 0.518815 (* 1 = 0.518815 loss)\nI0820 04:02:37.326568 21769 solver.cpp:228] Iteration 37700, loss = 0.0839758\nI0820 04:02:37.326607 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 04:02:37.326632 21769 solver.cpp:244]     Train net output #1: loss = 0.083975 (* 1 = 0.083975 loss)\nI0820 04:02:37.416692 21769 sgd_solver.cpp:166] Iteration 37700, lr = 0.9425\nI0820 04:06:15.119920 21769 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0820 04:08:26.364562 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8098\nI0820 04:08:26.364931 21769 solver.cpp:404]     Test net output #1: loss = 0.846861 (* 1 = 0.846861 loss)\nI0820 04:08:28.488327 21769 solver.cpp:228] Iteration 37800, loss = 0.0387926\nI0820 04:08:28.488378 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 04:08:28.488402 21769 solver.cpp:244]     Train net output #1: loss = 0.0387919 (* 1 = 0.0387919 loss)\nI0820 04:08:28.581347 21769 sgd_solver.cpp:166] Iteration 37800, lr = 0.945\nI0820 04:12:06.268642 21769 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0820 04:14:17.555727 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7914\nI0820 04:14:17.556094 21769 solver.cpp:404]     Test net output #1: loss = 0.756245 (* 1 = 0.756245 loss)\nI0820 04:14:19.679055 21769 solver.cpp:228] Iteration 37900, loss = 0.152133\nI0820 04:14:19.679093 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 04:14:19.679110 21769 solver.cpp:244]     Train net output #1: loss = 0.152132 (* 1 = 0.152132 loss)\nI0820 04:14:19.766999 21769 sgd_solver.cpp:166] Iteration 37900, lr = 0.9475\nI0820 04:17:57.401898 21769 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0820 04:20:08.693936 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7868\nI0820 04:20:08.694298 21769 solver.cpp:404]     Test net output #1: loss = 0.893742 (* 1 = 0.893742 loss)\nI0820 
04:20:10.817345 21769 solver.cpp:228] Iteration 38000, loss = 0.131879\nI0820 04:20:10.817384 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 04:20:10.817399 21769 solver.cpp:244]     Train net output #1: loss = 0.131878 (* 1 = 0.131878 loss)\nI0820 04:20:10.908540 21769 sgd_solver.cpp:166] Iteration 38000, lr = 0.95\nI0820 04:23:48.517897 21769 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0820 04:25:59.798233 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8628\nI0820 04:25:59.798565 21769 solver.cpp:404]     Test net output #1: loss = 0.534908 (* 1 = 0.534908 loss)\nI0820 04:26:01.921807 21769 solver.cpp:228] Iteration 38100, loss = 0.104254\nI0820 04:26:01.921846 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 04:26:01.921861 21769 solver.cpp:244]     Train net output #1: loss = 0.104254 (* 1 = 0.104254 loss)\nI0820 04:26:02.009755 21769 sgd_solver.cpp:166] Iteration 38100, lr = 0.9525\nI0820 04:29:39.681857 21769 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0820 04:31:50.978021 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8355\nI0820 04:31:50.978394 21769 solver.cpp:404]     Test net output #1: loss = 0.622059 (* 1 = 0.622059 loss)\nI0820 04:31:53.101582 21769 solver.cpp:228] Iteration 38200, loss = 0.0980111\nI0820 04:31:53.101620 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 04:31:53.101635 21769 solver.cpp:244]     Train net output #1: loss = 0.0980104 (* 1 = 0.0980104 loss)\nI0820 04:31:53.187788 21769 sgd_solver.cpp:166] Iteration 38200, lr = 0.955\nI0820 04:35:31.046866 21769 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0820 04:37:42.321269 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7518\nI0820 04:37:42.321636 21769 solver.cpp:404]     Test net output #1: loss = 1.16543 (* 1 = 1.16543 loss)\nI0820 04:37:44.444245 21769 solver.cpp:228] Iteration 38300, loss = 0.0472634\nI0820 04:37:44.444285 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.99\nI0820 04:37:44.444309 21769 solver.cpp:244]     Train net output #1: loss = 0.0472628 (* 1 = 0.0472628 loss)\nI0820 04:37:44.537516 21769 sgd_solver.cpp:166] Iteration 38300, lr = 0.9575\nI0820 04:41:22.593044 21769 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0820 04:43:33.853013 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8439\nI0820 04:43:33.853385 21769 solver.cpp:404]     Test net output #1: loss = 0.569715 (* 1 = 0.569715 loss)\nI0820 04:43:35.980796 21769 solver.cpp:228] Iteration 38400, loss = 0.102356\nI0820 04:43:35.980836 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 04:43:35.980852 21769 solver.cpp:244]     Train net output #1: loss = 0.102356 (* 1 = 0.102356 loss)\nI0820 04:43:36.072002 21769 sgd_solver.cpp:166] Iteration 38400, lr = 0.96\nI0820 04:47:14.293581 21769 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0820 04:49:25.564436 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0820 04:49:25.564802 21769 solver.cpp:404]     Test net output #1: loss = 1.05541 (* 1 = 1.05541 loss)\nI0820 04:49:27.687664 21769 solver.cpp:228] Iteration 38500, loss = 0.0495606\nI0820 04:49:27.687708 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 04:49:27.687732 21769 solver.cpp:244]     Train net output #1: loss = 0.0495599 (* 1 = 0.0495599 loss)\nI0820 04:49:27.777040 21769 sgd_solver.cpp:166] Iteration 38500, lr = 0.9625\nI0820 04:53:06.025915 21769 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0820 04:55:17.300303 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7238\nI0820 04:55:17.300698 21769 solver.cpp:404]     Test net output #1: loss = 1.17784 (* 1 = 1.17784 loss)\nI0820 04:55:19.424804 21769 solver.cpp:228] Iteration 38600, loss = 0.208843\nI0820 04:55:19.424845 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 04:55:19.424868 21769 solver.cpp:244]     Train net output #1: loss = 0.208842 (* 1 = 0.208842 
loss)\nI0820 04:55:19.519222 21769 sgd_solver.cpp:166] Iteration 38600, lr = 0.965\nI0820 04:58:57.715090 21769 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0820 05:01:08.989289 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8616\nI0820 05:01:08.989670 21769 solver.cpp:404]     Test net output #1: loss = 0.467211 (* 1 = 0.467211 loss)\nI0820 05:01:11.113714 21769 solver.cpp:228] Iteration 38700, loss = 0.139148\nI0820 05:01:11.113754 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:01:11.113777 21769 solver.cpp:244]     Train net output #1: loss = 0.139148 (* 1 = 0.139148 loss)\nI0820 05:01:11.210177 21769 sgd_solver.cpp:166] Iteration 38700, lr = 0.9675\nI0820 05:04:49.613396 21769 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0820 05:07:00.890789 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8315\nI0820 05:07:00.891109 21769 solver.cpp:404]     Test net output #1: loss = 0.66088 (* 1 = 0.66088 loss)\nI0820 05:07:03.015033 21769 solver.cpp:228] Iteration 38800, loss = 0.0580187\nI0820 05:07:03.015070 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 05:07:03.015086 21769 solver.cpp:244]     Train net output #1: loss = 0.058018 (* 1 = 0.058018 loss)\nI0820 05:07:03.112069 21769 sgd_solver.cpp:166] Iteration 38800, lr = 0.97\nI0820 05:10:41.427347 21769 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0820 05:12:52.696765 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8353\nI0820 05:12:52.697120 21769 solver.cpp:404]     Test net output #1: loss = 0.583566 (* 1 = 0.583566 loss)\nI0820 05:12:54.820248 21769 solver.cpp:228] Iteration 38900, loss = 0.120766\nI0820 05:12:54.820291 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 05:12:54.820314 21769 solver.cpp:244]     Train net output #1: loss = 0.120765 (* 1 = 0.120765 loss)\nI0820 05:12:54.917773 21769 sgd_solver.cpp:166] Iteration 38900, lr = 0.9725\nI0820 05:16:33.127792 21769 solver.cpp:337] 
Iteration 39000, Testing net (#0)\nI0820 05:18:44.371999 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI0820 05:18:44.372354 21769 solver.cpp:404]     Test net output #1: loss = 0.79255 (* 1 = 0.79255 loss)\nI0820 05:18:46.495715 21769 solver.cpp:228] Iteration 39000, loss = 0.119204\nI0820 05:18:46.495756 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 05:18:46.495779 21769 solver.cpp:244]     Train net output #1: loss = 0.119203 (* 1 = 0.119203 loss)\nI0820 05:18:46.593225 21769 sgd_solver.cpp:166] Iteration 39000, lr = 0.975\nI0820 05:22:24.990013 21769 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0820 05:24:36.275753 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8433\nI0820 05:24:36.276140 21769 solver.cpp:404]     Test net output #1: loss = 0.594446 (* 1 = 0.594446 loss)\nI0820 05:24:38.399405 21769 solver.cpp:228] Iteration 39100, loss = 0.174639\nI0820 05:24:38.399446 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:24:38.399468 21769 solver.cpp:244]     Train net output #1: loss = 0.174638 (* 1 = 0.174638 loss)\nI0820 05:24:38.501610 21769 sgd_solver.cpp:166] Iteration 39100, lr = 0.9775\nI0820 05:28:16.862323 21769 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0820 05:30:28.141602 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8169\nI0820 05:30:28.142006 21769 solver.cpp:404]     Test net output #1: loss = 0.706304 (* 1 = 0.706304 loss)\nI0820 05:30:30.266093 21769 solver.cpp:228] Iteration 39200, loss = 0.0569422\nI0820 05:30:30.266134 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 05:30:30.266158 21769 solver.cpp:244]     Train net output #1: loss = 0.0569415 (* 1 = 0.0569415 loss)\nI0820 05:30:30.358546 21769 sgd_solver.cpp:166] Iteration 39200, lr = 0.98\nI0820 05:34:08.586429 21769 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0820 05:36:19.860736 21769 solver.cpp:404]     Test net output #0: accuracy = 0.817101\nI0820 
05:36:19.861125 21769 solver.cpp:404]     Test net output #1: loss = 0.70756 (* 1 = 0.70756 loss)\nI0820 05:36:21.985283 21769 solver.cpp:228] Iteration 39300, loss = 0.0949564\nI0820 05:36:21.985323 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 05:36:21.985347 21769 solver.cpp:244]     Train net output #1: loss = 0.0949558 (* 1 = 0.0949558 loss)\nI0820 05:36:22.077216 21769 sgd_solver.cpp:166] Iteration 39300, lr = 0.9825\nI0820 05:40:00.180393 21769 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0820 05:42:11.477829 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8277\nI0820 05:42:11.478233 21769 solver.cpp:404]     Test net output #1: loss = 0.64106 (* 1 = 0.64106 loss)\nI0820 05:42:13.601598 21769 solver.cpp:228] Iteration 39400, loss = 0.168722\nI0820 05:42:13.601637 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 05:42:13.601661 21769 solver.cpp:244]     Train net output #1: loss = 0.168721 (* 1 = 0.168721 loss)\nI0820 05:42:13.691599 21769 sgd_solver.cpp:166] Iteration 39400, lr = 0.985\nI0820 05:45:51.998883 21769 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0820 05:48:03.281623 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7338\nI0820 05:48:03.282029 21769 solver.cpp:404]     Test net output #1: loss = 1.16617 (* 1 = 1.16617 loss)\nI0820 05:48:05.405974 21769 solver.cpp:228] Iteration 39500, loss = 0.179932\nI0820 05:48:05.406015 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 05:48:05.406038 21769 solver.cpp:244]     Train net output #1: loss = 0.179932 (* 1 = 0.179932 loss)\nI0820 05:48:05.498299 21769 sgd_solver.cpp:166] Iteration 39500, lr = 0.9875\nI0820 05:51:43.680671 21769 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0820 05:53:54.967051 21769 solver.cpp:404]     Test net output #0: accuracy = 0.759\nI0820 05:53:54.967444 21769 solver.cpp:404]     Test net output #1: loss = 1.06626 (* 1 = 1.06626 loss)\nI0820 05:53:57.091262 21769 
solver.cpp:228] Iteration 39600, loss = 0.155599\nI0820 05:53:57.091300 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0820 05:53:57.091323 21769 solver.cpp:244]     Train net output #1: loss = 0.155599 (* 1 = 0.155599 loss)\nI0820 05:53:57.190904 21769 sgd_solver.cpp:166] Iteration 39600, lr = 0.99\nI0820 05:57:35.409276 21769 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0820 05:59:46.653955 21769 solver.cpp:404]     Test net output #0: accuracy = 0.827301\nI0820 05:59:46.654371 21769 solver.cpp:404]     Test net output #1: loss = 0.603076 (* 1 = 0.603076 loss)\nI0820 05:59:48.776605 21769 solver.cpp:228] Iteration 39700, loss = 0.0616185\nI0820 05:59:48.776644 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:59:48.776667 21769 solver.cpp:244]     Train net output #1: loss = 0.0616179 (* 1 = 0.0616179 loss)\nI0820 05:59:48.865630 21769 sgd_solver.cpp:166] Iteration 39700, lr = 0.9925\nI0820 06:03:27.007028 21769 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0820 06:05:38.257331 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI0820 06:05:38.257714 21769 solver.cpp:404]     Test net output #1: loss = 0.676034 (* 1 = 0.676034 loss)\nI0820 06:05:40.380659 21769 solver.cpp:228] Iteration 39800, loss = 0.071104\nI0820 06:05:40.380702 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 06:05:40.380726 21769 solver.cpp:244]     Train net output #1: loss = 0.0711034 (* 1 = 0.0711034 loss)\nI0820 06:05:40.475520 21769 sgd_solver.cpp:166] Iteration 39800, lr = 0.995\nI0820 06:09:18.696573 21769 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0820 06:11:29.936180 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8082\nI0820 06:11:29.936579 21769 solver.cpp:404]     Test net output #1: loss = 0.785077 (* 1 = 0.785077 loss)\nI0820 06:11:32.059980 21769 solver.cpp:228] Iteration 39900, loss = 0.0820294\nI0820 06:11:32.060020 21769 solver.cpp:244]     Train net output #0: 
accuracy = 0.98\nI0820 06:11:32.060045 21769 solver.cpp:244]     Train net output #1: loss = 0.0820289 (* 1 = 0.0820289 loss)\nI0820 06:11:32.155706 21769 sgd_solver.cpp:166] Iteration 39900, lr = 0.9975\nI0820 06:15:10.440215 21769 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0820 06:17:21.666954 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7806\nI0820 06:17:21.667326 21769 solver.cpp:404]     Test net output #1: loss = 0.896505 (* 1 = 0.896505 loss)\nI0820 06:17:23.790700 21769 solver.cpp:228] Iteration 40000, loss = 0.0533561\nI0820 06:17:23.790741 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 06:17:23.790765 21769 solver.cpp:244]     Train net output #1: loss = 0.0533555 (* 1 = 0.0533555 loss)\nI0820 06:17:23.885202 21769 sgd_solver.cpp:166] Iteration 40000, lr = 1\nI0820 06:21:02.148471 21769 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0820 06:23:13.387493 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8197\nI0820 06:23:13.387931 21769 solver.cpp:404]     Test net output #1: loss = 0.672819 (* 1 = 0.672819 loss)\nI0820 06:23:15.511128 21769 solver.cpp:228] Iteration 40100, loss = 0.175935\nI0820 06:23:15.511168 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 06:23:15.511190 21769 solver.cpp:244]     Train net output #1: loss = 0.175934 (* 1 = 0.175934 loss)\nI0820 06:23:15.604676 21769 sgd_solver.cpp:166] Iteration 40100, lr = 1.0025\nI0820 06:26:53.856137 21769 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0820 06:29:05.091996 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8375\nI0820 06:29:05.092358 21769 solver.cpp:404]     Test net output #1: loss = 0.622139 (* 1 = 0.622139 loss)\nI0820 06:29:07.214859 21769 solver.cpp:228] Iteration 40200, loss = 0.155389\nI0820 06:29:07.214897 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 06:29:07.214920 21769 solver.cpp:244]     Train net output #1: loss = 0.155389 (* 1 = 0.155389 loss)\nI0820 
06:29:07.309214 21769 sgd_solver.cpp:166] Iteration 40200, lr = 1.005\nI0820 06:32:45.479542 21769 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0820 06:34:56.742344 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7818\nI0820 06:34:56.742733 21769 solver.cpp:404]     Test net output #1: loss = 0.888131 (* 1 = 0.888131 loss)\nI0820 06:34:58.865098 21769 solver.cpp:228] Iteration 40300, loss = 0.0500109\nI0820 06:34:58.865135 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 06:34:58.865150 21769 solver.cpp:244]     Train net output #1: loss = 0.0500104 (* 1 = 0.0500104 loss)\nI0820 06:34:58.953096 21769 sgd_solver.cpp:166] Iteration 40300, lr = 1.0075\nI0820 06:38:37.149303 21769 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0820 06:40:48.406144 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7562\nI0820 06:40:48.406533 21769 solver.cpp:404]     Test net output #1: loss = 0.97964 (* 1 = 0.97964 loss)\nI0820 06:40:50.529387 21769 solver.cpp:228] Iteration 40400, loss = 0.190953\nI0820 06:40:50.529435 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 06:40:50.529450 21769 solver.cpp:244]     Train net output #1: loss = 0.190952 (* 1 = 0.190952 loss)\nI0820 06:40:50.627472 21769 sgd_solver.cpp:166] Iteration 40400, lr = 1.01\nI0820 06:44:28.878195 21769 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0820 06:46:40.135058 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7897\nI0820 06:46:40.135437 21769 solver.cpp:404]     Test net output #1: loss = 0.864039 (* 1 = 0.864039 loss)\nI0820 06:46:42.257876 21769 solver.cpp:228] Iteration 40500, loss = 0.112139\nI0820 06:46:42.257912 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 06:46:42.257927 21769 solver.cpp:244]     Train net output #1: loss = 0.112138 (* 1 = 0.112138 loss)\nI0820 06:46:42.346918 21769 sgd_solver.cpp:166] Iteration 40500, lr = 1.0125\nI0820 06:50:20.439240 21769 solver.cpp:337] Iteration 40600, 
Testing net (#0)\nI0820 06:52:31.704687 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8238\nI0820 06:52:31.705066 21769 solver.cpp:404]     Test net output #1: loss = 0.640712 (* 1 = 0.640712 loss)\nI0820 06:52:33.828464 21769 solver.cpp:228] Iteration 40600, loss = 0.111843\nI0820 06:52:33.828500 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 06:52:33.828516 21769 solver.cpp:244]     Train net output #1: loss = 0.111843 (* 1 = 0.111843 loss)\nI0820 06:52:33.923395 21769 sgd_solver.cpp:166] Iteration 40600, lr = 1.015\nI0820 06:56:12.137004 21769 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0820 06:58:23.398703 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8353\nI0820 06:58:23.399092 21769 solver.cpp:404]     Test net output #1: loss = 0.662311 (* 1 = 0.662311 loss)\nI0820 06:58:25.521946 21769 solver.cpp:228] Iteration 40700, loss = 0.159308\nI0820 06:58:25.521982 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 06:58:25.521997 21769 solver.cpp:244]     Train net output #1: loss = 0.159307 (* 1 = 0.159307 loss)\nI0820 06:58:25.619505 21769 sgd_solver.cpp:166] Iteration 40700, lr = 1.0175\nI0820 07:02:03.773738 21769 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0820 07:04:15.050119 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7803\nI0820 07:04:15.050506 21769 solver.cpp:404]     Test net output #1: loss = 0.887422 (* 1 = 0.887422 loss)\nI0820 07:04:17.173308 21769 solver.cpp:228] Iteration 40800, loss = 0.0858605\nI0820 07:04:17.173344 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:04:17.173360 21769 solver.cpp:244]     Train net output #1: loss = 0.0858601 (* 1 = 0.0858601 loss)\nI0820 07:04:17.262392 21769 sgd_solver.cpp:166] Iteration 40800, lr = 1.02\nI0820 07:07:55.380858 21769 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0820 07:10:06.650996 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7593\nI0820 07:10:06.651374 
21769 solver.cpp:404]     Test net output #1: loss = 0.996269 (* 1 = 0.996269 loss)\nI0820 07:10:08.773983 21769 solver.cpp:228] Iteration 40900, loss = 0.115749\nI0820 07:10:08.774019 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 07:10:08.774034 21769 solver.cpp:244]     Train net output #1: loss = 0.115749 (* 1 = 0.115749 loss)\nI0820 07:10:08.870280 21769 sgd_solver.cpp:166] Iteration 40900, lr = 1.0225\nI0820 07:13:47.032632 21769 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0820 07:15:58.287586 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8237\nI0820 07:15:58.287981 21769 solver.cpp:404]     Test net output #1: loss = 0.76815 (* 1 = 0.76815 loss)\nI0820 07:16:00.410712 21769 solver.cpp:228] Iteration 41000, loss = 0.10306\nI0820 07:16:00.410758 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:16:00.410778 21769 solver.cpp:244]     Train net output #1: loss = 0.10306 (* 1 = 0.10306 loss)\nI0820 07:16:00.501571 21769 sgd_solver.cpp:166] Iteration 41000, lr = 1.025\nI0820 07:19:38.765460 21769 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0820 07:21:50.067090 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7526\nI0820 07:21:50.067489 21769 solver.cpp:404]     Test net output #1: loss = 1.11925 (* 1 = 1.11925 loss)\nI0820 07:21:52.190716 21769 solver.cpp:228] Iteration 41100, loss = 0.117644\nI0820 07:21:52.190763 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:21:52.190788 21769 solver.cpp:244]     Train net output #1: loss = 0.117643 (* 1 = 0.117643 loss)\nI0820 07:21:52.287832 21769 sgd_solver.cpp:166] Iteration 41100, lr = 1.0275\nI0820 07:25:30.642863 21769 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0820 07:27:41.916054 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0820 07:27:41.916452 21769 solver.cpp:404]     Test net output #1: loss = 0.751162 (* 1 = 0.751162 loss)\nI0820 07:27:44.039127 21769 solver.cpp:228] Iteration 
41200, loss = 0.126704\nI0820 07:27:44.039176 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 07:27:44.039201 21769 solver.cpp:244]     Train net output #1: loss = 0.126704 (* 1 = 0.126704 loss)\nI0820 07:27:44.140043 21769 sgd_solver.cpp:166] Iteration 41200, lr = 1.03\nI0820 07:31:22.376886 21769 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0820 07:33:33.640558 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8731\nI0820 07:33:33.640954 21769 solver.cpp:404]     Test net output #1: loss = 0.454057 (* 1 = 0.454057 loss)\nI0820 07:33:35.763159 21769 solver.cpp:228] Iteration 41300, loss = 0.108079\nI0820 07:33:35.763204 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 07:33:35.763221 21769 solver.cpp:244]     Train net output #1: loss = 0.108079 (* 1 = 0.108079 loss)\nI0820 07:33:35.861395 21769 sgd_solver.cpp:166] Iteration 41300, lr = 1.0325\nI0820 07:37:14.243187 21769 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0820 07:39:25.490943 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7695\nI0820 07:39:25.491328 21769 solver.cpp:404]     Test net output #1: loss = 0.936034 (* 1 = 0.936034 loss)\nI0820 07:39:27.613287 21769 solver.cpp:228] Iteration 41400, loss = 0.184824\nI0820 07:39:27.613332 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 07:39:27.613348 21769 solver.cpp:244]     Train net output #1: loss = 0.184824 (* 1 = 0.184824 loss)\nI0820 07:39:27.706750 21769 sgd_solver.cpp:166] Iteration 41400, lr = 1.035\nI0820 07:43:05.887703 21769 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0820 07:45:17.120306 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7653\nI0820 07:45:17.120692 21769 solver.cpp:404]     Test net output #1: loss = 1.03514 (* 1 = 1.03514 loss)\nI0820 07:45:19.243446 21769 solver.cpp:228] Iteration 41500, loss = 0.134344\nI0820 07:45:19.243490 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:45:19.243506 
21769 solver.cpp:244]     Train net output #1: loss = 0.134343 (* 1 = 0.134343 loss)\nI0820 07:45:19.337203 21769 sgd_solver.cpp:166] Iteration 41500, lr = 1.0375\nI0820 07:48:57.409865 21769 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0820 07:51:08.643210 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8289\nI0820 07:51:08.643601 21769 solver.cpp:404]     Test net output #1: loss = 0.613535 (* 1 = 0.613535 loss)\nI0820 07:51:10.765805 21769 solver.cpp:228] Iteration 41600, loss = 0.11942\nI0820 07:51:10.765852 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 07:51:10.765869 21769 solver.cpp:244]     Train net output #1: loss = 0.11942 (* 1 = 0.11942 loss)\nI0820 07:51:10.868597 21769 sgd_solver.cpp:166] Iteration 41600, lr = 1.04\nI0820 07:54:48.896703 21769 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0820 07:57:00.162420 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8287\nI0820 07:57:00.162796 21769 solver.cpp:404]     Test net output #1: loss = 0.705613 (* 1 = 0.705613 loss)\nI0820 07:57:02.286550 21769 solver.cpp:228] Iteration 41700, loss = 0.113427\nI0820 07:57:02.286597 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:57:02.286613 21769 solver.cpp:244]     Train net output #1: loss = 0.113426 (* 1 = 0.113426 loss)\nI0820 07:57:02.373423 21769 sgd_solver.cpp:166] Iteration 41700, lr = 1.0425\nI0820 08:00:40.447044 21769 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0820 08:02:51.686002 21769 solver.cpp:404]     Test net output #0: accuracy = 0.816501\nI0820 08:02:51.686404 21769 solver.cpp:404]     Test net output #1: loss = 0.615724 (* 1 = 0.615724 loss)\nI0820 08:02:53.808557 21769 solver.cpp:228] Iteration 41800, loss = 0.100121\nI0820 08:02:53.808605 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 08:02:53.808620 21769 solver.cpp:244]     Train net output #1: loss = 0.10012 (* 1 = 0.10012 loss)\nI0820 08:02:53.899942 21769 sgd_solver.cpp:166] 
Iteration 41800, lr = 1.045\nI0820 08:06:32.101338 21769 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0820 08:08:43.363487 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8221\nI0820 08:08:43.363873 21769 solver.cpp:404]     Test net output #1: loss = 0.754359 (* 1 = 0.754359 loss)\nI0820 08:08:45.486248 21769 solver.cpp:228] Iteration 41900, loss = 0.0880743\nI0820 08:08:45.486294 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:08:45.486310 21769 solver.cpp:244]     Train net output #1: loss = 0.088074 (* 1 = 0.088074 loss)\nI0820 08:08:45.580075 21769 sgd_solver.cpp:166] Iteration 41900, lr = 1.0475\nI0820 08:12:23.968031 21769 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0820 08:14:35.189784 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8057\nI0820 08:14:35.190177 21769 solver.cpp:404]     Test net output #1: loss = 0.785899 (* 1 = 0.785899 loss)\nI0820 08:14:37.313107 21769 solver.cpp:228] Iteration 42000, loss = 0.0956148\nI0820 08:14:37.313153 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 08:14:37.313169 21769 solver.cpp:244]     Train net output #1: loss = 0.0956145 (* 1 = 0.0956145 loss)\nI0820 08:14:37.413684 21769 sgd_solver.cpp:166] Iteration 42000, lr = 1.05\nI0820 08:18:16.227696 21769 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0820 08:20:27.443348 21769 solver.cpp:404]     Test net output #0: accuracy = 0.817\nI0820 08:20:27.443729 21769 solver.cpp:404]     Test net output #1: loss = 0.69781 (* 1 = 0.69781 loss)\nI0820 08:20:29.566711 21769 solver.cpp:228] Iteration 42100, loss = 0.179368\nI0820 08:20:29.566756 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 08:20:29.566776 21769 solver.cpp:244]     Train net output #1: loss = 0.179368 (* 1 = 0.179368 loss)\nI0820 08:20:29.668066 21769 sgd_solver.cpp:166] Iteration 42100, lr = 1.0525\nI0820 08:24:08.401841 21769 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0820 08:26:19.630522 
21769 solver.cpp:404]     Test net output #0: accuracy = 0.8247\nI0820 08:26:19.630884 21769 solver.cpp:404]     Test net output #1: loss = 0.688212 (* 1 = 0.688212 loss)\nI0820 08:26:21.753446 21769 solver.cpp:228] Iteration 42200, loss = 0.161062\nI0820 08:26:21.753494 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 08:26:21.753510 21769 solver.cpp:244]     Train net output #1: loss = 0.161062 (* 1 = 0.161062 loss)\nI0820 08:26:21.855191 21769 sgd_solver.cpp:166] Iteration 42200, lr = 1.055\nI0820 08:30:00.656239 21769 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0820 08:32:11.893568 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7455\nI0820 08:32:11.893903 21769 solver.cpp:404]     Test net output #1: loss = 1.02689 (* 1 = 1.02689 loss)\nI0820 08:32:14.016499 21769 solver.cpp:228] Iteration 42300, loss = 0.0641882\nI0820 08:32:14.016547 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 08:32:14.016563 21769 solver.cpp:244]     Train net output #1: loss = 0.0641879 (* 1 = 0.0641879 loss)\nI0820 08:32:14.122419 21769 sgd_solver.cpp:166] Iteration 42300, lr = 1.0575\nI0820 08:35:53.027748 21769 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0820 08:38:04.232796 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8083\nI0820 08:38:04.233172 21769 solver.cpp:404]     Test net output #1: loss = 0.703359 (* 1 = 0.703359 loss)\nI0820 08:38:06.355376 21769 solver.cpp:228] Iteration 42400, loss = 0.246078\nI0820 08:38:06.355422 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 08:38:06.355438 21769 solver.cpp:244]     Train net output #1: loss = 0.246077 (* 1 = 0.246077 loss)\nI0820 08:38:06.458984 21769 sgd_solver.cpp:166] Iteration 42400, lr = 1.06\nI0820 08:41:45.480983 21769 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0820 08:43:56.692728 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7846\nI0820 08:43:56.693066 21769 solver.cpp:404]     Test net output #1: 
loss = 0.972074 (* 1 = 0.972074 loss)\nI0820 08:43:58.815491 21769 solver.cpp:228] Iteration 42500, loss = 0.150313\nI0820 08:43:58.815537 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:43:58.815553 21769 solver.cpp:244]     Train net output #1: loss = 0.150313 (* 1 = 0.150313 loss)\nI0820 08:43:58.912853 21769 sgd_solver.cpp:166] Iteration 42500, lr = 1.0625\nI0820 08:47:37.740496 21769 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0820 08:49:49.003260 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7459\nI0820 08:49:49.003633 21769 solver.cpp:404]     Test net output #1: loss = 1.12097 (* 1 = 1.12097 loss)\nI0820 08:49:51.126942 21769 solver.cpp:228] Iteration 42600, loss = 0.0662099\nI0820 08:49:51.126989 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 08:49:51.127007 21769 solver.cpp:244]     Train net output #1: loss = 0.0662097 (* 1 = 0.0662097 loss)\nI0820 08:49:51.226953 21769 sgd_solver.cpp:166] Iteration 42600, lr = 1.065\nI0820 08:53:29.864822 21769 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0820 08:55:41.125880 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8373\nI0820 08:55:41.126252 21769 solver.cpp:404]     Test net output #1: loss = 0.613064 (* 1 = 0.613064 loss)\nI0820 08:55:43.249733 21769 solver.cpp:228] Iteration 42700, loss = 0.0724284\nI0820 08:55:43.249779 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 08:55:43.249800 21769 solver.cpp:244]     Train net output #1: loss = 0.0724282 (* 1 = 0.0724282 loss)\nI0820 08:55:43.346321 21769 sgd_solver.cpp:166] Iteration 42700, lr = 1.0675\nI0820 08:59:22.147033 21769 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0820 09:01:33.413350 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8191\nI0820 09:01:33.413705 21769 solver.cpp:404]     Test net output #1: loss = 0.637177 (* 1 = 0.637177 loss)\nI0820 09:01:35.535931 21769 solver.cpp:228] Iteration 42800, loss = 0.0936783\nI0820 
09:01:35.535979 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 09:01:35.535995 21769 solver.cpp:244]     Train net output #1: loss = 0.093678 (* 1 = 0.093678 loss)\nI0820 09:01:35.631892 21769 sgd_solver.cpp:166] Iteration 42800, lr = 1.07\nI0820 09:05:14.491423 21769 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0820 09:07:25.769981 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8092\nI0820 09:07:25.770361 21769 solver.cpp:404]     Test net output #1: loss = 0.669412 (* 1 = 0.669412 loss)\nI0820 09:07:27.893517 21769 solver.cpp:228] Iteration 42900, loss = 0.199214\nI0820 09:07:27.893563 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 09:07:27.893579 21769 solver.cpp:244]     Train net output #1: loss = 0.199214 (* 1 = 0.199214 loss)\nI0820 09:07:27.988674 21769 sgd_solver.cpp:166] Iteration 42900, lr = 1.0725\nI0820 09:11:06.900846 21769 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0820 09:13:18.163095 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6601\nI0820 09:13:18.163470 21769 solver.cpp:404]     Test net output #1: loss = 1.66009 (* 1 = 1.66009 loss)\nI0820 09:13:20.290285 21769 solver.cpp:228] Iteration 43000, loss = 0.215901\nI0820 09:13:20.290334 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 09:13:20.290351 21769 solver.cpp:244]     Train net output #1: loss = 0.2159 (* 1 = 0.2159 loss)\nI0820 09:13:20.384719 21769 sgd_solver.cpp:166] Iteration 43000, lr = 1.075\nI0820 09:16:59.220921 21769 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0820 09:19:10.489850 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8088\nI0820 09:19:10.490170 21769 solver.cpp:404]     Test net output #1: loss = 0.748233 (* 1 = 0.748233 loss)\nI0820 09:19:12.613387 21769 solver.cpp:228] Iteration 43100, loss = 0.171485\nI0820 09:19:12.613437 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 09:19:12.613463 21769 solver.cpp:244]     Train net 
output #1: loss = 0.171485 (* 1 = 0.171485 loss)\nI0820 09:19:12.708484 21769 sgd_solver.cpp:166] Iteration 43100, lr = 1.0775\nI0820 09:22:51.536720 21769 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0820 09:25:02.816087 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8186\nI0820 09:25:02.816468 21769 solver.cpp:404]     Test net output #1: loss = 0.773049 (* 1 = 0.773049 loss)\nI0820 09:25:04.939095 21769 solver.cpp:228] Iteration 43200, loss = 0.099486\nI0820 09:25:04.939146 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 09:25:04.939170 21769 solver.cpp:244]     Train net output #1: loss = 0.0994856 (* 1 = 0.0994856 loss)\nI0820 09:25:05.044616 21769 sgd_solver.cpp:166] Iteration 43200, lr = 1.08\nI0820 09:28:43.909927 21769 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0820 09:30:55.191575 21769 solver.cpp:404]     Test net output #0: accuracy = 0.749\nI0820 09:30:55.191936 21769 solver.cpp:404]     Test net output #1: loss = 1.08543 (* 1 = 1.08543 loss)\nI0820 09:30:57.315207 21769 solver.cpp:228] Iteration 43300, loss = 0.168987\nI0820 09:30:57.315258 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:30:57.315284 21769 solver.cpp:244]     Train net output #1: loss = 0.168987 (* 1 = 0.168987 loss)\nI0820 09:30:57.413405 21769 sgd_solver.cpp:166] Iteration 43300, lr = 1.0825\nI0820 09:34:36.099453 21769 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0820 09:36:47.377909 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7919\nI0820 09:36:47.378265 21769 solver.cpp:404]     Test net output #1: loss = 0.902013 (* 1 = 0.902013 loss)\nI0820 09:36:49.502779 21769 solver.cpp:228] Iteration 43400, loss = 0.0641265\nI0820 09:36:49.502830 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 09:36:49.502854 21769 solver.cpp:244]     Train net output #1: loss = 0.0641262 (* 1 = 0.0641262 loss)\nI0820 09:36:49.602819 21769 sgd_solver.cpp:166] Iteration 43400, lr = 
1.085\nI0820 09:40:28.453747 21769 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0820 09:42:39.745462 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7741\nI0820 09:42:39.745844 21769 solver.cpp:404]     Test net output #1: loss = 0.850349 (* 1 = 0.850349 loss)\nI0820 09:42:41.869375 21769 solver.cpp:228] Iteration 43500, loss = 0.161625\nI0820 09:42:41.869426 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:42:41.869449 21769 solver.cpp:244]     Train net output #1: loss = 0.161625 (* 1 = 0.161625 loss)\nI0820 09:42:41.961760 21769 sgd_solver.cpp:166] Iteration 43500, lr = 1.0875\nI0820 09:46:20.792469 21769 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0820 09:48:32.080755 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7585\nI0820 09:48:32.081106 21769 solver.cpp:404]     Test net output #1: loss = 1.07777 (* 1 = 1.07777 loss)\nI0820 09:48:34.203881 21769 solver.cpp:228] Iteration 43600, loss = 0.117193\nI0820 09:48:34.203930 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:48:34.203955 21769 solver.cpp:244]     Train net output #1: loss = 0.117192 (* 1 = 0.117192 loss)\nI0820 09:48:34.303622 21769 sgd_solver.cpp:166] Iteration 43600, lr = 1.09\nI0820 09:52:13.277494 21769 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0820 09:54:24.559515 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8321\nI0820 09:54:24.559900 21769 solver.cpp:404]     Test net output #1: loss = 0.647037 (* 1 = 0.647037 loss)\nI0820 09:54:26.683907 21769 solver.cpp:228] Iteration 43700, loss = 0.0944383\nI0820 09:54:26.683959 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 09:54:26.683984 21769 solver.cpp:244]     Train net output #1: loss = 0.094438 (* 1 = 0.094438 loss)\nI0820 09:54:26.783859 21769 sgd_solver.cpp:166] Iteration 43700, lr = 1.0925\nI0820 09:58:05.566598 21769 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0820 10:00:16.850996 21769 solver.cpp:404]     
Test net output #0: accuracy = 0.8142\nI0820 10:00:16.851383 21769 solver.cpp:404]     Test net output #1: loss = 0.701431 (* 1 = 0.701431 loss)\nI0820 10:00:18.974815 21769 solver.cpp:228] Iteration 43800, loss = 0.040777\nI0820 10:00:18.974866 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 10:00:18.974891 21769 solver.cpp:244]     Train net output #1: loss = 0.0407767 (* 1 = 0.0407767 loss)\nI0820 10:00:19.072293 21769 sgd_solver.cpp:166] Iteration 43800, lr = 1.095\nI0820 10:03:57.945849 21769 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0820 10:06:09.239425 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7685\nI0820 10:06:09.239804 21769 solver.cpp:404]     Test net output #1: loss = 0.95786 (* 1 = 0.95786 loss)\nI0820 10:06:11.363436 21769 solver.cpp:228] Iteration 43900, loss = 0.190649\nI0820 10:06:11.363487 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 10:06:11.363509 21769 solver.cpp:244]     Train net output #1: loss = 0.190649 (* 1 = 0.190649 loss)\nI0820 10:06:11.459377 21769 sgd_solver.cpp:166] Iteration 43900, lr = 1.0975\nI0820 10:09:50.248090 21769 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0820 10:12:01.525799 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7369\nI0820 10:12:01.526469 21769 solver.cpp:404]     Test net output #1: loss = 1.13675 (* 1 = 1.13675 loss)\nI0820 10:12:03.655310 21769 solver.cpp:228] Iteration 44000, loss = 0.0797126\nI0820 10:12:03.655359 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 10:12:03.655385 21769 solver.cpp:244]     Train net output #1: loss = 0.0797123 (* 1 = 0.0797123 loss)\nI0820 10:12:03.749331 21769 sgd_solver.cpp:166] Iteration 44000, lr = 1.1\nI0820 10:15:42.679973 21769 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0820 10:17:53.949128 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8308\nI0820 10:17:53.949453 21769 solver.cpp:404]     Test net output #1: loss = 0.595537 (* 1 = 
0.595537 loss)\nI0820 10:17:56.072957 21769 solver.cpp:228] Iteration 44100, loss = 0.0862688\nI0820 10:17:56.072995 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 10:17:56.073019 21769 solver.cpp:244]     Train net output #1: loss = 0.0862685 (* 1 = 0.0862685 loss)\nI0820 10:17:56.168073 21769 sgd_solver.cpp:166] Iteration 44100, lr = 1.1025\nI0820 10:21:34.518999 21769 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0820 10:23:45.795223 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0820 10:23:45.795631 21769 solver.cpp:404]     Test net output #1: loss = 0.716586 (* 1 = 0.716586 loss)\nI0820 10:23:47.920137 21769 solver.cpp:228] Iteration 44200, loss = 0.127364\nI0820 10:23:47.920177 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 10:23:47.920202 21769 solver.cpp:244]     Train net output #1: loss = 0.127364 (* 1 = 0.127364 loss)\nI0820 10:23:48.013209 21769 sgd_solver.cpp:166] Iteration 44200, lr = 1.105\nI0820 10:27:26.232856 21769 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0820 10:29:37.492440 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8244\nI0820 10:29:37.492823 21769 solver.cpp:404]     Test net output #1: loss = 0.662512 (* 1 = 0.662512 loss)\nI0820 10:29:39.616474 21769 solver.cpp:228] Iteration 44300, loss = 0.124296\nI0820 10:29:39.616514 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 10:29:39.616538 21769 solver.cpp:244]     Train net output #1: loss = 0.124295 (* 1 = 0.124295 loss)\nI0820 10:29:39.714376 21769 sgd_solver.cpp:166] Iteration 44300, lr = 1.1075\nI0820 10:33:17.987831 21769 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0820 10:35:29.224051 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7248\nI0820 10:35:29.224423 21769 solver.cpp:404]     Test net output #1: loss = 1.4026 (* 1 = 1.4026 loss)\nI0820 10:35:31.347964 21769 solver.cpp:228] Iteration 44400, loss = 0.194202\nI0820 10:35:31.348003 21769 
solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 10:35:31.348026 21769 solver.cpp:244]     Train net output #1: loss = 0.194201 (* 1 = 0.194201 loss)\nI0820 10:35:31.441349 21769 sgd_solver.cpp:166] Iteration 44400, lr = 1.11\nI0820 10:39:09.696750 21769 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0820 10:41:20.937022 21769 solver.cpp:404]     Test net output #0: accuracy = 0.786\nI0820 10:41:20.937391 21769 solver.cpp:404]     Test net output #1: loss = 0.969044 (* 1 = 0.969044 loss)\nI0820 10:41:23.060616 21769 solver.cpp:228] Iteration 44500, loss = 0.185949\nI0820 10:41:23.060664 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 10:41:23.060681 21769 solver.cpp:244]     Train net output #1: loss = 0.185949 (* 1 = 0.185949 loss)\nI0820 10:41:23.159204 21769 sgd_solver.cpp:166] Iteration 44500, lr = 1.1125\nI0820 10:45:01.383244 21769 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0820 10:47:12.588176 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7885\nI0820 10:47:12.588547 21769 solver.cpp:404]     Test net output #1: loss = 0.861744 (* 1 = 0.861744 loss)\nI0820 10:47:14.711103 21769 solver.cpp:228] Iteration 44600, loss = 0.243816\nI0820 10:47:14.711150 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 10:47:14.711165 21769 solver.cpp:244]     Train net output #1: loss = 0.243815 (* 1 = 0.243815 loss)\nI0820 10:47:14.797972 21769 sgd_solver.cpp:166] Iteration 44600, lr = 1.115\nI0820 10:50:52.497908 21769 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0820 10:53:03.673087 21769 solver.cpp:404]     Test net output #0: accuracy = 0.818\nI0820 10:53:03.673461 21769 solver.cpp:404]     Test net output #1: loss = 0.670664 (* 1 = 0.670664 loss)\nI0820 10:53:05.796226 21769 solver.cpp:228] Iteration 44700, loss = 0.134178\nI0820 10:53:05.796272 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 10:53:05.796288 21769 solver.cpp:244]     Train net output #1: loss = 
0.134178 (* 1 = 0.134178 loss)\nI0820 10:53:05.885277 21769 sgd_solver.cpp:166] Iteration 44700, lr = 1.1175\nI0820 10:56:43.537609 21769 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0820 10:58:54.741678 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7029\nI0820 10:58:54.742056 21769 solver.cpp:404]     Test net output #1: loss = 1.34402 (* 1 = 1.34402 loss)\nI0820 10:58:56.864363 21769 solver.cpp:228] Iteration 44800, loss = 0.0946486\nI0820 10:58:56.864403 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 10:58:56.864426 21769 solver.cpp:244]     Train net output #1: loss = 0.0946483 (* 1 = 0.0946483 loss)\nI0820 10:58:56.955607 21769 sgd_solver.cpp:166] Iteration 44800, lr = 1.12\nI0820 11:02:34.559980 21769 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0820 11:04:45.861609 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8175\nI0820 11:04:45.861960 21769 solver.cpp:404]     Test net output #1: loss = 0.715711 (* 1 = 0.715711 loss)\nI0820 11:04:47.985656 21769 solver.cpp:228] Iteration 44900, loss = 0.194631\nI0820 11:04:47.985699 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 11:04:47.985724 21769 solver.cpp:244]     Train net output #1: loss = 0.19463 (* 1 = 0.19463 loss)\nI0820 11:04:48.070715 21769 sgd_solver.cpp:166] Iteration 44900, lr = 1.1225\nI0820 11:08:25.690179 21769 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0820 11:10:36.968978 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8101\nI0820 11:10:36.969347 21769 solver.cpp:404]     Test net output #1: loss = 0.697728 (* 1 = 0.697728 loss)\nI0820 11:10:39.092255 21769 solver.cpp:228] Iteration 45000, loss = 0.115461\nI0820 11:10:39.092305 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 11:10:39.092330 21769 solver.cpp:244]     Train net output #1: loss = 0.11546 (* 1 = 0.11546 loss)\nI0820 11:10:39.182256 21769 sgd_solver.cpp:166] Iteration 45000, lr = 1.125\nI0820 11:14:16.829691 
21769 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0820 11:16:28.114523 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8521\nI0820 11:16:28.114867 21769 solver.cpp:404]     Test net output #1: loss = 0.549938 (* 1 = 0.549938 loss)\nI0820 11:16:30.237968 21769 solver.cpp:228] Iteration 45100, loss = 0.174917\nI0820 11:16:30.238009 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 11:16:30.238031 21769 solver.cpp:244]     Train net output #1: loss = 0.174916 (* 1 = 0.174916 loss)\nI0820 11:16:30.322203 21769 sgd_solver.cpp:166] Iteration 45100, lr = 1.1275\nI0820 11:20:08.024816 21769 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0820 11:22:19.321404 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8102\nI0820 11:22:19.321792 21769 solver.cpp:404]     Test net output #1: loss = 0.750035 (* 1 = 0.750035 loss)\nI0820 11:22:21.445003 21769 solver.cpp:228] Iteration 45200, loss = 0.126329\nI0820 11:22:21.445044 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 11:22:21.445067 21769 solver.cpp:244]     Train net output #1: loss = 0.126329 (* 1 = 0.126329 loss)\nI0820 11:22:21.530179 21769 sgd_solver.cpp:166] Iteration 45200, lr = 1.13\nI0820 11:25:59.401084 21769 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0820 11:28:10.688868 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7828\nI0820 11:28:10.689239 21769 solver.cpp:404]     Test net output #1: loss = 0.941325 (* 1 = 0.941325 loss)\nI0820 11:28:12.812997 21769 solver.cpp:228] Iteration 45300, loss = 0.21065\nI0820 11:28:12.813038 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 11:28:12.813061 21769 solver.cpp:244]     Train net output #1: loss = 0.21065 (* 1 = 0.21065 loss)\nI0820 11:28:12.894661 21769 sgd_solver.cpp:166] Iteration 45300, lr = 1.1325\nI0820 11:31:50.540817 21769 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0820 11:34:01.810597 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.7849\nI0820 11:34:01.810994 21769 solver.cpp:404]     Test net output #1: loss = 0.837249 (* 1 = 0.837249 loss)\nI0820 11:34:03.934770 21769 solver.cpp:228] Iteration 45400, loss = 0.161066\nI0820 11:34:03.934810 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 11:34:03.934834 21769 solver.cpp:244]     Train net output #1: loss = 0.161066 (* 1 = 0.161066 loss)\nI0820 11:34:04.022445 21769 sgd_solver.cpp:166] Iteration 45400, lr = 1.135\nI0820 11:37:41.567128 21769 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0820 11:39:52.843401 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7616\nI0820 11:39:52.843789 21769 solver.cpp:404]     Test net output #1: loss = 0.957556 (* 1 = 0.957556 loss)\nI0820 11:39:54.967432 21769 solver.cpp:228] Iteration 45500, loss = 0.126435\nI0820 11:39:54.967483 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 11:39:54.967509 21769 solver.cpp:244]     Train net output #1: loss = 0.126435 (* 1 = 0.126435 loss)\nI0820 11:39:55.056500 21769 sgd_solver.cpp:166] Iteration 45500, lr = 1.1375\nI0820 11:43:32.733799 21769 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0820 11:45:44.022222 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7915\nI0820 11:45:44.022598 21769 solver.cpp:404]     Test net output #1: loss = 0.895275 (* 1 = 0.895275 loss)\nI0820 11:45:46.145308 21769 solver.cpp:228] Iteration 45600, loss = 0.0923431\nI0820 11:45:46.145359 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:45:46.145382 21769 solver.cpp:244]     Train net output #1: loss = 0.0923428 (* 1 = 0.0923428 loss)\nI0820 11:45:46.245139 21769 sgd_solver.cpp:166] Iteration 45600, lr = 1.14\nI0820 11:49:23.963340 21769 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0820 11:51:35.248579 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7482\nI0820 11:51:35.248980 21769 solver.cpp:404]     Test net output #1: loss = 1.05455 (* 1 = 1.05455 loss)\nI0820 
11:51:37.371845 21769 solver.cpp:228] Iteration 45700, loss = 0.111661\nI0820 11:51:37.371894 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:51:37.371918 21769 solver.cpp:244]     Train net output #1: loss = 0.11166 (* 1 = 0.11166 loss)\nI0820 11:51:37.471194 21769 sgd_solver.cpp:166] Iteration 45700, lr = 1.1425\nI0820 11:55:15.313750 21769 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0820 11:57:26.595302 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8479\nI0820 11:57:26.595665 21769 solver.cpp:404]     Test net output #1: loss = 0.510517 (* 1 = 0.510517 loss)\nI0820 11:57:28.719694 21769 solver.cpp:228] Iteration 45800, loss = 0.0684639\nI0820 11:57:28.719732 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 11:57:28.719755 21769 solver.cpp:244]     Train net output #1: loss = 0.0684636 (* 1 = 0.0684636 loss)\nI0820 11:57:28.809051 21769 sgd_solver.cpp:166] Iteration 45800, lr = 1.145\nI0820 12:01:06.474743 21769 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0820 12:03:17.745543 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7893\nI0820 12:03:17.745916 21769 solver.cpp:404]     Test net output #1: loss = 0.859645 (* 1 = 0.859645 loss)\nI0820 12:03:19.870620 21769 solver.cpp:228] Iteration 45900, loss = 0.205268\nI0820 12:03:19.870659 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0820 12:03:19.870676 21769 solver.cpp:244]     Train net output #1: loss = 0.205268 (* 1 = 0.205268 loss)\nI0820 12:03:19.951962 21769 sgd_solver.cpp:166] Iteration 45900, lr = 1.1475\nI0820 12:06:57.634368 21769 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0820 12:09:08.906816 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8068\nI0820 12:09:08.907191 21769 solver.cpp:404]     Test net output #1: loss = 0.711275 (* 1 = 0.711275 loss)\nI0820 12:09:11.030048 21769 solver.cpp:228] Iteration 46000, loss = 0.175378\nI0820 12:09:11.030099 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.95\nI0820 12:09:11.030125 21769 solver.cpp:244]     Train net output #1: loss = 0.175377 (* 1 = 0.175377 loss)\nI0820 12:09:11.116317 21769 sgd_solver.cpp:166] Iteration 46000, lr = 1.15\nI0820 12:12:48.753187 21769 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0820 12:15:00.054152 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8124\nI0820 12:15:00.054541 21769 solver.cpp:404]     Test net output #1: loss = 0.633066 (* 1 = 0.633066 loss)\nI0820 12:15:02.177796 21769 solver.cpp:228] Iteration 46100, loss = 0.0503009\nI0820 12:15:02.177837 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 12:15:02.177860 21769 solver.cpp:244]     Train net output #1: loss = 0.0503007 (* 1 = 0.0503007 loss)\nI0820 12:15:02.267943 21769 sgd_solver.cpp:166] Iteration 46100, lr = 1.1525\nI0820 12:18:39.911981 21769 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0820 12:20:51.207115 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8027\nI0820 12:20:51.207500 21769 solver.cpp:404]     Test net output #1: loss = 0.776917 (* 1 = 0.776917 loss)\nI0820 12:20:53.330363 21769 solver.cpp:228] Iteration 46200, loss = 0.132033\nI0820 12:20:53.330412 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 12:20:53.330436 21769 solver.cpp:244]     Train net output #1: loss = 0.132033 (* 1 = 0.132033 loss)\nI0820 12:20:53.422755 21769 sgd_solver.cpp:166] Iteration 46200, lr = 1.155\nI0820 12:24:31.117318 21769 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0820 12:26:42.385730 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8118\nI0820 12:26:42.386118 21769 solver.cpp:404]     Test net output #1: loss = 0.698651 (* 1 = 0.698651 loss)\nI0820 12:26:44.509505 21769 solver.cpp:228] Iteration 46300, loss = 0.0455645\nI0820 12:26:44.509539 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 12:26:44.509554 21769 solver.cpp:244]     Train net output #1: loss = 0.0455642 (* 1 = 
0.0455642 loss)\nI0820 12:26:44.597617 21769 sgd_solver.cpp:166] Iteration 46300, lr = 1.1575\nI0820 12:30:22.248400 21769 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0820 12:32:33.526870 21769 solver.cpp:404]     Test net output #0: accuracy = 0.857\nI0820 12:32:33.527259 21769 solver.cpp:404]     Test net output #1: loss = 0.460737 (* 1 = 0.460737 loss)\nI0820 12:32:35.650213 21769 solver.cpp:228] Iteration 46400, loss = 0.202674\nI0820 12:32:35.650257 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 12:32:35.650274 21769 solver.cpp:244]     Train net output #1: loss = 0.202674 (* 1 = 0.202674 loss)\nI0820 12:32:35.741693 21769 sgd_solver.cpp:166] Iteration 46400, lr = 1.16\nI0820 12:36:13.434463 21769 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0820 12:38:24.727149 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7727\nI0820 12:38:24.727531 21769 solver.cpp:404]     Test net output #1: loss = 0.861563 (* 1 = 0.861563 loss)\nI0820 12:38:26.850716 21769 solver.cpp:228] Iteration 46500, loss = 0.120945\nI0820 12:38:26.850762 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 12:38:26.850780 21769 solver.cpp:244]     Train net output #1: loss = 0.120945 (* 1 = 0.120945 loss)\nI0820 12:38:26.941301 21769 sgd_solver.cpp:166] Iteration 46500, lr = 1.1625\nI0820 12:42:04.642491 21769 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0820 12:44:15.940258 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8407\nI0820 12:44:15.940641 21769 solver.cpp:404]     Test net output #1: loss = 0.564994 (* 1 = 0.564994 loss)\nI0820 12:44:18.063586 21769 solver.cpp:228] Iteration 46600, loss = 0.177385\nI0820 12:44:18.063632 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 12:44:18.063647 21769 solver.cpp:244]     Train net output #1: loss = 0.177385 (* 1 = 0.177385 loss)\nI0820 12:44:18.146281 21769 sgd_solver.cpp:166] Iteration 46600, lr = 1.165\nI0820 12:47:55.799022 21769 
solver.cpp:337] Iteration 46700, Testing net (#0)\nI0820 12:50:07.029898 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7781\nI0820 12:50:07.030262 21769 solver.cpp:404]     Test net output #1: loss = 0.869252 (* 1 = 0.869252 loss)\nI0820 12:50:09.153988 21769 solver.cpp:228] Iteration 46700, loss = 0.0682319\nI0820 12:50:09.154024 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 12:50:09.154039 21769 solver.cpp:244]     Train net output #1: loss = 0.0682315 (* 1 = 0.0682315 loss)\nI0820 12:50:09.242741 21769 sgd_solver.cpp:166] Iteration 46700, lr = 1.1675\nI0820 12:53:46.925813 21769 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0820 12:55:58.154975 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8039\nI0820 12:55:58.155340 21769 solver.cpp:404]     Test net output #1: loss = 0.634172 (* 1 = 0.634172 loss)\nI0820 12:56:00.278921 21769 solver.cpp:228] Iteration 46800, loss = 0.033581\nI0820 12:56:00.278959 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0820 12:56:00.278975 21769 solver.cpp:244]     Train net output #1: loss = 0.0335806 (* 1 = 0.0335806 loss)\nI0820 12:56:00.373594 21769 sgd_solver.cpp:166] Iteration 46800, lr = 1.17\nI0820 12:59:38.109490 21769 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0820 13:01:49.339401 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8349\nI0820 13:01:49.339797 21769 solver.cpp:404]     Test net output #1: loss = 0.619264 (* 1 = 0.619264 loss)\nI0820 13:01:51.463258 21769 solver.cpp:228] Iteration 46900, loss = 0.24079\nI0820 13:01:51.463306 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 13:01:51.463322 21769 solver.cpp:244]     Train net output #1: loss = 0.240789 (* 1 = 0.240789 loss)\nI0820 13:01:51.552222 21769 sgd_solver.cpp:166] Iteration 46900, lr = 1.1725\nI0820 13:05:29.238380 21769 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0820 13:07:40.473984 21769 solver.cpp:404]     Test net output #0: accuracy 
= 0.8032\nI0820 13:07:40.474354 21769 solver.cpp:404]     Test net output #1: loss = 0.796758 (* 1 = 0.796758 loss)\nI0820 13:07:42.597736 21769 solver.cpp:228] Iteration 47000, loss = 0.149902\nI0820 13:07:42.597774 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 13:07:42.597792 21769 solver.cpp:244]     Train net output #1: loss = 0.149902 (* 1 = 0.149902 loss)\nI0820 13:07:42.679584 21769 sgd_solver.cpp:166] Iteration 47000, lr = 1.175\nI0820 13:11:20.394798 21769 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0820 13:13:31.743724 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8275\nI0820 13:13:31.744138 21769 solver.cpp:404]     Test net output #1: loss = 0.55899 (* 1 = 0.55899 loss)\nI0820 13:13:33.867729 21769 solver.cpp:228] Iteration 47100, loss = 0.268576\nI0820 13:13:33.867766 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0820 13:13:33.867782 21769 solver.cpp:244]     Train net output #1: loss = 0.268575 (* 1 = 0.268575 loss)\nI0820 13:13:33.961912 21769 sgd_solver.cpp:166] Iteration 47100, lr = 1.1775\nI0820 13:17:11.660137 21769 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0820 13:19:22.899349 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8083\nI0820 13:19:22.899744 21769 solver.cpp:404]     Test net output #1: loss = 0.914779 (* 1 = 0.914779 loss)\nI0820 13:19:25.023013 21769 solver.cpp:228] Iteration 47200, loss = 0.11755\nI0820 13:19:25.023051 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 13:19:25.023066 21769 solver.cpp:244]     Train net output #1: loss = 0.11755 (* 1 = 0.11755 loss)\nI0820 13:19:25.105329 21769 sgd_solver.cpp:166] Iteration 47200, lr = 1.18\nI0820 13:23:02.758605 21769 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0820 13:25:14.087606 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8308\nI0820 13:25:14.087980 21769 solver.cpp:404]     Test net output #1: loss = 0.607116 (* 1 = 0.607116 loss)\nI0820 13:25:16.212193 
21769 solver.cpp:228] Iteration 47300, loss = 0.0876013\nI0820 13:25:16.212235 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 13:25:16.212258 21769 solver.cpp:244]     Train net output #1: loss = 0.087601 (* 1 = 0.087601 loss)\nI0820 13:25:16.292320 21769 sgd_solver.cpp:166] Iteration 47300, lr = 1.1825\nI0820 13:28:53.887827 21769 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0820 13:31:05.197680 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7529\nI0820 13:31:05.198082 21769 solver.cpp:404]     Test net output #1: loss = 1.03781 (* 1 = 1.03781 loss)\nI0820 13:31:07.321604 21769 solver.cpp:228] Iteration 47400, loss = 0.110161\nI0820 13:31:07.321645 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 13:31:07.321666 21769 solver.cpp:244]     Train net output #1: loss = 0.110161 (* 1 = 0.110161 loss)\nI0820 13:31:07.411799 21769 sgd_solver.cpp:166] Iteration 47400, lr = 1.185\nI0820 13:34:45.091629 21769 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0820 13:36:56.401365 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7844\nI0820 13:36:56.401772 21769 solver.cpp:404]     Test net output #1: loss = 0.746129 (* 1 = 0.746129 loss)\nI0820 13:36:58.525686 21769 solver.cpp:228] Iteration 47500, loss = 0.224576\nI0820 13:36:58.525725 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 13:36:58.525749 21769 solver.cpp:244]     Train net output #1: loss = 0.224575 (* 1 = 0.224575 loss)\nI0820 13:36:58.615135 21769 sgd_solver.cpp:166] Iteration 47500, lr = 1.1875\nI0820 13:40:36.261232 21769 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0820 13:42:47.548632 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7873\nI0820 13:42:47.549053 21769 solver.cpp:404]     Test net output #1: loss = 0.9248 (* 1 = 0.9248 loss)\nI0820 13:42:49.672431 21769 solver.cpp:228] Iteration 47600, loss = 0.238235\nI0820 13:42:49.672480 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.89\nI0820 13:42:49.672497 21769 solver.cpp:244]     Train net output #1: loss = 0.238234 (* 1 = 0.238234 loss)\nI0820 13:42:49.762310 21769 sgd_solver.cpp:166] Iteration 47600, lr = 1.19\nI0820 13:46:27.400300 21769 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0820 13:48:38.680141 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8057\nI0820 13:48:38.680541 21769 solver.cpp:404]     Test net output #1: loss = 0.772998 (* 1 = 0.772998 loss)\nI0820 13:48:40.803448 21769 solver.cpp:228] Iteration 47700, loss = 0.108315\nI0820 13:48:40.803485 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 13:48:40.803501 21769 solver.cpp:244]     Train net output #1: loss = 0.108315 (* 1 = 0.108315 loss)\nI0820 13:48:40.892860 21769 sgd_solver.cpp:166] Iteration 47700, lr = 1.1925\nI0820 13:52:18.634618 21769 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0820 13:54:29.894075 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7527\nI0820 13:54:29.894474 21769 solver.cpp:404]     Test net output #1: loss = 0.919284 (* 1 = 0.919284 loss)\nI0820 13:54:32.017815 21769 solver.cpp:228] Iteration 47800, loss = 0.148783\nI0820 13:54:32.017853 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 13:54:32.017869 21769 solver.cpp:244]     Train net output #1: loss = 0.148783 (* 1 = 0.148783 loss)\nI0820 13:54:32.104517 21769 sgd_solver.cpp:166] Iteration 47800, lr = 1.195\nI0820 13:58:09.651633 21769 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0820 14:00:20.917358 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8193\nI0820 14:00:20.917737 21769 solver.cpp:404]     Test net output #1: loss = 0.642417 (* 1 = 0.642417 loss)\nI0820 14:00:23.040747 21769 solver.cpp:228] Iteration 47900, loss = 0.199679\nI0820 14:00:23.040786 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 14:00:23.040799 21769 solver.cpp:244]     Train net output #1: loss = 0.199679 (* 1 = 0.199679 loss)\nI0820 
14:00:23.129753 21769 sgd_solver.cpp:166] Iteration 47900, lr = 1.1975\nI0820 14:04:00.662559 21769 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0820 14:06:11.933735 21769 solver.cpp:404]     Test net output #0: accuracy = 0.769\nI0820 14:06:11.934123 21769 solver.cpp:404]     Test net output #1: loss = 0.826315 (* 1 = 0.826315 loss)\nI0820 14:06:14.057412 21769 solver.cpp:228] Iteration 48000, loss = 0.139672\nI0820 14:06:14.057451 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0820 14:06:14.057466 21769 solver.cpp:244]     Train net output #1: loss = 0.139671 (* 1 = 0.139671 loss)\nI0820 14:06:14.147452 21769 sgd_solver.cpp:166] Iteration 48000, lr = 1.2\nI0820 14:09:51.717699 21769 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0820 14:12:02.999990 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7876\nI0820 14:12:03.000365 21769 solver.cpp:404]     Test net output #1: loss = 0.810442 (* 1 = 0.810442 loss)\nI0820 14:12:05.124125 21769 solver.cpp:228] Iteration 48100, loss = 0.0794715\nI0820 14:12:05.124162 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 14:12:05.124178 21769 solver.cpp:244]     Train net output #1: loss = 0.0794712 (* 1 = 0.0794712 loss)\nI0820 14:12:05.210736 21769 sgd_solver.cpp:166] Iteration 48100, lr = 1.2025\nI0820 14:15:42.766400 21769 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0820 14:17:54.064242 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7874\nI0820 14:17:54.064636 21769 solver.cpp:404]     Test net output #1: loss = 0.801059 (* 1 = 0.801059 loss)\nI0820 14:17:56.188830 21769 solver.cpp:228] Iteration 48200, loss = 0.0897534\nI0820 14:17:56.188872 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 14:17:56.188897 21769 solver.cpp:244]     Train net output #1: loss = 0.0897531 (* 1 = 0.0897531 loss)\nI0820 14:17:56.272644 21769 sgd_solver.cpp:166] Iteration 48200, lr = 1.205\nI0820 14:21:33.938482 21769 solver.cpp:337] Iteration 
48300, Testing net (#0)\nI0820 14:23:45.219566 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6995\nI0820 14:23:45.219923 21769 solver.cpp:404]     Test net output #1: loss = 1.31097 (* 1 = 1.31097 loss)\nI0820 14:23:47.342087 21769 solver.cpp:228] Iteration 48300, loss = 0.0650635\nI0820 14:23:47.342123 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 14:23:47.342139 21769 solver.cpp:244]     Train net output #1: loss = 0.0650632 (* 1 = 0.0650632 loss)\nI0820 14:23:47.431007 21769 sgd_solver.cpp:166] Iteration 48300, lr = 1.2075\nI0820 14:27:25.047181 21769 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0820 14:29:36.322273 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7906\nI0820 14:29:36.322650 21769 solver.cpp:404]     Test net output #1: loss = 0.813062 (* 1 = 0.813062 loss)\nI0820 14:29:38.445659 21769 solver.cpp:228] Iteration 48400, loss = 0.127258\nI0820 14:29:38.445696 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:29:38.445713 21769 solver.cpp:244]     Train net output #1: loss = 0.127258 (* 1 = 0.127258 loss)\nI0820 14:29:38.542985 21769 sgd_solver.cpp:166] Iteration 48400, lr = 1.21\nI0820 14:33:16.273325 21769 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0820 14:35:27.531335 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7495\nI0820 14:35:27.531713 21769 solver.cpp:404]     Test net output #1: loss = 1.06897 (* 1 = 1.06897 loss)\nI0820 14:35:29.654633 21769 solver.cpp:228] Iteration 48500, loss = 0.165014\nI0820 14:35:29.654670 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 14:35:29.654685 21769 solver.cpp:244]     Train net output #1: loss = 0.165014 (* 1 = 0.165014 loss)\nI0820 14:35:29.745059 21769 sgd_solver.cpp:166] Iteration 48500, lr = 1.2125\nI0820 14:39:07.366300 21769 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0820 14:41:18.640000 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8336\nI0820 14:41:18.640326 
21769 solver.cpp:404]     Test net output #1: loss = 0.582267 (* 1 = 0.582267 loss)\nI0820 14:41:20.763567 21769 solver.cpp:228] Iteration 48600, loss = 0.13789\nI0820 14:41:20.763615 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 14:41:20.763631 21769 solver.cpp:244]     Train net output #1: loss = 0.13789 (* 1 = 0.13789 loss)\nI0820 14:41:20.852387 21769 sgd_solver.cpp:166] Iteration 48600, lr = 1.215\nI0820 14:44:58.767241 21769 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0820 14:47:10.044423 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8204\nI0820 14:47:10.044805 21769 solver.cpp:404]     Test net output #1: loss = 0.594965 (* 1 = 0.594965 loss)\nI0820 14:47:12.167851 21769 solver.cpp:228] Iteration 48700, loss = 0.0797573\nI0820 14:47:12.167899 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 14:47:12.167917 21769 solver.cpp:244]     Train net output #1: loss = 0.079757 (* 1 = 0.079757 loss)\nI0820 14:47:12.252208 21769 sgd_solver.cpp:166] Iteration 48700, lr = 1.2175\nI0820 14:50:49.968384 21769 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0820 14:53:01.238755 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8094\nI0820 14:53:01.239137 21769 solver.cpp:404]     Test net output #1: loss = 0.753624 (* 1 = 0.753624 loss)\nI0820 14:53:03.361176 21769 solver.cpp:228] Iteration 48800, loss = 0.0381145\nI0820 14:53:03.361225 21769 solver.cpp:244]     Train net output #0: accuracy = 1\nI0820 14:53:03.361241 21769 solver.cpp:244]     Train net output #1: loss = 0.0381142 (* 1 = 0.0381142 loss)\nI0820 14:53:03.454758 21769 sgd_solver.cpp:166] Iteration 48800, lr = 1.22\nI0820 14:56:41.100457 21769 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0820 14:58:52.381858 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7636\nI0820 14:58:52.382222 21769 solver.cpp:404]     Test net output #1: loss = 0.898322 (* 1 = 0.898322 loss)\nI0820 14:58:54.505017 21769 solver.cpp:228] 
Iteration 48900, loss = 0.152185\nI0820 14:58:54.505053 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 14:58:54.505069 21769 solver.cpp:244]     Train net output #1: loss = 0.152184 (* 1 = 0.152184 loss)\nI0820 14:58:54.593560 21769 sgd_solver.cpp:166] Iteration 48900, lr = 1.2225\nI0820 15:02:32.076403 21769 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0820 15:04:43.299679 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7501\nI0820 15:04:43.300066 21769 solver.cpp:404]     Test net output #1: loss = 0.932052 (* 1 = 0.932052 loss)\nI0820 15:04:45.423260 21769 solver.cpp:228] Iteration 49000, loss = 0.154248\nI0820 15:04:45.423296 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 15:04:45.423311 21769 solver.cpp:244]     Train net output #1: loss = 0.154247 (* 1 = 0.154247 loss)\nI0820 15:04:45.509596 21769 sgd_solver.cpp:166] Iteration 49000, lr = 1.225\nI0820 15:08:23.099673 21769 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0820 15:10:34.309455 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7986\nI0820 15:10:34.309805 21769 solver.cpp:404]     Test net output #1: loss = 0.790044 (* 1 = 0.790044 loss)\nI0820 15:10:36.432521 21769 solver.cpp:228] Iteration 49100, loss = 0.137192\nI0820 15:10:36.432569 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 15:10:36.432586 21769 solver.cpp:244]     Train net output #1: loss = 0.137191 (* 1 = 0.137191 loss)\nI0820 15:10:36.527765 21769 sgd_solver.cpp:166] Iteration 49100, lr = 1.2275\nI0820 15:14:14.238469 21769 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0820 15:16:25.437858 21769 solver.cpp:404]     Test net output #0: accuracy = 0.838\nI0820 15:16:25.438238 21769 solver.cpp:404]     Test net output #1: loss = 0.578116 (* 1 = 0.578116 loss)\nI0820 15:16:27.561259 21769 solver.cpp:228] Iteration 49200, loss = 0.0852023\nI0820 15:16:27.561307 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 
15:16:27.561323 21769 solver.cpp:244]     Train net output #1: loss = 0.0852018 (* 1 = 0.0852018 loss)\nI0820 15:16:27.665722 21769 sgd_solver.cpp:166] Iteration 49200, lr = 1.23\nI0820 15:20:05.948720 21769 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0820 15:22:17.151629 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8067\nI0820 15:22:17.152009 21769 solver.cpp:404]     Test net output #1: loss = 0.738302 (* 1 = 0.738302 loss)\nI0820 15:22:19.273918 21769 solver.cpp:228] Iteration 49300, loss = 0.0644232\nI0820 15:22:19.273964 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 15:22:19.273980 21769 solver.cpp:244]     Train net output #1: loss = 0.0644227 (* 1 = 0.0644227 loss)\nI0820 15:22:19.373925 21769 sgd_solver.cpp:166] Iteration 49300, lr = 1.2325\nI0820 15:25:57.700418 21769 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0820 15:28:08.898603 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7849\nI0820 15:28:08.899001 21769 solver.cpp:404]     Test net output #1: loss = 0.741573 (* 1 = 0.741573 loss)\nI0820 15:28:11.022301 21769 solver.cpp:228] Iteration 49400, loss = 0.177472\nI0820 15:28:11.022347 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 15:28:11.022363 21769 solver.cpp:244]     Train net output #1: loss = 0.177472 (* 1 = 0.177472 loss)\nI0820 15:28:11.117177 21769 sgd_solver.cpp:166] Iteration 49400, lr = 1.235\nI0820 15:31:49.292320 21769 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0820 15:34:00.587687 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7987\nI0820 15:34:00.588090 21769 solver.cpp:404]     Test net output #1: loss = 0.706234 (* 1 = 0.706234 loss)\nI0820 15:34:02.711581 21769 solver.cpp:228] Iteration 49500, loss = 0.105421\nI0820 15:34:02.711630 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 15:34:02.711655 21769 solver.cpp:244]     Train net output #1: loss = 0.10542 (* 1 = 0.10542 loss)\nI0820 15:34:02.805434 21769 
sgd_solver.cpp:166] Iteration 49500, lr = 1.2375\nI0820 15:37:41.000829 21769 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0820 15:39:52.349231 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0820 15:39:52.349622 21769 solver.cpp:404]     Test net output #1: loss = 0.773737 (* 1 = 0.773737 loss)\nI0820 15:39:54.472694 21769 solver.cpp:228] Iteration 49600, loss = 0.168078\nI0820 15:39:54.472746 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 15:39:54.472772 21769 solver.cpp:244]     Train net output #1: loss = 0.168078 (* 1 = 0.168078 loss)\nI0820 15:39:54.576555 21769 sgd_solver.cpp:166] Iteration 49600, lr = 1.24\nI0820 15:43:33.004055 21769 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0820 15:45:44.338611 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8048\nI0820 15:45:44.339013 21769 solver.cpp:404]     Test net output #1: loss = 0.710428 (* 1 = 0.710428 loss)\nI0820 15:45:46.462435 21769 solver.cpp:228] Iteration 49700, loss = 0.110446\nI0820 15:45:46.462476 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 15:45:46.462502 21769 solver.cpp:244]     Train net output #1: loss = 0.110446 (* 1 = 0.110446 loss)\nI0820 15:45:46.551712 21769 sgd_solver.cpp:166] Iteration 49700, lr = 1.2425\nI0820 15:49:24.902789 21769 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0820 15:51:36.252840 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8365\nI0820 15:51:36.253255 21769 solver.cpp:404]     Test net output #1: loss = 0.636286 (* 1 = 0.636286 loss)\nI0820 15:51:38.377238 21769 solver.cpp:228] Iteration 49800, loss = 0.116616\nI0820 15:51:38.377279 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 15:51:38.377302 21769 solver.cpp:244]     Train net output #1: loss = 0.116616 (* 1 = 0.116616 loss)\nI0820 15:51:38.474215 21769 sgd_solver.cpp:166] Iteration 49800, lr = 1.245\nI0820 15:55:16.762677 21769 solver.cpp:337] Iteration 49900, Testing net 
(#0)\nI0820 15:57:28.092370 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7979\nI0820 15:57:28.092772 21769 solver.cpp:404]     Test net output #1: loss = 0.709985 (* 1 = 0.709985 loss)\nI0820 15:57:30.216469 21769 solver.cpp:228] Iteration 49900, loss = 0.155836\nI0820 15:57:30.216511 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 15:57:30.216534 21769 solver.cpp:244]     Train net output #1: loss = 0.155836 (* 1 = 0.155836 loss)\nI0820 15:57:30.308068 21769 sgd_solver.cpp:166] Iteration 49900, lr = 1.2475\nI0820 16:01:08.565029 21769 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0820 16:03:19.956671 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7519\nI0820 16:03:19.957079 21769 solver.cpp:404]     Test net output #1: loss = 0.986314 (* 1 = 0.986314 loss)\nI0820 16:03:22.080281 21769 solver.cpp:228] Iteration 50000, loss = 0.15746\nI0820 16:03:22.080322 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 16:03:22.080344 21769 solver.cpp:244]     Train net output #1: loss = 0.157459 (* 1 = 0.157459 loss)\nI0820 16:03:22.171437 21769 sgd_solver.cpp:166] Iteration 50000, lr = 1.25\nI0820 16:07:00.380895 21769 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0820 16:09:11.779822 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8314\nI0820 16:09:11.780233 21769 solver.cpp:404]     Test net output #1: loss = 0.583673 (* 1 = 0.583673 loss)\nI0820 16:09:13.903923 21769 solver.cpp:228] Iteration 50100, loss = 0.167827\nI0820 16:09:13.903980 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 16:09:13.904006 21769 solver.cpp:244]     Train net output #1: loss = 0.167827 (* 1 = 0.167827 loss)\nI0820 16:09:13.997676 21769 sgd_solver.cpp:166] Iteration 50100, lr = 1.2525\nI0820 16:12:52.287878 21769 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0820 16:15:03.571534 21769 solver.cpp:404]     Test net output #0: accuracy = 0.837\nI0820 16:15:03.571940 21769 
solver.cpp:404]     Test net output #1: loss = 0.674642 (* 1 = 0.674642 loss)\nI0820 16:15:05.695258 21769 solver.cpp:228] Iteration 50200, loss = 0.124974\nI0820 16:15:05.695299 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 16:15:05.695322 21769 solver.cpp:244]     Train net output #1: loss = 0.124974 (* 1 = 0.124974 loss)\nI0820 16:15:05.785121 21769 sgd_solver.cpp:166] Iteration 50200, lr = 1.255\nI0820 16:18:43.998235 21769 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0820 16:20:55.298501 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7554\nI0820 16:20:55.298887 21769 solver.cpp:404]     Test net output #1: loss = 1.14729 (* 1 = 1.14729 loss)\nI0820 16:20:57.422456 21769 solver.cpp:228] Iteration 50300, loss = 0.0968748\nI0820 16:20:57.422495 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 16:20:57.422519 21769 solver.cpp:244]     Train net output #1: loss = 0.0968744 (* 1 = 0.0968744 loss)\nI0820 16:20:57.513756 21769 sgd_solver.cpp:166] Iteration 50300, lr = 1.2575\nI0820 16:24:35.863576 21769 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0820 16:26:47.107102 21769 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI0820 16:26:47.107488 21769 solver.cpp:404]     Test net output #1: loss = 0.940288 (* 1 = 0.940288 loss)\nI0820 16:26:49.232972 21769 solver.cpp:228] Iteration 50400, loss = 0.221502\nI0820 16:26:49.233023 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 16:26:49.233039 21769 solver.cpp:244]     Train net output #1: loss = 0.221502 (* 1 = 0.221502 loss)\nI0820 16:26:49.323763 21769 sgd_solver.cpp:166] Iteration 50400, lr = 1.26\nI0820 16:30:27.649185 21769 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0820 16:32:38.896415 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7243\nI0820 16:32:38.896742 21769 solver.cpp:404]     Test net output #1: loss = 1.00777 (* 1 = 1.00777 loss)\nI0820 16:32:41.019484 21769 solver.cpp:228] Iteration 
50500, loss = 0.121641\nI0820 16:32:41.019531 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 16:32:41.019547 21769 solver.cpp:244]     Train net output #1: loss = 0.12164 (* 1 = 0.12164 loss)\nI0820 16:32:41.114665 21769 sgd_solver.cpp:166] Iteration 50500, lr = 1.2625\nI0820 16:36:19.339735 21769 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0820 16:38:30.589901 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7629\nI0820 16:38:30.590258 21769 solver.cpp:404]     Test net output #1: loss = 0.897965 (* 1 = 0.897965 loss)\nI0820 16:38:32.712682 21769 solver.cpp:228] Iteration 50600, loss = 0.221549\nI0820 16:38:32.712720 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 16:38:32.712734 21769 solver.cpp:244]     Train net output #1: loss = 0.221549 (* 1 = 0.221549 loss)\nI0820 16:38:32.803414 21769 sgd_solver.cpp:166] Iteration 50600, lr = 1.265\nI0820 16:42:11.090286 21769 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0820 16:44:22.340131 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7602\nI0820 16:44:22.340509 21769 solver.cpp:404]     Test net output #1: loss = 1.04814 (* 1 = 1.04814 loss)\nI0820 16:44:24.462098 21769 solver.cpp:228] Iteration 50700, loss = 0.108824\nI0820 16:44:24.462136 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 16:44:24.462152 21769 solver.cpp:244]     Train net output #1: loss = 0.108823 (* 1 = 0.108823 loss)\nI0820 16:44:24.561316 21769 sgd_solver.cpp:166] Iteration 50700, lr = 1.2675\nI0820 16:48:02.702594 21769 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0820 16:50:13.961623 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7949\nI0820 16:50:13.962010 21769 solver.cpp:404]     Test net output #1: loss = 0.766399 (* 1 = 0.766399 loss)\nI0820 16:50:16.085477 21769 solver.cpp:228] Iteration 50800, loss = 0.152652\nI0820 16:50:16.085525 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 16:50:16.085541 
21769 solver.cpp:244]     Train net output #1: loss = 0.152652 (* 1 = 0.152652 loss)\nI0820 16:50:16.178614 21769 sgd_solver.cpp:166] Iteration 50800, lr = 1.27\nI0820 16:53:54.254055 21769 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0820 16:56:05.528064 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7893\nI0820 16:56:05.528455 21769 solver.cpp:404]     Test net output #1: loss = 0.803572 (* 1 = 0.803572 loss)\nI0820 16:56:07.652302 21769 solver.cpp:228] Iteration 50900, loss = 0.251477\nI0820 16:56:07.652350 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 16:56:07.652367 21769 solver.cpp:244]     Train net output #1: loss = 0.251476 (* 1 = 0.251476 loss)\nI0820 16:56:07.749650 21769 sgd_solver.cpp:166] Iteration 50900, lr = 1.2725\nI0820 16:59:45.943483 21769 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0820 17:01:57.190762 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7471\nI0820 17:01:57.191144 21769 solver.cpp:404]     Test net output #1: loss = 0.893667 (* 1 = 0.893667 loss)\nI0820 17:01:59.314990 21769 solver.cpp:228] Iteration 51000, loss = 0.16211\nI0820 17:01:59.315030 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 17:01:59.315047 21769 solver.cpp:244]     Train net output #1: loss = 0.16211 (* 1 = 0.16211 loss)\nI0820 17:01:59.410193 21769 sgd_solver.cpp:166] Iteration 51000, lr = 1.275\nI0820 17:05:37.592136 21769 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0820 17:07:48.833730 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8483\nI0820 17:07:48.834072 21769 solver.cpp:404]     Test net output #1: loss = 0.540952 (* 1 = 0.540952 loss)\nI0820 17:07:50.957167 21769 solver.cpp:228] Iteration 51100, loss = 0.144076\nI0820 17:07:50.957204 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 17:07:50.957219 21769 solver.cpp:244]     Train net output #1: loss = 0.144076 (* 1 = 0.144076 loss)\nI0820 17:07:51.049003 21769 sgd_solver.cpp:166] 
Iteration 51100, lr = 1.2775\nI0820 17:11:29.219115 21769 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0820 17:13:40.468111 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7659\nI0820 17:13:40.468468 21769 solver.cpp:404]     Test net output #1: loss = 1.01639 (* 1 = 1.01639 loss)\nI0820 17:13:42.591222 21769 solver.cpp:228] Iteration 51200, loss = 0.166319\nI0820 17:13:42.591269 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 17:13:42.591284 21769 solver.cpp:244]     Train net output #1: loss = 0.166318 (* 1 = 0.166318 loss)\nI0820 17:13:42.679563 21769 sgd_solver.cpp:166] Iteration 51200, lr = 1.28\nI0820 17:17:20.764407 21769 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0820 17:19:31.983364 21769 solver.cpp:404]     Test net output #0: accuracy = 0.819\nI0820 17:19:31.983754 21769 solver.cpp:404]     Test net output #1: loss = 0.683217 (* 1 = 0.683217 loss)\nI0820 17:19:34.106779 21769 solver.cpp:228] Iteration 51300, loss = 0.057542\nI0820 17:19:34.106817 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 17:19:34.106832 21769 solver.cpp:244]     Train net output #1: loss = 0.0575417 (* 1 = 0.0575417 loss)\nI0820 17:19:34.197379 21769 sgd_solver.cpp:166] Iteration 51300, lr = 1.2825\nI0820 17:23:12.414861 21769 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0820 17:25:23.607134 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8159\nI0820 17:25:23.607522 21769 solver.cpp:404]     Test net output #1: loss = 0.620376 (* 1 = 0.620376 loss)\nI0820 17:25:25.730001 21769 solver.cpp:228] Iteration 51400, loss = 0.366998\nI0820 17:25:25.730051 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0820 17:25:25.730067 21769 solver.cpp:244]     Train net output #1: loss = 0.366998 (* 1 = 0.366998 loss)\nI0820 17:25:25.824873 21769 sgd_solver.cpp:166] Iteration 51400, lr = 1.285\nI0820 17:29:03.904498 21769 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0820 17:31:15.117615 
21769 solver.cpp:404]     Test net output #0: accuracy = 0.8158\nI0820 17:31:15.118013 21769 solver.cpp:404]     Test net output #1: loss = 0.680054 (* 1 = 0.680054 loss)\nI0820 17:31:17.241837 21769 solver.cpp:228] Iteration 51500, loss = 0.130878\nI0820 17:31:17.241873 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 17:31:17.241888 21769 solver.cpp:244]     Train net output #1: loss = 0.130877 (* 1 = 0.130877 loss)\nI0820 17:31:17.331161 21769 sgd_solver.cpp:166] Iteration 51500, lr = 1.2875\nI0820 17:34:55.532234 21769 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0820 17:37:06.746645 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7834\nI0820 17:37:06.747020 21769 solver.cpp:404]     Test net output #1: loss = 0.798162 (* 1 = 0.798162 loss)\nI0820 17:37:08.870438 21769 solver.cpp:228] Iteration 51600, loss = 0.165666\nI0820 17:37:08.870484 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 17:37:08.870501 21769 solver.cpp:244]     Train net output #1: loss = 0.165666 (* 1 = 0.165666 loss)\nI0820 17:37:08.964313 21769 sgd_solver.cpp:166] Iteration 51600, lr = 1.29\nI0820 17:40:47.285284 21769 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0820 17:42:58.471597 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8541\nI0820 17:42:58.471972 21769 solver.cpp:404]     Test net output #1: loss = 0.526237 (* 1 = 0.526237 loss)\nI0820 17:43:00.594274 21769 solver.cpp:228] Iteration 51700, loss = 0.126157\nI0820 17:43:00.594321 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 17:43:00.594336 21769 solver.cpp:244]     Train net output #1: loss = 0.126157 (* 1 = 0.126157 loss)\nI0820 17:43:00.693748 21769 sgd_solver.cpp:166] Iteration 51700, lr = 1.2925\nI0820 17:46:38.813822 21769 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0820 17:48:50.001570 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8096\nI0820 17:48:50.001957 21769 solver.cpp:404]     Test net output #1: 
loss = 0.734677 (* 1 = 0.734677 loss)\nI0820 17:48:52.125447 21769 solver.cpp:228] Iteration 51800, loss = 0.109307\nI0820 17:48:52.125494 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 17:48:52.125510 21769 solver.cpp:244]     Train net output #1: loss = 0.109307 (* 1 = 0.109307 loss)\nI0820 17:48:52.222628 21769 sgd_solver.cpp:166] Iteration 51800, lr = 1.295\nI0820 17:52:30.513036 21769 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0820 17:54:41.772363 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8094\nI0820 17:54:41.772747 21769 solver.cpp:404]     Test net output #1: loss = 0.70275 (* 1 = 0.70275 loss)\nI0820 17:54:43.895078 21769 solver.cpp:228] Iteration 51900, loss = 0.141175\nI0820 17:54:43.895124 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 17:54:43.895141 21769 solver.cpp:244]     Train net output #1: loss = 0.141175 (* 1 = 0.141175 loss)\nI0820 17:54:43.987596 21769 sgd_solver.cpp:166] Iteration 51900, lr = 1.2975\nI0820 17:58:22.411856 21769 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0820 18:00:33.645834 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8059\nI0820 18:00:33.646234 21769 solver.cpp:404]     Test net output #1: loss = 0.724618 (* 1 = 0.724618 loss)\nI0820 18:00:35.768626 21769 solver.cpp:228] Iteration 52000, loss = 0.137543\nI0820 18:00:35.768673 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 18:00:35.768689 21769 solver.cpp:244]     Train net output #1: loss = 0.137543 (* 1 = 0.137543 loss)\nI0820 18:00:35.867635 21769 sgd_solver.cpp:166] Iteration 52000, lr = 1.3\nI0820 18:04:14.097157 21769 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0820 18:06:25.353500 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7973\nI0820 18:06:25.353890 21769 solver.cpp:404]     Test net output #1: loss = 0.749855 (* 1 = 0.749855 loss)\nI0820 18:06:27.476409 21769 solver.cpp:228] Iteration 52100, loss = 0.136776\nI0820 
18:06:27.476454 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 18:06:27.476471 21769 solver.cpp:244]     Train net output #1: loss = 0.136776 (* 1 = 0.136776 loss)\nI0820 18:06:27.577963 21769 sgd_solver.cpp:166] Iteration 52100, lr = 1.3025\nI0820 18:10:05.753307 21769 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0820 18:12:17.208307 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7352\nI0820 18:12:17.208762 21769 solver.cpp:404]     Test net output #1: loss = 0.978034 (* 1 = 0.978034 loss)\nI0820 18:12:19.338345 21769 solver.cpp:228] Iteration 52200, loss = 0.141441\nI0820 18:12:19.338383 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 18:12:19.338399 21769 solver.cpp:244]     Train net output #1: loss = 0.14144 (* 1 = 0.14144 loss)\nI0820 18:12:19.426311 21769 sgd_solver.cpp:166] Iteration 52200, lr = 1.305\nI0820 18:15:57.736254 21769 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0820 18:18:09.028664 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7742\nI0820 18:18:09.029014 21769 solver.cpp:404]     Test net output #1: loss = 0.989581 (* 1 = 0.989581 loss)\nI0820 18:18:11.152621 21769 solver.cpp:228] Iteration 52300, loss = 0.0462749\nI0820 18:18:11.152662 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 18:18:11.152685 21769 solver.cpp:244]     Train net output #1: loss = 0.0462743 (* 1 = 0.0462743 loss)\nI0820 18:18:11.249229 21769 sgd_solver.cpp:166] Iteration 52300, lr = 1.3075\nI0820 18:21:49.381072 21769 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0820 18:24:00.690207 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7702\nI0820 18:24:00.690595 21769 solver.cpp:404]     Test net output #1: loss = 0.816854 (* 1 = 0.816854 loss)\nI0820 18:24:02.814721 21769 solver.cpp:228] Iteration 52400, loss = 0.152396\nI0820 18:24:02.814769 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 18:24:02.814795 21769 solver.cpp:244]     
Train net output #1: loss = 0.152395 (* 1 = 0.152395 loss)\nI0820 18:24:02.906554 21769 sgd_solver.cpp:166] Iteration 52400, lr = 1.31\nI0820 18:27:41.034014 21769 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0820 18:29:52.340723 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7561\nI0820 18:29:52.341091 21769 solver.cpp:404]     Test net output #1: loss = 0.957611 (* 1 = 0.957611 loss)\nI0820 18:29:54.464996 21769 solver.cpp:228] Iteration 52500, loss = 0.191738\nI0820 18:29:54.465037 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 18:29:54.465060 21769 solver.cpp:244]     Train net output #1: loss = 0.191737 (* 1 = 0.191737 loss)\nI0820 18:29:54.554005 21769 sgd_solver.cpp:166] Iteration 52500, lr = 1.3125\nI0820 18:33:32.861791 21769 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0820 18:35:44.150698 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6857\nI0820 18:35:44.151104 21769 solver.cpp:404]     Test net output #1: loss = 1.11922 (* 1 = 1.11922 loss)\nI0820 18:35:46.274543 21769 solver.cpp:228] Iteration 52600, loss = 0.226044\nI0820 18:35:46.274593 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 18:35:46.274619 21769 solver.cpp:244]     Train net output #1: loss = 0.226044 (* 1 = 0.226044 loss)\nI0820 18:35:46.367777 21769 sgd_solver.cpp:166] Iteration 52600, lr = 1.315\nI0820 18:39:24.477948 21769 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0820 18:41:35.775717 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7475\nI0820 18:41:35.776091 21769 solver.cpp:404]     Test net output #1: loss = 1.06317 (* 1 = 1.06317 loss)\nI0820 18:41:37.900454 21769 solver.cpp:228] Iteration 52700, loss = 0.100655\nI0820 18:41:37.900493 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 18:41:37.900518 21769 solver.cpp:244]     Train net output #1: loss = 0.100654 (* 1 = 0.100654 loss)\nI0820 18:41:37.994449 21769 sgd_solver.cpp:166] Iteration 52700, lr = 
1.3175\nI0820 18:45:16.078879 21769 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0820 18:47:27.366891 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7626\nI0820 18:47:27.367267 21769 solver.cpp:404]     Test net output #1: loss = 0.767818 (* 1 = 0.767818 loss)\nI0820 18:47:29.491046 21769 solver.cpp:228] Iteration 52800, loss = 0.0843547\nI0820 18:47:29.491087 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 18:47:29.491111 21769 solver.cpp:244]     Train net output #1: loss = 0.0843542 (* 1 = 0.0843542 loss)\nI0820 18:47:29.586280 21769 sgd_solver.cpp:166] Iteration 52800, lr = 1.32\nI0820 18:51:07.707353 21769 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0820 18:53:18.996157 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8245\nI0820 18:53:18.996549 21769 solver.cpp:404]     Test net output #1: loss = 0.618006 (* 1 = 0.618006 loss)\nI0820 18:53:21.120352 21769 solver.cpp:228] Iteration 52900, loss = 0.156961\nI0820 18:53:21.120401 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 18:53:21.120426 21769 solver.cpp:244]     Train net output #1: loss = 0.156961 (* 1 = 0.156961 loss)\nI0820 18:53:21.212025 21769 sgd_solver.cpp:166] Iteration 52900, lr = 1.3225\nI0820 18:56:59.412713 21769 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0820 18:59:10.689091 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7283\nI0820 18:59:10.689486 21769 solver.cpp:404]     Test net output #1: loss = 1.13494 (* 1 = 1.13494 loss)\nI0820 18:59:12.812198 21769 solver.cpp:228] Iteration 53000, loss = 0.220573\nI0820 18:59:12.812248 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 18:59:12.812273 21769 solver.cpp:244]     Train net output #1: loss = 0.220573 (* 1 = 0.220573 loss)\nI0820 18:59:12.904227 21769 sgd_solver.cpp:166] Iteration 53000, lr = 1.325\nI0820 19:02:51.104662 21769 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0820 19:05:02.349376 21769 solver.cpp:404]   
  Test net output #0: accuracy = 0.7898\nI0820 19:05:02.349757 21769 solver.cpp:404]     Test net output #1: loss = 0.805102 (* 1 = 0.805102 loss)\nI0820 19:05:04.473327 21769 solver.cpp:228] Iteration 53100, loss = 0.217572\nI0820 19:05:04.473363 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 19:05:04.473378 21769 solver.cpp:244]     Train net output #1: loss = 0.217572 (* 1 = 0.217572 loss)\nI0820 19:05:04.566164 21769 sgd_solver.cpp:166] Iteration 53100, lr = 1.3275\nI0820 19:08:42.755520 21769 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0820 19:10:54.008883 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8231\nI0820 19:10:54.009268 21769 solver.cpp:404]     Test net output #1: loss = 0.686083 (* 1 = 0.686083 loss)\nI0820 19:10:56.132282 21769 solver.cpp:228] Iteration 53200, loss = 0.156167\nI0820 19:10:56.132326 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 19:10:56.132342 21769 solver.cpp:244]     Train net output #1: loss = 0.156167 (* 1 = 0.156167 loss)\nI0820 19:10:56.222240 21769 sgd_solver.cpp:166] Iteration 53200, lr = 1.33\nI0820 19:14:34.542570 21769 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0820 19:16:45.824205 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7682\nI0820 19:16:45.824586 21769 solver.cpp:404]     Test net output #1: loss = 0.932195 (* 1 = 0.932195 loss)\nI0820 19:16:47.947710 21769 solver.cpp:228] Iteration 53300, loss = 0.095226\nI0820 19:16:47.947746 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 19:16:47.947762 21769 solver.cpp:244]     Train net output #1: loss = 0.0952255 (* 1 = 0.0952255 loss)\nI0820 19:16:48.048781 21769 sgd_solver.cpp:166] Iteration 53300, lr = 1.3325\nI0820 19:20:26.191061 21769 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0820 19:22:37.445237 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0820 19:22:37.445588 21769 solver.cpp:404]     Test net output #1: loss = 0.964109 (* 1 
= 0.964109 loss)\nI0820 19:22:39.569130 21769 solver.cpp:228] Iteration 53400, loss = 0.14887\nI0820 19:22:39.569176 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 19:22:39.569193 21769 solver.cpp:244]     Train net output #1: loss = 0.14887 (* 1 = 0.14887 loss)\nI0820 19:22:39.658098 21769 sgd_solver.cpp:166] Iteration 53400, lr = 1.335\nI0820 19:26:17.827327 21769 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0820 19:28:29.071800 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7917\nI0820 19:28:29.072187 21769 solver.cpp:404]     Test net output #1: loss = 0.742424 (* 1 = 0.742424 loss)\nI0820 19:28:31.194952 21769 solver.cpp:228] Iteration 53500, loss = 0.150324\nI0820 19:28:31.194988 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 19:28:31.195003 21769 solver.cpp:244]     Train net output #1: loss = 0.150324 (* 1 = 0.150324 loss)\nI0820 19:28:31.291159 21769 sgd_solver.cpp:166] Iteration 53500, lr = 1.3375\nI0820 19:32:09.494293 21769 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0820 19:34:20.746974 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8221\nI0820 19:34:20.747344 21769 solver.cpp:404]     Test net output #1: loss = 0.602604 (* 1 = 0.602604 loss)\nI0820 19:34:22.870090 21769 solver.cpp:228] Iteration 53600, loss = 0.149099\nI0820 19:34:22.870134 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 19:34:22.870151 21769 solver.cpp:244]     Train net output #1: loss = 0.149099 (* 1 = 0.149099 loss)\nI0820 19:34:22.958451 21769 sgd_solver.cpp:166] Iteration 53600, lr = 1.34\nI0820 19:38:01.164733 21769 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0820 19:40:12.361137 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7631\nI0820 19:40:12.361510 21769 solver.cpp:404]     Test net output #1: loss = 0.891427 (* 1 = 0.891427 loss)\nI0820 19:40:14.483685 21769 solver.cpp:228] Iteration 53700, loss = 0.248621\nI0820 19:40:14.483721 21769 
solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 19:40:14.483736 21769 solver.cpp:244]     Train net output #1: loss = 0.248621 (* 1 = 0.248621 loss)\nI0820 19:40:14.578212 21769 sgd_solver.cpp:166] Iteration 53700, lr = 1.3425\nI0820 19:43:52.739356 21769 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0820 19:46:03.948446 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8018\nI0820 19:46:03.948824 21769 solver.cpp:404]     Test net output #1: loss = 0.653838 (* 1 = 0.653838 loss)\nI0820 19:46:06.071116 21769 solver.cpp:228] Iteration 53800, loss = 0.0844759\nI0820 19:46:06.071163 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 19:46:06.071179 21769 solver.cpp:244]     Train net output #1: loss = 0.0844754 (* 1 = 0.0844754 loss)\nI0820 19:46:06.161473 21769 sgd_solver.cpp:166] Iteration 53800, lr = 1.345\nI0820 19:49:44.532055 21769 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0820 19:51:55.734153 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6077\nI0820 19:51:55.734532 21769 solver.cpp:404]     Test net output #1: loss = 2.41348 (* 1 = 2.41348 loss)\nI0820 19:51:57.856835 21769 solver.cpp:228] Iteration 53900, loss = 0.160815\nI0820 19:51:57.856879 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 19:51:57.856895 21769 solver.cpp:244]     Train net output #1: loss = 0.160815 (* 1 = 0.160815 loss)\nI0820 19:51:57.953065 21769 sgd_solver.cpp:166] Iteration 53900, lr = 1.3475\nI0820 19:55:36.315404 21769 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0820 19:57:47.537997 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8196\nI0820 19:57:47.538414 21769 solver.cpp:404]     Test net output #1: loss = 0.601431 (* 1 = 0.601431 loss)\nI0820 19:57:49.661034 21769 solver.cpp:228] Iteration 54000, loss = 0.20653\nI0820 19:57:49.661072 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 19:57:49.661087 21769 solver.cpp:244]     Train net output #1: loss 
= 0.20653 (* 1 = 0.20653 loss)\nI0820 19:57:49.759879 21769 sgd_solver.cpp:166] Iteration 54000, lr = 1.35\nI0820 20:01:28.149854 21769 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0820 20:03:39.360919 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7789\nI0820 20:03:39.361310 21769 solver.cpp:404]     Test net output #1: loss = 0.697959 (* 1 = 0.697959 loss)\nI0820 20:03:41.483258 21769 solver.cpp:228] Iteration 54100, loss = 0.180362\nI0820 20:03:41.483302 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 20:03:41.483319 21769 solver.cpp:244]     Train net output #1: loss = 0.180362 (* 1 = 0.180362 loss)\nI0820 20:03:41.580157 21769 sgd_solver.cpp:166] Iteration 54100, lr = 1.3525\nI0820 20:07:19.803680 21769 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0820 20:09:31.017303 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7954\nI0820 20:09:31.017688 21769 solver.cpp:404]     Test net output #1: loss = 0.789468 (* 1 = 0.789468 loss)\nI0820 20:09:33.140208 21769 solver.cpp:228] Iteration 54200, loss = 0.101501\nI0820 20:09:33.140256 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 20:09:33.140272 21769 solver.cpp:244]     Train net output #1: loss = 0.101501 (* 1 = 0.101501 loss)\nI0820 20:09:33.237962 21769 sgd_solver.cpp:166] Iteration 54200, lr = 1.355\nI0820 20:13:11.424047 21769 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0820 20:15:22.689203 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7534\nI0820 20:15:22.689591 21769 solver.cpp:404]     Test net output #1: loss = 0.996985 (* 1 = 0.996985 loss)\nI0820 20:15:24.811810 21769 solver.cpp:228] Iteration 54300, loss = 0.0833698\nI0820 20:15:24.811856 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 20:15:24.811872 21769 solver.cpp:244]     Train net output #1: loss = 0.0833693 (* 1 = 0.0833693 loss)\nI0820 20:15:24.909481 21769 sgd_solver.cpp:166] Iteration 54300, lr = 1.3575\nI0820 
20:19:03.048446 21769 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0820 20:21:14.314281 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8249\nI0820 20:21:14.314676 21769 solver.cpp:404]     Test net output #1: loss = 0.607625 (* 1 = 0.607625 loss)\nI0820 20:21:16.437955 21769 solver.cpp:228] Iteration 54400, loss = 0.167763\nI0820 20:21:16.437993 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 20:21:16.438006 21769 solver.cpp:244]     Train net output #1: loss = 0.167762 (* 1 = 0.167762 loss)\nI0820 20:21:16.527649 21769 sgd_solver.cpp:166] Iteration 54400, lr = 1.36\nI0820 20:24:54.151440 21769 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0820 20:27:05.412014 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7361\nI0820 20:27:05.412379 21769 solver.cpp:404]     Test net output #1: loss = 1.01493 (* 1 = 1.01493 loss)\nI0820 20:27:07.534313 21769 solver.cpp:228] Iteration 54500, loss = 0.152466\nI0820 20:27:07.534350 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 20:27:07.534364 21769 solver.cpp:244]     Train net output #1: loss = 0.152465 (* 1 = 0.152465 loss)\nI0820 20:27:07.619756 21769 sgd_solver.cpp:166] Iteration 54500, lr = 1.3625\nI0820 20:30:45.184612 21769 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0820 20:32:56.436887 21769 solver.cpp:404]     Test net output #0: accuracy = 0.844\nI0820 20:32:56.437271 21769 solver.cpp:404]     Test net output #1: loss = 0.551391 (* 1 = 0.551391 loss)\nI0820 20:32:58.559520 21769 solver.cpp:228] Iteration 54600, loss = 0.193011\nI0820 20:32:58.559556 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 20:32:58.559571 21769 solver.cpp:244]     Train net output #1: loss = 0.193011 (* 1 = 0.193011 loss)\nI0820 20:32:58.649250 21769 sgd_solver.cpp:166] Iteration 54600, lr = 1.365\nI0820 20:36:36.192363 21769 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0820 20:38:47.467486 21769 solver.cpp:404]     Test net output 
#0: accuracy = 0.8356\nI0820 20:38:47.467851 21769 solver.cpp:404]     Test net output #1: loss = 0.63984 (* 1 = 0.63984 loss)\nI0820 20:38:49.591267 21769 solver.cpp:228] Iteration 54700, loss = 0.130227\nI0820 20:38:49.591303 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 20:38:49.591318 21769 solver.cpp:244]     Train net output #1: loss = 0.130226 (* 1 = 0.130226 loss)\nI0820 20:38:49.677623 21769 sgd_solver.cpp:166] Iteration 54700, lr = 1.3675\nI0820 20:42:27.296023 21769 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0820 20:44:38.577031 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7459\nI0820 20:44:38.577394 21769 solver.cpp:404]     Test net output #1: loss = 1.04588 (* 1 = 1.04588 loss)\nI0820 20:44:40.699908 21769 solver.cpp:228] Iteration 54800, loss = 0.134969\nI0820 20:44:40.699945 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 20:44:40.699961 21769 solver.cpp:244]     Train net output #1: loss = 0.134969 (* 1 = 0.134969 loss)\nI0820 20:44:40.785169 21769 sgd_solver.cpp:166] Iteration 54800, lr = 1.37\nI0820 20:48:18.327401 21769 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0820 20:50:29.598564 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8159\nI0820 20:50:29.598920 21769 solver.cpp:404]     Test net output #1: loss = 0.641929 (* 1 = 0.641929 loss)\nI0820 20:50:31.721920 21769 solver.cpp:228] Iteration 54900, loss = 0.143751\nI0820 20:50:31.721957 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 20:50:31.721972 21769 solver.cpp:244]     Train net output #1: loss = 0.14375 (* 1 = 0.14375 loss)\nI0820 20:50:31.804525 21769 sgd_solver.cpp:166] Iteration 54900, lr = 1.3725\nI0820 20:54:09.424060 21769 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0820 20:56:20.683943 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7624\nI0820 20:56:20.684319 21769 solver.cpp:404]     Test net output #1: loss = 0.838586 (* 1 = 0.838586 loss)\nI0820 
20:56:22.806049 21769 solver.cpp:228] Iteration 55000, loss = 0.142364\nI0820 20:56:22.806085 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 20:56:22.806100 21769 solver.cpp:244]     Train net output #1: loss = 0.142363 (* 1 = 0.142363 loss)\nI0820 20:56:22.896844 21769 sgd_solver.cpp:166] Iteration 55000, lr = 1.375\nI0820 21:00:00.501111 21769 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0820 21:02:11.751217 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8074\nI0820 21:02:11.751616 21769 solver.cpp:404]     Test net output #1: loss = 0.645406 (* 1 = 0.645406 loss)\nI0820 21:02:13.874065 21769 solver.cpp:228] Iteration 55100, loss = 0.143037\nI0820 21:02:13.874112 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 21:02:13.874130 21769 solver.cpp:244]     Train net output #1: loss = 0.143037 (* 1 = 0.143037 loss)\nI0820 21:02:13.964334 21769 sgd_solver.cpp:166] Iteration 55100, lr = 1.3775\nI0820 21:05:51.588675 21769 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0820 21:08:02.845752 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7846\nI0820 21:08:02.846158 21769 solver.cpp:404]     Test net output #1: loss = 0.808458 (* 1 = 0.808458 loss)\nI0820 21:08:04.968262 21769 solver.cpp:228] Iteration 55200, loss = 0.206877\nI0820 21:08:04.968299 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0820 21:08:04.968314 21769 solver.cpp:244]     Train net output #1: loss = 0.206877 (* 1 = 0.206877 loss)\nI0820 21:08:05.064911 21769 sgd_solver.cpp:166] Iteration 55200, lr = 1.38\nI0820 21:11:42.944813 21769 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0820 21:13:54.209415 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8063\nI0820 21:13:54.209820 21769 solver.cpp:404]     Test net output #1: loss = 0.656207 (* 1 = 0.656207 loss)\nI0820 21:13:56.332350 21769 solver.cpp:228] Iteration 55300, loss = 0.0897761\nI0820 21:13:56.332394 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.96\nI0820 21:13:56.332412 21769 solver.cpp:244]     Train net output #1: loss = 0.0897756 (* 1 = 0.0897756 loss)\nI0820 21:13:56.429483 21769 sgd_solver.cpp:166] Iteration 55300, lr = 1.3825\nI0820 21:17:34.110123 21769 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0820 21:19:45.369601 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8253\nI0820 21:19:45.369985 21769 solver.cpp:404]     Test net output #1: loss = 0.634737 (* 1 = 0.634737 loss)\nI0820 21:19:47.492662 21769 solver.cpp:228] Iteration 55400, loss = 0.0837214\nI0820 21:19:47.492697 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 21:19:47.492712 21769 solver.cpp:244]     Train net output #1: loss = 0.0837209 (* 1 = 0.0837209 loss)\nI0820 21:19:47.581332 21769 sgd_solver.cpp:166] Iteration 55400, lr = 1.385\nI0820 21:23:25.121374 21769 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0820 21:25:36.371819 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7547\nI0820 21:25:36.372220 21769 solver.cpp:404]     Test net output #1: loss = 0.856161 (* 1 = 0.856161 loss)\nI0820 21:25:38.494814 21769 solver.cpp:228] Iteration 55500, loss = 0.311807\nI0820 21:25:38.494851 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 21:25:38.494866 21769 solver.cpp:244]     Train net output #1: loss = 0.311807 (* 1 = 0.311807 loss)\nI0820 21:25:38.582584 21769 sgd_solver.cpp:166] Iteration 55500, lr = 1.3875\nI0820 21:29:16.242059 21769 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0820 21:31:27.500744 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8515\nI0820 21:31:27.501143 21769 solver.cpp:404]     Test net output #1: loss = 0.528133 (* 1 = 0.528133 loss)\nI0820 21:31:29.623425 21769 solver.cpp:228] Iteration 55600, loss = 0.0809308\nI0820 21:31:29.623472 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 21:31:29.623488 21769 solver.cpp:244]     Train net output #1: loss = 0.0809303 (* 1 = 
0.0809303 loss)\nI0820 21:31:29.762384 21769 sgd_solver.cpp:166] Iteration 55600, lr = 1.39\nI0820 21:35:07.361475 21769 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0820 21:37:18.723034 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7577\nI0820 21:37:18.723428 21769 solver.cpp:404]     Test net output #1: loss = 0.909623 (* 1 = 0.909623 loss)\nI0820 21:37:20.847653 21769 solver.cpp:228] Iteration 55700, loss = 0.148821\nI0820 21:37:20.847699 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 21:37:20.847721 21769 solver.cpp:244]     Train net output #1: loss = 0.14882 (* 1 = 0.14882 loss)\nI0820 21:37:20.932432 21769 sgd_solver.cpp:166] Iteration 55700, lr = 1.3925\nI0820 21:40:58.665731 21769 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0820 21:43:09.960472 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8004\nI0820 21:43:09.960916 21769 solver.cpp:404]     Test net output #1: loss = 0.764761 (* 1 = 0.764761 loss)\nI0820 21:43:12.084115 21769 solver.cpp:228] Iteration 55800, loss = 0.0975628\nI0820 21:43:12.084167 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0820 21:43:12.084190 21769 solver.cpp:244]     Train net output #1: loss = 0.0975622 (* 1 = 0.0975622 loss)\nI0820 21:43:12.172250 21769 sgd_solver.cpp:166] Iteration 55800, lr = 1.395\nI0820 21:46:49.707151 21769 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0820 21:49:01.014611 21769 solver.cpp:404]     Test net output #0: accuracy = 0.833\nI0820 21:49:01.015017 21769 solver.cpp:404]     Test net output #1: loss = 0.592839 (* 1 = 0.592839 loss)\nI0820 21:49:03.138021 21769 solver.cpp:228] Iteration 55900, loss = 0.177103\nI0820 21:49:03.138061 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 21:49:03.138084 21769 solver.cpp:244]     Train net output #1: loss = 0.177102 (* 1 = 0.177102 loss)\nI0820 21:49:03.229986 21769 sgd_solver.cpp:166] Iteration 55900, lr = 1.3975\nI0820 21:52:40.914335 21769 
solver.cpp:337] Iteration 56000, Testing net (#0)\nI0820 21:54:52.166739 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7461\nI0820 21:54:52.167148 21769 solver.cpp:404]     Test net output #1: loss = 1.01325 (* 1 = 1.01325 loss)\nI0820 21:54:54.290601 21769 solver.cpp:228] Iteration 56000, loss = 0.181375\nI0820 21:54:54.290642 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 21:54:54.290664 21769 solver.cpp:244]     Train net output #1: loss = 0.181375 (* 1 = 0.181375 loss)\nI0820 21:54:54.374879 21769 sgd_solver.cpp:166] Iteration 56000, lr = 1.4\nI0820 21:58:32.041601 21769 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0820 22:00:43.290756 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7938\nI0820 22:00:43.291162 21769 solver.cpp:404]     Test net output #1: loss = 0.698161 (* 1 = 0.698161 loss)\nI0820 22:00:45.414580 21769 solver.cpp:228] Iteration 56100, loss = 0.140469\nI0820 22:00:45.414621 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:00:45.414644 21769 solver.cpp:244]     Train net output #1: loss = 0.140468 (* 1 = 0.140468 loss)\nI0820 22:00:45.507874 21769 sgd_solver.cpp:166] Iteration 56100, lr = 1.4025\nI0820 22:04:23.105861 21769 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0820 22:06:34.365790 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7899\nI0820 22:06:34.366184 21769 solver.cpp:404]     Test net output #1: loss = 0.840063 (* 1 = 0.840063 loss)\nI0820 22:06:36.490242 21769 solver.cpp:228] Iteration 56200, loss = 0.106496\nI0820 22:06:36.490283 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 22:06:36.490304 21769 solver.cpp:244]     Train net output #1: loss = 0.106495 (* 1 = 0.106495 loss)\nI0820 22:06:36.579627 21769 sgd_solver.cpp:166] Iteration 56200, lr = 1.405\nI0820 22:10:14.212539 21769 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0820 22:12:25.475567 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.8203\nI0820 22:12:25.475934 21769 solver.cpp:404]     Test net output #1: loss = 0.603363 (* 1 = 0.603363 loss)\nI0820 22:12:27.600567 21769 solver.cpp:228] Iteration 56300, loss = 0.113819\nI0820 22:12:27.600607 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 22:12:27.600631 21769 solver.cpp:244]     Train net output #1: loss = 0.113819 (* 1 = 0.113819 loss)\nI0820 22:12:27.684944 21769 sgd_solver.cpp:166] Iteration 56300, lr = 1.4075\nI0820 22:16:05.339702 21769 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0820 22:18:16.566973 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7616\nI0820 22:18:16.567368 21769 solver.cpp:404]     Test net output #1: loss = 0.880657 (* 1 = 0.880657 loss)\nI0820 22:18:18.690333 21769 solver.cpp:228] Iteration 56400, loss = 0.197932\nI0820 22:18:18.690374 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:18:18.690397 21769 solver.cpp:244]     Train net output #1: loss = 0.197931 (* 1 = 0.197931 loss)\nI0820 22:18:18.773025 21769 sgd_solver.cpp:166] Iteration 56400, lr = 1.41\nI0820 22:21:56.574460 21769 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0820 22:24:07.820984 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7998\nI0820 22:24:07.821409 21769 solver.cpp:404]     Test net output #1: loss = 0.685293 (* 1 = 0.685293 loss)\nI0820 22:24:09.945163 21769 solver.cpp:228] Iteration 56500, loss = 0.205856\nI0820 22:24:09.945202 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:24:09.945225 21769 solver.cpp:244]     Train net output #1: loss = 0.205855 (* 1 = 0.205855 loss)\nI0820 22:24:10.034796 21769 sgd_solver.cpp:166] Iteration 56500, lr = 1.4125\nI0820 22:27:47.682272 21769 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0820 22:29:58.951855 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8162\nI0820 22:29:58.952293 21769 solver.cpp:404]     Test net output #1: loss = 0.700745 (* 1 = 0.700745 loss)\nI0820 
22:30:01.075904 21769 solver.cpp:228] Iteration 56600, loss = 0.165359\nI0820 22:30:01.075944 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:30:01.075968 21769 solver.cpp:244]     Train net output #1: loss = 0.165359 (* 1 = 0.165359 loss)\nI0820 22:30:01.164363 21769 sgd_solver.cpp:166] Iteration 56600, lr = 1.415\nI0820 22:33:38.775935 21769 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0820 22:35:50.034962 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8497\nI0820 22:35:50.035370 21769 solver.cpp:404]     Test net output #1: loss = 0.474638 (* 1 = 0.474638 loss)\nI0820 22:35:52.158314 21769 solver.cpp:228] Iteration 56700, loss = 0.0566415\nI0820 22:35:52.158352 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 22:35:52.158368 21769 solver.cpp:244]     Train net output #1: loss = 0.056641 (* 1 = 0.056641 loss)\nI0820 22:35:52.243754 21769 sgd_solver.cpp:166] Iteration 56700, lr = 1.4175\nI0820 22:39:29.895313 21769 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0820 22:41:41.136561 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7573\nI0820 22:41:41.136977 21769 solver.cpp:404]     Test net output #1: loss = 1.10133 (* 1 = 1.10133 loss)\nI0820 22:41:43.259541 21769 solver.cpp:228] Iteration 56800, loss = 0.111651\nI0820 22:41:43.259577 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:41:43.259591 21769 solver.cpp:244]     Train net output #1: loss = 0.11165 (* 1 = 0.11165 loss)\nI0820 22:41:43.350987 21769 sgd_solver.cpp:166] Iteration 56800, lr = 1.42\nI0820 22:45:21.022334 21769 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0820 22:47:32.265810 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7531\nI0820 22:47:32.266222 21769 solver.cpp:404]     Test net output #1: loss = 1.05252 (* 1 = 1.05252 loss)\nI0820 22:47:34.389255 21769 solver.cpp:228] Iteration 56900, loss = 0.163545\nI0820 22:47:34.389291 21769 solver.cpp:244]     Train net output 
#0: accuracy = 0.94\nI0820 22:47:34.389305 21769 solver.cpp:244]     Train net output #1: loss = 0.163544 (* 1 = 0.163544 loss)\nI0820 22:47:34.476402 21769 sgd_solver.cpp:166] Iteration 56900, lr = 1.4225\nI0820 22:51:12.330291 21769 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0820 22:53:23.574091 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0820 22:53:23.574502 21769 solver.cpp:404]     Test net output #1: loss = 0.684936 (* 1 = 0.684936 loss)\nI0820 22:53:25.697811 21769 solver.cpp:228] Iteration 57000, loss = 0.189979\nI0820 22:53:25.697852 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 22:53:25.697875 21769 solver.cpp:244]     Train net output #1: loss = 0.189978 (* 1 = 0.189978 loss)\nI0820 22:53:25.786028 21769 sgd_solver.cpp:166] Iteration 57000, lr = 1.425\nI0820 22:57:03.328614 21769 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0820 22:59:14.726421 21769 solver.cpp:404]     Test net output #0: accuracy = 0.773\nI0820 22:59:14.726820 21769 solver.cpp:404]     Test net output #1: loss = 0.738201 (* 1 = 0.738201 loss)\nI0820 22:59:16.850213 21769 solver.cpp:228] Iteration 57100, loss = 0.20551\nI0820 22:59:16.850260 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 22:59:16.850276 21769 solver.cpp:244]     Train net output #1: loss = 0.205509 (* 1 = 0.205509 loss)\nI0820 22:59:16.946171 21769 sgd_solver.cpp:166] Iteration 57100, lr = 1.4275\nI0820 23:02:54.736951 21769 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0820 23:05:06.029193 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7597\nI0820 23:05:06.029597 21769 solver.cpp:404]     Test net output #1: loss = 0.84309 (* 1 = 0.84309 loss)\nI0820 23:05:08.153091 21769 solver.cpp:228] Iteration 57200, loss = 0.211477\nI0820 23:05:08.153131 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 23:05:08.153146 21769 solver.cpp:244]     Train net output #1: loss = 0.211476 (* 1 = 0.211476 loss)\nI0820 
23:05:08.246856 21769 sgd_solver.cpp:166] Iteration 57200, lr = 1.43\nI0820 23:08:46.573364 21769 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0820 23:10:57.846491 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8239\nI0820 23:10:57.846901 21769 solver.cpp:404]     Test net output #1: loss = 0.612799 (* 1 = 0.612799 loss)\nI0820 23:10:59.970304 21769 solver.cpp:228] Iteration 57300, loss = 0.0703022\nI0820 23:10:59.970351 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 23:10:59.970368 21769 solver.cpp:244]     Train net output #1: loss = 0.0703017 (* 1 = 0.0703017 loss)\nI0820 23:11:00.065331 21769 sgd_solver.cpp:166] Iteration 57300, lr = 1.4325\nI0820 23:14:38.327162 21769 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0820 23:16:49.605309 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8008\nI0820 23:16:49.605710 21769 solver.cpp:404]     Test net output #1: loss = 0.700276 (* 1 = 0.700276 loss)\nI0820 23:16:51.728806 21769 solver.cpp:228] Iteration 57400, loss = 0.315883\nI0820 23:16:51.728852 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0820 23:16:51.728869 21769 solver.cpp:244]     Train net output #1: loss = 0.315882 (* 1 = 0.315882 loss)\nI0820 23:16:51.822118 21769 sgd_solver.cpp:166] Iteration 57400, lr = 1.435\nI0820 23:20:30.092275 21769 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0820 23:22:41.370945 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6209\nI0820 23:22:41.371345 21769 solver.cpp:404]     Test net output #1: loss = 1.74536 (* 1 = 1.74536 loss)\nI0820 23:22:43.496892 21769 solver.cpp:228] Iteration 57500, loss = 0.0962552\nI0820 23:22:43.496943 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0820 23:22:43.496969 21769 solver.cpp:244]     Train net output #1: loss = 0.0962546 (* 1 = 0.0962546 loss)\nI0820 23:22:43.591469 21769 sgd_solver.cpp:166] Iteration 57500, lr = 1.4375\nI0820 23:26:21.949312 21769 solver.cpp:337] Iteration 
57600, Testing net (#0)\nI0820 23:28:33.238241 21769 solver.cpp:404]     Test net output #0: accuracy = 0.736\nI0820 23:28:33.238652 21769 solver.cpp:404]     Test net output #1: loss = 0.947109 (* 1 = 0.947109 loss)\nI0820 23:28:35.367410 21769 solver.cpp:228] Iteration 57600, loss = 0.253918\nI0820 23:28:35.367451 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 23:28:35.367475 21769 solver.cpp:244]     Train net output #1: loss = 0.253917 (* 1 = 0.253917 loss)\nI0820 23:28:35.455790 21769 sgd_solver.cpp:166] Iteration 57600, lr = 1.44\nI0820 23:32:13.799455 21769 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0820 23:34:25.137362 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8031\nI0820 23:34:25.137775 21769 solver.cpp:404]     Test net output #1: loss = 0.801336 (* 1 = 0.801336 loss)\nI0820 23:34:27.265441 21769 solver.cpp:228] Iteration 57700, loss = 0.216178\nI0820 23:34:27.265482 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0820 23:34:27.265504 21769 solver.cpp:244]     Train net output #1: loss = 0.216177 (* 1 = 0.216177 loss)\nI0820 23:34:27.357434 21769 sgd_solver.cpp:166] Iteration 57700, lr = 1.4425\nI0820 23:38:05.776391 21769 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0820 23:40:17.112119 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8279\nI0820 23:40:17.112571 21769 solver.cpp:404]     Test net output #1: loss = 0.591701 (* 1 = 0.591701 loss)\nI0820 23:40:19.242522 21769 solver.cpp:228] Iteration 57800, loss = 0.135848\nI0820 23:40:19.242563 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 23:40:19.242585 21769 solver.cpp:244]     Train net output #1: loss = 0.135848 (* 1 = 0.135848 loss)\nI0820 23:40:19.324087 21769 sgd_solver.cpp:166] Iteration 57800, lr = 1.445\nI0820 23:43:57.585912 21769 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0820 23:46:08.884387 21769 solver.cpp:404]     Test net output #0: accuracy = 0.826\nI0820 23:46:08.884794 
21769 solver.cpp:404]     Test net output #1: loss = 0.60163 (* 1 = 0.60163 loss)\nI0820 23:46:11.014477 21769 solver.cpp:228] Iteration 57900, loss = 0.164791\nI0820 23:46:11.014518 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 23:46:11.014540 21769 solver.cpp:244]     Train net output #1: loss = 0.164791 (* 1 = 0.164791 loss)\nI0820 23:46:11.104480 21769 sgd_solver.cpp:166] Iteration 57900, lr = 1.4475\nI0820 23:49:49.578166 21769 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0820 23:52:00.914482 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8004\nI0820 23:52:00.914897 21769 solver.cpp:404]     Test net output #1: loss = 0.777554 (* 1 = 0.777554 loss)\nI0820 23:52:03.044916 21769 solver.cpp:228] Iteration 58000, loss = 0.287541\nI0820 23:52:03.044955 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0820 23:52:03.044986 21769 solver.cpp:244]     Train net output #1: loss = 0.287541 (* 1 = 0.287541 loss)\nI0820 23:52:03.134834 21769 sgd_solver.cpp:166] Iteration 58000, lr = 1.45\nI0820 23:55:41.401554 21769 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0820 23:57:52.691097 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7057\nI0820 23:57:52.691505 21769 solver.cpp:404]     Test net output #1: loss = 1.30043 (* 1 = 1.30043 loss)\nI0820 23:57:54.820886 21769 solver.cpp:228] Iteration 58100, loss = 0.213601\nI0820 23:57:54.820926 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0820 23:57:54.820950 21769 solver.cpp:244]     Train net output #1: loss = 0.213601 (* 1 = 0.213601 loss)\nI0820 23:57:54.904706 21769 sgd_solver.cpp:166] Iteration 58100, lr = 1.4525\nI0821 00:01:33.101586 21769 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0821 00:03:44.375506 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8106\nI0821 00:03:44.375941 21769 solver.cpp:404]     Test net output #1: loss = 0.679929 (* 1 = 0.679929 loss)\nI0821 00:03:46.506335 21769 solver.cpp:228] 
Iteration 58200, loss = 0.0672458\nI0821 00:03:46.506376 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 00:03:46.506399 21769 solver.cpp:244]     Train net output #1: loss = 0.0672452 (* 1 = 0.0672452 loss)\nI0821 00:03:46.589742 21769 sgd_solver.cpp:166] Iteration 58200, lr = 1.455\nI0821 00:07:24.771728 21769 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0821 00:09:36.161550 21769 solver.cpp:404]     Test net output #0: accuracy = 0.842\nI0821 00:09:36.161968 21769 solver.cpp:404]     Test net output #1: loss = 0.512541 (* 1 = 0.512541 loss)\nI0821 00:09:38.294230 21769 solver.cpp:228] Iteration 58300, loss = 0.0757401\nI0821 00:09:38.294267 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 00:09:38.294283 21769 solver.cpp:244]     Train net output #1: loss = 0.0757395 (* 1 = 0.0757395 loss)\nI0821 00:09:38.372123 21769 sgd_solver.cpp:166] Iteration 58300, lr = 1.4575\nI0821 00:13:16.631742 21769 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0821 00:15:27.990666 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7366\nI0821 00:15:27.991052 21769 solver.cpp:404]     Test net output #1: loss = 1.0947 (* 1 = 1.0947 loss)\nI0821 00:15:30.119750 21769 solver.cpp:228] Iteration 58400, loss = 0.174321\nI0821 00:15:30.119787 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 00:15:30.119803 21769 solver.cpp:244]     Train net output #1: loss = 0.174321 (* 1 = 0.174321 loss)\nI0821 00:15:30.206143 21769 sgd_solver.cpp:166] Iteration 58400, lr = 1.46\nI0821 00:19:08.638140 21769 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0821 00:21:19.860935 21769 solver.cpp:404]     Test net output #0: accuracy = 0.809\nI0821 00:21:19.861338 21769 solver.cpp:404]     Test net output #1: loss = 0.61334 (* 1 = 0.61334 loss)\nI0821 00:21:21.989748 21769 solver.cpp:228] Iteration 58500, loss = 0.179765\nI0821 00:21:21.989787 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 
00:21:21.989804 21769 solver.cpp:244]     Train net output #1: loss = 0.179765 (* 1 = 0.179765 loss)\nI0821 00:21:22.078451 21769 sgd_solver.cpp:166] Iteration 58500, lr = 1.4625\nI0821 00:25:00.318380 21769 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0821 00:27:11.528620 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7736\nI0821 00:27:11.529036 21769 solver.cpp:404]     Test net output #1: loss = 0.940076 (* 1 = 0.940076 loss)\nI0821 00:27:13.657809 21769 solver.cpp:228] Iteration 58600, loss = 0.163052\nI0821 00:27:13.657843 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 00:27:13.657860 21769 solver.cpp:244]     Train net output #1: loss = 0.163052 (* 1 = 0.163052 loss)\nI0821 00:27:13.745396 21769 sgd_solver.cpp:166] Iteration 58600, lr = 1.465\nI0821 00:30:51.974970 21769 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0821 00:33:03.183375 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7758\nI0821 00:33:03.183797 21769 solver.cpp:404]     Test net output #1: loss = 0.799942 (* 1 = 0.799942 loss)\nI0821 00:33:05.311609 21769 solver.cpp:228] Iteration 58700, loss = 0.166873\nI0821 00:33:05.311645 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 00:33:05.311659 21769 solver.cpp:244]     Train net output #1: loss = 0.166872 (* 1 = 0.166872 loss)\nI0821 00:33:05.396049 21769 sgd_solver.cpp:166] Iteration 58700, lr = 1.4675\nI0821 00:36:43.701196 21769 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0821 00:38:54.922510 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8124\nI0821 00:38:54.922919 21769 solver.cpp:404]     Test net output #1: loss = 0.660899 (* 1 = 0.660899 loss)\nI0821 00:38:57.052338 21769 solver.cpp:228] Iteration 58800, loss = 0.0967749\nI0821 00:38:57.052373 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 00:38:57.052388 21769 solver.cpp:244]     Train net output #1: loss = 0.0967744 (* 1 = 0.0967744 loss)\nI0821 00:38:57.134049 
21769 sgd_solver.cpp:166] Iteration 58800, lr = 1.47\nI0821 00:42:35.297214 21769 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0821 00:44:47.245803 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7879\nI0821 00:44:47.246201 21769 solver.cpp:404]     Test net output #1: loss = 0.704136 (* 1 = 0.704136 loss)\nI0821 00:44:49.380713 21769 solver.cpp:228] Iteration 58900, loss = 0.109839\nI0821 00:44:49.380761 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 00:44:49.380777 21769 solver.cpp:244]     Train net output #1: loss = 0.109838 (* 1 = 0.109838 loss)\nI0821 00:44:49.460455 21769 sgd_solver.cpp:166] Iteration 58900, lr = 1.4725\nI0821 00:48:27.911612 21769 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0821 00:50:39.697666 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7806\nI0821 00:50:39.698043 21769 solver.cpp:404]     Test net output #1: loss = 0.721896 (* 1 = 0.721896 loss)\nI0821 00:50:41.832073 21769 solver.cpp:228] Iteration 59000, loss = 0.118463\nI0821 00:50:41.832132 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 00:50:41.832150 21769 solver.cpp:244]     Train net output #1: loss = 0.118463 (* 1 = 0.118463 loss)\nI0821 00:50:41.914783 21769 sgd_solver.cpp:166] Iteration 59000, lr = 1.475\nI0821 00:54:20.812158 21769 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0821 00:56:33.403403 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8106\nI0821 00:56:33.403802 21769 solver.cpp:404]     Test net output #1: loss = 0.69735 (* 1 = 0.69735 loss)\nI0821 00:56:35.537631 21769 solver.cpp:228] Iteration 59100, loss = 0.171668\nI0821 00:56:35.537678 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 00:56:35.537694 21769 solver.cpp:244]     Train net output #1: loss = 0.171668 (* 1 = 0.171668 loss)\nI0821 00:56:35.621264 21769 sgd_solver.cpp:166] Iteration 59100, lr = 1.4775\nI0821 01:00:14.485256 21769 solver.cpp:337] Iteration 59200, Testing net 
(#0)\nI0821 01:02:27.081257 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7797\nI0821 01:02:27.081698 21769 solver.cpp:404]     Test net output #1: loss = 0.795158 (* 1 = 0.795158 loss)\nI0821 01:02:29.215553 21769 solver.cpp:228] Iteration 59200, loss = 0.18052\nI0821 01:02:29.215610 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 01:02:29.215626 21769 solver.cpp:244]     Train net output #1: loss = 0.18052 (* 1 = 0.18052 loss)\nI0821 01:02:29.299895 21769 sgd_solver.cpp:166] Iteration 59200, lr = 1.48\nI0821 01:06:08.030767 21769 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0821 01:08:20.572546 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI0821 01:08:20.572947 21769 solver.cpp:404]     Test net output #1: loss = 0.633827 (* 1 = 0.633827 loss)\nI0821 01:08:22.706995 21769 solver.cpp:228] Iteration 59300, loss = 0.148572\nI0821 01:08:22.707051 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 01:08:22.707068 21769 solver.cpp:244]     Train net output #1: loss = 0.148571 (* 1 = 0.148571 loss)\nI0821 01:08:22.794822 21769 sgd_solver.cpp:166] Iteration 59300, lr = 1.4825\nI0821 01:12:01.645226 21769 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0821 01:14:14.235071 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7684\nI0821 01:14:14.235476 21769 solver.cpp:404]     Test net output #1: loss = 0.94136 (* 1 = 0.94136 loss)\nI0821 01:14:16.370082 21769 solver.cpp:228] Iteration 59400, loss = 0.185666\nI0821 01:14:16.370138 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 01:14:16.370156 21769 solver.cpp:244]     Train net output #1: loss = 0.185665 (* 1 = 0.185665 loss)\nI0821 01:14:16.455077 21769 sgd_solver.cpp:166] Iteration 59400, lr = 1.485\nI0821 01:17:55.473479 21769 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0821 01:20:08.065199 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7342\nI0821 01:20:08.065572 21769 solver.cpp:404]   
  Test net output #1: loss = 0.993153 (* 1 = 0.993153 loss)\nI0821 01:20:10.200053 21769 solver.cpp:228] Iteration 59500, loss = 0.169105\nI0821 01:20:10.200111 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 01:20:10.200129 21769 solver.cpp:244]     Train net output #1: loss = 0.169105 (* 1 = 0.169105 loss)\nI0821 01:20:10.288446 21769 sgd_solver.cpp:166] Iteration 59500, lr = 1.4875\nI0821 01:23:49.239012 21769 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0821 01:26:01.833114 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8063\nI0821 01:26:01.833515 21769 solver.cpp:404]     Test net output #1: loss = 0.702267 (* 1 = 0.702267 loss)\nI0821 01:26:03.968085 21769 solver.cpp:228] Iteration 59600, loss = 0.207514\nI0821 01:26:03.968142 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 01:26:03.968159 21769 solver.cpp:244]     Train net output #1: loss = 0.207514 (* 1 = 0.207514 loss)\nI0821 01:26:04.050855 21769 sgd_solver.cpp:166] Iteration 59600, lr = 1.49\nI0821 01:29:42.847349 21769 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0821 01:31:55.430697 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7924\nI0821 01:31:55.431097 21769 solver.cpp:404]     Test net output #1: loss = 0.719579 (* 1 = 0.719579 loss)\nI0821 01:31:57.565119 21769 solver.cpp:228] Iteration 59700, loss = 0.203237\nI0821 01:31:57.565176 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 01:31:57.565192 21769 solver.cpp:244]     Train net output #1: loss = 0.203236 (* 1 = 0.203236 loss)\nI0821 01:31:57.650063 21769 sgd_solver.cpp:166] Iteration 59700, lr = 1.4925\nI0821 01:35:36.839649 21769 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0821 01:37:49.416314 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8149\nI0821 01:37:49.416688 21769 solver.cpp:404]     Test net output #1: loss = 0.695739 (* 1 = 0.695739 loss)\nI0821 01:37:51.550824 21769 solver.cpp:228] Iteration 59800, loss = 
0.105298\nI0821 01:37:51.550889 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 01:37:51.550909 21769 solver.cpp:244]     Train net output #1: loss = 0.105297 (* 1 = 0.105297 loss)\nI0821 01:37:51.635860 21769 sgd_solver.cpp:166] Iteration 59800, lr = 1.495\nI0821 01:41:30.521910 21769 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0821 01:43:43.075312 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7798\nI0821 01:43:43.075759 21769 solver.cpp:404]     Test net output #1: loss = 0.782423 (* 1 = 0.782423 loss)\nI0821 01:43:45.210290 21769 solver.cpp:228] Iteration 59900, loss = 0.194377\nI0821 01:43:45.210346 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 01:43:45.210363 21769 solver.cpp:244]     Train net output #1: loss = 0.194377 (* 1 = 0.194377 loss)\nI0821 01:43:45.286289 21769 sgd_solver.cpp:166] Iteration 59900, lr = 1.4975\nI0821 01:47:23.959842 21769 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0821 01:49:36.444589 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8207\nI0821 01:49:36.444973 21769 solver.cpp:404]     Test net output #1: loss = 0.603496 (* 1 = 0.603496 loss)\nI0821 01:49:38.578487 21769 solver.cpp:228] Iteration 60000, loss = 0.320202\nI0821 01:49:38.578542 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 01:49:38.578559 21769 solver.cpp:244]     Train net output #1: loss = 0.320202 (* 1 = 0.320202 loss)\nI0821 01:49:38.658131 21769 sgd_solver.cpp:166] Iteration 60000, lr = 1.5\nI0821 01:53:17.452071 21769 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0821 01:55:29.987762 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7739\nI0821 01:55:29.988160 21769 solver.cpp:404]     Test net output #1: loss = 0.884281 (* 1 = 0.884281 loss)\nI0821 01:55:32.122472 21769 solver.cpp:228] Iteration 60100, loss = 0.230896\nI0821 01:55:32.122529 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 01:55:32.122546 21769 
solver.cpp:244]     Train net output #1: loss = 0.230896 (* 1 = 0.230896 loss)\nI0821 01:55:32.203238 21769 sgd_solver.cpp:166] Iteration 60100, lr = 1.5025\nI0821 01:59:11.123992 21769 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0821 02:01:23.678333 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7633\nI0821 02:01:23.678776 21769 solver.cpp:404]     Test net output #1: loss = 0.867437 (* 1 = 0.867437 loss)\nI0821 02:01:25.812333 21769 solver.cpp:228] Iteration 60200, loss = 0.102274\nI0821 02:01:25.812391 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 02:01:25.812408 21769 solver.cpp:244]     Train net output #1: loss = 0.102274 (* 1 = 0.102274 loss)\nI0821 02:01:25.898634 21769 sgd_solver.cpp:166] Iteration 60200, lr = 1.505\nI0821 02:05:04.952615 21769 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0821 02:07:17.542760 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7402\nI0821 02:07:17.543177 21769 solver.cpp:404]     Test net output #1: loss = 0.98553 (* 1 = 0.98553 loss)\nI0821 02:07:19.677444 21769 solver.cpp:228] Iteration 60300, loss = 0.182838\nI0821 02:07:19.677501 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 02:07:19.677520 21769 solver.cpp:244]     Train net output #1: loss = 0.182838 (* 1 = 0.182838 loss)\nI0821 02:07:19.761570 21769 sgd_solver.cpp:166] Iteration 60300, lr = 1.5075\nI0821 02:10:58.467865 21769 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0821 02:13:11.038801 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7918\nI0821 02:13:11.039243 21769 solver.cpp:404]     Test net output #1: loss = 0.828416 (* 1 = 0.828416 loss)\nI0821 02:13:13.173540 21769 solver.cpp:228] Iteration 60400, loss = 0.33872\nI0821 02:13:13.173596 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 02:13:13.173614 21769 solver.cpp:244]     Train net output #1: loss = 0.33872 (* 1 = 0.33872 loss)\nI0821 02:13:13.260424 21769 sgd_solver.cpp:166] Iteration 
60400, lr = 1.51\nI0821 02:16:52.098198 21769 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0821 02:19:04.645272 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7589\nI0821 02:19:04.645663 21769 solver.cpp:404]     Test net output #1: loss = 0.9352 (* 1 = 0.9352 loss)\nI0821 02:19:06.779731 21769 solver.cpp:228] Iteration 60500, loss = 0.177422\nI0821 02:19:06.779788 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 02:19:06.779804 21769 solver.cpp:244]     Train net output #1: loss = 0.177422 (* 1 = 0.177422 loss)\nI0821 02:19:06.862917 21769 sgd_solver.cpp:166] Iteration 60500, lr = 1.5125\nI0821 02:22:45.629480 21769 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0821 02:24:58.187563 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8026\nI0821 02:24:58.187961 21769 solver.cpp:404]     Test net output #1: loss = 0.640472 (* 1 = 0.640472 loss)\nI0821 02:25:00.322252 21769 solver.cpp:228] Iteration 60600, loss = 0.2704\nI0821 02:25:00.322309 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 02:25:00.322326 21769 solver.cpp:244]     Train net output #1: loss = 0.2704 (* 1 = 0.2704 loss)\nI0821 02:25:00.405092 21769 sgd_solver.cpp:166] Iteration 60600, lr = 1.515\nI0821 02:28:39.166523 21769 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0821 02:30:51.779863 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8105\nI0821 02:30:51.780287 21769 solver.cpp:404]     Test net output #1: loss = 0.677994 (* 1 = 0.677994 loss)\nI0821 02:30:53.914389 21769 solver.cpp:228] Iteration 60700, loss = 0.14912\nI0821 02:30:53.914445 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 02:30:53.914461 21769 solver.cpp:244]     Train net output #1: loss = 0.14912 (* 1 = 0.14912 loss)\nI0821 02:30:54.005918 21769 sgd_solver.cpp:166] Iteration 60700, lr = 1.5175\nI0821 02:34:32.855352 21769 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0821 02:36:45.438964 21769 solver.cpp:404]     
Test net output #0: accuracy = 0.7344\nI0821 02:36:45.439333 21769 solver.cpp:404]     Test net output #1: loss = 1.13802 (* 1 = 1.13802 loss)\nI0821 02:36:47.573894 21769 solver.cpp:228] Iteration 60800, loss = 0.110631\nI0821 02:36:47.573949 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 02:36:47.573966 21769 solver.cpp:244]     Train net output #1: loss = 0.110631 (* 1 = 0.110631 loss)\nI0821 02:36:47.663241 21769 sgd_solver.cpp:166] Iteration 60800, lr = 1.52\nI0821 02:40:26.619815 21769 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0821 02:42:39.223028 21769 solver.cpp:404]     Test net output #0: accuracy = 0.817\nI0821 02:42:39.223485 21769 solver.cpp:404]     Test net output #1: loss = 0.620409 (* 1 = 0.620409 loss)\nI0821 02:42:41.356454 21769 solver.cpp:228] Iteration 60900, loss = 0.251304\nI0821 02:42:41.356513 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 02:42:41.356528 21769 solver.cpp:244]     Train net output #1: loss = 0.251303 (* 1 = 0.251303 loss)\nI0821 02:42:41.442889 21769 sgd_solver.cpp:166] Iteration 60900, lr = 1.5225\nI0821 02:46:20.190783 21769 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0821 02:48:32.846694 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8333\nI0821 02:48:32.847090 21769 solver.cpp:404]     Test net output #1: loss = 0.540302 (* 1 = 0.540302 loss)\nI0821 02:48:34.981760 21769 solver.cpp:228] Iteration 61000, loss = 0.282345\nI0821 02:48:34.981817 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 02:48:34.981833 21769 solver.cpp:244]     Train net output #1: loss = 0.282344 (* 1 = 0.282344 loss)\nI0821 02:48:35.061297 21769 sgd_solver.cpp:166] Iteration 61000, lr = 1.525\nI0821 02:52:13.942898 21769 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0821 02:54:26.596632 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7258\nI0821 02:54:26.597043 21769 solver.cpp:404]     Test net output #1: loss = 1.08365 (* 1 = 1.08365 
loss)\nI0821 02:54:28.731345 21769 solver.cpp:228] Iteration 61100, loss = 0.122894\nI0821 02:54:28.731401 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 02:54:28.731417 21769 solver.cpp:244]     Train net output #1: loss = 0.122894 (* 1 = 0.122894 loss)\nI0821 02:54:28.816637 21769 sgd_solver.cpp:166] Iteration 61100, lr = 1.5275\nI0821 02:58:07.586823 21769 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0821 03:00:20.199393 21769 solver.cpp:404]     Test net output #0: accuracy = 0.826\nI0821 03:00:20.199808 21769 solver.cpp:404]     Test net output #1: loss = 0.602978 (* 1 = 0.602978 loss)\nI0821 03:00:22.334378 21769 solver.cpp:228] Iteration 61200, loss = 0.132715\nI0821 03:00:22.334434 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 03:00:22.334450 21769 solver.cpp:244]     Train net output #1: loss = 0.132714 (* 1 = 0.132714 loss)\nI0821 03:00:22.419507 21769 sgd_solver.cpp:166] Iteration 61200, lr = 1.53\nI0821 03:04:01.112102 21769 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0821 03:06:13.698572 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8099\nI0821 03:06:13.698954 21769 solver.cpp:404]     Test net output #1: loss = 0.718253 (* 1 = 0.718253 loss)\nI0821 03:06:15.833407 21769 solver.cpp:228] Iteration 61300, loss = 0.0740824\nI0821 03:06:15.833465 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0821 03:06:15.833482 21769 solver.cpp:244]     Train net output #1: loss = 0.0740819 (* 1 = 0.0740819 loss)\nI0821 03:06:15.919179 21769 sgd_solver.cpp:166] Iteration 61300, lr = 1.5325\nI0821 03:09:54.672544 21769 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0821 03:12:07.261847 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7939\nI0821 03:12:07.262266 21769 solver.cpp:404]     Test net output #1: loss = 0.743269 (* 1 = 0.743269 loss)\nI0821 03:12:09.396941 21769 solver.cpp:228] Iteration 61400, loss = 0.278434\nI0821 03:12:09.396998 21769 solver.cpp:244]   
  Train net output #0: accuracy = 0.93\nI0821 03:12:09.397016 21769 solver.cpp:244]     Train net output #1: loss = 0.278434 (* 1 = 0.278434 loss)\nI0821 03:12:09.480351 21769 sgd_solver.cpp:166] Iteration 61400, lr = 1.535\nI0821 03:15:48.180141 21769 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0821 03:18:00.780546 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7309\nI0821 03:18:00.780951 21769 solver.cpp:404]     Test net output #1: loss = 1.10544 (* 1 = 1.10544 loss)\nI0821 03:18:02.915431 21769 solver.cpp:228] Iteration 61500, loss = 0.155653\nI0821 03:18:02.915483 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 03:18:02.915500 21769 solver.cpp:244]     Train net output #1: loss = 0.155652 (* 1 = 0.155652 loss)\nI0821 03:18:03.011759 21769 sgd_solver.cpp:166] Iteration 61500, lr = 1.5375\nI0821 03:21:41.908203 21769 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0821 03:23:54.516129 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7209\nI0821 03:23:54.516589 21769 solver.cpp:404]     Test net output #1: loss = 1.02271 (* 1 = 1.02271 loss)\nI0821 03:23:56.650305 21769 solver.cpp:228] Iteration 61600, loss = 0.225321\nI0821 03:23:56.650360 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 03:23:56.650377 21769 solver.cpp:244]     Train net output #1: loss = 0.225321 (* 1 = 0.225321 loss)\nI0821 03:23:56.738229 21769 sgd_solver.cpp:166] Iteration 61600, lr = 1.54\nI0821 03:27:35.584321 21769 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0821 03:29:48.150053 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8089\nI0821 03:29:48.150425 21769 solver.cpp:404]     Test net output #1: loss = 0.570278 (* 1 = 0.570278 loss)\nI0821 03:29:50.284772 21769 solver.cpp:228] Iteration 61700, loss = 0.0852972\nI0821 03:29:50.284828 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 03:29:50.284845 21769 solver.cpp:244]     Train net output #1: loss = 0.0852967 (* 1 = 
0.0852967 loss)\nI0821 03:29:50.368683 21769 sgd_solver.cpp:166] Iteration 61700, lr = 1.5425\nI0821 03:33:29.032356 21769 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0821 03:35:41.586689 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8035\nI0821 03:35:41.587143 21769 solver.cpp:404]     Test net output #1: loss = 0.699888 (* 1 = 0.699888 loss)\nI0821 03:35:43.721113 21769 solver.cpp:228] Iteration 61800, loss = 0.0928971\nI0821 03:35:43.721170 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 03:35:43.721189 21769 solver.cpp:244]     Train net output #1: loss = 0.0928967 (* 1 = 0.0928967 loss)\nI0821 03:35:43.798130 21769 sgd_solver.cpp:166] Iteration 61800, lr = 1.545\nI0821 03:39:22.442770 21769 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0821 03:41:34.979161 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7925\nI0821 03:41:34.979580 21769 solver.cpp:404]     Test net output #1: loss = 0.711035 (* 1 = 0.711035 loss)\nI0821 03:41:37.113140 21769 solver.cpp:228] Iteration 61900, loss = 0.18316\nI0821 03:41:37.113195 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 03:41:37.113214 21769 solver.cpp:244]     Train net output #1: loss = 0.183159 (* 1 = 0.183159 loss)\nI0821 03:41:37.195901 21769 sgd_solver.cpp:166] Iteration 61900, lr = 1.5475\nI0821 03:45:15.864583 21769 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0821 03:47:28.404345 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8115\nI0821 03:47:28.404752 21769 solver.cpp:404]     Test net output #1: loss = 0.695955 (* 1 = 0.695955 loss)\nI0821 03:47:30.539331 21769 solver.cpp:228] Iteration 62000, loss = 0.135542\nI0821 03:47:30.539387 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 03:47:30.539405 21769 solver.cpp:244]     Train net output #1: loss = 0.135542 (* 1 = 0.135542 loss)\nI0821 03:47:30.626273 21769 sgd_solver.cpp:166] Iteration 62000, lr = 1.55\nI0821 03:51:09.652148 21769 
solver.cpp:337] Iteration 62100, Testing net (#0)\nI0821 03:53:22.235466 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8194\nI0821 03:53:22.235934 21769 solver.cpp:404]     Test net output #1: loss = 0.606376 (* 1 = 0.606376 loss)\nI0821 03:53:24.369403 21769 solver.cpp:228] Iteration 62100, loss = 0.100278\nI0821 03:53:24.369463 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 03:53:24.369480 21769 solver.cpp:244]     Train net output #1: loss = 0.100278 (* 1 = 0.100278 loss)\nI0821 03:53:24.449998 21769 sgd_solver.cpp:166] Iteration 62100, lr = 1.5525\nI0821 03:57:03.074200 21769 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0821 03:59:15.627301 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8119\nI0821 03:59:15.627696 21769 solver.cpp:404]     Test net output #1: loss = 0.622117 (* 1 = 0.622117 loss)\nI0821 03:59:17.760882 21769 solver.cpp:228] Iteration 62200, loss = 0.131124\nI0821 03:59:17.760941 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 03:59:17.760958 21769 solver.cpp:244]     Train net output #1: loss = 0.131124 (* 1 = 0.131124 loss)\nI0821 03:59:17.844182 21769 sgd_solver.cpp:166] Iteration 62200, lr = 1.555\nI0821 04:02:56.822432 21769 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0821 04:05:09.261070 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7946\nI0821 04:05:09.261481 21769 solver.cpp:404]     Test net output #1: loss = 0.797583 (* 1 = 0.797583 loss)\nI0821 04:05:11.394953 21769 solver.cpp:228] Iteration 62300, loss = 0.239836\nI0821 04:05:11.395009 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 04:05:11.395026 21769 solver.cpp:244]     Train net output #1: loss = 0.239836 (* 1 = 0.239836 loss)\nI0821 04:05:11.477043 21769 sgd_solver.cpp:166] Iteration 62300, lr = 1.5575\nI0821 04:08:50.497982 21769 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0821 04:11:03.029598 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.6478\nI0821 04:11:03.030016 21769 solver.cpp:404]     Test net output #1: loss = 1.61813 (* 1 = 1.61813 loss)\nI0821 04:11:05.163955 21769 solver.cpp:228] Iteration 62400, loss = 0.257515\nI0821 04:11:05.164013 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 04:11:05.164031 21769 solver.cpp:244]     Train net output #1: loss = 0.257514 (* 1 = 0.257514 loss)\nI0821 04:11:05.246600 21769 sgd_solver.cpp:166] Iteration 62400, lr = 1.56\nI0821 04:14:43.867609 21769 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0821 04:16:56.448482 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7327\nI0821 04:16:56.448901 21769 solver.cpp:404]     Test net output #1: loss = 0.942298 (* 1 = 0.942298 loss)\nI0821 04:16:58.582825 21769 solver.cpp:228] Iteration 62500, loss = 0.256929\nI0821 04:16:58.582881 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 04:16:58.582902 21769 solver.cpp:244]     Train net output #1: loss = 0.256929 (* 1 = 0.256929 loss)\nI0821 04:16:58.666188 21769 sgd_solver.cpp:166] Iteration 62500, lr = 1.5625\nI0821 04:20:37.303908 21769 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0821 04:22:49.881832 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7771\nI0821 04:22:49.882302 21769 solver.cpp:404]     Test net output #1: loss = 0.815003 (* 1 = 0.815003 loss)\nI0821 04:22:52.016561 21769 solver.cpp:228] Iteration 62600, loss = 0.292619\nI0821 04:22:52.016618 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 04:22:52.016634 21769 solver.cpp:244]     Train net output #1: loss = 0.292619 (* 1 = 0.292619 loss)\nI0821 04:22:52.097371 21769 sgd_solver.cpp:166] Iteration 62600, lr = 1.565\nI0821 04:26:30.909153 21769 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0821 04:28:43.101212 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7967\nI0821 04:28:43.101593 21769 solver.cpp:404]     Test net output #1: loss = 0.737856 (* 1 = 0.737856 loss)\nI0821 04:28:45.234642 
21769 solver.cpp:228] Iteration 62700, loss = 0.153429\nI0821 04:28:45.234699 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 04:28:45.234716 21769 solver.cpp:244]     Train net output #1: loss = 0.153429 (* 1 = 0.153429 loss)\nI0821 04:28:45.314522 21769 sgd_solver.cpp:166] Iteration 62700, lr = 1.5675\nI0821 04:32:24.074322 21769 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0821 04:34:36.261363 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8029\nI0821 04:34:36.261801 21769 solver.cpp:404]     Test net output #1: loss = 0.76131 (* 1 = 0.76131 loss)\nI0821 04:34:38.394810 21769 solver.cpp:228] Iteration 62800, loss = 0.105395\nI0821 04:34:38.394865 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 04:34:38.394882 21769 solver.cpp:244]     Train net output #1: loss = 0.105395 (* 1 = 0.105395 loss)\nI0821 04:34:38.480713 21769 sgd_solver.cpp:166] Iteration 62800, lr = 1.57\nI0821 04:38:17.441131 21769 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0821 04:40:29.832947 21769 solver.cpp:404]     Test net output #0: accuracy = 0.735\nI0821 04:40:29.833395 21769 solver.cpp:404]     Test net output #1: loss = 1.04411 (* 1 = 1.04411 loss)\nI0821 04:40:31.966774 21769 solver.cpp:228] Iteration 62900, loss = 0.184182\nI0821 04:40:31.966830 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 04:40:31.966847 21769 solver.cpp:244]     Train net output #1: loss = 0.184181 (* 1 = 0.184181 loss)\nI0821 04:40:32.051273 21769 sgd_solver.cpp:166] Iteration 62900, lr = 1.5725\nI0821 04:44:11.050937 21769 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0821 04:46:23.473698 21769 solver.cpp:404]     Test net output #0: accuracy = 0.774\nI0821 04:46:23.474052 21769 solver.cpp:404]     Test net output #1: loss = 0.772309 (* 1 = 0.772309 loss)\nI0821 04:46:25.607342 21769 solver.cpp:228] Iteration 63000, loss = 0.2197\nI0821 04:46:25.607398 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.93\nI0821 04:46:25.607414 21769 solver.cpp:244]     Train net output #1: loss = 0.2197 (* 1 = 0.2197 loss)\nI0821 04:46:25.688000 21769 sgd_solver.cpp:166] Iteration 63000, lr = 1.575\nI0821 04:50:04.547523 21769 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0821 04:52:16.905611 21769 solver.cpp:404]     Test net output #0: accuracy = 0.725\nI0821 04:52:16.906015 21769 solver.cpp:404]     Test net output #1: loss = 1.05948 (* 1 = 1.05948 loss)\nI0821 04:52:19.039719 21769 solver.cpp:228] Iteration 63100, loss = 0.244859\nI0821 04:52:19.039775 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 04:52:19.039793 21769 solver.cpp:244]     Train net output #1: loss = 0.244859 (* 1 = 0.244859 loss)\nI0821 04:52:19.120709 21769 sgd_solver.cpp:166] Iteration 63100, lr = 1.5775\nI0821 04:55:57.856487 21769 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0821 04:58:10.473554 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7481\nI0821 04:58:10.473955 21769 solver.cpp:404]     Test net output #1: loss = 1.00382 (* 1 = 1.00382 loss)\nI0821 04:58:12.607455 21769 solver.cpp:228] Iteration 63200, loss = 0.102744\nI0821 04:58:12.607511 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 04:58:12.607528 21769 solver.cpp:244]     Train net output #1: loss = 0.102743 (* 1 = 0.102743 loss)\nI0821 04:58:12.691771 21769 sgd_solver.cpp:166] Iteration 63200, lr = 1.58\nI0821 05:01:51.361363 21769 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0821 05:04:03.786237 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6185\nI0821 05:04:03.786712 21769 solver.cpp:404]     Test net output #1: loss = 1.51 (* 1 = 1.51 loss)\nI0821 05:04:05.919553 21769 solver.cpp:228] Iteration 63300, loss = 0.215805\nI0821 05:04:05.919610 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 05:04:05.919626 21769 solver.cpp:244]     Train net output #1: loss = 0.215805 (* 1 = 0.215805 loss)\nI0821 05:04:06.003036 21769 
sgd_solver.cpp:166] Iteration 63300, lr = 1.5825\nI0821 05:07:44.907603 21769 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0821 05:09:57.461058 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7694\nI0821 05:09:57.461455 21769 solver.cpp:404]     Test net output #1: loss = 0.832946 (* 1 = 0.832946 loss)\nI0821 05:09:59.594027 21769 solver.cpp:228] Iteration 63400, loss = 0.147661\nI0821 05:09:59.594084 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 05:09:59.594101 21769 solver.cpp:244]     Train net output #1: loss = 0.14766 (* 1 = 0.14766 loss)\nI0821 05:09:59.678081 21769 sgd_solver.cpp:166] Iteration 63400, lr = 1.585\nI0821 05:13:38.342694 21769 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0821 05:15:50.745556 21769 solver.cpp:404]     Test net output #0: accuracy = 0.726\nI0821 05:15:50.745965 21769 solver.cpp:404]     Test net output #1: loss = 1.18666 (* 1 = 1.18666 loss)\nI0821 05:15:52.878237 21769 solver.cpp:228] Iteration 63500, loss = 0.279969\nI0821 05:15:52.878293 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 05:15:52.878310 21769 solver.cpp:244]     Train net output #1: loss = 0.279968 (* 1 = 0.279968 loss)\nI0821 05:15:52.962095 21769 sgd_solver.cpp:166] Iteration 63500, lr = 1.5875\nI0821 05:19:31.880900 21769 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0821 05:21:44.212546 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7323\nI0821 05:21:44.212962 21769 solver.cpp:404]     Test net output #1: loss = 1.05727 (* 1 = 1.05727 loss)\nI0821 05:21:46.346501 21769 solver.cpp:228] Iteration 63600, loss = 0.271106\nI0821 05:21:46.346557 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 05:21:46.346575 21769 solver.cpp:244]     Train net output #1: loss = 0.271106 (* 1 = 0.271106 loss)\nI0821 05:21:46.432193 21769 sgd_solver.cpp:166] Iteration 63600, lr = 1.59\nI0821 05:25:25.104385 21769 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0821 
05:27:37.392843 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8122\nI0821 05:27:37.393227 21769 solver.cpp:404]     Test net output #1: loss = 0.650583 (* 1 = 0.650583 loss)\nI0821 05:27:39.526036 21769 solver.cpp:228] Iteration 63700, loss = 0.262302\nI0821 05:27:39.526093 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 05:27:39.526110 21769 solver.cpp:244]     Train net output #1: loss = 0.262301 (* 1 = 0.262301 loss)\nI0821 05:27:39.611969 21769 sgd_solver.cpp:166] Iteration 63700, lr = 1.5925\nI0821 05:31:18.224308 21769 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0821 05:33:30.577672 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7709\nI0821 05:33:30.578152 21769 solver.cpp:404]     Test net output #1: loss = 0.889211 (* 1 = 0.889211 loss)\nI0821 05:33:32.711277 21769 solver.cpp:228] Iteration 63800, loss = 0.12241\nI0821 05:33:32.711336 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 05:33:32.711352 21769 solver.cpp:244]     Train net output #1: loss = 0.12241 (* 1 = 0.12241 loss)\nI0821 05:33:32.796701 21769 sgd_solver.cpp:166] Iteration 63800, lr = 1.595\nI0821 05:37:11.644767 21769 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0821 05:39:24.262591 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7619\nI0821 05:39:24.263046 21769 solver.cpp:404]     Test net output #1: loss = 0.926829 (* 1 = 0.926829 loss)\nI0821 05:39:26.397073 21769 solver.cpp:228] Iteration 63900, loss = 0.263278\nI0821 05:39:26.397131 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 05:39:26.397147 21769 solver.cpp:244]     Train net output #1: loss = 0.263277 (* 1 = 0.263277 loss)\nI0821 05:39:26.477912 21769 sgd_solver.cpp:166] Iteration 63900, lr = 1.5975\nI0821 05:43:04.983846 21769 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0821 05:45:17.611239 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7982\nI0821 05:45:17.611691 21769 solver.cpp:404]     Test 
net output #1: loss = 0.757067 (* 1 = 0.757067 loss)\nI0821 05:45:19.745669 21769 solver.cpp:228] Iteration 64000, loss = 0.145424\nI0821 05:45:19.745726 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 05:45:19.745744 21769 solver.cpp:244]     Train net output #1: loss = 0.145424 (* 1 = 0.145424 loss)\nI0821 05:45:19.826354 21769 sgd_solver.cpp:166] Iteration 64000, lr = 1.6\nI0821 05:48:58.329031 21769 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0821 05:51:10.930707 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8302\nI0821 05:51:10.931177 21769 solver.cpp:404]     Test net output #1: loss = 0.569595 (* 1 = 0.569595 loss)\nI0821 05:51:13.065167 21769 solver.cpp:228] Iteration 64100, loss = 0.168775\nI0821 05:51:13.065224 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 05:51:13.065243 21769 solver.cpp:244]     Train net output #1: loss = 0.168774 (* 1 = 0.168774 loss)\nI0821 05:51:13.148458 21769 sgd_solver.cpp:166] Iteration 64100, lr = 1.6025\nI0821 05:54:51.754110 21769 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0821 05:57:04.376257 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7824\nI0821 05:57:04.376683 21769 solver.cpp:404]     Test net output #1: loss = 0.788678 (* 1 = 0.788678 loss)\nI0821 05:57:06.510640 21769 solver.cpp:228] Iteration 64200, loss = 0.111741\nI0821 05:57:06.510699 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 05:57:06.510716 21769 solver.cpp:244]     Train net output #1: loss = 0.11174 (* 1 = 0.11174 loss)\nI0821 05:57:06.604162 21769 sgd_solver.cpp:166] Iteration 64200, lr = 1.605\nI0821 06:00:45.399610 21769 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0821 06:02:56.685253 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8355\nI0821 06:02:56.685672 21769 solver.cpp:404]     Test net output #1: loss = 0.559989 (* 1 = 0.559989 loss)\nI0821 06:02:58.816117 21769 solver.cpp:228] Iteration 64300, loss = 
0.177856\nI0821 06:02:58.816156 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 06:02:58.816172 21769 solver.cpp:244]     Train net output #1: loss = 0.177856 (* 1 = 0.177856 loss)\nI0821 06:02:58.895859 21769 sgd_solver.cpp:166] Iteration 64300, lr = 1.6075\nI0821 06:06:36.504026 21769 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0821 06:08:47.808501 21769 solver.cpp:404]     Test net output #0: accuracy = 0.609\nI0821 06:08:47.808917 21769 solver.cpp:404]     Test net output #1: loss = 1.83766 (* 1 = 1.83766 loss)\nI0821 06:08:49.938479 21769 solver.cpp:228] Iteration 64400, loss = 0.205393\nI0821 06:08:49.938520 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 06:08:49.938544 21769 solver.cpp:244]     Train net output #1: loss = 0.205393 (* 1 = 0.205393 loss)\nI0821 06:08:50.022857 21769 sgd_solver.cpp:166] Iteration 64400, lr = 1.61\nI0821 06:12:27.761551 21769 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0821 06:14:39.042948 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7584\nI0821 06:14:39.043368 21769 solver.cpp:404]     Test net output #1: loss = 0.849015 (* 1 = 0.849015 loss)\nI0821 06:14:41.172142 21769 solver.cpp:228] Iteration 64500, loss = 0.175873\nI0821 06:14:41.172179 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 06:14:41.172195 21769 solver.cpp:244]     Train net output #1: loss = 0.175873 (* 1 = 0.175873 loss)\nI0821 06:14:41.255151 21769 sgd_solver.cpp:166] Iteration 64500, lr = 1.6125\nI0821 06:18:18.878232 21769 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0821 06:20:30.159942 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7857\nI0821 06:20:30.160362 21769 solver.cpp:404]     Test net output #1: loss = 0.655068 (* 1 = 0.655068 loss)\nI0821 06:20:32.289203 21769 solver.cpp:228] Iteration 64600, loss = 0.138603\nI0821 06:20:32.289240 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 06:20:32.289257 21769 
solver.cpp:244]     Train net output #1: loss = 0.138602 (* 1 = 0.138602 loss)\nI0821 06:20:32.369294 21769 sgd_solver.cpp:166] Iteration 64600, lr = 1.615\nI0821 06:24:09.954807 21769 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0821 06:26:21.228832 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8277\nI0821 06:26:21.229254 21769 solver.cpp:404]     Test net output #1: loss = 0.550545 (* 1 = 0.550545 loss)\nI0821 06:26:23.361649 21769 solver.cpp:228] Iteration 64700, loss = 0.12364\nI0821 06:26:23.361690 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 06:26:23.361711 21769 solver.cpp:244]     Train net output #1: loss = 0.12364 (* 1 = 0.12364 loss)\nI0821 06:26:23.432730 21769 sgd_solver.cpp:166] Iteration 64700, lr = 1.6175\nI0821 06:30:00.962672 21769 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0821 06:32:12.270349 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8155\nI0821 06:32:12.270731 21769 solver.cpp:404]     Test net output #1: loss = 0.619608 (* 1 = 0.619608 loss)\nI0821 06:32:14.399186 21769 solver.cpp:228] Iteration 64800, loss = 0.110966\nI0821 06:32:14.399224 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 06:32:14.399240 21769 solver.cpp:244]     Train net output #1: loss = 0.110965 (* 1 = 0.110965 loss)\nI0821 06:32:14.477852 21769 sgd_solver.cpp:166] Iteration 64800, lr = 1.62\nI0821 06:35:52.112481 21769 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0821 06:38:03.390667 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7285\nI0821 06:38:03.391026 21769 solver.cpp:404]     Test net output #1: loss = 1.08083 (* 1 = 1.08083 loss)\nI0821 06:38:05.519477 21769 solver.cpp:228] Iteration 64900, loss = 0.234246\nI0821 06:38:05.519515 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 06:38:05.519531 21769 solver.cpp:244]     Train net output #1: loss = 0.234245 (* 1 = 0.234245 loss)\nI0821 06:38:05.602818 21769 sgd_solver.cpp:166] Iteration 
64900, lr = 1.6225\nI0821 06:41:43.302363 21769 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0821 06:43:54.568568 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6445\nI0821 06:43:54.568979 21769 solver.cpp:404]     Test net output #1: loss = 1.74671 (* 1 = 1.74671 loss)\nI0821 06:43:56.696277 21769 solver.cpp:228] Iteration 65000, loss = 0.205664\nI0821 06:43:56.696315 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 06:43:56.696329 21769 solver.cpp:244]     Train net output #1: loss = 0.205663 (* 1 = 0.205663 loss)\nI0821 06:43:56.784304 21769 sgd_solver.cpp:166] Iteration 65000, lr = 1.625\nI0821 06:47:34.577343 21769 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0821 06:49:45.860647 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8123\nI0821 06:49:45.861055 21769 solver.cpp:404]     Test net output #1: loss = 0.609868 (* 1 = 0.609868 loss)\nI0821 06:49:47.989197 21769 solver.cpp:228] Iteration 65100, loss = 0.148424\nI0821 06:49:47.989233 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 06:49:47.989248 21769 solver.cpp:244]     Train net output #1: loss = 0.148424 (* 1 = 0.148424 loss)\nI0821 06:49:48.068233 21769 sgd_solver.cpp:166] Iteration 65100, lr = 1.6275\nI0821 06:53:25.729315 21769 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0821 06:55:36.995756 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7666\nI0821 06:55:36.996134 21769 solver.cpp:404]     Test net output #1: loss = 0.79119 (* 1 = 0.79119 loss)\nI0821 06:55:39.125365 21769 solver.cpp:228] Iteration 65200, loss = 0.238891\nI0821 06:55:39.125401 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 06:55:39.125416 21769 solver.cpp:244]     Train net output #1: loss = 0.23889 (* 1 = 0.23889 loss)\nI0821 06:55:39.208227 21769 sgd_solver.cpp:166] Iteration 65200, lr = 1.63\nI0821 06:59:16.707425 21769 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0821 07:01:27.941043 21769 
solver.cpp:404]     Test net output #0: accuracy = 0.7562\nI0821 07:01:27.941422 21769 solver.cpp:404]     Test net output #1: loss = 0.810618 (* 1 = 0.810618 loss)\nI0821 07:01:30.070927 21769 solver.cpp:228] Iteration 65300, loss = 0.101467\nI0821 07:01:30.070971 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 07:01:30.070997 21769 solver.cpp:244]     Train net output #1: loss = 0.101466 (* 1 = 0.101466 loss)\nI0821 07:01:30.153556 21769 sgd_solver.cpp:166] Iteration 65300, lr = 1.6325\nI0821 07:05:07.814337 21769 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0821 07:07:19.035975 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6849\nI0821 07:07:19.036388 21769 solver.cpp:404]     Test net output #1: loss = 1.29134 (* 1 = 1.29134 loss)\nI0821 07:07:21.165257 21769 solver.cpp:228] Iteration 65400, loss = 0.147599\nI0821 07:07:21.165298 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 07:07:21.165323 21769 solver.cpp:244]     Train net output #1: loss = 0.147599 (* 1 = 0.147599 loss)\nI0821 07:07:21.249300 21769 sgd_solver.cpp:166] Iteration 65400, lr = 1.635\nI0821 07:10:58.987059 21769 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0821 07:13:10.226292 21769 solver.cpp:404]     Test net output #0: accuracy = 0.82\nI0821 07:13:10.226655 21769 solver.cpp:404]     Test net output #1: loss = 0.559818 (* 1 = 0.559818 loss)\nI0821 07:13:12.355212 21769 solver.cpp:228] Iteration 65500, loss = 0.160161\nI0821 07:13:12.355252 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 07:13:12.355276 21769 solver.cpp:244]     Train net output #1: loss = 0.160161 (* 1 = 0.160161 loss)\nI0821 07:13:12.437005 21769 sgd_solver.cpp:166] Iteration 65500, lr = 1.6375\nI0821 07:16:49.994683 21769 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0821 07:19:01.225317 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8046\nI0821 07:19:01.225709 21769 solver.cpp:404]     Test net output #1: loss = 
0.627751 (* 1 = 0.627751 loss)\nI0821 07:19:03.354262 21769 solver.cpp:228] Iteration 65600, loss = 0.141033\nI0821 07:19:03.354301 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 07:19:03.354324 21769 solver.cpp:244]     Train net output #1: loss = 0.141033 (* 1 = 0.141033 loss)\nI0821 07:19:03.439368 21769 sgd_solver.cpp:166] Iteration 65600, lr = 1.64\nI0821 07:22:41.193367 21769 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0821 07:24:52.422482 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7759\nI0821 07:24:52.422865 21769 solver.cpp:404]     Test net output #1: loss = 0.77081 (* 1 = 0.77081 loss)\nI0821 07:24:54.551174 21769 solver.cpp:228] Iteration 65700, loss = 0.0783105\nI0821 07:24:54.551213 21769 solver.cpp:244]     Train net output #0: accuracy = 0.99\nI0821 07:24:54.551229 21769 solver.cpp:244]     Train net output #1: loss = 0.0783099 (* 1 = 0.0783099 loss)\nI0821 07:24:54.630030 21769 sgd_solver.cpp:166] Iteration 65700, lr = 1.6425\nI0821 07:28:32.450886 21769 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0821 07:30:43.681768 21769 solver.cpp:404]     Test net output #0: accuracy = 0.829\nI0821 07:30:43.682126 21769 solver.cpp:404]     Test net output #1: loss = 0.578903 (* 1 = 0.578903 loss)\nI0821 07:30:45.810411 21769 solver.cpp:228] Iteration 65800, loss = 0.140092\nI0821 07:30:45.810449 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 07:30:45.810466 21769 solver.cpp:244]     Train net output #1: loss = 0.140091 (* 1 = 0.140091 loss)\nI0821 07:30:45.894767 21769 sgd_solver.cpp:166] Iteration 65800, lr = 1.645\nI0821 07:34:23.596285 21769 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0821 07:36:34.862016 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8187\nI0821 07:36:34.862411 21769 solver.cpp:404]     Test net output #1: loss = 0.615376 (* 1 = 0.615376 loss)\nI0821 07:36:36.990069 21769 solver.cpp:228] Iteration 65900, loss = 0.263063\nI0821 07:36:36.990108 
21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 07:36:36.990123 21769 solver.cpp:244]     Train net output #1: loss = 0.263062 (* 1 = 0.263062 loss)\nI0821 07:36:37.068718 21769 sgd_solver.cpp:166] Iteration 65900, lr = 1.6475\nI0821 07:40:14.732620 21769 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0821 07:42:26.010532 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8366\nI0821 07:42:26.010924 21769 solver.cpp:404]     Test net output #1: loss = 0.536407 (* 1 = 0.536407 loss)\nI0821 07:42:28.138674 21769 solver.cpp:228] Iteration 66000, loss = 0.130792\nI0821 07:42:28.138715 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 07:42:28.138731 21769 solver.cpp:244]     Train net output #1: loss = 0.130791 (* 1 = 0.130791 loss)\nI0821 07:42:28.219003 21769 sgd_solver.cpp:166] Iteration 66000, lr = 1.65\nI0821 07:46:05.824733 21769 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0821 07:48:17.099017 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8193\nI0821 07:48:17.099423 21769 solver.cpp:404]     Test net output #1: loss = 0.560731 (* 1 = 0.560731 loss)\nI0821 07:48:19.228196 21769 solver.cpp:228] Iteration 66100, loss = 0.184564\nI0821 07:48:19.228233 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 07:48:19.228250 21769 solver.cpp:244]     Train net output #1: loss = 0.184563 (* 1 = 0.184563 loss)\nI0821 07:48:19.307970 21769 sgd_solver.cpp:166] Iteration 66100, lr = 1.6525\nI0821 07:51:57.144564 21769 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0821 07:54:08.419544 21769 solver.cpp:404]     Test net output #0: accuracy = 0.809\nI0821 07:54:08.419912 21769 solver.cpp:404]     Test net output #1: loss = 0.633569 (* 1 = 0.633569 loss)\nI0821 07:54:10.548893 21769 solver.cpp:228] Iteration 66200, loss = 0.221368\nI0821 07:54:10.548931 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 07:54:10.548948 21769 solver.cpp:244]     Train net output #1: 
loss = 0.221368 (* 1 = 0.221368 loss)\nI0821 07:54:10.628224 21769 sgd_solver.cpp:166] Iteration 66200, lr = 1.655\nI0821 07:57:48.163830 21769 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0821 07:59:59.445960 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8264\nI0821 07:59:59.446359 21769 solver.cpp:404]     Test net output #1: loss = 0.559659 (* 1 = 0.559659 loss)\nI0821 08:00:01.574283 21769 solver.cpp:228] Iteration 66300, loss = 0.229599\nI0821 08:00:01.574321 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:00:01.574335 21769 solver.cpp:244]     Train net output #1: loss = 0.229599 (* 1 = 0.229599 loss)\nI0821 08:00:01.654084 21769 sgd_solver.cpp:166] Iteration 66300, lr = 1.6575\nI0821 08:03:39.182958 21769 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0821 08:05:50.433759 21769 solver.cpp:404]     Test net output #0: accuracy = 0.812\nI0821 08:05:50.434140 21769 solver.cpp:404]     Test net output #1: loss = 0.629808 (* 1 = 0.629808 loss)\nI0821 08:05:52.562515 21769 solver.cpp:228] Iteration 66400, loss = 0.341114\nI0821 08:05:52.562552 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 08:05:52.562566 21769 solver.cpp:244]     Train net output #1: loss = 0.341113 (* 1 = 0.341113 loss)\nI0821 08:05:52.648192 21769 sgd_solver.cpp:166] Iteration 66400, lr = 1.66\nI0821 08:09:30.228698 21769 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0821 08:11:41.496758 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8192\nI0821 08:11:41.497169 21769 solver.cpp:404]     Test net output #1: loss = 0.562523 (* 1 = 0.562523 loss)\nI0821 08:11:43.625701 21769 solver.cpp:228] Iteration 66500, loss = 0.324987\nI0821 08:11:43.625743 21769 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 08:11:43.625758 21769 solver.cpp:244]     Train net output #1: loss = 0.324986 (* 1 = 0.324986 loss)\nI0821 08:11:43.711495 21769 sgd_solver.cpp:166] Iteration 66500, lr = 1.6625\nI0821 
08:15:21.387058 21769 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0821 08:17:32.656837 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8281\nI0821 08:17:32.657245 21769 solver.cpp:404]     Test net output #1: loss = 0.558409 (* 1 = 0.558409 loss)\nI0821 08:17:34.785286 21769 solver.cpp:228] Iteration 66600, loss = 0.208947\nI0821 08:17:34.785322 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:17:34.785337 21769 solver.cpp:244]     Train net output #1: loss = 0.208946 (* 1 = 0.208946 loss)\nI0821 08:17:34.863773 21769 sgd_solver.cpp:166] Iteration 66600, lr = 1.665\nI0821 08:21:12.591820 21769 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0821 08:23:23.840961 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6461\nI0821 08:23:23.841325 21769 solver.cpp:404]     Test net output #1: loss = 1.68515 (* 1 = 1.68515 loss)\nI0821 08:23:25.970032 21769 solver.cpp:228] Iteration 66700, loss = 0.173429\nI0821 08:23:25.970068 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 08:23:25.970084 21769 solver.cpp:244]     Train net output #1: loss = 0.173429 (* 1 = 0.173429 loss)\nI0821 08:23:26.048611 21769 sgd_solver.cpp:166] Iteration 66700, lr = 1.6675\nI0821 08:27:03.619436 21769 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0821 08:29:14.872968 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6624\nI0821 08:29:14.873360 21769 solver.cpp:404]     Test net output #1: loss = 1.34806 (* 1 = 1.34806 loss)\nI0821 08:29:17.002096 21769 solver.cpp:228] Iteration 66800, loss = 0.135892\nI0821 08:29:17.002132 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 08:29:17.002147 21769 solver.cpp:244]     Train net output #1: loss = 0.135891 (* 1 = 0.135891 loss)\nI0821 08:29:17.085733 21769 sgd_solver.cpp:166] Iteration 66800, lr = 1.67\nI0821 08:32:54.565852 21769 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0821 08:35:05.819429 21769 solver.cpp:404]     Test net output 
#0: accuracy = 0.7486\nI0821 08:35:05.819818 21769 solver.cpp:404]     Test net output #1: loss = 0.870705 (* 1 = 0.870705 loss)\nI0821 08:35:07.948621 21769 solver.cpp:228] Iteration 66900, loss = 0.187475\nI0821 08:35:07.948657 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 08:35:07.948671 21769 solver.cpp:244]     Train net output #1: loss = 0.187474 (* 1 = 0.187474 loss)\nI0821 08:35:08.026182 21769 sgd_solver.cpp:166] Iteration 66900, lr = 1.6725\nI0821 08:38:45.657366 21769 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0821 08:40:56.915110 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8205\nI0821 08:40:56.915495 21769 solver.cpp:404]     Test net output #1: loss = 0.606733 (* 1 = 0.606733 loss)\nI0821 08:40:59.043828 21769 solver.cpp:228] Iteration 67000, loss = 0.221317\nI0821 08:40:59.043864 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 08:40:59.043879 21769 solver.cpp:244]     Train net output #1: loss = 0.221316 (* 1 = 0.221316 loss)\nI0821 08:40:59.129442 21769 sgd_solver.cpp:166] Iteration 67000, lr = 1.675\nI0821 08:44:36.733392 21769 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0821 08:46:47.970660 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7721\nI0821 08:46:47.971071 21769 solver.cpp:404]     Test net output #1: loss = 0.834148 (* 1 = 0.834148 loss)\nI0821 08:46:50.098229 21769 solver.cpp:228] Iteration 67100, loss = 0.232646\nI0821 08:46:50.098268 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 08:46:50.098282 21769 solver.cpp:244]     Train net output #1: loss = 0.232645 (* 1 = 0.232645 loss)\nI0821 08:46:50.184937 21769 sgd_solver.cpp:166] Iteration 67100, lr = 1.6775\nI0821 08:50:27.715672 21769 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0821 08:52:38.973526 21769 solver.cpp:404]     Test net output #0: accuracy = 0.724\nI0821 08:52:38.973913 21769 solver.cpp:404]     Test net output #1: loss = 1.03271 (* 1 = 1.03271 loss)\nI0821 
08:52:41.102990 21769 solver.cpp:228] Iteration 67200, loss = 0.274882\nI0821 08:52:41.103026 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 08:52:41.103042 21769 solver.cpp:244]     Train net output #1: loss = 0.274881 (* 1 = 0.274881 loss)\nI0821 08:52:41.181308 21769 sgd_solver.cpp:166] Iteration 67200, lr = 1.68\nI0821 08:56:18.891517 21769 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0821 08:58:30.153187 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7792\nI0821 08:58:30.153578 21769 solver.cpp:404]     Test net output #1: loss = 0.732684 (* 1 = 0.732684 loss)\nI0821 08:58:32.282920 21769 solver.cpp:228] Iteration 67300, loss = 0.0847096\nI0821 08:58:32.282956 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 08:58:32.282973 21769 solver.cpp:244]     Train net output #1: loss = 0.0847089 (* 1 = 0.0847089 loss)\nI0821 08:58:32.362062 21769 sgd_solver.cpp:166] Iteration 67300, lr = 1.6825\nI0821 09:02:09.992244 21769 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0821 09:04:21.239570 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7081\nI0821 09:04:21.239989 21769 solver.cpp:404]     Test net output #1: loss = 1.05187 (* 1 = 1.05187 loss)\nI0821 09:04:23.369093 21769 solver.cpp:228] Iteration 67400, loss = 0.230029\nI0821 09:04:23.369133 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 09:04:23.369149 21769 solver.cpp:244]     Train net output #1: loss = 0.230028 (* 1 = 0.230028 loss)\nI0821 09:04:23.455974 21769 sgd_solver.cpp:166] Iteration 67400, lr = 1.685\nI0821 09:08:00.968783 21769 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0821 09:10:12.247159 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7662\nI0821 09:10:12.247510 21769 solver.cpp:404]     Test net output #1: loss = 0.872088 (* 1 = 0.872088 loss)\nI0821 09:10:14.376708 21769 solver.cpp:228] Iteration 67500, loss = 0.200148\nI0821 09:10:14.376749 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.93\nI0821 09:10:14.376772 21769 solver.cpp:244]     Train net output #1: loss = 0.200147 (* 1 = 0.200147 loss)\nI0821 09:10:14.456802 21769 sgd_solver.cpp:166] Iteration 67500, lr = 1.6875\nI0821 09:13:51.953043 21769 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0821 09:16:03.172129 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8103\nI0821 09:16:03.172560 21769 solver.cpp:404]     Test net output #1: loss = 0.653145 (* 1 = 0.653145 loss)\nI0821 09:16:05.301653 21769 solver.cpp:228] Iteration 67600, loss = 0.271759\nI0821 09:16:05.301694 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 09:16:05.301718 21769 solver.cpp:244]     Train net output #1: loss = 0.271759 (* 1 = 0.271759 loss)\nI0821 09:16:05.381355 21769 sgd_solver.cpp:166] Iteration 67600, lr = 1.69\nI0821 09:19:42.825662 21769 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0821 09:21:54.045931 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8311\nI0821 09:21:54.046345 21769 solver.cpp:404]     Test net output #1: loss = 0.533256 (* 1 = 0.533256 loss)\nI0821 09:21:56.174064 21769 solver.cpp:228] Iteration 67700, loss = 0.0922763\nI0821 09:21:56.174106 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:21:56.174129 21769 solver.cpp:244]     Train net output #1: loss = 0.0922755 (* 1 = 0.0922755 loss)\nI0821 09:21:56.259658 21769 sgd_solver.cpp:166] Iteration 67700, lr = 1.6925\nI0821 09:25:33.761749 21769 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0821 09:27:44.991044 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7772\nI0821 09:27:44.991446 21769 solver.cpp:404]     Test net output #1: loss = 0.738914 (* 1 = 0.738914 loss)\nI0821 09:27:47.121065 21769 solver.cpp:228] Iteration 67800, loss = 0.159053\nI0821 09:27:47.121100 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 09:27:47.121114 21769 solver.cpp:244]     Train net output #1: loss = 0.159052 (* 1 = 0.159052 
loss)\nI0821 09:27:47.198302 21769 sgd_solver.cpp:166] Iteration 67800, lr = 1.695\nI0821 09:31:24.710512 21769 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0821 09:33:35.931656 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8045\nI0821 09:33:35.932046 21769 solver.cpp:404]     Test net output #1: loss = 0.640817 (* 1 = 0.640817 loss)\nI0821 09:33:38.061336 21769 solver.cpp:228] Iteration 67900, loss = 0.230759\nI0821 09:33:38.061373 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:33:38.061388 21769 solver.cpp:244]     Train net output #1: loss = 0.230758 (* 1 = 0.230758 loss)\nI0821 09:33:38.146999 21769 sgd_solver.cpp:166] Iteration 67900, lr = 1.6975\nI0821 09:37:15.702584 21769 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0821 09:39:26.917250 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7506\nI0821 09:39:26.917605 21769 solver.cpp:404]     Test net output #1: loss = 0.85856 (* 1 = 0.85856 loss)\nI0821 09:39:29.046136 21769 solver.cpp:228] Iteration 68000, loss = 0.301674\nI0821 09:39:29.046175 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 09:39:29.046188 21769 solver.cpp:244]     Train net output #1: loss = 0.301674 (* 1 = 0.301674 loss)\nI0821 09:39:29.124692 21769 sgd_solver.cpp:166] Iteration 68000, lr = 1.7\nI0821 09:43:06.683120 21769 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0821 09:45:17.911564 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7482\nI0821 09:45:17.911974 21769 solver.cpp:404]     Test net output #1: loss = 0.877232 (* 1 = 0.877232 loss)\nI0821 09:45:20.044689 21769 solver.cpp:228] Iteration 68100, loss = 0.193293\nI0821 09:45:20.044730 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 09:45:20.044754 21769 solver.cpp:244]     Train net output #1: loss = 0.193292 (* 1 = 0.193292 loss)\nI0821 09:45:20.129683 21769 sgd_solver.cpp:166] Iteration 68100, lr = 1.7025\nI0821 09:48:57.979177 21769 solver.cpp:337] 
Iteration 68200, Testing net (#0)\nI0821 09:51:09.249620 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7997\nI0821 09:51:09.249994 21769 solver.cpp:404]     Test net output #1: loss = 0.731471 (* 1 = 0.731471 loss)\nI0821 09:51:11.378705 21769 solver.cpp:228] Iteration 68200, loss = 0.164482\nI0821 09:51:11.378746 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 09:51:11.378767 21769 solver.cpp:244]     Train net output #1: loss = 0.164481 (* 1 = 0.164481 loss)\nI0821 09:51:11.461755 21769 sgd_solver.cpp:166] Iteration 68200, lr = 1.705\nI0821 09:54:49.113404 21769 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0821 09:57:00.379472 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7526\nI0821 09:57:00.379834 21769 solver.cpp:404]     Test net output #1: loss = 0.978613 (* 1 = 0.978613 loss)\nI0821 09:57:02.502946 21769 solver.cpp:228] Iteration 68300, loss = 0.119111\nI0821 09:57:02.502985 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 09:57:02.503010 21769 solver.cpp:244]     Train net output #1: loss = 0.11911 (* 1 = 0.11911 loss)\nI0821 09:57:02.593971 21769 sgd_solver.cpp:166] Iteration 68300, lr = 1.7075\nI0821 10:00:40.097095 21769 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0821 10:02:51.371708 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7491\nI0821 10:02:51.372113 21769 solver.cpp:404]     Test net output #1: loss = 0.879679 (* 1 = 0.879679 loss)\nI0821 10:02:53.495281 21769 solver.cpp:228] Iteration 68400, loss = 0.285462\nI0821 10:02:53.495332 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:02:53.495359 21769 solver.cpp:244]     Train net output #1: loss = 0.285461 (* 1 = 0.285461 loss)\nI0821 10:02:53.586372 21769 sgd_solver.cpp:166] Iteration 68400, lr = 1.71\nI0821 10:06:31.175667 21769 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0821 10:08:42.452805 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7511\nI0821 
10:08:42.453224 21769 solver.cpp:404]     Test net output #1: loss = 0.801757 (* 1 = 0.801757 loss)\nI0821 10:08:44.577147 21769 solver.cpp:228] Iteration 68500, loss = 0.300393\nI0821 10:08:44.577188 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 10:08:44.577211 21769 solver.cpp:244]     Train net output #1: loss = 0.300392 (* 1 = 0.300392 loss)\nI0821 10:08:44.658577 21769 sgd_solver.cpp:166] Iteration 68500, lr = 1.7125\nI0821 10:12:22.169715 21769 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0821 10:14:33.448791 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7293\nI0821 10:14:33.449214 21769 solver.cpp:404]     Test net output #1: loss = 1.1364 (* 1 = 1.1364 loss)\nI0821 10:14:35.572417 21769 solver.cpp:228] Iteration 68600, loss = 0.191385\nI0821 10:14:35.572466 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 10:14:35.572491 21769 solver.cpp:244]     Train net output #1: loss = 0.191384 (* 1 = 0.191384 loss)\nI0821 10:14:35.663591 21769 sgd_solver.cpp:166] Iteration 68600, lr = 1.715\nI0821 10:18:13.128692 21769 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0821 10:20:24.401540 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7966\nI0821 10:20:24.401952 21769 solver.cpp:404]     Test net output #1: loss = 0.699272 (* 1 = 0.699272 loss)\nI0821 10:20:26.524528 21769 solver.cpp:228] Iteration 68700, loss = 0.165831\nI0821 10:20:26.524569 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 10:20:26.524591 21769 solver.cpp:244]     Train net output #1: loss = 0.16583 (* 1 = 0.16583 loss)\nI0821 10:20:26.609016 21769 sgd_solver.cpp:166] Iteration 68700, lr = 1.7175\nI0821 10:24:04.182886 21769 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0821 10:26:15.451381 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7936\nI0821 10:26:15.451772 21769 solver.cpp:404]     Test net output #1: loss = 0.734489 (* 1 = 0.734489 loss)\nI0821 10:26:17.574695 21769 
solver.cpp:228] Iteration 68800, loss = 0.147307\nI0821 10:26:17.574735 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 10:26:17.574759 21769 solver.cpp:244]     Train net output #1: loss = 0.147307 (* 1 = 0.147307 loss)\nI0821 10:26:17.657104 21769 sgd_solver.cpp:166] Iteration 68800, lr = 1.72\nI0821 10:29:55.226912 21769 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0821 10:32:06.501400 21769 solver.cpp:404]     Test net output #0: accuracy = 0.744\nI0821 10:32:06.501741 21769 solver.cpp:404]     Test net output #1: loss = 0.921998 (* 1 = 0.921998 loss)\nI0821 10:32:08.626569 21769 solver.cpp:228] Iteration 68900, loss = 0.282896\nI0821 10:32:08.626608 21769 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 10:32:08.626631 21769 solver.cpp:244]     Train net output #1: loss = 0.282895 (* 1 = 0.282895 loss)\nI0821 10:32:08.708099 21769 sgd_solver.cpp:166] Iteration 68900, lr = 1.7225\nI0821 10:35:46.352460 21769 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0821 10:37:57.622870 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7486\nI0821 10:37:57.623265 21769 solver.cpp:404]     Test net output #1: loss = 0.8059 (* 1 = 0.8059 loss)\nI0821 10:37:59.747350 21769 solver.cpp:228] Iteration 69000, loss = 0.255034\nI0821 10:37:59.747391 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 10:37:59.747416 21769 solver.cpp:244]     Train net output #1: loss = 0.255033 (* 1 = 0.255033 loss)\nI0821 10:37:59.833294 21769 sgd_solver.cpp:166] Iteration 69000, lr = 1.725\nI0821 10:41:37.570587 21769 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0821 10:43:48.848309 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7182\nI0821 10:43:48.848716 21769 solver.cpp:404]     Test net output #1: loss = 1.02991 (* 1 = 1.02991 loss)\nI0821 10:43:50.972035 21769 solver.cpp:228] Iteration 69100, loss = 0.41753\nI0821 10:43:50.972075 21769 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 
10:43:50.972097 21769 solver.cpp:244]     Train net output #1: loss = 0.41753 (* 1 = 0.41753 loss)\nI0821 10:43:51.056385 21769 sgd_solver.cpp:166] Iteration 69100, lr = 1.7275\nI0821 10:47:28.606818 21769 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0821 10:49:39.873033 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8382\nI0821 10:49:39.873457 21769 solver.cpp:404]     Test net output #1: loss = 0.505038 (* 1 = 0.505038 loss)\nI0821 10:49:41.996891 21769 solver.cpp:228] Iteration 69200, loss = 0.189258\nI0821 10:49:41.996932 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 10:49:41.996959 21769 solver.cpp:244]     Train net output #1: loss = 0.189257 (* 1 = 0.189257 loss)\nI0821 10:49:42.085970 21769 sgd_solver.cpp:166] Iteration 69200, lr = 1.73\nI0821 10:53:19.724257 21769 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0821 10:55:30.974804 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7765\nI0821 10:55:30.975227 21769 solver.cpp:404]     Test net output #1: loss = 0.896186 (* 1 = 0.896186 loss)\nI0821 10:55:33.098707 21769 solver.cpp:228] Iteration 69300, loss = 0.116118\nI0821 10:55:33.098748 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 10:55:33.098773 21769 solver.cpp:244]     Train net output #1: loss = 0.116117 (* 1 = 0.116117 loss)\nI0821 10:55:33.181330 21769 sgd_solver.cpp:166] Iteration 69300, lr = 1.7325\nI0821 10:59:10.772881 21769 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0821 11:01:22.042310 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7027\nI0821 11:01:22.042701 21769 solver.cpp:404]     Test net output #1: loss = 1.19645 (* 1 = 1.19645 loss)\nI0821 11:01:24.165875 21769 solver.cpp:228] Iteration 69400, loss = 0.366983\nI0821 11:01:24.165925 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 11:01:24.165956 21769 solver.cpp:244]     Train net output #1: loss = 0.366982 (* 1 = 0.366982 loss)\nI0821 11:01:24.246327 21769 
sgd_solver.cpp:166] Iteration 69400, lr = 1.735\nI0821 11:05:01.739747 21769 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0821 11:07:13.012666 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8341\nI0821 11:07:13.013075 21769 solver.cpp:404]     Test net output #1: loss = 0.508082 (* 1 = 0.508082 loss)\nI0821 11:07:15.136040 21769 solver.cpp:228] Iteration 69500, loss = 0.172698\nI0821 11:07:15.136077 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 11:07:15.136093 21769 solver.cpp:244]     Train net output #1: loss = 0.172697 (* 1 = 0.172697 loss)\nI0821 11:07:15.222121 21769 sgd_solver.cpp:166] Iteration 69500, lr = 1.7375\nI0821 11:10:52.875794 21769 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0821 11:13:04.147816 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7298\nI0821 11:13:04.148226 21769 solver.cpp:404]     Test net output #1: loss = 0.932072 (* 1 = 0.932072 loss)\nI0821 11:13:06.271201 21769 solver.cpp:228] Iteration 69600, loss = 0.255196\nI0821 11:13:06.271248 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 11:13:06.271265 21769 solver.cpp:244]     Train net output #1: loss = 0.255195 (* 1 = 0.255195 loss)\nI0821 11:13:06.367147 21769 sgd_solver.cpp:166] Iteration 69600, lr = 1.74\nI0821 11:16:43.945690 21769 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0821 11:18:55.205415 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8496\nI0821 11:18:55.205823 21769 solver.cpp:404]     Test net output #1: loss = 0.485307 (* 1 = 0.485307 loss)\nI0821 11:18:57.329499 21769 solver.cpp:228] Iteration 69700, loss = 0.168303\nI0821 11:18:57.329547 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 11:18:57.329573 21769 solver.cpp:244]     Train net output #1: loss = 0.168302 (* 1 = 0.168302 loss)\nI0821 11:18:57.410583 21769 sgd_solver.cpp:166] Iteration 69700, lr = 1.7425\nI0821 11:22:35.070611 21769 solver.cpp:337] Iteration 69800, Testing net 
(#0)\nI0821 11:24:46.344835 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7489\nI0821 11:24:46.345222 21769 solver.cpp:404]     Test net output #1: loss = 0.890532 (* 1 = 0.890532 loss)\nI0821 11:24:48.474838 21769 solver.cpp:228] Iteration 69800, loss = 0.128155\nI0821 11:24:48.474877 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 11:24:48.474901 21769 solver.cpp:244]     Train net output #1: loss = 0.128154 (* 1 = 0.128154 loss)\nI0821 11:24:48.555541 21769 sgd_solver.cpp:166] Iteration 69800, lr = 1.745\nI0821 11:28:26.088310 21769 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0821 11:30:37.349949 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7796\nI0821 11:30:37.350332 21769 solver.cpp:404]     Test net output #1: loss = 0.705884 (* 1 = 0.705884 loss)\nI0821 11:30:39.479732 21769 solver.cpp:228] Iteration 69900, loss = 0.193132\nI0821 11:30:39.479769 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 11:30:39.479785 21769 solver.cpp:244]     Train net output #1: loss = 0.193132 (* 1 = 0.193132 loss)\nI0821 11:30:39.563740 21769 sgd_solver.cpp:166] Iteration 69900, lr = 1.7475\nI0821 11:34:17.099385 21769 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0821 11:36:28.340320 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7085\nI0821 11:36:28.340735 21769 solver.cpp:404]     Test net output #1: loss = 1.24891 (* 1 = 1.24891 loss)\nI0821 11:36:30.464519 21769 solver.cpp:228] Iteration 70000, loss = 0.27584\nI0821 11:36:30.464560 21769 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 11:36:30.464583 21769 solver.cpp:244]     Train net output #1: loss = 0.275839 (* 1 = 0.275839 loss)\nI0821 11:36:30.546125 21769 sgd_solver.cpp:166] Iteration 70000, lr = 1.75\nI0821 11:40:08.111960 21769 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0821 11:42:19.335697 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7352\nI0821 11:42:19.336107 21769 solver.cpp:404] 
    Test net output #1: loss = 0.875397 (* 1 = 0.875397 loss)\nI0821 11:42:21.459883 21769 solver.cpp:228] Iteration 70100, loss = 0.214595\nI0821 11:42:21.459924 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 11:42:21.459954 21769 solver.cpp:244]     Train net output #1: loss = 0.214594 (* 1 = 0.214594 loss)\nI0821 11:42:21.544159 21769 sgd_solver.cpp:166] Iteration 70100, lr = 1.7525\nI0821 11:45:59.090204 21769 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0821 11:48:10.314594 21769 solver.cpp:404]     Test net output #0: accuracy = 0.5869\nI0821 11:48:10.315024 21769 solver.cpp:404]     Test net output #1: loss = 2.42094 (* 1 = 2.42094 loss)\nI0821 11:48:12.438072 21769 solver.cpp:228] Iteration 70200, loss = 0.320656\nI0821 11:48:12.438122 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 11:48:12.438148 21769 solver.cpp:244]     Train net output #1: loss = 0.320655 (* 1 = 0.320655 loss)\nI0821 11:48:12.530279 21769 sgd_solver.cpp:166] Iteration 70200, lr = 1.755\nI0821 11:51:50.039382 21769 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0821 11:54:01.270193 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7474\nI0821 11:54:01.270620 21769 solver.cpp:404]     Test net output #1: loss = 0.978102 (* 1 = 0.978102 loss)\nI0821 11:54:03.393123 21769 solver.cpp:228] Iteration 70300, loss = 0.205636\nI0821 11:54:03.393160 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 11:54:03.393177 21769 solver.cpp:244]     Train net output #1: loss = 0.205635 (* 1 = 0.205635 loss)\nI0821 11:54:03.475548 21769 sgd_solver.cpp:166] Iteration 70300, lr = 1.7575\nI0821 11:57:40.956305 21769 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0821 11:59:52.176983 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7804\nI0821 11:59:52.177386 21769 solver.cpp:404]     Test net output #1: loss = 0.72897 (* 1 = 0.72897 loss)\nI0821 11:59:54.300529 21769 solver.cpp:228] Iteration 70400, loss = 
0.220005\nI0821 11:59:54.300565 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 11:59:54.300581 21769 solver.cpp:244]     Train net output #1: loss = 0.220004 (* 1 = 0.220004 loss)\nI0821 11:59:54.392026 21769 sgd_solver.cpp:166] Iteration 70400, lr = 1.76\nI0821 12:03:32.136109 21769 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0821 12:05:43.380344 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6816\nI0821 12:05:43.380755 21769 solver.cpp:404]     Test net output #1: loss = 1.4512 (* 1 = 1.4512 loss)\nI0821 12:05:45.503739 21769 solver.cpp:228] Iteration 70500, loss = 0.161777\nI0821 12:05:45.503777 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:05:45.503792 21769 solver.cpp:244]     Train net output #1: loss = 0.161777 (* 1 = 0.161777 loss)\nI0821 12:05:45.595561 21769 sgd_solver.cpp:166] Iteration 70500, lr = 1.7625\nI0821 12:09:23.332087 21769 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0821 12:11:34.593194 21769 solver.cpp:404]     Test net output #0: accuracy = 0.777\nI0821 12:11:34.593595 21769 solver.cpp:404]     Test net output #1: loss = 0.755697 (* 1 = 0.755697 loss)\nI0821 12:11:36.716428 21769 solver.cpp:228] Iteration 70600, loss = 0.201742\nI0821 12:11:36.716464 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 12:11:36.716480 21769 solver.cpp:244]     Train net output #1: loss = 0.201741 (* 1 = 0.201741 loss)\nI0821 12:11:36.806000 21769 sgd_solver.cpp:166] Iteration 70600, lr = 1.765\nI0821 12:15:14.383657 21769 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0821 12:17:25.632516 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8207\nI0821 12:17:25.632937 21769 solver.cpp:404]     Test net output #1: loss = 0.54647 (* 1 = 0.54647 loss)\nI0821 12:17:27.755148 21769 solver.cpp:228] Iteration 70700, loss = 0.179704\nI0821 12:17:27.755183 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 12:17:27.755199 21769 solver.cpp:244]  
   Train net output #1: loss = 0.179703 (* 1 = 0.179703 loss)\nI0821 12:17:27.837554 21769 sgd_solver.cpp:166] Iteration 70700, lr = 1.7675\nI0821 12:21:05.406646 21769 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0821 12:23:16.669533 21769 solver.cpp:404]     Test net output #0: accuracy = 0.742\nI0821 12:23:16.669960 21769 solver.cpp:404]     Test net output #1: loss = 1.02368 (* 1 = 1.02368 loss)\nI0821 12:23:18.791589 21769 solver.cpp:228] Iteration 70800, loss = 0.116315\nI0821 12:23:18.791626 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 12:23:18.791641 21769 solver.cpp:244]     Train net output #1: loss = 0.116314 (* 1 = 0.116314 loss)\nI0821 12:23:18.879631 21769 sgd_solver.cpp:166] Iteration 70800, lr = 1.77\nI0821 12:26:56.479334 21769 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0821 12:29:07.736663 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7423\nI0821 12:29:07.737027 21769 solver.cpp:404]     Test net output #1: loss = 0.938177 (* 1 = 0.938177 loss)\nI0821 12:29:09.859524 21769 solver.cpp:228] Iteration 70900, loss = 0.187753\nI0821 12:29:09.859561 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 12:29:09.859576 21769 solver.cpp:244]     Train net output #1: loss = 0.187752 (* 1 = 0.187752 loss)\nI0821 12:29:09.948578 21769 sgd_solver.cpp:166] Iteration 70900, lr = 1.7725\nI0821 12:32:47.519609 21769 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0821 12:34:58.769891 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7972\nI0821 12:34:58.770270 21769 solver.cpp:404]     Test net output #1: loss = 0.750578 (* 1 = 0.750578 loss)\nI0821 12:35:00.892558 21769 solver.cpp:228] Iteration 71000, loss = 0.284133\nI0821 12:35:00.892596 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 12:35:00.892612 21769 solver.cpp:244]     Train net output #1: loss = 0.284132 (* 1 = 0.284132 loss)\nI0821 12:35:00.981449 21769 sgd_solver.cpp:166] Iteration 71000, lr = 
1.775\nI0821 12:38:38.455911 21769 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0821 12:40:49.689862 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8546\nI0821 12:40:49.690270 21769 solver.cpp:404]     Test net output #1: loss = 0.443251 (* 1 = 0.443251 loss)\nI0821 12:40:51.812721 21769 solver.cpp:228] Iteration 71100, loss = 0.355684\nI0821 12:40:51.812770 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 12:40:51.812786 21769 solver.cpp:244]     Train net output #1: loss = 0.355683 (* 1 = 0.355683 loss)\nI0821 12:40:51.902297 21769 sgd_solver.cpp:166] Iteration 71100, lr = 1.7775\nI0821 12:44:29.446861 21769 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0821 12:46:40.683143 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6963\nI0821 12:46:40.683531 21769 solver.cpp:404]     Test net output #1: loss = 1.25657 (* 1 = 1.25657 loss)\nI0821 12:46:42.807739 21769 solver.cpp:228] Iteration 71200, loss = 0.139462\nI0821 12:46:42.807782 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 12:46:42.807798 21769 solver.cpp:244]     Train net output #1: loss = 0.139462 (* 1 = 0.139462 loss)\nI0821 12:46:42.895845 21769 sgd_solver.cpp:166] Iteration 71200, lr = 1.78\nI0821 12:50:20.413419 21769 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0821 12:52:31.646381 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7342\nI0821 12:52:31.646812 21769 solver.cpp:404]     Test net output #1: loss = 1.03102 (* 1 = 1.03102 loss)\nI0821 12:52:33.770092 21769 solver.cpp:228] Iteration 71300, loss = 0.159269\nI0821 12:52:33.770139 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 12:52:33.770155 21769 solver.cpp:244]     Train net output #1: loss = 0.159268 (* 1 = 0.159268 loss)\nI0821 12:52:33.852354 21769 sgd_solver.cpp:166] Iteration 71300, lr = 1.7825\nI0821 12:56:11.391726 21769 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0821 12:58:22.606787 21769 solver.cpp:404]     
Test net output #0: accuracy = 0.7783\nI0821 12:58:22.607201 21769 solver.cpp:404]     Test net output #1: loss = 0.816993 (* 1 = 0.816993 loss)\nI0821 12:58:24.730640 21769 solver.cpp:228] Iteration 71400, loss = 0.355117\nI0821 12:58:24.730677 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 12:58:24.730693 21769 solver.cpp:244]     Train net output #1: loss = 0.355116 (* 1 = 0.355116 loss)\nI0821 12:58:24.819620 21769 sgd_solver.cpp:166] Iteration 71400, lr = 1.785\nI0821 13:02:02.365439 21769 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0821 13:04:13.613905 21769 solver.cpp:404]     Test net output #0: accuracy = 0.843\nI0821 13:04:13.614284 21769 solver.cpp:404]     Test net output #1: loss = 0.515572 (* 1 = 0.515572 loss)\nI0821 13:04:15.737350 21769 solver.cpp:228] Iteration 71500, loss = 0.25666\nI0821 13:04:15.737397 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 13:04:15.737413 21769 solver.cpp:244]     Train net output #1: loss = 0.256659 (* 1 = 0.256659 loss)\nI0821 13:04:15.820435 21769 sgd_solver.cpp:166] Iteration 71500, lr = 1.7875\nI0821 13:07:53.322815 21769 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0821 13:10:04.583928 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8158\nI0821 13:10:04.584306 21769 solver.cpp:404]     Test net output #1: loss = 0.637992 (* 1 = 0.637992 loss)\nI0821 13:10:06.706411 21769 solver.cpp:228] Iteration 71600, loss = 0.144027\nI0821 13:10:06.706447 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:10:06.706462 21769 solver.cpp:244]     Train net output #1: loss = 0.144026 (* 1 = 0.144026 loss)\nI0821 13:10:06.793455 21769 sgd_solver.cpp:166] Iteration 71600, lr = 1.79\nI0821 13:13:44.208521 21769 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0821 13:15:55.452112 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8365\nI0821 13:15:55.452539 21769 solver.cpp:404]     Test net output #1: loss = 0.512124 (* 1 = 
0.512124 loss)\nI0821 13:15:57.574793 21769 solver.cpp:228] Iteration 71700, loss = 0.173772\nI0821 13:15:57.574837 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 13:15:57.574853 21769 solver.cpp:244]     Train net output #1: loss = 0.173771 (* 1 = 0.173771 loss)\nI0821 13:15:57.667191 21769 sgd_solver.cpp:166] Iteration 71700, lr = 1.7925\nI0821 13:19:35.408367 21769 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0821 13:21:46.650846 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6997\nI0821 13:21:46.651207 21769 solver.cpp:404]     Test net output #1: loss = 1.04552 (* 1 = 1.04552 loss)\nI0821 13:21:48.773183 21769 solver.cpp:228] Iteration 71800, loss = 0.18391\nI0821 13:21:48.773228 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 13:21:48.773243 21769 solver.cpp:244]     Train net output #1: loss = 0.183909 (* 1 = 0.183909 loss)\nI0821 13:21:48.865582 21769 sgd_solver.cpp:166] Iteration 71800, lr = 1.795\nI0821 13:25:26.394750 21769 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0821 13:27:37.626585 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7827\nI0821 13:27:37.627045 21769 solver.cpp:404]     Test net output #1: loss = 0.755034 (* 1 = 0.755034 loss)\nI0821 13:27:39.749348 21769 solver.cpp:228] Iteration 71900, loss = 0.343764\nI0821 13:27:39.749394 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 13:27:39.749410 21769 solver.cpp:244]     Train net output #1: loss = 0.343764 (* 1 = 0.343764 loss)\nI0821 13:27:39.832702 21769 sgd_solver.cpp:166] Iteration 71900, lr = 1.7975\nI0821 13:31:17.336235 21769 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0821 13:33:28.575536 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7821\nI0821 13:33:28.575953 21769 solver.cpp:404]     Test net output #1: loss = 0.762445 (* 1 = 0.762445 loss)\nI0821 13:33:30.697974 21769 solver.cpp:228] Iteration 72000, loss = 0.186551\nI0821 13:33:30.698020 21769 
solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 13:33:30.698036 21769 solver.cpp:244]     Train net output #1: loss = 0.186551 (* 1 = 0.186551 loss)\nI0821 13:33:30.788738 21769 sgd_solver.cpp:166] Iteration 72000, lr = 1.8\nI0821 13:37:08.489727 21769 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0821 13:39:19.723516 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8336\nI0821 13:39:19.723879 21769 solver.cpp:404]     Test net output #1: loss = 0.549398 (* 1 = 0.549398 loss)\nI0821 13:39:21.845762 21769 solver.cpp:228] Iteration 72100, loss = 0.25094\nI0821 13:39:21.845803 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 13:39:21.845818 21769 solver.cpp:244]     Train net output #1: loss = 0.250939 (* 1 = 0.250939 loss)\nI0821 13:39:21.938072 21769 sgd_solver.cpp:166] Iteration 72100, lr = 1.8025\nI0821 13:42:59.545899 21769 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0821 13:45:10.779592 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8168\nI0821 13:45:10.780017 21769 solver.cpp:404]     Test net output #1: loss = 0.609993 (* 1 = 0.609993 loss)\nI0821 13:45:12.902310 21769 solver.cpp:228] Iteration 72200, loss = 0.167253\nI0821 13:45:12.902356 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 13:45:12.902372 21769 solver.cpp:244]     Train net output #1: loss = 0.167253 (* 1 = 0.167253 loss)\nI0821 13:45:12.994401 21769 sgd_solver.cpp:166] Iteration 72200, lr = 1.805\nI0821 13:48:50.637718 21769 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0821 13:51:01.833125 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0821 13:51:01.833536 21769 solver.cpp:404]     Test net output #1: loss = 0.952665 (* 1 = 0.952665 loss)\nI0821 13:51:03.955670 21769 solver.cpp:228] Iteration 72300, loss = 0.138251\nI0821 13:51:03.955718 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 13:51:03.955734 21769 solver.cpp:244]     Train net output #1: loss = 
0.13825 (* 1 = 0.13825 loss)\nI0821 13:51:04.046834 21769 sgd_solver.cpp:166] Iteration 72300, lr = 1.8075\nI0821 13:54:41.764263 21769 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0821 13:56:52.944344 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8012\nI0821 13:56:52.944756 21769 solver.cpp:404]     Test net output #1: loss = 0.643484 (* 1 = 0.643484 loss)\nI0821 13:56:55.066861 21769 solver.cpp:228] Iteration 72400, loss = 0.23342\nI0821 13:56:55.066896 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 13:56:55.066911 21769 solver.cpp:244]     Train net output #1: loss = 0.233419 (* 1 = 0.233419 loss)\nI0821 13:56:55.153728 21769 sgd_solver.cpp:166] Iteration 72400, lr = 1.81\nI0821 14:00:32.681843 21769 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0821 14:02:43.881212 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7806\nI0821 14:02:43.881625 21769 solver.cpp:404]     Test net output #1: loss = 0.770245 (* 1 = 0.770245 loss)\nI0821 14:02:46.004194 21769 solver.cpp:228] Iteration 72500, loss = 0.171722\nI0821 14:02:46.004230 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 14:02:46.004245 21769 solver.cpp:244]     Train net output #1: loss = 0.171721 (* 1 = 0.171721 loss)\nI0821 14:02:46.086984 21769 sgd_solver.cpp:166] Iteration 72500, lr = 1.8125\nI0821 14:06:23.762698 21769 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0821 14:08:34.953013 21769 solver.cpp:404]     Test net output #0: accuracy = 0.76\nI0821 14:08:34.953436 21769 solver.cpp:404]     Test net output #1: loss = 0.889009 (* 1 = 0.889009 loss)\nI0821 14:08:37.075552 21769 solver.cpp:228] Iteration 72600, loss = 0.179099\nI0821 14:08:37.075587 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 14:08:37.075603 21769 solver.cpp:244]     Train net output #1: loss = 0.179099 (* 1 = 0.179099 loss)\nI0821 14:08:37.166167 21769 sgd_solver.cpp:166] Iteration 72600, lr = 1.815\nI0821 14:12:14.712971 21769 
solver.cpp:337] Iteration 72700, Testing net (#0)\nI0821 14:14:25.905334 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7839\nI0821 14:14:25.905704 21769 solver.cpp:404]     Test net output #1: loss = 0.697702 (* 1 = 0.697702 loss)\nI0821 14:14:28.028306 21769 solver.cpp:228] Iteration 72700, loss = 0.171593\nI0821 14:14:28.028352 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 14:14:28.028367 21769 solver.cpp:244]     Train net output #1: loss = 0.171592 (* 1 = 0.171592 loss)\nI0821 14:14:28.123761 21769 sgd_solver.cpp:166] Iteration 72700, lr = 1.8175\nI0821 14:18:05.896790 21769 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0821 14:20:17.112879 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8051\nI0821 14:20:17.113260 21769 solver.cpp:404]     Test net output #1: loss = 0.608298 (* 1 = 0.608298 loss)\nI0821 14:20:19.235613 21769 solver.cpp:228] Iteration 72800, loss = 0.113302\nI0821 14:20:19.235661 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 14:20:19.235677 21769 solver.cpp:244]     Train net output #1: loss = 0.113302 (* 1 = 0.113302 loss)\nI0821 14:20:19.321310 21769 sgd_solver.cpp:166] Iteration 72800, lr = 1.82\nI0821 14:23:56.864974 21769 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0821 14:26:08.161123 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8247\nI0821 14:26:08.161517 21769 solver.cpp:404]     Test net output #1: loss = 0.619765 (* 1 = 0.619765 loss)\nI0821 14:26:10.284420 21769 solver.cpp:228] Iteration 72900, loss = 0.246072\nI0821 14:26:10.284459 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 14:26:10.284482 21769 solver.cpp:244]     Train net output #1: loss = 0.246071 (* 1 = 0.246071 loss)\nI0821 14:26:10.368942 21769 sgd_solver.cpp:166] Iteration 72900, lr = 1.8225\nI0821 14:29:47.963034 21769 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0821 14:31:59.254117 21769 solver.cpp:404]     Test net output #0: accuracy = 
0.7734\nI0821 14:31:59.254541 21769 solver.cpp:404]     Test net output #1: loss = 0.888663 (* 1 = 0.888663 loss)\nI0821 14:32:01.377771 21769 solver.cpp:228] Iteration 73000, loss = 0.0747388\nI0821 14:32:01.377820 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 14:32:01.377847 21769 solver.cpp:244]     Train net output #1: loss = 0.0747379 (* 1 = 0.0747379 loss)\nI0821 14:32:01.460666 21769 sgd_solver.cpp:166] Iteration 73000, lr = 1.825\nI0821 14:35:39.018656 21769 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0821 14:37:50.300612 21769 solver.cpp:404]     Test net output #0: accuracy = 0.789\nI0821 14:37:50.300971 21769 solver.cpp:404]     Test net output #1: loss = 0.717237 (* 1 = 0.717237 loss)\nI0821 14:37:52.423220 21769 solver.cpp:228] Iteration 73100, loss = 0.376025\nI0821 14:37:52.423269 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 14:37:52.423295 21769 solver.cpp:244]     Train net output #1: loss = 0.376024 (* 1 = 0.376024 loss)\nI0821 14:37:52.506717 21769 sgd_solver.cpp:166] Iteration 73100, lr = 1.8275\nI0821 14:41:30.027233 21769 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0821 14:43:41.307255 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7481\nI0821 14:43:41.307672 21769 solver.cpp:404]     Test net output #1: loss = 0.840425 (* 1 = 0.840425 loss)\nI0821 14:43:43.431062 21769 solver.cpp:228] Iteration 73200, loss = 0.164353\nI0821 14:43:43.431102 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 14:43:43.431124 21769 solver.cpp:244]     Train net output #1: loss = 0.164352 (* 1 = 0.164352 loss)\nI0821 14:43:43.514945 21769 sgd_solver.cpp:166] Iteration 73200, lr = 1.83\nI0821 14:47:21.149606 21769 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0821 14:49:32.419106 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7993\nI0821 14:49:32.419518 21769 solver.cpp:404]     Test net output #1: loss = 0.659918 (* 1 = 0.659918 loss)\nI0821 
14:49:34.542491 21769 solver.cpp:228] Iteration 73300, loss = 0.111463\nI0821 14:49:34.542541 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 14:49:34.542557 21769 solver.cpp:244]     Train net output #1: loss = 0.111462 (* 1 = 0.111462 loss)\nI0821 14:49:34.631405 21769 sgd_solver.cpp:166] Iteration 73300, lr = 1.8325\nI0821 14:53:12.225405 21769 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0821 14:55:23.527853 21769 solver.cpp:404]     Test net output #0: accuracy = 0.774\nI0821 14:55:23.528259 21769 solver.cpp:404]     Test net output #1: loss = 0.752005 (* 1 = 0.752005 loss)\nI0821 14:55:25.650786 21769 solver.cpp:228] Iteration 73400, loss = 0.196298\nI0821 14:55:25.650825 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 14:55:25.650840 21769 solver.cpp:244]     Train net output #1: loss = 0.196297 (* 1 = 0.196297 loss)\nI0821 14:55:25.739419 21769 sgd_solver.cpp:166] Iteration 73400, lr = 1.835\nI0821 14:59:03.360747 21769 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0821 15:01:14.642949 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0821 15:01:14.643348 21769 solver.cpp:404]     Test net output #1: loss = 0.658686 (* 1 = 0.658686 loss)\nI0821 15:01:16.767647 21769 solver.cpp:228] Iteration 73500, loss = 0.280386\nI0821 15:01:16.767694 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 15:01:16.767711 21769 solver.cpp:244]     Train net output #1: loss = 0.280386 (* 1 = 0.280386 loss)\nI0821 15:01:16.851789 21769 sgd_solver.cpp:166] Iteration 73500, lr = 1.8375\nI0821 15:04:54.537991 21769 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0821 15:07:05.811014 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7387\nI0821 15:07:05.811429 21769 solver.cpp:404]     Test net output #1: loss = 0.815167 (* 1 = 0.815167 loss)\nI0821 15:07:07.934633 21769 solver.cpp:228] Iteration 73600, loss = 0.259608\nI0821 15:07:07.934669 21769 solver.cpp:244]     Train net 
output #0: accuracy = 0.91\nI0821 15:07:07.934684 21769 solver.cpp:244]     Train net output #1: loss = 0.259607 (* 1 = 0.259607 loss)\nI0821 15:07:08.022778 21769 sgd_solver.cpp:166] Iteration 73600, lr = 1.84\nI0821 15:10:45.862520 21769 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0821 15:12:57.173786 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7778\nI0821 15:12:57.174209 21769 solver.cpp:404]     Test net output #1: loss = 0.773093 (* 1 = 0.773093 loss)\nI0821 15:12:59.297570 21769 solver.cpp:228] Iteration 73700, loss = 0.206612\nI0821 15:12:59.297610 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 15:12:59.297633 21769 solver.cpp:244]     Train net output #1: loss = 0.206611 (* 1 = 0.206611 loss)\nI0821 15:12:59.384829 21769 sgd_solver.cpp:166] Iteration 73700, lr = 1.8425\nI0821 15:16:36.983786 21769 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0821 15:18:48.274525 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8286\nI0821 15:18:48.274960 21769 solver.cpp:404]     Test net output #1: loss = 0.555104 (* 1 = 0.555104 loss)\nI0821 15:18:50.398684 21769 solver.cpp:228] Iteration 73800, loss = 0.133134\nI0821 15:18:50.398725 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 15:18:50.398751 21769 solver.cpp:244]     Train net output #1: loss = 0.133133 (* 1 = 0.133133 loss)\nI0821 15:18:50.478833 21769 sgd_solver.cpp:166] Iteration 73800, lr = 1.845\nI0821 15:22:27.998273 21769 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0821 15:24:39.257860 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6893\nI0821 15:24:39.258288 21769 solver.cpp:404]     Test net output #1: loss = 1.06603 (* 1 = 1.06603 loss)\nI0821 15:24:41.381630 21769 solver.cpp:228] Iteration 73900, loss = 0.280349\nI0821 15:24:41.381680 21769 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 15:24:41.381705 21769 solver.cpp:244]     Train net output #1: loss = 0.280348 (* 1 = 0.280348 
loss)\nI0821 15:24:41.473978 21769 sgd_solver.cpp:166] Iteration 73900, lr = 1.8475\nI0821 15:28:19.047091 21769 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0821 15:30:30.317059 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7618\nI0821 15:30:30.317428 21769 solver.cpp:404]     Test net output #1: loss = 0.717791 (* 1 = 0.717791 loss)\nI0821 15:30:32.440716 21769 solver.cpp:228] Iteration 74000, loss = 0.265604\nI0821 15:30:32.440765 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 15:30:32.440790 21769 solver.cpp:244]     Train net output #1: loss = 0.265603 (* 1 = 0.265603 loss)\nI0821 15:30:32.524482 21769 sgd_solver.cpp:166] Iteration 74000, lr = 1.85\nI0821 15:34:10.541410 21769 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0821 15:36:21.820426 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8085\nI0821 15:36:21.820847 21769 solver.cpp:404]     Test net output #1: loss = 0.59377 (* 1 = 0.59377 loss)\nI0821 15:36:23.949460 21769 solver.cpp:228] Iteration 74100, loss = 0.296978\nI0821 15:36:23.949501 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 15:36:23.949525 21769 solver.cpp:244]     Train net output #1: loss = 0.296977 (* 1 = 0.296977 loss)\nI0821 15:36:24.039942 21769 sgd_solver.cpp:166] Iteration 74100, lr = 1.8525\nI0821 15:40:02.424268 21769 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0821 15:42:13.725395 21769 solver.cpp:404]     Test net output #0: accuracy = 0.748\nI0821 15:42:13.725795 21769 solver.cpp:404]     Test net output #1: loss = 0.834053 (* 1 = 0.834053 loss)\nI0821 15:42:15.855093 21769 solver.cpp:228] Iteration 74200, loss = 0.146064\nI0821 15:42:15.855134 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 15:42:15.855159 21769 solver.cpp:244]     Train net output #1: loss = 0.146063 (* 1 = 0.146063 loss)\nI0821 15:42:15.943420 21769 sgd_solver.cpp:166] Iteration 74200, lr = 1.855\nI0821 15:45:54.336308 21769 solver.cpp:337] 
Iteration 74300, Testing net (#0)\nI0821 15:48:05.620292 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8004\nI0821 15:48:05.620694 21769 solver.cpp:404]     Test net output #1: loss = 0.677982 (* 1 = 0.677982 loss)\nI0821 15:48:07.749732 21769 solver.cpp:228] Iteration 74300, loss = 0.100358\nI0821 15:48:07.749781 21769 solver.cpp:244]     Train net output #0: accuracy = 0.98\nI0821 15:48:07.749809 21769 solver.cpp:244]     Train net output #1: loss = 0.100357 (* 1 = 0.100357 loss)\nI0821 15:48:07.840205 21769 sgd_solver.cpp:166] Iteration 74300, lr = 1.8575\nI0821 15:51:46.069792 21769 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0821 15:53:57.332981 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7956\nI0821 15:53:57.333386 21769 solver.cpp:404]     Test net output #1: loss = 0.678314 (* 1 = 0.678314 loss)\nI0821 15:53:59.461719 21769 solver.cpp:228] Iteration 74400, loss = 0.264806\nI0821 15:53:59.461766 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 15:53:59.461781 21769 solver.cpp:244]     Train net output #1: loss = 0.264805 (* 1 = 0.264805 loss)\nI0821 15:53:59.547883 21769 sgd_solver.cpp:166] Iteration 74400, lr = 1.86\nI0821 15:57:37.836169 21769 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0821 15:59:49.101263 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7739\nI0821 15:59:49.101675 21769 solver.cpp:404]     Test net output #1: loss = 0.796471 (* 1 = 0.796471 loss)\nI0821 15:59:51.229492 21769 solver.cpp:228] Iteration 74500, loss = 0.238436\nI0821 15:59:51.229533 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 15:59:51.229558 21769 solver.cpp:244]     Train net output #1: loss = 0.238435 (* 1 = 0.238435 loss)\nI0821 15:59:51.321815 21769 sgd_solver.cpp:166] Iteration 74500, lr = 1.8625\nI0821 16:03:29.697710 21769 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0821 16:05:40.921730 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7661\nI0821 
16:05:40.922144 21769 solver.cpp:404]     Test net output #1: loss = 0.839233 (* 1 = 0.839233 loss)\nI0821 16:05:43.050781 21769 solver.cpp:228] Iteration 74600, loss = 0.199597\nI0821 16:05:43.050832 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 16:05:43.050858 21769 solver.cpp:244]     Train net output #1: loss = 0.199596 (* 1 = 0.199596 loss)\nI0821 16:05:43.135947 21769 sgd_solver.cpp:166] Iteration 74600, lr = 1.865\nI0821 16:09:21.302855 21769 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0821 16:11:32.531555 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8058\nI0821 16:11:32.531929 21769 solver.cpp:404]     Test net output #1: loss = 0.678491 (* 1 = 0.678491 loss)\nI0821 16:11:34.660472 21769 solver.cpp:228] Iteration 74700, loss = 0.216036\nI0821 16:11:34.660512 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:11:34.660536 21769 solver.cpp:244]     Train net output #1: loss = 0.216035 (* 1 = 0.216035 loss)\nI0821 16:11:34.752266 21769 sgd_solver.cpp:166] Iteration 74700, lr = 1.8675\nI0821 16:15:13.213830 21769 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0821 16:17:24.423457 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7954\nI0821 16:17:24.423871 21769 solver.cpp:404]     Test net output #1: loss = 0.614762 (* 1 = 0.614762 loss)\nI0821 16:17:26.552291 21769 solver.cpp:228] Iteration 74800, loss = 0.263709\nI0821 16:17:26.552327 21769 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 16:17:26.552343 21769 solver.cpp:244]     Train net output #1: loss = 0.263708 (* 1 = 0.263708 loss)\nI0821 16:17:26.639947 21769 sgd_solver.cpp:166] Iteration 74800, lr = 1.87\nI0821 16:21:04.833741 21769 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0821 16:23:16.061702 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8216\nI0821 16:23:16.062083 21769 solver.cpp:404]     Test net output #1: loss = 0.57243 (* 1 = 0.57243 loss)\nI0821 16:23:18.189357 21769 
solver.cpp:228] Iteration 74900, loss = 0.221626\nI0821 16:23:18.189393 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 16:23:18.189409 21769 solver.cpp:244]     Train net output #1: loss = 0.221625 (* 1 = 0.221625 loss)\nI0821 16:23:18.278308 21769 sgd_solver.cpp:166] Iteration 74900, lr = 1.8725\nI0821 16:26:56.620672 21769 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0821 16:29:07.828078 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7012\nI0821 16:29:07.828485 21769 solver.cpp:404]     Test net output #1: loss = 0.999049 (* 1 = 0.999049 loss)\nI0821 16:29:09.957207 21769 solver.cpp:228] Iteration 75000, loss = 0.21284\nI0821 16:29:09.957244 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 16:29:09.957260 21769 solver.cpp:244]     Train net output #1: loss = 0.212839 (* 1 = 0.212839 loss)\nI0821 16:29:10.046334 21769 sgd_solver.cpp:166] Iteration 75000, lr = 1.875\nI0821 16:32:48.397119 21769 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0821 16:34:59.630702 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7075\nI0821 16:34:59.631112 21769 solver.cpp:404]     Test net output #1: loss = 1.15286 (* 1 = 1.15286 loss)\nI0821 16:35:01.761271 21769 solver.cpp:228] Iteration 75100, loss = 0.257435\nI0821 16:35:01.761307 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 16:35:01.761323 21769 solver.cpp:244]     Train net output #1: loss = 0.257434 (* 1 = 0.257434 loss)\nI0821 16:35:01.843853 21769 sgd_solver.cpp:166] Iteration 75100, lr = 1.8775\nI0821 16:38:40.063979 21769 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0821 16:40:51.320042 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7461\nI0821 16:40:51.320454 21769 solver.cpp:404]     Test net output #1: loss = 0.874077 (* 1 = 0.874077 loss)\nI0821 16:40:53.448385 21769 solver.cpp:228] Iteration 75200, loss = 0.246632\nI0821 16:40:53.448421 21769 solver.cpp:244]     Train net output #0: accuracy = 
0.95\nI0821 16:40:53.448436 21769 solver.cpp:244]     Train net output #1: loss = 0.246631 (* 1 = 0.246631 loss)\nI0821 16:40:53.534687 21769 sgd_solver.cpp:166] Iteration 75200, lr = 1.88\nI0821 16:44:31.845630 21769 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0821 16:46:43.101754 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8244\nI0821 16:46:43.102159 21769 solver.cpp:404]     Test net output #1: loss = 0.54584 (* 1 = 0.54584 loss)\nI0821 16:46:45.230494 21769 solver.cpp:228] Iteration 75300, loss = 0.217284\nI0821 16:46:45.230531 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 16:46:45.230546 21769 solver.cpp:244]     Train net output #1: loss = 0.217283 (* 1 = 0.217283 loss)\nI0821 16:46:45.321512 21769 sgd_solver.cpp:166] Iteration 75300, lr = 1.8825\nI0821 16:50:23.717104 21769 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0821 16:52:34.981250 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7788\nI0821 16:52:34.981645 21769 solver.cpp:404]     Test net output #1: loss = 0.740888 (* 1 = 0.740888 loss)\nI0821 16:52:37.110328 21769 solver.cpp:228] Iteration 75400, loss = 0.32146\nI0821 16:52:37.110365 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 16:52:37.110380 21769 solver.cpp:244]     Train net output #1: loss = 0.32146 (* 1 = 0.32146 loss)\nI0821 16:52:37.202479 21769 sgd_solver.cpp:166] Iteration 75400, lr = 1.885\nI0821 16:56:15.703950 21769 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0821 16:58:26.974417 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8021\nI0821 16:58:26.974822 21769 solver.cpp:404]     Test net output #1: loss = 0.662028 (* 1 = 0.662028 loss)\nI0821 16:58:29.103745 21769 solver.cpp:228] Iteration 75500, loss = 0.356876\nI0821 16:58:29.103782 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 16:58:29.103797 21769 solver.cpp:244]     Train net output #1: loss = 0.356875 (* 1 = 0.356875 loss)\nI0821 16:58:29.194331 
21769 sgd_solver.cpp:166] Iteration 75500, lr = 1.8875\nI0821 17:02:07.420207 21769 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0821 17:04:18.684140 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8233\nI0821 17:04:18.684517 21769 solver.cpp:404]     Test net output #1: loss = 0.630047 (* 1 = 0.630047 loss)\nI0821 17:04:20.812433 21769 solver.cpp:228] Iteration 75600, loss = 0.208405\nI0821 17:04:20.812472 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 17:04:20.812487 21769 solver.cpp:244]     Train net output #1: loss = 0.208404 (* 1 = 0.208404 loss)\nI0821 17:04:20.907235 21769 sgd_solver.cpp:166] Iteration 75600, lr = 1.89\nI0821 17:07:59.239434 21769 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0821 17:10:10.510572 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8079\nI0821 17:10:10.510977 21769 solver.cpp:404]     Test net output #1: loss = 0.605093 (* 1 = 0.605093 loss)\nI0821 17:10:12.639506 21769 solver.cpp:228] Iteration 75700, loss = 0.211558\nI0821 17:10:12.639554 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:10:12.639571 21769 solver.cpp:244]     Train net output #1: loss = 0.211558 (* 1 = 0.211558 loss)\nI0821 17:10:12.725031 21769 sgd_solver.cpp:166] Iteration 75700, lr = 1.8925\nI0821 17:13:51.055500 21769 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0821 17:16:02.348366 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8178\nI0821 17:16:02.348778 21769 solver.cpp:404]     Test net output #1: loss = 0.600261 (* 1 = 0.600261 loss)\nI0821 17:16:04.476415 21769 solver.cpp:228] Iteration 75800, loss = 0.189665\nI0821 17:16:04.476452 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 17:16:04.476467 21769 solver.cpp:244]     Train net output #1: loss = 0.189664 (* 1 = 0.189664 loss)\nI0821 17:16:04.573266 21769 sgd_solver.cpp:166] Iteration 75800, lr = 1.895\nI0821 17:19:42.952841 21769 solver.cpp:337] Iteration 75900, Testing net 
(#0)\nI0821 17:21:54.241858 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7804\nI0821 17:21:54.242310 21769 solver.cpp:404]     Test net output #1: loss = 0.671127 (* 1 = 0.671127 loss)\nI0821 17:21:56.370419 21769 solver.cpp:228] Iteration 75900, loss = 0.250173\nI0821 17:21:56.370455 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 17:21:56.370471 21769 solver.cpp:244]     Train net output #1: loss = 0.250173 (* 1 = 0.250173 loss)\nI0821 17:21:56.463685 21769 sgd_solver.cpp:166] Iteration 75900, lr = 1.8975\nI0821 17:25:34.857308 21769 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0821 17:27:46.141407 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7164\nI0821 17:27:46.141824 21769 solver.cpp:404]     Test net output #1: loss = 0.875165 (* 1 = 0.875165 loss)\nI0821 17:27:48.268679 21769 solver.cpp:228] Iteration 76000, loss = 0.256476\nI0821 17:27:48.268720 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 17:27:48.268736 21769 solver.cpp:244]     Train net output #1: loss = 0.256476 (* 1 = 0.256476 loss)\nI0821 17:27:48.353982 21769 sgd_solver.cpp:166] Iteration 76000, lr = 1.9\nI0821 17:31:26.675034 21769 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0821 17:33:37.939743 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6933\nI0821 17:33:37.940156 21769 solver.cpp:404]     Test net output #1: loss = 1.09333 (* 1 = 1.09333 loss)\nI0821 17:33:40.068107 21769 solver.cpp:228] Iteration 76100, loss = 0.361614\nI0821 17:33:40.068145 21769 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0821 17:33:40.068161 21769 solver.cpp:244]     Train net output #1: loss = 0.361613 (* 1 = 0.361613 loss)\nI0821 17:33:40.157068 21769 sgd_solver.cpp:166] Iteration 76100, lr = 1.9025\nI0821 17:37:18.520014 21769 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0821 17:39:29.783879 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7583\nI0821 17:39:29.784292 21769 
solver.cpp:404]     Test net output #1: loss = 0.828704 (* 1 = 0.828704 loss)\nI0821 17:39:31.912385 21769 solver.cpp:228] Iteration 76200, loss = 0.219615\nI0821 17:39:31.912421 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 17:39:31.912436 21769 solver.cpp:244]     Train net output #1: loss = 0.219614 (* 1 = 0.219614 loss)\nI0821 17:39:32.004297 21769 sgd_solver.cpp:166] Iteration 76200, lr = 1.905\nI0821 17:43:10.509356 21769 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0821 17:45:21.818972 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7919\nI0821 17:45:21.819394 21769 solver.cpp:404]     Test net output #1: loss = 0.691175 (* 1 = 0.691175 loss)\nI0821 17:45:23.948537 21769 solver.cpp:228] Iteration 76300, loss = 0.222209\nI0821 17:45:23.948577 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 17:45:23.948601 21769 solver.cpp:244]     Train net output #1: loss = 0.222208 (* 1 = 0.222208 loss)\nI0821 17:45:24.038950 21769 sgd_solver.cpp:166] Iteration 76300, lr = 1.9075\nI0821 17:49:02.775137 21769 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0821 17:51:15.449201 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7233\nI0821 17:51:15.449697 21769 solver.cpp:404]     Test net output #1: loss = 0.882251 (* 1 = 0.882251 loss)\nI0821 17:51:17.584211 21769 solver.cpp:228] Iteration 76400, loss = 0.335062\nI0821 17:51:17.584267 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 17:51:17.584285 21769 solver.cpp:244]     Train net output #1: loss = 0.335061 (* 1 = 0.335061 loss)\nI0821 17:51:17.664075 21769 sgd_solver.cpp:166] Iteration 76400, lr = 1.91\nI0821 17:54:56.403750 21769 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0821 17:57:09.041512 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7442\nI0821 17:57:09.041995 21769 solver.cpp:404]     Test net output #1: loss = 0.955678 (* 1 = 0.955678 loss)\nI0821 17:57:11.175285 21769 solver.cpp:228] Iteration 
76500, loss = 0.194481\nI0821 17:57:11.175343 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 17:57:11.175360 21769 solver.cpp:244]     Train net output #1: loss = 0.194481 (* 1 = 0.194481 loss)\nI0821 17:57:11.252743 21769 sgd_solver.cpp:166] Iteration 76500, lr = 1.9125\nI0821 18:00:50.002193 21769 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0821 18:03:02.629855 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8029\nI0821 18:03:02.630337 21769 solver.cpp:404]     Test net output #1: loss = 0.613974 (* 1 = 0.613974 loss)\nI0821 18:03:04.763669 21769 solver.cpp:228] Iteration 76600, loss = 0.195202\nI0821 18:03:04.763725 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 18:03:04.763742 21769 solver.cpp:244]     Train net output #1: loss = 0.195201 (* 1 = 0.195201 loss)\nI0821 18:03:04.855407 21769 sgd_solver.cpp:166] Iteration 76600, lr = 1.915\nI0821 18:06:43.560312 21769 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0821 18:08:56.184523 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8236\nI0821 18:08:56.185019 21769 solver.cpp:404]     Test net output #1: loss = 0.591913 (* 1 = 0.591913 loss)\nI0821 18:08:58.317764 21769 solver.cpp:228] Iteration 76700, loss = 0.285913\nI0821 18:08:58.317818 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 18:08:58.317836 21769 solver.cpp:244]     Train net output #1: loss = 0.285912 (* 1 = 0.285912 loss)\nI0821 18:08:58.405256 21769 sgd_solver.cpp:166] Iteration 76700, lr = 1.9175\nI0821 18:12:37.279309 21769 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0821 18:14:49.894953 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6938\nI0821 18:14:49.895457 21769 solver.cpp:404]     Test net output #1: loss = 1.20031 (* 1 = 1.20031 loss)\nI0821 18:14:52.028609 21769 solver.cpp:228] Iteration 76800, loss = 0.116092\nI0821 18:14:52.028666 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 18:14:52.028682 
21769 solver.cpp:244]     Train net output #1: loss = 0.116091 (* 1 = 0.116091 loss)\nI0821 18:14:52.115743 21769 sgd_solver.cpp:166] Iteration 76800, lr = 1.92\nI0821 18:18:30.839819 21769 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0821 18:20:43.482014 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6988\nI0821 18:20:43.482476 21769 solver.cpp:404]     Test net output #1: loss = 1.10448 (* 1 = 1.10448 loss)\nI0821 18:20:45.615463 21769 solver.cpp:228] Iteration 76900, loss = 0.301223\nI0821 18:20:45.615520 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 18:20:45.615536 21769 solver.cpp:244]     Train net output #1: loss = 0.301223 (* 1 = 0.301223 loss)\nI0821 18:20:45.700037 21769 sgd_solver.cpp:166] Iteration 76900, lr = 1.9225\nI0821 18:24:24.452219 21769 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0821 18:26:37.131119 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6372\nI0821 18:26:37.131608 21769 solver.cpp:404]     Test net output #1: loss = 1.58255 (* 1 = 1.58255 loss)\nI0821 18:26:39.264017 21769 solver.cpp:228] Iteration 77000, loss = 0.230647\nI0821 18:26:39.264073 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 18:26:39.264091 21769 solver.cpp:244]     Train net output #1: loss = 0.230646 (* 1 = 0.230646 loss)\nI0821 18:26:39.350741 21769 sgd_solver.cpp:166] Iteration 77000, lr = 1.925\nI0821 18:30:17.973153 21769 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0821 18:32:30.634004 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8182\nI0821 18:32:30.634491 21769 solver.cpp:404]     Test net output #1: loss = 0.605684 (* 1 = 0.605684 loss)\nI0821 18:32:32.767401 21769 solver.cpp:228] Iteration 77100, loss = 0.269961\nI0821 18:32:32.767458 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 18:32:32.767475 21769 solver.cpp:244]     Train net output #1: loss = 0.269961 (* 1 = 0.269961 loss)\nI0821 18:32:32.849942 21769 sgd_solver.cpp:166] 
Iteration 77100, lr = 1.9275\nI0821 18:36:11.818228 21769 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0821 18:38:24.482918 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8493\nI0821 18:38:24.483388 21769 solver.cpp:404]     Test net output #1: loss = 0.546332 (* 1 = 0.546332 loss)\nI0821 18:38:26.615684 21769 solver.cpp:228] Iteration 77200, loss = 0.140342\nI0821 18:38:26.615739 21769 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0821 18:38:26.615756 21769 solver.cpp:244]     Train net output #1: loss = 0.140341 (* 1 = 0.140341 loss)\nI0821 18:38:26.699054 21769 sgd_solver.cpp:166] Iteration 77200, lr = 1.93\nI0821 18:42:05.265619 21769 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0821 18:44:17.944703 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8314\nI0821 18:44:17.945176 21769 solver.cpp:404]     Test net output #1: loss = 0.504188 (* 1 = 0.504188 loss)\nI0821 18:44:20.077158 21769 solver.cpp:228] Iteration 77300, loss = 0.122444\nI0821 18:44:20.077216 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 18:44:20.077234 21769 solver.cpp:244]     Train net output #1: loss = 0.122443 (* 1 = 0.122443 loss)\nI0821 18:44:20.162961 21769 sgd_solver.cpp:166] Iteration 77300, lr = 1.9325\nI0821 18:47:58.789149 21769 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0821 18:50:11.482118 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7267\nI0821 18:50:11.482594 21769 solver.cpp:404]     Test net output #1: loss = 0.901528 (* 1 = 0.901528 loss)\nI0821 18:50:13.615833 21769 solver.cpp:228] Iteration 77400, loss = 0.353773\nI0821 18:50:13.615890 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 18:50:13.615906 21769 solver.cpp:244]     Train net output #1: loss = 0.353772 (* 1 = 0.353772 loss)\nI0821 18:50:13.697849 21769 sgd_solver.cpp:166] Iteration 77400, lr = 1.935\nI0821 18:53:52.489609 21769 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0821 18:56:05.129015 
21769 solver.cpp:404]     Test net output #0: accuracy = 0.7884\nI0821 18:56:05.129495 21769 solver.cpp:404]     Test net output #1: loss = 0.722563 (* 1 = 0.722563 loss)\nI0821 18:56:07.261806 21769 solver.cpp:228] Iteration 77500, loss = 0.175805\nI0821 18:56:07.261862 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 18:56:07.261879 21769 solver.cpp:244]     Train net output #1: loss = 0.175805 (* 1 = 0.175805 loss)\nI0821 18:56:07.347200 21769 sgd_solver.cpp:166] Iteration 77500, lr = 1.9375\nI0821 18:59:46.103746 21769 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0821 19:01:58.738386 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7845\nI0821 19:01:58.738806 21769 solver.cpp:404]     Test net output #1: loss = 0.707344 (* 1 = 0.707344 loss)\nI0821 19:02:00.872007 21769 solver.cpp:228] Iteration 77600, loss = 0.296665\nI0821 19:02:00.872064 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 19:02:00.872081 21769 solver.cpp:244]     Train net output #1: loss = 0.296665 (* 1 = 0.296665 loss)\nI0821 19:02:00.957645 21769 sgd_solver.cpp:166] Iteration 77600, lr = 1.94\nI0821 19:05:39.857009 21769 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0821 19:07:52.499980 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7845\nI0821 19:07:52.500460 21769 solver.cpp:404]     Test net output #1: loss = 0.760636 (* 1 = 0.760636 loss)\nI0821 19:07:54.633239 21769 solver.cpp:228] Iteration 77700, loss = 0.155635\nI0821 19:07:54.633296 21769 solver.cpp:244]     Train net output #0: accuracy = 0.97\nI0821 19:07:54.633312 21769 solver.cpp:244]     Train net output #1: loss = 0.155635 (* 1 = 0.155635 loss)\nI0821 19:07:54.722666 21769 sgd_solver.cpp:166] Iteration 77700, lr = 1.9425\nI0821 19:11:33.533620 21769 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0821 19:13:46.155066 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7656\nI0821 19:13:46.155568 21769 solver.cpp:404]     Test net output #1: 
loss = 0.776759 (* 1 = 0.776759 loss)\nI0821 19:13:48.288621 21769 solver.cpp:228] Iteration 77800, loss = 0.213344\nI0821 19:13:48.288676 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 19:13:48.288694 21769 solver.cpp:244]     Train net output #1: loss = 0.213344 (* 1 = 0.213344 loss)\nI0821 19:13:48.381614 21769 sgd_solver.cpp:166] Iteration 77800, lr = 1.945\nI0821 19:17:27.151238 21769 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0821 19:19:39.799846 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7445\nI0821 19:19:39.800354 21769 solver.cpp:404]     Test net output #1: loss = 0.840231 (* 1 = 0.840231 loss)\nI0821 19:19:41.932051 21769 solver.cpp:228] Iteration 77900, loss = 0.184547\nI0821 19:19:41.932106 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 19:19:41.932123 21769 solver.cpp:244]     Train net output #1: loss = 0.184546 (* 1 = 0.184546 loss)\nI0821 19:19:42.018355 21769 sgd_solver.cpp:166] Iteration 77900, lr = 1.9475\nI0821 19:23:20.849616 21769 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0821 19:25:33.483876 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7471\nI0821 19:25:33.484380 21769 solver.cpp:404]     Test net output #1: loss = 0.879666 (* 1 = 0.879666 loss)\nI0821 19:25:35.618412 21769 solver.cpp:228] Iteration 78000, loss = 0.364837\nI0821 19:25:35.618468 21769 solver.cpp:244]     Train net output #0: accuracy = 0.86\nI0821 19:25:35.618485 21769 solver.cpp:244]     Train net output #1: loss = 0.364836 (* 1 = 0.364836 loss)\nI0821 19:25:35.704846 21769 sgd_solver.cpp:166] Iteration 78000, lr = 1.95\nI0821 19:29:14.580250 21769 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0821 19:31:27.224874 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7577\nI0821 19:31:27.225363 21769 solver.cpp:404]     Test net output #1: loss = 1.06311 (* 1 = 1.06311 loss)\nI0821 19:31:29.359583 21769 solver.cpp:228] Iteration 78100, loss = 0.342382\nI0821 
19:31:29.359639 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 19:31:29.359658 21769 solver.cpp:244]     Train net output #1: loss = 0.342381 (* 1 = 0.342381 loss)\nI0821 19:31:29.443506 21769 sgd_solver.cpp:166] Iteration 78100, lr = 1.9525\nI0821 19:35:08.229019 21769 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0821 19:37:20.864341 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7855\nI0821 19:37:20.864832 21769 solver.cpp:404]     Test net output #1: loss = 0.668684 (* 1 = 0.668684 loss)\nI0821 19:37:22.998699 21769 solver.cpp:228] Iteration 78200, loss = 0.172161\nI0821 19:37:22.998756 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 19:37:22.998773 21769 solver.cpp:244]     Train net output #1: loss = 0.17216 (* 1 = 0.17216 loss)\nI0821 19:37:23.088383 21769 sgd_solver.cpp:166] Iteration 78200, lr = 1.955\nI0821 19:41:01.885155 21769 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0821 19:43:14.506705 21769 solver.cpp:404]     Test net output #0: accuracy = 0.787\nI0821 19:43:14.507246 21769 solver.cpp:404]     Test net output #1: loss = 0.749431 (* 1 = 0.749431 loss)\nI0821 19:43:16.639509 21769 solver.cpp:228] Iteration 78300, loss = 0.194016\nI0821 19:43:16.639566 21769 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0821 19:43:16.639583 21769 solver.cpp:244]     Train net output #1: loss = 0.194016 (* 1 = 0.194016 loss)\nI0821 19:43:16.723762 21769 sgd_solver.cpp:166] Iteration 78300, lr = 1.9575\nI0821 19:46:55.459498 21769 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0821 19:49:08.087756 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7284\nI0821 19:49:08.088169 21769 solver.cpp:404]     Test net output #1: loss = 0.91661 (* 1 = 0.91661 loss)\nI0821 19:49:10.220821 21769 solver.cpp:228] Iteration 78400, loss = 0.26058\nI0821 19:49:10.220877 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 19:49:10.220896 21769 solver.cpp:244]     Train net 
output #1: loss = 0.26058 (* 1 = 0.26058 loss)\nI0821 19:49:10.311007 21769 sgd_solver.cpp:166] Iteration 78400, lr = 1.96\nI0821 19:52:49.340453 21769 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0821 19:55:01.938987 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6387\nI0821 19:55:01.939442 21769 solver.cpp:404]     Test net output #1: loss = 1.74116 (* 1 = 1.74116 loss)\nI0821 19:55:04.071892 21769 solver.cpp:228] Iteration 78500, loss = 0.357054\nI0821 19:55:04.071954 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 19:55:04.071970 21769 solver.cpp:244]     Train net output #1: loss = 0.357053 (* 1 = 0.357053 loss)\nI0821 19:55:04.162209 21769 sgd_solver.cpp:166] Iteration 78500, lr = 1.9625\nI0821 19:58:43.138900 21769 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0821 20:00:55.734719 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8179\nI0821 20:00:55.735159 21769 solver.cpp:404]     Test net output #1: loss = 0.587812 (* 1 = 0.587812 loss)\nI0821 20:00:57.868057 21769 solver.cpp:228] Iteration 78600, loss = 0.222442\nI0821 20:00:57.868110 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 20:00:57.868127 21769 solver.cpp:244]     Train net output #1: loss = 0.222441 (* 1 = 0.222441 loss)\nI0821 20:00:57.950546 21769 sgd_solver.cpp:166] Iteration 78600, lr = 1.965\nI0821 20:04:36.659329 21769 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0821 20:06:49.233963 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7724\nI0821 20:06:49.234360 21769 solver.cpp:404]     Test net output #1: loss = 0.741438 (* 1 = 0.741438 loss)\nI0821 20:06:51.367403 21769 solver.cpp:228] Iteration 78700, loss = 0.202878\nI0821 20:06:51.367460 21769 solver.cpp:244]     Train net output #0: accuracy = 0.95\nI0821 20:06:51.367476 21769 solver.cpp:244]     Train net output #1: loss = 0.202877 (* 1 = 0.202877 loss)\nI0821 20:06:51.454107 21769 sgd_solver.cpp:166] Iteration 78700, lr = 1.9675\nI0821 
20:10:30.322624 21769 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0821 20:12:42.937014 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7677\nI0821 20:12:42.937472 21769 solver.cpp:404]     Test net output #1: loss = 0.75727 (* 1 = 0.75727 loss)\nI0821 20:12:45.070677 21769 solver.cpp:228] Iteration 78800, loss = 0.198379\nI0821 20:12:45.070735 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 20:12:45.070752 21769 solver.cpp:244]     Train net output #1: loss = 0.198378 (* 1 = 0.198378 loss)\nI0821 20:12:45.159029 21769 sgd_solver.cpp:166] Iteration 78800, lr = 1.97\nI0821 20:16:23.889787 21769 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0821 20:18:36.281236 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7139\nI0821 20:18:36.281625 21769 solver.cpp:404]     Test net output #1: loss = 1.01644 (* 1 = 1.01644 loss)\nI0821 20:18:38.414487 21769 solver.cpp:228] Iteration 78900, loss = 0.27035\nI0821 20:18:38.414543 21769 solver.cpp:244]     Train net output #0: accuracy = 0.91\nI0821 20:18:38.414561 21769 solver.cpp:244]     Train net output #1: loss = 0.27035 (* 1 = 0.27035 loss)\nI0821 20:18:38.499920 21769 sgd_solver.cpp:166] Iteration 78900, lr = 1.9725\nI0821 20:22:17.417315 21769 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0821 20:24:29.835206 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8008\nI0821 20:24:29.835646 21769 solver.cpp:404]     Test net output #1: loss = 0.708654 (* 1 = 0.708654 loss)\nI0821 20:24:31.968551 21769 solver.cpp:228] Iteration 79000, loss = 0.230965\nI0821 20:24:31.968606 21769 solver.cpp:244]     Train net output #0: accuracy = 0.94\nI0821 20:24:31.968623 21769 solver.cpp:244]     Train net output #1: loss = 0.230965 (* 1 = 0.230965 loss)\nI0821 20:24:32.051467 21769 sgd_solver.cpp:166] Iteration 79000, lr = 1.975\nI0821 20:28:10.774502 21769 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0821 20:30:22.867842 21769 solver.cpp:404]     Test net output #0: 
accuracy = 0.7449\nI0821 20:30:22.868263 21769 solver.cpp:404]     Test net output #1: loss = 1.0412 (* 1 = 1.0412 loss)\nI0821 20:30:25.000556 21769 solver.cpp:228] Iteration 79100, loss = 0.255119\nI0821 20:30:25.000614 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:30:25.000630 21769 solver.cpp:244]     Train net output #1: loss = 0.255118 (* 1 = 0.255118 loss)\nI0821 20:30:25.082947 21769 sgd_solver.cpp:166] Iteration 79100, lr = 1.9775\nI0821 20:34:03.691398 21769 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0821 20:36:15.893111 21769 solver.cpp:404]     Test net output #0: accuracy = 0.8388\nI0821 20:36:15.893533 21769 solver.cpp:404]     Test net output #1: loss = 0.50751 (* 1 = 0.50751 loss)\nI0821 20:36:18.026646 21769 solver.cpp:228] Iteration 79200, loss = 0.181235\nI0821 20:36:18.026702 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:36:18.026720 21769 solver.cpp:244]     Train net output #1: loss = 0.181234 (* 1 = 0.181234 loss)\nI0821 20:36:18.109189 21769 sgd_solver.cpp:166] Iteration 79200, lr = 1.98\nI0821 20:39:56.947124 21769 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0821 20:42:09.621366 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7447\nI0821 20:42:09.621814 21769 solver.cpp:404]     Test net output #1: loss = 0.912805 (* 1 = 0.912805 loss)\nI0821 20:42:11.754454 21769 solver.cpp:228] Iteration 79300, loss = 0.172682\nI0821 20:42:11.754510 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:42:11.754528 21769 solver.cpp:244]     Train net output #1: loss = 0.172681 (* 1 = 0.172681 loss)\nI0821 20:42:11.837359 21769 sgd_solver.cpp:166] Iteration 79300, lr = 1.9825\nI0821 20:45:50.535851 21769 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0821 20:48:03.189199 21769 solver.cpp:404]     Test net output #0: accuracy = 0.741\nI0821 20:48:03.189622 21769 solver.cpp:404]     Test net output #1: loss = 0.817162 (* 1 = 0.817162 loss)\nI0821 
20:48:05.323205 21769 solver.cpp:228] Iteration 79400, loss = 0.263404\nI0821 20:48:05.323262 21769 solver.cpp:244]     Train net output #0: accuracy = 0.89\nI0821 20:48:05.323279 21769 solver.cpp:244]     Train net output #1: loss = 0.263403 (* 1 = 0.263403 loss)\nI0821 20:48:05.402776 21769 sgd_solver.cpp:166] Iteration 79400, lr = 1.985\nI0821 20:51:43.966030 21769 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0821 20:53:56.499315 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7046\nI0821 20:53:56.499773 21769 solver.cpp:404]     Test net output #1: loss = 1.19187 (* 1 = 1.19187 loss)\nI0821 20:53:58.634002 21769 solver.cpp:228] Iteration 79500, loss = 0.280553\nI0821 20:53:58.634057 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 20:53:58.634074 21769 solver.cpp:244]     Train net output #1: loss = 0.280552 (* 1 = 0.280552 loss)\nI0821 20:53:58.717994 21769 sgd_solver.cpp:166] Iteration 79500, lr = 1.9875\nI0821 20:57:37.312542 21769 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0821 20:59:49.886332 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7361\nI0821 20:59:49.886749 21769 solver.cpp:404]     Test net output #1: loss = 0.911661 (* 1 = 0.911661 loss)\nI0821 20:59:52.020794 21769 solver.cpp:228] Iteration 79600, loss = 0.266374\nI0821 20:59:52.020853 21769 solver.cpp:244]     Train net output #0: accuracy = 0.9\nI0821 20:59:52.020874 21769 solver.cpp:244]     Train net output #1: loss = 0.266373 (* 1 = 0.266373 loss)\nI0821 20:59:52.108250 21769 sgd_solver.cpp:166] Iteration 79600, lr = 1.99\nI0821 21:03:30.853235 21769 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0821 21:05:43.285310 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7889\nI0821 21:05:43.285727 21769 solver.cpp:404]     Test net output #1: loss = 0.679167 (* 1 = 0.679167 loss)\nI0821 21:05:45.418715 21769 solver.cpp:228] Iteration 79700, loss = 0.218327\nI0821 21:05:45.418769 21769 solver.cpp:244]     Train net output 
#0: accuracy = 0.93\nI0821 21:05:45.418787 21769 solver.cpp:244]     Train net output #1: loss = 0.218327 (* 1 = 0.218327 loss)\nI0821 21:05:45.505235 21769 sgd_solver.cpp:166] Iteration 79700, lr = 1.9925\nI0821 21:09:24.205503 21769 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0821 21:11:36.815838 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7058\nI0821 21:11:36.816269 21769 solver.cpp:404]     Test net output #1: loss = 1.12313 (* 1 = 1.12313 loss)\nI0821 21:11:38.949424 21769 solver.cpp:228] Iteration 79800, loss = 0.146333\nI0821 21:11:38.949481 21769 solver.cpp:244]     Train net output #0: accuracy = 0.93\nI0821 21:11:38.949497 21769 solver.cpp:244]     Train net output #1: loss = 0.146333 (* 1 = 0.146333 loss)\nI0821 21:11:39.038043 21769 sgd_solver.cpp:166] Iteration 79800, lr = 1.995\nI0821 21:15:18.068565 21769 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0821 21:17:30.634312 21769 solver.cpp:404]     Test net output #0: accuracy = 0.6804\nI0821 21:17:30.634713 21769 solver.cpp:404]     Test net output #1: loss = 1.16619 (* 1 = 1.16619 loss)\nI0821 21:17:32.767035 21769 solver.cpp:228] Iteration 79900, loss = 0.326934\nI0821 21:17:32.767091 21769 solver.cpp:244]     Train net output #0: accuracy = 0.87\nI0821 21:17:32.767109 21769 solver.cpp:244]     Train net output #1: loss = 0.326934 (* 1 = 0.326934 loss)\nI0821 21:17:32.847004 21769 sgd_solver.cpp:166] Iteration 79900, lr = 1.9975\nI0821 21:21:11.644110 21769 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range2Res110Fig4b_iter_80000.caffemodel\nI0821 21:21:12.271935 21769 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range2Res110Fig4b_iter_80000.solverstate\nI0821 21:21:13.001299 21769 solver.cpp:317] Iteration 80000, loss = 0.26585\nI0821 21:21:13.001361 21769 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0821 21:23:25.582408 21769 solver.cpp:404]     Test net output #0: accuracy = 0.7138\nI0821 
21:23:25.582888 21769 solver.cpp:404]     Test net output #1: loss = 0.973029 (* 1 = 0.973029 loss)\nI0821 21:23:25.582902 21769 solver.cpp:322] Optimization Done.\nI0821 21:23:36.098345 21769 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range2Res20Fig4b",
    "content": "I0818 14:09:05.615973 21603 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0818 14:09:05.618459 21603 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0818 14:09:05.619679 21603 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0818 14:09:05.620893 21603 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0818 14:09:05.622107 21603 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0818 14:09:05.623344 21603 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0818 14:09:05.624574 21603 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0818 14:09:05.626013 21603 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0818 14:09:05.627483 21603 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0818 14:09:06.048099 21603 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 80000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range2Res20Fig4b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 2\nI0818 14:09:06.052261 21603 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0818 14:09:06.061293 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:06.061355 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:06.062067 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0818 14:09:06.062129 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0818 14:09:06.062157 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0818 14:09:06.062177 21603 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0818 14:09:06.062198 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0818 14:09:06.062216 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0818 14:09:06.062233 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0818 14:09:06.062252 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0818 14:09:06.062270 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0818 14:09:06.062288 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0818 14:09:06.062312 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0818 14:09:06.062332 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0818 14:09:06.062351 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0818 14:09:06.062369 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0818 14:09:06.062389 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0818 14:09:06.062407 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0818 14:09:06.062432 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0818 14:09:06.062449 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0818 
14:09:06.062469 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0818 14:09:06.062487 21603 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0818 14:09:06.063251 21603 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  
top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  
top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param 
{\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: 
\"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_concat0\"\n  type: 
\"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0818 14:09:06.064198 21603 layer_factory.hpp:77] Creating layer dataLayer\nI0818 14:09:06.065397 21603 net.cpp:100] Creating Layer dataLayer\nI0818 14:09:06.065474 21603 net.cpp:408] dataLayer -> data_top\nI0818 14:09:06.065701 21603 net.cpp:408] dataLayer -> label\nI0818 14:09:06.065819 21603 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 14:09:06.076412 21608 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0818 14:09:06.098423 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:06.105553 21603 net.cpp:150] Setting up dataLayer\nI0818 14:09:06.105614 21603 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 14:09:06.105628 21603 net.cpp:157] Top shape: 125 
(125)\nI0818 14:09:06.105633 21603 net.cpp:165] Memory required for data: 1536500\nI0818 14:09:06.105648 21603 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 14:09:06.105661 21603 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 14:09:06.105669 21603 net.cpp:434] label_dataLayer_1_split <- label\nI0818 14:09:06.105686 21603 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 14:09:06.105701 21603 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 14:09:06.105782 21603 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 14:09:06.105796 21603 net.cpp:157] Top shape: 125 (125)\nI0818 14:09:06.105803 21603 net.cpp:157] Top shape: 125 (125)\nI0818 14:09:06.105808 21603 net.cpp:165] Memory required for data: 1537500\nI0818 14:09:06.105813 21603 layer_factory.hpp:77] Creating layer pre_conv\nI0818 14:09:06.105873 21603 net.cpp:100] Creating Layer pre_conv\nI0818 14:09:06.105885 21603 net.cpp:434] pre_conv <- data_top\nI0818 14:09:06.105901 21603 net.cpp:408] pre_conv -> pre_conv_top\nI0818 14:09:06.107622 21603 net.cpp:150] Setting up pre_conv\nI0818 14:09:06.107641 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.107648 21603 net.cpp:165] Memory required for data: 9729500\nI0818 14:09:06.107697 21603 layer_factory.hpp:77] Creating layer pre_bn\nI0818 14:09:06.107774 21603 net.cpp:100] Creating Layer pre_bn\nI0818 14:09:06.107786 21603 net.cpp:434] pre_bn <- pre_conv_top\nI0818 14:09:06.107795 21603 net.cpp:408] pre_bn -> pre_bn_top\nI0818 14:09:06.108125 21609 blocking_queue.cpp:50] Waiting for data\nI0818 14:09:06.108268 21603 net.cpp:150] Setting up pre_bn\nI0818 14:09:06.108288 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.108294 21603 net.cpp:165] Memory required for data: 17921500\nI0818 14:09:06.108311 21603 layer_factory.hpp:77] Creating layer pre_scale\nI0818 14:09:06.108369 21603 net.cpp:100] Creating Layer pre_scale\nI0818 14:09:06.108379 
21603 net.cpp:434] pre_scale <- pre_bn_top\nI0818 14:09:06.108388 21603 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 14:09:06.108561 21603 layer_factory.hpp:77] Creating layer pre_scale\nI0818 14:09:06.108824 21603 net.cpp:150] Setting up pre_scale\nI0818 14:09:06.108839 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.108845 21603 net.cpp:165] Memory required for data: 26113500\nI0818 14:09:06.108855 21603 layer_factory.hpp:77] Creating layer pre_relu\nI0818 14:09:06.108897 21603 net.cpp:100] Creating Layer pre_relu\nI0818 14:09:06.108906 21603 net.cpp:434] pre_relu <- pre_bn_top\nI0818 14:09:06.108917 21603 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 14:09:06.108929 21603 net.cpp:150] Setting up pre_relu\nI0818 14:09:06.108937 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.108942 21603 net.cpp:165] Memory required for data: 34305500\nI0818 14:09:06.108947 21603 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 14:09:06.108958 21603 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 14:09:06.108963 21603 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 14:09:06.108981 21603 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 14:09:06.108991 21603 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 14:09:06.109040 21603 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 14:09:06.109050 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.109057 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.109062 21603 net.cpp:165] Memory required for data: 50689500\nI0818 14:09:06.109067 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 14:09:06.109079 21603 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 14:09:06.109086 21603 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 14:09:06.109097 21603 net.cpp:408] L1_b1_cbr1_conv -> 
L1_b1_cbr1_conv_top\nI0818 14:09:06.109424 21603 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 14:09:06.109441 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.109447 21603 net.cpp:165] Memory required for data: 58881500\nI0818 14:09:06.109459 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 14:09:06.109473 21603 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 14:09:06.109479 21603 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 14:09:06.109493 21603 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 14:09:06.109745 21603 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 14:09:06.109760 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.109764 21603 net.cpp:165] Memory required for data: 67073500\nI0818 14:09:06.109776 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 14:09:06.109783 21603 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 14:09:06.109789 21603 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 14:09:06.109797 21603 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.109851 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 14:09:06.109987 21603 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 14:09:06.109999 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.110004 21603 net.cpp:165] Memory required for data: 75265500\nI0818 14:09:06.110015 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 14:09:06.110023 21603 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 14:09:06.110028 21603 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 14:09:06.110040 21603 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.110050 21603 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 14:09:06.110057 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.110062 21603 net.cpp:165] Memory required for data: 83457500\nI0818 14:09:06.110067 21603 layer_factory.hpp:77] 
Creating layer L1_b1_cbr2_conv\nI0818 14:09:06.110081 21603 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 14:09:06.110087 21603 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 14:09:06.110095 21603 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 14:09:06.110402 21603 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 14:09:06.110416 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.110422 21603 net.cpp:165] Memory required for data: 91649500\nI0818 14:09:06.110431 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 14:09:06.110443 21603 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 14:09:06.110450 21603 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 14:09:06.110458 21603 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 14:09:06.110698 21603 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 14:09:06.110713 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.110718 21603 net.cpp:165] Memory required for data: 99841500\nI0818 14:09:06.110733 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 14:09:06.110743 21603 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 14:09:06.110749 21603 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 14:09:06.110755 21603 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.110824 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 14:09:06.110960 21603 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 14:09:06.110972 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.110977 21603 net.cpp:165] Memory required for data: 108033500\nI0818 14:09:06.110986 21603 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 14:09:06.111039 21603 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 14:09:06.111052 21603 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 14:09:06.111058 21603 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 
14:09:06.111066 21603 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 14:09:06.111150 21603 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 14:09:06.111166 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111171 21603 net.cpp:165] Memory required for data: 116225500\nI0818 14:09:06.111176 21603 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 14:09:06.111184 21603 net.cpp:100] Creating Layer L1_b1_relu\nI0818 14:09:06.111191 21603 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 14:09:06.111197 21603 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.111207 21603 net.cpp:150] Setting up L1_b1_relu\nI0818 14:09:06.111214 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111219 21603 net.cpp:165] Memory required for data: 124417500\nI0818 14:09:06.111223 21603 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.111232 21603 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.111238 21603 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 14:09:06.111248 21603 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 14:09:06.111258 21603 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 14:09:06.111299 21603 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.111311 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111318 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111323 21603 net.cpp:165] Memory required for data: 140801500\nI0818 14:09:06.111328 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 14:09:06.111342 21603 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 14:09:06.111348 21603 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 
14:09:06.111357 21603 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 14:09:06.111667 21603 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 14:09:06.111682 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111687 21603 net.cpp:165] Memory required for data: 148993500\nI0818 14:09:06.111696 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 14:09:06.111706 21603 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 14:09:06.111714 21603 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 14:09:06.111723 21603 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 14:09:06.111959 21603 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 14:09:06.111973 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.111977 21603 net.cpp:165] Memory required for data: 157185500\nI0818 14:09:06.111989 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 14:09:06.111997 21603 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 14:09:06.112004 21603 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 14:09:06.112013 21603 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.112066 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 14:09:06.112211 21603 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 14:09:06.112226 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.112231 21603 net.cpp:165] Memory required for data: 165377500\nI0818 14:09:06.112239 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 14:09:06.112256 21603 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 14:09:06.112262 21603 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 14:09:06.112269 21603 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.112282 21603 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 14:09:06.112289 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.112293 21603 net.cpp:165] Memory required for data: 
173569500\nI0818 14:09:06.112298 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 14:09:06.112309 21603 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 14:09:06.112318 21603 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 14:09:06.112326 21603 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 14:09:06.112633 21603 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 14:09:06.112648 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.112653 21603 net.cpp:165] Memory required for data: 181761500\nI0818 14:09:06.112660 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 14:09:06.112673 21603 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 14:09:06.112679 21603 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 14:09:06.112689 21603 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 14:09:06.112920 21603 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 14:09:06.112933 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.112939 21603 net.cpp:165] Memory required for data: 189953500\nI0818 14:09:06.112957 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 14:09:06.112967 21603 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 14:09:06.112972 21603 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 14:09:06.112983 21603 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.113034 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 14:09:06.113183 21603 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 14:09:06.113196 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113201 21603 net.cpp:165] Memory required for data: 198145500\nI0818 14:09:06.113211 21603 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 14:09:06.113224 21603 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 14:09:06.113229 21603 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 14:09:06.113236 21603 net.cpp:434] 
L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 14:09:06.113245 21603 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 14:09:06.113278 21603 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 14:09:06.113287 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113292 21603 net.cpp:165] Memory required for data: 206337500\nI0818 14:09:06.113297 21603 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 14:09:06.113306 21603 net.cpp:100] Creating Layer L1_b2_relu\nI0818 14:09:06.113310 21603 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 14:09:06.113320 21603 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.113330 21603 net.cpp:150] Setting up L1_b2_relu\nI0818 14:09:06.113337 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113342 21603 net.cpp:165] Memory required for data: 214529500\nI0818 14:09:06.113346 21603 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.113353 21603 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.113359 21603 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 14:09:06.113366 21603 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 14:09:06.113375 21603 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 14:09:06.113418 21603 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.113430 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113445 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113450 21603 net.cpp:165] Memory required for data: 230913500\nI0818 14:09:06.113454 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 14:09:06.113466 21603 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 14:09:06.113471 21603 net.cpp:434] 
L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 14:09:06.113483 21603 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 14:09:06.113781 21603 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 14:09:06.113795 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.113801 21603 net.cpp:165] Memory required for data: 239105500\nI0818 14:09:06.113809 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 14:09:06.113818 21603 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 14:09:06.113824 21603 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 14:09:06.113836 21603 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 14:09:06.114071 21603 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 14:09:06.114084 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.114089 21603 net.cpp:165] Memory required for data: 247297500\nI0818 14:09:06.114099 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 14:09:06.114111 21603 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 14:09:06.114122 21603 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 14:09:06.114131 21603 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.114183 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 14:09:06.114323 21603 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 14:09:06.114336 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.114341 21603 net.cpp:165] Memory required for data: 255489500\nI0818 14:09:06.114349 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 14:09:06.114362 21603 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 14:09:06.114367 21603 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 14:09:06.114374 21603 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.114383 21603 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 14:09:06.114392 21603 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 14:09:06.114395 21603 net.cpp:165] Memory required for data: 263681500\nI0818 14:09:06.114400 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 14:09:06.114414 21603 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 14:09:06.114420 21603 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 14:09:06.114430 21603 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 14:09:06.114735 21603 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 14:09:06.114749 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.114754 21603 net.cpp:165] Memory required for data: 271873500\nI0818 14:09:06.114763 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 14:09:06.114780 21603 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 14:09:06.114787 21603 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 14:09:06.114796 21603 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 14:09:06.115031 21603 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 14:09:06.115046 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115051 21603 net.cpp:165] Memory required for data: 280065500\nI0818 14:09:06.115062 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 14:09:06.115072 21603 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 14:09:06.115077 21603 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 14:09:06.115084 21603 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.115150 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 14:09:06.115293 21603 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 14:09:06.115305 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115310 21603 net.cpp:165] Memory required for data: 288257500\nI0818 14:09:06.115327 21603 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 14:09:06.115339 21603 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 14:09:06.115346 21603 net.cpp:434] 
L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 14:09:06.115352 21603 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 14:09:06.115363 21603 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 14:09:06.115394 21603 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 14:09:06.115406 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115411 21603 net.cpp:165] Memory required for data: 296449500\nI0818 14:09:06.115417 21603 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 14:09:06.115427 21603 net.cpp:100] Creating Layer L1_b3_relu\nI0818 14:09:06.115433 21603 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 14:09:06.115440 21603 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 14:09:06.115450 21603 net.cpp:150] Setting up L1_b3_relu\nI0818 14:09:06.115458 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115461 21603 net.cpp:165] Memory required for data: 304641500\nI0818 14:09:06.115466 21603 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.115473 21603 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.115479 21603 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 14:09:06.115486 21603 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 14:09:06.115495 21603 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 14:09:06.115541 21603 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.115553 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115561 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.115564 21603 net.cpp:165] Memory required for data: 321025500\nI0818 14:09:06.115569 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 14:09:06.115583 21603 
net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 14:09:06.115589 21603 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 14:09:06.115598 21603 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 14:09:06.115906 21603 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 14:09:06.115919 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.115924 21603 net.cpp:165] Memory required for data: 323073500\nI0818 14:09:06.115932 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 14:09:06.115944 21603 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 14:09:06.115950 21603 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 14:09:06.115959 21603 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 14:09:06.116199 21603 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 14:09:06.116214 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.116220 21603 net.cpp:165] Memory required for data: 325121500\nI0818 14:09:06.116230 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 14:09:06.116240 21603 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 14:09:06.116245 21603 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 14:09:06.116253 21603 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.116307 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 14:09:06.116451 21603 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 14:09:06.116464 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.116469 21603 net.cpp:165] Memory required for data: 327169500\nI0818 14:09:06.116478 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 14:09:06.116487 21603 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 14:09:06.116492 21603 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 14:09:06.116503 21603 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.116513 21603 net.cpp:150] Setting up 
L2_b1_cbr1_relu\nI0818 14:09:06.116526 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.116531 21603 net.cpp:165] Memory required for data: 329217500\nI0818 14:09:06.116536 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 14:09:06.116550 21603 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 14:09:06.116556 21603 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 14:09:06.116564 21603 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 14:09:06.116874 21603 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 14:09:06.116888 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.116894 21603 net.cpp:165] Memory required for data: 331265500\nI0818 14:09:06.116902 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 14:09:06.116914 21603 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 14:09:06.116920 21603 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 14:09:06.116928 21603 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 14:09:06.117174 21603 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 14:09:06.117188 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.117193 21603 net.cpp:165] Memory required for data: 333313500\nI0818 14:09:06.117204 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 14:09:06.117213 21603 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 14:09:06.117218 21603 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 14:09:06.117226 21603 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.117281 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 14:09:06.117419 21603 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 14:09:06.117431 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.117436 21603 net.cpp:165] Memory required for data: 335361500\nI0818 14:09:06.117445 21603 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 14:09:06.117458 21603 net.cpp:100] Creating 
Layer L2_b1_pool\nI0818 14:09:06.117465 21603 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 14:09:06.117473 21603 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 14:09:06.117555 21603 net.cpp:150] Setting up L2_b1_pool\nI0818 14:09:06.117570 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.117575 21603 net.cpp:165] Memory required for data: 337409500\nI0818 14:09:06.117581 21603 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 14:09:06.117595 21603 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 14:09:06.117601 21603 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 14:09:06.117609 21603 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 14:09:06.117616 21603 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 14:09:06.117647 21603 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 14:09:06.117656 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.117661 21603 net.cpp:165] Memory required for data: 339457500\nI0818 14:09:06.117666 21603 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 14:09:06.117677 21603 net.cpp:100] Creating Layer L2_b1_relu\nI0818 14:09:06.117683 21603 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 14:09:06.117691 21603 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.117699 21603 net.cpp:150] Setting up L2_b1_relu\nI0818 14:09:06.117707 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.117712 21603 net.cpp:165] Memory required for data: 341505500\nI0818 14:09:06.117717 21603 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 14:09:06.117763 21603 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 14:09:06.117775 21603 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 14:09:06.120136 21603 net.cpp:150] Setting up L2_b1_zeros\nI0818 14:09:06.120157 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.120163 21603 net.cpp:165] Memory required for data: 343553500\nI0818 
14:09:06.120169 21603 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 14:09:06.120179 21603 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 14:09:06.120193 21603 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 14:09:06.120201 21603 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 14:09:06.120213 21603 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 14:09:06.120288 21603 net.cpp:150] Setting up L2_b1_concat0\nI0818 14:09:06.120303 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.120308 21603 net.cpp:165] Memory required for data: 347649500\nI0818 14:09:06.120314 21603 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.120326 21603 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.120333 21603 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 14:09:06.120342 21603 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 14:09:06.120352 21603 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 14:09:06.120402 21603 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.120414 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.120421 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.120426 21603 net.cpp:165] Memory required for data: 355841500\nI0818 14:09:06.120431 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 14:09:06.120445 21603 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 14:09:06.120452 21603 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 14:09:06.120463 21603 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 14:09:06.122267 21603 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 14:09:06.122289 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.122300 21603 net.cpp:165] Memory required for data: 
359937500\nI0818 14:09:06.122339 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 14:09:06.122361 21603 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 14:09:06.122383 21603 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 14:09:06.122401 21603 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 14:09:06.122680 21603 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 14:09:06.122700 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.122709 21603 net.cpp:165] Memory required for data: 364033500\nI0818 14:09:06.122732 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 14:09:06.122750 21603 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 14:09:06.122761 21603 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 14:09:06.122777 21603 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.122865 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 14:09:06.123042 21603 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 14:09:06.123061 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.123070 21603 net.cpp:165] Memory required for data: 368129500\nI0818 14:09:06.123090 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 14:09:06.123111 21603 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 14:09:06.123122 21603 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 14:09:06.123137 21603 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.123157 21603 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 14:09:06.123172 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.123183 21603 net.cpp:165] Memory required for data: 372225500\nI0818 14:09:06.123193 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 14:09:06.123217 21603 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 14:09:06.123231 21603 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 14:09:06.123253 21603 net.cpp:408] 
L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 14:09:06.123750 21603 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 14:09:06.123770 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.123780 21603 net.cpp:165] Memory required for data: 376321500\nI0818 14:09:06.123797 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 14:09:06.123831 21603 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 14:09:06.123843 21603 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 14:09:06.123865 21603 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 14:09:06.124145 21603 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 14:09:06.124168 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.124179 21603 net.cpp:165] Memory required for data: 380417500\nI0818 14:09:06.124202 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 14:09:06.124217 21603 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 14:09:06.124229 21603 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 14:09:06.124244 21603 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.124328 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 14:09:06.124511 21603 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 14:09:06.124531 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.124541 21603 net.cpp:165] Memory required for data: 384513500\nI0818 14:09:06.124558 21603 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 14:09:06.124577 21603 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 14:09:06.124588 21603 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 14:09:06.124606 21603 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 14:09:06.124624 21603 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 14:09:06.124668 21603 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 14:09:06.124686 21603 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 14:09:06.124696 21603 net.cpp:165] Memory required for data: 388609500\nI0818 14:09:06.124706 21603 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 14:09:06.124724 21603 net.cpp:100] Creating Layer L2_b2_relu\nI0818 14:09:06.124737 21603 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 14:09:06.124752 21603 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.124769 21603 net.cpp:150] Setting up L2_b2_relu\nI0818 14:09:06.124784 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.124794 21603 net.cpp:165] Memory required for data: 392705500\nI0818 14:09:06.124804 21603 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.124819 21603 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.124830 21603 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 14:09:06.124845 21603 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 14:09:06.124863 21603 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 14:09:06.124938 21603 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.124958 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.124970 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.124979 21603 net.cpp:165] Memory required for data: 400897500\nI0818 14:09:06.124990 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 14:09:06.125017 21603 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 14:09:06.125031 21603 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 14:09:06.125049 21603 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 14:09:06.125551 21603 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 14:09:06.125571 21603 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 14:09:06.125581 21603 net.cpp:165] Memory required for data: 404993500\nI0818 14:09:06.125598 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 14:09:06.125620 21603 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 14:09:06.125633 21603 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 14:09:06.125654 21603 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 14:09:06.125939 21603 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0818 14:09:06.125965 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.125977 21603 net.cpp:165] Memory required for data: 409089500\nI0818 14:09:06.125998 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 14:09:06.126015 21603 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 14:09:06.126027 21603 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 14:09:06.126044 21603 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.126132 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 14:09:06.126313 21603 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 14:09:06.126333 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.126343 21603 net.cpp:165] Memory required for data: 413185500\nI0818 14:09:06.126361 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 14:09:06.126389 21603 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 14:09:06.126402 21603 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 14:09:06.126417 21603 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.126435 21603 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 14:09:06.126451 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.126461 21603 net.cpp:165] Memory required for data: 417281500\nI0818 14:09:06.126471 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 14:09:06.126502 21603 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 14:09:06.126514 21603 net.cpp:434] 
L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 14:09:06.126533 21603 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 14:09:06.127014 21603 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 14:09:06.127033 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.127043 21603 net.cpp:165] Memory required for data: 421377500\nI0818 14:09:06.127063 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 14:09:06.127082 21603 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 14:09:06.127095 21603 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 14:09:06.127112 21603 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 14:09:06.127389 21603 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 14:09:06.127408 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.127418 21603 net.cpp:165] Memory required for data: 425473500\nI0818 14:09:06.127440 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 14:09:06.127456 21603 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 14:09:06.127467 21603 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 14:09:06.127480 21603 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.127571 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 14:09:06.127784 21603 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 14:09:06.127806 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.127816 21603 net.cpp:165] Memory required for data: 429569500\nI0818 14:09:06.127835 21603 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 14:09:06.127852 21603 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 14:09:06.127864 21603 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 14:09:06.127878 21603 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 14:09:06.127898 21603 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 14:09:06.127944 21603 net.cpp:150] Setting up 
L2_b3_sum_eltwise\nI0818 14:09:06.127964 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.127972 21603 net.cpp:165] Memory required for data: 433665500\nI0818 14:09:06.127984 21603 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 14:09:06.127998 21603 net.cpp:100] Creating Layer L2_b3_relu\nI0818 14:09:06.128010 21603 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 14:09:06.128029 21603 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI0818 14:09:06.128049 21603 net.cpp:150] Setting up L2_b3_relu\nI0818 14:09:06.128064 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.128074 21603 net.cpp:165] Memory required for data: 437761500\nI0818 14:09:06.128093 21603 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.128108 21603 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.128119 21603 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 14:09:06.128139 21603 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 14:09:06.128159 21603 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 14:09:06.128237 21603 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.128257 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.128270 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.128280 21603 net.cpp:165] Memory required for data: 445953500\nI0818 14:09:06.128290 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 14:09:06.128314 21603 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 14:09:06.128329 21603 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 14:09:06.128348 21603 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 14:09:06.128854 21603 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0818 14:09:06.128873 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.128883 21603 net.cpp:165] Memory required for data: 446977500\nI0818 14:09:06.128901 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 14:09:06.128922 21603 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 14:09:06.128935 21603 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 14:09:06.128952 21603 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 14:09:06.129231 21603 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 14:09:06.129251 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.129261 21603 net.cpp:165] Memory required for data: 448001500\nI0818 14:09:06.129283 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 14:09:06.129305 21603 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 14:09:06.129317 21603 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 14:09:06.129333 21603 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.129426 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 14:09:06.129606 21603 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 14:09:06.129626 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.129637 21603 net.cpp:165] Memory required for data: 449025500\nI0818 14:09:06.129655 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 14:09:06.129674 21603 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 14:09:06.129686 21603 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 14:09:06.129701 21603 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.129725 21603 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 14:09:06.129740 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.129750 21603 net.cpp:165] Memory required for data: 450049500\nI0818 14:09:06.129760 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 14:09:06.129781 21603 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0818 14:09:06.129793 21603 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 14:09:06.129814 21603 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 14:09:06.130306 21603 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 14:09:06.130326 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.130336 21603 net.cpp:165] Memory required for data: 451073500\nI0818 14:09:06.130354 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 14:09:06.130378 21603 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 14:09:06.130391 21603 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 14:09:06.130414 21603 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 14:09:06.130725 21603 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 14:09:06.130745 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.130764 21603 net.cpp:165] Memory required for data: 452097500\nI0818 14:09:06.130787 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 14:09:06.130805 21603 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 14:09:06.130815 21603 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 14:09:06.130831 21603 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.130925 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 14:09:06.131108 21603 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 14:09:06.131130 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.131141 21603 net.cpp:165] Memory required for data: 453121500\nI0818 14:09:06.131160 21603 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 14:09:06.131177 21603 net.cpp:100] Creating Layer L3_b1_pool\nI0818 14:09:06.131188 21603 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 14:09:06.131206 21603 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 14:09:06.131264 21603 net.cpp:150] Setting up L3_b1_pool\nI0818 14:09:06.131284 21603 net.cpp:157] Top shape: 125 
32 8 8 (256000)\nI0818 14:09:06.131294 21603 net.cpp:165] Memory required for data: 454145500\nI0818 14:09:06.131304 21603 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 14:09:06.131320 21603 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 14:09:06.131331 21603 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 14:09:06.131345 21603 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 14:09:06.131359 21603 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 14:09:06.131425 21603 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 14:09:06.131444 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.131453 21603 net.cpp:165] Memory required for data: 455169500\nI0818 14:09:06.131464 21603 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 14:09:06.131479 21603 net.cpp:100] Creating Layer L3_b1_relu\nI0818 14:09:06.131490 21603 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 14:09:06.131503 21603 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.131521 21603 net.cpp:150] Setting up L3_b1_relu\nI0818 14:09:06.131536 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.131546 21603 net.cpp:165] Memory required for data: 456193500\nI0818 14:09:06.131556 21603 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 14:09:06.131573 21603 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 14:09:06.131593 21603 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 14:09:06.132867 21603 net.cpp:150] Setting up L3_b1_zeros\nI0818 14:09:06.132889 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.132899 21603 net.cpp:165] Memory required for data: 457217500\nI0818 14:09:06.132910 21603 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 14:09:06.132930 21603 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 14:09:06.132943 21603 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 14:09:06.132956 21603 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 14:09:06.132972 
21603 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 14:09:06.133034 21603 net.cpp:150] Setting up L3_b1_concat0\nI0818 14:09:06.133054 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.133062 21603 net.cpp:165] Memory required for data: 459265500\nI0818 14:09:06.133074 21603 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 14:09:06.133088 21603 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 14:09:06.133100 21603 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 14:09:06.133119 21603 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 14:09:06.133141 21603 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 14:09:06.133219 21603 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 14:09:06.133242 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.133266 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.133277 21603 net.cpp:165] Memory required for data: 463361500\nI0818 14:09:06.133288 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 14:09:06.133313 21603 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 14:09:06.133327 21603 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 14:09:06.133345 21603 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 14:09:06.135375 21603 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 14:09:06.135397 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.135407 21603 net.cpp:165] Memory required for data: 465409500\nI0818 14:09:06.135426 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 14:09:06.135449 21603 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 14:09:06.135462 21603 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 14:09:06.135479 21603 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 14:09:06.135771 21603 
net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 14:09:06.135790 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.135800 21603 net.cpp:165] Memory required for data: 467457500\nI0818 14:09:06.135823 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 14:09:06.135843 21603 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 14:09:06.135856 21603 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 14:09:06.135872 21603 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.135962 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 14:09:06.136150 21603 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 14:09:06.136169 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.136179 21603 net.cpp:165] Memory required for data: 469505500\nI0818 14:09:06.136198 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 14:09:06.136214 21603 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 14:09:06.136226 21603 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 14:09:06.136246 21603 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.136266 21603 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 14:09:06.136281 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.136291 21603 net.cpp:165] Memory required for data: 471553500\nI0818 14:09:06.136301 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 14:09:06.136325 21603 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 14:09:06.136339 21603 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 14:09:06.136356 21603 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 14:09:06.137435 21603 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 14:09:06.137456 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.137465 21603 net.cpp:165] Memory required for data: 473601500\nI0818 14:09:06.137485 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0818 
14:09:06.137506 21603 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 14:09:06.137518 21603 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 14:09:06.137536 21603 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 14:09:06.137820 21603 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 14:09:06.137838 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.137848 21603 net.cpp:165] Memory required for data: 475649500\nI0818 14:09:06.137869 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 14:09:06.137892 21603 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 14:09:06.137904 21603 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 14:09:06.137920 21603 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.138008 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 14:09:06.138190 21603 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 14:09:06.138209 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.138219 21603 net.cpp:165] Memory required for data: 477697500\nI0818 14:09:06.138237 21603 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 14:09:06.138268 21603 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 14:09:06.138283 21603 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 14:09:06.138296 21603 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 14:09:06.138315 21603 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 14:09:06.138376 21603 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 14:09:06.138396 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.138406 21603 net.cpp:165] Memory required for data: 479745500\nI0818 14:09:06.138417 21603 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 14:09:06.138437 21603 net.cpp:100] Creating Layer L3_b2_relu\nI0818 14:09:06.138449 21603 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 14:09:06.138464 21603 net.cpp:395] L3_b2_relu -> 
L3_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.138484 21603 net.cpp:150] Setting up L3_b2_relu\nI0818 14:09:06.138499 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.138509 21603 net.cpp:165] Memory required for data: 481793500\nI0818 14:09:06.138520 21603 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 14:09:06.138532 21603 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 14:09:06.138543 21603 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 14:09:06.138558 21603 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 14:09:06.138576 21603 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 14:09:06.138658 21603 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 14:09:06.138675 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.138689 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.138698 21603 net.cpp:165] Memory required for data: 485889500\nI0818 14:09:06.138708 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 14:09:06.138733 21603 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 14:09:06.138746 21603 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 14:09:06.138766 21603 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 14:09:06.139816 21603 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 14:09:06.139835 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.139845 21603 net.cpp:165] Memory required for data: 487937500\nI0818 14:09:06.139864 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 14:09:06.139885 21603 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 14:09:06.139897 21603 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 14:09:06.139919 21603 net.cpp:408] L3_b3_cbr1_bn -> 
L3_b3_cbr1_bn_top\nI0818 14:09:06.140202 21603 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 14:09:06.140221 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.140231 21603 net.cpp:165] Memory required for data: 489985500\nI0818 14:09:06.140252 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 14:09:06.140269 21603 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 14:09:06.140281 21603 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 14:09:06.140297 21603 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.140398 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 14:09:06.140583 21603 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 14:09:06.140602 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.140611 21603 net.cpp:165] Memory required for data: 492033500\nI0818 14:09:06.140630 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 14:09:06.140646 21603 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 14:09:06.140657 21603 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 14:09:06.140676 21603 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.140697 21603 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 14:09:06.140712 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.140730 21603 net.cpp:165] Memory required for data: 494081500\nI0818 14:09:06.140741 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 14:09:06.140766 21603 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 14:09:06.140779 21603 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 14:09:06.140797 21603 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 14:09:06.141840 21603 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 14:09:06.141860 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.141871 21603 net.cpp:165] Memory required for data: 496129500\nI0818 14:09:06.141890 21603 layer_factory.hpp:77] 
Creating layer L3_b3_cbr2_bn\nI0818 14:09:06.141911 21603 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 14:09:06.141922 21603 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 14:09:06.141939 21603 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 14:09:06.142220 21603 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 14:09:06.142238 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.142248 21603 net.cpp:165] Memory required for data: 498177500\nI0818 14:09:06.142302 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 14:09:06.142321 21603 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 14:09:06.142338 21603 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 14:09:06.142354 21603 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.142444 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 14:09:06.142629 21603 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 14:09:06.142648 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.142658 21603 net.cpp:165] Memory required for data: 500225500\nI0818 14:09:06.142678 21603 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 14:09:06.142699 21603 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 14:09:06.142712 21603 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 14:09:06.142725 21603 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 14:09:06.142741 21603 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 14:09:06.142802 21603 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 14:09:06.142822 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.142830 21603 net.cpp:165] Memory required for data: 502273500\nI0818 14:09:06.142841 21603 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 14:09:06.142856 21603 net.cpp:100] Creating Layer L3_b3_relu\nI0818 14:09:06.142868 21603 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0818 
14:09:06.142885 21603 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 14:09:06.142905 21603 net.cpp:150] Setting up L3_b3_relu\nI0818 14:09:06.142920 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.142930 21603 net.cpp:165] Memory required for data: 504321500\nI0818 14:09:06.142940 21603 layer_factory.hpp:77] Creating layer post_pool\nI0818 14:09:06.142954 21603 net.cpp:100] Creating Layer post_pool\nI0818 14:09:06.142966 21603 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0818 14:09:06.142982 21603 net.cpp:408] post_pool -> post_pool\nI0818 14:09:06.143035 21603 net.cpp:150] Setting up post_pool\nI0818 14:09:06.143055 21603 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 14:09:06.143065 21603 net.cpp:165] Memory required for data: 504353500\nI0818 14:09:06.143075 21603 layer_factory.hpp:77] Creating layer post_FC\nI0818 14:09:06.143180 21603 net.cpp:100] Creating Layer post_FC\nI0818 14:09:06.143196 21603 net.cpp:434] post_FC <- post_pool\nI0818 14:09:06.143213 21603 net.cpp:408] post_FC -> post_FC_top\nI0818 14:09:06.143482 21603 net.cpp:150] Setting up post_FC\nI0818 14:09:06.143507 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.143517 21603 net.cpp:165] Memory required for data: 504358500\nI0818 14:09:06.143535 21603 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 14:09:06.143550 21603 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 14:09:06.143569 21603 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 14:09:06.143585 21603 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0818 14:09:06.143605 21603 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 14:09:06.143688 21603 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 14:09:06.143707 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.143718 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.143728 21603 net.cpp:165] 
Memory required for data: 504368500\nI0818 14:09:06.143738 21603 layer_factory.hpp:77] Creating layer accuracy\nI0818 14:09:06.143797 21603 net.cpp:100] Creating Layer accuracy\nI0818 14:09:06.143813 21603 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 14:09:06.143826 21603 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 14:09:06.143846 21603 net.cpp:408] accuracy -> accuracy\nI0818 14:09:06.143910 21603 net.cpp:150] Setting up accuracy\nI0818 14:09:06.143929 21603 net.cpp:157] Top shape: (1)\nI0818 14:09:06.143939 21603 net.cpp:165] Memory required for data: 504368504\nI0818 14:09:06.143949 21603 layer_factory.hpp:77] Creating layer loss\nI0818 14:09:06.143965 21603 net.cpp:100] Creating Layer loss\nI0818 14:09:06.143976 21603 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 14:09:06.143988 21603 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 14:09:06.144003 21603 net.cpp:408] loss -> loss\nI0818 14:09:06.144069 21603 layer_factory.hpp:77] Creating layer loss\nI0818 14:09:06.144248 21603 net.cpp:150] Setting up loss\nI0818 14:09:06.144268 21603 net.cpp:157] Top shape: (1)\nI0818 14:09:06.144282 21603 net.cpp:160]     with loss weight 1\nI0818 14:09:06.144388 21603 net.cpp:165] Memory required for data: 504368508\nI0818 14:09:06.144402 21603 net.cpp:226] loss needs backward computation.\nI0818 14:09:06.144414 21603 net.cpp:228] accuracy does not need backward computation.\nI0818 14:09:06.144425 21603 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 14:09:06.144435 21603 net.cpp:226] post_FC needs backward computation.\nI0818 14:09:06.144445 21603 net.cpp:226] post_pool needs backward computation.\nI0818 14:09:06.144455 21603 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 14:09:06.144465 21603 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.144476 21603 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.144486 21603 net.cpp:226] 
L3_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.144496 21603 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.144506 21603 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 14:09:06.144516 21603 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.144526 21603 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.144536 21603 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 14:09:06.144547 21603 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 14:09:06.144557 21603 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 14:09:06.144567 21603 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.144578 21603 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.144589 21603 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.144599 21603 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.144609 21603 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 14:09:06.144619 21603 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.144629 21603 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.144639 21603 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.144649 21603 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 14:09:06.144660 21603 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 14:09:06.144672 21603 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 14:09:06.144688 21603 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 14:09:06.144700 21603 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.144711 21603 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 14:09:06.144721 21603 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.144731 21603 
net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.144742 21603 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.144750 21603 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.144760 21603 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.144770 21603 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.144780 21603 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 14:09:06.144790 21603 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 14:09:06.144801 21603 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 14:09:06.144811 21603 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.144822 21603 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.144831 21603 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.144842 21603 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.144852 21603 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 14:09:06.144862 21603 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.144871 21603 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.144882 21603 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 14:09:06.144893 21603 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 14:09:06.144903 21603 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 14:09:06.144913 21603 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.144924 21603 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.144934 21603 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.144944 21603 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.144954 21603 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 
14:09:06.144964 21603 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.144975 21603 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.144985 21603 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.144995 21603 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 14:09:06.145006 21603 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 14:09:06.145018 21603 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 14:09:06.145028 21603 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 14:09:06.145038 21603 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.145051 21603 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 14:09:06.145061 21603 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.145071 21603 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.145081 21603 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.145093 21603 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.145103 21603 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.145112 21603 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.145123 21603 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 14:09:06.145134 21603 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 14:09:06.145145 21603 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 14:09:06.145155 21603 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.145166 21603 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.145186 21603 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.145205 21603 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.145216 21603 net.cpp:226] L1_b3_cbr1_relu needs backward 
computation.\nI0818 14:09:06.145226 21603 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.145237 21603 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.145248 21603 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 14:09:06.145258 21603 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 14:09:06.145269 21603 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 14:09:06.145279 21603 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.145290 21603 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.145303 21603 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.145313 21603 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.145323 21603 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 14:09:06.145334 21603 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.145344 21603 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.145354 21603 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.145365 21603 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 14:09:06.145387 21603 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 14:09:06.145397 21603 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.145409 21603 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.145421 21603 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.145431 21603 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.145440 21603 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.145450 21603 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.145460 21603 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.145472 21603 net.cpp:226] 
L1_b1_cbr1_conv needs backward computation.\nI0818 14:09:06.145483 21603 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 14:09:06.145493 21603 net.cpp:226] pre_relu needs backward computation.\nI0818 14:09:06.145503 21603 net.cpp:226] pre_scale needs backward computation.\nI0818 14:09:06.145512 21603 net.cpp:226] pre_bn needs backward computation.\nI0818 14:09:06.145524 21603 net.cpp:226] pre_conv needs backward computation.\nI0818 14:09:06.145535 21603 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 14:09:06.145547 21603 net.cpp:228] dataLayer does not need backward computation.\nI0818 14:09:06.145556 21603 net.cpp:270] This network produces output accuracy\nI0818 14:09:06.145568 21603 net.cpp:270] This network produces output loss\nI0818 14:09:06.145743 21603 net.cpp:283] Network initialization done.\nI0818 14:09:06.150288 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:06.150318 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:06.150383 21603 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0818 14:09:06.150578 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0818 14:09:06.150602 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0818 14:09:06.150621 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI0818 14:09:06.150641 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0818 14:09:06.150660 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0818 14:09:06.150689 21603 net.cpp:322] The 
NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0818 14:09:06.150708 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0818 14:09:06.150727 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0818 14:09:06.150745 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0818 14:09:06.150763 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0818 14:09:06.150787 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0818 14:09:06.150807 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0818 14:09:06.150825 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0818 14:09:06.150843 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0818 14:09:06.150862 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0818 14:09:06.150880 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0818 14:09:06.150903 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0818 14:09:06.150921 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0818 14:09:06.150941 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0818 14:09:06.150959 21603 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b3_cbr2_bn\nI0818 14:09:06.151652 21603 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n  
  moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  
bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: 
\"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler 
{\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 32\n      dim: 8\n      dim: 8\n    }\n  }\n}\nlayer {\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: 
\"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer {\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer {\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\nI0818 14:09:06.152348 21603 layer_factory.hpp:77] Creating layer dataLayer\nI0818 14:09:06.152618 21603 net.cpp:100] Creating Layer dataLayer\nI0818 14:09:06.152658 21603 net.cpp:408] dataLayer -> data_top\nI0818 14:09:06.152686 21603 net.cpp:408] dataLayer -> label\nI0818 14:09:06.152709 21603 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 14:09:06.162233 21610 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0818 14:09:06.162499 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:06.169744 21603 net.cpp:150] Setting up dataLayer\nI0818 14:09:06.169770 21603 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 14:09:06.169785 21603 net.cpp:157] Top shape: 125 (125)\nI0818 14:09:06.169795 21603 net.cpp:165] Memory required for data: 1536500\nI0818 14:09:06.169811 21603 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 14:09:06.169833 21603 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 14:09:06.169844 21603 net.cpp:434] label_dataLayer_1_split <- label\nI0818 14:09:06.169859 21603 net.cpp:408] label_dataLayer_1_split -> 
label_dataLayer_1_split_0\nI0818 14:09:06.169879 21603 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 14:09:06.170032 21603 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 14:09:06.170054 21603 net.cpp:157] Top shape: 125 (125)\nI0818 14:09:06.170068 21603 net.cpp:157] Top shape: 125 (125)\nI0818 14:09:06.170078 21603 net.cpp:165] Memory required for data: 1537500\nI0818 14:09:06.170090 21603 layer_factory.hpp:77] Creating layer pre_conv\nI0818 14:09:06.170130 21603 net.cpp:100] Creating Layer pre_conv\nI0818 14:09:06.170145 21603 net.cpp:434] pre_conv <- data_top\nI0818 14:09:06.170171 21603 net.cpp:408] pre_conv -> pre_conv_top\nI0818 14:09:06.170613 21603 net.cpp:150] Setting up pre_conv\nI0818 14:09:06.170634 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.170655 21603 net.cpp:165] Memory required for data: 9729500\nI0818 14:09:06.170683 21603 layer_factory.hpp:77] Creating layer pre_bn\nI0818 14:09:06.170702 21603 net.cpp:100] Creating Layer pre_bn\nI0818 14:09:06.170713 21603 net.cpp:434] pre_bn <- pre_conv_top\nI0818 14:09:06.170737 21603 net.cpp:408] pre_bn -> pre_bn_top\nI0818 14:09:06.171121 21603 net.cpp:150] Setting up pre_bn\nI0818 14:09:06.171144 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.171159 21603 net.cpp:165] Memory required for data: 17921500\nI0818 14:09:06.171190 21603 layer_factory.hpp:77] Creating layer pre_scale\nI0818 14:09:06.171210 21603 net.cpp:100] Creating Layer pre_scale\nI0818 14:09:06.171221 21603 net.cpp:434] pre_scale <- pre_bn_top\nI0818 14:09:06.171236 21603 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 14:09:06.171337 21603 layer_factory.hpp:77] Creating layer pre_scale\nI0818 14:09:06.171591 21603 net.cpp:150] Setting up pre_scale\nI0818 14:09:06.171610 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.171620 21603 net.cpp:165] Memory required for data: 26113500\nI0818 14:09:06.171638 21603 layer_factory.hpp:77] 
Creating layer pre_relu\nI0818 14:09:06.171661 21603 net.cpp:100] Creating Layer pre_relu\nI0818 14:09:06.171674 21603 net.cpp:434] pre_relu <- pre_bn_top\nI0818 14:09:06.171694 21603 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 14:09:06.171716 21603 net.cpp:150] Setting up pre_relu\nI0818 14:09:06.171731 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.171741 21603 net.cpp:165] Memory required for data: 34305500\nI0818 14:09:06.171754 21603 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 14:09:06.171768 21603 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 14:09:06.171778 21603 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 14:09:06.171792 21603 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 14:09:06.171814 21603 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 14:09:06.171911 21603 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 14:09:06.171931 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.171946 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.171954 21603 net.cpp:165] Memory required for data: 50689500\nI0818 14:09:06.171968 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 14:09:06.171993 21603 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 14:09:06.172008 21603 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 14:09:06.172025 21603 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 14:09:06.172582 21603 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 14:09:06.172606 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.172616 21603 net.cpp:165] Memory required for data: 58881500\nI0818 14:09:06.172642 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 14:09:06.172670 21603 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 14:09:06.172684 21603 net.cpp:434] L1_b1_cbr1_bn <- 
L1_b1_cbr1_conv_top\nI0818 14:09:06.172705 21603 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 14:09:06.173050 21603 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 14:09:06.173069 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.173079 21603 net.cpp:165] Memory required for data: 67073500\nI0818 14:09:06.173105 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 14:09:06.173123 21603 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 14:09:06.173135 21603 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 14:09:06.173153 21603 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.173257 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 14:09:06.173475 21603 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 14:09:06.173496 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.173507 21603 net.cpp:165] Memory required for data: 75265500\nI0818 14:09:06.173527 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 14:09:06.173552 21603 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 14:09:06.173564 21603 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 14:09:06.173588 21603 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.173607 21603 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 14:09:06.173622 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.173635 21603 net.cpp:165] Memory required for data: 83457500\nI0818 14:09:06.173646 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 14:09:06.173676 21603 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 14:09:06.173688 21603 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 14:09:06.173708 21603 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 14:09:06.174132 21603 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 14:09:06.174154 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.174163 21603 net.cpp:165] 
Memory required for data: 91649500\nI0818 14:09:06.174185 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 14:09:06.174211 21603 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 14:09:06.174222 21603 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 14:09:06.174240 21603 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 14:09:06.174660 21603 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 14:09:06.174680 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.174690 21603 net.cpp:165] Memory required for data: 99841500\nI0818 14:09:06.174731 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 14:09:06.174753 21603 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 14:09:06.174765 21603 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 14:09:06.174780 21603 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.174897 21603 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 14:09:06.175235 21603 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 14:09:06.175257 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.175267 21603 net.cpp:165] Memory required for data: 108033500\nI0818 14:09:06.175289 21603 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 14:09:06.175310 21603 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 14:09:06.175321 21603 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 14:09:06.175335 21603 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 14:09:06.175360 21603 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 14:09:06.175431 21603 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 14:09:06.175451 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.175459 21603 net.cpp:165] Memory required for data: 116225500\nI0818 14:09:06.175468 21603 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 14:09:06.175490 21603 net.cpp:100] Creating Layer L1_b1_relu\nI0818 
14:09:06.175503 21603 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 14:09:06.175519 21603 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.175539 21603 net.cpp:150] Setting up L1_b1_relu\nI0818 14:09:06.175556 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.175565 21603 net.cpp:165] Memory required for data: 124417500\nI0818 14:09:06.175576 21603 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.175591 21603 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.175601 21603 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 14:09:06.175619 21603 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 14:09:06.175638 21603 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 14:09:06.175729 21603 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 14:09:06.175751 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.175781 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.175792 21603 net.cpp:165] Memory required for data: 140801500\nI0818 14:09:06.175802 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 14:09:06.175830 21603 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 14:09:06.175844 21603 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 14:09:06.175863 21603 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 14:09:06.176295 21603 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 14:09:06.176316 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.176326 21603 net.cpp:165] Memory required for data: 148993500\nI0818 14:09:06.176347 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 14:09:06.176376 21603 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 
14:09:06.176390 21603 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 14:09:06.176415 21603 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 14:09:06.176769 21603 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 14:09:06.176789 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.176802 21603 net.cpp:165] Memory required for data: 157185500\nI0818 14:09:06.176825 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 14:09:06.176843 21603 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 14:09:06.176856 21603 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 14:09:06.176873 21603 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.176976 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 14:09:06.177184 21603 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 14:09:06.177206 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.177217 21603 net.cpp:165] Memory required for data: 165377500\nI0818 14:09:06.177237 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 14:09:06.177259 21603 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 14:09:06.177275 21603 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 14:09:06.177291 21603 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.177309 21603 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 14:09:06.177322 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.177336 21603 net.cpp:165] Memory required for data: 173569500\nI0818 14:09:06.177346 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 14:09:06.177381 21603 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 14:09:06.177397 21603 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 14:09:06.177422 21603 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 14:09:06.178017 21603 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 14:09:06.178037 21603 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 14:09:06.178050 21603 net.cpp:165] Memory required for data: 181761500\nI0818 14:09:06.178071 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 14:09:06.178092 21603 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 14:09:06.178102 21603 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 14:09:06.178123 21603 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 14:09:06.178469 21603 net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 14:09:06.178491 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.178503 21603 net.cpp:165] Memory required for data: 189953500\nI0818 14:09:06.178536 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 14:09:06.178558 21603 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 14:09:06.178573 21603 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 14:09:06.178591 21603 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.178699 21603 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 14:09:06.178910 21603 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 14:09:06.178939 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.178951 21603 net.cpp:165] Memory required for data: 198145500\nI0818 14:09:06.178970 21603 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 14:09:06.179004 21603 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 14:09:06.179018 21603 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 14:09:06.179033 21603 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 14:09:06.179051 21603 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 14:09:06.179114 21603 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 14:09:06.179132 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.179147 21603 net.cpp:165] Memory required for data: 206337500\nI0818 14:09:06.179157 21603 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 
14:09:06.179173 21603 net.cpp:100] Creating Layer L1_b2_relu\nI0818 14:09:06.179185 21603 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 14:09:06.179199 21603 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.179220 21603 net.cpp:150] Setting up L1_b2_relu\nI0818 14:09:06.179235 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.179249 21603 net.cpp:165] Memory required for data: 214529500\nI0818 14:09:06.179263 21603 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.179281 21603 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.179293 21603 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 14:09:06.179316 21603 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 14:09:06.179335 21603 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 14:09:06.179427 21603 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 14:09:06.179453 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.179471 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.179481 21603 net.cpp:165] Memory required for data: 230913500\nI0818 14:09:06.179492 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 14:09:06.179513 21603 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 14:09:06.179525 21603 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 14:09:06.179543 21603 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 14:09:06.179977 21603 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 14:09:06.179999 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.180011 21603 net.cpp:165] Memory required for data: 239105500\nI0818 14:09:06.180029 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 
14:09:06.180054 21603 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 14:09:06.180066 21603 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 14:09:06.180083 21603 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 14:09:06.180434 21603 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 14:09:06.180454 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.180464 21603 net.cpp:165] Memory required for data: 247297500\nI0818 14:09:06.180485 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 14:09:06.180500 21603 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 14:09:06.180512 21603 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 14:09:06.180531 21603 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.180627 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 14:09:06.180830 21603 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 14:09:06.180855 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.180865 21603 net.cpp:165] Memory required for data: 255489500\nI0818 14:09:06.180882 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 14:09:06.180897 21603 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 14:09:06.180908 21603 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 14:09:06.180922 21603 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.180939 21603 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 14:09:06.180964 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.180974 21603 net.cpp:165] Memory required for data: 263681500\nI0818 14:09:06.180984 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 14:09:06.181010 21603 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 14:09:06.181025 21603 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 14:09:06.181044 21603 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 14:09:06.181440 21603 net.cpp:150] Setting up 
L1_b3_cbr2_conv\nI0818 14:09:06.181460 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.181469 21603 net.cpp:165] Memory required for data: 271873500\nI0818 14:09:06.181488 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 14:09:06.181519 21603 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 14:09:06.181532 21603 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 14:09:06.181548 21603 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 14:09:06.181860 21603 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 14:09:06.181879 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.181888 21603 net.cpp:165] Memory required for data: 280065500\nI0818 14:09:06.181910 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 14:09:06.181926 21603 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 14:09:06.181937 21603 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 14:09:06.181958 21603 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.182054 21603 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 14:09:06.182245 21603 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 14:09:06.182268 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.182278 21603 net.cpp:165] Memory required for data: 288257500\nI0818 14:09:06.182297 21603 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 14:09:06.182313 21603 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 14:09:06.182324 21603 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 14:09:06.182337 21603 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 14:09:06.182351 21603 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 14:09:06.182417 21603 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 14:09:06.182440 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.182449 21603 net.cpp:165] Memory required for data: 296449500\nI0818 
14:09:06.182461 21603 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 14:09:06.182478 21603 net.cpp:100] Creating Layer L1_b3_relu\nI0818 14:09:06.182490 21603 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 14:09:06.182503 21603 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 14:09:06.182521 21603 net.cpp:150] Setting up L1_b3_relu\nI0818 14:09:06.182535 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.182544 21603 net.cpp:165] Memory required for data: 304641500\nI0818 14:09:06.182554 21603 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.182571 21603 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.182582 21603 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 14:09:06.182597 21603 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 14:09:06.182615 21603 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 14:09:06.182695 21603 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 14:09:06.182719 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.182734 21603 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 14:09:06.182742 21603 net.cpp:165] Memory required for data: 321025500\nI0818 14:09:06.182752 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 14:09:06.182771 21603 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 14:09:06.182783 21603 net.cpp:434] L2_b1_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 14:09:06.182809 21603 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 14:09:06.183203 21603 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 14:09:06.183223 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.183233 21603 net.cpp:165] Memory required for data: 323073500\nI0818 
14:09:06.183250 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 14:09:06.183271 21603 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 14:09:06.183284 21603 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 14:09:06.183298 21603 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 14:09:06.183605 21603 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 14:09:06.183624 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.183634 21603 net.cpp:165] Memory required for data: 325121500\nI0818 14:09:06.183655 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 14:09:06.183671 21603 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 14:09:06.183682 21603 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 14:09:06.183706 21603 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.183800 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 14:09:06.183992 21603 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 14:09:06.184011 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.184020 21603 net.cpp:165] Memory required for data: 327169500\nI0818 14:09:06.184038 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 14:09:06.184052 21603 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 14:09:06.184063 21603 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 14:09:06.184082 21603 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.184101 21603 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 14:09:06.184116 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.184125 21603 net.cpp:165] Memory required for data: 329217500\nI0818 14:09:06.184135 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 14:09:06.184159 21603 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 14:09:06.184172 21603 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 14:09:06.184188 21603 net.cpp:408] L2_b1_cbr2_conv -> 
L2_b1_cbr2_conv_top\nI0818 14:09:06.184577 21603 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 14:09:06.184597 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.184607 21603 net.cpp:165] Memory required for data: 331265500\nI0818 14:09:06.184623 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 14:09:06.184644 21603 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 14:09:06.184656 21603 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 14:09:06.184671 21603 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 14:09:06.184969 21603 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 14:09:06.184989 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.184998 21603 net.cpp:165] Memory required for data: 333313500\nI0818 14:09:06.185020 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 14:09:06.185040 21603 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 14:09:06.185052 21603 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 14:09:06.185066 21603 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.185163 21603 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 14:09:06.185353 21603 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 14:09:06.185379 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.185389 21603 net.cpp:165] Memory required for data: 335361500\nI0818 14:09:06.185407 21603 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 14:09:06.185422 21603 net.cpp:100] Creating Layer L2_b1_pool\nI0818 14:09:06.185430 21603 net.cpp:434] L2_b1_pool <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 14:09:06.185441 21603 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 14:09:06.185474 21603 net.cpp:150] Setting up L2_b1_pool\nI0818 14:09:06.185494 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.185500 21603 net.cpp:165] Memory required for data: 337409500\nI0818 14:09:06.185505 21603 layer_factory.hpp:77] Creating layer 
L2_b1_sum_eltwise\nI0818 14:09:06.185514 21603 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 14:09:06.185519 21603 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 14:09:06.185526 21603 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 14:09:06.185539 21603 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 14:09:06.185573 21603 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 14:09:06.185583 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.185588 21603 net.cpp:165] Memory required for data: 339457500\nI0818 14:09:06.185592 21603 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 14:09:06.185600 21603 net.cpp:100] Creating Layer L2_b1_relu\nI0818 14:09:06.185606 21603 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 14:09:06.185613 21603 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.185622 21603 net.cpp:150] Setting up L2_b1_relu\nI0818 14:09:06.185629 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.185633 21603 net.cpp:165] Memory required for data: 341505500\nI0818 14:09:06.185638 21603 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 14:09:06.185652 21603 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 14:09:06.185658 21603 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 14:09:06.187916 21603 net.cpp:150] Setting up L2_b1_zeros\nI0818 14:09:06.187938 21603 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 14:09:06.187948 21603 net.cpp:165] Memory required for data: 343553500\nI0818 14:09:06.187958 21603 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 14:09:06.187973 21603 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 14:09:06.187984 21603 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 14:09:06.187997 21603 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 14:09:06.188016 21603 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 14:09:06.188076 21603 net.cpp:150] Setting up L2_b1_concat0\nI0818 14:09:06.188092 21603 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.188108 21603 net.cpp:165] Memory required for data: 347649500\nI0818 14:09:06.188118 21603 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.188133 21603 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.188143 21603 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 14:09:06.188158 21603 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 14:09:06.188176 21603 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 14:09:06.188262 21603 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 14:09:06.188280 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.188293 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.188302 21603 net.cpp:165] Memory required for data: 355841500\nI0818 14:09:06.188313 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 14:09:06.188344 21603 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 14:09:06.188359 21603 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 14:09:06.188385 21603 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 14:09:06.188905 21603 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 14:09:06.188925 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.188932 21603 net.cpp:165] Memory required for data: 359937500\nI0818 14:09:06.188973 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 14:09:06.188994 21603 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 14:09:06.189007 21603 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 14:09:06.189021 21603 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 14:09:06.189328 21603 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 14:09:06.189348 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 
14:09:06.189366 21603 net.cpp:165] Memory required for data: 364033500\nI0818 14:09:06.189398 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 14:09:06.189414 21603 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 14:09:06.189425 21603 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 14:09:06.189440 21603 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.189539 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 14:09:06.189725 21603 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 14:09:06.189744 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.189754 21603 net.cpp:165] Memory required for data: 368129500\nI0818 14:09:06.189774 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 14:09:06.189796 21603 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 14:09:06.189810 21603 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 14:09:06.189824 21603 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.189842 21603 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 14:09:06.189858 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.189868 21603 net.cpp:165] Memory required for data: 372225500\nI0818 14:09:06.189878 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 14:09:06.189901 21603 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 14:09:06.189915 21603 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 14:09:06.189931 21603 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 14:09:06.190459 21603 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 14:09:06.190479 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.190488 21603 net.cpp:165] Memory required for data: 376321500\nI0818 14:09:06.190506 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 14:09:06.190528 21603 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 14:09:06.190541 21603 net.cpp:434] L2_b2_cbr2_bn 
<- L2_b2_cbr2_conv_top\nI0818 14:09:06.190557 21603 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 14:09:06.190850 21603 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 14:09:06.190871 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.190881 21603 net.cpp:165] Memory required for data: 380417500\nI0818 14:09:06.190904 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 14:09:06.190920 21603 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 14:09:06.190932 21603 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 14:09:06.190948 21603 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.191036 21603 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 14:09:06.191227 21603 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 14:09:06.191247 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.191257 21603 net.cpp:165] Memory required for data: 384513500\nI0818 14:09:06.191277 21603 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 14:09:06.191293 21603 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 14:09:06.191304 21603 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 14:09:06.191318 21603 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 14:09:06.191339 21603 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 14:09:06.191395 21603 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 14:09:06.191413 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.191423 21603 net.cpp:165] Memory required for data: 388609500\nI0818 14:09:06.191433 21603 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 14:09:06.191452 21603 net.cpp:100] Creating Layer L2_b2_relu\nI0818 14:09:06.191463 21603 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 14:09:06.191478 21603 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.191495 21603 net.cpp:150] Setting up L2_b2_relu\nI0818 
14:09:06.191509 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.191519 21603 net.cpp:165] Memory required for data: 392705500\nI0818 14:09:06.191537 21603 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.191552 21603 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.191563 21603 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 14:09:06.191578 21603 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 14:09:06.191598 21603 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 14:09:06.191684 21603 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 14:09:06.191704 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.191717 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.191727 21603 net.cpp:165] Memory required for data: 400897500\nI0818 14:09:06.191738 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 14:09:06.191763 21603 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 14:09:06.191777 21603 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 14:09:06.191795 21603 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 14:09:06.192311 21603 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 14:09:06.192330 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.192339 21603 net.cpp:165] Memory required for data: 404993500\nI0818 14:09:06.192358 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 14:09:06.192386 21603 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 14:09:06.192400 21603 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 14:09:06.192425 21603 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 14:09:06.192728 21603 net.cpp:150] Setting up L2_b3_cbr1_bn\nI0818 
14:09:06.192747 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.192756 21603 net.cpp:165] Memory required for data: 409089500\nI0818 14:09:06.192778 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 14:09:06.192795 21603 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 14:09:06.192806 21603 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 14:09:06.192821 21603 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.192911 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 14:09:06.193094 21603 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 14:09:06.193114 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.193123 21603 net.cpp:165] Memory required for data: 413185500\nI0818 14:09:06.193141 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 14:09:06.193157 21603 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 14:09:06.193168 21603 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 14:09:06.193187 21603 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.193207 21603 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 14:09:06.193222 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.193230 21603 net.cpp:165] Memory required for data: 417281500\nI0818 14:09:06.193241 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 14:09:06.193274 21603 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 14:09:06.193289 21603 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 14:09:06.193306 21603 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 14:09:06.193825 21603 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 14:09:06.193843 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.193853 21603 net.cpp:165] Memory required for data: 421377500\nI0818 14:09:06.193872 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 14:09:06.193891 21603 net.cpp:100] Creating 
Layer L2_b3_cbr2_bn\nI0818 14:09:06.193907 21603 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 14:09:06.193923 21603 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 14:09:06.194221 21603 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 14:09:06.194242 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.194259 21603 net.cpp:165] Memory required for data: 425473500\nI0818 14:09:06.194283 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 14:09:06.194298 21603 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 14:09:06.194311 21603 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 14:09:06.194326 21603 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.194434 21603 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 14:09:06.194624 21603 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 14:09:06.194648 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.194656 21603 net.cpp:165] Memory required for data: 429569500\nI0818 14:09:06.194675 21603 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 14:09:06.194692 21603 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 14:09:06.194703 21603 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 14:09:06.194716 21603 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 14:09:06.194731 21603 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 14:09:06.194784 21603 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 14:09:06.194802 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.194813 21603 net.cpp:165] Memory required for data: 433665500\nI0818 14:09:06.194823 21603 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 14:09:06.194838 21603 net.cpp:100] Creating Layer L2_b3_relu\nI0818 14:09:06.194849 21603 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 14:09:06.194867 21603 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top 
(in-place)\nI0818 14:09:06.194887 21603 net.cpp:150] Setting up L2_b3_relu\nI0818 14:09:06.194902 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.194911 21603 net.cpp:165] Memory required for data: 437761500\nI0818 14:09:06.194921 21603 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.194936 21603 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.194946 21603 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 14:09:06.194963 21603 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 14:09:06.194984 21603 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 14:09:06.195065 21603 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 14:09:06.195086 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.195101 21603 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 14:09:06.195111 21603 net.cpp:165] Memory required for data: 445953500\nI0818 14:09:06.195121 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 14:09:06.195144 21603 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 14:09:06.195158 21603 net.cpp:434] L3_b1_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 14:09:06.195175 21603 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 14:09:06.195718 21603 net.cpp:150] Setting up L3_b1_cbr1_conv\nI0818 14:09:06.195737 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.195747 21603 net.cpp:165] Memory required for data: 446977500\nI0818 14:09:06.195766 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 14:09:06.195787 21603 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 14:09:06.195798 21603 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 14:09:06.195816 21603 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 
14:09:06.196105 21603 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 14:09:06.196123 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.196132 21603 net.cpp:165] Memory required for data: 448001500\nI0818 14:09:06.196153 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 14:09:06.196174 21603 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 14:09:06.196187 21603 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 14:09:06.196213 21603 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.196311 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 14:09:06.196517 21603 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 14:09:06.196537 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.196548 21603 net.cpp:165] Memory required for data: 449025500\nI0818 14:09:06.196566 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 14:09:06.196588 21603 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 14:09:06.196599 21603 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 14:09:06.196614 21603 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 14:09:06.196632 21603 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 14:09:06.196647 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.196657 21603 net.cpp:165] Memory required for data: 450049500\nI0818 14:09:06.196667 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 14:09:06.196696 21603 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI0818 14:09:06.196709 21603 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 14:09:06.196730 21603 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 14:09:06.197247 21603 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 14:09:06.197266 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.197276 21603 net.cpp:165] Memory required for data: 451073500\nI0818 14:09:06.197294 21603 layer_factory.hpp:77] Creating layer 
L3_b1_cbr2_bn\nI0818 14:09:06.197311 21603 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 14:09:06.197324 21603 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 14:09:06.197345 21603 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 14:09:06.197651 21603 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 14:09:06.197669 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.197679 21603 net.cpp:165] Memory required for data: 452097500\nI0818 14:09:06.197701 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 14:09:06.197718 21603 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 14:09:06.197729 21603 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 14:09:06.197744 21603 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 14:09:06.197837 21603 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 14:09:06.198031 21603 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 14:09:06.198050 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.198060 21603 net.cpp:165] Memory required for data: 453121500\nI0818 14:09:06.198077 21603 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 14:09:06.198098 21603 net.cpp:100] Creating Layer L3_b1_pool\nI0818 14:09:06.198110 21603 net.cpp:434] L3_b1_pool <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 14:09:06.198127 21603 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 14:09:06.198189 21603 net.cpp:150] Setting up L3_b1_pool\nI0818 14:09:06.198210 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.198220 21603 net.cpp:165] Memory required for data: 454145500\nI0818 14:09:06.198231 21603 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 14:09:06.198247 21603 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 14:09:06.198258 21603 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 14:09:06.198271 21603 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 14:09:06.198287 21603 net.cpp:408] L3_b1_sum_eltwise -> 
L3_b1_sum_eltwise_top\nI0818 14:09:06.198348 21603 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 14:09:06.198365 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.198382 21603 net.cpp:165] Memory required for data: 455169500\nI0818 14:09:06.198395 21603 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 14:09:06.198407 21603 net.cpp:100] Creating Layer L3_b1_relu\nI0818 14:09:06.198418 21603 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 14:09:06.198432 21603 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 14:09:06.198451 21603 net.cpp:150] Setting up L3_b1_relu\nI0818 14:09:06.198477 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.198487 21603 net.cpp:165] Memory required for data: 456193500\nI0818 14:09:06.198498 21603 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 14:09:06.198513 21603 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 14:09:06.198534 21603 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 14:09:06.199803 21603 net.cpp:150] Setting up L3_b1_zeros\nI0818 14:09:06.199826 21603 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 14:09:06.199836 21603 net.cpp:165] Memory required for data: 457217500\nI0818 14:09:06.199846 21603 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 14:09:06.199867 21603 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 14:09:06.199880 21603 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 14:09:06.199893 21603 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 14:09:06.199908 21603 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 14:09:06.199973 21603 net.cpp:150] Setting up L3_b1_concat0\nI0818 14:09:06.199992 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.200001 21603 net.cpp:165] Memory required for data: 459265500\nI0818 14:09:06.200012 21603 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 14:09:06.200026 21603 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 
14:09:06.200037 21603 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 14:09:06.200057 21603 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 14:09:06.200078 21603 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 14:09:06.200163 21603 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 14:09:06.200186 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.200201 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.200212 21603 net.cpp:165] Memory required for data: 463361500\nI0818 14:09:06.200222 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 14:09:06.200244 21603 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 14:09:06.200258 21603 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 14:09:06.200276 21603 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 14:09:06.201346 21603 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 14:09:06.201371 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.201383 21603 net.cpp:165] Memory required for data: 465409500\nI0818 14:09:06.201402 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 14:09:06.201424 21603 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 14:09:06.201438 21603 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 14:09:06.201454 21603 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 14:09:06.201772 21603 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 14:09:06.201792 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.201802 21603 net.cpp:165] Memory required for data: 467457500\nI0818 14:09:06.201824 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 14:09:06.201848 21603 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 14:09:06.201860 21603 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 14:09:06.201876 21603 net.cpp:395] 
L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.201978 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 14:09:06.202169 21603 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 14:09:06.202188 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.202198 21603 net.cpp:165] Memory required for data: 469505500\nI0818 14:09:06.202217 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 14:09:06.202237 21603 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 14:09:06.202250 21603 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 14:09:06.202268 21603 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 14:09:06.202289 21603 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 14:09:06.202313 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.202324 21603 net.cpp:165] Memory required for data: 471553500\nI0818 14:09:06.202335 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 14:09:06.202355 21603 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 14:09:06.202374 21603 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 14:09:06.202399 21603 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 14:09:06.203488 21603 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 14:09:06.203510 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.203519 21603 net.cpp:165] Memory required for data: 473601500\nI0818 14:09:06.203537 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI0818 14:09:06.203560 21603 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 14:09:06.203573 21603 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 14:09:06.203590 21603 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 14:09:06.203889 21603 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 14:09:06.203908 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.203917 21603 net.cpp:165] Memory required for data: 475649500\nI0818 14:09:06.203939 21603 
layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 14:09:06.203956 21603 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 14:09:06.203969 21603 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 14:09:06.203984 21603 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 14:09:06.204078 21603 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 14:09:06.204277 21603 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 14:09:06.204299 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.204310 21603 net.cpp:165] Memory required for data: 477697500\nI0818 14:09:06.204329 21603 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 14:09:06.204345 21603 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 14:09:06.204357 21603 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 14:09:06.204378 21603 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 14:09:06.204396 21603 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 14:09:06.204457 21603 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 14:09:06.204475 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.204484 21603 net.cpp:165] Memory required for data: 479745500\nI0818 14:09:06.204495 21603 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 14:09:06.204509 21603 net.cpp:100] Creating Layer L3_b2_relu\nI0818 14:09:06.204520 21603 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 14:09:06.204535 21603 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 14:09:06.204553 21603 net.cpp:150] Setting up L3_b2_relu\nI0818 14:09:06.204568 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.204577 21603 net.cpp:165] Memory required for data: 481793500\nI0818 14:09:06.204587 21603 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 14:09:06.204607 21603 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 
14:09:06.204618 21603 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 14:09:06.204633 21603 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 14:09:06.204653 21603 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 14:09:06.204733 21603 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 14:09:06.204756 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.204771 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.204782 21603 net.cpp:165] Memory required for data: 485889500\nI0818 14:09:06.204792 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 14:09:06.204812 21603 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 14:09:06.204825 21603 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 14:09:06.204852 21603 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 14:09:06.205950 21603 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 14:09:06.205971 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.205981 21603 net.cpp:165] Memory required for data: 487937500\nI0818 14:09:06.205998 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 14:09:06.206022 21603 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 14:09:06.206033 21603 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 14:09:06.206050 21603 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 14:09:06.206356 21603 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 14:09:06.206380 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.206392 21603 net.cpp:165] Memory required for data: 489985500\nI0818 14:09:06.206413 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 14:09:06.206434 21603 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 14:09:06.206446 21603 net.cpp:434] L3_b3_cbr1_scale <- 
L3_b3_cbr1_bn_top\nI0818 14:09:06.206466 21603 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.206564 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 14:09:06.206761 21603 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 14:09:06.206781 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.206791 21603 net.cpp:165] Memory required for data: 492033500\nI0818 14:09:06.206809 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 14:09:06.206825 21603 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 14:09:06.206836 21603 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 14:09:06.206856 21603 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 14:09:06.206876 21603 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 14:09:06.206889 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.206900 21603 net.cpp:165] Memory required for data: 494081500\nI0818 14:09:06.206910 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 14:09:06.206929 21603 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 14:09:06.206941 21603 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 14:09:06.206964 21603 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 14:09:06.208998 21603 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 14:09:06.209020 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.209030 21603 net.cpp:165] Memory required for data: 496129500\nI0818 14:09:06.209048 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 14:09:06.209070 21603 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 14:09:06.209084 21603 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 14:09:06.209100 21603 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 14:09:06.209417 21603 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 14:09:06.209437 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.209447 21603 net.cpp:165] Memory required 
for data: 498177500\nI0818 14:09:06.209504 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 14:09:06.209524 21603 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 14:09:06.209542 21603 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 14:09:06.209558 21603 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 14:09:06.209646 21603 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 14:09:06.209837 21603 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 14:09:06.209856 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.209867 21603 net.cpp:165] Memory required for data: 500225500\nI0818 14:09:06.209885 21603 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 14:09:06.209906 21603 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 14:09:06.209918 21603 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 14:09:06.209933 21603 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 14:09:06.209949 21603 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 14:09:06.210018 21603 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 14:09:06.210036 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.210047 21603 net.cpp:165] Memory required for data: 502273500\nI0818 14:09:06.210057 21603 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 14:09:06.210072 21603 net.cpp:100] Creating Layer L3_b3_relu\nI0818 14:09:06.210083 21603 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI0818 14:09:06.210101 21603 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 14:09:06.210121 21603 net.cpp:150] Setting up L3_b3_relu\nI0818 14:09:06.210137 21603 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 14:09:06.210146 21603 net.cpp:165] Memory required for data: 504321500\nI0818 14:09:06.210155 21603 layer_factory.hpp:77] Creating layer post_pool\nI0818 14:09:06.210171 21603 net.cpp:100] Creating Layer post_pool\nI0818 14:09:06.210183 
21603 net.cpp:434] post_pool <- L3_b3_sum_eltwise_top\nI0818 14:09:06.210198 21603 net.cpp:408] post_pool -> post_pool\nI0818 14:09:06.210256 21603 net.cpp:150] Setting up post_pool\nI0818 14:09:06.210274 21603 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 14:09:06.210283 21603 net.cpp:165] Memory required for data: 504353500\nI0818 14:09:06.210294 21603 layer_factory.hpp:77] Creating layer post_FC\nI0818 14:09:06.210316 21603 net.cpp:100] Creating Layer post_FC\nI0818 14:09:06.210328 21603 net.cpp:434] post_FC <- post_pool\nI0818 14:09:06.210345 21603 net.cpp:408] post_FC -> post_FC_top\nI0818 14:09:06.210572 21603 net.cpp:150] Setting up post_FC\nI0818 14:09:06.210598 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.210608 21603 net.cpp:165] Memory required for data: 504358500\nI0818 14:09:06.210623 21603 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 14:09:06.210638 21603 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 14:09:06.210646 21603 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 14:09:06.210655 21603 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI0818 14:09:06.210665 21603 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 14:09:06.210721 21603 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 14:09:06.210731 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.210736 21603 net.cpp:157] Top shape: 125 10 (1250)\nI0818 14:09:06.210741 21603 net.cpp:165] Memory required for data: 504368500\nI0818 14:09:06.210747 21603 layer_factory.hpp:77] Creating layer accuracy\nI0818 14:09:06.210754 21603 net.cpp:100] Creating Layer accuracy\nI0818 14:09:06.210760 21603 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 14:09:06.210767 21603 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 14:09:06.210777 21603 net.cpp:408] accuracy -> accuracy\nI0818 14:09:06.210790 21603 net.cpp:150] Setting up 
accuracy\nI0818 14:09:06.210798 21603 net.cpp:157] Top shape: (1)\nI0818 14:09:06.210803 21603 net.cpp:165] Memory required for data: 504368504\nI0818 14:09:06.210806 21603 layer_factory.hpp:77] Creating layer loss\nI0818 14:09:06.210814 21603 net.cpp:100] Creating Layer loss\nI0818 14:09:06.210819 21603 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 14:09:06.210826 21603 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 14:09:06.210834 21603 net.cpp:408] loss -> loss\nI0818 14:09:06.210844 21603 layer_factory.hpp:77] Creating layer loss\nI0818 14:09:06.210958 21603 net.cpp:150] Setting up loss\nI0818 14:09:06.210971 21603 net.cpp:157] Top shape: (1)\nI0818 14:09:06.210976 21603 net.cpp:160]     with loss weight 1\nI0818 14:09:06.210989 21603 net.cpp:165] Memory required for data: 504368508\nI0818 14:09:06.210996 21603 net.cpp:226] loss needs backward computation.\nI0818 14:09:06.211001 21603 net.cpp:228] accuracy does not need backward computation.\nI0818 14:09:06.211007 21603 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 14:09:06.211014 21603 net.cpp:226] post_FC needs backward computation.\nI0818 14:09:06.211019 21603 net.cpp:226] post_pool needs backward computation.\nI0818 14:09:06.211024 21603 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 14:09:06.211035 21603 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.211041 21603 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.211046 21603 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.211051 21603 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.211056 21603 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 14:09:06.211061 21603 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.211066 21603 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.211071 21603 net.cpp:226] L3_b3_cbr1_conv needs backward 
computation.\nI0818 14:09:06.211076 21603 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 14:09:06.211082 21603 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 14:09:06.211087 21603 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.211092 21603 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.211097 21603 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.211103 21603 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.211108 21603 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 14:09:06.211113 21603 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.211118 21603 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.211124 21603 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.211129 21603 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 14:09:06.211135 21603 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 14:09:06.211141 21603 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 14:09:06.211146 21603 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 14:09:06.211151 21603 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.211156 21603 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 14:09:06.211163 21603 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.211168 21603 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.211172 21603 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.211177 21603 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.211182 21603 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.211187 21603 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.211192 21603 net.cpp:226] L3_b1_cbr1_conv 
needs backward computation.\nI0818 14:09:06.211197 21603 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 14:09:06.211203 21603 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 14:09:06.211208 21603 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.211213 21603 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.211218 21603 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.211225 21603 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.211230 21603 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 14:09:06.211233 21603 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.211238 21603 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.211244 21603 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 14:09:06.211249 21603 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 14:09:06.211254 21603 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 14:09:06.211259 21603 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.211266 21603 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.211271 21603 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.211277 21603 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.211287 21603 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 14:09:06.211292 21603 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.211297 21603 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.211302 21603 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.211308 21603 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 14:09:06.211313 21603 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 
14:09:06.211319 21603 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 14:09:06.211324 21603 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 14:09:06.211329 21603 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.211335 21603 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 14:09:06.211340 21603 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.211345 21603 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.211351 21603 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.211356 21603 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.211361 21603 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.211366 21603 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.211380 21603 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 14:09:06.211386 21603 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 14:09:06.211391 21603 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 14:09:06.211397 21603 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI0818 14:09:06.211406 21603 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 14:09:06.211411 21603 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 14:09:06.211417 21603 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 14:09:06.211423 21603 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 14:09:06.211431 21603 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 14:09:06.211436 21603 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 14:09:06.211442 21603 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 14:09:06.211447 21603 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 14:09:06.211453 21603 net.cpp:226] L1_b2_relu needs backward 
computation.\nI0818 14:09:06.211458 21603 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 14:09:06.211464 21603 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 14:09:06.211469 21603 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 14:09:06.211475 21603 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 14:09:06.211480 21603 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 14:09:06.211486 21603 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 14:09:06.211491 21603 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 14:09:06.211496 21603 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 14:09:06.211503 21603 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 14:09:06.211508 21603 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 14:09:06.211513 21603 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 14:09:06.211519 21603 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 14:09:06.211525 21603 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 14:09:06.211530 21603 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI0818 14:09:06.211536 21603 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 14:09:06.211541 21603 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 14:09:06.211547 21603 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 14:09:06.211552 21603 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 14:09:06.211563 21603 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 14:09:06.211570 21603 net.cpp:226] pre_relu needs backward computation.\nI0818 14:09:06.211575 21603 net.cpp:226] pre_scale needs backward computation.\nI0818 14:09:06.211580 21603 net.cpp:226] pre_bn needs backward computation.\nI0818 14:09:06.211585 21603 net.cpp:226] pre_conv needs backward 
computation.\nI0818 14:09:06.211592 21603 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 14:09:06.211598 21603 net.cpp:228] dataLayer does not need backward computation.\nI0818 14:09:06.211603 21603 net.cpp:270] This network produces output accuracy\nI0818 14:09:06.211609 21603 net.cpp:270] This network produces output loss\nI0818 14:09:06.211724 21603 net.cpp:283] Network initialization done.\nI0818 14:09:06.212075 21603 solver.cpp:60] Solver scaffolding done.\nI0818 14:09:06.421891 21603 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0818 14:09:06.719238 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:06.719285 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:06.724687 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:06.932452 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:06.932565 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:06.944139 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:06.944244 21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:07.278100 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:07.278162 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:07.284690 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:07.490373 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:07.490511 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:07.507522 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:07.507647 
21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:07.874488 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:07.874536 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:07.881906 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:08.095749 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:08.095898 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:08.118767 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:08.118911 21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:08.139174 21603 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0818 14:09:08.494756 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:08.494818 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:08.502964 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:08.724486 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:08.724664 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:08.754343 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:08.754520 21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:09.171197 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:09.171257 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:09.180362 21603 
data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:09.412989 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:09.413158 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:09.449429 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:09.449589 21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:09.885928 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:09.885977 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:09.895764 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:10.144246 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:10.144471 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:10.186735 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:10.186949 21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:10.656193 21603 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 14:09:10.656242 21603 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 14:09:10.666666 21603 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 14:09:10.718538 21630 blocking_queue.cpp:50] Waiting for data\nI0818 14:09:10.772424 21630 blocking_queue.cpp:50] Waiting for data\nI0818 14:09:10.979281 21603 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 14:09:10.979498 21603 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 14:09:11.027990 21603 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 14:09:11.028197 
21603 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 14:09:11.072477 21603 parallel.cpp:425] Starting Optimization\nI0818 14:09:11.074380 21603 solver.cpp:279] Solving Cifar-Resnet\nI0818 14:09:11.074398 21603 solver.cpp:280] Learning Rate Policy: triangular\nI0818 14:09:11.076261 21603 solver.cpp:337] Iteration 0, Testing net (#0)\nI0818 14:09:37.218925 21603 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI0818 14:09:37.219120 21603 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0818 14:09:38.724869 21603 solver.cpp:228] Iteration 0, loss = 2.58301\nI0818 14:09:38.724921 21603 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0818 14:09:38.724946 21603 solver.cpp:244]     Train net output #1: loss = 2.58301 (* 1 = 2.58301 loss)\nI0818 14:09:38.801800 21603 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0818 14:10:26.161713 21603 solver.cpp:337] Iteration 100, Testing net (#0)\nI0818 14:10:52.450045 21603 solver.cpp:404]     Test net output #0: accuracy = 0.25768\nI0818 14:10:52.450124 21603 solver.cpp:404]     Test net output #1: loss = 2.06441 (* 1 = 2.06441 loss)\nI0818 14:10:52.862208 21603 solver.cpp:228] Iteration 100, loss = 2.05301\nI0818 14:10:52.862269 21603 solver.cpp:244]     Train net output #0: accuracy = 0.264\nI0818 14:10:52.862295 21603 solver.cpp:244]     Train net output #1: loss = 2.05301 (* 1 = 2.05301 loss)\nI0818 14:10:52.960136 21603 sgd_solver.cpp:166] Iteration 100, lr = 0.00250006\nI0818 14:11:40.255156 21603 solver.cpp:337] Iteration 200, Testing net (#0)\nI0818 14:12:06.508538 21603 solver.cpp:404]     Test net output #0: accuracy = 0.38304\nI0818 14:12:06.508610 21603 solver.cpp:404]     Test net output #1: loss = 1.66418 (* 1 = 1.66418 loss)\nI0818 14:12:06.922766 21603 solver.cpp:228] Iteration 200, loss = 1.61601\nI0818 14:12:06.922823 21603 solver.cpp:244]     Train net output #0: accuracy = 0.44\nI0818 14:12:06.922850 21603 
solver.cpp:244]     Train net output #1: loss = 1.61601 (* 1 = 1.61601 loss)\nI0818 14:12:07.018087 21603 sgd_solver.cpp:166] Iteration 200, lr = 0.005\nI0818 14:12:54.296425 21603 solver.cpp:337] Iteration 300, Testing net (#0)\nI0818 14:13:20.663298 21603 solver.cpp:404]     Test net output #0: accuracy = 0.446\nI0818 14:13:20.663386 21603 solver.cpp:404]     Test net output #1: loss = 1.49254 (* 1 = 1.49254 loss)\nI0818 14:13:21.075233 21603 solver.cpp:228] Iteration 300, loss = 1.43741\nI0818 14:13:21.075286 21603 solver.cpp:244]     Train net output #0: accuracy = 0.488\nI0818 14:13:21.075305 21603 solver.cpp:244]     Train net output #1: loss = 1.43741 (* 1 = 1.43741 loss)\nI0818 14:13:21.169420 21603 sgd_solver.cpp:166] Iteration 300, lr = 0.00750005\nI0818 14:14:08.396771 21603 solver.cpp:337] Iteration 400, Testing net (#0)\nI0818 14:14:34.666817 21603 solver.cpp:404]     Test net output #0: accuracy = 0.4696\nI0818 14:14:34.666890 21603 solver.cpp:404]     Test net output #1: loss = 1.49524 (* 1 = 1.49524 loss)\nI0818 14:14:35.080046 21603 solver.cpp:228] Iteration 400, loss = 1.20553\nI0818 14:14:35.080104 21603 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0818 14:14:35.080123 21603 solver.cpp:244]     Train net output #1: loss = 1.20553 (* 1 = 1.20553 loss)\nI0818 14:14:35.168097 21603 sgd_solver.cpp:166] Iteration 400, lr = 0.00999999\nI0818 14:15:22.531381 21603 solver.cpp:337] Iteration 500, Testing net (#0)\nI0818 14:15:48.802954 21603 solver.cpp:404]     Test net output #0: accuracy = 0.53868\nI0818 14:15:48.803027 21603 solver.cpp:404]     Test net output #1: loss = 1.28565 (* 1 = 1.28565 loss)\nI0818 14:15:49.214665 21603 solver.cpp:228] Iteration 500, loss = 1.10651\nI0818 14:15:49.214722 21603 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0818 14:15:49.214741 21603 solver.cpp:244]     Train net output #1: loss = 1.10651 (* 1 = 1.10651 loss)\nI0818 14:15:49.304122 21603 sgd_solver.cpp:166] Iteration 500, lr = 
0.0125\nI0818 14:16:36.775369 21603 solver.cpp:337] Iteration 600, Testing net (#0)\nI0818 14:17:03.048637 21603 solver.cpp:404]     Test net output #0: accuracy = 0.60756\nI0818 14:17:03.048708 21603 solver.cpp:404]     Test net output #1: loss = 1.08873 (* 1 = 1.08873 loss)\nI0818 14:17:03.460783 21603 solver.cpp:228] Iteration 600, loss = 0.959996\nI0818 14:17:03.460839 21603 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0818 14:17:03.460855 21603 solver.cpp:244]     Train net output #1: loss = 0.959996 (* 1 = 0.959996 loss)\nI0818 14:17:03.549377 21603 sgd_solver.cpp:166] Iteration 600, lr = 0.015\nI0818 14:17:50.986476 21603 solver.cpp:337] Iteration 700, Testing net (#0)\nI0818 14:18:17.254493 21603 solver.cpp:404]     Test net output #0: accuracy = 0.63088\nI0818 14:18:17.254562 21603 solver.cpp:404]     Test net output #1: loss = 1.03563 (* 1 = 1.03563 loss)\nI0818 14:18:17.666618 21603 solver.cpp:228] Iteration 700, loss = 0.892115\nI0818 14:18:17.666674 21603 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0818 14:18:17.666692 21603 solver.cpp:244]     Train net output #1: loss = 0.892115 (* 1 = 0.892115 loss)\nI0818 14:18:17.757194 21603 sgd_solver.cpp:166] Iteration 700, lr = 0.0175\nI0818 14:19:05.194885 21603 solver.cpp:337] Iteration 800, Testing net (#0)\nI0818 14:19:31.458818 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6554\nI0818 14:19:31.458884 21603 solver.cpp:404]     Test net output #1: loss = 0.968635 (* 1 = 0.968635 loss)\nI0818 14:19:31.871749 21603 solver.cpp:228] Iteration 800, loss = 0.83916\nI0818 14:19:31.871805 21603 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0818 14:19:31.871822 21603 solver.cpp:244]     Train net output #1: loss = 0.83916 (* 1 = 0.83916 loss)\nI0818 14:19:31.961365 21603 sgd_solver.cpp:166] Iteration 800, lr = 0.02\nI0818 14:20:19.212849 21603 solver.cpp:337] Iteration 900, Testing net (#0)\nI0818 14:20:45.483968 21603 solver.cpp:404]     Test net output #0: 
accuracy = 0.64864\nI0818 14:20:45.484036 21603 solver.cpp:404]     Test net output #1: loss = 0.993659 (* 1 = 0.993659 loss)\nI0818 14:20:45.897186 21603 solver.cpp:228] Iteration 900, loss = 0.713025\nI0818 14:20:45.897243 21603 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 14:20:45.897258 21603 solver.cpp:244]     Train net output #1: loss = 0.713025 (* 1 = 0.713025 loss)\nI0818 14:20:45.989502 21603 sgd_solver.cpp:166] Iteration 900, lr = 0.0225\nI0818 14:21:33.388620 21603 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0818 14:21:59.660495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.65324\nI0818 14:21:59.660563 21603 solver.cpp:404]     Test net output #1: loss = 0.988789 (* 1 = 0.988789 loss)\nI0818 14:22:00.073771 21603 solver.cpp:228] Iteration 1000, loss = 0.706922\nI0818 14:22:00.073829 21603 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0818 14:22:00.073846 21603 solver.cpp:244]     Train net output #1: loss = 0.706922 (* 1 = 0.706922 loss)\nI0818 14:22:00.168853 21603 sgd_solver.cpp:166] Iteration 1000, lr = 0.025\nI0818 14:22:47.554607 21603 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0818 14:23:13.823832 21603 solver.cpp:404]     Test net output #0: accuracy = 0.54008\nI0818 14:23:13.823899 21603 solver.cpp:404]     Test net output #1: loss = 1.48464 (* 1 = 1.48464 loss)\nI0818 14:23:14.237005 21603 solver.cpp:228] Iteration 1100, loss = 0.677125\nI0818 14:23:14.237061 21603 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 14:23:14.237076 21603 solver.cpp:244]     Train net output #1: loss = 0.677125 (* 1 = 0.677125 loss)\nI0818 14:23:14.325987 21603 sgd_solver.cpp:166] Iteration 1100, lr = 0.0275\nI0818 14:24:01.755434 21603 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0818 14:24:28.027251 21603 solver.cpp:404]     Test net output #0: accuracy = 0.61744\nI0818 14:24:28.027324 21603 solver.cpp:404]     Test net output #1: loss = 1.19548 (* 1 = 1.19548 loss)\nI0818 
14:24:28.439196 21603 solver.cpp:228] Iteration 1200, loss = 0.670781\nI0818 14:24:28.439254 21603 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0818 14:24:28.439278 21603 solver.cpp:244]     Train net output #1: loss = 0.670781 (* 1 = 0.670781 loss)\nI0818 14:24:28.526392 21603 sgd_solver.cpp:166] Iteration 1200, lr = 0.03\nI0818 14:25:16.015060 21603 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0818 14:25:42.290769 21603 solver.cpp:404]     Test net output #0: accuracy = 0.625\nI0818 14:25:42.290837 21603 solver.cpp:404]     Test net output #1: loss = 1.18797 (* 1 = 1.18797 loss)\nI0818 14:25:42.704620 21603 solver.cpp:228] Iteration 1300, loss = 0.59815\nI0818 14:25:42.704675 21603 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0818 14:25:42.704691 21603 solver.cpp:244]     Train net output #1: loss = 0.59815 (* 1 = 0.59815 loss)\nI0818 14:25:42.792289 21603 sgd_solver.cpp:166] Iteration 1300, lr = 0.0325\nI0818 14:26:30.257799 21603 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0818 14:26:56.527957 21603 solver.cpp:404]     Test net output #0: accuracy = 0.55372\nI0818 14:26:56.528025 21603 solver.cpp:404]     Test net output #1: loss = 1.65536 (* 1 = 1.65536 loss)\nI0818 14:26:56.941769 21603 solver.cpp:228] Iteration 1400, loss = 0.583184\nI0818 14:26:56.941824 21603 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0818 14:26:56.941841 21603 solver.cpp:244]     Train net output #1: loss = 0.583184 (* 1 = 0.583184 loss)\nI0818 14:26:57.032481 21603 sgd_solver.cpp:166] Iteration 1400, lr = 0.035\nI0818 14:27:44.482739 21603 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0818 14:28:10.753528 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70364\nI0818 14:28:10.753597 21603 solver.cpp:404]     Test net output #1: loss = 0.890163 (* 1 = 0.890163 loss)\nI0818 14:28:11.166952 21603 solver.cpp:228] Iteration 1500, loss = 0.498153\nI0818 14:28:11.166995 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.856\nI0818 14:28:11.167008 21603 solver.cpp:244]     Train net output #1: loss = 0.498153 (* 1 = 0.498153 loss)\nI0818 14:28:11.267335 21603 sgd_solver.cpp:166] Iteration 1500, lr = 0.0375\nI0818 14:28:58.777137 21603 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0818 14:29:25.046013 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66508\nI0818 14:29:25.046080 21603 solver.cpp:404]     Test net output #1: loss = 1.08467 (* 1 = 1.08467 loss)\nI0818 14:29:25.459702 21603 solver.cpp:228] Iteration 1600, loss = 0.383333\nI0818 14:29:25.459746 21603 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0818 14:29:25.459761 21603 solver.cpp:244]     Train net output #1: loss = 0.383333 (* 1 = 0.383333 loss)\nI0818 14:29:25.547977 21603 sgd_solver.cpp:166] Iteration 1600, lr = 0.04\nI0818 14:30:13.058028 21603 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0818 14:30:39.325212 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68968\nI0818 14:30:39.325281 21603 solver.cpp:404]     Test net output #1: loss = 0.954247 (* 1 = 0.954247 loss)\nI0818 14:30:39.739099 21603 solver.cpp:228] Iteration 1700, loss = 0.443106\nI0818 14:30:39.739140 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 14:30:39.739156 21603 solver.cpp:244]     Train net output #1: loss = 0.443106 (* 1 = 0.443106 loss)\nI0818 14:30:39.823990 21603 sgd_solver.cpp:166] Iteration 1700, lr = 0.0425\nI0818 14:31:27.257570 21603 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0818 14:31:53.524972 21603 solver.cpp:404]     Test net output #0: accuracy = 0.62844\nI0818 14:31:53.525043 21603 solver.cpp:404]     Test net output #1: loss = 1.28668 (* 1 = 1.28668 loss)\nI0818 14:31:53.938570 21603 solver.cpp:228] Iteration 1800, loss = 0.405144\nI0818 14:31:53.938611 21603 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 14:31:53.938627 21603 solver.cpp:244]     Train net output #1: loss = 0.405144 (* 1 = 0.405144 loss)\nI0818 
14:31:54.030777 21603 sgd_solver.cpp:166] Iteration 1800, lr = 0.045\nI0818 14:32:41.460680 21603 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0818 14:33:07.729931 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71352\nI0818 14:33:07.730002 21603 solver.cpp:404]     Test net output #1: loss = 0.891176 (* 1 = 0.891176 loss)\nI0818 14:33:08.142066 21603 solver.cpp:228] Iteration 1900, loss = 0.371067\nI0818 14:33:08.142107 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:33:08.142122 21603 solver.cpp:244]     Train net output #1: loss = 0.371067 (* 1 = 0.371067 loss)\nI0818 14:33:08.233569 21603 sgd_solver.cpp:166] Iteration 1900, lr = 0.0475\nI0818 14:33:55.782213 21603 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0818 14:34:22.055651 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69428\nI0818 14:34:22.055718 21603 solver.cpp:404]     Test net output #1: loss = 0.989118 (* 1 = 0.989118 loss)\nI0818 14:34:22.469213 21603 solver.cpp:228] Iteration 2000, loss = 0.325086\nI0818 14:34:22.469255 21603 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 14:34:22.469275 21603 solver.cpp:244]     Train net output #1: loss = 0.325086 (* 1 = 0.325086 loss)\nI0818 14:34:22.567147 21603 sgd_solver.cpp:166] Iteration 2000, lr = 0.05\nI0818 14:39:59.604238 21603 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0818 14:40:25.908879 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66372\nI0818 14:40:25.910812 21603 solver.cpp:404]     Test net output #1: loss = 1.32507 (* 1 = 1.32507 loss)\nI0818 14:40:26.324790 21603 solver.cpp:228] Iteration 2100, loss = 0.363937\nI0818 14:40:26.324826 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:40:26.324841 21603 solver.cpp:244]     Train net output #1: loss = 0.363937 (* 1 = 0.363937 loss)\nI0818 14:40:26.417480 21603 sgd_solver.cpp:166] Iteration 2100, lr = 0.0525\nI0818 14:41:13.728943 21603 solver.cpp:337] Iteration 2200, Testing 
net (#0)\nI0818 14:41:40.009372 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7078\nI0818 14:41:40.009433 21603 solver.cpp:404]     Test net output #1: loss = 1.04862 (* 1 = 1.04862 loss)\nI0818 14:41:40.422724 21603 solver.cpp:228] Iteration 2200, loss = 0.296782\nI0818 14:41:40.422761 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:41:40.422777 21603 solver.cpp:244]     Train net output #1: loss = 0.296782 (* 1 = 0.296782 loss)\nI0818 14:41:40.517622 21603 sgd_solver.cpp:166] Iteration 2200, lr = 0.0549999\nI0818 14:42:27.768705 21603 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0818 14:42:54.042824 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66412\nI0818 14:42:54.042886 21603 solver.cpp:404]     Test net output #1: loss = 1.16769 (* 1 = 1.16769 loss)\nI0818 14:42:54.456215 21603 solver.cpp:228] Iteration 2300, loss = 0.285923\nI0818 14:42:54.456255 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 14:42:54.456270 21603 solver.cpp:244]     Train net output #1: loss = 0.285923 (* 1 = 0.285923 loss)\nI0818 14:42:54.568408 21603 sgd_solver.cpp:166] Iteration 2300, lr = 0.0575\nI0818 14:43:56.162477 21603 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0818 14:44:22.444353 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69696\nI0818 14:44:22.444489 21603 solver.cpp:404]     Test net output #1: loss = 1.0941 (* 1 = 1.0941 loss)\nI0818 14:44:22.858361 21603 solver.cpp:228] Iteration 2400, loss = 0.382625\nI0818 14:44:22.858402 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 14:44:22.858417 21603 solver.cpp:244]     Train net output #1: loss = 0.382625 (* 1 = 0.382625 loss)\nI0818 14:44:22.945329 21603 sgd_solver.cpp:166] Iteration 2400, lr = 0.0599999\nI0818 14:45:10.299561 21603 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0818 14:45:36.578585 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73368\nI0818 14:45:36.578649 21603 
solver.cpp:404]     Test net output #1: loss = 0.917678 (* 1 = 0.917678 loss)\nI0818 14:45:36.992388 21603 solver.cpp:228] Iteration 2500, loss = 0.294015\nI0818 14:45:36.992430 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:45:36.992445 21603 solver.cpp:244]     Train net output #1: loss = 0.294015 (* 1 = 0.294015 loss)\nI0818 14:45:37.081773 21603 sgd_solver.cpp:166] Iteration 2500, lr = 0.0625\nI0818 14:46:24.443733 21603 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0818 14:46:50.728425 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72168\nI0818 14:46:50.728488 21603 solver.cpp:404]     Test net output #1: loss = 0.943366 (* 1 = 0.943366 loss)\nI0818 14:46:51.142243 21603 solver.cpp:228] Iteration 2600, loss = 0.239099\nI0818 14:46:51.142287 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 14:46:51.142302 21603 solver.cpp:244]     Train net output #1: loss = 0.239099 (* 1 = 0.239099 loss)\nI0818 14:46:51.234043 21603 sgd_solver.cpp:166] Iteration 2600, lr = 0.0650001\nI0818 14:47:38.597195 21603 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0818 14:48:04.879339 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6854\nI0818 14:48:04.879402 21603 solver.cpp:404]     Test net output #1: loss = 1.19976 (* 1 = 1.19976 loss)\nI0818 14:48:05.293210 21603 solver.cpp:228] Iteration 2700, loss = 0.272174\nI0818 14:48:05.293259 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:48:05.293275 21603 solver.cpp:244]     Train net output #1: loss = 0.272174 (* 1 = 0.272174 loss)\nI0818 14:48:05.387643 21603 sgd_solver.cpp:166] Iteration 2700, lr = 0.0675\nI0818 14:48:52.782310 21603 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0818 14:49:19.065961 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74012\nI0818 14:49:19.066026 21603 solver.cpp:404]     Test net output #1: loss = 0.90316 (* 1 = 0.90316 loss)\nI0818 14:49:19.479857 21603 solver.cpp:228] Iteration 
2800, loss = 0.248149\nI0818 14:49:19.479908 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 14:49:19.479925 21603 solver.cpp:244]     Train net output #1: loss = 0.248149 (* 1 = 0.248149 loss)\nI0818 14:49:19.567538 21603 sgd_solver.cpp:166] Iteration 2800, lr = 0.0700001\nI0818 14:50:06.790006 21603 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0818 14:50:33.071516 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76204\nI0818 14:50:33.071584 21603 solver.cpp:404]     Test net output #1: loss = 0.882606 (* 1 = 0.882606 loss)\nI0818 14:50:33.485116 21603 solver.cpp:228] Iteration 2900, loss = 0.185854\nI0818 14:50:33.485168 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 14:50:33.485186 21603 solver.cpp:244]     Train net output #1: loss = 0.185854 (* 1 = 0.185854 loss)\nI0818 14:50:33.575640 21603 sgd_solver.cpp:166] Iteration 2900, lr = 0.0725\nI0818 14:51:20.836989 21603 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0818 14:51:47.121175 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78396\nI0818 14:51:47.121240 21603 solver.cpp:404]     Test net output #1: loss = 0.741341 (* 1 = 0.741341 loss)\nI0818 14:51:47.533917 21603 solver.cpp:228] Iteration 3000, loss = 0.291262\nI0818 14:51:47.533965 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 14:51:47.533983 21603 solver.cpp:244]     Train net output #1: loss = 0.291262 (* 1 = 0.291262 loss)\nI0818 14:51:47.626027 21603 sgd_solver.cpp:166] Iteration 3000, lr = 0.075\nI0818 14:52:34.716414 21603 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0818 14:53:01.000715 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70788\nI0818 14:53:01.000784 21603 solver.cpp:404]     Test net output #1: loss = 1.13178 (* 1 = 1.13178 loss)\nI0818 14:53:01.414463 21603 solver.cpp:228] Iteration 3100, loss = 0.253251\nI0818 14:53:01.414513 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 14:53:01.414530 
21603 solver.cpp:244]     Train net output #1: loss = 0.253251 (* 1 = 0.253251 loss)\nI0818 14:53:01.504806 21603 sgd_solver.cpp:166] Iteration 3100, lr = 0.0775\nI0818 14:53:48.794389 21603 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0818 14:54:15.078222 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72512\nI0818 14:54:15.078286 21603 solver.cpp:404]     Test net output #1: loss = 1.09302 (* 1 = 1.09302 loss)\nI0818 14:54:15.492120 21603 solver.cpp:228] Iteration 3200, loss = 0.19967\nI0818 14:54:15.492171 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 14:54:15.492188 21603 solver.cpp:244]     Train net output #1: loss = 0.19967 (* 1 = 0.19967 loss)\nI0818 14:54:15.582015 21603 sgd_solver.cpp:166] Iteration 3200, lr = 0.08\nI0818 14:55:02.834700 21603 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0818 14:55:29.119225 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77272\nI0818 14:55:29.119287 21603 solver.cpp:404]     Test net output #1: loss = 0.821085 (* 1 = 0.821085 loss)\nI0818 14:55:29.533251 21603 solver.cpp:228] Iteration 3300, loss = 0.217833\nI0818 14:55:29.533301 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:55:29.533318 21603 solver.cpp:244]     Train net output #1: loss = 0.217833 (* 1 = 0.217833 loss)\nI0818 14:55:29.625296 21603 sgd_solver.cpp:166] Iteration 3300, lr = 0.0825\nI0818 14:56:16.817129 21603 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0818 14:56:43.095512 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7828\nI0818 14:56:43.095579 21603 solver.cpp:404]     Test net output #1: loss = 0.846099 (* 1 = 0.846099 loss)\nI0818 14:56:43.508998 21603 solver.cpp:228] Iteration 3400, loss = 0.167197\nI0818 14:56:43.509048 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 14:56:43.509065 21603 solver.cpp:244]     Train net output #1: loss = 0.167197 (* 1 = 0.167197 loss)\nI0818 14:56:43.603945 21603 sgd_solver.cpp:166] 
Iteration 3400, lr = 0.085\nI0818 14:57:30.898268 21603 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0818 14:57:57.177423 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76524\nI0818 14:57:57.177490 21603 solver.cpp:404]     Test net output #1: loss = 0.903682 (* 1 = 0.903682 loss)\nI0818 14:57:57.591176 21603 solver.cpp:228] Iteration 3500, loss = 0.241958\nI0818 14:57:57.591228 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 14:57:57.591245 21603 solver.cpp:244]     Train net output #1: loss = 0.241958 (* 1 = 0.241958 loss)\nI0818 14:57:57.687034 21603 sgd_solver.cpp:166] Iteration 3500, lr = 0.0875\nI0818 14:58:44.888562 21603 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0818 14:59:11.174124 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75184\nI0818 14:59:11.174190 21603 solver.cpp:404]     Test net output #1: loss = 0.960807 (* 1 = 0.960807 loss)\nI0818 14:59:11.587751 21603 solver.cpp:228] Iteration 3600, loss = 0.223286\nI0818 14:59:11.587801 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 14:59:11.587818 21603 solver.cpp:244]     Train net output #1: loss = 0.223286 (* 1 = 0.223286 loss)\nI0818 14:59:11.677109 21603 sgd_solver.cpp:166] Iteration 3600, lr = 0.09\nI0818 14:59:58.963224 21603 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0818 15:00:25.240365 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7652\nI0818 15:00:25.240433 21603 solver.cpp:404]     Test net output #1: loss = 0.93077 (* 1 = 0.93077 loss)\nI0818 15:00:25.653836 21603 solver.cpp:228] Iteration 3700, loss = 0.174938\nI0818 15:00:25.653888 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:00:25.653905 21603 solver.cpp:244]     Train net output #1: loss = 0.174938 (* 1 = 0.174938 loss)\nI0818 15:00:25.744087 21603 sgd_solver.cpp:166] Iteration 3700, lr = 0.0925\nI0818 15:01:13.022379 21603 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0818 15:01:39.280721 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.76132\nI0818 15:01:39.280769 21603 solver.cpp:404]     Test net output #1: loss = 0.929676 (* 1 = 0.929676 loss)\nI0818 15:01:39.694640 21603 solver.cpp:228] Iteration 3800, loss = 0.187075\nI0818 15:01:39.694682 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 15:01:39.694699 21603 solver.cpp:244]     Train net output #1: loss = 0.187075 (* 1 = 0.187075 loss)\nI0818 15:01:39.784157 21603 sgd_solver.cpp:166] Iteration 3800, lr = 0.095\nI0818 15:02:27.244088 21603 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0818 15:02:53.517663 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7158\nI0818 15:02:53.517709 21603 solver.cpp:404]     Test net output #1: loss = 1.18372 (* 1 = 1.18372 loss)\nI0818 15:02:53.931725 21603 solver.cpp:228] Iteration 3900, loss = 0.127366\nI0818 15:02:53.931766 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:02:53.931780 21603 solver.cpp:244]     Train net output #1: loss = 0.127366 (* 1 = 0.127366 loss)\nI0818 15:02:54.020571 21603 sgd_solver.cpp:166] Iteration 3900, lr = 0.0975\nI0818 15:03:41.391321 21603 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0818 15:04:07.648831 21603 solver.cpp:404]     Test net output #0: accuracy = 0.649\nI0818 15:04:07.648876 21603 solver.cpp:404]     Test net output #1: loss = 1.65934 (* 1 = 1.65934 loss)\nI0818 15:04:08.062500 21603 solver.cpp:228] Iteration 4000, loss = 0.154475\nI0818 15:04:08.062541 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:04:08.062556 21603 solver.cpp:244]     Train net output #1: loss = 0.154475 (* 1 = 0.154475 loss)\nI0818 15:04:08.155964 21603 sgd_solver.cpp:166] Iteration 4000, lr = 0.1\nI0818 15:04:55.557667 21603 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0818 15:05:21.809857 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71476\nI0818 15:05:21.809903 21603 solver.cpp:404]     Test net output #1: loss = 1.26983 (* 
1 = 1.26983 loss)\nI0818 15:05:22.223168 21603 solver.cpp:228] Iteration 4100, loss = 0.124366\nI0818 15:05:22.223209 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:05:22.223225 21603 solver.cpp:244]     Train net output #1: loss = 0.124366 (* 1 = 0.124366 loss)\nI0818 15:05:22.314385 21603 sgd_solver.cpp:166] Iteration 4100, lr = 0.1025\nI0818 15:06:09.645722 21603 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0818 15:06:35.817452 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75424\nI0818 15:06:35.817497 21603 solver.cpp:404]     Test net output #1: loss = 1.05324 (* 1 = 1.05324 loss)\nI0818 15:06:36.231151 21603 solver.cpp:228] Iteration 4200, loss = 0.170708\nI0818 15:06:36.231194 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:06:36.231209 21603 solver.cpp:244]     Train net output #1: loss = 0.170708 (* 1 = 0.170708 loss)\nI0818 15:06:36.321445 21603 sgd_solver.cpp:166] Iteration 4200, lr = 0.105\nI0818 15:07:23.203131 21603 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0818 15:07:49.466485 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72\nI0818 15:07:49.466532 21603 solver.cpp:404]     Test net output #1: loss = 1.13373 (* 1 = 1.13373 loss)\nI0818 15:07:49.880622 21603 solver.cpp:228] Iteration 4300, loss = 0.177695\nI0818 15:07:49.880661 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:07:49.880676 21603 solver.cpp:244]     Train net output #1: loss = 0.177695 (* 1 = 0.177695 loss)\nI0818 15:07:49.969247 21603 sgd_solver.cpp:166] Iteration 4300, lr = 0.1075\nI0818 15:08:36.759974 21603 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0818 15:09:02.849484 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7526\nI0818 15:09:02.849529 21603 solver.cpp:404]     Test net output #1: loss = 0.998675 (* 1 = 0.998675 loss)\nI0818 15:09:03.263594 21603 solver.cpp:228] Iteration 4400, loss = 0.087649\nI0818 15:09:03.263639 21603 solver.cpp:244]     
Train net output #0: accuracy = 0.976\nI0818 15:09:03.263654 21603 solver.cpp:244]     Train net output #1: loss = 0.087649 (* 1 = 0.087649 loss)\nI0818 15:09:03.351246 21603 sgd_solver.cpp:166] Iteration 4400, lr = 0.11\nI0818 15:09:50.247685 21603 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0818 15:10:16.487435 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7266\nI0818 15:10:16.487483 21603 solver.cpp:404]     Test net output #1: loss = 1.19681 (* 1 = 1.19681 loss)\nI0818 15:10:16.901077 21603 solver.cpp:228] Iteration 4500, loss = 0.224308\nI0818 15:10:16.901118 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 15:10:16.901134 21603 solver.cpp:244]     Train net output #1: loss = 0.224308 (* 1 = 0.224308 loss)\nI0818 15:10:16.994899 21603 sgd_solver.cpp:166] Iteration 4500, lr = 0.1125\nI0818 15:11:03.740510 21603 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0818 15:11:30.006193 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI0818 15:11:30.006238 21603 solver.cpp:404]     Test net output #1: loss = 0.974304 (* 1 = 0.974304 loss)\nI0818 15:11:30.420033 21603 solver.cpp:228] Iteration 4600, loss = 0.227664\nI0818 15:11:30.420075 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 15:11:30.420091 21603 solver.cpp:244]     Train net output #1: loss = 0.227664 (* 1 = 0.227664 loss)\nI0818 15:11:30.503362 21603 sgd_solver.cpp:166] Iteration 4600, lr = 0.115\nI0818 15:12:17.395710 21603 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0818 15:12:43.598397 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7516\nI0818 15:12:43.598443 21603 solver.cpp:404]     Test net output #1: loss = 1.00718 (* 1 = 1.00718 loss)\nI0818 15:12:44.012182 21603 solver.cpp:228] Iteration 4700, loss = 0.143363\nI0818 15:12:44.012228 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:12:44.012243 21603 solver.cpp:244]     Train net output #1: loss = 0.143363 (* 1 = 0.143363 
loss)\nI0818 15:12:44.106160 21603 sgd_solver.cpp:166] Iteration 4700, lr = 0.1175\nI0818 15:13:30.945466 21603 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0818 15:13:57.208535 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77776\nI0818 15:13:57.208581 21603 solver.cpp:404]     Test net output #1: loss = 0.939158 (* 1 = 0.939158 loss)\nI0818 15:13:57.621855 21603 solver.cpp:228] Iteration 4800, loss = 0.183231\nI0818 15:13:57.621897 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:13:57.621913 21603 solver.cpp:244]     Train net output #1: loss = 0.183231 (* 1 = 0.183231 loss)\nI0818 15:13:57.713805 21603 sgd_solver.cpp:166] Iteration 4800, lr = 0.12\nI0818 15:14:44.581425 21603 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0818 15:15:10.837760 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7662\nI0818 15:15:10.837805 21603 solver.cpp:404]     Test net output #1: loss = 1.07579 (* 1 = 1.07579 loss)\nI0818 15:15:11.251265 21603 solver.cpp:228] Iteration 4900, loss = 0.204403\nI0818 15:15:11.251305 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 15:15:11.251320 21603 solver.cpp:244]     Train net output #1: loss = 0.204403 (* 1 = 0.204403 loss)\nI0818 15:15:11.338106 21603 sgd_solver.cpp:166] Iteration 4900, lr = 0.1225\nI0818 15:15:58.240085 21603 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0818 15:16:24.511194 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78608\nI0818 15:16:24.511238 21603 solver.cpp:404]     Test net output #1: loss = 0.867775 (* 1 = 0.867775 loss)\nI0818 15:16:24.924898 21603 solver.cpp:228] Iteration 5000, loss = 0.155566\nI0818 15:16:24.924939 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:16:24.924954 21603 solver.cpp:244]     Train net output #1: loss = 0.155566 (* 1 = 0.155566 loss)\nI0818 15:16:25.015538 21603 sgd_solver.cpp:166] Iteration 5000, lr = 0.125\nI0818 15:17:11.907274 21603 solver.cpp:337] Iteration 
5100, Testing net (#0)\nI0818 15:17:38.160897 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73864\nI0818 15:17:38.160944 21603 solver.cpp:404]     Test net output #1: loss = 1.12149 (* 1 = 1.12149 loss)\nI0818 15:17:38.574769 21603 solver.cpp:228] Iteration 5100, loss = 0.0917775\nI0818 15:17:38.574820 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:17:38.574837 21603 solver.cpp:244]     Train net output #1: loss = 0.0917775 (* 1 = 0.0917775 loss)\nI0818 15:17:38.658449 21603 sgd_solver.cpp:166] Iteration 5100, lr = 0.1275\nI0818 15:18:25.441579 21603 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0818 15:18:51.659679 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77544\nI0818 15:18:51.659725 21603 solver.cpp:404]     Test net output #1: loss = 0.961592 (* 1 = 0.961592 loss)\nI0818 15:18:52.073616 21603 solver.cpp:228] Iteration 5200, loss = 0.0935493\nI0818 15:18:52.073670 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:18:52.073688 21603 solver.cpp:244]     Train net output #1: loss = 0.0935493 (* 1 = 0.0935493 loss)\nI0818 15:18:52.161942 21603 sgd_solver.cpp:166] Iteration 5200, lr = 0.13\nI0818 15:19:39.003486 21603 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0818 15:20:05.239493 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7448\nI0818 15:20:05.239539 21603 solver.cpp:404]     Test net output #1: loss = 1.06362 (* 1 = 1.06362 loss)\nI0818 15:20:05.653170 21603 solver.cpp:228] Iteration 5300, loss = 0.147492\nI0818 15:20:05.653223 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:20:05.653239 21603 solver.cpp:244]     Train net output #1: loss = 0.147492 (* 1 = 0.147492 loss)\nI0818 15:20:05.739820 21603 sgd_solver.cpp:166] Iteration 5300, lr = 0.1325\nI0818 15:20:52.911290 21603 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0818 15:21:19.149134 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77036\nI0818 15:21:19.149180 
21603 solver.cpp:404]     Test net output #1: loss = 1.0071 (* 1 = 1.0071 loss)\nI0818 15:21:19.562811 21603 solver.cpp:228] Iteration 5400, loss = 0.148643\nI0818 15:21:19.562863 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:21:19.562880 21603 solver.cpp:244]     Train net output #1: loss = 0.148643 (* 1 = 0.148643 loss)\nI0818 15:21:19.646848 21603 sgd_solver.cpp:166] Iteration 5400, lr = 0.135\nI0818 15:22:06.849190 21603 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0818 15:22:33.058804 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76192\nI0818 15:22:33.058848 21603 solver.cpp:404]     Test net output #1: loss = 1.06521 (* 1 = 1.06521 loss)\nI0818 15:22:33.472790 21603 solver.cpp:228] Iteration 5500, loss = 0.177601\nI0818 15:22:33.472842 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:22:33.472860 21603 solver.cpp:244]     Train net output #1: loss = 0.177601 (* 1 = 0.177601 loss)\nI0818 15:22:33.561491 21603 sgd_solver.cpp:166] Iteration 5500, lr = 0.1375\nI0818 15:23:20.689062 21603 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0818 15:23:46.890786 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7464\nI0818 15:23:46.890832 21603 solver.cpp:404]     Test net output #1: loss = 1.18725 (* 1 = 1.18725 loss)\nI0818 15:23:47.304765 21603 solver.cpp:228] Iteration 5600, loss = 0.177336\nI0818 15:23:47.304818 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:23:47.304836 21603 solver.cpp:244]     Train net output #1: loss = 0.177336 (* 1 = 0.177336 loss)\nI0818 15:23:47.390995 21603 sgd_solver.cpp:166] Iteration 5600, lr = 0.14\nI0818 15:24:34.376821 21603 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0818 15:25:00.515225 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7788\nI0818 15:25:00.515271 21603 solver.cpp:404]     Test net output #1: loss = 0.943053 (* 1 = 0.943053 loss)\nI0818 15:25:00.929070 21603 solver.cpp:228] Iteration 5700, 
loss = 0.123124\nI0818 15:25:00.929121 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:25:00.929138 21603 solver.cpp:244]     Train net output #1: loss = 0.123124 (* 1 = 0.123124 loss)\nI0818 15:25:01.024760 21603 sgd_solver.cpp:166] Iteration 5700, lr = 0.1425\nI0818 15:25:48.187165 21603 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0818 15:26:14.391641 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7454\nI0818 15:26:14.391687 21603 solver.cpp:404]     Test net output #1: loss = 1.18505 (* 1 = 1.18505 loss)\nI0818 15:26:14.805187 21603 solver.cpp:228] Iteration 5800, loss = 0.0756123\nI0818 15:26:14.805239 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 15:26:14.805255 21603 solver.cpp:244]     Train net output #1: loss = 0.0756124 (* 1 = 0.0756124 loss)\nI0818 15:26:14.889961 21603 sgd_solver.cpp:166] Iteration 5800, lr = 0.145\nI0818 15:27:01.953469 21603 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0818 15:27:28.231345 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75524\nI0818 15:27:28.231413 21603 solver.cpp:404]     Test net output #1: loss = 1.19986 (* 1 = 1.19986 loss)\nI0818 15:27:28.645166 21603 solver.cpp:228] Iteration 5900, loss = 0.0934365\nI0818 15:27:28.645215 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:27:28.645231 21603 solver.cpp:244]     Train net output #1: loss = 0.0934366 (* 1 = 0.0934366 loss)\nI0818 15:27:28.731974 21603 sgd_solver.cpp:166] Iteration 5900, lr = 0.1475\nI0818 15:28:15.809487 21603 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0818 15:28:42.082507 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70636\nI0818 15:28:42.082577 21603 solver.cpp:404]     Test net output #1: loss = 1.65832 (* 1 = 1.65832 loss)\nI0818 15:28:42.496471 21603 solver.cpp:228] Iteration 6000, loss = 0.0892193\nI0818 15:28:42.496520 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:28:42.496536 21603 
solver.cpp:244]     Train net output #1: loss = 0.0892194 (* 1 = 0.0892194 loss)\nI0818 15:28:42.585616 21603 sgd_solver.cpp:166] Iteration 6000, lr = 0.15\nI0818 15:29:29.450460 21603 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0818 15:29:55.725653 21603 solver.cpp:404]     Test net output #0: accuracy = 0.719\nI0818 15:29:55.725719 21603 solver.cpp:404]     Test net output #1: loss = 1.43066 (* 1 = 1.43066 loss)\nI0818 15:29:56.138373 21603 solver.cpp:228] Iteration 6100, loss = 0.087573\nI0818 15:29:56.138419 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:29:56.138435 21603 solver.cpp:244]     Train net output #1: loss = 0.0875731 (* 1 = 0.0875731 loss)\nI0818 15:29:56.226722 21603 sgd_solver.cpp:166] Iteration 6100, lr = 0.1525\nI0818 15:30:43.266324 21603 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0818 15:31:09.542906 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76096\nI0818 15:31:09.542973 21603 solver.cpp:404]     Test net output #1: loss = 1.18944 (* 1 = 1.18944 loss)\nI0818 15:31:09.955438 21603 solver.cpp:228] Iteration 6200, loss = 0.09992\nI0818 15:31:09.955487 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:31:09.955502 21603 solver.cpp:244]     Train net output #1: loss = 0.09992 (* 1 = 0.09992 loss)\nI0818 15:31:10.048931 21603 sgd_solver.cpp:166] Iteration 6200, lr = 0.155\nI0818 15:31:57.058995 21603 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0818 15:32:23.332932 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78\nI0818 15:32:23.332996 21603 solver.cpp:404]     Test net output #1: loss = 1.00556 (* 1 = 1.00556 loss)\nI0818 15:32:23.746515 21603 solver.cpp:228] Iteration 6300, loss = 0.0817513\nI0818 15:32:23.746562 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:32:23.746578 21603 solver.cpp:244]     Train net output #1: loss = 0.0817514 (* 1 = 0.0817514 loss)\nI0818 15:32:23.834341 21603 sgd_solver.cpp:166] Iteration 6300, 
lr = 0.1575\nI0818 15:33:10.972682 21603 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0818 15:33:37.223223 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75984\nI0818 15:33:37.223271 21603 solver.cpp:404]     Test net output #1: loss = 1.23624 (* 1 = 1.23624 loss)\nI0818 15:33:37.636818 21603 solver.cpp:228] Iteration 6400, loss = 0.0816771\nI0818 15:33:37.636869 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:33:37.636886 21603 solver.cpp:244]     Train net output #1: loss = 0.0816772 (* 1 = 0.0816772 loss)\nI0818 15:33:37.722196 21603 sgd_solver.cpp:166] Iteration 6400, lr = 0.16\nI0818 15:34:24.950286 21603 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0818 15:34:51.217460 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75236\nI0818 15:34:51.217509 21603 solver.cpp:404]     Test net output #1: loss = 1.21633 (* 1 = 1.21633 loss)\nI0818 15:34:51.630965 21603 solver.cpp:228] Iteration 6500, loss = 0.099799\nI0818 15:34:51.631014 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:34:51.631031 21603 solver.cpp:244]     Train net output #1: loss = 0.099799 (* 1 = 0.099799 loss)\nI0818 15:34:51.716573 21603 sgd_solver.cpp:166] Iteration 6500, lr = 0.1625\nI0818 15:35:38.645190 21603 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0818 15:36:04.890502 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7374\nI0818 15:36:04.890548 21603 solver.cpp:404]     Test net output #1: loss = 1.38168 (* 1 = 1.38168 loss)\nI0818 15:36:05.304003 21603 solver.cpp:228] Iteration 6600, loss = 0.123558\nI0818 15:36:05.304051 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:36:05.304067 21603 solver.cpp:244]     Train net output #1: loss = 0.123558 (* 1 = 0.123558 loss)\nI0818 15:36:05.404656 21603 sgd_solver.cpp:166] Iteration 6600, lr = 0.165\nI0818 15:36:52.448853 21603 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0818 15:37:18.704749 21603 solver.cpp:404]     
Test net output #0: accuracy = 0.70056\nI0818 15:37:18.704797 21603 solver.cpp:404]     Test net output #1: loss = 1.58494 (* 1 = 1.58494 loss)\nI0818 15:37:19.117218 21603 solver.cpp:228] Iteration 6700, loss = 0.103816\nI0818 15:37:19.117264 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:37:19.117280 21603 solver.cpp:244]     Train net output #1: loss = 0.103816 (* 1 = 0.103816 loss)\nI0818 15:37:19.208369 21603 sgd_solver.cpp:166] Iteration 6700, lr = 0.1675\nI0818 15:38:06.145999 21603 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0818 15:38:32.429179 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75868\nI0818 15:38:32.429226 21603 solver.cpp:404]     Test net output #1: loss = 1.20105 (* 1 = 1.20105 loss)\nI0818 15:38:32.841439 21603 solver.cpp:228] Iteration 6800, loss = 0.0897645\nI0818 15:38:32.841473 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:38:32.841490 21603 solver.cpp:244]     Train net output #1: loss = 0.0897646 (* 1 = 0.0897646 loss)\nI0818 15:38:32.931888 21603 sgd_solver.cpp:166] Iteration 6800, lr = 0.17\nI0818 15:39:19.945255 21603 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0818 15:39:46.229645 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73824\nI0818 15:39:46.229692 21603 solver.cpp:404]     Test net output #1: loss = 1.33757 (* 1 = 1.33757 loss)\nI0818 15:39:46.641929 21603 solver.cpp:228] Iteration 6900, loss = 0.132834\nI0818 15:39:46.641964 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:39:46.641979 21603 solver.cpp:244]     Train net output #1: loss = 0.132834 (* 1 = 0.132834 loss)\nI0818 15:39:46.737375 21603 sgd_solver.cpp:166] Iteration 6900, lr = 0.1725\nI0818 15:40:33.796385 21603 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0818 15:40:59.994005 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78492\nI0818 15:40:59.994050 21603 solver.cpp:404]     Test net output #1: loss = 1.03988 (* 1 = 1.03988 
loss)\nI0818 15:41:00.406404 21603 solver.cpp:228] Iteration 7000, loss = 0.0925914\nI0818 15:41:00.406443 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:41:00.406460 21603 solver.cpp:244]     Train net output #1: loss = 0.0925915 (* 1 = 0.0925915 loss)\nI0818 15:41:00.498196 21603 sgd_solver.cpp:166] Iteration 7000, lr = 0.175\nI0818 15:41:47.528107 21603 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0818 15:42:13.583071 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77136\nI0818 15:42:13.583117 21603 solver.cpp:404]     Test net output #1: loss = 1.13403 (* 1 = 1.13403 loss)\nI0818 15:42:13.996660 21603 solver.cpp:228] Iteration 7100, loss = 0.144009\nI0818 15:42:13.996701 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:42:13.996717 21603 solver.cpp:244]     Train net output #1: loss = 0.144009 (* 1 = 0.144009 loss)\nI0818 15:42:14.086215 21603 sgd_solver.cpp:166] Iteration 7100, lr = 0.1775\nI0818 15:43:01.023128 21603 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0818 15:43:27.299332 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7838\nI0818 15:43:27.299386 21603 solver.cpp:404]     Test net output #1: loss = 1.0379 (* 1 = 1.0379 loss)\nI0818 15:43:27.713186 21603 solver.cpp:228] Iteration 7200, loss = 0.136598\nI0818 15:43:27.713227 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:43:27.713251 21603 solver.cpp:244]     Train net output #1: loss = 0.136598 (* 1 = 0.136598 loss)\nI0818 15:43:27.802652 21603 sgd_solver.cpp:166] Iteration 7200, lr = 0.18\nI0818 15:44:14.775869 21603 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0818 15:44:41.055663 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7776\nI0818 15:44:41.055717 21603 solver.cpp:404]     Test net output #1: loss = 1.01556 (* 1 = 1.01556 loss)\nI0818 15:44:41.469183 21603 solver.cpp:228] Iteration 7300, loss = 0.110943\nI0818 15:44:41.469229 21603 solver.cpp:244]     Train net 
output #0: accuracy = 0.952\nI0818 15:44:41.469255 21603 solver.cpp:244]     Train net output #1: loss = 0.110943 (* 1 = 0.110943 loss)\nI0818 15:44:41.556473 21603 sgd_solver.cpp:166] Iteration 7300, lr = 0.1825\nI0818 15:45:28.495743 21603 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0818 15:45:54.759212 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77756\nI0818 15:45:54.759265 21603 solver.cpp:404]     Test net output #1: loss = 1.0011 (* 1 = 1.0011 loss)\nI0818 15:45:55.171831 21603 solver.cpp:228] Iteration 7400, loss = 0.0894251\nI0818 15:45:55.171875 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:45:55.171898 21603 solver.cpp:244]     Train net output #1: loss = 0.0894253 (* 1 = 0.0894253 loss)\nI0818 15:45:55.268204 21603 sgd_solver.cpp:166] Iteration 7400, lr = 0.185\nI0818 15:46:42.184445 21603 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0818 15:47:08.445957 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7354\nI0818 15:47:08.446010 21603 solver.cpp:404]     Test net output #1: loss = 1.43824 (* 1 = 1.43824 loss)\nI0818 15:47:08.858651 21603 solver.cpp:228] Iteration 7500, loss = 0.061128\nI0818 15:47:08.858685 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:47:08.858710 21603 solver.cpp:244]     Train net output #1: loss = 0.0611282 (* 1 = 0.0611282 loss)\nI0818 15:47:08.952924 21603 sgd_solver.cpp:166] Iteration 7500, lr = 0.1875\nI0818 15:47:55.870759 21603 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0818 15:48:22.099778 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80868\nI0818 15:48:22.099824 21603 solver.cpp:404]     Test net output #1: loss = 0.863917 (* 1 = 0.863917 loss)\nI0818 15:48:22.512019 21603 solver.cpp:228] Iteration 7600, loss = 0.0379183\nI0818 15:48:22.512054 21603 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 15:48:22.512070 21603 solver.cpp:244]     Train net output #1: loss = 0.0379184 (* 1 = 0.0379184 
loss)\nI0818 15:48:22.603477 21603 sgd_solver.cpp:166] Iteration 7600, lr = 0.19\nI0818 15:49:09.532469 21603 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0818 15:49:35.672560 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78908\nI0818 15:49:35.672611 21603 solver.cpp:404]     Test net output #1: loss = 1.02897 (* 1 = 1.02897 loss)\nI0818 15:49:36.085072 21603 solver.cpp:228] Iteration 7700, loss = 0.0987821\nI0818 15:49:36.085114 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:49:36.085137 21603 solver.cpp:244]     Train net output #1: loss = 0.0987822 (* 1 = 0.0987822 loss)\nI0818 15:49:36.181143 21603 sgd_solver.cpp:166] Iteration 7700, lr = 0.1925\nI0818 15:50:23.132289 21603 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0818 15:50:49.401832 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75924\nI0818 15:50:49.401886 21603 solver.cpp:404]     Test net output #1: loss = 1.22969 (* 1 = 1.22969 loss)\nI0818 15:50:49.814285 21603 solver.cpp:228] Iteration 7800, loss = 0.08382\nI0818 15:50:49.814327 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 15:50:49.814352 21603 solver.cpp:244]     Train net output #1: loss = 0.0838201 (* 1 = 0.0838201 loss)\nI0818 15:50:49.905093 21603 sgd_solver.cpp:166] Iteration 7800, lr = 0.195\nI0818 15:51:36.880291 21603 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0818 15:52:03.075700 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77016\nI0818 15:52:03.075757 21603 solver.cpp:404]     Test net output #1: loss = 1.17831 (* 1 = 1.17831 loss)\nI0818 15:52:03.487887 21603 solver.cpp:228] Iteration 7900, loss = 0.137584\nI0818 15:52:03.487929 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:52:03.487954 21603 solver.cpp:244]     Train net output #1: loss = 0.137584 (* 1 = 0.137584 loss)\nI0818 15:52:03.578586 21603 sgd_solver.cpp:166] Iteration 7900, lr = 0.1975\nI0818 15:52:50.498627 21603 solver.cpp:337] Iteration 
8000, Testing net (#0)\nI0818 15:53:16.773912 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76588\nI0818 15:53:16.773963 21603 solver.cpp:404]     Test net output #1: loss = 1.04175 (* 1 = 1.04175 loss)\nI0818 15:53:17.187826 21603 solver.cpp:228] Iteration 8000, loss = 0.0549467\nI0818 15:53:17.187880 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:53:17.187904 21603 solver.cpp:244]     Train net output #1: loss = 0.0549468 (* 1 = 0.0549468 loss)\nI0818 15:53:17.280139 21603 sgd_solver.cpp:166] Iteration 8000, lr = 0.2\nI0818 15:54:04.204762 21603 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0818 15:54:30.382597 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79208\nI0818 15:54:30.382647 21603 solver.cpp:404]     Test net output #1: loss = 0.987074 (* 1 = 0.987074 loss)\nI0818 15:54:30.796285 21603 solver.cpp:228] Iteration 8100, loss = 0.104675\nI0818 15:54:30.796341 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:54:30.796366 21603 solver.cpp:244]     Train net output #1: loss = 0.104675 (* 1 = 0.104675 loss)\nI0818 15:54:30.883785 21603 sgd_solver.cpp:166] Iteration 8100, lr = 0.2025\nI0818 15:55:17.849315 21603 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0818 15:55:44.112558 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75212\nI0818 15:55:44.112612 21603 solver.cpp:404]     Test net output #1: loss = 1.28552 (* 1 = 1.28552 loss)\nI0818 15:55:44.525315 21603 solver.cpp:228] Iteration 8200, loss = 0.138997\nI0818 15:55:44.525369 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:55:44.525394 21603 solver.cpp:244]     Train net output #1: loss = 0.138997 (* 1 = 0.138997 loss)\nI0818 15:55:44.615417 21603 sgd_solver.cpp:166] Iteration 8200, lr = 0.205\nI0818 15:56:31.548584 21603 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0818 15:56:57.809721 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75816\nI0818 15:56:57.809775 
21603 solver.cpp:404]     Test net output #1: loss = 1.32552 (* 1 = 1.32552 loss)\nI0818 15:56:58.223320 21603 solver.cpp:228] Iteration 8300, loss = 0.109564\nI0818 15:56:58.223376 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:56:58.223400 21603 solver.cpp:244]     Train net output #1: loss = 0.109564 (* 1 = 0.109564 loss)\nI0818 15:56:58.313526 21603 sgd_solver.cpp:166] Iteration 8300, lr = 0.2075\nI0818 15:57:45.250795 21603 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0818 15:58:11.521890 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74092\nI0818 15:58:11.521942 21603 solver.cpp:404]     Test net output #1: loss = 1.26637 (* 1 = 1.26637 loss)\nI0818 15:58:11.935401 21603 solver.cpp:228] Iteration 8400, loss = 0.114878\nI0818 15:58:11.935458 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:58:11.935482 21603 solver.cpp:244]     Train net output #1: loss = 0.114879 (* 1 = 0.114879 loss)\nI0818 15:58:12.025394 21603 sgd_solver.cpp:166] Iteration 8400, lr = 0.21\nI0818 15:58:59.031219 21603 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0818 15:59:25.309906 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79112\nI0818 15:59:25.309957 21603 solver.cpp:404]     Test net output #1: loss = 0.902608 (* 1 = 0.902608 loss)\nI0818 15:59:25.724107 21603 solver.cpp:228] Iteration 8500, loss = 0.0435626\nI0818 15:59:25.724164 21603 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 15:59:25.724189 21603 solver.cpp:244]     Train net output #1: loss = 0.0435627 (* 1 = 0.0435627 loss)\nI0818 15:59:25.811954 21603 sgd_solver.cpp:166] Iteration 8500, lr = 0.2125\nI0818 16:00:12.797780 21603 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0818 16:00:38.890841 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7598\nI0818 16:00:38.890892 21603 solver.cpp:404]     Test net output #1: loss = 1.24696 (* 1 = 1.24696 loss)\nI0818 16:00:39.305696 21603 solver.cpp:228] Iteration 
8600, loss = 0.132359\nI0818 16:00:39.305750 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:00:39.305768 21603 solver.cpp:244]     Train net output #1: loss = 0.132359 (* 1 = 0.132359 loss)\nI0818 16:00:39.395467 21603 sgd_solver.cpp:166] Iteration 8600, lr = 0.215\nI0818 16:01:26.335849 21603 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0818 16:01:52.555829 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7986\nI0818 16:01:52.555874 21603 solver.cpp:404]     Test net output #1: loss = 0.843511 (* 1 = 0.843511 loss)\nI0818 16:01:52.969714 21603 solver.cpp:228] Iteration 8700, loss = 0.0759886\nI0818 16:01:52.969769 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:01:52.969786 21603 solver.cpp:244]     Train net output #1: loss = 0.0759887 (* 1 = 0.0759887 loss)\nI0818 16:01:53.059959 21603 sgd_solver.cpp:166] Iteration 8700, lr = 0.2175\nI0818 16:02:40.044929 21603 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0818 16:03:06.323590 21603 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0818 16:03:06.323635 21603 solver.cpp:404]     Test net output #1: loss = 0.87287 (* 1 = 0.87287 loss)\nI0818 16:03:06.736138 21603 solver.cpp:228] Iteration 8800, loss = 0.18873\nI0818 16:03:06.736194 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 16:03:06.736212 21603 solver.cpp:244]     Train net output #1: loss = 0.18873 (* 1 = 0.18873 loss)\nI0818 16:03:06.829824 21603 sgd_solver.cpp:166] Iteration 8800, lr = 0.22\nI0818 16:03:53.829836 21603 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0818 16:04:20.108117 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7706\nI0818 16:04:20.108165 21603 solver.cpp:404]     Test net output #1: loss = 1.05043 (* 1 = 1.05043 loss)\nI0818 16:04:20.520900 21603 solver.cpp:228] Iteration 8900, loss = 0.06588\nI0818 16:04:20.520953 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:04:20.520970 21603 
solver.cpp:244]     Train net output #1: loss = 0.0658801 (* 1 = 0.0658801 loss)\nI0818 16:04:20.614676 21603 sgd_solver.cpp:166] Iteration 8900, lr = 0.2225\nI0818 16:05:07.603462 21603 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0818 16:05:33.864778 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76228\nI0818 16:05:33.864823 21603 solver.cpp:404]     Test net output #1: loss = 1.15707 (* 1 = 1.15707 loss)\nI0818 16:05:34.277345 21603 solver.cpp:228] Iteration 9000, loss = 0.196678\nI0818 16:05:34.277396 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:05:34.277413 21603 solver.cpp:244]     Train net output #1: loss = 0.196678 (* 1 = 0.196678 loss)\nI0818 16:05:34.369736 21603 sgd_solver.cpp:166] Iteration 9000, lr = 0.225\nI0818 16:06:21.390523 21603 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0818 16:06:47.679112 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72316\nI0818 16:06:47.679159 21603 solver.cpp:404]     Test net output #1: loss = 1.37923 (* 1 = 1.37923 loss)\nI0818 16:06:48.091538 21603 solver.cpp:228] Iteration 9100, loss = 0.107898\nI0818 16:06:48.091591 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:06:48.091608 21603 solver.cpp:244]     Train net output #1: loss = 0.107899 (* 1 = 0.107899 loss)\nI0818 16:06:48.191027 21603 sgd_solver.cpp:166] Iteration 9100, lr = 0.2275\nI0818 16:07:35.269984 21603 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0818 16:08:01.549233 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66832\nI0818 16:08:01.549291 21603 solver.cpp:404]     Test net output #1: loss = 1.79383 (* 1 = 1.79383 loss)\nI0818 16:08:01.963074 21603 solver.cpp:228] Iteration 9200, loss = 0.0841307\nI0818 16:08:01.963127 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:08:01.963145 21603 solver.cpp:244]     Train net output #1: loss = 0.0841309 (* 1 = 0.0841309 loss)\nI0818 16:08:02.049942 21603 sgd_solver.cpp:166] 
Iteration 9200, lr = 0.23\nI0818 16:08:49.148090 21603 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0818 16:09:15.419981 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78668\nI0818 16:09:15.420027 21603 solver.cpp:404]     Test net output #1: loss = 0.970248 (* 1 = 0.970248 loss)\nI0818 16:09:15.833461 21603 solver.cpp:228] Iteration 9300, loss = 0.138296\nI0818 16:09:15.833508 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:09:15.833526 21603 solver.cpp:244]     Train net output #1: loss = 0.138297 (* 1 = 0.138297 loss)\nI0818 16:09:15.921667 21603 sgd_solver.cpp:166] Iteration 9300, lr = 0.2325\nI0818 16:10:02.869691 21603 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0818 16:10:29.132768 21603 solver.cpp:404]     Test net output #0: accuracy = 0.814\nI0818 16:10:29.132815 21603 solver.cpp:404]     Test net output #1: loss = 0.81157 (* 1 = 0.81157 loss)\nI0818 16:10:29.545306 21603 solver.cpp:228] Iteration 9400, loss = 0.119894\nI0818 16:10:29.545354 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:10:29.545372 21603 solver.cpp:244]     Train net output #1: loss = 0.119894 (* 1 = 0.119894 loss)\nI0818 16:10:29.635645 21603 sgd_solver.cpp:166] Iteration 9400, lr = 0.235\nI0818 16:11:16.587110 21603 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0818 16:11:42.857980 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77448\nI0818 16:11:42.858026 21603 solver.cpp:404]     Test net output #1: loss = 0.966944 (* 1 = 0.966944 loss)\nI0818 16:11:43.270350 21603 solver.cpp:228] Iteration 9500, loss = 0.113808\nI0818 16:11:43.270400 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:11:43.270416 21603 solver.cpp:244]     Train net output #1: loss = 0.113808 (* 1 = 0.113808 loss)\nI0818 16:11:43.366559 21603 sgd_solver.cpp:166] Iteration 9500, lr = 0.2375\nI0818 16:12:30.377908 21603 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0818 16:12:56.646526 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.72404\nI0818 16:12:56.646572 21603 solver.cpp:404]     Test net output #1: loss = 1.29309 (* 1 = 1.29309 loss)\nI0818 16:12:57.060519 21603 solver.cpp:228] Iteration 9600, loss = 0.0774078\nI0818 16:12:57.060569 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:12:57.060586 21603 solver.cpp:244]     Train net output #1: loss = 0.0774079 (* 1 = 0.0774079 loss)\nI0818 16:12:57.153132 21603 sgd_solver.cpp:166] Iteration 9600, lr = 0.24\nI0818 16:13:44.146632 21603 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0818 16:14:10.387923 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7418\nI0818 16:14:10.387969 21603 solver.cpp:404]     Test net output #1: loss = 1.32416 (* 1 = 1.32416 loss)\nI0818 16:14:10.801271 21603 solver.cpp:228] Iteration 9700, loss = 0.120406\nI0818 16:14:10.801323 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:14:10.801340 21603 solver.cpp:244]     Train net output #1: loss = 0.120406 (* 1 = 0.120406 loss)\nI0818 16:14:10.892388 21603 sgd_solver.cpp:166] Iteration 9700, lr = 0.2425\nI0818 16:14:57.744304 21603 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0818 16:15:23.942543 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77748\nI0818 16:15:23.942595 21603 solver.cpp:404]     Test net output #1: loss = 0.951074 (* 1 = 0.951074 loss)\nI0818 16:15:24.356386 21603 solver.cpp:228] Iteration 9800, loss = 0.157014\nI0818 16:15:24.356436 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:15:24.356462 21603 solver.cpp:244]     Train net output #1: loss = 0.157015 (* 1 = 0.157015 loss)\nI0818 16:15:24.447983 21603 sgd_solver.cpp:166] Iteration 9800, lr = 0.245\nI0818 16:16:11.247687 21603 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0818 16:16:37.521875 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI0818 16:16:37.521926 21603 solver.cpp:404]     Test net output #1: loss = 
1.05812 (* 1 = 1.05812 loss)\nI0818 16:16:37.935520 21603 solver.cpp:228] Iteration 9900, loss = 0.0986022\nI0818 16:16:37.935572 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:16:37.935598 21603 solver.cpp:244]     Train net output #1: loss = 0.0986023 (* 1 = 0.0986023 loss)\nI0818 16:16:38.024941 21603 sgd_solver.cpp:166] Iteration 9900, lr = 0.2475\nI0818 16:17:24.948559 21603 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0818 16:17:51.229462 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73616\nI0818 16:17:51.229521 21603 solver.cpp:404]     Test net output #1: loss = 1.33491 (* 1 = 1.33491 loss)\nI0818 16:17:51.643082 21603 solver.cpp:228] Iteration 10000, loss = 0.0695916\nI0818 16:17:51.643141 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:17:51.643167 21603 solver.cpp:244]     Train net output #1: loss = 0.0695917 (* 1 = 0.0695917 loss)\nI0818 16:17:51.734201 21603 sgd_solver.cpp:166] Iteration 10000, lr = 0.25\nI0818 16:18:38.825464 21603 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0818 16:19:05.084528 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI0818 16:19:05.084579 21603 solver.cpp:404]     Test net output #1: loss = 0.866407 (* 1 = 0.866407 loss)\nI0818 16:19:05.498337 21603 solver.cpp:228] Iteration 10100, loss = 0.130791\nI0818 16:19:05.498394 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:19:05.498419 21603 solver.cpp:244]     Train net output #1: loss = 0.130791 (* 1 = 0.130791 loss)\nI0818 16:19:05.590809 21603 sgd_solver.cpp:166] Iteration 10100, lr = 0.2525\nI0818 16:19:52.587065 21603 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0818 16:20:18.841559 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79388\nI0818 16:20:18.841609 21603 solver.cpp:404]     Test net output #1: loss = 0.862015 (* 1 = 0.862015 loss)\nI0818 16:20:19.255056 21603 solver.cpp:228] Iteration 10200, loss = 0.0872753\nI0818 
16:20:19.255115 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:20:19.255141 21603 solver.cpp:244]     Train net output #1: loss = 0.0872753 (* 1 = 0.0872753 loss)\nI0818 16:20:19.347704 21603 sgd_solver.cpp:166] Iteration 10200, lr = 0.255\nI0818 16:21:06.417603 21603 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0818 16:21:32.709249 21603 solver.cpp:404]     Test net output #0: accuracy = 0.61004\nI0818 16:21:32.709323 21603 solver.cpp:404]     Test net output #1: loss = 2.45381 (* 1 = 2.45381 loss)\nI0818 16:21:33.123334 21603 solver.cpp:228] Iteration 10300, loss = 0.146384\nI0818 16:21:33.123389 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:21:33.123414 21603 solver.cpp:244]     Train net output #1: loss = 0.146384 (* 1 = 0.146384 loss)\nI0818 16:21:33.222899 21603 sgd_solver.cpp:166] Iteration 10300, lr = 0.2575\nI0818 16:22:20.491657 21603 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0818 16:22:46.782387 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75144\nI0818 16:22:46.782461 21603 solver.cpp:404]     Test net output #1: loss = 1.13067 (* 1 = 1.13067 loss)\nI0818 16:22:47.196113 21603 solver.cpp:228] Iteration 10400, loss = 0.0965824\nI0818 16:22:47.196167 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:22:47.196192 21603 solver.cpp:244]     Train net output #1: loss = 0.0965825 (* 1 = 0.0965825 loss)\nI0818 16:22:47.281805 21603 sgd_solver.cpp:166] Iteration 10400, lr = 0.26\nI0818 16:23:34.555119 21603 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0818 16:24:00.876323 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79444\nI0818 16:24:00.876396 21603 solver.cpp:404]     Test net output #1: loss = 0.960703 (* 1 = 0.960703 loss)\nI0818 16:24:01.290563 21603 solver.cpp:228] Iteration 10500, loss = 0.0938329\nI0818 16:24:01.290618 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:24:01.290643 21603 solver.cpp:244]     
Train net output #1: loss = 0.093833 (* 1 = 0.093833 loss)\nI0818 16:24:01.384958 21603 sgd_solver.cpp:166] Iteration 10500, lr = 0.2625\nI0818 16:24:48.530900 21603 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0818 16:25:14.849395 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7654\nI0818 16:25:14.849469 21603 solver.cpp:404]     Test net output #1: loss = 1.07974 (* 1 = 1.07974 loss)\nI0818 16:25:15.263483 21603 solver.cpp:228] Iteration 10600, loss = 0.0913832\nI0818 16:25:15.263522 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:25:15.263548 21603 solver.cpp:244]     Train net output #1: loss = 0.0913833 (* 1 = 0.0913833 loss)\nI0818 16:25:15.353307 21603 sgd_solver.cpp:166] Iteration 10600, lr = 0.265\nI0818 16:26:02.323796 21603 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0818 16:26:28.626303 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76632\nI0818 16:26:28.626376 21603 solver.cpp:404]     Test net output #1: loss = 1.02549 (* 1 = 1.02549 loss)\nI0818 16:26:29.040518 21603 solver.cpp:228] Iteration 10700, loss = 0.0937087\nI0818 16:26:29.040566 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:26:29.040591 21603 solver.cpp:244]     Train net output #1: loss = 0.0937088 (* 1 = 0.0937088 loss)\nI0818 16:26:29.125620 21603 sgd_solver.cpp:166] Iteration 10700, lr = 0.2675\nI0818 16:27:16.214556 21603 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0818 16:27:42.562079 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78176\nI0818 16:27:42.562151 21603 solver.cpp:404]     Test net output #1: loss = 0.977106 (* 1 = 0.977106 loss)\nI0818 16:27:42.974627 21603 solver.cpp:228] Iteration 10800, loss = 0.122893\nI0818 16:27:42.974673 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:27:42.974697 21603 solver.cpp:244]     Train net output #1: loss = 0.122893 (* 1 = 0.122893 loss)\nI0818 16:27:43.075844 21603 sgd_solver.cpp:166] Iteration 10800, 
lr = 0.27\nI0818 16:28:30.288467 21603 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0818 16:28:56.653439 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7708\nI0818 16:28:56.653515 21603 solver.cpp:404]     Test net output #1: loss = 1.10035 (* 1 = 1.10035 loss)\nI0818 16:28:57.065896 21603 solver.cpp:228] Iteration 10900, loss = 0.135322\nI0818 16:28:57.065939 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:28:57.065964 21603 solver.cpp:244]     Train net output #1: loss = 0.135322 (* 1 = 0.135322 loss)\nI0818 16:28:57.158344 21603 sgd_solver.cpp:166] Iteration 10900, lr = 0.2725\nI0818 16:29:44.115456 21603 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0818 16:30:10.413233 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72884\nI0818 16:30:10.413305 21603 solver.cpp:404]     Test net output #1: loss = 1.30944 (* 1 = 1.30944 loss)\nI0818 16:30:10.826593 21603 solver.cpp:228] Iteration 11000, loss = 0.0887477\nI0818 16:30:10.826625 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:30:10.826650 21603 solver.cpp:244]     Train net output #1: loss = 0.0887478 (* 1 = 0.0887478 loss)\nI0818 16:30:10.914855 21603 sgd_solver.cpp:166] Iteration 11000, lr = 0.275\nI0818 16:30:57.946434 21603 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0818 16:31:24.092996 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79628\nI0818 16:31:24.093046 21603 solver.cpp:404]     Test net output #1: loss = 0.923915 (* 1 = 0.923915 loss)\nI0818 16:31:24.505839 21603 solver.cpp:228] Iteration 11100, loss = 0.0958727\nI0818 16:31:24.505879 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:31:24.505903 21603 solver.cpp:244]     Train net output #1: loss = 0.0958728 (* 1 = 0.0958728 loss)\nI0818 16:31:24.592149 21603 sgd_solver.cpp:166] Iteration 11100, lr = 0.2775\nI0818 16:32:11.751294 21603 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0818 16:32:37.844146 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.78872\nI0818 16:32:37.844197 21603 solver.cpp:404]     Test net output #1: loss = 0.91077 (* 1 = 0.91077 loss)\nI0818 16:32:38.257040 21603 solver.cpp:228] Iteration 11200, loss = 0.124218\nI0818 16:32:38.257078 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:32:38.257103 21603 solver.cpp:244]     Train net output #1: loss = 0.124218 (* 1 = 0.124218 loss)\nI0818 16:32:38.348181 21603 sgd_solver.cpp:166] Iteration 11200, lr = 0.28\nI0818 16:33:25.546259 21603 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0818 16:33:51.660267 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79904\nI0818 16:33:51.660323 21603 solver.cpp:404]     Test net output #1: loss = 0.871923 (* 1 = 0.871923 loss)\nI0818 16:33:52.072886 21603 solver.cpp:228] Iteration 11300, loss = 0.0627896\nI0818 16:33:52.072921 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:33:52.072945 21603 solver.cpp:244]     Train net output #1: loss = 0.0627896 (* 1 = 0.0627896 loss)\nI0818 16:33:52.160759 21603 sgd_solver.cpp:166] Iteration 11300, lr = 0.2825\nI0818 16:34:39.588865 21603 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0818 16:35:05.672364 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7864\nI0818 16:35:05.672413 21603 solver.cpp:404]     Test net output #1: loss = 0.971865 (* 1 = 0.971865 loss)\nI0818 16:35:06.086395 21603 solver.cpp:228] Iteration 11400, loss = 0.0515921\nI0818 16:35:06.086438 21603 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:35:06.086462 21603 solver.cpp:244]     Train net output #1: loss = 0.0515922 (* 1 = 0.0515922 loss)\nI0818 16:35:06.173384 21603 sgd_solver.cpp:166] Iteration 11400, lr = 0.285\nI0818 16:35:53.658862 21603 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0818 16:36:19.851626 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77644\nI0818 16:36:19.851675 21603 solver.cpp:404]     Test net output 
#1: loss = 1.01828 (* 1 = 1.01828 loss)\nI0818 16:36:20.265442 21603 solver.cpp:228] Iteration 11500, loss = 0.129256\nI0818 16:36:20.265485 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 16:36:20.265514 21603 solver.cpp:244]     Train net output #1: loss = 0.129256 (* 1 = 0.129256 loss)\nI0818 16:36:20.356461 21603 sgd_solver.cpp:166] Iteration 11500, lr = 0.2875\nI0818 16:37:07.805102 21603 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0818 16:37:33.890959 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79364\nI0818 16:37:33.891007 21603 solver.cpp:404]     Test net output #1: loss = 0.918866 (* 1 = 0.918866 loss)\nI0818 16:37:34.305083 21603 solver.cpp:228] Iteration 11600, loss = 0.0828317\nI0818 16:37:34.305127 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:37:34.305152 21603 solver.cpp:244]     Train net output #1: loss = 0.0828317 (* 1 = 0.0828317 loss)\nI0818 16:37:34.394388 21603 sgd_solver.cpp:166] Iteration 11600, lr = 0.29\nI0818 16:38:21.825114 21603 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0818 16:38:47.918609 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8038\nI0818 16:38:47.918658 21603 solver.cpp:404]     Test net output #1: loss = 0.816963 (* 1 = 0.816963 loss)\nI0818 16:38:48.332280 21603 solver.cpp:228] Iteration 11700, loss = 0.0871282\nI0818 16:38:48.332324 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:38:48.332348 21603 solver.cpp:244]     Train net output #1: loss = 0.0871283 (* 1 = 0.0871283 loss)\nI0818 16:38:48.425619 21603 sgd_solver.cpp:166] Iteration 11700, lr = 0.2925\nI0818 16:39:35.785956 21603 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0818 16:40:01.866528 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7516\nI0818 16:40:01.866577 21603 solver.cpp:404]     Test net output #1: loss = 1.16809 (* 1 = 1.16809 loss)\nI0818 16:40:02.280215 21603 solver.cpp:228] Iteration 11800, loss = 0.109038\nI0818 
16:40:02.280256 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:40:02.280280 21603 solver.cpp:244]     Train net output #1: loss = 0.109039 (* 1 = 0.109039 loss)\nI0818 16:40:02.365057 21603 sgd_solver.cpp:166] Iteration 11800, lr = 0.295\nI0818 16:40:49.911558 21603 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0818 16:41:15.995640 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI0818 16:41:15.995688 21603 solver.cpp:404]     Test net output #1: loss = 1.41177 (* 1 = 1.41177 loss)\nI0818 16:41:16.408370 21603 solver.cpp:228] Iteration 11900, loss = 0.155233\nI0818 16:41:16.408412 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 16:41:16.408437 21603 solver.cpp:244]     Train net output #1: loss = 0.155233 (* 1 = 0.155233 loss)\nI0818 16:41:16.495671 21603 sgd_solver.cpp:166] Iteration 11900, lr = 0.2975\nI0818 16:42:03.920284 21603 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0818 16:42:30.113310 21603 solver.cpp:404]     Test net output #0: accuracy = 0.753\nI0818 16:42:30.113363 21603 solver.cpp:404]     Test net output #1: loss = 1.07768 (* 1 = 1.07768 loss)\nI0818 16:42:30.527125 21603 solver.cpp:228] Iteration 12000, loss = 0.105738\nI0818 16:42:30.527164 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:42:30.527189 21603 solver.cpp:244]     Train net output #1: loss = 0.105738 (* 1 = 0.105738 loss)\nI0818 16:42:30.623574 21603 sgd_solver.cpp:166] Iteration 12000, lr = 0.3\nI0818 16:43:18.003607 21603 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0818 16:43:44.195899 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76056\nI0818 16:43:44.195950 21603 solver.cpp:404]     Test net output #1: loss = 1.08633 (* 1 = 1.08633 loss)\nI0818 16:43:44.608700 21603 solver.cpp:228] Iteration 12100, loss = 0.081202\nI0818 16:43:44.608739 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:43:44.608763 21603 solver.cpp:244]     Train net 
output #1: loss = 0.081202 (* 1 = 0.081202 loss)\nI0818 16:43:44.694963 21603 sgd_solver.cpp:166] Iteration 12100, lr = 0.3025\nI0818 16:44:32.156774 21603 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0818 16:44:58.245353 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75576\nI0818 16:44:58.245406 21603 solver.cpp:404]     Test net output #1: loss = 1.09578 (* 1 = 1.09578 loss)\nI0818 16:44:58.657836 21603 solver.cpp:228] Iteration 12200, loss = 0.0787866\nI0818 16:44:58.657874 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:44:58.657899 21603 solver.cpp:244]     Train net output #1: loss = 0.0787867 (* 1 = 0.0787867 loss)\nI0818 16:44:58.753839 21603 sgd_solver.cpp:166] Iteration 12200, lr = 0.305\nI0818 16:45:46.142971 21603 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 16:46:12.234747 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7738\nI0818 16:46:12.234798 21603 solver.cpp:404]     Test net output #1: loss = 1.04171 (* 1 = 1.04171 loss)\nI0818 16:46:12.648892 21603 solver.cpp:228] Iteration 12300, loss = 0.159347\nI0818 16:46:12.648937 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:46:12.648962 21603 solver.cpp:244]     Train net output #1: loss = 0.159347 (* 1 = 0.159347 loss)\nI0818 16:46:12.736583 21603 sgd_solver.cpp:166] Iteration 12300, lr = 0.3075\nI0818 16:47:00.242965 21603 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 16:47:26.371142 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7496\nI0818 16:47:26.371191 21603 solver.cpp:404]     Test net output #1: loss = 1.2486 (* 1 = 1.2486 loss)\nI0818 16:47:26.784987 21603 solver.cpp:228] Iteration 12400, loss = 0.0595111\nI0818 16:47:26.785030 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:47:26.785054 21603 solver.cpp:244]     Train net output #1: loss = 0.0595112 (* 1 = 0.0595112 loss)\nI0818 16:47:26.867902 21603 sgd_solver.cpp:166] Iteration 12400, lr = 
0.31\nI0818 16:48:14.214758 21603 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 16:48:40.339802 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75876\nI0818 16:48:40.339854 21603 solver.cpp:404]     Test net output #1: loss = 1.16685 (* 1 = 1.16685 loss)\nI0818 16:48:40.753633 21603 solver.cpp:228] Iteration 12500, loss = 0.0924379\nI0818 16:48:40.753680 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:48:40.753705 21603 solver.cpp:244]     Train net output #1: loss = 0.0924379 (* 1 = 0.0924379 loss)\nI0818 16:48:40.843909 21603 sgd_solver.cpp:166] Iteration 12500, lr = 0.3125\nI0818 16:49:28.204861 21603 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 16:49:54.292804 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67424\nI0818 16:49:54.292860 21603 solver.cpp:404]     Test net output #1: loss = 1.781 (* 1 = 1.781 loss)\nI0818 16:49:54.706671 21603 solver.cpp:228] Iteration 12600, loss = 0.0723342\nI0818 16:49:54.706720 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:49:54.706745 21603 solver.cpp:244]     Train net output #1: loss = 0.0723342 (* 1 = 0.0723342 loss)\nI0818 16:49:54.793929 21603 sgd_solver.cpp:166] Iteration 12600, lr = 0.315\nI0818 16:50:42.043839 21603 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 16:51:08.229521 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75084\nI0818 16:51:08.229578 21603 solver.cpp:404]     Test net output #1: loss = 1.13389 (* 1 = 1.13389 loss)\nI0818 16:51:08.641890 21603 solver.cpp:228] Iteration 12700, loss = 0.0665105\nI0818 16:51:08.641937 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:51:08.641961 21603 solver.cpp:244]     Train net output #1: loss = 0.0665105 (* 1 = 0.0665105 loss)\nI0818 16:51:08.730478 21603 sgd_solver.cpp:166] Iteration 12700, lr = 0.3175\nI0818 16:51:56.090030 21603 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 16:52:22.231660 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.7664\nI0818 16:52:22.231710 21603 solver.cpp:404]     Test net output #1: loss = 1.11945 (* 1 = 1.11945 loss)\nI0818 16:52:22.645586 21603 solver.cpp:228] Iteration 12800, loss = 0.0895291\nI0818 16:52:22.645638 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:52:22.645663 21603 solver.cpp:244]     Train net output #1: loss = 0.0895291 (* 1 = 0.0895291 loss)\nI0818 16:52:22.736080 21603 sgd_solver.cpp:166] Iteration 12800, lr = 0.32\nI0818 16:53:09.912032 21603 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 16:53:36.189610 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77512\nI0818 16:53:36.189662 21603 solver.cpp:404]     Test net output #1: loss = 1.07351 (* 1 = 1.07351 loss)\nI0818 16:53:36.602475 21603 solver.cpp:228] Iteration 12900, loss = 0.158159\nI0818 16:53:36.602526 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:53:36.602551 21603 solver.cpp:244]     Train net output #1: loss = 0.158159 (* 1 = 0.158159 loss)\nI0818 16:53:36.699195 21603 sgd_solver.cpp:166] Iteration 12900, lr = 0.3225\nI0818 16:54:23.958335 21603 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 16:54:50.226598 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73992\nI0818 16:54:50.226650 21603 solver.cpp:404]     Test net output #1: loss = 1.32722 (* 1 = 1.32722 loss)\nI0818 16:54:50.640800 21603 solver.cpp:228] Iteration 13000, loss = 0.101677\nI0818 16:54:50.640847 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:54:50.640872 21603 solver.cpp:244]     Train net output #1: loss = 0.101677 (* 1 = 0.101677 loss)\nI0818 16:54:50.730903 21603 sgd_solver.cpp:166] Iteration 13000, lr = 0.325\nI0818 16:55:38.086549 21603 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 16:56:04.290499 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8106\nI0818 16:56:04.290549 21603 solver.cpp:404]     Test net output #1: loss 
= 0.813076 (* 1 = 0.813076 loss)\nI0818 16:56:04.704762 21603 solver.cpp:228] Iteration 13100, loss = 0.174241\nI0818 16:56:04.704818 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:56:04.704843 21603 solver.cpp:244]     Train net output #1: loss = 0.174241 (* 1 = 0.174241 loss)\nI0818 16:56:04.791726 21603 sgd_solver.cpp:166] Iteration 13100, lr = 0.3275\nI0818 16:56:51.975841 21603 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 16:57:18.073853 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77604\nI0818 16:57:18.073904 21603 solver.cpp:404]     Test net output #1: loss = 0.948326 (* 1 = 0.948326 loss)\nI0818 16:57:18.487479 21603 solver.cpp:228] Iteration 13200, loss = 0.0715871\nI0818 16:57:18.487538 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:57:18.487563 21603 solver.cpp:244]     Train net output #1: loss = 0.0715872 (* 1 = 0.0715872 loss)\nI0818 16:57:18.580938 21603 sgd_solver.cpp:166] Iteration 13200, lr = 0.33\nI0818 16:58:05.870331 21603 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 16:58:32.156796 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79932\nI0818 16:58:32.156849 21603 solver.cpp:404]     Test net output #1: loss = 0.823561 (* 1 = 0.823561 loss)\nI0818 16:58:32.569298 21603 solver.cpp:228] Iteration 13300, loss = 0.0914343\nI0818 16:58:32.569350 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:58:32.569376 21603 solver.cpp:244]     Train net output #1: loss = 0.0914344 (* 1 = 0.0914344 loss)\nI0818 16:58:32.663475 21603 sgd_solver.cpp:166] Iteration 13300, lr = 0.3325\nI0818 16:59:19.978833 21603 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 16:59:46.244912 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7698\nI0818 16:59:46.244971 21603 solver.cpp:404]     Test net output #1: loss = 1.02367 (* 1 = 1.02367 loss)\nI0818 16:59:46.657723 21603 solver.cpp:228] Iteration 13400, loss = 0.108663\nI0818 
16:59:46.657776 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:59:46.657801 21603 solver.cpp:244]     Train net output #1: loss = 0.108663 (* 1 = 0.108663 loss)\nI0818 16:59:46.750833 21603 sgd_solver.cpp:166] Iteration 13400, lr = 0.335\nI0818 17:00:34.005807 21603 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 17:01:00.099874 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74944\nI0818 17:01:00.099925 21603 solver.cpp:404]     Test net output #1: loss = 1.3414 (* 1 = 1.3414 loss)\nI0818 17:01:00.512538 21603 solver.cpp:228] Iteration 13500, loss = 0.10106\nI0818 17:01:00.512593 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:01:00.512619 21603 solver.cpp:244]     Train net output #1: loss = 0.10106 (* 1 = 0.10106 loss)\nI0818 17:01:00.600859 21603 sgd_solver.cpp:166] Iteration 13500, lr = 0.3375\nI0818 17:01:47.817811 21603 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 17:02:13.976326 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7424\nI0818 17:02:13.976380 21603 solver.cpp:404]     Test net output #1: loss = 1.28378 (* 1 = 1.28378 loss)\nI0818 17:02:14.389159 21603 solver.cpp:228] Iteration 13600, loss = 0.0698688\nI0818 17:02:14.389219 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:02:14.389247 21603 solver.cpp:244]     Train net output #1: loss = 0.0698689 (* 1 = 0.0698689 loss)\nI0818 17:02:14.483937 21603 sgd_solver.cpp:166] Iteration 13600, lr = 0.34\nI0818 17:03:01.621525 21603 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 17:03:27.735759 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80424\nI0818 17:03:27.735807 21603 solver.cpp:404]     Test net output #1: loss = 0.859778 (* 1 = 0.859778 loss)\nI0818 17:03:28.150068 21603 solver.cpp:228] Iteration 13700, loss = 0.0863293\nI0818 17:03:28.150123 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:03:28.150148 21603 solver.cpp:244]     Train 
net output #1: loss = 0.0863294 (* 1 = 0.0863294 loss)\nI0818 17:03:28.241730 21603 sgd_solver.cpp:166] Iteration 13700, lr = 0.3425\nI0818 17:04:15.578455 21603 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 17:04:41.665809 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75524\nI0818 17:04:41.665858 21603 solver.cpp:404]     Test net output #1: loss = 1.17703 (* 1 = 1.17703 loss)\nI0818 17:04:42.080021 21603 solver.cpp:228] Iteration 13800, loss = 0.0913389\nI0818 17:04:42.080076 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:04:42.080101 21603 solver.cpp:244]     Train net output #1: loss = 0.091339 (* 1 = 0.091339 loss)\nI0818 17:04:42.170728 21603 sgd_solver.cpp:166] Iteration 13800, lr = 0.345\nI0818 17:05:29.376485 21603 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 17:05:55.487120 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82004\nI0818 17:05:55.487174 21603 solver.cpp:404]     Test net output #1: loss = 0.763406 (* 1 = 0.763406 loss)\nI0818 17:05:55.901041 21603 solver.cpp:228] Iteration 13900, loss = 0.0469854\nI0818 17:05:55.901095 21603 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:05:55.901119 21603 solver.cpp:244]     Train net output #1: loss = 0.0469855 (* 1 = 0.0469855 loss)\nI0818 17:05:55.990270 21603 sgd_solver.cpp:166] Iteration 13900, lr = 0.3475\nI0818 17:06:43.451452 21603 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 17:07:09.618721 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8014\nI0818 17:07:09.618774 21603 solver.cpp:404]     Test net output #1: loss = 0.820611 (* 1 = 0.820611 loss)\nI0818 17:07:10.032434 21603 solver.cpp:228] Iteration 14000, loss = 0.088818\nI0818 17:07:10.032491 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:07:10.032522 21603 solver.cpp:244]     Train net output #1: loss = 0.0888181 (* 1 = 0.0888181 loss)\nI0818 17:07:10.119256 21603 sgd_solver.cpp:166] Iteration 14000, lr 
= 0.35\nI0818 17:07:57.473147 21603 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 17:08:23.625954 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78752\nI0818 17:08:23.626003 21603 solver.cpp:404]     Test net output #1: loss = 0.978379 (* 1 = 0.978379 loss)\nI0818 17:08:24.039839 21603 solver.cpp:228] Iteration 14100, loss = 0.112093\nI0818 17:08:24.039893 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:08:24.039918 21603 solver.cpp:244]     Train net output #1: loss = 0.112093 (* 1 = 0.112093 loss)\nI0818 17:08:24.133322 21603 sgd_solver.cpp:166] Iteration 14100, lr = 0.3525\nI0818 17:09:11.402781 21603 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 17:09:37.487349 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77404\nI0818 17:09:37.487399 21603 solver.cpp:404]     Test net output #1: loss = 1.13107 (* 1 = 1.13107 loss)\nI0818 17:09:37.901157 21603 solver.cpp:228] Iteration 14200, loss = 0.160638\nI0818 17:09:37.901212 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:09:37.901238 21603 solver.cpp:244]     Train net output #1: loss = 0.160638 (* 1 = 0.160638 loss)\nI0818 17:09:37.995404 21603 sgd_solver.cpp:166] Iteration 14200, lr = 0.355\nI0818 17:10:25.440807 21603 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 17:10:51.539924 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76144\nI0818 17:10:51.539968 21603 solver.cpp:404]     Test net output #1: loss = 1.13879 (* 1 = 1.13879 loss)\nI0818 17:10:51.952262 21603 solver.cpp:228] Iteration 14300, loss = 0.111077\nI0818 17:10:51.952312 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:10:51.952328 21603 solver.cpp:244]     Train net output #1: loss = 0.111077 (* 1 = 0.111077 loss)\nI0818 17:10:52.046077 21603 sgd_solver.cpp:166] Iteration 14300, lr = 0.3575\nI0818 17:11:39.434039 21603 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 17:12:05.502136 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.81108\nI0818 17:12:05.502182 21603 solver.cpp:404]     Test net output #1: loss = 0.74709 (* 1 = 0.74709 loss)\nI0818 17:12:05.914731 21603 solver.cpp:228] Iteration 14400, loss = 0.129524\nI0818 17:12:05.914783 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:12:05.914799 21603 solver.cpp:244]     Train net output #1: loss = 0.129524 (* 1 = 0.129524 loss)\nI0818 17:12:06.011406 21603 sgd_solver.cpp:166] Iteration 14400, lr = 0.36\nI0818 17:12:53.254559 21603 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 17:13:19.333089 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80036\nI0818 17:13:19.333132 21603 solver.cpp:404]     Test net output #1: loss = 0.786338 (* 1 = 0.786338 loss)\nI0818 17:13:19.745664 21603 solver.cpp:228] Iteration 14500, loss = 0.113723\nI0818 17:13:19.745718 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:13:19.745735 21603 solver.cpp:244]     Train net output #1: loss = 0.113723 (* 1 = 0.113723 loss)\nI0818 17:13:19.836971 21603 sgd_solver.cpp:166] Iteration 14500, lr = 0.3625\nI0818 17:14:07.067543 21603 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 17:14:33.153355 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0818 17:14:33.153398 21603 solver.cpp:404]     Test net output #1: loss = 1.14458 (* 1 = 1.14458 loss)\nI0818 17:14:33.565688 21603 solver.cpp:228] Iteration 14600, loss = 0.0650772\nI0818 17:14:33.565740 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:14:33.565757 21603 solver.cpp:244]     Train net output #1: loss = 0.0650773 (* 1 = 0.0650773 loss)\nI0818 17:14:33.660379 21603 sgd_solver.cpp:166] Iteration 14600, lr = 0.365\nI0818 17:15:20.920708 21603 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 17:15:47.019331 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79528\nI0818 17:15:47.019379 21603 solver.cpp:404]     Test net output #1: 
loss = 0.853214 (* 1 = 0.853214 loss)\nI0818 17:15:47.431702 21603 solver.cpp:228] Iteration 14700, loss = 0.0679101\nI0818 17:15:47.431756 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:15:47.431772 21603 solver.cpp:244]     Train net output #1: loss = 0.0679102 (* 1 = 0.0679102 loss)\nI0818 17:15:47.523219 21603 sgd_solver.cpp:166] Iteration 14700, lr = 0.3675\nI0818 17:16:34.733938 21603 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 17:17:00.815755 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78388\nI0818 17:17:00.815799 21603 solver.cpp:404]     Test net output #1: loss = 0.957675 (* 1 = 0.957675 loss)\nI0818 17:17:01.228212 21603 solver.cpp:228] Iteration 14800, loss = 0.07945\nI0818 17:17:01.228257 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:17:01.228272 21603 solver.cpp:244]     Train net output #1: loss = 0.0794501 (* 1 = 0.0794501 loss)\nI0818 17:17:01.320376 21603 sgd_solver.cpp:166] Iteration 14800, lr = 0.37\nI0818 17:17:48.551113 21603 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 17:18:14.645709 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77828\nI0818 17:18:14.645754 21603 solver.cpp:404]     Test net output #1: loss = 1.02737 (* 1 = 1.02737 loss)\nI0818 17:18:15.058324 21603 solver.cpp:228] Iteration 14900, loss = 0.0934414\nI0818 17:18:15.058373 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:18:15.058390 21603 solver.cpp:244]     Train net output #1: loss = 0.0934415 (* 1 = 0.0934415 loss)\nI0818 17:18:15.152835 21603 sgd_solver.cpp:166] Iteration 14900, lr = 0.3725\nI0818 17:19:02.392464 21603 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 17:19:28.503005 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78364\nI0818 17:19:28.503048 21603 solver.cpp:404]     Test net output #1: loss = 0.98366 (* 1 = 0.98366 loss)\nI0818 17:19:28.915161 21603 solver.cpp:228] Iteration 15000, loss = 0.110005\nI0818 
17:19:28.915207 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:19:28.915223 21603 solver.cpp:244]     Train net output #1: loss = 0.110005 (* 1 = 0.110005 loss)\nI0818 17:19:29.005659 21603 sgd_solver.cpp:166] Iteration 15000, lr = 0.375\nI0818 17:20:16.141269 21603 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 17:20:42.237936 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73316\nI0818 17:20:42.237982 21603 solver.cpp:404]     Test net output #1: loss = 1.2921 (* 1 = 1.2921 loss)\nI0818 17:20:42.650496 21603 solver.cpp:228] Iteration 15100, loss = 0.139766\nI0818 17:20:42.650537 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:20:42.650552 21603 solver.cpp:244]     Train net output #1: loss = 0.139766 (* 1 = 0.139766 loss)\nI0818 17:20:42.739126 21603 sgd_solver.cpp:166] Iteration 15100, lr = 0.3775\nI0818 17:21:29.923328 21603 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 17:21:56.022784 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69756\nI0818 17:21:56.022828 21603 solver.cpp:404]     Test net output #1: loss = 1.51343 (* 1 = 1.51343 loss)\nI0818 17:21:56.434871 21603 solver.cpp:228] Iteration 15200, loss = 0.159412\nI0818 17:21:56.434916 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 17:21:56.434931 21603 solver.cpp:244]     Train net output #1: loss = 0.159412 (* 1 = 0.159412 loss)\nI0818 17:21:56.524193 21603 sgd_solver.cpp:166] Iteration 15200, lr = 0.38\nI0818 17:22:43.774811 21603 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 17:23:09.880388 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7792\nI0818 17:23:09.880429 21603 solver.cpp:404]     Test net output #1: loss = 0.989539 (* 1 = 0.989539 loss)\nI0818 17:23:10.292914 21603 solver.cpp:228] Iteration 15300, loss = 0.173792\nI0818 17:23:10.292959 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:23:10.292974 21603 solver.cpp:244]     Train 
net output #1: loss = 0.173792 (* 1 = 0.173792 loss)\nI0818 17:23:10.389266 21603 sgd_solver.cpp:166] Iteration 15300, lr = 0.3825\nI0818 17:23:57.631331 21603 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 17:24:23.766790 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74968\nI0818 17:24:23.766832 21603 solver.cpp:404]     Test net output #1: loss = 1.21963 (* 1 = 1.21963 loss)\nI0818 17:24:24.179112 21603 solver.cpp:228] Iteration 15400, loss = 0.0444425\nI0818 17:24:24.179157 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:24:24.179172 21603 solver.cpp:244]     Train net output #1: loss = 0.0444426 (* 1 = 0.0444426 loss)\nI0818 17:24:24.275248 21603 sgd_solver.cpp:166] Iteration 15400, lr = 0.385\nI0818 17:25:11.602882 21603 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 17:25:37.822466 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80112\nI0818 17:25:37.822509 21603 solver.cpp:404]     Test net output #1: loss = 0.787624 (* 1 = 0.787624 loss)\nI0818 17:25:38.234949 21603 solver.cpp:228] Iteration 15500, loss = 0.144146\nI0818 17:25:38.234992 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:25:38.235008 21603 solver.cpp:244]     Train net output #1: loss = 0.144146 (* 1 = 0.144146 loss)\nI0818 17:25:38.332782 21603 sgd_solver.cpp:166] Iteration 15500, lr = 0.3875\nI0818 17:26:25.615099 21603 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 17:26:51.820333 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76784\nI0818 17:26:51.820384 21603 solver.cpp:404]     Test net output #1: loss = 1.03319 (* 1 = 1.03319 loss)\nI0818 17:26:52.233084 21603 solver.cpp:228] Iteration 15600, loss = 0.0802287\nI0818 17:26:52.233129 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:26:52.233144 21603 solver.cpp:244]     Train net output #1: loss = 0.0802288 (* 1 = 0.0802288 loss)\nI0818 17:26:52.322585 21603 sgd_solver.cpp:166] Iteration 15600, lr = 
0.39\nI0818 17:27:39.658325 21603 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 17:28:05.900449 21603 solver.cpp:404]     Test net output #0: accuracy = 0.752\nI0818 17:28:05.900494 21603 solver.cpp:404]     Test net output #1: loss = 1.12457 (* 1 = 1.12457 loss)\nI0818 17:28:06.313019 21603 solver.cpp:228] Iteration 15700, loss = 0.0570974\nI0818 17:28:06.313058 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:28:06.313073 21603 solver.cpp:244]     Train net output #1: loss = 0.0570975 (* 1 = 0.0570975 loss)\nI0818 17:28:06.401980 21603 sgd_solver.cpp:166] Iteration 15700, lr = 0.3925\nI0818 17:28:53.919545 21603 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 17:29:20.067834 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82212\nI0818 17:29:20.067878 21603 solver.cpp:404]     Test net output #1: loss = 0.74642 (* 1 = 0.74642 loss)\nI0818 17:29:20.480306 21603 solver.cpp:228] Iteration 15800, loss = 0.137759\nI0818 17:29:20.480350 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:29:20.480366 21603 solver.cpp:244]     Train net output #1: loss = 0.137759 (* 1 = 0.137759 loss)\nI0818 17:29:20.572697 21603 sgd_solver.cpp:166] Iteration 15800, lr = 0.395\nI0818 17:30:07.941107 21603 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 17:30:34.184722 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80684\nI0818 17:30:34.184768 21603 solver.cpp:404]     Test net output #1: loss = 0.88951 (* 1 = 0.88951 loss)\nI0818 17:30:34.597419 21603 solver.cpp:228] Iteration 15900, loss = 0.206575\nI0818 17:30:34.597460 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 17:30:34.597476 21603 solver.cpp:244]     Train net output #1: loss = 0.206576 (* 1 = 0.206576 loss)\nI0818 17:30:34.687228 21603 sgd_solver.cpp:166] Iteration 15900, lr = 0.3975\nI0818 17:31:21.936524 21603 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 17:31:48.138051 21603 solver.cpp:404]    
 Test net output #0: accuracy = 0.77336\nI0818 17:31:48.138097 21603 solver.cpp:404]     Test net output #1: loss = 0.942824 (* 1 = 0.942824 loss)\nI0818 17:31:48.550319 21603 solver.cpp:228] Iteration 16000, loss = 0.056839\nI0818 17:31:48.550364 21603 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:31:48.550379 21603 solver.cpp:244]     Train net output #1: loss = 0.0568391 (* 1 = 0.0568391 loss)\nI0818 17:31:48.646309 21603 sgd_solver.cpp:166] Iteration 16000, lr = 0.4\nI0818 17:32:36.049180 21603 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 17:33:02.330684 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78552\nI0818 17:33:02.330730 21603 solver.cpp:404]     Test net output #1: loss = 0.934211 (* 1 = 0.934211 loss)\nI0818 17:33:02.743028 21603 solver.cpp:228] Iteration 16100, loss = 0.114078\nI0818 17:33:02.743064 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:33:02.743079 21603 solver.cpp:244]     Train net output #1: loss = 0.114078 (* 1 = 0.114078 loss)\nI0818 17:33:02.840427 21603 sgd_solver.cpp:166] Iteration 16100, lr = 0.4025\nI0818 17:33:50.167570 21603 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 17:34:16.444800 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81252\nI0818 17:34:16.444845 21603 solver.cpp:404]     Test net output #1: loss = 0.77402 (* 1 = 0.77402 loss)\nI0818 17:34:16.857333 21603 solver.cpp:228] Iteration 16200, loss = 0.105884\nI0818 17:34:16.857376 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:34:16.857391 21603 solver.cpp:244]     Train net output #1: loss = 0.105884 (* 1 = 0.105884 loss)\nI0818 17:34:16.950866 21603 sgd_solver.cpp:166] Iteration 16200, lr = 0.405\nI0818 17:35:04.393362 21603 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 17:35:30.670687 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79252\nI0818 17:35:30.670732 21603 solver.cpp:404]     Test net output #1: loss = 0.909409 (* 
1 = 0.909409 loss)\nI0818 17:35:31.083029 21603 solver.cpp:228] Iteration 16300, loss = 0.113199\nI0818 17:35:31.083068 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:35:31.083084 21603 solver.cpp:244]     Train net output #1: loss = 0.113199 (* 1 = 0.113199 loss)\nI0818 17:35:31.176694 21603 sgd_solver.cpp:166] Iteration 16300, lr = 0.4075\nI0818 17:36:18.612195 21603 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 17:36:44.887660 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75368\nI0818 17:36:44.887703 21603 solver.cpp:404]     Test net output #1: loss = 1.15948 (* 1 = 1.15948 loss)\nI0818 17:36:45.300129 21603 solver.cpp:228] Iteration 16400, loss = 0.081696\nI0818 17:36:45.300171 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:36:45.300187 21603 solver.cpp:244]     Train net output #1: loss = 0.0816961 (* 1 = 0.0816961 loss)\nI0818 17:36:45.393704 21603 sgd_solver.cpp:166] Iteration 16400, lr = 0.41\nI0818 17:37:32.756479 21603 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 17:37:59.026715 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75508\nI0818 17:37:59.026762 21603 solver.cpp:404]     Test net output #1: loss = 1.04955 (* 1 = 1.04955 loss)\nI0818 17:37:59.439146 21603 solver.cpp:228] Iteration 16500, loss = 0.148368\nI0818 17:37:59.439190 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:37:59.439206 21603 solver.cpp:244]     Train net output #1: loss = 0.148368 (* 1 = 0.148368 loss)\nI0818 17:37:59.529834 21603 sgd_solver.cpp:166] Iteration 16500, lr = 0.4125\nI0818 17:38:46.956885 21603 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 17:39:13.222991 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77736\nI0818 17:39:13.223037 21603 solver.cpp:404]     Test net output #1: loss = 1.00946 (* 1 = 1.00946 loss)\nI0818 17:39:13.635586 21603 solver.cpp:228] Iteration 16600, loss = 0.166069\nI0818 17:39:13.635625 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:39:13.635641 21603 solver.cpp:244]     Train net output #1: loss = 0.166069 (* 1 = 0.166069 loss)\nI0818 17:39:13.731026 21603 sgd_solver.cpp:166] Iteration 16600, lr = 0.415\nI0818 17:40:01.145102 21603 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 17:40:27.250651 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81852\nI0818 17:40:27.250690 21603 solver.cpp:404]     Test net output #1: loss = 0.739786 (* 1 = 0.739786 loss)\nI0818 17:40:27.663202 21603 solver.cpp:228] Iteration 16700, loss = 0.0613889\nI0818 17:40:27.663245 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:40:27.663261 21603 solver.cpp:244]     Train net output #1: loss = 0.0613889 (* 1 = 0.0613889 loss)\nI0818 17:40:27.759223 21603 sgd_solver.cpp:166] Iteration 16700, lr = 0.4175\nI0818 17:41:15.187265 21603 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 17:41:41.267783 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76944\nI0818 17:41:41.267828 21603 solver.cpp:404]     Test net output #1: loss = 1.01593 (* 1 = 1.01593 loss)\nI0818 17:41:41.679987 21603 solver.cpp:228] Iteration 16800, loss = 0.123595\nI0818 17:41:41.680028 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:41:41.680043 21603 solver.cpp:244]     Train net output #1: loss = 0.123595 (* 1 = 0.123595 loss)\nI0818 17:41:41.776525 21603 sgd_solver.cpp:166] Iteration 16800, lr = 0.42\nI0818 17:42:29.171768 21603 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 17:42:55.423954 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75876\nI0818 17:42:55.424001 21603 solver.cpp:404]     Test net output #1: loss = 1.14248 (* 1 = 1.14248 loss)\nI0818 17:42:55.836202 21603 solver.cpp:228] Iteration 16900, loss = 0.187947\nI0818 17:42:55.836241 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:42:55.836256 21603 solver.cpp:244]     Train net output #1: 
loss = 0.187947 (* 1 = 0.187947 loss)\nI0818 17:42:55.928015 21603 sgd_solver.cpp:166] Iteration 16900, lr = 0.4225\nI0818 17:43:43.440353 21603 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 17:44:09.679055 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82192\nI0818 17:44:09.679101 21603 solver.cpp:404]     Test net output #1: loss = 0.742833 (* 1 = 0.742833 loss)\nI0818 17:44:10.091630 21603 solver.cpp:228] Iteration 17000, loss = 0.119612\nI0818 17:44:10.091671 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:44:10.091686 21603 solver.cpp:244]     Train net output #1: loss = 0.119612 (* 1 = 0.119612 loss)\nI0818 17:44:10.183178 21603 sgd_solver.cpp:166] Iteration 17000, lr = 0.425\nI0818 17:44:57.581754 21603 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 17:45:23.832363 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79212\nI0818 17:45:23.832408 21603 solver.cpp:404]     Test net output #1: loss = 0.90974 (* 1 = 0.90974 loss)\nI0818 17:45:24.244809 21603 solver.cpp:228] Iteration 17100, loss = 0.0966081\nI0818 17:45:24.244849 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:45:24.244865 21603 solver.cpp:244]     Train net output #1: loss = 0.0966082 (* 1 = 0.0966082 loss)\nI0818 17:45:24.337553 21603 sgd_solver.cpp:166] Iteration 17100, lr = 0.4275\nI0818 17:46:11.548224 21603 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 17:46:37.669713 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76144\nI0818 17:46:37.669759 21603 solver.cpp:404]     Test net output #1: loss = 1.08377 (* 1 = 1.08377 loss)\nI0818 17:46:38.082216 21603 solver.cpp:228] Iteration 17200, loss = 0.101518\nI0818 17:46:38.082257 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:46:38.082271 21603 solver.cpp:244]     Train net output #1: loss = 0.101518 (* 1 = 0.101518 loss)\nI0818 17:46:38.178488 21603 sgd_solver.cpp:166] Iteration 17200, lr = 0.43\nI0818 
17:47:25.405733 21603 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 17:47:51.520989 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7566\nI0818 17:47:51.521033 21603 solver.cpp:404]     Test net output #1: loss = 1.15848 (* 1 = 1.15848 loss)\nI0818 17:47:51.933274 21603 solver.cpp:228] Iteration 17300, loss = 0.102041\nI0818 17:47:51.933318 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:47:51.933333 21603 solver.cpp:244]     Train net output #1: loss = 0.102041 (* 1 = 0.102041 loss)\nI0818 17:47:52.021802 21603 sgd_solver.cpp:166] Iteration 17300, lr = 0.4325\nI0818 17:48:39.378015 21603 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 17:49:05.584734 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73432\nI0818 17:49:05.584779 21603 solver.cpp:404]     Test net output #1: loss = 1.16861 (* 1 = 1.16861 loss)\nI0818 17:49:05.997220 21603 solver.cpp:228] Iteration 17400, loss = 0.0898131\nI0818 17:49:05.997267 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:49:05.997282 21603 solver.cpp:244]     Train net output #1: loss = 0.0898132 (* 1 = 0.0898132 loss)\nI0818 17:49:06.087155 21603 sgd_solver.cpp:166] Iteration 17400, lr = 0.435\nI0818 17:49:53.447252 21603 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 17:50:19.715914 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77664\nI0818 17:50:19.715956 21603 solver.cpp:404]     Test net output #1: loss = 1.03442 (* 1 = 1.03442 loss)\nI0818 17:50:20.128334 21603 solver.cpp:228] Iteration 17500, loss = 0.160596\nI0818 17:50:20.128381 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 17:50:20.128397 21603 solver.cpp:244]     Train net output #1: loss = 0.160596 (* 1 = 0.160596 loss)\nI0818 17:50:20.221877 21603 sgd_solver.cpp:166] Iteration 17500, lr = 0.4375\nI0818 17:51:07.548830 21603 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 17:51:33.801021 21603 solver.cpp:404]     Test net 
output #0: accuracy = 0.76156\nI0818 17:51:33.801069 21603 solver.cpp:404]     Test net output #1: loss = 1.12105 (* 1 = 1.12105 loss)\nI0818 17:51:34.213621 21603 solver.cpp:228] Iteration 17600, loss = 0.152451\nI0818 17:51:34.213665 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:51:34.213680 21603 solver.cpp:244]     Train net output #1: loss = 0.152451 (* 1 = 0.152451 loss)\nI0818 17:51:34.305888 21603 sgd_solver.cpp:166] Iteration 17600, lr = 0.44\nI0818 17:52:21.623371 21603 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 17:52:47.914460 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81388\nI0818 17:52:47.914506 21603 solver.cpp:404]     Test net output #1: loss = 0.798695 (* 1 = 0.798695 loss)\nI0818 17:52:48.328382 21603 solver.cpp:228] Iteration 17700, loss = 0.121227\nI0818 17:52:48.328429 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:52:48.328446 21603 solver.cpp:244]     Train net output #1: loss = 0.121228 (* 1 = 0.121228 loss)\nI0818 17:52:48.417345 21603 sgd_solver.cpp:166] Iteration 17700, lr = 0.4425\nI0818 17:53:35.778918 21603 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 17:54:02.056454 21603 solver.cpp:404]     Test net output #0: accuracy = 0.799\nI0818 17:54:02.056500 21603 solver.cpp:404]     Test net output #1: loss = 0.828418 (* 1 = 0.828418 loss)\nI0818 17:54:02.470252 21603 solver.cpp:228] Iteration 17800, loss = 0.0953197\nI0818 17:54:02.470297 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:54:02.470317 21603 solver.cpp:244]     Train net output #1: loss = 0.0953199 (* 1 = 0.0953199 loss)\nI0818 17:54:02.562285 21603 sgd_solver.cpp:166] Iteration 17800, lr = 0.445\nI0818 17:54:49.971689 21603 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 17:55:16.247769 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75776\nI0818 17:55:16.247813 21603 solver.cpp:404]     Test net output #1: loss = 1.07819 (* 1 = 1.07819 
loss)\nI0818 17:55:16.661593 21603 solver.cpp:228] Iteration 17900, loss = 0.124914\nI0818 17:55:16.661639 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:55:16.661654 21603 solver.cpp:244]     Train net output #1: loss = 0.124914 (* 1 = 0.124914 loss)\nI0818 17:55:16.756222 21603 sgd_solver.cpp:166] Iteration 17900, lr = 0.4475\nI0818 17:56:04.164690 21603 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 17:56:30.443565 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75376\nI0818 17:56:30.443611 21603 solver.cpp:404]     Test net output #1: loss = 1.15775 (* 1 = 1.15775 loss)\nI0818 17:56:30.857218 21603 solver.cpp:228] Iteration 18000, loss = 0.103672\nI0818 17:56:30.857264 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:56:30.857280 21603 solver.cpp:244]     Train net output #1: loss = 0.103672 (* 1 = 0.103672 loss)\nI0818 17:56:30.945359 21603 sgd_solver.cpp:166] Iteration 18000, lr = 0.45\nI0818 17:57:18.235800 21603 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 17:57:44.504323 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78944\nI0818 17:57:44.504369 21603 solver.cpp:404]     Test net output #1: loss = 0.88827 (* 1 = 0.88827 loss)\nI0818 17:57:44.918011 21603 solver.cpp:228] Iteration 18100, loss = 0.132348\nI0818 17:57:44.918056 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:57:44.918072 21603 solver.cpp:244]     Train net output #1: loss = 0.132348 (* 1 = 0.132348 loss)\nI0818 17:57:45.011236 21603 sgd_solver.cpp:166] Iteration 18100, lr = 0.4525\nI0818 17:58:32.350890 21603 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 17:58:58.616991 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79512\nI0818 17:58:58.617036 21603 solver.cpp:404]     Test net output #1: loss = 0.845611 (* 1 = 0.845611 loss)\nI0818 17:58:59.031065 21603 solver.cpp:228] Iteration 18200, loss = 0.087908\nI0818 17:58:59.031111 21603 solver.cpp:244]   
  Train net output #0: accuracy = 0.952\nI0818 17:58:59.031126 21603 solver.cpp:244]     Train net output #1: loss = 0.0879081 (* 1 = 0.0879081 loss)\nI0818 17:58:59.124996 21603 sgd_solver.cpp:166] Iteration 18200, lr = 0.455\nI0818 17:59:46.541020 21603 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 18:00:12.810323 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77028\nI0818 18:00:12.810370 21603 solver.cpp:404]     Test net output #1: loss = 0.930834 (* 1 = 0.930834 loss)\nI0818 18:00:13.224017 21603 solver.cpp:228] Iteration 18300, loss = 0.0838387\nI0818 18:00:13.224066 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:00:13.224081 21603 solver.cpp:244]     Train net output #1: loss = 0.0838388 (* 1 = 0.0838388 loss)\nI0818 18:00:13.322213 21603 sgd_solver.cpp:166] Iteration 18300, lr = 0.4575\nI0818 18:01:00.736044 21603 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 18:01:27.010418 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0818 18:01:27.010460 21603 solver.cpp:404]     Test net output #1: loss = 1.01604 (* 1 = 1.01604 loss)\nI0818 18:01:27.422647 21603 solver.cpp:228] Iteration 18400, loss = 0.137674\nI0818 18:01:27.422688 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:01:27.422703 21603 solver.cpp:244]     Train net output #1: loss = 0.137674 (* 1 = 0.137674 loss)\nI0818 18:01:27.510015 21603 sgd_solver.cpp:166] Iteration 18400, lr = 0.46\nI0818 18:02:14.953750 21603 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 18:02:41.227934 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81376\nI0818 18:02:41.227980 21603 solver.cpp:404]     Test net output #1: loss = 0.787965 (* 1 = 0.787965 loss)\nI0818 18:02:41.640143 21603 solver.cpp:228] Iteration 18500, loss = 0.0779235\nI0818 18:02:41.640182 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:02:41.640197 21603 solver.cpp:244]     Train net output #1: loss = 
0.0779236 (* 1 = 0.0779236 loss)\nI0818 18:02:41.728279 21603 sgd_solver.cpp:166] Iteration 18500, lr = 0.4625\nI0818 18:03:29.207546 21603 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 18:03:55.294495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80432\nI0818 18:03:55.294538 21603 solver.cpp:404]     Test net output #1: loss = 0.808268 (* 1 = 0.808268 loss)\nI0818 18:03:55.707936 21603 solver.cpp:228] Iteration 18600, loss = 0.159254\nI0818 18:03:55.707978 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:03:55.707994 21603 solver.cpp:244]     Train net output #1: loss = 0.159254 (* 1 = 0.159254 loss)\nI0818 18:03:55.796007 21603 sgd_solver.cpp:166] Iteration 18600, lr = 0.465\nI0818 18:04:43.118795 21603 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 18:05:09.181567 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75448\nI0818 18:05:09.181610 21603 solver.cpp:404]     Test net output #1: loss = 1.24822 (* 1 = 1.24822 loss)\nI0818 18:05:09.595399 21603 solver.cpp:228] Iteration 18700, loss = 0.0620507\nI0818 18:05:09.595441 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:05:09.595458 21603 solver.cpp:244]     Train net output #1: loss = 0.0620508 (* 1 = 0.0620508 loss)\nI0818 18:05:09.680429 21603 sgd_solver.cpp:166] Iteration 18700, lr = 0.4675\nI0818 18:05:56.938019 21603 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 18:06:23.121044 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74516\nI0818 18:06:23.121088 21603 solver.cpp:404]     Test net output #1: loss = 1.19891 (* 1 = 1.19891 loss)\nI0818 18:06:23.534790 21603 solver.cpp:228] Iteration 18800, loss = 0.154244\nI0818 18:06:23.534831 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 18:06:23.534847 21603 solver.cpp:244]     Train net output #1: loss = 0.154244 (* 1 = 0.154244 loss)\nI0818 18:06:23.627557 21603 sgd_solver.cpp:166] Iteration 18800, lr = 0.47\nI0818 
18:07:11.032510 21603 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 18:07:37.096122 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7872\nI0818 18:07:37.096164 21603 solver.cpp:404]     Test net output #1: loss = 0.929686 (* 1 = 0.929686 loss)\nI0818 18:07:37.509824 21603 solver.cpp:228] Iteration 18900, loss = 0.129487\nI0818 18:07:37.509873 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:07:37.509889 21603 solver.cpp:244]     Train net output #1: loss = 0.129487 (* 1 = 0.129487 loss)\nI0818 18:07:37.596786 21603 sgd_solver.cpp:166] Iteration 18900, lr = 0.4725\nI0818 18:08:24.855204 21603 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 18:08:50.937095 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8222\nI0818 18:08:50.937140 21603 solver.cpp:404]     Test net output #1: loss = 0.738246 (* 1 = 0.738246 loss)\nI0818 18:08:51.350757 21603 solver.cpp:228] Iteration 19000, loss = 0.103259\nI0818 18:08:51.350811 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:08:51.350827 21603 solver.cpp:244]     Train net output #1: loss = 0.103259 (* 1 = 0.103259 loss)\nI0818 18:08:51.440826 21603 sgd_solver.cpp:166] Iteration 19000, lr = 0.475\nI0818 18:09:38.779659 21603 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 18:10:04.864426 21603 solver.cpp:404]     Test net output #0: accuracy = 0.802\nI0818 18:10:04.864471 21603 solver.cpp:404]     Test net output #1: loss = 0.810295 (* 1 = 0.810295 loss)\nI0818 18:10:05.278177 21603 solver.cpp:228] Iteration 19100, loss = 0.0541997\nI0818 18:10:05.278221 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:10:05.278237 21603 solver.cpp:244]     Train net output #1: loss = 0.0541998 (* 1 = 0.0541998 loss)\nI0818 18:10:05.367735 21603 sgd_solver.cpp:166] Iteration 19100, lr = 0.4775\nI0818 18:10:52.522019 21603 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 18:11:18.810372 21603 solver.cpp:404]     Test 
net output #0: accuracy = 0.79852\nI0818 18:11:18.810418 21603 solver.cpp:404]     Test net output #1: loss = 0.831568 (* 1 = 0.831568 loss)\nI0818 18:11:19.224282 21603 solver.cpp:228] Iteration 19200, loss = 0.196279\nI0818 18:11:19.224325 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:11:19.224341 21603 solver.cpp:244]     Train net output #1: loss = 0.196279 (* 1 = 0.196279 loss)\nI0818 18:11:19.308176 21603 sgd_solver.cpp:166] Iteration 19200, lr = 0.48\nI0818 18:12:06.605814 21603 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 18:12:32.863842 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74984\nI0818 18:12:32.863888 21603 solver.cpp:404]     Test net output #1: loss = 1.23932 (* 1 = 1.23932 loss)\nI0818 18:12:33.284241 21603 solver.cpp:228] Iteration 19300, loss = 0.0745177\nI0818 18:12:33.284294 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:12:33.284324 21603 solver.cpp:244]     Train net output #1: loss = 0.0745178 (* 1 = 0.0745178 loss)\nI0818 18:12:33.373040 21603 sgd_solver.cpp:166] Iteration 19300, lr = 0.4825\nI0818 18:13:20.580788 21603 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 18:13:46.859858 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81576\nI0818 18:13:46.859905 21603 solver.cpp:404]     Test net output #1: loss = 0.751818 (* 1 = 0.751818 loss)\nI0818 18:13:47.272521 21603 solver.cpp:228] Iteration 19400, loss = 0.0773843\nI0818 18:13:47.272573 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:13:47.272598 21603 solver.cpp:244]     Train net output #1: loss = 0.0773844 (* 1 = 0.0773844 loss)\nI0818 18:13:47.362046 21603 sgd_solver.cpp:166] Iteration 19400, lr = 0.485\nI0818 18:14:34.679105 21603 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0818 18:15:00.933925 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81056\nI0818 18:15:00.933979 21603 solver.cpp:404]     Test net output #1: loss = 0.767597 (* 1 
= 0.767597 loss)\nI0818 18:15:01.347740 21603 solver.cpp:228] Iteration 19500, loss = 0.0776116\nI0818 18:15:01.347795 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:15:01.347820 21603 solver.cpp:244]     Train net output #1: loss = 0.0776117 (* 1 = 0.0776117 loss)\nI0818 18:15:01.437073 21603 sgd_solver.cpp:166] Iteration 19500, lr = 0.4875\nI0818 18:15:48.775679 21603 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 18:16:15.028352 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7776\nI0818 18:16:15.028404 21603 solver.cpp:404]     Test net output #1: loss = 1.00737 (* 1 = 1.00737 loss)\nI0818 18:16:15.442240 21603 solver.cpp:228] Iteration 19600, loss = 0.157581\nI0818 18:16:15.442296 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:16:15.442322 21603 solver.cpp:244]     Train net output #1: loss = 0.157581 (* 1 = 0.157581 loss)\nI0818 18:16:15.528836 21603 sgd_solver.cpp:166] Iteration 19600, lr = 0.49\nI0818 18:17:02.742203 21603 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 18:17:29.027565 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80088\nI0818 18:17:29.027623 21603 solver.cpp:404]     Test net output #1: loss = 0.807025 (* 1 = 0.807025 loss)\nI0818 18:17:29.440167 21603 solver.cpp:228] Iteration 19700, loss = 0.100741\nI0818 18:17:29.440222 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:17:29.440248 21603 solver.cpp:244]     Train net output #1: loss = 0.100741 (* 1 = 0.100741 loss)\nI0818 18:17:29.531383 21603 sgd_solver.cpp:166] Iteration 19700, lr = 0.4925\nI0818 18:18:16.869966 21603 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 18:18:42.974941 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75244\nI0818 18:18:42.974990 21603 solver.cpp:404]     Test net output #1: loss = 1.21223 (* 1 = 1.21223 loss)\nI0818 18:18:43.388867 21603 solver.cpp:228] Iteration 19800, loss = 0.0855727\nI0818 18:18:43.388924 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:18:43.388948 21603 solver.cpp:244]     Train net output #1: loss = 0.0855728 (* 1 = 0.0855728 loss)\nI0818 18:18:43.475205 21603 sgd_solver.cpp:166] Iteration 19800, lr = 0.495\nI0818 18:19:30.713553 21603 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 18:19:56.910338 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7266\nI0818 18:19:56.910392 21603 solver.cpp:404]     Test net output #1: loss = 1.34434 (* 1 = 1.34434 loss)\nI0818 18:19:57.324481 21603 solver.cpp:228] Iteration 19900, loss = 0.140221\nI0818 18:19:57.324539 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:19:57.324566 21603 solver.cpp:244]     Train net output #1: loss = 0.140221 (* 1 = 0.140221 loss)\nI0818 18:19:57.418054 21603 sgd_solver.cpp:166] Iteration 19900, lr = 0.4975\nI0818 18:20:44.776587 21603 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 18:21:11.056223 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75656\nI0818 18:21:11.056273 21603 solver.cpp:404]     Test net output #1: loss = 1.10984 (* 1 = 1.10984 loss)\nI0818 18:21:11.469977 21603 solver.cpp:228] Iteration 20000, loss = 0.201474\nI0818 18:21:11.470032 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 18:21:11.470057 21603 solver.cpp:244]     Train net output #1: loss = 0.201474 (* 1 = 0.201474 loss)\nI0818 18:21:11.557785 21603 sgd_solver.cpp:166] Iteration 20000, lr = 0.5\nI0818 18:21:58.849607 21603 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0818 18:22:24.980545 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72432\nI0818 18:22:24.980599 21603 solver.cpp:404]     Test net output #1: loss = 1.27072 (* 1 = 1.27072 loss)\nI0818 18:22:25.394888 21603 solver.cpp:228] Iteration 20100, loss = 0.0673008\nI0818 18:22:25.394945 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:22:25.394971 21603 solver.cpp:244]     Train net output #1: loss 
= 0.0673009 (* 1 = 0.0673009 loss)\nI0818 18:22:25.479591 21603 sgd_solver.cpp:166] Iteration 20100, lr = 0.5025\nI0818 18:23:12.887076 21603 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0818 18:23:38.972427 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78224\nI0818 18:23:38.972477 21603 solver.cpp:404]     Test net output #1: loss = 0.919783 (* 1 = 0.919783 loss)\nI0818 18:23:39.386075 21603 solver.cpp:228] Iteration 20200, loss = 0.173351\nI0818 18:23:39.386132 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:23:39.386157 21603 solver.cpp:244]     Train net output #1: loss = 0.173351 (* 1 = 0.173351 loss)\nI0818 18:23:39.472652 21603 sgd_solver.cpp:166] Iteration 20200, lr = 0.505\nI0818 18:24:26.632128 21603 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0818 18:24:52.859030 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75108\nI0818 18:24:52.859081 21603 solver.cpp:404]     Test net output #1: loss = 1.07918 (* 1 = 1.07918 loss)\nI0818 18:24:53.272454 21603 solver.cpp:228] Iteration 20300, loss = 0.0719517\nI0818 18:24:53.272509 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:24:53.272541 21603 solver.cpp:244]     Train net output #1: loss = 0.0719518 (* 1 = 0.0719518 loss)\nI0818 18:24:53.363235 21603 sgd_solver.cpp:166] Iteration 20300, lr = 0.5075\nI0818 18:25:40.497861 21603 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0818 18:26:06.559578 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75388\nI0818 18:26:06.559626 21603 solver.cpp:404]     Test net output #1: loss = 1.01199 (* 1 = 1.01199 loss)\nI0818 18:26:06.973332 21603 solver.cpp:228] Iteration 20400, loss = 0.178062\nI0818 18:26:06.973388 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:26:06.973413 21603 solver.cpp:244]     Train net output #1: loss = 0.178062 (* 1 = 0.178062 loss)\nI0818 18:26:07.064995 21603 sgd_solver.cpp:166] Iteration 20400, lr = 0.51\nI0818 
18:26:54.218693 21603 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0818 18:27:20.311269 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75088\nI0818 18:27:20.311321 21603 solver.cpp:404]     Test net output #1: loss = 1.18185 (* 1 = 1.18185 loss)\nI0818 18:27:20.723276 21603 solver.cpp:228] Iteration 20500, loss = 0.09678\nI0818 18:27:20.723332 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:27:20.723357 21603 solver.cpp:244]     Train net output #1: loss = 0.0967803 (* 1 = 0.0967803 loss)\nI0818 18:27:20.814316 21603 sgd_solver.cpp:166] Iteration 20500, lr = 0.5125\nI0818 18:28:07.867398 21603 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0818 18:28:34.086252 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0818 18:28:34.086304 21603 solver.cpp:404]     Test net output #1: loss = 0.803847 (* 1 = 0.803847 loss)\nI0818 18:28:34.499869 21603 solver.cpp:228] Iteration 20600, loss = 0.0976889\nI0818 18:28:34.499927 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:28:34.499951 21603 solver.cpp:244]     Train net output #1: loss = 0.0976891 (* 1 = 0.0976891 loss)\nI0818 18:28:34.586922 21603 sgd_solver.cpp:166] Iteration 20600, lr = 0.515\nI0818 18:29:21.757534 21603 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0818 18:29:47.850318 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79692\nI0818 18:29:47.850368 21603 solver.cpp:404]     Test net output #1: loss = 0.872526 (* 1 = 0.872526 loss)\nI0818 18:29:48.263679 21603 solver.cpp:228] Iteration 20700, loss = 0.194573\nI0818 18:29:48.263736 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:29:48.263761 21603 solver.cpp:244]     Train net output #1: loss = 0.194574 (* 1 = 0.194574 loss)\nI0818 18:29:48.347365 21603 sgd_solver.cpp:166] Iteration 20700, lr = 0.5175\nI0818 18:30:35.534600 21603 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0818 18:31:01.648542 21603 solver.cpp:404]     Test 
net output #0: accuracy = 0.7596\nI0818 18:31:01.648593 21603 solver.cpp:404]     Test net output #1: loss = 1.06559 (* 1 = 1.06559 loss)\nI0818 18:31:02.061975 21603 solver.cpp:228] Iteration 20800, loss = 0.171758\nI0818 18:31:02.062032 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:31:02.062057 21603 solver.cpp:244]     Train net output #1: loss = 0.171759 (* 1 = 0.171759 loss)\nI0818 18:31:02.153470 21603 sgd_solver.cpp:166] Iteration 20800, lr = 0.52\nI0818 18:31:49.383496 21603 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0818 18:32:15.477867 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75712\nI0818 18:32:15.477926 21603 solver.cpp:404]     Test net output #1: loss = 1.25596 (* 1 = 1.25596 loss)\nI0818 18:32:15.891368 21603 solver.cpp:228] Iteration 20900, loss = 0.102183\nI0818 18:32:15.891427 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:32:15.891453 21603 solver.cpp:244]     Train net output #1: loss = 0.102183 (* 1 = 0.102183 loss)\nI0818 18:32:15.977632 21603 sgd_solver.cpp:166] Iteration 20900, lr = 0.5225\nI0818 18:33:03.172248 21603 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0818 18:33:29.262301 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78512\nI0818 18:33:29.262351 21603 solver.cpp:404]     Test net output #1: loss = 0.969376 (* 1 = 0.969376 loss)\nI0818 18:33:29.675459 21603 solver.cpp:228] Iteration 21000, loss = 0.101176\nI0818 18:33:29.675523 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:33:29.675550 21603 solver.cpp:244]     Train net output #1: loss = 0.101176 (* 1 = 0.101176 loss)\nI0818 18:33:29.769248 21603 sgd_solver.cpp:166] Iteration 21000, lr = 0.525\nI0818 18:34:16.848147 21603 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0818 18:34:42.963083 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7898\nI0818 18:34:42.963135 21603 solver.cpp:404]     Test net output #1: loss = 0.850884 (* 1 = 0.850884 
loss)\nI0818 18:34:43.376564 21603 solver.cpp:228] Iteration 21100, loss = 0.176653\nI0818 18:34:43.376619 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:34:43.376644 21603 solver.cpp:244]     Train net output #1: loss = 0.176653 (* 1 = 0.176653 loss)\nI0818 18:34:43.469357 21603 sgd_solver.cpp:166] Iteration 21100, lr = 0.5275\nI0818 18:35:30.358012 21603 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0818 18:35:56.510375 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77036\nI0818 18:35:56.510427 21603 solver.cpp:404]     Test net output #1: loss = 1.09514 (* 1 = 1.09514 loss)\nI0818 18:35:56.924080 21603 solver.cpp:228] Iteration 21200, loss = 0.147475\nI0818 18:35:56.924136 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:35:56.924160 21603 solver.cpp:244]     Train net output #1: loss = 0.147475 (* 1 = 0.147475 loss)\nI0818 18:35:57.013533 21603 sgd_solver.cpp:166] Iteration 21200, lr = 0.53\nI0818 18:36:44.141086 21603 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0818 18:37:10.305604 21603 solver.cpp:404]     Test net output #0: accuracy = 0.60908\nI0818 18:37:10.305656 21603 solver.cpp:404]     Test net output #1: loss = 2.39792 (* 1 = 2.39792 loss)\nI0818 18:37:10.719606 21603 solver.cpp:228] Iteration 21300, loss = 0.121335\nI0818 18:37:10.719666 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:37:10.719689 21603 solver.cpp:244]     Train net output #1: loss = 0.121335 (* 1 = 0.121335 loss)\nI0818 18:37:10.811350 21603 sgd_solver.cpp:166] Iteration 21300, lr = 0.5325\nI0818 18:37:58.052359 21603 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0818 18:38:24.308609 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77352\nI0818 18:38:24.308660 21603 solver.cpp:404]     Test net output #1: loss = 0.887665 (* 1 = 0.887665 loss)\nI0818 18:38:24.722507 21603 solver.cpp:228] Iteration 21400, loss = 0.12237\nI0818 18:38:24.722564 21603 solver.cpp:244]    
 Train net output #0: accuracy = 0.96\nI0818 18:38:24.722589 21603 solver.cpp:244]     Train net output #1: loss = 0.12237 (* 1 = 0.12237 loss)\nI0818 18:38:24.814440 21603 sgd_solver.cpp:166] Iteration 21400, lr = 0.535\nI0818 18:39:12.042027 21603 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0818 18:39:38.328696 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78104\nI0818 18:39:38.328747 21603 solver.cpp:404]     Test net output #1: loss = 0.894932 (* 1 = 0.894932 loss)\nI0818 18:39:38.742277 21603 solver.cpp:228] Iteration 21500, loss = 0.0633159\nI0818 18:39:38.742334 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:39:38.742359 21603 solver.cpp:244]     Train net output #1: loss = 0.0633161 (* 1 = 0.0633161 loss)\nI0818 18:39:38.828294 21603 sgd_solver.cpp:166] Iteration 21500, lr = 0.5375\nI0818 18:40:25.965811 21603 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0818 18:40:52.250818 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68944\nI0818 18:40:52.250869 21603 solver.cpp:404]     Test net output #1: loss = 1.67578 (* 1 = 1.67578 loss)\nI0818 18:40:52.664680 21603 solver.cpp:228] Iteration 21600, loss = 0.125977\nI0818 18:40:52.664727 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 18:40:52.664752 21603 solver.cpp:244]     Train net output #1: loss = 0.125977 (* 1 = 0.125977 loss)\nI0818 18:40:52.757269 21603 sgd_solver.cpp:166] Iteration 21600, lr = 0.54\nI0818 18:41:39.943761 21603 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0818 18:42:06.210448 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81268\nI0818 18:42:06.210515 21603 solver.cpp:404]     Test net output #1: loss = 0.738405 (* 1 = 0.738405 loss)\nI0818 18:42:06.624102 21603 solver.cpp:228] Iteration 21700, loss = 0.155523\nI0818 18:42:06.624151 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 18:42:06.624176 21603 solver.cpp:244]     Train net output #1: loss = 0.155523 (* 1 
= 0.155523 loss)\nI0818 18:42:06.712397 21603 sgd_solver.cpp:166] Iteration 21700, lr = 0.5425\nI0818 18:42:53.913676 21603 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0818 18:43:20.202359 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76144\nI0818 18:43:20.202410 21603 solver.cpp:404]     Test net output #1: loss = 1.18759 (* 1 = 1.18759 loss)\nI0818 18:43:20.615720 21603 solver.cpp:228] Iteration 21800, loss = 0.124842\nI0818 18:43:20.615769 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:43:20.615794 21603 solver.cpp:244]     Train net output #1: loss = 0.124842 (* 1 = 0.124842 loss)\nI0818 18:43:20.707945 21603 sgd_solver.cpp:166] Iteration 21800, lr = 0.545\nI0818 18:44:07.869246 21603 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0818 18:44:34.155944 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77896\nI0818 18:44:34.155997 21603 solver.cpp:404]     Test net output #1: loss = 1.01363 (* 1 = 1.01363 loss)\nI0818 18:44:34.569393 21603 solver.cpp:228] Iteration 21900, loss = 0.190921\nI0818 18:44:34.569442 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:44:34.569466 21603 solver.cpp:244]     Train net output #1: loss = 0.190921 (* 1 = 0.190921 loss)\nI0818 18:44:34.658706 21603 sgd_solver.cpp:166] Iteration 21900, lr = 0.5475\nI0818 18:45:21.853806 21603 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0818 18:45:48.140169 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78268\nI0818 18:45:48.140223 21603 solver.cpp:404]     Test net output #1: loss = 0.834878 (* 1 = 0.834878 loss)\nI0818 18:45:48.553792 21603 solver.cpp:228] Iteration 22000, loss = 0.0644059\nI0818 18:45:48.553834 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:45:48.553859 21603 solver.cpp:244]     Train net output #1: loss = 0.0644062 (* 1 = 0.0644062 loss)\nI0818 18:45:48.647644 21603 sgd_solver.cpp:166] Iteration 22000, lr = 0.55\nI0818 18:46:35.900799 21603 
solver.cpp:337] Iteration 22100, Testing net (#0)\nI0818 18:47:02.184136 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75908\nI0818 18:47:02.184190 21603 solver.cpp:404]     Test net output #1: loss = 0.965578 (* 1 = 0.965578 loss)\nI0818 18:47:02.597226 21603 solver.cpp:228] Iteration 22100, loss = 0.171397\nI0818 18:47:02.597272 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:47:02.597298 21603 solver.cpp:244]     Train net output #1: loss = 0.171397 (* 1 = 0.171397 loss)\nI0818 18:47:02.681694 21603 sgd_solver.cpp:166] Iteration 22100, lr = 0.5525\nI0818 18:47:49.871078 21603 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0818 18:48:16.149739 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81196\nI0818 18:48:16.149791 21603 solver.cpp:404]     Test net output #1: loss = 0.780695 (* 1 = 0.780695 loss)\nI0818 18:48:16.561652 21603 solver.cpp:228] Iteration 22200, loss = 0.18171\nI0818 18:48:16.561699 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:48:16.561723 21603 solver.cpp:244]     Train net output #1: loss = 0.181711 (* 1 = 0.181711 loss)\nI0818 18:48:16.652326 21603 sgd_solver.cpp:166] Iteration 22200, lr = 0.555\nI0818 18:49:03.873030 21603 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0818 18:49:30.156128 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78424\nI0818 18:49:30.156179 21603 solver.cpp:404]     Test net output #1: loss = 0.946766 (* 1 = 0.946766 loss)\nI0818 18:49:30.569816 21603 solver.cpp:228] Iteration 22300, loss = 0.0657109\nI0818 18:49:30.569862 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:49:30.569887 21603 solver.cpp:244]     Train net output #1: loss = 0.0657111 (* 1 = 0.0657111 loss)\nI0818 18:49:30.663899 21603 sgd_solver.cpp:166] Iteration 22300, lr = 0.5575\nI0818 18:50:17.921725 21603 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0818 18:50:44.217264 21603 solver.cpp:404]     Test net output #0: 
accuracy = 0.78412\nI0818 18:50:44.217316 21603 solver.cpp:404]     Test net output #1: loss = 0.811232 (* 1 = 0.811232 loss)\nI0818 18:50:44.629451 21603 solver.cpp:228] Iteration 22400, loss = 0.238482\nI0818 18:50:44.629501 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 18:50:44.629526 21603 solver.cpp:244]     Train net output #1: loss = 0.238482 (* 1 = 0.238482 loss)\nI0818 18:50:44.726328 21603 sgd_solver.cpp:166] Iteration 22400, lr = 0.56\nI0818 18:51:31.973065 21603 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0818 18:51:58.260551 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71604\nI0818 18:51:58.260603 21603 solver.cpp:404]     Test net output #1: loss = 1.35842 (* 1 = 1.35842 loss)\nI0818 18:51:58.672798 21603 solver.cpp:228] Iteration 22500, loss = 0.135335\nI0818 18:51:58.672844 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:51:58.672869 21603 solver.cpp:244]     Train net output #1: loss = 0.135335 (* 1 = 0.135335 loss)\nI0818 18:51:58.761247 21603 sgd_solver.cpp:166] Iteration 22500, lr = 0.5625\nI0818 18:52:45.976447 21603 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0818 18:53:12.251191 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77972\nI0818 18:53:12.251241 21603 solver.cpp:404]     Test net output #1: loss = 0.946818 (* 1 = 0.946818 loss)\nI0818 18:53:12.663326 21603 solver.cpp:228] Iteration 22600, loss = 0.168093\nI0818 18:53:12.663367 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 18:53:12.663393 21603 solver.cpp:244]     Train net output #1: loss = 0.168093 (* 1 = 0.168093 loss)\nI0818 18:53:12.753092 21603 sgd_solver.cpp:166] Iteration 22600, lr = 0.565\nI0818 18:53:59.947177 21603 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0818 18:54:26.215585 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78384\nI0818 18:54:26.215637 21603 solver.cpp:404]     Test net output #1: loss = 0.872489 (* 1 = 0.872489 
loss)\nI0818 18:54:26.627647 21603 solver.cpp:228] Iteration 22700, loss = 0.0633717\nI0818 18:54:26.627692 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:54:26.627717 21603 solver.cpp:244]     Train net output #1: loss = 0.0633719 (* 1 = 0.0633719 loss)\nI0818 18:54:26.723490 21603 sgd_solver.cpp:166] Iteration 22700, lr = 0.5675\nI0818 18:55:13.851846 21603 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0818 18:55:40.034307 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78092\nI0818 18:55:40.034363 21603 solver.cpp:404]     Test net output #1: loss = 0.953264 (* 1 = 0.953264 loss)\nI0818 18:55:40.446357 21603 solver.cpp:228] Iteration 22800, loss = 0.0970021\nI0818 18:55:40.446400 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:55:40.446424 21603 solver.cpp:244]     Train net output #1: loss = 0.0970024 (* 1 = 0.0970024 loss)\nI0818 18:55:40.535835 21603 sgd_solver.cpp:166] Iteration 22800, lr = 0.57\nI0818 18:56:27.682804 21603 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0818 18:56:53.862704 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77384\nI0818 18:56:53.862758 21603 solver.cpp:404]     Test net output #1: loss = 1.0253 (* 1 = 1.0253 loss)\nI0818 18:56:54.275864 21603 solver.cpp:228] Iteration 22900, loss = 0.142488\nI0818 18:56:54.275902 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 18:56:54.275926 21603 solver.cpp:244]     Train net output #1: loss = 0.142488 (* 1 = 0.142488 loss)\nI0818 18:56:54.368080 21603 sgd_solver.cpp:166] Iteration 22900, lr = 0.5725\nI0818 18:57:41.600985 21603 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0818 18:58:07.881898 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8076\nI0818 18:58:07.881949 21603 solver.cpp:404]     Test net output #1: loss = 0.755965 (* 1 = 0.755965 loss)\nI0818 18:58:08.294046 21603 solver.cpp:228] Iteration 23000, loss = 0.233469\nI0818 18:58:08.294087 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 18:58:08.294111 21603 solver.cpp:244]     Train net output #1: loss = 0.233469 (* 1 = 0.233469 loss)\nI0818 18:58:08.389698 21603 sgd_solver.cpp:166] Iteration 23000, lr = 0.575\nI0818 18:58:55.502928 21603 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0818 18:59:21.716974 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78884\nI0818 18:59:21.717023 21603 solver.cpp:404]     Test net output #1: loss = 0.83146 (* 1 = 0.83146 loss)\nI0818 18:59:22.129004 21603 solver.cpp:228] Iteration 23100, loss = 0.137173\nI0818 18:59:22.129041 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:59:22.129065 21603 solver.cpp:244]     Train net output #1: loss = 0.137173 (* 1 = 0.137173 loss)\nI0818 18:59:22.217744 21603 sgd_solver.cpp:166] Iteration 23100, lr = 0.5775\nI0818 19:00:09.276777 21603 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0818 19:00:35.350551 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76304\nI0818 19:00:35.350603 21603 solver.cpp:404]     Test net output #1: loss = 1.06372 (* 1 = 1.06372 loss)\nI0818 19:00:35.764161 21603 solver.cpp:228] Iteration 23200, loss = 0.113986\nI0818 19:00:35.764204 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:00:35.764228 21603 solver.cpp:244]     Train net output #1: loss = 0.113987 (* 1 = 0.113987 loss)\nI0818 19:00:35.858165 21603 sgd_solver.cpp:166] Iteration 23200, lr = 0.58\nI0818 19:01:22.987455 21603 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0818 19:01:49.270644 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7832\nI0818 19:01:49.270697 21603 solver.cpp:404]     Test net output #1: loss = 0.978389 (* 1 = 0.978389 loss)\nI0818 19:01:49.684345 21603 solver.cpp:228] Iteration 23300, loss = 0.157339\nI0818 19:01:49.684391 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 19:01:49.684415 21603 solver.cpp:244]     Train net output #1: loss 
= 0.157339 (* 1 = 0.157339 loss)\nI0818 19:01:49.776929 21603 sgd_solver.cpp:166] Iteration 23300, lr = 0.5825\nI0818 19:02:36.999140 21603 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0818 19:03:03.277762 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77632\nI0818 19:03:03.277815 21603 solver.cpp:404]     Test net output #1: loss = 0.883416 (* 1 = 0.883416 loss)\nI0818 19:03:03.698532 21603 solver.cpp:228] Iteration 23400, loss = 0.151085\nI0818 19:03:03.698575 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:03:03.698601 21603 solver.cpp:244]     Train net output #1: loss = 0.151086 (* 1 = 0.151086 loss)\nI0818 19:03:03.778616 21603 sgd_solver.cpp:166] Iteration 23400, lr = 0.585\nI0818 19:03:51.013748 21603 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0818 19:04:17.304913 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79884\nI0818 19:04:17.304965 21603 solver.cpp:404]     Test net output #1: loss = 0.759173 (* 1 = 0.759173 loss)\nI0818 19:04:17.717000 21603 solver.cpp:228] Iteration 23500, loss = 0.141952\nI0818 19:04:17.717041 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:04:17.717066 21603 solver.cpp:244]     Train net output #1: loss = 0.141952 (* 1 = 0.141952 loss)\nI0818 19:04:17.804752 21603 sgd_solver.cpp:166] Iteration 23500, lr = 0.5875\nI0818 19:05:05.047209 21603 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0818 19:05:31.303371 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82972\nI0818 19:05:31.303423 21603 solver.cpp:404]     Test net output #1: loss = 0.623592 (* 1 = 0.623592 loss)\nI0818 19:05:31.716868 21603 solver.cpp:228] Iteration 23600, loss = 0.13649\nI0818 19:05:31.716910 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:05:31.716934 21603 solver.cpp:244]     Train net output #1: loss = 0.136491 (* 1 = 0.136491 loss)\nI0818 19:05:31.799836 21603 sgd_solver.cpp:166] Iteration 23600, lr = 0.59\nI0818 
19:06:18.878991 21603 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0818 19:06:45.156162 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI0818 19:06:45.156214 21603 solver.cpp:404]     Test net output #1: loss = 0.929975 (* 1 = 0.929975 loss)\nI0818 19:06:45.569720 21603 solver.cpp:228] Iteration 23700, loss = 0.137381\nI0818 19:06:45.569762 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:06:45.569787 21603 solver.cpp:244]     Train net output #1: loss = 0.137381 (* 1 = 0.137381 loss)\nI0818 19:06:45.665572 21603 sgd_solver.cpp:166] Iteration 23700, lr = 0.5925\nI0818 19:07:32.758410 21603 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0818 19:07:59.023339 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76628\nI0818 19:07:59.023391 21603 solver.cpp:404]     Test net output #1: loss = 0.971254 (* 1 = 0.971254 loss)\nI0818 19:07:59.435689 21603 solver.cpp:228] Iteration 23800, loss = 0.140505\nI0818 19:07:59.435732 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:07:59.435756 21603 solver.cpp:244]     Train net output #1: loss = 0.140505 (* 1 = 0.140505 loss)\nI0818 19:07:59.525466 21603 sgd_solver.cpp:166] Iteration 23800, lr = 0.595\nI0818 19:08:46.627164 21603 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0818 19:09:12.915457 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75892\nI0818 19:09:12.915514 21603 solver.cpp:404]     Test net output #1: loss = 1.07889 (* 1 = 1.07889 loss)\nI0818 19:09:13.327334 21603 solver.cpp:228] Iteration 23900, loss = 0.0551971\nI0818 19:09:13.327380 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:09:13.327405 21603 solver.cpp:244]     Train net output #1: loss = 0.0551973 (* 1 = 0.0551973 loss)\nI0818 19:09:13.422111 21603 sgd_solver.cpp:166] Iteration 23900, lr = 0.5975\nI0818 19:10:00.417032 21603 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0818 19:10:26.693734 21603 solver.cpp:404]     Test 
net output #0: accuracy = 0.74276\nI0818 19:10:26.693778 21603 solver.cpp:404]     Test net output #1: loss = 1.18199 (* 1 = 1.18199 loss)\nI0818 19:10:27.106750 21603 solver.cpp:228] Iteration 24000, loss = 0.0988877\nI0818 19:10:27.106793 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:10:27.106811 21603 solver.cpp:244]     Train net output #1: loss = 0.0988879 (* 1 = 0.0988879 loss)\nI0818 19:10:27.197036 21603 sgd_solver.cpp:166] Iteration 24000, lr = 0.6\nI0818 19:11:14.223201 21603 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0818 19:11:40.505108 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78192\nI0818 19:11:40.505162 21603 solver.cpp:404]     Test net output #1: loss = 0.846402 (* 1 = 0.846402 loss)\nI0818 19:11:40.918618 21603 solver.cpp:228] Iteration 24100, loss = 0.152924\nI0818 19:11:40.918659 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:11:40.918684 21603 solver.cpp:244]     Train net output #1: loss = 0.152924 (* 1 = 0.152924 loss)\nI0818 19:11:41.004951 21603 sgd_solver.cpp:166] Iteration 24100, lr = 0.6025\nI0818 19:12:27.941020 21603 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0818 19:12:54.214045 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74436\nI0818 19:12:54.214097 21603 solver.cpp:404]     Test net output #1: loss = 1.26214 (* 1 = 1.26214 loss)\nI0818 19:12:54.627485 21603 solver.cpp:228] Iteration 24200, loss = 0.102865\nI0818 19:12:54.627538 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:12:54.627563 21603 solver.cpp:244]     Train net output #1: loss = 0.102865 (* 1 = 0.102865 loss)\nI0818 19:12:54.716830 21603 sgd_solver.cpp:166] Iteration 24200, lr = 0.605\nI0818 19:13:41.918789 21603 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0818 19:14:08.191962 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78784\nI0818 19:14:08.192013 21603 solver.cpp:404]     Test net output #1: loss = 0.920427 (* 1 = 
0.920427 loss)\nI0818 19:14:08.605650 21603 solver.cpp:228] Iteration 24300, loss = 0.152694\nI0818 19:14:08.605705 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:14:08.605729 21603 solver.cpp:244]     Train net output #1: loss = 0.152694 (* 1 = 0.152694 loss)\nI0818 19:14:08.697610 21603 sgd_solver.cpp:166] Iteration 24300, lr = 0.6075\nI0818 19:14:55.828341 21603 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0818 19:15:22.110862 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81248\nI0818 19:15:22.110911 21603 solver.cpp:404]     Test net output #1: loss = 0.754119 (* 1 = 0.754119 loss)\nI0818 19:15:22.524265 21603 solver.cpp:228] Iteration 24400, loss = 0.081501\nI0818 19:15:22.524322 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:15:22.524348 21603 solver.cpp:244]     Train net output #1: loss = 0.0815012 (* 1 = 0.0815012 loss)\nI0818 19:15:22.615177 21603 sgd_solver.cpp:166] Iteration 24400, lr = 0.61\nI0818 19:16:09.799772 21603 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0818 19:16:36.074040 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75988\nI0818 19:16:36.074092 21603 solver.cpp:404]     Test net output #1: loss = 0.963224 (* 1 = 0.963224 loss)\nI0818 19:16:36.486187 21603 solver.cpp:228] Iteration 24500, loss = 0.126601\nI0818 19:16:36.486232 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:16:36.486256 21603 solver.cpp:244]     Train net output #1: loss = 0.126602 (* 1 = 0.126602 loss)\nI0818 19:16:36.571596 21603 sgd_solver.cpp:166] Iteration 24500, lr = 0.6125\nI0818 19:17:23.768497 21603 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0818 19:17:49.871371 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77704\nI0818 19:17:49.871425 21603 solver.cpp:404]     Test net output #1: loss = 0.94157 (* 1 = 0.94157 loss)\nI0818 19:17:50.284142 21603 solver.cpp:228] Iteration 24600, loss = 0.208472\nI0818 19:17:50.284188 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:17:50.284214 21603 solver.cpp:244]     Train net output #1: loss = 0.208472 (* 1 = 0.208472 loss)\nI0818 19:17:50.381671 21603 sgd_solver.cpp:166] Iteration 24600, lr = 0.615\nI0818 19:18:37.574147 21603 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0818 19:19:03.700927 21603 solver.cpp:404]     Test net output #0: accuracy = 0.64476\nI0818 19:19:03.700978 21603 solver.cpp:404]     Test net output #1: loss = 1.90842 (* 1 = 1.90842 loss)\nI0818 19:19:04.121446 21603 solver.cpp:228] Iteration 24700, loss = 0.0888171\nI0818 19:19:04.121680 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:19:04.121783 21603 solver.cpp:244]     Train net output #1: loss = 0.0888173 (* 1 = 0.0888173 loss)\nI0818 19:19:04.202411 21603 sgd_solver.cpp:166] Iteration 24700, lr = 0.6175\nI0818 19:19:51.306659 21603 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0818 19:20:17.410759 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74572\nI0818 19:20:17.410807 21603 solver.cpp:404]     Test net output #1: loss = 1.10074 (* 1 = 1.10074 loss)\nI0818 19:20:17.831393 21603 solver.cpp:228] Iteration 24800, loss = 0.162669\nI0818 19:20:17.831629 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:20:17.831744 21603 solver.cpp:244]     Train net output #1: loss = 0.162669 (* 1 = 0.162669 loss)\nI0818 19:20:17.906500 21603 sgd_solver.cpp:166] Iteration 24800, lr = 0.62\nI0818 19:21:05.003777 21603 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0818 19:21:31.100323 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80356\nI0818 19:21:31.100375 21603 solver.cpp:404]     Test net output #1: loss = 0.717163 (* 1 = 0.717163 loss)\nI0818 19:21:31.512621 21603 solver.cpp:228] Iteration 24900, loss = 0.0997336\nI0818 19:21:31.512670 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:21:31.512696 21603 solver.cpp:244]     Train net output #1: 
loss = 0.0997338 (* 1 = 0.0997338 loss)\nI0818 19:21:31.601904 21603 sgd_solver.cpp:166] Iteration 24900, lr = 0.6225\nI0818 19:22:18.516727 21603 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0818 19:22:44.677088 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81904\nI0818 19:22:44.677137 21603 solver.cpp:404]     Test net output #1: loss = 0.670005 (* 1 = 0.670005 loss)\nI0818 19:22:45.096666 21603 solver.cpp:228] Iteration 25000, loss = 0.200513\nI0818 19:22:45.096902 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:22:45.097007 21603 solver.cpp:244]     Train net output #1: loss = 0.200513 (* 1 = 0.200513 loss)\nI0818 19:22:45.173683 21603 sgd_solver.cpp:166] Iteration 25000, lr = 0.625\nI0818 19:23:32.050561 21603 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0818 19:23:58.301060 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI0818 19:23:58.301115 21603 solver.cpp:404]     Test net output #1: loss = 0.766185 (* 1 = 0.766185 loss)\nI0818 19:23:58.715203 21603 solver.cpp:228] Iteration 25100, loss = 0.0720584\nI0818 19:23:58.715251 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:23:58.715276 21603 solver.cpp:244]     Train net output #1: loss = 0.0720585 (* 1 = 0.0720585 loss)\nI0818 19:23:58.803923 21603 sgd_solver.cpp:166] Iteration 25100, lr = 0.6275\nI0818 19:24:45.711477 21603 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0818 19:25:11.824090 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI0818 19:25:11.824139 21603 solver.cpp:404]     Test net output #1: loss = 0.781902 (* 1 = 0.781902 loss)\nI0818 19:25:12.237637 21603 solver.cpp:228] Iteration 25200, loss = 0.208222\nI0818 19:25:12.237684 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:25:12.237709 21603 solver.cpp:244]     Train net output #1: loss = 0.208222 (* 1 = 0.208222 loss)\nI0818 19:25:12.329370 21603 sgd_solver.cpp:166] Iteration 25200, lr = 0.63\nI0818 
19:25:59.293071 21603 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0818 19:26:25.528825 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74984\nI0818 19:26:25.528873 21603 solver.cpp:404]     Test net output #1: loss = 1.08674 (* 1 = 1.08674 loss)\nI0818 19:26:25.942385 21603 solver.cpp:228] Iteration 25300, loss = 0.131655\nI0818 19:26:25.942431 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:26:25.942454 21603 solver.cpp:244]     Train net output #1: loss = 0.131655 (* 1 = 0.131655 loss)\nI0818 19:26:26.035565 21603 sgd_solver.cpp:166] Iteration 25300, lr = 0.6325\nI0818 19:27:13.032296 21603 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0818 19:27:39.137256 21603 solver.cpp:404]     Test net output #0: accuracy = 0.787\nI0818 19:27:39.137305 21603 solver.cpp:404]     Test net output #1: loss = 0.902224 (* 1 = 0.902224 loss)\nI0818 19:27:39.550644 21603 solver.cpp:228] Iteration 25400, loss = 0.133113\nI0818 19:27:39.550690 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:27:39.550717 21603 solver.cpp:244]     Train net output #1: loss = 0.133113 (* 1 = 0.133113 loss)\nI0818 19:27:39.639683 21603 sgd_solver.cpp:166] Iteration 25400, lr = 0.635\nI0818 19:28:26.704895 21603 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0818 19:28:52.985203 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78752\nI0818 19:28:52.985257 21603 solver.cpp:404]     Test net output #1: loss = 0.874197 (* 1 = 0.874197 loss)\nI0818 19:28:53.398805 21603 solver.cpp:228] Iteration 25500, loss = 0.132896\nI0818 19:28:53.398847 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:28:53.398871 21603 solver.cpp:244]     Train net output #1: loss = 0.132896 (* 1 = 0.132896 loss)\nI0818 19:28:53.492766 21603 sgd_solver.cpp:166] Iteration 25500, lr = 0.6375\nI0818 19:29:40.444689 21603 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0818 19:30:06.561835 21603 solver.cpp:404]     Test net 
output #0: accuracy = 0.7916\nI0818 19:30:06.561880 21603 solver.cpp:404]     Test net output #1: loss = 0.789909 (* 1 = 0.789909 loss)\nI0818 19:30:06.975210 21603 solver.cpp:228] Iteration 25600, loss = 0.13986\nI0818 19:30:06.975252 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:30:06.975277 21603 solver.cpp:244]     Train net output #1: loss = 0.13986 (* 1 = 0.13986 loss)\nI0818 19:30:07.067596 21603 sgd_solver.cpp:166] Iteration 25600, lr = 0.64\nI0818 19:30:54.000074 21603 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0818 19:31:20.246368 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77864\nI0818 19:31:20.246423 21603 solver.cpp:404]     Test net output #1: loss = 0.906682 (* 1 = 0.906682 loss)\nI0818 19:31:20.660199 21603 solver.cpp:228] Iteration 25700, loss = 0.157712\nI0818 19:31:20.660243 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:31:20.660269 21603 solver.cpp:244]     Train net output #1: loss = 0.157712 (* 1 = 0.157712 loss)\nI0818 19:31:20.757028 21603 sgd_solver.cpp:166] Iteration 25700, lr = 0.6425\nI0818 19:32:07.698179 21603 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0818 19:32:33.989709 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78796\nI0818 19:32:33.989761 21603 solver.cpp:404]     Test net output #1: loss = 0.854956 (* 1 = 0.854956 loss)\nI0818 19:32:34.403156 21603 solver.cpp:228] Iteration 25800, loss = 0.175934\nI0818 19:32:34.403201 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:32:34.403224 21603 solver.cpp:244]     Train net output #1: loss = 0.175934 (* 1 = 0.175934 loss)\nI0818 19:32:34.498049 21603 sgd_solver.cpp:166] Iteration 25800, lr = 0.645\nI0818 19:33:21.455817 21603 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0818 19:33:47.620477 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80948\nI0818 19:33:47.620537 21603 solver.cpp:404]     Test net output #1: loss = 0.706933 (* 1 = 0.706933 
loss)\nI0818 19:33:48.033938 21603 solver.cpp:228] Iteration 25900, loss = 0.114156\nI0818 19:33:48.033979 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:33:48.034003 21603 solver.cpp:244]     Train net output #1: loss = 0.114157 (* 1 = 0.114157 loss)\nI0818 19:33:48.124351 21603 sgd_solver.cpp:166] Iteration 25900, lr = 0.6475\nI0818 19:34:35.093986 21603 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0818 19:35:01.387703 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74304\nI0818 19:35:01.387755 21603 solver.cpp:404]     Test net output #1: loss = 1.07955 (* 1 = 1.07955 loss)\nI0818 19:35:01.801367 21603 solver.cpp:228] Iteration 26000, loss = 0.109411\nI0818 19:35:01.801411 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:35:01.801436 21603 solver.cpp:244]     Train net output #1: loss = 0.109411 (* 1 = 0.109411 loss)\nI0818 19:35:01.890169 21603 sgd_solver.cpp:166] Iteration 26000, lr = 0.65\nI0818 19:35:48.823329 21603 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0818 19:36:15.085602 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78928\nI0818 19:36:15.085664 21603 solver.cpp:404]     Test net output #1: loss = 0.8999 (* 1 = 0.8999 loss)\nI0818 19:36:15.497997 21603 solver.cpp:228] Iteration 26100, loss = 0.0684314\nI0818 19:36:15.498040 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:36:15.498065 21603 solver.cpp:244]     Train net output #1: loss = 0.0684316 (* 1 = 0.0684316 loss)\nI0818 19:36:15.587555 21603 sgd_solver.cpp:166] Iteration 26100, lr = 0.6525\nI0818 19:37:02.520490 21603 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0818 19:37:28.804388 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75248\nI0818 19:37:28.804442 21603 solver.cpp:404]     Test net output #1: loss = 0.971588 (* 1 = 0.971588 loss)\nI0818 19:37:29.216792 21603 solver.cpp:228] Iteration 26200, loss = 0.056032\nI0818 19:37:29.216835 21603 solver.cpp:244]    
 Train net output #0: accuracy = 0.984\nI0818 19:37:29.216859 21603 solver.cpp:244]     Train net output #1: loss = 0.0560322 (* 1 = 0.0560322 loss)\nI0818 19:37:29.306704 21603 sgd_solver.cpp:166] Iteration 26200, lr = 0.655\nI0818 19:38:16.174495 21603 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0818 19:38:42.165725 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76756\nI0818 19:38:42.165784 21603 solver.cpp:404]     Test net output #1: loss = 1.1065 (* 1 = 1.1065 loss)\nI0818 19:38:42.576918 21603 solver.cpp:228] Iteration 26300, loss = 0.136444\nI0818 19:38:42.576966 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:38:42.576983 21603 solver.cpp:244]     Train net output #1: loss = 0.136444 (* 1 = 0.136444 loss)\nI0818 19:38:42.663426 21603 sgd_solver.cpp:166] Iteration 26300, lr = 0.6575\nI0818 19:39:29.480998 21603 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0818 19:39:55.475037 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7606\nI0818 19:39:55.475093 21603 solver.cpp:404]     Test net output #1: loss = 1.11336 (* 1 = 1.11336 loss)\nI0818 19:39:55.886461 21603 solver.cpp:228] Iteration 26400, loss = 0.141264\nI0818 19:39:55.886505 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:39:55.886523 21603 solver.cpp:244]     Train net output #1: loss = 0.141264 (* 1 = 0.141264 loss)\nI0818 19:39:55.975543 21603 sgd_solver.cpp:166] Iteration 26400, lr = 0.66\nI0818 19:40:42.812566 21603 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0818 19:41:08.804447 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80976\nI0818 19:41:08.804505 21603 solver.cpp:404]     Test net output #1: loss = 0.724134 (* 1 = 0.724134 loss)\nI0818 19:41:09.215351 21603 solver.cpp:228] Iteration 26500, loss = 0.191379\nI0818 19:41:09.215397 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:41:09.215414 21603 solver.cpp:244]     Train net output #1: loss = 0.191379 (* 1 = 
0.191379 loss)\nI0818 19:41:09.306123 21603 sgd_solver.cpp:166] Iteration 26500, lr = 0.6625\nI0818 19:41:56.361047 21603 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0818 19:42:22.523576 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80744\nI0818 19:42:22.523633 21603 solver.cpp:404]     Test net output #1: loss = 0.773099 (* 1 = 0.773099 loss)\nI0818 19:42:22.944579 21603 solver.cpp:228] Iteration 26600, loss = 0.164572\nI0818 19:42:22.944640 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:42:22.944674 21603 solver.cpp:244]     Train net output #1: loss = 0.164572 (* 1 = 0.164572 loss)\nI0818 19:42:23.029595 21603 sgd_solver.cpp:166] Iteration 26600, lr = 0.665\nI0818 19:43:10.486507 21603 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0818 19:43:36.591377 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82304\nI0818 19:43:36.591421 21603 solver.cpp:404]     Test net output #1: loss = 0.689233 (* 1 = 0.689233 loss)\nI0818 19:43:37.004633 21603 solver.cpp:228] Iteration 26700, loss = 0.161097\nI0818 19:43:37.004673 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:43:37.004688 21603 solver.cpp:244]     Train net output #1: loss = 0.161097 (* 1 = 0.161097 loss)\nI0818 19:43:37.091308 21603 sgd_solver.cpp:166] Iteration 26700, lr = 0.6675\nI0818 19:44:24.574990 21603 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0818 19:44:50.689601 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75444\nI0818 19:44:50.689646 21603 solver.cpp:404]     Test net output #1: loss = 1.13403 (* 1 = 1.13403 loss)\nI0818 19:44:51.102138 21603 solver.cpp:228] Iteration 26800, loss = 0.171756\nI0818 19:44:51.102171 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:44:51.102187 21603 solver.cpp:244]     Train net output #1: loss = 0.171756 (* 1 = 0.171756 loss)\nI0818 19:44:51.198174 21603 sgd_solver.cpp:166] Iteration 26800, lr = 0.67\nI0818 19:45:38.639230 21603 
solver.cpp:337] Iteration 26900, Testing net (#0)\nI0818 19:46:04.731530 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8182\nI0818 19:46:04.731575 21603 solver.cpp:404]     Test net output #1: loss = 0.724944 (* 1 = 0.724944 loss)\nI0818 19:46:05.144904 21603 solver.cpp:228] Iteration 26900, loss = 0.118858\nI0818 19:46:05.144942 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:46:05.144958 21603 solver.cpp:244]     Train net output #1: loss = 0.118858 (* 1 = 0.118858 loss)\nI0818 19:46:05.228988 21603 sgd_solver.cpp:166] Iteration 26900, lr = 0.6725\nI0818 19:46:52.678850 21603 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0818 19:47:18.825408 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7982\nI0818 19:47:18.825455 21603 solver.cpp:404]     Test net output #1: loss = 0.762603 (* 1 = 0.762603 loss)\nI0818 19:47:19.238925 21603 solver.cpp:228] Iteration 27000, loss = 0.198358\nI0818 19:47:19.238962 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:47:19.238978 21603 solver.cpp:244]     Train net output #1: loss = 0.198358 (* 1 = 0.198358 loss)\nI0818 19:47:19.333000 21603 sgd_solver.cpp:166] Iteration 27000, lr = 0.675\nI0818 19:48:06.696332 21603 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0818 19:48:32.787469 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7294\nI0818 19:48:32.787518 21603 solver.cpp:404]     Test net output #1: loss = 1.27581 (* 1 = 1.27581 loss)\nI0818 19:48:33.199800 21603 solver.cpp:228] Iteration 27100, loss = 0.170506\nI0818 19:48:33.199834 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:48:33.199849 21603 solver.cpp:244]     Train net output #1: loss = 0.170506 (* 1 = 0.170506 loss)\nI0818 19:48:33.289489 21603 sgd_solver.cpp:166] Iteration 27100, lr = 0.6775\nI0818 19:49:20.707562 21603 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0818 19:49:46.802631 21603 solver.cpp:404]     Test net output #0: accuracy = 
0.80304\nI0818 19:49:46.802677 21603 solver.cpp:404]     Test net output #1: loss = 0.864868 (* 1 = 0.864868 loss)\nI0818 19:49:47.216655 21603 solver.cpp:228] Iteration 27200, loss = 0.135002\nI0818 19:49:47.216689 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:49:47.216704 21603 solver.cpp:244]     Train net output #1: loss = 0.135002 (* 1 = 0.135002 loss)\nI0818 19:49:47.308454 21603 sgd_solver.cpp:166] Iteration 27200, lr = 0.68\nI0818 19:50:34.723718 21603 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0818 19:51:00.868155 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8286\nI0818 19:51:00.868199 21603 solver.cpp:404]     Test net output #1: loss = 0.665638 (* 1 = 0.665638 loss)\nI0818 19:51:01.281874 21603 solver.cpp:228] Iteration 27300, loss = 0.104693\nI0818 19:51:01.281919 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 19:51:01.281935 21603 solver.cpp:244]     Train net output #1: loss = 0.104693 (* 1 = 0.104693 loss)\nI0818 19:51:01.375236 21603 sgd_solver.cpp:166] Iteration 27300, lr = 0.6825\nI0818 19:51:48.609786 21603 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0818 19:52:14.820523 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77896\nI0818 19:52:14.820580 21603 solver.cpp:404]     Test net output #1: loss = 0.905105 (* 1 = 0.905105 loss)\nI0818 19:52:15.234230 21603 solver.cpp:228] Iteration 27400, loss = 0.153376\nI0818 19:52:15.234282 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 19:52:15.234308 21603 solver.cpp:244]     Train net output #1: loss = 0.153376 (* 1 = 0.153376 loss)\nI0818 19:52:15.321125 21603 sgd_solver.cpp:166] Iteration 27400, lr = 0.685\nI0818 19:53:02.589059 21603 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0818 19:53:28.876663 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78012\nI0818 19:53:28.876718 21603 solver.cpp:404]     Test net output #1: loss = 0.927738 (* 1 = 0.927738 loss)\nI0818 
19:53:29.297655 21603 solver.cpp:228] Iteration 27500, loss = 0.152989\nI0818 19:53:29.297703 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:53:29.297720 21603 solver.cpp:244]     Train net output #1: loss = 0.152989 (* 1 = 0.152989 loss)\nI0818 19:53:29.379951 21603 sgd_solver.cpp:166] Iteration 27500, lr = 0.6875\nI0818 19:54:16.607283 21603 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0818 19:54:42.879940 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75652\nI0818 19:54:42.879987 21603 solver.cpp:404]     Test net output #1: loss = 1.10893 (* 1 = 1.10893 loss)\nI0818 19:54:43.293345 21603 solver.cpp:228] Iteration 27600, loss = 0.127739\nI0818 19:54:43.293393 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:54:43.293409 21603 solver.cpp:244]     Train net output #1: loss = 0.12774 (* 1 = 0.12774 loss)\nI0818 19:54:43.384727 21603 sgd_solver.cpp:166] Iteration 27600, lr = 0.69\nI0818 19:55:30.641342 21603 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0818 19:55:56.908581 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81844\nI0818 19:55:56.908627 21603 solver.cpp:404]     Test net output #1: loss = 0.729045 (* 1 = 0.729045 loss)\nI0818 19:55:57.321768 21603 solver.cpp:228] Iteration 27700, loss = 0.0749741\nI0818 19:55:57.321820 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:55:57.321837 21603 solver.cpp:244]     Train net output #1: loss = 0.0749743 (* 1 = 0.0749743 loss)\nI0818 19:55:57.415004 21603 sgd_solver.cpp:166] Iteration 27700, lr = 0.6925\nI0818 19:56:44.634266 21603 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0818 19:57:10.776759 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73372\nI0818 19:57:10.776804 21603 solver.cpp:404]     Test net output #1: loss = 1.16627 (* 1 = 1.16627 loss)\nI0818 19:57:11.190484 21603 solver.cpp:228] Iteration 27800, loss = 0.135945\nI0818 19:57:11.190536 21603 solver.cpp:244]     Train net 
output #0: accuracy = 0.944\nI0818 19:57:11.190551 21603 solver.cpp:244]     Train net output #1: loss = 0.135945 (* 1 = 0.135945 loss)\nI0818 19:57:11.286710 21603 sgd_solver.cpp:166] Iteration 27800, lr = 0.695\nI0818 19:57:58.697130 21603 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0818 19:58:24.784200 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7872\nI0818 19:58:24.784243 21603 solver.cpp:404]     Test net output #1: loss = 0.841344 (* 1 = 0.841344 loss)\nI0818 19:58:25.198017 21603 solver.cpp:228] Iteration 27900, loss = 0.126783\nI0818 19:58:25.198065 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 19:58:25.198081 21603 solver.cpp:244]     Train net output #1: loss = 0.126783 (* 1 = 0.126783 loss)\nI0818 19:58:25.285842 21603 sgd_solver.cpp:166] Iteration 27900, lr = 0.6975\nI0818 19:59:12.527820 21603 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0818 19:59:38.646315 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82396\nI0818 19:59:38.646361 21603 solver.cpp:404]     Test net output #1: loss = 0.671052 (* 1 = 0.671052 loss)\nI0818 19:59:39.059684 21603 solver.cpp:228] Iteration 28000, loss = 0.136142\nI0818 19:59:39.059737 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:59:39.059754 21603 solver.cpp:244]     Train net output #1: loss = 0.136142 (* 1 = 0.136142 loss)\nI0818 19:59:39.148648 21603 sgd_solver.cpp:166] Iteration 28000, lr = 0.7\nI0818 20:00:26.243336 21603 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0818 20:00:52.412068 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70848\nI0818 20:00:52.412116 21603 solver.cpp:404]     Test net output #1: loss = 1.33704 (* 1 = 1.33704 loss)\nI0818 20:00:52.825683 21603 solver.cpp:228] Iteration 28100, loss = 0.0748654\nI0818 20:00:52.825736 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:00:52.825753 21603 solver.cpp:244]     Train net output #1: loss = 0.0748656 (* 1 = 
0.0748656 loss)\nI0818 20:00:52.911692 21603 sgd_solver.cpp:166] Iteration 28100, lr = 0.7025\nI0818 20:01:40.128262 21603 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0818 20:02:06.304224 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80896\nI0818 20:02:06.304272 21603 solver.cpp:404]     Test net output #1: loss = 0.886523 (* 1 = 0.886523 loss)\nI0818 20:02:06.716405 21603 solver.cpp:228] Iteration 28200, loss = 0.16173\nI0818 20:02:06.716454 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:02:06.716469 21603 solver.cpp:244]     Train net output #1: loss = 0.161731 (* 1 = 0.161731 loss)\nI0818 20:02:06.805480 21603 sgd_solver.cpp:166] Iteration 28200, lr = 0.705\nI0818 20:02:54.100035 21603 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0818 20:03:20.263869 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81092\nI0818 20:03:20.263913 21603 solver.cpp:404]     Test net output #1: loss = 0.705514 (* 1 = 0.705514 loss)\nI0818 20:03:20.677565 21603 solver.cpp:228] Iteration 28300, loss = 0.122894\nI0818 20:03:20.677620 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:03:20.677636 21603 solver.cpp:244]     Train net output #1: loss = 0.122894 (* 1 = 0.122894 loss)\nI0818 20:03:20.772339 21603 sgd_solver.cpp:166] Iteration 28300, lr = 0.7075\nI0818 20:04:08.237720 21603 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0818 20:04:34.320451 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80464\nI0818 20:04:34.320502 21603 solver.cpp:404]     Test net output #1: loss = 0.766596 (* 1 = 0.766596 loss)\nI0818 20:04:34.734036 21603 solver.cpp:228] Iteration 28400, loss = 0.221127\nI0818 20:04:34.734091 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 20:04:34.734107 21603 solver.cpp:244]     Train net output #1: loss = 0.221128 (* 1 = 0.221128 loss)\nI0818 20:04:34.828943 21603 sgd_solver.cpp:166] Iteration 28400, lr = 0.71\nI0818 20:05:22.247678 21603 
solver.cpp:337] Iteration 28500, Testing net (#0)\nI0818 20:05:48.505762 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8026\nI0818 20:05:48.505817 21603 solver.cpp:404]     Test net output #1: loss = 0.836979 (* 1 = 0.836979 loss)\nI0818 20:05:48.917984 21603 solver.cpp:228] Iteration 28500, loss = 0.0810348\nI0818 20:05:48.918021 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:05:48.918036 21603 solver.cpp:244]     Train net output #1: loss = 0.081035 (* 1 = 0.081035 loss)\nI0818 20:05:49.013358 21603 sgd_solver.cpp:166] Iteration 28500, lr = 0.7125\nI0818 20:06:36.364785 21603 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0818 20:07:02.652088 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7978\nI0818 20:07:02.652140 21603 solver.cpp:404]     Test net output #1: loss = 0.8356 (* 1 = 0.8356 loss)\nI0818 20:07:03.064404 21603 solver.cpp:228] Iteration 28600, loss = 0.215061\nI0818 20:07:03.064445 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 20:07:03.064460 21603 solver.cpp:244]     Train net output #1: loss = 0.215061 (* 1 = 0.215061 loss)\nI0818 20:07:03.154940 21603 sgd_solver.cpp:166] Iteration 28600, lr = 0.715\nI0818 20:07:50.589987 21603 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0818 20:08:16.866179 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66716\nI0818 20:08:16.866232 21603 solver.cpp:404]     Test net output #1: loss = 1.62595 (* 1 = 1.62595 loss)\nI0818 20:08:17.278280 21603 solver.cpp:228] Iteration 28700, loss = 0.157342\nI0818 20:08:17.278323 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:08:17.278339 21603 solver.cpp:244]     Train net output #1: loss = 0.157342 (* 1 = 0.157342 loss)\nI0818 20:08:17.372894 21603 sgd_solver.cpp:166] Iteration 28700, lr = 0.7175\nI0818 20:09:04.819576 21603 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0818 20:09:31.104084 21603 solver.cpp:404]     Test net output #0: accuracy = 
0.77612\nI0818 20:09:31.104137 21603 solver.cpp:404]     Test net output #1: loss = 0.979329 (* 1 = 0.979329 loss)\nI0818 20:09:31.516134 21603 solver.cpp:228] Iteration 28800, loss = 0.162035\nI0818 20:09:31.516175 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:09:31.516191 21603 solver.cpp:244]     Train net output #1: loss = 0.162035 (* 1 = 0.162035 loss)\nI0818 20:09:31.610066 21603 sgd_solver.cpp:166] Iteration 28800, lr = 0.72\nI0818 20:10:19.007369 21603 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0818 20:10:45.309342 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71484\nI0818 20:10:45.309396 21603 solver.cpp:404]     Test net output #1: loss = 1.55525 (* 1 = 1.55525 loss)\nI0818 20:10:45.721781 21603 solver.cpp:228] Iteration 28900, loss = 0.103517\nI0818 20:10:45.721820 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:10:45.721837 21603 solver.cpp:244]     Train net output #1: loss = 0.103517 (* 1 = 0.103517 loss)\nI0818 20:10:45.814970 21603 sgd_solver.cpp:166] Iteration 28900, lr = 0.7225\nI0818 20:11:33.241384 21603 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0818 20:11:59.536746 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72996\nI0818 20:11:59.536811 21603 solver.cpp:404]     Test net output #1: loss = 1.06811 (* 1 = 1.06811 loss)\nI0818 20:11:59.948771 21603 solver.cpp:228] Iteration 29000, loss = 0.106234\nI0818 20:11:59.948822 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:11:59.948838 21603 solver.cpp:244]     Train net output #1: loss = 0.106234 (* 1 = 0.106234 loss)\nI0818 20:12:00.039976 21603 sgd_solver.cpp:166] Iteration 29000, lr = 0.725\nI0818 20:12:47.433411 21603 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0818 20:13:13.733466 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80956\nI0818 20:13:13.733533 21603 solver.cpp:404]     Test net output #1: loss = 0.768813 (* 1 = 0.768813 loss)\nI0818 
20:13:14.145462 21603 solver.cpp:228] Iteration 29100, loss = 0.200197\nI0818 20:13:14.145509 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 20:13:14.145526 21603 solver.cpp:244]     Train net output #1: loss = 0.200197 (* 1 = 0.200197 loss)\nI0818 20:13:14.237603 21603 sgd_solver.cpp:166] Iteration 29100, lr = 0.7275\nI0818 20:14:01.611157 21603 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0818 20:14:27.915659 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77024\nI0818 20:14:27.915725 21603 solver.cpp:404]     Test net output #1: loss = 0.930809 (* 1 = 0.930809 loss)\nI0818 20:14:28.327950 21603 solver.cpp:228] Iteration 29200, loss = 0.156462\nI0818 20:14:28.327999 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:14:28.328017 21603 solver.cpp:244]     Train net output #1: loss = 0.156463 (* 1 = 0.156463 loss)\nI0818 20:14:28.412938 21603 sgd_solver.cpp:166] Iteration 29200, lr = 0.73\nI0818 20:15:15.761090 21603 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0818 20:15:42.066872 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7078\nI0818 20:15:42.066941 21603 solver.cpp:404]     Test net output #1: loss = 1.50988 (* 1 = 1.50988 loss)\nI0818 20:15:42.480499 21603 solver.cpp:228] Iteration 29300, loss = 0.108168\nI0818 20:15:42.480548 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:15:42.480564 21603 solver.cpp:244]     Train net output #1: loss = 0.108168 (* 1 = 0.108168 loss)\nI0818 20:15:42.570034 21603 sgd_solver.cpp:166] Iteration 29300, lr = 0.7325\nI0818 20:16:29.928494 21603 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0818 20:16:56.187644 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76908\nI0818 20:16:56.187691 21603 solver.cpp:404]     Test net output #1: loss = 0.927193 (* 1 = 0.927193 loss)\nI0818 20:16:56.601617 21603 solver.cpp:228] Iteration 29400, loss = 0.0718599\nI0818 20:16:56.601670 21603 solver.cpp:244]     Train net 
output #0: accuracy = 0.976\nI0818 20:16:56.601687 21603 solver.cpp:244]     Train net output #1: loss = 0.0718602 (* 1 = 0.0718602 loss)\nI0818 20:16:56.685674 21603 sgd_solver.cpp:166] Iteration 29400, lr = 0.735\nI0818 20:17:44.057361 21603 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0818 20:18:10.317903 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80632\nI0818 20:18:10.317948 21603 solver.cpp:404]     Test net output #1: loss = 0.715795 (* 1 = 0.715795 loss)\nI0818 20:18:10.731312 21603 solver.cpp:228] Iteration 29500, loss = 0.126803\nI0818 20:18:10.731364 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:18:10.731381 21603 solver.cpp:244]     Train net output #1: loss = 0.126803 (* 1 = 0.126803 loss)\nI0818 20:18:10.824117 21603 sgd_solver.cpp:166] Iteration 29500, lr = 0.7375\nI0818 20:18:58.115630 21603 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0818 20:19:24.236647 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7988\nI0818 20:19:24.236693 21603 solver.cpp:404]     Test net output #1: loss = 0.81776 (* 1 = 0.81776 loss)\nI0818 20:19:24.650403 21603 solver.cpp:228] Iteration 29600, loss = 0.117746\nI0818 20:19:24.650440 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:19:24.650457 21603 solver.cpp:244]     Train net output #1: loss = 0.117746 (* 1 = 0.117746 loss)\nI0818 20:19:24.739984 21603 sgd_solver.cpp:166] Iteration 29600, lr = 0.74\nI0818 20:20:12.121832 21603 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0818 20:20:38.235388 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70212\nI0818 20:20:38.235437 21603 solver.cpp:404]     Test net output #1: loss = 1.50646 (* 1 = 1.50646 loss)\nI0818 20:20:38.656512 21603 solver.cpp:228] Iteration 29700, loss = 0.197365\nI0818 20:20:38.656564 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:20:38.656589 21603 solver.cpp:244]     Train net output #1: loss = 0.197365 (* 1 = 0.197365 
loss)\nI0818 20:20:38.751509 21603 sgd_solver.cpp:166] Iteration 29700, lr = 0.7425\nI0818 20:21:26.132823 21603 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0818 20:21:52.220458 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81692\nI0818 20:21:52.220508 21603 solver.cpp:404]     Test net output #1: loss = 0.691082 (* 1 = 0.691082 loss)\nI0818 20:21:52.632956 21603 solver.cpp:228] Iteration 29800, loss = 0.118356\nI0818 20:21:52.633002 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:21:52.633025 21603 solver.cpp:244]     Train net output #1: loss = 0.118356 (* 1 = 0.118356 loss)\nI0818 20:21:52.722184 21603 sgd_solver.cpp:166] Iteration 29800, lr = 0.745\nI0818 20:22:40.171969 21603 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0818 20:23:06.258601 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8184\nI0818 20:23:06.258647 21603 solver.cpp:404]     Test net output #1: loss = 0.68666 (* 1 = 0.68666 loss)\nI0818 20:23:06.672129 21603 solver.cpp:228] Iteration 29900, loss = 0.112609\nI0818 20:23:06.672174 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:23:06.672189 21603 solver.cpp:244]     Train net output #1: loss = 0.112609 (* 1 = 0.112609 loss)\nI0818 20:23:06.764808 21603 sgd_solver.cpp:166] Iteration 29900, lr = 0.7475\nI0818 20:23:54.153787 21603 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0818 20:24:20.238816 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0818 20:24:20.238862 21603 solver.cpp:404]     Test net output #1: loss = 0.753315 (* 1 = 0.753315 loss)\nI0818 20:24:20.652266 21603 solver.cpp:228] Iteration 30000, loss = 0.103733\nI0818 20:24:20.652315 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:24:20.652333 21603 solver.cpp:244]     Train net output #1: loss = 0.103733 (* 1 = 0.103733 loss)\nI0818 20:24:20.738276 21603 sgd_solver.cpp:166] Iteration 30000, lr = 0.75\nI0818 20:25:08.013510 21603 solver.cpp:337] 
Iteration 30100, Testing net (#0)\nI0818 20:25:34.100500 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77592\nI0818 20:25:34.100543 21603 solver.cpp:404]     Test net output #1: loss = 0.97508 (* 1 = 0.97508 loss)\nI0818 20:25:34.514374 21603 solver.cpp:228] Iteration 30100, loss = 0.128466\nI0818 20:25:34.514416 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:25:34.514433 21603 solver.cpp:244]     Train net output #1: loss = 0.128467 (* 1 = 0.128467 loss)\nI0818 20:25:34.600234 21603 sgd_solver.cpp:166] Iteration 30100, lr = 0.7525\nI0818 20:26:21.951823 21603 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0818 20:26:48.053647 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78424\nI0818 20:26:48.053691 21603 solver.cpp:404]     Test net output #1: loss = 0.912883 (* 1 = 0.912883 loss)\nI0818 20:26:48.466864 21603 solver.cpp:228] Iteration 30200, loss = 0.139449\nI0818 20:26:48.466912 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:26:48.466928 21603 solver.cpp:244]     Train net output #1: loss = 0.139449 (* 1 = 0.139449 loss)\nI0818 20:26:48.551381 21603 sgd_solver.cpp:166] Iteration 30200, lr = 0.755\nI0818 20:27:35.897042 21603 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0818 20:28:01.979722 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79392\nI0818 20:28:01.979765 21603 solver.cpp:404]     Test net output #1: loss = 0.854251 (* 1 = 0.854251 loss)\nI0818 20:28:02.393003 21603 solver.cpp:228] Iteration 30300, loss = 0.140608\nI0818 20:28:02.393050 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:28:02.393067 21603 solver.cpp:244]     Train net output #1: loss = 0.140608 (* 1 = 0.140608 loss)\nI0818 20:28:02.479568 21603 sgd_solver.cpp:166] Iteration 30300, lr = 0.7575\nI0818 20:28:49.734544 21603 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0818 20:29:15.819216 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71304\nI0818 
20:29:15.819258 21603 solver.cpp:404]     Test net output #1: loss = 1.2982 (* 1 = 1.2982 loss)\nI0818 20:29:16.232852 21603 solver.cpp:228] Iteration 30400, loss = 0.141664\nI0818 20:29:16.232895 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:29:16.232913 21603 solver.cpp:244]     Train net output #1: loss = 0.141665 (* 1 = 0.141665 loss)\nI0818 20:29:16.317554 21603 sgd_solver.cpp:166] Iteration 30400, lr = 0.76\nI0818 20:30:03.590387 21603 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0818 20:30:29.671742 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8192\nI0818 20:30:29.671785 21603 solver.cpp:404]     Test net output #1: loss = 0.705608 (* 1 = 0.705608 loss)\nI0818 20:30:30.084049 21603 solver.cpp:228] Iteration 30500, loss = 0.107972\nI0818 20:30:30.084094 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:30:30.084110 21603 solver.cpp:244]     Train net output #1: loss = 0.107972 (* 1 = 0.107972 loss)\nI0818 20:30:30.178004 21603 sgd_solver.cpp:166] Iteration 30500, lr = 0.7625\nI0818 20:31:17.489011 21603 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0818 20:31:43.558399 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79816\nI0818 20:31:43.558441 21603 solver.cpp:404]     Test net output #1: loss = 0.832103 (* 1 = 0.832103 loss)\nI0818 20:31:43.970417 21603 solver.cpp:228] Iteration 30600, loss = 0.100688\nI0818 20:31:43.970461 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:31:43.970476 21603 solver.cpp:244]     Train net output #1: loss = 0.100688 (* 1 = 0.100688 loss)\nI0818 20:31:44.055794 21603 sgd_solver.cpp:166] Iteration 30600, lr = 0.765\nI0818 20:32:31.319130 21603 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0818 20:32:57.405369 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78976\nI0818 20:32:57.405408 21603 solver.cpp:404]     Test net output #1: loss = 0.862371 (* 1 = 0.862371 loss)\nI0818 20:32:57.819027 21603 
solver.cpp:228] Iteration 30700, loss = 0.162056\nI0818 20:32:57.819070 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:32:57.819087 21603 solver.cpp:244]     Train net output #1: loss = 0.162057 (* 1 = 0.162057 loss)\nI0818 20:32:57.907778 21603 sgd_solver.cpp:166] Iteration 30700, lr = 0.7675\nI0818 20:33:45.192149 21603 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0818 20:34:11.313241 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76264\nI0818 20:34:11.313283 21603 solver.cpp:404]     Test net output #1: loss = 1.06443 (* 1 = 1.06443 loss)\nI0818 20:34:11.727126 21603 solver.cpp:228] Iteration 30800, loss = 0.153748\nI0818 20:34:11.727169 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:34:11.727186 21603 solver.cpp:244]     Train net output #1: loss = 0.153748 (* 1 = 0.153748 loss)\nI0818 20:34:11.814860 21603 sgd_solver.cpp:166] Iteration 30800, lr = 0.77\nI0818 20:34:59.118500 21603 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0818 20:35:25.191400 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7988\nI0818 20:35:25.191442 21603 solver.cpp:404]     Test net output #1: loss = 0.801663 (* 1 = 0.801663 loss)\nI0818 20:35:25.604748 21603 solver.cpp:228] Iteration 30900, loss = 0.0820988\nI0818 20:35:25.604795 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:35:25.604812 21603 solver.cpp:244]     Train net output #1: loss = 0.0820991 (* 1 = 0.0820991 loss)\nI0818 20:35:25.693177 21603 sgd_solver.cpp:166] Iteration 30900, lr = 0.7725\nI0818 20:36:12.950433 21603 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0818 20:36:39.057150 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76564\nI0818 20:36:39.057193 21603 solver.cpp:404]     Test net output #1: loss = 0.990232 (* 1 = 0.990232 loss)\nI0818 20:36:39.470671 21603 solver.cpp:228] Iteration 31000, loss = 0.121358\nI0818 20:36:39.470719 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.944\nI0818 20:36:39.470736 21603 solver.cpp:244]     Train net output #1: loss = 0.121358 (* 1 = 0.121358 loss)\nI0818 20:36:39.558271 21603 sgd_solver.cpp:166] Iteration 31000, lr = 0.775\nI0818 20:37:26.823601 21603 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0818 20:37:52.921495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68692\nI0818 20:37:52.921540 21603 solver.cpp:404]     Test net output #1: loss = 1.58894 (* 1 = 1.58894 loss)\nI0818 20:37:53.335208 21603 solver.cpp:228] Iteration 31100, loss = 0.136301\nI0818 20:37:53.335256 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:37:53.335273 21603 solver.cpp:244]     Train net output #1: loss = 0.136302 (* 1 = 0.136302 loss)\nI0818 20:37:53.426121 21603 sgd_solver.cpp:166] Iteration 31100, lr = 0.7775\nI0818 20:38:40.651881 21603 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0818 20:39:06.744879 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81024\nI0818 20:39:06.744922 21603 solver.cpp:404]     Test net output #1: loss = 0.70449 (* 1 = 0.70449 loss)\nI0818 20:39:07.158290 21603 solver.cpp:228] Iteration 31200, loss = 0.117168\nI0818 20:39:07.158344 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:39:07.158361 21603 solver.cpp:244]     Train net output #1: loss = 0.117168 (* 1 = 0.117168 loss)\nI0818 20:39:07.254511 21603 sgd_solver.cpp:166] Iteration 31200, lr = 0.78\nI0818 20:39:54.545127 21603 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0818 20:40:20.814961 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76832\nI0818 20:40:20.815007 21603 solver.cpp:404]     Test net output #1: loss = 1.02211 (* 1 = 1.02211 loss)\nI0818 20:40:21.229231 21603 solver.cpp:228] Iteration 31300, loss = 0.15987\nI0818 20:40:21.229287 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:40:21.229311 21603 solver.cpp:244]     Train net output #1: loss = 0.15987 (* 1 = 0.15987 loss)\nI0818 
20:40:21.325985 21603 sgd_solver.cpp:166] Iteration 31300, lr = 0.7825\nI0818 20:41:08.549093 21603 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0818 20:41:34.718515 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76316\nI0818 20:41:34.718560 21603 solver.cpp:404]     Test net output #1: loss = 1.05592 (* 1 = 1.05592 loss)\nI0818 20:41:35.130842 21603 solver.cpp:228] Iteration 31400, loss = 0.103321\nI0818 20:41:35.130885 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:41:35.130901 21603 solver.cpp:244]     Train net output #1: loss = 0.103321 (* 1 = 0.103321 loss)\nI0818 20:41:35.226814 21603 sgd_solver.cpp:166] Iteration 31400, lr = 0.785\nI0818 20:42:22.570188 21603 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0818 20:42:48.678601 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83412\nI0818 20:42:48.678644 21603 solver.cpp:404]     Test net output #1: loss = 0.596347 (* 1 = 0.596347 loss)\nI0818 20:42:49.090925 21603 solver.cpp:228] Iteration 31500, loss = 0.0967624\nI0818 20:42:49.090970 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:42:49.090987 21603 solver.cpp:244]     Train net output #1: loss = 0.0967628 (* 1 = 0.0967628 loss)\nI0818 20:42:49.180716 21603 sgd_solver.cpp:166] Iteration 31500, lr = 0.7875\nI0818 20:43:36.540271 21603 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0818 20:44:02.631660 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0818 20:44:02.631706 21603 solver.cpp:404]     Test net output #1: loss = 0.818166 (* 1 = 0.818166 loss)\nI0818 20:44:03.045269 21603 solver.cpp:228] Iteration 31600, loss = 0.0955581\nI0818 20:44:03.045317 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:44:03.045336 21603 solver.cpp:244]     Train net output #1: loss = 0.0955585 (* 1 = 0.0955585 loss)\nI0818 20:44:03.137962 21603 sgd_solver.cpp:166] Iteration 31600, lr = 0.79\nI0818 20:44:50.522605 21603 solver.cpp:337] 
Iteration 31700, Testing net (#0)\nI0818 20:45:16.632705 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8104\nI0818 20:45:16.632748 21603 solver.cpp:404]     Test net output #1: loss = 0.734863 (* 1 = 0.734863 loss)\nI0818 20:45:17.046087 21603 solver.cpp:228] Iteration 31700, loss = 0.123999\nI0818 20:45:17.046135 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:45:17.046152 21603 solver.cpp:244]     Train net output #1: loss = 0.123999 (* 1 = 0.123999 loss)\nI0818 20:45:17.132199 21603 sgd_solver.cpp:166] Iteration 31700, lr = 0.7925\nI0818 20:46:04.548473 21603 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0818 20:46:30.658463 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74548\nI0818 20:46:30.658509 21603 solver.cpp:404]     Test net output #1: loss = 1.11639 (* 1 = 1.11639 loss)\nI0818 20:46:31.071959 21603 solver.cpp:228] Iteration 31800, loss = 0.110071\nI0818 20:46:31.071997 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:46:31.072013 21603 solver.cpp:244]     Train net output #1: loss = 0.110071 (* 1 = 0.110071 loss)\nI0818 20:46:31.161489 21603 sgd_solver.cpp:166] Iteration 31800, lr = 0.795\nI0818 20:47:18.656512 21603 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0818 20:47:44.763588 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80548\nI0818 20:47:44.763638 21603 solver.cpp:404]     Test net output #1: loss = 0.730223 (* 1 = 0.730223 loss)\nI0818 20:47:45.176471 21603 solver.cpp:228] Iteration 31900, loss = 0.163254\nI0818 20:47:45.176513 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:47:45.176538 21603 solver.cpp:244]     Train net output #1: loss = 0.163254 (* 1 = 0.163254 loss)\nI0818 20:47:45.264428 21603 sgd_solver.cpp:166] Iteration 31900, lr = 0.7975\nI0818 20:48:32.740198 21603 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0818 20:48:58.839318 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73112\nI0818 
20:48:58.839362 21603 solver.cpp:404]     Test net output #1: loss = 1.3103 (* 1 = 1.3103 loss)\nI0818 20:48:59.252496 21603 solver.cpp:228] Iteration 32000, loss = 0.153632\nI0818 20:48:59.252537 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:48:59.252552 21603 solver.cpp:244]     Train net output #1: loss = 0.153633 (* 1 = 0.153633 loss)\nI0818 20:48:59.339058 21603 sgd_solver.cpp:166] Iteration 32000, lr = 0.8\nI0818 20:49:46.792659 21603 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0818 20:50:12.890990 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI0818 20:50:12.891031 21603 solver.cpp:404]     Test net output #1: loss = 0.744693 (* 1 = 0.744693 loss)\nI0818 20:50:13.303175 21603 solver.cpp:228] Iteration 32100, loss = 0.200031\nI0818 20:50:13.303218 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 20:50:13.303234 21603 solver.cpp:244]     Train net output #1: loss = 0.200031 (* 1 = 0.200031 loss)\nI0818 20:50:13.390663 21603 sgd_solver.cpp:166] Iteration 32100, lr = 0.8025\nI0818 20:51:00.894304 21603 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0818 20:51:26.991905 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7788\nI0818 20:51:26.991948 21603 solver.cpp:404]     Test net output #1: loss = 0.970075 (* 1 = 0.970075 loss)\nI0818 20:51:27.403918 21603 solver.cpp:228] Iteration 32200, loss = 0.167082\nI0818 20:51:27.403956 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:51:27.403972 21603 solver.cpp:244]     Train net output #1: loss = 0.167082 (* 1 = 0.167082 loss)\nI0818 20:51:27.490643 21603 sgd_solver.cpp:166] Iteration 32200, lr = 0.805\nI0818 20:52:14.920081 21603 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0818 20:52:41.098691 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70812\nI0818 20:52:41.098738 21603 solver.cpp:404]     Test net output #1: loss = 1.61769 (* 1 = 1.61769 loss)\nI0818 20:52:41.511031 21603 
solver.cpp:228] Iteration 32300, loss = 0.0779076\nI0818 20:52:41.511071 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:52:41.511086 21603 solver.cpp:244]     Train net output #1: loss = 0.077908 (* 1 = 0.077908 loss)\nI0818 20:52:41.604696 21603 sgd_solver.cpp:166] Iteration 32300, lr = 0.8075\nI0818 20:53:29.059470 21603 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0818 20:53:55.209460 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80644\nI0818 20:53:55.209506 21603 solver.cpp:404]     Test net output #1: loss = 0.794881 (* 1 = 0.794881 loss)\nI0818 20:53:55.621695 21603 solver.cpp:228] Iteration 32400, loss = 0.127789\nI0818 20:53:55.621733 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:53:55.621748 21603 solver.cpp:244]     Train net output #1: loss = 0.127789 (* 1 = 0.127789 loss)\nI0818 20:53:55.713887 21603 sgd_solver.cpp:166] Iteration 32400, lr = 0.81\nI0818 20:54:43.176378 21603 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0818 20:55:09.288528 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78716\nI0818 20:55:09.288574 21603 solver.cpp:404]     Test net output #1: loss = 0.842461 (* 1 = 0.842461 loss)\nI0818 20:55:09.700734 21603 solver.cpp:228] Iteration 32500, loss = 0.136288\nI0818 20:55:09.700775 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:55:09.700790 21603 solver.cpp:244]     Train net output #1: loss = 0.136288 (* 1 = 0.136288 loss)\nI0818 20:55:09.790102 21603 sgd_solver.cpp:166] Iteration 32500, lr = 0.8125\nI0818 20:55:57.327559 21603 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0818 20:56:23.437814 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82704\nI0818 20:56:23.437857 21603 solver.cpp:404]     Test net output #1: loss = 0.649557 (* 1 = 0.649557 loss)\nI0818 20:56:23.850221 21603 solver.cpp:228] Iteration 32600, loss = 0.147174\nI0818 20:56:23.850263 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.952\nI0818 20:56:23.850280 21603 solver.cpp:244]     Train net output #1: loss = 0.147175 (* 1 = 0.147175 loss)\nI0818 20:56:23.945394 21603 sgd_solver.cpp:166] Iteration 32600, lr = 0.815\nI0818 20:57:11.418647 21603 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0818 20:57:37.508874 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI0818 20:57:37.508918 21603 solver.cpp:404]     Test net output #1: loss = 0.888036 (* 1 = 0.888036 loss)\nI0818 20:57:37.921109 21603 solver.cpp:228] Iteration 32700, loss = 0.0936878\nI0818 20:57:37.921140 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:57:37.921155 21603 solver.cpp:244]     Train net output #1: loss = 0.0936882 (* 1 = 0.0936882 loss)\nI0818 20:57:38.015254 21603 sgd_solver.cpp:166] Iteration 32700, lr = 0.8175\nI0818 20:58:25.450539 21603 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0818 20:58:51.539086 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78576\nI0818 20:58:51.539129 21603 solver.cpp:404]     Test net output #1: loss = 0.85489 (* 1 = 0.85489 loss)\nI0818 20:58:51.951076 21603 solver.cpp:228] Iteration 32800, loss = 0.151329\nI0818 20:58:51.951113 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:58:51.951129 21603 solver.cpp:244]     Train net output #1: loss = 0.15133 (* 1 = 0.15133 loss)\nI0818 20:58:52.052368 21603 sgd_solver.cpp:166] Iteration 32800, lr = 0.82\nI0818 20:59:39.498659 21603 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0818 21:00:05.612745 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75172\nI0818 21:00:05.612787 21603 solver.cpp:404]     Test net output #1: loss = 1.15019 (* 1 = 1.15019 loss)\nI0818 21:00:06.024751 21603 solver.cpp:228] Iteration 32900, loss = 0.0889422\nI0818 21:00:06.024788 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:00:06.024804 21603 solver.cpp:244]     Train net output #1: loss = 0.0889425 (* 1 = 0.0889425 
loss)\nI0818 21:00:06.119626 21603 sgd_solver.cpp:166] Iteration 32900, lr = 0.8225\nI0818 21:00:53.620048 21603 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0818 21:01:19.746768 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83436\nI0818 21:01:19.746810 21603 solver.cpp:404]     Test net output #1: loss = 0.615096 (* 1 = 0.615096 loss)\nI0818 21:01:20.158931 21603 solver.cpp:228] Iteration 33000, loss = 0.15473\nI0818 21:01:20.158967 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 21:01:20.158982 21603 solver.cpp:244]     Train net output #1: loss = 0.154731 (* 1 = 0.154731 loss)\nI0818 21:01:20.247964 21603 sgd_solver.cpp:166] Iteration 33000, lr = 0.825\nI0818 21:02:07.714453 21603 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0818 21:02:33.802320 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76184\nI0818 21:02:33.802367 21603 solver.cpp:404]     Test net output #1: loss = 0.989834 (* 1 = 0.989834 loss)\nI0818 21:02:34.214807 21603 solver.cpp:228] Iteration 33100, loss = 0.118359\nI0818 21:02:34.214840 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:02:34.214856 21603 solver.cpp:244]     Train net output #1: loss = 0.11836 (* 1 = 0.11836 loss)\nI0818 21:02:34.304195 21603 sgd_solver.cpp:166] Iteration 33100, lr = 0.8275\nI0818 21:03:21.769472 21603 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0818 21:03:47.854256 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78868\nI0818 21:03:47.854300 21603 solver.cpp:404]     Test net output #1: loss = 0.865664 (* 1 = 0.865664 loss)\nI0818 21:03:48.266554 21603 solver.cpp:228] Iteration 33200, loss = 0.143419\nI0818 21:03:48.266598 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:03:48.266615 21603 solver.cpp:244]     Train net output #1: loss = 0.143419 (* 1 = 0.143419 loss)\nI0818 21:03:48.356984 21603 sgd_solver.cpp:166] Iteration 33200, lr = 0.83\nI0818 21:04:35.774776 21603 solver.cpp:337] 
Iteration 33300, Testing net (#0)\nI0818 21:05:01.904472 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77008\nI0818 21:05:01.904525 21603 solver.cpp:404]     Test net output #1: loss = 0.925916 (* 1 = 0.925916 loss)\nI0818 21:05:02.317687 21603 solver.cpp:228] Iteration 33300, loss = 0.135093\nI0818 21:05:02.317736 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:05:02.317754 21603 solver.cpp:244]     Train net output #1: loss = 0.135094 (* 1 = 0.135094 loss)\nI0818 21:05:02.410992 21603 sgd_solver.cpp:166] Iteration 33300, lr = 0.8325\nI0818 21:05:49.902482 21603 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0818 21:06:16.000246 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6688\nI0818 21:06:16.000289 21603 solver.cpp:404]     Test net output #1: loss = 1.47487 (* 1 = 1.47487 loss)\nI0818 21:06:16.413996 21603 solver.cpp:228] Iteration 33400, loss = 0.183643\nI0818 21:06:16.414047 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:06:16.414072 21603 solver.cpp:244]     Train net output #1: loss = 0.183644 (* 1 = 0.183644 loss)\nI0818 21:06:16.503044 21603 sgd_solver.cpp:166] Iteration 33400, lr = 0.835\nI0818 21:07:03.714951 21603 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0818 21:07:29.946733 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81588\nI0818 21:07:29.946786 21603 solver.cpp:404]     Test net output #1: loss = 0.760114 (* 1 = 0.760114 loss)\nI0818 21:07:30.360306 21603 solver.cpp:228] Iteration 33500, loss = 0.223572\nI0818 21:07:30.360364 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 21:07:30.360389 21603 solver.cpp:244]     Train net output #1: loss = 0.223572 (* 1 = 0.223572 loss)\nI0818 21:07:30.448209 21603 sgd_solver.cpp:166] Iteration 33500, lr = 0.8375\nI0818 21:08:17.603349 21603 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0818 21:08:43.895162 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70804\nI0818 
21:08:43.895215 21603 solver.cpp:404]     Test net output #1: loss = 1.35632 (* 1 = 1.35632 loss)\nI0818 21:08:44.308540 21603 solver.cpp:228] Iteration 33600, loss = 0.143613\nI0818 21:08:44.308598 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:08:44.308621 21603 solver.cpp:244]     Train net output #1: loss = 0.143613 (* 1 = 0.143613 loss)\nI0818 21:08:44.401239 21603 sgd_solver.cpp:166] Iteration 33600, lr = 0.84\nI0818 21:09:31.690599 21603 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0818 21:09:57.974834 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70916\nI0818 21:09:57.974884 21603 solver.cpp:404]     Test net output #1: loss = 1.46131 (* 1 = 1.46131 loss)\nI0818 21:09:58.387959 21603 solver.cpp:228] Iteration 33700, loss = 0.155593\nI0818 21:09:58.388017 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:09:58.388042 21603 solver.cpp:244]     Train net output #1: loss = 0.155594 (* 1 = 0.155594 loss)\nI0818 21:09:58.475628 21603 sgd_solver.cpp:166] Iteration 33700, lr = 0.8425\nI0818 21:10:45.766352 21603 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0818 21:11:12.024839 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66332\nI0818 21:11:12.024891 21603 solver.cpp:404]     Test net output #1: loss = 1.89741 (* 1 = 1.89741 loss)\nI0818 21:11:12.438377 21603 solver.cpp:228] Iteration 33800, loss = 0.14768\nI0818 21:11:12.438436 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:11:12.438460 21603 solver.cpp:244]     Train net output #1: loss = 0.14768 (* 1 = 0.14768 loss)\nI0818 21:11:12.530313 21603 sgd_solver.cpp:166] Iteration 33800, lr = 0.845\nI0818 21:11:59.794443 21603 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0818 21:12:26.022380 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7956\nI0818 21:12:26.022435 21603 solver.cpp:404]     Test net output #1: loss = 0.741602 (* 1 = 0.741602 loss)\nI0818 21:12:26.434299 21603 
solver.cpp:228] Iteration 33900, loss = 0.111854\nI0818 21:12:26.434356 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:12:26.434381 21603 solver.cpp:244]     Train net output #1: loss = 0.111854 (* 1 = 0.111854 loss)\nI0818 21:12:26.523123 21603 sgd_solver.cpp:166] Iteration 33900, lr = 0.8475\nI0818 21:13:13.907074 21603 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0818 21:13:40.197330 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76532\nI0818 21:13:40.197391 21603 solver.cpp:404]     Test net output #1: loss = 1.09753 (* 1 = 1.09753 loss)\nI0818 21:13:40.611006 21603 solver.cpp:228] Iteration 34000, loss = 0.23708\nI0818 21:13:40.611063 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 21:13:40.611088 21603 solver.cpp:244]     Train net output #1: loss = 0.23708 (* 1 = 0.23708 loss)\nI0818 21:13:40.701750 21603 sgd_solver.cpp:166] Iteration 34000, lr = 0.85\nI0818 21:14:28.029206 21603 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0818 21:14:54.288534 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI0818 21:14:54.288588 21603 solver.cpp:404]     Test net output #1: loss = 0.743775 (* 1 = 0.743775 loss)\nI0818 21:14:54.700917 21603 solver.cpp:228] Iteration 34100, loss = 0.138726\nI0818 21:14:54.700973 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:14:54.700999 21603 solver.cpp:244]     Train net output #1: loss = 0.138726 (* 1 = 0.138726 loss)\nI0818 21:14:54.788069 21603 sgd_solver.cpp:166] Iteration 34100, lr = 0.8525\nI0818 21:15:42.001739 21603 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0818 21:16:08.181694 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81604\nI0818 21:16:08.181742 21603 solver.cpp:404]     Test net output #1: loss = 0.677916 (* 1 = 0.677916 loss)\nI0818 21:16:08.595397 21603 solver.cpp:228] Iteration 34200, loss = 0.10318\nI0818 21:16:08.595453 21603 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0818 21:16:08.595479 21603 solver.cpp:244]     Train net output #1: loss = 0.103181 (* 1 = 0.103181 loss)\nI0818 21:16:08.680409 21603 sgd_solver.cpp:166] Iteration 34200, lr = 0.855\nI0818 21:16:56.093966 21603 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0818 21:17:22.286495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75708\nI0818 21:17:22.286574 21603 solver.cpp:404]     Test net output #1: loss = 0.929492 (* 1 = 0.929492 loss)\nI0818 21:17:22.699848 21603 solver.cpp:228] Iteration 34300, loss = 0.194989\nI0818 21:17:22.699888 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:17:22.699913 21603 solver.cpp:244]     Train net output #1: loss = 0.194989 (* 1 = 0.194989 loss)\nI0818 21:17:22.793217 21603 sgd_solver.cpp:166] Iteration 34300, lr = 0.8575\nI0818 21:18:10.272701 21603 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0818 21:18:36.602672 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78624\nI0818 21:18:36.602746 21603 solver.cpp:404]     Test net output #1: loss = 0.835641 (* 1 = 0.835641 loss)\nI0818 21:18:37.016445 21603 solver.cpp:228] Iteration 34400, loss = 0.223006\nI0818 21:18:37.016484 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 21:18:37.016508 21603 solver.cpp:244]     Train net output #1: loss = 0.223007 (* 1 = 0.223007 loss)\nI0818 21:18:37.103471 21603 sgd_solver.cpp:166] Iteration 34400, lr = 0.86\nI0818 21:19:24.444998 21603 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0818 21:19:50.778690 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74804\nI0818 21:19:50.778764 21603 solver.cpp:404]     Test net output #1: loss = 1.12563 (* 1 = 1.12563 loss)\nI0818 21:19:51.192152 21603 solver.cpp:228] Iteration 34500, loss = 0.192323\nI0818 21:19:51.192188 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 21:19:51.192214 21603 solver.cpp:244]     Train net output #1: loss = 0.192323 (* 1 = 0.192323 loss)\nI0818 
21:19:51.286782 21603 sgd_solver.cpp:166] Iteration 34500, lr = 0.8625\nI0818 21:20:38.659889 21603 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0818 21:21:04.999230 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82552\nI0818 21:21:04.999306 21603 solver.cpp:404]     Test net output #1: loss = 0.651771 (* 1 = 0.651771 loss)\nI0818 21:21:05.411583 21603 solver.cpp:228] Iteration 34600, loss = 0.138762\nI0818 21:21:05.411619 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:21:05.411645 21603 solver.cpp:244]     Train net output #1: loss = 0.138763 (* 1 = 0.138763 loss)\nI0818 21:21:05.507213 21603 sgd_solver.cpp:166] Iteration 34600, lr = 0.865\nI0818 21:21:52.916193 21603 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0818 21:22:19.200891 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83508\nI0818 21:22:19.200953 21603 solver.cpp:404]     Test net output #1: loss = 0.606335 (* 1 = 0.606335 loss)\nI0818 21:22:19.613039 21603 solver.cpp:228] Iteration 34700, loss = 0.201102\nI0818 21:22:19.613080 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:22:19.613095 21603 solver.cpp:244]     Train net output #1: loss = 0.201102 (* 1 = 0.201102 loss)\nI0818 21:22:19.710003 21603 sgd_solver.cpp:166] Iteration 34700, lr = 0.8675\nI0818 21:23:06.985913 21603 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0818 21:23:33.277843 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76956\nI0818 21:23:33.277909 21603 solver.cpp:404]     Test net output #1: loss = 0.93315 (* 1 = 0.93315 loss)\nI0818 21:23:33.689822 21603 solver.cpp:228] Iteration 34800, loss = 0.108892\nI0818 21:23:33.689862 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:23:33.689878 21603 solver.cpp:244]     Train net output #1: loss = 0.108892 (* 1 = 0.108892 loss)\nI0818 21:23:33.785291 21603 sgd_solver.cpp:166] Iteration 34800, lr = 0.87\nI0818 21:24:21.199709 21603 solver.cpp:337] Iteration 
34900, Testing net (#0)\nI0818 21:24:47.490674 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76164\nI0818 21:24:47.490737 21603 solver.cpp:404]     Test net output #1: loss = 1.07023 (* 1 = 1.07023 loss)\nI0818 21:24:47.902842 21603 solver.cpp:228] Iteration 34900, loss = 0.171046\nI0818 21:24:47.902882 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:24:47.902899 21603 solver.cpp:244]     Train net output #1: loss = 0.171047 (* 1 = 0.171047 loss)\nI0818 21:24:47.999276 21603 sgd_solver.cpp:166] Iteration 34900, lr = 0.8725\nI0818 21:25:35.449972 21603 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0818 21:26:01.534435 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80184\nI0818 21:26:01.534497 21603 solver.cpp:404]     Test net output #1: loss = 0.749001 (* 1 = 0.749001 loss)\nI0818 21:26:01.946692 21603 solver.cpp:228] Iteration 35000, loss = 0.230743\nI0818 21:26:01.946741 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 21:26:01.946758 21603 solver.cpp:244]     Train net output #1: loss = 0.230743 (* 1 = 0.230743 loss)\nI0818 21:26:02.038923 21603 sgd_solver.cpp:166] Iteration 35000, lr = 0.875\nI0818 21:26:48.886369 21603 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0818 21:27:14.889195 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6766\nI0818 21:27:14.889237 21603 solver.cpp:404]     Test net output #1: loss = 1.57023 (* 1 = 1.57023 loss)\nI0818 21:27:15.301475 21603 solver.cpp:228] Iteration 35100, loss = 0.153348\nI0818 21:27:15.301520 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:27:15.301537 21603 solver.cpp:244]     Train net output #1: loss = 0.153349 (* 1 = 0.153349 loss)\nI0818 21:27:15.388847 21603 sgd_solver.cpp:166] Iteration 35100, lr = 0.8775\nI0818 21:28:02.180086 21603 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0818 21:28:28.185966 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7556\nI0818 
21:28:28.186007 21603 solver.cpp:404]     Test net output #1: loss = 1.03911 (* 1 = 1.03911 loss)\nI0818 21:28:28.597172 21603 solver.cpp:228] Iteration 35200, loss = 0.0643273\nI0818 21:28:28.597218 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:28:28.597235 21603 solver.cpp:244]     Train net output #1: loss = 0.0643278 (* 1 = 0.0643278 loss)\nI0818 21:28:28.691843 21603 sgd_solver.cpp:166] Iteration 35200, lr = 0.88\nI0818 21:29:15.451184 21603 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0818 21:29:41.455168 21603 solver.cpp:404]     Test net output #0: accuracy = 0.65304\nI0818 21:29:41.455209 21603 solver.cpp:404]     Test net output #1: loss = 1.87394 (* 1 = 1.87394 loss)\nI0818 21:29:41.867816 21603 solver.cpp:228] Iteration 35300, loss = 0.151034\nI0818 21:29:41.867862 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:29:41.867879 21603 solver.cpp:244]     Train net output #1: loss = 0.151034 (* 1 = 0.151034 loss)\nI0818 21:29:41.956420 21603 sgd_solver.cpp:166] Iteration 35300, lr = 0.8825\nI0818 21:30:28.780877 21603 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0818 21:30:54.784890 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75456\nI0818 21:30:54.784930 21603 solver.cpp:404]     Test net output #1: loss = 1.05333 (* 1 = 1.05333 loss)\nI0818 21:30:55.197300 21603 solver.cpp:228] Iteration 35400, loss = 0.154041\nI0818 21:30:55.197348 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:30:55.197365 21603 solver.cpp:244]     Train net output #1: loss = 0.154042 (* 1 = 0.154042 loss)\nI0818 21:30:55.282662 21603 sgd_solver.cpp:166] Iteration 35400, lr = 0.885\nI0818 21:31:42.077520 21603 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0818 21:32:08.087632 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8216\nI0818 21:32:08.087682 21603 solver.cpp:404]     Test net output #1: loss = 0.716337 (* 1 = 0.716337 loss)\nI0818 21:32:08.499706 21603 
solver.cpp:228] Iteration 35500, loss = 0.162392\nI0818 21:32:08.499755 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 21:32:08.499773 21603 solver.cpp:244]     Train net output #1: loss = 0.162392 (* 1 = 0.162392 loss)\nI0818 21:32:08.588318 21603 sgd_solver.cpp:166] Iteration 35500, lr = 0.8875\nI0818 21:32:55.495509 21603 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0818 21:33:21.793516 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7942\nI0818 21:33:21.793583 21603 solver.cpp:404]     Test net output #1: loss = 0.75451 (* 1 = 0.75451 loss)\nI0818 21:33:22.205452 21603 solver.cpp:228] Iteration 35600, loss = 0.20469\nI0818 21:33:22.205493 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:33:22.205509 21603 solver.cpp:244]     Train net output #1: loss = 0.20469 (* 1 = 0.20469 loss)\nI0818 21:33:22.304700 21603 sgd_solver.cpp:166] Iteration 35600, lr = 0.89\nI0818 21:34:09.252746 21603 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0818 21:34:35.549924 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79672\nI0818 21:34:35.549988 21603 solver.cpp:404]     Test net output #1: loss = 0.712663 (* 1 = 0.712663 loss)\nI0818 21:34:35.962074 21603 solver.cpp:228] Iteration 35700, loss = 0.156\nI0818 21:34:35.962116 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:34:35.962131 21603 solver.cpp:244]     Train net output #1: loss = 0.156 (* 1 = 0.156 loss)\nI0818 21:34:36.054982 21603 sgd_solver.cpp:166] Iteration 35700, lr = 0.8925\nI0818 21:35:22.935421 21603 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0818 21:35:49.231647 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78956\nI0818 21:35:49.231714 21603 solver.cpp:404]     Test net output #1: loss = 0.904467 (* 1 = 0.904467 loss)\nI0818 21:35:49.645267 21603 solver.cpp:228] Iteration 35800, loss = 0.130192\nI0818 21:35:49.645308 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 
21:35:49.645323 21603 solver.cpp:244]     Train net output #1: loss = 0.130193 (* 1 = 0.130193 loss)\nI0818 21:35:49.736039 21603 sgd_solver.cpp:166] Iteration 35800, lr = 0.895\nI0818 21:36:36.640491 21603 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0818 21:37:02.936305 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8018\nI0818 21:37:02.936369 21603 solver.cpp:404]     Test net output #1: loss = 0.785066 (* 1 = 0.785066 loss)\nI0818 21:37:03.349640 21603 solver.cpp:228] Iteration 35900, loss = 0.1225\nI0818 21:37:03.349681 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:37:03.349699 21603 solver.cpp:244]     Train net output #1: loss = 0.122501 (* 1 = 0.122501 loss)\nI0818 21:37:03.441944 21603 sgd_solver.cpp:166] Iteration 35900, lr = 0.8975\nI0818 21:37:50.307268 21603 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0818 21:38:16.596873 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79544\nI0818 21:38:16.596940 21603 solver.cpp:404]     Test net output #1: loss = 0.878121 (* 1 = 0.878121 loss)\nI0818 21:38:17.010380 21603 solver.cpp:228] Iteration 36000, loss = 0.149417\nI0818 21:38:17.010421 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:38:17.010437 21603 solver.cpp:244]     Train net output #1: loss = 0.149418 (* 1 = 0.149418 loss)\nI0818 21:38:17.095751 21603 sgd_solver.cpp:166] Iteration 36000, lr = 0.9\nI0818 21:39:03.965818 21603 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0818 21:39:30.255458 21603 solver.cpp:404]     Test net output #0: accuracy = 0.64724\nI0818 21:39:30.255527 21603 solver.cpp:404]     Test net output #1: loss = 1.94349 (* 1 = 1.94349 loss)\nI0818 21:39:30.669397 21603 solver.cpp:228] Iteration 36100, loss = 0.231362\nI0818 21:39:30.669437 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 21:39:30.669453 21603 solver.cpp:244]     Train net output #1: loss = 0.231362 (* 1 = 0.231362 loss)\nI0818 21:39:30.761160 21603 
sgd_solver.cpp:166] Iteration 36100, lr = 0.9025\nI0818 21:40:17.629377 21603 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0818 21:40:43.918313 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66388\nI0818 21:40:43.918381 21603 solver.cpp:404]     Test net output #1: loss = 1.72176 (* 1 = 1.72176 loss)\nI0818 21:40:44.332067 21603 solver.cpp:228] Iteration 36200, loss = 0.165227\nI0818 21:40:44.332108 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:40:44.332124 21603 solver.cpp:244]     Train net output #1: loss = 0.165227 (* 1 = 0.165227 loss)\nI0818 21:40:44.416620 21603 sgd_solver.cpp:166] Iteration 36200, lr = 0.905\nI0818 21:41:31.327322 21603 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0818 21:41:57.621712 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75996\nI0818 21:41:57.621778 21603 solver.cpp:404]     Test net output #1: loss = 1.00979 (* 1 = 1.00979 loss)\nI0818 21:41:58.035320 21603 solver.cpp:228] Iteration 36300, loss = 0.111586\nI0818 21:41:58.035370 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:41:58.035388 21603 solver.cpp:244]     Train net output #1: loss = 0.111587 (* 1 = 0.111587 loss)\nI0818 21:41:58.119266 21603 sgd_solver.cpp:166] Iteration 36300, lr = 0.9075\nI0818 21:42:45.060361 21603 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0818 21:43:11.357260 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77084\nI0818 21:43:11.357328 21603 solver.cpp:404]     Test net output #1: loss = 0.851948 (* 1 = 0.851948 loss)\nI0818 21:43:11.770920 21603 solver.cpp:228] Iteration 36400, loss = 0.123801\nI0818 21:43:11.770968 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:43:11.770987 21603 solver.cpp:244]     Train net output #1: loss = 0.123802 (* 1 = 0.123802 loss)\nI0818 21:43:11.857002 21603 sgd_solver.cpp:166] Iteration 36400, lr = 0.91\nI0818 21:43:58.754127 21603 solver.cpp:337] Iteration 36500, Testing net 
(#0)\nI0818 21:44:25.044343 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0818 21:44:25.044411 21603 solver.cpp:404]     Test net output #1: loss = 1.03413 (* 1 = 1.03413 loss)\nI0818 21:44:25.457551 21603 solver.cpp:228] Iteration 36500, loss = 0.20184\nI0818 21:44:25.457605 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 21:44:25.457623 21603 solver.cpp:244]     Train net output #1: loss = 0.201841 (* 1 = 0.201841 loss)\nI0818 21:44:25.544948 21603 sgd_solver.cpp:166] Iteration 36500, lr = 0.9125\nI0818 21:45:12.470717 21603 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0818 21:45:38.762048 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72368\nI0818 21:45:38.762123 21603 solver.cpp:404]     Test net output #1: loss = 1.29377 (* 1 = 1.29377 loss)\nI0818 21:45:39.175736 21603 solver.cpp:228] Iteration 36600, loss = 0.251535\nI0818 21:45:39.175786 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 21:45:39.175802 21603 solver.cpp:244]     Train net output #1: loss = 0.251535 (* 1 = 0.251535 loss)\nI0818 21:45:39.265209 21603 sgd_solver.cpp:166] Iteration 36600, lr = 0.915\nI0818 21:46:26.113837 21603 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0818 21:46:52.396993 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8094\nI0818 21:46:52.397060 21603 solver.cpp:404]     Test net output #1: loss = 0.739967 (* 1 = 0.739967 loss)\nI0818 21:46:52.810748 21603 solver.cpp:228] Iteration 36700, loss = 0.160468\nI0818 21:46:52.810794 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:46:52.810811 21603 solver.cpp:244]     Train net output #1: loss = 0.160468 (* 1 = 0.160468 loss)\nI0818 21:46:52.894237 21603 sgd_solver.cpp:166] Iteration 36700, lr = 0.9175\nI0818 21:47:39.853957 21603 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0818 21:48:06.132071 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7694\nI0818 21:48:06.132114 21603 
solver.cpp:404]     Test net output #1: loss = 0.900075 (* 1 = 0.900075 loss)\nI0818 21:48:06.544358 21603 solver.cpp:228] Iteration 36800, loss = 0.193206\nI0818 21:48:06.544407 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:48:06.544423 21603 solver.cpp:244]     Train net output #1: loss = 0.193206 (* 1 = 0.193206 loss)\nI0818 21:48:06.628631 21603 sgd_solver.cpp:166] Iteration 36800, lr = 0.92\nI0818 21:48:53.635372 21603 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0818 21:49:19.919306 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79416\nI0818 21:49:19.919366 21603 solver.cpp:404]     Test net output #1: loss = 0.794381 (* 1 = 0.794381 loss)\nI0818 21:49:20.332794 21603 solver.cpp:228] Iteration 36900, loss = 0.128352\nI0818 21:49:20.332845 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:49:20.332870 21603 solver.cpp:244]     Train net output #1: loss = 0.128353 (* 1 = 0.128353 loss)\nI0818 21:49:20.423344 21603 sgd_solver.cpp:166] Iteration 36900, lr = 0.9225\nI0818 21:50:07.402856 21603 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0818 21:50:33.569656 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69376\nI0818 21:50:33.569700 21603 solver.cpp:404]     Test net output #1: loss = 1.46846 (* 1 = 1.46846 loss)\nI0818 21:50:33.981916 21603 solver.cpp:228] Iteration 37000, loss = 0.19806\nI0818 21:50:33.981966 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 21:50:33.981983 21603 solver.cpp:244]     Train net output #1: loss = 0.198061 (* 1 = 0.198061 loss)\nI0818 21:50:34.064748 21603 sgd_solver.cpp:166] Iteration 37000, lr = 0.925\nI0818 21:51:21.159453 21603 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0818 21:51:47.317802 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75612\nI0818 21:51:47.317848 21603 solver.cpp:404]     Test net output #1: loss = 1.06807 (* 1 = 1.06807 loss)\nI0818 21:51:47.731423 21603 solver.cpp:228] Iteration 
37100, loss = 0.188093\nI0818 21:51:47.731472 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:51:47.731489 21603 solver.cpp:244]     Train net output #1: loss = 0.188094 (* 1 = 0.188094 loss)\nI0818 21:51:47.824503 21603 sgd_solver.cpp:166] Iteration 37100, lr = 0.9275\nI0818 21:52:34.996950 21603 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0818 21:53:01.115545 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83228\nI0818 21:53:01.115592 21603 solver.cpp:404]     Test net output #1: loss = 0.639045 (* 1 = 0.639045 loss)\nI0818 21:53:01.529212 21603 solver.cpp:228] Iteration 37200, loss = 0.2064\nI0818 21:53:01.529263 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 21:53:01.529278 21603 solver.cpp:244]     Train net output #1: loss = 0.2064 (* 1 = 0.2064 loss)\nI0818 21:53:01.618381 21603 sgd_solver.cpp:166] Iteration 37200, lr = 0.93\nI0818 21:53:48.841930 21603 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0818 21:54:15.058578 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8154\nI0818 21:54:15.058624 21603 solver.cpp:404]     Test net output #1: loss = 0.678059 (* 1 = 0.678059 loss)\nI0818 21:54:15.470984 21603 solver.cpp:228] Iteration 37300, loss = 0.191543\nI0818 21:54:15.471035 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 21:54:15.471050 21603 solver.cpp:244]     Train net output #1: loss = 0.191543 (* 1 = 0.191543 loss)\nI0818 21:54:15.557806 21603 sgd_solver.cpp:166] Iteration 37300, lr = 0.9325\nI0818 21:55:02.614053 21603 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0818 21:55:28.718080 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7896\nI0818 21:55:28.718122 21603 solver.cpp:404]     Test net output #1: loss = 0.792773 (* 1 = 0.792773 loss)\nI0818 21:55:29.130360 21603 solver.cpp:228] Iteration 37400, loss = 0.120439\nI0818 21:55:29.130412 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:55:29.130429 
21603 solver.cpp:244]     Train net output #1: loss = 0.12044 (* 1 = 0.12044 loss)\nI0818 21:55:29.221397 21603 sgd_solver.cpp:166] Iteration 37400, lr = 0.935\nI0818 21:56:16.386811 21603 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0818 21:56:42.473745 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69092\nI0818 21:56:42.473788 21603 solver.cpp:404]     Test net output #1: loss = 1.66443 (* 1 = 1.66443 loss)\nI0818 21:56:42.886039 21603 solver.cpp:228] Iteration 37500, loss = 0.14589\nI0818 21:56:42.886088 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:56:42.886104 21603 solver.cpp:244]     Train net output #1: loss = 0.14589 (* 1 = 0.14589 loss)\nI0818 21:56:42.978157 21603 sgd_solver.cpp:166] Iteration 37500, lr = 0.9375\nI0818 21:57:30.085841 21603 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0818 21:57:56.162287 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80552\nI0818 21:57:56.162331 21603 solver.cpp:404]     Test net output #1: loss = 0.735449 (* 1 = 0.735449 loss)\nI0818 21:57:56.574745 21603 solver.cpp:228] Iteration 37600, loss = 0.179268\nI0818 21:57:56.574796 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:57:56.574812 21603 solver.cpp:244]     Train net output #1: loss = 0.179269 (* 1 = 0.179269 loss)\nI0818 21:57:56.667233 21603 sgd_solver.cpp:166] Iteration 37600, lr = 0.94\nI0818 21:58:43.630762 21603 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0818 21:59:09.808130 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83044\nI0818 21:59:09.808184 21603 solver.cpp:404]     Test net output #1: loss = 0.60673 (* 1 = 0.60673 loss)\nI0818 21:59:10.220587 21603 solver.cpp:228] Iteration 37700, loss = 0.108515\nI0818 21:59:10.220631 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:59:10.220648 21603 solver.cpp:244]     Train net output #1: loss = 0.108516 (* 1 = 0.108516 loss)\nI0818 21:59:10.307622 21603 sgd_solver.cpp:166] 
Iteration 37700, lr = 0.9425\nI0818 21:59:57.276639 21603 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0818 22:00:23.568985 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77732\nI0818 22:00:23.569036 21603 solver.cpp:404]     Test net output #1: loss = 0.867606 (* 1 = 0.867606 loss)\nI0818 22:00:23.980885 21603 solver.cpp:228] Iteration 37800, loss = 0.112968\nI0818 22:00:23.980929 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:00:23.980945 21603 solver.cpp:244]     Train net output #1: loss = 0.112968 (* 1 = 0.112968 loss)\nI0818 22:00:24.066026 21603 sgd_solver.cpp:166] Iteration 37800, lr = 0.945\nI0818 22:01:10.939368 21603 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0818 22:01:37.226037 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70376\nI0818 22:01:37.226091 21603 solver.cpp:404]     Test net output #1: loss = 1.31556 (* 1 = 1.31556 loss)\nI0818 22:01:37.638069 21603 solver.cpp:228] Iteration 37900, loss = 0.157127\nI0818 22:01:37.638113 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:01:37.638129 21603 solver.cpp:244]     Train net output #1: loss = 0.157128 (* 1 = 0.157128 loss)\nI0818 22:01:37.727556 21603 sgd_solver.cpp:166] Iteration 37900, lr = 0.9475\nI0818 22:02:24.766269 21603 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0818 22:02:51.055327 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74572\nI0818 22:02:51.055385 21603 solver.cpp:404]     Test net output #1: loss = 1.02377 (* 1 = 1.02377 loss)\nI0818 22:02:51.467629 21603 solver.cpp:228] Iteration 38000, loss = 0.150615\nI0818 22:02:51.467676 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:02:51.467694 21603 solver.cpp:244]     Train net output #1: loss = 0.150616 (* 1 = 0.150616 loss)\nI0818 22:02:51.560271 21603 sgd_solver.cpp:166] Iteration 38000, lr = 0.95\nI0818 22:03:38.724009 21603 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0818 22:04:05.015187 
21603 solver.cpp:404]     Test net output #0: accuracy = 0.76384\nI0818 22:04:05.015244 21603 solver.cpp:404]     Test net output #1: loss = 0.89827 (* 1 = 0.89827 loss)\nI0818 22:04:05.429105 21603 solver.cpp:228] Iteration 38100, loss = 0.153844\nI0818 22:04:05.429152 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:04:05.429167 21603 solver.cpp:244]     Train net output #1: loss = 0.153844 (* 1 = 0.153844 loss)\nI0818 22:04:05.518685 21603 sgd_solver.cpp:166] Iteration 38100, lr = 0.9525\nI0818 22:04:52.682237 21603 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0818 22:05:18.978396 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83\nI0818 22:05:18.978457 21603 solver.cpp:404]     Test net output #1: loss = 0.614439 (* 1 = 0.614439 loss)\nI0818 22:05:19.390966 21603 solver.cpp:228] Iteration 38200, loss = 0.147817\nI0818 22:05:19.391013 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:05:19.391038 21603 solver.cpp:244]     Train net output #1: loss = 0.147817 (* 1 = 0.147817 loss)\nI0818 22:05:19.479902 21603 sgd_solver.cpp:166] Iteration 38200, lr = 0.955\nI0818 22:06:06.629577 21603 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0818 22:06:32.920605 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83116\nI0818 22:06:32.920667 21603 solver.cpp:404]     Test net output #1: loss = 0.603568 (* 1 = 0.603568 loss)\nI0818 22:06:33.332959 21603 solver.cpp:228] Iteration 38300, loss = 0.202417\nI0818 22:06:33.333008 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:06:33.333031 21603 solver.cpp:244]     Train net output #1: loss = 0.202417 (* 1 = 0.202417 loss)\nI0818 22:06:33.427692 21603 sgd_solver.cpp:166] Iteration 38300, lr = 0.9575\nI0818 22:07:20.456954 21603 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0818 22:07:46.751777 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80704\nI0818 22:07:46.751837 21603 solver.cpp:404]     Test net output 
#1: loss = 0.754807 (* 1 = 0.754807 loss)\nI0818 22:07:47.165571 21603 solver.cpp:228] Iteration 38400, loss = 0.152339\nI0818 22:07:47.165618 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:07:47.165642 21603 solver.cpp:244]     Train net output #1: loss = 0.152339 (* 1 = 0.152339 loss)\nI0818 22:07:47.252710 21603 sgd_solver.cpp:166] Iteration 38400, lr = 0.96\nI0818 22:08:34.382302 21603 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0818 22:09:00.680547 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82728\nI0818 22:09:00.680610 21603 solver.cpp:404]     Test net output #1: loss = 0.613546 (* 1 = 0.613546 loss)\nI0818 22:09:01.092977 21603 solver.cpp:228] Iteration 38500, loss = 0.146537\nI0818 22:09:01.093021 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:09:01.093044 21603 solver.cpp:244]     Train net output #1: loss = 0.146538 (* 1 = 0.146538 loss)\nI0818 22:09:01.181161 21603 sgd_solver.cpp:166] Iteration 38500, lr = 0.9625\nI0818 22:09:48.383677 21603 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0818 22:10:14.666718 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75704\nI0818 22:10:14.666781 21603 solver.cpp:404]     Test net output #1: loss = 1.07437 (* 1 = 1.07437 loss)\nI0818 22:10:15.078805 21603 solver.cpp:228] Iteration 38600, loss = 0.150309\nI0818 22:10:15.078852 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:10:15.078897 21603 solver.cpp:244]     Train net output #1: loss = 0.150309 (* 1 = 0.150309 loss)\nI0818 22:10:15.171555 21603 sgd_solver.cpp:166] Iteration 38600, lr = 0.965\nI0818 22:11:02.362010 21603 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0818 22:11:28.649781 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76136\nI0818 22:11:28.649840 21603 solver.cpp:404]     Test net output #1: loss = 0.881266 (* 1 = 0.881266 loss)\nI0818 22:11:29.062117 21603 solver.cpp:228] Iteration 38700, loss = 0.150942\nI0818 
22:11:29.062163 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:11:29.062188 21603 solver.cpp:244]     Train net output #1: loss = 0.150942 (* 1 = 0.150942 loss)\nI0818 22:11:29.158510 21603 sgd_solver.cpp:166] Iteration 38700, lr = 0.9675\nI0818 22:12:16.361326 21603 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0818 22:12:42.648882 21603 solver.cpp:404]     Test net output #0: accuracy = 0.813\nI0818 22:12:42.648941 21603 solver.cpp:404]     Test net output #1: loss = 0.690632 (* 1 = 0.690632 loss)\nI0818 22:12:43.062175 21603 solver.cpp:228] Iteration 38800, loss = 0.123629\nI0818 22:12:43.062222 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:12:43.062253 21603 solver.cpp:244]     Train net output #1: loss = 0.12363 (* 1 = 0.12363 loss)\nI0818 22:12:43.150660 21603 sgd_solver.cpp:166] Iteration 38800, lr = 0.97\nI0818 22:13:30.217937 21603 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0818 22:13:56.490394 21603 solver.cpp:404]     Test net output #0: accuracy = 0.84628\nI0818 22:13:56.490456 21603 solver.cpp:404]     Test net output #1: loss = 0.518671 (* 1 = 0.518671 loss)\nI0818 22:13:56.903924 21603 solver.cpp:228] Iteration 38900, loss = 0.237109\nI0818 22:13:56.903970 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 22:13:56.903995 21603 solver.cpp:244]     Train net output #1: loss = 0.23711 (* 1 = 0.23711 loss)\nI0818 22:13:56.994464 21603 sgd_solver.cpp:166] Iteration 38900, lr = 0.9725\nI0818 22:14:43.915448 21603 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0818 22:15:10.202055 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78744\nI0818 22:15:10.202112 21603 solver.cpp:404]     Test net output #1: loss = 0.8301 (* 1 = 0.8301 loss)\nI0818 22:15:10.615617 21603 solver.cpp:228] Iteration 39000, loss = 0.216192\nI0818 22:15:10.615662 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 22:15:10.615687 21603 solver.cpp:244]     Train 
net output #1: loss = 0.216192 (* 1 = 0.216192 loss)\nI0818 22:15:10.707654 21603 sgd_solver.cpp:166] Iteration 39000, lr = 0.975\nI0818 22:15:57.748371 21603 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0818 22:16:24.029727 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77924\nI0818 22:16:24.029786 21603 solver.cpp:404]     Test net output #1: loss = 0.949969 (* 1 = 0.949969 loss)\nI0818 22:16:24.441906 21603 solver.cpp:228] Iteration 39100, loss = 0.150247\nI0818 22:16:24.441953 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:16:24.441977 21603 solver.cpp:244]     Train net output #1: loss = 0.150247 (* 1 = 0.150247 loss)\nI0818 22:16:24.529175 21603 sgd_solver.cpp:166] Iteration 39100, lr = 0.9775\nI0818 22:17:11.444142 21603 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0818 22:17:37.734793 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81976\nI0818 22:17:37.734854 21603 solver.cpp:404]     Test net output #1: loss = 0.651232 (* 1 = 0.651232 loss)\nI0818 22:17:38.148308 21603 solver.cpp:228] Iteration 39200, loss = 0.127934\nI0818 22:17:38.148355 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:17:38.148380 21603 solver.cpp:244]     Train net output #1: loss = 0.127934 (* 1 = 0.127934 loss)\nI0818 22:17:38.233516 21603 sgd_solver.cpp:166] Iteration 39200, lr = 0.98\nI0818 22:18:25.191995 21603 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0818 22:18:51.440892 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80328\nI0818 22:18:51.440950 21603 solver.cpp:404]     Test net output #1: loss = 0.723047 (* 1 = 0.723047 loss)\nI0818 22:18:51.853185 21603 solver.cpp:228] Iteration 39300, loss = 0.204719\nI0818 22:18:51.853232 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:18:51.853261 21603 solver.cpp:244]     Train net output #1: loss = 0.20472 (* 1 = 0.20472 loss)\nI0818 22:18:51.940438 21603 sgd_solver.cpp:166] Iteration 39300, lr = 
0.9825\nI0818 22:19:38.973358 21603 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0818 22:20:05.257880 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82312\nI0818 22:20:05.257937 21603 solver.cpp:404]     Test net output #1: loss = 0.612598 (* 1 = 0.612598 loss)\nI0818 22:20:05.671699 21603 solver.cpp:228] Iteration 39400, loss = 0.154045\nI0818 22:20:05.671746 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 22:20:05.671769 21603 solver.cpp:244]     Train net output #1: loss = 0.154046 (* 1 = 0.154046 loss)\nI0818 22:20:05.762193 21603 sgd_solver.cpp:166] Iteration 39400, lr = 0.985\nI0818 22:20:52.736811 21603 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0818 22:21:18.909941 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7646\nI0818 22:21:18.909997 21603 solver.cpp:404]     Test net output #1: loss = 0.955897 (* 1 = 0.955897 loss)\nI0818 22:21:19.322696 21603 solver.cpp:228] Iteration 39500, loss = 0.189725\nI0818 22:21:19.322741 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:21:19.322767 21603 solver.cpp:244]     Train net output #1: loss = 0.189725 (* 1 = 0.189725 loss)\nI0818 22:21:19.408962 21603 sgd_solver.cpp:166] Iteration 39500, lr = 0.9875\nI0818 22:22:06.265298 21603 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0818 22:22:32.407694 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76352\nI0818 22:22:32.407744 21603 solver.cpp:404]     Test net output #1: loss = 0.985 (* 1 = 0.985 loss)\nI0818 22:22:32.821347 21603 solver.cpp:228] Iteration 39600, loss = 0.140186\nI0818 22:22:32.821393 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:22:32.821418 21603 solver.cpp:244]     Train net output #1: loss = 0.140186 (* 1 = 0.140186 loss)\nI0818 22:22:32.908529 21603 sgd_solver.cpp:166] Iteration 39600, lr = 0.99\nI0818 22:23:19.845749 21603 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0818 22:23:46.080440 21603 solver.cpp:404]     
Test net output #0: accuracy = 0.79568\nI0818 22:23:46.080488 21603 solver.cpp:404]     Test net output #1: loss = 0.791687 (* 1 = 0.791687 loss)\nI0818 22:23:46.496379 21603 solver.cpp:228] Iteration 39700, loss = 0.180528\nI0818 22:23:46.496423 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:23:46.496439 21603 solver.cpp:244]     Train net output #1: loss = 0.180528 (* 1 = 0.180528 loss)\nI0818 22:23:46.587081 21603 sgd_solver.cpp:166] Iteration 39700, lr = 0.9925\nI0818 22:24:33.522292 21603 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0818 22:24:59.629926 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77756\nI0818 22:24:59.629979 21603 solver.cpp:404]     Test net output #1: loss = 0.841516 (* 1 = 0.841516 loss)\nI0818 22:25:00.042573 21603 solver.cpp:228] Iteration 39800, loss = 0.10319\nI0818 22:25:00.042611 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:25:00.042634 21603 solver.cpp:244]     Train net output #1: loss = 0.10319 (* 1 = 0.10319 loss)\nI0818 22:25:00.136216 21603 sgd_solver.cpp:166] Iteration 39800, lr = 0.995\nI0818 22:25:46.989451 21603 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0818 22:26:13.129462 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74084\nI0818 22:26:13.129513 21603 solver.cpp:404]     Test net output #1: loss = 1.09942 (* 1 = 1.09942 loss)\nI0818 22:26:13.543527 21603 solver.cpp:228] Iteration 39900, loss = 0.158672\nI0818 22:26:13.543570 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:26:13.543593 21603 solver.cpp:244]     Train net output #1: loss = 0.158672 (* 1 = 0.158672 loss)\nI0818 22:26:13.629408 21603 sgd_solver.cpp:166] Iteration 39900, lr = 0.9975\nI0818 22:27:00.479854 21603 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0818 22:27:26.717051 21603 solver.cpp:404]     Test net output #0: accuracy = 0.772\nI0818 22:27:26.717102 21603 solver.cpp:404]     Test net output #1: loss = 0.99252 (* 1 = 
0.99252 loss)\nI0818 22:27:27.129966 21603 solver.cpp:228] Iteration 40000, loss = 0.185555\nI0818 22:27:27.130007 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 22:27:27.130030 21603 solver.cpp:244]     Train net output #1: loss = 0.185555 (* 1 = 0.185555 loss)\nI0818 22:27:27.213543 21603 sgd_solver.cpp:166] Iteration 40000, lr = 1\nI0818 22:28:14.069514 21603 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0818 22:28:40.164969 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69796\nI0818 22:28:40.165020 21603 solver.cpp:404]     Test net output #1: loss = 1.46321 (* 1 = 1.46321 loss)\nI0818 22:28:40.577275 21603 solver.cpp:228] Iteration 40100, loss = 0.209958\nI0818 22:28:40.577316 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:28:40.577339 21603 solver.cpp:244]     Train net output #1: loss = 0.209959 (* 1 = 0.209959 loss)\nI0818 22:28:40.670689 21603 sgd_solver.cpp:166] Iteration 40100, lr = 1.0025\nI0818 22:29:27.591315 21603 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0818 22:29:53.796495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74068\nI0818 22:29:53.796546 21603 solver.cpp:404]     Test net output #1: loss = 1.16002 (* 1 = 1.16002 loss)\nI0818 22:29:54.210414 21603 solver.cpp:228] Iteration 40200, loss = 0.205952\nI0818 22:29:54.210453 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:29:54.210479 21603 solver.cpp:244]     Train net output #1: loss = 0.205952 (* 1 = 0.205952 loss)\nI0818 22:29:54.299341 21603 sgd_solver.cpp:166] Iteration 40200, lr = 1.005\nI0818 22:30:41.310595 21603 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0818 22:31:07.383937 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8074\nI0818 22:31:07.383987 21603 solver.cpp:404]     Test net output #1: loss = 0.759388 (* 1 = 0.759388 loss)\nI0818 22:31:07.796597 21603 solver.cpp:228] Iteration 40300, loss = 0.119363\nI0818 22:31:07.796636 21603 solver.cpp:244] 
    Train net output #0: accuracy = 0.984\nI0818 22:31:07.796661 21603 solver.cpp:244]     Train net output #1: loss = 0.119363 (* 1 = 0.119363 loss)\nI0818 22:31:07.882073 21603 sgd_solver.cpp:166] Iteration 40300, lr = 1.0075\nI0818 22:31:54.920907 21603 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0818 22:32:21.159402 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77208\nI0818 22:32:21.159463 21603 solver.cpp:404]     Test net output #1: loss = 0.923745 (* 1 = 0.923745 loss)\nI0818 22:32:21.573590 21603 solver.cpp:228] Iteration 40400, loss = 0.0793312\nI0818 22:32:21.573637 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:32:21.573662 21603 solver.cpp:244]     Train net output #1: loss = 0.0793315 (* 1 = 0.0793315 loss)\nI0818 22:32:21.658747 21603 sgd_solver.cpp:166] Iteration 40400, lr = 1.01\nI0818 22:33:08.593238 21603 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0818 22:33:34.863328 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74164\nI0818 22:33:34.863389 21603 solver.cpp:404]     Test net output #1: loss = 1.03726 (* 1 = 1.03726 loss)\nI0818 22:33:35.277428 21603 solver.cpp:228] Iteration 40500, loss = 0.200077\nI0818 22:33:35.277474 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 22:33:35.277498 21603 solver.cpp:244]     Train net output #1: loss = 0.200077 (* 1 = 0.200077 loss)\nI0818 22:33:35.365598 21603 sgd_solver.cpp:166] Iteration 40500, lr = 1.0125\nI0818 22:34:22.457634 21603 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0818 22:34:48.750212 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78236\nI0818 22:34:48.750272 21603 solver.cpp:404]     Test net output #1: loss = 0.908606 (* 1 = 0.908606 loss)\nI0818 22:34:49.162946 21603 solver.cpp:228] Iteration 40600, loss = 0.147175\nI0818 22:34:49.162992 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:34:49.163015 21603 solver.cpp:244]     Train net output #1: loss = 
0.147175 (* 1 = 0.147175 loss)\nI0818 22:34:49.252122 21603 sgd_solver.cpp:166] Iteration 40600, lr = 1.015\nI0818 22:35:36.226878 21603 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0818 22:36:02.520174 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80208\nI0818 22:36:02.520234 21603 solver.cpp:404]     Test net output #1: loss = 0.725744 (* 1 = 0.725744 loss)\nI0818 22:36:02.934061 21603 solver.cpp:228] Iteration 40700, loss = 0.135436\nI0818 22:36:02.934106 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:36:02.934130 21603 solver.cpp:244]     Train net output #1: loss = 0.135436 (* 1 = 0.135436 loss)\nI0818 22:36:03.019168 21603 sgd_solver.cpp:166] Iteration 40700, lr = 1.0175\nI0818 22:36:50.053640 21603 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0818 22:37:16.352493 21603 solver.cpp:404]     Test net output #0: accuracy = 0.799\nI0818 22:37:16.352558 21603 solver.cpp:404]     Test net output #1: loss = 0.737897 (* 1 = 0.737897 loss)\nI0818 22:37:16.765453 21603 solver.cpp:228] Iteration 40800, loss = 0.176998\nI0818 22:37:16.765501 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:37:16.765532 21603 solver.cpp:244]     Train net output #1: loss = 0.176998 (* 1 = 0.176998 loss)\nI0818 22:37:16.854975 21603 sgd_solver.cpp:166] Iteration 40800, lr = 1.02\nI0818 22:38:03.864395 21603 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0818 22:38:30.158668 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79332\nI0818 22:38:30.158730 21603 solver.cpp:404]     Test net output #1: loss = 0.838693 (* 1 = 0.838693 loss)\nI0818 22:38:30.570976 21603 solver.cpp:228] Iteration 40900, loss = 0.156705\nI0818 22:38:30.571019 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:38:30.571044 21603 solver.cpp:244]     Train net output #1: loss = 0.156705 (* 1 = 0.156705 loss)\nI0818 22:38:30.666579 21603 sgd_solver.cpp:166] Iteration 40900, lr = 1.0225\nI0818 
22:39:17.645728 21603 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0818 22:39:43.932909 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7754\nI0818 22:39:43.932970 21603 solver.cpp:404]     Test net output #1: loss = 0.982924 (* 1 = 0.982924 loss)\nI0818 22:39:44.345634 21603 solver.cpp:228] Iteration 41000, loss = 0.209789\nI0818 22:39:44.345676 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 22:39:44.345701 21603 solver.cpp:244]     Train net output #1: loss = 0.20979 (* 1 = 0.20979 loss)\nI0818 22:39:44.437507 21603 sgd_solver.cpp:166] Iteration 41000, lr = 1.025\nI0818 22:40:31.377493 21603 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0818 22:40:57.671617 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80588\nI0818 22:40:57.671677 21603 solver.cpp:404]     Test net output #1: loss = 0.733017 (* 1 = 0.733017 loss)\nI0818 22:40:58.084564 21603 solver.cpp:228] Iteration 41100, loss = 0.0907883\nI0818 22:40:58.084607 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:40:58.084630 21603 solver.cpp:244]     Train net output #1: loss = 0.0907886 (* 1 = 0.0907886 loss)\nI0818 22:40:58.177045 21603 sgd_solver.cpp:166] Iteration 41100, lr = 1.0275\nI0818 22:41:45.147128 21603 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0818 22:42:11.440186 21603 solver.cpp:404]     Test net output #0: accuracy = 0.65692\nI0818 22:42:11.440249 21603 solver.cpp:404]     Test net output #1: loss = 1.78617 (* 1 = 1.78617 loss)\nI0818 22:42:11.854583 21603 solver.cpp:228] Iteration 41200, loss = 0.140062\nI0818 22:42:11.854629 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:42:11.854653 21603 solver.cpp:244]     Train net output #1: loss = 0.140062 (* 1 = 0.140062 loss)\nI0818 22:42:11.940794 21603 sgd_solver.cpp:166] Iteration 41200, lr = 1.03\nI0818 22:42:58.914476 21603 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0818 22:43:25.230561 21603 solver.cpp:404]     Test net 
output #0: accuracy = 0.7764\nI0818 22:43:25.230633 21603 solver.cpp:404]     Test net output #1: loss = 0.891241 (* 1 = 0.891241 loss)\nI0818 22:43:25.644376 21603 solver.cpp:228] Iteration 41300, loss = 0.12347\nI0818 22:43:25.644414 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:43:25.644439 21603 solver.cpp:244]     Train net output #1: loss = 0.123471 (* 1 = 0.123471 loss)\nI0818 22:43:25.736991 21603 sgd_solver.cpp:166] Iteration 41300, lr = 1.0325\nI0818 22:44:12.733994 21603 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0818 22:44:39.046418 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79\nI0818 22:44:39.046485 21603 solver.cpp:404]     Test net output #1: loss = 0.869695 (* 1 = 0.869695 loss)\nI0818 22:44:39.468351 21603 solver.cpp:228] Iteration 41400, loss = 0.139733\nI0818 22:44:39.468400 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:44:39.468423 21603 solver.cpp:244]     Train net output #1: loss = 0.139734 (* 1 = 0.139734 loss)\nI0818 22:44:39.547050 21603 sgd_solver.cpp:166] Iteration 41400, lr = 1.035\nI0818 22:45:26.530439 21603 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0818 22:45:52.514050 21603 solver.cpp:404]     Test net output #0: accuracy = 0.737\nI0818 22:45:52.514098 21603 solver.cpp:404]     Test net output #1: loss = 1.20318 (* 1 = 1.20318 loss)\nI0818 22:45:52.930932 21603 solver.cpp:228] Iteration 41500, loss = 0.171544\nI0818 22:45:52.930989 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:45:52.931006 21603 solver.cpp:244]     Train net output #1: loss = 0.171544 (* 1 = 0.171544 loss)\nI0818 22:45:53.016575 21603 sgd_solver.cpp:166] Iteration 41500, lr = 1.0375\nI0818 22:46:40.077160 21603 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0818 22:47:06.046494 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8158\nI0818 22:47:06.046543 21603 solver.cpp:404]     Test net output #1: loss = 0.709584 (* 1 = 0.709584 
loss)\nI0818 22:47:06.458832 21603 solver.cpp:228] Iteration 41600, loss = 0.174466\nI0818 22:47:06.458879 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:47:06.458895 21603 solver.cpp:244]     Train net output #1: loss = 0.174466 (* 1 = 0.174466 loss)\nI0818 22:47:06.557257 21603 sgd_solver.cpp:166] Iteration 41600, lr = 1.04\nI0818 22:47:53.732496 21603 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0818 22:48:19.700062 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79224\nI0818 22:48:19.700109 21603 solver.cpp:404]     Test net output #1: loss = 0.887363 (* 1 = 0.887363 loss)\nI0818 22:48:20.112494 21603 solver.cpp:228] Iteration 41700, loss = 0.108808\nI0818 22:48:20.112541 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:48:20.112558 21603 solver.cpp:244]     Train net output #1: loss = 0.108808 (* 1 = 0.108808 loss)\nI0818 22:48:20.202091 21603 sgd_solver.cpp:166] Iteration 41700, lr = 1.0425\nI0818 22:49:07.273996 21603 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0818 22:49:33.253861 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70836\nI0818 22:49:33.253916 21603 solver.cpp:404]     Test net output #1: loss = 1.32764 (* 1 = 1.32764 loss)\nI0818 22:49:33.670670 21603 solver.cpp:228] Iteration 41800, loss = 0.14375\nI0818 22:49:33.670718 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:49:33.670735 21603 solver.cpp:244]     Train net output #1: loss = 0.143751 (* 1 = 0.143751 loss)\nI0818 22:49:33.755717 21603 sgd_solver.cpp:166] Iteration 41800, lr = 1.045\nI0818 22:50:20.574174 21603 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0818 22:50:46.552914 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76428\nI0818 22:50:46.552959 21603 solver.cpp:404]     Test net output #1: loss = 0.999001 (* 1 = 0.999001 loss)\nI0818 22:50:46.965370 21603 solver.cpp:228] Iteration 41900, loss = 0.16469\nI0818 22:50:46.965420 21603 solver.cpp:244]    
 Train net output #0: accuracy = 0.928\nI0818 22:50:46.965436 21603 solver.cpp:244]     Train net output #1: loss = 0.16469 (* 1 = 0.16469 loss)\nI0818 22:50:47.061517 21603 sgd_solver.cpp:166] Iteration 41900, lr = 1.0475\nI0818 22:51:33.847930 21603 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0818 22:51:59.827025 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81836\nI0818 22:51:59.827069 21603 solver.cpp:404]     Test net output #1: loss = 0.689445 (* 1 = 0.689445 loss)\nI0818 22:52:00.239364 21603 solver.cpp:228] Iteration 42000, loss = 0.115023\nI0818 22:52:00.239413 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:52:00.239430 21603 solver.cpp:244]     Train net output #1: loss = 0.115023 (* 1 = 0.115023 loss)\nI0818 22:52:00.331938 21603 sgd_solver.cpp:166] Iteration 42000, lr = 1.05\nI0818 22:52:47.187041 21603 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0818 22:53:13.165436 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74088\nI0818 22:53:13.165487 21603 solver.cpp:404]     Test net output #1: loss = 1.0276 (* 1 = 1.0276 loss)\nI0818 22:53:13.578400 21603 solver.cpp:228] Iteration 42100, loss = 0.146819\nI0818 22:53:13.578450 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:53:13.578475 21603 solver.cpp:244]     Train net output #1: loss = 0.146819 (* 1 = 0.146819 loss)\nI0818 22:53:13.673522 21603 sgd_solver.cpp:166] Iteration 42100, lr = 1.0525\nI0818 22:54:00.829623 21603 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0818 22:54:26.810215 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7274\nI0818 22:54:26.810268 21603 solver.cpp:404]     Test net output #1: loss = 1.14589 (* 1 = 1.14589 loss)\nI0818 22:54:27.222527 21603 solver.cpp:228] Iteration 42200, loss = 0.173534\nI0818 22:54:27.222566 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:54:27.222591 21603 solver.cpp:244]     Train net output #1: loss = 0.173534 (* 1 = 
0.173534 loss)\nI0818 22:54:27.312062 21603 sgd_solver.cpp:166] Iteration 42200, lr = 1.055\nI0818 22:55:14.456107 21603 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0818 22:55:40.437968 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75436\nI0818 22:55:40.438019 21603 solver.cpp:404]     Test net output #1: loss = 0.973804 (* 1 = 0.973804 loss)\nI0818 22:55:40.850989 21603 solver.cpp:228] Iteration 42300, loss = 0.131389\nI0818 22:55:40.851039 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:55:40.851065 21603 solver.cpp:244]     Train net output #1: loss = 0.13139 (* 1 = 0.13139 loss)\nI0818 22:55:40.945933 21603 sgd_solver.cpp:166] Iteration 42300, lr = 1.0575\nI0818 22:56:27.927558 21603 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0818 22:56:53.907188 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70432\nI0818 22:56:53.907240 21603 solver.cpp:404]     Test net output #1: loss = 1.3136 (* 1 = 1.3136 loss)\nI0818 22:56:54.320240 21603 solver.cpp:228] Iteration 42400, loss = 0.137175\nI0818 22:56:54.320291 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:56:54.320317 21603 solver.cpp:244]     Train net output #1: loss = 0.137176 (* 1 = 0.137176 loss)\nI0818 22:56:54.409126 21603 sgd_solver.cpp:166] Iteration 42400, lr = 1.06\nI0818 22:57:41.349056 21603 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0818 22:58:07.329550 21603 solver.cpp:404]     Test net output #0: accuracy = 0.789441\nI0818 22:58:07.329601 21603 solver.cpp:404]     Test net output #1: loss = 0.754837 (* 1 = 0.754837 loss)\nI0818 22:58:07.742189 21603 solver.cpp:228] Iteration 42500, loss = 0.223143\nI0818 22:58:07.742238 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 22:58:07.742264 21603 solver.cpp:244]     Train net output #1: loss = 0.223143 (* 1 = 0.223143 loss)\nI0818 22:58:07.830142 21603 sgd_solver.cpp:166] Iteration 42500, lr = 1.0625\nI0818 22:58:54.752519 21603 
solver.cpp:337] Iteration 42600, Testing net (#0)\nI0818 22:59:20.740094 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0818 22:59:20.740157 21603 solver.cpp:404]     Test net output #1: loss = 0.79328 (* 1 = 0.79328 loss)\nI0818 22:59:21.152858 21603 solver.cpp:228] Iteration 42600, loss = 0.17395\nI0818 22:59:21.152906 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 22:59:21.152931 21603 solver.cpp:244]     Train net output #1: loss = 0.173951 (* 1 = 0.173951 loss)\nI0818 22:59:21.237798 21603 sgd_solver.cpp:166] Iteration 42600, lr = 1.065\nI0818 23:00:08.165779 21603 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0818 23:00:34.187656 21603 solver.cpp:404]     Test net output #0: accuracy = 0.84404\nI0818 23:00:34.187721 21603 solver.cpp:404]     Test net output #1: loss = 0.545971 (* 1 = 0.545971 loss)\nI0818 23:00:34.600476 21603 solver.cpp:228] Iteration 42700, loss = 0.234173\nI0818 23:00:34.600527 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 23:00:34.600550 21603 solver.cpp:244]     Train net output #1: loss = 0.234173 (* 1 = 0.234173 loss)\nI0818 23:00:34.692890 21603 sgd_solver.cpp:166] Iteration 42700, lr = 1.0675\nI0818 23:01:21.630311 21603 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0818 23:01:47.642019 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7832\nI0818 23:01:47.642088 21603 solver.cpp:404]     Test net output #1: loss = 0.786484 (* 1 = 0.786484 loss)\nI0818 23:01:48.054824 21603 solver.cpp:228] Iteration 42800, loss = 0.150174\nI0818 23:01:48.054875 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 23:01:48.054899 21603 solver.cpp:244]     Train net output #1: loss = 0.150174 (* 1 = 0.150174 loss)\nI0818 23:01:48.144031 21603 sgd_solver.cpp:166] Iteration 42800, lr = 1.07\nI0818 23:02:35.077376 21603 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0818 23:03:01.408900 21603 solver.cpp:404]     Test net output #0: accuracy = 
0.757\nI0818 23:03:01.408956 21603 solver.cpp:404]     Test net output #1: loss = 1.01555 (* 1 = 1.01555 loss)\nI0818 23:03:01.822541 21603 solver.cpp:228] Iteration 42900, loss = 0.234659\nI0818 23:03:01.822583 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 23:03:01.822599 21603 solver.cpp:244]     Train net output #1: loss = 0.234659 (* 1 = 0.234659 loss)\nI0818 23:03:01.907191 21603 sgd_solver.cpp:166] Iteration 42900, lr = 1.0725\nI0818 23:03:48.913312 21603 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0818 23:04:15.197403 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80856\nI0818 23:04:15.197458 21603 solver.cpp:404]     Test net output #1: loss = 0.659312 (* 1 = 0.659312 loss)\nI0818 23:04:15.611109 21603 solver.cpp:228] Iteration 43000, loss = 0.159584\nI0818 23:04:15.611155 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 23:04:15.611171 21603 solver.cpp:244]     Train net output #1: loss = 0.159584 (* 1 = 0.159584 loss)\nI0818 23:04:15.702605 21603 sgd_solver.cpp:166] Iteration 43000, lr = 1.075\nI0818 23:05:02.709034 21603 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0818 23:05:29.026603 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7596\nI0818 23:05:29.026664 21603 solver.cpp:404]     Test net output #1: loss = 0.93352 (* 1 = 0.93352 loss)\nI0818 23:05:29.440532 21603 solver.cpp:228] Iteration 43100, loss = 0.11005\nI0818 23:05:29.440575 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:05:29.440598 21603 solver.cpp:244]     Train net output #1: loss = 0.11005 (* 1 = 0.11005 loss)\nI0818 23:05:29.526396 21603 sgd_solver.cpp:166] Iteration 43100, lr = 1.0775\nI0818 23:06:16.703052 21603 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0818 23:06:42.990823 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0818 23:06:42.990883 21603 solver.cpp:404]     Test net output #1: loss = 0.799812 (* 1 = 0.799812 loss)\nI0818 23:06:43.404695 
21603 solver.cpp:228] Iteration 43200, loss = 0.115483\nI0818 23:06:43.404747 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:06:43.404772 21603 solver.cpp:244]     Train net output #1: loss = 0.115483 (* 1 = 0.115483 loss)\nI0818 23:06:43.495630 21603 sgd_solver.cpp:166] Iteration 43200, lr = 1.08\nI0818 23:07:30.635653 21603 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0818 23:07:56.921617 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76748\nI0818 23:07:56.921671 21603 solver.cpp:404]     Test net output #1: loss = 0.907534 (* 1 = 0.907534 loss)\nI0818 23:07:57.333770 21603 solver.cpp:228] Iteration 43300, loss = 0.0994344\nI0818 23:07:57.333818 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:07:57.333837 21603 solver.cpp:244]     Train net output #1: loss = 0.0994347 (* 1 = 0.0994347 loss)\nI0818 23:07:57.423190 21603 sgd_solver.cpp:166] Iteration 43300, lr = 1.0825\nI0818 23:08:44.428128 21603 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0818 23:09:10.701318 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77172\nI0818 23:09:10.701370 21603 solver.cpp:404]     Test net output #1: loss = 0.949067 (* 1 = 0.949067 loss)\nI0818 23:09:11.114578 21603 solver.cpp:228] Iteration 43400, loss = 0.154602\nI0818 23:09:11.114624 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:09:11.114640 21603 solver.cpp:244]     Train net output #1: loss = 0.154603 (* 1 = 0.154603 loss)\nI0818 23:09:11.198859 21603 sgd_solver.cpp:166] Iteration 43400, lr = 1.085\nI0818 23:09:58.414230 21603 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0818 23:10:24.650256 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75008\nI0818 23:10:24.650315 21603 solver.cpp:404]     Test net output #1: loss = 1.01793 (* 1 = 1.01793 loss)\nI0818 23:10:25.062134 21603 solver.cpp:228] Iteration 43500, loss = 0.077606\nI0818 23:10:25.062186 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.992\nI0818 23:10:25.062203 21603 solver.cpp:244]     Train net output #1: loss = 0.0776063 (* 1 = 0.0776063 loss)\nI0818 23:10:25.156772 21603 sgd_solver.cpp:166] Iteration 43500, lr = 1.0875\nI0818 23:11:12.133641 21603 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0818 23:11:38.132804 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75696\nI0818 23:11:38.132864 21603 solver.cpp:404]     Test net output #1: loss = 0.977488 (* 1 = 0.977488 loss)\nI0818 23:11:38.544342 21603 solver.cpp:228] Iteration 43600, loss = 0.116232\nI0818 23:11:38.544391 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:11:38.544407 21603 solver.cpp:244]     Train net output #1: loss = 0.116232 (* 1 = 0.116232 loss)\nI0818 23:11:38.631870 21603 sgd_solver.cpp:166] Iteration 43600, lr = 1.09\nI0818 23:12:25.639442 21603 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0818 23:12:51.634709 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI0818 23:12:51.634749 21603 solver.cpp:404]     Test net output #1: loss = 0.77679 (* 1 = 0.77679 loss)\nI0818 23:12:52.046578 21603 solver.cpp:228] Iteration 43700, loss = 0.0688775\nI0818 23:12:52.046612 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:12:52.046627 21603 solver.cpp:244]     Train net output #1: loss = 0.0688779 (* 1 = 0.0688779 loss)\nI0818 23:12:52.138353 21603 sgd_solver.cpp:166] Iteration 43700, lr = 1.0925\nI0818 23:13:39.174281 21603 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0818 23:14:05.175222 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82248\nI0818 23:14:05.175261 21603 solver.cpp:404]     Test net output #1: loss = 0.650113 (* 1 = 0.650113 loss)\nI0818 23:14:05.587529 21603 solver.cpp:228] Iteration 43800, loss = 0.236402\nI0818 23:14:05.587575 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:14:05.587591 21603 solver.cpp:244]     Train net output #1: loss = 0.236402 (* 1 = 0.236402 
loss)\nI0818 23:14:05.678560 21603 sgd_solver.cpp:166] Iteration 43800, lr = 1.095\nI0818 23:14:52.472280 21603 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0818 23:15:18.470316 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79852\nI0818 23:15:18.470360 21603 solver.cpp:404]     Test net output #1: loss = 0.716002 (* 1 = 0.716002 loss)\nI0818 23:15:18.881175 21603 solver.cpp:228] Iteration 43900, loss = 0.153869\nI0818 23:15:18.881220 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:15:18.881237 21603 solver.cpp:244]     Train net output #1: loss = 0.153869 (* 1 = 0.153869 loss)\nI0818 23:15:18.967718 21603 sgd_solver.cpp:166] Iteration 43900, lr = 1.0975\nI0818 23:16:05.811594 21603 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0818 23:16:31.810171 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76576\nI0818 23:16:31.810211 21603 solver.cpp:404]     Test net output #1: loss = 0.984313 (* 1 = 0.984313 loss)\nI0818 23:16:32.222510 21603 solver.cpp:228] Iteration 44000, loss = 0.184711\nI0818 23:16:32.222556 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:16:32.222573 21603 solver.cpp:244]     Train net output #1: loss = 0.184711 (* 1 = 0.184711 loss)\nI0818 23:16:32.313423 21603 sgd_solver.cpp:166] Iteration 44000, lr = 1.1\nI0818 23:17:19.443377 21603 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0818 23:17:45.442204 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79288\nI0818 23:17:45.442245 21603 solver.cpp:404]     Test net output #1: loss = 0.760631 (* 1 = 0.760631 loss)\nI0818 23:17:45.853212 21603 solver.cpp:228] Iteration 44100, loss = 0.225718\nI0818 23:17:45.853258 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 23:17:45.853274 21603 solver.cpp:244]     Train net output #1: loss = 0.225718 (* 1 = 0.225718 loss)\nI0818 23:17:45.949766 21603 sgd_solver.cpp:166] Iteration 44100, lr = 1.1025\nI0818 23:18:33.059382 21603 solver.cpp:337] 
Iteration 44200, Testing net (#0)\nI0818 23:18:59.058914 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76796\nI0818 23:18:59.058954 21603 solver.cpp:404]     Test net output #1: loss = 0.886748 (* 1 = 0.886748 loss)\nI0818 23:18:59.470820 21603 solver.cpp:228] Iteration 44200, loss = 0.137527\nI0818 23:18:59.470866 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:18:59.470882 21603 solver.cpp:244]     Train net output #1: loss = 0.137527 (* 1 = 0.137527 loss)\nI0818 23:18:59.562901 21603 sgd_solver.cpp:166] Iteration 44200, lr = 1.105\nI0818 23:19:46.715121 21603 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0818 23:20:12.709889 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79388\nI0818 23:20:12.709941 21603 solver.cpp:404]     Test net output #1: loss = 0.715377 (* 1 = 0.715377 loss)\nI0818 23:20:13.122098 21603 solver.cpp:228] Iteration 44300, loss = 0.139561\nI0818 23:20:13.122148 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:20:13.122164 21603 solver.cpp:244]     Train net output #1: loss = 0.139561 (* 1 = 0.139561 loss)\nI0818 23:20:13.207381 21603 sgd_solver.cpp:166] Iteration 44300, lr = 1.1075\nI0818 23:21:00.352107 21603 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0818 23:21:26.349324 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74884\nI0818 23:21:26.349367 21603 solver.cpp:404]     Test net output #1: loss = 1.03984 (* 1 = 1.03984 loss)\nI0818 23:21:26.761131 21603 solver.cpp:228] Iteration 44400, loss = 0.221751\nI0818 23:21:26.761168 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 23:21:26.761185 21603 solver.cpp:244]     Train net output #1: loss = 0.221752 (* 1 = 0.221752 loss)\nI0818 23:21:26.852262 21603 sgd_solver.cpp:166] Iteration 44400, lr = 1.11\nI0818 23:22:14.019728 21603 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0818 23:22:40.288290 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77148\nI0818 
23:22:40.288352 21603 solver.cpp:404]     Test net output #1: loss = 0.892212 (* 1 = 0.892212 loss)\nI0818 23:22:40.702251 21603 solver.cpp:228] Iteration 44500, loss = 0.148662\nI0818 23:22:40.702302 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:22:40.702319 21603 solver.cpp:244]     Train net output #1: loss = 0.148662 (* 1 = 0.148662 loss)\nI0818 23:22:40.789299 21603 sgd_solver.cpp:166] Iteration 44500, lr = 1.1125\nI0818 23:23:28.184859 21603 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0818 23:23:54.482714 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80904\nI0818 23:23:54.482779 21603 solver.cpp:404]     Test net output #1: loss = 0.662923 (* 1 = 0.662923 loss)\nI0818 23:23:54.896304 21603 solver.cpp:228] Iteration 44600, loss = 0.174221\nI0818 23:23:54.896356 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 23:23:54.896373 21603 solver.cpp:244]     Train net output #1: loss = 0.174222 (* 1 = 0.174222 loss)\nI0818 23:23:54.986259 21603 sgd_solver.cpp:166] Iteration 44600, lr = 1.115\nI0818 23:24:42.347568 21603 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0818 23:25:08.644618 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81568\nI0818 23:25:08.644682 21603 solver.cpp:404]     Test net output #1: loss = 0.759057 (* 1 = 0.759057 loss)\nI0818 23:25:09.058436 21603 solver.cpp:228] Iteration 44700, loss = 0.145939\nI0818 23:25:09.058488 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:25:09.058506 21603 solver.cpp:244]     Train net output #1: loss = 0.145939 (* 1 = 0.145939 loss)\nI0818 23:25:09.153694 21603 sgd_solver.cpp:166] Iteration 44700, lr = 1.1175\nI0818 23:25:56.476838 21603 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0818 23:26:22.773028 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71816\nI0818 23:26:22.773093 21603 solver.cpp:404]     Test net output #1: loss = 1.27486 (* 1 = 1.27486 loss)\nI0818 23:26:23.186656 
21603 solver.cpp:228] Iteration 44800, loss = 0.150674\nI0818 23:26:23.186707 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:26:23.186722 21603 solver.cpp:244]     Train net output #1: loss = 0.150674 (* 1 = 0.150674 loss)\nI0818 23:26:23.276543 21603 sgd_solver.cpp:166] Iteration 44800, lr = 1.12\nI0818 23:27:10.631713 21603 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0818 23:27:36.928953 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76704\nI0818 23:27:36.929014 21603 solver.cpp:404]     Test net output #1: loss = 0.96518 (* 1 = 0.96518 loss)\nI0818 23:27:37.342599 21603 solver.cpp:228] Iteration 44900, loss = 0.135452\nI0818 23:27:37.342651 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:27:37.342669 21603 solver.cpp:244]     Train net output #1: loss = 0.135452 (* 1 = 0.135452 loss)\nI0818 23:27:37.437041 21603 sgd_solver.cpp:166] Iteration 44900, lr = 1.1225\nI0818 23:28:24.801934 21603 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0818 23:28:51.096139 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7856\nI0818 23:28:51.096204 21603 solver.cpp:404]     Test net output #1: loss = 0.782864 (* 1 = 0.782864 loss)\nI0818 23:28:51.509620 21603 solver.cpp:228] Iteration 45000, loss = 0.181466\nI0818 23:28:51.509671 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 23:28:51.509688 21603 solver.cpp:244]     Train net output #1: loss = 0.181466 (* 1 = 0.181466 loss)\nI0818 23:28:51.599565 21603 sgd_solver.cpp:166] Iteration 45000, lr = 1.125\nI0818 23:29:38.971973 21603 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0818 23:30:05.269289 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83048\nI0818 23:30:05.269351 21603 solver.cpp:404]     Test net output #1: loss = 0.575299 (* 1 = 0.575299 loss)\nI0818 23:30:05.682844 21603 solver.cpp:228] Iteration 45100, loss = 0.117186\nI0818 23:30:05.682896 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.952\nI0818 23:30:05.682911 21603 solver.cpp:244]     Train net output #1: loss = 0.117187 (* 1 = 0.117187 loss)\nI0818 23:30:05.773645 21603 sgd_solver.cpp:166] Iteration 45100, lr = 1.1275\nI0818 23:30:53.135823 21603 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0818 23:31:19.431813 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81544\nI0818 23:31:19.431875 21603 solver.cpp:404]     Test net output #1: loss = 0.63764 (* 1 = 0.63764 loss)\nI0818 23:31:19.845504 21603 solver.cpp:228] Iteration 45200, loss = 0.201838\nI0818 23:31:19.845553 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:31:19.845571 21603 solver.cpp:244]     Train net output #1: loss = 0.201839 (* 1 = 0.201839 loss)\nI0818 23:31:19.939052 21603 sgd_solver.cpp:166] Iteration 45200, lr = 1.13\nI0818 23:32:07.311830 21603 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0818 23:32:33.608472 21603 solver.cpp:404]     Test net output #0: accuracy = 0.816\nI0818 23:32:33.608538 21603 solver.cpp:404]     Test net output #1: loss = 0.593291 (* 1 = 0.593291 loss)\nI0818 23:32:34.021371 21603 solver.cpp:228] Iteration 45300, loss = 0.158383\nI0818 23:32:34.021425 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:32:34.021441 21603 solver.cpp:244]     Train net output #1: loss = 0.158383 (* 1 = 0.158383 loss)\nI0818 23:32:34.108927 21603 sgd_solver.cpp:166] Iteration 45300, lr = 1.1325\nI0818 23:33:21.458252 21603 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0818 23:33:47.752003 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74356\nI0818 23:33:47.752068 21603 solver.cpp:404]     Test net output #1: loss = 1.10951 (* 1 = 1.10951 loss)\nI0818 23:33:48.164145 21603 solver.cpp:228] Iteration 45400, loss = 0.163925\nI0818 23:33:48.164199 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:33:48.164216 21603 solver.cpp:244]     Train net output #1: loss = 0.163925 (* 1 = 0.163925 loss)\nI0818 
23:33:48.252519 21603 sgd_solver.cpp:166] Iteration 45400, lr = 1.135\nI0818 23:34:35.562438 21603 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0818 23:35:01.849238 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74236\nI0818 23:35:01.849303 21603 solver.cpp:404]     Test net output #1: loss = 0.917726 (* 1 = 0.917726 loss)\nI0818 23:35:02.262526 21603 solver.cpp:228] Iteration 45500, loss = 0.10758\nI0818 23:35:02.262578 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:35:02.262594 21603 solver.cpp:244]     Train net output #1: loss = 0.10758 (* 1 = 0.10758 loss)\nI0818 23:35:02.351855 21603 sgd_solver.cpp:166] Iteration 45500, lr = 1.1375\nI0818 23:35:49.763365 21603 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0818 23:36:16.056483 21603 solver.cpp:404]     Test net output #0: accuracy = 0.807\nI0818 23:36:16.056548 21603 solver.cpp:404]     Test net output #1: loss = 0.645429 (* 1 = 0.645429 loss)\nI0818 23:36:16.469991 21603 solver.cpp:228] Iteration 45600, loss = 0.160743\nI0818 23:36:16.470041 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:36:16.470057 21603 solver.cpp:244]     Train net output #1: loss = 0.160744 (* 1 = 0.160744 loss)\nI0818 23:36:16.566555 21603 sgd_solver.cpp:166] Iteration 45600, lr = 1.14\nI0818 23:37:03.989142 21603 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0818 23:37:30.283015 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76096\nI0818 23:37:30.283080 21603 solver.cpp:404]     Test net output #1: loss = 0.919828 (* 1 = 0.919828 loss)\nI0818 23:37:30.696771 21603 solver.cpp:228] Iteration 45700, loss = 0.113172\nI0818 23:37:30.696822 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:37:30.696840 21603 solver.cpp:244]     Train net output #1: loss = 0.113173 (* 1 = 0.113173 loss)\nI0818 23:37:30.788295 21603 sgd_solver.cpp:166] Iteration 45700, lr = 1.1425\nI0818 23:38:18.111923 21603 solver.cpp:337] Iteration 45800, 
Testing net (#0)\nI0818 23:38:44.402596 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77704\nI0818 23:38:44.402664 21603 solver.cpp:404]     Test net output #1: loss = 0.802221 (* 1 = 0.802221 loss)\nI0818 23:38:44.815809 21603 solver.cpp:228] Iteration 45800, loss = 0.181884\nI0818 23:38:44.815861 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 23:38:44.815879 21603 solver.cpp:244]     Train net output #1: loss = 0.181885 (* 1 = 0.181885 loss)\nI0818 23:38:44.903969 21603 sgd_solver.cpp:166] Iteration 45800, lr = 1.145\nI0818 23:39:32.152021 21603 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0818 23:39:58.437119 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77644\nI0818 23:39:58.437188 21603 solver.cpp:404]     Test net output #1: loss = 0.957663 (* 1 = 0.957663 loss)\nI0818 23:39:58.849459 21603 solver.cpp:228] Iteration 45900, loss = 0.18162\nI0818 23:39:58.849511 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 23:39:58.849529 21603 solver.cpp:244]     Train net output #1: loss = 0.181621 (* 1 = 0.181621 loss)\nI0818 23:39:58.943253 21603 sgd_solver.cpp:166] Iteration 45900, lr = 1.1475\nI0818 23:40:46.295204 21603 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0818 23:41:12.574982 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77096\nI0818 23:41:12.575028 21603 solver.cpp:404]     Test net output #1: loss = 0.905559 (* 1 = 0.905559 loss)\nI0818 23:41:12.987406 21603 solver.cpp:228] Iteration 46000, loss = 0.161838\nI0818 23:41:12.987459 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:41:12.987475 21603 solver.cpp:244]     Train net output #1: loss = 0.161838 (* 1 = 0.161838 loss)\nI0818 23:41:13.083730 21603 sgd_solver.cpp:166] Iteration 46000, lr = 1.15\nI0818 23:42:00.453613 21603 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0818 23:42:26.720767 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74392\nI0818 23:42:26.720813 
21603 solver.cpp:404]     Test net output #1: loss = 1.06005 (* 1 = 1.06005 loss)\nI0818 23:42:27.132997 21603 solver.cpp:228] Iteration 46100, loss = 0.125205\nI0818 23:42:27.133049 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:42:27.133064 21603 solver.cpp:244]     Train net output #1: loss = 0.125206 (* 1 = 0.125206 loss)\nI0818 23:42:27.224563 21603 sgd_solver.cpp:166] Iteration 46100, lr = 1.1525\nI0818 23:43:14.693691 21603 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0818 23:43:40.955647 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69796\nI0818 23:43:40.955694 21603 solver.cpp:404]     Test net output #1: loss = 1.32229 (* 1 = 1.32229 loss)\nI0818 23:43:41.367908 21603 solver.cpp:228] Iteration 46200, loss = 0.217414\nI0818 23:43:41.367956 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:43:41.367972 21603 solver.cpp:244]     Train net output #1: loss = 0.217414 (* 1 = 0.217414 loss)\nI0818 23:43:41.457059 21603 sgd_solver.cpp:166] Iteration 46200, lr = 1.155\nI0818 23:44:28.717525 21603 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0818 23:44:54.991421 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7716\nI0818 23:44:54.991467 21603 solver.cpp:404]     Test net output #1: loss = 0.882131 (* 1 = 0.882131 loss)\nI0818 23:44:55.403755 21603 solver.cpp:228] Iteration 46300, loss = 0.217361\nI0818 23:44:55.403806 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 23:44:55.403822 21603 solver.cpp:244]     Train net output #1: loss = 0.217361 (* 1 = 0.217361 loss)\nI0818 23:44:55.494493 21603 sgd_solver.cpp:166] Iteration 46300, lr = 1.1575\nI0818 23:45:42.700960 21603 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0818 23:46:08.950392 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75196\nI0818 23:46:08.950438 21603 solver.cpp:404]     Test net output #1: loss = 0.9654 (* 1 = 0.9654 loss)\nI0818 23:46:09.362411 21603 solver.cpp:228] 
Iteration 46400, loss = 0.237028\nI0818 23:46:09.362462 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 23:46:09.362478 21603 solver.cpp:244]     Train net output #1: loss = 0.237029 (* 1 = 0.237029 loss)\nI0818 23:46:09.449362 21603 sgd_solver.cpp:166] Iteration 46400, lr = 1.16\nI0818 23:46:56.728889 21603 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0818 23:47:23.007185 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79688\nI0818 23:47:23.007241 21603 solver.cpp:404]     Test net output #1: loss = 0.823853 (* 1 = 0.823853 loss)\nI0818 23:47:23.419612 21603 solver.cpp:228] Iteration 46500, loss = 0.237908\nI0818 23:47:23.419661 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:47:23.419677 21603 solver.cpp:244]     Train net output #1: loss = 0.237908 (* 1 = 0.237908 loss)\nI0818 23:47:23.504870 21603 sgd_solver.cpp:166] Iteration 46500, lr = 1.1625\nI0818 23:48:10.806602 21603 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0818 23:48:37.086057 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77724\nI0818 23:48:37.086104 21603 solver.cpp:404]     Test net output #1: loss = 0.983234 (* 1 = 0.983234 loss)\nI0818 23:48:37.498287 21603 solver.cpp:228] Iteration 46600, loss = 0.223236\nI0818 23:48:37.498338 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 23:48:37.498355 21603 solver.cpp:244]     Train net output #1: loss = 0.223237 (* 1 = 0.223237 loss)\nI0818 23:48:37.592054 21603 sgd_solver.cpp:166] Iteration 46600, lr = 1.165\nI0818 23:49:24.925465 21603 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0818 23:49:51.186518 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI0818 23:49:51.186560 21603 solver.cpp:404]     Test net output #1: loss = 1.02521 (* 1 = 1.02521 loss)\nI0818 23:49:51.598644 21603 solver.cpp:228] Iteration 46700, loss = 0.15983\nI0818 23:49:51.598692 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 
23:49:51.598708 21603 solver.cpp:244]     Train net output #1: loss = 0.159831 (* 1 = 0.159831 loss)\nI0818 23:49:51.690292 21603 sgd_solver.cpp:166] Iteration 46700, lr = 1.1675\nI0818 23:50:39.173974 21603 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0818 23:51:05.266518 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI0818 23:51:05.266563 21603 solver.cpp:404]     Test net output #1: loss = 0.704534 (* 1 = 0.704534 loss)\nI0818 23:51:05.678516 21603 solver.cpp:228] Iteration 46800, loss = 0.177657\nI0818 23:51:05.678565 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 23:51:05.678581 21603 solver.cpp:244]     Train net output #1: loss = 0.177658 (* 1 = 0.177658 loss)\nI0818 23:51:05.768100 21603 sgd_solver.cpp:166] Iteration 46800, lr = 1.17\nI0818 23:51:53.095456 21603 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0818 23:52:19.201122 21603 solver.cpp:404]     Test net output #0: accuracy = 0.782\nI0818 23:52:19.201177 21603 solver.cpp:404]     Test net output #1: loss = 0.869221 (* 1 = 0.869221 loss)\nI0818 23:52:19.613456 21603 solver.cpp:228] Iteration 46900, loss = 0.154244\nI0818 23:52:19.613492 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0818 23:52:19.613507 21603 solver.cpp:244]     Train net output #1: loss = 0.154244 (* 1 = 0.154244 loss)\nI0818 23:52:19.701611 21603 sgd_solver.cpp:166] Iteration 46900, lr = 1.1725\nI0818 23:53:07.152539 21603 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0818 23:53:33.427868 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74016\nI0818 23:53:33.427922 21603 solver.cpp:404]     Test net output #1: loss = 1.05245 (* 1 = 1.05245 loss)\nI0818 23:53:33.840065 21603 solver.cpp:228] Iteration 47000, loss = 0.272842\nI0818 23:53:33.840106 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 23:53:33.840122 21603 solver.cpp:244]     Train net output #1: loss = 0.272843 (* 1 = 0.272843 loss)\nI0818 23:53:33.931277 21603 
sgd_solver.cpp:166] Iteration 47000, lr = 1.175\nI0818 23:54:21.421619 21603 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0818 23:54:47.714470 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74452\nI0818 23:54:47.714531 21603 solver.cpp:404]     Test net output #1: loss = 0.97492 (* 1 = 0.97492 loss)\nI0818 23:54:48.128048 21603 solver.cpp:228] Iteration 47100, loss = 0.12246\nI0818 23:54:48.128090 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:54:48.128114 21603 solver.cpp:244]     Train net output #1: loss = 0.122461 (* 1 = 0.122461 loss)\nI0818 23:54:48.220625 21603 sgd_solver.cpp:166] Iteration 47100, lr = 1.1775\nI0818 23:55:35.674526 21603 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0818 23:56:01.953861 21603 solver.cpp:404]     Test net output #0: accuracy = 0.761\nI0818 23:56:01.953922 21603 solver.cpp:404]     Test net output #1: loss = 0.998944 (* 1 = 0.998944 loss)\nI0818 23:56:02.366029 21603 solver.cpp:228] Iteration 47200, loss = 0.14881\nI0818 23:56:02.366086 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:56:02.366111 21603 solver.cpp:244]     Train net output #1: loss = 0.14881 (* 1 = 0.14881 loss)\nI0818 23:56:02.449617 21603 sgd_solver.cpp:166] Iteration 47200, lr = 1.18\nI0818 23:56:49.747401 21603 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0818 23:57:16.026675 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81604\nI0818 23:57:16.026729 21603 solver.cpp:404]     Test net output #1: loss = 0.676654 (* 1 = 0.676654 loss)\nI0818 23:57:16.439018 21603 solver.cpp:228] Iteration 47300, loss = 0.137727\nI0818 23:57:16.439072 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 23:57:16.439098 21603 solver.cpp:244]     Train net output #1: loss = 0.137727 (* 1 = 0.137727 loss)\nI0818 23:57:16.534045 21603 sgd_solver.cpp:166] Iteration 47300, lr = 1.1825\nI0818 23:58:03.786029 21603 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0818 
23:58:30.054189 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81636\nI0818 23:58:30.054249 21603 solver.cpp:404]     Test net output #1: loss = 0.627731 (* 1 = 0.627731 loss)\nI0818 23:58:30.467196 21603 solver.cpp:228] Iteration 47400, loss = 0.158846\nI0818 23:58:30.467252 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:58:30.467278 21603 solver.cpp:244]     Train net output #1: loss = 0.158846 (* 1 = 0.158846 loss)\nI0818 23:58:30.558171 21603 sgd_solver.cpp:166] Iteration 47400, lr = 1.185\nI0818 23:59:17.803314 21603 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0818 23:59:44.080863 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74432\nI0818 23:59:44.080922 21603 solver.cpp:404]     Test net output #1: loss = 1.05959 (* 1 = 1.05959 loss)\nI0818 23:59:44.494485 21603 solver.cpp:228] Iteration 47500, loss = 0.238223\nI0818 23:59:44.494544 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 23:59:44.494570 21603 solver.cpp:244]     Train net output #1: loss = 0.238223 (* 1 = 0.238223 loss)\nI0818 23:59:44.582947 21603 sgd_solver.cpp:166] Iteration 47500, lr = 1.1875\nI0819 00:00:31.791609 21603 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0819 00:00:58.048368 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80492\nI0819 00:00:58.048426 21603 solver.cpp:404]     Test net output #1: loss = 0.715579 (* 1 = 0.715579 loss)\nI0819 00:00:58.460552 21603 solver.cpp:228] Iteration 47600, loss = 0.167339\nI0819 00:00:58.460606 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:00:58.460631 21603 solver.cpp:244]     Train net output #1: loss = 0.167339 (* 1 = 0.167339 loss)\nI0819 00:00:58.550580 21603 sgd_solver.cpp:166] Iteration 47600, lr = 1.19\nI0819 00:01:45.910600 21603 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0819 00:02:12.182682 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78532\nI0819 00:02:12.182742 21603 solver.cpp:404]     
Test net output #1: loss = 0.834015 (* 1 = 0.834015 loss)\nI0819 00:02:12.595379 21603 solver.cpp:228] Iteration 47700, loss = 0.229206\nI0819 00:02:12.595433 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 00:02:12.595458 21603 solver.cpp:244]     Train net output #1: loss = 0.229206 (* 1 = 0.229206 loss)\nI0819 00:02:12.686875 21603 sgd_solver.cpp:166] Iteration 47700, lr = 1.1925\nI0819 00:03:00.028367 21603 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0819 00:03:26.318912 21603 solver.cpp:404]     Test net output #0: accuracy = 0.62748\nI0819 00:03:26.318970 21603 solver.cpp:404]     Test net output #1: loss = 1.62994 (* 1 = 1.62994 loss)\nI0819 00:03:26.731173 21603 solver.cpp:228] Iteration 47800, loss = 0.266902\nI0819 00:03:26.731227 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 00:03:26.731253 21603 solver.cpp:244]     Train net output #1: loss = 0.266902 (* 1 = 0.266902 loss)\nI0819 00:03:26.819108 21603 sgd_solver.cpp:166] Iteration 47800, lr = 1.195\nI0819 00:04:14.099092 21603 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0819 00:04:40.373991 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7866\nI0819 00:04:40.374049 21603 solver.cpp:404]     Test net output #1: loss = 0.802987 (* 1 = 0.802987 loss)\nI0819 00:04:40.787580 21603 solver.cpp:228] Iteration 47900, loss = 0.131881\nI0819 00:04:40.787633 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:04:40.787658 21603 solver.cpp:244]     Train net output #1: loss = 0.131882 (* 1 = 0.131882 loss)\nI0819 00:04:40.873350 21603 sgd_solver.cpp:166] Iteration 47900, lr = 1.1975\nI0819 00:05:28.221606 21603 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0819 00:05:54.498415 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76424\nI0819 00:05:54.498472 21603 solver.cpp:404]     Test net output #1: loss = 0.899138 (* 1 = 0.899138 loss)\nI0819 00:05:54.912015 21603 solver.cpp:228] Iteration 48000, loss = 
0.239837\nI0819 00:05:54.912070 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 00:05:54.912094 21603 solver.cpp:244]     Train net output #1: loss = 0.239838 (* 1 = 0.239838 loss)\nI0819 00:05:55.005709 21603 sgd_solver.cpp:166] Iteration 48000, lr = 1.2\nI0819 00:06:42.237617 21603 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0819 00:07:08.514122 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79616\nI0819 00:07:08.514179 21603 solver.cpp:404]     Test net output #1: loss = 0.796121 (* 1 = 0.796121 loss)\nI0819 00:07:08.927978 21603 solver.cpp:228] Iteration 48100, loss = 0.1447\nI0819 00:07:08.928030 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:07:08.928055 21603 solver.cpp:244]     Train net output #1: loss = 0.1447 (* 1 = 0.1447 loss)\nI0819 00:07:09.015569 21603 sgd_solver.cpp:166] Iteration 48100, lr = 1.2025\nI0819 00:07:56.498222 21603 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0819 00:08:22.767613 21603 solver.cpp:404]     Test net output #0: accuracy = 0.719\nI0819 00:08:22.767673 21603 solver.cpp:404]     Test net output #1: loss = 1.12975 (* 1 = 1.12975 loss)\nI0819 00:08:23.181098 21603 solver.cpp:228] Iteration 48200, loss = 0.0772901\nI0819 00:08:23.181155 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:08:23.181182 21603 solver.cpp:244]     Train net output #1: loss = 0.0772904 (* 1 = 0.0772904 loss)\nI0819 00:08:23.276819 21603 sgd_solver.cpp:166] Iteration 48200, lr = 1.205\nI0819 00:09:10.727674 21603 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0819 00:09:37.026286 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78876\nI0819 00:09:37.026356 21603 solver.cpp:404]     Test net output #1: loss = 0.740929 (* 1 = 0.740929 loss)\nI0819 00:09:37.439963 21603 solver.cpp:228] Iteration 48300, loss = 0.164214\nI0819 00:09:37.440016 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 00:09:37.440042 21603 
solver.cpp:244]     Train net output #1: loss = 0.164214 (* 1 = 0.164214 loss)\nI0819 00:09:37.537819 21603 sgd_solver.cpp:166] Iteration 48300, lr = 1.2075\nI0819 00:10:24.998623 21603 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0819 00:10:51.408725 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75392\nI0819 00:10:51.408797 21603 solver.cpp:404]     Test net output #1: loss = 0.955917 (* 1 = 0.955917 loss)\nI0819 00:10:51.821976 21603 solver.cpp:228] Iteration 48400, loss = 0.18997\nI0819 00:10:51.822031 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:10:51.822055 21603 solver.cpp:244]     Train net output #1: loss = 0.18997 (* 1 = 0.18997 loss)\nI0819 00:10:51.912528 21603 sgd_solver.cpp:166] Iteration 48400, lr = 1.21\nI0819 00:11:39.364017 21603 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0819 00:12:05.657814 21603 solver.cpp:404]     Test net output #0: accuracy = 0.84016\nI0819 00:12:05.657874 21603 solver.cpp:404]     Test net output #1: loss = 0.534775 (* 1 = 0.534775 loss)\nI0819 00:12:06.071230 21603 solver.cpp:228] Iteration 48500, loss = 0.157713\nI0819 00:12:06.071283 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:12:06.071300 21603 solver.cpp:244]     Train net output #1: loss = 0.157713 (* 1 = 0.157713 loss)\nI0819 00:12:06.162156 21603 sgd_solver.cpp:166] Iteration 48500, lr = 1.2125\nI0819 00:12:53.502589 21603 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0819 00:13:19.802783 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72476\nI0819 00:13:19.802851 21603 solver.cpp:404]     Test net output #1: loss = 1.25815 (* 1 = 1.25815 loss)\nI0819 00:13:20.224766 21603 solver.cpp:228] Iteration 48600, loss = 0.107806\nI0819 00:13:20.224828 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:13:20.224859 21603 solver.cpp:244]     Train net output #1: loss = 0.107806 (* 1 = 0.107806 loss)\nI0819 00:13:20.310365 21603 sgd_solver.cpp:166] 
Iteration 48600, lr = 1.215\nI0819 00:14:07.689226 21603 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0819 00:14:33.983145 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79112\nI0819 00:14:33.983220 21603 solver.cpp:404]     Test net output #1: loss = 0.760442 (* 1 = 0.760442 loss)\nI0819 00:14:34.395396 21603 solver.cpp:228] Iteration 48700, loss = 0.166492\nI0819 00:14:34.395427 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:14:34.395452 21603 solver.cpp:244]     Train net output #1: loss = 0.166492 (* 1 = 0.166492 loss)\nI0819 00:14:34.492398 21603 sgd_solver.cpp:166] Iteration 48700, lr = 1.2175\nI0819 00:15:21.963788 21603 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0819 00:15:48.259922 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72892\nI0819 00:15:48.259989 21603 solver.cpp:404]     Test net output #1: loss = 1.24785 (* 1 = 1.24785 loss)\nI0819 00:15:48.674348 21603 solver.cpp:228] Iteration 48800, loss = 0.167426\nI0819 00:15:48.674382 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 00:15:48.674399 21603 solver.cpp:244]     Train net output #1: loss = 0.167427 (* 1 = 0.167427 loss)\nI0819 00:15:48.767318 21603 sgd_solver.cpp:166] Iteration 48800, lr = 1.22\nI0819 00:16:36.202736 21603 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0819 00:17:02.514580 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80068\nI0819 00:17:02.514644 21603 solver.cpp:404]     Test net output #1: loss = 0.731579 (* 1 = 0.731579 loss)\nI0819 00:17:02.927984 21603 solver.cpp:228] Iteration 48900, loss = 0.166743\nI0819 00:17:02.928017 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:17:02.928042 21603 solver.cpp:244]     Train net output #1: loss = 0.166743 (* 1 = 0.166743 loss)\nI0819 00:17:03.022560 21603 sgd_solver.cpp:166] Iteration 48900, lr = 1.2225\nI0819 00:17:50.588460 21603 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0819 00:18:16.909541 
21603 solver.cpp:404]     Test net output #0: accuracy = 0.7626\nI0819 00:18:16.909623 21603 solver.cpp:404]     Test net output #1: loss = 0.907001 (* 1 = 0.907001 loss)\nI0819 00:18:17.323462 21603 solver.cpp:228] Iteration 49000, loss = 0.152705\nI0819 00:18:17.323493 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 00:18:17.323524 21603 solver.cpp:244]     Train net output #1: loss = 0.152705 (* 1 = 0.152705 loss)\nI0819 00:18:17.416708 21603 sgd_solver.cpp:166] Iteration 49000, lr = 1.225\nI0819 00:19:04.862035 21603 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0819 00:19:31.156961 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75164\nI0819 00:19:31.157035 21603 solver.cpp:404]     Test net output #1: loss = 0.938854 (* 1 = 0.938854 loss)\nI0819 00:19:31.570543 21603 solver.cpp:228] Iteration 49100, loss = 0.169191\nI0819 00:19:31.570578 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:19:31.570603 21603 solver.cpp:244]     Train net output #1: loss = 0.169191 (* 1 = 0.169191 loss)\nI0819 00:19:31.661502 21603 sgd_solver.cpp:166] Iteration 49100, lr = 1.2275\nI0819 00:20:19.132436 21603 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0819 00:20:45.429390 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81076\nI0819 00:20:45.429466 21603 solver.cpp:404]     Test net output #1: loss = 0.710086 (* 1 = 0.710086 loss)\nI0819 00:20:45.841939 21603 solver.cpp:228] Iteration 49200, loss = 0.104241\nI0819 00:20:45.841970 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:20:45.841995 21603 solver.cpp:244]     Train net output #1: loss = 0.104241 (* 1 = 0.104241 loss)\nI0819 00:20:45.932610 21603 sgd_solver.cpp:166] Iteration 49200, lr = 1.23\nI0819 00:21:33.416004 21603 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0819 00:21:59.710554 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73492\nI0819 00:21:59.710628 21603 solver.cpp:404]     Test net output 
#1: loss = 1.24124 (* 1 = 1.24124 loss)\nI0819 00:22:00.122655 21603 solver.cpp:228] Iteration 49300, loss = 0.161959\nI0819 00:22:00.122687 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 00:22:00.122714 21603 solver.cpp:244]     Train net output #1: loss = 0.161959 (* 1 = 0.161959 loss)\nI0819 00:22:00.211446 21603 sgd_solver.cpp:166] Iteration 49300, lr = 1.2325\nI0819 00:22:47.633795 21603 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0819 00:23:13.954344 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80532\nI0819 00:23:13.954418 21603 solver.cpp:404]     Test net output #1: loss = 0.723057 (* 1 = 0.723057 loss)\nI0819 00:23:14.366582 21603 solver.cpp:228] Iteration 49400, loss = 0.307434\nI0819 00:23:14.366614 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 00:23:14.366641 21603 solver.cpp:244]     Train net output #1: loss = 0.307435 (* 1 = 0.307435 loss)\nI0819 00:23:14.461408 21603 sgd_solver.cpp:166] Iteration 49400, lr = 1.235\nI0819 00:24:01.928858 21603 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0819 00:24:28.228324 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79368\nI0819 00:24:28.228400 21603 solver.cpp:404]     Test net output #1: loss = 0.742625 (* 1 = 0.742625 loss)\nI0819 00:24:28.640751 21603 solver.cpp:228] Iteration 49500, loss = 0.20467\nI0819 00:24:28.640785 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 00:24:28.640810 21603 solver.cpp:244]     Train net output #1: loss = 0.20467 (* 1 = 0.20467 loss)\nI0819 00:24:28.733532 21603 sgd_solver.cpp:166] Iteration 49500, lr = 1.2375\nI0819 00:25:16.182229 21603 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0819 00:25:42.450860 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67492\nI0819 00:25:42.450906 21603 solver.cpp:404]     Test net output #1: loss = 1.43529 (* 1 = 1.43529 loss)\nI0819 00:25:42.862862 21603 solver.cpp:228] Iteration 49600, loss = 0.168108\nI0819 
00:25:42.862892 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:25:42.862907 21603 solver.cpp:244]     Train net output #1: loss = 0.168109 (* 1 = 0.168109 loss)\nI0819 00:25:42.957348 21603 sgd_solver.cpp:166] Iteration 49600, lr = 1.24\nI0819 00:26:30.430217 21603 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0819 00:26:56.693970 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78108\nI0819 00:26:56.694020 21603 solver.cpp:404]     Test net output #1: loss = 0.770736 (* 1 = 0.770736 loss)\nI0819 00:26:57.106298 21603 solver.cpp:228] Iteration 49700, loss = 0.185387\nI0819 00:26:57.106323 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 00:26:57.106343 21603 solver.cpp:244]     Train net output #1: loss = 0.185387 (* 1 = 0.185387 loss)\nI0819 00:26:57.197824 21603 sgd_solver.cpp:166] Iteration 49700, lr = 1.2425\nI0819 00:27:44.757843 21603 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0819 00:28:11.018632 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78004\nI0819 00:28:11.018681 21603 solver.cpp:404]     Test net output #1: loss = 0.783243 (* 1 = 0.783243 loss)\nI0819 00:28:11.430968 21603 solver.cpp:228] Iteration 49800, loss = 0.114695\nI0819 00:28:11.430995 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:28:11.431010 21603 solver.cpp:244]     Train net output #1: loss = 0.114695 (* 1 = 0.114695 loss)\nI0819 00:28:11.517964 21603 sgd_solver.cpp:166] Iteration 49800, lr = 1.245\nI0819 00:28:58.963759 21603 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0819 00:29:25.247846 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72172\nI0819 00:29:25.247895 21603 solver.cpp:404]     Test net output #1: loss = 1.10714 (* 1 = 1.10714 loss)\nI0819 00:29:25.660068 21603 solver.cpp:228] Iteration 49900, loss = 0.174471\nI0819 00:29:25.660092 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 00:29:25.660107 21603 solver.cpp:244]     
Train net output #1: loss = 0.174471 (* 1 = 0.174471 loss)\nI0819 00:29:25.746309 21603 sgd_solver.cpp:166] Iteration 49900, lr = 1.2475\nI0819 00:30:13.170239 21603 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0819 00:30:39.429390 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79252\nI0819 00:30:39.429437 21603 solver.cpp:404]     Test net output #1: loss = 0.787403 (* 1 = 0.787403 loss)\nI0819 00:30:39.841404 21603 solver.cpp:228] Iteration 50000, loss = 0.196817\nI0819 00:30:39.841434 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 00:30:39.841449 21603 solver.cpp:244]     Train net output #1: loss = 0.196818 (* 1 = 0.196818 loss)\nI0819 00:30:39.931675 21603 sgd_solver.cpp:166] Iteration 50000, lr = 1.25\nI0819 00:31:27.343937 21603 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0819 00:31:53.621273 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76928\nI0819 00:31:53.621328 21603 solver.cpp:404]     Test net output #1: loss = 0.838763 (* 1 = 0.838763 loss)\nI0819 00:31:54.033551 21603 solver.cpp:228] Iteration 50100, loss = 0.120135\nI0819 00:31:54.033591 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:31:54.033614 21603 solver.cpp:244]     Train net output #1: loss = 0.120136 (* 1 = 0.120136 loss)\nI0819 00:31:54.126894 21603 sgd_solver.cpp:166] Iteration 50100, lr = 1.2525\nI0819 00:32:41.577836 21603 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0819 00:33:07.853436 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7996\nI0819 00:33:07.853507 21603 solver.cpp:404]     Test net output #1: loss = 0.750438 (* 1 = 0.750438 loss)\nI0819 00:33:08.267082 21603 solver.cpp:228] Iteration 50200, loss = 0.158144\nI0819 00:33:08.267118 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:33:08.267143 21603 solver.cpp:244]     Train net output #1: loss = 0.158144 (* 1 = 0.158144 loss)\nI0819 00:33:08.357414 21603 sgd_solver.cpp:166] Iteration 50200, lr 
= 1.255\nI0819 00:33:55.842713 21603 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0819 00:34:22.144618 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77556\nI0819 00:34:22.144690 21603 solver.cpp:404]     Test net output #1: loss = 0.832779 (* 1 = 0.832779 loss)\nI0819 00:34:22.556732 21603 solver.cpp:228] Iteration 50300, loss = 0.171147\nI0819 00:34:22.556768 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:34:22.556793 21603 solver.cpp:244]     Train net output #1: loss = 0.171147 (* 1 = 0.171147 loss)\nI0819 00:34:22.643935 21603 sgd_solver.cpp:166] Iteration 50300, lr = 1.2575\nI0819 00:35:10.088778 21603 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0819 00:35:36.385233 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7192\nI0819 00:35:36.385309 21603 solver.cpp:404]     Test net output #1: loss = 1.27901 (* 1 = 1.27901 loss)\nI0819 00:35:36.797302 21603 solver.cpp:228] Iteration 50400, loss = 0.116227\nI0819 00:35:36.797338 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:35:36.797363 21603 solver.cpp:244]     Train net output #1: loss = 0.116228 (* 1 = 0.116228 loss)\nI0819 00:35:36.886056 21603 sgd_solver.cpp:166] Iteration 50400, lr = 1.26\nI0819 00:36:24.232674 21603 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0819 00:36:50.529559 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76964\nI0819 00:36:50.529634 21603 solver.cpp:404]     Test net output #1: loss = 0.864242 (* 1 = 0.864242 loss)\nI0819 00:36:50.941787 21603 solver.cpp:228] Iteration 50500, loss = 0.107881\nI0819 00:36:50.941825 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:36:50.941849 21603 solver.cpp:244]     Train net output #1: loss = 0.107881 (* 1 = 0.107881 loss)\nI0819 00:36:51.036800 21603 sgd_solver.cpp:166] Iteration 50500, lr = 1.2625\nI0819 00:37:38.304858 21603 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0819 00:38:04.599997 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.738\nI0819 00:38:04.600069 21603 solver.cpp:404]     Test net output #1: loss = 1.11834 (* 1 = 1.11834 loss)\nI0819 00:38:05.012702 21603 solver.cpp:228] Iteration 50600, loss = 0.154736\nI0819 00:38:05.012754 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:38:05.012778 21603 solver.cpp:244]     Train net output #1: loss = 0.154736 (* 1 = 0.154736 loss)\nI0819 00:38:05.103193 21603 sgd_solver.cpp:166] Iteration 50600, lr = 1.265\nI0819 00:38:52.396831 21603 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0819 00:39:18.693074 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77276\nI0819 00:39:18.693146 21603 solver.cpp:404]     Test net output #1: loss = 0.950272 (* 1 = 0.950272 loss)\nI0819 00:39:19.106513 21603 solver.cpp:228] Iteration 50700, loss = 0.132839\nI0819 00:39:19.106564 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 00:39:19.106588 21603 solver.cpp:244]     Train net output #1: loss = 0.132839 (* 1 = 0.132839 loss)\nI0819 00:39:19.193750 21603 sgd_solver.cpp:166] Iteration 50700, lr = 1.2675\nI0819 00:40:06.574669 21603 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0819 00:40:32.865787 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81568\nI0819 00:40:32.865855 21603 solver.cpp:404]     Test net output #1: loss = 0.603959 (* 1 = 0.603959 loss)\nI0819 00:40:33.278029 21603 solver.cpp:228] Iteration 50800, loss = 0.161472\nI0819 00:40:33.278066 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:40:33.278084 21603 solver.cpp:244]     Train net output #1: loss = 0.161473 (* 1 = 0.161473 loss)\nI0819 00:40:33.363595 21603 sgd_solver.cpp:166] Iteration 50800, lr = 1.27\nI0819 00:41:20.805948 21603 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0819 00:41:47.097292 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79072\nI0819 00:41:47.097363 21603 solver.cpp:404]     Test net output #1: 
loss = 0.740761 (* 1 = 0.740761 loss)\nI0819 00:41:47.509428 21603 solver.cpp:228] Iteration 50900, loss = 0.186003\nI0819 00:41:47.509470 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 00:41:47.509485 21603 solver.cpp:244]     Train net output #1: loss = 0.186003 (* 1 = 0.186003 loss)\nI0819 00:41:47.601024 21603 sgd_solver.cpp:166] Iteration 50900, lr = 1.2725\nI0819 00:42:35.055006 21603 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0819 00:43:01.179184 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7572\nI0819 00:43:01.179249 21603 solver.cpp:404]     Test net output #1: loss = 1.05975 (* 1 = 1.05975 loss)\nI0819 00:43:01.593070 21603 solver.cpp:228] Iteration 51000, loss = 0.0685356\nI0819 00:43:01.593114 21603 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:43:01.593132 21603 solver.cpp:244]     Train net output #1: loss = 0.0685359 (* 1 = 0.0685359 loss)\nI0819 00:43:01.682337 21603 sgd_solver.cpp:166] Iteration 51000, lr = 1.275\nI0819 00:43:48.987594 21603 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0819 00:44:15.074496 21603 solver.cpp:404]     Test net output #0: accuracy = 0.763\nI0819 00:44:15.074561 21603 solver.cpp:404]     Test net output #1: loss = 0.95739 (* 1 = 0.95739 loss)\nI0819 00:44:15.488648 21603 solver.cpp:228] Iteration 51100, loss = 0.230138\nI0819 00:44:15.488688 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 00:44:15.488705 21603 solver.cpp:244]     Train net output #1: loss = 0.230139 (* 1 = 0.230139 loss)\nI0819 00:44:15.579166 21603 sgd_solver.cpp:166] Iteration 51100, lr = 1.2775\nI0819 00:45:02.872757 21603 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0819 00:45:28.912118 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78988\nI0819 00:45:28.912178 21603 solver.cpp:404]     Test net output #1: loss = 0.745743 (* 1 = 0.745743 loss)\nI0819 00:45:29.325083 21603 solver.cpp:228] Iteration 51200, loss = 0.184954\nI0819 
00:45:29.325129 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 00:45:29.325145 21603 solver.cpp:244]     Train net output #1: loss = 0.184955 (* 1 = 0.184955 loss)\nI0819 00:45:29.417829 21603 sgd_solver.cpp:166] Iteration 51200, lr = 1.28\nI0819 00:46:16.669088 21603 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0819 00:46:42.750332 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79712\nI0819 00:46:42.750396 21603 solver.cpp:404]     Test net output #1: loss = 0.69174 (* 1 = 0.69174 loss)\nI0819 00:46:43.162837 21603 solver.cpp:228] Iteration 51300, loss = 0.214622\nI0819 00:46:43.162881 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 00:46:43.162897 21603 solver.cpp:244]     Train net output #1: loss = 0.214623 (* 1 = 0.214623 loss)\nI0819 00:46:43.250155 21603 sgd_solver.cpp:166] Iteration 51300, lr = 1.2825\nI0819 00:47:30.564393 21603 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 00:47:56.634088 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0819 00:47:56.634132 21603 solver.cpp:404]     Test net output #1: loss = 0.924116 (* 1 = 0.924116 loss)\nI0819 00:47:57.046475 21603 solver.cpp:228] Iteration 51400, loss = 0.193991\nI0819 00:47:57.046519 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 00:47:57.046533 21603 solver.cpp:244]     Train net output #1: loss = 0.193991 (* 1 = 0.193991 loss)\nI0819 00:47:57.132951 21603 sgd_solver.cpp:166] Iteration 51400, lr = 1.285\nI0819 00:48:44.540626 21603 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 00:49:10.578634 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68068\nI0819 00:49:10.578677 21603 solver.cpp:404]     Test net output #1: loss = 1.527 (* 1 = 1.527 loss)\nI0819 00:49:10.991559 21603 solver.cpp:228] Iteration 51500, loss = 0.110917\nI0819 00:49:10.991600 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:49:10.991616 21603 solver.cpp:244]     Train net 
output #1: loss = 0.110918 (* 1 = 0.110918 loss)\nI0819 00:49:11.084098 21603 sgd_solver.cpp:166] Iteration 51500, lr = 1.2875\nI0819 00:49:58.608012 21603 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 00:50:24.793908 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74212\nI0819 00:50:24.793975 21603 solver.cpp:404]     Test net output #1: loss = 1.10133 (* 1 = 1.10133 loss)\nI0819 00:50:25.206439 21603 solver.cpp:228] Iteration 51600, loss = 0.248675\nI0819 00:50:25.206477 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 00:50:25.206493 21603 solver.cpp:244]     Train net output #1: loss = 0.248676 (* 1 = 0.248676 loss)\nI0819 00:50:25.301489 21603 sgd_solver.cpp:166] Iteration 51600, lr = 1.29\nI0819 00:51:12.747103 21603 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 00:51:38.807344 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7288\nI0819 00:51:38.807404 21603 solver.cpp:404]     Test net output #1: loss = 1.14596 (* 1 = 1.14596 loss)\nI0819 00:51:39.219772 21603 solver.cpp:228] Iteration 51700, loss = 0.165735\nI0819 00:51:39.219811 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:51:39.219827 21603 solver.cpp:244]     Train net output #1: loss = 0.165735 (* 1 = 0.165735 loss)\nI0819 00:51:39.307320 21603 sgd_solver.cpp:166] Iteration 51700, lr = 1.2925\nI0819 00:52:26.847565 21603 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 00:52:52.920459 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78968\nI0819 00:52:52.920516 21603 solver.cpp:404]     Test net output #1: loss = 0.867384 (* 1 = 0.867384 loss)\nI0819 00:52:53.333003 21603 solver.cpp:228] Iteration 51800, loss = 0.131181\nI0819 00:52:53.333042 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:52:53.333058 21603 solver.cpp:244]     Train net output #1: loss = 0.131181 (* 1 = 0.131181 loss)\nI0819 00:52:53.430577 21603 sgd_solver.cpp:166] Iteration 51800, lr = 1.295\nI0819 
00:53:40.889586 21603 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 00:54:07.140954 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73132\nI0819 00:54:07.141000 21603 solver.cpp:404]     Test net output #1: loss = 1.17981 (* 1 = 1.17981 loss)\nI0819 00:54:07.553516 21603 solver.cpp:228] Iteration 51900, loss = 0.0951736\nI0819 00:54:07.553558 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:54:07.553575 21603 solver.cpp:244]     Train net output #1: loss = 0.0951739 (* 1 = 0.0951739 loss)\nI0819 00:54:07.647550 21603 sgd_solver.cpp:166] Iteration 51900, lr = 1.2975\nI0819 00:54:55.116014 21603 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 00:55:21.400696 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0819 00:55:21.400743 21603 solver.cpp:404]     Test net output #1: loss = 0.740324 (* 1 = 0.740324 loss)\nI0819 00:55:21.813428 21603 solver.cpp:228] Iteration 52000, loss = 0.143835\nI0819 00:55:21.813467 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:55:21.813483 21603 solver.cpp:244]     Train net output #1: loss = 0.143836 (* 1 = 0.143836 loss)\nI0819 00:55:21.903708 21603 sgd_solver.cpp:166] Iteration 52000, lr = 1.3\nI0819 00:56:09.432760 21603 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 00:56:35.703019 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74052\nI0819 00:56:35.703065 21603 solver.cpp:404]     Test net output #1: loss = 0.989635 (* 1 = 0.989635 loss)\nI0819 00:56:36.115226 21603 solver.cpp:228] Iteration 52100, loss = 0.115565\nI0819 00:56:36.115268 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:56:36.115283 21603 solver.cpp:244]     Train net output #1: loss = 0.115566 (* 1 = 0.115566 loss)\nI0819 00:56:36.204725 21603 sgd_solver.cpp:166] Iteration 52100, lr = 1.3025\nI0819 00:57:23.532779 21603 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 00:57:49.807116 21603 solver.cpp:404]     Test net 
output #0: accuracy = 0.73156\nI0819 00:57:49.807162 21603 solver.cpp:404]     Test net output #1: loss = 1.05108 (* 1 = 1.05108 loss)\nI0819 00:57:50.219348 21603 solver.cpp:228] Iteration 52200, loss = 0.148067\nI0819 00:57:50.219383 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:57:50.219398 21603 solver.cpp:244]     Train net output #1: loss = 0.148068 (* 1 = 0.148068 loss)\nI0819 00:57:50.314703 21603 sgd_solver.cpp:166] Iteration 52200, lr = 1.305\nI0819 00:58:37.633437 21603 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 00:59:03.922963 21603 solver.cpp:404]     Test net output #0: accuracy = 0.84584\nI0819 00:59:03.923012 21603 solver.cpp:404]     Test net output #1: loss = 0.514139 (* 1 = 0.514139 loss)\nI0819 00:59:04.335250 21603 solver.cpp:228] Iteration 52300, loss = 0.119662\nI0819 00:59:04.335289 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:59:04.335305 21603 solver.cpp:244]     Train net output #1: loss = 0.119663 (* 1 = 0.119663 loss)\nI0819 00:59:04.427911 21603 sgd_solver.cpp:166] Iteration 52300, lr = 1.3075\nI0819 00:59:51.651226 21603 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 01:00:17.935581 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74864\nI0819 01:00:17.935624 21603 solver.cpp:404]     Test net output #1: loss = 0.88952 (* 1 = 0.88952 loss)\nI0819 01:00:18.347750 21603 solver.cpp:228] Iteration 52400, loss = 0.174512\nI0819 01:00:18.347793 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:00:18.347808 21603 solver.cpp:244]     Train net output #1: loss = 0.174512 (* 1 = 0.174512 loss)\nI0819 01:00:18.442891 21603 sgd_solver.cpp:166] Iteration 52400, lr = 1.31\nI0819 01:01:05.650801 21603 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 01:01:31.937531 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74668\nI0819 01:01:31.937577 21603 solver.cpp:404]     Test net output #1: loss = 1.11405 (* 1 = 1.11405 
loss)\nI0819 01:01:32.350186 21603 solver.cpp:228] Iteration 52500, loss = 0.171661\nI0819 01:01:32.350224 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:01:32.350239 21603 solver.cpp:244]     Train net output #1: loss = 0.171661 (* 1 = 0.171661 loss)\nI0819 01:01:32.434183 21603 sgd_solver.cpp:166] Iteration 52500, lr = 1.3125\nI0819 01:02:19.821126 21603 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 01:02:46.109127 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8008\nI0819 01:02:46.109174 21603 solver.cpp:404]     Test net output #1: loss = 0.757381 (* 1 = 0.757381 loss)\nI0819 01:02:46.521790 21603 solver.cpp:228] Iteration 52600, loss = 0.172596\nI0819 01:02:46.521827 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:02:46.521843 21603 solver.cpp:244]     Train net output #1: loss = 0.172596 (* 1 = 0.172596 loss)\nI0819 01:02:46.607756 21603 sgd_solver.cpp:166] Iteration 52600, lr = 1.315\nI0819 01:03:33.875110 21603 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 01:04:00.163980 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78992\nI0819 01:04:00.164021 21603 solver.cpp:404]     Test net output #1: loss = 0.836735 (* 1 = 0.836735 loss)\nI0819 01:04:00.576475 21603 solver.cpp:228] Iteration 52700, loss = 0.185174\nI0819 01:04:00.576519 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:04:00.576535 21603 solver.cpp:244]     Train net output #1: loss = 0.185174 (* 1 = 0.185174 loss)\nI0819 01:04:00.666440 21603 sgd_solver.cpp:166] Iteration 52700, lr = 1.3175\nI0819 01:04:47.983088 21603 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 01:05:14.256497 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6922\nI0819 01:05:14.256543 21603 solver.cpp:404]     Test net output #1: loss = 1.22665 (* 1 = 1.22665 loss)\nI0819 01:05:14.668612 21603 solver.cpp:228] Iteration 52800, loss = 0.174533\nI0819 01:05:14.668653 21603 solver.cpp:244]    
 Train net output #0: accuracy = 0.944\nI0819 01:05:14.668668 21603 solver.cpp:244]     Train net output #1: loss = 0.174534 (* 1 = 0.174534 loss)\nI0819 01:05:14.759433 21603 sgd_solver.cpp:166] Iteration 52800, lr = 1.32\nI0819 01:06:02.136221 21603 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 01:06:28.392132 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75484\nI0819 01:06:28.392176 21603 solver.cpp:404]     Test net output #1: loss = 0.928101 (* 1 = 0.928101 loss)\nI0819 01:06:28.804567 21603 solver.cpp:228] Iteration 52900, loss = 0.201437\nI0819 01:06:28.804611 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 01:06:28.804627 21603 solver.cpp:244]     Train net output #1: loss = 0.201437 (* 1 = 0.201437 loss)\nI0819 01:06:28.897817 21603 sgd_solver.cpp:166] Iteration 52900, lr = 1.3225\nI0819 01:07:16.298193 21603 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 01:07:42.568856 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79848\nI0819 01:07:42.568902 21603 solver.cpp:404]     Test net output #1: loss = 0.724916 (* 1 = 0.724916 loss)\nI0819 01:07:42.981505 21603 solver.cpp:228] Iteration 53000, loss = 0.183077\nI0819 01:07:42.981544 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:07:42.981560 21603 solver.cpp:244]     Train net output #1: loss = 0.183078 (* 1 = 0.183078 loss)\nI0819 01:07:43.070097 21603 sgd_solver.cpp:166] Iteration 53000, lr = 1.325\nI0819 01:08:30.344205 21603 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 01:08:56.627593 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0819 01:08:56.627637 21603 solver.cpp:404]     Test net output #1: loss = 0.71263 (* 1 = 0.71263 loss)\nI0819 01:08:57.040081 21603 solver.cpp:228] Iteration 53100, loss = 0.157649\nI0819 01:08:57.040122 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:08:57.040138 21603 solver.cpp:244]     Train net output #1: loss = 0.157649 (* 1 = 
0.157649 loss)\nI0819 01:08:57.132030 21603 sgd_solver.cpp:166] Iteration 53100, lr = 1.3275\nI0819 01:09:44.437283 21603 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 01:10:10.725375 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77372\nI0819 01:10:10.725417 21603 solver.cpp:404]     Test net output #1: loss = 0.88973 (* 1 = 0.88973 loss)\nI0819 01:10:11.138108 21603 solver.cpp:228] Iteration 53200, loss = 0.249046\nI0819 01:10:11.138159 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 01:10:11.138175 21603 solver.cpp:244]     Train net output #1: loss = 0.249046 (* 1 = 0.249046 loss)\nI0819 01:10:11.231410 21603 sgd_solver.cpp:166] Iteration 53200, lr = 1.33\nI0819 01:10:58.573307 21603 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 01:11:24.851145 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72736\nI0819 01:11:24.851189 21603 solver.cpp:404]     Test net output #1: loss = 1.22324 (* 1 = 1.22324 loss)\nI0819 01:11:25.263895 21603 solver.cpp:228] Iteration 53300, loss = 0.158408\nI0819 01:11:25.263944 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:11:25.263960 21603 solver.cpp:244]     Train net output #1: loss = 0.158408 (* 1 = 0.158408 loss)\nI0819 01:11:25.354707 21603 sgd_solver.cpp:166] Iteration 53300, lr = 1.3325\nI0819 01:12:12.608218 21603 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 01:12:38.854858 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79788\nI0819 01:12:38.854904 21603 solver.cpp:404]     Test net output #1: loss = 0.770645 (* 1 = 0.770645 loss)\nI0819 01:12:39.267195 21603 solver.cpp:228] Iteration 53400, loss = 0.210096\nI0819 01:12:39.267244 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:12:39.267262 21603 solver.cpp:244]     Train net output #1: loss = 0.210096 (* 1 = 0.210096 loss)\nI0819 01:12:39.362615 21603 sgd_solver.cpp:166] Iteration 53400, lr = 1.335\nI0819 01:13:26.661491 21603 
solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 01:13:52.912537 21603 solver.cpp:404]     Test net output #0: accuracy = 0.758\nI0819 01:13:52.912582 21603 solver.cpp:404]     Test net output #1: loss = 0.945429 (* 1 = 0.945429 loss)\nI0819 01:13:53.324853 21603 solver.cpp:228] Iteration 53500, loss = 0.161518\nI0819 01:13:53.324903 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:13:53.324918 21603 solver.cpp:244]     Train net output #1: loss = 0.161518 (* 1 = 0.161518 loss)\nI0819 01:13:53.411979 21603 sgd_solver.cpp:166] Iteration 53500, lr = 1.3375\nI0819 01:14:40.597242 21603 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 01:15:06.845302 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7748\nI0819 01:15:06.845352 21603 solver.cpp:404]     Test net output #1: loss = 0.974656 (* 1 = 0.974656 loss)\nI0819 01:15:07.257910 21603 solver.cpp:228] Iteration 53600, loss = 0.184659\nI0819 01:15:07.257961 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:15:07.257977 21603 solver.cpp:244]     Train net output #1: loss = 0.184659 (* 1 = 0.184659 loss)\nI0819 01:15:07.352749 21603 sgd_solver.cpp:166] Iteration 53600, lr = 1.34\nI0819 01:15:54.666446 21603 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 01:16:20.914265 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81\nI0819 01:16:20.914310 21603 solver.cpp:404]     Test net output #1: loss = 0.731148 (* 1 = 0.731148 loss)\nI0819 01:16:21.326712 21603 solver.cpp:228] Iteration 53700, loss = 0.150491\nI0819 01:16:21.326764 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:16:21.326782 21603 solver.cpp:244]     Train net output #1: loss = 0.150491 (* 1 = 0.150491 loss)\nI0819 01:16:21.421299 21603 sgd_solver.cpp:166] Iteration 53700, lr = 1.3425\nI0819 01:17:08.764089 21603 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 01:17:35.030494 21603 solver.cpp:404]     Test net output #0: accuracy = 
0.80352\nI0819 01:17:35.030540 21603 solver.cpp:404]     Test net output #1: loss = 0.731357 (* 1 = 0.731357 loss)\nI0819 01:17:35.443095 21603 solver.cpp:228] Iteration 53800, loss = 0.204807\nI0819 01:17:35.443146 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 01:17:35.443163 21603 solver.cpp:244]     Train net output #1: loss = 0.204807 (* 1 = 0.204807 loss)\nI0819 01:17:35.532732 21603 sgd_solver.cpp:166] Iteration 53800, lr = 1.345\nI0819 01:18:22.740597 21603 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 01:18:49.000977 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72372\nI0819 01:18:49.001021 21603 solver.cpp:404]     Test net output #1: loss = 1.12519 (* 1 = 1.12519 loss)\nI0819 01:18:49.413769 21603 solver.cpp:228] Iteration 53900, loss = 0.151107\nI0819 01:18:49.413818 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:18:49.413836 21603 solver.cpp:244]     Train net output #1: loss = 0.151107 (* 1 = 0.151107 loss)\nI0819 01:18:49.501441 21603 sgd_solver.cpp:166] Iteration 53900, lr = 1.3475\nI0819 01:19:36.623827 21603 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 01:20:02.887766 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79548\nI0819 01:20:02.887814 21603 solver.cpp:404]     Test net output #1: loss = 0.808438 (* 1 = 0.808438 loss)\nI0819 01:20:03.300478 21603 solver.cpp:228] Iteration 54000, loss = 0.133414\nI0819 01:20:03.300529 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:20:03.300545 21603 solver.cpp:244]     Train net output #1: loss = 0.133414 (* 1 = 0.133414 loss)\nI0819 01:20:03.388360 21603 sgd_solver.cpp:166] Iteration 54000, lr = 1.35\nI0819 01:20:50.701311 21603 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 01:21:16.979887 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78756\nI0819 01:21:16.979933 21603 solver.cpp:404]     Test net output #1: loss = 0.87286 (* 1 = 0.87286 loss)\nI0819 
01:21:17.392380 21603 solver.cpp:228] Iteration 54100, loss = 0.153463\nI0819 01:21:17.392432 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:21:17.392449 21603 solver.cpp:244]     Train net output #1: loss = 0.153463 (* 1 = 0.153463 loss)\nI0819 01:21:17.486968 21603 sgd_solver.cpp:166] Iteration 54100, lr = 1.3525\nI0819 01:22:04.759415 21603 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 01:22:31.011353 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75364\nI0819 01:22:31.011399 21603 solver.cpp:404]     Test net output #1: loss = 0.951145 (* 1 = 0.951145 loss)\nI0819 01:22:31.424134 21603 solver.cpp:228] Iteration 54200, loss = 0.159919\nI0819 01:22:31.424185 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:22:31.424199 21603 solver.cpp:244]     Train net output #1: loss = 0.159919 (* 1 = 0.159919 loss)\nI0819 01:22:31.512434 21603 sgd_solver.cpp:166] Iteration 54200, lr = 1.355\nI0819 01:23:18.799098 21603 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 01:23:45.046702 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7548\nI0819 01:23:45.046747 21603 solver.cpp:404]     Test net output #1: loss = 1.00658 (* 1 = 1.00658 loss)\nI0819 01:23:45.459348 21603 solver.cpp:228] Iteration 54300, loss = 0.192642\nI0819 01:23:45.459396 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 01:23:45.459414 21603 solver.cpp:244]     Train net output #1: loss = 0.192642 (* 1 = 0.192642 loss)\nI0819 01:23:45.547010 21603 sgd_solver.cpp:166] Iteration 54300, lr = 1.3575\nI0819 01:24:32.723023 21603 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 01:24:58.984565 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69772\nI0819 01:24:58.984618 21603 solver.cpp:404]     Test net output #1: loss = 1.50377 (* 1 = 1.50377 loss)\nI0819 01:24:59.398439 21603 solver.cpp:228] Iteration 54400, loss = 0.17831\nI0819 01:24:59.398478 21603 solver.cpp:244]     Train net 
output #0: accuracy = 0.944\nI0819 01:24:59.398494 21603 solver.cpp:244]     Train net output #1: loss = 0.17831 (* 1 = 0.17831 loss)\nI0819 01:24:59.485172 21603 sgd_solver.cpp:166] Iteration 54400, lr = 1.36\nI0819 01:25:46.671799 21603 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 01:26:12.657111 21603 solver.cpp:404]     Test net output #0: accuracy = 0.797\nI0819 01:26:12.657171 21603 solver.cpp:404]     Test net output #1: loss = 0.675795 (* 1 = 0.675795 loss)\nI0819 01:26:13.068639 21603 solver.cpp:228] Iteration 54500, loss = 0.190674\nI0819 01:26:13.068686 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:26:13.068702 21603 solver.cpp:244]     Train net output #1: loss = 0.190675 (* 1 = 0.190675 loss)\nI0819 01:26:13.159083 21603 sgd_solver.cpp:166] Iteration 54500, lr = 1.3625\nI0819 01:27:00.403898 21603 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 01:27:26.386242 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79092\nI0819 01:27:26.386298 21603 solver.cpp:404]     Test net output #1: loss = 0.797399 (* 1 = 0.797399 loss)\nI0819 01:27:26.797734 21603 solver.cpp:228] Iteration 54600, loss = 0.130162\nI0819 01:27:26.797778 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:27:26.797796 21603 solver.cpp:244]     Train net output #1: loss = 0.130162 (* 1 = 0.130162 loss)\nI0819 01:27:26.882762 21603 sgd_solver.cpp:166] Iteration 54600, lr = 1.365\nI0819 01:28:14.043398 21603 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 01:28:40.027107 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7894\nI0819 01:28:40.027163 21603 solver.cpp:404]     Test net output #1: loss = 0.811402 (* 1 = 0.811402 loss)\nI0819 01:28:40.439694 21603 solver.cpp:228] Iteration 54700, loss = 0.149053\nI0819 01:28:40.439741 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:28:40.439757 21603 solver.cpp:244]     Train net output #1: loss = 0.149053 (* 1 = 0.149053 
loss)\nI0819 01:28:40.534523 21603 sgd_solver.cpp:166] Iteration 54700, lr = 1.3675\nI0819 01:29:27.768784 21603 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 01:29:53.754757 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78468\nI0819 01:29:53.754815 21603 solver.cpp:404]     Test net output #1: loss = 0.82252 (* 1 = 0.82252 loss)\nI0819 01:29:54.166435 21603 solver.cpp:228] Iteration 54800, loss = 0.174989\nI0819 01:29:54.166473 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:29:54.166489 21603 solver.cpp:244]     Train net output #1: loss = 0.174989 (* 1 = 0.174989 loss)\nI0819 01:29:54.260970 21603 sgd_solver.cpp:166] Iteration 54800, lr = 1.37\nI0819 01:30:41.460101 21603 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 01:31:07.447578 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74588\nI0819 01:31:07.447636 21603 solver.cpp:404]     Test net output #1: loss = 1.19127 (* 1 = 1.19127 loss)\nI0819 01:31:07.858845 21603 solver.cpp:228] Iteration 54900, loss = 0.255746\nI0819 01:31:07.858885 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:31:07.858901 21603 solver.cpp:244]     Train net output #1: loss = 0.255746 (* 1 = 0.255746 loss)\nI0819 01:31:07.947453 21603 sgd_solver.cpp:166] Iteration 54900, lr = 1.3725\nI0819 01:31:55.262090 21603 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 01:32:21.254169 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77824\nI0819 01:32:21.254235 21603 solver.cpp:404]     Test net output #1: loss = 0.878465 (* 1 = 0.878465 loss)\nI0819 01:32:21.665611 21603 solver.cpp:228] Iteration 55000, loss = 0.175778\nI0819 01:32:21.665649 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 01:32:21.665665 21603 solver.cpp:244]     Train net output #1: loss = 0.175778 (* 1 = 0.175778 loss)\nI0819 01:32:21.757247 21603 sgd_solver.cpp:166] Iteration 55000, lr = 1.375\nI0819 01:33:09.056676 21603 solver.cpp:337] 
Iteration 55100, Testing net (#0)\nI0819 01:33:35.042382 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78224\nI0819 01:33:35.042441 21603 solver.cpp:404]     Test net output #1: loss = 0.790419 (* 1 = 0.790419 loss)\nI0819 01:33:35.453883 21603 solver.cpp:228] Iteration 55100, loss = 0.162398\nI0819 01:33:35.453920 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:33:35.453936 21603 solver.cpp:244]     Train net output #1: loss = 0.162398 (* 1 = 0.162398 loss)\nI0819 01:33:35.541272 21603 sgd_solver.cpp:166] Iteration 55100, lr = 1.3775\nI0819 01:34:22.739253 21603 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 01:34:48.727942 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77196\nI0819 01:34:48.728008 21603 solver.cpp:404]     Test net output #1: loss = 0.84787 (* 1 = 0.84787 loss)\nI0819 01:34:49.139267 21603 solver.cpp:228] Iteration 55200, loss = 0.153264\nI0819 01:34:49.139303 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 01:34:49.139319 21603 solver.cpp:244]     Train net output #1: loss = 0.153264 (* 1 = 0.153264 loss)\nI0819 01:34:49.232507 21603 sgd_solver.cpp:166] Iteration 55200, lr = 1.38\nI0819 01:35:36.315605 21603 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 01:36:02.304669 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78492\nI0819 01:36:02.304726 21603 solver.cpp:404]     Test net output #1: loss = 0.785771 (* 1 = 0.785771 loss)\nI0819 01:36:02.716328 21603 solver.cpp:228] Iteration 55300, loss = 0.115123\nI0819 01:36:02.716364 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:36:02.716380 21603 solver.cpp:244]     Train net output #1: loss = 0.115124 (* 1 = 0.115124 loss)\nI0819 01:36:02.801802 21603 sgd_solver.cpp:166] Iteration 55300, lr = 1.3825\nI0819 01:36:49.957474 21603 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0819 01:37:15.947499 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77428\nI0819 
01:37:15.947559 21603 solver.cpp:404]     Test net output #1: loss = 0.814615 (* 1 = 0.814615 loss)\nI0819 01:37:16.359006 21603 solver.cpp:228] Iteration 55400, loss = 0.161565\nI0819 01:37:16.359042 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:37:16.359058 21603 solver.cpp:244]     Train net output #1: loss = 0.161565 (* 1 = 0.161565 loss)\nI0819 01:37:16.453049 21603 sgd_solver.cpp:166] Iteration 55400, lr = 1.385\nI0819 01:38:03.642341 21603 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0819 01:38:29.630522 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77156\nI0819 01:38:29.630581 21603 solver.cpp:404]     Test net output #1: loss = 0.958075 (* 1 = 0.958075 loss)\nI0819 01:38:30.041946 21603 solver.cpp:228] Iteration 55500, loss = 0.216504\nI0819 01:38:30.041982 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:38:30.041998 21603 solver.cpp:244]     Train net output #1: loss = 0.216504 (* 1 = 0.216504 loss)\nI0819 01:38:30.146172 21603 sgd_solver.cpp:166] Iteration 55500, lr = 1.3875\nI0819 01:39:17.411032 21603 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0819 01:39:43.399693 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69384\nI0819 01:39:43.399749 21603 solver.cpp:404]     Test net output #1: loss = 1.43332 (* 1 = 1.43332 loss)\nI0819 01:39:43.811058 21603 solver.cpp:228] Iteration 55600, loss = 0.140124\nI0819 01:39:43.811095 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 01:39:43.811110 21603 solver.cpp:244]     Train net output #1: loss = 0.140124 (* 1 = 0.140124 loss)\nI0819 01:39:43.904955 21603 sgd_solver.cpp:166] Iteration 55600, lr = 1.39\nI0819 01:40:31.239389 21603 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0819 01:40:57.226935 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79208\nI0819 01:40:57.226975 21603 solver.cpp:404]     Test net output #1: loss = 0.749801 (* 1 = 0.749801 loss)\nI0819 01:40:57.638065 21603 
solver.cpp:228] Iteration 55700, loss = 0.230464\nI0819 01:40:57.638100 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:40:57.638116 21603 solver.cpp:244]     Train net output #1: loss = 0.230464 (* 1 = 0.230464 loss)\nI0819 01:40:57.725162 21603 sgd_solver.cpp:166] Iteration 55700, lr = 1.3925\nI0819 01:41:44.933115 21603 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0819 01:42:10.922354 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82772\nI0819 01:42:10.922412 21603 solver.cpp:404]     Test net output #1: loss = 0.588973 (* 1 = 0.588973 loss)\nI0819 01:42:11.333658 21603 solver.cpp:228] Iteration 55800, loss = 0.143291\nI0819 01:42:11.333696 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:42:11.333712 21603 solver.cpp:244]     Train net output #1: loss = 0.143291 (* 1 = 0.143291 loss)\nI0819 01:42:11.423609 21603 sgd_solver.cpp:166] Iteration 55800, lr = 1.395\nI0819 01:42:58.590315 21603 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0819 01:43:24.581279 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8034\nI0819 01:43:24.581320 21603 solver.cpp:404]     Test net output #1: loss = 0.681524 (* 1 = 0.681524 loss)\nI0819 01:43:24.992952 21603 solver.cpp:228] Iteration 55900, loss = 0.175662\nI0819 01:43:24.992990 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:43:24.993005 21603 solver.cpp:244]     Train net output #1: loss = 0.175663 (* 1 = 0.175663 loss)\nI0819 01:43:25.084427 21603 sgd_solver.cpp:166] Iteration 55900, lr = 1.3975\nI0819 01:44:12.313519 21603 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0819 01:44:38.302562 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81472\nI0819 01:44:38.302604 21603 solver.cpp:404]     Test net output #1: loss = 0.700633 (* 1 = 0.700633 loss)\nI0819 01:44:38.714120 21603 solver.cpp:228] Iteration 56000, loss = 0.235477\nI0819 01:44:38.714157 21603 solver.cpp:244]     Train net output #0: 
accuracy = 0.928\nI0819 01:44:38.714174 21603 solver.cpp:244]     Train net output #1: loss = 0.235477 (* 1 = 0.235477 loss)\nI0819 01:44:38.807915 21603 sgd_solver.cpp:166] Iteration 56000, lr = 1.4\nI0819 01:45:25.964568 21603 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0819 01:45:51.952792 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73776\nI0819 01:45:51.952836 21603 solver.cpp:404]     Test net output #1: loss = 1.00792 (* 1 = 1.00792 loss)\nI0819 01:45:52.364166 21603 solver.cpp:228] Iteration 56100, loss = 0.182915\nI0819 01:45:52.364217 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:45:52.364233 21603 solver.cpp:244]     Train net output #1: loss = 0.182915 (* 1 = 0.182915 loss)\nI0819 01:45:52.458802 21603 sgd_solver.cpp:166] Iteration 56100, lr = 1.4025\nI0819 01:46:39.702688 21603 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0819 01:47:05.691432 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79668\nI0819 01:47:05.691473 21603 solver.cpp:404]     Test net output #1: loss = 0.708979 (* 1 = 0.708979 loss)\nI0819 01:47:06.103766 21603 solver.cpp:228] Iteration 56200, loss = 0.170746\nI0819 01:47:06.103813 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:47:06.103829 21603 solver.cpp:244]     Train net output #1: loss = 0.170746 (* 1 = 0.170746 loss)\nI0819 01:47:06.196935 21603 sgd_solver.cpp:166] Iteration 56200, lr = 1.405\nI0819 01:47:53.463335 21603 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0819 01:48:19.457002 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79664\nI0819 01:48:19.457042 21603 solver.cpp:404]     Test net output #1: loss = 0.739101 (* 1 = 0.739101 loss)\nI0819 01:48:19.868734 21603 solver.cpp:228] Iteration 56300, loss = 0.218332\nI0819 01:48:19.868777 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 01:48:19.868793 21603 solver.cpp:244]     Train net output #1: loss = 0.218333 (* 1 = 0.218333 
loss)\nI0819 01:48:19.961789 21603 sgd_solver.cpp:166] Iteration 56300, lr = 1.4075\nI0819 01:49:07.152566 21603 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0819 01:49:33.141543 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77816\nI0819 01:49:33.141592 21603 solver.cpp:404]     Test net output #1: loss = 0.945701 (* 1 = 0.945701 loss)\nI0819 01:49:33.553169 21603 solver.cpp:228] Iteration 56400, loss = 0.257747\nI0819 01:49:33.553217 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 01:49:33.553234 21603 solver.cpp:244]     Train net output #1: loss = 0.257747 (* 1 = 0.257747 loss)\nI0819 01:49:33.643738 21603 sgd_solver.cpp:166] Iteration 56400, lr = 1.41\nI0819 01:50:20.855252 21603 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0819 01:50:46.842572 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71308\nI0819 01:50:46.842612 21603 solver.cpp:404]     Test net output #1: loss = 1.12743 (* 1 = 1.12743 loss)\nI0819 01:50:47.254016 21603 solver.cpp:228] Iteration 56500, loss = 0.150414\nI0819 01:50:47.254060 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:50:47.254077 21603 solver.cpp:244]     Train net output #1: loss = 0.150414 (* 1 = 0.150414 loss)\nI0819 01:50:47.342100 21603 sgd_solver.cpp:166] Iteration 56500, lr = 1.4125\nI0819 01:51:34.467170 21603 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0819 01:52:00.455721 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79624\nI0819 01:52:00.455763 21603 solver.cpp:404]     Test net output #1: loss = 0.675272 (* 1 = 0.675272 loss)\nI0819 01:52:00.866888 21603 solver.cpp:228] Iteration 56600, loss = 0.166605\nI0819 01:52:00.866933 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:52:00.866950 21603 solver.cpp:244]     Train net output #1: loss = 0.166605 (* 1 = 0.166605 loss)\nI0819 01:52:00.957278 21603 sgd_solver.cpp:166] Iteration 56600, lr = 1.415\nI0819 01:52:48.203807 21603 solver.cpp:337] 
Iteration 56700, Testing net (#0)\nI0819 01:53:14.200532 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75528\nI0819 01:53:14.200578 21603 solver.cpp:404]     Test net output #1: loss = 1.05223 (* 1 = 1.05223 loss)\nI0819 01:53:14.612665 21603 solver.cpp:228] Iteration 56700, loss = 0.11518\nI0819 01:53:14.612713 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:53:14.612738 21603 solver.cpp:244]     Train net output #1: loss = 0.11518 (* 1 = 0.11518 loss)\nI0819 01:53:14.702625 21603 sgd_solver.cpp:166] Iteration 56700, lr = 1.4175\nI0819 01:54:01.804172 21603 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0819 01:54:27.799165 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80564\nI0819 01:54:27.799211 21603 solver.cpp:404]     Test net output #1: loss = 0.677066 (* 1 = 0.677066 loss)\nI0819 01:54:28.211323 21603 solver.cpp:228] Iteration 56800, loss = 0.150098\nI0819 01:54:28.211371 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:54:28.211396 21603 solver.cpp:244]     Train net output #1: loss = 0.150098 (* 1 = 0.150098 loss)\nI0819 01:54:28.303915 21603 sgd_solver.cpp:166] Iteration 56800, lr = 1.42\nI0819 01:55:15.565675 21603 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0819 01:55:41.559880 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78372\nI0819 01:55:41.559943 21603 solver.cpp:404]     Test net output #1: loss = 0.819361 (* 1 = 0.819361 loss)\nI0819 01:55:41.971464 21603 solver.cpp:228] Iteration 56900, loss = 0.193352\nI0819 01:55:41.971513 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:55:41.971537 21603 solver.cpp:244]     Train net output #1: loss = 0.193353 (* 1 = 0.193353 loss)\nI0819 01:55:42.061266 21603 sgd_solver.cpp:166] Iteration 56900, lr = 1.4225\nI0819 01:56:29.349283 21603 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0819 01:56:55.343781 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76944\nI0819 
01:56:55.343847 21603 solver.cpp:404]     Test net output #1: loss = 0.879751 (* 1 = 0.879751 loss)\nI0819 01:56:55.755647 21603 solver.cpp:228] Iteration 57000, loss = 0.173018\nI0819 01:56:55.755699 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 01:56:55.755724 21603 solver.cpp:244]     Train net output #1: loss = 0.173018 (* 1 = 0.173018 loss)\nI0819 01:56:55.838250 21603 sgd_solver.cpp:166] Iteration 57000, lr = 1.425\nI0819 01:57:43.213179 21603 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0819 01:58:09.344872 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74584\nI0819 01:58:09.344934 21603 solver.cpp:404]     Test net output #1: loss = 1.02509 (* 1 = 1.02509 loss)\nI0819 01:58:09.759580 21603 solver.cpp:228] Iteration 57100, loss = 0.0950567\nI0819 01:58:09.759623 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:58:09.759639 21603 solver.cpp:244]     Train net output #1: loss = 0.095057 (* 1 = 0.095057 loss)\nI0819 01:58:09.845347 21603 sgd_solver.cpp:166] Iteration 57100, lr = 1.4275\nI0819 01:58:57.325214 21603 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0819 01:59:23.589670 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80032\nI0819 01:59:23.589725 21603 solver.cpp:404]     Test net output #1: loss = 0.753491 (* 1 = 0.753491 loss)\nI0819 01:59:24.002177 21603 solver.cpp:228] Iteration 57200, loss = 0.194918\nI0819 01:59:24.002209 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 01:59:24.002233 21603 solver.cpp:244]     Train net output #1: loss = 0.194918 (* 1 = 0.194918 loss)\nI0819 01:59:24.089449 21603 sgd_solver.cpp:166] Iteration 57200, lr = 1.43\nI0819 02:00:11.501011 21603 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0819 02:00:37.779558 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78644\nI0819 02:00:37.779608 21603 solver.cpp:404]     Test net output #1: loss = 0.806732 (* 1 = 0.806732 loss)\nI0819 02:00:38.193380 21603 
solver.cpp:228] Iteration 57300, loss = 0.187172\nI0819 02:00:38.193413 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:00:38.193437 21603 solver.cpp:244]     Train net output #1: loss = 0.187173 (* 1 = 0.187173 loss)\nI0819 02:00:38.281488 21603 sgd_solver.cpp:166] Iteration 57300, lr = 1.4325\nI0819 02:01:25.749888 21603 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0819 02:01:52.025977 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7786\nI0819 02:01:52.026033 21603 solver.cpp:404]     Test net output #1: loss = 0.730495 (* 1 = 0.730495 loss)\nI0819 02:01:52.438514 21603 solver.cpp:228] Iteration 57400, loss = 0.160045\nI0819 02:01:52.438547 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 02:01:52.438572 21603 solver.cpp:244]     Train net output #1: loss = 0.160046 (* 1 = 0.160046 loss)\nI0819 02:01:52.526309 21603 sgd_solver.cpp:166] Iteration 57400, lr = 1.435\nI0819 02:02:39.969921 21603 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0819 02:03:06.238732 21603 solver.cpp:404]     Test net output #0: accuracy = 0.62572\nI0819 02:03:06.238783 21603 solver.cpp:404]     Test net output #1: loss = 1.67777 (* 1 = 1.67777 loss)\nI0819 02:03:06.651355 21603 solver.cpp:228] Iteration 57500, loss = 0.194029\nI0819 02:03:06.651403 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:03:06.651427 21603 solver.cpp:244]     Train net output #1: loss = 0.194029 (* 1 = 0.194029 loss)\nI0819 02:03:06.742661 21603 sgd_solver.cpp:166] Iteration 57500, lr = 1.4375\nI0819 02:03:54.095795 21603 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0819 02:04:20.356977 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79824\nI0819 02:04:20.357030 21603 solver.cpp:404]     Test net output #1: loss = 0.700397 (* 1 = 0.700397 loss)\nI0819 02:04:20.769520 21603 solver.cpp:228] Iteration 57600, loss = 0.230184\nI0819 02:04:20.769568 21603 solver.cpp:244]     Train net output #0: accuracy 
= 0.92\nI0819 02:04:20.769593 21603 solver.cpp:244]     Train net output #1: loss = 0.230184 (* 1 = 0.230184 loss)\nI0819 02:04:20.855959 21603 sgd_solver.cpp:166] Iteration 57600, lr = 1.44\nI0819 02:05:08.286644 21603 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0819 02:05:34.421015 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73516\nI0819 02:05:34.421064 21603 solver.cpp:404]     Test net output #1: loss = 1.09891 (* 1 = 1.09891 loss)\nI0819 02:05:34.833101 21603 solver.cpp:228] Iteration 57700, loss = 0.294257\nI0819 02:05:34.833149 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 02:05:34.833173 21603 solver.cpp:244]     Train net output #1: loss = 0.294257 (* 1 = 0.294257 loss)\nI0819 02:05:34.918638 21603 sgd_solver.cpp:166] Iteration 57700, lr = 1.4425\nI0819 02:06:22.362156 21603 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0819 02:06:48.615556 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76968\nI0819 02:06:48.615608 21603 solver.cpp:404]     Test net output #1: loss = 0.849006 (* 1 = 0.849006 loss)\nI0819 02:06:49.028054 21603 solver.cpp:228] Iteration 57800, loss = 0.168789\nI0819 02:06:49.028102 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:06:49.028126 21603 solver.cpp:244]     Train net output #1: loss = 0.16879 (* 1 = 0.16879 loss)\nI0819 02:06:49.118156 21603 sgd_solver.cpp:166] Iteration 57800, lr = 1.445\nI0819 02:07:36.593286 21603 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0819 02:08:02.862957 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76344\nI0819 02:08:02.863008 21603 solver.cpp:404]     Test net output #1: loss = 1.00513 (* 1 = 1.00513 loss)\nI0819 02:08:03.275521 21603 solver.cpp:228] Iteration 57900, loss = 0.230037\nI0819 02:08:03.275570 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 02:08:03.275593 21603 solver.cpp:244]     Train net output #1: loss = 0.230038 (* 1 = 0.230038 loss)\nI0819 
02:08:03.367483 21603 sgd_solver.cpp:166] Iteration 57900, lr = 1.4475\nI0819 02:08:50.797943 21603 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0819 02:09:17.052273 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80528\nI0819 02:09:17.052325 21603 solver.cpp:404]     Test net output #1: loss = 0.786681 (* 1 = 0.786681 loss)\nI0819 02:09:17.466267 21603 solver.cpp:228] Iteration 58000, loss = 0.24119\nI0819 02:09:17.466317 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 02:09:17.466341 21603 solver.cpp:244]     Train net output #1: loss = 0.24119 (* 1 = 0.24119 loss)\nI0819 02:09:17.552091 21603 sgd_solver.cpp:166] Iteration 58000, lr = 1.45\nI0819 02:10:04.959022 21603 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0819 02:10:31.232038 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77104\nI0819 02:10:31.232091 21603 solver.cpp:404]     Test net output #1: loss = 0.813715 (* 1 = 0.813715 loss)\nI0819 02:10:31.645413 21603 solver.cpp:228] Iteration 58100, loss = 0.177581\nI0819 02:10:31.645463 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:10:31.645488 21603 solver.cpp:244]     Train net output #1: loss = 0.177581 (* 1 = 0.177581 loss)\nI0819 02:10:31.735711 21603 sgd_solver.cpp:166] Iteration 58100, lr = 1.4525\nI0819 02:11:19.183357 21603 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0819 02:11:45.444123 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81136\nI0819 02:11:45.444172 21603 solver.cpp:404]     Test net output #1: loss = 0.681745 (* 1 = 0.681745 loss)\nI0819 02:11:45.857684 21603 solver.cpp:228] Iteration 58200, loss = 0.16726\nI0819 02:11:45.857733 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:11:45.857758 21603 solver.cpp:244]     Train net output #1: loss = 0.167261 (* 1 = 0.167261 loss)\nI0819 02:11:45.948725 21603 sgd_solver.cpp:166] Iteration 58200, lr = 1.455\nI0819 02:12:33.464298 21603 solver.cpp:337] Iteration 58300, 
Testing net (#0)\nI0819 02:12:59.615157 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75576\nI0819 02:12:59.615211 21603 solver.cpp:404]     Test net output #1: loss = 0.914143 (* 1 = 0.914143 loss)\nI0819 02:13:00.027707 21603 solver.cpp:228] Iteration 58300, loss = 0.250357\nI0819 02:13:00.027756 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0819 02:13:00.027781 21603 solver.cpp:244]     Train net output #1: loss = 0.250357 (* 1 = 0.250357 loss)\nI0819 02:13:00.116168 21603 sgd_solver.cpp:166] Iteration 58300, lr = 1.4575\nI0819 02:13:47.583295 21603 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0819 02:14:13.848068 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79892\nI0819 02:14:13.848122 21603 solver.cpp:404]     Test net output #1: loss = 0.770815 (* 1 = 0.770815 loss)\nI0819 02:14:14.260490 21603 solver.cpp:228] Iteration 58400, loss = 0.107149\nI0819 02:14:14.260540 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:14:14.260563 21603 solver.cpp:244]     Train net output #1: loss = 0.107149 (* 1 = 0.107149 loss)\nI0819 02:14:14.355187 21603 sgd_solver.cpp:166] Iteration 58400, lr = 1.46\nI0819 02:15:01.803454 21603 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0819 02:15:28.059195 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82496\nI0819 02:15:28.059254 21603 solver.cpp:404]     Test net output #1: loss = 0.647105 (* 1 = 0.647105 loss)\nI0819 02:15:28.471232 21603 solver.cpp:228] Iteration 58500, loss = 0.154076\nI0819 02:15:28.471284 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:15:28.471308 21603 solver.cpp:244]     Train net output #1: loss = 0.154076 (* 1 = 0.154076 loss)\nI0819 02:15:28.557746 21603 sgd_solver.cpp:166] Iteration 58500, lr = 1.4625\nI0819 02:16:16.059453 21603 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0819 02:16:42.313261 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76156\nI0819 02:16:42.313313 
21603 solver.cpp:404]     Test net output #1: loss = 0.913839 (* 1 = 0.913839 loss)\nI0819 02:16:42.727149 21603 solver.cpp:228] Iteration 58600, loss = 0.196742\nI0819 02:16:42.727198 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:16:42.727222 21603 solver.cpp:244]     Train net output #1: loss = 0.196742 (* 1 = 0.196742 loss)\nI0819 02:16:42.820839 21603 sgd_solver.cpp:166] Iteration 58600, lr = 1.465\nI0819 02:17:30.231128 21603 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0819 02:17:56.483904 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81476\nI0819 02:17:56.483958 21603 solver.cpp:404]     Test net output #1: loss = 0.724471 (* 1 = 0.724471 loss)\nI0819 02:17:56.896945 21603 solver.cpp:228] Iteration 58700, loss = 0.195659\nI0819 02:17:56.896996 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:17:56.897019 21603 solver.cpp:244]     Train net output #1: loss = 0.19566 (* 1 = 0.19566 loss)\nI0819 02:17:56.983705 21603 sgd_solver.cpp:166] Iteration 58700, lr = 1.4675\nI0819 02:18:44.339632 21603 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0819 02:19:10.594363 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72816\nI0819 02:19:10.594418 21603 solver.cpp:404]     Test net output #1: loss = 1.07946 (* 1 = 1.07946 loss)\nI0819 02:19:11.007984 21603 solver.cpp:228] Iteration 58800, loss = 0.131335\nI0819 02:19:11.008034 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:19:11.008059 21603 solver.cpp:244]     Train net output #1: loss = 0.131336 (* 1 = 0.131336 loss)\nI0819 02:19:11.101202 21603 sgd_solver.cpp:166] Iteration 58800, lr = 1.47\nI0819 02:19:58.463846 21603 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0819 02:20:24.725661 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7144\nI0819 02:20:24.725713 21603 solver.cpp:404]     Test net output #1: loss = 1.36871 (* 1 = 1.36871 loss)\nI0819 02:20:25.139453 21603 solver.cpp:228] 
Iteration 58900, loss = 0.20115\nI0819 02:20:25.139508 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 02:20:25.139533 21603 solver.cpp:244]     Train net output #1: loss = 0.20115 (* 1 = 0.20115 loss)\nI0819 02:20:25.228428 21603 sgd_solver.cpp:166] Iteration 58900, lr = 1.4725\nI0819 02:21:12.482717 21603 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0819 02:21:38.741940 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77796\nI0819 02:21:38.741986 21603 solver.cpp:404]     Test net output #1: loss = 0.758778 (* 1 = 0.758778 loss)\nI0819 02:21:39.155303 21603 solver.cpp:228] Iteration 59000, loss = 0.125233\nI0819 02:21:39.155359 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:21:39.155375 21603 solver.cpp:244]     Train net output #1: loss = 0.125233 (* 1 = 0.125233 loss)\nI0819 02:21:39.246755 21603 sgd_solver.cpp:166] Iteration 59000, lr = 1.475\nI0819 02:22:26.518684 21603 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0819 02:22:52.777779 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75416\nI0819 02:22:52.777825 21603 solver.cpp:404]     Test net output #1: loss = 0.860132 (* 1 = 0.860132 loss)\nI0819 02:22:53.191646 21603 solver.cpp:228] Iteration 59100, loss = 0.14417\nI0819 02:22:53.191699 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:22:53.191715 21603 solver.cpp:244]     Train net output #1: loss = 0.14417 (* 1 = 0.14417 loss)\nI0819 02:22:53.284340 21603 sgd_solver.cpp:166] Iteration 59100, lr = 1.4775\nI0819 02:23:40.545323 21603 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0819 02:24:06.804949 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75196\nI0819 02:24:06.804996 21603 solver.cpp:404]     Test net output #1: loss = 0.885713 (* 1 = 0.885713 loss)\nI0819 02:24:07.218327 21603 solver.cpp:228] Iteration 59200, loss = 0.209186\nI0819 02:24:07.218379 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 
02:24:07.218397 21603 solver.cpp:244]     Train net output #1: loss = 0.209186 (* 1 = 0.209186 loss)\nI0819 02:24:07.305986 21603 sgd_solver.cpp:166] Iteration 59200, lr = 1.48\nI0819 02:24:54.568331 21603 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0819 02:25:20.832808 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79564\nI0819 02:25:20.832862 21603 solver.cpp:404]     Test net output #1: loss = 0.671734 (* 1 = 0.671734 loss)\nI0819 02:25:21.253592 21603 solver.cpp:228] Iteration 59300, loss = 0.196628\nI0819 02:25:21.253648 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:25:21.253672 21603 solver.cpp:244]     Train net output #1: loss = 0.196628 (* 1 = 0.196628 loss)\nI0819 02:25:21.336556 21603 sgd_solver.cpp:166] Iteration 59300, lr = 1.4825\nI0819 02:26:08.652572 21603 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0819 02:26:34.928737 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77532\nI0819 02:26:34.928789 21603 solver.cpp:404]     Test net output #1: loss = 0.824783 (* 1 = 0.824783 loss)\nI0819 02:26:35.342237 21603 solver.cpp:228] Iteration 59400, loss = 0.183568\nI0819 02:26:35.342296 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:26:35.342321 21603 solver.cpp:244]     Train net output #1: loss = 0.183568 (* 1 = 0.183568 loss)\nI0819 02:26:35.436708 21603 sgd_solver.cpp:166] Iteration 59400, lr = 1.485\nI0819 02:27:22.779731 21603 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0819 02:27:49.060391 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68104\nI0819 02:27:49.060439 21603 solver.cpp:404]     Test net output #1: loss = 1.21649 (* 1 = 1.21649 loss)\nI0819 02:27:49.474717 21603 solver.cpp:228] Iteration 59500, loss = 0.316996\nI0819 02:27:49.474771 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 02:27:49.474789 21603 solver.cpp:244]     Train net output #1: loss = 0.316996 (* 1 = 0.316996 loss)\nI0819 02:27:49.568523 21603 
sgd_solver.cpp:166] Iteration 59500, lr = 1.4875\nI0819 02:28:36.756477 21603 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0819 02:29:03.036130 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76124\nI0819 02:29:03.036176 21603 solver.cpp:404]     Test net output #1: loss = 0.8689 (* 1 = 0.8689 loss)\nI0819 02:29:03.449537 21603 solver.cpp:228] Iteration 59600, loss = 0.198823\nI0819 02:29:03.449589 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 02:29:03.449607 21603 solver.cpp:244]     Train net output #1: loss = 0.198823 (* 1 = 0.198823 loss)\nI0819 02:29:03.535692 21603 sgd_solver.cpp:166] Iteration 59600, lr = 1.49\nI0819 02:29:50.935755 21603 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0819 02:30:17.206661 21603 solver.cpp:404]     Test net output #0: accuracy = 0.5692\nI0819 02:30:17.206707 21603 solver.cpp:404]     Test net output #1: loss = 2.17751 (* 1 = 2.17751 loss)\nI0819 02:30:17.619940 21603 solver.cpp:228] Iteration 59700, loss = 0.188342\nI0819 02:30:17.619990 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:30:17.620008 21603 solver.cpp:244]     Train net output #1: loss = 0.188342 (* 1 = 0.188342 loss)\nI0819 02:30:17.706459 21603 sgd_solver.cpp:166] Iteration 59700, lr = 1.4925\nI0819 02:31:04.980540 21603 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0819 02:31:31.261692 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73988\nI0819 02:31:31.261739 21603 solver.cpp:404]     Test net output #1: loss = 1.0264 (* 1 = 1.0264 loss)\nI0819 02:31:31.675066 21603 solver.cpp:228] Iteration 59800, loss = 0.115107\nI0819 02:31:31.675119 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:31:31.675137 21603 solver.cpp:244]     Train net output #1: loss = 0.115107 (* 1 = 0.115107 loss)\nI0819 02:31:31.766324 21603 sgd_solver.cpp:166] Iteration 59800, lr = 1.495\nI0819 02:32:19.140233 21603 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0819 
02:32:45.417373 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80672\nI0819 02:32:45.417420 21603 solver.cpp:404]     Test net output #1: loss = 0.659475 (* 1 = 0.659475 loss)\nI0819 02:32:45.831110 21603 solver.cpp:228] Iteration 59900, loss = 0.187203\nI0819 02:32:45.831158 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:32:45.831176 21603 solver.cpp:244]     Train net output #1: loss = 0.187203 (* 1 = 0.187203 loss)\nI0819 02:32:45.915396 21603 sgd_solver.cpp:166] Iteration 59900, lr = 1.4975\nI0819 02:33:33.273263 21603 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0819 02:33:59.542884 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7382\nI0819 02:33:59.542932 21603 solver.cpp:404]     Test net output #1: loss = 0.971226 (* 1 = 0.971226 loss)\nI0819 02:33:59.954984 21603 solver.cpp:228] Iteration 60000, loss = 0.168644\nI0819 02:33:59.955035 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:33:59.955052 21603 solver.cpp:244]     Train net output #1: loss = 0.168644 (* 1 = 0.168644 loss)\nI0819 02:34:00.048753 21603 sgd_solver.cpp:166] Iteration 60000, lr = 1.5\nI0819 02:34:47.448743 21603 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0819 02:35:13.730128 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74312\nI0819 02:35:13.730175 21603 solver.cpp:404]     Test net output #1: loss = 0.953907 (* 1 = 0.953907 loss)\nI0819 02:35:14.144034 21603 solver.cpp:228] Iteration 60100, loss = 0.15389\nI0819 02:35:14.144086 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:35:14.144104 21603 solver.cpp:244]     Train net output #1: loss = 0.15389 (* 1 = 0.15389 loss)\nI0819 02:35:14.238112 21603 sgd_solver.cpp:166] Iteration 60100, lr = 1.5025\nI0819 02:36:01.621937 21603 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0819 02:36:27.900038 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73952\nI0819 02:36:27.900089 21603 solver.cpp:404]     
Test net output #1: loss = 1.0451 (* 1 = 1.0451 loss)\nI0819 02:36:28.313444 21603 solver.cpp:228] Iteration 60200, loss = 0.220676\nI0819 02:36:28.313485 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 02:36:28.313510 21603 solver.cpp:244]     Train net output #1: loss = 0.220676 (* 1 = 0.220676 loss)\nI0819 02:36:28.399413 21603 sgd_solver.cpp:166] Iteration 60200, lr = 1.505\nI0819 02:37:15.870309 21603 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0819 02:37:42.151304 21603 solver.cpp:404]     Test net output #0: accuracy = 0.84216\nI0819 02:37:42.151355 21603 solver.cpp:404]     Test net output #1: loss = 0.479609 (* 1 = 0.479609 loss)\nI0819 02:37:42.564390 21603 solver.cpp:228] Iteration 60300, loss = 0.225266\nI0819 02:37:42.564433 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:37:42.564458 21603 solver.cpp:244]     Train net output #1: loss = 0.225266 (* 1 = 0.225266 loss)\nI0819 02:37:42.649401 21603 sgd_solver.cpp:166] Iteration 60300, lr = 1.5075\nI0819 02:38:30.116755 21603 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0819 02:38:56.386129 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76184\nI0819 02:38:56.386183 21603 solver.cpp:404]     Test net output #1: loss = 0.951275 (* 1 = 0.951275 loss)\nI0819 02:38:56.798563 21603 solver.cpp:228] Iteration 60400, loss = 0.273057\nI0819 02:38:56.798604 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 02:38:56.798629 21603 solver.cpp:244]     Train net output #1: loss = 0.273057 (* 1 = 0.273057 loss)\nI0819 02:38:56.885781 21603 sgd_solver.cpp:166] Iteration 60400, lr = 1.51\nI0819 02:39:44.385123 21603 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0819 02:40:10.671943 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73148\nI0819 02:40:10.671990 21603 solver.cpp:404]     Test net output #1: loss = 1.05907 (* 1 = 1.05907 loss)\nI0819 02:40:11.085726 21603 solver.cpp:228] Iteration 60500, loss = 
0.144506\nI0819 02:40:11.085767 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:40:11.085784 21603 solver.cpp:244]     Train net output #1: loss = 0.144506 (* 1 = 0.144506 loss)\nI0819 02:40:11.177181 21603 sgd_solver.cpp:166] Iteration 60500, lr = 1.5125\nI0819 02:40:58.697799 21603 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0819 02:41:24.976028 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79204\nI0819 02:41:24.976078 21603 solver.cpp:404]     Test net output #1: loss = 0.679614 (* 1 = 0.679614 loss)\nI0819 02:41:25.388062 21603 solver.cpp:228] Iteration 60600, loss = 0.242789\nI0819 02:41:25.388108 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 02:41:25.388124 21603 solver.cpp:244]     Train net output #1: loss = 0.242789 (* 1 = 0.242789 loss)\nI0819 02:41:25.475613 21603 sgd_solver.cpp:166] Iteration 60600, lr = 1.515\nI0819 02:42:12.943734 21603 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0819 02:42:39.223170 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78004\nI0819 02:42:39.223217 21603 solver.cpp:404]     Test net output #1: loss = 0.776662 (* 1 = 0.776662 loss)\nI0819 02:42:39.635388 21603 solver.cpp:228] Iteration 60700, loss = 0.209008\nI0819 02:42:39.635433 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:42:39.635450 21603 solver.cpp:244]     Train net output #1: loss = 0.209009 (* 1 = 0.209009 loss)\nI0819 02:42:39.720454 21603 sgd_solver.cpp:166] Iteration 60700, lr = 1.5175\nI0819 02:43:27.096731 21603 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0819 02:43:53.354619 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74904\nI0819 02:43:53.354665 21603 solver.cpp:404]     Test net output #1: loss = 0.902057 (* 1 = 0.902057 loss)\nI0819 02:43:53.767047 21603 solver.cpp:228] Iteration 60800, loss = 0.192816\nI0819 02:43:53.767092 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:43:53.767109 21603 
solver.cpp:244]     Train net output #1: loss = 0.192816 (* 1 = 0.192816 loss)\nI0819 02:43:53.858166 21603 sgd_solver.cpp:166] Iteration 60800, lr = 1.52\nI0819 02:44:41.202745 21603 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0819 02:45:07.463557 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78692\nI0819 02:45:07.463603 21603 solver.cpp:404]     Test net output #1: loss = 0.825987 (* 1 = 0.825987 loss)\nI0819 02:45:07.875777 21603 solver.cpp:228] Iteration 60900, loss = 0.145665\nI0819 02:45:07.875821 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:45:07.875838 21603 solver.cpp:244]     Train net output #1: loss = 0.145665 (* 1 = 0.145665 loss)\nI0819 02:45:07.965502 21603 sgd_solver.cpp:166] Iteration 60900, lr = 1.5225\nI0819 02:45:55.300932 21603 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0819 02:46:21.562961 21603 solver.cpp:404]     Test net output #0: accuracy = 0.764\nI0819 02:46:21.563009 21603 solver.cpp:404]     Test net output #1: loss = 0.928102 (* 1 = 0.928102 loss)\nI0819 02:46:21.975178 21603 solver.cpp:228] Iteration 61000, loss = 0.217868\nI0819 02:46:21.975220 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 02:46:21.975237 21603 solver.cpp:244]     Train net output #1: loss = 0.217868 (* 1 = 0.217868 loss)\nI0819 02:46:22.063302 21603 sgd_solver.cpp:166] Iteration 61000, lr = 1.525\nI0819 02:47:09.430696 21603 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0819 02:47:35.695722 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7768\nI0819 02:47:35.695768 21603 solver.cpp:404]     Test net output #1: loss = 0.780132 (* 1 = 0.780132 loss)\nI0819 02:47:36.107614 21603 solver.cpp:228] Iteration 61100, loss = 0.160253\nI0819 02:47:36.107664 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:47:36.107681 21603 solver.cpp:244]     Train net output #1: loss = 0.160253 (* 1 = 0.160253 loss)\nI0819 02:47:36.201730 21603 sgd_solver.cpp:166] 
Iteration 61100, lr = 1.5275\nI0819 02:48:23.608376 21603 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0819 02:48:49.866523 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI0819 02:48:49.866569 21603 solver.cpp:404]     Test net output #1: loss = 0.712969 (* 1 = 0.712969 loss)\nI0819 02:48:50.278247 21603 solver.cpp:228] Iteration 61200, loss = 0.219273\nI0819 02:48:50.278296 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 02:48:50.278314 21603 solver.cpp:244]     Train net output #1: loss = 0.219273 (* 1 = 0.219273 loss)\nI0819 02:48:50.368624 21603 sgd_solver.cpp:166] Iteration 61200, lr = 1.53\nI0819 02:49:37.711410 21603 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0819 02:50:03.957732 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80112\nI0819 02:50:03.957778 21603 solver.cpp:404]     Test net output #1: loss = 0.656792 (* 1 = 0.656792 loss)\nI0819 02:50:04.369936 21603 solver.cpp:228] Iteration 61300, loss = 0.256928\nI0819 02:50:04.369987 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 02:50:04.370004 21603 solver.cpp:244]     Train net output #1: loss = 0.256928 (* 1 = 0.256928 loss)\nI0819 02:50:04.466027 21603 sgd_solver.cpp:166] Iteration 61300, lr = 1.5325\nI0819 02:50:51.895174 21603 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0819 02:51:18.151530 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7876\nI0819 02:51:18.151577 21603 solver.cpp:404]     Test net output #1: loss = 0.761269 (* 1 = 0.761269 loss)\nI0819 02:51:18.563858 21603 solver.cpp:228] Iteration 61400, loss = 0.19654\nI0819 02:51:18.563908 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 02:51:18.563925 21603 solver.cpp:244]     Train net output #1: loss = 0.19654 (* 1 = 0.19654 loss)\nI0819 02:51:18.652632 21603 sgd_solver.cpp:166] Iteration 61400, lr = 1.535\nI0819 02:52:06.194648 21603 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0819 02:52:32.461854 
21603 solver.cpp:404]     Test net output #0: accuracy = 0.70988\nI0819 02:52:32.461899 21603 solver.cpp:404]     Test net output #1: loss = 1.1387 (* 1 = 1.1387 loss)\nI0819 02:52:32.874146 21603 solver.cpp:228] Iteration 61500, loss = 0.180679\nI0819 02:52:32.874194 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:52:32.874212 21603 solver.cpp:244]     Train net output #1: loss = 0.180679 (* 1 = 0.180679 loss)\nI0819 02:52:32.967281 21603 sgd_solver.cpp:166] Iteration 61500, lr = 1.5375\nI0819 02:53:20.555389 21603 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0819 02:53:46.830636 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7244\nI0819 02:53:46.830682 21603 solver.cpp:404]     Test net output #1: loss = 1.09481 (* 1 = 1.09481 loss)\nI0819 02:53:47.242573 21603 solver.cpp:228] Iteration 61600, loss = 0.217919\nI0819 02:53:47.242622 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 02:53:47.242640 21603 solver.cpp:244]     Train net output #1: loss = 0.217919 (* 1 = 0.217919 loss)\nI0819 02:53:47.335947 21603 sgd_solver.cpp:166] Iteration 61600, lr = 1.54\nI0819 02:54:34.828835 21603 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0819 02:55:01.096951 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76604\nI0819 02:55:01.096995 21603 solver.cpp:404]     Test net output #1: loss = 0.85142 (* 1 = 0.85142 loss)\nI0819 02:55:01.509088 21603 solver.cpp:228] Iteration 61700, loss = 0.192927\nI0819 02:55:01.509138 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 02:55:01.509155 21603 solver.cpp:244]     Train net output #1: loss = 0.192927 (* 1 = 0.192927 loss)\nI0819 02:55:01.601230 21603 sgd_solver.cpp:166] Iteration 61700, lr = 1.5425\nI0819 02:55:49.024096 21603 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0819 02:56:15.276408 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77708\nI0819 02:56:15.276455 21603 solver.cpp:404]     Test net output #1: 
loss = 0.763165 (* 1 = 0.763165 loss)\nI0819 02:56:15.688657 21603 solver.cpp:228] Iteration 61800, loss = 0.171638\nI0819 02:56:15.688704 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:56:15.688719 21603 solver.cpp:244]     Train net output #1: loss = 0.171638 (* 1 = 0.171638 loss)\nI0819 02:56:15.786497 21603 sgd_solver.cpp:166] Iteration 61800, lr = 1.545\nI0819 02:57:03.258013 21603 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0819 02:57:29.519263 21603 solver.cpp:404]     Test net output #0: accuracy = 0.741\nI0819 02:57:29.519309 21603 solver.cpp:404]     Test net output #1: loss = 1.04208 (* 1 = 1.04208 loss)\nI0819 02:57:29.931624 21603 solver.cpp:228] Iteration 61900, loss = 0.190099\nI0819 02:57:29.931673 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 02:57:29.931690 21603 solver.cpp:244]     Train net output #1: loss = 0.190099 (* 1 = 0.190099 loss)\nI0819 02:57:30.027032 21603 sgd_solver.cpp:166] Iteration 61900, lr = 1.5475\nI0819 02:58:17.560014 21603 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0819 02:58:43.815692 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74564\nI0819 02:58:43.815737 21603 solver.cpp:404]     Test net output #1: loss = 1.00805 (* 1 = 1.00805 loss)\nI0819 02:58:44.228391 21603 solver.cpp:228] Iteration 62000, loss = 0.188558\nI0819 02:58:44.228441 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 02:58:44.228458 21603 solver.cpp:244]     Train net output #1: loss = 0.188558 (* 1 = 0.188558 loss)\nI0819 02:58:44.324913 21603 sgd_solver.cpp:166] Iteration 62000, lr = 1.55\nI0819 02:59:31.702769 21603 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0819 02:59:57.973572 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8356\nI0819 02:59:57.973618 21603 solver.cpp:404]     Test net output #1: loss = 0.577945 (* 1 = 0.577945 loss)\nI0819 02:59:58.385748 21603 solver.cpp:228] Iteration 62100, loss = 0.163507\nI0819 
02:59:58.385795 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:59:58.385812 21603 solver.cpp:244]     Train net output #1: loss = 0.163507 (* 1 = 0.163507 loss)\nI0819 02:59:58.480288 21603 sgd_solver.cpp:166] Iteration 62100, lr = 1.5525\nI0819 03:00:45.518483 21603 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0819 03:01:11.794909 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80692\nI0819 03:01:11.794955 21603 solver.cpp:404]     Test net output #1: loss = 0.643328 (* 1 = 0.643328 loss)\nI0819 03:01:12.207031 21603 solver.cpp:228] Iteration 62200, loss = 0.163399\nI0819 03:01:12.207064 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:01:12.207079 21603 solver.cpp:244]     Train net output #1: loss = 0.163399 (* 1 = 0.163399 loss)\nI0819 03:01:12.296238 21603 sgd_solver.cpp:166] Iteration 62200, lr = 1.555\nI0819 03:01:59.294805 21603 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0819 03:02:25.546676 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79888\nI0819 03:02:25.546722 21603 solver.cpp:404]     Test net output #1: loss = 0.701679 (* 1 = 0.701679 loss)\nI0819 03:02:25.959203 21603 solver.cpp:228] Iteration 62300, loss = 0.20735\nI0819 03:02:25.959240 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:02:25.959256 21603 solver.cpp:244]     Train net output #1: loss = 0.20735 (* 1 = 0.20735 loss)\nI0819 03:02:26.049629 21603 sgd_solver.cpp:166] Iteration 62300, lr = 1.5575\nI0819 03:03:12.978540 21603 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0819 03:03:39.250882 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6628\nI0819 03:03:39.250931 21603 solver.cpp:404]     Test net output #1: loss = 1.42927 (* 1 = 1.42927 loss)\nI0819 03:03:39.663563 21603 solver.cpp:228] Iteration 62400, loss = 0.198353\nI0819 03:03:39.663590 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:03:39.663605 21603 solver.cpp:244]     
Train net output #1: loss = 0.198353 (* 1 = 0.198353 loss)\nI0819 03:03:39.750957 21603 sgd_solver.cpp:166] Iteration 62400, lr = 1.56\nI0819 03:04:26.673519 21603 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0819 03:04:52.958240 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73504\nI0819 03:04:52.958287 21603 solver.cpp:404]     Test net output #1: loss = 1.05438 (* 1 = 1.05438 loss)\nI0819 03:04:53.370808 21603 solver.cpp:228] Iteration 62500, loss = 0.163848\nI0819 03:04:53.370837 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:04:53.370853 21603 solver.cpp:244]     Train net output #1: loss = 0.163848 (* 1 = 0.163848 loss)\nI0819 03:04:53.458787 21603 sgd_solver.cpp:166] Iteration 62500, lr = 1.5625\nI0819 03:05:40.475268 21603 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0819 03:06:06.658372 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI0819 03:06:06.658416 21603 solver.cpp:404]     Test net output #1: loss = 0.95464 (* 1 = 0.95464 loss)\nI0819 03:06:07.070802 21603 solver.cpp:228] Iteration 62600, loss = 0.192849\nI0819 03:06:07.070842 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:06:07.070858 21603 solver.cpp:244]     Train net output #1: loss = 0.192849 (* 1 = 0.192849 loss)\nI0819 03:06:07.159327 21603 sgd_solver.cpp:166] Iteration 62600, lr = 1.565\nI0819 03:06:54.235108 21603 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0819 03:07:20.300962 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80524\nI0819 03:07:20.301007 21603 solver.cpp:404]     Test net output #1: loss = 0.653058 (* 1 = 0.653058 loss)\nI0819 03:07:20.713583 21603 solver.cpp:228] Iteration 62700, loss = 0.152477\nI0819 03:07:20.713620 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:07:20.713636 21603 solver.cpp:244]     Train net output #1: loss = 0.152477 (* 1 = 0.152477 loss)\nI0819 03:07:20.806704 21603 sgd_solver.cpp:166] Iteration 62700, lr = 
1.5675\nI0819 03:08:07.991420 21603 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0819 03:08:34.235661 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76608\nI0819 03:08:34.235707 21603 solver.cpp:404]     Test net output #1: loss = 0.893784 (* 1 = 0.893784 loss)\nI0819 03:08:34.648416 21603 solver.cpp:228] Iteration 62800, loss = 0.291118\nI0819 03:08:34.648453 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 03:08:34.648469 21603 solver.cpp:244]     Train net output #1: loss = 0.291118 (* 1 = 0.291118 loss)\nI0819 03:08:34.740106 21603 sgd_solver.cpp:166] Iteration 62800, lr = 1.57\nI0819 03:09:22.104383 21603 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0819 03:09:48.364120 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76708\nI0819 03:09:48.364166 21603 solver.cpp:404]     Test net output #1: loss = 0.771723 (* 1 = 0.771723 loss)\nI0819 03:09:48.776576 21603 solver.cpp:228] Iteration 62900, loss = 0.185865\nI0819 03:09:48.776610 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:09:48.776626 21603 solver.cpp:244]     Train net output #1: loss = 0.185865 (* 1 = 0.185865 loss)\nI0819 03:09:48.866546 21603 sgd_solver.cpp:166] Iteration 62900, lr = 1.5725\nI0819 03:10:36.123644 21603 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0819 03:11:02.376180 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71192\nI0819 03:11:02.376226 21603 solver.cpp:404]     Test net output #1: loss = 1.28504 (* 1 = 1.28504 loss)\nI0819 03:11:02.788904 21603 solver.cpp:228] Iteration 63000, loss = 0.173144\nI0819 03:11:02.788944 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:11:02.788959 21603 solver.cpp:244]     Train net output #1: loss = 0.173144 (* 1 = 0.173144 loss)\nI0819 03:11:02.880304 21603 sgd_solver.cpp:166] Iteration 63000, lr = 1.575\nI0819 03:11:50.255164 21603 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0819 03:12:16.494016 21603 
solver.cpp:404]     Test net output #0: accuracy = 0.80568\nI0819 03:12:16.494061 21603 solver.cpp:404]     Test net output #1: loss = 0.634468 (* 1 = 0.634468 loss)\nI0819 03:12:16.906613 21603 solver.cpp:228] Iteration 63100, loss = 0.140523\nI0819 03:12:16.906658 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:12:16.906674 21603 solver.cpp:244]     Train net output #1: loss = 0.140523 (* 1 = 0.140523 loss)\nI0819 03:12:16.995738 21603 sgd_solver.cpp:166] Iteration 63100, lr = 1.5775\nI0819 03:13:04.242209 21603 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0819 03:13:30.505501 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79284\nI0819 03:13:30.505548 21603 solver.cpp:404]     Test net output #1: loss = 0.655714 (* 1 = 0.655714 loss)\nI0819 03:13:30.918112 21603 solver.cpp:228] Iteration 63200, loss = 0.202242\nI0819 03:13:30.918149 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 03:13:30.918164 21603 solver.cpp:244]     Train net output #1: loss = 0.202242 (* 1 = 0.202242 loss)\nI0819 03:13:31.008436 21603 sgd_solver.cpp:166] Iteration 63200, lr = 1.58\nI0819 03:14:18.570297 21603 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0819 03:14:44.820364 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73996\nI0819 03:14:44.820408 21603 solver.cpp:404]     Test net output #1: loss = 0.934945 (* 1 = 0.934945 loss)\nI0819 03:14:45.232965 21603 solver.cpp:228] Iteration 63300, loss = 0.173758\nI0819 03:14:45.233003 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:14:45.233019 21603 solver.cpp:244]     Train net output #1: loss = 0.173758 (* 1 = 0.173758 loss)\nI0819 03:14:45.328379 21603 sgd_solver.cpp:166] Iteration 63300, lr = 1.5825\nI0819 03:15:32.843119 21603 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0819 03:15:59.095542 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82156\nI0819 03:15:59.095588 21603 solver.cpp:404]     Test net output 
#1: loss = 0.624948 (* 1 = 0.624948 loss)\nI0819 03:15:59.508044 21603 solver.cpp:228] Iteration 63400, loss = 0.163627\nI0819 03:15:59.508085 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 03:15:59.508100 21603 solver.cpp:244]     Train net output #1: loss = 0.163627 (* 1 = 0.163627 loss)\nI0819 03:15:59.595262 21603 sgd_solver.cpp:166] Iteration 63400, lr = 1.585\nI0819 03:16:47.068287 21603 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0819 03:17:13.337653 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74352\nI0819 03:17:13.337702 21603 solver.cpp:404]     Test net output #1: loss = 1.16321 (* 1 = 1.16321 loss)\nI0819 03:17:13.750156 21603 solver.cpp:228] Iteration 63500, loss = 0.135094\nI0819 03:17:13.750195 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:17:13.750210 21603 solver.cpp:244]     Train net output #1: loss = 0.135094 (* 1 = 0.135094 loss)\nI0819 03:17:13.843459 21603 sgd_solver.cpp:166] Iteration 63500, lr = 1.5875\nI0819 03:18:01.238981 21603 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0819 03:18:27.495998 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6988\nI0819 03:18:27.496045 21603 solver.cpp:404]     Test net output #1: loss = 1.44107 (* 1 = 1.44107 loss)\nI0819 03:18:27.908278 21603 solver.cpp:228] Iteration 63600, loss = 0.214997\nI0819 03:18:27.908320 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:18:27.908336 21603 solver.cpp:244]     Train net output #1: loss = 0.214997 (* 1 = 0.214997 loss)\nI0819 03:18:27.996021 21603 sgd_solver.cpp:166] Iteration 63600, lr = 1.59\nI0819 03:19:15.537101 21603 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0819 03:19:41.676772 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83368\nI0819 03:19:41.676818 21603 solver.cpp:404]     Test net output #1: loss = 0.551298 (* 1 = 0.551298 loss)\nI0819 03:19:42.089164 21603 solver.cpp:228] Iteration 63700, loss = 0.217363\nI0819 
03:19:42.089207 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:19:42.089222 21603 solver.cpp:244]     Train net output #1: loss = 0.217363 (* 1 = 0.217363 loss)\nI0819 03:19:42.179543 21603 sgd_solver.cpp:166] Iteration 63700, lr = 1.5925\nI0819 03:20:29.573626 21603 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0819 03:20:55.543503 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71716\nI0819 03:20:55.543560 21603 solver.cpp:404]     Test net output #1: loss = 1.13761 (* 1 = 1.13761 loss)\nI0819 03:20:55.955006 21603 solver.cpp:228] Iteration 63800, loss = 0.173277\nI0819 03:20:55.955054 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:20:55.955070 21603 solver.cpp:244]     Train net output #1: loss = 0.173277 (* 1 = 0.173277 loss)\nI0819 03:20:56.050220 21603 sgd_solver.cpp:166] Iteration 63800, lr = 1.595\nI0819 03:21:43.077870 21603 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0819 03:22:09.046103 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78644\nI0819 03:22:09.046165 21603 solver.cpp:404]     Test net output #1: loss = 0.758104 (* 1 = 0.758104 loss)\nI0819 03:22:09.458772 21603 solver.cpp:228] Iteration 63900, loss = 0.209926\nI0819 03:22:09.458822 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:22:09.458838 21603 solver.cpp:244]     Train net output #1: loss = 0.209926 (* 1 = 0.209926 loss)\nI0819 03:22:09.547344 21603 sgd_solver.cpp:166] Iteration 63900, lr = 1.5975\nI0819 03:22:56.585219 21603 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0819 03:23:22.556277 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78648\nI0819 03:23:22.556334 21603 solver.cpp:404]     Test net output #1: loss = 0.829166 (* 1 = 0.829166 loss)\nI0819 03:23:22.967519 21603 solver.cpp:228] Iteration 64000, loss = 0.300339\nI0819 03:23:22.967567 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 03:23:22.967584 21603 solver.cpp:244]    
 Train net output #1: loss = 0.300339 (* 1 = 0.300339 loss)\nI0819 03:23:23.059648 21603 sgd_solver.cpp:166] Iteration 64000, lr = 1.6\nI0819 03:24:10.018777 21603 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0819 03:24:35.988608 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79796\nI0819 03:24:35.988665 21603 solver.cpp:404]     Test net output #1: loss = 0.699124 (* 1 = 0.699124 loss)\nI0819 03:24:36.399739 21603 solver.cpp:228] Iteration 64100, loss = 0.197951\nI0819 03:24:36.399775 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:24:36.399792 21603 solver.cpp:244]     Train net output #1: loss = 0.197951 (* 1 = 0.197951 loss)\nI0819 03:24:36.492486 21603 sgd_solver.cpp:166] Iteration 64100, lr = 1.6025\nI0819 03:25:23.474573 21603 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0819 03:25:49.442484 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75952\nI0819 03:25:49.442544 21603 solver.cpp:404]     Test net output #1: loss = 0.856059 (* 1 = 0.856059 loss)\nI0819 03:25:49.854465 21603 solver.cpp:228] Iteration 64200, loss = 0.214598\nI0819 03:25:49.854502 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:25:49.854518 21603 solver.cpp:244]     Train net output #1: loss = 0.214598 (* 1 = 0.214598 loss)\nI0819 03:25:49.949647 21603 sgd_solver.cpp:166] Iteration 64200, lr = 1.605\nI0819 03:26:36.958153 21603 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0819 03:27:02.929164 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80828\nI0819 03:27:02.929221 21603 solver.cpp:404]     Test net output #1: loss = 0.699507 (* 1 = 0.699507 loss)\nI0819 03:27:03.340415 21603 solver.cpp:228] Iteration 64300, loss = 0.126999\nI0819 03:27:03.340451 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:27:03.340467 21603 solver.cpp:244]     Train net output #1: loss = 0.126999 (* 1 = 0.126999 loss)\nI0819 03:27:03.433717 21603 sgd_solver.cpp:166] Iteration 64300, lr 
= 1.6075\nI0819 03:27:50.411774 21603 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0819 03:28:16.379804 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7958\nI0819 03:28:16.379848 21603 solver.cpp:404]     Test net output #1: loss = 0.689663 (* 1 = 0.689663 loss)\nI0819 03:28:16.791395 21603 solver.cpp:228] Iteration 64400, loss = 0.28659\nI0819 03:28:16.791436 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 03:28:16.791453 21603 solver.cpp:244]     Train net output #1: loss = 0.28659 (* 1 = 0.28659 loss)\nI0819 03:28:16.887609 21603 sgd_solver.cpp:166] Iteration 64400, lr = 1.61\nI0819 03:29:04.036396 21603 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0819 03:29:30.008924 21603 solver.cpp:404]     Test net output #0: accuracy = 0.61392\nI0819 03:29:30.008965 21603 solver.cpp:404]     Test net output #1: loss = 1.67558 (* 1 = 1.67558 loss)\nI0819 03:29:30.420397 21603 solver.cpp:228] Iteration 64500, loss = 0.260885\nI0819 03:29:30.420436 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 03:29:30.420454 21603 solver.cpp:244]     Train net output #1: loss = 0.260885 (* 1 = 0.260885 loss)\nI0819 03:29:30.510833 21603 sgd_solver.cpp:166] Iteration 64500, lr = 1.6125\nI0819 03:30:17.734995 21603 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0819 03:30:43.709412 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79704\nI0819 03:30:43.709460 21603 solver.cpp:404]     Test net output #1: loss = 0.738298 (* 1 = 0.738298 loss)\nI0819 03:30:44.121912 21603 solver.cpp:228] Iteration 64600, loss = 0.170274\nI0819 03:30:44.121953 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:30:44.121978 21603 solver.cpp:244]     Train net output #1: loss = 0.170274 (* 1 = 0.170274 loss)\nI0819 03:30:44.212563 21603 sgd_solver.cpp:166] Iteration 64600, lr = 1.615\nI0819 03:31:31.442947 21603 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0819 03:31:57.416793 21603 solver.cpp:404]   
  Test net output #0: accuracy = 0.82364\nI0819 03:31:57.416842 21603 solver.cpp:404]     Test net output #1: loss = 0.596847 (* 1 = 0.596847 loss)\nI0819 03:31:57.829470 21603 solver.cpp:228] Iteration 64700, loss = 0.247261\nI0819 03:31:57.829511 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:31:57.829536 21603 solver.cpp:244]     Train net output #1: loss = 0.247261 (* 1 = 0.247261 loss)\nI0819 03:31:57.921883 21603 sgd_solver.cpp:166] Iteration 64700, lr = 1.6175\nI0819 03:32:45.243244 21603 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0819 03:33:11.543761 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68876\nI0819 03:33:11.543826 21603 solver.cpp:404]     Test net output #1: loss = 1.38734 (* 1 = 1.38734 loss)\nI0819 03:33:11.957303 21603 solver.cpp:228] Iteration 64800, loss = 0.229259\nI0819 03:33:11.957355 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 03:33:11.957373 21603 solver.cpp:244]     Train net output #1: loss = 0.229258 (* 1 = 0.229258 loss)\nI0819 03:33:12.041208 21603 sgd_solver.cpp:166] Iteration 64800, lr = 1.62\nI0819 03:33:59.498456 21603 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0819 03:34:25.815194 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75556\nI0819 03:34:25.815261 21603 solver.cpp:404]     Test net output #1: loss = 1.02428 (* 1 = 1.02428 loss)\nI0819 03:34:26.229215 21603 solver.cpp:228] Iteration 64900, loss = 0.198717\nI0819 03:34:26.229271 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 03:34:26.229297 21603 solver.cpp:244]     Train net output #1: loss = 0.198717 (* 1 = 0.198717 loss)\nI0819 03:34:26.317438 21603 sgd_solver.cpp:166] Iteration 64900, lr = 1.6225\nI0819 03:35:13.629284 21603 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0819 03:35:39.927717 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7642\nI0819 03:35:39.927788 21603 solver.cpp:404]     Test net output #1: loss = 0.734279 (* 1 = 
0.734279 loss)\nI0819 03:35:40.341728 21603 solver.cpp:228] Iteration 65000, loss = 0.238857\nI0819 03:35:40.341783 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 03:35:40.341807 21603 solver.cpp:244]     Train net output #1: loss = 0.238857 (* 1 = 0.238857 loss)\nI0819 03:35:40.436864 21603 sgd_solver.cpp:166] Iteration 65000, lr = 1.625\nI0819 03:36:27.943785 21603 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0819 03:36:54.235110 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7716\nI0819 03:36:54.235188 21603 solver.cpp:404]     Test net output #1: loss = 0.827678 (* 1 = 0.827678 loss)\nI0819 03:36:54.648854 21603 solver.cpp:228] Iteration 65100, loss = 0.213251\nI0819 03:36:54.648908 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 03:36:54.648934 21603 solver.cpp:244]     Train net output #1: loss = 0.21325 (* 1 = 0.21325 loss)\nI0819 03:36:54.734412 21603 sgd_solver.cpp:166] Iteration 65100, lr = 1.6275\nI0819 03:37:41.985806 21603 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0819 03:38:07.976516 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78628\nI0819 03:38:07.976568 21603 solver.cpp:404]     Test net output #1: loss = 0.721281 (* 1 = 0.721281 loss)\nI0819 03:38:08.387588 21603 solver.cpp:228] Iteration 65200, loss = 0.215114\nI0819 03:38:08.387625 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 03:38:08.387641 21603 solver.cpp:244]     Train net output #1: loss = 0.215114 (* 1 = 0.215114 loss)\nI0819 03:38:08.476886 21603 sgd_solver.cpp:166] Iteration 65200, lr = 1.63\nI0819 03:38:55.631209 21603 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0819 03:39:21.621816 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74556\nI0819 03:39:21.621875 21603 solver.cpp:404]     Test net output #1: loss = 1.02887 (* 1 = 1.02887 loss)\nI0819 03:39:22.033108 21603 solver.cpp:228] Iteration 65300, loss = 0.170244\nI0819 03:39:22.033144 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:39:22.033160 21603 solver.cpp:244]     Train net output #1: loss = 0.170244 (* 1 = 0.170244 loss)\nI0819 03:39:22.127307 21603 sgd_solver.cpp:166] Iteration 65300, lr = 1.6325\nI0819 03:40:09.388236 21603 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0819 03:40:35.379415 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71584\nI0819 03:40:35.379473 21603 solver.cpp:404]     Test net output #1: loss = 1.14723 (* 1 = 1.14723 loss)\nI0819 03:40:35.790844 21603 solver.cpp:228] Iteration 65400, loss = 0.274607\nI0819 03:40:35.790880 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 03:40:35.790896 21603 solver.cpp:244]     Train net output #1: loss = 0.274607 (* 1 = 0.274607 loss)\nI0819 03:40:35.883816 21603 sgd_solver.cpp:166] Iteration 65400, lr = 1.635\nI0819 03:41:23.262929 21603 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0819 03:41:49.253882 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66784\nI0819 03:41:49.253940 21603 solver.cpp:404]     Test net output #1: loss = 1.35922 (* 1 = 1.35922 loss)\nI0819 03:41:49.665320 21603 solver.cpp:228] Iteration 65500, loss = 0.251434\nI0819 03:41:49.665359 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:41:49.665374 21603 solver.cpp:244]     Train net output #1: loss = 0.251434 (* 1 = 0.251434 loss)\nI0819 03:41:49.759506 21603 sgd_solver.cpp:166] Iteration 65500, lr = 1.6375\nI0819 03:42:37.001660 21603 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0819 03:43:02.990360 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72236\nI0819 03:43:02.990417 21603 solver.cpp:404]     Test net output #1: loss = 1.1692 (* 1 = 1.1692 loss)\nI0819 03:43:03.401911 21603 solver.cpp:228] Iteration 65600, loss = 0.200084\nI0819 03:43:03.401949 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 03:43:03.401965 21603 solver.cpp:244]     Train net output #1: loss = 
0.200084 (* 1 = 0.200084 loss)\nI0819 03:43:03.493469 21603 sgd_solver.cpp:166] Iteration 65600, lr = 1.64\nI0819 03:43:50.720291 21603 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0819 03:44:16.708287 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7828\nI0819 03:44:16.708344 21603 solver.cpp:404]     Test net output #1: loss = 0.681582 (* 1 = 0.681582 loss)\nI0819 03:44:17.119657 21603 solver.cpp:228] Iteration 65700, loss = 0.185678\nI0819 03:44:17.119693 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:44:17.119709 21603 solver.cpp:244]     Train net output #1: loss = 0.185678 (* 1 = 0.185678 loss)\nI0819 03:44:17.213851 21603 sgd_solver.cpp:166] Iteration 65700, lr = 1.6425\nI0819 03:45:04.600693 21603 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0819 03:45:30.585587 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78392\nI0819 03:45:30.585644 21603 solver.cpp:404]     Test net output #1: loss = 0.742822 (* 1 = 0.742822 loss)\nI0819 03:45:30.996878 21603 solver.cpp:228] Iteration 65800, loss = 0.174555\nI0819 03:45:30.996914 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:45:30.996930 21603 solver.cpp:244]     Train net output #1: loss = 0.174555 (* 1 = 0.174555 loss)\nI0819 03:45:31.091076 21603 sgd_solver.cpp:166] Iteration 65800, lr = 1.645\nI0819 03:46:18.399622 21603 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0819 03:46:44.386428 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81744\nI0819 03:46:44.386487 21603 solver.cpp:404]     Test net output #1: loss = 0.586142 (* 1 = 0.586142 loss)\nI0819 03:46:44.798044 21603 solver.cpp:228] Iteration 65900, loss = 0.188497\nI0819 03:46:44.798082 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:46:44.798099 21603 solver.cpp:244]     Train net output #1: loss = 0.188497 (* 1 = 0.188497 loss)\nI0819 03:46:44.887869 21603 sgd_solver.cpp:166] Iteration 65900, lr = 1.6475\nI0819 
03:47:32.248692 21603 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0819 03:47:58.234733 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81652\nI0819 03:47:58.234787 21603 solver.cpp:404]     Test net output #1: loss = 0.636908 (* 1 = 0.636908 loss)\nI0819 03:47:58.646064 21603 solver.cpp:228] Iteration 66000, loss = 0.125251\nI0819 03:47:58.646100 21603 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:47:58.646116 21603 solver.cpp:244]     Train net output #1: loss = 0.125251 (* 1 = 0.125251 loss)\nI0819 03:47:58.739400 21603 sgd_solver.cpp:166] Iteration 66000, lr = 1.65\nI0819 03:48:45.912175 21603 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0819 03:49:11.902227 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76936\nI0819 03:49:11.902274 21603 solver.cpp:404]     Test net output #1: loss = 0.790766 (* 1 = 0.790766 loss)\nI0819 03:49:12.313660 21603 solver.cpp:228] Iteration 66100, loss = 0.196766\nI0819 03:49:12.313697 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:49:12.313714 21603 solver.cpp:244]     Train net output #1: loss = 0.196766 (* 1 = 0.196766 loss)\nI0819 03:49:12.409072 21603 sgd_solver.cpp:166] Iteration 66100, lr = 1.6525\nI0819 03:49:59.609844 21603 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0819 03:50:25.598585 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78964\nI0819 03:50:25.598639 21603 solver.cpp:404]     Test net output #1: loss = 0.703361 (* 1 = 0.703361 loss)\nI0819 03:50:26.010185 21603 solver.cpp:228] Iteration 66200, loss = 0.143112\nI0819 03:50:26.010226 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:50:26.010243 21603 solver.cpp:244]     Train net output #1: loss = 0.143112 (* 1 = 0.143112 loss)\nI0819 03:50:26.101766 21603 sgd_solver.cpp:166] Iteration 66200, lr = 1.655\nI0819 03:51:13.359033 21603 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0819 03:51:39.346475 21603 solver.cpp:404]     Test 
net output #0: accuracy = 0.7774\nI0819 03:51:39.346523 21603 solver.cpp:404]     Test net output #1: loss = 0.794087 (* 1 = 0.794087 loss)\nI0819 03:51:39.757647 21603 solver.cpp:228] Iteration 66300, loss = 0.182914\nI0819 03:51:39.757683 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:51:39.757699 21603 solver.cpp:244]     Train net output #1: loss = 0.182914 (* 1 = 0.182914 loss)\nI0819 03:51:39.847455 21603 sgd_solver.cpp:166] Iteration 66300, lr = 1.6575\nI0819 03:52:27.052064 21603 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0819 03:52:53.037776 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71812\nI0819 03:52:53.037824 21603 solver.cpp:404]     Test net output #1: loss = 1.14877 (* 1 = 1.14877 loss)\nI0819 03:52:53.449635 21603 solver.cpp:228] Iteration 66400, loss = 0.156318\nI0819 03:52:53.449687 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:52:53.449717 21603 solver.cpp:244]     Train net output #1: loss = 0.156318 (* 1 = 0.156318 loss)\nI0819 03:52:53.541374 21603 sgd_solver.cpp:166] Iteration 66400, lr = 1.66\nI0819 03:53:40.811260 21603 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0819 03:54:06.798023 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76968\nI0819 03:54:06.798072 21603 solver.cpp:404]     Test net output #1: loss = 0.783223 (* 1 = 0.783223 loss)\nI0819 03:54:07.209579 21603 solver.cpp:228] Iteration 66500, loss = 0.18394\nI0819 03:54:07.209621 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:54:07.209638 21603 solver.cpp:244]     Train net output #1: loss = 0.183939 (* 1 = 0.183939 loss)\nI0819 03:54:07.301440 21603 sgd_solver.cpp:166] Iteration 66500, lr = 1.6625\nI0819 03:54:54.638531 21603 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0819 03:55:20.623589 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67256\nI0819 03:55:20.623636 21603 solver.cpp:404]     Test net output #1: loss = 1.36575 (* 1 = 
1.36575 loss)\nI0819 03:55:21.035189 21603 solver.cpp:228] Iteration 66600, loss = 0.126319\nI0819 03:55:21.035229 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:55:21.035248 21603 solver.cpp:244]     Train net output #1: loss = 0.126319 (* 1 = 0.126319 loss)\nI0819 03:55:21.130414 21603 sgd_solver.cpp:166] Iteration 66600, lr = 1.665\nI0819 03:56:08.262943 21603 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0819 03:56:34.241953 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7994\nI0819 03:56:34.242002 21603 solver.cpp:404]     Test net output #1: loss = 0.643089 (* 1 = 0.643089 loss)\nI0819 03:56:34.653362 21603 solver.cpp:228] Iteration 66700, loss = 0.219448\nI0819 03:56:34.653403 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:56:34.653419 21603 solver.cpp:244]     Train net output #1: loss = 0.219448 (* 1 = 0.219448 loss)\nI0819 03:56:34.744235 21603 sgd_solver.cpp:166] Iteration 66700, lr = 1.6675\nI0819 03:57:21.964860 21603 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0819 03:57:47.944581 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74288\nI0819 03:57:47.944639 21603 solver.cpp:404]     Test net output #1: loss = 0.870145 (* 1 = 0.870145 loss)\nI0819 03:57:48.355919 21603 solver.cpp:228] Iteration 66800, loss = 0.128821\nI0819 03:57:48.355957 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:57:48.355973 21603 solver.cpp:244]     Train net output #1: loss = 0.128821 (* 1 = 0.128821 loss)\nI0819 03:57:48.447809 21603 sgd_solver.cpp:166] Iteration 66800, lr = 1.67\nI0819 03:58:35.826519 21603 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0819 03:59:01.812151 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79612\nI0819 03:59:01.812212 21603 solver.cpp:404]     Test net output #1: loss = 0.714433 (* 1 = 0.714433 loss)\nI0819 03:59:02.223789 21603 solver.cpp:228] Iteration 66900, loss = 0.124298\nI0819 03:59:02.223829 21603 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:59:02.223845 21603 solver.cpp:244]     Train net output #1: loss = 0.124298 (* 1 = 0.124298 loss)\nI0819 03:59:02.316018 21603 sgd_solver.cpp:166] Iteration 66900, lr = 1.6725\nI0819 03:59:49.797771 21603 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0819 04:00:15.779461 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82308\nI0819 04:00:15.779517 21603 solver.cpp:404]     Test net output #1: loss = 0.616808 (* 1 = 0.616808 loss)\nI0819 04:00:16.190598 21603 solver.cpp:228] Iteration 67000, loss = 0.154964\nI0819 04:00:16.190637 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:00:16.190654 21603 solver.cpp:244]     Train net output #1: loss = 0.154963 (* 1 = 0.154963 loss)\nI0819 04:00:16.277632 21603 sgd_solver.cpp:166] Iteration 67000, lr = 1.675\nI0819 04:01:03.392642 21603 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0819 04:01:29.373127 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77552\nI0819 04:01:29.373172 21603 solver.cpp:404]     Test net output #1: loss = 0.827816 (* 1 = 0.827816 loss)\nI0819 04:01:29.784021 21603 solver.cpp:228] Iteration 67100, loss = 0.218921\nI0819 04:01:29.784059 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:01:29.784075 21603 solver.cpp:244]     Train net output #1: loss = 0.218921 (* 1 = 0.218921 loss)\nI0819 04:01:29.879281 21603 sgd_solver.cpp:166] Iteration 67100, lr = 1.6775\nI0819 04:02:17.097765 21603 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0819 04:02:43.083719 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70132\nI0819 04:02:43.083766 21603 solver.cpp:404]     Test net output #1: loss = 1.26085 (* 1 = 1.26085 loss)\nI0819 04:02:43.494765 21603 solver.cpp:228] Iteration 67200, loss = 0.188398\nI0819 04:02:43.494801 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:02:43.494817 21603 solver.cpp:244]     Train net output #1: 
loss = 0.188398 (* 1 = 0.188398 loss)\nI0819 04:02:43.583598 21603 sgd_solver.cpp:166] Iteration 67200, lr = 1.68\nI0819 04:03:30.809310 21603 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0819 04:03:56.792592 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8052\nI0819 04:03:56.792639 21603 solver.cpp:404]     Test net output #1: loss = 0.707576 (* 1 = 0.707576 loss)\nI0819 04:03:57.203752 21603 solver.cpp:228] Iteration 67300, loss = 0.215915\nI0819 04:03:57.203788 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:03:57.203804 21603 solver.cpp:244]     Train net output #1: loss = 0.215915 (* 1 = 0.215915 loss)\nI0819 04:03:57.292898 21603 sgd_solver.cpp:166] Iteration 67300, lr = 1.6825\nI0819 04:04:44.668296 21603 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0819 04:05:10.653439 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78244\nI0819 04:05:10.653484 21603 solver.cpp:404]     Test net output #1: loss = 0.827231 (* 1 = 0.827231 loss)\nI0819 04:05:11.065479 21603 solver.cpp:228] Iteration 67400, loss = 0.190637\nI0819 04:05:11.065522 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:05:11.065538 21603 solver.cpp:244]     Train net output #1: loss = 0.190637 (* 1 = 0.190637 loss)\nI0819 04:05:11.156833 21603 sgd_solver.cpp:166] Iteration 67400, lr = 1.685\nI0819 04:05:58.544883 21603 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0819 04:06:24.528852 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77792\nI0819 04:06:24.528903 21603 solver.cpp:404]     Test net output #1: loss = 0.75615 (* 1 = 0.75615 loss)\nI0819 04:06:24.941136 21603 solver.cpp:228] Iteration 67500, loss = 0.202833\nI0819 04:06:24.941185 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 04:06:24.941202 21603 solver.cpp:244]     Train net output #1: loss = 0.202833 (* 1 = 0.202833 loss)\nI0819 04:06:25.031738 21603 sgd_solver.cpp:166] Iteration 67500, lr = 1.6875\nI0819 
04:07:12.341331 21603 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0819 04:07:38.323125 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83688\nI0819 04:07:38.323171 21603 solver.cpp:404]     Test net output #1: loss = 0.539187 (* 1 = 0.539187 loss)\nI0819 04:07:38.735374 21603 solver.cpp:228] Iteration 67600, loss = 0.172019\nI0819 04:07:38.735417 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:07:38.735433 21603 solver.cpp:244]     Train net output #1: loss = 0.172019 (* 1 = 0.172019 loss)\nI0819 04:07:38.822957 21603 sgd_solver.cpp:166] Iteration 67600, lr = 1.69\nI0819 04:08:26.001502 21603 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0819 04:08:51.983968 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74312\nI0819 04:08:51.984014 21603 solver.cpp:404]     Test net output #1: loss = 0.92823 (* 1 = 0.92823 loss)\nI0819 04:08:52.395411 21603 solver.cpp:228] Iteration 67700, loss = 0.178474\nI0819 04:08:52.395447 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 04:08:52.395462 21603 solver.cpp:244]     Train net output #1: loss = 0.178474 (* 1 = 0.178474 loss)\nI0819 04:08:52.486997 21603 sgd_solver.cpp:166] Iteration 67700, lr = 1.6925\nI0819 04:09:39.704630 21603 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0819 04:10:05.690032 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74672\nI0819 04:10:05.690085 21603 solver.cpp:404]     Test net output #1: loss = 0.964291 (* 1 = 0.964291 loss)\nI0819 04:10:06.101434 21603 solver.cpp:228] Iteration 67800, loss = 0.209648\nI0819 04:10:06.101470 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 04:10:06.101486 21603 solver.cpp:244]     Train net output #1: loss = 0.209648 (* 1 = 0.209648 loss)\nI0819 04:10:06.190686 21603 sgd_solver.cpp:166] Iteration 67800, lr = 1.695\nI0819 04:10:53.394783 21603 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0819 04:11:19.381947 21603 solver.cpp:404]     Test net 
output #0: accuracy = 0.74316\nI0819 04:11:19.381994 21603 solver.cpp:404]     Test net output #1: loss = 0.939814 (* 1 = 0.939814 loss)\nI0819 04:11:19.792975 21603 solver.cpp:228] Iteration 67900, loss = 0.109919\nI0819 04:11:19.793016 21603 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:11:19.793031 21603 solver.cpp:244]     Train net output #1: loss = 0.109919 (* 1 = 0.109919 loss)\nI0819 04:11:19.887051 21603 sgd_solver.cpp:166] Iteration 67900, lr = 1.6975\nI0819 04:12:07.230489 21603 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0819 04:12:33.212867 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74936\nI0819 04:12:33.212911 21603 solver.cpp:404]     Test net output #1: loss = 0.906957 (* 1 = 0.906957 loss)\nI0819 04:12:33.624722 21603 solver.cpp:228] Iteration 68000, loss = 0.179754\nI0819 04:12:33.624771 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:12:33.624788 21603 solver.cpp:244]     Train net output #1: loss = 0.179753 (* 1 = 0.179753 loss)\nI0819 04:12:33.712296 21603 sgd_solver.cpp:166] Iteration 68000, lr = 1.7\nI0819 04:13:20.948518 21603 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0819 04:13:46.934203 21603 solver.cpp:404]     Test net output #0: accuracy = 0.61028\nI0819 04:13:46.934250 21603 solver.cpp:404]     Test net output #1: loss = 2.09998 (* 1 = 2.09998 loss)\nI0819 04:13:47.345806 21603 solver.cpp:228] Iteration 68100, loss = 0.14427\nI0819 04:13:47.345844 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:13:47.345860 21603 solver.cpp:244]     Train net output #1: loss = 0.14427 (* 1 = 0.14427 loss)\nI0819 04:13:47.432345 21603 sgd_solver.cpp:166] Iteration 68100, lr = 1.7025\nI0819 04:14:34.719815 21603 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0819 04:15:00.703060 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73856\nI0819 04:15:00.703109 21603 solver.cpp:404]     Test net output #1: loss = 0.872709 (* 1 = 0.872709 
loss)\nI0819 04:15:01.114359 21603 solver.cpp:228] Iteration 68200, loss = 0.225427\nI0819 04:15:01.114395 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:15:01.114411 21603 solver.cpp:244]     Train net output #1: loss = 0.225427 (* 1 = 0.225427 loss)\nI0819 04:15:01.209998 21603 sgd_solver.cpp:166] Iteration 68200, lr = 1.705\nI0819 04:15:48.411432 21603 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0819 04:16:14.396987 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74712\nI0819 04:16:14.397037 21603 solver.cpp:404]     Test net output #1: loss = 0.988144 (* 1 = 0.988144 loss)\nI0819 04:16:14.808280 21603 solver.cpp:228] Iteration 68300, loss = 0.196411\nI0819 04:16:14.808318 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:16:14.808334 21603 solver.cpp:244]     Train net output #1: loss = 0.19641 (* 1 = 0.19641 loss)\nI0819 04:16:14.895496 21603 sgd_solver.cpp:166] Iteration 68300, lr = 1.7075\nI0819 04:17:02.129251 21603 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0819 04:17:28.113052 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73056\nI0819 04:17:28.113097 21603 solver.cpp:404]     Test net output #1: loss = 1.06438 (* 1 = 1.06438 loss)\nI0819 04:17:28.524601 21603 solver.cpp:228] Iteration 68400, loss = 0.197121\nI0819 04:17:28.524646 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:17:28.524662 21603 solver.cpp:244]     Train net output #1: loss = 0.197121 (* 1 = 0.197121 loss)\nI0819 04:17:28.612401 21603 sgd_solver.cpp:166] Iteration 68400, lr = 1.71\nI0819 04:18:15.786917 21603 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0819 04:18:41.772743 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73788\nI0819 04:18:41.772790 21603 solver.cpp:404]     Test net output #1: loss = 0.919297 (* 1 = 0.919297 loss)\nI0819 04:18:42.184032 21603 solver.cpp:228] Iteration 68500, loss = 0.232697\nI0819 04:18:42.184077 21603 solver.cpp:244]     
Train net output #0: accuracy = 0.904\nI0819 04:18:42.184092 21603 solver.cpp:244]     Train net output #1: loss = 0.232697 (* 1 = 0.232697 loss)\nI0819 04:18:42.276875 21603 sgd_solver.cpp:166] Iteration 68500, lr = 1.7125\nI0819 04:19:29.515983 21603 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0819 04:19:55.497452 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78488\nI0819 04:19:55.497498 21603 solver.cpp:404]     Test net output #1: loss = 0.777918 (* 1 = 0.777918 loss)\nI0819 04:19:55.909833 21603 solver.cpp:228] Iteration 68600, loss = 0.205238\nI0819 04:19:55.909883 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 04:19:55.909899 21603 solver.cpp:244]     Train net output #1: loss = 0.205238 (* 1 = 0.205238 loss)\nI0819 04:19:56.002997 21603 sgd_solver.cpp:166] Iteration 68600, lr = 1.715\nI0819 04:20:43.245049 21603 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0819 04:21:09.228706 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7136\nI0819 04:21:09.228763 21603 solver.cpp:404]     Test net output #1: loss = 1.20086 (* 1 = 1.20086 loss)\nI0819 04:21:09.641053 21603 solver.cpp:228] Iteration 68700, loss = 0.180115\nI0819 04:21:09.641100 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:21:09.641118 21603 solver.cpp:244]     Train net output #1: loss = 0.180115 (* 1 = 0.180115 loss)\nI0819 04:21:09.733008 21603 sgd_solver.cpp:166] Iteration 68700, lr = 1.7175\nI0819 04:21:56.509934 21603 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0819 04:22:22.494544 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79888\nI0819 04:22:22.494602 21603 solver.cpp:404]     Test net output #1: loss = 0.65787 (* 1 = 0.65787 loss)\nI0819 04:22:22.906385 21603 solver.cpp:228] Iteration 68800, loss = 0.196444\nI0819 04:22:22.906433 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 04:22:22.906450 21603 solver.cpp:244]     Train net output #1: loss = 0.196443 (* 1 = 
0.196443 loss)\nI0819 04:22:22.996332 21603 sgd_solver.cpp:166] Iteration 68800, lr = 1.72\nI0819 04:23:09.841776 21603 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0819 04:23:35.821219 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70668\nI0819 04:23:35.821277 21603 solver.cpp:404]     Test net output #1: loss = 1.07391 (* 1 = 1.07391 loss)\nI0819 04:23:36.233952 21603 solver.cpp:228] Iteration 68900, loss = 0.287508\nI0819 04:23:36.233999 21603 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0819 04:23:36.234015 21603 solver.cpp:244]     Train net output #1: loss = 0.287508 (* 1 = 0.287508 loss)\nI0819 04:23:36.327049 21603 sgd_solver.cpp:166] Iteration 68900, lr = 1.7225\nI0819 04:24:23.216857 21603 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0819 04:24:49.195292 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81524\nI0819 04:24:49.195349 21603 solver.cpp:404]     Test net output #1: loss = 0.561912 (* 1 = 0.561912 loss)\nI0819 04:24:49.607566 21603 solver.cpp:228] Iteration 69000, loss = 0.161016\nI0819 04:24:49.607614 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:24:49.607631 21603 solver.cpp:244]     Train net output #1: loss = 0.161016 (* 1 = 0.161016 loss)\nI0819 04:24:49.700116 21603 sgd_solver.cpp:166] Iteration 69000, lr = 1.725\nI0819 04:25:36.888556 21603 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0819 04:26:02.871057 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78392\nI0819 04:26:02.871112 21603 solver.cpp:404]     Test net output #1: loss = 0.77432 (* 1 = 0.77432 loss)\nI0819 04:26:03.283669 21603 solver.cpp:228] Iteration 69100, loss = 0.267502\nI0819 04:26:03.283717 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:26:03.283735 21603 solver.cpp:244]     Train net output #1: loss = 0.267502 (* 1 = 0.267502 loss)\nI0819 04:26:03.370966 21603 sgd_solver.cpp:166] Iteration 69100, lr = 1.7275\nI0819 04:26:50.662746 21603 
solver.cpp:337] Iteration 69200, Testing net (#0)\nI0819 04:27:16.641258 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76704\nI0819 04:27:16.641320 21603 solver.cpp:404]     Test net output #1: loss = 0.915004 (* 1 = 0.915004 loss)\nI0819 04:27:17.053794 21603 solver.cpp:228] Iteration 69200, loss = 0.22812\nI0819 04:27:17.053843 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 04:27:17.053859 21603 solver.cpp:244]     Train net output #1: loss = 0.22812 (* 1 = 0.22812 loss)\nI0819 04:27:17.148695 21603 sgd_solver.cpp:166] Iteration 69200, lr = 1.73\nI0819 04:28:04.541447 21603 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0819 04:28:30.524297 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73232\nI0819 04:28:30.524358 21603 solver.cpp:404]     Test net output #1: loss = 1.09214 (* 1 = 1.09214 loss)\nI0819 04:28:30.937036 21603 solver.cpp:228] Iteration 69300, loss = 0.211769\nI0819 04:28:30.937083 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 04:28:30.937100 21603 solver.cpp:244]     Train net output #1: loss = 0.211769 (* 1 = 0.211769 loss)\nI0819 04:28:31.034286 21603 sgd_solver.cpp:166] Iteration 69300, lr = 1.7325\nI0819 04:29:18.421375 21603 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0819 04:29:44.405771 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72372\nI0819 04:29:44.405830 21603 solver.cpp:404]     Test net output #1: loss = 1.17857 (* 1 = 1.17857 loss)\nI0819 04:29:44.818325 21603 solver.cpp:228] Iteration 69400, loss = 0.289874\nI0819 04:29:44.818372 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:29:44.818388 21603 solver.cpp:244]     Train net output #1: loss = 0.289874 (* 1 = 0.289874 loss)\nI0819 04:29:44.906055 21603 sgd_solver.cpp:166] Iteration 69400, lr = 1.735\nI0819 04:30:32.359835 21603 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0819 04:30:58.353844 21603 solver.cpp:404]     Test net output #0: accuracy = 
0.78124\nI0819 04:30:58.353904 21603 solver.cpp:404]     Test net output #1: loss = 0.776402 (* 1 = 0.776402 loss)\nI0819 04:30:58.765362 21603 solver.cpp:228] Iteration 69500, loss = 0.167953\nI0819 04:30:58.765409 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 04:30:58.765426 21603 solver.cpp:244]     Train net output #1: loss = 0.167953 (* 1 = 0.167953 loss)\nI0819 04:30:58.859382 21603 sgd_solver.cpp:166] Iteration 69500, lr = 1.7375\nI0819 04:31:46.211863 21603 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0819 04:32:12.210721 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77328\nI0819 04:32:12.210782 21603 solver.cpp:404]     Test net output #1: loss = 0.807579 (* 1 = 0.807579 loss)\nI0819 04:32:12.623617 21603 solver.cpp:228] Iteration 69600, loss = 0.251165\nI0819 04:32:12.623664 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:32:12.623680 21603 solver.cpp:244]     Train net output #1: loss = 0.251165 (* 1 = 0.251165 loss)\nI0819 04:32:12.718354 21603 sgd_solver.cpp:166] Iteration 69600, lr = 1.74\nI0819 04:33:00.034795 21603 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0819 04:33:26.026516 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70348\nI0819 04:33:26.026576 21603 solver.cpp:404]     Test net output #1: loss = 1.37546 (* 1 = 1.37546 loss)\nI0819 04:33:26.437592 21603 solver.cpp:228] Iteration 69700, loss = 0.239326\nI0819 04:33:26.437635 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:33:26.437651 21603 solver.cpp:244]     Train net output #1: loss = 0.239326 (* 1 = 0.239326 loss)\nI0819 04:33:26.527431 21603 sgd_solver.cpp:166] Iteration 69700, lr = 1.7425\nI0819 04:34:13.825387 21603 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0819 04:34:39.813091 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73036\nI0819 04:34:39.813153 21603 solver.cpp:404]     Test net output #1: loss = 1.00855 (* 1 = 1.00855 loss)\nI0819 
04:34:40.224651 21603 solver.cpp:228] Iteration 69800, loss = 0.231589\nI0819 04:34:40.224696 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 04:34:40.224712 21603 solver.cpp:244]     Train net output #1: loss = 0.231589 (* 1 = 0.231589 loss)\nI0819 04:34:40.320999 21603 sgd_solver.cpp:166] Iteration 69800, lr = 1.745\nI0819 04:35:27.635360 21603 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0819 04:35:53.622602 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78072\nI0819 04:35:53.622663 21603 solver.cpp:404]     Test net output #1: loss = 0.73818 (* 1 = 0.73818 loss)\nI0819 04:35:54.033980 21603 solver.cpp:228] Iteration 69900, loss = 0.174571\nI0819 04:35:54.034011 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:35:54.034027 21603 solver.cpp:244]     Train net output #1: loss = 0.174571 (* 1 = 0.174571 loss)\nI0819 04:35:54.124181 21603 sgd_solver.cpp:166] Iteration 69900, lr = 1.7475\nI0819 04:36:41.355862 21603 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0819 04:37:07.344364 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7238\nI0819 04:37:07.344424 21603 solver.cpp:404]     Test net output #1: loss = 1.09596 (* 1 = 1.09596 loss)\nI0819 04:37:07.755678 21603 solver.cpp:228] Iteration 70000, loss = 0.243404\nI0819 04:37:07.755709 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:37:07.755725 21603 solver.cpp:244]     Train net output #1: loss = 0.243404 (* 1 = 0.243404 loss)\nI0819 04:37:07.846992 21603 sgd_solver.cpp:166] Iteration 70000, lr = 1.75\nI0819 04:37:55.069955 21603 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0819 04:38:21.055905 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81476\nI0819 04:38:21.055959 21603 solver.cpp:404]     Test net output #1: loss = 0.64352 (* 1 = 0.64352 loss)\nI0819 04:38:21.467185 21603 solver.cpp:228] Iteration 70100, loss = 0.142145\nI0819 04:38:21.467236 21603 solver.cpp:244]     Train net 
output #0: accuracy = 0.96\nI0819 04:38:21.467253 21603 solver.cpp:244]     Train net output #1: loss = 0.142145 (* 1 = 0.142145 loss)\nI0819 04:38:21.560045 21603 sgd_solver.cpp:166] Iteration 70100, lr = 1.7525\nI0819 04:39:08.751111 21603 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0819 04:39:34.734714 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7474\nI0819 04:39:34.734753 21603 solver.cpp:404]     Test net output #1: loss = 0.905429 (* 1 = 0.905429 loss)\nI0819 04:39:35.146214 21603 solver.cpp:228] Iteration 70200, loss = 0.212051\nI0819 04:39:35.146258 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:39:35.146275 21603 solver.cpp:244]     Train net output #1: loss = 0.21205 (* 1 = 0.21205 loss)\nI0819 04:39:35.242136 21603 sgd_solver.cpp:166] Iteration 70200, lr = 1.755\nI0819 04:40:22.529265 21603 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0819 04:40:48.517701 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69532\nI0819 04:40:48.517741 21603 solver.cpp:404]     Test net output #1: loss = 1.18841 (* 1 = 1.18841 loss)\nI0819 04:40:48.929069 21603 solver.cpp:228] Iteration 70300, loss = 0.244703\nI0819 04:40:48.929114 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:40:48.929131 21603 solver.cpp:244]     Train net output #1: loss = 0.244702 (* 1 = 0.244702 loss)\nI0819 04:40:49.022534 21603 sgd_solver.cpp:166] Iteration 70300, lr = 1.7575\nI0819 04:41:36.228548 21603 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0819 04:42:02.210896 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79524\nI0819 04:42:02.210955 21603 solver.cpp:404]     Test net output #1: loss = 0.709409 (* 1 = 0.709409 loss)\nI0819 04:42:02.623312 21603 solver.cpp:228] Iteration 70400, loss = 0.220759\nI0819 04:42:02.623359 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 04:42:02.623378 21603 solver.cpp:244]     Train net output #1: loss = 0.220759 (* 1 = 0.220759 
loss)\nI0819 04:42:02.717315 21603 sgd_solver.cpp:166] Iteration 70400, lr = 1.76\nI0819 04:42:50.035135 21603 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0819 04:43:16.019243 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77604\nI0819 04:43:16.019281 21603 solver.cpp:404]     Test net output #1: loss = 0.772281 (* 1 = 0.772281 loss)\nI0819 04:43:16.431736 21603 solver.cpp:228] Iteration 70500, loss = 0.205981\nI0819 04:43:16.431783 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 04:43:16.431800 21603 solver.cpp:244]     Train net output #1: loss = 0.205981 (* 1 = 0.205981 loss)\nI0819 04:43:16.520426 21603 sgd_solver.cpp:166] Iteration 70500, lr = 1.7625\nI0819 04:44:03.803187 21603 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0819 04:44:29.790256 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78124\nI0819 04:44:29.790297 21603 solver.cpp:404]     Test net output #1: loss = 0.807446 (* 1 = 0.807446 loss)\nI0819 04:44:30.202642 21603 solver.cpp:228] Iteration 70600, loss = 0.27493\nI0819 04:44:30.202690 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 04:44:30.202708 21603 solver.cpp:244]     Train net output #1: loss = 0.274929 (* 1 = 0.274929 loss)\nI0819 04:44:30.288749 21603 sgd_solver.cpp:166] Iteration 70600, lr = 1.765\nI0819 04:45:17.585062 21603 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0819 04:45:43.564469 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78908\nI0819 04:45:43.564508 21603 solver.cpp:404]     Test net output #1: loss = 0.762979 (* 1 = 0.762979 loss)\nI0819 04:45:43.977306 21603 solver.cpp:228] Iteration 70700, loss = 0.145658\nI0819 04:45:43.977352 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:45:43.977370 21603 solver.cpp:244]     Train net output #1: loss = 0.145658 (* 1 = 0.145658 loss)\nI0819 04:45:44.070870 21603 sgd_solver.cpp:166] Iteration 70700, lr = 1.7675\nI0819 04:46:31.389899 21603 solver.cpp:337] 
Iteration 70800, Testing net (#0)\nI0819 04:46:57.376266 21603 solver.cpp:404]     Test net output #0: accuracy = 0.782\nI0819 04:46:57.376307 21603 solver.cpp:404]     Test net output #1: loss = 0.73898 (* 1 = 0.73898 loss)\nI0819 04:46:57.788728 21603 solver.cpp:228] Iteration 70800, loss = 0.229251\nI0819 04:46:57.788774 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:46:57.788790 21603 solver.cpp:244]     Train net output #1: loss = 0.229251 (* 1 = 0.229251 loss)\nI0819 04:46:57.878237 21603 sgd_solver.cpp:166] Iteration 70800, lr = 1.77\nI0819 04:47:45.198781 21603 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0819 04:48:11.184679 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69296\nI0819 04:48:11.184718 21603 solver.cpp:404]     Test net output #1: loss = 1.14491 (* 1 = 1.14491 loss)\nI0819 04:48:11.596329 21603 solver.cpp:228] Iteration 70900, loss = 0.236485\nI0819 04:48:11.596375 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:48:11.596391 21603 solver.cpp:244]     Train net output #1: loss = 0.236485 (* 1 = 0.236485 loss)\nI0819 04:48:11.691437 21603 sgd_solver.cpp:166] Iteration 70900, lr = 1.7725\nI0819 04:48:58.993413 21603 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0819 04:49:24.976631 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79448\nI0819 04:49:24.976670 21603 solver.cpp:404]     Test net output #1: loss = 0.738097 (* 1 = 0.738097 loss)\nI0819 04:49:25.387744 21603 solver.cpp:228] Iteration 71000, loss = 0.163369\nI0819 04:49:25.387790 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:49:25.387806 21603 solver.cpp:244]     Train net output #1: loss = 0.163369 (* 1 = 0.163369 loss)\nI0819 04:49:25.474536 21603 sgd_solver.cpp:166] Iteration 71000, lr = 1.775\nI0819 04:50:12.753739 21603 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0819 04:50:38.738967 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81148\nI0819 
04:50:38.739008 21603 solver.cpp:404]     Test net output #1: loss = 0.623028 (* 1 = 0.623028 loss)\nI0819 04:50:39.151703 21603 solver.cpp:228] Iteration 71100, loss = 0.240017\nI0819 04:50:39.151751 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:50:39.151768 21603 solver.cpp:244]     Train net output #1: loss = 0.240017 (* 1 = 0.240017 loss)\nI0819 04:50:39.241236 21603 sgd_solver.cpp:166] Iteration 71100, lr = 1.7775\nI0819 04:51:26.521173 21603 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0819 04:51:52.508363 21603 solver.cpp:404]     Test net output #0: accuracy = 0.766\nI0819 04:51:52.508404 21603 solver.cpp:404]     Test net output #1: loss = 0.818587 (* 1 = 0.818587 loss)\nI0819 04:51:52.921211 21603 solver.cpp:228] Iteration 71200, loss = 0.236821\nI0819 04:51:52.921257 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 04:51:52.921274 21603 solver.cpp:244]     Train net output #1: loss = 0.236821 (* 1 = 0.236821 loss)\nI0819 04:51:53.008301 21603 sgd_solver.cpp:166] Iteration 71200, lr = 1.78\nI0819 04:52:40.316007 21603 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0819 04:53:06.302594 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67572\nI0819 04:53:06.302634 21603 solver.cpp:404]     Test net output #1: loss = 1.18049 (* 1 = 1.18049 loss)\nI0819 04:53:06.715250 21603 solver.cpp:228] Iteration 71300, loss = 0.266363\nI0819 04:53:06.715298 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 04:53:06.715317 21603 solver.cpp:244]     Train net output #1: loss = 0.266363 (* 1 = 0.266363 loss)\nI0819 04:53:06.808226 21603 sgd_solver.cpp:166] Iteration 71300, lr = 1.7825\nI0819 04:53:54.018919 21603 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0819 04:54:20.004264 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75592\nI0819 04:54:20.004307 21603 solver.cpp:404]     Test net output #1: loss = 0.777258 (* 1 = 0.777258 loss)\nI0819 04:54:20.417047 21603 
solver.cpp:228] Iteration 71400, loss = 0.272186\nI0819 04:54:20.417093 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 04:54:20.417110 21603 solver.cpp:244]     Train net output #1: loss = 0.272186 (* 1 = 0.272186 loss)\nI0819 04:54:20.512569 21603 sgd_solver.cpp:166] Iteration 71400, lr = 1.785\nI0819 04:55:07.739212 21603 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0819 04:55:33.723145 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74388\nI0819 04:55:33.723186 21603 solver.cpp:404]     Test net output #1: loss = 0.829488 (* 1 = 0.829488 loss)\nI0819 04:55:34.135785 21603 solver.cpp:228] Iteration 71500, loss = 0.24461\nI0819 04:55:34.135831 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:55:34.135848 21603 solver.cpp:244]     Train net output #1: loss = 0.24461 (* 1 = 0.24461 loss)\nI0819 04:55:34.230711 21603 sgd_solver.cpp:166] Iteration 71500, lr = 1.7875\nI0819 04:56:21.423388 21603 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0819 04:56:47.406949 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83644\nI0819 04:56:47.406991 21603 solver.cpp:404]     Test net output #1: loss = 0.514764 (* 1 = 0.514764 loss)\nI0819 04:56:47.819334 21603 solver.cpp:228] Iteration 71600, loss = 0.173799\nI0819 04:56:47.819380 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:56:47.819396 21603 solver.cpp:244]     Train net output #1: loss = 0.173799 (* 1 = 0.173799 loss)\nI0819 04:56:47.906936 21603 sgd_solver.cpp:166] Iteration 71600, lr = 1.79\nI0819 04:57:35.054955 21603 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0819 04:58:01.045804 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83208\nI0819 04:58:01.045845 21603 solver.cpp:404]     Test net output #1: loss = 0.529223 (* 1 = 0.529223 loss)\nI0819 04:58:01.458408 21603 solver.cpp:228] Iteration 71700, loss = 0.246208\nI0819 04:58:01.458456 21603 solver.cpp:244]     Train net output #0: accuracy = 
0.928\nI0819 04:58:01.458473 21603 solver.cpp:244]     Train net output #1: loss = 0.246208 (* 1 = 0.246208 loss)\nI0819 04:58:01.548310 21603 sgd_solver.cpp:166] Iteration 71700, lr = 1.7925\nI0819 04:58:48.848136 21603 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0819 04:59:14.834878 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78156\nI0819 04:59:14.834919 21603 solver.cpp:404]     Test net output #1: loss = 0.787731 (* 1 = 0.787731 loss)\nI0819 04:59:15.247165 21603 solver.cpp:228] Iteration 71800, loss = 0.191543\nI0819 04:59:15.247213 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:59:15.247231 21603 solver.cpp:244]     Train net output #1: loss = 0.191543 (* 1 = 0.191543 loss)\nI0819 04:59:15.335578 21603 sgd_solver.cpp:166] Iteration 71800, lr = 1.795\nI0819 05:00:02.664964 21603 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0819 05:00:28.652204 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80468\nI0819 05:00:28.652247 21603 solver.cpp:404]     Test net output #1: loss = 0.667824 (* 1 = 0.667824 loss)\nI0819 05:00:29.064716 21603 solver.cpp:228] Iteration 71900, loss = 0.199692\nI0819 05:00:29.064764 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:00:29.064781 21603 solver.cpp:244]     Train net output #1: loss = 0.199692 (* 1 = 0.199692 loss)\nI0819 05:00:29.155941 21603 sgd_solver.cpp:166] Iteration 71900, lr = 1.7975\nI0819 05:01:16.359076 21603 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0819 05:01:42.345913 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7006\nI0819 05:01:42.345954 21603 solver.cpp:404]     Test net output #1: loss = 1.07927 (* 1 = 1.07927 loss)\nI0819 05:01:42.758725 21603 solver.cpp:228] Iteration 72000, loss = 0.255629\nI0819 05:01:42.758774 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:01:42.758791 21603 solver.cpp:244]     Train net output #1: loss = 0.255629 (* 1 = 0.255629 loss)\nI0819 
05:01:42.852648 21603 sgd_solver.cpp:166] Iteration 72000, lr = 1.8\nI0819 05:02:30.114320 21603 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0819 05:02:56.098877 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80636\nI0819 05:02:56.098918 21603 solver.cpp:404]     Test net output #1: loss = 0.649257 (* 1 = 0.649257 loss)\nI0819 05:02:56.511005 21603 solver.cpp:228] Iteration 72100, loss = 0.18956\nI0819 05:02:56.511052 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:02:56.511070 21603 solver.cpp:244]     Train net output #1: loss = 0.18956 (* 1 = 0.18956 loss)\nI0819 05:02:56.604305 21603 sgd_solver.cpp:166] Iteration 72100, lr = 1.8025\nI0819 05:03:43.862982 21603 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0819 05:04:09.843821 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81404\nI0819 05:04:09.843860 21603 solver.cpp:404]     Test net output #1: loss = 0.607626 (* 1 = 0.607626 loss)\nI0819 05:04:10.256006 21603 solver.cpp:228] Iteration 72200, loss = 0.19238\nI0819 05:04:10.256052 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:04:10.256070 21603 solver.cpp:244]     Train net output #1: loss = 0.19238 (* 1 = 0.19238 loss)\nI0819 05:04:10.347656 21603 sgd_solver.cpp:166] Iteration 72200, lr = 1.805\nI0819 05:04:57.582017 21603 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0819 05:05:23.565140 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73696\nI0819 05:05:23.565186 21603 solver.cpp:404]     Test net output #1: loss = 0.990075 (* 1 = 0.990075 loss)\nI0819 05:05:23.977598 21603 solver.cpp:228] Iteration 72300, loss = 0.215043\nI0819 05:05:23.977646 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 05:05:23.977663 21603 solver.cpp:244]     Train net output #1: loss = 0.215043 (* 1 = 0.215043 loss)\nI0819 05:05:24.068080 21603 sgd_solver.cpp:166] Iteration 72300, lr = 1.8075\nI0819 05:06:11.264109 21603 solver.cpp:337] Iteration 72400, 
Testing net (#0)\nI0819 05:06:37.249089 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8172\nI0819 05:06:37.249132 21603 solver.cpp:404]     Test net output #1: loss = 0.632538 (* 1 = 0.632538 loss)\nI0819 05:06:37.661063 21603 solver.cpp:228] Iteration 72400, loss = 0.200372\nI0819 05:06:37.661114 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 05:06:37.661133 21603 solver.cpp:244]     Train net output #1: loss = 0.200372 (* 1 = 0.200372 loss)\nI0819 05:06:37.755290 21603 sgd_solver.cpp:166] Iteration 72400, lr = 1.81\nI0819 05:07:24.968523 21603 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0819 05:07:50.954529 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77196\nI0819 05:07:50.954571 21603 solver.cpp:404]     Test net output #1: loss = 0.906376 (* 1 = 0.906376 loss)\nI0819 05:07:51.366953 21603 solver.cpp:228] Iteration 72500, loss = 0.182584\nI0819 05:07:51.367000 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:07:51.367018 21603 solver.cpp:244]     Train net output #1: loss = 0.182584 (* 1 = 0.182584 loss)\nI0819 05:07:51.457123 21603 sgd_solver.cpp:166] Iteration 72500, lr = 1.8125\nI0819 05:08:38.729743 21603 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0819 05:09:04.711756 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72144\nI0819 05:09:04.711796 21603 solver.cpp:404]     Test net output #1: loss = 1.17891 (* 1 = 1.17891 loss)\nI0819 05:09:05.124024 21603 solver.cpp:228] Iteration 72600, loss = 0.226804\nI0819 05:09:05.124075 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:09:05.124092 21603 solver.cpp:244]     Train net output #1: loss = 0.226804 (* 1 = 0.226804 loss)\nI0819 05:09:05.217905 21603 sgd_solver.cpp:166] Iteration 72600, lr = 1.815\nI0819 05:09:52.506954 21603 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0819 05:10:18.493021 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82088\nI0819 05:10:18.493074 
21603 solver.cpp:404]     Test net output #1: loss = 0.598419 (* 1 = 0.598419 loss)\nI0819 05:10:18.905328 21603 solver.cpp:228] Iteration 72700, loss = 0.188285\nI0819 05:10:18.905378 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:10:18.905395 21603 solver.cpp:244]     Train net output #1: loss = 0.188285 (* 1 = 0.188285 loss)\nI0819 05:10:18.995440 21603 sgd_solver.cpp:166] Iteration 72700, lr = 1.8175\nI0819 05:11:06.321564 21603 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0819 05:11:32.306964 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77644\nI0819 05:11:32.307005 21603 solver.cpp:404]     Test net output #1: loss = 0.779139 (* 1 = 0.779139 loss)\nI0819 05:11:32.719207 21603 solver.cpp:228] Iteration 72800, loss = 0.28536\nI0819 05:11:32.719256 21603 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0819 05:11:32.719274 21603 solver.cpp:244]     Train net output #1: loss = 0.28536 (* 1 = 0.28536 loss)\nI0819 05:11:32.813071 21603 sgd_solver.cpp:166] Iteration 72800, lr = 1.82\nI0819 05:12:20.120142 21603 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0819 05:12:46.105996 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76736\nI0819 05:12:46.106036 21603 solver.cpp:404]     Test net output #1: loss = 0.790567 (* 1 = 0.790567 loss)\nI0819 05:12:46.518589 21603 solver.cpp:228] Iteration 72900, loss = 0.209546\nI0819 05:12:46.518637 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:12:46.518656 21603 solver.cpp:244]     Train net output #1: loss = 0.209546 (* 1 = 0.209546 loss)\nI0819 05:12:46.608989 21603 sgd_solver.cpp:166] Iteration 72900, lr = 1.8225\nI0819 05:13:33.929772 21603 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0819 05:13:59.914048 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67164\nI0819 05:13:59.914088 21603 solver.cpp:404]     Test net output #1: loss = 1.43917 (* 1 = 1.43917 loss)\nI0819 05:14:00.326094 21603 solver.cpp:228] 
Iteration 73000, loss = 0.227962\nI0819 05:14:00.326139 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 05:14:00.326156 21603 solver.cpp:244]     Train net output #1: loss = 0.227962 (* 1 = 0.227962 loss)\nI0819 05:14:00.424882 21603 sgd_solver.cpp:166] Iteration 73000, lr = 1.825\nI0819 05:14:47.644728 21603 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0819 05:15:13.628233 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67256\nI0819 05:15:13.628273 21603 solver.cpp:404]     Test net output #1: loss = 1.24805 (* 1 = 1.24805 loss)\nI0819 05:15:14.040700 21603 solver.cpp:228] Iteration 73100, loss = 0.217051\nI0819 05:15:14.040742 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:15:14.040760 21603 solver.cpp:244]     Train net output #1: loss = 0.217051 (* 1 = 0.217051 loss)\nI0819 05:15:14.128582 21603 sgd_solver.cpp:166] Iteration 73100, lr = 1.8275\nI0819 05:16:01.339592 21603 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0819 05:16:27.300063 21603 solver.cpp:404]     Test net output #0: accuracy = 0.81244\nI0819 05:16:27.300106 21603 solver.cpp:404]     Test net output #1: loss = 0.633708 (* 1 = 0.633708 loss)\nI0819 05:16:27.712589 21603 solver.cpp:228] Iteration 73200, loss = 0.2492\nI0819 05:16:27.712630 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:16:27.712647 21603 solver.cpp:244]     Train net output #1: loss = 0.2492 (* 1 = 0.2492 loss)\nI0819 05:16:27.802565 21603 sgd_solver.cpp:166] Iteration 73200, lr = 1.83\nI0819 05:17:14.903933 21603 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0819 05:17:40.860720 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6828\nI0819 05:17:40.860761 21603 solver.cpp:404]     Test net output #1: loss = 1.13947 (* 1 = 1.13947 loss)\nI0819 05:17:41.272176 21603 solver.cpp:228] Iteration 73300, loss = 0.191803\nI0819 05:17:41.272212 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 
05:17:41.272228 21603 solver.cpp:244]     Train net output #1: loss = 0.191803 (* 1 = 0.191803 loss)\nI0819 05:17:41.363502 21603 sgd_solver.cpp:166] Iteration 73300, lr = 1.8325\nI0819 05:18:28.628430 21603 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0819 05:18:54.577111 21603 solver.cpp:404]     Test net output #0: accuracy = 0.70644\nI0819 05:18:54.577150 21603 solver.cpp:404]     Test net output #1: loss = 1.07359 (* 1 = 1.07359 loss)\nI0819 05:18:54.989262 21603 solver.cpp:228] Iteration 73400, loss = 0.202741\nI0819 05:18:54.989300 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:18:54.989316 21603 solver.cpp:244]     Train net output #1: loss = 0.202741 (* 1 = 0.202741 loss)\nI0819 05:18:55.081876 21603 sgd_solver.cpp:166] Iteration 73400, lr = 1.835\nI0819 05:19:42.371470 21603 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0819 05:20:08.329099 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77356\nI0819 05:20:08.329149 21603 solver.cpp:404]     Test net output #1: loss = 0.840653 (* 1 = 0.840653 loss)\nI0819 05:20:08.742050 21603 solver.cpp:228] Iteration 73500, loss = 0.259544\nI0819 05:20:08.742090 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:20:08.742107 21603 solver.cpp:244]     Train net output #1: loss = 0.259544 (* 1 = 0.259544 loss)\nI0819 05:20:08.831586 21603 sgd_solver.cpp:166] Iteration 73500, lr = 1.8375\nI0819 05:20:56.093014 21603 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0819 05:21:22.048064 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73912\nI0819 05:21:22.048102 21603 solver.cpp:404]     Test net output #1: loss = 0.974008 (* 1 = 0.974008 loss)\nI0819 05:21:22.459448 21603 solver.cpp:228] Iteration 73600, loss = 0.221897\nI0819 05:21:22.459487 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 05:21:22.459503 21603 solver.cpp:244]     Train net output #1: loss = 0.221897 (* 1 = 0.221897 loss)\nI0819 05:21:22.552147 
21603 sgd_solver.cpp:166] Iteration 73600, lr = 1.84\nI0819 05:22:09.913295 21603 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0819 05:22:35.864996 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83388\nI0819 05:22:35.865036 21603 solver.cpp:404]     Test net output #1: loss = 0.513912 (* 1 = 0.513912 loss)\nI0819 05:22:36.276386 21603 solver.cpp:228] Iteration 73700, loss = 0.197047\nI0819 05:22:36.276425 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 05:22:36.276442 21603 solver.cpp:244]     Train net output #1: loss = 0.197047 (* 1 = 0.197047 loss)\nI0819 05:22:36.373353 21603 sgd_solver.cpp:166] Iteration 73700, lr = 1.8425\nI0819 05:23:23.699239 21603 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0819 05:23:49.654053 21603 solver.cpp:404]     Test net output #0: accuracy = 0.74112\nI0819 05:23:49.654093 21603 solver.cpp:404]     Test net output #1: loss = 0.905338 (* 1 = 0.905338 loss)\nI0819 05:23:50.065310 21603 solver.cpp:228] Iteration 73800, loss = 0.291232\nI0819 05:23:50.065348 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 05:23:50.065366 21603 solver.cpp:244]     Train net output #1: loss = 0.291232 (* 1 = 0.291232 loss)\nI0819 05:23:50.156854 21603 sgd_solver.cpp:166] Iteration 73800, lr = 1.845\nI0819 05:24:37.545456 21603 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0819 05:25:03.495995 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80472\nI0819 05:25:03.496034 21603 solver.cpp:404]     Test net output #1: loss = 0.631081 (* 1 = 0.631081 loss)\nI0819 05:25:03.908076 21603 solver.cpp:228] Iteration 73900, loss = 0.240507\nI0819 05:25:03.908118 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:25:03.908134 21603 solver.cpp:244]     Train net output #1: loss = 0.240507 (* 1 = 0.240507 loss)\nI0819 05:25:04.004689 21603 sgd_solver.cpp:166] Iteration 73900, lr = 1.8475\nI0819 05:25:51.263468 21603 solver.cpp:337] Iteration 74000, Testing net 
(#0)\nI0819 05:26:17.216464 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79424\nI0819 05:26:17.216501 21603 solver.cpp:404]     Test net output #1: loss = 0.760927 (* 1 = 0.760927 loss)\nI0819 05:26:17.628844 21603 solver.cpp:228] Iteration 74000, loss = 0.262338\nI0819 05:26:17.628886 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:26:17.628902 21603 solver.cpp:244]     Train net output #1: loss = 0.262337 (* 1 = 0.262337 loss)\nI0819 05:26:17.723600 21603 sgd_solver.cpp:166] Iteration 74000, lr = 1.85\nI0819 05:27:04.811056 21603 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0819 05:27:30.773583 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73364\nI0819 05:27:30.773622 21603 solver.cpp:404]     Test net output #1: loss = 1.2394 (* 1 = 1.2394 loss)\nI0819 05:27:31.185226 21603 solver.cpp:228] Iteration 74100, loss = 0.265878\nI0819 05:27:31.185267 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:27:31.185283 21603 solver.cpp:244]     Train net output #1: loss = 0.265878 (* 1 = 0.265878 loss)\nI0819 05:27:31.281414 21603 sgd_solver.cpp:166] Iteration 74100, lr = 1.8525\nI0819 05:28:18.259647 21603 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0819 05:28:44.223562 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7576\nI0819 05:28:44.223603 21603 solver.cpp:404]     Test net output #1: loss = 0.916498 (* 1 = 0.916498 loss)\nI0819 05:28:44.635587 21603 solver.cpp:228] Iteration 74200, loss = 0.280174\nI0819 05:28:44.635632 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:28:44.635648 21603 solver.cpp:244]     Train net output #1: loss = 0.280174 (* 1 = 0.280174 loss)\nI0819 05:28:44.727762 21603 sgd_solver.cpp:166] Iteration 74200, lr = 1.855\nI0819 05:29:31.596606 21603 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0819 05:29:57.557471 21603 solver.cpp:404]     Test net output #0: accuracy = 0.83616\nI0819 05:29:57.557512 21603 
solver.cpp:404]     Test net output #1: loss = 0.57761 (* 1 = 0.57761 loss)\nI0819 05:29:57.969425 21603 solver.cpp:228] Iteration 74300, loss = 0.218557\nI0819 05:29:57.969467 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:29:57.969485 21603 solver.cpp:244]     Train net output #1: loss = 0.218557 (* 1 = 0.218557 loss)\nI0819 05:29:58.063259 21603 sgd_solver.cpp:166] Iteration 74300, lr = 1.8575\nI0819 05:30:45.139312 21603 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0819 05:31:11.098599 21603 solver.cpp:404]     Test net output #0: accuracy = 0.6314\nI0819 05:31:11.098639 21603 solver.cpp:404]     Test net output #1: loss = 1.43549 (* 1 = 1.43549 loss)\nI0819 05:31:11.510464 21603 solver.cpp:228] Iteration 74400, loss = 0.274013\nI0819 05:31:11.510507 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 05:31:11.510524 21603 solver.cpp:244]     Train net output #1: loss = 0.274013 (* 1 = 0.274013 loss)\nI0819 05:31:11.605357 21603 sgd_solver.cpp:166] Iteration 74400, lr = 1.86\nI0819 05:31:58.752773 21603 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0819 05:32:24.713171 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72944\nI0819 05:32:24.713234 21603 solver.cpp:404]     Test net output #1: loss = 0.959124 (* 1 = 0.959124 loss)\nI0819 05:32:25.125294 21603 solver.cpp:228] Iteration 74500, loss = 0.240364\nI0819 05:32:25.125335 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 05:32:25.125352 21603 solver.cpp:244]     Train net output #1: loss = 0.240364 (* 1 = 0.240364 loss)\nI0819 05:32:25.216522 21603 sgd_solver.cpp:166] Iteration 74500, lr = 1.8625\nI0819 05:33:12.298272 21603 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0819 05:33:38.258910 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78612\nI0819 05:33:38.258951 21603 solver.cpp:404]     Test net output #1: loss = 0.732743 (* 1 = 0.732743 loss)\nI0819 05:33:38.669857 21603 solver.cpp:228] 
Iteration 74600, loss = 0.238611\nI0819 05:33:38.669891 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:33:38.669908 21603 solver.cpp:244]     Train net output #1: loss = 0.238611 (* 1 = 0.238611 loss)\nI0819 05:33:38.763782 21603 sgd_solver.cpp:166] Iteration 74600, lr = 1.865\nI0819 05:34:25.996974 21603 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0819 05:34:51.958073 21603 solver.cpp:404]     Test net output #0: accuracy = 0.843\nI0819 05:34:51.958114 21603 solver.cpp:404]     Test net output #1: loss = 0.516825 (* 1 = 0.516825 loss)\nI0819 05:34:52.369019 21603 solver.cpp:228] Iteration 74700, loss = 0.23755\nI0819 05:34:52.369051 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:34:52.369067 21603 solver.cpp:244]     Train net output #1: loss = 0.23755 (* 1 = 0.23755 loss)\nI0819 05:34:52.463752 21603 sgd_solver.cpp:166] Iteration 74700, lr = 1.8675\nI0819 05:35:39.593544 21603 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0819 05:36:05.553441 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80608\nI0819 05:36:05.553479 21603 solver.cpp:404]     Test net output #1: loss = 0.663259 (* 1 = 0.663259 loss)\nI0819 05:36:05.964402 21603 solver.cpp:228] Iteration 74800, loss = 0.15437\nI0819 05:36:05.964440 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:36:05.964457 21603 solver.cpp:244]     Train net output #1: loss = 0.15437 (* 1 = 0.15437 loss)\nI0819 05:36:06.057544 21603 sgd_solver.cpp:166] Iteration 74800, lr = 1.87\nI0819 05:36:53.006696 21603 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0819 05:37:18.967219 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68408\nI0819 05:37:18.967259 21603 solver.cpp:404]     Test net output #1: loss = 1.37422 (* 1 = 1.37422 loss)\nI0819 05:37:19.378468 21603 solver.cpp:228] Iteration 74900, loss = 0.345638\nI0819 05:37:19.378505 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0819 
05:37:19.378521 21603 solver.cpp:244]     Train net output #1: loss = 0.345637 (* 1 = 0.345637 loss)\nI0819 05:37:19.475888 21603 sgd_solver.cpp:166] Iteration 74900, lr = 1.8725\nI0819 05:38:06.625402 21603 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0819 05:38:32.586714 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77784\nI0819 05:38:32.586755 21603 solver.cpp:404]     Test net output #1: loss = 0.702385 (* 1 = 0.702385 loss)\nI0819 05:38:32.997697 21603 solver.cpp:228] Iteration 75000, loss = 0.308122\nI0819 05:38:32.997733 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:38:32.997750 21603 solver.cpp:244]     Train net output #1: loss = 0.308122 (* 1 = 0.308122 loss)\nI0819 05:38:33.086334 21603 sgd_solver.cpp:166] Iteration 75000, lr = 1.875\nI0819 05:39:20.091119 21603 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0819 05:39:46.055135 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7646\nI0819 05:39:46.055173 21603 solver.cpp:404]     Test net output #1: loss = 0.841395 (* 1 = 0.841395 loss)\nI0819 05:39:46.466253 21603 solver.cpp:228] Iteration 75100, loss = 0.253321\nI0819 05:39:46.466300 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:39:46.466315 21603 solver.cpp:244]     Train net output #1: loss = 0.25332 (* 1 = 0.25332 loss)\nI0819 05:39:46.560498 21603 sgd_solver.cpp:166] Iteration 75100, lr = 1.8775\nI0819 05:40:33.581037 21603 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0819 05:40:59.538931 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77068\nI0819 05:40:59.538970 21603 solver.cpp:404]     Test net output #1: loss = 0.847353 (* 1 = 0.847353 loss)\nI0819 05:40:59.950213 21603 solver.cpp:228] Iteration 75200, loss = 0.179034\nI0819 05:40:59.950259 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:40:59.950275 21603 solver.cpp:244]     Train net output #1: loss = 0.179033 (* 1 = 0.179033 loss)\nI0819 05:41:00.045727 21603 
sgd_solver.cpp:166] Iteration 75200, lr = 1.88\nI0819 05:41:47.126914 21603 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0819 05:42:13.086318 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72176\nI0819 05:42:13.086371 21603 solver.cpp:404]     Test net output #1: loss = 1.09058 (* 1 = 1.09058 loss)\nI0819 05:42:13.497359 21603 solver.cpp:228] Iteration 75300, loss = 0.277902\nI0819 05:42:13.497407 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 05:42:13.497423 21603 solver.cpp:244]     Train net output #1: loss = 0.277902 (* 1 = 0.277902 loss)\nI0819 05:42:13.588052 21603 sgd_solver.cpp:166] Iteration 75300, lr = 1.8825\nI0819 05:43:00.604542 21603 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0819 05:43:26.565532 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79764\nI0819 05:43:26.565572 21603 solver.cpp:404]     Test net output #1: loss = 0.790253 (* 1 = 0.790253 loss)\nI0819 05:43:26.976439 21603 solver.cpp:228] Iteration 75400, loss = 0.176802\nI0819 05:43:26.976485 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:43:26.976501 21603 solver.cpp:244]     Train net output #1: loss = 0.176802 (* 1 = 0.176802 loss)\nI0819 05:43:27.069397 21603 sgd_solver.cpp:166] Iteration 75400, lr = 1.885\nI0819 05:44:14.143215 21603 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0819 05:44:40.105252 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80692\nI0819 05:44:40.105294 21603 solver.cpp:404]     Test net output #1: loss = 0.594522 (* 1 = 0.594522 loss)\nI0819 05:44:40.516325 21603 solver.cpp:228] Iteration 75500, loss = 0.292936\nI0819 05:44:40.516372 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:44:40.516388 21603 solver.cpp:244]     Train net output #1: loss = 0.292936 (* 1 = 0.292936 loss)\nI0819 05:44:40.610532 21603 sgd_solver.cpp:166] Iteration 75500, lr = 1.8875\nI0819 05:45:27.618446 21603 solver.cpp:337] Iteration 75600, Testing net 
(#0)\nI0819 05:45:53.580576 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67676\nI0819 05:45:53.580616 21603 solver.cpp:404]     Test net output #1: loss = 1.16006 (* 1 = 1.16006 loss)\nI0819 05:45:53.992493 21603 solver.cpp:228] Iteration 75600, loss = 0.353349\nI0819 05:45:53.992534 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 05:45:53.992552 21603 solver.cpp:244]     Train net output #1: loss = 0.353349 (* 1 = 0.353349 loss)\nI0819 05:45:54.080951 21603 sgd_solver.cpp:166] Iteration 75600, lr = 1.89\nI0819 05:46:41.383564 21603 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0819 05:47:07.347173 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73712\nI0819 05:47:07.347218 21603 solver.cpp:404]     Test net output #1: loss = 0.922467 (* 1 = 0.922467 loss)\nI0819 05:47:07.759332 21603 solver.cpp:228] Iteration 75700, loss = 0.24982\nI0819 05:47:07.759373 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:47:07.759388 21603 solver.cpp:244]     Train net output #1: loss = 0.24982 (* 1 = 0.24982 loss)\nI0819 05:47:07.847975 21603 sgd_solver.cpp:166] Iteration 75700, lr = 1.8925\nI0819 05:47:54.948106 21603 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0819 05:48:20.908324 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7072\nI0819 05:48:20.908361 21603 solver.cpp:404]     Test net output #1: loss = 1.30832 (* 1 = 1.30832 loss)\nI0819 05:48:21.319175 21603 solver.cpp:228] Iteration 75800, loss = 0.238749\nI0819 05:48:21.319217 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:48:21.319232 21603 solver.cpp:244]     Train net output #1: loss = 0.238749 (* 1 = 0.238749 loss)\nI0819 05:48:21.410542 21603 sgd_solver.cpp:166] Iteration 75800, lr = 1.895\nI0819 05:49:08.445328 21603 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0819 05:49:34.413254 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78508\nI0819 05:49:34.413293 21603 
solver.cpp:404]     Test net output #1: loss = 0.849483 (* 1 = 0.849483 loss)\nI0819 05:49:34.823937 21603 solver.cpp:228] Iteration 75900, loss = 0.21725\nI0819 05:49:34.823976 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 05:49:34.823992 21603 solver.cpp:244]     Train net output #1: loss = 0.21725 (* 1 = 0.21725 loss)\nI0819 05:49:34.915616 21603 sgd_solver.cpp:166] Iteration 75900, lr = 1.8975\nI0819 05:50:22.016892 21603 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0819 05:50:48.002887 21603 solver.cpp:404]     Test net output #0: accuracy = 0.745\nI0819 05:50:48.002929 21603 solver.cpp:404]     Test net output #1: loss = 1.02337 (* 1 = 1.02337 loss)\nI0819 05:50:48.414034 21603 solver.cpp:228] Iteration 76000, loss = 0.279114\nI0819 05:50:48.414070 21603 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0819 05:50:48.414085 21603 solver.cpp:244]     Train net output #1: loss = 0.279114 (* 1 = 0.279114 loss)\nI0819 05:50:48.508675 21603 sgd_solver.cpp:166] Iteration 76000, lr = 1.9\nI0819 05:51:35.403791 21603 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0819 05:52:01.385701 21603 solver.cpp:404]     Test net output #0: accuracy = 0.79296\nI0819 05:52:01.385756 21603 solver.cpp:404]     Test net output #1: loss = 0.713784 (* 1 = 0.713784 loss)\nI0819 05:52:01.796859 21603 solver.cpp:228] Iteration 76100, loss = 0.154486\nI0819 05:52:01.796900 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 05:52:01.796916 21603 solver.cpp:244]     Train net output #1: loss = 0.154486 (* 1 = 0.154486 loss)\nI0819 05:52:01.893074 21603 sgd_solver.cpp:166] Iteration 76100, lr = 1.9025\nI0819 05:52:48.779559 21603 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0819 05:53:14.762135 21603 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI0819 05:53:14.762173 21603 solver.cpp:404]     Test net output #1: loss = 0.808492 (* 1 = 0.808492 loss)\nI0819 05:53:15.173230 21603 solver.cpp:228] Iteration 
76200, loss = 0.201121\nI0819 05:53:15.173267 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 05:53:15.173283 21603 solver.cpp:244]     Train net output #1: loss = 0.201121 (* 1 = 0.201121 loss)\nI0819 05:53:15.270633 21603 sgd_solver.cpp:166] Iteration 76200, lr = 1.905\nI0819 05:54:02.285221 21603 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0819 05:54:28.267849 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77612\nI0819 05:54:28.267894 21603 solver.cpp:404]     Test net output #1: loss = 0.758746 (* 1 = 0.758746 loss)\nI0819 05:54:28.679008 21603 solver.cpp:228] Iteration 76300, loss = 0.221597\nI0819 05:54:28.679039 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:54:28.679055 21603 solver.cpp:244]     Train net output #1: loss = 0.221597 (* 1 = 0.221597 loss)\nI0819 05:54:28.768335 21603 sgd_solver.cpp:166] Iteration 76300, lr = 1.9075\nI0819 05:55:15.868472 21603 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0819 05:55:41.850303 21603 solver.cpp:404]     Test net output #0: accuracy = 0.786\nI0819 05:55:41.850344 21603 solver.cpp:404]     Test net output #1: loss = 0.676727 (* 1 = 0.676727 loss)\nI0819 05:55:42.260951 21603 solver.cpp:228] Iteration 76400, loss = 0.210817\nI0819 05:55:42.260979 21603 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 05:55:42.260994 21603 solver.cpp:244]     Train net output #1: loss = 0.210817 (* 1 = 0.210817 loss)\nI0819 05:55:42.358228 21603 sgd_solver.cpp:166] Iteration 76400, lr = 1.91\nI0819 05:56:29.604887 21603 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0819 05:56:55.583602 21603 solver.cpp:404]     Test net output #0: accuracy = 0.62284\nI0819 05:56:55.583643 21603 solver.cpp:404]     Test net output #1: loss = 1.57616 (* 1 = 1.57616 loss)\nI0819 05:56:55.994405 21603 solver.cpp:228] Iteration 76500, loss = 0.228708\nI0819 05:56:55.994433 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:56:55.994448 
21603 solver.cpp:244]     Train net output #1: loss = 0.228708 (* 1 = 0.228708 loss)\nI0819 05:56:56.091833 21603 sgd_solver.cpp:166] Iteration 76500, lr = 1.9125\nI0819 05:57:43.326009 21603 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0819 05:58:09.305719 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67776\nI0819 05:58:09.305759 21603 solver.cpp:404]     Test net output #1: loss = 1.34028 (* 1 = 1.34028 loss)\nI0819 05:58:09.716488 21603 solver.cpp:228] Iteration 76600, loss = 0.22747\nI0819 05:58:09.716516 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 05:58:09.716532 21603 solver.cpp:244]     Train net output #1: loss = 0.22747 (* 1 = 0.22747 loss)\nI0819 05:58:09.811667 21603 sgd_solver.cpp:166] Iteration 76600, lr = 1.915\nI0819 05:58:56.922161 21603 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0819 05:59:22.897456 21603 solver.cpp:404]     Test net output #0: accuracy = 0.73224\nI0819 05:59:22.897496 21603 solver.cpp:404]     Test net output #1: loss = 0.921789 (* 1 = 0.921789 loss)\nI0819 05:59:23.308187 21603 solver.cpp:228] Iteration 76700, loss = 0.278015\nI0819 05:59:23.308225 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 05:59:23.308241 21603 solver.cpp:244]     Train net output #1: loss = 0.278015 (* 1 = 0.278015 loss)\nI0819 05:59:23.402997 21603 sgd_solver.cpp:166] Iteration 76700, lr = 1.9175\nI0819 06:00:10.497143 21603 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0819 06:00:36.474944 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77572\nI0819 06:00:36.474987 21603 solver.cpp:404]     Test net output #1: loss = 0.726915 (* 1 = 0.726915 loss)\nI0819 06:00:36.885706 21603 solver.cpp:228] Iteration 76800, loss = 0.226593\nI0819 06:00:36.885745 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:00:36.885761 21603 solver.cpp:244]     Train net output #1: loss = 0.226593 (* 1 = 0.226593 loss)\nI0819 06:00:36.979877 21603 
sgd_solver.cpp:166] Iteration 76800, lr = 1.92\nI0819 06:01:24.026146 21603 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0819 06:01:50.005503 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80048\nI0819 06:01:50.005543 21603 solver.cpp:404]     Test net output #1: loss = 0.732218 (* 1 = 0.732218 loss)\nI0819 06:01:50.416476 21603 solver.cpp:228] Iteration 76900, loss = 0.228932\nI0819 06:01:50.416513 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:01:50.416529 21603 solver.cpp:244]     Train net output #1: loss = 0.228932 (* 1 = 0.228932 loss)\nI0819 06:01:50.510396 21603 sgd_solver.cpp:166] Iteration 76900, lr = 1.9225\nI0819 06:02:37.525892 21603 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0819 06:03:03.508062 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78288\nI0819 06:03:03.508107 21603 solver.cpp:404]     Test net output #1: loss = 0.737577 (* 1 = 0.737577 loss)\nI0819 06:03:03.920095 21603 solver.cpp:228] Iteration 77000, loss = 0.181311\nI0819 06:03:03.920151 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:03:03.920176 21603 solver.cpp:244]     Train net output #1: loss = 0.181311 (* 1 = 0.181311 loss)\nI0819 06:03:04.007437 21603 sgd_solver.cpp:166] Iteration 77000, lr = 1.925\nI0819 06:03:51.022490 21603 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0819 06:04:17.010710 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7542\nI0819 06:04:17.010757 21603 solver.cpp:404]     Test net output #1: loss = 0.843107 (* 1 = 0.843107 loss)\nI0819 06:04:17.422919 21603 solver.cpp:228] Iteration 77100, loss = 0.19858\nI0819 06:04:17.422960 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:04:17.422986 21603 solver.cpp:244]     Train net output #1: loss = 0.19858 (* 1 = 0.19858 loss)\nI0819 06:04:17.512996 21603 sgd_solver.cpp:166] Iteration 77100, lr = 1.9275\nI0819 06:05:04.493198 21603 solver.cpp:337] Iteration 77200, Testing net 
(#0)\nI0819 06:05:30.480585 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82288\nI0819 06:05:30.480631 21603 solver.cpp:404]     Test net output #1: loss = 0.573404 (* 1 = 0.573404 loss)\nI0819 06:05:30.892707 21603 solver.cpp:228] Iteration 77200, loss = 0.221162\nI0819 06:05:30.892746 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:05:30.892772 21603 solver.cpp:244]     Train net output #1: loss = 0.221162 (* 1 = 0.221162 loss)\nI0819 06:05:30.987792 21603 sgd_solver.cpp:166] Iteration 77200, lr = 1.93\nI0819 06:06:18.200042 21603 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0819 06:06:44.181648 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78036\nI0819 06:06:44.181692 21603 solver.cpp:404]     Test net output #1: loss = 0.75541 (* 1 = 0.75541 loss)\nI0819 06:06:44.592372 21603 solver.cpp:228] Iteration 77300, loss = 0.28735\nI0819 06:06:44.592412 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 06:06:44.592428 21603 solver.cpp:244]     Train net output #1: loss = 0.28735 (* 1 = 0.28735 loss)\nI0819 06:06:44.682871 21603 sgd_solver.cpp:166] Iteration 77300, lr = 1.9325\nI0819 06:07:31.932973 21603 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0819 06:07:57.917127 21603 solver.cpp:404]     Test net output #0: accuracy = 0.64856\nI0819 06:07:57.917167 21603 solver.cpp:404]     Test net output #1: loss = 1.37845 (* 1 = 1.37845 loss)\nI0819 06:07:58.328618 21603 solver.cpp:228] Iteration 77400, loss = 0.293524\nI0819 06:07:58.328656 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 06:07:58.328672 21603 solver.cpp:244]     Train net output #1: loss = 0.293524 (* 1 = 0.293524 loss)\nI0819 06:07:58.418126 21603 sgd_solver.cpp:166] Iteration 77400, lr = 1.935\nI0819 06:08:45.687929 21603 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0819 06:09:11.672333 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71796\nI0819 06:09:11.672371 21603 
solver.cpp:404]     Test net output #1: loss = 0.981646 (* 1 = 0.981646 loss)\nI0819 06:09:12.083909 21603 solver.cpp:228] Iteration 77500, loss = 0.223285\nI0819 06:09:12.083950 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:09:12.083966 21603 solver.cpp:244]     Train net output #1: loss = 0.223285 (* 1 = 0.223285 loss)\nI0819 06:09:12.180274 21603 sgd_solver.cpp:166] Iteration 77500, lr = 1.9375\nI0819 06:09:59.475473 21603 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0819 06:10:25.456301 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76024\nI0819 06:10:25.456357 21603 solver.cpp:404]     Test net output #1: loss = 0.924357 (* 1 = 0.924357 loss)\nI0819 06:10:25.868983 21603 solver.cpp:228] Iteration 77600, loss = 0.23609\nI0819 06:10:25.869021 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 06:10:25.869037 21603 solver.cpp:244]     Train net output #1: loss = 0.23609 (* 1 = 0.23609 loss)\nI0819 06:10:25.960274 21603 sgd_solver.cpp:166] Iteration 77600, lr = 1.94\nI0819 06:11:13.379582 21603 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0819 06:11:39.358781 21603 solver.cpp:404]     Test net output #0: accuracy = 0.75284\nI0819 06:11:39.358820 21603 solver.cpp:404]     Test net output #1: loss = 0.913894 (* 1 = 0.913894 loss)\nI0819 06:11:39.770146 21603 solver.cpp:228] Iteration 77700, loss = 0.185205\nI0819 06:11:39.770185 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:11:39.770206 21603 solver.cpp:244]     Train net output #1: loss = 0.185205 (* 1 = 0.185205 loss)\nI0819 06:11:39.861973 21603 sgd_solver.cpp:166] Iteration 77700, lr = 1.9425\nI0819 06:12:27.054764 21603 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0819 06:12:53.032050 21603 solver.cpp:404]     Test net output #0: accuracy = 0.666\nI0819 06:12:53.032089 21603 solver.cpp:404]     Test net output #1: loss = 1.46406 (* 1 = 1.46406 loss)\nI0819 06:12:53.444411 21603 solver.cpp:228] Iteration 
77800, loss = 0.252079\nI0819 06:12:53.444449 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:12:53.444466 21603 solver.cpp:244]     Train net output #1: loss = 0.252079 (* 1 = 0.252079 loss)\nI0819 06:12:53.537266 21603 sgd_solver.cpp:166] Iteration 77800, lr = 1.945\nI0819 06:13:40.900241 21603 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0819 06:14:06.879886 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72512\nI0819 06:14:06.879925 21603 solver.cpp:404]     Test net output #1: loss = 0.974128 (* 1 = 0.974128 loss)\nI0819 06:14:07.291492 21603 solver.cpp:228] Iteration 77900, loss = 0.288258\nI0819 06:14:07.291532 21603 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0819 06:14:07.291548 21603 solver.cpp:244]     Train net output #1: loss = 0.288258 (* 1 = 0.288258 loss)\nI0819 06:14:07.381564 21603 sgd_solver.cpp:166] Iteration 77900, lr = 1.9475\nI0819 06:14:54.747558 21603 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0819 06:15:20.729877 21603 solver.cpp:404]     Test net output #0: accuracy = 0.66292\nI0819 06:15:20.729918 21603 solver.cpp:404]     Test net output #1: loss = 1.35337 (* 1 = 1.35337 loss)\nI0819 06:15:21.141096 21603 solver.cpp:228] Iteration 78000, loss = 0.431441\nI0819 06:15:21.141134 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0819 06:15:21.141149 21603 solver.cpp:244]     Train net output #1: loss = 0.43144 (* 1 = 0.43144 loss)\nI0819 06:15:21.232439 21603 sgd_solver.cpp:166] Iteration 78000, lr = 1.95\nI0819 06:16:08.395588 21603 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0819 06:16:34.379729 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68832\nI0819 06:16:34.379771 21603 solver.cpp:404]     Test net output #1: loss = 1.32907 (* 1 = 1.32907 loss)\nI0819 06:16:34.791038 21603 solver.cpp:228] Iteration 78100, loss = 0.352485\nI0819 06:16:34.791075 21603 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0819 06:16:34.791092 
21603 solver.cpp:244]     Train net output #1: loss = 0.352485 (* 1 = 0.352485 loss)\nI0819 06:16:34.885507 21603 sgd_solver.cpp:166] Iteration 78100, lr = 1.9525\nI0819 06:17:22.105727 21603 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0819 06:17:48.085125 21603 solver.cpp:404]     Test net output #0: accuracy = 0.80624\nI0819 06:17:48.085165 21603 solver.cpp:404]     Test net output #1: loss = 0.607131 (* 1 = 0.607131 loss)\nI0819 06:17:48.496175 21603 solver.cpp:228] Iteration 78200, loss = 0.261086\nI0819 06:17:48.496215 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:17:48.496232 21603 solver.cpp:244]     Train net output #1: loss = 0.261086 (* 1 = 0.261086 loss)\nI0819 06:17:48.593210 21603 sgd_solver.cpp:166] Iteration 78200, lr = 1.955\nI0819 06:18:35.614660 21603 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0819 06:19:01.594450 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7916\nI0819 06:19:01.594491 21603 solver.cpp:404]     Test net output #1: loss = 0.688277 (* 1 = 0.688277 loss)\nI0819 06:19:02.005587 21603 solver.cpp:228] Iteration 78300, loss = 0.250654\nI0819 06:19:02.005625 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 06:19:02.005640 21603 solver.cpp:244]     Train net output #1: loss = 0.250654 (* 1 = 0.250654 loss)\nI0819 06:19:02.095556 21603 sgd_solver.cpp:166] Iteration 78300, lr = 1.9575\nI0819 06:19:49.609342 21603 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0819 06:20:15.590320 21603 solver.cpp:404]     Test net output #0: accuracy = 0.68512\nI0819 06:20:15.590373 21603 solver.cpp:404]     Test net output #1: loss = 1.27084 (* 1 = 1.27084 loss)\nI0819 06:20:16.002059 21603 solver.cpp:228] Iteration 78400, loss = 0.202093\nI0819 06:20:16.002094 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:20:16.002110 21603 solver.cpp:244]     Train net output #1: loss = 0.202093 (* 1 = 0.202093 loss)\nI0819 06:20:16.096030 21603 
sgd_solver.cpp:166] Iteration 78400, lr = 1.96\nI0819 06:21:03.491691 21603 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0819 06:21:29.471823 21603 solver.cpp:404]     Test net output #0: accuracy = 0.799\nI0819 06:21:29.471863 21603 solver.cpp:404]     Test net output #1: loss = 0.657295 (* 1 = 0.657295 loss)\nI0819 06:21:29.883517 21603 solver.cpp:228] Iteration 78500, loss = 0.224639\nI0819 06:21:29.883548 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:21:29.883564 21603 solver.cpp:244]     Train net output #1: loss = 0.224639 (* 1 = 0.224639 loss)\nI0819 06:21:29.975839 21603 sgd_solver.cpp:166] Iteration 78500, lr = 1.9625\nI0819 06:22:17.419872 21603 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0819 06:22:43.410965 21603 solver.cpp:404]     Test net output #0: accuracy = 0.69972\nI0819 06:22:43.411006 21603 solver.cpp:404]     Test net output #1: loss = 1.07342 (* 1 = 1.07342 loss)\nI0819 06:22:43.822278 21603 solver.cpp:228] Iteration 78600, loss = 0.185518\nI0819 06:22:43.822312 21603 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 06:22:43.822329 21603 solver.cpp:244]     Train net output #1: loss = 0.185518 (* 1 = 0.185518 loss)\nI0819 06:22:43.912714 21603 sgd_solver.cpp:166] Iteration 78600, lr = 1.965\nI0819 06:23:31.217566 21603 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0819 06:23:57.207736 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7312\nI0819 06:23:57.207780 21603 solver.cpp:404]     Test net output #1: loss = 0.960118 (* 1 = 0.960118 loss)\nI0819 06:23:57.619534 21603 solver.cpp:228] Iteration 78700, loss = 0.21244\nI0819 06:23:57.619570 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 06:23:57.619595 21603 solver.cpp:244]     Train net output #1: loss = 0.21244 (* 1 = 0.21244 loss)\nI0819 06:23:57.715307 21603 sgd_solver.cpp:166] Iteration 78700, lr = 1.9675\nI0819 06:24:45.030622 21603 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0819 
06:25:11.019594 21603 solver.cpp:404]     Test net output #0: accuracy = 0.71744\nI0819 06:25:11.019641 21603 solver.cpp:404]     Test net output #1: loss = 1.15898 (* 1 = 1.15898 loss)\nI0819 06:25:11.432232 21603 solver.cpp:228] Iteration 78800, loss = 0.269771\nI0819 06:25:11.432270 21603 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 06:25:11.432293 21603 solver.cpp:244]     Train net output #1: loss = 0.269771 (* 1 = 0.269771 loss)\nI0819 06:25:11.521636 21603 sgd_solver.cpp:166] Iteration 78800, lr = 1.97\nI0819 06:25:58.995824 21603 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0819 06:26:24.984040 21603 solver.cpp:404]     Test net output #0: accuracy = 0.76844\nI0819 06:26:24.984086 21603 solver.cpp:404]     Test net output #1: loss = 0.775936 (* 1 = 0.775936 loss)\nI0819 06:26:25.395411 21603 solver.cpp:228] Iteration 78900, loss = 0.306006\nI0819 06:26:25.395447 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 06:26:25.395472 21603 solver.cpp:244]     Train net output #1: loss = 0.306006 (* 1 = 0.306006 loss)\nI0819 06:26:25.484133 21603 sgd_solver.cpp:166] Iteration 78900, lr = 1.9725\nI0819 06:27:12.860409 21603 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0819 06:27:38.849129 21603 solver.cpp:404]     Test net output #0: accuracy = 0.8002\nI0819 06:27:38.849174 21603 solver.cpp:404]     Test net output #1: loss = 0.635171 (* 1 = 0.635171 loss)\nI0819 06:27:39.261400 21603 solver.cpp:228] Iteration 79000, loss = 0.200096\nI0819 06:27:39.261435 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:27:39.261461 21603 solver.cpp:244]     Train net output #1: loss = 0.200096 (* 1 = 0.200096 loss)\nI0819 06:27:39.350925 21603 sgd_solver.cpp:166] Iteration 79000, lr = 1.975\nI0819 06:28:26.728971 21603 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0819 06:28:52.712826 21603 solver.cpp:404]     Test net output #0: accuracy = 0.78156\nI0819 06:28:52.712872 21603 solver.cpp:404]     
Test net output #1: loss = 0.695478 (* 1 = 0.695478 loss)\nI0819 06:28:53.125756 21603 solver.cpp:228] Iteration 79100, loss = 0.282593\nI0819 06:28:53.125792 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:28:53.125818 21603 solver.cpp:244]     Train net output #1: loss = 0.282593 (* 1 = 0.282593 loss)\nI0819 06:28:53.216348 21603 sgd_solver.cpp:166] Iteration 79100, lr = 1.9775\nI0819 06:29:40.626754 21603 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0819 06:30:06.609535 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77784\nI0819 06:30:06.609583 21603 solver.cpp:404]     Test net output #1: loss = 0.717183 (* 1 = 0.717183 loss)\nI0819 06:30:07.022179 21603 solver.cpp:228] Iteration 79200, loss = 0.244575\nI0819 06:30:07.022220 21603 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 06:30:07.022244 21603 solver.cpp:244]     Train net output #1: loss = 0.244575 (* 1 = 0.244575 loss)\nI0819 06:30:07.119599 21603 sgd_solver.cpp:166] Iteration 79200, lr = 1.98\nI0819 06:30:54.343616 21603 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0819 06:31:20.326725 21603 solver.cpp:404]     Test net output #0: accuracy = 0.77744\nI0819 06:31:20.326771 21603 solver.cpp:404]     Test net output #1: loss = 0.70752 (* 1 = 0.70752 loss)\nI0819 06:31:20.739190 21603 solver.cpp:228] Iteration 79300, loss = 0.275603\nI0819 06:31:20.739225 21603 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 06:31:20.739251 21603 solver.cpp:244]     Train net output #1: loss = 0.275603 (* 1 = 0.275603 loss)\nI0819 06:31:20.829169 21603 sgd_solver.cpp:166] Iteration 79300, lr = 1.9825\nI0819 06:32:08.146169 21603 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0819 06:32:34.129554 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7738\nI0819 06:32:34.129601 21603 solver.cpp:404]     Test net output #1: loss = 0.819219 (* 1 = 0.819219 loss)\nI0819 06:32:34.541815 21603 solver.cpp:228] Iteration 79400, loss = 
0.249399\nI0819 06:32:34.541852 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:32:34.541877 21603 solver.cpp:244]     Train net output #1: loss = 0.249399 (* 1 = 0.249399 loss)\nI0819 06:32:34.631724 21603 sgd_solver.cpp:166] Iteration 79400, lr = 1.985\nI0819 06:33:21.968461 21603 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0819 06:33:47.953428 21603 solver.cpp:404]     Test net output #0: accuracy = 0.67776\nI0819 06:33:47.953474 21603 solver.cpp:404]     Test net output #1: loss = 1.2392 (* 1 = 1.2392 loss)\nI0819 06:33:48.366159 21603 solver.cpp:228] Iteration 79500, loss = 0.28754\nI0819 06:33:48.366195 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 06:33:48.366219 21603 solver.cpp:244]     Train net output #1: loss = 0.28754 (* 1 = 0.28754 loss)\nI0819 06:33:48.457559 21603 sgd_solver.cpp:166] Iteration 79500, lr = 1.9875\nI0819 06:34:35.736249 21603 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0819 06:35:01.725495 21603 solver.cpp:404]     Test net output #0: accuracy = 0.72756\nI0819 06:35:01.725543 21603 solver.cpp:404]     Test net output #1: loss = 0.915572 (* 1 = 0.915572 loss)\nI0819 06:35:02.137775 21603 solver.cpp:228] Iteration 79600, loss = 0.247712\nI0819 06:35:02.137816 21603 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 06:35:02.137841 21603 solver.cpp:244]     Train net output #1: loss = 0.247712 (* 1 = 0.247712 loss)\nI0819 06:35:02.227746 21603 sgd_solver.cpp:166] Iteration 79600, lr = 1.99\nI0819 06:35:49.628161 21603 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0819 06:36:15.612563 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7642\nI0819 06:36:15.612610 21603 solver.cpp:404]     Test net output #1: loss = 0.871565 (* 1 = 0.871565 loss)\nI0819 06:36:16.024791 21603 solver.cpp:228] Iteration 79700, loss = 0.138958\nI0819 06:36:16.024829 21603 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:36:16.024853 21603 
solver.cpp:244]     Train net output #1: loss = 0.138958 (* 1 = 0.138958 loss)\nI0819 06:36:16.115870 21603 sgd_solver.cpp:166] Iteration 79700, lr = 1.9925\nI0819 06:37:03.535502 21603 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0819 06:37:29.522977 21603 solver.cpp:404]     Test net output #0: accuracy = 0.7722\nI0819 06:37:29.523023 21603 solver.cpp:404]     Test net output #1: loss = 0.807167 (* 1 = 0.807167 loss)\nI0819 06:37:29.935256 21603 solver.cpp:228] Iteration 79800, loss = 0.234376\nI0819 06:37:29.935297 21603 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 06:37:29.935322 21603 solver.cpp:244]     Train net output #1: loss = 0.234376 (* 1 = 0.234376 loss)\nI0819 06:37:30.023370 21603 sgd_solver.cpp:166] Iteration 79800, lr = 1.995\nI0819 06:38:17.337972 21603 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0819 06:38:43.323073 21603 solver.cpp:404]     Test net output #0: accuracy = 0.62228\nI0819 06:38:43.323127 21603 solver.cpp:404]     Test net output #1: loss = 1.87844 (* 1 = 1.87844 loss)\nI0819 06:38:43.735729 21603 solver.cpp:228] Iteration 79900, loss = 0.20781\nI0819 06:38:43.735770 21603 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:38:43.735795 21603 solver.cpp:244]     Train net output #1: loss = 0.20781 (* 1 = 0.20781 loss)\nI0819 06:38:43.828083 21603 sgd_solver.cpp:166] Iteration 79900, lr = 1.9975\nI0819 06:39:31.307134 21603 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range2Res20Fig4b_iter_80000.caffemodel\nI0819 06:39:31.870884 21603 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range2Res20Fig4b_iter_80000.solverstate\nI0819 06:39:32.026232 21603 solver.cpp:317] Iteration 80000, loss = 0.261463\nI0819 06:39:32.026266 21603 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0819 06:39:58.011063 21603 solver.cpp:404]     Test net output #0: accuracy = 0.82596\nI0819 06:39:58.011102 21603 solver.cpp:404]     Test 
net output #1: loss = 0.611952 (* 1 = 0.611952 loss)\nI0819 06:39:58.011126 21603 solver.cpp:322] Optimization Done.\nI0819 06:40:00.445191 21603 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range2SS80kRes56LR",
    "content": "I0818 13:44:36.670388 22726 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0818 13:44:36.672737 22726 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0818 13:44:36.673976 22726 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0818 13:44:36.675190 22726 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0818 13:44:36.676404 22726 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0818 13:44:36.677634 22726 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0818 13:44:36.678869 22726 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0818 13:44:36.680096 22726 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0818 13:44:36.681329 22726 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0818 13:44:37.100294 22726 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 80000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range2SS80kRes56LR\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 2\nI0818 13:44:37.103940 22726 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0818 13:44:37.115777 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:37.115855 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:37.116821 22726 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0818 13:44:37.118732 22726 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    
crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"conv\"\n  type: \"Convolution\"\n  bottom: \"data\"\n  top: \"conv\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_conv\"\n  type: \"BatchNorm\"\n  bottom: \"conv\"\n  top: \"bn_conv\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_conv\"\n  type: \"Scale\"\n  bottom: \"bn_conv\"\n  top: \"bn_conv\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_conv\"\n  type: \"ReLU\"\n  bottom: \"bn_conv\"\n  top: \"bn_conv\"\n}\nlayer {\n  name: \"Conv16_1\"\n  type: \"Convolution\"\n  bottom: \"bn_conv\"\n  top: \"Conv16_1\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_1\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_1\"\n  top: \"bn_Conv16_1\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_1\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_1\"\n  top: \"bn_Conv16_1\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_1\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_1\"\n  top: \"bn_Conv16_1\"\n}\nlayer {\n  name: \"Conv16_1_b\"\n  type: \"Convolution\"\n  
bottom: \"bn_Conv16_1\"\n  top: \"Conv16_1_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_1_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_1_b\"\n  top: \"bn_Conv16_1_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_1_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_1_b\"\n  top: \"bn_Conv16_1_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_bn_conv\"\n  type: \"Eltwise\"\n  bottom: \"bn_conv\"\n  bottom: \"bn_Conv16_1_b\"\n  top: \"sum_bn_Conv16_1_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_1_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  top: \"sum_bn_Conv16_1_b\"\n}\nlayer {\n  name: \"Conv16_2\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  top: \"Conv16_2\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_2\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_2\"\n  top: \"bn_Conv16_2\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_2\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_2\"\n  top: \"bn_Conv16_2\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_2\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_2\"\n  top: \"bn_Conv16_2\"\n}\nlayer {\n  name: \"Conv16_2_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_2\"\n  top: \"Conv16_2_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_2_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_2_b\"\n  top: \"bn_Conv16_2_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_2_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_2_b\"\n  top: \"bn_Conv16_2_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_1_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  bottom: \"bn_Conv16_2_b\"\n  top: \"sum_bn_Conv16_2_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_2_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  top: \"sum_bn_Conv16_2_b\"\n}\nlayer {\n  name: \"Conv16_3\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  top: \"Conv16_3\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_3\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_3\"\n  top: \"bn_Conv16_3\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_3\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_3\"\n  top: \"bn_Conv16_3\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_3\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_3\"\n  top: \"bn_Conv16_3\"\n}\nlayer {\n  name: \"Conv16_3_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_3\"\n  top: \"Conv16_3_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_3_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_3_b\"\n  top: \"bn_Conv16_3_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_3_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_3_b\"\n  top: \"bn_Conv16_3_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_2_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  bottom: \"bn_Conv16_3_b\"\n  top: \"sum_bn_Conv16_3_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_3_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  top: \"sum_bn_Conv16_3_b\"\n}\nlayer {\n  name: \"Conv16_4\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  top: \"Conv16_4\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_4\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_4\"\n  top: \"bn_Conv16_4\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_4\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_4\"\n  top: \"bn_Conv16_4\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_4\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_4\"\n  top: \"bn_Conv16_4\"\n}\nlayer {\n  name: \"Conv16_4_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_4\"\n  top: \"Conv16_4_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_4_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_4_b\"\n  top: \"bn_Conv16_4_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_4_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_4_b\"\n  top: \"bn_Conv16_4_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_3_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  bottom: \"bn_Conv16_4_b\"\n  top: \"sum_bn_Conv16_4_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_4_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  top: \"sum_bn_Conv16_4_b\"\n}\nlayer {\n  name: \"Conv16_5\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  top: \"Conv16_5\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_5\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_5\"\n  top: \"bn_Conv16_5\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_5\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_5\"\n  top: \"bn_Conv16_5\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_5\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_5\"\n  top: \"bn_Conv16_5\"\n}\nlayer {\n  name: \"Conv16_5_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_5\"\n  top: \"Conv16_5_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_5_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_5_b\"\n  top: \"bn_Conv16_5_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_5_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_5_b\"\n  top: \"bn_Conv16_5_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_4_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  bottom: \"bn_Conv16_5_b\"\n  top: \"sum_bn_Conv16_5_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_5_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  top: \"sum_bn_Conv16_5_b\"\n}\nlayer {\n  name: \"Conv16_6\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  top: \"Conv16_6\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_6\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_6\"\n  top: \"bn_Conv16_6\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_6\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_6\"\n  top: \"bn_Conv16_6\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_6\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_6\"\n  top: \"bn_Conv16_6\"\n}\nlayer {\n  name: \"Conv16_6_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_6\"\n  top: \"Conv16_6_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_6_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_6_b\"\n  top: \"bn_Conv16_6_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_6_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_6_b\"\n  top: \"bn_Conv16_6_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_5_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  bottom: \"bn_Conv16_6_b\"\n  top: \"sum_bn_Conv16_6_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_6_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  top: \"sum_bn_Conv16_6_b\"\n}\nlayer {\n  name: \"Conv16_7\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  top: \"Conv16_7\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_7\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_7\"\n  top: \"bn_Conv16_7\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_7\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_7\"\n  top: \"bn_Conv16_7\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_7\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_7\"\n  top: \"bn_Conv16_7\"\n}\nlayer {\n  name: \"Conv16_7_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_7\"\n  top: \"Conv16_7_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_7_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_7_b\"\n  top: \"bn_Conv16_7_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_7_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_7_b\"\n  top: \"bn_Conv16_7_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_6_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  bottom: \"bn_Conv16_7_b\"\n  top: \"sum_bn_Conv16_7_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_7_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  top: \"sum_bn_Conv16_7_b\"\n}\nlayer {\n  name: \"Conv16_8\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  top: \"Conv16_8\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_8\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_8\"\n  top: \"bn_Conv16_8\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_8\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_8\"\n  top: \"bn_Conv16_8\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_8\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_8\"\n  top: \"bn_Conv16_8\"\n}\nlayer {\n  name: \"Conv16_8_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_8\"\n  top: \"Conv16_8_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_8_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_8_b\"\n  top: \"bn_Conv16_8_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_8_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_8_b\"\n  top: \"bn_Conv16_8_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_7_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  bottom: \"bn_Conv16_8_b\"\n  top: \"sum_bn_Conv16_8_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_8_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  top: \"sum_bn_Conv16_8_b\"\n}\nlayer {\n  name: \"Conv16_9\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  top: \"Conv16_9\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_9\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_9\"\n  top: \"bn_Conv16_9\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_9\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_9\"\n  top: \"bn_Conv16_9\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_9\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_9\"\n  top: \"bn_Conv16_9\"\n}\nlayer {\n  name: \"Conv16_9_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_Conv16_9\"\n  top: \"Conv16_9_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_9_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_9_b\"\n  top: \"bn_Conv16_9_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_9_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_9_b\"\n  top: \"bn_Conv16_9_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_8_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  bottom: \"bn_Conv16_9_b\"\n  top: \"sum_bn_Conv16_9_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_9_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"sum_bn_Conv16_9_b\"\n}\nlayer {\n  name: \"resblk32\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"resblk32\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32\"\n  top: \"bn_resblk32\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32\"\n  top: \"bn_resblk32\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32\"\n  top: \"bn_resblk32\"\n}\nlayer {\n  name: \"resblk32_b\"\n  type: \"Convolution\"\n  bottom: 
\"bn_resblk32\"\n  top: \"resblk32_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_b\"\n  top: \"bn_resblk32_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_b\"\n  top: \"bn_resblk32_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"avePooling_resblk32\"\n  type: \"Pooling\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"avgPool_resblk32\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"sum_avgPool_resblk32\"\n  type: \"Eltwise\"\n  bottom: \"avgPool_resblk32\"\n  bottom: \"bn_resblk32_b\"\n  top: \"sum_bn_resblk32_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_b\"\n  top: \"sum_bn_resblk32_b\"\n}\nlayer {\n  name: \"zeros_sum_bn_resblk32_b\"\n  type: \"DummyData\"\n  top: \"zeros_sum_bn_resblk32_b\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"CC_sum_bn_resblk32_b\"\n  type: \"Concat\"\n  bottom: \"sum_bn_resblk32_b\"\n  bottom: \"zeros_sum_bn_resblk32_b\"\n  top: \"CC_sum_bn_resblk32_b\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"resblk32_1\"\n  type: \"Convolution\"\n  bottom: \"CC_sum_bn_resblk32_b\"\n  top: \"resblk32_1\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n 
   pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_1\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_1\"\n  top: \"bn_resblk32_1\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_1\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_1\"\n  top: \"bn_resblk32_1\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_1\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_1\"\n  top: \"bn_resblk32_1\"\n}\nlayer {\n  name: \"resblk32_1_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_1\"\n  top: \"resblk32_1_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_1_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_1_b\"\n  top: \"bn_resblk32_1_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_1_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_1_b\"\n  top: \"bn_resblk32_1_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_CC_sum_bn_resblk32_b\"\n  type: \"Eltwise\"\n  bottom: \"CC_sum_bn_resblk32_b\"\n  bottom: \"bn_resblk32_1_b\"\n  top: \"sum_bn_resblk32_1_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_1_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  top: \"sum_bn_resblk32_1_b\"\n}\nlayer {\n  name: \"resblk32_2\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  top: \"resblk32_2\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_2\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_2\"\n  top: \"bn_resblk32_2\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_2\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_2\"\n  top: \"bn_resblk32_2\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_2\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_2\"\n  top: \"bn_resblk32_2\"\n}\nlayer {\n  name: \"resblk32_2_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_2\"\n  top: \"resblk32_2_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_2_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_2_b\"\n  top: \"bn_resblk32_2_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_2_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_2_b\"\n  top: \"bn_resblk32_2_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_1_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  bottom: \"bn_resblk32_2_b\"\n  top: \"sum_bn_resblk32_2_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_2_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  top: \"sum_bn_resblk32_2_b\"\n}\nlayer {\n  name: \"resblk32_3\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  top: \"resblk32_3\"\n  param {\n    lr_mult: 1\n   
 decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_3\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_3\"\n  top: \"bn_resblk32_3\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_3\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_3\"\n  top: \"bn_resblk32_3\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_3\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_3\"\n  top: \"bn_resblk32_3\"\n}\nlayer {\n  name: \"resblk32_3_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_3\"\n  top: \"resblk32_3_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_3_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_3_b\"\n  top: \"bn_resblk32_3_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_3_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_3_b\"\n  top: \"bn_resblk32_3_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_2_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  bottom: \"bn_resblk32_3_b\"\n  top: \"sum_bn_resblk32_3_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_3_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_3_b\"\n  top: \"sum_bn_resblk32_3_b\"\n}\nlayer {\n  name: \"resblk32_4\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_3_b\"\n 
 top: \"resblk32_4\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_4\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_4\"\n  top: \"bn_resblk32_4\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_4\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_4\"\n  top: \"bn_resblk32_4\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_4\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_4\"\n  top: \"bn_resblk32_4\"\n}\nlayer {\n  name: \"resblk32_4_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_4\"\n  top: \"resblk32_4_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_4_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_4_b\"\n  top: \"bn_resblk32_4_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_4_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_4_b\"\n  top: \"bn_resblk32_4_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_3_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_3_b\"\n  bottom: \"bn_resblk32_4_b\"\n  top: \"sum_bn_resblk32_4_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_4_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  top: \"sum_bn_resblk32_4_b\"\n}\nlayer {\n  name: \"resblk32_5\"\n  type: 
\"Convolution\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  top: \"resblk32_5\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_5\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_5\"\n  top: \"bn_resblk32_5\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_5\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_5\"\n  top: \"bn_resblk32_5\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_5\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_5\"\n  top: \"bn_resblk32_5\"\n}\nlayer {\n  name: \"resblk32_5_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_5\"\n  top: \"resblk32_5_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_5_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_5_b\"\n  top: \"bn_resblk32_5_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_5_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_5_b\"\n  top: \"bn_resblk32_5_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_4_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  bottom: \"bn_resblk32_5_b\"\n  top: \"sum_bn_resblk32_5_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_5_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_5_b\"\n  top: 
\"sum_bn_resblk32_5_b\"\n}\nlayer {\n  name: \"resblk32_6\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_5_b\"\n  top: \"resblk32_6\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_6\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_6\"\n  top: \"bn_resblk32_6\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_6\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_6\"\n  top: \"bn_resblk32_6\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_6\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_6\"\n  top: \"bn_resblk32_6\"\n}\nlayer {\n  name: \"res\nI0818 13:44:37.120782 22726 layer_factory.hpp:77] Creating layer dataLayer\nI0818 13:44:37.121978 22726 net.cpp:100] Creating Layer dataLayer\nI0818 13:44:37.122062 22726 net.cpp:408] dataLayer -> data\nI0818 13:44:37.122258 22726 net.cpp:408] dataLayer -> label\nI0818 13:44:37.122375 22726 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 13:44:37.132936 22731 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0818 13:44:37.154294 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:37.161983 22726 net.cpp:150] Setting up dataLayer\nI0818 13:44:37.162046 22726 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 13:44:37.162060 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.162065 22726 net.cpp:165] Memory required for data: 1536500\nI0818 13:44:37.162081 22726 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 13:44:37.162094 22726 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 13:44:37.162102 22726 net.cpp:434] 
label_dataLayer_1_split <- label\nI0818 13:44:37.162125 22726 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 13:44:37.162140 22726 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 13:44:37.162221 22726 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 13:44:37.162237 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.162245 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.162250 22726 net.cpp:165] Memory required for data: 1537500\nI0818 13:44:37.162255 22726 layer_factory.hpp:77] Creating layer conv\nI0818 13:44:37.162317 22726 net.cpp:100] Creating Layer conv\nI0818 13:44:37.162329 22726 net.cpp:434] conv <- data\nI0818 13:44:37.162339 22726 net.cpp:408] conv -> conv\nI0818 13:44:37.164067 22726 net.cpp:150] Setting up conv\nI0818 13:44:37.164088 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.164093 22726 net.cpp:165] Memory required for data: 9729500\nI0818 13:44:37.164146 22726 layer_factory.hpp:77] Creating layer batchNorm_conv\nI0818 13:44:37.164224 22726 net.cpp:100] Creating Layer batchNorm_conv\nI0818 13:44:37.164235 22726 net.cpp:434] batchNorm_conv <- conv\nI0818 13:44:37.164248 22726 net.cpp:408] batchNorm_conv -> bn_conv\nI0818 13:44:37.164582 22732 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:37.164721 22726 net.cpp:150] Setting up batchNorm_conv\nI0818 13:44:37.164741 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.164746 22726 net.cpp:165] Memory required for data: 17921500\nI0818 13:44:37.164763 22726 layer_factory.hpp:77] Creating layer scale_conv\nI0818 13:44:37.164815 22726 net.cpp:100] Creating Layer scale_conv\nI0818 13:44:37.164825 22726 net.cpp:434] scale_conv <- bn_conv\nI0818 13:44:37.164839 22726 net.cpp:395] scale_conv -> bn_conv (in-place)\nI0818 13:44:37.165011 22726 layer_factory.hpp:77] Creating layer scale_conv\nI0818 13:44:37.165266 22726 net.cpp:150] Setting up scale_conv\nI0818 13:44:37.165284 22726 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.165290 22726 net.cpp:165] Memory required for data: 26113500\nI0818 13:44:37.165300 22726 layer_factory.hpp:77] Creating layer relu_bn_conv\nI0818 13:44:37.165343 22726 net.cpp:100] Creating Layer relu_bn_conv\nI0818 13:44:37.165351 22726 net.cpp:434] relu_bn_conv <- bn_conv\nI0818 13:44:37.165359 22726 net.cpp:395] relu_bn_conv -> bn_conv (in-place)\nI0818 13:44:37.165369 22726 net.cpp:150] Setting up relu_bn_conv\nI0818 13:44:37.165376 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.165381 22726 net.cpp:165] Memory required for data: 34305500\nI0818 13:44:37.165386 22726 layer_factory.hpp:77] Creating layer bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.165397 22726 net.cpp:100] Creating Layer bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.165402 22726 net.cpp:434] bn_conv_relu_bn_conv_0_split <- bn_conv\nI0818 13:44:37.165410 22726 net.cpp:408] bn_conv_relu_bn_conv_0_split -> bn_conv_relu_bn_conv_0_split_0\nI0818 13:44:37.165419 22726 net.cpp:408] bn_conv_relu_bn_conv_0_split -> bn_conv_relu_bn_conv_0_split_1\nI0818 13:44:37.165478 22726 net.cpp:150] Setting up bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.165490 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.165498 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.165501 22726 net.cpp:165] Memory required for data: 50689500\nI0818 13:44:37.165506 22726 layer_factory.hpp:77] Creating layer Conv16_1\nI0818 13:44:37.165518 22726 net.cpp:100] Creating Layer Conv16_1\nI0818 13:44:37.165524 22726 net.cpp:434] Conv16_1 <- bn_conv_relu_bn_conv_0_split_0\nI0818 13:44:37.165536 22726 net.cpp:408] Conv16_1 -> Conv16_1\nI0818 13:44:37.165866 22726 net.cpp:150] Setting up Conv16_1\nI0818 13:44:37.165881 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.165886 22726 net.cpp:165] Memory required for data: 58881500\nI0818 13:44:37.165899 22726 layer_factory.hpp:77] Creating layer 
batchNorm_Conv16_1\nI0818 13:44:37.165910 22726 net.cpp:100] Creating Layer batchNorm_Conv16_1\nI0818 13:44:37.165915 22726 net.cpp:434] batchNorm_Conv16_1 <- Conv16_1\nI0818 13:44:37.165926 22726 net.cpp:408] batchNorm_Conv16_1 -> bn_Conv16_1\nI0818 13:44:37.166162 22726 net.cpp:150] Setting up batchNorm_Conv16_1\nI0818 13:44:37.166174 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.166188 22726 net.cpp:165] Memory required for data: 67073500\nI0818 13:44:37.166198 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1\nI0818 13:44:37.166210 22726 net.cpp:100] Creating Layer scale_Conv16_1\nI0818 13:44:37.166216 22726 net.cpp:434] scale_Conv16_1 <- bn_Conv16_1\nI0818 13:44:37.166224 22726 net.cpp:395] scale_Conv16_1 -> bn_Conv16_1 (in-place)\nI0818 13:44:37.166281 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1\nI0818 13:44:37.166420 22726 net.cpp:150] Setting up scale_Conv16_1\nI0818 13:44:37.166434 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.166438 22726 net.cpp:165] Memory required for data: 75265500\nI0818 13:44:37.166446 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_1\nI0818 13:44:37.166457 22726 net.cpp:100] Creating Layer relu_bn_Conv16_1\nI0818 13:44:37.166463 22726 net.cpp:434] relu_bn_Conv16_1 <- bn_Conv16_1\nI0818 13:44:37.166471 22726 net.cpp:395] relu_bn_Conv16_1 -> bn_Conv16_1 (in-place)\nI0818 13:44:37.166479 22726 net.cpp:150] Setting up relu_bn_Conv16_1\nI0818 13:44:37.166486 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.166491 22726 net.cpp:165] Memory required for data: 83457500\nI0818 13:44:37.166496 22726 layer_factory.hpp:77] Creating layer Conv16_1_b\nI0818 13:44:37.166512 22726 net.cpp:100] Creating Layer Conv16_1_b\nI0818 13:44:37.166517 22726 net.cpp:434] Conv16_1_b <- bn_Conv16_1\nI0818 13:44:37.166528 22726 net.cpp:408] Conv16_1_b -> Conv16_1_b\nI0818 13:44:37.166843 22726 net.cpp:150] Setting up Conv16_1_b\nI0818 13:44:37.166858 22726 
net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.166863 22726 net.cpp:165] Memory required for data: 91649500\nI0818 13:44:37.166872 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_1_b\nI0818 13:44:37.166885 22726 net.cpp:100] Creating Layer batchNorm_Conv16_1_b\nI0818 13:44:37.166891 22726 net.cpp:434] batchNorm_Conv16_1_b <- Conv16_1_b\nI0818 13:44:37.166899 22726 net.cpp:408] batchNorm_Conv16_1_b -> bn_Conv16_1_b\nI0818 13:44:37.167134 22726 net.cpp:150] Setting up batchNorm_Conv16_1_b\nI0818 13:44:37.167146 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167151 22726 net.cpp:165] Memory required for data: 99841500\nI0818 13:44:37.167170 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1_b\nI0818 13:44:37.167178 22726 net.cpp:100] Creating Layer scale_Conv16_1_b\nI0818 13:44:37.167184 22726 net.cpp:434] scale_Conv16_1_b <- bn_Conv16_1_b\nI0818 13:44:37.167194 22726 net.cpp:395] scale_Conv16_1_b -> bn_Conv16_1_b (in-place)\nI0818 13:44:37.167258 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1_b\nI0818 13:44:37.167397 22726 net.cpp:150] Setting up scale_Conv16_1_b\nI0818 13:44:37.167409 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167414 22726 net.cpp:165] Memory required for data: 108033500\nI0818 13:44:37.167423 22726 layer_factory.hpp:77] Creating layer sum_bn_conv\nI0818 13:44:37.167477 22726 net.cpp:100] Creating Layer sum_bn_conv\nI0818 13:44:37.167487 22726 net.cpp:434] sum_bn_conv <- bn_conv_relu_bn_conv_0_split_1\nI0818 13:44:37.167495 22726 net.cpp:434] sum_bn_conv <- bn_Conv16_1_b\nI0818 13:44:37.167502 22726 net.cpp:408] sum_bn_conv -> sum_bn_Conv16_1_b\nI0818 13:44:37.167575 22726 net.cpp:150] Setting up sum_bn_conv\nI0818 13:44:37.167590 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167595 22726 net.cpp:165] Memory required for data: 116225500\nI0818 13:44:37.167601 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_1_b\nI0818 
13:44:37.167609 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_1_b\nI0818 13:44:37.167614 22726 net.cpp:434] relu_sum_bn_Conv16_1_b <- sum_bn_Conv16_1_b\nI0818 13:44:37.167625 22726 net.cpp:395] relu_sum_bn_Conv16_1_b -> sum_bn_Conv16_1_b (in-place)\nI0818 13:44:37.167635 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_1_b\nI0818 13:44:37.167642 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167647 22726 net.cpp:165] Memory required for data: 124417500\nI0818 13:44:37.167651 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.167668 22726 net.cpp:100] Creating Layer sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.167675 22726 net.cpp:434] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split <- sum_bn_Conv16_1_b\nI0818 13:44:37.167681 22726 net.cpp:408] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split -> sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_0\nI0818 13:44:37.167690 22726 net.cpp:408] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split -> sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_1\nI0818 13:44:37.167737 22726 net.cpp:150] Setting up sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.167748 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167755 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.167759 22726 net.cpp:165] Memory required for data: 140801500\nI0818 13:44:37.167764 22726 layer_factory.hpp:77] Creating layer Conv16_2\nI0818 13:44:37.167778 22726 net.cpp:100] Creating Layer Conv16_2\nI0818 13:44:37.167784 22726 net.cpp:434] Conv16_2 <- sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_0\nI0818 13:44:37.167793 22726 net.cpp:408] Conv16_2 -> Conv16_2\nI0818 13:44:37.168107 22726 net.cpp:150] Setting up Conv16_2\nI0818 13:44:37.168120 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.168125 22726 net.cpp:165] Memory required for data: 148993500\nI0818 13:44:37.168134 22726 
layer_factory.hpp:77] Creating layer batchNorm_Conv16_2\nI0818 13:44:37.168148 22726 net.cpp:100] Creating Layer batchNorm_Conv16_2\nI0818 13:44:37.168154 22726 net.cpp:434] batchNorm_Conv16_2 <- Conv16_2\nI0818 13:44:37.168162 22726 net.cpp:408] batchNorm_Conv16_2 -> bn_Conv16_2\nI0818 13:44:37.168403 22726 net.cpp:150] Setting up batchNorm_Conv16_2\nI0818 13:44:37.168417 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.168421 22726 net.cpp:165] Memory required for data: 157185500\nI0818 13:44:37.168432 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2\nI0818 13:44:37.168442 22726 net.cpp:100] Creating Layer scale_Conv16_2\nI0818 13:44:37.168447 22726 net.cpp:434] scale_Conv16_2 <- bn_Conv16_2\nI0818 13:44:37.168454 22726 net.cpp:395] scale_Conv16_2 -> bn_Conv16_2 (in-place)\nI0818 13:44:37.168509 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2\nI0818 13:44:37.168649 22726 net.cpp:150] Setting up scale_Conv16_2\nI0818 13:44:37.168663 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.168668 22726 net.cpp:165] Memory required for data: 165377500\nI0818 13:44:37.168675 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_2\nI0818 13:44:37.168684 22726 net.cpp:100] Creating Layer relu_bn_Conv16_2\nI0818 13:44:37.168689 22726 net.cpp:434] relu_bn_Conv16_2 <- bn_Conv16_2\nI0818 13:44:37.168699 22726 net.cpp:395] relu_bn_Conv16_2 -> bn_Conv16_2 (in-place)\nI0818 13:44:37.168709 22726 net.cpp:150] Setting up relu_bn_Conv16_2\nI0818 13:44:37.168715 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.168720 22726 net.cpp:165] Memory required for data: 173569500\nI0818 13:44:37.168725 22726 layer_factory.hpp:77] Creating layer Conv16_2_b\nI0818 13:44:37.168741 22726 net.cpp:100] Creating Layer Conv16_2_b\nI0818 13:44:37.168747 22726 net.cpp:434] Conv16_2_b <- bn_Conv16_2\nI0818 13:44:37.168756 22726 net.cpp:408] Conv16_2_b -> Conv16_2_b\nI0818 13:44:37.169068 22726 net.cpp:150] Setting up 
Conv16_2_b\nI0818 13:44:37.169082 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169087 22726 net.cpp:165] Memory required for data: 181761500\nI0818 13:44:37.169096 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_2_b\nI0818 13:44:37.169104 22726 net.cpp:100] Creating Layer batchNorm_Conv16_2_b\nI0818 13:44:37.169111 22726 net.cpp:434] batchNorm_Conv16_2_b <- Conv16_2_b\nI0818 13:44:37.169121 22726 net.cpp:408] batchNorm_Conv16_2_b -> bn_Conv16_2_b\nI0818 13:44:37.169358 22726 net.cpp:150] Setting up batchNorm_Conv16_2_b\nI0818 13:44:37.169373 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169378 22726 net.cpp:165] Memory required for data: 189953500\nI0818 13:44:37.169400 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2_b\nI0818 13:44:37.169409 22726 net.cpp:100] Creating Layer scale_Conv16_2_b\nI0818 13:44:37.169415 22726 net.cpp:434] scale_Conv16_2_b <- bn_Conv16_2_b\nI0818 13:44:37.169425 22726 net.cpp:395] scale_Conv16_2_b -> bn_Conv16_2_b (in-place)\nI0818 13:44:37.169479 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2_b\nI0818 13:44:37.169618 22726 net.cpp:150] Setting up scale_Conv16_2_b\nI0818 13:44:37.169633 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169638 22726 net.cpp:165] Memory required for data: 198145500\nI0818 13:44:37.169647 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_1_b\nI0818 13:44:37.169656 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_1_b\nI0818 13:44:37.169661 22726 net.cpp:434] sum_sum_bn_Conv16_1_b <- sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_1\nI0818 13:44:37.169669 22726 net.cpp:434] sum_sum_bn_Conv16_1_b <- bn_Conv16_2_b\nI0818 13:44:37.169677 22726 net.cpp:408] sum_sum_bn_Conv16_1_b -> sum_bn_Conv16_2_b\nI0818 13:44:37.169710 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_1_b\nI0818 13:44:37.169719 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169724 22726 net.cpp:165] Memory 
required for data: 206337500\nI0818 13:44:37.169729 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_2_b\nI0818 13:44:37.169737 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_2_b\nI0818 13:44:37.169742 22726 net.cpp:434] relu_sum_bn_Conv16_2_b <- sum_bn_Conv16_2_b\nI0818 13:44:37.169749 22726 net.cpp:395] relu_sum_bn_Conv16_2_b -> sum_bn_Conv16_2_b (in-place)\nI0818 13:44:37.169757 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_2_b\nI0818 13:44:37.169764 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169770 22726 net.cpp:165] Memory required for data: 214529500\nI0818 13:44:37.169773 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.169780 22726 net.cpp:100] Creating Layer sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.169785 22726 net.cpp:434] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split <- sum_bn_Conv16_2_b\nI0818 13:44:37.169795 22726 net.cpp:408] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split -> sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_0\nI0818 13:44:37.169806 22726 net.cpp:408] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split -> sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_1\nI0818 13:44:37.169853 22726 net.cpp:150] Setting up sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.169868 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169875 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.169880 22726 net.cpp:165] Memory required for data: 230913500\nI0818 13:44:37.169885 22726 layer_factory.hpp:77] Creating layer Conv16_3\nI0818 13:44:37.169896 22726 net.cpp:100] Creating Layer Conv16_3\nI0818 13:44:37.169903 22726 net.cpp:434] Conv16_3 <- sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_0\nI0818 13:44:37.169911 22726 net.cpp:408] Conv16_3 -> Conv16_3\nI0818 13:44:37.170218 22726 net.cpp:150] Setting up Conv16_3\nI0818 13:44:37.170231 22726 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 13:44:37.170236 22726 net.cpp:165] Memory required for data: 239105500\nI0818 13:44:37.170245 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_3\nI0818 13:44:37.170253 22726 net.cpp:100] Creating Layer batchNorm_Conv16_3\nI0818 13:44:37.170262 22726 net.cpp:434] batchNorm_Conv16_3 <- Conv16_3\nI0818 13:44:37.170271 22726 net.cpp:408] batchNorm_Conv16_3 -> bn_Conv16_3\nI0818 13:44:37.170509 22726 net.cpp:150] Setting up batchNorm_Conv16_3\nI0818 13:44:37.170521 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.170526 22726 net.cpp:165] Memory required for data: 247297500\nI0818 13:44:37.170537 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3\nI0818 13:44:37.170545 22726 net.cpp:100] Creating Layer scale_Conv16_3\nI0818 13:44:37.170552 22726 net.cpp:434] scale_Conv16_3 <- bn_Conv16_3\nI0818 13:44:37.170562 22726 net.cpp:395] scale_Conv16_3 -> bn_Conv16_3 (in-place)\nI0818 13:44:37.170621 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3\nI0818 13:44:37.170759 22726 net.cpp:150] Setting up scale_Conv16_3\nI0818 13:44:37.170774 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.170779 22726 net.cpp:165] Memory required for data: 255489500\nI0818 13:44:37.170789 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_3\nI0818 13:44:37.170796 22726 net.cpp:100] Creating Layer relu_bn_Conv16_3\nI0818 13:44:37.170801 22726 net.cpp:434] relu_bn_Conv16_3 <- bn_Conv16_3\nI0818 13:44:37.170815 22726 net.cpp:395] relu_bn_Conv16_3 -> bn_Conv16_3 (in-place)\nI0818 13:44:37.170825 22726 net.cpp:150] Setting up relu_bn_Conv16_3\nI0818 13:44:37.170832 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.170837 22726 net.cpp:165] Memory required for data: 263681500\nI0818 13:44:37.170841 22726 layer_factory.hpp:77] Creating layer Conv16_3_b\nI0818 13:44:37.170855 22726 net.cpp:100] Creating Layer Conv16_3_b\nI0818 13:44:37.170861 22726 net.cpp:434] Conv16_3_b <- bn_Conv16_3\nI0818 
13:44:37.170872 22726 net.cpp:408] Conv16_3_b -> Conv16_3_b\nI0818 13:44:37.171180 22726 net.cpp:150] Setting up Conv16_3_b\nI0818 13:44:37.171193 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.171198 22726 net.cpp:165] Memory required for data: 271873500\nI0818 13:44:37.171207 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_3_b\nI0818 13:44:37.171223 22726 net.cpp:100] Creating Layer batchNorm_Conv16_3_b\nI0818 13:44:37.171229 22726 net.cpp:434] batchNorm_Conv16_3_b <- Conv16_3_b\nI0818 13:44:37.171238 22726 net.cpp:408] batchNorm_Conv16_3_b -> bn_Conv16_3_b\nI0818 13:44:37.171471 22726 net.cpp:150] Setting up batchNorm_Conv16_3_b\nI0818 13:44:37.171483 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.171489 22726 net.cpp:165] Memory required for data: 280065500\nI0818 13:44:37.171499 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3_b\nI0818 13:44:37.171507 22726 net.cpp:100] Creating Layer scale_Conv16_3_b\nI0818 13:44:37.171514 22726 net.cpp:434] scale_Conv16_3_b <- bn_Conv16_3_b\nI0818 13:44:37.171524 22726 net.cpp:395] scale_Conv16_3_b -> bn_Conv16_3_b (in-place)\nI0818 13:44:37.171576 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3_b\nI0818 13:44:37.171715 22726 net.cpp:150] Setting up scale_Conv16_3_b\nI0818 13:44:37.171730 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.171735 22726 net.cpp:165] Memory required for data: 288257500\nI0818 13:44:37.171742 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_2_b\nI0818 13:44:37.171751 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_2_b\nI0818 13:44:37.171757 22726 net.cpp:434] sum_sum_bn_Conv16_2_b <- sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_1\nI0818 13:44:37.171764 22726 net.cpp:434] sum_sum_bn_Conv16_2_b <- bn_Conv16_3_b\nI0818 13:44:37.171772 22726 net.cpp:408] sum_sum_bn_Conv16_2_b -> sum_bn_Conv16_3_b\nI0818 13:44:37.171805 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_2_b\nI0818 
13:44:37.171828 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.171833 22726 net.cpp:165] Memory required for data: 296449500\nI0818 13:44:37.171838 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_3_b\nI0818 13:44:37.171849 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_3_b\nI0818 13:44:37.171855 22726 net.cpp:434] relu_sum_bn_Conv16_3_b <- sum_bn_Conv16_3_b\nI0818 13:44:37.171862 22726 net.cpp:395] relu_sum_bn_Conv16_3_b -> sum_bn_Conv16_3_b (in-place)\nI0818 13:44:37.171871 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_3_b\nI0818 13:44:37.171878 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.171882 22726 net.cpp:165] Memory required for data: 304641500\nI0818 13:44:37.171887 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.171905 22726 net.cpp:100] Creating Layer sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.171911 22726 net.cpp:434] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split <- sum_bn_Conv16_3_b\nI0818 13:44:37.171924 22726 net.cpp:408] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split -> sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_0\nI0818 13:44:37.171933 22726 net.cpp:408] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split -> sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_1\nI0818 13:44:37.171977 22726 net.cpp:150] Setting up sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.171995 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172003 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172008 22726 net.cpp:165] Memory required for data: 321025500\nI0818 13:44:37.172013 22726 layer_factory.hpp:77] Creating layer Conv16_4\nI0818 13:44:37.172022 22726 net.cpp:100] Creating Layer Conv16_4\nI0818 13:44:37.172029 22726 net.cpp:434] Conv16_4 <- sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_0\nI0818 13:44:37.172037 22726 net.cpp:408] Conv16_4 -> Conv16_4\nI0818 
13:44:37.172354 22726 net.cpp:150] Setting up Conv16_4\nI0818 13:44:37.172369 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172374 22726 net.cpp:165] Memory required for data: 329217500\nI0818 13:44:37.172382 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_4\nI0818 13:44:37.172394 22726 net.cpp:100] Creating Layer batchNorm_Conv16_4\nI0818 13:44:37.172399 22726 net.cpp:434] batchNorm_Conv16_4 <- Conv16_4\nI0818 13:44:37.172407 22726 net.cpp:408] batchNorm_Conv16_4 -> bn_Conv16_4\nI0818 13:44:37.172643 22726 net.cpp:150] Setting up batchNorm_Conv16_4\nI0818 13:44:37.172657 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172662 22726 net.cpp:165] Memory required for data: 337409500\nI0818 13:44:37.172672 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4\nI0818 13:44:37.172679 22726 net.cpp:100] Creating Layer scale_Conv16_4\nI0818 13:44:37.172685 22726 net.cpp:434] scale_Conv16_4 <- bn_Conv16_4\nI0818 13:44:37.172696 22726 net.cpp:395] scale_Conv16_4 -> bn_Conv16_4 (in-place)\nI0818 13:44:37.172749 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4\nI0818 13:44:37.172897 22726 net.cpp:150] Setting up scale_Conv16_4\nI0818 13:44:37.172912 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172917 22726 net.cpp:165] Memory required for data: 345601500\nI0818 13:44:37.172926 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_4\nI0818 13:44:37.172935 22726 net.cpp:100] Creating Layer relu_bn_Conv16_4\nI0818 13:44:37.172940 22726 net.cpp:434] relu_bn_Conv16_4 <- bn_Conv16_4\nI0818 13:44:37.172947 22726 net.cpp:395] relu_bn_Conv16_4 -> bn_Conv16_4 (in-place)\nI0818 13:44:37.172956 22726 net.cpp:150] Setting up relu_bn_Conv16_4\nI0818 13:44:37.172963 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.172967 22726 net.cpp:165] Memory required for data: 353793500\nI0818 13:44:37.172972 22726 layer_factory.hpp:77] Creating layer Conv16_4_b\nI0818 13:44:37.172986 
22726 net.cpp:100] Creating Layer Conv16_4_b\nI0818 13:44:37.172991 22726 net.cpp:434] Conv16_4_b <- bn_Conv16_4\nI0818 13:44:37.173003 22726 net.cpp:408] Conv16_4_b -> Conv16_4_b\nI0818 13:44:37.173317 22726 net.cpp:150] Setting up Conv16_4_b\nI0818 13:44:37.173331 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.173336 22726 net.cpp:165] Memory required for data: 361985500\nI0818 13:44:37.173344 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_4_b\nI0818 13:44:37.173355 22726 net.cpp:100] Creating Layer batchNorm_Conv16_4_b\nI0818 13:44:37.173362 22726 net.cpp:434] batchNorm_Conv16_4_b <- Conv16_4_b\nI0818 13:44:37.173372 22726 net.cpp:408] batchNorm_Conv16_4_b -> bn_Conv16_4_b\nI0818 13:44:37.173610 22726 net.cpp:150] Setting up batchNorm_Conv16_4_b\nI0818 13:44:37.173624 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.173629 22726 net.cpp:165] Memory required for data: 370177500\nI0818 13:44:37.173640 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4_b\nI0818 13:44:37.173647 22726 net.cpp:100] Creating Layer scale_Conv16_4_b\nI0818 13:44:37.173653 22726 net.cpp:434] scale_Conv16_4_b <- bn_Conv16_4_b\nI0818 13:44:37.173671 22726 net.cpp:395] scale_Conv16_4_b -> bn_Conv16_4_b (in-place)\nI0818 13:44:37.173724 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4_b\nI0818 13:44:37.173872 22726 net.cpp:150] Setting up scale_Conv16_4_b\nI0818 13:44:37.173887 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.173892 22726 net.cpp:165] Memory required for data: 378369500\nI0818 13:44:37.173900 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_3_b\nI0818 13:44:37.173909 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_3_b\nI0818 13:44:37.173914 22726 net.cpp:434] sum_sum_bn_Conv16_3_b <- sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_1\nI0818 13:44:37.173921 22726 net.cpp:434] sum_sum_bn_Conv16_3_b <- bn_Conv16_4_b\nI0818 13:44:37.173933 22726 net.cpp:408] 
sum_sum_bn_Conv16_3_b -> sum_bn_Conv16_4_b\nI0818 13:44:37.173964 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_3_b\nI0818 13:44:37.173976 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.173981 22726 net.cpp:165] Memory required for data: 386561500\nI0818 13:44:37.173986 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_4_b\nI0818 13:44:37.173993 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_4_b\nI0818 13:44:37.174000 22726 net.cpp:434] relu_sum_bn_Conv16_4_b <- sum_bn_Conv16_4_b\nI0818 13:44:37.174006 22726 net.cpp:395] relu_sum_bn_Conv16_4_b -> sum_bn_Conv16_4_b (in-place)\nI0818 13:44:37.174015 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_4_b\nI0818 13:44:37.174021 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.174026 22726 net.cpp:165] Memory required for data: 394753500\nI0818 13:44:37.174031 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.174042 22726 net.cpp:100] Creating Layer sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.174047 22726 net.cpp:434] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split <- sum_bn_Conv16_4_b\nI0818 13:44:37.174054 22726 net.cpp:408] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split -> sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_0\nI0818 13:44:37.174064 22726 net.cpp:408] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split -> sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_1\nI0818 13:44:37.174105 22726 net.cpp:150] Setting up sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.174120 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.174126 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.174131 22726 net.cpp:165] Memory required for data: 411137500\nI0818 13:44:37.174136 22726 layer_factory.hpp:77] Creating layer Conv16_5\nI0818 13:44:37.174147 22726 net.cpp:100] Creating Layer Conv16_5\nI0818 13:44:37.174152 22726 net.cpp:434] 
Conv16_5 <- sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_0\nI0818 13:44:37.174161 22726 net.cpp:408] Conv16_5 -> Conv16_5\nI0818 13:44:37.174474 22726 net.cpp:150] Setting up Conv16_5\nI0818 13:44:37.174487 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.174492 22726 net.cpp:165] Memory required for data: 419329500\nI0818 13:44:37.174515 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_5\nI0818 13:44:37.174525 22726 net.cpp:100] Creating Layer batchNorm_Conv16_5\nI0818 13:44:37.174531 22726 net.cpp:434] batchNorm_Conv16_5 <- Conv16_5\nI0818 13:44:37.174540 22726 net.cpp:408] batchNorm_Conv16_5 -> bn_Conv16_5\nI0818 13:44:37.174787 22726 net.cpp:150] Setting up batchNorm_Conv16_5\nI0818 13:44:37.174799 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.174804 22726 net.cpp:165] Memory required for data: 427521500\nI0818 13:44:37.174821 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5\nI0818 13:44:37.174830 22726 net.cpp:100] Creating Layer scale_Conv16_5\nI0818 13:44:37.174835 22726 net.cpp:434] scale_Conv16_5 <- bn_Conv16_5\nI0818 13:44:37.174846 22726 net.cpp:395] scale_Conv16_5 -> bn_Conv16_5 (in-place)\nI0818 13:44:37.174901 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5\nI0818 13:44:37.175040 22726 net.cpp:150] Setting up scale_Conv16_5\nI0818 13:44:37.175055 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.175066 22726 net.cpp:165] Memory required for data: 435713500\nI0818 13:44:37.175076 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_5\nI0818 13:44:37.175083 22726 net.cpp:100] Creating Layer relu_bn_Conv16_5\nI0818 13:44:37.175089 22726 net.cpp:434] relu_bn_Conv16_5 <- bn_Conv16_5\nI0818 13:44:37.175096 22726 net.cpp:395] relu_bn_Conv16_5 -> bn_Conv16_5 (in-place)\nI0818 13:44:37.175106 22726 net.cpp:150] Setting up relu_bn_Conv16_5\nI0818 13:44:37.175112 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.175117 22726 net.cpp:165] Memory 
required for data: 443905500\nI0818 13:44:37.175122 22726 layer_factory.hpp:77] Creating layer Conv16_5_b\nI0818 13:44:37.175135 22726 net.cpp:100] Creating Layer Conv16_5_b\nI0818 13:44:37.175142 22726 net.cpp:434] Conv16_5_b <- bn_Conv16_5\nI0818 13:44:37.175153 22726 net.cpp:408] Conv16_5_b -> Conv16_5_b\nI0818 13:44:37.175465 22726 net.cpp:150] Setting up Conv16_5_b\nI0818 13:44:37.175479 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.175484 22726 net.cpp:165] Memory required for data: 452097500\nI0818 13:44:37.175493 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_5_b\nI0818 13:44:37.175508 22726 net.cpp:100] Creating Layer batchNorm_Conv16_5_b\nI0818 13:44:37.175514 22726 net.cpp:434] batchNorm_Conv16_5_b <- Conv16_5_b\nI0818 13:44:37.175524 22726 net.cpp:408] batchNorm_Conv16_5_b -> bn_Conv16_5_b\nI0818 13:44:37.175760 22726 net.cpp:150] Setting up batchNorm_Conv16_5_b\nI0818 13:44:37.175772 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.175777 22726 net.cpp:165] Memory required for data: 460289500\nI0818 13:44:37.175788 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5_b\nI0818 13:44:37.175796 22726 net.cpp:100] Creating Layer scale_Conv16_5_b\nI0818 13:44:37.175802 22726 net.cpp:434] scale_Conv16_5_b <- bn_Conv16_5_b\nI0818 13:44:37.175815 22726 net.cpp:395] scale_Conv16_5_b -> bn_Conv16_5_b (in-place)\nI0818 13:44:37.175873 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5_b\nI0818 13:44:37.176013 22726 net.cpp:150] Setting up scale_Conv16_5_b\nI0818 13:44:37.176026 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176031 22726 net.cpp:165] Memory required for data: 468481500\nI0818 13:44:37.176040 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_4_b\nI0818 13:44:37.176054 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_4_b\nI0818 13:44:37.176060 22726 net.cpp:434] sum_sum_bn_Conv16_4_b <- sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_1\nI0818 
13:44:37.176067 22726 net.cpp:434] sum_sum_bn_Conv16_4_b <- bn_Conv16_5_b\nI0818 13:44:37.176075 22726 net.cpp:408] sum_sum_bn_Conv16_4_b -> sum_bn_Conv16_5_b\nI0818 13:44:37.176110 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_4_b\nI0818 13:44:37.176120 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176126 22726 net.cpp:165] Memory required for data: 476673500\nI0818 13:44:37.176131 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_5_b\nI0818 13:44:37.176138 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_5_b\nI0818 13:44:37.176143 22726 net.cpp:434] relu_sum_bn_Conv16_5_b <- sum_bn_Conv16_5_b\nI0818 13:44:37.176153 22726 net.cpp:395] relu_sum_bn_Conv16_5_b -> sum_bn_Conv16_5_b (in-place)\nI0818 13:44:37.176162 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_5_b\nI0818 13:44:37.176169 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176173 22726 net.cpp:165] Memory required for data: 484865500\nI0818 13:44:37.176178 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.176184 22726 net.cpp:100] Creating Layer sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.176190 22726 net.cpp:434] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split <- sum_bn_Conv16_5_b\nI0818 13:44:37.176199 22726 net.cpp:408] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split -> sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_0\nI0818 13:44:37.176209 22726 net.cpp:408] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split -> sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_1\nI0818 13:44:37.176259 22726 net.cpp:150] Setting up sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.176270 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176275 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176280 22726 net.cpp:165] Memory required for data: 501249500\nI0818 13:44:37.176285 22726 layer_factory.hpp:77] Creating layer 
Conv16_6\nI0818 13:44:37.176300 22726 net.cpp:100] Creating Layer Conv16_6\nI0818 13:44:37.176306 22726 net.cpp:434] Conv16_6 <- sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_0\nI0818 13:44:37.176316 22726 net.cpp:408] Conv16_6 -> Conv16_6\nI0818 13:44:37.176626 22726 net.cpp:150] Setting up Conv16_6\nI0818 13:44:37.176640 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176645 22726 net.cpp:165] Memory required for data: 509441500\nI0818 13:44:37.176653 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_6\nI0818 13:44:37.176664 22726 net.cpp:100] Creating Layer batchNorm_Conv16_6\nI0818 13:44:37.176671 22726 net.cpp:434] batchNorm_Conv16_6 <- Conv16_6\nI0818 13:44:37.176681 22726 net.cpp:408] batchNorm_Conv16_6 -> bn_Conv16_6\nI0818 13:44:37.176928 22726 net.cpp:150] Setting up batchNorm_Conv16_6\nI0818 13:44:37.176940 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.176945 22726 net.cpp:165] Memory required for data: 517633500\nI0818 13:44:37.176955 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6\nI0818 13:44:37.176964 22726 net.cpp:100] Creating Layer scale_Conv16_6\nI0818 13:44:37.176970 22726 net.cpp:434] scale_Conv16_6 <- bn_Conv16_6\nI0818 13:44:37.176977 22726 net.cpp:395] scale_Conv16_6 -> bn_Conv16_6 (in-place)\nI0818 13:44:37.177032 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6\nI0818 13:44:37.177175 22726 net.cpp:150] Setting up scale_Conv16_6\nI0818 13:44:37.177187 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.177192 22726 net.cpp:165] Memory required for data: 525825500\nI0818 13:44:37.177201 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_6\nI0818 13:44:37.177211 22726 net.cpp:100] Creating Layer relu_bn_Conv16_6\nI0818 13:44:37.177217 22726 net.cpp:434] relu_bn_Conv16_6 <- bn_Conv16_6\nI0818 13:44:37.177225 22726 net.cpp:395] relu_bn_Conv16_6 -> bn_Conv16_6 (in-place)\nI0818 13:44:37.177234 22726 net.cpp:150] Setting up relu_bn_Conv16_6\nI0818 
13:44:37.177240 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.177245 22726 net.cpp:165] Memory required for data: 534017500\nI0818 13:44:37.177249 22726 layer_factory.hpp:77] Creating layer Conv16_6_b\nI0818 13:44:37.177263 22726 net.cpp:100] Creating Layer Conv16_6_b\nI0818 13:44:37.177269 22726 net.cpp:434] Conv16_6_b <- bn_Conv16_6\nI0818 13:44:37.177280 22726 net.cpp:408] Conv16_6_b -> Conv16_6_b\nI0818 13:44:37.177601 22726 net.cpp:150] Setting up Conv16_6_b\nI0818 13:44:37.177614 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.177619 22726 net.cpp:165] Memory required for data: 542209500\nI0818 13:44:37.177628 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_6_b\nI0818 13:44:37.177639 22726 net.cpp:100] Creating Layer batchNorm_Conv16_6_b\nI0818 13:44:37.177645 22726 net.cpp:434] batchNorm_Conv16_6_b <- Conv16_6_b\nI0818 13:44:37.177654 22726 net.cpp:408] batchNorm_Conv16_6_b -> bn_Conv16_6_b\nI0818 13:44:37.177902 22726 net.cpp:150] Setting up batchNorm_Conv16_6_b\nI0818 13:44:37.177916 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.177920 22726 net.cpp:165] Memory required for data: 550401500\nI0818 13:44:37.177930 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6_b\nI0818 13:44:37.177939 22726 net.cpp:100] Creating Layer scale_Conv16_6_b\nI0818 13:44:37.177945 22726 net.cpp:434] scale_Conv16_6_b <- bn_Conv16_6_b\nI0818 13:44:37.177953 22726 net.cpp:395] scale_Conv16_6_b -> bn_Conv16_6_b (in-place)\nI0818 13:44:37.178007 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6_b\nI0818 13:44:37.178149 22726 net.cpp:150] Setting up scale_Conv16_6_b\nI0818 13:44:37.178169 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178174 22726 net.cpp:165] Memory required for data: 558593500\nI0818 13:44:37.178182 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_5_b\nI0818 13:44:37.178200 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_5_b\nI0818 
13:44:37.178206 22726 net.cpp:434] sum_sum_bn_Conv16_5_b <- sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_1\nI0818 13:44:37.178213 22726 net.cpp:434] sum_sum_bn_Conv16_5_b <- bn_Conv16_6_b\nI0818 13:44:37.178223 22726 net.cpp:408] sum_sum_bn_Conv16_5_b -> sum_bn_Conv16_6_b\nI0818 13:44:37.178256 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_5_b\nI0818 13:44:37.178267 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178272 22726 net.cpp:165] Memory required for data: 566785500\nI0818 13:44:37.178277 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_6_b\nI0818 13:44:37.178285 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_6_b\nI0818 13:44:37.178290 22726 net.cpp:434] relu_sum_bn_Conv16_6_b <- sum_bn_Conv16_6_b\nI0818 13:44:37.178297 22726 net.cpp:395] relu_sum_bn_Conv16_6_b -> sum_bn_Conv16_6_b (in-place)\nI0818 13:44:37.178306 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_6_b\nI0818 13:44:37.178313 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178318 22726 net.cpp:165] Memory required for data: 574977500\nI0818 13:44:37.178323 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.178333 22726 net.cpp:100] Creating Layer sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.178339 22726 net.cpp:434] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split <- sum_bn_Conv16_6_b\nI0818 13:44:37.178345 22726 net.cpp:408] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split -> sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_0\nI0818 13:44:37.178354 22726 net.cpp:408] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split -> sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_1\nI0818 13:44:37.178397 22726 net.cpp:150] Setting up sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.178411 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178418 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178423 22726 
net.cpp:165] Memory required for data: 591361500\nI0818 13:44:37.178428 22726 layer_factory.hpp:77] Creating layer Conv16_7\nI0818 13:44:37.178439 22726 net.cpp:100] Creating Layer Conv16_7\nI0818 13:44:37.178445 22726 net.cpp:434] Conv16_7 <- sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_0\nI0818 13:44:37.178454 22726 net.cpp:408] Conv16_7 -> Conv16_7\nI0818 13:44:37.178768 22726 net.cpp:150] Setting up Conv16_7\nI0818 13:44:37.178781 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.178786 22726 net.cpp:165] Memory required for data: 599553500\nI0818 13:44:37.178795 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_7\nI0818 13:44:37.178807 22726 net.cpp:100] Creating Layer batchNorm_Conv16_7\nI0818 13:44:37.178818 22726 net.cpp:434] batchNorm_Conv16_7 <- Conv16_7\nI0818 13:44:37.178828 22726 net.cpp:408] batchNorm_Conv16_7 -> bn_Conv16_7\nI0818 13:44:37.179083 22726 net.cpp:150] Setting up batchNorm_Conv16_7\nI0818 13:44:37.179096 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.179101 22726 net.cpp:165] Memory required for data: 607745500\nI0818 13:44:37.179111 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7\nI0818 13:44:37.179121 22726 net.cpp:100] Creating Layer scale_Conv16_7\nI0818 13:44:37.179126 22726 net.cpp:434] scale_Conv16_7 <- bn_Conv16_7\nI0818 13:44:37.179136 22726 net.cpp:395] scale_Conv16_7 -> bn_Conv16_7 (in-place)\nI0818 13:44:37.179190 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7\nI0818 13:44:37.179332 22726 net.cpp:150] Setting up scale_Conv16_7\nI0818 13:44:37.179345 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.179349 22726 net.cpp:165] Memory required for data: 615937500\nI0818 13:44:37.179358 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_7\nI0818 13:44:37.179368 22726 net.cpp:100] Creating Layer relu_bn_Conv16_7\nI0818 13:44:37.179381 22726 net.cpp:434] relu_bn_Conv16_7 <- bn_Conv16_7\nI0818 13:44:37.179389 22726 net.cpp:395] 
relu_bn_Conv16_7 -> bn_Conv16_7 (in-place)\nI0818 13:44:37.179399 22726 net.cpp:150] Setting up relu_bn_Conv16_7\nI0818 13:44:37.179405 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.179409 22726 net.cpp:165] Memory required for data: 624129500\nI0818 13:44:37.179414 22726 layer_factory.hpp:77] Creating layer Conv16_7_b\nI0818 13:44:37.179428 22726 net.cpp:100] Creating Layer Conv16_7_b\nI0818 13:44:37.179435 22726 net.cpp:434] Conv16_7_b <- bn_Conv16_7\nI0818 13:44:37.179445 22726 net.cpp:408] Conv16_7_b -> Conv16_7_b\nI0818 13:44:37.179761 22726 net.cpp:150] Setting up Conv16_7_b\nI0818 13:44:37.179775 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.179780 22726 net.cpp:165] Memory required for data: 632321500\nI0818 13:44:37.179788 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_7_b\nI0818 13:44:37.179800 22726 net.cpp:100] Creating Layer batchNorm_Conv16_7_b\nI0818 13:44:37.179806 22726 net.cpp:434] batchNorm_Conv16_7_b <- Conv16_7_b\nI0818 13:44:37.179824 22726 net.cpp:408] batchNorm_Conv16_7_b -> bn_Conv16_7_b\nI0818 13:44:37.180066 22726 net.cpp:150] Setting up batchNorm_Conv16_7_b\nI0818 13:44:37.180079 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180083 22726 net.cpp:165] Memory required for data: 640513500\nI0818 13:44:37.180094 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7_b\nI0818 13:44:37.180102 22726 net.cpp:100] Creating Layer scale_Conv16_7_b\nI0818 13:44:37.180109 22726 net.cpp:434] scale_Conv16_7_b <- bn_Conv16_7_b\nI0818 13:44:37.180116 22726 net.cpp:395] scale_Conv16_7_b -> bn_Conv16_7_b (in-place)\nI0818 13:44:37.180171 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7_b\nI0818 13:44:37.180311 22726 net.cpp:150] Setting up scale_Conv16_7_b\nI0818 13:44:37.180323 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180328 22726 net.cpp:165] Memory required for data: 648705500\nI0818 13:44:37.180337 22726 layer_factory.hpp:77] 
Creating layer sum_sum_bn_Conv16_6_b\nI0818 13:44:37.180349 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_6_b\nI0818 13:44:37.180356 22726 net.cpp:434] sum_sum_bn_Conv16_6_b <- sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_1\nI0818 13:44:37.180363 22726 net.cpp:434] sum_sum_bn_Conv16_6_b <- bn_Conv16_7_b\nI0818 13:44:37.180371 22726 net.cpp:408] sum_sum_bn_Conv16_6_b -> sum_bn_Conv16_7_b\nI0818 13:44:37.180408 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_6_b\nI0818 13:44:37.180419 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180424 22726 net.cpp:165] Memory required for data: 656897500\nI0818 13:44:37.180429 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_7_b\nI0818 13:44:37.180438 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_7_b\nI0818 13:44:37.180443 22726 net.cpp:434] relu_sum_bn_Conv16_7_b <- sum_bn_Conv16_7_b\nI0818 13:44:37.180452 22726 net.cpp:395] relu_sum_bn_Conv16_7_b -> sum_bn_Conv16_7_b (in-place)\nI0818 13:44:37.180462 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_7_b\nI0818 13:44:37.180469 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180474 22726 net.cpp:165] Memory required for data: 665089500\nI0818 13:44:37.180477 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.180485 22726 net.cpp:100] Creating Layer sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.180490 22726 net.cpp:434] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split <- sum_bn_Conv16_7_b\nI0818 13:44:37.180500 22726 net.cpp:408] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split -> sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_0\nI0818 13:44:37.180510 22726 net.cpp:408] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split -> sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_1\nI0818 13:44:37.180552 22726 net.cpp:150] Setting up sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.180563 22726 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 13:44:37.180577 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180582 22726 net.cpp:165] Memory required for data: 681473500\nI0818 13:44:37.180586 22726 layer_factory.hpp:77] Creating layer Conv16_8\nI0818 13:44:37.180600 22726 net.cpp:100] Creating Layer Conv16_8\nI0818 13:44:37.180606 22726 net.cpp:434] Conv16_8 <- sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_0\nI0818 13:44:37.180616 22726 net.cpp:408] Conv16_8 -> Conv16_8\nI0818 13:44:37.180948 22726 net.cpp:150] Setting up Conv16_8\nI0818 13:44:37.180963 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.180968 22726 net.cpp:165] Memory required for data: 689665500\nI0818 13:44:37.180976 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_8\nI0818 13:44:37.180985 22726 net.cpp:100] Creating Layer batchNorm_Conv16_8\nI0818 13:44:37.180991 22726 net.cpp:434] batchNorm_Conv16_8 <- Conv16_8\nI0818 13:44:37.181005 22726 net.cpp:408] batchNorm_Conv16_8 -> bn_Conv16_8\nI0818 13:44:37.181249 22726 net.cpp:150] Setting up batchNorm_Conv16_8\nI0818 13:44:37.181262 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.181267 22726 net.cpp:165] Memory required for data: 697857500\nI0818 13:44:37.181277 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8\nI0818 13:44:37.181287 22726 net.cpp:100] Creating Layer scale_Conv16_8\nI0818 13:44:37.181291 22726 net.cpp:434] scale_Conv16_8 <- bn_Conv16_8\nI0818 13:44:37.181303 22726 net.cpp:395] scale_Conv16_8 -> bn_Conv16_8 (in-place)\nI0818 13:44:37.181355 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8\nI0818 13:44:37.181494 22726 net.cpp:150] Setting up scale_Conv16_8\nI0818 13:44:37.181509 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.181514 22726 net.cpp:165] Memory required for data: 706049500\nI0818 13:44:37.181524 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_8\nI0818 13:44:37.181531 22726 net.cpp:100] Creating Layer 
relu_bn_Conv16_8\nI0818 13:44:37.181537 22726 net.cpp:434] relu_bn_Conv16_8 <- bn_Conv16_8\nI0818 13:44:37.181545 22726 net.cpp:395] relu_bn_Conv16_8 -> bn_Conv16_8 (in-place)\nI0818 13:44:37.181553 22726 net.cpp:150] Setting up relu_bn_Conv16_8\nI0818 13:44:37.181560 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.181565 22726 net.cpp:165] Memory required for data: 714241500\nI0818 13:44:37.181569 22726 layer_factory.hpp:77] Creating layer Conv16_8_b\nI0818 13:44:37.181584 22726 net.cpp:100] Creating Layer Conv16_8_b\nI0818 13:44:37.181591 22726 net.cpp:434] Conv16_8_b <- bn_Conv16_8\nI0818 13:44:37.181602 22726 net.cpp:408] Conv16_8_b -> Conv16_8_b\nI0818 13:44:37.181929 22726 net.cpp:150] Setting up Conv16_8_b\nI0818 13:44:37.181943 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.181948 22726 net.cpp:165] Memory required for data: 722433500\nI0818 13:44:37.181957 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_8_b\nI0818 13:44:37.181968 22726 net.cpp:100] Creating Layer batchNorm_Conv16_8_b\nI0818 13:44:37.181974 22726 net.cpp:434] batchNorm_Conv16_8_b <- Conv16_8_b\nI0818 13:44:37.181983 22726 net.cpp:408] batchNorm_Conv16_8_b -> bn_Conv16_8_b\nI0818 13:44:37.182268 22726 net.cpp:150] Setting up batchNorm_Conv16_8_b\nI0818 13:44:37.182282 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.182287 22726 net.cpp:165] Memory required for data: 730625500\nI0818 13:44:37.182297 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8_b\nI0818 13:44:37.182307 22726 net.cpp:100] Creating Layer scale_Conv16_8_b\nI0818 13:44:37.182312 22726 net.cpp:434] scale_Conv16_8_b <- bn_Conv16_8_b\nI0818 13:44:37.182319 22726 net.cpp:395] scale_Conv16_8_b -> bn_Conv16_8_b (in-place)\nI0818 13:44:37.182382 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8_b\nI0818 13:44:37.182524 22726 net.cpp:150] Setting up scale_Conv16_8_b\nI0818 13:44:37.182538 22726 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 13:44:37.182543 22726 net.cpp:165] Memory required for data: 738817500\nI0818 13:44:37.182551 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_7_b\nI0818 13:44:37.182567 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_7_b\nI0818 13:44:37.182576 22726 net.cpp:434] sum_sum_bn_Conv16_7_b <- sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_1\nI0818 13:44:37.182585 22726 net.cpp:434] sum_sum_bn_Conv16_7_b <- bn_Conv16_8_b\nI0818 13:44:37.182591 22726 net.cpp:408] sum_sum_bn_Conv16_7_b -> sum_bn_Conv16_8_b\nI0818 13:44:37.182624 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_7_b\nI0818 13:44:37.182636 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.182641 22726 net.cpp:165] Memory required for data: 747009500\nI0818 13:44:37.182646 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_8_b\nI0818 13:44:37.182657 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_8_b\nI0818 13:44:37.182662 22726 net.cpp:434] relu_sum_bn_Conv16_8_b <- sum_bn_Conv16_8_b\nI0818 13:44:37.182672 22726 net.cpp:395] relu_sum_bn_Conv16_8_b -> sum_bn_Conv16_8_b (in-place)\nI0818 13:44:37.182682 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_8_b\nI0818 13:44:37.182688 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.182693 22726 net.cpp:165] Memory required for data: 755201500\nI0818 13:44:37.182698 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.182704 22726 net.cpp:100] Creating Layer sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.182709 22726 net.cpp:434] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split <- sum_bn_Conv16_8_b\nI0818 13:44:37.182716 22726 net.cpp:408] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split -> sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_0\nI0818 13:44:37.182725 22726 net.cpp:408] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split -> sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_1\nI0818 13:44:37.182773 
22726 net.cpp:150] Setting up sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.182785 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.182790 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.182796 22726 net.cpp:165] Memory required for data: 771585500\nI0818 13:44:37.182801 22726 layer_factory.hpp:77] Creating layer Conv16_9\nI0818 13:44:37.182821 22726 net.cpp:100] Creating Layer Conv16_9\nI0818 13:44:37.182827 22726 net.cpp:434] Conv16_9 <- sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_0\nI0818 13:44:37.182837 22726 net.cpp:408] Conv16_9 -> Conv16_9\nI0818 13:44:37.183164 22726 net.cpp:150] Setting up Conv16_9\nI0818 13:44:37.183182 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.183187 22726 net.cpp:165] Memory required for data: 779777500\nI0818 13:44:37.183195 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_9\nI0818 13:44:37.183205 22726 net.cpp:100] Creating Layer batchNorm_Conv16_9\nI0818 13:44:37.183210 22726 net.cpp:434] batchNorm_Conv16_9 <- Conv16_9\nI0818 13:44:37.183219 22726 net.cpp:408] batchNorm_Conv16_9 -> bn_Conv16_9\nI0818 13:44:37.183465 22726 net.cpp:150] Setting up batchNorm_Conv16_9\nI0818 13:44:37.183478 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.183482 22726 net.cpp:165] Memory required for data: 787969500\nI0818 13:44:37.183493 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9\nI0818 13:44:37.183507 22726 net.cpp:100] Creating Layer scale_Conv16_9\nI0818 13:44:37.183513 22726 net.cpp:434] scale_Conv16_9 <- bn_Conv16_9\nI0818 13:44:37.183521 22726 net.cpp:395] scale_Conv16_9 -> bn_Conv16_9 (in-place)\nI0818 13:44:37.183576 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9\nI0818 13:44:37.183722 22726 net.cpp:150] Setting up scale_Conv16_9\nI0818 13:44:37.183734 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.183739 22726 net.cpp:165] Memory required for data: 796161500\nI0818 
13:44:37.183748 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_9\nI0818 13:44:37.183756 22726 net.cpp:100] Creating Layer relu_bn_Conv16_9\nI0818 13:44:37.183761 22726 net.cpp:434] relu_bn_Conv16_9 <- bn_Conv16_9\nI0818 13:44:37.183773 22726 net.cpp:395] relu_bn_Conv16_9 -> bn_Conv16_9 (in-place)\nI0818 13:44:37.183781 22726 net.cpp:150] Setting up relu_bn_Conv16_9\nI0818 13:44:37.183794 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.183799 22726 net.cpp:165] Memory required for data: 804353500\nI0818 13:44:37.183804 22726 layer_factory.hpp:77] Creating layer Conv16_9_b\nI0818 13:44:37.183825 22726 net.cpp:100] Creating Layer Conv16_9_b\nI0818 13:44:37.183831 22726 net.cpp:434] Conv16_9_b <- bn_Conv16_9\nI0818 13:44:37.183840 22726 net.cpp:408] Conv16_9_b -> Conv16_9_b\nI0818 13:44:37.184165 22726 net.cpp:150] Setting up Conv16_9_b\nI0818 13:44:37.184178 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184182 22726 net.cpp:165] Memory required for data: 812545500\nI0818 13:44:37.184191 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_9_b\nI0818 13:44:37.184202 22726 net.cpp:100] Creating Layer batchNorm_Conv16_9_b\nI0818 13:44:37.184208 22726 net.cpp:434] batchNorm_Conv16_9_b <- Conv16_9_b\nI0818 13:44:37.184217 22726 net.cpp:408] batchNorm_Conv16_9_b -> bn_Conv16_9_b\nI0818 13:44:37.184460 22726 net.cpp:150] Setting up batchNorm_Conv16_9_b\nI0818 13:44:37.184473 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184478 22726 net.cpp:165] Memory required for data: 820737500\nI0818 13:44:37.184509 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9_b\nI0818 13:44:37.184521 22726 net.cpp:100] Creating Layer scale_Conv16_9_b\nI0818 13:44:37.184527 22726 net.cpp:434] scale_Conv16_9_b <- bn_Conv16_9_b\nI0818 13:44:37.184535 22726 net.cpp:395] scale_Conv16_9_b -> bn_Conv16_9_b (in-place)\nI0818 13:44:37.184592 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9_b\nI0818 
13:44:37.184732 22726 net.cpp:150] Setting up scale_Conv16_9_b\nI0818 13:44:37.184746 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184751 22726 net.cpp:165] Memory required for data: 828929500\nI0818 13:44:37.184758 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_8_b\nI0818 13:44:37.184767 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_8_b\nI0818 13:44:37.184774 22726 net.cpp:434] sum_sum_bn_Conv16_8_b <- sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_1\nI0818 13:44:37.184782 22726 net.cpp:434] sum_sum_bn_Conv16_8_b <- bn_Conv16_9_b\nI0818 13:44:37.184788 22726 net.cpp:408] sum_sum_bn_Conv16_8_b -> sum_bn_Conv16_9_b\nI0818 13:44:37.184834 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_8_b\nI0818 13:44:37.184845 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184850 22726 net.cpp:165] Memory required for data: 837121500\nI0818 13:44:37.184855 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_9_b\nI0818 13:44:37.184864 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_9_b\nI0818 13:44:37.184869 22726 net.cpp:434] relu_sum_bn_Conv16_9_b <- sum_bn_Conv16_9_b\nI0818 13:44:37.184878 22726 net.cpp:395] relu_sum_bn_Conv16_9_b -> sum_bn_Conv16_9_b (in-place)\nI0818 13:44:37.184888 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_9_b\nI0818 13:44:37.184895 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184900 22726 net.cpp:165] Memory required for data: 845313500\nI0818 13:44:37.184906 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.184911 22726 net.cpp:100] Creating Layer sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.184916 22726 net.cpp:434] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split <- sum_bn_Conv16_9_b\nI0818 13:44:37.184926 22726 net.cpp:408] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split -> sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_0\nI0818 13:44:37.184937 22726 net.cpp:408] 
sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split -> sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_1\nI0818 13:44:37.184980 22726 net.cpp:150] Setting up sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.184991 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.184998 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.185003 22726 net.cpp:165] Memory required for data: 861697500\nI0818 13:44:37.185008 22726 layer_factory.hpp:77] Creating layer resblk32\nI0818 13:44:37.185029 22726 net.cpp:100] Creating Layer resblk32\nI0818 13:44:37.185036 22726 net.cpp:434] resblk32 <- sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_0\nI0818 13:44:37.185045 22726 net.cpp:408] resblk32 -> resblk32\nI0818 13:44:37.185369 22726 net.cpp:150] Setting up resblk32\nI0818 13:44:37.185382 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.185387 22726 net.cpp:165] Memory required for data: 863745500\nI0818 13:44:37.185395 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32\nI0818 13:44:37.185405 22726 net.cpp:100] Creating Layer batchNorm_resblk32\nI0818 13:44:37.185413 22726 net.cpp:434] batchNorm_resblk32 <- resblk32\nI0818 13:44:37.185421 22726 net.cpp:408] batchNorm_resblk32 -> bn_resblk32\nI0818 13:44:37.185657 22726 net.cpp:150] Setting up batchNorm_resblk32\nI0818 13:44:37.185669 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.185674 22726 net.cpp:165] Memory required for data: 865793500\nI0818 13:44:37.185684 22726 layer_factory.hpp:77] Creating layer scale_resblk32\nI0818 13:44:37.185693 22726 net.cpp:100] Creating Layer scale_resblk32\nI0818 13:44:37.185698 22726 net.cpp:434] scale_resblk32 <- bn_resblk32\nI0818 13:44:37.185706 22726 net.cpp:395] scale_resblk32 -> bn_resblk32 (in-place)\nI0818 13:44:37.185763 22726 layer_factory.hpp:77] Creating layer scale_resblk32\nI0818 13:44:37.185912 22726 net.cpp:150] Setting up scale_resblk32\nI0818 13:44:37.185928 22726 net.cpp:157] Top 
shape: 125 16 16 16 (512000)\nI0818 13:44:37.185933 22726 net.cpp:165] Memory required for data: 867841500\nI0818 13:44:37.185942 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32\nI0818 13:44:37.185950 22726 net.cpp:100] Creating Layer relu_bn_resblk32\nI0818 13:44:37.185956 22726 net.cpp:434] relu_bn_resblk32 <- bn_resblk32\nI0818 13:44:37.185963 22726 net.cpp:395] relu_bn_resblk32 -> bn_resblk32 (in-place)\nI0818 13:44:37.185972 22726 net.cpp:150] Setting up relu_bn_resblk32\nI0818 13:44:37.185978 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.185983 22726 net.cpp:165] Memory required for data: 869889500\nI0818 13:44:37.185987 22726 layer_factory.hpp:77] Creating layer resblk32_b\nI0818 13:44:37.186002 22726 net.cpp:100] Creating Layer resblk32_b\nI0818 13:44:37.186007 22726 net.cpp:434] resblk32_b <- bn_resblk32\nI0818 13:44:37.186017 22726 net.cpp:408] resblk32_b -> resblk32_b\nI0818 13:44:37.186341 22726 net.cpp:150] Setting up resblk32_b\nI0818 13:44:37.186354 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.186358 22726 net.cpp:165] Memory required for data: 871937500\nI0818 13:44:37.186367 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_b\nI0818 13:44:37.186379 22726 net.cpp:100] Creating Layer batchNorm_resblk32_b\nI0818 13:44:37.186385 22726 net.cpp:434] batchNorm_resblk32_b <- resblk32_b\nI0818 13:44:37.186399 22726 net.cpp:408] batchNorm_resblk32_b -> bn_resblk32_b\nI0818 13:44:37.186643 22726 net.cpp:150] Setting up batchNorm_resblk32_b\nI0818 13:44:37.186656 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.186661 22726 net.cpp:165] Memory required for data: 873985500\nI0818 13:44:37.186671 22726 layer_factory.hpp:77] Creating layer scale_resblk32_b\nI0818 13:44:37.186679 22726 net.cpp:100] Creating Layer scale_resblk32_b\nI0818 13:44:37.186686 22726 net.cpp:434] scale_resblk32_b <- bn_resblk32_b\nI0818 13:44:37.186693 22726 net.cpp:395] scale_resblk32_b -> 
bn_resblk32_b (in-place)\nI0818 13:44:37.186750 22726 layer_factory.hpp:77] Creating layer scale_resblk32_b\nI0818 13:44:37.186900 22726 net.cpp:150] Setting up scale_resblk32_b\nI0818 13:44:37.186913 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.186918 22726 net.cpp:165] Memory required for data: 876033500\nI0818 13:44:37.186928 22726 layer_factory.hpp:77] Creating layer avePooling_resblk32\nI0818 13:44:37.186940 22726 net.cpp:100] Creating Layer avePooling_resblk32\nI0818 13:44:37.186947 22726 net.cpp:434] avePooling_resblk32 <- sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_1\nI0818 13:44:37.186955 22726 net.cpp:408] avePooling_resblk32 -> avgPool_resblk32\nI0818 13:44:37.187046 22726 net.cpp:150] Setting up avePooling_resblk32\nI0818 13:44:37.187062 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.187067 22726 net.cpp:165] Memory required for data: 878081500\nI0818 13:44:37.187072 22726 layer_factory.hpp:77] Creating layer sum_avgPool_resblk32\nI0818 13:44:37.187086 22726 net.cpp:100] Creating Layer sum_avgPool_resblk32\nI0818 13:44:37.187093 22726 net.cpp:434] sum_avgPool_resblk32 <- avgPool_resblk32\nI0818 13:44:37.187099 22726 net.cpp:434] sum_avgPool_resblk32 <- bn_resblk32_b\nI0818 13:44:37.187108 22726 net.cpp:408] sum_avgPool_resblk32 -> sum_bn_resblk32_b\nI0818 13:44:37.187141 22726 net.cpp:150] Setting up sum_avgPool_resblk32\nI0818 13:44:37.187153 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.187158 22726 net.cpp:165] Memory required for data: 880129500\nI0818 13:44:37.187163 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_b\nI0818 13:44:37.187173 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_b\nI0818 13:44:37.187180 22726 net.cpp:434] relu_sum_bn_resblk32_b <- sum_bn_resblk32_b\nI0818 13:44:37.187186 22726 net.cpp:395] relu_sum_bn_resblk32_b -> sum_bn_resblk32_b (in-place)\nI0818 13:44:37.187196 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_b\nI0818 
13:44:37.187202 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.187207 22726 net.cpp:165] Memory required for data: 882177500\nI0818 13:44:37.187211 22726 layer_factory.hpp:77] Creating layer zeros_sum_bn_resblk32_b\nI0818 13:44:37.187258 22726 net.cpp:100] Creating Layer zeros_sum_bn_resblk32_b\nI0818 13:44:37.187273 22726 net.cpp:408] zeros_sum_bn_resblk32_b -> zeros_sum_bn_resblk32_b\nI0818 13:44:37.189713 22726 net.cpp:150] Setting up zeros_sum_bn_resblk32_b\nI0818 13:44:37.189735 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.189740 22726 net.cpp:165] Memory required for data: 884225500\nI0818 13:44:37.189746 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk32_b\nI0818 13:44:37.189759 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk32_b\nI0818 13:44:37.189764 22726 net.cpp:434] CC_sum_bn_resblk32_b <- sum_bn_resblk32_b\nI0818 13:44:37.189771 22726 net.cpp:434] CC_sum_bn_resblk32_b <- zeros_sum_bn_resblk32_b\nI0818 13:44:37.189782 22726 net.cpp:408] CC_sum_bn_resblk32_b -> CC_sum_bn_resblk32_b\nI0818 13:44:37.189864 22726 net.cpp:150] Setting up CC_sum_bn_resblk32_b\nI0818 13:44:37.189879 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.189884 22726 net.cpp:165] Memory required for data: 888321500\nI0818 13:44:37.189890 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.189903 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.189908 22726 net.cpp:434] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split <- CC_sum_bn_resblk32_b\nI0818 13:44:37.189916 22726 net.cpp:408] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split -> CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_0\nI0818 13:44:37.189926 22726 net.cpp:408] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split -> CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_1\nI0818 13:44:37.189977 22726 net.cpp:150] Setting up 
CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.189990 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.189996 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.190001 22726 net.cpp:165] Memory required for data: 896513500\nI0818 13:44:37.190006 22726 layer_factory.hpp:77] Creating layer resblk32_1\nI0818 13:44:37.190019 22726 net.cpp:100] Creating Layer resblk32_1\nI0818 13:44:37.190026 22726 net.cpp:434] resblk32_1 <- CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_0\nI0818 13:44:37.190037 22726 net.cpp:408] resblk32_1 -> resblk32_1\nI0818 13:44:37.191506 22726 net.cpp:150] Setting up resblk32_1\nI0818 13:44:37.191524 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.191529 22726 net.cpp:165] Memory required for data: 900609500\nI0818 13:44:37.191546 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_1\nI0818 13:44:37.191556 22726 net.cpp:100] Creating Layer batchNorm_resblk32_1\nI0818 13:44:37.191562 22726 net.cpp:434] batchNorm_resblk32_1 <- resblk32_1\nI0818 13:44:37.191575 22726 net.cpp:408] batchNorm_resblk32_1 -> bn_resblk32_1\nI0818 13:44:37.191833 22726 net.cpp:150] Setting up batchNorm_resblk32_1\nI0818 13:44:37.191845 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.191850 22726 net.cpp:165] Memory required for data: 904705500\nI0818 13:44:37.191861 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1\nI0818 13:44:37.191870 22726 net.cpp:100] Creating Layer scale_resblk32_1\nI0818 13:44:37.191876 22726 net.cpp:434] scale_resblk32_1 <- bn_resblk32_1\nI0818 13:44:37.191884 22726 net.cpp:395] scale_resblk32_1 -> bn_resblk32_1 (in-place)\nI0818 13:44:37.191943 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1\nI0818 13:44:37.192090 22726 net.cpp:150] Setting up scale_resblk32_1\nI0818 13:44:37.192106 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.192111 22726 net.cpp:165] Memory required for data: 
908801500\nI0818 13:44:37.192121 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_1\nI0818 13:44:37.192129 22726 net.cpp:100] Creating Layer relu_bn_resblk32_1\nI0818 13:44:37.192136 22726 net.cpp:434] relu_bn_resblk32_1 <- bn_resblk32_1\nI0818 13:44:37.192142 22726 net.cpp:395] relu_bn_resblk32_1 -> bn_resblk32_1 (in-place)\nI0818 13:44:37.192152 22726 net.cpp:150] Setting up relu_bn_resblk32_1\nI0818 13:44:37.192159 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.192163 22726 net.cpp:165] Memory required for data: 912897500\nI0818 13:44:37.192168 22726 layer_factory.hpp:77] Creating layer resblk32_1_b\nI0818 13:44:37.192183 22726 net.cpp:100] Creating Layer resblk32_1_b\nI0818 13:44:37.192189 22726 net.cpp:434] resblk32_1_b <- bn_resblk32_1\nI0818 13:44:37.192200 22726 net.cpp:408] resblk32_1_b -> resblk32_1_b\nI0818 13:44:37.192662 22726 net.cpp:150] Setting up resblk32_1_b\nI0818 13:44:37.192677 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.192680 22726 net.cpp:165] Memory required for data: 916993500\nI0818 13:44:37.192689 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_1_b\nI0818 13:44:37.192701 22726 net.cpp:100] Creating Layer batchNorm_resblk32_1_b\nI0818 13:44:37.192708 22726 net.cpp:434] batchNorm_resblk32_1_b <- resblk32_1_b\nI0818 13:44:37.192715 22726 net.cpp:408] batchNorm_resblk32_1_b -> bn_resblk32_1_b\nI0818 13:44:37.192973 22726 net.cpp:150] Setting up batchNorm_resblk32_1_b\nI0818 13:44:37.192987 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.192992 22726 net.cpp:165] Memory required for data: 921089500\nI0818 13:44:37.193002 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1_b\nI0818 13:44:37.193011 22726 net.cpp:100] Creating Layer scale_resblk32_1_b\nI0818 13:44:37.193017 22726 net.cpp:434] scale_resblk32_1_b <- bn_resblk32_1_b\nI0818 13:44:37.193024 22726 net.cpp:395] scale_resblk32_1_b -> bn_resblk32_1_b (in-place)\nI0818 13:44:37.193081 
22726 layer_factory.hpp:77] Creating layer scale_resblk32_1_b\nI0818 13:44:37.193224 22726 net.cpp:150] Setting up scale_resblk32_1_b\nI0818 13:44:37.193236 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.193241 22726 net.cpp:165] Memory required for data: 925185500\nI0818 13:44:37.193250 22726 layer_factory.hpp:77] Creating layer sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.193262 22726 net.cpp:100] Creating Layer sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.193269 22726 net.cpp:434] sum_CC_sum_bn_resblk32_b <- CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_1\nI0818 13:44:37.193275 22726 net.cpp:434] sum_CC_sum_bn_resblk32_b <- bn_resblk32_1_b\nI0818 13:44:37.193284 22726 net.cpp:408] sum_CC_sum_bn_resblk32_b -> sum_bn_resblk32_1_b\nI0818 13:44:37.193310 22726 net.cpp:150] Setting up sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.193320 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.193325 22726 net.cpp:165] Memory required for data: 929281500\nI0818 13:44:37.193336 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_1_b\nI0818 13:44:37.193347 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_1_b\nI0818 13:44:37.193353 22726 net.cpp:434] relu_sum_bn_resblk32_1_b <- sum_bn_resblk32_1_b\nI0818 13:44:37.193361 22726 net.cpp:395] relu_sum_bn_resblk32_1_b -> sum_bn_resblk32_1_b (in-place)\nI0818 13:44:37.193370 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_1_b\nI0818 13:44:37.193377 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.193382 22726 net.cpp:165] Memory required for data: 933377500\nI0818 13:44:37.193387 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.193393 22726 net.cpp:100] Creating Layer sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.193398 22726 net.cpp:434] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split <- sum_bn_resblk32_1_b\nI0818 13:44:37.193405 22726 net.cpp:408] 
sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split -> sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_0\nI0818 13:44:37.193414 22726 net.cpp:408] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split -> sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_1\nI0818 13:44:37.193461 22726 net.cpp:150] Setting up sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.193473 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.193480 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.193483 22726 net.cpp:165] Memory required for data: 941569500\nI0818 13:44:37.193488 22726 layer_factory.hpp:77] Creating layer resblk32_2\nI0818 13:44:37.193502 22726 net.cpp:100] Creating Layer resblk32_2\nI0818 13:44:37.193508 22726 net.cpp:434] resblk32_2 <- sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_0\nI0818 13:44:37.193517 22726 net.cpp:408] resblk32_2 -> resblk32_2\nI0818 13:44:37.193991 22726 net.cpp:150] Setting up resblk32_2\nI0818 13:44:37.194005 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.194010 22726 net.cpp:165] Memory required for data: 945665500\nI0818 13:44:37.194020 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_2\nI0818 13:44:37.194031 22726 net.cpp:100] Creating Layer batchNorm_resblk32_2\nI0818 13:44:37.194037 22726 net.cpp:434] batchNorm_resblk32_2 <- resblk32_2\nI0818 13:44:37.194048 22726 net.cpp:408] batchNorm_resblk32_2 -> bn_resblk32_2\nI0818 13:44:37.194293 22726 net.cpp:150] Setting up batchNorm_resblk32_2\nI0818 13:44:37.194306 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.194311 22726 net.cpp:165] Memory required for data: 949761500\nI0818 13:44:37.194321 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2\nI0818 13:44:37.194330 22726 net.cpp:100] Creating Layer scale_resblk32_2\nI0818 13:44:37.194336 22726 net.cpp:434] scale_resblk32_2 <- bn_resblk32_2\nI0818 13:44:37.194344 22726 net.cpp:395] scale_resblk32_2 -> bn_resblk32_2 
(in-place)\nI0818 13:44:37.194401 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2\nI0818 13:44:37.194546 22726 net.cpp:150] Setting up scale_resblk32_2\nI0818 13:44:37.194558 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.194563 22726 net.cpp:165] Memory required for data: 953857500\nI0818 13:44:37.194572 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_2\nI0818 13:44:37.194581 22726 net.cpp:100] Creating Layer relu_bn_resblk32_2\nI0818 13:44:37.194589 22726 net.cpp:434] relu_bn_resblk32_2 <- bn_resblk32_2\nI0818 13:44:37.194597 22726 net.cpp:395] relu_bn_resblk32_2 -> bn_resblk32_2 (in-place)\nI0818 13:44:37.194607 22726 net.cpp:150] Setting up relu_bn_resblk32_2\nI0818 13:44:37.194612 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.194617 22726 net.cpp:165] Memory required for data: 957953500\nI0818 13:44:37.194622 22726 layer_factory.hpp:77] Creating layer resblk32_2_b\nI0818 13:44:37.194636 22726 net.cpp:100] Creating Layer resblk32_2_b\nI0818 13:44:37.194643 22726 net.cpp:434] resblk32_2_b <- bn_resblk32_2\nI0818 13:44:37.194650 22726 net.cpp:408] resblk32_2_b -> resblk32_2_b\nI0818 13:44:37.195124 22726 net.cpp:150] Setting up resblk32_2_b\nI0818 13:44:37.195138 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195143 22726 net.cpp:165] Memory required for data: 962049500\nI0818 13:44:37.195152 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_2_b\nI0818 13:44:37.195164 22726 net.cpp:100] Creating Layer batchNorm_resblk32_2_b\nI0818 13:44:37.195170 22726 net.cpp:434] batchNorm_resblk32_2_b <- resblk32_2_b\nI0818 13:44:37.195179 22726 net.cpp:408] batchNorm_resblk32_2_b -> bn_resblk32_2_b\nI0818 13:44:37.195430 22726 net.cpp:150] Setting up batchNorm_resblk32_2_b\nI0818 13:44:37.195444 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195449 22726 net.cpp:165] Memory required for data: 966145500\nI0818 13:44:37.195459 22726 layer_factory.hpp:77] 
Creating layer scale_resblk32_2_b\nI0818 13:44:37.195468 22726 net.cpp:100] Creating Layer scale_resblk32_2_b\nI0818 13:44:37.195474 22726 net.cpp:434] scale_resblk32_2_b <- bn_resblk32_2_b\nI0818 13:44:37.195482 22726 net.cpp:395] scale_resblk32_2_b -> bn_resblk32_2_b (in-place)\nI0818 13:44:37.195536 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2_b\nI0818 13:44:37.195688 22726 net.cpp:150] Setting up scale_resblk32_2_b\nI0818 13:44:37.195700 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195705 22726 net.cpp:165] Memory required for data: 970241500\nI0818 13:44:37.195713 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_1_b\nI0818 13:44:37.195722 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_1_b\nI0818 13:44:37.195729 22726 net.cpp:434] sum_sum_bn_resblk32_1_b <- sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_1\nI0818 13:44:37.195736 22726 net.cpp:434] sum_sum_bn_resblk32_1_b <- bn_resblk32_2_b\nI0818 13:44:37.195746 22726 net.cpp:408] sum_sum_bn_resblk32_1_b -> sum_bn_resblk32_2_b\nI0818 13:44:37.195775 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_1_b\nI0818 13:44:37.195783 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195787 22726 net.cpp:165] Memory required for data: 974337500\nI0818 13:44:37.195792 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_2_b\nI0818 13:44:37.195823 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_2_b\nI0818 13:44:37.195832 22726 net.cpp:434] relu_sum_bn_resblk32_2_b <- sum_bn_resblk32_2_b\nI0818 13:44:37.195842 22726 net.cpp:395] relu_sum_bn_resblk32_2_b -> sum_bn_resblk32_2_b (in-place)\nI0818 13:44:37.195852 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_2_b\nI0818 13:44:37.195859 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195864 22726 net.cpp:165] Memory required for data: 978433500\nI0818 13:44:37.195869 22726 layer_factory.hpp:77] Creating layer 
sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.195876 22726 net.cpp:100] Creating Layer sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.195881 22726 net.cpp:434] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split <- sum_bn_resblk32_2_b\nI0818 13:44:37.195889 22726 net.cpp:408] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split -> sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_0\nI0818 13:44:37.195899 22726 net.cpp:408] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split -> sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_1\nI0818 13:44:37.195948 22726 net.cpp:150] Setting up sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.195960 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195966 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.195971 22726 net.cpp:165] Memory required for data: 986625500\nI0818 13:44:37.195976 22726 layer_factory.hpp:77] Creating layer resblk32_3\nI0818 13:44:37.195987 22726 net.cpp:100] Creating Layer resblk32_3\nI0818 13:44:37.195993 22726 net.cpp:434] resblk32_3 <- sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_0\nI0818 13:44:37.196005 22726 net.cpp:408] resblk32_3 -> resblk32_3\nI0818 13:44:37.196466 22726 net.cpp:150] Setting up resblk32_3\nI0818 13:44:37.196480 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.196491 22726 net.cpp:165] Memory required for data: 990721500\nI0818 13:44:37.196501 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_3\nI0818 13:44:37.196509 22726 net.cpp:100] Creating Layer batchNorm_resblk32_3\nI0818 13:44:37.196516 22726 net.cpp:434] batchNorm_resblk32_3 <- resblk32_3\nI0818 13:44:37.196527 22726 net.cpp:408] batchNorm_resblk32_3 -> bn_resblk32_3\nI0818 13:44:37.196775 22726 net.cpp:150] Setting up batchNorm_resblk32_3\nI0818 13:44:37.196789 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.196794 22726 net.cpp:165] Memory required for 
data: 994817500\nI0818 13:44:37.196805 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3\nI0818 13:44:37.196823 22726 net.cpp:100] Creating Layer scale_resblk32_3\nI0818 13:44:37.196830 22726 net.cpp:434] scale_resblk32_3 <- bn_resblk32_3\nI0818 13:44:37.196838 22726 net.cpp:395] scale_resblk32_3 -> bn_resblk32_3 (in-place)\nI0818 13:44:37.196893 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3\nI0818 13:44:37.197041 22726 net.cpp:150] Setting up scale_resblk32_3\nI0818 13:44:37.197053 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.197057 22726 net.cpp:165] Memory required for data: 998913500\nI0818 13:44:37.197067 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_3\nI0818 13:44:37.197077 22726 net.cpp:100] Creating Layer relu_bn_resblk32_3\nI0818 13:44:37.197084 22726 net.cpp:434] relu_bn_resblk32_3 <- bn_resblk32_3\nI0818 13:44:37.197091 22726 net.cpp:395] relu_bn_resblk32_3 -> bn_resblk32_3 (in-place)\nI0818 13:44:37.197101 22726 net.cpp:150] Setting up relu_bn_resblk32_3\nI0818 13:44:37.197108 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.197113 22726 net.cpp:165] Memory required for data: 1003009500\nI0818 13:44:37.197118 22726 layer_factory.hpp:77] Creating layer resblk32_3_b\nI0818 13:44:37.197131 22726 net.cpp:100] Creating Layer resblk32_3_b\nI0818 13:44:37.197137 22726 net.cpp:434] resblk32_3_b <- bn_resblk32_3\nI0818 13:44:37.197149 22726 net.cpp:408] resblk32_3_b -> resblk32_3_b\nI0818 13:44:37.197607 22726 net.cpp:150] Setting up resblk32_3_b\nI0818 13:44:37.197620 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.197625 22726 net.cpp:165] Memory required for data: 1007105500\nI0818 13:44:37.197633 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_3_b\nI0818 13:44:37.197643 22726 net.cpp:100] Creating Layer batchNorm_resblk32_3_b\nI0818 13:44:37.197649 22726 net.cpp:434] batchNorm_resblk32_3_b <- resblk32_3_b\nI0818 13:44:37.197657 22726 
net.cpp:408] batchNorm_resblk32_3_b -> bn_resblk32_3_b\nI0818 13:44:37.197918 22726 net.cpp:150] Setting up batchNorm_resblk32_3_b\nI0818 13:44:37.197932 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.197937 22726 net.cpp:165] Memory required for data: 1011201500\nI0818 13:44:37.197948 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3_b\nI0818 13:44:37.197957 22726 net.cpp:100] Creating Layer scale_resblk32_3_b\nI0818 13:44:37.197963 22726 net.cpp:434] scale_resblk32_3_b <- bn_resblk32_3_b\nI0818 13:44:37.197973 22726 net.cpp:395] scale_resblk32_3_b -> bn_resblk32_3_b (in-place)\nI0818 13:44:37.198029 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3_b\nI0818 13:44:37.198177 22726 net.cpp:150] Setting up scale_resblk32_3_b\nI0818 13:44:37.198189 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198194 22726 net.cpp:165] Memory required for data: 1015297500\nI0818 13:44:37.198204 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_2_b\nI0818 13:44:37.198212 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_2_b\nI0818 13:44:37.198218 22726 net.cpp:434] sum_sum_bn_resblk32_2_b <- sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_1\nI0818 13:44:37.198225 22726 net.cpp:434] sum_sum_bn_resblk32_2_b <- bn_resblk32_3_b\nI0818 13:44:37.198237 22726 net.cpp:408] sum_sum_bn_resblk32_2_b -> sum_bn_resblk32_3_b\nI0818 13:44:37.198266 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_2_b\nI0818 13:44:37.198277 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198288 22726 net.cpp:165] Memory required for data: 1019393500\nI0818 13:44:37.198294 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_3_b\nI0818 13:44:37.198302 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_3_b\nI0818 13:44:37.198307 22726 net.cpp:434] relu_sum_bn_resblk32_3_b <- sum_bn_resblk32_3_b\nI0818 13:44:37.198314 22726 net.cpp:395] relu_sum_bn_resblk32_3_b -> sum_bn_resblk32_3_b 
(in-place)\nI0818 13:44:37.198323 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_3_b\nI0818 13:44:37.198330 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198334 22726 net.cpp:165] Memory required for data: 1023489500\nI0818 13:44:37.198338 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.198348 22726 net.cpp:100] Creating Layer sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.198354 22726 net.cpp:434] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split <- sum_bn_resblk32_3_b\nI0818 13:44:37.198361 22726 net.cpp:408] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split -> sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_0\nI0818 13:44:37.198371 22726 net.cpp:408] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split -> sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_1\nI0818 13:44:37.198415 22726 net.cpp:150] Setting up sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.198431 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198437 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198441 22726 net.cpp:165] Memory required for data: 1031681500\nI0818 13:44:37.198446 22726 layer_factory.hpp:77] Creating layer resblk32_4\nI0818 13:44:37.198457 22726 net.cpp:100] Creating Layer resblk32_4\nI0818 13:44:37.198463 22726 net.cpp:434] resblk32_4 <- sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_0\nI0818 13:44:37.198472 22726 net.cpp:408] resblk32_4 -> resblk32_4\nI0818 13:44:37.198946 22726 net.cpp:150] Setting up resblk32_4\nI0818 13:44:37.198963 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.198968 22726 net.cpp:165] Memory required for data: 1035777500\nI0818 13:44:37.198977 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_4\nI0818 13:44:37.198987 22726 net.cpp:100] Creating Layer batchNorm_resblk32_4\nI0818 13:44:37.198992 22726 net.cpp:434] 
batchNorm_resblk32_4 <- resblk32_4\nI0818 13:44:37.199000 22726 net.cpp:408] batchNorm_resblk32_4 -> bn_resblk32_4\nI0818 13:44:37.199250 22726 net.cpp:150] Setting up batchNorm_resblk32_4\nI0818 13:44:37.199262 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.199267 22726 net.cpp:165] Memory required for data: 1039873500\nI0818 13:44:37.199277 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4\nI0818 13:44:37.199286 22726 net.cpp:100] Creating Layer scale_resblk32_4\nI0818 13:44:37.199292 22726 net.cpp:434] scale_resblk32_4 <- bn_resblk32_4\nI0818 13:44:37.199303 22726 net.cpp:395] scale_resblk32_4 -> bn_resblk32_4 (in-place)\nI0818 13:44:37.199358 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4\nI0818 13:44:37.199512 22726 net.cpp:150] Setting up scale_resblk32_4\nI0818 13:44:37.199523 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.199528 22726 net.cpp:165] Memory required for data: 1043969500\nI0818 13:44:37.199537 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_4\nI0818 13:44:37.199545 22726 net.cpp:100] Creating Layer relu_bn_resblk32_4\nI0818 13:44:37.199551 22726 net.cpp:434] relu_bn_resblk32_4 <- bn_resblk32_4\nI0818 13:44:37.199561 22726 net.cpp:395] relu_bn_resblk32_4 -> bn_resblk32_4 (in-place)\nI0818 13:44:37.199571 22726 net.cpp:150] Setting up relu_bn_resblk32_4\nI0818 13:44:37.199578 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.199582 22726 net.cpp:165] Memory required for data: 1048065500\nI0818 13:44:37.199586 22726 layer_factory.hpp:77] Creating layer resblk32_4_b\nI0818 13:44:37.199600 22726 net.cpp:100] Creating Layer resblk32_4_b\nI0818 13:44:37.199612 22726 net.cpp:434] resblk32_4_b <- bn_resblk32_4\nI0818 13:44:37.199622 22726 net.cpp:408] resblk32_4_b -> resblk32_4_b\nI0818 13:44:37.200103 22726 net.cpp:150] Setting up resblk32_4_b\nI0818 13:44:37.200117 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200122 22726 
net.cpp:165] Memory required for data: 1052161500\nI0818 13:44:37.200131 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_4_b\nI0818 13:44:37.200143 22726 net.cpp:100] Creating Layer batchNorm_resblk32_4_b\nI0818 13:44:37.200150 22726 net.cpp:434] batchNorm_resblk32_4_b <- resblk32_4_b\nI0818 13:44:37.200158 22726 net.cpp:408] batchNorm_resblk32_4_b -> bn_resblk32_4_b\nI0818 13:44:37.200407 22726 net.cpp:150] Setting up batchNorm_resblk32_4_b\nI0818 13:44:37.200418 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200423 22726 net.cpp:165] Memory required for data: 1056257500\nI0818 13:44:37.200433 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4_b\nI0818 13:44:37.200443 22726 net.cpp:100] Creating Layer scale_resblk32_4_b\nI0818 13:44:37.200448 22726 net.cpp:434] scale_resblk32_4_b <- bn_resblk32_4_b\nI0818 13:44:37.200459 22726 net.cpp:395] scale_resblk32_4_b -> bn_resblk32_4_b (in-place)\nI0818 13:44:37.200515 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4_b\nI0818 13:44:37.200661 22726 net.cpp:150] Setting up scale_resblk32_4_b\nI0818 13:44:37.200675 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200678 22726 net.cpp:165] Memory required for data: 1060353500\nI0818 13:44:37.200687 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_3_b\nI0818 13:44:37.200696 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_3_b\nI0818 13:44:37.200702 22726 net.cpp:434] sum_sum_bn_resblk32_3_b <- sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_1\nI0818 13:44:37.200711 22726 net.cpp:434] sum_sum_bn_resblk32_3_b <- bn_resblk32_4_b\nI0818 13:44:37.200721 22726 net.cpp:408] sum_sum_bn_resblk32_3_b -> sum_bn_resblk32_4_b\nI0818 13:44:37.200748 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_3_b\nI0818 13:44:37.200757 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200762 22726 net.cpp:165] Memory required for data: 1064449500\nI0818 13:44:37.200767 22726 
layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_4_b\nI0818 13:44:37.200778 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_4_b\nI0818 13:44:37.200783 22726 net.cpp:434] relu_sum_bn_resblk32_4_b <- sum_bn_resblk32_4_b\nI0818 13:44:37.200790 22726 net.cpp:395] relu_sum_bn_resblk32_4_b -> sum_bn_resblk32_4_b (in-place)\nI0818 13:44:37.200799 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_4_b\nI0818 13:44:37.200806 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200817 22726 net.cpp:165] Memory required for data: 1068545500\nI0818 13:44:37.200822 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.200829 22726 net.cpp:100] Creating Layer sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.200835 22726 net.cpp:434] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split <- sum_bn_resblk32_4_b\nI0818 13:44:37.200845 22726 net.cpp:408] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split -> sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_0\nI0818 13:44:37.200855 22726 net.cpp:408] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split -> sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_1\nI0818 13:44:37.200901 22726 net.cpp:150] Setting up sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.200915 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200922 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.200927 22726 net.cpp:165] Memory required for data: 1076737500\nI0818 13:44:37.200932 22726 layer_factory.hpp:77] Creating layer resblk32_5\nI0818 13:44:37.200942 22726 net.cpp:100] Creating Layer resblk32_5\nI0818 13:44:37.200948 22726 net.cpp:434] resblk32_5 <- sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_0\nI0818 13:44:37.200958 22726 net.cpp:408] resblk32_5 -> resblk32_5\nI0818 13:44:37.201437 22726 net.cpp:150] Setting up resblk32_5\nI0818 13:44:37.201452 22726 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 13:44:37.201457 22726 net.cpp:165] Memory required for data: 1080833500\nI0818 13:44:37.201465 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_5\nI0818 13:44:37.201477 22726 net.cpp:100] Creating Layer batchNorm_resblk32_5\nI0818 13:44:37.201483 22726 net.cpp:434] batchNorm_resblk32_5 <- resblk32_5\nI0818 13:44:37.201491 22726 net.cpp:408] batchNorm_resblk32_5 -> bn_resblk32_5\nI0818 13:44:37.201742 22726 net.cpp:150] Setting up batchNorm_resblk32_5\nI0818 13:44:37.201756 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.201759 22726 net.cpp:165] Memory required for data: 1084929500\nI0818 13:44:37.201769 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5\nI0818 13:44:37.201778 22726 net.cpp:100] Creating Layer scale_resblk32_5\nI0818 13:44:37.201784 22726 net.cpp:434] scale_resblk32_5 <- bn_resblk32_5\nI0818 13:44:37.201794 22726 net.cpp:395] scale_resblk32_5 -> bn_resblk32_5 (in-place)\nI0818 13:44:37.201858 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5\nI0818 13:44:37.202016 22726 net.cpp:150] Setting up scale_resblk32_5\nI0818 13:44:37.202029 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.202034 22726 net.cpp:165] Memory required for data: 1089025500\nI0818 13:44:37.202044 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_5\nI0818 13:44:37.202050 22726 net.cpp:100] Creating Layer relu_bn_resblk32_5\nI0818 13:44:37.202056 22726 net.cpp:434] relu_bn_resblk32_5 <- bn_resblk32_5\nI0818 13:44:37.202067 22726 net.cpp:395] relu_bn_resblk32_5 -> bn_resblk32_5 (in-place)\nI0818 13:44:37.202076 22726 net.cpp:150] Setting up relu_bn_resblk32_5\nI0818 13:44:37.202083 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.202088 22726 net.cpp:165] Memory required for data: 1093121500\nI0818 13:44:37.202092 22726 layer_factory.hpp:77] Creating layer resblk32_5_b\nI0818 13:44:37.202106 22726 net.cpp:100] Creating Layer resblk32_5_b\nI0818 
13:44:37.202112 22726 net.cpp:434] resblk32_5_b <- bn_resblk32_5\nI0818 13:44:37.202121 22726 net.cpp:408] resblk32_5_b -> resblk32_5_b\nI0818 13:44:37.202585 22726 net.cpp:150] Setting up resblk32_5_b\nI0818 13:44:37.202600 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.202603 22726 net.cpp:165] Memory required for data: 1097217500\nI0818 13:44:37.202612 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_5_b\nI0818 13:44:37.202620 22726 net.cpp:100] Creating Layer batchNorm_resblk32_5_b\nI0818 13:44:37.202626 22726 net.cpp:434] batchNorm_resblk32_5_b <- resblk32_5_b\nI0818 13:44:37.202638 22726 net.cpp:408] batchNorm_resblk32_5_b -> bn_resblk32_5_b\nI0818 13:44:37.202893 22726 net.cpp:150] Setting up batchNorm_resblk32_5_b\nI0818 13:44:37.202905 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.202910 22726 net.cpp:165] Memory required for data: 1101313500\nI0818 13:44:37.202920 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5_b\nI0818 13:44:37.202929 22726 net.cpp:100] Creating Layer scale_resblk32_5_b\nI0818 13:44:37.202935 22726 net.cpp:434] scale_resblk32_5_b <- bn_resblk32_5_b\nI0818 13:44:37.202942 22726 net.cpp:395] scale_resblk32_5_b -> bn_resblk32_5_b (in-place)\nI0818 13:44:37.202999 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5_b\nI0818 13:44:37.203147 22726 net.cpp:150] Setting up scale_resblk32_5_b\nI0818 13:44:37.203163 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203168 22726 net.cpp:165] Memory required for data: 1105409500\nI0818 13:44:37.203184 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_4_b\nI0818 13:44:37.203193 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_4_b\nI0818 13:44:37.203200 22726 net.cpp:434] sum_sum_bn_resblk32_4_b <- sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_1\nI0818 13:44:37.203207 22726 net.cpp:434] sum_sum_bn_resblk32_4_b <- bn_resblk32_5_b\nI0818 13:44:37.203215 22726 net.cpp:408] 
sum_sum_bn_resblk32_4_b -> sum_bn_resblk32_5_b\nI0818 13:44:37.203253 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_4_b\nI0818 13:44:37.203263 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203266 22726 net.cpp:165] Memory required for data: 1109505500\nI0818 13:44:37.203271 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_5_b\nI0818 13:44:37.203279 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_5_b\nI0818 13:44:37.203284 22726 net.cpp:434] relu_sum_bn_resblk32_5_b <- sum_bn_resblk32_5_b\nI0818 13:44:37.203294 22726 net.cpp:395] relu_sum_bn_resblk32_5_b -> sum_bn_resblk32_5_b (in-place)\nI0818 13:44:37.203305 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_5_b\nI0818 13:44:37.203311 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203315 22726 net.cpp:165] Memory required for data: 1113601500\nI0818 13:44:37.203320 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.203326 22726 net.cpp:100] Creating Layer sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.203332 22726 net.cpp:434] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split <- sum_bn_resblk32_5_b\nI0818 13:44:37.203342 22726 net.cpp:408] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split -> sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_0\nI0818 13:44:37.203352 22726 net.cpp:408] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split -> sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_1\nI0818 13:44:37.203397 22726 net.cpp:150] Setting up sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.203408 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203414 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203419 22726 net.cpp:165] Memory required for data: 1121793500\nI0818 13:44:37.203424 22726 layer_factory.hpp:77] Creating layer resblk32_6\nI0818 13:44:37.203438 22726 net.cpp:100] Creating Layer 
resblk32_6\nI0818 13:44:37.203445 22726 net.cpp:434] resblk32_6 <- sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_0\nI0818 13:44:37.203454 22726 net.cpp:408] resblk32_6 -> resblk32_6\nI0818 13:44:37.203930 22726 net.cpp:150] Setting up resblk32_6\nI0818 13:44:37.203944 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.203949 22726 net.cpp:165] Memory required for data: 1125889500\nI0818 13:44:37.203958 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_6\nI0818 13:44:37.203966 22726 net.cpp:100] Creating Layer batchNorm_resblk32_6\nI0818 13:44:37.203972 22726 net.cpp:434] batchNorm_resblk32_6 <- resblk32_6\nI0818 13:44:37.203984 22726 net.cpp:408] batchNorm_resblk32_6 -> bn_resblk32_6\nI0818 13:44:37.204236 22726 net.cpp:150] Setting up batchNorm_resblk32_6\nI0818 13:44:37.204249 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.204254 22726 net.cpp:165] Memory required for data: 1129985500\nI0818 13:44:37.204264 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6\nI0818 13:44:37.204272 22726 net.cpp:100] Creating Layer scale_resblk32_6\nI0818 13:44:37.204278 22726 net.cpp:434] scale_resblk32_6 <- bn_resblk32_6\nI0818 13:44:37.204286 22726 net.cpp:395] scale_resblk32_6 -> bn_resblk32_6 (in-place)\nI0818 13:44:37.204345 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6\nI0818 13:44:37.204495 22726 net.cpp:150] Setting up scale_resblk32_6\nI0818 13:44:37.204510 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.204515 22726 net.cpp:165] Memory required for data: 1134081500\nI0818 13:44:37.204524 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_6\nI0818 13:44:37.204532 22726 net.cpp:100] Creating Layer relu_bn_resblk32_6\nI0818 13:44:37.204538 22726 net.cpp:434] relu_bn_resblk32_6 <- bn_resblk32_6\nI0818 13:44:37.204545 22726 net.cpp:395] relu_bn_resblk32_6 -> bn_resblk32_6 (in-place)\nI0818 13:44:37.204555 22726 net.cpp:150] Setting up relu_bn_resblk32_6\nI0818 
13:44:37.204561 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.204566 22726 net.cpp:165] Memory required for data: 1138177500\nI0818 13:44:37.204571 22726 layer_factory.hpp:77] Creating layer resblk32_6_b\nI0818 13:44:37.204591 22726 net.cpp:100] Creating Layer resblk32_6_b\nI0818 13:44:37.204596 22726 net.cpp:434] resblk32_6_b <- bn_resblk32_6\nI0818 13:44:37.204609 22726 net.cpp:408] resblk32_6_b -> resblk32_6_b\nI0818 13:44:37.205083 22726 net.cpp:150] Setting up resblk32_6_b\nI0818 13:44:37.205097 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205102 22726 net.cpp:165] Memory required for data: 1142273500\nI0818 13:44:37.205111 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_6_b\nI0818 13:44:37.205122 22726 net.cpp:100] Creating Layer batchNorm_resblk32_6_b\nI0818 13:44:37.205129 22726 net.cpp:434] batchNorm_resblk32_6_b <- resblk32_6_b\nI0818 13:44:37.205140 22726 net.cpp:408] batchNorm_resblk32_6_b -> bn_resblk32_6_b\nI0818 13:44:37.205390 22726 net.cpp:150] Setting up batchNorm_resblk32_6_b\nI0818 13:44:37.205402 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205407 22726 net.cpp:165] Memory required for data: 1146369500\nI0818 13:44:37.205417 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6_b\nI0818 13:44:37.205425 22726 net.cpp:100] Creating Layer scale_resblk32_6_b\nI0818 13:44:37.205431 22726 net.cpp:434] scale_resblk32_6_b <- bn_resblk32_6_b\nI0818 13:44:37.205440 22726 net.cpp:395] scale_resblk32_6_b -> bn_resblk32_6_b (in-place)\nI0818 13:44:37.205498 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6_b\nI0818 13:44:37.205646 22726 net.cpp:150] Setting up scale_resblk32_6_b\nI0818 13:44:37.205658 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205663 22726 net.cpp:165] Memory required for data: 1150465500\nI0818 13:44:37.205672 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_5_b\nI0818 13:44:37.205683 22726 
net.cpp:100] Creating Layer sum_sum_bn_resblk32_5_b\nI0818 13:44:37.205690 22726 net.cpp:434] sum_sum_bn_resblk32_5_b <- sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_1\nI0818 13:44:37.205698 22726 net.cpp:434] sum_sum_bn_resblk32_5_b <- bn_resblk32_6_b\nI0818 13:44:37.205705 22726 net.cpp:408] sum_sum_bn_resblk32_5_b -> sum_bn_resblk32_6_b\nI0818 13:44:37.205732 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_5_b\nI0818 13:44:37.205742 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205746 22726 net.cpp:165] Memory required for data: 1154561500\nI0818 13:44:37.205751 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_6_b\nI0818 13:44:37.205762 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_6_b\nI0818 13:44:37.205768 22726 net.cpp:434] relu_sum_bn_resblk32_6_b <- sum_bn_resblk32_6_b\nI0818 13:44:37.205775 22726 net.cpp:395] relu_sum_bn_resblk32_6_b -> sum_bn_resblk32_6_b (in-place)\nI0818 13:44:37.205785 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_6_b\nI0818 13:44:37.205791 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205796 22726 net.cpp:165] Memory required for data: 1158657500\nI0818 13:44:37.205801 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.205813 22726 net.cpp:100] Creating Layer sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.205819 22726 net.cpp:434] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split <- sum_bn_resblk32_6_b\nI0818 13:44:37.205827 22726 net.cpp:408] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split -> sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_0\nI0818 13:44:37.205837 22726 net.cpp:408] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split -> sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_1\nI0818 13:44:37.205891 22726 net.cpp:150] Setting up sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.205904 22726 net.cpp:157] Top shape: 125 32 
16 16 (1024000)\nI0818 13:44:37.205910 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.205914 22726 net.cpp:165] Memory required for data: 1166849500\nI0818 13:44:37.205919 22726 layer_factory.hpp:77] Creating layer resblk32_7\nI0818 13:44:37.205934 22726 net.cpp:100] Creating Layer resblk32_7\nI0818 13:44:37.205947 22726 net.cpp:434] resblk32_7 <- sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_0\nI0818 13:44:37.205957 22726 net.cpp:408] resblk32_7 -> resblk32_7\nI0818 13:44:37.206440 22726 net.cpp:150] Setting up resblk32_7\nI0818 13:44:37.206455 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.206460 22726 net.cpp:165] Memory required for data: 1170945500\nI0818 13:44:37.206468 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_7\nI0818 13:44:37.206481 22726 net.cpp:100] Creating Layer batchNorm_resblk32_7\nI0818 13:44:37.206488 22726 net.cpp:434] batchNorm_resblk32_7 <- resblk32_7\nI0818 13:44:37.206499 22726 net.cpp:408] batchNorm_resblk32_7 -> bn_resblk32_7\nI0818 13:44:37.206750 22726 net.cpp:150] Setting up batchNorm_resblk32_7\nI0818 13:44:37.206763 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.206768 22726 net.cpp:165] Memory required for data: 1175041500\nI0818 13:44:37.206779 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7\nI0818 13:44:37.206786 22726 net.cpp:100] Creating Layer scale_resblk32_7\nI0818 13:44:37.206792 22726 net.cpp:434] scale_resblk32_7 <- bn_resblk32_7\nI0818 13:44:37.206800 22726 net.cpp:395] scale_resblk32_7 -> bn_resblk32_7 (in-place)\nI0818 13:44:37.206866 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7\nI0818 13:44:37.207018 22726 net.cpp:150] Setting up scale_resblk32_7\nI0818 13:44:37.207031 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.207036 22726 net.cpp:165] Memory required for data: 1179137500\nI0818 13:44:37.207044 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_7\nI0818 13:44:37.207054 
22726 net.cpp:100] Creating Layer relu_bn_resblk32_7\nI0818 13:44:37.207062 22726 net.cpp:434] relu_bn_resblk32_7 <- bn_resblk32_7\nI0818 13:44:37.207068 22726 net.cpp:395] relu_bn_resblk32_7 -> bn_resblk32_7 (in-place)\nI0818 13:44:37.207077 22726 net.cpp:150] Setting up relu_bn_resblk32_7\nI0818 13:44:37.207084 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.207089 22726 net.cpp:165] Memory required for data: 1183233500\nI0818 13:44:37.207094 22726 layer_factory.hpp:77] Creating layer resblk32_7_b\nI0818 13:44:37.207108 22726 net.cpp:100] Creating Layer resblk32_7_b\nI0818 13:44:37.207113 22726 net.cpp:434] resblk32_7_b <- bn_resblk32_7\nI0818 13:44:37.207123 22726 net.cpp:408] resblk32_7_b -> resblk32_7_b\nI0818 13:44:37.207592 22726 net.cpp:150] Setting up resblk32_7_b\nI0818 13:44:37.207605 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.207610 22726 net.cpp:165] Memory required for data: 1187329500\nI0818 13:44:37.207618 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_7_b\nI0818 13:44:37.207630 22726 net.cpp:100] Creating Layer batchNorm_resblk32_7_b\nI0818 13:44:37.207636 22726 net.cpp:434] batchNorm_resblk32_7_b <- resblk32_7_b\nI0818 13:44:37.207645 22726 net.cpp:408] batchNorm_resblk32_7_b -> bn_resblk32_7_b\nI0818 13:44:37.207909 22726 net.cpp:150] Setting up batchNorm_resblk32_7_b\nI0818 13:44:37.207926 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.207931 22726 net.cpp:165] Memory required for data: 1191425500\nI0818 13:44:37.207942 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7_b\nI0818 13:44:37.207950 22726 net.cpp:100] Creating Layer scale_resblk32_7_b\nI0818 13:44:37.207957 22726 net.cpp:434] scale_resblk32_7_b <- bn_resblk32_7_b\nI0818 13:44:37.207964 22726 net.cpp:395] scale_resblk32_7_b -> bn_resblk32_7_b (in-place)\nI0818 13:44:37.208020 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7_b\nI0818 13:44:37.208173 22726 net.cpp:150] Setting up 
scale_resblk32_7_b\nI0818 13:44:37.208185 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208190 22726 net.cpp:165] Memory required for data: 1195521500\nI0818 13:44:37.208199 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_6_b\nI0818 13:44:37.208209 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_6_b\nI0818 13:44:37.208214 22726 net.cpp:434] sum_sum_bn_resblk32_6_b <- sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_1\nI0818 13:44:37.208228 22726 net.cpp:434] sum_sum_bn_resblk32_6_b <- bn_resblk32_7_b\nI0818 13:44:37.208240 22726 net.cpp:408] sum_sum_bn_resblk32_6_b -> sum_bn_resblk32_7_b\nI0818 13:44:37.208268 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_6_b\nI0818 13:44:37.208277 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208282 22726 net.cpp:165] Memory required for data: 1199617500\nI0818 13:44:37.208287 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_7_b\nI0818 13:44:37.208297 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_7_b\nI0818 13:44:37.208303 22726 net.cpp:434] relu_sum_bn_resblk32_7_b <- sum_bn_resblk32_7_b\nI0818 13:44:37.208310 22726 net.cpp:395] relu_sum_bn_resblk32_7_b -> sum_bn_resblk32_7_b (in-place)\nI0818 13:44:37.208319 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_7_b\nI0818 13:44:37.208326 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208330 22726 net.cpp:165] Memory required for data: 1203713500\nI0818 13:44:37.208335 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.208343 22726 net.cpp:100] Creating Layer sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.208348 22726 net.cpp:434] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split <- sum_bn_resblk32_7_b\nI0818 13:44:37.208354 22726 net.cpp:408] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split -> sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_0\nI0818 13:44:37.208377 
22726 net.cpp:408] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split -> sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_1\nI0818 13:44:37.208428 22726 net.cpp:150] Setting up sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.208441 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208447 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208451 22726 net.cpp:165] Memory required for data: 1211905500\nI0818 13:44:37.208456 22726 layer_factory.hpp:77] Creating layer resblk32_8\nI0818 13:44:37.208467 22726 net.cpp:100] Creating Layer resblk32_8\nI0818 13:44:37.208474 22726 net.cpp:434] resblk32_8 <- sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_0\nI0818 13:44:37.208487 22726 net.cpp:408] resblk32_8 -> resblk32_8\nI0818 13:44:37.208964 22726 net.cpp:150] Setting up resblk32_8\nI0818 13:44:37.208979 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.208984 22726 net.cpp:165] Memory required for data: 1216001500\nI0818 13:44:37.208992 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_8\nI0818 13:44:37.209004 22726 net.cpp:100] Creating Layer batchNorm_resblk32_8\nI0818 13:44:37.209010 22726 net.cpp:434] batchNorm_resblk32_8 <- resblk32_8\nI0818 13:44:37.209019 22726 net.cpp:408] batchNorm_resblk32_8 -> bn_resblk32_8\nI0818 13:44:37.209269 22726 net.cpp:150] Setting up batchNorm_resblk32_8\nI0818 13:44:37.209281 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.209285 22726 net.cpp:165] Memory required for data: 1220097500\nI0818 13:44:37.209296 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8\nI0818 13:44:37.209308 22726 net.cpp:100] Creating Layer scale_resblk32_8\nI0818 13:44:37.209314 22726 net.cpp:434] scale_resblk32_8 <- bn_resblk32_8\nI0818 13:44:37.209321 22726 net.cpp:395] scale_resblk32_8 -> bn_resblk32_8 (in-place)\nI0818 13:44:37.209378 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8\nI0818 13:44:37.209532 22726 
net.cpp:150] Setting up scale_resblk32_8\nI0818 13:44:37.209544 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.209548 22726 net.cpp:165] Memory required for data: 1224193500\nI0818 13:44:37.209558 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_8\nI0818 13:44:37.209568 22726 net.cpp:100] Creating Layer relu_bn_resblk32_8\nI0818 13:44:37.209574 22726 net.cpp:434] relu_bn_resblk32_8 <- bn_resblk32_8\nI0818 13:44:37.209584 22726 net.cpp:395] relu_bn_resblk32_8 -> bn_resblk32_8 (in-place)\nI0818 13:44:37.209594 22726 net.cpp:150] Setting up relu_bn_resblk32_8\nI0818 13:44:37.209601 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.209612 22726 net.cpp:165] Memory required for data: 1228289500\nI0818 13:44:37.209617 22726 layer_factory.hpp:77] Creating layer resblk32_8_b\nI0818 13:44:37.209628 22726 net.cpp:100] Creating Layer resblk32_8_b\nI0818 13:44:37.209635 22726 net.cpp:434] resblk32_8_b <- bn_resblk32_8\nI0818 13:44:37.209646 22726 net.cpp:408] resblk32_8_b -> resblk32_8_b\nI0818 13:44:37.210121 22726 net.cpp:150] Setting up resblk32_8_b\nI0818 13:44:37.210135 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210140 22726 net.cpp:165] Memory required for data: 1232385500\nI0818 13:44:37.210150 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_8_b\nI0818 13:44:37.210157 22726 net.cpp:100] Creating Layer batchNorm_resblk32_8_b\nI0818 13:44:37.210165 22726 net.cpp:434] batchNorm_resblk32_8_b <- resblk32_8_b\nI0818 13:44:37.210177 22726 net.cpp:408] batchNorm_resblk32_8_b -> bn_resblk32_8_b\nI0818 13:44:37.210431 22726 net.cpp:150] Setting up batchNorm_resblk32_8_b\nI0818 13:44:37.210444 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210449 22726 net.cpp:165] Memory required for data: 1236481500\nI0818 13:44:37.210490 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8_b\nI0818 13:44:37.210505 22726 net.cpp:100] Creating Layer scale_resblk32_8_b\nI0818 
13:44:37.210512 22726 net.cpp:434] scale_resblk32_8_b <- bn_resblk32_8_b\nI0818 13:44:37.210520 22726 net.cpp:395] scale_resblk32_8_b -> bn_resblk32_8_b (in-place)\nI0818 13:44:37.210579 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8_b\nI0818 13:44:37.210727 22726 net.cpp:150] Setting up scale_resblk32_8_b\nI0818 13:44:37.210739 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210744 22726 net.cpp:165] Memory required for data: 1240577500\nI0818 13:44:37.210753 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_7_b\nI0818 13:44:37.210764 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_7_b\nI0818 13:44:37.210772 22726 net.cpp:434] sum_sum_bn_resblk32_7_b <- sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_1\nI0818 13:44:37.210778 22726 net.cpp:434] sum_sum_bn_resblk32_7_b <- bn_resblk32_8_b\nI0818 13:44:37.210786 22726 net.cpp:408] sum_sum_bn_resblk32_7_b -> sum_bn_resblk32_8_b\nI0818 13:44:37.210824 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_7_b\nI0818 13:44:37.210837 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210841 22726 net.cpp:165] Memory required for data: 1244673500\nI0818 13:44:37.210847 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_8_b\nI0818 13:44:37.210855 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_8_b\nI0818 13:44:37.210860 22726 net.cpp:434] relu_sum_bn_resblk32_8_b <- sum_bn_resblk32_8_b\nI0818 13:44:37.210870 22726 net.cpp:395] relu_sum_bn_resblk32_8_b -> sum_bn_resblk32_8_b (in-place)\nI0818 13:44:37.210880 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_8_b\nI0818 13:44:37.210887 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210891 22726 net.cpp:165] Memory required for data: 1248769500\nI0818 13:44:37.210896 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.210903 22726 net.cpp:100] Creating Layer 
sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.210908 22726 net.cpp:434] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split <- sum_bn_resblk32_8_b\nI0818 13:44:37.210918 22726 net.cpp:408] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split -> sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_0\nI0818 13:44:37.210928 22726 net.cpp:408] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split -> sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_1\nI0818 13:44:37.210974 22726 net.cpp:150] Setting up sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.210985 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210992 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.210996 22726 net.cpp:165] Memory required for data: 1256961500\nI0818 13:44:37.211002 22726 layer_factory.hpp:77] Creating layer resblk64\nI0818 13:44:37.211025 22726 net.cpp:100] Creating Layer resblk64\nI0818 13:44:37.211033 22726 net.cpp:434] resblk64 <- sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_0\nI0818 13:44:37.211042 22726 net.cpp:408] resblk64 -> resblk64\nI0818 13:44:37.211519 22726 net.cpp:150] Setting up resblk64\nI0818 13:44:37.211532 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.211537 22726 net.cpp:165] Memory required for data: 1257985500\nI0818 13:44:37.211546 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64\nI0818 13:44:37.211555 22726 net.cpp:100] Creating Layer batchNorm_resblk64\nI0818 13:44:37.211560 22726 net.cpp:434] batchNorm_resblk64 <- resblk64\nI0818 13:44:37.211572 22726 net.cpp:408] batchNorm_resblk64 -> bn_resblk64\nI0818 13:44:37.211840 22726 net.cpp:150] Setting up batchNorm_resblk64\nI0818 13:44:37.211854 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.211859 22726 net.cpp:165] Memory required for data: 1259009500\nI0818 13:44:37.211869 22726 layer_factory.hpp:77] Creating layer scale_resblk64\nI0818 13:44:37.211879 22726 net.cpp:100] 
Creating Layer scale_resblk64\nI0818 13:44:37.211884 22726 net.cpp:434] scale_resblk64 <- bn_resblk64\nI0818 13:44:37.211895 22726 net.cpp:395] scale_resblk64 -> bn_resblk64 (in-place)\nI0818 13:44:37.211952 22726 layer_factory.hpp:77] Creating layer scale_resblk64\nI0818 13:44:37.212174 22726 net.cpp:150] Setting up scale_resblk64\nI0818 13:44:37.212195 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.212201 22726 net.cpp:165] Memory required for data: 1260033500\nI0818 13:44:37.212211 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64\nI0818 13:44:37.212219 22726 net.cpp:100] Creating Layer relu_bn_resblk64\nI0818 13:44:37.212226 22726 net.cpp:434] relu_bn_resblk64 <- bn_resblk64\nI0818 13:44:37.212236 22726 net.cpp:395] relu_bn_resblk64 -> bn_resblk64 (in-place)\nI0818 13:44:37.212247 22726 net.cpp:150] Setting up relu_bn_resblk64\nI0818 13:44:37.212255 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.212260 22726 net.cpp:165] Memory required for data: 1261057500\nI0818 13:44:37.212263 22726 layer_factory.hpp:77] Creating layer resblk64_b\nI0818 13:44:37.212277 22726 net.cpp:100] Creating Layer resblk64_b\nI0818 13:44:37.212283 22726 net.cpp:434] resblk64_b <- bn_resblk64\nI0818 13:44:37.212296 22726 net.cpp:408] resblk64_b -> resblk64_b\nI0818 13:44:37.212774 22726 net.cpp:150] Setting up resblk64_b\nI0818 13:44:37.212787 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.212792 22726 net.cpp:165] Memory required for data: 1262081500\nI0818 13:44:37.212801 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_b\nI0818 13:44:37.212816 22726 net.cpp:100] Creating Layer batchNorm_resblk64_b\nI0818 13:44:37.212823 22726 net.cpp:434] batchNorm_resblk64_b <- resblk64_b\nI0818 13:44:37.212832 22726 net.cpp:408] batchNorm_resblk64_b -> bn_resblk64_b\nI0818 13:44:37.213104 22726 net.cpp:150] Setting up batchNorm_resblk64_b\nI0818 13:44:37.213116 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 
13:44:37.213121 22726 net.cpp:165] Memory required for data: 1263105500\nI0818 13:44:37.213131 22726 layer_factory.hpp:77] Creating layer scale_resblk64_b\nI0818 13:44:37.213143 22726 net.cpp:100] Creating Layer scale_resblk64_b\nI0818 13:44:37.213150 22726 net.cpp:434] scale_resblk64_b <- bn_resblk64_b\nI0818 13:44:37.213160 22726 net.cpp:395] scale_resblk64_b -> bn_resblk64_b (in-place)\nI0818 13:44:37.213215 22726 layer_factory.hpp:77] Creating layer scale_resblk64_b\nI0818 13:44:37.213372 22726 net.cpp:150] Setting up scale_resblk64_b\nI0818 13:44:37.213385 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.213390 22726 net.cpp:165] Memory required for data: 1264129500\nI0818 13:44:37.213399 22726 layer_factory.hpp:77] Creating layer avePooling_resblk64\nI0818 13:44:37.213408 22726 net.cpp:100] Creating Layer avePooling_resblk64\nI0818 13:44:37.213415 22726 net.cpp:434] avePooling_resblk64 <- sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_1\nI0818 13:44:37.213434 22726 net.cpp:408] avePooling_resblk64 -> avgPool_resblk64\nI0818 13:44:37.213472 22726 net.cpp:150] Setting up avePooling_resblk64\nI0818 13:44:37.213482 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.213486 22726 net.cpp:165] Memory required for data: 1265153500\nI0818 13:44:37.213492 22726 layer_factory.hpp:77] Creating layer sum_avgPool_resblk64\nI0818 13:44:37.213500 22726 net.cpp:100] Creating Layer sum_avgPool_resblk64\nI0818 13:44:37.213506 22726 net.cpp:434] sum_avgPool_resblk64 <- avgPool_resblk64\nI0818 13:44:37.213513 22726 net.cpp:434] sum_avgPool_resblk64 <- bn_resblk64_b\nI0818 13:44:37.213521 22726 net.cpp:408] sum_avgPool_resblk64 -> sum_bn_resblk64_b\nI0818 13:44:37.213554 22726 net.cpp:150] Setting up sum_avgPool_resblk64\nI0818 13:44:37.213565 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.213569 22726 net.cpp:165] Memory required for data: 1266177500\nI0818 13:44:37.213574 22726 layer_factory.hpp:77] Creating layer 
relu_sum_bn_resblk64_b\nI0818 13:44:37.213585 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_b\nI0818 13:44:37.213593 22726 net.cpp:434] relu_sum_bn_resblk64_b <- sum_bn_resblk64_b\nI0818 13:44:37.213599 22726 net.cpp:395] relu_sum_bn_resblk64_b -> sum_bn_resblk64_b (in-place)\nI0818 13:44:37.213608 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_b\nI0818 13:44:37.213615 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.213619 22726 net.cpp:165] Memory required for data: 1267201500\nI0818 13:44:37.213624 22726 layer_factory.hpp:77] Creating layer zeros_sum_bn_resblk64_b\nI0818 13:44:37.213632 22726 net.cpp:100] Creating Layer zeros_sum_bn_resblk64_b\nI0818 13:44:37.213640 22726 net.cpp:408] zeros_sum_bn_resblk64_b -> zeros_sum_bn_resblk64_b\nI0818 13:44:37.214884 22726 net.cpp:150] Setting up zeros_sum_bn_resblk64_b\nI0818 13:44:37.214905 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.214910 22726 net.cpp:165] Memory required for data: 1268225500\nI0818 13:44:37.214916 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk64_b\nI0818 13:44:37.214926 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk64_b\nI0818 13:44:37.214932 22726 net.cpp:434] CC_sum_bn_resblk64_b <- sum_bn_resblk64_b\nI0818 13:44:37.214939 22726 net.cpp:434] CC_sum_bn_resblk64_b <- zeros_sum_bn_resblk64_b\nI0818 13:44:37.214951 22726 net.cpp:408] CC_sum_bn_resblk64_b -> CC_sum_bn_resblk64_b\nI0818 13:44:37.214992 22726 net.cpp:150] Setting up CC_sum_bn_resblk64_b\nI0818 13:44:37.215003 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.215008 22726 net.cpp:165] Memory required for data: 1270273500\nI0818 13:44:37.215013 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.215024 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.215030 22726 net.cpp:434] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split <- 
CC_sum_bn_resblk64_b\nI0818 13:44:37.215037 22726 net.cpp:408] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split -> CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_0\nI0818 13:44:37.215050 22726 net.cpp:408] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split -> CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_1\nI0818 13:44:37.215101 22726 net.cpp:150] Setting up CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.215113 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.215119 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.215124 22726 net.cpp:165] Memory required for data: 1274369500\nI0818 13:44:37.215129 22726 layer_factory.hpp:77] Creating layer resblk64_1\nI0818 13:44:37.215143 22726 net.cpp:100] Creating Layer resblk64_1\nI0818 13:44:37.215149 22726 net.cpp:434] resblk64_1 <- CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_0\nI0818 13:44:37.215162 22726 net.cpp:408] resblk64_1 -> resblk64_1\nI0818 13:44:37.217167 22726 net.cpp:150] Setting up resblk64_1\nI0818 13:44:37.217185 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.217190 22726 net.cpp:165] Memory required for data: 1276417500\nI0818 13:44:37.217206 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_1\nI0818 13:44:37.217216 22726 net.cpp:100] Creating Layer batchNorm_resblk64_1\nI0818 13:44:37.217226 22726 net.cpp:434] batchNorm_resblk64_1 <- resblk64_1\nI0818 13:44:37.217236 22726 net.cpp:408] batchNorm_resblk64_1 -> bn_resblk64_1\nI0818 13:44:37.217496 22726 net.cpp:150] Setting up batchNorm_resblk64_1\nI0818 13:44:37.217509 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.217514 22726 net.cpp:165] Memory required for data: 1278465500\nI0818 13:44:37.217525 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1\nI0818 13:44:37.217535 22726 net.cpp:100] Creating Layer scale_resblk64_1\nI0818 13:44:37.217540 22726 net.cpp:434] scale_resblk64_1 <- bn_resblk64_1\nI0818 13:44:37.217551 22726 
net.cpp:395] scale_resblk64_1 -> bn_resblk64_1 (in-place)\nI0818 13:44:37.217609 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1\nI0818 13:44:37.217767 22726 net.cpp:150] Setting up scale_resblk64_1\nI0818 13:44:37.217779 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.217783 22726 net.cpp:165] Memory required for data: 1280513500\nI0818 13:44:37.217793 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_1\nI0818 13:44:37.217804 22726 net.cpp:100] Creating Layer relu_bn_resblk64_1\nI0818 13:44:37.217816 22726 net.cpp:434] relu_bn_resblk64_1 <- bn_resblk64_1\nI0818 13:44:37.217825 22726 net.cpp:395] relu_bn_resblk64_1 -> bn_resblk64_1 (in-place)\nI0818 13:44:37.217835 22726 net.cpp:150] Setting up relu_bn_resblk64_1\nI0818 13:44:37.217842 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.217846 22726 net.cpp:165] Memory required for data: 1282561500\nI0818 13:44:37.217851 22726 layer_factory.hpp:77] Creating layer resblk64_1_b\nI0818 13:44:37.217865 22726 net.cpp:100] Creating Layer resblk64_1_b\nI0818 13:44:37.217871 22726 net.cpp:434] resblk64_1_b <- bn_resblk64_1\nI0818 13:44:37.217883 22726 net.cpp:408] resblk64_1_b -> resblk64_1_b\nI0818 13:44:37.218909 22726 net.cpp:150] Setting up resblk64_1_b\nI0818 13:44:37.218924 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.218928 22726 net.cpp:165] Memory required for data: 1284609500\nI0818 13:44:37.218937 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_1_b\nI0818 13:44:37.218945 22726 net.cpp:100] Creating Layer batchNorm_resblk64_1_b\nI0818 13:44:37.218952 22726 net.cpp:434] batchNorm_resblk64_1_b <- resblk64_1_b\nI0818 13:44:37.218963 22726 net.cpp:408] batchNorm_resblk64_1_b -> bn_resblk64_1_b\nI0818 13:44:37.219230 22726 net.cpp:150] Setting up batchNorm_resblk64_1_b\nI0818 13:44:37.219246 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219251 22726 net.cpp:165] Memory required for data: 1286657500\nI0818 
13:44:37.219261 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1_b\nI0818 13:44:37.219270 22726 net.cpp:100] Creating Layer scale_resblk64_1_b\nI0818 13:44:37.219276 22726 net.cpp:434] scale_resblk64_1_b <- bn_resblk64_1_b\nI0818 13:44:37.219285 22726 net.cpp:395] scale_resblk64_1_b -> bn_resblk64_1_b (in-place)\nI0818 13:44:37.219341 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1_b\nI0818 13:44:37.219496 22726 net.cpp:150] Setting up scale_resblk64_1_b\nI0818 13:44:37.219507 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219512 22726 net.cpp:165] Memory required for data: 1288705500\nI0818 13:44:37.219521 22726 layer_factory.hpp:77] Creating layer sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.219533 22726 net.cpp:100] Creating Layer sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.219539 22726 net.cpp:434] sum_CC_sum_bn_resblk64_b <- CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_1\nI0818 13:44:37.219547 22726 net.cpp:434] sum_CC_sum_bn_resblk64_b <- bn_resblk64_1_b\nI0818 13:44:37.219555 22726 net.cpp:408] sum_CC_sum_bn_resblk64_b -> sum_bn_resblk64_1_b\nI0818 13:44:37.219593 22726 net.cpp:150] Setting up sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.219604 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219609 22726 net.cpp:165] Memory required for data: 1290753500\nI0818 13:44:37.219620 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_1_b\nI0818 13:44:37.219629 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_1_b\nI0818 13:44:37.219635 22726 net.cpp:434] relu_sum_bn_resblk64_1_b <- sum_bn_resblk64_1_b\nI0818 13:44:37.219641 22726 net.cpp:395] relu_sum_bn_resblk64_1_b -> sum_bn_resblk64_1_b (in-place)\nI0818 13:44:37.219650 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_1_b\nI0818 13:44:37.219657 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219661 22726 net.cpp:165] Memory required for data: 1292801500\nI0818 13:44:37.219666 22726 layer_factory.hpp:77] Creating layer 
sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.219673 22726 net.cpp:100] Creating Layer sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.219678 22726 net.cpp:434] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split <- sum_bn_resblk64_1_b\nI0818 13:44:37.219688 22726 net.cpp:408] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split -> sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_0\nI0818 13:44:37.219698 22726 net.cpp:408] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split -> sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_1\nI0818 13:44:37.219745 22726 net.cpp:150] Setting up sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.219756 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219763 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.219768 22726 net.cpp:165] Memory required for data: 1296897500\nI0818 13:44:37.219772 22726 layer_factory.hpp:77] Creating layer resblk64_2\nI0818 13:44:37.219789 22726 net.cpp:100] Creating Layer resblk64_2\nI0818 13:44:37.219795 22726 net.cpp:434] resblk64_2 <- sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_0\nI0818 13:44:37.219805 22726 net.cpp:408] resblk64_2 -> resblk64_2\nI0818 13:44:37.220841 22726 net.cpp:150] Setting up resblk64_2\nI0818 13:44:37.220856 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.220861 22726 net.cpp:165] Memory required for data: 1298945500\nI0818 13:44:37.220870 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_2\nI0818 13:44:37.220878 22726 net.cpp:100] Creating Layer batchNorm_resblk64_2\nI0818 13:44:37.220885 22726 net.cpp:434] batchNorm_resblk64_2 <- resblk64_2\nI0818 13:44:37.220896 22726 net.cpp:408] batchNorm_resblk64_2 -> bn_resblk64_2\nI0818 13:44:37.221158 22726 net.cpp:150] Setting up batchNorm_resblk64_2\nI0818 13:44:37.221171 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.221176 22726 net.cpp:165] Memory required for data: 
1300993500\nI0818 13:44:37.221186 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2\nI0818 13:44:37.221194 22726 net.cpp:100] Creating Layer scale_resblk64_2\nI0818 13:44:37.221201 22726 net.cpp:434] scale_resblk64_2 <- bn_resblk64_2\nI0818 13:44:37.221211 22726 net.cpp:395] scale_resblk64_2 -> bn_resblk64_2 (in-place)\nI0818 13:44:37.221271 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2\nI0818 13:44:37.221426 22726 net.cpp:150] Setting up scale_resblk64_2\nI0818 13:44:37.221439 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.221443 22726 net.cpp:165] Memory required for data: 1303041500\nI0818 13:44:37.221452 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_2\nI0818 13:44:37.221463 22726 net.cpp:100] Creating Layer relu_bn_resblk64_2\nI0818 13:44:37.221470 22726 net.cpp:434] relu_bn_resblk64_2 <- bn_resblk64_2\nI0818 13:44:37.221477 22726 net.cpp:395] relu_bn_resblk64_2 -> bn_resblk64_2 (in-place)\nI0818 13:44:37.221487 22726 net.cpp:150] Setting up relu_bn_resblk64_2\nI0818 13:44:37.221493 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.221498 22726 net.cpp:165] Memory required for data: 1305089500\nI0818 13:44:37.221503 22726 layer_factory.hpp:77] Creating layer resblk64_2_b\nI0818 13:44:37.221516 22726 net.cpp:100] Creating Layer resblk64_2_b\nI0818 13:44:37.221523 22726 net.cpp:434] resblk64_2_b <- bn_resblk64_2\nI0818 13:44:37.221534 22726 net.cpp:408] resblk64_2_b -> resblk64_2_b\nI0818 13:44:37.222563 22726 net.cpp:150] Setting up resblk64_2_b\nI0818 13:44:37.222578 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.222582 22726 net.cpp:165] Memory required for data: 1307137500\nI0818 13:44:37.222590 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_2_b\nI0818 13:44:37.222599 22726 net.cpp:100] Creating Layer batchNorm_resblk64_2_b\nI0818 13:44:37.222605 22726 net.cpp:434] batchNorm_resblk64_2_b <- resblk64_2_b\nI0818 13:44:37.222617 22726 net.cpp:408] 
batchNorm_resblk64_2_b -> bn_resblk64_2_b\nI0818 13:44:37.222892 22726 net.cpp:150] Setting up batchNorm_resblk64_2_b\nI0818 13:44:37.222908 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.222913 22726 net.cpp:165] Memory required for data: 1309185500\nI0818 13:44:37.222924 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2_b\nI0818 13:44:37.222934 22726 net.cpp:100] Creating Layer scale_resblk64_2_b\nI0818 13:44:37.222939 22726 net.cpp:434] scale_resblk64_2_b <- bn_resblk64_2_b\nI0818 13:44:37.222947 22726 net.cpp:395] scale_resblk64_2_b -> bn_resblk64_2_b (in-place)\nI0818 13:44:37.223004 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2_b\nI0818 13:44:37.223165 22726 net.cpp:150] Setting up scale_resblk64_2_b\nI0818 13:44:37.223177 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.223181 22726 net.cpp:165] Memory required for data: 1311233500\nI0818 13:44:37.223191 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_1_b\nI0818 13:44:37.223203 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_1_b\nI0818 13:44:37.223211 22726 net.cpp:434] sum_sum_bn_resblk64_1_b <- sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_1\nI0818 13:44:37.223218 22726 net.cpp:434] sum_sum_bn_resblk64_1_b <- bn_resblk64_2_b\nI0818 13:44:37.223227 22726 net.cpp:408] sum_sum_bn_resblk64_1_b -> sum_bn_resblk64_2_b\nI0818 13:44:37.223263 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_1_b\nI0818 13:44:37.223274 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.223278 22726 net.cpp:165] Memory required for data: 1313281500\nI0818 13:44:37.223284 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_2_b\nI0818 13:44:37.223291 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_2_b\nI0818 13:44:37.223297 22726 net.cpp:434] relu_sum_bn_resblk64_2_b <- sum_bn_resblk64_2_b\nI0818 13:44:37.223304 22726 net.cpp:395] relu_sum_bn_resblk64_2_b -> sum_bn_resblk64_2_b (in-place)\nI0818 13:44:37.223314 
22726 net.cpp:150] Setting up relu_sum_bn_resblk64_2_b\nI0818 13:44:37.223320 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.223325 22726 net.cpp:165] Memory required for data: 1315329500\nI0818 13:44:37.223330 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.223335 22726 net.cpp:100] Creating Layer sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.223341 22726 net.cpp:434] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split <- sum_bn_resblk64_2_b\nI0818 13:44:37.223351 22726 net.cpp:408] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split -> sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_0\nI0818 13:44:37.223361 22726 net.cpp:408] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split -> sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_1\nI0818 13:44:37.223407 22726 net.cpp:150] Setting up sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.223418 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.223425 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.223429 22726 net.cpp:165] Memory required for data: 1319425500\nI0818 13:44:37.223434 22726 layer_factory.hpp:77] Creating layer resblk64_3\nI0818 13:44:37.223448 22726 net.cpp:100] Creating Layer resblk64_3\nI0818 13:44:37.223455 22726 net.cpp:434] resblk64_3 <- sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_0\nI0818 13:44:37.223464 22726 net.cpp:408] resblk64_3 -> resblk64_3\nI0818 13:44:37.224485 22726 net.cpp:150] Setting up resblk64_3\nI0818 13:44:37.224500 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.224511 22726 net.cpp:165] Memory required for data: 1321473500\nI0818 13:44:37.224521 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_3\nI0818 13:44:37.224529 22726 net.cpp:100] Creating Layer batchNorm_resblk64_3\nI0818 13:44:37.224536 22726 net.cpp:434] batchNorm_resblk64_3 <- resblk64_3\nI0818 13:44:37.224547 
22726 net.cpp:408] batchNorm_resblk64_3 -> bn_resblk64_3\nI0818 13:44:37.224824 22726 net.cpp:150] Setting up batchNorm_resblk64_3\nI0818 13:44:37.224838 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.224843 22726 net.cpp:165] Memory required for data: 1323521500\nI0818 13:44:37.224853 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3\nI0818 13:44:37.224861 22726 net.cpp:100] Creating Layer scale_resblk64_3\nI0818 13:44:37.224867 22726 net.cpp:434] scale_resblk64_3 <- bn_resblk64_3\nI0818 13:44:37.224879 22726 net.cpp:395] scale_resblk64_3 -> bn_resblk64_3 (in-place)\nI0818 13:44:37.224936 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3\nI0818 13:44:37.225097 22726 net.cpp:150] Setting up scale_resblk64_3\nI0818 13:44:37.225111 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.225114 22726 net.cpp:165] Memory required for data: 1325569500\nI0818 13:44:37.225123 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_3\nI0818 13:44:37.225136 22726 net.cpp:100] Creating Layer relu_bn_resblk64_3\nI0818 13:44:37.225142 22726 net.cpp:434] relu_bn_resblk64_3 <- bn_resblk64_3\nI0818 13:44:37.225148 22726 net.cpp:395] relu_bn_resblk64_3 -> bn_resblk64_3 (in-place)\nI0818 13:44:37.225158 22726 net.cpp:150] Setting up relu_bn_resblk64_3\nI0818 13:44:37.225165 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.225169 22726 net.cpp:165] Memory required for data: 1327617500\nI0818 13:44:37.225174 22726 layer_factory.hpp:77] Creating layer resblk64_3_b\nI0818 13:44:37.225188 22726 net.cpp:100] Creating Layer resblk64_3_b\nI0818 13:44:37.225194 22726 net.cpp:434] resblk64_3_b <- bn_resblk64_3\nI0818 13:44:37.225205 22726 net.cpp:408] resblk64_3_b -> resblk64_3_b\nI0818 13:44:37.226239 22726 net.cpp:150] Setting up resblk64_3_b\nI0818 13:44:37.226254 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.226259 22726 net.cpp:165] Memory required for data: 1329665500\nI0818 13:44:37.226269 22726 
layer_factory.hpp:77] Creating layer batchNorm_resblk64_3_b\nI0818 13:44:37.226277 22726 net.cpp:100] Creating Layer batchNorm_resblk64_3_b\nI0818 13:44:37.226284 22726 net.cpp:434] batchNorm_resblk64_3_b <- resblk64_3_b\nI0818 13:44:37.226292 22726 net.cpp:408] batchNorm_resblk64_3_b -> bn_resblk64_3_b\nI0818 13:44:37.226564 22726 net.cpp:150] Setting up batchNorm_resblk64_3_b\nI0818 13:44:37.226577 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.226583 22726 net.cpp:165] Memory required for data: 1331713500\nI0818 13:44:37.226593 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3_b\nI0818 13:44:37.226604 22726 net.cpp:100] Creating Layer scale_resblk64_3_b\nI0818 13:44:37.226611 22726 net.cpp:434] scale_resblk64_3_b <- bn_resblk64_3_b\nI0818 13:44:37.226621 22726 net.cpp:395] scale_resblk64_3_b -> bn_resblk64_3_b (in-place)\nI0818 13:44:37.226680 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3_b\nI0818 13:44:37.226850 22726 net.cpp:150] Setting up scale_resblk64_3_b\nI0818 13:44:37.226863 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.226868 22726 net.cpp:165] Memory required for data: 1333761500\nI0818 13:44:37.226878 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_2_b\nI0818 13:44:37.226887 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_2_b\nI0818 13:44:37.226897 22726 net.cpp:434] sum_sum_bn_resblk64_2_b <- sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_1\nI0818 13:44:37.226904 22726 net.cpp:434] sum_sum_bn_resblk64_2_b <- bn_resblk64_3_b\nI0818 13:44:37.226912 22726 net.cpp:408] sum_sum_bn_resblk64_2_b -> sum_bn_resblk64_3_b\nI0818 13:44:37.226948 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_2_b\nI0818 13:44:37.226956 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.226968 22726 net.cpp:165] Memory required for data: 1335809500\nI0818 13:44:37.226974 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_3_b\nI0818 13:44:37.226984 22726 
net.cpp:100] Creating Layer relu_sum_bn_resblk64_3_b\nI0818 13:44:37.226989 22726 net.cpp:434] relu_sum_bn_resblk64_3_b <- sum_bn_resblk64_3_b\nI0818 13:44:37.226997 22726 net.cpp:395] relu_sum_bn_resblk64_3_b -> sum_bn_resblk64_3_b (in-place)\nI0818 13:44:37.227006 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_3_b\nI0818 13:44:37.227013 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.227018 22726 net.cpp:165] Memory required for data: 1337857500\nI0818 13:44:37.227023 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.227030 22726 net.cpp:100] Creating Layer sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.227035 22726 net.cpp:434] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split <- sum_bn_resblk64_3_b\nI0818 13:44:37.227042 22726 net.cpp:408] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split -> sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_0\nI0818 13:44:37.227051 22726 net.cpp:408] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split -> sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_1\nI0818 13:44:37.227102 22726 net.cpp:150] Setting up sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.227114 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.227120 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.227125 22726 net.cpp:165] Memory required for data: 1341953500\nI0818 13:44:37.227130 22726 layer_factory.hpp:77] Creating layer resblk64_4\nI0818 13:44:37.227144 22726 net.cpp:100] Creating Layer resblk64_4\nI0818 13:44:37.227150 22726 net.cpp:434] resblk64_4 <- sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_0\nI0818 13:44:37.227160 22726 net.cpp:408] resblk64_4 -> resblk64_4\nI0818 13:44:37.228193 22726 net.cpp:150] Setting up resblk64_4\nI0818 13:44:37.228207 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.228211 22726 net.cpp:165] Memory required for data: 
1344001500\nI0818 13:44:37.228220 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_4\nI0818 13:44:37.228235 22726 net.cpp:100] Creating Layer batchNorm_resblk64_4\nI0818 13:44:37.228241 22726 net.cpp:434] batchNorm_resblk64_4 <- resblk64_4\nI0818 13:44:37.228252 22726 net.cpp:408] batchNorm_resblk64_4 -> bn_resblk64_4\nI0818 13:44:37.229522 22726 net.cpp:150] Setting up batchNorm_resblk64_4\nI0818 13:44:37.229539 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.229544 22726 net.cpp:165] Memory required for data: 1346049500\nI0818 13:44:37.229557 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4\nI0818 13:44:37.229569 22726 net.cpp:100] Creating Layer scale_resblk64_4\nI0818 13:44:37.229575 22726 net.cpp:434] scale_resblk64_4 <- bn_resblk64_4\nI0818 13:44:37.229586 22726 net.cpp:395] scale_resblk64_4 -> bn_resblk64_4 (in-place)\nI0818 13:44:37.229647 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4\nI0818 13:44:37.229816 22726 net.cpp:150] Setting up scale_resblk64_4\nI0818 13:44:37.229830 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.229835 22726 net.cpp:165] Memory required for data: 1348097500\nI0818 13:44:37.229845 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_4\nI0818 13:44:37.229852 22726 net.cpp:100] Creating Layer relu_bn_resblk64_4\nI0818 13:44:37.229858 22726 net.cpp:434] relu_bn_resblk64_4 <- bn_resblk64_4\nI0818 13:44:37.229869 22726 net.cpp:395] relu_bn_resblk64_4 -> bn_resblk64_4 (in-place)\nI0818 13:44:37.229879 22726 net.cpp:150] Setting up relu_bn_resblk64_4\nI0818 13:44:37.229887 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.229890 22726 net.cpp:165] Memory required for data: 1350145500\nI0818 13:44:37.229895 22726 layer_factory.hpp:77] Creating layer resblk64_4_b\nI0818 13:44:37.229909 22726 net.cpp:100] Creating Layer resblk64_4_b\nI0818 13:44:37.229915 22726 net.cpp:434] resblk64_4_b <- bn_resblk64_4\nI0818 13:44:37.229933 22726 net.cpp:408] 
resblk64_4_b -> resblk64_4_b\nI0818 13:44:37.231948 22726 net.cpp:150] Setting up resblk64_4_b\nI0818 13:44:37.231966 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.231971 22726 net.cpp:165] Memory required for data: 1352193500\nI0818 13:44:37.231981 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_4_b\nI0818 13:44:37.231993 22726 net.cpp:100] Creating Layer batchNorm_resblk64_4_b\nI0818 13:44:37.232000 22726 net.cpp:434] batchNorm_resblk64_4_b <- resblk64_4_b\nI0818 13:44:37.232009 22726 net.cpp:408] batchNorm_resblk64_4_b -> bn_resblk64_4_b\nI0818 13:44:37.232271 22726 net.cpp:150] Setting up batchNorm_resblk64_4_b\nI0818 13:44:37.232285 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232290 22726 net.cpp:165] Memory required for data: 1354241500\nI0818 13:44:37.232300 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4_b\nI0818 13:44:37.232311 22726 net.cpp:100] Creating Layer scale_resblk64_4_b\nI0818 13:44:37.232317 22726 net.cpp:434] scale_resblk64_4_b <- bn_resblk64_4_b\nI0818 13:44:37.232328 22726 net.cpp:395] scale_resblk64_4_b -> bn_resblk64_4_b (in-place)\nI0818 13:44:37.232403 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4_b\nI0818 13:44:37.232568 22726 net.cpp:150] Setting up scale_resblk64_4_b\nI0818 13:44:37.232581 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232586 22726 net.cpp:165] Memory required for data: 1356289500\nI0818 13:44:37.232595 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_3_b\nI0818 13:44:37.232605 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_3_b\nI0818 13:44:37.232611 22726 net.cpp:434] sum_sum_bn_resblk64_3_b <- sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_1\nI0818 13:44:37.232620 22726 net.cpp:434] sum_sum_bn_resblk64_3_b <- bn_resblk64_4_b\nI0818 13:44:37.232630 22726 net.cpp:408] sum_sum_bn_resblk64_3_b -> sum_bn_resblk64_4_b\nI0818 13:44:37.232666 22726 net.cpp:150] Setting up 
sum_sum_bn_resblk64_3_b\nI0818 13:44:37.232676 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232681 22726 net.cpp:165] Memory required for data: 1358337500\nI0818 13:44:37.232686 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_4_b\nI0818 13:44:37.232697 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_4_b\nI0818 13:44:37.232703 22726 net.cpp:434] relu_sum_bn_resblk64_4_b <- sum_bn_resblk64_4_b\nI0818 13:44:37.232710 22726 net.cpp:395] relu_sum_bn_resblk64_4_b -> sum_bn_resblk64_4_b (in-place)\nI0818 13:44:37.232720 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_4_b\nI0818 13:44:37.232728 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232731 22726 net.cpp:165] Memory required for data: 1360385500\nI0818 13:44:37.232736 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.232743 22726 net.cpp:100] Creating Layer sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.232748 22726 net.cpp:434] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split <- sum_bn_resblk64_4_b\nI0818 13:44:37.232755 22726 net.cpp:408] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split -> sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_0\nI0818 13:44:37.232765 22726 net.cpp:408] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split -> sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_1\nI0818 13:44:37.232820 22726 net.cpp:150] Setting up sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.232833 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232839 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.232844 22726 net.cpp:165] Memory required for data: 1364481500\nI0818 13:44:37.232849 22726 layer_factory.hpp:77] Creating layer resblk64_5\nI0818 13:44:37.232863 22726 net.cpp:100] Creating Layer resblk64_5\nI0818 13:44:37.232869 22726 net.cpp:434] resblk64_5 <- 
sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_0\nI0818 13:44:37.232879 22726 net.cpp:408] resblk64_5 -> resblk64_5\nI0818 13:44:37.233894 22726 net.cpp:150] Setting up resblk64_5\nI0818 13:44:37.233916 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.233922 22726 net.cpp:165] Memory required for data: 1366529500\nI0818 13:44:37.233932 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_5\nI0818 13:44:37.233943 22726 net.cpp:100] Creating Layer batchNorm_resblk64_5\nI0818 13:44:37.233949 22726 net.cpp:434] batchNorm_resblk64_5 <- resblk64_5\nI0818 13:44:37.233963 22726 net.cpp:408] batchNorm_resblk64_5 -> bn_resblk64_5\nI0818 13:44:37.234225 22726 net.cpp:150] Setting up batchNorm_resblk64_5\nI0818 13:44:37.234237 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.234242 22726 net.cpp:165] Memory required for data: 1368577500\nI0818 13:44:37.234252 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5\nI0818 13:44:37.234261 22726 net.cpp:100] Creating Layer scale_resblk64_5\nI0818 13:44:37.234267 22726 net.cpp:434] scale_resblk64_5 <- bn_resblk64_5\nI0818 13:44:37.234278 22726 net.cpp:395] scale_resblk64_5 -> bn_resblk64_5 (in-place)\nI0818 13:44:37.234336 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5\nI0818 13:44:37.234493 22726 net.cpp:150] Setting up scale_resblk64_5\nI0818 13:44:37.234505 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.234509 22726 net.cpp:165] Memory required for data: 1370625500\nI0818 13:44:37.234519 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_5\nI0818 13:44:37.234526 22726 net.cpp:100] Creating Layer relu_bn_resblk64_5\nI0818 13:44:37.234534 22726 net.cpp:434] relu_bn_resblk64_5 <- bn_resblk64_5\nI0818 13:44:37.234544 22726 net.cpp:395] relu_bn_resblk64_5 -> bn_resblk64_5 (in-place)\nI0818 13:44:37.234555 22726 net.cpp:150] Setting up relu_bn_resblk64_5\nI0818 13:44:37.234562 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.234566 22726 net.cpp:165] Memory required for data: 1372673500\nI0818 13:44:37.234571 22726 layer_factory.hpp:77] Creating layer resblk64_5_b\nI0818 13:44:37.234586 22726 net.cpp:100] Creating Layer resblk64_5_b\nI0818 13:44:37.234591 22726 net.cpp:434] resblk64_5_b <- bn_resblk64_5\nI0818 13:44:37.234601 22726 net.cpp:408] resblk64_5_b -> resblk64_5_b\nI0818 13:44:37.235616 22726 net.cpp:150] Setting up resblk64_5_b\nI0818 13:44:37.235631 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.235636 22726 net.cpp:165] Memory required for data: 1374721500\nI0818 13:44:37.235646 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_5_b\nI0818 13:44:37.235656 22726 net.cpp:100] Creating Layer batchNorm_resblk64_5_b\nI0818 13:44:37.235663 22726 net.cpp:434] batchNorm_resblk64_5_b <- resblk64_5_b\nI0818 13:44:37.235671 22726 net.cpp:408] batchNorm_resblk64_5_b -> bn_resblk64_5_b\nI0818 13:44:37.235942 22726 net.cpp:150] Setting up batchNorm_resblk64_5_b\nI0818 13:44:37.235956 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.235961 22726 net.cpp:165] Memory required for data: 1376769500\nI0818 13:44:37.235971 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5_b\nI0818 13:44:37.235985 22726 net.cpp:100] Creating Layer scale_resblk64_5_b\nI0818 13:44:37.235992 22726 net.cpp:434] scale_resblk64_5_b <- bn_resblk64_5_b\nI0818 13:44:37.235999 22726 net.cpp:395] scale_resblk64_5_b -> bn_resblk64_5_b (in-place)\nI0818 13:44:37.236060 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5_b\nI0818 13:44:37.236219 22726 net.cpp:150] Setting up scale_resblk64_5_b\nI0818 13:44:37.236232 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.236238 22726 net.cpp:165] Memory required for data: 1378817500\nI0818 13:44:37.236245 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_4_b\nI0818 13:44:37.236254 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_4_b\nI0818 13:44:37.236261 22726 net.cpp:434] 
sum_sum_bn_resblk64_4_b <- sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_1\nI0818 13:44:37.236268 22726 net.cpp:434] sum_sum_bn_resblk64_4_b <- bn_resblk64_5_b\nI0818 13:44:37.236279 22726 net.cpp:408] sum_sum_bn_resblk64_4_b -> sum_bn_resblk64_5_b\nI0818 13:44:37.236313 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_4_b\nI0818 13:44:37.236330 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.236333 22726 net.cpp:165] Memory required for data: 1380865500\nI0818 13:44:37.236340 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_5_b\nI0818 13:44:37.236351 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_5_b\nI0818 13:44:37.236356 22726 net.cpp:434] relu_sum_bn_resblk64_5_b <- sum_bn_resblk64_5_b\nI0818 13:44:37.236363 22726 net.cpp:395] relu_sum_bn_resblk64_5_b -> sum_bn_resblk64_5_b (in-place)\nI0818 13:44:37.236373 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_5_b\nI0818 13:44:37.236380 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.236384 22726 net.cpp:165] Memory required for data: 1382913500\nI0818 13:44:37.236389 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.236397 22726 net.cpp:100] Creating Layer sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.236402 22726 net.cpp:434] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split <- sum_bn_resblk64_5_b\nI0818 13:44:37.236409 22726 net.cpp:408] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split -> sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_0\nI0818 13:44:37.236419 22726 net.cpp:408] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split -> sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_1\nI0818 13:44:37.236469 22726 net.cpp:150] Setting up sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.236479 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.236486 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.236490 22726 net.cpp:165] Memory required for data: 1387009500\nI0818 13:44:37.236495 22726 layer_factory.hpp:77] Creating layer resblk64_6\nI0818 13:44:37.236510 22726 net.cpp:100] Creating Layer resblk64_6\nI0818 13:44:37.236516 22726 net.cpp:434] resblk64_6 <- sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_0\nI0818 13:44:37.236526 22726 net.cpp:408] resblk64_6 -> resblk64_6\nI0818 13:44:37.237540 22726 net.cpp:150] Setting up resblk64_6\nI0818 13:44:37.237553 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.237558 22726 net.cpp:165] Memory required for data: 1389057500\nI0818 13:44:37.237567 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_6\nI0818 13:44:37.237578 22726 net.cpp:100] Creating Layer batchNorm_resblk64_6\nI0818 13:44:37.237586 22726 net.cpp:434] batchNorm_resblk64_6 <- resblk64_6\nI0818 13:44:37.237596 22726 net.cpp:408] batchNorm_resblk64_6 -> bn_resblk64_6\nI0818 13:44:37.237866 22726 net.cpp:150] Setting up batchNorm_resblk64_6\nI0818 13:44:37.237879 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.237884 22726 net.cpp:165] Memory required for data: 1391105500\nI0818 13:44:37.237895 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6\nI0818 13:44:37.237903 22726 net.cpp:100] Creating Layer scale_resblk64_6\nI0818 13:44:37.237910 22726 net.cpp:434] scale_resblk64_6 <- bn_resblk64_6\nI0818 13:44:37.237920 22726 net.cpp:395] scale_resblk64_6 -> bn_resblk64_6 (in-place)\nI0818 13:44:37.237978 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6\nI0818 13:44:37.238134 22726 net.cpp:150] Setting up scale_resblk64_6\nI0818 13:44:37.238147 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.238152 22726 net.cpp:165] Memory required for data: 1393153500\nI0818 13:44:37.238160 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_6\nI0818 13:44:37.238194 22726 net.cpp:100] Creating Layer relu_bn_resblk64_6\nI0818 13:44:37.238204 22726 net.cpp:434] 
relu_bn_resblk64_6 <- bn_resblk64_6\nI0818 13:44:37.238211 22726 net.cpp:395] relu_bn_resblk64_6 -> bn_resblk64_6 (in-place)\nI0818 13:44:37.238221 22726 net.cpp:150] Setting up relu_bn_resblk64_6\nI0818 13:44:37.238229 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.238232 22726 net.cpp:165] Memory required for data: 1395201500\nI0818 13:44:37.238239 22726 layer_factory.hpp:77] Creating layer resblk64_6_b\nI0818 13:44:37.238253 22726 net.cpp:100] Creating Layer resblk64_6_b\nI0818 13:44:37.238265 22726 net.cpp:434] resblk64_6_b <- bn_resblk64_6\nI0818 13:44:37.238276 22726 net.cpp:408] resblk64_6_b -> resblk64_6_b\nI0818 13:44:37.239306 22726 net.cpp:150] Setting up resblk64_6_b\nI0818 13:44:37.239321 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.239326 22726 net.cpp:165] Memory required for data: 1397249500\nI0818 13:44:37.239336 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_6_b\nI0818 13:44:37.239346 22726 net.cpp:100] Creating Layer batchNorm_resblk64_6_b\nI0818 13:44:37.239353 22726 net.cpp:434] batchNorm_resblk64_6_b <- resblk64_6_b\nI0818 13:44:37.239362 22726 net.cpp:408] batchNorm_resblk64_6_b -> bn_resblk64_6_b\nI0818 13:44:37.239629 22726 net.cpp:150] Setting up batchNorm_resblk64_6_b\nI0818 13:44:37.239641 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.239645 22726 net.cpp:165] Memory required for data: 1399297500\nI0818 13:44:37.239656 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6_b\nI0818 13:44:37.239665 22726 net.cpp:100] Creating Layer scale_resblk64_6_b\nI0818 13:44:37.239671 22726 net.cpp:434] scale_resblk64_6_b <- bn_resblk64_6_b\nI0818 13:44:37.239678 22726 net.cpp:395] scale_resblk64_6_b -> bn_resblk64_6_b (in-place)\nI0818 13:44:37.239738 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6_b\nI0818 13:44:37.239903 22726 net.cpp:150] Setting up scale_resblk64_6_b\nI0818 13:44:37.239917 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.239922 22726 net.cpp:165] Memory required for data: 1401345500\nI0818 13:44:37.239930 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_5_b\nI0818 13:44:37.239939 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_5_b\nI0818 13:44:37.239945 22726 net.cpp:434] sum_sum_bn_resblk64_5_b <- sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_1\nI0818 13:44:37.239953 22726 net.cpp:434] sum_sum_bn_resblk64_5_b <- bn_resblk64_6_b\nI0818 13:44:37.239964 22726 net.cpp:408] sum_sum_bn_resblk64_5_b -> sum_bn_resblk64_6_b\nI0818 13:44:37.239997 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_5_b\nI0818 13:44:37.240010 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.240015 22726 net.cpp:165] Memory required for data: 1403393500\nI0818 13:44:37.240020 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_6_b\nI0818 13:44:37.240027 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_6_b\nI0818 13:44:37.240033 22726 net.cpp:434] relu_sum_bn_resblk64_6_b <- sum_bn_resblk64_6_b\nI0818 13:44:37.240039 22726 net.cpp:395] relu_sum_bn_resblk64_6_b -> sum_bn_resblk64_6_b (in-place)\nI0818 13:44:37.240049 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_6_b\nI0818 13:44:37.240056 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.240061 22726 net.cpp:165] Memory required for data: 1405441500\nI0818 13:44:37.240065 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.240077 22726 net.cpp:100] Creating Layer sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.240082 22726 net.cpp:434] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split <- sum_bn_resblk64_6_b\nI0818 13:44:37.240089 22726 net.cpp:408] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split -> sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_0\nI0818 13:44:37.240099 22726 net.cpp:408] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split -> 
sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_1\nI0818 13:44:37.240149 22726 net.cpp:150] Setting up sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.240159 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.240165 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.240170 22726 net.cpp:165] Memory required for data: 1409537500\nI0818 13:44:37.240175 22726 layer_factory.hpp:77] Creating layer resblk64_7\nI0818 13:44:37.240185 22726 net.cpp:100] Creating Layer resblk64_7\nI0818 13:44:37.240192 22726 net.cpp:434] resblk64_7 <- sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_0\nI0818 13:44:37.240205 22726 net.cpp:408] resblk64_7 -> resblk64_7\nI0818 13:44:37.241235 22726 net.cpp:150] Setting up resblk64_7\nI0818 13:44:37.241250 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.241255 22726 net.cpp:165] Memory required for data: 1411585500\nI0818 13:44:37.241263 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_7\nI0818 13:44:37.241272 22726 net.cpp:100] Creating Layer batchNorm_resblk64_7\nI0818 13:44:37.241278 22726 net.cpp:434] batchNorm_resblk64_7 <- resblk64_7\nI0818 13:44:37.241287 22726 net.cpp:408] batchNorm_resblk64_7 -> bn_resblk64_7\nI0818 13:44:37.241556 22726 net.cpp:150] Setting up batchNorm_resblk64_7\nI0818 13:44:37.241569 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.241574 22726 net.cpp:165] Memory required for data: 1413633500\nI0818 13:44:37.241583 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7\nI0818 13:44:37.241596 22726 net.cpp:100] Creating Layer scale_resblk64_7\nI0818 13:44:37.241603 22726 net.cpp:434] scale_resblk64_7 <- bn_resblk64_7\nI0818 13:44:37.241613 22726 net.cpp:395] scale_resblk64_7 -> bn_resblk64_7 (in-place)\nI0818 13:44:37.241674 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7\nI0818 13:44:37.241844 22726 net.cpp:150] Setting up scale_resblk64_7\nI0818 13:44:37.241858 22726 net.cpp:157] Top shape: 
125 64 8 8 (512000)\nI0818 13:44:37.241863 22726 net.cpp:165] Memory required for data: 1415681500\nI0818 13:44:37.241873 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_7\nI0818 13:44:37.241880 22726 net.cpp:100] Creating Layer relu_bn_resblk64_7\nI0818 13:44:37.241886 22726 net.cpp:434] relu_bn_resblk64_7 <- bn_resblk64_7\nI0818 13:44:37.241896 22726 net.cpp:395] relu_bn_resblk64_7 -> bn_resblk64_7 (in-place)\nI0818 13:44:37.241906 22726 net.cpp:150] Setting up relu_bn_resblk64_7\nI0818 13:44:37.241914 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.241917 22726 net.cpp:165] Memory required for data: 1417729500\nI0818 13:44:37.241922 22726 layer_factory.hpp:77] Creating layer resblk64_7_b\nI0818 13:44:37.241935 22726 net.cpp:100] Creating Layer resblk64_7_b\nI0818 13:44:37.241942 22726 net.cpp:434] resblk64_7_b <- bn_resblk64_7\nI0818 13:44:37.241951 22726 net.cpp:408] resblk64_7_b -> resblk64_7_b\nI0818 13:44:37.242965 22726 net.cpp:150] Setting up resblk64_7_b\nI0818 13:44:37.242980 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.242985 22726 net.cpp:165] Memory required for data: 1419777500\nI0818 13:44:37.242995 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_7_b\nI0818 13:44:37.243006 22726 net.cpp:100] Creating Layer batchNorm_resblk64_7_b\nI0818 13:44:37.243013 22726 net.cpp:434] batchNorm_resblk64_7_b <- resblk64_7_b\nI0818 13:44:37.243022 22726 net.cpp:408] batchNorm_resblk64_7_b -> bn_resblk64_7_b\nI0818 13:44:37.243286 22726 net.cpp:150] Setting up batchNorm_resblk64_7_b\nI0818 13:44:37.243299 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243304 22726 net.cpp:165] Memory required for data: 1421825500\nI0818 13:44:37.243314 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7_b\nI0818 13:44:37.243322 22726 net.cpp:100] Creating Layer scale_resblk64_7_b\nI0818 13:44:37.243330 22726 net.cpp:434] scale_resblk64_7_b <- bn_resblk64_7_b\nI0818 13:44:37.243336 22726 
net.cpp:395] scale_resblk64_7_b -> bn_resblk64_7_b (in-place)\nI0818 13:44:37.243396 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7_b\nI0818 13:44:37.243556 22726 net.cpp:150] Setting up scale_resblk64_7_b\nI0818 13:44:37.243567 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243572 22726 net.cpp:165] Memory required for data: 1423873500\nI0818 13:44:37.243582 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_6_b\nI0818 13:44:37.243590 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_6_b\nI0818 13:44:37.243597 22726 net.cpp:434] sum_sum_bn_resblk64_6_b <- sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_1\nI0818 13:44:37.243604 22726 net.cpp:434] sum_sum_bn_resblk64_6_b <- bn_resblk64_7_b\nI0818 13:44:37.243615 22726 net.cpp:408] sum_sum_bn_resblk64_6_b -> sum_bn_resblk64_7_b\nI0818 13:44:37.243649 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_6_b\nI0818 13:44:37.243667 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243672 22726 net.cpp:165] Memory required for data: 1425921500\nI0818 13:44:37.243677 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_7_b\nI0818 13:44:37.243685 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_7_b\nI0818 13:44:37.243691 22726 net.cpp:434] relu_sum_bn_resblk64_7_b <- sum_bn_resblk64_7_b\nI0818 13:44:37.243698 22726 net.cpp:395] relu_sum_bn_resblk64_7_b -> sum_bn_resblk64_7_b (in-place)\nI0818 13:44:37.243707 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_7_b\nI0818 13:44:37.243715 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243718 22726 net.cpp:165] Memory required for data: 1427969500\nI0818 13:44:37.243722 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.243732 22726 net.cpp:100] Creating Layer sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.243738 22726 net.cpp:434] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split <- 
sum_bn_resblk64_7_b\nI0818 13:44:37.243746 22726 net.cpp:408] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split -> sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_0\nI0818 13:44:37.243755 22726 net.cpp:408] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split -> sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_1\nI0818 13:44:37.243801 22726 net.cpp:150] Setting up sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.243822 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243829 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.243834 22726 net.cpp:165] Memory required for data: 1432065500\nI0818 13:44:37.243839 22726 layer_factory.hpp:77] Creating layer resblk64_8\nI0818 13:44:37.243849 22726 net.cpp:100] Creating Layer resblk64_8\nI0818 13:44:37.243855 22726 net.cpp:434] resblk64_8 <- sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_0\nI0818 13:44:37.243865 22726 net.cpp:408] resblk64_8 -> resblk64_8\nI0818 13:44:37.245904 22726 net.cpp:150] Setting up resblk64_8\nI0818 13:44:37.245923 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.245928 22726 net.cpp:165] Memory required for data: 1434113500\nI0818 13:44:37.245936 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_8\nI0818 13:44:37.245949 22726 net.cpp:100] Creating Layer batchNorm_resblk64_8\nI0818 13:44:37.245955 22726 net.cpp:434] batchNorm_resblk64_8 <- resblk64_8\nI0818 13:44:37.245967 22726 net.cpp:408] batchNorm_resblk64_8 -> bn_resblk64_8\nI0818 13:44:37.246234 22726 net.cpp:150] Setting up batchNorm_resblk64_8\nI0818 13:44:37.246248 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.246251 22726 net.cpp:165] Memory required for data: 1436161500\nI0818 13:44:37.246263 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8\nI0818 13:44:37.246271 22726 net.cpp:100] Creating Layer scale_resblk64_8\nI0818 13:44:37.246278 22726 net.cpp:434] scale_resblk64_8 <- bn_resblk64_8\nI0818 13:44:37.246289 
22726 net.cpp:395] scale_resblk64_8 -> bn_resblk64_8 (in-place)\nI0818 13:44:37.246348 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8\nI0818 13:44:37.246512 22726 net.cpp:150] Setting up scale_resblk64_8\nI0818 13:44:37.246526 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.246531 22726 net.cpp:165] Memory required for data: 1438209500\nI0818 13:44:37.246539 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_8\nI0818 13:44:37.246547 22726 net.cpp:100] Creating Layer relu_bn_resblk64_8\nI0818 13:44:37.246553 22726 net.cpp:434] relu_bn_resblk64_8 <- bn_resblk64_8\nI0818 13:44:37.246563 22726 net.cpp:395] relu_bn_resblk64_8 -> bn_resblk64_8 (in-place)\nI0818 13:44:37.246573 22726 net.cpp:150] Setting up relu_bn_resblk64_8\nI0818 13:44:37.246580 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.246584 22726 net.cpp:165] Memory required for data: 1440257500\nI0818 13:44:37.246589 22726 layer_factory.hpp:77] Creating layer resblk64_8_b\nI0818 13:44:37.246611 22726 net.cpp:100] Creating Layer resblk64_8_b\nI0818 13:44:37.246618 22726 net.cpp:434] resblk64_8_b <- bn_resblk64_8\nI0818 13:44:37.246628 22726 net.cpp:408] resblk64_8_b -> resblk64_8_b\nI0818 13:44:37.247656 22726 net.cpp:150] Setting up resblk64_8_b\nI0818 13:44:37.247671 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.247676 22726 net.cpp:165] Memory required for data: 1442305500\nI0818 13:44:37.247685 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_8_b\nI0818 13:44:37.247696 22726 net.cpp:100] Creating Layer batchNorm_resblk64_8_b\nI0818 13:44:37.247704 22726 net.cpp:434] batchNorm_resblk64_8_b <- resblk64_8_b\nI0818 13:44:37.247712 22726 net.cpp:408] batchNorm_resblk64_8_b -> bn_resblk64_8_b\nI0818 13:44:37.247992 22726 net.cpp:150] Setting up batchNorm_resblk64_8_b\nI0818 13:44:37.248006 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.248010 22726 net.cpp:165] Memory required for data: 1444353500\nI0818 
13:44:37.248021 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8_b\nI0818 13:44:37.248033 22726 net.cpp:100] Creating Layer scale_resblk64_8_b\nI0818 13:44:37.248039 22726 net.cpp:434] scale_resblk64_8_b <- bn_resblk64_8_b\nI0818 13:44:37.248047 22726 net.cpp:395] scale_resblk64_8_b -> bn_resblk64_8_b (in-place)\nI0818 13:44:37.248108 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8_b\nI0818 13:44:37.248266 22726 net.cpp:150] Setting up scale_resblk64_8_b\nI0818 13:44:37.248280 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.248284 22726 net.cpp:165] Memory required for data: 1446401500\nI0818 13:44:37.248293 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_7_b\nI0818 13:44:37.248304 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_7_b\nI0818 13:44:37.248311 22726 net.cpp:434] sum_sum_bn_resblk64_7_b <- sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_1\nI0818 13:44:37.248319 22726 net.cpp:434] sum_sum_bn_resblk64_7_b <- bn_resblk64_8_b\nI0818 13:44:37.248330 22726 net.cpp:408] sum_sum_bn_resblk64_7_b -> sum_bn_resblk64_8_b\nI0818 13:44:37.248364 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_7_b\nI0818 13:44:37.248376 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.248380 22726 net.cpp:165] Memory required for data: 1448449500\nI0818 13:44:37.248386 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_8_b\nI0818 13:44:37.248399 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_8_b\nI0818 13:44:37.248404 22726 net.cpp:434] relu_sum_bn_resblk64_8_b <- sum_bn_resblk64_8_b\nI0818 13:44:37.248411 22726 net.cpp:395] relu_sum_bn_resblk64_8_b -> sum_bn_resblk64_8_b (in-place)\nI0818 13:44:37.248421 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_8_b\nI0818 13:44:37.248428 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.248432 22726 net.cpp:165] Memory required for data: 1450497500\nI0818 13:44:37.248437 22726 layer_factory.hpp:77] Creating layer 
avePooling_resblk64_8\nI0818 13:44:37.248445 22726 net.cpp:100] Creating Layer avePooling_resblk64_8\nI0818 13:44:37.248451 22726 net.cpp:434] avePooling_resblk64_8 <- sum_bn_resblk64_8_b\nI0818 13:44:37.248459 22726 net.cpp:408] avePooling_resblk64_8 -> avgPool_resblk64_8\nI0818 13:44:37.248502 22726 net.cpp:150] Setting up avePooling_resblk64_8\nI0818 13:44:37.248513 22726 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 13:44:37.248517 22726 net.cpp:165] Memory required for data: 1450529500\nI0818 13:44:37.248522 22726 layer_factory.hpp:77] Creating layer FC_final\nI0818 13:44:37.248605 22726 net.cpp:100] Creating Layer FC_final\nI0818 13:44:37.248618 22726 net.cpp:434] FC_final <- avgPool_resblk64_8\nI0818 13:44:37.248627 22726 net.cpp:408] FC_final -> FC_final\nI0818 13:44:37.248884 22726 net.cpp:150] Setting up FC_final\nI0818 13:44:37.248900 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.248905 22726 net.cpp:165] Memory required for data: 1450534500\nI0818 13:44:37.248914 22726 layer_factory.hpp:77] Creating layer FC_final_FC_final_0_split\nI0818 13:44:37.248924 22726 net.cpp:100] Creating Layer FC_final_FC_final_0_split\nI0818 13:44:37.248936 22726 net.cpp:434] FC_final_FC_final_0_split <- FC_final\nI0818 13:44:37.248947 22726 net.cpp:408] FC_final_FC_final_0_split -> FC_final_FC_final_0_split_0\nI0818 13:44:37.248958 22726 net.cpp:408] FC_final_FC_final_0_split -> FC_final_FC_final_0_split_1\nI0818 13:44:37.249007 22726 net.cpp:150] Setting up FC_final_FC_final_0_split\nI0818 13:44:37.249019 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.249025 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.249030 22726 net.cpp:165] Memory required for data: 1450544500\nI0818 13:44:37.249035 22726 layer_factory.hpp:77] Creating layer accuracy\nI0818 13:44:37.249081 22726 net.cpp:100] Creating Layer accuracy\nI0818 13:44:37.249094 22726 net.cpp:434] accuracy <- FC_final_FC_final_0_split_0\nI0818 13:44:37.249101 22726 net.cpp:434] 
accuracy <- label_dataLayer_1_split_0\nI0818 13:44:37.249109 22726 net.cpp:408] accuracy -> accuracy\nI0818 13:44:37.249152 22726 net.cpp:150] Setting up accuracy\nI0818 13:44:37.249166 22726 net.cpp:157] Top shape: (1)\nI0818 13:44:37.249171 22726 net.cpp:165] Memory required for data: 1450544504\nI0818 13:44:37.249176 22726 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:37.249186 22726 net.cpp:100] Creating Layer loss\nI0818 13:44:37.249191 22726 net.cpp:434] loss <- FC_final_FC_final_0_split_1\nI0818 13:44:37.249197 22726 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 13:44:37.249205 22726 net.cpp:408] loss -> loss\nI0818 13:44:37.249248 22726 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:37.249413 22726 net.cpp:150] Setting up loss\nI0818 13:44:37.249428 22726 net.cpp:157] Top shape: (1)\nI0818 13:44:37.249433 22726 net.cpp:160]     with loss weight 1\nI0818 13:44:37.249508 22726 net.cpp:165] Memory required for data: 1450544508\nI0818 13:44:37.249517 22726 net.cpp:226] loss needs backward computation.\nI0818 13:44:37.249523 22726 net.cpp:228] accuracy does not need backward computation.\nI0818 13:44:37.249529 22726 net.cpp:226] FC_final_FC_final_0_split needs backward computation.\nI0818 13:44:37.249534 22726 net.cpp:226] FC_final needs backward computation.\nI0818 13:44:37.249539 22726 net.cpp:226] avePooling_resblk64_8 needs backward computation.\nI0818 13:44:37.249544 22726 net.cpp:226] relu_sum_bn_resblk64_8_b needs backward computation.\nI0818 13:44:37.249549 22726 net.cpp:226] sum_sum_bn_resblk64_7_b needs backward computation.\nI0818 13:44:37.249554 22726 net.cpp:226] scale_resblk64_8_b needs backward computation.\nI0818 13:44:37.249559 22726 net.cpp:226] batchNorm_resblk64_8_b needs backward computation.\nI0818 13:44:37.249563 22726 net.cpp:226] resblk64_8_b needs backward computation.\nI0818 13:44:37.249568 22726 net.cpp:226] relu_bn_resblk64_8 needs backward computation.\nI0818 13:44:37.249573 22726 net.cpp:226] 
scale_resblk64_8 needs backward computation.\nI0818 13:44:37.249578 22726 net.cpp:226] batchNorm_resblk64_8 needs backward computation.\nI0818 13:44:37.249583 22726 net.cpp:226] resblk64_8 needs backward computation.\nI0818 13:44:37.249588 22726 net.cpp:226] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split needs backward computation.\nI0818 13:44:37.249593 22726 net.cpp:226] relu_sum_bn_resblk64_7_b needs backward computation.\nI0818 13:44:37.249598 22726 net.cpp:226] sum_sum_bn_resblk64_6_b needs backward computation.\nI0818 13:44:37.249603 22726 net.cpp:226] scale_resblk64_7_b needs backward computation.\nI0818 13:44:37.249608 22726 net.cpp:226] batchNorm_resblk64_7_b needs backward computation.\nI0818 13:44:37.249613 22726 net.cpp:226] resblk64_7_b needs backward computation.\nI0818 13:44:37.249617 22726 net.cpp:226] relu_bn_resblk64_7 needs backward computation.\nI0818 13:44:37.249622 22726 net.cpp:226] scale_resblk64_7 needs backward computation.\nI0818 13:44:37.249627 22726 net.cpp:226] batchNorm_resblk64_7 needs backward computation.\nI0818 13:44:37.249632 22726 net.cpp:226] resblk64_7 needs backward computation.\nI0818 13:44:37.249637 22726 net.cpp:226] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split needs backward computation.\nI0818 13:44:37.249642 22726 net.cpp:226] relu_sum_bn_resblk64_6_b needs backward computation.\nI0818 13:44:37.249655 22726 net.cpp:226] sum_sum_bn_resblk64_5_b needs backward computation.\nI0818 13:44:37.249660 22726 net.cpp:226] scale_resblk64_6_b needs backward computation.\nI0818 13:44:37.249665 22726 net.cpp:226] batchNorm_resblk64_6_b needs backward computation.\nI0818 13:44:37.249670 22726 net.cpp:226] resblk64_6_b needs backward computation.\nI0818 13:44:37.249675 22726 net.cpp:226] relu_bn_resblk64_6 needs backward computation.\nI0818 13:44:37.249680 22726 net.cpp:226] scale_resblk64_6 needs backward computation.\nI0818 13:44:37.249686 22726 net.cpp:226] batchNorm_resblk64_6 needs backward computation.\nI0818 
13:44:37.249691 22726 net.cpp:226] resblk64_6 needs backward computation.\nI0818 13:44:37.249696 22726 net.cpp:226] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split needs backward computation.\nI0818 13:44:37.249701 22726 net.cpp:226] relu_sum_bn_resblk64_5_b needs backward computation.\nI0818 13:44:37.249706 22726 net.cpp:226] sum_sum_bn_resblk64_4_b needs backward computation.\nI0818 13:44:37.249711 22726 net.cpp:226] scale_resblk64_5_b needs backward computation.\nI0818 13:44:37.249716 22726 net.cpp:226] batchNorm_resblk64_5_b needs backward computation.\nI0818 13:44:37.249722 22726 net.cpp:226] resblk64_5_b needs backward computation.\nI0818 13:44:37.249727 22726 net.cpp:226] relu_bn_resblk64_5 needs backward computation.\nI0818 13:44:37.249732 22726 net.cpp:226] scale_resblk64_5 needs backward computation.\nI0818 13:44:37.249737 22726 net.cpp:226] batchNorm_resblk64_5 needs backward computation.\nI0818 13:44:37.249742 22726 net.cpp:226] resblk64_5 needs backward computation.\nI0818 13:44:37.249747 22726 net.cpp:226] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split needs backward computation.\nI0818 13:44:37.249752 22726 net.cpp:226] relu_sum_bn_resblk64_4_b needs backward computation.\nI0818 13:44:37.249756 22726 net.cpp:226] sum_sum_bn_resblk64_3_b needs backward computation.\nI0818 13:44:37.249763 22726 net.cpp:226] scale_resblk64_4_b needs backward computation.\nI0818 13:44:37.249768 22726 net.cpp:226] batchNorm_resblk64_4_b needs backward computation.\nI0818 13:44:37.249773 22726 net.cpp:226] resblk64_4_b needs backward computation.\nI0818 13:44:37.249781 22726 net.cpp:226] relu_bn_resblk64_4 needs backward computation.\nI0818 13:44:37.249786 22726 net.cpp:226] scale_resblk64_4 needs backward computation.\nI0818 13:44:37.249791 22726 net.cpp:226] batchNorm_resblk64_4 needs backward computation.\nI0818 13:44:37.249796 22726 net.cpp:226] resblk64_4 needs backward computation.\nI0818 13:44:37.249802 22726 net.cpp:226] 
sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split needs backward computation.\nI0818 13:44:37.249814 22726 net.cpp:226] relu_sum_bn_resblk64_3_b needs backward computation.\nI0818 13:44:37.249820 22726 net.cpp:226] sum_sum_bn_resblk64_2_b needs backward computation.\nI0818 13:44:37.249826 22726 net.cpp:226] scale_resblk64_3_b needs backward computation.\nI0818 13:44:37.249831 22726 net.cpp:226] batchNorm_resblk64_3_b needs backward computation.\nI0818 13:44:37.249836 22726 net.cpp:226] resblk64_3_b needs backward computation.\nI0818 13:44:37.249841 22726 net.cpp:226] relu_bn_resblk64_3 needs backward computation.\nI0818 13:44:37.249846 22726 net.cpp:226] scale_resblk64_3 needs backward computation.\nI0818 13:44:37.249851 22726 net.cpp:226] batchNorm_resblk64_3 needs backward computation.\nI0818 13:44:37.249856 22726 net.cpp:226] resblk64_3 needs backward computation.\nI0818 13:44:37.249862 22726 net.cpp:226] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split needs backward computation.\nI0818 13:44:37.249867 22726 net.cpp:226] relu_sum_bn_resblk64_2_b needs backward computation.\nI0818 13:44:37.249872 22726 net.cpp:226] sum_sum_bn_resblk64_1_b needs backward computation.\nI0818 13:44:37.249878 22726 net.cpp:226] scale_resblk64_2_b needs backward computation.\nI0818 13:44:37.249883 22726 net.cpp:226] batchNorm_resblk64_2_b needs backward computation.\nI0818 13:44:37.249888 22726 net.cpp:226] resblk64_2_b needs backward computation.\nI0818 13:44:37.249893 22726 net.cpp:226] relu_bn_resblk64_2 needs backward computation.\nI0818 13:44:37.249909 22726 net.cpp:226] scale_resblk64_2 needs backward computation.\nI0818 13:44:37.249914 22726 net.cpp:226] batchNorm_resblk64_2 needs backward computation.\nI0818 13:44:37.249920 22726 net.cpp:226] resblk64_2 needs backward computation.\nI0818 13:44:37.249925 22726 net.cpp:226] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split needs backward computation.\nI0818 13:44:37.249932 22726 net.cpp:226] relu_sum_bn_resblk64_1_b 
needs backward computation.\nI0818 13:44:37.249936 22726 net.cpp:226] sum_CC_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.249941 22726 net.cpp:226] scale_resblk64_1_b needs backward computation.\nI0818 13:44:37.249946 22726 net.cpp:226] batchNorm_resblk64_1_b needs backward computation.\nI0818 13:44:37.249951 22726 net.cpp:226] resblk64_1_b needs backward computation.\nI0818 13:44:37.249958 22726 net.cpp:226] relu_bn_resblk64_1 needs backward computation.\nI0818 13:44:37.249963 22726 net.cpp:226] scale_resblk64_1 needs backward computation.\nI0818 13:44:37.249967 22726 net.cpp:226] batchNorm_resblk64_1 needs backward computation.\nI0818 13:44:37.249972 22726 net.cpp:226] resblk64_1 needs backward computation.\nI0818 13:44:37.249977 22726 net.cpp:226] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split needs backward computation.\nI0818 13:44:37.249982 22726 net.cpp:226] CC_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.249989 22726 net.cpp:228] zeros_sum_bn_resblk64_b does not need backward computation.\nI0818 13:44:37.249994 22726 net.cpp:226] relu_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.249999 22726 net.cpp:226] sum_avgPool_resblk64 needs backward computation.\nI0818 13:44:37.250005 22726 net.cpp:226] avePooling_resblk64 needs backward computation.\nI0818 13:44:37.250010 22726 net.cpp:226] scale_resblk64_b needs backward computation.\nI0818 13:44:37.250015 22726 net.cpp:226] batchNorm_resblk64_b needs backward computation.\nI0818 13:44:37.250020 22726 net.cpp:226] resblk64_b needs backward computation.\nI0818 13:44:37.250025 22726 net.cpp:226] relu_bn_resblk64 needs backward computation.\nI0818 13:44:37.250030 22726 net.cpp:226] scale_resblk64 needs backward computation.\nI0818 13:44:37.250036 22726 net.cpp:226] batchNorm_resblk64 needs backward computation.\nI0818 13:44:37.250041 22726 net.cpp:226] resblk64 needs backward computation.\nI0818 13:44:37.250046 22726 net.cpp:226] 
sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split needs backward computation.\nI0818 13:44:37.250051 22726 net.cpp:226] relu_sum_bn_resblk32_8_b needs backward computation.\nI0818 13:44:37.250056 22726 net.cpp:226] sum_sum_bn_resblk32_7_b needs backward computation.\nI0818 13:44:37.250062 22726 net.cpp:226] scale_resblk32_8_b needs backward computation.\nI0818 13:44:37.250072 22726 net.cpp:226] batchNorm_resblk32_8_b needs backward computation.\nI0818 13:44:37.250077 22726 net.cpp:226] resblk32_8_b needs backward computation.\nI0818 13:44:37.250082 22726 net.cpp:226] relu_bn_resblk32_8 needs backward computation.\nI0818 13:44:37.250087 22726 net.cpp:226] scale_resblk32_8 needs backward computation.\nI0818 13:44:37.250092 22726 net.cpp:226] batchNorm_resblk32_8 needs backward computation.\nI0818 13:44:37.250098 22726 net.cpp:226] resblk32_8 needs backward computation.\nI0818 13:44:37.250103 22726 net.cpp:226] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split needs backward computation.\nI0818 13:44:37.250108 22726 net.cpp:226] relu_sum_bn_resblk32_7_b needs backward computation.\nI0818 13:44:37.250114 22726 net.cpp:226] sum_sum_bn_resblk32_6_b needs backward computation.\nI0818 13:44:37.250120 22726 net.cpp:226] scale_resblk32_7_b needs backward computation.\nI0818 13:44:37.250125 22726 net.cpp:226] batchNorm_resblk32_7_b needs backward computation.\nI0818 13:44:37.250130 22726 net.cpp:226] resblk32_7_b needs backward computation.\nI0818 13:44:37.250136 22726 net.cpp:226] relu_bn_resblk32_7 needs backward computation.\nI0818 13:44:37.250141 22726 net.cpp:226] scale_resblk32_7 needs backward computation.\nI0818 13:44:37.250146 22726 net.cpp:226] batchNorm_resblk32_7 needs backward computation.\nI0818 13:44:37.250157 22726 net.cpp:226] resblk32_7 needs backward computation.\nI0818 13:44:37.250164 22726 net.cpp:226] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split needs backward computation.\nI0818 13:44:37.250169 22726 net.cpp:226] relu_sum_bn_resblk32_6_b 
needs backward computation.\nI0818 13:44:37.250174 22726 net.cpp:226] sum_sum_bn_resblk32_5_b needs backward computation.\nI0818 13:44:37.250180 22726 net.cpp:226] scale_resblk32_6_b needs backward computation.\nI0818 13:44:37.250185 22726 net.cpp:226] batchNorm_resblk32_6_b needs backward computation.\nI0818 13:44:37.250190 22726 net.cpp:226] resblk32_6_b needs backward computation.\nI0818 13:44:37.250196 22726 net.cpp:226] relu_bn_resblk32_6 needs backward computation.\nI0818 13:44:37.250201 22726 net.cpp:226] scale_resblk32_6 needs backward computation.\nI0818 13:44:37.250206 22726 net.cpp:226] batchNorm_resblk32_6 needs backward computation.\nI0818 13:44:37.250211 22726 net.cpp:226] resblk32_6 needs backward computation.\nI0818 13:44:37.250217 22726 net.cpp:226] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split needs backward computation.\nI0818 13:44:37.250223 22726 net.cpp:226] relu_sum_bn_resblk32_5_b needs backward computation.\nI0818 13:44:37.250228 22726 net.cpp:226] sum_sum_bn_resblk32_4_b needs backward computation.\nI0818 13:44:37.250234 22726 net.cpp:226] scale_resblk32_5_b needs backward computation.\nI0818 13:44:37.250239 22726 net.cpp:226] batchNorm_resblk32_5_b needs backward computation.\nI0818 13:44:37.250246 22726 net.cpp:226] resblk32_5_b needs backward computation.\nI0818 13:44:37.250250 22726 net.cpp:226] relu_bn_resblk32_5 needs backward computation.\nI0818 13:44:37.250255 22726 net.cpp:226] scale_resblk32_5 needs backward computation.\nI0818 13:44:37.250260 22726 net.cpp:226] batchNorm_resblk32_5 needs backward computation.\nI0818 13:44:37.250265 22726 net.cpp:226] resblk32_5 needs backward computation.\nI0818 13:44:37.250272 22726 net.cpp:226] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split needs backward computation.\nI0818 13:44:37.250277 22726 net.cpp:226] relu_sum_bn_resblk32_4_b needs backward computation.\nI0818 13:44:37.250282 22726 net.cpp:226] sum_sum_bn_resblk32_3_b needs backward computation.\nI0818 13:44:37.250288 
22726 net.cpp:226] scale_resblk32_4_b needs backward computation.\nI0818 13:44:37.250293 22726 net.cpp:226] batchNorm_resblk32_4_b needs backward computation.\nI0818 13:44:37.250298 22726 net.cpp:226] resblk32_4_b needs backward computation.\nI0818 13:44:37.250303 22726 net.cpp:226] relu_bn_resblk32_4 needs backward computation.\nI0818 13:44:37.250308 22726 net.cpp:226] scale_resblk32_4 needs backward computation.\nI0818 13:44:37.250313 22726 net.cpp:226] batchNorm_resblk32_4 needs backward computation.\nI0818 13:44:37.250319 22726 net.cpp:226] resblk32_4 needs backward computation.\nI0818 13:44:37.250324 22726 net.cpp:226] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split needs backward computation.\nI0818 13:44:37.250330 22726 net.cpp:226] relu_sum_bn_resblk32_3_b needs backward computation.\nI0818 13:44:37.250335 22726 net.cpp:226] sum_sum_bn_resblk32_2_b needs backward computation.\nI0818 13:44:37.250341 22726 net.cpp:226] scale_resblk32_3_b needs backward computation.\nI0818 13:44:37.250346 22726 net.cpp:226] batchNorm_resblk32_3_b needs backward computation.\nI0818 13:44:37.250352 22726 net.cpp:226] resblk32_3_b needs backward computation.\nI0818 13:44:37.250357 22726 net.cpp:226] relu_bn_resblk32_3 needs backward computation.\nI0818 13:44:37.250362 22726 net.cpp:226] scale_resblk32_3 needs backward computation.\nI0818 13:44:37.250367 22726 net.cpp:226] batchNorm_resblk32_3 needs backward computation.\nI0818 13:44:37.250373 22726 net.cpp:226] resblk32_3 needs backward computation.\nI0818 13:44:37.250378 22726 net.cpp:226] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split needs backward computation.\nI0818 13:44:37.250385 22726 net.cpp:226] relu_sum_bn_resblk32_2_b needs backward computation.\nI0818 13:44:37.250389 22726 net.cpp:226] sum_sum_bn_resblk32_1_b needs backward computation.\nI0818 13:44:37.250396 22726 net.cpp:226] scale_resblk32_2_b needs backward computation.\nI0818 13:44:37.250406 22726 net.cpp:226] batchNorm_resblk32_2_b needs backward 
computation.\nI0818 13:44:37.250412 22726 net.cpp:226] resblk32_2_b needs backward computation.\nI0818 13:44:37.250418 22726 net.cpp:226] relu_bn_resblk32_2 needs backward computation.\nI0818 13:44:37.250423 22726 net.cpp:226] scale_resblk32_2 needs backward computation.\nI0818 13:44:37.250428 22726 net.cpp:226] batchNorm_resblk32_2 needs backward computation.\nI0818 13:44:37.250434 22726 net.cpp:226] resblk32_2 needs backward computation.\nI0818 13:44:37.250440 22726 net.cpp:226] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split needs backward computation.\nI0818 13:44:37.250445 22726 net.cpp:226] relu_sum_bn_resblk32_1_b needs backward computation.\nI0818 13:44:37.250452 22726 net.cpp:226] sum_CC_sum_bn_resblk32_b needs backward computation.\nI0818 13:44:37.250459 22726 net.cpp:226] scale_resblk32_1_b needs backward computation.\nI0818 13:44:37.250465 22726 net.cpp:226] batchNorm_resblk32_1_b needs backward computation.\nI0818 13:44:37.250471 22726 net.cpp:226] resblk32_1_b needs backward computation.\nI0818 13:44:37.250476 22726 net.cpp:226] relu_bn_resblk32_1 needs backward computation.\nI0818 13:44:37.250483 22726 net.cpp:226] scale_resblk32_1 needs backward computation.\nI0818 13:44:37.250488 22726 net.cpp:226] batchNorm_resblk32_1 needs backward computation.\nI0818 13:44:37.250493 22726 net.cpp:226] resblk32_1 needs backward computation.\nI0818 13:44:37.250499 22726 net.cpp:226] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split needs backward computation.\nI0818 13:44:37.250504 22726 net.cpp:226] CC_sum_bn_resblk32_b needs backward computation.\nI0818 13:44:37.250510 22726 net.cpp:228] zeros_sum_bn_resblk32_b does not need backward computation.\nI0818 13:44:37.250515 22726 net.cpp:226] relu_sum_bn_resblk32_b needs backward computation.\nI0818 13:44:37.250520 22726 net.cpp:226] sum_avgPool_resblk32 needs backward computation.\nI0818 13:44:37.250526 22726 net.cpp:226] avePooling_resblk32 needs backward computation.\nI0818 13:44:37.250531 22726 net.cpp:226] 
scale_resblk32_b needs backward computation.\nI0818 13:44:37.250536 22726 net.cpp:226] batchNorm_resblk32_b needs backward computation.\nI0818 13:44:37.250542 22726 net.cpp:226] resblk32_b needs backward computation.\nI0818 13:44:37.250547 22726 net.cpp:226] relu_bn_resblk32 needs backward computation.\nI0818 13:44:37.250553 22726 net.cpp:226] scale_resblk32 needs backward computation.\nI0818 13:44:37.250558 22726 net.cpp:226] batchNorm_resblk32 needs backward computation.\nI0818 13:44:37.250563 22726 net.cpp:226] resblk32 needs backward computation.\nI0818 13:44:37.250569 22726 net.cpp:226] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split needs backward computation.\nI0818 13:44:37.250576 22726 net.cpp:226] relu_sum_bn_Conv16_9_b needs backward computation.\nI0818 13:44:37.250581 22726 net.cpp:226] sum_sum_bn_Conv16_8_b needs backward computation.\nI0818 13:44:37.250587 22726 net.cpp:226] scale_Conv16_9_b needs backward computation.\nI0818 13:44:37.250592 22726 net.cpp:226] batchNorm_Conv16_9_b needs backward computation.\nI0818 13:44:37.250597 22726 net.cpp:226] Conv16_9_b needs backward computation.\nI0818 13:44:37.250603 22726 net.cpp:226] relu_bn_Conv16_9 needs backward computation.\nI0818 13:44:37.250608 22726 net.cpp:226] scale_Conv16_9 needs backward computation.\nI0818 13:44:37.250613 22726 net.cpp:226] batchNorm_Conv16_9 needs backward computation.\nI0818 13:44:37.250619 22726 net.cpp:226] Conv16_9 needs backward computation.\nI0818 13:44:37.250624 22726 net.cpp:226] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split needs backward computation.\nI0818 13:44:37.250630 22726 net.cpp:226] relu_sum_bn_Conv16_8_b needs backward computation.\nI0818 13:44:37.250636 22726 net.cpp:226] sum_sum_bn_Conv16_7_b needs backward computation.\nI0818 13:44:37.250643 22726 net.cpp:226] scale_Conv16_8_b needs backward computation.\nI0818 13:44:37.250648 22726 net.cpp:226] batchNorm_Conv16_8_b needs backward computation.\nI0818 13:44:37.250653 22726 net.cpp:226] Conv16_8_b 
needs backward computation.\nI0818 13:44:37.250663 22726 net.cpp:226] relu_bn_Conv16_8 needs backward computation.\nI0818 13:44:37.250669 22726 net.cpp:226] scale_Conv16_8 needs backward computation.\nI0818 13:44:37.250674 22726 net.cpp:226] batchNorm_Conv16_8 needs backward computation.\nI0818 13:44:37.250679 22726 net.cpp:226] Conv16_8 needs backward computation.\nI0818 13:44:37.250685 22726 net.cpp:226] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split needs backward computation.\nI0818 13:44:37.250690 22726 net.cpp:226] relu_sum_bn_Conv16_7_b needs backward computation.\nI0818 13:44:37.250696 22726 net.cpp:226] sum_sum_bn_Conv16_6_b needs backward computation.\nI0818 13:44:37.250702 22726 net.cpp:226] scale_Conv16_7_b needs backward computation.\nI0818 13:44:37.250708 22726 net.cpp:226] batchNorm_Conv16_7_b needs backward computation.\nI0818 13:44:37.250713 22726 net.cpp:226] Conv16_7_b needs backward computation.\nI0818 13:44:37.250720 22726 net.cpp:226] relu_bn_Conv16_7 needs backward computation.\nI0818 13:44:37.250725 22726 net.cpp:226] scale_Conv16_7 needs backward computation.\nI0818 13:44:37.250730 22726 net.cpp:226] batchNorm_Conv16_7 needs backward computation.\nI0818 13:44:37.250735 22726 net.cpp:226] Conv16_7 needs backward computation.\nI0818 13:44:37.250741 22726 net.cpp:226] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split needs backward computation.\nI0818 13:44:37.250747 22726 net.cpp:226] relu_sum_bn_Conv16_6_b needs backward computation.\nI0818 13:44:37.250752 22726 net.cpp:226] sum_sum_bn_Conv16_5_b needs backward computation.\nI0818 13:44:37.250758 22726 net.cpp:226] scale_Conv16_6_b needs backward computation.\nI0818 13:44:37.250764 22726 net.cpp:226] batchNorm_Conv16_6_b needs backward computation.\nI0818 13:44:37.250769 22726 net.cpp:226] Conv16_6_b needs backward computation.\nI0818 13:44:37.250775 22726 net.cpp:226] relu_bn_Conv16_6 needs backward computation.\nI0818 13:44:37.250780 22726 net.cpp:226] scale_Conv16_6 needs backward 
computation.\nI0818 13:44:37.250785 22726 net.cpp:226] batchNorm_Conv16_6 needs backward computation.\nI0818 13:44:37.250790 22726 net.cpp:226] Conv16_6 needs backward computation.\nI0818 13:44:37.250797 22726 net.cpp:226] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split needs backward computation.\nI0818 13:44:37.250802 22726 net.cpp:226] relu_sum_bn_Conv16_5_b needs backward computation.\nI0818 13:44:37.250813 22726 net.cpp:226] sum_sum_bn_Conv16_4_b needs backward computation.\nI0818 13:44:37.250820 22726 net.cpp:226] scale_Conv16_5_b needs backward computation.\nI0818 13:44:37.250826 22726 net.cpp:226] batchNorm_Conv16_5_b needs backward computation.\nI0818 13:44:37.250833 22726 net.cpp:226] Conv16_5_b needs backward computation.\nI0818 13:44:37.250838 22726 net.cpp:226] relu_bn_Conv16_5 needs backward computation.\nI0818 13:44:37.250844 22726 net.cpp:226] scale_Conv16_5 needs backward computation.\nI0818 13:44:37.250849 22726 net.cpp:226] batchNorm_Conv16_5 needs backward computation.\nI0818 13:44:37.250854 22726 net.cpp:226] Conv16_5 needs backward computation.\nI0818 13:44:37.250860 22726 net.cpp:226] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split needs backward computation.\nI0818 13:44:37.250866 22726 net.cpp:226] relu_sum_bn_Conv16_4_b needs backward computation.\nI0818 13:44:37.250871 22726 net.cpp:226] sum_sum_bn_Conv16_3_b needs backward computation.\nI0818 13:44:37.250879 22726 net.cpp:226] scale_Conv16_4_b needs backward computation.\nI0818 13:44:37.250883 22726 net.cpp:226] batchNorm_Conv16_4_b needs backward computation.\nI0818 13:44:37.250888 22726 net.cpp:226] Conv16_4_b needs backward computation.\nI0818 13:44:37.250895 22726 net.cpp:226] relu_bn_Conv16_4 needs backward computation.\nI0818 13:44:37.250900 22726 net.cpp:226] scale_Conv16_4 needs backward computation.\nI0818 13:44:37.250905 22726 net.cpp:226] batchNorm_Conv16_4 needs backward computation.\nI0818 13:44:37.250910 22726 net.cpp:226] Conv16_4 needs backward computation.\nI0818 
13:44:37.250916 22726 net.cpp:226] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split needs backward computation.\nI0818 13:44:37.250922 22726 net.cpp:226] relu_sum_bn_Conv16_3_b needs backward computation.\nI0818 13:44:37.250933 22726 net.cpp:226] sum_sum_bn_Conv16_2_b needs backward computation.\nI0818 13:44:37.250939 22726 net.cpp:226] scale_Conv16_3_b needs backward computation.\nI0818 13:44:37.250946 22726 net.cpp:226] batchNorm_Conv16_3_b needs backward computation.\nI0818 13:44:37.250952 22726 net.cpp:226] Conv16_3_b needs backward computation.\nI0818 13:44:37.250957 22726 net.cpp:226] relu_bn_Conv16_3 needs backward computation.\nI0818 13:44:37.250962 22726 net.cpp:226] scale_Conv16_3 needs backward computation.\nI0818 13:44:37.250967 22726 net.cpp:226] batchNorm_Conv16_3 needs backward computation.\nI0818 13:44:37.250972 22726 net.cpp:226] Conv16_3 needs backward computation.\nI0818 13:44:37.250978 22726 net.cpp:226] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split needs backward computation.\nI0818 13:44:37.250984 22726 net.cpp:226] relu_sum_bn_Conv16_2_b needs backward computation.\nI0818 13:44:37.250990 22726 net.cpp:226] sum_sum_bn_Conv16_1_b needs backward computation.\nI0818 13:44:37.250996 22726 net.cpp:226] scale_Conv16_2_b needs backward computation.\nI0818 13:44:37.251003 22726 net.cpp:226] batchNorm_Conv16_2_b needs backward computation.\nI0818 13:44:37.251008 22726 net.cpp:226] Conv16_2_b needs backward computation.\nI0818 13:44:37.251013 22726 net.cpp:226] relu_bn_Conv16_2 needs backward computation.\nI0818 13:44:37.251019 22726 net.cpp:226] scale_Conv16_2 needs backward computation.\nI0818 13:44:37.251024 22726 net.cpp:226] batchNorm_Conv16_2 needs backward computation.\nI0818 13:44:37.251029 22726 net.cpp:226] Conv16_2 needs backward computation.\nI0818 13:44:37.251036 22726 net.cpp:226] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split needs backward computation.\nI0818 13:44:37.251041 22726 net.cpp:226] relu_sum_bn_Conv16_1_b needs backward 
computation.\nI0818 13:44:37.251047 22726 net.cpp:226] sum_bn_conv needs backward computation.\nI0818 13:44:37.251054 22726 net.cpp:226] scale_Conv16_1_b needs backward computation.\nI0818 13:44:37.251058 22726 net.cpp:226] batchNorm_Conv16_1_b needs backward computation.\nI0818 13:44:37.251065 22726 net.cpp:226] Conv16_1_b needs backward computation.\nI0818 13:44:37.251070 22726 net.cpp:226] relu_bn_Conv16_1 needs backward computation.\nI0818 13:44:37.251075 22726 net.cpp:226] scale_Conv16_1 needs backward computation.\nI0818 13:44:37.251080 22726 net.cpp:226] batchNorm_Conv16_1 needs backward computation.\nI0818 13:44:37.251085 22726 net.cpp:226] Conv16_1 needs backward computation.\nI0818 13:44:37.251091 22726 net.cpp:226] bn_conv_relu_bn_conv_0_split needs backward computation.\nI0818 13:44:37.251096 22726 net.cpp:226] relu_bn_conv needs backward computation.\nI0818 13:44:37.251101 22726 net.cpp:226] scale_conv needs backward computation.\nI0818 13:44:37.251106 22726 net.cpp:226] batchNorm_conv needs backward computation.\nI0818 13:44:37.251111 22726 net.cpp:226] conv needs backward computation.\nI0818 13:44:37.251118 22726 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 13:44:37.251128 22726 net.cpp:228] dataLayer does not need backward computation.\nI0818 13:44:37.251132 22726 net.cpp:270] This network produces output accuracy\nI0818 13:44:37.251139 22726 net.cpp:270] This network produces output loss\nI0818 13:44:37.251526 22726 net.cpp:283] Network initialization done.\nI0818 13:44:37.258756 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:37.258790 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:37.258854 22726 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0818 13:44:37.259176 22726 net.cpp:322] The NetState phase 
(1) differed from the phase (0) specified by a rule in layer dataLayer\nI0818 13:44:37.260951 22726 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"conv\"\n  type: \"Convolution\"\n  bottom: \"data\"\n  top: \"conv\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_conv\"\n  type: \"BatchNorm\"\n  bottom: \"conv\"\n  top: \"bn_conv\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_conv\"\n  type: \"Scale\"\n  bottom: \"bn_conv\"\n  top: \"bn_conv\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_conv\"\n  type: \"ReLU\"\n  bottom: \"bn_conv\"\n  top: \"bn_conv\"\n}\nlayer {\n  name: \"Conv16_1\"\n  type: \"Convolution\"\n  bottom: \"bn_conv\"\n  top: \"Conv16_1\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_1\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_1\"\n  top: \"bn_Conv16_1\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  
name: \"scale_Conv16_1\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_1\"\n  top: \"bn_Conv16_1\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_1\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_1\"\n  top: \"bn_Conv16_1\"\n}\nlayer {\n  name: \"Conv16_1_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_1\"\n  top: \"Conv16_1_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_1_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_1_b\"\n  top: \"bn_Conv16_1_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_1_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_1_b\"\n  top: \"bn_Conv16_1_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_bn_conv\"\n  type: \"Eltwise\"\n  bottom: \"bn_conv\"\n  bottom: \"bn_Conv16_1_b\"\n  top: \"sum_bn_Conv16_1_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_1_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  top: \"sum_bn_Conv16_1_b\"\n}\nlayer {\n  name: \"Conv16_2\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  top: \"Conv16_2\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_2\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_2\"\n  top: \"bn_Conv16_2\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_2\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_2\"\n  top: \"bn_Conv16_2\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_2\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_2\"\n  top: \"bn_Conv16_2\"\n}\nlayer {\n  name: \"Conv16_2_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_2\"\n  top: \"Conv16_2_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_2_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_2_b\"\n  top: \"bn_Conv16_2_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_2_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_2_b\"\n  top: \"bn_Conv16_2_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_1_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_1_b\"\n  bottom: \"bn_Conv16_2_b\"\n  top: \"sum_bn_Conv16_2_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_2_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  top: \"sum_bn_Conv16_2_b\"\n}\nlayer {\n  name: \"Conv16_3\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  top: \"Conv16_3\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_3\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_3\"\n  top: \"bn_Conv16_3\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_3\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_3\"\n  top: \"bn_Conv16_3\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_3\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_3\"\n  top: \"bn_Conv16_3\"\n}\nlayer {\n  name: \"Conv16_3_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_3\"\n  top: \"Conv16_3_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_3_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_3_b\"\n  top: \"bn_Conv16_3_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_3_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_3_b\"\n  top: \"bn_Conv16_3_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_2_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_2_b\"\n  bottom: \"bn_Conv16_3_b\"\n  top: \"sum_bn_Conv16_3_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_3_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  top: \"sum_bn_Conv16_3_b\"\n}\nlayer {\n  name: \"Conv16_4\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  top: \"Conv16_4\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_4\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_4\"\n  top: \"bn_Conv16_4\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_4\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_4\"\n  top: \"bn_Conv16_4\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_4\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_4\"\n  top: \"bn_Conv16_4\"\n}\nlayer {\n  name: \"Conv16_4_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_4\"\n  top: \"Conv16_4_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_4_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_4_b\"\n  top: \"bn_Conv16_4_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_4_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_4_b\"\n  top: \"bn_Conv16_4_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_3_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_3_b\"\n  bottom: \"bn_Conv16_4_b\"\n  top: \"sum_bn_Conv16_4_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_4_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  top: \"sum_bn_Conv16_4_b\"\n}\nlayer {\n  name: \"Conv16_5\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  top: \"Conv16_5\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_5\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_5\"\n  top: \"bn_Conv16_5\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_5\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_5\"\n  top: \"bn_Conv16_5\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_5\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_5\"\n  top: \"bn_Conv16_5\"\n}\nlayer {\n  name: \"Conv16_5_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_5\"\n  top: \"Conv16_5_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_5_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_5_b\"\n  top: \"bn_Conv16_5_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_5_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_5_b\"\n  top: \"bn_Conv16_5_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_4_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_4_b\"\n  bottom: \"bn_Conv16_5_b\"\n  top: \"sum_bn_Conv16_5_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_5_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  top: \"sum_bn_Conv16_5_b\"\n}\nlayer {\n  name: \"Conv16_6\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  top: \"Conv16_6\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_6\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_6\"\n  top: \"bn_Conv16_6\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_6\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_6\"\n  top: \"bn_Conv16_6\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_6\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_6\"\n  top: \"bn_Conv16_6\"\n}\nlayer {\n  name: \"Conv16_6_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_6\"\n  top: \"Conv16_6_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_6_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_6_b\"\n  top: \"bn_Conv16_6_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_6_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_6_b\"\n  top: \"bn_Conv16_6_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_5_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_5_b\"\n  bottom: \"bn_Conv16_6_b\"\n  top: \"sum_bn_Conv16_6_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_6_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  top: \"sum_bn_Conv16_6_b\"\n}\nlayer {\n  name: \"Conv16_7\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  top: \"Conv16_7\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_7\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_7\"\n  top: \"bn_Conv16_7\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_7\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_7\"\n  top: \"bn_Conv16_7\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_7\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_7\"\n  top: \"bn_Conv16_7\"\n}\nlayer {\n  name: \"Conv16_7_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_7\"\n  top: \"Conv16_7_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_7_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_7_b\"\n  top: \"bn_Conv16_7_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_7_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_7_b\"\n  top: \"bn_Conv16_7_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_6_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_6_b\"\n  bottom: \"bn_Conv16_7_b\"\n  top: \"sum_bn_Conv16_7_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_7_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  top: \"sum_bn_Conv16_7_b\"\n}\nlayer {\n  name: \"Conv16_8\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  top: \"Conv16_8\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_8\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_8\"\n  top: \"bn_Conv16_8\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_8\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_8\"\n  top: \"bn_Conv16_8\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_8\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_8\"\n  top: \"bn_Conv16_8\"\n}\nlayer {\n  name: \"Conv16_8_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_8\"\n  top: \"Conv16_8_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_8_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_8_b\"\n  top: \"bn_Conv16_8_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_8_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_8_b\"\n  top: \"bn_Conv16_8_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_7_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_7_b\"\n  bottom: \"bn_Conv16_8_b\"\n  top: \"sum_bn_Conv16_8_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_8_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  top: \"sum_bn_Conv16_8_b\"\n}\nlayer {\n  name: \"Conv16_9\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  top: \"Conv16_9\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_9\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_9\"\n  top: \"bn_Conv16_9\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_Conv16_9\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_9\"\n  top: \"bn_Conv16_9\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_Conv16_9\"\n  type: \"ReLU\"\n  bottom: \"bn_Conv16_9\"\n  top: \"bn_Conv16_9\"\n}\nlayer {\n  name: \"Conv16_9_b\"\n  type: \"Convolution\"\n  bottom: \"bn_Conv16_9\"\n  top: \"Conv16_9_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_Conv16_9_b\"\n  type: \"BatchNorm\"\n  bottom: \"Conv16_9_b\"\n  top: \"bn_Conv16_9_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_Conv16_9_b\"\n  type: \"Scale\"\n  bottom: \"bn_Conv16_9_b\"\n  top: \"bn_Conv16_9_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_Conv16_8_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_Conv16_8_b\"\n  bottom: \"bn_Conv16_9_b\"\n  top: \"sum_bn_Conv16_9_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_Conv16_9_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"sum_bn_Conv16_9_b\"\n}\nlayer {\n  name: \"resblk32\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"resblk32\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32\"\n  top: \"bn_resblk32\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"scale_resblk32\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32\"\n  top: \"bn_resblk32\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32\"\n  top: \"bn_resblk32\"\n}\nlayer {\n  name: \"resblk32_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32\"\n  top: \"resblk32_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_b\"\n  top: \"bn_resblk32_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_b\"\n  top: \"bn_resblk32_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"avePooling_resblk32\"\n  type: \"Pooling\"\n  bottom: \"sum_bn_Conv16_9_b\"\n  top: \"avgPool_resblk32\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"sum_avgPool_resblk32\"\n  type: \"Eltwise\"\n  bottom: \"avgPool_resblk32\"\n  bottom: \"bn_resblk32_b\"\n  top: \"sum_bn_resblk32_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_b\"\n  top: \"sum_bn_resblk32_b\"\n}\nlayer {\n  name: \"zeros_sum_bn_resblk32_b\"\n  type: \"DummyData\"\n  top: \"zeros_sum_bn_resblk32_b\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"CC_sum_bn_resblk32_b\"\n  type: \"Concat\"\n  bottom: \"sum_bn_resblk32_b\"\n  bottom: \"zeros_sum_bn_resblk32_b\"\n  top: 
\"CC_sum_bn_resblk32_b\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"resblk32_1\"\n  type: \"Convolution\"\n  bottom: \"CC_sum_bn_resblk32_b\"\n  top: \"resblk32_1\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_1\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_1\"\n  top: \"bn_resblk32_1\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_1\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_1\"\n  top: \"bn_resblk32_1\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_1\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_1\"\n  top: \"bn_resblk32_1\"\n}\nlayer {\n  name: \"resblk32_1_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_1\"\n  top: \"resblk32_1_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_1_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_1_b\"\n  top: \"bn_resblk32_1_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_1_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_1_b\"\n  top: \"bn_resblk32_1_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_CC_sum_bn_resblk32_b\"\n  type: \"Eltwise\"\n  bottom: \"CC_sum_bn_resblk32_b\"\n  bottom: \"bn_resblk32_1_b\"\n  top: \"sum_bn_resblk32_1_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  
name: \"relu_sum_bn_resblk32_1_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  top: \"sum_bn_resblk32_1_b\"\n}\nlayer {\n  name: \"resblk32_2\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  top: \"resblk32_2\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_2\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_2\"\n  top: \"bn_resblk32_2\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_2\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_2\"\n  top: \"bn_resblk32_2\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_2\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_2\"\n  top: \"bn_resblk32_2\"\n}\nlayer {\n  name: \"resblk32_2_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_2\"\n  top: \"resblk32_2_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_2_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_2_b\"\n  top: \"bn_resblk32_2_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_2_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_2_b\"\n  top: \"bn_resblk32_2_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_1_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_1_b\"\n  bottom: \"bn_resblk32_2_b\"\n  top: \"sum_bn_resblk32_2_b\"\n  
eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_2_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  top: \"sum_bn_resblk32_2_b\"\n}\nlayer {\n  name: \"resblk32_3\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  top: \"resblk32_3\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_3\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_3\"\n  top: \"bn_resblk32_3\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_3\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_3\"\n  top: \"bn_resblk32_3\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_3\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_3\"\n  top: \"bn_resblk32_3\"\n}\nlayer {\n  name: \"resblk32_3_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_3\"\n  top: \"resblk32_3_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_3_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_3_b\"\n  top: \"bn_resblk32_3_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_3_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_3_b\"\n  top: \"bn_resblk32_3_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_2_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_2_b\"\n  bottom: 
\"bn_resblk32_3_b\"\n  top: \"sum_bn_resblk32_3_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_3_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_3_b\"\n  top: \"sum_bn_resblk32_3_b\"\n}\nlayer {\n  name: \"resblk32_4\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_3_b\"\n  top: \"resblk32_4\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_4\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_4\"\n  top: \"bn_resblk32_4\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_4\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_4\"\n  top: \"bn_resblk32_4\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_4\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_4\"\n  top: \"bn_resblk32_4\"\n}\nlayer {\n  name: \"resblk32_4_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_4\"\n  top: \"resblk32_4_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_4_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_4_b\"\n  top: \"bn_resblk32_4_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_4_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_4_b\"\n  top: \"bn_resblk32_4_b\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_3_b\"\n  type: 
\"Eltwise\"\n  bottom: \"sum_bn_resblk32_3_b\"\n  bottom: \"bn_resblk32_4_b\"\n  top: \"sum_bn_resblk32_4_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_4_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  top: \"sum_bn_resblk32_4_b\"\n}\nlayer {\n  name: \"resblk32_5\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  top: \"resblk32_5\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_5\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_5\"\n  top: \"bn_resblk32_5\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_5\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_5\"\n  top: \"bn_resblk32_5\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_5\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_5\"\n  top: \"bn_resblk32_5\"\n}\nlayer {\n  name: \"resblk32_5_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_5\"\n  top: \"resblk32_5_b\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_5_b\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_5_b\"\n  top: \"bn_resblk32_5_b\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_5_b\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_5_b\"\n  top: \"bn_resblk32_5_b\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"sum_sum_bn_resblk32_4_b\"\n  type: \"Eltwise\"\n  bottom: \"sum_bn_resblk32_4_b\"\n  bottom: \"bn_resblk32_5_b\"\n  top: \"sum_bn_resblk32_5_b\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"relu_sum_bn_resblk32_5_b\"\n  type: \"ReLU\"\n  bottom: \"sum_bn_resblk32_5_b\"\n  top: \"sum_bn_resblk32_5_b\"\n}\nlayer {\n  name: \"resblk32_6\"\n  type: \"Convolution\"\n  bottom: \"sum_bn_resblk32_5_b\"\n  top: \"resblk32_6\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"batchNorm_resblk32_6\"\n  type: \"BatchNorm\"\n  bottom: \"resblk32_6\"\n  top: \"bn_resblk32_6\"\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"scale_resblk32_6\"\n  type: \"Scale\"\n  bottom: \"bn_resblk32_6\"\n  top: \"bn_resblk32_6\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"relu_bn_resblk32_6\"\n  type: \"ReLU\"\n  bottom: \"bn_resblk32_6\"\n  top: \"bn_resblk32_6\"\n}\nlayer {\n  name: \"resblk32_6_b\"\n  type: \"Convolution\"\n  bottom: \"bn_resblk32_6\"\n  top: \"r\nI0818 13:44:37.262527 22726 layer_factory.hpp:77] Creating layer dataLayer\nI0818 13:44:37.262760 22726 net.cpp:100] Creating Layer dataLayer\nI0818 13:44:37.262783 22726 net.cpp:408] dataLayer -> data\nI0818 13:44:37.262801 22726 net.cpp:408] dataLayer -> label\nI0818 13:44:37.262820 22726 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 13:44:37.272661 22733 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0818 13:44:37.272941 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:37.280246 22726 net.cpp:150] Setting up dataLayer\nI0818 13:44:37.280266 22726 
net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 13:44:37.280274 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.280279 22726 net.cpp:165] Memory required for data: 1536500\nI0818 13:44:37.280285 22726 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 13:44:37.280316 22726 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 13:44:37.280324 22726 net.cpp:434] label_dataLayer_1_split <- label\nI0818 13:44:37.280333 22726 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 13:44:37.280344 22726 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 13:44:37.280480 22726 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 13:44:37.280496 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.280503 22726 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:37.280508 22726 net.cpp:165] Memory required for data: 1537500\nI0818 13:44:37.280513 22726 layer_factory.hpp:77] Creating layer conv\nI0818 13:44:37.280534 22726 net.cpp:100] Creating Layer conv\nI0818 13:44:37.280540 22726 net.cpp:434] conv <- data\nI0818 13:44:37.280552 22726 net.cpp:408] conv -> conv\nI0818 13:44:37.281033 22726 net.cpp:150] Setting up conv\nI0818 13:44:37.281050 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.281057 22726 net.cpp:165] Memory required for data: 9729500\nI0818 13:44:37.281072 22726 layer_factory.hpp:77] Creating layer batchNorm_conv\nI0818 13:44:37.281082 22726 net.cpp:100] Creating Layer batchNorm_conv\nI0818 13:44:37.281087 22726 net.cpp:434] batchNorm_conv <- conv\nI0818 13:44:37.281100 22726 net.cpp:408] batchNorm_conv -> bn_conv\nI0818 13:44:37.281510 22726 net.cpp:150] Setting up batchNorm_conv\nI0818 13:44:37.281524 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.281533 22726 net.cpp:165] Memory required for data: 17921500\nI0818 13:44:37.281548 22726 layer_factory.hpp:77] Creating layer scale_conv\nI0818 13:44:37.281561 22726 net.cpp:100] Creating Layer 
scale_conv\nI0818 13:44:37.281568 22726 net.cpp:434] scale_conv <- bn_conv\nI0818 13:44:37.281575 22726 net.cpp:395] scale_conv -> bn_conv (in-place)\nI0818 13:44:37.281641 22726 layer_factory.hpp:77] Creating layer scale_conv\nI0818 13:44:37.281908 22726 net.cpp:150] Setting up scale_conv\nI0818 13:44:37.281924 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.281929 22726 net.cpp:165] Memory required for data: 26113500\nI0818 13:44:37.281949 22726 layer_factory.hpp:77] Creating layer relu_bn_conv\nI0818 13:44:37.281961 22726 net.cpp:100] Creating Layer relu_bn_conv\nI0818 13:44:37.281967 22726 net.cpp:434] relu_bn_conv <- bn_conv\nI0818 13:44:37.281975 22726 net.cpp:395] relu_bn_conv -> bn_conv (in-place)\nI0818 13:44:37.281985 22726 net.cpp:150] Setting up relu_bn_conv\nI0818 13:44:37.281991 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.281996 22726 net.cpp:165] Memory required for data: 34305500\nI0818 13:44:37.282003 22726 layer_factory.hpp:77] Creating layer bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.282014 22726 net.cpp:100] Creating Layer bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.282019 22726 net.cpp:434] bn_conv_relu_bn_conv_0_split <- bn_conv\nI0818 13:44:37.282027 22726 net.cpp:408] bn_conv_relu_bn_conv_0_split -> bn_conv_relu_bn_conv_0_split_0\nI0818 13:44:37.282038 22726 net.cpp:408] bn_conv_relu_bn_conv_0_split -> bn_conv_relu_bn_conv_0_split_1\nI0818 13:44:37.282122 22726 net.cpp:150] Setting up bn_conv_relu_bn_conv_0_split\nI0818 13:44:37.282136 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.282142 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.282147 22726 net.cpp:165] Memory required for data: 50689500\nI0818 13:44:37.282152 22726 layer_factory.hpp:77] Creating layer Conv16_1\nI0818 13:44:37.282166 22726 net.cpp:100] Creating Layer Conv16_1\nI0818 13:44:37.282172 22726 net.cpp:434] Conv16_1 <- bn_conv_relu_bn_conv_0_split_0\nI0818 13:44:37.282181 22726 
net.cpp:408] Conv16_1 -> Conv16_1\nI0818 13:44:37.282609 22726 net.cpp:150] Setting up Conv16_1\nI0818 13:44:37.282624 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.282629 22726 net.cpp:165] Memory required for data: 58881500\nI0818 13:44:37.282645 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_1\nI0818 13:44:37.282660 22726 net.cpp:100] Creating Layer batchNorm_Conv16_1\nI0818 13:44:37.282666 22726 net.cpp:434] batchNorm_Conv16_1 <- Conv16_1\nI0818 13:44:37.282701 22726 net.cpp:408] batchNorm_Conv16_1 -> bn_Conv16_1\nI0818 13:44:37.283304 22726 net.cpp:150] Setting up batchNorm_Conv16_1\nI0818 13:44:37.283320 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.283325 22726 net.cpp:165] Memory required for data: 67073500\nI0818 13:44:37.283336 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1\nI0818 13:44:37.283345 22726 net.cpp:100] Creating Layer scale_Conv16_1\nI0818 13:44:37.283351 22726 net.cpp:434] scale_Conv16_1 <- bn_Conv16_1\nI0818 13:44:37.283360 22726 net.cpp:395] scale_Conv16_1 -> bn_Conv16_1 (in-place)\nI0818 13:44:37.283432 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1\nI0818 13:44:37.283612 22726 net.cpp:150] Setting up scale_Conv16_1\nI0818 13:44:37.283627 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.283632 22726 net.cpp:165] Memory required for data: 75265500\nI0818 13:44:37.283640 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_1\nI0818 13:44:37.283653 22726 net.cpp:100] Creating Layer relu_bn_Conv16_1\nI0818 13:44:37.283659 22726 net.cpp:434] relu_bn_Conv16_1 <- bn_Conv16_1\nI0818 13:44:37.283670 22726 net.cpp:395] relu_bn_Conv16_1 -> bn_Conv16_1 (in-place)\nI0818 13:44:37.283684 22726 net.cpp:150] Setting up relu_bn_Conv16_1\nI0818 13:44:37.283690 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.283704 22726 net.cpp:165] Memory required for data: 83457500\nI0818 13:44:37.283709 22726 layer_factory.hpp:77] Creating layer 
Conv16_1_b\nI0818 13:44:37.283721 22726 net.cpp:100] Creating Layer Conv16_1_b\nI0818 13:44:37.283728 22726 net.cpp:434] Conv16_1_b <- bn_Conv16_1\nI0818 13:44:37.283740 22726 net.cpp:408] Conv16_1_b -> Conv16_1_b\nI0818 13:44:37.284155 22726 net.cpp:150] Setting up Conv16_1_b\nI0818 13:44:37.284173 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.284179 22726 net.cpp:165] Memory required for data: 91649500\nI0818 13:44:37.284188 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_1_b\nI0818 13:44:37.284200 22726 net.cpp:100] Creating Layer batchNorm_Conv16_1_b\nI0818 13:44:37.284216 22726 net.cpp:434] batchNorm_Conv16_1_b <- Conv16_1_b\nI0818 13:44:37.284225 22726 net.cpp:408] batchNorm_Conv16_1_b -> bn_Conv16_1_b\nI0818 13:44:37.284540 22726 net.cpp:150] Setting up batchNorm_Conv16_1_b\nI0818 13:44:37.284554 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.284559 22726 net.cpp:165] Memory required for data: 99841500\nI0818 13:44:37.284580 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1_b\nI0818 13:44:37.284590 22726 net.cpp:100] Creating Layer scale_Conv16_1_b\nI0818 13:44:37.284595 22726 net.cpp:434] scale_Conv16_1_b <- bn_Conv16_1_b\nI0818 13:44:37.284606 22726 net.cpp:395] scale_Conv16_1_b -> bn_Conv16_1_b (in-place)\nI0818 13:44:37.284672 22726 layer_factory.hpp:77] Creating layer scale_Conv16_1_b\nI0818 13:44:37.284893 22726 net.cpp:150] Setting up scale_Conv16_1_b\nI0818 13:44:37.284909 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.284915 22726 net.cpp:165] Memory required for data: 108033500\nI0818 13:44:37.284924 22726 layer_factory.hpp:77] Creating layer sum_bn_conv\nI0818 13:44:37.284934 22726 net.cpp:100] Creating Layer sum_bn_conv\nI0818 13:44:37.284939 22726 net.cpp:434] sum_bn_conv <- bn_conv_relu_bn_conv_0_split_1\nI0818 13:44:37.284948 22726 net.cpp:434] sum_bn_conv <- bn_Conv16_1_b\nI0818 13:44:37.284960 22726 net.cpp:408] sum_bn_conv -> sum_bn_Conv16_1_b\nI0818 
13:44:37.285001 22726 net.cpp:150] Setting up sum_bn_conv\nI0818 13:44:37.285010 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.285017 22726 net.cpp:165] Memory required for data: 116225500\nI0818 13:44:37.285022 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_1_b\nI0818 13:44:37.285032 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_1_b\nI0818 13:44:37.285038 22726 net.cpp:434] relu_sum_bn_Conv16_1_b <- sum_bn_Conv16_1_b\nI0818 13:44:37.285048 22726 net.cpp:395] relu_sum_bn_Conv16_1_b -> sum_bn_Conv16_1_b (in-place)\nI0818 13:44:37.285058 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_1_b\nI0818 13:44:37.285065 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.285069 22726 net.cpp:165] Memory required for data: 124417500\nI0818 13:44:37.285074 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.285085 22726 net.cpp:100] Creating Layer sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.285091 22726 net.cpp:434] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split <- sum_bn_Conv16_1_b\nI0818 13:44:37.285099 22726 net.cpp:408] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split -> sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_0\nI0818 13:44:37.285107 22726 net.cpp:408] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split -> sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_1\nI0818 13:44:37.285167 22726 net.cpp:150] Setting up sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split\nI0818 13:44:37.285181 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.285187 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.285192 22726 net.cpp:165] Memory required for data: 140801500\nI0818 13:44:37.285200 22726 layer_factory.hpp:77] Creating layer Conv16_2\nI0818 13:44:37.285215 22726 net.cpp:100] Creating Layer Conv16_2\nI0818 13:44:37.285223 22726 net.cpp:434] Conv16_2 <- 
sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_0\nI0818 13:44:37.285235 22726 net.cpp:408] Conv16_2 -> Conv16_2\nI0818 13:44:37.285635 22726 net.cpp:150] Setting up Conv16_2\nI0818 13:44:37.285650 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.285653 22726 net.cpp:165] Memory required for data: 148993500\nI0818 13:44:37.285665 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_2\nI0818 13:44:37.285678 22726 net.cpp:100] Creating Layer batchNorm_Conv16_2\nI0818 13:44:37.285686 22726 net.cpp:434] batchNorm_Conv16_2 <- Conv16_2\nI0818 13:44:37.285696 22726 net.cpp:408] batchNorm_Conv16_2 -> bn_Conv16_2\nI0818 13:44:37.286061 22726 net.cpp:150] Setting up batchNorm_Conv16_2\nI0818 13:44:37.286075 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.286088 22726 net.cpp:165] Memory required for data: 157185500\nI0818 13:44:37.286103 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2\nI0818 13:44:37.286113 22726 net.cpp:100] Creating Layer scale_Conv16_2\nI0818 13:44:37.286118 22726 net.cpp:434] scale_Conv16_2 <- bn_Conv16_2\nI0818 13:44:37.286126 22726 net.cpp:395] scale_Conv16_2 -> bn_Conv16_2 (in-place)\nI0818 13:44:37.286300 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2\nI0818 13:44:37.286485 22726 net.cpp:150] Setting up scale_Conv16_2\nI0818 13:44:37.286501 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.286506 22726 net.cpp:165] Memory required for data: 165377500\nI0818 13:44:37.286515 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_2\nI0818 13:44:37.286523 22726 net.cpp:100] Creating Layer relu_bn_Conv16_2\nI0818 13:44:37.286538 22726 net.cpp:434] relu_bn_Conv16_2 <- bn_Conv16_2\nI0818 13:44:37.286547 22726 net.cpp:395] relu_bn_Conv16_2 -> bn_Conv16_2 (in-place)\nI0818 13:44:37.286558 22726 net.cpp:150] Setting up relu_bn_Conv16_2\nI0818 13:44:37.286566 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.286569 22726 net.cpp:165] Memory required 
for data: 173569500\nI0818 13:44:37.286577 22726 layer_factory.hpp:77] Creating layer Conv16_2_b\nI0818 13:44:37.286592 22726 net.cpp:100] Creating Layer Conv16_2_b\nI0818 13:44:37.286597 22726 net.cpp:434] Conv16_2_b <- bn_Conv16_2\nI0818 13:44:37.286609 22726 net.cpp:408] Conv16_2_b -> Conv16_2_b\nI0818 13:44:37.287034 22726 net.cpp:150] Setting up Conv16_2_b\nI0818 13:44:37.287050 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.287055 22726 net.cpp:165] Memory required for data: 181761500\nI0818 13:44:37.287063 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_2_b\nI0818 13:44:37.287078 22726 net.cpp:100] Creating Layer batchNorm_Conv16_2_b\nI0818 13:44:37.287086 22726 net.cpp:434] batchNorm_Conv16_2_b <- Conv16_2_b\nI0818 13:44:37.287093 22726 net.cpp:408] batchNorm_Conv16_2_b -> bn_Conv16_2_b\nI0818 13:44:37.287470 22726 net.cpp:150] Setting up batchNorm_Conv16_2_b\nI0818 13:44:37.287488 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.287493 22726 net.cpp:165] Memory required for data: 189953500\nI0818 13:44:37.287513 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2_b\nI0818 13:44:37.287521 22726 net.cpp:100] Creating Layer scale_Conv16_2_b\nI0818 13:44:37.287528 22726 net.cpp:434] scale_Conv16_2_b <- bn_Conv16_2_b\nI0818 13:44:37.287539 22726 net.cpp:395] scale_Conv16_2_b -> bn_Conv16_2_b (in-place)\nI0818 13:44:37.287613 22726 layer_factory.hpp:77] Creating layer scale_Conv16_2_b\nI0818 13:44:37.287798 22726 net.cpp:150] Setting up scale_Conv16_2_b\nI0818 13:44:37.287823 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.287830 22726 net.cpp:165] Memory required for data: 198145500\nI0818 13:44:37.287839 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_1_b\nI0818 13:44:37.287849 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_1_b\nI0818 13:44:37.287858 22726 net.cpp:434] sum_sum_bn_Conv16_1_b <- sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split_1\nI0818 
13:44:37.287865 22726 net.cpp:434] sum_sum_bn_Conv16_1_b <- bn_Conv16_2_b\nI0818 13:44:37.287873 22726 net.cpp:408] sum_sum_bn_Conv16_1_b -> sum_bn_Conv16_2_b\nI0818 13:44:37.287916 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_1_b\nI0818 13:44:37.287928 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.287935 22726 net.cpp:165] Memory required for data: 206337500\nI0818 13:44:37.287940 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_2_b\nI0818 13:44:37.287946 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_2_b\nI0818 13:44:37.287951 22726 net.cpp:434] relu_sum_bn_Conv16_2_b <- sum_bn_Conv16_2_b\nI0818 13:44:37.287958 22726 net.cpp:395] relu_sum_bn_Conv16_2_b -> sum_bn_Conv16_2_b (in-place)\nI0818 13:44:37.287967 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_2_b\nI0818 13:44:37.287974 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.287979 22726 net.cpp:165] Memory required for data: 214529500\nI0818 13:44:37.287994 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.288003 22726 net.cpp:100] Creating Layer sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.288008 22726 net.cpp:434] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split <- sum_bn_Conv16_2_b\nI0818 13:44:37.288018 22726 net.cpp:408] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split -> sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_0\nI0818 13:44:37.288031 22726 net.cpp:408] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split -> sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_1\nI0818 13:44:37.288087 22726 net.cpp:150] Setting up sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split\nI0818 13:44:37.288096 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.288103 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.288107 22726 net.cpp:165] Memory required for data: 230913500\nI0818 13:44:37.288112 22726 layer_factory.hpp:77] Creating layer 
Conv16_3\nI0818 13:44:37.288130 22726 net.cpp:100] Creating Layer Conv16_3\nI0818 13:44:37.288141 22726 net.cpp:434] Conv16_3 <- sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_0\nI0818 13:44:37.288151 22726 net.cpp:408] Conv16_3 -> Conv16_3\nI0818 13:44:37.288589 22726 net.cpp:150] Setting up Conv16_3\nI0818 13:44:37.288604 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.288611 22726 net.cpp:165] Memory required for data: 239105500\nI0818 13:44:37.288621 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_3\nI0818 13:44:37.288630 22726 net.cpp:100] Creating Layer batchNorm_Conv16_3\nI0818 13:44:37.288636 22726 net.cpp:434] batchNorm_Conv16_3 <- Conv16_3\nI0818 13:44:37.288650 22726 net.cpp:408] batchNorm_Conv16_3 -> bn_Conv16_3\nI0818 13:44:37.288983 22726 net.cpp:150] Setting up batchNorm_Conv16_3\nI0818 13:44:37.288998 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.289005 22726 net.cpp:165] Memory required for data: 247297500\nI0818 13:44:37.289014 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3\nI0818 13:44:37.289026 22726 net.cpp:100] Creating Layer scale_Conv16_3\nI0818 13:44:37.289032 22726 net.cpp:434] scale_Conv16_3 <- bn_Conv16_3\nI0818 13:44:37.289043 22726 net.cpp:395] scale_Conv16_3 -> bn_Conv16_3 (in-place)\nI0818 13:44:37.289113 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3\nI0818 13:44:37.289499 22726 net.cpp:150] Setting up scale_Conv16_3\nI0818 13:44:37.289515 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.289520 22726 net.cpp:165] Memory required for data: 255489500\nI0818 13:44:37.289528 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_3\nI0818 13:44:37.289539 22726 net.cpp:100] Creating Layer relu_bn_Conv16_3\nI0818 13:44:37.289546 22726 net.cpp:434] relu_bn_Conv16_3 <- bn_Conv16_3\nI0818 13:44:37.289553 22726 net.cpp:395] relu_bn_Conv16_3 -> bn_Conv16_3 (in-place)\nI0818 13:44:37.289562 22726 net.cpp:150] Setting up relu_bn_Conv16_3\nI0818 
13:44:37.289572 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.289579 22726 net.cpp:165] Memory required for data: 263681500\nI0818 13:44:37.289584 22726 layer_factory.hpp:77] Creating layer Conv16_3_b\nI0818 13:44:37.289599 22726 net.cpp:100] Creating Layer Conv16_3_b\nI0818 13:44:37.289605 22726 net.cpp:434] Conv16_3_b <- bn_Conv16_3\nI0818 13:44:37.289620 22726 net.cpp:408] Conv16_3_b -> Conv16_3_b\nI0818 13:44:37.290082 22726 net.cpp:150] Setting up Conv16_3_b\nI0818 13:44:37.290097 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.290102 22726 net.cpp:165] Memory required for data: 271873500\nI0818 13:44:37.290113 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_3_b\nI0818 13:44:37.290129 22726 net.cpp:100] Creating Layer batchNorm_Conv16_3_b\nI0818 13:44:37.290136 22726 net.cpp:434] batchNorm_Conv16_3_b <- Conv16_3_b\nI0818 13:44:37.290151 22726 net.cpp:408] batchNorm_Conv16_3_b -> bn_Conv16_3_b\nI0818 13:44:37.290467 22726 net.cpp:150] Setting up batchNorm_Conv16_3_b\nI0818 13:44:37.290482 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.290495 22726 net.cpp:165] Memory required for data: 280065500\nI0818 13:44:37.290506 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3_b\nI0818 13:44:37.290518 22726 net.cpp:100] Creating Layer scale_Conv16_3_b\nI0818 13:44:37.290524 22726 net.cpp:434] scale_Conv16_3_b <- bn_Conv16_3_b\nI0818 13:44:37.290532 22726 net.cpp:395] scale_Conv16_3_b -> bn_Conv16_3_b (in-place)\nI0818 13:44:37.290601 22726 layer_factory.hpp:77] Creating layer scale_Conv16_3_b\nI0818 13:44:37.290793 22726 net.cpp:150] Setting up scale_Conv16_3_b\nI0818 13:44:37.290813 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.290819 22726 net.cpp:165] Memory required for data: 288257500\nI0818 13:44:37.290830 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_2_b\nI0818 13:44:37.290843 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_2_b\nI0818 
13:44:37.290850 22726 net.cpp:434] sum_sum_bn_Conv16_2_b <- sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split_1\nI0818 13:44:37.290858 22726 net.cpp:434] sum_sum_bn_Conv16_2_b <- bn_Conv16_3_b\nI0818 13:44:37.290864 22726 net.cpp:408] sum_sum_bn_Conv16_2_b -> sum_bn_Conv16_3_b\nI0818 13:44:37.290908 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_2_b\nI0818 13:44:37.290918 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.290923 22726 net.cpp:165] Memory required for data: 296449500\nI0818 13:44:37.290928 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_3_b\nI0818 13:44:37.290935 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_3_b\nI0818 13:44:37.290940 22726 net.cpp:434] relu_sum_bn_Conv16_3_b <- sum_bn_Conv16_3_b\nI0818 13:44:37.290951 22726 net.cpp:395] relu_sum_bn_Conv16_3_b -> sum_bn_Conv16_3_b (in-place)\nI0818 13:44:37.290961 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_3_b\nI0818 13:44:37.290967 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.290972 22726 net.cpp:165] Memory required for data: 304641500\nI0818 13:44:37.290977 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.290983 22726 net.cpp:100] Creating Layer sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.290988 22726 net.cpp:434] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split <- sum_bn_Conv16_3_b\nI0818 13:44:37.290997 22726 net.cpp:408] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split -> sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_0\nI0818 13:44:37.291007 22726 net.cpp:408] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split -> sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_1\nI0818 13:44:37.291057 22726 net.cpp:150] Setting up sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split\nI0818 13:44:37.291066 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.291074 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.291079 22726 
net.cpp:165] Memory required for data: 321025500\nI0818 13:44:37.291084 22726 layer_factory.hpp:77] Creating layer Conv16_4\nI0818 13:44:37.291097 22726 net.cpp:100] Creating Layer Conv16_4\nI0818 13:44:37.291105 22726 net.cpp:434] Conv16_4 <- sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_0\nI0818 13:44:37.291113 22726 net.cpp:408] Conv16_4 -> Conv16_4\nI0818 13:44:37.291472 22726 net.cpp:150] Setting up Conv16_4\nI0818 13:44:37.291486 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.291491 22726 net.cpp:165] Memory required for data: 329217500\nI0818 13:44:37.291501 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_4\nI0818 13:44:37.291512 22726 net.cpp:100] Creating Layer batchNorm_Conv16_4\nI0818 13:44:37.291518 22726 net.cpp:434] batchNorm_Conv16_4 <- Conv16_4\nI0818 13:44:37.291527 22726 net.cpp:408] batchNorm_Conv16_4 -> bn_Conv16_4\nI0818 13:44:37.291800 22726 net.cpp:150] Setting up batchNorm_Conv16_4\nI0818 13:44:37.291817 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.291823 22726 net.cpp:165] Memory required for data: 337409500\nI0818 13:44:37.291833 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4\nI0818 13:44:37.291841 22726 net.cpp:100] Creating Layer scale_Conv16_4\nI0818 13:44:37.291854 22726 net.cpp:434] scale_Conv16_4 <- bn_Conv16_4\nI0818 13:44:37.291862 22726 net.cpp:395] scale_Conv16_4 -> bn_Conv16_4 (in-place)\nI0818 13:44:37.291924 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4\nI0818 13:44:37.292111 22726 net.cpp:150] Setting up scale_Conv16_4\nI0818 13:44:37.292126 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.292130 22726 net.cpp:165] Memory required for data: 345601500\nI0818 13:44:37.292140 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_4\nI0818 13:44:37.292147 22726 net.cpp:100] Creating Layer relu_bn_Conv16_4\nI0818 13:44:37.292153 22726 net.cpp:434] relu_bn_Conv16_4 <- bn_Conv16_4\nI0818 13:44:37.292163 22726 net.cpp:395] 
relu_bn_Conv16_4 -> bn_Conv16_4 (in-place)\nI0818 13:44:37.292173 22726 net.cpp:150] Setting up relu_bn_Conv16_4\nI0818 13:44:37.292181 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.292184 22726 net.cpp:165] Memory required for data: 353793500\nI0818 13:44:37.292189 22726 layer_factory.hpp:77] Creating layer Conv16_4_b\nI0818 13:44:37.292202 22726 net.cpp:100] Creating Layer Conv16_4_b\nI0818 13:44:37.292208 22726 net.cpp:434] Conv16_4_b <- bn_Conv16_4\nI0818 13:44:37.292217 22726 net.cpp:408] Conv16_4_b -> Conv16_4_b\nI0818 13:44:37.292579 22726 net.cpp:150] Setting up Conv16_4_b\nI0818 13:44:37.292593 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.292598 22726 net.cpp:165] Memory required for data: 361985500\nI0818 13:44:37.292606 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_4_b\nI0818 13:44:37.292618 22726 net.cpp:100] Creating Layer batchNorm_Conv16_4_b\nI0818 13:44:37.292623 22726 net.cpp:434] batchNorm_Conv16_4_b <- Conv16_4_b\nI0818 13:44:37.292631 22726 net.cpp:408] batchNorm_Conv16_4_b -> bn_Conv16_4_b\nI0818 13:44:37.292915 22726 net.cpp:150] Setting up batchNorm_Conv16_4_b\nI0818 13:44:37.292930 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.292934 22726 net.cpp:165] Memory required for data: 370177500\nI0818 13:44:37.292944 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4_b\nI0818 13:44:37.292953 22726 net.cpp:100] Creating Layer scale_Conv16_4_b\nI0818 13:44:37.292959 22726 net.cpp:434] scale_Conv16_4_b <- bn_Conv16_4_b\nI0818 13:44:37.292966 22726 net.cpp:395] scale_Conv16_4_b -> bn_Conv16_4_b (in-place)\nI0818 13:44:37.293027 22726 layer_factory.hpp:77] Creating layer scale_Conv16_4_b\nI0818 13:44:37.293184 22726 net.cpp:150] Setting up scale_Conv16_4_b\nI0818 13:44:37.293197 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.293201 22726 net.cpp:165] Memory required for data: 378369500\nI0818 13:44:37.293210 22726 layer_factory.hpp:77] 
Creating layer sum_sum_bn_Conv16_3_b\nI0818 13:44:37.293222 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_3_b\nI0818 13:44:37.293228 22726 net.cpp:434] sum_sum_bn_Conv16_3_b <- sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split_1\nI0818 13:44:37.293236 22726 net.cpp:434] sum_sum_bn_Conv16_3_b <- bn_Conv16_4_b\nI0818 13:44:37.293242 22726 net.cpp:408] sum_sum_bn_Conv16_3_b -> sum_bn_Conv16_4_b\nI0818 13:44:37.293280 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_3_b\nI0818 13:44:37.293292 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.293296 22726 net.cpp:165] Memory required for data: 386561500\nI0818 13:44:37.293301 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_4_b\nI0818 13:44:37.293308 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_4_b\nI0818 13:44:37.293314 22726 net.cpp:434] relu_sum_bn_Conv16_4_b <- sum_bn_Conv16_4_b\nI0818 13:44:37.293324 22726 net.cpp:395] relu_sum_bn_Conv16_4_b -> sum_bn_Conv16_4_b (in-place)\nI0818 13:44:37.293334 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_4_b\nI0818 13:44:37.293340 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.293345 22726 net.cpp:165] Memory required for data: 394753500\nI0818 13:44:37.293350 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.293357 22726 net.cpp:100] Creating Layer sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.293370 22726 net.cpp:434] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split <- sum_bn_Conv16_4_b\nI0818 13:44:37.293380 22726 net.cpp:408] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split -> sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_0\nI0818 13:44:37.293390 22726 net.cpp:408] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split -> sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_1\nI0818 13:44:37.293437 22726 net.cpp:150] Setting up sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split\nI0818 13:44:37.293447 22726 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 13:44:37.293452 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.293457 22726 net.cpp:165] Memory required for data: 411137500\nI0818 13:44:37.293462 22726 layer_factory.hpp:77] Creating layer Conv16_5\nI0818 13:44:37.293475 22726 net.cpp:100] Creating Layer Conv16_5\nI0818 13:44:37.293483 22726 net.cpp:434] Conv16_5 <- sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_0\nI0818 13:44:37.293491 22726 net.cpp:408] Conv16_5 -> Conv16_5\nI0818 13:44:37.293860 22726 net.cpp:150] Setting up Conv16_5\nI0818 13:44:37.293875 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.293880 22726 net.cpp:165] Memory required for data: 419329500\nI0818 13:44:37.293907 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_5\nI0818 13:44:37.293916 22726 net.cpp:100] Creating Layer batchNorm_Conv16_5\nI0818 13:44:37.293922 22726 net.cpp:434] batchNorm_Conv16_5 <- Conv16_5\nI0818 13:44:37.293933 22726 net.cpp:408] batchNorm_Conv16_5 -> bn_Conv16_5\nI0818 13:44:37.294209 22726 net.cpp:150] Setting up batchNorm_Conv16_5\nI0818 13:44:37.294221 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.294226 22726 net.cpp:165] Memory required for data: 427521500\nI0818 13:44:37.294237 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5\nI0818 13:44:37.294245 22726 net.cpp:100] Creating Layer scale_Conv16_5\nI0818 13:44:37.294251 22726 net.cpp:434] scale_Conv16_5 <- bn_Conv16_5\nI0818 13:44:37.294258 22726 net.cpp:395] scale_Conv16_5 -> bn_Conv16_5 (in-place)\nI0818 13:44:37.294320 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5\nI0818 13:44:37.294479 22726 net.cpp:150] Setting up scale_Conv16_5\nI0818 13:44:37.294493 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.294497 22726 net.cpp:165] Memory required for data: 435713500\nI0818 13:44:37.294507 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_5\nI0818 13:44:37.294514 22726 net.cpp:100] Creating Layer 
relu_bn_Conv16_5\nI0818 13:44:37.294520 22726 net.cpp:434] relu_bn_Conv16_5 <- bn_Conv16_5\nI0818 13:44:37.294530 22726 net.cpp:395] relu_bn_Conv16_5 -> bn_Conv16_5 (in-place)\nI0818 13:44:37.294540 22726 net.cpp:150] Setting up relu_bn_Conv16_5\nI0818 13:44:37.294546 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.294551 22726 net.cpp:165] Memory required for data: 443905500\nI0818 13:44:37.294555 22726 layer_factory.hpp:77] Creating layer Conv16_5_b\nI0818 13:44:37.294569 22726 net.cpp:100] Creating Layer Conv16_5_b\nI0818 13:44:37.294574 22726 net.cpp:434] Conv16_5_b <- bn_Conv16_5\nI0818 13:44:37.294584 22726 net.cpp:408] Conv16_5_b -> Conv16_5_b\nI0818 13:44:37.294950 22726 net.cpp:150] Setting up Conv16_5_b\nI0818 13:44:37.294965 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.294970 22726 net.cpp:165] Memory required for data: 452097500\nI0818 13:44:37.294977 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_5_b\nI0818 13:44:37.294986 22726 net.cpp:100] Creating Layer batchNorm_Conv16_5_b\nI0818 13:44:37.294991 22726 net.cpp:434] batchNorm_Conv16_5_b <- Conv16_5_b\nI0818 13:44:37.295003 22726 net.cpp:408] batchNorm_Conv16_5_b -> bn_Conv16_5_b\nI0818 13:44:37.295284 22726 net.cpp:150] Setting up batchNorm_Conv16_5_b\nI0818 13:44:37.295301 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.295307 22726 net.cpp:165] Memory required for data: 460289500\nI0818 13:44:37.295317 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5_b\nI0818 13:44:37.295325 22726 net.cpp:100] Creating Layer scale_Conv16_5_b\nI0818 13:44:37.295337 22726 net.cpp:434] scale_Conv16_5_b <- bn_Conv16_5_b\nI0818 13:44:37.295346 22726 net.cpp:395] scale_Conv16_5_b -> bn_Conv16_5_b (in-place)\nI0818 13:44:37.295405 22726 layer_factory.hpp:77] Creating layer scale_Conv16_5_b\nI0818 13:44:37.295568 22726 net.cpp:150] Setting up scale_Conv16_5_b\nI0818 13:44:37.295581 22726 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 13:44:37.295586 22726 net.cpp:165] Memory required for data: 468481500\nI0818 13:44:37.295595 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_4_b\nI0818 13:44:37.295616 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_4_b\nI0818 13:44:37.295624 22726 net.cpp:434] sum_sum_bn_Conv16_4_b <- sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split_1\nI0818 13:44:37.295631 22726 net.cpp:434] sum_sum_bn_Conv16_4_b <- bn_Conv16_5_b\nI0818 13:44:37.295642 22726 net.cpp:408] sum_sum_bn_Conv16_4_b -> sum_bn_Conv16_5_b\nI0818 13:44:37.295678 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_4_b\nI0818 13:44:37.295687 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.295692 22726 net.cpp:165] Memory required for data: 476673500\nI0818 13:44:37.295697 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_5_b\nI0818 13:44:37.295709 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_5_b\nI0818 13:44:37.295716 22726 net.cpp:434] relu_sum_bn_Conv16_5_b <- sum_bn_Conv16_5_b\nI0818 13:44:37.295722 22726 net.cpp:395] relu_sum_bn_Conv16_5_b -> sum_bn_Conv16_5_b (in-place)\nI0818 13:44:37.295732 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_5_b\nI0818 13:44:37.295738 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.295742 22726 net.cpp:165] Memory required for data: 484865500\nI0818 13:44:37.295747 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.295753 22726 net.cpp:100] Creating Layer sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.295758 22726 net.cpp:434] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split <- sum_bn_Conv16_5_b\nI0818 13:44:37.295766 22726 net.cpp:408] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split -> sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_0\nI0818 13:44:37.295775 22726 net.cpp:408] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split -> sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_1\nI0818 13:44:37.295835 
22726 net.cpp:150] Setting up sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split\nI0818 13:44:37.295847 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.295853 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.295861 22726 net.cpp:165] Memory required for data: 501249500\nI0818 13:44:37.295866 22726 layer_factory.hpp:77] Creating layer Conv16_6\nI0818 13:44:37.295881 22726 net.cpp:100] Creating Layer Conv16_6\nI0818 13:44:37.295887 22726 net.cpp:434] Conv16_6 <- sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_0\nI0818 13:44:37.295897 22726 net.cpp:408] Conv16_6 -> Conv16_6\nI0818 13:44:37.296253 22726 net.cpp:150] Setting up Conv16_6\nI0818 13:44:37.296267 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.296272 22726 net.cpp:165] Memory required for data: 509441500\nI0818 13:44:37.296281 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_6\nI0818 13:44:37.296293 22726 net.cpp:100] Creating Layer batchNorm_Conv16_6\nI0818 13:44:37.296298 22726 net.cpp:434] batchNorm_Conv16_6 <- Conv16_6\nI0818 13:44:37.296306 22726 net.cpp:408] batchNorm_Conv16_6 -> bn_Conv16_6\nI0818 13:44:37.296583 22726 net.cpp:150] Setting up batchNorm_Conv16_6\nI0818 13:44:37.296602 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.296607 22726 net.cpp:165] Memory required for data: 517633500\nI0818 13:44:37.296617 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6\nI0818 13:44:37.296624 22726 net.cpp:100] Creating Layer scale_Conv16_6\nI0818 13:44:37.296630 22726 net.cpp:434] scale_Conv16_6 <- bn_Conv16_6\nI0818 13:44:37.296638 22726 net.cpp:395] scale_Conv16_6 -> bn_Conv16_6 (in-place)\nI0818 13:44:37.296695 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6\nI0818 13:44:37.296877 22726 net.cpp:150] Setting up scale_Conv16_6\nI0818 13:44:37.296891 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.296895 22726 net.cpp:165] Memory required for data: 525825500\nI0818 
13:44:37.296905 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_6\nI0818 13:44:37.296916 22726 net.cpp:100] Creating Layer relu_bn_Conv16_6\nI0818 13:44:37.296922 22726 net.cpp:434] relu_bn_Conv16_6 <- bn_Conv16_6\nI0818 13:44:37.296932 22726 net.cpp:395] relu_bn_Conv16_6 -> bn_Conv16_6 (in-place)\nI0818 13:44:37.296942 22726 net.cpp:150] Setting up relu_bn_Conv16_6\nI0818 13:44:37.296949 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.296954 22726 net.cpp:165] Memory required for data: 534017500\nI0818 13:44:37.296958 22726 layer_factory.hpp:77] Creating layer Conv16_6_b\nI0818 13:44:37.296968 22726 net.cpp:100] Creating Layer Conv16_6_b\nI0818 13:44:37.296974 22726 net.cpp:434] Conv16_6_b <- bn_Conv16_6\nI0818 13:44:37.296986 22726 net.cpp:408] Conv16_6_b -> Conv16_6_b\nI0818 13:44:37.297338 22726 net.cpp:150] Setting up Conv16_6_b\nI0818 13:44:37.297351 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.297356 22726 net.cpp:165] Memory required for data: 542209500\nI0818 13:44:37.297364 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_6_b\nI0818 13:44:37.297374 22726 net.cpp:100] Creating Layer batchNorm_Conv16_6_b\nI0818 13:44:37.297379 22726 net.cpp:434] batchNorm_Conv16_6_b <- Conv16_6_b\nI0818 13:44:37.297390 22726 net.cpp:408] batchNorm_Conv16_6_b -> bn_Conv16_6_b\nI0818 13:44:37.297693 22726 net.cpp:150] Setting up batchNorm_Conv16_6_b\nI0818 13:44:37.297706 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.297711 22726 net.cpp:165] Memory required for data: 550401500\nI0818 13:44:37.297721 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6_b\nI0818 13:44:37.297734 22726 net.cpp:100] Creating Layer scale_Conv16_6_b\nI0818 13:44:37.297740 22726 net.cpp:434] scale_Conv16_6_b <- bn_Conv16_6_b\nI0818 13:44:37.297749 22726 net.cpp:395] scale_Conv16_6_b -> bn_Conv16_6_b (in-place)\nI0818 13:44:37.297813 22726 layer_factory.hpp:77] Creating layer scale_Conv16_6_b\nI0818 
13:44:37.297976 22726 net.cpp:150] Setting up scale_Conv16_6_b\nI0818 13:44:37.297988 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.297993 22726 net.cpp:165] Memory required for data: 558593500\nI0818 13:44:37.298002 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_5_b\nI0818 13:44:37.298022 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_5_b\nI0818 13:44:37.298029 22726 net.cpp:434] sum_sum_bn_Conv16_5_b <- sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split_1\nI0818 13:44:37.298038 22726 net.cpp:434] sum_sum_bn_Conv16_5_b <- bn_Conv16_6_b\nI0818 13:44:37.298044 22726 net.cpp:408] sum_sum_bn_Conv16_5_b -> sum_bn_Conv16_6_b\nI0818 13:44:37.298084 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_5_b\nI0818 13:44:37.298094 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.298099 22726 net.cpp:165] Memory required for data: 566785500\nI0818 13:44:37.298104 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_6_b\nI0818 13:44:37.298110 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_6_b\nI0818 13:44:37.298115 22726 net.cpp:434] relu_sum_bn_Conv16_6_b <- sum_bn_Conv16_6_b\nI0818 13:44:37.298122 22726 net.cpp:395] relu_sum_bn_Conv16_6_b -> sum_bn_Conv16_6_b (in-place)\nI0818 13:44:37.298131 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_6_b\nI0818 13:44:37.298138 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.298142 22726 net.cpp:165] Memory required for data: 574977500\nI0818 13:44:37.298147 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.298153 22726 net.cpp:100] Creating Layer sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.298158 22726 net.cpp:434] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split <- sum_bn_Conv16_6_b\nI0818 13:44:37.298168 22726 net.cpp:408] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split -> sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_0\nI0818 13:44:37.298185 22726 net.cpp:408] 
sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split -> sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_1\nI0818 13:44:37.298234 22726 net.cpp:150] Setting up sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split\nI0818 13:44:37.298243 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.298249 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.298254 22726 net.cpp:165] Memory required for data: 591361500\nI0818 13:44:37.298259 22726 layer_factory.hpp:77] Creating layer Conv16_7\nI0818 13:44:37.298272 22726 net.cpp:100] Creating Layer Conv16_7\nI0818 13:44:37.298280 22726 net.cpp:434] Conv16_7 <- sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_0\nI0818 13:44:37.298288 22726 net.cpp:408] Conv16_7 -> Conv16_7\nI0818 13:44:37.298648 22726 net.cpp:150] Setting up Conv16_7\nI0818 13:44:37.298663 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.298667 22726 net.cpp:165] Memory required for data: 599553500\nI0818 13:44:37.298676 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_7\nI0818 13:44:37.298691 22726 net.cpp:100] Creating Layer batchNorm_Conv16_7\nI0818 13:44:37.298696 22726 net.cpp:434] batchNorm_Conv16_7 <- Conv16_7\nI0818 13:44:37.298707 22726 net.cpp:408] batchNorm_Conv16_7 -> bn_Conv16_7\nI0818 13:44:37.298995 22726 net.cpp:150] Setting up batchNorm_Conv16_7\nI0818 13:44:37.299010 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.299013 22726 net.cpp:165] Memory required for data: 607745500\nI0818 13:44:37.299024 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7\nI0818 13:44:37.299032 22726 net.cpp:100] Creating Layer scale_Conv16_7\nI0818 13:44:37.299038 22726 net.cpp:434] scale_Conv16_7 <- bn_Conv16_7\nI0818 13:44:37.299046 22726 net.cpp:395] scale_Conv16_7 -> bn_Conv16_7 (in-place)\nI0818 13:44:37.299106 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7\nI0818 13:44:37.299266 22726 net.cpp:150] Setting up scale_Conv16_7\nI0818 13:44:37.299279 22726 net.cpp:157] Top 
shape: 125 16 32 32 (2048000)\nI0818 13:44:37.299284 22726 net.cpp:165] Memory required for data: 615937500\nI0818 13:44:37.299293 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_7\nI0818 13:44:37.299300 22726 net.cpp:100] Creating Layer relu_bn_Conv16_7\nI0818 13:44:37.299310 22726 net.cpp:434] relu_bn_Conv16_7 <- bn_Conv16_7\nI0818 13:44:37.299317 22726 net.cpp:395] relu_bn_Conv16_7 -> bn_Conv16_7 (in-place)\nI0818 13:44:37.299326 22726 net.cpp:150] Setting up relu_bn_Conv16_7\nI0818 13:44:37.299334 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.299338 22726 net.cpp:165] Memory required for data: 624129500\nI0818 13:44:37.299342 22726 layer_factory.hpp:77] Creating layer Conv16_7_b\nI0818 13:44:37.299355 22726 net.cpp:100] Creating Layer Conv16_7_b\nI0818 13:44:37.299361 22726 net.cpp:434] Conv16_7_b <- bn_Conv16_7\nI0818 13:44:37.299372 22726 net.cpp:408] Conv16_7_b -> Conv16_7_b\nI0818 13:44:37.299741 22726 net.cpp:150] Setting up Conv16_7_b\nI0818 13:44:37.299756 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.299760 22726 net.cpp:165] Memory required for data: 632321500\nI0818 13:44:37.299769 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_7_b\nI0818 13:44:37.299780 22726 net.cpp:100] Creating Layer batchNorm_Conv16_7_b\nI0818 13:44:37.299787 22726 net.cpp:434] batchNorm_Conv16_7_b <- Conv16_7_b\nI0818 13:44:37.299794 22726 net.cpp:408] batchNorm_Conv16_7_b -> bn_Conv16_7_b\nI0818 13:44:37.300078 22726 net.cpp:150] Setting up batchNorm_Conv16_7_b\nI0818 13:44:37.300094 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300099 22726 net.cpp:165] Memory required for data: 640513500\nI0818 13:44:37.300109 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7_b\nI0818 13:44:37.300119 22726 net.cpp:100] Creating Layer scale_Conv16_7_b\nI0818 13:44:37.300125 22726 net.cpp:434] scale_Conv16_7_b <- bn_Conv16_7_b\nI0818 13:44:37.300133 22726 net.cpp:395] scale_Conv16_7_b -> 
bn_Conv16_7_b (in-place)\nI0818 13:44:37.300197 22726 layer_factory.hpp:77] Creating layer scale_Conv16_7_b\nI0818 13:44:37.300367 22726 net.cpp:150] Setting up scale_Conv16_7_b\nI0818 13:44:37.300381 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300385 22726 net.cpp:165] Memory required for data: 648705500\nI0818 13:44:37.300395 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_6_b\nI0818 13:44:37.300403 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_6_b\nI0818 13:44:37.300410 22726 net.cpp:434] sum_sum_bn_Conv16_6_b <- sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split_1\nI0818 13:44:37.300416 22726 net.cpp:434] sum_sum_bn_Conv16_6_b <- bn_Conv16_7_b\nI0818 13:44:37.300427 22726 net.cpp:408] sum_sum_bn_Conv16_6_b -> sum_bn_Conv16_7_b\nI0818 13:44:37.300462 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_6_b\nI0818 13:44:37.300472 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300475 22726 net.cpp:165] Memory required for data: 656897500\nI0818 13:44:37.300480 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_7_b\nI0818 13:44:37.300492 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_7_b\nI0818 13:44:37.300498 22726 net.cpp:434] relu_sum_bn_Conv16_7_b <- sum_bn_Conv16_7_b\nI0818 13:44:37.300504 22726 net.cpp:395] relu_sum_bn_Conv16_7_b -> sum_bn_Conv16_7_b (in-place)\nI0818 13:44:37.300513 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_7_b\nI0818 13:44:37.300519 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300524 22726 net.cpp:165] Memory required for data: 665089500\nI0818 13:44:37.300529 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.300535 22726 net.cpp:100] Creating Layer sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.300540 22726 net.cpp:434] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split <- sum_bn_Conv16_7_b\nI0818 13:44:37.300547 22726 net.cpp:408] 
sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split -> sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_0\nI0818 13:44:37.300556 22726 net.cpp:408] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split -> sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_1\nI0818 13:44:37.300606 22726 net.cpp:150] Setting up sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split\nI0818 13:44:37.300618 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300624 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.300629 22726 net.cpp:165] Memory required for data: 681473500\nI0818 13:44:37.300634 22726 layer_factory.hpp:77] Creating layer Conv16_8\nI0818 13:44:37.300647 22726 net.cpp:100] Creating Layer Conv16_8\nI0818 13:44:37.300653 22726 net.cpp:434] Conv16_8 <- sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_0\nI0818 13:44:37.300662 22726 net.cpp:408] Conv16_8 -> Conv16_8\nI0818 13:44:37.301030 22726 net.cpp:150] Setting up Conv16_8\nI0818 13:44:37.301044 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.301049 22726 net.cpp:165] Memory required for data: 689665500\nI0818 13:44:37.301057 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_8\nI0818 13:44:37.301069 22726 net.cpp:100] Creating Layer batchNorm_Conv16_8\nI0818 13:44:37.301075 22726 net.cpp:434] batchNorm_Conv16_8 <- Conv16_8\nI0818 13:44:37.301084 22726 net.cpp:408] batchNorm_Conv16_8 -> bn_Conv16_8\nI0818 13:44:37.301367 22726 net.cpp:150] Setting up batchNorm_Conv16_8\nI0818 13:44:37.301379 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.301383 22726 net.cpp:165] Memory required for data: 697857500\nI0818 13:44:37.301393 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8\nI0818 13:44:37.301403 22726 net.cpp:100] Creating Layer scale_Conv16_8\nI0818 13:44:37.301409 22726 net.cpp:434] scale_Conv16_8 <- bn_Conv16_8\nI0818 13:44:37.301415 22726 net.cpp:395] scale_Conv16_8 -> bn_Conv16_8 (in-place)\nI0818 13:44:37.301476 22726 layer_factory.hpp:77] 
Creating layer scale_Conv16_8\nI0818 13:44:37.301645 22726 net.cpp:150] Setting up scale_Conv16_8\nI0818 13:44:37.301657 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.301662 22726 net.cpp:165] Memory required for data: 706049500\nI0818 13:44:37.301678 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_8\nI0818 13:44:37.301687 22726 net.cpp:100] Creating Layer relu_bn_Conv16_8\nI0818 13:44:37.301692 22726 net.cpp:434] relu_bn_Conv16_8 <- bn_Conv16_8\nI0818 13:44:37.301705 22726 net.cpp:395] relu_bn_Conv16_8 -> bn_Conv16_8 (in-place)\nI0818 13:44:37.301714 22726 net.cpp:150] Setting up relu_bn_Conv16_8\nI0818 13:44:37.301722 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.301726 22726 net.cpp:165] Memory required for data: 714241500\nI0818 13:44:37.301730 22726 layer_factory.hpp:77] Creating layer Conv16_8_b\nI0818 13:44:37.301741 22726 net.cpp:100] Creating Layer Conv16_8_b\nI0818 13:44:37.301746 22726 net.cpp:434] Conv16_8_b <- bn_Conv16_8\nI0818 13:44:37.301758 22726 net.cpp:408] Conv16_8_b -> Conv16_8_b\nI0818 13:44:37.302124 22726 net.cpp:150] Setting up Conv16_8_b\nI0818 13:44:37.302139 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.302144 22726 net.cpp:165] Memory required for data: 722433500\nI0818 13:44:37.302152 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_8_b\nI0818 13:44:37.302160 22726 net.cpp:100] Creating Layer batchNorm_Conv16_8_b\nI0818 13:44:37.302166 22726 net.cpp:434] batchNorm_Conv16_8_b <- Conv16_8_b\nI0818 13:44:37.302177 22726 net.cpp:408] batchNorm_Conv16_8_b -> bn_Conv16_8_b\nI0818 13:44:37.302458 22726 net.cpp:150] Setting up batchNorm_Conv16_8_b\nI0818 13:44:37.302469 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.302474 22726 net.cpp:165] Memory required for data: 730625500\nI0818 13:44:37.302484 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8_b\nI0818 13:44:37.302495 22726 net.cpp:100] Creating Layer 
scale_Conv16_8_b\nI0818 13:44:37.302502 22726 net.cpp:434] scale_Conv16_8_b <- bn_Conv16_8_b\nI0818 13:44:37.302510 22726 net.cpp:395] scale_Conv16_8_b -> bn_Conv16_8_b (in-place)\nI0818 13:44:37.302567 22726 layer_factory.hpp:77] Creating layer scale_Conv16_8_b\nI0818 13:44:37.302729 22726 net.cpp:150] Setting up scale_Conv16_8_b\nI0818 13:44:37.302742 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.302747 22726 net.cpp:165] Memory required for data: 738817500\nI0818 13:44:37.302757 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_7_b\nI0818 13:44:37.302767 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_7_b\nI0818 13:44:37.302773 22726 net.cpp:434] sum_sum_bn_Conv16_7_b <- sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split_1\nI0818 13:44:37.302781 22726 net.cpp:434] sum_sum_bn_Conv16_7_b <- bn_Conv16_8_b\nI0818 13:44:37.302788 22726 net.cpp:408] sum_sum_bn_Conv16_7_b -> sum_bn_Conv16_8_b\nI0818 13:44:37.302832 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_7_b\nI0818 13:44:37.302845 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.302850 22726 net.cpp:165] Memory required for data: 747009500\nI0818 13:44:37.302855 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_8_b\nI0818 13:44:37.302861 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_8_b\nI0818 13:44:37.302867 22726 net.cpp:434] relu_sum_bn_Conv16_8_b <- sum_bn_Conv16_8_b\nI0818 13:44:37.302877 22726 net.cpp:395] relu_sum_bn_Conv16_8_b -> sum_bn_Conv16_8_b (in-place)\nI0818 13:44:37.302887 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_8_b\nI0818 13:44:37.302894 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.302898 22726 net.cpp:165] Memory required for data: 755201500\nI0818 13:44:37.302903 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.302909 22726 net.cpp:100] Creating Layer sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.302914 22726 
net.cpp:434] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split <- sum_bn_Conv16_8_b\nI0818 13:44:37.302922 22726 net.cpp:408] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split -> sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_0\nI0818 13:44:37.302932 22726 net.cpp:408] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split -> sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_1\nI0818 13:44:37.302983 22726 net.cpp:150] Setting up sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split\nI0818 13:44:37.302999 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.303005 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.303009 22726 net.cpp:165] Memory required for data: 771585500\nI0818 13:44:37.303014 22726 layer_factory.hpp:77] Creating layer Conv16_9\nI0818 13:44:37.303027 22726 net.cpp:100] Creating Layer Conv16_9\nI0818 13:44:37.303033 22726 net.cpp:434] Conv16_9 <- sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_0\nI0818 13:44:37.303043 22726 net.cpp:408] Conv16_9 -> Conv16_9\nI0818 13:44:37.303412 22726 net.cpp:150] Setting up Conv16_9\nI0818 13:44:37.303429 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.303434 22726 net.cpp:165] Memory required for data: 779777500\nI0818 13:44:37.303442 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_9\nI0818 13:44:37.303453 22726 net.cpp:100] Creating Layer batchNorm_Conv16_9\nI0818 13:44:37.303459 22726 net.cpp:434] batchNorm_Conv16_9 <- Conv16_9\nI0818 13:44:37.303468 22726 net.cpp:408] batchNorm_Conv16_9 -> bn_Conv16_9\nI0818 13:44:37.303752 22726 net.cpp:150] Setting up batchNorm_Conv16_9\nI0818 13:44:37.303766 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.303769 22726 net.cpp:165] Memory required for data: 787969500\nI0818 13:44:37.303781 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9\nI0818 13:44:37.303789 22726 net.cpp:100] Creating Layer scale_Conv16_9\nI0818 13:44:37.303794 22726 net.cpp:434] scale_Conv16_9 <- bn_Conv16_9\nI0818 
13:44:37.303805 22726 net.cpp:395] scale_Conv16_9 -> bn_Conv16_9 (in-place)\nI0818 13:44:37.303872 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9\nI0818 13:44:37.304035 22726 net.cpp:150] Setting up scale_Conv16_9\nI0818 13:44:37.304050 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.304055 22726 net.cpp:165] Memory required for data: 796161500\nI0818 13:44:37.304064 22726 layer_factory.hpp:77] Creating layer relu_bn_Conv16_9\nI0818 13:44:37.304072 22726 net.cpp:100] Creating Layer relu_bn_Conv16_9\nI0818 13:44:37.304078 22726 net.cpp:434] relu_bn_Conv16_9 <- bn_Conv16_9\nI0818 13:44:37.304085 22726 net.cpp:395] relu_bn_Conv16_9 -> bn_Conv16_9 (in-place)\nI0818 13:44:37.304095 22726 net.cpp:150] Setting up relu_bn_Conv16_9\nI0818 13:44:37.304101 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.304105 22726 net.cpp:165] Memory required for data: 804353500\nI0818 13:44:37.304111 22726 layer_factory.hpp:77] Creating layer Conv16_9_b\nI0818 13:44:37.304123 22726 net.cpp:100] Creating Layer Conv16_9_b\nI0818 13:44:37.304129 22726 net.cpp:434] Conv16_9_b <- bn_Conv16_9\nI0818 13:44:37.304142 22726 net.cpp:408] Conv16_9_b -> Conv16_9_b\nI0818 13:44:37.304502 22726 net.cpp:150] Setting up Conv16_9_b\nI0818 13:44:37.304515 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.304520 22726 net.cpp:165] Memory required for data: 812545500\nI0818 13:44:37.304528 22726 layer_factory.hpp:77] Creating layer batchNorm_Conv16_9_b\nI0818 13:44:37.304539 22726 net.cpp:100] Creating Layer batchNorm_Conv16_9_b\nI0818 13:44:37.304545 22726 net.cpp:434] batchNorm_Conv16_9_b <- Conv16_9_b\nI0818 13:44:37.304554 22726 net.cpp:408] batchNorm_Conv16_9_b -> bn_Conv16_9_b\nI0818 13:44:37.304842 22726 net.cpp:150] Setting up batchNorm_Conv16_9_b\nI0818 13:44:37.304855 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.304860 22726 net.cpp:165] Memory required for data: 820737500\nI0818 13:44:37.304893 22726 
layer_factory.hpp:77] Creating layer scale_Conv16_9_b\nI0818 13:44:37.304903 22726 net.cpp:100] Creating Layer scale_Conv16_9_b\nI0818 13:44:37.304908 22726 net.cpp:434] scale_Conv16_9_b <- bn_Conv16_9_b\nI0818 13:44:37.304919 22726 net.cpp:395] scale_Conv16_9_b -> bn_Conv16_9_b (in-place)\nI0818 13:44:37.304976 22726 layer_factory.hpp:77] Creating layer scale_Conv16_9_b\nI0818 13:44:37.305145 22726 net.cpp:150] Setting up scale_Conv16_9_b\nI0818 13:44:37.305158 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.305163 22726 net.cpp:165] Memory required for data: 828929500\nI0818 13:44:37.305179 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_Conv16_8_b\nI0818 13:44:37.305191 22726 net.cpp:100] Creating Layer sum_sum_bn_Conv16_8_b\nI0818 13:44:37.305197 22726 net.cpp:434] sum_sum_bn_Conv16_8_b <- sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split_1\nI0818 13:44:37.305205 22726 net.cpp:434] sum_sum_bn_Conv16_8_b <- bn_Conv16_9_b\nI0818 13:44:37.305213 22726 net.cpp:408] sum_sum_bn_Conv16_8_b -> sum_bn_Conv16_9_b\nI0818 13:44:37.305248 22726 net.cpp:150] Setting up sum_sum_bn_Conv16_8_b\nI0818 13:44:37.305259 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.305264 22726 net.cpp:165] Memory required for data: 837121500\nI0818 13:44:37.305269 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_Conv16_9_b\nI0818 13:44:37.305279 22726 net.cpp:100] Creating Layer relu_sum_bn_Conv16_9_b\nI0818 13:44:37.305285 22726 net.cpp:434] relu_sum_bn_Conv16_9_b <- sum_bn_Conv16_9_b\nI0818 13:44:37.305292 22726 net.cpp:395] relu_sum_bn_Conv16_9_b -> sum_bn_Conv16_9_b (in-place)\nI0818 13:44:37.305302 22726 net.cpp:150] Setting up relu_sum_bn_Conv16_9_b\nI0818 13:44:37.305308 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.305313 22726 net.cpp:165] Memory required for data: 845313500\nI0818 13:44:37.305317 22726 layer_factory.hpp:77] Creating layer sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.305327 
22726 net.cpp:100] Creating Layer sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.305332 22726 net.cpp:434] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split <- sum_bn_Conv16_9_b\nI0818 13:44:37.305339 22726 net.cpp:408] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split -> sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_0\nI0818 13:44:37.305348 22726 net.cpp:408] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split -> sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_1\nI0818 13:44:37.305402 22726 net.cpp:150] Setting up sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split\nI0818 13:44:37.305413 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.305419 22726 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:37.305423 22726 net.cpp:165] Memory required for data: 861697500\nI0818 13:44:37.305428 22726 layer_factory.hpp:77] Creating layer resblk32\nI0818 13:44:37.305441 22726 net.cpp:100] Creating Layer resblk32\nI0818 13:44:37.305449 22726 net.cpp:434] resblk32 <- sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_0\nI0818 13:44:37.305456 22726 net.cpp:408] resblk32 -> resblk32\nI0818 13:44:37.305829 22726 net.cpp:150] Setting up resblk32\nI0818 13:44:37.305842 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.305847 22726 net.cpp:165] Memory required for data: 863745500\nI0818 13:44:37.305856 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32\nI0818 13:44:37.305867 22726 net.cpp:100] Creating Layer batchNorm_resblk32\nI0818 13:44:37.305873 22726 net.cpp:434] batchNorm_resblk32 <- resblk32\nI0818 13:44:37.305882 22726 net.cpp:408] batchNorm_resblk32 -> bn_resblk32\nI0818 13:44:37.306154 22726 net.cpp:150] Setting up batchNorm_resblk32\nI0818 13:44:37.306170 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.306175 22726 net.cpp:165] Memory required for data: 865793500\nI0818 13:44:37.306185 22726 layer_factory.hpp:77] Creating layer scale_resblk32\nI0818 13:44:37.306195 22726 net.cpp:100] 
Creating Layer scale_resblk32\nI0818 13:44:37.306200 22726 net.cpp:434] scale_resblk32 <- bn_resblk32\nI0818 13:44:37.306207 22726 net.cpp:395] scale_resblk32 -> bn_resblk32 (in-place)\nI0818 13:44:37.306267 22726 layer_factory.hpp:77] Creating layer scale_resblk32\nI0818 13:44:37.306430 22726 net.cpp:150] Setting up scale_resblk32\nI0818 13:44:37.306443 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.306448 22726 net.cpp:165] Memory required for data: 867841500\nI0818 13:44:37.306457 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32\nI0818 13:44:37.306464 22726 net.cpp:100] Creating Layer relu_bn_resblk32\nI0818 13:44:37.306470 22726 net.cpp:434] relu_bn_resblk32 <- bn_resblk32\nI0818 13:44:37.306490 22726 net.cpp:395] relu_bn_resblk32 -> bn_resblk32 (in-place)\nI0818 13:44:37.306500 22726 net.cpp:150] Setting up relu_bn_resblk32\nI0818 13:44:37.306507 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.306511 22726 net.cpp:165] Memory required for data: 869889500\nI0818 13:44:37.306516 22726 layer_factory.hpp:77] Creating layer resblk32_b\nI0818 13:44:37.306529 22726 net.cpp:100] Creating Layer resblk32_b\nI0818 13:44:37.306535 22726 net.cpp:434] resblk32_b <- bn_resblk32\nI0818 13:44:37.306543 22726 net.cpp:408] resblk32_b -> resblk32_b\nI0818 13:44:37.306910 22726 net.cpp:150] Setting up resblk32_b\nI0818 13:44:37.306924 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.306929 22726 net.cpp:165] Memory required for data: 871937500\nI0818 13:44:37.306938 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_b\nI0818 13:44:37.306946 22726 net.cpp:100] Creating Layer batchNorm_resblk32_b\nI0818 13:44:37.306951 22726 net.cpp:434] batchNorm_resblk32_b <- resblk32_b\nI0818 13:44:37.306963 22726 net.cpp:408] batchNorm_resblk32_b -> bn_resblk32_b\nI0818 13:44:37.307234 22726 net.cpp:150] Setting up batchNorm_resblk32_b\nI0818 13:44:37.307246 22726 net.cpp:157] Top shape: 125 16 16 16 
(512000)\nI0818 13:44:37.307251 22726 net.cpp:165] Memory required for data: 873985500\nI0818 13:44:37.307261 22726 layer_factory.hpp:77] Creating layer scale_resblk32_b\nI0818 13:44:37.307272 22726 net.cpp:100] Creating Layer scale_resblk32_b\nI0818 13:44:37.307278 22726 net.cpp:434] scale_resblk32_b <- bn_resblk32_b\nI0818 13:44:37.307286 22726 net.cpp:395] scale_resblk32_b -> bn_resblk32_b (in-place)\nI0818 13:44:37.307344 22726 layer_factory.hpp:77] Creating layer scale_resblk32_b\nI0818 13:44:37.307505 22726 net.cpp:150] Setting up scale_resblk32_b\nI0818 13:44:37.307518 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.307523 22726 net.cpp:165] Memory required for data: 876033500\nI0818 13:44:37.307533 22726 layer_factory.hpp:77] Creating layer avePooling_resblk32\nI0818 13:44:37.307543 22726 net.cpp:100] Creating Layer avePooling_resblk32\nI0818 13:44:37.307550 22726 net.cpp:434] avePooling_resblk32 <- sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split_1\nI0818 13:44:37.307561 22726 net.cpp:408] avePooling_resblk32 -> avgPool_resblk32\nI0818 13:44:37.307591 22726 net.cpp:150] Setting up avePooling_resblk32\nI0818 13:44:37.307600 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.307605 22726 net.cpp:165] Memory required for data: 878081500\nI0818 13:44:37.307610 22726 layer_factory.hpp:77] Creating layer sum_avgPool_resblk32\nI0818 13:44:37.307618 22726 net.cpp:100] Creating Layer sum_avgPool_resblk32\nI0818 13:44:37.307623 22726 net.cpp:434] sum_avgPool_resblk32 <- avgPool_resblk32\nI0818 13:44:37.307631 22726 net.cpp:434] sum_avgPool_resblk32 <- bn_resblk32_b\nI0818 13:44:37.307641 22726 net.cpp:408] sum_avgPool_resblk32 -> sum_bn_resblk32_b\nI0818 13:44:37.307677 22726 net.cpp:150] Setting up sum_avgPool_resblk32\nI0818 13:44:37.307687 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.307693 22726 net.cpp:165] Memory required for data: 880129500\nI0818 13:44:37.307696 22726 layer_factory.hpp:77] Creating 
layer relu_sum_bn_resblk32_b\nI0818 13:44:37.307704 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_b\nI0818 13:44:37.307710 22726 net.cpp:434] relu_sum_bn_resblk32_b <- sum_bn_resblk32_b\nI0818 13:44:37.307720 22726 net.cpp:395] relu_sum_bn_resblk32_b -> sum_bn_resblk32_b (in-place)\nI0818 13:44:37.307729 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_b\nI0818 13:44:37.307736 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.307741 22726 net.cpp:165] Memory required for data: 882177500\nI0818 13:44:37.307745 22726 layer_factory.hpp:77] Creating layer zeros_sum_bn_resblk32_b\nI0818 13:44:37.307755 22726 net.cpp:100] Creating Layer zeros_sum_bn_resblk32_b\nI0818 13:44:37.307762 22726 net.cpp:408] zeros_sum_bn_resblk32_b -> zeros_sum_bn_resblk32_b\nI0818 13:44:37.310086 22726 net.cpp:150] Setting up zeros_sum_bn_resblk32_b\nI0818 13:44:37.310104 22726 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:37.310118 22726 net.cpp:165] Memory required for data: 884225500\nI0818 13:44:37.310124 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk32_b\nI0818 13:44:37.310133 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk32_b\nI0818 13:44:37.310140 22726 net.cpp:434] CC_sum_bn_resblk32_b <- sum_bn_resblk32_b\nI0818 13:44:37.310148 22726 net.cpp:434] CC_sum_bn_resblk32_b <- zeros_sum_bn_resblk32_b\nI0818 13:44:37.310158 22726 net.cpp:408] CC_sum_bn_resblk32_b -> CC_sum_bn_resblk32_b\nI0818 13:44:37.310209 22726 net.cpp:150] Setting up CC_sum_bn_resblk32_b\nI0818 13:44:37.310220 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.310225 22726 net.cpp:165] Memory required for data: 888321500\nI0818 13:44:37.310230 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.310238 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.310245 22726 net.cpp:434] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split <- 
CC_sum_bn_resblk32_b\nI0818 13:44:37.310255 22726 net.cpp:408] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split -> CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_0\nI0818 13:44:37.310264 22726 net.cpp:408] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split -> CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_1\nI0818 13:44:37.310317 22726 net.cpp:150] Setting up CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split\nI0818 13:44:37.310330 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.310338 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.310341 22726 net.cpp:165] Memory required for data: 896513500\nI0818 13:44:37.310346 22726 layer_factory.hpp:77] Creating layer resblk32_1\nI0818 13:44:37.310358 22726 net.cpp:100] Creating Layer resblk32_1\nI0818 13:44:37.310364 22726 net.cpp:434] resblk32_1 <- CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_0\nI0818 13:44:37.310374 22726 net.cpp:408] resblk32_1 -> resblk32_1\nI0818 13:44:37.310885 22726 net.cpp:150] Setting up resblk32_1\nI0818 13:44:37.310899 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.310904 22726 net.cpp:165] Memory required for data: 900609500\nI0818 13:44:37.310914 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_1\nI0818 13:44:37.310925 22726 net.cpp:100] Creating Layer batchNorm_resblk32_1\nI0818 13:44:37.310931 22726 net.cpp:434] batchNorm_resblk32_1 <- resblk32_1\nI0818 13:44:37.310940 22726 net.cpp:408] batchNorm_resblk32_1 -> bn_resblk32_1\nI0818 13:44:37.311216 22726 net.cpp:150] Setting up batchNorm_resblk32_1\nI0818 13:44:37.311228 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.311233 22726 net.cpp:165] Memory required for data: 904705500\nI0818 13:44:37.311244 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1\nI0818 13:44:37.311252 22726 net.cpp:100] Creating Layer scale_resblk32_1\nI0818 13:44:37.311259 22726 net.cpp:434] scale_resblk32_1 <- bn_resblk32_1\nI0818 13:44:37.311269 22726 
net.cpp:395] scale_resblk32_1 -> bn_resblk32_1 (in-place)\nI0818 13:44:37.311329 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1\nI0818 13:44:37.311487 22726 net.cpp:150] Setting up scale_resblk32_1\nI0818 13:44:37.311501 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.311506 22726 net.cpp:165] Memory required for data: 908801500\nI0818 13:44:37.311513 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_1\nI0818 13:44:37.311522 22726 net.cpp:100] Creating Layer relu_bn_resblk32_1\nI0818 13:44:37.311527 22726 net.cpp:434] relu_bn_resblk32_1 <- bn_resblk32_1\nI0818 13:44:37.311538 22726 net.cpp:395] relu_bn_resblk32_1 -> bn_resblk32_1 (in-place)\nI0818 13:44:37.311547 22726 net.cpp:150] Setting up relu_bn_resblk32_1\nI0818 13:44:37.311554 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.311559 22726 net.cpp:165] Memory required for data: 912897500\nI0818 13:44:37.311563 22726 layer_factory.hpp:77] Creating layer resblk32_1_b\nI0818 13:44:37.311578 22726 net.cpp:100] Creating Layer resblk32_1_b\nI0818 13:44:37.311590 22726 net.cpp:434] resblk32_1_b <- bn_resblk32_1\nI0818 13:44:37.311599 22726 net.cpp:408] resblk32_1_b -> resblk32_1_b\nI0818 13:44:37.312180 22726 net.cpp:150] Setting up resblk32_1_b\nI0818 13:44:37.312196 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.312201 22726 net.cpp:165] Memory required for data: 916993500\nI0818 13:44:37.312211 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_1_b\nI0818 13:44:37.312219 22726 net.cpp:100] Creating Layer batchNorm_resblk32_1_b\nI0818 13:44:37.312225 22726 net.cpp:434] batchNorm_resblk32_1_b <- resblk32_1_b\nI0818 13:44:37.312237 22726 net.cpp:408] batchNorm_resblk32_1_b -> bn_resblk32_1_b\nI0818 13:44:37.312507 22726 net.cpp:150] Setting up batchNorm_resblk32_1_b\nI0818 13:44:37.312520 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.312525 22726 net.cpp:165] Memory required for data: 
921089500\nI0818 13:44:37.312536 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1_b\nI0818 13:44:37.312543 22726 net.cpp:100] Creating Layer scale_resblk32_1_b\nI0818 13:44:37.312551 22726 net.cpp:434] scale_resblk32_1_b <- bn_resblk32_1_b\nI0818 13:44:37.312557 22726 net.cpp:395] scale_resblk32_1_b -> bn_resblk32_1_b (in-place)\nI0818 13:44:37.312619 22726 layer_factory.hpp:77] Creating layer scale_resblk32_1_b\nI0818 13:44:37.312777 22726 net.cpp:150] Setting up scale_resblk32_1_b\nI0818 13:44:37.312793 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.312798 22726 net.cpp:165] Memory required for data: 925185500\nI0818 13:44:37.312813 22726 layer_factory.hpp:77] Creating layer sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.312822 22726 net.cpp:100] Creating Layer sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.312829 22726 net.cpp:434] sum_CC_sum_bn_resblk32_b <- CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split_1\nI0818 13:44:37.312836 22726 net.cpp:434] sum_CC_sum_bn_resblk32_b <- bn_resblk32_1_b\nI0818 13:44:37.312844 22726 net.cpp:408] sum_CC_sum_bn_resblk32_b -> sum_bn_resblk32_1_b\nI0818 13:44:37.312878 22726 net.cpp:150] Setting up sum_CC_sum_bn_resblk32_b\nI0818 13:44:37.312888 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.312893 22726 net.cpp:165] Memory required for data: 929281500\nI0818 13:44:37.312898 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_1_b\nI0818 13:44:37.312906 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_1_b\nI0818 13:44:37.312911 22726 net.cpp:434] relu_sum_bn_resblk32_1_b <- sum_bn_resblk32_1_b\nI0818 13:44:37.312922 22726 net.cpp:395] relu_sum_bn_resblk32_1_b -> sum_bn_resblk32_1_b (in-place)\nI0818 13:44:37.312932 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_1_b\nI0818 13:44:37.312937 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.312942 22726 net.cpp:165] Memory required for data: 933377500\nI0818 13:44:37.312947 22726 
layer_factory.hpp:77] Creating layer sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.312954 22726 net.cpp:100] Creating Layer sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.312959 22726 net.cpp:434] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split <- sum_bn_resblk32_1_b\nI0818 13:44:37.312969 22726 net.cpp:408] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split -> sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_0\nI0818 13:44:37.312979 22726 net.cpp:408] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split -> sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_1\nI0818 13:44:37.313029 22726 net.cpp:150] Setting up sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split\nI0818 13:44:37.313040 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.313046 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.313050 22726 net.cpp:165] Memory required for data: 941569500\nI0818 13:44:37.313055 22726 layer_factory.hpp:77] Creating layer resblk32_2\nI0818 13:44:37.313069 22726 net.cpp:100] Creating Layer resblk32_2\nI0818 13:44:37.313076 22726 net.cpp:434] resblk32_2 <- sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_0\nI0818 13:44:37.313092 22726 net.cpp:408] resblk32_2 -> resblk32_2\nI0818 13:44:37.313596 22726 net.cpp:150] Setting up resblk32_2\nI0818 13:44:37.313608 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.313613 22726 net.cpp:165] Memory required for data: 945665500\nI0818 13:44:37.313622 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_2\nI0818 13:44:37.313630 22726 net.cpp:100] Creating Layer batchNorm_resblk32_2\nI0818 13:44:37.313637 22726 net.cpp:434] batchNorm_resblk32_2 <- resblk32_2\nI0818 13:44:37.313647 22726 net.cpp:408] batchNorm_resblk32_2 -> bn_resblk32_2\nI0818 13:44:37.313922 22726 net.cpp:150] Setting up batchNorm_resblk32_2\nI0818 13:44:37.313935 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.313941 
22726 net.cpp:165] Memory required for data: 949761500\nI0818 13:44:37.313951 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2\nI0818 13:44:37.313959 22726 net.cpp:100] Creating Layer scale_resblk32_2\nI0818 13:44:37.313966 22726 net.cpp:434] scale_resblk32_2 <- bn_resblk32_2\nI0818 13:44:37.313972 22726 net.cpp:395] scale_resblk32_2 -> bn_resblk32_2 (in-place)\nI0818 13:44:37.314033 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2\nI0818 13:44:37.314195 22726 net.cpp:150] Setting up scale_resblk32_2\nI0818 13:44:37.314211 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.314216 22726 net.cpp:165] Memory required for data: 953857500\nI0818 13:44:37.314225 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_2\nI0818 13:44:37.314234 22726 net.cpp:100] Creating Layer relu_bn_resblk32_2\nI0818 13:44:37.314239 22726 net.cpp:434] relu_bn_resblk32_2 <- bn_resblk32_2\nI0818 13:44:37.314246 22726 net.cpp:395] relu_bn_resblk32_2 -> bn_resblk32_2 (in-place)\nI0818 13:44:37.314255 22726 net.cpp:150] Setting up relu_bn_resblk32_2\nI0818 13:44:37.314262 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.314266 22726 net.cpp:165] Memory required for data: 957953500\nI0818 13:44:37.314271 22726 layer_factory.hpp:77] Creating layer resblk32_2_b\nI0818 13:44:37.314285 22726 net.cpp:100] Creating Layer resblk32_2_b\nI0818 13:44:37.314291 22726 net.cpp:434] resblk32_2_b <- bn_resblk32_2\nI0818 13:44:37.314302 22726 net.cpp:408] resblk32_2_b -> resblk32_2_b\nI0818 13:44:37.314803 22726 net.cpp:150] Setting up resblk32_2_b\nI0818 13:44:37.314822 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.314827 22726 net.cpp:165] Memory required for data: 962049500\nI0818 13:44:37.314836 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_2_b\nI0818 13:44:37.314847 22726 net.cpp:100] Creating Layer batchNorm_resblk32_2_b\nI0818 13:44:37.314854 22726 net.cpp:434] batchNorm_resblk32_2_b <- 
resblk32_2_b\nI0818 13:44:37.314865 22726 net.cpp:408] batchNorm_resblk32_2_b -> bn_resblk32_2_b\nI0818 13:44:37.315135 22726 net.cpp:150] Setting up batchNorm_resblk32_2_b\nI0818 13:44:37.315148 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315153 22726 net.cpp:165] Memory required for data: 966145500\nI0818 13:44:37.315163 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2_b\nI0818 13:44:37.315172 22726 net.cpp:100] Creating Layer scale_resblk32_2_b\nI0818 13:44:37.315178 22726 net.cpp:434] scale_resblk32_2_b <- bn_resblk32_2_b\nI0818 13:44:37.315186 22726 net.cpp:395] scale_resblk32_2_b -> bn_resblk32_2_b (in-place)\nI0818 13:44:37.315246 22726 layer_factory.hpp:77] Creating layer scale_resblk32_2_b\nI0818 13:44:37.315404 22726 net.cpp:150] Setting up scale_resblk32_2_b\nI0818 13:44:37.315418 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315423 22726 net.cpp:165] Memory required for data: 970241500\nI0818 13:44:37.315430 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_1_b\nI0818 13:44:37.315443 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_1_b\nI0818 13:44:37.315448 22726 net.cpp:434] sum_sum_bn_resblk32_1_b <- sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split_1\nI0818 13:44:37.315456 22726 net.cpp:434] sum_sum_bn_resblk32_1_b <- bn_resblk32_2_b\nI0818 13:44:37.315464 22726 net.cpp:408] sum_sum_bn_resblk32_1_b -> sum_bn_resblk32_2_b\nI0818 13:44:37.315501 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_1_b\nI0818 13:44:37.315511 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315515 22726 net.cpp:165] Memory required for data: 974337500\nI0818 13:44:37.315521 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_2_b\nI0818 13:44:37.315541 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_2_b\nI0818 13:44:37.315548 22726 net.cpp:434] relu_sum_bn_resblk32_2_b <- sum_bn_resblk32_2_b\nI0818 13:44:37.315556 22726 net.cpp:395] relu_sum_bn_resblk32_2_b 
-> sum_bn_resblk32_2_b (in-place)\nI0818 13:44:37.315564 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_2_b\nI0818 13:44:37.315572 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315575 22726 net.cpp:165] Memory required for data: 978433500\nI0818 13:44:37.315582 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.315588 22726 net.cpp:100] Creating Layer sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.315593 22726 net.cpp:434] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split <- sum_bn_resblk32_2_b\nI0818 13:44:37.315600 22726 net.cpp:408] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split -> sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_0\nI0818 13:44:37.315610 22726 net.cpp:408] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split -> sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_1\nI0818 13:44:37.315662 22726 net.cpp:150] Setting up sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split\nI0818 13:44:37.315675 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315681 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.315685 22726 net.cpp:165] Memory required for data: 986625500\nI0818 13:44:37.315690 22726 layer_factory.hpp:77] Creating layer resblk32_3\nI0818 13:44:37.315701 22726 net.cpp:100] Creating Layer resblk32_3\nI0818 13:44:37.315706 22726 net.cpp:434] resblk32_3 <- sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_0\nI0818 13:44:37.315718 22726 net.cpp:408] resblk32_3 -> resblk32_3\nI0818 13:44:37.316231 22726 net.cpp:150] Setting up resblk32_3\nI0818 13:44:37.316246 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.316251 22726 net.cpp:165] Memory required for data: 990721500\nI0818 13:44:37.316259 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_3\nI0818 13:44:37.316272 22726 net.cpp:100] Creating Layer batchNorm_resblk32_3\nI0818 13:44:37.316279 22726 
net.cpp:434] batchNorm_resblk32_3 <- resblk32_3\nI0818 13:44:37.316288 22726 net.cpp:408] batchNorm_resblk32_3 -> bn_resblk32_3\nI0818 13:44:37.316563 22726 net.cpp:150] Setting up batchNorm_resblk32_3\nI0818 13:44:37.316576 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.316581 22726 net.cpp:165] Memory required for data: 994817500\nI0818 13:44:37.316591 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3\nI0818 13:44:37.316602 22726 net.cpp:100] Creating Layer scale_resblk32_3\nI0818 13:44:37.316609 22726 net.cpp:434] scale_resblk32_3 <- bn_resblk32_3\nI0818 13:44:37.316617 22726 net.cpp:395] scale_resblk32_3 -> bn_resblk32_3 (in-place)\nI0818 13:44:37.316676 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3\nI0818 13:44:37.316843 22726 net.cpp:150] Setting up scale_resblk32_3\nI0818 13:44:37.316856 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.316860 22726 net.cpp:165] Memory required for data: 998913500\nI0818 13:44:37.316869 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_3\nI0818 13:44:37.316882 22726 net.cpp:100] Creating Layer relu_bn_resblk32_3\nI0818 13:44:37.316889 22726 net.cpp:434] relu_bn_resblk32_3 <- bn_resblk32_3\nI0818 13:44:37.316898 22726 net.cpp:395] relu_bn_resblk32_3 -> bn_resblk32_3 (in-place)\nI0818 13:44:37.316908 22726 net.cpp:150] Setting up relu_bn_resblk32_3\nI0818 13:44:37.316915 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.316920 22726 net.cpp:165] Memory required for data: 1003009500\nI0818 13:44:37.316931 22726 layer_factory.hpp:77] Creating layer resblk32_3_b\nI0818 13:44:37.316943 22726 net.cpp:100] Creating Layer resblk32_3_b\nI0818 13:44:37.316948 22726 net.cpp:434] resblk32_3_b <- bn_resblk32_3\nI0818 13:44:37.316961 22726 net.cpp:408] resblk32_3_b -> resblk32_3_b\nI0818 13:44:37.317453 22726 net.cpp:150] Setting up resblk32_3_b\nI0818 13:44:37.317467 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.317472 
22726 net.cpp:165] Memory required for data: 1007105500\nI0818 13:44:37.317481 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_3_b\nI0818 13:44:37.317489 22726 net.cpp:100] Creating Layer batchNorm_resblk32_3_b\nI0818 13:44:37.317495 22726 net.cpp:434] batchNorm_resblk32_3_b <- resblk32_3_b\nI0818 13:44:37.317507 22726 net.cpp:408] batchNorm_resblk32_3_b -> bn_resblk32_3_b\nI0818 13:44:37.317777 22726 net.cpp:150] Setting up batchNorm_resblk32_3_b\nI0818 13:44:37.317790 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.317795 22726 net.cpp:165] Memory required for data: 1011201500\nI0818 13:44:37.317806 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3_b\nI0818 13:44:37.317824 22726 net.cpp:100] Creating Layer scale_resblk32_3_b\nI0818 13:44:37.317831 22726 net.cpp:434] scale_resblk32_3_b <- bn_resblk32_3_b\nI0818 13:44:37.317839 22726 net.cpp:395] scale_resblk32_3_b -> bn_resblk32_3_b (in-place)\nI0818 13:44:37.317898 22726 layer_factory.hpp:77] Creating layer scale_resblk32_3_b\nI0818 13:44:37.318060 22726 net.cpp:150] Setting up scale_resblk32_3_b\nI0818 13:44:37.318073 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.318078 22726 net.cpp:165] Memory required for data: 1015297500\nI0818 13:44:37.318086 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_2_b\nI0818 13:44:37.318100 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_2_b\nI0818 13:44:37.318107 22726 net.cpp:434] sum_sum_bn_resblk32_2_b <- sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split_1\nI0818 13:44:37.318115 22726 net.cpp:434] sum_sum_bn_resblk32_2_b <- bn_resblk32_3_b\nI0818 13:44:37.318122 22726 net.cpp:408] sum_sum_bn_resblk32_2_b -> sum_bn_resblk32_3_b\nI0818 13:44:37.318155 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_2_b\nI0818 13:44:37.318164 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.318168 22726 net.cpp:165] Memory required for data: 1019393500\nI0818 13:44:37.318174 22726 
layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_3_b\nI0818 13:44:37.318182 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_3_b\nI0818 13:44:37.318187 22726 net.cpp:434] relu_sum_bn_resblk32_3_b <- sum_bn_resblk32_3_b\nI0818 13:44:37.318197 22726 net.cpp:395] relu_sum_bn_resblk32_3_b -> sum_bn_resblk32_3_b (in-place)\nI0818 13:44:37.318207 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_3_b\nI0818 13:44:37.318213 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.318218 22726 net.cpp:165] Memory required for data: 1023489500\nI0818 13:44:37.318222 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.318229 22726 net.cpp:100] Creating Layer sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.318234 22726 net.cpp:434] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split <- sum_bn_resblk32_3_b\nI0818 13:44:37.318243 22726 net.cpp:408] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split -> sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_0\nI0818 13:44:37.318251 22726 net.cpp:408] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split -> sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_1\nI0818 13:44:37.318302 22726 net.cpp:150] Setting up sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split\nI0818 13:44:37.318315 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.318320 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.318325 22726 net.cpp:165] Memory required for data: 1031681500\nI0818 13:44:37.318330 22726 layer_factory.hpp:77] Creating layer resblk32_4\nI0818 13:44:37.318339 22726 net.cpp:100] Creating Layer resblk32_4\nI0818 13:44:37.318352 22726 net.cpp:434] resblk32_4 <- sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_0\nI0818 13:44:37.318366 22726 net.cpp:408] resblk32_4 -> resblk32_4\nI0818 13:44:37.318871 22726 net.cpp:150] Setting up resblk32_4\nI0818 13:44:37.318884 22726 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 13:44:37.318889 22726 net.cpp:165] Memory required for data: 1035777500\nI0818 13:44:37.318898 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_4\nI0818 13:44:37.318907 22726 net.cpp:100] Creating Layer batchNorm_resblk32_4\nI0818 13:44:37.318913 22726 net.cpp:434] batchNorm_resblk32_4 <- resblk32_4\nI0818 13:44:37.318924 22726 net.cpp:408] batchNorm_resblk32_4 -> bn_resblk32_4\nI0818 13:44:37.319195 22726 net.cpp:150] Setting up batchNorm_resblk32_4\nI0818 13:44:37.319207 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.319212 22726 net.cpp:165] Memory required for data: 1039873500\nI0818 13:44:37.319222 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4\nI0818 13:44:37.319233 22726 net.cpp:100] Creating Layer scale_resblk32_4\nI0818 13:44:37.319241 22726 net.cpp:434] scale_resblk32_4 <- bn_resblk32_4\nI0818 13:44:37.319248 22726 net.cpp:395] scale_resblk32_4 -> bn_resblk32_4 (in-place)\nI0818 13:44:37.319306 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4\nI0818 13:44:37.319471 22726 net.cpp:150] Setting up scale_resblk32_4\nI0818 13:44:37.319483 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.319489 22726 net.cpp:165] Memory required for data: 1043969500\nI0818 13:44:37.319497 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_4\nI0818 13:44:37.319509 22726 net.cpp:100] Creating Layer relu_bn_resblk32_4\nI0818 13:44:37.319514 22726 net.cpp:434] relu_bn_resblk32_4 <- bn_resblk32_4\nI0818 13:44:37.319522 22726 net.cpp:395] relu_bn_resblk32_4 -> bn_resblk32_4 (in-place)\nI0818 13:44:37.319531 22726 net.cpp:150] Setting up relu_bn_resblk32_4\nI0818 13:44:37.319538 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.319542 22726 net.cpp:165] Memory required for data: 1048065500\nI0818 13:44:37.319547 22726 layer_factory.hpp:77] Creating layer resblk32_4_b\nI0818 13:44:37.319561 22726 net.cpp:100] Creating Layer resblk32_4_b\nI0818 
13:44:37.319566 22726 net.cpp:434] resblk32_4_b <- bn_resblk32_4\nI0818 13:44:37.319577 22726 net.cpp:408] resblk32_4_b -> resblk32_4_b\nI0818 13:44:37.320076 22726 net.cpp:150] Setting up resblk32_4_b\nI0818 13:44:37.320091 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320096 22726 net.cpp:165] Memory required for data: 1052161500\nI0818 13:44:37.320104 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_4_b\nI0818 13:44:37.320112 22726 net.cpp:100] Creating Layer batchNorm_resblk32_4_b\nI0818 13:44:37.320118 22726 net.cpp:434] batchNorm_resblk32_4_b <- resblk32_4_b\nI0818 13:44:37.320127 22726 net.cpp:408] batchNorm_resblk32_4_b -> bn_resblk32_4_b\nI0818 13:44:37.320406 22726 net.cpp:150] Setting up batchNorm_resblk32_4_b\nI0818 13:44:37.320420 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320425 22726 net.cpp:165] Memory required for data: 1056257500\nI0818 13:44:37.320435 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4_b\nI0818 13:44:37.320442 22726 net.cpp:100] Creating Layer scale_resblk32_4_b\nI0818 13:44:37.320448 22726 net.cpp:434] scale_resblk32_4_b <- bn_resblk32_4_b\nI0818 13:44:37.320459 22726 net.cpp:395] scale_resblk32_4_b -> bn_resblk32_4_b (in-place)\nI0818 13:44:37.320519 22726 layer_factory.hpp:77] Creating layer scale_resblk32_4_b\nI0818 13:44:37.320678 22726 net.cpp:150] Setting up scale_resblk32_4_b\nI0818 13:44:37.320691 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320695 22726 net.cpp:165] Memory required for data: 1060353500\nI0818 13:44:37.320704 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_3_b\nI0818 13:44:37.320713 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_3_b\nI0818 13:44:37.320719 22726 net.cpp:434] sum_sum_bn_resblk32_3_b <- sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split_1\nI0818 13:44:37.320734 22726 net.cpp:434] sum_sum_bn_resblk32_3_b <- bn_resblk32_4_b\nI0818 13:44:37.320744 22726 net.cpp:408] 
sum_sum_bn_resblk32_3_b -> sum_bn_resblk32_4_b\nI0818 13:44:37.320775 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_3_b\nI0818 13:44:37.320787 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320792 22726 net.cpp:165] Memory required for data: 1064449500\nI0818 13:44:37.320797 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_4_b\nI0818 13:44:37.320804 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_4_b\nI0818 13:44:37.320816 22726 net.cpp:434] relu_sum_bn_resblk32_4_b <- sum_bn_resblk32_4_b\nI0818 13:44:37.320823 22726 net.cpp:395] relu_sum_bn_resblk32_4_b -> sum_bn_resblk32_4_b (in-place)\nI0818 13:44:37.320833 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_4_b\nI0818 13:44:37.320840 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320845 22726 net.cpp:165] Memory required for data: 1068545500\nI0818 13:44:37.320849 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.320859 22726 net.cpp:100] Creating Layer sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.320864 22726 net.cpp:434] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split <- sum_bn_resblk32_4_b\nI0818 13:44:37.320873 22726 net.cpp:408] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split -> sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_0\nI0818 13:44:37.320881 22726 net.cpp:408] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split -> sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_1\nI0818 13:44:37.320935 22726 net.cpp:150] Setting up sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split\nI0818 13:44:37.320947 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320953 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.320957 22726 net.cpp:165] Memory required for data: 1076737500\nI0818 13:44:37.320962 22726 layer_factory.hpp:77] Creating layer resblk32_5\nI0818 13:44:37.320973 22726 net.cpp:100] Creating Layer 
resblk32_5\nI0818 13:44:37.320981 22726 net.cpp:434] resblk32_5 <- sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_0\nI0818 13:44:37.320992 22726 net.cpp:408] resblk32_5 -> resblk32_5\nI0818 13:44:37.321501 22726 net.cpp:150] Setting up resblk32_5\nI0818 13:44:37.321516 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.321521 22726 net.cpp:165] Memory required for data: 1080833500\nI0818 13:44:37.321529 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_5\nI0818 13:44:37.321537 22726 net.cpp:100] Creating Layer batchNorm_resblk32_5\nI0818 13:44:37.321543 22726 net.cpp:434] batchNorm_resblk32_5 <- resblk32_5\nI0818 13:44:37.321552 22726 net.cpp:408] batchNorm_resblk32_5 -> bn_resblk32_5\nI0818 13:44:37.321827 22726 net.cpp:150] Setting up batchNorm_resblk32_5\nI0818 13:44:37.321841 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.321846 22726 net.cpp:165] Memory required for data: 1084929500\nI0818 13:44:37.321856 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5\nI0818 13:44:37.321866 22726 net.cpp:100] Creating Layer scale_resblk32_5\nI0818 13:44:37.321872 22726 net.cpp:434] scale_resblk32_5 <- bn_resblk32_5\nI0818 13:44:37.321880 22726 net.cpp:395] scale_resblk32_5 -> bn_resblk32_5 (in-place)\nI0818 13:44:37.321939 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5\nI0818 13:44:37.322098 22726 net.cpp:150] Setting up scale_resblk32_5\nI0818 13:44:37.322111 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.322115 22726 net.cpp:165] Memory required for data: 1089025500\nI0818 13:44:37.322124 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_5\nI0818 13:44:37.322132 22726 net.cpp:100] Creating Layer relu_bn_resblk32_5\nI0818 13:44:37.322139 22726 net.cpp:434] relu_bn_resblk32_5 <- bn_resblk32_5\nI0818 13:44:37.322149 22726 net.cpp:395] relu_bn_resblk32_5 -> bn_resblk32_5 (in-place)\nI0818 13:44:37.322157 22726 net.cpp:150] Setting up relu_bn_resblk32_5\nI0818 
13:44:37.322165 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.322175 22726 net.cpp:165] Memory required for data: 1093121500\nI0818 13:44:37.322180 22726 layer_factory.hpp:77] Creating layer resblk32_5_b\nI0818 13:44:37.322193 22726 net.cpp:100] Creating Layer resblk32_5_b\nI0818 13:44:37.322199 22726 net.cpp:434] resblk32_5_b <- bn_resblk32_5\nI0818 13:44:37.322208 22726 net.cpp:408] resblk32_5_b -> resblk32_5_b\nI0818 13:44:37.322707 22726 net.cpp:150] Setting up resblk32_5_b\nI0818 13:44:37.322721 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.322726 22726 net.cpp:165] Memory required for data: 1097217500\nI0818 13:44:37.322734 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_5_b\nI0818 13:44:37.322746 22726 net.cpp:100] Creating Layer batchNorm_resblk32_5_b\nI0818 13:44:37.322752 22726 net.cpp:434] batchNorm_resblk32_5_b <- resblk32_5_b\nI0818 13:44:37.322760 22726 net.cpp:408] batchNorm_resblk32_5_b -> bn_resblk32_5_b\nI0818 13:44:37.323038 22726 net.cpp:150] Setting up batchNorm_resblk32_5_b\nI0818 13:44:37.323052 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.323057 22726 net.cpp:165] Memory required for data: 1101313500\nI0818 13:44:37.323067 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5_b\nI0818 13:44:37.323076 22726 net.cpp:100] Creating Layer scale_resblk32_5_b\nI0818 13:44:37.323082 22726 net.cpp:434] scale_resblk32_5_b <- bn_resblk32_5_b\nI0818 13:44:37.323096 22726 net.cpp:395] scale_resblk32_5_b -> bn_resblk32_5_b (in-place)\nI0818 13:44:37.323155 22726 layer_factory.hpp:77] Creating layer scale_resblk32_5_b\nI0818 13:44:37.323314 22726 net.cpp:150] Setting up scale_resblk32_5_b\nI0818 13:44:37.323328 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.323333 22726 net.cpp:165] Memory required for data: 1105409500\nI0818 13:44:37.323340 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_4_b\nI0818 13:44:37.323349 22726 
net.cpp:100] Creating Layer sum_sum_bn_resblk32_4_b\nI0818 13:44:37.323355 22726 net.cpp:434] sum_sum_bn_resblk32_4_b <- sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split_1\nI0818 13:44:37.323362 22726 net.cpp:434] sum_sum_bn_resblk32_4_b <- bn_resblk32_5_b\nI0818 13:44:37.323374 22726 net.cpp:408] sum_sum_bn_resblk32_4_b -> sum_bn_resblk32_5_b\nI0818 13:44:37.323402 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_4_b\nI0818 13:44:37.323411 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.323416 22726 net.cpp:165] Memory required for data: 1109505500\nI0818 13:44:37.323421 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_5_b\nI0818 13:44:37.323431 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_5_b\nI0818 13:44:37.323437 22726 net.cpp:434] relu_sum_bn_resblk32_5_b <- sum_bn_resblk32_5_b\nI0818 13:44:37.323444 22726 net.cpp:395] relu_sum_bn_resblk32_5_b -> sum_bn_resblk32_5_b (in-place)\nI0818 13:44:37.323453 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_5_b\nI0818 13:44:37.323460 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.323465 22726 net.cpp:165] Memory required for data: 1113601500\nI0818 13:44:37.323469 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.323477 22726 net.cpp:100] Creating Layer sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.323482 22726 net.cpp:434] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split <- sum_bn_resblk32_5_b\nI0818 13:44:37.323492 22726 net.cpp:408] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split -> sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_0\nI0818 13:44:37.323501 22726 net.cpp:408] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split -> sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_1\nI0818 13:44:37.323549 22726 net.cpp:150] Setting up sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split\nI0818 13:44:37.323563 22726 net.cpp:157] Top shape: 125 32 
16 16 (1024000)\nI0818 13:44:37.323570 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.323575 22726 net.cpp:165] Memory required for data: 1121793500\nI0818 13:44:37.323586 22726 layer_factory.hpp:77] Creating layer resblk32_6\nI0818 13:44:37.323597 22726 net.cpp:100] Creating Layer resblk32_6\nI0818 13:44:37.323603 22726 net.cpp:434] resblk32_6 <- sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_0\nI0818 13:44:37.323612 22726 net.cpp:408] resblk32_6 -> resblk32_6\nI0818 13:44:37.325101 22726 net.cpp:150] Setting up resblk32_6\nI0818 13:44:37.325119 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.325124 22726 net.cpp:165] Memory required for data: 1125889500\nI0818 13:44:37.325134 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_6\nI0818 13:44:37.325145 22726 net.cpp:100] Creating Layer batchNorm_resblk32_6\nI0818 13:44:37.325152 22726 net.cpp:434] batchNorm_resblk32_6 <- resblk32_6\nI0818 13:44:37.325161 22726 net.cpp:408] batchNorm_resblk32_6 -> bn_resblk32_6\nI0818 13:44:37.325434 22726 net.cpp:150] Setting up batchNorm_resblk32_6\nI0818 13:44:37.325450 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.325455 22726 net.cpp:165] Memory required for data: 1129985500\nI0818 13:44:37.325465 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6\nI0818 13:44:37.325474 22726 net.cpp:100] Creating Layer scale_resblk32_6\nI0818 13:44:37.325480 22726 net.cpp:434] scale_resblk32_6 <- bn_resblk32_6\nI0818 13:44:37.325489 22726 net.cpp:395] scale_resblk32_6 -> bn_resblk32_6 (in-place)\nI0818 13:44:37.325549 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6\nI0818 13:44:37.325711 22726 net.cpp:150] Setting up scale_resblk32_6\nI0818 13:44:37.325723 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.325728 22726 net.cpp:165] Memory required for data: 1134081500\nI0818 13:44:37.325737 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_6\nI0818 13:44:37.325745 
22726 net.cpp:100] Creating Layer relu_bn_resblk32_6\nI0818 13:44:37.325752 22726 net.cpp:434] relu_bn_resblk32_6 <- bn_resblk32_6\nI0818 13:44:37.325762 22726 net.cpp:395] relu_bn_resblk32_6 -> bn_resblk32_6 (in-place)\nI0818 13:44:37.325773 22726 net.cpp:150] Setting up relu_bn_resblk32_6\nI0818 13:44:37.325779 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.325783 22726 net.cpp:165] Memory required for data: 1138177500\nI0818 13:44:37.325788 22726 layer_factory.hpp:77] Creating layer resblk32_6_b\nI0818 13:44:37.325803 22726 net.cpp:100] Creating Layer resblk32_6_b\nI0818 13:44:37.325814 22726 net.cpp:434] resblk32_6_b <- bn_resblk32_6\nI0818 13:44:37.325824 22726 net.cpp:408] resblk32_6_b -> resblk32_6_b\nI0818 13:44:37.326318 22726 net.cpp:150] Setting up resblk32_6_b\nI0818 13:44:37.326331 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.326336 22726 net.cpp:165] Memory required for data: 1142273500\nI0818 13:44:37.326345 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_6_b\nI0818 13:44:37.326356 22726 net.cpp:100] Creating Layer batchNorm_resblk32_6_b\nI0818 13:44:37.326364 22726 net.cpp:434] batchNorm_resblk32_6_b <- resblk32_6_b\nI0818 13:44:37.326372 22726 net.cpp:408] batchNorm_resblk32_6_b -> bn_resblk32_6_b\nI0818 13:44:37.326642 22726 net.cpp:150] Setting up batchNorm_resblk32_6_b\nI0818 13:44:37.326654 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.326659 22726 net.cpp:165] Memory required for data: 1146369500\nI0818 13:44:37.326670 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6_b\nI0818 13:44:37.326681 22726 net.cpp:100] Creating Layer scale_resblk32_6_b\nI0818 13:44:37.326687 22726 net.cpp:434] scale_resblk32_6_b <- bn_resblk32_6_b\nI0818 13:44:37.326695 22726 net.cpp:395] scale_resblk32_6_b -> bn_resblk32_6_b (in-place)\nI0818 13:44:37.326752 22726 layer_factory.hpp:77] Creating layer scale_resblk32_6_b\nI0818 13:44:37.326916 22726 net.cpp:150] Setting up 
scale_resblk32_6_b\nI0818 13:44:37.326930 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.326936 22726 net.cpp:165] Memory required for data: 1150465500\nI0818 13:44:37.326944 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_5_b\nI0818 13:44:37.326956 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_5_b\nI0818 13:44:37.326973 22726 net.cpp:434] sum_sum_bn_resblk32_5_b <- sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split_1\nI0818 13:44:37.326982 22726 net.cpp:434] sum_sum_bn_resblk32_5_b <- bn_resblk32_6_b\nI0818 13:44:37.326992 22726 net.cpp:408] sum_sum_bn_resblk32_5_b -> sum_bn_resblk32_6_b\nI0818 13:44:37.327023 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_5_b\nI0818 13:44:37.327033 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.327036 22726 net.cpp:165] Memory required for data: 1154561500\nI0818 13:44:37.327041 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_6_b\nI0818 13:44:37.327049 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_6_b\nI0818 13:44:37.327055 22726 net.cpp:434] relu_sum_bn_resblk32_6_b <- sum_bn_resblk32_6_b\nI0818 13:44:37.327065 22726 net.cpp:395] relu_sum_bn_resblk32_6_b -> sum_bn_resblk32_6_b (in-place)\nI0818 13:44:37.327075 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_6_b\nI0818 13:44:37.327082 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.327087 22726 net.cpp:165] Memory required for data: 1158657500\nI0818 13:44:37.327091 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.327098 22726 net.cpp:100] Creating Layer sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.327103 22726 net.cpp:434] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split <- sum_bn_resblk32_6_b\nI0818 13:44:37.327111 22726 net.cpp:408] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split -> sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_0\nI0818 13:44:37.327121 
22726 net.cpp:408] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split -> sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_1\nI0818 13:44:37.327173 22726 net.cpp:150] Setting up sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split\nI0818 13:44:37.327185 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.327193 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.327196 22726 net.cpp:165] Memory required for data: 1166849500\nI0818 13:44:37.327201 22726 layer_factory.hpp:77] Creating layer resblk32_7\nI0818 13:44:37.327213 22726 net.cpp:100] Creating Layer resblk32_7\nI0818 13:44:37.327219 22726 net.cpp:434] resblk32_7 <- sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_0\nI0818 13:44:37.327230 22726 net.cpp:408] resblk32_7 -> resblk32_7\nI0818 13:44:37.327720 22726 net.cpp:150] Setting up resblk32_7\nI0818 13:44:37.327734 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.327739 22726 net.cpp:165] Memory required for data: 1170945500\nI0818 13:44:37.327747 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_7\nI0818 13:44:37.327759 22726 net.cpp:100] Creating Layer batchNorm_resblk32_7\nI0818 13:44:37.327765 22726 net.cpp:434] batchNorm_resblk32_7 <- resblk32_7\nI0818 13:44:37.327774 22726 net.cpp:408] batchNorm_resblk32_7 -> bn_resblk32_7\nI0818 13:44:37.328053 22726 net.cpp:150] Setting up batchNorm_resblk32_7\nI0818 13:44:37.328069 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.328074 22726 net.cpp:165] Memory required for data: 1175041500\nI0818 13:44:37.328085 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7\nI0818 13:44:37.328094 22726 net.cpp:100] Creating Layer scale_resblk32_7\nI0818 13:44:37.328099 22726 net.cpp:434] scale_resblk32_7 <- bn_resblk32_7\nI0818 13:44:37.328107 22726 net.cpp:395] scale_resblk32_7 -> bn_resblk32_7 (in-place)\nI0818 13:44:37.328167 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7\nI0818 13:44:37.328327 22726 
net.cpp:150] Setting up scale_resblk32_7\nI0818 13:44:37.328341 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.328346 22726 net.cpp:165] Memory required for data: 1179137500\nI0818 13:44:37.328354 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_7\nI0818 13:44:37.328361 22726 net.cpp:100] Creating Layer relu_bn_resblk32_7\nI0818 13:44:37.328367 22726 net.cpp:434] relu_bn_resblk32_7 <- bn_resblk32_7\nI0818 13:44:37.328377 22726 net.cpp:395] relu_bn_resblk32_7 -> bn_resblk32_7 (in-place)\nI0818 13:44:37.328394 22726 net.cpp:150] Setting up relu_bn_resblk32_7\nI0818 13:44:37.328402 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.328407 22726 net.cpp:165] Memory required for data: 1183233500\nI0818 13:44:37.328411 22726 layer_factory.hpp:77] Creating layer resblk32_7_b\nI0818 13:44:37.328423 22726 net.cpp:100] Creating Layer resblk32_7_b\nI0818 13:44:37.328428 22726 net.cpp:434] resblk32_7_b <- bn_resblk32_7\nI0818 13:44:37.328439 22726 net.cpp:408] resblk32_7_b -> resblk32_7_b\nI0818 13:44:37.328934 22726 net.cpp:150] Setting up resblk32_7_b\nI0818 13:44:37.328949 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.328954 22726 net.cpp:165] Memory required for data: 1187329500\nI0818 13:44:37.328963 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_7_b\nI0818 13:44:37.328971 22726 net.cpp:100] Creating Layer batchNorm_resblk32_7_b\nI0818 13:44:37.328977 22726 net.cpp:434] batchNorm_resblk32_7_b <- resblk32_7_b\nI0818 13:44:37.328989 22726 net.cpp:408] batchNorm_resblk32_7_b -> bn_resblk32_7_b\nI0818 13:44:37.329267 22726 net.cpp:150] Setting up batchNorm_resblk32_7_b\nI0818 13:44:37.329279 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329284 22726 net.cpp:165] Memory required for data: 1191425500\nI0818 13:44:37.329294 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7_b\nI0818 13:44:37.329306 22726 net.cpp:100] Creating Layer scale_resblk32_7_b\nI0818 
13:44:37.329313 22726 net.cpp:434] scale_resblk32_7_b <- bn_resblk32_7_b\nI0818 13:44:37.329320 22726 net.cpp:395] scale_resblk32_7_b -> bn_resblk32_7_b (in-place)\nI0818 13:44:37.329380 22726 layer_factory.hpp:77] Creating layer scale_resblk32_7_b\nI0818 13:44:37.329540 22726 net.cpp:150] Setting up scale_resblk32_7_b\nI0818 13:44:37.329552 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329557 22726 net.cpp:165] Memory required for data: 1195521500\nI0818 13:44:37.329566 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_6_b\nI0818 13:44:37.329577 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_6_b\nI0818 13:44:37.329584 22726 net.cpp:434] sum_sum_bn_resblk32_6_b <- sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split_1\nI0818 13:44:37.329591 22726 net.cpp:434] sum_sum_bn_resblk32_6_b <- bn_resblk32_7_b\nI0818 13:44:37.329599 22726 net.cpp:408] sum_sum_bn_resblk32_6_b -> sum_bn_resblk32_7_b\nI0818 13:44:37.329632 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_6_b\nI0818 13:44:37.329643 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329646 22726 net.cpp:165] Memory required for data: 1199617500\nI0818 13:44:37.329651 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_7_b\nI0818 13:44:37.329658 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_7_b\nI0818 13:44:37.329664 22726 net.cpp:434] relu_sum_bn_resblk32_7_b <- sum_bn_resblk32_7_b\nI0818 13:44:37.329674 22726 net.cpp:395] relu_sum_bn_resblk32_7_b -> sum_bn_resblk32_7_b (in-place)\nI0818 13:44:37.329684 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_7_b\nI0818 13:44:37.329691 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329695 22726 net.cpp:165] Memory required for data: 1203713500\nI0818 13:44:37.329700 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.329706 22726 net.cpp:100] Creating Layer 
sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.329712 22726 net.cpp:434] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split <- sum_bn_resblk32_7_b\nI0818 13:44:37.329720 22726 net.cpp:408] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split -> sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_0\nI0818 13:44:37.329742 22726 net.cpp:408] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split -> sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_1\nI0818 13:44:37.329793 22726 net.cpp:150] Setting up sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split\nI0818 13:44:37.329814 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329823 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.329833 22726 net.cpp:165] Memory required for data: 1211905500\nI0818 13:44:37.329839 22726 layer_factory.hpp:77] Creating layer resblk32_8\nI0818 13:44:37.329850 22726 net.cpp:100] Creating Layer resblk32_8\nI0818 13:44:37.329857 22726 net.cpp:434] resblk32_8 <- sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_0\nI0818 13:44:37.329869 22726 net.cpp:408] resblk32_8 -> resblk32_8\nI0818 13:44:37.330374 22726 net.cpp:150] Setting up resblk32_8\nI0818 13:44:37.330387 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.330392 22726 net.cpp:165] Memory required for data: 1216001500\nI0818 13:44:37.330401 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_8\nI0818 13:44:37.330410 22726 net.cpp:100] Creating Layer batchNorm_resblk32_8\nI0818 13:44:37.330416 22726 net.cpp:434] batchNorm_resblk32_8 <- resblk32_8\nI0818 13:44:37.330427 22726 net.cpp:408] batchNorm_resblk32_8 -> bn_resblk32_8\nI0818 13:44:37.330708 22726 net.cpp:150] Setting up batchNorm_resblk32_8\nI0818 13:44:37.330721 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.330726 22726 net.cpp:165] Memory required for data: 1220097500\nI0818 13:44:37.330736 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8\nI0818 
13:44:37.330747 22726 net.cpp:100] Creating Layer scale_resblk32_8\nI0818 13:44:37.330754 22726 net.cpp:434] scale_resblk32_8 <- bn_resblk32_8\nI0818 13:44:37.330761 22726 net.cpp:395] scale_resblk32_8 -> bn_resblk32_8 (in-place)\nI0818 13:44:37.330826 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8\nI0818 13:44:37.330991 22726 net.cpp:150] Setting up scale_resblk32_8\nI0818 13:44:37.331004 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.331009 22726 net.cpp:165] Memory required for data: 1224193500\nI0818 13:44:37.331018 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk32_8\nI0818 13:44:37.331027 22726 net.cpp:100] Creating Layer relu_bn_resblk32_8\nI0818 13:44:37.331032 22726 net.cpp:434] relu_bn_resblk32_8 <- bn_resblk32_8\nI0818 13:44:37.331043 22726 net.cpp:395] relu_bn_resblk32_8 -> bn_resblk32_8 (in-place)\nI0818 13:44:37.331053 22726 net.cpp:150] Setting up relu_bn_resblk32_8\nI0818 13:44:37.331058 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.331063 22726 net.cpp:165] Memory required for data: 1228289500\nI0818 13:44:37.331068 22726 layer_factory.hpp:77] Creating layer resblk32_8_b\nI0818 13:44:37.331081 22726 net.cpp:100] Creating Layer resblk32_8_b\nI0818 13:44:37.331087 22726 net.cpp:434] resblk32_8_b <- bn_resblk32_8\nI0818 13:44:37.331099 22726 net.cpp:408] resblk32_8_b -> resblk32_8_b\nI0818 13:44:37.332586 22726 net.cpp:150] Setting up resblk32_8_b\nI0818 13:44:37.332603 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.332608 22726 net.cpp:165] Memory required for data: 1232385500\nI0818 13:44:37.332617 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk32_8_b\nI0818 13:44:37.332628 22726 net.cpp:100] Creating Layer batchNorm_resblk32_8_b\nI0818 13:44:37.332633 22726 net.cpp:434] batchNorm_resblk32_8_b <- resblk32_8_b\nI0818 13:44:37.332645 22726 net.cpp:408] batchNorm_resblk32_8_b -> bn_resblk32_8_b\nI0818 13:44:37.332921 22726 net.cpp:150] Setting up 
batchNorm_resblk32_8_b\nI0818 13:44:37.332934 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.332939 22726 net.cpp:165] Memory required for data: 1236481500\nI0818 13:44:37.332994 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8_b\nI0818 13:44:37.333008 22726 net.cpp:100] Creating Layer scale_resblk32_8_b\nI0818 13:44:37.333014 22726 net.cpp:434] scale_resblk32_8_b <- bn_resblk32_8_b\nI0818 13:44:37.333021 22726 net.cpp:395] scale_resblk32_8_b -> bn_resblk32_8_b (in-place)\nI0818 13:44:37.333086 22726 layer_factory.hpp:77] Creating layer scale_resblk32_8_b\nI0818 13:44:37.333240 22726 net.cpp:150] Setting up scale_resblk32_8_b\nI0818 13:44:37.333256 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.333261 22726 net.cpp:165] Memory required for data: 1240577500\nI0818 13:44:37.333277 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk32_7_b\nI0818 13:44:37.333287 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk32_7_b\nI0818 13:44:37.333293 22726 net.cpp:434] sum_sum_bn_resblk32_7_b <- sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split_1\nI0818 13:44:37.333302 22726 net.cpp:434] sum_sum_bn_resblk32_7_b <- bn_resblk32_8_b\nI0818 13:44:37.333312 22726 net.cpp:408] sum_sum_bn_resblk32_7_b -> sum_bn_resblk32_8_b\nI0818 13:44:37.333341 22726 net.cpp:150] Setting up sum_sum_bn_resblk32_7_b\nI0818 13:44:37.333351 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.333356 22726 net.cpp:165] Memory required for data: 1244673500\nI0818 13:44:37.333360 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk32_8_b\nI0818 13:44:37.333369 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk32_8_b\nI0818 13:44:37.333377 22726 net.cpp:434] relu_sum_bn_resblk32_8_b <- sum_bn_resblk32_8_b\nI0818 13:44:37.333384 22726 net.cpp:395] relu_sum_bn_resblk32_8_b -> sum_bn_resblk32_8_b (in-place)\nI0818 13:44:37.333395 22726 net.cpp:150] Setting up relu_sum_bn_resblk32_8_b\nI0818 13:44:37.333400 22726 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.333405 22726 net.cpp:165] Memory required for data: 1248769500\nI0818 13:44:37.333410 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.333416 22726 net.cpp:100] Creating Layer sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.333422 22726 net.cpp:434] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split <- sum_bn_resblk32_8_b\nI0818 13:44:37.333432 22726 net.cpp:408] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split -> sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_0\nI0818 13:44:37.333442 22726 net.cpp:408] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split -> sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_1\nI0818 13:44:37.333490 22726 net.cpp:150] Setting up sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split\nI0818 13:44:37.333503 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.333508 22726 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:37.333513 22726 net.cpp:165] Memory required for data: 1256961500\nI0818 13:44:37.333518 22726 layer_factory.hpp:77] Creating layer resblk64\nI0818 13:44:37.333531 22726 net.cpp:100] Creating Layer resblk64\nI0818 13:44:37.333539 22726 net.cpp:434] resblk64 <- sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_0\nI0818 13:44:37.333549 22726 net.cpp:408] resblk64 -> resblk64\nI0818 13:44:37.334056 22726 net.cpp:150] Setting up resblk64\nI0818 13:44:37.334071 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.334076 22726 net.cpp:165] Memory required for data: 1257985500\nI0818 13:44:37.334085 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64\nI0818 13:44:37.334096 22726 net.cpp:100] Creating Layer batchNorm_resblk64\nI0818 13:44:37.334102 22726 net.cpp:434] batchNorm_resblk64 <- resblk64\nI0818 13:44:37.334110 22726 net.cpp:408] batchNorm_resblk64 -> bn_resblk64\nI0818 13:44:37.334388 22726 net.cpp:150] 
Setting up batchNorm_resblk64\nI0818 13:44:37.334400 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.334405 22726 net.cpp:165] Memory required for data: 1259009500\nI0818 13:44:37.334415 22726 layer_factory.hpp:77] Creating layer scale_resblk64\nI0818 13:44:37.334426 22726 net.cpp:100] Creating Layer scale_resblk64\nI0818 13:44:37.334434 22726 net.cpp:434] scale_resblk64 <- bn_resblk64\nI0818 13:44:37.334441 22726 net.cpp:395] scale_resblk64 -> bn_resblk64 (in-place)\nI0818 13:44:37.334501 22726 layer_factory.hpp:77] Creating layer scale_resblk64\nI0818 13:44:37.334672 22726 net.cpp:150] Setting up scale_resblk64\nI0818 13:44:37.334686 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.334691 22726 net.cpp:165] Memory required for data: 1260033500\nI0818 13:44:37.334699 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64\nI0818 13:44:37.334709 22726 net.cpp:100] Creating Layer relu_bn_resblk64\nI0818 13:44:37.334717 22726 net.cpp:434] relu_bn_resblk64 <- bn_resblk64\nI0818 13:44:37.334730 22726 net.cpp:395] relu_bn_resblk64 -> bn_resblk64 (in-place)\nI0818 13:44:37.334740 22726 net.cpp:150] Setting up relu_bn_resblk64\nI0818 13:44:37.334748 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.334753 22726 net.cpp:165] Memory required for data: 1261057500\nI0818 13:44:37.334756 22726 layer_factory.hpp:77] Creating layer resblk64_b\nI0818 13:44:37.334770 22726 net.cpp:100] Creating Layer resblk64_b\nI0818 13:44:37.334776 22726 net.cpp:434] resblk64_b <- bn_resblk64\nI0818 13:44:37.334787 22726 net.cpp:408] resblk64_b -> resblk64_b\nI0818 13:44:37.335288 22726 net.cpp:150] Setting up resblk64_b\nI0818 13:44:37.335302 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.335307 22726 net.cpp:165] Memory required for data: 1262081500\nI0818 13:44:37.335315 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_b\nI0818 13:44:37.335325 22726 net.cpp:100] Creating Layer batchNorm_resblk64_b\nI0818 
13:44:37.335330 22726 net.cpp:434] batchNorm_resblk64_b <- resblk64_b\nI0818 13:44:37.335341 22726 net.cpp:408] batchNorm_resblk64_b -> bn_resblk64_b\nI0818 13:44:37.335619 22726 net.cpp:150] Setting up batchNorm_resblk64_b\nI0818 13:44:37.335635 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.335640 22726 net.cpp:165] Memory required for data: 1263105500\nI0818 13:44:37.335650 22726 layer_factory.hpp:77] Creating layer scale_resblk64_b\nI0818 13:44:37.335659 22726 net.cpp:100] Creating Layer scale_resblk64_b\nI0818 13:44:37.335666 22726 net.cpp:434] scale_resblk64_b <- bn_resblk64_b\nI0818 13:44:37.335674 22726 net.cpp:395] scale_resblk64_b -> bn_resblk64_b (in-place)\nI0818 13:44:37.335732 22726 layer_factory.hpp:77] Creating layer scale_resblk64_b\nI0818 13:44:37.335904 22726 net.cpp:150] Setting up scale_resblk64_b\nI0818 13:44:37.335917 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.335922 22726 net.cpp:165] Memory required for data: 1264129500\nI0818 13:44:37.335932 22726 layer_factory.hpp:77] Creating layer avePooling_resblk64\nI0818 13:44:37.335944 22726 net.cpp:100] Creating Layer avePooling_resblk64\nI0818 13:44:37.335950 22726 net.cpp:434] avePooling_resblk64 <- sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split_1\nI0818 13:44:37.335959 22726 net.cpp:408] avePooling_resblk64 -> avgPool_resblk64\nI0818 13:44:37.335997 22726 net.cpp:150] Setting up avePooling_resblk64\nI0818 13:44:37.336007 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.336012 22726 net.cpp:165] Memory required for data: 1265153500\nI0818 13:44:37.336017 22726 layer_factory.hpp:77] Creating layer sum_avgPool_resblk64\nI0818 13:44:37.336025 22726 net.cpp:100] Creating Layer sum_avgPool_resblk64\nI0818 13:44:37.336031 22726 net.cpp:434] sum_avgPool_resblk64 <- avgPool_resblk64\nI0818 13:44:37.336038 22726 net.cpp:434] sum_avgPool_resblk64 <- bn_resblk64_b\nI0818 13:44:37.336046 22726 net.cpp:408] sum_avgPool_resblk64 -> 
sum_bn_resblk64_b\nI0818 13:44:37.336082 22726 net.cpp:150] Setting up sum_avgPool_resblk64\nI0818 13:44:37.336094 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.336099 22726 net.cpp:165] Memory required for data: 1266177500\nI0818 13:44:37.336104 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_b\nI0818 13:44:37.336112 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_b\nI0818 13:44:37.336117 22726 net.cpp:434] relu_sum_bn_resblk64_b <- sum_bn_resblk64_b\nI0818 13:44:37.336124 22726 net.cpp:395] relu_sum_bn_resblk64_b -> sum_bn_resblk64_b (in-place)\nI0818 13:44:37.336133 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_b\nI0818 13:44:37.336140 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.336144 22726 net.cpp:165] Memory required for data: 1267201500\nI0818 13:44:37.336149 22726 layer_factory.hpp:77] Creating layer zeros_sum_bn_resblk64_b\nI0818 13:44:37.336158 22726 net.cpp:100] Creating Layer zeros_sum_bn_resblk64_b\nI0818 13:44:37.336169 22726 net.cpp:408] zeros_sum_bn_resblk64_b -> zeros_sum_bn_resblk64_b\nI0818 13:44:37.337385 22726 net.cpp:150] Setting up zeros_sum_bn_resblk64_b\nI0818 13:44:37.337410 22726 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:37.337415 22726 net.cpp:165] Memory required for data: 1268225500\nI0818 13:44:37.337421 22726 layer_factory.hpp:77] Creating layer CC_sum_bn_resblk64_b\nI0818 13:44:37.337433 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk64_b\nI0818 13:44:37.337440 22726 net.cpp:434] CC_sum_bn_resblk64_b <- sum_bn_resblk64_b\nI0818 13:44:37.337448 22726 net.cpp:434] CC_sum_bn_resblk64_b <- zeros_sum_bn_resblk64_b\nI0818 13:44:37.337456 22726 net.cpp:408] CC_sum_bn_resblk64_b -> CC_sum_bn_resblk64_b\nI0818 13:44:37.337499 22726 net.cpp:150] Setting up CC_sum_bn_resblk64_b\nI0818 13:44:37.337514 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.337519 22726 net.cpp:165] Memory required for data: 1270273500\nI0818 13:44:37.337524 22726 
layer_factory.hpp:77] Creating layer CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.337532 22726 net.cpp:100] Creating Layer CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.337538 22726 net.cpp:434] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split <- CC_sum_bn_resblk64_b\nI0818 13:44:37.337548 22726 net.cpp:408] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split -> CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_0\nI0818 13:44:37.337558 22726 net.cpp:408] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split -> CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_1\nI0818 13:44:37.337610 22726 net.cpp:150] Setting up CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split\nI0818 13:44:37.337625 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.337630 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.337635 22726 net.cpp:165] Memory required for data: 1274369500\nI0818 13:44:37.337641 22726 layer_factory.hpp:77] Creating layer resblk64_1\nI0818 13:44:37.337651 22726 net.cpp:100] Creating Layer resblk64_1\nI0818 13:44:37.337657 22726 net.cpp:434] resblk64_1 <- CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_0\nI0818 13:44:37.337669 22726 net.cpp:408] resblk64_1 -> resblk64_1\nI0818 13:44:37.338712 22726 net.cpp:150] Setting up resblk64_1\nI0818 13:44:37.338727 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.338732 22726 net.cpp:165] Memory required for data: 1276417500\nI0818 13:44:37.338742 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_1\nI0818 13:44:37.338753 22726 net.cpp:100] Creating Layer batchNorm_resblk64_1\nI0818 13:44:37.338760 22726 net.cpp:434] batchNorm_resblk64_1 <- resblk64_1\nI0818 13:44:37.338769 22726 net.cpp:408] batchNorm_resblk64_1 -> bn_resblk64_1\nI0818 13:44:37.339054 22726 net.cpp:150] Setting up batchNorm_resblk64_1\nI0818 13:44:37.339068 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.339073 22726 net.cpp:165] Memory required for 
data: 1278465500\nI0818 13:44:37.339083 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1\nI0818 13:44:37.339092 22726 net.cpp:100] Creating Layer scale_resblk64_1\nI0818 13:44:37.339098 22726 net.cpp:434] scale_resblk64_1 <- bn_resblk64_1\nI0818 13:44:37.339107 22726 net.cpp:395] scale_resblk64_1 -> bn_resblk64_1 (in-place)\nI0818 13:44:37.339169 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1\nI0818 13:44:37.339332 22726 net.cpp:150] Setting up scale_resblk64_1\nI0818 13:44:37.339347 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.339352 22726 net.cpp:165] Memory required for data: 1280513500\nI0818 13:44:37.339362 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_1\nI0818 13:44:37.339370 22726 net.cpp:100] Creating Layer relu_bn_resblk64_1\nI0818 13:44:37.339376 22726 net.cpp:434] relu_bn_resblk64_1 <- bn_resblk64_1\nI0818 13:44:37.339383 22726 net.cpp:395] relu_bn_resblk64_1 -> bn_resblk64_1 (in-place)\nI0818 13:44:37.339392 22726 net.cpp:150] Setting up relu_bn_resblk64_1\nI0818 13:44:37.339399 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.339403 22726 net.cpp:165] Memory required for data: 1282561500\nI0818 13:44:37.339408 22726 layer_factory.hpp:77] Creating layer resblk64_1_b\nI0818 13:44:37.339422 22726 net.cpp:100] Creating Layer resblk64_1_b\nI0818 13:44:37.339439 22726 net.cpp:434] resblk64_1_b <- bn_resblk64_1\nI0818 13:44:37.339452 22726 net.cpp:408] resblk64_1_b -> resblk64_1_b\nI0818 13:44:37.340507 22726 net.cpp:150] Setting up resblk64_1_b\nI0818 13:44:37.340522 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.340526 22726 net.cpp:165] Memory required for data: 1284609500\nI0818 13:44:37.340535 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_1_b\nI0818 13:44:37.340544 22726 net.cpp:100] Creating Layer batchNorm_resblk64_1_b\nI0818 13:44:37.340553 22726 net.cpp:434] batchNorm_resblk64_1_b <- resblk64_1_b\nI0818 13:44:37.340562 22726 net.cpp:408] 
batchNorm_resblk64_1_b -> bn_resblk64_1_b\nI0818 13:44:37.340837 22726 net.cpp:150] Setting up batchNorm_resblk64_1_b\nI0818 13:44:37.340850 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.340855 22726 net.cpp:165] Memory required for data: 1286657500\nI0818 13:44:37.340867 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1_b\nI0818 13:44:37.340875 22726 net.cpp:100] Creating Layer scale_resblk64_1_b\nI0818 13:44:37.340881 22726 net.cpp:434] scale_resblk64_1_b <- bn_resblk64_1_b\nI0818 13:44:37.340891 22726 net.cpp:395] scale_resblk64_1_b -> bn_resblk64_1_b (in-place)\nI0818 13:44:37.340951 22726 layer_factory.hpp:77] Creating layer scale_resblk64_1_b\nI0818 13:44:37.341114 22726 net.cpp:150] Setting up scale_resblk64_1_b\nI0818 13:44:37.341126 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.341131 22726 net.cpp:165] Memory required for data: 1288705500\nI0818 13:44:37.341140 22726 layer_factory.hpp:77] Creating layer sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.341152 22726 net.cpp:100] Creating Layer sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.341159 22726 net.cpp:434] sum_CC_sum_bn_resblk64_b <- CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split_1\nI0818 13:44:37.341166 22726 net.cpp:434] sum_CC_sum_bn_resblk64_b <- bn_resblk64_1_b\nI0818 13:44:37.341174 22726 net.cpp:408] sum_CC_sum_bn_resblk64_b -> sum_bn_resblk64_1_b\nI0818 13:44:37.341213 22726 net.cpp:150] Setting up sum_CC_sum_bn_resblk64_b\nI0818 13:44:37.341224 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.341229 22726 net.cpp:165] Memory required for data: 1290753500\nI0818 13:44:37.341234 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_1_b\nI0818 13:44:37.341241 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_1_b\nI0818 13:44:37.341248 22726 net.cpp:434] relu_sum_bn_resblk64_1_b <- sum_bn_resblk64_1_b\nI0818 13:44:37.341256 22726 net.cpp:395] relu_sum_bn_resblk64_1_b -> sum_bn_resblk64_1_b (in-place)\nI0818 13:44:37.341266 
22726 net.cpp:150] Setting up relu_sum_bn_resblk64_1_b\nI0818 13:44:37.341274 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.341277 22726 net.cpp:165] Memory required for data: 1292801500\nI0818 13:44:37.341282 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.341290 22726 net.cpp:100] Creating Layer sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.341295 22726 net.cpp:434] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split <- sum_bn_resblk64_1_b\nI0818 13:44:37.341302 22726 net.cpp:408] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split -> sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_0\nI0818 13:44:37.341311 22726 net.cpp:408] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split -> sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_1\nI0818 13:44:37.341363 22726 net.cpp:150] Setting up sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split\nI0818 13:44:37.341374 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.341380 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.341385 22726 net.cpp:165] Memory required for data: 1296897500\nI0818 13:44:37.341390 22726 layer_factory.hpp:77] Creating layer resblk64_2\nI0818 13:44:37.341401 22726 net.cpp:100] Creating Layer resblk64_2\nI0818 13:44:37.341408 22726 net.cpp:434] resblk64_2 <- sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_0\nI0818 13:44:37.341426 22726 net.cpp:408] resblk64_2 -> resblk64_2\nI0818 13:44:37.342479 22726 net.cpp:150] Setting up resblk64_2\nI0818 13:44:37.342494 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.342499 22726 net.cpp:165] Memory required for data: 1298945500\nI0818 13:44:37.342509 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_2\nI0818 13:44:37.342523 22726 net.cpp:100] Creating Layer batchNorm_resblk64_2\nI0818 13:44:37.342530 22726 net.cpp:434] batchNorm_resblk64_2 <- resblk64_2\nI0818 13:44:37.342538 
22726 net.cpp:408] batchNorm_resblk64_2 -> bn_resblk64_2\nI0818 13:44:37.342815 22726 net.cpp:150] Setting up batchNorm_resblk64_2\nI0818 13:44:37.342828 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.342833 22726 net.cpp:165] Memory required for data: 1300993500\nI0818 13:44:37.342844 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2\nI0818 13:44:37.342852 22726 net.cpp:100] Creating Layer scale_resblk64_2\nI0818 13:44:37.342859 22726 net.cpp:434] scale_resblk64_2 <- bn_resblk64_2\nI0818 13:44:37.342866 22726 net.cpp:395] scale_resblk64_2 -> bn_resblk64_2 (in-place)\nI0818 13:44:37.342928 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2\nI0818 13:44:37.343086 22726 net.cpp:150] Setting up scale_resblk64_2\nI0818 13:44:37.343101 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.343106 22726 net.cpp:165] Memory required for data: 1303041500\nI0818 13:44:37.343116 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_2\nI0818 13:44:37.343123 22726 net.cpp:100] Creating Layer relu_bn_resblk64_2\nI0818 13:44:37.343129 22726 net.cpp:434] relu_bn_resblk64_2 <- bn_resblk64_2\nI0818 13:44:37.343137 22726 net.cpp:395] relu_bn_resblk64_2 -> bn_resblk64_2 (in-place)\nI0818 13:44:37.343147 22726 net.cpp:150] Setting up relu_bn_resblk64_2\nI0818 13:44:37.343153 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.343158 22726 net.cpp:165] Memory required for data: 1305089500\nI0818 13:44:37.343163 22726 layer_factory.hpp:77] Creating layer resblk64_2_b\nI0818 13:44:37.343178 22726 net.cpp:100] Creating Layer resblk64_2_b\nI0818 13:44:37.343183 22726 net.cpp:434] resblk64_2_b <- bn_resblk64_2\nI0818 13:44:37.343192 22726 net.cpp:408] resblk64_2_b -> resblk64_2_b\nI0818 13:44:37.344295 22726 net.cpp:150] Setting up resblk64_2_b\nI0818 13:44:37.344312 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.344317 22726 net.cpp:165] Memory required for data: 1307137500\nI0818 13:44:37.344326 22726 
layer_factory.hpp:77] Creating layer batchNorm_resblk64_2_b\nI0818 13:44:37.344336 22726 net.cpp:100] Creating Layer batchNorm_resblk64_2_b\nI0818 13:44:37.344341 22726 net.cpp:434] batchNorm_resblk64_2_b <- resblk64_2_b\nI0818 13:44:37.344353 22726 net.cpp:408] batchNorm_resblk64_2_b -> bn_resblk64_2_b\nI0818 13:44:37.344624 22726 net.cpp:150] Setting up batchNorm_resblk64_2_b\nI0818 13:44:37.344637 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.344642 22726 net.cpp:165] Memory required for data: 1309185500\nI0818 13:44:37.344652 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2_b\nI0818 13:44:37.344660 22726 net.cpp:100] Creating Layer scale_resblk64_2_b\nI0818 13:44:37.344667 22726 net.cpp:434] scale_resblk64_2_b <- bn_resblk64_2_b\nI0818 13:44:37.344677 22726 net.cpp:395] scale_resblk64_2_b -> bn_resblk64_2_b (in-place)\nI0818 13:44:37.344739 22726 layer_factory.hpp:77] Creating layer scale_resblk64_2_b\nI0818 13:44:37.344915 22726 net.cpp:150] Setting up scale_resblk64_2_b\nI0818 13:44:37.344928 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.344933 22726 net.cpp:165] Memory required for data: 1311233500\nI0818 13:44:37.344944 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_1_b\nI0818 13:44:37.344954 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_1_b\nI0818 13:44:37.344961 22726 net.cpp:434] sum_sum_bn_resblk64_1_b <- sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split_1\nI0818 13:44:37.344969 22726 net.cpp:434] sum_sum_bn_resblk64_1_b <- bn_resblk64_2_b\nI0818 13:44:37.344977 22726 net.cpp:408] sum_sum_bn_resblk64_1_b -> sum_bn_resblk64_2_b\nI0818 13:44:37.345022 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_1_b\nI0818 13:44:37.345032 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.345037 22726 net.cpp:165] Memory required for data: 1313281500\nI0818 13:44:37.345042 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_2_b\nI0818 13:44:37.345051 22726 
net.cpp:100] Creating Layer relu_sum_bn_resblk64_2_b\nI0818 13:44:37.345055 22726 net.cpp:434] relu_sum_bn_resblk64_2_b <- sum_bn_resblk64_2_b\nI0818 13:44:37.345065 22726 net.cpp:395] relu_sum_bn_resblk64_2_b -> sum_bn_resblk64_2_b (in-place)\nI0818 13:44:37.345075 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_2_b\nI0818 13:44:37.345082 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.345086 22726 net.cpp:165] Memory required for data: 1315329500\nI0818 13:44:37.345091 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.345098 22726 net.cpp:100] Creating Layer sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.345103 22726 net.cpp:434] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split <- sum_bn_resblk64_2_b\nI0818 13:44:37.345110 22726 net.cpp:408] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split -> sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_0\nI0818 13:44:37.345120 22726 net.cpp:408] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split -> sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_1\nI0818 13:44:37.345170 22726 net.cpp:150] Setting up sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split\nI0818 13:44:37.345182 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.345190 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.345193 22726 net.cpp:165] Memory required for data: 1319425500\nI0818 13:44:37.345198 22726 layer_factory.hpp:77] Creating layer resblk64_3\nI0818 13:44:37.345209 22726 net.cpp:100] Creating Layer resblk64_3\nI0818 13:44:37.345216 22726 net.cpp:434] resblk64_3 <- sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_0\nI0818 13:44:37.345227 22726 net.cpp:408] resblk64_3 -> resblk64_3\nI0818 13:44:37.346274 22726 net.cpp:150] Setting up resblk64_3\nI0818 13:44:37.346289 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.346295 22726 net.cpp:165] Memory required for data: 
1321473500\nI0818 13:44:37.346303 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_3\nI0818 13:44:37.346315 22726 net.cpp:100] Creating Layer batchNorm_resblk64_3\nI0818 13:44:37.346323 22726 net.cpp:434] batchNorm_resblk64_3 <- resblk64_3\nI0818 13:44:37.346330 22726 net.cpp:408] batchNorm_resblk64_3 -> bn_resblk64_3\nI0818 13:44:37.346603 22726 net.cpp:150] Setting up batchNorm_resblk64_3\nI0818 13:44:37.346616 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.346621 22726 net.cpp:165] Memory required for data: 1323521500\nI0818 13:44:37.346632 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3\nI0818 13:44:37.346640 22726 net.cpp:100] Creating Layer scale_resblk64_3\nI0818 13:44:37.346647 22726 net.cpp:434] scale_resblk64_3 <- bn_resblk64_3\nI0818 13:44:37.346654 22726 net.cpp:395] scale_resblk64_3 -> bn_resblk64_3 (in-place)\nI0818 13:44:37.346715 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3\nI0818 13:44:37.346881 22726 net.cpp:150] Setting up scale_resblk64_3\nI0818 13:44:37.346897 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.346902 22726 net.cpp:165] Memory required for data: 1325569500\nI0818 13:44:37.346911 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_3\nI0818 13:44:37.346920 22726 net.cpp:100] Creating Layer relu_bn_resblk64_3\nI0818 13:44:37.346925 22726 net.cpp:434] relu_bn_resblk64_3 <- bn_resblk64_3\nI0818 13:44:37.346933 22726 net.cpp:395] relu_bn_resblk64_3 -> bn_resblk64_3 (in-place)\nI0818 13:44:37.346942 22726 net.cpp:150] Setting up relu_bn_resblk64_3\nI0818 13:44:37.346949 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.346953 22726 net.cpp:165] Memory required for data: 1327617500\nI0818 13:44:37.346958 22726 layer_factory.hpp:77] Creating layer resblk64_3_b\nI0818 13:44:37.346982 22726 net.cpp:100] Creating Layer resblk64_3_b\nI0818 13:44:37.346988 22726 net.cpp:434] resblk64_3_b <- bn_resblk64_3\nI0818 13:44:37.346997 22726 net.cpp:408] 
resblk64_3_b -> resblk64_3_b\nI0818 13:44:37.349017 22726 net.cpp:150] Setting up resblk64_3_b\nI0818 13:44:37.349035 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349040 22726 net.cpp:165] Memory required for data: 1329665500\nI0818 13:44:37.349050 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_3_b\nI0818 13:44:37.349058 22726 net.cpp:100] Creating Layer batchNorm_resblk64_3_b\nI0818 13:44:37.349064 22726 net.cpp:434] batchNorm_resblk64_3_b <- resblk64_3_b\nI0818 13:44:37.349076 22726 net.cpp:408] batchNorm_resblk64_3_b -> bn_resblk64_3_b\nI0818 13:44:37.349359 22726 net.cpp:150] Setting up batchNorm_resblk64_3_b\nI0818 13:44:37.349370 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349375 22726 net.cpp:165] Memory required for data: 1331713500\nI0818 13:44:37.349386 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3_b\nI0818 13:44:37.349395 22726 net.cpp:100] Creating Layer scale_resblk64_3_b\nI0818 13:44:37.349401 22726 net.cpp:434] scale_resblk64_3_b <- bn_resblk64_3_b\nI0818 13:44:37.349409 22726 net.cpp:395] scale_resblk64_3_b -> bn_resblk64_3_b (in-place)\nI0818 13:44:37.349472 22726 layer_factory.hpp:77] Creating layer scale_resblk64_3_b\nI0818 13:44:37.349632 22726 net.cpp:150] Setting up scale_resblk64_3_b\nI0818 13:44:37.349644 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349649 22726 net.cpp:165] Memory required for data: 1333761500\nI0818 13:44:37.349658 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_2_b\nI0818 13:44:37.349670 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_2_b\nI0818 13:44:37.349678 22726 net.cpp:434] sum_sum_bn_resblk64_2_b <- sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split_1\nI0818 13:44:37.349684 22726 net.cpp:434] sum_sum_bn_resblk64_2_b <- bn_resblk64_3_b\nI0818 13:44:37.349692 22726 net.cpp:408] sum_sum_bn_resblk64_2_b -> sum_bn_resblk64_3_b\nI0818 13:44:37.349735 22726 net.cpp:150] Setting up 
sum_sum_bn_resblk64_2_b\nI0818 13:44:37.349745 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349750 22726 net.cpp:165] Memory required for data: 1335809500\nI0818 13:44:37.349756 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_3_b\nI0818 13:44:37.349763 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_3_b\nI0818 13:44:37.349769 22726 net.cpp:434] relu_sum_bn_resblk64_3_b <- sum_bn_resblk64_3_b\nI0818 13:44:37.349776 22726 net.cpp:395] relu_sum_bn_resblk64_3_b -> sum_bn_resblk64_3_b (in-place)\nI0818 13:44:37.349786 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_3_b\nI0818 13:44:37.349792 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349797 22726 net.cpp:165] Memory required for data: 1337857500\nI0818 13:44:37.349802 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.349814 22726 net.cpp:100] Creating Layer sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.349820 22726 net.cpp:434] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split <- sum_bn_resblk64_3_b\nI0818 13:44:37.349831 22726 net.cpp:408] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split -> sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_0\nI0818 13:44:37.349843 22726 net.cpp:408] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split -> sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_1\nI0818 13:44:37.349891 22726 net.cpp:150] Setting up sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split\nI0818 13:44:37.349902 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349910 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.349913 22726 net.cpp:165] Memory required for data: 1341953500\nI0818 13:44:37.349918 22726 layer_factory.hpp:77] Creating layer resblk64_4\nI0818 13:44:37.349933 22726 net.cpp:100] Creating Layer resblk64_4\nI0818 13:44:37.349941 22726 net.cpp:434] resblk64_4 <- 
sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_0\nI0818 13:44:37.349958 22726 net.cpp:408] resblk64_4 -> resblk64_4\nI0818 13:44:37.350992 22726 net.cpp:150] Setting up resblk64_4\nI0818 13:44:37.351007 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.351012 22726 net.cpp:165] Memory required for data: 1344001500\nI0818 13:44:37.351020 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_4\nI0818 13:44:37.351032 22726 net.cpp:100] Creating Layer batchNorm_resblk64_4\nI0818 13:44:37.351038 22726 net.cpp:434] batchNorm_resblk64_4 <- resblk64_4\nI0818 13:44:37.351047 22726 net.cpp:408] batchNorm_resblk64_4 -> bn_resblk64_4\nI0818 13:44:37.351323 22726 net.cpp:150] Setting up batchNorm_resblk64_4\nI0818 13:44:37.351336 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.351341 22726 net.cpp:165] Memory required for data: 1346049500\nI0818 13:44:37.351351 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4\nI0818 13:44:37.351362 22726 net.cpp:100] Creating Layer scale_resblk64_4\nI0818 13:44:37.351369 22726 net.cpp:434] scale_resblk64_4 <- bn_resblk64_4\nI0818 13:44:37.351377 22726 net.cpp:395] scale_resblk64_4 -> bn_resblk64_4 (in-place)\nI0818 13:44:37.351441 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4\nI0818 13:44:37.351606 22726 net.cpp:150] Setting up scale_resblk64_4\nI0818 13:44:37.351619 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.351624 22726 net.cpp:165] Memory required for data: 1348097500\nI0818 13:44:37.351634 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_4\nI0818 13:44:37.351644 22726 net.cpp:100] Creating Layer relu_bn_resblk64_4\nI0818 13:44:37.351651 22726 net.cpp:434] relu_bn_resblk64_4 <- bn_resblk64_4\nI0818 13:44:37.351658 22726 net.cpp:395] relu_bn_resblk64_4 -> bn_resblk64_4 (in-place)\nI0818 13:44:37.351668 22726 net.cpp:150] Setting up relu_bn_resblk64_4\nI0818 13:44:37.351675 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.351680 22726 net.cpp:165] Memory required for data: 1350145500\nI0818 13:44:37.351685 22726 layer_factory.hpp:77] Creating layer resblk64_4_b\nI0818 13:44:37.351698 22726 net.cpp:100] Creating Layer resblk64_4_b\nI0818 13:44:37.351704 22726 net.cpp:434] resblk64_4_b <- bn_resblk64_4\nI0818 13:44:37.351716 22726 net.cpp:408] resblk64_4_b -> resblk64_4_b\nI0818 13:44:37.352747 22726 net.cpp:150] Setting up resblk64_4_b\nI0818 13:44:37.352762 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.352767 22726 net.cpp:165] Memory required for data: 1352193500\nI0818 13:44:37.352777 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_4_b\nI0818 13:44:37.352784 22726 net.cpp:100] Creating Layer batchNorm_resblk64_4_b\nI0818 13:44:37.352792 22726 net.cpp:434] batchNorm_resblk64_4_b <- resblk64_4_b\nI0818 13:44:37.352802 22726 net.cpp:408] batchNorm_resblk64_4_b -> bn_resblk64_4_b\nI0818 13:44:37.353085 22726 net.cpp:150] Setting up batchNorm_resblk64_4_b\nI0818 13:44:37.353101 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.353106 22726 net.cpp:165] Memory required for data: 1354241500\nI0818 13:44:37.353116 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4_b\nI0818 13:44:37.353126 22726 net.cpp:100] Creating Layer scale_resblk64_4_b\nI0818 13:44:37.353132 22726 net.cpp:434] scale_resblk64_4_b <- bn_resblk64_4_b\nI0818 13:44:37.353138 22726 net.cpp:395] scale_resblk64_4_b -> bn_resblk64_4_b (in-place)\nI0818 13:44:37.353197 22726 layer_factory.hpp:77] Creating layer scale_resblk64_4_b\nI0818 13:44:37.353359 22726 net.cpp:150] Setting up scale_resblk64_4_b\nI0818 13:44:37.353371 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.353376 22726 net.cpp:165] Memory required for data: 1356289500\nI0818 13:44:37.353385 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_3_b\nI0818 13:44:37.353396 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_3_b\nI0818 13:44:37.353404 22726 net.cpp:434] 
sum_sum_bn_resblk64_3_b <- sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split_1\nI0818 13:44:37.353411 22726 net.cpp:434] sum_sum_bn_resblk64_3_b <- bn_resblk64_4_b\nI0818 13:44:37.353426 22726 net.cpp:408] sum_sum_bn_resblk64_3_b -> sum_bn_resblk64_4_b\nI0818 13:44:37.353464 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_3_b\nI0818 13:44:37.353474 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.353478 22726 net.cpp:165] Memory required for data: 1358337500\nI0818 13:44:37.353483 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_4_b\nI0818 13:44:37.353492 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_4_b\nI0818 13:44:37.353497 22726 net.cpp:434] relu_sum_bn_resblk64_4_b <- sum_bn_resblk64_4_b\nI0818 13:44:37.353504 22726 net.cpp:395] relu_sum_bn_resblk64_4_b -> sum_bn_resblk64_4_b (in-place)\nI0818 13:44:37.353513 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_4_b\nI0818 13:44:37.353520 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.353525 22726 net.cpp:165] Memory required for data: 1360385500\nI0818 13:44:37.353529 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.353536 22726 net.cpp:100] Creating Layer sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.353541 22726 net.cpp:434] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split <- sum_bn_resblk64_4_b\nI0818 13:44:37.353551 22726 net.cpp:408] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split -> sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_0\nI0818 13:44:37.353561 22726 net.cpp:408] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split -> sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_1\nI0818 13:44:37.353608 22726 net.cpp:150] Setting up sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split\nI0818 13:44:37.353621 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.353626 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.353631 22726 net.cpp:165] Memory required for data: 1364481500\nI0818 13:44:37.353636 22726 layer_factory.hpp:77] Creating layer resblk64_5\nI0818 13:44:37.353651 22726 net.cpp:100] Creating Layer resblk64_5\nI0818 13:44:37.353657 22726 net.cpp:434] resblk64_5 <- sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_0\nI0818 13:44:37.353667 22726 net.cpp:408] resblk64_5 -> resblk64_5\nI0818 13:44:37.354724 22726 net.cpp:150] Setting up resblk64_5\nI0818 13:44:37.354740 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.354745 22726 net.cpp:165] Memory required for data: 1366529500\nI0818 13:44:37.354754 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_5\nI0818 13:44:37.354765 22726 net.cpp:100] Creating Layer batchNorm_resblk64_5\nI0818 13:44:37.354773 22726 net.cpp:434] batchNorm_resblk64_5 <- resblk64_5\nI0818 13:44:37.354781 22726 net.cpp:408] batchNorm_resblk64_5 -> bn_resblk64_5\nI0818 13:44:37.355063 22726 net.cpp:150] Setting up batchNorm_resblk64_5\nI0818 13:44:37.355077 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.355082 22726 net.cpp:165] Memory required for data: 1368577500\nI0818 13:44:37.355093 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5\nI0818 13:44:37.355106 22726 net.cpp:100] Creating Layer scale_resblk64_5\nI0818 13:44:37.355113 22726 net.cpp:434] scale_resblk64_5 <- bn_resblk64_5\nI0818 13:44:37.355121 22726 net.cpp:395] scale_resblk64_5 -> bn_resblk64_5 (in-place)\nI0818 13:44:37.355183 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5\nI0818 13:44:37.355347 22726 net.cpp:150] Setting up scale_resblk64_5\nI0818 13:44:37.355360 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.355365 22726 net.cpp:165] Memory required for data: 1370625500\nI0818 13:44:37.355374 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_5\nI0818 13:44:37.355386 22726 net.cpp:100] Creating Layer relu_bn_resblk64_5\nI0818 13:44:37.355391 22726 net.cpp:434] 
relu_bn_resblk64_5 <- bn_resblk64_5\nI0818 13:44:37.355399 22726 net.cpp:395] relu_bn_resblk64_5 -> bn_resblk64_5 (in-place)\nI0818 13:44:37.355408 22726 net.cpp:150] Setting up relu_bn_resblk64_5\nI0818 13:44:37.355415 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.355420 22726 net.cpp:165] Memory required for data: 1372673500\nI0818 13:44:37.355432 22726 layer_factory.hpp:77] Creating layer resblk64_5_b\nI0818 13:44:37.355445 22726 net.cpp:100] Creating Layer resblk64_5_b\nI0818 13:44:37.355451 22726 net.cpp:434] resblk64_5_b <- bn_resblk64_5\nI0818 13:44:37.355463 22726 net.cpp:408] resblk64_5_b -> resblk64_5_b\nI0818 13:44:37.356497 22726 net.cpp:150] Setting up resblk64_5_b\nI0818 13:44:37.356511 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.356516 22726 net.cpp:165] Memory required for data: 1374721500\nI0818 13:44:37.356525 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_5_b\nI0818 13:44:37.356534 22726 net.cpp:100] Creating Layer batchNorm_resblk64_5_b\nI0818 13:44:37.356540 22726 net.cpp:434] batchNorm_resblk64_5_b <- resblk64_5_b\nI0818 13:44:37.356551 22726 net.cpp:408] batchNorm_resblk64_5_b -> bn_resblk64_5_b\nI0818 13:44:37.356833 22726 net.cpp:150] Setting up batchNorm_resblk64_5_b\nI0818 13:44:37.356848 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.356854 22726 net.cpp:165] Memory required for data: 1376769500\nI0818 13:44:37.356864 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5_b\nI0818 13:44:37.356873 22726 net.cpp:100] Creating Layer scale_resblk64_5_b\nI0818 13:44:37.356879 22726 net.cpp:434] scale_resblk64_5_b <- bn_resblk64_5_b\nI0818 13:44:37.356887 22726 net.cpp:395] scale_resblk64_5_b -> bn_resblk64_5_b (in-place)\nI0818 13:44:37.356947 22726 layer_factory.hpp:77] Creating layer scale_resblk64_5_b\nI0818 13:44:37.357112 22726 net.cpp:150] Setting up scale_resblk64_5_b\nI0818 13:44:37.357125 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:37.357131 22726 net.cpp:165] Memory required for data: 1378817500\nI0818 13:44:37.357139 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_4_b\nI0818 13:44:37.357151 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_4_b\nI0818 13:44:37.357158 22726 net.cpp:434] sum_sum_bn_resblk64_4_b <- sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split_1\nI0818 13:44:37.357165 22726 net.cpp:434] sum_sum_bn_resblk64_4_b <- bn_resblk64_5_b\nI0818 13:44:37.357173 22726 net.cpp:408] sum_sum_bn_resblk64_4_b -> sum_bn_resblk64_5_b\nI0818 13:44:37.357216 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_4_b\nI0818 13:44:37.357228 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.357233 22726 net.cpp:165] Memory required for data: 1380865500\nI0818 13:44:37.357237 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_5_b\nI0818 13:44:37.357245 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_5_b\nI0818 13:44:37.357251 22726 net.cpp:434] relu_sum_bn_resblk64_5_b <- sum_bn_resblk64_5_b\nI0818 13:44:37.357259 22726 net.cpp:395] relu_sum_bn_resblk64_5_b -> sum_bn_resblk64_5_b (in-place)\nI0818 13:44:37.357267 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_5_b\nI0818 13:44:37.357275 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.357280 22726 net.cpp:165] Memory required for data: 1382913500\nI0818 13:44:37.357285 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.357291 22726 net.cpp:100] Creating Layer sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.357296 22726 net.cpp:434] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split <- sum_bn_resblk64_5_b\nI0818 13:44:37.357306 22726 net.cpp:408] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split -> sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_0\nI0818 13:44:37.357317 22726 net.cpp:408] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split -> 
sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_1\nI0818 13:44:37.357367 22726 net.cpp:150] Setting up sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split\nI0818 13:44:37.357378 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.357384 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.357389 22726 net.cpp:165] Memory required for data: 1387009500\nI0818 13:44:37.357393 22726 layer_factory.hpp:77] Creating layer resblk64_6\nI0818 13:44:37.357409 22726 net.cpp:100] Creating Layer resblk64_6\nI0818 13:44:37.357424 22726 net.cpp:434] resblk64_6 <- sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_0\nI0818 13:44:37.357434 22726 net.cpp:408] resblk64_6 -> resblk64_6\nI0818 13:44:37.358475 22726 net.cpp:150] Setting up resblk64_6\nI0818 13:44:37.358490 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.358495 22726 net.cpp:165] Memory required for data: 1389057500\nI0818 13:44:37.358505 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_6\nI0818 13:44:37.358515 22726 net.cpp:100] Creating Layer batchNorm_resblk64_6\nI0818 13:44:37.358522 22726 net.cpp:434] batchNorm_resblk64_6 <- resblk64_6\nI0818 13:44:37.358531 22726 net.cpp:408] batchNorm_resblk64_6 -> bn_resblk64_6\nI0818 13:44:37.358803 22726 net.cpp:150] Setting up batchNorm_resblk64_6\nI0818 13:44:37.358821 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.358827 22726 net.cpp:165] Memory required for data: 1391105500\nI0818 13:44:37.358837 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6\nI0818 13:44:37.358849 22726 net.cpp:100] Creating Layer scale_resblk64_6\nI0818 13:44:37.358855 22726 net.cpp:434] scale_resblk64_6 <- bn_resblk64_6\nI0818 13:44:37.358863 22726 net.cpp:395] scale_resblk64_6 -> bn_resblk64_6 (in-place)\nI0818 13:44:37.358927 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6\nI0818 13:44:37.359089 22726 net.cpp:150] Setting up scale_resblk64_6\nI0818 13:44:37.359102 22726 net.cpp:157] Top shape: 
125 64 8 8 (512000)\nI0818 13:44:37.359107 22726 net.cpp:165] Memory required for data: 1393153500\nI0818 13:44:37.359115 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_6\nI0818 13:44:37.359153 22726 net.cpp:100] Creating Layer relu_bn_resblk64_6\nI0818 13:44:37.359163 22726 net.cpp:434] relu_bn_resblk64_6 <- bn_resblk64_6\nI0818 13:44:37.359170 22726 net.cpp:395] relu_bn_resblk64_6 -> bn_resblk64_6 (in-place)\nI0818 13:44:37.359180 22726 net.cpp:150] Setting up relu_bn_resblk64_6\nI0818 13:44:37.359187 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.359191 22726 net.cpp:165] Memory required for data: 1395201500\nI0818 13:44:37.359196 22726 layer_factory.hpp:77] Creating layer resblk64_6_b\nI0818 13:44:37.359207 22726 net.cpp:100] Creating Layer resblk64_6_b\nI0818 13:44:37.359213 22726 net.cpp:434] resblk64_6_b <- bn_resblk64_6\nI0818 13:44:37.359222 22726 net.cpp:408] resblk64_6_b -> resblk64_6_b\nI0818 13:44:37.360256 22726 net.cpp:150] Setting up resblk64_6_b\nI0818 13:44:37.360271 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.360275 22726 net.cpp:165] Memory required for data: 1397249500\nI0818 13:44:37.360285 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_6_b\nI0818 13:44:37.360301 22726 net.cpp:100] Creating Layer batchNorm_resblk64_6_b\nI0818 13:44:37.360307 22726 net.cpp:434] batchNorm_resblk64_6_b <- resblk64_6_b\nI0818 13:44:37.360318 22726 net.cpp:408] batchNorm_resblk64_6_b -> bn_resblk64_6_b\nI0818 13:44:37.360590 22726 net.cpp:150] Setting up batchNorm_resblk64_6_b\nI0818 13:44:37.360602 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.360607 22726 net.cpp:165] Memory required for data: 1399297500\nI0818 13:44:37.360617 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6_b\nI0818 13:44:37.360626 22726 net.cpp:100] Creating Layer scale_resblk64_6_b\nI0818 13:44:37.360632 22726 net.cpp:434] scale_resblk64_6_b <- bn_resblk64_6_b\nI0818 13:44:37.360643 22726 
net.cpp:395] scale_resblk64_6_b -> bn_resblk64_6_b (in-place)\nI0818 13:44:37.360703 22726 layer_factory.hpp:77] Creating layer scale_resblk64_6_b\nI0818 13:44:37.360872 22726 net.cpp:150] Setting up scale_resblk64_6_b\nI0818 13:44:37.360885 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.360890 22726 net.cpp:165] Memory required for data: 1401345500\nI0818 13:44:37.360899 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_5_b\nI0818 13:44:37.360911 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_5_b\nI0818 13:44:37.360918 22726 net.cpp:434] sum_sum_bn_resblk64_5_b <- sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split_1\nI0818 13:44:37.360934 22726 net.cpp:434] sum_sum_bn_resblk64_5_b <- bn_resblk64_6_b\nI0818 13:44:37.360941 22726 net.cpp:408] sum_sum_bn_resblk64_5_b -> sum_bn_resblk64_6_b\nI0818 13:44:37.360980 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_5_b\nI0818 13:44:37.360991 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.360996 22726 net.cpp:165] Memory required for data: 1403393500\nI0818 13:44:37.361001 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_6_b\nI0818 13:44:37.361007 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_6_b\nI0818 13:44:37.361013 22726 net.cpp:434] relu_sum_bn_resblk64_6_b <- sum_bn_resblk64_6_b\nI0818 13:44:37.361023 22726 net.cpp:395] relu_sum_bn_resblk64_6_b -> sum_bn_resblk64_6_b (in-place)\nI0818 13:44:37.361033 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_6_b\nI0818 13:44:37.361040 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.361044 22726 net.cpp:165] Memory required for data: 1405441500\nI0818 13:44:37.361049 22726 layer_factory.hpp:77] Creating layer sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.361057 22726 net.cpp:100] Creating Layer sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.361062 22726 net.cpp:434] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split <- 
sum_bn_resblk64_6_b\nI0818 13:44:37.361069 22726 net.cpp:408] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split -> sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_0\nI0818 13:44:37.361079 22726 net.cpp:408] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split -> sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_1\nI0818 13:44:37.361130 22726 net.cpp:150] Setting up sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split\nI0818 13:44:37.361142 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.361148 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.361153 22726 net.cpp:165] Memory required for data: 1409537500\nI0818 13:44:37.361157 22726 layer_factory.hpp:77] Creating layer resblk64_7\nI0818 13:44:37.361168 22726 net.cpp:100] Creating Layer resblk64_7\nI0818 13:44:37.361176 22726 net.cpp:434] resblk64_7 <- sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_0\nI0818 13:44:37.361189 22726 net.cpp:408] resblk64_7 -> resblk64_7\nI0818 13:44:37.363199 22726 net.cpp:150] Setting up resblk64_7\nI0818 13:44:37.363217 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.363222 22726 net.cpp:165] Memory required for data: 1411585500\nI0818 13:44:37.363231 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_7\nI0818 13:44:37.363245 22726 net.cpp:100] Creating Layer batchNorm_resblk64_7\nI0818 13:44:37.363250 22726 net.cpp:434] batchNorm_resblk64_7 <- resblk64_7\nI0818 13:44:37.363260 22726 net.cpp:408] batchNorm_resblk64_7 -> bn_resblk64_7\nI0818 13:44:37.363539 22726 net.cpp:150] Setting up batchNorm_resblk64_7\nI0818 13:44:37.363553 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.363556 22726 net.cpp:165] Memory required for data: 1413633500\nI0818 13:44:37.363567 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7\nI0818 13:44:37.363581 22726 net.cpp:100] Creating Layer scale_resblk64_7\nI0818 13:44:37.363589 22726 net.cpp:434] scale_resblk64_7 <- bn_resblk64_7\nI0818 13:44:37.363596 
22726 net.cpp:395] scale_resblk64_7 -> bn_resblk64_7 (in-place)\nI0818 13:44:37.363662 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7\nI0818 13:44:37.363834 22726 net.cpp:150] Setting up scale_resblk64_7\nI0818 13:44:37.363847 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.363852 22726 net.cpp:165] Memory required for data: 1415681500\nI0818 13:44:37.363862 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_7\nI0818 13:44:37.363873 22726 net.cpp:100] Creating Layer relu_bn_resblk64_7\nI0818 13:44:37.363879 22726 net.cpp:434] relu_bn_resblk64_7 <- bn_resblk64_7\nI0818 13:44:37.363888 22726 net.cpp:395] relu_bn_resblk64_7 -> bn_resblk64_7 (in-place)\nI0818 13:44:37.363896 22726 net.cpp:150] Setting up relu_bn_resblk64_7\nI0818 13:44:37.363903 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.363916 22726 net.cpp:165] Memory required for data: 1417729500\nI0818 13:44:37.363921 22726 layer_factory.hpp:77] Creating layer resblk64_7_b\nI0818 13:44:37.363935 22726 net.cpp:100] Creating Layer resblk64_7_b\nI0818 13:44:37.363941 22726 net.cpp:434] resblk64_7_b <- bn_resblk64_7\nI0818 13:44:37.363953 22726 net.cpp:408] resblk64_7_b -> resblk64_7_b\nI0818 13:44:37.364989 22726 net.cpp:150] Setting up resblk64_7_b\nI0818 13:44:37.365005 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365010 22726 net.cpp:165] Memory required for data: 1419777500\nI0818 13:44:37.365018 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_7_b\nI0818 13:44:37.365027 22726 net.cpp:100] Creating Layer batchNorm_resblk64_7_b\nI0818 13:44:37.365033 22726 net.cpp:434] batchNorm_resblk64_7_b <- resblk64_7_b\nI0818 13:44:37.365046 22726 net.cpp:408] batchNorm_resblk64_7_b -> bn_resblk64_7_b\nI0818 13:44:37.365324 22726 net.cpp:150] Setting up batchNorm_resblk64_7_b\nI0818 13:44:37.365339 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365345 22726 net.cpp:165] Memory required for data: 1421825500\nI0818 
13:44:37.365355 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7_b\nI0818 13:44:37.365365 22726 net.cpp:100] Creating Layer scale_resblk64_7_b\nI0818 13:44:37.365370 22726 net.cpp:434] scale_resblk64_7_b <- bn_resblk64_7_b\nI0818 13:44:37.365378 22726 net.cpp:395] scale_resblk64_7_b -> bn_resblk64_7_b (in-place)\nI0818 13:44:37.365438 22726 layer_factory.hpp:77] Creating layer scale_resblk64_7_b\nI0818 13:44:37.365600 22726 net.cpp:150] Setting up scale_resblk64_7_b\nI0818 13:44:37.365613 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365617 22726 net.cpp:165] Memory required for data: 1423873500\nI0818 13:44:37.365627 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_6_b\nI0818 13:44:37.365638 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_6_b\nI0818 13:44:37.365645 22726 net.cpp:434] sum_sum_bn_resblk64_6_b <- sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split_1\nI0818 13:44:37.365653 22726 net.cpp:434] sum_sum_bn_resblk64_6_b <- bn_resblk64_7_b\nI0818 13:44:37.365661 22726 net.cpp:408] sum_sum_bn_resblk64_6_b -> sum_bn_resblk64_7_b\nI0818 13:44:37.365698 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_6_b\nI0818 13:44:37.365710 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365715 22726 net.cpp:165] Memory required for data: 1425921500\nI0818 13:44:37.365720 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_7_b\nI0818 13:44:37.365728 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_7_b\nI0818 13:44:37.365734 22726 net.cpp:434] relu_sum_bn_resblk64_7_b <- sum_bn_resblk64_7_b\nI0818 13:44:37.365741 22726 net.cpp:395] relu_sum_bn_resblk64_7_b -> sum_bn_resblk64_7_b (in-place)\nI0818 13:44:37.365751 22726 net.cpp:150] Setting up relu_sum_bn_resblk64_7_b\nI0818 13:44:37.365757 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365761 22726 net.cpp:165] Memory required for data: 1427969500\nI0818 13:44:37.365767 22726 layer_factory.hpp:77] Creating layer 
sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.365773 22726 net.cpp:100] Creating Layer sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.365778 22726 net.cpp:434] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split <- sum_bn_resblk64_7_b\nI0818 13:44:37.365788 22726 net.cpp:408] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split -> sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_0\nI0818 13:44:37.365798 22726 net.cpp:408] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split -> sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_1\nI0818 13:44:37.365855 22726 net.cpp:150] Setting up sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split\nI0818 13:44:37.365867 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365873 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.365878 22726 net.cpp:165] Memory required for data: 1432065500\nI0818 13:44:37.365883 22726 layer_factory.hpp:77] Creating layer resblk64_8\nI0818 13:44:37.365906 22726 net.cpp:100] Creating Layer resblk64_8\nI0818 13:44:37.365913 22726 net.cpp:434] resblk64_8 <- sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_0\nI0818 13:44:37.365923 22726 net.cpp:408] resblk64_8 -> resblk64_8\nI0818 13:44:37.366960 22726 net.cpp:150] Setting up resblk64_8\nI0818 13:44:37.366973 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.366978 22726 net.cpp:165] Memory required for data: 1434113500\nI0818 13:44:37.366987 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_8\nI0818 13:44:37.366999 22726 net.cpp:100] Creating Layer batchNorm_resblk64_8\nI0818 13:44:37.367007 22726 net.cpp:434] batchNorm_resblk64_8 <- resblk64_8\nI0818 13:44:37.367014 22726 net.cpp:408] batchNorm_resblk64_8 -> bn_resblk64_8\nI0818 13:44:37.367290 22726 net.cpp:150] Setting up batchNorm_resblk64_8\nI0818 13:44:37.367303 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.367308 22726 net.cpp:165] Memory required for data: 
1436161500\nI0818 13:44:37.367318 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8\nI0818 13:44:37.367331 22726 net.cpp:100] Creating Layer scale_resblk64_8\nI0818 13:44:37.367336 22726 net.cpp:434] scale_resblk64_8 <- bn_resblk64_8\nI0818 13:44:37.367344 22726 net.cpp:395] scale_resblk64_8 -> bn_resblk64_8 (in-place)\nI0818 13:44:37.367409 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8\nI0818 13:44:37.367575 22726 net.cpp:150] Setting up scale_resblk64_8\nI0818 13:44:37.367588 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.367594 22726 net.cpp:165] Memory required for data: 1438209500\nI0818 13:44:37.367602 22726 layer_factory.hpp:77] Creating layer relu_bn_resblk64_8\nI0818 13:44:37.367614 22726 net.cpp:100] Creating Layer relu_bn_resblk64_8\nI0818 13:44:37.367620 22726 net.cpp:434] relu_bn_resblk64_8 <- bn_resblk64_8\nI0818 13:44:37.367627 22726 net.cpp:395] relu_bn_resblk64_8 -> bn_resblk64_8 (in-place)\nI0818 13:44:37.367637 22726 net.cpp:150] Setting up relu_bn_resblk64_8\nI0818 13:44:37.367643 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.367648 22726 net.cpp:165] Memory required for data: 1440257500\nI0818 13:44:37.367652 22726 layer_factory.hpp:77] Creating layer resblk64_8_b\nI0818 13:44:37.367666 22726 net.cpp:100] Creating Layer resblk64_8_b\nI0818 13:44:37.367672 22726 net.cpp:434] resblk64_8_b <- bn_resblk64_8\nI0818 13:44:37.367684 22726 net.cpp:408] resblk64_8_b -> resblk64_8_b\nI0818 13:44:37.368715 22726 net.cpp:150] Setting up resblk64_8_b\nI0818 13:44:37.368729 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.368734 22726 net.cpp:165] Memory required for data: 1442305500\nI0818 13:44:37.368743 22726 layer_factory.hpp:77] Creating layer batchNorm_resblk64_8_b\nI0818 13:44:37.368752 22726 net.cpp:100] Creating Layer batchNorm_resblk64_8_b\nI0818 13:44:37.368758 22726 net.cpp:434] batchNorm_resblk64_8_b <- resblk64_8_b\nI0818 13:44:37.368769 22726 net.cpp:408] 
batchNorm_resblk64_8_b -> bn_resblk64_8_b\nI0818 13:44:37.369051 22726 net.cpp:150] Setting up batchNorm_resblk64_8_b\nI0818 13:44:37.369068 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.369073 22726 net.cpp:165] Memory required for data: 1444353500\nI0818 13:44:37.369083 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8_b\nI0818 13:44:37.369092 22726 net.cpp:100] Creating Layer scale_resblk64_8_b\nI0818 13:44:37.369098 22726 net.cpp:434] scale_resblk64_8_b <- bn_resblk64_8_b\nI0818 13:44:37.369105 22726 net.cpp:395] scale_resblk64_8_b -> bn_resblk64_8_b (in-place)\nI0818 13:44:37.369164 22726 layer_factory.hpp:77] Creating layer scale_resblk64_8_b\nI0818 13:44:37.369329 22726 net.cpp:150] Setting up scale_resblk64_8_b\nI0818 13:44:37.369341 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.369346 22726 net.cpp:165] Memory required for data: 1446401500\nI0818 13:44:37.369355 22726 layer_factory.hpp:77] Creating layer sum_sum_bn_resblk64_7_b\nI0818 13:44:37.369369 22726 net.cpp:100] Creating Layer sum_sum_bn_resblk64_7_b\nI0818 13:44:37.369375 22726 net.cpp:434] sum_sum_bn_resblk64_7_b <- sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split_1\nI0818 13:44:37.369390 22726 net.cpp:434] sum_sum_bn_resblk64_7_b <- bn_resblk64_8_b\nI0818 13:44:37.369398 22726 net.cpp:408] sum_sum_bn_resblk64_7_b -> sum_bn_resblk64_8_b\nI0818 13:44:37.369436 22726 net.cpp:150] Setting up sum_sum_bn_resblk64_7_b\nI0818 13:44:37.369446 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.369451 22726 net.cpp:165] Memory required for data: 1448449500\nI0818 13:44:37.369457 22726 layer_factory.hpp:77] Creating layer relu_sum_bn_resblk64_8_b\nI0818 13:44:37.369463 22726 net.cpp:100] Creating Layer relu_sum_bn_resblk64_8_b\nI0818 13:44:37.369469 22726 net.cpp:434] relu_sum_bn_resblk64_8_b <- sum_bn_resblk64_8_b\nI0818 13:44:37.369477 22726 net.cpp:395] relu_sum_bn_resblk64_8_b -> sum_bn_resblk64_8_b (in-place)\nI0818 13:44:37.369485 
22726 net.cpp:150] Setting up relu_sum_bn_resblk64_8_b\nI0818 13:44:37.369493 22726 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:37.369496 22726 net.cpp:165] Memory required for data: 1450497500\nI0818 13:44:37.369501 22726 layer_factory.hpp:77] Creating layer avePooling_resblk64_8\nI0818 13:44:37.369509 22726 net.cpp:100] Creating Layer avePooling_resblk64_8\nI0818 13:44:37.369514 22726 net.cpp:434] avePooling_resblk64_8 <- sum_bn_resblk64_8_b\nI0818 13:44:37.369525 22726 net.cpp:408] avePooling_resblk64_8 -> avgPool_resblk64_8\nI0818 13:44:37.369560 22726 net.cpp:150] Setting up avePooling_resblk64_8\nI0818 13:44:37.369572 22726 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 13:44:37.369577 22726 net.cpp:165] Memory required for data: 1450529500\nI0818 13:44:37.369582 22726 layer_factory.hpp:77] Creating layer FC_final\nI0818 13:44:37.369593 22726 net.cpp:100] Creating Layer FC_final\nI0818 13:44:37.369599 22726 net.cpp:434] FC_final <- avgPool_resblk64_8\nI0818 13:44:37.369611 22726 net.cpp:408] FC_final -> FC_final\nI0818 13:44:37.369781 22726 net.cpp:150] Setting up FC_final\nI0818 13:44:37.369794 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.369799 22726 net.cpp:165] Memory required for data: 1450534500\nI0818 13:44:37.369813 22726 layer_factory.hpp:77] Creating layer FC_final_FC_final_0_split\nI0818 13:44:37.369825 22726 net.cpp:100] Creating Layer FC_final_FC_final_0_split\nI0818 13:44:37.369832 22726 net.cpp:434] FC_final_FC_final_0_split <- FC_final\nI0818 13:44:37.369839 22726 net.cpp:408] FC_final_FC_final_0_split -> FC_final_FC_final_0_split_0\nI0818 13:44:37.369849 22726 net.cpp:408] FC_final_FC_final_0_split -> FC_final_FC_final_0_split_1\nI0818 13:44:37.369904 22726 net.cpp:150] Setting up FC_final_FC_final_0_split\nI0818 13:44:37.369915 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.369921 22726 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:37.369926 22726 net.cpp:165] Memory required for data: 
1450544500\nI0818 13:44:37.369931 22726 layer_factory.hpp:77] Creating layer accuracy\nI0818 13:44:37.369938 22726 net.cpp:100] Creating Layer accuracy\nI0818 13:44:37.369945 22726 net.cpp:434] accuracy <- FC_final_FC_final_0_split_0\nI0818 13:44:37.369951 22726 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 13:44:37.369958 22726 net.cpp:408] accuracy -> accuracy\nI0818 13:44:37.369971 22726 net.cpp:150] Setting up accuracy\nI0818 13:44:37.369978 22726 net.cpp:157] Top shape: (1)\nI0818 13:44:37.369983 22726 net.cpp:165] Memory required for data: 1450544504\nI0818 13:44:37.369987 22726 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:37.369998 22726 net.cpp:100] Creating Layer loss\nI0818 13:44:37.370003 22726 net.cpp:434] loss <- FC_final_FC_final_0_split_1\nI0818 13:44:37.370010 22726 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 13:44:37.370018 22726 net.cpp:408] loss -> loss\nI0818 13:44:37.370029 22726 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:37.370152 22726 net.cpp:150] Setting up loss\nI0818 13:44:37.370164 22726 net.cpp:157] Top shape: (1)\nI0818 13:44:37.370169 22726 net.cpp:160]     with loss weight 1\nI0818 13:44:37.370185 22726 net.cpp:165] Memory required for data: 1450544508\nI0818 13:44:37.370192 22726 net.cpp:226] loss needs backward computation.\nI0818 13:44:37.370204 22726 net.cpp:228] accuracy does not need backward computation.\nI0818 13:44:37.370210 22726 net.cpp:226] FC_final_FC_final_0_split needs backward computation.\nI0818 13:44:37.370216 22726 net.cpp:226] FC_final needs backward computation.\nI0818 13:44:37.370221 22726 net.cpp:226] avePooling_resblk64_8 needs backward computation.\nI0818 13:44:37.370226 22726 net.cpp:226] relu_sum_bn_resblk64_8_b needs backward computation.\nI0818 13:44:37.370230 22726 net.cpp:226] sum_sum_bn_resblk64_7_b needs backward computation.\nI0818 13:44:37.370236 22726 net.cpp:226] scale_resblk64_8_b needs backward computation.\nI0818 13:44:37.370240 22726 net.cpp:226] 
batchNorm_resblk64_8_b needs backward computation.\nI0818 13:44:37.370245 22726 net.cpp:226] resblk64_8_b needs backward computation.\nI0818 13:44:37.370250 22726 net.cpp:226] relu_bn_resblk64_8 needs backward computation.\nI0818 13:44:37.370255 22726 net.cpp:226] scale_resblk64_8 needs backward computation.\nI0818 13:44:37.370260 22726 net.cpp:226] batchNorm_resblk64_8 needs backward computation.\nI0818 13:44:37.370265 22726 net.cpp:226] resblk64_8 needs backward computation.\nI0818 13:44:37.370270 22726 net.cpp:226] sum_bn_resblk64_7_b_relu_sum_bn_resblk64_7_b_0_split needs backward computation.\nI0818 13:44:37.370275 22726 net.cpp:226] relu_sum_bn_resblk64_7_b needs backward computation.\nI0818 13:44:37.370280 22726 net.cpp:226] sum_sum_bn_resblk64_6_b needs backward computation.\nI0818 13:44:37.370285 22726 net.cpp:226] scale_resblk64_7_b needs backward computation.\nI0818 13:44:37.370290 22726 net.cpp:226] batchNorm_resblk64_7_b needs backward computation.\nI0818 13:44:37.370295 22726 net.cpp:226] resblk64_7_b needs backward computation.\nI0818 13:44:37.370302 22726 net.cpp:226] relu_bn_resblk64_7 needs backward computation.\nI0818 13:44:37.370307 22726 net.cpp:226] scale_resblk64_7 needs backward computation.\nI0818 13:44:37.370312 22726 net.cpp:226] batchNorm_resblk64_7 needs backward computation.\nI0818 13:44:37.370317 22726 net.cpp:226] resblk64_7 needs backward computation.\nI0818 13:44:37.370322 22726 net.cpp:226] sum_bn_resblk64_6_b_relu_sum_bn_resblk64_6_b_0_split needs backward computation.\nI0818 13:44:37.370328 22726 net.cpp:226] relu_sum_bn_resblk64_6_b needs backward computation.\nI0818 13:44:37.370332 22726 net.cpp:226] sum_sum_bn_resblk64_5_b needs backward computation.\nI0818 13:44:37.370338 22726 net.cpp:226] scale_resblk64_6_b needs backward computation.\nI0818 13:44:37.370343 22726 net.cpp:226] batchNorm_resblk64_6_b needs backward computation.\nI0818 13:44:37.370348 22726 net.cpp:226] resblk64_6_b needs backward computation.\nI0818 
13:44:37.370353 22726 net.cpp:226] relu_bn_resblk64_6 needs backward computation.\nI0818 13:44:37.370358 22726 net.cpp:226] scale_resblk64_6 needs backward computation.\nI0818 13:44:37.370362 22726 net.cpp:226] batchNorm_resblk64_6 needs backward computation.\nI0818 13:44:37.370368 22726 net.cpp:226] resblk64_6 needs backward computation.\nI0818 13:44:37.370373 22726 net.cpp:226] sum_bn_resblk64_5_b_relu_sum_bn_resblk64_5_b_0_split needs backward computation.\nI0818 13:44:37.370378 22726 net.cpp:226] relu_sum_bn_resblk64_5_b needs backward computation.\nI0818 13:44:37.370383 22726 net.cpp:226] sum_sum_bn_resblk64_4_b needs backward computation.\nI0818 13:44:37.370389 22726 net.cpp:226] scale_resblk64_5_b needs backward computation.\nI0818 13:44:37.370394 22726 net.cpp:226] batchNorm_resblk64_5_b needs backward computation.\nI0818 13:44:37.370399 22726 net.cpp:226] resblk64_5_b needs backward computation.\nI0818 13:44:37.370404 22726 net.cpp:226] relu_bn_resblk64_5 needs backward computation.\nI0818 13:44:37.370409 22726 net.cpp:226] scale_resblk64_5 needs backward computation.\nI0818 13:44:37.370414 22726 net.cpp:226] batchNorm_resblk64_5 needs backward computation.\nI0818 13:44:37.370419 22726 net.cpp:226] resblk64_5 needs backward computation.\nI0818 13:44:37.370424 22726 net.cpp:226] sum_bn_resblk64_4_b_relu_sum_bn_resblk64_4_b_0_split needs backward computation.\nI0818 13:44:37.370429 22726 net.cpp:226] relu_sum_bn_resblk64_4_b needs backward computation.\nI0818 13:44:37.370438 22726 net.cpp:226] sum_sum_bn_resblk64_3_b needs backward computation.\nI0818 13:44:37.370445 22726 net.cpp:226] scale_resblk64_4_b needs backward computation.\nI0818 13:44:37.370450 22726 net.cpp:226] batchNorm_resblk64_4_b needs backward computation.\nI0818 13:44:37.370455 22726 net.cpp:226] resblk64_4_b needs backward computation.\nI0818 13:44:37.370460 22726 net.cpp:226] relu_bn_resblk64_4 needs backward computation.\nI0818 13:44:37.370465 22726 net.cpp:226] scale_resblk64_4 needs 
backward computation.\nI0818 13:44:37.370471 22726 net.cpp:226] batchNorm_resblk64_4 needs backward computation.\nI0818 13:44:37.370476 22726 net.cpp:226] resblk64_4 needs backward computation.\nI0818 13:44:37.370481 22726 net.cpp:226] sum_bn_resblk64_3_b_relu_sum_bn_resblk64_3_b_0_split needs backward computation.\nI0818 13:44:37.370486 22726 net.cpp:226] relu_sum_bn_resblk64_3_b needs backward computation.\nI0818 13:44:37.370491 22726 net.cpp:226] sum_sum_bn_resblk64_2_b needs backward computation.\nI0818 13:44:37.370497 22726 net.cpp:226] scale_resblk64_3_b needs backward computation.\nI0818 13:44:37.370502 22726 net.cpp:226] batchNorm_resblk64_3_b needs backward computation.\nI0818 13:44:37.370507 22726 net.cpp:226] resblk64_3_b needs backward computation.\nI0818 13:44:37.370512 22726 net.cpp:226] relu_bn_resblk64_3 needs backward computation.\nI0818 13:44:37.370517 22726 net.cpp:226] scale_resblk64_3 needs backward computation.\nI0818 13:44:37.370522 22726 net.cpp:226] batchNorm_resblk64_3 needs backward computation.\nI0818 13:44:37.370527 22726 net.cpp:226] resblk64_3 needs backward computation.\nI0818 13:44:37.370532 22726 net.cpp:226] sum_bn_resblk64_2_b_relu_sum_bn_resblk64_2_b_0_split needs backward computation.\nI0818 13:44:37.370537 22726 net.cpp:226] relu_sum_bn_resblk64_2_b needs backward computation.\nI0818 13:44:37.370543 22726 net.cpp:226] sum_sum_bn_resblk64_1_b needs backward computation.\nI0818 13:44:37.370548 22726 net.cpp:226] scale_resblk64_2_b needs backward computation.\nI0818 13:44:37.370553 22726 net.cpp:226] batchNorm_resblk64_2_b needs backward computation.\nI0818 13:44:37.370558 22726 net.cpp:226] resblk64_2_b needs backward computation.\nI0818 13:44:37.370563 22726 net.cpp:226] relu_bn_resblk64_2 needs backward computation.\nI0818 13:44:37.370568 22726 net.cpp:226] scale_resblk64_2 needs backward computation.\nI0818 13:44:37.370573 22726 net.cpp:226] batchNorm_resblk64_2 needs backward computation.\nI0818 13:44:37.370579 22726 
net.cpp:226] resblk64_2 needs backward computation.\nI0818 13:44:37.370584 22726 net.cpp:226] sum_bn_resblk64_1_b_relu_sum_bn_resblk64_1_b_0_split needs backward computation.\nI0818 13:44:37.370592 22726 net.cpp:226] relu_sum_bn_resblk64_1_b needs backward computation.\nI0818 13:44:37.370597 22726 net.cpp:226] sum_CC_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.370604 22726 net.cpp:226] scale_resblk64_1_b needs backward computation.\nI0818 13:44:37.370609 22726 net.cpp:226] batchNorm_resblk64_1_b needs backward computation.\nI0818 13:44:37.370613 22726 net.cpp:226] resblk64_1_b needs backward computation.\nI0818 13:44:37.370618 22726 net.cpp:226] relu_bn_resblk64_1 needs backward computation.\nI0818 13:44:37.370623 22726 net.cpp:226] scale_resblk64_1 needs backward computation.\nI0818 13:44:37.370628 22726 net.cpp:226] batchNorm_resblk64_1 needs backward computation.\nI0818 13:44:37.370633 22726 net.cpp:226] resblk64_1 needs backward computation.\nI0818 13:44:37.370640 22726 net.cpp:226] CC_sum_bn_resblk64_b_CC_sum_bn_resblk64_b_0_split needs backward computation.\nI0818 13:44:37.370645 22726 net.cpp:226] CC_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.370651 22726 net.cpp:228] zeros_sum_bn_resblk64_b does not need backward computation.\nI0818 13:44:37.370656 22726 net.cpp:226] relu_sum_bn_resblk64_b needs backward computation.\nI0818 13:44:37.370661 22726 net.cpp:226] sum_avgPool_resblk64 needs backward computation.\nI0818 13:44:37.370666 22726 net.cpp:226] avePooling_resblk64 needs backward computation.\nI0818 13:44:37.370672 22726 net.cpp:226] scale_resblk64_b needs backward computation.\nI0818 13:44:37.370682 22726 net.cpp:226] batchNorm_resblk64_b needs backward computation.\nI0818 13:44:37.370687 22726 net.cpp:226] resblk64_b needs backward computation.\nI0818 13:44:37.370693 22726 net.cpp:226] relu_bn_resblk64 needs backward computation.\nI0818 13:44:37.370698 22726 net.cpp:226] scale_resblk64 needs backward 
computation.\nI0818 13:44:37.370702 22726 net.cpp:226] batchNorm_resblk64 needs backward computation.\nI0818 13:44:37.370708 22726 net.cpp:226] resblk64 needs backward computation.\nI0818 13:44:37.370713 22726 net.cpp:226] sum_bn_resblk32_8_b_relu_sum_bn_resblk32_8_b_0_split needs backward computation.\nI0818 13:44:37.370719 22726 net.cpp:226] relu_sum_bn_resblk32_8_b needs backward computation.\nI0818 13:44:37.370724 22726 net.cpp:226] sum_sum_bn_resblk32_7_b needs backward computation.\nI0818 13:44:37.370730 22726 net.cpp:226] scale_resblk32_8_b needs backward computation.\nI0818 13:44:37.370735 22726 net.cpp:226] batchNorm_resblk32_8_b needs backward computation.\nI0818 13:44:37.370740 22726 net.cpp:226] resblk32_8_b needs backward computation.\nI0818 13:44:37.370746 22726 net.cpp:226] relu_bn_resblk32_8 needs backward computation.\nI0818 13:44:37.370750 22726 net.cpp:226] scale_resblk32_8 needs backward computation.\nI0818 13:44:37.370755 22726 net.cpp:226] batchNorm_resblk32_8 needs backward computation.\nI0818 13:44:37.370761 22726 net.cpp:226] resblk32_8 needs backward computation.\nI0818 13:44:37.370766 22726 net.cpp:226] sum_bn_resblk32_7_b_relu_sum_bn_resblk32_7_b_0_split needs backward computation.\nI0818 13:44:37.370772 22726 net.cpp:226] relu_sum_bn_resblk32_7_b needs backward computation.\nI0818 13:44:37.370777 22726 net.cpp:226] sum_sum_bn_resblk32_6_b needs backward computation.\nI0818 13:44:37.370784 22726 net.cpp:226] scale_resblk32_7_b needs backward computation.\nI0818 13:44:37.370789 22726 net.cpp:226] batchNorm_resblk32_7_b needs backward computation.\nI0818 13:44:37.370795 22726 net.cpp:226] resblk32_7_b needs backward computation.\nI0818 13:44:37.370800 22726 net.cpp:226] relu_bn_resblk32_7 needs backward computation.\nI0818 13:44:37.370805 22726 net.cpp:226] scale_resblk32_7 needs backward computation.\nI0818 13:44:37.370815 22726 net.cpp:226] batchNorm_resblk32_7 needs backward computation.\nI0818 13:44:37.370821 22726 net.cpp:226] 
resblk32_7 needs backward computation.\nI0818 13:44:37.370828 22726 net.cpp:226] sum_bn_resblk32_6_b_relu_sum_bn_resblk32_6_b_0_split needs backward computation.\nI0818 13:44:37.370833 22726 net.cpp:226] relu_sum_bn_resblk32_6_b needs backward computation.\nI0818 13:44:37.370839 22726 net.cpp:226] sum_sum_bn_resblk32_5_b needs backward computation.\nI0818 13:44:37.370846 22726 net.cpp:226] scale_resblk32_6_b needs backward computation.\nI0818 13:44:37.370851 22726 net.cpp:226] batchNorm_resblk32_6_b needs backward computation.\nI0818 13:44:37.370856 22726 net.cpp:226] resblk32_6_b needs backward computation.\nI0818 13:44:37.370862 22726 net.cpp:226] relu_bn_resblk32_6 needs backward computation.\nI0818 13:44:37.370867 22726 net.cpp:226] scale_resblk32_6 needs backward computation.\nI0818 13:44:37.370872 22726 net.cpp:226] batchNorm_resblk32_6 needs backward computation.\nI0818 13:44:37.370877 22726 net.cpp:226] resblk32_6 needs backward computation.\nI0818 13:44:37.370882 22726 net.cpp:226] sum_bn_resblk32_5_b_relu_sum_bn_resblk32_5_b_0_split needs backward computation.\nI0818 13:44:37.370887 22726 net.cpp:226] relu_sum_bn_resblk32_5_b needs backward computation.\nI0818 13:44:37.370893 22726 net.cpp:226] sum_sum_bn_resblk32_4_b needs backward computation.\nI0818 13:44:37.370899 22726 net.cpp:226] scale_resblk32_5_b needs backward computation.\nI0818 13:44:37.370904 22726 net.cpp:226] batchNorm_resblk32_5_b needs backward computation.\nI0818 13:44:37.370910 22726 net.cpp:226] resblk32_5_b needs backward computation.\nI0818 13:44:37.370915 22726 net.cpp:226] relu_bn_resblk32_5 needs backward computation.\nI0818 13:44:37.370920 22726 net.cpp:226] scale_resblk32_5 needs backward computation.\nI0818 13:44:37.370925 22726 net.cpp:226] batchNorm_resblk32_5 needs backward computation.\nI0818 13:44:37.370936 22726 net.cpp:226] resblk32_5 needs backward computation.\nI0818 13:44:37.370942 22726 net.cpp:226] sum_bn_resblk32_4_b_relu_sum_bn_resblk32_4_b_0_split needs backward 
computation.\nI0818 13:44:37.370949 22726 net.cpp:226] relu_sum_bn_resblk32_4_b needs backward computation.\nI0818 13:44:37.370954 22726 net.cpp:226] sum_sum_bn_resblk32_3_b needs backward computation.\nI0818 13:44:37.370965 22726 net.cpp:226] scale_resblk32_4_b needs backward computation.\nI0818 13:44:37.370970 22726 net.cpp:226] batchNorm_resblk32_4_b needs backward computation.\nI0818 13:44:37.370975 22726 net.cpp:226] resblk32_4_b needs backward computation.\nI0818 13:44:37.370981 22726 net.cpp:226] relu_bn_resblk32_4 needs backward computation.\nI0818 13:44:37.370986 22726 net.cpp:226] scale_resblk32_4 needs backward computation.\nI0818 13:44:37.370991 22726 net.cpp:226] batchNorm_resblk32_4 needs backward computation.\nI0818 13:44:37.370997 22726 net.cpp:226] resblk32_4 needs backward computation.\nI0818 13:44:37.371002 22726 net.cpp:226] sum_bn_resblk32_3_b_relu_sum_bn_resblk32_3_b_0_split needs backward computation.\nI0818 13:44:37.371008 22726 net.cpp:226] relu_sum_bn_resblk32_3_b needs backward computation.\nI0818 13:44:37.371013 22726 net.cpp:226] sum_sum_bn_resblk32_2_b needs backward computation.\nI0818 13:44:37.371019 22726 net.cpp:226] scale_resblk32_3_b needs backward computation.\nI0818 13:44:37.371024 22726 net.cpp:226] batchNorm_resblk32_3_b needs backward computation.\nI0818 13:44:37.371031 22726 net.cpp:226] resblk32_3_b needs backward computation.\nI0818 13:44:37.371037 22726 net.cpp:226] relu_bn_resblk32_3 needs backward computation.\nI0818 13:44:37.371042 22726 net.cpp:226] scale_resblk32_3 needs backward computation.\nI0818 13:44:37.371047 22726 net.cpp:226] batchNorm_resblk32_3 needs backward computation.\nI0818 13:44:37.371052 22726 net.cpp:226] resblk32_3 needs backward computation.\nI0818 13:44:37.371058 22726 net.cpp:226] sum_bn_resblk32_2_b_relu_sum_bn_resblk32_2_b_0_split needs backward computation.\nI0818 13:44:37.371063 22726 net.cpp:226] relu_sum_bn_resblk32_2_b needs backward computation.\nI0818 13:44:37.371069 22726 net.cpp:226] 
sum_sum_bn_resblk32_1_b needs backward computation.\nI0818 13:44:37.371075 22726 net.cpp:226] scale_resblk32_2_b needs backward computation.\nI0818 13:44:37.371080 22726 net.cpp:226] batchNorm_resblk32_2_b needs backward computation.\nI0818 13:44:37.371086 22726 net.cpp:226] resblk32_2_b needs backward computation.\nI0818 13:44:37.371091 22726 net.cpp:226] relu_bn_resblk32_2 needs backward computation.\nI0818 13:44:37.371098 22726 net.cpp:226] scale_resblk32_2 needs backward computation.\nI0818 13:44:37.371103 22726 net.cpp:226] batchNorm_resblk32_2 needs backward computation.\nI0818 13:44:37.371107 22726 net.cpp:226] resblk32_2 needs backward computation.\nI0818 13:44:37.371114 22726 net.cpp:226] sum_bn_resblk32_1_b_relu_sum_bn_resblk32_1_b_0_split needs backward computation.\nI0818 13:44:37.371119 22726 net.cpp:226] relu_sum_bn_resblk32_1_b needs backward computation.\nI0818 13:44:37.371124 22726 net.cpp:226] sum_CC_sum_bn_resblk32_b needs backward computation.\nI0818 13:44:37.371130 22726 net.cpp:226] scale_resblk32_1_b needs backward computation.\nI0818 13:44:37.371135 22726 net.cpp:226] batchNorm_resblk32_1_b needs backward computation.\nI0818 13:44:37.371141 22726 net.cpp:226] resblk32_1_b needs backward computation.\nI0818 13:44:37.371146 22726 net.cpp:226] relu_bn_resblk32_1 needs backward computation.\nI0818 13:44:37.371152 22726 net.cpp:226] scale_resblk32_1 needs backward computation.\nI0818 13:44:37.371156 22726 net.cpp:226] batchNorm_resblk32_1 needs backward computation.\nI0818 13:44:37.371162 22726 net.cpp:226] resblk32_1 needs backward computation.\nI0818 13:44:37.371168 22726 net.cpp:226] CC_sum_bn_resblk32_b_CC_sum_bn_resblk32_b_0_split needs backward computation.\nI0818 13:44:37.371173 22726 net.cpp:226] CC_sum_bn_resblk32_b needs backward computation.\nI0818 13:44:37.371179 22726 net.cpp:228] zeros_sum_bn_resblk32_b does not need backward computation.\nI0818 13:44:37.371189 22726 net.cpp:226] relu_sum_bn_resblk32_b needs backward 
computation.\nI0818 13:44:37.371196 22726 net.cpp:226] sum_avgPool_resblk32 needs backward computation.\nI0818 13:44:37.371201 22726 net.cpp:226] avePooling_resblk32 needs backward computation.\nI0818 13:44:37.371207 22726 net.cpp:226] scale_resblk32_b needs backward computation.\nI0818 13:44:37.371212 22726 net.cpp:226] batchNorm_resblk32_b needs backward computation.\nI0818 13:44:37.371218 22726 net.cpp:226] resblk32_b needs backward computation.\nI0818 13:44:37.371224 22726 net.cpp:226] relu_bn_resblk32 needs backward computation.\nI0818 13:44:37.371229 22726 net.cpp:226] scale_resblk32 needs backward computation.\nI0818 13:44:37.371234 22726 net.cpp:226] batchNorm_resblk32 needs backward computation.\nI0818 13:44:37.371240 22726 net.cpp:226] resblk32 needs backward computation.\nI0818 13:44:37.371246 22726 net.cpp:226] sum_bn_Conv16_9_b_relu_sum_bn_Conv16_9_b_0_split needs backward computation.\nI0818 13:44:37.371253 22726 net.cpp:226] relu_sum_bn_Conv16_9_b needs backward computation.\nI0818 13:44:37.371258 22726 net.cpp:226] sum_sum_bn_Conv16_8_b needs backward computation.\nI0818 13:44:37.371264 22726 net.cpp:226] scale_Conv16_9_b needs backward computation.\nI0818 13:44:37.371270 22726 net.cpp:226] batchNorm_Conv16_9_b needs backward computation.\nI0818 13:44:37.371275 22726 net.cpp:226] Conv16_9_b needs backward computation.\nI0818 13:44:37.371281 22726 net.cpp:226] relu_bn_Conv16_9 needs backward computation.\nI0818 13:44:37.371286 22726 net.cpp:226] scale_Conv16_9 needs backward computation.\nI0818 13:44:37.371291 22726 net.cpp:226] batchNorm_Conv16_9 needs backward computation.\nI0818 13:44:37.371296 22726 net.cpp:226] Conv16_9 needs backward computation.\nI0818 13:44:37.371304 22726 net.cpp:226] sum_bn_Conv16_8_b_relu_sum_bn_Conv16_8_b_0_split needs backward computation.\nI0818 13:44:37.371309 22726 net.cpp:226] relu_sum_bn_Conv16_8_b needs backward computation.\nI0818 13:44:37.371314 22726 net.cpp:226] sum_sum_bn_Conv16_7_b needs backward 
computation.\nI0818 13:44:37.371320 22726 net.cpp:226] scale_Conv16_8_b needs backward computation.\nI0818 13:44:37.371325 22726 net.cpp:226] batchNorm_Conv16_8_b needs backward computation.\nI0818 13:44:37.371330 22726 net.cpp:226] Conv16_8_b needs backward computation.\nI0818 13:44:37.371336 22726 net.cpp:226] relu_bn_Conv16_8 needs backward computation.\nI0818 13:44:37.371341 22726 net.cpp:226] scale_Conv16_8 needs backward computation.\nI0818 13:44:37.371346 22726 net.cpp:226] batchNorm_Conv16_8 needs backward computation.\nI0818 13:44:37.371352 22726 net.cpp:226] Conv16_8 needs backward computation.\nI0818 13:44:37.371358 22726 net.cpp:226] sum_bn_Conv16_7_b_relu_sum_bn_Conv16_7_b_0_split needs backward computation.\nI0818 13:44:37.371363 22726 net.cpp:226] relu_sum_bn_Conv16_7_b needs backward computation.\nI0818 13:44:37.371369 22726 net.cpp:226] sum_sum_bn_Conv16_6_b needs backward computation.\nI0818 13:44:37.371376 22726 net.cpp:226] scale_Conv16_7_b needs backward computation.\nI0818 13:44:37.371381 22726 net.cpp:226] batchNorm_Conv16_7_b needs backward computation.\nI0818 13:44:37.371387 22726 net.cpp:226] Conv16_7_b needs backward computation.\nI0818 13:44:37.371392 22726 net.cpp:226] relu_bn_Conv16_7 needs backward computation.\nI0818 13:44:37.371397 22726 net.cpp:226] scale_Conv16_7 needs backward computation.\nI0818 13:44:37.371402 22726 net.cpp:226] batchNorm_Conv16_7 needs backward computation.\nI0818 13:44:37.371408 22726 net.cpp:226] Conv16_7 needs backward computation.\nI0818 13:44:37.371413 22726 net.cpp:226] sum_bn_Conv16_6_b_relu_sum_bn_Conv16_6_b_0_split needs backward computation.\nI0818 13:44:37.371419 22726 net.cpp:226] relu_sum_bn_Conv16_6_b needs backward computation.\nI0818 13:44:37.371424 22726 net.cpp:226] sum_sum_bn_Conv16_5_b needs backward computation.\nI0818 13:44:37.371430 22726 net.cpp:226] scale_Conv16_6_b needs backward computation.\nI0818 13:44:37.371436 22726 net.cpp:226] batchNorm_Conv16_6_b needs backward 
computation.\nI0818 13:44:37.371443 22726 net.cpp:226] Conv16_6_b needs backward computation.\nI0818 13:44:37.371453 22726 net.cpp:226] relu_bn_Conv16_6 needs backward computation.\nI0818 13:44:37.371459 22726 net.cpp:226] scale_Conv16_6 needs backward computation.\nI0818 13:44:37.371464 22726 net.cpp:226] batchNorm_Conv16_6 needs backward computation.\nI0818 13:44:37.371469 22726 net.cpp:226] Conv16_6 needs backward computation.\nI0818 13:44:37.371475 22726 net.cpp:226] sum_bn_Conv16_5_b_relu_sum_bn_Conv16_5_b_0_split needs backward computation.\nI0818 13:44:37.371480 22726 net.cpp:226] relu_sum_bn_Conv16_5_b needs backward computation.\nI0818 13:44:37.371486 22726 net.cpp:226] sum_sum_bn_Conv16_4_b needs backward computation.\nI0818 13:44:37.371492 22726 net.cpp:226] scale_Conv16_5_b needs backward computation.\nI0818 13:44:37.371498 22726 net.cpp:226] batchNorm_Conv16_5_b needs backward computation.\nI0818 13:44:37.371503 22726 net.cpp:226] Conv16_5_b needs backward computation.\nI0818 13:44:37.371510 22726 net.cpp:226] relu_bn_Conv16_5 needs backward computation.\nI0818 13:44:37.371515 22726 net.cpp:226] scale_Conv16_5 needs backward computation.\nI0818 13:44:37.371520 22726 net.cpp:226] batchNorm_Conv16_5 needs backward computation.\nI0818 13:44:37.371526 22726 net.cpp:226] Conv16_5 needs backward computation.\nI0818 13:44:37.371531 22726 net.cpp:226] sum_bn_Conv16_4_b_relu_sum_bn_Conv16_4_b_0_split needs backward computation.\nI0818 13:44:37.371537 22726 net.cpp:226] relu_sum_bn_Conv16_4_b needs backward computation.\nI0818 13:44:37.371542 22726 net.cpp:226] sum_sum_bn_Conv16_3_b needs backward computation.\nI0818 13:44:37.371549 22726 net.cpp:226] scale_Conv16_4_b needs backward computation.\nI0818 13:44:37.371554 22726 net.cpp:226] batchNorm_Conv16_4_b needs backward computation.\nI0818 13:44:37.371561 22726 net.cpp:226] Conv16_4_b needs backward computation.\nI0818 13:44:37.371565 22726 net.cpp:226] relu_bn_Conv16_4 needs backward computation.\nI0818 
13:44:37.371572 22726 net.cpp:226] scale_Conv16_4 needs backward computation.\nI0818 13:44:37.371577 22726 net.cpp:226] batchNorm_Conv16_4 needs backward computation.\nI0818 13:44:37.371582 22726 net.cpp:226] Conv16_4 needs backward computation.\nI0818 13:44:37.371587 22726 net.cpp:226] sum_bn_Conv16_3_b_relu_sum_bn_Conv16_3_b_0_split needs backward computation.\nI0818 13:44:37.371593 22726 net.cpp:226] relu_sum_bn_Conv16_3_b needs backward computation.\nI0818 13:44:37.371599 22726 net.cpp:226] sum_sum_bn_Conv16_2_b needs backward computation.\nI0818 13:44:37.371605 22726 net.cpp:226] scale_Conv16_3_b needs backward computation.\nI0818 13:44:37.371610 22726 net.cpp:226] batchNorm_Conv16_3_b needs backward computation.\nI0818 13:44:37.371616 22726 net.cpp:226] Conv16_3_b needs backward computation.\nI0818 13:44:37.371623 22726 net.cpp:226] relu_bn_Conv16_3 needs backward computation.\nI0818 13:44:37.371628 22726 net.cpp:226] scale_Conv16_3 needs backward computation.\nI0818 13:44:37.371632 22726 net.cpp:226] batchNorm_Conv16_3 needs backward computation.\nI0818 13:44:37.371639 22726 net.cpp:226] Conv16_3 needs backward computation.\nI0818 13:44:37.371647 22726 net.cpp:226] sum_bn_Conv16_2_b_relu_sum_bn_Conv16_2_b_0_split needs backward computation.\nI0818 13:44:37.371654 22726 net.cpp:226] relu_sum_bn_Conv16_2_b needs backward computation.\nI0818 13:44:37.371659 22726 net.cpp:226] sum_sum_bn_Conv16_1_b needs backward computation.\nI0818 13:44:37.371666 22726 net.cpp:226] scale_Conv16_2_b needs backward computation.\nI0818 13:44:37.371671 22726 net.cpp:226] batchNorm_Conv16_2_b needs backward computation.\nI0818 13:44:37.371677 22726 net.cpp:226] Conv16_2_b needs backward computation.\nI0818 13:44:37.371682 22726 net.cpp:226] relu_bn_Conv16_2 needs backward computation.\nI0818 13:44:37.371688 22726 net.cpp:226] scale_Conv16_2 needs backward computation.\nI0818 13:44:37.371693 22726 net.cpp:226] batchNorm_Conv16_2 needs backward computation.\nI0818 13:44:37.371700 
22726 net.cpp:226] Conv16_2 needs backward computation.\nI0818 13:44:37.371706 22726 net.cpp:226] sum_bn_Conv16_1_b_relu_sum_bn_Conv16_1_b_0_split needs backward computation.\nI0818 13:44:37.371711 22726 net.cpp:226] relu_sum_bn_Conv16_1_b needs backward computation.\nI0818 13:44:37.371722 22726 net.cpp:226] sum_bn_conv needs backward computation.\nI0818 13:44:37.371729 22726 net.cpp:226] scale_Conv16_1_b needs backward computation.\nI0818 13:44:37.371736 22726 net.cpp:226] batchNorm_Conv16_1_b needs backward computation.\nI0818 13:44:37.371742 22726 net.cpp:226] Conv16_1_b needs backward computation.\nI0818 13:44:37.371747 22726 net.cpp:226] relu_bn_Conv16_1 needs backward computation.\nI0818 13:44:37.371752 22726 net.cpp:226] scale_Conv16_1 needs backward computation.\nI0818 13:44:37.371757 22726 net.cpp:226] batchNorm_Conv16_1 needs backward computation.\nI0818 13:44:37.371763 22726 net.cpp:226] Conv16_1 needs backward computation.\nI0818 13:44:37.371768 22726 net.cpp:226] bn_conv_relu_bn_conv_0_split needs backward computation.\nI0818 13:44:37.371774 22726 net.cpp:226] relu_bn_conv needs backward computation.\nI0818 13:44:37.371779 22726 net.cpp:226] scale_conv needs backward computation.\nI0818 13:44:37.371784 22726 net.cpp:226] batchNorm_conv needs backward computation.\nI0818 13:44:37.371790 22726 net.cpp:226] conv needs backward computation.\nI0818 13:44:37.371798 22726 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 13:44:37.371803 22726 net.cpp:228] dataLayer does not need backward computation.\nI0818 13:44:37.371812 22726 net.cpp:270] This network produces output accuracy\nI0818 13:44:37.371819 22726 net.cpp:270] This network produces output loss\nI0818 13:44:37.372169 22726 net.cpp:283] Network initialization done.\nI0818 13:44:37.372987 22726 solver.cpp:60] Solver scaffolding done.\nI0818 13:44:37.597445 22726 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0818 13:44:37.952386 22726 
upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:37.952440 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:37.959321 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:38.188859 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:38.188949 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:38.223711 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:38.223793 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:38.667359 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:38.667433 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:38.675403 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:38.916868 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:38.917009 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:38.968875 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:38.969010 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:39.489924 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:39.489982 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:39.498920 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:39.764749 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b 
from root net\nI0818 13:44:39.764884 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:39.836328 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:39.836455 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:39.921068 22726 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0818 13:44:40.400476 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:40.400547 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:40.410877 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:40.696154 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:40.696352 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:40.788728 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:40.788910 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:41.437413 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:41.437486 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:41.447942 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:41.768164 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:41.768389 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:41.881996 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:41.882212 22726 net.cpp:143] Created top blob 0 (shape: 125 
32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:42.598006 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:42.598074 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:42.609498 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:42.947286 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:42.947540 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:43.080673 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:43.080914 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:43.868228 22726 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:43.868283 22726 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:43.880553 22726 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:43.929406 22753 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:43.986196 22753 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:44.326462 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk32_b from root net\nI0818 13:44:44.326748 22726 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer zeros_sum_bn_resblk32_b\nI0818 13:44:44.479307 22726 net.cpp:93] Sharing layer zeros_sum_bn_resblk64_b from root net\nI0818 13:44:44.479574 22726 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer zeros_sum_bn_resblk64_b\nI0818 13:44:44.652153 22726 parallel.cpp:425] Starting Optimization\nI0818 13:44:44.654618 22726 solver.cpp:279] Solving Cifar-Resnet\nI0818 13:44:44.654635 22726 solver.cpp:280] Learning Rate Policy: 
triangular\nI0818 13:44:44.659473 22726 solver.cpp:337] Iteration 0, Testing net (#0)\nI0818 13:46:08.790943 22726 solver.cpp:404]     Test net output #0: accuracy = 0.0998\nI0818 13:46:08.791191 22726 solver.cpp:404]     Test net output #1: loss = 4.96549 (* 1 = 4.96549 loss)\nI0818 13:46:12.731458 22726 solver.cpp:228] Iteration 0, loss = 5.51851\nI0818 13:46:12.731499 22726 solver.cpp:244]     Train net output #0: accuracy = 0.08\nI0818 13:46:12.731516 22726 solver.cpp:244]     Train net output #1: loss = 5.51851 (* 1 = 5.51851 loss)\nI0818 13:46:12.809986 22726 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0818 13:48:30.557323 22726 solver.cpp:337] Iteration 100, Testing net (#0)\nI0818 13:49:55.096010 22726 solver.cpp:404]     Test net output #0: accuracy = 0.35028\nI0818 13:49:55.096271 22726 solver.cpp:404]     Test net output #1: loss = 1.74396 (* 1 = 1.74396 loss)\nI0818 13:49:56.424774 22726 solver.cpp:228] Iteration 100, loss = 1.65155\nI0818 13:49:56.424825 22726 solver.cpp:244]     Train net output #0: accuracy = 0.384\nI0818 13:49:56.424844 22726 solver.cpp:244]     Train net output #1: loss = 1.65155 (* 1 = 1.65155 loss)\nI0818 13:49:56.506356 22726 sgd_solver.cpp:166] Iteration 100, lr = 0.00250006\nI0818 13:52:14.061103 22726 solver.cpp:337] Iteration 200, Testing net (#0)\nI0818 13:53:38.577736 22726 solver.cpp:404]     Test net output #0: accuracy = 0.46284\nI0818 13:53:38.578001 22726 solver.cpp:404]     Test net output #1: loss = 1.45916 (* 1 = 1.45916 loss)\nI0818 13:53:39.906522 22726 solver.cpp:228] Iteration 200, loss = 1.40548\nI0818 13:53:39.906558 22726 solver.cpp:244]     Train net output #0: accuracy = 0.48\nI0818 13:53:39.906572 22726 solver.cpp:244]     Train net output #1: loss = 1.40548 (* 1 = 1.40548 loss)\nI0818 13:53:39.994278 22726 sgd_solver.cpp:166] Iteration 200, lr = 0.005\nI0818 13:55:57.496255 22726 solver.cpp:337] Iteration 300, Testing net (#0)\nI0818 13:57:22.011245 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.52364\nI0818 13:57:22.011498 22726 solver.cpp:404]     Test net output #1: loss = 1.29551 (* 1 = 1.29551 loss)\nI0818 13:57:23.339665 22726 solver.cpp:228] Iteration 300, loss = 1.24159\nI0818 13:57:23.339700 22726 solver.cpp:244]     Train net output #0: accuracy = 0.584\nI0818 13:57:23.339716 22726 solver.cpp:244]     Train net output #1: loss = 1.24159 (* 1 = 1.24159 loss)\nI0818 13:57:23.418503 22726 sgd_solver.cpp:166] Iteration 300, lr = 0.00750005\nI0818 13:59:40.934136 22726 solver.cpp:337] Iteration 400, Testing net (#0)\nI0818 14:01:05.444715 22726 solver.cpp:404]     Test net output #0: accuracy = 0.57968\nI0818 14:01:05.444958 22726 solver.cpp:404]     Test net output #1: loss = 1.15349 (* 1 = 1.15349 loss)\nI0818 14:01:06.773221 22726 solver.cpp:228] Iteration 400, loss = 1.07444\nI0818 14:01:06.773255 22726 solver.cpp:244]     Train net output #0: accuracy = 0.632\nI0818 14:01:06.773270 22726 solver.cpp:244]     Train net output #1: loss = 1.07444 (* 1 = 1.07444 loss)\nI0818 14:01:06.850921 22726 sgd_solver.cpp:166] Iteration 400, lr = 0.00999999\nI0818 14:03:24.392068 22726 solver.cpp:337] Iteration 500, Testing net (#0)\nI0818 14:04:48.892432 22726 solver.cpp:404]     Test net output #0: accuracy = 0.63532\nI0818 14:04:48.892688 22726 solver.cpp:404]     Test net output #1: loss = 1.01956 (* 1 = 1.01956 loss)\nI0818 14:04:50.220181 22726 solver.cpp:228] Iteration 500, loss = 0.865725\nI0818 14:04:50.220216 22726 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0818 14:04:50.220232 22726 solver.cpp:244]     Train net output #1: loss = 0.865725 (* 1 = 0.865725 loss)\nI0818 14:04:50.306844 22726 sgd_solver.cpp:166] Iteration 500, lr = 0.0125\nI0818 14:07:07.860662 22726 solver.cpp:337] Iteration 600, Testing net (#0)\nI0818 14:08:32.364720 22726 solver.cpp:404]     Test net output #0: accuracy = 0.6592\nI0818 14:08:32.364969 22726 solver.cpp:404]     Test net output #1: loss = 0.95046 (* 1 = 0.95046 loss)\nI0818 14:08:33.693845 22726 
solver.cpp:228] Iteration 600, loss = 0.813147\nI0818 14:08:33.693891 22726 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 14:08:33.693907 22726 solver.cpp:244]     Train net output #1: loss = 0.813147 (* 1 = 0.813147 loss)\nI0818 14:08:33.776310 22726 sgd_solver.cpp:166] Iteration 600, lr = 0.015\nI0818 14:10:51.335870 22726 solver.cpp:337] Iteration 700, Testing net (#0)\nI0818 14:12:15.841979 22726 solver.cpp:404]     Test net output #0: accuracy = 0.68156\nI0818 14:12:15.842233 22726 solver.cpp:404]     Test net output #1: loss = 0.899195 (* 1 = 0.899195 loss)\nI0818 14:12:17.170238 22726 solver.cpp:228] Iteration 700, loss = 0.841192\nI0818 14:12:17.170279 22726 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 14:12:17.170295 22726 solver.cpp:244]     Train net output #1: loss = 0.841192 (* 1 = 0.841192 loss)\nI0818 14:12:17.251514 22726 sgd_solver.cpp:166] Iteration 700, lr = 0.0175\nI0818 14:14:34.822834 22726 solver.cpp:337] Iteration 800, Testing net (#0)\nI0818 14:15:59.330718 22726 solver.cpp:404]     Test net output #0: accuracy = 0.70076\nI0818 14:15:59.330972 22726 solver.cpp:404]     Test net output #1: loss = 0.836304 (* 1 = 0.836304 loss)\nI0818 14:16:00.659649 22726 solver.cpp:228] Iteration 800, loss = 0.707116\nI0818 14:16:00.659683 22726 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 14:16:00.659698 22726 solver.cpp:244]     Train net output #1: loss = 0.707116 (* 1 = 0.707116 loss)\nI0818 14:16:00.748286 22726 sgd_solver.cpp:166] Iteration 800, lr = 0.02\nI0818 14:18:18.383612 22726 solver.cpp:337] Iteration 900, Testing net (#0)\nI0818 14:19:42.894716 22726 solver.cpp:404]     Test net output #0: accuracy = 0.72892\nI0818 14:19:42.894973 22726 solver.cpp:404]     Test net output #1: loss = 0.785649 (* 1 = 0.785649 loss)\nI0818 14:19:44.223569 22726 solver.cpp:228] Iteration 900, loss = 0.623757\nI0818 14:19:44.223613 22726 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 
14:19:44.223628 22726 solver.cpp:244]     Train net output #1: loss = 0.623757 (* 1 = 0.623757 loss)\nI0818 14:19:44.303577 22726 sgd_solver.cpp:166] Iteration 900, lr = 0.0225\nI0818 14:22:01.940580 22726 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0818 14:23:26.450624 22726 solver.cpp:404]     Test net output #0: accuracy = 0.74656\nI0818 14:23:26.450886 22726 solver.cpp:404]     Test net output #1: loss = 0.745317 (* 1 = 0.745317 loss)\nI0818 14:23:27.782691 22726 solver.cpp:228] Iteration 1000, loss = 0.575806\nI0818 14:23:27.782722 22726 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0818 14:23:27.782737 22726 solver.cpp:244]     Train net output #1: loss = 0.575806 (* 1 = 0.575806 loss)\nI0818 14:23:27.865317 22726 sgd_solver.cpp:166] Iteration 1000, lr = 0.025\nI0818 14:25:45.634035 22726 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0818 14:27:10.142861 22726 solver.cpp:404]     Test net output #0: accuracy = 0.7494\nI0818 14:27:10.143120 22726 solver.cpp:404]     Test net output #1: loss = 0.73552 (* 1 = 0.73552 loss)\nI0818 14:27:11.475072 22726 solver.cpp:228] Iteration 1100, loss = 0.532521\nI0818 14:27:11.475116 22726 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0818 14:27:11.475131 22726 solver.cpp:244]     Train net output #1: loss = 0.532521 (* 1 = 0.532521 loss)\nI0818 14:27:11.549281 22726 sgd_solver.cpp:166] Iteration 1100, lr = 0.0275\nI0818 14:29:29.486335 22726 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0818 14:30:53.987459 22726 solver.cpp:404]     Test net output #0: accuracy = 0.75732\nI0818 14:30:53.987712 22726 solver.cpp:404]     Test net output #1: loss = 0.729956 (* 1 = 0.729956 loss)\nI0818 14:30:55.319921 22726 solver.cpp:228] Iteration 1200, loss = 0.465514\nI0818 14:30:55.319953 22726 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0818 14:30:55.319968 22726 solver.cpp:244]     Train net output #1: loss = 0.465514 (* 1 = 0.465514 loss)\nI0818 14:30:55.401332 22726 
sgd_solver.cpp:166] Iteration 1200, lr = 0.03\nI0818 14:33:13.299944 22726 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0818 14:34:37.812235 22726 solver.cpp:404]     Test net output #0: accuracy = 0.76512\nI0818 14:34:37.812495 22726 solver.cpp:404]     Test net output #1: loss = 0.713318 (* 1 = 0.713318 loss)\nI0818 14:34:39.144080 22726 solver.cpp:228] Iteration 1300, loss = 0.456116\nI0818 14:34:39.144114 22726 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 14:34:39.144127 22726 solver.cpp:244]     Train net output #1: loss = 0.456116 (* 1 = 0.456116 loss)\nI0818 14:34:39.223194 22726 sgd_solver.cpp:166] Iteration 1300, lr = 0.0325\nI0818 14:41:44.423815 22726 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0818 14:43:08.227960 22726 solver.cpp:404]     Test net output #0: accuracy = 0.77068\nI0818 14:43:08.230291 22726 solver.cpp:404]     Test net output #1: loss = 0.702152 (* 1 = 0.702152 loss)\nI0818 14:43:09.559574 22726 solver.cpp:228] Iteration 1400, loss = 0.43131\nI0818 14:43:09.559607 22726 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:43:09.559622 22726 solver.cpp:244]     Train net output #1: loss = 0.43131 (* 1 = 0.43131 loss)\nI0818 14:43:09.639812 22726 sgd_solver.cpp:166] Iteration 1400, lr = 0.035\nI0818 14:45:39.555318 22726 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0818 14:47:04.036499 22726 solver.cpp:404]     Test net output #0: accuracy = 0.78136\nI0818 14:47:04.036821 22726 solver.cpp:404]     Test net output #1: loss = 0.683945 (* 1 = 0.683945 loss)\nI0818 14:47:05.365108 22726 solver.cpp:228] Iteration 1500, loss = 0.433026\nI0818 14:47:05.365149 22726 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0818 14:47:05.365170 22726 solver.cpp:244]     Train net output #1: loss = 0.433026 (* 1 = 0.433026 loss)\nI0818 14:47:05.447065 22726 sgd_solver.cpp:166] Iteration 1500, lr = 0.0375\nI0818 14:49:22.993253 22726 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0818 
14:50:47.473757 22726 solver.cpp:404]     Test net output #0: accuracy = 0.77032\nI0818 14:50:47.474020 22726 solver.cpp:404]     Test net output #1: loss = 0.730083 (* 1 = 0.730083 loss)\nI0818 14:50:48.802135 22726 solver.cpp:228] Iteration 1600, loss = 0.378678\nI0818 14:50:48.802184 22726 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 14:50:48.802199 22726 solver.cpp:244]     Train net output #1: loss = 0.378678 (* 1 = 0.378678 loss)\nI0818 14:50:48.883519 22726 sgd_solver.cpp:166] Iteration 1600, lr = 0.04\nI0818 14:53:06.436419 22726 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0818 14:54:30.907603 22726 solver.cpp:404]     Test net output #0: accuracy = 0.78112\nI0818 14:54:30.907840 22726 solver.cpp:404]     Test net output #1: loss = 0.706602 (* 1 = 0.706602 loss)\nI0818 14:54:32.235533 22726 solver.cpp:228] Iteration 1700, loss = 0.376541\nI0818 14:54:32.235577 22726 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 14:54:32.235594 22726 solver.cpp:244]     Train net output #1: loss = 0.376541 (* 1 = 0.376541 loss)\nI0818 14:54:32.316201 22726 sgd_solver.cpp:166] Iteration 1700, lr = 0.0425\nI0818 14:56:49.862103 22726 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0818 14:58:14.331892 22726 solver.cpp:404]     Test net output #0: accuracy = 0.78912\nI0818 14:58:14.332154 22726 solver.cpp:404]     Test net output #1: loss = 0.693799 (* 1 = 0.693799 loss)\nI0818 14:58:15.659595 22726 solver.cpp:228] Iteration 1800, loss = 0.416455\nI0818 14:58:15.659628 22726 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 14:58:15.659644 22726 solver.cpp:244]     Train net output #1: loss = 0.416455 (* 1 = 0.416455 loss)\nI0818 14:58:15.741722 22726 sgd_solver.cpp:166] Iteration 1800, lr = 0.045\nI0818 15:00:33.271752 22726 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0818 15:01:57.744223 22726 solver.cpp:404]     Test net output #0: accuracy = 0.79168\nI0818 15:01:57.744472 22726 solver.cpp:404]     Test 
net output #1: loss = 0.682016 (* 1 = 0.682016 loss)\nI0818 15:01:59.071837 22726 solver.cpp:228] Iteration 1900, loss = 0.276872\nI0818 15:01:59.071877 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:01:59.071892 22726 solver.cpp:244]     Train net output #1: loss = 0.276872 (* 1 = 0.276872 loss)\nI0818 15:01:59.156028 22726 sgd_solver.cpp:166] Iteration 1900, lr = 0.0475\nI0818 15:04:16.794026 22726 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0818 15:05:41.266757 22726 solver.cpp:404]     Test net output #0: accuracy = 0.7892\nI0818 15:05:41.267026 22726 solver.cpp:404]     Test net output #1: loss = 0.715744 (* 1 = 0.715744 loss)\nI0818 15:05:42.594614 22726 solver.cpp:228] Iteration 2000, loss = 0.327962\nI0818 15:05:42.594647 22726 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 15:05:42.594663 22726 solver.cpp:244]     Train net output #1: loss = 0.327962 (* 1 = 0.327962 loss)\nI0818 15:05:42.682404 22726 sgd_solver.cpp:166] Iteration 2000, lr = 0.05\nI0818 15:08:00.289165 22726 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0818 15:09:24.761693 22726 solver.cpp:404]     Test net output #0: accuracy = 0.787\nI0818 15:09:24.761960 22726 solver.cpp:404]     Test net output #1: loss = 0.723028 (* 1 = 0.723028 loss)\nI0818 15:09:26.089346 22726 solver.cpp:228] Iteration 2100, loss = 0.35675\nI0818 15:09:26.089380 22726 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 15:09:26.089395 22726 solver.cpp:244]     Train net output #1: loss = 0.35675 (* 1 = 0.35675 loss)\nI0818 15:09:26.172641 22726 sgd_solver.cpp:166] Iteration 2100, lr = 0.0525\nI0818 15:11:43.728991 22726 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0818 15:13:08.213763 22726 solver.cpp:404]     Test net output #0: accuracy = 0.79768\nI0818 15:13:08.214032 22726 solver.cpp:404]     Test net output #1: loss = 0.683653 (* 1 = 0.683653 loss)\nI0818 15:13:09.541682 22726 solver.cpp:228] Iteration 2200, loss = 0.295536\nI0818 
15:13:09.541716 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:13:09.541731 22726 solver.cpp:244]     Train net output #1: loss = 0.295536 (* 1 = 0.295536 loss)\nI0818 15:13:09.622472 22726 sgd_solver.cpp:166] Iteration 2200, lr = 0.0549999\nI0818 15:15:27.169965 22726 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0818 15:16:51.677114 22726 solver.cpp:404]     Test net output #0: accuracy = 0.78264\nI0818 15:16:51.677373 22726 solver.cpp:404]     Test net output #1: loss = 0.775648 (* 1 = 0.775648 loss)\nI0818 15:16:53.004616 22726 solver.cpp:228] Iteration 2300, loss = 0.37084\nI0818 15:16:53.004649 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:16:53.004665 22726 solver.cpp:244]     Train net output #1: loss = 0.37084 (* 1 = 0.37084 loss)\nI0818 15:16:53.084322 22726 sgd_solver.cpp:166] Iteration 2300, lr = 0.0575\nI0818 15:19:10.738797 22726 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0818 15:20:35.247333 22726 solver.cpp:404]     Test net output #0: accuracy = 0.79708\nI0818 15:20:35.247579 22726 solver.cpp:404]     Test net output #1: loss = 0.718263 (* 1 = 0.718263 loss)\nI0818 15:20:36.575888 22726 solver.cpp:228] Iteration 2400, loss = 0.264313\nI0818 15:20:36.575933 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 15:20:36.575949 22726 solver.cpp:244]     Train net output #1: loss = 0.264313 (* 1 = 0.264313 loss)\nI0818 15:20:36.657171 22726 sgd_solver.cpp:166] Iteration 2400, lr = 0.0599999\nI0818 15:22:54.269399 22726 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0818 15:24:18.770242 22726 solver.cpp:404]     Test net output #0: accuracy = 0.7992\nI0818 15:24:18.770503 22726 solver.cpp:404]     Test net output #1: loss = 0.714189 (* 1 = 0.714189 loss)\nI0818 15:24:20.098539 22726 solver.cpp:228] Iteration 2500, loss = 0.322719\nI0818 15:24:20.098574 22726 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0818 15:24:20.098588 22726 solver.cpp:244]     
Train net output #1: loss = 0.322719 (* 1 = 0.322719 loss)\nI0818 15:24:20.184275 22726 sgd_solver.cpp:166] Iteration 2500, lr = 0.0625\nI0818 15:26:37.859017 22726 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0818 15:28:02.358302 22726 solver.cpp:404]     Test net output #0: accuracy = 0.79772\nI0818 15:28:02.358549 22726 solver.cpp:404]     Test net output #1: loss = 0.721512 (* 1 = 0.721512 loss)\nI0818 15:28:03.686050 22726 solver.cpp:228] Iteration 2600, loss = 0.251873\nI0818 15:28:03.686086 22726 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 15:28:03.686101 22726 solver.cpp:244]     Train net output #1: loss = 0.251873 (* 1 = 0.251873 loss)\nI0818 15:28:03.776222 22726 sgd_solver.cpp:166] Iteration 2600, lr = 0.0650001\nI0818 15:30:21.442044 22726 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0818 15:31:45.944401 22726 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI0818 15:31:45.944651 22726 solver.cpp:404]     Test net output #1: loss = 0.727128 (* 1 = 0.727128 loss)\nI0818 15:31:47.272006 22726 solver.cpp:228] Iteration 2700, loss = 0.247028\nI0818 15:31:47.272050 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:31:47.272066 22726 solver.cpp:244]     Train net output #1: loss = 0.247029 (* 1 = 0.247029 loss)\nI0818 15:31:47.359074 22726 sgd_solver.cpp:166] Iteration 2700, lr = 0.0675\nI0818 15:34:04.976080 22726 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0818 15:35:29.469241 22726 solver.cpp:404]     Test net output #0: accuracy = 0.80948\nI0818 15:35:29.469483 22726 solver.cpp:404]     Test net output #1: loss = 0.723757 (* 1 = 0.723757 loss)\nI0818 15:35:30.796881 22726 solver.cpp:228] Iteration 2800, loss = 0.184658\nI0818 15:35:30.796916 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:35:30.796931 22726 solver.cpp:244]     Train net output #1: loss = 0.184658 (* 1 = 0.184658 loss)\nI0818 15:35:30.877235 22726 sgd_solver.cpp:166] Iteration 2800, lr = 
0.0700001\nI0818 15:37:48.553230 22726 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0818 15:39:13.051182 22726 solver.cpp:404]     Test net output #0: accuracy = 0.81116\nI0818 15:39:13.051439 22726 solver.cpp:404]     Test net output #1: loss = 0.698704 (* 1 = 0.698704 loss)\nI0818 15:39:14.378615 22726 solver.cpp:228] Iteration 2900, loss = 0.192543\nI0818 15:39:14.378650 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:39:14.378664 22726 solver.cpp:244]     Train net output #1: loss = 0.192543 (* 1 = 0.192543 loss)\nI0818 15:39:14.460185 22726 sgd_solver.cpp:166] Iteration 2900, lr = 0.0725\nI0818 15:41:32.067927 22726 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0818 15:42:56.574775 22726 solver.cpp:404]     Test net output #0: accuracy = 0.81272\nI0818 15:42:56.575050 22726 solver.cpp:404]     Test net output #1: loss = 0.732422 (* 1 = 0.732422 loss)\nI0818 15:42:57.902175 22726 solver.cpp:228] Iteration 3000, loss = 0.140338\nI0818 15:42:57.902217 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:42:57.902232 22726 solver.cpp:244]     Train net output #1: loss = 0.140338 (* 1 = 0.140338 loss)\nI0818 15:42:57.988375 22726 sgd_solver.cpp:166] Iteration 3000, lr = 0.075\nI0818 15:45:15.553076 22726 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0818 15:46:40.050143 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82\nI0818 15:46:40.050390 22726 solver.cpp:404]     Test net output #1: loss = 0.679755 (* 1 = 0.679755 loss)\nI0818 15:46:41.377849 22726 solver.cpp:228] Iteration 3100, loss = 0.135679\nI0818 15:46:41.377882 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:46:41.377897 22726 solver.cpp:244]     Train net output #1: loss = 0.135679 (* 1 = 0.135679 loss)\nI0818 15:46:41.464198 22726 sgd_solver.cpp:166] Iteration 3100, lr = 0.0775\nI0818 15:48:58.949457 22726 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0818 15:50:23.455474 22726 solver.cpp:404]     
Test net output #0: accuracy = 0.82324\nI0818 15:50:23.455737 22726 solver.cpp:404]     Test net output #1: loss = 0.700799 (* 1 = 0.700799 loss)\nI0818 15:50:24.782732 22726 solver.cpp:228] Iteration 3200, loss = 0.135123\nI0818 15:50:24.782773 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:50:24.782788 22726 solver.cpp:244]     Train net output #1: loss = 0.135124 (* 1 = 0.135124 loss)\nI0818 15:50:24.866549 22726 sgd_solver.cpp:166] Iteration 3200, lr = 0.08\nI0818 15:52:42.416275 22726 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0818 15:54:06.927924 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82192\nI0818 15:54:06.928171 22726 solver.cpp:404]     Test net output #1: loss = 0.736382 (* 1 = 0.736382 loss)\nI0818 15:54:08.255947 22726 solver.cpp:228] Iteration 3300, loss = 0.0893335\nI0818 15:54:08.255981 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:54:08.255996 22726 solver.cpp:244]     Train net output #1: loss = 0.0893336 (* 1 = 0.0893336 loss)\nI0818 15:54:08.340454 22726 sgd_solver.cpp:166] Iteration 3300, lr = 0.0825\nI0818 15:56:25.887794 22726 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0818 15:57:50.395982 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82028\nI0818 15:57:50.396242 22726 solver.cpp:404]     Test net output #1: loss = 0.747995 (* 1 = 0.747995 loss)\nI0818 15:57:51.724390 22726 solver.cpp:228] Iteration 3400, loss = 0.110271\nI0818 15:57:51.724426 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:57:51.724440 22726 solver.cpp:244]     Train net output #1: loss = 0.110271 (* 1 = 0.110271 loss)\nI0818 15:57:51.804664 22726 sgd_solver.cpp:166] Iteration 3400, lr = 0.085\nI0818 16:00:09.460642 22726 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0818 16:01:33.970741 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82096\nI0818 16:01:33.971014 22726 solver.cpp:404]     Test net output #1: loss = 0.725386 (* 1 = 
0.725386 loss)\nI0818 16:01:35.298913 22726 solver.cpp:228] Iteration 3500, loss = 0.0538424\nI0818 16:01:35.298950 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:01:35.298965 22726 solver.cpp:244]     Train net output #1: loss = 0.0538424 (* 1 = 0.0538424 loss)\nI0818 16:01:35.378144 22726 sgd_solver.cpp:166] Iteration 3500, lr = 0.0875\nI0818 16:03:52.936884 22726 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0818 16:05:17.440961 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82172\nI0818 16:05:17.441205 22726 solver.cpp:404]     Test net output #1: loss = 0.730629 (* 1 = 0.730629 loss)\nI0818 16:05:18.768350 22726 solver.cpp:228] Iteration 3600, loss = 0.0504085\nI0818 16:05:18.768398 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:05:18.768414 22726 solver.cpp:244]     Train net output #1: loss = 0.0504086 (* 1 = 0.0504086 loss)\nI0818 16:05:18.864267 22726 sgd_solver.cpp:166] Iteration 3600, lr = 0.09\nI0818 16:07:36.205479 22726 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0818 16:09:00.707937 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82388\nI0818 16:09:00.708200 22726 solver.cpp:404]     Test net output #1: loss = 0.725918 (* 1 = 0.725918 loss)\nI0818 16:09:02.036036 22726 solver.cpp:228] Iteration 3700, loss = 0.162736\nI0818 16:09:02.036070 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:09:02.036085 22726 solver.cpp:244]     Train net output #1: loss = 0.162736 (* 1 = 0.162736 loss)\nI0818 16:09:02.121783 22726 sgd_solver.cpp:166] Iteration 3700, lr = 0.0925\nI0818 16:11:19.414536 22726 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0818 16:12:43.922075 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82648\nI0818 16:12:43.922343 22726 solver.cpp:404]     Test net output #1: loss = 0.744189 (* 1 = 0.744189 loss)\nI0818 16:12:45.249701 22726 solver.cpp:228] Iteration 3800, loss = 0.132122\nI0818 16:12:45.249737 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:12:45.249752 22726 solver.cpp:244]     Train net output #1: loss = 0.132122 (* 1 = 0.132122 loss)\nI0818 16:12:45.326038 22726 sgd_solver.cpp:166] Iteration 3800, lr = 0.095\nI0818 16:15:02.522843 22726 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0818 16:16:27.034060 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82136\nI0818 16:16:27.034327 22726 solver.cpp:404]     Test net output #1: loss = 0.732456 (* 1 = 0.732456 loss)\nI0818 16:16:28.362041 22726 solver.cpp:228] Iteration 3900, loss = 0.194469\nI0818 16:16:28.362077 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:16:28.362092 22726 solver.cpp:244]     Train net output #1: loss = 0.194469 (* 1 = 0.194469 loss)\nI0818 16:16:28.446336 22726 sgd_solver.cpp:166] Iteration 3900, lr = 0.0975\nI0818 16:18:45.672639 22726 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0818 16:20:10.188191 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82296\nI0818 16:20:10.188427 22726 solver.cpp:404]     Test net output #1: loss = 0.735436 (* 1 = 0.735436 loss)\nI0818 16:20:11.515575 22726 solver.cpp:228] Iteration 4000, loss = 0.123937\nI0818 16:20:11.515611 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:20:11.515627 22726 solver.cpp:244]     Train net output #1: loss = 0.123937 (* 1 = 0.123937 loss)\nI0818 16:20:11.599563 22726 sgd_solver.cpp:166] Iteration 4000, lr = 0.1\nI0818 16:22:28.953392 22726 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0818 16:23:53.461030 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83048\nI0818 16:23:53.461282 22726 solver.cpp:404]     Test net output #1: loss = 0.732219 (* 1 = 0.732219 loss)\nI0818 16:23:54.788532 22726 solver.cpp:228] Iteration 4100, loss = 0.128594\nI0818 16:23:54.788568 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:23:54.788583 22726 solver.cpp:244]     Train net output #1: loss = 
0.128594 (* 1 = 0.128594 loss)\nI0818 16:23:54.871919 22726 sgd_solver.cpp:166] Iteration 4100, lr = 0.1025\nI0818 16:26:12.282579 22726 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0818 16:27:36.780169 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83008\nI0818 16:27:36.780434 22726 solver.cpp:404]     Test net output #1: loss = 0.744088 (* 1 = 0.744088 loss)\nI0818 16:27:38.107213 22726 solver.cpp:228] Iteration 4200, loss = 0.0464251\nI0818 16:27:38.107246 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:27:38.107261 22726 solver.cpp:244]     Train net output #1: loss = 0.0464251 (* 1 = 0.0464251 loss)\nI0818 16:27:38.190012 22726 sgd_solver.cpp:166] Iteration 4200, lr = 0.105\nI0818 16:29:55.426722 22726 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0818 16:31:19.936530 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83168\nI0818 16:31:19.936775 22726 solver.cpp:404]     Test net output #1: loss = 0.71768 (* 1 = 0.71768 loss)\nI0818 16:31:21.264497 22726 solver.cpp:228] Iteration 4300, loss = 0.081548\nI0818 16:31:21.264539 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:31:21.264564 22726 solver.cpp:244]     Train net output #1: loss = 0.0815481 (* 1 = 0.0815481 loss)\nI0818 16:31:21.354318 22726 sgd_solver.cpp:166] Iteration 4300, lr = 0.1075\nI0818 16:33:38.618350 22726 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0818 16:35:03.136111 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83176\nI0818 16:35:03.136373 22726 solver.cpp:404]     Test net output #1: loss = 0.725194 (* 1 = 0.725194 loss)\nI0818 16:35:04.464025 22726 solver.cpp:228] Iteration 4400, loss = 0.0925924\nI0818 16:35:04.464061 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:35:04.464076 22726 solver.cpp:244]     Train net output #1: loss = 0.0925924 (* 1 = 0.0925924 loss)\nI0818 16:35:04.543695 22726 sgd_solver.cpp:166] Iteration 4400, lr = 0.11\nI0818 16:37:21.827334 
22726 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0818 16:38:46.330525 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83344\nI0818 16:38:46.330785 22726 solver.cpp:404]     Test net output #1: loss = 0.728585 (* 1 = 0.728585 loss)\nI0818 16:38:47.657896 22726 solver.cpp:228] Iteration 4500, loss = 0.101803\nI0818 16:38:47.657932 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:38:47.657948 22726 solver.cpp:244]     Train net output #1: loss = 0.101803 (* 1 = 0.101803 loss)\nI0818 16:38:47.738497 22726 sgd_solver.cpp:166] Iteration 4500, lr = 0.1125\nI0818 16:41:05.002769 22726 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0818 16:42:29.505033 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83544\nI0818 16:42:29.505300 22726 solver.cpp:404]     Test net output #1: loss = 0.730989 (* 1 = 0.730989 loss)\nI0818 16:42:30.832492 22726 solver.cpp:228] Iteration 4600, loss = 0.0661556\nI0818 16:42:30.832525 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:42:30.832540 22726 solver.cpp:244]     Train net output #1: loss = 0.0661557 (* 1 = 0.0661557 loss)\nI0818 16:42:30.917814 22726 sgd_solver.cpp:166] Iteration 4600, lr = 0.115\nI0818 16:44:48.153477 22726 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0818 16:46:12.650143 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82848\nI0818 16:46:12.650420 22726 solver.cpp:404]     Test net output #1: loss = 0.744325 (* 1 = 0.744325 loss)\nI0818 16:46:13.977531 22726 solver.cpp:228] Iteration 4700, loss = 0.0388921\nI0818 16:46:13.977563 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:46:13.977578 22726 solver.cpp:244]     Train net output #1: loss = 0.0388922 (* 1 = 0.0388922 loss)\nI0818 16:46:14.058544 22726 sgd_solver.cpp:166] Iteration 4700, lr = 0.1175\nI0818 16:48:31.347892 22726 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0818 16:49:55.855008 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.83344\nI0818 16:49:55.855276 22726 solver.cpp:404]     Test net output #1: loss = 0.74917 (* 1 = 0.74917 loss)\nI0818 16:49:57.183059 22726 solver.cpp:228] Iteration 4800, loss = 0.0934206\nI0818 16:49:57.183091 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:49:57.183106 22726 solver.cpp:244]     Train net output #1: loss = 0.0934207 (* 1 = 0.0934207 loss)\nI0818 16:49:57.268280 22726 sgd_solver.cpp:166] Iteration 4800, lr = 0.12\nI0818 16:52:14.547785 22726 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0818 16:53:39.049548 22726 solver.cpp:404]     Test net output #0: accuracy = 0.82584\nI0818 16:53:39.049814 22726 solver.cpp:404]     Test net output #1: loss = 0.774598 (* 1 = 0.774598 loss)\nI0818 16:53:40.377630 22726 solver.cpp:228] Iteration 4900, loss = 0.0251254\nI0818 16:53:40.377662 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:53:40.377677 22726 solver.cpp:244]     Train net output #1: loss = 0.0251255 (* 1 = 0.0251255 loss)\nI0818 16:53:40.454710 22726 sgd_solver.cpp:166] Iteration 4900, lr = 0.1225\nI0818 16:55:57.683336 22726 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0818 16:57:22.178345 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83304\nI0818 16:57:22.178586 22726 solver.cpp:404]     Test net output #1: loss = 0.747655 (* 1 = 0.747655 loss)\nI0818 16:57:23.505553 22726 solver.cpp:228] Iteration 5000, loss = 0.126295\nI0818 16:57:23.505585 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:57:23.505600 22726 solver.cpp:244]     Train net output #1: loss = 0.126295 (* 1 = 0.126295 loss)\nI0818 16:57:23.588685 22726 sgd_solver.cpp:166] Iteration 5000, lr = 0.125\nI0818 16:59:40.809481 22726 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0818 17:01:05.279641 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8236\nI0818 17:01:05.279896 22726 solver.cpp:404]     Test net output #1: loss = 0.743301 (* 1 = 0.743301 loss)\nI0818 
17:01:06.607203 22726 solver.cpp:228] Iteration 5100, loss = 0.043186\nI0818 17:01:06.607235 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:01:06.607250 22726 solver.cpp:244]     Train net output #1: loss = 0.0431861 (* 1 = 0.0431861 loss)\nI0818 17:01:06.692243 22726 sgd_solver.cpp:166] Iteration 5100, lr = 0.1275\nI0818 17:03:23.971024 22726 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0818 17:04:48.446350 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83244\nI0818 17:04:48.446604 22726 solver.cpp:404]     Test net output #1: loss = 0.760406 (* 1 = 0.760406 loss)\nI0818 17:04:49.774183 22726 solver.cpp:228] Iteration 5200, loss = 0.110369\nI0818 17:04:49.774215 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:04:49.774231 22726 solver.cpp:244]     Train net output #1: loss = 0.110369 (* 1 = 0.110369 loss)\nI0818 17:04:49.856029 22726 sgd_solver.cpp:166] Iteration 5200, lr = 0.13\nI0818 17:07:07.131268 22726 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0818 17:08:31.609187 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83704\nI0818 17:08:31.609450 22726 solver.cpp:404]     Test net output #1: loss = 0.733375 (* 1 = 0.733375 loss)\nI0818 17:08:32.937209 22726 solver.cpp:228] Iteration 5300, loss = 0.0696552\nI0818 17:08:32.937243 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:08:32.937258 22726 solver.cpp:244]     Train net output #1: loss = 0.0696552 (* 1 = 0.0696552 loss)\nI0818 17:08:33.018323 22726 sgd_solver.cpp:166] Iteration 5300, lr = 0.1325\nI0818 17:10:50.281419 22726 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0818 17:12:14.767659 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83332\nI0818 17:12:14.767932 22726 solver.cpp:404]     Test net output #1: loss = 0.769743 (* 1 = 0.769743 loss)\nI0818 17:12:16.095551 22726 solver.cpp:228] Iteration 5400, loss = 0.0889991\nI0818 17:12:16.095585 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.976\nI0818 17:12:16.095600 22726 solver.cpp:244]     Train net output #1: loss = 0.0889992 (* 1 = 0.0889992 loss)\nI0818 17:12:16.176405 22726 sgd_solver.cpp:166] Iteration 5400, lr = 0.135\nI0818 17:14:33.402259 22726 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0818 17:15:57.889559 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83236\nI0818 17:15:57.889808 22726 solver.cpp:404]     Test net output #1: loss = 0.752743 (* 1 = 0.752743 loss)\nI0818 17:15:59.217170 22726 solver.cpp:228] Iteration 5500, loss = 0.0505173\nI0818 17:15:59.217206 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:15:59.217221 22726 solver.cpp:244]     Train net output #1: loss = 0.0505173 (* 1 = 0.0505173 loss)\nI0818 17:15:59.302292 22726 sgd_solver.cpp:166] Iteration 5500, lr = 0.1375\nI0818 17:18:16.675343 22726 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0818 17:19:41.151553 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83608\nI0818 17:19:41.151815 22726 solver.cpp:404]     Test net output #1: loss = 0.732527 (* 1 = 0.732527 loss)\nI0818 17:19:42.479455 22726 solver.cpp:228] Iteration 5600, loss = 0.0518501\nI0818 17:19:42.479511 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:19:42.479529 22726 solver.cpp:244]     Train net output #1: loss = 0.0518502 (* 1 = 0.0518502 loss)\nI0818 17:19:42.565871 22726 sgd_solver.cpp:166] Iteration 5600, lr = 0.14\nI0818 17:22:00.299021 22726 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0818 17:23:25.373019 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83708\nI0818 17:23:25.373267 22726 solver.cpp:404]     Test net output #1: loss = 0.758962 (* 1 = 0.758962 loss)\nI0818 17:23:26.705314 22726 solver.cpp:228] Iteration 5700, loss = 0.0605293\nI0818 17:23:26.705371 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:23:26.705394 22726 solver.cpp:244]     Train net output #1: loss = 0.0605293 (* 1 = 
0.0605293 loss)\nI0818 17:23:26.779352 22726 sgd_solver.cpp:166] Iteration 5700, lr = 0.1425\nI0818 17:25:44.714668 22726 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0818 17:27:09.884402 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83396\nI0818 17:27:09.884615 22726 solver.cpp:404]     Test net output #1: loss = 0.76575 (* 1 = 0.76575 loss)\nI0818 17:27:11.214926 22726 solver.cpp:228] Iteration 5800, loss = 0.075576\nI0818 17:27:11.214977 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:27:11.214999 22726 solver.cpp:244]     Train net output #1: loss = 0.075576 (* 1 = 0.075576 loss)\nI0818 17:27:11.296946 22726 sgd_solver.cpp:166] Iteration 5800, lr = 0.145\nI0818 17:29:29.420439 22726 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0818 17:30:54.479041 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84304\nI0818 17:30:54.479257 22726 solver.cpp:404]     Test net output #1: loss = 0.723728 (* 1 = 0.723728 loss)\nI0818 17:30:55.810905 22726 solver.cpp:228] Iteration 5900, loss = 0.0438968\nI0818 17:30:55.810956 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:30:55.810979 22726 solver.cpp:244]     Train net output #1: loss = 0.0438969 (* 1 = 0.0438969 loss)\nI0818 17:30:55.892340 22726 sgd_solver.cpp:166] Iteration 5900, lr = 0.1475\nI0818 17:33:14.045341 22726 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0818 17:34:39.064522 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0818 17:34:39.064776 22726 solver.cpp:404]     Test net output #1: loss = 0.759479 (* 1 = 0.759479 loss)\nI0818 17:34:40.396309 22726 solver.cpp:228] Iteration 6000, loss = 0.0176671\nI0818 17:34:40.396365 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 17:34:40.396381 22726 solver.cpp:244]     Train net output #1: loss = 0.0176671 (* 1 = 0.0176671 loss)\nI0818 17:34:40.478618 22726 sgd_solver.cpp:166] Iteration 6000, lr = 0.15\nI0818 17:36:58.632060 22726 solver.cpp:337] 
Iteration 6100, Testing net (#0)\nI0818 17:38:23.549490 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83436\nI0818 17:38:23.549731 22726 solver.cpp:404]     Test net output #1: loss = 0.769781 (* 1 = 0.769781 loss)\nI0818 17:38:24.881227 22726 solver.cpp:228] Iteration 6100, loss = 0.0912595\nI0818 17:38:24.881270 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:38:24.881290 22726 solver.cpp:244]     Train net output #1: loss = 0.0912595 (* 1 = 0.0912595 loss)\nI0818 17:38:24.959374 22726 sgd_solver.cpp:166] Iteration 6100, lr = 0.1525\nI0818 17:40:42.967525 22726 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0818 17:42:08.147724 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83616\nI0818 17:42:08.147999 22726 solver.cpp:404]     Test net output #1: loss = 0.760957 (* 1 = 0.760957 loss)\nI0818 17:42:09.479714 22726 solver.cpp:228] Iteration 6200, loss = 0.109271\nI0818 17:42:09.479763 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:42:09.479785 22726 solver.cpp:244]     Train net output #1: loss = 0.109271 (* 1 = 0.109271 loss)\nI0818 17:42:09.557953 22726 sgd_solver.cpp:166] Iteration 6200, lr = 0.155\nI0818 17:44:27.556921 22726 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0818 17:45:52.728344 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84016\nI0818 17:45:52.728598 22726 solver.cpp:404]     Test net output #1: loss = 0.708762 (* 1 = 0.708762 loss)\nI0818 17:45:54.059062 22726 solver.cpp:228] Iteration 6300, loss = 0.0224413\nI0818 17:45:54.059108 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:45:54.059130 22726 solver.cpp:244]     Train net output #1: loss = 0.0224414 (* 1 = 0.0224414 loss)\nI0818 17:45:54.139698 22726 sgd_solver.cpp:166] Iteration 6300, lr = 0.1575\nI0818 17:48:12.147606 22726 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0818 17:49:37.331274 22726 solver.cpp:404]     Test net output #0: accuracy = 0.846\nI0818 
17:49:37.331528 22726 solver.cpp:404]     Test net output #1: loss = 0.71715 (* 1 = 0.71715 loss)\nI0818 17:49:38.663532 22726 solver.cpp:228] Iteration 6400, loss = 0.0496525\nI0818 17:49:38.663578 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:49:38.663600 22726 solver.cpp:244]     Train net output #1: loss = 0.0496526 (* 1 = 0.0496526 loss)\nI0818 17:49:38.743216 22726 sgd_solver.cpp:166] Iteration 6400, lr = 0.16\nI0818 17:51:56.866817 22726 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0818 17:53:22.063324 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83532\nI0818 17:53:22.063591 22726 solver.cpp:404]     Test net output #1: loss = 0.751844 (* 1 = 0.751844 loss)\nI0818 17:53:23.393600 22726 solver.cpp:228] Iteration 6500, loss = 0.0879318\nI0818 17:53:23.393647 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:53:23.393671 22726 solver.cpp:244]     Train net output #1: loss = 0.0879318 (* 1 = 0.0879318 loss)\nI0818 17:53:23.480469 22726 sgd_solver.cpp:166] Iteration 6500, lr = 0.1625\nI0818 17:55:41.580214 22726 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0818 17:57:06.704402 22726 solver.cpp:404]     Test net output #0: accuracy = 0.83992\nI0818 17:57:06.704634 22726 solver.cpp:404]     Test net output #1: loss = 0.751893 (* 1 = 0.751893 loss)\nI0818 17:57:08.036309 22726 solver.cpp:228] Iteration 6600, loss = 0.0814455\nI0818 17:57:08.036355 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:57:08.036378 22726 solver.cpp:244]     Train net output #1: loss = 0.0814456 (* 1 = 0.0814456 loss)\nI0818 17:57:08.118108 22726 sgd_solver.cpp:166] Iteration 6600, lr = 0.165\nI0818 17:59:26.202986 22726 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0818 18:00:51.098157 22726 solver.cpp:404]     Test net output #0: accuracy = 0.840521\nI0818 18:00:51.098384 22726 solver.cpp:404]     Test net output #1: loss = 0.727555 (* 1 = 0.727555 loss)\nI0818 18:00:52.429585 22726 
solver.cpp:228] Iteration 6700, loss = 0.0737519\nI0818 18:00:52.429633 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:00:52.429656 22726 solver.cpp:244]     Train net output #1: loss = 0.073752 (* 1 = 0.073752 loss)\nI0818 18:00:52.512773 22726 sgd_solver.cpp:166] Iteration 6700, lr = 0.1675\nI0818 18:03:10.530063 22726 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0818 18:04:35.397002 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8496\nI0818 18:04:35.397266 22726 solver.cpp:404]     Test net output #1: loss = 0.705462 (* 1 = 0.705462 loss)\nI0818 18:04:36.728186 22726 solver.cpp:228] Iteration 6800, loss = 0.0617215\nI0818 18:04:36.728233 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:04:36.728256 22726 solver.cpp:244]     Train net output #1: loss = 0.0617215 (* 1 = 0.0617215 loss)\nI0818 18:04:36.810154 22726 sgd_solver.cpp:166] Iteration 6800, lr = 0.17\nI0818 18:06:54.919566 22726 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0818 18:08:19.885370 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84612\nI0818 18:08:19.885620 22726 solver.cpp:404]     Test net output #1: loss = 0.684281 (* 1 = 0.684281 loss)\nI0818 18:08:21.216878 22726 solver.cpp:228] Iteration 6900, loss = 0.0217699\nI0818 18:08:21.216924 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:08:21.216953 22726 solver.cpp:244]     Train net output #1: loss = 0.02177 (* 1 = 0.02177 loss)\nI0818 18:08:21.294780 22726 sgd_solver.cpp:166] Iteration 6900, lr = 0.1725\nI0818 18:10:39.356871 22726 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0818 18:12:04.506824 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84712\nI0818 18:12:04.507103 22726 solver.cpp:404]     Test net output #1: loss = 0.709323 (* 1 = 0.709323 loss)\nI0818 18:12:05.838762 22726 solver.cpp:228] Iteration 7000, loss = 0.0212007\nI0818 18:12:05.838807 22726 solver.cpp:244]     Train net output #0: accuracy = 
0.992\nI0818 18:12:05.838830 22726 solver.cpp:244]     Train net output #1: loss = 0.0212008 (* 1 = 0.0212008 loss)\nI0818 18:12:05.924917 22726 sgd_solver.cpp:166] Iteration 7000, lr = 0.175\nI0818 18:14:24.006451 22726 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0818 18:15:49.154940 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84536\nI0818 18:15:49.155161 22726 solver.cpp:404]     Test net output #1: loss = 0.706349 (* 1 = 0.706349 loss)\nI0818 18:15:50.486261 22726 solver.cpp:228] Iteration 7100, loss = 0.0331489\nI0818 18:15:50.486306 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:15:50.486322 22726 solver.cpp:244]     Train net output #1: loss = 0.033149 (* 1 = 0.033149 loss)\nI0818 18:15:50.569794 22726 sgd_solver.cpp:166] Iteration 7100, lr = 0.1775\nI0818 18:18:08.698842 22726 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0818 18:19:33.858561 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8432\nI0818 18:19:33.858805 22726 solver.cpp:404]     Test net output #1: loss = 0.691206 (* 1 = 0.691206 loss)\nI0818 18:19:35.189172 22726 solver.cpp:228] Iteration 7200, loss = 0.0592955\nI0818 18:19:35.189218 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:19:35.189239 22726 solver.cpp:244]     Train net output #1: loss = 0.0592956 (* 1 = 0.0592956 loss)\nI0818 18:19:35.270110 22726 sgd_solver.cpp:166] Iteration 7200, lr = 0.18\nI0818 18:21:53.397348 22726 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0818 18:23:18.572837 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84456\nI0818 18:23:18.573078 22726 solver.cpp:404]     Test net output #1: loss = 0.713217 (* 1 = 0.713217 loss)\nI0818 18:23:19.904335 22726 solver.cpp:228] Iteration 7300, loss = 0.0245717\nI0818 18:23:19.904382 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:23:19.904398 22726 solver.cpp:244]     Train net output #1: loss = 0.0245718 (* 1 = 0.0245718 loss)\nI0818 
18:23:19.986244 22726 sgd_solver.cpp:166] Iteration 7300, lr = 0.1825\nI0818 18:25:38.040313 22726 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0818 18:27:03.226361 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84692\nI0818 18:27:03.226609 22726 solver.cpp:404]     Test net output #1: loss = 0.702599 (* 1 = 0.702599 loss)\nI0818 18:27:04.558135 22726 solver.cpp:228] Iteration 7400, loss = 0.072273\nI0818 18:27:04.558178 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:27:04.558194 22726 solver.cpp:244]     Train net output #1: loss = 0.0722731 (* 1 = 0.0722731 loss)\nI0818 18:27:04.637418 22726 sgd_solver.cpp:166] Iteration 7400, lr = 0.185\nI0818 18:29:22.749737 22726 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0818 18:30:47.938884 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8426\nI0818 18:30:47.939132 22726 solver.cpp:404]     Test net output #1: loss = 0.71965 (* 1 = 0.71965 loss)\nI0818 18:30:49.269187 22726 solver.cpp:228] Iteration 7500, loss = 0.101555\nI0818 18:30:49.269237 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:30:49.269254 22726 solver.cpp:244]     Train net output #1: loss = 0.101555 (* 1 = 0.101555 loss)\nI0818 18:30:49.351379 22726 sgd_solver.cpp:166] Iteration 7500, lr = 0.1875\nI0818 18:33:07.354632 22726 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0818 18:34:32.556358 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84736\nI0818 18:34:32.556587 22726 solver.cpp:404]     Test net output #1: loss = 0.705106 (* 1 = 0.705106 loss)\nI0818 18:34:33.887753 22726 solver.cpp:228] Iteration 7600, loss = 0.0349902\nI0818 18:34:33.887800 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:34:33.887822 22726 solver.cpp:244]     Train net output #1: loss = 0.0349903 (* 1 = 0.0349903 loss)\nI0818 18:34:33.967409 22726 sgd_solver.cpp:166] Iteration 7600, lr = 0.19\nI0818 18:36:52.043977 22726 solver.cpp:337] Iteration 7700, 
Testing net (#0)\nI0818 18:38:17.253489 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8424\nI0818 18:38:17.253716 22726 solver.cpp:404]     Test net output #1: loss = 0.689503 (* 1 = 0.689503 loss)\nI0818 18:38:18.585551 22726 solver.cpp:228] Iteration 7700, loss = 0.106296\nI0818 18:38:18.585598 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:38:18.585620 22726 solver.cpp:244]     Train net output #1: loss = 0.106296 (* 1 = 0.106296 loss)\nI0818 18:38:18.669435 22726 sgd_solver.cpp:166] Iteration 7700, lr = 0.1925\nI0818 18:40:36.917291 22726 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0818 18:42:02.126731 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84584\nI0818 18:42:02.126960 22726 solver.cpp:404]     Test net output #1: loss = 0.694156 (* 1 = 0.694156 loss)\nI0818 18:42:03.457048 22726 solver.cpp:228] Iteration 7800, loss = 0.0988914\nI0818 18:42:03.457095 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:42:03.457118 22726 solver.cpp:244]     Train net output #1: loss = 0.0988915 (* 1 = 0.0988915 loss)\nI0818 18:42:03.535346 22726 sgd_solver.cpp:166] Iteration 7800, lr = 0.195\nI0818 18:44:21.659925 22726 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0818 18:45:46.868360 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84932\nI0818 18:45:46.868602 22726 solver.cpp:404]     Test net output #1: loss = 0.645691 (* 1 = 0.645691 loss)\nI0818 18:45:48.200240 22726 solver.cpp:228] Iteration 7900, loss = 0.0433437\nI0818 18:45:48.200287 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:45:48.200310 22726 solver.cpp:244]     Train net output #1: loss = 0.0433438 (* 1 = 0.0433438 loss)\nI0818 18:45:48.280771 22726 sgd_solver.cpp:166] Iteration 7900, lr = 0.1975\nI0818 18:48:06.431112 22726 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0818 18:49:31.606946 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84596\nI0818 18:49:31.607198 
22726 solver.cpp:404]     Test net output #1: loss = 0.655478 (* 1 = 0.655478 loss)\nI0818 18:49:32.937307 22726 solver.cpp:228] Iteration 8000, loss = 0.0911139\nI0818 18:49:32.937353 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:49:32.937376 22726 solver.cpp:244]     Train net output #1: loss = 0.091114 (* 1 = 0.091114 loss)\nI0818 18:49:33.019453 22726 sgd_solver.cpp:166] Iteration 8000, lr = 0.2\nI0818 18:51:51.077689 22726 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0818 18:53:16.289762 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84656\nI0818 18:53:16.290024 22726 solver.cpp:404]     Test net output #1: loss = 0.68793 (* 1 = 0.68793 loss)\nI0818 18:53:17.620975 22726 solver.cpp:228] Iteration 8100, loss = 0.0861403\nI0818 18:53:17.621023 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:53:17.621047 22726 solver.cpp:244]     Train net output #1: loss = 0.0861404 (* 1 = 0.0861404 loss)\nI0818 18:53:17.706980 22726 sgd_solver.cpp:166] Iteration 8100, lr = 0.2025\nI0818 18:55:35.664276 22726 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0818 18:57:00.887733 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84128\nI0818 18:57:00.887995 22726 solver.cpp:404]     Test net output #1: loss = 0.682959 (* 1 = 0.682959 loss)\nI0818 18:57:02.218427 22726 solver.cpp:228] Iteration 8200, loss = 0.0265967\nI0818 18:57:02.218473 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:57:02.218495 22726 solver.cpp:244]     Train net output #1: loss = 0.0265968 (* 1 = 0.0265968 loss)\nI0818 18:57:02.305029 22726 sgd_solver.cpp:166] Iteration 8200, lr = 0.205\nI0818 18:59:20.276101 22726 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0818 19:00:45.496431 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85076\nI0818 19:00:45.496662 22726 solver.cpp:404]     Test net output #1: loss = 0.65669 (* 1 = 0.65669 loss)\nI0818 19:00:46.826750 22726 solver.cpp:228] 
Iteration 8300, loss = 0.098622\nI0818 19:00:46.826795 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:00:46.826818 22726 solver.cpp:244]     Train net output #1: loss = 0.0986221 (* 1 = 0.0986221 loss)\nI0818 19:00:46.922039 22726 sgd_solver.cpp:166] Iteration 8300, lr = 0.2075\nI0818 19:03:04.910773 22726 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0818 19:04:30.119549 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85048\nI0818 19:04:30.119799 22726 solver.cpp:404]     Test net output #1: loss = 0.672524 (* 1 = 0.672524 loss)\nI0818 19:04:31.450099 22726 solver.cpp:228] Iteration 8400, loss = 0.0374461\nI0818 19:04:31.450148 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:04:31.450171 22726 solver.cpp:244]     Train net output #1: loss = 0.0374462 (* 1 = 0.0374462 loss)\nI0818 19:04:31.539750 22726 sgd_solver.cpp:166] Iteration 8400, lr = 0.21\nI0818 19:06:49.573984 22726 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0818 19:08:14.714231 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84712\nI0818 19:08:14.714468 22726 solver.cpp:404]     Test net output #1: loss = 0.681361 (* 1 = 0.681361 loss)\nI0818 19:08:16.044888 22726 solver.cpp:228] Iteration 8500, loss = 0.0292511\nI0818 19:08:16.044936 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:08:16.044961 22726 solver.cpp:244]     Train net output #1: loss = 0.0292512 (* 1 = 0.0292512 loss)\nI0818 19:08:16.132333 22726 sgd_solver.cpp:166] Iteration 8500, lr = 0.2125\nI0818 19:10:34.110600 22726 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0818 19:11:59.331642 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85632\nI0818 19:11:59.331904 22726 solver.cpp:404]     Test net output #1: loss = 0.655543 (* 1 = 0.655543 loss)\nI0818 19:12:00.663034 22726 solver.cpp:228] Iteration 8600, loss = 0.045823\nI0818 19:12:00.663079 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 
19:12:00.663101 22726 solver.cpp:244]     Train net output #1: loss = 0.0458231 (* 1 = 0.0458231 loss)\nI0818 19:12:00.744400 22726 sgd_solver.cpp:166] Iteration 8600, lr = 0.215\nI0818 19:14:18.674721 22726 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0818 19:15:43.784463 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8498\nI0818 19:15:43.784716 22726 solver.cpp:404]     Test net output #1: loss = 0.669559 (* 1 = 0.669559 loss)\nI0818 19:15:45.114868 22726 solver.cpp:228] Iteration 8700, loss = 0.0588202\nI0818 19:15:45.114913 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:15:45.114940 22726 solver.cpp:244]     Train net output #1: loss = 0.0588202 (* 1 = 0.0588202 loss)\nI0818 19:15:45.203021 22726 sgd_solver.cpp:166] Iteration 8700, lr = 0.2175\nI0818 19:18:03.317829 22726 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0818 19:19:28.559913 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85068\nI0818 19:19:28.560168 22726 solver.cpp:404]     Test net output #1: loss = 0.660987 (* 1 = 0.660987 loss)\nI0818 19:19:29.890306 22726 solver.cpp:228] Iteration 8800, loss = 0.161621\nI0818 19:19:29.890350 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 19:19:29.890367 22726 solver.cpp:244]     Train net output #1: loss = 0.161621 (* 1 = 0.161621 loss)\nI0818 19:19:29.979221 22726 sgd_solver.cpp:166] Iteration 8800, lr = 0.22\nI0818 19:21:47.950379 22726 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0818 19:23:13.178392 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85512\nI0818 19:23:13.178658 22726 solver.cpp:404]     Test net output #1: loss = 0.640297 (* 1 = 0.640297 loss)\nI0818 19:23:14.509940 22726 solver.cpp:228] Iteration 8900, loss = 0.0366035\nI0818 19:23:14.509987 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:23:14.510010 22726 solver.cpp:244]     Train net output #1: loss = 0.0366036 (* 1 = 0.0366036 loss)\nI0818 19:23:14.589468 22726 
sgd_solver.cpp:166] Iteration 8900, lr = 0.2225\nI0818 19:25:32.518167 22726 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0818 19:26:57.606638 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85064\nI0818 19:26:57.606905 22726 solver.cpp:404]     Test net output #1: loss = 0.64411 (* 1 = 0.64411 loss)\nI0818 19:26:58.936908 22726 solver.cpp:228] Iteration 9000, loss = 0.0588317\nI0818 19:26:58.936957 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:26:58.936981 22726 solver.cpp:244]     Train net output #1: loss = 0.0588318 (* 1 = 0.0588318 loss)\nI0818 19:26:59.018061 22726 sgd_solver.cpp:166] Iteration 9000, lr = 0.225\nI0818 19:29:17.048014 22726 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0818 19:30:42.275517 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85056\nI0818 19:30:42.275753 22726 solver.cpp:404]     Test net output #1: loss = 0.643737 (* 1 = 0.643737 loss)\nI0818 19:30:43.606670 22726 solver.cpp:228] Iteration 9100, loss = 0.0488401\nI0818 19:30:43.606715 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:30:43.606739 22726 solver.cpp:244]     Train net output #1: loss = 0.0488401 (* 1 = 0.0488401 loss)\nI0818 19:30:43.698498 22726 sgd_solver.cpp:166] Iteration 9100, lr = 0.2275\nI0818 19:33:01.808413 22726 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0818 19:34:27.039768 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84868\nI0818 19:34:27.040001 22726 solver.cpp:404]     Test net output #1: loss = 0.67388 (* 1 = 0.67388 loss)\nI0818 19:34:28.370421 22726 solver.cpp:228] Iteration 9200, loss = 0.0197324\nI0818 19:34:28.370466 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:34:28.370489 22726 solver.cpp:244]     Train net output #1: loss = 0.0197324 (* 1 = 0.0197324 loss)\nI0818 19:34:28.459566 22726 sgd_solver.cpp:166] Iteration 9200, lr = 0.23\nI0818 19:36:46.465903 22726 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0818 
19:38:11.675137 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85144\nI0818 19:38:11.675390 22726 solver.cpp:404]     Test net output #1: loss = 0.633219 (* 1 = 0.633219 loss)\nI0818 19:38:13.005251 22726 solver.cpp:228] Iteration 9300, loss = 0.0830665\nI0818 19:38:13.005296 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 19:38:13.005312 22726 solver.cpp:244]     Train net output #1: loss = 0.0830666 (* 1 = 0.0830666 loss)\nI0818 19:38:13.090884 22726 sgd_solver.cpp:166] Iteration 9300, lr = 0.2325\nI0818 19:40:31.082824 22726 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0818 19:41:56.244288 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85492\nI0818 19:41:56.244534 22726 solver.cpp:404]     Test net output #1: loss = 0.6388 (* 1 = 0.6388 loss)\nI0818 19:41:57.574854 22726 solver.cpp:228] Iteration 9400, loss = 0.0334792\nI0818 19:41:57.574897 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:41:57.574911 22726 solver.cpp:244]     Train net output #1: loss = 0.0334793 (* 1 = 0.0334793 loss)\nI0818 19:41:57.661435 22726 sgd_solver.cpp:166] Iteration 9400, lr = 0.235\nI0818 19:44:15.649163 22726 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0818 19:45:40.843580 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8474\nI0818 19:45:40.843816 22726 solver.cpp:404]     Test net output #1: loss = 0.656249 (* 1 = 0.656249 loss)\nI0818 19:45:42.173611 22726 solver.cpp:228] Iteration 9500, loss = 0.119957\nI0818 19:45:42.173655 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:45:42.173671 22726 solver.cpp:244]     Train net output #1: loss = 0.119957 (* 1 = 0.119957 loss)\nI0818 19:45:42.254011 22726 sgd_solver.cpp:166] Iteration 9500, lr = 0.2375\nI0818 19:48:00.306996 22726 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0818 19:49:25.500551 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85272\nI0818 19:49:25.500771 22726 solver.cpp:404]     
Test net output #1: loss = 0.668104 (* 1 = 0.668104 loss)\nI0818 19:49:26.831041 22726 solver.cpp:228] Iteration 9600, loss = 0.0328171\nI0818 19:49:26.831085 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:49:26.831101 22726 solver.cpp:244]     Train net output #1: loss = 0.0328171 (* 1 = 0.0328171 loss)\nI0818 19:49:26.919994 22726 sgd_solver.cpp:166] Iteration 9600, lr = 0.24\nI0818 19:51:45.028527 22726 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0818 19:53:10.019242 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85184\nI0818 19:53:10.019461 22726 solver.cpp:404]     Test net output #1: loss = 0.645052 (* 1 = 0.645052 loss)\nI0818 19:53:11.349659 22726 solver.cpp:228] Iteration 9700, loss = 0.0131564\nI0818 19:53:11.349702 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:53:11.349717 22726 solver.cpp:244]     Train net output #1: loss = 0.0131564 (* 1 = 0.0131564 loss)\nI0818 19:53:11.431668 22726 sgd_solver.cpp:166] Iteration 9700, lr = 0.2425\nI0818 19:55:29.450274 22726 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0818 19:56:54.540925 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85128\nI0818 19:56:54.541188 22726 solver.cpp:404]     Test net output #1: loss = 0.659973 (* 1 = 0.659973 loss)\nI0818 19:56:55.870882 22726 solver.cpp:228] Iteration 9800, loss = 0.0493322\nI0818 19:56:55.870924 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:56:55.870939 22726 solver.cpp:244]     Train net output #1: loss = 0.0493323 (* 1 = 0.0493323 loss)\nI0818 19:56:55.958405 22726 sgd_solver.cpp:166] Iteration 9800, lr = 0.245\nI0818 19:59:14.097896 22726 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0818 20:00:39.152446 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85352\nI0818 20:00:39.152674 22726 solver.cpp:404]     Test net output #1: loss = 0.651346 (* 1 = 0.651346 loss)\nI0818 20:00:40.482733 22726 solver.cpp:228] Iteration 9900, loss = 
0.13777\nI0818 20:00:40.482776 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:00:40.482792 22726 solver.cpp:244]     Train net output #1: loss = 0.13777 (* 1 = 0.13777 loss)\nI0818 20:00:40.570375 22726 sgd_solver.cpp:166] Iteration 9900, lr = 0.2475\nI0818 20:02:58.692926 22726 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0818 20:04:23.880759 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85384\nI0818 20:04:23.881006 22726 solver.cpp:404]     Test net output #1: loss = 0.646895 (* 1 = 0.646895 loss)\nI0818 20:04:25.211343 22726 solver.cpp:228] Iteration 10000, loss = 0.037176\nI0818 20:04:25.211385 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:04:25.211400 22726 solver.cpp:244]     Train net output #1: loss = 0.037176 (* 1 = 0.037176 loss)\nI0818 20:04:25.300983 22726 sgd_solver.cpp:166] Iteration 10000, lr = 0.25\nI0818 20:06:43.491082 22726 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0818 20:08:08.685355 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85176\nI0818 20:08:08.685581 22726 solver.cpp:404]     Test net output #1: loss = 0.633388 (* 1 = 0.633388 loss)\nI0818 20:08:10.015789 22726 solver.cpp:228] Iteration 10100, loss = 0.0279549\nI0818 20:08:10.015831 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:08:10.015846 22726 solver.cpp:244]     Train net output #1: loss = 0.0279549 (* 1 = 0.0279549 loss)\nI0818 20:08:10.102573 22726 sgd_solver.cpp:166] Iteration 10100, lr = 0.2525\nI0818 20:10:28.277125 22726 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0818 20:11:53.394943 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85744\nI0818 20:11:53.395203 22726 solver.cpp:404]     Test net output #1: loss = 0.645731 (* 1 = 0.645731 loss)\nI0818 20:11:54.725111 22726 solver.cpp:228] Iteration 10200, loss = 0.0333407\nI0818 20:11:54.725152 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:11:54.725167 22726 
solver.cpp:244]     Train net output #1: loss = 0.0333406 (* 1 = 0.0333406 loss)\nI0818 20:11:54.806020 22726 sgd_solver.cpp:166] Iteration 10200, lr = 0.255\nI0818 20:14:12.976119 22726 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0818 20:15:38.033881 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86048\nI0818 20:15:38.034113 22726 solver.cpp:404]     Test net output #1: loss = 0.645328 (* 1 = 0.645328 loss)\nI0818 20:15:39.364596 22726 solver.cpp:228] Iteration 10300, loss = 0.0732604\nI0818 20:15:39.364639 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:15:39.364655 22726 solver.cpp:244]     Train net output #1: loss = 0.0732604 (* 1 = 0.0732604 loss)\nI0818 20:15:39.453217 22726 sgd_solver.cpp:166] Iteration 10300, lr = 0.2575\nI0818 20:17:57.572589 22726 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0818 20:19:22.769407 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85772\nI0818 20:19:22.769657 22726 solver.cpp:404]     Test net output #1: loss = 0.638992 (* 1 = 0.638992 loss)\nI0818 20:19:24.099987 22726 solver.cpp:228] Iteration 10400, loss = 0.0785984\nI0818 20:19:24.100028 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:19:24.100047 22726 solver.cpp:244]     Train net output #1: loss = 0.0785983 (* 1 = 0.0785983 loss)\nI0818 20:19:24.183234 22726 sgd_solver.cpp:166] Iteration 10400, lr = 0.26\nI0818 20:21:42.317054 22726 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0818 20:23:07.513962 22726 solver.cpp:404]     Test net output #0: accuracy = 0.857601\nI0818 20:23:07.514261 22726 solver.cpp:404]     Test net output #1: loss = 0.622076 (* 1 = 0.622076 loss)\nI0818 20:23:08.844647 22726 solver.cpp:228] Iteration 10500, loss = 0.172251\nI0818 20:23:08.844689 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:23:08.844704 22726 solver.cpp:244]     Train net output #1: loss = 0.172251 (* 1 = 0.172251 loss)\nI0818 20:23:08.926090 22726 
sgd_solver.cpp:166] Iteration 10500, lr = 0.2625\nI0818 20:25:27.029492 22726 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0818 20:26:52.214741 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85064\nI0818 20:26:52.215003 22726 solver.cpp:404]     Test net output #1: loss = 0.65446 (* 1 = 0.65446 loss)\nI0818 20:26:53.545177 22726 solver.cpp:228] Iteration 10600, loss = 0.0385147\nI0818 20:26:53.545218 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:26:53.545233 22726 solver.cpp:244]     Train net output #1: loss = 0.0385147 (* 1 = 0.0385147 loss)\nI0818 20:26:53.629343 22726 sgd_solver.cpp:166] Iteration 10600, lr = 0.265\nI0818 20:29:11.704916 22726 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0818 20:30:36.890761 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85092\nI0818 20:30:36.891048 22726 solver.cpp:404]     Test net output #1: loss = 0.656244 (* 1 = 0.656244 loss)\nI0818 20:30:38.220769 22726 solver.cpp:228] Iteration 10700, loss = 0.126409\nI0818 20:30:38.220814 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:30:38.220830 22726 solver.cpp:244]     Train net output #1: loss = 0.126409 (* 1 = 0.126409 loss)\nI0818 20:30:38.302923 22726 sgd_solver.cpp:166] Iteration 10700, lr = 0.2675\nI0818 20:32:56.347484 22726 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0818 20:34:21.551213 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85724\nI0818 20:34:21.551437 22726 solver.cpp:404]     Test net output #1: loss = 0.632377 (* 1 = 0.632377 loss)\nI0818 20:34:22.881584 22726 solver.cpp:228] Iteration 10800, loss = 0.090954\nI0818 20:34:22.881628 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:34:22.881644 22726 solver.cpp:244]     Train net output #1: loss = 0.090954 (* 1 = 0.090954 loss)\nI0818 20:34:22.960676 22726 sgd_solver.cpp:166] Iteration 10800, lr = 0.27\nI0818 20:36:41.080157 22726 solver.cpp:337] Iteration 10900, Testing net 
(#0)\nI0818 20:38:06.266187 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86184\nI0818 20:38:06.266396 22726 solver.cpp:404]     Test net output #1: loss = 0.596632 (* 1 = 0.596632 loss)\nI0818 20:38:07.596468 22726 solver.cpp:228] Iteration 10900, loss = 0.0322899\nI0818 20:38:07.596510 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:38:07.596526 22726 solver.cpp:244]     Train net output #1: loss = 0.0322898 (* 1 = 0.0322898 loss)\nI0818 20:38:07.680279 22726 sgd_solver.cpp:166] Iteration 10900, lr = 0.2725\nI0818 20:40:25.896540 22726 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0818 20:41:50.919436 22726 solver.cpp:404]     Test net output #0: accuracy = 0.858041\nI0818 20:41:50.919646 22726 solver.cpp:404]     Test net output #1: loss = 0.624215 (* 1 = 0.624215 loss)\nI0818 20:41:52.252976 22726 solver.cpp:228] Iteration 11000, loss = 0.0760239\nI0818 20:41:52.253021 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:41:52.253041 22726 solver.cpp:244]     Train net output #1: loss = 0.0760239 (* 1 = 0.0760239 loss)\nI0818 20:41:52.328632 22726 sgd_solver.cpp:166] Iteration 11000, lr = 0.275\nI0818 20:44:10.369981 22726 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0818 20:45:35.527627 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8608\nI0818 20:45:35.527851 22726 solver.cpp:404]     Test net output #1: loss = 0.622041 (* 1 = 0.622041 loss)\nI0818 20:45:36.861133 22726 solver.cpp:228] Iteration 11100, loss = 0.0520044\nI0818 20:45:36.861176 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:45:36.861192 22726 solver.cpp:244]     Train net output #1: loss = 0.0520043 (* 1 = 0.0520043 loss)\nI0818 20:45:36.939447 22726 sgd_solver.cpp:166] Iteration 11100, lr = 0.2775\nI0818 20:47:54.947015 22726 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0818 20:49:20.096272 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85448\nI0818 20:49:20.096495 
22726 solver.cpp:404]     Test net output #1: loss = 0.619157 (* 1 = 0.619157 loss)\nI0818 20:49:21.429358 22726 solver.cpp:228] Iteration 11200, loss = 0.0394183\nI0818 20:49:21.429402 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:49:21.429419 22726 solver.cpp:244]     Train net output #1: loss = 0.0394182 (* 1 = 0.0394182 loss)\nI0818 20:49:21.508146 22726 sgd_solver.cpp:166] Iteration 11200, lr = 0.28\nI0818 20:51:39.632385 22726 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0818 20:53:04.635921 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8568\nI0818 20:53:04.636222 22726 solver.cpp:404]     Test net output #1: loss = 0.633431 (* 1 = 0.633431 loss)\nI0818 20:53:05.969820 22726 solver.cpp:228] Iteration 11300, loss = 0.0377337\nI0818 20:53:05.969866 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:53:05.969885 22726 solver.cpp:244]     Train net output #1: loss = 0.0377336 (* 1 = 0.0377336 loss)\nI0818 20:53:06.051174 22726 sgd_solver.cpp:166] Iteration 11300, lr = 0.2825\nI0818 20:55:24.220829 22726 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0818 20:56:49.402659 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86228\nI0818 20:56:49.402895 22726 solver.cpp:404]     Test net output #1: loss = 0.586749 (* 1 = 0.586749 loss)\nI0818 20:56:50.735976 22726 solver.cpp:228] Iteration 11400, loss = 0.0748914\nI0818 20:56:50.736021 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:56:50.736037 22726 solver.cpp:244]     Train net output #1: loss = 0.0748913 (* 1 = 0.0748913 loss)\nI0818 20:56:50.818527 22726 sgd_solver.cpp:166] Iteration 11400, lr = 0.285\nI0818 20:59:08.945103 22726 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0818 21:00:34.111260 22726 solver.cpp:404]     Test net output #0: accuracy = 0.84788\nI0818 21:00:34.111546 22726 solver.cpp:404]     Test net output #1: loss = 0.65163 (* 1 = 0.65163 loss)\nI0818 21:00:35.445282 22726 
solver.cpp:228] Iteration 11500, loss = 0.0948463\nI0818 21:00:35.445325 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:00:35.445341 22726 solver.cpp:244]     Train net output #1: loss = 0.0948463 (* 1 = 0.0948463 loss)\nI0818 21:00:35.517163 22726 sgd_solver.cpp:166] Iteration 11500, lr = 0.2875\nI0818 21:02:53.545243 22726 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0818 21:04:18.725810 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86284\nI0818 21:04:18.726069 22726 solver.cpp:404]     Test net output #1: loss = 0.603287 (* 1 = 0.603287 loss)\nI0818 21:04:20.059257 22726 solver.cpp:228] Iteration 11600, loss = 0.0666989\nI0818 21:04:20.059300 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:04:20.059316 22726 solver.cpp:244]     Train net output #1: loss = 0.0666989 (* 1 = 0.0666989 loss)\nI0818 21:04:20.142490 22726 sgd_solver.cpp:166] Iteration 11600, lr = 0.29\nI0818 21:06:38.173735 22726 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0818 21:08:03.173233 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86172\nI0818 21:08:03.173486 22726 solver.cpp:404]     Test net output #1: loss = 0.613409 (* 1 = 0.613409 loss)\nI0818 21:08:04.506255 22726 solver.cpp:228] Iteration 11700, loss = 0.133054\nI0818 21:08:04.506299 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:08:04.506314 22726 solver.cpp:244]     Train net output #1: loss = 0.133054 (* 1 = 0.133054 loss)\nI0818 21:08:04.589857 22726 sgd_solver.cpp:166] Iteration 11700, lr = 0.2925\nI0818 21:10:22.664657 22726 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0818 21:11:47.804760 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85884\nI0818 21:11:47.804991 22726 solver.cpp:404]     Test net output #1: loss = 0.609471 (* 1 = 0.609471 loss)\nI0818 21:11:49.138684 22726 solver.cpp:228] Iteration 11800, loss = 0.0319363\nI0818 21:11:49.138725 22726 solver.cpp:244]     Train net output #0: 
accuracy = 0.992\nI0818 21:11:49.138741 22726 solver.cpp:244]     Train net output #1: loss = 0.0319362 (* 1 = 0.0319362 loss)\nI0818 21:11:49.220726 22726 sgd_solver.cpp:166] Iteration 11800, lr = 0.295\nI0818 21:14:07.312683 22726 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0818 21:15:32.432297 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86112\nI0818 21:15:32.432531 22726 solver.cpp:404]     Test net output #1: loss = 0.619075 (* 1 = 0.619075 loss)\nI0818 21:15:33.765244 22726 solver.cpp:228] Iteration 11900, loss = 0.087336\nI0818 21:15:33.765285 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:15:33.765300 22726 solver.cpp:244]     Train net output #1: loss = 0.0873359 (* 1 = 0.0873359 loss)\nI0818 21:15:33.845034 22726 sgd_solver.cpp:166] Iteration 11900, lr = 0.2975\nI0818 21:17:51.889317 22726 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0818 21:19:17.027874 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0818 21:19:17.028110 22726 solver.cpp:404]     Test net output #1: loss = 0.590575 (* 1 = 0.590575 loss)\nI0818 21:19:18.361502 22726 solver.cpp:228] Iteration 12000, loss = 0.043816\nI0818 21:19:18.361546 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:19:18.361562 22726 solver.cpp:244]     Train net output #1: loss = 0.0438159 (* 1 = 0.0438159 loss)\nI0818 21:19:18.441910 22726 sgd_solver.cpp:166] Iteration 12000, lr = 0.3\nI0818 21:21:36.617291 22726 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0818 21:23:01.804919 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8638\nI0818 21:23:01.805225 22726 solver.cpp:404]     Test net output #1: loss = 0.596991 (* 1 = 0.596991 loss)\nI0818 21:23:03.138304 22726 solver.cpp:228] Iteration 12100, loss = 0.040026\nI0818 21:23:03.138346 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:23:03.138361 22726 solver.cpp:244]     Train net output #1: loss = 0.0400259 (* 1 = 0.0400259 
loss)\nI0818 21:23:03.217839 22726 sgd_solver.cpp:166] Iteration 12100, lr = 0.3025\nI0818 21:25:21.208008 22726 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0818 21:26:46.299057 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85816\nI0818 21:26:46.299291 22726 solver.cpp:404]     Test net output #1: loss = 0.631496 (* 1 = 0.631496 loss)\nI0818 21:26:47.632454 22726 solver.cpp:228] Iteration 12200, loss = 0.0652126\nI0818 21:26:47.632498 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:26:47.632513 22726 solver.cpp:244]     Train net output #1: loss = 0.0652125 (* 1 = 0.0652125 loss)\nI0818 21:26:47.718585 22726 sgd_solver.cpp:166] Iteration 12200, lr = 0.305\nI0818 21:29:05.753614 22726 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 21:30:30.921066 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8664\nI0818 21:30:30.921329 22726 solver.cpp:404]     Test net output #1: loss = 0.569227 (* 1 = 0.569227 loss)\nI0818 21:30:32.254845 22726 solver.cpp:228] Iteration 12300, loss = 0.0445013\nI0818 21:30:32.254889 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:30:32.254904 22726 solver.cpp:244]     Train net output #1: loss = 0.0445013 (* 1 = 0.0445013 loss)\nI0818 21:30:32.338582 22726 sgd_solver.cpp:166] Iteration 12300, lr = 0.3075\nI0818 21:32:50.404098 22726 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 21:34:15.560027 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86032\nI0818 21:34:15.560261 22726 solver.cpp:404]     Test net output #1: loss = 0.584553 (* 1 = 0.584553 loss)\nI0818 21:34:16.893366 22726 solver.cpp:228] Iteration 12400, loss = 0.0380621\nI0818 21:34:16.893409 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:34:16.893424 22726 solver.cpp:244]     Train net output #1: loss = 0.038062 (* 1 = 0.038062 loss)\nI0818 21:34:16.977377 22726 sgd_solver.cpp:166] Iteration 12400, lr = 0.31\nI0818 21:36:34.973660 22726 
solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 21:38:00.112280 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86424\nI0818 21:38:00.112512 22726 solver.cpp:404]     Test net output #1: loss = 0.574194 (* 1 = 0.574194 loss)\nI0818 21:38:01.445418 22726 solver.cpp:228] Iteration 12500, loss = 0.0459679\nI0818 21:38:01.445459 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:38:01.445474 22726 solver.cpp:244]     Train net output #1: loss = 0.0459679 (* 1 = 0.0459679 loss)\nI0818 21:38:01.520400 22726 sgd_solver.cpp:166] Iteration 12500, lr = 0.3125\nI0818 21:40:19.428238 22726 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 21:41:44.586627 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86336\nI0818 21:41:44.586850 22726 solver.cpp:404]     Test net output #1: loss = 0.596426 (* 1 = 0.596426 loss)\nI0818 21:41:45.919643 22726 solver.cpp:228] Iteration 12600, loss = 0.0723309\nI0818 21:41:45.919687 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:41:45.919701 22726 solver.cpp:244]     Train net output #1: loss = 0.0723309 (* 1 = 0.0723309 loss)\nI0818 21:41:46.003942 22726 sgd_solver.cpp:166] Iteration 12600, lr = 0.315\nI0818 21:44:04.048444 22726 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 21:45:29.206328 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86052\nI0818 21:45:29.206636 22726 solver.cpp:404]     Test net output #1: loss = 0.607887 (* 1 = 0.607887 loss)\nI0818 21:45:30.540956 22726 solver.cpp:228] Iteration 12700, loss = 0.112363\nI0818 21:45:30.540998 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:45:30.541014 22726 solver.cpp:244]     Train net output #1: loss = 0.112363 (* 1 = 0.112363 loss)\nI0818 21:45:30.621538 22726 sgd_solver.cpp:166] Iteration 12700, lr = 0.3175\nI0818 21:47:48.619830 22726 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 21:49:13.856678 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.85528\nI0818 21:49:13.856976 22726 solver.cpp:404]     Test net output #1: loss = 0.61336 (* 1 = 0.61336 loss)\nI0818 21:49:15.191896 22726 solver.cpp:228] Iteration 12800, loss = 0.097828\nI0818 21:49:15.191936 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:49:15.191951 22726 solver.cpp:244]     Train net output #1: loss = 0.0978279 (* 1 = 0.0978279 loss)\nI0818 21:49:15.269177 22726 sgd_solver.cpp:166] Iteration 12800, lr = 0.32\nI0818 21:51:33.247167 22726 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 21:52:58.587256 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8618\nI0818 21:52:58.587577 22726 solver.cpp:404]     Test net output #1: loss = 0.563554 (* 1 = 0.563554 loss)\nI0818 21:52:59.920725 22726 solver.cpp:228] Iteration 12900, loss = 0.0972971\nI0818 21:52:59.920768 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:52:59.920784 22726 solver.cpp:244]     Train net output #1: loss = 0.097297 (* 1 = 0.097297 loss)\nI0818 21:53:00.001044 22726 sgd_solver.cpp:166] Iteration 12900, lr = 0.3225\nI0818 21:55:17.944594 22726 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 21:56:43.319188 22726 solver.cpp:404]     Test net output #0: accuracy = 0.861\nI0818 21:56:43.319516 22726 solver.cpp:404]     Test net output #1: loss = 0.585039 (* 1 = 0.585039 loss)\nI0818 21:56:44.653517 22726 solver.cpp:228] Iteration 13000, loss = 0.0948445\nI0818 21:56:44.653558 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:56:44.653573 22726 solver.cpp:244]     Train net output #1: loss = 0.0948444 (* 1 = 0.0948444 loss)\nI0818 21:56:44.728464 22726 sgd_solver.cpp:166] Iteration 13000, lr = 0.325\nI0818 21:59:02.722539 22726 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 22:00:28.099803 22726 solver.cpp:404]     Test net output #0: accuracy = 0.85808\nI0818 22:00:28.100111 22726 solver.cpp:404]     Test net output #1: loss = 0.578811 (* 1 = 0.578811 
loss)\nI0818 22:00:29.433341 22726 solver.cpp:228] Iteration 13100, loss = 0.0583398\nI0818 22:00:29.433379 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:00:29.433395 22726 solver.cpp:244]     Train net output #1: loss = 0.0583397 (* 1 = 0.0583397 loss)\nI0818 22:00:29.509918 22726 sgd_solver.cpp:166] Iteration 13100, lr = 0.3275\nI0818 22:02:47.626504 22726 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 22:04:13.007009 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0818 22:04:13.007333 22726 solver.cpp:404]     Test net output #1: loss = 0.536016 (* 1 = 0.536016 loss)\nI0818 22:04:14.340690 22726 solver.cpp:228] Iteration 13200, loss = 0.0427131\nI0818 22:04:14.340730 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:04:14.340746 22726 solver.cpp:244]     Train net output #1: loss = 0.042713 (* 1 = 0.042713 loss)\nI0818 22:04:14.417172 22726 sgd_solver.cpp:166] Iteration 13200, lr = 0.33\nI0818 22:06:32.405973 22726 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 22:07:57.733341 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86496\nI0818 22:07:57.733661 22726 solver.cpp:404]     Test net output #1: loss = 0.577857 (* 1 = 0.577857 loss)\nI0818 22:07:59.068184 22726 solver.cpp:228] Iteration 13300, loss = 0.0619659\nI0818 22:07:59.068233 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:07:59.068249 22726 solver.cpp:244]     Train net output #1: loss = 0.0619658 (* 1 = 0.0619658 loss)\nI0818 22:07:59.149515 22726 sgd_solver.cpp:166] Iteration 13300, lr = 0.3325\nI0818 22:10:17.330637 22726 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 22:11:42.680147 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86912\nI0818 22:11:42.680480 22726 solver.cpp:404]     Test net output #1: loss = 0.556845 (* 1 = 0.556845 loss)\nI0818 22:11:44.013687 22726 solver.cpp:228] Iteration 13400, loss = 0.0612258\nI0818 22:11:44.013726 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:11:44.013742 22726 solver.cpp:244]     Train net output #1: loss = 0.0612257 (* 1 = 0.0612257 loss)\nI0818 22:11:44.094700 22726 sgd_solver.cpp:166] Iteration 13400, lr = 0.335\nI0818 22:14:02.228660 22726 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 22:15:27.524184 22726 solver.cpp:404]     Test net output #0: accuracy = 0.861\nI0818 22:15:27.524489 22726 solver.cpp:404]     Test net output #1: loss = 0.58558 (* 1 = 0.58558 loss)\nI0818 22:15:28.859158 22726 solver.cpp:228] Iteration 13500, loss = 0.109838\nI0818 22:15:28.859200 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:15:28.859215 22726 solver.cpp:244]     Train net output #1: loss = 0.109838 (* 1 = 0.109838 loss)\nI0818 22:15:28.936559 22726 sgd_solver.cpp:166] Iteration 13500, lr = 0.3375\nI0818 22:17:46.975121 22726 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 22:19:12.209321 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86472\nI0818 22:19:12.209645 22726 solver.cpp:404]     Test net output #1: loss = 0.565424 (* 1 = 0.565424 loss)\nI0818 22:19:13.544739 22726 solver.cpp:228] Iteration 13600, loss = 0.0456946\nI0818 22:19:13.544778 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:19:13.544793 22726 solver.cpp:244]     Train net output #1: loss = 0.0456945 (* 1 = 0.0456945 loss)\nI0818 22:19:13.625166 22726 sgd_solver.cpp:166] Iteration 13600, lr = 0.34\nI0818 22:21:31.666604 22726 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 22:22:57.066439 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8652\nI0818 22:22:57.066737 22726 solver.cpp:404]     Test net output #1: loss = 0.567982 (* 1 = 0.567982 loss)\nI0818 22:22:58.400347 22726 solver.cpp:228] Iteration 13700, loss = 0.066685\nI0818 22:22:58.400389 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:22:58.400413 22726 solver.cpp:244]     Train net output #1: 
loss = 0.0666848 (* 1 = 0.0666848 loss)\nI0818 22:22:58.480311 22726 sgd_solver.cpp:166] Iteration 13700, lr = 0.3425\nI0818 22:25:16.573817 22726 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 22:26:41.833546 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8576\nI0818 22:26:41.833875 22726 solver.cpp:404]     Test net output #1: loss = 0.592569 (* 1 = 0.592569 loss)\nI0818 22:26:43.168470 22726 solver.cpp:228] Iteration 13800, loss = 0.137755\nI0818 22:26:43.168514 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 22:26:43.168535 22726 solver.cpp:244]     Train net output #1: loss = 0.137755 (* 1 = 0.137755 loss)\nI0818 22:26:43.247022 22726 sgd_solver.cpp:166] Iteration 13800, lr = 0.345\nI0818 22:29:01.289880 22726 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 22:30:26.536327 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86552\nI0818 22:30:26.536638 22726 solver.cpp:404]     Test net output #1: loss = 0.572654 (* 1 = 0.572654 loss)\nI0818 22:30:27.871275 22726 solver.cpp:228] Iteration 13900, loss = 0.110724\nI0818 22:30:27.871316 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:30:27.871340 22726 solver.cpp:244]     Train net output #1: loss = 0.110724 (* 1 = 0.110724 loss)\nI0818 22:30:27.955162 22726 sgd_solver.cpp:166] Iteration 13900, lr = 0.3475\nI0818 22:32:46.198329 22726 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 22:34:11.448262 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86748\nI0818 22:34:11.448597 22726 solver.cpp:404]     Test net output #1: loss = 0.55649 (* 1 = 0.55649 loss)\nI0818 22:34:12.783031 22726 solver.cpp:228] Iteration 14000, loss = 0.0349211\nI0818 22:34:12.783072 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:34:12.783095 22726 solver.cpp:244]     Train net output #1: loss = 0.034921 (* 1 = 0.034921 loss)\nI0818 22:34:12.862308 22726 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 
22:36:30.946352 22726 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 22:37:56.186966 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8662\nI0818 22:37:56.187294 22726 solver.cpp:404]     Test net output #1: loss = 0.552344 (* 1 = 0.552344 loss)\nI0818 22:37:57.521602 22726 solver.cpp:228] Iteration 14100, loss = 0.0675692\nI0818 22:37:57.521644 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:37:57.521667 22726 solver.cpp:244]     Train net output #1: loss = 0.0675691 (* 1 = 0.0675691 loss)\nI0818 22:37:57.596423 22726 sgd_solver.cpp:166] Iteration 14100, lr = 0.3525\nI0818 22:40:15.625530 22726 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 22:41:40.860647 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0818 22:41:40.860966 22726 solver.cpp:404]     Test net output #1: loss = 0.560255 (* 1 = 0.560255 loss)\nI0818 22:41:42.195677 22726 solver.cpp:228] Iteration 14200, loss = 0.0596975\nI0818 22:41:42.195720 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:41:42.195735 22726 solver.cpp:244]     Train net output #1: loss = 0.0596974 (* 1 = 0.0596974 loss)\nI0818 22:41:42.277523 22726 sgd_solver.cpp:166] Iteration 14200, lr = 0.355\nI0818 22:44:00.446722 22726 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 22:45:25.732324 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86444\nI0818 22:45:25.732628 22726 solver.cpp:404]     Test net output #1: loss = 0.572741 (* 1 = 0.572741 loss)\nI0818 22:45:27.065784 22726 solver.cpp:228] Iteration 14300, loss = 0.061841\nI0818 22:45:27.065826 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:45:27.065850 22726 solver.cpp:244]     Train net output #1: loss = 0.0618409 (* 1 = 0.0618409 loss)\nI0818 22:45:27.143729 22726 sgd_solver.cpp:166] Iteration 14300, lr = 0.3575\nI0818 22:47:45.158377 22726 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 22:49:10.396535 22726 solver.cpp:404]    
 Test net output #0: accuracy = 0.8612\nI0818 22:49:10.396836 22726 solver.cpp:404]     Test net output #1: loss = 0.590298 (* 1 = 0.590298 loss)\nI0818 22:49:11.730559 22726 solver.cpp:228] Iteration 14400, loss = 0.0536143\nI0818 22:49:11.730603 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:49:11.730626 22726 solver.cpp:244]     Train net output #1: loss = 0.0536141 (* 1 = 0.0536141 loss)\nI0818 22:49:11.812271 22726 sgd_solver.cpp:166] Iteration 14400, lr = 0.36\nI0818 22:51:29.947104 22726 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 22:52:55.194689 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86388\nI0818 22:52:55.195011 22726 solver.cpp:404]     Test net output #1: loss = 0.580256 (* 1 = 0.580256 loss)\nI0818 22:52:56.527987 22726 solver.cpp:228] Iteration 14500, loss = 0.136246\nI0818 22:52:56.528034 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:52:56.528055 22726 solver.cpp:244]     Train net output #1: loss = 0.136246 (* 1 = 0.136246 loss)\nI0818 22:52:56.609251 22726 sgd_solver.cpp:166] Iteration 14500, lr = 0.3625\nI0818 22:55:14.571894 22726 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 22:56:39.835914 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86696\nI0818 22:56:39.836221 22726 solver.cpp:404]     Test net output #1: loss = 0.567143 (* 1 = 0.567143 loss)\nI0818 22:56:41.169643 22726 solver.cpp:228] Iteration 14600, loss = 0.0770639\nI0818 22:56:41.169688 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:56:41.169710 22726 solver.cpp:244]     Train net output #1: loss = 0.0770638 (* 1 = 0.0770638 loss)\nI0818 22:56:41.246868 22726 sgd_solver.cpp:166] Iteration 14600, lr = 0.365\nI0818 22:58:59.222813 22726 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 23:00:24.459857 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86884\nI0818 23:00:24.460175 22726 solver.cpp:404]     Test net output #1: loss = 
0.583211 (* 1 = 0.583211 loss)\nI0818 23:00:25.794715 22726 solver.cpp:228] Iteration 14700, loss = 0.173235\nI0818 23:00:25.794757 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:00:25.794773 22726 solver.cpp:244]     Train net output #1: loss = 0.173235 (* 1 = 0.173235 loss)\nI0818 23:00:25.871482 22726 sgd_solver.cpp:166] Iteration 14700, lr = 0.3675\nI0818 23:02:44.051566 22726 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 23:04:09.275841 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0818 23:04:09.276142 22726 solver.cpp:404]     Test net output #1: loss = 0.569177 (* 1 = 0.569177 loss)\nI0818 23:04:10.610803 22726 solver.cpp:228] Iteration 14800, loss = 0.0169321\nI0818 23:04:10.610846 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 23:04:10.610862 22726 solver.cpp:244]     Train net output #1: loss = 0.016932 (* 1 = 0.016932 loss)\nI0818 23:04:10.693040 22726 sgd_solver.cpp:166] Iteration 14800, lr = 0.37\nI0818 23:06:28.826529 22726 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 23:07:54.057749 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86568\nI0818 23:07:54.058073 22726 solver.cpp:404]     Test net output #1: loss = 0.571647 (* 1 = 0.571647 loss)\nI0818 23:07:55.392112 22726 solver.cpp:228] Iteration 14900, loss = 0.0684817\nI0818 23:07:55.392154 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:07:55.392169 22726 solver.cpp:244]     Train net output #1: loss = 0.0684816 (* 1 = 0.0684816 loss)\nI0818 23:07:55.474949 22726 sgd_solver.cpp:166] Iteration 14900, lr = 0.3725\nI0818 23:10:13.675551 22726 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 23:11:38.911798 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86644\nI0818 23:11:38.912122 22726 solver.cpp:404]     Test net output #1: loss = 0.555311 (* 1 = 0.555311 loss)\nI0818 23:11:40.246737 22726 solver.cpp:228] Iteration 15000, loss = 0.0764202\nI0818 
23:11:40.246781 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:11:40.246796 22726 solver.cpp:244]     Train net output #1: loss = 0.0764201 (* 1 = 0.0764201 loss)\nI0818 23:11:40.322506 22726 sgd_solver.cpp:166] Iteration 15000, lr = 0.375\nI0818 23:13:58.415006 22726 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 23:15:23.667662 22726 solver.cpp:404]     Test net output #0: accuracy = 0.865\nI0818 23:15:23.667996 22726 solver.cpp:404]     Test net output #1: loss = 0.554583 (* 1 = 0.554583 loss)\nI0818 23:15:25.002699 22726 solver.cpp:228] Iteration 15100, loss = 0.0615335\nI0818 23:15:25.002746 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:15:25.002768 22726 solver.cpp:244]     Train net output #1: loss = 0.0615334 (* 1 = 0.0615334 loss)\nI0818 23:15:25.078439 22726 sgd_solver.cpp:166] Iteration 15100, lr = 0.3775\nI0818 23:17:43.109906 22726 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 23:19:08.336264 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8706\nI0818 23:19:08.336583 22726 solver.cpp:404]     Test net output #1: loss = 0.540544 (* 1 = 0.540544 loss)\nI0818 23:19:09.670424 22726 solver.cpp:228] Iteration 15200, loss = 0.0622453\nI0818 23:19:09.670466 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:19:09.670480 22726 solver.cpp:244]     Train net output #1: loss = 0.0622452 (* 1 = 0.0622452 loss)\nI0818 23:19:09.745434 22726 sgd_solver.cpp:166] Iteration 15200, lr = 0.38\nI0818 23:21:27.835223 22726 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 23:22:53.065965 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86664\nI0818 23:22:53.066263 22726 solver.cpp:404]     Test net output #1: loss = 0.542276 (* 1 = 0.542276 loss)\nI0818 23:22:54.400833 22726 solver.cpp:228] Iteration 15300, loss = 0.0608777\nI0818 23:22:54.400876 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:22:54.400892 22726 
solver.cpp:244]     Train net output #1: loss = 0.0608776 (* 1 = 0.0608776 loss)\nI0818 23:22:54.478827 22726 sgd_solver.cpp:166] Iteration 15300, lr = 0.3825\nI0818 23:25:12.758831 22726 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 23:26:37.978888 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87312\nI0818 23:26:37.979223 22726 solver.cpp:404]     Test net output #1: loss = 0.552346 (* 1 = 0.552346 loss)\nI0818 23:26:39.312127 22726 solver.cpp:228] Iteration 15400, loss = 0.0465014\nI0818 23:26:39.312170 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:26:39.312186 22726 solver.cpp:244]     Train net output #1: loss = 0.0465013 (* 1 = 0.0465013 loss)\nI0818 23:26:39.392899 22726 sgd_solver.cpp:166] Iteration 15400, lr = 0.385\nI0818 23:28:57.623509 22726 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 23:30:22.873992 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0818 23:30:22.874305 22726 solver.cpp:404]     Test net output #1: loss = 0.574414 (* 1 = 0.574414 loss)\nI0818 23:30:24.208307 22726 solver.cpp:228] Iteration 15500, loss = 0.0674272\nI0818 23:30:24.208353 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:30:24.208377 22726 solver.cpp:244]     Train net output #1: loss = 0.0674271 (* 1 = 0.0674271 loss)\nI0818 23:30:24.289921 22726 sgd_solver.cpp:166] Iteration 15500, lr = 0.3875\nI0818 23:32:42.452720 22726 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 23:34:07.697598 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86796\nI0818 23:34:07.697942 22726 solver.cpp:404]     Test net output #1: loss = 0.540221 (* 1 = 0.540221 loss)\nI0818 23:34:09.031029 22726 solver.cpp:228] Iteration 15600, loss = 0.0308636\nI0818 23:34:09.031077 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:34:09.031100 22726 solver.cpp:244]     Train net output #1: loss = 0.0308635 (* 1 = 0.0308635 loss)\nI0818 23:34:09.109746 22726 
sgd_solver.cpp:166] Iteration 15600, lr = 0.39\nI0818 23:36:27.260068 22726 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 23:37:52.507362 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86164\nI0818 23:37:52.507688 22726 solver.cpp:404]     Test net output #1: loss = 0.564676 (* 1 = 0.564676 loss)\nI0818 23:37:53.841089 22726 solver.cpp:228] Iteration 15700, loss = 0.148182\nI0818 23:37:53.841133 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:37:53.841156 22726 solver.cpp:244]     Train net output #1: loss = 0.148182 (* 1 = 0.148182 loss)\nI0818 23:37:53.925487 22726 sgd_solver.cpp:166] Iteration 15700, lr = 0.3925\nI0818 23:40:11.960384 22726 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 23:41:37.205848 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87244\nI0818 23:41:37.206185 22726 solver.cpp:404]     Test net output #1: loss = 0.560471 (* 1 = 0.560471 loss)\nI0818 23:41:38.541216 22726 solver.cpp:228] Iteration 15800, loss = 0.0463755\nI0818 23:41:38.541306 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:41:38.541333 22726 solver.cpp:244]     Train net output #1: loss = 0.0463754 (* 1 = 0.0463754 loss)\nI0818 23:41:38.617995 22726 sgd_solver.cpp:166] Iteration 15800, lr = 0.395\nI0818 23:43:56.672590 22726 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 23:45:21.918645 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86556\nI0818 23:45:21.918972 22726 solver.cpp:404]     Test net output #1: loss = 0.527176 (* 1 = 0.527176 loss)\nI0818 23:45:23.253695 22726 solver.cpp:228] Iteration 15900, loss = 0.0647182\nI0818 23:45:23.253741 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:45:23.253763 22726 solver.cpp:244]     Train net output #1: loss = 0.0647181 (* 1 = 0.0647181 loss)\nI0818 23:45:23.332995 22726 sgd_solver.cpp:166] Iteration 15900, lr = 0.3975\nI0818 23:47:41.338796 22726 solver.cpp:337] Iteration 16000, Testing 
net (#0)\nI0818 23:49:06.603596 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87168\nI0818 23:49:06.603929 22726 solver.cpp:404]     Test net output #1: loss = 0.529721 (* 1 = 0.529721 loss)\nI0818 23:49:07.938426 22726 solver.cpp:228] Iteration 16000, loss = 0.0438052\nI0818 23:49:07.938473 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:49:07.938498 22726 solver.cpp:244]     Train net output #1: loss = 0.0438051 (* 1 = 0.0438051 loss)\nI0818 23:49:08.019857 22726 sgd_solver.cpp:166] Iteration 16000, lr = 0.4\nI0818 23:51:26.029757 22726 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 23:52:51.338263 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0818 23:52:51.338578 22726 solver.cpp:404]     Test net output #1: loss = 0.526531 (* 1 = 0.526531 loss)\nI0818 23:52:52.675644 22726 solver.cpp:228] Iteration 16100, loss = 0.0427424\nI0818 23:52:52.675689 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:52:52.675714 22726 solver.cpp:244]     Train net output #1: loss = 0.0427423 (* 1 = 0.0427423 loss)\nI0818 23:52:52.752454 22726 sgd_solver.cpp:166] Iteration 16100, lr = 0.4025\nI0818 23:55:10.780356 22726 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 23:56:36.129739 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87012\nI0818 23:56:36.130050 22726 solver.cpp:404]     Test net output #1: loss = 0.534155 (* 1 = 0.534155 loss)\nI0818 23:56:37.464180 22726 solver.cpp:228] Iteration 16200, loss = 0.111396\nI0818 23:56:37.464229 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 23:56:37.464252 22726 solver.cpp:244]     Train net output #1: loss = 0.111396 (* 1 = 0.111396 loss)\nI0818 23:56:37.542078 22726 sgd_solver.cpp:166] Iteration 16200, lr = 0.405\nI0818 23:58:55.568892 22726 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0819 00:00:20.895087 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87424\nI0819 00:00:20.895391 
22726 solver.cpp:404]     Test net output #1: loss = 0.528594 (* 1 = 0.528594 loss)\nI0819 00:00:22.228996 22726 solver.cpp:228] Iteration 16300, loss = 0.0668828\nI0819 00:00:22.229043 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:00:22.229065 22726 solver.cpp:244]     Train net output #1: loss = 0.0668827 (* 1 = 0.0668827 loss)\nI0819 00:00:22.303839 22726 sgd_solver.cpp:166] Iteration 16300, lr = 0.4075\nI0819 00:02:40.417605 22726 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0819 00:04:05.689630 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 00:04:05.689939 22726 solver.cpp:404]     Test net output #1: loss = 0.538659 (* 1 = 0.538659 loss)\nI0819 00:04:07.024346 22726 solver.cpp:228] Iteration 16400, loss = 0.0376049\nI0819 00:04:07.024386 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:04:07.024410 22726 solver.cpp:244]     Train net output #1: loss = 0.0376047 (* 1 = 0.0376047 loss)\nI0819 00:04:07.106669 22726 sgd_solver.cpp:166] Iteration 16400, lr = 0.41\nI0819 00:06:25.253717 22726 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0819 00:07:50.638005 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87036\nI0819 00:07:50.638334 22726 solver.cpp:404]     Test net output #1: loss = 0.526264 (* 1 = 0.526264 loss)\nI0819 00:07:51.972807 22726 solver.cpp:228] Iteration 16500, loss = 0.0372676\nI0819 00:07:51.972851 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:07:51.972875 22726 solver.cpp:244]     Train net output #1: loss = 0.0372675 (* 1 = 0.0372675 loss)\nI0819 00:07:52.049441 22726 sgd_solver.cpp:166] Iteration 16500, lr = 0.4125\nI0819 00:10:10.162780 22726 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0819 00:11:35.530057 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87104\nI0819 00:11:35.530369 22726 solver.cpp:404]     Test net output #1: loss = 0.543883 (* 1 = 0.543883 loss)\nI0819 00:11:36.864744 22726 
solver.cpp:228] Iteration 16600, loss = 0.0770823\nI0819 00:11:36.864789 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:11:36.864812 22726 solver.cpp:244]     Train net output #1: loss = 0.0770821 (* 1 = 0.0770821 loss)\nI0819 00:11:36.948230 22726 sgd_solver.cpp:166] Iteration 16600, lr = 0.415\nI0819 00:13:55.037751 22726 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0819 00:15:20.433540 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0819 00:15:20.433867 22726 solver.cpp:404]     Test net output #1: loss = 0.541139 (* 1 = 0.541139 loss)\nI0819 00:15:21.767208 22726 solver.cpp:228] Iteration 16700, loss = 0.0212722\nI0819 00:15:21.767251 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:15:21.767273 22726 solver.cpp:244]     Train net output #1: loss = 0.021272 (* 1 = 0.021272 loss)\nI0819 00:15:21.852367 22726 sgd_solver.cpp:166] Iteration 16700, lr = 0.4175\nI0819 00:17:40.117399 22726 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0819 00:19:05.437789 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87488\nI0819 00:19:05.438117 22726 solver.cpp:404]     Test net output #1: loss = 0.530789 (* 1 = 0.530789 loss)\nI0819 00:19:06.772869 22726 solver.cpp:228] Iteration 16800, loss = 0.0763497\nI0819 00:19:06.772913 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:19:06.772936 22726 solver.cpp:244]     Train net output #1: loss = 0.0763495 (* 1 = 0.0763495 loss)\nI0819 00:19:06.849580 22726 sgd_solver.cpp:166] Iteration 16800, lr = 0.42\nI0819 00:21:24.851709 22726 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0819 00:22:50.193464 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86912\nI0819 00:22:50.193787 22726 solver.cpp:404]     Test net output #1: loss = 0.553615 (* 1 = 0.553615 loss)\nI0819 00:22:51.527360 22726 solver.cpp:228] Iteration 16900, loss = 0.0629713\nI0819 00:22:51.527403 22726 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0819 00:22:51.527426 22726 solver.cpp:244]     Train net output #1: loss = 0.0629711 (* 1 = 0.0629711 loss)\nI0819 00:22:51.605396 22726 sgd_solver.cpp:166] Iteration 16900, lr = 0.4225\nI0819 00:25:09.588943 22726 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0819 00:26:34.960525 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0819 00:26:34.960836 22726 solver.cpp:404]     Test net output #1: loss = 0.552544 (* 1 = 0.552544 loss)\nI0819 00:26:36.294112 22726 solver.cpp:228] Iteration 17000, loss = 0.0693857\nI0819 00:26:36.294154 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:26:36.294178 22726 solver.cpp:244]     Train net output #1: loss = 0.0693855 (* 1 = 0.0693855 loss)\nI0819 00:26:36.374146 22726 sgd_solver.cpp:166] Iteration 17000, lr = 0.425\nI0819 00:28:54.470779 22726 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0819 00:30:19.807116 22726 solver.cpp:404]     Test net output #0: accuracy = 0.872801\nI0819 00:30:19.807440 22726 solver.cpp:404]     Test net output #1: loss = 0.52155 (* 1 = 0.52155 loss)\nI0819 00:30:21.142591 22726 solver.cpp:228] Iteration 17100, loss = 0.0229798\nI0819 00:30:21.142634 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:30:21.142658 22726 solver.cpp:244]     Train net output #1: loss = 0.0229797 (* 1 = 0.0229797 loss)\nI0819 00:30:21.224059 22726 sgd_solver.cpp:166] Iteration 17100, lr = 0.4275\nI0819 00:32:39.254106 22726 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0819 00:34:04.592799 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87324\nI0819 00:34:04.593128 22726 solver.cpp:404]     Test net output #1: loss = 0.524724 (* 1 = 0.524724 loss)\nI0819 00:34:05.927609 22726 solver.cpp:228] Iteration 17200, loss = 0.0463251\nI0819 00:34:05.927652 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:34:05.927676 22726 solver.cpp:244]     Train net output #1: loss = 0.046325 (* 1 = 
0.046325 loss)\nI0819 00:34:06.012763 22726 sgd_solver.cpp:166] Iteration 17200, lr = 0.43\nI0819 00:36:24.361562 22726 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0819 00:37:49.586535 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86652\nI0819 00:37:49.586859 22726 solver.cpp:404]     Test net output #1: loss = 0.536998 (* 1 = 0.536998 loss)\nI0819 00:37:50.919245 22726 solver.cpp:228] Iteration 17300, loss = 0.0412119\nI0819 00:37:50.919287 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:37:50.919302 22726 solver.cpp:244]     Train net output #1: loss = 0.0412118 (* 1 = 0.0412118 loss)\nI0819 00:37:51.003481 22726 sgd_solver.cpp:166] Iteration 17300, lr = 0.4325\nI0819 00:40:09.008711 22726 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0819 00:41:34.238845 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86976\nI0819 00:41:34.239172 22726 solver.cpp:404]     Test net output #1: loss = 0.544659 (* 1 = 0.544659 loss)\nI0819 00:41:35.573940 22726 solver.cpp:228] Iteration 17400, loss = 0.106916\nI0819 00:41:35.573982 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:41:35.574002 22726 solver.cpp:244]     Train net output #1: loss = 0.106916 (* 1 = 0.106916 loss)\nI0819 00:41:35.651319 22726 sgd_solver.cpp:166] Iteration 17400, lr = 0.435\nI0819 00:43:53.647388 22726 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0819 00:45:18.381832 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87344\nI0819 00:45:18.382114 22726 solver.cpp:404]     Test net output #1: loss = 0.540694 (* 1 = 0.540694 loss)\nI0819 00:45:19.715991 22726 solver.cpp:228] Iteration 17500, loss = 0.107782\nI0819 00:45:19.716034 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:45:19.716049 22726 solver.cpp:244]     Train net output #1: loss = 0.107782 (* 1 = 0.107782 loss)\nI0819 00:45:19.790122 22726 sgd_solver.cpp:166] Iteration 17500, lr = 0.4375\nI0819 00:47:37.694692 22726 
solver.cpp:337] Iteration 17600, Testing net (#0)\nI0819 00:49:02.562525 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8736\nI0819 00:49:02.562799 22726 solver.cpp:404]     Test net output #1: loss = 0.515557 (* 1 = 0.515557 loss)\nI0819 00:49:03.896220 22726 solver.cpp:228] Iteration 17600, loss = 0.0913487\nI0819 00:49:03.896261 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:49:03.896277 22726 solver.cpp:244]     Train net output #1: loss = 0.0913486 (* 1 = 0.0913486 loss)\nI0819 00:49:03.977382 22726 sgd_solver.cpp:166] Iteration 17600, lr = 0.44\nI0819 00:51:21.840839 22726 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0819 00:52:46.651932 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87528\nI0819 00:52:46.652249 22726 solver.cpp:404]     Test net output #1: loss = 0.506216 (* 1 = 0.506216 loss)\nI0819 00:52:47.986050 22726 solver.cpp:228] Iteration 17700, loss = 0.0636979\nI0819 00:52:47.986093 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:52:47.986109 22726 solver.cpp:244]     Train net output #1: loss = 0.0636977 (* 1 = 0.0636977 loss)\nI0819 00:52:48.068655 22726 sgd_solver.cpp:166] Iteration 17700, lr = 0.4425\nI0819 00:55:05.844619 22726 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0819 00:56:31.063536 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87168\nI0819 00:56:31.063864 22726 solver.cpp:404]     Test net output #1: loss = 0.518447 (* 1 = 0.518447 loss)\nI0819 00:56:32.397557 22726 solver.cpp:228] Iteration 17800, loss = 0.0866379\nI0819 00:56:32.397599 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:56:32.397615 22726 solver.cpp:244]     Train net output #1: loss = 0.0866377 (* 1 = 0.0866377 loss)\nI0819 00:56:32.470722 22726 sgd_solver.cpp:166] Iteration 17800, lr = 0.445\nI0819 00:58:50.299213 22726 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0819 01:00:15.529379 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.87444\nI0819 01:00:15.529681 22726 solver.cpp:404]     Test net output #1: loss = 0.505941 (* 1 = 0.505941 loss)\nI0819 01:00:16.863185 22726 solver.cpp:228] Iteration 17900, loss = 0.0843636\nI0819 01:00:16.863231 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:00:16.863247 22726 solver.cpp:244]     Train net output #1: loss = 0.0843634 (* 1 = 0.0843634 loss)\nI0819 01:00:16.944409 22726 sgd_solver.cpp:166] Iteration 17900, lr = 0.4475\nI0819 01:02:34.735071 22726 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0819 01:03:59.960772 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8742\nI0819 01:03:59.961102 22726 solver.cpp:404]     Test net output #1: loss = 0.517494 (* 1 = 0.517494 loss)\nI0819 01:04:01.293771 22726 solver.cpp:228] Iteration 18000, loss = 0.0401598\nI0819 01:04:01.293823 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:04:01.293839 22726 solver.cpp:244]     Train net output #1: loss = 0.0401596 (* 1 = 0.0401596 loss)\nI0819 01:04:01.376766 22726 sgd_solver.cpp:166] Iteration 18000, lr = 0.45\nI0819 01:06:19.163204 22726 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0819 01:07:44.385768 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0819 01:07:44.386096 22726 solver.cpp:404]     Test net output #1: loss = 0.535202 (* 1 = 0.535202 loss)\nI0819 01:07:45.719570 22726 solver.cpp:228] Iteration 18100, loss = 0.0605398\nI0819 01:07:45.719612 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:07:45.719626 22726 solver.cpp:244]     Train net output #1: loss = 0.0605397 (* 1 = 0.0605397 loss)\nI0819 01:07:45.803050 22726 sgd_solver.cpp:166] Iteration 18100, lr = 0.4525\nI0819 01:10:03.568186 22726 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0819 01:11:28.800058 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87012\nI0819 01:11:28.800390 22726 solver.cpp:404]     Test net output #1: loss = 0.514867 (* 1 = 0.514867 
loss)\nI0819 01:11:30.133755 22726 solver.cpp:228] Iteration 18200, loss = 0.12456\nI0819 01:11:30.133797 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:11:30.133812 22726 solver.cpp:244]     Train net output #1: loss = 0.124559 (* 1 = 0.124559 loss)\nI0819 01:11:30.216416 22726 sgd_solver.cpp:166] Iteration 18200, lr = 0.455\nI0819 01:13:48.031249 22726 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0819 01:15:13.259488 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87304\nI0819 01:15:13.259815 22726 solver.cpp:404]     Test net output #1: loss = 0.532911 (* 1 = 0.532911 loss)\nI0819 01:15:14.592876 22726 solver.cpp:228] Iteration 18300, loss = 0.0380372\nI0819 01:15:14.592919 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:15:14.592936 22726 solver.cpp:244]     Train net output #1: loss = 0.0380371 (* 1 = 0.0380371 loss)\nI0819 01:15:14.673274 22726 sgd_solver.cpp:166] Iteration 18300, lr = 0.4575\nI0819 01:17:32.457651 22726 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0819 01:18:57.693025 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87096\nI0819 01:18:57.693334 22726 solver.cpp:404]     Test net output #1: loss = 0.527416 (* 1 = 0.527416 loss)\nI0819 01:18:59.026448 22726 solver.cpp:228] Iteration 18400, loss = 0.127637\nI0819 01:18:59.026489 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:18:59.026505 22726 solver.cpp:244]     Train net output #1: loss = 0.127637 (* 1 = 0.127637 loss)\nI0819 01:18:59.105778 22726 sgd_solver.cpp:166] Iteration 18400, lr = 0.46\nI0819 01:21:16.870829 22726 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0819 01:22:42.112920 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86808\nI0819 01:22:42.113222 22726 solver.cpp:404]     Test net output #1: loss = 0.535686 (* 1 = 0.535686 loss)\nI0819 01:22:43.446601 22726 solver.cpp:228] Iteration 18500, loss = 0.163259\nI0819 01:22:43.446645 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:22:43.446660 22726 solver.cpp:244]     Train net output #1: loss = 0.163259 (* 1 = 0.163259 loss)\nI0819 01:22:43.526571 22726 sgd_solver.cpp:166] Iteration 18500, lr = 0.4625\nI0819 01:25:01.342877 22726 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0819 01:26:26.573303 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86732\nI0819 01:26:26.573631 22726 solver.cpp:404]     Test net output #1: loss = 0.540047 (* 1 = 0.540047 loss)\nI0819 01:26:27.906862 22726 solver.cpp:228] Iteration 18600, loss = 0.105797\nI0819 01:26:27.906915 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:26:27.906932 22726 solver.cpp:244]     Train net output #1: loss = 0.105797 (* 1 = 0.105797 loss)\nI0819 01:26:27.986066 22726 sgd_solver.cpp:166] Iteration 18600, lr = 0.465\nI0819 01:28:45.954368 22726 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0819 01:30:11.175287 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87364\nI0819 01:30:11.175604 22726 solver.cpp:404]     Test net output #1: loss = 0.507061 (* 1 = 0.507061 loss)\nI0819 01:30:12.509639 22726 solver.cpp:228] Iteration 18700, loss = 0.141576\nI0819 01:30:12.509693 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:30:12.509709 22726 solver.cpp:244]     Train net output #1: loss = 0.141576 (* 1 = 0.141576 loss)\nI0819 01:30:12.586285 22726 sgd_solver.cpp:166] Iteration 18700, lr = 0.4675\nI0819 01:32:30.480911 22726 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0819 01:33:55.720623 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0819 01:33:55.720958 22726 solver.cpp:404]     Test net output #1: loss = 0.517211 (* 1 = 0.517211 loss)\nI0819 01:33:57.054752 22726 solver.cpp:228] Iteration 18800, loss = 0.0488095\nI0819 01:33:57.054795 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:33:57.054811 22726 solver.cpp:244]     Train net output 
#1: loss = 0.0488094 (* 1 = 0.0488094 loss)\nI0819 01:33:57.134076 22726 sgd_solver.cpp:166] Iteration 18800, lr = 0.47\nI0819 01:36:15.080008 22726 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0819 01:37:40.321279 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87248\nI0819 01:37:40.321604 22726 solver.cpp:404]     Test net output #1: loss = 0.516113 (* 1 = 0.516113 loss)\nI0819 01:37:41.655452 22726 solver.cpp:228] Iteration 18900, loss = 0.0496628\nI0819 01:37:41.655494 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:37:41.655509 22726 solver.cpp:244]     Train net output #1: loss = 0.0496627 (* 1 = 0.0496627 loss)\nI0819 01:37:41.735008 22726 sgd_solver.cpp:166] Iteration 18900, lr = 0.4725\nI0819 01:39:59.640651 22726 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0819 01:41:24.876821 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 01:41:24.877140 22726 solver.cpp:404]     Test net output #1: loss = 0.490196 (* 1 = 0.490196 loss)\nI0819 01:41:26.211565 22726 solver.cpp:228] Iteration 19000, loss = 0.0551938\nI0819 01:41:26.211607 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 01:41:26.211623 22726 solver.cpp:244]     Train net output #1: loss = 0.0551936 (* 1 = 0.0551936 loss)\nI0819 01:41:26.283578 22726 sgd_solver.cpp:166] Iteration 19000, lr = 0.475\nI0819 01:43:44.147994 22726 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0819 01:45:09.377501 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0819 01:45:09.377862 22726 solver.cpp:404]     Test net output #1: loss = 0.49377 (* 1 = 0.49377 loss)\nI0819 01:45:10.711907 22726 solver.cpp:228] Iteration 19100, loss = 0.077916\nI0819 01:45:10.711948 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:45:10.711964 22726 solver.cpp:244]     Train net output #1: loss = 0.0779159 (* 1 = 0.0779159 loss)\nI0819 01:45:10.790292 22726 sgd_solver.cpp:166] Iteration 19100, lr = 
0.4775\nI0819 01:47:28.632269 22726 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0819 01:48:53.867233 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87644\nI0819 01:48:53.867539 22726 solver.cpp:404]     Test net output #1: loss = 0.506874 (* 1 = 0.506874 loss)\nI0819 01:48:55.201995 22726 solver.cpp:228] Iteration 19200, loss = 0.0988541\nI0819 01:48:55.202046 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:48:55.202064 22726 solver.cpp:244]     Train net output #1: loss = 0.0988539 (* 1 = 0.0988539 loss)\nI0819 01:48:55.281186 22726 sgd_solver.cpp:166] Iteration 19200, lr = 0.48\nI0819 01:51:13.134865 22726 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0819 01:52:38.365485 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87404\nI0819 01:52:38.365814 22726 solver.cpp:404]     Test net output #1: loss = 0.508267 (* 1 = 0.508267 loss)\nI0819 01:52:39.699239 22726 solver.cpp:228] Iteration 19300, loss = 0.0490904\nI0819 01:52:39.699280 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:52:39.699295 22726 solver.cpp:244]     Train net output #1: loss = 0.0490902 (* 1 = 0.0490902 loss)\nI0819 01:52:39.780247 22726 sgd_solver.cpp:166] Iteration 19300, lr = 0.4825\nI0819 01:54:57.624042 22726 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0819 01:56:22.861095 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86316\nI0819 01:56:22.861405 22726 solver.cpp:404]     Test net output #1: loss = 0.556116 (* 1 = 0.556116 loss)\nI0819 01:56:24.193930 22726 solver.cpp:228] Iteration 19400, loss = 0.0989479\nI0819 01:56:24.193972 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:56:24.193989 22726 solver.cpp:244]     Train net output #1: loss = 0.0989477 (* 1 = 0.0989477 loss)\nI0819 01:56:24.272742 22726 sgd_solver.cpp:166] Iteration 19400, lr = 0.485\nI0819 01:58:42.227022 22726 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0819 02:00:07.466215 22726 
solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0819 02:00:07.466516 22726 solver.cpp:404]     Test net output #1: loss = 0.495772 (* 1 = 0.495772 loss)\nI0819 02:00:08.799418 22726 solver.cpp:228] Iteration 19500, loss = 0.0524038\nI0819 02:00:08.799469 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:00:08.799486 22726 solver.cpp:244]     Train net output #1: loss = 0.0524035 (* 1 = 0.0524035 loss)\nI0819 02:00:08.883577 22726 sgd_solver.cpp:166] Iteration 19500, lr = 0.4875\nI0819 02:02:26.753214 22726 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0819 02:03:51.999603 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87604\nI0819 02:03:51.999917 22726 solver.cpp:404]     Test net output #1: loss = 0.50052 (* 1 = 0.50052 loss)\nI0819 02:03:53.333499 22726 solver.cpp:228] Iteration 19600, loss = 0.0975625\nI0819 02:03:53.333550 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:03:53.333567 22726 solver.cpp:244]     Train net output #1: loss = 0.0975623 (* 1 = 0.0975623 loss)\nI0819 02:03:53.422945 22726 sgd_solver.cpp:166] Iteration 19600, lr = 0.49\nI0819 02:06:11.304603 22726 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0819 02:07:36.666684 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8688\nI0819 02:07:36.667016 22726 solver.cpp:404]     Test net output #1: loss = 0.525009 (* 1 = 0.525009 loss)\nI0819 02:07:38.001457 22726 solver.cpp:228] Iteration 19700, loss = 0.0894855\nI0819 02:07:38.001500 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:07:38.001516 22726 solver.cpp:244]     Train net output #1: loss = 0.0894853 (* 1 = 0.0894853 loss)\nI0819 02:07:38.080081 22726 sgd_solver.cpp:166] Iteration 19700, lr = 0.4925\nI0819 02:09:56.025076 22726 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0819 02:11:21.322038 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87428\nI0819 02:11:21.322332 22726 solver.cpp:404]     Test net 
output #1: loss = 0.502514 (* 1 = 0.502514 loss)\nI0819 02:11:22.655833 22726 solver.cpp:228] Iteration 19800, loss = 0.133147\nI0819 02:11:22.655884 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:11:22.655901 22726 solver.cpp:244]     Train net output #1: loss = 0.133147 (* 1 = 0.133147 loss)\nI0819 02:11:22.730820 22726 sgd_solver.cpp:166] Iteration 19800, lr = 0.495\nI0819 02:13:40.644140 22726 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0819 02:15:05.952098 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0819 02:15:05.952440 22726 solver.cpp:404]     Test net output #1: loss = 0.493729 (* 1 = 0.493729 loss)\nI0819 02:15:07.286521 22726 solver.cpp:228] Iteration 19900, loss = 0.0339891\nI0819 02:15:07.286573 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:15:07.286590 22726 solver.cpp:244]     Train net output #1: loss = 0.0339889 (* 1 = 0.0339889 loss)\nI0819 02:15:07.358465 22726 sgd_solver.cpp:166] Iteration 19900, lr = 0.4975\nI0819 02:17:25.256984 22726 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0819 02:18:50.591428 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87324\nI0819 02:18:50.591734 22726 solver.cpp:404]     Test net output #1: loss = 0.503316 (* 1 = 0.503316 loss)\nI0819 02:18:51.925905 22726 solver.cpp:228] Iteration 20000, loss = 0.0965864\nI0819 02:18:51.925956 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:18:51.925973 22726 solver.cpp:244]     Train net output #1: loss = 0.0965861 (* 1 = 0.0965861 loss)\nI0819 02:18:52.011157 22726 sgd_solver.cpp:166] Iteration 20000, lr = 0.5\nI0819 02:21:09.869519 22726 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0819 02:22:35.198736 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0819 02:22:35.199033 22726 solver.cpp:404]     Test net output #1: loss = 0.502021 (* 1 = 0.502021 loss)\nI0819 02:22:36.533740 22726 solver.cpp:228] Iteration 20100, loss = 
0.072269\nI0819 02:22:36.533784 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:22:36.533800 22726 solver.cpp:244]     Train net output #1: loss = 0.0722687 (* 1 = 0.0722687 loss)\nI0819 02:22:36.611357 22726 sgd_solver.cpp:166] Iteration 20100, lr = 0.5025\nI0819 02:24:54.547176 22726 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0819 02:26:19.866402 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87364\nI0819 02:26:19.866705 22726 solver.cpp:404]     Test net output #1: loss = 0.516902 (* 1 = 0.516902 loss)\nI0819 02:26:21.200122 22726 solver.cpp:228] Iteration 20200, loss = 0.0893676\nI0819 02:26:21.200165 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:26:21.200181 22726 solver.cpp:244]     Train net output #1: loss = 0.0893674 (* 1 = 0.0893674 loss)\nI0819 02:26:21.275579 22726 sgd_solver.cpp:166] Iteration 20200, lr = 0.505\nI0819 02:28:39.137975 22726 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0819 02:30:04.471264 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0819 02:30:04.471577 22726 solver.cpp:404]     Test net output #1: loss = 0.485514 (* 1 = 0.485514 loss)\nI0819 02:30:05.805331 22726 solver.cpp:228] Iteration 20300, loss = 0.0930716\nI0819 02:30:05.805375 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:30:05.805392 22726 solver.cpp:244]     Train net output #1: loss = 0.0930713 (* 1 = 0.0930713 loss)\nI0819 02:30:05.880168 22726 sgd_solver.cpp:166] Iteration 20300, lr = 0.5075\nI0819 02:32:23.706722 22726 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0819 02:33:48.993764 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87336\nI0819 02:33:48.994092 22726 solver.cpp:404]     Test net output #1: loss = 0.50526 (* 1 = 0.50526 loss)\nI0819 02:33:50.326771 22726 solver.cpp:228] Iteration 20400, loss = 0.0796948\nI0819 02:33:50.326824 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:33:50.326843 
22726 solver.cpp:244]     Train net output #1: loss = 0.0796946 (* 1 = 0.0796946 loss)\nI0819 02:33:50.414252 22726 sgd_solver.cpp:166] Iteration 20400, lr = 0.51\nI0819 02:36:08.312935 22726 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0819 02:37:33.677733 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0819 02:37:33.678045 22726 solver.cpp:404]     Test net output #1: loss = 0.479403 (* 1 = 0.479403 loss)\nI0819 02:37:35.011575 22726 solver.cpp:228] Iteration 20500, loss = 0.0537436\nI0819 02:37:35.011618 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:37:35.011633 22726 solver.cpp:244]     Train net output #1: loss = 0.0537434 (* 1 = 0.0537434 loss)\nI0819 02:37:35.087092 22726 sgd_solver.cpp:166] Iteration 20500, lr = 0.5125\nI0819 02:39:53.001257 22726 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0819 02:41:18.248814 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0819 02:41:18.249135 22726 solver.cpp:404]     Test net output #1: loss = 0.468499 (* 1 = 0.468499 loss)\nI0819 02:41:19.582084 22726 solver.cpp:228] Iteration 20600, loss = 0.116045\nI0819 02:41:19.582139 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:41:19.582155 22726 solver.cpp:244]     Train net output #1: loss = 0.116045 (* 1 = 0.116045 loss)\nI0819 02:41:19.659340 22726 sgd_solver.cpp:166] Iteration 20600, lr = 0.515\nI0819 02:43:37.596971 22726 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0819 02:45:02.814716 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87308\nI0819 02:45:02.815059 22726 solver.cpp:404]     Test net output #1: loss = 0.497075 (* 1 = 0.497075 loss)\nI0819 02:45:04.151818 22726 solver.cpp:228] Iteration 20700, loss = 0.0250711\nI0819 02:45:04.151865 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:45:04.151881 22726 solver.cpp:244]     Train net output #1: loss = 0.0250709 (* 1 = 0.0250709 loss)\nI0819 02:45:04.227923 22726 
sgd_solver.cpp:166] Iteration 20700, lr = 0.5175\nI0819 02:47:22.092224 22726 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0819 02:48:47.360133 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87736\nI0819 02:48:47.360451 22726 solver.cpp:404]     Test net output #1: loss = 0.48452 (* 1 = 0.48452 loss)\nI0819 02:48:48.693773 22726 solver.cpp:228] Iteration 20800, loss = 0.126684\nI0819 02:48:48.693845 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 02:48:48.693869 22726 solver.cpp:244]     Train net output #1: loss = 0.126683 (* 1 = 0.126683 loss)\nI0819 02:48:48.776998 22726 sgd_solver.cpp:166] Iteration 20800, lr = 0.52\nI0819 02:51:06.698441 22726 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0819 02:52:31.942275 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87444\nI0819 02:52:31.942575 22726 solver.cpp:404]     Test net output #1: loss = 0.502479 (* 1 = 0.502479 loss)\nI0819 02:52:33.276602 22726 solver.cpp:228] Iteration 20900, loss = 0.0525957\nI0819 02:52:33.276657 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:52:33.276681 22726 solver.cpp:244]     Train net output #1: loss = 0.0525955 (* 1 = 0.0525955 loss)\nI0819 02:52:33.351095 22726 sgd_solver.cpp:166] Iteration 20900, lr = 0.5225\nI0819 02:54:51.265161 22726 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0819 02:56:16.509675 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87192\nI0819 02:56:16.509994 22726 solver.cpp:404]     Test net output #1: loss = 0.510412 (* 1 = 0.510412 loss)\nI0819 02:56:17.843230 22726 solver.cpp:228] Iteration 21000, loss = 0.066767\nI0819 02:56:17.843276 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:56:17.843299 22726 solver.cpp:244]     Train net output #1: loss = 0.0667668 (* 1 = 0.0667668 loss)\nI0819 02:56:17.920708 22726 sgd_solver.cpp:166] Iteration 21000, lr = 0.525\nI0819 02:58:35.860901 22726 solver.cpp:337] Iteration 21100, Testing net 
(#0)\nI0819 03:00:01.101698 22726 solver.cpp:404]     Test net output #0: accuracy = 0.874\nI0819 03:00:01.102023 22726 solver.cpp:404]     Test net output #1: loss = 0.511913 (* 1 = 0.511913 loss)\nI0819 03:00:02.436751 22726 solver.cpp:228] Iteration 21100, loss = 0.0358711\nI0819 03:00:02.436794 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:00:02.436818 22726 solver.cpp:244]     Train net output #1: loss = 0.0358709 (* 1 = 0.0358709 loss)\nI0819 03:00:02.512670 22726 sgd_solver.cpp:166] Iteration 21100, lr = 0.5275\nI0819 03:02:20.489434 22726 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0819 03:03:45.730284 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0819 03:03:45.730593 22726 solver.cpp:404]     Test net output #1: loss = 0.498982 (* 1 = 0.498982 loss)\nI0819 03:03:47.064054 22726 solver.cpp:228] Iteration 21200, loss = 0.0492556\nI0819 03:03:47.064110 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:03:47.064133 22726 solver.cpp:244]     Train net output #1: loss = 0.0492554 (* 1 = 0.0492554 loss)\nI0819 03:03:47.144064 22726 sgd_solver.cpp:166] Iteration 21200, lr = 0.53\nI0819 03:06:05.112794 22726 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0819 03:07:30.362995 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87632\nI0819 03:07:30.363332 22726 solver.cpp:404]     Test net output #1: loss = 0.487357 (* 1 = 0.487357 loss)\nI0819 03:07:31.696745 22726 solver.cpp:228] Iteration 21300, loss = 0.0577075\nI0819 03:07:31.696800 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:07:31.696823 22726 solver.cpp:244]     Train net output #1: loss = 0.0577074 (* 1 = 0.0577074 loss)\nI0819 03:07:31.785964 22726 sgd_solver.cpp:166] Iteration 21300, lr = 0.5325\nI0819 03:09:49.699785 22726 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0819 03:11:14.927409 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0819 03:11:14.927727 
22726 solver.cpp:404]     Test net output #1: loss = 0.515726 (* 1 = 0.515726 loss)\nI0819 03:11:16.262217 22726 solver.cpp:228] Iteration 21400, loss = 0.105709\nI0819 03:11:16.262259 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:11:16.262274 22726 solver.cpp:244]     Train net output #1: loss = 0.105709 (* 1 = 0.105709 loss)\nI0819 03:11:16.341514 22726 sgd_solver.cpp:166] Iteration 21400, lr = 0.535\nI0819 03:13:34.240177 22726 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0819 03:14:59.477015 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87608\nI0819 03:14:59.477324 22726 solver.cpp:404]     Test net output #1: loss = 0.481071 (* 1 = 0.481071 loss)\nI0819 03:15:00.811374 22726 solver.cpp:228] Iteration 21500, loss = 0.0672113\nI0819 03:15:00.811427 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:15:00.811444 22726 solver.cpp:244]     Train net output #1: loss = 0.0672111 (* 1 = 0.0672111 loss)\nI0819 03:15:00.890007 22726 sgd_solver.cpp:166] Iteration 21500, lr = 0.5375\nI0819 03:17:18.813999 22726 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0819 03:18:44.057183 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 03:18:44.057521 22726 solver.cpp:404]     Test net output #1: loss = 0.494672 (* 1 = 0.494672 loss)\nI0819 03:18:45.390385 22726 solver.cpp:228] Iteration 21600, loss = 0.0190107\nI0819 03:18:45.390439 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:18:45.390456 22726 solver.cpp:244]     Train net output #1: loss = 0.0190106 (* 1 = 0.0190106 loss)\nI0819 03:18:45.470785 22726 sgd_solver.cpp:166] Iteration 21600, lr = 0.54\nI0819 03:21:03.289912 22726 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0819 03:22:27.786420 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87684\nI0819 03:22:27.786687 22726 solver.cpp:404]     Test net output #1: loss = 0.517388 (* 1 = 0.517388 loss)\nI0819 03:22:29.117480 22726 
solver.cpp:228] Iteration 21700, loss = 0.211087\nI0819 03:22:29.117512 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 03:22:29.117527 22726 solver.cpp:244]     Train net output #1: loss = 0.211087 (* 1 = 0.211087 loss)\nI0819 03:22:29.198997 22726 sgd_solver.cpp:166] Iteration 21700, lr = 0.5425\nI0819 03:24:46.785449 22726 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0819 03:26:11.276574 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0819 03:26:11.276860 22726 solver.cpp:404]     Test net output #1: loss = 0.467198 (* 1 = 0.467198 loss)\nI0819 03:26:12.607847 22726 solver.cpp:228] Iteration 21800, loss = 0.0766026\nI0819 03:26:12.607882 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:26:12.607900 22726 solver.cpp:244]     Train net output #1: loss = 0.0766025 (* 1 = 0.0766025 loss)\nI0819 03:26:12.689786 22726 sgd_solver.cpp:166] Iteration 21800, lr = 0.545\nI0819 03:28:30.271353 22726 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0819 03:29:54.757392 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88076\nI0819 03:29:54.757686 22726 solver.cpp:404]     Test net output #1: loss = 0.48224 (* 1 = 0.48224 loss)\nI0819 03:29:56.088912 22726 solver.cpp:228] Iteration 21900, loss = 0.061491\nI0819 03:29:56.088945 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:29:56.088961 22726 solver.cpp:244]     Train net output #1: loss = 0.0614908 (* 1 = 0.0614908 loss)\nI0819 03:29:56.169729 22726 sgd_solver.cpp:166] Iteration 21900, lr = 0.5475\nI0819 03:32:13.885553 22726 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0819 03:33:38.370774 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87356\nI0819 03:33:38.371069 22726 solver.cpp:404]     Test net output #1: loss = 0.482133 (* 1 = 0.482133 loss)\nI0819 03:33:39.702414 22726 solver.cpp:228] Iteration 22000, loss = 0.0673411\nI0819 03:33:39.702451 22726 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0819 03:33:39.702467 22726 solver.cpp:244]     Train net output #1: loss = 0.067341 (* 1 = 0.067341 loss)\nI0819 03:33:39.780346 22726 sgd_solver.cpp:166] Iteration 22000, lr = 0.55\nI0819 03:35:57.399910 22726 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0819 03:37:21.893854 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8818\nI0819 03:37:21.894137 22726 solver.cpp:404]     Test net output #1: loss = 0.471062 (* 1 = 0.471062 loss)\nI0819 03:37:23.225148 22726 solver.cpp:228] Iteration 22100, loss = 0.0529904\nI0819 03:37:23.225189 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:37:23.225206 22726 solver.cpp:244]     Train net output #1: loss = 0.0529903 (* 1 = 0.0529903 loss)\nI0819 03:37:23.306998 22726 sgd_solver.cpp:166] Iteration 22100, lr = 0.5525\nI0819 03:39:40.903990 22726 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0819 03:41:05.387807 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0819 03:41:05.388085 22726 solver.cpp:404]     Test net output #1: loss = 0.480618 (* 1 = 0.480618 loss)\nI0819 03:41:06.718637 22726 solver.cpp:228] Iteration 22200, loss = 0.0811188\nI0819 03:41:06.718677 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:41:06.718693 22726 solver.cpp:244]     Train net output #1: loss = 0.0811186 (* 1 = 0.0811186 loss)\nI0819 03:41:06.797479 22726 sgd_solver.cpp:166] Iteration 22200, lr = 0.555\nI0819 03:43:24.391438 22726 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0819 03:44:48.877770 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87356\nI0819 03:44:48.878068 22726 solver.cpp:404]     Test net output #1: loss = 0.500219 (* 1 = 0.500219 loss)\nI0819 03:44:50.208920 22726 solver.cpp:228] Iteration 22300, loss = 0.0201558\nI0819 03:44:50.208959 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:44:50.208976 22726 solver.cpp:244]     Train net output #1: loss = 0.0201556 (* 1 = 0.0201556 
loss)\nI0819 03:44:50.286474 22726 sgd_solver.cpp:166] Iteration 22300, lr = 0.5575\nI0819 03:47:07.826063 22726 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0819 03:48:32.307469 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87896\nI0819 03:48:32.307762 22726 solver.cpp:404]     Test net output #1: loss = 0.477803 (* 1 = 0.477803 loss)\nI0819 03:48:33.638900 22726 solver.cpp:228] Iteration 22400, loss = 0.084071\nI0819 03:48:33.638943 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:48:33.638960 22726 solver.cpp:244]     Train net output #1: loss = 0.0840708 (* 1 = 0.0840708 loss)\nI0819 03:48:33.714614 22726 sgd_solver.cpp:166] Iteration 22400, lr = 0.56\nI0819 03:50:51.288424 22726 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0819 03:52:15.769929 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0819 03:52:15.770216 22726 solver.cpp:404]     Test net output #1: loss = 0.464964 (* 1 = 0.464964 loss)\nI0819 03:52:17.100793 22726 solver.cpp:228] Iteration 22500, loss = 0.050832\nI0819 03:52:17.100838 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:52:17.100854 22726 solver.cpp:244]     Train net output #1: loss = 0.0508319 (* 1 = 0.0508319 loss)\nI0819 03:52:17.190088 22726 sgd_solver.cpp:166] Iteration 22500, lr = 0.5625\nI0819 03:54:34.881371 22726 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0819 03:55:59.368935 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87708\nI0819 03:55:59.369199 22726 solver.cpp:404]     Test net output #1: loss = 0.484669 (* 1 = 0.484669 loss)\nI0819 03:56:00.699885 22726 solver.cpp:228] Iteration 22600, loss = 0.0353315\nI0819 03:56:00.699930 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:56:00.699954 22726 solver.cpp:244]     Train net output #1: loss = 0.0353313 (* 1 = 0.0353313 loss)\nI0819 03:56:00.782331 22726 sgd_solver.cpp:166] Iteration 22600, lr = 0.565\nI0819 03:58:18.287509 22726 
solver.cpp:337] Iteration 22700, Testing net (#0)\nI0819 03:59:42.776525 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8798\nI0819 03:59:42.776792 22726 solver.cpp:404]     Test net output #1: loss = 0.474579 (* 1 = 0.474579 loss)\nI0819 03:59:44.107669 22726 solver.cpp:228] Iteration 22700, loss = 0.0356973\nI0819 03:59:44.107712 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:59:44.107728 22726 solver.cpp:244]     Train net output #1: loss = 0.0356972 (* 1 = 0.0356972 loss)\nI0819 03:59:44.184329 22726 sgd_solver.cpp:166] Iteration 22700, lr = 0.5675\nI0819 04:02:01.669769 22726 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0819 04:03:26.157282 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88308\nI0819 04:03:26.157572 22726 solver.cpp:404]     Test net output #1: loss = 0.468605 (* 1 = 0.468605 loss)\nI0819 04:03:27.488450 22726 solver.cpp:228] Iteration 22800, loss = 0.0341361\nI0819 04:03:27.488492 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:03:27.488509 22726 solver.cpp:244]     Train net output #1: loss = 0.0341359 (* 1 = 0.0341359 loss)\nI0819 04:03:27.570318 22726 sgd_solver.cpp:166] Iteration 22800, lr = 0.57\nI0819 04:05:45.093235 22726 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0819 04:07:09.571548 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87588\nI0819 04:07:09.571841 22726 solver.cpp:404]     Test net output #1: loss = 0.4874 (* 1 = 0.4874 loss)\nI0819 04:07:10.903285 22726 solver.cpp:228] Iteration 22900, loss = 0.0592216\nI0819 04:07:10.903319 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:07:10.903334 22726 solver.cpp:244]     Train net output #1: loss = 0.0592215 (* 1 = 0.0592215 loss)\nI0819 04:07:10.982668 22726 sgd_solver.cpp:166] Iteration 22900, lr = 0.5725\nI0819 04:09:28.573071 22726 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0819 04:10:53.050364 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.88564\nI0819 04:10:53.050653 22726 solver.cpp:404]     Test net output #1: loss = 0.469927 (* 1 = 0.469927 loss)\nI0819 04:10:54.381106 22726 solver.cpp:228] Iteration 23000, loss = 0.0871131\nI0819 04:10:54.381148 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:10:54.381165 22726 solver.cpp:244]     Train net output #1: loss = 0.087113 (* 1 = 0.087113 loss)\nI0819 04:10:54.459182 22726 sgd_solver.cpp:166] Iteration 23000, lr = 0.575\nI0819 04:13:12.028575 22726 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0819 04:14:36.512460 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87376\nI0819 04:14:36.512749 22726 solver.cpp:404]     Test net output #1: loss = 0.487162 (* 1 = 0.487162 loss)\nI0819 04:14:37.843852 22726 solver.cpp:228] Iteration 23100, loss = 0.236982\nI0819 04:14:37.843899 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:14:37.843915 22726 solver.cpp:244]     Train net output #1: loss = 0.236982 (* 1 = 0.236982 loss)\nI0819 04:14:37.925348 22726 sgd_solver.cpp:166] Iteration 23100, lr = 0.5775\nI0819 04:16:55.429299 22726 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0819 04:18:19.914960 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87876\nI0819 04:18:19.915256 22726 solver.cpp:404]     Test net output #1: loss = 0.492639 (* 1 = 0.492639 loss)\nI0819 04:18:21.245857 22726 solver.cpp:228] Iteration 23200, loss = 0.0440318\nI0819 04:18:21.245901 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:18:21.245918 22726 solver.cpp:244]     Train net output #1: loss = 0.0440317 (* 1 = 0.0440317 loss)\nI0819 04:18:21.325213 22726 sgd_solver.cpp:166] Iteration 23200, lr = 0.58\nI0819 04:20:38.933439 22726 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0819 04:22:03.390478 22726 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0819 04:22:03.390745 22726 solver.cpp:404]     Test net output #1: loss = 0.461311 (* 1 = 0.461311 
loss)\nI0819 04:22:04.721233 22726 solver.cpp:228] Iteration 23300, loss = 0.0257344\nI0819 04:22:04.721273 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:22:04.721289 22726 solver.cpp:244]     Train net output #1: loss = 0.0257342 (* 1 = 0.0257342 loss)\nI0819 04:22:04.798672 22726 sgd_solver.cpp:166] Iteration 23300, lr = 0.5825\nI0819 04:24:22.435708 22726 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0819 04:25:46.892371 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 04:25:46.892665 22726 solver.cpp:404]     Test net output #1: loss = 0.489395 (* 1 = 0.489395 loss)\nI0819 04:25:48.224138 22726 solver.cpp:228] Iteration 23400, loss = 0.0714437\nI0819 04:25:48.224179 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:25:48.224195 22726 solver.cpp:244]     Train net output #1: loss = 0.0714436 (* 1 = 0.0714436 loss)\nI0819 04:25:48.303414 22726 sgd_solver.cpp:166] Iteration 23400, lr = 0.585\nI0819 04:28:05.857091 22726 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0819 04:29:30.312263 22726 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0819 04:29:30.312556 22726 solver.cpp:404]     Test net output #1: loss = 0.45823 (* 1 = 0.45823 loss)\nI0819 04:29:31.642931 22726 solver.cpp:228] Iteration 23500, loss = 0.0363407\nI0819 04:29:31.642971 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:29:31.642987 22726 solver.cpp:244]     Train net output #1: loss = 0.0363406 (* 1 = 0.0363406 loss)\nI0819 04:29:31.723845 22726 sgd_solver.cpp:166] Iteration 23500, lr = 0.5875\nI0819 04:31:49.322610 22726 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0819 04:33:13.777420 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0819 04:33:13.777707 22726 solver.cpp:404]     Test net output #1: loss = 0.463813 (* 1 = 0.463813 loss)\nI0819 04:33:15.108806 22726 solver.cpp:228] Iteration 23600, loss = 0.0599348\nI0819 04:33:15.108844 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 04:33:15.108860 22726 solver.cpp:244]     Train net output #1: loss = 0.0599346 (* 1 = 0.0599346 loss)\nI0819 04:33:15.189540 22726 sgd_solver.cpp:166] Iteration 23600, lr = 0.59\nI0819 04:35:32.780957 22726 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0819 04:36:57.248723 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0819 04:36:57.248992 22726 solver.cpp:404]     Test net output #1: loss = 0.4589 (* 1 = 0.4589 loss)\nI0819 04:36:58.579576 22726 solver.cpp:228] Iteration 23700, loss = 0.0778266\nI0819 04:36:58.579617 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:36:58.579633 22726 solver.cpp:244]     Train net output #1: loss = 0.0778265 (* 1 = 0.0778265 loss)\nI0819 04:36:58.657552 22726 sgd_solver.cpp:166] Iteration 23700, lr = 0.5925\nI0819 04:39:16.246486 22726 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0819 04:40:40.705482 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88076\nI0819 04:40:40.705775 22726 solver.cpp:404]     Test net output #1: loss = 0.459125 (* 1 = 0.459125 loss)\nI0819 04:40:42.036780 22726 solver.cpp:228] Iteration 23800, loss = 0.0749915\nI0819 04:40:42.036823 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:40:42.036840 22726 solver.cpp:244]     Train net output #1: loss = 0.0749913 (* 1 = 0.0749913 loss)\nI0819 04:40:42.114086 22726 sgd_solver.cpp:166] Iteration 23800, lr = 0.595\nI0819 04:42:59.732697 22726 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0819 04:44:24.194689 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87848\nI0819 04:44:24.194980 22726 solver.cpp:404]     Test net output #1: loss = 0.493283 (* 1 = 0.493283 loss)\nI0819 04:44:25.526355 22726 solver.cpp:228] Iteration 23900, loss = 0.0936261\nI0819 04:44:25.526398 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:44:25.526414 22726 solver.cpp:244]     Train net 
output #1: loss = 0.0936259 (* 1 = 0.0936259 loss)\nI0819 04:44:25.610347 22726 sgd_solver.cpp:166] Iteration 23900, lr = 0.5975\nI0819 04:46:43.259791 22726 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0819 04:48:07.716704 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87708\nI0819 04:48:07.716996 22726 solver.cpp:404]     Test net output #1: loss = 0.477583 (* 1 = 0.477583 loss)\nI0819 04:48:09.047652 22726 solver.cpp:228] Iteration 24000, loss = 0.02903\nI0819 04:48:09.047696 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:48:09.047713 22726 solver.cpp:244]     Train net output #1: loss = 0.0290299 (* 1 = 0.0290299 loss)\nI0819 04:48:09.130548 22726 sgd_solver.cpp:166] Iteration 24000, lr = 0.6\nI0819 04:50:26.689924 22726 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0819 04:51:51.146947 22726 solver.cpp:404]     Test net output #0: accuracy = 0.877\nI0819 04:51:51.147238 22726 solver.cpp:404]     Test net output #1: loss = 0.488176 (* 1 = 0.488176 loss)\nI0819 04:51:52.477867 22726 solver.cpp:228] Iteration 24100, loss = 0.0615974\nI0819 04:51:52.477916 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 04:51:52.477932 22726 solver.cpp:244]     Train net output #1: loss = 0.0615973 (* 1 = 0.0615973 loss)\nI0819 04:51:52.559157 22726 sgd_solver.cpp:166] Iteration 24100, lr = 0.6025\nI0819 04:54:10.214411 22726 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0819 04:55:34.687310 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8744\nI0819 04:55:34.687607 22726 solver.cpp:404]     Test net output #1: loss = 0.485464 (* 1 = 0.485464 loss)\nI0819 04:55:36.018260 22726 solver.cpp:228] Iteration 24200, loss = 0.0321902\nI0819 04:55:36.018306 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 04:55:36.018321 22726 solver.cpp:244]     Train net output #1: loss = 0.0321901 (* 1 = 0.0321901 loss)\nI0819 04:55:36.102149 22726 sgd_solver.cpp:166] Iteration 24200, lr = 
0.605\nI0819 04:57:53.586674 22726 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0819 04:59:18.073104 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88084\nI0819 04:59:18.073405 22726 solver.cpp:404]     Test net output #1: loss = 0.474353 (* 1 = 0.474353 loss)\nI0819 04:59:19.404537 22726 solver.cpp:228] Iteration 24300, loss = 0.0832481\nI0819 04:59:19.404579 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:59:19.404595 22726 solver.cpp:244]     Train net output #1: loss = 0.083248 (* 1 = 0.083248 loss)\nI0819 04:59:19.479970 22726 sgd_solver.cpp:166] Iteration 24300, lr = 0.6075\nI0819 05:01:37.027858 22726 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0819 05:03:01.516618 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0819 05:03:01.516916 22726 solver.cpp:404]     Test net output #1: loss = 0.463567 (* 1 = 0.463567 loss)\nI0819 05:03:02.847942 22726 solver.cpp:228] Iteration 24400, loss = 0.0514444\nI0819 05:03:02.847983 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:03:02.848000 22726 solver.cpp:244]     Train net output #1: loss = 0.0514442 (* 1 = 0.0514442 loss)\nI0819 05:03:02.924166 22726 sgd_solver.cpp:166] Iteration 24400, lr = 0.61\nI0819 05:05:20.586771 22726 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0819 05:06:45.067095 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8752\nI0819 05:06:45.067385 22726 solver.cpp:404]     Test net output #1: loss = 0.479359 (* 1 = 0.479359 loss)\nI0819 05:06:46.397573 22726 solver.cpp:228] Iteration 24500, loss = 0.0662786\nI0819 05:06:46.397614 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:06:46.397630 22726 solver.cpp:244]     Train net output #1: loss = 0.0662785 (* 1 = 0.0662785 loss)\nI0819 05:06:46.476166 22726 sgd_solver.cpp:166] Iteration 24500, lr = 0.6125\nI0819 05:09:04.052575 22726 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0819 05:10:28.537678 22726 
solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0819 05:10:28.537981 22726 solver.cpp:404]     Test net output #1: loss = 0.457576 (* 1 = 0.457576 loss)\nI0819 05:10:29.869333 22726 solver.cpp:228] Iteration 24600, loss = 0.0652849\nI0819 05:10:29.869374 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:10:29.869390 22726 solver.cpp:244]     Train net output #1: loss = 0.0652847 (* 1 = 0.0652847 loss)\nI0819 05:10:29.950007 22726 sgd_solver.cpp:166] Iteration 24600, lr = 0.615\nI0819 05:12:47.552130 22726 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0819 05:14:12.038257 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8812\nI0819 05:14:12.038545 22726 solver.cpp:404]     Test net output #1: loss = 0.495876 (* 1 = 0.495876 loss)\nI0819 05:14:13.369097 22726 solver.cpp:228] Iteration 24700, loss = 0.0468816\nI0819 05:14:13.369138 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:14:13.369153 22726 solver.cpp:244]     Train net output #1: loss = 0.0468815 (* 1 = 0.0468815 loss)\nI0819 05:14:13.448947 22726 sgd_solver.cpp:166] Iteration 24700, lr = 0.6175\nI0819 05:16:31.023110 22726 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0819 05:17:55.518759 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0819 05:17:55.519048 22726 solver.cpp:404]     Test net output #1: loss = 0.491293 (* 1 = 0.491293 loss)\nI0819 05:17:56.850028 22726 solver.cpp:228] Iteration 24800, loss = 0.0799649\nI0819 05:17:56.850067 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:17:56.850082 22726 solver.cpp:244]     Train net output #1: loss = 0.0799647 (* 1 = 0.0799647 loss)\nI0819 05:17:56.926234 22726 sgd_solver.cpp:166] Iteration 24800, lr = 0.62\nI0819 05:20:14.481108 22726 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0819 05:21:38.969002 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0819 05:21:38.969280 22726 solver.cpp:404]     Test net 
output #1: loss = 0.468315 (* 1 = 0.468315 loss)\nI0819 05:21:40.299995 22726 solver.cpp:228] Iteration 24900, loss = 0.047255\nI0819 05:21:40.300035 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:21:40.300051 22726 solver.cpp:244]     Train net output #1: loss = 0.0472548 (* 1 = 0.0472548 loss)\nI0819 05:21:40.375252 22726 sgd_solver.cpp:166] Iteration 24900, lr = 0.6225\nI0819 05:23:57.917480 22726 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0819 05:25:22.399060 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0819 05:25:22.399361 22726 solver.cpp:404]     Test net output #1: loss = 0.454587 (* 1 = 0.454587 loss)\nI0819 05:25:23.729856 22726 solver.cpp:228] Iteration 25000, loss = 0.138741\nI0819 05:25:23.729900 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:25:23.729917 22726 solver.cpp:244]     Train net output #1: loss = 0.138741 (* 1 = 0.138741 loss)\nI0819 05:25:23.808523 22726 sgd_solver.cpp:166] Iteration 25000, lr = 0.625\nI0819 05:27:41.322656 22726 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0819 05:29:05.808116 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87844\nI0819 05:29:05.808405 22726 solver.cpp:404]     Test net output #1: loss = 0.464759 (* 1 = 0.464759 loss)\nI0819 05:29:07.138952 22726 solver.cpp:228] Iteration 25100, loss = 0.0737811\nI0819 05:29:07.138993 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:29:07.139008 22726 solver.cpp:244]     Train net output #1: loss = 0.0737809 (* 1 = 0.0737809 loss)\nI0819 05:29:07.215884 22726 sgd_solver.cpp:166] Iteration 25100, lr = 0.6275\nI0819 05:31:24.780077 22726 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0819 05:32:49.269105 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88192\nI0819 05:32:49.269376 22726 solver.cpp:404]     Test net output #1: loss = 0.442422 (* 1 = 0.442422 loss)\nI0819 05:32:50.600419 22726 solver.cpp:228] Iteration 25200, loss = 
0.0710785\nI0819 05:32:50.600458 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:32:50.600474 22726 solver.cpp:244]     Train net output #1: loss = 0.0710783 (* 1 = 0.0710783 loss)\nI0819 05:32:50.684945 22726 sgd_solver.cpp:166] Iteration 25200, lr = 0.63\nI0819 05:35:08.285670 22726 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0819 05:36:32.773685 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 05:36:32.773980 22726 solver.cpp:404]     Test net output #1: loss = 0.462586 (* 1 = 0.462586 loss)\nI0819 05:36:34.104174 22726 solver.cpp:228] Iteration 25300, loss = 0.093051\nI0819 05:36:34.104215 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:36:34.104231 22726 solver.cpp:244]     Train net output #1: loss = 0.0930508 (* 1 = 0.0930508 loss)\nI0819 05:36:34.184005 22726 sgd_solver.cpp:166] Iteration 25300, lr = 0.6325\nI0819 05:38:51.757361 22726 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0819 05:40:16.239111 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87704\nI0819 05:40:16.239404 22726 solver.cpp:404]     Test net output #1: loss = 0.486293 (* 1 = 0.486293 loss)\nI0819 05:40:17.570716 22726 solver.cpp:228] Iteration 25400, loss = 0.0809407\nI0819 05:40:17.570757 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:40:17.570773 22726 solver.cpp:244]     Train net output #1: loss = 0.0809406 (* 1 = 0.0809406 loss)\nI0819 05:40:17.647682 22726 sgd_solver.cpp:166] Iteration 25400, lr = 0.635\nI0819 05:42:35.139672 22726 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0819 05:43:59.625900 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0819 05:43:59.626185 22726 solver.cpp:404]     Test net output #1: loss = 0.478881 (* 1 = 0.478881 loss)\nI0819 05:44:00.956594 22726 solver.cpp:228] Iteration 25500, loss = 0.0951998\nI0819 05:44:00.956634 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:44:00.956650 
22726 solver.cpp:244]     Train net output #1: loss = 0.0951996 (* 1 = 0.0951996 loss)\nI0819 05:44:01.036783 22726 sgd_solver.cpp:166] Iteration 25500, lr = 0.6375\nI0819 05:46:18.649487 22726 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0819 05:47:43.127486 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0819 05:47:43.127780 22726 solver.cpp:404]     Test net output #1: loss = 0.441769 (* 1 = 0.441769 loss)\nI0819 05:47:44.458873 22726 solver.cpp:228] Iteration 25600, loss = 0.0295325\nI0819 05:47:44.458919 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:47:44.458935 22726 solver.cpp:244]     Train net output #1: loss = 0.0295323 (* 1 = 0.0295323 loss)\nI0819 05:47:44.545380 22726 sgd_solver.cpp:166] Iteration 25600, lr = 0.64\nI0819 05:50:02.233638 22726 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0819 05:51:26.723971 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 05:51:26.724261 22726 solver.cpp:404]     Test net output #1: loss = 0.447057 (* 1 = 0.447057 loss)\nI0819 05:51:28.055510 22726 solver.cpp:228] Iteration 25700, loss = 0.0502407\nI0819 05:51:28.055549 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:51:28.055564 22726 solver.cpp:244]     Train net output #1: loss = 0.0502405 (* 1 = 0.0502405 loss)\nI0819 05:51:28.139192 22726 sgd_solver.cpp:166] Iteration 25700, lr = 0.6425\nI0819 05:53:45.726301 22726 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0819 05:55:10.215291 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0819 05:55:10.215560 22726 solver.cpp:404]     Test net output #1: loss = 0.468352 (* 1 = 0.468352 loss)\nI0819 05:55:11.545909 22726 solver.cpp:228] Iteration 25800, loss = 0.0867184\nI0819 05:55:11.545951 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:55:11.545967 22726 solver.cpp:244]     Train net output #1: loss = 0.0867182 (* 1 = 0.0867182 loss)\nI0819 05:55:11.627460 22726 
sgd_solver.cpp:166] Iteration 25800, lr = 0.645\nI0819 05:57:29.180927 22726 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0819 05:58:53.668370 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0819 05:58:53.668653 22726 solver.cpp:404]     Test net output #1: loss = 0.458939 (* 1 = 0.458939 loss)\nI0819 05:58:54.999531 22726 solver.cpp:228] Iteration 25900, loss = 0.0352016\nI0819 05:58:54.999573 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:58:54.999588 22726 solver.cpp:244]     Train net output #1: loss = 0.0352015 (* 1 = 0.0352015 loss)\nI0819 05:58:55.080618 22726 sgd_solver.cpp:166] Iteration 25900, lr = 0.6475\nI0819 06:01:12.673604 22726 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0819 06:02:37.162083 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0819 06:02:37.162384 22726 solver.cpp:404]     Test net output #1: loss = 0.451584 (* 1 = 0.451584 loss)\nI0819 06:02:38.492838 22726 solver.cpp:228] Iteration 26000, loss = 0.0492581\nI0819 06:02:38.492880 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:02:38.492902 22726 solver.cpp:244]     Train net output #1: loss = 0.0492579 (* 1 = 0.0492579 loss)\nI0819 06:02:38.568439 22726 sgd_solver.cpp:166] Iteration 26000, lr = 0.65\nI0819 06:04:56.127777 22726 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0819 06:06:20.607095 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88412\nI0819 06:06:20.607375 22726 solver.cpp:404]     Test net output #1: loss = 0.44984 (* 1 = 0.44984 loss)\nI0819 06:06:21.938021 22726 solver.cpp:228] Iteration 26100, loss = 0.119083\nI0819 06:06:21.938064 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:06:21.938081 22726 solver.cpp:244]     Train net output #1: loss = 0.119083 (* 1 = 0.119083 loss)\nI0819 06:06:22.022622 22726 sgd_solver.cpp:166] Iteration 26100, lr = 0.6525\nI0819 06:08:39.573710 22726 solver.cpp:337] Iteration 26200, Testing net 
(#0)\nI0819 06:10:04.064144 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 06:10:04.064414 22726 solver.cpp:404]     Test net output #1: loss = 0.453514 (* 1 = 0.453514 loss)\nI0819 06:10:05.395072 22726 solver.cpp:228] Iteration 26200, loss = 0.0823812\nI0819 06:10:05.395114 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:10:05.395130 22726 solver.cpp:244]     Train net output #1: loss = 0.082381 (* 1 = 0.082381 loss)\nI0819 06:10:05.478298 22726 sgd_solver.cpp:166] Iteration 26200, lr = 0.655\nI0819 06:12:22.968006 22726 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0819 06:13:47.456609 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87752\nI0819 06:13:47.456884 22726 solver.cpp:404]     Test net output #1: loss = 0.467114 (* 1 = 0.467114 loss)\nI0819 06:13:48.787492 22726 solver.cpp:228] Iteration 26300, loss = 0.120645\nI0819 06:13:48.787533 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 06:13:48.787547 22726 solver.cpp:244]     Train net output #1: loss = 0.120645 (* 1 = 0.120645 loss)\nI0819 06:13:48.877843 22726 sgd_solver.cpp:166] Iteration 26300, lr = 0.6575\nI0819 06:16:06.456817 22726 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0819 06:17:30.942454 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0819 06:17:30.942745 22726 solver.cpp:404]     Test net output #1: loss = 0.469825 (* 1 = 0.469825 loss)\nI0819 06:17:32.273207 22726 solver.cpp:228] Iteration 26400, loss = 0.0574865\nI0819 06:17:32.273248 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:17:32.273263 22726 solver.cpp:244]     Train net output #1: loss = 0.0574863 (* 1 = 0.0574863 loss)\nI0819 06:17:32.355197 22726 sgd_solver.cpp:166] Iteration 26400, lr = 0.66\nI0819 06:19:49.918382 22726 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0819 06:21:14.405526 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0819 06:21:14.405827 22726 
solver.cpp:404]     Test net output #1: loss = 0.459103 (* 1 = 0.459103 loss)\nI0819 06:21:15.736326 22726 solver.cpp:228] Iteration 26500, loss = 0.132251\nI0819 06:21:15.736368 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:21:15.736383 22726 solver.cpp:244]     Train net output #1: loss = 0.132251 (* 1 = 0.132251 loss)\nI0819 06:21:15.817337 22726 sgd_solver.cpp:166] Iteration 26500, lr = 0.6625\nI0819 06:23:33.358072 22726 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0819 06:24:57.848551 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 06:24:57.848847 22726 solver.cpp:404]     Test net output #1: loss = 0.445853 (* 1 = 0.445853 loss)\nI0819 06:24:59.179903 22726 solver.cpp:228] Iteration 26600, loss = 0.113274\nI0819 06:24:59.179944 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:24:59.179960 22726 solver.cpp:244]     Train net output #1: loss = 0.113274 (* 1 = 0.113274 loss)\nI0819 06:24:59.260092 22726 sgd_solver.cpp:166] Iteration 26600, lr = 0.665\nI0819 06:27:16.864543 22726 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0819 06:28:41.349993 22726 solver.cpp:404]     Test net output #0: accuracy = 0.882001\nI0819 06:28:41.350288 22726 solver.cpp:404]     Test net output #1: loss = 0.450391 (* 1 = 0.450391 loss)\nI0819 06:28:42.681252 22726 solver.cpp:228] Iteration 26700, loss = 0.0408975\nI0819 06:28:42.681296 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:28:42.681313 22726 solver.cpp:244]     Train net output #1: loss = 0.0408974 (* 1 = 0.0408974 loss)\nI0819 06:28:42.756858 22726 sgd_solver.cpp:166] Iteration 26700, lr = 0.6675\nI0819 06:31:00.289816 22726 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0819 06:32:24.776046 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 06:32:24.776357 22726 solver.cpp:404]     Test net output #1: loss = 0.466892 (* 1 = 0.466892 loss)\nI0819 06:32:26.107376 22726 solver.cpp:228] 
Iteration 26800, loss = 0.0690098\nI0819 06:32:26.107419 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:32:26.107435 22726 solver.cpp:244]     Train net output #1: loss = 0.0690097 (* 1 = 0.0690097 loss)\nI0819 06:32:26.186947 22726 sgd_solver.cpp:166] Iteration 26800, lr = 0.67\nI0819 06:34:43.661782 22726 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0819 06:36:08.149242 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88264\nI0819 06:36:08.149536 22726 solver.cpp:404]     Test net output #1: loss = 0.458961 (* 1 = 0.458961 loss)\nI0819 06:36:09.479650 22726 solver.cpp:228] Iteration 26900, loss = 0.0871134\nI0819 06:36:09.479691 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:36:09.479707 22726 solver.cpp:244]     Train net output #1: loss = 0.0871133 (* 1 = 0.0871133 loss)\nI0819 06:36:09.559552 22726 sgd_solver.cpp:166] Iteration 26900, lr = 0.6725\nI0819 06:38:27.113730 22726 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0819 06:39:51.573583 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 06:39:51.573880 22726 solver.cpp:404]     Test net output #1: loss = 0.474752 (* 1 = 0.474752 loss)\nI0819 06:39:52.905042 22726 solver.cpp:228] Iteration 27000, loss = 0.0598202\nI0819 06:39:52.905084 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:39:52.905100 22726 solver.cpp:244]     Train net output #1: loss = 0.0598201 (* 1 = 0.0598201 loss)\nI0819 06:39:52.984563 22726 sgd_solver.cpp:166] Iteration 27000, lr = 0.675\nI0819 06:42:10.485963 22726 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0819 06:43:34.928470 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88272\nI0819 06:43:34.928766 22726 solver.cpp:404]     Test net output #1: loss = 0.463085 (* 1 = 0.463085 loss)\nI0819 06:43:36.259395 22726 solver.cpp:228] Iteration 27100, loss = 0.112513\nI0819 06:43:36.259436 22726 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI0819 06:43:36.259451 22726 solver.cpp:244]     Train net output #1: loss = 0.112513 (* 1 = 0.112513 loss)\nI0819 06:43:36.345286 22726 sgd_solver.cpp:166] Iteration 27100, lr = 0.6775\nI0819 06:45:53.951169 22726 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0819 06:47:18.385004 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 06:47:18.385298 22726 solver.cpp:404]     Test net output #1: loss = 0.453018 (* 1 = 0.453018 loss)\nI0819 06:47:19.715742 22726 solver.cpp:228] Iteration 27200, loss = 0.0802916\nI0819 06:47:19.715782 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:47:19.715798 22726 solver.cpp:244]     Train net output #1: loss = 0.0802915 (* 1 = 0.0802915 loss)\nI0819 06:47:19.800151 22726 sgd_solver.cpp:166] Iteration 27200, lr = 0.68\nI0819 06:49:37.255568 22726 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0819 06:51:01.682680 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87596\nI0819 06:51:01.682981 22726 solver.cpp:404]     Test net output #1: loss = 0.473708 (* 1 = 0.473708 loss)\nI0819 06:51:03.013392 22726 solver.cpp:228] Iteration 27300, loss = 0.142912\nI0819 06:51:03.013432 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:51:03.013447 22726 solver.cpp:244]     Train net output #1: loss = 0.142912 (* 1 = 0.142912 loss)\nI0819 06:51:03.098876 22726 sgd_solver.cpp:166] Iteration 27300, lr = 0.6825\nI0819 06:53:20.636397 22726 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0819 06:54:45.065546 22726 solver.cpp:404]     Test net output #0: accuracy = 0.883001\nI0819 06:54:45.065845 22726 solver.cpp:404]     Test net output #1: loss = 0.459462 (* 1 = 0.459462 loss)\nI0819 06:54:46.396705 22726 solver.cpp:228] Iteration 27400, loss = 0.0770613\nI0819 06:54:46.396739 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:54:46.396754 22726 solver.cpp:244]     Train net output #1: loss = 0.0770612 (* 1 = 0.0770612 loss)\nI0819 
06:54:46.481763 22726 sgd_solver.cpp:166] Iteration 27400, lr = 0.685\nI0819 06:57:03.994168 22726 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0819 06:58:28.412155 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 06:58:28.412451 22726 solver.cpp:404]     Test net output #1: loss = 0.448715 (* 1 = 0.448715 loss)\nI0819 06:58:29.742729 22726 solver.cpp:228] Iteration 27500, loss = 0.156186\nI0819 06:58:29.742774 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 06:58:29.742790 22726 solver.cpp:244]     Train net output #1: loss = 0.156186 (* 1 = 0.156186 loss)\nI0819 06:58:29.830536 22726 sgd_solver.cpp:166] Iteration 27500, lr = 0.6875\nI0819 07:00:47.338673 22726 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0819 07:02:11.755456 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0819 07:02:11.755756 22726 solver.cpp:404]     Test net output #1: loss = 0.486752 (* 1 = 0.486752 loss)\nI0819 07:02:13.086973 22726 solver.cpp:228] Iteration 27600, loss = 0.11594\nI0819 07:02:13.087018 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:02:13.087033 22726 solver.cpp:244]     Train net output #1: loss = 0.11594 (* 1 = 0.11594 loss)\nI0819 07:02:13.169059 22726 sgd_solver.cpp:166] Iteration 27600, lr = 0.69\nI0819 07:04:30.670137 22726 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0819 07:05:55.092340 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0819 07:05:55.092638 22726 solver.cpp:404]     Test net output #1: loss = 0.494312 (* 1 = 0.494312 loss)\nI0819 07:05:56.424507 22726 solver.cpp:228] Iteration 27700, loss = 0.0198901\nI0819 07:05:56.424554 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 07:05:56.424571 22726 solver.cpp:244]     Train net output #1: loss = 0.01989 (* 1 = 0.01989 loss)\nI0819 07:05:56.500816 22726 sgd_solver.cpp:166] Iteration 27700, lr = 0.6925\nI0819 07:08:14.008148 22726 solver.cpp:337] Iteration 27800, 
Testing net (#0)\nI0819 07:09:38.437664 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87752\nI0819 07:09:38.437961 22726 solver.cpp:404]     Test net output #1: loss = 0.476626 (* 1 = 0.476626 loss)\nI0819 07:09:39.768388 22726 solver.cpp:228] Iteration 27800, loss = 0.107985\nI0819 07:09:39.768432 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:09:39.768448 22726 solver.cpp:244]     Train net output #1: loss = 0.107985 (* 1 = 0.107985 loss)\nI0819 07:09:39.856386 22726 sgd_solver.cpp:166] Iteration 27800, lr = 0.695\nI0819 07:11:57.340117 22726 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0819 07:13:21.791393 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86732\nI0819 07:13:21.791700 22726 solver.cpp:404]     Test net output #1: loss = 0.518662 (* 1 = 0.518662 loss)\nI0819 07:13:23.122964 22726 solver.cpp:228] Iteration 27900, loss = 0.102919\nI0819 07:13:23.123008 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:13:23.123023 22726 solver.cpp:244]     Train net output #1: loss = 0.102919 (* 1 = 0.102919 loss)\nI0819 07:13:23.199403 22726 sgd_solver.cpp:166] Iteration 27900, lr = 0.6975\nI0819 07:15:40.717337 22726 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0819 07:17:05.166455 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87844\nI0819 07:17:05.166731 22726 solver.cpp:404]     Test net output #1: loss = 0.465273 (* 1 = 0.465273 loss)\nI0819 07:17:06.497808 22726 solver.cpp:228] Iteration 28000, loss = 0.0495978\nI0819 07:17:06.497853 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 07:17:06.497870 22726 solver.cpp:244]     Train net output #1: loss = 0.0495977 (* 1 = 0.0495977 loss)\nI0819 07:17:06.580052 22726 sgd_solver.cpp:166] Iteration 28000, lr = 0.7\nI0819 07:19:24.065837 22726 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0819 07:20:48.537274 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0819 07:20:48.537576 
22726 solver.cpp:404]     Test net output #1: loss = 0.464446 (* 1 = 0.464446 loss)\nI0819 07:20:49.868727 22726 solver.cpp:228] Iteration 28100, loss = 0.0177448\nI0819 07:20:49.868772 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:20:49.868788 22726 solver.cpp:244]     Train net output #1: loss = 0.0177447 (* 1 = 0.0177447 loss)\nI0819 07:20:49.945477 22726 sgd_solver.cpp:166] Iteration 28100, lr = 0.7025\nI0819 07:23:07.427564 22726 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0819 07:24:31.886791 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0819 07:24:31.887102 22726 solver.cpp:404]     Test net output #1: loss = 0.439488 (* 1 = 0.439488 loss)\nI0819 07:24:33.218436 22726 solver.cpp:228] Iteration 28200, loss = 0.0941644\nI0819 07:24:33.218482 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:24:33.218497 22726 solver.cpp:244]     Train net output #1: loss = 0.0941643 (* 1 = 0.0941643 loss)\nI0819 07:24:33.300583 22726 sgd_solver.cpp:166] Iteration 28200, lr = 0.705\nI0819 07:26:50.816999 22726 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0819 07:28:15.281016 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87928\nI0819 07:28:15.281321 22726 solver.cpp:404]     Test net output #1: loss = 0.458815 (* 1 = 0.458815 loss)\nI0819 07:28:16.612169 22726 solver.cpp:228] Iteration 28300, loss = 0.0681918\nI0819 07:28:16.612215 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:28:16.612231 22726 solver.cpp:244]     Train net output #1: loss = 0.0681917 (* 1 = 0.0681917 loss)\nI0819 07:28:16.686738 22726 sgd_solver.cpp:166] Iteration 28300, lr = 0.7075\nI0819 07:30:34.193379 22726 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0819 07:31:58.662559 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0819 07:31:58.662863 22726 solver.cpp:404]     Test net output #1: loss = 0.438144 (* 1 = 0.438144 loss)\nI0819 07:31:59.994540 22726 
solver.cpp:228] Iteration 28400, loss = 0.0600953\nI0819 07:31:59.994585 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:31:59.994601 22726 solver.cpp:244]     Train net output #1: loss = 0.0600953 (* 1 = 0.0600953 loss)\nI0819 07:32:00.074731 22726 sgd_solver.cpp:166] Iteration 28400, lr = 0.71\nI0819 07:34:17.655331 22726 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0819 07:35:42.176884 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88276\nI0819 07:35:42.177194 22726 solver.cpp:404]     Test net output #1: loss = 0.451774 (* 1 = 0.451774 loss)\nI0819 07:35:43.508854 22726 solver.cpp:228] Iteration 28500, loss = 0.103162\nI0819 07:35:43.508893 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:35:43.508916 22726 solver.cpp:244]     Train net output #1: loss = 0.103162 (* 1 = 0.103162 loss)\nI0819 07:35:43.588739 22726 sgd_solver.cpp:166] Iteration 28500, lr = 0.7125\nI0819 07:38:01.152281 22726 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0819 07:39:25.750975 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 07:39:25.751288 22726 solver.cpp:404]     Test net output #1: loss = 0.448099 (* 1 = 0.448099 loss)\nI0819 07:39:27.082736 22726 solver.cpp:228] Iteration 28600, loss = 0.0450188\nI0819 07:39:27.082772 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:39:27.082797 22726 solver.cpp:244]     Train net output #1: loss = 0.0450186 (* 1 = 0.0450186 loss)\nI0819 07:39:27.155804 22726 sgd_solver.cpp:166] Iteration 28600, lr = 0.715\nI0819 07:41:44.651414 22726 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0819 07:43:09.150485 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86812\nI0819 07:43:09.150789 22726 solver.cpp:404]     Test net output #1: loss = 0.505146 (* 1 = 0.505146 loss)\nI0819 07:43:10.481431 22726 solver.cpp:228] Iteration 28700, loss = 0.156289\nI0819 07:43:10.481465 22726 solver.cpp:244]     Train net output #0: 
accuracy = 0.936\nI0819 07:43:10.481480 22726 solver.cpp:244]     Train net output #1: loss = 0.156289 (* 1 = 0.156289 loss)\nI0819 07:43:10.565367 22726 sgd_solver.cpp:166] Iteration 28700, lr = 0.7175\nI0819 07:45:28.029793 22726 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0819 07:46:52.498466 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88284\nI0819 07:46:52.498760 22726 solver.cpp:404]     Test net output #1: loss = 0.45542 (* 1 = 0.45542 loss)\nI0819 07:46:53.829105 22726 solver.cpp:228] Iteration 28800, loss = 0.105531\nI0819 07:46:53.829140 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 07:46:53.829155 22726 solver.cpp:244]     Train net output #1: loss = 0.105531 (* 1 = 0.105531 loss)\nI0819 07:46:53.911573 22726 sgd_solver.cpp:166] Iteration 28800, lr = 0.72\nI0819 07:49:11.426317 22726 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0819 07:50:35.892714 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 07:50:35.893023 22726 solver.cpp:404]     Test net output #1: loss = 0.440219 (* 1 = 0.440219 loss)\nI0819 07:50:37.224474 22726 solver.cpp:228] Iteration 28900, loss = 0.0447241\nI0819 07:50:37.224508 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:50:37.224522 22726 solver.cpp:244]     Train net output #1: loss = 0.0447239 (* 1 = 0.0447239 loss)\nI0819 07:50:37.308351 22726 sgd_solver.cpp:166] Iteration 28900, lr = 0.7225\nI0819 07:52:54.776530 22726 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0819 07:54:19.289494 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0819 07:54:19.289767 22726 solver.cpp:404]     Test net output #1: loss = 0.471826 (* 1 = 0.471826 loss)\nI0819 07:54:20.621137 22726 solver.cpp:228] Iteration 29000, loss = 0.109903\nI0819 07:54:20.621171 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:54:20.621187 22726 solver.cpp:244]     Train net output #1: loss = 0.109902 (* 1 = 0.109902 
loss)\nI0819 07:54:20.695471 22726 sgd_solver.cpp:166] Iteration 29000, lr = 0.725\nI0819 07:56:38.248457 22726 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0819 07:58:02.702216 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88332\nI0819 07:58:02.702515 22726 solver.cpp:404]     Test net output #1: loss = 0.457141 (* 1 = 0.457141 loss)\nI0819 07:58:04.034198 22726 solver.cpp:228] Iteration 29100, loss = 0.0524762\nI0819 07:58:04.034234 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:58:04.034248 22726 solver.cpp:244]     Train net output #1: loss = 0.0524761 (* 1 = 0.0524761 loss)\nI0819 07:58:04.109503 22726 sgd_solver.cpp:166] Iteration 29100, lr = 0.7275\nI0819 08:00:21.610076 22726 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0819 08:01:46.060887 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0819 08:01:46.061363 22726 solver.cpp:404]     Test net output #1: loss = 0.461656 (* 1 = 0.461656 loss)\nI0819 08:01:47.391974 22726 solver.cpp:228] Iteration 29200, loss = 0.0488679\nI0819 08:01:47.392009 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 08:01:47.392024 22726 solver.cpp:244]     Train net output #1: loss = 0.0488678 (* 1 = 0.0488678 loss)\nI0819 08:01:47.473618 22726 sgd_solver.cpp:166] Iteration 29200, lr = 0.73\nI0819 08:04:04.987969 22726 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0819 08:05:29.462198 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0819 08:05:29.462502 22726 solver.cpp:404]     Test net output #1: loss = 0.45795 (* 1 = 0.45795 loss)\nI0819 08:05:30.793684 22726 solver.cpp:228] Iteration 29300, loss = 0.0480825\nI0819 08:05:30.793720 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:05:30.793735 22726 solver.cpp:244]     Train net output #1: loss = 0.0480824 (* 1 = 0.0480824 loss)\nI0819 08:05:30.875633 22726 sgd_solver.cpp:166] Iteration 29300, lr = 0.7325\nI0819 08:07:48.326345 22726 
solver.cpp:337] Iteration 29400, Testing net (#0)\nI0819 08:09:12.775975 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0819 08:09:12.776284 22726 solver.cpp:404]     Test net output #1: loss = 0.431411 (* 1 = 0.431411 loss)\nI0819 08:09:14.107455 22726 solver.cpp:228] Iteration 29400, loss = 0.0547697\nI0819 08:09:14.107489 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:09:14.107504 22726 solver.cpp:244]     Train net output #1: loss = 0.0547696 (* 1 = 0.0547696 loss)\nI0819 08:09:14.187139 22726 sgd_solver.cpp:166] Iteration 29400, lr = 0.735\nI0819 08:11:31.795578 22726 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0819 08:12:56.252795 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0819 08:12:56.253113 22726 solver.cpp:404]     Test net output #1: loss = 0.441078 (* 1 = 0.441078 loss)\nI0819 08:12:57.584944 22726 solver.cpp:228] Iteration 29500, loss = 0.0624006\nI0819 08:12:57.584978 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:12:57.584993 22726 solver.cpp:244]     Train net output #1: loss = 0.0624005 (* 1 = 0.0624005 loss)\nI0819 08:12:57.658684 22726 sgd_solver.cpp:166] Iteration 29500, lr = 0.7375\nI0819 08:15:15.175357 22726 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0819 08:16:39.620723 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88012\nI0819 08:16:39.621037 22726 solver.cpp:404]     Test net output #1: loss = 0.449989 (* 1 = 0.449989 loss)\nI0819 08:16:40.952549 22726 solver.cpp:228] Iteration 29600, loss = 0.0501943\nI0819 08:16:40.952601 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:16:40.952618 22726 solver.cpp:244]     Train net output #1: loss = 0.0501942 (* 1 = 0.0501942 loss)\nI0819 08:16:41.031301 22726 sgd_solver.cpp:166] Iteration 29600, lr = 0.74\nI0819 08:18:58.692787 22726 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0819 08:20:23.130172 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.87604\nI0819 08:20:23.130475 22726 solver.cpp:404]     Test net output #1: loss = 0.46689 (* 1 = 0.46689 loss)\nI0819 08:20:24.461685 22726 solver.cpp:228] Iteration 29700, loss = 0.116709\nI0819 08:20:24.461720 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:20:24.461736 22726 solver.cpp:244]     Train net output #1: loss = 0.116709 (* 1 = 0.116709 loss)\nI0819 08:20:24.544579 22726 sgd_solver.cpp:166] Iteration 29700, lr = 0.7425\nI0819 08:22:42.038801 22726 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0819 08:24:06.479215 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0819 08:24:06.479514 22726 solver.cpp:404]     Test net output #1: loss = 0.446691 (* 1 = 0.446691 loss)\nI0819 08:24:07.810636 22726 solver.cpp:228] Iteration 29800, loss = 0.115177\nI0819 08:24:07.810672 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:24:07.810686 22726 solver.cpp:244]     Train net output #1: loss = 0.115177 (* 1 = 0.115177 loss)\nI0819 08:24:07.890632 22726 sgd_solver.cpp:166] Iteration 29800, lr = 0.745\nI0819 08:26:25.461063 22726 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0819 08:27:49.906792 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0819 08:27:49.907100 22726 solver.cpp:404]     Test net output #1: loss = 0.435137 (* 1 = 0.435137 loss)\nI0819 08:27:51.237764 22726 solver.cpp:228] Iteration 29900, loss = 0.0910353\nI0819 08:27:51.237799 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:27:51.237814 22726 solver.cpp:244]     Train net output #1: loss = 0.0910352 (* 1 = 0.0910352 loss)\nI0819 08:27:51.318048 22726 sgd_solver.cpp:166] Iteration 29900, lr = 0.7475\nI0819 08:30:08.912600 22726 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0819 08:31:33.342260 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8874\nI0819 08:31:33.342536 22726 solver.cpp:404]     Test net output #1: loss = 0.413032 (* 1 = 0.413032 
loss)\nI0819 08:31:34.673228 22726 solver.cpp:228] Iteration 30000, loss = 0.0702202\nI0819 08:31:34.673264 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:31:34.673279 22726 solver.cpp:244]     Train net output #1: loss = 0.0702201 (* 1 = 0.0702201 loss)\nI0819 08:31:34.754127 22726 sgd_solver.cpp:166] Iteration 30000, lr = 0.75\nI0819 08:33:52.545085 22726 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0819 08:35:16.976094 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87968\nI0819 08:35:16.976385 22726 solver.cpp:404]     Test net output #1: loss = 0.448139 (* 1 = 0.448139 loss)\nI0819 08:35:18.306871 22726 solver.cpp:228] Iteration 30100, loss = 0.139183\nI0819 08:35:18.306912 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:35:18.306928 22726 solver.cpp:244]     Train net output #1: loss = 0.139183 (* 1 = 0.139183 loss)\nI0819 08:35:18.382403 22726 sgd_solver.cpp:166] Iteration 30100, lr = 0.7525\nI0819 08:37:36.017807 22726 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0819 08:39:00.447352 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88708\nI0819 08:39:00.447650 22726 solver.cpp:404]     Test net output #1: loss = 0.435095 (* 1 = 0.435095 loss)\nI0819 08:39:01.778164 22726 solver.cpp:228] Iteration 30200, loss = 0.10561\nI0819 08:39:01.778199 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 08:39:01.778215 22726 solver.cpp:244]     Train net output #1: loss = 0.10561 (* 1 = 0.10561 loss)\nI0819 08:39:01.855389 22726 sgd_solver.cpp:166] Iteration 30200, lr = 0.755\nI0819 08:41:19.466261 22726 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0819 08:42:43.920336 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87736\nI0819 08:42:43.920637 22726 solver.cpp:404]     Test net output #1: loss = 0.471028 (* 1 = 0.471028 loss)\nI0819 08:42:45.251106 22726 solver.cpp:228] Iteration 30300, loss = 0.106829\nI0819 08:42:45.251142 22726 solver.cpp:244] 
    Train net output #0: accuracy = 0.952\nI0819 08:42:45.251157 22726 solver.cpp:244]     Train net output #1: loss = 0.106829 (* 1 = 0.106829 loss)\nI0819 08:42:45.335074 22726 sgd_solver.cpp:166] Iteration 30300, lr = 0.7575\nI0819 08:45:02.900081 22726 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0819 08:46:27.346904 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0819 08:46:27.347183 22726 solver.cpp:404]     Test net output #1: loss = 0.444016 (* 1 = 0.444016 loss)\nI0819 08:46:28.678191 22726 solver.cpp:228] Iteration 30400, loss = 0.0793023\nI0819 08:46:28.678228 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:46:28.678242 22726 solver.cpp:244]     Train net output #1: loss = 0.0793022 (* 1 = 0.0793022 loss)\nI0819 08:46:28.755342 22726 sgd_solver.cpp:166] Iteration 30400, lr = 0.76\nI0819 08:48:46.424283 22726 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0819 08:50:10.851583 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86392\nI0819 08:50:10.851867 22726 solver.cpp:404]     Test net output #1: loss = 0.522773 (* 1 = 0.522773 loss)\nI0819 08:50:12.182643 22726 solver.cpp:228] Iteration 30500, loss = 0.202588\nI0819 08:50:12.182679 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 08:50:12.182693 22726 solver.cpp:244]     Train net output #1: loss = 0.202588 (* 1 = 0.202588 loss)\nI0819 08:50:12.265075 22726 sgd_solver.cpp:166] Iteration 30500, lr = 0.7625\nI0819 08:52:30.024327 22726 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0819 08:53:54.454108 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88392\nI0819 08:53:54.454411 22726 solver.cpp:404]     Test net output #1: loss = 0.447723 (* 1 = 0.447723 loss)\nI0819 08:53:55.785562 22726 solver.cpp:228] Iteration 30600, loss = 0.067538\nI0819 08:53:55.785596 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:53:55.785612 22726 solver.cpp:244]     Train net output #1: loss = 
0.0675379 (* 1 = 0.0675379 loss)\nI0819 08:53:55.867256 22726 sgd_solver.cpp:166] Iteration 30600, lr = 0.765\nI0819 08:56:13.516572 22726 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0819 08:57:37.901700 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0819 08:57:37.901995 22726 solver.cpp:404]     Test net output #1: loss = 0.440899 (* 1 = 0.440899 loss)\nI0819 08:57:39.232271 22726 solver.cpp:228] Iteration 30700, loss = 0.187685\nI0819 08:57:39.232306 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 08:57:39.232321 22726 solver.cpp:244]     Train net output #1: loss = 0.187685 (* 1 = 0.187685 loss)\nI0819 08:57:39.309661 22726 sgd_solver.cpp:166] Iteration 30700, lr = 0.7675\nI0819 08:59:57.043928 22726 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0819 09:01:21.435783 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88564\nI0819 09:01:21.436096 22726 solver.cpp:404]     Test net output #1: loss = 0.422293 (* 1 = 0.422293 loss)\nI0819 09:01:22.766752 22726 solver.cpp:228] Iteration 30800, loss = 0.0701362\nI0819 09:01:22.766788 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:01:22.766803 22726 solver.cpp:244]     Train net output #1: loss = 0.0701361 (* 1 = 0.0701361 loss)\nI0819 09:01:22.849817 22726 sgd_solver.cpp:166] Iteration 30800, lr = 0.77\nI0819 09:03:40.414904 22726 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0819 09:05:04.810963 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0819 09:05:04.811254 22726 solver.cpp:404]     Test net output #1: loss = 0.437515 (* 1 = 0.437515 loss)\nI0819 09:05:06.140928 22726 solver.cpp:228] Iteration 30900, loss = 0.0682294\nI0819 09:05:06.140964 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:05:06.140980 22726 solver.cpp:244]     Train net output #1: loss = 0.0682293 (* 1 = 0.0682293 loss)\nI0819 09:05:06.221652 22726 sgd_solver.cpp:166] Iteration 30900, lr = 0.7725\nI0819 
09:07:23.712203 22726 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0819 09:08:48.110756 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0819 09:08:48.111068 22726 solver.cpp:404]     Test net output #1: loss = 0.438578 (* 1 = 0.438578 loss)\nI0819 09:08:49.441674 22726 solver.cpp:228] Iteration 31000, loss = 0.122047\nI0819 09:08:49.441709 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 09:08:49.441725 22726 solver.cpp:244]     Train net output #1: loss = 0.122047 (* 1 = 0.122047 loss)\nI0819 09:08:49.518422 22726 sgd_solver.cpp:166] Iteration 31000, lr = 0.775\nI0819 09:11:07.025246 22726 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0819 09:12:31.425832 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0819 09:12:31.426143 22726 solver.cpp:404]     Test net output #1: loss = 0.428616 (* 1 = 0.428616 loss)\nI0819 09:12:32.757129 22726 solver.cpp:228] Iteration 31100, loss = 0.0618621\nI0819 09:12:32.757164 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:12:32.757179 22726 solver.cpp:244]     Train net output #1: loss = 0.0618621 (* 1 = 0.0618621 loss)\nI0819 09:12:32.841480 22726 sgd_solver.cpp:166] Iteration 31100, lr = 0.7775\nI0819 09:14:50.374440 22726 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0819 09:16:14.774495 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0819 09:16:14.774791 22726 solver.cpp:404]     Test net output #1: loss = 0.430021 (* 1 = 0.430021 loss)\nI0819 09:16:16.105670 22726 solver.cpp:228] Iteration 31200, loss = 0.0794078\nI0819 09:16:16.105705 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 09:16:16.105720 22726 solver.cpp:244]     Train net output #1: loss = 0.0794078 (* 1 = 0.0794078 loss)\nI0819 09:16:16.184119 22726 sgd_solver.cpp:166] Iteration 31200, lr = 0.78\nI0819 09:18:33.788861 22726 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0819 09:19:58.188262 22726 solver.cpp:404]     
Test net output #0: accuracy = 0.88312\nI0819 09:19:58.188544 22726 solver.cpp:404]     Test net output #1: loss = 0.442896 (* 1 = 0.442896 loss)\nI0819 09:19:59.519289 22726 solver.cpp:228] Iteration 31300, loss = 0.0593549\nI0819 09:19:59.519326 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:19:59.519341 22726 solver.cpp:244]     Train net output #1: loss = 0.0593549 (* 1 = 0.0593549 loss)\nI0819 09:19:59.598203 22726 sgd_solver.cpp:166] Iteration 31300, lr = 0.7825\nI0819 09:22:17.202491 22726 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0819 09:23:41.595706 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87672\nI0819 09:23:41.596035 22726 solver.cpp:404]     Test net output #1: loss = 0.458991 (* 1 = 0.458991 loss)\nI0819 09:23:42.926709 22726 solver.cpp:228] Iteration 31400, loss = 0.176648\nI0819 09:23:42.926743 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:23:42.926759 22726 solver.cpp:244]     Train net output #1: loss = 0.176648 (* 1 = 0.176648 loss)\nI0819 09:23:43.007608 22726 sgd_solver.cpp:166] Iteration 31400, lr = 0.785\nI0819 09:26:00.629330 22726 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0819 09:27:25.022791 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0819 09:27:25.023102 22726 solver.cpp:404]     Test net output #1: loss = 0.468902 (* 1 = 0.468902 loss)\nI0819 09:27:26.353657 22726 solver.cpp:228] Iteration 31500, loss = 0.0581237\nI0819 09:27:26.353691 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:27:26.353708 22726 solver.cpp:244]     Train net output #1: loss = 0.0581237 (* 1 = 0.0581237 loss)\nI0819 09:27:26.434201 22726 sgd_solver.cpp:166] Iteration 31500, lr = 0.7875\nI0819 09:29:44.031363 22726 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0819 09:31:08.459061 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881681\nI0819 09:31:08.459358 22726 solver.cpp:404]     Test net output #1: loss = 
0.444472 (* 1 = 0.444472 loss)\nI0819 09:31:09.790043 22726 solver.cpp:228] Iteration 31600, loss = 0.122762\nI0819 09:31:09.790077 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:31:09.790092 22726 solver.cpp:244]     Train net output #1: loss = 0.122762 (* 1 = 0.122762 loss)\nI0819 09:31:09.870307 22726 sgd_solver.cpp:166] Iteration 31600, lr = 0.79\nI0819 09:33:27.480787 22726 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0819 09:34:51.908298 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0819 09:34:51.908598 22726 solver.cpp:404]     Test net output #1: loss = 0.423642 (* 1 = 0.423642 loss)\nI0819 09:34:53.239573 22726 solver.cpp:228] Iteration 31700, loss = 0.0663246\nI0819 09:34:53.239608 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:34:53.239622 22726 solver.cpp:244]     Train net output #1: loss = 0.0663246 (* 1 = 0.0663246 loss)\nI0819 09:34:53.314913 22726 sgd_solver.cpp:166] Iteration 31700, lr = 0.7925\nI0819 09:37:10.890817 22726 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0819 09:38:35.317184 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87556\nI0819 09:38:35.317486 22726 solver.cpp:404]     Test net output #1: loss = 0.448354 (* 1 = 0.448354 loss)\nI0819 09:38:36.648483 22726 solver.cpp:228] Iteration 31800, loss = 0.0679408\nI0819 09:38:36.648519 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:38:36.648535 22726 solver.cpp:244]     Train net output #1: loss = 0.0679407 (* 1 = 0.0679407 loss)\nI0819 09:38:36.727701 22726 sgd_solver.cpp:166] Iteration 31800, lr = 0.795\nI0819 09:40:54.302242 22726 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0819 09:42:18.730010 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0819 09:42:18.730314 22726 solver.cpp:404]     Test net output #1: loss = 0.431533 (* 1 = 0.431533 loss)\nI0819 09:42:20.061266 22726 solver.cpp:228] Iteration 31900, loss = 0.0841815\nI0819 
09:42:20.061302 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:42:20.061318 22726 solver.cpp:244]     Train net output #1: loss = 0.0841814 (* 1 = 0.0841814 loss)\nI0819 09:42:20.141485 22726 sgd_solver.cpp:166] Iteration 31900, lr = 0.7975\nI0819 09:44:37.698171 22726 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0819 09:46:02.120311 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0819 09:46:02.120604 22726 solver.cpp:404]     Test net output #1: loss = 0.453481 (* 1 = 0.453481 loss)\nI0819 09:46:03.451647 22726 solver.cpp:228] Iteration 32000, loss = 0.174344\nI0819 09:46:03.451683 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 09:46:03.451699 22726 solver.cpp:244]     Train net output #1: loss = 0.174344 (* 1 = 0.174344 loss)\nI0819 09:46:03.533208 22726 sgd_solver.cpp:166] Iteration 32000, lr = 0.8\nI0819 09:48:21.116336 22726 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0819 09:49:45.532085 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 09:49:45.532388 22726 solver.cpp:404]     Test net output #1: loss = 0.456768 (* 1 = 0.456768 loss)\nI0819 09:49:46.862479 22726 solver.cpp:228] Iteration 32100, loss = 0.0786766\nI0819 09:49:46.862514 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:49:46.862529 22726 solver.cpp:244]     Train net output #1: loss = 0.0786765 (* 1 = 0.0786765 loss)\nI0819 09:49:46.945642 22726 sgd_solver.cpp:166] Iteration 32100, lr = 0.8025\nI0819 09:52:04.558830 22726 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0819 09:53:28.977447 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0819 09:53:28.977751 22726 solver.cpp:404]     Test net output #1: loss = 0.469742 (* 1 = 0.469742 loss)\nI0819 09:53:30.308924 22726 solver.cpp:228] Iteration 32200, loss = 0.0861462\nI0819 09:53:30.308959 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:53:30.308975 22726 
solver.cpp:244]     Train net output #1: loss = 0.0861461 (* 1 = 0.0861461 loss)\nI0819 09:53:30.391372 22726 sgd_solver.cpp:166] Iteration 32200, lr = 0.805\nI0819 09:55:48.034144 22726 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0819 09:57:12.453557 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0819 09:57:12.453857 22726 solver.cpp:404]     Test net output #1: loss = 0.41867 (* 1 = 0.41867 loss)\nI0819 09:57:13.785284 22726 solver.cpp:228] Iteration 32300, loss = 0.0508906\nI0819 09:57:13.785317 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:57:13.785333 22726 solver.cpp:244]     Train net output #1: loss = 0.0508904 (* 1 = 0.0508904 loss)\nI0819 09:57:13.864079 22726 sgd_solver.cpp:166] Iteration 32300, lr = 0.8075\nI0819 09:59:31.485033 22726 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0819 10:00:55.958323 22726 solver.cpp:404]     Test net output #0: accuracy = 0.886\nI0819 10:00:55.958626 22726 solver.cpp:404]     Test net output #1: loss = 0.425182 (* 1 = 0.425182 loss)\nI0819 10:00:57.290150 22726 solver.cpp:228] Iteration 32400, loss = 0.127637\nI0819 10:00:57.290189 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:00:57.290211 22726 solver.cpp:244]     Train net output #1: loss = 0.127637 (* 1 = 0.127637 loss)\nI0819 10:00:57.372195 22726 sgd_solver.cpp:166] Iteration 32400, lr = 0.81\nI0819 10:03:14.964088 22726 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0819 10:04:39.472631 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87736\nI0819 10:04:39.472915 22726 solver.cpp:404]     Test net output #1: loss = 0.458994 (* 1 = 0.458994 loss)\nI0819 10:04:40.804404 22726 solver.cpp:228] Iteration 32500, loss = 0.131199\nI0819 10:04:40.804443 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 10:04:40.804466 22726 solver.cpp:244]     Train net output #1: loss = 0.131199 (* 1 = 0.131199 loss)\nI0819 10:04:40.880033 22726 sgd_solver.cpp:166] 
Iteration 32500, lr = 0.8125\nI0819 10:06:58.487534 22726 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0819 10:08:22.993618 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87876\nI0819 10:08:22.993911 22726 solver.cpp:404]     Test net output #1: loss = 0.447293 (* 1 = 0.447293 loss)\nI0819 10:08:24.325448 22726 solver.cpp:228] Iteration 32600, loss = 0.0982114\nI0819 10:08:24.325487 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:08:24.325511 22726 solver.cpp:244]     Train net output #1: loss = 0.0982113 (* 1 = 0.0982113 loss)\nI0819 10:08:24.409907 22726 sgd_solver.cpp:166] Iteration 32600, lr = 0.815\nI0819 10:10:41.915285 22726 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0819 10:12:06.466159 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87712\nI0819 10:12:06.466460 22726 solver.cpp:404]     Test net output #1: loss = 0.471629 (* 1 = 0.471629 loss)\nI0819 10:12:07.798305 22726 solver.cpp:228] Iteration 32700, loss = 0.0407355\nI0819 10:12:07.798343 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 10:12:07.798367 22726 solver.cpp:244]     Train net output #1: loss = 0.0407355 (* 1 = 0.0407355 loss)\nI0819 10:12:07.876168 22726 sgd_solver.cpp:166] Iteration 32700, lr = 0.8175\nI0819 10:14:25.482998 22726 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0819 10:15:49.986681 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87844\nI0819 10:15:49.986981 22726 solver.cpp:404]     Test net output #1: loss = 0.447842 (* 1 = 0.447842 loss)\nI0819 10:15:51.318658 22726 solver.cpp:228] Iteration 32800, loss = 0.0934006\nI0819 10:15:51.318696 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:15:51.318718 22726 solver.cpp:244]     Train net output #1: loss = 0.0934005 (* 1 = 0.0934005 loss)\nI0819 10:15:51.396833 22726 sgd_solver.cpp:166] Iteration 32800, lr = 0.82\nI0819 10:18:08.914808 22726 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0819 
10:19:33.351028 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0819 10:19:33.351346 22726 solver.cpp:404]     Test net output #1: loss = 0.441188 (* 1 = 0.441188 loss)\nI0819 10:19:34.682919 22726 solver.cpp:228] Iteration 32900, loss = 0.127571\nI0819 10:19:34.682955 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:19:34.682978 22726 solver.cpp:244]     Train net output #1: loss = 0.127571 (* 1 = 0.127571 loss)\nI0819 10:19:34.763181 22726 sgd_solver.cpp:166] Iteration 32900, lr = 0.8225\nI0819 10:21:52.254976 22726 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0819 10:23:16.744729 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0819 10:23:16.745039 22726 solver.cpp:404]     Test net output #1: loss = 0.451015 (* 1 = 0.451015 loss)\nI0819 10:23:18.075876 22726 solver.cpp:228] Iteration 33000, loss = 0.0399886\nI0819 10:23:18.075911 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:23:18.075935 22726 solver.cpp:244]     Train net output #1: loss = 0.0399885 (* 1 = 0.0399885 loss)\nI0819 10:23:18.158650 22726 sgd_solver.cpp:166] Iteration 33000, lr = 0.825\nI0819 10:25:35.756805 22726 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0819 10:27:00.227275 22726 solver.cpp:404]     Test net output #0: accuracy = 0.880321\nI0819 10:27:00.227584 22726 solver.cpp:404]     Test net output #1: loss = 0.439966 (* 1 = 0.439966 loss)\nI0819 10:27:01.559412 22726 solver.cpp:228] Iteration 33100, loss = 0.129761\nI0819 10:27:01.559449 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:27:01.559471 22726 solver.cpp:244]     Train net output #1: loss = 0.129761 (* 1 = 0.129761 loss)\nI0819 10:27:01.634296 22726 sgd_solver.cpp:166] Iteration 33100, lr = 0.8275\nI0819 10:29:19.259829 22726 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0819 10:30:43.741117 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0819 10:30:43.741430 22726 
solver.cpp:404]     Test net output #1: loss = 0.421979 (* 1 = 0.421979 loss)\nI0819 10:30:45.072633 22726 solver.cpp:228] Iteration 33200, loss = 0.0850488\nI0819 10:30:45.072669 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:30:45.072691 22726 solver.cpp:244]     Train net output #1: loss = 0.0850487 (* 1 = 0.0850487 loss)\nI0819 10:30:45.157466 22726 sgd_solver.cpp:166] Iteration 33200, lr = 0.83\nI0819 10:33:02.810366 22726 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0819 10:34:27.304579 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 10:34:27.304890 22726 solver.cpp:404]     Test net output #1: loss = 0.442261 (* 1 = 0.442261 loss)\nI0819 10:34:28.636648 22726 solver.cpp:228] Iteration 33300, loss = 0.0933326\nI0819 10:34:28.636685 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:34:28.636708 22726 solver.cpp:244]     Train net output #1: loss = 0.0933325 (* 1 = 0.0933325 loss)\nI0819 10:34:28.711511 22726 sgd_solver.cpp:166] Iteration 33300, lr = 0.8325\nI0819 10:36:46.197405 22726 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0819 10:38:10.655236 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 10:38:10.655541 22726 solver.cpp:404]     Test net output #1: loss = 0.439922 (* 1 = 0.439922 loss)\nI0819 10:38:11.987263 22726 solver.cpp:228] Iteration 33400, loss = 0.205085\nI0819 10:38:11.987300 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:38:11.987323 22726 solver.cpp:244]     Train net output #1: loss = 0.205085 (* 1 = 0.205085 loss)\nI0819 10:38:12.066967 22726 sgd_solver.cpp:166] Iteration 33400, lr = 0.835\nI0819 10:40:29.606003 22726 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0819 10:41:54.063793 22726 solver.cpp:404]     Test net output #0: accuracy = 0.886361\nI0819 10:41:54.064116 22726 solver.cpp:404]     Test net output #1: loss = 0.421667 (* 1 = 0.421667 loss)\nI0819 10:41:55.395411 22726 solver.cpp:228] 
Iteration 33500, loss = 0.104513\nI0819 10:41:55.395448 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:41:55.395472 22726 solver.cpp:244]     Train net output #1: loss = 0.104513 (* 1 = 0.104513 loss)\nI0819 10:41:55.477923 22726 sgd_solver.cpp:166] Iteration 33500, lr = 0.8375\nI0819 10:44:13.010120 22726 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0819 10:45:37.508426 22726 solver.cpp:404]     Test net output #0: accuracy = 0.882601\nI0819 10:45:37.508740 22726 solver.cpp:404]     Test net output #1: loss = 0.427444 (* 1 = 0.427444 loss)\nI0819 10:45:38.835454 22726 solver.cpp:228] Iteration 33600, loss = 0.107491\nI0819 10:45:38.835491 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:45:38.835513 22726 solver.cpp:244]     Train net output #1: loss = 0.107491 (* 1 = 0.107491 loss)\nI0819 10:45:38.918422 22726 sgd_solver.cpp:166] Iteration 33600, lr = 0.84\nI0819 10:47:56.233397 22726 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0819 10:49:20.739059 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0819 10:49:20.739372 22726 solver.cpp:404]     Test net output #1: loss = 0.456359 (* 1 = 0.456359 loss)\nI0819 10:49:22.067198 22726 solver.cpp:228] Iteration 33700, loss = 0.121948\nI0819 10:49:22.067234 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:49:22.067258 22726 solver.cpp:244]     Train net output #1: loss = 0.121948 (* 1 = 0.121948 loss)\nI0819 10:49:22.152750 22726 sgd_solver.cpp:166] Iteration 33700, lr = 0.8425\nI0819 10:51:39.323415 22726 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0819 10:53:03.844954 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 10:53:03.845259 22726 solver.cpp:404]     Test net output #1: loss = 0.44264 (* 1 = 0.44264 loss)\nI0819 10:53:05.172906 22726 solver.cpp:228] Iteration 33800, loss = 0.141986\nI0819 10:53:05.172951 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 
10:53:05.172976 22726 solver.cpp:244]     Train net output #1: loss = 0.141986 (* 1 = 0.141986 loss)\nI0819 10:53:05.299018 22726 sgd_solver.cpp:166] Iteration 33800, lr = 0.845\nI0819 10:55:22.473892 22726 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0819 10:56:46.935714 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0819 10:56:46.936020 22726 solver.cpp:404]     Test net output #1: loss = 0.432113 (* 1 = 0.432113 loss)\nI0819 10:56:48.264398 22726 solver.cpp:228] Iteration 33900, loss = 0.145024\nI0819 10:56:48.264434 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 10:56:48.264457 22726 solver.cpp:244]     Train net output #1: loss = 0.145024 (* 1 = 0.145024 loss)\nI0819 10:56:48.344640 22726 sgd_solver.cpp:166] Iteration 33900, lr = 0.8475\nI0819 10:59:05.669057 22726 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0819 11:00:30.181305 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0819 11:00:30.181612 22726 solver.cpp:404]     Test net output #1: loss = 0.434911 (* 1 = 0.434911 loss)\nI0819 11:00:31.508622 22726 solver.cpp:228] Iteration 34000, loss = 0.0850736\nI0819 11:00:31.508668 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:00:31.508692 22726 solver.cpp:244]     Train net output #1: loss = 0.0850735 (* 1 = 0.0850735 loss)\nI0819 11:00:31.591910 22726 sgd_solver.cpp:166] Iteration 34000, lr = 0.85\nI0819 11:02:48.879951 22726 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0819 11:04:13.375015 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88372\nI0819 11:04:13.375313 22726 solver.cpp:404]     Test net output #1: loss = 0.43781 (* 1 = 0.43781 loss)\nI0819 11:04:14.702394 22726 solver.cpp:228] Iteration 34100, loss = 0.0988423\nI0819 11:04:14.702430 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:04:14.702452 22726 solver.cpp:244]     Train net output #1: loss = 0.0988422 (* 1 = 0.0988422 loss)\nI0819 11:04:14.782506 
22726 sgd_solver.cpp:166] Iteration 34100, lr = 0.8525\nI0819 11:06:31.999769 22726 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0819 11:07:56.555734 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0819 11:07:56.556030 22726 solver.cpp:404]     Test net output #1: loss = 0.445191 (* 1 = 0.445191 loss)\nI0819 11:07:57.883322 22726 solver.cpp:228] Iteration 34200, loss = 0.139664\nI0819 11:07:57.883359 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 11:07:57.883383 22726 solver.cpp:244]     Train net output #1: loss = 0.139664 (* 1 = 0.139664 loss)\nI0819 11:07:57.965914 22726 sgd_solver.cpp:166] Iteration 34200, lr = 0.855\nI0819 11:10:15.176045 22726 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0819 11:11:39.687098 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87476\nI0819 11:11:39.687408 22726 solver.cpp:404]     Test net output #1: loss = 0.483347 (* 1 = 0.483347 loss)\nI0819 11:11:41.014329 22726 solver.cpp:228] Iteration 34300, loss = 0.0730371\nI0819 11:11:41.014366 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:11:41.014389 22726 solver.cpp:244]     Train net output #1: loss = 0.073037 (* 1 = 0.073037 loss)\nI0819 11:11:41.090283 22726 sgd_solver.cpp:166] Iteration 34300, lr = 0.8575\nI0819 11:13:58.274837 22726 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0819 11:15:22.742943 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88268\nI0819 11:15:22.743219 22726 solver.cpp:404]     Test net output #1: loss = 0.446385 (* 1 = 0.446385 loss)\nI0819 11:15:24.071008 22726 solver.cpp:228] Iteration 34400, loss = 0.0893001\nI0819 11:15:24.071045 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 11:15:24.071069 22726 solver.cpp:244]     Train net output #1: loss = 0.0893 (* 1 = 0.0893 loss)\nI0819 11:15:24.155546 22726 sgd_solver.cpp:166] Iteration 34400, lr = 0.86\nI0819 11:17:41.341876 22726 solver.cpp:337] Iteration 34500, Testing net 
(#0)\nI0819 11:19:05.791903 22726 solver.cpp:404]     Test net output #0: accuracy = 0.89036\nI0819 11:19:05.792214 22726 solver.cpp:404]     Test net output #1: loss = 0.40551 (* 1 = 0.40551 loss)\nI0819 11:19:07.119941 22726 solver.cpp:228] Iteration 34500, loss = 0.0656912\nI0819 11:19:07.119978 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 11:19:07.120003 22726 solver.cpp:244]     Train net output #1: loss = 0.0656911 (* 1 = 0.0656911 loss)\nI0819 11:19:07.202391 22726 sgd_solver.cpp:166] Iteration 34500, lr = 0.8625\nI0819 11:21:24.450196 22726 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0819 11:22:48.946360 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86956\nI0819 11:22:48.946671 22726 solver.cpp:404]     Test net output #1: loss = 0.485561 (* 1 = 0.485561 loss)\nI0819 11:22:50.273499 22726 solver.cpp:228] Iteration 34600, loss = 0.216599\nI0819 11:22:50.273533 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 11:22:50.273556 22726 solver.cpp:244]     Train net output #1: loss = 0.216599 (* 1 = 0.216599 loss)\nI0819 11:22:50.353615 22726 sgd_solver.cpp:166] Iteration 34600, lr = 0.865\nI0819 11:25:07.613142 22726 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0819 11:26:32.084223 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87908\nI0819 11:26:32.084534 22726 solver.cpp:404]     Test net output #1: loss = 0.47289 (* 1 = 0.47289 loss)\nI0819 11:26:33.411763 22726 solver.cpp:228] Iteration 34700, loss = 0.130743\nI0819 11:26:33.411810 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 11:26:33.411836 22726 solver.cpp:244]     Train net output #1: loss = 0.130743 (* 1 = 0.130743 loss)\nI0819 11:26:33.488091 22726 sgd_solver.cpp:166] Iteration 34700, lr = 0.8675\nI0819 11:28:50.681360 22726 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0819 11:30:15.180500 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88192\nI0819 11:30:15.180811 22726 
solver.cpp:404]     Test net output #1: loss = 0.429555 (* 1 = 0.429555 loss)\nI0819 11:30:16.508359 22726 solver.cpp:228] Iteration 34800, loss = 0.0583815\nI0819 11:30:16.508407 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 11:30:16.508433 22726 solver.cpp:244]     Train net output #1: loss = 0.0583814 (* 1 = 0.0583814 loss)\nI0819 11:30:16.585562 22726 sgd_solver.cpp:166] Iteration 34800, lr = 0.87\nI0819 11:32:33.761363 22726 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0819 11:33:58.218909 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 11:33:58.219230 22726 solver.cpp:404]     Test net output #1: loss = 0.449507 (* 1 = 0.449507 loss)\nI0819 11:33:59.547206 22726 solver.cpp:228] Iteration 34900, loss = 0.0590279\nI0819 11:33:59.547243 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:33:59.547266 22726 solver.cpp:244]     Train net output #1: loss = 0.0590278 (* 1 = 0.0590278 loss)\nI0819 11:33:59.625488 22726 sgd_solver.cpp:166] Iteration 34900, lr = 0.8725\nI0819 11:36:17.291206 22726 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0819 11:37:42.448400 22726 solver.cpp:404]     Test net output #0: accuracy = 0.877281\nI0819 11:37:42.448668 22726 solver.cpp:404]     Test net output #1: loss = 0.454246 (* 1 = 0.454246 loss)\nI0819 11:37:43.779388 22726 solver.cpp:228] Iteration 35000, loss = 0.130402\nI0819 11:37:43.779433 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:37:43.779448 22726 solver.cpp:244]     Train net output #1: loss = 0.130402 (* 1 = 0.130402 loss)\nI0819 11:37:43.859644 22726 sgd_solver.cpp:166] Iteration 35000, lr = 0.875\nI0819 11:40:01.514215 22726 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0819 11:41:26.671556 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88368\nI0819 11:41:26.671866 22726 solver.cpp:404]     Test net output #1: loss = 0.433497 (* 1 = 0.433497 loss)\nI0819 11:41:28.003280 22726 
solver.cpp:228] Iteration 35100, loss = 0.12095\nI0819 11:41:28.003325 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:41:28.003341 22726 solver.cpp:244]     Train net output #1: loss = 0.12095 (* 1 = 0.12095 loss)\nI0819 11:41:28.084744 22726 sgd_solver.cpp:166] Iteration 35100, lr = 0.8775\nI0819 11:43:45.731544 22726 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0819 11:45:10.893591 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87936\nI0819 11:45:10.893878 22726 solver.cpp:404]     Test net output #1: loss = 0.470291 (* 1 = 0.470291 loss)\nI0819 11:45:12.224889 22726 solver.cpp:228] Iteration 35200, loss = 0.0761982\nI0819 11:45:12.224933 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:45:12.224949 22726 solver.cpp:244]     Train net output #1: loss = 0.0761982 (* 1 = 0.0761982 loss)\nI0819 11:45:12.298589 22726 sgd_solver.cpp:166] Iteration 35200, lr = 0.88\nI0819 11:47:30.052994 22726 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0819 11:48:55.127668 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881121\nI0819 11:48:55.127952 22726 solver.cpp:404]     Test net output #1: loss = 0.429046 (* 1 = 0.429046 loss)\nI0819 11:48:56.458364 22726 solver.cpp:228] Iteration 35300, loss = 0.062588\nI0819 11:48:56.458410 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 11:48:56.458425 22726 solver.cpp:244]     Train net output #1: loss = 0.0625879 (* 1 = 0.0625879 loss)\nI0819 11:48:56.537005 22726 sgd_solver.cpp:166] Iteration 35300, lr = 0.8825\nI0819 11:51:14.365465 22726 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0819 11:52:39.479009 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 11:52:39.479338 22726 solver.cpp:404]     Test net output #1: loss = 0.430366 (* 1 = 0.430366 loss)\nI0819 11:52:40.809991 22726 solver.cpp:228] Iteration 35400, loss = 0.0182442\nI0819 11:52:40.810039 22726 solver.cpp:244]     Train net output #0: 
accuracy = 1\nI0819 11:52:40.810053 22726 solver.cpp:244]     Train net output #1: loss = 0.0182441 (* 1 = 0.0182441 loss)\nI0819 11:52:40.893828 22726 sgd_solver.cpp:166] Iteration 35400, lr = 0.885\nI0819 11:54:58.834329 22726 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0819 11:56:23.907274 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0819 11:56:23.907507 22726 solver.cpp:404]     Test net output #1: loss = 0.436344 (* 1 = 0.436344 loss)\nI0819 11:56:25.237797 22726 solver.cpp:228] Iteration 35500, loss = 0.0943684\nI0819 11:56:25.237841 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:56:25.237859 22726 solver.cpp:244]     Train net output #1: loss = 0.0943683 (* 1 = 0.0943683 loss)\nI0819 11:56:25.321229 22726 sgd_solver.cpp:166] Iteration 35500, lr = 0.8875\nI0819 11:58:43.124017 22726 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0819 12:00:07.552664 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88544\nI0819 12:00:07.552970 22726 solver.cpp:404]     Test net output #1: loss = 0.421935 (* 1 = 0.421935 loss)\nI0819 12:00:08.883034 22726 solver.cpp:228] Iteration 35600, loss = 0.0581537\nI0819 12:00:08.883074 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 12:00:08.883090 22726 solver.cpp:244]     Train net output #1: loss = 0.0581537 (* 1 = 0.0581537 loss)\nI0819 12:00:08.961565 22726 sgd_solver.cpp:166] Iteration 35600, lr = 0.89\nI0819 12:02:26.765532 22726 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0819 12:03:51.200417 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0819 12:03:51.200721 22726 solver.cpp:404]     Test net output #1: loss = 0.467893 (* 1 = 0.467893 loss)\nI0819 12:03:52.531395 22726 solver.cpp:228] Iteration 35700, loss = 0.143833\nI0819 12:03:52.531430 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 12:03:52.531443 22726 solver.cpp:244]     Train net output #1: loss = 0.143833 (* 1 = 0.143833 
loss)\nI0819 12:03:52.610018 22726 sgd_solver.cpp:166] Iteration 35700, lr = 0.8925\nI0819 12:06:10.221515 22726 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0819 12:07:34.653018 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88916\nI0819 12:07:34.653313 22726 solver.cpp:404]     Test net output #1: loss = 0.410016 (* 1 = 0.410016 loss)\nI0819 12:07:35.983741 22726 solver.cpp:228] Iteration 35800, loss = 0.098717\nI0819 12:07:35.983774 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 12:07:35.983789 22726 solver.cpp:244]     Train net output #1: loss = 0.0987169 (* 1 = 0.0987169 loss)\nI0819 12:07:36.069310 22726 sgd_solver.cpp:166] Iteration 35800, lr = 0.895\nI0819 12:09:53.644330 22726 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0819 12:11:18.075587 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88056\nI0819 12:11:18.075893 22726 solver.cpp:404]     Test net output #1: loss = 0.439031 (* 1 = 0.439031 loss)\nI0819 12:11:19.406144 22726 solver.cpp:228] Iteration 35900, loss = 0.0531521\nI0819 12:11:19.406178 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 12:11:19.406193 22726 solver.cpp:244]     Train net output #1: loss = 0.053152 (* 1 = 0.053152 loss)\nI0819 12:11:19.491276 22726 sgd_solver.cpp:166] Iteration 35900, lr = 0.8975\nI0819 12:13:37.127110 22726 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0819 12:15:01.618541 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87892\nI0819 12:15:01.618858 22726 solver.cpp:404]     Test net output #1: loss = 0.444767 (* 1 = 0.444767 loss)\nI0819 12:15:02.949357 22726 solver.cpp:228] Iteration 36000, loss = 0.075498\nI0819 12:15:02.949393 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:15:02.949417 22726 solver.cpp:244]     Train net output #1: loss = 0.0754979 (* 1 = 0.0754979 loss)\nI0819 12:15:03.027142 22726 sgd_solver.cpp:166] Iteration 36000, lr = 0.9\nI0819 12:17:20.839973 22726 
solver.cpp:337] Iteration 36100, Testing net (#0)\nI0819 12:18:45.353132 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0819 12:18:45.353446 22726 solver.cpp:404]     Test net output #1: loss = 0.432587 (* 1 = 0.432587 loss)\nI0819 12:18:46.684080 22726 solver.cpp:228] Iteration 36100, loss = 0.100446\nI0819 12:18:46.684118 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:18:46.684140 22726 solver.cpp:244]     Train net output #1: loss = 0.100446 (* 1 = 0.100446 loss)\nI0819 12:18:46.766888 22726 sgd_solver.cpp:166] Iteration 36100, lr = 0.9025\nI0819 12:21:04.400185 22726 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0819 12:22:28.881675 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0819 12:22:28.881992 22726 solver.cpp:404]     Test net output #1: loss = 0.416939 (* 1 = 0.416939 loss)\nI0819 12:22:30.213584 22726 solver.cpp:228] Iteration 36200, loss = 0.0873027\nI0819 12:22:30.213620 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:22:30.213644 22726 solver.cpp:244]     Train net output #1: loss = 0.0873027 (* 1 = 0.0873027 loss)\nI0819 12:22:30.296900 22726 sgd_solver.cpp:166] Iteration 36200, lr = 0.905\nI0819 12:24:47.873881 22726 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0819 12:26:12.366545 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879801\nI0819 12:26:12.366858 22726 solver.cpp:404]     Test net output #1: loss = 0.438993 (* 1 = 0.438993 loss)\nI0819 12:26:13.699185 22726 solver.cpp:228] Iteration 36300, loss = 0.120873\nI0819 12:26:13.699223 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:26:13.699245 22726 solver.cpp:244]     Train net output #1: loss = 0.120873 (* 1 = 0.120873 loss)\nI0819 12:26:13.778205 22726 sgd_solver.cpp:166] Iteration 36300, lr = 0.9075\nI0819 12:28:31.381353 22726 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0819 12:29:55.821614 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.88124\nI0819 12:29:55.821933 22726 solver.cpp:404]     Test net output #1: loss = 0.434625 (* 1 = 0.434625 loss)\nI0819 12:29:57.152890 22726 solver.cpp:228] Iteration 36400, loss = 0.100242\nI0819 12:29:57.152927 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 12:29:57.152951 22726 solver.cpp:244]     Train net output #1: loss = 0.100242 (* 1 = 0.100242 loss)\nI0819 12:29:57.232836 22726 sgd_solver.cpp:166] Iteration 36400, lr = 0.91\nI0819 12:32:14.883358 22726 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0819 12:33:39.367341 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0819 12:33:39.367661 22726 solver.cpp:404]     Test net output #1: loss = 0.419955 (* 1 = 0.419955 loss)\nI0819 12:33:40.699833 22726 solver.cpp:228] Iteration 36500, loss = 0.0504498\nI0819 12:33:40.699869 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 12:33:40.699892 22726 solver.cpp:244]     Train net output #1: loss = 0.0504497 (* 1 = 0.0504497 loss)\nI0819 12:33:40.781899 22726 sgd_solver.cpp:166] Iteration 36500, lr = 0.9125\nI0819 12:35:58.491993 22726 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0819 12:37:22.962570 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88612\nI0819 12:37:22.962893 22726 solver.cpp:404]     Test net output #1: loss = 0.430964 (* 1 = 0.430964 loss)\nI0819 12:37:24.294356 22726 solver.cpp:228] Iteration 36600, loss = 0.119535\nI0819 12:37:24.294394 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 12:37:24.294416 22726 solver.cpp:244]     Train net output #1: loss = 0.119535 (* 1 = 0.119535 loss)\nI0819 12:37:24.377463 22726 sgd_solver.cpp:166] Iteration 36600, lr = 0.915\nI0819 12:39:41.953155 22726 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0819 12:41:06.476681 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88312\nI0819 12:41:06.476984 22726 solver.cpp:404]     Test net output #1: loss = 0.434615 (* 1 = 0.434615 
loss)\nI0819 12:41:07.808193 22726 solver.cpp:228] Iteration 36700, loss = 0.14272\nI0819 12:41:07.808231 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:41:07.808254 22726 solver.cpp:244]     Train net output #1: loss = 0.14272 (* 1 = 0.14272 loss)\nI0819 12:41:07.884529 22726 sgd_solver.cpp:166] Iteration 36700, lr = 0.9175\nI0819 12:43:25.533535 22726 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0819 12:44:50.033421 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0819 12:44:50.033740 22726 solver.cpp:404]     Test net output #1: loss = 0.440452 (* 1 = 0.440452 loss)\nI0819 12:44:51.365067 22726 solver.cpp:228] Iteration 36800, loss = 0.156665\nI0819 12:44:51.365104 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 12:44:51.365126 22726 solver.cpp:244]     Train net output #1: loss = 0.156665 (* 1 = 0.156665 loss)\nI0819 12:44:51.443770 22726 sgd_solver.cpp:166] Iteration 36800, lr = 0.92\nI0819 12:47:09.152087 22726 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0819 12:48:33.729378 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88324\nI0819 12:48:33.729743 22726 solver.cpp:404]     Test net output #1: loss = 0.419324 (* 1 = 0.419324 loss)\nI0819 12:48:35.061101 22726 solver.cpp:228] Iteration 36900, loss = 0.100001\nI0819 12:48:35.061138 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:48:35.061161 22726 solver.cpp:244]     Train net output #1: loss = 0.100001 (* 1 = 0.100001 loss)\nI0819 12:48:35.142506 22726 sgd_solver.cpp:166] Iteration 36900, lr = 0.9225\nI0819 12:50:52.821064 22726 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0819 12:52:17.361459 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0819 12:52:17.361774 22726 solver.cpp:404]     Test net output #1: loss = 0.410122 (* 1 = 0.410122 loss)\nI0819 12:52:18.693009 22726 solver.cpp:228] Iteration 37000, loss = 0.105938\nI0819 12:52:18.693048 22726 solver.cpp:244]   
  Train net output #0: accuracy = 0.952\nI0819 12:52:18.693076 22726 solver.cpp:244]     Train net output #1: loss = 0.105938 (* 1 = 0.105938 loss)\nI0819 12:52:18.775583 22726 sgd_solver.cpp:166] Iteration 37000, lr = 0.925\nI0819 12:54:36.325585 22726 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0819 12:56:00.893069 22726 solver.cpp:404]     Test net output #0: accuracy = 0.888281\nI0819 12:56:00.893401 22726 solver.cpp:404]     Test net output #1: loss = 0.403588 (* 1 = 0.403588 loss)\nI0819 12:56:02.225137 22726 solver.cpp:228] Iteration 37100, loss = 0.175607\nI0819 12:56:02.225173 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 12:56:02.225198 22726 solver.cpp:244]     Train net output #1: loss = 0.175607 (* 1 = 0.175607 loss)\nI0819 12:56:02.300684 22726 sgd_solver.cpp:166] Iteration 37100, lr = 0.9275\nI0819 12:58:19.814576 22726 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0819 12:59:44.343343 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0819 12:59:44.343662 22726 solver.cpp:404]     Test net output #1: loss = 0.442411 (* 1 = 0.442411 loss)\nI0819 12:59:45.675154 22726 solver.cpp:228] Iteration 37200, loss = 0.165006\nI0819 12:59:45.675194 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 12:59:45.675215 22726 solver.cpp:244]     Train net output #1: loss = 0.165006 (* 1 = 0.165006 loss)\nI0819 12:59:45.759537 22726 sgd_solver.cpp:166] Iteration 37200, lr = 0.93\nI0819 13:02:03.323073 22726 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0819 13:03:27.854696 22726 solver.cpp:404]     Test net output #0: accuracy = 0.89048\nI0819 13:03:27.855013 22726 solver.cpp:404]     Test net output #1: loss = 0.417886 (* 1 = 0.417886 loss)\nI0819 13:03:29.186610 22726 solver.cpp:228] Iteration 37300, loss = 0.170814\nI0819 13:03:29.186650 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 13:03:29.186673 22726 solver.cpp:244]     Train net output #1: loss = 0.170814 
(* 1 = 0.170814 loss)\nI0819 13:03:29.266129 22726 sgd_solver.cpp:166] Iteration 37300, lr = 0.9325\nI0819 13:05:46.775317 22726 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0819 13:07:11.267493 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8872\nI0819 13:07:11.267802 22726 solver.cpp:404]     Test net output #1: loss = 0.419553 (* 1 = 0.419553 loss)\nI0819 13:07:12.599593 22726 solver.cpp:228] Iteration 37400, loss = 0.0850908\nI0819 13:07:12.599632 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 13:07:12.599655 22726 solver.cpp:244]     Train net output #1: loss = 0.0850907 (* 1 = 0.0850907 loss)\nI0819 13:07:12.678598 22726 sgd_solver.cpp:166] Iteration 37400, lr = 0.935\nI0819 13:09:30.189268 22726 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0819 13:10:54.731304 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88612\nI0819 13:10:54.731621 22726 solver.cpp:404]     Test net output #1: loss = 0.420505 (* 1 = 0.420505 loss)\nI0819 13:10:56.063413 22726 solver.cpp:228] Iteration 37500, loss = 0.0546935\nI0819 13:10:56.063452 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 13:10:56.063477 22726 solver.cpp:244]     Train net output #1: loss = 0.0546934 (* 1 = 0.0546934 loss)\nI0819 13:10:56.141050 22726 sgd_solver.cpp:166] Iteration 37500, lr = 0.9375\nI0819 13:13:13.761420 22726 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0819 13:14:38.259623 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 13:14:38.259912 22726 solver.cpp:404]     Test net output #1: loss = 0.435853 (* 1 = 0.435853 loss)\nI0819 13:14:39.591626 22726 solver.cpp:228] Iteration 37600, loss = 0.151125\nI0819 13:14:39.591665 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 13:14:39.591687 22726 solver.cpp:244]     Train net output #1: loss = 0.151124 (* 1 = 0.151124 loss)\nI0819 13:14:39.673707 22726 sgd_solver.cpp:166] Iteration 37600, lr = 0.94\nI0819 13:16:57.316779 
22726 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0819 13:18:21.834892 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87704\nI0819 13:18:21.835209 22726 solver.cpp:404]     Test net output #1: loss = 0.463197 (* 1 = 0.463197 loss)\nI0819 13:18:23.166628 22726 solver.cpp:228] Iteration 37700, loss = 0.208281\nI0819 13:18:23.166666 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 13:18:23.166687 22726 solver.cpp:244]     Train net output #1: loss = 0.208281 (* 1 = 0.208281 loss)\nI0819 13:18:23.248874 22726 sgd_solver.cpp:166] Iteration 37700, lr = 0.9425\nI0819 13:20:40.795665 22726 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0819 13:22:05.308291 22726 solver.cpp:404]     Test net output #0: accuracy = 0.888561\nI0819 13:22:05.308594 22726 solver.cpp:404]     Test net output #1: loss = 0.402458 (* 1 = 0.402458 loss)\nI0819 13:22:06.639374 22726 solver.cpp:228] Iteration 37800, loss = 0.111242\nI0819 13:22:06.639412 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:22:06.639436 22726 solver.cpp:244]     Train net output #1: loss = 0.111242 (* 1 = 0.111242 loss)\nI0819 13:22:06.719475 22726 sgd_solver.cpp:166] Iteration 37800, lr = 0.945\nI0819 13:24:24.249256 22726 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0819 13:25:48.798945 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8854\nI0819 13:25:48.799268 22726 solver.cpp:404]     Test net output #1: loss = 0.401249 (* 1 = 0.401249 loss)\nI0819 13:25:50.131351 22726 solver.cpp:228] Iteration 37900, loss = 0.0975174\nI0819 13:25:50.131395 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:25:50.131420 22726 solver.cpp:244]     Train net output #1: loss = 0.0975172 (* 1 = 0.0975172 loss)\nI0819 13:25:50.215255 22726 sgd_solver.cpp:166] Iteration 37900, lr = 0.9475\nI0819 13:28:07.722625 22726 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0819 13:29:32.197860 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.88588\nI0819 13:29:32.198179 22726 solver.cpp:404]     Test net output #1: loss = 0.420121 (* 1 = 0.420121 loss)\nI0819 13:29:33.529881 22726 solver.cpp:228] Iteration 38000, loss = 0.132587\nI0819 13:29:33.529923 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:29:33.529947 22726 solver.cpp:244]     Train net output #1: loss = 0.132587 (* 1 = 0.132587 loss)\nI0819 13:29:33.605360 22726 sgd_solver.cpp:166] Iteration 38000, lr = 0.95\nI0819 13:31:51.104347 22726 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0819 13:33:15.566273 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 13:33:15.566586 22726 solver.cpp:404]     Test net output #1: loss = 0.455135 (* 1 = 0.455135 loss)\nI0819 13:33:16.894632 22726 solver.cpp:228] Iteration 38100, loss = 0.0721985\nI0819 13:33:16.894666 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:33:16.894681 22726 solver.cpp:244]     Train net output #1: loss = 0.0721984 (* 1 = 0.0721984 loss)\nI0819 13:33:16.977152 22726 sgd_solver.cpp:166] Iteration 38100, lr = 0.9525\nI0819 13:35:34.624838 22726 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0819 13:36:59.114022 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 13:36:59.114325 22726 solver.cpp:404]     Test net output #1: loss = 0.439902 (* 1 = 0.439902 loss)\nI0819 13:37:00.441920 22726 solver.cpp:228] Iteration 38200, loss = 0.0547399\nI0819 13:37:00.441962 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 13:37:00.441977 22726 solver.cpp:244]     Train net output #1: loss = 0.0547398 (* 1 = 0.0547398 loss)\nI0819 13:37:00.527192 22726 sgd_solver.cpp:166] Iteration 38200, lr = 0.955\nI0819 13:39:18.129560 22726 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0819 13:40:42.591884 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0819 13:40:42.592196 22726 solver.cpp:404]     Test net output #1: loss = 0.409734 (* 1 = 0.409734 
loss)\nI0819 13:40:43.919607 22726 solver.cpp:228] Iteration 38300, loss = 0.1603\nI0819 13:40:43.919639 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 13:40:43.919656 22726 solver.cpp:244]     Train net output #1: loss = 0.1603 (* 1 = 0.1603 loss)\nI0819 13:40:43.999399 22726 sgd_solver.cpp:166] Iteration 38300, lr = 0.9575\nI0819 13:43:01.716248 22726 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0819 13:44:26.227180 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87236\nI0819 13:44:26.227491 22726 solver.cpp:404]     Test net output #1: loss = 0.45943 (* 1 = 0.45943 loss)\nI0819 13:44:27.557904 22726 solver.cpp:228] Iteration 38400, loss = 0.15641\nI0819 13:44:27.557947 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:44:27.557965 22726 solver.cpp:244]     Train net output #1: loss = 0.15641 (* 1 = 0.15641 loss)\nI0819 13:44:27.639276 22726 sgd_solver.cpp:166] Iteration 38400, lr = 0.96\nI0819 13:46:45.566419 22726 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0819 13:48:10.061520 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0819 13:48:10.061835 22726 solver.cpp:404]     Test net output #1: loss = 0.417185 (* 1 = 0.417185 loss)\nI0819 13:48:11.393560 22726 solver.cpp:228] Iteration 38500, loss = 0.0525465\nI0819 13:48:11.393605 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:48:11.393622 22726 solver.cpp:244]     Train net output #1: loss = 0.0525463 (* 1 = 0.0525463 loss)\nI0819 13:48:11.478149 22726 sgd_solver.cpp:166] Iteration 38500, lr = 0.9625\nI0819 13:50:29.231680 22726 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0819 13:51:53.717523 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0819 13:51:53.717835 22726 solver.cpp:404]     Test net output #1: loss = 0.442293 (* 1 = 0.442293 loss)\nI0819 13:51:55.049330 22726 solver.cpp:228] Iteration 38600, loss = 0.0968207\nI0819 13:51:55.049374 22726 solver.cpp:244]     
Train net output #0: accuracy = 0.952\nI0819 13:51:55.049391 22726 solver.cpp:244]     Train net output #1: loss = 0.0968205 (* 1 = 0.0968205 loss)\nI0819 13:51:55.128656 22726 sgd_solver.cpp:166] Iteration 38600, lr = 0.965\nI0819 13:54:13.072935 22726 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0819 13:55:37.527145 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0819 13:55:37.527458 22726 solver.cpp:404]     Test net output #1: loss = 0.431332 (* 1 = 0.431332 loss)\nI0819 13:55:38.858675 22726 solver.cpp:228] Iteration 38700, loss = 0.0436653\nI0819 13:55:38.858721 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 13:55:38.858736 22726 solver.cpp:244]     Train net output #1: loss = 0.0436651 (* 1 = 0.0436651 loss)\nI0819 13:55:38.937386 22726 sgd_solver.cpp:166] Iteration 38700, lr = 0.9675\nI0819 13:57:56.707535 22726 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0819 13:59:21.198340 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 13:59:21.198654 22726 solver.cpp:404]     Test net output #1: loss = 0.427073 (* 1 = 0.427073 loss)\nI0819 13:59:22.530475 22726 solver.cpp:228] Iteration 38800, loss = 0.131698\nI0819 13:59:22.530519 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 13:59:22.530536 22726 solver.cpp:244]     Train net output #1: loss = 0.131698 (* 1 = 0.131698 loss)\nI0819 13:59:22.614711 22726 sgd_solver.cpp:166] Iteration 38800, lr = 0.97\nI0819 14:01:40.536041 22726 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0819 14:03:05.034021 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87824\nI0819 14:03:05.034339 22726 solver.cpp:404]     Test net output #1: loss = 0.439457 (* 1 = 0.439457 loss)\nI0819 14:03:06.364852 22726 solver.cpp:228] Iteration 38900, loss = 0.127364\nI0819 14:03:06.364898 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 14:03:06.364914 22726 solver.cpp:244]     Train net output #1: loss = 
0.127364 (* 1 = 0.127364 loss)\nI0819 14:03:06.450594 22726 sgd_solver.cpp:166] Iteration 38900, lr = 0.9725\nI0819 14:05:24.397042 22726 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0819 14:06:48.968999 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87824\nI0819 14:06:48.969317 22726 solver.cpp:404]     Test net output #1: loss = 0.448687 (* 1 = 0.448687 loss)\nI0819 14:06:50.297240 22726 solver.cpp:228] Iteration 39000, loss = 0.198809\nI0819 14:06:50.297276 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 14:06:50.297292 22726 solver.cpp:244]     Train net output #1: loss = 0.198809 (* 1 = 0.198809 loss)\nI0819 14:06:50.378574 22726 sgd_solver.cpp:166] Iteration 39000, lr = 0.975\nI0819 14:09:07.949425 22726 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0819 14:10:32.469285 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0819 14:10:32.469591 22726 solver.cpp:404]     Test net output #1: loss = 0.430042 (* 1 = 0.430042 loss)\nI0819 14:10:33.797693 22726 solver.cpp:228] Iteration 39100, loss = 0.101155\nI0819 14:10:33.797737 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:10:33.797754 22726 solver.cpp:244]     Train net output #1: loss = 0.101155 (* 1 = 0.101155 loss)\nI0819 14:10:33.880633 22726 sgd_solver.cpp:166] Iteration 39100, lr = 0.9775\nI0819 14:12:51.525198 22726 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0819 14:14:16.065369 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8778\nI0819 14:14:16.065680 22726 solver.cpp:404]     Test net output #1: loss = 0.431454 (* 1 = 0.431454 loss)\nI0819 14:14:17.393935 22726 solver.cpp:228] Iteration 39200, loss = 0.108407\nI0819 14:14:17.393980 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:14:17.393996 22726 solver.cpp:244]     Train net output #1: loss = 0.108407 (* 1 = 0.108407 loss)\nI0819 14:14:17.479441 22726 sgd_solver.cpp:166] Iteration 39200, lr = 0.98\nI0819 
14:16:35.196808 22726 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0819 14:17:59.726341 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 14:17:59.726652 22726 solver.cpp:404]     Test net output #1: loss = 0.432377 (* 1 = 0.432377 loss)\nI0819 14:18:01.053627 22726 solver.cpp:228] Iteration 39300, loss = 0.113894\nI0819 14:18:01.053673 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:18:01.053689 22726 solver.cpp:244]     Train net output #1: loss = 0.113893 (* 1 = 0.113893 loss)\nI0819 14:18:01.140277 22726 sgd_solver.cpp:166] Iteration 39300, lr = 0.9825\nI0819 14:20:18.658411 22726 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0819 14:21:43.107398 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 14:21:43.107712 22726 solver.cpp:404]     Test net output #1: loss = 0.433075 (* 1 = 0.433075 loss)\nI0819 14:21:44.435233 22726 solver.cpp:228] Iteration 39400, loss = 0.0729276\nI0819 14:21:44.435278 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:21:44.435295 22726 solver.cpp:244]     Train net output #1: loss = 0.0729274 (* 1 = 0.0729274 loss)\nI0819 14:21:44.519707 22726 sgd_solver.cpp:166] Iteration 39400, lr = 0.985\nI0819 14:24:02.162986 22726 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0819 14:25:26.594759 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87968\nI0819 14:25:26.595077 22726 solver.cpp:404]     Test net output #1: loss = 0.426412 (* 1 = 0.426412 loss)\nI0819 14:25:27.922189 22726 solver.cpp:228] Iteration 39500, loss = 0.171442\nI0819 14:25:27.922225 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 14:25:27.922240 22726 solver.cpp:244]     Train net output #1: loss = 0.171442 (* 1 = 0.171442 loss)\nI0819 14:25:28.006824 22726 sgd_solver.cpp:166] Iteration 39500, lr = 0.9875\nI0819 14:27:45.696240 22726 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0819 14:29:10.124325 22726 solver.cpp:404]     
Test net output #0: accuracy = 0.88192\nI0819 14:29:10.124637 22726 solver.cpp:404]     Test net output #1: loss = 0.445656 (* 1 = 0.445656 loss)\nI0819 14:29:11.451611 22726 solver.cpp:228] Iteration 39600, loss = 0.0759904\nI0819 14:29:11.451656 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 14:29:11.451673 22726 solver.cpp:244]     Train net output #1: loss = 0.0759902 (* 1 = 0.0759902 loss)\nI0819 14:29:11.536144 22726 sgd_solver.cpp:166] Iteration 39600, lr = 0.99\nI0819 14:31:29.150033 22726 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0819 14:32:53.576704 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88668\nI0819 14:32:53.577020 22726 solver.cpp:404]     Test net output #1: loss = 0.412761 (* 1 = 0.412761 loss)\nI0819 14:32:54.903630 22726 solver.cpp:228] Iteration 39700, loss = 0.128591\nI0819 14:32:54.903676 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 14:32:54.903692 22726 solver.cpp:244]     Train net output #1: loss = 0.12859 (* 1 = 0.12859 loss)\nI0819 14:32:54.987207 22726 sgd_solver.cpp:166] Iteration 39700, lr = 0.9925\nI0819 14:35:12.634361 22726 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0819 14:36:37.069723 22726 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0819 14:36:37.070044 22726 solver.cpp:404]     Test net output #1: loss = 0.409333 (* 1 = 0.409333 loss)\nI0819 14:36:38.396669 22726 solver.cpp:228] Iteration 39800, loss = 0.0716699\nI0819 14:36:38.396705 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:36:38.396720 22726 solver.cpp:244]     Train net output #1: loss = 0.0716697 (* 1 = 0.0716697 loss)\nI0819 14:36:38.475947 22726 sgd_solver.cpp:166] Iteration 39800, lr = 0.995\nI0819 14:38:56.095620 22726 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0819 14:40:20.514803 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0819 14:40:20.515123 22726 solver.cpp:404]     Test net output #1: loss = 0.426669 (* 
1 = 0.426669 loss)\nI0819 14:40:21.842047 22726 solver.cpp:228] Iteration 39900, loss = 0.105062\nI0819 14:40:21.842082 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 14:40:21.842098 22726 solver.cpp:244]     Train net output #1: loss = 0.105062 (* 1 = 0.105062 loss)\nI0819 14:40:21.921788 22726 sgd_solver.cpp:166] Iteration 39900, lr = 0.9975\nI0819 14:42:39.524056 22726 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0819 14:44:04.471660 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 14:44:04.471922 22726 solver.cpp:404]     Test net output #1: loss = 0.427824 (* 1 = 0.427824 loss)\nI0819 14:44:05.801090 22726 solver.cpp:228] Iteration 40000, loss = 0.112772\nI0819 14:44:05.801136 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:44:05.801152 22726 solver.cpp:244]     Train net output #1: loss = 0.112772 (* 1 = 0.112772 loss)\nI0819 14:44:05.886127 22726 sgd_solver.cpp:166] Iteration 40000, lr = 1\nI0819 14:46:23.661007 22726 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0819 14:47:48.558416 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0819 14:47:48.558668 22726 solver.cpp:404]     Test net output #1: loss = 0.420009 (* 1 = 0.420009 loss)\nI0819 14:47:49.888573 22726 solver.cpp:228] Iteration 40100, loss = 0.0756406\nI0819 14:47:49.888619 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:47:49.888634 22726 solver.cpp:244]     Train net output #1: loss = 0.0756404 (* 1 = 0.0756404 loss)\nI0819 14:47:49.975463 22726 sgd_solver.cpp:166] Iteration 40100, lr = 1.0025\nI0819 14:50:07.776142 22726 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0819 14:51:32.882784 22726 solver.cpp:404]     Test net output #0: accuracy = 0.880521\nI0819 14:51:32.883112 22726 solver.cpp:404]     Test net output #1: loss = 0.437502 (* 1 = 0.437502 loss)\nI0819 14:51:34.212599 22726 solver.cpp:228] Iteration 40200, loss = 0.0715516\nI0819 14:51:34.212644 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:51:34.212661 22726 solver.cpp:244]     Train net output #1: loss = 0.0715514 (* 1 = 0.0715514 loss)\nI0819 14:51:34.293429 22726 sgd_solver.cpp:166] Iteration 40200, lr = 1.005\nI0819 14:53:52.091289 22726 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0819 14:55:16.834141 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0819 14:55:16.834383 22726 solver.cpp:404]     Test net output #1: loss = 0.441575 (* 1 = 0.441575 loss)\nI0819 14:55:18.164894 22726 solver.cpp:228] Iteration 40300, loss = 0.0520737\nI0819 14:55:18.164937 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:55:18.164958 22726 solver.cpp:244]     Train net output #1: loss = 0.0520735 (* 1 = 0.0520735 loss)\nI0819 14:55:18.243198 22726 sgd_solver.cpp:166] Iteration 40300, lr = 1.0075\nI0819 14:57:36.054780 22726 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0819 14:59:01.100524 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87852\nI0819 14:59:01.100780 22726 solver.cpp:404]     Test net output #1: loss = 0.470168 (* 1 = 0.470168 loss)\nI0819 14:59:02.431170 22726 solver.cpp:228] Iteration 40400, loss = 0.0846796\nI0819 14:59:02.431213 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:59:02.431229 22726 solver.cpp:244]     Train net output #1: loss = 0.0846794 (* 1 = 0.0846794 loss)\nI0819 14:59:02.511971 22726 sgd_solver.cpp:166] Iteration 40400, lr = 1.01\nI0819 15:01:20.250792 22726 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0819 15:02:45.397644 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0819 15:02:45.397979 22726 solver.cpp:404]     Test net output #1: loss = 0.43428 (* 1 = 0.43428 loss)\nI0819 15:02:46.727648 22726 solver.cpp:228] Iteration 40500, loss = 0.0974222\nI0819 15:02:46.727691 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:02:46.727708 22726 solver.cpp:244]     Train net 
output #1: loss = 0.0974221 (* 1 = 0.0974221 loss)\nI0819 15:02:46.807709 22726 sgd_solver.cpp:166] Iteration 40500, lr = 1.0125\nI0819 15:05:04.466759 22726 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0819 15:06:29.510972 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8878\nI0819 15:06:29.511252 22726 solver.cpp:404]     Test net output #1: loss = 0.411102 (* 1 = 0.411102 loss)\nI0819 15:06:30.840848 22726 solver.cpp:228] Iteration 40600, loss = 0.0907582\nI0819 15:06:30.840893 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:06:30.840910 22726 solver.cpp:244]     Train net output #1: loss = 0.0907581 (* 1 = 0.0907581 loss)\nI0819 15:06:30.921100 22726 sgd_solver.cpp:166] Iteration 40600, lr = 1.015\nI0819 15:08:48.672329 22726 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0819 15:10:13.782232 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88304\nI0819 15:10:13.782505 22726 solver.cpp:404]     Test net output #1: loss = 0.424374 (* 1 = 0.424374 loss)\nI0819 15:10:15.113586 22726 solver.cpp:228] Iteration 40700, loss = 0.0722541\nI0819 15:10:15.113631 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:10:15.113647 22726 solver.cpp:244]     Train net output #1: loss = 0.0722539 (* 1 = 0.0722539 loss)\nI0819 15:10:15.192525 22726 sgd_solver.cpp:166] Iteration 40700, lr = 1.0175\nI0819 15:12:33.022027 22726 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0819 15:13:58.176062 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 15:13:58.176359 22726 solver.cpp:404]     Test net output #1: loss = 0.419228 (* 1 = 0.419228 loss)\nI0819 15:13:59.506028 22726 solver.cpp:228] Iteration 40800, loss = 0.0843472\nI0819 15:13:59.506074 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:13:59.506091 22726 solver.cpp:244]     Train net output #1: loss = 0.084347 (* 1 = 0.084347 loss)\nI0819 15:13:59.586179 22726 sgd_solver.cpp:166] Iteration 40800, lr 
= 1.02\nI0819 15:16:17.248021 22726 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0819 15:17:42.400990 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87876\nI0819 15:17:42.401268 22726 solver.cpp:404]     Test net output #1: loss = 0.455578 (* 1 = 0.455578 loss)\nI0819 15:17:43.730931 22726 solver.cpp:228] Iteration 40900, loss = 0.0974859\nI0819 15:17:43.730980 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:17:43.730999 22726 solver.cpp:244]     Train net output #1: loss = 0.0974857 (* 1 = 0.0974857 loss)\nI0819 15:17:43.813105 22726 sgd_solver.cpp:166] Iteration 40900, lr = 1.0225\nI0819 15:20:01.592986 22726 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0819 15:21:26.739270 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87768\nI0819 15:21:26.739572 22726 solver.cpp:404]     Test net output #1: loss = 0.438502 (* 1 = 0.438502 loss)\nI0819 15:21:28.070978 22726 solver.cpp:228] Iteration 41000, loss = 0.0699136\nI0819 15:21:28.071022 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:21:28.071038 22726 solver.cpp:244]     Train net output #1: loss = 0.0699134 (* 1 = 0.0699134 loss)\nI0819 15:21:28.147475 22726 sgd_solver.cpp:166] Iteration 41000, lr = 1.025\nI0819 15:23:45.919183 22726 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0819 15:25:11.035980 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88088\nI0819 15:25:11.036259 22726 solver.cpp:404]     Test net output #1: loss = 0.459575 (* 1 = 0.459575 loss)\nI0819 15:25:12.367382 22726 solver.cpp:228] Iteration 41100, loss = 0.185923\nI0819 15:25:12.367425 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 15:25:12.367442 22726 solver.cpp:244]     Train net output #1: loss = 0.185923 (* 1 = 0.185923 loss)\nI0819 15:25:12.451439 22726 sgd_solver.cpp:166] Iteration 41100, lr = 1.0275\nI0819 15:27:30.217934 22726 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0819 15:28:55.348373 22726 
solver.cpp:404]     Test net output #0: accuracy = 0.87996\nI0819 15:28:55.348659 22726 solver.cpp:404]     Test net output #1: loss = 0.424589 (* 1 = 0.424589 loss)\nI0819 15:28:56.678125 22726 solver.cpp:228] Iteration 41200, loss = 0.0909304\nI0819 15:28:56.678169 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:28:56.678186 22726 solver.cpp:244]     Train net output #1: loss = 0.0909302 (* 1 = 0.0909302 loss)\nI0819 15:28:56.761217 22726 sgd_solver.cpp:166] Iteration 41200, lr = 1.03\nI0819 15:31:14.463173 22726 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0819 15:32:39.575937 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0819 15:32:39.576236 22726 solver.cpp:404]     Test net output #1: loss = 0.441303 (* 1 = 0.441303 loss)\nI0819 15:32:40.905644 22726 solver.cpp:228] Iteration 41300, loss = 0.10088\nI0819 15:32:40.905689 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:32:40.905705 22726 solver.cpp:244]     Train net output #1: loss = 0.10088 (* 1 = 0.10088 loss)\nI0819 15:32:40.991164 22726 sgd_solver.cpp:166] Iteration 41300, lr = 1.0325\nI0819 15:34:58.705114 22726 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0819 15:36:23.828917 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0819 15:36:23.829211 22726 solver.cpp:404]     Test net output #1: loss = 0.425179 (* 1 = 0.425179 loss)\nI0819 15:36:25.160259 22726 solver.cpp:228] Iteration 41400, loss = 0.107576\nI0819 15:36:25.160303 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 15:36:25.160318 22726 solver.cpp:244]     Train net output #1: loss = 0.107576 (* 1 = 0.107576 loss)\nI0819 15:36:25.243655 22726 sgd_solver.cpp:166] Iteration 41400, lr = 1.035\nI0819 15:38:42.929360 22726 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0819 15:40:08.055935 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88404\nI0819 15:40:08.056210 22726 solver.cpp:404]     Test net output #1: 
loss = 0.431507 (* 1 = 0.431507 loss)\nI0819 15:40:09.386792 22726 solver.cpp:228] Iteration 41500, loss = 0.147465\nI0819 15:40:09.386837 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:40:09.386852 22726 solver.cpp:244]     Train net output #1: loss = 0.147465 (* 1 = 0.147465 loss)\nI0819 15:40:09.463538 22726 sgd_solver.cpp:166] Iteration 41500, lr = 1.0375\nI0819 15:42:27.178031 22726 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0819 15:43:52.220455 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87248\nI0819 15:43:52.220784 22726 solver.cpp:404]     Test net output #1: loss = 0.459651 (* 1 = 0.459651 loss)\nI0819 15:43:53.551825 22726 solver.cpp:228] Iteration 41600, loss = 0.0988391\nI0819 15:43:53.551869 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:43:53.551885 22726 solver.cpp:244]     Train net output #1: loss = 0.0988389 (* 1 = 0.0988389 loss)\nI0819 15:43:53.635951 22726 sgd_solver.cpp:166] Iteration 41600, lr = 1.04\nI0819 15:46:11.357810 22726 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0819 15:47:36.266216 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 15:47:36.266505 22726 solver.cpp:404]     Test net output #1: loss = 0.432032 (* 1 = 0.432032 loss)\nI0819 15:47:37.595846 22726 solver.cpp:228] Iteration 41700, loss = 0.0970362\nI0819 15:47:37.595890 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:47:37.595906 22726 solver.cpp:244]     Train net output #1: loss = 0.0970361 (* 1 = 0.0970361 loss)\nI0819 15:47:37.668362 22726 sgd_solver.cpp:166] Iteration 41700, lr = 1.0425\nI0819 15:49:55.152839 22726 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0819 15:51:19.975368 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87748\nI0819 15:51:19.975631 22726 solver.cpp:404]     Test net output #1: loss = 0.44826 (* 1 = 0.44826 loss)\nI0819 15:51:21.305503 22726 solver.cpp:228] Iteration 41800, loss = 0.079757\nI0819 
15:51:21.305546 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:51:21.305562 22726 solver.cpp:244]     Train net output #1: loss = 0.0797569 (* 1 = 0.0797569 loss)\nI0819 15:51:21.381971 22726 sgd_solver.cpp:166] Iteration 41800, lr = 1.045\nI0819 15:53:38.962419 22726 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0819 15:55:03.800855 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88516\nI0819 15:55:03.801156 22726 solver.cpp:404]     Test net output #1: loss = 0.408636 (* 1 = 0.408636 loss)\nI0819 15:55:05.132336 22726 solver.cpp:228] Iteration 41900, loss = 0.0784445\nI0819 15:55:05.132378 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 15:55:05.132395 22726 solver.cpp:244]     Train net output #1: loss = 0.0784444 (* 1 = 0.0784444 loss)\nI0819 15:55:05.207399 22726 sgd_solver.cpp:166] Iteration 41900, lr = 1.0475\nI0819 15:57:22.693509 22726 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0819 15:58:47.788733 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8776\nI0819 15:58:47.789026 22726 solver.cpp:404]     Test net output #1: loss = 0.449468 (* 1 = 0.449468 loss)\nI0819 15:58:49.118413 22726 solver.cpp:228] Iteration 42000, loss = 0.146537\nI0819 15:58:49.118455 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:58:49.118470 22726 solver.cpp:244]     Train net output #1: loss = 0.146537 (* 1 = 0.146537 loss)\nI0819 15:58:49.199465 22726 sgd_solver.cpp:166] Iteration 42000, lr = 1.05\nI0819 16:01:06.728054 22726 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0819 16:02:31.812613 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87364\nI0819 16:02:31.812896 22726 solver.cpp:404]     Test net output #1: loss = 0.446916 (* 1 = 0.446916 loss)\nI0819 16:02:33.143167 22726 solver.cpp:228] Iteration 42100, loss = 0.200835\nI0819 16:02:33.143208 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 16:02:33.143224 22726 
solver.cpp:244]     Train net output #1: loss = 0.200835 (* 1 = 0.200835 loss)\nI0819 16:02:33.221462 22726 sgd_solver.cpp:166] Iteration 42100, lr = 1.0525\nI0819 16:04:50.668620 22726 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0819 16:06:15.653465 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 16:06:15.653753 22726 solver.cpp:404]     Test net output #1: loss = 0.416616 (* 1 = 0.416616 loss)\nI0819 16:06:16.983779 22726 solver.cpp:228] Iteration 42200, loss = 0.0525403\nI0819 16:06:16.983821 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 16:06:16.983837 22726 solver.cpp:244]     Train net output #1: loss = 0.0525402 (* 1 = 0.0525402 loss)\nI0819 16:06:17.065155 22726 sgd_solver.cpp:166] Iteration 42200, lr = 1.055\nI0819 16:08:34.553774 22726 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0819 16:09:59.522421 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 16:09:59.522708 22726 solver.cpp:404]     Test net output #1: loss = 0.435527 (* 1 = 0.435527 loss)\nI0819 16:10:00.853998 22726 solver.cpp:228] Iteration 42300, loss = 0.0940859\nI0819 16:10:00.854041 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:10:00.854058 22726 solver.cpp:244]     Train net output #1: loss = 0.0940858 (* 1 = 0.0940858 loss)\nI0819 16:10:00.931179 22726 sgd_solver.cpp:166] Iteration 42300, lr = 1.0575\nI0819 16:12:18.523306 22726 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0819 16:13:43.401370 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 16:13:43.401659 22726 solver.cpp:404]     Test net output #1: loss = 0.420347 (* 1 = 0.420347 loss)\nI0819 16:13:44.733059 22726 solver.cpp:228] Iteration 42400, loss = 0.119676\nI0819 16:13:44.733101 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:13:44.733117 22726 solver.cpp:244]     Train net output #1: loss = 0.119676 (* 1 = 0.119676 loss)\nI0819 16:13:44.811556 22726 
sgd_solver.cpp:166] Iteration 42400, lr = 1.06\nI0819 16:16:02.335233 22726 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0819 16:17:27.135213 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88356\nI0819 16:17:27.135545 22726 solver.cpp:404]     Test net output #1: loss = 0.431356 (* 1 = 0.431356 loss)\nI0819 16:17:28.465512 22726 solver.cpp:228] Iteration 42500, loss = 0.0821538\nI0819 16:17:28.465556 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:17:28.465572 22726 solver.cpp:244]     Train net output #1: loss = 0.0821536 (* 1 = 0.0821536 loss)\nI0819 16:17:28.548283 22726 sgd_solver.cpp:166] Iteration 42500, lr = 1.0625\nI0819 16:19:46.126786 22726 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0819 16:21:10.997858 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 16:21:10.998160 22726 solver.cpp:404]     Test net output #1: loss = 0.416378 (* 1 = 0.416378 loss)\nI0819 16:21:12.329241 22726 solver.cpp:228] Iteration 42600, loss = 0.15107\nI0819 16:21:12.329285 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:21:12.329301 22726 solver.cpp:244]     Train net output #1: loss = 0.15107 (* 1 = 0.15107 loss)\nI0819 16:21:12.408110 22726 sgd_solver.cpp:166] Iteration 42600, lr = 1.065\nI0819 16:23:29.849527 22726 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0819 16:24:54.701652 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 16:24:54.701896 22726 solver.cpp:404]     Test net output #1: loss = 0.41011 (* 1 = 0.41011 loss)\nI0819 16:24:56.031538 22726 solver.cpp:228] Iteration 42700, loss = 0.0919337\nI0819 16:24:56.031579 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:24:56.031594 22726 solver.cpp:244]     Train net output #1: loss = 0.0919334 (* 1 = 0.0919334 loss)\nI0819 16:24:56.110299 22726 sgd_solver.cpp:166] Iteration 42700, lr = 1.0675\nI0819 16:27:13.549742 22726 solver.cpp:337] Iteration 42800, Testing net 
(#0)\nI0819 16:28:38.346746 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88188\nI0819 16:28:38.347000 22726 solver.cpp:404]     Test net output #1: loss = 0.424547 (* 1 = 0.424547 loss)\nI0819 16:28:39.676911 22726 solver.cpp:228] Iteration 42800, loss = 0.0951257\nI0819 16:28:39.676954 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:28:39.676970 22726 solver.cpp:244]     Train net output #1: loss = 0.0951255 (* 1 = 0.0951255 loss)\nI0819 16:28:39.754849 22726 sgd_solver.cpp:166] Iteration 42800, lr = 1.07\nI0819 16:30:57.260035 22726 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0819 16:32:22.191169 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0819 16:32:22.191488 22726 solver.cpp:404]     Test net output #1: loss = 0.425607 (* 1 = 0.425607 loss)\nI0819 16:32:23.521741 22726 solver.cpp:228] Iteration 42900, loss = 0.110396\nI0819 16:32:23.521783 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:32:23.521800 22726 solver.cpp:244]     Train net output #1: loss = 0.110396 (* 1 = 0.110396 loss)\nI0819 16:32:23.596628 22726 sgd_solver.cpp:166] Iteration 42900, lr = 1.0725\nI0819 16:34:41.049579 22726 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0819 16:36:06.119321 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87936\nI0819 16:36:06.119618 22726 solver.cpp:404]     Test net output #1: loss = 0.424524 (* 1 = 0.424524 loss)\nI0819 16:36:07.449546 22726 solver.cpp:228] Iteration 43000, loss = 0.0878933\nI0819 16:36:07.449584 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:36:07.449599 22726 solver.cpp:244]     Train net output #1: loss = 0.0878931 (* 1 = 0.0878931 loss)\nI0819 16:36:07.525980 22726 sgd_solver.cpp:166] Iteration 43000, lr = 1.075\nI0819 16:38:24.929280 22726 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0819 16:39:50.036535 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0819 16:39:50.036826 22726 
solver.cpp:404]     Test net output #1: loss = 0.439756 (* 1 = 0.439756 loss)\nI0819 16:39:51.366554 22726 solver.cpp:228] Iteration 43100, loss = 0.104938\nI0819 16:39:51.366593 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:39:51.366610 22726 solver.cpp:244]     Train net output #1: loss = 0.104938 (* 1 = 0.104938 loss)\nI0819 16:39:51.444577 22726 sgd_solver.cpp:166] Iteration 43100, lr = 1.0775\nI0819 16:42:08.863945 22726 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0819 16:43:33.996430 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8776\nI0819 16:43:33.996695 22726 solver.cpp:404]     Test net output #1: loss = 0.447823 (* 1 = 0.447823 loss)\nI0819 16:43:35.326504 22726 solver.cpp:228] Iteration 43200, loss = 0.184124\nI0819 16:43:35.326547 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 16:43:35.326562 22726 solver.cpp:244]     Train net output #1: loss = 0.184124 (* 1 = 0.184124 loss)\nI0819 16:43:35.402746 22726 sgd_solver.cpp:166] Iteration 43200, lr = 1.08\nI0819 16:45:52.883839 22726 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0819 16:47:17.995203 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86888\nI0819 16:47:17.995465 22726 solver.cpp:404]     Test net output #1: loss = 0.501064 (* 1 = 0.501064 loss)\nI0819 16:47:19.325191 22726 solver.cpp:228] Iteration 43300, loss = 0.19333\nI0819 16:47:19.325232 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 16:47:19.325248 22726 solver.cpp:244]     Train net output #1: loss = 0.193329 (* 1 = 0.193329 loss)\nI0819 16:47:19.406035 22726 sgd_solver.cpp:166] Iteration 43300, lr = 1.0825\nI0819 16:49:36.849910 22726 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0819 16:51:01.967381 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0819 16:51:01.967679 22726 solver.cpp:404]     Test net output #1: loss = 0.404461 (* 1 = 0.404461 loss)\nI0819 16:51:03.296964 22726 solver.cpp:228] 
Iteration 43400, loss = 0.204437\nI0819 16:51:03.297009 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 16:51:03.297024 22726 solver.cpp:244]     Train net output #1: loss = 0.204437 (* 1 = 0.204437 loss)\nI0819 16:51:03.379329 22726 sgd_solver.cpp:166] Iteration 43400, lr = 1.085\nI0819 16:53:20.862982 22726 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0819 16:54:46.001421 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87724\nI0819 16:54:46.001680 22726 solver.cpp:404]     Test net output #1: loss = 0.441079 (* 1 = 0.441079 loss)\nI0819 16:54:47.331423 22726 solver.cpp:228] Iteration 43500, loss = 0.146761\nI0819 16:54:47.331465 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:54:47.331481 22726 solver.cpp:244]     Train net output #1: loss = 0.146761 (* 1 = 0.146761 loss)\nI0819 16:54:47.406639 22726 sgd_solver.cpp:166] Iteration 43500, lr = 1.0875\nI0819 16:57:04.878435 22726 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0819 16:58:29.916144 22726 solver.cpp:404]     Test net output #0: accuracy = 0.886\nI0819 16:58:29.916399 22726 solver.cpp:404]     Test net output #1: loss = 0.417883 (* 1 = 0.417883 loss)\nI0819 16:58:31.246407 22726 solver.cpp:228] Iteration 43600, loss = 0.130605\nI0819 16:58:31.246449 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 16:58:31.246465 22726 solver.cpp:244]     Train net output #1: loss = 0.130605 (* 1 = 0.130605 loss)\nI0819 16:58:31.326779 22726 sgd_solver.cpp:166] Iteration 43600, lr = 1.09\nI0819 17:00:48.825153 22726 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0819 17:02:13.801988 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88736\nI0819 17:02:13.802320 22726 solver.cpp:404]     Test net output #1: loss = 0.423079 (* 1 = 0.423079 loss)\nI0819 17:02:15.131331 22726 solver.cpp:228] Iteration 43700, loss = 0.182598\nI0819 17:02:15.131374 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 
17:02:15.131391 22726 solver.cpp:244]     Train net output #1: loss = 0.182598 (* 1 = 0.182598 loss)\nI0819 17:02:15.209754 22726 sgd_solver.cpp:166] Iteration 43700, lr = 1.0925\nI0819 17:04:32.679133 22726 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0819 17:05:57.790673 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87844\nI0819 17:05:57.790935 22726 solver.cpp:404]     Test net output #1: loss = 0.428662 (* 1 = 0.428662 loss)\nI0819 17:05:59.120579 22726 solver.cpp:228] Iteration 43800, loss = 0.0918641\nI0819 17:05:59.120623 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:05:59.120640 22726 solver.cpp:244]     Train net output #1: loss = 0.0918639 (* 1 = 0.0918639 loss)\nI0819 17:05:59.196118 22726 sgd_solver.cpp:166] Iteration 43800, lr = 1.095\nI0819 17:08:16.654353 22726 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0819 17:09:41.488696 22726 solver.cpp:404]     Test net output #0: accuracy = 0.89\nI0819 17:09:41.488965 22726 solver.cpp:404]     Test net output #1: loss = 0.393206 (* 1 = 0.393206 loss)\nI0819 17:09:42.818874 22726 solver.cpp:228] Iteration 43900, loss = 0.120798\nI0819 17:09:42.818917 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:09:42.818933 22726 solver.cpp:244]     Train net output #1: loss = 0.120797 (* 1 = 0.120797 loss)\nI0819 17:09:42.898593 22726 sgd_solver.cpp:166] Iteration 43900, lr = 1.0975\nI0819 17:12:00.394758 22726 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0819 17:13:25.163998 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88708\nI0819 17:13:25.164276 22726 solver.cpp:404]     Test net output #1: loss = 0.419186 (* 1 = 0.419186 loss)\nI0819 17:13:26.493723 22726 solver.cpp:228] Iteration 44000, loss = 0.102897\nI0819 17:13:26.493767 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 17:13:26.493784 22726 solver.cpp:244]     Train net output #1: loss = 0.102897 (* 1 = 0.102897 loss)\nI0819 17:13:26.574417 
22726 sgd_solver.cpp:166] Iteration 44000, lr = 1.1\nI0819 17:15:44.149307 22726 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0819 17:17:09.129048 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0819 17:17:09.129318 22726 solver.cpp:404]     Test net output #1: loss = 0.405243 (* 1 = 0.405243 loss)\nI0819 17:17:10.459347 22726 solver.cpp:228] Iteration 44100, loss = 0.227105\nI0819 17:17:10.459389 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 17:17:10.459405 22726 solver.cpp:244]     Train net output #1: loss = 0.227104 (* 1 = 0.227104 loss)\nI0819 17:17:10.535346 22726 sgd_solver.cpp:166] Iteration 44100, lr = 1.1025\nI0819 17:19:28.015758 22726 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0819 17:20:52.810108 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88544\nI0819 17:20:52.810403 22726 solver.cpp:404]     Test net output #1: loss = 0.420298 (* 1 = 0.420298 loss)\nI0819 17:20:54.140688 22726 solver.cpp:228] Iteration 44200, loss = 0.0370334\nI0819 17:20:54.140730 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 17:20:54.140748 22726 solver.cpp:244]     Train net output #1: loss = 0.0370333 (* 1 = 0.0370333 loss)\nI0819 17:20:54.221444 22726 sgd_solver.cpp:166] Iteration 44200, lr = 1.105\nI0819 17:23:11.725849 22726 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0819 17:24:36.538452 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87516\nI0819 17:24:36.538689 22726 solver.cpp:404]     Test net output #1: loss = 0.445707 (* 1 = 0.445707 loss)\nI0819 17:24:37.867678 22726 solver.cpp:228] Iteration 44300, loss = 0.081498\nI0819 17:24:37.867720 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:24:37.867735 22726 solver.cpp:244]     Train net output #1: loss = 0.0814978 (* 1 = 0.0814978 loss)\nI0819 17:24:37.945550 22726 sgd_solver.cpp:166] Iteration 44300, lr = 1.1075\nI0819 17:26:55.424620 22726 solver.cpp:337] Iteration 44400, Testing 
net (#0)\nI0819 17:28:20.211244 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 17:28:20.211486 22726 solver.cpp:404]     Test net output #1: loss = 0.429527 (* 1 = 0.429527 loss)\nI0819 17:28:21.541242 22726 solver.cpp:228] Iteration 44400, loss = 0.11621\nI0819 17:28:21.541285 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:28:21.541301 22726 solver.cpp:244]     Train net output #1: loss = 0.116209 (* 1 = 0.116209 loss)\nI0819 17:28:21.621300 22726 sgd_solver.cpp:166] Iteration 44400, lr = 1.11\nI0819 17:30:39.106544 22726 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0819 17:32:03.896246 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8862\nI0819 17:32:03.896570 22726 solver.cpp:404]     Test net output #1: loss = 0.427583 (* 1 = 0.427583 loss)\nI0819 17:32:05.226454 22726 solver.cpp:228] Iteration 44500, loss = 0.0404185\nI0819 17:32:05.226496 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 17:32:05.226511 22726 solver.cpp:244]     Train net output #1: loss = 0.0404183 (* 1 = 0.0404183 loss)\nI0819 17:32:05.301136 22726 sgd_solver.cpp:166] Iteration 44500, lr = 1.1125\nI0819 17:34:22.792170 22726 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0819 17:35:47.842387 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87996\nI0819 17:35:47.842634 22726 solver.cpp:404]     Test net output #1: loss = 0.433381 (* 1 = 0.433381 loss)\nI0819 17:35:49.171835 22726 solver.cpp:228] Iteration 44600, loss = 0.15142\nI0819 17:35:49.171878 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:35:49.171892 22726 solver.cpp:244]     Train net output #1: loss = 0.15142 (* 1 = 0.15142 loss)\nI0819 17:35:49.246340 22726 sgd_solver.cpp:166] Iteration 44600, lr = 1.115\nI0819 17:38:06.934823 22726 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0819 17:39:31.784144 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88368\nI0819 17:39:31.784409 22726 
solver.cpp:404]     Test net output #1: loss = 0.425267 (* 1 = 0.425267 loss)\nI0819 17:39:33.114380 22726 solver.cpp:228] Iteration 44700, loss = 0.132285\nI0819 17:39:33.114428 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 17:39:33.114450 22726 solver.cpp:244]     Train net output #1: loss = 0.132285 (* 1 = 0.132285 loss)\nI0819 17:39:33.192706 22726 sgd_solver.cpp:166] Iteration 44700, lr = 1.1175\nI0819 17:41:51.021797 22726 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0819 17:43:16.164727 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88436\nI0819 17:43:16.165060 22726 solver.cpp:404]     Test net output #1: loss = 0.41835 (* 1 = 0.41835 loss)\nI0819 17:43:17.496475 22726 solver.cpp:228] Iteration 44800, loss = 0.145433\nI0819 17:43:17.496521 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:43:17.496543 22726 solver.cpp:244]     Train net output #1: loss = 0.145433 (* 1 = 0.145433 loss)\nI0819 17:43:17.576355 22726 sgd_solver.cpp:166] Iteration 44800, lr = 1.12\nI0819 17:45:35.325043 22726 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0819 17:47:00.485183 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88264\nI0819 17:47:00.485451 22726 solver.cpp:404]     Test net output #1: loss = 0.432994 (* 1 = 0.432994 loss)\nI0819 17:47:01.815129 22726 solver.cpp:228] Iteration 44900, loss = 0.1554\nI0819 17:47:01.815176 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:47:01.815201 22726 solver.cpp:244]     Train net output #1: loss = 0.155399 (* 1 = 0.155399 loss)\nI0819 17:47:01.885095 22726 sgd_solver.cpp:166] Iteration 44900, lr = 1.1225\nI0819 17:49:19.559855 22726 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0819 17:50:44.715332 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 17:50:44.715585 22726 solver.cpp:404]     Test net output #1: loss = 0.420074 (* 1 = 0.420074 loss)\nI0819 17:50:46.045999 22726 solver.cpp:228] 
Iteration 45000, loss = 0.0719199\nI0819 17:50:46.046038 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:50:46.046054 22726 solver.cpp:244]     Train net output #1: loss = 0.0719197 (* 1 = 0.0719197 loss)\nI0819 17:50:46.125102 22726 sgd_solver.cpp:166] Iteration 45000, lr = 1.125\nI0819 17:53:03.673178 22726 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0819 17:54:28.102478 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8812\nI0819 17:54:28.102774 22726 solver.cpp:404]     Test net output #1: loss = 0.435752 (* 1 = 0.435752 loss)\nI0819 17:54:29.430872 22726 solver.cpp:228] Iteration 45100, loss = 0.0476121\nI0819 17:54:29.430917 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 17:54:29.430940 22726 solver.cpp:244]     Train net output #1: loss = 0.0476119 (* 1 = 0.0476119 loss)\nI0819 17:54:29.513969 22726 sgd_solver.cpp:166] Iteration 45100, lr = 1.1275\nI0819 17:56:47.042331 22726 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0819 17:58:11.474210 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 17:58:11.474539 22726 solver.cpp:404]     Test net output #1: loss = 0.430082 (* 1 = 0.430082 loss)\nI0819 17:58:12.802101 22726 solver.cpp:228] Iteration 45200, loss = 0.124969\nI0819 17:58:12.802150 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:58:12.802173 22726 solver.cpp:244]     Train net output #1: loss = 0.124969 (* 1 = 0.124969 loss)\nI0819 17:58:12.888739 22726 sgd_solver.cpp:166] Iteration 45200, lr = 1.13\nI0819 18:00:30.418285 22726 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0819 18:01:54.813693 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87852\nI0819 18:01:54.814028 22726 solver.cpp:404]     Test net output #1: loss = 0.44426 (* 1 = 0.44426 loss)\nI0819 18:01:56.141800 22726 solver.cpp:228] Iteration 45300, loss = 0.185026\nI0819 18:01:56.141844 22726 solver.cpp:244]     Train net output #0: accuracy = 
0.928\nI0819 18:01:56.141867 22726 solver.cpp:244]     Train net output #1: loss = 0.185025 (* 1 = 0.185025 loss)\nI0819 18:01:56.225647 22726 sgd_solver.cpp:166] Iteration 45300, lr = 1.1325\nI0819 18:04:13.819963 22726 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0819 18:05:38.212400 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87856\nI0819 18:05:38.212725 22726 solver.cpp:404]     Test net output #1: loss = 0.431743 (* 1 = 0.431743 loss)\nI0819 18:05:39.540576 22726 solver.cpp:228] Iteration 45400, loss = 0.0966843\nI0819 18:05:39.540624 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:05:39.540648 22726 solver.cpp:244]     Train net output #1: loss = 0.0966841 (* 1 = 0.0966841 loss)\nI0819 18:05:39.624575 22726 sgd_solver.cpp:166] Iteration 45400, lr = 1.135\nI0819 18:07:57.154387 22726 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0819 18:09:21.550453 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88076\nI0819 18:09:21.550789 22726 solver.cpp:404]     Test net output #1: loss = 0.420523 (* 1 = 0.420523 loss)\nI0819 18:09:22.878618 22726 solver.cpp:228] Iteration 45500, loss = 0.111069\nI0819 18:09:22.878665 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:09:22.878690 22726 solver.cpp:244]     Train net output #1: loss = 0.111069 (* 1 = 0.111069 loss)\nI0819 18:09:22.960634 22726 sgd_solver.cpp:166] Iteration 45500, lr = 1.1375\nI0819 18:11:40.590859 22726 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0819 18:13:04.982164 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 18:13:04.982491 22726 solver.cpp:404]     Test net output #1: loss = 0.438675 (* 1 = 0.438675 loss)\nI0819 18:13:06.310500 22726 solver.cpp:228] Iteration 45600, loss = 0.0880177\nI0819 18:13:06.310549 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:13:06.310572 22726 solver.cpp:244]     Train net output #1: loss = 0.0880174 (* 1 = 0.0880174 loss)\nI0819 
18:13:06.390768 22726 sgd_solver.cpp:166] Iteration 45600, lr = 1.14\nI0819 18:15:24.136515 22726 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0819 18:16:48.525220 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87384\nI0819 18:16:48.525547 22726 solver.cpp:404]     Test net output #1: loss = 0.444564 (* 1 = 0.444564 loss)\nI0819 18:16:49.853263 22726 solver.cpp:228] Iteration 45700, loss = 0.12727\nI0819 18:16:49.853301 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:16:49.853322 22726 solver.cpp:244]     Train net output #1: loss = 0.12727 (* 1 = 0.12727 loss)\nI0819 18:16:49.935395 22726 sgd_solver.cpp:166] Iteration 45700, lr = 1.1425\nI0819 18:19:07.624141 22726 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0819 18:20:32.016074 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0819 18:20:32.016417 22726 solver.cpp:404]     Test net output #1: loss = 0.408267 (* 1 = 0.408267 loss)\nI0819 18:20:33.342885 22726 solver.cpp:228] Iteration 45800, loss = 0.0703556\nI0819 18:20:33.342933 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:20:33.342957 22726 solver.cpp:244]     Train net output #1: loss = 0.0703554 (* 1 = 0.0703554 loss)\nI0819 18:20:33.429291 22726 sgd_solver.cpp:166] Iteration 45800, lr = 1.145\nI0819 18:22:50.967988 22726 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0819 18:24:15.381077 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8888\nI0819 18:24:15.381400 22726 solver.cpp:404]     Test net output #1: loss = 0.397893 (* 1 = 0.397893 loss)\nI0819 18:24:16.708601 22726 solver.cpp:228] Iteration 45900, loss = 0.136161\nI0819 18:24:16.708640 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:24:16.708662 22726 solver.cpp:244]     Train net output #1: loss = 0.13616 (* 1 = 0.13616 loss)\nI0819 18:24:16.789161 22726 sgd_solver.cpp:166] Iteration 45900, lr = 1.1475\nI0819 18:26:34.413347 22726 solver.cpp:337] Iteration 46000, 
Testing net (#0)\nI0819 18:27:58.867154 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0819 18:27:58.867465 22726 solver.cpp:404]     Test net output #1: loss = 0.421817 (* 1 = 0.421817 loss)\nI0819 18:28:00.194694 22726 solver.cpp:228] Iteration 46000, loss = 0.0533006\nI0819 18:28:00.194738 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 18:28:00.194756 22726 solver.cpp:244]     Train net output #1: loss = 0.0533004 (* 1 = 0.0533004 loss)\nI0819 18:28:00.282202 22726 sgd_solver.cpp:166] Iteration 46000, lr = 1.15\nI0819 18:30:17.887912 22726 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0819 18:31:42.295480 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88784\nI0819 18:31:42.295801 22726 solver.cpp:404]     Test net output #1: loss = 0.403742 (* 1 = 0.403742 loss)\nI0819 18:31:43.624711 22726 solver.cpp:228] Iteration 46100, loss = 0.121245\nI0819 18:31:43.624756 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:31:43.624773 22726 solver.cpp:244]     Train net output #1: loss = 0.121245 (* 1 = 0.121245 loss)\nI0819 18:31:43.710935 22726 sgd_solver.cpp:166] Iteration 46100, lr = 1.1525\nI0819 18:34:01.343050 22726 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0819 18:35:25.785075 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881601\nI0819 18:35:25.785406 22726 solver.cpp:404]     Test net output #1: loss = 0.421497 (* 1 = 0.421497 loss)\nI0819 18:35:27.113411 22726 solver.cpp:228] Iteration 46200, loss = 0.093508\nI0819 18:35:27.113457 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:35:27.113481 22726 solver.cpp:244]     Train net output #1: loss = 0.0935078 (* 1 = 0.0935078 loss)\nI0819 18:35:27.197077 22726 sgd_solver.cpp:166] Iteration 46200, lr = 1.155\nI0819 18:37:44.852674 22726 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0819 18:39:09.303418 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88488\nI0819 
18:39:09.303750 22726 solver.cpp:404]     Test net output #1: loss = 0.399207 (* 1 = 0.399207 loss)\nI0819 18:39:10.631417 22726 solver.cpp:228] Iteration 46300, loss = 0.0822586\nI0819 18:39:10.631459 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:39:10.631484 22726 solver.cpp:244]     Train net output #1: loss = 0.0822584 (* 1 = 0.0822584 loss)\nI0819 18:39:10.710960 22726 sgd_solver.cpp:166] Iteration 46300, lr = 1.1575\nI0819 18:41:28.426810 22726 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0819 18:42:52.856968 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88\nI0819 18:42:52.857292 22726 solver.cpp:404]     Test net output #1: loss = 0.438245 (* 1 = 0.438245 loss)\nI0819 18:42:54.184001 22726 solver.cpp:228] Iteration 46400, loss = 0.152211\nI0819 18:42:54.184034 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:42:54.184049 22726 solver.cpp:244]     Train net output #1: loss = 0.152211 (* 1 = 0.152211 loss)\nI0819 18:42:54.266265 22726 sgd_solver.cpp:166] Iteration 46400, lr = 1.16\nI0819 18:45:11.850544 22726 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0819 18:46:36.281000 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0819 18:46:36.281324 22726 solver.cpp:404]     Test net output #1: loss = 0.39519 (* 1 = 0.39519 loss)\nI0819 18:46:37.608018 22726 solver.cpp:228] Iteration 46500, loss = 0.0745814\nI0819 18:46:37.608055 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:46:37.608070 22726 solver.cpp:244]     Train net output #1: loss = 0.0745812 (* 1 = 0.0745812 loss)\nI0819 18:46:37.689085 22726 sgd_solver.cpp:166] Iteration 46500, lr = 1.1625\nI0819 18:48:55.183740 22726 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0819 18:50:19.615010 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88708\nI0819 18:50:19.615320 22726 solver.cpp:404]     Test net output #1: loss = 0.404372 (* 1 = 0.404372 loss)\nI0819 18:50:20.942342 
22726 solver.cpp:228] Iteration 46600, loss = 0.0939489\nI0819 18:50:20.942379 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:50:20.942395 22726 solver.cpp:244]     Train net output #1: loss = 0.0939487 (* 1 = 0.0939487 loss)\nI0819 18:50:21.021126 22726 sgd_solver.cpp:166] Iteration 46600, lr = 1.165\nI0819 18:52:38.539577 22726 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0819 18:54:02.968257 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0819 18:54:02.968571 22726 solver.cpp:404]     Test net output #1: loss = 0.41529 (* 1 = 0.41529 loss)\nI0819 18:54:04.295377 22726 solver.cpp:228] Iteration 46700, loss = 0.0895156\nI0819 18:54:04.295415 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:54:04.295431 22726 solver.cpp:244]     Train net output #1: loss = 0.0895154 (* 1 = 0.0895154 loss)\nI0819 18:54:04.379843 22726 sgd_solver.cpp:166] Iteration 46700, lr = 1.1675\nI0819 18:56:21.920337 22726 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0819 18:57:46.353533 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0819 18:57:46.353849 22726 solver.cpp:404]     Test net output #1: loss = 0.443431 (* 1 = 0.443431 loss)\nI0819 18:57:47.681288 22726 solver.cpp:228] Iteration 46800, loss = 0.223196\nI0819 18:57:47.681330 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:57:47.681346 22726 solver.cpp:244]     Train net output #1: loss = 0.223196 (* 1 = 0.223196 loss)\nI0819 18:57:47.765404 22726 sgd_solver.cpp:166] Iteration 46800, lr = 1.17\nI0819 19:00:05.252040 22726 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0819 19:01:29.689455 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86996\nI0819 19:01:29.689779 22726 solver.cpp:404]     Test net output #1: loss = 0.4753 (* 1 = 0.4753 loss)\nI0819 19:01:31.016886 22726 solver.cpp:228] Iteration 46900, loss = 0.223155\nI0819 19:01:31.016923 22726 solver.cpp:244]     Train net output #0: 
accuracy = 0.92\nI0819 19:01:31.016938 22726 solver.cpp:244]     Train net output #1: loss = 0.223155 (* 1 = 0.223155 loss)\nI0819 19:01:31.096772 22726 sgd_solver.cpp:166] Iteration 46900, lr = 1.1725\nI0819 19:03:48.602079 22726 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0819 19:05:13.038480 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0819 19:05:13.038808 22726 solver.cpp:404]     Test net output #1: loss = 0.417122 (* 1 = 0.417122 loss)\nI0819 19:05:14.366179 22726 solver.cpp:228] Iteration 47000, loss = 0.0679621\nI0819 19:05:14.366221 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 19:05:14.366237 22726 solver.cpp:244]     Train net output #1: loss = 0.0679619 (* 1 = 0.0679619 loss)\nI0819 19:05:14.451835 22726 sgd_solver.cpp:166] Iteration 47000, lr = 1.175\nI0819 19:07:31.962970 22726 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0819 19:08:56.398727 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0819 19:08:56.399044 22726 solver.cpp:404]     Test net output #1: loss = 0.428933 (* 1 = 0.428933 loss)\nI0819 19:08:57.725436 22726 solver.cpp:228] Iteration 47100, loss = 0.146672\nI0819 19:08:57.725476 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:08:57.725492 22726 solver.cpp:244]     Train net output #1: loss = 0.146672 (* 1 = 0.146672 loss)\nI0819 19:08:57.806758 22726 sgd_solver.cpp:166] Iteration 47100, lr = 1.1775\nI0819 19:11:15.294486 22726 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0819 19:12:39.729275 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87496\nI0819 19:12:39.729604 22726 solver.cpp:404]     Test net output #1: loss = 0.448276 (* 1 = 0.448276 loss)\nI0819 19:12:41.056627 22726 solver.cpp:228] Iteration 47200, loss = 0.153864\nI0819 19:12:41.056668 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 19:12:41.056684 22726 solver.cpp:244]     Train net output #1: loss = 0.153864 (* 1 = 0.153864 
loss)\nI0819 19:12:41.143465 22726 sgd_solver.cpp:166] Iteration 47200, lr = 1.18\nI0819 19:14:58.732410 22726 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0819 19:16:23.171239 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87556\nI0819 19:16:23.171563 22726 solver.cpp:404]     Test net output #1: loss = 0.443823 (* 1 = 0.443823 loss)\nI0819 19:16:24.502106 22726 solver.cpp:228] Iteration 47300, loss = 0.0900085\nI0819 19:16:24.502147 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 19:16:24.502162 22726 solver.cpp:244]     Train net output #1: loss = 0.0900083 (* 1 = 0.0900083 loss)\nI0819 19:16:24.586990 22726 sgd_solver.cpp:166] Iteration 47300, lr = 1.1825\nI0819 19:18:42.091874 22726 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0819 19:20:06.519081 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0819 19:20:06.519405 22726 solver.cpp:404]     Test net output #1: loss = 0.433187 (* 1 = 0.433187 loss)\nI0819 19:20:07.850252 22726 solver.cpp:228] Iteration 47400, loss = 0.133181\nI0819 19:20:07.850293 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:20:07.850309 22726 solver.cpp:244]     Train net output #1: loss = 0.13318 (* 1 = 0.13318 loss)\nI0819 19:20:07.929139 22726 sgd_solver.cpp:166] Iteration 47400, lr = 1.185\nI0819 19:22:25.540318 22726 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0819 19:23:50.020970 22726 solver.cpp:404]     Test net output #0: accuracy = 0.880681\nI0819 19:23:50.021303 22726 solver.cpp:404]     Test net output #1: loss = 0.420161 (* 1 = 0.420161 loss)\nI0819 19:23:51.352929 22726 solver.cpp:228] Iteration 47500, loss = 0.0891745\nI0819 19:23:51.352972 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 19:23:51.352988 22726 solver.cpp:244]     Train net output #1: loss = 0.0891742 (* 1 = 0.0891742 loss)\nI0819 19:23:51.441884 22726 sgd_solver.cpp:166] Iteration 47500, lr = 1.1875\nI0819 19:26:09.007803 22726 
solver.cpp:337] Iteration 47600, Testing net (#0)\nI0819 19:27:33.458338 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881561\nI0819 19:27:33.458662 22726 solver.cpp:404]     Test net output #1: loss = 0.440841 (* 1 = 0.440841 loss)\nI0819 19:27:34.789350 22726 solver.cpp:228] Iteration 47600, loss = 0.113273\nI0819 19:27:34.789393 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 19:27:34.789410 22726 solver.cpp:244]     Train net output #1: loss = 0.113273 (* 1 = 0.113273 loss)\nI0819 19:27:34.873459 22726 sgd_solver.cpp:166] Iteration 47600, lr = 1.19\nI0819 19:29:52.774245 22726 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0819 19:31:17.250519 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86996\nI0819 19:31:17.250846 22726 solver.cpp:404]     Test net output #1: loss = 0.474175 (* 1 = 0.474175 loss)\nI0819 19:31:18.582727 22726 solver.cpp:228] Iteration 47700, loss = 0.205607\nI0819 19:31:18.582768 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 19:31:18.582785 22726 solver.cpp:244]     Train net output #1: loss = 0.205607 (* 1 = 0.205607 loss)\nI0819 19:31:18.663760 22726 sgd_solver.cpp:166] Iteration 47700, lr = 1.1925\nI0819 19:33:36.680502 22726 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0819 19:35:01.118135 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87192\nI0819 19:35:01.118469 22726 solver.cpp:404]     Test net output #1: loss = 0.456475 (* 1 = 0.456475 loss)\nI0819 19:35:02.449463 22726 solver.cpp:228] Iteration 47800, loss = 0.10647\nI0819 19:35:02.449503 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:35:02.449520 22726 solver.cpp:244]     Train net output #1: loss = 0.10647 (* 1 = 0.10647 loss)\nI0819 19:35:02.535939 22726 sgd_solver.cpp:166] Iteration 47800, lr = 1.195\nI0819 19:37:20.434294 22726 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0819 19:38:44.903687 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.88428\nI0819 19:38:44.904014 22726 solver.cpp:404]     Test net output #1: loss = 0.437964 (* 1 = 0.437964 loss)\nI0819 19:38:46.235801 22726 solver.cpp:228] Iteration 47900, loss = 0.175451\nI0819 19:38:46.235843 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 19:38:46.235859 22726 solver.cpp:244]     Train net output #1: loss = 0.175451 (* 1 = 0.175451 loss)\nI0819 19:38:46.320001 22726 sgd_solver.cpp:166] Iteration 47900, lr = 1.1975\nI0819 19:41:04.160390 22726 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0819 19:42:28.652971 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87428\nI0819 19:42:28.653270 22726 solver.cpp:404]     Test net output #1: loss = 0.455786 (* 1 = 0.455786 loss)\nI0819 19:42:29.984650 22726 solver.cpp:228] Iteration 48000, loss = 0.178625\nI0819 19:42:29.984691 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 19:42:29.984707 22726 solver.cpp:244]     Train net output #1: loss = 0.178625 (* 1 = 0.178625 loss)\nI0819 19:42:30.068169 22726 sgd_solver.cpp:166] Iteration 48000, lr = 1.2\nI0819 19:44:47.896508 22726 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0819 19:46:12.321913 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87676\nI0819 19:46:12.322239 22726 solver.cpp:404]     Test net output #1: loss = 0.441683 (* 1 = 0.441683 loss)\nI0819 19:46:13.653045 22726 solver.cpp:228] Iteration 48100, loss = 0.103727\nI0819 19:46:13.653087 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:46:13.653108 22726 solver.cpp:244]     Train net output #1: loss = 0.103727 (* 1 = 0.103727 loss)\nI0819 19:46:13.738014 22726 sgd_solver.cpp:166] Iteration 48100, lr = 1.2025\nI0819 19:48:31.573359 22726 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0819 19:49:56.002048 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0819 19:49:56.002363 22726 solver.cpp:404]     Test net output #1: loss = 0.441844 (* 1 = 0.441844 loss)\nI0819 
19:49:57.332878 22726 solver.cpp:228] Iteration 48200, loss = 0.163493\nI0819 19:49:57.332921 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:49:57.332937 22726 solver.cpp:244]     Train net output #1: loss = 0.163493 (* 1 = 0.163493 loss)\nI0819 19:49:57.412881 22726 sgd_solver.cpp:166] Iteration 48200, lr = 1.205\nI0819 19:52:15.210454 22726 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0819 19:53:39.649838 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87492\nI0819 19:53:39.650148 22726 solver.cpp:404]     Test net output #1: loss = 0.431437 (* 1 = 0.431437 loss)\nI0819 19:53:40.981871 22726 solver.cpp:228] Iteration 48300, loss = 0.161566\nI0819 19:53:40.981914 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 19:53:40.981931 22726 solver.cpp:244]     Train net output #1: loss = 0.161566 (* 1 = 0.161566 loss)\nI0819 19:53:41.061910 22726 sgd_solver.cpp:166] Iteration 48300, lr = 1.2075\nI0819 19:55:58.928257 22726 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0819 19:57:23.366858 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87824\nI0819 19:57:23.367182 22726 solver.cpp:404]     Test net output #1: loss = 0.4376 (* 1 = 0.4376 loss)\nI0819 19:57:24.699179 22726 solver.cpp:228] Iteration 48400, loss = 0.174262\nI0819 19:57:24.699223 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 19:57:24.699239 22726 solver.cpp:244]     Train net output #1: loss = 0.174262 (* 1 = 0.174262 loss)\nI0819 19:57:24.782042 22726 sgd_solver.cpp:166] Iteration 48400, lr = 1.21\nI0819 19:59:42.777451 22726 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0819 20:01:07.215898 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8864\nI0819 20:01:07.216202 22726 solver.cpp:404]     Test net output #1: loss = 0.42246 (* 1 = 0.42246 loss)\nI0819 20:01:08.546607 22726 solver.cpp:228] Iteration 48500, loss = 0.129582\nI0819 20:01:08.546651 22726 solver.cpp:244]     Train net output 
#0: accuracy = 0.968\nI0819 20:01:08.546675 22726 solver.cpp:244]     Train net output #1: loss = 0.129582 (* 1 = 0.129582 loss)\nI0819 20:01:08.630240 22726 sgd_solver.cpp:166] Iteration 48500, lr = 1.2125\nI0819 20:03:26.531553 22726 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0819 20:04:50.963956 22726 solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0819 20:04:50.964289 22726 solver.cpp:404]     Test net output #1: loss = 0.411601 (* 1 = 0.411601 loss)\nI0819 20:04:52.299947 22726 solver.cpp:228] Iteration 48600, loss = 0.196507\nI0819 20:04:52.299994 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 20:04:52.300019 22726 solver.cpp:244]     Train net output #1: loss = 0.196506 (* 1 = 0.196506 loss)\nI0819 20:04:52.371129 22726 sgd_solver.cpp:166] Iteration 48600, lr = 1.215\nI0819 20:07:10.177073 22726 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0819 20:08:34.622931 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0819 20:08:34.623242 22726 solver.cpp:404]     Test net output #1: loss = 0.399775 (* 1 = 0.399775 loss)\nI0819 20:08:35.955066 22726 solver.cpp:228] Iteration 48700, loss = 0.162707\nI0819 20:08:35.955108 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 20:08:35.955124 22726 solver.cpp:244]     Train net output #1: loss = 0.162707 (* 1 = 0.162707 loss)\nI0819 20:08:36.039623 22726 sgd_solver.cpp:166] Iteration 48700, lr = 1.2175\nI0819 20:10:53.862969 22726 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0819 20:12:18.310567 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0819 20:12:18.310890 22726 solver.cpp:404]     Test net output #1: loss = 0.421054 (* 1 = 0.421054 loss)\nI0819 20:12:19.642060 22726 solver.cpp:228] Iteration 48800, loss = 0.167148\nI0819 20:12:19.642101 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 20:12:19.642117 22726 solver.cpp:244]     Train net output #1: loss = 0.167148 (* 1 = 0.167148 
loss)\nI0819 20:12:19.717730 22726 sgd_solver.cpp:166] Iteration 48800, lr = 1.22\nI0819 20:14:37.641825 22726 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0819 20:16:02.754685 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0819 20:16:02.754968 22726 solver.cpp:404]     Test net output #1: loss = 0.40987 (* 1 = 0.40987 loss)\nI0819 20:16:04.087846 22726 solver.cpp:228] Iteration 48900, loss = 0.131116\nI0819 20:16:04.087888 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:16:04.087904 22726 solver.cpp:244]     Train net output #1: loss = 0.131116 (* 1 = 0.131116 loss)\nI0819 20:16:04.166968 22726 sgd_solver.cpp:166] Iteration 48900, lr = 1.2225\nI0819 20:18:22.306344 22726 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0819 20:19:47.409186 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 20:19:47.409471 22726 solver.cpp:404]     Test net output #1: loss = 0.418979 (* 1 = 0.418979 loss)\nI0819 20:19:48.743564 22726 solver.cpp:228] Iteration 49000, loss = 0.096998\nI0819 20:19:48.743607 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:19:48.743623 22726 solver.cpp:244]     Train net output #1: loss = 0.0969979 (* 1 = 0.0969979 loss)\nI0819 20:19:48.818657 22726 sgd_solver.cpp:166] Iteration 49000, lr = 1.225\nI0819 20:22:06.961220 22726 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0819 20:23:32.092149 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88\nI0819 20:23:32.092416 22726 solver.cpp:404]     Test net output #1: loss = 0.415454 (* 1 = 0.415454 loss)\nI0819 20:23:33.424110 22726 solver.cpp:228] Iteration 49100, loss = 0.127898\nI0819 20:23:33.424154 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:23:33.424170 22726 solver.cpp:244]     Train net output #1: loss = 0.127898 (* 1 = 0.127898 loss)\nI0819 20:23:33.508035 22726 sgd_solver.cpp:166] Iteration 49100, lr = 1.2275\nI0819 20:25:51.772017 22726 solver.cpp:337] 
Iteration 49200, Testing net (#0)\nI0819 20:27:16.895689 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0819 20:27:16.895987 22726 solver.cpp:404]     Test net output #1: loss = 0.414193 (* 1 = 0.414193 loss)\nI0819 20:27:18.229848 22726 solver.cpp:228] Iteration 49200, loss = 0.203477\nI0819 20:27:18.229895 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 20:27:18.229918 22726 solver.cpp:244]     Train net output #1: loss = 0.203477 (* 1 = 0.203477 loss)\nI0819 20:27:18.308815 22726 sgd_solver.cpp:166] Iteration 49200, lr = 1.23\nI0819 20:29:36.437278 22726 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0819 20:31:01.565196 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0819 20:31:01.565505 22726 solver.cpp:404]     Test net output #1: loss = 0.424422 (* 1 = 0.424422 loss)\nI0819 20:31:02.899507 22726 solver.cpp:228] Iteration 49300, loss = 0.207418\nI0819 20:31:02.899551 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 20:31:02.899566 22726 solver.cpp:244]     Train net output #1: loss = 0.207418 (* 1 = 0.207418 loss)\nI0819 20:31:02.986004 22726 sgd_solver.cpp:166] Iteration 49300, lr = 1.2325\nI0819 20:33:21.334888 22726 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0819 20:34:46.452715 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 20:34:46.452983 22726 solver.cpp:404]     Test net output #1: loss = 0.395484 (* 1 = 0.395484 loss)\nI0819 20:34:47.785890 22726 solver.cpp:228] Iteration 49400, loss = 0.0689618\nI0819 20:34:47.785935 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 20:34:47.785949 22726 solver.cpp:244]     Train net output #1: loss = 0.0689618 (* 1 = 0.0689618 loss)\nI0819 20:34:47.864177 22726 sgd_solver.cpp:166] Iteration 49400, lr = 1.235\nI0819 20:37:06.084041 22726 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0819 20:38:31.181381 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.87188\nI0819 20:38:31.181684 22726 solver.cpp:404]     Test net output #1: loss = 0.45326 (* 1 = 0.45326 loss)\nI0819 20:38:32.515261 22726 solver.cpp:228] Iteration 49500, loss = 0.142651\nI0819 20:38:32.515302 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:38:32.515317 22726 solver.cpp:244]     Train net output #1: loss = 0.142651 (* 1 = 0.142651 loss)\nI0819 20:38:32.594017 22726 sgd_solver.cpp:166] Iteration 49500, lr = 1.2375\nI0819 20:40:50.639158 22726 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0819 20:42:15.738489 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 20:42:15.738811 22726 solver.cpp:404]     Test net output #1: loss = 0.430537 (* 1 = 0.430537 loss)\nI0819 20:42:17.071035 22726 solver.cpp:228] Iteration 49600, loss = 0.0593288\nI0819 20:42:17.071087 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 20:42:17.071105 22726 solver.cpp:244]     Train net output #1: loss = 0.0593288 (* 1 = 0.0593288 loss)\nI0819 20:42:17.152737 22726 sgd_solver.cpp:166] Iteration 49600, lr = 1.24\nI0819 20:44:35.243459 22726 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0819 20:46:00.335269 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8846\nI0819 20:46:00.335546 22726 solver.cpp:404]     Test net output #1: loss = 0.415641 (* 1 = 0.415641 loss)\nI0819 20:46:01.668349 22726 solver.cpp:228] Iteration 49700, loss = 0.133266\nI0819 20:46:01.668395 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 20:46:01.668411 22726 solver.cpp:244]     Train net output #1: loss = 0.133266 (* 1 = 0.133266 loss)\nI0819 20:46:01.750471 22726 sgd_solver.cpp:166] Iteration 49700, lr = 1.2425\nI0819 20:48:19.897670 22726 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0819 20:49:44.989873 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0819 20:49:44.990172 22726 solver.cpp:404]     Test net output #1: loss = 0.446702 (* 1 = 0.446702 loss)\nI0819 
20:49:46.323245 22726 solver.cpp:228] Iteration 49800, loss = 0.152697\nI0819 20:49:46.323287 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 20:49:46.323302 22726 solver.cpp:244]     Train net output #1: loss = 0.152696 (* 1 = 0.152696 loss)\nI0819 20:49:46.400475 22726 sgd_solver.cpp:166] Iteration 49800, lr = 1.245\nI0819 20:52:04.450191 22726 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0819 20:53:29.539875 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 20:53:29.540165 22726 solver.cpp:404]     Test net output #1: loss = 0.413625 (* 1 = 0.413625 loss)\nI0819 20:53:30.872936 22726 solver.cpp:228] Iteration 49900, loss = 0.156976\nI0819 20:53:30.872985 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:53:30.873001 22726 solver.cpp:244]     Train net output #1: loss = 0.156976 (* 1 = 0.156976 loss)\nI0819 20:53:30.955485 22726 sgd_solver.cpp:166] Iteration 49900, lr = 1.2475\nI0819 20:55:49.138612 22726 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0819 20:57:14.214371 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 20:57:14.214649 22726 solver.cpp:404]     Test net output #1: loss = 0.416618 (* 1 = 0.416618 loss)\nI0819 20:57:15.547247 22726 solver.cpp:228] Iteration 50000, loss = 0.118758\nI0819 20:57:15.547287 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 20:57:15.547302 22726 solver.cpp:244]     Train net output #1: loss = 0.118758 (* 1 = 0.118758 loss)\nI0819 20:57:15.626237 22726 sgd_solver.cpp:166] Iteration 50000, lr = 1.25\nI0819 20:59:33.917567 22726 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0819 21:00:59.023599 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0819 21:00:59.023910 22726 solver.cpp:404]     Test net output #1: loss = 0.418233 (* 1 = 0.418233 loss)\nI0819 21:01:00.356801 22726 solver.cpp:228] Iteration 50100, loss = 0.114394\nI0819 21:01:00.356842 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0819 21:01:00.356858 22726 solver.cpp:244]     Train net output #1: loss = 0.114394 (* 1 = 0.114394 loss)\nI0819 21:01:00.433372 22726 sgd_solver.cpp:166] Iteration 50100, lr = 1.2525\nI0819 21:03:18.556344 22726 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0819 21:04:43.676113 22726 solver.cpp:404]     Test net output #0: accuracy = 0.89396\nI0819 21:04:43.676398 22726 solver.cpp:404]     Test net output #1: loss = 0.380496 (* 1 = 0.380496 loss)\nI0819 21:04:45.008858 22726 solver.cpp:228] Iteration 50200, loss = 0.0702409\nI0819 21:04:45.008901 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 21:04:45.008918 22726 solver.cpp:244]     Train net output #1: loss = 0.0702408 (* 1 = 0.0702408 loss)\nI0819 21:04:45.089318 22726 sgd_solver.cpp:166] Iteration 50200, lr = 1.255\nI0819 21:07:03.179484 22726 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0819 21:08:28.310145 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0819 21:08:28.310420 22726 solver.cpp:404]     Test net output #1: loss = 0.392652 (* 1 = 0.392652 loss)\nI0819 21:08:29.643362 22726 solver.cpp:228] Iteration 50300, loss = 0.161919\nI0819 21:08:29.643402 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:08:29.643417 22726 solver.cpp:244]     Train net output #1: loss = 0.161918 (* 1 = 0.161918 loss)\nI0819 21:08:29.726411 22726 sgd_solver.cpp:166] Iteration 50300, lr = 1.2575\nI0819 21:10:47.799088 22726 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0819 21:12:12.914829 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 21:12:12.915159 22726 solver.cpp:404]     Test net output #1: loss = 0.402642 (* 1 = 0.402642 loss)\nI0819 21:12:14.248044 22726 solver.cpp:228] Iteration 50400, loss = 0.185151\nI0819 21:12:14.248086 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:12:14.248102 22726 solver.cpp:244]     Train net output #1: loss = 0.185151 (* 1 
= 0.185151 loss)\nI0819 21:12:14.331853 22726 sgd_solver.cpp:166] Iteration 50400, lr = 1.26\nI0819 21:14:32.519317 22726 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0819 21:15:57.648898 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88432\nI0819 21:15:57.649230 22726 solver.cpp:404]     Test net output #1: loss = 0.412018 (* 1 = 0.412018 loss)\nI0819 21:15:58.981806 22726 solver.cpp:228] Iteration 50500, loss = 0.167868\nI0819 21:15:58.981847 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 21:15:58.981863 22726 solver.cpp:244]     Train net output #1: loss = 0.167868 (* 1 = 0.167868 loss)\nI0819 21:15:59.060168 22726 sgd_solver.cpp:166] Iteration 50500, lr = 1.2625\nI0819 21:18:17.197489 22726 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0819 21:19:42.324971 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88996\nI0819 21:19:42.325251 22726 solver.cpp:404]     Test net output #1: loss = 0.385878 (* 1 = 0.385878 loss)\nI0819 21:19:43.658663 22726 solver.cpp:228] Iteration 50600, loss = 0.085132\nI0819 21:19:43.658704 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:19:43.658720 22726 solver.cpp:244]     Train net output #1: loss = 0.085132 (* 1 = 0.085132 loss)\nI0819 21:19:43.739131 22726 sgd_solver.cpp:166] Iteration 50600, lr = 1.265\nI0819 21:22:01.842651 22726 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0819 21:23:26.972934 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0819 21:23:26.973240 22726 solver.cpp:404]     Test net output #1: loss = 0.406265 (* 1 = 0.406265 loss)\nI0819 21:23:28.305153 22726 solver.cpp:228] Iteration 50700, loss = 0.124242\nI0819 21:23:28.305203 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:23:28.305218 22726 solver.cpp:244]     Train net output #1: loss = 0.124242 (* 1 = 0.124242 loss)\nI0819 21:23:28.385051 22726 sgd_solver.cpp:166] Iteration 50700, lr = 1.2675\nI0819 21:25:46.498540 22726 
solver.cpp:337] Iteration 50800, Testing net (#0)\nI0819 21:27:11.624275 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0819 21:27:11.624569 22726 solver.cpp:404]     Test net output #1: loss = 0.444212 (* 1 = 0.444212 loss)\nI0819 21:27:12.957166 22726 solver.cpp:228] Iteration 50800, loss = 0.230476\nI0819 21:27:12.957209 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 21:27:12.957226 22726 solver.cpp:244]     Train net output #1: loss = 0.230476 (* 1 = 0.230476 loss)\nI0819 21:27:13.041231 22726 sgd_solver.cpp:166] Iteration 50800, lr = 1.27\nI0819 21:29:31.170440 22726 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0819 21:30:56.300035 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88484\nI0819 21:30:56.300333 22726 solver.cpp:404]     Test net output #1: loss = 0.405759 (* 1 = 0.405759 loss)\nI0819 21:30:57.633144 22726 solver.cpp:228] Iteration 50900, loss = 0.186587\nI0819 21:30:57.633186 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 21:30:57.633201 22726 solver.cpp:244]     Train net output #1: loss = 0.186587 (* 1 = 0.186587 loss)\nI0819 21:30:57.718538 22726 sgd_solver.cpp:166] Iteration 50900, lr = 1.2725\nI0819 21:33:15.819537 22726 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0819 21:34:40.936086 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0819 21:34:40.936354 22726 solver.cpp:404]     Test net output #1: loss = 0.416316 (* 1 = 0.416316 loss)\nI0819 21:34:42.268899 22726 solver.cpp:228] Iteration 51000, loss = 0.132697\nI0819 21:34:42.268954 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 21:34:42.268970 22726 solver.cpp:244]     Train net output #1: loss = 0.132697 (* 1 = 0.132697 loss)\nI0819 21:34:42.348762 22726 sgd_solver.cpp:166] Iteration 51000, lr = 1.275\nI0819 21:37:00.548239 22726 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0819 21:38:25.666718 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.88404\nI0819 21:38:25.667006 22726 solver.cpp:404]     Test net output #1: loss = 0.409756 (* 1 = 0.409756 loss)\nI0819 21:38:26.999675 22726 solver.cpp:228] Iteration 51100, loss = 0.131553\nI0819 21:38:26.999719 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 21:38:26.999734 22726 solver.cpp:244]     Train net output #1: loss = 0.131553 (* 1 = 0.131553 loss)\nI0819 21:38:27.080760 22726 sgd_solver.cpp:166] Iteration 51100, lr = 1.2775\nI0819 21:40:45.194444 22726 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0819 21:42:10.286746 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88588\nI0819 21:42:10.287057 22726 solver.cpp:404]     Test net output #1: loss = 0.407526 (* 1 = 0.407526 loss)\nI0819 21:42:11.619490 22726 solver.cpp:228] Iteration 51200, loss = 0.16876\nI0819 21:42:11.619534 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:42:11.619549 22726 solver.cpp:244]     Train net output #1: loss = 0.16876 (* 1 = 0.16876 loss)\nI0819 21:42:11.699069 22726 sgd_solver.cpp:166] Iteration 51200, lr = 1.28\nI0819 21:44:29.825374 22726 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0819 21:45:54.730296 22726 solver.cpp:404]     Test net output #0: accuracy = 0.89112\nI0819 21:45:54.730567 22726 solver.cpp:404]     Test net output #1: loss = 0.384559 (* 1 = 0.384559 loss)\nI0819 21:45:56.063484 22726 solver.cpp:228] Iteration 51300, loss = 0.161379\nI0819 21:45:56.063525 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:45:56.063541 22726 solver.cpp:244]     Train net output #1: loss = 0.161379 (* 1 = 0.161379 loss)\nI0819 21:45:56.145583 22726 sgd_solver.cpp:166] Iteration 51300, lr = 1.2825\nI0819 21:48:14.270977 22726 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 21:49:39.208710 22726 solver.cpp:404]     Test net output #0: accuracy = 0.886521\nI0819 21:49:39.208983 22726 solver.cpp:404]     Test net output #1: loss = 0.391145 (* 1 = 0.391145 loss)\nI0819 
21:49:40.541909 22726 solver.cpp:228] Iteration 51400, loss = 0.139674\nI0819 21:49:40.541954 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:49:40.541968 22726 solver.cpp:244]     Train net output #1: loss = 0.139674 (* 1 = 0.139674 loss)\nI0819 21:49:40.620153 22726 sgd_solver.cpp:166] Iteration 51400, lr = 1.285\nI0819 21:51:58.651643 22726 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 21:53:23.706290 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88256\nI0819 21:53:23.706599 22726 solver.cpp:404]     Test net output #1: loss = 0.408804 (* 1 = 0.408804 loss)\nI0819 21:53:25.039358 22726 solver.cpp:228] Iteration 51500, loss = 0.12237\nI0819 21:53:25.039408 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:53:25.039425 22726 solver.cpp:244]     Train net output #1: loss = 0.12237 (* 1 = 0.12237 loss)\nI0819 21:53:25.124732 22726 sgd_solver.cpp:166] Iteration 51500, lr = 1.2875\nI0819 21:55:43.165110 22726 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 21:57:08.232604 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0819 21:57:08.232905 22726 solver.cpp:404]     Test net output #1: loss = 0.430086 (* 1 = 0.430086 loss)\nI0819 21:57:09.565583 22726 solver.cpp:228] Iteration 51600, loss = 0.135303\nI0819 21:57:09.565625 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:57:09.565640 22726 solver.cpp:244]     Train net output #1: loss = 0.135302 (* 1 = 0.135302 loss)\nI0819 21:57:09.646227 22726 sgd_solver.cpp:166] Iteration 51600, lr = 1.29\nI0819 21:59:27.739984 22726 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 22:00:52.792207 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8886\nI0819 22:00:52.792500 22726 solver.cpp:404]     Test net output #1: loss = 0.400972 (* 1 = 0.400972 loss)\nI0819 22:00:54.124827 22726 solver.cpp:228] Iteration 51700, loss = 0.0307401\nI0819 22:00:54.124878 22726 solver.cpp:244]     Train net 
output #0: accuracy = 1\nI0819 22:00:54.124894 22726 solver.cpp:244]     Train net output #1: loss = 0.03074 (* 1 = 0.03074 loss)\nI0819 22:00:54.208847 22726 sgd_solver.cpp:166] Iteration 51700, lr = 1.2925\nI0819 22:03:12.261983 22726 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 22:04:37.324385 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0819 22:04:37.324686 22726 solver.cpp:404]     Test net output #1: loss = 0.391522 (* 1 = 0.391522 loss)\nI0819 22:04:38.657241 22726 solver.cpp:228] Iteration 51800, loss = 0.0890575\nI0819 22:04:38.657281 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 22:04:38.657296 22726 solver.cpp:244]     Train net output #1: loss = 0.0890574 (* 1 = 0.0890574 loss)\nI0819 22:04:38.735883 22726 sgd_solver.cpp:166] Iteration 51800, lr = 1.295\nI0819 22:06:56.848686 22726 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 22:08:21.735610 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8892\nI0819 22:08:21.735875 22726 solver.cpp:404]     Test net output #1: loss = 0.372186 (* 1 = 0.372186 loss)\nI0819 22:08:23.068583 22726 solver.cpp:228] Iteration 51900, loss = 0.118487\nI0819 22:08:23.068625 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:08:23.068641 22726 solver.cpp:244]     Train net output #1: loss = 0.118486 (* 1 = 0.118486 loss)\nI0819 22:08:23.146569 22726 sgd_solver.cpp:166] Iteration 51900, lr = 1.2975\nI0819 22:10:41.221645 22726 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 22:12:06.365970 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0819 22:12:06.366315 22726 solver.cpp:404]     Test net output #1: loss = 0.44509 (* 1 = 0.44509 loss)\nI0819 22:12:07.698482 22726 solver.cpp:228] Iteration 52000, loss = 0.195879\nI0819 22:12:07.698520 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 22:12:07.698536 22726 solver.cpp:244]     Train net output #1: loss = 0.195879 (* 1 = 0.195879 
loss)\nI0819 22:12:07.774377 22726 sgd_solver.cpp:166] Iteration 52000, lr = 1.3\nI0819 22:14:25.764848 22726 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 22:15:50.628180 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 22:15:50.628458 22726 solver.cpp:404]     Test net output #1: loss = 0.403391 (* 1 = 0.403391 loss)\nI0819 22:15:51.961659 22726 solver.cpp:228] Iteration 52100, loss = 0.100027\nI0819 22:15:51.961699 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:15:51.961715 22726 solver.cpp:244]     Train net output #1: loss = 0.100026 (* 1 = 0.100026 loss)\nI0819 22:15:52.039520 22726 sgd_solver.cpp:166] Iteration 52100, lr = 1.3025\nI0819 22:18:10.101287 22726 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 22:19:34.934378 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0819 22:19:34.934660 22726 solver.cpp:404]     Test net output #1: loss = 0.431455 (* 1 = 0.431455 loss)\nI0819 22:19:36.267103 22726 solver.cpp:228] Iteration 52200, loss = 0.302011\nI0819 22:19:36.267146 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 22:19:36.267163 22726 solver.cpp:244]     Train net output #1: loss = 0.302011 (* 1 = 0.302011 loss)\nI0819 22:19:36.350970 22726 sgd_solver.cpp:166] Iteration 52200, lr = 1.305\nI0819 22:21:54.403620 22726 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 22:23:19.245056 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0819 22:23:19.245363 22726 solver.cpp:404]     Test net output #1: loss = 0.396455 (* 1 = 0.396455 loss)\nI0819 22:23:20.578383 22726 solver.cpp:228] Iteration 52300, loss = 0.13975\nI0819 22:23:20.578425 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 22:23:20.578440 22726 solver.cpp:244]     Train net output #1: loss = 0.13975 (* 1 = 0.13975 loss)\nI0819 22:23:20.660912 22726 sgd_solver.cpp:166] Iteration 52300, lr = 1.3075\nI0819 22:25:38.990757 22726 solver.cpp:337] 
Iteration 52400, Testing net (#0)\nI0819 22:27:03.797897 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88304\nI0819 22:27:03.798166 22726 solver.cpp:404]     Test net output #1: loss = 0.401215 (* 1 = 0.401215 loss)\nI0819 22:27:05.130683 22726 solver.cpp:228] Iteration 52400, loss = 0.178766\nI0819 22:27:05.130724 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:27:05.130740 22726 solver.cpp:244]     Train net output #1: loss = 0.178766 (* 1 = 0.178766 loss)\nI0819 22:27:05.215258 22726 sgd_solver.cpp:166] Iteration 52400, lr = 1.31\nI0819 22:29:23.456898 22726 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 22:30:48.373311 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88064\nI0819 22:30:48.373651 22726 solver.cpp:404]     Test net output #1: loss = 0.418013 (* 1 = 0.418013 loss)\nI0819 22:30:49.706701 22726 solver.cpp:228] Iteration 52500, loss = 0.0949333\nI0819 22:30:49.706744 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:30:49.706760 22726 solver.cpp:244]     Train net output #1: loss = 0.0949332 (* 1 = 0.0949332 loss)\nI0819 22:30:49.790977 22726 sgd_solver.cpp:166] Iteration 52500, lr = 1.3125\nI0819 22:33:07.914942 22726 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 22:34:32.769816 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88396\nI0819 22:34:32.770081 22726 solver.cpp:404]     Test net output #1: loss = 0.406995 (* 1 = 0.406995 loss)\nI0819 22:34:34.101974 22726 solver.cpp:228] Iteration 52600, loss = 0.0499557\nI0819 22:34:34.102031 22726 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 22:34:34.102048 22726 solver.cpp:244]     Train net output #1: loss = 0.0499556 (* 1 = 0.0499556 loss)\nI0819 22:34:34.179811 22726 sgd_solver.cpp:166] Iteration 52600, lr = 1.315\nI0819 22:36:52.328084 22726 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 22:38:17.082365 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.88072\nI0819 22:38:17.082650 22726 solver.cpp:404]     Test net output #1: loss = 0.404664 (* 1 = 0.404664 loss)\nI0819 22:38:18.415204 22726 solver.cpp:228] Iteration 52700, loss = 0.200416\nI0819 22:38:18.415249 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 22:38:18.415266 22726 solver.cpp:244]     Train net output #1: loss = 0.200415 (* 1 = 0.200415 loss)\nI0819 22:38:18.497457 22726 sgd_solver.cpp:166] Iteration 52700, lr = 1.3175\nI0819 22:40:36.593426 22726 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 22:42:01.373078 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88932\nI0819 22:42:01.373343 22726 solver.cpp:404]     Test net output #1: loss = 0.395613 (* 1 = 0.395613 loss)\nI0819 22:42:02.707105 22726 solver.cpp:228] Iteration 52800, loss = 0.152618\nI0819 22:42:02.707145 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:42:02.707160 22726 solver.cpp:244]     Train net output #1: loss = 0.152618 (* 1 = 0.152618 loss)\nI0819 22:42:02.790611 22726 sgd_solver.cpp:166] Iteration 52800, lr = 1.32\nI0819 22:44:20.840029 22726 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 22:45:45.612697 22726 solver.cpp:404]     Test net output #0: accuracy = 0.886921\nI0819 22:45:45.612987 22726 solver.cpp:404]     Test net output #1: loss = 0.391603 (* 1 = 0.391603 loss)\nI0819 22:45:46.946805 22726 solver.cpp:228] Iteration 52900, loss = 0.0989175\nI0819 22:45:46.946848 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:45:46.946863 22726 solver.cpp:244]     Train net output #1: loss = 0.0989174 (* 1 = 0.0989174 loss)\nI0819 22:45:47.022672 22726 sgd_solver.cpp:166] Iteration 52900, lr = 1.3225\nI0819 22:48:05.151257 22726 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 22:49:29.947284 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0819 22:49:29.947559 22726 solver.cpp:404]     Test net output #1: loss = 0.408584 (* 1 = 0.408584 loss)\nI0819 
22:49:31.280560 22726 solver.cpp:228] Iteration 53000, loss = 0.150853\nI0819 22:49:31.280611 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 22:49:31.280627 22726 solver.cpp:244]     Train net output #1: loss = 0.150853 (* 1 = 0.150853 loss)\nI0819 22:49:31.363003 22726 sgd_solver.cpp:166] Iteration 53000, lr = 1.325\nI0819 22:51:49.458710 22726 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 22:53:14.200407 22726 solver.cpp:404]     Test net output #0: accuracy = 0.888\nI0819 22:53:14.200712 22726 solver.cpp:404]     Test net output #1: loss = 0.393014 (* 1 = 0.393014 loss)\nI0819 22:53:15.534165 22726 solver.cpp:228] Iteration 53100, loss = 0.216402\nI0819 22:53:15.534207 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 22:53:15.534224 22726 solver.cpp:244]     Train net output #1: loss = 0.216402 (* 1 = 0.216402 loss)\nI0819 22:53:15.615756 22726 sgd_solver.cpp:166] Iteration 53100, lr = 1.3275\nI0819 22:55:33.600149 22726 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 22:56:58.457468 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0819 22:56:58.457720 22726 solver.cpp:404]     Test net output #1: loss = 0.396191 (* 1 = 0.396191 loss)\nI0819 22:56:59.791474 22726 solver.cpp:228] Iteration 53200, loss = 0.117953\nI0819 22:56:59.791518 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 22:56:59.791534 22726 solver.cpp:244]     Train net output #1: loss = 0.117952 (* 1 = 0.117952 loss)\nI0819 22:56:59.872078 22726 sgd_solver.cpp:166] Iteration 53200, lr = 1.33\nI0819 22:59:17.954541 22726 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 23:00:42.776180 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8896\nI0819 23:00:42.776474 22726 solver.cpp:404]     Test net output #1: loss = 0.379407 (* 1 = 0.379407 loss)\nI0819 23:00:44.110373 22726 solver.cpp:228] Iteration 53300, loss = 0.0947585\nI0819 23:00:44.110416 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.968\nI0819 23:00:44.110432 22726 solver.cpp:244]     Train net output #1: loss = 0.0947583 (* 1 = 0.0947583 loss)\nI0819 23:00:44.189208 22726 sgd_solver.cpp:166] Iteration 53300, lr = 1.3325\nI0819 23:03:02.280944 22726 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 23:04:27.299412 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8776\nI0819 23:04:27.299701 22726 solver.cpp:404]     Test net output #1: loss = 0.422914 (* 1 = 0.422914 loss)\nI0819 23:04:28.632855 22726 solver.cpp:228] Iteration 53400, loss = 0.120344\nI0819 23:04:28.632899 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:04:28.632915 22726 solver.cpp:244]     Train net output #1: loss = 0.120344 (* 1 = 0.120344 loss)\nI0819 23:04:28.717016 22726 sgd_solver.cpp:166] Iteration 53400, lr = 1.335\nI0819 23:06:47.017642 22726 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 23:08:11.783696 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88152\nI0819 23:08:11.784013 22726 solver.cpp:404]     Test net output #1: loss = 0.422802 (* 1 = 0.422802 loss)\nI0819 23:08:13.117739 22726 solver.cpp:228] Iteration 53500, loss = 0.0983419\nI0819 23:08:13.117781 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:08:13.117797 22726 solver.cpp:244]     Train net output #1: loss = 0.0983417 (* 1 = 0.0983417 loss)\nI0819 23:08:13.199092 22726 sgd_solver.cpp:166] Iteration 53500, lr = 1.3375\nI0819 23:10:31.293215 22726 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 23:11:56.391242 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0819 23:11:56.391592 22726 solver.cpp:404]     Test net output #1: loss = 0.389156 (* 1 = 0.389156 loss)\nI0819 23:11:57.725888 22726 solver.cpp:228] Iteration 53600, loss = 0.146033\nI0819 23:11:57.725929 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:11:57.725945 22726 solver.cpp:244]     Train net output #1: loss = 0.146033 (* 1 = 
0.146033 loss)\nI0819 23:11:57.805932 22726 sgd_solver.cpp:166] Iteration 53600, lr = 1.34\nI0819 23:14:15.828820 22726 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 23:15:40.979192 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8778\nI0819 23:15:40.979519 22726 solver.cpp:404]     Test net output #1: loss = 0.418274 (* 1 = 0.418274 loss)\nI0819 23:15:42.313526 22726 solver.cpp:228] Iteration 53700, loss = 0.254303\nI0819 23:15:42.313570 22726 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0819 23:15:42.313585 22726 solver.cpp:244]     Train net output #1: loss = 0.254303 (* 1 = 0.254303 loss)\nI0819 23:15:42.401584 22726 sgd_solver.cpp:166] Iteration 53700, lr = 1.3425\nI0819 23:18:00.568684 22726 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 23:19:25.705653 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88436\nI0819 23:19:25.706009 22726 solver.cpp:404]     Test net output #1: loss = 0.416462 (* 1 = 0.416462 loss)\nI0819 23:19:27.040248 22726 solver.cpp:228] Iteration 53800, loss = 0.20828\nI0819 23:19:27.040290 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 23:19:27.040307 22726 solver.cpp:244]     Train net output #1: loss = 0.20828 (* 1 = 0.20828 loss)\nI0819 23:19:27.123831 22726 sgd_solver.cpp:166] Iteration 53800, lr = 1.345\nI0819 23:21:45.222463 22726 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 23:23:10.344247 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88756\nI0819 23:23:10.344584 22726 solver.cpp:404]     Test net output #1: loss = 0.401891 (* 1 = 0.401891 loss)\nI0819 23:23:11.677073 22726 solver.cpp:228] Iteration 53900, loss = 0.0636728\nI0819 23:23:11.677119 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 23:23:11.677135 22726 solver.cpp:244]     Train net output #1: loss = 0.0636726 (* 1 = 0.0636726 loss)\nI0819 23:23:11.759116 22726 sgd_solver.cpp:166] Iteration 53900, lr = 1.3475\nI0819 23:25:29.776108 22726 
solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 23:26:54.900599 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0819 23:26:54.900975 22726 solver.cpp:404]     Test net output #1: loss = 0.403791 (* 1 = 0.403791 loss)\nI0819 23:26:56.233898 22726 solver.cpp:228] Iteration 54000, loss = 0.127542\nI0819 23:26:56.233942 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:26:56.233958 22726 solver.cpp:244]     Train net output #1: loss = 0.127541 (* 1 = 0.127541 loss)\nI0819 23:26:56.317970 22726 sgd_solver.cpp:166] Iteration 54000, lr = 1.35\nI0819 23:29:14.446296 22726 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 23:30:39.574851 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88392\nI0819 23:30:39.575206 22726 solver.cpp:404]     Test net output #1: loss = 0.399701 (* 1 = 0.399701 loss)\nI0819 23:30:40.908150 22726 solver.cpp:228] Iteration 54100, loss = 0.115005\nI0819 23:30:40.908192 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:30:40.908208 22726 solver.cpp:244]     Train net output #1: loss = 0.115005 (* 1 = 0.115005 loss)\nI0819 23:30:40.988548 22726 sgd_solver.cpp:166] Iteration 54100, lr = 1.3525\nI0819 23:32:59.063977 22726 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 23:34:24.190241 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8858\nI0819 23:34:24.190570 22726 solver.cpp:404]     Test net output #1: loss = 0.405211 (* 1 = 0.405211 loss)\nI0819 23:34:25.523272 22726 solver.cpp:228] Iteration 54200, loss = 0.204751\nI0819 23:34:25.523315 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 23:34:25.523330 22726 solver.cpp:244]     Train net output #1: loss = 0.204751 (* 1 = 0.204751 loss)\nI0819 23:34:25.603302 22726 sgd_solver.cpp:166] Iteration 54200, lr = 1.355\nI0819 23:36:43.821949 22726 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 23:38:08.973340 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.884\nI0819 23:38:08.973686 22726 solver.cpp:404]     Test net output #1: loss = 0.405921 (* 1 = 0.405921 loss)\nI0819 23:38:10.306170 22726 solver.cpp:228] Iteration 54300, loss = 0.0688179\nI0819 23:38:10.306213 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 23:38:10.306229 22726 solver.cpp:244]     Train net output #1: loss = 0.0688177 (* 1 = 0.0688177 loss)\nI0819 23:38:10.385233 22726 sgd_solver.cpp:166] Iteration 54300, lr = 1.3575\nI0819 23:40:28.514470 22726 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 23:41:53.677891 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0819 23:41:53.678290 22726 solver.cpp:404]     Test net output #1: loss = 0.397362 (* 1 = 0.397362 loss)\nI0819 23:41:55.010781 22726 solver.cpp:228] Iteration 54400, loss = 0.108855\nI0819 23:41:55.010833 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:41:55.010849 22726 solver.cpp:244]     Train net output #1: loss = 0.108855 (* 1 = 0.108855 loss)\nI0819 23:41:55.093431 22726 sgd_solver.cpp:166] Iteration 54400, lr = 1.36\nI0819 23:44:13.224627 22726 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 23:45:38.393736 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0819 23:45:38.394095 22726 solver.cpp:404]     Test net output #1: loss = 0.408931 (* 1 = 0.408931 loss)\nI0819 23:45:39.726800 22726 solver.cpp:228] Iteration 54500, loss = 0.103023\nI0819 23:45:39.726841 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:45:39.726857 22726 solver.cpp:244]     Train net output #1: loss = 0.103022 (* 1 = 0.103022 loss)\nI0819 23:45:39.816511 22726 sgd_solver.cpp:166] Iteration 54500, lr = 1.3625\nI0819 23:47:58.117115 22726 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 23:49:23.268155 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88064\nI0819 23:49:23.268532 22726 solver.cpp:404]     Test net output #1: loss = 0.405704 (* 1 = 0.405704 loss)\nI0819 
23:49:24.600711 22726 solver.cpp:228] Iteration 54600, loss = 0.115076\nI0819 23:49:24.600754 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:49:24.600769 22726 solver.cpp:244]     Train net output #1: loss = 0.115075 (* 1 = 0.115075 loss)\nI0819 23:49:24.680362 22726 sgd_solver.cpp:166] Iteration 54600, lr = 1.365\nI0819 23:51:42.876655 22726 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 23:53:08.033365 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87744\nI0819 23:53:08.033717 22726 solver.cpp:404]     Test net output #1: loss = 0.435277 (* 1 = 0.435277 loss)\nI0819 23:53:09.366508 22726 solver.cpp:228] Iteration 54700, loss = 0.166086\nI0819 23:53:09.366547 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:53:09.366564 22726 solver.cpp:244]     Train net output #1: loss = 0.166086 (* 1 = 0.166086 loss)\nI0819 23:53:09.444557 22726 sgd_solver.cpp:166] Iteration 54700, lr = 1.3675\nI0819 23:55:27.550895 22726 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 23:56:52.705484 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88756\nI0819 23:56:52.705865 22726 solver.cpp:404]     Test net output #1: loss = 0.387296 (* 1 = 0.387296 loss)\nI0819 23:56:54.038702 22726 solver.cpp:228] Iteration 54800, loss = 0.135508\nI0819 23:56:54.038740 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:56:54.038756 22726 solver.cpp:244]     Train net output #1: loss = 0.135508 (* 1 = 0.135508 loss)\nI0819 23:56:54.120592 22726 sgd_solver.cpp:166] Iteration 54800, lr = 1.37\nI0819 23:59:12.246912 22726 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0820 00:00:37.412770 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87928\nI0820 00:00:37.413116 22726 solver.cpp:404]     Test net output #1: loss = 0.437905 (* 1 = 0.437905 loss)\nI0820 00:00:38.745707 22726 solver.cpp:228] Iteration 54900, loss = 0.132169\nI0820 00:00:38.745750 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.952\nI0820 00:00:38.745767 22726 solver.cpp:244]     Train net output #1: loss = 0.132169 (* 1 = 0.132169 loss)\nI0820 00:00:38.821498 22726 sgd_solver.cpp:166] Iteration 54900, lr = 1.3725\nI0820 00:02:56.929143 22726 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0820 00:04:22.075670 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88732\nI0820 00:04:22.076028 22726 solver.cpp:404]     Test net output #1: loss = 0.395137 (* 1 = 0.395137 loss)\nI0820 00:04:23.409121 22726 solver.cpp:228] Iteration 55000, loss = 0.119832\nI0820 00:04:23.409165 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 00:04:23.409180 22726 solver.cpp:244]     Train net output #1: loss = 0.119832 (* 1 = 0.119832 loss)\nI0820 00:04:23.491222 22726 sgd_solver.cpp:166] Iteration 55000, lr = 1.375\nI0820 00:06:41.482234 22726 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0820 00:08:06.653378 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0820 00:08:06.653740 22726 solver.cpp:404]     Test net output #1: loss = 0.420854 (* 1 = 0.420854 loss)\nI0820 00:08:07.986742 22726 solver.cpp:228] Iteration 55100, loss = 0.147623\nI0820 00:08:07.986783 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:08:07.986799 22726 solver.cpp:244]     Train net output #1: loss = 0.147623 (* 1 = 0.147623 loss)\nI0820 00:08:08.070540 22726 sgd_solver.cpp:166] Iteration 55100, lr = 1.3775\nI0820 00:10:26.144897 22726 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0820 00:11:51.311641 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87748\nI0820 00:11:51.311992 22726 solver.cpp:404]     Test net output #1: loss = 0.430871 (* 1 = 0.430871 loss)\nI0820 00:11:52.644984 22726 solver.cpp:228] Iteration 55200, loss = 0.0759977\nI0820 00:11:52.645033 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 00:11:52.645051 22726 solver.cpp:244]     Train net output #1: loss = 0.0759976 (* 1 = 
0.0759976 loss)\nI0820 00:11:52.729567 22726 sgd_solver.cpp:166] Iteration 55200, lr = 1.38\nI0820 00:14:10.832607 22726 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0820 00:15:35.988762 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0820 00:15:35.989115 22726 solver.cpp:404]     Test net output #1: loss = 0.415274 (* 1 = 0.415274 loss)\nI0820 00:15:37.321964 22726 solver.cpp:228] Iteration 55300, loss = 0.096892\nI0820 00:15:37.322008 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 00:15:37.322024 22726 solver.cpp:244]     Train net output #1: loss = 0.0968919 (* 1 = 0.0968919 loss)\nI0820 00:15:37.398030 22726 sgd_solver.cpp:166] Iteration 55300, lr = 1.3825\nI0820 00:17:55.474453 22726 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0820 00:19:20.623762 22726 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0820 00:19:20.624140 22726 solver.cpp:404]     Test net output #1: loss = 0.400761 (* 1 = 0.400761 loss)\nI0820 00:19:21.956696 22726 solver.cpp:228] Iteration 55400, loss = 0.109219\nI0820 00:19:21.956737 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 00:19:21.956753 22726 solver.cpp:244]     Train net output #1: loss = 0.109219 (* 1 = 0.109219 loss)\nI0820 00:19:22.042152 22726 sgd_solver.cpp:166] Iteration 55400, lr = 1.385\nI0820 00:21:40.244765 22726 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0820 00:23:05.406697 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0820 00:23:05.407042 22726 solver.cpp:404]     Test net output #1: loss = 0.425387 (* 1 = 0.425387 loss)\nI0820 00:23:06.739848 22726 solver.cpp:228] Iteration 55500, loss = 0.134287\nI0820 00:23:06.739892 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:23:06.739907 22726 solver.cpp:244]     Train net output #1: loss = 0.134287 (* 1 = 0.134287 loss)\nI0820 00:23:06.832247 22726 sgd_solver.cpp:166] Iteration 55500, lr = 1.3875\nI0820 00:25:25.025771 22726 
solver.cpp:337] Iteration 55600, Testing net (#0)\nI0820 00:26:50.169740 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8762\nI0820 00:26:50.170111 22726 solver.cpp:404]     Test net output #1: loss = 0.430643 (* 1 = 0.430643 loss)\nI0820 00:26:51.502524 22726 solver.cpp:228] Iteration 55600, loss = 0.206796\nI0820 00:26:51.502568 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 00:26:51.502583 22726 solver.cpp:244]     Train net output #1: loss = 0.206796 (* 1 = 0.206796 loss)\nI0820 00:26:51.580157 22726 sgd_solver.cpp:166] Iteration 55600, lr = 1.39\nI0820 00:29:09.669761 22726 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0820 00:30:34.821945 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0820 00:30:34.822285 22726 solver.cpp:404]     Test net output #1: loss = 0.427016 (* 1 = 0.427016 loss)\nI0820 00:30:36.156428 22726 solver.cpp:228] Iteration 55700, loss = 0.202077\nI0820 00:30:36.156469 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 00:30:36.156486 22726 solver.cpp:244]     Train net output #1: loss = 0.202077 (* 1 = 0.202077 loss)\nI0820 00:30:36.241925 22726 sgd_solver.cpp:166] Iteration 55700, lr = 1.3925\nI0820 00:32:54.377068 22726 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0820 00:34:19.553402 22726 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0820 00:34:19.553781 22726 solver.cpp:404]     Test net output #1: loss = 0.42059 (* 1 = 0.42059 loss)\nI0820 00:34:20.887733 22726 solver.cpp:228] Iteration 55800, loss = 0.194065\nI0820 00:34:20.887774 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:34:20.887789 22726 solver.cpp:244]     Train net output #1: loss = 0.194065 (* 1 = 0.194065 loss)\nI0820 00:34:20.970023 22726 sgd_solver.cpp:166] Iteration 55800, lr = 1.395\nI0820 00:36:39.041484 22726 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0820 00:38:04.205221 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.87972\nI0820 00:38:04.205585 22726 solver.cpp:404]     Test net output #1: loss = 0.40393 (* 1 = 0.40393 loss)\nI0820 00:38:05.539042 22726 solver.cpp:228] Iteration 55900, loss = 0.138543\nI0820 00:38:05.539084 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:38:05.539100 22726 solver.cpp:244]     Train net output #1: loss = 0.138543 (* 1 = 0.138543 loss)\nI0820 00:38:05.620450 22726 sgd_solver.cpp:166] Iteration 55900, lr = 1.3975\nI0820 00:40:23.750311 22726 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0820 00:41:48.908486 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88784\nI0820 00:41:48.908861 22726 solver.cpp:404]     Test net output #1: loss = 0.396897 (* 1 = 0.396897 loss)\nI0820 00:41:50.241729 22726 solver.cpp:228] Iteration 56000, loss = 0.123213\nI0820 00:41:50.241780 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:41:50.241796 22726 solver.cpp:244]     Train net output #1: loss = 0.123213 (* 1 = 0.123213 loss)\nI0820 00:41:50.328322 22726 sgd_solver.cpp:166] Iteration 56000, lr = 1.4\nI0820 00:44:08.491070 22726 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0820 00:45:33.115901 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88524\nI0820 00:45:33.116230 22726 solver.cpp:404]     Test net output #1: loss = 0.402783 (* 1 = 0.402783 loss)\nI0820 00:45:34.451206 22726 solver.cpp:228] Iteration 56100, loss = 0.157239\nI0820 00:45:34.451253 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:45:34.451274 22726 solver.cpp:244]     Train net output #1: loss = 0.157239 (* 1 = 0.157239 loss)\nI0820 00:45:34.529840 22726 sgd_solver.cpp:166] Iteration 56100, lr = 1.4025\nI0820 00:47:52.649823 22726 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0820 00:49:17.080231 22726 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0820 00:49:17.080541 22726 solver.cpp:404]     Test net output #1: loss = 0.41999 (* 1 = 0.41999 loss)\nI0820 
00:49:18.411967 22726 solver.cpp:228] Iteration 56200, loss = 0.188514\nI0820 00:49:18.412014 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:49:18.412031 22726 solver.cpp:244]     Train net output #1: loss = 0.188514 (* 1 = 0.188514 loss)\nI0820 00:49:18.486570 22726 sgd_solver.cpp:166] Iteration 56200, lr = 1.405\nI0820 00:51:36.343444 22726 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0820 00:53:00.730490 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0820 00:53:00.730804 22726 solver.cpp:404]     Test net output #1: loss = 0.430337 (* 1 = 0.430337 loss)\nI0820 00:53:02.061573 22726 solver.cpp:228] Iteration 56300, loss = 0.281086\nI0820 00:53:02.061620 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 00:53:02.061635 22726 solver.cpp:244]     Train net output #1: loss = 0.281086 (* 1 = 0.281086 loss)\nI0820 00:53:02.144862 22726 sgd_solver.cpp:166] Iteration 56300, lr = 1.4075\nI0820 00:55:20.106088 22726 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0820 00:56:44.498456 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0820 00:56:44.498762 22726 solver.cpp:404]     Test net output #1: loss = 0.412837 (* 1 = 0.412837 loss)\nI0820 00:56:45.829092 22726 solver.cpp:228] Iteration 56400, loss = 0.238263\nI0820 00:56:45.829139 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 00:56:45.829157 22726 solver.cpp:244]     Train net output #1: loss = 0.238263 (* 1 = 0.238263 loss)\nI0820 00:56:45.915508 22726 sgd_solver.cpp:166] Iteration 56400, lr = 1.41\nI0820 00:59:03.811439 22726 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0820 01:00:28.200116 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0820 01:00:28.200403 22726 solver.cpp:404]     Test net output #1: loss = 0.414762 (* 1 = 0.414762 loss)\nI0820 01:00:29.531339 22726 solver.cpp:228] Iteration 56500, loss = 0.10269\nI0820 01:00:29.531383 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.952\nI0820 01:00:29.531399 22726 solver.cpp:244]     Train net output #1: loss = 0.10269 (* 1 = 0.10269 loss)\nI0820 01:00:29.609707 22726 sgd_solver.cpp:166] Iteration 56500, lr = 1.4125\nI0820 01:02:47.469097 22726 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0820 01:04:11.863298 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88056\nI0820 01:04:11.863589 22726 solver.cpp:404]     Test net output #1: loss = 0.406355 (* 1 = 0.406355 loss)\nI0820 01:04:13.194460 22726 solver.cpp:228] Iteration 56600, loss = 0.111398\nI0820 01:04:13.194504 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:04:13.194520 22726 solver.cpp:244]     Train net output #1: loss = 0.111398 (* 1 = 0.111398 loss)\nI0820 01:04:13.281445 22726 sgd_solver.cpp:166] Iteration 56600, lr = 1.415\nI0820 01:06:31.148267 22726 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0820 01:07:55.554718 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0820 01:07:55.555034 22726 solver.cpp:404]     Test net output #1: loss = 0.430556 (* 1 = 0.430556 loss)\nI0820 01:07:56.885532 22726 solver.cpp:228] Iteration 56700, loss = 0.191349\nI0820 01:07:56.885576 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:07:56.885592 22726 solver.cpp:244]     Train net output #1: loss = 0.191349 (* 1 = 0.191349 loss)\nI0820 01:07:56.962307 22726 sgd_solver.cpp:166] Iteration 56700, lr = 1.4175\nI0820 01:10:14.806521 22726 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0820 01:11:39.213104 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0820 01:11:39.213376 22726 solver.cpp:404]     Test net output #1: loss = 0.430986 (* 1 = 0.430986 loss)\nI0820 01:11:40.543998 22726 solver.cpp:228] Iteration 56800, loss = 0.138928\nI0820 01:11:40.544042 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:11:40.544059 22726 solver.cpp:244]     Train net output #1: loss = 0.138928 (* 1 = 
0.138928 loss)\nI0820 01:11:40.627980 22726 sgd_solver.cpp:166] Iteration 56800, lr = 1.42\nI0820 01:13:58.549304 22726 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0820 01:15:22.960681 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0820 01:15:22.960988 22726 solver.cpp:404]     Test net output #1: loss = 0.418405 (* 1 = 0.418405 loss)\nI0820 01:15:24.291368 22726 solver.cpp:228] Iteration 56900, loss = 0.106565\nI0820 01:15:24.291412 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:15:24.291429 22726 solver.cpp:244]     Train net output #1: loss = 0.106565 (* 1 = 0.106565 loss)\nI0820 01:15:24.366693 22726 sgd_solver.cpp:166] Iteration 56900, lr = 1.4225\nI0820 01:17:42.359396 22726 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0820 01:19:06.756227 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0820 01:19:06.756500 22726 solver.cpp:404]     Test net output #1: loss = 0.391933 (* 1 = 0.391933 loss)\nI0820 01:19:08.087296 22726 solver.cpp:228] Iteration 57000, loss = 0.147339\nI0820 01:19:08.087338 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:19:08.087353 22726 solver.cpp:244]     Train net output #1: loss = 0.147339 (* 1 = 0.147339 loss)\nI0820 01:19:08.165894 22726 sgd_solver.cpp:166] Iteration 57000, lr = 1.425\nI0820 01:21:26.218154 22726 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0820 01:22:51.054993 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87792\nI0820 01:22:51.055369 22726 solver.cpp:404]     Test net output #1: loss = 0.411721 (* 1 = 0.411721 loss)\nI0820 01:22:52.387858 22726 solver.cpp:228] Iteration 57100, loss = 0.167703\nI0820 01:22:52.387902 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:22:52.387917 22726 solver.cpp:244]     Train net output #1: loss = 0.167703 (* 1 = 0.167703 loss)\nI0820 01:22:52.474092 22726 sgd_solver.cpp:166] Iteration 57100, lr = 1.4275\nI0820 01:25:10.581084 22726 
solver.cpp:337] Iteration 57200, Testing net (#0)\nI0820 01:26:35.732975 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8722\nI0820 01:26:35.733341 22726 solver.cpp:404]     Test net output #1: loss = 0.438892 (* 1 = 0.438892 loss)\nI0820 01:26:37.065731 22726 solver.cpp:228] Iteration 57200, loss = 0.161432\nI0820 01:26:37.065770 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 01:26:37.065785 22726 solver.cpp:244]     Train net output #1: loss = 0.161432 (* 1 = 0.161432 loss)\nI0820 01:26:37.149652 22726 sgd_solver.cpp:166] Iteration 57200, lr = 1.43\nI0820 01:28:55.262331 22726 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0820 01:30:20.421054 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0820 01:30:20.421418 22726 solver.cpp:404]     Test net output #1: loss = 0.405242 (* 1 = 0.405242 loss)\nI0820 01:30:21.754125 22726 solver.cpp:228] Iteration 57300, loss = 0.13339\nI0820 01:30:21.754168 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:30:21.754182 22726 solver.cpp:244]     Train net output #1: loss = 0.13339 (* 1 = 0.13339 loss)\nI0820 01:30:21.835814 22726 sgd_solver.cpp:166] Iteration 57300, lr = 1.4325\nI0820 01:32:40.004021 22726 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0820 01:34:05.172925 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0820 01:34:05.173310 22726 solver.cpp:404]     Test net output #1: loss = 0.407338 (* 1 = 0.407338 loss)\nI0820 01:34:06.505540 22726 solver.cpp:228] Iteration 57400, loss = 0.23025\nI0820 01:34:06.505581 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:34:06.505597 22726 solver.cpp:244]     Train net output #1: loss = 0.23025 (* 1 = 0.23025 loss)\nI0820 01:34:06.582432 22726 sgd_solver.cpp:166] Iteration 57400, lr = 1.435\nI0820 01:36:24.693143 22726 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0820 01:37:49.856120 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.88052\nI0820 01:37:49.856473 22726 solver.cpp:404]     Test net output #1: loss = 0.404377 (* 1 = 0.404377 loss)\nI0820 01:37:51.189327 22726 solver.cpp:228] Iteration 57500, loss = 0.0806574\nI0820 01:37:51.189364 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 01:37:51.189379 22726 solver.cpp:244]     Train net output #1: loss = 0.0806573 (* 1 = 0.0806573 loss)\nI0820 01:37:51.272186 22726 sgd_solver.cpp:166] Iteration 57500, lr = 1.4375\nI0820 01:40:09.314803 22726 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0820 01:41:34.470201 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0820 01:41:34.470587 22726 solver.cpp:404]     Test net output #1: loss = 0.428466 (* 1 = 0.428466 loss)\nI0820 01:41:35.803218 22726 solver.cpp:228] Iteration 57600, loss = 0.193506\nI0820 01:41:35.803258 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:41:35.803274 22726 solver.cpp:244]     Train net output #1: loss = 0.193506 (* 1 = 0.193506 loss)\nI0820 01:41:35.884826 22726 sgd_solver.cpp:166] Iteration 57600, lr = 1.44\nI0820 01:43:53.990721 22726 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0820 01:45:19.154448 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88556\nI0820 01:45:19.154806 22726 solver.cpp:404]     Test net output #1: loss = 0.391322 (* 1 = 0.391322 loss)\nI0820 01:45:20.487609 22726 solver.cpp:228] Iteration 57700, loss = 0.160041\nI0820 01:45:20.487649 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:45:20.487664 22726 solver.cpp:244]     Train net output #1: loss = 0.160041 (* 1 = 0.160041 loss)\nI0820 01:45:20.565532 22726 sgd_solver.cpp:166] Iteration 57700, lr = 1.4425\nI0820 01:47:38.623123 22726 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0820 01:49:03.782985 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8772\nI0820 01:49:03.783365 22726 solver.cpp:404]     Test net output #1: loss = 0.426862 (* 1 = 0.426862 loss)\nI0820 
01:49:05.116055 22726 solver.cpp:228] Iteration 57800, loss = 0.152023\nI0820 01:49:05.116093 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:49:05.116108 22726 solver.cpp:244]     Train net output #1: loss = 0.152023 (* 1 = 0.152023 loss)\nI0820 01:49:05.189872 22726 sgd_solver.cpp:166] Iteration 57800, lr = 1.445\nI0820 01:51:23.179656 22726 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0820 01:52:48.324816 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8846\nI0820 01:52:48.325206 22726 solver.cpp:404]     Test net output #1: loss = 0.3917 (* 1 = 0.3917 loss)\nI0820 01:52:49.657979 22726 solver.cpp:228] Iteration 57900, loss = 0.0801318\nI0820 01:52:49.658025 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 01:52:49.658041 22726 solver.cpp:244]     Train net output #1: loss = 0.0801317 (* 1 = 0.0801317 loss)\nI0820 01:52:49.739378 22726 sgd_solver.cpp:166] Iteration 57900, lr = 1.4475\nI0820 01:55:07.844988 22726 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0820 01:56:32.994050 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0820 01:56:32.994407 22726 solver.cpp:404]     Test net output #1: loss = 0.418345 (* 1 = 0.418345 loss)\nI0820 01:56:34.326817 22726 solver.cpp:228] Iteration 58000, loss = 0.176963\nI0820 01:56:34.326858 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:56:34.326874 22726 solver.cpp:244]     Train net output #1: loss = 0.176963 (* 1 = 0.176963 loss)\nI0820 01:56:34.408998 22726 sgd_solver.cpp:166] Iteration 58000, lr = 1.45\nI0820 01:58:52.485013 22726 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0820 02:00:17.635596 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8766\nI0820 02:00:17.635964 22726 solver.cpp:404]     Test net output #1: loss = 0.425634 (* 1 = 0.425634 loss)\nI0820 02:00:18.968744 22726 solver.cpp:228] Iteration 58100, loss = 0.140779\nI0820 02:00:18.968783 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.952\nI0820 02:00:18.968797 22726 solver.cpp:244]     Train net output #1: loss = 0.140779 (* 1 = 0.140779 loss)\nI0820 02:00:19.055297 22726 sgd_solver.cpp:166] Iteration 58100, lr = 1.4525\nI0820 02:02:37.347390 22726 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0820 02:04:02.513032 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0820 02:04:02.513383 22726 solver.cpp:404]     Test net output #1: loss = 0.391871 (* 1 = 0.391871 loss)\nI0820 02:04:03.846339 22726 solver.cpp:228] Iteration 58200, loss = 0.138006\nI0820 02:04:03.846381 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:04:03.846396 22726 solver.cpp:244]     Train net output #1: loss = 0.138006 (* 1 = 0.138006 loss)\nI0820 02:04:03.925827 22726 sgd_solver.cpp:166] Iteration 58200, lr = 1.455\nI0820 02:06:21.983026 22726 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0820 02:07:47.143400 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88764\nI0820 02:07:47.143754 22726 solver.cpp:404]     Test net output #1: loss = 0.386905 (* 1 = 0.386905 loss)\nI0820 02:07:48.476069 22726 solver.cpp:228] Iteration 58300, loss = 0.169811\nI0820 02:07:48.476111 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 02:07:48.476126 22726 solver.cpp:244]     Train net output #1: loss = 0.169811 (* 1 = 0.169811 loss)\nI0820 02:07:48.555510 22726 sgd_solver.cpp:166] Iteration 58300, lr = 1.4575\nI0820 02:10:06.559825 22726 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0820 02:11:31.717880 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0820 02:11:31.718273 22726 solver.cpp:404]     Test net output #1: loss = 0.380616 (* 1 = 0.380616 loss)\nI0820 02:11:33.050768 22726 solver.cpp:228] Iteration 58400, loss = 0.137581\nI0820 02:11:33.050807 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:11:33.050823 22726 solver.cpp:244]     Train net output #1: loss = 0.137581 (* 1 = 
0.137581 loss)\nI0820 02:11:33.129827 22726 sgd_solver.cpp:166] Iteration 58400, lr = 1.46\nI0820 02:13:51.134392 22726 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0820 02:15:16.290145 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8766\nI0820 02:15:16.290503 22726 solver.cpp:404]     Test net output #1: loss = 0.404754 (* 1 = 0.404754 loss)\nI0820 02:15:17.622860 22726 solver.cpp:228] Iteration 58500, loss = 0.138006\nI0820 02:15:17.622902 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 02:15:17.622918 22726 solver.cpp:244]     Train net output #1: loss = 0.138005 (* 1 = 0.138005 loss)\nI0820 02:15:17.704116 22726 sgd_solver.cpp:166] Iteration 58500, lr = 1.4625\nI0820 02:17:35.717067 22726 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0820 02:19:00.866569 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88572\nI0820 02:19:00.866940 22726 solver.cpp:404]     Test net output #1: loss = 0.406006 (* 1 = 0.406006 loss)\nI0820 02:19:02.199038 22726 solver.cpp:228] Iteration 58600, loss = 0.116335\nI0820 02:19:02.199080 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 02:19:02.199095 22726 solver.cpp:244]     Train net output #1: loss = 0.116335 (* 1 = 0.116335 loss)\nI0820 02:19:02.283836 22726 sgd_solver.cpp:166] Iteration 58600, lr = 1.465\nI0820 02:21:20.514932 22726 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0820 02:22:45.658890 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0820 02:22:45.659241 22726 solver.cpp:404]     Test net output #1: loss = 0.389082 (* 1 = 0.389082 loss)\nI0820 02:22:46.991183 22726 solver.cpp:228] Iteration 58700, loss = 0.125212\nI0820 02:22:46.991221 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 02:22:46.991237 22726 solver.cpp:244]     Train net output #1: loss = 0.125212 (* 1 = 0.125212 loss)\nI0820 02:22:47.073644 22726 sgd_solver.cpp:166] Iteration 58700, lr = 1.4675\nI0820 02:25:05.039978 22726 
solver.cpp:337] Iteration 58800, Testing net (#0)\nI0820 02:26:30.196658 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0820 02:26:30.197024 22726 solver.cpp:404]     Test net output #1: loss = 0.399004 (* 1 = 0.399004 loss)\nI0820 02:26:31.530427 22726 solver.cpp:228] Iteration 58800, loss = 0.210868\nI0820 02:26:31.530469 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 02:26:31.530485 22726 solver.cpp:244]     Train net output #1: loss = 0.210868 (* 1 = 0.210868 loss)\nI0820 02:26:31.612395 22726 sgd_solver.cpp:166] Iteration 58800, lr = 1.47\nI0820 02:28:49.520449 22726 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0820 02:30:14.689785 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0820 02:30:14.690129 22726 solver.cpp:404]     Test net output #1: loss = 0.437269 (* 1 = 0.437269 loss)\nI0820 02:30:16.023102 22726 solver.cpp:228] Iteration 58900, loss = 0.138162\nI0820 02:30:16.023144 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:30:16.023159 22726 solver.cpp:244]     Train net output #1: loss = 0.138161 (* 1 = 0.138161 loss)\nI0820 02:30:16.102190 22726 sgd_solver.cpp:166] Iteration 58900, lr = 1.4725\nI0820 02:32:34.032699 22726 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0820 02:33:59.193333 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 02:33:59.193713 22726 solver.cpp:404]     Test net output #1: loss = 0.42072 (* 1 = 0.42072 loss)\nI0820 02:34:00.527487 22726 solver.cpp:228] Iteration 59000, loss = 0.109644\nI0820 02:34:00.527530 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 02:34:00.527546 22726 solver.cpp:244]     Train net output #1: loss = 0.109644 (* 1 = 0.109644 loss)\nI0820 02:34:00.606431 22726 sgd_solver.cpp:166] Iteration 59000, lr = 1.475\nI0820 02:36:18.606854 22726 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0820 02:37:43.765103 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.8874\nI0820 02:37:43.765467 22726 solver.cpp:404]     Test net output #1: loss = 0.395035 (* 1 = 0.395035 loss)\nI0820 02:37:45.097811 22726 solver.cpp:228] Iteration 59100, loss = 0.0725888\nI0820 02:37:45.097852 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:37:45.097868 22726 solver.cpp:244]     Train net output #1: loss = 0.0725886 (* 1 = 0.0725886 loss)\nI0820 02:37:45.178289 22726 sgd_solver.cpp:166] Iteration 59100, lr = 1.4775\nI0820 02:40:03.273576 22726 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0820 02:41:28.438509 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88372\nI0820 02:41:28.438889 22726 solver.cpp:404]     Test net output #1: loss = 0.396527 (* 1 = 0.396527 loss)\nI0820 02:41:29.771425 22726 solver.cpp:228] Iteration 59200, loss = 0.19297\nI0820 02:41:29.771467 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:41:29.771482 22726 solver.cpp:244]     Train net output #1: loss = 0.19297 (* 1 = 0.19297 loss)\nI0820 02:41:29.849756 22726 sgd_solver.cpp:166] Iteration 59200, lr = 1.48\nI0820 02:43:47.906123 22726 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0820 02:45:13.060501 22726 solver.cpp:404]     Test net output #0: accuracy = 0.871921\nI0820 02:45:13.060861 22726 solver.cpp:404]     Test net output #1: loss = 0.434196 (* 1 = 0.434196 loss)\nI0820 02:45:14.393659 22726 solver.cpp:228] Iteration 59300, loss = 0.22989\nI0820 02:45:14.393699 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:45:14.393714 22726 solver.cpp:244]     Train net output #1: loss = 0.229889 (* 1 = 0.229889 loss)\nI0820 02:45:14.478476 22726 sgd_solver.cpp:166] Iteration 59300, lr = 1.4825\nI0820 02:47:32.529681 22726 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0820 02:48:57.687347 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0820 02:48:57.687700 22726 solver.cpp:404]     Test net output #1: loss = 0.413666 (* 1 = 0.413666 loss)\nI0820 
02:48:59.020720 22726 solver.cpp:228] Iteration 59400, loss = 0.0367935\nI0820 02:48:59.020762 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 02:48:59.020777 22726 solver.cpp:244]     Train net output #1: loss = 0.0367933 (* 1 = 0.0367933 loss)\nI0820 02:48:59.102706 22726 sgd_solver.cpp:166] Iteration 59400, lr = 1.485\nI0820 02:51:17.224998 22726 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0820 02:52:42.375972 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8858\nI0820 02:52:42.376348 22726 solver.cpp:404]     Test net output #1: loss = 0.398849 (* 1 = 0.398849 loss)\nI0820 02:52:43.709484 22726 solver.cpp:228] Iteration 59500, loss = 0.10581\nI0820 02:52:43.709525 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:52:43.709542 22726 solver.cpp:244]     Train net output #1: loss = 0.105809 (* 1 = 0.105809 loss)\nI0820 02:52:43.786854 22726 sgd_solver.cpp:166] Iteration 59500, lr = 1.4875\nI0820 02:55:01.903529 22726 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0820 02:56:27.062863 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88832\nI0820 02:56:27.063226 22726 solver.cpp:404]     Test net output #1: loss = 0.386465 (* 1 = 0.386465 loss)\nI0820 02:56:28.396224 22726 solver.cpp:228] Iteration 59600, loss = 0.148858\nI0820 02:56:28.396265 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:56:28.396280 22726 solver.cpp:244]     Train net output #1: loss = 0.148858 (* 1 = 0.148858 loss)\nI0820 02:56:28.478121 22726 sgd_solver.cpp:166] Iteration 59600, lr = 1.49\nI0820 02:58:46.664554 22726 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0820 03:00:11.821959 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87676\nI0820 03:00:11.822310 22726 solver.cpp:404]     Test net output #1: loss = 0.402279 (* 1 = 0.402279 loss)\nI0820 03:00:13.154952 22726 solver.cpp:228] Iteration 59700, loss = 0.116229\nI0820 03:00:13.154994 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0820 03:00:13.155009 22726 solver.cpp:244]     Train net output #1: loss = 0.116229 (* 1 = 0.116229 loss)\nI0820 03:00:13.236588 22726 sgd_solver.cpp:166] Iteration 59700, lr = 1.4925\nI0820 03:02:31.377920 22726 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0820 03:03:56.539620 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88616\nI0820 03:03:56.540024 22726 solver.cpp:404]     Test net output #1: loss = 0.39667 (* 1 = 0.39667 loss)\nI0820 03:03:57.872508 22726 solver.cpp:228] Iteration 59800, loss = 0.0637384\nI0820 03:03:57.872547 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 03:03:57.872563 22726 solver.cpp:244]     Train net output #1: loss = 0.0637381 (* 1 = 0.0637381 loss)\nI0820 03:03:57.951575 22726 sgd_solver.cpp:166] Iteration 59800, lr = 1.495\nI0820 03:06:16.166294 22726 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0820 03:07:41.338675 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88264\nI0820 03:07:41.339072 22726 solver.cpp:404]     Test net output #1: loss = 0.400645 (* 1 = 0.400645 loss)\nI0820 03:07:42.672068 22726 solver.cpp:228] Iteration 59900, loss = 0.0938828\nI0820 03:07:42.672106 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 03:07:42.672122 22726 solver.cpp:244]     Train net output #1: loss = 0.0938826 (* 1 = 0.0938826 loss)\nI0820 03:07:42.758509 22726 sgd_solver.cpp:166] Iteration 59900, lr = 1.4975\nI0820 03:10:00.886862 22726 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0820 03:11:26.066251 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0820 03:11:26.066597 22726 solver.cpp:404]     Test net output #1: loss = 0.394165 (* 1 = 0.394165 loss)\nI0820 03:11:27.399399 22726 solver.cpp:228] Iteration 60000, loss = 0.1337\nI0820 03:11:27.399438 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:11:27.399454 22726 solver.cpp:244]     Train net output #1: loss = 0.133699 (* 1 
= 0.133699 loss)\nI0820 03:11:27.478382 22726 sgd_solver.cpp:166] Iteration 60000, lr = 1.5\nI0820 03:13:45.744779 22726 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0820 03:15:10.898082 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88596\nI0820 03:15:10.898427 22726 solver.cpp:404]     Test net output #1: loss = 0.402531 (* 1 = 0.402531 loss)\nI0820 03:15:12.231014 22726 solver.cpp:228] Iteration 60100, loss = 0.13332\nI0820 03:15:12.231061 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:15:12.231077 22726 solver.cpp:244]     Train net output #1: loss = 0.13332 (* 1 = 0.13332 loss)\nI0820 03:15:12.315474 22726 sgd_solver.cpp:166] Iteration 60100, lr = 1.5025\nI0820 03:17:30.481981 22726 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0820 03:18:55.642320 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0820 03:18:55.642662 22726 solver.cpp:404]     Test net output #1: loss = 0.399655 (* 1 = 0.399655 loss)\nI0820 03:18:56.975263 22726 solver.cpp:228] Iteration 60200, loss = 0.0631312\nI0820 03:18:56.975304 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 03:18:56.975319 22726 solver.cpp:244]     Train net output #1: loss = 0.063131 (* 1 = 0.063131 loss)\nI0820 03:18:57.058598 22726 sgd_solver.cpp:166] Iteration 60200, lr = 1.505\nI0820 03:21:15.176882 22726 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0820 03:22:40.319994 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0820 03:22:40.320371 22726 solver.cpp:404]     Test net output #1: loss = 0.417477 (* 1 = 0.417477 loss)\nI0820 03:22:41.652940 22726 solver.cpp:228] Iteration 60300, loss = 0.141535\nI0820 03:22:41.652982 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:22:41.652997 22726 solver.cpp:244]     Train net output #1: loss = 0.141534 (* 1 = 0.141534 loss)\nI0820 03:22:41.731518 22726 sgd_solver.cpp:166] Iteration 60300, lr = 1.5075\nI0820 03:24:59.776047 22726 
solver.cpp:337] Iteration 60400, Testing net (#0)\nI0820 03:26:24.937944 22726 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0820 03:26:24.938313 22726 solver.cpp:404]     Test net output #1: loss = 0.432127 (* 1 = 0.432127 loss)\nI0820 03:26:26.270627 22726 solver.cpp:228] Iteration 60400, loss = 0.13639\nI0820 03:26:26.270669 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:26:26.270684 22726 solver.cpp:244]     Train net output #1: loss = 0.13639 (* 1 = 0.13639 loss)\nI0820 03:26:26.347101 22726 sgd_solver.cpp:166] Iteration 60400, lr = 1.51\nI0820 03:28:44.290637 22726 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0820 03:30:09.467911 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0820 03:30:09.468260 22726 solver.cpp:404]     Test net output #1: loss = 0.426144 (* 1 = 0.426144 loss)\nI0820 03:30:10.800688 22726 solver.cpp:228] Iteration 60500, loss = 0.164964\nI0820 03:30:10.800729 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 03:30:10.800745 22726 solver.cpp:244]     Train net output #1: loss = 0.164963 (* 1 = 0.164963 loss)\nI0820 03:30:10.882297 22726 sgd_solver.cpp:166] Iteration 60500, lr = 1.5125\nI0820 03:32:28.857517 22726 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0820 03:33:54.035127 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88088\nI0820 03:33:54.035519 22726 solver.cpp:404]     Test net output #1: loss = 0.40723 (* 1 = 0.40723 loss)\nI0820 03:33:55.367810 22726 solver.cpp:228] Iteration 60600, loss = 0.176044\nI0820 03:33:55.367853 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:33:55.367869 22726 solver.cpp:244]     Train net output #1: loss = 0.176043 (* 1 = 0.176043 loss)\nI0820 03:33:55.449483 22726 sgd_solver.cpp:166] Iteration 60600, lr = 1.515\nI0820 03:36:13.548254 22726 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0820 03:37:38.721864 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.8838\nI0820 03:37:38.722263 22726 solver.cpp:404]     Test net output #1: loss = 0.398261 (* 1 = 0.398261 loss)\nI0820 03:37:40.054975 22726 solver.cpp:228] Iteration 60700, loss = 0.12888\nI0820 03:37:40.055022 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:37:40.055037 22726 solver.cpp:244]     Train net output #1: loss = 0.12888 (* 1 = 0.12888 loss)\nI0820 03:37:40.131762 22726 sgd_solver.cpp:166] Iteration 60700, lr = 1.5175\nI0820 03:39:58.114142 22726 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0820 03:41:23.269593 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8884\nI0820 03:41:23.269960 22726 solver.cpp:404]     Test net output #1: loss = 0.396044 (* 1 = 0.396044 loss)\nI0820 03:41:24.602964 22726 solver.cpp:228] Iteration 60800, loss = 0.13525\nI0820 03:41:24.603004 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 03:41:24.603024 22726 solver.cpp:244]     Train net output #1: loss = 0.13525 (* 1 = 0.13525 loss)\nI0820 03:41:24.682914 22726 sgd_solver.cpp:166] Iteration 60800, lr = 1.52\nI0820 03:43:42.704174 22726 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0820 03:45:07.863301 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0820 03:45:07.863656 22726 solver.cpp:404]     Test net output #1: loss = 0.391748 (* 1 = 0.391748 loss)\nI0820 03:45:09.196235 22726 solver.cpp:228] Iteration 60900, loss = 0.128603\nI0820 03:45:09.196277 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:45:09.196293 22726 solver.cpp:244]     Train net output #1: loss = 0.128603 (* 1 = 0.128603 loss)\nI0820 03:45:09.276861 22726 sgd_solver.cpp:166] Iteration 60900, lr = 1.5225\nI0820 03:47:27.286680 22726 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0820 03:48:52.441184 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87572\nI0820 03:48:52.441567 22726 solver.cpp:404]     Test net output #1: loss = 0.437781 (* 1 = 0.437781 loss)\nI0820 
03:48:53.773840 22726 solver.cpp:228] Iteration 61000, loss = 0.0966251\nI0820 03:48:53.773882 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:48:53.773897 22726 solver.cpp:244]     Train net output #1: loss = 0.0966249 (* 1 = 0.0966249 loss)\nI0820 03:48:53.854050 22726 sgd_solver.cpp:166] Iteration 61000, lr = 1.525\nI0820 03:51:11.898658 22726 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0820 03:52:37.044821 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87512\nI0820 03:52:37.045207 22726 solver.cpp:404]     Test net output #1: loss = 0.420797 (* 1 = 0.420797 loss)\nI0820 03:52:38.377898 22726 solver.cpp:228] Iteration 61100, loss = 0.120016\nI0820 03:52:38.377939 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:52:38.377955 22726 solver.cpp:244]     Train net output #1: loss = 0.120016 (* 1 = 0.120016 loss)\nI0820 03:52:38.459136 22726 sgd_solver.cpp:166] Iteration 61100, lr = 1.5275\nI0820 03:54:56.427196 22726 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0820 03:56:21.633625 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87596\nI0820 03:56:21.634021 22726 solver.cpp:404]     Test net output #1: loss = 0.419878 (* 1 = 0.419878 loss)\nI0820 03:56:22.966497 22726 solver.cpp:228] Iteration 61200, loss = 0.142731\nI0820 03:56:22.966542 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:56:22.966557 22726 solver.cpp:244]     Train net output #1: loss = 0.142731 (* 1 = 0.142731 loss)\nI0820 03:56:23.056573 22726 sgd_solver.cpp:166] Iteration 61200, lr = 1.53\nI0820 03:58:41.008723 22726 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0820 04:00:06.163970 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0820 04:00:06.164353 22726 solver.cpp:404]     Test net output #1: loss = 0.406493 (* 1 = 0.406493 loss)\nI0820 04:00:07.497972 22726 solver.cpp:228] Iteration 61300, loss = 0.228537\nI0820 04:00:07.498020 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.912\nI0820 04:00:07.498034 22726 solver.cpp:244]     Train net output #1: loss = 0.228537 (* 1 = 0.228537 loss)\nI0820 04:00:07.577292 22726 sgd_solver.cpp:166] Iteration 61300, lr = 1.5325\nI0820 04:02:25.590622 22726 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0820 04:03:50.742385 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8886\nI0820 04:03:50.742929 22726 solver.cpp:404]     Test net output #1: loss = 0.394783 (* 1 = 0.394783 loss)\nI0820 04:03:52.076628 22726 solver.cpp:228] Iteration 61400, loss = 0.0761213\nI0820 04:03:52.076670 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 04:03:52.076686 22726 solver.cpp:244]     Train net output #1: loss = 0.0761211 (* 1 = 0.0761211 loss)\nI0820 04:03:52.152261 22726 sgd_solver.cpp:166] Iteration 61400, lr = 1.535\nI0820 04:06:10.162024 22726 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0820 04:07:35.326133 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87628\nI0820 04:07:35.326516 22726 solver.cpp:404]     Test net output #1: loss = 0.415467 (* 1 = 0.415467 loss)\nI0820 04:07:36.659884 22726 solver.cpp:228] Iteration 61500, loss = 0.234714\nI0820 04:07:36.659927 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:07:36.659942 22726 solver.cpp:244]     Train net output #1: loss = 0.234714 (* 1 = 0.234714 loss)\nI0820 04:07:36.742605 22726 sgd_solver.cpp:166] Iteration 61500, lr = 1.5375\nI0820 04:09:54.763947 22726 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0820 04:11:19.939563 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0820 04:11:19.939939 22726 solver.cpp:404]     Test net output #1: loss = 0.387178 (* 1 = 0.387178 loss)\nI0820 04:11:21.273928 22726 solver.cpp:228] Iteration 61600, loss = 0.0903065\nI0820 04:11:21.273972 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 04:11:21.273991 22726 solver.cpp:244]     Train net output #1: loss = 0.0903062 (* 
1 = 0.0903062 loss)\nI0820 04:11:21.354589 22726 sgd_solver.cpp:166] Iteration 61600, lr = 1.54\nI0820 04:13:39.538854 22726 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0820 04:15:04.702901 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88364\nI0820 04:15:04.703290 22726 solver.cpp:404]     Test net output #1: loss = 0.409348 (* 1 = 0.409348 loss)\nI0820 04:15:06.041566 22726 solver.cpp:228] Iteration 61700, loss = 0.0599699\nI0820 04:15:06.041610 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 04:15:06.041635 22726 solver.cpp:244]     Train net output #1: loss = 0.0599696 (* 1 = 0.0599696 loss)\nI0820 04:15:06.115365 22726 sgd_solver.cpp:166] Iteration 61700, lr = 1.5425\nI0820 04:17:24.117496 22726 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0820 04:18:49.318608 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87848\nI0820 04:18:49.319000 22726 solver.cpp:404]     Test net output #1: loss = 0.412733 (* 1 = 0.412733 loss)\nI0820 04:18:50.651935 22726 solver.cpp:228] Iteration 61800, loss = 0.193261\nI0820 04:18:50.651978 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:18:50.651994 22726 solver.cpp:244]     Train net output #1: loss = 0.193261 (* 1 = 0.193261 loss)\nI0820 04:18:50.732553 22726 sgd_solver.cpp:166] Iteration 61800, lr = 1.545\nI0820 04:21:08.661928 22726 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0820 04:22:33.849246 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87892\nI0820 04:22:33.849627 22726 solver.cpp:404]     Test net output #1: loss = 0.408584 (* 1 = 0.408584 loss)\nI0820 04:22:35.183991 22726 solver.cpp:228] Iteration 61900, loss = 0.203259\nI0820 04:22:35.184033 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:22:35.184056 22726 solver.cpp:244]     Train net output #1: loss = 0.203259 (* 1 = 0.203259 loss)\nI0820 04:22:35.263749 22726 sgd_solver.cpp:166] Iteration 61900, lr = 1.5475\nI0820 04:24:53.123004 
22726 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0820 04:26:18.300070 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87796\nI0820 04:26:18.300439 22726 solver.cpp:404]     Test net output #1: loss = 0.426934 (* 1 = 0.426934 loss)\nI0820 04:26:19.633167 22726 solver.cpp:228] Iteration 62000, loss = 0.1338\nI0820 04:26:19.633213 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:26:19.633237 22726 solver.cpp:244]     Train net output #1: loss = 0.1338 (* 1 = 0.1338 loss)\nI0820 04:26:19.713553 22726 sgd_solver.cpp:166] Iteration 62000, lr = 1.55\nI0820 04:28:37.524843 22726 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0820 04:30:02.784996 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0820 04:30:02.785359 22726 solver.cpp:404]     Test net output #1: loss = 0.390586 (* 1 = 0.390586 loss)\nI0820 04:30:04.118343 22726 solver.cpp:228] Iteration 62100, loss = 0.165376\nI0820 04:30:04.118391 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:30:04.118414 22726 solver.cpp:244]     Train net output #1: loss = 0.165376 (* 1 = 0.165376 loss)\nI0820 04:30:04.197000 22726 sgd_solver.cpp:166] Iteration 62100, lr = 1.5525\nI0820 04:32:22.052711 22726 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0820 04:33:47.318702 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8796\nI0820 04:33:47.319075 22726 solver.cpp:404]     Test net output #1: loss = 0.412631 (* 1 = 0.412631 loss)\nI0820 04:33:48.653524 22726 solver.cpp:228] Iteration 62200, loss = 0.183733\nI0820 04:33:48.653570 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:33:48.653594 22726 solver.cpp:244]     Train net output #1: loss = 0.183733 (* 1 = 0.183733 loss)\nI0820 04:33:48.733916 22726 sgd_solver.cpp:166] Iteration 62200, lr = 1.555\nI0820 04:36:06.588551 22726 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0820 04:37:31.858028 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.88292\nI0820 04:37:31.858422 22726 solver.cpp:404]     Test net output #1: loss = 0.404147 (* 1 = 0.404147 loss)\nI0820 04:37:33.191009 22726 solver.cpp:228] Iteration 62300, loss = 0.173183\nI0820 04:37:33.191064 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 04:37:33.191089 22726 solver.cpp:244]     Train net output #1: loss = 0.173183 (* 1 = 0.173183 loss)\nI0820 04:37:33.269871 22726 sgd_solver.cpp:166] Iteration 62300, lr = 1.5575\nI0820 04:39:51.097128 22726 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0820 04:41:16.384189 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881041\nI0820 04:41:16.384572 22726 solver.cpp:404]     Test net output #1: loss = 0.406968 (* 1 = 0.406968 loss)\nI0820 04:41:17.718972 22726 solver.cpp:228] Iteration 62400, loss = 0.1319\nI0820 04:41:17.719018 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 04:41:17.719040 22726 solver.cpp:244]     Train net output #1: loss = 0.131899 (* 1 = 0.131899 loss)\nI0820 04:41:17.799274 22726 sgd_solver.cpp:166] Iteration 62400, lr = 1.56\nI0820 04:43:35.605612 22726 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0820 04:45:00.857882 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88716\nI0820 04:45:00.858285 22726 solver.cpp:404]     Test net output #1: loss = 0.381664 (* 1 = 0.381664 loss)\nI0820 04:45:02.190680 22726 solver.cpp:228] Iteration 62500, loss = 0.0613996\nI0820 04:45:02.190717 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 04:45:02.190742 22726 solver.cpp:244]     Train net output #1: loss = 0.0613993 (* 1 = 0.0613993 loss)\nI0820 04:45:02.269320 22726 sgd_solver.cpp:166] Iteration 62500, lr = 1.5625\nI0820 04:47:20.095067 22726 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0820 04:48:45.395287 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87844\nI0820 04:48:45.395674 22726 solver.cpp:404]     Test net output #1: loss = 0.427667 (* 1 = 0.427667 loss)\nI0820 
04:48:46.728121 22726 solver.cpp:228] Iteration 62600, loss = 0.159749\nI0820 04:48:46.728162 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:48:46.728184 22726 solver.cpp:244]     Train net output #1: loss = 0.159748 (* 1 = 0.159748 loss)\nI0820 04:48:46.807621 22726 sgd_solver.cpp:166] Iteration 62600, lr = 1.565\nI0820 04:51:04.664527 22726 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0820 04:52:29.922837 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0820 04:52:29.923220 22726 solver.cpp:404]     Test net output #1: loss = 0.402875 (* 1 = 0.402875 loss)\nI0820 04:52:31.255646 22726 solver.cpp:228] Iteration 62700, loss = 0.151584\nI0820 04:52:31.255702 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 04:52:31.255725 22726 solver.cpp:244]     Train net output #1: loss = 0.151584 (* 1 = 0.151584 loss)\nI0820 04:52:31.333374 22726 sgd_solver.cpp:166] Iteration 62700, lr = 1.5675\nI0820 04:54:49.154880 22726 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0820 04:56:14.326081 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0820 04:56:14.326483 22726 solver.cpp:404]     Test net output #1: loss = 0.409366 (* 1 = 0.409366 loss)\nI0820 04:56:15.657701 22726 solver.cpp:228] Iteration 62800, loss = 0.181961\nI0820 04:56:15.657758 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:56:15.657783 22726 solver.cpp:244]     Train net output #1: loss = 0.181961 (* 1 = 0.181961 loss)\nI0820 04:56:15.737934 22726 sgd_solver.cpp:166] Iteration 62800, lr = 1.57\nI0820 04:58:33.585772 22726 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0820 04:59:58.793105 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0820 04:59:58.793504 22726 solver.cpp:404]     Test net output #1: loss = 0.40094 (* 1 = 0.40094 loss)\nI0820 05:00:00.123435 22726 solver.cpp:228] Iteration 62900, loss = 0.132862\nI0820 05:00:00.123489 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.936\nI0820 05:00:00.123514 22726 solver.cpp:244]     Train net output #1: loss = 0.132861 (* 1 = 0.132861 loss)\nI0820 05:00:00.202306 22726 sgd_solver.cpp:166] Iteration 62900, lr = 1.5725\nI0820 05:02:18.017191 22726 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0820 05:03:43.230201 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87088\nI0820 05:03:43.230607 22726 solver.cpp:404]     Test net output #1: loss = 0.45242 (* 1 = 0.45242 loss)\nI0820 05:03:44.560967 22726 solver.cpp:228] Iteration 63000, loss = 0.155582\nI0820 05:03:44.561023 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 05:03:44.561048 22726 solver.cpp:244]     Train net output #1: loss = 0.155582 (* 1 = 0.155582 loss)\nI0820 05:03:44.643579 22726 sgd_solver.cpp:166] Iteration 63000, lr = 1.575\nI0820 05:06:02.523557 22726 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0820 05:07:27.729682 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0820 05:07:27.730047 22726 solver.cpp:404]     Test net output #1: loss = 0.417509 (* 1 = 0.417509 loss)\nI0820 05:07:29.059741 22726 solver.cpp:228] Iteration 63100, loss = 0.141484\nI0820 05:07:29.059797 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:07:29.059819 22726 solver.cpp:244]     Train net output #1: loss = 0.141484 (* 1 = 0.141484 loss)\nI0820 05:07:29.141091 22726 sgd_solver.cpp:166] Iteration 63100, lr = 1.5775\nI0820 05:09:46.791815 22726 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0820 05:11:12.053961 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87796\nI0820 05:11:12.054337 22726 solver.cpp:404]     Test net output #1: loss = 0.428545 (* 1 = 0.428545 loss)\nI0820 05:11:13.384816 22726 solver.cpp:228] Iteration 63200, loss = 0.125459\nI0820 05:11:13.384866 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:11:13.384891 22726 solver.cpp:244]     Train net output #1: loss = 0.125459 (* 1 = 
0.125459 loss)\nI0820 05:11:13.466110 22726 sgd_solver.cpp:166] Iteration 63200, lr = 1.58\nI0820 05:13:31.159294 22726 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0820 05:14:56.380903 22726 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0820 05:14:56.381295 22726 solver.cpp:404]     Test net output #1: loss = 0.42414 (* 1 = 0.42414 loss)\nI0820 05:14:57.711300 22726 solver.cpp:228] Iteration 63300, loss = 0.192408\nI0820 05:14:57.711350 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:14:57.711376 22726 solver.cpp:244]     Train net output #1: loss = 0.192408 (* 1 = 0.192408 loss)\nI0820 05:14:57.790307 22726 sgd_solver.cpp:166] Iteration 63300, lr = 1.5825\nI0820 05:17:15.485517 22726 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0820 05:18:40.751394 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87904\nI0820 05:18:40.751793 22726 solver.cpp:404]     Test net output #1: loss = 0.42716 (* 1 = 0.42716 loss)\nI0820 05:18:42.082968 22726 solver.cpp:228] Iteration 63400, loss = 0.0684592\nI0820 05:18:42.083016 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:18:42.083039 22726 solver.cpp:244]     Train net output #1: loss = 0.068459 (* 1 = 0.068459 loss)\nI0820 05:18:42.163925 22726 sgd_solver.cpp:166] Iteration 63400, lr = 1.585\nI0820 05:20:59.830183 22726 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0820 05:22:25.084954 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0820 05:22:25.085335 22726 solver.cpp:404]     Test net output #1: loss = 0.404726 (* 1 = 0.404726 loss)\nI0820 05:22:26.414651 22726 solver.cpp:228] Iteration 63500, loss = 0.122809\nI0820 05:22:26.414700 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 05:22:26.414722 22726 solver.cpp:244]     Train net output #1: loss = 0.122809 (* 1 = 0.122809 loss)\nI0820 05:22:26.493147 22726 sgd_solver.cpp:166] Iteration 63500, lr = 1.5875\nI0820 05:24:44.204813 22726 
solver.cpp:337] Iteration 63600, Testing net (#0)\nI0820 05:26:09.519579 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87628\nI0820 05:26:09.519968 22726 solver.cpp:404]     Test net output #1: loss = 0.420014 (* 1 = 0.420014 loss)\nI0820 05:26:10.851236 22726 solver.cpp:228] Iteration 63600, loss = 0.255481\nI0820 05:26:10.851289 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 05:26:10.851312 22726 solver.cpp:244]     Train net output #1: loss = 0.255481 (* 1 = 0.255481 loss)\nI0820 05:26:10.926952 22726 sgd_solver.cpp:166] Iteration 63600, lr = 1.59\nI0820 05:28:28.611040 22726 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0820 05:29:53.899813 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8784\nI0820 05:29:53.900180 22726 solver.cpp:404]     Test net output #1: loss = 0.417407 (* 1 = 0.417407 loss)\nI0820 05:29:55.232194 22726 solver.cpp:228] Iteration 63700, loss = 0.253416\nI0820 05:29:55.232252 22726 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 05:29:55.232276 22726 solver.cpp:244]     Train net output #1: loss = 0.253416 (* 1 = 0.253416 loss)\nI0820 05:29:55.310853 22726 sgd_solver.cpp:166] Iteration 63700, lr = 1.5925\nI0820 05:32:13.075711 22726 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0820 05:33:38.400383 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0820 05:33:38.400773 22726 solver.cpp:404]     Test net output #1: loss = 0.424146 (* 1 = 0.424146 loss)\nI0820 05:33:39.732096 22726 solver.cpp:228] Iteration 63800, loss = 0.0929459\nI0820 05:33:39.732146 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 05:33:39.732162 22726 solver.cpp:244]     Train net output #1: loss = 0.0929456 (* 1 = 0.0929456 loss)\nI0820 05:33:39.811252 22726 sgd_solver.cpp:166] Iteration 63800, lr = 1.595\nI0820 05:35:57.590256 22726 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0820 05:37:22.762150 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.89036\nI0820 05:37:22.762512 22726 solver.cpp:404]     Test net output #1: loss = 0.377229 (* 1 = 0.377229 loss)\nI0820 05:37:24.093082 22726 solver.cpp:228] Iteration 63900, loss = 0.0711785\nI0820 05:37:24.093132 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:37:24.093149 22726 solver.cpp:244]     Train net output #1: loss = 0.0711782 (* 1 = 0.0711782 loss)\nI0820 05:37:24.179925 22726 sgd_solver.cpp:166] Iteration 63900, lr = 1.5975\nI0820 05:39:41.935101 22726 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0820 05:41:07.103060 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87636\nI0820 05:41:07.103386 22726 solver.cpp:404]     Test net output #1: loss = 0.420412 (* 1 = 0.420412 loss)\nI0820 05:41:08.434054 22726 solver.cpp:228] Iteration 64000, loss = 0.133204\nI0820 05:41:08.434105 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:41:08.434121 22726 solver.cpp:244]     Train net output #1: loss = 0.133204 (* 1 = 0.133204 loss)\nI0820 05:41:08.514098 22726 sgd_solver.cpp:166] Iteration 64000, lr = 1.6\nI0820 05:43:26.259279 22726 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0820 05:44:51.430068 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87704\nI0820 05:44:51.430459 22726 solver.cpp:404]     Test net output #1: loss = 0.425465 (* 1 = 0.425465 loss)\nI0820 05:44:52.761036 22726 solver.cpp:228] Iteration 64100, loss = 0.153454\nI0820 05:44:52.761086 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:44:52.761102 22726 solver.cpp:244]     Train net output #1: loss = 0.153454 (* 1 = 0.153454 loss)\nI0820 05:44:52.839087 22726 sgd_solver.cpp:166] Iteration 64100, lr = 1.6025\nI0820 05:47:10.673375 22726 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0820 05:48:35.856688 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88388\nI0820 05:48:35.857074 22726 solver.cpp:404]     Test net output #1: loss = 0.390683 (* 1 = 0.390683 loss)\nI0820 
05:48:37.188225 22726 solver.cpp:228] Iteration 64200, loss = 0.146805\nI0820 05:48:37.188277 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:48:37.188293 22726 solver.cpp:244]     Train net output #1: loss = 0.146805 (* 1 = 0.146805 loss)\nI0820 05:48:37.266007 22726 sgd_solver.cpp:166] Iteration 64200, lr = 1.605\nI0820 05:50:55.094462 22726 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0820 05:52:20.266275 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8792\nI0820 05:52:20.266625 22726 solver.cpp:404]     Test net output #1: loss = 0.395541 (* 1 = 0.395541 loss)\nI0820 05:52:21.597481 22726 solver.cpp:228] Iteration 64300, loss = 0.114525\nI0820 05:52:21.597532 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 05:52:21.597548 22726 solver.cpp:244]     Train net output #1: loss = 0.114524 (* 1 = 0.114524 loss)\nI0820 05:52:21.676666 22726 sgd_solver.cpp:166] Iteration 64300, lr = 1.6075\nI0820 05:54:39.468322 22726 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0820 05:56:04.640415 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88824\nI0820 05:56:04.640816 22726 solver.cpp:404]     Test net output #1: loss = 0.388502 (* 1 = 0.388502 loss)\nI0820 05:56:05.971297 22726 solver.cpp:228] Iteration 64400, loss = 0.234167\nI0820 05:56:05.971348 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 05:56:05.971364 22726 solver.cpp:244]     Train net output #1: loss = 0.234167 (* 1 = 0.234167 loss)\nI0820 05:56:06.052433 22726 sgd_solver.cpp:166] Iteration 64400, lr = 1.61\nI0820 05:58:23.887275 22726 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0820 05:59:49.060870 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88708\nI0820 05:59:49.061239 22726 solver.cpp:404]     Test net output #1: loss = 0.382594 (* 1 = 0.382594 loss)\nI0820 05:59:50.390802 22726 solver.cpp:228] Iteration 64500, loss = 0.0923844\nI0820 05:59:50.390854 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.952\nI0820 05:59:50.390872 22726 solver.cpp:244]     Train net output #1: loss = 0.0923841 (* 1 = 0.0923841 loss)\nI0820 05:59:50.468816 22726 sgd_solver.cpp:166] Iteration 64500, lr = 1.6125\nI0820 06:02:08.220481 22726 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0820 06:03:33.398494 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87464\nI0820 06:03:33.398859 22726 solver.cpp:404]     Test net output #1: loss = 0.428333 (* 1 = 0.428333 loss)\nI0820 06:03:34.728873 22726 solver.cpp:228] Iteration 64600, loss = 0.247256\nI0820 06:03:34.728924 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:03:34.728940 22726 solver.cpp:244]     Train net output #1: loss = 0.247255 (* 1 = 0.247255 loss)\nI0820 06:03:34.807265 22726 sgd_solver.cpp:166] Iteration 64600, lr = 1.615\nI0820 06:05:52.614117 22726 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0820 06:07:17.787418 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0820 06:07:17.787791 22726 solver.cpp:404]     Test net output #1: loss = 0.385586 (* 1 = 0.385586 loss)\nI0820 06:07:19.119303 22726 solver.cpp:228] Iteration 64700, loss = 0.0802152\nI0820 06:07:19.119354 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 06:07:19.119369 22726 solver.cpp:244]     Train net output #1: loss = 0.0802148 (* 1 = 0.0802148 loss)\nI0820 06:07:19.199789 22726 sgd_solver.cpp:166] Iteration 64700, lr = 1.6175\nI0820 06:09:36.995437 22726 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0820 06:11:02.217394 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0820 06:11:02.217761 22726 solver.cpp:404]     Test net output #1: loss = 0.375032 (* 1 = 0.375032 loss)\nI0820 06:11:03.549409 22726 solver.cpp:228] Iteration 64800, loss = 0.190994\nI0820 06:11:03.549463 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:11:03.549480 22726 solver.cpp:244]     Train net output #1: loss = 0.190994 (* 
1 = 0.190994 loss)\nI0820 06:11:03.628926 22726 sgd_solver.cpp:166] Iteration 64800, lr = 1.62\nI0820 06:13:21.350530 22726 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0820 06:14:46.592311 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0820 06:14:46.592701 22726 solver.cpp:404]     Test net output #1: loss = 0.383257 (* 1 = 0.383257 loss)\nI0820 06:14:47.923341 22726 solver.cpp:228] Iteration 64900, loss = 0.111141\nI0820 06:14:47.923393 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 06:14:47.923409 22726 solver.cpp:244]     Train net output #1: loss = 0.111141 (* 1 = 0.111141 loss)\nI0820 06:14:48.007994 22726 sgd_solver.cpp:166] Iteration 64900, lr = 1.6225\nI0820 06:17:05.785172 22726 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0820 06:18:31.045127 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 06:18:31.045500 22726 solver.cpp:404]     Test net output #1: loss = 0.404345 (* 1 = 0.404345 loss)\nI0820 06:18:32.376827 22726 solver.cpp:228] Iteration 65000, loss = 0.116409\nI0820 06:18:32.376875 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 06:18:32.376893 22726 solver.cpp:244]     Train net output #1: loss = 0.116408 (* 1 = 0.116408 loss)\nI0820 06:18:32.464107 22726 sgd_solver.cpp:166] Iteration 65000, lr = 1.625\nI0820 06:20:50.245543 22726 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0820 06:22:15.434896 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0820 06:22:15.435276 22726 solver.cpp:404]     Test net output #1: loss = 0.415409 (* 1 = 0.415409 loss)\nI0820 06:22:16.765992 22726 solver.cpp:228] Iteration 65100, loss = 0.134615\nI0820 06:22:16.766043 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:22:16.766059 22726 solver.cpp:244]     Train net output #1: loss = 0.134615 (* 1 = 0.134615 loss)\nI0820 06:22:16.842548 22726 sgd_solver.cpp:166] Iteration 65100, lr = 1.6275\nI0820 06:24:34.649077 22726 
solver.cpp:337] Iteration 65200, Testing net (#0)\nI0820 06:25:59.835182 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0820 06:25:59.835574 22726 solver.cpp:404]     Test net output #1: loss = 0.401332 (* 1 = 0.401332 loss)\nI0820 06:26:01.165333 22726 solver.cpp:228] Iteration 65200, loss = 0.151369\nI0820 06:26:01.165380 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:26:01.165396 22726 solver.cpp:244]     Train net output #1: loss = 0.151369 (* 1 = 0.151369 loss)\nI0820 06:26:01.243796 22726 sgd_solver.cpp:166] Iteration 65200, lr = 1.63\nI0820 06:28:18.895864 22726 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0820 06:29:44.122079 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88496\nI0820 06:29:44.122449 22726 solver.cpp:404]     Test net output #1: loss = 0.393083 (* 1 = 0.393083 loss)\nI0820 06:29:45.453315 22726 solver.cpp:228] Iteration 65300, loss = 0.0923935\nI0820 06:29:45.453361 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:29:45.453377 22726 solver.cpp:244]     Train net output #1: loss = 0.0923931 (* 1 = 0.0923931 loss)\nI0820 06:29:45.528563 22726 sgd_solver.cpp:166] Iteration 65300, lr = 1.6325\nI0820 06:32:03.351717 22726 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0820 06:33:28.564025 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87788\nI0820 06:33:28.564386 22726 solver.cpp:404]     Test net output #1: loss = 0.409564 (* 1 = 0.409564 loss)\nI0820 06:33:29.896004 22726 solver.cpp:228] Iteration 65400, loss = 0.134018\nI0820 06:33:29.896057 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:33:29.896073 22726 solver.cpp:244]     Train net output #1: loss = 0.134017 (* 1 = 0.134017 loss)\nI0820 06:33:29.971123 22726 sgd_solver.cpp:166] Iteration 65400, lr = 1.635\nI0820 06:35:47.906263 22726 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0820 06:37:13.108499 22726 solver.cpp:404]     Test net output #0: 
accuracy = 0.8804\nI0820 06:37:13.108865 22726 solver.cpp:404]     Test net output #1: loss = 0.413356 (* 1 = 0.413356 loss)\nI0820 06:37:14.440484 22726 solver.cpp:228] Iteration 65500, loss = 0.223986\nI0820 06:37:14.440536 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 06:37:14.440552 22726 solver.cpp:244]     Train net output #1: loss = 0.223986 (* 1 = 0.223986 loss)\nI0820 06:37:14.513988 22726 sgd_solver.cpp:166] Iteration 65500, lr = 1.6375\nI0820 06:39:32.276314 22726 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0820 06:40:57.521688 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0820 06:40:57.522040 22726 solver.cpp:404]     Test net output #1: loss = 0.412083 (* 1 = 0.412083 loss)\nI0820 06:40:58.852943 22726 solver.cpp:228] Iteration 65600, loss = 0.105468\nI0820 06:40:58.853000 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:40:58.853026 22726 solver.cpp:244]     Train net output #1: loss = 0.105467 (* 1 = 0.105467 loss)\nI0820 06:40:58.929328 22726 sgd_solver.cpp:166] Iteration 65600, lr = 1.64\nI0820 06:43:16.739189 22726 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0820 06:44:42.028314 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 06:44:42.028714 22726 solver.cpp:404]     Test net output #1: loss = 0.414843 (* 1 = 0.414843 loss)\nI0820 06:44:43.359930 22726 solver.cpp:228] Iteration 65700, loss = 0.121999\nI0820 06:44:43.359987 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:44:43.360011 22726 solver.cpp:244]     Train net output #1: loss = 0.121998 (* 1 = 0.121998 loss)\nI0820 06:44:43.436810 22726 sgd_solver.cpp:166] Iteration 65700, lr = 1.6425\nI0820 06:47:01.219977 22726 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0820 06:48:26.515615 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0820 06:48:26.516031 22726 solver.cpp:404]     Test net output #1: loss = 0.406113 (* 1 = 0.406113 
loss)\nI0820 06:48:27.848069 22726 solver.cpp:228] Iteration 65800, loss = 0.0959242\nI0820 06:48:27.848125 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 06:48:27.848150 22726 solver.cpp:244]     Train net output #1: loss = 0.0959238 (* 1 = 0.0959238 loss)\nI0820 06:48:27.929467 22726 sgd_solver.cpp:166] Iteration 65800, lr = 1.645\nI0820 06:50:45.741678 22726 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0820 06:52:10.951226 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0820 06:52:10.951581 22726 solver.cpp:404]     Test net output #1: loss = 0.403101 (* 1 = 0.403101 loss)\nI0820 06:52:12.282516 22726 solver.cpp:228] Iteration 65900, loss = 0.206521\nI0820 06:52:12.282570 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:52:12.282595 22726 solver.cpp:244]     Train net output #1: loss = 0.206521 (* 1 = 0.206521 loss)\nI0820 06:52:12.364261 22726 sgd_solver.cpp:166] Iteration 65900, lr = 1.6475\nI0820 06:54:30.130081 22726 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0820 06:55:55.308034 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0820 06:55:55.308444 22726 solver.cpp:404]     Test net output #1: loss = 0.419091 (* 1 = 0.419091 loss)\nI0820 06:55:56.637928 22726 solver.cpp:228] Iteration 66000, loss = 0.112439\nI0820 06:55:56.637984 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:55:56.638010 22726 solver.cpp:244]     Train net output #1: loss = 0.112439 (* 1 = 0.112439 loss)\nI0820 06:55:56.714946 22726 sgd_solver.cpp:166] Iteration 66000, lr = 1.65\nI0820 06:58:14.699018 22726 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0820 06:59:39.898793 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87744\nI0820 06:59:39.899168 22726 solver.cpp:404]     Test net output #1: loss = 0.418493 (* 1 = 0.418493 loss)\nI0820 06:59:41.229249 22726 solver.cpp:228] Iteration 66100, loss = 0.107077\nI0820 06:59:41.229307 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:59:41.229332 22726 solver.cpp:244]     Train net output #1: loss = 0.107077 (* 1 = 0.107077 loss)\nI0820 06:59:41.308190 22726 sgd_solver.cpp:166] Iteration 66100, lr = 1.6525\nI0820 07:01:59.239166 22726 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0820 07:03:24.500972 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0820 07:03:24.501363 22726 solver.cpp:404]     Test net output #1: loss = 0.402851 (* 1 = 0.402851 loss)\nI0820 07:03:25.832522 22726 solver.cpp:228] Iteration 66200, loss = 0.0817614\nI0820 07:03:25.832782 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:03:25.832814 22726 solver.cpp:244]     Train net output #1: loss = 0.081761 (* 1 = 0.081761 loss)\nI0820 07:03:25.910876 22726 sgd_solver.cpp:166] Iteration 66200, lr = 1.655\nI0820 07:05:43.854183 22726 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0820 07:07:09.053045 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87964\nI0820 07:07:09.053418 22726 solver.cpp:404]     Test net output #1: loss = 0.414239 (* 1 = 0.414239 loss)\nI0820 07:07:10.384670 22726 solver.cpp:228] Iteration 66300, loss = 0.0760087\nI0820 07:07:10.384726 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:07:10.384749 22726 solver.cpp:244]     Train net output #1: loss = 0.0760083 (* 1 = 0.0760083 loss)\nI0820 07:07:10.462851 22726 sgd_solver.cpp:166] Iteration 66300, lr = 1.6575\nI0820 07:09:28.333444 22726 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0820 07:10:53.509377 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87236\nI0820 07:10:53.509757 22726 solver.cpp:404]     Test net output #1: loss = 0.44937 (* 1 = 0.44937 loss)\nI0820 07:10:54.841639 22726 solver.cpp:228] Iteration 66400, loss = 0.188691\nI0820 07:10:54.841697 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:10:54.841722 22726 solver.cpp:244]     Train net output 
#1: loss = 0.188691 (* 1 = 0.188691 loss)\nI0820 07:10:54.918807 22726 sgd_solver.cpp:166] Iteration 66400, lr = 1.66\nI0820 07:13:12.645745 22726 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0820 07:14:37.538641 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87224\nI0820 07:14:37.538921 22726 solver.cpp:404]     Test net output #1: loss = 0.43311 (* 1 = 0.43311 loss)\nI0820 07:14:38.868741 22726 solver.cpp:228] Iteration 66500, loss = 0.197613\nI0820 07:14:38.868798 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:14:38.868822 22726 solver.cpp:244]     Train net output #1: loss = 0.197612 (* 1 = 0.197612 loss)\nI0820 07:14:38.951141 22726 sgd_solver.cpp:166] Iteration 66500, lr = 1.6625\nI0820 07:16:56.627347 22726 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0820 07:18:21.501426 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0820 07:18:21.501739 22726 solver.cpp:404]     Test net output #1: loss = 0.403549 (* 1 = 0.403549 loss)\nI0820 07:18:22.833003 22726 solver.cpp:228] Iteration 66600, loss = 0.11152\nI0820 07:18:22.833055 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:18:22.833072 22726 solver.cpp:244]     Train net output #1: loss = 0.111519 (* 1 = 0.111519 loss)\nI0820 07:18:22.912592 22726 sgd_solver.cpp:166] Iteration 66600, lr = 1.665\nI0820 07:20:40.659780 22726 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0820 07:22:05.795011 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0820 07:22:05.795361 22726 solver.cpp:404]     Test net output #1: loss = 0.402512 (* 1 = 0.402512 loss)\nI0820 07:22:07.125057 22726 solver.cpp:228] Iteration 66700, loss = 0.0877102\nI0820 07:22:07.125108 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:22:07.125126 22726 solver.cpp:244]     Train net output #1: loss = 0.0877097 (* 1 = 0.0877097 loss)\nI0820 07:22:07.207134 22726 sgd_solver.cpp:166] Iteration 66700, lr = 1.6675\nI0820 
07:24:24.901929 22726 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0820 07:25:49.922233 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0820 07:25:49.922502 22726 solver.cpp:404]     Test net output #1: loss = 0.416251 (* 1 = 0.416251 loss)\nI0820 07:25:51.252353 22726 solver.cpp:228] Iteration 66800, loss = 0.103723\nI0820 07:25:51.252408 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:25:51.252424 22726 solver.cpp:244]     Train net output #1: loss = 0.103722 (* 1 = 0.103722 loss)\nI0820 07:25:51.332437 22726 sgd_solver.cpp:166] Iteration 66800, lr = 1.67\nI0820 07:28:09.030735 22726 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0820 07:29:33.962698 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87624\nI0820 07:29:33.962976 22726 solver.cpp:404]     Test net output #1: loss = 0.428515 (* 1 = 0.428515 loss)\nI0820 07:29:35.293856 22726 solver.cpp:228] Iteration 66900, loss = 0.159588\nI0820 07:29:35.293910 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:29:35.293928 22726 solver.cpp:244]     Train net output #1: loss = 0.159588 (* 1 = 0.159588 loss)\nI0820 07:29:35.377259 22726 sgd_solver.cpp:166] Iteration 66900, lr = 1.6725\nI0820 07:31:53.039963 22726 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0820 07:33:18.040755 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0820 07:33:18.041072 22726 solver.cpp:404]     Test net output #1: loss = 0.395892 (* 1 = 0.395892 loss)\nI0820 07:33:19.372447 22726 solver.cpp:228] Iteration 67000, loss = 0.17295\nI0820 07:33:19.372499 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 07:33:19.372516 22726 solver.cpp:244]     Train net output #1: loss = 0.172949 (* 1 = 0.172949 loss)\nI0820 07:33:19.455346 22726 sgd_solver.cpp:166] Iteration 67000, lr = 1.675\nI0820 07:35:37.148448 22726 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0820 07:37:02.269691 22726 solver.cpp:404]     Test net 
output #0: accuracy = 0.88244\nI0820 07:37:02.270112 22726 solver.cpp:404]     Test net output #1: loss = 0.385007 (* 1 = 0.385007 loss)\nI0820 07:37:03.600137 22726 solver.cpp:228] Iteration 67100, loss = 0.129882\nI0820 07:37:03.600190 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:37:03.600211 22726 solver.cpp:244]     Train net output #1: loss = 0.129881 (* 1 = 0.129881 loss)\nI0820 07:37:03.676779 22726 sgd_solver.cpp:166] Iteration 67100, lr = 1.6775\nI0820 07:39:21.354028 22726 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0820 07:40:46.387498 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87056\nI0820 07:40:46.387783 22726 solver.cpp:404]     Test net output #1: loss = 0.448648 (* 1 = 0.448648 loss)\nI0820 07:40:47.717470 22726 solver.cpp:228] Iteration 67200, loss = 0.164186\nI0820 07:40:47.717522 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 07:40:47.717540 22726 solver.cpp:244]     Train net output #1: loss = 0.164186 (* 1 = 0.164186 loss)\nI0820 07:40:47.793581 22726 sgd_solver.cpp:166] Iteration 67200, lr = 1.68\nI0820 07:43:05.507015 22726 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0820 07:44:30.582458 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87936\nI0820 07:44:30.582768 22726 solver.cpp:404]     Test net output #1: loss = 0.406339 (* 1 = 0.406339 loss)\nI0820 07:44:31.913697 22726 solver.cpp:228] Iteration 67300, loss = 0.099875\nI0820 07:44:31.913750 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:44:31.913767 22726 solver.cpp:244]     Train net output #1: loss = 0.0998746 (* 1 = 0.0998746 loss)\nI0820 07:44:31.995633 22726 sgd_solver.cpp:166] Iteration 67300, lr = 1.6825\nI0820 07:46:49.691915 22726 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0820 07:48:14.807845 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88376\nI0820 07:48:14.808135 22726 solver.cpp:404]     Test net output #1: loss = 0.406158 (* 1 = 
0.406158 loss)\nI0820 07:48:16.138983 22726 solver.cpp:228] Iteration 67400, loss = 0.160239\nI0820 07:48:16.139037 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:48:16.139055 22726 solver.cpp:244]     Train net output #1: loss = 0.160238 (* 1 = 0.160238 loss)\nI0820 07:48:16.227325 22726 sgd_solver.cpp:166] Iteration 67400, lr = 1.685\nI0820 07:50:33.848083 22726 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0820 07:51:59.003132 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 07:51:59.003430 22726 solver.cpp:404]     Test net output #1: loss = 0.414947 (* 1 = 0.414947 loss)\nI0820 07:52:00.334179 22726 solver.cpp:228] Iteration 67500, loss = 0.204288\nI0820 07:52:00.334239 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:52:00.334256 22726 solver.cpp:244]     Train net output #1: loss = 0.204288 (* 1 = 0.204288 loss)\nI0820 07:52:00.413817 22726 sgd_solver.cpp:166] Iteration 67500, lr = 1.6875\nI0820 07:54:18.179085 22726 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0820 07:55:43.331312 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87812\nI0820 07:55:43.331603 22726 solver.cpp:404]     Test net output #1: loss = 0.443229 (* 1 = 0.443229 loss)\nI0820 07:55:44.662410 22726 solver.cpp:228] Iteration 67600, loss = 0.297145\nI0820 07:55:44.662464 22726 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0820 07:55:44.662480 22726 solver.cpp:244]     Train net output #1: loss = 0.297145 (* 1 = 0.297145 loss)\nI0820 07:55:44.740087 22726 sgd_solver.cpp:166] Iteration 67600, lr = 1.69\nI0820 07:58:02.641047 22726 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0820 07:59:27.788538 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87632\nI0820 07:59:27.788820 22726 solver.cpp:404]     Test net output #1: loss = 0.403606 (* 1 = 0.403606 loss)\nI0820 07:59:29.120163 22726 solver.cpp:228] Iteration 67700, loss = 0.164976\nI0820 07:59:29.120213 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:59:29.120230 22726 solver.cpp:244]     Train net output #1: loss = 0.164976 (* 1 = 0.164976 loss)\nI0820 07:59:29.203518 22726 sgd_solver.cpp:166] Iteration 67700, lr = 1.6925\nI0820 08:01:46.956578 22726 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0820 08:03:11.922063 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0820 08:03:11.922420 22726 solver.cpp:404]     Test net output #1: loss = 0.373483 (* 1 = 0.373483 loss)\nI0820 08:03:13.253073 22726 solver.cpp:228] Iteration 67800, loss = 0.118597\nI0820 08:03:13.253125 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 08:03:13.253142 22726 solver.cpp:244]     Train net output #1: loss = 0.118596 (* 1 = 0.118596 loss)\nI0820 08:03:13.335177 22726 sgd_solver.cpp:166] Iteration 67800, lr = 1.695\nI0820 08:05:31.159860 22726 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0820 08:06:56.206822 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0820 08:06:56.207134 22726 solver.cpp:404]     Test net output #1: loss = 0.411957 (* 1 = 0.411957 loss)\nI0820 08:06:57.536715 22726 solver.cpp:228] Iteration 67900, loss = 0.191705\nI0820 08:06:57.536765 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 08:06:57.536782 22726 solver.cpp:244]     Train net output #1: loss = 0.191704 (* 1 = 0.191704 loss)\nI0820 08:06:57.614730 22726 sgd_solver.cpp:166] Iteration 67900, lr = 1.6975\nI0820 08:09:15.452991 22726 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0820 08:10:40.363123 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0820 08:10:40.363421 22726 solver.cpp:404]     Test net output #1: loss = 0.39527 (* 1 = 0.39527 loss)\nI0820 08:10:41.693006 22726 solver.cpp:228] Iteration 68000, loss = 0.181889\nI0820 08:10:41.693055 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 08:10:41.693071 22726 solver.cpp:244]     Train net output #1: 
loss = 0.181889 (* 1 = 0.181889 loss)\nI0820 08:10:41.770123 22726 sgd_solver.cpp:166] Iteration 68000, lr = 1.7\nI0820 08:12:59.499987 22726 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0820 08:14:24.592752 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0820 08:14:24.593029 22726 solver.cpp:404]     Test net output #1: loss = 0.39896 (* 1 = 0.39896 loss)\nI0820 08:14:25.922729 22726 solver.cpp:228] Iteration 68100, loss = 0.232084\nI0820 08:14:25.922782 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 08:14:25.922798 22726 solver.cpp:244]     Train net output #1: loss = 0.232083 (* 1 = 0.232083 loss)\nI0820 08:14:26.001631 22726 sgd_solver.cpp:166] Iteration 68100, lr = 1.7025\nI0820 08:16:43.799814 22726 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0820 08:18:08.912636 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87532\nI0820 08:18:08.912947 22726 solver.cpp:404]     Test net output #1: loss = 0.429879 (* 1 = 0.429879 loss)\nI0820 08:18:10.244148 22726 solver.cpp:228] Iteration 68200, loss = 0.120031\nI0820 08:18:10.244199 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:18:10.244220 22726 solver.cpp:244]     Train net output #1: loss = 0.12003 (* 1 = 0.12003 loss)\nI0820 08:18:10.327630 22726 sgd_solver.cpp:166] Iteration 68200, lr = 1.705\nI0820 08:20:28.089105 22726 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0820 08:21:53.192314 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8778\nI0820 08:21:53.192608 22726 solver.cpp:404]     Test net output #1: loss = 0.422674 (* 1 = 0.422674 loss)\nI0820 08:21:54.523340 22726 solver.cpp:228] Iteration 68300, loss = 0.167193\nI0820 08:21:54.523391 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:21:54.523406 22726 solver.cpp:244]     Train net output #1: loss = 0.167192 (* 1 = 0.167192 loss)\nI0820 08:21:54.608826 22726 sgd_solver.cpp:166] Iteration 68300, lr = 1.7075\nI0820 
08:24:12.428417 22726 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0820 08:25:37.392490 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88524\nI0820 08:25:37.392807 22726 solver.cpp:404]     Test net output #1: loss = 0.395023 (* 1 = 0.395023 loss)\nI0820 08:25:38.722731 22726 solver.cpp:228] Iteration 68400, loss = 0.187821\nI0820 08:25:38.722780 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:25:38.722797 22726 solver.cpp:244]     Train net output #1: loss = 0.18782 (* 1 = 0.18782 loss)\nI0820 08:25:38.797171 22726 sgd_solver.cpp:166] Iteration 68400, lr = 1.71\nI0820 08:27:56.494746 22726 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0820 08:29:21.403148 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86764\nI0820 08:29:21.403445 22726 solver.cpp:404]     Test net output #1: loss = 0.439284 (* 1 = 0.439284 loss)\nI0820 08:29:22.733403 22726 solver.cpp:228] Iteration 68500, loss = 0.197703\nI0820 08:29:22.733455 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 08:29:22.733471 22726 solver.cpp:244]     Train net output #1: loss = 0.197703 (* 1 = 0.197703 loss)\nI0820 08:29:22.817044 22726 sgd_solver.cpp:166] Iteration 68500, lr = 1.7125\nI0820 08:31:40.592149 22726 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0820 08:33:05.670486 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0820 08:33:05.670838 22726 solver.cpp:404]     Test net output #1: loss = 0.398313 (* 1 = 0.398313 loss)\nI0820 08:33:07.001083 22726 solver.cpp:228] Iteration 68600, loss = 0.180042\nI0820 08:33:07.001133 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:33:07.001150 22726 solver.cpp:244]     Train net output #1: loss = 0.180042 (* 1 = 0.180042 loss)\nI0820 08:33:07.084805 22726 sgd_solver.cpp:166] Iteration 68600, lr = 1.715\nI0820 08:35:25.001157 22726 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0820 08:36:49.995676 22726 solver.cpp:404]     Test net 
output #0: accuracy = 0.8792\nI0820 08:36:49.995949 22726 solver.cpp:404]     Test net output #1: loss = 0.428513 (* 1 = 0.428513 loss)\nI0820 08:36:51.326367 22726 solver.cpp:228] Iteration 68700, loss = 0.0928761\nI0820 08:36:51.326421 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 08:36:51.326444 22726 solver.cpp:244]     Train net output #1: loss = 0.0928755 (* 1 = 0.0928755 loss)\nI0820 08:36:51.407194 22726 sgd_solver.cpp:166] Iteration 68700, lr = 1.7175\nI0820 08:39:09.191292 22726 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0820 08:40:34.297462 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88152\nI0820 08:40:34.297791 22726 solver.cpp:404]     Test net output #1: loss = 0.386979 (* 1 = 0.386979 loss)\nI0820 08:40:35.628581 22726 solver.cpp:228] Iteration 68800, loss = 0.207389\nI0820 08:40:35.628630 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:40:35.628646 22726 solver.cpp:244]     Train net output #1: loss = 0.207389 (* 1 = 0.207389 loss)\nI0820 08:40:35.707244 22726 sgd_solver.cpp:166] Iteration 68800, lr = 1.72\nI0820 08:42:53.440124 22726 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0820 08:44:18.461807 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88288\nI0820 08:44:18.462100 22726 solver.cpp:404]     Test net output #1: loss = 0.405967 (* 1 = 0.405967 loss)\nI0820 08:44:19.793072 22726 solver.cpp:228] Iteration 68900, loss = 0.172987\nI0820 08:44:19.793123 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 08:44:19.793138 22726 solver.cpp:244]     Train net output #1: loss = 0.172986 (* 1 = 0.172986 loss)\nI0820 08:44:19.876830 22726 sgd_solver.cpp:166] Iteration 68900, lr = 1.7225\nI0820 08:46:37.730298 22726 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0820 08:48:02.751332 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88412\nI0820 08:48:02.751622 22726 solver.cpp:404]     Test net output #1: loss = 0.381692 (* 1 = 
0.381692 loss)\nI0820 08:48:04.082566 22726 solver.cpp:228] Iteration 69000, loss = 0.10253\nI0820 08:48:04.082617 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 08:48:04.082633 22726 solver.cpp:244]     Train net output #1: loss = 0.102529 (* 1 = 0.102529 loss)\nI0820 08:48:04.165400 22726 sgd_solver.cpp:166] Iteration 69000, lr = 1.725\nI0820 08:50:22.172937 22726 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0820 08:51:47.279176 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0820 08:51:47.279507 22726 solver.cpp:404]     Test net output #1: loss = 0.397706 (* 1 = 0.397706 loss)\nI0820 08:51:48.609758 22726 solver.cpp:228] Iteration 69100, loss = 0.168906\nI0820 08:51:48.609810 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:51:48.609827 22726 solver.cpp:244]     Train net output #1: loss = 0.168906 (* 1 = 0.168906 loss)\nI0820 08:51:48.687371 22726 sgd_solver.cpp:166] Iteration 69100, lr = 1.7275\nI0820 08:54:06.405287 22726 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0820 08:55:31.511903 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0820 08:55:31.512192 22726 solver.cpp:404]     Test net output #1: loss = 0.395284 (* 1 = 0.395284 loss)\nI0820 08:55:32.841307 22726 solver.cpp:228] Iteration 69200, loss = 0.256295\nI0820 08:55:32.841357 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 08:55:32.841374 22726 solver.cpp:244]     Train net output #1: loss = 0.256294 (* 1 = 0.256294 loss)\nI0820 08:55:32.923704 22726 sgd_solver.cpp:166] Iteration 69200, lr = 1.73\nI0820 08:57:50.808670 22726 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0820 08:59:15.912317 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87664\nI0820 08:59:15.912672 22726 solver.cpp:404]     Test net output #1: loss = 0.416342 (* 1 = 0.416342 loss)\nI0820 08:59:17.243608 22726 solver.cpp:228] Iteration 69300, loss = 0.156967\nI0820 08:59:17.243656 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 08:59:17.243680 22726 solver.cpp:244]     Train net output #1: loss = 0.156967 (* 1 = 0.156967 loss)\nI0820 08:59:17.324177 22726 sgd_solver.cpp:166] Iteration 69300, lr = 1.7325\nI0820 09:01:35.074404 22726 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0820 09:03:00.250038 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0820 09:03:00.250444 22726 solver.cpp:404]     Test net output #1: loss = 0.425518 (* 1 = 0.425518 loss)\nI0820 09:03:01.580404 22726 solver.cpp:228] Iteration 69400, loss = 0.170231\nI0820 09:03:01.580451 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:03:01.580474 22726 solver.cpp:244]     Train net output #1: loss = 0.17023 (* 1 = 0.17023 loss)\nI0820 09:03:01.661309 22726 sgd_solver.cpp:166] Iteration 69400, lr = 1.735\nI0820 09:05:19.460330 22726 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0820 09:06:44.586200 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87044\nI0820 09:06:44.586571 22726 solver.cpp:404]     Test net output #1: loss = 0.444469 (* 1 = 0.444469 loss)\nI0820 09:06:45.916393 22726 solver.cpp:228] Iteration 69500, loss = 0.110374\nI0820 09:06:45.916438 22726 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 09:06:45.916455 22726 solver.cpp:244]     Train net output #1: loss = 0.110373 (* 1 = 0.110373 loss)\nI0820 09:06:45.999830 22726 sgd_solver.cpp:166] Iteration 69500, lr = 1.7375\nI0820 09:09:03.784179 22726 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0820 09:10:28.233290 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0820 09:10:28.233640 22726 solver.cpp:404]     Test net output #1: loss = 0.449665 (* 1 = 0.449665 loss)\nI0820 09:10:29.561805 22726 solver.cpp:228] Iteration 69600, loss = 0.13595\nI0820 09:10:29.561847 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:10:29.561863 22726 solver.cpp:244]     Train net output #1: 
loss = 0.135949 (* 1 = 0.135949 loss)\nI0820 09:10:29.644137 22726 sgd_solver.cpp:166] Iteration 69600, lr = 1.74\nI0820 09:12:47.341233 22726 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0820 09:14:11.779757 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0820 09:14:11.780099 22726 solver.cpp:404]     Test net output #1: loss = 0.403328 (* 1 = 0.403328 loss)\nI0820 09:14:13.107522 22726 solver.cpp:228] Iteration 69700, loss = 0.254295\nI0820 09:14:13.107558 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:14:13.107573 22726 solver.cpp:244]     Train net output #1: loss = 0.254295 (* 1 = 0.254295 loss)\nI0820 09:14:13.190707 22726 sgd_solver.cpp:166] Iteration 69700, lr = 1.7425\nI0820 09:16:30.715970 22726 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0820 09:17:55.156390 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0820 09:17:55.156759 22726 solver.cpp:404]     Test net output #1: loss = 0.387623 (* 1 = 0.387623 loss)\nI0820 09:17:56.483971 22726 solver.cpp:228] Iteration 69800, loss = 0.151105\nI0820 09:17:56.484014 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:17:56.484036 22726 solver.cpp:244]     Train net output #1: loss = 0.151105 (* 1 = 0.151105 loss)\nI0820 09:17:56.570612 22726 sgd_solver.cpp:166] Iteration 69800, lr = 1.745\nI0820 09:20:14.156886 22726 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0820 09:21:38.589495 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88056\nI0820 09:21:38.589848 22726 solver.cpp:404]     Test net output #1: loss = 0.389729 (* 1 = 0.389729 loss)\nI0820 09:21:39.917541 22726 solver.cpp:228] Iteration 69900, loss = 0.268492\nI0820 09:21:39.917582 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 09:21:39.917603 22726 solver.cpp:244]     Train net output #1: loss = 0.268492 (* 1 = 0.268492 loss)\nI0820 09:21:40.003394 22726 sgd_solver.cpp:166] Iteration 69900, lr = 1.7475\nI0820 
09:23:57.665026 22726 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0820 09:25:22.750417 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0820 09:25:22.750690 22726 solver.cpp:404]     Test net output #1: loss = 0.387121 (* 1 = 0.387121 loss)\nI0820 09:25:24.081751 22726 solver.cpp:228] Iteration 70000, loss = 0.143119\nI0820 09:25:24.081796 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 09:25:24.081812 22726 solver.cpp:244]     Train net output #1: loss = 0.143118 (* 1 = 0.143118 loss)\nI0820 09:25:24.161767 22726 sgd_solver.cpp:166] Iteration 70000, lr = 1.75\nI0820 09:27:41.929497 22726 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0820 09:29:06.847404 22726 solver.cpp:404]     Test net output #0: accuracy = 0.882321\nI0820 09:29:06.847672 22726 solver.cpp:404]     Test net output #1: loss = 0.391737 (* 1 = 0.391737 loss)\nI0820 09:29:08.177248 22726 solver.cpp:228] Iteration 70100, loss = 0.186881\nI0820 09:29:08.177289 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:29:08.177304 22726 solver.cpp:244]     Train net output #1: loss = 0.186881 (* 1 = 0.186881 loss)\nI0820 09:29:08.258493 22726 sgd_solver.cpp:166] Iteration 70100, lr = 1.7525\nI0820 09:31:26.044796 22726 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0820 09:32:50.978389 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0820 09:32:50.978737 22726 solver.cpp:404]     Test net output #1: loss = 0.383868 (* 1 = 0.383868 loss)\nI0820 09:32:52.309770 22726 solver.cpp:228] Iteration 70200, loss = 0.173911\nI0820 09:32:52.309815 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 09:32:52.309830 22726 solver.cpp:244]     Train net output #1: loss = 0.173911 (* 1 = 0.173911 loss)\nI0820 09:32:52.388073 22726 sgd_solver.cpp:166] Iteration 70200, lr = 1.755\nI0820 09:35:10.203351 22726 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0820 09:36:35.307996 22726 solver.cpp:404]     Test 
net output #0: accuracy = 0.87132\nI0820 09:36:35.308280 22726 solver.cpp:404]     Test net output #1: loss = 0.434022 (* 1 = 0.434022 loss)\nI0820 09:36:36.638295 22726 solver.cpp:228] Iteration 70300, loss = 0.213355\nI0820 09:36:36.638339 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 09:36:36.638355 22726 solver.cpp:244]     Train net output #1: loss = 0.213355 (* 1 = 0.213355 loss)\nI0820 09:36:36.720278 22726 sgd_solver.cpp:166] Iteration 70300, lr = 1.7575\nI0820 09:38:54.455955 22726 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0820 09:40:19.576666 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87608\nI0820 09:40:19.576994 22726 solver.cpp:404]     Test net output #1: loss = 0.416732 (* 1 = 0.416732 loss)\nI0820 09:40:20.907842 22726 solver.cpp:228] Iteration 70400, loss = 0.173722\nI0820 09:40:20.907886 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:40:20.907902 22726 solver.cpp:244]     Train net output #1: loss = 0.173721 (* 1 = 0.173721 loss)\nI0820 09:40:20.997927 22726 sgd_solver.cpp:166] Iteration 70400, lr = 1.76\nI0820 09:42:38.758816 22726 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0820 09:44:03.891670 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0820 09:44:03.892005 22726 solver.cpp:404]     Test net output #1: loss = 0.379709 (* 1 = 0.379709 loss)\nI0820 09:44:05.221276 22726 solver.cpp:228] Iteration 70500, loss = 0.0582815\nI0820 09:44:05.221328 22726 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 09:44:05.221345 22726 solver.cpp:244]     Train net output #1: loss = 0.0582808 (* 1 = 0.0582808 loss)\nI0820 09:44:05.305063 22726 sgd_solver.cpp:166] Iteration 70500, lr = 1.7625\nI0820 09:46:23.230856 22726 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0820 09:47:48.358654 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0820 09:47:48.358989 22726 solver.cpp:404]     Test net output #1: loss = 0.404501 (* 1 
= 0.404501 loss)\nI0820 09:47:49.689469 22726 solver.cpp:228] Iteration 70600, loss = 0.149544\nI0820 09:47:49.689522 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:47:49.689538 22726 solver.cpp:244]     Train net output #1: loss = 0.149543 (* 1 = 0.149543 loss)\nI0820 09:47:49.770917 22726 sgd_solver.cpp:166] Iteration 70600, lr = 1.765\nI0820 09:50:07.449978 22726 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0820 09:51:32.557509 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87328\nI0820 09:51:32.557801 22726 solver.cpp:404]     Test net output #1: loss = 0.416162 (* 1 = 0.416162 loss)\nI0820 09:51:33.888243 22726 solver.cpp:228] Iteration 70700, loss = 0.17159\nI0820 09:51:33.888284 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 09:51:33.888299 22726 solver.cpp:244]     Train net output #1: loss = 0.17159 (* 1 = 0.17159 loss)\nI0820 09:51:33.965296 22726 sgd_solver.cpp:166] Iteration 70700, lr = 1.7675\nI0820 09:53:51.687633 22726 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0820 09:55:16.763420 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88752\nI0820 09:55:16.763734 22726 solver.cpp:404]     Test net output #1: loss = 0.373654 (* 1 = 0.373654 loss)\nI0820 09:55:18.093866 22726 solver.cpp:228] Iteration 70800, loss = 0.149509\nI0820 09:55:18.093917 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:55:18.093932 22726 solver.cpp:244]     Train net output #1: loss = 0.149508 (* 1 = 0.149508 loss)\nI0820 09:55:18.173280 22726 sgd_solver.cpp:166] Iteration 70800, lr = 1.77\nI0820 09:57:35.883316 22726 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0820 09:59:01.028794 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0820 09:59:01.029103 22726 solver.cpp:404]     Test net output #1: loss = 0.413113 (* 1 = 0.413113 loss)\nI0820 09:59:02.359165 22726 solver.cpp:228] Iteration 70900, loss = 0.154544\nI0820 09:59:02.359215 22726 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:59:02.359230 22726 solver.cpp:244]     Train net output #1: loss = 0.154543 (* 1 = 0.154543 loss)\nI0820 09:59:02.438309 22726 sgd_solver.cpp:166] Iteration 70900, lr = 1.7725\nI0820 10:01:20.100575 22726 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0820 10:02:45.237671 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87992\nI0820 10:02:45.238044 22726 solver.cpp:404]     Test net output #1: loss = 0.403228 (* 1 = 0.403228 loss)\nI0820 10:02:46.568158 22726 solver.cpp:228] Iteration 71000, loss = 0.147844\nI0820 10:02:46.568202 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:02:46.568217 22726 solver.cpp:244]     Train net output #1: loss = 0.147844 (* 1 = 0.147844 loss)\nI0820 10:02:46.652429 22726 sgd_solver.cpp:166] Iteration 71000, lr = 1.775\nI0820 10:05:04.616595 22726 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0820 10:06:29.769075 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0820 10:06:29.769362 22726 solver.cpp:404]     Test net output #1: loss = 0.393243 (* 1 = 0.393243 loss)\nI0820 10:06:31.099179 22726 solver.cpp:228] Iteration 71100, loss = 0.204066\nI0820 10:06:31.099231 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:06:31.099246 22726 solver.cpp:244]     Train net output #1: loss = 0.204065 (* 1 = 0.204065 loss)\nI0820 10:06:31.176877 22726 sgd_solver.cpp:166] Iteration 71100, lr = 1.7775\nI0820 10:08:49.029050 22726 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0820 10:10:13.926174 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0820 10:10:13.926498 22726 solver.cpp:404]     Test net output #1: loss = 0.410208 (* 1 = 0.410208 loss)\nI0820 10:10:15.256198 22726 solver.cpp:228] Iteration 71200, loss = 0.161669\nI0820 10:10:15.256242 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 10:10:15.256258 22726 solver.cpp:244]     Train net output #1: 
loss = 0.161668 (* 1 = 0.161668 loss)\nI0820 10:10:15.338841 22726 sgd_solver.cpp:166] Iteration 71200, lr = 1.78\nI0820 10:12:33.037461 22726 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0820 10:13:57.955684 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87704\nI0820 10:13:57.955950 22726 solver.cpp:404]     Test net output #1: loss = 0.402606 (* 1 = 0.402606 loss)\nI0820 10:13:59.285024 22726 solver.cpp:228] Iteration 71300, loss = 0.128342\nI0820 10:13:59.285075 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:13:59.285091 22726 solver.cpp:244]     Train net output #1: loss = 0.128341 (* 1 = 0.128341 loss)\nI0820 10:13:59.362522 22726 sgd_solver.cpp:166] Iteration 71300, lr = 1.7825\nI0820 10:16:17.134945 22726 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0820 10:17:41.937682 22726 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0820 10:17:41.937952 22726 solver.cpp:404]     Test net output #1: loss = 0.434478 (* 1 = 0.434478 loss)\nI0820 10:17:43.267983 22726 solver.cpp:228] Iteration 71400, loss = 0.126453\nI0820 10:17:43.268036 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 10:17:43.268054 22726 solver.cpp:244]     Train net output #1: loss = 0.126453 (* 1 = 0.126453 loss)\nI0820 10:17:43.353888 22726 sgd_solver.cpp:166] Iteration 71400, lr = 1.785\nI0820 10:20:01.095376 22726 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0820 10:21:26.012888 22726 solver.cpp:404]     Test net output #0: accuracy = 0.872441\nI0820 10:21:26.013283 22726 solver.cpp:404]     Test net output #1: loss = 0.427102 (* 1 = 0.427102 loss)\nI0820 10:21:27.343010 22726 solver.cpp:228] Iteration 71500, loss = 0.19057\nI0820 10:21:27.343060 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:21:27.343078 22726 solver.cpp:244]     Train net output #1: loss = 0.190569 (* 1 = 0.190569 loss)\nI0820 10:21:27.421674 22726 sgd_solver.cpp:166] Iteration 71500, lr = 1.7875\nI0820 
10:23:45.195658 22726 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0820 10:25:10.020031 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88608\nI0820 10:25:10.020381 22726 solver.cpp:404]     Test net output #1: loss = 0.380053 (* 1 = 0.380053 loss)\nI0820 10:25:11.350046 22726 solver.cpp:228] Iteration 71600, loss = 0.209798\nI0820 10:25:11.350098 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:25:11.350116 22726 solver.cpp:244]     Train net output #1: loss = 0.209798 (* 1 = 0.209798 loss)\nI0820 10:25:11.431200 22726 sgd_solver.cpp:166] Iteration 71600, lr = 1.79\nI0820 10:27:29.208915 22726 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0820 10:28:54.190004 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87596\nI0820 10:28:54.190390 22726 solver.cpp:404]     Test net output #1: loss = 0.418918 (* 1 = 0.418918 loss)\nI0820 10:28:55.520330 22726 solver.cpp:228] Iteration 71700, loss = 0.202189\nI0820 10:28:55.520375 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:28:55.520390 22726 solver.cpp:244]     Train net output #1: loss = 0.202188 (* 1 = 0.202188 loss)\nI0820 10:28:55.599264 22726 sgd_solver.cpp:166] Iteration 71700, lr = 1.7925\nI0820 10:31:13.352670 22726 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0820 10:32:38.510337 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0820 10:32:38.510689 22726 solver.cpp:404]     Test net output #1: loss = 0.383638 (* 1 = 0.383638 loss)\nI0820 10:32:39.841614 22726 solver.cpp:228] Iteration 71800, loss = 0.138713\nI0820 10:32:39.841656 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:32:39.841672 22726 solver.cpp:244]     Train net output #1: loss = 0.138712 (* 1 = 0.138712 loss)\nI0820 10:32:39.921936 22726 sgd_solver.cpp:166] Iteration 71800, lr = 1.795\nI0820 10:34:57.710019 22726 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0820 10:36:22.686369 22726 solver.cpp:404]     Test net 
output #0: accuracy = 0.87408\nI0820 10:36:22.686750 22726 solver.cpp:404]     Test net output #1: loss = 0.420842 (* 1 = 0.420842 loss)\nI0820 10:36:24.017225 22726 solver.cpp:228] Iteration 71900, loss = 0.210559\nI0820 10:36:24.017280 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 10:36:24.017297 22726 solver.cpp:244]     Train net output #1: loss = 0.210558 (* 1 = 0.210558 loss)\nI0820 10:36:24.095890 22726 sgd_solver.cpp:166] Iteration 71900, lr = 1.7975\nI0820 10:38:41.849505 22726 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0820 10:40:06.768293 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88764\nI0820 10:40:06.768622 22726 solver.cpp:404]     Test net output #1: loss = 0.379095 (* 1 = 0.379095 loss)\nI0820 10:40:08.098397 22726 solver.cpp:228] Iteration 72000, loss = 0.156229\nI0820 10:40:08.098450 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:40:08.098466 22726 solver.cpp:244]     Train net output #1: loss = 0.156229 (* 1 = 0.156229 loss)\nI0820 10:40:08.181696 22726 sgd_solver.cpp:166] Iteration 72000, lr = 1.8\nI0820 10:42:25.952255 22726 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0820 10:43:51.121927 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87636\nI0820 10:43:51.122284 22726 solver.cpp:404]     Test net output #1: loss = 0.408834 (* 1 = 0.408834 loss)\nI0820 10:43:52.452154 22726 solver.cpp:228] Iteration 72100, loss = 0.137471\nI0820 10:43:52.452198 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:43:52.452214 22726 solver.cpp:244]     Train net output #1: loss = 0.13747 (* 1 = 0.13747 loss)\nI0820 10:43:52.531143 22726 sgd_solver.cpp:166] Iteration 72100, lr = 1.8025\nI0820 10:46:10.335834 22726 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0820 10:47:35.496749 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8782\nI0820 10:47:35.497135 22726 solver.cpp:404]     Test net output #1: loss = 0.399339 (* 1 = 0.399339 
loss)\nI0820 10:47:36.826761 22726 solver.cpp:228] Iteration 72200, loss = 0.189145\nI0820 10:47:36.826812 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:47:36.826828 22726 solver.cpp:244]     Train net output #1: loss = 0.189144 (* 1 = 0.189144 loss)\nI0820 10:47:36.909224 22726 sgd_solver.cpp:166] Iteration 72200, lr = 1.805\nI0820 10:49:54.782097 22726 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0820 10:51:19.940902 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86844\nI0820 10:51:19.941309 22726 solver.cpp:404]     Test net output #1: loss = 0.436186 (* 1 = 0.436186 loss)\nI0820 10:51:21.271284 22726 solver.cpp:228] Iteration 72300, loss = 0.254975\nI0820 10:51:21.271333 22726 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 10:51:21.271350 22726 solver.cpp:244]     Train net output #1: loss = 0.254974 (* 1 = 0.254974 loss)\nI0820 10:51:21.352625 22726 sgd_solver.cpp:166] Iteration 72300, lr = 1.8075\nI0820 10:53:39.165994 22726 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0820 10:55:04.343282 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0820 10:55:04.343684 22726 solver.cpp:404]     Test net output #1: loss = 0.407108 (* 1 = 0.407108 loss)\nI0820 10:55:05.673940 22726 solver.cpp:228] Iteration 72400, loss = 0.164941\nI0820 10:55:05.673985 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:55:05.674001 22726 solver.cpp:244]     Train net output #1: loss = 0.16494 (* 1 = 0.16494 loss)\nI0820 10:55:05.756621 22726 sgd_solver.cpp:166] Iteration 72400, lr = 1.81\nI0820 10:57:23.541935 22726 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0820 10:58:48.709252 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87904\nI0820 10:58:48.709656 22726 solver.cpp:404]     Test net output #1: loss = 0.402522 (* 1 = 0.402522 loss)\nI0820 10:58:50.039541 22726 solver.cpp:228] Iteration 72500, loss = 0.139575\nI0820 10:58:50.039589 22726 solver.cpp:244]   
  Train net output #0: accuracy = 0.952\nI0820 10:58:50.039605 22726 solver.cpp:244]     Train net output #1: loss = 0.139574 (* 1 = 0.139574 loss)\nI0820 10:58:50.119729 22726 sgd_solver.cpp:166] Iteration 72500, lr = 1.8125\nI0820 11:01:07.933034 22726 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0820 11:02:33.101984 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0820 11:02:33.102380 22726 solver.cpp:404]     Test net output #1: loss = 0.384468 (* 1 = 0.384468 loss)\nI0820 11:02:34.432270 22726 solver.cpp:228] Iteration 72600, loss = 0.122739\nI0820 11:02:34.432320 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:02:34.432337 22726 solver.cpp:244]     Train net output #1: loss = 0.122739 (* 1 = 0.122739 loss)\nI0820 11:02:34.513339 22726 sgd_solver.cpp:166] Iteration 72600, lr = 1.815\nI0820 11:04:52.314965 22726 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0820 11:06:17.485890 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87788\nI0820 11:06:17.486274 22726 solver.cpp:404]     Test net output #1: loss = 0.392187 (* 1 = 0.392187 loss)\nI0820 11:06:18.815748 22726 solver.cpp:228] Iteration 72700, loss = 0.0959956\nI0820 11:06:18.815796 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:06:18.815811 22726 solver.cpp:244]     Train net output #1: loss = 0.0959951 (* 1 = 0.0959951 loss)\nI0820 11:06:18.899089 22726 sgd_solver.cpp:166] Iteration 72700, lr = 1.8175\nI0820 11:08:36.613688 22726 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0820 11:10:01.781847 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0820 11:10:01.782251 22726 solver.cpp:404]     Test net output #1: loss = 0.392143 (* 1 = 0.392143 loss)\nI0820 11:10:03.112709 22726 solver.cpp:228] Iteration 72800, loss = 0.12834\nI0820 11:10:03.112753 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:10:03.112769 22726 solver.cpp:244]     Train net output #1: loss = 0.128339 
(* 1 = 0.128339 loss)\nI0820 11:10:03.192623 22726 sgd_solver.cpp:166] Iteration 72800, lr = 1.82\nI0820 11:12:20.920439 22726 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0820 11:13:46.085108 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86936\nI0820 11:13:46.085484 22726 solver.cpp:404]     Test net output #1: loss = 0.447936 (* 1 = 0.447936 loss)\nI0820 11:13:47.415562 22726 solver.cpp:228] Iteration 72900, loss = 0.140577\nI0820 11:13:47.415609 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:13:47.415626 22726 solver.cpp:244]     Train net output #1: loss = 0.140577 (* 1 = 0.140577 loss)\nI0820 11:13:47.493862 22726 sgd_solver.cpp:166] Iteration 72900, lr = 1.8225\nI0820 11:16:05.255584 22726 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0820 11:17:30.425602 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87432\nI0820 11:17:30.426018 22726 solver.cpp:404]     Test net output #1: loss = 0.435592 (* 1 = 0.435592 loss)\nI0820 11:17:31.756062 22726 solver.cpp:228] Iteration 73000, loss = 0.283941\nI0820 11:17:31.756106 22726 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0820 11:17:31.756124 22726 solver.cpp:244]     Train net output #1: loss = 0.283941 (* 1 = 0.283941 loss)\nI0820 11:17:31.839960 22726 sgd_solver.cpp:166] Iteration 73000, lr = 1.825\nI0820 11:19:49.579555 22726 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0820 11:21:14.733685 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87448\nI0820 11:21:14.734084 22726 solver.cpp:404]     Test net output #1: loss = 0.410845 (* 1 = 0.410845 loss)\nI0820 11:21:16.064571 22726 solver.cpp:228] Iteration 73100, loss = 0.147223\nI0820 11:21:16.064618 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:21:16.064635 22726 solver.cpp:244]     Train net output #1: loss = 0.147222 (* 1 = 0.147222 loss)\nI0820 11:21:16.145506 22726 sgd_solver.cpp:166] Iteration 73100, lr = 1.8275\nI0820 11:23:33.839465 22726 
solver.cpp:337] Iteration 73200, Testing net (#0)\nI0820 11:24:58.982480 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0820 11:24:58.982879 22726 solver.cpp:404]     Test net output #1: loss = 0.378834 (* 1 = 0.378834 loss)\nI0820 11:25:00.312954 22726 solver.cpp:228] Iteration 73200, loss = 0.118786\nI0820 11:25:00.313005 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 11:25:00.313020 22726 solver.cpp:244]     Train net output #1: loss = 0.118785 (* 1 = 0.118785 loss)\nI0820 11:25:00.397295 22726 sgd_solver.cpp:166] Iteration 73200, lr = 1.83\nI0820 11:27:18.198314 22726 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0820 11:28:43.360569 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0820 11:28:43.360975 22726 solver.cpp:404]     Test net output #1: loss = 0.397878 (* 1 = 0.397878 loss)\nI0820 11:28:44.690668 22726 solver.cpp:228] Iteration 73300, loss = 0.145092\nI0820 11:28:44.690718 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 11:28:44.690733 22726 solver.cpp:244]     Train net output #1: loss = 0.145092 (* 1 = 0.145092 loss)\nI0820 11:28:44.768121 22726 sgd_solver.cpp:166] Iteration 73300, lr = 1.8325\nI0820 11:31:02.479619 22726 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0820 11:32:27.633184 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0820 11:32:27.633553 22726 solver.cpp:404]     Test net output #1: loss = 0.3898 (* 1 = 0.3898 loss)\nI0820 11:32:28.962769 22726 solver.cpp:228] Iteration 73400, loss = 0.113096\nI0820 11:32:28.962816 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 11:32:28.962831 22726 solver.cpp:244]     Train net output #1: loss = 0.113095 (* 1 = 0.113095 loss)\nI0820 11:32:29.039880 22726 sgd_solver.cpp:166] Iteration 73400, lr = 1.835\nI0820 11:34:46.713765 22726 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0820 11:36:11.857766 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.88852\nI0820 11:36:11.858157 22726 solver.cpp:404]     Test net output #1: loss = 0.366737 (* 1 = 0.366737 loss)\nI0820 11:36:13.187871 22726 solver.cpp:228] Iteration 73500, loss = 0.108575\nI0820 11:36:13.187925 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:36:13.187942 22726 solver.cpp:244]     Train net output #1: loss = 0.108574 (* 1 = 0.108574 loss)\nI0820 11:36:13.268416 22726 sgd_solver.cpp:166] Iteration 73500, lr = 1.8375\nI0820 11:38:31.111351 22726 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0820 11:39:56.269541 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8778\nI0820 11:39:56.269919 22726 solver.cpp:404]     Test net output #1: loss = 0.411401 (* 1 = 0.411401 loss)\nI0820 11:39:57.599716 22726 solver.cpp:228] Iteration 73600, loss = 0.148045\nI0820 11:39:57.599767 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:39:57.599783 22726 solver.cpp:244]     Train net output #1: loss = 0.148044 (* 1 = 0.148044 loss)\nI0820 11:39:57.678833 22726 sgd_solver.cpp:166] Iteration 73600, lr = 1.84\nI0820 11:42:15.361892 22726 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0820 11:43:40.515883 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87576\nI0820 11:43:40.516268 22726 solver.cpp:404]     Test net output #1: loss = 0.393616 (* 1 = 0.393616 loss)\nI0820 11:43:41.846554 22726 solver.cpp:228] Iteration 73700, loss = 0.138618\nI0820 11:43:41.846599 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:43:41.846616 22726 solver.cpp:244]     Train net output #1: loss = 0.138618 (* 1 = 0.138618 loss)\nI0820 11:43:41.928092 22726 sgd_solver.cpp:166] Iteration 73700, lr = 1.8425\nI0820 11:45:59.599375 22726 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0820 11:47:24.756860 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87088\nI0820 11:47:24.757272 22726 solver.cpp:404]     Test net output #1: loss = 0.436342 (* 1 = 0.436342 loss)\nI0820 
11:47:26.086541 22726 solver.cpp:228] Iteration 73800, loss = 0.197622\nI0820 11:47:26.086594 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 11:47:26.086611 22726 solver.cpp:244]     Train net output #1: loss = 0.197622 (* 1 = 0.197622 loss)\nI0820 11:47:26.169463 22726 sgd_solver.cpp:166] Iteration 73800, lr = 1.845\nI0820 11:49:43.774395 22726 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0820 11:51:08.922466 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86876\nI0820 11:51:08.922859 22726 solver.cpp:404]     Test net output #1: loss = 0.427255 (* 1 = 0.427255 loss)\nI0820 11:51:10.252629 22726 solver.cpp:228] Iteration 73900, loss = 0.276647\nI0820 11:51:10.252681 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:51:10.252697 22726 solver.cpp:244]     Train net output #1: loss = 0.276647 (* 1 = 0.276647 loss)\nI0820 11:51:10.334928 22726 sgd_solver.cpp:166] Iteration 73900, lr = 1.8475\nI0820 11:53:28.124955 22726 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0820 11:54:53.280405 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8704\nI0820 11:54:53.280804 22726 solver.cpp:404]     Test net output #1: loss = 0.429218 (* 1 = 0.429218 loss)\nI0820 11:54:54.613836 22726 solver.cpp:228] Iteration 74000, loss = 0.127418\nI0820 11:54:54.613891 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:54:54.613909 22726 solver.cpp:244]     Train net output #1: loss = 0.127417 (* 1 = 0.127417 loss)\nI0820 11:54:54.694069 22726 sgd_solver.cpp:166] Iteration 74000, lr = 1.85\nI0820 11:57:12.384099 22726 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0820 11:58:37.550624 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87\nI0820 11:58:37.551050 22726 solver.cpp:404]     Test net output #1: loss = 0.43936 (* 1 = 0.43936 loss)\nI0820 11:58:38.881559 22726 solver.cpp:228] Iteration 74100, loss = 0.273559\nI0820 11:58:38.881613 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.92\nI0820 11:58:38.881629 22726 solver.cpp:244]     Train net output #1: loss = 0.273559 (* 1 = 0.273559 loss)\nI0820 11:58:38.960186 22726 sgd_solver.cpp:166] Iteration 74100, lr = 1.8525\nI0820 12:00:56.631110 22726 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0820 12:02:21.791791 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0820 12:02:21.792189 22726 solver.cpp:404]     Test net output #1: loss = 0.381362 (* 1 = 0.381362 loss)\nI0820 12:02:23.122297 22726 solver.cpp:228] Iteration 74200, loss = 0.161437\nI0820 12:02:23.122351 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:02:23.122366 22726 solver.cpp:244]     Train net output #1: loss = 0.161436 (* 1 = 0.161436 loss)\nI0820 12:02:23.201836 22726 sgd_solver.cpp:166] Iteration 74200, lr = 1.855\nI0820 12:04:41.015264 22726 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0820 12:06:06.073945 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0820 12:06:06.074301 22726 solver.cpp:404]     Test net output #1: loss = 0.414032 (* 1 = 0.414032 loss)\nI0820 12:06:07.403988 22726 solver.cpp:228] Iteration 74300, loss = 0.175402\nI0820 12:06:07.404042 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 12:06:07.404058 22726 solver.cpp:244]     Train net output #1: loss = 0.175402 (* 1 = 0.175402 loss)\nI0820 12:06:07.485280 22726 sgd_solver.cpp:166] Iteration 74300, lr = 1.8575\nI0820 12:08:25.190868 22726 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0820 12:09:50.216363 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0820 12:09:50.216634 22726 solver.cpp:404]     Test net output #1: loss = 0.417671 (* 1 = 0.417671 loss)\nI0820 12:09:51.545940 22726 solver.cpp:228] Iteration 74400, loss = 0.220767\nI0820 12:09:51.546000 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 12:09:51.546016 22726 solver.cpp:244]     Train net output #1: loss = 0.220767 (* 1 = 
0.220767 loss)\nI0820 12:09:51.628697 22726 sgd_solver.cpp:166] Iteration 74400, lr = 1.86\nI0820 12:12:09.326095 22726 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0820 12:13:34.194365 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86932\nI0820 12:13:34.194635 22726 solver.cpp:404]     Test net output #1: loss = 0.456222 (* 1 = 0.456222 loss)\nI0820 12:13:35.525853 22726 solver.cpp:228] Iteration 74500, loss = 0.234297\nI0820 12:13:35.525908 22726 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0820 12:13:35.525925 22726 solver.cpp:244]     Train net output #1: loss = 0.234296 (* 1 = 0.234296 loss)\nI0820 12:13:35.600615 22726 sgd_solver.cpp:166] Iteration 74500, lr = 1.8625\nI0820 12:15:53.253850 22726 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0820 12:17:18.103492 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0820 12:17:18.103766 22726 solver.cpp:404]     Test net output #1: loss = 0.401099 (* 1 = 0.401099 loss)\nI0820 12:17:19.434414 22726 solver.cpp:228] Iteration 74600, loss = 0.263947\nI0820 12:17:19.434468 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 12:17:19.434486 22726 solver.cpp:244]     Train net output #1: loss = 0.263947 (* 1 = 0.263947 loss)\nI0820 12:17:19.515516 22726 sgd_solver.cpp:166] Iteration 74600, lr = 1.865\nI0820 12:19:37.202075 22726 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0820 12:21:02.000208 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87496\nI0820 12:21:02.000535 22726 solver.cpp:404]     Test net output #1: loss = 0.407754 (* 1 = 0.407754 loss)\nI0820 12:21:03.331686 22726 solver.cpp:228] Iteration 74700, loss = 0.145471\nI0820 12:21:03.331730 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 12:21:03.331745 22726 solver.cpp:244]     Train net output #1: loss = 0.14547 (* 1 = 0.14547 loss)\nI0820 12:21:03.414275 22726 sgd_solver.cpp:166] Iteration 74700, lr = 1.8675\nI0820 12:23:21.107062 22726 
solver.cpp:337] Iteration 74800, Testing net (#0)\nI0820 12:24:46.004844 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87628\nI0820 12:24:46.005136 22726 solver.cpp:404]     Test net output #1: loss = 0.423634 (* 1 = 0.423634 loss)\nI0820 12:24:47.335346 22726 solver.cpp:228] Iteration 74800, loss = 0.130791\nI0820 12:24:47.335398 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 12:24:47.335414 22726 solver.cpp:244]     Train net output #1: loss = 0.13079 (* 1 = 0.13079 loss)\nI0820 12:24:47.414327 22726 sgd_solver.cpp:166] Iteration 74800, lr = 1.87\nI0820 12:27:05.093946 22726 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0820 12:28:29.937723 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8782\nI0820 12:28:29.937991 22726 solver.cpp:404]     Test net output #1: loss = 0.402774 (* 1 = 0.402774 loss)\nI0820 12:28:31.269340 22726 solver.cpp:228] Iteration 74900, loss = 0.168822\nI0820 12:28:31.269393 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:28:31.269410 22726 solver.cpp:244]     Train net output #1: loss = 0.168821 (* 1 = 0.168821 loss)\nI0820 12:28:31.349129 22726 sgd_solver.cpp:166] Iteration 74900, lr = 1.8725\nI0820 12:30:49.075723 22726 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0820 12:32:14.190596 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86916\nI0820 12:32:14.190982 22726 solver.cpp:404]     Test net output #1: loss = 0.410561 (* 1 = 0.410561 loss)\nI0820 12:32:15.521625 22726 solver.cpp:228] Iteration 75000, loss = 0.0878397\nI0820 12:32:15.521670 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 12:32:15.521687 22726 solver.cpp:244]     Train net output #1: loss = 0.0878391 (* 1 = 0.0878391 loss)\nI0820 12:32:15.606149 22726 sgd_solver.cpp:166] Iteration 75000, lr = 1.875\nI0820 12:34:33.466787 22726 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0820 12:35:58.626025 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.87808\nI0820 12:35:58.626448 22726 solver.cpp:404]     Test net output #1: loss = 0.405628 (* 1 = 0.405628 loss)\nI0820 12:35:59.957455 22726 solver.cpp:228] Iteration 75100, loss = 0.200765\nI0820 12:35:59.957501 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 12:35:59.957525 22726 solver.cpp:244]     Train net output #1: loss = 0.200764 (* 1 = 0.200764 loss)\nI0820 12:36:00.036020 22726 sgd_solver.cpp:166] Iteration 75100, lr = 1.8775\nI0820 12:38:17.758461 22726 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0820 12:39:42.949434 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0820 12:39:42.949826 22726 solver.cpp:404]     Test net output #1: loss = 0.392046 (* 1 = 0.392046 loss)\nI0820 12:39:44.281790 22726 solver.cpp:228] Iteration 75200, loss = 0.114813\nI0820 12:39:44.281838 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 12:39:44.281854 22726 solver.cpp:244]     Train net output #1: loss = 0.114812 (* 1 = 0.114812 loss)\nI0820 12:39:44.358238 22726 sgd_solver.cpp:166] Iteration 75200, lr = 1.88\nI0820 12:42:02.054738 22726 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0820 12:43:27.244580 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87908\nI0820 12:43:27.244972 22726 solver.cpp:404]     Test net output #1: loss = 0.38889 (* 1 = 0.38889 loss)\nI0820 12:43:28.574936 22726 solver.cpp:228] Iteration 75300, loss = 0.251905\nI0820 12:43:28.574978 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 12:43:28.574995 22726 solver.cpp:244]     Train net output #1: loss = 0.251904 (* 1 = 0.251904 loss)\nI0820 12:43:28.653579 22726 sgd_solver.cpp:166] Iteration 75300, lr = 1.8825\nI0820 12:45:46.331573 22726 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0820 12:47:11.514461 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0820 12:47:11.514855 22726 solver.cpp:404]     Test net output #1: loss = 0.397534 (* 1 = 0.397534 loss)\nI0820 
12:47:12.844463 22726 solver.cpp:228] Iteration 75400, loss = 0.164249\nI0820 12:47:12.844506 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 12:47:12.844521 22726 solver.cpp:244]     Train net output #1: loss = 0.164248 (* 1 = 0.164248 loss)\nI0820 12:47:12.922722 22726 sgd_solver.cpp:166] Iteration 75400, lr = 1.885\nI0820 12:49:30.600100 22726 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0820 12:50:55.769506 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87892\nI0820 12:50:55.769870 22726 solver.cpp:404]     Test net output #1: loss = 0.398464 (* 1 = 0.398464 loss)\nI0820 12:50:57.099788 22726 solver.cpp:228] Iteration 75500, loss = 0.13427\nI0820 12:50:57.099827 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:50:57.099841 22726 solver.cpp:244]     Train net output #1: loss = 0.13427 (* 1 = 0.13427 loss)\nI0820 12:50:57.182193 22726 sgd_solver.cpp:166] Iteration 75500, lr = 1.8875\nI0820 12:53:15.098526 22726 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0820 12:54:40.269979 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0820 12:54:40.270364 22726 solver.cpp:404]     Test net output #1: loss = 0.408873 (* 1 = 0.408873 loss)\nI0820 12:54:41.602969 22726 solver.cpp:228] Iteration 75600, loss = 0.149693\nI0820 12:54:41.603013 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 12:54:41.603035 22726 solver.cpp:244]     Train net output #1: loss = 0.149692 (* 1 = 0.149692 loss)\nI0820 12:54:41.680789 22726 sgd_solver.cpp:166] Iteration 75600, lr = 1.89\nI0820 12:56:59.682093 22726 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0820 12:58:24.850605 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87584\nI0820 12:58:24.851011 22726 solver.cpp:404]     Test net output #1: loss = 0.416317 (* 1 = 0.416317 loss)\nI0820 12:58:26.180848 22726 solver.cpp:228] Iteration 75700, loss = 0.176042\nI0820 12:58:26.180887 22726 solver.cpp:244]     Train net 
output #0: accuracy = 0.928\nI0820 12:58:26.180902 22726 solver.cpp:244]     Train net output #1: loss = 0.176041 (* 1 = 0.176041 loss)\nI0820 12:58:26.262768 22726 sgd_solver.cpp:166] Iteration 75700, lr = 1.8925\nI0820 13:00:44.236052 22726 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0820 13:02:09.399945 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88\nI0820 13:02:09.400351 22726 solver.cpp:404]     Test net output #1: loss = 0.389832 (* 1 = 0.389832 loss)\nI0820 13:02:10.729926 22726 solver.cpp:228] Iteration 75800, loss = 0.215055\nI0820 13:02:10.729964 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 13:02:10.729979 22726 solver.cpp:244]     Train net output #1: loss = 0.215055 (* 1 = 0.215055 loss)\nI0820 13:02:10.814836 22726 sgd_solver.cpp:166] Iteration 75800, lr = 1.895\nI0820 13:04:28.762104 22726 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0820 13:05:53.905349 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0820 13:05:53.905728 22726 solver.cpp:404]     Test net output #1: loss = 0.404673 (* 1 = 0.404673 loss)\nI0820 13:05:55.235736 22726 solver.cpp:228] Iteration 75900, loss = 0.19645\nI0820 13:05:55.235775 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 13:05:55.235791 22726 solver.cpp:244]     Train net output #1: loss = 0.19645 (* 1 = 0.19645 loss)\nI0820 13:05:55.312050 22726 sgd_solver.cpp:166] Iteration 75900, lr = 1.8975\nI0820 13:08:13.238243 22726 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0820 13:09:38.400279 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 13:09:38.400687 22726 solver.cpp:404]     Test net output #1: loss = 0.38327 (* 1 = 0.38327 loss)\nI0820 13:09:39.730602 22726 solver.cpp:228] Iteration 76000, loss = 0.227653\nI0820 13:09:39.730641 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:09:39.730656 22726 solver.cpp:244]     Train net output #1: loss = 0.227652 (* 1 = 0.227652 
loss)\nI0820 13:09:39.816097 22726 sgd_solver.cpp:166] Iteration 76000, lr = 1.9\nI0820 13:11:57.702373 22726 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0820 13:13:22.837357 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0820 13:13:22.837759 22726 solver.cpp:404]     Test net output #1: loss = 0.396481 (* 1 = 0.396481 loss)\nI0820 13:13:24.167028 22726 solver.cpp:228] Iteration 76100, loss = 0.144588\nI0820 13:13:24.167068 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:13:24.167083 22726 solver.cpp:244]     Train net output #1: loss = 0.144588 (* 1 = 0.144588 loss)\nI0820 13:13:24.254005 22726 sgd_solver.cpp:166] Iteration 76100, lr = 1.9025\nI0820 13:15:42.205215 22726 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0820 13:17:07.350438 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0820 13:17:07.350810 22726 solver.cpp:404]     Test net output #1: loss = 0.385264 (* 1 = 0.385264 loss)\nI0820 13:17:08.680835 22726 solver.cpp:228] Iteration 76200, loss = 0.0905902\nI0820 13:17:08.680877 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 13:17:08.680893 22726 solver.cpp:244]     Train net output #1: loss = 0.0905896 (* 1 = 0.0905896 loss)\nI0820 13:17:08.761554 22726 sgd_solver.cpp:166] Iteration 76200, lr = 1.905\nI0820 13:19:26.691099 22726 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0820 13:20:51.837505 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88608\nI0820 13:20:51.837882 22726 solver.cpp:404]     Test net output #1: loss = 0.37673 (* 1 = 0.37673 loss)\nI0820 13:20:53.167799 22726 solver.cpp:228] Iteration 76300, loss = 0.173885\nI0820 13:20:53.167840 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:20:53.167855 22726 solver.cpp:244]     Train net output #1: loss = 0.173885 (* 1 = 0.173885 loss)\nI0820 13:20:53.256880 22726 sgd_solver.cpp:166] Iteration 76300, lr = 1.9075\nI0820 13:23:11.362239 22726 
solver.cpp:337] Iteration 76400, Testing net (#0)\nI0820 13:24:36.511334 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0820 13:24:36.511749 22726 solver.cpp:404]     Test net output #1: loss = 0.426561 (* 1 = 0.426561 loss)\nI0820 13:24:37.842514 22726 solver.cpp:228] Iteration 76400, loss = 0.203817\nI0820 13:24:37.842555 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 13:24:37.842569 22726 solver.cpp:244]     Train net output #1: loss = 0.203816 (* 1 = 0.203816 loss)\nI0820 13:24:37.924984 22726 sgd_solver.cpp:166] Iteration 76400, lr = 1.91\nI0820 13:26:55.935068 22726 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0820 13:28:20.846060 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87272\nI0820 13:28:20.846391 22726 solver.cpp:404]     Test net output #1: loss = 0.408585 (* 1 = 0.408585 loss)\nI0820 13:28:22.176949 22726 solver.cpp:228] Iteration 76500, loss = 0.144184\nI0820 13:28:22.176990 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:28:22.177011 22726 solver.cpp:244]     Train net output #1: loss = 0.144184 (* 1 = 0.144184 loss)\nI0820 13:28:22.257505 22726 sgd_solver.cpp:166] Iteration 76500, lr = 1.9125\nI0820 13:30:40.172514 22726 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0820 13:32:05.253355 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87536\nI0820 13:32:05.253713 22726 solver.cpp:404]     Test net output #1: loss = 0.409231 (* 1 = 0.409231 loss)\nI0820 13:32:06.583416 22726 solver.cpp:228] Iteration 76600, loss = 0.230276\nI0820 13:32:06.583458 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:32:06.583473 22726 solver.cpp:244]     Train net output #1: loss = 0.230276 (* 1 = 0.230276 loss)\nI0820 13:32:06.665987 22726 sgd_solver.cpp:166] Iteration 76600, lr = 1.915\nI0820 13:34:24.560045 22726 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0820 13:35:49.630429 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.87988\nI0820 13:35:49.630720 22726 solver.cpp:404]     Test net output #1: loss = 0.396744 (* 1 = 0.396744 loss)\nI0820 13:35:50.960469 22726 solver.cpp:228] Iteration 76700, loss = 0.189339\nI0820 13:35:50.960510 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:35:50.960526 22726 solver.cpp:244]     Train net output #1: loss = 0.189339 (* 1 = 0.189339 loss)\nI0820 13:35:51.045434 22726 sgd_solver.cpp:166] Iteration 76700, lr = 1.9175\nI0820 13:38:09.004689 22726 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0820 13:39:34.119557 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87276\nI0820 13:39:34.119894 22726 solver.cpp:404]     Test net output #1: loss = 0.431531 (* 1 = 0.431531 loss)\nI0820 13:39:35.449302 22726 solver.cpp:228] Iteration 76800, loss = 0.136495\nI0820 13:39:35.449345 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:39:35.449362 22726 solver.cpp:244]     Train net output #1: loss = 0.136494 (* 1 = 0.136494 loss)\nI0820 13:39:35.532024 22726 sgd_solver.cpp:166] Iteration 76800, lr = 1.92\nI0820 13:41:53.430372 22726 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0820 13:43:18.222316 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88464\nI0820 13:43:18.222642 22726 solver.cpp:404]     Test net output #1: loss = 0.380116 (* 1 = 0.380116 loss)\nI0820 13:43:19.552345 22726 solver.cpp:228] Iteration 76900, loss = 0.133879\nI0820 13:43:19.552388 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:43:19.552403 22726 solver.cpp:244]     Train net output #1: loss = 0.133878 (* 1 = 0.133878 loss)\nI0820 13:43:19.636019 22726 sgd_solver.cpp:166] Iteration 76900, lr = 1.9225\nI0820 13:45:37.563534 22726 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0820 13:47:02.413808 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87668\nI0820 13:47:02.414091 22726 solver.cpp:404]     Test net output #1: loss = 0.39802 (* 1 = 0.39802 loss)\nI0820 
13:47:03.743608 22726 solver.cpp:228] Iteration 77000, loss = 0.245751\nI0820 13:47:03.743649 22726 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:47:03.743662 22726 solver.cpp:244]     Train net output #1: loss = 0.24575 (* 1 = 0.24575 loss)\nI0820 13:47:03.831125 22726 sgd_solver.cpp:166] Iteration 77000, lr = 1.925\nI0820 13:49:21.787550 22726 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0820 13:50:46.807502 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0820 13:50:46.807883 22726 solver.cpp:404]     Test net output #1: loss = 0.410063 (* 1 = 0.410063 loss)\nI0820 13:50:48.138108 22726 solver.cpp:228] Iteration 77100, loss = 0.153369\nI0820 13:50:48.138149 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:50:48.138164 22726 solver.cpp:244]     Train net output #1: loss = 0.153368 (* 1 = 0.153368 loss)\nI0820 13:50:48.219696 22726 sgd_solver.cpp:166] Iteration 77100, lr = 1.9275\nI0820 13:53:06.418627 22726 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0820 13:54:31.532109 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88556\nI0820 13:54:31.532407 22726 solver.cpp:404]     Test net output #1: loss = 0.394801 (* 1 = 0.394801 loss)\nI0820 13:54:32.862540 22726 solver.cpp:228] Iteration 77200, loss = 0.0961737\nI0820 13:54:32.862581 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:54:32.862596 22726 solver.cpp:244]     Train net output #1: loss = 0.096173 (* 1 = 0.096173 loss)\nI0820 13:54:32.944257 22726 sgd_solver.cpp:166] Iteration 77200, lr = 1.93\nI0820 13:56:50.903728 22726 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0820 13:58:15.670276 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87784\nI0820 13:58:15.670588 22726 solver.cpp:404]     Test net output #1: loss = 0.412729 (* 1 = 0.412729 loss)\nI0820 13:58:17.000195 22726 solver.cpp:228] Iteration 77300, loss = 0.135441\nI0820 13:58:17.000237 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0820 13:58:17.000253 22726 solver.cpp:244]     Train net output #1: loss = 0.135441 (* 1 = 0.135441 loss)\nI0820 13:58:17.079990 22726 sgd_solver.cpp:166] Iteration 77300, lr = 1.9325\nI0820 14:00:35.096531 22726 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0820 14:02:00.120770 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0820 14:02:00.121101 22726 solver.cpp:404]     Test net output #1: loss = 0.397102 (* 1 = 0.397102 loss)\nI0820 14:02:01.450671 22726 solver.cpp:228] Iteration 77400, loss = 0.192876\nI0820 14:02:01.450716 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:02:01.450731 22726 solver.cpp:244]     Train net output #1: loss = 0.192876 (* 1 = 0.192876 loss)\nI0820 14:02:01.539140 22726 sgd_solver.cpp:166] Iteration 77400, lr = 1.935\nI0820 14:04:19.499665 22726 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0820 14:05:44.366962 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0820 14:05:44.367285 22726 solver.cpp:404]     Test net output #1: loss = 0.375663 (* 1 = 0.375663 loss)\nI0820 14:05:45.697445 22726 solver.cpp:228] Iteration 77500, loss = 0.0958344\nI0820 14:05:45.697486 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 14:05:45.697502 22726 solver.cpp:244]     Train net output #1: loss = 0.0958337 (* 1 = 0.0958337 loss)\nI0820 14:05:45.778276 22726 sgd_solver.cpp:166] Iteration 77500, lr = 1.9375\nI0820 14:08:03.847447 22726 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0820 14:09:28.955492 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0820 14:09:28.955831 22726 solver.cpp:404]     Test net output #1: loss = 0.374539 (* 1 = 0.374539 loss)\nI0820 14:09:30.285182 22726 solver.cpp:228] Iteration 77600, loss = 0.104719\nI0820 14:09:30.285223 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 14:09:30.285238 22726 solver.cpp:244]     Train net output #1: loss = 0.104718 (* 1 
= 0.104718 loss)\nI0820 14:09:30.365470 22726 sgd_solver.cpp:166] Iteration 77600, lr = 1.94\nI0820 14:11:48.365609 22726 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0820 14:13:13.407794 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0820 14:13:13.408176 22726 solver.cpp:404]     Test net output #1: loss = 0.415667 (* 1 = 0.415667 loss)\nI0820 14:13:14.738032 22726 solver.cpp:228] Iteration 77700, loss = 0.146508\nI0820 14:13:14.738075 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:13:14.738090 22726 solver.cpp:244]     Train net output #1: loss = 0.146507 (* 1 = 0.146507 loss)\nI0820 14:13:14.818399 22726 sgd_solver.cpp:166] Iteration 77700, lr = 1.9425\nI0820 14:15:32.785533 22726 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0820 14:16:57.630792 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0820 14:16:57.631098 22726 solver.cpp:404]     Test net output #1: loss = 0.40116 (* 1 = 0.40116 loss)\nI0820 14:16:58.960631 22726 solver.cpp:228] Iteration 77800, loss = 0.105144\nI0820 14:16:58.960674 22726 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 14:16:58.960688 22726 solver.cpp:244]     Train net output #1: loss = 0.105143 (* 1 = 0.105143 loss)\nI0820 14:16:59.041939 22726 sgd_solver.cpp:166] Iteration 77800, lr = 1.945\nI0820 14:19:16.984874 22726 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0820 14:20:41.822753 22726 solver.cpp:404]     Test net output #0: accuracy = 0.86888\nI0820 14:20:41.823083 22726 solver.cpp:404]     Test net output #1: loss = 0.438538 (* 1 = 0.438538 loss)\nI0820 14:20:43.152793 22726 solver.cpp:228] Iteration 77900, loss = 0.163665\nI0820 14:20:43.152835 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:20:43.152850 22726 solver.cpp:244]     Train net output #1: loss = 0.163664 (* 1 = 0.163664 loss)\nI0820 14:20:43.238137 22726 sgd_solver.cpp:166] Iteration 77900, lr = 1.9475\nI0820 14:23:01.229079 22726 
solver.cpp:337] Iteration 78000, Testing net (#0)\nI0820 14:24:26.024283 22726 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0820 14:24:26.024552 22726 solver.cpp:404]     Test net output #1: loss = 0.410094 (* 1 = 0.410094 loss)\nI0820 14:24:27.353356 22726 solver.cpp:228] Iteration 78000, loss = 0.221017\nI0820 14:24:27.353399 22726 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 14:24:27.353415 22726 solver.cpp:244]     Train net output #1: loss = 0.221016 (* 1 = 0.221016 loss)\nI0820 14:24:27.432350 22726 sgd_solver.cpp:166] Iteration 78000, lr = 1.95\nI0820 14:26:45.271493 22726 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0820 14:28:10.316627 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0820 14:28:10.317055 22726 solver.cpp:404]     Test net output #1: loss = 0.369684 (* 1 = 0.369684 loss)\nI0820 14:28:11.648133 22726 solver.cpp:228] Iteration 78100, loss = 0.183313\nI0820 14:28:11.648175 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:28:11.648190 22726 solver.cpp:244]     Train net output #1: loss = 0.183313 (* 1 = 0.183313 loss)\nI0820 14:28:11.733726 22726 sgd_solver.cpp:166] Iteration 78100, lr = 1.9525\nI0820 14:30:29.611793 22726 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0820 14:31:54.591418 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88228\nI0820 14:31:54.591733 22726 solver.cpp:404]     Test net output #1: loss = 0.392198 (* 1 = 0.392198 loss)\nI0820 14:31:55.922189 22726 solver.cpp:228] Iteration 78200, loss = 0.144744\nI0820 14:31:55.922232 22726 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:31:55.922247 22726 solver.cpp:244]     Train net output #1: loss = 0.144743 (* 1 = 0.144743 loss)\nI0820 14:31:56.006717 22726 sgd_solver.cpp:166] Iteration 78200, lr = 1.955\nI0820 14:34:13.982762 22726 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0820 14:35:39.074765 22726 solver.cpp:404]     Test net output #0: accuracy = 
0.87696\nI0820 14:35:39.075067 22726 solver.cpp:404]     Test net output #1: loss = 0.413357 (* 1 = 0.413357 loss)\nI0820 14:35:40.404901 22726 solver.cpp:228] Iteration 78300, loss = 0.206835\nI0820 14:35:40.404948 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:35:40.404965 22726 solver.cpp:244]     Train net output #1: loss = 0.206835 (* 1 = 0.206835 loss)\nI0820 14:35:40.487354 22726 sgd_solver.cpp:166] Iteration 78300, lr = 1.9575\nI0820 14:37:58.528717 22726 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0820 14:39:23.593358 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0820 14:39:23.593660 22726 solver.cpp:404]     Test net output #1: loss = 0.388896 (* 1 = 0.388896 loss)\nI0820 14:39:24.923424 22726 solver.cpp:228] Iteration 78400, loss = 0.114951\nI0820 14:39:24.923467 22726 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 14:39:24.923483 22726 solver.cpp:244]     Train net output #1: loss = 0.11495 (* 1 = 0.11495 loss)\nI0820 14:39:25.010965 22726 sgd_solver.cpp:166] Iteration 78400, lr = 1.96\nI0820 14:41:42.990394 22726 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0820 14:43:08.141666 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87592\nI0820 14:43:08.142048 22726 solver.cpp:404]     Test net output #1: loss = 0.407938 (* 1 = 0.407938 loss)\nI0820 14:43:09.472627 22726 solver.cpp:228] Iteration 78500, loss = 0.205523\nI0820 14:43:09.472674 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 14:43:09.472697 22726 solver.cpp:244]     Train net output #1: loss = 0.205523 (* 1 = 0.205523 loss)\nI0820 14:43:09.557405 22726 sgd_solver.cpp:166] Iteration 78500, lr = 1.9625\nI0820 14:45:27.553800 22726 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0820 14:46:52.682548 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87688\nI0820 14:46:52.682888 22726 solver.cpp:404]     Test net output #1: loss = 0.391885 (* 1 = 0.391885 loss)\nI0820 
14:46:54.015544 22726 solver.cpp:228] Iteration 78600, loss = 0.236874\nI0820 14:46:54.015592 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:46:54.015615 22726 solver.cpp:244]     Train net output #1: loss = 0.236873 (* 1 = 0.236873 loss)\nI0820 14:46:54.096665 22726 sgd_solver.cpp:166] Iteration 78600, lr = 1.965\nI0820 14:49:12.108933 22726 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0820 14:50:37.069764 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87992\nI0820 14:50:37.070111 22726 solver.cpp:404]     Test net output #1: loss = 0.394966 (* 1 = 0.394966 loss)\nI0820 14:50:38.404484 22726 solver.cpp:228] Iteration 78700, loss = 0.164385\nI0820 14:50:38.404531 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 14:50:38.404552 22726 solver.cpp:244]     Train net output #1: loss = 0.164384 (* 1 = 0.164384 loss)\nI0820 14:50:38.488199 22726 sgd_solver.cpp:166] Iteration 78700, lr = 1.9675\nI0820 14:52:56.671677 22726 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0820 14:54:21.709843 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87624\nI0820 14:54:21.710211 22726 solver.cpp:404]     Test net output #1: loss = 0.411141 (* 1 = 0.411141 loss)\nI0820 14:54:23.043853 22726 solver.cpp:228] Iteration 78800, loss = 0.242659\nI0820 14:54:23.043895 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:54:23.043910 22726 solver.cpp:244]     Train net output #1: loss = 0.242659 (* 1 = 0.242659 loss)\nI0820 14:54:23.123684 22726 sgd_solver.cpp:166] Iteration 78800, lr = 1.97\nI0820 14:56:41.109457 22726 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0820 14:58:05.920125 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0820 14:58:05.920460 22726 solver.cpp:404]     Test net output #1: loss = 0.373551 (* 1 = 0.373551 loss)\nI0820 14:58:07.254307 22726 solver.cpp:228] Iteration 78900, loss = 0.102107\nI0820 14:58:07.254349 22726 solver.cpp:244]     Train 
net output #0: accuracy = 0.976\nI0820 14:58:07.254365 22726 solver.cpp:244]     Train net output #1: loss = 0.102107 (* 1 = 0.102107 loss)\nI0820 14:58:07.336570 22726 sgd_solver.cpp:166] Iteration 78900, lr = 1.9725\nI0820 15:00:25.357919 22726 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0820 15:01:50.287083 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88412\nI0820 15:01:50.287427 22726 solver.cpp:404]     Test net output #1: loss = 0.379112 (* 1 = 0.379112 loss)\nI0820 15:01:51.620398 22726 solver.cpp:228] Iteration 79000, loss = 0.172804\nI0820 15:01:51.620441 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:01:51.620457 22726 solver.cpp:244]     Train net output #1: loss = 0.172804 (* 1 = 0.172804 loss)\nI0820 15:01:51.699148 22726 sgd_solver.cpp:166] Iteration 79000, lr = 1.975\nI0820 15:04:09.659492 22726 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0820 15:05:34.476621 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0820 15:05:34.476960 22726 solver.cpp:404]     Test net output #1: loss = 0.402809 (* 1 = 0.402809 loss)\nI0820 15:05:35.810365 22726 solver.cpp:228] Iteration 79100, loss = 0.203202\nI0820 15:05:35.810408 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:05:35.810425 22726 solver.cpp:244]     Train net output #1: loss = 0.203202 (* 1 = 0.203202 loss)\nI0820 15:05:35.893276 22726 sgd_solver.cpp:166] Iteration 79100, lr = 1.9775\nI0820 15:07:53.833035 22726 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0820 15:09:18.758970 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0820 15:09:18.759366 22726 solver.cpp:404]     Test net output #1: loss = 0.385764 (* 1 = 0.385764 loss)\nI0820 15:09:20.093422 22726 solver.cpp:228] Iteration 79200, loss = 0.202657\nI0820 15:09:20.093463 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:09:20.093479 22726 solver.cpp:244]     Train net output #1: loss = 0.202657 (* 1 = 
0.202657 loss)\nI0820 15:09:20.174994 22726 sgd_solver.cpp:166] Iteration 79200, lr = 1.98\nI0820 15:11:38.123219 22726 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0820 15:13:03.287431 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0820 15:13:03.287798 22726 solver.cpp:404]     Test net output #1: loss = 0.396796 (* 1 = 0.396796 loss)\nI0820 15:13:04.620286 22726 solver.cpp:228] Iteration 79300, loss = 0.151116\nI0820 15:13:04.620326 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 15:13:04.620342 22726 solver.cpp:244]     Train net output #1: loss = 0.151116 (* 1 = 0.151116 loss)\nI0820 15:13:04.698114 22726 sgd_solver.cpp:166] Iteration 79300, lr = 1.9825\nI0820 15:15:22.628248 22726 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0820 15:16:47.792357 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87148\nI0820 15:16:47.792734 22726 solver.cpp:404]     Test net output #1: loss = 0.444745 (* 1 = 0.444745 loss)\nI0820 15:16:49.126015 22726 solver.cpp:228] Iteration 79400, loss = 0.175507\nI0820 15:16:49.126052 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:16:49.126068 22726 solver.cpp:244]     Train net output #1: loss = 0.175507 (* 1 = 0.175507 loss)\nI0820 15:16:49.207145 22726 sgd_solver.cpp:166] Iteration 79400, lr = 1.985\nI0820 15:19:07.168638 22726 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0820 15:20:32.325142 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87116\nI0820 15:20:32.325515 22726 solver.cpp:404]     Test net output #1: loss = 0.417953 (* 1 = 0.417953 loss)\nI0820 15:20:33.659170 22726 solver.cpp:228] Iteration 79500, loss = 0.270332\nI0820 15:20:33.659212 22726 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 15:20:33.659229 22726 solver.cpp:244]     Train net output #1: loss = 0.270332 (* 1 = 0.270332 loss)\nI0820 15:20:33.733561 22726 sgd_solver.cpp:166] Iteration 79500, lr = 1.9875\nI0820 15:22:51.736801 22726 
solver.cpp:337] Iteration 79600, Testing net (#0)\nI0820 15:24:16.900254 22726 solver.cpp:404]     Test net output #0: accuracy = 0.881201\nI0820 15:24:16.900660 22726 solver.cpp:404]     Test net output #1: loss = 0.397514 (* 1 = 0.397514 loss)\nI0820 15:24:18.234474 22726 solver.cpp:228] Iteration 79600, loss = 0.142845\nI0820 15:24:18.234518 22726 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 15:24:18.234534 22726 solver.cpp:244]     Train net output #1: loss = 0.142845 (* 1 = 0.142845 loss)\nI0820 15:24:18.310619 22726 sgd_solver.cpp:166] Iteration 79600, lr = 1.99\nI0820 15:26:36.373544 22726 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0820 15:28:01.548301 22726 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0820 15:28:01.548703 22726 solver.cpp:404]     Test net output #1: loss = 0.394269 (* 1 = 0.394269 loss)\nI0820 15:28:02.881055 22726 solver.cpp:228] Iteration 79700, loss = 0.164867\nI0820 15:28:02.881094 22726 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 15:28:02.881110 22726 solver.cpp:244]     Train net output #1: loss = 0.164867 (* 1 = 0.164867 loss)\nI0820 15:28:02.964377 22726 sgd_solver.cpp:166] Iteration 79700, lr = 1.9925\nI0820 15:30:20.995602 22726 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0820 15:31:46.186054 22726 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0820 15:31:46.186437 22726 solver.cpp:404]     Test net output #1: loss = 0.397034 (* 1 = 0.397034 loss)\nI0820 15:31:47.520376 22726 solver.cpp:228] Iteration 79800, loss = 0.218511\nI0820 15:31:47.520417 22726 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:31:47.520432 22726 solver.cpp:244]     Train net output #1: loss = 0.218511 (* 1 = 0.218511 loss)\nI0820 15:31:47.605747 22726 sgd_solver.cpp:166] Iteration 79800, lr = 1.995\nI0820 15:34:05.613067 22726 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0820 15:35:30.785610 22726 solver.cpp:404]     Test net output #0: accuracy 
= 0.88268\nI0820 15:35:30.786031 22726 solver.cpp:404]     Test net output #1: loss = 0.373418 (* 1 = 0.373418 loss)\nI0820 15:35:32.120077 22726 solver.cpp:228] Iteration 79900, loss = 0.182946\nI0820 15:35:32.120120 22726 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:35:32.120136 22726 solver.cpp:244]     Train net output #1: loss = 0.182946 (* 1 = 0.182946 loss)\nI0820 15:35:32.199110 22726 sgd_solver.cpp:166] Iteration 79900, lr = 1.9975\nI0820 15:37:50.248652 22726 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range2SS80kRes56LR_iter_80000.caffemodel\nI0820 15:37:50.698093 22726 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range2SS80kRes56LR_iter_80000.solverstate\nI0820 15:37:51.153499 22726 solver.cpp:317] Iteration 80000, loss = 0.150606\nI0820 15:37:51.153550 22726 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0820 15:39:16.310055 22726 solver.cpp:404]     Test net output #0: accuracy = 0.877321\nI0820 15:39:16.310467 22726 solver.cpp:404]     Test net output #1: loss = 0.406934 (* 1 = 0.406934 loss)\nI0820 15:39:16.310478 22726 solver.cpp:322] Optimization Done.\nI0820 15:39:21.727124 22726 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range2SS80kRes56wd0",
    "content": "I0818 13:44:11.025399 20842 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0818 13:44:11.027765 20842 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0818 13:44:11.029311 20842 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0818 13:44:11.030531 20842 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0818 13:44:11.032047 20842 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0818 13:44:11.033272 20842 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0818 13:44:11.034509 20842 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0818 13:44:11.035733 20842 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0818 13:44:11.036958 20842 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0818 13:44:11.446710 20842 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 80000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range2SS80kRes56wd0\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 2\nI0818 13:44:11.451990 20842 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0818 13:44:11.466930 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:11.467011 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:11.468145 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0818 13:44:11.468204 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0818 13:44:11.468230 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0818 13:44:11.468251 20842 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0818 13:44:11.468271 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0818 13:44:11.468288 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0818 13:44:11.468307 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0818 13:44:11.468325 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0818 13:44:11.468345 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0818 13:44:11.468364 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0818 13:44:11.468384 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0818 13:44:11.468400 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0818 13:44:11.468417 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0818 13:44:11.468436 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0818 13:44:11.468456 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0818 13:44:11.468472 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0818 13:44:11.468490 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0818 13:44:11.468508 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0818 
13:44:11.468528 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0818 13:44:11.468544 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0818 13:44:11.468578 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0818 13:44:11.468596 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0818 13:44:11.468621 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0818 13:44:11.468641 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0818 13:44:11.468658 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0818 13:44:11.468683 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0818 13:44:11.468704 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0818 13:44:11.468721 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0818 13:44:11.468739 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0818 13:44:11.468756 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0818 13:44:11.468776 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0818 13:44:11.468794 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0818 13:44:11.468814 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0818 13:44:11.468829 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0818 13:44:11.468848 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0818 13:44:11.468866 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0818 13:44:11.468884 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0818 13:44:11.468902 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0818 13:44:11.468919 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0818 13:44:11.468937 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0818 13:44:11.468961 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0818 13:44:11.468978 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0818 13:44:11.468994 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0818 13:44:11.469013 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0818 13:44:11.469033 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0818 13:44:11.469050 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0818 13:44:11.469069 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0818 13:44:11.469084 20842 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0818 13:44:11.469103 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0818 13:44:11.469120 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0818 13:44:11.469137 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0818 13:44:11.469166 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0818 13:44:11.469187 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0818 13:44:11.469207 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0818 13:44:11.469225 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0818 13:44:11.469240 20842 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0818 13:44:11.471002 20842 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0818 13:44:11.473104 20842 layer_factory.hpp:77] Creating layer dataLayer\nI0818 13:44:11.478813 20842 net.cpp:100] Creating Layer dataLayer\nI0818 13:44:11.478893 20842 net.cpp:408] dataLayer -> data_top\nI0818 13:44:11.479126 20842 net.cpp:408] dataLayer -> label\nI0818 13:44:11.479256 20842 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 13:44:11.859120 20853 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0818 13:44:11.997714 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:12.004843 20842 net.cpp:150] Setting up dataLayer\nI0818 13:44:12.004930 20842 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 13:44:12.004951 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.004961 20842 net.cpp:165] Memory required for data: 1536500\nI0818 13:44:12.004987 20842 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 13:44:12.005013 20842 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 13:44:12.005029 20842 net.cpp:434] label_dataLayer_1_split <- label\nI0818 13:44:12.005061 20842 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 13:44:12.005091 20842 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 13:44:12.005199 20842 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 13:44:12.005218 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.005230 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.005240 20842 net.cpp:165] Memory required for data: 1537500\nI0818 13:44:12.005250 20842 layer_factory.hpp:77] Creating layer pre_conv\nI0818 13:44:12.005336 20842 
net.cpp:100] Creating Layer pre_conv\nI0818 13:44:12.005352 20842 net.cpp:434] pre_conv <- data_top\nI0818 13:44:12.005373 20842 net.cpp:408] pre_conv -> pre_conv_top\nI0818 13:44:12.007189 20842 net.cpp:150] Setting up pre_conv\nI0818 13:44:12.007212 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.007223 20842 net.cpp:165] Memory required for data: 9729500\nI0818 13:44:12.007319 20842 layer_factory.hpp:77] Creating layer pre_bn\nI0818 13:44:12.007436 20842 net.cpp:100] Creating Layer pre_bn\nI0818 13:44:12.007452 20842 net.cpp:434] pre_bn <- pre_conv_top\nI0818 13:44:12.007468 20842 net.cpp:408] pre_bn -> pre_bn_top\nI0818 13:44:12.007570 20854 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:12.007849 20842 net.cpp:150] Setting up pre_bn\nI0818 13:44:12.007876 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.007887 20842 net.cpp:165] Memory required for data: 17921500\nI0818 13:44:12.007917 20842 layer_factory.hpp:77] Creating layer pre_scale\nI0818 13:44:12.007984 20842 net.cpp:100] Creating Layer pre_scale\nI0818 13:44:12.007999 20842 net.cpp:434] pre_scale <- pre_bn_top\nI0818 13:44:12.008014 20842 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 13:44:12.008234 20842 layer_factory.hpp:77] Creating layer pre_scale\nI0818 13:44:12.020615 20842 net.cpp:150] Setting up pre_scale\nI0818 13:44:12.020642 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.020653 20842 net.cpp:165] Memory required for data: 26113500\nI0818 13:44:12.020681 20842 layer_factory.hpp:77] Creating layer pre_relu\nI0818 13:44:12.020753 20842 net.cpp:100] Creating Layer pre_relu\nI0818 13:44:12.020769 20842 net.cpp:434] pre_relu <- pre_bn_top\nI0818 13:44:12.020789 20842 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 13:44:12.020810 20842 net.cpp:150] Setting up pre_relu\nI0818 13:44:12.020828 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.020836 20842 net.cpp:165] Memory required for data: 
34305500\nI0818 13:44:12.020848 20842 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 13:44:12.020862 20842 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 13:44:12.020872 20842 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 13:44:12.020892 20842 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 13:44:12.020913 20842 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 13:44:12.020989 20842 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 13:44:12.021010 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.021024 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.021034 20842 net.cpp:165] Memory required for data: 50689500\nI0818 13:44:12.021044 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 13:44:12.021070 20842 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 13:44:12.021081 20842 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 13:44:12.021100 20842 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 13:44:12.021461 20842 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 13:44:12.021482 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.021492 20842 net.cpp:165] Memory required for data: 58881500\nI0818 13:44:12.021524 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 13:44:12.021550 20842 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 13:44:12.021564 20842 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 13:44:12.021580 20842 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 13:44:12.021859 20842 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 13:44:12.021878 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.021888 20842 net.cpp:165] Memory required for data: 67073500\nI0818 13:44:12.021910 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 13:44:12.021929 20842 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0818 13:44:12.021939 20842 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 13:44:12.021960 20842 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.022042 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 13:44:12.022215 20842 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 13:44:12.022234 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.022244 20842 net.cpp:165] Memory required for data: 75265500\nI0818 13:44:12.022264 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 13:44:12.022289 20842 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 13:44:12.022300 20842 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 13:44:12.022320 20842 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.022341 20842 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 13:44:12.022356 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.022366 20842 net.cpp:165] Memory required for data: 83457500\nI0818 13:44:12.022375 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 13:44:12.022402 20842 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 13:44:12.022413 20842 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 13:44:12.022430 20842 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 13:44:12.022791 20842 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 13:44:12.022811 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.022821 20842 net.cpp:165] Memory required for data: 91649500\nI0818 13:44:12.022840 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 13:44:12.022856 20842 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 13:44:12.022872 20842 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 13:44:12.022889 20842 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 13:44:12.023155 20842 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 13:44:12.023175 20842 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.023185 20842 net.cpp:165] Memory required for data: 99841500\nI0818 13:44:12.023211 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 13:44:12.023233 20842 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 13:44:12.023246 20842 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 13:44:12.023262 20842 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.023358 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 13:44:12.023538 20842 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 13:44:12.023557 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.023567 20842 net.cpp:165] Memory required for data: 108033500\nI0818 13:44:12.023586 20842 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 13:44:12.023659 20842 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 13:44:12.023681 20842 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 13:44:12.023695 20842 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 13:44:12.023717 20842 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 13:44:12.023828 20842 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 13:44:12.023849 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.023859 20842 net.cpp:165] Memory required for data: 116225500\nI0818 13:44:12.023869 20842 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 13:44:12.023883 20842 net.cpp:100] Creating Layer L1_b1_relu\nI0818 13:44:12.023895 20842 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 13:44:12.023910 20842 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.023932 20842 net.cpp:150] Setting up L1_b1_relu\nI0818 13:44:12.023948 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.023958 20842 net.cpp:165] Memory required for data: 124417500\nI0818 13:44:12.023970 20842 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.023986 20842 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.023998 20842 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 13:44:12.024013 20842 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 13:44:12.024031 20842 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 13:44:12.024109 20842 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.024129 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.024142 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.024160 20842 net.cpp:165] Memory required for data: 140801500\nI0818 13:44:12.024173 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 13:44:12.024191 20842 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 13:44:12.024204 20842 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 13:44:12.024226 20842 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 13:44:12.024580 20842 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 13:44:12.024667 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.024751 20842 net.cpp:165] Memory required for data: 148993500\nI0818 13:44:12.024821 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 13:44:12.024839 20842 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 13:44:12.024852 20842 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 13:44:12.024875 20842 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 13:44:12.025156 20842 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 13:44:12.025179 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.025190 20842 net.cpp:165] Memory required for data: 157185500\nI0818 13:44:12.025213 20842 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0818 13:44:12.025229 20842 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 13:44:12.025240 20842 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 13:44:12.025256 20842 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.025338 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 13:44:12.025513 20842 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 13:44:12.025532 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.025542 20842 net.cpp:165] Memory required for data: 165377500\nI0818 13:44:12.025560 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 13:44:12.025580 20842 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 13:44:12.025593 20842 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 13:44:12.025606 20842 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.025625 20842 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 13:44:12.025640 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.025650 20842 net.cpp:165] Memory required for data: 173569500\nI0818 13:44:12.025660 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 13:44:12.025693 20842 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 13:44:12.025707 20842 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 13:44:12.025728 20842 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 13:44:12.026070 20842 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 13:44:12.026089 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.026099 20842 net.cpp:165] Memory required for data: 181761500\nI0818 13:44:12.026118 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 13:44:12.026134 20842 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 13:44:12.026145 20842 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 13:44:12.026170 20842 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 13:44:12.026439 20842 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 13:44:12.026458 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.026468 20842 net.cpp:165] Memory required for data: 189953500\nI0818 13:44:12.026501 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 13:44:12.026520 20842 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 13:44:12.026531 20842 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 13:44:12.026547 20842 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.026641 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 13:44:12.026819 20842 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 13:44:12.026839 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.026849 20842 net.cpp:165] Memory required for data: 198145500\nI0818 13:44:12.026866 20842 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 13:44:12.026897 20842 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 13:44:12.026909 20842 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 13:44:12.026924 20842 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 13:44:12.026942 20842 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 13:44:12.026999 20842 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 13:44:12.027021 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.027030 20842 net.cpp:165] Memory required for data: 206337500\nI0818 13:44:12.027041 20842 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 13:44:12.027055 20842 net.cpp:100] Creating Layer L1_b2_relu\nI0818 13:44:12.027066 20842 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 13:44:12.027079 20842 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.027098 20842 net.cpp:150] Setting up L1_b2_relu\nI0818 13:44:12.027112 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.027122 20842 net.cpp:165] Memory required for 
data: 214529500\nI0818 13:44:12.027132 20842 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.027146 20842 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.027156 20842 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 13:44:12.027176 20842 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 13:44:12.027196 20842 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 13:44:12.027269 20842 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.027290 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.027304 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.027314 20842 net.cpp:165] Memory required for data: 230913500\nI0818 13:44:12.027325 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 13:44:12.027349 20842 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 13:44:12.027364 20842 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 13:44:12.027380 20842 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 13:44:12.027742 20842 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 13:44:12.027762 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.027771 20842 net.cpp:165] Memory required for data: 239105500\nI0818 13:44:12.027789 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 13:44:12.027812 20842 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 13:44:12.027824 20842 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 13:44:12.027845 20842 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 13:44:12.028110 20842 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 13:44:12.028128 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.028138 20842 net.cpp:165] Memory required for data: 
247297500\nI0818 13:44:12.028159 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 13:44:12.028175 20842 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 13:44:12.028187 20842 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 13:44:12.028201 20842 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.028288 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 13:44:12.028463 20842 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 13:44:12.028482 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.028492 20842 net.cpp:165] Memory required for data: 255489500\nI0818 13:44:12.028509 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 13:44:12.028530 20842 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 13:44:12.028542 20842 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 13:44:12.028556 20842 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.028575 20842 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 13:44:12.028596 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.028607 20842 net.cpp:165] Memory required for data: 263681500\nI0818 13:44:12.028617 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 13:44:12.028642 20842 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 13:44:12.028656 20842 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 13:44:12.028686 20842 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 13:44:12.029033 20842 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 13:44:12.029053 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.029062 20842 net.cpp:165] Memory required for data: 271873500\nI0818 13:44:12.029080 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 13:44:12.029108 20842 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 13:44:12.029124 20842 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 13:44:12.029141 20842 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 13:44:12.029420 20842 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 13:44:12.029439 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.029449 20842 net.cpp:165] Memory required for data: 280065500\nI0818 13:44:12.029470 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 13:44:12.029487 20842 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 13:44:12.029498 20842 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 13:44:12.029518 20842 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.029603 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 13:44:12.029779 20842 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 13:44:12.029803 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.029812 20842 net.cpp:165] Memory required for data: 288257500\nI0818 13:44:12.029830 20842 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 13:44:12.029847 20842 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 13:44:12.029860 20842 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 13:44:12.029872 20842 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 13:44:12.029888 20842 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 13:44:12.029943 20842 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 13:44:12.029961 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.029971 20842 net.cpp:165] Memory required for data: 296449500\nI0818 13:44:12.029981 20842 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 13:44:12.029995 20842 net.cpp:100] Creating Layer L1_b3_relu\nI0818 13:44:12.030012 20842 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 13:44:12.030027 20842 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.030045 20842 net.cpp:150] Setting up L1_b3_relu\nI0818 13:44:12.030059 20842 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 13:44:12.030069 20842 net.cpp:165] Memory required for data: 304641500\nI0818 13:44:12.030079 20842 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.030093 20842 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.030103 20842 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 13:44:12.030123 20842 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 13:44:12.030144 20842 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 13:44:12.030218 20842 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.030239 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.030253 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.030262 20842 net.cpp:165] Memory required for data: 321025500\nI0818 13:44:12.030273 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 13:44:12.030297 20842 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 13:44:12.030309 20842 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 13:44:12.030336 20842 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 13:44:12.030721 20842 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 13:44:12.030742 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.030752 20842 net.cpp:165] Memory required for data: 329217500\nI0818 13:44:12.030771 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 13:44:12.030792 20842 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 13:44:12.030805 20842 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 13:44:12.030822 20842 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 13:44:12.031095 20842 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 13:44:12.031114 20842 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 13:44:12.031123 20842 net.cpp:165] Memory required for data: 337409500\nI0818 13:44:12.031146 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 13:44:12.031162 20842 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 13:44:12.031173 20842 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 13:44:12.031193 20842 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.031277 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 13:44:12.031462 20842 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 13:44:12.031484 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.031496 20842 net.cpp:165] Memory required for data: 345601500\nI0818 13:44:12.031513 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 13:44:12.031528 20842 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 13:44:12.031540 20842 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 13:44:12.031554 20842 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.031574 20842 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 13:44:12.031587 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.031597 20842 net.cpp:165] Memory required for data: 353793500\nI0818 13:44:12.031607 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 13:44:12.031632 20842 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 13:44:12.031646 20842 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 13:44:12.031666 20842 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 13:44:12.032028 20842 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 13:44:12.032048 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.032058 20842 net.cpp:165] Memory required for data: 361985500\nI0818 13:44:12.032075 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 13:44:12.032099 20842 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 13:44:12.032110 20842 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 13:44:12.032131 20842 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 13:44:12.032400 20842 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 13:44:12.032420 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.032430 20842 net.cpp:165] Memory required for data: 370177500\nI0818 13:44:12.032451 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 13:44:12.032469 20842 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 13:44:12.032480 20842 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 13:44:12.032498 20842 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.032586 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 13:44:12.032768 20842 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 13:44:12.032786 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.032795 20842 net.cpp:165] Memory required for data: 378369500\nI0818 13:44:12.032814 20842 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 13:44:12.032831 20842 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 13:44:12.032842 20842 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 13:44:12.032856 20842 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 13:44:12.032876 20842 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 13:44:12.032943 20842 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 13:44:12.032965 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.032977 20842 net.cpp:165] Memory required for data: 386561500\nI0818 13:44:12.032989 20842 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 13:44:12.033001 20842 net.cpp:100] Creating Layer L1_b4_relu\nI0818 13:44:12.033013 20842 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 13:44:12.033027 20842 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.033046 20842 net.cpp:150] 
Setting up L1_b4_relu\nI0818 13:44:12.033059 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.033069 20842 net.cpp:165] Memory required for data: 394753500\nI0818 13:44:12.033079 20842 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.033100 20842 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.033113 20842 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 13:44:12.033128 20842 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 13:44:12.033146 20842 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 13:44:12.033217 20842 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.033241 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.033254 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.033264 20842 net.cpp:165] Memory required for data: 411137500\nI0818 13:44:12.033274 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 13:44:12.033294 20842 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 13:44:12.033306 20842 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 13:44:12.033324 20842 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 13:44:12.033690 20842 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 13:44:12.033710 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.033718 20842 net.cpp:165] Memory required for data: 419329500\nI0818 13:44:12.033756 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 13:44:12.033779 20842 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 13:44:12.033792 20842 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 13:44:12.033808 20842 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 13:44:12.034090 20842 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0818 13:44:12.034108 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.034117 20842 net.cpp:165] Memory required for data: 427521500\nI0818 13:44:12.034139 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 13:44:12.034157 20842 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 13:44:12.034168 20842 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 13:44:12.034188 20842 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.034271 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 13:44:12.034445 20842 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0818 13:44:12.034463 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.034472 20842 net.cpp:165] Memory required for data: 435713500\nI0818 13:44:12.034492 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 13:44:12.034508 20842 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 13:44:12.034519 20842 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 13:44:12.034533 20842 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.034553 20842 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 13:44:12.034566 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.034575 20842 net.cpp:165] Memory required for data: 443905500\nI0818 13:44:12.034586 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 13:44:12.034610 20842 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 13:44:12.034623 20842 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 13:44:12.034653 20842 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 13:44:12.035022 20842 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 13:44:12.035043 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.035053 20842 net.cpp:165] Memory required for data: 452097500\nI0818 13:44:12.035070 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 13:44:12.035099 20842 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 13:44:12.035112 20842 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 13:44:12.035133 20842 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 13:44:12.035410 20842 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 13:44:12.035429 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.035439 20842 net.cpp:165] Memory required for data: 460289500\nI0818 13:44:12.035459 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 13:44:12.035475 20842 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 13:44:12.035487 20842 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 13:44:12.035506 20842 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.035593 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 13:44:12.035770 20842 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 13:44:12.035789 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.035799 20842 net.cpp:165] Memory required for data: 468481500\nI0818 13:44:12.035818 20842 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 13:44:12.035842 20842 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 13:44:12.035854 20842 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 13:44:12.035868 20842 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 13:44:12.035884 20842 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 13:44:12.035941 20842 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 13:44:12.035959 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.035969 20842 net.cpp:165] Memory required for data: 476673500\nI0818 13:44:12.035979 20842 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 13:44:12.035992 20842 net.cpp:100] Creating Layer L1_b5_relu\nI0818 13:44:12.036005 20842 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 13:44:12.036023 20842 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.036042 20842 net.cpp:150] Setting up L1_b5_relu\nI0818 13:44:12.036056 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.036065 20842 net.cpp:165] Memory required for data: 484865500\nI0818 13:44:12.036075 20842 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.036089 20842 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.036099 20842 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 13:44:12.036119 20842 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 13:44:12.036139 20842 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 13:44:12.036214 20842 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.036233 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.036245 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.036250 20842 net.cpp:165] Memory required for data: 501249500\nI0818 13:44:12.036257 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 13:44:12.036273 20842 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 13:44:12.036279 20842 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 13:44:12.036288 20842 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 13:44:12.036604 20842 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 13:44:12.036617 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.036622 20842 net.cpp:165] Memory required for data: 509441500\nI0818 13:44:12.036638 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 13:44:12.036648 20842 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 13:44:12.036658 20842 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 13:44:12.036665 20842 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0818 13:44:12.037006 20842 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 13:44:12.037027 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.037036 20842 net.cpp:165] Memory required for data: 517633500\nI0818 13:44:12.037057 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 13:44:12.037075 20842 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 13:44:12.037086 20842 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 13:44:12.037106 20842 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.037194 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 13:44:12.037369 20842 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0818 13:44:12.037391 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.037402 20842 net.cpp:165] Memory required for data: 525825500\nI0818 13:44:12.037421 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 13:44:12.037437 20842 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 13:44:12.037448 20842 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 13:44:12.037462 20842 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.037480 20842 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 13:44:12.037494 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.037503 20842 net.cpp:165] Memory required for data: 534017500\nI0818 13:44:12.037513 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 13:44:12.037539 20842 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 13:44:12.037552 20842 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 13:44:12.037573 20842 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 13:44:12.037941 20842 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 13:44:12.037961 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.037971 20842 net.cpp:165] Memory required for data: 542209500\nI0818 13:44:12.037988 20842 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 13:44:12.038010 20842 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 13:44:12.038022 20842 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 13:44:12.038043 20842 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 13:44:12.038316 20842 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 13:44:12.038336 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.038344 20842 net.cpp:165] Memory required for data: 550401500\nI0818 13:44:12.038365 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 13:44:12.038383 20842 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 13:44:12.038394 20842 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 13:44:12.038409 20842 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.038501 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 13:44:12.038682 20842 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 13:44:12.038702 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.038712 20842 net.cpp:165] Memory required for data: 558593500\nI0818 13:44:12.038729 20842 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 13:44:12.038766 20842 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 13:44:12.038780 20842 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 13:44:12.038794 20842 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 13:44:12.038810 20842 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 13:44:12.038871 20842 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 13:44:12.038892 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.038902 20842 net.cpp:165] Memory required for data: 566785500\nI0818 13:44:12.038913 20842 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 13:44:12.038938 20842 net.cpp:100] Creating Layer L1_b6_relu\nI0818 13:44:12.038950 20842 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0818 13:44:12.038964 20842 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.038988 20842 net.cpp:150] Setting up L1_b6_relu\nI0818 13:44:12.039005 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.039014 20842 net.cpp:165] Memory required for data: 574977500\nI0818 13:44:12.039024 20842 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.039039 20842 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.039052 20842 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 13:44:12.039067 20842 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 13:44:12.039086 20842 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 13:44:12.039167 20842 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.039189 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.039202 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.039211 20842 net.cpp:165] Memory required for data: 591361500\nI0818 13:44:12.039222 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 13:44:12.039242 20842 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 13:44:12.039253 20842 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 13:44:12.039275 20842 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 13:44:12.039638 20842 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 13:44:12.039659 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.039667 20842 net.cpp:165] Memory required for data: 599553500\nI0818 13:44:12.039693 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 13:44:12.039710 20842 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 13:44:12.039722 20842 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0818 13:44:12.039743 20842 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 13:44:12.040033 20842 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 13:44:12.040052 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.040062 20842 net.cpp:165] Memory required for data: 607745500\nI0818 13:44:12.040083 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 13:44:12.040104 20842 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 13:44:12.040117 20842 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 13:44:12.040132 20842 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.040215 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 13:44:12.040390 20842 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 13:44:12.040410 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.040419 20842 net.cpp:165] Memory required for data: 615937500\nI0818 13:44:12.040436 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 13:44:12.040451 20842 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 13:44:12.040462 20842 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 13:44:12.040484 20842 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.040504 20842 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 13:44:12.040521 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.040530 20842 net.cpp:165] Memory required for data: 624129500\nI0818 13:44:12.040541 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 13:44:12.040570 20842 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 13:44:12.040582 20842 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 13:44:12.040599 20842 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 13:44:12.040956 20842 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 13:44:12.040977 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.040987 20842 
net.cpp:165] Memory required for data: 632321500\nI0818 13:44:12.041013 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 13:44:12.041038 20842 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 13:44:12.041052 20842 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 13:44:12.041070 20842 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 13:44:12.041349 20842 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 13:44:12.041368 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.041378 20842 net.cpp:165] Memory required for data: 640513500\nI0818 13:44:12.041399 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 13:44:12.041421 20842 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 13:44:12.041435 20842 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 13:44:12.041450 20842 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.041535 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 13:44:12.041721 20842 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 13:44:12.041740 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.041749 20842 net.cpp:165] Memory required for data: 648705500\nI0818 13:44:12.041767 20842 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 13:44:12.041785 20842 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 13:44:12.041795 20842 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 13:44:12.041808 20842 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 13:44:12.041829 20842 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 13:44:12.041882 20842 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 13:44:12.041905 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.041916 20842 net.cpp:165] Memory required for data: 656897500\nI0818 13:44:12.041926 20842 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 13:44:12.041941 20842 net.cpp:100] Creating 
Layer L1_b7_relu\nI0818 13:44:12.041954 20842 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 13:44:12.041966 20842 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.041985 20842 net.cpp:150] Setting up L1_b7_relu\nI0818 13:44:12.042001 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.042011 20842 net.cpp:165] Memory required for data: 665089500\nI0818 13:44:12.042021 20842 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.042038 20842 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.042049 20842 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 13:44:12.042065 20842 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 13:44:12.042084 20842 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 13:44:12.042165 20842 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.042184 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.042197 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.042207 20842 net.cpp:165] Memory required for data: 681473500\nI0818 13:44:12.042217 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 13:44:12.042237 20842 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 13:44:12.042249 20842 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 13:44:12.042273 20842 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 13:44:12.042634 20842 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 13:44:12.042654 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.042664 20842 net.cpp:165] Memory required for data: 689665500\nI0818 13:44:12.042688 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 13:44:12.042706 20842 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0818 13:44:12.042718 20842 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 13:44:12.042734 20842 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 13:44:12.043054 20842 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 13:44:12.043074 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.043083 20842 net.cpp:165] Memory required for data: 697857500\nI0818 13:44:12.043105 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 13:44:12.043128 20842 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 13:44:12.043139 20842 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 13:44:12.043156 20842 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.043252 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 13:44:12.043433 20842 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 13:44:12.043452 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.043462 20842 net.cpp:165] Memory required for data: 706049500\nI0818 13:44:12.043480 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 13:44:12.043496 20842 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 13:44:12.043507 20842 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 13:44:12.043526 20842 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.043546 20842 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 13:44:12.043560 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.043570 20842 net.cpp:165] Memory required for data: 714241500\nI0818 13:44:12.043581 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 13:44:12.043604 20842 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 13:44:12.043618 20842 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 13:44:12.043635 20842 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 13:44:12.044001 20842 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 13:44:12.044020 20842 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.044030 20842 net.cpp:165] Memory required for data: 722433500\nI0818 13:44:12.044049 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 13:44:12.044070 20842 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 13:44:12.044082 20842 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 13:44:12.044100 20842 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 13:44:12.044376 20842 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 13:44:12.044395 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.044404 20842 net.cpp:165] Memory required for data: 730625500\nI0818 13:44:12.044425 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 13:44:12.044442 20842 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 13:44:12.044454 20842 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 13:44:12.044476 20842 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.044564 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 13:44:12.044747 20842 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 13:44:12.044766 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.044776 20842 net.cpp:165] Memory required for data: 738817500\nI0818 13:44:12.044795 20842 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 13:44:12.044812 20842 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 13:44:12.044824 20842 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 13:44:12.044837 20842 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 13:44:12.044858 20842 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 13:44:12.044911 20842 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 13:44:12.044934 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.044945 20842 net.cpp:165] Memory required for data: 747009500\nI0818 13:44:12.044955 20842 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0818 13:44:12.044968 20842 net.cpp:100] Creating Layer L1_b8_relu\nI0818 13:44:12.044980 20842 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 13:44:12.044993 20842 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.045012 20842 net.cpp:150] Setting up L1_b8_relu\nI0818 13:44:12.045027 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.045049 20842 net.cpp:165] Memory required for data: 755201500\nI0818 13:44:12.045060 20842 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.045079 20842 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.045091 20842 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 13:44:12.045106 20842 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 13:44:12.045126 20842 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 13:44:12.045204 20842 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.045228 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.045241 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.045251 20842 net.cpp:165] Memory required for data: 771585500\nI0818 13:44:12.045262 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 13:44:12.045282 20842 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 13:44:12.045295 20842 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 13:44:12.045312 20842 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 13:44:12.045684 20842 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 13:44:12.045706 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.045716 20842 net.cpp:165] Memory required for data: 779777500\nI0818 13:44:12.045733 20842 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0818 13:44:12.045759 20842 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 13:44:12.045773 20842 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 13:44:12.045789 20842 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 13:44:12.046067 20842 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 13:44:12.046090 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.046099 20842 net.cpp:165] Memory required for data: 787969500\nI0818 13:44:12.046121 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 13:44:12.046139 20842 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 13:44:12.046150 20842 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 13:44:12.046165 20842 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.046250 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 13:44:12.046432 20842 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 13:44:12.046450 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.046460 20842 net.cpp:165] Memory required for data: 796161500\nI0818 13:44:12.046478 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 13:44:12.046499 20842 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 13:44:12.046510 20842 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 13:44:12.046530 20842 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.046550 20842 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 13:44:12.046564 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.046574 20842 net.cpp:165] Memory required for data: 804353500\nI0818 13:44:12.046586 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 13:44:12.046605 20842 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 13:44:12.046618 20842 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 13:44:12.046639 20842 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 13:44:12.047003 20842 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 13:44:12.047021 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.047030 20842 net.cpp:165] Memory required for data: 812545500\nI0818 13:44:12.047049 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 13:44:12.047065 20842 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 13:44:12.047077 20842 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 13:44:12.047098 20842 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 13:44:12.047415 20842 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 13:44:12.047435 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.047444 20842 net.cpp:165] Memory required for data: 820737500\nI0818 13:44:12.047492 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 13:44:12.047515 20842 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 13:44:12.047528 20842 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 13:44:12.047544 20842 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.047634 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 13:44:12.047817 20842 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 13:44:12.047837 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.047845 20842 net.cpp:165] Memory required for data: 828929500\nI0818 13:44:12.047863 20842 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 13:44:12.047880 20842 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 13:44:12.047891 20842 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 13:44:12.047904 20842 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 13:44:12.047925 20842 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 13:44:12.047984 20842 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 13:44:12.048002 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.048012 20842 net.cpp:165] Memory required for 
data: 837121500\nI0818 13:44:12.048022 20842 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 13:44:12.048036 20842 net.cpp:100] Creating Layer L1_b9_relu\nI0818 13:44:12.048048 20842 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 13:44:12.048061 20842 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.048084 20842 net.cpp:150] Setting up L1_b9_relu\nI0818 13:44:12.048100 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.048110 20842 net.cpp:165] Memory required for data: 845313500\nI0818 13:44:12.048120 20842 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.048133 20842 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.048144 20842 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 13:44:12.048166 20842 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 13:44:12.048187 20842 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 13:44:12.048267 20842 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.048287 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.048301 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.048310 20842 net.cpp:165] Memory required for data: 861697500\nI0818 13:44:12.048321 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 13:44:12.048341 20842 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 13:44:12.048354 20842 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 13:44:12.048377 20842 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 13:44:12.048753 20842 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 13:44:12.048774 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.048782 20842 net.cpp:165] Memory required for data: 
863745500\nI0818 13:44:12.048800 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 13:44:12.048817 20842 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 13:44:12.048830 20842 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 13:44:12.048851 20842 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 13:44:12.049126 20842 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 13:44:12.049145 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.049155 20842 net.cpp:165] Memory required for data: 865793500\nI0818 13:44:12.049176 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 13:44:12.049201 20842 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 13:44:12.049221 20842 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 13:44:12.049239 20842 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.049329 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 13:44:12.049507 20842 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 13:44:12.049526 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.049536 20842 net.cpp:165] Memory required for data: 867841500\nI0818 13:44:12.049556 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 13:44:12.049576 20842 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 13:44:12.049587 20842 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 13:44:12.049602 20842 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.049623 20842 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 13:44:12.049636 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.049645 20842 net.cpp:165] Memory required for data: 869889500\nI0818 13:44:12.049655 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 13:44:12.049687 20842 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 13:44:12.049701 20842 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 13:44:12.049721 20842 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 13:44:12.050078 20842 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 13:44:12.050097 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.050107 20842 net.cpp:165] Memory required for data: 871937500\nI0818 13:44:12.050124 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 13:44:12.050140 20842 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 13:44:12.050153 20842 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 13:44:12.050169 20842 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 13:44:12.050454 20842 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 13:44:12.050474 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.050484 20842 net.cpp:165] Memory required for data: 873985500\nI0818 13:44:12.050506 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 13:44:12.050523 20842 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 13:44:12.050534 20842 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 13:44:12.050554 20842 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.050642 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 13:44:12.050827 20842 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 13:44:12.050845 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.050855 20842 net.cpp:165] Memory required for data: 876033500\nI0818 13:44:12.050873 20842 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 13:44:12.050892 20842 net.cpp:100] Creating Layer L2_b1_pool\nI0818 13:44:12.050904 20842 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 13:44:12.050925 20842 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 13:44:12.051028 20842 net.cpp:150] Setting up L2_b1_pool\nI0818 13:44:12.051056 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.051067 20842 net.cpp:165] Memory required for data: 878081500\nI0818 13:44:12.051079 20842 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 13:44:12.051095 20842 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 13:44:12.051106 20842 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 13:44:12.051120 20842 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 13:44:12.051144 20842 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 13:44:12.051201 20842 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 13:44:12.051219 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.051229 20842 net.cpp:165] Memory required for data: 880129500\nI0818 13:44:12.051239 20842 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 13:44:12.051254 20842 net.cpp:100] Creating Layer L2_b1_relu\nI0818 13:44:12.051265 20842 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 13:44:12.051280 20842 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.051308 20842 net.cpp:150] Setting up L2_b1_relu\nI0818 13:44:12.051323 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.051332 20842 net.cpp:165] Memory required for data: 882177500\nI0818 13:44:12.051343 20842 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 13:44:12.051419 20842 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 13:44:12.051440 20842 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 13:44:12.053843 20842 net.cpp:150] Setting up L2_b1_zeros\nI0818 13:44:12.053870 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.053881 20842 net.cpp:165] Memory required for data: 884225500\nI0818 13:44:12.053892 20842 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 13:44:12.053910 20842 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 13:44:12.053922 20842 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 13:44:12.053936 20842 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 13:44:12.053952 20842 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 13:44:12.054057 20842 net.cpp:150] Setting up 
L2_b1_concat0\nI0818 13:44:12.054077 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.054087 20842 net.cpp:165] Memory required for data: 888321500\nI0818 13:44:12.054098 20842 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.054117 20842 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.054129 20842 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 13:44:12.054144 20842 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 13:44:12.054164 20842 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 13:44:12.054250 20842 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.054272 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.054286 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.054296 20842 net.cpp:165] Memory required for data: 896513500\nI0818 13:44:12.054306 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 13:44:12.054332 20842 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 13:44:12.054347 20842 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 13:44:12.054364 20842 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 13:44:12.055898 20842 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 13:44:12.055922 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.055932 20842 net.cpp:165] Memory required for data: 900609500\nI0818 13:44:12.055950 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 13:44:12.055968 20842 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 13:44:12.055980 20842 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 13:44:12.056001 20842 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 13:44:12.056285 20842 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 13:44:12.056305 20842 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 13:44:12.056314 20842 net.cpp:165] Memory required for data: 904705500\nI0818 13:44:12.056337 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 13:44:12.056360 20842 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 13:44:12.056372 20842 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 13:44:12.056390 20842 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.056483 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 13:44:12.056670 20842 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 13:44:12.056695 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.056705 20842 net.cpp:165] Memory required for data: 908801500\nI0818 13:44:12.056723 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 13:44:12.056747 20842 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 13:44:12.056759 20842 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 13:44:12.056776 20842 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.056805 20842 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 13:44:12.056821 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.056831 20842 net.cpp:165] Memory required for data: 912897500\nI0818 13:44:12.056843 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 13:44:12.056867 20842 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 13:44:12.056881 20842 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 13:44:12.056905 20842 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 13:44:12.057406 20842 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 13:44:12.057425 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.057435 20842 net.cpp:165] Memory required for data: 916993500\nI0818 13:44:12.057452 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 13:44:12.057471 20842 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 13:44:12.057482 
20842 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 13:44:12.057503 20842 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 13:44:12.057796 20842 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 13:44:12.057816 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.057824 20842 net.cpp:165] Memory required for data: 921089500\nI0818 13:44:12.057845 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 13:44:12.057868 20842 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 13:44:12.057880 20842 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 13:44:12.057896 20842 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.057982 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 13:44:12.058163 20842 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 13:44:12.058182 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.058192 20842 net.cpp:165] Memory required for data: 925185500\nI0818 13:44:12.058209 20842 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 13:44:12.058233 20842 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 13:44:12.058245 20842 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 13:44:12.058259 20842 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 13:44:12.058274 20842 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 13:44:12.058320 20842 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 13:44:12.058348 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.058358 20842 net.cpp:165] Memory required for data: 929281500\nI0818 13:44:12.058369 20842 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 13:44:12.058383 20842 net.cpp:100] Creating Layer L2_b2_relu\nI0818 13:44:12.058395 20842 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 13:44:12.058409 20842 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.058429 20842 net.cpp:150] 
Setting up L2_b2_relu\nI0818 13:44:12.058444 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.058451 20842 net.cpp:165] Memory required for data: 933377500\nI0818 13:44:12.058462 20842 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.058481 20842 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.058493 20842 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 13:44:12.058508 20842 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 13:44:12.058528 20842 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 13:44:12.058614 20842 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.058635 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.058650 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.058660 20842 net.cpp:165] Memory required for data: 941569500\nI0818 13:44:12.058670 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 13:44:12.058704 20842 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 13:44:12.058718 20842 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 13:44:12.058743 20842 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 13:44:12.059239 20842 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 13:44:12.059259 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.059268 20842 net.cpp:165] Memory required for data: 945665500\nI0818 13:44:12.059286 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 13:44:12.059304 20842 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 13:44:12.059315 20842 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 13:44:12.059336 20842 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 13:44:12.059626 20842 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0818 13:44:12.059645 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.059655 20842 net.cpp:165] Memory required for data: 949761500\nI0818 13:44:12.059684 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 13:44:12.059706 20842 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 13:44:12.059720 20842 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 13:44:12.059736 20842 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.059829 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 13:44:12.060010 20842 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 13:44:12.060029 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.060039 20842 net.cpp:165] Memory required for data: 953857500\nI0818 13:44:12.060057 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 13:44:12.060077 20842 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 13:44:12.060091 20842 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 13:44:12.060106 20842 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.060124 20842 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 13:44:12.060138 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.060148 20842 net.cpp:165] Memory required for data: 957953500\nI0818 13:44:12.060158 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 13:44:12.060183 20842 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 13:44:12.060195 20842 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 13:44:12.060217 20842 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 13:44:12.060724 20842 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 13:44:12.060745 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.060753 20842 net.cpp:165] Memory required for data: 962049500\nI0818 13:44:12.060771 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 13:44:12.060788 20842 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 13:44:12.060801 20842 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 13:44:12.060817 20842 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 13:44:12.061105 20842 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 13:44:12.061123 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061132 20842 net.cpp:165] Memory required for data: 966145500\nI0818 13:44:12.061154 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 13:44:12.061170 20842 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 13:44:12.061182 20842 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 13:44:12.061203 20842 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.061288 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 13:44:12.061472 20842 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 13:44:12.061491 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061501 20842 net.cpp:165] Memory required for data: 970241500\nI0818 13:44:12.061520 20842 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 13:44:12.061537 20842 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 13:44:12.061549 20842 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 13:44:12.061563 20842 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 13:44:12.061590 20842 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 13:44:12.061640 20842 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 13:44:12.061663 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061681 20842 net.cpp:165] Memory required for data: 974337500\nI0818 13:44:12.061693 20842 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 13:44:12.061725 20842 net.cpp:100] Creating Layer L2_b3_relu\nI0818 13:44:12.061739 20842 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 13:44:12.061754 20842 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.061774 20842 net.cpp:150] Setting up L2_b3_relu\nI0818 13:44:12.061789 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061797 20842 net.cpp:165] Memory required for data: 978433500\nI0818 13:44:12.061810 20842 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.061825 20842 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.061836 20842 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 13:44:12.061854 20842 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 13:44:12.061873 20842 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 13:44:12.061954 20842 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.061972 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061986 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.061995 20842 net.cpp:165] Memory required for data: 986625500\nI0818 13:44:12.062006 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 13:44:12.062031 20842 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 13:44:12.062048 20842 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 13:44:12.062068 20842 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 13:44:12.062572 20842 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 13:44:12.062592 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.062600 20842 net.cpp:165] Memory required for data: 990721500\nI0818 13:44:12.062618 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 13:44:12.062639 20842 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 13:44:12.062652 20842 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 13:44:12.062669 20842 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0818 13:44:12.062973 20842 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 13:44:12.062990 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.063000 20842 net.cpp:165] Memory required for data: 994817500\nI0818 13:44:12.063022 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 13:44:12.063038 20842 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 13:44:12.063050 20842 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 13:44:12.063071 20842 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.063159 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 13:44:12.063343 20842 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 13:44:12.063360 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.063370 20842 net.cpp:165] Memory required for data: 998913500\nI0818 13:44:12.063388 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 13:44:12.063405 20842 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0818 13:44:12.063416 20842 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 13:44:12.063429 20842 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.063449 20842 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 13:44:12.063463 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.063473 20842 net.cpp:165] Memory required for data: 1003009500\nI0818 13:44:12.063483 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 13:44:12.063518 20842 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 13:44:12.063531 20842 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 13:44:12.063554 20842 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 13:44:12.064060 20842 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 13:44:12.064081 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.064090 20842 net.cpp:165] Memory required for data: 1007105500\nI0818 13:44:12.064108 20842 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 13:44:12.064133 20842 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 13:44:12.064146 20842 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 13:44:12.064168 20842 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 13:44:12.064460 20842 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 13:44:12.064478 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.064488 20842 net.cpp:165] Memory required for data: 1011201500\nI0818 13:44:12.064509 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 13:44:12.064527 20842 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 13:44:12.064538 20842 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 13:44:12.064553 20842 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.064644 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 13:44:12.064831 20842 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 13:44:12.064857 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.064867 20842 net.cpp:165] Memory required for data: 1015297500\nI0818 13:44:12.064887 20842 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 13:44:12.064903 20842 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 13:44:12.064914 20842 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 13:44:12.064929 20842 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 13:44:12.064944 20842 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 13:44:12.064996 20842 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 13:44:12.065013 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.065024 20842 net.cpp:165] Memory required for data: 1019393500\nI0818 13:44:12.065034 20842 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 13:44:12.065049 20842 net.cpp:100] Creating Layer L2_b4_relu\nI0818 13:44:12.065062 20842 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0818 13:44:12.065079 20842 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.065100 20842 net.cpp:150] Setting up L2_b4_relu\nI0818 13:44:12.065114 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.065124 20842 net.cpp:165] Memory required for data: 1023489500\nI0818 13:44:12.065135 20842 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.065150 20842 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.065160 20842 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 13:44:12.065179 20842 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 13:44:12.065199 20842 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 13:44:12.065279 20842 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.065297 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.065311 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.065320 20842 net.cpp:165] Memory required for data: 1031681500\nI0818 13:44:12.065330 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 13:44:12.065359 20842 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 13:44:12.065373 20842 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 13:44:12.065392 20842 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 13:44:12.065937 20842 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 13:44:12.065964 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.065974 20842 net.cpp:165] Memory required for data: 1035777500\nI0818 13:44:12.065992 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 13:44:12.066015 20842 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 13:44:12.066026 20842 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0818 13:44:12.066043 20842 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 13:44:12.066326 20842 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 13:44:12.066345 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.066355 20842 net.cpp:165] Memory required for data: 1039873500\nI0818 13:44:12.066377 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 13:44:12.066395 20842 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 13:44:12.066406 20842 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 13:44:12.066421 20842 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.066515 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 13:44:12.066706 20842 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 13:44:12.066730 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.066740 20842 net.cpp:165] Memory required for data: 1043969500\nI0818 13:44:12.066758 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0818 13:44:12.066773 20842 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 13:44:12.066786 20842 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 13:44:12.066799 20842 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.066818 20842 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 13:44:12.066833 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.066843 20842 net.cpp:165] Memory required for data: 1048065500\nI0818 13:44:12.066854 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 13:44:12.066879 20842 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 13:44:12.066891 20842 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 13:44:12.066913 20842 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 13:44:12.067420 20842 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 13:44:12.067440 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.067450 20842 
net.cpp:165] Memory required for data: 1052161500\nI0818 13:44:12.067467 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 13:44:12.067489 20842 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 13:44:12.067502 20842 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 13:44:12.067523 20842 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 13:44:12.067826 20842 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 13:44:12.067845 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.067855 20842 net.cpp:165] Memory required for data: 1056257500\nI0818 13:44:12.067878 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 13:44:12.067895 20842 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 13:44:12.067908 20842 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 13:44:12.067922 20842 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.068014 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 13:44:12.068197 20842 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0818 13:44:12.068215 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.068225 20842 net.cpp:165] Memory required for data: 1060353500\nI0818 13:44:12.068243 20842 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 13:44:12.068264 20842 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 13:44:12.068276 20842 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 13:44:12.068290 20842 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 13:44:12.068306 20842 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 13:44:12.068352 20842 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 13:44:12.068370 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.068388 20842 net.cpp:165] Memory required for data: 1064449500\nI0818 13:44:12.068400 20842 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 13:44:12.068419 20842 net.cpp:100] Creating 
Layer L2_b5_relu\nI0818 13:44:12.068431 20842 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 13:44:12.068446 20842 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.068465 20842 net.cpp:150] Setting up L2_b5_relu\nI0818 13:44:12.068480 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.068490 20842 net.cpp:165] Memory required for data: 1068545500\nI0818 13:44:12.068500 20842 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.068513 20842 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.068523 20842 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 13:44:12.068538 20842 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 13:44:12.068557 20842 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 13:44:12.068641 20842 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.068662 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.068682 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.068693 20842 net.cpp:165] Memory required for data: 1076737500\nI0818 13:44:12.068704 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 13:44:12.068727 20842 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 13:44:12.068742 20842 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 13:44:12.068759 20842 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 13:44:12.069272 20842 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 13:44:12.069291 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.069300 20842 net.cpp:165] Memory required for data: 1080833500\nI0818 13:44:12.069319 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 13:44:12.069339 20842 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0818 13:44:12.069352 20842 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 13:44:12.069371 20842 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 13:44:12.069656 20842 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 13:44:12.069681 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.069692 20842 net.cpp:165] Memory required for data: 1084929500\nI0818 13:44:12.069715 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 13:44:12.069732 20842 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 13:44:12.069744 20842 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 13:44:12.069761 20842 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.069860 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 13:44:12.070042 20842 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 13:44:12.070065 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.070075 20842 net.cpp:165] Memory required for data: 1089025500\nI0818 13:44:12.070093 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 13:44:12.070108 20842 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 13:44:12.070121 20842 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 13:44:12.070135 20842 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.070154 20842 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 13:44:12.070168 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.070178 20842 net.cpp:165] Memory required for data: 1093121500\nI0818 13:44:12.070188 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 13:44:12.070212 20842 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 13:44:12.070227 20842 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 13:44:12.070247 20842 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 13:44:12.070780 20842 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 13:44:12.070807 20842 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.070817 20842 net.cpp:165] Memory required for data: 1097217500\nI0818 13:44:12.070835 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 13:44:12.070857 20842 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 13:44:12.070869 20842 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 13:44:12.070891 20842 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 13:44:12.071188 20842 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 13:44:12.071208 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.071218 20842 net.cpp:165] Memory required for data: 1101313500\nI0818 13:44:12.071239 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 13:44:12.071255 20842 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 13:44:12.071267 20842 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 13:44:12.071282 20842 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.071374 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 13:44:12.071559 20842 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 13:44:12.071578 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.071588 20842 net.cpp:165] Memory required for data: 1105409500\nI0818 13:44:12.071607 20842 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 13:44:12.071629 20842 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 13:44:12.071641 20842 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 13:44:12.071655 20842 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 13:44:12.071671 20842 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 13:44:12.071725 20842 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 13:44:12.071743 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.071753 20842 net.cpp:165] Memory required for data: 1109505500\nI0818 13:44:12.071763 20842 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0818 13:44:12.071782 20842 net.cpp:100] Creating Layer L2_b6_relu\nI0818 13:44:12.071794 20842 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 13:44:12.071808 20842 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.071827 20842 net.cpp:150] Setting up L2_b6_relu\nI0818 13:44:12.071841 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.071851 20842 net.cpp:165] Memory required for data: 1113601500\nI0818 13:44:12.071861 20842 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.071876 20842 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.071885 20842 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 13:44:12.071900 20842 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 13:44:12.071920 20842 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 13:44:12.072002 20842 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.072023 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.072036 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.072046 20842 net.cpp:165] Memory required for data: 1121793500\nI0818 13:44:12.072057 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 13:44:12.072082 20842 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 13:44:12.072095 20842 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 13:44:12.072113 20842 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 13:44:12.072628 20842 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 13:44:12.072646 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.072655 20842 net.cpp:165] Memory required for data: 1125889500\nI0818 13:44:12.072679 20842 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0818 13:44:12.072702 20842 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 13:44:12.072722 20842 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 13:44:12.072747 20842 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 13:44:12.073036 20842 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 13:44:12.073055 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.073065 20842 net.cpp:165] Memory required for data: 1129985500\nI0818 13:44:12.073086 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 13:44:12.073103 20842 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 13:44:12.073115 20842 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 13:44:12.073132 20842 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.073221 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 13:44:12.073405 20842 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 13:44:12.073424 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.073434 20842 net.cpp:165] Memory required for data: 1134081500\nI0818 13:44:12.073452 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 13:44:12.073473 20842 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 13:44:12.073485 20842 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 13:44:12.073499 20842 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.073518 20842 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 13:44:12.073531 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.073541 20842 net.cpp:165] Memory required for data: 1138177500\nI0818 13:44:12.073551 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 13:44:12.073580 20842 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 13:44:12.073592 20842 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 13:44:12.073616 20842 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 13:44:12.074129 20842 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 13:44:12.074149 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.074158 20842 net.cpp:165] Memory required for data: 1142273500\nI0818 13:44:12.074177 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 13:44:12.074198 20842 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 13:44:12.074211 20842 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 13:44:12.074229 20842 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 13:44:12.074517 20842 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 13:44:12.074542 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.074553 20842 net.cpp:165] Memory required for data: 1146369500\nI0818 13:44:12.074575 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 13:44:12.074591 20842 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 13:44:12.074604 20842 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 13:44:12.074620 20842 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.074720 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 13:44:12.074906 20842 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 13:44:12.074925 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.074935 20842 net.cpp:165] Memory required for data: 1150465500\nI0818 13:44:12.074954 20842 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 13:44:12.074972 20842 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 13:44:12.074983 20842 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 13:44:12.074997 20842 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 13:44:12.075019 20842 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 13:44:12.075065 20842 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 13:44:12.075083 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.075093 20842 net.cpp:165] Memory required for 
data: 1154561500\nI0818 13:44:12.075104 20842 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 13:44:12.075122 20842 net.cpp:100] Creating Layer L2_b7_relu\nI0818 13:44:12.075136 20842 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 13:44:12.075158 20842 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.075178 20842 net.cpp:150] Setting up L2_b7_relu\nI0818 13:44:12.075194 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.075204 20842 net.cpp:165] Memory required for data: 1158657500\nI0818 13:44:12.075214 20842 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.075227 20842 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.075239 20842 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 13:44:12.075254 20842 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 13:44:12.075273 20842 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 13:44:12.075356 20842 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.075376 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.075388 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.075397 20842 net.cpp:165] Memory required for data: 1166849500\nI0818 13:44:12.075408 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 13:44:12.075433 20842 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 13:44:12.075446 20842 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 13:44:12.075465 20842 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 13:44:12.075996 20842 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 13:44:12.076016 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.076025 20842 net.cpp:165] Memory required for data: 
1170945500\nI0818 13:44:12.076043 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 13:44:12.076066 20842 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 13:44:12.076077 20842 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 13:44:12.076098 20842 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 13:44:12.076386 20842 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 13:44:12.076406 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.076414 20842 net.cpp:165] Memory required for data: 1175041500\nI0818 13:44:12.076436 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 13:44:12.076453 20842 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 13:44:12.076465 20842 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 13:44:12.076480 20842 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.076573 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 13:44:12.076769 20842 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0818 13:44:12.076788 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.076797 20842 net.cpp:165] Memory required for data: 1179137500\nI0818 13:44:12.076817 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 13:44:12.076831 20842 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 13:44:12.076848 20842 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 13:44:12.076863 20842 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.076882 20842 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 13:44:12.076896 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.076906 20842 net.cpp:165] Memory required for data: 1183233500\nI0818 13:44:12.076916 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 13:44:12.076942 20842 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 13:44:12.076956 20842 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 13:44:12.076972 20842 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 13:44:12.077482 20842 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 13:44:12.077502 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.077510 20842 net.cpp:165] Memory required for data: 1187329500\nI0818 13:44:12.077528 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 13:44:12.077551 20842 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 13:44:12.077574 20842 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 13:44:12.077592 20842 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 13:44:12.077895 20842 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 13:44:12.077919 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.077929 20842 net.cpp:165] Memory required for data: 1191425500\nI0818 13:44:12.077950 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 13:44:12.077966 20842 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 13:44:12.077980 20842 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0818 13:44:12.077996 20842 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.078083 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 13:44:12.078270 20842 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 13:44:12.078289 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.078299 20842 net.cpp:165] Memory required for data: 1195521500\nI0818 13:44:12.078316 20842 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 13:44:12.078333 20842 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 13:44:12.078346 20842 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 13:44:12.078358 20842 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 13:44:12.078379 20842 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 13:44:12.078428 20842 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 13:44:12.078444 20842 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 13:44:12.078454 20842 net.cpp:165] Memory required for data: 1199617500\nI0818 13:44:12.078465 20842 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 13:44:12.078483 20842 net.cpp:100] Creating Layer L2_b8_relu\nI0818 13:44:12.078495 20842 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 13:44:12.078510 20842 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.078528 20842 net.cpp:150] Setting up L2_b8_relu\nI0818 13:44:12.078542 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.078552 20842 net.cpp:165] Memory required for data: 1203713500\nI0818 13:44:12.078562 20842 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.078577 20842 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.078588 20842 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 13:44:12.078603 20842 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 13:44:12.078639 20842 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 13:44:12.078735 20842 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.078755 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.078769 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.078779 20842 net.cpp:165] Memory required for data: 1211905500\nI0818 13:44:12.078790 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 13:44:12.078811 20842 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 13:44:12.078824 20842 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 13:44:12.078848 20842 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 13:44:12.079366 20842 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 13:44:12.079385 20842 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 13:44:12.079394 20842 net.cpp:165] Memory required for data: 1216001500\nI0818 13:44:12.079412 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 13:44:12.079435 20842 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 13:44:12.079447 20842 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 13:44:12.079463 20842 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 13:44:12.079761 20842 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 13:44:12.079785 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.079795 20842 net.cpp:165] Memory required for data: 1220097500\nI0818 13:44:12.079826 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 13:44:12.079843 20842 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 13:44:12.079855 20842 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 13:44:12.079871 20842 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.079964 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 13:44:12.080154 20842 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0818 13:44:12.080173 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.080183 20842 net.cpp:165] Memory required for data: 1224193500\nI0818 13:44:12.080201 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 13:44:12.080217 20842 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 13:44:12.080229 20842 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 13:44:12.080248 20842 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.080268 20842 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 13:44:12.080282 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.080292 20842 net.cpp:165] Memory required for data: 1228289500\nI0818 13:44:12.080302 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 13:44:12.080322 20842 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 13:44:12.080334 20842 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 13:44:12.080356 20842 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 13:44:12.080865 20842 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 13:44:12.080885 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.080895 20842 net.cpp:165] Memory required for data: 1232385500\nI0818 13:44:12.080912 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 13:44:12.080930 20842 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 13:44:12.080942 20842 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 13:44:12.080963 20842 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 13:44:12.081254 20842 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 13:44:12.081272 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.081282 20842 net.cpp:165] Memory required for data: 1236481500\nI0818 13:44:12.081356 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 13:44:12.081378 20842 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 13:44:12.081392 20842 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0818 13:44:12.081408 20842 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.081506 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 13:44:12.081701 20842 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 13:44:12.081727 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.081737 20842 net.cpp:165] Memory required for data: 1240577500\nI0818 13:44:12.081755 20842 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 13:44:12.081773 20842 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 13:44:12.081784 20842 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 13:44:12.081799 20842 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 13:44:12.081815 20842 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 13:44:12.081866 20842 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0818 13:44:12.081884 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.081894 20842 net.cpp:165] Memory required for data: 1244673500\nI0818 13:44:12.081904 20842 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 13:44:12.081918 20842 net.cpp:100] Creating Layer L2_b9_relu\nI0818 13:44:12.081929 20842 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 13:44:12.081949 20842 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.081969 20842 net.cpp:150] Setting up L2_b9_relu\nI0818 13:44:12.081982 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.081992 20842 net.cpp:165] Memory required for data: 1248769500\nI0818 13:44:12.082001 20842 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.082025 20842 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.082036 20842 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 13:44:12.082056 20842 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 13:44:12.082079 20842 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 13:44:12.082155 20842 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.082175 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.082187 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.082197 20842 net.cpp:165] Memory required for data: 1256961500\nI0818 13:44:12.082208 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 13:44:12.082234 20842 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 13:44:12.082248 20842 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 13:44:12.082267 20842 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 13:44:12.082798 20842 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0818 13:44:12.082819 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.082829 20842 net.cpp:165] Memory required for data: 1257985500\nI0818 13:44:12.082847 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 13:44:12.082870 20842 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 13:44:12.082881 20842 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 13:44:12.082897 20842 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 13:44:12.083189 20842 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 13:44:12.083209 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.083218 20842 net.cpp:165] Memory required for data: 1259009500\nI0818 13:44:12.083241 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 13:44:12.083262 20842 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 13:44:12.083274 20842 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 13:44:12.083290 20842 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.083379 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 13:44:12.083575 20842 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 13:44:12.083595 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.083603 20842 net.cpp:165] Memory required for data: 1260033500\nI0818 13:44:12.083621 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 13:44:12.083642 20842 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 13:44:12.083654 20842 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 13:44:12.083669 20842 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.083696 20842 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 13:44:12.083711 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.083721 20842 net.cpp:165] Memory required for data: 1261057500\nI0818 13:44:12.083731 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 13:44:12.083756 20842 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0818 13:44:12.083770 20842 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 13:44:12.083791 20842 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 13:44:12.084305 20842 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 13:44:12.084324 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.084333 20842 net.cpp:165] Memory required for data: 1262081500\nI0818 13:44:12.084352 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 13:44:12.084369 20842 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 13:44:12.084381 20842 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 13:44:12.084401 20842 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 13:44:12.084712 20842 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 13:44:12.084731 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.084741 20842 net.cpp:165] Memory required for data: 1263105500\nI0818 13:44:12.084772 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 13:44:12.084789 20842 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 13:44:12.084800 20842 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 13:44:12.084816 20842 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.084911 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 13:44:12.085103 20842 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 13:44:12.085125 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.085135 20842 net.cpp:165] Memory required for data: 1264129500\nI0818 13:44:12.085155 20842 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 13:44:12.085171 20842 net.cpp:100] Creating Layer L3_b1_pool\nI0818 13:44:12.085183 20842 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 13:44:12.085201 20842 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 13:44:12.085261 20842 net.cpp:150] Setting up L3_b1_pool\nI0818 13:44:12.085279 20842 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0818 13:44:12.085289 20842 net.cpp:165] Memory required for data: 1265153500\nI0818 13:44:12.085300 20842 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 13:44:12.085316 20842 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 13:44:12.085328 20842 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 13:44:12.085341 20842 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 13:44:12.085357 20842 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 13:44:12.085418 20842 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 13:44:12.085436 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.085448 20842 net.cpp:165] Memory required for data: 1266177500\nI0818 13:44:12.085458 20842 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 13:44:12.085470 20842 net.cpp:100] Creating Layer L3_b1_relu\nI0818 13:44:12.085482 20842 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 13:44:12.085496 20842 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.085515 20842 net.cpp:150] Setting up L3_b1_relu\nI0818 13:44:12.085530 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.085539 20842 net.cpp:165] Memory required for data: 1267201500\nI0818 13:44:12.085549 20842 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 13:44:12.085566 20842 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 13:44:12.085587 20842 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 13:44:12.086858 20842 net.cpp:150] Setting up L3_b1_zeros\nI0818 13:44:12.086880 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.086890 20842 net.cpp:165] Memory required for data: 1268225500\nI0818 13:44:12.086901 20842 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 13:44:12.086922 20842 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 13:44:12.086935 20842 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 13:44:12.086948 20842 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 
13:44:12.086966 20842 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 13:44:12.087029 20842 net.cpp:150] Setting up L3_b1_concat0\nI0818 13:44:12.087049 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.087059 20842 net.cpp:165] Memory required for data: 1270273500\nI0818 13:44:12.087069 20842 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.087082 20842 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.087095 20842 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 13:44:12.087115 20842 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 13:44:12.087136 20842 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 13:44:12.087219 20842 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.087244 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.087258 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.087277 20842 net.cpp:165] Memory required for data: 1274369500\nI0818 13:44:12.087290 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 13:44:12.087316 20842 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 13:44:12.087330 20842 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 13:44:12.087349 20842 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 13:44:12.089391 20842 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 13:44:12.089413 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.089423 20842 net.cpp:165] Memory required for data: 1276417500\nI0818 13:44:12.089442 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 13:44:12.089464 20842 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 13:44:12.089479 20842 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 13:44:12.089495 20842 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 
13:44:12.089812 20842 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 13:44:12.089833 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.089843 20842 net.cpp:165] Memory required for data: 1278465500\nI0818 13:44:12.089864 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 13:44:12.089885 20842 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 13:44:12.089898 20842 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 13:44:12.089918 20842 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.090016 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 13:44:12.090210 20842 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 13:44:12.090229 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.090240 20842 net.cpp:165] Memory required for data: 1280513500\nI0818 13:44:12.090257 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 13:44:12.090273 20842 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 13:44:12.090284 20842 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 13:44:12.090303 20842 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.090324 20842 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 13:44:12.090339 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.090349 20842 net.cpp:165] Memory required for data: 1282561500\nI0818 13:44:12.090359 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 13:44:12.090379 20842 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 13:44:12.090392 20842 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 13:44:12.090415 20842 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 13:44:12.091486 20842 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 13:44:12.091507 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.091517 20842 net.cpp:165] Memory required for data: 1284609500\nI0818 13:44:12.091536 20842 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0818 13:44:12.091558 20842 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 13:44:12.091572 20842 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 13:44:12.091588 20842 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 13:44:12.091892 20842 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 13:44:12.091912 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.091922 20842 net.cpp:165] Memory required for data: 1286657500\nI0818 13:44:12.091943 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 13:44:12.091959 20842 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 13:44:12.091970 20842 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 13:44:12.091985 20842 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.092078 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 13:44:12.092270 20842 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 13:44:12.092289 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.092299 20842 net.cpp:165] Memory required for data: 1288705500\nI0818 13:44:12.092319 20842 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 13:44:12.092335 20842 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 13:44:12.092358 20842 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 13:44:12.092372 20842 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 13:44:12.092393 20842 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 13:44:12.092453 20842 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 13:44:12.092471 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.092481 20842 net.cpp:165] Memory required for data: 1290753500\nI0818 13:44:12.092492 20842 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 13:44:12.092511 20842 net.cpp:100] Creating Layer L3_b2_relu\nI0818 13:44:12.092523 20842 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 13:44:12.092538 20842 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.092557 20842 net.cpp:150] Setting up L3_b2_relu\nI0818 13:44:12.092571 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.092581 20842 net.cpp:165] Memory required for data: 1292801500\nI0818 13:44:12.092592 20842 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.092608 20842 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.092620 20842 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 13:44:12.092636 20842 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 13:44:12.092655 20842 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 13:44:12.092741 20842 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.092767 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.092782 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.092792 20842 net.cpp:165] Memory required for data: 1296897500\nI0818 13:44:12.092803 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 13:44:12.092823 20842 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 13:44:12.092835 20842 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 13:44:12.092854 20842 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 13:44:12.093922 20842 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 13:44:12.093942 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.093952 20842 net.cpp:165] Memory required for data: 1298945500\nI0818 13:44:12.093971 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 13:44:12.093992 20842 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 13:44:12.094005 20842 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 13:44:12.094022 20842 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 13:44:12.094319 20842 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 13:44:12.094338 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.094348 20842 net.cpp:165] Memory required for data: 1300993500\nI0818 13:44:12.094369 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 13:44:12.094391 20842 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 13:44:12.094404 20842 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 13:44:12.094427 20842 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.094517 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 13:44:12.094717 20842 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 13:44:12.094736 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.094745 20842 net.cpp:165] Memory required for data: 1303041500\nI0818 13:44:12.094764 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 13:44:12.094779 20842 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 13:44:12.094791 20842 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 13:44:12.094810 20842 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.094831 20842 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 13:44:12.094846 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.094864 20842 net.cpp:165] Memory required for data: 1305089500\nI0818 13:44:12.094877 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 13:44:12.094902 20842 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 13:44:12.094915 20842 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 13:44:12.094934 20842 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 13:44:12.096006 20842 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 13:44:12.096026 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.096035 20842 net.cpp:165] Memory required for data: 1307137500\nI0818 13:44:12.096053 20842 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 13:44:12.096076 20842 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 13:44:12.096088 20842 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 13:44:12.096104 20842 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 13:44:12.096406 20842 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 13:44:12.096426 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.096434 20842 net.cpp:165] Memory required for data: 1309185500\nI0818 13:44:12.096456 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 13:44:12.096473 20842 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 13:44:12.096484 20842 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 13:44:12.096499 20842 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.096596 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 13:44:12.096797 20842 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 13:44:12.096817 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.096827 20842 net.cpp:165] Memory required for data: 1311233500\nI0818 13:44:12.096844 20842 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 13:44:12.096863 20842 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 13:44:12.096875 20842 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 13:44:12.096889 20842 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 13:44:12.096911 20842 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 13:44:12.096967 20842 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 13:44:12.096992 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.097002 20842 net.cpp:165] Memory required for data: 1313281500\nI0818 13:44:12.097012 20842 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 13:44:12.097025 20842 net.cpp:100] Creating Layer L3_b3_relu\nI0818 13:44:12.097038 20842 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0818 13:44:12.097051 20842 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.097070 20842 net.cpp:150] Setting up L3_b3_relu\nI0818 13:44:12.097084 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.097092 20842 net.cpp:165] Memory required for data: 1315329500\nI0818 13:44:12.097103 20842 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.097121 20842 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.097133 20842 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 13:44:12.097149 20842 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 13:44:12.097169 20842 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 13:44:12.097254 20842 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.097272 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.097285 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.097295 20842 net.cpp:165] Memory required for data: 1319425500\nI0818 13:44:12.097306 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 13:44:12.097326 20842 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 13:44:12.097339 20842 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 13:44:12.097363 20842 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 13:44:12.100890 20842 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0818 13:44:12.101023 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.101107 20842 net.cpp:165] Memory required for data: 1321473500\nI0818 13:44:12.101200 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 13:44:12.101294 20842 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 13:44:12.101378 20842 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0818 13:44:12.101985 20842 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 13:44:12.103242 20842 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 13:44:12.103266 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.103277 20842 net.cpp:165] Memory required for data: 1323521500\nI0818 13:44:12.103299 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 13:44:12.103319 20842 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 13:44:12.103332 20842 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 13:44:12.103348 20842 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.103448 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 13:44:12.103647 20842 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 13:44:12.103667 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.103685 20842 net.cpp:165] Memory required for data: 1325569500\nI0818 13:44:12.103705 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 13:44:12.103723 20842 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 13:44:12.103734 20842 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 13:44:12.103754 20842 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.103775 20842 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 13:44:12.103790 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.103799 20842 net.cpp:165] Memory required for data: 1327617500\nI0818 13:44:12.103809 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 13:44:12.103834 20842 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0818 13:44:12.103847 20842 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 13:44:12.103865 20842 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 13:44:12.104961 20842 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 13:44:12.104982 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.104991 20842 net.cpp:165] Memory 
required for data: 1329665500\nI0818 13:44:12.105010 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 13:44:12.105039 20842 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 13:44:12.105054 20842 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 13:44:12.105077 20842 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 13:44:12.105388 20842 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 13:44:12.105408 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.105417 20842 net.cpp:165] Memory required for data: 1331713500\nI0818 13:44:12.105438 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 13:44:12.105455 20842 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 13:44:12.105468 20842 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 13:44:12.105484 20842 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.105581 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 13:44:12.105787 20842 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 13:44:12.105806 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.105816 20842 net.cpp:165] Memory required for data: 1333761500\nI0818 13:44:12.105834 20842 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 13:44:12.105852 20842 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 13:44:12.105865 20842 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 13:44:12.105880 20842 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 13:44:12.105904 20842 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0818 13:44:12.105963 20842 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 13:44:12.105995 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.106006 20842 net.cpp:165] Memory required for data: 1335809500\nI0818 13:44:12.106016 20842 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 13:44:12.106030 20842 net.cpp:100] Creating Layer L3_b4_relu\nI0818 
13:44:12.106042 20842 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 13:44:12.106056 20842 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.106076 20842 net.cpp:150] Setting up L3_b4_relu\nI0818 13:44:12.106091 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.106101 20842 net.cpp:165] Memory required for data: 1337857500\nI0818 13:44:12.106112 20842 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.106129 20842 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.106142 20842 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 13:44:12.106158 20842 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 13:44:12.106178 20842 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 13:44:12.106263 20842 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.106282 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.106295 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.106304 20842 net.cpp:165] Memory required for data: 1341953500\nI0818 13:44:12.106314 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 13:44:12.106335 20842 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 13:44:12.106348 20842 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 13:44:12.106371 20842 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0818 13:44:12.107463 20842 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 13:44:12.107483 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.107494 20842 net.cpp:165] Memory required for data: 1344001500\nI0818 13:44:12.107512 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 13:44:12.107530 20842 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 
13:44:12.107542 20842 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 13:44:12.107563 20842 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 13:44:12.108841 20842 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 13:44:12.108863 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.108873 20842 net.cpp:165] Memory required for data: 1346049500\nI0818 13:44:12.108896 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 13:44:12.108914 20842 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 13:44:12.108927 20842 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 13:44:12.108942 20842 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.109036 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 13:44:12.109230 20842 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 13:44:12.109248 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.109257 20842 net.cpp:165] Memory required for data: 1348097500\nI0818 13:44:12.109277 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 13:44:12.109292 20842 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 13:44:12.109304 20842 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 13:44:12.109323 20842 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.109344 20842 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 13:44:12.109359 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.109369 20842 net.cpp:165] Memory required for data: 1350145500\nI0818 13:44:12.109378 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0818 13:44:12.109403 20842 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 13:44:12.109416 20842 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 13:44:12.109434 20842 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 13:44:12.111393 20842 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 13:44:12.111415 20842 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 13:44:12.111426 20842 net.cpp:165] Memory required for data: 1352193500\nI0818 13:44:12.111444 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 13:44:12.111467 20842 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 13:44:12.111481 20842 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 13:44:12.111503 20842 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 13:44:12.111801 20842 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 13:44:12.111820 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.111830 20842 net.cpp:165] Memory required for data: 1354241500\nI0818 13:44:12.111853 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 13:44:12.111871 20842 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 13:44:12.111882 20842 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 13:44:12.111898 20842 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.111989 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 13:44:12.112175 20842 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 13:44:12.112195 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.112203 20842 net.cpp:165] Memory required for data: 1356289500\nI0818 13:44:12.112221 20842 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 13:44:12.112238 20842 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 13:44:12.112251 20842 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0818 13:44:12.112263 20842 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 13:44:12.112283 20842 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 13:44:12.112337 20842 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 13:44:12.112360 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.112371 20842 net.cpp:165] Memory required for data: 1358337500\nI0818 13:44:12.112380 20842 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 
13:44:12.112396 20842 net.cpp:100] Creating Layer L3_b5_relu\nI0818 13:44:12.112408 20842 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 13:44:12.112421 20842 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.112440 20842 net.cpp:150] Setting up L3_b5_relu\nI0818 13:44:12.112457 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.112464 20842 net.cpp:165] Memory required for data: 1360385500\nI0818 13:44:12.112474 20842 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.112493 20842 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.112505 20842 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 13:44:12.112521 20842 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 13:44:12.112541 20842 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 13:44:12.112624 20842 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.112645 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.112659 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.112669 20842 net.cpp:165] Memory required for data: 1364481500\nI0818 13:44:12.112687 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 13:44:12.112707 20842 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0818 13:44:12.112721 20842 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 13:44:12.112743 20842 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 13:44:12.113796 20842 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 13:44:12.113816 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.113826 20842 net.cpp:165] Memory required for data: 1366529500\nI0818 13:44:12.113844 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 13:44:12.113862 
20842 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 13:44:12.113881 20842 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 13:44:12.113905 20842 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 13:44:12.114197 20842 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 13:44:12.114219 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.114229 20842 net.cpp:165] Memory required for data: 1368577500\nI0818 13:44:12.114251 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 13:44:12.114269 20842 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 13:44:12.114280 20842 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 13:44:12.114296 20842 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.114384 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 13:44:12.114573 20842 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 13:44:12.114593 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.114603 20842 net.cpp:165] Memory required for data: 1370625500\nI0818 13:44:12.114621 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 13:44:12.114636 20842 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 13:44:12.114648 20842 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 13:44:12.114666 20842 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.114694 20842 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 13:44:12.114711 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.114720 20842 net.cpp:165] Memory required for data: 1372673500\nI0818 13:44:12.114730 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 13:44:12.114755 20842 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 13:44:12.114769 20842 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 13:44:12.114786 20842 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 13:44:12.115838 20842 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 
13:44:12.115856 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.115866 20842 net.cpp:165] Memory required for data: 1374721500\nI0818 13:44:12.115885 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 13:44:12.115906 20842 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 13:44:12.115919 20842 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 13:44:12.115939 20842 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 13:44:12.116225 20842 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 13:44:12.116243 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.116252 20842 net.cpp:165] Memory required for data: 1376769500\nI0818 13:44:12.116273 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 13:44:12.116291 20842 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 13:44:12.116302 20842 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 13:44:12.116317 20842 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.116408 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 13:44:12.116600 20842 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 13:44:12.116618 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.116628 20842 net.cpp:165] Memory required for data: 1378817500\nI0818 13:44:12.116647 20842 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 13:44:12.116663 20842 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 13:44:12.116683 20842 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 13:44:12.116698 20842 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 13:44:12.116719 20842 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 13:44:12.116773 20842 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 13:44:12.116796 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.116806 20842 net.cpp:165] Memory required for data: 1380865500\nI0818 13:44:12.116816 20842 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 13:44:12.116830 20842 net.cpp:100] Creating Layer L3_b6_relu\nI0818 13:44:12.116842 20842 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 13:44:12.116864 20842 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.116884 20842 net.cpp:150] Setting up L3_b6_relu\nI0818 13:44:12.116899 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.116907 20842 net.cpp:165] Memory required for data: 1382913500\nI0818 13:44:12.116917 20842 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.116936 20842 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.116948 20842 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 13:44:12.116963 20842 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 13:44:12.116984 20842 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 13:44:12.117065 20842 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.117086 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.117100 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.117110 20842 net.cpp:165] Memory required for data: 1387009500\nI0818 13:44:12.117120 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 13:44:12.117139 20842 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 13:44:12.117152 20842 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 13:44:12.117174 20842 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 13:44:12.118230 20842 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 13:44:12.118252 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.118260 20842 net.cpp:165] Memory required for data: 1389057500\nI0818 13:44:12.118279 20842 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 13:44:12.118296 20842 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 13:44:12.118309 20842 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 13:44:12.118329 20842 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 13:44:12.118618 20842 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 13:44:12.118640 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.118651 20842 net.cpp:165] Memory required for data: 1391105500\nI0818 13:44:12.118680 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 13:44:12.118696 20842 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 13:44:12.118708 20842 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 13:44:12.118726 20842 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.118819 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 13:44:12.119009 20842 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 13:44:12.119027 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.119036 20842 net.cpp:165] Memory required for data: 1393153500\nI0818 13:44:12.119055 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 13:44:12.119127 20842 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 13:44:12.119141 20842 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 13:44:12.119155 20842 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.119175 20842 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0818 13:44:12.119190 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.119199 20842 net.cpp:165] Memory required for data: 1395201500\nI0818 13:44:12.119211 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 13:44:12.119232 20842 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 13:44:12.119244 20842 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 13:44:12.119266 20842 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 
13:44:12.120335 20842 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 13:44:12.120355 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.120365 20842 net.cpp:165] Memory required for data: 1397249500\nI0818 13:44:12.120383 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 13:44:12.120400 20842 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 13:44:12.120419 20842 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 13:44:12.120440 20842 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 13:44:12.120759 20842 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 13:44:12.120781 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.120791 20842 net.cpp:165] Memory required for data: 1399297500\nI0818 13:44:12.120815 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 13:44:12.120831 20842 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 13:44:12.120844 20842 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 13:44:12.120859 20842 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.120956 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 13:44:12.121145 20842 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 13:44:12.121165 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.121173 20842 net.cpp:165] Memory required for data: 1401345500\nI0818 13:44:12.121192 20842 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 13:44:12.121209 20842 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0818 13:44:12.121227 20842 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0818 13:44:12.121240 20842 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 13:44:12.121258 20842 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 13:44:12.121318 20842 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 13:44:12.121336 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.121345 20842 net.cpp:165] Memory 
required for data: 1403393500\nI0818 13:44:12.121356 20842 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 13:44:12.121371 20842 net.cpp:100] Creating Layer L3_b7_relu\nI0818 13:44:12.121383 20842 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 13:44:12.121397 20842 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.121415 20842 net.cpp:150] Setting up L3_b7_relu\nI0818 13:44:12.121429 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.121438 20842 net.cpp:165] Memory required for data: 1405441500\nI0818 13:44:12.121449 20842 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.121464 20842 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.121475 20842 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 13:44:12.121490 20842 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 13:44:12.121511 20842 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 13:44:12.121594 20842 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.121615 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.121629 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.121639 20842 net.cpp:165] Memory required for data: 1409537500\nI0818 13:44:12.121649 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0818 13:44:12.121680 20842 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0818 13:44:12.121695 20842 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 13:44:12.121713 20842 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 13:44:12.122776 20842 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 13:44:12.122797 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.122805 20842 net.cpp:165] Memory required for data: 
1411585500\nI0818 13:44:12.122824 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 13:44:12.122841 20842 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 13:44:12.122853 20842 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 13:44:12.122874 20842 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 13:44:12.123164 20842 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 13:44:12.123183 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.123193 20842 net.cpp:165] Memory required for data: 1413633500\nI0818 13:44:12.123222 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 13:44:12.123239 20842 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 13:44:12.123251 20842 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 13:44:12.123271 20842 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.123366 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 13:44:12.123560 20842 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 13:44:12.123579 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.123589 20842 net.cpp:165] Memory required for data: 1415681500\nI0818 13:44:12.123608 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 13:44:12.123627 20842 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 13:44:12.123639 20842 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 13:44:12.123654 20842 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.123679 20842 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 13:44:12.123695 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.123705 20842 net.cpp:165] Memory required for data: 1417729500\nI0818 13:44:12.123715 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 13:44:12.123740 20842 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 13:44:12.123754 20842 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 13:44:12.123775 20842 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 13:44:12.124888 20842 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 13:44:12.124909 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.124919 20842 net.cpp:165] Memory required for data: 1419777500\nI0818 13:44:12.124938 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 13:44:12.124955 20842 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 13:44:12.124966 20842 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 13:44:12.124987 20842 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 13:44:12.125282 20842 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 13:44:12.125304 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.125314 20842 net.cpp:165] Memory required for data: 1421825500\nI0818 13:44:12.125336 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 13:44:12.125352 20842 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 13:44:12.125365 20842 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 13:44:12.125381 20842 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.125470 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 13:44:12.125660 20842 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 13:44:12.125689 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.125699 20842 net.cpp:165] Memory required for data: 1423873500\nI0818 13:44:12.125718 20842 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 13:44:12.125738 20842 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 13:44:12.125751 20842 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 13:44:12.125766 20842 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 13:44:12.125780 20842 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 13:44:12.125838 20842 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 13:44:12.125856 20842 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 13:44:12.125866 20842 net.cpp:165] Memory required for data: 1425921500\nI0818 13:44:12.125877 20842 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 13:44:12.125891 20842 net.cpp:100] Creating Layer L3_b8_relu\nI0818 13:44:12.125902 20842 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 13:44:12.125916 20842 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.125936 20842 net.cpp:150] Setting up L3_b8_relu\nI0818 13:44:12.125952 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.125960 20842 net.cpp:165] Memory required for data: 1427969500\nI0818 13:44:12.125970 20842 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.125991 20842 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.126003 20842 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 13:44:12.126024 20842 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 13:44:12.126044 20842 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 13:44:12.126121 20842 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.126139 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.126153 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.126163 20842 net.cpp:165] Memory required for data: 1432065500\nI0818 13:44:12.126173 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 13:44:12.126197 20842 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 13:44:12.126211 20842 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 13:44:12.126230 20842 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 13:44:12.128196 20842 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 13:44:12.128218 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:12.128228 20842 net.cpp:165] Memory required for data: 1434113500\nI0818 13:44:12.128247 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 13:44:12.128268 20842 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 13:44:12.128281 20842 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 13:44:12.128299 20842 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 13:44:12.128600 20842 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 13:44:12.128620 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.128630 20842 net.cpp:165] Memory required for data: 1436161500\nI0818 13:44:12.128651 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 13:44:12.128669 20842 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 13:44:12.128689 20842 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 13:44:12.128705 20842 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.128795 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 13:44:12.128984 20842 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 13:44:12.129007 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.129017 20842 net.cpp:165] Memory required for data: 1438209500\nI0818 13:44:12.129036 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 13:44:12.129052 20842 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 13:44:12.129063 20842 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 13:44:12.129078 20842 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.129097 20842 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0818 13:44:12.129112 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.129122 20842 net.cpp:165] Memory required for data: 1440257500\nI0818 13:44:12.129132 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 13:44:12.129155 20842 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 13:44:12.129169 20842 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0818 13:44:12.129187 20842 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 13:44:12.130249 20842 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 13:44:12.130270 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.130278 20842 net.cpp:165] Memory required for data: 1442305500\nI0818 13:44:12.130296 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 13:44:12.130314 20842 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 13:44:12.130331 20842 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 13:44:12.130348 20842 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 13:44:12.130640 20842 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 13:44:12.130661 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.130671 20842 net.cpp:165] Memory required for data: 1444353500\nI0818 13:44:12.130708 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 13:44:12.130725 20842 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 13:44:12.130738 20842 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 13:44:12.130758 20842 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.130851 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 13:44:12.131041 20842 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 13:44:12.131058 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.131068 20842 net.cpp:165] Memory required for data: 1446401500\nI0818 13:44:12.131088 20842 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 13:44:12.131109 20842 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0818 13:44:12.131122 20842 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 13:44:12.131135 20842 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 13:44:12.131152 20842 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 13:44:12.131211 20842 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 
13:44:12.131229 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.131238 20842 net.cpp:165] Memory required for data: 1448449500\nI0818 13:44:12.131247 20842 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 13:44:12.131263 20842 net.cpp:100] Creating Layer L3_b9_relu\nI0818 13:44:12.131275 20842 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 13:44:12.131292 20842 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.131311 20842 net.cpp:150] Setting up L3_b9_relu\nI0818 13:44:12.131326 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.131335 20842 net.cpp:165] Memory required for data: 1450497500\nI0818 13:44:12.131346 20842 layer_factory.hpp:77] Creating layer post_pool\nI0818 13:44:12.131363 20842 net.cpp:100] Creating Layer post_pool\nI0818 13:44:12.131374 20842 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0818 13:44:12.131391 20842 net.cpp:408] post_pool -> post_pool\nI0818 13:44:12.131448 20842 net.cpp:150] Setting up post_pool\nI0818 13:44:12.131474 20842 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 13:44:12.131484 20842 net.cpp:165] Memory required for data: 1450529500\nI0818 13:44:12.131494 20842 layer_factory.hpp:77] Creating layer post_FC\nI0818 13:44:12.131613 20842 net.cpp:100] Creating Layer post_FC\nI0818 13:44:12.131630 20842 net.cpp:434] post_FC <- post_pool\nI0818 13:44:12.131649 20842 net.cpp:408] post_FC -> post_FC_top\nI0818 13:44:12.131959 20842 net.cpp:150] Setting up post_FC\nI0818 13:44:12.131983 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.131994 20842 net.cpp:165] Memory required for data: 1450534500\nI0818 13:44:12.132012 20842 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 13:44:12.132028 20842 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 13:44:12.132040 20842 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 13:44:12.132055 20842 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0818 13:44:12.132076 20842 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 13:44:12.132159 20842 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 13:44:12.132180 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.132192 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.132202 20842 net.cpp:165] Memory required for data: 1450544500\nI0818 13:44:12.132213 20842 layer_factory.hpp:77] Creating layer accuracy\nI0818 13:44:12.132280 20842 net.cpp:100] Creating Layer accuracy\nI0818 13:44:12.132297 20842 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 13:44:12.132311 20842 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 13:44:12.132333 20842 net.cpp:408] accuracy -> accuracy\nI0818 13:44:12.132405 20842 net.cpp:150] Setting up accuracy\nI0818 13:44:12.132422 20842 net.cpp:157] Top shape: (1)\nI0818 13:44:12.132433 20842 net.cpp:165] Memory required for data: 1450544504\nI0818 13:44:12.132443 20842 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:12.132467 20842 net.cpp:100] Creating Layer loss\nI0818 13:44:12.132480 20842 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 13:44:12.132493 20842 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 13:44:12.132509 20842 net.cpp:408] loss -> loss\nI0818 13:44:12.136457 20842 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:12.140416 20842 net.cpp:150] Setting up loss\nI0818 13:44:12.140442 20842 net.cpp:157] Top shape: (1)\nI0818 13:44:12.140453 20842 net.cpp:160]     with loss weight 1\nI0818 13:44:12.140558 20842 net.cpp:165] Memory required for data: 1450544508\nI0818 13:44:12.140571 20842 net.cpp:226] loss needs backward computation.\nI0818 13:44:12.140583 20842 net.cpp:228] accuracy does not need backward computation.\nI0818 13:44:12.140595 20842 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 13:44:12.140605 20842 net.cpp:226] post_FC needs backward 
computation.\nI0818 13:44:12.140614 20842 net.cpp:226] post_pool needs backward computation.\nI0818 13:44:12.140625 20842 net.cpp:226] L3_b9_relu needs backward computation.\nI0818 13:44:12.140635 20842 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.140645 20842 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.140655 20842 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.140666 20842 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.140684 20842 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.140696 20842 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.140705 20842 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.140715 20842 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.140727 20842 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 13:44:12.140736 20842 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 13:44:12.140746 20842 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.140758 20842 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.140768 20842 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.140779 20842 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.140789 20842 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.140799 20842 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 13:44:12.140807 20842 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.140818 20842 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.140828 20842 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 13:44:12.140839 20842 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 13:44:12.140848 20842 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0818 13:44:12.140859 20842 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.140869 20842 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.140879 20842 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.140889 20842 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.140898 20842 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.140909 20842 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.140919 20842 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.140929 20842 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 13:44:12.140939 20842 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 13:44:12.140949 20842 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.140961 20842 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.140971 20842 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.140981 20842 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.140992 20842 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.141011 20842 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.141021 20842 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.141032 20842 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.141042 20842 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 13:44:12.141053 20842 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 13:44:12.141064 20842 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 13:44:12.141074 20842 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.141084 20842 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.141095 20842 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.141106 20842 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.141115 20842 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.141126 20842 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.141136 20842 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.141147 20842 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 13:44:12.141157 20842 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 13:44:12.141167 20842 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.141178 20842 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.141188 20842 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.141199 20842 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.141209 20842 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.141219 20842 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.141229 20842 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.141240 20842 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.141252 20842 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 13:44:12.141264 20842 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 13:44:12.141274 20842 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 13:44:12.141285 20842 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.141296 20842 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.141309 20842 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.141319 20842 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.141330 20842 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.141340 
20842 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.141350 20842 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.141366 20842 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 13:44:12.141379 20842 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 13:44:12.141389 20842 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.141402 20842 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.141413 20842 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.141424 20842 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.141435 20842 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.141445 20842 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.141456 20842 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.141468 20842 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.141479 20842 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 13:44:12.141490 20842 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 13:44:12.141504 20842 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 13:44:12.141513 20842 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 13:44:12.141535 20842 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.141554 20842 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 13:44:12.141566 20842 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.141577 20842 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.141588 20842 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.141598 20842 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.141609 20842 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 
13:44:12.141620 20842 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.141631 20842 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.141643 20842 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 13:44:12.141654 20842 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 13:44:12.141665 20842 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.141685 20842 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.141696 20842 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.141707 20842 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.141718 20842 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.141729 20842 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.141741 20842 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.141752 20842 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.141762 20842 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 13:44:12.141774 20842 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 13:44:12.141785 20842 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.141796 20842 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.141808 20842 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.141820 20842 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.141831 20842 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.141841 20842 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 13:44:12.141852 20842 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.141863 20842 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.141875 20842 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 13:44:12.141887 20842 net.cpp:226] L2_b7_relu needs backward computation.\nI0818 13:44:12.141897 20842 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0818 13:44:12.141909 20842 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.141921 20842 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.141932 20842 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.141948 20842 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.141961 20842 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.141971 20842 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.141983 20842 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.141994 20842 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 13:44:12.142005 20842 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 13:44:12.142016 20842 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.142029 20842 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.142040 20842 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.142051 20842 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.142062 20842 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.142072 20842 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.142096 20842 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.142107 20842 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.142119 20842 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 13:44:12.142132 20842 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 13:44:12.142143 20842 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0818 13:44:12.142154 20842 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.142165 20842 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.142177 20842 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.142189 20842 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.142199 20842 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.142210 20842 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.142222 20842 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.142233 20842 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 13:44:12.142246 20842 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 13:44:12.142256 20842 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.142268 20842 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.142280 20842 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.142292 20842 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.142303 20842 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.142313 20842 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.142325 20842 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.142338 20842 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.142349 20842 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 13:44:12.142360 20842 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 13:44:12.142371 20842 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 13:44:12.142383 20842 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.142395 20842 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.142406 20842 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.142417 20842 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.142428 20842 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.142438 20842 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.142451 20842 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.142462 20842 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 13:44:12.142474 20842 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 13:44:12.142485 20842 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.142498 20842 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.142508 20842 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.142520 20842 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.142531 20842 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.142542 20842 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.142554 20842 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.142565 20842 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.142576 20842 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 13:44:12.142587 20842 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 13:44:12.142599 20842 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 13:44:12.142611 20842 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 13:44:12.142621 20842 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.142640 20842 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 13:44:12.142652 20842 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.142663 20842 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.142683 20842 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.142694 20842 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.142704 20842 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 13:44:12.142716 20842 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.142727 20842 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.142751 20842 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 13:44:12.142765 20842 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 13:44:12.142776 20842 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.142787 20842 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.142799 20842 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.142812 20842 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.142822 20842 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.142832 20842 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.142845 20842 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.142856 20842 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.142868 20842 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 13:44:12.142879 20842 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 13:44:12.142890 20842 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.142902 20842 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.142913 20842 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.142925 20842 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.142937 20842 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.142948 20842 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 
13:44:12.142959 20842 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.142971 20842 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.142982 20842 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 13:44:12.142993 20842 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 13:44:12.143005 20842 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 13:44:12.143018 20842 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.143028 20842 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.143040 20842 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.143052 20842 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.143062 20842 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.143074 20842 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.143085 20842 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.143096 20842 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 13:44:12.143107 20842 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 13:44:12.143120 20842 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.143131 20842 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.143142 20842 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.143154 20842 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.143165 20842 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.143177 20842 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.143187 20842 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.143206 20842 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.143218 20842 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 13:44:12.143230 20842 net.cpp:226] L1_b5_relu needs backward computation.\nI0818 13:44:12.143244 20842 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0818 13:44:12.143256 20842 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.143270 20842 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.143280 20842 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.143292 20842 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.143302 20842 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.143313 20842 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.143324 20842 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.143337 20842 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 13:44:12.143348 20842 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 13:44:12.143359 20842 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.143373 20842 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.143383 20842 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.143394 20842 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.143405 20842 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.143416 20842 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.143427 20842 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.143438 20842 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.143450 20842 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 13:44:12.143463 20842 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 13:44:12.143473 20842 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0818 13:44:12.143486 20842 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.143496 20842 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.143508 20842 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.143519 20842 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.143530 20842 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.143541 20842 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.143553 20842 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.143564 20842 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 13:44:12.143576 20842 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 13:44:12.143587 20842 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.143599 20842 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.143610 20842 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.143622 20842 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.143633 20842 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.143643 20842 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.143656 20842 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.143666 20842 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.143687 20842 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 13:44:12.143700 20842 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 13:44:12.143712 20842 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.143723 20842 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.143735 20842 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.143746 20842 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.143765 20842 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.143776 20842 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 13:44:12.143788 20842 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.143800 20842 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.143811 20842 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 13:44:12.143822 20842 net.cpp:226] pre_relu needs backward computation.\nI0818 13:44:12.143833 20842 net.cpp:226] pre_scale needs backward computation.\nI0818 13:44:12.143844 20842 net.cpp:226] pre_bn needs backward computation.\nI0818 13:44:12.143856 20842 net.cpp:226] pre_conv needs backward computation.\nI0818 13:44:12.143868 20842 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 13:44:12.143882 20842 net.cpp:228] dataLayer does not need backward computation.\nI0818 13:44:12.143890 20842 net.cpp:270] This network produces output accuracy\nI0818 13:44:12.143903 20842 net.cpp:270] This network produces output loss\nI0818 13:44:12.144299 20842 net.cpp:283] Network initialization done.\nI0818 13:44:12.153946 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:12.153996 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:12.154073 20842 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0818 13:44:12.154479 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0818 13:44:12.154505 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0818 13:44:12.154523 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0818 13:44:12.154542 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0818 13:44:12.154562 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0818 13:44:12.154580 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0818 13:44:12.154598 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0818 13:44:12.154618 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0818 13:44:12.154637 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0818 13:44:12.154655 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0818 13:44:12.154683 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0818 13:44:12.154702 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0818 13:44:12.154722 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0818 13:44:12.154741 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0818 13:44:12.154759 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0818 13:44:12.154778 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0818 13:44:12.154796 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0818 13:44:12.154814 20842 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0818 13:44:12.154834 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0818 13:44:12.154863 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0818 13:44:12.154884 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0818 13:44:12.154903 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0818 13:44:12.154928 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0818 13:44:12.154947 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0818 13:44:12.154968 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0818 13:44:12.154983 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0818 13:44:12.155001 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0818 13:44:12.155019 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0818 13:44:12.155035 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0818 13:44:12.155052 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0818 13:44:12.155071 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0818 13:44:12.155089 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0818 13:44:12.155108 20842 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0818 13:44:12.155124 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0818 13:44:12.155143 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0818 13:44:12.155161 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0818 13:44:12.155180 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0818 13:44:12.155199 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0818 13:44:12.155216 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0818 13:44:12.155232 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0818 13:44:12.155257 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0818 13:44:12.155274 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0818 13:44:12.155292 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0818 13:44:12.155308 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0818 13:44:12.155328 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0818 13:44:12.155345 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0818 13:44:12.155364 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0818 13:44:12.155380 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0818 13:44:12.155397 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0818 13:44:12.155414 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0818 13:44:12.155447 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0818 13:44:12.155464 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0818 13:44:12.155485 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0818 13:44:12.155503 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0818 13:44:12.155521 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0818 13:44:12.155539 20842 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0818 13:44:12.157234 20842 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0818 13:44:12.159008 20842 layer_factory.hpp:77] Creating layer dataLayer\nI0818 13:44:12.159237 20842 net.cpp:100] Creating Layer dataLayer\nI0818 13:44:12.159257 20842 net.cpp:408] dataLayer -> data_top\nI0818 13:44:12.159303 20842 net.cpp:408] dataLayer -> label\nI0818 13:44:12.159332 20842 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 13:44:12.227596 20855 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0818 13:44:12.227846 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:12.235679 20842 net.cpp:150] Setting up dataLayer\nI0818 13:44:12.235707 20842 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 13:44:12.235723 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.235733 20842 net.cpp:165] Memory required for data: 1536500\nI0818 13:44:12.235746 20842 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 13:44:12.235764 20842 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 13:44:12.235776 20842 net.cpp:434] label_dataLayer_1_split <- label\nI0818 13:44:12.235792 20842 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 13:44:12.235813 20842 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 13:44:12.235940 20842 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 13:44:12.235960 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.235972 20842 net.cpp:157] Top shape: 125 (125)\nI0818 13:44:12.235980 20842 net.cpp:165] Memory required for data: 1537500\nI0818 13:44:12.235991 20842 layer_factory.hpp:77] Creating layer 
pre_conv\nI0818 13:44:12.236019 20842 net.cpp:100] Creating Layer pre_conv\nI0818 13:44:12.236032 20842 net.cpp:434] pre_conv <- data_top\nI0818 13:44:12.236080 20842 net.cpp:408] pre_conv -> pre_conv_top\nI0818 13:44:12.236546 20842 net.cpp:150] Setting up pre_conv\nI0818 13:44:12.236577 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.236587 20842 net.cpp:165] Memory required for data: 9729500\nI0818 13:44:12.236615 20842 layer_factory.hpp:77] Creating layer pre_bn\nI0818 13:44:12.236645 20842 net.cpp:100] Creating Layer pre_bn\nI0818 13:44:12.236659 20842 net.cpp:434] pre_bn <- pre_conv_top\nI0818 13:44:12.236685 20842 net.cpp:408] pre_bn -> pre_bn_top\nI0818 13:44:12.237064 20842 net.cpp:150] Setting up pre_bn\nI0818 13:44:12.237084 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.237098 20842 net.cpp:165] Memory required for data: 17921500\nI0818 13:44:12.237128 20842 layer_factory.hpp:77] Creating layer pre_scale\nI0818 13:44:12.237144 20842 net.cpp:100] Creating Layer pre_scale\nI0818 13:44:12.237155 20842 net.cpp:434] pre_scale <- pre_bn_top\nI0818 13:44:12.237179 20842 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 13:44:12.237287 20842 layer_factory.hpp:77] Creating layer pre_scale\nI0818 13:44:12.237506 20842 net.cpp:150] Setting up pre_scale\nI0818 13:44:12.237529 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.237540 20842 net.cpp:165] Memory required for data: 26113500\nI0818 13:44:12.237557 20842 layer_factory.hpp:77] Creating layer pre_relu\nI0818 13:44:12.237576 20842 net.cpp:100] Creating Layer pre_relu\nI0818 13:44:12.237586 20842 net.cpp:434] pre_relu <- pre_bn_top\nI0818 13:44:12.237601 20842 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 13:44:12.237619 20842 net.cpp:150] Setting up pre_relu\nI0818 13:44:12.237637 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.237646 20842 net.cpp:165] Memory required for data: 34305500\nI0818 13:44:12.237658 
20842 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 13:44:12.237685 20842 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 13:44:12.237699 20842 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 13:44:12.237717 20842 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 13:44:12.237741 20842 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 13:44:12.237884 20842 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 13:44:12.237903 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.237916 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.237926 20842 net.cpp:165] Memory required for data: 50689500\nI0818 13:44:12.237936 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 13:44:12.237958 20842 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 13:44:12.237970 20842 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 13:44:12.237996 20842 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 13:44:12.238521 20842 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 13:44:12.238545 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.238555 20842 net.cpp:165] Memory required for data: 58881500\nI0818 13:44:12.238581 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 13:44:12.238606 20842 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 13:44:12.238622 20842 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 13:44:12.238639 20842 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 13:44:12.238983 20842 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 13:44:12.239012 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.239024 20842 net.cpp:165] Memory required for data: 67073500\nI0818 13:44:12.239048 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 13:44:12.239066 20842 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 
13:44:12.239078 20842 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 13:44:12.239094 20842 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.239190 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 13:44:12.239388 20842 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 13:44:12.239408 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.239416 20842 net.cpp:165] Memory required for data: 75265500\nI0818 13:44:12.239446 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 13:44:12.239466 20842 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 13:44:12.239477 20842 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 13:44:12.239492 20842 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.239511 20842 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 13:44:12.239526 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.239538 20842 net.cpp:165] Memory required for data: 83457500\nI0818 13:44:12.239548 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 13:44:12.239575 20842 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 13:44:12.239588 20842 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 13:44:12.239612 20842 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 13:44:12.240015 20842 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 13:44:12.240036 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.240044 20842 net.cpp:165] Memory required for data: 91649500\nI0818 13:44:12.240063 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 13:44:12.240084 20842 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 13:44:12.240095 20842 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 13:44:12.240111 20842 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 13:44:12.240411 20842 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 13:44:12.240430 20842 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 13:44:12.240439 20842 net.cpp:165] Memory required for data: 99841500\nI0818 13:44:12.240470 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 13:44:12.240492 20842 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 13:44:12.240504 20842 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 13:44:12.240525 20842 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.240633 20842 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 13:44:12.241014 20842 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 13:44:12.241034 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.241044 20842 net.cpp:165] Memory required for data: 108033500\nI0818 13:44:12.241061 20842 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 13:44:12.241081 20842 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 13:44:12.241094 20842 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 13:44:12.241107 20842 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 13:44:12.241123 20842 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 13:44:12.241183 20842 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 13:44:12.241200 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.241210 20842 net.cpp:165] Memory required for data: 116225500\nI0818 13:44:12.241220 20842 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 13:44:12.241235 20842 net.cpp:100] Creating Layer L1_b1_relu\nI0818 13:44:12.241246 20842 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 13:44:12.241268 20842 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.241288 20842 net.cpp:150] Setting up L1_b1_relu\nI0818 13:44:12.241302 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.241312 20842 net.cpp:165] Memory required for data: 124417500\nI0818 13:44:12.241323 20842 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.241340 20842 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.241351 20842 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 13:44:12.241366 20842 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 13:44:12.241390 20842 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 13:44:12.241477 20842 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 13:44:12.241497 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.241521 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.241538 20842 net.cpp:165] Memory required for data: 140801500\nI0818 13:44:12.241547 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 13:44:12.241571 20842 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 13:44:12.241585 20842 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 13:44:12.241601 20842 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 13:44:12.242010 20842 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 13:44:12.242030 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.242040 20842 net.cpp:165] Memory required for data: 148993500\nI0818 13:44:12.242058 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 13:44:12.242079 20842 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 13:44:12.242091 20842 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 13:44:12.242108 20842 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 13:44:12.242419 20842 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 13:44:12.242437 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.242447 20842 net.cpp:165] Memory required for data: 157185500\nI0818 13:44:12.242470 20842 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0818 13:44:12.242486 20842 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 13:44:12.242496 20842 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 13:44:12.242511 20842 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.242604 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 13:44:12.242800 20842 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 13:44:12.242820 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.242830 20842 net.cpp:165] Memory required for data: 165377500\nI0818 13:44:12.242847 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 13:44:12.242862 20842 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 13:44:12.242874 20842 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 13:44:12.242892 20842 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.242913 20842 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 13:44:12.242928 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.242936 20842 net.cpp:165] Memory required for data: 173569500\nI0818 13:44:12.242946 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 13:44:12.242970 20842 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 13:44:12.242983 20842 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 13:44:12.243000 20842 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 13:44:12.243568 20842 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 13:44:12.243587 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.243597 20842 net.cpp:165] Memory required for data: 181761500\nI0818 13:44:12.243615 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 13:44:12.243638 20842 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 13:44:12.243649 20842 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 13:44:12.243665 20842 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 13:44:12.243978 20842 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 13:44:12.244000 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244010 20842 net.cpp:165] Memory required for data: 189953500\nI0818 13:44:12.244041 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 13:44:12.244057 20842 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 13:44:12.244069 20842 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 13:44:12.244089 20842 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.244189 20842 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 13:44:12.244381 20842 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 13:44:12.244403 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244412 20842 net.cpp:165] Memory required for data: 198145500\nI0818 13:44:12.244442 20842 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 13:44:12.244460 20842 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 13:44:12.244472 20842 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 13:44:12.244484 20842 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 13:44:12.244504 20842 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 13:44:12.244561 20842 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 13:44:12.244582 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244592 20842 net.cpp:165] Memory required for data: 206337500\nI0818 13:44:12.244603 20842 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 13:44:12.244617 20842 net.cpp:100] Creating Layer L1_b2_relu\nI0818 13:44:12.244629 20842 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 13:44:12.244643 20842 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.244662 20842 net.cpp:150] Setting up L1_b2_relu\nI0818 13:44:12.244683 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244693 20842 net.cpp:165] Memory required for 
data: 214529500\nI0818 13:44:12.244704 20842 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.244721 20842 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.244734 20842 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 13:44:12.244747 20842 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 13:44:12.244766 20842 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 13:44:12.244848 20842 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 13:44:12.244871 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244885 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.244895 20842 net.cpp:165] Memory required for data: 230913500\nI0818 13:44:12.244905 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 13:44:12.244925 20842 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 13:44:12.244937 20842 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 13:44:12.244954 20842 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 13:44:12.245354 20842 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 13:44:12.245381 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.245390 20842 net.cpp:165] Memory required for data: 239105500\nI0818 13:44:12.245407 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 13:44:12.245429 20842 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 13:44:12.245441 20842 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 13:44:12.245456 20842 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 13:44:12.245785 20842 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 13:44:12.245805 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.245815 20842 net.cpp:165] Memory required for data: 
247297500\nI0818 13:44:12.245836 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 13:44:12.245854 20842 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 13:44:12.245867 20842 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 13:44:12.245887 20842 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.245990 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 13:44:12.246210 20842 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 13:44:12.246235 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.246246 20842 net.cpp:165] Memory required for data: 255489500\nI0818 13:44:12.246268 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 13:44:12.246286 20842 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 13:44:12.246297 20842 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 13:44:12.246315 20842 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.246345 20842 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 13:44:12.246358 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.246368 20842 net.cpp:165] Memory required for data: 263681500\nI0818 13:44:12.246382 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 13:44:12.246412 20842 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 13:44:12.246426 20842 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 13:44:12.246450 20842 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 13:44:12.246901 20842 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 13:44:12.246920 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.246933 20842 net.cpp:165] Memory required for data: 271873500\nI0818 13:44:12.246953 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 13:44:12.246985 20842 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 13:44:12.246999 20842 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 13:44:12.247016 20842 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 13:44:12.247372 20842 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 13:44:12.247392 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.247402 20842 net.cpp:165] Memory required for data: 280065500\nI0818 13:44:12.247426 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 13:44:12.247443 20842 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 13:44:12.247455 20842 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 13:44:12.247478 20842 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.247582 20842 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 13:44:12.247799 20842 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 13:44:12.247822 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.247835 20842 net.cpp:165] Memory required for data: 288257500\nI0818 13:44:12.247856 20842 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 13:44:12.247874 20842 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 13:44:12.247886 20842 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 13:44:12.247901 20842 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 13:44:12.247920 20842 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 13:44:12.247980 20842 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 13:44:12.248001 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.248013 20842 net.cpp:165] Memory required for data: 296449500\nI0818 13:44:12.248023 20842 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 13:44:12.248046 20842 net.cpp:100] Creating Layer L1_b3_relu\nI0818 13:44:12.248060 20842 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 13:44:12.248075 20842 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.248095 20842 net.cpp:150] Setting up L1_b3_relu\nI0818 13:44:12.248111 20842 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 13:44:12.248119 20842 net.cpp:165] Memory required for data: 304641500\nI0818 13:44:12.248129 20842 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.248148 20842 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.248159 20842 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 13:44:12.248175 20842 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 13:44:12.248198 20842 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 13:44:12.248287 20842 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 13:44:12.248311 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.248327 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.248339 20842 net.cpp:165] Memory required for data: 321025500\nI0818 13:44:12.248352 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 13:44:12.248373 20842 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 13:44:12.248401 20842 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 13:44:12.248423 20842 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 13:44:12.248880 20842 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 13:44:12.248903 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.248914 20842 net.cpp:165] Memory required for data: 329217500\nI0818 13:44:12.248929 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 13:44:12.248951 20842 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 13:44:12.248963 20842 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 13:44:12.248988 20842 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 13:44:12.249344 20842 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 13:44:12.249364 20842 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 13:44:12.249373 20842 net.cpp:165] Memory required for data: 337409500\nI0818 13:44:12.249398 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 13:44:12.249415 20842 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 13:44:12.249429 20842 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 13:44:12.249450 20842 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.249553 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 13:44:12.249774 20842 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 13:44:12.249797 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.249811 20842 net.cpp:165] Memory required for data: 345601500\nI0818 13:44:12.249831 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 13:44:12.249846 20842 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 13:44:12.249860 20842 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 13:44:12.249876 20842 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.249894 20842 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 13:44:12.249912 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.249922 20842 net.cpp:165] Memory required for data: 353793500\nI0818 13:44:12.249932 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 13:44:12.249955 20842 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 13:44:12.249967 20842 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 13:44:12.249994 20842 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 13:44:12.250428 20842 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 13:44:12.250447 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.250457 20842 net.cpp:165] Memory required for data: 361985500\nI0818 13:44:12.250478 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 13:44:12.250499 20842 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 13:44:12.250514 20842 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 13:44:12.250537 20842 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 13:44:12.250916 20842 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 13:44:12.250938 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.250948 20842 net.cpp:165] Memory required for data: 370177500\nI0818 13:44:12.250973 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 13:44:12.250989 20842 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 13:44:12.251004 20842 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 13:44:12.251025 20842 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.251124 20842 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 13:44:12.251338 20842 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 13:44:12.251360 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.251370 20842 net.cpp:165] Memory required for data: 378369500\nI0818 13:44:12.251389 20842 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 13:44:12.251413 20842 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 13:44:12.251426 20842 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 13:44:12.251440 20842 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 13:44:12.251467 20842 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 13:44:12.251533 20842 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 13:44:12.251554 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.251562 20842 net.cpp:165] Memory required for data: 386561500\nI0818 13:44:12.251575 20842 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 13:44:12.251590 20842 net.cpp:100] Creating Layer L1_b4_relu\nI0818 13:44:12.251600 20842 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 13:44:12.251621 20842 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.251643 20842 net.cpp:150] 
Setting up L1_b4_relu\nI0818 13:44:12.251658 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.251667 20842 net.cpp:165] Memory required for data: 394753500\nI0818 13:44:12.251688 20842 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.251703 20842 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.251718 20842 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 13:44:12.251739 20842 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 13:44:12.251758 20842 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 13:44:12.251849 20842 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 13:44:12.251873 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.251886 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.251896 20842 net.cpp:165] Memory required for data: 411137500\nI0818 13:44:12.251906 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 13:44:12.251933 20842 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 13:44:12.251947 20842 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 13:44:12.251969 20842 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 13:44:12.252459 20842 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 13:44:12.252478 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.252488 20842 net.cpp:165] Memory required for data: 419329500\nI0818 13:44:12.252529 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 13:44:12.252554 20842 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 13:44:12.252568 20842 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 13:44:12.252584 20842 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 13:44:12.252945 20842 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0818 13:44:12.252967 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.252977 20842 net.cpp:165] Memory required for data: 427521500\nI0818 13:44:12.253002 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 13:44:12.253018 20842 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 13:44:12.253029 20842 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 13:44:12.253049 20842 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.253152 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 13:44:12.253372 20842 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0818 13:44:12.253394 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.253403 20842 net.cpp:165] Memory required for data: 435713500\nI0818 13:44:12.253424 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 13:44:12.253440 20842 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 13:44:12.253454 20842 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 13:44:12.253474 20842 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.253494 20842 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 13:44:12.253509 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.253520 20842 net.cpp:165] Memory required for data: 443905500\nI0818 13:44:12.253530 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 13:44:12.253566 20842 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 13:44:12.253579 20842 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 13:44:12.253602 20842 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 13:44:12.254051 20842 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 13:44:12.254073 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.254083 20842 net.cpp:165] Memory required for data: 452097500\nI0818 13:44:12.254103 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 13:44:12.254125 20842 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 13:44:12.254140 20842 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 13:44:12.254158 20842 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 13:44:12.254523 20842 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 13:44:12.254549 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.254559 20842 net.cpp:165] Memory required for data: 460289500\nI0818 13:44:12.254580 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 13:44:12.254600 20842 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 13:44:12.254611 20842 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 13:44:12.254626 20842 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.254734 20842 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 13:44:12.254945 20842 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 13:44:12.254964 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.254973 20842 net.cpp:165] Memory required for data: 468481500\nI0818 13:44:12.254992 20842 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 13:44:12.255012 20842 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 13:44:12.255025 20842 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 13:44:12.255038 20842 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 13:44:12.255059 20842 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 13:44:12.255115 20842 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 13:44:12.255136 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.255146 20842 net.cpp:165] Memory required for data: 476673500\nI0818 13:44:12.255154 20842 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 13:44:12.255177 20842 net.cpp:100] Creating Layer L1_b5_relu\nI0818 13:44:12.255188 20842 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 13:44:12.255203 20842 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.255221 20842 net.cpp:150] Setting up L1_b5_relu\nI0818 13:44:12.255235 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.255244 20842 net.cpp:165] Memory required for data: 484865500\nI0818 13:44:12.255254 20842 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.255267 20842 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.255276 20842 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 13:44:12.255291 20842 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 13:44:12.255309 20842 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 13:44:12.255394 20842 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 13:44:12.255414 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.255425 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.255435 20842 net.cpp:165] Memory required for data: 501249500\nI0818 13:44:12.255445 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 13:44:12.255470 20842 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 13:44:12.255481 20842 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 13:44:12.255498 20842 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 13:44:12.255899 20842 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 13:44:12.255919 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.255936 20842 net.cpp:165] Memory required for data: 509441500\nI0818 13:44:12.255955 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 13:44:12.255977 20842 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 13:44:12.255990 20842 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 13:44:12.256006 20842 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0818 13:44:12.256325 20842 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 13:44:12.256343 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.256353 20842 net.cpp:165] Memory required for data: 517633500\nI0818 13:44:12.256374 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 13:44:12.256391 20842 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 13:44:12.256402 20842 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 13:44:12.256417 20842 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.256513 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 13:44:12.256716 20842 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0818 13:44:12.256734 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.256743 20842 net.cpp:165] Memory required for data: 525825500\nI0818 13:44:12.256762 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 13:44:12.256778 20842 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 13:44:12.256788 20842 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 13:44:12.256806 20842 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.256826 20842 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 13:44:12.256840 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.256850 20842 net.cpp:165] Memory required for data: 534017500\nI0818 13:44:12.256860 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 13:44:12.256884 20842 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 13:44:12.256897 20842 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 13:44:12.256916 20842 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 13:44:12.257306 20842 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 13:44:12.257325 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.257335 20842 net.cpp:165] Memory required for data: 542209500\nI0818 13:44:12.257352 20842 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 13:44:12.257370 20842 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 13:44:12.257381 20842 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 13:44:12.257405 20842 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 13:44:12.257725 20842 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 13:44:12.257748 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.257760 20842 net.cpp:165] Memory required for data: 550401500\nI0818 13:44:12.257781 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 13:44:12.257797 20842 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 13:44:12.257808 20842 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 13:44:12.257823 20842 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.257912 20842 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 13:44:12.258105 20842 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 13:44:12.258124 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.258133 20842 net.cpp:165] Memory required for data: 558593500\nI0818 13:44:12.258152 20842 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 13:44:12.258186 20842 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 13:44:12.258201 20842 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 13:44:12.258214 20842 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 13:44:12.258230 20842 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 13:44:12.258291 20842 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 13:44:12.258309 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.258319 20842 net.cpp:165] Memory required for data: 566785500\nI0818 13:44:12.258340 20842 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 13:44:12.258354 20842 net.cpp:100] Creating Layer L1_b6_relu\nI0818 13:44:12.258366 20842 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0818 13:44:12.258380 20842 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.258400 20842 net.cpp:150] Setting up L1_b6_relu\nI0818 13:44:12.258415 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.258424 20842 net.cpp:165] Memory required for data: 574977500\nI0818 13:44:12.258435 20842 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.258447 20842 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.258458 20842 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 13:44:12.258477 20842 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 13:44:12.258497 20842 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 13:44:12.258574 20842 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 13:44:12.258594 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.258606 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.258616 20842 net.cpp:165] Memory required for data: 591361500\nI0818 13:44:12.258627 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 13:44:12.258651 20842 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 13:44:12.258662 20842 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 13:44:12.258688 20842 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 13:44:12.259091 20842 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 13:44:12.259111 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.259120 20842 net.cpp:165] Memory required for data: 599553500\nI0818 13:44:12.259137 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 13:44:12.259155 20842 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 13:44:12.259166 20842 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0818 13:44:12.259187 20842 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 13:44:12.259506 20842 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 13:44:12.259526 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.259536 20842 net.cpp:165] Memory required for data: 607745500\nI0818 13:44:12.259557 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 13:44:12.259573 20842 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 13:44:12.259585 20842 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 13:44:12.259605 20842 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.259701 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 13:44:12.259902 20842 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 13:44:12.259924 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.259934 20842 net.cpp:165] Memory required for data: 615937500\nI0818 13:44:12.259953 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 13:44:12.259969 20842 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 13:44:12.259980 20842 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 13:44:12.259994 20842 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.260012 20842 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 13:44:12.260026 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.260036 20842 net.cpp:165] Memory required for data: 624129500\nI0818 13:44:12.260047 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 13:44:12.260072 20842 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 13:44:12.260087 20842 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 13:44:12.260107 20842 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 13:44:12.260506 20842 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 13:44:12.260526 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.260545 20842 
net.cpp:165] Memory required for data: 632321500\nI0818 13:44:12.260563 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 13:44:12.260586 20842 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 13:44:12.260597 20842 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 13:44:12.260617 20842 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 13:44:12.260932 20842 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 13:44:12.260951 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.260960 20842 net.cpp:165] Memory required for data: 640513500\nI0818 13:44:12.260982 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 13:44:12.260998 20842 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 13:44:12.261008 20842 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 13:44:12.261023 20842 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.261117 20842 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 13:44:12.261312 20842 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 13:44:12.261330 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.261339 20842 net.cpp:165] Memory required for data: 648705500\nI0818 13:44:12.261358 20842 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 13:44:12.261378 20842 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 13:44:12.261389 20842 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 13:44:12.261402 20842 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 13:44:12.261418 20842 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 13:44:12.261478 20842 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 13:44:12.261498 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.261508 20842 net.cpp:165] Memory required for data: 656897500\nI0818 13:44:12.261518 20842 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 13:44:12.261533 20842 net.cpp:100] Creating 
Layer L1_b7_relu\nI0818 13:44:12.261543 20842 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 13:44:12.261561 20842 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.261580 20842 net.cpp:150] Setting up L1_b7_relu\nI0818 13:44:12.261595 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.261605 20842 net.cpp:165] Memory required for data: 665089500\nI0818 13:44:12.261615 20842 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.261627 20842 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.261638 20842 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 13:44:12.261658 20842 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 13:44:12.261685 20842 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 13:44:12.261770 20842 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 13:44:12.261787 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.261801 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.261809 20842 net.cpp:165] Memory required for data: 681473500\nI0818 13:44:12.261819 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 13:44:12.261844 20842 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 13:44:12.261858 20842 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 13:44:12.261876 20842 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 13:44:12.262284 20842 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 13:44:12.262305 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.262312 20842 net.cpp:165] Memory required for data: 689665500\nI0818 13:44:12.262331 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 13:44:12.262352 20842 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0818 13:44:12.262364 20842 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 13:44:12.262393 20842 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 13:44:12.262742 20842 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 13:44:12.262763 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.262773 20842 net.cpp:165] Memory required for data: 697857500\nI0818 13:44:12.262794 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 13:44:12.262809 20842 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 13:44:12.262820 20842 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 13:44:12.262835 20842 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.262928 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 13:44:12.263123 20842 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 13:44:12.263141 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.263151 20842 net.cpp:165] Memory required for data: 706049500\nI0818 13:44:12.263169 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 13:44:12.263188 20842 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 13:44:12.263200 20842 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 13:44:12.263214 20842 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.263233 20842 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 13:44:12.263248 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.263258 20842 net.cpp:165] Memory required for data: 714241500\nI0818 13:44:12.263268 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 13:44:12.263291 20842 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 13:44:12.263304 20842 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 13:44:12.263325 20842 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 13:44:12.263736 20842 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 13:44:12.263756 20842 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.263766 20842 net.cpp:165] Memory required for data: 722433500\nI0818 13:44:12.263783 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 13:44:12.263805 20842 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 13:44:12.263818 20842 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 13:44:12.263839 20842 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 13:44:12.264142 20842 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 13:44:12.264160 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.264170 20842 net.cpp:165] Memory required for data: 730625500\nI0818 13:44:12.264191 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 13:44:12.264207 20842 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 13:44:12.264219 20842 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 13:44:12.264235 20842 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.264330 20842 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 13:44:12.264524 20842 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 13:44:12.264544 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.264554 20842 net.cpp:165] Memory required for data: 738817500\nI0818 13:44:12.264571 20842 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 13:44:12.264587 20842 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 13:44:12.264605 20842 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 13:44:12.264617 20842 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 13:44:12.264633 20842 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 13:44:12.264700 20842 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 13:44:12.264719 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.264729 20842 net.cpp:165] Memory required for data: 747009500\nI0818 13:44:12.264739 20842 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0818 13:44:12.264752 20842 net.cpp:100] Creating Layer L1_b8_relu\nI0818 13:44:12.264763 20842 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 13:44:12.264781 20842 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.264808 20842 net.cpp:150] Setting up L1_b8_relu\nI0818 13:44:12.264824 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.264834 20842 net.cpp:165] Memory required for data: 755201500\nI0818 13:44:12.264843 20842 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.264858 20842 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.264869 20842 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 13:44:12.264889 20842 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 13:44:12.264909 20842 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 13:44:12.264987 20842 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 13:44:12.265005 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.265018 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.265028 20842 net.cpp:165] Memory required for data: 771585500\nI0818 13:44:12.265038 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 13:44:12.265063 20842 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 13:44:12.265075 20842 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 13:44:12.265094 20842 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 13:44:12.265504 20842 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 13:44:12.265528 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.265538 20842 net.cpp:165] Memory required for data: 779777500\nI0818 13:44:12.265558 20842 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0818 13:44:12.265573 20842 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 13:44:12.265584 20842 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 13:44:12.265606 20842 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 13:44:12.265930 20842 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 13:44:12.265951 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.265960 20842 net.cpp:165] Memory required for data: 787969500\nI0818 13:44:12.265982 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 13:44:12.266003 20842 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 13:44:12.266016 20842 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 13:44:12.266031 20842 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.266121 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 13:44:12.266320 20842 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 13:44:12.266340 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.266348 20842 net.cpp:165] Memory required for data: 796161500\nI0818 13:44:12.266366 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 13:44:12.266381 20842 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 13:44:12.266392 20842 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 13:44:12.266414 20842 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.266434 20842 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 13:44:12.266449 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.266458 20842 net.cpp:165] Memory required for data: 804353500\nI0818 13:44:12.266469 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 13:44:12.266492 20842 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 13:44:12.266505 20842 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 13:44:12.266523 20842 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 13:44:12.266930 20842 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 13:44:12.266950 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.266959 20842 net.cpp:165] Memory required for data: 812545500\nI0818 13:44:12.266978 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 13:44:12.266999 20842 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 13:44:12.267012 20842 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 13:44:12.267037 20842 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 13:44:12.267355 20842 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 13:44:12.267375 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.267385 20842 net.cpp:165] Memory required for data: 820737500\nI0818 13:44:12.267441 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 13:44:12.267462 20842 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 13:44:12.267477 20842 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 13:44:12.267495 20842 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.267586 20842 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 13:44:12.267791 20842 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 13:44:12.267812 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.267820 20842 net.cpp:165] Memory required for data: 828929500\nI0818 13:44:12.267838 20842 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 13:44:12.267853 20842 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 13:44:12.267865 20842 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 13:44:12.267879 20842 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 13:44:12.267894 20842 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 13:44:12.267954 20842 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 13:44:12.267973 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.267983 20842 net.cpp:165] Memory required for 
data: 837121500\nI0818 13:44:12.267993 20842 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 13:44:12.268007 20842 net.cpp:100] Creating Layer L1_b9_relu\nI0818 13:44:12.268018 20842 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 13:44:12.268036 20842 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.268055 20842 net.cpp:150] Setting up L1_b9_relu\nI0818 13:44:12.268069 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.268079 20842 net.cpp:165] Memory required for data: 845313500\nI0818 13:44:12.268090 20842 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.268102 20842 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.268113 20842 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 13:44:12.268132 20842 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 13:44:12.268153 20842 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 13:44:12.268239 20842 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 13:44:12.268256 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.268270 20842 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 13:44:12.268278 20842 net.cpp:165] Memory required for data: 861697500\nI0818 13:44:12.268287 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 13:44:12.268311 20842 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 13:44:12.268326 20842 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 13:44:12.268343 20842 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 13:44:12.268764 20842 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 13:44:12.268784 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.268795 20842 net.cpp:165] Memory required for data: 
863745500\nI0818 13:44:12.268811 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 13:44:12.268833 20842 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 13:44:12.268846 20842 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 13:44:12.268862 20842 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 13:44:12.269165 20842 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 13:44:12.269183 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.269193 20842 net.cpp:165] Memory required for data: 865793500\nI0818 13:44:12.269214 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 13:44:12.269239 20842 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 13:44:12.269251 20842 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 13:44:12.269273 20842 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.269371 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 13:44:12.269568 20842 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 13:44:12.269587 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.269598 20842 net.cpp:165] Memory required for data: 867841500\nI0818 13:44:12.269615 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 13:44:12.269630 20842 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 13:44:12.269642 20842 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 13:44:12.269661 20842 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.269690 20842 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 13:44:12.269704 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.269714 20842 net.cpp:165] Memory required for data: 869889500\nI0818 13:44:12.269724 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 13:44:12.269748 20842 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 13:44:12.269762 20842 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 13:44:12.269779 20842 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 13:44:12.270174 20842 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 13:44:12.270195 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.270203 20842 net.cpp:165] Memory required for data: 871937500\nI0818 13:44:12.270221 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 13:44:12.270242 20842 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 13:44:12.270254 20842 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 13:44:12.270275 20842 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 13:44:12.270572 20842 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 13:44:12.270591 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.270601 20842 net.cpp:165] Memory required for data: 873985500\nI0818 13:44:12.270623 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 13:44:12.270639 20842 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 13:44:12.270651 20842 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 13:44:12.270666 20842 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.270769 20842 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 13:44:12.270961 20842 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 13:44:12.270982 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.270992 20842 net.cpp:165] Memory required for data: 876033500\nI0818 13:44:12.271011 20842 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 13:44:12.271028 20842 net.cpp:100] Creating Layer L2_b1_pool\nI0818 13:44:12.271039 20842 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 13:44:12.271055 20842 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 13:44:12.271111 20842 net.cpp:150] Setting up L2_b1_pool\nI0818 13:44:12.271131 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.271140 20842 net.cpp:165] Memory required for data: 878081500\nI0818 13:44:12.271152 20842 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 13:44:12.271167 20842 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 13:44:12.271178 20842 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 13:44:12.271190 20842 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 13:44:12.271212 20842 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 13:44:12.271270 20842 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 13:44:12.271287 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.271297 20842 net.cpp:165] Memory required for data: 880129500\nI0818 13:44:12.271306 20842 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 13:44:12.271327 20842 net.cpp:100] Creating Layer L2_b1_relu\nI0818 13:44:12.271347 20842 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 13:44:12.271363 20842 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.271383 20842 net.cpp:150] Setting up L2_b1_relu\nI0818 13:44:12.271399 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.271409 20842 net.cpp:165] Memory required for data: 882177500\nI0818 13:44:12.271419 20842 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 13:44:12.271436 20842 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 13:44:12.271457 20842 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 13:44:12.273797 20842 net.cpp:150] Setting up L2_b1_zeros\nI0818 13:44:12.273820 20842 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 13:44:12.273830 20842 net.cpp:165] Memory required for data: 884225500\nI0818 13:44:12.273840 20842 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 13:44:12.273855 20842 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 13:44:12.273867 20842 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 13:44:12.273881 20842 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 13:44:12.273901 20842 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 13:44:12.273963 20842 net.cpp:150] Setting up 
L2_b1_concat0\nI0818 13:44:12.273982 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.273990 20842 net.cpp:165] Memory required for data: 888321500\nI0818 13:44:12.274000 20842 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.274019 20842 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.274030 20842 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 13:44:12.274045 20842 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 13:44:12.274067 20842 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 13:44:12.274152 20842 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 13:44:12.274169 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.274183 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.274190 20842 net.cpp:165] Memory required for data: 896513500\nI0818 13:44:12.274200 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 13:44:12.274225 20842 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 13:44:12.274237 20842 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 13:44:12.274260 20842 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 13:44:12.274808 20842 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 13:44:12.274827 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.274837 20842 net.cpp:165] Memory required for data: 900609500\nI0818 13:44:12.274854 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 13:44:12.274870 20842 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 13:44:12.274883 20842 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 13:44:12.274902 20842 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 13:44:12.275224 20842 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 13:44:12.275243 20842 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 13:44:12.275252 20842 net.cpp:165] Memory required for data: 904705500\nI0818 13:44:12.275272 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 13:44:12.275292 20842 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 13:44:12.275305 20842 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 13:44:12.275321 20842 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.275418 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 13:44:12.275621 20842 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 13:44:12.275640 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.275650 20842 net.cpp:165] Memory required for data: 908801500\nI0818 13:44:12.275667 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 13:44:12.275696 20842 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 13:44:12.275715 20842 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 13:44:12.275730 20842 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.275750 20842 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 13:44:12.275765 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.275774 20842 net.cpp:165] Memory required for data: 912897500\nI0818 13:44:12.275784 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 13:44:12.275810 20842 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 13:44:12.275822 20842 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 13:44:12.275873 20842 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 13:44:12.276418 20842 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 13:44:12.276438 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.276448 20842 net.cpp:165] Memory required for data: 916993500\nI0818 13:44:12.276465 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 13:44:12.276481 20842 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 13:44:12.276494 
20842 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 13:44:12.276513 20842 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 13:44:12.276829 20842 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 13:44:12.276847 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.276856 20842 net.cpp:165] Memory required for data: 921089500\nI0818 13:44:12.276877 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 13:44:12.276897 20842 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 13:44:12.276909 20842 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 13:44:12.276924 20842 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.277019 20842 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 13:44:12.277215 20842 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 13:44:12.277235 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.277243 20842 net.cpp:165] Memory required for data: 925185500\nI0818 13:44:12.277261 20842 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 13:44:12.277281 20842 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 13:44:12.277293 20842 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 13:44:12.277307 20842 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 13:44:12.277321 20842 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 13:44:12.277369 20842 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 13:44:12.277395 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.277405 20842 net.cpp:165] Memory required for data: 929281500\nI0818 13:44:12.277415 20842 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 13:44:12.277429 20842 net.cpp:100] Creating Layer L2_b2_relu\nI0818 13:44:12.277441 20842 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 13:44:12.277453 20842 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.277472 20842 net.cpp:150] 
Setting up L2_b2_relu\nI0818 13:44:12.277487 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.277494 20842 net.cpp:165] Memory required for data: 933377500\nI0818 13:44:12.277503 20842 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.277521 20842 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.277534 20842 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 13:44:12.277549 20842 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 13:44:12.277567 20842 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 13:44:12.277653 20842 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 13:44:12.277678 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.277693 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.277703 20842 net.cpp:165] Memory required for data: 941569500\nI0818 13:44:12.277721 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 13:44:12.277742 20842 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 13:44:12.277755 20842 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 13:44:12.277778 20842 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 13:44:12.278316 20842 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 13:44:12.278337 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.278344 20842 net.cpp:165] Memory required for data: 945665500\nI0818 13:44:12.278362 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 13:44:12.278378 20842 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 13:44:12.278390 20842 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 13:44:12.278410 20842 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 13:44:12.278760 20842 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0818 13:44:12.278781 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.278790 20842 net.cpp:165] Memory required for data: 949761500\nI0818 13:44:12.278811 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 13:44:12.278832 20842 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 13:44:12.278844 20842 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 13:44:12.278858 20842 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.278954 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 13:44:12.279151 20842 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 13:44:12.279170 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.279178 20842 net.cpp:165] Memory required for data: 953857500\nI0818 13:44:12.279196 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 13:44:12.279217 20842 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 13:44:12.279229 20842 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 13:44:12.279243 20842 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.279263 20842 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 13:44:12.279276 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.279284 20842 net.cpp:165] Memory required for data: 957953500\nI0818 13:44:12.279294 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 13:44:12.279317 20842 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 13:44:12.279330 20842 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 13:44:12.279353 20842 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 13:44:12.279892 20842 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 13:44:12.279912 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.279922 20842 net.cpp:165] Memory required for data: 962049500\nI0818 13:44:12.279938 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 13:44:12.279955 20842 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 13:44:12.279968 20842 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 13:44:12.279983 20842 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 13:44:12.280300 20842 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 13:44:12.280319 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.280328 20842 net.cpp:165] Memory required for data: 966145500\nI0818 13:44:12.280349 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 13:44:12.280364 20842 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 13:44:12.280375 20842 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 13:44:12.280395 20842 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.280495 20842 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 13:44:12.280699 20842 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 13:44:12.280719 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.280728 20842 net.cpp:165] Memory required for data: 970241500\nI0818 13:44:12.280746 20842 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 13:44:12.280763 20842 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 13:44:12.280784 20842 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 13:44:12.280797 20842 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 13:44:12.280818 20842 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 13:44:12.280867 20842 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 13:44:12.280892 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.280903 20842 net.cpp:165] Memory required for data: 974337500\nI0818 13:44:12.280913 20842 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 13:44:12.280944 20842 net.cpp:100] Creating Layer L2_b3_relu\nI0818 13:44:12.280957 20842 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 13:44:12.280972 20842 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.280990 20842 net.cpp:150] Setting up L2_b3_relu\nI0818 13:44:12.281004 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.281013 20842 net.cpp:165] Memory required for data: 978433500\nI0818 13:44:12.281024 20842 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.281038 20842 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.281049 20842 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 13:44:12.281069 20842 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 13:44:12.281088 20842 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 13:44:12.281172 20842 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 13:44:12.281193 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.281206 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.281215 20842 net.cpp:165] Memory required for data: 986625500\nI0818 13:44:12.281226 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 13:44:12.281251 20842 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 13:44:12.281265 20842 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 13:44:12.281287 20842 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 13:44:12.281836 20842 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 13:44:12.281855 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.281865 20842 net.cpp:165] Memory required for data: 990721500\nI0818 13:44:12.281883 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 13:44:12.281908 20842 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 13:44:12.281920 20842 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 13:44:12.281937 20842 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0818 13:44:12.282239 20842 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 13:44:12.282259 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.282269 20842 net.cpp:165] Memory required for data: 994817500\nI0818 13:44:12.282290 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 13:44:12.282306 20842 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 13:44:12.282317 20842 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 13:44:12.282337 20842 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.282433 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 13:44:12.282627 20842 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 13:44:12.282646 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.282655 20842 net.cpp:165] Memory required for data: 998913500\nI0818 13:44:12.282680 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 13:44:12.282697 20842 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0818 13:44:12.282709 20842 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 13:44:12.282724 20842 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.282743 20842 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 13:44:12.282758 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.282768 20842 net.cpp:165] Memory required for data: 1003009500\nI0818 13:44:12.282789 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 13:44:12.282817 20842 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 13:44:12.282831 20842 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 13:44:12.282855 20842 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 13:44:12.283396 20842 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 13:44:12.283416 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.283426 20842 net.cpp:165] Memory required for data: 1007105500\nI0818 13:44:12.283443 20842 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 13:44:12.283465 20842 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 13:44:12.283478 20842 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 13:44:12.283499 20842 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 13:44:12.283814 20842 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 13:44:12.283834 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.283844 20842 net.cpp:165] Memory required for data: 1011201500\nI0818 13:44:12.283865 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 13:44:12.283881 20842 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 13:44:12.283893 20842 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 13:44:12.283907 20842 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.284003 20842 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 13:44:12.284201 20842 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 13:44:12.284224 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.284234 20842 net.cpp:165] Memory required for data: 1015297500\nI0818 13:44:12.284252 20842 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 13:44:12.284270 20842 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 13:44:12.284281 20842 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 13:44:12.284293 20842 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 13:44:12.284310 20842 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 13:44:12.284363 20842 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 13:44:12.284380 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.284390 20842 net.cpp:165] Memory required for data: 1019393500\nI0818 13:44:12.284400 20842 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 13:44:12.284415 20842 net.cpp:100] Creating Layer L2_b4_relu\nI0818 13:44:12.284426 20842 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0818 13:44:12.284446 20842 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.284466 20842 net.cpp:150] Setting up L2_b4_relu\nI0818 13:44:12.284481 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.284489 20842 net.cpp:165] Memory required for data: 1023489500\nI0818 13:44:12.284499 20842 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.284513 20842 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.284525 20842 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 13:44:12.284544 20842 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 13:44:12.284565 20842 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 13:44:12.284651 20842 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 13:44:12.284677 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.284693 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.284703 20842 net.cpp:165] Memory required for data: 1031681500\nI0818 13:44:12.284713 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 13:44:12.284741 20842 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 13:44:12.284755 20842 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 13:44:12.284773 20842 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 13:44:12.285331 20842 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 13:44:12.285351 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.285360 20842 net.cpp:165] Memory required for data: 1035777500\nI0818 13:44:12.285379 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 13:44:12.285400 20842 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 13:44:12.285413 20842 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0818 13:44:12.285429 20842 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 13:44:12.285742 20842 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 13:44:12.285761 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.285771 20842 net.cpp:165] Memory required for data: 1039873500\nI0818 13:44:12.285794 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 13:44:12.285810 20842 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 13:44:12.285822 20842 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 13:44:12.285837 20842 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.285931 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 13:44:12.286124 20842 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 13:44:12.286147 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.286157 20842 net.cpp:165] Memory required for data: 1043969500\nI0818 13:44:12.286175 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0818 13:44:12.286191 20842 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 13:44:12.286202 20842 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 13:44:12.286216 20842 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.286236 20842 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 13:44:12.286250 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.286260 20842 net.cpp:165] Memory required for data: 1048065500\nI0818 13:44:12.286270 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 13:44:12.286294 20842 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 13:44:12.286308 20842 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 13:44:12.286329 20842 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 13:44:12.286875 20842 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 13:44:12.286895 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.286905 20842 
net.cpp:165] Memory required for data: 1052161500\nI0818 13:44:12.286923 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 13:44:12.286945 20842 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 13:44:12.286957 20842 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 13:44:12.286978 20842 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 13:44:12.287286 20842 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 13:44:12.287304 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.287314 20842 net.cpp:165] Memory required for data: 1056257500\nI0818 13:44:12.287335 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 13:44:12.287353 20842 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 13:44:12.287364 20842 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 13:44:12.287379 20842 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.287474 20842 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 13:44:12.287665 20842 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0818 13:44:12.287690 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.287700 20842 net.cpp:165] Memory required for data: 1060353500\nI0818 13:44:12.287719 20842 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 13:44:12.287741 20842 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 13:44:12.287756 20842 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 13:44:12.287770 20842 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 13:44:12.287786 20842 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 13:44:12.287833 20842 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 13:44:12.287859 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.287869 20842 net.cpp:165] Memory required for data: 1064449500\nI0818 13:44:12.287880 20842 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 13:44:12.287899 20842 net.cpp:100] Creating 
Layer L2_b5_relu\nI0818 13:44:12.287911 20842 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 13:44:12.287925 20842 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.287943 20842 net.cpp:150] Setting up L2_b5_relu\nI0818 13:44:12.287957 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.287966 20842 net.cpp:165] Memory required for data: 1068545500\nI0818 13:44:12.287976 20842 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.287989 20842 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.288000 20842 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 13:44:12.288014 20842 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 13:44:12.288033 20842 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 13:44:12.288120 20842 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 13:44:12.288138 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.288151 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.288161 20842 net.cpp:165] Memory required for data: 1076737500\nI0818 13:44:12.288172 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 13:44:12.288195 20842 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 13:44:12.288208 20842 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 13:44:12.288226 20842 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 13:44:12.288769 20842 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 13:44:12.288789 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.288797 20842 net.cpp:165] Memory required for data: 1080833500\nI0818 13:44:12.288815 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 13:44:12.288836 20842 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0818 13:44:12.288849 20842 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 13:44:12.288868 20842 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 13:44:12.289172 20842 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 13:44:12.289191 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.289201 20842 net.cpp:165] Memory required for data: 1084929500\nI0818 13:44:12.289222 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 13:44:12.289237 20842 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 13:44:12.289249 20842 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 13:44:12.289263 20842 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.289361 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 13:44:12.289563 20842 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 13:44:12.289587 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.289597 20842 net.cpp:165] Memory required for data: 1089025500\nI0818 13:44:12.289615 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 13:44:12.289630 20842 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 13:44:12.289641 20842 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 13:44:12.289655 20842 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.289680 20842 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 13:44:12.289695 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.289705 20842 net.cpp:165] Memory required for data: 1093121500\nI0818 13:44:12.289714 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 13:44:12.289739 20842 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 13:44:12.289752 20842 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 13:44:12.289774 20842 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 13:44:12.290334 20842 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 13:44:12.290355 20842 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.290364 20842 net.cpp:165] Memory required for data: 1097217500\nI0818 13:44:12.290382 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 13:44:12.290403 20842 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 13:44:12.290416 20842 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 13:44:12.290437 20842 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 13:44:12.290757 20842 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 13:44:12.290776 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.290784 20842 net.cpp:165] Memory required for data: 1101313500\nI0818 13:44:12.290807 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 13:44:12.290822 20842 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 13:44:12.290834 20842 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 13:44:12.290848 20842 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.290946 20842 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 13:44:12.291137 20842 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 13:44:12.291157 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.291165 20842 net.cpp:165] Memory required for data: 1105409500\nI0818 13:44:12.291184 20842 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 13:44:12.291204 20842 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 13:44:12.291218 20842 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 13:44:12.291230 20842 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 13:44:12.291247 20842 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 13:44:12.291296 20842 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 13:44:12.291312 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.291322 20842 net.cpp:165] Memory required for data: 1109505500\nI0818 13:44:12.291333 20842 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0818 13:44:12.291352 20842 net.cpp:100] Creating Layer L2_b6_relu\nI0818 13:44:12.291364 20842 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 13:44:12.291379 20842 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.291398 20842 net.cpp:150] Setting up L2_b6_relu\nI0818 13:44:12.291414 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.291422 20842 net.cpp:165] Memory required for data: 1113601500\nI0818 13:44:12.291431 20842 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.291445 20842 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.291455 20842 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 13:44:12.291471 20842 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 13:44:12.291491 20842 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 13:44:12.291575 20842 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 13:44:12.291594 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.291606 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.291616 20842 net.cpp:165] Memory required for data: 1121793500\nI0818 13:44:12.291626 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 13:44:12.291651 20842 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 13:44:12.291666 20842 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 13:44:12.291692 20842 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 13:44:12.293233 20842 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 13:44:12.293256 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.293265 20842 net.cpp:165] Memory required for data: 1125889500\nI0818 13:44:12.293283 20842 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0818 13:44:12.293311 20842 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 13:44:12.293324 20842 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 13:44:12.293344 20842 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 13:44:12.293658 20842 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 13:44:12.293682 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.293694 20842 net.cpp:165] Memory required for data: 1129985500\nI0818 13:44:12.293715 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 13:44:12.293736 20842 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 13:44:12.293750 20842 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 13:44:12.293766 20842 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.293864 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 13:44:12.294059 20842 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 13:44:12.294077 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.294087 20842 net.cpp:165] Memory required for data: 1134081500\nI0818 13:44:12.294106 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 13:44:12.294126 20842 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 13:44:12.294138 20842 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 13:44:12.294153 20842 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.294173 20842 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 13:44:12.294188 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.294196 20842 net.cpp:165] Memory required for data: 1138177500\nI0818 13:44:12.294206 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 13:44:12.294231 20842 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 13:44:12.294245 20842 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 13:44:12.294265 20842 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 13:44:12.294803 20842 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 13:44:12.294823 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.294832 20842 net.cpp:165] Memory required for data: 1142273500\nI0818 13:44:12.294850 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 13:44:12.294868 20842 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 13:44:12.294880 20842 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 13:44:12.294901 20842 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 13:44:12.295205 20842 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 13:44:12.295224 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.295233 20842 net.cpp:165] Memory required for data: 1146369500\nI0818 13:44:12.295254 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 13:44:12.295276 20842 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 13:44:12.295289 20842 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 13:44:12.295305 20842 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.295392 20842 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 13:44:12.295588 20842 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 13:44:12.295608 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.295616 20842 net.cpp:165] Memory required for data: 1150465500\nI0818 13:44:12.295635 20842 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 13:44:12.295657 20842 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 13:44:12.295670 20842 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 13:44:12.295692 20842 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 13:44:12.295709 20842 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 13:44:12.295763 20842 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 13:44:12.295780 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.295789 20842 net.cpp:165] Memory required for 
data: 1154561500\nI0818 13:44:12.295799 20842 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 13:44:12.295814 20842 net.cpp:100] Creating Layer L2_b7_relu\nI0818 13:44:12.295835 20842 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 13:44:12.295850 20842 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.295871 20842 net.cpp:150] Setting up L2_b7_relu\nI0818 13:44:12.295886 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.295897 20842 net.cpp:165] Memory required for data: 1158657500\nI0818 13:44:12.295905 20842 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.295924 20842 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.295936 20842 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 13:44:12.295951 20842 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 13:44:12.295970 20842 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 13:44:12.296056 20842 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 13:44:12.296074 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.296087 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.296097 20842 net.cpp:165] Memory required for data: 1166849500\nI0818 13:44:12.296108 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 13:44:12.296128 20842 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 13:44:12.296140 20842 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 13:44:12.296164 20842 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 13:44:12.296707 20842 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 13:44:12.296727 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.296736 20842 net.cpp:165] Memory required for data: 
1170945500\nI0818 13:44:12.296756 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 13:44:12.296772 20842 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 13:44:12.296784 20842 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 13:44:12.296805 20842 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 13:44:12.297112 20842 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 13:44:12.297130 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.297140 20842 net.cpp:165] Memory required for data: 1175041500\nI0818 13:44:12.297161 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 13:44:12.297183 20842 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 13:44:12.297194 20842 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 13:44:12.297210 20842 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.297302 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 13:44:12.297493 20842 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0818 13:44:12.297513 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.297521 20842 net.cpp:165] Memory required for data: 1179137500\nI0818 13:44:12.297539 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 13:44:12.297560 20842 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 13:44:12.297572 20842 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 13:44:12.297587 20842 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.297606 20842 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 13:44:12.297621 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.297629 20842 net.cpp:165] Memory required for data: 1183233500\nI0818 13:44:12.297641 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 13:44:12.297665 20842 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 13:44:12.297685 20842 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 13:44:12.297706 20842 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 13:44:12.298228 20842 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 13:44:12.298249 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.298259 20842 net.cpp:165] Memory required for data: 1187329500\nI0818 13:44:12.298275 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 13:44:12.298301 20842 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 13:44:12.298315 20842 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 13:44:12.298331 20842 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 13:44:12.298651 20842 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 13:44:12.298671 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.298687 20842 net.cpp:165] Memory required for data: 1191425500\nI0818 13:44:12.298710 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 13:44:12.298727 20842 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 13:44:12.298739 20842 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0818 13:44:12.298758 20842 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.298857 20842 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 13:44:12.299057 20842 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 13:44:12.299075 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.299085 20842 net.cpp:165] Memory required for data: 1195521500\nI0818 13:44:12.299103 20842 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 13:44:12.299121 20842 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 13:44:12.299134 20842 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 13:44:12.299146 20842 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 13:44:12.299167 20842 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 13:44:12.299216 20842 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 13:44:12.299238 20842 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 13:44:12.299249 20842 net.cpp:165] Memory required for data: 1199617500\nI0818 13:44:12.299259 20842 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 13:44:12.299273 20842 net.cpp:100] Creating Layer L2_b8_relu\nI0818 13:44:12.299284 20842 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 13:44:12.299299 20842 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.299316 20842 net.cpp:150] Setting up L2_b8_relu\nI0818 13:44:12.299331 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.299340 20842 net.cpp:165] Memory required for data: 1203713500\nI0818 13:44:12.299350 20842 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.299371 20842 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.299383 20842 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 13:44:12.299399 20842 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 13:44:12.299438 20842 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 13:44:12.299525 20842 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 13:44:12.299549 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.299563 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.299573 20842 net.cpp:165] Memory required for data: 1211905500\nI0818 13:44:12.299584 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 13:44:12.299609 20842 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 13:44:12.299623 20842 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 13:44:12.299641 20842 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 13:44:12.300184 20842 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 13:44:12.300204 20842 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 13:44:12.300213 20842 net.cpp:165] Memory required for data: 1216001500\nI0818 13:44:12.300230 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 13:44:12.300251 20842 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 13:44:12.300264 20842 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 13:44:12.300281 20842 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 13:44:12.300606 20842 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 13:44:12.300631 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.300642 20842 net.cpp:165] Memory required for data: 1220097500\nI0818 13:44:12.300664 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 13:44:12.300689 20842 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 13:44:12.300703 20842 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 13:44:12.300722 20842 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.300824 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 13:44:12.301023 20842 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0818 13:44:12.301041 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.301051 20842 net.cpp:165] Memory required for data: 1224193500\nI0818 13:44:12.301069 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 13:44:12.301084 20842 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 13:44:12.301096 20842 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 13:44:12.301115 20842 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.301134 20842 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 13:44:12.301148 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.301158 20842 net.cpp:165] Memory required for data: 1228289500\nI0818 13:44:12.301169 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 13:44:12.301193 20842 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 13:44:12.301208 20842 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 13:44:12.301225 20842 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 13:44:12.302763 20842 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 13:44:12.302784 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.302794 20842 net.cpp:165] Memory required for data: 1232385500\nI0818 13:44:12.302811 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 13:44:12.302829 20842 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 13:44:12.302842 20842 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 13:44:12.302862 20842 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 13:44:12.303165 20842 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 13:44:12.303184 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.303194 20842 net.cpp:165] Memory required for data: 1236481500\nI0818 13:44:12.303266 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 13:44:12.303292 20842 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 13:44:12.303304 20842 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0818 13:44:12.303320 20842 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.303421 20842 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 13:44:12.303611 20842 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 13:44:12.303633 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.303643 20842 net.cpp:165] Memory required for data: 1240577500\nI0818 13:44:12.303663 20842 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 13:44:12.303686 20842 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 13:44:12.303699 20842 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 13:44:12.303714 20842 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 13:44:12.303730 20842 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 13:44:12.303782 20842 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0818 13:44:12.303800 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.303809 20842 net.cpp:165] Memory required for data: 1244673500\nI0818 13:44:12.303819 20842 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 13:44:12.303834 20842 net.cpp:100] Creating Layer L2_b9_relu\nI0818 13:44:12.303845 20842 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 13:44:12.303864 20842 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.303884 20842 net.cpp:150] Setting up L2_b9_relu\nI0818 13:44:12.303897 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.303915 20842 net.cpp:165] Memory required for data: 1248769500\nI0818 13:44:12.303927 20842 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.303941 20842 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.303952 20842 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 13:44:12.303975 20842 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 13:44:12.303998 20842 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 13:44:12.304081 20842 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 13:44:12.304102 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.304116 20842 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 13:44:12.304126 20842 net.cpp:165] Memory required for data: 1256961500\nI0818 13:44:12.304136 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 13:44:12.304162 20842 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 13:44:12.304175 20842 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 13:44:12.304193 20842 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 13:44:12.304744 20842 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0818 13:44:12.304764 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.304774 20842 net.cpp:165] Memory required for data: 1257985500\nI0818 13:44:12.304791 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 13:44:12.304813 20842 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 13:44:12.304826 20842 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 13:44:12.304841 20842 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 13:44:12.305148 20842 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 13:44:12.305168 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.305177 20842 net.cpp:165] Memory required for data: 1259009500\nI0818 13:44:12.305199 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 13:44:12.305220 20842 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 13:44:12.305233 20842 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 13:44:12.305249 20842 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.305338 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 13:44:12.305538 20842 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 13:44:12.305557 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.305567 20842 net.cpp:165] Memory required for data: 1260033500\nI0818 13:44:12.305585 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 13:44:12.305606 20842 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 13:44:12.305619 20842 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 13:44:12.305634 20842 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 13:44:12.305652 20842 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 13:44:12.305666 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.305685 20842 net.cpp:165] Memory required for data: 1261057500\nI0818 13:44:12.305696 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 13:44:12.305719 20842 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0818 13:44:12.305733 20842 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 13:44:12.305753 20842 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 13:44:12.306283 20842 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 13:44:12.306303 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.306311 20842 net.cpp:165] Memory required for data: 1262081500\nI0818 13:44:12.306329 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 13:44:12.306346 20842 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 13:44:12.306357 20842 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 13:44:12.306378 20842 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 13:44:12.306720 20842 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 13:44:12.306746 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.306756 20842 net.cpp:165] Memory required for data: 1263105500\nI0818 13:44:12.306778 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 13:44:12.306795 20842 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 13:44:12.306807 20842 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 13:44:12.306821 20842 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 13:44:12.306913 20842 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 13:44:12.307111 20842 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 13:44:12.307129 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.307138 20842 net.cpp:165] Memory required for data: 1264129500\nI0818 13:44:12.307155 20842 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 13:44:12.307178 20842 net.cpp:100] Creating Layer L3_b1_pool\nI0818 13:44:12.307189 20842 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 13:44:12.307205 20842 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 13:44:12.307267 20842 net.cpp:150] Setting up L3_b1_pool\nI0818 13:44:12.307287 20842 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0818 13:44:12.307297 20842 net.cpp:165] Memory required for data: 1265153500\nI0818 13:44:12.307307 20842 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 13:44:12.307323 20842 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 13:44:12.307334 20842 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 13:44:12.307346 20842 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 13:44:12.307363 20842 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 13:44:12.307425 20842 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 13:44:12.307443 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.307453 20842 net.cpp:165] Memory required for data: 1266177500\nI0818 13:44:12.307463 20842 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 13:44:12.307478 20842 net.cpp:100] Creating Layer L3_b1_relu\nI0818 13:44:12.307490 20842 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 13:44:12.307504 20842 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 13:44:12.307523 20842 net.cpp:150] Setting up L3_b1_relu\nI0818 13:44:12.307538 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.307546 20842 net.cpp:165] Memory required for data: 1267201500\nI0818 13:44:12.307556 20842 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 13:44:12.307574 20842 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 13:44:12.307593 20842 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 13:44:12.308866 20842 net.cpp:150] Setting up L3_b1_zeros\nI0818 13:44:12.308888 20842 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 13:44:12.308898 20842 net.cpp:165] Memory required for data: 1268225500\nI0818 13:44:12.308909 20842 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 13:44:12.308929 20842 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 13:44:12.308943 20842 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 13:44:12.308956 20842 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 
13:44:12.308971 20842 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 13:44:12.309037 20842 net.cpp:150] Setting up L3_b1_concat0\nI0818 13:44:12.309056 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.309065 20842 net.cpp:165] Memory required for data: 1270273500\nI0818 13:44:12.309075 20842 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.309090 20842 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.309101 20842 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 13:44:12.309120 20842 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 13:44:12.309140 20842 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 13:44:12.309232 20842 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 13:44:12.309258 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.309288 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.309298 20842 net.cpp:165] Memory required for data: 1274369500\nI0818 13:44:12.309309 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 13:44:12.309330 20842 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 13:44:12.309343 20842 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 13:44:12.309366 20842 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 13:44:12.310457 20842 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 13:44:12.310477 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.310487 20842 net.cpp:165] Memory required for data: 1276417500\nI0818 13:44:12.310505 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 13:44:12.310528 20842 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 13:44:12.310539 20842 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 13:44:12.310556 20842 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 
13:44:12.310869 20842 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 13:44:12.310889 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.310899 20842 net.cpp:165] Memory required for data: 1278465500\nI0818 13:44:12.310920 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 13:44:12.310937 20842 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 13:44:12.310948 20842 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 13:44:12.310963 20842 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.311058 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 13:44:12.311255 20842 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 13:44:12.311275 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.311283 20842 net.cpp:165] Memory required for data: 1280513500\nI0818 13:44:12.311302 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 13:44:12.311317 20842 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 13:44:12.311329 20842 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 13:44:12.311344 20842 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 13:44:12.311364 20842 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 13:44:12.311378 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.311388 20842 net.cpp:165] Memory required for data: 1282561500\nI0818 13:44:12.311398 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 13:44:12.311424 20842 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 13:44:12.311437 20842 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 13:44:12.311460 20842 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 13:44:12.312554 20842 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 13:44:12.312575 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.312584 20842 net.cpp:165] Memory required for data: 1284609500\nI0818 13:44:12.312602 20842 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0818 13:44:12.312624 20842 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 13:44:12.312638 20842 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 13:44:12.312654 20842 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 13:44:12.312966 20842 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 13:44:12.312985 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.312995 20842 net.cpp:165] Memory required for data: 1286657500\nI0818 13:44:12.313016 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 13:44:12.313037 20842 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 13:44:12.313050 20842 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 13:44:12.313066 20842 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 13:44:12.313159 20842 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 13:44:12.313357 20842 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 13:44:12.313375 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.313385 20842 net.cpp:165] Memory required for data: 1288705500\nI0818 13:44:12.313412 20842 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 13:44:12.313436 20842 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 13:44:12.313447 20842 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 13:44:12.313462 20842 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 13:44:12.313482 20842 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 13:44:12.313539 20842 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 13:44:12.313561 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.313571 20842 net.cpp:165] Memory required for data: 1290753500\nI0818 13:44:12.313581 20842 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 13:44:12.313596 20842 net.cpp:100] Creating Layer L3_b2_relu\nI0818 13:44:12.313608 20842 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 13:44:12.313627 20842 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 13:44:12.313648 20842 net.cpp:150] Setting up L3_b2_relu\nI0818 13:44:12.313663 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.313678 20842 net.cpp:165] Memory required for data: 1292801500\nI0818 13:44:12.313691 20842 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.313705 20842 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.313716 20842 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 13:44:12.313731 20842 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 13:44:12.313751 20842 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 13:44:12.313836 20842 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 13:44:12.313854 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.313868 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.313876 20842 net.cpp:165] Memory required for data: 1296897500\nI0818 13:44:12.313887 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 13:44:12.313912 20842 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 13:44:12.313925 20842 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 13:44:12.313944 20842 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 13:44:12.315030 20842 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 13:44:12.315050 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.315060 20842 net.cpp:165] Memory required for data: 1298945500\nI0818 13:44:12.315078 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 13:44:12.315099 20842 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 13:44:12.315112 20842 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 13:44:12.315129 20842 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 13:44:12.315434 20842 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 13:44:12.315454 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.315464 20842 net.cpp:165] Memory required for data: 1300993500\nI0818 13:44:12.315485 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 13:44:12.315502 20842 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 13:44:12.315515 20842 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 13:44:12.315529 20842 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.315621 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 13:44:12.315825 20842 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 13:44:12.315845 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.315853 20842 net.cpp:165] Memory required for data: 1303041500\nI0818 13:44:12.315871 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 13:44:12.315887 20842 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 13:44:12.315899 20842 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 13:44:12.315917 20842 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 13:44:12.315939 20842 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 13:44:12.315961 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.315973 20842 net.cpp:165] Memory required for data: 1305089500\nI0818 13:44:12.315984 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 13:44:12.316011 20842 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 13:44:12.316025 20842 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 13:44:12.316042 20842 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 13:44:12.317140 20842 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 13:44:12.317162 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.317170 20842 net.cpp:165] Memory required for data: 1307137500\nI0818 13:44:12.317188 20842 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 13:44:12.317210 20842 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 13:44:12.317224 20842 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 13:44:12.317241 20842 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 13:44:12.317550 20842 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 13:44:12.317569 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.317579 20842 net.cpp:165] Memory required for data: 1309185500\nI0818 13:44:12.317600 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 13:44:12.317621 20842 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 13:44:12.317634 20842 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 13:44:12.317651 20842 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 13:44:12.317764 20842 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 13:44:12.317961 20842 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 13:44:12.317981 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.317991 20842 net.cpp:165] Memory required for data: 1311233500\nI0818 13:44:12.318008 20842 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 13:44:12.318030 20842 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 13:44:12.318043 20842 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 13:44:12.318058 20842 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 13:44:12.318078 20842 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 13:44:12.318133 20842 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 13:44:12.318151 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.318161 20842 net.cpp:165] Memory required for data: 1313281500\nI0818 13:44:12.318171 20842 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 13:44:12.318189 20842 net.cpp:100] Creating Layer L3_b3_relu\nI0818 13:44:12.318202 20842 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0818 13:44:12.318217 20842 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 13:44:12.318235 20842 net.cpp:150] Setting up L3_b3_relu\nI0818 13:44:12.318249 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.318259 20842 net.cpp:165] Memory required for data: 1315329500\nI0818 13:44:12.318269 20842 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.318284 20842 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.318294 20842 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 13:44:12.318308 20842 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 13:44:12.318328 20842 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 13:44:12.318414 20842 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 13:44:12.318434 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.318446 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.318455 20842 net.cpp:165] Memory required for data: 1319425500\nI0818 13:44:12.318465 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 13:44:12.318490 20842 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 13:44:12.318503 20842 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 13:44:12.318531 20842 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 13:44:12.319633 20842 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0818 13:44:12.319653 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.319664 20842 net.cpp:165] Memory required for data: 1321473500\nI0818 13:44:12.319705 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 13:44:12.319730 20842 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 13:44:12.319743 20842 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0818 13:44:12.319764 20842 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 13:44:12.320070 20842 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 13:44:12.320088 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.320097 20842 net.cpp:165] Memory required for data: 1323521500\nI0818 13:44:12.320119 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 13:44:12.320135 20842 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 13:44:12.320147 20842 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 13:44:12.320161 20842 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.320255 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 13:44:12.320454 20842 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 13:44:12.320473 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.320483 20842 net.cpp:165] Memory required for data: 1325569500\nI0818 13:44:12.320502 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 13:44:12.320518 20842 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 13:44:12.320529 20842 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 13:44:12.320549 20842 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 13:44:12.320569 20842 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 13:44:12.320583 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.320592 20842 net.cpp:165] Memory required for data: 1327617500\nI0818 13:44:12.320602 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 13:44:12.320627 20842 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0818 13:44:12.320641 20842 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 13:44:12.320657 20842 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 13:44:12.322738 20842 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 13:44:12.322760 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.322770 20842 net.cpp:165] Memory 
required for data: 1329665500\nI0818 13:44:12.322788 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 13:44:12.322810 20842 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 13:44:12.322824 20842 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 13:44:12.322841 20842 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 13:44:12.323153 20842 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 13:44:12.323173 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.323182 20842 net.cpp:165] Memory required for data: 1331713500\nI0818 13:44:12.323204 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 13:44:12.323220 20842 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 13:44:12.323232 20842 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 13:44:12.323248 20842 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 13:44:12.323343 20842 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 13:44:12.323541 20842 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 13:44:12.323560 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.323570 20842 net.cpp:165] Memory required for data: 1333761500\nI0818 13:44:12.323590 20842 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 13:44:12.323606 20842 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 13:44:12.323617 20842 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 13:44:12.323631 20842 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 13:44:12.323652 20842 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0818 13:44:12.323724 20842 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 13:44:12.323746 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.323757 20842 net.cpp:165] Memory required for data: 1335809500\nI0818 13:44:12.323768 20842 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 13:44:12.323782 20842 net.cpp:100] Creating Layer L3_b4_relu\nI0818 
13:44:12.323794 20842 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 13:44:12.323809 20842 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 13:44:12.323827 20842 net.cpp:150] Setting up L3_b4_relu\nI0818 13:44:12.323843 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.323851 20842 net.cpp:165] Memory required for data: 1337857500\nI0818 13:44:12.323860 20842 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.323879 20842 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.323890 20842 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 13:44:12.323906 20842 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 13:44:12.323925 20842 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 13:44:12.324007 20842 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 13:44:12.324026 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.324039 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.324049 20842 net.cpp:165] Memory required for data: 1341953500\nI0818 13:44:12.324059 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 13:44:12.324079 20842 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 13:44:12.324092 20842 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 13:44:12.324115 20842 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0818 13:44:12.325261 20842 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 13:44:12.325283 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.325292 20842 net.cpp:165] Memory required for data: 1344001500\nI0818 13:44:12.325310 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 13:44:12.325327 20842 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 
13:44:12.325340 20842 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 13:44:12.325362 20842 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 13:44:12.325680 20842 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 13:44:12.325703 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.325714 20842 net.cpp:165] Memory required for data: 1346049500\nI0818 13:44:12.325736 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 13:44:12.325753 20842 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 13:44:12.325765 20842 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 13:44:12.325781 20842 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.325871 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 13:44:12.326071 20842 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 13:44:12.326091 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.326100 20842 net.cpp:165] Memory required for data: 1348097500\nI0818 13:44:12.326118 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 13:44:12.326134 20842 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 13:44:12.326146 20842 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 13:44:12.326165 20842 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 13:44:12.326186 20842 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 13:44:12.326200 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.326210 20842 net.cpp:165] Memory required for data: 1350145500\nI0818 13:44:12.326220 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0818 13:44:12.326246 20842 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 13:44:12.326259 20842 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 13:44:12.326287 20842 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 13:44:12.327363 20842 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 13:44:12.327384 20842 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 13:44:12.327394 20842 net.cpp:165] Memory required for data: 1352193500\nI0818 13:44:12.327412 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 13:44:12.327435 20842 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 13:44:12.327447 20842 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 13:44:12.327468 20842 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 13:44:12.327782 20842 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 13:44:12.327801 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.327811 20842 net.cpp:165] Memory required for data: 1354241500\nI0818 13:44:12.327832 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 13:44:12.327850 20842 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 13:44:12.327862 20842 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 13:44:12.327883 20842 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 13:44:12.327972 20842 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 13:44:12.328173 20842 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 13:44:12.328193 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.328202 20842 net.cpp:165] Memory required for data: 1356289500\nI0818 13:44:12.328220 20842 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 13:44:12.328238 20842 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 13:44:12.328248 20842 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0818 13:44:12.328261 20842 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 13:44:12.328284 20842 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 13:44:12.328348 20842 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 13:44:12.328366 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.328375 20842 net.cpp:165] Memory required for data: 1358337500\nI0818 13:44:12.328385 20842 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 
13:44:12.328402 20842 net.cpp:100] Creating Layer L3_b5_relu\nI0818 13:44:12.328413 20842 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 13:44:12.328428 20842 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 13:44:12.328445 20842 net.cpp:150] Setting up L3_b5_relu\nI0818 13:44:12.328460 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.328469 20842 net.cpp:165] Memory required for data: 1360385500\nI0818 13:44:12.328478 20842 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.328497 20842 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.328508 20842 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 13:44:12.328523 20842 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 13:44:12.328543 20842 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 13:44:12.328627 20842 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 13:44:12.328649 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.328661 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.328677 20842 net.cpp:165] Memory required for data: 1364481500\nI0818 13:44:12.328691 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 13:44:12.328709 20842 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0818 13:44:12.328722 20842 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 13:44:12.328747 20842 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 13:44:12.329818 20842 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 13:44:12.329838 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.329848 20842 net.cpp:165] Memory required for data: 1366529500\nI0818 13:44:12.329865 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 13:44:12.329891 
20842 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 13:44:12.329903 20842 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 13:44:12.329923 20842 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 13:44:12.330242 20842 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 13:44:12.330265 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.330276 20842 net.cpp:165] Memory required for data: 1368577500\nI0818 13:44:12.330297 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 13:44:12.330313 20842 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 13:44:12.330325 20842 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 13:44:12.330343 20842 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.330431 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 13:44:12.330631 20842 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 13:44:12.330651 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.330660 20842 net.cpp:165] Memory required for data: 1370625500\nI0818 13:44:12.330685 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 13:44:12.330708 20842 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 13:44:12.330720 20842 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 13:44:12.330736 20842 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 13:44:12.330755 20842 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 13:44:12.330770 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.330780 20842 net.cpp:165] Memory required for data: 1372673500\nI0818 13:44:12.330790 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 13:44:12.330814 20842 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 13:44:12.330828 20842 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 13:44:12.330845 20842 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 13:44:12.331915 20842 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 
13:44:12.331935 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.331945 20842 net.cpp:165] Memory required for data: 1374721500\nI0818 13:44:12.331964 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 13:44:12.331989 20842 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 13:44:12.332001 20842 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 13:44:12.332023 20842 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 13:44:12.332352 20842 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 13:44:12.332372 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.332381 20842 net.cpp:165] Memory required for data: 1376769500\nI0818 13:44:12.332403 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 13:44:12.332419 20842 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 13:44:12.332432 20842 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 13:44:12.332451 20842 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 13:44:12.332546 20842 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 13:44:12.332749 20842 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 13:44:12.332768 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.332778 20842 net.cpp:165] Memory required for data: 1378817500\nI0818 13:44:12.332797 20842 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 13:44:12.332814 20842 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 13:44:12.332825 20842 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 13:44:12.332839 20842 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 13:44:12.332860 20842 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 13:44:12.332922 20842 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 13:44:12.332939 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.332948 20842 net.cpp:165] Memory required for data: 1380865500\nI0818 13:44:12.332958 20842 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 13:44:12.332973 20842 net.cpp:100] Creating Layer L3_b6_relu\nI0818 13:44:12.332994 20842 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 13:44:12.333014 20842 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 13:44:12.333035 20842 net.cpp:150] Setting up L3_b6_relu\nI0818 13:44:12.333050 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.333060 20842 net.cpp:165] Memory required for data: 1382913500\nI0818 13:44:12.333068 20842 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.333083 20842 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.333096 20842 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 13:44:12.333109 20842 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 13:44:12.333130 20842 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 13:44:12.333214 20842 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 13:44:12.333232 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.333245 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.333254 20842 net.cpp:165] Memory required for data: 1387009500\nI0818 13:44:12.333266 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 13:44:12.333286 20842 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 13:44:12.333297 20842 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 13:44:12.333320 20842 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 13:44:12.334403 20842 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 13:44:12.334424 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.334434 20842 net.cpp:165] Memory required for data: 1389057500\nI0818 13:44:12.334451 20842 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 13:44:12.334468 20842 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 13:44:12.334481 20842 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 13:44:12.334501 20842 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 13:44:12.334823 20842 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 13:44:12.334848 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.334859 20842 net.cpp:165] Memory required for data: 1391105500\nI0818 13:44:12.334882 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 13:44:12.334898 20842 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 13:44:12.334910 20842 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 13:44:12.334926 20842 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.335026 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 13:44:12.335224 20842 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 13:44:12.335242 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.335253 20842 net.cpp:165] Memory required for data: 1393153500\nI0818 13:44:12.335270 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 13:44:12.335321 20842 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 13:44:12.335336 20842 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 13:44:12.335351 20842 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 13:44:12.335371 20842 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0818 13:44:12.335386 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.335395 20842 net.cpp:165] Memory required for data: 1395201500\nI0818 13:44:12.335407 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 13:44:12.335427 20842 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 13:44:12.335439 20842 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 13:44:12.335463 20842 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 
13:44:12.336539 20842 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 13:44:12.336560 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.336570 20842 net.cpp:165] Memory required for data: 1397249500\nI0818 13:44:12.336587 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 13:44:12.336614 20842 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 13:44:12.336627 20842 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 13:44:12.336647 20842 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 13:44:12.336971 20842 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 13:44:12.336993 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337004 20842 net.cpp:165] Memory required for data: 1399297500\nI0818 13:44:12.337025 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 13:44:12.337043 20842 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 13:44:12.337055 20842 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 13:44:12.337070 20842 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 13:44:12.337162 20842 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 13:44:12.337365 20842 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 13:44:12.337383 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337393 20842 net.cpp:165] Memory required for data: 1401345500\nI0818 13:44:12.337411 20842 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 13:44:12.337433 20842 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0818 13:44:12.337446 20842 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0818 13:44:12.337458 20842 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 13:44:12.337472 20842 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 13:44:12.337527 20842 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 13:44:12.337540 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337548 20842 net.cpp:165] Memory 
required for data: 1403393500\nI0818 13:44:12.337556 20842 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 13:44:12.337571 20842 net.cpp:100] Creating Layer L3_b7_relu\nI0818 13:44:12.337581 20842 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 13:44:12.337592 20842 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 13:44:12.337608 20842 net.cpp:150] Setting up L3_b7_relu\nI0818 13:44:12.337620 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337630 20842 net.cpp:165] Memory required for data: 1405441500\nI0818 13:44:12.337640 20842 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.337651 20842 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.337659 20842 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 13:44:12.337689 20842 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 13:44:12.337709 20842 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 13:44:12.337780 20842 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 13:44:12.337796 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337810 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.337819 20842 net.cpp:165] Memory required for data: 1409537500\nI0818 13:44:12.337829 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0818 13:44:12.337854 20842 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0818 13:44:12.337868 20842 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 13:44:12.337887 20842 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 13:44:12.339968 20842 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 13:44:12.339990 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.340000 20842 net.cpp:165] Memory required for data: 
1411585500\nI0818 13:44:12.340018 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 13:44:12.340040 20842 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 13:44:12.340052 20842 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 13:44:12.340070 20842 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 13:44:12.340399 20842 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 13:44:12.340418 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.340438 20842 net.cpp:165] Memory required for data: 1413633500\nI0818 13:44:12.340461 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 13:44:12.340478 20842 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 13:44:12.340489 20842 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 13:44:12.340505 20842 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.340605 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 13:44:12.340808 20842 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 13:44:12.340832 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.340843 20842 net.cpp:165] Memory required for data: 1415681500\nI0818 13:44:12.340862 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 13:44:12.340878 20842 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 13:44:12.340888 20842 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 13:44:12.340903 20842 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 13:44:12.340922 20842 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 13:44:12.340937 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.340947 20842 net.cpp:165] Memory required for data: 1417729500\nI0818 13:44:12.340957 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 13:44:12.340981 20842 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 13:44:12.340996 20842 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 13:44:12.341013 20842 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 13:44:12.342090 20842 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 13:44:12.342111 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.342120 20842 net.cpp:165] Memory required for data: 1419777500\nI0818 13:44:12.342139 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 13:44:12.342160 20842 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 13:44:12.342175 20842 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 13:44:12.342191 20842 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 13:44:12.342494 20842 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 13:44:12.342512 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.342522 20842 net.cpp:165] Memory required for data: 1421825500\nI0818 13:44:12.342543 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 13:44:12.342561 20842 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 13:44:12.342571 20842 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 13:44:12.342592 20842 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 13:44:12.342689 20842 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 13:44:12.342887 20842 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 13:44:12.342906 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.342916 20842 net.cpp:165] Memory required for data: 1423873500\nI0818 13:44:12.342933 20842 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 13:44:12.342957 20842 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 13:44:12.342969 20842 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 13:44:12.342983 20842 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 13:44:12.342999 20842 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 13:44:12.343060 20842 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 13:44:12.343077 20842 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 13:44:12.343086 20842 net.cpp:165] Memory required for data: 1425921500\nI0818 13:44:12.343096 20842 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 13:44:12.343109 20842 net.cpp:100] Creating Layer L3_b8_relu\nI0818 13:44:12.343122 20842 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 13:44:12.343140 20842 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 13:44:12.343160 20842 net.cpp:150] Setting up L3_b8_relu\nI0818 13:44:12.343174 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.343184 20842 net.cpp:165] Memory required for data: 1427969500\nI0818 13:44:12.343202 20842 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.343217 20842 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.343228 20842 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 13:44:12.343248 20842 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 13:44:12.343269 20842 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 13:44:12.343354 20842 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 13:44:12.343374 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.343386 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.343395 20842 net.cpp:165] Memory required for data: 1432065500\nI0818 13:44:12.343405 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 13:44:12.343425 20842 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 13:44:12.343438 20842 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 13:44:12.343462 20842 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 13:44:12.344525 20842 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 13:44:12.344545 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
13:44:12.344555 20842 net.cpp:165] Memory required for data: 1434113500\nI0818 13:44:12.344573 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 13:44:12.344594 20842 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 13:44:12.344607 20842 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 13:44:12.344625 20842 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 13:44:12.344935 20842 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 13:44:12.344955 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.344965 20842 net.cpp:165] Memory required for data: 1436161500\nI0818 13:44:12.344986 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 13:44:12.345005 20842 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 13:44:12.345016 20842 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 13:44:12.345032 20842 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.345127 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 13:44:12.345324 20842 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 13:44:12.345342 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.345351 20842 net.cpp:165] Memory required for data: 1438209500\nI0818 13:44:12.345369 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 13:44:12.345386 20842 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 13:44:12.345396 20842 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 13:44:12.345412 20842 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 13:44:12.345430 20842 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0818 13:44:12.345445 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.345454 20842 net.cpp:165] Memory required for data: 1440257500\nI0818 13:44:12.345464 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 13:44:12.345489 20842 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 13:44:12.345501 20842 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0818 13:44:12.345523 20842 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 13:44:12.346607 20842 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 13:44:12.346627 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.346637 20842 net.cpp:165] Memory required for data: 1442305500\nI0818 13:44:12.346655 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 13:44:12.346686 20842 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 13:44:12.346700 20842 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 13:44:12.346719 20842 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 13:44:12.347038 20842 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 13:44:12.347067 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.347085 20842 net.cpp:165] Memory required for data: 1444353500\nI0818 13:44:12.347108 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 13:44:12.347131 20842 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 13:44:12.347144 20842 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 13:44:12.347159 20842 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 13:44:12.347259 20842 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 13:44:12.347455 20842 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 13:44:12.347476 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.347484 20842 net.cpp:165] Memory required for data: 1446401500\nI0818 13:44:12.347502 20842 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 13:44:12.347524 20842 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0818 13:44:12.347537 20842 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 13:44:12.347550 20842 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 13:44:12.347566 20842 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 13:44:12.347627 20842 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 
13:44:12.347645 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.347654 20842 net.cpp:165] Memory required for data: 1448449500\nI0818 13:44:12.347664 20842 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 13:44:12.347685 20842 net.cpp:100] Creating Layer L3_b9_relu\nI0818 13:44:12.347698 20842 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 13:44:12.347717 20842 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 13:44:12.347738 20842 net.cpp:150] Setting up L3_b9_relu\nI0818 13:44:12.347751 20842 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 13:44:12.347761 20842 net.cpp:165] Memory required for data: 1450497500\nI0818 13:44:12.347771 20842 layer_factory.hpp:77] Creating layer post_pool\nI0818 13:44:12.347786 20842 net.cpp:100] Creating Layer post_pool\nI0818 13:44:12.347798 20842 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0818 13:44:12.347815 20842 net.cpp:408] post_pool -> post_pool\nI0818 13:44:12.347870 20842 net.cpp:150] Setting up post_pool\nI0818 13:44:12.347893 20842 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 13:44:12.347904 20842 net.cpp:165] Memory required for data: 1450529500\nI0818 13:44:12.347915 20842 layer_factory.hpp:77] Creating layer post_FC\nI0818 13:44:12.347934 20842 net.cpp:100] Creating Layer post_FC\nI0818 13:44:12.347947 20842 net.cpp:434] post_FC <- post_pool\nI0818 13:44:12.347964 20842 net.cpp:408] post_FC -> post_FC_top\nI0818 13:44:12.348162 20842 net.cpp:150] Setting up post_FC\nI0818 13:44:12.348183 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.348193 20842 net.cpp:165] Memory required for data: 1450534500\nI0818 13:44:12.348212 20842 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 13:44:12.348227 20842 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 13:44:12.348238 20842 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 13:44:12.348254 20842 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0818 13:44:12.348274 20842 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 13:44:12.348359 20842 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 13:44:12.348381 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.348393 20842 net.cpp:157] Top shape: 125 10 (1250)\nI0818 13:44:12.348402 20842 net.cpp:165] Memory required for data: 1450544500\nI0818 13:44:12.348412 20842 layer_factory.hpp:77] Creating layer accuracy\nI0818 13:44:12.348433 20842 net.cpp:100] Creating Layer accuracy\nI0818 13:44:12.348444 20842 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 13:44:12.348457 20842 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 13:44:12.348472 20842 net.cpp:408] accuracy -> accuracy\nI0818 13:44:12.348497 20842 net.cpp:150] Setting up accuracy\nI0818 13:44:12.348512 20842 net.cpp:157] Top shape: (1)\nI0818 13:44:12.348531 20842 net.cpp:165] Memory required for data: 1450544504\nI0818 13:44:12.348542 20842 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:12.348557 20842 net.cpp:100] Creating Layer loss\nI0818 13:44:12.348569 20842 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 13:44:12.348582 20842 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 13:44:12.348597 20842 net.cpp:408] loss -> loss\nI0818 13:44:12.348619 20842 layer_factory.hpp:77] Creating layer loss\nI0818 13:44:12.348798 20842 net.cpp:150] Setting up loss\nI0818 13:44:12.348821 20842 net.cpp:157] Top shape: (1)\nI0818 13:44:12.348832 20842 net.cpp:160]     with loss weight 1\nI0818 13:44:12.348855 20842 net.cpp:165] Memory required for data: 1450544508\nI0818 13:44:12.348867 20842 net.cpp:226] loss needs backward computation.\nI0818 13:44:12.348878 20842 net.cpp:228] accuracy does not need backward computation.\nI0818 13:44:12.348891 20842 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 13:44:12.348901 20842 net.cpp:226] post_FC needs backward 
computation.\nI0818 13:44:12.348911 20842 net.cpp:226] post_pool needs backward computation.\nI0818 13:44:12.348920 20842 net.cpp:226] L3_b9_relu needs backward computation.\nI0818 13:44:12.348929 20842 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.348940 20842 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.348949 20842 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.348959 20842 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.348970 20842 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.348981 20842 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.348990 20842 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.349000 20842 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.349011 20842 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 13:44:12.349022 20842 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 13:44:12.349031 20842 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.349041 20842 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.349051 20842 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.349062 20842 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.349071 20842 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.349081 20842 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 13:44:12.349092 20842 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.349102 20842 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.349112 20842 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 13:44:12.349123 20842 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 13:44:12.349133 20842 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0818 13:44:12.349144 20842 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.349154 20842 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.349164 20842 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.349175 20842 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.349185 20842 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.349195 20842 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.349205 20842 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.349215 20842 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 13:44:12.349226 20842 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 13:44:12.349236 20842 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.349246 20842 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.349257 20842 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.349277 20842 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.349287 20842 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.349298 20842 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.349308 20842 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.349318 20842 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.349328 20842 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 13:44:12.349339 20842 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 13:44:12.349349 20842 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 13:44:12.349359 20842 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.349370 20842 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.349380 20842 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.349391 20842 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.349401 20842 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.349411 20842 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.349421 20842 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.349431 20842 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 13:44:12.349443 20842 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 13:44:12.349453 20842 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.349464 20842 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.349473 20842 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.349485 20842 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.349496 20842 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.349504 20842 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.349514 20842 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.349524 20842 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.349535 20842 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 13:44:12.349545 20842 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 13:44:12.349555 20842 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 13:44:12.349570 20842 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.349581 20842 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.349592 20842 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.349603 20842 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.349613 20842 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.349622 
20842 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.349632 20842 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.349643 20842 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 13:44:12.349654 20842 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 13:44:12.349664 20842 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.349684 20842 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.349695 20842 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.349706 20842 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.349716 20842 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.349726 20842 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.349736 20842 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.349746 20842 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.349756 20842 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 13:44:12.349767 20842 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 13:44:12.349787 20842 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 13:44:12.349798 20842 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 13:44:12.349809 20842 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.349822 20842 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 13:44:12.349833 20842 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.349843 20842 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.349854 20842 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.349864 20842 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.349875 20842 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 
13:44:12.349885 20842 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.349895 20842 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.349906 20842 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 13:44:12.349917 20842 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 13:44:12.349927 20842 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.349937 20842 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.349948 20842 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.349959 20842 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.349970 20842 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.349980 20842 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.349992 20842 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.350003 20842 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.350013 20842 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 13:44:12.350023 20842 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 13:44:12.350034 20842 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.350045 20842 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.350055 20842 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.350066 20842 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.350077 20842 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.350086 20842 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 13:44:12.350096 20842 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.350106 20842 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.350122 20842 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 13:44:12.350133 20842 net.cpp:226] L2_b7_relu needs backward computation.\nI0818 13:44:12.350144 20842 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0818 13:44:12.350155 20842 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.350165 20842 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.350177 20842 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.350188 20842 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.350198 20842 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.350208 20842 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.350217 20842 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.350229 20842 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 13:44:12.350240 20842 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 13:44:12.350250 20842 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.350261 20842 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.350272 20842 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.350282 20842 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.350292 20842 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.350311 20842 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.350322 20842 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.350332 20842 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.350343 20842 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 13:44:12.350355 20842 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 13:44:12.350365 20842 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0818 13:44:12.350378 20842 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.350388 20842 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.350399 20842 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.350409 20842 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.350419 20842 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.350430 20842 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.350440 20842 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.350450 20842 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 13:44:12.350461 20842 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 13:44:12.350472 20842 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.350483 20842 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.350494 20842 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.350505 20842 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.350515 20842 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.350527 20842 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.350536 20842 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.350548 20842 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.350558 20842 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 13:44:12.350569 20842 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 13:44:12.350580 20842 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 13:44:12.350592 20842 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.350602 20842 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.350615 20842 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.350625 20842 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.350636 20842 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.350646 20842 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.350657 20842 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.350667 20842 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 13:44:12.350687 20842 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 13:44:12.350698 20842 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.350709 20842 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.350719 20842 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.350731 20842 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.350742 20842 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.350751 20842 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.350761 20842 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.350774 20842 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.350783 20842 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 13:44:12.350795 20842 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 13:44:12.350806 20842 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 13:44:12.350816 20842 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 13:44:12.350836 20842 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.350859 20842 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 13:44:12.350873 20842 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.350885 20842 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.350895 20842 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.350905 20842 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.350916 20842 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 13:44:12.350927 20842 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.350937 20842 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.350949 20842 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 13:44:12.350960 20842 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 13:44:12.350971 20842 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 13:44:12.350982 20842 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 13:44:12.350992 20842 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 13:44:12.351004 20842 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 13:44:12.351014 20842 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 13:44:12.351024 20842 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 13:44:12.351035 20842 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 13:44:12.351047 20842 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 13:44:12.351058 20842 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 13:44:12.351069 20842 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 13:44:12.351080 20842 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 13:44:12.351091 20842 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 13:44:12.351101 20842 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 13:44:12.351114 20842 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 13:44:12.351125 20842 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 13:44:12.351135 20842 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 
13:44:12.351145 20842 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 13:44:12.351157 20842 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 13:44:12.351169 20842 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 13:44:12.351181 20842 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 13:44:12.351191 20842 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 13:44:12.351203 20842 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 13:44:12.351213 20842 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 13:44:12.351225 20842 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 13:44:12.351238 20842 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 13:44:12.351248 20842 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 13:44:12.351259 20842 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 13:44:12.351270 20842 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 13:44:12.351284 20842 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 13:44:12.351294 20842 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 13:44:12.351305 20842 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 13:44:12.351317 20842 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 13:44:12.351327 20842 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 13:44:12.351339 20842 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 13:44:12.351351 20842 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 13:44:12.351361 20842 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 13:44:12.351379 20842 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 13:44:12.351392 20842 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 13:44:12.351404 20842 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 13:44:12.351418 20842 net.cpp:226] L1_b5_relu needs backward computation.\nI0818 13:44:12.351428 20842 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0818 13:44:12.351439 20842 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 13:44:12.351450 20842 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 13:44:12.351461 20842 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 13:44:12.351474 20842 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 13:44:12.351483 20842 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 13:44:12.351495 20842 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 13:44:12.351505 20842 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 13:44:12.351517 20842 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 13:44:12.351528 20842 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 13:44:12.351538 20842 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 13:44:12.351549 20842 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 13:44:12.351560 20842 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 13:44:12.351572 20842 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 13:44:12.351583 20842 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 13:44:12.351593 20842 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 13:44:12.351604 20842 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 13:44:12.351616 20842 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 13:44:12.351629 20842 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 13:44:12.351640 20842 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 13:44:12.351650 20842 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0818 13:44:12.351663 20842 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 13:44:12.351680 20842 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 13:44:12.351692 20842 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 13:44:12.351703 20842 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 13:44:12.351713 20842 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 13:44:12.351724 20842 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 13:44:12.351734 20842 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 13:44:12.351745 20842 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 13:44:12.351757 20842 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 13:44:12.351768 20842 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 13:44:12.351779 20842 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 13:44:12.351790 20842 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 13:44:12.351801 20842 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 13:44:12.351814 20842 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 13:44:12.351824 20842 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 13:44:12.351835 20842 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 13:44:12.351846 20842 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 13:44:12.351857 20842 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 13:44:12.351868 20842 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 13:44:12.351879 20842 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 13:44:12.351892 20842 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 13:44:12.351903 20842 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 13:44:12.351927 20842 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0818 13:44:12.351938 20842 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 13:44:12.351949 20842 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 13:44:12.351959 20842 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 13:44:12.351971 20842 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 13:44:12.351984 20842 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 13:44:12.351994 20842 net.cpp:226] pre_relu needs backward computation.\nI0818 13:44:12.352005 20842 net.cpp:226] pre_scale needs backward computation.\nI0818 13:44:12.352015 20842 net.cpp:226] pre_bn needs backward computation.\nI0818 13:44:12.352026 20842 net.cpp:226] pre_conv needs backward computation.\nI0818 13:44:12.352038 20842 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 13:44:12.352051 20842 net.cpp:228] dataLayer does not need backward computation.\nI0818 13:44:12.352061 20842 net.cpp:270] This network produces output accuracy\nI0818 13:44:12.352072 20842 net.cpp:270] This network produces output loss\nI0818 13:44:12.352425 20842 net.cpp:283] Network initialization done.\nI0818 13:44:12.353484 20842 solver.cpp:60] Solver scaffolding done.\nI0818 13:44:12.576275 20842 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0818 13:44:12.921284 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:12.921355 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:12.928280 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:13.160513 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:13.160624 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:13.194788 20842 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0818 13:44:13.194892 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:13.628176 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:13.628257 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:13.636364 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:13.880170 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:13.880277 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:13.931522 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:13.931630 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:14.442628 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:14.442680 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:14.451730 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:14.717519 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:14.717682 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:14.788414 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:14.788575 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:14.871229 20842 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0818 13:44:15.336087 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:15.336141 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0818 13:44:15.346459 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:15.642477 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:15.642632 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:15.734055 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:15.734205 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:16.362761 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:16.362813 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:16.373265 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:16.695129 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:16.695340 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:16.808161 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:16.808369 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:17.516652 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:17.516729 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:17.528443 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:17.864156 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:17.864404 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:17.997102 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:17.997342 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 
13:44:18.767693 20842 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 13:44:18.767755 20842 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 13:44:18.779959 20842 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 13:44:19.144472 20842 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 13:44:19.144747 20842 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 13:44:19.296900 20842 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 13:44:19.297163 20842 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 13:44:19.468345 20842 parallel.cpp:425] Starting Optimization\nI0818 13:44:19.469624 20842 solver.cpp:279] Solving Cifar-Resnet\nI0818 13:44:19.469642 20842 solver.cpp:280] Learning Rate Policy: triangular\nI0818 13:44:19.473858 20842 solver.cpp:337] Iteration 0, Testing net (#0)\nI0818 13:44:21.584439 20874 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:23.355391 20877 blocking_queue.cpp:50] Waiting for data\nI0818 13:44:24.381886 20877 blocking_queue.cpp:50] Waiting for data\nI0818 13:45:43.299554 20842 solver.cpp:404]     Test net output #0: accuracy = 0.09892\nI0818 13:45:43.299837 20842 solver.cpp:404]     Test net output #1: loss = 3.9918 (* 1 = 3.9918 loss)\nI0818 13:45:47.375872 20842 solver.cpp:228] Iteration 0, loss = 3.82208\nI0818 13:45:47.375911 20842 solver.cpp:244]     Train net output #0: accuracy = 0.144\nI0818 13:45:47.375931 20842 solver.cpp:244]     Train net output #1: loss = 3.82208 (* 1 = 3.82208 loss)\nI0818 13:45:47.462268 20842 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0818 13:48:04.817322 20842 solver.cpp:337] Iteration 100, Testing net (#0)\nI0818 13:49:28.281430 20842 solver.cpp:404]     Test net output #0: accuracy = 0.3276\nI0818 13:49:28.281664 20842 solver.cpp:404]     Test net output #1: 
loss = 1.81554 (* 1 = 1.81554 loss)\nI0818 13:49:29.598403 20842 solver.cpp:228] Iteration 100, loss = 1.82954\nI0818 13:49:29.598443 20842 solver.cpp:244]     Train net output #0: accuracy = 0.256\nI0818 13:49:29.598459 20842 solver.cpp:244]     Train net output #1: loss = 1.82954 (* 1 = 1.82954 loss)\nI0818 13:49:29.689365 20842 sgd_solver.cpp:166] Iteration 100, lr = 0.00250006\nI0818 13:51:46.804361 20842 solver.cpp:337] Iteration 200, Testing net (#0)\nI0818 13:53:10.257212 20842 solver.cpp:404]     Test net output #0: accuracy = 0.43796\nI0818 13:53:10.257464 20842 solver.cpp:404]     Test net output #1: loss = 1.51979 (* 1 = 1.51979 loss)\nI0818 13:53:11.574401 20842 solver.cpp:228] Iteration 200, loss = 1.48374\nI0818 13:53:11.574441 20842 solver.cpp:244]     Train net output #0: accuracy = 0.456\nI0818 13:53:11.574458 20842 solver.cpp:244]     Train net output #1: loss = 1.48374 (* 1 = 1.48374 loss)\nI0818 13:53:11.660388 20842 sgd_solver.cpp:166] Iteration 200, lr = 0.005\nI0818 13:55:29.134677 20842 solver.cpp:337] Iteration 300, Testing net (#0)\nI0818 13:56:52.586140 20842 solver.cpp:404]     Test net output #0: accuracy = 0.51344\nI0818 13:56:52.586390 20842 solver.cpp:404]     Test net output #1: loss = 1.34626 (* 1 = 1.34626 loss)\nI0818 13:56:53.903093 20842 solver.cpp:228] Iteration 300, loss = 1.30176\nI0818 13:56:53.903133 20842 solver.cpp:244]     Train net output #0: accuracy = 0.528\nI0818 13:56:53.903149 20842 solver.cpp:244]     Train net output #1: loss = 1.30176 (* 1 = 1.30176 loss)\nI0818 13:56:53.987704 20842 sgd_solver.cpp:166] Iteration 300, lr = 0.00750005\nI0818 13:59:11.506391 20842 solver.cpp:337] Iteration 400, Testing net (#0)\nI0818 14:00:34.958683 20842 solver.cpp:404]     Test net output #0: accuracy = 0.5746\nI0818 14:00:34.958937 20842 solver.cpp:404]     Test net output #1: loss = 1.18456 (* 1 = 1.18456 loss)\nI0818 14:00:36.275789 20842 solver.cpp:228] Iteration 400, loss = 1.11748\nI0818 14:00:36.275828 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.592\nI0818 14:00:36.275845 20842 solver.cpp:244]     Train net output #1: loss = 1.11748 (* 1 = 1.11748 loss)\nI0818 14:00:36.364063 20842 sgd_solver.cpp:166] Iteration 400, lr = 0.00999999\nI0818 14:02:53.844311 20842 solver.cpp:337] Iteration 500, Testing net (#0)\nI0818 14:04:17.294997 20842 solver.cpp:404]     Test net output #0: accuracy = 0.61176\nI0818 14:04:17.295253 20842 solver.cpp:404]     Test net output #1: loss = 1.07533 (* 1 = 1.07533 loss)\nI0818 14:04:18.611274 20842 solver.cpp:228] Iteration 500, loss = 0.962016\nI0818 14:04:18.611313 20842 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0818 14:04:18.611330 20842 solver.cpp:244]     Train net output #1: loss = 0.962016 (* 1 = 0.962016 loss)\nI0818 14:04:18.702801 20842 sgd_solver.cpp:166] Iteration 500, lr = 0.0125\nI0818 14:06:36.143462 20842 solver.cpp:337] Iteration 600, Testing net (#0)\nI0818 14:08:00.517957 20842 solver.cpp:404]     Test net output #0: accuracy = 0.64348\nI0818 14:08:00.518250 20842 solver.cpp:404]     Test net output #1: loss = 0.994536 (* 1 = 0.994536 loss)\nI0818 14:08:01.839244 20842 solver.cpp:228] Iteration 600, loss = 0.867127\nI0818 14:08:01.839303 20842 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0818 14:08:01.839323 20842 solver.cpp:244]     Train net output #1: loss = 0.867127 (* 1 = 0.867127 loss)\nI0818 14:08:01.930985 20842 sgd_solver.cpp:166] Iteration 600, lr = 0.015\nI0818 14:10:19.323987 20842 solver.cpp:337] Iteration 700, Testing net (#0)\nI0818 14:11:42.782235 20842 solver.cpp:404]     Test net output #0: accuracy = 0.67596\nI0818 14:11:42.782501 20842 solver.cpp:404]     Test net output #1: loss = 0.909542 (* 1 = 0.909542 loss)\nI0818 14:11:44.105196 20842 solver.cpp:228] Iteration 700, loss = 0.802211\nI0818 14:11:44.105242 20842 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 14:11:44.105259 20842 solver.cpp:244]     Train net output #1: loss = 0.802211 
(* 1 = 0.802211 loss)\nI0818 14:11:44.185766 20842 sgd_solver.cpp:166] Iteration 700, lr = 0.0175\nI0818 14:14:01.690726 20842 solver.cpp:337] Iteration 800, Testing net (#0)\nI0818 14:15:25.152740 20842 solver.cpp:404]     Test net output #0: accuracy = 0.69996\nI0818 14:15:25.152993 20842 solver.cpp:404]     Test net output #1: loss = 0.862757 (* 1 = 0.862757 loss)\nI0818 14:15:26.469039 20842 solver.cpp:228] Iteration 800, loss = 0.814637\nI0818 14:15:26.469080 20842 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 14:15:26.469096 20842 solver.cpp:244]     Train net output #1: loss = 0.814637 (* 1 = 0.814637 loss)\nI0818 14:15:26.554241 20842 sgd_solver.cpp:166] Iteration 800, lr = 0.02\nI0818 14:17:44.080788 20842 solver.cpp:337] Iteration 900, Testing net (#0)\nI0818 14:19:07.537569 20842 solver.cpp:404]     Test net output #0: accuracy = 0.7096\nI0818 14:19:07.537827 20842 solver.cpp:404]     Test net output #1: loss = 0.825947 (* 1 = 0.825947 loss)\nI0818 14:19:08.854815 20842 solver.cpp:228] Iteration 900, loss = 0.724681\nI0818 14:19:08.854856 20842 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 14:19:08.854871 20842 solver.cpp:244]     Train net output #1: loss = 0.724681 (* 1 = 0.724681 loss)\nI0818 14:19:08.946147 20842 sgd_solver.cpp:166] Iteration 900, lr = 0.0225\nI0818 14:21:26.436713 20842 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0818 14:22:49.927808 20842 solver.cpp:404]     Test net output #0: accuracy = 0.73364\nI0818 14:22:50.009536 20842 solver.cpp:404]     Test net output #1: loss = 0.774517 (* 1 = 0.774517 loss)\nI0818 14:22:51.326616 20842 solver.cpp:228] Iteration 1000, loss = 0.717458\nI0818 14:22:51.326654 20842 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0818 14:22:51.326670 20842 solver.cpp:244]     Train net output #1: loss = 0.717458 (* 1 = 0.717458 loss)\nI0818 14:22:51.416716 20842 sgd_solver.cpp:166] Iteration 1000, lr = 0.025\nI0818 14:25:08.870746 20842 solver.cpp:337] 
Iteration 1100, Testing net (#0)\nI0818 14:26:32.330672 20842 solver.cpp:404]     Test net output #0: accuracy = 0.74844\nI0818 14:26:32.330946 20842 solver.cpp:404]     Test net output #1: loss = 0.741457 (* 1 = 0.741457 loss)\nI0818 14:26:33.648290 20842 solver.cpp:228] Iteration 1100, loss = 0.566408\nI0818 14:26:33.648331 20842 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0818 14:26:33.648347 20842 solver.cpp:244]     Train net output #1: loss = 0.566408 (* 1 = 0.566408 loss)\nI0818 14:26:33.737062 20842 sgd_solver.cpp:166] Iteration 1100, lr = 0.0275\nI0818 14:28:51.138145 20842 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0818 14:30:14.980881 20842 solver.cpp:404]     Test net output #0: accuracy = 0.76528\nI0818 14:30:14.981138 20842 solver.cpp:404]     Test net output #1: loss = 0.71877 (* 1 = 0.71877 loss)\nI0818 14:30:16.298985 20842 solver.cpp:228] Iteration 1200, loss = 0.619655\nI0818 14:30:16.299026 20842 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0818 14:30:16.299041 20842 solver.cpp:244]     Train net output #1: loss = 0.619655 (* 1 = 0.619655 loss)\nI0818 14:30:16.391614 20842 sgd_solver.cpp:166] Iteration 1200, lr = 0.03\nI0818 14:32:33.898536 20842 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0818 14:33:57.357504 20842 solver.cpp:404]     Test net output #0: accuracy = 0.76528\nI0818 14:33:57.357769 20842 solver.cpp:404]     Test net output #1: loss = 0.712143 (* 1 = 0.712143 loss)\nI0818 14:33:58.675256 20842 solver.cpp:228] Iteration 1300, loss = 0.476563\nI0818 14:33:58.675297 20842 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0818 14:33:58.675312 20842 solver.cpp:244]     Train net output #1: loss = 0.476563 (* 1 = 0.476563 loss)\nI0818 14:33:58.758370 20842 sgd_solver.cpp:166] Iteration 1300, lr = 0.0325\nI0818 14:39:34.431025 20842 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0818 14:40:57.586076 20842 solver.cpp:404]     Test net output #0: accuracy = 0.7752\nI0818 
14:40:57.588457 20842 solver.cpp:404]     Test net output #1: loss = 0.685788 (* 1 = 0.685788 loss)\nI0818 14:40:58.908293 20842 solver.cpp:228] Iteration 1400, loss = 0.464151\nI0818 14:40:58.908332 20842 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 14:40:58.908347 20842 solver.cpp:244]     Train net output #1: loss = 0.464151 (* 1 = 0.464151 loss)\nI0818 14:40:58.994086 20842 sgd_solver.cpp:166] Iteration 1400, lr = 0.035\nI0818 14:43:40.755403 20842 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0818 14:45:04.101227 20842 solver.cpp:404]     Test net output #0: accuracy = 0.77796\nI0818 14:45:04.101557 20842 solver.cpp:404]     Test net output #1: loss = 0.69483 (* 1 = 0.69483 loss)\nI0818 14:45:05.417038 20842 solver.cpp:228] Iteration 1500, loss = 0.477487\nI0818 14:45:05.417080 20842 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:45:05.417095 20842 solver.cpp:244]     Train net output #1: loss = 0.477487 (* 1 = 0.477487 loss)\nI0818 14:45:05.514168 20842 sgd_solver.cpp:166] Iteration 1500, lr = 0.0375\nI0818 14:47:22.907236 20842 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0818 14:48:46.190166 20842 solver.cpp:404]     Test net output #0: accuracy = 0.78396\nI0818 14:48:46.190433 20842 solver.cpp:404]     Test net output #1: loss = 0.690017 (* 1 = 0.690017 loss)\nI0818 14:48:47.506479 20842 solver.cpp:228] Iteration 1600, loss = 0.439176\nI0818 14:48:47.506523 20842 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 14:48:47.506541 20842 solver.cpp:244]     Train net output #1: loss = 0.439176 (* 1 = 0.439176 loss)\nI0818 14:48:47.595587 20842 sgd_solver.cpp:166] Iteration 1600, lr = 0.04\nI0818 14:51:05.029655 20842 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0818 14:52:28.316253 20842 solver.cpp:404]     Test net output #0: accuracy = 0.78528\nI0818 14:52:28.316506 20842 solver.cpp:404]     Test net output #1: loss = 0.693676 (* 1 = 0.693676 loss)\nI0818 14:52:29.631642 20842 
solver.cpp:228] Iteration 1700, loss = 0.264536\nI0818 14:52:29.631685 20842 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 14:52:29.631701 20842 solver.cpp:244]     Train net output #1: loss = 0.264536 (* 1 = 0.264536 loss)\nI0818 14:52:29.726917 20842 sgd_solver.cpp:166] Iteration 1700, lr = 0.0425\nI0818 14:54:47.122694 20842 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0818 14:56:10.404295 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79056\nI0818 14:56:10.404551 20842 solver.cpp:404]     Test net output #1: loss = 0.682025 (* 1 = 0.682025 loss)\nI0818 14:56:11.720103 20842 solver.cpp:228] Iteration 1800, loss = 0.289363\nI0818 14:56:11.720147 20842 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 14:56:11.720165 20842 solver.cpp:244]     Train net output #1: loss = 0.289363 (* 1 = 0.289363 loss)\nI0818 14:56:11.813788 20842 sgd_solver.cpp:166] Iteration 1800, lr = 0.045\nI0818 14:58:29.200798 20842 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0818 14:59:52.480221 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79256\nI0818 14:59:52.480484 20842 solver.cpp:404]     Test net output #1: loss = 0.682678 (* 1 = 0.682678 loss)\nI0818 14:59:53.796705 20842 solver.cpp:228] Iteration 1900, loss = 0.313312\nI0818 14:59:53.796751 20842 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 14:59:53.796768 20842 solver.cpp:244]     Train net output #1: loss = 0.313312 (* 1 = 0.313312 loss)\nI0818 14:59:53.892211 20842 sgd_solver.cpp:166] Iteration 1900, lr = 0.0475\nI0818 15:02:11.296149 20842 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0818 15:03:34.580513 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79836\nI0818 15:03:34.580749 20842 solver.cpp:404]     Test net output #1: loss = 0.680869 (* 1 = 0.680869 loss)\nI0818 15:03:35.898370 20842 solver.cpp:228] Iteration 2000, loss = 0.293058\nI0818 15:03:35.898416 20842 solver.cpp:244]     Train net output #0: accuracy = 
0.896\nI0818 15:03:35.898432 20842 solver.cpp:244]     Train net output #1: loss = 0.293058 (* 1 = 0.293058 loss)\nI0818 15:03:35.998064 20842 sgd_solver.cpp:166] Iteration 2000, lr = 0.05\nI0818 15:05:53.459194 20842 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0818 15:07:16.743609 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79268\nI0818 15:07:16.743854 20842 solver.cpp:404]     Test net output #1: loss = 0.713168 (* 1 = 0.713168 loss)\nI0818 15:07:18.058838 20842 solver.cpp:228] Iteration 2100, loss = 0.337246\nI0818 15:07:18.058883 20842 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0818 15:07:18.058900 20842 solver.cpp:244]     Train net output #1: loss = 0.337246 (* 1 = 0.337246 loss)\nI0818 15:07:18.153244 20842 sgd_solver.cpp:166] Iteration 2100, lr = 0.0525\nI0818 15:09:35.638233 20842 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0818 15:10:58.921844 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79588\nI0818 15:10:58.922097 20842 solver.cpp:404]     Test net output #1: loss = 0.705056 (* 1 = 0.705056 loss)\nI0818 15:11:00.238035 20842 solver.cpp:228] Iteration 2200, loss = 0.270998\nI0818 15:11:00.238081 20842 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 15:11:00.238096 20842 solver.cpp:244]     Train net output #1: loss = 0.270998 (* 1 = 0.270998 loss)\nI0818 15:11:00.321032 20842 sgd_solver.cpp:166] Iteration 2200, lr = 0.0549999\nI0818 15:13:17.750777 20842 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0818 15:14:41.080626 20842 solver.cpp:404]     Test net output #0: accuracy = 0.79016\nI0818 15:14:41.080889 20842 solver.cpp:404]     Test net output #1: loss = 0.708311 (* 1 = 0.708311 loss)\nI0818 15:14:42.396425 20842 solver.cpp:228] Iteration 2300, loss = 0.22984\nI0818 15:14:42.396471 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:14:42.396487 20842 solver.cpp:244]     Train net output #1: loss = 0.22984 (* 1 = 0.22984 loss)\nI0818 
15:14:42.490543 20842 sgd_solver.cpp:166] Iteration 2300, lr = 0.0575\nI0818 15:16:59.945665 20842 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0818 15:18:23.322198 20842 solver.cpp:404]     Test net output #0: accuracy = 0.7944\nI0818 15:18:23.322520 20842 solver.cpp:404]     Test net output #1: loss = 0.744184 (* 1 = 0.744184 loss)\nI0818 15:18:24.638244 20842 solver.cpp:228] Iteration 2400, loss = 0.292282\nI0818 15:18:24.638283 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 15:18:24.638298 20842 solver.cpp:244]     Train net output #1: loss = 0.292282 (* 1 = 0.292282 loss)\nI0818 15:18:24.731834 20842 sgd_solver.cpp:166] Iteration 2400, lr = 0.0599999\nI0818 15:20:42.169737 20842 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0818 15:22:05.544525 20842 solver.cpp:404]     Test net output #0: accuracy = 0.80268\nI0818 15:22:05.544766 20842 solver.cpp:404]     Test net output #1: loss = 0.689618 (* 1 = 0.689618 loss)\nI0818 15:22:06.859717 20842 solver.cpp:228] Iteration 2500, loss = 0.213303\nI0818 15:22:06.859763 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 15:22:06.859781 20842 solver.cpp:244]     Train net output #1: loss = 0.213303 (* 1 = 0.213303 loss)\nI0818 15:22:06.940798 20842 sgd_solver.cpp:166] Iteration 2500, lr = 0.0625\nI0818 15:24:24.333874 20842 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0818 15:25:47.704584 20842 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI0818 15:25:47.704844 20842 solver.cpp:404]     Test net output #1: loss = 0.718025 (* 1 = 0.718025 loss)\nI0818 15:25:49.020462 20842 solver.cpp:228] Iteration 2600, loss = 0.221979\nI0818 15:25:49.020509 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 15:25:49.020525 20842 solver.cpp:244]     Train net output #1: loss = 0.221979 (* 1 = 0.221979 loss)\nI0818 15:25:49.111232 20842 sgd_solver.cpp:166] Iteration 2600, lr = 0.0650001\nI0818 15:28:06.522541 20842 solver.cpp:337] Iteration 
2700, Testing net (#0)\nI0818 15:29:29.895404 20842 solver.cpp:404]     Test net output #0: accuracy = 0.80824\nI0818 15:29:29.895671 20842 solver.cpp:404]     Test net output #1: loss = 0.6996 (* 1 = 0.6996 loss)\nI0818 15:29:31.211784 20842 solver.cpp:228] Iteration 2700, loss = 0.186277\nI0818 15:29:31.211830 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:29:31.211848 20842 solver.cpp:244]     Train net output #1: loss = 0.186277 (* 1 = 0.186277 loss)\nI0818 15:29:31.305456 20842 sgd_solver.cpp:166] Iteration 2700, lr = 0.0675\nI0818 15:31:48.773236 20842 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0818 15:33:12.145264 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8034\nI0818 15:33:12.145524 20842 solver.cpp:404]     Test net output #1: loss = 0.737265 (* 1 = 0.737265 loss)\nI0818 15:33:13.461369 20842 solver.cpp:228] Iteration 2800, loss = 0.163714\nI0818 15:33:13.461416 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:33:13.461431 20842 solver.cpp:244]     Train net output #1: loss = 0.163714 (* 1 = 0.163714 loss)\nI0818 15:33:13.550544 20842 sgd_solver.cpp:166] Iteration 2800, lr = 0.0700001\nI0818 15:35:31.034791 20842 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0818 15:36:54.405805 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8096\nI0818 15:36:54.406069 20842 solver.cpp:404]     Test net output #1: loss = 0.726373 (* 1 = 0.726373 loss)\nI0818 15:36:55.721153 20842 solver.cpp:228] Iteration 2900, loss = 0.142374\nI0818 15:36:55.721196 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:36:55.721212 20842 solver.cpp:244]     Train net output #1: loss = 0.142374 (* 1 = 0.142374 loss)\nI0818 15:36:55.805474 20842 sgd_solver.cpp:166] Iteration 2900, lr = 0.0725\nI0818 15:39:13.201678 20842 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0818 15:40:36.577512 20842 solver.cpp:404]     Test net output #0: accuracy = 0.81308\nI0818 15:40:36.577759 
20842 solver.cpp:404]     Test net output #1: loss = 0.728352 (* 1 = 0.728352 loss)\nI0818 15:40:37.892696 20842 solver.cpp:228] Iteration 3000, loss = 0.148488\nI0818 15:40:37.892741 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 15:40:37.892758 20842 solver.cpp:244]     Train net output #1: loss = 0.148488 (* 1 = 0.148488 loss)\nI0818 15:40:37.985935 20842 sgd_solver.cpp:166] Iteration 3000, lr = 0.075\nI0818 15:42:55.309343 20842 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0818 15:44:18.691488 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8154\nI0818 15:44:18.691752 20842 solver.cpp:404]     Test net output #1: loss = 0.700925 (* 1 = 0.700925 loss)\nI0818 15:44:20.006533 20842 solver.cpp:228] Iteration 3100, loss = 0.119944\nI0818 15:44:20.006577 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 15:44:20.006594 20842 solver.cpp:244]     Train net output #1: loss = 0.119944 (* 1 = 0.119944 loss)\nI0818 15:44:20.100757 20842 sgd_solver.cpp:166] Iteration 3100, lr = 0.0775\nI0818 15:46:37.458353 20842 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0818 15:48:00.869611 20842 solver.cpp:404]     Test net output #0: accuracy = 0.80808\nI0818 15:48:00.869869 20842 solver.cpp:404]     Test net output #1: loss = 0.748294 (* 1 = 0.748294 loss)\nI0818 15:48:02.187381 20842 solver.cpp:228] Iteration 3200, loss = 0.177295\nI0818 15:48:02.187419 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 15:48:02.187435 20842 solver.cpp:244]     Train net output #1: loss = 0.177295 (* 1 = 0.177295 loss)\nI0818 15:48:02.271744 20842 sgd_solver.cpp:166] Iteration 3200, lr = 0.08\nI0818 15:50:19.690418 20842 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0818 15:51:43.061030 20842 solver.cpp:404]     Test net output #0: accuracy = 0.81712\nI0818 15:51:43.061278 20842 solver.cpp:404]     Test net output #1: loss = 0.701607 (* 1 = 0.701607 loss)\nI0818 15:51:44.376315 20842 solver.cpp:228] Iteration 
3300, loss = 0.192597\nI0818 15:51:44.376359 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 15:51:44.376375 20842 solver.cpp:244]     Train net output #1: loss = 0.192597 (* 1 = 0.192597 loss)\nI0818 15:51:44.468535 20842 sgd_solver.cpp:166] Iteration 3300, lr = 0.0825\nI0818 15:54:01.876858 20842 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0818 15:55:25.240679 20842 solver.cpp:404]     Test net output #0: accuracy = 0.80988\nI0818 15:55:25.240928 20842 solver.cpp:404]     Test net output #1: loss = 0.743638 (* 1 = 0.743638 loss)\nI0818 15:55:26.556644 20842 solver.cpp:228] Iteration 3400, loss = 0.223691\nI0818 15:55:26.556686 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 15:55:26.556702 20842 solver.cpp:244]     Train net output #1: loss = 0.223691 (* 1 = 0.223691 loss)\nI0818 15:55:26.646951 20842 sgd_solver.cpp:166] Iteration 3400, lr = 0.085\nI0818 15:57:44.030344 20842 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0818 15:59:07.406829 20842 solver.cpp:404]     Test net output #0: accuracy = 0.81972\nI0818 15:59:07.407078 20842 solver.cpp:404]     Test net output #1: loss = 0.712577 (* 1 = 0.712577 loss)\nI0818 15:59:08.722306 20842 solver.cpp:228] Iteration 3500, loss = 0.153992\nI0818 15:59:08.722347 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 15:59:08.722362 20842 solver.cpp:244]     Train net output #1: loss = 0.153992 (* 1 = 0.153992 loss)\nI0818 15:59:08.814007 20842 sgd_solver.cpp:166] Iteration 3500, lr = 0.0875\nI0818 16:01:26.126938 20842 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0818 16:02:49.503432 20842 solver.cpp:404]     Test net output #0: accuracy = 0.81688\nI0818 16:02:49.503703 20842 solver.cpp:404]     Test net output #1: loss = 0.7436 (* 1 = 0.7436 loss)\nI0818 16:02:50.818737 20842 solver.cpp:228] Iteration 3600, loss = 0.168725\nI0818 16:02:50.818779 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 16:02:50.818795 20842 
solver.cpp:244]     Train net output #1: loss = 0.168725 (* 1 = 0.168725 loss)\nI0818 16:02:50.904860 20842 sgd_solver.cpp:166] Iteration 3600, lr = 0.09\nI0818 16:05:08.301009 20842 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0818 16:06:31.673676 20842 solver.cpp:404]     Test net output #0: accuracy = 0.82268\nI0818 16:06:31.673950 20842 solver.cpp:404]     Test net output #1: loss = 0.732185 (* 1 = 0.732185 loss)\nI0818 16:06:32.989398 20842 solver.cpp:228] Iteration 3700, loss = 0.0899899\nI0818 16:06:32.989439 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:06:32.989464 20842 solver.cpp:244]     Train net output #1: loss = 0.08999 (* 1 = 0.08999 loss)\nI0818 16:06:33.074615 20842 sgd_solver.cpp:166] Iteration 3700, lr = 0.0925\nI0818 16:08:50.576670 20842 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0818 16:10:13.948707 20842 solver.cpp:404]     Test net output #0: accuracy = 0.82212\nI0818 16:10:13.948957 20842 solver.cpp:404]     Test net output #1: loss = 0.754182 (* 1 = 0.754182 loss)\nI0818 16:10:15.264467 20842 solver.cpp:228] Iteration 3800, loss = 0.23838\nI0818 16:10:15.264508 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 16:10:15.264524 20842 solver.cpp:244]     Train net output #1: loss = 0.23838 (* 1 = 0.23838 loss)\nI0818 16:10:15.359185 20842 sgd_solver.cpp:166] Iteration 3800, lr = 0.095\nI0818 16:12:32.789773 20842 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0818 16:13:56.160817 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83072\nI0818 16:13:56.161087 20842 solver.cpp:404]     Test net output #1: loss = 0.702266 (* 1 = 0.702266 loss)\nI0818 16:13:57.477486 20842 solver.cpp:228] Iteration 3900, loss = 0.067586\nI0818 16:13:57.477530 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 16:13:57.477546 20842 solver.cpp:244]     Train net output #1: loss = 0.067586 (* 1 = 0.067586 loss)\nI0818 16:13:57.559454 20842 sgd_solver.cpp:166] Iteration 
3900, lr = 0.0975\nI0818 16:16:14.990907 20842 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0818 16:17:38.364876 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8242\nI0818 16:17:38.365136 20842 solver.cpp:404]     Test net output #1: loss = 0.728199 (* 1 = 0.728199 loss)\nI0818 16:17:39.681095 20842 solver.cpp:228] Iteration 4000, loss = 0.173592\nI0818 16:17:39.681138 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:17:39.681154 20842 solver.cpp:244]     Train net output #1: loss = 0.173592 (* 1 = 0.173592 loss)\nI0818 16:17:39.773170 20842 sgd_solver.cpp:166] Iteration 4000, lr = 0.1\nI0818 16:19:57.220301 20842 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0818 16:21:20.591924 20842 solver.cpp:404]     Test net output #0: accuracy = 0.82548\nI0818 16:21:20.592175 20842 solver.cpp:404]     Test net output #1: loss = 0.743972 (* 1 = 0.743972 loss)\nI0818 16:21:21.906934 20842 solver.cpp:228] Iteration 4100, loss = 0.129934\nI0818 16:21:21.906975 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:21:21.906992 20842 solver.cpp:244]     Train net output #1: loss = 0.129934 (* 1 = 0.129934 loss)\nI0818 16:21:22.002233 20842 sgd_solver.cpp:166] Iteration 4100, lr = 0.1025\nI0818 16:23:39.366578 20842 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0818 16:25:02.742694 20842 solver.cpp:404]     Test net output #0: accuracy = 0.82984\nI0818 16:25:02.742959 20842 solver.cpp:404]     Test net output #1: loss = 0.720478 (* 1 = 0.720478 loss)\nI0818 16:25:04.058527 20842 solver.cpp:228] Iteration 4200, loss = 0.124089\nI0818 16:25:04.058571 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:25:04.058588 20842 solver.cpp:244]     Train net output #1: loss = 0.124089 (* 1 = 0.124089 loss)\nI0818 16:25:04.147958 20842 sgd_solver.cpp:166] Iteration 4200, lr = 0.105\nI0818 16:27:21.583353 20842 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0818 16:28:44.960289 20842 solver.cpp:404] 
    Test net output #0: accuracy = 0.83088\nI0818 16:28:44.960556 20842 solver.cpp:404]     Test net output #1: loss = 0.703203 (* 1 = 0.703203 loss)\nI0818 16:28:46.276124 20842 solver.cpp:228] Iteration 4300, loss = 0.0572773\nI0818 16:28:46.276165 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:28:46.276180 20842 solver.cpp:244]     Train net output #1: loss = 0.0572773 (* 1 = 0.0572773 loss)\nI0818 16:28:46.365694 20842 sgd_solver.cpp:166] Iteration 4300, lr = 0.1075\nI0818 16:31:03.761648 20842 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0818 16:32:27.140617 20842 solver.cpp:404]     Test net output #0: accuracy = 0.82836\nI0818 16:32:27.140874 20842 solver.cpp:404]     Test net output #1: loss = 0.73637 (* 1 = 0.73637 loss)\nI0818 16:32:28.456956 20842 solver.cpp:228] Iteration 4400, loss = 0.0787364\nI0818 16:32:28.457002 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:32:28.457020 20842 solver.cpp:244]     Train net output #1: loss = 0.0787365 (* 1 = 0.0787365 loss)\nI0818 16:32:28.543329 20842 sgd_solver.cpp:166] Iteration 4400, lr = 0.11\nI0818 16:34:45.982483 20842 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0818 16:36:09.358085 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83248\nI0818 16:36:09.358353 20842 solver.cpp:404]     Test net output #1: loss = 0.720035 (* 1 = 0.720035 loss)\nI0818 16:36:10.674638 20842 solver.cpp:228] Iteration 4500, loss = 0.0835163\nI0818 16:36:10.674684 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:36:10.674700 20842 solver.cpp:244]     Train net output #1: loss = 0.0835164 (* 1 = 0.0835164 loss)\nI0818 16:36:10.757517 20842 sgd_solver.cpp:166] Iteration 4500, lr = 0.1125\nI0818 16:38:28.235672 20842 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0818 16:39:51.618520 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8304\nI0818 16:39:51.618789 20842 solver.cpp:404]     Test net output #1: loss = 0.752453 
(* 1 = 0.752453 loss)\nI0818 16:39:52.935288 20842 solver.cpp:228] Iteration 4600, loss = 0.0530936\nI0818 16:39:52.935334 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:39:52.935353 20842 solver.cpp:244]     Train net output #1: loss = 0.0530936 (* 1 = 0.0530936 loss)\nI0818 16:39:53.013933 20842 sgd_solver.cpp:166] Iteration 4600, lr = 0.115\nI0818 16:42:10.345326 20842 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0818 16:43:33.732686 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83272\nI0818 16:43:33.732915 20842 solver.cpp:404]     Test net output #1: loss = 0.719416 (* 1 = 0.719416 loss)\nI0818 16:43:35.048234 20842 solver.cpp:228] Iteration 4700, loss = 0.0292228\nI0818 16:43:35.048281 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 16:43:35.048300 20842 solver.cpp:244]     Train net output #1: loss = 0.0292229 (* 1 = 0.0292229 loss)\nI0818 16:43:35.129855 20842 sgd_solver.cpp:166] Iteration 4700, lr = 0.1175\nI0818 16:45:52.462975 20842 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0818 16:47:15.850708 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83208\nI0818 16:47:15.850956 20842 solver.cpp:404]     Test net output #1: loss = 0.732083 (* 1 = 0.732083 loss)\nI0818 16:47:17.166538 20842 solver.cpp:228] Iteration 4800, loss = 0.0878546\nI0818 16:47:17.166584 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:47:17.166599 20842 solver.cpp:244]     Train net output #1: loss = 0.0878546 (* 1 = 0.0878546 loss)\nI0818 16:47:17.262349 20842 sgd_solver.cpp:166] Iteration 4800, lr = 0.12\nI0818 16:49:34.681140 20842 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0818 16:50:58.060042 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84036\nI0818 16:50:58.060294 20842 solver.cpp:404]     Test net output #1: loss = 0.748451 (* 1 = 0.748451 loss)\nI0818 16:50:59.376349 20842 solver.cpp:228] Iteration 4900, loss = 0.0595483\nI0818 16:50:59.376395 
20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 16:50:59.376412 20842 solver.cpp:244]     Train net output #1: loss = 0.0595484 (* 1 = 0.0595484 loss)\nI0818 16:50:59.459475 20842 sgd_solver.cpp:166] Iteration 4900, lr = 0.1225\nI0818 16:53:16.865106 20842 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0818 16:54:40.243778 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83744\nI0818 16:54:40.243984 20842 solver.cpp:404]     Test net output #1: loss = 0.711853 (* 1 = 0.711853 loss)\nI0818 16:54:41.559305 20842 solver.cpp:228] Iteration 5000, loss = 0.0139725\nI0818 16:54:41.559351 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 16:54:41.559367 20842 solver.cpp:244]     Train net output #1: loss = 0.0139726 (* 1 = 0.0139726 loss)\nI0818 16:54:41.650631 20842 sgd_solver.cpp:166] Iteration 5000, lr = 0.125\nI0818 16:56:59.019007 20842 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0818 16:58:22.311388 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83728\nI0818 16:58:22.311627 20842 solver.cpp:404]     Test net output #1: loss = 0.710053 (* 1 = 0.710053 loss)\nI0818 16:58:23.627470 20842 solver.cpp:228] Iteration 5100, loss = 0.111444\nI0818 16:58:23.627518 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 16:58:23.627537 20842 solver.cpp:244]     Train net output #1: loss = 0.111444 (* 1 = 0.111444 loss)\nI0818 16:58:23.718633 20842 sgd_solver.cpp:166] Iteration 5100, lr = 0.1275\nI0818 17:00:41.168611 20842 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0818 17:02:04.457283 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83504\nI0818 17:02:04.457521 20842 solver.cpp:404]     Test net output #1: loss = 0.735487 (* 1 = 0.735487 loss)\nI0818 17:02:05.773377 20842 solver.cpp:228] Iteration 5200, loss = 0.0430399\nI0818 17:02:05.773422 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:02:05.773439 20842 solver.cpp:244]     Train net output 
#1: loss = 0.04304 (* 1 = 0.04304 loss)\nI0818 17:02:05.864804 20842 sgd_solver.cpp:166] Iteration 5200, lr = 0.13\nI0818 17:04:23.330961 20842 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0818 17:05:46.621110 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83464\nI0818 17:05:46.621341 20842 solver.cpp:404]     Test net output #1: loss = 0.737289 (* 1 = 0.737289 loss)\nI0818 17:05:47.937661 20842 solver.cpp:228] Iteration 5300, loss = 0.142176\nI0818 17:05:47.937705 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:05:47.937722 20842 solver.cpp:244]     Train net output #1: loss = 0.142176 (* 1 = 0.142176 loss)\nI0818 17:05:48.024227 20842 sgd_solver.cpp:166] Iteration 5300, lr = 0.1325\nI0818 17:08:05.472992 20842 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0818 17:09:28.751324 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83976\nI0818 17:09:28.751567 20842 solver.cpp:404]     Test net output #1: loss = 0.696797 (* 1 = 0.696797 loss)\nI0818 17:09:30.067739 20842 solver.cpp:228] Iteration 5400, loss = 0.0773041\nI0818 17:09:30.067783 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:09:30.067800 20842 solver.cpp:244]     Train net output #1: loss = 0.0773042 (* 1 = 0.0773042 loss)\nI0818 17:09:30.153118 20842 sgd_solver.cpp:166] Iteration 5400, lr = 0.135\nI0818 17:11:47.562415 20842 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0818 17:13:10.854439 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83872\nI0818 17:13:10.854701 20842 solver.cpp:404]     Test net output #1: loss = 0.720846 (* 1 = 0.720846 loss)\nI0818 17:13:12.172211 20842 solver.cpp:228] Iteration 5500, loss = 0.0267912\nI0818 17:13:12.172260 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:13:12.172276 20842 solver.cpp:244]     Train net output #1: loss = 0.0267912 (* 1 = 0.0267912 loss)\nI0818 17:13:12.251585 20842 sgd_solver.cpp:166] Iteration 5500, lr = 0.1375\nI0818 
17:15:29.629672 20842 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0818 17:16:52.913254 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83456\nI0818 17:16:52.913489 20842 solver.cpp:404]     Test net output #1: loss = 0.729861 (* 1 = 0.729861 loss)\nI0818 17:16:54.229033 20842 solver.cpp:228] Iteration 5600, loss = 0.0497658\nI0818 17:16:54.229079 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:16:54.229095 20842 solver.cpp:244]     Train net output #1: loss = 0.0497658 (* 1 = 0.0497658 loss)\nI0818 17:16:54.314780 20842 sgd_solver.cpp:166] Iteration 5600, lr = 0.14\nI0818 17:19:11.666317 20842 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0818 17:20:34.952421 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83428\nI0818 17:20:34.952659 20842 solver.cpp:404]     Test net output #1: loss = 0.744407 (* 1 = 0.744407 loss)\nI0818 17:20:36.268014 20842 solver.cpp:228] Iteration 5700, loss = 0.0737335\nI0818 17:20:36.268059 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:20:36.268076 20842 solver.cpp:244]     Train net output #1: loss = 0.0737335 (* 1 = 0.0737335 loss)\nI0818 17:20:36.358878 20842 sgd_solver.cpp:166] Iteration 5700, lr = 0.1425\nI0818 17:22:53.656014 20842 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0818 17:24:16.938498 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83968\nI0818 17:24:16.938737 20842 solver.cpp:404]     Test net output #1: loss = 0.70792 (* 1 = 0.70792 loss)\nI0818 17:24:18.255086 20842 solver.cpp:228] Iteration 5800, loss = 0.0729018\nI0818 17:24:18.255128 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:24:18.255146 20842 solver.cpp:244]     Train net output #1: loss = 0.0729019 (* 1 = 0.0729019 loss)\nI0818 17:24:18.342254 20842 sgd_solver.cpp:166] Iteration 5800, lr = 0.145\nI0818 17:26:35.809276 20842 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0818 17:27:59.091825 20842 solver.cpp:404]     Test net 
output #0: accuracy = 0.8342\nI0818 17:27:59.092078 20842 solver.cpp:404]     Test net output #1: loss = 0.749968 (* 1 = 0.749968 loss)\nI0818 17:28:00.407611 20842 solver.cpp:228] Iteration 5900, loss = 0.0752886\nI0818 17:28:00.407654 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:28:00.407670 20842 solver.cpp:244]     Train net output #1: loss = 0.0752886 (* 1 = 0.0752886 loss)\nI0818 17:28:00.493175 20842 sgd_solver.cpp:166] Iteration 5900, lr = 0.1475\nI0818 17:30:17.822939 20842 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0818 17:31:41.183094 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83696\nI0818 17:31:41.183326 20842 solver.cpp:404]     Test net output #1: loss = 0.749838 (* 1 = 0.749838 loss)\nI0818 17:31:42.498749 20842 solver.cpp:228] Iteration 6000, loss = 0.0481092\nI0818 17:31:42.498791 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:31:42.498807 20842 solver.cpp:244]     Train net output #1: loss = 0.0481093 (* 1 = 0.0481093 loss)\nI0818 17:31:42.584683 20842 sgd_solver.cpp:166] Iteration 6000, lr = 0.15\nI0818 17:33:59.962643 20842 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0818 17:35:23.337479 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84256\nI0818 17:35:23.337745 20842 solver.cpp:404]     Test net output #1: loss = 0.711165 (* 1 = 0.711165 loss)\nI0818 17:35:24.653540 20842 solver.cpp:228] Iteration 6100, loss = 0.0575711\nI0818 17:35:24.653581 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:35:24.653599 20842 solver.cpp:244]     Train net output #1: loss = 0.0575712 (* 1 = 0.0575712 loss)\nI0818 17:35:24.747198 20842 sgd_solver.cpp:166] Iteration 6100, lr = 0.1525\nI0818 17:37:42.066218 20842 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0818 17:39:05.440515 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83908\nI0818 17:39:05.440770 20842 solver.cpp:404]     Test net output #1: loss = 0.714601 (* 1 = 
0.714601 loss)\nI0818 17:39:06.756603 20842 solver.cpp:228] Iteration 6200, loss = 0.0577574\nI0818 17:39:06.756645 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:39:06.756661 20842 solver.cpp:244]     Train net output #1: loss = 0.0577575 (* 1 = 0.0577575 loss)\nI0818 17:39:06.850273 20842 sgd_solver.cpp:166] Iteration 6200, lr = 0.155\nI0818 17:41:24.285969 20842 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0818 17:42:47.674206 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI0818 17:42:47.674458 20842 solver.cpp:404]     Test net output #1: loss = 0.731346 (* 1 = 0.731346 loss)\nI0818 17:42:48.986413 20842 solver.cpp:228] Iteration 6300, loss = 0.0421218\nI0818 17:42:48.986455 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:42:48.986471 20842 solver.cpp:244]     Train net output #1: loss = 0.0421218 (* 1 = 0.0421218 loss)\nI0818 17:42:49.087441 20842 sgd_solver.cpp:166] Iteration 6300, lr = 0.1575\nI0818 17:45:06.497160 20842 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0818 17:46:29.881742 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8454\nI0818 17:46:29.881989 20842 solver.cpp:404]     Test net output #1: loss = 0.70831 (* 1 = 0.70831 loss)\nI0818 17:46:31.192980 20842 solver.cpp:228] Iteration 6400, loss = 0.0241445\nI0818 17:46:31.193022 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:46:31.193038 20842 solver.cpp:244]     Train net output #1: loss = 0.0241446 (* 1 = 0.0241446 loss)\nI0818 17:46:31.284680 20842 sgd_solver.cpp:166] Iteration 6400, lr = 0.16\nI0818 17:48:48.630080 20842 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0818 17:50:12.008296 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84712\nI0818 17:50:12.008555 20842 solver.cpp:404]     Test net output #1: loss = 0.695263 (* 1 = 0.695263 loss)\nI0818 17:50:13.319706 20842 solver.cpp:228] Iteration 6500, loss = 0.0668582\nI0818 17:50:13.319746 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:50:13.319763 20842 solver.cpp:244]     Train net output #1: loss = 0.0668582 (* 1 = 0.0668582 loss)\nI0818 17:50:13.407192 20842 sgd_solver.cpp:166] Iteration 6500, lr = 0.1625\nI0818 17:52:30.784549 20842 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0818 17:53:54.159497 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84608\nI0818 17:53:54.159718 20842 solver.cpp:404]     Test net output #1: loss = 0.704453 (* 1 = 0.704453 loss)\nI0818 17:53:55.471112 20842 solver.cpp:228] Iteration 6600, loss = 0.0831617\nI0818 17:53:55.471153 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:53:55.471169 20842 solver.cpp:244]     Train net output #1: loss = 0.0831618 (* 1 = 0.0831618 loss)\nI0818 17:53:55.567142 20842 sgd_solver.cpp:166] Iteration 6600, lr = 0.165\nI0818 17:56:12.978844 20842 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0818 17:57:36.354353 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84484\nI0818 17:57:36.354568 20842 solver.cpp:404]     Test net output #1: loss = 0.688889 (* 1 = 0.688889 loss)\nI0818 17:57:37.666425 20842 solver.cpp:228] Iteration 6700, loss = 0.0366435\nI0818 17:57:37.666471 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:57:37.666492 20842 solver.cpp:244]     Train net output #1: loss = 0.0366436 (* 1 = 0.0366436 loss)\nI0818 17:57:37.757581 20842 sgd_solver.cpp:166] Iteration 6700, lr = 0.1675\nI0818 17:59:55.125406 20842 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0818 18:01:18.504549 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84392\nI0818 18:01:18.504788 20842 solver.cpp:404]     Test net output #1: loss = 0.686459 (* 1 = 0.686459 loss)\nI0818 18:01:19.816403 20842 solver.cpp:228] Iteration 6800, loss = 0.0925137\nI0818 18:01:19.816447 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:01:19.816463 20842 solver.cpp:244]     Train net output 
#1: loss = 0.0925138 (* 1 = 0.0925138 loss)\nI0818 18:01:19.918149 20842 sgd_solver.cpp:166] Iteration 6800, lr = 0.17\nI0818 18:03:37.282660 20842 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0818 18:05:00.656800 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85076\nI0818 18:05:00.657001 20842 solver.cpp:404]     Test net output #1: loss = 0.665307 (* 1 = 0.665307 loss)\nI0818 18:05:01.968616 20842 solver.cpp:228] Iteration 6900, loss = 0.0185902\nI0818 18:05:01.968655 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:05:01.968672 20842 solver.cpp:244]     Train net output #1: loss = 0.0185903 (* 1 = 0.0185903 loss)\nI0818 18:05:02.059890 20842 sgd_solver.cpp:166] Iteration 6900, lr = 0.1725\nI0818 18:07:19.454159 20842 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0818 18:08:42.825150 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84424\nI0818 18:08:42.825387 20842 solver.cpp:404]     Test net output #1: loss = 0.693753 (* 1 = 0.693753 loss)\nI0818 18:08:44.136945 20842 solver.cpp:228] Iteration 7000, loss = 0.0166114\nI0818 18:08:44.136986 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:08:44.137003 20842 solver.cpp:244]     Train net output #1: loss = 0.0166114 (* 1 = 0.0166114 loss)\nI0818 18:08:44.233554 20842 sgd_solver.cpp:166] Iteration 7000, lr = 0.175\nI0818 18:11:01.689208 20842 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0818 18:12:25.069190 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83924\nI0818 18:12:25.069437 20842 solver.cpp:404]     Test net output #1: loss = 0.705752 (* 1 = 0.705752 loss)\nI0818 18:12:26.380976 20842 solver.cpp:228] Iteration 7100, loss = 0.0776872\nI0818 18:12:26.381016 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:12:26.381032 20842 solver.cpp:244]     Train net output #1: loss = 0.0776873 (* 1 = 0.0776873 loss)\nI0818 18:12:26.478786 20842 sgd_solver.cpp:166] Iteration 7100, lr = 0.1775\nI0818 
18:14:43.886395 20842 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0818 18:16:07.263591 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84704\nI0818 18:16:07.263833 20842 solver.cpp:404]     Test net output #1: loss = 0.668325 (* 1 = 0.668325 loss)\nI0818 18:16:08.575529 20842 solver.cpp:228] Iteration 7200, loss = 0.0254671\nI0818 18:16:08.575572 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:16:08.575587 20842 solver.cpp:244]     Train net output #1: loss = 0.0254672 (* 1 = 0.0254672 loss)\nI0818 18:16:08.666636 20842 sgd_solver.cpp:166] Iteration 7200, lr = 0.18\nI0818 18:18:26.149740 20842 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0818 18:19:49.524842 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84688\nI0818 18:19:49.525084 20842 solver.cpp:404]     Test net output #1: loss = 0.669004 (* 1 = 0.669004 loss)\nI0818 18:19:50.836320 20842 solver.cpp:228] Iteration 7300, loss = 0.0535424\nI0818 18:19:50.836362 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:19:50.836380 20842 solver.cpp:244]     Train net output #1: loss = 0.0535425 (* 1 = 0.0535425 loss)\nI0818 18:19:50.926779 20842 sgd_solver.cpp:166] Iteration 7300, lr = 0.1825\nI0818 18:22:08.289232 20842 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0818 18:23:31.672211 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84996\nI0818 18:23:31.672467 20842 solver.cpp:404]     Test net output #1: loss = 0.671794 (* 1 = 0.671794 loss)\nI0818 18:23:32.983492 20842 solver.cpp:228] Iteration 7400, loss = 0.116442\nI0818 18:23:32.983536 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 18:23:32.983552 20842 solver.cpp:244]     Train net output #1: loss = 0.116442 (* 1 = 0.116442 loss)\nI0818 18:23:33.079434 20842 sgd_solver.cpp:166] Iteration 7400, lr = 0.185\nI0818 18:25:50.522895 20842 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0818 18:27:13.905676 20842 solver.cpp:404]     Test net 
output #0: accuracy = 0.8518\nI0818 18:27:13.905941 20842 solver.cpp:404]     Test net output #1: loss = 0.660656 (* 1 = 0.660656 loss)\nI0818 18:27:15.217157 20842 solver.cpp:228] Iteration 7500, loss = 0.0296601\nI0818 18:27:15.217200 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:27:15.217217 20842 solver.cpp:244]     Train net output #1: loss = 0.0296602 (* 1 = 0.0296602 loss)\nI0818 18:27:15.310040 20842 sgd_solver.cpp:166] Iteration 7500, lr = 0.1875\nI0818 18:29:32.813149 20842 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0818 18:30:56.191807 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85176\nI0818 18:30:56.192020 20842 solver.cpp:404]     Test net output #1: loss = 0.681982 (* 1 = 0.681982 loss)\nI0818 18:30:57.503803 20842 solver.cpp:228] Iteration 7600, loss = 0.0333634\nI0818 18:30:57.503844 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:30:57.503860 20842 solver.cpp:244]     Train net output #1: loss = 0.0333635 (* 1 = 0.0333635 loss)\nI0818 18:30:57.604111 20842 sgd_solver.cpp:166] Iteration 7600, lr = 0.19\nI0818 18:33:15.101737 20842 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0818 18:34:38.483645 20842 solver.cpp:404]     Test net output #0: accuracy = 0.847961\nI0818 18:34:38.483842 20842 solver.cpp:404]     Test net output #1: loss = 0.65822 (* 1 = 0.65822 loss)\nI0818 18:34:39.795461 20842 solver.cpp:228] Iteration 7700, loss = 0.0363202\nI0818 18:34:39.795511 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:34:39.795527 20842 solver.cpp:244]     Train net output #1: loss = 0.0363203 (* 1 = 0.0363203 loss)\nI0818 18:34:39.899914 20842 sgd_solver.cpp:166] Iteration 7700, lr = 0.1925\nI0818 18:36:57.371598 20842 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0818 18:38:20.750730 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8494\nI0818 18:38:20.750988 20842 solver.cpp:404]     Test net output #1: loss = 0.690016 (* 1 = 
0.690016 loss)\nI0818 18:38:22.062456 20842 solver.cpp:228] Iteration 7800, loss = 0.0576675\nI0818 18:38:22.062506 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:38:22.062523 20842 solver.cpp:244]     Train net output #1: loss = 0.0576676 (* 1 = 0.0576676 loss)\nI0818 18:38:22.158586 20842 sgd_solver.cpp:166] Iteration 7800, lr = 0.195\nI0818 18:40:39.588021 20842 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0818 18:42:02.964388 20842 solver.cpp:404]     Test net output #0: accuracy = 0.83908\nI0818 18:42:02.964642 20842 solver.cpp:404]     Test net output #1: loss = 0.682611 (* 1 = 0.682611 loss)\nI0818 18:42:04.275746 20842 solver.cpp:228] Iteration 7900, loss = 0.0850877\nI0818 18:42:04.275790 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:42:04.275807 20842 solver.cpp:244]     Train net output #1: loss = 0.0850878 (* 1 = 0.0850878 loss)\nI0818 18:42:04.376092 20842 sgd_solver.cpp:166] Iteration 7900, lr = 0.1975\nI0818 18:44:21.752241 20842 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0818 18:45:45.122762 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84592\nI0818 18:45:45.123011 20842 solver.cpp:404]     Test net output #1: loss = 0.672226 (* 1 = 0.672226 loss)\nI0818 18:45:46.435042 20842 solver.cpp:228] Iteration 8000, loss = 0.0196703\nI0818 18:45:46.435088 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:45:46.435106 20842 solver.cpp:244]     Train net output #1: loss = 0.0196705 (* 1 = 0.0196705 loss)\nI0818 18:45:46.530827 20842 sgd_solver.cpp:166] Iteration 8000, lr = 0.2\nI0818 18:48:03.877537 20842 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0818 18:49:27.255738 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8492\nI0818 18:49:27.255949 20842 solver.cpp:404]     Test net output #1: loss = 0.638055 (* 1 = 0.638055 loss)\nI0818 18:49:28.568055 20842 solver.cpp:228] Iteration 8100, loss = 0.0341061\nI0818 18:49:28.568100 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:49:28.568117 20842 solver.cpp:244]     Train net output #1: loss = 0.0341062 (* 1 = 0.0341062 loss)\nI0818 18:49:28.660959 20842 sgd_solver.cpp:166] Iteration 8100, lr = 0.2025\nI0818 18:51:46.030375 20842 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0818 18:53:09.404752 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85132\nI0818 18:53:09.404968 20842 solver.cpp:404]     Test net output #1: loss = 0.649649 (* 1 = 0.649649 loss)\nI0818 18:53:10.717139 20842 solver.cpp:228] Iteration 8200, loss = 0.0431039\nI0818 18:53:10.717185 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:53:10.717203 20842 solver.cpp:244]     Train net output #1: loss = 0.043104 (* 1 = 0.043104 loss)\nI0818 18:53:10.814508 20842 sgd_solver.cpp:166] Iteration 8200, lr = 0.205\nI0818 18:55:28.175444 20842 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0818 18:56:51.544054 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84896\nI0818 18:56:51.544291 20842 solver.cpp:404]     Test net output #1: loss = 0.660656 (* 1 = 0.660656 loss)\nI0818 18:56:52.858973 20842 solver.cpp:228] Iteration 8300, loss = 0.0632378\nI0818 18:56:52.859022 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:56:52.859038 20842 solver.cpp:244]     Train net output #1: loss = 0.0632379 (* 1 = 0.0632379 loss)\nI0818 18:56:52.952906 20842 sgd_solver.cpp:166] Iteration 8300, lr = 0.2075\nI0818 18:59:10.366525 20842 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0818 19:00:33.731756 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84368\nI0818 19:00:33.731972 20842 solver.cpp:404]     Test net output #1: loss = 0.695812 (* 1 = 0.695812 loss)\nI0818 19:00:35.043359 20842 solver.cpp:228] Iteration 8400, loss = 0.0834181\nI0818 19:00:35.043404 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:00:35.043421 20842 solver.cpp:244]     Train net output #1: 
loss = 0.0834182 (* 1 = 0.0834182 loss)\nI0818 19:00:35.135725 20842 sgd_solver.cpp:166] Iteration 8400, lr = 0.21\nI0818 19:02:52.551615 20842 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0818 19:04:15.916373 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85564\nI0818 19:04:15.916620 20842 solver.cpp:404]     Test net output #1: loss = 0.629843 (* 1 = 0.629843 loss)\nI0818 19:04:17.228080 20842 solver.cpp:228] Iteration 8500, loss = 0.083726\nI0818 19:04:17.228123 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:04:17.228139 20842 solver.cpp:244]     Train net output #1: loss = 0.0837261 (* 1 = 0.0837261 loss)\nI0818 19:04:17.325453 20842 sgd_solver.cpp:166] Iteration 8500, lr = 0.2125\nI0818 19:06:34.785897 20842 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0818 19:07:58.155177 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84696\nI0818 19:07:58.155444 20842 solver.cpp:404]     Test net output #1: loss = 0.654898 (* 1 = 0.654898 loss)\nI0818 19:07:59.467133 20842 solver.cpp:228] Iteration 8600, loss = 0.110953\nI0818 19:07:59.467175 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:07:59.467191 20842 solver.cpp:244]     Train net output #1: loss = 0.110953 (* 1 = 0.110953 loss)\nI0818 19:07:59.563285 20842 sgd_solver.cpp:166] Iteration 8600, lr = 0.215\nI0818 19:10:16.978370 20842 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0818 19:11:40.344099 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85592\nI0818 19:11:40.344360 20842 solver.cpp:404]     Test net output #1: loss = 0.624205 (* 1 = 0.624205 loss)\nI0818 19:11:41.655918 20842 solver.cpp:228] Iteration 8700, loss = 0.0617344\nI0818 19:11:41.655962 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:11:41.655978 20842 solver.cpp:244]     Train net output #1: loss = 0.0617345 (* 1 = 0.0617345 loss)\nI0818 19:11:41.745743 20842 sgd_solver.cpp:166] Iteration 8700, lr = 0.2175\nI0818 
19:13:59.186290 20842 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0818 19:15:22.455474 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85716\nI0818 19:15:22.455740 20842 solver.cpp:404]     Test net output #1: loss = 0.604233 (* 1 = 0.604233 loss)\nI0818 19:15:23.768106 20842 solver.cpp:228] Iteration 8800, loss = 0.0530541\nI0818 19:15:23.768151 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:15:23.768167 20842 solver.cpp:244]     Train net output #1: loss = 0.0530541 (* 1 = 0.0530541 loss)\nI0818 19:15:23.861348 20842 sgd_solver.cpp:166] Iteration 8800, lr = 0.22\nI0818 19:17:41.304880 20842 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0818 19:19:04.580157 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85516\nI0818 19:19:04.580416 20842 solver.cpp:404]     Test net output #1: loss = 0.619647 (* 1 = 0.619647 loss)\nI0818 19:19:05.892520 20842 solver.cpp:228] Iteration 8900, loss = 0.0623511\nI0818 19:19:05.892566 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:19:05.892582 20842 solver.cpp:244]     Train net output #1: loss = 0.0623512 (* 1 = 0.0623512 loss)\nI0818 19:19:05.988550 20842 sgd_solver.cpp:166] Iteration 8900, lr = 0.2225\nI0818 19:21:23.323977 20842 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0818 19:22:46.595039 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84752\nI0818 19:22:46.595312 20842 solver.cpp:404]     Test net output #1: loss = 0.646543 (* 1 = 0.646543 loss)\nI0818 19:22:47.911661 20842 solver.cpp:228] Iteration 9000, loss = 0.0802582\nI0818 19:22:47.911706 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:22:47.911723 20842 solver.cpp:244]     Train net output #1: loss = 0.0802583 (* 1 = 0.0802583 loss)\nI0818 19:22:47.998178 20842 sgd_solver.cpp:166] Iteration 9000, lr = 0.225\nI0818 19:25:05.397771 20842 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0818 19:26:28.662870 20842 solver.cpp:404]     Test 
net output #0: accuracy = 0.85376\nI0818 19:26:28.663102 20842 solver.cpp:404]     Test net output #1: loss = 0.622058 (* 1 = 0.622058 loss)\nI0818 19:26:29.979235 20842 solver.cpp:228] Iteration 9100, loss = 0.0372107\nI0818 19:26:29.979279 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:26:29.979296 20842 solver.cpp:244]     Train net output #1: loss = 0.0372108 (* 1 = 0.0372108 loss)\nI0818 19:26:30.066298 20842 sgd_solver.cpp:166] Iteration 9100, lr = 0.2275\nI0818 19:28:47.487800 20842 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0818 19:30:10.768853 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85748\nI0818 19:30:10.769115 20842 solver.cpp:404]     Test net output #1: loss = 0.621019 (* 1 = 0.621019 loss)\nI0818 19:30:12.085523 20842 solver.cpp:228] Iteration 9200, loss = 0.0276618\nI0818 19:30:12.085569 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:30:12.085587 20842 solver.cpp:244]     Train net output #1: loss = 0.0276619 (* 1 = 0.0276619 loss)\nI0818 19:30:12.174481 20842 sgd_solver.cpp:166] Iteration 9200, lr = 0.23\nI0818 19:32:29.637845 20842 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0818 19:33:52.903419 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8516\nI0818 19:33:52.903698 20842 solver.cpp:404]     Test net output #1: loss = 0.646738 (* 1 = 0.646738 loss)\nI0818 19:33:54.219573 20842 solver.cpp:228] Iteration 9300, loss = 0.0528743\nI0818 19:33:54.219616 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:33:54.219633 20842 solver.cpp:244]     Train net output #1: loss = 0.0528743 (* 1 = 0.0528743 loss)\nI0818 19:33:54.306243 20842 sgd_solver.cpp:166] Iteration 9300, lr = 0.2325\nI0818 19:36:11.618310 20842 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0818 19:37:34.887970 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85004\nI0818 19:37:34.888237 20842 solver.cpp:404]     Test net output #1: loss = 0.622942 (* 1 = 
0.622942 loss)\nI0818 19:37:36.203470 20842 solver.cpp:228] Iteration 9400, loss = 0.083314\nI0818 19:37:36.203519 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:37:36.203546 20842 solver.cpp:244]     Train net output #1: loss = 0.0833141 (* 1 = 0.0833141 loss)\nI0818 19:37:36.299139 20842 sgd_solver.cpp:166] Iteration 9400, lr = 0.235\nI0818 19:39:53.741850 20842 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0818 19:41:17.011107 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8576\nI0818 19:41:17.011366 20842 solver.cpp:404]     Test net output #1: loss = 0.593471 (* 1 = 0.593471 loss)\nI0818 19:41:18.328177 20842 solver.cpp:228] Iteration 9500, loss = 0.0320975\nI0818 19:41:18.328220 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:41:18.328236 20842 solver.cpp:244]     Train net output #1: loss = 0.0320976 (* 1 = 0.0320976 loss)\nI0818 19:41:18.419821 20842 sgd_solver.cpp:166] Iteration 9500, lr = 0.2375\nI0818 19:43:35.844254 20842 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0818 19:44:59.104311 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8554\nI0818 19:44:59.104580 20842 solver.cpp:404]     Test net output #1: loss = 0.625986 (* 1 = 0.625986 loss)\nI0818 19:45:00.420931 20842 solver.cpp:228] Iteration 9600, loss = 0.0433798\nI0818 19:45:00.420975 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:45:00.420991 20842 solver.cpp:244]     Train net output #1: loss = 0.0433799 (* 1 = 0.0433799 loss)\nI0818 19:45:00.513856 20842 sgd_solver.cpp:166] Iteration 9600, lr = 0.24\nI0818 19:47:17.907016 20842 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0818 19:48:41.268455 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8526\nI0818 19:48:41.268733 20842 solver.cpp:404]     Test net output #1: loss = 0.618518 (* 1 = 0.618518 loss)\nI0818 19:48:42.585335 20842 solver.cpp:228] Iteration 9700, loss = 0.0309873\nI0818 19:48:42.585379 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:48:42.585395 20842 solver.cpp:244]     Train net output #1: loss = 0.0309874 (* 1 = 0.0309874 loss)\nI0818 19:48:42.670243 20842 sgd_solver.cpp:166] Iteration 9700, lr = 0.2425\nI0818 19:51:00.059185 20842 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0818 19:52:23.421021 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85604\nI0818 19:52:23.421281 20842 solver.cpp:404]     Test net output #1: loss = 0.606927 (* 1 = 0.606927 loss)\nI0818 19:52:24.737727 20842 solver.cpp:228] Iteration 9800, loss = 0.0552563\nI0818 19:52:24.737771 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:52:24.737787 20842 solver.cpp:244]     Train net output #1: loss = 0.0552564 (* 1 = 0.0552564 loss)\nI0818 19:52:24.828788 20842 sgd_solver.cpp:166] Iteration 9800, lr = 0.245\nI0818 19:54:42.279628 20842 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0818 19:56:05.634610 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85556\nI0818 19:56:05.634835 20842 solver.cpp:404]     Test net output #1: loss = 0.641306 (* 1 = 0.641306 loss)\nI0818 19:56:06.950642 20842 solver.cpp:228] Iteration 9900, loss = 0.00666152\nI0818 19:56:06.950685 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 19:56:06.950702 20842 solver.cpp:244]     Train net output #1: loss = 0.00666163 (* 1 = 0.00666163 loss)\nI0818 19:56:07.046226 20842 sgd_solver.cpp:166] Iteration 9900, lr = 0.2475\nI0818 19:58:24.372810 20842 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0818 19:59:47.731767 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8544\nI0818 19:59:47.732029 20842 solver.cpp:404]     Test net output #1: loss = 0.6257 (* 1 = 0.6257 loss)\nI0818 19:59:49.048055 20842 solver.cpp:228] Iteration 10000, loss = 0.0813578\nI0818 19:59:49.048099 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:59:49.048115 20842 solver.cpp:244]     Train net output #1: 
loss = 0.0813579 (* 1 = 0.0813579 loss)\nI0818 19:59:49.143924 20842 sgd_solver.cpp:166] Iteration 10000, lr = 0.25\nI0818 20:02:06.518407 20842 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0818 20:03:29.881220 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85572\nI0818 20:03:29.881480 20842 solver.cpp:404]     Test net output #1: loss = 0.596404 (* 1 = 0.596404 loss)\nI0818 20:03:31.197681 20842 solver.cpp:228] Iteration 10100, loss = 0.0579915\nI0818 20:03:31.197721 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:03:31.197737 20842 solver.cpp:244]     Train net output #1: loss = 0.0579916 (* 1 = 0.0579916 loss)\nI0818 20:03:31.291692 20842 sgd_solver.cpp:166] Iteration 10100, lr = 0.2525\nI0818 20:05:48.723928 20842 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0818 20:07:12.087503 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85816\nI0818 20:07:12.087767 20842 solver.cpp:404]     Test net output #1: loss = 0.594635 (* 1 = 0.594635 loss)\nI0818 20:07:13.403908 20842 solver.cpp:228] Iteration 10200, loss = 0.101389\nI0818 20:07:13.403951 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:07:13.403967 20842 solver.cpp:244]     Train net output #1: loss = 0.101389 (* 1 = 0.101389 loss)\nI0818 20:07:13.498798 20842 sgd_solver.cpp:166] Iteration 10200, lr = 0.255\nI0818 20:09:30.875285 20842 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0818 20:10:54.235749 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86072\nI0818 20:10:54.236011 20842 solver.cpp:404]     Test net output #1: loss = 0.595829 (* 1 = 0.595829 loss)\nI0818 20:10:55.551710 20842 solver.cpp:228] Iteration 10300, loss = 0.124277\nI0818 20:10:55.551743 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:10:55.551759 20842 solver.cpp:244]     Train net output #1: loss = 0.124277 (* 1 = 0.124277 loss)\nI0818 20:10:55.644810 20842 sgd_solver.cpp:166] Iteration 10300, lr = 0.2575\nI0818 
20:13:13.025256 20842 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0818 20:14:36.397497 20842 solver.cpp:404]     Test net output #0: accuracy = 0.84656\nI0818 20:14:36.397763 20842 solver.cpp:404]     Test net output #1: loss = 0.631801 (* 1 = 0.631801 loss)\nI0818 20:14:37.714130 20842 solver.cpp:228] Iteration 10400, loss = 0.0785656\nI0818 20:14:37.714174 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:14:37.714190 20842 solver.cpp:244]     Train net output #1: loss = 0.0785657 (* 1 = 0.0785657 loss)\nI0818 20:14:37.804394 20842 sgd_solver.cpp:166] Iteration 10400, lr = 0.26\nI0818 20:16:55.220791 20842 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0818 20:18:18.586959 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85636\nI0818 20:18:18.587230 20842 solver.cpp:404]     Test net output #1: loss = 0.584936 (* 1 = 0.584936 loss)\nI0818 20:18:19.902724 20842 solver.cpp:228] Iteration 10500, loss = 0.0768198\nI0818 20:18:19.902765 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:18:19.902781 20842 solver.cpp:244]     Train net output #1: loss = 0.0768199 (* 1 = 0.0768199 loss)\nI0818 20:18:19.998438 20842 sgd_solver.cpp:166] Iteration 10500, lr = 0.2625\nI0818 20:20:37.472560 20842 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0818 20:22:00.837038 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86076\nI0818 20:22:00.837301 20842 solver.cpp:404]     Test net output #1: loss = 0.590934 (* 1 = 0.590934 loss)\nI0818 20:22:02.153142 20842 solver.cpp:228] Iteration 10600, loss = 0.0654921\nI0818 20:22:02.153184 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:22:02.153203 20842 solver.cpp:244]     Train net output #1: loss = 0.0654922 (* 1 = 0.0654922 loss)\nI0818 20:22:02.238996 20842 sgd_solver.cpp:166] Iteration 10600, lr = 0.265\nI0818 20:24:19.643149 20842 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0818 20:25:43.011143 20842 solver.cpp:404]   
  Test net output #0: accuracy = 0.85572\nI0818 20:25:43.011416 20842 solver.cpp:404]     Test net output #1: loss = 0.608149 (* 1 = 0.608149 loss)\nI0818 20:25:44.328080 20842 solver.cpp:228] Iteration 10700, loss = 0.0275355\nI0818 20:25:44.328116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:25:44.328132 20842 solver.cpp:244]     Train net output #1: loss = 0.0275356 (* 1 = 0.0275356 loss)\nI0818 20:25:44.418864 20842 sgd_solver.cpp:166] Iteration 10700, lr = 0.2675\nI0818 20:28:01.916152 20842 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0818 20:29:25.281811 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86116\nI0818 20:29:25.282080 20842 solver.cpp:404]     Test net output #1: loss = 0.567692 (* 1 = 0.567692 loss)\nI0818 20:29:26.598234 20842 solver.cpp:228] Iteration 10800, loss = 0.0592035\nI0818 20:29:26.598269 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:29:26.598284 20842 solver.cpp:244]     Train net output #1: loss = 0.0592035 (* 1 = 0.0592035 loss)\nI0818 20:29:26.690651 20842 sgd_solver.cpp:166] Iteration 10800, lr = 0.27\nI0818 20:31:44.120272 20842 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0818 20:33:07.480959 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85504\nI0818 20:33:07.481220 20842 solver.cpp:404]     Test net output #1: loss = 0.596835 (* 1 = 0.596835 loss)\nI0818 20:33:08.798226 20842 solver.cpp:228] Iteration 10900, loss = 0.039567\nI0818 20:33:08.798271 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:33:08.798287 20842 solver.cpp:244]     Train net output #1: loss = 0.0395671 (* 1 = 0.0395671 loss)\nI0818 20:33:08.893077 20842 sgd_solver.cpp:166] Iteration 10900, lr = 0.2725\nI0818 20:35:26.225551 20842 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0818 20:36:49.585168 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86476\nI0818 20:36:49.585407 20842 solver.cpp:404]     Test net output #1: loss = 
0.576465 (* 1 = 0.576465 loss)\nI0818 20:36:50.901715 20842 solver.cpp:228] Iteration 11000, loss = 0.0137966\nI0818 20:36:50.901759 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:36:50.901775 20842 solver.cpp:244]     Train net output #1: loss = 0.0137967 (* 1 = 0.0137967 loss)\nI0818 20:36:50.995795 20842 sgd_solver.cpp:166] Iteration 11000, lr = 0.275\nI0818 20:39:08.457150 20842 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0818 20:40:31.813912 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86092\nI0818 20:40:31.814168 20842 solver.cpp:404]     Test net output #1: loss = 0.573918 (* 1 = 0.573918 loss)\nI0818 20:40:33.129703 20842 solver.cpp:228] Iteration 11100, loss = 0.0367501\nI0818 20:40:33.129747 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:40:33.129763 20842 solver.cpp:244]     Train net output #1: loss = 0.0367502 (* 1 = 0.0367502 loss)\nI0818 20:40:33.217063 20842 sgd_solver.cpp:166] Iteration 11100, lr = 0.2775\nI0818 20:42:50.637907 20842 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0818 20:44:13.997143 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85344\nI0818 20:44:13.997417 20842 solver.cpp:404]     Test net output #1: loss = 0.62969 (* 1 = 0.62969 loss)\nI0818 20:44:15.314119 20842 solver.cpp:228] Iteration 11200, loss = 0.118235\nI0818 20:44:15.314165 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:44:15.314182 20842 solver.cpp:244]     Train net output #1: loss = 0.118235 (* 1 = 0.118235 loss)\nI0818 20:44:15.403645 20842 sgd_solver.cpp:166] Iteration 11200, lr = 0.28\nI0818 20:46:32.842869 20842 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0818 20:47:56.200278 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85644\nI0818 20:47:56.200554 20842 solver.cpp:404]     Test net output #1: loss = 0.606256 (* 1 = 0.606256 loss)\nI0818 20:47:57.517135 20842 solver.cpp:228] Iteration 11300, loss = 0.0632244\nI0818 
20:47:57.517171 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:47:57.517187 20842 solver.cpp:244]     Train net output #1: loss = 0.0632245 (* 1 = 0.0632245 loss)\nI0818 20:47:57.602640 20842 sgd_solver.cpp:166] Iteration 11300, lr = 0.2825\nI0818 20:50:15.023789 20842 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0818 20:51:38.382941 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85704\nI0818 20:51:38.383193 20842 solver.cpp:404]     Test net output #1: loss = 0.588885 (* 1 = 0.588885 loss)\nI0818 20:51:39.699924 20842 solver.cpp:228] Iteration 11400, loss = 0.135588\nI0818 20:51:39.699970 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:51:39.699986 20842 solver.cpp:244]     Train net output #1: loss = 0.135588 (* 1 = 0.135588 loss)\nI0818 20:51:39.791385 20842 sgd_solver.cpp:166] Iteration 11400, lr = 0.285\nI0818 20:53:57.165752 20842 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0818 20:55:20.525024 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86264\nI0818 20:55:20.525298 20842 solver.cpp:404]     Test net output #1: loss = 0.591697 (* 1 = 0.591697 loss)\nI0818 20:55:21.841511 20842 solver.cpp:228] Iteration 11500, loss = 0.0852888\nI0818 20:55:21.841557 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:55:21.841573 20842 solver.cpp:244]     Train net output #1: loss = 0.0852889 (* 1 = 0.0852889 loss)\nI0818 20:55:21.929551 20842 sgd_solver.cpp:166] Iteration 11500, lr = 0.2875\nI0818 20:57:39.309129 20842 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0818 20:59:02.668440 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86252\nI0818 20:59:02.668712 20842 solver.cpp:404]     Test net output #1: loss = 0.586545 (* 1 = 0.586545 loss)\nI0818 20:59:03.985781 20842 solver.cpp:228] Iteration 11600, loss = 0.010924\nI0818 20:59:03.985826 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:59:03.985843 20842 solver.cpp:244] 
    Train net output #1: loss = 0.0109241 (* 1 = 0.0109241 loss)\nI0818 20:59:04.079859 20842 sgd_solver.cpp:166] Iteration 11600, lr = 0.29\nI0818 21:01:21.365864 20842 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0818 21:02:44.727448 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86176\nI0818 21:02:44.727733 20842 solver.cpp:404]     Test net output #1: loss = 0.60039 (* 1 = 0.60039 loss)\nI0818 21:02:46.044308 20842 solver.cpp:228] Iteration 11700, loss = 0.0662201\nI0818 21:02:46.044351 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:02:46.044368 20842 solver.cpp:244]     Train net output #1: loss = 0.0662202 (* 1 = 0.0662202 loss)\nI0818 21:02:46.138914 20842 sgd_solver.cpp:166] Iteration 11700, lr = 0.2925\nI0818 21:05:03.481931 20842 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0818 21:06:26.848958 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8532\nI0818 21:06:26.849231 20842 solver.cpp:404]     Test net output #1: loss = 0.618693 (* 1 = 0.618693 loss)\nI0818 21:06:28.166112 20842 solver.cpp:228] Iteration 11800, loss = 0.103794\nI0818 21:06:28.166157 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:06:28.166173 20842 solver.cpp:244]     Train net output #1: loss = 0.103794 (* 1 = 0.103794 loss)\nI0818 21:06:28.258303 20842 sgd_solver.cpp:166] Iteration 11800, lr = 0.295\nI0818 21:08:45.621989 20842 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0818 21:10:08.984066 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8626\nI0818 21:10:08.984328 20842 solver.cpp:404]     Test net output #1: loss = 0.572505 (* 1 = 0.572505 loss)\nI0818 21:10:10.301048 20842 solver.cpp:228] Iteration 11900, loss = 0.0188253\nI0818 21:10:10.301081 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:10:10.301097 20842 solver.cpp:244]     Train net output #1: loss = 0.0188254 (* 1 = 0.0188254 loss)\nI0818 21:10:10.390961 20842 sgd_solver.cpp:166] Iteration 
11900, lr = 0.2975\nI0818 21:12:27.786137 20842 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0818 21:13:51.547118 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85736\nI0818 21:13:51.547433 20842 solver.cpp:404]     Test net output #1: loss = 0.601144 (* 1 = 0.601144 loss)\nI0818 21:13:52.867936 20842 solver.cpp:228] Iteration 12000, loss = 0.145497\nI0818 21:13:52.867998 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 21:13:52.868016 20842 solver.cpp:244]     Train net output #1: loss = 0.145497 (* 1 = 0.145497 loss)\nI0818 21:13:52.959079 20842 sgd_solver.cpp:166] Iteration 12000, lr = 0.3\nI0818 21:16:10.434669 20842 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0818 21:17:33.794190 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86432\nI0818 21:17:33.794474 20842 solver.cpp:404]     Test net output #1: loss = 0.585035 (* 1 = 0.585035 loss)\nI0818 21:17:35.109869 20842 solver.cpp:228] Iteration 12100, loss = 0.11037\nI0818 21:17:35.109912 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:17:35.109928 20842 solver.cpp:244]     Train net output #1: loss = 0.11037 (* 1 = 0.11037 loss)\nI0818 21:17:35.206048 20842 sgd_solver.cpp:166] Iteration 12100, lr = 0.3025\nI0818 21:19:52.533412 20842 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0818 21:21:15.894829 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86352\nI0818 21:21:15.895104 20842 solver.cpp:404]     Test net output #1: loss = 0.572732 (* 1 = 0.572732 loss)\nI0818 21:21:17.211202 20842 solver.cpp:228] Iteration 12200, loss = 0.078415\nI0818 21:21:17.211244 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:21:17.211261 20842 solver.cpp:244]     Train net output #1: loss = 0.0784152 (* 1 = 0.0784152 loss)\nI0818 21:21:17.305263 20842 sgd_solver.cpp:166] Iteration 12200, lr = 0.305\nI0818 21:23:34.791435 20842 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 21:24:58.153749 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.86712\nI0818 21:24:58.153997 20842 solver.cpp:404]     Test net output #1: loss = 0.577204 (* 1 = 0.577204 loss)\nI0818 21:24:59.470595 20842 solver.cpp:228] Iteration 12300, loss = 0.116496\nI0818 21:24:59.470631 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:24:59.470648 20842 solver.cpp:244]     Train net output #1: loss = 0.116496 (* 1 = 0.116496 loss)\nI0818 21:24:59.562523 20842 sgd_solver.cpp:166] Iteration 12300, lr = 0.3075\nI0818 21:27:16.921764 20842 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 21:28:40.288725 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86716\nI0818 21:28:40.288998 20842 solver.cpp:404]     Test net output #1: loss = 0.544679 (* 1 = 0.544679 loss)\nI0818 21:28:41.606113 20842 solver.cpp:228] Iteration 12400, loss = 0.0554443\nI0818 21:28:41.606158 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:28:41.606175 20842 solver.cpp:244]     Train net output #1: loss = 0.0554445 (* 1 = 0.0554445 loss)\nI0818 21:28:41.696638 20842 sgd_solver.cpp:166] Iteration 12400, lr = 0.31\nI0818 21:30:59.055685 20842 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 21:32:22.325584 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86476\nI0818 21:32:22.325840 20842 solver.cpp:404]     Test net output #1: loss = 0.557205 (* 1 = 0.557205 loss)\nI0818 21:32:23.641541 20842 solver.cpp:228] Iteration 12500, loss = 0.0823181\nI0818 21:32:23.641587 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 21:32:23.641603 20842 solver.cpp:244]     Train net output #1: loss = 0.0823182 (* 1 = 0.0823182 loss)\nI0818 21:32:23.729120 20842 sgd_solver.cpp:166] Iteration 12500, lr = 0.3125\nI0818 21:34:41.149905 20842 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 21:36:04.420121 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86724\nI0818 21:36:04.420380 20842 solver.cpp:404]     Test net 
output #1: loss = 0.555896 (* 1 = 0.555896 loss)\nI0818 21:36:05.736742 20842 solver.cpp:228] Iteration 12600, loss = 0.0324029\nI0818 21:36:05.736788 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:36:05.736804 20842 solver.cpp:244]     Train net output #1: loss = 0.032403 (* 1 = 0.032403 loss)\nI0818 21:36:05.821390 20842 sgd_solver.cpp:166] Iteration 12600, lr = 0.315\nI0818 21:38:23.237572 20842 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 21:39:46.501922 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0818 21:39:46.502171 20842 solver.cpp:404]     Test net output #1: loss = 0.555289 (* 1 = 0.555289 loss)\nI0818 21:39:47.817800 20842 solver.cpp:228] Iteration 12700, loss = 0.0655144\nI0818 21:39:47.817842 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:39:47.817859 20842 solver.cpp:244]     Train net output #1: loss = 0.0655145 (* 1 = 0.0655145 loss)\nI0818 21:39:47.902201 20842 sgd_solver.cpp:166] Iteration 12700, lr = 0.3175\nI0818 21:42:05.339237 20842 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 21:43:28.602800 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8702\nI0818 21:43:28.603035 20842 solver.cpp:404]     Test net output #1: loss = 0.545434 (* 1 = 0.545434 loss)\nI0818 21:43:29.919011 20842 solver.cpp:228] Iteration 12800, loss = 0.0151796\nI0818 21:43:29.919054 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:43:29.919070 20842 solver.cpp:244]     Train net output #1: loss = 0.0151797 (* 1 = 0.0151797 loss)\nI0818 21:43:30.003809 20842 sgd_solver.cpp:166] Iteration 12800, lr = 0.32\nI0818 21:45:47.400899 20842 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 21:47:10.671710 20842 solver.cpp:404]     Test net output #0: accuracy = 0.859\nI0818 21:47:10.671989 20842 solver.cpp:404]     Test net output #1: loss = 0.599238 (* 1 = 0.599238 loss)\nI0818 21:47:11.988481 20842 solver.cpp:228] Iteration 12900, loss = 
0.0442576\nI0818 21:47:11.988523 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:47:11.988540 20842 solver.cpp:244]     Train net output #1: loss = 0.0442578 (* 1 = 0.0442578 loss)\nI0818 21:47:12.077214 20842 sgd_solver.cpp:166] Iteration 12900, lr = 0.3225\nI0818 21:49:29.488634 20842 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 21:50:52.760351 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86256\nI0818 21:50:52.760573 20842 solver.cpp:404]     Test net output #1: loss = 0.567515 (* 1 = 0.567515 loss)\nI0818 21:50:54.076768 20842 solver.cpp:228] Iteration 13000, loss = 0.0634166\nI0818 21:50:54.076812 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:50:54.076829 20842 solver.cpp:244]     Train net output #1: loss = 0.0634167 (* 1 = 0.0634167 loss)\nI0818 21:50:54.163391 20842 sgd_solver.cpp:166] Iteration 13000, lr = 0.325\nI0818 21:53:11.546586 20842 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 21:54:34.816576 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0818 21:54:34.816812 20842 solver.cpp:404]     Test net output #1: loss = 0.540599 (* 1 = 0.540599 loss)\nI0818 21:54:36.132004 20842 solver.cpp:228] Iteration 13100, loss = 0.104073\nI0818 21:54:36.132046 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:54:36.132063 20842 solver.cpp:244]     Train net output #1: loss = 0.104073 (* 1 = 0.104073 loss)\nI0818 21:54:36.220614 20842 sgd_solver.cpp:166] Iteration 13100, lr = 0.3275\nI0818 21:56:53.616433 20842 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 21:58:16.891705 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86876\nI0818 21:58:16.891968 20842 solver.cpp:404]     Test net output #1: loss = 0.563846 (* 1 = 0.563846 loss)\nI0818 21:58:18.208468 20842 solver.cpp:228] Iteration 13200, loss = 0.0594813\nI0818 21:58:18.208513 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:58:18.208529 
20842 solver.cpp:244]     Train net output #1: loss = 0.0594814 (* 1 = 0.0594814 loss)\nI0818 21:58:18.301192 20842 sgd_solver.cpp:166] Iteration 13200, lr = 0.33\nI0818 22:00:35.647526 20842 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 22:01:58.919364 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86656\nI0818 22:01:58.919623 20842 solver.cpp:404]     Test net output #1: loss = 0.573889 (* 1 = 0.573889 loss)\nI0818 22:02:00.234786 20842 solver.cpp:228] Iteration 13300, loss = 0.0396515\nI0818 22:02:00.234823 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:02:00.234838 20842 solver.cpp:244]     Train net output #1: loss = 0.0396516 (* 1 = 0.0396516 loss)\nI0818 22:02:00.328044 20842 sgd_solver.cpp:166] Iteration 13300, lr = 0.3325\nI0818 22:04:17.701241 20842 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 22:05:41.064929 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86592\nI0818 22:05:41.065174 20842 solver.cpp:404]     Test net output #1: loss = 0.568199 (* 1 = 0.568199 loss)\nI0818 22:05:42.380851 20842 solver.cpp:228] Iteration 13400, loss = 0.054373\nI0818 22:05:42.380893 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:05:42.380909 20842 solver.cpp:244]     Train net output #1: loss = 0.0543731 (* 1 = 0.0543731 loss)\nI0818 22:05:42.471294 20842 sgd_solver.cpp:166] Iteration 13400, lr = 0.335\nI0818 22:07:59.914556 20842 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 22:09:23.285678 20842 solver.cpp:404]     Test net output #0: accuracy = 0.85764\nI0818 22:09:23.285909 20842 solver.cpp:404]     Test net output #1: loss = 0.587777 (* 1 = 0.587777 loss)\nI0818 22:09:24.601200 20842 solver.cpp:228] Iteration 13500, loss = 0.0802509\nI0818 22:09:24.601243 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:09:24.601260 20842 solver.cpp:244]     Train net output #1: loss = 0.080251 (* 1 = 0.080251 loss)\nI0818 22:09:24.693305 20842 
sgd_solver.cpp:166] Iteration 13500, lr = 0.3375\nI0818 22:11:42.170938 20842 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 22:13:05.542073 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86836\nI0818 22:13:05.542333 20842 solver.cpp:404]     Test net output #1: loss = 0.552137 (* 1 = 0.552137 loss)\nI0818 22:13:06.858284 20842 solver.cpp:228] Iteration 13600, loss = 0.0958097\nI0818 22:13:06.858325 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:13:06.858341 20842 solver.cpp:244]     Train net output #1: loss = 0.0958098 (* 1 = 0.0958098 loss)\nI0818 22:13:06.952852 20842 sgd_solver.cpp:166] Iteration 13600, lr = 0.34\nI0818 22:15:24.289655 20842 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 22:16:47.662150 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86684\nI0818 22:16:47.662406 20842 solver.cpp:404]     Test net output #1: loss = 0.560662 (* 1 = 0.560662 loss)\nI0818 22:16:48.977581 20842 solver.cpp:228] Iteration 13700, loss = 0.0498236\nI0818 22:16:48.977622 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:16:48.977638 20842 solver.cpp:244]     Train net output #1: loss = 0.0498237 (* 1 = 0.0498237 loss)\nI0818 22:16:49.074303 20842 sgd_solver.cpp:166] Iteration 13700, lr = 0.3425\nI0818 22:19:06.478171 20842 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 22:20:29.843783 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86676\nI0818 22:20:29.844035 20842 solver.cpp:404]     Test net output #1: loss = 0.546203 (* 1 = 0.546203 loss)\nI0818 22:20:31.159726 20842 solver.cpp:228] Iteration 13800, loss = 0.02594\nI0818 22:20:31.159767 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:20:31.159785 20842 solver.cpp:244]     Train net output #1: loss = 0.0259401 (* 1 = 0.0259401 loss)\nI0818 22:20:31.246274 20842 sgd_solver.cpp:166] Iteration 13800, lr = 0.345\nI0818 22:22:48.673348 20842 solver.cpp:337] Iteration 13900, Testing 
net (#0)\nI0818 22:24:12.044399 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0818 22:24:12.044657 20842 solver.cpp:404]     Test net output #1: loss = 0.531983 (* 1 = 0.531983 loss)\nI0818 22:24:13.360291 20842 solver.cpp:228] Iteration 13900, loss = 0.07453\nI0818 22:24:13.360325 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:24:13.360340 20842 solver.cpp:244]     Train net output #1: loss = 0.0745301 (* 1 = 0.0745301 loss)\nI0818 22:24:13.451509 20842 sgd_solver.cpp:166] Iteration 13900, lr = 0.3475\nI0818 22:26:30.828480 20842 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 22:27:54.199653 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8654\nI0818 22:27:54.199895 20842 solver.cpp:404]     Test net output #1: loss = 0.567899 (* 1 = 0.567899 loss)\nI0818 22:27:55.515837 20842 solver.cpp:228] Iteration 14000, loss = 0.0327445\nI0818 22:27:55.515879 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:27:55.515895 20842 solver.cpp:244]     Train net output #1: loss = 0.0327446 (* 1 = 0.0327446 loss)\nI0818 22:27:55.600100 20842 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 22:30:13.041997 20842 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 22:31:36.407284 20842 solver.cpp:404]     Test net output #0: accuracy = 0.867801\nI0818 22:31:36.407523 20842 solver.cpp:404]     Test net output #1: loss = 0.551984 (* 1 = 0.551984 loss)\nI0818 22:31:37.722857 20842 solver.cpp:228] Iteration 14100, loss = 0.0410051\nI0818 22:31:37.722898 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:31:37.722915 20842 solver.cpp:244]     Train net output #1: loss = 0.0410052 (* 1 = 0.0410052 loss)\nI0818 22:31:37.812427 20842 sgd_solver.cpp:166] Iteration 14100, lr = 0.3525\nI0818 22:33:55.223037 20842 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 22:35:18.588552 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86396\nI0818 22:35:18.588819 
20842 solver.cpp:404]     Test net output #1: loss = 0.559955 (* 1 = 0.559955 loss)\nI0818 22:35:19.904475 20842 solver.cpp:228] Iteration 14200, loss = 0.0532231\nI0818 22:35:19.904516 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:35:19.904533 20842 solver.cpp:244]     Train net output #1: loss = 0.0532232 (* 1 = 0.0532232 loss)\nI0818 22:35:19.999163 20842 sgd_solver.cpp:166] Iteration 14200, lr = 0.355\nI0818 22:37:37.461274 20842 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 22:39:00.831136 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86516\nI0818 22:39:00.831403 20842 solver.cpp:404]     Test net output #1: loss = 0.57187 (* 1 = 0.57187 loss)\nI0818 22:39:02.147392 20842 solver.cpp:228] Iteration 14300, loss = 0.0509196\nI0818 22:39:02.147431 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:39:02.147451 20842 solver.cpp:244]     Train net output #1: loss = 0.0509197 (* 1 = 0.0509197 loss)\nI0818 22:39:02.235564 20842 sgd_solver.cpp:166] Iteration 14300, lr = 0.3575\nI0818 22:41:19.652392 20842 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 22:42:43.024165 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86872\nI0818 22:42:43.024425 20842 solver.cpp:404]     Test net output #1: loss = 0.54986 (* 1 = 0.54986 loss)\nI0818 22:42:44.339946 20842 solver.cpp:228] Iteration 14400, loss = 0.0403535\nI0818 22:42:44.339980 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:42:44.339996 20842 solver.cpp:244]     Train net output #1: loss = 0.0403536 (* 1 = 0.0403536 loss)\nI0818 22:42:44.427642 20842 sgd_solver.cpp:166] Iteration 14400, lr = 0.36\nI0818 22:45:01.796854 20842 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 22:46:25.165011 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8724\nI0818 22:46:25.165274 20842 solver.cpp:404]     Test net output #1: loss = 0.527438 (* 1 = 0.527438 loss)\nI0818 22:46:26.481281 20842 
solver.cpp:228] Iteration 14500, loss = 0.0470816\nI0818 22:46:26.481323 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:46:26.481340 20842 solver.cpp:244]     Train net output #1: loss = 0.0470817 (* 1 = 0.0470817 loss)\nI0818 22:46:26.566481 20842 sgd_solver.cpp:166] Iteration 14500, lr = 0.3625\nI0818 22:48:43.963268 20842 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 22:50:07.330245 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87\nI0818 22:50:07.330510 20842 solver.cpp:404]     Test net output #1: loss = 0.542689 (* 1 = 0.542689 loss)\nI0818 22:50:08.646363 20842 solver.cpp:228] Iteration 14600, loss = 0.0299558\nI0818 22:50:08.646409 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:50:08.646425 20842 solver.cpp:244]     Train net output #1: loss = 0.0299559 (* 1 = 0.0299559 loss)\nI0818 22:50:08.737356 20842 sgd_solver.cpp:166] Iteration 14600, lr = 0.365\nI0818 22:52:26.186496 20842 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 22:53:49.551596 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87192\nI0818 22:53:49.551853 20842 solver.cpp:404]     Test net output #1: loss = 0.532305 (* 1 = 0.532305 loss)\nI0818 22:53:50.868268 20842 solver.cpp:228] Iteration 14700, loss = 0.05268\nI0818 22:53:50.868317 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:53:50.868341 20842 solver.cpp:244]     Train net output #1: loss = 0.0526801 (* 1 = 0.0526801 loss)\nI0818 22:53:50.959911 20842 sgd_solver.cpp:166] Iteration 14700, lr = 0.3675\nI0818 22:56:08.259165 20842 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 22:57:31.622674 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86772\nI0818 22:57:31.622923 20842 solver.cpp:404]     Test net output #1: loss = 0.560401 (* 1 = 0.560401 loss)\nI0818 22:57:32.939891 20842 solver.cpp:228] Iteration 14800, loss = 0.0824113\nI0818 22:57:32.939939 20842 solver.cpp:244]     Train net output #0: 
accuracy = 0.968\nI0818 22:57:32.939965 20842 solver.cpp:244]     Train net output #1: loss = 0.0824113 (* 1 = 0.0824113 loss)\nI0818 22:57:33.031807 20842 sgd_solver.cpp:166] Iteration 14800, lr = 0.37\nI0818 22:59:50.475350 20842 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 23:01:13.842411 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87088\nI0818 23:01:13.842679 20842 solver.cpp:404]     Test net output #1: loss = 0.528752 (* 1 = 0.528752 loss)\nI0818 23:01:15.159308 20842 solver.cpp:228] Iteration 14900, loss = 0.0404497\nI0818 23:01:15.159346 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:01:15.159370 20842 solver.cpp:244]     Train net output #1: loss = 0.0404498 (* 1 = 0.0404498 loss)\nI0818 23:01:15.246503 20842 sgd_solver.cpp:166] Iteration 14900, lr = 0.3725\nI0818 23:03:32.690798 20842 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 23:04:56.058079 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86892\nI0818 23:04:56.058336 20842 solver.cpp:404]     Test net output #1: loss = 0.544754 (* 1 = 0.544754 loss)\nI0818 23:04:57.373884 20842 solver.cpp:228] Iteration 15000, loss = 0.0649743\nI0818 23:04:57.373929 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:04:57.373955 20842 solver.cpp:244]     Train net output #1: loss = 0.0649744 (* 1 = 0.0649744 loss)\nI0818 23:04:57.463757 20842 sgd_solver.cpp:166] Iteration 15000, lr = 0.375\nI0818 23:07:14.877393 20842 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 23:08:38.239804 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8656\nI0818 23:08:38.240062 20842 solver.cpp:404]     Test net output #1: loss = 0.561002 (* 1 = 0.561002 loss)\nI0818 23:08:39.555582 20842 solver.cpp:228] Iteration 15100, loss = 0.0529394\nI0818 23:08:39.555627 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:08:39.555651 20842 solver.cpp:244]     Train net output #1: loss = 0.0529395 (* 1 = 
0.0529395 loss)\nI0818 23:08:39.640663 20842 sgd_solver.cpp:166] Iteration 15100, lr = 0.3775\nI0818 23:10:57.032827 20842 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 23:12:20.403219 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86804\nI0818 23:12:20.403482 20842 solver.cpp:404]     Test net output #1: loss = 0.55237 (* 1 = 0.55237 loss)\nI0818 23:12:21.720034 20842 solver.cpp:228] Iteration 15200, loss = 0.081039\nI0818 23:12:21.720080 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:12:21.720105 20842 solver.cpp:244]     Train net output #1: loss = 0.0810391 (* 1 = 0.0810391 loss)\nI0818 23:12:21.812201 20842 sgd_solver.cpp:166] Iteration 15200, lr = 0.38\nI0818 23:14:39.172389 20842 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 23:16:02.540457 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8694\nI0818 23:16:02.540673 20842 solver.cpp:404]     Test net output #1: loss = 0.540738 (* 1 = 0.540738 loss)\nI0818 23:16:03.857650 20842 solver.cpp:228] Iteration 15300, loss = 0.0397941\nI0818 23:16:03.857694 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:16:03.857718 20842 solver.cpp:244]     Train net output #1: loss = 0.0397941 (* 1 = 0.0397941 loss)\nI0818 23:16:03.944072 20842 sgd_solver.cpp:166] Iteration 15300, lr = 0.3825\nI0818 23:18:21.275367 20842 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 23:19:44.641057 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86668\nI0818 23:19:44.641314 20842 solver.cpp:404]     Test net output #1: loss = 0.562928 (* 1 = 0.562928 loss)\nI0818 23:19:45.958429 20842 solver.cpp:228] Iteration 15400, loss = 0.0612205\nI0818 23:19:45.958480 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:19:45.958505 20842 solver.cpp:244]     Train net output #1: loss = 0.0612206 (* 1 = 0.0612206 loss)\nI0818 23:19:46.045775 20842 sgd_solver.cpp:166] Iteration 15400, lr = 0.385\nI0818 23:22:03.412140 20842 
solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 23:23:26.774161 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86964\nI0818 23:23:26.774385 20842 solver.cpp:404]     Test net output #1: loss = 0.523367 (* 1 = 0.523367 loss)\nI0818 23:23:28.091645 20842 solver.cpp:228] Iteration 15500, loss = 0.0305336\nI0818 23:23:28.091682 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:23:28.091706 20842 solver.cpp:244]     Train net output #1: loss = 0.0305337 (* 1 = 0.0305337 loss)\nI0818 23:23:28.174407 20842 sgd_solver.cpp:166] Iteration 15500, lr = 0.3875\nI0818 23:25:45.506623 20842 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 23:27:08.866396 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0818 23:27:08.866650 20842 solver.cpp:404]     Test net output #1: loss = 0.52509 (* 1 = 0.52509 loss)\nI0818 23:27:10.183418 20842 solver.cpp:228] Iteration 15600, loss = 0.0563919\nI0818 23:27:10.183470 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:27:10.183495 20842 solver.cpp:244]     Train net output #1: loss = 0.056392 (* 1 = 0.056392 loss)\nI0818 23:27:10.268059 20842 sgd_solver.cpp:166] Iteration 15600, lr = 0.39\nI0818 23:29:27.621770 20842 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 23:30:50.989682 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0818 23:30:50.989941 20842 solver.cpp:404]     Test net output #1: loss = 0.514004 (* 1 = 0.514004 loss)\nI0818 23:30:52.306488 20842 solver.cpp:228] Iteration 15700, loss = 0.0809594\nI0818 23:30:52.306526 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:30:52.306550 20842 solver.cpp:244]     Train net output #1: loss = 0.0809595 (* 1 = 0.0809595 loss)\nI0818 23:30:52.402282 20842 sgd_solver.cpp:166] Iteration 15700, lr = 0.3925\nI0818 23:33:09.785094 20842 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 23:34:33.154676 20842 solver.cpp:404]     Test net output #0: 
accuracy = 0.8706\nI0818 23:34:33.154950 20842 solver.cpp:404]     Test net output #1: loss = 0.537599 (* 1 = 0.537599 loss)\nI0818 23:34:34.471264 20842 solver.cpp:228] Iteration 15800, loss = 0.103823\nI0818 23:34:34.471313 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:34:34.471338 20842 solver.cpp:244]     Train net output #1: loss = 0.103823 (* 1 = 0.103823 loss)\nI0818 23:34:34.561867 20842 sgd_solver.cpp:166] Iteration 15800, lr = 0.395\nI0818 23:36:51.902348 20842 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 23:38:15.265358 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87212\nI0818 23:38:15.265601 20842 solver.cpp:404]     Test net output #1: loss = 0.533897 (* 1 = 0.533897 loss)\nI0818 23:38:16.582481 20842 solver.cpp:228] Iteration 15900, loss = 0.0434407\nI0818 23:38:16.582528 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:38:16.582552 20842 solver.cpp:244]     Train net output #1: loss = 0.0434407 (* 1 = 0.0434407 loss)\nI0818 23:38:16.667809 20842 sgd_solver.cpp:166] Iteration 15900, lr = 0.3975\nI0818 23:40:33.778241 20842 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 23:41:57.140560 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86792\nI0818 23:41:57.140816 20842 solver.cpp:404]     Test net output #1: loss = 0.547982 (* 1 = 0.547982 loss)\nI0818 23:41:58.457571 20842 solver.cpp:228] Iteration 16000, loss = 0.0595215\nI0818 23:41:58.457618 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:41:58.457643 20842 solver.cpp:244]     Train net output #1: loss = 0.0595215 (* 1 = 0.0595215 loss)\nI0818 23:41:58.548415 20842 sgd_solver.cpp:166] Iteration 16000, lr = 0.4\nI0818 23:44:15.650441 20842 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 23:45:39.017284 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87484\nI0818 23:45:39.017556 20842 solver.cpp:404]     Test net output #1: loss = 0.521846 (* 1 = 0.521846 
loss)\nI0818 23:45:40.333936 20842 solver.cpp:228] Iteration 16100, loss = 0.0630223\nI0818 23:45:40.333984 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:45:40.334008 20842 solver.cpp:244]     Train net output #1: loss = 0.0630223 (* 1 = 0.0630223 loss)\nI0818 23:45:40.415412 20842 sgd_solver.cpp:166] Iteration 16100, lr = 0.4025\nI0818 23:47:57.256247 20842 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 23:49:20.523010 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86584\nI0818 23:49:20.523285 20842 solver.cpp:404]     Test net output #1: loss = 0.523458 (* 1 = 0.523458 loss)\nI0818 23:49:21.839959 20842 solver.cpp:228] Iteration 16200, loss = 0.0452589\nI0818 23:49:21.839998 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:49:21.840023 20842 solver.cpp:244]     Train net output #1: loss = 0.0452589 (* 1 = 0.0452589 loss)\nI0818 23:49:21.927954 20842 sgd_solver.cpp:166] Iteration 16200, lr = 0.405\nI0818 23:51:38.987645 20842 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 23:53:02.262634 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87376\nI0818 23:53:02.262922 20842 solver.cpp:404]     Test net output #1: loss = 0.517121 (* 1 = 0.517121 loss)\nI0818 23:53:03.579545 20842 solver.cpp:228] Iteration 16300, loss = 0.0508644\nI0818 23:53:03.579584 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:53:03.579608 20842 solver.cpp:244]     Train net output #1: loss = 0.0508644 (* 1 = 0.0508644 loss)\nI0818 23:53:03.671648 20842 sgd_solver.cpp:166] Iteration 16300, lr = 0.4075\nI0818 23:55:20.639899 20842 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 23:56:43.909363 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87288\nI0818 23:56:43.909621 20842 solver.cpp:404]     Test net output #1: loss = 0.520764 (* 1 = 0.520764 loss)\nI0818 23:56:45.225386 20842 solver.cpp:228] Iteration 16400, loss = 0.0587405\nI0818 23:56:45.225425 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:56:45.225448 20842 solver.cpp:244]     Train net output #1: loss = 0.0587406 (* 1 = 0.0587406 loss)\nI0818 23:56:45.313391 20842 sgd_solver.cpp:166] Iteration 16400, lr = 0.41\nI0818 23:59:02.061821 20842 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0819 00:00:25.340528 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86876\nI0819 00:00:25.340762 20842 solver.cpp:404]     Test net output #1: loss = 0.519156 (* 1 = 0.519156 loss)\nI0819 00:00:26.657989 20842 solver.cpp:228] Iteration 16500, loss = 0.0790411\nI0819 00:00:26.658026 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:00:26.658049 20842 solver.cpp:244]     Train net output #1: loss = 0.0790411 (* 1 = 0.0790411 loss)\nI0819 00:00:26.743638 20842 sgd_solver.cpp:166] Iteration 16500, lr = 0.4125\nI0819 00:02:43.737710 20842 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0819 00:04:07.000336 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86972\nI0819 00:04:07.000591 20842 solver.cpp:404]     Test net output #1: loss = 0.551758 (* 1 = 0.551758 loss)\nI0819 00:04:08.317181 20842 solver.cpp:228] Iteration 16600, loss = 0.103805\nI0819 00:04:08.317229 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:04:08.317253 20842 solver.cpp:244]     Train net output #1: loss = 0.103805 (* 1 = 0.103805 loss)\nI0819 00:04:08.397608 20842 sgd_solver.cpp:166] Iteration 16600, lr = 0.415\nI0819 00:06:25.507210 20842 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0819 00:07:48.777981 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86736\nI0819 00:07:48.778216 20842 solver.cpp:404]     Test net output #1: loss = 0.531895 (* 1 = 0.531895 loss)\nI0819 00:07:50.093031 20842 solver.cpp:228] Iteration 16700, loss = 0.128466\nI0819 00:07:50.093076 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:07:50.093092 20842 solver.cpp:244]     Train net output 
#1: loss = 0.128466 (* 1 = 0.128466 loss)\nI0819 00:07:50.185330 20842 sgd_solver.cpp:166] Iteration 16700, lr = 0.4175\nI0819 00:10:07.243005 20842 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0819 00:11:30.522912 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87556\nI0819 00:11:30.523175 20842 solver.cpp:404]     Test net output #1: loss = 0.519516 (* 1 = 0.519516 loss)\nI0819 00:11:31.839810 20842 solver.cpp:228] Iteration 16800, loss = 0.0338408\nI0819 00:11:31.839854 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:11:31.839871 20842 solver.cpp:244]     Train net output #1: loss = 0.0338408 (* 1 = 0.0338408 loss)\nI0819 00:11:31.922008 20842 sgd_solver.cpp:166] Iteration 16800, lr = 0.42\nI0819 00:13:48.969843 20842 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0819 00:15:12.250241 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87216\nI0819 00:15:12.250511 20842 solver.cpp:404]     Test net output #1: loss = 0.538005 (* 1 = 0.538005 loss)\nI0819 00:15:13.567291 20842 solver.cpp:228] Iteration 16900, loss = 0.0362972\nI0819 00:15:13.567335 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:15:13.567353 20842 solver.cpp:244]     Train net output #1: loss = 0.0362972 (* 1 = 0.0362972 loss)\nI0819 00:15:13.648543 20842 sgd_solver.cpp:166] Iteration 16900, lr = 0.4225\nI0819 00:17:30.679937 20842 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0819 00:18:53.947906 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8666\nI0819 00:18:53.948170 20842 solver.cpp:404]     Test net output #1: loss = 0.551896 (* 1 = 0.551896 loss)\nI0819 00:18:55.263782 20842 solver.cpp:228] Iteration 17000, loss = 0.0979779\nI0819 00:18:55.263826 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:18:55.263844 20842 solver.cpp:244]     Train net output #1: loss = 0.0979779 (* 1 = 0.0979779 loss)\nI0819 00:18:55.351372 20842 sgd_solver.cpp:166] Iteration 17000, lr = 
0.425\nI0819 00:21:12.458081 20842 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0819 00:22:35.834703 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87008\nI0819 00:22:35.834975 20842 solver.cpp:404]     Test net output #1: loss = 0.532011 (* 1 = 0.532011 loss)\nI0819 00:22:37.150776 20842 solver.cpp:228] Iteration 17100, loss = 0.0527637\nI0819 00:22:37.150820 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:22:37.150835 20842 solver.cpp:244]     Train net output #1: loss = 0.0527638 (* 1 = 0.0527638 loss)\nI0819 00:22:37.240687 20842 sgd_solver.cpp:166] Iteration 17100, lr = 0.4275\nI0819 00:24:54.259712 20842 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0819 00:26:17.649080 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87004\nI0819 00:26:17.649308 20842 solver.cpp:404]     Test net output #1: loss = 0.514268 (* 1 = 0.514268 loss)\nI0819 00:26:18.965539 20842 solver.cpp:228] Iteration 17200, loss = 0.103148\nI0819 00:26:18.965586 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:26:18.965610 20842 solver.cpp:244]     Train net output #1: loss = 0.103148 (* 1 = 0.103148 loss)\nI0819 00:26:19.050820 20842 sgd_solver.cpp:166] Iteration 17200, lr = 0.43\nI0819 00:28:36.241308 20842 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0819 00:29:59.625253 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87424\nI0819 00:29:59.625514 20842 solver.cpp:404]     Test net output #1: loss = 0.500168 (* 1 = 0.500168 loss)\nI0819 00:30:00.942214 20842 solver.cpp:228] Iteration 17300, loss = 0.0504037\nI0819 00:30:00.942251 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:30:00.942275 20842 solver.cpp:244]     Train net output #1: loss = 0.0504037 (* 1 = 0.0504037 loss)\nI0819 00:30:01.024201 20842 sgd_solver.cpp:166] Iteration 17300, lr = 0.4325\nI0819 00:32:18.135035 20842 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0819 00:33:41.502887 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.87124\nI0819 00:33:41.503124 20842 solver.cpp:404]     Test net output #1: loss = 0.520917 (* 1 = 0.520917 loss)\nI0819 00:33:42.819223 20842 solver.cpp:228] Iteration 17400, loss = 0.0692376\nI0819 00:33:42.819267 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:33:42.819283 20842 solver.cpp:244]     Train net output #1: loss = 0.0692376 (* 1 = 0.0692376 loss)\nI0819 00:33:42.906313 20842 sgd_solver.cpp:166] Iteration 17400, lr = 0.435\nI0819 00:35:59.918763 20842 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0819 00:37:23.284951 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8678\nI0819 00:37:23.285197 20842 solver.cpp:404]     Test net output #1: loss = 0.50928 (* 1 = 0.50928 loss)\nI0819 00:37:24.601686 20842 solver.cpp:228] Iteration 17500, loss = 0.0700039\nI0819 00:37:24.601728 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:37:24.601744 20842 solver.cpp:244]     Train net output #1: loss = 0.0700039 (* 1 = 0.0700039 loss)\nI0819 00:37:24.683647 20842 sgd_solver.cpp:166] Iteration 17500, lr = 0.4375\nI0819 00:39:41.796788 20842 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0819 00:41:05.156453 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 00:41:05.156716 20842 solver.cpp:404]     Test net output #1: loss = 0.500469 (* 1 = 0.500469 loss)\nI0819 00:41:06.472556 20842 solver.cpp:228] Iteration 17600, loss = 0.0187379\nI0819 00:41:06.472601 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:41:06.472618 20842 solver.cpp:244]     Train net output #1: loss = 0.0187379 (* 1 = 0.0187379 loss)\nI0819 00:41:06.558228 20842 sgd_solver.cpp:166] Iteration 17600, lr = 0.44\nI0819 00:43:23.685581 20842 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0819 00:44:47.024058 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0819 00:44:47.024293 20842 solver.cpp:404]     Test net output 
#1: loss = 0.495445 (* 1 = 0.495445 loss)\nI0819 00:44:48.339978 20842 solver.cpp:228] Iteration 17700, loss = 0.0532434\nI0819 00:44:48.340018 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:44:48.340034 20842 solver.cpp:244]     Train net output #1: loss = 0.0532434 (* 1 = 0.0532434 loss)\nI0819 00:44:48.430140 20842 sgd_solver.cpp:166] Iteration 17700, lr = 0.4425\nI0819 00:47:05.577570 20842 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0819 00:48:28.898970 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87996\nI0819 00:48:28.899206 20842 solver.cpp:404]     Test net output #1: loss = 0.51585 (* 1 = 0.51585 loss)\nI0819 00:48:30.214975 20842 solver.cpp:228] Iteration 17800, loss = 0.0915267\nI0819 00:48:30.215029 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 00:48:30.215045 20842 solver.cpp:244]     Train net output #1: loss = 0.0915267 (* 1 = 0.0915267 loss)\nI0819 00:48:30.296519 20842 sgd_solver.cpp:166] Iteration 17800, lr = 0.445\nI0819 00:50:47.404500 20842 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0819 00:52:10.732947 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 00:52:10.733171 20842 solver.cpp:404]     Test net output #1: loss = 0.499373 (* 1 = 0.499373 loss)\nI0819 00:52:12.049177 20842 solver.cpp:228] Iteration 17900, loss = 0.0711007\nI0819 00:52:12.049211 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:52:12.049226 20842 solver.cpp:244]     Train net output #1: loss = 0.0711007 (* 1 = 0.0711007 loss)\nI0819 00:52:12.139415 20842 sgd_solver.cpp:166] Iteration 17900, lr = 0.4475\nI0819 00:54:29.228780 20842 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0819 00:55:52.575851 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 00:55:52.576118 20842 solver.cpp:404]     Test net output #1: loss = 0.503313 (* 1 = 0.503313 loss)\nI0819 00:55:53.891913 20842 solver.cpp:228] Iteration 18000, loss = 
0.0635323\nI0819 00:55:53.891958 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:55:53.891975 20842 solver.cpp:244]     Train net output #1: loss = 0.0635323 (* 1 = 0.0635323 loss)\nI0819 00:55:53.977115 20842 sgd_solver.cpp:166] Iteration 18000, lr = 0.45\nI0819 00:58:11.164491 20842 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0819 00:59:34.515566 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87008\nI0819 00:59:34.515817 20842 solver.cpp:404]     Test net output #1: loss = 0.52365 (* 1 = 0.52365 loss)\nI0819 00:59:35.831576 20842 solver.cpp:228] Iteration 18100, loss = 0.0487804\nI0819 00:59:35.831619 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:59:35.831635 20842 solver.cpp:244]     Train net output #1: loss = 0.0487804 (* 1 = 0.0487804 loss)\nI0819 00:59:35.914644 20842 sgd_solver.cpp:166] Iteration 18100, lr = 0.4525\nI0819 01:01:53.058589 20842 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0819 01:03:16.411823 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87332\nI0819 01:03:16.412045 20842 solver.cpp:404]     Test net output #1: loss = 0.510601 (* 1 = 0.510601 loss)\nI0819 01:03:17.727619 20842 solver.cpp:228] Iteration 18200, loss = 0.0333084\nI0819 01:03:17.727661 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:03:17.727677 20842 solver.cpp:244]     Train net output #1: loss = 0.0333085 (* 1 = 0.0333085 loss)\nI0819 01:03:17.811985 20842 sgd_solver.cpp:166] Iteration 18200, lr = 0.455\nI0819 01:05:34.902984 20842 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0819 01:06:58.261390 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87136\nI0819 01:06:58.261637 20842 solver.cpp:404]     Test net output #1: loss = 0.523517 (* 1 = 0.523517 loss)\nI0819 01:06:59.577649 20842 solver.cpp:228] Iteration 18300, loss = 0.0431758\nI0819 01:06:59.577692 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:06:59.577708 
20842 solver.cpp:244]     Train net output #1: loss = 0.0431758 (* 1 = 0.0431758 loss)\nI0819 01:06:59.669801 20842 sgd_solver.cpp:166] Iteration 18300, lr = 0.4575\nI0819 01:09:16.727587 20842 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0819 01:10:40.080473 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0819 01:10:40.080752 20842 solver.cpp:404]     Test net output #1: loss = 0.498738 (* 1 = 0.498738 loss)\nI0819 01:10:41.394184 20842 solver.cpp:228] Iteration 18400, loss = 0.105233\nI0819 01:10:41.394228 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:10:41.394244 20842 solver.cpp:244]     Train net output #1: loss = 0.105233 (* 1 = 0.105233 loss)\nI0819 01:10:41.484271 20842 sgd_solver.cpp:166] Iteration 18400, lr = 0.46\nI0819 01:12:58.707393 20842 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0819 01:14:22.056814 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0819 01:14:22.057018 20842 solver.cpp:404]     Test net output #1: loss = 0.479485 (* 1 = 0.479485 loss)\nI0819 01:14:23.368451 20842 solver.cpp:228] Iteration 18500, loss = 0.0324857\nI0819 01:14:23.368494 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:14:23.368515 20842 solver.cpp:244]     Train net output #1: loss = 0.0324858 (* 1 = 0.0324858 loss)\nI0819 01:14:23.455287 20842 sgd_solver.cpp:166] Iteration 18500, lr = 0.4625\nI0819 01:16:40.699553 20842 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0819 01:18:04.040868 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87788\nI0819 01:18:04.041115 20842 solver.cpp:404]     Test net output #1: loss = 0.508967 (* 1 = 0.508967 loss)\nI0819 01:18:05.352823 20842 solver.cpp:228] Iteration 18600, loss = 0.0317016\nI0819 01:18:05.352864 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:18:05.352880 20842 solver.cpp:244]     Train net output #1: loss = 0.0317016 (* 1 = 0.0317016 loss)\nI0819 01:18:05.448056 20842 
sgd_solver.cpp:166] Iteration 18600, lr = 0.465\nI0819 01:20:22.740623 20842 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0819 01:21:46.083781 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87312\nI0819 01:21:46.084014 20842 solver.cpp:404]     Test net output #1: loss = 0.539748 (* 1 = 0.539748 loss)\nI0819 01:21:47.395669 20842 solver.cpp:228] Iteration 18700, loss = 0.0521921\nI0819 01:21:47.395702 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:21:47.395717 20842 solver.cpp:244]     Train net output #1: loss = 0.0521922 (* 1 = 0.0521922 loss)\nI0819 01:21:47.483314 20842 sgd_solver.cpp:166] Iteration 18700, lr = 0.4675\nI0819 01:24:04.737123 20842 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0819 01:25:28.081782 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0819 01:25:28.082046 20842 solver.cpp:404]     Test net output #1: loss = 0.519934 (* 1 = 0.519934 loss)\nI0819 01:25:29.393769 20842 solver.cpp:228] Iteration 18800, loss = 0.0519806\nI0819 01:25:29.393813 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:25:29.393829 20842 solver.cpp:244]     Train net output #1: loss = 0.0519807 (* 1 = 0.0519807 loss)\nI0819 01:25:29.485229 20842 sgd_solver.cpp:166] Iteration 18800, lr = 0.47\nI0819 01:27:46.841312 20842 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0819 01:29:10.200448 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87728\nI0819 01:29:10.200698 20842 solver.cpp:404]     Test net output #1: loss = 0.487347 (* 1 = 0.487347 loss)\nI0819 01:29:11.512364 20842 solver.cpp:228] Iteration 18900, loss = 0.0213521\nI0819 01:29:11.512409 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:29:11.512426 20842 solver.cpp:244]     Train net output #1: loss = 0.0213522 (* 1 = 0.0213522 loss)\nI0819 01:29:11.602102 20842 sgd_solver.cpp:166] Iteration 18900, lr = 0.4725\nI0819 01:31:28.872727 20842 solver.cpp:337] Iteration 19000, Testing 
net (#0)\nI0819 01:32:52.239203 20842 solver.cpp:404]     Test net output #0: accuracy = 0.872\nI0819 01:32:52.239468 20842 solver.cpp:404]     Test net output #1: loss = 0.524757 (* 1 = 0.524757 loss)\nI0819 01:32:53.550344 20842 solver.cpp:228] Iteration 19000, loss = 0.103325\nI0819 01:32:53.550387 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 01:32:53.550403 20842 solver.cpp:244]     Train net output #1: loss = 0.103325 (* 1 = 0.103325 loss)\nI0819 01:32:53.645040 20842 sgd_solver.cpp:166] Iteration 19000, lr = 0.475\nI0819 01:35:10.936715 20842 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0819 01:36:34.306586 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0819 01:36:34.306813 20842 solver.cpp:404]     Test net output #1: loss = 0.489614 (* 1 = 0.489614 loss)\nI0819 01:36:35.619683 20842 solver.cpp:228] Iteration 19100, loss = 0.0466258\nI0819 01:36:35.619729 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:36:35.619746 20842 solver.cpp:244]     Train net output #1: loss = 0.0466259 (* 1 = 0.0466259 loss)\nI0819 01:36:35.713945 20842 sgd_solver.cpp:166] Iteration 19100, lr = 0.4775\nI0819 01:38:52.983014 20842 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0819 01:40:16.352326 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87276\nI0819 01:40:16.352586 20842 solver.cpp:404]     Test net output #1: loss = 0.51384 (* 1 = 0.51384 loss)\nI0819 01:40:17.665401 20842 solver.cpp:228] Iteration 19200, loss = 0.120881\nI0819 01:40:17.665446 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:40:17.665462 20842 solver.cpp:244]     Train net output #1: loss = 0.120882 (* 1 = 0.120882 loss)\nI0819 01:40:17.762750 20842 sgd_solver.cpp:166] Iteration 19200, lr = 0.48\nI0819 01:42:35.002681 20842 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0819 01:43:58.371492 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87564\nI0819 01:43:58.371731 20842 
solver.cpp:404]     Test net output #1: loss = 0.517022 (* 1 = 0.517022 loss)\nI0819 01:43:59.683954 20842 solver.cpp:228] Iteration 19300, loss = 0.0315895\nI0819 01:43:59.684000 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:43:59.684017 20842 solver.cpp:244]     Train net output #1: loss = 0.0315896 (* 1 = 0.0315896 loss)\nI0819 01:43:59.778125 20842 sgd_solver.cpp:166] Iteration 19300, lr = 0.4825\nI0819 01:46:17.078080 20842 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0819 01:47:40.448534 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0819 01:47:40.448806 20842 solver.cpp:404]     Test net output #1: loss = 0.487259 (* 1 = 0.487259 loss)\nI0819 01:47:41.760915 20842 solver.cpp:228] Iteration 19400, loss = 0.0836596\nI0819 01:47:41.760962 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:47:41.760977 20842 solver.cpp:244]     Train net output #1: loss = 0.0836597 (* 1 = 0.0836597 loss)\nI0819 01:47:41.853925 20842 sgd_solver.cpp:166] Iteration 19400, lr = 0.485\nI0819 01:49:59.058660 20842 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0819 01:51:22.419847 20842 solver.cpp:404]     Test net output #0: accuracy = 0.878\nI0819 01:51:22.420111 20842 solver.cpp:404]     Test net output #1: loss = 0.497298 (* 1 = 0.497298 loss)\nI0819 01:51:23.732353 20842 solver.cpp:228] Iteration 19500, loss = 0.0771424\nI0819 01:51:23.732398 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:51:23.732415 20842 solver.cpp:244]     Train net output #1: loss = 0.0771425 (* 1 = 0.0771425 loss)\nI0819 01:51:23.826141 20842 sgd_solver.cpp:166] Iteration 19500, lr = 0.4875\nI0819 01:53:41.071354 20842 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0819 01:55:04.434216 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87092\nI0819 01:55:04.434489 20842 solver.cpp:404]     Test net output #1: loss = 0.550609 (* 1 = 0.550609 loss)\nI0819 01:55:05.746011 20842 
solver.cpp:228] Iteration 19600, loss = 0.0364832\nI0819 01:55:05.746054 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:55:05.746071 20842 solver.cpp:244]     Train net output #1: loss = 0.0364833 (* 1 = 0.0364833 loss)\nI0819 01:55:05.832235 20842 sgd_solver.cpp:166] Iteration 19600, lr = 0.49\nI0819 01:57:23.048717 20842 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0819 01:58:46.410581 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87456\nI0819 01:58:46.410818 20842 solver.cpp:404]     Test net output #1: loss = 0.524119 (* 1 = 0.524119 loss)\nI0819 01:58:47.722141 20842 solver.cpp:228] Iteration 19700, loss = 0.13177\nI0819 01:58:47.722184 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:58:47.722201 20842 solver.cpp:244]     Train net output #1: loss = 0.13177 (* 1 = 0.13177 loss)\nI0819 01:58:47.809226 20842 sgd_solver.cpp:166] Iteration 19700, lr = 0.4925\nI0819 02:01:04.990396 20842 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0819 02:02:28.353891 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87228\nI0819 02:02:28.354158 20842 solver.cpp:404]     Test net output #1: loss = 0.528087 (* 1 = 0.528087 loss)\nI0819 02:02:29.665746 20842 solver.cpp:228] Iteration 19800, loss = 0.0921344\nI0819 02:02:29.665791 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:02:29.665808 20842 solver.cpp:244]     Train net output #1: loss = 0.0921345 (* 1 = 0.0921345 loss)\nI0819 02:02:29.763761 20842 sgd_solver.cpp:166] Iteration 19800, lr = 0.495\nI0819 02:04:46.977001 20842 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0819 02:06:10.245265 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86872\nI0819 02:06:10.245549 20842 solver.cpp:404]     Test net output #1: loss = 0.520105 (* 1 = 0.520105 loss)\nI0819 02:06:11.556769 20842 solver.cpp:228] Iteration 19900, loss = 0.133507\nI0819 02:06:11.556814 20842 solver.cpp:244]     Train net output #0: 
accuracy = 0.952\nI0819 02:06:11.556830 20842 solver.cpp:244]     Train net output #1: loss = 0.133507 (* 1 = 0.133507 loss)\nI0819 02:06:11.646258 20842 sgd_solver.cpp:166] Iteration 19900, lr = 0.4975\nI0819 02:08:28.839305 20842 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0819 02:09:52.107628 20842 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 02:09:52.107918 20842 solver.cpp:404]     Test net output #1: loss = 0.491417 (* 1 = 0.491417 loss)\nI0819 02:09:53.419446 20842 solver.cpp:228] Iteration 20000, loss = 0.0378724\nI0819 02:09:53.419487 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:09:53.419504 20842 solver.cpp:244]     Train net output #1: loss = 0.0378725 (* 1 = 0.0378725 loss)\nI0819 02:09:53.512629 20842 sgd_solver.cpp:166] Iteration 20000, lr = 0.5\nI0819 02:12:10.742200 20842 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0819 02:13:34.002990 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87516\nI0819 02:13:34.003274 20842 solver.cpp:404]     Test net output #1: loss = 0.52154 (* 1 = 0.52154 loss)\nI0819 02:13:35.315356 20842 solver.cpp:228] Iteration 20100, loss = 0.0501929\nI0819 02:13:35.315398 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:13:35.315415 20842 solver.cpp:244]     Train net output #1: loss = 0.050193 (* 1 = 0.050193 loss)\nI0819 02:13:35.409570 20842 sgd_solver.cpp:166] Iteration 20100, lr = 0.5025\nI0819 02:15:52.563041 20842 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0819 02:17:15.827901 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87532\nI0819 02:17:15.828145 20842 solver.cpp:404]     Test net output #1: loss = 0.486252 (* 1 = 0.486252 loss)\nI0819 02:17:17.139637 20842 solver.cpp:228] Iteration 20200, loss = 0.088265\nI0819 02:17:17.139683 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:17:17.139699 20842 solver.cpp:244]     Train net output #1: loss = 0.0882652 (* 1 = 0.0882652 
loss)\nI0819 02:17:17.229038 20842 sgd_solver.cpp:166] Iteration 20200, lr = 0.505\nI0819 02:19:34.472502 20842 solver.cpp:337] Iteration 20300, Testing net (#0)\nI0819 02:20:57.738468 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0819 02:20:57.738747 20842 solver.cpp:404]     Test net output #1: loss = 0.498617 (* 1 = 0.498617 loss)\nI0819 02:20:59.050953 20842 solver.cpp:228] Iteration 20300, loss = 0.0708466\nI0819 02:20:59.050998 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:20:59.051017 20842 solver.cpp:244]     Train net output #1: loss = 0.0708467 (* 1 = 0.0708467 loss)\nI0819 02:20:59.142138 20842 sgd_solver.cpp:166] Iteration 20300, lr = 0.5075\nI0819 02:23:16.342566 20842 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0819 02:24:39.604616 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87616\nI0819 02:24:39.604862 20842 solver.cpp:404]     Test net output #1: loss = 0.475045 (* 1 = 0.475045 loss)\nI0819 02:24:40.916803 20842 solver.cpp:228] Iteration 20400, loss = 0.0905846\nI0819 02:24:40.916846 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:24:40.916863 20842 solver.cpp:244]     Train net output #1: loss = 0.0905847 (* 1 = 0.0905847 loss)\nI0819 02:24:41.011515 20842 sgd_solver.cpp:166] Iteration 20400, lr = 0.51\nI0819 02:26:58.211940 20842 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0819 02:28:21.478935 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87612\nI0819 02:28:21.479178 20842 solver.cpp:404]     Test net output #1: loss = 0.482862 (* 1 = 0.482862 loss)\nI0819 02:28:22.792069 20842 solver.cpp:228] Iteration 20500, loss = 0.0425411\nI0819 02:28:22.792115 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:28:22.792131 20842 solver.cpp:244]     Train net output #1: loss = 0.0425412 (* 1 = 0.0425412 loss)\nI0819 02:28:22.888017 20842 sgd_solver.cpp:166] Iteration 20500, lr = 0.5125\nI0819 02:30:40.063375 20842 
solver.cpp:337] Iteration 20600, Testing net (#0)\nI0819 02:32:03.324718 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87532\nI0819 02:32:03.324978 20842 solver.cpp:404]     Test net output #1: loss = 0.487812 (* 1 = 0.487812 loss)\nI0819 02:32:04.637512 20842 solver.cpp:228] Iteration 20600, loss = 0.0387041\nI0819 02:32:04.637557 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:32:04.637574 20842 solver.cpp:244]     Train net output #1: loss = 0.0387042 (* 1 = 0.0387042 loss)\nI0819 02:32:04.726688 20842 sgd_solver.cpp:166] Iteration 20600, lr = 0.515\nI0819 02:34:21.862025 20842 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0819 02:35:45.126835 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0819 02:35:45.127104 20842 solver.cpp:404]     Test net output #1: loss = 0.473645 (* 1 = 0.473645 loss)\nI0819 02:35:46.438691 20842 solver.cpp:228] Iteration 20700, loss = 0.0517098\nI0819 02:35:46.438736 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:35:46.438752 20842 solver.cpp:244]     Train net output #1: loss = 0.0517099 (* 1 = 0.0517099 loss)\nI0819 02:35:46.520640 20842 sgd_solver.cpp:166] Iteration 20700, lr = 0.5175\nI0819 02:38:03.689225 20842 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0819 02:39:27.050174 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87552\nI0819 02:39:27.050447 20842 solver.cpp:404]     Test net output #1: loss = 0.484106 (* 1 = 0.484106 loss)\nI0819 02:39:28.362874 20842 solver.cpp:228] Iteration 20800, loss = 0.121892\nI0819 02:39:28.362920 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:39:28.362936 20842 solver.cpp:244]     Train net output #1: loss = 0.121892 (* 1 = 0.121892 loss)\nI0819 02:39:28.448698 20842 sgd_solver.cpp:166] Iteration 20800, lr = 0.52\nI0819 02:41:45.810379 20842 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0819 02:43:09.178361 20842 solver.cpp:404]     Test net output #0: 
accuracy = 0.87592\nI0819 02:43:09.178647 20842 solver.cpp:404]     Test net output #1: loss = 0.47461 (* 1 = 0.47461 loss)\nI0819 02:43:10.490767 20842 solver.cpp:228] Iteration 20900, loss = 0.0299817\nI0819 02:43:10.490811 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:43:10.490828 20842 solver.cpp:244]     Train net output #1: loss = 0.0299818 (* 1 = 0.0299818 loss)\nI0819 02:43:10.586741 20842 sgd_solver.cpp:166] Iteration 20900, lr = 0.5225\nI0819 02:45:27.742426 20842 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0819 02:46:51.113741 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 02:46:51.114035 20842 solver.cpp:404]     Test net output #1: loss = 0.463845 (* 1 = 0.463845 loss)\nI0819 02:46:52.426144 20842 solver.cpp:228] Iteration 21000, loss = 0.0530135\nI0819 02:46:52.426190 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:46:52.426208 20842 solver.cpp:244]     Train net output #1: loss = 0.0530135 (* 1 = 0.0530135 loss)\nI0819 02:46:52.521118 20842 sgd_solver.cpp:166] Iteration 21000, lr = 0.525\nI0819 02:49:09.764037 20842 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0819 02:50:33.136289 20842 solver.cpp:404]     Test net output #0: accuracy = 0.878\nI0819 02:50:33.136564 20842 solver.cpp:404]     Test net output #1: loss = 0.489051 (* 1 = 0.489051 loss)\nI0819 02:50:34.447963 20842 solver.cpp:228] Iteration 21100, loss = 0.036951\nI0819 02:50:34.448010 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:50:34.448027 20842 solver.cpp:244]     Train net output #1: loss = 0.0369511 (* 1 = 0.0369511 loss)\nI0819 02:50:34.535380 20842 sgd_solver.cpp:166] Iteration 21100, lr = 0.5275\nI0819 02:52:51.710342 20842 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0819 02:54:15.088703 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8728\nI0819 02:54:15.089002 20842 solver.cpp:404]     Test net output #1: loss = 0.512184 (* 1 = 0.512184 
loss)\nI0819 02:54:16.400876 20842 solver.cpp:228] Iteration 21200, loss = 0.090843\nI0819 02:54:16.400919 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:54:16.400936 20842 solver.cpp:244]     Train net output #1: loss = 0.0908431 (* 1 = 0.0908431 loss)\nI0819 02:54:16.490478 20842 sgd_solver.cpp:166] Iteration 21200, lr = 0.53\nI0819 02:56:33.672144 20842 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0819 02:57:57.044361 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87508\nI0819 02:57:57.044658 20842 solver.cpp:404]     Test net output #1: loss = 0.498688 (* 1 = 0.498688 loss)\nI0819 02:57:58.356629 20842 solver.cpp:228] Iteration 21300, loss = 0.0793653\nI0819 02:57:58.356673 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:57:58.356690 20842 solver.cpp:244]     Train net output #1: loss = 0.0793653 (* 1 = 0.0793653 loss)\nI0819 02:57:58.451936 20842 sgd_solver.cpp:166] Iteration 21300, lr = 0.5325\nI0819 03:00:15.630153 20842 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0819 03:01:38.997056 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87524\nI0819 03:01:38.997344 20842 solver.cpp:404]     Test net output #1: loss = 0.488191 (* 1 = 0.488191 loss)\nI0819 03:01:40.308979 20842 solver.cpp:228] Iteration 21400, loss = 0.0376174\nI0819 03:01:40.309022 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:01:40.309039 20842 solver.cpp:244]     Train net output #1: loss = 0.0376175 (* 1 = 0.0376175 loss)\nI0819 03:01:40.405778 20842 sgd_solver.cpp:166] Iteration 21400, lr = 0.535\nI0819 03:03:57.577992 20842 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0819 03:05:20.946151 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86548\nI0819 03:05:20.946432 20842 solver.cpp:404]     Test net output #1: loss = 0.536525 (* 1 = 0.536525 loss)\nI0819 03:05:22.258674 20842 solver.cpp:228] Iteration 21500, loss = 0.0505292\nI0819 03:05:22.258718 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:05:22.258734 20842 solver.cpp:244]     Train net output #1: loss = 0.0505293 (* 1 = 0.0505293 loss)\nI0819 03:05:22.348377 20842 sgd_solver.cpp:166] Iteration 21500, lr = 0.5375\nI0819 03:07:38.990921 20842 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0819 03:09:02.355095 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0819 03:09:02.355358 20842 solver.cpp:404]     Test net output #1: loss = 0.492459 (* 1 = 0.492459 loss)\nI0819 03:09:03.666707 20842 solver.cpp:228] Iteration 21600, loss = 0.0751702\nI0819 03:09:03.666752 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:09:03.666769 20842 solver.cpp:244]     Train net output #1: loss = 0.0751702 (* 1 = 0.0751702 loss)\nI0819 03:09:03.757725 20842 sgd_solver.cpp:166] Iteration 21600, lr = 0.54\nI0819 03:11:20.346997 20842 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0819 03:12:43.711066 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88548\nI0819 03:12:43.711354 20842 solver.cpp:404]     Test net output #1: loss = 0.469035 (* 1 = 0.469035 loss)\nI0819 03:12:45.022764 20842 solver.cpp:228] Iteration 21700, loss = 0.0871539\nI0819 03:12:45.022809 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:12:45.022826 20842 solver.cpp:244]     Train net output #1: loss = 0.0871539 (* 1 = 0.0871539 loss)\nI0819 03:12:45.105520 20842 sgd_solver.cpp:166] Iteration 21700, lr = 0.5425\nI0819 03:15:01.773494 20842 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0819 03:16:25.146075 20842 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 03:16:25.146363 20842 solver.cpp:404]     Test net output #1: loss = 0.487481 (* 1 = 0.487481 loss)\nI0819 03:16:26.458024 20842 solver.cpp:228] Iteration 21800, loss = 0.0627494\nI0819 03:16:26.458068 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:16:26.458084 20842 solver.cpp:244]     Train net 
output #1: loss = 0.0627494 (* 1 = 0.0627494 loss)\nI0819 03:16:26.550803 20842 sgd_solver.cpp:166] Iteration 21800, lr = 0.545\nI0819 03:18:43.197412 20842 solver.cpp:337] Iteration 21900, Testing net (#0)\nI0819 03:20:06.569998 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0819 03:20:06.570272 20842 solver.cpp:404]     Test net output #1: loss = 0.485658 (* 1 = 0.485658 loss)\nI0819 03:20:07.881927 20842 solver.cpp:228] Iteration 21900, loss = 0.0923855\nI0819 03:20:07.881970 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:20:07.881986 20842 solver.cpp:244]     Train net output #1: loss = 0.0923855 (* 1 = 0.0923855 loss)\nI0819 03:20:07.976269 20842 sgd_solver.cpp:166] Iteration 21900, lr = 0.5475\nI0819 03:22:24.698163 20842 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0819 03:23:48.071962 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8754\nI0819 03:23:48.072237 20842 solver.cpp:404]     Test net output #1: loss = 0.497784 (* 1 = 0.497784 loss)\nI0819 03:23:49.383952 20842 solver.cpp:228] Iteration 22000, loss = 0.0984349\nI0819 03:23:49.383998 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:23:49.384016 20842 solver.cpp:244]     Train net output #1: loss = 0.0984349 (* 1 = 0.0984349 loss)\nI0819 03:23:49.471438 20842 sgd_solver.cpp:166] Iteration 22000, lr = 0.55\nI0819 03:26:05.799160 20842 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0819 03:27:29.162271 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0819 03:27:29.162547 20842 solver.cpp:404]     Test net output #1: loss = 0.49845 (* 1 = 0.49845 loss)\nI0819 03:27:30.474237 20842 solver.cpp:228] Iteration 22100, loss = 0.0497522\nI0819 03:27:30.474282 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:27:30.474299 20842 solver.cpp:244]     Train net output #1: loss = 0.0497523 (* 1 = 0.0497523 loss)\nI0819 03:27:30.564754 20842 sgd_solver.cpp:166] Iteration 22100, lr = 
0.5525\nI0819 03:29:47.221016 20842 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0819 03:31:10.587558 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0819 03:31:10.587832 20842 solver.cpp:404]     Test net output #1: loss = 0.479026 (* 1 = 0.479026 loss)\nI0819 03:31:11.899446 20842 solver.cpp:228] Iteration 22200, loss = 0.0991285\nI0819 03:31:11.899503 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:31:11.899530 20842 solver.cpp:244]     Train net output #1: loss = 0.0991286 (* 1 = 0.0991286 loss)\nI0819 03:31:11.986048 20842 sgd_solver.cpp:166] Iteration 22200, lr = 0.555\nI0819 03:33:28.612586 20842 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0819 03:34:51.980003 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 03:34:51.980253 20842 solver.cpp:404]     Test net output #1: loss = 0.509687 (* 1 = 0.509687 loss)\nI0819 03:34:53.291975 20842 solver.cpp:228] Iteration 22300, loss = 0.0824544\nI0819 03:34:53.292018 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:34:53.292037 20842 solver.cpp:244]     Train net output #1: loss = 0.0824544 (* 1 = 0.0824544 loss)\nI0819 03:34:53.382434 20842 sgd_solver.cpp:166] Iteration 22300, lr = 0.5575\nI0819 03:37:10.053128 20842 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0819 03:38:33.425932 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87324\nI0819 03:38:33.426169 20842 solver.cpp:404]     Test net output #1: loss = 0.507714 (* 1 = 0.507714 loss)\nI0819 03:38:34.737609 20842 solver.cpp:228] Iteration 22400, loss = 0.0454901\nI0819 03:38:34.737653 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 03:38:34.737670 20842 solver.cpp:244]     Train net output #1: loss = 0.0454901 (* 1 = 0.0454901 loss)\nI0819 03:38:34.826086 20842 sgd_solver.cpp:166] Iteration 22400, lr = 0.56\nI0819 03:40:51.489274 20842 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0819 03:42:14.863901 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88156\nI0819 03:42:14.864148 20842 solver.cpp:404]     Test net output #1: loss = 0.469363 (* 1 = 0.469363 loss)\nI0819 03:42:16.175725 20842 solver.cpp:228] Iteration 22500, loss = 0.0335317\nI0819 03:42:16.175770 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:42:16.175786 20842 solver.cpp:244]     Train net output #1: loss = 0.0335317 (* 1 = 0.0335317 loss)\nI0819 03:42:16.264853 20842 sgd_solver.cpp:166] Iteration 22500, lr = 0.5625\nI0819 03:44:33.221067 20842 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0819 03:45:57.619654 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87852\nI0819 03:45:57.619969 20842 solver.cpp:404]     Test net output #1: loss = 0.492008 (* 1 = 0.492008 loss)\nI0819 03:45:58.936498 20842 solver.cpp:228] Iteration 22600, loss = 0.159776\nI0819 03:45:58.936561 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 03:45:58.936580 20842 solver.cpp:244]     Train net output #1: loss = 0.159776 (* 1 = 0.159776 loss)\nI0819 03:45:59.046072 20842 sgd_solver.cpp:166] Iteration 22600, lr = 0.565\nI0819 03:48:16.039376 20842 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0819 03:49:40.430449 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0819 03:49:40.430866 20842 solver.cpp:404]     Test net output #1: loss = 0.504335 (* 1 = 0.504335 loss)\nI0819 03:49:41.747349 20842 solver.cpp:228] Iteration 22700, loss = 0.10577\nI0819 03:49:41.747391 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:49:41.747408 20842 solver.cpp:244]     Train net output #1: loss = 0.10577 (* 1 = 0.10577 loss)\nI0819 03:49:41.830603 20842 sgd_solver.cpp:166] Iteration 22700, lr = 0.5675\nI0819 03:51:58.783427 20842 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0819 03:53:23.168627 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87332\nI0819 03:53:23.168958 20842 solver.cpp:404]     Test net output 
#1: loss = 0.501836 (* 1 = 0.501836 loss)\nI0819 03:53:24.485754 20842 solver.cpp:228] Iteration 22800, loss = 0.0674187\nI0819 03:53:24.485812 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:53:24.485831 20842 solver.cpp:244]     Train net output #1: loss = 0.0674187 (* 1 = 0.0674187 loss)\nI0819 03:53:24.574050 20842 sgd_solver.cpp:166] Iteration 22800, lr = 0.57\nI0819 03:55:41.577281 20842 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0819 03:57:05.974669 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8772\nI0819 03:57:05.974984 20842 solver.cpp:404]     Test net output #1: loss = 0.486145 (* 1 = 0.486145 loss)\nI0819 03:57:07.291385 20842 solver.cpp:228] Iteration 22900, loss = 0.0371084\nI0819 03:57:07.291427 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:57:07.291445 20842 solver.cpp:244]     Train net output #1: loss = 0.0371084 (* 1 = 0.0371084 loss)\nI0819 03:57:07.375967 20842 sgd_solver.cpp:166] Iteration 22900, lr = 0.5725\nI0819 03:59:24.360443 20842 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0819 04:00:48.761301 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8772\nI0819 04:00:48.761612 20842 solver.cpp:404]     Test net output #1: loss = 0.487373 (* 1 = 0.487373 loss)\nI0819 04:00:50.077204 20842 solver.cpp:228] Iteration 23000, loss = 0.108127\nI0819 04:00:50.077249 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:00:50.077265 20842 solver.cpp:244]     Train net output #1: loss = 0.108127 (* 1 = 0.108127 loss)\nI0819 04:00:50.162371 20842 sgd_solver.cpp:166] Iteration 23000, lr = 0.575\nI0819 04:03:07.120373 20842 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0819 04:04:31.519284 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87296\nI0819 04:04:31.519623 20842 solver.cpp:404]     Test net output #1: loss = 0.517264 (* 1 = 0.517264 loss)\nI0819 04:04:32.836705 20842 solver.cpp:228] Iteration 23100, loss = 
0.0989501\nI0819 04:04:32.836750 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 04:04:32.836766 20842 solver.cpp:244]     Train net output #1: loss = 0.0989501 (* 1 = 0.0989501 loss)\nI0819 04:04:32.922935 20842 sgd_solver.cpp:166] Iteration 23100, lr = 0.5775\nI0819 04:06:49.412214 20842 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0819 04:08:13.811501 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87408\nI0819 04:08:13.811892 20842 solver.cpp:404]     Test net output #1: loss = 0.493577 (* 1 = 0.493577 loss)\nI0819 04:08:15.127235 20842 solver.cpp:228] Iteration 23200, loss = 0.115493\nI0819 04:08:15.127296 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:08:15.127315 20842 solver.cpp:244]     Train net output #1: loss = 0.115493 (* 1 = 0.115493 loss)\nI0819 04:08:15.211774 20842 sgd_solver.cpp:166] Iteration 23200, lr = 0.58\nI0819 04:10:32.270853 20842 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0819 04:11:56.665496 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8818\nI0819 04:11:56.665841 20842 solver.cpp:404]     Test net output #1: loss = 0.484514 (* 1 = 0.484514 loss)\nI0819 04:11:57.981452 20842 solver.cpp:228] Iteration 23300, loss = 0.0423813\nI0819 04:11:57.981494 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 04:11:57.981511 20842 solver.cpp:244]     Train net output #1: loss = 0.0423813 (* 1 = 0.0423813 loss)\nI0819 04:11:58.071045 20842 sgd_solver.cpp:166] Iteration 23300, lr = 0.5825\nI0819 04:14:15.096274 20842 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0819 04:15:39.495447 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88056\nI0819 04:15:39.495787 20842 solver.cpp:404]     Test net output #1: loss = 0.500409 (* 1 = 0.500409 loss)\nI0819 04:15:40.811904 20842 solver.cpp:228] Iteration 23400, loss = 0.0498225\nI0819 04:15:40.811965 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:15:40.811985 
20842 solver.cpp:244]     Train net output #1: loss = 0.0498225 (* 1 = 0.0498225 loss)\nI0819 04:15:40.896188 20842 sgd_solver.cpp:166] Iteration 23400, lr = 0.585\nI0819 04:17:57.827927 20842 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0819 04:19:22.225590 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0819 04:19:22.225898 20842 solver.cpp:404]     Test net output #1: loss = 0.485413 (* 1 = 0.485413 loss)\nI0819 04:19:23.542291 20842 solver.cpp:228] Iteration 23500, loss = 0.0667401\nI0819 04:19:23.542336 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:19:23.542351 20842 solver.cpp:244]     Train net output #1: loss = 0.0667401 (* 1 = 0.0667401 loss)\nI0819 04:19:23.631870 20842 sgd_solver.cpp:166] Iteration 23500, lr = 0.5875\nI0819 04:21:40.682130 20842 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0819 04:23:05.076838 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0819 04:23:05.077141 20842 solver.cpp:404]     Test net output #1: loss = 0.486883 (* 1 = 0.486883 loss)\nI0819 04:23:06.394114 20842 solver.cpp:228] Iteration 23600, loss = 0.129579\nI0819 04:23:06.394173 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:23:06.394191 20842 solver.cpp:244]     Train net output #1: loss = 0.129579 (* 1 = 0.129579 loss)\nI0819 04:23:06.476164 20842 sgd_solver.cpp:166] Iteration 23600, lr = 0.59\nI0819 04:25:23.344179 20842 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0819 04:26:47.734024 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87748\nI0819 04:26:47.734355 20842 solver.cpp:404]     Test net output #1: loss = 0.473498 (* 1 = 0.473498 loss)\nI0819 04:26:49.050941 20842 solver.cpp:228] Iteration 23700, loss = 0.0774627\nI0819 04:26:49.051003 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:26:49.051021 20842 solver.cpp:244]     Train net output #1: loss = 0.0774627 (* 1 = 0.0774627 loss)\nI0819 04:26:49.140880 20842 
sgd_solver.cpp:166] Iteration 23700, lr = 0.5925\nI0819 04:29:05.590591 20842 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0819 04:30:29.974546 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87984\nI0819 04:30:29.974882 20842 solver.cpp:404]     Test net output #1: loss = 0.474466 (* 1 = 0.474466 loss)\nI0819 04:30:31.291697 20842 solver.cpp:228] Iteration 23800, loss = 0.0904089\nI0819 04:30:31.291756 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:30:31.291774 20842 solver.cpp:244]     Train net output #1: loss = 0.0904089 (* 1 = 0.0904089 loss)\nI0819 04:30:31.377516 20842 sgd_solver.cpp:166] Iteration 23800, lr = 0.595\nI0819 04:32:47.814126 20842 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0819 04:34:12.200691 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 04:34:12.201017 20842 solver.cpp:404]     Test net output #1: loss = 0.487256 (* 1 = 0.487256 loss)\nI0819 04:34:13.517509 20842 solver.cpp:228] Iteration 23900, loss = 0.0948732\nI0819 04:34:13.517568 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:34:13.517587 20842 solver.cpp:244]     Train net output #1: loss = 0.0948732 (* 1 = 0.0948732 loss)\nI0819 04:34:13.606171 20842 sgd_solver.cpp:166] Iteration 23900, lr = 0.5975\nI0819 04:36:30.520936 20842 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0819 04:37:54.907564 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 04:37:54.907876 20842 solver.cpp:404]     Test net output #1: loss = 0.467449 (* 1 = 0.467449 loss)\nI0819 04:37:56.224004 20842 solver.cpp:228] Iteration 24000, loss = 0.0387832\nI0819 04:37:56.224046 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:37:56.224063 20842 solver.cpp:244]     Train net output #1: loss = 0.0387832 (* 1 = 0.0387832 loss)\nI0819 04:37:56.305256 20842 sgd_solver.cpp:166] Iteration 24000, lr = 0.6\nI0819 04:40:13.183660 20842 solver.cpp:337] Iteration 24100, Testing 
net (#0)\nI0819 04:41:37.563413 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88428\nI0819 04:41:37.563752 20842 solver.cpp:404]     Test net output #1: loss = 0.462875 (* 1 = 0.462875 loss)\nI0819 04:41:38.880506 20842 solver.cpp:228] Iteration 24100, loss = 0.052753\nI0819 04:41:38.880548 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:41:38.880564 20842 solver.cpp:244]     Train net output #1: loss = 0.052753 (* 1 = 0.052753 loss)\nI0819 04:41:38.966866 20842 sgd_solver.cpp:166] Iteration 24100, lr = 0.6025\nI0819 04:43:55.808887 20842 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0819 04:45:20.201668 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0819 04:45:20.202002 20842 solver.cpp:404]     Test net output #1: loss = 0.460529 (* 1 = 0.460529 loss)\nI0819 04:45:21.518973 20842 solver.cpp:228] Iteration 24200, loss = 0.0743081\nI0819 04:45:21.519016 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:45:21.519032 20842 solver.cpp:244]     Train net output #1: loss = 0.0743081 (* 1 = 0.0743081 loss)\nI0819 04:45:21.609518 20842 sgd_solver.cpp:166] Iteration 24200, lr = 0.605\nI0819 04:47:38.103968 20842 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0819 04:49:02.506027 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0819 04:49:02.506356 20842 solver.cpp:404]     Test net output #1: loss = 0.453066 (* 1 = 0.453066 loss)\nI0819 04:49:03.823420 20842 solver.cpp:228] Iteration 24300, loss = 0.0347444\nI0819 04:49:03.823462 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:49:03.823478 20842 solver.cpp:244]     Train net output #1: loss = 0.0347444 (* 1 = 0.0347444 loss)\nI0819 04:49:03.906013 20842 sgd_solver.cpp:166] Iteration 24300, lr = 0.6075\nI0819 04:51:20.797967 20842 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0819 04:52:45.196883 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87936\nI0819 04:52:45.197191 
20842 solver.cpp:404]     Test net output #1: loss = 0.480724 (* 1 = 0.480724 loss)\nI0819 04:52:46.514464 20842 solver.cpp:228] Iteration 24400, loss = 0.107518\nI0819 04:52:46.514509 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:52:46.514533 20842 solver.cpp:244]     Train net output #1: loss = 0.107518 (* 1 = 0.107518 loss)\nI0819 04:52:46.601791 20842 sgd_solver.cpp:166] Iteration 24400, lr = 0.61\nI0819 04:55:03.476541 20842 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0819 04:56:27.876613 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 04:56:27.876929 20842 solver.cpp:404]     Test net output #1: loss = 0.476444 (* 1 = 0.476444 loss)\nI0819 04:56:29.192767 20842 solver.cpp:228] Iteration 24500, loss = 0.112936\nI0819 04:56:29.192814 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:56:29.192836 20842 solver.cpp:244]     Train net output #1: loss = 0.112936 (* 1 = 0.112936 loss)\nI0819 04:56:29.274534 20842 sgd_solver.cpp:166] Iteration 24500, lr = 0.6125\nI0819 04:58:45.863781 20842 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0819 05:00:10.260654 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87496\nI0819 05:00:10.260974 20842 solver.cpp:404]     Test net output #1: loss = 0.484341 (* 1 = 0.484341 loss)\nI0819 05:00:11.577162 20842 solver.cpp:228] Iteration 24600, loss = 0.0892656\nI0819 05:00:11.577227 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:00:11.577253 20842 solver.cpp:244]     Train net output #1: loss = 0.0892656 (* 1 = 0.0892656 loss)\nI0819 05:00:11.661813 20842 sgd_solver.cpp:166] Iteration 24600, lr = 0.615\nI0819 05:02:28.567683 20842 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0819 05:03:52.959983 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88568\nI0819 05:03:52.960338 20842 solver.cpp:404]     Test net output #1: loss = 0.464898 (* 1 = 0.464898 loss)\nI0819 05:03:54.276017 20842 
solver.cpp:228] Iteration 24700, loss = 0.0415366\nI0819 05:03:54.276060 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:03:54.276083 20842 solver.cpp:244]     Train net output #1: loss = 0.0415367 (* 1 = 0.0415367 loss)\nI0819 05:03:54.359143 20842 sgd_solver.cpp:166] Iteration 24700, lr = 0.6175\nI0819 05:06:11.186231 20842 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0819 05:07:35.573345 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0819 05:07:35.573664 20842 solver.cpp:404]     Test net output #1: loss = 0.457737 (* 1 = 0.457737 loss)\nI0819 05:07:36.889621 20842 solver.cpp:228] Iteration 24800, loss = 0.0452483\nI0819 05:07:36.889664 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:07:36.889688 20842 solver.cpp:244]     Train net output #1: loss = 0.0452483 (* 1 = 0.0452483 loss)\nI0819 05:07:36.978209 20842 sgd_solver.cpp:166] Iteration 24800, lr = 0.62\nI0819 05:09:53.869889 20842 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0819 05:11:18.269614 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87096\nI0819 05:11:18.269958 20842 solver.cpp:404]     Test net output #1: loss = 0.505093 (* 1 = 0.505093 loss)\nI0819 05:11:19.585736 20842 solver.cpp:228] Iteration 24900, loss = 0.0374939\nI0819 05:11:19.585783 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:11:19.585808 20842 solver.cpp:244]     Train net output #1: loss = 0.0374939 (* 1 = 0.0374939 loss)\nI0819 05:11:19.667661 20842 sgd_solver.cpp:166] Iteration 24900, lr = 0.6225\nI0819 05:13:36.577150 20842 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0819 05:15:00.971107 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0819 05:15:00.971441 20842 solver.cpp:404]     Test net output #1: loss = 0.476634 (* 1 = 0.476634 loss)\nI0819 05:15:02.288254 20842 solver.cpp:228] Iteration 25000, loss = 0.0168441\nI0819 05:15:02.288312 20842 solver.cpp:244]     Train net output 
#0: accuracy = 0.992\nI0819 05:15:02.288339 20842 solver.cpp:244]     Train net output #1: loss = 0.0168442 (* 1 = 0.0168442 loss)\nI0819 05:15:02.376478 20842 sgd_solver.cpp:166] Iteration 25000, lr = 0.625\nI0819 05:17:19.259310 20842 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0819 05:18:43.649998 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0819 05:18:43.650321 20842 solver.cpp:404]     Test net output #1: loss = 0.467928 (* 1 = 0.467928 loss)\nI0819 05:18:44.967121 20842 solver.cpp:228] Iteration 25100, loss = 0.0386797\nI0819 05:18:44.967167 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:18:44.967190 20842 solver.cpp:244]     Train net output #1: loss = 0.0386797 (* 1 = 0.0386797 loss)\nI0819 05:18:45.046116 20842 sgd_solver.cpp:166] Iteration 25100, lr = 0.6275\nI0819 05:21:01.614658 20842 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0819 05:22:25.990490 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88568\nI0819 05:22:25.990849 20842 solver.cpp:404]     Test net output #1: loss = 0.466227 (* 1 = 0.466227 loss)\nI0819 05:22:27.307884 20842 solver.cpp:228] Iteration 25200, loss = 0.093271\nI0819 05:22:27.307945 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:22:27.307970 20842 solver.cpp:244]     Train net output #1: loss = 0.093271 (* 1 = 0.093271 loss)\nI0819 05:22:27.391482 20842 sgd_solver.cpp:166] Iteration 25200, lr = 0.63\nI0819 05:24:44.296602 20842 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0819 05:26:08.694322 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0819 05:26:08.694636 20842 solver.cpp:404]     Test net output #1: loss = 0.470301 (* 1 = 0.470301 loss)\nI0819 05:26:10.010764 20842 solver.cpp:228] Iteration 25300, loss = 0.107322\nI0819 05:26:10.010810 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:26:10.010833 20842 solver.cpp:244]     Train net output #1: loss = 0.107322 (* 1 = 0.107322 
loss)\nI0819 05:26:10.089402 20842 sgd_solver.cpp:166] Iteration 25300, lr = 0.6325\nI0819 05:28:26.976106 20842 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0819 05:29:51.366092 20842 solver.cpp:404]     Test net output #0: accuracy = 0.886\nI0819 05:29:51.366408 20842 solver.cpp:404]     Test net output #1: loss = 0.466708 (* 1 = 0.466708 loss)\nI0819 05:29:52.682005 20842 solver.cpp:228] Iteration 25400, loss = 0.074164\nI0819 05:29:52.682068 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:29:52.682093 20842 solver.cpp:244]     Train net output #1: loss = 0.074164 (* 1 = 0.074164 loss)\nI0819 05:29:52.768121 20842 sgd_solver.cpp:166] Iteration 25400, lr = 0.635\nI0819 05:32:09.720180 20842 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0819 05:33:34.098057 20842 solver.cpp:404]     Test net output #0: accuracy = 0.868761\nI0819 05:33:34.098393 20842 solver.cpp:404]     Test net output #1: loss = 0.501898 (* 1 = 0.501898 loss)\nI0819 05:33:35.414103 20842 solver.cpp:228] Iteration 25500, loss = 0.0377843\nI0819 05:33:35.414146 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:33:35.414162 20842 solver.cpp:244]     Train net output #1: loss = 0.0377844 (* 1 = 0.0377844 loss)\nI0819 05:33:35.506063 20842 sgd_solver.cpp:166] Iteration 25500, lr = 0.6375\nI0819 05:35:52.582226 20842 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0819 05:37:16.955438 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 05:37:16.955781 20842 solver.cpp:404]     Test net output #1: loss = 0.476126 (* 1 = 0.476126 loss)\nI0819 05:37:18.271203 20842 solver.cpp:228] Iteration 25600, loss = 0.106143\nI0819 05:37:18.271241 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:37:18.271257 20842 solver.cpp:244]     Train net output #1: loss = 0.106143 (* 1 = 0.106143 loss)\nI0819 05:37:18.358840 20842 sgd_solver.cpp:166] Iteration 25600, lr = 0.64\nI0819 05:39:34.957115 20842 
solver.cpp:337] Iteration 25700, Testing net (#0)\nI0819 05:40:59.329915 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0819 05:40:59.330236 20842 solver.cpp:404]     Test net output #1: loss = 0.484928 (* 1 = 0.484928 loss)\nI0819 05:41:00.646117 20842 solver.cpp:228] Iteration 25700, loss = 0.0619458\nI0819 05:41:00.646172 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:41:00.646190 20842 solver.cpp:244]     Train net output #1: loss = 0.0619459 (* 1 = 0.0619459 loss)\nI0819 05:41:00.735311 20842 sgd_solver.cpp:166] Iteration 25700, lr = 0.6425\nI0819 05:43:17.760520 20842 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0819 05:44:42.136924 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0819 05:44:42.137230 20842 solver.cpp:404]     Test net output #1: loss = 0.446216 (* 1 = 0.446216 loss)\nI0819 05:44:43.453253 20842 solver.cpp:228] Iteration 25800, loss = 0.0876078\nI0819 05:44:43.453297 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:44:43.453313 20842 solver.cpp:244]     Train net output #1: loss = 0.0876079 (* 1 = 0.0876079 loss)\nI0819 05:44:43.537648 20842 sgd_solver.cpp:166] Iteration 25800, lr = 0.645\nI0819 05:47:00.569969 20842 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0819 05:48:24.939232 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87632\nI0819 05:48:24.939553 20842 solver.cpp:404]     Test net output #1: loss = 0.502879 (* 1 = 0.502879 loss)\nI0819 05:48:26.255808 20842 solver.cpp:228] Iteration 25900, loss = 0.102366\nI0819 05:48:26.255867 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:48:26.255884 20842 solver.cpp:244]     Train net output #1: loss = 0.102366 (* 1 = 0.102366 loss)\nI0819 05:48:26.338670 20842 sgd_solver.cpp:166] Iteration 25900, lr = 0.6475\nI0819 05:50:43.381691 20842 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0819 05:52:07.729738 20842 solver.cpp:404]     Test net output #0: 
accuracy = 0.8822\nI0819 05:52:07.730054 20842 solver.cpp:404]     Test net output #1: loss = 0.468474 (* 1 = 0.468474 loss)\nI0819 05:52:09.045646 20842 solver.cpp:228] Iteration 26000, loss = 0.0521359\nI0819 05:52:09.045703 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:52:09.045722 20842 solver.cpp:244]     Train net output #1: loss = 0.052136 (* 1 = 0.052136 loss)\nI0819 05:52:09.136823 20842 sgd_solver.cpp:166] Iteration 26000, lr = 0.65\nI0819 05:54:26.163679 20842 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0819 05:55:50.273507 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87588\nI0819 05:55:50.273807 20842 solver.cpp:404]     Test net output #1: loss = 0.473086 (* 1 = 0.473086 loss)\nI0819 05:55:51.589367 20842 solver.cpp:228] Iteration 26100, loss = 0.151356\nI0819 05:55:51.589411 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 05:55:51.589426 20842 solver.cpp:244]     Train net output #1: loss = 0.151356 (* 1 = 0.151356 loss)\nI0819 05:55:51.674942 20842 sgd_solver.cpp:166] Iteration 26100, lr = 0.6525\nI0819 05:58:08.691406 20842 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0819 05:59:33.065074 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87688\nI0819 05:59:33.065368 20842 solver.cpp:404]     Test net output #1: loss = 0.483034 (* 1 = 0.483034 loss)\nI0819 05:59:34.381115 20842 solver.cpp:228] Iteration 26200, loss = 0.0736336\nI0819 05:59:34.381158 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:59:34.381175 20842 solver.cpp:244]     Train net output #1: loss = 0.0736338 (* 1 = 0.0736338 loss)\nI0819 05:59:34.464056 20842 sgd_solver.cpp:166] Iteration 26200, lr = 0.655\nI0819 06:01:51.067229 20842 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0819 06:03:15.448058 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0819 06:03:15.448354 20842 solver.cpp:404]     Test net output #1: loss = 0.469478 (* 1 = 0.469478 
loss)\nI0819 06:03:16.764292 20842 solver.cpp:228] Iteration 26300, loss = 0.0408959\nI0819 06:03:16.764353 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 06:03:16.764371 20842 solver.cpp:244]     Train net output #1: loss = 0.040896 (* 1 = 0.040896 loss)\nI0819 06:03:16.855228 20842 sgd_solver.cpp:166] Iteration 26300, lr = 0.6575\nI0819 06:05:33.679639 20842 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0819 06:06:58.066965 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88928\nI0819 06:06:58.067306 20842 solver.cpp:404]     Test net output #1: loss = 0.431963 (* 1 = 0.431963 loss)\nI0819 06:06:59.382756 20842 solver.cpp:228] Iteration 26400, loss = 0.0483819\nI0819 06:06:59.382817 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:06:59.382835 20842 solver.cpp:244]     Train net output #1: loss = 0.048382 (* 1 = 0.048382 loss)\nI0819 06:06:59.464610 20842 sgd_solver.cpp:166] Iteration 26400, lr = 0.66\nI0819 06:09:16.362509 20842 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0819 06:10:40.750582 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87288\nI0819 06:10:40.750890 20842 solver.cpp:404]     Test net output #1: loss = 0.498472 (* 1 = 0.498472 loss)\nI0819 06:10:42.066435 20842 solver.cpp:228] Iteration 26500, loss = 0.0779634\nI0819 06:10:42.066478 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:10:42.066495 20842 solver.cpp:244]     Train net output #1: loss = 0.0779636 (* 1 = 0.0779636 loss)\nI0819 06:10:42.155418 20842 sgd_solver.cpp:166] Iteration 26500, lr = 0.6625\nI0819 06:12:58.739564 20842 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0819 06:14:23.127521 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88868\nI0819 06:14:23.127826 20842 solver.cpp:404]     Test net output #1: loss = 0.438095 (* 1 = 0.438095 loss)\nI0819 06:14:24.443711 20842 solver.cpp:228] Iteration 26600, loss = 0.0480101\nI0819 06:14:24.443773 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:14:24.443791 20842 solver.cpp:244]     Train net output #1: loss = 0.0480102 (* 1 = 0.0480102 loss)\nI0819 06:14:24.526764 20842 sgd_solver.cpp:166] Iteration 26600, lr = 0.665\nI0819 06:16:41.400998 20842 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0819 06:18:05.791656 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0819 06:18:05.791997 20842 solver.cpp:404]     Test net output #1: loss = 0.486136 (* 1 = 0.486136 loss)\nI0819 06:18:07.107764 20842 solver.cpp:228] Iteration 26700, loss = 0.208177\nI0819 06:18:07.107821 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:18:07.107838 20842 solver.cpp:244]     Train net output #1: loss = 0.208177 (* 1 = 0.208177 loss)\nI0819 06:18:07.193971 20842 sgd_solver.cpp:166] Iteration 26700, lr = 0.6675\nI0819 06:20:24.054918 20842 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0819 06:21:48.435520 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8876\nI0819 06:21:48.435822 20842 solver.cpp:404]     Test net output #1: loss = 0.436897 (* 1 = 0.436897 loss)\nI0819 06:21:49.751932 20842 solver.cpp:228] Iteration 26800, loss = 0.0391723\nI0819 06:21:49.751986 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 06:21:49.752002 20842 solver.cpp:244]     Train net output #1: loss = 0.0391724 (* 1 = 0.0391724 loss)\nI0819 06:21:49.832461 20842 sgd_solver.cpp:166] Iteration 26800, lr = 0.67\nI0819 06:24:06.329597 20842 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0819 06:25:30.726042 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0819 06:25:30.726388 20842 solver.cpp:404]     Test net output #1: loss = 0.443175 (* 1 = 0.443175 loss)\nI0819 06:25:32.041806 20842 solver.cpp:228] Iteration 26900, loss = 0.0511102\nI0819 06:25:32.041863 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:25:32.041888 20842 solver.cpp:244]     Train net 
output #1: loss = 0.0511103 (* 1 = 0.0511103 loss)\nI0819 06:25:32.120857 20842 sgd_solver.cpp:166] Iteration 26900, lr = 0.6725\nI0819 06:27:48.969130 20842 solver.cpp:337] Iteration 27000, Testing net (#0)\nI0819 06:29:13.354984 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0819 06:29:13.355299 20842 solver.cpp:404]     Test net output #1: loss = 0.447067 (* 1 = 0.447067 loss)\nI0819 06:29:14.670850 20842 solver.cpp:228] Iteration 27000, loss = 0.0342315\nI0819 06:29:14.670910 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:29:14.670928 20842 solver.cpp:244]     Train net output #1: loss = 0.0342316 (* 1 = 0.0342316 loss)\nI0819 06:29:14.758116 20842 sgd_solver.cpp:166] Iteration 27000, lr = 0.675\nI0819 06:31:31.614832 20842 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0819 06:32:55.987608 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 06:32:55.987941 20842 solver.cpp:404]     Test net output #1: loss = 0.467451 (* 1 = 0.467451 loss)\nI0819 06:32:57.303165 20842 solver.cpp:228] Iteration 27100, loss = 0.108285\nI0819 06:32:57.303223 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:32:57.303242 20842 solver.cpp:244]     Train net output #1: loss = 0.108285 (* 1 = 0.108285 loss)\nI0819 06:32:57.389520 20842 sgd_solver.cpp:166] Iteration 27100, lr = 0.6775\nI0819 06:35:14.338903 20842 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0819 06:36:38.717898 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0819 06:36:38.718212 20842 solver.cpp:404]     Test net output #1: loss = 0.44083 (* 1 = 0.44083 loss)\nI0819 06:36:40.034160 20842 solver.cpp:228] Iteration 27200, loss = 0.0744873\nI0819 06:36:40.034214 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:36:40.034230 20842 solver.cpp:244]     Train net output #1: loss = 0.0744874 (* 1 = 0.0744874 loss)\nI0819 06:36:40.121804 20842 sgd_solver.cpp:166] Iteration 27200, lr = 
0.68\nI0819 06:38:56.949206 20842 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0819 06:40:21.328920 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0819 06:40:21.329222 20842 solver.cpp:404]     Test net output #1: loss = 0.480847 (* 1 = 0.480847 loss)\nI0819 06:40:22.644306 20842 solver.cpp:228] Iteration 27300, loss = 0.0404085\nI0819 06:40:22.644359 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:40:22.644376 20842 solver.cpp:244]     Train net output #1: loss = 0.0404086 (* 1 = 0.0404086 loss)\nI0819 06:40:22.732421 20842 sgd_solver.cpp:166] Iteration 27300, lr = 0.6825\nI0819 06:42:39.260658 20842 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0819 06:44:03.640914 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 06:44:03.641249 20842 solver.cpp:404]     Test net output #1: loss = 0.458928 (* 1 = 0.458928 loss)\nI0819 06:44:04.957012 20842 solver.cpp:228] Iteration 27400, loss = 0.150472\nI0819 06:44:04.957072 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:44:04.957090 20842 solver.cpp:244]     Train net output #1: loss = 0.150472 (* 1 = 0.150472 loss)\nI0819 06:44:05.043207 20842 sgd_solver.cpp:166] Iteration 27400, lr = 0.685\nI0819 06:46:21.933982 20842 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0819 06:47:46.318984 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87796\nI0819 06:47:46.319296 20842 solver.cpp:404]     Test net output #1: loss = 0.468104 (* 1 = 0.468104 loss)\nI0819 06:47:47.635365 20842 solver.cpp:228] Iteration 27500, loss = 0.139797\nI0819 06:47:47.635423 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 06:47:47.635442 20842 solver.cpp:244]     Train net output #1: loss = 0.139797 (* 1 = 0.139797 loss)\nI0819 06:47:47.713584 20842 sgd_solver.cpp:166] Iteration 27500, lr = 0.6875\nI0819 06:50:04.624541 20842 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0819 06:51:29.015053 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.87992\nI0819 06:51:29.015393 20842 solver.cpp:404]     Test net output #1: loss = 0.466088 (* 1 = 0.466088 loss)\nI0819 06:51:30.330890 20842 solver.cpp:228] Iteration 27600, loss = 0.144865\nI0819 06:51:30.330950 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:51:30.330967 20842 solver.cpp:244]     Train net output #1: loss = 0.144865 (* 1 = 0.144865 loss)\nI0819 06:51:30.414466 20842 sgd_solver.cpp:166] Iteration 27600, lr = 0.69\nI0819 06:53:47.346693 20842 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0819 06:55:11.731000 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0819 06:55:11.731317 20842 solver.cpp:404]     Test net output #1: loss = 0.450174 (* 1 = 0.450174 loss)\nI0819 06:55:13.047082 20842 solver.cpp:228] Iteration 27700, loss = 0.0926541\nI0819 06:55:13.047143 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:55:13.047161 20842 solver.cpp:244]     Train net output #1: loss = 0.0926543 (* 1 = 0.0926543 loss)\nI0819 06:55:13.136168 20842 sgd_solver.cpp:166] Iteration 27700, lr = 0.6925\nI0819 06:57:30.068095 20842 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0819 06:58:54.462610 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 06:58:54.462932 20842 solver.cpp:404]     Test net output #1: loss = 0.469024 (* 1 = 0.469024 loss)\nI0819 06:58:55.778743 20842 solver.cpp:228] Iteration 27800, loss = 0.213446\nI0819 06:58:55.778802 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 06:58:55.778821 20842 solver.cpp:244]     Train net output #1: loss = 0.213446 (* 1 = 0.213446 loss)\nI0819 06:58:55.873157 20842 sgd_solver.cpp:166] Iteration 27800, lr = 0.695\nI0819 07:01:12.365135 20842 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0819 07:02:36.763025 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885161\nI0819 07:02:36.763356 20842 solver.cpp:404]     Test net output 
#1: loss = 0.452582 (* 1 = 0.452582 loss)\nI0819 07:02:38.079058 20842 solver.cpp:228] Iteration 27900, loss = 0.0378734\nI0819 07:02:38.079118 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:02:38.079135 20842 solver.cpp:244]     Train net output #1: loss = 0.0378736 (* 1 = 0.0378736 loss)\nI0819 07:02:38.163398 20842 sgd_solver.cpp:166] Iteration 27900, lr = 0.6975\nI0819 07:04:55.016283 20842 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0819 07:06:19.408027 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88056\nI0819 07:06:19.408365 20842 solver.cpp:404]     Test net output #1: loss = 0.449495 (* 1 = 0.449495 loss)\nI0819 07:06:20.723892 20842 solver.cpp:228] Iteration 28000, loss = 0.0974994\nI0819 07:06:20.723951 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 07:06:20.723970 20842 solver.cpp:244]     Train net output #1: loss = 0.0974995 (* 1 = 0.0974995 loss)\nI0819 07:06:20.806480 20842 sgd_solver.cpp:166] Iteration 28000, lr = 0.7\nI0819 07:08:37.647389 20842 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0819 07:10:02.039708 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0819 07:10:02.039999 20842 solver.cpp:404]     Test net output #1: loss = 0.467533 (* 1 = 0.467533 loss)\nI0819 07:10:03.355450 20842 solver.cpp:228] Iteration 28100, loss = 0.109347\nI0819 07:10:03.355494 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:10:03.355509 20842 solver.cpp:244]     Train net output #1: loss = 0.109347 (* 1 = 0.109347 loss)\nI0819 07:10:03.434213 20842 sgd_solver.cpp:166] Iteration 28100, lr = 0.7025\nI0819 07:12:19.977568 20842 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0819 07:13:44.335319 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883361\nI0819 07:13:44.335659 20842 solver.cpp:404]     Test net output #1: loss = 0.448448 (* 1 = 0.448448 loss)\nI0819 07:13:45.651309 20842 solver.cpp:228] Iteration 28200, loss = 
0.0913826\nI0819 07:13:45.651351 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:13:45.651367 20842 solver.cpp:244]     Train net output #1: loss = 0.0913827 (* 1 = 0.0913827 loss)\nI0819 07:13:45.740103 20842 sgd_solver.cpp:166] Iteration 28200, lr = 0.705\nI0819 07:16:02.705255 20842 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0819 07:17:27.062636 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87728\nI0819 07:17:27.062954 20842 solver.cpp:404]     Test net output #1: loss = 0.464562 (* 1 = 0.464562 loss)\nI0819 07:17:28.378545 20842 solver.cpp:228] Iteration 28300, loss = 0.0707043\nI0819 07:17:28.378587 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:17:28.378604 20842 solver.cpp:244]     Train net output #1: loss = 0.0707045 (* 1 = 0.0707045 loss)\nI0819 07:17:28.461946 20842 sgd_solver.cpp:166] Iteration 28300, lr = 0.7075\nI0819 07:19:45.385614 20842 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0819 07:21:09.494825 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885\nI0819 07:21:09.495154 20842 solver.cpp:404]     Test net output #1: loss = 0.437259 (* 1 = 0.437259 loss)\nI0819 07:21:10.810951 20842 solver.cpp:228] Iteration 28400, loss = 0.0424757\nI0819 07:21:10.811012 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 07:21:10.811030 20842 solver.cpp:244]     Train net output #1: loss = 0.0424758 (* 1 = 0.0424758 loss)\nI0819 07:21:10.901093 20842 sgd_solver.cpp:166] Iteration 28400, lr = 0.71\nI0819 07:23:27.799922 20842 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0819 07:24:52.166877 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 07:24:52.167189 20842 solver.cpp:404]     Test net output #1: loss = 0.444834 (* 1 = 0.444834 loss)\nI0819 07:24:53.482851 20842 solver.cpp:228] Iteration 28500, loss = 0.0426236\nI0819 07:24:53.482910 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:24:53.482929 
20842 solver.cpp:244]     Train net output #1: loss = 0.0426237 (* 1 = 0.0426237 loss)\nI0819 07:24:53.571759 20842 sgd_solver.cpp:166] Iteration 28500, lr = 0.7125\nI0819 07:27:10.361552 20842 solver.cpp:337] Iteration 28600, Testing net (#0)\nI0819 07:28:34.723245 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88948\nI0819 07:28:34.723536 20842 solver.cpp:404]     Test net output #1: loss = 0.420324 (* 1 = 0.420324 loss)\nI0819 07:28:36.039289 20842 solver.cpp:228] Iteration 28600, loss = 0.120832\nI0819 07:28:36.039331 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 07:28:36.039347 20842 solver.cpp:244]     Train net output #1: loss = 0.120832 (* 1 = 0.120832 loss)\nI0819 07:28:36.122898 20842 sgd_solver.cpp:166] Iteration 28600, lr = 0.715\nI0819 07:30:52.954314 20842 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0819 07:32:17.319798 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 07:32:17.320124 20842 solver.cpp:404]     Test net output #1: loss = 0.446225 (* 1 = 0.446225 loss)\nI0819 07:32:18.635654 20842 solver.cpp:228] Iteration 28700, loss = 0.0441437\nI0819 07:32:18.635711 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:32:18.635730 20842 solver.cpp:244]     Train net output #1: loss = 0.0441438 (* 1 = 0.0441438 loss)\nI0819 07:32:18.723117 20842 sgd_solver.cpp:166] Iteration 28700, lr = 0.7175\nI0819 07:34:35.555670 20842 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0819 07:35:59.928305 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885201\nI0819 07:35:59.928643 20842 solver.cpp:404]     Test net output #1: loss = 0.441295 (* 1 = 0.441295 loss)\nI0819 07:36:01.244088 20842 solver.cpp:228] Iteration 28800, loss = 0.036552\nI0819 07:36:01.244130 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 07:36:01.244148 20842 solver.cpp:244]     Train net output #1: loss = 0.0365521 (* 1 = 0.0365521 loss)\nI0819 07:36:01.329742 20842 
sgd_solver.cpp:166] Iteration 28800, lr = 0.72\nI0819 07:38:18.226807 20842 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0819 07:39:42.610363 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0819 07:39:42.610690 20842 solver.cpp:404]     Test net output #1: loss = 0.426292 (* 1 = 0.426292 loss)\nI0819 07:39:43.926157 20842 solver.cpp:228] Iteration 28900, loss = 0.107324\nI0819 07:39:43.926200 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:39:43.926218 20842 solver.cpp:244]     Train net output #1: loss = 0.107324 (* 1 = 0.107324 loss)\nI0819 07:39:44.012233 20842 sgd_solver.cpp:166] Iteration 28900, lr = 0.7225\nI0819 07:42:00.857151 20842 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0819 07:43:25.239198 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881921\nI0819 07:43:25.239529 20842 solver.cpp:404]     Test net output #1: loss = 0.466128 (* 1 = 0.466128 loss)\nI0819 07:43:26.555550 20842 solver.cpp:228] Iteration 29000, loss = 0.0790323\nI0819 07:43:26.555608 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:43:26.555626 20842 solver.cpp:244]     Train net output #1: loss = 0.0790324 (* 1 = 0.0790324 loss)\nI0819 07:43:26.642757 20842 sgd_solver.cpp:166] Iteration 29000, lr = 0.725\nI0819 07:45:43.484813 20842 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0819 07:47:07.865931 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0819 07:47:07.866253 20842 solver.cpp:404]     Test net output #1: loss = 0.433997 (* 1 = 0.433997 loss)\nI0819 07:47:09.182991 20842 solver.cpp:228] Iteration 29100, loss = 0.0885962\nI0819 07:47:09.183050 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:47:09.183068 20842 solver.cpp:244]     Train net output #1: loss = 0.0885962 (* 1 = 0.0885962 loss)\nI0819 07:47:09.269783 20842 sgd_solver.cpp:166] Iteration 29100, lr = 0.7275\nI0819 07:49:26.142565 20842 solver.cpp:337] Iteration 29200, Testing net 
(#0)\nI0819 07:50:50.511695 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8766\nI0819 07:50:50.512024 20842 solver.cpp:404]     Test net output #1: loss = 0.47265 (* 1 = 0.47265 loss)\nI0819 07:50:51.828768 20842 solver.cpp:228] Iteration 29200, loss = 0.0678933\nI0819 07:50:51.828809 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:50:51.828825 20842 solver.cpp:244]     Train net output #1: loss = 0.0678933 (* 1 = 0.0678933 loss)\nI0819 07:50:51.912010 20842 sgd_solver.cpp:166] Iteration 29200, lr = 0.73\nI0819 07:53:08.734172 20842 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0819 07:54:33.097682 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 07:54:33.098009 20842 solver.cpp:404]     Test net output #1: loss = 0.443104 (* 1 = 0.443104 loss)\nI0819 07:54:34.414379 20842 solver.cpp:228] Iteration 29300, loss = 0.108007\nI0819 07:54:34.414435 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:54:34.414453 20842 solver.cpp:244]     Train net output #1: loss = 0.108007 (* 1 = 0.108007 loss)\nI0819 07:54:34.501538 20842 sgd_solver.cpp:166] Iteration 29300, lr = 0.7325\nI0819 07:56:50.951489 20842 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0819 07:58:15.317128 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88512\nI0819 07:58:15.317469 20842 solver.cpp:404]     Test net output #1: loss = 0.434656 (* 1 = 0.434656 loss)\nI0819 07:58:16.633277 20842 solver.cpp:228] Iteration 29400, loss = 0.0793023\nI0819 07:58:16.633333 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:58:16.633352 20842 solver.cpp:244]     Train net output #1: loss = 0.0793023 (* 1 = 0.0793023 loss)\nI0819 07:58:16.722357 20842 sgd_solver.cpp:166] Iteration 29400, lr = 0.735\nI0819 08:00:33.521739 20842 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0819 08:01:57.898391 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0819 08:01:57.898720 20842 
solver.cpp:404]     Test net output #1: loss = 0.459323 (* 1 = 0.459323 loss)\nI0819 08:01:59.214324 20842 solver.cpp:228] Iteration 29500, loss = 0.14292\nI0819 08:01:59.214365 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 08:01:59.214381 20842 solver.cpp:244]     Train net output #1: loss = 0.14292 (* 1 = 0.14292 loss)\nI0819 08:01:59.302678 20842 sgd_solver.cpp:166] Iteration 29500, lr = 0.7375\nI0819 08:04:16.103869 20842 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0819 08:05:40.493863 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88792\nI0819 08:05:40.494204 20842 solver.cpp:404]     Test net output #1: loss = 0.436225 (* 1 = 0.436225 loss)\nI0819 08:05:41.809842 20842 solver.cpp:228] Iteration 29600, loss = 0.0753559\nI0819 08:05:41.809885 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:05:41.809900 20842 solver.cpp:244]     Train net output #1: loss = 0.075356 (* 1 = 0.075356 loss)\nI0819 08:05:41.897436 20842 sgd_solver.cpp:166] Iteration 29600, lr = 0.74\nI0819 08:07:58.554958 20842 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0819 08:09:22.941061 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0819 08:09:22.941400 20842 solver.cpp:404]     Test net output #1: loss = 0.466649 (* 1 = 0.466649 loss)\nI0819 08:09:24.256973 20842 solver.cpp:228] Iteration 29700, loss = 0.137703\nI0819 08:09:24.257012 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 08:09:24.257030 20842 solver.cpp:244]     Train net output #1: loss = 0.137703 (* 1 = 0.137703 loss)\nI0819 08:09:24.343386 20842 sgd_solver.cpp:166] Iteration 29700, lr = 0.7425\nI0819 08:11:41.144182 20842 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0819 08:13:05.521997 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88856\nI0819 08:13:05.522322 20842 solver.cpp:404]     Test net output #1: loss = 0.433577 (* 1 = 0.433577 loss)\nI0819 08:13:06.837682 20842 solver.cpp:228] 
Iteration 29800, loss = 0.0762453\nI0819 08:13:06.837724 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:13:06.837740 20842 solver.cpp:244]     Train net output #1: loss = 0.0762453 (* 1 = 0.0762453 loss)\nI0819 08:13:06.922632 20842 sgd_solver.cpp:166] Iteration 29800, lr = 0.745\nI0819 08:15:23.406219 20842 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0819 08:16:47.792560 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88548\nI0819 08:16:47.792932 20842 solver.cpp:404]     Test net output #1: loss = 0.433674 (* 1 = 0.433674 loss)\nI0819 08:16:49.108244 20842 solver.cpp:228] Iteration 29900, loss = 0.0779497\nI0819 08:16:49.108285 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 08:16:49.108301 20842 solver.cpp:244]     Train net output #1: loss = 0.0779497 (* 1 = 0.0779497 loss)\nI0819 08:16:49.197983 20842 sgd_solver.cpp:166] Iteration 29900, lr = 0.7475\nI0819 08:19:06.085355 20842 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0819 08:20:30.469028 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0819 08:20:30.469359 20842 solver.cpp:404]     Test net output #1: loss = 0.457149 (* 1 = 0.457149 loss)\nI0819 08:20:31.785115 20842 solver.cpp:228] Iteration 30000, loss = 0.0362526\nI0819 08:20:31.785173 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 08:20:31.785190 20842 solver.cpp:244]     Train net output #1: loss = 0.0362526 (* 1 = 0.0362526 loss)\nI0819 08:20:31.875061 20842 sgd_solver.cpp:166] Iteration 30000, lr = 0.75\nI0819 08:22:48.769700 20842 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0819 08:24:13.142132 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0819 08:24:13.142451 20842 solver.cpp:404]     Test net output #1: loss = 0.432065 (* 1 = 0.432065 loss)\nI0819 08:24:14.458461 20842 solver.cpp:228] Iteration 30100, loss = 0.055094\nI0819 08:24:14.458501 20842 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI0819 08:24:14.458518 20842 solver.cpp:244]     Train net output #1: loss = 0.0550941 (* 1 = 0.0550941 loss)\nI0819 08:24:14.542388 20842 sgd_solver.cpp:166] Iteration 30100, lr = 0.7525\nI0819 08:26:31.462085 20842 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0819 08:27:55.826256 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0819 08:27:55.826581 20842 solver.cpp:404]     Test net output #1: loss = 0.42719 (* 1 = 0.42719 loss)\nI0819 08:27:57.141841 20842 solver.cpp:228] Iteration 30200, loss = 0.165835\nI0819 08:27:57.141898 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 08:27:57.141916 20842 solver.cpp:244]     Train net output #1: loss = 0.165835 (* 1 = 0.165835 loss)\nI0819 08:27:57.229146 20842 sgd_solver.cpp:166] Iteration 30200, lr = 0.755\nI0819 08:30:13.732789 20842 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0819 08:31:38.112618 20842 solver.cpp:404]     Test net output #0: accuracy = 0.877241\nI0819 08:31:38.112942 20842 solver.cpp:404]     Test net output #1: loss = 0.471516 (* 1 = 0.471516 loss)\nI0819 08:31:39.428872 20842 solver.cpp:228] Iteration 30300, loss = 0.0977758\nI0819 08:31:39.428933 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:31:39.428951 20842 solver.cpp:244]     Train net output #1: loss = 0.0977758 (* 1 = 0.0977758 loss)\nI0819 08:31:39.516494 20842 sgd_solver.cpp:166] Iteration 30300, lr = 0.7575\nI0819 08:33:56.572160 20842 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0819 08:35:20.946148 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0819 08:35:20.946490 20842 solver.cpp:404]     Test net output #1: loss = 0.469121 (* 1 = 0.469121 loss)\nI0819 08:35:22.265859 20842 solver.cpp:228] Iteration 30400, loss = 0.0979586\nI0819 08:35:22.265902 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:35:22.265918 20842 solver.cpp:244]     Train net output #1: loss = 0.0979586 (* 1 = 0.0979586 
loss)\nI0819 08:35:22.348809 20842 sgd_solver.cpp:166] Iteration 30400, lr = 0.76\nI0819 08:37:39.320960 20842 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0819 08:39:03.697048 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88152\nI0819 08:39:03.697360 20842 solver.cpp:404]     Test net output #1: loss = 0.470678 (* 1 = 0.470678 loss)\nI0819 08:39:05.017323 20842 solver.cpp:228] Iteration 30500, loss = 0.0740652\nI0819 08:39:05.017367 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:39:05.017385 20842 solver.cpp:244]     Train net output #1: loss = 0.0740652 (* 1 = 0.0740652 loss)\nI0819 08:39:05.099931 20842 sgd_solver.cpp:166] Iteration 30500, lr = 0.7625\nI0819 08:41:22.095070 20842 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0819 08:42:46.307476 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0819 08:42:46.307785 20842 solver.cpp:404]     Test net output #1: loss = 0.44478 (* 1 = 0.44478 loss)\nI0819 08:42:47.626395 20842 solver.cpp:228] Iteration 30600, loss = 0.0478403\nI0819 08:42:47.626436 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:42:47.626452 20842 solver.cpp:244]     Train net output #1: loss = 0.0478403 (* 1 = 0.0478403 loss)\nI0819 08:42:47.710111 20842 sgd_solver.cpp:166] Iteration 30600, lr = 0.765\nI0819 08:45:04.220476 20842 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0819 08:46:28.187083 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88584\nI0819 08:46:28.187376 20842 solver.cpp:404]     Test net output #1: loss = 0.432597 (* 1 = 0.432597 loss)\nI0819 08:46:29.506836 20842 solver.cpp:228] Iteration 30700, loss = 0.0695269\nI0819 08:46:29.506880 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:46:29.506896 20842 solver.cpp:244]     Train net output #1: loss = 0.0695268 (* 1 = 0.0695268 loss)\nI0819 08:46:29.592806 20842 sgd_solver.cpp:166] Iteration 30700, lr = 0.7675\nI0819 08:48:46.532001 20842 
solver.cpp:337] Iteration 30800, Testing net (#0)\nI0819 08:50:10.892745 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0819 08:50:10.893075 20842 solver.cpp:404]     Test net output #1: loss = 0.455216 (* 1 = 0.455216 loss)\nI0819 08:50:12.212914 20842 solver.cpp:228] Iteration 30800, loss = 0.0664496\nI0819 08:50:12.212959 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:50:12.212973 20842 solver.cpp:244]     Train net output #1: loss = 0.0664496 (* 1 = 0.0664496 loss)\nI0819 08:50:12.291812 20842 sgd_solver.cpp:166] Iteration 30800, lr = 0.77\nI0819 08:52:29.284329 20842 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0819 08:53:53.664721 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87416\nI0819 08:53:53.665026 20842 solver.cpp:404]     Test net output #1: loss = 0.466724 (* 1 = 0.466724 loss)\nI0819 08:53:54.984925 20842 solver.cpp:228] Iteration 30900, loss = 0.110551\nI0819 08:53:54.984969 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 08:53:54.984985 20842 solver.cpp:244]     Train net output #1: loss = 0.110551 (* 1 = 0.110551 loss)\nI0819 08:53:55.071331 20842 sgd_solver.cpp:166] Iteration 30900, lr = 0.7725\nI0819 08:56:11.530956 20842 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0819 08:57:35.912842 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 08:57:35.913187 20842 solver.cpp:404]     Test net output #1: loss = 0.448568 (* 1 = 0.448568 loss)\nI0819 08:57:37.232902 20842 solver.cpp:228] Iteration 31000, loss = 0.0537235\nI0819 08:57:37.232942 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:57:37.232959 20842 solver.cpp:244]     Train net output #1: loss = 0.0537235 (* 1 = 0.0537235 loss)\nI0819 08:57:37.321861 20842 sgd_solver.cpp:166] Iteration 31000, lr = 0.775\nI0819 08:59:54.357822 20842 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0819 09:01:18.748440 20842 solver.cpp:404]     Test net output #0: 
accuracy = 0.88124\nI0819 09:01:18.748773 20842 solver.cpp:404]     Test net output #1: loss = 0.451825 (* 1 = 0.451825 loss)\nI0819 09:01:20.068938 20842 solver.cpp:228] Iteration 31100, loss = 0.117896\nI0819 09:01:20.068980 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:01:20.068995 20842 solver.cpp:244]     Train net output #1: loss = 0.117896 (* 1 = 0.117896 loss)\nI0819 09:01:20.151531 20842 sgd_solver.cpp:166] Iteration 31100, lr = 0.7775\nI0819 09:03:37.113473 20842 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0819 09:05:01.505621 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88104\nI0819 09:05:01.505960 20842 solver.cpp:404]     Test net output #1: loss = 0.44815 (* 1 = 0.44815 loss)\nI0819 09:05:02.826102 20842 solver.cpp:228] Iteration 31200, loss = 0.0717374\nI0819 09:05:02.826143 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:05:02.826159 20842 solver.cpp:244]     Train net output #1: loss = 0.0717374 (* 1 = 0.0717374 loss)\nI0819 09:05:02.913743 20842 sgd_solver.cpp:166] Iteration 31200, lr = 0.78\nI0819 09:07:19.865072 20842 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0819 09:08:44.251226 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0819 09:08:44.251579 20842 solver.cpp:404]     Test net output #1: loss = 0.44544 (* 1 = 0.44544 loss)\nI0819 09:08:45.571609 20842 solver.cpp:228] Iteration 31300, loss = 0.044426\nI0819 09:08:45.571655 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:08:45.571669 20842 solver.cpp:244]     Train net output #1: loss = 0.044426 (* 1 = 0.044426 loss)\nI0819 09:08:45.659610 20842 sgd_solver.cpp:166] Iteration 31300, lr = 0.7825\nI0819 09:11:02.198120 20842 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0819 09:12:26.577088 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0819 09:12:26.577405 20842 solver.cpp:404]     Test net output #1: loss = 0.440268 (* 1 = 0.440268 
loss)\nI0819 09:12:27.897619 20842 solver.cpp:228] Iteration 31400, loss = 0.0319335\nI0819 09:12:27.897660 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:12:27.897675 20842 solver.cpp:244]     Train net output #1: loss = 0.0319335 (* 1 = 0.0319335 loss)\nI0819 09:12:27.979887 20842 sgd_solver.cpp:166] Iteration 31400, lr = 0.785\nI0819 09:14:44.980901 20842 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0819 09:16:09.368553 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88512\nI0819 09:16:09.368882 20842 solver.cpp:404]     Test net output #1: loss = 0.438711 (* 1 = 0.438711 loss)\nI0819 09:16:10.687469 20842 solver.cpp:228] Iteration 31500, loss = 0.0887215\nI0819 09:16:10.687528 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:16:10.687546 20842 solver.cpp:244]     Train net output #1: loss = 0.0887215 (* 1 = 0.0887215 loss)\nI0819 09:16:10.767570 20842 sgd_solver.cpp:166] Iteration 31500, lr = 0.7875\nI0819 09:18:27.742287 20842 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0819 09:19:52.133615 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88436\nI0819 09:19:52.133960 20842 solver.cpp:404]     Test net output #1: loss = 0.453667 (* 1 = 0.453667 loss)\nI0819 09:19:53.453984 20842 solver.cpp:228] Iteration 31600, loss = 0.135533\nI0819 09:19:53.454044 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 09:19:53.454061 20842 solver.cpp:244]     Train net output #1: loss = 0.135533 (* 1 = 0.135533 loss)\nI0819 09:19:53.534271 20842 sgd_solver.cpp:166] Iteration 31600, lr = 0.79\nI0819 09:22:10.531436 20842 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0819 09:23:34.916026 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88156\nI0819 09:23:34.916344 20842 solver.cpp:404]     Test net output #1: loss = 0.457532 (* 1 = 0.457532 loss)\nI0819 09:23:36.235990 20842 solver.cpp:228] Iteration 31700, loss = 0.0703571\nI0819 09:23:36.236034 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:23:36.236050 20842 solver.cpp:244]     Train net output #1: loss = 0.0703571 (* 1 = 0.0703571 loss)\nI0819 09:23:36.318322 20842 sgd_solver.cpp:166] Iteration 31700, lr = 0.7925\nI0819 09:25:53.284448 20842 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0819 09:27:17.675118 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87928\nI0819 09:27:17.675460 20842 solver.cpp:404]     Test net output #1: loss = 0.469525 (* 1 = 0.469525 loss)\nI0819 09:27:18.994674 20842 solver.cpp:228] Iteration 31800, loss = 0.0694999\nI0819 09:27:18.994717 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:27:18.994732 20842 solver.cpp:244]     Train net output #1: loss = 0.0694999 (* 1 = 0.0694999 loss)\nI0819 09:27:19.080278 20842 sgd_solver.cpp:166] Iteration 31800, lr = 0.795\nI0819 09:29:36.081187 20842 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0819 09:31:00.468024 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87752\nI0819 09:31:00.468312 20842 solver.cpp:404]     Test net output #1: loss = 0.457319 (* 1 = 0.457319 loss)\nI0819 09:31:01.787161 20842 solver.cpp:228] Iteration 31900, loss = 0.126815\nI0819 09:31:01.787215 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 09:31:01.787232 20842 solver.cpp:244]     Train net output #1: loss = 0.126815 (* 1 = 0.126815 loss)\nI0819 09:31:01.872490 20842 sgd_solver.cpp:166] Iteration 31900, lr = 0.7975\nI0819 09:33:18.383833 20842 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0819 09:34:42.770193 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88916\nI0819 09:34:42.770514 20842 solver.cpp:404]     Test net output #1: loss = 0.426162 (* 1 = 0.426162 loss)\nI0819 09:34:44.089314 20842 solver.cpp:228] Iteration 32000, loss = 0.0629604\nI0819 09:34:44.089351 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:34:44.089367 20842 solver.cpp:244]     Train net 
output #1: loss = 0.0629603 (* 1 = 0.0629603 loss)\nI0819 09:34:44.167047 20842 sgd_solver.cpp:166] Iteration 32000, lr = 0.8\nI0819 09:37:01.241221 20842 solver.cpp:337] Iteration 32100, Testing net (#0)\nI0819 09:38:25.627179 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88944\nI0819 09:38:25.627526 20842 solver.cpp:404]     Test net output #1: loss = 0.421293 (* 1 = 0.421293 loss)\nI0819 09:38:26.946493 20842 solver.cpp:228] Iteration 32100, loss = 0.128401\nI0819 09:38:26.946532 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 09:38:26.946547 20842 solver.cpp:244]     Train net output #1: loss = 0.128401 (* 1 = 0.128401 loss)\nI0819 09:38:27.035365 20842 sgd_solver.cpp:166] Iteration 32100, lr = 0.8025\nI0819 09:40:44.151223 20842 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0819 09:42:08.539973 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0819 09:42:08.540279 20842 solver.cpp:404]     Test net output #1: loss = 0.453388 (* 1 = 0.453388 loss)\nI0819 09:42:09.859278 20842 solver.cpp:228] Iteration 32200, loss = 0.0749531\nI0819 09:42:09.859318 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:42:09.859333 20842 solver.cpp:244]     Train net output #1: loss = 0.074953 (* 1 = 0.074953 loss)\nI0819 09:42:09.948683 20842 sgd_solver.cpp:166] Iteration 32200, lr = 0.805\nI0819 09:44:26.455718 20842 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0819 09:45:50.845377 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88732\nI0819 09:45:50.845701 20842 solver.cpp:404]     Test net output #1: loss = 0.417296 (* 1 = 0.417296 loss)\nI0819 09:45:52.164914 20842 solver.cpp:228] Iteration 32300, loss = 0.0653387\nI0819 09:45:52.164952 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:45:52.164968 20842 solver.cpp:244]     Train net output #1: loss = 0.0653387 (* 1 = 0.0653387 loss)\nI0819 09:45:52.248363 20842 sgd_solver.cpp:166] Iteration 32300, lr = 
0.8075\nI0819 09:48:09.309518 20842 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0819 09:49:33.690650 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 09:49:33.690996 20842 solver.cpp:404]     Test net output #1: loss = 0.463901 (* 1 = 0.463901 loss)\nI0819 09:49:35.010222 20842 solver.cpp:228] Iteration 32400, loss = 0.0771939\nI0819 09:49:35.010263 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:49:35.010280 20842 solver.cpp:244]     Train net output #1: loss = 0.0771939 (* 1 = 0.0771939 loss)\nI0819 09:49:35.095844 20842 sgd_solver.cpp:166] Iteration 32400, lr = 0.81\nI0819 09:51:52.162010 20842 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0819 09:53:16.527762 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0819 09:53:16.528156 20842 solver.cpp:404]     Test net output #1: loss = 0.452794 (* 1 = 0.452794 loss)\nI0819 09:53:17.846822 20842 solver.cpp:228] Iteration 32500, loss = 0.0427101\nI0819 09:53:17.846861 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:53:17.846877 20842 solver.cpp:244]     Train net output #1: loss = 0.04271 (* 1 = 0.04271 loss)\nI0819 09:53:17.933421 20842 sgd_solver.cpp:166] Iteration 32500, lr = 0.8125\nI0819 09:55:34.546859 20842 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0819 09:56:58.931329 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0819 09:56:58.931674 20842 solver.cpp:404]     Test net output #1: loss = 0.45365 (* 1 = 0.45365 loss)\nI0819 09:57:00.250882 20842 solver.cpp:228] Iteration 32600, loss = 0.135606\nI0819 09:57:00.250921 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:57:00.250936 20842 solver.cpp:244]     Train net output #1: loss = 0.135606 (* 1 = 0.135606 loss)\nI0819 09:57:00.337738 20842 sgd_solver.cpp:166] Iteration 32600, lr = 0.815\nI0819 09:59:17.368161 20842 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0819 10:00:41.752619 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88228\nI0819 10:00:41.752934 20842 solver.cpp:404]     Test net output #1: loss = 0.442413 (* 1 = 0.442413 loss)\nI0819 10:00:43.072021 20842 solver.cpp:228] Iteration 32700, loss = 0.100312\nI0819 10:00:43.072060 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:00:43.072077 20842 solver.cpp:244]     Train net output #1: loss = 0.100312 (* 1 = 0.100312 loss)\nI0819 10:00:43.155603 20842 sgd_solver.cpp:166] Iteration 32700, lr = 0.8175\nI0819 10:03:00.144035 20842 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0819 10:04:24.517278 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0819 10:04:24.517642 20842 solver.cpp:404]     Test net output #1: loss = 0.470196 (* 1 = 0.470196 loss)\nI0819 10:04:25.836912 20842 solver.cpp:228] Iteration 32800, loss = 0.075092\nI0819 10:04:25.836949 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:04:25.836966 20842 solver.cpp:244]     Train net output #1: loss = 0.0750919 (* 1 = 0.0750919 loss)\nI0819 10:04:25.918411 20842 sgd_solver.cpp:166] Iteration 32800, lr = 0.82\nI0819 10:06:42.956874 20842 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0819 10:08:07.328438 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0819 10:08:07.328785 20842 solver.cpp:404]     Test net output #1: loss = 0.455329 (* 1 = 0.455329 loss)\nI0819 10:08:08.647186 20842 solver.cpp:228] Iteration 32900, loss = 0.0458299\nI0819 10:08:08.647228 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 10:08:08.647244 20842 solver.cpp:244]     Train net output #1: loss = 0.0458299 (* 1 = 0.0458299 loss)\nI0819 10:08:08.734117 20842 sgd_solver.cpp:166] Iteration 32900, lr = 0.8225\nI0819 10:10:25.275543 20842 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0819 10:11:49.659879 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88236\nI0819 10:11:49.660203 20842 solver.cpp:404]     Test net 
output #1: loss = 0.455883 (* 1 = 0.455883 loss)\nI0819 10:11:50.979907 20842 solver.cpp:228] Iteration 33000, loss = 0.188539\nI0819 10:11:50.979953 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 10:11:50.979976 20842 solver.cpp:244]     Train net output #1: loss = 0.188539 (* 1 = 0.188539 loss)\nI0819 10:11:51.065351 20842 sgd_solver.cpp:166] Iteration 33000, lr = 0.825\nI0819 10:14:08.091436 20842 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0819 10:15:32.538779 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 10:15:32.539134 20842 solver.cpp:404]     Test net output #1: loss = 0.44091 (* 1 = 0.44091 loss)\nI0819 10:15:33.858252 20842 solver.cpp:228] Iteration 33100, loss = 0.102188\nI0819 10:15:33.858297 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:15:33.858320 20842 solver.cpp:244]     Train net output #1: loss = 0.102188 (* 1 = 0.102188 loss)\nI0819 10:15:33.947558 20842 sgd_solver.cpp:166] Iteration 33100, lr = 0.8275\nI0819 10:17:51.042174 20842 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0819 10:19:15.474212 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88924\nI0819 10:19:15.474512 20842 solver.cpp:404]     Test net output #1: loss = 0.42616 (* 1 = 0.42616 loss)\nI0819 10:19:16.794179 20842 solver.cpp:228] Iteration 33200, loss = 0.087923\nI0819 10:19:16.794240 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:19:16.794267 20842 solver.cpp:244]     Train net output #1: loss = 0.087923 (* 1 = 0.087923 loss)\nI0819 10:19:16.883388 20842 sgd_solver.cpp:166] Iteration 33200, lr = 0.83\nI0819 10:21:33.945329 20842 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0819 10:22:58.386972 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0819 10:22:58.387316 20842 solver.cpp:404]     Test net output #1: loss = 0.438764 (* 1 = 0.438764 loss)\nI0819 10:22:59.707692 20842 solver.cpp:228] Iteration 33300, loss = 0.10141\nI0819 
10:22:59.707741 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:22:59.707764 20842 solver.cpp:244]     Train net output #1: loss = 0.10141 (* 1 = 0.10141 loss)\nI0819 10:22:59.795555 20842 sgd_solver.cpp:166] Iteration 33300, lr = 0.8325\nI0819 10:25:16.309041 20842 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0819 10:26:40.692342 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88696\nI0819 10:26:40.692669 20842 solver.cpp:404]     Test net output #1: loss = 0.422414 (* 1 = 0.422414 loss)\nI0819 10:26:42.013478 20842 solver.cpp:228] Iteration 33400, loss = 0.0403429\nI0819 10:26:42.013525 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 10:26:42.013548 20842 solver.cpp:244]     Train net output #1: loss = 0.0403429 (* 1 = 0.0403429 loss)\nI0819 10:26:42.099077 20842 sgd_solver.cpp:166] Iteration 33400, lr = 0.835\nI0819 10:28:59.179162 20842 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0819 10:30:23.685418 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88776\nI0819 10:30:23.685760 20842 solver.cpp:404]     Test net output #1: loss = 0.434753 (* 1 = 0.434753 loss)\nI0819 10:30:25.005566 20842 solver.cpp:228] Iteration 33500, loss = 0.138391\nI0819 10:30:25.005612 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:30:25.005637 20842 solver.cpp:244]     Train net output #1: loss = 0.138391 (* 1 = 0.138391 loss)\nI0819 10:30:25.091136 20842 sgd_solver.cpp:166] Iteration 33500, lr = 0.8375\nI0819 10:32:42.139241 20842 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0819 10:34:06.555933 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8858\nI0819 10:34:06.556283 20842 solver.cpp:404]     Test net output #1: loss = 0.434576 (* 1 = 0.434576 loss)\nI0819 10:34:07.875991 20842 solver.cpp:228] Iteration 33600, loss = 0.0910674\nI0819 10:34:07.876037 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:34:07.876062 20842 solver.cpp:244]  
   Train net output #1: loss = 0.0910674 (* 1 = 0.0910674 loss)\nI0819 10:34:07.964550 20842 sgd_solver.cpp:166] Iteration 33600, lr = 0.84\nI0819 10:36:25.003024 20842 solver.cpp:337] Iteration 33700, Testing net (#0)\nI0819 10:37:49.520584 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0819 10:37:49.520920 20842 solver.cpp:404]     Test net output #1: loss = 0.446059 (* 1 = 0.446059 loss)\nI0819 10:37:50.841059 20842 solver.cpp:228] Iteration 33700, loss = 0.157729\nI0819 10:37:50.841116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:37:50.841140 20842 solver.cpp:244]     Train net output #1: loss = 0.157729 (* 1 = 0.157729 loss)\nI0819 10:37:50.920023 20842 sgd_solver.cpp:166] Iteration 33700, lr = 0.8425\nI0819 10:40:07.892688 20842 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0819 10:41:31.290858 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0819 10:41:31.291157 20842 solver.cpp:404]     Test net output #1: loss = 0.480384 (* 1 = 0.480384 loss)\nI0819 10:41:32.606873 20842 solver.cpp:228] Iteration 33800, loss = 0.126027\nI0819 10:41:32.606919 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:41:32.606935 20842 solver.cpp:244]     Train net output #1: loss = 0.126027 (* 1 = 0.126027 loss)\nI0819 10:41:32.698221 20842 sgd_solver.cpp:166] Iteration 33800, lr = 0.845\nI0819 10:43:49.600214 20842 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0819 10:45:12.964468 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88596\nI0819 10:45:12.964802 20842 solver.cpp:404]     Test net output #1: loss = 0.442306 (* 1 = 0.442306 loss)\nI0819 10:45:14.280975 20842 solver.cpp:228] Iteration 33900, loss = 0.0399702\nI0819 10:45:14.281019 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:45:14.281036 20842 solver.cpp:244]     Train net output #1: loss = 0.0399701 (* 1 = 0.0399701 loss)\nI0819 10:45:14.368613 20842 sgd_solver.cpp:166] Iteration 
33900, lr = 0.8475\nI0819 10:47:30.731453 20842 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0819 10:48:54.090353 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87044\nI0819 10:48:54.090644 20842 solver.cpp:404]     Test net output #1: loss = 0.498908 (* 1 = 0.498908 loss)\nI0819 10:48:55.407017 20842 solver.cpp:228] Iteration 34000, loss = 0.158211\nI0819 10:48:55.407059 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:48:55.407076 20842 solver.cpp:244]     Train net output #1: loss = 0.158211 (* 1 = 0.158211 loss)\nI0819 10:48:55.494915 20842 sgd_solver.cpp:166] Iteration 34000, lr = 0.85\nI0819 10:51:12.305515 20842 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0819 10:52:35.669833 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88288\nI0819 10:52:35.670076 20842 solver.cpp:404]     Test net output #1: loss = 0.462992 (* 1 = 0.462992 loss)\nI0819 10:52:36.986817 20842 solver.cpp:228] Iteration 34100, loss = 0.0835634\nI0819 10:52:36.986860 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:52:36.986876 20842 solver.cpp:244]     Train net output #1: loss = 0.0835633 (* 1 = 0.0835633 loss)\nI0819 10:52:37.077320 20842 sgd_solver.cpp:166] Iteration 34100, lr = 0.8525\nI0819 10:54:54.005311 20842 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0819 10:56:17.364953 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 10:56:17.365237 20842 solver.cpp:404]     Test net output #1: loss = 0.453703 (* 1 = 0.453703 loss)\nI0819 10:56:18.681169 20842 solver.cpp:228] Iteration 34200, loss = 0.0778574\nI0819 10:56:18.681210 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:56:18.681226 20842 solver.cpp:244]     Train net output #1: loss = 0.0778573 (* 1 = 0.0778573 loss)\nI0819 10:56:18.776149 20842 sgd_solver.cpp:166] Iteration 34200, lr = 0.855\nI0819 10:58:35.658993 20842 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0819 10:59:59.021344 
20842 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0819 10:59:59.021634 20842 solver.cpp:404]     Test net output #1: loss = 0.451728 (* 1 = 0.451728 loss)\nI0819 11:00:00.337893 20842 solver.cpp:228] Iteration 34300, loss = 0.157304\nI0819 11:00:00.337934 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 11:00:00.337951 20842 solver.cpp:244]     Train net output #1: loss = 0.157304 (* 1 = 0.157304 loss)\nI0819 11:00:00.427316 20842 sgd_solver.cpp:166] Iteration 34300, lr = 0.8575\nI0819 11:02:17.281124 20842 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0819 11:03:40.644702 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0819 11:03:40.644976 20842 solver.cpp:404]     Test net output #1: loss = 0.430803 (* 1 = 0.430803 loss)\nI0819 11:03:41.961122 20842 solver.cpp:228] Iteration 34400, loss = 0.148188\nI0819 11:03:41.961165 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 11:03:41.961182 20842 solver.cpp:244]     Train net output #1: loss = 0.148188 (* 1 = 0.148188 loss)\nI0819 11:03:42.050957 20842 sgd_solver.cpp:166] Iteration 34400, lr = 0.86\nI0819 11:05:58.935268 20842 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0819 11:07:22.298595 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87912\nI0819 11:07:22.298817 20842 solver.cpp:404]     Test net output #1: loss = 0.475668 (* 1 = 0.475668 loss)\nI0819 11:07:23.613922 20842 solver.cpp:228] Iteration 34500, loss = 0.0849189\nI0819 11:07:23.613965 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:07:23.613982 20842 solver.cpp:244]     Train net output #1: loss = 0.0849188 (* 1 = 0.0849188 loss)\nI0819 11:07:23.699524 20842 sgd_solver.cpp:166] Iteration 34500, lr = 0.8625\nI0819 11:09:40.568156 20842 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0819 11:11:03.835664 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0819 11:11:03.835944 20842 solver.cpp:404]     Test net 
output #1: loss = 0.451556 (* 1 = 0.451556 loss)\nI0819 11:11:05.151278 20842 solver.cpp:228] Iteration 34600, loss = 0.0823864\nI0819 11:11:05.151317 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:11:05.151335 20842 solver.cpp:244]     Train net output #1: loss = 0.0823863 (* 1 = 0.0823863 loss)\nI0819 11:11:05.242050 20842 sgd_solver.cpp:166] Iteration 34600, lr = 0.865\nI0819 11:13:21.681608 20842 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0819 11:14:44.954917 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 11:14:44.955199 20842 solver.cpp:404]     Test net output #1: loss = 0.436428 (* 1 = 0.436428 loss)\nI0819 11:14:46.271333 20842 solver.cpp:228] Iteration 34700, loss = 0.0303937\nI0819 11:14:46.271375 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 11:14:46.271392 20842 solver.cpp:244]     Train net output #1: loss = 0.0303937 (* 1 = 0.0303937 loss)\nI0819 11:14:46.361538 20842 sgd_solver.cpp:166] Iteration 34700, lr = 0.8675\nI0819 11:17:03.234961 20842 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0819 11:18:26.508594 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88732\nI0819 11:18:26.508867 20842 solver.cpp:404]     Test net output #1: loss = 0.428669 (* 1 = 0.428669 loss)\nI0819 11:18:27.824214 20842 solver.cpp:228] Iteration 34800, loss = 0.0697181\nI0819 11:18:27.824255 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 11:18:27.824271 20842 solver.cpp:244]     Train net output #1: loss = 0.0697181 (* 1 = 0.0697181 loss)\nI0819 11:18:27.914659 20842 sgd_solver.cpp:166] Iteration 34800, lr = 0.87\nI0819 11:20:44.661049 20842 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0819 11:22:07.927974 20842 solver.cpp:404]     Test net output #0: accuracy = 0.886\nI0819 11:22:07.928261 20842 solver.cpp:404]     Test net output #1: loss = 0.44217 (* 1 = 0.44217 loss)\nI0819 11:22:09.244331 20842 solver.cpp:228] Iteration 34900, loss = 
0.132426\nI0819 11:22:09.244372 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:22:09.244388 20842 solver.cpp:244]     Train net output #1: loss = 0.132426 (* 1 = 0.132426 loss)\nI0819 11:22:09.333523 20842 sgd_solver.cpp:166] Iteration 34900, lr = 0.8725\nI0819 11:24:26.143915 20842 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0819 11:25:49.409863 20842 solver.cpp:404]     Test net output #0: accuracy = 0.886841\nI0819 11:25:49.410166 20842 solver.cpp:404]     Test net output #1: loss = 0.427163 (* 1 = 0.427163 loss)\nI0819 11:25:50.726238 20842 solver.cpp:228] Iteration 35000, loss = 0.10828\nI0819 11:25:50.726279 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 11:25:50.726295 20842 solver.cpp:244]     Train net output #1: loss = 0.10828 (* 1 = 0.10828 loss)\nI0819 11:25:50.809053 20842 sgd_solver.cpp:166] Iteration 35000, lr = 0.875\nI0819 11:28:07.597362 20842 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0819 11:29:30.872884 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0819 11:29:30.873174 20842 solver.cpp:404]     Test net output #1: loss = 0.410964 (* 1 = 0.410964 loss)\nI0819 11:29:32.189002 20842 solver.cpp:228] Iteration 35100, loss = 0.0456667\nI0819 11:29:32.189044 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 11:29:32.189060 20842 solver.cpp:244]     Train net output #1: loss = 0.0456667 (* 1 = 0.0456667 loss)\nI0819 11:29:32.279532 20842 sgd_solver.cpp:166] Iteration 35100, lr = 0.8775\nI0819 11:31:49.162513 20842 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0819 11:33:12.435652 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0819 11:33:12.435969 20842 solver.cpp:404]     Test net output #1: loss = 0.439057 (* 1 = 0.439057 loss)\nI0819 11:33:13.751365 20842 solver.cpp:228] Iteration 35200, loss = 0.0622526\nI0819 11:33:13.751406 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:33:13.751422 20842 
solver.cpp:244]     Train net output #1: loss = 0.0622526 (* 1 = 0.0622526 loss)\nI0819 11:33:13.836166 20842 sgd_solver.cpp:166] Iteration 35200, lr = 0.88\nI0819 11:35:30.174569 20842 solver.cpp:337] Iteration 35300, Testing net (#0)\nI0819 11:36:53.436992 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 11:36:53.437304 20842 solver.cpp:404]     Test net output #1: loss = 0.43231 (* 1 = 0.43231 loss)\nI0819 11:36:54.752691 20842 solver.cpp:228] Iteration 35300, loss = 0.122984\nI0819 11:36:54.752732 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:36:54.752748 20842 solver.cpp:244]     Train net output #1: loss = 0.122984 (* 1 = 0.122984 loss)\nI0819 11:36:54.843147 20842 sgd_solver.cpp:166] Iteration 35300, lr = 0.8825\nI0819 11:39:11.598726 20842 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0819 11:40:34.867653 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0819 11:40:34.867959 20842 solver.cpp:404]     Test net output #1: loss = 0.433293 (* 1 = 0.433293 loss)\nI0819 11:40:36.184118 20842 solver.cpp:228] Iteration 35400, loss = 0.123221\nI0819 11:40:36.184156 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:40:36.184172 20842 solver.cpp:244]     Train net output #1: loss = 0.123221 (* 1 = 0.123221 loss)\nI0819 11:40:36.273936 20842 sgd_solver.cpp:166] Iteration 35400, lr = 0.885\nI0819 11:42:52.552402 20842 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0819 11:44:15.912086 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88376\nI0819 11:44:15.912377 20842 solver.cpp:404]     Test net output #1: loss = 0.439548 (* 1 = 0.439548 loss)\nI0819 11:44:17.228173 20842 solver.cpp:228] Iteration 35500, loss = 0.0973305\nI0819 11:44:17.228211 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:44:17.228227 20842 solver.cpp:244]     Train net output #1: loss = 0.0973304 (* 1 = 0.0973304 loss)\nI0819 11:44:17.312173 20842 
sgd_solver.cpp:166] Iteration 35500, lr = 0.8875\nI0819 11:46:34.041126 20842 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0819 11:47:57.412140 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 11:47:57.412441 20842 solver.cpp:404]     Test net output #1: loss = 0.438421 (* 1 = 0.438421 loss)\nI0819 11:47:58.728595 20842 solver.cpp:228] Iteration 35600, loss = 0.0977752\nI0819 11:47:58.728632 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 11:47:58.728648 20842 solver.cpp:244]     Train net output #1: loss = 0.0977751 (* 1 = 0.0977751 loss)\nI0819 11:47:58.818773 20842 sgd_solver.cpp:166] Iteration 35600, lr = 0.89\nI0819 11:50:15.591574 20842 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0819 11:51:38.957249 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0819 11:51:38.957545 20842 solver.cpp:404]     Test net output #1: loss = 0.450492 (* 1 = 0.450492 loss)\nI0819 11:51:40.273944 20842 solver.cpp:228] Iteration 35700, loss = 0.118497\nI0819 11:51:40.273983 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:51:40.273999 20842 solver.cpp:244]     Train net output #1: loss = 0.118497 (* 1 = 0.118497 loss)\nI0819 11:51:40.355418 20842 sgd_solver.cpp:166] Iteration 35700, lr = 0.8925\nI0819 11:53:56.727869 20842 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0819 11:55:20.096863 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88604\nI0819 11:55:20.097156 20842 solver.cpp:404]     Test net output #1: loss = 0.421013 (* 1 = 0.421013 loss)\nI0819 11:55:21.413036 20842 solver.cpp:228] Iteration 35800, loss = 0.124913\nI0819 11:55:21.413074 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:55:21.413091 20842 solver.cpp:244]     Train net output #1: loss = 0.124913 (* 1 = 0.124913 loss)\nI0819 11:55:21.502169 20842 sgd_solver.cpp:166] Iteration 35800, lr = 0.895\nI0819 11:57:38.263772 20842 solver.cpp:337] Iteration 35900, Testing net 
(#0)\nI0819 11:59:01.650926 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8878\nI0819 11:59:01.651257 20842 solver.cpp:404]     Test net output #1: loss = 0.413053 (* 1 = 0.413053 loss)\nI0819 11:59:02.967022 20842 solver.cpp:228] Iteration 35900, loss = 0.17297\nI0819 11:59:02.967064 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 11:59:02.967090 20842 solver.cpp:244]     Train net output #1: loss = 0.17297 (* 1 = 0.17297 loss)\nI0819 11:59:03.058202 20842 sgd_solver.cpp:166] Iteration 35900, lr = 0.8975\nI0819 12:01:19.407910 20842 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0819 12:02:42.792436 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0819 12:02:42.792742 20842 solver.cpp:404]     Test net output #1: loss = 0.412927 (* 1 = 0.412927 loss)\nI0819 12:02:44.109149 20842 solver.cpp:228] Iteration 36000, loss = 0.112061\nI0819 12:02:44.109194 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:02:44.109218 20842 solver.cpp:244]     Train net output #1: loss = 0.112061 (* 1 = 0.112061 loss)\nI0819 12:02:44.197414 20842 sgd_solver.cpp:166] Iteration 36000, lr = 0.9\nI0819 12:05:00.526310 20842 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0819 12:06:23.894188 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0819 12:06:23.894466 20842 solver.cpp:404]     Test net output #1: loss = 0.433644 (* 1 = 0.433644 loss)\nI0819 12:06:25.210372 20842 solver.cpp:228] Iteration 36100, loss = 0.0680856\nI0819 12:06:25.210414 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:06:25.210431 20842 solver.cpp:244]     Train net output #1: loss = 0.0680855 (* 1 = 0.0680855 loss)\nI0819 12:06:25.295660 20842 sgd_solver.cpp:166] Iteration 36100, lr = 0.9025\nI0819 12:08:42.085186 20842 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0819 12:10:05.450294 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0819 12:10:05.450567 20842 
solver.cpp:404]     Test net output #1: loss = 0.419003 (* 1 = 0.419003 loss)\nI0819 12:10:06.766235 20842 solver.cpp:228] Iteration 36200, loss = 0.111747\nI0819 12:10:06.766278 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:10:06.766294 20842 solver.cpp:244]     Train net output #1: loss = 0.111747 (* 1 = 0.111747 loss)\nI0819 12:10:06.852537 20842 sgd_solver.cpp:166] Iteration 36200, lr = 0.905\nI0819 12:12:23.628057 20842 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0819 12:13:47.002135 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0819 12:13:47.002393 20842 solver.cpp:404]     Test net output #1: loss = 0.459624 (* 1 = 0.459624 loss)\nI0819 12:13:48.318548 20842 solver.cpp:228] Iteration 36300, loss = 0.100474\nI0819 12:13:48.318591 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 12:13:48.318608 20842 solver.cpp:244]     Train net output #1: loss = 0.100474 (* 1 = 0.100474 loss)\nI0819 12:13:48.407405 20842 sgd_solver.cpp:166] Iteration 36300, lr = 0.9075\nI0819 12:16:05.277271 20842 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0819 12:17:28.645364 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88104\nI0819 12:17:28.645615 20842 solver.cpp:404]     Test net output #1: loss = 0.436552 (* 1 = 0.436552 loss)\nI0819 12:17:29.961369 20842 solver.cpp:228] Iteration 36400, loss = 0.101756\nI0819 12:17:29.961410 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:17:29.961427 20842 solver.cpp:244]     Train net output #1: loss = 0.101756 (* 1 = 0.101756 loss)\nI0819 12:17:30.048198 20842 sgd_solver.cpp:166] Iteration 36400, lr = 0.91\nI0819 12:19:46.347854 20842 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0819 12:21:09.721352 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8896\nI0819 12:21:09.721691 20842 solver.cpp:404]     Test net output #1: loss = 0.429558 (* 1 = 0.429558 loss)\nI0819 12:21:11.037740 20842 solver.cpp:228] 
Iteration 36500, loss = 0.0480628\nI0819 12:21:11.037782 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 12:21:11.037799 20842 solver.cpp:244]     Train net output #1: loss = 0.0480628 (* 1 = 0.0480628 loss)\nI0819 12:21:11.132109 20842 sgd_solver.cpp:166] Iteration 36500, lr = 0.9125\nI0819 12:23:27.884632 20842 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0819 12:24:51.255245 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0819 12:24:51.255532 20842 solver.cpp:404]     Test net output #1: loss = 0.459361 (* 1 = 0.459361 loss)\nI0819 12:24:52.571672 20842 solver.cpp:228] Iteration 36600, loss = 0.0884288\nI0819 12:24:52.571718 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:24:52.571735 20842 solver.cpp:244]     Train net output #1: loss = 0.0884288 (* 1 = 0.0884288 loss)\nI0819 12:24:52.664167 20842 sgd_solver.cpp:166] Iteration 36600, lr = 0.915\nI0819 12:27:09.021661 20842 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0819 12:28:32.394536 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0819 12:28:32.394822 20842 solver.cpp:404]     Test net output #1: loss = 0.446472 (* 1 = 0.446472 loss)\nI0819 12:28:33.711554 20842 solver.cpp:228] Iteration 36700, loss = 0.0589343\nI0819 12:28:33.711596 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:28:33.711612 20842 solver.cpp:244]     Train net output #1: loss = 0.0589342 (* 1 = 0.0589342 loss)\nI0819 12:28:33.799692 20842 sgd_solver.cpp:166] Iteration 36700, lr = 0.9175\nI0819 12:30:50.476562 20842 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0819 12:32:13.852665 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87892\nI0819 12:32:13.852960 20842 solver.cpp:404]     Test net output #1: loss = 0.444561 (* 1 = 0.444561 loss)\nI0819 12:32:15.168843 20842 solver.cpp:228] Iteration 36800, loss = 0.112768\nI0819 12:32:15.168884 20842 solver.cpp:244]     Train net output #0: accuracy = 
0.952\nI0819 12:32:15.168901 20842 solver.cpp:244]     Train net output #1: loss = 0.112768 (* 1 = 0.112768 loss)\nI0819 12:32:15.252488 20842 sgd_solver.cpp:166] Iteration 36800, lr = 0.92\nI0819 12:34:31.966495 20842 solver.cpp:337] Iteration 36900, Testing net (#0)\nI0819 12:35:55.339609 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0819 12:35:55.339946 20842 solver.cpp:404]     Test net output #1: loss = 0.413061 (* 1 = 0.413061 loss)\nI0819 12:35:56.656208 20842 solver.cpp:228] Iteration 36900, loss = 0.0725808\nI0819 12:35:56.656251 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 12:35:56.656268 20842 solver.cpp:244]     Train net output #1: loss = 0.0725808 (* 1 = 0.0725808 loss)\nI0819 12:35:56.747999 20842 sgd_solver.cpp:166] Iteration 36900, lr = 0.9225\nI0819 12:38:13.520622 20842 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0819 12:39:36.890091 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88792\nI0819 12:39:36.890360 20842 solver.cpp:404]     Test net output #1: loss = 0.416093 (* 1 = 0.416093 loss)\nI0819 12:39:38.207041 20842 solver.cpp:228] Iteration 37000, loss = 0.0639166\nI0819 12:39:38.207084 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:39:38.207100 20842 solver.cpp:244]     Train net output #1: loss = 0.0639166 (* 1 = 0.0639166 loss)\nI0819 12:39:38.288540 20842 sgd_solver.cpp:166] Iteration 37000, lr = 0.925\nI0819 12:41:55.109756 20842 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0819 12:43:18.481699 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0819 12:43:18.482004 20842 solver.cpp:404]     Test net output #1: loss = 0.444679 (* 1 = 0.444679 loss)\nI0819 12:43:19.798715 20842 solver.cpp:228] Iteration 37100, loss = 0.145401\nI0819 12:43:19.798758 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 12:43:19.798775 20842 solver.cpp:244]     Train net output #1: loss = 0.145401 (* 1 = 0.145401 loss)\nI0819 
12:43:19.879307 20842 sgd_solver.cpp:166] Iteration 37100, lr = 0.9275\nI0819 12:45:36.630776 20842 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0819 12:47:00.002033 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0819 12:47:00.002326 20842 solver.cpp:404]     Test net output #1: loss = 0.410422 (* 1 = 0.410422 loss)\nI0819 12:47:01.319021 20842 solver.cpp:228] Iteration 37200, loss = 0.129085\nI0819 12:47:01.319066 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:47:01.319082 20842 solver.cpp:244]     Train net output #1: loss = 0.129085 (* 1 = 0.129085 loss)\nI0819 12:47:01.404109 20842 sgd_solver.cpp:166] Iteration 37200, lr = 0.93\nI0819 12:49:18.151270 20842 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0819 12:50:41.527381 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88748\nI0819 12:50:41.527673 20842 solver.cpp:404]     Test net output #1: loss = 0.436259 (* 1 = 0.436259 loss)\nI0819 12:50:42.843879 20842 solver.cpp:228] Iteration 37300, loss = 0.0939594\nI0819 12:50:42.843922 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 12:50:42.843940 20842 solver.cpp:244]     Train net output #1: loss = 0.0939594 (* 1 = 0.0939594 loss)\nI0819 12:50:42.934500 20842 sgd_solver.cpp:166] Iteration 37300, lr = 0.9325\nI0819 12:52:59.669353 20842 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0819 12:54:23.045637 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87576\nI0819 12:54:23.045907 20842 solver.cpp:404]     Test net output #1: loss = 0.46354 (* 1 = 0.46354 loss)\nI0819 12:54:24.357676 20842 solver.cpp:228] Iteration 37400, loss = 0.10415\nI0819 12:54:24.357722 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:54:24.357738 20842 solver.cpp:244]     Train net output #1: loss = 0.10415 (* 1 = 0.10415 loss)\nI0819 12:54:24.447654 20842 sgd_solver.cpp:166] Iteration 37400, lr = 0.935\nI0819 12:56:41.451082 20842 solver.cpp:337] Iteration 
37500, Testing net (#0)\nI0819 12:58:04.828388 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8878\nI0819 12:58:04.828650 20842 solver.cpp:404]     Test net output #1: loss = 0.42271 (* 1 = 0.42271 loss)\nI0819 12:58:06.141451 20842 solver.cpp:228] Iteration 37500, loss = 0.113544\nI0819 12:58:06.141502 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:58:06.141520 20842 solver.cpp:244]     Train net output #1: loss = 0.113544 (* 1 = 0.113544 loss)\nI0819 12:58:06.226140 20842 sgd_solver.cpp:166] Iteration 37500, lr = 0.9375\nI0819 13:00:23.264813 20842 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0819 13:01:46.636394 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88816\nI0819 13:01:46.636663 20842 solver.cpp:404]     Test net output #1: loss = 0.41823 (* 1 = 0.41823 loss)\nI0819 13:01:47.948470 20842 solver.cpp:228] Iteration 37600, loss = 0.149742\nI0819 13:01:47.948516 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 13:01:47.948534 20842 solver.cpp:244]     Train net output #1: loss = 0.149742 (* 1 = 0.149742 loss)\nI0819 13:01:48.042963 20842 sgd_solver.cpp:166] Iteration 37600, lr = 0.94\nI0819 13:04:05.044944 20842 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0819 13:05:28.427467 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8854\nI0819 13:05:28.427745 20842 solver.cpp:404]     Test net output #1: loss = 0.427509 (* 1 = 0.427509 loss)\nI0819 13:05:29.739864 20842 solver.cpp:228] Iteration 37700, loss = 0.0601252\nI0819 13:05:29.739908 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 13:05:29.739933 20842 solver.cpp:244]     Train net output #1: loss = 0.0601252 (* 1 = 0.0601252 loss)\nI0819 13:05:29.831473 20842 sgd_solver.cpp:166] Iteration 37700, lr = 0.9425\nI0819 13:07:46.825569 20842 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0819 13:09:10.195413 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0819 
13:09:10.195683 20842 solver.cpp:404]     Test net output #1: loss = 0.426993 (* 1 = 0.426993 loss)\nI0819 13:09:11.508355 20842 solver.cpp:228] Iteration 37800, loss = 0.130959\nI0819 13:09:11.508399 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 13:09:11.508414 20842 solver.cpp:244]     Train net output #1: loss = 0.130959 (* 1 = 0.130959 loss)\nI0819 13:09:11.597514 20842 sgd_solver.cpp:166] Iteration 37800, lr = 0.945\nI0819 13:11:28.367249 20842 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0819 13:12:51.739107 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88888\nI0819 13:12:51.739400 20842 solver.cpp:404]     Test net output #1: loss = 0.41239 (* 1 = 0.41239 loss)\nI0819 13:12:53.051645 20842 solver.cpp:228] Iteration 37900, loss = 0.102041\nI0819 13:12:53.051687 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:12:53.051703 20842 solver.cpp:244]     Train net output #1: loss = 0.102041 (* 1 = 0.102041 loss)\nI0819 13:12:53.142458 20842 sgd_solver.cpp:166] Iteration 37900, lr = 0.9475\nI0819 13:15:10.125650 20842 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0819 13:16:33.499613 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882241\nI0819 13:16:33.499907 20842 solver.cpp:404]     Test net output #1: loss = 0.435551 (* 1 = 0.435551 loss)\nI0819 13:16:34.811926 20842 solver.cpp:228] Iteration 38000, loss = 0.0467814\nI0819 13:16:34.811969 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 13:16:34.811985 20842 solver.cpp:244]     Train net output #1: loss = 0.0467814 (* 1 = 0.0467814 loss)\nI0819 13:16:34.903764 20842 sgd_solver.cpp:166] Iteration 38000, lr = 0.95\nI0819 13:18:51.959360 20842 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0819 13:20:15.327992 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88952\nI0819 13:20:15.328282 20842 solver.cpp:404]     Test net output #1: loss = 0.408724 (* 1 = 0.408724 loss)\nI0819 13:20:16.640041 
20842 solver.cpp:228] Iteration 38100, loss = 0.0784438\nI0819 13:20:16.640085 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:20:16.640101 20842 solver.cpp:244]     Train net output #1: loss = 0.0784438 (* 1 = 0.0784438 loss)\nI0819 13:20:16.730672 20842 sgd_solver.cpp:166] Iteration 38100, lr = 0.9525\nI0819 13:22:33.664628 20842 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0819 13:23:57.034801 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0819 13:23:57.035058 20842 solver.cpp:404]     Test net output #1: loss = 0.42187 (* 1 = 0.42187 loss)\nI0819 13:23:58.347048 20842 solver.cpp:228] Iteration 38200, loss = 0.13139\nI0819 13:23:58.347090 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 13:23:58.347106 20842 solver.cpp:244]     Train net output #1: loss = 0.13139 (* 1 = 0.13139 loss)\nI0819 13:23:58.441018 20842 sgd_solver.cpp:166] Iteration 38200, lr = 0.955\nI0819 13:26:15.472398 20842 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0819 13:27:38.747316 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88276\nI0819 13:27:38.747584 20842 solver.cpp:404]     Test net output #1: loss = 0.423533 (* 1 = 0.423533 loss)\nI0819 13:27:40.060226 20842 solver.cpp:228] Iteration 38300, loss = 0.119621\nI0819 13:27:40.060268 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:27:40.060286 20842 solver.cpp:244]     Train net output #1: loss = 0.119621 (* 1 = 0.119621 loss)\nI0819 13:27:40.154690 20842 sgd_solver.cpp:166] Iteration 38300, lr = 0.9575\nI0819 13:29:57.224880 20842 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0819 13:31:20.500674 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 13:31:20.500973 20842 solver.cpp:404]     Test net output #1: loss = 0.44109 (* 1 = 0.44109 loss)\nI0819 13:31:21.813246 20842 solver.cpp:228] Iteration 38400, loss = 0.13045\nI0819 13:31:21.813288 20842 solver.cpp:244]     Train net output #0: 
accuracy = 0.968\nI0819 13:31:21.813305 20842 solver.cpp:244]     Train net output #1: loss = 0.13045 (* 1 = 0.13045 loss)\nI0819 13:31:21.906651 20842 sgd_solver.cpp:166] Iteration 38400, lr = 0.96\nI0819 13:33:39.066330 20842 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0819 13:35:03.465565 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88816\nI0819 13:35:03.465919 20842 solver.cpp:404]     Test net output #1: loss = 0.401352 (* 1 = 0.401352 loss)\nI0819 13:35:04.782598 20842 solver.cpp:228] Iteration 38500, loss = 0.101795\nI0819 13:35:04.782655 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 13:35:04.782671 20842 solver.cpp:244]     Train net output #1: loss = 0.101795 (* 1 = 0.101795 loss)\nI0819 13:35:04.870919 20842 sgd_solver.cpp:166] Iteration 38500, lr = 0.9625\nI0819 13:37:22.099148 20842 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0819 13:38:46.495937 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8878\nI0819 13:38:46.496299 20842 solver.cpp:404]     Test net output #1: loss = 0.42218 (* 1 = 0.42218 loss)\nI0819 13:38:47.813032 20842 solver.cpp:228] Iteration 38600, loss = 0.0546297\nI0819 13:38:47.813091 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:38:47.813110 20842 solver.cpp:244]     Train net output #1: loss = 0.0546298 (* 1 = 0.0546298 loss)\nI0819 13:38:47.894057 20842 sgd_solver.cpp:166] Iteration 38600, lr = 0.965\nI0819 13:41:05.096396 20842 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0819 13:42:29.474959 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0819 13:42:29.475301 20842 solver.cpp:404]     Test net output #1: loss = 0.399969 (* 1 = 0.399969 loss)\nI0819 13:42:30.790839 20842 solver.cpp:228] Iteration 38700, loss = 0.0694612\nI0819 13:42:30.790899 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 13:42:30.790915 20842 solver.cpp:244]     Train net output #1: loss = 0.0694612 (* 1 = 0.0694612 
loss)\nI0819 13:42:30.879853 20842 sgd_solver.cpp:166] Iteration 38700, lr = 0.9675\nI0819 13:44:47.775600 20842 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0819 13:46:12.178411 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87784\nI0819 13:46:12.178768 20842 solver.cpp:404]     Test net output #1: loss = 0.4783 (* 1 = 0.4783 loss)\nI0819 13:46:13.494987 20842 solver.cpp:228] Iteration 38800, loss = 0.168377\nI0819 13:46:13.495044 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 13:46:13.495062 20842 solver.cpp:244]     Train net output #1: loss = 0.168377 (* 1 = 0.168377 loss)\nI0819 13:46:13.576225 20842 sgd_solver.cpp:166] Iteration 38800, lr = 0.97\nI0819 13:48:30.803907 20842 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0819 13:49:55.189826 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88628\nI0819 13:49:55.190157 20842 solver.cpp:404]     Test net output #1: loss = 0.434984 (* 1 = 0.434984 loss)\nI0819 13:49:56.506500 20842 solver.cpp:228] Iteration 38900, loss = 0.0971071\nI0819 13:49:56.506557 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 13:49:56.506575 20842 solver.cpp:244]     Train net output #1: loss = 0.0971071 (* 1 = 0.0971071 loss)\nI0819 13:49:56.591423 20842 sgd_solver.cpp:166] Iteration 38900, lr = 0.9725\nI0819 13:52:13.441208 20842 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0819 13:53:37.832479 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0819 13:53:37.832844 20842 solver.cpp:404]     Test net output #1: loss = 0.4406 (* 1 = 0.4406 loss)\nI0819 13:53:39.149860 20842 solver.cpp:228] Iteration 39000, loss = 0.0985099\nI0819 13:53:39.149914 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 13:53:39.149930 20842 solver.cpp:244]     Train net output #1: loss = 0.09851 (* 1 = 0.09851 loss)\nI0819 13:53:39.231462 20842 sgd_solver.cpp:166] Iteration 39000, lr = 0.975\nI0819 13:55:56.476204 20842 solver.cpp:337] 
Iteration 39100, Testing net (#0)\nI0819 13:57:20.868834 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88612\nI0819 13:57:20.869187 20842 solver.cpp:404]     Test net output #1: loss = 0.402724 (* 1 = 0.402724 loss)\nI0819 13:57:22.185020 20842 solver.cpp:228] Iteration 39100, loss = 0.0806194\nI0819 13:57:22.185076 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:57:22.185096 20842 solver.cpp:244]     Train net output #1: loss = 0.0806194 (* 1 = 0.0806194 loss)\nI0819 13:57:22.271486 20842 sgd_solver.cpp:166] Iteration 39100, lr = 0.9775\nI0819 13:59:39.526020 20842 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0819 14:01:03.916394 20842 solver.cpp:404]     Test net output #0: accuracy = 0.892601\nI0819 14:01:03.916710 20842 solver.cpp:404]     Test net output #1: loss = 0.397754 (* 1 = 0.397754 loss)\nI0819 14:01:05.232165 20842 solver.cpp:228] Iteration 39200, loss = 0.0238205\nI0819 14:01:05.232216 20842 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 14:01:05.232234 20842 solver.cpp:244]     Train net output #1: loss = 0.0238205 (* 1 = 0.0238205 loss)\nI0819 14:01:05.323112 20842 sgd_solver.cpp:166] Iteration 39200, lr = 0.98\nI0819 14:03:22.538182 20842 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0819 14:04:46.925856 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88744\nI0819 14:04:46.926184 20842 solver.cpp:404]     Test net output #1: loss = 0.431653 (* 1 = 0.431653 loss)\nI0819 14:04:48.242559 20842 solver.cpp:228] Iteration 39300, loss = 0.119613\nI0819 14:04:48.242624 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:04:48.242642 20842 solver.cpp:244]     Train net output #1: loss = 0.119613 (* 1 = 0.119613 loss)\nI0819 14:04:48.324566 20842 sgd_solver.cpp:166] Iteration 39300, lr = 0.9825\nI0819 14:07:05.336514 20842 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0819 14:08:29.716094 20842 solver.cpp:404]     Test net output #0: accuracy = 
0.8812\nI0819 14:08:29.716445 20842 solver.cpp:404]     Test net output #1: loss = 0.442843 (* 1 = 0.442843 loss)\nI0819 14:08:31.032934 20842 solver.cpp:228] Iteration 39400, loss = 0.146268\nI0819 14:08:31.032991 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 14:08:31.033010 20842 solver.cpp:244]     Train net output #1: loss = 0.146268 (* 1 = 0.146268 loss)\nI0819 14:08:31.116804 20842 sgd_solver.cpp:166] Iteration 39400, lr = 0.985\nI0819 14:10:48.194327 20842 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0819 14:12:12.571915 20842 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 14:12:12.572259 20842 solver.cpp:404]     Test net output #1: loss = 0.489201 (* 1 = 0.489201 loss)\nI0819 14:12:13.888787 20842 solver.cpp:228] Iteration 39500, loss = 0.109859\nI0819 14:12:13.888841 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:12:13.888859 20842 solver.cpp:244]     Train net output #1: loss = 0.109859 (* 1 = 0.109859 loss)\nI0819 14:12:13.976049 20842 sgd_solver.cpp:166] Iteration 39500, lr = 0.9875\nI0819 14:14:30.981389 20842 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0819 14:15:55.358422 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87208\nI0819 14:15:55.358783 20842 solver.cpp:404]     Test net output #1: loss = 0.456957 (* 1 = 0.456957 loss)\nI0819 14:15:56.673637 20842 solver.cpp:228] Iteration 39600, loss = 0.124093\nI0819 14:15:56.673696 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:15:56.673713 20842 solver.cpp:244]     Train net output #1: loss = 0.124093 (* 1 = 0.124093 loss)\nI0819 14:15:56.760887 20842 sgd_solver.cpp:166] Iteration 39600, lr = 0.99\nI0819 14:18:13.738718 20842 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0819 14:19:38.113770 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88588\nI0819 14:19:38.114097 20842 solver.cpp:404]     Test net output #1: loss = 0.411499 (* 1 = 0.411499 loss)\nI0819 
14:19:39.429502 20842 solver.cpp:228] Iteration 39700, loss = 0.0716032\nI0819 14:19:39.429560 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 14:19:39.429577 20842 solver.cpp:244]     Train net output #1: loss = 0.0716032 (* 1 = 0.0716032 loss)\nI0819 14:19:39.515410 20842 sgd_solver.cpp:166] Iteration 39700, lr = 0.9925\nI0819 14:21:56.728159 20842 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0819 14:23:21.097088 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 14:23:21.097435 20842 solver.cpp:404]     Test net output #1: loss = 0.432909 (* 1 = 0.432909 loss)\nI0819 14:23:22.414259 20842 solver.cpp:228] Iteration 39800, loss = 0.0845516\nI0819 14:23:22.414319 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:23:22.414337 20842 solver.cpp:244]     Train net output #1: loss = 0.0845516 (* 1 = 0.0845516 loss)\nI0819 14:23:22.495985 20842 sgd_solver.cpp:166] Iteration 39800, lr = 0.995\nI0819 14:25:39.587512 20842 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0819 14:27:03.965847 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88288\nI0819 14:27:03.966203 20842 solver.cpp:404]     Test net output #1: loss = 0.425412 (* 1 = 0.425412 loss)\nI0819 14:27:05.282518 20842 solver.cpp:228] Iteration 39900, loss = 0.125916\nI0819 14:27:05.282577 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 14:27:05.282595 20842 solver.cpp:244]     Train net output #1: loss = 0.125916 (* 1 = 0.125916 loss)\nI0819 14:27:05.369994 20842 sgd_solver.cpp:166] Iteration 39900, lr = 0.9975\nI0819 14:29:22.597234 20842 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0819 14:30:46.971379 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88304\nI0819 14:30:46.971729 20842 solver.cpp:404]     Test net output #1: loss = 0.419498 (* 1 = 0.419498 loss)\nI0819 14:30:48.288288 20842 solver.cpp:228] Iteration 40000, loss = 0.146569\nI0819 14:30:48.288345 20842 solver.cpp:244]     
Train net output #0: accuracy = 0.944\nI0819 14:30:48.288363 20842 solver.cpp:244]     Train net output #1: loss = 0.146569 (* 1 = 0.146569 loss)\nI0819 14:30:48.374408 20842 sgd_solver.cpp:166] Iteration 40000, lr = 1\nI0819 14:33:05.613479 20842 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0819 14:34:29.988184 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8878\nI0819 14:34:29.988534 20842 solver.cpp:404]     Test net output #1: loss = 0.407717 (* 1 = 0.407717 loss)\nI0819 14:34:31.305330 20842 solver.cpp:228] Iteration 40100, loss = 0.0643258\nI0819 14:34:31.305383 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:34:31.305399 20842 solver.cpp:244]     Train net output #1: loss = 0.0643258 (* 1 = 0.0643258 loss)\nI0819 14:34:31.393482 20842 sgd_solver.cpp:166] Iteration 40100, lr = 1.0025\nI0819 14:36:48.256757 20842 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0819 14:38:12.636855 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0819 14:38:12.637212 20842 solver.cpp:404]     Test net output #1: loss = 0.408452 (* 1 = 0.408452 loss)\nI0819 14:38:13.953663 20842 solver.cpp:228] Iteration 40200, loss = 0.0689979\nI0819 14:38:13.953723 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 14:38:13.953742 20842 solver.cpp:244]     Train net output #1: loss = 0.0689979 (* 1 = 0.0689979 loss)\nI0819 14:38:14.038564 20842 sgd_solver.cpp:166] Iteration 40200, lr = 1.005\nI0819 14:40:31.278520 20842 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0819 14:41:55.643605 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88804\nI0819 14:41:55.643971 20842 solver.cpp:404]     Test net output #1: loss = 0.408091 (* 1 = 0.408091 loss)\nI0819 14:41:56.960579 20842 solver.cpp:228] Iteration 40300, loss = 0.107332\nI0819 14:41:56.960640 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:41:56.960659 20842 solver.cpp:244]     Train net output #1: loss = 0.107332 (* 
1 = 0.107332 loss)\nI0819 14:41:57.047924 20842 sgd_solver.cpp:166] Iteration 40300, lr = 1.0075\nI0819 14:44:14.301714 20842 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0819 14:45:38.675745 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0819 14:45:38.676080 20842 solver.cpp:404]     Test net output #1: loss = 0.423149 (* 1 = 0.423149 loss)\nI0819 14:45:39.992631 20842 solver.cpp:228] Iteration 40400, loss = 0.114816\nI0819 14:45:39.992687 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:45:39.992705 20842 solver.cpp:244]     Train net output #1: loss = 0.114816 (* 1 = 0.114816 loss)\nI0819 14:45:40.077780 20842 sgd_solver.cpp:166] Iteration 40400, lr = 1.01\nI0819 14:47:57.053623 20842 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0819 14:49:21.422813 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 14:49:21.423171 20842 solver.cpp:404]     Test net output #1: loss = 0.443852 (* 1 = 0.443852 loss)\nI0819 14:49:22.738947 20842 solver.cpp:228] Iteration 40500, loss = 0.115442\nI0819 14:49:22.739006 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:49:22.739024 20842 solver.cpp:244]     Train net output #1: loss = 0.115442 (* 1 = 0.115442 loss)\nI0819 14:49:22.826642 20842 sgd_solver.cpp:166] Iteration 40500, lr = 1.0125\nI0819 14:51:40.010591 20842 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0819 14:53:04.354327 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88428\nI0819 14:53:04.354662 20842 solver.cpp:404]     Test net output #1: loss = 0.437853 (* 1 = 0.437853 loss)\nI0819 14:53:05.670084 20842 solver.cpp:228] Iteration 40600, loss = 0.0392997\nI0819 14:53:05.670136 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:53:05.670152 20842 solver.cpp:244]     Train net output #1: loss = 0.0392998 (* 1 = 0.0392998 loss)\nI0819 14:53:05.760303 20842 sgd_solver.cpp:166] Iteration 40600, lr = 1.015\nI0819 14:55:22.742974 20842 
solver.cpp:337] Iteration 40700, Testing net (#0)\nI0819 14:56:47.097724 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88468\nI0819 14:56:47.097987 20842 solver.cpp:404]     Test net output #1: loss = 0.429023 (* 1 = 0.429023 loss)\nI0819 14:56:48.414049 20842 solver.cpp:228] Iteration 40700, loss = 0.072652\nI0819 14:56:48.414091 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 14:56:48.414108 20842 solver.cpp:244]     Train net output #1: loss = 0.072652 (* 1 = 0.072652 loss)\nI0819 14:56:48.503808 20842 sgd_solver.cpp:166] Iteration 40700, lr = 1.0175\nI0819 14:59:05.495667 20842 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0819 15:00:29.793417 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0819 15:00:29.793695 20842 solver.cpp:404]     Test net output #1: loss = 0.443908 (* 1 = 0.443908 loss)\nI0819 15:00:31.109042 20842 solver.cpp:228] Iteration 40800, loss = 0.117943\nI0819 15:00:31.109100 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:00:31.109118 20842 solver.cpp:244]     Train net output #1: loss = 0.117943 (* 1 = 0.117943 loss)\nI0819 15:00:31.191792 20842 sgd_solver.cpp:166] Iteration 40800, lr = 1.02\nI0819 15:02:48.233307 20842 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0819 15:04:12.537953 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0819 15:04:12.538208 20842 solver.cpp:404]     Test net output #1: loss = 0.448928 (* 1 = 0.448928 loss)\nI0819 15:04:13.853771 20842 solver.cpp:228] Iteration 40900, loss = 0.0694757\nI0819 15:04:13.853814 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:04:13.853830 20842 solver.cpp:244]     Train net output #1: loss = 0.0694757 (* 1 = 0.0694757 loss)\nI0819 15:04:13.936322 20842 sgd_solver.cpp:166] Iteration 40900, lr = 1.0225\nI0819 15:06:30.940565 20842 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0819 15:07:55.304373 20842 solver.cpp:404]     Test net output #0: 
accuracy = 0.8828\nI0819 15:07:55.304652 20842 solver.cpp:404]     Test net output #1: loss = 0.4414 (* 1 = 0.4414 loss)\nI0819 15:07:56.621281 20842 solver.cpp:228] Iteration 41000, loss = 0.166686\nI0819 15:07:56.621323 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:07:56.621340 20842 solver.cpp:244]     Train net output #1: loss = 0.166686 (* 1 = 0.166686 loss)\nI0819 15:07:56.703313 20842 sgd_solver.cpp:166] Iteration 41000, lr = 1.025\nI0819 15:10:13.249845 20842 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0819 15:11:37.620486 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88556\nI0819 15:11:37.620803 20842 solver.cpp:404]     Test net output #1: loss = 0.428297 (* 1 = 0.428297 loss)\nI0819 15:11:38.936444 20842 solver.cpp:228] Iteration 41100, loss = 0.172904\nI0819 15:11:38.936502 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 15:11:38.936520 20842 solver.cpp:244]     Train net output #1: loss = 0.172904 (* 1 = 0.172904 loss)\nI0819 15:11:39.027930 20842 sgd_solver.cpp:166] Iteration 41100, lr = 1.0275\nI0819 15:13:56.049121 20842 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0819 15:15:19.992800 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 15:15:19.993054 20842 solver.cpp:404]     Test net output #1: loss = 0.4234 (* 1 = 0.4234 loss)\nI0819 15:15:21.310297 20842 solver.cpp:228] Iteration 41200, loss = 0.161436\nI0819 15:15:21.310338 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:15:21.310353 20842 solver.cpp:244]     Train net output #1: loss = 0.161436 (* 1 = 0.161436 loss)\nI0819 15:15:21.390352 20842 sgd_solver.cpp:166] Iteration 41200, lr = 1.03\nI0819 15:17:38.485114 20842 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0819 15:19:02.477030 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 15:19:02.477279 20842 solver.cpp:404]     Test net output #1: loss = 0.416657 (* 1 = 0.416657 loss)\nI0819 
15:19:03.794359 20842 solver.cpp:228] Iteration 41300, loss = 0.114193\nI0819 15:19:03.794400 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:19:03.794415 20842 solver.cpp:244]     Train net output #1: loss = 0.114193 (* 1 = 0.114193 loss)\nI0819 15:19:03.884737 20842 sgd_solver.cpp:166] Iteration 41300, lr = 1.0325\nI0819 15:21:20.400521 20842 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0819 15:22:44.182147 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89068\nI0819 15:22:44.182433 20842 solver.cpp:404]     Test net output #1: loss = 0.421546 (* 1 = 0.421546 loss)\nI0819 15:22:45.499680 20842 solver.cpp:228] Iteration 41400, loss = 0.0954982\nI0819 15:22:45.499721 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:22:45.499737 20842 solver.cpp:244]     Train net output #1: loss = 0.0954982 (* 1 = 0.0954982 loss)\nI0819 15:22:45.582331 20842 sgd_solver.cpp:166] Iteration 41400, lr = 1.035\nI0819 15:25:02.634970 20842 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0819 15:26:26.539960 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0819 15:26:26.540195 20842 solver.cpp:404]     Test net output #1: loss = 0.406167 (* 1 = 0.406167 loss)\nI0819 15:26:27.857306 20842 solver.cpp:228] Iteration 41500, loss = 0.166578\nI0819 15:26:27.857347 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 15:26:27.857363 20842 solver.cpp:244]     Train net output #1: loss = 0.166578 (* 1 = 0.166578 loss)\nI0819 15:26:27.943773 20842 sgd_solver.cpp:166] Iteration 41500, lr = 1.0375\nI0819 15:28:44.503888 20842 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0819 15:30:08.295929 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883321\nI0819 15:30:08.296180 20842 solver.cpp:404]     Test net output #1: loss = 0.446313 (* 1 = 0.446313 loss)\nI0819 15:30:09.612512 20842 solver.cpp:228] Iteration 41600, loss = 0.082277\nI0819 15:30:09.612570 20842 solver.cpp:244]     
Train net output #0: accuracy = 0.968\nI0819 15:30:09.612596 20842 solver.cpp:244]     Train net output #1: loss = 0.0822769 (* 1 = 0.0822769 loss)\nI0819 15:30:09.696410 20842 sgd_solver.cpp:166] Iteration 41600, lr = 1.04\nI0819 15:32:26.667532 20842 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0819 15:33:50.485781 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0819 15:33:50.486057 20842 solver.cpp:404]     Test net output #1: loss = 0.42151 (* 1 = 0.42151 loss)\nI0819 15:33:51.802692 20842 solver.cpp:228] Iteration 41700, loss = 0.14008\nI0819 15:33:51.802733 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:33:51.802749 20842 solver.cpp:244]     Train net output #1: loss = 0.14008 (* 1 = 0.14008 loss)\nI0819 15:33:51.892065 20842 sgd_solver.cpp:166] Iteration 41700, lr = 1.0425\nI0819 15:36:08.450507 20842 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0819 15:37:32.323873 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0819 15:37:32.324126 20842 solver.cpp:404]     Test net output #1: loss = 0.420955 (* 1 = 0.420955 loss)\nI0819 15:37:33.641052 20842 solver.cpp:228] Iteration 41800, loss = 0.0815207\nI0819 15:37:33.641109 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 15:37:33.641127 20842 solver.cpp:244]     Train net output #1: loss = 0.0815206 (* 1 = 0.0815206 loss)\nI0819 15:37:33.726439 20842 sgd_solver.cpp:166] Iteration 41800, lr = 1.045\nI0819 15:39:50.748473 20842 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0819 15:41:14.700454 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88428\nI0819 15:41:14.700749 20842 solver.cpp:404]     Test net output #1: loss = 0.421533 (* 1 = 0.421533 loss)\nI0819 15:41:16.017690 20842 solver.cpp:228] Iteration 41900, loss = 0.179505\nI0819 15:41:16.017741 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:41:16.017758 20842 solver.cpp:244]     Train net output #1: loss = 0.179505 (* 1 
= 0.179505 loss)\nI0819 15:41:16.105684 20842 sgd_solver.cpp:166] Iteration 41900, lr = 1.0475\nI0819 15:43:33.022956 20842 solver.cpp:337] Iteration 42000, Testing net (#0)\nI0819 15:44:56.901716 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0819 15:44:56.901989 20842 solver.cpp:404]     Test net output #1: loss = 0.420926 (* 1 = 0.420926 loss)\nI0819 15:44:58.218075 20842 solver.cpp:228] Iteration 42000, loss = 0.0830381\nI0819 15:44:58.218132 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:44:58.218149 20842 solver.cpp:244]     Train net output #1: loss = 0.0830379 (* 1 = 0.0830379 loss)\nI0819 15:44:58.304191 20842 sgd_solver.cpp:166] Iteration 42000, lr = 1.05\nI0819 15:47:15.550253 20842 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0819 15:48:39.445684 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0819 15:48:39.445936 20842 solver.cpp:404]     Test net output #1: loss = 0.422775 (* 1 = 0.422775 loss)\nI0819 15:48:40.762994 20842 solver.cpp:228] Iteration 42100, loss = 0.0975785\nI0819 15:48:40.763056 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:48:40.763073 20842 solver.cpp:244]     Train net output #1: loss = 0.0975783 (* 1 = 0.0975783 loss)\nI0819 15:48:40.851382 20842 sgd_solver.cpp:166] Iteration 42100, lr = 1.0525\nI0819 15:50:58.094035 20842 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0819 15:52:22.304388 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88176\nI0819 15:52:22.304684 20842 solver.cpp:404]     Test net output #1: loss = 0.423596 (* 1 = 0.423596 loss)\nI0819 15:52:23.620539 20842 solver.cpp:228] Iteration 42200, loss = 0.101483\nI0819 15:52:23.620599 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:52:23.620621 20842 solver.cpp:244]     Train net output #1: loss = 0.101483 (* 1 = 0.101483 loss)\nI0819 15:52:23.702863 20842 sgd_solver.cpp:166] Iteration 42200, lr = 1.055\nI0819 15:54:40.576478 
20842 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0819 15:56:04.880035 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0819 15:56:04.880306 20842 solver.cpp:404]     Test net output #1: loss = 0.42581 (* 1 = 0.42581 loss)\nI0819 15:56:06.195991 20842 solver.cpp:228] Iteration 42300, loss = 0.0906194\nI0819 15:56:06.196053 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:56:06.196071 20842 solver.cpp:244]     Train net output #1: loss = 0.0906193 (* 1 = 0.0906193 loss)\nI0819 15:56:06.278511 20842 sgd_solver.cpp:166] Iteration 42300, lr = 1.0575\nI0819 15:58:23.545550 20842 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0819 15:59:47.887323 20842 solver.cpp:404]     Test net output #0: accuracy = 0.877081\nI0819 15:59:47.887574 20842 solver.cpp:404]     Test net output #1: loss = 0.436935 (* 1 = 0.436935 loss)\nI0819 15:59:49.203676 20842 solver.cpp:228] Iteration 42400, loss = 0.0941524\nI0819 15:59:49.203729 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:59:49.203747 20842 solver.cpp:244]     Train net output #1: loss = 0.0941522 (* 1 = 0.0941522 loss)\nI0819 15:59:49.292186 20842 sgd_solver.cpp:166] Iteration 42400, lr = 1.06\nI0819 16:02:06.347208 20842 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0819 16:03:30.585422 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 16:03:30.585741 20842 solver.cpp:404]     Test net output #1: loss = 0.438871 (* 1 = 0.438871 loss)\nI0819 16:03:31.902520 20842 solver.cpp:228] Iteration 42500, loss = 0.127462\nI0819 16:03:31.902565 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:03:31.902580 20842 solver.cpp:244]     Train net output #1: loss = 0.127462 (* 1 = 0.127462 loss)\nI0819 16:03:31.990734 20842 sgd_solver.cpp:166] Iteration 42500, lr = 1.0625\nI0819 16:05:48.428817 20842 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0819 16:07:12.236268 20842 solver.cpp:404]     Test net output 
#0: accuracy = 0.88116\nI0819 16:07:12.236510 20842 solver.cpp:404]     Test net output #1: loss = 0.445103 (* 1 = 0.445103 loss)\nI0819 16:07:13.552765 20842 solver.cpp:228] Iteration 42600, loss = 0.116758\nI0819 16:07:13.552810 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:07:13.552827 20842 solver.cpp:244]     Train net output #1: loss = 0.116757 (* 1 = 0.116757 loss)\nI0819 16:07:13.640494 20842 sgd_solver.cpp:166] Iteration 42600, lr = 1.065\nI0819 16:09:30.096056 20842 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0819 16:10:54.281286 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 16:10:54.281585 20842 solver.cpp:404]     Test net output #1: loss = 0.452329 (* 1 = 0.452329 loss)\nI0819 16:10:55.598372 20842 solver.cpp:228] Iteration 42700, loss = 0.146651\nI0819 16:10:55.598417 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:10:55.598433 20842 solver.cpp:244]     Train net output #1: loss = 0.146651 (* 1 = 0.146651 loss)\nI0819 16:10:55.690224 20842 sgd_solver.cpp:166] Iteration 42700, lr = 1.0675\nI0819 16:13:12.210304 20842 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0819 16:14:36.378252 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88996\nI0819 16:14:36.378486 20842 solver.cpp:404]     Test net output #1: loss = 0.402096 (* 1 = 0.402096 loss)\nI0819 16:14:37.694067 20842 solver.cpp:228] Iteration 42800, loss = 0.050999\nI0819 16:14:37.694128 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:14:37.694146 20842 solver.cpp:244]     Train net output #1: loss = 0.0509988 (* 1 = 0.0509988 loss)\nI0819 16:14:37.786448 20842 sgd_solver.cpp:166] Iteration 42800, lr = 1.07\nI0819 16:16:54.750197 20842 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0819 16:18:19.080370 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87628\nI0819 16:18:19.080647 20842 solver.cpp:404]     Test net output #1: loss = 0.454815 (* 1 = 0.454815 
loss)\nI0819 16:18:20.397399 20842 solver.cpp:228] Iteration 42900, loss = 0.0718686\nI0819 16:18:20.397444 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:18:20.397460 20842 solver.cpp:244]     Train net output #1: loss = 0.0718684 (* 1 = 0.0718684 loss)\nI0819 16:18:20.479557 20842 sgd_solver.cpp:166] Iteration 42900, lr = 1.0725\nI0819 16:20:37.051017 20842 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0819 16:22:01.396062 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87956\nI0819 16:22:01.396369 20842 solver.cpp:404]     Test net output #1: loss = 0.439711 (* 1 = 0.439711 loss)\nI0819 16:22:02.712579 20842 solver.cpp:228] Iteration 43000, loss = 0.155147\nI0819 16:22:02.712620 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:22:02.712637 20842 solver.cpp:244]     Train net output #1: loss = 0.155147 (* 1 = 0.155147 loss)\nI0819 16:22:02.801148 20842 sgd_solver.cpp:166] Iteration 43000, lr = 1.075\nI0819 16:24:19.367108 20842 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0819 16:25:43.706992 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0819 16:25:43.707247 20842 solver.cpp:404]     Test net output #1: loss = 0.444012 (* 1 = 0.444012 loss)\nI0819 16:25:45.022745 20842 solver.cpp:228] Iteration 43100, loss = 0.0822793\nI0819 16:25:45.022805 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:25:45.022822 20842 solver.cpp:244]     Train net output #1: loss = 0.0822791 (* 1 = 0.0822791 loss)\nI0819 16:25:45.113180 20842 sgd_solver.cpp:166] Iteration 43100, lr = 1.0775\nI0819 16:28:02.074915 20842 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0819 16:29:26.399281 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87688\nI0819 16:29:26.399554 20842 solver.cpp:404]     Test net output #1: loss = 0.469686 (* 1 = 0.469686 loss)\nI0819 16:29:27.715275 20842 solver.cpp:228] Iteration 43200, loss = 0.163117\nI0819 16:29:27.715317 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:29:27.715332 20842 solver.cpp:244]     Train net output #1: loss = 0.163117 (* 1 = 0.163117 loss)\nI0819 16:29:27.806363 20842 sgd_solver.cpp:166] Iteration 43200, lr = 1.08\nI0819 16:31:44.320863 20842 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0819 16:33:08.659476 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88752\nI0819 16:33:08.659787 20842 solver.cpp:404]     Test net output #1: loss = 0.394905 (* 1 = 0.394905 loss)\nI0819 16:33:09.975680 20842 solver.cpp:228] Iteration 43300, loss = 0.0922109\nI0819 16:33:09.975723 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 16:33:09.975739 20842 solver.cpp:244]     Train net output #1: loss = 0.0922107 (* 1 = 0.0922107 loss)\nI0819 16:33:10.066958 20842 sgd_solver.cpp:166] Iteration 43300, lr = 1.0825\nI0819 16:35:27.058612 20842 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0819 16:36:51.401229 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0819 16:36:51.401480 20842 solver.cpp:404]     Test net output #1: loss = 0.45251 (* 1 = 0.45251 loss)\nI0819 16:36:52.717301 20842 solver.cpp:228] Iteration 43400, loss = 0.123031\nI0819 16:36:52.717344 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:36:52.717360 20842 solver.cpp:244]     Train net output #1: loss = 0.12303 (* 1 = 0.12303 loss)\nI0819 16:36:52.808820 20842 sgd_solver.cpp:166] Iteration 43400, lr = 1.085\nI0819 16:39:09.358063 20842 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0819 16:40:33.698194 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87636\nI0819 16:40:33.698477 20842 solver.cpp:404]     Test net output #1: loss = 0.446102 (* 1 = 0.446102 loss)\nI0819 16:40:35.014451 20842 solver.cpp:228] Iteration 43500, loss = 0.186254\nI0819 16:40:35.014494 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 16:40:35.014509 20842 solver.cpp:244]     Train net output #1: 
loss = 0.186254 (* 1 = 0.186254 loss)\nI0819 16:40:35.104967 20842 sgd_solver.cpp:166] Iteration 43500, lr = 1.0875\nI0819 16:42:52.091401 20842 solver.cpp:337] Iteration 43600, Testing net (#0)\nI0819 16:44:16.419939 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0819 16:44:16.420244 20842 solver.cpp:404]     Test net output #1: loss = 0.417315 (* 1 = 0.417315 loss)\nI0819 16:44:17.735972 20842 solver.cpp:228] Iteration 43600, loss = 0.0322493\nI0819 16:44:17.736032 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 16:44:17.736048 20842 solver.cpp:244]     Train net output #1: loss = 0.032249 (* 1 = 0.032249 loss)\nI0819 16:44:17.827677 20842 sgd_solver.cpp:166] Iteration 43600, lr = 1.09\nI0819 16:46:34.895043 20842 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0819 16:47:59.222707 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 16:47:59.222954 20842 solver.cpp:404]     Test net output #1: loss = 0.428009 (* 1 = 0.428009 loss)\nI0819 16:48:00.538813 20842 solver.cpp:228] Iteration 43700, loss = 0.073363\nI0819 16:48:00.538856 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 16:48:00.538872 20842 solver.cpp:244]     Train net output #1: loss = 0.0733628 (* 1 = 0.0733628 loss)\nI0819 16:48:00.628907 20842 sgd_solver.cpp:166] Iteration 43700, lr = 1.0925\nI0819 16:50:17.701531 20842 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0819 16:51:42.037647 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88876\nI0819 16:51:42.037943 20842 solver.cpp:404]     Test net output #1: loss = 0.405801 (* 1 = 0.405801 loss)\nI0819 16:51:43.353668 20842 solver.cpp:228] Iteration 43800, loss = 0.122101\nI0819 16:51:43.353710 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:51:43.353726 20842 solver.cpp:244]     Train net output #1: loss = 0.1221 (* 1 = 0.1221 loss)\nI0819 16:51:43.438297 20842 sgd_solver.cpp:166] Iteration 43800, lr = 1.095\nI0819 
16:54:00.417438 20842 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0819 16:55:24.752938 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0819 16:55:24.753219 20842 solver.cpp:404]     Test net output #1: loss = 0.437456 (* 1 = 0.437456 loss)\nI0819 16:55:26.068616 20842 solver.cpp:228] Iteration 43900, loss = 0.0774972\nI0819 16:55:26.068660 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:55:26.068675 20842 solver.cpp:244]     Train net output #1: loss = 0.0774969 (* 1 = 0.0774969 loss)\nI0819 16:55:26.163002 20842 sgd_solver.cpp:166] Iteration 43900, lr = 1.0975\nI0819 16:57:43.109586 20842 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0819 16:59:07.453003 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 16:59:07.453238 20842 solver.cpp:404]     Test net output #1: loss = 0.44173 (* 1 = 0.44173 loss)\nI0819 16:59:08.768952 20842 solver.cpp:228] Iteration 44000, loss = 0.151698\nI0819 16:59:08.769011 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:59:08.769028 20842 solver.cpp:244]     Train net output #1: loss = 0.151698 (* 1 = 0.151698 loss)\nI0819 16:59:08.860561 20842 sgd_solver.cpp:166] Iteration 44000, lr = 1.1\nI0819 17:01:25.368698 20842 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0819 17:02:49.697736 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0819 17:02:49.698046 20842 solver.cpp:404]     Test net output #1: loss = 0.442126 (* 1 = 0.442126 loss)\nI0819 17:02:51.013828 20842 solver.cpp:228] Iteration 44100, loss = 0.168021\nI0819 17:02:51.013872 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:02:51.013887 20842 solver.cpp:244]     Train net output #1: loss = 0.16802 (* 1 = 0.16802 loss)\nI0819 17:02:51.098011 20842 sgd_solver.cpp:166] Iteration 44100, lr = 1.1025\nI0819 17:05:08.081708 20842 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0819 17:06:32.424449 20842 solver.cpp:404]     Test 
net output #0: accuracy = 0.88288\nI0819 17:06:32.424733 20842 solver.cpp:404]     Test net output #1: loss = 0.422997 (* 1 = 0.422997 loss)\nI0819 17:06:33.741637 20842 solver.cpp:228] Iteration 44200, loss = 0.110696\nI0819 17:06:33.741683 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:06:33.741698 20842 solver.cpp:244]     Train net output #1: loss = 0.110696 (* 1 = 0.110696 loss)\nI0819 17:06:33.827095 20842 sgd_solver.cpp:166] Iteration 44200, lr = 1.105\nI0819 17:08:50.801523 20842 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0819 17:10:15.141168 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 17:10:15.141465 20842 solver.cpp:404]     Test net output #1: loss = 0.443436 (* 1 = 0.443436 loss)\nI0819 17:10:16.458192 20842 solver.cpp:228] Iteration 44300, loss = 0.122972\nI0819 17:10:16.458250 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:10:16.458267 20842 solver.cpp:244]     Train net output #1: loss = 0.122972 (* 1 = 0.122972 loss)\nI0819 17:10:16.546558 20842 sgd_solver.cpp:166] Iteration 44300, lr = 1.1075\nI0819 17:12:33.549443 20842 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0819 17:13:57.881803 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 17:13:57.882113 20842 solver.cpp:404]     Test net output #1: loss = 0.452093 (* 1 = 0.452093 loss)\nI0819 17:13:59.198477 20842 solver.cpp:228] Iteration 44400, loss = 0.0724894\nI0819 17:13:59.198521 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:13:59.198537 20842 solver.cpp:244]     Train net output #1: loss = 0.0724891 (* 1 = 0.0724891 loss)\nI0819 17:13:59.287684 20842 sgd_solver.cpp:166] Iteration 44400, lr = 1.11\nI0819 17:16:15.857762 20842 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0819 17:17:40.200954 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88344\nI0819 17:17:40.201226 20842 solver.cpp:404]     Test net output #1: loss = 0.44296 (* 1 = 
0.44296 loss)\nI0819 17:17:41.518066 20842 solver.cpp:228] Iteration 44500, loss = 0.130103\nI0819 17:17:41.518127 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:17:41.518144 20842 solver.cpp:244]     Train net output #1: loss = 0.130103 (* 1 = 0.130103 loss)\nI0819 17:17:41.608682 20842 sgd_solver.cpp:166] Iteration 44500, lr = 1.1125\nI0819 17:19:58.591599 20842 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0819 17:21:22.939357 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88108\nI0819 17:21:22.939635 20842 solver.cpp:404]     Test net output #1: loss = 0.434576 (* 1 = 0.434576 loss)\nI0819 17:21:24.256943 20842 solver.cpp:228] Iteration 44600, loss = 0.0775031\nI0819 17:21:24.256988 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:21:24.257002 20842 solver.cpp:244]     Train net output #1: loss = 0.0775028 (* 1 = 0.0775028 loss)\nI0819 17:21:24.342739 20842 sgd_solver.cpp:166] Iteration 44600, lr = 1.115\nI0819 17:23:40.877173 20842 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0819 17:25:05.238229 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88432\nI0819 17:25:05.238474 20842 solver.cpp:404]     Test net output #1: loss = 0.420714 (* 1 = 0.420714 loss)\nI0819 17:25:06.555369 20842 solver.cpp:228] Iteration 44700, loss = 0.1075\nI0819 17:25:06.555431 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:25:06.555449 20842 solver.cpp:244]     Train net output #1: loss = 0.1075 (* 1 = 0.1075 loss)\nI0819 17:25:06.638255 20842 sgd_solver.cpp:166] Iteration 44700, lr = 1.1175\nI0819 17:27:23.480126 20842 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0819 17:28:47.835189 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88084\nI0819 17:28:47.835444 20842 solver.cpp:404]     Test net output #1: loss = 0.425032 (* 1 = 0.425032 loss)\nI0819 17:28:49.151792 20842 solver.cpp:228] Iteration 44800, loss = 0.103171\nI0819 17:28:49.151845 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 17:28:49.151861 20842 solver.cpp:244]     Train net output #1: loss = 0.103171 (* 1 = 0.103171 loss)\nI0819 17:28:49.231685 20842 sgd_solver.cpp:166] Iteration 44800, lr = 1.12\nI0819 17:31:06.068555 20842 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0819 17:32:30.398710 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88244\nI0819 17:32:30.399010 20842 solver.cpp:404]     Test net output #1: loss = 0.445092 (* 1 = 0.445092 loss)\nI0819 17:32:31.715412 20842 solver.cpp:228] Iteration 44900, loss = 0.0935781\nI0819 17:32:31.715467 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:32:31.715484 20842 solver.cpp:244]     Train net output #1: loss = 0.0935778 (* 1 = 0.0935778 loss)\nI0819 17:32:31.798094 20842 sgd_solver.cpp:166] Iteration 44900, lr = 1.1225\nI0819 17:34:48.302078 20842 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0819 17:36:12.631753 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885841\nI0819 17:36:12.632002 20842 solver.cpp:404]     Test net output #1: loss = 0.402446 (* 1 = 0.402446 loss)\nI0819 17:36:13.948551 20842 solver.cpp:228] Iteration 45000, loss = 0.0846264\nI0819 17:36:13.948616 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:36:13.948633 20842 solver.cpp:244]     Train net output #1: loss = 0.0846261 (* 1 = 0.0846261 loss)\nI0819 17:36:14.031208 20842 sgd_solver.cpp:166] Iteration 45000, lr = 1.125\nI0819 17:38:30.527765 20842 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0819 17:39:54.833392 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0819 17:39:54.833673 20842 solver.cpp:404]     Test net output #1: loss = 0.444094 (* 1 = 0.444094 loss)\nI0819 17:39:56.149961 20842 solver.cpp:228] Iteration 45100, loss = 0.114075\nI0819 17:39:56.150014 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:39:56.150032 20842 solver.cpp:244]     Train net 
output #1: loss = 0.114075 (* 1 = 0.114075 loss)\nI0819 17:39:56.235913 20842 sgd_solver.cpp:166] Iteration 45100, lr = 1.1275\nI0819 17:42:12.814368 20842 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0819 17:43:37.146697 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 17:43:37.146951 20842 solver.cpp:404]     Test net output #1: loss = 0.406652 (* 1 = 0.406652 loss)\nI0819 17:43:38.463361 20842 solver.cpp:228] Iteration 45200, loss = 0.06434\nI0819 17:43:38.463418 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:43:38.463435 20842 solver.cpp:244]     Train net output #1: loss = 0.0643397 (* 1 = 0.0643397 loss)\nI0819 17:43:38.547410 20842 sgd_solver.cpp:166] Iteration 45200, lr = 1.13\nI0819 17:45:55.023679 20842 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0819 17:47:19.347532 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88408\nI0819 17:47:19.347800 20842 solver.cpp:404]     Test net output #1: loss = 0.418116 (* 1 = 0.418116 loss)\nI0819 17:47:20.664870 20842 solver.cpp:228] Iteration 45300, loss = 0.116407\nI0819 17:47:20.664921 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:47:20.664938 20842 solver.cpp:244]     Train net output #1: loss = 0.116407 (* 1 = 0.116407 loss)\nI0819 17:47:20.748562 20842 sgd_solver.cpp:166] Iteration 45300, lr = 1.1325\nI0819 17:49:37.548171 20842 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0819 17:51:01.909354 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0819 17:51:01.909639 20842 solver.cpp:404]     Test net output #1: loss = 0.424975 (* 1 = 0.424975 loss)\nI0819 17:51:03.226229 20842 solver.cpp:228] Iteration 45400, loss = 0.168278\nI0819 17:51:03.226287 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 17:51:03.226305 20842 solver.cpp:244]     Train net output #1: loss = 0.168277 (* 1 = 0.168277 loss)\nI0819 17:51:03.314170 20842 sgd_solver.cpp:166] Iteration 45400, lr = 
1.135\nI0819 17:53:20.205605 20842 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0819 17:54:44.574446 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8884\nI0819 17:54:44.574726 20842 solver.cpp:404]     Test net output #1: loss = 0.416324 (* 1 = 0.416324 loss)\nI0819 17:54:45.890282 20842 solver.cpp:228] Iteration 45500, loss = 0.0725264\nI0819 17:54:45.890334 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:54:45.890352 20842 solver.cpp:244]     Train net output #1: loss = 0.072526 (* 1 = 0.072526 loss)\nI0819 17:54:45.977095 20842 sgd_solver.cpp:166] Iteration 45500, lr = 1.1375\nI0819 17:57:02.474261 20842 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0819 17:58:26.798848 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88308\nI0819 17:58:26.799290 20842 solver.cpp:404]     Test net output #1: loss = 0.416333 (* 1 = 0.416333 loss)\nI0819 17:58:28.114933 20842 solver.cpp:228] Iteration 45600, loss = 0.212741\nI0819 17:58:28.114985 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 17:58:28.115001 20842 solver.cpp:244]     Train net output #1: loss = 0.21274 (* 1 = 0.21274 loss)\nI0819 17:58:28.202960 20842 sgd_solver.cpp:166] Iteration 45600, lr = 1.14\nI0819 18:00:44.735707 20842 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0819 18:02:09.054828 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88628\nI0819 18:02:09.055162 20842 solver.cpp:404]     Test net output #1: loss = 0.399868 (* 1 = 0.399868 loss)\nI0819 18:02:10.371168 20842 solver.cpp:228] Iteration 45700, loss = 0.0741218\nI0819 18:02:10.371223 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:02:10.371239 20842 solver.cpp:244]     Train net output #1: loss = 0.0741215 (* 1 = 0.0741215 loss)\nI0819 18:02:10.461418 20842 sgd_solver.cpp:166] Iteration 45700, lr = 1.1425\nI0819 18:04:27.345945 20842 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0819 18:05:51.687800 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0819 18:05:51.688058 20842 solver.cpp:404]     Test net output #1: loss = 0.416934 (* 1 = 0.416934 loss)\nI0819 18:05:53.003389 20842 solver.cpp:228] Iteration 45800, loss = 0.130815\nI0819 18:05:53.003445 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:05:53.003461 20842 solver.cpp:244]     Train net output #1: loss = 0.130814 (* 1 = 0.130814 loss)\nI0819 18:05:53.087927 20842 sgd_solver.cpp:166] Iteration 45800, lr = 1.145\nI0819 18:08:09.946841 20842 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0819 18:09:34.270437 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88308\nI0819 18:09:34.270701 20842 solver.cpp:404]     Test net output #1: loss = 0.42596 (* 1 = 0.42596 loss)\nI0819 18:09:35.586061 20842 solver.cpp:228] Iteration 45900, loss = 0.0463506\nI0819 18:09:35.586117 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 18:09:35.586134 20842 solver.cpp:244]     Train net output #1: loss = 0.0463503 (* 1 = 0.0463503 loss)\nI0819 18:09:35.670084 20842 sgd_solver.cpp:166] Iteration 45900, lr = 1.1475\nI0819 18:11:52.180152 20842 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0819 18:13:16.532573 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0819 18:13:16.532902 20842 solver.cpp:404]     Test net output #1: loss = 0.408276 (* 1 = 0.408276 loss)\nI0819 18:13:17.848626 20842 solver.cpp:228] Iteration 46000, loss = 0.108295\nI0819 18:13:17.848685 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:13:17.848703 20842 solver.cpp:244]     Train net output #1: loss = 0.108294 (* 1 = 0.108294 loss)\nI0819 18:13:17.933336 20842 sgd_solver.cpp:166] Iteration 46000, lr = 1.15\nI0819 18:15:34.433696 20842 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0819 18:16:58.781533 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88972\nI0819 18:16:58.781813 20842 solver.cpp:404]     Test net output #1: 
loss = 0.4003 (* 1 = 0.4003 loss)\nI0819 18:17:00.097062 20842 solver.cpp:228] Iteration 46100, loss = 0.0742068\nI0819 18:17:00.097123 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:17:00.097141 20842 solver.cpp:244]     Train net output #1: loss = 0.0742065 (* 1 = 0.0742065 loss)\nI0819 18:17:00.183863 20842 sgd_solver.cpp:166] Iteration 46100, lr = 1.1525\nI0819 18:19:16.696987 20842 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0819 18:20:41.045053 20842 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0819 18:20:41.045305 20842 solver.cpp:404]     Test net output #1: loss = 0.408499 (* 1 = 0.408499 loss)\nI0819 18:20:42.361404 20842 solver.cpp:228] Iteration 46200, loss = 0.102822\nI0819 18:20:42.361464 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:20:42.361480 20842 solver.cpp:244]     Train net output #1: loss = 0.102821 (* 1 = 0.102821 loss)\nI0819 18:20:42.445713 20842 sgd_solver.cpp:166] Iteration 46200, lr = 1.155\nI0819 18:22:59.029356 20842 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0819 18:24:23.393990 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 18:24:23.394260 20842 solver.cpp:404]     Test net output #1: loss = 0.4467 (* 1 = 0.4467 loss)\nI0819 18:24:24.710121 20842 solver.cpp:228] Iteration 46300, loss = 0.109983\nI0819 18:24:24.710181 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:24:24.710198 20842 solver.cpp:244]     Train net output #1: loss = 0.109983 (* 1 = 0.109983 loss)\nI0819 18:24:24.794739 20842 sgd_solver.cpp:166] Iteration 46300, lr = 1.1575\nI0819 18:26:41.251916 20842 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0819 18:28:05.626313 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8784\nI0819 18:28:05.626572 20842 solver.cpp:404]     Test net output #1: loss = 0.442928 (* 1 = 0.442928 loss)\nI0819 18:28:06.942076 20842 solver.cpp:228] Iteration 46400, loss = 0.107179\nI0819 
18:28:06.942137 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:28:06.942154 20842 solver.cpp:244]     Train net output #1: loss = 0.107179 (* 1 = 0.107179 loss)\nI0819 18:28:07.022579 20842 sgd_solver.cpp:166] Iteration 46400, lr = 1.16\nI0819 18:30:23.394837 20842 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0819 18:31:47.710373 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8886\nI0819 18:31:47.710654 20842 solver.cpp:404]     Test net output #1: loss = 0.397722 (* 1 = 0.397722 loss)\nI0819 18:31:49.026240 20842 solver.cpp:228] Iteration 46500, loss = 0.0976386\nI0819 18:31:49.026298 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:31:49.026317 20842 solver.cpp:244]     Train net output #1: loss = 0.0976382 (* 1 = 0.0976382 loss)\nI0819 18:31:49.107583 20842 sgd_solver.cpp:166] Iteration 46500, lr = 1.1625\nI0819 18:34:05.884063 20842 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0819 18:35:30.232924 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 18:35:30.233173 20842 solver.cpp:404]     Test net output #1: loss = 0.416046 (* 1 = 0.416046 loss)\nI0819 18:35:31.548934 20842 solver.cpp:228] Iteration 46600, loss = 0.106338\nI0819 18:35:31.548987 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:35:31.549005 20842 solver.cpp:244]     Train net output #1: loss = 0.106338 (* 1 = 0.106338 loss)\nI0819 18:35:31.637522 20842 sgd_solver.cpp:166] Iteration 46600, lr = 1.165\nI0819 18:37:48.124397 20842 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0819 18:39:12.446997 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0819 18:39:12.447249 20842 solver.cpp:404]     Test net output #1: loss = 0.403196 (* 1 = 0.403196 loss)\nI0819 18:39:13.763348 20842 solver.cpp:228] Iteration 46700, loss = 0.111175\nI0819 18:39:13.763401 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:39:13.763420 20842 solver.cpp:244]    
 Train net output #1: loss = 0.111174 (* 1 = 0.111174 loss)\nI0819 18:39:13.843526 20842 sgd_solver.cpp:166] Iteration 46700, lr = 1.1675\nI0819 18:41:30.327230 20842 solver.cpp:337] Iteration 46800, Testing net (#0)\nI0819 18:42:54.649430 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87316\nI0819 18:42:54.649725 20842 solver.cpp:404]     Test net output #1: loss = 0.475678 (* 1 = 0.475678 loss)\nI0819 18:42:55.965313 20842 solver.cpp:228] Iteration 46800, loss = 0.121635\nI0819 18:42:55.965370 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:42:55.965389 20842 solver.cpp:244]     Train net output #1: loss = 0.121635 (* 1 = 0.121635 loss)\nI0819 18:42:56.054071 20842 sgd_solver.cpp:166] Iteration 46800, lr = 1.17\nI0819 18:45:12.529458 20842 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0819 18:46:36.875571 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89016\nI0819 18:46:36.875830 20842 solver.cpp:404]     Test net output #1: loss = 0.399688 (* 1 = 0.399688 loss)\nI0819 18:46:38.192095 20842 solver.cpp:228] Iteration 46900, loss = 0.191083\nI0819 18:46:38.192150 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 18:46:38.192167 20842 solver.cpp:244]     Train net output #1: loss = 0.191082 (* 1 = 0.191082 loss)\nI0819 18:46:38.275986 20842 sgd_solver.cpp:166] Iteration 46900, lr = 1.1725\nI0819 18:48:55.156103 20842 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0819 18:50:19.494268 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88684\nI0819 18:50:19.494601 20842 solver.cpp:404]     Test net output #1: loss = 0.40276 (* 1 = 0.40276 loss)\nI0819 18:50:20.809816 20842 solver.cpp:228] Iteration 47000, loss = 0.108908\nI0819 18:50:20.809871 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:50:20.809890 20842 solver.cpp:244]     Train net output #1: loss = 0.108908 (* 1 = 0.108908 loss)\nI0819 18:50:20.893559 20842 sgd_solver.cpp:166] Iteration 47000, lr 
= 1.175\nI0819 18:52:37.665685 20842 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0819 18:54:02.000552 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0819 18:54:02.000833 20842 solver.cpp:404]     Test net output #1: loss = 0.389385 (* 1 = 0.389385 loss)\nI0819 18:54:03.316556 20842 solver.cpp:228] Iteration 47100, loss = 0.0663966\nI0819 18:54:03.316606 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:54:03.316627 20842 solver.cpp:244]     Train net output #1: loss = 0.0663962 (* 1 = 0.0663962 loss)\nI0819 18:54:03.402567 20842 sgd_solver.cpp:166] Iteration 47100, lr = 1.1775\nI0819 18:56:19.827061 20842 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0819 18:57:44.078876 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88316\nI0819 18:57:44.079203 20842 solver.cpp:404]     Test net output #1: loss = 0.421444 (* 1 = 0.421444 loss)\nI0819 18:57:45.394629 20842 solver.cpp:228] Iteration 47200, loss = 0.105709\nI0819 18:57:45.394682 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:57:45.394701 20842 solver.cpp:244]     Train net output #1: loss = 0.105709 (* 1 = 0.105709 loss)\nI0819 18:57:45.489305 20842 sgd_solver.cpp:166] Iteration 47200, lr = 1.18\nI0819 19:00:02.327749 20842 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0819 19:01:26.656124 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0819 19:01:26.656381 20842 solver.cpp:404]     Test net output #1: loss = 0.424746 (* 1 = 0.424746 loss)\nI0819 19:01:27.972076 20842 solver.cpp:228] Iteration 47300, loss = 0.138139\nI0819 19:01:27.972136 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:01:27.972152 20842 solver.cpp:244]     Train net output #1: loss = 0.138139 (* 1 = 0.138139 loss)\nI0819 19:01:28.056397 20842 sgd_solver.cpp:166] Iteration 47300, lr = 1.1825\nI0819 19:03:44.708686 20842 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0819 19:05:09.058751 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88372\nI0819 19:05:09.059026 20842 solver.cpp:404]     Test net output #1: loss = 0.401862 (* 1 = 0.401862 loss)\nI0819 19:05:10.375540 20842 solver.cpp:228] Iteration 47400, loss = 0.0738497\nI0819 19:05:10.375583 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 19:05:10.375598 20842 solver.cpp:244]     Train net output #1: loss = 0.0738493 (* 1 = 0.0738493 loss)\nI0819 19:05:10.462604 20842 sgd_solver.cpp:166] Iteration 47400, lr = 1.185\nI0819 19:07:27.599789 20842 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0819 19:08:51.922308 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 19:08:51.922567 20842 solver.cpp:404]     Test net output #1: loss = 0.426123 (* 1 = 0.426123 loss)\nI0819 19:08:53.238145 20842 solver.cpp:228] Iteration 47500, loss = 0.11974\nI0819 19:08:53.238188 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:08:53.238204 20842 solver.cpp:244]     Train net output #1: loss = 0.11974 (* 1 = 0.11974 loss)\nI0819 19:08:53.329571 20842 sgd_solver.cpp:166] Iteration 47500, lr = 1.1875\nI0819 19:11:09.884425 20842 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0819 19:12:34.222049 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 19:12:34.222350 20842 solver.cpp:404]     Test net output #1: loss = 0.418062 (* 1 = 0.418062 loss)\nI0819 19:12:35.538478 20842 solver.cpp:228] Iteration 47600, loss = 0.1345\nI0819 19:12:35.538523 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:12:35.538538 20842 solver.cpp:244]     Train net output #1: loss = 0.1345 (* 1 = 0.1345 loss)\nI0819 19:12:35.625231 20842 sgd_solver.cpp:166] Iteration 47600, lr = 1.19\nI0819 19:14:52.222784 20842 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0819 19:16:16.558887 20842 solver.cpp:404]     Test net output #0: accuracy = 0.886321\nI0819 19:16:16.559142 20842 solver.cpp:404]     Test net output #1: loss 
= 0.399175 (* 1 = 0.399175 loss)\nI0819 19:16:17.874694 20842 solver.cpp:228] Iteration 47700, loss = 0.145567\nI0819 19:16:17.874737 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:16:17.874752 20842 solver.cpp:244]     Train net output #1: loss = 0.145567 (* 1 = 0.145567 loss)\nI0819 19:16:17.967506 20842 sgd_solver.cpp:166] Iteration 47700, lr = 1.1925\nI0819 19:18:34.486160 20842 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0819 19:19:58.837550 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8888\nI0819 19:19:58.837833 20842 solver.cpp:404]     Test net output #1: loss = 0.409075 (* 1 = 0.409075 loss)\nI0819 19:20:00.153100 20842 solver.cpp:228] Iteration 47800, loss = 0.111278\nI0819 19:20:00.153159 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:20:00.153177 20842 solver.cpp:244]     Train net output #1: loss = 0.111277 (* 1 = 0.111277 loss)\nI0819 19:20:00.245988 20842 sgd_solver.cpp:166] Iteration 47800, lr = 1.195\nI0819 19:22:17.335110 20842 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0819 19:23:41.669260 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0819 19:23:41.669529 20842 solver.cpp:404]     Test net output #1: loss = 0.411045 (* 1 = 0.411045 loss)\nI0819 19:23:42.985102 20842 solver.cpp:228] Iteration 47900, loss = 0.0734092\nI0819 19:23:42.985146 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:23:42.985162 20842 solver.cpp:244]     Train net output #1: loss = 0.0734088 (* 1 = 0.0734088 loss)\nI0819 19:23:43.074187 20842 sgd_solver.cpp:166] Iteration 47900, lr = 1.1975\nI0819 19:25:59.633152 20842 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0819 19:27:23.962918 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89284\nI0819 19:27:23.963193 20842 solver.cpp:404]     Test net output #1: loss = 0.394876 (* 1 = 0.394876 loss)\nI0819 19:27:25.279908 20842 solver.cpp:228] Iteration 48000, loss = 0.0797473\nI0819 
19:27:25.279953 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:27:25.279968 20842 solver.cpp:244]     Train net output #1: loss = 0.079747 (* 1 = 0.079747 loss)\nI0819 19:27:25.366238 20842 sgd_solver.cpp:166] Iteration 48000, lr = 1.2\nI0819 19:29:41.963327 20842 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0819 19:31:06.306694 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8854\nI0819 19:31:06.306955 20842 solver.cpp:404]     Test net output #1: loss = 0.403169 (* 1 = 0.403169 loss)\nI0819 19:31:07.623988 20842 solver.cpp:228] Iteration 48100, loss = 0.127105\nI0819 19:31:07.624030 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:31:07.624045 20842 solver.cpp:244]     Train net output #1: loss = 0.127104 (* 1 = 0.127104 loss)\nI0819 19:31:07.716323 20842 sgd_solver.cpp:166] Iteration 48100, lr = 1.2025\nI0819 19:33:24.384651 20842 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0819 19:34:48.725946 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0819 19:34:48.726198 20842 solver.cpp:404]     Test net output #1: loss = 0.414667 (* 1 = 0.414667 loss)\nI0819 19:34:50.042639 20842 solver.cpp:228] Iteration 48200, loss = 0.141055\nI0819 19:34:50.042682 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:34:50.042697 20842 solver.cpp:244]     Train net output #1: loss = 0.141055 (* 1 = 0.141055 loss)\nI0819 19:34:50.132293 20842 sgd_solver.cpp:166] Iteration 48200, lr = 1.205\nI0819 19:37:06.644328 20842 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0819 19:38:30.988201 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88668\nI0819 19:38:30.988476 20842 solver.cpp:404]     Test net output #1: loss = 0.395317 (* 1 = 0.395317 loss)\nI0819 19:38:32.305130 20842 solver.cpp:228] Iteration 48300, loss = 0.179324\nI0819 19:38:32.305172 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:38:32.305188 20842 solver.cpp:244]     
Train net output #1: loss = 0.179324 (* 1 = 0.179324 loss)\nI0819 19:38:32.394716 20842 sgd_solver.cpp:166] Iteration 48300, lr = 1.2075\nI0819 19:40:48.935683 20842 solver.cpp:337] Iteration 48400, Testing net (#0)\nI0819 19:42:13.295305 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0819 19:42:13.295619 20842 solver.cpp:404]     Test net output #1: loss = 0.402387 (* 1 = 0.402387 loss)\nI0819 19:42:14.612665 20842 solver.cpp:228] Iteration 48400, loss = 0.0925165\nI0819 19:42:14.612715 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:42:14.612737 20842 solver.cpp:244]     Train net output #1: loss = 0.0925161 (* 1 = 0.0925161 loss)\nI0819 19:42:14.702875 20842 sgd_solver.cpp:166] Iteration 48400, lr = 1.21\nI0819 19:44:31.316397 20842 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0819 19:45:55.683693 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0819 19:45:55.684015 20842 solver.cpp:404]     Test net output #1: loss = 0.428311 (* 1 = 0.428311 loss)\nI0819 19:45:57.001142 20842 solver.cpp:228] Iteration 48500, loss = 0.133875\nI0819 19:45:57.001189 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:45:57.001214 20842 solver.cpp:244]     Train net output #1: loss = 0.133875 (* 1 = 0.133875 loss)\nI0819 19:45:57.089920 20842 sgd_solver.cpp:166] Iteration 48500, lr = 1.2125\nI0819 19:48:13.631266 20842 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0819 19:49:37.997470 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88236\nI0819 19:49:37.997817 20842 solver.cpp:404]     Test net output #1: loss = 0.407582 (* 1 = 0.407582 loss)\nI0819 19:49:39.314113 20842 solver.cpp:228] Iteration 48600, loss = 0.119997\nI0819 19:49:39.314179 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:49:39.314206 20842 solver.cpp:244]     Train net output #1: loss = 0.119997 (* 1 = 0.119997 loss)\nI0819 19:49:39.401906 20842 sgd_solver.cpp:166] Iteration 48600, 
lr = 1.215\nI0819 19:51:56.217998 20842 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0819 19:53:20.600663 20842 solver.cpp:404]     Test net output #0: accuracy = 0.880641\nI0819 19:53:20.600996 20842 solver.cpp:404]     Test net output #1: loss = 0.417381 (* 1 = 0.417381 loss)\nI0819 19:53:21.917837 20842 solver.cpp:228] Iteration 48700, loss = 0.0751485\nI0819 19:53:21.917886 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:53:21.917910 20842 solver.cpp:244]     Train net output #1: loss = 0.0751481 (* 1 = 0.0751481 loss)\nI0819 19:53:22.004290 20842 sgd_solver.cpp:166] Iteration 48700, lr = 1.2175\nI0819 19:55:39.242986 20842 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0819 19:57:03.601752 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0819 19:57:03.602108 20842 solver.cpp:404]     Test net output #1: loss = 0.411421 (* 1 = 0.411421 loss)\nI0819 19:57:04.918658 20842 solver.cpp:228] Iteration 48800, loss = 0.0800219\nI0819 19:57:04.918707 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:57:04.918730 20842 solver.cpp:244]     Train net output #1: loss = 0.0800215 (* 1 = 0.0800215 loss)\nI0819 19:57:05.002539 20842 sgd_solver.cpp:166] Iteration 48800, lr = 1.22\nI0819 19:59:22.244570 20842 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0819 20:00:46.605181 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0819 20:00:46.605538 20842 solver.cpp:404]     Test net output #1: loss = 0.401406 (* 1 = 0.401406 loss)\nI0819 20:00:47.921937 20842 solver.cpp:228] Iteration 48900, loss = 0.124024\nI0819 20:00:47.921984 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:00:47.922008 20842 solver.cpp:244]     Train net output #1: loss = 0.124024 (* 1 = 0.124024 loss)\nI0819 20:00:48.008013 20842 sgd_solver.cpp:166] Iteration 48900, lr = 1.2225\nI0819 20:03:04.781111 20842 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0819 20:04:29.137503 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.87872\nI0819 20:04:29.137852 20842 solver.cpp:404]     Test net output #1: loss = 0.428436 (* 1 = 0.428436 loss)\nI0819 20:04:30.455365 20842 solver.cpp:228] Iteration 49000, loss = 0.113716\nI0819 20:04:30.455412 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:04:30.455436 20842 solver.cpp:244]     Train net output #1: loss = 0.113716 (* 1 = 0.113716 loss)\nI0819 20:04:30.547969 20842 sgd_solver.cpp:166] Iteration 49000, lr = 1.225\nI0819 20:06:47.431413 20842 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0819 20:08:11.823108 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0819 20:08:11.823477 20842 solver.cpp:404]     Test net output #1: loss = 0.429639 (* 1 = 0.429639 loss)\nI0819 20:08:13.140419 20842 solver.cpp:228] Iteration 49100, loss = 0.205683\nI0819 20:08:13.140465 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 20:08:13.140488 20842 solver.cpp:244]     Train net output #1: loss = 0.205683 (* 1 = 0.205683 loss)\nI0819 20:08:13.228993 20842 sgd_solver.cpp:166] Iteration 49100, lr = 1.2275\nI0819 20:10:30.505291 20842 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0819 20:11:54.894392 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88332\nI0819 20:11:54.894745 20842 solver.cpp:404]     Test net output #1: loss = 0.419166 (* 1 = 0.419166 loss)\nI0819 20:11:56.211941 20842 solver.cpp:228] Iteration 49200, loss = 0.154515\nI0819 20:11:56.211987 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 20:11:56.212008 20842 solver.cpp:244]     Train net output #1: loss = 0.154515 (* 1 = 0.154515 loss)\nI0819 20:11:56.299336 20842 sgd_solver.cpp:166] Iteration 49200, lr = 1.23\nI0819 20:14:13.531491 20842 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0819 20:15:37.973883 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8886\nI0819 20:15:37.974225 20842 solver.cpp:404]     Test net output #1: 
loss = 0.396772 (* 1 = 0.396772 loss)\nI0819 20:15:39.290045 20842 solver.cpp:228] Iteration 49300, loss = 0.102537\nI0819 20:15:39.290091 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:15:39.290115 20842 solver.cpp:244]     Train net output #1: loss = 0.102536 (* 1 = 0.102536 loss)\nI0819 20:15:39.380452 20842 sgd_solver.cpp:166] Iteration 49300, lr = 1.2325\nI0819 20:17:56.670967 20842 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0819 20:19:21.061584 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88856\nI0819 20:19:21.061947 20842 solver.cpp:404]     Test net output #1: loss = 0.38966 (* 1 = 0.38966 loss)\nI0819 20:19:22.378338 20842 solver.cpp:228] Iteration 49400, loss = 0.110809\nI0819 20:19:22.378382 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:19:22.378398 20842 solver.cpp:244]     Train net output #1: loss = 0.110809 (* 1 = 0.110809 loss)\nI0819 20:19:22.467469 20842 sgd_solver.cpp:166] Iteration 49400, lr = 1.235\nI0819 20:21:39.326792 20842 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0819 20:23:03.770871 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881281\nI0819 20:23:03.771235 20842 solver.cpp:404]     Test net output #1: loss = 0.460876 (* 1 = 0.460876 loss)\nI0819 20:23:05.087864 20842 solver.cpp:228] Iteration 49500, loss = 0.15168\nI0819 20:23:05.087924 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 20:23:05.087941 20842 solver.cpp:244]     Train net output #1: loss = 0.151679 (* 1 = 0.151679 loss)\nI0819 20:23:05.178064 20842 sgd_solver.cpp:166] Iteration 49500, lr = 1.2375\nI0819 20:25:22.071056 20842 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0819 20:26:46.527828 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88596\nI0819 20:26:46.528200 20842 solver.cpp:404]     Test net output #1: loss = 0.410924 (* 1 = 0.410924 loss)\nI0819 20:26:47.845002 20842 solver.cpp:228] Iteration 49600, loss = 0.143419\nI0819 
20:26:47.845059 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:26:47.845078 20842 solver.cpp:244]     Train net output #1: loss = 0.143418 (* 1 = 0.143418 loss)\nI0819 20:26:47.937458 20842 sgd_solver.cpp:166] Iteration 49600, lr = 1.24\nI0819 20:29:04.790545 20842 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0819 20:30:29.341663 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0819 20:30:29.341998 20842 solver.cpp:404]     Test net output #1: loss = 0.42802 (* 1 = 0.42802 loss)\nI0819 20:30:30.659514 20842 solver.cpp:228] Iteration 49700, loss = 0.0821478\nI0819 20:30:30.659574 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 20:30:30.659593 20842 solver.cpp:244]     Train net output #1: loss = 0.0821474 (* 1 = 0.0821474 loss)\nI0819 20:30:30.756558 20842 sgd_solver.cpp:166] Iteration 49700, lr = 1.2425\nI0819 20:32:47.657397 20842 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0819 20:34:12.209009 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87736\nI0819 20:34:12.209348 20842 solver.cpp:404]     Test net output #1: loss = 0.422427 (* 1 = 0.422427 loss)\nI0819 20:34:13.526161 20842 solver.cpp:228] Iteration 49800, loss = 0.215071\nI0819 20:34:13.526204 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 20:34:13.526221 20842 solver.cpp:244]     Train net output #1: loss = 0.21507 (* 1 = 0.21507 loss)\nI0819 20:34:13.610199 20842 sgd_solver.cpp:166] Iteration 49800, lr = 1.245\nI0819 20:36:30.435248 20842 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0819 20:37:54.921103 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88664\nI0819 20:37:54.921450 20842 solver.cpp:404]     Test net output #1: loss = 0.402228 (* 1 = 0.402228 loss)\nI0819 20:37:56.238410 20842 solver.cpp:228] Iteration 49900, loss = 0.080482\nI0819 20:37:56.238456 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:37:56.238479 20842 solver.cpp:244]     
Train net output #1: loss = 0.0804816 (* 1 = 0.0804816 loss)\nI0819 20:37:56.322793 20842 sgd_solver.cpp:166] Iteration 49900, lr = 1.2475\nI0819 20:40:13.225147 20842 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0819 20:41:37.618826 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88656\nI0819 20:41:37.619190 20842 solver.cpp:404]     Test net output #1: loss = 0.389795 (* 1 = 0.389795 loss)\nI0819 20:41:38.935889 20842 solver.cpp:228] Iteration 50000, loss = 0.21068\nI0819 20:41:38.935950 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:41:38.935967 20842 solver.cpp:244]     Train net output #1: loss = 0.21068 (* 1 = 0.21068 loss)\nI0819 20:41:39.027371 20842 sgd_solver.cpp:166] Iteration 50000, lr = 1.25\nI0819 20:43:55.954910 20842 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0819 20:45:20.337754 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0819 20:45:20.338121 20842 solver.cpp:404]     Test net output #1: loss = 0.421476 (* 1 = 0.421476 loss)\nI0819 20:45:21.653321 20842 solver.cpp:228] Iteration 50100, loss = 0.222441\nI0819 20:45:21.653363 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 20:45:21.653378 20842 solver.cpp:244]     Train net output #1: loss = 0.222441 (* 1 = 0.222441 loss)\nI0819 20:45:21.750385 20842 sgd_solver.cpp:166] Iteration 50100, lr = 1.2525\nI0819 20:47:38.651892 20842 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0819 20:49:03.028091 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0819 20:49:03.028435 20842 solver.cpp:404]     Test net output #1: loss = 0.417972 (* 1 = 0.417972 loss)\nI0819 20:49:04.344413 20842 solver.cpp:228] Iteration 50200, loss = 0.0948201\nI0819 20:49:04.344463 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:49:04.344480 20842 solver.cpp:244]     Train net output #1: loss = 0.0948196 (* 1 = 0.0948196 loss)\nI0819 20:49:04.425606 20842 sgd_solver.cpp:166] Iteration 50200, lr 
= 1.255\nI0819 20:51:21.444963 20842 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0819 20:52:45.803724 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88604\nI0819 20:52:45.804075 20842 solver.cpp:404]     Test net output #1: loss = 0.393029 (* 1 = 0.393029 loss)\nI0819 20:52:47.119370 20842 solver.cpp:228] Iteration 50300, loss = 0.164484\nI0819 20:52:47.119421 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:52:47.119438 20842 solver.cpp:244]     Train net output #1: loss = 0.164484 (* 1 = 0.164484 loss)\nI0819 20:52:47.202318 20842 sgd_solver.cpp:166] Iteration 50300, lr = 1.2575\nI0819 20:55:03.977769 20842 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0819 20:56:28.353385 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88236\nI0819 20:56:28.353754 20842 solver.cpp:404]     Test net output #1: loss = 0.429603 (* 1 = 0.429603 loss)\nI0819 20:56:29.669312 20842 solver.cpp:228] Iteration 50400, loss = 0.133277\nI0819 20:56:29.669360 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:56:29.669376 20842 solver.cpp:244]     Train net output #1: loss = 0.133277 (* 1 = 0.133277 loss)\nI0819 20:56:29.753144 20842 sgd_solver.cpp:166] Iteration 50400, lr = 1.26\nI0819 20:58:46.804677 20842 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0819 21:00:11.166640 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0819 21:00:11.166998 20842 solver.cpp:404]     Test net output #1: loss = 0.415304 (* 1 = 0.415304 loss)\nI0819 21:00:12.483165 20842 solver.cpp:228] Iteration 50500, loss = 0.132481\nI0819 21:00:12.483220 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:00:12.483238 20842 solver.cpp:244]     Train net output #1: loss = 0.132481 (* 1 = 0.132481 loss)\nI0819 21:00:12.562777 20842 sgd_solver.cpp:166] Iteration 50500, lr = 1.2625\nI0819 21:02:29.404974 20842 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0819 21:03:53.784512 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 21:03:53.784884 20842 solver.cpp:404]     Test net output #1: loss = 0.404385 (* 1 = 0.404385 loss)\nI0819 21:03:55.100589 20842 solver.cpp:228] Iteration 50600, loss = 0.165378\nI0819 21:03:55.100647 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 21:03:55.100664 20842 solver.cpp:244]     Train net output #1: loss = 0.165378 (* 1 = 0.165378 loss)\nI0819 21:03:55.188633 20842 sgd_solver.cpp:166] Iteration 50600, lr = 1.265\nI0819 21:06:11.982229 20842 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0819 21:07:36.356582 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88628\nI0819 21:07:36.356950 20842 solver.cpp:404]     Test net output #1: loss = 0.403985 (* 1 = 0.403985 loss)\nI0819 21:07:37.672499 20842 solver.cpp:228] Iteration 50700, loss = 0.19253\nI0819 21:07:37.672547 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 21:07:37.672564 20842 solver.cpp:244]     Train net output #1: loss = 0.19253 (* 1 = 0.19253 loss)\nI0819 21:07:37.754226 20842 sgd_solver.cpp:166] Iteration 50700, lr = 1.2675\nI0819 21:09:54.703871 20842 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0819 21:11:19.082716 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87556\nI0819 21:11:19.083073 20842 solver.cpp:404]     Test net output #1: loss = 0.453025 (* 1 = 0.453025 loss)\nI0819 21:11:20.398936 20842 solver.cpp:228] Iteration 50800, loss = 0.172414\nI0819 21:11:20.398991 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 21:11:20.399009 20842 solver.cpp:244]     Train net output #1: loss = 0.172413 (* 1 = 0.172413 loss)\nI0819 21:11:20.485355 20842 sgd_solver.cpp:166] Iteration 50800, lr = 1.27\nI0819 21:13:37.400370 20842 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0819 21:15:01.775203 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0819 21:15:01.775573 20842 solver.cpp:404]     Test net output #1: 
loss = 0.39505 (* 1 = 0.39505 loss)\nI0819 21:15:03.091399 20842 solver.cpp:228] Iteration 50900, loss = 0.187848\nI0819 21:15:03.091459 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:15:03.091475 20842 solver.cpp:244]     Train net output #1: loss = 0.187847 (* 1 = 0.187847 loss)\nI0819 21:15:03.181581 20842 sgd_solver.cpp:166] Iteration 50900, lr = 1.2725\nI0819 21:17:20.047389 20842 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0819 21:18:44.418453 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0819 21:18:44.418823 20842 solver.cpp:404]     Test net output #1: loss = 0.424404 (* 1 = 0.424404 loss)\nI0819 21:18:45.734246 20842 solver.cpp:228] Iteration 51000, loss = 0.201992\nI0819 21:18:45.734305 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 21:18:45.734323 20842 solver.cpp:244]     Train net output #1: loss = 0.201992 (* 1 = 0.201992 loss)\nI0819 21:18:45.819563 20842 sgd_solver.cpp:166] Iteration 51000, lr = 1.275\nI0819 21:21:02.621791 20842 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0819 21:22:26.983127 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88396\nI0819 21:22:26.983456 20842 solver.cpp:404]     Test net output #1: loss = 0.412382 (* 1 = 0.412382 loss)\nI0819 21:22:28.298979 20842 solver.cpp:228] Iteration 51100, loss = 0.0760228\nI0819 21:22:28.299037 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:22:28.299057 20842 solver.cpp:244]     Train net output #1: loss = 0.0760222 (* 1 = 0.0760222 loss)\nI0819 21:22:28.385399 20842 sgd_solver.cpp:166] Iteration 51100, lr = 1.2775\nI0819 21:24:45.213480 20842 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0819 21:26:09.585283 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 21:26:09.585644 20842 solver.cpp:404]     Test net output #1: loss = 0.419932 (* 1 = 0.419932 loss)\nI0819 21:26:10.901892 20842 solver.cpp:228] Iteration 51200, loss = 0.0875978\nI0819 
21:26:10.901953 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:26:10.901973 20842 solver.cpp:244]     Train net output #1: loss = 0.0875973 (* 1 = 0.0875973 loss)\nI0819 21:26:10.987851 20842 sgd_solver.cpp:166] Iteration 51200, lr = 1.28\nI0819 21:28:27.829134 20842 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0819 21:29:52.197993 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0819 21:29:52.198338 20842 solver.cpp:404]     Test net output #1: loss = 0.401433 (* 1 = 0.401433 loss)\nI0819 21:29:53.514001 20842 solver.cpp:228] Iteration 51300, loss = 0.138206\nI0819 21:29:53.514061 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:29:53.514081 20842 solver.cpp:244]     Train net output #1: loss = 0.138205 (* 1 = 0.138205 loss)\nI0819 21:29:53.602998 20842 sgd_solver.cpp:166] Iteration 51300, lr = 1.2825\nI0819 21:32:10.431980 20842 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 21:33:34.797348 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881041\nI0819 21:33:34.797788 20842 solver.cpp:404]     Test net output #1: loss = 0.406395 (* 1 = 0.406395 loss)\nI0819 21:33:36.113276 20842 solver.cpp:228] Iteration 51400, loss = 0.0853985\nI0819 21:33:36.113334 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:33:36.113353 20842 solver.cpp:244]     Train net output #1: loss = 0.0853979 (* 1 = 0.0853979 loss)\nI0819 21:33:36.203387 20842 sgd_solver.cpp:166] Iteration 51400, lr = 1.285\nI0819 21:35:52.988581 20842 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 21:37:17.358007 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8882\nI0819 21:37:17.358352 20842 solver.cpp:404]     Test net output #1: loss = 0.401918 (* 1 = 0.401918 loss)\nI0819 21:37:18.674367 20842 solver.cpp:228] Iteration 51500, loss = 0.130487\nI0819 21:37:18.674425 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:37:18.674443 20842 solver.cpp:244]  
   Train net output #1: loss = 0.130487 (* 1 = 0.130487 loss)\nI0819 21:37:18.755846 20842 sgd_solver.cpp:166] Iteration 51500, lr = 1.2875\nI0819 21:39:35.633301 20842 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 21:41:00.055182 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0819 21:41:00.055536 20842 solver.cpp:404]     Test net output #1: loss = 0.4046 (* 1 = 0.4046 loss)\nI0819 21:41:01.372046 20842 solver.cpp:228] Iteration 51600, loss = 0.109014\nI0819 21:41:01.372103 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:41:01.372123 20842 solver.cpp:244]     Train net output #1: loss = 0.109014 (* 1 = 0.109014 loss)\nI0819 21:41:01.454294 20842 sgd_solver.cpp:166] Iteration 51600, lr = 1.29\nI0819 21:43:18.308357 20842 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 21:44:42.735941 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88568\nI0819 21:44:42.736284 20842 solver.cpp:404]     Test net output #1: loss = 0.395224 (* 1 = 0.395224 loss)\nI0819 21:44:44.056087 20842 solver.cpp:228] Iteration 51700, loss = 0.129803\nI0819 21:44:44.056145 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:44:44.056164 20842 solver.cpp:244]     Train net output #1: loss = 0.129802 (* 1 = 0.129802 loss)\nI0819 21:44:44.143081 20842 sgd_solver.cpp:166] Iteration 51700, lr = 1.2925\nI0819 21:47:01.001080 20842 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 21:48:25.363615 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0819 21:48:25.363935 20842 solver.cpp:404]     Test net output #1: loss = 0.424918 (* 1 = 0.424918 loss)\nI0819 21:48:26.683625 20842 solver.cpp:228] Iteration 51800, loss = 0.058949\nI0819 21:48:26.683686 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 21:48:26.683703 20842 solver.cpp:244]     Train net output #1: loss = 0.0589486 (* 1 = 0.0589486 loss)\nI0819 21:48:26.768261 20842 sgd_solver.cpp:166] Iteration 51800, lr 
= 1.295\nI0819 21:50:43.644474 20842 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 21:52:08.006570 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0819 21:52:08.006927 20842 solver.cpp:404]     Test net output #1: loss = 0.424515 (* 1 = 0.424515 loss)\nI0819 21:52:09.326524 20842 solver.cpp:228] Iteration 51900, loss = 0.149224\nI0819 21:52:09.326584 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 21:52:09.326608 20842 solver.cpp:244]     Train net output #1: loss = 0.149224 (* 1 = 0.149224 loss)\nI0819 21:52:09.406275 20842 sgd_solver.cpp:166] Iteration 51900, lr = 1.2975\nI0819 21:54:26.572196 20842 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 21:55:50.940004 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87696\nI0819 21:55:50.940348 20842 solver.cpp:404]     Test net output #1: loss = 0.44815 (* 1 = 0.44815 loss)\nI0819 21:55:52.260828 20842 solver.cpp:228] Iteration 52000, loss = 0.11296\nI0819 21:55:52.260887 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:55:52.260906 20842 solver.cpp:244]     Train net output #1: loss = 0.112959 (* 1 = 0.112959 loss)\nI0819 21:55:52.338753 20842 sgd_solver.cpp:166] Iteration 52000, lr = 1.3\nI0819 21:58:09.221345 20842 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 21:59:33.593791 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0819 21:59:33.594161 20842 solver.cpp:404]     Test net output #1: loss = 0.403731 (* 1 = 0.403731 loss)\nI0819 21:59:34.914975 20842 solver.cpp:228] Iteration 52100, loss = 0.169998\nI0819 21:59:34.915031 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 21:59:34.915056 20842 solver.cpp:244]     Train net output #1: loss = 0.169998 (* 1 = 0.169998 loss)\nI0819 21:59:35.000396 20842 sgd_solver.cpp:166] Iteration 52100, lr = 1.3025\nI0819 22:01:51.940186 20842 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 22:03:16.275766 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0819 22:03:16.276134 20842 solver.cpp:404]     Test net output #1: loss = 0.399799 (* 1 = 0.399799 loss)\nI0819 22:03:17.594796 20842 solver.cpp:228] Iteration 52200, loss = 0.120724\nI0819 22:03:17.594856 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:03:17.594874 20842 solver.cpp:244]     Train net output #1: loss = 0.120724 (* 1 = 0.120724 loss)\nI0819 22:03:17.678239 20842 sgd_solver.cpp:166] Iteration 52200, lr = 1.305\nI0819 22:05:34.573106 20842 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 22:06:58.924441 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88596\nI0819 22:06:58.924796 20842 solver.cpp:404]     Test net output #1: loss = 0.399167 (* 1 = 0.399167 loss)\nI0819 22:07:00.244029 20842 solver.cpp:228] Iteration 52300, loss = 0.149131\nI0819 22:07:00.244091 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 22:07:00.244108 20842 solver.cpp:244]     Train net output #1: loss = 0.149131 (* 1 = 0.149131 loss)\nI0819 22:07:00.333412 20842 sgd_solver.cpp:166] Iteration 52300, lr = 1.3075\nI0819 22:09:17.474412 20842 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 22:10:41.829339 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0819 22:10:41.829699 20842 solver.cpp:404]     Test net output #1: loss = 0.404105 (* 1 = 0.404105 loss)\nI0819 22:10:43.148671 20842 solver.cpp:228] Iteration 52400, loss = 0.154352\nI0819 22:10:43.148727 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:10:43.148744 20842 solver.cpp:244]     Train net output #1: loss = 0.154351 (* 1 = 0.154351 loss)\nI0819 22:10:43.230039 20842 sgd_solver.cpp:166] Iteration 52400, lr = 1.31\nI0819 22:13:00.020653 20842 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 22:14:24.378749 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0819 22:14:24.379081 20842 solver.cpp:404]     Test net output #1: 
loss = 0.417513 (* 1 = 0.417513 loss)\nI0819 22:14:25.698009 20842 solver.cpp:228] Iteration 52500, loss = 0.125816\nI0819 22:14:25.698065 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:14:25.698083 20842 solver.cpp:244]     Train net output #1: loss = 0.125816 (* 1 = 0.125816 loss)\nI0819 22:14:25.780117 20842 sgd_solver.cpp:166] Iteration 52500, lr = 1.3125\nI0819 22:16:42.981765 20842 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 22:18:07.352452 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 22:18:07.352819 20842 solver.cpp:404]     Test net output #1: loss = 0.435323 (* 1 = 0.435323 loss)\nI0819 22:18:08.672096 20842 solver.cpp:228] Iteration 52600, loss = 0.0747952\nI0819 22:18:08.672144 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 22:18:08.672163 20842 solver.cpp:244]     Train net output #1: loss = 0.0747947 (* 1 = 0.0747947 loss)\nI0819 22:18:08.750762 20842 sgd_solver.cpp:166] Iteration 52600, lr = 1.315\nI0819 22:20:25.553768 20842 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 22:21:49.922617 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88964\nI0819 22:21:49.923020 20842 solver.cpp:404]     Test net output #1: loss = 0.381374 (* 1 = 0.381374 loss)\nI0819 22:21:51.242007 20842 solver.cpp:228] Iteration 52700, loss = 0.117654\nI0819 22:21:51.242058 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:21:51.242075 20842 solver.cpp:244]     Train net output #1: loss = 0.117653 (* 1 = 0.117653 loss)\nI0819 22:21:51.324607 20842 sgd_solver.cpp:166] Iteration 52700, lr = 1.3175\nI0819 22:24:08.171684 20842 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 22:25:32.545173 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89104\nI0819 22:25:32.545547 20842 solver.cpp:404]     Test net output #1: loss = 0.392006 (* 1 = 0.392006 loss)\nI0819 22:25:33.865114 20842 solver.cpp:228] Iteration 52800, loss = 0.11519\nI0819 
22:25:33.865164 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:25:33.865181 20842 solver.cpp:244]     Train net output #1: loss = 0.11519 (* 1 = 0.11519 loss)\nI0819 22:25:33.951228 20842 sgd_solver.cpp:166] Iteration 52800, lr = 1.32\nI0819 22:27:50.783962 20842 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 22:29:15.153228 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 22:29:15.153602 20842 solver.cpp:404]     Test net output #1: loss = 0.421073 (* 1 = 0.421073 loss)\nI0819 22:29:16.473646 20842 solver.cpp:228] Iteration 52900, loss = 0.136449\nI0819 22:29:16.473697 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:29:16.473716 20842 solver.cpp:244]     Train net output #1: loss = 0.136449 (* 1 = 0.136449 loss)\nI0819 22:29:16.560165 20842 sgd_solver.cpp:166] Iteration 52900, lr = 1.3225\nI0819 22:31:33.709748 20842 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 22:32:58.082630 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881001\nI0819 22:32:58.082984 20842 solver.cpp:404]     Test net output #1: loss = 0.405762 (* 1 = 0.405762 loss)\nI0819 22:32:59.403358 20842 solver.cpp:228] Iteration 53000, loss = 0.0735251\nI0819 22:32:59.403411 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 22:32:59.403429 20842 solver.cpp:244]     Train net output #1: loss = 0.0735246 (* 1 = 0.0735246 loss)\nI0819 22:32:59.487108 20842 sgd_solver.cpp:166] Iteration 53000, lr = 1.325\nI0819 22:35:16.471007 20842 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 22:36:40.872475 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 22:36:40.872846 20842 solver.cpp:404]     Test net output #1: loss = 0.396923 (* 1 = 0.396923 loss)\nI0819 22:36:42.193140 20842 solver.cpp:228] Iteration 53100, loss = 0.140487\nI0819 22:36:42.193195 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:36:42.193220 20842 solver.cpp:244]   
  Train net output #1: loss = 0.140487 (* 1 = 0.140487 loss)\nI0819 22:36:42.276198 20842 sgd_solver.cpp:166] Iteration 53100, lr = 1.3275\nI0819 22:38:59.192579 20842 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 22:40:23.591213 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0819 22:40:23.591593 20842 solver.cpp:404]     Test net output #1: loss = 0.424313 (* 1 = 0.424313 loss)\nI0819 22:40:24.912190 20842 solver.cpp:228] Iteration 53200, loss = 0.171672\nI0819 22:40:24.912250 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:40:24.912274 20842 solver.cpp:244]     Train net output #1: loss = 0.171672 (* 1 = 0.171672 loss)\nI0819 22:40:24.996215 20842 sgd_solver.cpp:166] Iteration 53200, lr = 1.33\nI0819 22:42:42.206157 20842 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0819 22:44:06.601703 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87772\nI0819 22:44:06.602074 20842 solver.cpp:404]     Test net output #1: loss = 0.416253 (* 1 = 0.416253 loss)\nI0819 22:44:07.921067 20842 solver.cpp:228] Iteration 53300, loss = 0.0966503\nI0819 22:44:07.921116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:44:07.921141 20842 solver.cpp:244]     Train net output #1: loss = 0.0966498 (* 1 = 0.0966498 loss)\nI0819 22:44:08.004745 20842 sgd_solver.cpp:166] Iteration 53300, lr = 1.3325\nI0819 22:46:24.942368 20842 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0819 22:47:49.340651 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0819 22:47:49.341022 20842 solver.cpp:404]     Test net output #1: loss = 0.428005 (* 1 = 0.428005 loss)\nI0819 22:47:50.661629 20842 solver.cpp:228] Iteration 53400, loss = 0.17022\nI0819 22:47:50.661685 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:47:50.661710 20842 solver.cpp:244]     Train net output #1: loss = 0.170219 (* 1 = 0.170219 loss)\nI0819 22:47:50.745254 20842 sgd_solver.cpp:166] Iteration 53400, 
lr = 1.335\nI0819 22:50:07.538033 20842 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0819 22:51:31.934370 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 22:51:31.934746 20842 solver.cpp:404]     Test net output #1: loss = 0.400497 (* 1 = 0.400497 loss)\nI0819 22:51:33.253581 20842 solver.cpp:228] Iteration 53500, loss = 0.191888\nI0819 22:51:33.253635 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 22:51:33.253660 20842 solver.cpp:244]     Train net output #1: loss = 0.191887 (* 1 = 0.191887 loss)\nI0819 22:51:33.340626 20842 sgd_solver.cpp:166] Iteration 53500, lr = 1.3375\nI0819 22:53:50.175940 20842 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0819 22:55:14.575382 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881841\nI0819 22:55:14.575749 20842 solver.cpp:404]     Test net output #1: loss = 0.392106 (* 1 = 0.392106 loss)\nI0819 22:55:15.895902 20842 solver.cpp:228] Iteration 53600, loss = 0.129601\nI0819 22:55:15.895958 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:55:15.895983 20842 solver.cpp:244]     Train net output #1: loss = 0.1296 (* 1 = 0.1296 loss)\nI0819 22:55:15.978930 20842 sgd_solver.cpp:166] Iteration 53600, lr = 1.34\nI0819 22:57:32.793736 20842 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0819 22:58:57.193351 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88264\nI0819 22:58:57.193691 20842 solver.cpp:404]     Test net output #1: loss = 0.41348 (* 1 = 0.41348 loss)\nI0819 22:58:58.513087 20842 solver.cpp:228] Iteration 53700, loss = 0.0883319\nI0819 22:58:58.513151 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 22:58:58.513177 20842 solver.cpp:244]     Train net output #1: loss = 0.0883314 (* 1 = 0.0883314 loss)\nI0819 22:58:58.596403 20842 sgd_solver.cpp:166] Iteration 53700, lr = 1.3425\nI0819 23:01:15.480157 20842 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0819 23:02:39.865504 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88252\nI0819 23:02:39.865875 20842 solver.cpp:404]     Test net output #1: loss = 0.417635 (* 1 = 0.417635 loss)\nI0819 23:02:41.184588 20842 solver.cpp:228] Iteration 53800, loss = 0.119014\nI0819 23:02:41.184648 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:02:41.184675 20842 solver.cpp:244]     Train net output #1: loss = 0.119013 (* 1 = 0.119013 loss)\nI0819 23:02:41.275095 20842 sgd_solver.cpp:166] Iteration 53800, lr = 1.345\nI0819 23:04:58.268055 20842 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0819 23:06:22.662250 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0819 23:06:22.662647 20842 solver.cpp:404]     Test net output #1: loss = 0.422704 (* 1 = 0.422704 loss)\nI0819 23:06:23.983096 20842 solver.cpp:228] Iteration 53900, loss = 0.189705\nI0819 23:06:23.983153 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 23:06:23.983178 20842 solver.cpp:244]     Train net output #1: loss = 0.189704 (* 1 = 0.189704 loss)\nI0819 23:06:24.067466 20842 sgd_solver.cpp:166] Iteration 53900, lr = 1.3475\nI0819 23:08:40.891279 20842 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0819 23:10:05.278575 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87924\nI0819 23:10:05.278930 20842 solver.cpp:404]     Test net output #1: loss = 0.434126 (* 1 = 0.434126 loss)\nI0819 23:10:06.597810 20842 solver.cpp:228] Iteration 54000, loss = 0.110094\nI0819 23:10:06.597865 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:10:06.597890 20842 solver.cpp:244]     Train net output #1: loss = 0.110093 (* 1 = 0.110093 loss)\nI0819 23:10:06.682138 20842 sgd_solver.cpp:166] Iteration 54000, lr = 1.35\nI0819 23:12:23.606595 20842 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0819 23:13:47.993546 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0819 23:13:47.993922 20842 solver.cpp:404]     Test net output #1: 
loss = 0.40161 (* 1 = 0.40161 loss)\nI0819 23:13:49.313310 20842 solver.cpp:228] Iteration 54100, loss = 0.163632\nI0819 23:13:49.313364 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 23:13:49.313390 20842 solver.cpp:244]     Train net output #1: loss = 0.163631 (* 1 = 0.163631 loss)\nI0819 23:13:49.400527 20842 sgd_solver.cpp:166] Iteration 54100, lr = 1.3525\nI0819 23:16:06.296399 20842 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0819 23:17:30.716280 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 23:17:30.716639 20842 solver.cpp:404]     Test net output #1: loss = 0.425381 (* 1 = 0.425381 loss)\nI0819 23:17:32.035955 20842 solver.cpp:228] Iteration 54200, loss = 0.174633\nI0819 23:17:32.036008 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 23:17:32.036032 20842 solver.cpp:244]     Train net output #1: loss = 0.174632 (* 1 = 0.174632 loss)\nI0819 23:17:32.121623 20842 sgd_solver.cpp:166] Iteration 54200, lr = 1.355\nI0819 23:19:49.007422 20842 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0819 23:21:13.438148 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88012\nI0819 23:21:13.438516 20842 solver.cpp:404]     Test net output #1: loss = 0.411914 (* 1 = 0.411914 loss)\nI0819 23:21:14.759068 20842 solver.cpp:228] Iteration 54300, loss = 0.125382\nI0819 23:21:14.759126 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 23:21:14.759151 20842 solver.cpp:244]     Train net output #1: loss = 0.125381 (* 1 = 0.125381 loss)\nI0819 23:21:14.837281 20842 sgd_solver.cpp:166] Iteration 54300, lr = 1.3575\nI0819 23:23:31.748657 20842 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0819 23:24:56.170521 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8844\nI0819 23:24:56.170909 20842 solver.cpp:404]     Test net output #1: loss = 0.405826 (* 1 = 0.405826 loss)\nI0819 23:24:57.491166 20842 solver.cpp:228] Iteration 54400, loss = 0.0904625\nI0819 
23:24:57.491225 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:24:57.491245 20842 solver.cpp:244]     Train net output #1: loss = 0.0904621 (* 1 = 0.0904621 loss)\nI0819 23:24:57.579977 20842 sgd_solver.cpp:166] Iteration 54400, lr = 1.36\nI0819 23:27:14.475163 20842 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0819 23:28:38.901916 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885201\nI0819 23:28:38.902281 20842 solver.cpp:404]     Test net output #1: loss = 0.40388 (* 1 = 0.40388 loss)\nI0819 23:28:40.222319 20842 solver.cpp:228] Iteration 54500, loss = 0.16916\nI0819 23:28:40.222371 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:28:40.222388 20842 solver.cpp:244]     Train net output #1: loss = 0.169159 (* 1 = 0.169159 loss)\nI0819 23:28:40.305215 20842 sgd_solver.cpp:166] Iteration 54500, lr = 1.3625\nI0819 23:30:57.301307 20842 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0819 23:32:21.726013 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87496\nI0819 23:32:21.726361 20842 solver.cpp:404]     Test net output #1: loss = 0.467407 (* 1 = 0.467407 loss)\nI0819 23:32:23.047087 20842 solver.cpp:228] Iteration 54600, loss = 0.246075\nI0819 23:32:23.047148 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 23:32:23.047166 20842 solver.cpp:244]     Train net output #1: loss = 0.246075 (* 1 = 0.246075 loss)\nI0819 23:32:23.125198 20842 sgd_solver.cpp:166] Iteration 54600, lr = 1.365\nI0819 23:34:40.276306 20842 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0819 23:36:04.707612 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88768\nI0819 23:36:04.707972 20842 solver.cpp:404]     Test net output #1: loss = 0.401853 (* 1 = 0.401853 loss)\nI0819 23:36:06.029258 20842 solver.cpp:228] Iteration 54700, loss = 0.0576581\nI0819 23:36:06.029305 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 23:36:06.029323 20842 solver.cpp:244]    
 Train net output #1: loss = 0.0576576 (* 1 = 0.0576576 loss)\nI0819 23:36:06.112524 20842 sgd_solver.cpp:166] Iteration 54700, lr = 1.3675\nI0819 23:38:22.973428 20842 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0819 23:39:47.406186 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0819 23:39:47.406529 20842 solver.cpp:404]     Test net output #1: loss = 0.443359 (* 1 = 0.443359 loss)\nI0819 23:39:48.727035 20842 solver.cpp:228] Iteration 54800, loss = 0.115019\nI0819 23:39:48.727084 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:39:48.727102 20842 solver.cpp:244]     Train net output #1: loss = 0.115019 (* 1 = 0.115019 loss)\nI0819 23:39:48.806058 20842 sgd_solver.cpp:166] Iteration 54800, lr = 1.37\nI0819 23:42:05.957737 20842 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0819 23:43:30.374430 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88824\nI0819 23:43:30.374805 20842 solver.cpp:404]     Test net output #1: loss = 0.399051 (* 1 = 0.399051 loss)\nI0819 23:43:31.694196 20842 solver.cpp:228] Iteration 54900, loss = 0.121298\nI0819 23:43:31.694247 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:43:31.694265 20842 solver.cpp:244]     Train net output #1: loss = 0.121298 (* 1 = 0.121298 loss)\nI0819 23:43:31.781708 20842 sgd_solver.cpp:166] Iteration 54900, lr = 1.3725\nI0819 23:45:48.756531 20842 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0819 23:47:13.176661 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0819 23:47:13.177042 20842 solver.cpp:404]     Test net output #1: loss = 0.41083 (* 1 = 0.41083 loss)\nI0819 23:47:14.496263 20842 solver.cpp:228] Iteration 55000, loss = 0.194172\nI0819 23:47:14.496316 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0819 23:47:14.496333 20842 solver.cpp:244]     Train net output #1: loss = 0.194171 (* 1 = 0.194171 loss)\nI0819 23:47:14.589049 20842 sgd_solver.cpp:166] Iteration 55000, lr 
= 1.375\nI0819 23:49:31.472017 20842 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0819 23:50:55.889966 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 23:50:55.890322 20842 solver.cpp:404]     Test net output #1: loss = 0.390706 (* 1 = 0.390706 loss)\nI0819 23:50:57.210456 20842 solver.cpp:228] Iteration 55100, loss = 0.149682\nI0819 23:50:57.210505 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:50:57.210520 20842 solver.cpp:244]     Train net output #1: loss = 0.149682 (* 1 = 0.149682 loss)\nI0819 23:50:57.295372 20842 sgd_solver.cpp:166] Iteration 55100, lr = 1.3775\nI0819 23:53:14.298789 20842 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0819 23:54:38.719979 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 23:54:38.720362 20842 solver.cpp:404]     Test net output #1: loss = 0.420203 (* 1 = 0.420203 loss)\nI0819 23:54:40.040839 20842 solver.cpp:228] Iteration 55200, loss = 0.0766102\nI0819 23:54:40.040896 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:54:40.040913 20842 solver.cpp:244]     Train net output #1: loss = 0.0766097 (* 1 = 0.0766097 loss)\nI0819 23:54:40.124785 20842 sgd_solver.cpp:166] Iteration 55200, lr = 1.38\nI0819 23:56:57.001642 20842 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0819 23:58:21.419258 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881121\nI0819 23:58:21.419602 20842 solver.cpp:404]     Test net output #1: loss = 0.428411 (* 1 = 0.428411 loss)\nI0819 23:58:22.740234 20842 solver.cpp:228] Iteration 55300, loss = 0.1324\nI0819 23:58:22.740283 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:58:22.740301 20842 solver.cpp:244]     Train net output #1: loss = 0.1324 (* 1 = 0.1324 loss)\nI0819 23:58:22.817029 20842 sgd_solver.cpp:166] Iteration 55300, lr = 1.3825\nI0820 00:00:39.693729 20842 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0820 00:02:04.097913 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 00:02:04.098233 20842 solver.cpp:404]     Test net output #1: loss = 0.421989 (* 1 = 0.421989 loss)\nI0820 00:02:05.418915 20842 solver.cpp:228] Iteration 55400, loss = 0.157278\nI0820 00:02:05.418973 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:02:05.418992 20842 solver.cpp:244]     Train net output #1: loss = 0.157278 (* 1 = 0.157278 loss)\nI0820 00:02:05.509254 20842 sgd_solver.cpp:166] Iteration 55400, lr = 1.385\nI0820 00:04:22.383910 20842 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0820 00:05:46.801319 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88628\nI0820 00:05:46.801705 20842 solver.cpp:404]     Test net output #1: loss = 0.383855 (* 1 = 0.383855 loss)\nI0820 00:05:48.122098 20842 solver.cpp:228] Iteration 55500, loss = 0.168208\nI0820 00:05:48.122146 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:05:48.122164 20842 solver.cpp:244]     Train net output #1: loss = 0.168208 (* 1 = 0.168208 loss)\nI0820 00:05:48.204077 20842 sgd_solver.cpp:166] Iteration 55500, lr = 1.3875\nI0820 00:08:05.465550 20842 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0820 00:09:29.881909 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0820 00:09:29.882279 20842 solver.cpp:404]     Test net output #1: loss = 0.417215 (* 1 = 0.417215 loss)\nI0820 00:09:31.202693 20842 solver.cpp:228] Iteration 55600, loss = 0.0648483\nI0820 00:09:31.202751 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:09:31.202769 20842 solver.cpp:244]     Train net output #1: loss = 0.0648478 (* 1 = 0.0648478 loss)\nI0820 00:09:31.285208 20842 sgd_solver.cpp:166] Iteration 55600, lr = 1.39\nI0820 00:11:48.189687 20842 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0820 00:13:12.587172 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0820 00:13:12.587512 20842 solver.cpp:404]     Test net output 
#1: loss = 0.399704 (* 1 = 0.399704 loss)\nI0820 00:13:13.906589 20842 solver.cpp:228] Iteration 55700, loss = 0.0913855\nI0820 00:13:13.906647 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:13:13.906666 20842 solver.cpp:244]     Train net output #1: loss = 0.091385 (* 1 = 0.091385 loss)\nI0820 00:13:13.994114 20842 sgd_solver.cpp:166] Iteration 55700, lr = 1.3925\nI0820 00:15:30.888559 20842 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0820 00:16:55.309850 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885921\nI0820 00:16:55.310202 20842 solver.cpp:404]     Test net output #1: loss = 0.389032 (* 1 = 0.389032 loss)\nI0820 00:16:56.631325 20842 solver.cpp:228] Iteration 55800, loss = 0.166092\nI0820 00:16:56.631377 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:16:56.631394 20842 solver.cpp:244]     Train net output #1: loss = 0.166092 (* 1 = 0.166092 loss)\nI0820 00:16:56.719691 20842 sgd_solver.cpp:166] Iteration 55800, lr = 1.395\nI0820 00:19:13.546113 20842 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0820 00:20:37.966456 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0820 00:20:37.966828 20842 solver.cpp:404]     Test net output #1: loss = 0.458858 (* 1 = 0.458858 loss)\nI0820 00:20:39.287870 20842 solver.cpp:228] Iteration 55900, loss = 0.185407\nI0820 00:20:39.287919 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 00:20:39.287936 20842 solver.cpp:244]     Train net output #1: loss = 0.185407 (* 1 = 0.185407 loss)\nI0820 00:20:39.370337 20842 sgd_solver.cpp:166] Iteration 55900, lr = 1.3975\nI0820 00:22:56.317247 20842 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0820 00:24:20.745486 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0820 00:24:20.745865 20842 solver.cpp:404]     Test net output #1: loss = 0.424162 (* 1 = 0.424162 loss)\nI0820 00:24:22.066385 20842 solver.cpp:228] Iteration 56000, loss = 
0.106021\nI0820 00:24:22.066432 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:24:22.066450 20842 solver.cpp:244]     Train net output #1: loss = 0.10602 (* 1 = 0.10602 loss)\nI0820 00:24:22.152729 20842 sgd_solver.cpp:166] Iteration 56000, lr = 1.4\nI0820 00:26:38.925542 20842 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0820 00:28:03.362777 20842 solver.cpp:404]     Test net output #0: accuracy = 0.878641\nI0820 00:28:03.363155 20842 solver.cpp:404]     Test net output #1: loss = 0.439315 (* 1 = 0.439315 loss)\nI0820 00:28:04.683071 20842 solver.cpp:228] Iteration 56100, loss = 0.0869751\nI0820 00:28:04.683130 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:28:04.683156 20842 solver.cpp:244]     Train net output #1: loss = 0.0869745 (* 1 = 0.0869745 loss)\nI0820 00:28:04.767066 20842 sgd_solver.cpp:166] Iteration 56100, lr = 1.4025\nI0820 00:30:21.966009 20842 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0820 00:31:46.402787 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89072\nI0820 00:31:46.403143 20842 solver.cpp:404]     Test net output #1: loss = 0.385024 (* 1 = 0.385024 loss)\nI0820 00:31:47.722620 20842 solver.cpp:228] Iteration 56200, loss = 0.114045\nI0820 00:31:47.722671 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 00:31:47.722694 20842 solver.cpp:244]     Train net output #1: loss = 0.114044 (* 1 = 0.114044 loss)\nI0820 00:31:47.809615 20842 sgd_solver.cpp:166] Iteration 56200, lr = 1.405\nI0820 00:34:04.641319 20842 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0820 00:35:29.083933 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0820 00:35:29.084311 20842 solver.cpp:404]     Test net output #1: loss = 0.416214 (* 1 = 0.416214 loss)\nI0820 00:35:30.403906 20842 solver.cpp:228] Iteration 56300, loss = 0.183128\nI0820 00:35:30.403954 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:35:30.403978 20842 
solver.cpp:244]     Train net output #1: loss = 0.183128 (* 1 = 0.183128 loss)\nI0820 00:35:30.486564 20842 sgd_solver.cpp:166] Iteration 56300, lr = 1.4075\nI0820 00:37:47.303666 20842 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0820 00:39:11.740918 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88488\nI0820 00:39:11.741300 20842 solver.cpp:404]     Test net output #1: loss = 0.406693 (* 1 = 0.406693 loss)\nI0820 00:39:13.061136 20842 solver.cpp:228] Iteration 56400, loss = 0.201694\nI0820 00:39:13.061189 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 00:39:13.061214 20842 solver.cpp:244]     Train net output #1: loss = 0.201693 (* 1 = 0.201693 loss)\nI0820 00:39:13.148154 20842 sgd_solver.cpp:166] Iteration 56400, lr = 1.41\nI0820 00:41:29.954489 20842 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0820 00:42:53.988322 20842 solver.cpp:404]     Test net output #0: accuracy = 0.878\nI0820 00:42:53.988651 20842 solver.cpp:404]     Test net output #1: loss = 0.430885 (* 1 = 0.430885 loss)\nI0820 00:42:55.309123 20842 solver.cpp:228] Iteration 56500, loss = 0.275032\nI0820 00:42:55.309170 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 00:42:55.309195 20842 solver.cpp:244]     Train net output #1: loss = 0.275032 (* 1 = 0.275032 loss)\nI0820 00:42:55.399152 20842 sgd_solver.cpp:166] Iteration 56500, lr = 1.4125\nI0820 00:45:12.120656 20842 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0820 00:46:35.800739 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0820 00:46:35.801057 20842 solver.cpp:404]     Test net output #1: loss = 0.400595 (* 1 = 0.400595 loss)\nI0820 00:46:37.128300 20842 solver.cpp:228] Iteration 56600, loss = 0.175928\nI0820 00:46:37.128341 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:46:37.128357 20842 solver.cpp:244]     Train net output #1: loss = 0.175928 (* 1 = 0.175928 loss)\nI0820 00:46:37.202499 20842 sgd_solver.cpp:166] 
Iteration 56600, lr = 1.415\nI0820 00:48:53.954486 20842 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0820 00:50:17.311054 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88892\nI0820 00:50:17.311357 20842 solver.cpp:404]     Test net output #1: loss = 0.394521 (* 1 = 0.394521 loss)\nI0820 00:50:18.628281 20842 solver.cpp:228] Iteration 56700, loss = 0.182177\nI0820 00:50:18.628325 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 00:50:18.628342 20842 solver.cpp:244]     Train net output #1: loss = 0.182177 (* 1 = 0.182177 loss)\nI0820 00:50:18.712378 20842 sgd_solver.cpp:166] Iteration 56700, lr = 1.4175\nI0820 00:52:35.510953 20842 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0820 00:53:58.865037 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0820 00:53:58.865348 20842 solver.cpp:404]     Test net output #1: loss = 0.426961 (* 1 = 0.426961 loss)\nI0820 00:54:00.177618 20842 solver.cpp:228] Iteration 56800, loss = 0.151516\nI0820 00:54:00.177665 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:54:00.177682 20842 solver.cpp:244]     Train net output #1: loss = 0.151516 (* 1 = 0.151516 loss)\nI0820 00:54:00.264457 20842 sgd_solver.cpp:166] Iteration 56800, lr = 1.42\nI0820 00:56:16.923818 20842 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0820 00:57:40.267961 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87572\nI0820 00:57:40.268237 20842 solver.cpp:404]     Test net output #1: loss = 0.430847 (* 1 = 0.430847 loss)\nI0820 00:57:41.584507 20842 solver.cpp:228] Iteration 56900, loss = 0.10252\nI0820 00:57:41.584552 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 00:57:41.584568 20842 solver.cpp:244]     Train net output #1: loss = 0.10252 (* 1 = 0.10252 loss)\nI0820 00:57:41.667668 20842 sgd_solver.cpp:166] Iteration 56900, lr = 1.4225\nI0820 00:59:58.533589 20842 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0820 01:01:21.879802 
20842 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0820 01:01:21.880115 20842 solver.cpp:404]     Test net output #1: loss = 0.423672 (* 1 = 0.423672 loss)\nI0820 01:01:23.195683 20842 solver.cpp:228] Iteration 57000, loss = 0.152898\nI0820 01:01:23.195732 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:01:23.195756 20842 solver.cpp:244]     Train net output #1: loss = 0.152897 (* 1 = 0.152897 loss)\nI0820 01:01:23.277557 20842 sgd_solver.cpp:166] Iteration 57000, lr = 1.425\nI0820 01:03:40.063670 20842 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0820 01:05:03.408898 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88636\nI0820 01:05:03.409150 20842 solver.cpp:404]     Test net output #1: loss = 0.385721 (* 1 = 0.385721 loss)\nI0820 01:05:04.725677 20842 solver.cpp:228] Iteration 57100, loss = 0.137957\nI0820 01:05:04.725713 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:05:04.725728 20842 solver.cpp:244]     Train net output #1: loss = 0.137956 (* 1 = 0.137956 loss)\nI0820 01:05:04.807288 20842 sgd_solver.cpp:166] Iteration 57100, lr = 1.4275\nI0820 01:07:21.570544 20842 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0820 01:08:44.913661 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87584\nI0820 01:08:44.913969 20842 solver.cpp:404]     Test net output #1: loss = 0.422484 (* 1 = 0.422484 loss)\nI0820 01:08:46.229797 20842 solver.cpp:228] Iteration 57200, loss = 0.170123\nI0820 01:08:46.229841 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 01:08:46.229857 20842 solver.cpp:244]     Train net output #1: loss = 0.170122 (* 1 = 0.170122 loss)\nI0820 01:08:46.314550 20842 sgd_solver.cpp:166] Iteration 57200, lr = 1.43\nI0820 01:11:03.228452 20842 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0820 01:12:26.570839 20842 solver.cpp:404]     Test net output #0: accuracy = 0.873\nI0820 01:12:26.571154 20842 solver.cpp:404]     Test net output 
#1: loss = 0.450402 (* 1 = 0.450402 loss)\nI0820 01:12:27.887938 20842 solver.cpp:228] Iteration 57300, loss = 0.162117\nI0820 01:12:27.887972 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:12:27.887987 20842 solver.cpp:244]     Train net output #1: loss = 0.162117 (* 1 = 0.162117 loss)\nI0820 01:12:27.965432 20842 sgd_solver.cpp:166] Iteration 57300, lr = 1.4325\nI0820 01:14:44.833869 20842 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0820 01:16:08.177280 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87924\nI0820 01:16:08.177554 20842 solver.cpp:404]     Test net output #1: loss = 0.407141 (* 1 = 0.407141 loss)\nI0820 01:16:09.493613 20842 solver.cpp:228] Iteration 57400, loss = 0.117608\nI0820 01:16:09.493657 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:16:09.493674 20842 solver.cpp:244]     Train net output #1: loss = 0.117607 (* 1 = 0.117607 loss)\nI0820 01:16:09.570422 20842 sgd_solver.cpp:166] Iteration 57400, lr = 1.435\nI0820 01:18:26.325876 20842 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0820 01:19:49.672938 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88772\nI0820 01:19:49.673197 20842 solver.cpp:404]     Test net output #1: loss = 0.385435 (* 1 = 0.385435 loss)\nI0820 01:19:50.989456 20842 solver.cpp:228] Iteration 57500, loss = 0.122403\nI0820 01:19:50.989501 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 01:19:50.989518 20842 solver.cpp:244]     Train net output #1: loss = 0.122403 (* 1 = 0.122403 loss)\nI0820 01:19:51.076258 20842 sgd_solver.cpp:166] Iteration 57500, lr = 1.4375\nI0820 01:22:07.850443 20842 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0820 01:23:31.282974 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0820 01:23:31.283244 20842 solver.cpp:404]     Test net output #1: loss = 0.388437 (* 1 = 0.388437 loss)\nI0820 01:23:32.599544 20842 solver.cpp:228] Iteration 57600, loss = 
0.0786712\nI0820 01:23:32.599584 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 01:23:32.599601 20842 solver.cpp:244]     Train net output #1: loss = 0.0786707 (* 1 = 0.0786707 loss)\nI0820 01:23:32.685595 20842 sgd_solver.cpp:166] Iteration 57600, lr = 1.44\nI0820 01:25:49.669961 20842 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0820 01:27:13.114461 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88188\nI0820 01:27:13.114779 20842 solver.cpp:404]     Test net output #1: loss = 0.421605 (* 1 = 0.421605 loss)\nI0820 01:27:14.431043 20842 solver.cpp:228] Iteration 57700, loss = 0.163151\nI0820 01:27:14.431084 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 01:27:14.431100 20842 solver.cpp:244]     Train net output #1: loss = 0.163151 (* 1 = 0.163151 loss)\nI0820 01:27:14.509014 20842 sgd_solver.cpp:166] Iteration 57700, lr = 1.4425\nI0820 01:29:31.176772 20842 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0820 01:30:54.610879 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88108\nI0820 01:30:54.611199 20842 solver.cpp:404]     Test net output #1: loss = 0.405756 (* 1 = 0.405756 loss)\nI0820 01:30:55.927901 20842 solver.cpp:228] Iteration 57800, loss = 0.148623\nI0820 01:30:55.927940 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:30:55.927958 20842 solver.cpp:244]     Train net output #1: loss = 0.148622 (* 1 = 0.148622 loss)\nI0820 01:30:56.020812 20842 sgd_solver.cpp:166] Iteration 57800, lr = 1.445\nI0820 01:33:12.778944 20842 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0820 01:34:36.207398 20842 solver.cpp:404]     Test net output #0: accuracy = 0.877\nI0820 01:34:36.207712 20842 solver.cpp:404]     Test net output #1: loss = 0.429602 (* 1 = 0.429602 loss)\nI0820 01:34:37.523686 20842 solver.cpp:228] Iteration 57900, loss = 0.137381\nI0820 01:34:37.523727 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:34:37.523743 20842 
solver.cpp:244]     Train net output #1: loss = 0.13738 (* 1 = 0.13738 loss)\nI0820 01:34:37.609840 20842 sgd_solver.cpp:166] Iteration 57900, lr = 1.4475\nI0820 01:36:54.400593 20842 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0820 01:38:17.841599 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88012\nI0820 01:38:17.841874 20842 solver.cpp:404]     Test net output #1: loss = 0.422492 (* 1 = 0.422492 loss)\nI0820 01:38:19.158427 20842 solver.cpp:228] Iteration 58000, loss = 0.0794306\nI0820 01:38:19.158460 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:38:19.158476 20842 solver.cpp:244]     Train net output #1: loss = 0.07943 (* 1 = 0.07943 loss)\nI0820 01:38:19.244079 20842 sgd_solver.cpp:166] Iteration 58000, lr = 1.45\nI0820 01:40:36.361569 20842 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0820 01:41:59.795917 20842 solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0820 01:41:59.796205 20842 solver.cpp:404]     Test net output #1: loss = 0.406508 (* 1 = 0.406508 loss)\nI0820 01:42:01.112278 20842 solver.cpp:228] Iteration 58100, loss = 0.0673741\nI0820 01:42:01.112313 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 01:42:01.112326 20842 solver.cpp:244]     Train net output #1: loss = 0.0673735 (* 1 = 0.0673735 loss)\nI0820 01:42:01.196347 20842 sgd_solver.cpp:166] Iteration 58100, lr = 1.4525\nI0820 01:44:18.269949 20842 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0820 01:45:41.704085 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0820 01:45:41.704367 20842 solver.cpp:404]     Test net output #1: loss = 0.400379 (* 1 = 0.400379 loss)\nI0820 01:45:43.020195 20842 solver.cpp:228] Iteration 58200, loss = 0.111622\nI0820 01:45:43.020236 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:45:43.020252 20842 solver.cpp:244]     Train net output #1: loss = 0.111621 (* 1 = 0.111621 loss)\nI0820 01:45:43.107493 20842 sgd_solver.cpp:166] 
Iteration 58200, lr = 1.455\nI0820 01:48:00.143612 20842 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0820 01:49:23.581601 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88512\nI0820 01:49:23.581904 20842 solver.cpp:404]     Test net output #1: loss = 0.395352 (* 1 = 0.395352 loss)\nI0820 01:49:24.898465 20842 solver.cpp:228] Iteration 58300, loss = 0.144939\nI0820 01:49:24.898507 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:49:24.898522 20842 solver.cpp:244]     Train net output #1: loss = 0.144939 (* 1 = 0.144939 loss)\nI0820 01:49:24.989020 20842 sgd_solver.cpp:166] Iteration 58300, lr = 1.4575\nI0820 01:51:41.954499 20842 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0820 01:53:05.387159 20842 solver.cpp:404]     Test net output #0: accuracy = 0.879001\nI0820 01:53:05.387481 20842 solver.cpp:404]     Test net output #1: loss = 0.417366 (* 1 = 0.417366 loss)\nI0820 01:53:06.703615 20842 solver.cpp:228] Iteration 58400, loss = 0.123253\nI0820 01:53:06.703657 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:53:06.703673 20842 solver.cpp:244]     Train net output #1: loss = 0.123252 (* 1 = 0.123252 loss)\nI0820 01:53:06.790988 20842 sgd_solver.cpp:166] Iteration 58400, lr = 1.46\nI0820 01:55:23.773977 20842 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0820 01:56:47.201089 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0820 01:56:47.201366 20842 solver.cpp:404]     Test net output #1: loss = 0.417659 (* 1 = 0.417659 loss)\nI0820 01:56:48.517454 20842 solver.cpp:228] Iteration 58500, loss = 0.0924647\nI0820 01:56:48.517498 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:56:48.517513 20842 solver.cpp:244]     Train net output #1: loss = 0.0924641 (* 1 = 0.0924641 loss)\nI0820 01:56:48.602560 20842 sgd_solver.cpp:166] Iteration 58500, lr = 1.4625\nI0820 01:59:05.625231 20842 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0820 
02:00:29.058218 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8896\nI0820 02:00:29.058550 20842 solver.cpp:404]     Test net output #1: loss = 0.397839 (* 1 = 0.397839 loss)\nI0820 02:00:30.374642 20842 solver.cpp:228] Iteration 58600, loss = 0.227418\nI0820 02:00:30.374677 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 02:00:30.374692 20842 solver.cpp:244]     Train net output #1: loss = 0.227417 (* 1 = 0.227417 loss)\nI0820 02:00:30.464679 20842 sgd_solver.cpp:166] Iteration 58600, lr = 1.465\nI0820 02:02:47.483521 20842 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0820 02:04:10.917417 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0820 02:04:10.917721 20842 solver.cpp:404]     Test net output #1: loss = 0.391141 (* 1 = 0.391141 loss)\nI0820 02:04:12.234735 20842 solver.cpp:228] Iteration 58700, loss = 0.144456\nI0820 02:04:12.234771 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 02:04:12.234786 20842 solver.cpp:244]     Train net output #1: loss = 0.144455 (* 1 = 0.144455 loss)\nI0820 02:04:12.319042 20842 sgd_solver.cpp:166] Iteration 58700, lr = 1.4675\nI0820 02:06:29.358351 20842 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0820 02:07:52.790112 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0820 02:07:52.790403 20842 solver.cpp:404]     Test net output #1: loss = 0.410063 (* 1 = 0.410063 loss)\nI0820 02:07:54.106539 20842 solver.cpp:228] Iteration 58800, loss = 0.116896\nI0820 02:07:54.106582 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:07:54.106598 20842 solver.cpp:244]     Train net output #1: loss = 0.116895 (* 1 = 0.116895 loss)\nI0820 02:07:54.198942 20842 sgd_solver.cpp:166] Iteration 58800, lr = 1.47\nI0820 02:10:11.223510 20842 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0820 02:11:34.657836 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88916\nI0820 02:11:34.658120 20842 solver.cpp:404]     
Test net output #1: loss = 0.375906 (* 1 = 0.375906 loss)\nI0820 02:11:35.974311 20842 solver.cpp:228] Iteration 58900, loss = 0.191349\nI0820 02:11:35.974355 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:11:35.974371 20842 solver.cpp:244]     Train net output #1: loss = 0.191348 (* 1 = 0.191348 loss)\nI0820 02:11:36.064743 20842 sgd_solver.cpp:166] Iteration 58900, lr = 1.4725\nI0820 02:13:53.075999 20842 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0820 02:15:16.505463 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87592\nI0820 02:15:16.505767 20842 solver.cpp:404]     Test net output #1: loss = 0.4291 (* 1 = 0.4291 loss)\nI0820 02:15:17.821934 20842 solver.cpp:228] Iteration 59000, loss = 0.278166\nI0820 02:15:17.821967 20842 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 02:15:17.821983 20842 solver.cpp:244]     Train net output #1: loss = 0.278165 (* 1 = 0.278165 loss)\nI0820 02:15:17.904098 20842 sgd_solver.cpp:166] Iteration 59000, lr = 1.475\nI0820 02:17:34.909024 20842 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0820 02:18:58.337590 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8936\nI0820 02:18:58.337862 20842 solver.cpp:404]     Test net output #1: loss = 0.367324 (* 1 = 0.367324 loss)\nI0820 02:18:59.654649 20842 solver.cpp:228] Iteration 59100, loss = 0.0998898\nI0820 02:18:59.654692 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 02:18:59.654711 20842 solver.cpp:244]     Train net output #1: loss = 0.0998892 (* 1 = 0.0998892 loss)\nI0820 02:18:59.742815 20842 sgd_solver.cpp:166] Iteration 59100, lr = 1.4775\nI0820 02:21:16.732288 20842 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0820 02:22:40.152755 20842 solver.cpp:404]     Test net output #0: accuracy = 0.889\nI0820 02:22:40.153075 20842 solver.cpp:404]     Test net output #1: loss = 0.370675 (* 1 = 0.370675 loss)\nI0820 02:22:41.468713 20842 solver.cpp:228] Iteration 59200, loss = 
0.136307\nI0820 02:22:41.468758 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:22:41.468775 20842 solver.cpp:244]     Train net output #1: loss = 0.136306 (* 1 = 0.136306 loss)\nI0820 02:22:41.555184 20842 sgd_solver.cpp:166] Iteration 59200, lr = 1.48\nI0820 02:24:58.547237 20842 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0820 02:26:21.977609 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8882\nI0820 02:26:21.977918 20842 solver.cpp:404]     Test net output #1: loss = 0.381735 (* 1 = 0.381735 loss)\nI0820 02:26:23.294332 20842 solver.cpp:228] Iteration 59300, loss = 0.143794\nI0820 02:26:23.294366 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:26:23.294381 20842 solver.cpp:244]     Train net output #1: loss = 0.143794 (* 1 = 0.143794 loss)\nI0820 02:26:23.385658 20842 sgd_solver.cpp:166] Iteration 59300, lr = 1.4825\nI0820 02:28:40.247489 20842 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0820 02:30:03.677747 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0820 02:30:03.678062 20842 solver.cpp:404]     Test net output #1: loss = 0.424785 (* 1 = 0.424785 loss)\nI0820 02:30:04.995815 20842 solver.cpp:228] Iteration 59400, loss = 0.122342\nI0820 02:30:04.995860 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 02:30:04.995878 20842 solver.cpp:244]     Train net output #1: loss = 0.122341 (* 1 = 0.122341 loss)\nI0820 02:30:05.077793 20842 sgd_solver.cpp:166] Iteration 59400, lr = 1.485\nI0820 02:32:22.251119 20842 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0820 02:33:45.683373 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0820 02:33:45.683624 20842 solver.cpp:404]     Test net output #1: loss = 0.40316 (* 1 = 0.40316 loss)\nI0820 02:33:47.000793 20842 solver.cpp:228] Iteration 59500, loss = 0.0939997\nI0820 02:33:47.000829 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:33:47.000845 20842 
solver.cpp:244]     Train net output #1: loss = 0.0939991 (* 1 = 0.0939991 loss)\nI0820 02:33:47.092785 20842 sgd_solver.cpp:166] Iteration 59500, lr = 1.4875\nI0820 02:36:03.938839 20842 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0820 02:37:27.372824 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88748\nI0820 02:37:27.373136 20842 solver.cpp:404]     Test net output #1: loss = 0.399568 (* 1 = 0.399568 loss)\nI0820 02:37:28.691076 20842 solver.cpp:228] Iteration 59600, loss = 0.124095\nI0820 02:37:28.691121 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:37:28.691139 20842 solver.cpp:244]     Train net output #1: loss = 0.124095 (* 1 = 0.124095 loss)\nI0820 02:37:28.773942 20842 sgd_solver.cpp:166] Iteration 59600, lr = 1.49\nI0820 02:39:45.673812 20842 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0820 02:41:09.109633 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0820 02:41:09.109941 20842 solver.cpp:404]     Test net output #1: loss = 0.38314 (* 1 = 0.38314 loss)\nI0820 02:41:10.426448 20842 solver.cpp:228] Iteration 59700, loss = 0.163437\nI0820 02:41:10.426497 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 02:41:10.426514 20842 solver.cpp:244]     Train net output #1: loss = 0.163436 (* 1 = 0.163436 loss)\nI0820 02:41:10.510700 20842 sgd_solver.cpp:166] Iteration 59700, lr = 1.4925\nI0820 02:43:27.707587 20842 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0820 02:44:51.138909 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0820 02:44:51.139226 20842 solver.cpp:404]     Test net output #1: loss = 0.41418 (* 1 = 0.41418 loss)\nI0820 02:44:52.455541 20842 solver.cpp:228] Iteration 59800, loss = 0.0955032\nI0820 02:44:52.455577 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 02:44:52.455592 20842 solver.cpp:244]     Train net output #1: loss = 0.0955026 (* 1 = 0.0955026 loss)\nI0820 02:44:52.550326 20842 sgd_solver.cpp:166] 
Iteration 59800, lr = 1.495\nI0820 02:47:09.637609 20842 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0820 02:48:33.073966 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0820 02:48:33.074280 20842 solver.cpp:404]     Test net output #1: loss = 0.386765 (* 1 = 0.386765 loss)\nI0820 02:48:34.390475 20842 solver.cpp:228] Iteration 59900, loss = 0.09668\nI0820 02:48:34.390522 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 02:48:34.390540 20842 solver.cpp:244]     Train net output #1: loss = 0.0966794 (* 1 = 0.0966794 loss)\nI0820 02:48:34.485087 20842 sgd_solver.cpp:166] Iteration 59900, lr = 1.4975\nI0820 02:50:51.696075 20842 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0820 02:52:15.137331 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8764\nI0820 02:52:15.137629 20842 solver.cpp:404]     Test net output #1: loss = 0.43996 (* 1 = 0.43996 loss)\nI0820 02:52:16.454814 20842 solver.cpp:228] Iteration 60000, loss = 0.25469\nI0820 02:52:16.454849 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 02:52:16.454864 20842 solver.cpp:244]     Train net output #1: loss = 0.25469 (* 1 = 0.25469 loss)\nI0820 02:52:16.537536 20842 sgd_solver.cpp:166] Iteration 60000, lr = 1.5\nI0820 02:54:33.734405 20842 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0820 02:55:57.168614 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0820 02:55:57.168900 20842 solver.cpp:404]     Test net output #1: loss = 0.389342 (* 1 = 0.389342 loss)\nI0820 02:55:58.485973 20842 solver.cpp:228] Iteration 60100, loss = 0.2036\nI0820 02:55:58.486016 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:55:58.486032 20842 solver.cpp:244]     Train net output #1: loss = 0.203599 (* 1 = 0.203599 loss)\nI0820 02:55:58.574095 20842 sgd_solver.cpp:166] Iteration 60100, lr = 1.5025\nI0820 02:58:15.561854 20842 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0820 02:59:38.998103 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0820 02:59:38.998420 20842 solver.cpp:404]     Test net output #1: loss = 0.40403 (* 1 = 0.40403 loss)\nI0820 02:59:40.314409 20842 solver.cpp:228] Iteration 60200, loss = 0.0679368\nI0820 02:59:40.314453 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 02:59:40.314471 20842 solver.cpp:244]     Train net output #1: loss = 0.0679362 (* 1 = 0.0679362 loss)\nI0820 02:59:40.398236 20842 sgd_solver.cpp:166] Iteration 60200, lr = 1.505\nI0820 03:01:57.454200 20842 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0820 03:03:20.893429 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0820 03:03:20.893762 20842 solver.cpp:404]     Test net output #1: loss = 0.420658 (* 1 = 0.420658 loss)\nI0820 03:03:22.209169 20842 solver.cpp:228] Iteration 60300, loss = 0.121164\nI0820 03:03:22.209214 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:03:22.209231 20842 solver.cpp:244]     Train net output #1: loss = 0.121163 (* 1 = 0.121163 loss)\nI0820 03:03:22.294327 20842 sgd_solver.cpp:166] Iteration 60300, lr = 1.5075\nI0820 03:05:39.487365 20842 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0820 03:07:02.834501 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87476\nI0820 03:07:02.834811 20842 solver.cpp:404]     Test net output #1: loss = 0.436581 (* 1 = 0.436581 loss)\nI0820 03:07:04.151062 20842 solver.cpp:228] Iteration 60400, loss = 0.129323\nI0820 03:07:04.151106 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:07:04.151121 20842 solver.cpp:244]     Train net output #1: loss = 0.129322 (* 1 = 0.129322 loss)\nI0820 03:07:04.236654 20842 sgd_solver.cpp:166] Iteration 60400, lr = 1.51\nI0820 03:09:21.340850 20842 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0820 03:10:44.689049 20842 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0820 03:10:44.689388 20842 solver.cpp:404]     Test net output #1: 
loss = 0.402785 (* 1 = 0.402785 loss)\nI0820 03:10:46.006151 20842 solver.cpp:228] Iteration 60500, loss = 0.177175\nI0820 03:10:46.006187 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:10:46.006203 20842 solver.cpp:244]     Train net output #1: loss = 0.177174 (* 1 = 0.177174 loss)\nI0820 03:10:46.095777 20842 sgd_solver.cpp:166] Iteration 60500, lr = 1.5125\nI0820 03:13:03.040969 20842 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0820 03:14:26.398404 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88908\nI0820 03:14:26.398728 20842 solver.cpp:404]     Test net output #1: loss = 0.395692 (* 1 = 0.395692 loss)\nI0820 03:14:27.716305 20842 solver.cpp:228] Iteration 60600, loss = 0.134688\nI0820 03:14:27.716346 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:14:27.716363 20842 solver.cpp:244]     Train net output #1: loss = 0.134687 (* 1 = 0.134687 loss)\nI0820 03:14:27.798094 20842 sgd_solver.cpp:166] Iteration 60600, lr = 1.515\nI0820 03:16:44.726902 20842 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0820 03:18:08.079927 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0820 03:18:08.080204 20842 solver.cpp:404]     Test net output #1: loss = 0.417419 (* 1 = 0.417419 loss)\nI0820 03:18:09.397346 20842 solver.cpp:228] Iteration 60700, loss = 0.184713\nI0820 03:18:09.397388 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:18:09.397405 20842 solver.cpp:244]     Train net output #1: loss = 0.184712 (* 1 = 0.184712 loss)\nI0820 03:18:09.484611 20842 sgd_solver.cpp:166] Iteration 60700, lr = 1.5175\nI0820 03:20:26.375726 20842 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0820 03:21:49.731322 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0820 03:21:49.731580 20842 solver.cpp:404]     Test net output #1: loss = 0.388054 (* 1 = 0.388054 loss)\nI0820 03:21:51.048681 20842 solver.cpp:228] Iteration 60800, loss = 0.168351\nI0820 
03:21:51.048717 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 03:21:51.048732 20842 solver.cpp:244]     Train net output #1: loss = 0.16835 (* 1 = 0.16835 loss)\nI0820 03:21:51.130456 20842 sgd_solver.cpp:166] Iteration 60800, lr = 1.52\nI0820 03:24:08.086333 20842 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0820 03:25:31.445493 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87684\nI0820 03:25:31.445807 20842 solver.cpp:404]     Test net output #1: loss = 0.419165 (* 1 = 0.419165 loss)\nI0820 03:25:32.763387 20842 solver.cpp:228] Iteration 60900, loss = 0.144235\nI0820 03:25:32.763423 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 03:25:32.763440 20842 solver.cpp:244]     Train net output #1: loss = 0.144235 (* 1 = 0.144235 loss)\nI0820 03:25:32.852432 20842 sgd_solver.cpp:166] Iteration 60900, lr = 1.5225\nI0820 03:27:49.902981 20842 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0820 03:29:13.258227 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0820 03:29:13.258527 20842 solver.cpp:404]     Test net output #1: loss = 0.397581 (* 1 = 0.397581 loss)\nI0820 03:29:14.575670 20842 solver.cpp:228] Iteration 61000, loss = 0.13071\nI0820 03:29:14.575706 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:29:14.575721 20842 solver.cpp:244]     Train net output #1: loss = 0.13071 (* 1 = 0.13071 loss)\nI0820 03:29:14.659134 20842 sgd_solver.cpp:166] Iteration 61000, lr = 1.525\nI0820 03:31:31.629935 20842 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0820 03:32:54.993404 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0820 03:32:54.993719 20842 solver.cpp:404]     Test net output #1: loss = 0.391013 (* 1 = 0.391013 loss)\nI0820 03:32:56.310215 20842 solver.cpp:228] Iteration 61100, loss = 0.16382\nI0820 03:32:56.310250 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:32:56.310266 20842 solver.cpp:244]     Train 
net output #1: loss = 0.16382 (* 1 = 0.16382 loss)\nI0820 03:32:56.396080 20842 sgd_solver.cpp:166] Iteration 61100, lr = 1.5275\nI0820 03:35:13.515857 20842 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0820 03:36:36.871487 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88276\nI0820 03:36:36.871786 20842 solver.cpp:404]     Test net output #1: loss = 0.416525 (* 1 = 0.416525 loss)\nI0820 03:36:38.189100 20842 solver.cpp:228] Iteration 61200, loss = 0.184995\nI0820 03:36:38.189143 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:36:38.189159 20842 solver.cpp:244]     Train net output #1: loss = 0.184995 (* 1 = 0.184995 loss)\nI0820 03:36:38.276971 20842 sgd_solver.cpp:166] Iteration 61200, lr = 1.53\nI0820 03:38:55.192458 20842 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0820 03:40:18.622630 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88332\nI0820 03:40:18.622946 20842 solver.cpp:404]     Test net output #1: loss = 0.404848 (* 1 = 0.404848 loss)\nI0820 03:40:19.939874 20842 solver.cpp:228] Iteration 61300, loss = 0.107207\nI0820 03:40:19.939919 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:40:19.939936 20842 solver.cpp:244]     Train net output #1: loss = 0.107207 (* 1 = 0.107207 loss)\nI0820 03:40:20.032886 20842 sgd_solver.cpp:166] Iteration 61300, lr = 1.5325\nI0820 03:42:37.158311 20842 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0820 03:44:00.586803 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88308\nI0820 03:44:00.587102 20842 solver.cpp:404]     Test net output #1: loss = 0.422578 (* 1 = 0.422578 loss)\nI0820 03:44:01.903862 20842 solver.cpp:228] Iteration 61400, loss = 0.132076\nI0820 03:44:01.903905 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:44:01.903921 20842 solver.cpp:244]     Train net output #1: loss = 0.132076 (* 1 = 0.132076 loss)\nI0820 03:44:01.994503 20842 sgd_solver.cpp:166] Iteration 61400, lr = 
1.535\nI0820 03:46:18.784200 20842 solver.cpp:337] Iteration 61500, Testing net (#0)\nI0820 03:47:42.219456 20842 solver.cpp:404]     Test net output #0: accuracy = 0.884321\nI0820 03:47:42.219727 20842 solver.cpp:404]     Test net output #1: loss = 0.398567 (* 1 = 0.398567 loss)\nI0820 03:47:43.536147 20842 solver.cpp:228] Iteration 61500, loss = 0.185811\nI0820 03:47:43.536193 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:47:43.536208 20842 solver.cpp:244]     Train net output #1: loss = 0.18581 (* 1 = 0.18581 loss)\nI0820 03:47:43.620386 20842 sgd_solver.cpp:166] Iteration 61500, lr = 1.5375\nI0820 03:50:00.353935 20842 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0820 03:51:23.795573 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0820 03:51:23.795897 20842 solver.cpp:404]     Test net output #1: loss = 0.401728 (* 1 = 0.401728 loss)\nI0820 03:51:25.112244 20842 solver.cpp:228] Iteration 61600, loss = 0.144817\nI0820 03:51:25.112289 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:51:25.112305 20842 solver.cpp:244]     Train net output #1: loss = 0.144817 (* 1 = 0.144817 loss)\nI0820 03:51:25.197801 20842 sgd_solver.cpp:166] Iteration 61600, lr = 1.54\nI0820 03:53:41.884021 20842 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0820 03:55:05.322661 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88688\nI0820 03:55:05.322983 20842 solver.cpp:404]     Test net output #1: loss = 0.38478 (* 1 = 0.38478 loss)\nI0820 03:55:06.639060 20842 solver.cpp:228] Iteration 61700, loss = 0.145815\nI0820 03:55:06.639102 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:55:06.639119 20842 solver.cpp:244]     Train net output #1: loss = 0.145814 (* 1 = 0.145814 loss)\nI0820 03:55:06.722718 20842 sgd_solver.cpp:166] Iteration 61700, lr = 1.5425\nI0820 03:57:23.365483 20842 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0820 03:58:46.797217 20842 solver.cpp:404]  
   Test net output #0: accuracy = 0.87492\nI0820 03:58:46.797552 20842 solver.cpp:404]     Test net output #1: loss = 0.421905 (* 1 = 0.421905 loss)\nI0820 03:58:48.113292 20842 solver.cpp:228] Iteration 61800, loss = 0.147399\nI0820 03:58:48.113335 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:58:48.113353 20842 solver.cpp:244]     Train net output #1: loss = 0.147398 (* 1 = 0.147398 loss)\nI0820 03:58:48.194319 20842 sgd_solver.cpp:166] Iteration 61800, lr = 1.545\nI0820 04:01:05.142871 20842 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0820 04:02:29.657330 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88436\nI0820 04:02:29.657673 20842 solver.cpp:404]     Test net output #1: loss = 0.394299 (* 1 = 0.394299 loss)\nI0820 04:02:30.978335 20842 solver.cpp:228] Iteration 61900, loss = 0.116325\nI0820 04:02:30.978392 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 04:02:30.978410 20842 solver.cpp:244]     Train net output #1: loss = 0.116325 (* 1 = 0.116325 loss)\nI0820 04:02:31.061096 20842 sgd_solver.cpp:166] Iteration 61900, lr = 1.5475\nI0820 04:04:47.857360 20842 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0820 04:06:12.292299 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885561\nI0820 04:06:12.292660 20842 solver.cpp:404]     Test net output #1: loss = 0.391196 (* 1 = 0.391196 loss)\nI0820 04:06:13.612839 20842 solver.cpp:228] Iteration 62000, loss = 0.196229\nI0820 04:06:13.612891 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:06:13.612908 20842 solver.cpp:244]     Train net output #1: loss = 0.196228 (* 1 = 0.196228 loss)\nI0820 04:06:13.698319 20842 sgd_solver.cpp:166] Iteration 62000, lr = 1.55\nI0820 04:08:30.479763 20842 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0820 04:09:54.902206 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0820 04:09:54.902571 20842 solver.cpp:404]     Test net output #1: loss = 0.411528 
(* 1 = 0.411528 loss)\nI0820 04:09:56.221611 20842 solver.cpp:228] Iteration 62100, loss = 0.287872\nI0820 04:09:56.221662 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 04:09:56.221678 20842 solver.cpp:244]     Train net output #1: loss = 0.287871 (* 1 = 0.287871 loss)\nI0820 04:09:56.306900 20842 sgd_solver.cpp:166] Iteration 62100, lr = 1.5525\nI0820 04:12:13.187283 20842 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0820 04:13:37.608896 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0820 04:13:37.609277 20842 solver.cpp:404]     Test net output #1: loss = 0.418114 (* 1 = 0.418114 loss)\nI0820 04:13:38.930274 20842 solver.cpp:228] Iteration 62200, loss = 0.177965\nI0820 04:13:38.930335 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:13:38.930352 20842 solver.cpp:244]     Train net output #1: loss = 0.177964 (* 1 = 0.177964 loss)\nI0820 04:13:39.019280 20842 sgd_solver.cpp:166] Iteration 62200, lr = 1.555\nI0820 04:15:55.869007 20842 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0820 04:17:20.293431 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88188\nI0820 04:17:20.293815 20842 solver.cpp:404]     Test net output #1: loss = 0.389355 (* 1 = 0.389355 loss)\nI0820 04:17:21.614117 20842 solver.cpp:228] Iteration 62300, loss = 0.0973639\nI0820 04:17:21.614173 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 04:17:21.614192 20842 solver.cpp:244]     Train net output #1: loss = 0.0973632 (* 1 = 0.0973632 loss)\nI0820 04:17:21.701665 20842 sgd_solver.cpp:166] Iteration 62300, lr = 1.5575\nI0820 04:19:38.634651 20842 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0820 04:21:03.062356 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87592\nI0820 04:21:03.062736 20842 solver.cpp:404]     Test net output #1: loss = 0.415865 (* 1 = 0.415865 loss)\nI0820 04:21:04.382046 20842 solver.cpp:228] Iteration 62400, loss = 0.310136\nI0820 04:21:04.382100 
20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 04:21:04.382117 20842 solver.cpp:244]     Train net output #1: loss = 0.310135 (* 1 = 0.310135 loss)\nI0820 04:21:04.461006 20842 sgd_solver.cpp:166] Iteration 62400, lr = 1.56\nI0820 04:23:21.307736 20842 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0820 04:24:45.739439 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0820 04:24:45.739825 20842 solver.cpp:404]     Test net output #1: loss = 0.405823 (* 1 = 0.405823 loss)\nI0820 04:24:47.059233 20842 solver.cpp:228] Iteration 62500, loss = 0.21226\nI0820 04:24:47.059283 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 04:24:47.059299 20842 solver.cpp:244]     Train net output #1: loss = 0.212259 (* 1 = 0.212259 loss)\nI0820 04:24:47.142143 20842 sgd_solver.cpp:166] Iteration 62500, lr = 1.5625\nI0820 04:27:04.156816 20842 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0820 04:28:28.555598 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0820 04:28:28.555963 20842 solver.cpp:404]     Test net output #1: loss = 0.390028 (* 1 = 0.390028 loss)\nI0820 04:28:29.875509 20842 solver.cpp:228] Iteration 62600, loss = 0.165397\nI0820 04:28:29.875567 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 04:28:29.875586 20842 solver.cpp:244]     Train net output #1: loss = 0.165397 (* 1 = 0.165397 loss)\nI0820 04:28:29.959383 20842 sgd_solver.cpp:166] Iteration 62600, lr = 1.565\nI0820 04:30:46.893760 20842 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0820 04:32:11.275187 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87924\nI0820 04:32:11.275548 20842 solver.cpp:404]     Test net output #1: loss = 0.403573 (* 1 = 0.403573 loss)\nI0820 04:32:12.594861 20842 solver.cpp:228] Iteration 62700, loss = 0.143396\nI0820 04:32:12.594913 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:32:12.594930 20842 solver.cpp:244]     Train net 
output #1: loss = 0.143396 (* 1 = 0.143396 loss)\nI0820 04:32:12.679672 20842 sgd_solver.cpp:166] Iteration 62700, lr = 1.5675\nI0820 04:34:29.631438 20842 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0820 04:35:54.060312 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0820 04:35:54.060676 20842 solver.cpp:404]     Test net output #1: loss = 0.44589 (* 1 = 0.44589 loss)\nI0820 04:35:55.379639 20842 solver.cpp:228] Iteration 62800, loss = 0.210049\nI0820 04:35:55.379693 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:35:55.379709 20842 solver.cpp:244]     Train net output #1: loss = 0.210049 (* 1 = 0.210049 loss)\nI0820 04:35:55.465953 20842 sgd_solver.cpp:166] Iteration 62800, lr = 1.57\nI0820 04:38:12.439847 20842 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0820 04:39:36.872644 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88496\nI0820 04:39:36.873006 20842 solver.cpp:404]     Test net output #1: loss = 0.392918 (* 1 = 0.392918 loss)\nI0820 04:39:38.192190 20842 solver.cpp:228] Iteration 62900, loss = 0.0927518\nI0820 04:39:38.192241 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:39:38.192260 20842 solver.cpp:244]     Train net output #1: loss = 0.092751 (* 1 = 0.092751 loss)\nI0820 04:39:38.276696 20842 sgd_solver.cpp:166] Iteration 62900, lr = 1.5725\nI0820 04:41:55.271610 20842 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0820 04:43:19.685618 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0820 04:43:19.686000 20842 solver.cpp:404]     Test net output #1: loss = 0.427487 (* 1 = 0.427487 loss)\nI0820 04:43:21.005069 20842 solver.cpp:228] Iteration 63000, loss = 0.181514\nI0820 04:43:21.005120 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:43:21.005137 20842 solver.cpp:244]     Train net output #1: loss = 0.181513 (* 1 = 0.181513 loss)\nI0820 04:43:21.084184 20842 sgd_solver.cpp:166] Iteration 63000, lr = 
1.575\nI0820 04:45:38.017738 20842 solver.cpp:337] Iteration 63100, Testing net (#0)\nI0820 04:47:02.421061 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88396\nI0820 04:47:02.421447 20842 solver.cpp:404]     Test net output #1: loss = 0.406192 (* 1 = 0.406192 loss)\nI0820 04:47:03.741369 20842 solver.cpp:228] Iteration 63100, loss = 0.109247\nI0820 04:47:03.741423 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 04:47:03.741441 20842 solver.cpp:244]     Train net output #1: loss = 0.109247 (* 1 = 0.109247 loss)\nI0820 04:47:03.819934 20842 sgd_solver.cpp:166] Iteration 63100, lr = 1.5775\nI0820 04:49:20.782968 20842 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0820 04:50:45.193285 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0820 04:50:45.193665 20842 solver.cpp:404]     Test net output #1: loss = 0.398951 (* 1 = 0.398951 loss)\nI0820 04:50:46.512835 20842 solver.cpp:228] Iteration 63200, loss = 0.0620784\nI0820 04:50:46.512892 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 04:50:46.512908 20842 solver.cpp:244]     Train net output #1: loss = 0.0620777 (* 1 = 0.0620777 loss)\nI0820 04:50:46.590631 20842 sgd_solver.cpp:166] Iteration 63200, lr = 1.58\nI0820 04:53:03.502606 20842 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0820 04:54:27.922850 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87784\nI0820 04:54:27.923214 20842 solver.cpp:404]     Test net output #1: loss = 0.415448 (* 1 = 0.415448 loss)\nI0820 04:54:29.242908 20842 solver.cpp:228] Iteration 63300, loss = 0.176232\nI0820 04:54:29.242954 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:54:29.242970 20842 solver.cpp:244]     Train net output #1: loss = 0.176231 (* 1 = 0.176231 loss)\nI0820 04:54:29.326666 20842 sgd_solver.cpp:166] Iteration 63300, lr = 1.5825\nI0820 04:56:46.272562 20842 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0820 04:58:10.692499 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.88524\nI0820 04:58:10.692904 20842 solver.cpp:404]     Test net output #1: loss = 0.403967 (* 1 = 0.403967 loss)\nI0820 04:58:12.011835 20842 solver.cpp:228] Iteration 63400, loss = 0.0995383\nI0820 04:58:12.011883 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 04:58:12.011898 20842 solver.cpp:244]     Train net output #1: loss = 0.0995375 (* 1 = 0.0995375 loss)\nI0820 04:58:12.094940 20842 sgd_solver.cpp:166] Iteration 63400, lr = 1.585\nI0820 05:00:29.046840 20842 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0820 05:01:53.460132 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87644\nI0820 05:01:53.460525 20842 solver.cpp:404]     Test net output #1: loss = 0.423004 (* 1 = 0.423004 loss)\nI0820 05:01:54.779685 20842 solver.cpp:228] Iteration 63500, loss = 0.137531\nI0820 05:01:54.779733 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:01:54.779749 20842 solver.cpp:244]     Train net output #1: loss = 0.13753 (* 1 = 0.13753 loss)\nI0820 05:01:54.864380 20842 sgd_solver.cpp:166] Iteration 63500, lr = 1.5875\nI0820 05:04:11.749253 20842 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0820 05:05:36.169950 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87744\nI0820 05:05:36.170313 20842 solver.cpp:404]     Test net output #1: loss = 0.406916 (* 1 = 0.406916 loss)\nI0820 05:05:37.490242 20842 solver.cpp:228] Iteration 63600, loss = 0.149111\nI0820 05:05:37.490295 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:05:37.490314 20842 solver.cpp:244]     Train net output #1: loss = 0.149111 (* 1 = 0.149111 loss)\nI0820 05:05:37.575350 20842 sgd_solver.cpp:166] Iteration 63600, lr = 1.59\nI0820 05:07:54.341313 20842 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0820 05:09:18.752331 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8812\nI0820 05:09:18.752696 20842 solver.cpp:404]     Test net output #1: 
loss = 0.404968 (* 1 = 0.404968 loss)\nI0820 05:09:20.072262 20842 solver.cpp:228] Iteration 63700, loss = 0.183029\nI0820 05:09:20.072312 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 05:09:20.072329 20842 solver.cpp:244]     Train net output #1: loss = 0.183028 (* 1 = 0.183028 loss)\nI0820 05:09:20.157547 20842 sgd_solver.cpp:166] Iteration 63700, lr = 1.5925\nI0820 05:11:37.024267 20842 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0820 05:13:01.434904 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88544\nI0820 05:13:01.435282 20842 solver.cpp:404]     Test net output #1: loss = 0.391858 (* 1 = 0.391858 loss)\nI0820 05:13:02.754534 20842 solver.cpp:228] Iteration 63800, loss = 0.140844\nI0820 05:13:02.754580 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:13:02.754597 20842 solver.cpp:244]     Train net output #1: loss = 0.140843 (* 1 = 0.140843 loss)\nI0820 05:13:02.834532 20842 sgd_solver.cpp:166] Iteration 63800, lr = 1.595\nI0820 05:15:19.400182 20842 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0820 05:16:43.829785 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89068\nI0820 05:16:43.830180 20842 solver.cpp:404]     Test net output #1: loss = 0.370557 (* 1 = 0.370557 loss)\nI0820 05:16:45.149876 20842 solver.cpp:228] Iteration 63900, loss = 0.172907\nI0820 05:16:45.149922 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:16:45.149940 20842 solver.cpp:244]     Train net output #1: loss = 0.172907 (* 1 = 0.172907 loss)\nI0820 05:16:45.229637 20842 sgd_solver.cpp:166] Iteration 63900, lr = 1.5975\nI0820 05:19:01.794781 20842 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0820 05:20:26.219601 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0820 05:20:26.219959 20842 solver.cpp:404]     Test net output #1: loss = 0.400614 (* 1 = 0.400614 loss)\nI0820 05:20:27.539841 20842 solver.cpp:228] Iteration 64000, loss = 0.220527\nI0820 
05:20:27.539886 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:20:27.539903 20842 solver.cpp:244]     Train net output #1: loss = 0.220526 (* 1 = 0.220526 loss)\nI0820 05:20:27.624166 20842 sgd_solver.cpp:166] Iteration 64000, lr = 1.6\nI0820 05:22:44.130748 20842 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0820 05:24:08.564227 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87524\nI0820 05:24:08.564587 20842 solver.cpp:404]     Test net output #1: loss = 0.429834 (* 1 = 0.429834 loss)\nI0820 05:24:09.883400 20842 solver.cpp:228] Iteration 64100, loss = 0.17754\nI0820 05:24:09.883448 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:24:09.883466 20842 solver.cpp:244]     Train net output #1: loss = 0.177539 (* 1 = 0.177539 loss)\nI0820 05:24:09.973395 20842 sgd_solver.cpp:166] Iteration 64100, lr = 1.6025\nI0820 05:26:26.511449 20842 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0820 05:27:50.938235 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88728\nI0820 05:27:50.938725 20842 solver.cpp:404]     Test net output #1: loss = 0.397806 (* 1 = 0.397806 loss)\nI0820 05:27:52.257980 20842 solver.cpp:228] Iteration 64200, loss = 0.104055\nI0820 05:27:52.258029 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:27:52.258046 20842 solver.cpp:244]     Train net output #1: loss = 0.104055 (* 1 = 0.104055 loss)\nI0820 05:27:52.338141 20842 sgd_solver.cpp:166] Iteration 64200, lr = 1.605\nI0820 05:30:08.854706 20842 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0820 05:31:33.282766 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8872\nI0820 05:31:33.283149 20842 solver.cpp:404]     Test net output #1: loss = 0.375729 (* 1 = 0.375729 loss)\nI0820 05:31:34.602337 20842 solver.cpp:228] Iteration 64300, loss = 0.122206\nI0820 05:31:34.602394 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 05:31:34.602411 20842 solver.cpp:244]     
Train net output #1: loss = 0.122205 (* 1 = 0.122205 loss)\nI0820 05:31:34.687108 20842 sgd_solver.cpp:166] Iteration 64300, lr = 1.6075\nI0820 05:33:51.223932 20842 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0820 05:35:15.653661 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0820 05:35:15.654042 20842 solver.cpp:404]     Test net output #1: loss = 0.395664 (* 1 = 0.395664 loss)\nI0820 05:35:16.973893 20842 solver.cpp:228] Iteration 64400, loss = 0.164548\nI0820 05:35:16.973938 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 05:35:16.973954 20842 solver.cpp:244]     Train net output #1: loss = 0.164547 (* 1 = 0.164547 loss)\nI0820 05:35:17.051357 20842 sgd_solver.cpp:166] Iteration 64400, lr = 1.61\nI0820 05:37:33.849452 20842 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0820 05:38:58.286998 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87728\nI0820 05:38:58.287362 20842 solver.cpp:404]     Test net output #1: loss = 0.411058 (* 1 = 0.411058 loss)\nI0820 05:38:59.606469 20842 solver.cpp:228] Iteration 64500, loss = 0.186123\nI0820 05:38:59.606516 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:38:59.606534 20842 solver.cpp:244]     Train net output #1: loss = 0.186122 (* 1 = 0.186122 loss)\nI0820 05:38:59.680153 20842 sgd_solver.cpp:166] Iteration 64500, lr = 1.6125\nI0820 05:41:16.677264 20842 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0820 05:42:41.103554 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0820 05:42:41.103929 20842 solver.cpp:404]     Test net output #1: loss = 0.386327 (* 1 = 0.386327 loss)\nI0820 05:42:42.423125 20842 solver.cpp:228] Iteration 64600, loss = 0.15652\nI0820 05:42:42.423176 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:42:42.423193 20842 solver.cpp:244]     Train net output #1: loss = 0.156519 (* 1 = 0.156519 loss)\nI0820 05:42:42.508150 20842 sgd_solver.cpp:166] Iteration 64600, lr 
= 1.615\nI0820 05:44:59.343595 20842 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0820 05:46:23.769943 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88192\nI0820 05:46:23.770328 20842 solver.cpp:404]     Test net output #1: loss = 0.411779 (* 1 = 0.411779 loss)\nI0820 05:46:25.089395 20842 solver.cpp:228] Iteration 64700, loss = 0.210485\nI0820 05:46:25.089447 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 05:46:25.089464 20842 solver.cpp:244]     Train net output #1: loss = 0.210484 (* 1 = 0.210484 loss)\nI0820 05:46:25.172668 20842 sgd_solver.cpp:166] Iteration 64700, lr = 1.6175\nI0820 05:48:42.060243 20842 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0820 05:50:06.493582 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0820 05:50:06.493939 20842 solver.cpp:404]     Test net output #1: loss = 0.396325 (* 1 = 0.396325 loss)\nI0820 05:50:07.813449 20842 solver.cpp:228] Iteration 64800, loss = 0.170803\nI0820 05:50:07.813503 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 05:50:07.813521 20842 solver.cpp:244]     Train net output #1: loss = 0.170802 (* 1 = 0.170802 loss)\nI0820 05:50:07.899868 20842 sgd_solver.cpp:166] Iteration 64800, lr = 1.62\nI0820 05:52:25.134562 20842 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0820 05:53:49.566131 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8802\nI0820 05:53:49.566493 20842 solver.cpp:404]     Test net output #1: loss = 0.411447 (* 1 = 0.411447 loss)\nI0820 05:53:50.886277 20842 solver.cpp:228] Iteration 64900, loss = 0.132682\nI0820 05:53:50.886327 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 05:53:50.886344 20842 solver.cpp:244]     Train net output #1: loss = 0.132682 (* 1 = 0.132682 loss)\nI0820 05:53:50.974025 20842 sgd_solver.cpp:166] Iteration 64900, lr = 1.6225\nI0820 05:56:07.838991 20842 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0820 05:57:32.273182 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.89148\nI0820 05:57:32.273581 20842 solver.cpp:404]     Test net output #1: loss = 0.378848 (* 1 = 0.378848 loss)\nI0820 05:57:33.593899 20842 solver.cpp:228] Iteration 65000, loss = 0.10702\nI0820 05:57:33.593950 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 05:57:33.593976 20842 solver.cpp:244]     Train net output #1: loss = 0.10702 (* 1 = 0.10702 loss)\nI0820 05:57:33.674305 20842 sgd_solver.cpp:166] Iteration 65000, lr = 1.625\nI0820 05:59:50.593905 20842 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0820 06:01:15.024471 20842 solver.cpp:404]     Test net output #0: accuracy = 0.884841\nI0820 06:01:15.024868 20842 solver.cpp:404]     Test net output #1: loss = 0.392847 (* 1 = 0.392847 loss)\nI0820 06:01:16.344285 20842 solver.cpp:228] Iteration 65100, loss = 0.165498\nI0820 06:01:16.344343 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:01:16.344367 20842 solver.cpp:244]     Train net output #1: loss = 0.165498 (* 1 = 0.165498 loss)\nI0820 06:01:16.424046 20842 sgd_solver.cpp:166] Iteration 65100, lr = 1.6275\nI0820 06:03:33.333875 20842 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0820 06:04:57.767226 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89056\nI0820 06:04:57.767599 20842 solver.cpp:404]     Test net output #1: loss = 0.38805 (* 1 = 0.38805 loss)\nI0820 06:04:59.088004 20842 solver.cpp:228] Iteration 65200, loss = 0.118203\nI0820 06:04:59.088055 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:04:59.088080 20842 solver.cpp:244]     Train net output #1: loss = 0.118203 (* 1 = 0.118203 loss)\nI0820 06:04:59.174111 20842 sgd_solver.cpp:166] Iteration 65200, lr = 1.63\nI0820 06:07:16.119911 20842 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0820 06:08:39.562983 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0820 06:08:39.563285 20842 solver.cpp:404]     Test net output #1: 
loss = 0.422905 (* 1 = 0.422905 loss)\nI0820 06:08:40.879850 20842 solver.cpp:228] Iteration 65300, loss = 0.152847\nI0820 06:08:40.879894 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:08:40.879914 20842 solver.cpp:244]     Train net output #1: loss = 0.152846 (* 1 = 0.152846 loss)\nI0820 06:08:40.965648 20842 sgd_solver.cpp:166] Iteration 65300, lr = 1.6325\nI0820 06:10:58.146919 20842 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0820 06:12:21.576337 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89152\nI0820 06:12:21.576670 20842 solver.cpp:404]     Test net output #1: loss = 0.366592 (* 1 = 0.366592 loss)\nI0820 06:12:22.892931 20842 solver.cpp:228] Iteration 65400, loss = 0.160348\nI0820 06:12:22.892974 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:12:22.892992 20842 solver.cpp:244]     Train net output #1: loss = 0.160348 (* 1 = 0.160348 loss)\nI0820 06:12:22.980551 20842 sgd_solver.cpp:166] Iteration 65400, lr = 1.635\nI0820 06:14:40.189828 20842 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0820 06:16:03.622262 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88696\nI0820 06:16:03.622551 20842 solver.cpp:404]     Test net output #1: loss = 0.39254 (* 1 = 0.39254 loss)\nI0820 06:16:04.938758 20842 solver.cpp:228] Iteration 65500, loss = 0.235321\nI0820 06:16:04.938802 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 06:16:04.938817 20842 solver.cpp:244]     Train net output #1: loss = 0.23532 (* 1 = 0.23532 loss)\nI0820 06:16:05.026806 20842 sgd_solver.cpp:166] Iteration 65500, lr = 1.6375\nI0820 06:18:22.119565 20842 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0820 06:19:45.553555 20842 solver.cpp:404]     Test net output #0: accuracy = 0.879881\nI0820 06:19:45.553835 20842 solver.cpp:404]     Test net output #1: loss = 0.427546 (* 1 = 0.427546 loss)\nI0820 06:19:46.870864 20842 solver.cpp:228] Iteration 65600, loss = 0.23015\nI0820 
06:19:46.870908 20842 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0820 06:19:46.870924 20842 solver.cpp:244]     Train net output #1: loss = 0.230149 (* 1 = 0.230149 loss)\nI0820 06:19:46.961444 20842 sgd_solver.cpp:166] Iteration 65600, lr = 1.64\nI0820 06:22:03.906167 20842 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0820 06:23:27.328554 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87016\nI0820 06:23:27.328831 20842 solver.cpp:404]     Test net output #1: loss = 0.444009 (* 1 = 0.444009 loss)\nI0820 06:23:28.645397 20842 solver.cpp:228] Iteration 65700, loss = 0.183457\nI0820 06:23:28.645433 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:23:28.645449 20842 solver.cpp:244]     Train net output #1: loss = 0.183456 (* 1 = 0.183456 loss)\nI0820 06:23:28.733028 20842 sgd_solver.cpp:166] Iteration 65700, lr = 1.6425\nI0820 06:25:45.734257 20842 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0820 06:27:09.168244 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87424\nI0820 06:27:09.168570 20842 solver.cpp:404]     Test net output #1: loss = 0.430927 (* 1 = 0.430927 loss)\nI0820 06:27:10.485112 20842 solver.cpp:228] Iteration 65800, loss = 0.132859\nI0820 06:27:10.485152 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 06:27:10.485167 20842 solver.cpp:244]     Train net output #1: loss = 0.132858 (* 1 = 0.132858 loss)\nI0820 06:27:10.572422 20842 sgd_solver.cpp:166] Iteration 65800, lr = 1.645\nI0820 06:29:27.534786 20842 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0820 06:30:50.963971 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88344\nI0820 06:30:50.964298 20842 solver.cpp:404]     Test net output #1: loss = 0.366761 (* 1 = 0.366761 loss)\nI0820 06:30:52.280649 20842 solver.cpp:228] Iteration 65900, loss = 0.0666306\nI0820 06:30:52.280690 20842 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 06:30:52.280706 20842 solver.cpp:244]    
 Train net output #1: loss = 0.0666299 (* 1 = 0.0666299 loss)\nI0820 06:30:52.367569 20842 sgd_solver.cpp:166] Iteration 65900, lr = 1.6475\nI0820 06:33:09.694481 20842 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0820 06:34:33.122751 20842 solver.cpp:404]     Test net output #0: accuracy = 0.886481\nI0820 06:34:33.123077 20842 solver.cpp:404]     Test net output #1: loss = 0.393669 (* 1 = 0.393669 loss)\nI0820 06:34:34.439844 20842 solver.cpp:228] Iteration 66000, loss = 0.183512\nI0820 06:34:34.439883 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:34:34.439900 20842 solver.cpp:244]     Train net output #1: loss = 0.183511 (* 1 = 0.183511 loss)\nI0820 06:34:34.529436 20842 sgd_solver.cpp:166] Iteration 66000, lr = 1.65\nI0820 06:36:51.540132 20842 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0820 06:38:14.971177 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88796\nI0820 06:38:14.971506 20842 solver.cpp:404]     Test net output #1: loss = 0.392412 (* 1 = 0.392412 loss)\nI0820 06:38:16.288054 20842 solver.cpp:228] Iteration 66100, loss = 0.0935557\nI0820 06:38:16.288100 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 06:38:16.288116 20842 solver.cpp:244]     Train net output #1: loss = 0.0935549 (* 1 = 0.0935549 loss)\nI0820 06:38:16.371594 20842 sgd_solver.cpp:166] Iteration 66100, lr = 1.6525\nI0820 06:40:33.391525 20842 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0820 06:41:56.822777 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0820 06:41:56.823083 20842 solver.cpp:404]     Test net output #1: loss = 0.400964 (* 1 = 0.400964 loss)\nI0820 06:41:58.140058 20842 solver.cpp:228] Iteration 66200, loss = 0.264294\nI0820 06:41:58.140100 20842 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 06:41:58.140117 20842 solver.cpp:244]     Train net output #1: loss = 0.264294 (* 1 = 0.264294 loss)\nI0820 06:41:58.224540 20842 sgd_solver.cpp:166] Iteration 
66200, lr = 1.655\nI0820 06:44:15.176378 20842 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0820 06:45:38.614568 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87896\nI0820 06:45:38.614868 20842 solver.cpp:404]     Test net output #1: loss = 0.403431 (* 1 = 0.403431 loss)\nI0820 06:45:39.931124 20842 solver.cpp:228] Iteration 66300, loss = 0.18329\nI0820 06:45:39.931175 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 06:45:39.931200 20842 solver.cpp:244]     Train net output #1: loss = 0.183289 (* 1 = 0.183289 loss)\nI0820 06:45:40.020884 20842 sgd_solver.cpp:166] Iteration 66300, lr = 1.6575\nI0820 06:47:57.051311 20842 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0820 06:49:20.488366 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0820 06:49:20.488698 20842 solver.cpp:404]     Test net output #1: loss = 0.391026 (* 1 = 0.391026 loss)\nI0820 06:49:21.805294 20842 solver.cpp:228] Iteration 66400, loss = 0.0835248\nI0820 06:49:21.805337 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 06:49:21.805353 20842 solver.cpp:244]     Train net output #1: loss = 0.083524 (* 1 = 0.083524 loss)\nI0820 06:49:21.900560 20842 sgd_solver.cpp:166] Iteration 66400, lr = 1.66\nI0820 06:51:38.880041 20842 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0820 06:53:02.318526 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88304\nI0820 06:53:02.318838 20842 solver.cpp:404]     Test net output #1: loss = 0.39855 (* 1 = 0.39855 loss)\nI0820 06:53:03.634405 20842 solver.cpp:228] Iteration 66500, loss = 0.226171\nI0820 06:53:03.634451 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 06:53:03.634469 20842 solver.cpp:244]     Train net output #1: loss = 0.22617 (* 1 = 0.22617 loss)\nI0820 06:53:03.725769 20842 sgd_solver.cpp:166] Iteration 66500, lr = 1.6625\nI0820 06:55:20.781450 20842 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0820 06:56:44.218844 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.89092\nI0820 06:56:44.219137 20842 solver.cpp:404]     Test net output #1: loss = 0.363862 (* 1 = 0.363862 loss)\nI0820 06:56:45.535295 20842 solver.cpp:228] Iteration 66600, loss = 0.217066\nI0820 06:56:45.535338 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 06:56:45.535354 20842 solver.cpp:244]     Train net output #1: loss = 0.217065 (* 1 = 0.217065 loss)\nI0820 06:56:45.624053 20842 sgd_solver.cpp:166] Iteration 66600, lr = 1.665\nI0820 06:59:02.972736 20842 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0820 07:00:26.411536 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87732\nI0820 07:00:26.411823 20842 solver.cpp:404]     Test net output #1: loss = 0.400004 (* 1 = 0.400004 loss)\nI0820 07:00:27.728237 20842 solver.cpp:228] Iteration 66700, loss = 0.136196\nI0820 07:00:27.728282 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:00:27.728298 20842 solver.cpp:244]     Train net output #1: loss = 0.136195 (* 1 = 0.136195 loss)\nI0820 07:00:27.822573 20842 sgd_solver.cpp:166] Iteration 66700, lr = 1.6675\nI0820 07:02:44.837939 20842 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0820 07:04:08.266329 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0820 07:04:08.266621 20842 solver.cpp:404]     Test net output #1: loss = 0.380825 (* 1 = 0.380825 loss)\nI0820 07:04:09.583039 20842 solver.cpp:228] Iteration 66800, loss = 0.185421\nI0820 07:04:09.583084 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:04:09.583101 20842 solver.cpp:244]     Train net output #1: loss = 0.18542 (* 1 = 0.18542 loss)\nI0820 07:04:09.673755 20842 sgd_solver.cpp:166] Iteration 66800, lr = 1.67\nI0820 07:06:26.672251 20842 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0820 07:07:50.107233 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0820 07:07:50.107539 20842 solver.cpp:404]     Test net output #1: loss 
= 0.42206 (* 1 = 0.42206 loss)\nI0820 07:07:51.423985 20842 solver.cpp:228] Iteration 66900, loss = 0.135575\nI0820 07:07:51.424031 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 07:07:51.424047 20842 solver.cpp:244]     Train net output #1: loss = 0.135574 (* 1 = 0.135574 loss)\nI0820 07:07:51.519039 20842 sgd_solver.cpp:166] Iteration 66900, lr = 1.6725\nI0820 07:10:08.933099 20842 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0820 07:11:32.380383 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88264\nI0820 07:11:32.380693 20842 solver.cpp:404]     Test net output #1: loss = 0.40564 (* 1 = 0.40564 loss)\nI0820 07:11:33.697377 20842 solver.cpp:228] Iteration 67000, loss = 0.198716\nI0820 07:11:33.697422 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:11:33.697439 20842 solver.cpp:244]     Train net output #1: loss = 0.198715 (* 1 = 0.198715 loss)\nI0820 07:11:33.798753 20842 sgd_solver.cpp:166] Iteration 67000, lr = 1.675\nI0820 07:13:50.842955 20842 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0820 07:15:14.278954 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0820 07:15:14.279228 20842 solver.cpp:404]     Test net output #1: loss = 0.395929 (* 1 = 0.395929 loss)\nI0820 07:15:15.595407 20842 solver.cpp:228] Iteration 67100, loss = 0.0800964\nI0820 07:15:15.595448 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 07:15:15.595464 20842 solver.cpp:244]     Train net output #1: loss = 0.0800957 (* 1 = 0.0800957 loss)\nI0820 07:15:15.687749 20842 sgd_solver.cpp:166] Iteration 67100, lr = 1.6775\nI0820 07:17:32.730020 20842 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0820 07:18:56.168937 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0820 07:18:56.169250 20842 solver.cpp:404]     Test net output #1: loss = 0.397926 (* 1 = 0.397926 loss)\nI0820 07:18:57.485597 20842 solver.cpp:228] Iteration 67200, loss = 0.2283\nI0820 
07:18:57.485638 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 07:18:57.485654 20842 solver.cpp:244]     Train net output #1: loss = 0.228299 (* 1 = 0.228299 loss)\nI0820 07:18:57.573546 20842 sgd_solver.cpp:166] Iteration 67200, lr = 1.68\nI0820 07:21:14.513808 20842 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0820 07:22:38.378444 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0820 07:22:38.378815 20842 solver.cpp:404]     Test net output #1: loss = 0.388252 (* 1 = 0.388252 loss)\nI0820 07:22:39.698732 20842 solver.cpp:228] Iteration 67300, loss = 0.140638\nI0820 07:22:39.698776 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:22:39.698791 20842 solver.cpp:244]     Train net output #1: loss = 0.140637 (* 1 = 0.140637 loss)\nI0820 07:22:39.782485 20842 sgd_solver.cpp:166] Iteration 67300, lr = 1.6825\nI0820 07:24:56.833665 20842 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0820 07:26:20.267608 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0820 07:26:20.267935 20842 solver.cpp:404]     Test net output #1: loss = 0.408067 (* 1 = 0.408067 loss)\nI0820 07:26:21.585433 20842 solver.cpp:228] Iteration 67400, loss = 0.14506\nI0820 07:26:21.585476 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:26:21.585500 20842 solver.cpp:244]     Train net output #1: loss = 0.14506 (* 1 = 0.14506 loss)\nI0820 07:26:21.681675 20842 sgd_solver.cpp:166] Iteration 67400, lr = 1.685\nI0820 07:28:38.542289 20842 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0820 07:30:02.981070 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88156\nI0820 07:30:02.981426 20842 solver.cpp:404]     Test net output #1: loss = 0.391849 (* 1 = 0.391849 loss)\nI0820 07:30:04.302001 20842 solver.cpp:228] Iteration 67500, loss = 0.115664\nI0820 07:30:04.302047 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:30:04.302062 20842 solver.cpp:244]     
Train net output #1: loss = 0.115663 (* 1 = 0.115663 loss)\nI0820 07:30:04.391157 20842 sgd_solver.cpp:166] Iteration 67500, lr = 1.6875\nI0820 07:32:21.332226 20842 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0820 07:33:45.769433 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87552\nI0820 07:33:45.769832 20842 solver.cpp:404]     Test net output #1: loss = 0.422942 (* 1 = 0.422942 loss)\nI0820 07:33:47.089856 20842 solver.cpp:228] Iteration 67600, loss = 0.18461\nI0820 07:33:47.089900 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 07:33:47.089915 20842 solver.cpp:244]     Train net output #1: loss = 0.184609 (* 1 = 0.184609 loss)\nI0820 07:33:47.178907 20842 sgd_solver.cpp:166] Iteration 67600, lr = 1.69\nI0820 07:36:04.115887 20842 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0820 07:37:28.550354 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0820 07:37:28.550750 20842 solver.cpp:404]     Test net output #1: loss = 0.427719 (* 1 = 0.427719 loss)\nI0820 07:37:29.870975 20842 solver.cpp:228] Iteration 67700, loss = 0.228342\nI0820 07:37:29.871034 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 07:37:29.871052 20842 solver.cpp:244]     Train net output #1: loss = 0.228342 (* 1 = 0.228342 loss)\nI0820 07:37:29.961777 20842 sgd_solver.cpp:166] Iteration 67700, lr = 1.6925\nI0820 07:39:47.044440 20842 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0820 07:41:10.405536 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8872\nI0820 07:41:10.405892 20842 solver.cpp:404]     Test net output #1: loss = 0.388803 (* 1 = 0.388803 loss)\nI0820 07:41:11.723575 20842 solver.cpp:228] Iteration 67800, loss = 0.129927\nI0820 07:41:11.723623 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:41:11.723647 20842 solver.cpp:244]     Train net output #1: loss = 0.129926 (* 1 = 0.129926 loss)\nI0820 07:41:11.805475 20842 sgd_solver.cpp:166] Iteration 67800, lr = 
1.695\nI0820 07:43:28.656718 20842 solver.cpp:337] Iteration 67900, Testing net (#0)\nI0820 07:44:52.005178 20842 solver.cpp:404]     Test net output #0: accuracy = 0.89008\nI0820 07:44:52.005496 20842 solver.cpp:404]     Test net output #1: loss = 0.369555 (* 1 = 0.369555 loss)\nI0820 07:44:53.321939 20842 solver.cpp:228] Iteration 67900, loss = 0.132635\nI0820 07:44:53.321985 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:44:53.322000 20842 solver.cpp:244]     Train net output #1: loss = 0.132634 (* 1 = 0.132634 loss)\nI0820 07:44:53.414575 20842 sgd_solver.cpp:166] Iteration 67900, lr = 1.6975\nI0820 07:47:10.382736 20842 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0820 07:48:33.736603 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0820 07:48:33.736904 20842 solver.cpp:404]     Test net output #1: loss = 0.393387 (* 1 = 0.393387 loss)\nI0820 07:48:35.053169 20842 solver.cpp:228] Iteration 68000, loss = 0.0864285\nI0820 07:48:35.053211 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:48:35.053227 20842 solver.cpp:244]     Train net output #1: loss = 0.0864279 (* 1 = 0.0864279 loss)\nI0820 07:48:35.140064 20842 sgd_solver.cpp:166] Iteration 68000, lr = 1.7\nI0820 07:50:52.285055 20842 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0820 07:52:15.642587 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0820 07:52:15.642940 20842 solver.cpp:404]     Test net output #1: loss = 0.390741 (* 1 = 0.390741 loss)\nI0820 07:52:16.959306 20842 solver.cpp:228] Iteration 68100, loss = 0.176083\nI0820 07:52:16.959349 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 07:52:16.959367 20842 solver.cpp:244]     Train net output #1: loss = 0.176082 (* 1 = 0.176082 loss)\nI0820 07:52:17.046694 20842 sgd_solver.cpp:166] Iteration 68100, lr = 1.7025\nI0820 07:54:33.777622 20842 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0820 07:55:57.130934 20842 
solver.cpp:404]     Test net output #0: accuracy = 0.880321\nI0820 07:55:57.131259 20842 solver.cpp:404]     Test net output #1: loss = 0.407149 (* 1 = 0.407149 loss)\nI0820 07:55:58.447930 20842 solver.cpp:228] Iteration 68200, loss = 0.100273\nI0820 07:55:58.447975 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 07:55:58.447993 20842 solver.cpp:244]     Train net output #1: loss = 0.100272 (* 1 = 0.100272 loss)\nI0820 07:55:58.541580 20842 sgd_solver.cpp:166] Iteration 68200, lr = 1.705\nI0820 07:58:15.611255 20842 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0820 07:59:38.964828 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0820 07:59:38.965144 20842 solver.cpp:404]     Test net output #1: loss = 0.384405 (* 1 = 0.384405 loss)\nI0820 07:59:40.281944 20842 solver.cpp:228] Iteration 68300, loss = 0.206882\nI0820 07:59:40.281985 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 07:59:40.282002 20842 solver.cpp:244]     Train net output #1: loss = 0.206882 (* 1 = 0.206882 loss)\nI0820 07:59:40.367458 20842 sgd_solver.cpp:166] Iteration 68300, lr = 1.7075\nI0820 08:01:57.115555 20842 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0820 08:03:20.460815 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87744\nI0820 08:03:20.461144 20842 solver.cpp:404]     Test net output #1: loss = 0.403387 (* 1 = 0.403387 loss)\nI0820 08:03:21.777516 20842 solver.cpp:228] Iteration 68400, loss = 0.126577\nI0820 08:03:21.777559 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:03:21.777575 20842 solver.cpp:244]     Train net output #1: loss = 0.126576 (* 1 = 0.126576 loss)\nI0820 08:03:21.868882 20842 sgd_solver.cpp:166] Iteration 68400, lr = 1.71\nI0820 08:05:38.629456 20842 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0820 08:07:01.971216 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0820 08:07:01.971535 20842 solver.cpp:404]     Test net output #1: 
loss = 0.414268 (* 1 = 0.414268 loss)\nI0820 08:07:03.288070 20842 solver.cpp:228] Iteration 68500, loss = 0.12156\nI0820 08:07:03.288112 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:07:03.288130 20842 solver.cpp:244]     Train net output #1: loss = 0.12156 (* 1 = 0.12156 loss)\nI0820 08:07:03.375805 20842 sgd_solver.cpp:166] Iteration 68500, lr = 1.7125\nI0820 08:09:20.080607 20842 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0820 08:10:43.430263 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87672\nI0820 08:10:43.430589 20842 solver.cpp:404]     Test net output #1: loss = 0.42379 (* 1 = 0.42379 loss)\nI0820 08:10:44.746464 20842 solver.cpp:228] Iteration 68600, loss = 0.116211\nI0820 08:10:44.746515 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:10:44.746532 20842 solver.cpp:244]     Train net output #1: loss = 0.116211 (* 1 = 0.116211 loss)\nI0820 08:10:44.832118 20842 sgd_solver.cpp:166] Iteration 68600, lr = 1.715\nI0820 08:13:01.552727 20842 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0820 08:14:24.982269 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88748\nI0820 08:14:24.982605 20842 solver.cpp:404]     Test net output #1: loss = 0.377508 (* 1 = 0.377508 loss)\nI0820 08:14:26.299039 20842 solver.cpp:228] Iteration 68700, loss = 0.157479\nI0820 08:14:26.299082 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:14:26.299098 20842 solver.cpp:244]     Train net output #1: loss = 0.157479 (* 1 = 0.157479 loss)\nI0820 08:14:26.386270 20842 sgd_solver.cpp:166] Iteration 68700, lr = 1.7175\nI0820 08:16:43.090641 20842 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0820 08:18:06.522826 20842 solver.cpp:404]     Test net output #0: accuracy = 0.882761\nI0820 08:18:06.523150 20842 solver.cpp:404]     Test net output #1: loss = 0.387652 (* 1 = 0.387652 loss)\nI0820 08:18:07.839287 20842 solver.cpp:228] Iteration 68800, loss = 0.177357\nI0820 
08:18:07.839331 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:18:07.839349 20842 solver.cpp:244]     Train net output #1: loss = 0.177357 (* 1 = 0.177357 loss)\nI0820 08:18:07.931685 20842 sgd_solver.cpp:166] Iteration 68800, lr = 1.72\nI0820 08:20:24.686022 20842 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0820 08:21:48.113365 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8888\nI0820 08:21:48.113672 20842 solver.cpp:404]     Test net output #1: loss = 0.382594 (* 1 = 0.382594 loss)\nI0820 08:21:49.429714 20842 solver.cpp:228] Iteration 68900, loss = 0.122346\nI0820 08:21:49.429759 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:21:49.429775 20842 solver.cpp:244]     Train net output #1: loss = 0.122346 (* 1 = 0.122346 loss)\nI0820 08:21:49.525756 20842 sgd_solver.cpp:166] Iteration 68900, lr = 1.7225\nI0820 08:24:06.570430 20842 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0820 08:25:30.007728 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0820 08:25:30.007997 20842 solver.cpp:404]     Test net output #1: loss = 0.380435 (* 1 = 0.380435 loss)\nI0820 08:25:31.324666 20842 solver.cpp:228] Iteration 69000, loss = 0.114786\nI0820 08:25:31.324712 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:25:31.324728 20842 solver.cpp:244]     Train net output #1: loss = 0.114786 (* 1 = 0.114786 loss)\nI0820 08:25:31.407161 20842 sgd_solver.cpp:166] Iteration 69000, lr = 1.725\nI0820 08:27:48.108480 20842 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0820 08:29:11.545693 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87508\nI0820 08:29:11.545994 20842 solver.cpp:404]     Test net output #1: loss = 0.422054 (* 1 = 0.422054 loss)\nI0820 08:29:12.862510 20842 solver.cpp:228] Iteration 69100, loss = 0.247761\nI0820 08:29:12.862552 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:29:12.862570 20842 solver.cpp:244]     
Train net output #1: loss = 0.24776 (* 1 = 0.24776 loss)\nI0820 08:29:12.943771 20842 sgd_solver.cpp:166] Iteration 69100, lr = 1.7275\nI0820 08:31:29.608536 20842 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0820 08:32:53.042781 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0820 08:32:53.043087 20842 solver.cpp:404]     Test net output #1: loss = 0.408327 (* 1 = 0.408327 loss)\nI0820 08:32:54.358692 20842 solver.cpp:228] Iteration 69200, loss = 0.137076\nI0820 08:32:54.358733 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:32:54.358750 20842 solver.cpp:244]     Train net output #1: loss = 0.137076 (* 1 = 0.137076 loss)\nI0820 08:32:54.451594 20842 sgd_solver.cpp:166] Iteration 69200, lr = 1.73\nI0820 08:35:11.064221 20842 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0820 08:36:34.501281 20842 solver.cpp:404]     Test net output #0: accuracy = 0.874959\nI0820 08:36:34.501590 20842 solver.cpp:404]     Test net output #1: loss = 0.427416 (* 1 = 0.427416 loss)\nI0820 08:36:35.817878 20842 solver.cpp:228] Iteration 69300, loss = 0.217938\nI0820 08:36:35.817919 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:36:35.817936 20842 solver.cpp:244]     Train net output #1: loss = 0.217938 (* 1 = 0.217938 loss)\nI0820 08:36:35.899607 20842 sgd_solver.cpp:166] Iteration 69300, lr = 1.7325\nI0820 08:38:52.552654 20842 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0820 08:40:15.989542 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88064\nI0820 08:40:15.989826 20842 solver.cpp:404]     Test net output #1: loss = 0.4059 (* 1 = 0.4059 loss)\nI0820 08:40:17.305874 20842 solver.cpp:228] Iteration 69400, loss = 0.101566\nI0820 08:40:17.305917 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:40:17.305935 20842 solver.cpp:244]     Train net output #1: loss = 0.101566 (* 1 = 0.101566 loss)\nI0820 08:40:17.397131 20842 sgd_solver.cpp:166] Iteration 69400, lr = 
1.735\nI0820 08:42:34.011919 20842 solver.cpp:337] Iteration 69500, Testing net (#0)\nI0820 08:43:57.437948 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88456\nI0820 08:43:57.438340 20842 solver.cpp:404]     Test net output #1: loss = 0.384724 (* 1 = 0.384724 loss)\nI0820 08:43:58.754559 20842 solver.cpp:228] Iteration 69500, loss = 0.153424\nI0820 08:43:58.754601 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:43:58.754618 20842 solver.cpp:244]     Train net output #1: loss = 0.153424 (* 1 = 0.153424 loss)\nI0820 08:43:58.847801 20842 sgd_solver.cpp:166] Iteration 69500, lr = 1.7375\nI0820 08:46:15.989441 20842 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0820 08:47:39.425082 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0820 08:47:39.425386 20842 solver.cpp:404]     Test net output #1: loss = 0.430813 (* 1 = 0.430813 loss)\nI0820 08:47:40.741920 20842 solver.cpp:228] Iteration 69600, loss = 0.113502\nI0820 08:47:40.741962 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:47:40.741979 20842 solver.cpp:244]     Train net output #1: loss = 0.113502 (* 1 = 0.113502 loss)\nI0820 08:47:40.825073 20842 sgd_solver.cpp:166] Iteration 69600, lr = 1.74\nI0820 08:49:57.946636 20842 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0820 08:51:21.387734 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0820 08:51:21.388023 20842 solver.cpp:404]     Test net output #1: loss = 0.392333 (* 1 = 0.392333 loss)\nI0820 08:51:22.703815 20842 solver.cpp:228] Iteration 69700, loss = 0.10636\nI0820 08:51:22.703856 20842 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 08:51:22.703873 20842 solver.cpp:244]     Train net output #1: loss = 0.10636 (* 1 = 0.10636 loss)\nI0820 08:51:22.781388 20842 sgd_solver.cpp:166] Iteration 69700, lr = 1.7425\nI0820 08:53:39.464992 20842 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0820 08:55:02.895079 20842 solver.cpp:404]   
  Test net output #0: accuracy = 0.87972\nI0820 08:55:02.895375 20842 solver.cpp:404]     Test net output #1: loss = 0.414031 (* 1 = 0.414031 loss)\nI0820 08:55:04.211377 20842 solver.cpp:228] Iteration 69800, loss = 0.209074\nI0820 08:55:04.211417 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 08:55:04.211434 20842 solver.cpp:244]     Train net output #1: loss = 0.209074 (* 1 = 0.209074 loss)\nI0820 08:55:04.300539 20842 sgd_solver.cpp:166] Iteration 69800, lr = 1.745\nI0820 08:57:21.075383 20842 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0820 08:58:44.507004 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0820 08:58:44.507328 20842 solver.cpp:404]     Test net output #1: loss = 0.401074 (* 1 = 0.401074 loss)\nI0820 08:58:45.823546 20842 solver.cpp:228] Iteration 69900, loss = 0.226911\nI0820 08:58:45.823591 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:58:45.823607 20842 solver.cpp:244]     Train net output #1: loss = 0.22691 (* 1 = 0.22691 loss)\nI0820 08:58:45.908936 20842 sgd_solver.cpp:166] Iteration 69900, lr = 1.7475\nI0820 09:01:03.006811 20842 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0820 09:02:26.445399 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0820 09:02:26.445736 20842 solver.cpp:404]     Test net output #1: loss = 0.405043 (* 1 = 0.405043 loss)\nI0820 09:02:27.763079 20842 solver.cpp:228] Iteration 70000, loss = 0.219381\nI0820 09:02:27.763123 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:02:27.763139 20842 solver.cpp:244]     Train net output #1: loss = 0.219381 (* 1 = 0.219381 loss)\nI0820 09:02:27.852089 20842 sgd_solver.cpp:166] Iteration 70000, lr = 1.75\nI0820 09:04:44.512641 20842 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0820 09:06:07.948539 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87896\nI0820 09:06:07.948838 20842 solver.cpp:404]     Test net output #1: loss = 0.414917 (* 
1 = 0.414917 loss)\nI0820 09:06:09.264909 20842 solver.cpp:228] Iteration 70100, loss = 0.123613\nI0820 09:06:09.264953 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 09:06:09.264969 20842 solver.cpp:244]     Train net output #1: loss = 0.123612 (* 1 = 0.123612 loss)\nI0820 09:06:09.349705 20842 sgd_solver.cpp:166] Iteration 70100, lr = 1.7525\nI0820 09:08:26.089694 20842 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0820 09:09:49.489455 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0820 09:09:49.489773 20842 solver.cpp:404]     Test net output #1: loss = 0.372724 (* 1 = 0.372724 loss)\nI0820 09:09:50.805856 20842 solver.cpp:228] Iteration 70200, loss = 0.193578\nI0820 09:09:50.805902 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:09:50.805918 20842 solver.cpp:244]     Train net output #1: loss = 0.193578 (* 1 = 0.193578 loss)\nI0820 09:09:50.893035 20842 sgd_solver.cpp:166] Iteration 70200, lr = 1.755\nI0820 09:12:07.634516 20842 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0820 09:13:31.033723 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0820 09:13:31.034008 20842 solver.cpp:404]     Test net output #1: loss = 0.385933 (* 1 = 0.385933 loss)\nI0820 09:13:32.349354 20842 solver.cpp:228] Iteration 70300, loss = 0.169409\nI0820 09:13:32.349398 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 09:13:32.349413 20842 solver.cpp:244]     Train net output #1: loss = 0.169409 (* 1 = 0.169409 loss)\nI0820 09:13:32.442742 20842 sgd_solver.cpp:166] Iteration 70300, lr = 1.7575\nI0820 09:15:49.517213 20842 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0820 09:17:12.908510 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88344\nI0820 09:17:12.908804 20842 solver.cpp:404]     Test net output #1: loss = 0.398137 (* 1 = 0.398137 loss)\nI0820 09:17:14.224907 20842 solver.cpp:228] Iteration 70400, loss = 0.193685\nI0820 09:17:14.224951 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:17:14.224967 20842 solver.cpp:244]     Train net output #1: loss = 0.193685 (* 1 = 0.193685 loss)\nI0820 09:17:14.317672 20842 sgd_solver.cpp:166] Iteration 70400, lr = 1.76\nI0820 09:19:31.380533 20842 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0820 09:20:54.760697 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88616\nI0820 09:20:54.761031 20842 solver.cpp:404]     Test net output #1: loss = 0.3734 (* 1 = 0.3734 loss)\nI0820 09:20:56.077280 20842 solver.cpp:228] Iteration 70500, loss = 0.0837714\nI0820 09:20:56.077324 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 09:20:56.077342 20842 solver.cpp:244]     Train net output #1: loss = 0.0837709 (* 1 = 0.0837709 loss)\nI0820 09:20:56.166448 20842 sgd_solver.cpp:166] Iteration 70500, lr = 1.7625\nI0820 09:23:12.860692 20842 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0820 09:24:36.256631 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87796\nI0820 09:24:36.256955 20842 solver.cpp:404]     Test net output #1: loss = 0.402293 (* 1 = 0.402293 loss)\nI0820 09:24:37.572319 20842 solver.cpp:228] Iteration 70600, loss = 0.125459\nI0820 09:24:37.572361 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 09:24:37.572378 20842 solver.cpp:244]     Train net output #1: loss = 0.125459 (* 1 = 0.125459 loss)\nI0820 09:24:37.671052 20842 sgd_solver.cpp:166] Iteration 70600, lr = 1.765\nI0820 09:26:54.737872 20842 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0820 09:28:18.127681 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0820 09:28:18.128026 20842 solver.cpp:404]     Test net output #1: loss = 0.388864 (* 1 = 0.388864 loss)\nI0820 09:28:19.444403 20842 solver.cpp:228] Iteration 70700, loss = 0.120785\nI0820 09:28:19.444447 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:28:19.444463 20842 solver.cpp:244]     Train net output #1: 
loss = 0.120784 (* 1 = 0.120784 loss)\nI0820 09:28:19.534896 20842 sgd_solver.cpp:166] Iteration 70700, lr = 1.7675\nI0820 09:30:36.697821 20842 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0820 09:32:00.092936 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87484\nI0820 09:32:00.093262 20842 solver.cpp:404]     Test net output #1: loss = 0.42591 (* 1 = 0.42591 loss)\nI0820 09:32:01.408957 20842 solver.cpp:228] Iteration 70800, loss = 0.197866\nI0820 09:32:01.408998 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 09:32:01.409013 20842 solver.cpp:244]     Train net output #1: loss = 0.197866 (* 1 = 0.197866 loss)\nI0820 09:32:01.493652 20842 sgd_solver.cpp:166] Iteration 70800, lr = 1.77\nI0820 09:34:18.181291 20842 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0820 09:35:41.566525 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0820 09:35:41.566798 20842 solver.cpp:404]     Test net output #1: loss = 0.394893 (* 1 = 0.394893 loss)\nI0820 09:35:42.882951 20842 solver.cpp:228] Iteration 70900, loss = 0.122239\nI0820 09:35:42.882992 20842 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 09:35:42.883009 20842 solver.cpp:244]     Train net output #1: loss = 0.122238 (* 1 = 0.122238 loss)\nI0820 09:35:42.978721 20842 sgd_solver.cpp:166] Iteration 70900, lr = 1.7725\nI0820 09:38:00.072921 20842 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0820 09:39:23.469310 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88332\nI0820 09:39:23.469614 20842 solver.cpp:404]     Test net output #1: loss = 0.391247 (* 1 = 0.391247 loss)\nI0820 09:39:24.785678 20842 solver.cpp:228] Iteration 71000, loss = 0.179888\nI0820 09:39:24.785720 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:39:24.785737 20842 solver.cpp:244]     Train net output #1: loss = 0.179887 (* 1 = 0.179887 loss)\nI0820 09:39:24.869122 20842 sgd_solver.cpp:166] Iteration 71000, lr = 1.775\nI0820 
09:41:41.468277 20842 solver.cpp:337] Iteration 71100, Testing net (#0)\nI0820 09:43:04.858266 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0820 09:43:04.858603 20842 solver.cpp:404]     Test net output #1: loss = 0.401375 (* 1 = 0.401375 loss)\nI0820 09:43:06.174717 20842 solver.cpp:228] Iteration 71100, loss = 0.203676\nI0820 09:43:06.174759 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:43:06.174777 20842 solver.cpp:244]     Train net output #1: loss = 0.203675 (* 1 = 0.203675 loss)\nI0820 09:43:06.257380 20842 sgd_solver.cpp:166] Iteration 71100, lr = 1.7775\nI0820 09:45:22.893766 20842 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0820 09:46:46.279079 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0820 09:46:46.279369 20842 solver.cpp:404]     Test net output #1: loss = 0.392116 (* 1 = 0.392116 loss)\nI0820 09:46:47.595211 20842 solver.cpp:228] Iteration 71200, loss = 0.195691\nI0820 09:46:47.595250 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 09:46:47.595266 20842 solver.cpp:244]     Train net output #1: loss = 0.195691 (* 1 = 0.195691 loss)\nI0820 09:46:47.675611 20842 sgd_solver.cpp:166] Iteration 71200, lr = 1.78\nI0820 09:49:04.347985 20842 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0820 09:50:27.737900 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88108\nI0820 09:50:27.738230 20842 solver.cpp:404]     Test net output #1: loss = 0.422759 (* 1 = 0.422759 loss)\nI0820 09:50:29.054158 20842 solver.cpp:228] Iteration 71300, loss = 0.164793\nI0820 09:50:29.054200 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:50:29.054215 20842 solver.cpp:244]     Train net output #1: loss = 0.164793 (* 1 = 0.164793 loss)\nI0820 09:50:29.142084 20842 sgd_solver.cpp:166] Iteration 71300, lr = 1.7825\nI0820 09:52:46.283892 20842 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0820 09:54:09.677635 20842 solver.cpp:404]     Test 
net output #0: accuracy = 0.88584\nI0820 09:54:09.677958 20842 solver.cpp:404]     Test net output #1: loss = 0.394717 (* 1 = 0.394717 loss)\nI0820 09:54:10.994391 20842 solver.cpp:228] Iteration 71400, loss = 0.140568\nI0820 09:54:10.994432 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:54:10.994448 20842 solver.cpp:244]     Train net output #1: loss = 0.140568 (* 1 = 0.140568 loss)\nI0820 09:54:11.092826 20842 sgd_solver.cpp:166] Iteration 71400, lr = 1.785\nI0820 09:56:27.709672 20842 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0820 09:57:51.013458 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8738\nI0820 09:57:51.013773 20842 solver.cpp:404]     Test net output #1: loss = 0.440531 (* 1 = 0.440531 loss)\nI0820 09:57:52.329874 20842 solver.cpp:228] Iteration 71500, loss = 0.132074\nI0820 09:57:52.329916 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:57:52.329931 20842 solver.cpp:244]     Train net output #1: loss = 0.132073 (* 1 = 0.132073 loss)\nI0820 09:57:52.419220 20842 sgd_solver.cpp:166] Iteration 71500, lr = 1.7875\nI0820 10:00:09.490834 20842 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0820 10:01:32.787847 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87228\nI0820 10:01:32.788168 20842 solver.cpp:404]     Test net output #1: loss = 0.424793 (* 1 = 0.424793 loss)\nI0820 10:01:34.104290 20842 solver.cpp:228] Iteration 71600, loss = 0.162423\nI0820 10:01:34.104331 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:01:34.104347 20842 solver.cpp:244]     Train net output #1: loss = 0.162423 (* 1 = 0.162423 loss)\nI0820 10:01:34.190729 20842 sgd_solver.cpp:166] Iteration 71600, lr = 1.79\nI0820 10:03:50.877931 20842 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0820 10:05:14.176292 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87856\nI0820 10:05:14.176620 20842 solver.cpp:404]     Test net output #1: loss = 0.409754 (* 1 = 
0.409754 loss)\nI0820 10:05:15.493077 20842 solver.cpp:228] Iteration 71700, loss = 0.148937\nI0820 10:05:15.493116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 10:05:15.493134 20842 solver.cpp:244]     Train net output #1: loss = 0.148936 (* 1 = 0.148936 loss)\nI0820 10:05:15.579712 20842 sgd_solver.cpp:166] Iteration 71700, lr = 1.7925\nI0820 10:07:32.389340 20842 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0820 10:08:55.687597 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 10:08:55.687883 20842 solver.cpp:404]     Test net output #1: loss = 0.412652 (* 1 = 0.412652 loss)\nI0820 10:08:56.999806 20842 solver.cpp:228] Iteration 71800, loss = 0.195538\nI0820 10:08:56.999847 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:08:56.999863 20842 solver.cpp:244]     Train net output #1: loss = 0.195537 (* 1 = 0.195537 loss)\nI0820 10:08:57.090852 20842 sgd_solver.cpp:166] Iteration 71800, lr = 1.795\nI0820 10:11:14.357512 20842 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0820 10:12:37.649909 20842 solver.cpp:404]     Test net output #0: accuracy = 0.885241\nI0820 10:12:37.650197 20842 solver.cpp:404]     Test net output #1: loss = 0.372078 (* 1 = 0.372078 loss)\nI0820 10:12:38.961851 20842 solver.cpp:228] Iteration 71900, loss = 0.137763\nI0820 10:12:38.961891 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:12:38.961907 20842 solver.cpp:244]     Train net output #1: loss = 0.137763 (* 1 = 0.137763 loss)\nI0820 10:12:39.054540 20842 sgd_solver.cpp:166] Iteration 71900, lr = 1.7975\nI0820 10:14:56.079996 20842 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0820 10:16:19.373839 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88088\nI0820 10:16:19.374121 20842 solver.cpp:404]     Test net output #1: loss = 0.406142 (* 1 = 0.406142 loss)\nI0820 10:16:20.686519 20842 solver.cpp:228] Iteration 72000, loss = 0.148709\nI0820 10:16:20.686561 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 10:16:20.686579 20842 solver.cpp:244]     Train net output #1: loss = 0.148709 (* 1 = 0.148709 loss)\nI0820 10:16:20.784373 20842 sgd_solver.cpp:166] Iteration 72000, lr = 1.8\nI0820 10:18:37.847685 20842 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0820 10:20:01.154979 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88408\nI0820 10:20:01.155272 20842 solver.cpp:404]     Test net output #1: loss = 0.389605 (* 1 = 0.389605 loss)\nI0820 10:20:02.471155 20842 solver.cpp:228] Iteration 72100, loss = 0.198061\nI0820 10:20:02.471196 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 10:20:02.471212 20842 solver.cpp:244]     Train net output #1: loss = 0.19806 (* 1 = 0.19806 loss)\nI0820 10:20:02.565716 20842 sgd_solver.cpp:166] Iteration 72100, lr = 1.8025\nI0820 10:22:19.722276 20842 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0820 10:23:43.039399 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 10:23:43.039698 20842 solver.cpp:404]     Test net output #1: loss = 0.397237 (* 1 = 0.397237 loss)\nI0820 10:23:44.356210 20842 solver.cpp:228] Iteration 72200, loss = 0.104805\nI0820 10:23:44.356256 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 10:23:44.356271 20842 solver.cpp:244]     Train net output #1: loss = 0.104804 (* 1 = 0.104804 loss)\nI0820 10:23:44.451063 20842 sgd_solver.cpp:166] Iteration 72200, lr = 1.805\nI0820 10:26:01.436369 20842 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0820 10:27:24.761189 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8798\nI0820 10:27:24.761458 20842 solver.cpp:404]     Test net output #1: loss = 0.41527 (* 1 = 0.41527 loss)\nI0820 10:27:26.078071 20842 solver.cpp:228] Iteration 72300, loss = 0.167455\nI0820 10:27:26.078116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:27:26.078133 20842 solver.cpp:244]     Train net output #1: loss 
= 0.167455 (* 1 = 0.167455 loss)\nI0820 10:27:26.168401 20842 sgd_solver.cpp:166] Iteration 72300, lr = 1.8075\nI0820 10:29:43.218118 20842 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0820 10:31:06.633689 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0820 10:31:06.634001 20842 solver.cpp:404]     Test net output #1: loss = 0.416589 (* 1 = 0.416589 loss)\nI0820 10:31:07.950305 20842 solver.cpp:228] Iteration 72400, loss = 0.141424\nI0820 10:31:07.950341 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:31:07.950356 20842 solver.cpp:244]     Train net output #1: loss = 0.141423 (* 1 = 0.141423 loss)\nI0820 10:31:08.036788 20842 sgd_solver.cpp:166] Iteration 72400, lr = 1.81\nI0820 10:33:25.070328 20842 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0820 10:34:48.488028 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88256\nI0820 10:34:48.488310 20842 solver.cpp:404]     Test net output #1: loss = 0.396732 (* 1 = 0.396732 loss)\nI0820 10:34:49.805717 20842 solver.cpp:228] Iteration 72500, loss = 0.203496\nI0820 10:34:49.805765 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:34:49.805789 20842 solver.cpp:244]     Train net output #1: loss = 0.203495 (* 1 = 0.203495 loss)\nI0820 10:34:49.896237 20842 sgd_solver.cpp:166] Iteration 72500, lr = 1.8125\nI0820 10:37:07.067062 20842 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0820 10:38:30.471305 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0820 10:38:30.471617 20842 solver.cpp:404]     Test net output #1: loss = 0.400085 (* 1 = 0.400085 loss)\nI0820 10:38:31.788822 20842 solver.cpp:228] Iteration 72600, loss = 0.191612\nI0820 10:38:31.788858 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:38:31.788874 20842 solver.cpp:244]     Train net output #1: loss = 0.191612 (* 1 = 0.191612 loss)\nI0820 10:38:31.876224 20842 sgd_solver.cpp:166] Iteration 72600, lr = 1.815\nI0820 
10:40:49.062582 20842 solver.cpp:337] Iteration 72700, Testing net (#0)\nI0820 10:42:12.493717 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87884\nI0820 10:42:12.494012 20842 solver.cpp:404]     Test net output #1: loss = 0.407303 (* 1 = 0.407303 loss)\nI0820 10:42:13.811050 20842 solver.cpp:228] Iteration 72700, loss = 0.160458\nI0820 10:42:13.811095 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:42:13.811112 20842 solver.cpp:244]     Train net output #1: loss = 0.160458 (* 1 = 0.160458 loss)\nI0820 10:42:13.895835 20842 sgd_solver.cpp:166] Iteration 72700, lr = 1.8175\nI0820 10:44:30.998915 20842 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0820 10:45:54.403307 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88556\nI0820 10:45:54.403635 20842 solver.cpp:404]     Test net output #1: loss = 0.38785 (* 1 = 0.38785 loss)\nI0820 10:45:55.720379 20842 solver.cpp:228] Iteration 72800, loss = 0.13112\nI0820 10:45:55.720417 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 10:45:55.720448 20842 solver.cpp:244]     Train net output #1: loss = 0.131119 (* 1 = 0.131119 loss)\nI0820 10:45:55.804177 20842 sgd_solver.cpp:166] Iteration 72800, lr = 1.82\nI0820 10:48:12.884369 20842 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0820 10:49:36.291383 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87352\nI0820 10:49:36.291679 20842 solver.cpp:404]     Test net output #1: loss = 0.41123 (* 1 = 0.41123 loss)\nI0820 10:49:37.610246 20842 solver.cpp:228] Iteration 72900, loss = 0.152331\nI0820 10:49:37.610283 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:49:37.610299 20842 solver.cpp:244]     Train net output #1: loss = 0.15233 (* 1 = 0.15233 loss)\nI0820 10:49:37.695175 20842 sgd_solver.cpp:166] Iteration 72900, lr = 1.8225\nI0820 10:51:54.664579 20842 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0820 10:53:18.067427 20842 solver.cpp:404]     Test net 
output #0: accuracy = 0.88036\nI0820 10:53:18.067770 20842 solver.cpp:404]     Test net output #1: loss = 0.421436 (* 1 = 0.421436 loss)\nI0820 10:53:19.384357 20842 solver.cpp:228] Iteration 73000, loss = 0.166394\nI0820 10:53:19.384402 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:53:19.384418 20842 solver.cpp:244]     Train net output #1: loss = 0.166394 (* 1 = 0.166394 loss)\nI0820 10:53:19.470161 20842 sgd_solver.cpp:166] Iteration 73000, lr = 1.825\nI0820 10:55:36.902660 20842 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0820 10:57:01.311769 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87396\nI0820 10:57:01.312144 20842 solver.cpp:404]     Test net output #1: loss = 0.416421 (* 1 = 0.416421 loss)\nI0820 10:57:02.628962 20842 solver.cpp:228] Iteration 73100, loss = 0.207636\nI0820 10:57:02.629019 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:57:02.629037 20842 solver.cpp:244]     Train net output #1: loss = 0.207635 (* 1 = 0.207635 loss)\nI0820 10:57:02.715360 20842 sgd_solver.cpp:166] Iteration 73100, lr = 1.8275\nI0820 10:59:19.875196 20842 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0820 11:00:44.285593 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0820 11:00:44.285957 20842 solver.cpp:404]     Test net output #1: loss = 0.416827 (* 1 = 0.416827 loss)\nI0820 11:00:45.602982 20842 solver.cpp:228] Iteration 73200, loss = 0.181955\nI0820 11:00:45.603040 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:00:45.603058 20842 solver.cpp:244]     Train net output #1: loss = 0.181955 (* 1 = 0.181955 loss)\nI0820 11:00:45.685534 20842 sgd_solver.cpp:166] Iteration 73200, lr = 1.83\nI0820 11:03:02.811825 20842 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0820 11:04:27.220163 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87532\nI0820 11:04:27.220538 20842 solver.cpp:404]     Test net output #1: loss = 0.411628 (* 1 = 
0.411628 loss)\nI0820 11:04:28.537873 20842 solver.cpp:228] Iteration 73300, loss = 0.100253\nI0820 11:04:28.537932 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:04:28.537950 20842 solver.cpp:244]     Train net output #1: loss = 0.100252 (* 1 = 0.100252 loss)\nI0820 11:04:28.619565 20842 sgd_solver.cpp:166] Iteration 73300, lr = 1.8325\nI0820 11:06:45.716809 20842 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0820 11:08:10.131386 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88616\nI0820 11:08:10.131779 20842 solver.cpp:404]     Test net output #1: loss = 0.369743 (* 1 = 0.369743 loss)\nI0820 11:08:11.452203 20842 solver.cpp:228] Iteration 73400, loss = 0.11163\nI0820 11:08:11.452262 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:08:11.452280 20842 solver.cpp:244]     Train net output #1: loss = 0.111629 (* 1 = 0.111629 loss)\nI0820 11:08:11.535126 20842 sgd_solver.cpp:166] Iteration 73400, lr = 1.835\nI0820 11:10:28.741948 20842 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0820 11:11:53.147486 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0820 11:11:53.147856 20842 solver.cpp:404]     Test net output #1: loss = 0.402344 (* 1 = 0.402344 loss)\nI0820 11:11:54.468582 20842 solver.cpp:228] Iteration 73500, loss = 0.234614\nI0820 11:11:54.468633 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 11:11:54.468650 20842 solver.cpp:244]     Train net output #1: loss = 0.234613 (* 1 = 0.234613 loss)\nI0820 11:11:54.555621 20842 sgd_solver.cpp:166] Iteration 73500, lr = 1.8375\nI0820 11:14:11.646297 20842 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0820 11:15:36.060020 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 11:15:36.060394 20842 solver.cpp:404]     Test net output #1: loss = 0.419608 (* 1 = 0.419608 loss)\nI0820 11:15:37.380895 20842 solver.cpp:228] Iteration 73600, loss = 0.206221\nI0820 11:15:37.380949 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 11:15:37.380967 20842 solver.cpp:244]     Train net output #1: loss = 0.20622 (* 1 = 0.20622 loss)\nI0820 11:15:37.469578 20842 sgd_solver.cpp:166] Iteration 73600, lr = 1.84\nI0820 11:17:54.577786 20842 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0820 11:19:18.973832 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0820 11:19:18.974242 20842 solver.cpp:404]     Test net output #1: loss = 0.42288 (* 1 = 0.42288 loss)\nI0820 11:19:20.294608 20842 solver.cpp:228] Iteration 73700, loss = 0.170938\nI0820 11:19:20.294665 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 11:19:20.294682 20842 solver.cpp:244]     Train net output #1: loss = 0.170938 (* 1 = 0.170938 loss)\nI0820 11:19:20.379351 20842 sgd_solver.cpp:166] Iteration 73700, lr = 1.8425\nI0820 11:21:37.550518 20842 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0820 11:23:01.932524 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88524\nI0820 11:23:01.932889 20842 solver.cpp:404]     Test net output #1: loss = 0.38079 (* 1 = 0.38079 loss)\nI0820 11:23:03.253199 20842 solver.cpp:228] Iteration 73800, loss = 0.253533\nI0820 11:23:03.253255 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:23:03.253273 20842 solver.cpp:244]     Train net output #1: loss = 0.253533 (* 1 = 0.253533 loss)\nI0820 11:23:03.343252 20842 sgd_solver.cpp:166] Iteration 73800, lr = 1.845\nI0820 11:25:20.546489 20842 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0820 11:26:44.942811 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0820 11:26:44.943228 20842 solver.cpp:404]     Test net output #1: loss = 0.409913 (* 1 = 0.409913 loss)\nI0820 11:26:46.263902 20842 solver.cpp:228] Iteration 73900, loss = 0.202871\nI0820 11:26:46.263954 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 11:26:46.263972 20842 solver.cpp:244]     Train net output #1: loss = 
0.202871 (* 1 = 0.202871 loss)\nI0820 11:26:46.349311 20842 sgd_solver.cpp:166] Iteration 73900, lr = 1.8475\nI0820 11:29:03.523772 20842 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0820 11:30:27.923853 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88412\nI0820 11:30:27.924226 20842 solver.cpp:404]     Test net output #1: loss = 0.391784 (* 1 = 0.391784 loss)\nI0820 11:30:29.244736 20842 solver.cpp:228] Iteration 74000, loss = 0.212572\nI0820 11:30:29.244794 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 11:30:29.244812 20842 solver.cpp:244]     Train net output #1: loss = 0.212571 (* 1 = 0.212571 loss)\nI0820 11:30:29.332615 20842 sgd_solver.cpp:166] Iteration 74000, lr = 1.85\nI0820 11:32:46.480684 20842 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0820 11:34:10.882278 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0820 11:34:10.882673 20842 solver.cpp:404]     Test net output #1: loss = 0.391157 (* 1 = 0.391157 loss)\nI0820 11:34:12.202919 20842 solver.cpp:228] Iteration 74100, loss = 0.200313\nI0820 11:34:12.202976 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:34:12.202993 20842 solver.cpp:244]     Train net output #1: loss = 0.200313 (* 1 = 0.200313 loss)\nI0820 11:34:12.294441 20842 sgd_solver.cpp:166] Iteration 74100, lr = 1.8525\nI0820 11:36:29.404875 20842 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0820 11:37:53.809739 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88348\nI0820 11:37:53.810154 20842 solver.cpp:404]     Test net output #1: loss = 0.392123 (* 1 = 0.392123 loss)\nI0820 11:37:55.130719 20842 solver.cpp:228] Iteration 74200, loss = 0.158251\nI0820 11:37:55.130774 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 11:37:55.130792 20842 solver.cpp:244]     Train net output #1: loss = 0.15825 (* 1 = 0.15825 loss)\nI0820 11:37:55.221846 20842 sgd_solver.cpp:166] Iteration 74200, lr = 1.855\nI0820 
11:40:12.720371 20842 solver.cpp:337] Iteration 74300, Testing net (#0)\nI0820 11:41:37.126166 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87512\nI0820 11:41:37.126574 20842 solver.cpp:404]     Test net output #1: loss = 0.414117 (* 1 = 0.414117 loss)\nI0820 11:41:38.446709 20842 solver.cpp:228] Iteration 74300, loss = 0.214109\nI0820 11:41:38.446768 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 11:41:38.446784 20842 solver.cpp:244]     Train net output #1: loss = 0.214109 (* 1 = 0.214109 loss)\nI0820 11:41:38.537848 20842 sgd_solver.cpp:166] Iteration 74300, lr = 1.8575\nI0820 11:43:55.725939 20842 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0820 11:45:20.129474 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87232\nI0820 11:45:20.129845 20842 solver.cpp:404]     Test net output #1: loss = 0.43516 (* 1 = 0.43516 loss)\nI0820 11:45:21.450160 20842 solver.cpp:228] Iteration 74400, loss = 0.260716\nI0820 11:45:21.450211 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 11:45:21.450227 20842 solver.cpp:244]     Train net output #1: loss = 0.260715 (* 1 = 0.260715 loss)\nI0820 11:45:21.535776 20842 sgd_solver.cpp:166] Iteration 74400, lr = 1.86\nI0820 11:47:38.723768 20842 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0820 11:49:03.130394 20842 solver.cpp:404]     Test net output #0: accuracy = 0.872521\nI0820 11:49:03.130779 20842 solver.cpp:404]     Test net output #1: loss = 0.434006 (* 1 = 0.434006 loss)\nI0820 11:49:04.450929 20842 solver.cpp:228] Iteration 74500, loss = 0.153093\nI0820 11:49:04.450986 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:49:04.451004 20842 solver.cpp:244]     Train net output #1: loss = 0.153093 (* 1 = 0.153093 loss)\nI0820 11:49:04.544814 20842 sgd_solver.cpp:166] Iteration 74500, lr = 1.8625\nI0820 11:51:21.867285 20842 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0820 11:52:46.268085 20842 solver.cpp:404]     Test 
net output #0: accuracy = 0.886401\nI0820 11:52:46.268465 20842 solver.cpp:404]     Test net output #1: loss = 0.378168 (* 1 = 0.378168 loss)\nI0820 11:52:47.588768 20842 solver.cpp:228] Iteration 74600, loss = 0.101857\nI0820 11:52:47.588824 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:52:47.588841 20842 solver.cpp:244]     Train net output #1: loss = 0.101857 (* 1 = 0.101857 loss)\nI0820 11:52:47.676580 20842 sgd_solver.cpp:166] Iteration 74600, lr = 1.865\nI0820 11:55:05.213531 20842 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0820 11:56:29.618610 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88396\nI0820 11:56:29.619016 20842 solver.cpp:404]     Test net output #1: loss = 0.380702 (* 1 = 0.380702 loss)\nI0820 11:56:30.937726 20842 solver.cpp:228] Iteration 74700, loss = 0.260995\nI0820 11:56:30.937782 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 11:56:30.937799 20842 solver.cpp:244]     Train net output #1: loss = 0.260994 (* 1 = 0.260994 loss)\nI0820 11:56:31.033926 20842 sgd_solver.cpp:166] Iteration 74700, lr = 1.8675\nI0820 11:58:48.133831 20842 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0820 12:00:12.544275 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88704\nI0820 12:00:12.544634 20842 solver.cpp:404]     Test net output #1: loss = 0.382826 (* 1 = 0.382826 loss)\nI0820 12:00:13.863749 20842 solver.cpp:228] Iteration 74800, loss = 0.154\nI0820 12:00:13.863806 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:00:13.863823 20842 solver.cpp:244]     Train net output #1: loss = 0.153999 (* 1 = 0.153999 loss)\nI0820 12:00:13.955410 20842 sgd_solver.cpp:166] Iteration 74800, lr = 1.87\nI0820 12:02:31.071135 20842 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0820 12:03:55.471428 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88712\nI0820 12:03:55.471808 20842 solver.cpp:404]     Test net output #1: loss = 0.39596 (* 1 = 
0.39596 loss)\nI0820 12:03:56.791072 20842 solver.cpp:228] Iteration 74900, loss = 0.110531\nI0820 12:03:56.791128 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:03:56.791147 20842 solver.cpp:244]     Train net output #1: loss = 0.110531 (* 1 = 0.110531 loss)\nI0820 12:03:56.873361 20842 sgd_solver.cpp:166] Iteration 74900, lr = 1.8725\nI0820 12:06:13.985395 20842 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0820 12:07:38.376547 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88316\nI0820 12:07:38.376926 20842 solver.cpp:404]     Test net output #1: loss = 0.380448 (* 1 = 0.380448 loss)\nI0820 12:07:39.695639 20842 solver.cpp:228] Iteration 75000, loss = 0.192857\nI0820 12:07:39.695699 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 12:07:39.695718 20842 solver.cpp:244]     Train net output #1: loss = 0.192856 (* 1 = 0.192856 loss)\nI0820 12:07:39.782214 20842 sgd_solver.cpp:166] Iteration 75000, lr = 1.875\nI0820 12:09:57.228147 20842 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0820 12:11:21.632367 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8864\nI0820 12:11:21.632750 20842 solver.cpp:404]     Test net output #1: loss = 0.391014 (* 1 = 0.391014 loss)\nI0820 12:11:22.951467 20842 solver.cpp:228] Iteration 75100, loss = 0.19897\nI0820 12:11:22.951524 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:11:22.951541 20842 solver.cpp:244]     Train net output #1: loss = 0.19897 (* 1 = 0.19897 loss)\nI0820 12:11:23.030867 20842 sgd_solver.cpp:166] Iteration 75100, lr = 1.8775\nI0820 12:13:40.159904 20842 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0820 12:15:04.575795 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88868\nI0820 12:15:04.576202 20842 solver.cpp:404]     Test net output #1: loss = 0.376814 (* 1 = 0.376814 loss)\nI0820 12:15:05.895586 20842 solver.cpp:228] Iteration 75200, loss = 0.0969723\nI0820 12:15:05.895642 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 12:15:05.895663 20842 solver.cpp:244]     Train net output #1: loss = 0.0969717 (* 1 = 0.0969717 loss)\nI0820 12:15:05.977330 20842 sgd_solver.cpp:166] Iteration 75200, lr = 1.88\nI0820 12:17:23.231995 20842 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0820 12:18:47.648564 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0820 12:18:47.648949 20842 solver.cpp:404]     Test net output #1: loss = 0.376578 (* 1 = 0.376578 loss)\nI0820 12:18:48.967850 20842 solver.cpp:228] Iteration 75300, loss = 0.240471\nI0820 12:18:48.967908 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 12:18:48.967926 20842 solver.cpp:244]     Train net output #1: loss = 0.24047 (* 1 = 0.24047 loss)\nI0820 12:18:49.058707 20842 sgd_solver.cpp:166] Iteration 75300, lr = 1.8825\nI0820 12:21:06.292901 20842 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0820 12:22:30.694491 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0820 12:22:30.694877 20842 solver.cpp:404]     Test net output #1: loss = 0.375571 (* 1 = 0.375571 loss)\nI0820 12:22:32.013880 20842 solver.cpp:228] Iteration 75400, loss = 0.11817\nI0820 12:22:32.013936 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:22:32.013953 20842 solver.cpp:244]     Train net output #1: loss = 0.11817 (* 1 = 0.11817 loss)\nI0820 12:22:32.102432 20842 sgd_solver.cpp:166] Iteration 75400, lr = 1.885\nI0820 12:24:49.322607 20842 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0820 12:26:13.734220 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8728\nI0820 12:26:13.734634 20842 solver.cpp:404]     Test net output #1: loss = 0.40236 (* 1 = 0.40236 loss)\nI0820 12:26:15.054255 20842 solver.cpp:228] Iteration 75500, loss = 0.162503\nI0820 12:26:15.054311 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:26:15.054329 20842 solver.cpp:244]     Train net output #1: loss = 
0.162502 (* 1 = 0.162502 loss)\nI0820 12:26:15.138218 20842 sgd_solver.cpp:166] Iteration 75500, lr = 1.8875\nI0820 12:28:32.696117 20842 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0820 12:29:57.113706 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0820 12:29:57.114081 20842 solver.cpp:404]     Test net output #1: loss = 0.395938 (* 1 = 0.395938 loss)\nI0820 12:29:58.432875 20842 solver.cpp:228] Iteration 75600, loss = 0.273777\nI0820 12:29:58.432926 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 12:29:58.432942 20842 solver.cpp:244]     Train net output #1: loss = 0.273776 (* 1 = 0.273776 loss)\nI0820 12:29:58.518131 20842 sgd_solver.cpp:166] Iteration 75600, lr = 1.89\nI0820 12:32:15.581318 20842 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0820 12:33:39.995687 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0820 12:33:39.996079 20842 solver.cpp:404]     Test net output #1: loss = 0.398024 (* 1 = 0.398024 loss)\nI0820 12:33:41.316280 20842 solver.cpp:228] Iteration 75700, loss = 0.131605\nI0820 12:33:41.316337 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:33:41.316355 20842 solver.cpp:244]     Train net output #1: loss = 0.131605 (* 1 = 0.131605 loss)\nI0820 12:33:41.406489 20842 sgd_solver.cpp:166] Iteration 75700, lr = 1.8925\nI0820 12:35:58.484845 20842 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0820 12:37:22.893810 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0820 12:37:22.894187 20842 solver.cpp:404]     Test net output #1: loss = 0.401272 (* 1 = 0.401272 loss)\nI0820 12:37:24.213454 20842 solver.cpp:228] Iteration 75800, loss = 0.261589\nI0820 12:37:24.213507 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 12:37:24.213524 20842 solver.cpp:244]     Train net output #1: loss = 0.261588 (* 1 = 0.261588 loss)\nI0820 12:37:24.304667 20842 sgd_solver.cpp:166] Iteration 75800, lr = 1.895\nI0820 
12:39:41.445267 20842 solver.cpp:337] Iteration 75900, Testing net (#0)\nI0820 12:41:05.849671 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87812\nI0820 12:41:05.850035 20842 solver.cpp:404]     Test net output #1: loss = 0.396368 (* 1 = 0.396368 loss)\nI0820 12:41:07.169217 20842 solver.cpp:228] Iteration 75900, loss = 0.198071\nI0820 12:41:07.169268 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 12:41:07.169286 20842 solver.cpp:244]     Train net output #1: loss = 0.19807 (* 1 = 0.19807 loss)\nI0820 12:41:07.257551 20842 sgd_solver.cpp:166] Iteration 75900, lr = 1.8975\nI0820 12:43:24.700297 20842 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0820 12:44:49.144856 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87504\nI0820 12:44:49.145259 20842 solver.cpp:404]     Test net output #1: loss = 0.414246 (* 1 = 0.414246 loss)\nI0820 12:44:50.464226 20842 solver.cpp:228] Iteration 76000, loss = 0.187909\nI0820 12:44:50.464282 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 12:44:50.464306 20842 solver.cpp:244]     Train net output #1: loss = 0.187909 (* 1 = 0.187909 loss)\nI0820 12:44:50.552603 20842 sgd_solver.cpp:166] Iteration 76000, lr = 1.9\nI0820 12:47:07.638604 20842 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0820 12:48:32.040210 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0820 12:48:32.040606 20842 solver.cpp:404]     Test net output #1: loss = 0.395721 (* 1 = 0.395721 loss)\nI0820 12:48:33.360054 20842 solver.cpp:228] Iteration 76100, loss = 0.157021\nI0820 12:48:33.360112 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 12:48:33.360131 20842 solver.cpp:244]     Train net output #1: loss = 0.15702 (* 1 = 0.15702 loss)\nI0820 12:48:33.449123 20842 sgd_solver.cpp:166] Iteration 76100, lr = 1.9025\nI0820 12:50:50.565142 20842 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0820 12:52:14.955379 20842 solver.cpp:404]     Test net 
output #0: accuracy = 0.88064\nI0820 12:52:14.955767 20842 solver.cpp:404]     Test net output #1: loss = 0.391182 (* 1 = 0.391182 loss)\nI0820 12:52:16.275274 20842 solver.cpp:228] Iteration 76200, loss = 0.174705\nI0820 12:52:16.275328 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:52:16.275346 20842 solver.cpp:244]     Train net output #1: loss = 0.174704 (* 1 = 0.174704 loss)\nI0820 12:52:16.364588 20842 sgd_solver.cpp:166] Iteration 76200, lr = 1.905\nI0820 12:54:33.551964 20842 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0820 12:55:57.947762 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87664\nI0820 12:55:57.948138 20842 solver.cpp:404]     Test net output #1: loss = 0.398581 (* 1 = 0.398581 loss)\nI0820 12:55:59.268378 20842 solver.cpp:228] Iteration 76300, loss = 0.136924\nI0820 12:55:59.268435 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:55:59.268453 20842 solver.cpp:244]     Train net output #1: loss = 0.136924 (* 1 = 0.136924 loss)\nI0820 12:55:59.351934 20842 sgd_solver.cpp:166] Iteration 76300, lr = 1.9075\nI0820 12:58:16.511675 20842 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0820 12:59:40.920110 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87488\nI0820 12:59:40.920522 20842 solver.cpp:404]     Test net output #1: loss = 0.43736 (* 1 = 0.43736 loss)\nI0820 12:59:42.239924 20842 solver.cpp:228] Iteration 76400, loss = 0.186907\nI0820 12:59:42.239981 20842 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 12:59:42.240000 20842 solver.cpp:244]     Train net output #1: loss = 0.186907 (* 1 = 0.186907 loss)\nI0820 12:59:42.322083 20842 sgd_solver.cpp:166] Iteration 76400, lr = 1.91\nI0820 13:01:59.499156 20842 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0820 13:03:23.890663 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88332\nI0820 13:03:23.891047 20842 solver.cpp:404]     Test net output #1: loss = 0.377653 (* 1 = 
0.377653 loss)\nI0820 13:03:25.209921 20842 solver.cpp:228] Iteration 76500, loss = 0.123215\nI0820 13:03:25.209981 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:03:25.210000 20842 solver.cpp:244]     Train net output #1: loss = 0.123215 (* 1 = 0.123215 loss)\nI0820 13:03:25.305713 20842 sgd_solver.cpp:166] Iteration 76500, lr = 1.9125\nI0820 13:05:42.381877 20842 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0820 13:07:06.779711 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87696\nI0820 13:07:06.780108 20842 solver.cpp:404]     Test net output #1: loss = 0.410515 (* 1 = 0.410515 loss)\nI0820 13:07:08.099890 20842 solver.cpp:228] Iteration 76600, loss = 0.154369\nI0820 13:07:08.099949 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:07:08.099967 20842 solver.cpp:244]     Train net output #1: loss = 0.154369 (* 1 = 0.154369 loss)\nI0820 13:07:08.186914 20842 sgd_solver.cpp:166] Iteration 76600, lr = 1.915\nI0820 13:09:25.261548 20842 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0820 13:10:49.657143 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87412\nI0820 13:10:49.657516 20842 solver.cpp:404]     Test net output #1: loss = 0.419498 (* 1 = 0.419498 loss)\nI0820 13:10:50.973847 20842 solver.cpp:228] Iteration 76700, loss = 0.21859\nI0820 13:10:50.973899 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:10:50.973917 20842 solver.cpp:244]     Train net output #1: loss = 0.218589 (* 1 = 0.218589 loss)\nI0820 13:10:51.060871 20842 sgd_solver.cpp:166] Iteration 76700, lr = 1.9175\nI0820 13:13:08.207080 20842 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0820 13:14:32.600174 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8864\nI0820 13:14:32.600546 20842 solver.cpp:404]     Test net output #1: loss = 0.378966 (* 1 = 0.378966 loss)\nI0820 13:14:33.916429 20842 solver.cpp:228] Iteration 76800, loss = 0.0927396\nI0820 13:14:33.916489 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 13:14:33.916507 20842 solver.cpp:244]     Train net output #1: loss = 0.0927391 (* 1 = 0.0927391 loss)\nI0820 13:14:34.004016 20842 sgd_solver.cpp:166] Iteration 76800, lr = 1.92\nI0820 13:16:51.196981 20842 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0820 13:18:15.591481 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0820 13:18:15.591859 20842 solver.cpp:404]     Test net output #1: loss = 0.389112 (* 1 = 0.389112 loss)\nI0820 13:18:16.908073 20842 solver.cpp:228] Iteration 76900, loss = 0.159246\nI0820 13:18:16.908116 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:18:16.908133 20842 solver.cpp:244]     Train net output #1: loss = 0.159245 (* 1 = 0.159245 loss)\nI0820 13:18:16.995975 20842 sgd_solver.cpp:166] Iteration 76900, lr = 1.9225\nI0820 13:20:34.084161 20842 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0820 13:21:58.505561 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 13:21:58.505944 20842 solver.cpp:404]     Test net output #1: loss = 0.402053 (* 1 = 0.402053 loss)\nI0820 13:21:59.822005 20842 solver.cpp:228] Iteration 77000, loss = 0.227358\nI0820 13:21:59.822067 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:21:59.822093 20842 solver.cpp:244]     Train net output #1: loss = 0.227357 (* 1 = 0.227357 loss)\nI0820 13:21:59.907891 20842 sgd_solver.cpp:166] Iteration 77000, lr = 1.925\nI0820 13:24:17.068101 20842 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0820 13:25:41.481161 20842 solver.cpp:404]     Test net output #0: accuracy = 0.883681\nI0820 13:25:41.481568 20842 solver.cpp:404]     Test net output #1: loss = 0.39286 (* 1 = 0.39286 loss)\nI0820 13:25:42.798858 20842 solver.cpp:228] Iteration 77100, loss = 0.174977\nI0820 13:25:42.798923 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:25:42.798949 20842 solver.cpp:244]     Train net output 
#1: loss = 0.174976 (* 1 = 0.174976 loss)\nI0820 13:25:42.887419 20842 sgd_solver.cpp:166] Iteration 77100, lr = 1.9275\nI0820 13:28:00.051168 20842 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0820 13:29:24.450009 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8812\nI0820 13:29:24.450390 20842 solver.cpp:404]     Test net output #1: loss = 0.391204 (* 1 = 0.391204 loss)\nI0820 13:29:25.767725 20842 solver.cpp:228] Iteration 77200, loss = 0.131507\nI0820 13:29:25.767773 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:29:25.767796 20842 solver.cpp:244]     Train net output #1: loss = 0.131506 (* 1 = 0.131506 loss)\nI0820 13:29:25.860415 20842 sgd_solver.cpp:166] Iteration 77200, lr = 1.93\nI0820 13:31:43.134465 20842 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0820 13:33:07.529098 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0820 13:33:07.529490 20842 solver.cpp:404]     Test net output #1: loss = 0.421515 (* 1 = 0.421515 loss)\nI0820 13:33:08.845530 20842 solver.cpp:228] Iteration 77300, loss = 0.187638\nI0820 13:33:08.845597 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:33:08.845621 20842 solver.cpp:244]     Train net output #1: loss = 0.187637 (* 1 = 0.187637 loss)\nI0820 13:33:08.937134 20842 sgd_solver.cpp:166] Iteration 77300, lr = 1.9325\nI0820 13:35:26.147974 20842 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0820 13:36:50.560389 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0820 13:36:50.560812 20842 solver.cpp:404]     Test net output #1: loss = 0.383249 (* 1 = 0.383249 loss)\nI0820 13:36:51.876709 20842 solver.cpp:228] Iteration 77400, loss = 0.154189\nI0820 13:36:51.876755 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 13:36:51.876777 20842 solver.cpp:244]     Train net output #1: loss = 0.154189 (* 1 = 0.154189 loss)\nI0820 13:36:51.960755 20842 sgd_solver.cpp:166] Iteration 77400, lr = 1.935\nI0820 
13:39:09.445700 20842 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0820 13:40:33.846125 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 13:40:33.846493 20842 solver.cpp:404]     Test net output #1: loss = 0.413602 (* 1 = 0.413602 loss)\nI0820 13:40:35.162994 20842 solver.cpp:228] Iteration 77500, loss = 0.113884\nI0820 13:40:35.163038 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:40:35.163054 20842 solver.cpp:244]     Train net output #1: loss = 0.113883 (* 1 = 0.113883 loss)\nI0820 13:40:35.254389 20842 sgd_solver.cpp:166] Iteration 77500, lr = 1.9375\nI0820 13:42:52.500490 20842 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0820 13:44:16.969849 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0820 13:44:16.970234 20842 solver.cpp:404]     Test net output #1: loss = 0.388301 (* 1 = 0.388301 loss)\nI0820 13:44:18.286182 20842 solver.cpp:228] Iteration 77600, loss = 0.200144\nI0820 13:44:18.286227 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:44:18.286250 20842 solver.cpp:244]     Train net output #1: loss = 0.200144 (* 1 = 0.200144 loss)\nI0820 13:44:18.379297 20842 sgd_solver.cpp:166] Iteration 77600, lr = 1.94\nI0820 13:46:35.623071 20842 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0820 13:48:00.033715 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87516\nI0820 13:48:00.034104 20842 solver.cpp:404]     Test net output #1: loss = 0.416957 (* 1 = 0.416957 loss)\nI0820 13:48:01.351430 20842 solver.cpp:228] Iteration 77700, loss = 0.172819\nI0820 13:48:01.351476 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:48:01.351500 20842 solver.cpp:244]     Train net output #1: loss = 0.172819 (* 1 = 0.172819 loss)\nI0820 13:48:01.438890 20842 sgd_solver.cpp:166] Iteration 77700, lr = 1.9425\nI0820 13:50:19.037582 20842 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0820 13:51:43.450026 20842 solver.cpp:404]     Test 
net output #0: accuracy = 0.88856\nI0820 13:51:43.450440 20842 solver.cpp:404]     Test net output #1: loss = 0.372775 (* 1 = 0.372775 loss)\nI0820 13:51:44.767757 20842 solver.cpp:228] Iteration 77800, loss = 0.152388\nI0820 13:51:44.767803 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 13:51:44.767827 20842 solver.cpp:244]     Train net output #1: loss = 0.152388 (* 1 = 0.152388 loss)\nI0820 13:51:44.856040 20842 sgd_solver.cpp:166] Iteration 77800, lr = 1.945\nI0820 13:54:02.056206 20842 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0820 13:55:26.471338 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88468\nI0820 13:55:26.471725 20842 solver.cpp:404]     Test net output #1: loss = 0.385011 (* 1 = 0.385011 loss)\nI0820 13:55:27.787936 20842 solver.cpp:228] Iteration 77900, loss = 0.210457\nI0820 13:55:27.787981 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:55:27.788003 20842 solver.cpp:244]     Train net output #1: loss = 0.210457 (* 1 = 0.210457 loss)\nI0820 13:55:27.881044 20842 sgd_solver.cpp:166] Iteration 77900, lr = 1.9475\nI0820 13:57:45.086230 20842 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0820 13:59:09.496541 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0820 13:59:09.496932 20842 solver.cpp:404]     Test net output #1: loss = 0.404409 (* 1 = 0.404409 loss)\nI0820 13:59:10.814411 20842 solver.cpp:228] Iteration 78000, loss = 0.167463\nI0820 13:59:10.814474 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:59:10.814499 20842 solver.cpp:244]     Train net output #1: loss = 0.167462 (* 1 = 0.167462 loss)\nI0820 13:59:10.906059 20842 sgd_solver.cpp:166] Iteration 78000, lr = 1.95\nI0820 14:01:28.474560 20842 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0820 14:02:52.880157 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0820 14:02:52.880556 20842 solver.cpp:404]     Test net output #1: loss = 0.395915 (* 1 = 
0.395915 loss)\nI0820 14:02:54.197612 20842 solver.cpp:228] Iteration 78100, loss = 0.180993\nI0820 14:02:54.197674 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:02:54.197700 20842 solver.cpp:244]     Train net output #1: loss = 0.180993 (* 1 = 0.180993 loss)\nI0820 14:02:54.287102 20842 sgd_solver.cpp:166] Iteration 78100, lr = 1.9525\nI0820 14:05:11.831631 20842 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0820 14:06:36.250731 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87536\nI0820 14:06:36.251149 20842 solver.cpp:404]     Test net output #1: loss = 0.42019 (* 1 = 0.42019 loss)\nI0820 14:06:37.567936 20842 solver.cpp:228] Iteration 78200, loss = 0.197533\nI0820 14:06:37.567981 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 14:06:37.568006 20842 solver.cpp:244]     Train net output #1: loss = 0.197533 (* 1 = 0.197533 loss)\nI0820 14:06:37.659564 20842 sgd_solver.cpp:166] Iteration 78200, lr = 1.955\nI0820 14:08:54.777200 20842 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0820 14:10:19.190462 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0820 14:10:19.190873 20842 solver.cpp:404]     Test net output #1: loss = 0.383875 (* 1 = 0.383875 loss)\nI0820 14:10:20.506988 20842 solver.cpp:228] Iteration 78300, loss = 0.154099\nI0820 14:10:20.507033 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:10:20.507056 20842 solver.cpp:244]     Train net output #1: loss = 0.154098 (* 1 = 0.154098 loss)\nI0820 14:10:20.597056 20842 sgd_solver.cpp:166] Iteration 78300, lr = 1.9575\nI0820 14:12:37.702754 20842 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0820 14:14:02.111524 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0820 14:14:02.111930 20842 solver.cpp:404]     Test net output #1: loss = 0.402005 (* 1 = 0.402005 loss)\nI0820 14:14:03.429002 20842 solver.cpp:228] Iteration 78400, loss = 0.11446\nI0820 14:14:03.429049 20842 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:14:03.429072 20842 solver.cpp:244]     Train net output #1: loss = 0.11446 (* 1 = 0.11446 loss)\nI0820 14:14:03.521221 20842 sgd_solver.cpp:166] Iteration 78400, lr = 1.96\nI0820 14:16:20.660981 20842 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0820 14:17:45.073217 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87708\nI0820 14:17:45.073603 20842 solver.cpp:404]     Test net output #1: loss = 0.406719 (* 1 = 0.406719 loss)\nI0820 14:17:46.390967 20842 solver.cpp:228] Iteration 78500, loss = 0.257753\nI0820 14:17:46.391013 20842 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 14:17:46.391036 20842 solver.cpp:244]     Train net output #1: loss = 0.257753 (* 1 = 0.257753 loss)\nI0820 14:17:46.482960 20842 sgd_solver.cpp:166] Iteration 78500, lr = 1.9625\nI0820 14:20:03.568730 20842 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0820 14:21:27.981667 20842 solver.cpp:404]     Test net output #0: accuracy = 0.86884\nI0820 14:21:27.982086 20842 solver.cpp:404]     Test net output #1: loss = 0.426783 (* 1 = 0.426783 loss)\nI0820 14:21:29.299568 20842 solver.cpp:228] Iteration 78600, loss = 0.194423\nI0820 14:21:29.299613 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:21:29.299638 20842 solver.cpp:244]     Train net output #1: loss = 0.194422 (* 1 = 0.194422 loss)\nI0820 14:21:29.389613 20842 sgd_solver.cpp:166] Iteration 78600, lr = 1.965\nI0820 14:23:46.825475 20842 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0820 14:25:11.232187 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87312\nI0820 14:25:11.232538 20842 solver.cpp:404]     Test net output #1: loss = 0.427627 (* 1 = 0.427627 loss)\nI0820 14:25:12.549832 20842 solver.cpp:228] Iteration 78700, loss = 0.208746\nI0820 14:25:12.549878 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:25:12.549901 20842 solver.cpp:244]     Train net output #1: 
loss = 0.208745 (* 1 = 0.208745 loss)\nI0820 14:25:12.644290 20842 sgd_solver.cpp:166] Iteration 78700, lr = 1.9675\nI0820 14:27:30.100509 20842 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0820 14:28:54.506939 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88\nI0820 14:28:54.507320 20842 solver.cpp:404]     Test net output #1: loss = 0.399724 (* 1 = 0.399724 loss)\nI0820 14:28:55.824533 20842 solver.cpp:228] Iteration 78800, loss = 0.160484\nI0820 14:28:55.824582 20842 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:28:55.824606 20842 solver.cpp:244]     Train net output #1: loss = 0.160483 (* 1 = 0.160483 loss)\nI0820 14:28:55.918857 20842 sgd_solver.cpp:166] Iteration 78800, lr = 1.97\nI0820 14:31:12.987169 20842 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0820 14:32:37.389904 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88564\nI0820 14:32:37.390306 20842 solver.cpp:404]     Test net output #1: loss = 0.380192 (* 1 = 0.380192 loss)\nI0820 14:32:38.707367 20842 solver.cpp:228] Iteration 78900, loss = 0.108523\nI0820 14:32:38.707429 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 14:32:38.707455 20842 solver.cpp:244]     Train net output #1: loss = 0.108522 (* 1 = 0.108522 loss)\nI0820 14:32:38.800884 20842 sgd_solver.cpp:166] Iteration 78900, lr = 1.9725\nI0820 14:34:55.887405 20842 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0820 14:36:20.301311 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87608\nI0820 14:36:20.301731 20842 solver.cpp:404]     Test net output #1: loss = 0.407784 (* 1 = 0.407784 loss)\nI0820 14:36:21.618526 20842 solver.cpp:228] Iteration 79000, loss = 0.161762\nI0820 14:36:21.618579 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:36:21.618604 20842 solver.cpp:244]     Train net output #1: loss = 0.161761 (* 1 = 0.161761 loss)\nI0820 14:36:21.706360 20842 sgd_solver.cpp:166] Iteration 79000, lr = 1.975\nI0820 
14:38:38.777535 20842 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0820 14:40:03.193683 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8774\nI0820 14:40:03.194085 20842 solver.cpp:404]     Test net output #1: loss = 0.409345 (* 1 = 0.409345 loss)\nI0820 14:40:04.511581 20842 solver.cpp:228] Iteration 79100, loss = 0.206184\nI0820 14:40:04.511628 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 14:40:04.511652 20842 solver.cpp:244]     Train net output #1: loss = 0.206183 (* 1 = 0.206183 loss)\nI0820 14:40:04.603281 20842 sgd_solver.cpp:166] Iteration 79100, lr = 1.9775\nI0820 14:42:21.674384 20842 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0820 14:43:46.097404 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0820 14:43:46.097822 20842 solver.cpp:404]     Test net output #1: loss = 0.428171 (* 1 = 0.428171 loss)\nI0820 14:43:47.414824 20842 solver.cpp:228] Iteration 79200, loss = 0.216584\nI0820 14:43:47.414886 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:43:47.414912 20842 solver.cpp:244]     Train net output #1: loss = 0.216584 (* 1 = 0.216584 loss)\nI0820 14:43:47.507316 20842 sgd_solver.cpp:166] Iteration 79200, lr = 1.98\nI0820 14:46:04.616991 20842 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0820 14:47:29.035328 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0820 14:47:29.035717 20842 solver.cpp:404]     Test net output #1: loss = 0.406139 (* 1 = 0.406139 loss)\nI0820 14:47:30.352123 20842 solver.cpp:228] Iteration 79300, loss = 0.104034\nI0820 14:47:30.352169 20842 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 14:47:30.352192 20842 solver.cpp:244]     Train net output #1: loss = 0.104033 (* 1 = 0.104033 loss)\nI0820 14:47:30.442001 20842 sgd_solver.cpp:166] Iteration 79300, lr = 1.9825\nI0820 14:49:47.540535 20842 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0820 14:51:11.964303 20842 solver.cpp:404]     Test net 
output #0: accuracy = 0.8814\nI0820 14:51:11.964701 20842 solver.cpp:404]     Test net output #1: loss = 0.398734 (* 1 = 0.398734 loss)\nI0820 14:51:13.281891 20842 solver.cpp:228] Iteration 79400, loss = 0.203755\nI0820 14:51:13.281939 20842 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 14:51:13.281961 20842 solver.cpp:244]     Train net output #1: loss = 0.203754 (* 1 = 0.203754 loss)\nI0820 14:51:13.373123 20842 sgd_solver.cpp:166] Iteration 79400, lr = 1.985\nI0820 14:53:30.543488 20842 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0820 14:54:54.964613 20842 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0820 14:54:54.965025 20842 solver.cpp:404]     Test net output #1: loss = 0.390702 (* 1 = 0.390702 loss)\nI0820 14:54:56.282356 20842 solver.cpp:228] Iteration 79500, loss = 0.188056\nI0820 14:54:56.282402 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 14:54:56.282424 20842 solver.cpp:244]     Train net output #1: loss = 0.188055 (* 1 = 0.188055 loss)\nI0820 14:54:56.367944 20842 sgd_solver.cpp:166] Iteration 79500, lr = 1.9875\nI0820 14:57:13.446960 20842 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0820 14:58:37.877609 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87924\nI0820 14:58:37.877991 20842 solver.cpp:404]     Test net output #1: loss = 0.388448 (* 1 = 0.388448 loss)\nI0820 14:58:39.195015 20842 solver.cpp:228] Iteration 79600, loss = 0.198845\nI0820 14:58:39.195062 20842 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:58:39.195086 20842 solver.cpp:244]     Train net output #1: loss = 0.198844 (* 1 = 0.198844 loss)\nI0820 14:58:39.283684 20842 sgd_solver.cpp:166] Iteration 79600, lr = 1.99\nI0820 15:00:56.433545 20842 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0820 15:02:20.862952 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87644\nI0820 15:02:20.863353 20842 solver.cpp:404]     Test net output #1: loss = 0.420708 (* 1 = 
0.420708 loss)\nI0820 15:02:22.180234 20842 solver.cpp:228] Iteration 79700, loss = 0.194982\nI0820 15:02:22.180295 20842 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 15:02:22.180346 20842 solver.cpp:244]     Train net output #1: loss = 0.194981 (* 1 = 0.194981 loss)\nI0820 15:02:22.263491 20842 sgd_solver.cpp:166] Iteration 79700, lr = 1.9925\nI0820 15:04:39.358847 20842 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0820 15:06:03.776545 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0820 15:06:03.776960 20842 solver.cpp:404]     Test net output #1: loss = 0.396962 (* 1 = 0.396962 loss)\nI0820 15:06:05.094283 20842 solver.cpp:228] Iteration 79800, loss = 0.150977\nI0820 15:06:05.094331 20842 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 15:06:05.094353 20842 solver.cpp:244]     Train net output #1: loss = 0.150976 (* 1 = 0.150976 loss)\nI0820 15:06:05.186015 20842 sgd_solver.cpp:166] Iteration 79800, lr = 1.995\nI0820 15:08:22.286594 20842 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0820 15:09:46.701884 20842 solver.cpp:404]     Test net output #0: accuracy = 0.8762\nI0820 15:09:46.702280 20842 solver.cpp:404]     Test net output #1: loss = 0.428749 (* 1 = 0.428749 loss)\nI0820 15:09:48.018683 20842 solver.cpp:228] Iteration 79900, loss = 0.171543\nI0820 15:09:48.018728 20842 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 15:09:48.018750 20842 solver.cpp:244]     Train net output #1: loss = 0.171542 (* 1 = 0.171542 loss)\nI0820 15:09:48.105186 20842 sgd_solver.cpp:166] Iteration 79900, lr = 1.9975\nI0820 15:12:05.250924 20842 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range2SS80kRes56wd0_iter_80000.caffemodel\nI0820 15:12:05.969061 20842 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range2SS80kRes56wd0_iter_80000.solverstate\nI0820 15:12:06.421737 20842 solver.cpp:317] Iteration 80000, loss = 0.193811\nI0820 
15:12:06.421787 20842 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0820 15:13:30.825131 20842 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0820 15:13:30.825521 20842 solver.cpp:404]     Test net output #1: loss = 0.418386 (* 1 = 0.418386 loss)\nI0820 15:13:30.825541 20842 solver.cpp:322] Optimization Done.\nI0820 15:13:36.494376 20842 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range2SS80kRes56wd1",
    "content": "I0818 15:07:02.361129 21584 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0818 15:07:02.364202 21584 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0818 15:07:02.365427 21584 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0818 15:07:02.366655 21584 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0818 15:07:02.367872 21584 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0818 15:07:02.369109 21584 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0818 15:07:02.370342 21584 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0818 15:07:02.371580 21584 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0818 15:07:02.372820 21584 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0818 15:07:02.787838 21584 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 80000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 80000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range2SS80kRes56wd1\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 2\nI0818 15:07:02.793946 21584 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0818 15:07:02.810959 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:02.811038 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:02.812168 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0818 15:07:02.812239 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0818 15:07:02.812263 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0818 15:07:02.812283 21584 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0818 15:07:02.812304 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0818 15:07:02.812321 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0818 15:07:02.812338 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0818 15:07:02.812357 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0818 15:07:02.812376 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0818 15:07:02.812394 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0818 15:07:02.812413 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0818 15:07:02.812430 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0818 15:07:02.812450 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0818 15:07:02.812469 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0818 15:07:02.812489 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0818 15:07:02.812506 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0818 15:07:02.812526 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0818 15:07:02.812543 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0818 
15:07:02.812563 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0818 15:07:02.812582 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0818 15:07:02.812613 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0818 15:07:02.812631 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0818 15:07:02.812666 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0818 15:07:02.812687 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0818 15:07:02.812706 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0818 15:07:02.812723 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0818 15:07:02.812743 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0818 15:07:02.812758 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0818 15:07:02.812777 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0818 15:07:02.812795 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0818 15:07:02.812815 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0818 15:07:02.812834 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0818 15:07:02.812852 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0818 15:07:02.812870 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0818 15:07:02.812889 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0818 15:07:02.812907 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0818 15:07:02.812928 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0818 15:07:02.812945 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0818 15:07:02.812964 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0818 15:07:02.812981 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0818 15:07:02.813005 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0818 15:07:02.813022 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0818 15:07:02.813040 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0818 15:07:02.813058 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0818 15:07:02.813078 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0818 15:07:02.813097 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0818 15:07:02.813115 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0818 15:07:02.813132 21584 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0818 15:07:02.813150 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0818 15:07:02.813168 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0818 15:07:02.813186 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0818 15:07:02.813213 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0818 15:07:02.813235 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0818 15:07:02.813253 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0818 15:07:02.813272 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0818 15:07:02.813288 21584 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0818 15:07:02.815042 21584 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI0818 15:07:02.817127 21584 layer_factory.hpp:77] Creating layer dataLayer\nI0818 15:07:02.819499 21584 net.cpp:100] Creating Layer dataLayer\nI0818 15:07:02.819581 21584 net.cpp:408] dataLayer -> data_top\nI0818 15:07:02.819793 21584 net.cpp:408] dataLayer -> label\nI0818 15:07:02.819923 21584 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 15:07:02.926396 21589 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI0818 15:07:03.231662 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:03.239459 21584 net.cpp:150] Setting up dataLayer\nI0818 15:07:03.239527 21584 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 15:07:03.239542 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.239547 21584 net.cpp:165] Memory required for data: 1536500\nI0818 15:07:03.239563 21584 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 15:07:03.239579 21584 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 15:07:03.239588 21584 net.cpp:434] label_dataLayer_1_split <- label\nI0818 15:07:03.239610 21584 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 15:07:03.239629 21584 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 15:07:03.239711 21584 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 15:07:03.239727 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.239733 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.239738 21584 net.cpp:165] Memory required for data: 1537500\nI0818 15:07:03.239744 21584 layer_factory.hpp:77] Creating layer pre_conv\nI0818 15:07:03.239815 21584 
net.cpp:100] Creating Layer pre_conv\nI0818 15:07:03.239828 21584 net.cpp:434] pre_conv <- data_top\nI0818 15:07:03.239840 21584 net.cpp:408] pre_conv -> pre_conv_top\nI0818 15:07:03.241569 21584 net.cpp:150] Setting up pre_conv\nI0818 15:07:03.241598 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.241605 21584 net.cpp:165] Memory required for data: 9729500\nI0818 15:07:03.241679 21584 layer_factory.hpp:77] Creating layer pre_bn\nI0818 15:07:03.241765 21584 net.cpp:100] Creating Layer pre_bn\nI0818 15:07:03.241778 21584 net.cpp:434] pre_bn <- pre_conv_top\nI0818 15:07:03.241791 21584 net.cpp:408] pre_bn -> pre_bn_top\nI0818 15:07:03.242126 21590 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:03.242244 21584 net.cpp:150] Setting up pre_bn\nI0818 15:07:03.242264 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.242269 21584 net.cpp:165] Memory required for data: 17921500\nI0818 15:07:03.242286 21584 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:03.242341 21584 net.cpp:100] Creating Layer pre_scale\nI0818 15:07:03.242350 21584 net.cpp:434] pre_scale <- pre_bn_top\nI0818 15:07:03.242363 21584 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 15:07:03.242547 21584 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:03.251374 21584 net.cpp:150] Setting up pre_scale\nI0818 15:07:03.251400 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.251407 21584 net.cpp:165] Memory required for data: 26113500\nI0818 15:07:03.251418 21584 layer_factory.hpp:77] Creating layer pre_relu\nI0818 15:07:03.251476 21584 net.cpp:100] Creating Layer pre_relu\nI0818 15:07:03.251485 21584 net.cpp:434] pre_relu <- pre_bn_top\nI0818 15:07:03.251494 21584 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 15:07:03.251507 21584 net.cpp:150] Setting up pre_relu\nI0818 15:07:03.251515 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.251519 21584 net.cpp:165] Memory required for data: 
34305500\nI0818 15:07:03.251525 21584 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 15:07:03.251536 21584 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 15:07:03.251543 21584 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 15:07:03.251550 21584 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 15:07:03.251560 21584 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 15:07:03.251610 21584 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 15:07:03.251626 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.251633 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.251637 21584 net.cpp:165] Memory required for data: 50689500\nI0818 15:07:03.251642 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 15:07:03.251674 21584 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 15:07:03.251683 21584 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 15:07:03.251693 21584 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 15:07:03.252019 21584 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 15:07:03.252035 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.252040 21584 net.cpp:165] Memory required for data: 58881500\nI0818 15:07:03.252053 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 15:07:03.252069 21584 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 15:07:03.252075 21584 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 15:07:03.252086 21584 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 15:07:03.252317 21584 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 15:07:03.252331 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.252336 21584 net.cpp:165] Memory required for data: 67073500\nI0818 15:07:03.252347 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:03.252360 21584 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0818 15:07:03.252367 21584 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 15:07:03.252375 21584 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.252425 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:03.252562 21584 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 15:07:03.252574 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.252580 21584 net.cpp:165] Memory required for data: 75265500\nI0818 15:07:03.252589 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 15:07:03.252609 21584 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 15:07:03.252615 21584 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 15:07:03.252624 21584 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.252634 21584 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 15:07:03.252640 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.252645 21584 net.cpp:165] Memory required for data: 83457500\nI0818 15:07:03.252656 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 15:07:03.252673 21584 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 15:07:03.252681 21584 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 15:07:03.252694 21584 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 15:07:03.252995 21584 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 15:07:03.253008 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253013 21584 net.cpp:165] Memory required for data: 91649500\nI0818 15:07:03.253022 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 15:07:03.253032 21584 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 15:07:03.253038 21584 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 15:07:03.253046 21584 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 15:07:03.253274 21584 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 15:07:03.253288 21584 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253293 21584 net.cpp:165] Memory required for data: 99841500\nI0818 15:07:03.253310 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:03.253320 21584 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 15:07:03.253326 21584 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 15:07:03.253337 21584 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.253392 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:03.253532 21584 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 15:07:03.253546 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253551 21584 net.cpp:165] Memory required for data: 108033500\nI0818 15:07:03.253561 21584 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 15:07:03.253620 21584 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 15:07:03.253633 21584 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 15:07:03.253641 21584 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 15:07:03.253656 21584 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 15:07:03.253739 21584 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 15:07:03.253754 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253760 21584 net.cpp:165] Memory required for data: 116225500\nI0818 15:07:03.253767 21584 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 15:07:03.253775 21584 net.cpp:100] Creating Layer L1_b1_relu\nI0818 15:07:03.253780 21584 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 15:07:03.253793 21584 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.253803 21584 net.cpp:150] Setting up L1_b1_relu\nI0818 15:07:03.253810 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253815 21584 net.cpp:165] Memory required for data: 124417500\nI0818 15:07:03.253819 21584 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.253829 21584 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.253834 21584 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 15:07:03.253842 21584 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:03.253852 21584 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:03.253897 21584 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.253909 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253916 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.253928 21584 net.cpp:165] Memory required for data: 140801500\nI0818 15:07:03.253933 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 15:07:03.253948 21584 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 15:07:03.253955 21584 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:03.253964 21584 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 15:07:03.254271 21584 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 15:07:03.254286 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.254290 21584 net.cpp:165] Memory required for data: 148993500\nI0818 15:07:03.254300 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 15:07:03.254314 21584 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 15:07:03.254320 21584 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 15:07:03.254329 21584 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 15:07:03.254570 21584 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 15:07:03.254583 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.254588 21584 net.cpp:165] Memory required for data: 157185500\nI0818 15:07:03.254598 21584 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0818 15:07:03.254607 21584 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 15:07:03.254613 21584 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 15:07:03.254621 21584 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.254683 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:03.254820 21584 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 15:07:03.254833 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.254839 21584 net.cpp:165] Memory required for data: 165377500\nI0818 15:07:03.254848 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 15:07:03.254856 21584 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 15:07:03.254863 21584 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 15:07:03.254874 21584 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.254884 21584 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 15:07:03.254891 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.254896 21584 net.cpp:165] Memory required for data: 173569500\nI0818 15:07:03.254901 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 15:07:03.254914 21584 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 15:07:03.254920 21584 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 15:07:03.254930 21584 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 15:07:03.255234 21584 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 15:07:03.255247 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.255252 21584 net.cpp:165] Memory required for data: 181761500\nI0818 15:07:03.255261 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 15:07:03.255275 21584 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 15:07:03.255282 21584 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 15:07:03.255291 21584 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 15:07:03.255527 21584 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 15:07:03.255543 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.255548 21584 net.cpp:165] Memory required for data: 189953500\nI0818 15:07:03.255564 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:03.255574 21584 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 15:07:03.255580 21584 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 15:07:03.255591 21584 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.255643 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:03.255795 21584 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 15:07:03.255810 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.255815 21584 net.cpp:165] Memory required for data: 198145500\nI0818 15:07:03.255825 21584 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 15:07:03.255841 21584 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 15:07:03.255847 21584 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 15:07:03.255854 21584 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:03.255864 21584 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 15:07:03.255897 21584 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 15:07:03.255908 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.255913 21584 net.cpp:165] Memory required for data: 206337500\nI0818 15:07:03.255918 21584 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 15:07:03.255925 21584 net.cpp:100] Creating Layer L1_b2_relu\nI0818 15:07:03.255931 21584 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 15:07:03.255939 21584 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.255947 21584 net.cpp:150] Setting up L1_b2_relu\nI0818 15:07:03.255954 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.255959 21584 net.cpp:165] Memory required for 
data: 214529500\nI0818 15:07:03.255964 21584 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.255975 21584 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.255980 21584 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 15:07:03.255987 21584 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:03.255996 21584 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:03.256037 21584 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.256052 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.256058 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.256063 21584 net.cpp:165] Memory required for data: 230913500\nI0818 15:07:03.256068 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 15:07:03.256079 21584 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 15:07:03.256085 21584 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:03.256094 21584 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 15:07:03.256397 21584 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 15:07:03.256412 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.256417 21584 net.cpp:165] Memory required for data: 239105500\nI0818 15:07:03.256425 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 15:07:03.256438 21584 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 15:07:03.256443 21584 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 15:07:03.256451 21584 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 15:07:03.256691 21584 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 15:07:03.256705 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.256711 21584 net.cpp:165] Memory required for data: 
247297500\nI0818 15:07:03.256721 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:03.256731 21584 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 15:07:03.256736 21584 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 15:07:03.256747 21584 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.256798 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:03.256940 21584 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 15:07:03.256954 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.256959 21584 net.cpp:165] Memory required for data: 255489500\nI0818 15:07:03.256968 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 15:07:03.256976 21584 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 15:07:03.256983 21584 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 15:07:03.256992 21584 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.257002 21584 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 15:07:03.257016 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.257021 21584 net.cpp:165] Memory required for data: 263681500\nI0818 15:07:03.257026 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 15:07:03.257041 21584 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 15:07:03.257047 21584 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 15:07:03.257056 21584 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 15:07:03.257367 21584 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 15:07:03.257381 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.257386 21584 net.cpp:165] Memory required for data: 271873500\nI0818 15:07:03.257395 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 15:07:03.257411 21584 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 15:07:03.257417 21584 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 15:07:03.257426 21584 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 15:07:03.257664 21584 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 15:07:03.257678 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.257684 21584 net.cpp:165] Memory required for data: 280065500\nI0818 15:07:03.257694 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:03.257706 21584 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 15:07:03.257712 21584 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 15:07:03.257721 21584 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.257776 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:03.257913 21584 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 15:07:03.257926 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.257931 21584 net.cpp:165] Memory required for data: 288257500\nI0818 15:07:03.257941 21584 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 15:07:03.257949 21584 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 15:07:03.257956 21584 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 15:07:03.257961 21584 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:03.257972 21584 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 15:07:03.258003 21584 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 15:07:03.258015 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.258021 21584 net.cpp:165] Memory required for data: 296449500\nI0818 15:07:03.258026 21584 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 15:07:03.258033 21584 net.cpp:100] Creating Layer L1_b3_relu\nI0818 15:07:03.258039 21584 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 15:07:03.258046 21584 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.258055 21584 net.cpp:150] Setting up L1_b3_relu\nI0818 15:07:03.258062 21584 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 15:07:03.258067 21584 net.cpp:165] Memory required for data: 304641500\nI0818 15:07:03.258072 21584 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.258082 21584 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.258087 21584 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 15:07:03.258095 21584 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:03.258105 21584 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:03.258149 21584 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.258162 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.258167 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.258172 21584 net.cpp:165] Memory required for data: 321025500\nI0818 15:07:03.258177 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 15:07:03.258188 21584 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 15:07:03.258194 21584 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:03.258214 21584 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 15:07:03.258522 21584 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 15:07:03.258535 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.258541 21584 net.cpp:165] Memory required for data: 329217500\nI0818 15:07:03.258549 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 15:07:03.258559 21584 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 15:07:03.258565 21584 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 15:07:03.258574 21584 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 15:07:03.258826 21584 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 15:07:03.258841 21584 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 15:07:03.258846 21584 net.cpp:165] Memory required for data: 337409500\nI0818 15:07:03.258857 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:03.258868 21584 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 15:07:03.258874 21584 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 15:07:03.258882 21584 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.258939 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:03.259079 21584 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 15:07:03.259093 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.259097 21584 net.cpp:165] Memory required for data: 345601500\nI0818 15:07:03.259106 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 15:07:03.259115 21584 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 15:07:03.259120 21584 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 15:07:03.259131 21584 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.259141 21584 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 15:07:03.259148 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.259152 21584 net.cpp:165] Memory required for data: 353793500\nI0818 15:07:03.259157 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 15:07:03.259171 21584 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 15:07:03.259176 21584 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 15:07:03.259186 21584 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 15:07:03.259492 21584 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 15:07:03.259506 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.259511 21584 net.cpp:165] Memory required for data: 361985500\nI0818 15:07:03.259521 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 15:07:03.259532 21584 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 15:07:03.259539 21584 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 15:07:03.259547 21584 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 15:07:03.259793 21584 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 15:07:03.259809 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.259814 21584 net.cpp:165] Memory required for data: 370177500\nI0818 15:07:03.259824 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:03.259835 21584 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 15:07:03.259841 21584 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 15:07:03.259850 21584 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.259904 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:03.260044 21584 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 15:07:03.260057 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260062 21584 net.cpp:165] Memory required for data: 378369500\nI0818 15:07:03.260071 21584 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 15:07:03.260083 21584 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 15:07:03.260090 21584 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 15:07:03.260097 21584 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:03.260105 21584 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 15:07:03.260149 21584 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 15:07:03.260159 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260164 21584 net.cpp:165] Memory required for data: 386561500\nI0818 15:07:03.260169 21584 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 15:07:03.260176 21584 net.cpp:100] Creating Layer L1_b4_relu\nI0818 15:07:03.260182 21584 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 15:07:03.260192 21584 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.260202 21584 net.cpp:150] 
Setting up L1_b4_relu\nI0818 15:07:03.260210 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260213 21584 net.cpp:165] Memory required for data: 394753500\nI0818 15:07:03.260218 21584 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.260226 21584 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.260231 21584 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 15:07:03.260238 21584 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:03.260247 21584 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:03.260293 21584 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.260304 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260310 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260315 21584 net.cpp:165] Memory required for data: 411137500\nI0818 15:07:03.260320 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 15:07:03.260331 21584 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 15:07:03.260337 21584 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:03.260349 21584 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 15:07:03.260661 21584 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 15:07:03.260676 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260681 21584 net.cpp:165] Memory required for data: 419329500\nI0818 15:07:03.260704 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 15:07:03.260715 21584 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 15:07:03.260722 21584 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 15:07:03.260733 21584 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 15:07:03.260972 21584 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0818 15:07:03.260985 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.260992 21584 net.cpp:165] Memory required for data: 427521500\nI0818 15:07:03.261001 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:03.261013 21584 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 15:07:03.261020 21584 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 15:07:03.261029 21584 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.261080 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:03.261221 21584 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0818 15:07:03.261234 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.261240 21584 net.cpp:165] Memory required for data: 435713500\nI0818 15:07:03.261248 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 15:07:03.261257 21584 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 15:07:03.261263 21584 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 15:07:03.261273 21584 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.261283 21584 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 15:07:03.261291 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.261296 21584 net.cpp:165] Memory required for data: 443905500\nI0818 15:07:03.261301 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 15:07:03.261315 21584 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 15:07:03.261322 21584 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 15:07:03.261337 21584 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 15:07:03.261648 21584 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 15:07:03.261668 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.261673 21584 net.cpp:165] Memory required for data: 452097500\nI0818 15:07:03.261682 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 15:07:03.261698 21584 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 15:07:03.261703 21584 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 15:07:03.261713 21584 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 15:07:03.261950 21584 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 15:07:03.261963 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.261968 21584 net.cpp:165] Memory required for data: 460289500\nI0818 15:07:03.261978 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:03.261991 21584 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 15:07:03.261997 21584 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 15:07:03.262004 21584 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.262056 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:03.262197 21584 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 15:07:03.262209 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262214 21584 net.cpp:165] Memory required for data: 468481500\nI0818 15:07:03.262223 21584 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 15:07:03.262233 21584 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 15:07:03.262238 21584 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 15:07:03.262245 21584 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:03.262256 21584 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 15:07:03.262287 21584 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 15:07:03.262300 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262305 21584 net.cpp:165] Memory required for data: 476673500\nI0818 15:07:03.262310 21584 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 15:07:03.262317 21584 net.cpp:100] Creating Layer L1_b5_relu\nI0818 15:07:03.262323 21584 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 15:07:03.262331 21584 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.262339 21584 net.cpp:150] Setting up L1_b5_relu\nI0818 15:07:03.262346 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262351 21584 net.cpp:165] Memory required for data: 484865500\nI0818 15:07:03.262356 21584 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.262365 21584 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.262372 21584 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 15:07:03.262379 21584 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:03.262388 21584 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:03.262434 21584 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.262444 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262451 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262456 21584 net.cpp:165] Memory required for data: 501249500\nI0818 15:07:03.262461 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 15:07:03.262472 21584 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 15:07:03.262478 21584 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:03.262490 21584 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 15:07:03.262809 21584 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 15:07:03.262822 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.262828 21584 net.cpp:165] Memory required for data: 509441500\nI0818 15:07:03.262845 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 15:07:03.262854 21584 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 15:07:03.262861 21584 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 15:07:03.262868 21584 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0818 15:07:03.263110 21584 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 15:07:03.263123 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.263128 21584 net.cpp:165] Memory required for data: 517633500\nI0818 15:07:03.263139 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:03.263151 21584 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 15:07:03.263157 21584 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 15:07:03.263165 21584 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.263221 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:03.263361 21584 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0818 15:07:03.263375 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.263380 21584 net.cpp:165] Memory required for data: 525825500\nI0818 15:07:03.263388 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 15:07:03.263396 21584 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 15:07:03.263402 21584 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 15:07:03.263412 21584 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.263423 21584 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 15:07:03.263430 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.263435 21584 net.cpp:165] Memory required for data: 534017500\nI0818 15:07:03.263440 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 15:07:03.263453 21584 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 15:07:03.263459 21584 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 15:07:03.263468 21584 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 15:07:03.263790 21584 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 15:07:03.263805 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.263810 21584 net.cpp:165] Memory required for data: 542209500\nI0818 15:07:03.263819 21584 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 15:07:03.263831 21584 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 15:07:03.263837 21584 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 15:07:03.263846 21584 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 15:07:03.264083 21584 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 15:07:03.264096 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264102 21584 net.cpp:165] Memory required for data: 550401500\nI0818 15:07:03.264112 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:03.264122 21584 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 15:07:03.264128 21584 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 15:07:03.264139 21584 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.264192 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:03.264333 21584 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 15:07:03.264345 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264350 21584 net.cpp:165] Memory required for data: 558593500\nI0818 15:07:03.264359 21584 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 15:07:03.264376 21584 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 15:07:03.264384 21584 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 15:07:03.264390 21584 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:03.264405 21584 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 15:07:03.264436 21584 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 15:07:03.264447 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264452 21584 net.cpp:165] Memory required for data: 566785500\nI0818 15:07:03.264458 21584 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 15:07:03.264475 21584 net.cpp:100] Creating Layer L1_b6_relu\nI0818 15:07:03.264482 21584 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0818 15:07:03.264492 21584 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.264502 21584 net.cpp:150] Setting up L1_b6_relu\nI0818 15:07:03.264508 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264513 21584 net.cpp:165] Memory required for data: 574977500\nI0818 15:07:03.264518 21584 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.264524 21584 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.264530 21584 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 15:07:03.264538 21584 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:03.264547 21584 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:03.264592 21584 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.264603 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264611 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264616 21584 net.cpp:165] Memory required for data: 591361500\nI0818 15:07:03.264621 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 15:07:03.264634 21584 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 15:07:03.264641 21584 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:03.264655 21584 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 15:07:03.264972 21584 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 15:07:03.264986 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.264991 21584 net.cpp:165] Memory required for data: 599553500\nI0818 15:07:03.265000 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 15:07:03.265010 21584 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 15:07:03.265017 21584 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0818 15:07:03.265027 21584 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 15:07:03.265269 21584 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 15:07:03.265285 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.265291 21584 net.cpp:165] Memory required for data: 607745500\nI0818 15:07:03.265301 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:03.265311 21584 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 15:07:03.265317 21584 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 15:07:03.265326 21584 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.265377 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:03.265516 21584 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 15:07:03.265529 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.265534 21584 net.cpp:165] Memory required for data: 615937500\nI0818 15:07:03.265543 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 15:07:03.265554 21584 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 15:07:03.265560 21584 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 15:07:03.265568 21584 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.265583 21584 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 15:07:03.265590 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.265594 21584 net.cpp:165] Memory required for data: 624129500\nI0818 15:07:03.265599 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 15:07:03.265610 21584 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 15:07:03.265616 21584 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 15:07:03.265627 21584 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 15:07:03.265944 21584 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 15:07:03.265959 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.265964 21584 
net.cpp:165] Memory required for data: 632321500\nI0818 15:07:03.265980 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 15:07:03.265990 21584 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 15:07:03.265996 21584 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 15:07:03.266010 21584 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 15:07:03.266249 21584 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 15:07:03.266263 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266268 21584 net.cpp:165] Memory required for data: 640513500\nI0818 15:07:03.266278 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:03.266290 21584 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 15:07:03.266296 21584 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 15:07:03.266304 21584 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.266358 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:03.266497 21584 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 15:07:03.266510 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266515 21584 net.cpp:165] Memory required for data: 648705500\nI0818 15:07:03.266525 21584 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 15:07:03.266536 21584 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 15:07:03.266543 21584 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 15:07:03.266551 21584 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:03.266558 21584 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 15:07:03.266592 21584 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 15:07:03.266603 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266608 21584 net.cpp:165] Memory required for data: 656897500\nI0818 15:07:03.266613 21584 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 15:07:03.266621 21584 net.cpp:100] Creating 
Layer L1_b7_relu\nI0818 15:07:03.266628 21584 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 15:07:03.266638 21584 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.266647 21584 net.cpp:150] Setting up L1_b7_relu\nI0818 15:07:03.266661 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266666 21584 net.cpp:165] Memory required for data: 665089500\nI0818 15:07:03.266671 21584 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.266679 21584 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.266685 21584 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 15:07:03.266691 21584 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:03.266701 21584 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:03.266747 21584 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.266759 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266765 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.266770 21584 net.cpp:165] Memory required for data: 681473500\nI0818 15:07:03.266775 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 15:07:03.266788 21584 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 15:07:03.266793 21584 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:03.266805 21584 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 15:07:03.267119 21584 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 15:07:03.267133 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.267138 21584 net.cpp:165] Memory required for data: 689665500\nI0818 15:07:03.267148 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 15:07:03.267156 21584 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0818 15:07:03.267163 21584 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 15:07:03.267175 21584 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 15:07:03.267455 21584 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 15:07:03.267472 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.267478 21584 net.cpp:165] Memory required for data: 697857500\nI0818 15:07:03.267488 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:03.267498 21584 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 15:07:03.267504 21584 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 15:07:03.267513 21584 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.267565 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:03.267719 21584 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 15:07:03.267734 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.267738 21584 net.cpp:165] Memory required for data: 706049500\nI0818 15:07:03.267747 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 15:07:03.267760 21584 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 15:07:03.267765 21584 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 15:07:03.267773 21584 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.267783 21584 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 15:07:03.267791 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.267794 21584 net.cpp:165] Memory required for data: 714241500\nI0818 15:07:03.267799 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 15:07:03.267813 21584 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 15:07:03.267819 21584 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 15:07:03.267830 21584 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 15:07:03.268147 21584 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 15:07:03.268162 21584 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268167 21584 net.cpp:165] Memory required for data: 722433500\nI0818 15:07:03.268175 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 15:07:03.268184 21584 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 15:07:03.268190 21584 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 15:07:03.268203 21584 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 15:07:03.268451 21584 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 15:07:03.268465 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268470 21584 net.cpp:165] Memory required for data: 730625500\nI0818 15:07:03.268479 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:03.268492 21584 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 15:07:03.268498 21584 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 15:07:03.268507 21584 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.268559 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:03.268708 21584 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 15:07:03.268721 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268728 21584 net.cpp:165] Memory required for data: 738817500\nI0818 15:07:03.268736 21584 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 15:07:03.268748 21584 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 15:07:03.268754 21584 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 15:07:03.268761 21584 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:03.268770 21584 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 15:07:03.268805 21584 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 15:07:03.268816 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268821 21584 net.cpp:165] Memory required for data: 747009500\nI0818 15:07:03.268827 21584 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0818 15:07:03.268836 21584 net.cpp:100] Creating Layer L1_b8_relu\nI0818 15:07:03.268841 21584 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 15:07:03.268854 21584 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.268864 21584 net.cpp:150] Setting up L1_b8_relu\nI0818 15:07:03.268872 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268883 21584 net.cpp:165] Memory required for data: 755201500\nI0818 15:07:03.268888 21584 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.268896 21584 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.268901 21584 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 15:07:03.268909 21584 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:03.268919 21584 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:03.268965 21584 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.268977 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268985 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.268988 21584 net.cpp:165] Memory required for data: 771585500\nI0818 15:07:03.268993 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 15:07:03.269004 21584 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 15:07:03.269011 21584 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:03.269023 21584 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 15:07:03.269345 21584 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 15:07:03.269359 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.269364 21584 net.cpp:165] Memory required for data: 779777500\nI0818 15:07:03.269373 21584 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0818 15:07:03.269388 21584 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 15:07:03.269395 21584 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 15:07:03.269407 21584 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 15:07:03.269654 21584 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 15:07:03.269668 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.269673 21584 net.cpp:165] Memory required for data: 787969500\nI0818 15:07:03.269683 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:03.269693 21584 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 15:07:03.269700 21584 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 15:07:03.269707 21584 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.269764 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:03.269908 21584 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 15:07:03.269920 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.269927 21584 net.cpp:165] Memory required for data: 796161500\nI0818 15:07:03.269935 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 15:07:03.269946 21584 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 15:07:03.269953 21584 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 15:07:03.269960 21584 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.269970 21584 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 15:07:03.269978 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.269982 21584 net.cpp:165] Memory required for data: 804353500\nI0818 15:07:03.269987 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 15:07:03.270000 21584 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 15:07:03.270006 21584 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 15:07:03.270018 21584 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 15:07:03.270336 21584 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 15:07:03.270350 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.270355 21584 net.cpp:165] Memory required for data: 812545500\nI0818 15:07:03.270364 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 15:07:03.270376 21584 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 15:07:03.270382 21584 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 15:07:03.270391 21584 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 15:07:03.270648 21584 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 15:07:03.270668 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.270673 21584 net.cpp:165] Memory required for data: 820737500\nI0818 15:07:03.270704 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:03.270715 21584 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 15:07:03.270721 21584 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 15:07:03.270732 21584 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.270786 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:03.270931 21584 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 15:07:03.270943 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.270948 21584 net.cpp:165] Memory required for data: 828929500\nI0818 15:07:03.270958 21584 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 15:07:03.270970 21584 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 15:07:03.270977 21584 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 15:07:03.270984 21584 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:03.270992 21584 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 15:07:03.271023 21584 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 15:07:03.271034 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.271040 21584 net.cpp:165] Memory required for 
data: 837121500\nI0818 15:07:03.271045 21584 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 15:07:03.271054 21584 net.cpp:100] Creating Layer L1_b9_relu\nI0818 15:07:03.271059 21584 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 15:07:03.271070 21584 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.271080 21584 net.cpp:150] Setting up L1_b9_relu\nI0818 15:07:03.271087 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.271091 21584 net.cpp:165] Memory required for data: 845313500\nI0818 15:07:03.271096 21584 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.271108 21584 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.271113 21584 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 15:07:03.271121 21584 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:03.271131 21584 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:03.271178 21584 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.271188 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.271195 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.271199 21584 net.cpp:165] Memory required for data: 861697500\nI0818 15:07:03.271204 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 15:07:03.271219 21584 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 15:07:03.271225 21584 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:03.271234 21584 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 15:07:03.271553 21584 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 15:07:03.271567 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.271572 21584 net.cpp:165] Memory required for data: 
863745500\nI0818 15:07:03.271581 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 15:07:03.271594 21584 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 15:07:03.271600 21584 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 15:07:03.271608 21584 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 15:07:03.271855 21584 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 15:07:03.271877 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.271883 21584 net.cpp:165] Memory required for data: 865793500\nI0818 15:07:03.271893 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:03.271903 21584 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 15:07:03.271915 21584 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 15:07:03.271924 21584 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.271977 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:03.272122 21584 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 15:07:03.272135 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.272140 21584 net.cpp:165] Memory required for data: 867841500\nI0818 15:07:03.272150 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 15:07:03.272158 21584 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 15:07:03.272164 21584 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 15:07:03.272176 21584 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.272186 21584 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 15:07:03.272192 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.272197 21584 net.cpp:165] Memory required for data: 869889500\nI0818 15:07:03.272202 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 15:07:03.272215 21584 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 15:07:03.272222 21584 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 15:07:03.272230 21584 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 15:07:03.272543 21584 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 15:07:03.272557 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.272562 21584 net.cpp:165] Memory required for data: 871937500\nI0818 15:07:03.272570 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 15:07:03.272580 21584 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 15:07:03.272586 21584 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 15:07:03.272600 21584 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 15:07:03.272850 21584 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 15:07:03.272863 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.272868 21584 net.cpp:165] Memory required for data: 873985500\nI0818 15:07:03.272878 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:03.272891 21584 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 15:07:03.272897 21584 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 15:07:03.272905 21584 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.272960 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:03.273104 21584 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 15:07:03.273118 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.273123 21584 net.cpp:165] Memory required for data: 876033500\nI0818 15:07:03.273131 21584 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 15:07:03.273146 21584 net.cpp:100] Creating Layer L2_b1_pool\nI0818 15:07:03.273154 21584 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:03.273164 21584 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 15:07:03.273255 21584 net.cpp:150] Setting up L2_b1_pool\nI0818 15:07:03.273272 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.273277 21584 net.cpp:165] Memory required for data: 878081500\nI0818 15:07:03.273283 21584 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 15:07:03.273293 21584 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 15:07:03.273298 21584 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 15:07:03.273305 21584 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 15:07:03.273321 21584 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 15:07:03.273355 21584 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 15:07:03.273366 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.273372 21584 net.cpp:165] Memory required for data: 880129500\nI0818 15:07:03.273377 21584 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 15:07:03.273386 21584 net.cpp:100] Creating Layer L2_b1_relu\nI0818 15:07:03.273391 21584 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 15:07:03.273402 21584 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.273419 21584 net.cpp:150] Setting up L2_b1_relu\nI0818 15:07:03.273427 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.273432 21584 net.cpp:165] Memory required for data: 882177500\nI0818 15:07:03.273437 21584 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 15:07:03.273489 21584 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 15:07:03.273504 21584 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 15:07:03.275907 21584 net.cpp:150] Setting up L2_b1_zeros\nI0818 15:07:03.275925 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.275931 21584 net.cpp:165] Memory required for data: 884225500\nI0818 15:07:03.275938 21584 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 15:07:03.275948 21584 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 15:07:03.275954 21584 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 15:07:03.275961 21584 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 15:07:03.275974 21584 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 15:07:03.276060 21584 net.cpp:150] Setting up 
L2_b1_concat0\nI0818 15:07:03.276077 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.276082 21584 net.cpp:165] Memory required for data: 888321500\nI0818 15:07:03.276087 21584 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.276096 21584 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.276101 21584 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 15:07:03.276113 21584 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:03.276124 21584 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:03.276173 21584 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.276185 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.276191 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.276196 21584 net.cpp:165] Memory required for data: 896513500\nI0818 15:07:03.276202 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 15:07:03.276217 21584 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 15:07:03.276224 21584 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:03.276233 21584 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 15:07:03.277734 21584 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 15:07:03.277752 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.277757 21584 net.cpp:165] Memory required for data: 900609500\nI0818 15:07:03.277768 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 15:07:03.277781 21584 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 15:07:03.277788 21584 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 15:07:03.277797 21584 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 15:07:03.278048 21584 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 15:07:03.278069 21584 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 15:07:03.278074 21584 net.cpp:165] Memory required for data: 904705500\nI0818 15:07:03.278085 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:03.278095 21584 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 15:07:03.278101 21584 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 15:07:03.278110 21584 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.278165 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:03.278317 21584 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 15:07:03.278331 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.278337 21584 net.cpp:165] Memory required for data: 908801500\nI0818 15:07:03.278345 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 15:07:03.278354 21584 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 15:07:03.278362 21584 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 15:07:03.278372 21584 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.278390 21584 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 15:07:03.278398 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.278403 21584 net.cpp:165] Memory required for data: 912897500\nI0818 15:07:03.278408 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 15:07:03.278422 21584 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 15:07:03.278429 21584 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 15:07:03.278437 21584 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 15:07:03.278901 21584 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 15:07:03.278916 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.278921 21584 net.cpp:165] Memory required for data: 916993500\nI0818 15:07:03.278930 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 15:07:03.278944 21584 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 15:07:03.278950 
21584 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 15:07:03.278959 21584 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 15:07:03.279233 21584 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 15:07:03.279247 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279253 21584 net.cpp:165] Memory required for data: 921089500\nI0818 15:07:03.279263 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:03.279276 21584 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 15:07:03.279284 21584 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 15:07:03.279290 21584 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.279345 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:03.279496 21584 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 15:07:03.279510 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279515 21584 net.cpp:165] Memory required for data: 925185500\nI0818 15:07:03.279525 21584 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 15:07:03.279537 21584 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 15:07:03.279544 21584 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 15:07:03.279551 21584 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:03.279562 21584 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 15:07:03.279589 21584 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 15:07:03.279602 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279606 21584 net.cpp:165] Memory required for data: 929281500\nI0818 15:07:03.279633 21584 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 15:07:03.279641 21584 net.cpp:100] Creating Layer L2_b2_relu\nI0818 15:07:03.279649 21584 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 15:07:03.279675 21584 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.279686 21584 net.cpp:150] 
Setting up L2_b2_relu\nI0818 15:07:03.279693 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279698 21584 net.cpp:165] Memory required for data: 933377500\nI0818 15:07:03.279703 21584 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.279711 21584 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.279716 21584 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 15:07:03.279723 21584 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:03.279733 21584 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:03.279783 21584 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.279795 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279803 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.279806 21584 net.cpp:165] Memory required for data: 941569500\nI0818 15:07:03.279811 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 15:07:03.279830 21584 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 15:07:03.279837 21584 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:03.279850 21584 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 15:07:03.280311 21584 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 15:07:03.280326 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.280331 21584 net.cpp:165] Memory required for data: 945665500\nI0818 15:07:03.280340 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 15:07:03.280354 21584 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 15:07:03.280360 21584 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 15:07:03.280369 21584 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 15:07:03.280611 21584 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0818 15:07:03.280627 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.280632 21584 net.cpp:165] Memory required for data: 949761500\nI0818 15:07:03.280643 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:03.280658 21584 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 15:07:03.280666 21584 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 15:07:03.280674 21584 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.280731 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:03.280879 21584 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 15:07:03.280891 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.280897 21584 net.cpp:165] Memory required for data: 953857500\nI0818 15:07:03.280906 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 15:07:03.280915 21584 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 15:07:03.280920 21584 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 15:07:03.280931 21584 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.280942 21584 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 15:07:03.280949 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.280954 21584 net.cpp:165] Memory required for data: 957953500\nI0818 15:07:03.280958 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 15:07:03.280971 21584 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 15:07:03.280975 21584 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 15:07:03.280987 21584 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 15:07:03.281442 21584 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 15:07:03.281456 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.281461 21584 net.cpp:165] Memory required for data: 962049500\nI0818 15:07:03.281471 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 15:07:03.281479 21584 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 15:07:03.281486 21584 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 15:07:03.281497 21584 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 15:07:03.281754 21584 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 15:07:03.281767 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.281774 21584 net.cpp:165] Memory required for data: 966145500\nI0818 15:07:03.281783 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:03.281795 21584 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 15:07:03.281802 21584 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 15:07:03.281810 21584 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.281865 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:03.282016 21584 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 15:07:03.282028 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282033 21584 net.cpp:165] Memory required for data: 970241500\nI0818 15:07:03.282042 21584 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 15:07:03.282055 21584 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 15:07:03.282061 21584 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 15:07:03.282068 21584 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:03.282088 21584 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 15:07:03.282116 21584 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 15:07:03.282126 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282131 21584 net.cpp:165] Memory required for data: 974337500\nI0818 15:07:03.282136 21584 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 15:07:03.282157 21584 net.cpp:100] Creating Layer L2_b3_relu\nI0818 15:07:03.282165 21584 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 15:07:03.282171 21584 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.282181 21584 net.cpp:150] Setting up L2_b3_relu\nI0818 15:07:03.282188 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282193 21584 net.cpp:165] Memory required for data: 978433500\nI0818 15:07:03.282199 21584 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.282209 21584 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.282215 21584 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 15:07:03.282222 21584 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:03.282232 21584 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:03.282281 21584 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.282294 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282300 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282305 21584 net.cpp:165] Memory required for data: 986625500\nI0818 15:07:03.282310 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 15:07:03.282322 21584 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 15:07:03.282328 21584 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:03.282341 21584 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 15:07:03.282806 21584 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 15:07:03.282821 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.282826 21584 net.cpp:165] Memory required for data: 990721500\nI0818 15:07:03.282835 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 15:07:03.282845 21584 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 15:07:03.282851 21584 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 15:07:03.282863 21584 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0818 15:07:03.283109 21584 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 15:07:03.283123 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.283128 21584 net.cpp:165] Memory required for data: 994817500\nI0818 15:07:03.283138 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:03.283149 21584 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 15:07:03.283155 21584 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 15:07:03.283164 21584 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.283221 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:03.283367 21584 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 15:07:03.283380 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.283385 21584 net.cpp:165] Memory required for data: 998913500\nI0818 15:07:03.283394 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 15:07:03.283402 21584 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0818 15:07:03.283409 21584 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 15:07:03.283419 21584 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.283429 21584 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 15:07:03.283437 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.283442 21584 net.cpp:165] Memory required for data: 1003009500\nI0818 15:07:03.283447 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 15:07:03.283468 21584 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 15:07:03.283474 21584 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 15:07:03.283485 21584 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 15:07:03.283949 21584 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 15:07:03.283964 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.283969 21584 net.cpp:165] Memory required for data: 1007105500\nI0818 15:07:03.283978 21584 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 15:07:03.283988 21584 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 15:07:03.283995 21584 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 15:07:03.284003 21584 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 15:07:03.284250 21584 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 15:07:03.284263 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284268 21584 net.cpp:165] Memory required for data: 1011201500\nI0818 15:07:03.284278 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:03.284287 21584 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 15:07:03.284294 21584 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 15:07:03.284306 21584 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.284360 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:03.284507 21584 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 15:07:03.284519 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284524 21584 net.cpp:165] Memory required for data: 1015297500\nI0818 15:07:03.284533 21584 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 15:07:03.284543 21584 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 15:07:03.284549 21584 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 15:07:03.284556 21584 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:03.284567 21584 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 15:07:03.284595 21584 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 15:07:03.284605 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284608 21584 net.cpp:165] Memory required for data: 1019393500\nI0818 15:07:03.284615 21584 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 15:07:03.284624 21584 net.cpp:100] Creating Layer L2_b4_relu\nI0818 15:07:03.284631 21584 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0818 15:07:03.284638 21584 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.284647 21584 net.cpp:150] Setting up L2_b4_relu\nI0818 15:07:03.284662 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284667 21584 net.cpp:165] Memory required for data: 1023489500\nI0818 15:07:03.284672 21584 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.284682 21584 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.284688 21584 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 15:07:03.284698 21584 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:03.284708 21584 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:03.284751 21584 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.284766 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284775 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.284778 21584 net.cpp:165] Memory required for data: 1031681500\nI0818 15:07:03.284785 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 15:07:03.284795 21584 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 15:07:03.284801 21584 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:03.284811 21584 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 15:07:03.285272 21584 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 15:07:03.285292 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.285298 21584 net.cpp:165] Memory required for data: 1035777500\nI0818 15:07:03.285307 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 15:07:03.285320 21584 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 15:07:03.285326 21584 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0818 15:07:03.285334 21584 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 15:07:03.285581 21584 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 15:07:03.285594 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.285600 21584 net.cpp:165] Memory required for data: 1039873500\nI0818 15:07:03.285610 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:03.285619 21584 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 15:07:03.285626 21584 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 15:07:03.285636 21584 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.285698 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:03.285850 21584 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 15:07:03.285862 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.285867 21584 net.cpp:165] Memory required for data: 1043969500\nI0818 15:07:03.285876 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0818 15:07:03.285886 21584 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 15:07:03.285892 21584 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 15:07:03.285902 21584 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.285912 21584 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 15:07:03.285919 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.285924 21584 net.cpp:165] Memory required for data: 1048065500\nI0818 15:07:03.285929 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 15:07:03.285943 21584 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 15:07:03.285949 21584 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 15:07:03.285959 21584 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 15:07:03.286415 21584 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 15:07:03.286429 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.286434 21584 
net.cpp:165] Memory required for data: 1052161500\nI0818 15:07:03.286443 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 15:07:03.286455 21584 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 15:07:03.286463 21584 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 15:07:03.286470 21584 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 15:07:03.286725 21584 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 15:07:03.286738 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.286744 21584 net.cpp:165] Memory required for data: 1056257500\nI0818 15:07:03.286756 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:03.286764 21584 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 15:07:03.286770 21584 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 15:07:03.286782 21584 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.286836 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:03.286983 21584 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0818 15:07:03.286996 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287001 21584 net.cpp:165] Memory required for data: 1060353500\nI0818 15:07:03.287010 21584 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 15:07:03.287019 21584 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 15:07:03.287026 21584 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 15:07:03.287034 21584 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:03.287045 21584 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 15:07:03.287071 21584 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 15:07:03.287081 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287092 21584 net.cpp:165] Memory required for data: 1064449500\nI0818 15:07:03.287098 21584 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 15:07:03.287109 21584 net.cpp:100] Creating 
Layer L2_b5_relu\nI0818 15:07:03.287116 21584 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 15:07:03.287122 21584 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.287132 21584 net.cpp:150] Setting up L2_b5_relu\nI0818 15:07:03.287139 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287143 21584 net.cpp:165] Memory required for data: 1068545500\nI0818 15:07:03.287148 21584 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.287158 21584 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.287164 21584 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 15:07:03.287171 21584 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:03.287181 21584 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:03.287225 21584 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.287240 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287247 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287251 21584 net.cpp:165] Memory required for data: 1076737500\nI0818 15:07:03.287256 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 15:07:03.287268 21584 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 15:07:03.287274 21584 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:03.287284 21584 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 15:07:03.287752 21584 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 15:07:03.287767 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.287772 21584 net.cpp:165] Memory required for data: 1080833500\nI0818 15:07:03.287781 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 15:07:03.287794 21584 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0818 15:07:03.287801 21584 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 15:07:03.287809 21584 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 15:07:03.288058 21584 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 15:07:03.288070 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.288075 21584 net.cpp:165] Memory required for data: 1084929500\nI0818 15:07:03.288085 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:03.288095 21584 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 15:07:03.288101 21584 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 15:07:03.288112 21584 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.288167 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:03.288316 21584 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 15:07:03.288328 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.288333 21584 net.cpp:165] Memory required for data: 1089025500\nI0818 15:07:03.288342 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 15:07:03.288350 21584 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 15:07:03.288357 21584 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 15:07:03.288367 21584 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.288378 21584 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 15:07:03.288385 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.288390 21584 net.cpp:165] Memory required for data: 1093121500\nI0818 15:07:03.288395 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 15:07:03.288409 21584 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 15:07:03.288415 21584 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 15:07:03.288424 21584 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 15:07:03.288893 21584 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 15:07:03.288914 21584 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.288920 21584 net.cpp:165] Memory required for data: 1097217500\nI0818 15:07:03.288929 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 15:07:03.288941 21584 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 15:07:03.288949 21584 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 15:07:03.288957 21584 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 15:07:03.289196 21584 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 15:07:03.289208 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289213 21584 net.cpp:165] Memory required for data: 1101313500\nI0818 15:07:03.289224 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:03.289233 21584 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 15:07:03.289239 21584 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 15:07:03.289247 21584 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.289304 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:03.289448 21584 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 15:07:03.289463 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289469 21584 net.cpp:165] Memory required for data: 1105409500\nI0818 15:07:03.289477 21584 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 15:07:03.289486 21584 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 15:07:03.289492 21584 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 15:07:03.289499 21584 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:03.289510 21584 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 15:07:03.289536 21584 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 15:07:03.289546 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289551 21584 net.cpp:165] Memory required for data: 1109505500\nI0818 15:07:03.289556 21584 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0818 15:07:03.289563 21584 net.cpp:100] Creating Layer L2_b6_relu\nI0818 15:07:03.289569 21584 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 15:07:03.289579 21584 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.289589 21584 net.cpp:150] Setting up L2_b6_relu\nI0818 15:07:03.289597 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289602 21584 net.cpp:165] Memory required for data: 1113601500\nI0818 15:07:03.289605 21584 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.289613 21584 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.289618 21584 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 15:07:03.289628 21584 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:03.289638 21584 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:03.289687 21584 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.289700 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289707 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.289711 21584 net.cpp:165] Memory required for data: 1121793500\nI0818 15:07:03.289716 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 15:07:03.289731 21584 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 15:07:03.289738 21584 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:03.289748 21584 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 15:07:03.290215 21584 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 15:07:03.290228 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.290233 21584 net.cpp:165] Memory required for data: 1125889500\nI0818 15:07:03.290242 21584 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0818 15:07:03.290256 21584 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 15:07:03.290268 21584 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 15:07:03.290277 21584 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 15:07:03.290525 21584 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 15:07:03.290539 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.290544 21584 net.cpp:165] Memory required for data: 1129985500\nI0818 15:07:03.290555 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:03.290563 21584 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 15:07:03.290570 21584 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 15:07:03.290580 21584 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.290637 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:03.290791 21584 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 15:07:03.290805 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.290810 21584 net.cpp:165] Memory required for data: 1134081500\nI0818 15:07:03.290819 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 15:07:03.290828 21584 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 15:07:03.290834 21584 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 15:07:03.290846 21584 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.290858 21584 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 15:07:03.290864 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.290868 21584 net.cpp:165] Memory required for data: 1138177500\nI0818 15:07:03.290874 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 15:07:03.290887 21584 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 15:07:03.290894 21584 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 15:07:03.290902 21584 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 15:07:03.291364 21584 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 15:07:03.291378 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.291384 21584 net.cpp:165] Memory required for data: 1142273500\nI0818 15:07:03.291393 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 15:07:03.291402 21584 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 15:07:03.291409 21584 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 15:07:03.291420 21584 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 15:07:03.291676 21584 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 15:07:03.291688 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.291693 21584 net.cpp:165] Memory required for data: 1146369500\nI0818 15:07:03.291704 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:03.291713 21584 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 15:07:03.291720 21584 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 15:07:03.291728 21584 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.291787 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:03.291935 21584 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 15:07:03.291951 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.291957 21584 net.cpp:165] Memory required for data: 1150465500\nI0818 15:07:03.291965 21584 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 15:07:03.291975 21584 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 15:07:03.291981 21584 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 15:07:03.291988 21584 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:03.291996 21584 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 15:07:03.292026 21584 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 15:07:03.292037 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.292042 21584 net.cpp:165] Memory required for 
data: 1154561500\nI0818 15:07:03.292047 21584 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 15:07:03.292054 21584 net.cpp:100] Creating Layer L2_b7_relu\nI0818 15:07:03.292060 21584 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 15:07:03.292076 21584 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.292088 21584 net.cpp:150] Setting up L2_b7_relu\nI0818 15:07:03.292094 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.292098 21584 net.cpp:165] Memory required for data: 1158657500\nI0818 15:07:03.292104 21584 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.292110 21584 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.292115 21584 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 15:07:03.292125 21584 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:03.292136 21584 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:03.292181 21584 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.292192 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.292199 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.292203 21584 net.cpp:165] Memory required for data: 1166849500\nI0818 15:07:03.292208 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 15:07:03.292223 21584 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 15:07:03.292230 21584 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:03.292239 21584 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 15:07:03.292718 21584 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 15:07:03.292733 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.292738 21584 net.cpp:165] Memory required for data: 
1170945500\nI0818 15:07:03.292747 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 15:07:03.292759 21584 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 15:07:03.292767 21584 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 15:07:03.292775 21584 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 15:07:03.293021 21584 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 15:07:03.293033 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.293040 21584 net.cpp:165] Memory required for data: 1175041500\nI0818 15:07:03.293050 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:03.293058 21584 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 15:07:03.293064 21584 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 15:07:03.293076 21584 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.293131 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:03.293285 21584 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0818 15:07:03.293298 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.293303 21584 net.cpp:165] Memory required for data: 1179137500\nI0818 15:07:03.293313 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 15:07:03.293321 21584 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 15:07:03.293328 21584 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 15:07:03.293334 21584 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.293344 21584 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 15:07:03.293351 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.293356 21584 net.cpp:165] Memory required for data: 1183233500\nI0818 15:07:03.293361 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 15:07:03.293375 21584 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 15:07:03.293381 21584 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 15:07:03.293392 21584 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 15:07:03.293864 21584 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 15:07:03.293879 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.293884 21584 net.cpp:165] Memory required for data: 1187329500\nI0818 15:07:03.293892 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 15:07:03.293905 21584 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 15:07:03.293920 21584 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 15:07:03.293932 21584 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 15:07:03.294183 21584 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 15:07:03.294196 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.294203 21584 net.cpp:165] Memory required for data: 1191425500\nI0818 15:07:03.294212 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:03.294221 21584 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 15:07:03.294229 21584 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0818 15:07:03.294236 21584 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.294296 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:03.294443 21584 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 15:07:03.294456 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.294461 21584 net.cpp:165] Memory required for data: 1195521500\nI0818 15:07:03.294469 21584 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 15:07:03.294481 21584 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 15:07:03.294488 21584 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 15:07:03.294495 21584 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:03.294503 21584 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 15:07:03.294533 21584 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 15:07:03.294543 21584 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 15:07:03.294548 21584 net.cpp:165] Memory required for data: 1199617500\nI0818 15:07:03.294553 21584 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 15:07:03.294560 21584 net.cpp:100] Creating Layer L2_b8_relu\nI0818 15:07:03.294566 21584 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 15:07:03.294576 21584 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.294586 21584 net.cpp:150] Setting up L2_b8_relu\nI0818 15:07:03.294594 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.294598 21584 net.cpp:165] Memory required for data: 1203713500\nI0818 15:07:03.294603 21584 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.294610 21584 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.294615 21584 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 15:07:03.294626 21584 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:03.294656 21584 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:03.294706 21584 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.294719 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.294726 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.294730 21584 net.cpp:165] Memory required for data: 1211905500\nI0818 15:07:03.294736 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 15:07:03.294751 21584 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 15:07:03.294759 21584 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:03.294772 21584 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 15:07:03.295243 21584 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 15:07:03.295256 21584 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 15:07:03.295261 21584 net.cpp:165] Memory required for data: 1216001500\nI0818 15:07:03.295270 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 15:07:03.295284 21584 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 15:07:03.295290 21584 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 15:07:03.295301 21584 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 15:07:03.295552 21584 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 15:07:03.295564 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.295569 21584 net.cpp:165] Memory required for data: 1220097500\nI0818 15:07:03.295588 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:03.295598 21584 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 15:07:03.295604 21584 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 15:07:03.295613 21584 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.295678 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:03.295827 21584 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0818 15:07:03.295840 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.295845 21584 net.cpp:165] Memory required for data: 1224193500\nI0818 15:07:03.295855 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 15:07:03.295866 21584 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 15:07:03.295872 21584 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 15:07:03.295881 21584 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.295891 21584 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 15:07:03.295897 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.295902 21584 net.cpp:165] Memory required for data: 1228289500\nI0818 15:07:03.295907 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 15:07:03.295922 21584 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 15:07:03.295928 21584 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 15:07:03.295936 21584 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 15:07:03.296399 21584 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 15:07:03.296413 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.296419 21584 net.cpp:165] Memory required for data: 1232385500\nI0818 15:07:03.296428 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 15:07:03.296440 21584 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 15:07:03.296447 21584 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 15:07:03.296455 21584 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 15:07:03.296718 21584 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 15:07:03.296737 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.296743 21584 net.cpp:165] Memory required for data: 1236481500\nI0818 15:07:03.296785 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:03.296799 21584 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 15:07:03.296807 21584 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0818 15:07:03.296814 21584 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.296871 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:03.297019 21584 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 15:07:03.297031 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.297036 21584 net.cpp:165] Memory required for data: 1240577500\nI0818 15:07:03.297045 21584 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 15:07:03.297055 21584 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 15:07:03.297062 21584 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 15:07:03.297070 21584 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:03.297082 21584 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 15:07:03.297111 21584 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0818 15:07:03.297122 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.297127 21584 net.cpp:165] Memory required for data: 1244673500\nI0818 15:07:03.297133 21584 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 15:07:03.297142 21584 net.cpp:100] Creating Layer L2_b9_relu\nI0818 15:07:03.297147 21584 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 15:07:03.297154 21584 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.297163 21584 net.cpp:150] Setting up L2_b9_relu\nI0818 15:07:03.297170 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.297175 21584 net.cpp:165] Memory required for data: 1248769500\nI0818 15:07:03.297180 21584 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.297197 21584 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.297204 21584 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 15:07:03.297211 21584 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:03.297221 21584 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:03.297273 21584 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.297286 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.297292 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.297297 21584 net.cpp:165] Memory required for data: 1256961500\nI0818 15:07:03.297302 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 15:07:03.297313 21584 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 15:07:03.297320 21584 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:03.297333 21584 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 15:07:03.297816 21584 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0818 15:07:03.297830 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.297835 21584 net.cpp:165] Memory required for data: 1257985500\nI0818 15:07:03.297844 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 15:07:03.297854 21584 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 15:07:03.297860 21584 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 15:07:03.297873 21584 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 15:07:03.298137 21584 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 15:07:03.298154 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.298159 21584 net.cpp:165] Memory required for data: 1259009500\nI0818 15:07:03.298171 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:03.298179 21584 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 15:07:03.298185 21584 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 15:07:03.298193 21584 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.298249 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:03.298409 21584 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 15:07:03.298422 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.298427 21584 net.cpp:165] Memory required for data: 1260033500\nI0818 15:07:03.298436 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 15:07:03.298445 21584 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 15:07:03.298451 21584 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 15:07:03.298462 21584 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.298472 21584 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 15:07:03.298480 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.298485 21584 net.cpp:165] Memory required for data: 1261057500\nI0818 15:07:03.298490 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 15:07:03.298502 21584 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0818 15:07:03.298509 21584 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 15:07:03.298518 21584 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 15:07:03.298997 21584 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 15:07:03.299012 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.299017 21584 net.cpp:165] Memory required for data: 1262081500\nI0818 15:07:03.299026 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 15:07:03.299039 21584 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 15:07:03.299046 21584 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 15:07:03.299057 21584 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 15:07:03.299315 21584 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 15:07:03.299329 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.299334 21584 net.cpp:165] Memory required for data: 1263105500\nI0818 15:07:03.299351 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:03.299360 21584 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 15:07:03.299367 21584 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 15:07:03.299376 21584 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.299434 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:03.299590 21584 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 15:07:03.299603 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.299609 21584 net.cpp:165] Memory required for data: 1264129500\nI0818 15:07:03.299618 21584 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 15:07:03.299628 21584 net.cpp:100] Creating Layer L3_b1_pool\nI0818 15:07:03.299633 21584 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:03.299645 21584 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 15:07:03.299687 21584 net.cpp:150] Setting up L3_b1_pool\nI0818 15:07:03.299701 21584 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0818 15:07:03.299706 21584 net.cpp:165] Memory required for data: 1265153500\nI0818 15:07:03.299711 21584 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 15:07:03.299721 21584 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 15:07:03.299726 21584 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 15:07:03.299733 21584 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 15:07:03.299743 21584 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 15:07:03.299775 21584 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 15:07:03.299785 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.299789 21584 net.cpp:165] Memory required for data: 1266177500\nI0818 15:07:03.299794 21584 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 15:07:03.299803 21584 net.cpp:100] Creating Layer L3_b1_relu\nI0818 15:07:03.299808 21584 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 15:07:03.299815 21584 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.299824 21584 net.cpp:150] Setting up L3_b1_relu\nI0818 15:07:03.299831 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.299835 21584 net.cpp:165] Memory required for data: 1267201500\nI0818 15:07:03.299840 21584 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 15:07:03.299852 21584 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 15:07:03.299860 21584 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 15:07:03.301108 21584 net.cpp:150] Setting up L3_b1_zeros\nI0818 15:07:03.301129 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.301136 21584 net.cpp:165] Memory required for data: 1268225500\nI0818 15:07:03.301141 21584 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 15:07:03.301151 21584 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 15:07:03.301157 21584 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 15:07:03.301164 21584 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 
15:07:03.301172 21584 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 15:07:03.301215 21584 net.cpp:150] Setting up L3_b1_concat0\nI0818 15:07:03.301229 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.301234 21584 net.cpp:165] Memory required for data: 1270273500\nI0818 15:07:03.301239 21584 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.301249 21584 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.301255 21584 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 15:07:03.301264 21584 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:03.301273 21584 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:03.301326 21584 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.301338 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.301345 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.301376 21584 net.cpp:165] Memory required for data: 1274369500\nI0818 15:07:03.301383 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 15:07:03.301398 21584 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 15:07:03.301405 21584 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:03.301414 21584 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 15:07:03.303423 21584 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 15:07:03.303442 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.303447 21584 net.cpp:165] Memory required for data: 1276417500\nI0818 15:07:03.303457 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 15:07:03.303467 21584 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 15:07:03.303473 21584 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 15:07:03.303485 21584 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 
15:07:03.303763 21584 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 15:07:03.303781 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.303786 21584 net.cpp:165] Memory required for data: 1278465500\nI0818 15:07:03.303797 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:03.303807 21584 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 15:07:03.303813 21584 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 15:07:03.303822 21584 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.303879 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:03.304036 21584 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 15:07:03.304050 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.304055 21584 net.cpp:165] Memory required for data: 1280513500\nI0818 15:07:03.304064 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 15:07:03.304076 21584 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 15:07:03.304082 21584 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 15:07:03.304090 21584 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.304100 21584 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 15:07:03.304107 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.304112 21584 net.cpp:165] Memory required for data: 1282561500\nI0818 15:07:03.304116 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 15:07:03.304131 21584 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 15:07:03.304137 21584 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 15:07:03.304147 21584 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 15:07:03.305171 21584 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 15:07:03.305187 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.305192 21584 net.cpp:165] Memory required for data: 1284609500\nI0818 15:07:03.305202 21584 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0818 15:07:03.305214 21584 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 15:07:03.305222 21584 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 15:07:03.305233 21584 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 15:07:03.305495 21584 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 15:07:03.305507 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.305513 21584 net.cpp:165] Memory required for data: 1286657500\nI0818 15:07:03.305523 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:03.305533 21584 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 15:07:03.305539 21584 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 15:07:03.305550 21584 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.305608 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:03.305773 21584 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 15:07:03.305786 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.305793 21584 net.cpp:165] Memory required for data: 1288705500\nI0818 15:07:03.305801 21584 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 15:07:03.305811 21584 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 15:07:03.305825 21584 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 15:07:03.305840 21584 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:03.305848 21584 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 15:07:03.305886 21584 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 15:07:03.305897 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.305903 21584 net.cpp:165] Memory required for data: 1290753500\nI0818 15:07:03.305908 21584 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 15:07:03.305917 21584 net.cpp:100] Creating Layer L3_b2_relu\nI0818 15:07:03.305922 21584 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 15:07:03.305932 21584 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.305943 21584 net.cpp:150] Setting up L3_b2_relu\nI0818 15:07:03.305950 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.305954 21584 net.cpp:165] Memory required for data: 1292801500\nI0818 15:07:03.305959 21584 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.305966 21584 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.305972 21584 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 15:07:03.305979 21584 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:03.305989 21584 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:03.306040 21584 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.306051 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.306058 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.306063 21584 net.cpp:165] Memory required for data: 1296897500\nI0818 15:07:03.306068 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 15:07:03.306079 21584 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 15:07:03.306087 21584 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:03.306098 21584 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 15:07:03.307116 21584 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 15:07:03.307132 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.307137 21584 net.cpp:165] Memory required for data: 1298945500\nI0818 15:07:03.307147 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 15:07:03.307155 21584 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 15:07:03.307163 21584 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 15:07:03.307173 21584 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 15:07:03.307440 21584 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 15:07:03.307453 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.307458 21584 net.cpp:165] Memory required for data: 1300993500\nI0818 15:07:03.307468 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:03.307477 21584 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 15:07:03.307484 21584 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 15:07:03.307492 21584 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.307552 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:03.307713 21584 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 15:07:03.307736 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.307742 21584 net.cpp:165] Memory required for data: 1303041500\nI0818 15:07:03.307751 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 15:07:03.307760 21584 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 15:07:03.307766 21584 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 15:07:03.307773 21584 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.307783 21584 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 15:07:03.307790 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.307801 21584 net.cpp:165] Memory required for data: 1305089500\nI0818 15:07:03.307807 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 15:07:03.307821 21584 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 15:07:03.307827 21584 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 15:07:03.307837 21584 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 15:07:03.308858 21584 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 15:07:03.308873 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.308878 21584 net.cpp:165] Memory required for data: 1307137500\nI0818 15:07:03.308887 21584 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 15:07:03.308903 21584 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 15:07:03.308910 21584 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 15:07:03.308923 21584 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 15:07:03.309185 21584 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 15:07:03.309198 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309203 21584 net.cpp:165] Memory required for data: 1309185500\nI0818 15:07:03.309214 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:03.309223 21584 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 15:07:03.309229 21584 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 15:07:03.309242 21584 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.309299 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:03.309454 21584 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 15:07:03.309468 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309473 21584 net.cpp:165] Memory required for data: 1311233500\nI0818 15:07:03.309481 21584 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 15:07:03.309495 21584 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 15:07:03.309502 21584 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 15:07:03.309509 21584 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:03.309517 21584 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 15:07:03.309554 21584 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 15:07:03.309566 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309571 21584 net.cpp:165] Memory required for data: 1313281500\nI0818 15:07:03.309576 21584 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 15:07:03.309584 21584 net.cpp:100] Creating Layer L3_b3_relu\nI0818 15:07:03.309590 21584 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0818 15:07:03.309600 21584 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.309610 21584 net.cpp:150] Setting up L3_b3_relu\nI0818 15:07:03.309617 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309623 21584 net.cpp:165] Memory required for data: 1315329500\nI0818 15:07:03.309628 21584 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.309634 21584 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.309639 21584 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 15:07:03.309648 21584 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:03.309664 21584 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:03.309713 21584 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.309725 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309731 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.309736 21584 net.cpp:165] Memory required for data: 1319425500\nI0818 15:07:03.309741 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 15:07:03.309753 21584 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 15:07:03.309759 21584 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:03.309772 21584 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 15:07:03.310798 21584 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0818 15:07:03.310814 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.310819 21584 net.cpp:165] Memory required for data: 1321473500\nI0818 15:07:03.310828 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 15:07:03.310840 21584 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 15:07:03.310847 21584 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0818 15:07:03.310856 21584 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 15:07:03.311131 21584 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 15:07:03.311144 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.311149 21584 net.cpp:165] Memory required for data: 1323521500\nI0818 15:07:03.311161 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:03.311169 21584 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 15:07:03.311175 21584 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 15:07:03.311183 21584 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.311245 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:03.311401 21584 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 15:07:03.311417 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.311422 21584 net.cpp:165] Memory required for data: 1325569500\nI0818 15:07:03.311431 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 15:07:03.311440 21584 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 15:07:03.311446 21584 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 15:07:03.311453 21584 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.311465 21584 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 15:07:03.311471 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.311475 21584 net.cpp:165] Memory required for data: 1327617500\nI0818 15:07:03.311480 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 15:07:03.311496 21584 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0818 15:07:03.311501 21584 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 15:07:03.311511 21584 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 15:07:03.312544 21584 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 15:07:03.312559 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.312564 21584 net.cpp:165] Memory 
required for data: 1329665500\nI0818 15:07:03.312573 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 15:07:03.312583 21584 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 15:07:03.312593 21584 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 15:07:03.312602 21584 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 15:07:03.312876 21584 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 15:07:03.312891 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.312896 21584 net.cpp:165] Memory required for data: 1331713500\nI0818 15:07:03.312906 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:03.312916 21584 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 15:07:03.312922 21584 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 15:07:03.312933 21584 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.312993 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:03.313154 21584 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 15:07:03.313168 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.313172 21584 net.cpp:165] Memory required for data: 1333761500\nI0818 15:07:03.313181 21584 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 15:07:03.313194 21584 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 15:07:03.313201 21584 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 15:07:03.313208 21584 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:03.313216 21584 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0818 15:07:03.313257 21584 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 15:07:03.313273 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.313278 21584 net.cpp:165] Memory required for data: 1335809500\nI0818 15:07:03.313283 21584 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 15:07:03.313292 21584 net.cpp:100] Creating Layer L3_b4_relu\nI0818 
15:07:03.313298 21584 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 15:07:03.313308 21584 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.313318 21584 net.cpp:150] Setting up L3_b4_relu\nI0818 15:07:03.313325 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.313330 21584 net.cpp:165] Memory required for data: 1337857500\nI0818 15:07:03.313335 21584 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.313343 21584 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.313347 21584 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 15:07:03.313355 21584 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:03.313365 21584 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:03.313415 21584 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.313426 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.313432 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.313437 21584 net.cpp:165] Memory required for data: 1341953500\nI0818 15:07:03.313442 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 15:07:03.313454 21584 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 15:07:03.313460 21584 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:03.313472 21584 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0818 15:07:03.314504 21584 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 15:07:03.314519 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.314524 21584 net.cpp:165] Memory required for data: 1344001500\nI0818 15:07:03.314533 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 15:07:03.314546 21584 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 
15:07:03.314553 21584 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 15:07:03.314561 21584 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 15:07:03.315846 21584 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 15:07:03.315863 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.315870 21584 net.cpp:165] Memory required for data: 1346049500\nI0818 15:07:03.315881 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:03.315891 21584 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 15:07:03.315897 21584 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 15:07:03.315908 21584 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.315971 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:03.316135 21584 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 15:07:03.316148 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.316154 21584 net.cpp:165] Memory required for data: 1348097500\nI0818 15:07:03.316164 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 15:07:03.316176 21584 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 15:07:03.316182 21584 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 15:07:03.316190 21584 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.316200 21584 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 15:07:03.316207 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.316212 21584 net.cpp:165] Memory required for data: 1350145500\nI0818 15:07:03.316216 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0818 15:07:03.316231 21584 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 15:07:03.316237 21584 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 15:07:03.316256 21584 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 15:07:03.318291 21584 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 15:07:03.318308 21584 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 15:07:03.318315 21584 net.cpp:165] Memory required for data: 1352193500\nI0818 15:07:03.318323 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 15:07:03.318336 21584 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 15:07:03.318343 21584 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 15:07:03.318356 21584 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 15:07:03.318619 21584 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 15:07:03.318631 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.318637 21584 net.cpp:165] Memory required for data: 1354241500\nI0818 15:07:03.318647 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:03.318663 21584 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 15:07:03.318670 21584 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 15:07:03.318681 21584 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.318742 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:03.318903 21584 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 15:07:03.318917 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.318922 21584 net.cpp:165] Memory required for data: 1356289500\nI0818 15:07:03.318933 21584 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 15:07:03.318944 21584 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 15:07:03.318953 21584 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0818 15:07:03.318959 21584 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:03.318967 21584 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 15:07:03.319005 21584 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 15:07:03.319015 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.319020 21584 net.cpp:165] Memory required for data: 1358337500\nI0818 15:07:03.319026 21584 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 
15:07:03.319034 21584 net.cpp:100] Creating Layer L3_b5_relu\nI0818 15:07:03.319041 21584 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 15:07:03.319051 21584 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.319061 21584 net.cpp:150] Setting up L3_b5_relu\nI0818 15:07:03.319068 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.319073 21584 net.cpp:165] Memory required for data: 1360385500\nI0818 15:07:03.319077 21584 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.319085 21584 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.319090 21584 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 15:07:03.319098 21584 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:03.319108 21584 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:03.319156 21584 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.319167 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.319175 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.319180 21584 net.cpp:165] Memory required for data: 1364481500\nI0818 15:07:03.319185 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 15:07:03.319196 21584 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0818 15:07:03.319202 21584 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:03.319216 21584 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 15:07:03.320261 21584 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 15:07:03.320276 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.320281 21584 net.cpp:165] Memory required for data: 1366529500\nI0818 15:07:03.320291 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 15:07:03.320303 
21584 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 15:07:03.320318 21584 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 15:07:03.320327 21584 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 15:07:03.320590 21584 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 15:07:03.320602 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.320608 21584 net.cpp:165] Memory required for data: 1368577500\nI0818 15:07:03.320618 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:03.320627 21584 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 15:07:03.320634 21584 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 15:07:03.320642 21584 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.320710 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:03.320865 21584 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 15:07:03.320881 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.320886 21584 net.cpp:165] Memory required for data: 1370625500\nI0818 15:07:03.320895 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 15:07:03.320904 21584 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 15:07:03.320910 21584 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 15:07:03.320919 21584 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.320929 21584 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 15:07:03.320935 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.320940 21584 net.cpp:165] Memory required for data: 1372673500\nI0818 15:07:03.320945 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 15:07:03.320960 21584 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 15:07:03.320967 21584 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 15:07:03.320978 21584 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 15:07:03.322001 21584 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 
15:07:03.322016 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322021 21584 net.cpp:165] Memory required for data: 1374721500\nI0818 15:07:03.322031 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 15:07:03.322051 21584 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 15:07:03.322057 21584 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 15:07:03.322065 21584 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 15:07:03.322326 21584 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 15:07:03.322340 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322345 21584 net.cpp:165] Memory required for data: 1376769500\nI0818 15:07:03.322355 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:03.322368 21584 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 15:07:03.322376 21584 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 15:07:03.322383 21584 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.322441 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:03.322597 21584 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 15:07:03.322610 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322615 21584 net.cpp:165] Memory required for data: 1378817500\nI0818 15:07:03.322624 21584 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 15:07:03.322638 21584 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 15:07:03.322644 21584 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 15:07:03.322657 21584 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:03.322667 21584 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 15:07:03.322705 21584 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 15:07:03.322716 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322721 21584 net.cpp:165] Memory required for data: 1380865500\nI0818 15:07:03.322726 21584 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 15:07:03.322734 21584 net.cpp:100] Creating Layer L3_b6_relu\nI0818 15:07:03.322741 21584 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 15:07:03.322758 21584 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.322769 21584 net.cpp:150] Setting up L3_b6_relu\nI0818 15:07:03.322777 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322782 21584 net.cpp:165] Memory required for data: 1382913500\nI0818 15:07:03.322787 21584 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.322793 21584 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.322798 21584 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 15:07:03.322806 21584 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:03.322816 21584 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:03.322865 21584 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.322876 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322883 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.322888 21584 net.cpp:165] Memory required for data: 1387009500\nI0818 15:07:03.322893 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 15:07:03.322906 21584 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 15:07:03.322911 21584 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:03.322923 21584 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 15:07:03.323942 21584 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 15:07:03.323958 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.323963 21584 net.cpp:165] Memory required for data: 1389057500\nI0818 15:07:03.323972 21584 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 15:07:03.323984 21584 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 15:07:03.323992 21584 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 15:07:03.324000 21584 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 15:07:03.324270 21584 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 15:07:03.324282 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.324287 21584 net.cpp:165] Memory required for data: 1391105500\nI0818 15:07:03.324298 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:03.324307 21584 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 15:07:03.324313 21584 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 15:07:03.324321 21584 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.324383 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:03.324590 21584 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 15:07:03.324612 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.324621 21584 net.cpp:165] Memory required for data: 1393153500\nI0818 15:07:03.324638 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 15:07:03.324689 21584 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 15:07:03.324702 21584 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 15:07:03.324712 21584 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.324723 21584 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0818 15:07:03.324731 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.324736 21584 net.cpp:165] Memory required for data: 1395201500\nI0818 15:07:03.324741 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 15:07:03.324756 21584 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 15:07:03.324764 21584 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 15:07:03.324772 21584 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 
15:07:03.325799 21584 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 15:07:03.325814 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.325819 21584 net.cpp:165] Memory required for data: 1397249500\nI0818 15:07:03.325829 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 15:07:03.325839 21584 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 15:07:03.325852 21584 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 15:07:03.325865 21584 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 15:07:03.326131 21584 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 15:07:03.326144 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326149 21584 net.cpp:165] Memory required for data: 1399297500\nI0818 15:07:03.326159 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:03.326169 21584 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 15:07:03.326175 21584 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 15:07:03.326184 21584 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.326244 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:03.326402 21584 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 15:07:03.326418 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326423 21584 net.cpp:165] Memory required for data: 1401345500\nI0818 15:07:03.326432 21584 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 15:07:03.326442 21584 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0818 15:07:03.326448 21584 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0818 15:07:03.326455 21584 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:03.326463 21584 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 15:07:03.326500 21584 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 15:07:03.326512 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326517 21584 net.cpp:165] Memory 
required for data: 1403393500\nI0818 15:07:03.326522 21584 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 15:07:03.326530 21584 net.cpp:100] Creating Layer L3_b7_relu\nI0818 15:07:03.326536 21584 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 15:07:03.326544 21584 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.326553 21584 net.cpp:150] Setting up L3_b7_relu\nI0818 15:07:03.326560 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326565 21584 net.cpp:165] Memory required for data: 1405441500\nI0818 15:07:03.326570 21584 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.326581 21584 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.326587 21584 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 15:07:03.326594 21584 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:03.326604 21584 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:03.326655 21584 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.326671 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326679 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.326683 21584 net.cpp:165] Memory required for data: 1409537500\nI0818 15:07:03.326689 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0818 15:07:03.326700 21584 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0818 15:07:03.326707 21584 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:03.326716 21584 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 15:07:03.327741 21584 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 15:07:03.327756 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.327761 21584 net.cpp:165] Memory required for data: 
1411585500\nI0818 15:07:03.327771 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 15:07:03.327783 21584 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 15:07:03.327790 21584 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 15:07:03.327800 21584 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 15:07:03.328064 21584 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 15:07:03.328078 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.328083 21584 net.cpp:165] Memory required for data: 1413633500\nI0818 15:07:03.328099 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:03.328114 21584 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 15:07:03.328120 21584 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 15:07:03.328130 21584 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.328189 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:03.328351 21584 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 15:07:03.328364 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.328369 21584 net.cpp:165] Memory required for data: 1415681500\nI0818 15:07:03.328378 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 15:07:03.328387 21584 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 15:07:03.328392 21584 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 15:07:03.328403 21584 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.328414 21584 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 15:07:03.328421 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.328426 21584 net.cpp:165] Memory required for data: 1417729500\nI0818 15:07:03.328430 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 15:07:03.328443 21584 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 15:07:03.328447 21584 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 15:07:03.328459 21584 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 15:07:03.329474 21584 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 15:07:03.329489 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.329494 21584 net.cpp:165] Memory required for data: 1419777500\nI0818 15:07:03.329504 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 15:07:03.329519 21584 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 15:07:03.329525 21584 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 15:07:03.329533 21584 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 15:07:03.329804 21584 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 15:07:03.329818 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.329823 21584 net.cpp:165] Memory required for data: 1421825500\nI0818 15:07:03.329834 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:03.329864 21584 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 15:07:03.329874 21584 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 15:07:03.329883 21584 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.329946 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:03.330108 21584 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 15:07:03.330122 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.330127 21584 net.cpp:165] Memory required for data: 1423873500\nI0818 15:07:03.330137 21584 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 15:07:03.330145 21584 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 15:07:03.330152 21584 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 15:07:03.330159 21584 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:03.330170 21584 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 15:07:03.330204 21584 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 15:07:03.330216 21584 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 15:07:03.330221 21584 net.cpp:165] Memory required for data: 1425921500\nI0818 15:07:03.330226 21584 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 15:07:03.330237 21584 net.cpp:100] Creating Layer L3_b8_relu\nI0818 15:07:03.330245 21584 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 15:07:03.330251 21584 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.330261 21584 net.cpp:150] Setting up L3_b8_relu\nI0818 15:07:03.330268 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.330273 21584 net.cpp:165] Memory required for data: 1427969500\nI0818 15:07:03.330278 21584 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.330294 21584 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.330302 21584 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 15:07:03.330309 21584 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:03.330319 21584 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:03.330365 21584 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.330380 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.330387 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.330392 21584 net.cpp:165] Memory required for data: 1432065500\nI0818 15:07:03.330397 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 15:07:03.330410 21584 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 15:07:03.330415 21584 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:03.330425 21584 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 15:07:03.332445 21584 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 15:07:03.332463 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
15:07:03.332468 21584 net.cpp:165] Memory required for data: 1434113500\nI0818 15:07:03.332479 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 15:07:03.332492 21584 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 15:07:03.332499 21584 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 15:07:03.332510 21584 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 15:07:03.332784 21584 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 15:07:03.332798 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.332804 21584 net.cpp:165] Memory required for data: 1436161500\nI0818 15:07:03.332814 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:03.332824 21584 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 15:07:03.332830 21584 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 15:07:03.332841 21584 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.332901 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:03.333062 21584 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 15:07:03.333076 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.333081 21584 net.cpp:165] Memory required for data: 1438209500\nI0818 15:07:03.333089 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 15:07:03.333098 21584 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 15:07:03.333106 21584 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 15:07:03.333115 21584 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.333127 21584 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0818 15:07:03.333133 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.333138 21584 net.cpp:165] Memory required for data: 1440257500\nI0818 15:07:03.333143 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 15:07:03.333158 21584 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 15:07:03.333164 21584 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0818 15:07:03.333173 21584 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 15:07:03.334192 21584 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 15:07:03.334206 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.334211 21584 net.cpp:165] Memory required for data: 1442305500\nI0818 15:07:03.334220 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 15:07:03.334233 21584 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 15:07:03.334240 21584 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 15:07:03.334249 21584 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 15:07:03.334519 21584 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 15:07:03.334532 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.334537 21584 net.cpp:165] Memory required for data: 1444353500\nI0818 15:07:03.334556 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:03.334568 21584 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 15:07:03.334575 21584 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 15:07:03.334586 21584 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.334645 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:03.334810 21584 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 15:07:03.334823 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.334828 21584 net.cpp:165] Memory required for data: 1446401500\nI0818 15:07:03.334837 21584 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 15:07:03.334847 21584 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0818 15:07:03.334853 21584 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 15:07:03.334861 21584 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:03.334872 21584 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 15:07:03.334906 21584 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 
15:07:03.334918 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.334923 21584 net.cpp:165] Memory required for data: 1448449500\nI0818 15:07:03.334928 21584 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 15:07:03.334941 21584 net.cpp:100] Creating Layer L3_b9_relu\nI0818 15:07:03.334947 21584 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 15:07:03.334955 21584 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.334964 21584 net.cpp:150] Setting up L3_b9_relu\nI0818 15:07:03.334971 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.334976 21584 net.cpp:165] Memory required for data: 1450497500\nI0818 15:07:03.334981 21584 layer_factory.hpp:77] Creating layer post_pool\nI0818 15:07:03.334990 21584 net.cpp:100] Creating Layer post_pool\nI0818 15:07:03.334995 21584 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0818 15:07:03.335002 21584 net.cpp:408] post_pool -> post_pool\nI0818 15:07:03.335041 21584 net.cpp:150] Setting up post_pool\nI0818 15:07:03.335052 21584 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 15:07:03.335057 21584 net.cpp:165] Memory required for data: 1450529500\nI0818 15:07:03.335062 21584 layer_factory.hpp:77] Creating layer post_FC\nI0818 15:07:03.335160 21584 net.cpp:100] Creating Layer post_FC\nI0818 15:07:03.335172 21584 net.cpp:434] post_FC <- post_pool\nI0818 15:07:03.335183 21584 net.cpp:408] post_FC -> post_FC_top\nI0818 15:07:03.335454 21584 net.cpp:150] Setting up post_FC\nI0818 15:07:03.335470 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.335476 21584 net.cpp:165] Memory required for data: 1450534500\nI0818 15:07:03.335485 21584 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 15:07:03.335494 21584 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 15:07:03.335500 21584 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 15:07:03.335512 21584 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0818 15:07:03.335523 21584 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 15:07:03.335571 21584 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 15:07:03.335585 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.335592 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.335597 21584 net.cpp:165] Memory required for data: 1450544500\nI0818 15:07:03.335602 21584 layer_factory.hpp:77] Creating layer accuracy\nI0818 15:07:03.335655 21584 net.cpp:100] Creating Layer accuracy\nI0818 15:07:03.335669 21584 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 15:07:03.335677 21584 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 15:07:03.335686 21584 net.cpp:408] accuracy -> accuracy\nI0818 15:07:03.335736 21584 net.cpp:150] Setting up accuracy\nI0818 15:07:03.335750 21584 net.cpp:157] Top shape: (1)\nI0818 15:07:03.335755 21584 net.cpp:165] Memory required for data: 1450544504\nI0818 15:07:03.335762 21584 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:03.335777 21584 net.cpp:100] Creating Layer loss\nI0818 15:07:03.335784 21584 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 15:07:03.335791 21584 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 15:07:03.335803 21584 net.cpp:408] loss -> loss\nI0818 15:07:03.339813 21584 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:03.340967 21584 net.cpp:150] Setting up loss\nI0818 15:07:03.340991 21584 net.cpp:157] Top shape: (1)\nI0818 15:07:03.340997 21584 net.cpp:160]     with loss weight 1\nI0818 15:07:03.341085 21584 net.cpp:165] Memory required for data: 1450544508\nI0818 15:07:03.341095 21584 net.cpp:226] loss needs backward computation.\nI0818 15:07:03.341102 21584 net.cpp:228] accuracy does not need backward computation.\nI0818 15:07:03.341109 21584 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 15:07:03.341114 21584 net.cpp:226] post_FC needs backward 
computation.\nI0818 15:07:03.341120 21584 net.cpp:226] post_pool needs backward computation.\nI0818 15:07:03.341125 21584 net.cpp:226] L3_b9_relu needs backward computation.\nI0818 15:07:03.341130 21584 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.341135 21584 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.341140 21584 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.341145 21584 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.341150 21584 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.341154 21584 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.341158 21584 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.341163 21584 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.341169 21584 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 15:07:03.341174 21584 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 15:07:03.341179 21584 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.341184 21584 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.341189 21584 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.341195 21584 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.341200 21584 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.341205 21584 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 15:07:03.341210 21584 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.341215 21584 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.341222 21584 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 15:07:03.341226 21584 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 15:07:03.341231 21584 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0818 15:07:03.341238 21584 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.341243 21584 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.341248 21584 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.341253 21584 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.341258 21584 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.341262 21584 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.341267 21584 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.341272 21584 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 15:07:03.341279 21584 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 15:07:03.341284 21584 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.341289 21584 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.341295 21584 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.341300 21584 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.341305 21584 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.341318 21584 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.341323 21584 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.341329 21584 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.341338 21584 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 15:07:03.341344 21584 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 15:07:03.341349 21584 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 15:07:03.341356 21584 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.341361 21584 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.341367 21584 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.341372 21584 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.341377 21584 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.341382 21584 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.341387 21584 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.341392 21584 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 15:07:03.341398 21584 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 15:07:03.341403 21584 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.341409 21584 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.341414 21584 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.341419 21584 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.341424 21584 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.341429 21584 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.341434 21584 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.341439 21584 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.341445 21584 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 15:07:03.341450 21584 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 15:07:03.341455 21584 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 15:07:03.341460 21584 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.341466 21584 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.341471 21584 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.341476 21584 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.341481 21584 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.341486 
21584 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.341491 21584 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.341496 21584 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 15:07:03.341502 21584 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 15:07:03.341507 21584 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.341513 21584 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.341518 21584 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.341524 21584 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.341529 21584 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.341534 21584 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.341539 21584 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.341544 21584 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.341550 21584 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 15:07:03.341555 21584 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 15:07:03.341562 21584 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 15:07:03.341567 21584 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 15:07:03.341578 21584 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.341584 21584 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 15:07:03.341589 21584 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.341595 21584 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.341600 21584 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.341609 21584 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.341614 21584 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 
15:07:03.341619 21584 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.341625 21584 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.341630 21584 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 15:07:03.341636 21584 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 15:07:03.341641 21584 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.341647 21584 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.341662 21584 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.341668 21584 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.341675 21584 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.341680 21584 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.341684 21584 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.341691 21584 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.341696 21584 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 15:07:03.341702 21584 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 15:07:03.341707 21584 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.341713 21584 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.341718 21584 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.341724 21584 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.341729 21584 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.341734 21584 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 15:07:03.341739 21584 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.341744 21584 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.341750 21584 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 15:07:03.341755 21584 net.cpp:226] L2_b7_relu needs backward computation.\nI0818 15:07:03.341761 21584 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0818 15:07:03.341766 21584 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.341771 21584 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.341778 21584 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.341783 21584 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.341787 21584 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.341792 21584 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.341799 21584 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.341804 21584 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 15:07:03.341809 21584 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 15:07:03.341814 21584 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.341820 21584 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.341825 21584 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.341831 21584 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.341836 21584 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.341841 21584 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.341856 21584 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.341862 21584 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.341868 21584 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 15:07:03.341874 21584 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 15:07:03.341879 21584 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0818 15:07:03.341886 21584 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.341891 21584 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.341897 21584 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.341902 21584 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.341907 21584 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.341912 21584 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.341917 21584 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.341923 21584 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 15:07:03.341929 21584 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 15:07:03.341934 21584 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.341940 21584 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.341945 21584 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.341951 21584 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.341958 21584 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.341962 21584 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.341967 21584 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.341974 21584 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.341979 21584 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 15:07:03.341984 21584 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 15:07:03.341990 21584 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 15:07:03.341996 21584 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.342002 21584 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.342011 21584 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.342017 21584 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.342022 21584 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.342028 21584 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.342033 21584 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.342039 21584 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 15:07:03.342046 21584 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 15:07:03.342051 21584 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.342057 21584 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.342062 21584 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.342068 21584 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.342073 21584 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.342079 21584 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.342084 21584 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.342089 21584 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.342095 21584 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 15:07:03.342100 21584 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 15:07:03.342108 21584 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 15:07:03.342113 21584 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 15:07:03.342118 21584 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.342129 21584 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 15:07:03.342135 21584 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.342140 21584 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.342146 21584 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.342152 21584 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.342157 21584 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 15:07:03.342164 21584 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.342169 21584 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.342175 21584 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 15:07:03.342180 21584 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 15:07:03.342185 21584 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.342191 21584 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.342196 21584 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.342202 21584 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.342208 21584 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.342213 21584 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.342218 21584 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.342223 21584 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.342229 21584 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 15:07:03.342234 21584 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 15:07:03.342241 21584 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.342247 21584 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.342252 21584 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.342257 21584 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.342262 21584 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.342268 21584 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 
15:07:03.342273 21584 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.342279 21584 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.342284 21584 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 15:07:03.342290 21584 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 15:07:03.342296 21584 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 15:07:03.342303 21584 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.342308 21584 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.342314 21584 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.342319 21584 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.342324 21584 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.342330 21584 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.342335 21584 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.342341 21584 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 15:07:03.342347 21584 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 15:07:03.342352 21584 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.342358 21584 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.342365 21584 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.342370 21584 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.342376 21584 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.342381 21584 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.342386 21584 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.342397 21584 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.342403 21584 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 15:07:03.342409 21584 net.cpp:226] L1_b5_relu needs backward computation.\nI0818 15:07:03.342416 21584 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0818 15:07:03.342422 21584 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.342427 21584 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.342432 21584 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.342437 21584 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.342443 21584 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.342448 21584 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.342454 21584 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.342459 21584 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 15:07:03.342465 21584 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 15:07:03.342471 21584 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.342478 21584 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.342483 21584 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.342489 21584 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.342494 21584 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.342500 21584 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.342505 21584 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.342511 21584 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.342517 21584 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 15:07:03.342522 21584 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 15:07:03.342528 21584 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0818 15:07:03.342535 21584 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.342540 21584 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.342545 21584 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.342551 21584 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.342556 21584 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.342562 21584 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.342567 21584 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.342573 21584 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 15:07:03.342579 21584 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 15:07:03.342584 21584 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.342591 21584 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.342597 21584 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.342602 21584 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.342607 21584 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.342612 21584 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.342618 21584 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.342624 21584 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.342630 21584 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 15:07:03.342635 21584 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 15:07:03.342641 21584 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.342648 21584 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.342661 21584 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.342667 21584 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.342679 21584 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.342684 21584 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 15:07:03.342690 21584 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.342695 21584 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.342701 21584 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 15:07:03.342707 21584 net.cpp:226] pre_relu needs backward computation.\nI0818 15:07:03.342712 21584 net.cpp:226] pre_scale needs backward computation.\nI0818 15:07:03.342717 21584 net.cpp:226] pre_bn needs backward computation.\nI0818 15:07:03.342726 21584 net.cpp:226] pre_conv needs backward computation.\nI0818 15:07:03.342733 21584 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 15:07:03.342741 21584 net.cpp:228] dataLayer does not need backward computation.\nI0818 15:07:03.342744 21584 net.cpp:270] This network produces output accuracy\nI0818 15:07:03.342751 21584 net.cpp:270] This network produces output loss\nI0818 15:07:03.343127 21584 net.cpp:283] Network initialization done.\nI0818 15:07:03.352582 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:03.352624 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:03.352700 21584 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0818 15:07:03.353082 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0818 15:07:03.353102 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0818 15:07:03.353113 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0818 15:07:03.353123 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0818 15:07:03.353133 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0818 15:07:03.353142 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0818 15:07:03.353150 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0818 15:07:03.353159 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0818 15:07:03.353168 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0818 15:07:03.353178 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0818 15:07:03.353186 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0818 15:07:03.353194 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0818 15:07:03.353204 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0818 15:07:03.353211 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0818 15:07:03.353220 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0818 15:07:03.353229 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0818 15:07:03.353238 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0818 15:07:03.353246 21584 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0818 15:07:03.353255 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0818 15:07:03.353274 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0818 15:07:03.353284 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0818 15:07:03.353293 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0818 15:07:03.353305 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0818 15:07:03.353314 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0818 15:07:03.353323 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0818 15:07:03.353332 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0818 15:07:03.353340 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0818 15:07:03.353348 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0818 15:07:03.353358 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0818 15:07:03.353365 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0818 15:07:03.353374 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0818 15:07:03.353384 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0818 15:07:03.353392 21584 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0818 15:07:03.353400 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0818 15:07:03.353410 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0818 15:07:03.353417 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0818 15:07:03.353426 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0818 15:07:03.353435 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0818 15:07:03.353444 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0818 15:07:03.353452 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0818 15:07:03.353463 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0818 15:07:03.353472 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0818 15:07:03.353480 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0818 15:07:03.353489 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0818 15:07:03.353498 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0818 15:07:03.353507 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0818 15:07:03.353515 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0818 15:07:03.353523 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0818 15:07:03.353533 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0818 15:07:03.353540 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0818 15:07:03.353557 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0818 15:07:03.353566 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0818 15:07:03.353575 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0818 15:07:03.353585 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0818 15:07:03.353593 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0818 15:07:03.353601 21584 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0818 15:07:03.355276 21584 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: t\nI0818 15:07:03.356884 21584 layer_factory.hpp:77] Creating layer dataLayer\nI0818 15:07:03.357125 21584 net.cpp:100] Creating Layer dataLayer\nI0818 15:07:03.357142 21584 net.cpp:408] dataLayer -> data_top\nI0818 15:07:03.357161 21584 net.cpp:408] dataLayer -> label\nI0818 15:07:03.357180 21584 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI0818 15:07:03.414206 21591 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI0818 15:07:03.414502 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:03.421893 21584 net.cpp:150] Setting up dataLayer\nI0818 15:07:03.421916 21584 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0818 15:07:03.421926 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.421931 21584 net.cpp:165] Memory required for data: 1536500\nI0818 15:07:03.421937 21584 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0818 15:07:03.421947 21584 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0818 15:07:03.421953 21584 net.cpp:434] label_dataLayer_1_split <- label\nI0818 15:07:03.422013 21584 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0818 15:07:03.422030 21584 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0818 15:07:03.422155 21584 net.cpp:150] Setting up label_dataLayer_1_split\nI0818 15:07:03.422171 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.422179 21584 net.cpp:157] Top shape: 125 (125)\nI0818 15:07:03.422184 21584 net.cpp:165] Memory required for data: 1537500\nI0818 15:07:03.422189 21584 layer_factory.hpp:77] Creating layer 
pre_conv\nI0818 15:07:03.422206 21584 net.cpp:100] Creating Layer pre_conv\nI0818 15:07:03.422214 21584 net.cpp:434] pre_conv <- data_top\nI0818 15:07:03.422225 21584 net.cpp:408] pre_conv -> pre_conv_top\nI0818 15:07:03.422663 21584 net.cpp:150] Setting up pre_conv\nI0818 15:07:03.422690 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.422698 21584 net.cpp:165] Memory required for data: 9729500\nI0818 15:07:03.422713 21584 layer_factory.hpp:77] Creating layer pre_bn\nI0818 15:07:03.422722 21584 net.cpp:100] Creating Layer pre_bn\nI0818 15:07:03.422729 21584 net.cpp:434] pre_bn <- pre_conv_top\nI0818 15:07:03.422768 21584 net.cpp:408] pre_bn -> pre_bn_top\nI0818 15:07:03.423117 21584 net.cpp:150] Setting up pre_bn\nI0818 15:07:03.423132 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.423138 21584 net.cpp:165] Memory required for data: 17921500\nI0818 15:07:03.423157 21584 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:03.423172 21584 net.cpp:100] Creating Layer pre_scale\nI0818 15:07:03.423179 21584 net.cpp:434] pre_scale <- pre_bn_top\nI0818 15:07:03.423187 21584 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0818 15:07:03.423254 21584 layer_factory.hpp:77] Creating layer pre_scale\nI0818 15:07:03.423427 21584 net.cpp:150] Setting up pre_scale\nI0818 15:07:03.423444 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.423449 21584 net.cpp:165] Memory required for data: 26113500\nI0818 15:07:03.423460 21584 layer_factory.hpp:77] Creating layer pre_relu\nI0818 15:07:03.423471 21584 net.cpp:100] Creating Layer pre_relu\nI0818 15:07:03.423477 21584 net.cpp:434] pre_relu <- pre_bn_top\nI0818 15:07:03.423485 21584 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0818 15:07:03.423497 21584 net.cpp:150] Setting up pre_relu\nI0818 15:07:03.423506 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.423509 21584 net.cpp:165] Memory required for data: 34305500\nI0818 15:07:03.423514 
21584 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0818 15:07:03.423537 21584 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0818 15:07:03.423544 21584 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0818 15:07:03.423552 21584 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0818 15:07:03.423563 21584 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0818 15:07:03.423619 21584 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0818 15:07:03.423630 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.423636 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.423641 21584 net.cpp:165] Memory required for data: 50689500\nI0818 15:07:03.423647 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0818 15:07:03.423671 21584 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0818 15:07:03.423677 21584 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0818 15:07:03.423687 21584 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0818 15:07:03.424093 21584 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0818 15:07:03.424113 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.424118 21584 net.cpp:165] Memory required for data: 58881500\nI0818 15:07:03.424129 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0818 15:07:03.424144 21584 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0818 15:07:03.424151 21584 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0818 15:07:03.424162 21584 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0818 15:07:03.424479 21584 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0818 15:07:03.424505 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.424515 21584 net.cpp:165] Memory required for data: 67073500\nI0818 15:07:03.424536 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:03.424551 21584 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI0818 
15:07:03.424561 21584 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0818 15:07:03.424577 21584 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.424747 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0818 15:07:03.425092 21584 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0818 15:07:03.425110 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.425117 21584 net.cpp:165] Memory required for data: 75265500\nI0818 15:07:03.425134 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0818 15:07:03.425148 21584 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0818 15:07:03.425153 21584 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0818 15:07:03.425161 21584 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.425171 21584 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0818 15:07:03.425181 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.425186 21584 net.cpp:165] Memory required for data: 83457500\nI0818 15:07:03.425191 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0818 15:07:03.425207 21584 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0818 15:07:03.425215 21584 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0818 15:07:03.425228 21584 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0818 15:07:03.425958 21584 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0818 15:07:03.425974 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.425981 21584 net.cpp:165] Memory required for data: 91649500\nI0818 15:07:03.425989 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0818 15:07:03.426002 21584 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0818 15:07:03.426008 21584 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0818 15:07:03.426017 21584 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0818 15:07:03.426292 21584 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0818 15:07:03.426309 21584 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 15:07:03.426316 21584 net.cpp:165] Memory required for data: 99841500\nI0818 15:07:03.426331 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:03.426340 21584 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0818 15:07:03.426347 21584 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0818 15:07:03.426354 21584 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.426415 21584 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0818 15:07:03.426573 21584 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0818 15:07:03.426586 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.426591 21584 net.cpp:165] Memory required for data: 108033500\nI0818 15:07:03.426600 21584 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0818 15:07:03.426612 21584 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0818 15:07:03.426620 21584 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0818 15:07:03.426626 21584 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0818 15:07:03.426635 21584 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0818 15:07:03.426682 21584 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0818 15:07:03.426694 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.426699 21584 net.cpp:165] Memory required for data: 116225500\nI0818 15:07:03.426704 21584 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0818 15:07:03.426712 21584 net.cpp:100] Creating Layer L1_b1_relu\nI0818 15:07:03.426717 21584 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0818 15:07:03.426724 21584 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.426734 21584 net.cpp:150] Setting up L1_b1_relu\nI0818 15:07:03.426741 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.426746 21584 net.cpp:165] Memory required for data: 124417500\nI0818 15:07:03.426750 21584 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.426760 21584 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.426765 21584 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0818 15:07:03.426775 21584 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:03.426786 21584 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:03.426832 21584 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0818 15:07:03.426841 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.426857 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.426862 21584 net.cpp:165] Memory required for data: 140801500\nI0818 15:07:03.426868 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0818 15:07:03.426882 21584 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0818 15:07:03.426889 21584 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0818 15:07:03.426898 21584 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0818 15:07:03.427250 21584 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0818 15:07:03.427265 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.427270 21584 net.cpp:165] Memory required for data: 148993500\nI0818 15:07:03.427279 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0818 15:07:03.427289 21584 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0818 15:07:03.427299 21584 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0818 15:07:03.427307 21584 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0818 15:07:03.427623 21584 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0818 15:07:03.427637 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.427644 21584 net.cpp:165] Memory required for data: 157185500\nI0818 15:07:03.427659 21584 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0818 15:07:03.427669 21584 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0818 15:07:03.427675 21584 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0818 15:07:03.427686 21584 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.427758 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0818 15:07:03.428133 21584 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0818 15:07:03.428148 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.428153 21584 net.cpp:165] Memory required for data: 165377500\nI0818 15:07:03.428162 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0818 15:07:03.428174 21584 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0818 15:07:03.428182 21584 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0818 15:07:03.428190 21584 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.428200 21584 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0818 15:07:03.428211 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.428216 21584 net.cpp:165] Memory required for data: 173569500\nI0818 15:07:03.428221 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0818 15:07:03.428236 21584 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0818 15:07:03.428243 21584 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0818 15:07:03.428259 21584 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0818 15:07:03.428658 21584 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0818 15:07:03.428674 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.428683 21584 net.cpp:165] Memory required for data: 181761500\nI0818 15:07:03.428691 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0818 15:07:03.428704 21584 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0818 15:07:03.428712 21584 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0818 15:07:03.428725 21584 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0818 15:07:03.429036 21584 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0818 15:07:03.429049 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429054 21584 net.cpp:165] Memory required for data: 189953500\nI0818 15:07:03.429071 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:03.429083 21584 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0818 15:07:03.429090 21584 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0818 15:07:03.429101 21584 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.429173 21584 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0818 15:07:03.429352 21584 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0818 15:07:03.429366 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429371 21584 net.cpp:165] Memory required for data: 198145500\nI0818 15:07:03.429392 21584 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0818 15:07:03.429402 21584 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0818 15:07:03.429409 21584 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0818 15:07:03.429419 21584 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0818 15:07:03.429430 21584 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0818 15:07:03.429474 21584 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0818 15:07:03.429484 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429489 21584 net.cpp:165] Memory required for data: 206337500\nI0818 15:07:03.429496 21584 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0818 15:07:03.429502 21584 net.cpp:100] Creating Layer L1_b2_relu\nI0818 15:07:03.429508 21584 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0818 15:07:03.429519 21584 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.429533 21584 net.cpp:150] Setting up L1_b2_relu\nI0818 15:07:03.429539 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429544 21584 net.cpp:165] Memory required for 
data: 214529500\nI0818 15:07:03.429549 21584 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.429556 21584 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.429563 21584 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0818 15:07:03.429572 21584 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:03.429582 21584 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:03.429637 21584 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0818 15:07:03.429648 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429663 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.429668 21584 net.cpp:165] Memory required for data: 230913500\nI0818 15:07:03.429677 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0818 15:07:03.429688 21584 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0818 15:07:03.429695 21584 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0818 15:07:03.429710 21584 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0818 15:07:03.430128 21584 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0818 15:07:03.430146 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.430151 21584 net.cpp:165] Memory required for data: 239105500\nI0818 15:07:03.430160 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0818 15:07:03.430172 21584 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0818 15:07:03.430179 21584 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0818 15:07:03.430232 21584 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0818 15:07:03.430552 21584 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0818 15:07:03.430567 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.430572 21584 net.cpp:165] Memory required for data: 
247297500\nI0818 15:07:03.430583 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:03.430598 21584 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0818 15:07:03.430606 21584 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0818 15:07:03.430614 21584 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.430711 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0818 15:07:03.430913 21584 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0818 15:07:03.430927 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.430932 21584 net.cpp:165] Memory required for data: 255489500\nI0818 15:07:03.430945 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0818 15:07:03.430954 21584 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0818 15:07:03.430960 21584 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0818 15:07:03.430971 21584 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.430994 21584 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0818 15:07:03.431001 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.431006 21584 net.cpp:165] Memory required for data: 263681500\nI0818 15:07:03.431011 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0818 15:07:03.431026 21584 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0818 15:07:03.431032 21584 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0818 15:07:03.431041 21584 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0818 15:07:03.431473 21584 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0818 15:07:03.431488 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.431493 21584 net.cpp:165] Memory required for data: 271873500\nI0818 15:07:03.431501 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0818 15:07:03.431524 21584 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0818 15:07:03.431531 21584 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0818 15:07:03.431540 21584 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0818 15:07:03.431850 21584 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0818 15:07:03.431867 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.431872 21584 net.cpp:165] Memory required for data: 280065500\nI0818 15:07:03.431884 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:03.431897 21584 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0818 15:07:03.431905 21584 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0818 15:07:03.431913 21584 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.431983 21584 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0818 15:07:03.432176 21584 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0818 15:07:03.432190 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.432198 21584 net.cpp:165] Memory required for data: 288257500\nI0818 15:07:03.432207 21584 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0818 15:07:03.432217 21584 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0818 15:07:03.432224 21584 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0818 15:07:03.432230 21584 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0818 15:07:03.432286 21584 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0818 15:07:03.432337 21584 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0818 15:07:03.432348 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.432353 21584 net.cpp:165] Memory required for data: 296449500\nI0818 15:07:03.432359 21584 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0818 15:07:03.432369 21584 net.cpp:100] Creating Layer L1_b3_relu\nI0818 15:07:03.432375 21584 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0818 15:07:03.432389 21584 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.432399 21584 net.cpp:150] Setting up L1_b3_relu\nI0818 15:07:03.432406 21584 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0818 15:07:03.432411 21584 net.cpp:165] Memory required for data: 304641500\nI0818 15:07:03.432416 21584 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.432423 21584 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.432428 21584 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0818 15:07:03.432440 21584 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:03.432449 21584 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:03.432505 21584 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0818 15:07:03.432520 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.432528 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.432531 21584 net.cpp:165] Memory required for data: 321025500\nI0818 15:07:03.432538 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0818 15:07:03.432551 21584 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0818 15:07:03.432565 21584 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0818 15:07:03.432579 21584 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0818 15:07:03.432984 21584 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0818 15:07:03.432999 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.433004 21584 net.cpp:165] Memory required for data: 329217500\nI0818 15:07:03.433017 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0818 15:07:03.433027 21584 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0818 15:07:03.433033 21584 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0818 15:07:03.433042 21584 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0818 15:07:03.433354 21584 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0818 15:07:03.433370 21584 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0818 15:07:03.433377 21584 net.cpp:165] Memory required for data: 337409500\nI0818 15:07:03.433387 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:03.433400 21584 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0818 15:07:03.433406 21584 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0818 15:07:03.433418 21584 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.433487 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0818 15:07:03.433673 21584 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0818 15:07:03.433691 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.433696 21584 net.cpp:165] Memory required for data: 345601500\nI0818 15:07:03.433706 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0818 15:07:03.433714 21584 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0818 15:07:03.433720 21584 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0818 15:07:03.433734 21584 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.433745 21584 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0818 15:07:03.433753 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.433758 21584 net.cpp:165] Memory required for data: 353793500\nI0818 15:07:03.433764 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0818 15:07:03.433780 21584 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0818 15:07:03.433786 21584 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0818 15:07:03.433795 21584 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0818 15:07:03.434212 21584 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0818 15:07:03.434227 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.434233 21584 net.cpp:165] Memory required for data: 361985500\nI0818 15:07:03.434242 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0818 15:07:03.434255 21584 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0818 15:07:03.434264 21584 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0818 15:07:03.434273 21584 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0818 15:07:03.434592 21584 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0818 15:07:03.434607 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.434613 21584 net.cpp:165] Memory required for data: 370177500\nI0818 15:07:03.434624 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:03.434639 21584 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0818 15:07:03.434646 21584 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0818 15:07:03.434662 21584 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.434734 21584 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0818 15:07:03.434921 21584 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0818 15:07:03.434936 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.434940 21584 net.cpp:165] Memory required for data: 378369500\nI0818 15:07:03.434949 21584 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0818 15:07:03.434962 21584 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0818 15:07:03.434968 21584 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0818 15:07:03.434975 21584 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0818 15:07:03.435004 21584 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0818 15:07:03.435047 21584 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0818 15:07:03.435062 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.435067 21584 net.cpp:165] Memory required for data: 386561500\nI0818 15:07:03.435072 21584 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0818 15:07:03.435081 21584 net.cpp:100] Creating Layer L1_b4_relu\nI0818 15:07:03.435086 21584 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0818 15:07:03.435093 21584 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.435103 21584 net.cpp:150] 
Setting up L1_b4_relu\nI0818 15:07:03.435111 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.435118 21584 net.cpp:165] Memory required for data: 394753500\nI0818 15:07:03.435124 21584 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.435134 21584 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.435140 21584 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0818 15:07:03.435148 21584 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:03.435161 21584 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:03.435219 21584 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0818 15:07:03.435230 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.435237 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.435241 21584 net.cpp:165] Memory required for data: 411137500\nI0818 15:07:03.435247 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0818 15:07:03.435261 21584 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0818 15:07:03.435268 21584 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0818 15:07:03.435283 21584 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0818 15:07:03.435696 21584 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0818 15:07:03.435712 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.435719 21584 net.cpp:165] Memory required for data: 419329500\nI0818 15:07:03.435741 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0818 15:07:03.435757 21584 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0818 15:07:03.435765 21584 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0818 15:07:03.435776 21584 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0818 15:07:03.436115 21584 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0818 15:07:03.436131 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.436136 21584 net.cpp:165] Memory required for data: 427521500\nI0818 15:07:03.436149 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:03.436158 21584 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0818 15:07:03.436166 21584 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0818 15:07:03.436180 21584 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.436245 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0818 15:07:03.436434 21584 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0818 15:07:03.436447 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.436452 21584 net.cpp:165] Memory required for data: 435713500\nI0818 15:07:03.436465 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0818 15:07:03.436475 21584 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0818 15:07:03.436480 21584 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0818 15:07:03.436491 21584 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.436506 21584 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0818 15:07:03.436512 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.436517 21584 net.cpp:165] Memory required for data: 443905500\nI0818 15:07:03.436522 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0818 15:07:03.436543 21584 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0818 15:07:03.436550 21584 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0818 15:07:03.436558 21584 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0818 15:07:03.437623 21584 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0818 15:07:03.437638 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.437644 21584 net.cpp:165] Memory required for data: 452097500\nI0818 15:07:03.437659 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0818 15:07:03.437670 21584 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0818 15:07:03.437680 21584 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0818 15:07:03.437690 21584 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0818 15:07:03.437963 21584 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0818 15:07:03.437975 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.437981 21584 net.cpp:165] Memory required for data: 460289500\nI0818 15:07:03.437993 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:03.438000 21584 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0818 15:07:03.438007 21584 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0818 15:07:03.438017 21584 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.438076 21584 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0818 15:07:03.438233 21584 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0818 15:07:03.438249 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438254 21584 net.cpp:165] Memory required for data: 468481500\nI0818 15:07:03.438263 21584 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0818 15:07:03.438273 21584 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0818 15:07:03.438279 21584 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0818 15:07:03.438287 21584 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0818 15:07:03.438294 21584 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0818 15:07:03.438331 21584 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0818 15:07:03.438344 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438349 21584 net.cpp:165] Memory required for data: 476673500\nI0818 15:07:03.438354 21584 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0818 15:07:03.438364 21584 net.cpp:100] Creating Layer L1_b5_relu\nI0818 15:07:03.438370 21584 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0818 15:07:03.438377 21584 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.438387 21584 net.cpp:150] Setting up L1_b5_relu\nI0818 15:07:03.438395 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438400 21584 net.cpp:165] Memory required for data: 484865500\nI0818 15:07:03.438403 21584 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.438416 21584 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.438421 21584 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0818 15:07:03.438428 21584 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:03.438438 21584 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:03.438485 21584 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0818 15:07:03.438500 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438508 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438513 21584 net.cpp:165] Memory required for data: 501249500\nI0818 15:07:03.438518 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0818 15:07:03.438529 21584 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0818 15:07:03.438535 21584 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0818 15:07:03.438544 21584 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0818 15:07:03.438911 21584 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0818 15:07:03.438926 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.438938 21584 net.cpp:165] Memory required for data: 509441500\nI0818 15:07:03.438948 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0818 15:07:03.438961 21584 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0818 15:07:03.438966 21584 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0818 15:07:03.438976 21584 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0818 15:07:03.439251 21584 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0818 15:07:03.439268 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.439273 21584 net.cpp:165] Memory required for data: 517633500\nI0818 15:07:03.439283 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:03.439292 21584 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0818 15:07:03.439298 21584 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0818 15:07:03.439311 21584 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.439369 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0818 15:07:03.439532 21584 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0818 15:07:03.439544 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.439549 21584 net.cpp:165] Memory required for data: 525825500\nI0818 15:07:03.439558 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0818 15:07:03.439566 21584 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0818 15:07:03.439574 21584 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0818 15:07:03.439580 21584 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.439589 21584 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0818 15:07:03.439597 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.439601 21584 net.cpp:165] Memory required for data: 534017500\nI0818 15:07:03.439606 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0818 15:07:03.439627 21584 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0818 15:07:03.439633 21584 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0818 15:07:03.439644 21584 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0818 15:07:03.440008 21584 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0818 15:07:03.440023 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440028 21584 net.cpp:165] Memory required for data: 542209500\nI0818 15:07:03.440037 21584 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0818 15:07:03.440050 21584 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0818 15:07:03.440057 21584 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0818 15:07:03.440069 21584 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0818 15:07:03.440343 21584 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0818 15:07:03.440357 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440362 21584 net.cpp:165] Memory required for data: 550401500\nI0818 15:07:03.440372 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:03.440382 21584 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0818 15:07:03.440387 21584 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0818 15:07:03.440395 21584 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.440456 21584 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0818 15:07:03.440615 21584 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0818 15:07:03.440629 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440634 21584 net.cpp:165] Memory required for data: 558593500\nI0818 15:07:03.440642 21584 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0818 15:07:03.440666 21584 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0818 15:07:03.440675 21584 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0818 15:07:03.440682 21584 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0818 15:07:03.440695 21584 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0818 15:07:03.440732 21584 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0818 15:07:03.440744 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440750 21584 net.cpp:165] Memory required for data: 566785500\nI0818 15:07:03.440762 21584 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0818 15:07:03.440770 21584 net.cpp:100] Creating Layer L1_b6_relu\nI0818 15:07:03.440776 21584 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0818 15:07:03.440783 21584 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.440793 21584 net.cpp:150] Setting up L1_b6_relu\nI0818 15:07:03.440800 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440805 21584 net.cpp:165] Memory required for data: 574977500\nI0818 15:07:03.440809 21584 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.440819 21584 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.440825 21584 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0818 15:07:03.440834 21584 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:03.440843 21584 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:03.440894 21584 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0818 15:07:03.440907 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440913 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.440918 21584 net.cpp:165] Memory required for data: 591361500\nI0818 15:07:03.440923 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0818 15:07:03.440934 21584 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0818 15:07:03.440942 21584 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0818 15:07:03.440953 21584 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0818 15:07:03.441310 21584 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0818 15:07:03.441324 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.441330 21584 net.cpp:165] Memory required for data: 599553500\nI0818 15:07:03.441339 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0818 15:07:03.441349 21584 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0818 15:07:03.441354 21584 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0818 15:07:03.441362 21584 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0818 15:07:03.441666 21584 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0818 15:07:03.441681 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.441686 21584 net.cpp:165] Memory required for data: 607745500\nI0818 15:07:03.441699 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:03.441710 21584 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0818 15:07:03.441717 21584 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0818 15:07:03.441725 21584 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.441788 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0818 15:07:03.441953 21584 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0818 15:07:03.441967 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.441972 21584 net.cpp:165] Memory required for data: 615937500\nI0818 15:07:03.441982 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0818 15:07:03.441989 21584 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0818 15:07:03.441995 21584 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0818 15:07:03.442006 21584 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.442016 21584 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0818 15:07:03.442023 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.442028 21584 net.cpp:165] Memory required for data: 624129500\nI0818 15:07:03.442034 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0818 15:07:03.442047 21584 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0818 15:07:03.442054 21584 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0818 15:07:03.442062 21584 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0818 15:07:03.442422 21584 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0818 15:07:03.442436 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.442448 21584 
net.cpp:165] Memory required for data: 632321500\nI0818 15:07:03.442457 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0818 15:07:03.442471 21584 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0818 15:07:03.442476 21584 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0818 15:07:03.442486 21584 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0818 15:07:03.442769 21584 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0818 15:07:03.442782 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.442787 21584 net.cpp:165] Memory required for data: 640513500\nI0818 15:07:03.442798 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:03.442807 21584 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0818 15:07:03.442813 21584 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0818 15:07:03.442824 21584 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.442884 21584 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0818 15:07:03.443048 21584 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0818 15:07:03.443063 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443068 21584 net.cpp:165] Memory required for data: 648705500\nI0818 15:07:03.443076 21584 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0818 15:07:03.443085 21584 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0818 15:07:03.443092 21584 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0818 15:07:03.443099 21584 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0818 15:07:03.443110 21584 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0818 15:07:03.443145 21584 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0818 15:07:03.443159 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443166 21584 net.cpp:165] Memory required for data: 656897500\nI0818 15:07:03.443171 21584 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0818 15:07:03.443178 21584 net.cpp:100] Creating 
Layer L1_b7_relu\nI0818 15:07:03.443184 21584 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0818 15:07:03.443192 21584 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.443200 21584 net.cpp:150] Setting up L1_b7_relu\nI0818 15:07:03.443207 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443212 21584 net.cpp:165] Memory required for data: 665089500\nI0818 15:07:03.443217 21584 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.443226 21584 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.443233 21584 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0818 15:07:03.443240 21584 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:03.443250 21584 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:03.443297 21584 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0818 15:07:03.443312 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443320 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443325 21584 net.cpp:165] Memory required for data: 681473500\nI0818 15:07:03.443330 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0818 15:07:03.443341 21584 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0818 15:07:03.443346 21584 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0818 15:07:03.443356 21584 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0818 15:07:03.443733 21584 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0818 15:07:03.443747 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.443753 21584 net.cpp:165] Memory required for data: 689665500\nI0818 15:07:03.443761 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0818 15:07:03.443774 21584 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0818 15:07:03.443780 21584 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0818 15:07:03.443796 21584 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0818 15:07:03.444079 21584 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0818 15:07:03.444093 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.444098 21584 net.cpp:165] Memory required for data: 697857500\nI0818 15:07:03.444108 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:03.444120 21584 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0818 15:07:03.444128 21584 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0818 15:07:03.444135 21584 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.444193 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0818 15:07:03.444356 21584 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0818 15:07:03.444370 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.444375 21584 net.cpp:165] Memory required for data: 706049500\nI0818 15:07:03.444383 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0818 15:07:03.444391 21584 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0818 15:07:03.444397 21584 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0818 15:07:03.444408 21584 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.444418 21584 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0818 15:07:03.444425 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.444430 21584 net.cpp:165] Memory required for data: 714241500\nI0818 15:07:03.444435 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0818 15:07:03.444448 21584 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0818 15:07:03.444454 21584 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0818 15:07:03.444463 21584 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0818 15:07:03.444833 21584 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0818 15:07:03.444846 21584 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.444852 21584 net.cpp:165] Memory required for data: 722433500\nI0818 15:07:03.444861 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0818 15:07:03.444874 21584 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0818 15:07:03.444880 21584 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0818 15:07:03.444888 21584 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0818 15:07:03.445166 21584 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0818 15:07:03.445179 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445185 21584 net.cpp:165] Memory required for data: 730625500\nI0818 15:07:03.445195 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:03.445204 21584 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0818 15:07:03.445210 21584 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0818 15:07:03.445221 21584 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.445281 21584 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0818 15:07:03.445456 21584 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0818 15:07:03.445473 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445479 21584 net.cpp:165] Memory required for data: 738817500\nI0818 15:07:03.445488 21584 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0818 15:07:03.445497 21584 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0818 15:07:03.445504 21584 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0818 15:07:03.445511 21584 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0818 15:07:03.445519 21584 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0818 15:07:03.445559 21584 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0818 15:07:03.445569 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445574 21584 net.cpp:165] Memory required for data: 747009500\nI0818 15:07:03.445578 21584 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0818 15:07:03.445588 21584 net.cpp:100] Creating Layer L1_b8_relu\nI0818 15:07:03.445595 21584 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0818 15:07:03.445601 21584 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.445618 21584 net.cpp:150] Setting up L1_b8_relu\nI0818 15:07:03.445626 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445631 21584 net.cpp:165] Memory required for data: 755201500\nI0818 15:07:03.445636 21584 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.445646 21584 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.445658 21584 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0818 15:07:03.445667 21584 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:03.445677 21584 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:03.445726 21584 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0818 15:07:03.445740 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445747 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.445752 21584 net.cpp:165] Memory required for data: 771585500\nI0818 15:07:03.445757 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0818 15:07:03.445768 21584 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0818 15:07:03.445775 21584 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0818 15:07:03.445783 21584 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0818 15:07:03.446153 21584 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0818 15:07:03.446168 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.446173 21584 net.cpp:165] Memory required for data: 779777500\nI0818 15:07:03.446182 21584 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0818 15:07:03.446194 21584 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0818 15:07:03.446202 21584 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0818 15:07:03.446210 21584 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0818 15:07:03.446491 21584 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0818 15:07:03.446506 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.446512 21584 net.cpp:165] Memory required for data: 787969500\nI0818 15:07:03.446522 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:03.446532 21584 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0818 15:07:03.446538 21584 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0818 15:07:03.446545 21584 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.446604 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0818 15:07:03.446780 21584 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0818 15:07:03.446794 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.446799 21584 net.cpp:165] Memory required for data: 796161500\nI0818 15:07:03.446808 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0818 15:07:03.446821 21584 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0818 15:07:03.446827 21584 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0818 15:07:03.446835 21584 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.446846 21584 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0818 15:07:03.446856 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.446861 21584 net.cpp:165] Memory required for data: 804353500\nI0818 15:07:03.446866 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0818 15:07:03.446876 21584 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0818 15:07:03.446882 21584 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0818 15:07:03.446892 21584 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0818 15:07:03.447245 21584 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0818 15:07:03.447259 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.447264 21584 net.cpp:165] Memory required for data: 812545500\nI0818 15:07:03.447273 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0818 15:07:03.447283 21584 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0818 15:07:03.447289 21584 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0818 15:07:03.447321 21584 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0818 15:07:03.447600 21584 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0818 15:07:03.447614 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.447619 21584 net.cpp:165] Memory required for data: 820737500\nI0818 15:07:03.447662 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:03.447679 21584 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0818 15:07:03.447685 21584 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0818 15:07:03.447693 21584 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.447752 21584 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0818 15:07:03.447913 21584 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0818 15:07:03.447926 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.447932 21584 net.cpp:165] Memory required for data: 828929500\nI0818 15:07:03.447942 21584 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0818 15:07:03.447950 21584 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0818 15:07:03.447957 21584 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0818 15:07:03.447963 21584 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0818 15:07:03.447975 21584 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0818 15:07:03.448010 21584 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0818 15:07:03.448025 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.448030 21584 net.cpp:165] Memory required for 
data: 837121500\nI0818 15:07:03.448035 21584 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0818 15:07:03.448043 21584 net.cpp:100] Creating Layer L1_b9_relu\nI0818 15:07:03.448050 21584 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0818 15:07:03.448056 21584 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.448066 21584 net.cpp:150] Setting up L1_b9_relu\nI0818 15:07:03.448073 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.448077 21584 net.cpp:165] Memory required for data: 845313500\nI0818 15:07:03.448082 21584 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.448092 21584 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.448098 21584 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0818 15:07:03.448106 21584 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:03.448115 21584 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:03.448170 21584 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0818 15:07:03.448182 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.448189 21584 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0818 15:07:03.448194 21584 net.cpp:165] Memory required for data: 861697500\nI0818 15:07:03.448199 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0818 15:07:03.448210 21584 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0818 15:07:03.448216 21584 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0818 15:07:03.448228 21584 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0818 15:07:03.448598 21584 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0818 15:07:03.448612 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.448617 21584 net.cpp:165] Memory required for data: 
863745500\nI0818 15:07:03.448626 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0818 15:07:03.448635 21584 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0818 15:07:03.448642 21584 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0818 15:07:03.448659 21584 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0818 15:07:03.448938 21584 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0818 15:07:03.448952 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.448957 21584 net.cpp:165] Memory required for data: 865793500\nI0818 15:07:03.448968 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:03.448987 21584 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0818 15:07:03.448993 21584 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0818 15:07:03.449002 21584 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.449064 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0818 15:07:03.449224 21584 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0818 15:07:03.449237 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.449244 21584 net.cpp:165] Memory required for data: 867841500\nI0818 15:07:03.449252 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0818 15:07:03.449264 21584 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0818 15:07:03.449270 21584 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0818 15:07:03.449277 21584 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.449287 21584 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0818 15:07:03.449295 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.449300 21584 net.cpp:165] Memory required for data: 869889500\nI0818 15:07:03.449304 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0818 15:07:03.449317 21584 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0818 15:07:03.449324 21584 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0818 15:07:03.449343 21584 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0818 15:07:03.449702 21584 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0818 15:07:03.449717 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.449723 21584 net.cpp:165] Memory required for data: 871937500\nI0818 15:07:03.449731 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0818 15:07:03.449740 21584 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0818 15:07:03.449746 21584 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0818 15:07:03.449755 21584 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0818 15:07:03.450028 21584 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0818 15:07:03.450042 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.450047 21584 net.cpp:165] Memory required for data: 873985500\nI0818 15:07:03.450057 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:03.450067 21584 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0818 15:07:03.450073 21584 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0818 15:07:03.450083 21584 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.450145 21584 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0818 15:07:03.450304 21584 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0818 15:07:03.450316 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.450322 21584 net.cpp:165] Memory required for data: 876033500\nI0818 15:07:03.450331 21584 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0818 15:07:03.450340 21584 net.cpp:100] Creating Layer L2_b1_pool\nI0818 15:07:03.450347 21584 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0818 15:07:03.450359 21584 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0818 15:07:03.450390 21584 net.cpp:150] Setting up L2_b1_pool\nI0818 15:07:03.450407 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.450412 21584 net.cpp:165] Memory required for data: 878081500\nI0818 15:07:03.450417 21584 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0818 15:07:03.450425 21584 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0818 15:07:03.450431 21584 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0818 15:07:03.450438 21584 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0818 15:07:03.450446 21584 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0818 15:07:03.450482 21584 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0818 15:07:03.450495 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.450500 21584 net.cpp:165] Memory required for data: 880129500\nI0818 15:07:03.450505 21584 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0818 15:07:03.450512 21584 net.cpp:100] Creating Layer L2_b1_relu\nI0818 15:07:03.450525 21584 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0818 15:07:03.450534 21584 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.450543 21584 net.cpp:150] Setting up L2_b1_relu\nI0818 15:07:03.450551 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.450556 21584 net.cpp:165] Memory required for data: 882177500\nI0818 15:07:03.450561 21584 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0818 15:07:03.450573 21584 net.cpp:100] Creating Layer L2_b1_zeros\nI0818 15:07:03.450582 21584 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0818 15:07:03.452853 21584 net.cpp:150] Setting up L2_b1_zeros\nI0818 15:07:03.452874 21584 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0818 15:07:03.452880 21584 net.cpp:165] Memory required for data: 884225500\nI0818 15:07:03.452886 21584 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0818 15:07:03.452896 21584 net.cpp:100] Creating Layer L2_b1_concat0\nI0818 15:07:03.452903 21584 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0818 15:07:03.452910 21584 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0818 15:07:03.452919 21584 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0818 15:07:03.452966 21584 net.cpp:150] Setting up 
L2_b1_concat0\nI0818 15:07:03.452980 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.452985 21584 net.cpp:165] Memory required for data: 888321500\nI0818 15:07:03.452989 21584 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.453001 21584 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.453006 21584 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0818 15:07:03.453014 21584 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:03.453025 21584 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:03.453079 21584 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0818 15:07:03.453090 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.453097 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.453102 21584 net.cpp:165] Memory required for data: 896513500\nI0818 15:07:03.453107 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0818 15:07:03.453121 21584 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0818 15:07:03.453128 21584 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0818 15:07:03.453138 21584 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0818 15:07:03.453644 21584 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0818 15:07:03.453663 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.453668 21584 net.cpp:165] Memory required for data: 900609500\nI0818 15:07:03.453678 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0818 15:07:03.453691 21584 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0818 15:07:03.453698 21584 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0818 15:07:03.453711 21584 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0818 15:07:03.453986 21584 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0818 15:07:03.454000 21584 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0818 15:07:03.454005 21584 net.cpp:165] Memory required for data: 904705500\nI0818 15:07:03.454016 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:03.454025 21584 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0818 15:07:03.454031 21584 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0818 15:07:03.454041 21584 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.454102 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0818 15:07:03.454260 21584 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0818 15:07:03.454273 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.454278 21584 net.cpp:165] Memory required for data: 908801500\nI0818 15:07:03.454288 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0818 15:07:03.454298 21584 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0818 15:07:03.454313 21584 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0818 15:07:03.454322 21584 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.454332 21584 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0818 15:07:03.454339 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.454344 21584 net.cpp:165] Memory required for data: 912897500\nI0818 15:07:03.454349 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0818 15:07:03.454362 21584 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0818 15:07:03.454370 21584 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0818 15:07:03.454378 21584 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0818 15:07:03.454881 21584 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0818 15:07:03.454896 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.454901 21584 net.cpp:165] Memory required for data: 916993500\nI0818 15:07:03.454910 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0818 15:07:03.454922 21584 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0818 15:07:03.454931 
21584 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0818 15:07:03.454938 21584 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0818 15:07:03.455205 21584 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0818 15:07:03.455221 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455227 21584 net.cpp:165] Memory required for data: 921089500\nI0818 15:07:03.455237 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:03.455246 21584 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0818 15:07:03.455253 21584 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0818 15:07:03.455260 21584 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.455319 21584 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0818 15:07:03.455487 21584 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0818 15:07:03.455500 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455505 21584 net.cpp:165] Memory required for data: 925185500\nI0818 15:07:03.455514 21584 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0818 15:07:03.455523 21584 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0818 15:07:03.455530 21584 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0818 15:07:03.455538 21584 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0818 15:07:03.455549 21584 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0818 15:07:03.455577 21584 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0818 15:07:03.455586 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455591 21584 net.cpp:165] Memory required for data: 929281500\nI0818 15:07:03.455596 21584 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0818 15:07:03.455608 21584 net.cpp:100] Creating Layer L2_b2_relu\nI0818 15:07:03.455615 21584 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0818 15:07:03.455622 21584 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.455632 21584 net.cpp:150] 
Setting up L2_b2_relu\nI0818 15:07:03.455639 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455643 21584 net.cpp:165] Memory required for data: 933377500\nI0818 15:07:03.455648 21584 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.455662 21584 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.455668 21584 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0818 15:07:03.455677 21584 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:03.455687 21584 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:03.455737 21584 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0818 15:07:03.455749 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455756 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.455761 21584 net.cpp:165] Memory required for data: 941569500\nI0818 15:07:03.455773 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0818 15:07:03.455787 21584 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0818 15:07:03.455795 21584 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0818 15:07:03.455804 21584 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0818 15:07:03.456303 21584 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0818 15:07:03.456317 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.456322 21584 net.cpp:165] Memory required for data: 945665500\nI0818 15:07:03.456332 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0818 15:07:03.456344 21584 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0818 15:07:03.456351 21584 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0818 15:07:03.456362 21584 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0818 15:07:03.456631 21584 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0818 15:07:03.456645 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.456655 21584 net.cpp:165] Memory required for data: 949761500\nI0818 15:07:03.456666 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:03.456676 21584 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0818 15:07:03.456681 21584 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0818 15:07:03.456689 21584 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.456751 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0818 15:07:03.456908 21584 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0818 15:07:03.456921 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.456928 21584 net.cpp:165] Memory required for data: 953857500\nI0818 15:07:03.456936 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0818 15:07:03.456944 21584 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0818 15:07:03.456950 21584 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0818 15:07:03.456961 21584 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.456971 21584 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0818 15:07:03.456979 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.456984 21584 net.cpp:165] Memory required for data: 957953500\nI0818 15:07:03.456989 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0818 15:07:03.457001 21584 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0818 15:07:03.457008 21584 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0818 15:07:03.457017 21584 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0818 15:07:03.457510 21584 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0818 15:07:03.457525 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.457530 21584 net.cpp:165] Memory required for data: 962049500\nI0818 15:07:03.457540 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0818 15:07:03.457551 21584 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0818 15:07:03.457558 21584 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0818 15:07:03.457566 21584 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0818 15:07:03.457850 21584 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0818 15:07:03.457866 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.457872 21584 net.cpp:165] Memory required for data: 966145500\nI0818 15:07:03.457883 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:03.457892 21584 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0818 15:07:03.457898 21584 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0818 15:07:03.457906 21584 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.457967 21584 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0818 15:07:03.458125 21584 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0818 15:07:03.458139 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458144 21584 net.cpp:165] Memory required for data: 970241500\nI0818 15:07:03.458153 21584 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0818 15:07:03.458163 21584 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0818 15:07:03.458175 21584 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0818 15:07:03.458184 21584 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0818 15:07:03.458194 21584 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0818 15:07:03.458223 21584 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0818 15:07:03.458232 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458237 21584 net.cpp:165] Memory required for data: 974337500\nI0818 15:07:03.458242 21584 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0818 15:07:03.458266 21584 net.cpp:100] Creating Layer L2_b3_relu\nI0818 15:07:03.458272 21584 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0818 15:07:03.458281 21584 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.458290 21584 net.cpp:150] Setting up L2_b3_relu\nI0818 15:07:03.458297 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458302 21584 net.cpp:165] Memory required for data: 978433500\nI0818 15:07:03.458307 21584 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.458314 21584 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.458319 21584 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0818 15:07:03.458328 21584 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:03.458338 21584 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:03.458391 21584 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0818 15:07:03.458403 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458410 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458415 21584 net.cpp:165] Memory required for data: 986625500\nI0818 15:07:03.458420 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0818 15:07:03.458431 21584 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0818 15:07:03.458438 21584 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0818 15:07:03.458451 21584 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0818 15:07:03.458950 21584 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0818 15:07:03.458966 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.458971 21584 net.cpp:165] Memory required for data: 990721500\nI0818 15:07:03.458981 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0818 15:07:03.458989 21584 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0818 15:07:03.458997 21584 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0818 15:07:03.459008 21584 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0818 15:07:03.459285 21584 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0818 15:07:03.459297 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.459302 21584 net.cpp:165] Memory required for data: 994817500\nI0818 15:07:03.459312 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:03.459324 21584 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0818 15:07:03.459331 21584 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0818 15:07:03.459338 21584 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.459398 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0818 15:07:03.459560 21584 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0818 15:07:03.459573 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.459579 21584 net.cpp:165] Memory required for data: 998913500\nI0818 15:07:03.459588 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0818 15:07:03.459599 21584 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0818 15:07:03.459605 21584 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0818 15:07:03.459614 21584 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.459623 21584 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0818 15:07:03.459630 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.459635 21584 net.cpp:165] Memory required for data: 1003009500\nI0818 15:07:03.459648 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0818 15:07:03.459669 21584 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0818 15:07:03.459676 21584 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0818 15:07:03.459687 21584 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0818 15:07:03.460180 21584 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0818 15:07:03.460194 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.460199 21584 net.cpp:165] Memory required for data: 1007105500\nI0818 15:07:03.460208 21584 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0818 15:07:03.460218 21584 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0818 15:07:03.460225 21584 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0818 15:07:03.460237 21584 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0818 15:07:03.460506 21584 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0818 15:07:03.460520 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.460525 21584 net.cpp:165] Memory required for data: 1011201500\nI0818 15:07:03.460536 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:03.460547 21584 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0818 15:07:03.460554 21584 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0818 15:07:03.460562 21584 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.460620 21584 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0818 15:07:03.460789 21584 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0818 15:07:03.460803 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.460808 21584 net.cpp:165] Memory required for data: 1015297500\nI0818 15:07:03.460817 21584 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0818 15:07:03.460829 21584 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0818 15:07:03.460836 21584 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0818 15:07:03.460844 21584 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0818 15:07:03.460852 21584 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0818 15:07:03.460880 21584 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0818 15:07:03.460899 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.460904 21584 net.cpp:165] Memory required for data: 1019393500\nI0818 15:07:03.460909 21584 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0818 15:07:03.460917 21584 net.cpp:100] Creating Layer L2_b4_relu\nI0818 15:07:03.460923 21584 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0818 15:07:03.460930 21584 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.460940 21584 net.cpp:150] Setting up L2_b4_relu\nI0818 15:07:03.460947 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.460952 21584 net.cpp:165] Memory required for data: 1023489500\nI0818 15:07:03.460957 21584 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.460966 21584 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.460973 21584 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0818 15:07:03.460979 21584 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:03.460989 21584 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:03.461040 21584 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0818 15:07:03.461053 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.461060 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.461064 21584 net.cpp:165] Memory required for data: 1031681500\nI0818 15:07:03.461069 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0818 15:07:03.461081 21584 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0818 15:07:03.461087 21584 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0818 15:07:03.461099 21584 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0818 15:07:03.461603 21584 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0818 15:07:03.461618 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.461623 21584 net.cpp:165] Memory required for data: 1035777500\nI0818 15:07:03.461632 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0818 15:07:03.461642 21584 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0818 15:07:03.461653 21584 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0818 15:07:03.461666 21584 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0818 15:07:03.461940 21584 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0818 15:07:03.461952 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.461958 21584 net.cpp:165] Memory required for data: 1039873500\nI0818 15:07:03.461968 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:03.461980 21584 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0818 15:07:03.461987 21584 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0818 15:07:03.461995 21584 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.462054 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0818 15:07:03.462218 21584 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0818 15:07:03.462231 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.462236 21584 net.cpp:165] Memory required for data: 1043969500\nI0818 15:07:03.462245 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0818 15:07:03.462256 21584 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0818 15:07:03.462263 21584 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0818 15:07:03.462271 21584 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.462281 21584 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0818 15:07:03.462288 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.462292 21584 net.cpp:165] Memory required for data: 1048065500\nI0818 15:07:03.462297 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0818 15:07:03.462311 21584 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0818 15:07:03.462317 21584 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0818 15:07:03.462328 21584 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0818 15:07:03.462831 21584 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0818 15:07:03.462846 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.462852 21584 
net.cpp:165] Memory required for data: 1052161500\nI0818 15:07:03.462859 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0818 15:07:03.462869 21584 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0818 15:07:03.462875 21584 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0818 15:07:03.462883 21584 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0818 15:07:03.463160 21584 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0818 15:07:03.463173 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463179 21584 net.cpp:165] Memory required for data: 1056257500\nI0818 15:07:03.463189 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:03.463198 21584 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0818 15:07:03.463204 21584 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0818 15:07:03.463215 21584 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.463274 21584 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0818 15:07:03.463433 21584 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0818 15:07:03.463446 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463451 21584 net.cpp:165] Memory required for data: 1060353500\nI0818 15:07:03.463460 21584 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0818 15:07:03.463469 21584 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0818 15:07:03.463476 21584 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0818 15:07:03.463484 21584 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0818 15:07:03.463495 21584 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0818 15:07:03.463522 21584 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0818 15:07:03.463542 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463547 21584 net.cpp:165] Memory required for data: 1064449500\nI0818 15:07:03.463552 21584 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0818 15:07:03.463560 21584 net.cpp:100] Creating 
Layer L2_b5_relu\nI0818 15:07:03.463567 21584 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0818 15:07:03.463574 21584 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.463583 21584 net.cpp:150] Setting up L2_b5_relu\nI0818 15:07:03.463590 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463595 21584 net.cpp:165] Memory required for data: 1068545500\nI0818 15:07:03.463600 21584 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.463610 21584 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.463616 21584 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0818 15:07:03.463624 21584 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:03.463634 21584 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:03.463691 21584 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0818 15:07:03.463706 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463712 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.463716 21584 net.cpp:165] Memory required for data: 1076737500\nI0818 15:07:03.463722 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0818 15:07:03.463733 21584 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0818 15:07:03.463740 21584 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0818 15:07:03.463753 21584 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0818 15:07:03.464254 21584 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0818 15:07:03.464268 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.464273 21584 net.cpp:165] Memory required for data: 1080833500\nI0818 15:07:03.464282 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0818 15:07:03.464292 21584 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0818 15:07:03.464298 21584 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0818 15:07:03.464311 21584 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0818 15:07:03.464581 21584 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0818 15:07:03.464594 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.464599 21584 net.cpp:165] Memory required for data: 1084929500\nI0818 15:07:03.464609 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:03.464622 21584 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0818 15:07:03.464628 21584 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0818 15:07:03.464637 21584 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.464704 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0818 15:07:03.464869 21584 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0818 15:07:03.464881 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.464886 21584 net.cpp:165] Memory required for data: 1089025500\nI0818 15:07:03.464895 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0818 15:07:03.464905 21584 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0818 15:07:03.464910 21584 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0818 15:07:03.464921 21584 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.464931 21584 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0818 15:07:03.464939 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.464943 21584 net.cpp:165] Memory required for data: 1093121500\nI0818 15:07:03.464948 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0818 15:07:03.464962 21584 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0818 15:07:03.464968 21584 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0818 15:07:03.464980 21584 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0818 15:07:03.465478 21584 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0818 15:07:03.465492 21584 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.465497 21584 net.cpp:165] Memory required for data: 1097217500\nI0818 15:07:03.465507 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0818 15:07:03.465517 21584 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0818 15:07:03.465523 21584 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0818 15:07:03.465531 21584 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0818 15:07:03.465808 21584 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0818 15:07:03.465822 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.465827 21584 net.cpp:165] Memory required for data: 1101313500\nI0818 15:07:03.465837 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:03.465847 21584 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0818 15:07:03.465852 21584 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0818 15:07:03.465864 21584 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.465924 21584 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0818 15:07:03.466083 21584 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0818 15:07:03.466095 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.466100 21584 net.cpp:165] Memory required for data: 1105409500\nI0818 15:07:03.466109 21584 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0818 15:07:03.466119 21584 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0818 15:07:03.466125 21584 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0818 15:07:03.466132 21584 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0818 15:07:03.466145 21584 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0818 15:07:03.466174 21584 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0818 15:07:03.466187 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.466192 21584 net.cpp:165] Memory required for data: 1109505500\nI0818 15:07:03.466197 21584 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0818 15:07:03.466207 21584 net.cpp:100] Creating Layer L2_b6_relu\nI0818 15:07:03.466214 21584 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0818 15:07:03.466222 21584 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.466231 21584 net.cpp:150] Setting up L2_b6_relu\nI0818 15:07:03.466238 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.466243 21584 net.cpp:165] Memory required for data: 1113601500\nI0818 15:07:03.466248 21584 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.466258 21584 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.466264 21584 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0818 15:07:03.466270 21584 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:03.466280 21584 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:03.466328 21584 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0818 15:07:03.466346 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.466352 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.466357 21584 net.cpp:165] Memory required for data: 1121793500\nI0818 15:07:03.466362 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0818 15:07:03.466374 21584 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0818 15:07:03.466382 21584 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0818 15:07:03.466390 21584 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0818 15:07:03.467903 21584 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0818 15:07:03.467921 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.467926 21584 net.cpp:165] Memory required for data: 1125889500\nI0818 15:07:03.467936 21584 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0818 15:07:03.467957 21584 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0818 15:07:03.467964 21584 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0818 15:07:03.467977 21584 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0818 15:07:03.468247 21584 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0818 15:07:03.468261 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.468266 21584 net.cpp:165] Memory required for data: 1129985500\nI0818 15:07:03.468276 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:03.468286 21584 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0818 15:07:03.468292 21584 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0818 15:07:03.468300 21584 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.468364 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0818 15:07:03.468525 21584 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0818 15:07:03.468538 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.468544 21584 net.cpp:165] Memory required for data: 1134081500\nI0818 15:07:03.468552 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0818 15:07:03.468564 21584 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0818 15:07:03.468570 21584 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0818 15:07:03.468578 21584 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.468588 21584 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0818 15:07:03.468595 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.468600 21584 net.cpp:165] Memory required for data: 1138177500\nI0818 15:07:03.468605 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0818 15:07:03.468618 21584 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0818 15:07:03.468626 21584 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0818 15:07:03.468636 21584 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0818 15:07:03.469139 21584 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0818 15:07:03.469154 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.469159 21584 net.cpp:165] Memory required for data: 1142273500\nI0818 15:07:03.469168 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0818 15:07:03.469182 21584 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0818 15:07:03.469188 21584 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0818 15:07:03.469199 21584 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0818 15:07:03.469470 21584 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0818 15:07:03.469487 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.469492 21584 net.cpp:165] Memory required for data: 1146369500\nI0818 15:07:03.469503 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:03.469512 21584 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0818 15:07:03.469519 21584 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0818 15:07:03.469527 21584 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.469585 21584 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0818 15:07:03.469751 21584 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0818 15:07:03.469765 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.469770 21584 net.cpp:165] Memory required for data: 1150465500\nI0818 15:07:03.469779 21584 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0818 15:07:03.469789 21584 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0818 15:07:03.469795 21584 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0818 15:07:03.469805 21584 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0818 15:07:03.469815 21584 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0818 15:07:03.469843 21584 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0818 15:07:03.469856 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.469861 21584 net.cpp:165] Memory required for 
data: 1154561500\nI0818 15:07:03.469866 21584 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0818 15:07:03.469877 21584 net.cpp:100] Creating Layer L2_b7_relu\nI0818 15:07:03.469889 21584 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0818 15:07:03.469897 21584 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.469907 21584 net.cpp:150] Setting up L2_b7_relu\nI0818 15:07:03.469915 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.469919 21584 net.cpp:165] Memory required for data: 1158657500\nI0818 15:07:03.469924 21584 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.469931 21584 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.469936 21584 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0818 15:07:03.469944 21584 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:03.469954 21584 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:03.470007 21584 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0818 15:07:03.470019 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.470026 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.470031 21584 net.cpp:165] Memory required for data: 1166849500\nI0818 15:07:03.470036 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0818 15:07:03.470052 21584 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0818 15:07:03.470058 21584 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0818 15:07:03.470067 21584 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0818 15:07:03.470558 21584 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0818 15:07:03.470572 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.470577 21584 net.cpp:165] Memory required for data: 
1170945500\nI0818 15:07:03.470587 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0818 15:07:03.470599 21584 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0818 15:07:03.470607 21584 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0818 15:07:03.470618 21584 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0818 15:07:03.470901 21584 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0818 15:07:03.470914 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.470919 21584 net.cpp:165] Memory required for data: 1175041500\nI0818 15:07:03.470929 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:03.470938 21584 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0818 15:07:03.470945 21584 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0818 15:07:03.470953 21584 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.471017 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0818 15:07:03.471174 21584 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0818 15:07:03.471187 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.471192 21584 net.cpp:165] Memory required for data: 1179137500\nI0818 15:07:03.471201 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0818 15:07:03.471212 21584 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0818 15:07:03.471220 21584 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0818 15:07:03.471226 21584 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.471237 21584 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0818 15:07:03.471245 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.471249 21584 net.cpp:165] Memory required for data: 1183233500\nI0818 15:07:03.471254 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0818 15:07:03.471267 21584 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0818 15:07:03.471274 21584 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0818 15:07:03.471283 21584 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0818 15:07:03.471783 21584 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0818 15:07:03.471798 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.471803 21584 net.cpp:165] Memory required for data: 1187329500\nI0818 15:07:03.471812 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0818 15:07:03.471832 21584 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0818 15:07:03.471839 21584 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0818 15:07:03.471848 21584 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0818 15:07:03.472122 21584 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0818 15:07:03.472138 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.472144 21584 net.cpp:165] Memory required for data: 1191425500\nI0818 15:07:03.472154 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:03.472164 21584 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0818 15:07:03.472170 21584 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0818 15:07:03.472178 21584 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.472237 21584 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0818 15:07:03.472405 21584 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0818 15:07:03.472419 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.472424 21584 net.cpp:165] Memory required for data: 1195521500\nI0818 15:07:03.472432 21584 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0818 15:07:03.472441 21584 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0818 15:07:03.472448 21584 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0818 15:07:03.472455 21584 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0818 15:07:03.472468 21584 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0818 15:07:03.472497 21584 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0818 15:07:03.472509 21584 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 15:07:03.472514 21584 net.cpp:165] Memory required for data: 1199617500\nI0818 15:07:03.472519 21584 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0818 15:07:03.472530 21584 net.cpp:100] Creating Layer L2_b8_relu\nI0818 15:07:03.472537 21584 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0818 15:07:03.472544 21584 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.472553 21584 net.cpp:150] Setting up L2_b8_relu\nI0818 15:07:03.472561 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.472565 21584 net.cpp:165] Memory required for data: 1203713500\nI0818 15:07:03.472570 21584 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.472578 21584 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.472582 21584 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0818 15:07:03.472590 21584 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:03.472614 21584 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:03.472674 21584 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0818 15:07:03.472688 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.472695 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.472699 21584 net.cpp:165] Memory required for data: 1211905500\nI0818 15:07:03.472705 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0818 15:07:03.472717 21584 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0818 15:07:03.472723 21584 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0818 15:07:03.472736 21584 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0818 15:07:03.473234 21584 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0818 15:07:03.473249 21584 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0818 15:07:03.473254 21584 net.cpp:165] Memory required for data: 1216001500\nI0818 15:07:03.473263 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0818 15:07:03.473276 21584 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0818 15:07:03.473284 21584 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0818 15:07:03.473291 21584 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0818 15:07:03.473569 21584 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0818 15:07:03.473592 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.473598 21584 net.cpp:165] Memory required for data: 1220097500\nI0818 15:07:03.473608 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:03.473616 21584 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0818 15:07:03.473623 21584 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0818 15:07:03.473631 21584 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.473700 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0818 15:07:03.473865 21584 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0818 15:07:03.473879 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.473884 21584 net.cpp:165] Memory required for data: 1224193500\nI0818 15:07:03.473893 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0818 15:07:03.473902 21584 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0818 15:07:03.473909 21584 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0818 15:07:03.473919 21584 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.473930 21584 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0818 15:07:03.473937 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.473942 21584 net.cpp:165] Memory required for data: 1228289500\nI0818 15:07:03.473947 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0818 15:07:03.473958 21584 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0818 15:07:03.473963 21584 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0818 15:07:03.473975 21584 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0818 15:07:03.475469 21584 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0818 15:07:03.475486 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.475492 21584 net.cpp:165] Memory required for data: 1232385500\nI0818 15:07:03.475502 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0818 15:07:03.475514 21584 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0818 15:07:03.475522 21584 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0818 15:07:03.475533 21584 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0818 15:07:03.475811 21584 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0818 15:07:03.475826 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.475831 21584 net.cpp:165] Memory required for data: 1236481500\nI0818 15:07:03.475879 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:03.475894 21584 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0818 15:07:03.475901 21584 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0818 15:07:03.475909 21584 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.475973 21584 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0818 15:07:03.476128 21584 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0818 15:07:03.476141 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.476146 21584 net.cpp:165] Memory required for data: 1240577500\nI0818 15:07:03.476156 21584 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0818 15:07:03.476168 21584 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0818 15:07:03.476176 21584 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0818 15:07:03.476183 21584 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0818 15:07:03.476191 21584 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0818 15:07:03.476223 21584 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0818 15:07:03.476234 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.476239 21584 net.cpp:165] Memory required for data: 1244673500\nI0818 15:07:03.476245 21584 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0818 15:07:03.476253 21584 net.cpp:100] Creating Layer L2_b9_relu\nI0818 15:07:03.476259 21584 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0818 15:07:03.476272 21584 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.476284 21584 net.cpp:150] Setting up L2_b9_relu\nI0818 15:07:03.476291 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.476303 21584 net.cpp:165] Memory required for data: 1248769500\nI0818 15:07:03.476308 21584 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.476316 21584 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.476321 21584 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0818 15:07:03.476332 21584 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:03.476342 21584 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:03.476394 21584 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0818 15:07:03.476407 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.476413 21584 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0818 15:07:03.476418 21584 net.cpp:165] Memory required for data: 1256961500\nI0818 15:07:03.476423 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0818 15:07:03.476434 21584 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0818 15:07:03.476440 21584 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0818 15:07:03.476454 21584 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0818 15:07:03.476960 21584 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0818 15:07:03.476975 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.476981 21584 net.cpp:165] Memory required for data: 1257985500\nI0818 15:07:03.476990 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0818 15:07:03.477000 21584 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0818 15:07:03.477007 21584 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0818 15:07:03.477018 21584 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0818 15:07:03.477293 21584 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0818 15:07:03.477308 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.477314 21584 net.cpp:165] Memory required for data: 1259009500\nI0818 15:07:03.477325 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:03.477334 21584 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0818 15:07:03.477340 21584 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0818 15:07:03.477349 21584 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.477406 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0818 15:07:03.477571 21584 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0818 15:07:03.477584 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.477589 21584 net.cpp:165] Memory required for data: 1260033500\nI0818 15:07:03.477599 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0818 15:07:03.477610 21584 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0818 15:07:03.477617 21584 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0818 15:07:03.477625 21584 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0818 15:07:03.477635 21584 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0818 15:07:03.477643 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.477648 21584 net.cpp:165] Memory required for data: 1261057500\nI0818 15:07:03.477658 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0818 15:07:03.477674 21584 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0818 15:07:03.477680 21584 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0818 15:07:03.477689 21584 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0818 15:07:03.478181 21584 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0818 15:07:03.478196 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.478201 21584 net.cpp:165] Memory required for data: 1262081500\nI0818 15:07:03.478211 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0818 15:07:03.478224 21584 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0818 15:07:03.478231 21584 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0818 15:07:03.478242 21584 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0818 15:07:03.478519 21584 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0818 15:07:03.478540 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.478545 21584 net.cpp:165] Memory required for data: 1263105500\nI0818 15:07:03.478555 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:03.478564 21584 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0818 15:07:03.478571 21584 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0818 15:07:03.478582 21584 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0818 15:07:03.478641 21584 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0818 15:07:03.478817 21584 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0818 15:07:03.478832 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.478837 21584 net.cpp:165] Memory required for data: 1264129500\nI0818 15:07:03.478847 21584 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0818 15:07:03.478855 21584 net.cpp:100] Creating Layer L3_b1_pool\nI0818 15:07:03.478863 21584 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0818 15:07:03.478874 21584 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0818 15:07:03.478914 21584 net.cpp:150] Setting up L3_b1_pool\nI0818 15:07:03.478926 21584 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0818 15:07:03.478931 21584 net.cpp:165] Memory required for data: 1265153500\nI0818 15:07:03.478936 21584 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0818 15:07:03.478945 21584 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0818 15:07:03.478951 21584 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0818 15:07:03.478960 21584 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0818 15:07:03.478971 21584 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0818 15:07:03.479004 21584 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0818 15:07:03.479017 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.479022 21584 net.cpp:165] Memory required for data: 1266177500\nI0818 15:07:03.479027 21584 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0818 15:07:03.479034 21584 net.cpp:100] Creating Layer L3_b1_relu\nI0818 15:07:03.479040 21584 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0818 15:07:03.479048 21584 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0818 15:07:03.479058 21584 net.cpp:150] Setting up L3_b1_relu\nI0818 15:07:03.479064 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.479068 21584 net.cpp:165] Memory required for data: 1267201500\nI0818 15:07:03.479074 21584 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0818 15:07:03.479085 21584 net.cpp:100] Creating Layer L3_b1_zeros\nI0818 15:07:03.479094 21584 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0818 15:07:03.480451 21584 net.cpp:150] Setting up L3_b1_zeros\nI0818 15:07:03.480469 21584 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0818 15:07:03.480475 21584 net.cpp:165] Memory required for data: 1268225500\nI0818 15:07:03.480480 21584 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0818 15:07:03.480491 21584 net.cpp:100] Creating Layer L3_b1_concat0\nI0818 15:07:03.480497 21584 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0818 15:07:03.480505 21584 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0818 
15:07:03.480516 21584 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0818 15:07:03.480559 21584 net.cpp:150] Setting up L3_b1_concat0\nI0818 15:07:03.480576 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.480581 21584 net.cpp:165] Memory required for data: 1270273500\nI0818 15:07:03.480587 21584 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.480594 21584 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.480600 21584 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0818 15:07:03.480608 21584 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:03.480621 21584 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:03.480685 21584 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0818 15:07:03.480698 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.480713 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.480718 21584 net.cpp:165] Memory required for data: 1274369500\nI0818 15:07:03.480725 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0818 15:07:03.480739 21584 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0818 15:07:03.480746 21584 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0818 15:07:03.480756 21584 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0818 15:07:03.481812 21584 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0818 15:07:03.481827 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.481832 21584 net.cpp:165] Memory required for data: 1276417500\nI0818 15:07:03.481842 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0818 15:07:03.481854 21584 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0818 15:07:03.481861 21584 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0818 15:07:03.481869 21584 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0818 
15:07:03.482146 21584 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0818 15:07:03.482158 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.482163 21584 net.cpp:165] Memory required for data: 1278465500\nI0818 15:07:03.482174 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:03.482187 21584 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0818 15:07:03.482193 21584 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0818 15:07:03.482201 21584 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.482264 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0818 15:07:03.482424 21584 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0818 15:07:03.482437 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.482444 21584 net.cpp:165] Memory required for data: 1280513500\nI0818 15:07:03.482452 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0818 15:07:03.482463 21584 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0818 15:07:03.482471 21584 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0818 15:07:03.482478 21584 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0818 15:07:03.482488 21584 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0818 15:07:03.482496 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.482501 21584 net.cpp:165] Memory required for data: 1282561500\nI0818 15:07:03.482506 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0818 15:07:03.482519 21584 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0818 15:07:03.482525 21584 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0818 15:07:03.482537 21584 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0818 15:07:03.483582 21584 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0818 15:07:03.483597 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.483603 21584 net.cpp:165] Memory required for data: 1284609500\nI0818 15:07:03.483611 21584 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0818 15:07:03.483621 21584 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0818 15:07:03.483628 21584 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0818 15:07:03.483640 21584 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0818 15:07:03.483924 21584 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0818 15:07:03.483942 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.483947 21584 net.cpp:165] Memory required for data: 1286657500\nI0818 15:07:03.483958 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:03.483966 21584 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0818 15:07:03.483973 21584 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0818 15:07:03.483981 21584 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0818 15:07:03.484041 21584 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0818 15:07:03.484203 21584 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0818 15:07:03.484216 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.484222 21584 net.cpp:165] Memory required for data: 1288705500\nI0818 15:07:03.484238 21584 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0818 15:07:03.484251 21584 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0818 15:07:03.484258 21584 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0818 15:07:03.484266 21584 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0818 15:07:03.484273 21584 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0818 15:07:03.484310 21584 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0818 15:07:03.484323 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.484328 21584 net.cpp:165] Memory required for data: 1290753500\nI0818 15:07:03.484333 21584 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0818 15:07:03.484340 21584 net.cpp:100] Creating Layer L3_b2_relu\nI0818 15:07:03.484346 21584 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0818 15:07:03.484354 21584 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0818 15:07:03.484364 21584 net.cpp:150] Setting up L3_b2_relu\nI0818 15:07:03.484371 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.484375 21584 net.cpp:165] Memory required for data: 1292801500\nI0818 15:07:03.484380 21584 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.484387 21584 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.484392 21584 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0818 15:07:03.484403 21584 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:03.484414 21584 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:03.484460 21584 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0818 15:07:03.484472 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.484479 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.484483 21584 net.cpp:165] Memory required for data: 1296897500\nI0818 15:07:03.484488 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0818 15:07:03.484503 21584 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0818 15:07:03.484510 21584 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0818 15:07:03.484519 21584 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0818 15:07:03.485569 21584 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0818 15:07:03.485584 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.485589 21584 net.cpp:165] Memory required for data: 1298945500\nI0818 15:07:03.485599 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0818 15:07:03.485611 21584 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0818 15:07:03.485618 21584 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0818 15:07:03.485627 21584 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0818 15:07:03.485906 21584 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0818 15:07:03.485920 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.485925 21584 net.cpp:165] Memory required for data: 1300993500\nI0818 15:07:03.485936 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:03.485949 21584 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0818 15:07:03.485955 21584 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0818 15:07:03.485963 21584 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.486027 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0818 15:07:03.486192 21584 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0818 15:07:03.486205 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.486212 21584 net.cpp:165] Memory required for data: 1303041500\nI0818 15:07:03.486220 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0818 15:07:03.486233 21584 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0818 15:07:03.486240 21584 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0818 15:07:03.486248 21584 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0818 15:07:03.486258 21584 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0818 15:07:03.486274 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.486280 21584 net.cpp:165] Memory required for data: 1305089500\nI0818 15:07:03.486285 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0818 15:07:03.486296 21584 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0818 15:07:03.486302 21584 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0818 15:07:03.486315 21584 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0818 15:07:03.487359 21584 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0818 15:07:03.487373 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.487378 21584 net.cpp:165] Memory required for data: 1307137500\nI0818 15:07:03.487388 21584 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0818 15:07:03.487397 21584 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0818 15:07:03.487404 21584 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0818 15:07:03.487416 21584 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0818 15:07:03.487700 21584 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0818 15:07:03.487715 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.487720 21584 net.cpp:165] Memory required for data: 1309185500\nI0818 15:07:03.487730 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:03.487740 21584 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0818 15:07:03.487747 21584 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0818 15:07:03.487756 21584 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0818 15:07:03.487818 21584 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0818 15:07:03.487977 21584 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0818 15:07:03.487993 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.487999 21584 net.cpp:165] Memory required for data: 1311233500\nI0818 15:07:03.488008 21584 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0818 15:07:03.488018 21584 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0818 15:07:03.488024 21584 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0818 15:07:03.488031 21584 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0818 15:07:03.488040 21584 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0818 15:07:03.488077 21584 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0818 15:07:03.488088 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.488095 21584 net.cpp:165] Memory required for data: 1313281500\nI0818 15:07:03.488099 21584 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0818 15:07:03.488107 21584 net.cpp:100] Creating Layer L3_b3_relu\nI0818 15:07:03.488113 21584 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0818 15:07:03.488121 21584 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0818 15:07:03.488131 21584 net.cpp:150] Setting up L3_b3_relu\nI0818 15:07:03.488138 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.488143 21584 net.cpp:165] Memory required for data: 1315329500\nI0818 15:07:03.488147 21584 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.488154 21584 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.488160 21584 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0818 15:07:03.488171 21584 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:03.488181 21584 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:03.488229 21584 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0818 15:07:03.488243 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.488250 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.488255 21584 net.cpp:165] Memory required for data: 1319425500\nI0818 15:07:03.488260 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0818 15:07:03.488272 21584 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0818 15:07:03.488278 21584 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0818 15:07:03.488296 21584 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0818 15:07:03.489356 21584 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0818 15:07:03.489372 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.489377 21584 net.cpp:165] Memory required for data: 1321473500\nI0818 15:07:03.489387 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0818 15:07:03.489398 21584 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0818 15:07:03.489405 21584 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0818 15:07:03.489414 21584 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0818 15:07:03.489698 21584 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0818 15:07:03.489712 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.489717 21584 net.cpp:165] Memory required for data: 1323521500\nI0818 15:07:03.489728 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:03.489742 21584 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0818 15:07:03.489748 21584 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0818 15:07:03.489756 21584 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.489819 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0818 15:07:03.489980 21584 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0818 15:07:03.489994 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.489998 21584 net.cpp:165] Memory required for data: 1325569500\nI0818 15:07:03.490007 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0818 15:07:03.490020 21584 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0818 15:07:03.490025 21584 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0818 15:07:03.490036 21584 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0818 15:07:03.490047 21584 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0818 15:07:03.490054 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.490058 21584 net.cpp:165] Memory required for data: 1327617500\nI0818 15:07:03.490063 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0818 15:07:03.490074 21584 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0818 15:07:03.490080 21584 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0818 15:07:03.490092 21584 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0818 15:07:03.492127 21584 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0818 15:07:03.492144 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.492151 21584 net.cpp:165] Memory 
required for data: 1329665500\nI0818 15:07:03.492159 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0818 15:07:03.492172 21584 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0818 15:07:03.492179 21584 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0818 15:07:03.492188 21584 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0818 15:07:03.492467 21584 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0818 15:07:03.492480 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.492486 21584 net.cpp:165] Memory required for data: 1331713500\nI0818 15:07:03.492496 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:03.492509 21584 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0818 15:07:03.492516 21584 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0818 15:07:03.492524 21584 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0818 15:07:03.492588 21584 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0818 15:07:03.492763 21584 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0818 15:07:03.492776 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.492782 21584 net.cpp:165] Memory required for data: 1333761500\nI0818 15:07:03.492791 21584 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0818 15:07:03.492805 21584 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0818 15:07:03.492811 21584 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0818 15:07:03.492818 21584 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0818 15:07:03.492830 21584 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0818 15:07:03.492873 21584 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0818 15:07:03.492883 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.492888 21584 net.cpp:165] Memory required for data: 1335809500\nI0818 15:07:03.492893 21584 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0818 15:07:03.492904 21584 net.cpp:100] Creating Layer L3_b4_relu\nI0818 
15:07:03.492911 21584 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0818 15:07:03.492919 21584 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0818 15:07:03.492928 21584 net.cpp:150] Setting up L3_b4_relu\nI0818 15:07:03.492935 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.492940 21584 net.cpp:165] Memory required for data: 1337857500\nI0818 15:07:03.492945 21584 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.492952 21584 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.492957 21584 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0818 15:07:03.492965 21584 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:03.492975 21584 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:03.493026 21584 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0818 15:07:03.493038 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.493046 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.493049 21584 net.cpp:165] Memory required for data: 1341953500\nI0818 15:07:03.493055 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0818 15:07:03.493069 21584 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0818 15:07:03.493077 21584 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0818 15:07:03.493085 21584 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0818 15:07:03.494112 21584 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0818 15:07:03.494127 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.494132 21584 net.cpp:165] Memory required for data: 1344001500\nI0818 15:07:03.494143 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0818 15:07:03.494155 21584 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0818 
15:07:03.494163 21584 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0818 15:07:03.494175 21584 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0818 15:07:03.494451 21584 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0818 15:07:03.494464 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.494470 21584 net.cpp:165] Memory required for data: 1346049500\nI0818 15:07:03.494480 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:03.494489 21584 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0818 15:07:03.494496 21584 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0818 15:07:03.494504 21584 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.494566 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0818 15:07:03.494736 21584 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0818 15:07:03.494750 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.494755 21584 net.cpp:165] Memory required for data: 1348097500\nI0818 15:07:03.494765 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0818 15:07:03.494772 21584 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0818 15:07:03.494779 21584 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0818 15:07:03.494791 21584 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0818 15:07:03.494802 21584 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0818 15:07:03.494809 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.494814 21584 net.cpp:165] Memory required for data: 1350145500\nI0818 15:07:03.494819 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0818 15:07:03.494833 21584 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0818 15:07:03.494840 21584 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0818 15:07:03.494855 21584 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0818 15:07:03.495893 21584 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0818 15:07:03.495908 21584 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 15:07:03.495913 21584 net.cpp:165] Memory required for data: 1352193500\nI0818 15:07:03.495923 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0818 15:07:03.495934 21584 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0818 15:07:03.495941 21584 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0818 15:07:03.495950 21584 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0818 15:07:03.496220 21584 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0818 15:07:03.496233 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496239 21584 net.cpp:165] Memory required for data: 1354241500\nI0818 15:07:03.496249 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:03.496263 21584 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0818 15:07:03.496270 21584 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0818 15:07:03.496279 21584 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0818 15:07:03.496341 21584 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0818 15:07:03.496507 21584 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0818 15:07:03.496521 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496526 21584 net.cpp:165] Memory required for data: 1356289500\nI0818 15:07:03.496536 21584 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0818 15:07:03.496547 21584 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0818 15:07:03.496554 21584 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0818 15:07:03.496562 21584 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0818 15:07:03.496572 21584 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0818 15:07:03.496608 21584 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0818 15:07:03.496618 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496623 21584 net.cpp:165] Memory required for data: 1358337500\nI0818 15:07:03.496628 21584 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0818 
15:07:03.496640 21584 net.cpp:100] Creating Layer L3_b5_relu\nI0818 15:07:03.496647 21584 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0818 15:07:03.496661 21584 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0818 15:07:03.496672 21584 net.cpp:150] Setting up L3_b5_relu\nI0818 15:07:03.496680 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496685 21584 net.cpp:165] Memory required for data: 1360385500\nI0818 15:07:03.496688 21584 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.496696 21584 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.496701 21584 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0818 15:07:03.496709 21584 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:03.496719 21584 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:03.496769 21584 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0818 15:07:03.496781 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496788 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.496793 21584 net.cpp:165] Memory required for data: 1364481500\nI0818 15:07:03.496798 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0818 15:07:03.496814 21584 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0818 15:07:03.496820 21584 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0818 15:07:03.496829 21584 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0818 15:07:03.497864 21584 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0818 15:07:03.497879 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.497884 21584 net.cpp:165] Memory required for data: 1366529500\nI0818 15:07:03.497894 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0818 15:07:03.497912 
21584 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0818 15:07:03.497920 21584 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0818 15:07:03.497931 21584 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0818 15:07:03.498204 21584 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0818 15:07:03.498217 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.498224 21584 net.cpp:165] Memory required for data: 1368577500\nI0818 15:07:03.498234 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:03.498242 21584 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0818 15:07:03.498250 21584 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0818 15:07:03.498260 21584 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.498322 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0818 15:07:03.498489 21584 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0818 15:07:03.498503 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.498508 21584 net.cpp:165] Memory required for data: 1370625500\nI0818 15:07:03.498517 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0818 15:07:03.498527 21584 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0818 15:07:03.498533 21584 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0818 15:07:03.498543 21584 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0818 15:07:03.498554 21584 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0818 15:07:03.498561 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.498565 21584 net.cpp:165] Memory required for data: 1372673500\nI0818 15:07:03.498570 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0818 15:07:03.498584 21584 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0818 15:07:03.498590 21584 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0818 15:07:03.498600 21584 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0818 15:07:03.499639 21584 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0818 
15:07:03.499658 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.499665 21584 net.cpp:165] Memory required for data: 1374721500\nI0818 15:07:03.499675 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0818 15:07:03.499686 21584 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0818 15:07:03.499693 21584 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0818 15:07:03.499702 21584 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0818 15:07:03.499979 21584 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0818 15:07:03.499992 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.499999 21584 net.cpp:165] Memory required for data: 1376769500\nI0818 15:07:03.500008 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:03.500020 21584 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0818 15:07:03.500027 21584 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0818 15:07:03.500038 21584 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0818 15:07:03.500097 21584 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0818 15:07:03.500262 21584 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0818 15:07:03.500275 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.500280 21584 net.cpp:165] Memory required for data: 1378817500\nI0818 15:07:03.500289 21584 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0818 15:07:03.500298 21584 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0818 15:07:03.500305 21584 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0818 15:07:03.500313 21584 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0818 15:07:03.500324 21584 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0818 15:07:03.500358 21584 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0818 15:07:03.500370 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.500375 21584 net.cpp:165] Memory required for data: 1380865500\nI0818 15:07:03.500380 21584 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0818 15:07:03.500392 21584 net.cpp:100] Creating Layer L3_b6_relu\nI0818 15:07:03.500406 21584 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0818 15:07:03.500414 21584 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0818 15:07:03.500424 21584 net.cpp:150] Setting up L3_b6_relu\nI0818 15:07:03.500432 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.500435 21584 net.cpp:165] Memory required for data: 1382913500\nI0818 15:07:03.500440 21584 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.500448 21584 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.500453 21584 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0818 15:07:03.500461 21584 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:03.500471 21584 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:03.500526 21584 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0818 15:07:03.500538 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.500545 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.500550 21584 net.cpp:165] Memory required for data: 1387009500\nI0818 15:07:03.500555 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0818 15:07:03.500568 21584 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0818 15:07:03.500576 21584 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0818 15:07:03.500584 21584 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0818 15:07:03.501647 21584 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0818 15:07:03.501668 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.501673 21584 net.cpp:165] Memory required for data: 1389057500\nI0818 15:07:03.501682 21584 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0818 15:07:03.501698 21584 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0818 15:07:03.501705 21584 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0818 15:07:03.501718 21584 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0818 15:07:03.501997 21584 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0818 15:07:03.502012 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.502017 21584 net.cpp:165] Memory required for data: 1391105500\nI0818 15:07:03.502027 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:03.502037 21584 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0818 15:07:03.502043 21584 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0818 15:07:03.502053 21584 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.502116 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0818 15:07:03.502281 21584 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0818 15:07:03.502295 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.502300 21584 net.cpp:165] Memory required for data: 1393153500\nI0818 15:07:03.502308 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0818 15:07:03.502346 21584 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0818 15:07:03.502354 21584 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0818 15:07:03.502363 21584 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0818 15:07:03.502373 21584 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0818 15:07:03.502382 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.502385 21584 net.cpp:165] Memory required for data: 1395201500\nI0818 15:07:03.502391 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0818 15:07:03.502405 21584 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0818 15:07:03.502411 21584 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0818 15:07:03.502420 21584 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0818 
15:07:03.503463 21584 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0818 15:07:03.503479 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.503484 21584 net.cpp:165] Memory required for data: 1397249500\nI0818 15:07:03.503494 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0818 15:07:03.503515 21584 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0818 15:07:03.503521 21584 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0818 15:07:03.503533 21584 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0818 15:07:03.503816 21584 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0818 15:07:03.503829 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.503835 21584 net.cpp:165] Memory required for data: 1399297500\nI0818 15:07:03.503845 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:03.503854 21584 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0818 15:07:03.503861 21584 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0818 15:07:03.503872 21584 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0818 15:07:03.503934 21584 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0818 15:07:03.504106 21584 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0818 15:07:03.504118 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.504124 21584 net.cpp:165] Memory required for data: 1401345500\nI0818 15:07:03.504133 21584 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0818 15:07:03.504142 21584 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0818 15:07:03.504149 21584 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0818 15:07:03.504156 21584 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0818 15:07:03.504168 21584 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0818 15:07:03.504206 21584 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0818 15:07:03.504218 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.504225 21584 net.cpp:165] Memory 
required for data: 1403393500\nI0818 15:07:03.504230 21584 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0818 15:07:03.504238 21584 net.cpp:100] Creating Layer L3_b7_relu\nI0818 15:07:03.504245 21584 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0818 15:07:03.504251 21584 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0818 15:07:03.504264 21584 net.cpp:150] Setting up L3_b7_relu\nI0818 15:07:03.504271 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.504276 21584 net.cpp:165] Memory required for data: 1405441500\nI0818 15:07:03.504281 21584 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.504288 21584 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.504294 21584 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0818 15:07:03.504302 21584 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:03.504312 21584 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:03.504364 21584 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0818 15:07:03.504375 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.504382 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.504387 21584 net.cpp:165] Memory required for data: 1409537500\nI0818 15:07:03.504392 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0818 15:07:03.504403 21584 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0818 15:07:03.504410 21584 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0818 15:07:03.504422 21584 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0818 15:07:03.506455 21584 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0818 15:07:03.506474 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.506479 21584 net.cpp:165] Memory required for data: 
1411585500\nI0818 15:07:03.506489 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0818 15:07:03.506498 21584 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0818 15:07:03.506510 21584 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0818 15:07:03.506518 21584 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0818 15:07:03.506801 21584 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0818 15:07:03.506815 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.506829 21584 net.cpp:165] Memory required for data: 1413633500\nI0818 15:07:03.506839 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:03.506849 21584 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0818 15:07:03.506855 21584 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0818 15:07:03.506870 21584 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.506932 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0818 15:07:03.507098 21584 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0818 15:07:03.507112 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.507117 21584 net.cpp:165] Memory required for data: 1415681500\nI0818 15:07:03.507125 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0818 15:07:03.507138 21584 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0818 15:07:03.507144 21584 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0818 15:07:03.507151 21584 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0818 15:07:03.507161 21584 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0818 15:07:03.507169 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.507174 21584 net.cpp:165] Memory required for data: 1417729500\nI0818 15:07:03.507179 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0818 15:07:03.507192 21584 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0818 15:07:03.507200 21584 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0818 15:07:03.507210 21584 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0818 15:07:03.508247 21584 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0818 15:07:03.508263 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.508268 21584 net.cpp:165] Memory required for data: 1419777500\nI0818 15:07:03.508277 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0818 15:07:03.508287 21584 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0818 15:07:03.508294 21584 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0818 15:07:03.508306 21584 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0818 15:07:03.508582 21584 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0818 15:07:03.508599 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.508605 21584 net.cpp:165] Memory required for data: 1421825500\nI0818 15:07:03.508615 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:03.508625 21584 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0818 15:07:03.508630 21584 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0818 15:07:03.508638 21584 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0818 15:07:03.508704 21584 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0818 15:07:03.508869 21584 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0818 15:07:03.508882 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.508888 21584 net.cpp:165] Memory required for data: 1423873500\nI0818 15:07:03.508898 21584 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0818 15:07:03.508910 21584 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0818 15:07:03.508918 21584 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0818 15:07:03.508924 21584 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0818 15:07:03.508932 21584 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0818 15:07:03.508970 21584 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0818 15:07:03.508981 21584 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0818 15:07:03.508986 21584 net.cpp:165] Memory required for data: 1425921500\nI0818 15:07:03.508992 21584 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0818 15:07:03.509001 21584 net.cpp:100] Creating Layer L3_b8_relu\nI0818 15:07:03.509006 21584 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0818 15:07:03.509014 21584 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0818 15:07:03.509024 21584 net.cpp:150] Setting up L3_b8_relu\nI0818 15:07:03.509032 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.509035 21584 net.cpp:165] Memory required for data: 1427969500\nI0818 15:07:03.509050 21584 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.509058 21584 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.509063 21584 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0818 15:07:03.509074 21584 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:03.509085 21584 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:03.509132 21584 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0818 15:07:03.509145 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.509151 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.509156 21584 net.cpp:165] Memory required for data: 1432065500\nI0818 15:07:03.509161 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0818 15:07:03.509178 21584 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0818 15:07:03.509186 21584 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0818 15:07:03.509194 21584 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0818 15:07:03.510227 21584 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0818 15:07:03.510242 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 
15:07:03.510247 21584 net.cpp:165] Memory required for data: 1434113500\nI0818 15:07:03.510257 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0818 15:07:03.510269 21584 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0818 15:07:03.510277 21584 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0818 15:07:03.510285 21584 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0818 15:07:03.510560 21584 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0818 15:07:03.510574 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.510579 21584 net.cpp:165] Memory required for data: 1436161500\nI0818 15:07:03.510589 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:03.510601 21584 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0818 15:07:03.510608 21584 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0818 15:07:03.510617 21584 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.510691 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0818 15:07:03.510859 21584 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0818 15:07:03.510872 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.510877 21584 net.cpp:165] Memory required for data: 1438209500\nI0818 15:07:03.510886 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0818 15:07:03.510900 21584 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0818 15:07:03.510906 21584 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0818 15:07:03.510915 21584 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0818 15:07:03.510924 21584 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0818 15:07:03.510931 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.510936 21584 net.cpp:165] Memory required for data: 1440257500\nI0818 15:07:03.510941 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0818 15:07:03.510956 21584 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0818 15:07:03.510962 21584 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0818 15:07:03.510973 21584 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0818 15:07:03.512002 21584 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0818 15:07:03.512017 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.512023 21584 net.cpp:165] Memory required for data: 1442305500\nI0818 15:07:03.512032 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0818 15:07:03.512042 21584 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0818 15:07:03.512048 21584 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0818 15:07:03.512060 21584 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0818 15:07:03.512341 21584 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0818 15:07:03.512358 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.512370 21584 net.cpp:165] Memory required for data: 1444353500\nI0818 15:07:03.512382 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:03.512392 21584 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0818 15:07:03.512398 21584 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0818 15:07:03.512405 21584 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0818 15:07:03.512466 21584 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0818 15:07:03.512631 21584 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0818 15:07:03.512645 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.512655 21584 net.cpp:165] Memory required for data: 1446401500\nI0818 15:07:03.512665 21584 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0818 15:07:03.512679 21584 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0818 15:07:03.512687 21584 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0818 15:07:03.512694 21584 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0818 15:07:03.512702 21584 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0818 15:07:03.512742 21584 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0818 
15:07:03.512753 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.512758 21584 net.cpp:165] Memory required for data: 1448449500\nI0818 15:07:03.512763 21584 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0818 15:07:03.512771 21584 net.cpp:100] Creating Layer L3_b9_relu\nI0818 15:07:03.512778 21584 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0818 15:07:03.512784 21584 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0818 15:07:03.512794 21584 net.cpp:150] Setting up L3_b9_relu\nI0818 15:07:03.512801 21584 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0818 15:07:03.512805 21584 net.cpp:165] Memory required for data: 1450497500\nI0818 15:07:03.512810 21584 layer_factory.hpp:77] Creating layer post_pool\nI0818 15:07:03.512820 21584 net.cpp:100] Creating Layer post_pool\nI0818 15:07:03.512825 21584 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0818 15:07:03.512835 21584 net.cpp:408] post_pool -> post_pool\nI0818 15:07:03.512872 21584 net.cpp:150] Setting up post_pool\nI0818 15:07:03.512882 21584 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0818 15:07:03.512887 21584 net.cpp:165] Memory required for data: 1450529500\nI0818 15:07:03.512892 21584 layer_factory.hpp:77] Creating layer post_FC\nI0818 15:07:03.512903 21584 net.cpp:100] Creating Layer post_FC\nI0818 15:07:03.512909 21584 net.cpp:434] post_FC <- post_pool\nI0818 15:07:03.512922 21584 net.cpp:408] post_FC -> post_FC_top\nI0818 15:07:03.513085 21584 net.cpp:150] Setting up post_FC\nI0818 15:07:03.513099 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.513104 21584 net.cpp:165] Memory required for data: 1450534500\nI0818 15:07:03.513113 21584 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0818 15:07:03.513124 21584 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0818 15:07:03.513131 21584 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0818 15:07:03.513139 21584 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0818 15:07:03.513149 21584 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0818 15:07:03.513203 21584 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0818 15:07:03.513216 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.513221 21584 net.cpp:157] Top shape: 125 10 (1250)\nI0818 15:07:03.513226 21584 net.cpp:165] Memory required for data: 1450544500\nI0818 15:07:03.513231 21584 layer_factory.hpp:77] Creating layer accuracy\nI0818 15:07:03.513240 21584 net.cpp:100] Creating Layer accuracy\nI0818 15:07:03.513247 21584 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0818 15:07:03.513253 21584 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0818 15:07:03.513262 21584 net.cpp:408] accuracy -> accuracy\nI0818 15:07:03.513274 21584 net.cpp:150] Setting up accuracy\nI0818 15:07:03.513281 21584 net.cpp:157] Top shape: (1)\nI0818 15:07:03.513293 21584 net.cpp:165] Memory required for data: 1450544504\nI0818 15:07:03.513298 21584 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:03.513311 21584 net.cpp:100] Creating Layer loss\nI0818 15:07:03.513317 21584 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0818 15:07:03.513324 21584 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0818 15:07:03.513332 21584 net.cpp:408] loss -> loss\nI0818 15:07:03.513345 21584 layer_factory.hpp:77] Creating layer loss\nI0818 15:07:03.513468 21584 net.cpp:150] Setting up loss\nI0818 15:07:03.513481 21584 net.cpp:157] Top shape: (1)\nI0818 15:07:03.513486 21584 net.cpp:160]     with loss weight 1\nI0818 15:07:03.513502 21584 net.cpp:165] Memory required for data: 1450544508\nI0818 15:07:03.513509 21584 net.cpp:226] loss needs backward computation.\nI0818 15:07:03.513515 21584 net.cpp:228] accuracy does not need backward computation.\nI0818 15:07:03.513521 21584 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0818 15:07:03.513527 21584 net.cpp:226] post_FC needs backward 
computation.\nI0818 15:07:03.513532 21584 net.cpp:226] post_pool needs backward computation.\nI0818 15:07:03.513537 21584 net.cpp:226] L3_b9_relu needs backward computation.\nI0818 15:07:03.513541 21584 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.513547 21584 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.513552 21584 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.513557 21584 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.513562 21584 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.513567 21584 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.513572 21584 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.513577 21584 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.513582 21584 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0818 15:07:03.513587 21584 net.cpp:226] L3_b8_relu needs backward computation.\nI0818 15:07:03.513592 21584 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.513600 21584 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.513607 21584 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.513612 21584 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.513617 21584 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.513622 21584 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0818 15:07:03.513626 21584 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.513633 21584 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.513638 21584 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0818 15:07:03.513643 21584 net.cpp:226] L3_b7_relu needs backward computation.\nI0818 15:07:03.513648 21584 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0818 15:07:03.513660 21584 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.513665 21584 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.513671 21584 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.513676 21584 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.513681 21584 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.513686 21584 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.513691 21584 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.513697 21584 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0818 15:07:03.513702 21584 net.cpp:226] L3_b6_relu needs backward computation.\nI0818 15:07:03.513707 21584 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.513713 21584 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.513718 21584 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.513731 21584 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.513736 21584 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.513741 21584 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.513746 21584 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.513751 21584 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.513757 21584 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0818 15:07:03.513762 21584 net.cpp:226] L3_b5_relu needs backward computation.\nI0818 15:07:03.513767 21584 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0818 15:07:03.513773 21584 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.513778 21584 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.513783 21584 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.513789 21584 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.513794 21584 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.513799 21584 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.513804 21584 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.513809 21584 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0818 15:07:03.513815 21584 net.cpp:226] L3_b4_relu needs backward computation.\nI0818 15:07:03.513820 21584 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.513826 21584 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.513831 21584 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.513836 21584 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.513842 21584 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.513847 21584 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.513852 21584 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.513857 21584 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.513862 21584 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0818 15:07:03.513869 21584 net.cpp:226] L3_b3_relu needs backward computation.\nI0818 15:07:03.513873 21584 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0818 15:07:03.513878 21584 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.513885 21584 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.513890 21584 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.513898 21584 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.513903 21584 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.513909 
21584 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.513914 21584 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.513921 21584 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0818 15:07:03.513926 21584 net.cpp:226] L3_b2_relu needs backward computation.\nI0818 15:07:03.513931 21584 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.513937 21584 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.513942 21584 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.513947 21584 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.513953 21584 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.513958 21584 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.513963 21584 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.513968 21584 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.513974 21584 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0818 15:07:03.513979 21584 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0818 15:07:03.513991 21584 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0818 15:07:03.513996 21584 net.cpp:226] L3_b1_relu needs backward computation.\nI0818 15:07:03.514003 21584 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.514008 21584 net.cpp:226] L3_b1_pool needs backward computation.\nI0818 15:07:03.514014 21584 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.514019 21584 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.514024 21584 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.514030 21584 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.514035 21584 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0818 
15:07:03.514040 21584 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.514046 21584 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.514051 21584 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0818 15:07:03.514057 21584 net.cpp:226] L2_b9_relu needs backward computation.\nI0818 15:07:03.514062 21584 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.514068 21584 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.514073 21584 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.514080 21584 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.514084 21584 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.514091 21584 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.514096 21584 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.514101 21584 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.514106 21584 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0818 15:07:03.514111 21584 net.cpp:226] L2_b8_relu needs backward computation.\nI0818 15:07:03.514117 21584 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.514123 21584 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.514128 21584 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.514134 21584 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.514139 21584 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.514144 21584 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0818 15:07:03.514150 21584 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.514155 21584 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.514161 21584 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0818 15:07:03.514166 21584 net.cpp:226] L2_b7_relu needs backward computation.\nI0818 15:07:03.514173 21584 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0818 15:07:03.514178 21584 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.514183 21584 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.514189 21584 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.514194 21584 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.514199 21584 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.514204 21584 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.514210 21584 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.514216 21584 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0818 15:07:03.514221 21584 net.cpp:226] L2_b6_relu needs backward computation.\nI0818 15:07:03.514226 21584 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.514233 21584 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.514238 21584 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.514245 21584 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.514250 21584 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.514263 21584 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.514268 21584 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.514274 21584 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.514284 21584 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0818 15:07:03.514291 21584 net.cpp:226] L2_b5_relu needs backward computation.\nI0818 15:07:03.514297 21584 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0818 15:07:03.514302 21584 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.514307 21584 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.514313 21584 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.514319 21584 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.514324 21584 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.514329 21584 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.514335 21584 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.514340 21584 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0818 15:07:03.514346 21584 net.cpp:226] L2_b4_relu needs backward computation.\nI0818 15:07:03.514351 21584 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.514358 21584 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.514364 21584 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.514369 21584 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.514375 21584 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.514380 21584 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.514385 21584 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.514391 21584 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.514397 21584 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0818 15:07:03.514403 21584 net.cpp:226] L2_b3_relu needs backward computation.\nI0818 15:07:03.514408 21584 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0818 15:07:03.514415 21584 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.514420 21584 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.514426 21584 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.514431 21584 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.514437 21584 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.514442 21584 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.514448 21584 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.514454 21584 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0818 15:07:03.514461 21584 net.cpp:226] L2_b2_relu needs backward computation.\nI0818 15:07:03.514466 21584 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.514472 21584 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.514477 21584 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.514483 21584 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.514488 21584 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.514494 21584 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.514499 21584 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.514505 21584 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.514510 21584 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0818 15:07:03.514516 21584 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0818 15:07:03.514523 21584 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0818 15:07:03.514528 21584 net.cpp:226] L2_b1_relu needs backward computation.\nI0818 15:07:03.514539 21584 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.514545 21584 net.cpp:226] L2_b1_pool needs backward computation.\nI0818 15:07:03.514552 21584 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.514557 21584 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.514564 21584 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.514569 21584 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.514575 21584 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0818 15:07:03.514580 21584 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.514586 21584 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.514591 21584 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0818 15:07:03.514597 21584 net.cpp:226] L1_b9_relu needs backward computation.\nI0818 15:07:03.514603 21584 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0818 15:07:03.514609 21584 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0818 15:07:03.514614 21584 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0818 15:07:03.514621 21584 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0818 15:07:03.514626 21584 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0818 15:07:03.514631 21584 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0818 15:07:03.514636 21584 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0818 15:07:03.514642 21584 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0818 15:07:03.514648 21584 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0818 15:07:03.514660 21584 net.cpp:226] L1_b8_relu needs backward computation.\nI0818 15:07:03.514665 21584 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0818 15:07:03.514672 21584 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0818 15:07:03.514678 21584 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0818 15:07:03.514683 21584 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0818 15:07:03.514690 21584 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0818 15:07:03.514695 21584 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0818 
15:07:03.514701 21584 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0818 15:07:03.514708 21584 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0818 15:07:03.514714 21584 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0818 15:07:03.514719 21584 net.cpp:226] L1_b7_relu needs backward computation.\nI0818 15:07:03.514724 21584 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0818 15:07:03.514731 21584 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0818 15:07:03.514736 21584 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0818 15:07:03.514742 21584 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0818 15:07:03.514749 21584 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0818 15:07:03.514755 21584 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0818 15:07:03.514760 21584 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0818 15:07:03.514765 21584 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0818 15:07:03.514771 21584 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0818 15:07:03.514777 21584 net.cpp:226] L1_b6_relu needs backward computation.\nI0818 15:07:03.514783 21584 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0818 15:07:03.514791 21584 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0818 15:07:03.514796 21584 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0818 15:07:03.514801 21584 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0818 15:07:03.514807 21584 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0818 15:07:03.514812 21584 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0818 15:07:03.514823 21584 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0818 15:07:03.514829 21584 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0818 15:07:03.514835 21584 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0818 15:07:03.514842 21584 net.cpp:226] L1_b5_relu needs backward computation.\nI0818 15:07:03.514847 21584 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0818 15:07:03.514853 21584 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0818 15:07:03.514859 21584 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0818 15:07:03.514865 21584 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0818 15:07:03.514871 21584 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0818 15:07:03.514876 21584 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0818 15:07:03.514883 21584 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0818 15:07:03.514888 21584 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0818 15:07:03.514894 21584 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0818 15:07:03.514899 21584 net.cpp:226] L1_b4_relu needs backward computation.\nI0818 15:07:03.514905 21584 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0818 15:07:03.514911 21584 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0818 15:07:03.514917 21584 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0818 15:07:03.514924 21584 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0818 15:07:03.514928 21584 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0818 15:07:03.514935 21584 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0818 15:07:03.514940 21584 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0818 15:07:03.514946 21584 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0818 15:07:03.514952 21584 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0818 15:07:03.514957 21584 net.cpp:226] L1_b3_relu needs backward computation.\nI0818 15:07:03.514963 21584 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0818 15:07:03.514969 21584 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0818 15:07:03.514976 21584 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0818 15:07:03.514984 21584 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0818 15:07:03.514991 21584 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0818 15:07:03.514997 21584 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0818 15:07:03.515002 21584 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0818 15:07:03.515008 21584 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0818 15:07:03.515014 21584 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0818 15:07:03.515020 21584 net.cpp:226] L1_b2_relu needs backward computation.\nI0818 15:07:03.515025 21584 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0818 15:07:03.515033 21584 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0818 15:07:03.515038 21584 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0818 15:07:03.515044 21584 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0818 15:07:03.515050 21584 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0818 15:07:03.515055 21584 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0818 15:07:03.515061 21584 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0818 15:07:03.515067 21584 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0818 15:07:03.515074 21584 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0818 15:07:03.515079 21584 net.cpp:226] L1_b1_relu needs backward computation.\nI0818 15:07:03.515085 21584 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0818 15:07:03.515092 21584 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0818 15:07:03.515099 21584 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0818 15:07:03.515110 21584 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0818 15:07:03.515116 21584 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0818 15:07:03.515122 21584 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0818 15:07:03.515128 21584 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0818 15:07:03.515135 21584 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0818 15:07:03.515141 21584 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0818 15:07:03.515146 21584 net.cpp:226] pre_relu needs backward computation.\nI0818 15:07:03.515151 21584 net.cpp:226] pre_scale needs backward computation.\nI0818 15:07:03.515157 21584 net.cpp:226] pre_bn needs backward computation.\nI0818 15:07:03.515163 21584 net.cpp:226] pre_conv needs backward computation.\nI0818 15:07:03.515169 21584 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0818 15:07:03.515177 21584 net.cpp:228] dataLayer does not need backward computation.\nI0818 15:07:03.515180 21584 net.cpp:270] This network produces output accuracy\nI0818 15:07:03.515187 21584 net.cpp:270] This network produces output loss\nI0818 15:07:03.515514 21584 net.cpp:283] Network initialization done.\nI0818 15:07:03.516531 21584 solver.cpp:60] Solver scaffolding done.\nI0818 15:07:03.741080 21584 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0818 15:07:04.096664 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:04.096720 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:04.104112 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:04.332435 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:04.332521 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:04.367161 21584 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0818 15:07:04.367245 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:04.807153 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:04.807209 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:04.815048 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:05.073428 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:05.073534 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:05.124794 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:05.124899 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:05.634459 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:05.634536 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:05.643054 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:05.904865 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:05.905038 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:05.975162 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:05.975317 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:06.058334 21584 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0818 15:07:06.546103 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:06.546172 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0818 15:07:06.556182 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:06.851933 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:06.852128 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:06.942791 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:06.942977 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:07.596145 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:07.596199 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:07.606314 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:07.915768 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:07.915946 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:08.028221 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:08.028398 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:08.736816 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:08.736868 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:08.748502 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:09.085996 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:09.086201 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:09.217099 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:09.217299 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 
15:07:09.991261 21584 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0818 15:07:09.991315 21584 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0818 15:07:10.003442 21584 data_layer.cpp:41] output data size: 125,3,32,32\nI0818 15:07:10.109155 21595 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:10.218726 21598 blocking_queue.cpp:50] Waiting for data\nI0818 15:07:10.460918 21584 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0818 15:07:10.461153 21584 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0818 15:07:10.611805 21584 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0818 15:07:10.612036 21584 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0818 15:07:10.781106 21584 parallel.cpp:425] Starting Optimization\nI0818 15:07:10.782795 21584 solver.cpp:279] Solving Cifar-Resnet\nI0818 15:07:10.782810 21584 solver.cpp:280] Learning Rate Policy: triangular\nI0818 15:07:10.787302 21584 solver.cpp:337] Iteration 0, Testing net (#0)\nI0818 15:08:33.416121 21584 solver.cpp:404]     Test net output #0: accuracy = 0.1124\nI0818 15:08:33.416393 21584 solver.cpp:404]     Test net output #1: loss = 3.411 (* 1 = 3.411 loss)\nI0818 15:08:37.526449 21584 solver.cpp:228] Iteration 0, loss = 3.57319\nI0818 15:08:37.526490 21584 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI0818 15:08:37.526506 21584 solver.cpp:244]     Train net output #1: loss = 3.57319 (* 1 = 3.57319 loss)\nI0818 15:08:37.542634 21584 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0818 15:10:56.288283 21584 solver.cpp:337] Iteration 100, Testing net (#0)\nI0818 15:12:18.757721 21584 solver.cpp:404]     Test net output #0: accuracy = 0.3126\nI0818 15:12:18.757992 21584 solver.cpp:404]     Test net output #1: loss = 1.87979 (* 1 = 1.87979 loss)\nI0818 15:12:20.056885 21584 
solver.cpp:228] Iteration 100, loss = 1.83387\nI0818 15:12:20.056927 21584 solver.cpp:244]     Train net output #0: accuracy = 0.392\nI0818 15:12:20.056944 21584 solver.cpp:244]     Train net output #1: loss = 1.83387 (* 1 = 1.83387 loss)\nI0818 15:12:20.175618 21584 sgd_solver.cpp:166] Iteration 100, lr = 0.00250006\nI0818 15:14:38.751132 21584 solver.cpp:337] Iteration 200, Testing net (#0)\nI0818 15:16:01.230435 21584 solver.cpp:404]     Test net output #0: accuracy = 0.447\nI0818 15:16:01.230700 21584 solver.cpp:404]     Test net output #1: loss = 1.49258 (* 1 = 1.49258 loss)\nI0818 15:16:02.529366 21584 solver.cpp:228] Iteration 200, loss = 1.494\nI0818 15:16:02.529407 21584 solver.cpp:244]     Train net output #0: accuracy = 0.456\nI0818 15:16:02.529422 21584 solver.cpp:244]     Train net output #1: loss = 1.494 (* 1 = 1.494 loss)\nI0818 15:16:02.646540 21584 sgd_solver.cpp:166] Iteration 200, lr = 0.005\nI0818 15:18:21.261046 21584 solver.cpp:337] Iteration 300, Testing net (#0)\nI0818 15:19:43.697605 21584 solver.cpp:404]     Test net output #0: accuracy = 0.52396\nI0818 15:19:43.697867 21584 solver.cpp:404]     Test net output #1: loss = 1.30937 (* 1 = 1.30937 loss)\nI0818 15:19:44.995370 21584 solver.cpp:228] Iteration 300, loss = 1.23586\nI0818 15:19:44.995412 21584 solver.cpp:244]     Train net output #0: accuracy = 0.56\nI0818 15:19:44.995429 21584 solver.cpp:244]     Train net output #1: loss = 1.23586 (* 1 = 1.23586 loss)\nI0818 15:19:45.112579 21584 sgd_solver.cpp:166] Iteration 300, lr = 0.00750005\nI0818 15:22:03.819885 21584 solver.cpp:337] Iteration 400, Testing net (#0)\nI0818 15:23:26.256139 21584 solver.cpp:404]     Test net output #0: accuracy = 0.58668\nI0818 15:23:26.256376 21584 solver.cpp:404]     Test net output #1: loss = 1.16209 (* 1 = 1.16209 loss)\nI0818 15:23:27.554477 21584 solver.cpp:228] Iteration 400, loss = 1.10759\nI0818 15:23:27.554519 21584 solver.cpp:244]     Train net output #0: accuracy = 0.608\nI0818 15:23:27.554535 
21584 solver.cpp:244]     Train net output #1: loss = 1.10759 (* 1 = 1.10759 loss)\nI0818 15:23:27.672847 21584 sgd_solver.cpp:166] Iteration 400, lr = 0.00999999\nI0818 15:25:46.251298 21584 solver.cpp:337] Iteration 500, Testing net (#0)\nI0818 15:27:08.686261 21584 solver.cpp:404]     Test net output #0: accuracy = 0.62444\nI0818 15:27:08.686522 21584 solver.cpp:404]     Test net output #1: loss = 1.04635 (* 1 = 1.04635 loss)\nI0818 15:27:09.984372 21584 solver.cpp:228] Iteration 500, loss = 0.956186\nI0818 15:27:09.984413 21584 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0818 15:27:09.984429 21584 solver.cpp:244]     Train net output #1: loss = 0.956186 (* 1 = 0.956186 loss)\nI0818 15:27:10.102859 21584 sgd_solver.cpp:166] Iteration 500, lr = 0.0125\nI0818 15:29:28.734323 21584 solver.cpp:337] Iteration 600, Testing net (#0)\nI0818 15:30:51.167289 21584 solver.cpp:404]     Test net output #0: accuracy = 0.65328\nI0818 15:30:51.167537 21584 solver.cpp:404]     Test net output #1: loss = 0.969793 (* 1 = 0.969793 loss)\nI0818 15:30:52.465589 21584 solver.cpp:228] Iteration 600, loss = 0.898877\nI0818 15:30:52.465629 21584 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0818 15:30:52.465646 21584 solver.cpp:244]     Train net output #1: loss = 0.898877 (* 1 = 0.898877 loss)\nI0818 15:30:52.580684 21584 sgd_solver.cpp:166] Iteration 600, lr = 0.015\nI0818 15:33:11.240197 21584 solver.cpp:337] Iteration 700, Testing net (#0)\nI0818 15:34:33.668119 21584 solver.cpp:404]     Test net output #0: accuracy = 0.68632\nI0818 15:34:33.668375 21584 solver.cpp:404]     Test net output #1: loss = 0.883919 (* 1 = 0.883919 loss)\nI0818 15:34:34.966825 21584 solver.cpp:228] Iteration 700, loss = 0.816914\nI0818 15:34:34.966867 21584 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 15:34:34.966884 21584 solver.cpp:244]     Train net output #1: loss = 0.816914 (* 1 = 0.816914 loss)\nI0818 15:34:35.086513 21584 sgd_solver.cpp:166] Iteration 
700, lr = 0.0175\nI0818 15:36:53.739056 21584 solver.cpp:337] Iteration 800, Testing net (#0)\nI0818 15:38:16.175954 21584 solver.cpp:404]     Test net output #0: accuracy = 0.7114\nI0818 15:38:16.176232 21584 solver.cpp:404]     Test net output #1: loss = 0.837101 (* 1 = 0.837101 loss)\nI0818 15:38:17.474216 21584 solver.cpp:228] Iteration 800, loss = 0.726399\nI0818 15:38:17.474259 21584 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 15:38:17.474274 21584 solver.cpp:244]     Train net output #1: loss = 0.726399 (* 1 = 0.726399 loss)\nI0818 15:38:17.590091 21584 sgd_solver.cpp:166] Iteration 800, lr = 0.02\nI0818 15:40:36.186192 21584 solver.cpp:337] Iteration 900, Testing net (#0)\nI0818 15:41:58.626404 21584 solver.cpp:404]     Test net output #0: accuracy = 0.72304\nI0818 15:41:58.626667 21584 solver.cpp:404]     Test net output #1: loss = 0.796538 (* 1 = 0.796538 loss)\nI0818 15:41:59.925673 21584 solver.cpp:228] Iteration 900, loss = 0.653013\nI0818 15:41:59.925716 21584 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0818 15:41:59.925736 21584 solver.cpp:244]     Train net output #1: loss = 0.653013 (* 1 = 0.653013 loss)\nI0818 15:42:00.046581 21584 sgd_solver.cpp:166] Iteration 900, lr = 0.0225\nI0818 15:44:18.692332 21584 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0818 15:45:41.160795 21584 solver.cpp:404]     Test net output #0: accuracy = 0.73772\nI0818 15:45:41.161062 21584 solver.cpp:404]     Test net output #1: loss = 0.775093 (* 1 = 0.775093 loss)\nI0818 15:45:42.459347 21584 solver.cpp:228] Iteration 1000, loss = 0.599237\nI0818 15:45:42.459388 21584 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0818 15:45:42.459403 21584 solver.cpp:244]     Train net output #1: loss = 0.599237 (* 1 = 0.599237 loss)\nI0818 15:45:42.575861 21584 sgd_solver.cpp:166] Iteration 1000, lr = 0.025\nI0818 15:48:01.135262 21584 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0818 15:49:23.605211 21584 solver.cpp:404]     
Test net output #0: accuracy = 0.74756\nI0818 15:49:23.605479 21584 solver.cpp:404]     Test net output #1: loss = 0.75818 (* 1 = 0.75818 loss)\nI0818 15:49:24.903815 21584 solver.cpp:228] Iteration 1100, loss = 0.544499\nI0818 15:49:24.903857 21584 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0818 15:49:24.903873 21584 solver.cpp:244]     Train net output #1: loss = 0.544499 (* 1 = 0.544499 loss)\nI0818 15:49:25.018069 21584 sgd_solver.cpp:166] Iteration 1100, lr = 0.0275\nI0818 15:51:43.665261 21584 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0818 15:53:06.140550 21584 solver.cpp:404]     Test net output #0: accuracy = 0.76184\nI0818 15:53:06.140817 21584 solver.cpp:404]     Test net output #1: loss = 0.715131 (* 1 = 0.715131 loss)\nI0818 15:53:07.440201 21584 solver.cpp:228] Iteration 1200, loss = 0.445277\nI0818 15:53:07.440245 21584 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 15:53:07.440261 21584 solver.cpp:244]     Train net output #1: loss = 0.445277 (* 1 = 0.445277 loss)\nI0818 15:53:07.558720 21584 sgd_solver.cpp:166] Iteration 1200, lr = 0.03\nI0818 15:55:26.256531 21584 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0818 15:56:48.746098 21584 solver.cpp:404]     Test net output #0: accuracy = 0.76924\nI0818 15:56:48.746359 21584 solver.cpp:404]     Test net output #1: loss = 0.697534 (* 1 = 0.697534 loss)\nI0818 15:56:50.045670 21584 solver.cpp:228] Iteration 1300, loss = 0.419985\nI0818 15:56:50.045712 21584 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0818 15:56:50.045728 21584 solver.cpp:244]     Train net output #1: loss = 0.419985 (* 1 = 0.419985 loss)\nI0818 15:56:50.163439 21584 sgd_solver.cpp:166] Iteration 1300, lr = 0.0325\nI0818 15:59:08.719072 21584 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0818 16:00:31.212085 21584 solver.cpp:404]     Test net output #0: accuracy = 0.7702\nI0818 16:00:31.212306 21584 solver.cpp:404]     Test net output #1: loss = 0.710896 (* 1 = 0.710896 
loss)\nI0818 16:00:32.511940 21584 solver.cpp:228] Iteration 1400, loss = 0.435386\nI0818 16:00:32.511984 21584 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI0818 16:00:32.511999 21584 solver.cpp:244]     Train net output #1: loss = 0.435386 (* 1 = 0.435386 loss)\nI0818 16:00:32.628244 21584 sgd_solver.cpp:166] Iteration 1400, lr = 0.035\nI0818 16:02:51.122042 21584 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0818 16:04:13.612262 21584 solver.cpp:404]     Test net output #0: accuracy = 0.76832\nI0818 16:04:13.612532 21584 solver.cpp:404]     Test net output #1: loss = 0.72503 (* 1 = 0.72503 loss)\nI0818 16:04:14.912358 21584 solver.cpp:228] Iteration 1500, loss = 0.288446\nI0818 16:04:14.912403 21584 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0818 16:04:14.912420 21584 solver.cpp:244]     Train net output #1: loss = 0.288446 (* 1 = 0.288446 loss)\nI0818 16:04:15.032567 21584 sgd_solver.cpp:166] Iteration 1500, lr = 0.0375\nI0818 16:06:33.706290 21584 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0818 16:07:56.200011 21584 solver.cpp:404]     Test net output #0: accuracy = 0.76656\nI0818 16:07:56.200271 21584 solver.cpp:404]     Test net output #1: loss = 0.754802 (* 1 = 0.754802 loss)\nI0818 16:07:57.500092 21584 solver.cpp:228] Iteration 1600, loss = 0.460264\nI0818 16:07:57.500136 21584 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0818 16:07:57.500154 21584 solver.cpp:244]     Train net output #1: loss = 0.460264 (* 1 = 0.460264 loss)\nI0818 16:07:57.619036 21584 sgd_solver.cpp:166] Iteration 1600, lr = 0.04\nI0818 16:10:16.325917 21584 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0818 16:11:38.818298 21584 solver.cpp:404]     Test net output #0: accuracy = 0.77588\nI0818 16:11:38.818536 21584 solver.cpp:404]     Test net output #1: loss = 0.719681 (* 1 = 0.719681 loss)\nI0818 16:11:40.118075 21584 solver.cpp:228] Iteration 1700, loss = 0.325724\nI0818 16:11:40.118119 21584 solver.cpp:244]     Train 
net output #0: accuracy = 0.904\nI0818 16:11:40.118135 21584 solver.cpp:244]     Train net output #1: loss = 0.325724 (* 1 = 0.325724 loss)\nI0818 16:11:40.241461 21584 sgd_solver.cpp:166] Iteration 1700, lr = 0.0425\nI0818 16:13:58.870705 21584 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0818 16:15:21.358530 21584 solver.cpp:404]     Test net output #0: accuracy = 0.7818\nI0818 16:15:21.358794 21584 solver.cpp:404]     Test net output #1: loss = 0.717021 (* 1 = 0.717021 loss)\nI0818 16:15:22.658416 21584 solver.cpp:228] Iteration 1800, loss = 0.340065\nI0818 16:15:22.658460 21584 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0818 16:15:22.658476 21584 solver.cpp:244]     Train net output #1: loss = 0.340065 (* 1 = 0.340065 loss)\nI0818 16:15:22.773370 21584 sgd_solver.cpp:166] Iteration 1800, lr = 0.045\nI0818 16:17:41.357029 21584 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0818 16:19:03.847257 21584 solver.cpp:404]     Test net output #0: accuracy = 0.78736\nI0818 16:19:03.847522 21584 solver.cpp:404]     Test net output #1: loss = 0.700058 (* 1 = 0.700058 loss)\nI0818 16:19:05.146670 21584 solver.cpp:228] Iteration 1900, loss = 0.293626\nI0818 16:19:05.146714 21584 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 16:19:05.146730 21584 solver.cpp:244]     Train net output #1: loss = 0.293626 (* 1 = 0.293626 loss)\nI0818 16:19:05.267765 21584 sgd_solver.cpp:166] Iteration 1900, lr = 0.0475\nI0818 16:21:23.896616 21584 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0818 16:22:46.274420 21584 solver.cpp:404]     Test net output #0: accuracy = 0.78248\nI0818 16:22:46.274686 21584 solver.cpp:404]     Test net output #1: loss = 0.723459 (* 1 = 0.723459 loss)\nI0818 16:22:47.574419 21584 solver.cpp:228] Iteration 2000, loss = 0.28447\nI0818 16:22:47.574463 21584 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 16:22:47.574479 21584 solver.cpp:244]     Train net output #1: loss = 0.28447 (* 1 = 0.28447 
loss)\nI0818 16:22:47.694602 21584 sgd_solver.cpp:166] Iteration 2000, lr = 0.05\nI0818 16:25:06.274708 21584 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0818 16:26:28.608115 21584 solver.cpp:404]     Test net output #0: accuracy = 0.78936\nI0818 16:26:28.608381 21584 solver.cpp:404]     Test net output #1: loss = 0.712884 (* 1 = 0.712884 loss)\nI0818 16:26:29.907181 21584 solver.cpp:228] Iteration 2100, loss = 0.412839\nI0818 16:26:29.907224 21584 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI0818 16:26:29.907241 21584 solver.cpp:244]     Train net output #1: loss = 0.412839 (* 1 = 0.412839 loss)\nI0818 16:26:30.022649 21584 sgd_solver.cpp:166] Iteration 2100, lr = 0.0525\nI0818 16:28:48.475064 21584 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0818 16:30:10.809202 21584 solver.cpp:404]     Test net output #0: accuracy = 0.79724\nI0818 16:30:10.809469 21584 solver.cpp:404]     Test net output #1: loss = 0.721425 (* 1 = 0.721425 loss)\nI0818 16:30:12.108363 21584 solver.cpp:228] Iteration 2200, loss = 0.270031\nI0818 16:30:12.108408 21584 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0818 16:30:12.108424 21584 solver.cpp:244]     Train net output #1: loss = 0.270031 (* 1 = 0.270031 loss)\nI0818 16:30:12.226832 21584 sgd_solver.cpp:166] Iteration 2200, lr = 0.0549999\nI0818 16:32:30.705642 21584 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0818 16:33:53.102393 21584 solver.cpp:404]     Test net output #0: accuracy = 0.79948\nI0818 16:33:53.102659 21584 solver.cpp:404]     Test net output #1: loss = 0.709935 (* 1 = 0.709935 loss)\nI0818 16:33:54.402395 21584 solver.cpp:228] Iteration 2300, loss = 0.269704\nI0818 16:33:54.402439 21584 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0818 16:33:54.402454 21584 solver.cpp:244]     Train net output #1: loss = 0.269704 (* 1 = 0.269704 loss)\nI0818 16:33:54.517946 21584 sgd_solver.cpp:166] Iteration 2300, lr = 0.0575\nI0818 16:36:12.878317 21584 solver.cpp:337] 
Iteration 2400, Testing net (#0)\nI0818 16:37:35.230582 21584 solver.cpp:404]     Test net output #0: accuracy = 0.7984\nI0818 16:37:35.230849 21584 solver.cpp:404]     Test net output #1: loss = 0.7035 (* 1 = 0.7035 loss)\nI0818 16:37:36.529783 21584 solver.cpp:228] Iteration 2400, loss = 0.227552\nI0818 16:37:36.529825 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 16:37:36.529841 21584 solver.cpp:244]     Train net output #1: loss = 0.227552 (* 1 = 0.227552 loss)\nI0818 16:37:36.645109 21584 sgd_solver.cpp:166] Iteration 2400, lr = 0.0599999\nI0818 16:39:55.035853 21584 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0818 16:41:17.363384 21584 solver.cpp:404]     Test net output #0: accuracy = 0.79584\nI0818 16:41:17.363626 21584 solver.cpp:404]     Test net output #1: loss = 0.734577 (* 1 = 0.734577 loss)\nI0818 16:41:18.662940 21584 solver.cpp:228] Iteration 2500, loss = 0.25479\nI0818 16:41:18.662984 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0818 16:41:18.663000 21584 solver.cpp:244]     Train net output #1: loss = 0.25479 (* 1 = 0.25479 loss)\nI0818 16:41:18.778955 21584 sgd_solver.cpp:166] Iteration 2500, lr = 0.0625\nI0818 16:43:37.125778 21584 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0818 16:44:59.448559 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8026\nI0818 16:44:59.448819 21584 solver.cpp:404]     Test net output #1: loss = 0.710724 (* 1 = 0.710724 loss)\nI0818 16:45:00.747978 21584 solver.cpp:228] Iteration 2600, loss = 0.160092\nI0818 16:45:00.748023 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 16:45:00.748039 21584 solver.cpp:244]     Train net output #1: loss = 0.160092 (* 1 = 0.160092 loss)\nI0818 16:45:00.860644 21584 sgd_solver.cpp:166] Iteration 2600, lr = 0.0650001\nI0818 16:47:19.236721 21584 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0818 16:48:41.553694 21584 solver.cpp:404]     Test net output #0: accuracy = 0.80676\nI0818 
16:48:41.553968 21584 solver.cpp:404]     Test net output #1: loss = 0.698588 (* 1 = 0.698588 loss)\nI0818 16:48:42.853384 21584 solver.cpp:228] Iteration 2700, loss = 0.137146\nI0818 16:48:42.853430 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 16:48:42.853446 21584 solver.cpp:244]     Train net output #1: loss = 0.137146 (* 1 = 0.137146 loss)\nI0818 16:48:42.966440 21584 sgd_solver.cpp:166] Iteration 2700, lr = 0.0675\nI0818 16:51:01.362465 21584 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0818 16:52:23.740993 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8028\nI0818 16:52:23.741271 21584 solver.cpp:404]     Test net output #1: loss = 0.770981 (* 1 = 0.770981 loss)\nI0818 16:52:25.040765 21584 solver.cpp:228] Iteration 2800, loss = 0.184618\nI0818 16:52:25.040809 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:52:25.040827 21584 solver.cpp:244]     Train net output #1: loss = 0.184618 (* 1 = 0.184618 loss)\nI0818 16:52:25.158506 21584 sgd_solver.cpp:166] Iteration 2800, lr = 0.0700001\nI0818 16:54:43.565009 21584 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0818 16:56:06.050078 21584 solver.cpp:404]     Test net output #0: accuracy = 0.80092\nI0818 16:56:06.050339 21584 solver.cpp:404]     Test net output #1: loss = 0.757122 (* 1 = 0.757122 loss)\nI0818 16:56:07.350222 21584 solver.cpp:228] Iteration 2900, loss = 0.150857\nI0818 16:56:07.350268 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 16:56:07.350286 21584 solver.cpp:244]     Train net output #1: loss = 0.150857 (* 1 = 0.150857 loss)\nI0818 16:56:07.464170 21584 sgd_solver.cpp:166] Iteration 2900, lr = 0.0725\nI0818 16:58:25.835611 21584 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0818 16:59:48.326510 21584 solver.cpp:404]     Test net output #0: accuracy = 0.80408\nI0818 16:59:48.326781 21584 solver.cpp:404]     Test net output #1: loss = 0.769581 (* 1 = 0.769581 loss)\nI0818 16:59:49.626402 21584 
solver.cpp:228] Iteration 3000, loss = 0.1236\nI0818 16:59:49.626447 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 16:59:49.626463 21584 solver.cpp:244]     Train net output #1: loss = 0.1236 (* 1 = 0.1236 loss)\nI0818 16:59:49.742190 21584 sgd_solver.cpp:166] Iteration 3000, lr = 0.075\nI0818 17:02:08.087594 21584 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0818 17:03:30.577201 21584 solver.cpp:404]     Test net output #0: accuracy = 0.81084\nI0818 17:03:30.577471 21584 solver.cpp:404]     Test net output #1: loss = 0.733885 (* 1 = 0.733885 loss)\nI0818 17:03:31.875766 21584 solver.cpp:228] Iteration 3100, loss = 0.103745\nI0818 17:03:31.875808 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 17:03:31.875823 21584 solver.cpp:244]     Train net output #1: loss = 0.103745 (* 1 = 0.103745 loss)\nI0818 17:03:31.995492 21584 sgd_solver.cpp:166] Iteration 3100, lr = 0.0775\nI0818 17:05:50.516007 21584 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0818 17:07:13.005682 21584 solver.cpp:404]     Test net output #0: accuracy = 0.80544\nI0818 17:07:13.005944 21584 solver.cpp:404]     Test net output #1: loss = 0.75514 (* 1 = 0.75514 loss)\nI0818 17:07:14.305711 21584 solver.cpp:228] Iteration 3200, loss = 0.117632\nI0818 17:07:14.305753 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:07:14.305776 21584 solver.cpp:244]     Train net output #1: loss = 0.117632 (* 1 = 0.117632 loss)\nI0818 17:07:14.423301 21584 sgd_solver.cpp:166] Iteration 3200, lr = 0.08\nI0818 17:09:32.928424 21584 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0818 17:10:55.370714 21584 solver.cpp:404]     Test net output #0: accuracy = 0.80912\nI0818 17:10:55.370991 21584 solver.cpp:404]     Test net output #1: loss = 0.752635 (* 1 = 0.752635 loss)\nI0818 17:10:56.669533 21584 solver.cpp:228] Iteration 3300, loss = 0.16071\nI0818 17:10:56.669574 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0818 
17:10:56.669596 21584 solver.cpp:244]     Train net output #1: loss = 0.16071 (* 1 = 0.16071 loss)\nI0818 17:10:56.790324 21584 sgd_solver.cpp:166] Iteration 3300, lr = 0.0825\nI0818 17:13:15.232584 21584 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0818 17:14:37.681105 21584 solver.cpp:404]     Test net output #0: accuracy = 0.81412\nI0818 17:14:37.681365 21584 solver.cpp:404]     Test net output #1: loss = 0.753688 (* 1 = 0.753688 loss)\nI0818 17:14:38.980564 21584 solver.cpp:228] Iteration 3400, loss = 0.0943332\nI0818 17:14:38.980608 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:14:38.980624 21584 solver.cpp:244]     Train net output #1: loss = 0.0943332 (* 1 = 0.0943332 loss)\nI0818 17:14:39.097111 21584 sgd_solver.cpp:166] Iteration 3400, lr = 0.085\nI0818 17:16:57.435899 21584 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0818 17:18:19.861058 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8164\nI0818 17:18:19.861315 21584 solver.cpp:404]     Test net output #1: loss = 0.743944 (* 1 = 0.743944 loss)\nI0818 17:18:21.160650 21584 solver.cpp:228] Iteration 3500, loss = 0.153098\nI0818 17:18:21.160698 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:18:21.160715 21584 solver.cpp:244]     Train net output #1: loss = 0.153098 (* 1 = 0.153098 loss)\nI0818 17:18:21.273422 21584 sgd_solver.cpp:166] Iteration 3500, lr = 0.0875\nI0818 17:20:39.625664 21584 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0818 17:22:02.049160 21584 solver.cpp:404]     Test net output #0: accuracy = 0.81908\nI0818 17:22:02.049367 21584 solver.cpp:404]     Test net output #1: loss = 0.739849 (* 1 = 0.739849 loss)\nI0818 17:22:03.348093 21584 solver.cpp:228] Iteration 3600, loss = 0.0945833\nI0818 17:22:03.348137 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:22:03.348155 21584 solver.cpp:244]     Train net output #1: loss = 0.0945833 (* 1 = 0.0945833 loss)\nI0818 17:22:03.464808 21584 
sgd_solver.cpp:166] Iteration 3600, lr = 0.09\nI0818 17:24:21.874465 21584 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0818 17:25:44.336061 21584 solver.cpp:404]     Test net output #0: accuracy = 0.81828\nI0818 17:25:44.336325 21584 solver.cpp:404]     Test net output #1: loss = 0.764748 (* 1 = 0.764748 loss)\nI0818 17:25:45.635090 21584 solver.cpp:228] Iteration 3700, loss = 0.116978\nI0818 17:25:45.635136 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 17:25:45.635154 21584 solver.cpp:244]     Train net output #1: loss = 0.116978 (* 1 = 0.116978 loss)\nI0818 17:25:45.752812 21584 sgd_solver.cpp:166] Iteration 3700, lr = 0.0925\nI0818 17:28:04.249799 21584 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0818 17:29:26.731693 21584 solver.cpp:404]     Test net output #0: accuracy = 0.81276\nI0818 17:29:26.731982 21584 solver.cpp:404]     Test net output #1: loss = 0.771249 (* 1 = 0.771249 loss)\nI0818 17:29:28.031734 21584 solver.cpp:228] Iteration 3800, loss = 0.155994\nI0818 17:29:28.031780 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 17:29:28.031795 21584 solver.cpp:244]     Train net output #1: loss = 0.155994 (* 1 = 0.155994 loss)\nI0818 17:29:28.149185 21584 sgd_solver.cpp:166] Iteration 3800, lr = 0.095\nI0818 17:31:46.484943 21584 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0818 17:33:08.960014 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8238\nI0818 17:33:08.960279 21584 solver.cpp:404]     Test net output #1: loss = 0.742504 (* 1 = 0.742504 loss)\nI0818 17:33:10.259162 21584 solver.cpp:228] Iteration 3900, loss = 0.083383\nI0818 17:33:10.259209 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 17:33:10.259227 21584 solver.cpp:244]     Train net output #1: loss = 0.083383 (* 1 = 0.083383 loss)\nI0818 17:33:10.379310 21584 sgd_solver.cpp:166] Iteration 3900, lr = 0.0975\nI0818 17:35:28.794411 21584 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0818 
17:36:51.282979 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82604\nI0818 17:36:51.283246 21584 solver.cpp:404]     Test net output #1: loss = 0.743303 (* 1 = 0.743303 loss)\nI0818 17:36:52.581605 21584 solver.cpp:228] Iteration 4000, loss = 0.0950147\nI0818 17:36:52.581652 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:36:52.581668 21584 solver.cpp:244]     Train net output #1: loss = 0.0950146 (* 1 = 0.0950146 loss)\nI0818 17:36:52.699327 21584 sgd_solver.cpp:166] Iteration 4000, lr = 0.1\nI0818 17:39:11.034117 21584 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0818 17:40:33.515941 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8286\nI0818 17:40:33.516216 21584 solver.cpp:404]     Test net output #1: loss = 0.738944 (* 1 = 0.738944 loss)\nI0818 17:40:34.815021 21584 solver.cpp:228] Iteration 4100, loss = 0.0616137\nI0818 17:40:34.815065 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:40:34.815083 21584 solver.cpp:244]     Train net output #1: loss = 0.0616137 (* 1 = 0.0616137 loss)\nI0818 17:40:34.932960 21584 sgd_solver.cpp:166] Iteration 4100, lr = 0.1025\nI0818 17:42:53.286351 21584 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0818 17:44:15.775568 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82764\nI0818 17:44:15.775835 21584 solver.cpp:404]     Test net output #1: loss = 0.767377 (* 1 = 0.767377 loss)\nI0818 17:44:17.075850 21584 solver.cpp:228] Iteration 4200, loss = 0.0646866\nI0818 17:44:17.075896 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 17:44:17.075912 21584 solver.cpp:244]     Train net output #1: loss = 0.0646866 (* 1 = 0.0646866 loss)\nI0818 17:44:17.191504 21584 sgd_solver.cpp:166] Iteration 4200, lr = 0.105\nI0818 17:46:35.513115 21584 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0818 17:47:58.006804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83312\nI0818 17:47:58.007071 21584 solver.cpp:404]     
Test net output #1: loss = 0.729697 (* 1 = 0.729697 loss)\nI0818 17:47:59.305681 21584 solver.cpp:228] Iteration 4300, loss = 0.0886565\nI0818 17:47:59.305725 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:47:59.305747 21584 solver.cpp:244]     Train net output #1: loss = 0.0886565 (* 1 = 0.0886565 loss)\nI0818 17:47:59.425812 21584 sgd_solver.cpp:166] Iteration 4300, lr = 0.1075\nI0818 17:50:17.891926 21584 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0818 17:51:40.385357 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8276\nI0818 17:51:40.385628 21584 solver.cpp:404]     Test net output #1: loss = 0.760779 (* 1 = 0.760779 loss)\nI0818 17:51:41.684389 21584 solver.cpp:228] Iteration 4400, loss = 0.0678891\nI0818 17:51:41.684435 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 17:51:41.684451 21584 solver.cpp:244]     Train net output #1: loss = 0.0678891 (* 1 = 0.0678891 loss)\nI0818 17:51:41.798513 21584 sgd_solver.cpp:166] Iteration 4400, lr = 0.11\nI0818 17:54:00.160385 21584 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0818 17:55:22.634259 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82752\nI0818 17:55:22.634526 21584 solver.cpp:404]     Test net output #1: loss = 0.731923 (* 1 = 0.731923 loss)\nI0818 17:55:23.933207 21584 solver.cpp:228] Iteration 4500, loss = 0.0399576\nI0818 17:55:23.933251 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 17:55:23.933267 21584 solver.cpp:244]     Train net output #1: loss = 0.0399576 (* 1 = 0.0399576 loss)\nI0818 17:55:24.046314 21584 sgd_solver.cpp:166] Iteration 4500, lr = 0.1125\nI0818 17:57:42.472591 21584 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0818 17:59:04.956428 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8224\nI0818 17:59:04.956691 21584 solver.cpp:404]     Test net output #1: loss = 0.787387 (* 1 = 0.787387 loss)\nI0818 17:59:06.255198 21584 solver.cpp:228] Iteration 4600, loss = 
0.0450902\nI0818 17:59:06.255241 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 17:59:06.255257 21584 solver.cpp:244]     Train net output #1: loss = 0.0450901 (* 1 = 0.0450901 loss)\nI0818 17:59:06.374434 21584 sgd_solver.cpp:166] Iteration 4600, lr = 0.115\nI0818 18:01:24.755584 21584 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0818 18:02:47.237803 21584 solver.cpp:404]     Test net output #0: accuracy = 0.828761\nI0818 18:02:47.238073 21584 solver.cpp:404]     Test net output #1: loss = 0.749697 (* 1 = 0.749697 loss)\nI0818 18:02:48.536803 21584 solver.cpp:228] Iteration 4700, loss = 0.0744253\nI0818 18:02:48.536846 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:02:48.536862 21584 solver.cpp:244]     Train net output #1: loss = 0.0744253 (* 1 = 0.0744253 loss)\nI0818 18:02:48.654247 21584 sgd_solver.cpp:166] Iteration 4700, lr = 0.1175\nI0818 18:05:07.100389 21584 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0818 18:06:29.596016 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82596\nI0818 18:06:29.596299 21584 solver.cpp:404]     Test net output #1: loss = 0.797205 (* 1 = 0.797205 loss)\nI0818 18:06:30.894989 21584 solver.cpp:228] Iteration 4800, loss = 0.10019\nI0818 18:06:30.895035 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:06:30.895051 21584 solver.cpp:244]     Train net output #1: loss = 0.10019 (* 1 = 0.10019 loss)\nI0818 18:06:31.009331 21584 sgd_solver.cpp:166] Iteration 4800, lr = 0.12\nI0818 18:08:49.500851 21584 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0818 18:10:11.995106 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83084\nI0818 18:10:11.995370 21584 solver.cpp:404]     Test net output #1: loss = 0.782906 (* 1 = 0.782906 loss)\nI0818 18:10:13.294276 21584 solver.cpp:228] Iteration 4900, loss = 0.0539607\nI0818 18:10:13.294322 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:10:13.294337 21584 
solver.cpp:244]     Train net output #1: loss = 0.0539607 (* 1 = 0.0539607 loss)\nI0818 18:10:13.408356 21584 sgd_solver.cpp:166] Iteration 4900, lr = 0.1225\nI0818 18:12:31.755619 21584 solver.cpp:337] Iteration 5000, Testing net (#0)\nI0818 18:13:54.258011 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82812\nI0818 18:13:54.258282 21584 solver.cpp:404]     Test net output #1: loss = 0.755568 (* 1 = 0.755568 loss)\nI0818 18:13:55.556516 21584 solver.cpp:228] Iteration 5000, loss = 0.109249\nI0818 18:13:55.556562 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:13:55.556578 21584 solver.cpp:244]     Train net output #1: loss = 0.109249 (* 1 = 0.109249 loss)\nI0818 18:13:55.670605 21584 sgd_solver.cpp:166] Iteration 5000, lr = 0.125\nI0818 18:16:14.267349 21584 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0818 18:17:36.766335 21584 solver.cpp:404]     Test net output #0: accuracy = 0.831\nI0818 18:17:36.766588 21584 solver.cpp:404]     Test net output #1: loss = 0.736041 (* 1 = 0.736041 loss)\nI0818 18:17:38.065287 21584 solver.cpp:228] Iteration 5100, loss = 0.0235911\nI0818 18:17:38.065332 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 18:17:38.065348 21584 solver.cpp:244]     Train net output #1: loss = 0.0235911 (* 1 = 0.0235911 loss)\nI0818 18:17:38.184119 21584 sgd_solver.cpp:166] Iteration 5100, lr = 0.1275\nI0818 18:19:56.799746 21584 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0818 18:21:19.295534 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8284\nI0818 18:21:19.295800 21584 solver.cpp:404]     Test net output #1: loss = 0.774668 (* 1 = 0.774668 loss)\nI0818 18:21:20.594467 21584 solver.cpp:228] Iteration 5200, loss = 0.0670187\nI0818 18:21:20.594511 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:21:20.594528 21584 solver.cpp:244]     Train net output #1: loss = 0.0670187 (* 1 = 0.0670187 loss)\nI0818 18:21:20.709344 21584 sgd_solver.cpp:166] 
Iteration 5200, lr = 0.13\nI0818 18:23:39.289165 21584 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0818 18:25:01.782682 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82876\nI0818 18:25:01.782979 21584 solver.cpp:404]     Test net output #1: loss = 0.763849 (* 1 = 0.763849 loss)\nI0818 18:25:03.082490 21584 solver.cpp:228] Iteration 5300, loss = 0.0649599\nI0818 18:25:03.082540 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:25:03.082563 21584 solver.cpp:244]     Train net output #1: loss = 0.0649599 (* 1 = 0.0649599 loss)\nI0818 18:25:03.196507 21584 sgd_solver.cpp:166] Iteration 5300, lr = 0.1325\nI0818 18:27:21.853298 21584 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0818 18:28:44.349035 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83344\nI0818 18:28:44.349308 21584 solver.cpp:404]     Test net output #1: loss = 0.75683 (* 1 = 0.75683 loss)\nI0818 18:28:45.648445 21584 solver.cpp:228] Iteration 5400, loss = 0.0555181\nI0818 18:28:45.648491 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:28:45.648507 21584 solver.cpp:244]     Train net output #1: loss = 0.0555181 (* 1 = 0.0555181 loss)\nI0818 18:28:45.764111 21584 sgd_solver.cpp:166] Iteration 5400, lr = 0.135\nI0818 18:31:04.434996 21584 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0818 18:32:26.926229 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83836\nI0818 18:32:26.926506 21584 solver.cpp:404]     Test net output #1: loss = 0.719775 (* 1 = 0.719775 loss)\nI0818 18:32:28.226117 21584 solver.cpp:228] Iteration 5500, loss = 0.066581\nI0818 18:32:28.226153 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:32:28.226169 21584 solver.cpp:244]     Train net output #1: loss = 0.066581 (* 1 = 0.066581 loss)\nI0818 18:32:28.347525 21584 sgd_solver.cpp:166] Iteration 5500, lr = 0.1375\nI0818 18:34:46.997265 21584 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0818 18:36:09.490816 
21584 solver.cpp:404]     Test net output #0: accuracy = 0.82892\nI0818 18:36:09.491087 21584 solver.cpp:404]     Test net output #1: loss = 0.7763 (* 1 = 0.7763 loss)\nI0818 18:36:10.791276 21584 solver.cpp:228] Iteration 5600, loss = 0.0669041\nI0818 18:36:10.791322 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 18:36:10.791338 21584 solver.cpp:244]     Train net output #1: loss = 0.066904 (* 1 = 0.066904 loss)\nI0818 18:36:10.908717 21584 sgd_solver.cpp:166] Iteration 5600, lr = 0.14\nI0818 18:38:29.611763 21584 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0818 18:39:51.999146 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83036\nI0818 18:39:51.999424 21584 solver.cpp:404]     Test net output #1: loss = 0.740668 (* 1 = 0.740668 loss)\nI0818 18:39:53.298023 21584 solver.cpp:228] Iteration 5700, loss = 0.105478\nI0818 18:39:53.298068 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0818 18:39:53.298084 21584 solver.cpp:244]     Train net output #1: loss = 0.105478 (* 1 = 0.105478 loss)\nI0818 18:39:53.415684 21584 sgd_solver.cpp:166] Iteration 5700, lr = 0.1425\nI0818 18:42:12.044483 21584 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0818 18:43:34.427678 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83204\nI0818 18:43:34.427906 21584 solver.cpp:404]     Test net output #1: loss = 0.764354 (* 1 = 0.764354 loss)\nI0818 18:43:35.726770 21584 solver.cpp:228] Iteration 5800, loss = 0.0379951\nI0818 18:43:35.726814 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 18:43:35.726830 21584 solver.cpp:244]     Train net output #1: loss = 0.0379951 (* 1 = 0.0379951 loss)\nI0818 18:43:35.844749 21584 sgd_solver.cpp:166] Iteration 5800, lr = 0.145\nI0818 18:45:54.514261 21584 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0818 18:47:16.871798 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83144\nI0818 18:47:16.872021 21584 solver.cpp:404]     Test net output #1: 
loss = 0.751675 (* 1 = 0.751675 loss)\nI0818 18:47:18.170997 21584 solver.cpp:228] Iteration 5900, loss = 0.0808643\nI0818 18:47:18.171041 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:47:18.171056 21584 solver.cpp:244]     Train net output #1: loss = 0.0808643 (* 1 = 0.0808643 loss)\nI0818 18:47:18.297169 21584 sgd_solver.cpp:166] Iteration 5900, lr = 0.1475\nI0818 18:49:36.932919 21584 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0818 18:50:59.292971 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83552\nI0818 18:50:59.293222 21584 solver.cpp:404]     Test net output #1: loss = 0.727961 (* 1 = 0.727961 loss)\nI0818 18:51:00.591506 21584 solver.cpp:228] Iteration 6000, loss = 0.0652097\nI0818 18:51:00.591539 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:51:00.591554 21584 solver.cpp:244]     Train net output #1: loss = 0.0652097 (* 1 = 0.0652097 loss)\nI0818 18:51:00.713197 21584 sgd_solver.cpp:166] Iteration 6000, lr = 0.15\nI0818 18:53:19.298473 21584 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0818 18:54:41.675248 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83672\nI0818 18:54:41.675475 21584 solver.cpp:404]     Test net output #1: loss = 0.744008 (* 1 = 0.744008 loss)\nI0818 18:54:42.974154 21584 solver.cpp:228] Iteration 6100, loss = 0.0771915\nI0818 18:54:42.974201 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 18:54:42.974217 21584 solver.cpp:244]     Train net output #1: loss = 0.0771915 (* 1 = 0.0771915 loss)\nI0818 18:54:43.094234 21584 sgd_solver.cpp:166] Iteration 6100, lr = 0.1525\nI0818 18:57:01.728049 21584 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0818 18:58:24.107902 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83596\nI0818 18:58:24.108160 21584 solver.cpp:404]     Test net output #1: loss = 0.765239 (* 1 = 0.765239 loss)\nI0818 18:58:25.407681 21584 solver.cpp:228] Iteration 6200, loss = 0.110619\nI0818 
18:58:25.407727 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 18:58:25.407743 21584 solver.cpp:244]     Train net output #1: loss = 0.110619 (* 1 = 0.110619 loss)\nI0818 18:58:25.524807 21584 sgd_solver.cpp:166] Iteration 6200, lr = 0.155\nI0818 19:00:43.992089 21584 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0818 19:02:06.378216 21584 solver.cpp:404]     Test net output #0: accuracy = 0.82936\nI0818 19:02:06.378476 21584 solver.cpp:404]     Test net output #1: loss = 0.758451 (* 1 = 0.758451 loss)\nI0818 19:02:07.677358 21584 solver.cpp:228] Iteration 6300, loss = 0.0791276\nI0818 19:02:07.677404 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:02:07.677420 21584 solver.cpp:244]     Train net output #1: loss = 0.0791276 (* 1 = 0.0791276 loss)\nI0818 19:02:07.794823 21584 sgd_solver.cpp:166] Iteration 6300, lr = 0.1575\nI0818 19:04:26.315999 21584 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0818 19:05:48.704290 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83672\nI0818 19:05:48.704526 21584 solver.cpp:404]     Test net output #1: loss = 0.763659 (* 1 = 0.763659 loss)\nI0818 19:05:50.003641 21584 solver.cpp:228] Iteration 6400, loss = 0.0876114\nI0818 19:05:50.003692 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:05:50.003710 21584 solver.cpp:244]     Train net output #1: loss = 0.0876113 (* 1 = 0.0876113 loss)\nI0818 19:05:50.126588 21584 sgd_solver.cpp:166] Iteration 6400, lr = 0.16\nI0818 19:08:08.569170 21584 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0818 19:09:30.956284 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83052\nI0818 19:09:30.956537 21584 solver.cpp:404]     Test net output #1: loss = 0.775609 (* 1 = 0.775609 loss)\nI0818 19:09:32.255769 21584 solver.cpp:228] Iteration 6500, loss = 0.0904797\nI0818 19:09:32.255815 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:09:32.255831 21584 solver.cpp:244]     
Train net output #1: loss = 0.0904797 (* 1 = 0.0904797 loss)\nI0818 19:09:32.376982 21584 sgd_solver.cpp:166] Iteration 6500, lr = 0.1625\nI0818 19:11:50.873581 21584 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0818 19:13:13.358371 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8384\nI0818 19:13:13.358603 21584 solver.cpp:404]     Test net output #1: loss = 0.73539 (* 1 = 0.73539 loss)\nI0818 19:13:14.657304 21584 solver.cpp:228] Iteration 6600, loss = 0.076321\nI0818 19:13:14.657340 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:13:14.657354 21584 solver.cpp:244]     Train net output #1: loss = 0.076321 (* 1 = 0.076321 loss)\nI0818 19:13:14.771333 21584 sgd_solver.cpp:166] Iteration 6600, lr = 0.165\nI0818 19:15:33.247081 21584 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0818 19:16:55.733433 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8424\nI0818 19:16:55.733659 21584 solver.cpp:404]     Test net output #1: loss = 0.720115 (* 1 = 0.720115 loss)\nI0818 19:16:57.032757 21584 solver.cpp:228] Iteration 6700, loss = 0.0340223\nI0818 19:16:57.032804 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:16:57.032819 21584 solver.cpp:244]     Train net output #1: loss = 0.0340223 (* 1 = 0.0340223 loss)\nI0818 19:16:57.149786 21584 sgd_solver.cpp:166] Iteration 6700, lr = 0.1675\nI0818 19:19:15.698568 21584 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0818 19:20:38.164351 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8452\nI0818 19:20:38.164618 21584 solver.cpp:404]     Test net output #1: loss = 0.719492 (* 1 = 0.719492 loss)\nI0818 19:20:39.463640 21584 solver.cpp:228] Iteration 6800, loss = 0.0657025\nI0818 19:20:39.463692 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:20:39.463711 21584 solver.cpp:244]     Train net output #1: loss = 0.0657025 (* 1 = 0.0657025 loss)\nI0818 19:20:39.580139 21584 sgd_solver.cpp:166] Iteration 6800, lr = 
0.17\nI0818 19:22:58.113812 21584 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0818 19:24:20.585275 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8356\nI0818 19:24:20.585503 21584 solver.cpp:404]     Test net output #1: loss = 0.751713 (* 1 = 0.751713 loss)\nI0818 19:24:21.884699 21584 solver.cpp:228] Iteration 6900, loss = 0.0432898\nI0818 19:24:21.884744 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:24:21.884762 21584 solver.cpp:244]     Train net output #1: loss = 0.0432898 (* 1 = 0.0432898 loss)\nI0818 19:24:22.001179 21584 sgd_solver.cpp:166] Iteration 6900, lr = 0.1725\nI0818 19:26:40.527592 21584 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0818 19:28:02.993438 21584 solver.cpp:404]     Test net output #0: accuracy = 0.843\nI0818 19:28:02.993676 21584 solver.cpp:404]     Test net output #1: loss = 0.722147 (* 1 = 0.722147 loss)\nI0818 19:28:04.293388 21584 solver.cpp:228] Iteration 7000, loss = 0.0280236\nI0818 19:28:04.293433 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:28:04.293449 21584 solver.cpp:244]     Train net output #1: loss = 0.0280236 (* 1 = 0.0280236 loss)\nI0818 19:28:04.413086 21584 sgd_solver.cpp:166] Iteration 7000, lr = 0.175\nI0818 19:30:22.981189 21584 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0818 19:31:45.467247 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84164\nI0818 19:31:45.467483 21584 solver.cpp:404]     Test net output #1: loss = 0.725866 (* 1 = 0.725866 loss)\nI0818 19:31:46.766813 21584 solver.cpp:228] Iteration 7100, loss = 0.0269502\nI0818 19:31:46.766857 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:31:46.766875 21584 solver.cpp:244]     Train net output #1: loss = 0.0269502 (* 1 = 0.0269502 loss)\nI0818 19:31:46.885852 21584 sgd_solver.cpp:166] Iteration 7100, lr = 0.1775\nI0818 19:34:05.416110 21584 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0818 19:35:27.899432 21584 solver.cpp:404]  
   Test net output #0: accuracy = 0.84212\nI0818 19:35:27.899660 21584 solver.cpp:404]     Test net output #1: loss = 0.718645 (* 1 = 0.718645 loss)\nI0818 19:35:29.198707 21584 solver.cpp:228] Iteration 7200, loss = 0.0670022\nI0818 19:35:29.198752 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 19:35:29.198768 21584 solver.cpp:244]     Train net output #1: loss = 0.0670022 (* 1 = 0.0670022 loss)\nI0818 19:35:29.320360 21584 sgd_solver.cpp:166] Iteration 7200, lr = 0.18\nI0818 19:37:47.896737 21584 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0818 19:39:10.378182 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84352\nI0818 19:39:10.378437 21584 solver.cpp:404]     Test net output #1: loss = 0.693126 (* 1 = 0.693126 loss)\nI0818 19:39:11.677996 21584 solver.cpp:228] Iteration 7300, loss = 0.0403112\nI0818 19:39:11.678041 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:39:11.678057 21584 solver.cpp:244]     Train net output #1: loss = 0.0403112 (* 1 = 0.0403112 loss)\nI0818 19:39:11.796281 21584 sgd_solver.cpp:166] Iteration 7300, lr = 0.1825\nI0818 19:41:30.298399 21584 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0818 19:42:52.785099 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83796\nI0818 19:42:52.785352 21584 solver.cpp:404]     Test net output #1: loss = 0.708577 (* 1 = 0.708577 loss)\nI0818 19:42:54.085084 21584 solver.cpp:228] Iteration 7400, loss = 0.10149\nI0818 19:42:54.085129 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 19:42:54.085144 21584 solver.cpp:244]     Train net output #1: loss = 0.10149 (* 1 = 0.10149 loss)\nI0818 19:42:54.204398 21584 sgd_solver.cpp:166] Iteration 7400, lr = 0.185\nI0818 19:45:12.692733 21584 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0818 19:46:35.161849 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84636\nI0818 19:46:35.162101 21584 solver.cpp:404]     Test net output #1: loss = 0.675331 (* 1 = 
0.675331 loss)\nI0818 19:46:36.461236 21584 solver.cpp:228] Iteration 7500, loss = 0.0445981\nI0818 19:46:36.461279 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 19:46:36.461295 21584 solver.cpp:244]     Train net output #1: loss = 0.0445981 (* 1 = 0.0445981 loss)\nI0818 19:46:36.574566 21584 sgd_solver.cpp:166] Iteration 7500, lr = 0.1875\nI0818 19:48:55.079356 21584 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0818 19:50:17.546779 21584 solver.cpp:404]     Test net output #0: accuracy = 0.83788\nI0818 19:50:17.547039 21584 solver.cpp:404]     Test net output #1: loss = 0.756139 (* 1 = 0.756139 loss)\nI0818 19:50:18.847060 21584 solver.cpp:228] Iteration 7600, loss = 0.065707\nI0818 19:50:18.847107 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 19:50:18.847124 21584 solver.cpp:244]     Train net output #1: loss = 0.065707 (* 1 = 0.065707 loss)\nI0818 19:50:18.965744 21584 sgd_solver.cpp:166] Iteration 7600, lr = 0.19\nI0818 19:52:37.473196 21584 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0818 19:53:59.947423 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84148\nI0818 19:53:59.947629 21584 solver.cpp:404]     Test net output #1: loss = 0.698002 (* 1 = 0.698002 loss)\nI0818 19:54:01.245661 21584 solver.cpp:228] Iteration 7700, loss = 0.0375632\nI0818 19:54:01.245712 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:54:01.245728 21584 solver.cpp:244]     Train net output #1: loss = 0.0375632 (* 1 = 0.0375632 loss)\nI0818 19:54:01.368902 21584 sgd_solver.cpp:166] Iteration 7700, lr = 0.1925\nI0818 19:56:20.039505 21584 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0818 19:57:42.513005 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8468\nI0818 19:57:42.513242 21584 solver.cpp:404]     Test net output #1: loss = 0.715738 (* 1 = 0.715738 loss)\nI0818 19:57:43.811630 21584 solver.cpp:228] Iteration 7800, loss = 0.0326896\nI0818 19:57:43.811676 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 19:57:43.811693 21584 solver.cpp:244]     Train net output #1: loss = 0.0326897 (* 1 = 0.0326897 loss)\nI0818 19:57:43.929100 21584 sgd_solver.cpp:166] Iteration 7800, lr = 0.195\nI0818 20:00:02.554041 21584 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0818 20:01:25.022960 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8512\nI0818 20:01:25.023180 21584 solver.cpp:404]     Test net output #1: loss = 0.642757 (* 1 = 0.642757 loss)\nI0818 20:01:26.322860 21584 solver.cpp:228] Iteration 7900, loss = 0.0349844\nI0818 20:01:26.322903 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:01:26.322919 21584 solver.cpp:244]     Train net output #1: loss = 0.0349844 (* 1 = 0.0349844 loss)\nI0818 20:01:26.441516 21584 sgd_solver.cpp:166] Iteration 7900, lr = 0.1975\nI0818 20:03:45.030302 21584 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0818 20:05:07.504859 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84236\nI0818 20:05:07.505116 21584 solver.cpp:404]     Test net output #1: loss = 0.723656 (* 1 = 0.723656 loss)\nI0818 20:05:08.805244 21584 solver.cpp:228] Iteration 8000, loss = 0.0436643\nI0818 20:05:08.805286 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:05:08.805301 21584 solver.cpp:244]     Train net output #1: loss = 0.0436643 (* 1 = 0.0436643 loss)\nI0818 20:05:08.920892 21584 sgd_solver.cpp:166] Iteration 8000, lr = 0.2\nI0818 20:07:27.533274 21584 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0818 20:08:49.969323 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84604\nI0818 20:08:49.969550 21584 solver.cpp:404]     Test net output #1: loss = 0.693942 (* 1 = 0.693942 loss)\nI0818 20:08:51.269428 21584 solver.cpp:228] Iteration 8100, loss = 0.220364\nI0818 20:08:51.269471 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0818 20:08:51.269487 21584 solver.cpp:244]     Train net output #1: 
loss = 0.220364 (* 1 = 0.220364 loss)\nI0818 20:08:51.388833 21584 sgd_solver.cpp:166] Iteration 8100, lr = 0.2025\nI0818 20:11:10.045904 21584 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0818 20:12:32.488442 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8472\nI0818 20:12:32.488680 21584 solver.cpp:404]     Test net output #1: loss = 0.691185 (* 1 = 0.691185 loss)\nI0818 20:12:33.787889 21584 solver.cpp:228] Iteration 8200, loss = 0.0414678\nI0818 20:12:33.787932 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:12:33.787948 21584 solver.cpp:244]     Train net output #1: loss = 0.0414678 (* 1 = 0.0414678 loss)\nI0818 20:12:33.909010 21584 sgd_solver.cpp:166] Iteration 8200, lr = 0.205\nI0818 20:14:52.544286 21584 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0818 20:16:15.052268 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84708\nI0818 20:16:15.052517 21584 solver.cpp:404]     Test net output #1: loss = 0.689928 (* 1 = 0.689928 loss)\nI0818 20:16:16.351758 21584 solver.cpp:228] Iteration 8300, loss = 0.144566\nI0818 20:16:16.351801 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:16:16.351824 21584 solver.cpp:244]     Train net output #1: loss = 0.144566 (* 1 = 0.144566 loss)\nI0818 20:16:16.464131 21584 sgd_solver.cpp:166] Iteration 8300, lr = 0.2075\nI0818 20:18:35.065960 21584 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0818 20:19:57.559367 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84348\nI0818 20:19:57.559614 21584 solver.cpp:404]     Test net output #1: loss = 0.689445 (* 1 = 0.689445 loss)\nI0818 20:19:58.862208 21584 solver.cpp:228] Iteration 8400, loss = 0.120846\nI0818 20:19:58.862251 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:19:58.862274 21584 solver.cpp:244]     Train net output #1: loss = 0.120846 (* 1 = 0.120846 loss)\nI0818 20:19:58.977735 21584 sgd_solver.cpp:166] Iteration 8400, lr = 0.21\nI0818 
20:22:17.638097 21584 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0818 20:23:40.130708 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84864\nI0818 20:23:40.130967 21584 solver.cpp:404]     Test net output #1: loss = 0.66226 (* 1 = 0.66226 loss)\nI0818 20:23:41.433466 21584 solver.cpp:228] Iteration 8500, loss = 0.0731827\nI0818 20:23:41.433509 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:23:41.433526 21584 solver.cpp:244]     Train net output #1: loss = 0.0731827 (* 1 = 0.0731827 loss)\nI0818 20:23:41.548837 21584 sgd_solver.cpp:166] Iteration 8500, lr = 0.2125\nI0818 20:26:00.137084 21584 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0818 20:27:22.634500 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84532\nI0818 20:27:22.634757 21584 solver.cpp:404]     Test net output #1: loss = 0.668593 (* 1 = 0.668593 loss)\nI0818 20:27:23.936955 21584 solver.cpp:228] Iteration 8600, loss = 0.152371\nI0818 20:27:23.936997 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 20:27:23.937012 21584 solver.cpp:244]     Train net output #1: loss = 0.152371 (* 1 = 0.152371 loss)\nI0818 20:27:24.051657 21584 sgd_solver.cpp:166] Iteration 8600, lr = 0.215\nI0818 20:29:42.653584 21584 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0818 20:31:05.147094 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84624\nI0818 20:31:05.147328 21584 solver.cpp:404]     Test net output #1: loss = 0.684247 (* 1 = 0.684247 loss)\nI0818 20:31:06.449285 21584 solver.cpp:228] Iteration 8700, loss = 0.0580688\nI0818 20:31:06.449326 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:31:06.449343 21584 solver.cpp:244]     Train net output #1: loss = 0.0580688 (* 1 = 0.0580688 loss)\nI0818 20:31:06.562533 21584 sgd_solver.cpp:166] Iteration 8700, lr = 0.2175\nI0818 20:33:25.201715 21584 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0818 20:34:47.704246 21584 solver.cpp:404]     Test net 
output #0: accuracy = 0.85036\nI0818 20:34:47.704509 21584 solver.cpp:404]     Test net output #1: loss = 0.63531 (* 1 = 0.63531 loss)\nI0818 20:34:49.006314 21584 solver.cpp:228] Iteration 8800, loss = 0.0996252\nI0818 20:34:49.006356 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 20:34:49.006371 21584 solver.cpp:244]     Train net output #1: loss = 0.0996252 (* 1 = 0.0996252 loss)\nI0818 20:34:49.124617 21584 sgd_solver.cpp:166] Iteration 8800, lr = 0.22\nI0818 20:37:07.706218 21584 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0818 20:38:30.202718 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84648\nI0818 20:38:30.202975 21584 solver.cpp:404]     Test net output #1: loss = 0.668494 (* 1 = 0.668494 loss)\nI0818 20:38:31.505331 21584 solver.cpp:228] Iteration 8900, loss = 0.0294645\nI0818 20:38:31.505372 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 20:38:31.505388 21584 solver.cpp:244]     Train net output #1: loss = 0.0294645 (* 1 = 0.0294645 loss)\nI0818 20:38:31.617897 21584 sgd_solver.cpp:166] Iteration 8900, lr = 0.2225\nI0818 20:40:50.221771 21584 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0818 20:42:12.695160 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85044\nI0818 20:42:12.695411 21584 solver.cpp:404]     Test net output #1: loss = 0.658308 (* 1 = 0.658308 loss)\nI0818 20:42:13.997445 21584 solver.cpp:228] Iteration 9000, loss = 0.0662099\nI0818 20:42:13.997488 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 20:42:13.997503 21584 solver.cpp:244]     Train net output #1: loss = 0.0662099 (* 1 = 0.0662099 loss)\nI0818 20:42:14.118017 21584 sgd_solver.cpp:166] Iteration 9000, lr = 0.225\nI0818 20:44:32.704435 21584 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0818 20:45:55.173506 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84576\nI0818 20:45:55.173743 21584 solver.cpp:404]     Test net output #1: loss = 0.676644 (* 1 = 0.676644 
loss)\nI0818 20:45:56.472533 21584 solver.cpp:228] Iteration 9100, loss = 0.0330086\nI0818 20:45:56.472575 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 20:45:56.472592 21584 solver.cpp:244]     Train net output #1: loss = 0.0330086 (* 1 = 0.0330086 loss)\nI0818 20:45:56.589876 21584 sgd_solver.cpp:166] Iteration 9100, lr = 0.2275\nI0818 20:48:15.126572 21584 solver.cpp:337] Iteration 9200, Testing net (#0)\nI0818 20:49:37.603492 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84852\nI0818 20:49:37.603751 21584 solver.cpp:404]     Test net output #1: loss = 0.675623 (* 1 = 0.675623 loss)\nI0818 20:49:38.902496 21584 solver.cpp:228] Iteration 9200, loss = 0.0343485\nI0818 20:49:38.902541 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 20:49:38.902559 21584 solver.cpp:244]     Train net output #1: loss = 0.0343485 (* 1 = 0.0343485 loss)\nI0818 20:49:39.021031 21584 sgd_solver.cpp:166] Iteration 9200, lr = 0.23\nI0818 20:51:57.665966 21584 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0818 20:53:20.140506 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85068\nI0818 20:53:20.140758 21584 solver.cpp:404]     Test net output #1: loss = 0.65383 (* 1 = 0.65383 loss)\nI0818 20:53:21.439157 21584 solver.cpp:228] Iteration 9300, loss = 0.0913154\nI0818 20:53:21.439200 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 20:53:21.439216 21584 solver.cpp:244]     Train net output #1: loss = 0.0913155 (* 1 = 0.0913155 loss)\nI0818 20:53:21.560966 21584 sgd_solver.cpp:166] Iteration 9300, lr = 0.2325\nI0818 20:55:40.176400 21584 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0818 20:57:02.519922 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI0818 20:57:02.520184 21584 solver.cpp:404]     Test net output #1: loss = 0.657908 (* 1 = 0.657908 loss)\nI0818 20:57:03.818547 21584 solver.cpp:228] Iteration 9400, loss = 0.113156\nI0818 20:57:03.818590 21584 solver.cpp:244]  
   Train net output #0: accuracy = 0.976\nI0818 20:57:03.818608 21584 solver.cpp:244]     Train net output #1: loss = 0.113156 (* 1 = 0.113156 loss)\nI0818 20:57:03.934618 21584 sgd_solver.cpp:166] Iteration 9400, lr = 0.235\nI0818 20:59:22.577308 21584 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0818 21:00:44.983425 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84956\nI0818 21:00:44.983680 21584 solver.cpp:404]     Test net output #1: loss = 0.673117 (* 1 = 0.673117 loss)\nI0818 21:00:46.282608 21584 solver.cpp:228] Iteration 9500, loss = 0.0344444\nI0818 21:00:46.282650 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:00:46.282666 21584 solver.cpp:244]     Train net output #1: loss = 0.0344444 (* 1 = 0.0344444 loss)\nI0818 21:00:46.395779 21584 sgd_solver.cpp:166] Iteration 9500, lr = 0.2375\nI0818 21:03:05.028731 21584 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0818 21:04:27.426139 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84848\nI0818 21:04:27.426409 21584 solver.cpp:404]     Test net output #1: loss = 0.653726 (* 1 = 0.653726 loss)\nI0818 21:04:28.725122 21584 solver.cpp:228] Iteration 9600, loss = 0.0359781\nI0818 21:04:28.725165 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:04:28.725181 21584 solver.cpp:244]     Train net output #1: loss = 0.0359781 (* 1 = 0.0359781 loss)\nI0818 21:04:28.842957 21584 sgd_solver.cpp:166] Iteration 9600, lr = 0.24\nI0818 21:06:47.287926 21584 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0818 21:08:09.680981 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8502\nI0818 21:08:09.681260 21584 solver.cpp:404]     Test net output #1: loss = 0.659816 (* 1 = 0.659816 loss)\nI0818 21:08:10.979838 21584 solver.cpp:228] Iteration 9700, loss = 0.0366142\nI0818 21:08:10.979882 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:08:10.979898 21584 solver.cpp:244]     Train net output #1: loss = 0.0366142 (* 
1 = 0.0366142 loss)\nI0818 21:08:11.093950 21584 sgd_solver.cpp:166] Iteration 9700, lr = 0.2425\nI0818 21:10:29.612704 21584 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0818 21:11:52.000524 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84792\nI0818 21:11:52.000792 21584 solver.cpp:404]     Test net output #1: loss = 0.670432 (* 1 = 0.670432 loss)\nI0818 21:11:53.299481 21584 solver.cpp:228] Iteration 9800, loss = 0.0889219\nI0818 21:11:53.299526 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:11:53.299541 21584 solver.cpp:244]     Train net output #1: loss = 0.0889219 (* 1 = 0.0889219 loss)\nI0818 21:11:53.426098 21584 sgd_solver.cpp:166] Iteration 9800, lr = 0.245\nI0818 21:14:11.945983 21584 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0818 21:15:34.335973 21584 solver.cpp:404]     Test net output #0: accuracy = 0.852\nI0818 21:15:34.336246 21584 solver.cpp:404]     Test net output #1: loss = 0.656336 (* 1 = 0.656336 loss)\nI0818 21:15:35.635618 21584 solver.cpp:228] Iteration 9900, loss = 0.074835\nI0818 21:15:35.635663 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:15:35.635679 21584 solver.cpp:244]     Train net output #1: loss = 0.074835 (* 1 = 0.074835 loss)\nI0818 21:15:35.753315 21584 sgd_solver.cpp:166] Iteration 9900, lr = 0.2475\nI0818 21:17:54.302624 21584 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0818 21:19:16.672272 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85048\nI0818 21:19:16.672545 21584 solver.cpp:404]     Test net output #1: loss = 0.668116 (* 1 = 0.668116 loss)\nI0818 21:19:17.971144 21584 solver.cpp:228] Iteration 10000, loss = 0.0258338\nI0818 21:19:17.971189 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:19:17.971205 21584 solver.cpp:244]     Train net output #1: loss = 0.0258338 (* 1 = 0.0258338 loss)\nI0818 21:19:18.094213 21584 sgd_solver.cpp:166] Iteration 10000, lr = 0.25\nI0818 21:21:36.695552 21584 
solver.cpp:337] Iteration 10100, Testing net (#0)\nI0818 21:22:59.066864 21584 solver.cpp:404]     Test net output #0: accuracy = 0.849\nI0818 21:22:59.067138 21584 solver.cpp:404]     Test net output #1: loss = 0.636519 (* 1 = 0.636519 loss)\nI0818 21:23:00.365766 21584 solver.cpp:228] Iteration 10100, loss = 0.0892019\nI0818 21:23:00.365808 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:23:00.365825 21584 solver.cpp:244]     Train net output #1: loss = 0.0892019 (* 1 = 0.0892019 loss)\nI0818 21:23:00.480556 21584 sgd_solver.cpp:166] Iteration 10100, lr = 0.2525\nI0818 21:25:18.998142 21584 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0818 21:26:41.368094 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8498\nI0818 21:26:41.368363 21584 solver.cpp:404]     Test net output #1: loss = 0.650491 (* 1 = 0.650491 loss)\nI0818 21:26:42.667129 21584 solver.cpp:228] Iteration 10200, loss = 0.0629236\nI0818 21:26:42.667172 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:26:42.667188 21584 solver.cpp:244]     Train net output #1: loss = 0.0629236 (* 1 = 0.0629236 loss)\nI0818 21:26:42.788398 21584 sgd_solver.cpp:166] Iteration 10200, lr = 0.255\nI0818 21:29:01.321775 21584 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0818 21:30:23.793956 21584 solver.cpp:404]     Test net output #0: accuracy = 0.851\nI0818 21:30:23.794209 21584 solver.cpp:404]     Test net output #1: loss = 0.645318 (* 1 = 0.645318 loss)\nI0818 21:30:25.092663 21584 solver.cpp:228] Iteration 10300, loss = 0.0814235\nI0818 21:30:25.092706 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:30:25.092722 21584 solver.cpp:244]     Train net output #1: loss = 0.0814235 (* 1 = 0.0814235 loss)\nI0818 21:30:25.215090 21584 sgd_solver.cpp:166] Iteration 10300, lr = 0.2575\nI0818 21:32:43.755367 21584 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0818 21:34:06.223685 21584 solver.cpp:404]     Test net output #0: 
accuracy = 0.85788\nI0818 21:34:06.223951 21584 solver.cpp:404]     Test net output #1: loss = 0.59935 (* 1 = 0.59935 loss)\nI0818 21:34:07.523973 21584 solver.cpp:228] Iteration 10400, loss = 0.0345876\nI0818 21:34:07.524014 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 21:34:07.524030 21584 solver.cpp:244]     Train net output #1: loss = 0.0345876 (* 1 = 0.0345876 loss)\nI0818 21:34:07.644122 21584 sgd_solver.cpp:166] Iteration 10400, lr = 0.26\nI0818 21:36:26.259032 21584 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0818 21:37:48.762773 21584 solver.cpp:404]     Test net output #0: accuracy = 0.84768\nI0818 21:37:48.763027 21584 solver.cpp:404]     Test net output #1: loss = 0.651002 (* 1 = 0.651002 loss)\nI0818 21:37:50.062085 21584 solver.cpp:228] Iteration 10500, loss = 0.0432109\nI0818 21:37:50.062131 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:37:50.062155 21584 solver.cpp:244]     Train net output #1: loss = 0.0432109 (* 1 = 0.0432109 loss)\nI0818 21:37:50.178338 21584 sgd_solver.cpp:166] Iteration 10500, lr = 0.2625\nI0818 21:40:08.870579 21584 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0818 21:41:31.364125 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85488\nI0818 21:41:31.364374 21584 solver.cpp:404]     Test net output #1: loss = 0.642932 (* 1 = 0.642932 loss)\nI0818 21:41:32.663054 21584 solver.cpp:228] Iteration 10600, loss = 0.0289185\nI0818 21:41:32.663094 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:41:32.663110 21584 solver.cpp:244]     Train net output #1: loss = 0.0289185 (* 1 = 0.0289185 loss)\nI0818 21:41:32.780315 21584 sgd_solver.cpp:166] Iteration 10600, lr = 0.265\nI0818 21:43:51.425617 21584 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0818 21:45:13.917037 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85512\nI0818 21:45:13.917315 21584 solver.cpp:404]     Test net output #1: loss = 0.602246 (* 1 = 0.602246 
loss)\nI0818 21:45:15.215898 21584 solver.cpp:228] Iteration 10700, loss = 0.12992\nI0818 21:45:15.215939 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 21:45:15.215953 21584 solver.cpp:244]     Train net output #1: loss = 0.12992 (* 1 = 0.12992 loss)\nI0818 21:45:15.335371 21584 sgd_solver.cpp:166] Iteration 10700, lr = 0.2675\nI0818 21:47:33.992193 21584 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0818 21:48:56.474303 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85792\nI0818 21:48:56.474582 21584 solver.cpp:404]     Test net output #1: loss = 0.618235 (* 1 = 0.618235 loss)\nI0818 21:48:57.772852 21584 solver.cpp:228] Iteration 10800, loss = 0.0416245\nI0818 21:48:57.772898 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 21:48:57.772914 21584 solver.cpp:244]     Train net output #1: loss = 0.0416245 (* 1 = 0.0416245 loss)\nI0818 21:48:57.893857 21584 sgd_solver.cpp:166] Iteration 10800, lr = 0.27\nI0818 21:51:16.490236 21584 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0818 21:52:38.974501 21584 solver.cpp:404]     Test net output #0: accuracy = 0.852761\nI0818 21:52:38.974774 21584 solver.cpp:404]     Test net output #1: loss = 0.62455 (* 1 = 0.62455 loss)\nI0818 21:52:40.275588 21584 solver.cpp:228] Iteration 10900, loss = 0.0936677\nI0818 21:52:40.275635 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 21:52:40.275650 21584 solver.cpp:244]     Train net output #1: loss = 0.0936676 (* 1 = 0.0936676 loss)\nI0818 21:52:40.390146 21584 sgd_solver.cpp:166] Iteration 10900, lr = 0.2725\nI0818 21:54:58.992813 21584 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0818 21:56:21.481858 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85864\nI0818 21:56:21.482134 21584 solver.cpp:404]     Test net output #1: loss = 0.61737 (* 1 = 0.61737 loss)\nI0818 21:56:22.781574 21584 solver.cpp:228] Iteration 11000, loss = 0.0844685\nI0818 21:56:22.781620 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 21:56:22.781636 21584 solver.cpp:244]     Train net output #1: loss = 0.0844684 (* 1 = 0.0844684 loss)\nI0818 21:56:22.895484 21584 sgd_solver.cpp:166] Iteration 11000, lr = 0.275\nI0818 21:58:41.334378 21584 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0818 22:00:03.823987 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85232\nI0818 22:00:03.824241 21584 solver.cpp:404]     Test net output #1: loss = 0.609878 (* 1 = 0.609878 loss)\nI0818 22:00:05.127005 21584 solver.cpp:228] Iteration 11100, loss = 0.108779\nI0818 22:00:05.127049 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:00:05.127066 21584 solver.cpp:244]     Train net output #1: loss = 0.108779 (* 1 = 0.108779 loss)\nI0818 22:00:05.240485 21584 sgd_solver.cpp:166] Iteration 11100, lr = 0.2775\nI0818 22:02:23.177584 21584 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0818 22:03:45.667587 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85648\nI0818 22:03:45.667870 21584 solver.cpp:404]     Test net output #1: loss = 0.605271 (* 1 = 0.605271 loss)\nI0818 22:03:46.969841 21584 solver.cpp:228] Iteration 11200, loss = 0.0183138\nI0818 22:03:46.969887 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:03:46.969902 21584 solver.cpp:244]     Train net output #1: loss = 0.0183138 (* 1 = 0.0183138 loss)\nI0818 22:03:47.074728 21584 sgd_solver.cpp:166] Iteration 11200, lr = 0.28\nI0818 22:06:04.915714 21584 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0818 22:07:27.405927 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8584\nI0818 22:07:27.406201 21584 solver.cpp:404]     Test net output #1: loss = 0.5952 (* 1 = 0.5952 loss)\nI0818 22:07:28.709488 21584 solver.cpp:228] Iteration 11300, loss = 0.0284912\nI0818 22:07:28.709534 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0818 22:07:28.709550 21584 solver.cpp:244]     Train net output #1: loss = 
0.0284912 (* 1 = 0.0284912 loss)\nI0818 22:07:28.811568 21584 sgd_solver.cpp:166] Iteration 11300, lr = 0.2825\nI0818 22:09:46.243438 21584 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0818 22:11:08.726683 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85564\nI0818 22:11:08.726933 21584 solver.cpp:404]     Test net output #1: loss = 0.653319 (* 1 = 0.653319 loss)\nI0818 22:11:10.029000 21584 solver.cpp:228] Iteration 11400, loss = 0.0665982\nI0818 22:11:10.029047 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 22:11:10.029065 21584 solver.cpp:244]     Train net output #1: loss = 0.0665982 (* 1 = 0.0665982 loss)\nI0818 22:11:10.129405 21584 sgd_solver.cpp:166] Iteration 11400, lr = 0.285\nI0818 22:13:27.542172 21584 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0818 22:14:50.024344 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86164\nI0818 22:14:50.024601 21584 solver.cpp:404]     Test net output #1: loss = 0.605496 (* 1 = 0.605496 loss)\nI0818 22:14:51.327416 21584 solver.cpp:228] Iteration 11500, loss = 0.10426\nI0818 22:14:51.327461 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0818 22:14:51.327477 21584 solver.cpp:244]     Train net output #1: loss = 0.10426 (* 1 = 0.10426 loss)\nI0818 22:14:51.434116 21584 sgd_solver.cpp:166] Iteration 11500, lr = 0.2875\nI0818 22:17:08.865705 21584 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0818 22:18:31.350106 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85796\nI0818 22:18:31.350383 21584 solver.cpp:404]     Test net output #1: loss = 0.601284 (* 1 = 0.601284 loss)\nI0818 22:18:32.653975 21584 solver.cpp:228] Iteration 11600, loss = 0.0477512\nI0818 22:18:32.654021 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:18:32.654037 21584 solver.cpp:244]     Train net output #1: loss = 0.0477512 (* 1 = 0.0477512 loss)\nI0818 22:18:32.757252 21584 sgd_solver.cpp:166] Iteration 11600, lr = 0.29\nI0818 
22:20:50.204054 21584 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0818 22:22:12.684008 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86504\nI0818 22:22:12.684273 21584 solver.cpp:404]     Test net output #1: loss = 0.571632 (* 1 = 0.571632 loss)\nI0818 22:22:13.987360 21584 solver.cpp:228] Iteration 11700, loss = 0.0715255\nI0818 22:22:13.987406 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:22:13.987422 21584 solver.cpp:244]     Train net output #1: loss = 0.0715255 (* 1 = 0.0715255 loss)\nI0818 22:22:14.095162 21584 sgd_solver.cpp:166] Iteration 11700, lr = 0.2925\nI0818 22:24:31.531728 21584 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0818 22:25:54.017241 21584 solver.cpp:404]     Test net output #0: accuracy = 0.861121\nI0818 22:25:54.017510 21584 solver.cpp:404]     Test net output #1: loss = 0.612915 (* 1 = 0.612915 loss)\nI0818 22:25:55.319802 21584 solver.cpp:228] Iteration 11800, loss = 0.0373194\nI0818 22:25:55.319846 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:25:55.319862 21584 solver.cpp:244]     Train net output #1: loss = 0.0373194 (* 1 = 0.0373194 loss)\nI0818 22:25:55.426759 21584 sgd_solver.cpp:166] Iteration 11800, lr = 0.295\nI0818 22:28:12.882200 21584 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0818 22:29:35.344951 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI0818 22:29:35.345177 21584 solver.cpp:404]     Test net output #1: loss = 0.577028 (* 1 = 0.577028 loss)\nI0818 22:29:36.647985 21584 solver.cpp:228] Iteration 11900, loss = 0.0190215\nI0818 22:29:36.648030 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:29:36.648047 21584 solver.cpp:244]     Train net output #1: loss = 0.0190215 (* 1 = 0.0190215 loss)\nI0818 22:29:36.751121 21584 sgd_solver.cpp:166] Iteration 11900, lr = 0.2975\nI0818 22:31:54.248555 21584 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0818 22:33:16.716311 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.86328\nI0818 22:33:16.716567 21584 solver.cpp:404]     Test net output #1: loss = 0.584905 (* 1 = 0.584905 loss)\nI0818 22:33:18.018816 21584 solver.cpp:228] Iteration 12000, loss = 0.0512548\nI0818 22:33:18.018862 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:33:18.018877 21584 solver.cpp:244]     Train net output #1: loss = 0.0512548 (* 1 = 0.0512548 loss)\nI0818 22:33:18.125840 21584 sgd_solver.cpp:166] Iteration 12000, lr = 0.3\nI0818 22:35:35.688134 21584 solver.cpp:337] Iteration 12100, Testing net (#0)\nI0818 22:36:58.169684 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86108\nI0818 22:36:58.169976 21584 solver.cpp:404]     Test net output #1: loss = 0.571396 (* 1 = 0.571396 loss)\nI0818 22:36:59.472739 21584 solver.cpp:228] Iteration 12100, loss = 0.0885252\nI0818 22:36:59.472784 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:36:59.472800 21584 solver.cpp:244]     Train net output #1: loss = 0.0885252 (* 1 = 0.0885252 loss)\nI0818 22:36:59.577821 21584 sgd_solver.cpp:166] Iteration 12100, lr = 0.3025\nI0818 22:39:17.106940 21584 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0818 22:40:39.585853 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86116\nI0818 22:40:39.586098 21584 solver.cpp:404]     Test net output #1: loss = 0.601385 (* 1 = 0.601385 loss)\nI0818 22:40:40.888978 21584 solver.cpp:228] Iteration 12200, loss = 0.0389472\nI0818 22:40:40.889020 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 22:40:40.889036 21584 solver.cpp:244]     Train net output #1: loss = 0.0389472 (* 1 = 0.0389472 loss)\nI0818 22:40:40.993794 21584 sgd_solver.cpp:166] Iteration 12200, lr = 0.305\nI0818 22:42:58.452903 21584 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0818 22:44:20.938194 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85956\nI0818 22:44:20.938464 21584 solver.cpp:404]     Test net 
output #1: loss = 0.607123 (* 1 = 0.607123 loss)\nI0818 22:44:22.240605 21584 solver.cpp:228] Iteration 12300, loss = 0.059502\nI0818 22:44:22.240648 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:44:22.240664 21584 solver.cpp:244]     Train net output #1: loss = 0.0595021 (* 1 = 0.0595021 loss)\nI0818 22:44:22.348186 21584 sgd_solver.cpp:166] Iteration 12300, lr = 0.3075\nI0818 22:46:39.841992 21584 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0818 22:48:02.328606 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86332\nI0818 22:48:02.328883 21584 solver.cpp:404]     Test net output #1: loss = 0.577954 (* 1 = 0.577954 loss)\nI0818 22:48:03.631244 21584 solver.cpp:228] Iteration 12400, loss = 0.0208928\nI0818 22:48:03.631286 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:48:03.631302 21584 solver.cpp:244]     Train net output #1: loss = 0.0208928 (* 1 = 0.0208928 loss)\nI0818 22:48:03.733386 21584 sgd_solver.cpp:166] Iteration 12400, lr = 0.31\nI0818 22:50:21.286717 21584 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0818 22:51:43.772840 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85956\nI0818 22:51:43.773106 21584 solver.cpp:404]     Test net output #1: loss = 0.595729 (* 1 = 0.595729 loss)\nI0818 22:51:45.075201 21584 solver.cpp:228] Iteration 12500, loss = 0.0388304\nI0818 22:51:45.075244 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 22:51:45.075260 21584 solver.cpp:244]     Train net output #1: loss = 0.0388305 (* 1 = 0.0388305 loss)\nI0818 22:51:45.175153 21584 sgd_solver.cpp:166] Iteration 12500, lr = 0.3125\nI0818 22:54:02.609583 21584 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0818 22:55:25.094271 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8674\nI0818 22:55:25.094540 21584 solver.cpp:404]     Test net output #1: loss = 0.566923 (* 1 = 0.566923 loss)\nI0818 22:55:26.396752 21584 solver.cpp:228] Iteration 12600, loss = 
0.0645943\nI0818 22:55:26.396791 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 22:55:26.396807 21584 solver.cpp:244]     Train net output #1: loss = 0.0645944 (* 1 = 0.0645944 loss)\nI0818 22:55:26.505157 21584 sgd_solver.cpp:166] Iteration 12600, lr = 0.315\nI0818 22:57:44.008435 21584 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 22:59:06.485864 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0818 22:59:06.486143 21584 solver.cpp:404]     Test net output #1: loss = 0.570802 (* 1 = 0.570802 loss)\nI0818 22:59:07.788426 21584 solver.cpp:228] Iteration 12700, loss = 0.094768\nI0818 22:59:07.788470 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 22:59:07.788493 21584 solver.cpp:244]     Train net output #1: loss = 0.0947681 (* 1 = 0.0947681 loss)\nI0818 22:59:07.889600 21584 sgd_solver.cpp:166] Iteration 12700, lr = 0.3175\nI0818 23:01:25.354674 21584 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 23:02:47.854784 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86196\nI0818 23:02:47.855073 21584 solver.cpp:404]     Test net output #1: loss = 0.584674 (* 1 = 0.584674 loss)\nI0818 23:02:49.158098 21584 solver.cpp:228] Iteration 12800, loss = 0.117667\nI0818 23:02:49.158141 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:02:49.158165 21584 solver.cpp:244]     Train net output #1: loss = 0.117667 (* 1 = 0.117667 loss)\nI0818 23:02:49.261303 21584 sgd_solver.cpp:166] Iteration 12800, lr = 0.32\nI0818 23:05:06.805939 21584 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 23:06:29.297176 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86884\nI0818 23:06:29.297433 21584 solver.cpp:404]     Test net output #1: loss = 0.551727 (* 1 = 0.551727 loss)\nI0818 23:06:30.600834 21584 solver.cpp:228] Iteration 12900, loss = 0.0385766\nI0818 23:06:30.600880 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:06:30.600904 
21584 solver.cpp:244]     Train net output #1: loss = 0.0385766 (* 1 = 0.0385766 loss)\nI0818 23:06:30.701400 21584 sgd_solver.cpp:166] Iteration 12900, lr = 0.3225\nI0818 23:08:48.176331 21584 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 23:10:10.650095 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85832\nI0818 23:10:10.650380 21584 solver.cpp:404]     Test net output #1: loss = 0.598679 (* 1 = 0.598679 loss)\nI0818 23:10:11.953292 21584 solver.cpp:228] Iteration 13000, loss = 0.0466499\nI0818 23:10:11.953338 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:10:11.953362 21584 solver.cpp:244]     Train net output #1: loss = 0.0466499 (* 1 = 0.0466499 loss)\nI0818 23:10:12.059343 21584 sgd_solver.cpp:166] Iteration 13000, lr = 0.325\nI0818 23:12:29.564101 21584 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 23:13:51.915130 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86012\nI0818 23:13:51.915410 21584 solver.cpp:404]     Test net output #1: loss = 0.58974 (* 1 = 0.58974 loss)\nI0818 23:13:53.217939 21584 solver.cpp:228] Iteration 13100, loss = 0.0507807\nI0818 23:13:53.217980 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:13:53.217996 21584 solver.cpp:244]     Train net output #1: loss = 0.0507807 (* 1 = 0.0507807 loss)\nI0818 23:13:53.321954 21584 sgd_solver.cpp:166] Iteration 13100, lr = 0.3275\nI0818 23:16:10.765405 21584 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 23:17:33.105832 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8646\nI0818 23:17:33.106112 21584 solver.cpp:404]     Test net output #1: loss = 0.586345 (* 1 = 0.586345 loss)\nI0818 23:17:34.408210 21584 solver.cpp:228] Iteration 13200, loss = 0.0650019\nI0818 23:17:34.408252 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:17:34.408268 21584 solver.cpp:244]     Train net output #1: loss = 0.065002 (* 1 = 0.065002 loss)\nI0818 23:17:34.510514 21584 
sgd_solver.cpp:166] Iteration 13200, lr = 0.33\nI0818 23:19:52.038993 21584 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 23:21:14.382725 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85516\nI0818 23:21:14.383004 21584 solver.cpp:404]     Test net output #1: loss = 0.614426 (* 1 = 0.614426 loss)\nI0818 23:21:15.685531 21584 solver.cpp:228] Iteration 13300, loss = 0.0449343\nI0818 23:21:15.685575 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:21:15.685592 21584 solver.cpp:244]     Train net output #1: loss = 0.0449344 (* 1 = 0.0449344 loss)\nI0818 23:21:15.788722 21584 sgd_solver.cpp:166] Iteration 13300, lr = 0.3325\nI0818 23:23:33.244698 21584 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 23:24:55.596947 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85948\nI0818 23:24:55.597228 21584 solver.cpp:404]     Test net output #1: loss = 0.607588 (* 1 = 0.607588 loss)\nI0818 23:24:56.900558 21584 solver.cpp:228] Iteration 13400, loss = 0.0762766\nI0818 23:24:56.900601 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:24:56.900617 21584 solver.cpp:244]     Train net output #1: loss = 0.0762767 (* 1 = 0.0762767 loss)\nI0818 23:24:57.003813 21584 sgd_solver.cpp:166] Iteration 13400, lr = 0.335\nI0818 23:27:14.397719 21584 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 23:28:36.774174 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8686\nI0818 23:28:36.774440 21584 solver.cpp:404]     Test net output #1: loss = 0.560883 (* 1 = 0.560883 loss)\nI0818 23:28:38.077625 21584 solver.cpp:228] Iteration 13500, loss = 0.0565803\nI0818 23:28:38.077671 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:28:38.077687 21584 solver.cpp:244]     Train net output #1: loss = 0.0565803 (* 1 = 0.0565803 loss)\nI0818 23:28:38.179450 21584 sgd_solver.cpp:166] Iteration 13500, lr = 0.3375\nI0818 23:30:55.669317 21584 solver.cpp:337] Iteration 13600, Testing 
net (#0)\nI0818 23:32:18.033494 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85768\nI0818 23:32:18.033774 21584 solver.cpp:404]     Test net output #1: loss = 0.612796 (* 1 = 0.612796 loss)\nI0818 23:32:19.335893 21584 solver.cpp:228] Iteration 13600, loss = 0.0660083\nI0818 23:32:19.335937 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:32:19.335953 21584 solver.cpp:244]     Train net output #1: loss = 0.0660083 (* 1 = 0.0660083 loss)\nI0818 23:32:19.442474 21584 sgd_solver.cpp:166] Iteration 13600, lr = 0.34\nI0818 23:34:36.855659 21584 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 23:35:59.223683 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86452\nI0818 23:35:59.223968 21584 solver.cpp:404]     Test net output #1: loss = 0.562345 (* 1 = 0.562345 loss)\nI0818 23:36:00.526384 21584 solver.cpp:228] Iteration 13700, loss = 0.0621645\nI0818 23:36:00.526427 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:36:00.526443 21584 solver.cpp:244]     Train net output #1: loss = 0.0621645 (* 1 = 0.0621645 loss)\nI0818 23:36:00.630926 21584 sgd_solver.cpp:166] Iteration 13700, lr = 0.3425\nI0818 23:38:18.026935 21584 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 23:39:40.401304 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86604\nI0818 23:39:40.401583 21584 solver.cpp:404]     Test net output #1: loss = 0.556679 (* 1 = 0.556679 loss)\nI0818 23:39:41.703753 21584 solver.cpp:228] Iteration 13800, loss = 0.0707048\nI0818 23:39:41.703795 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:39:41.703811 21584 solver.cpp:244]     Train net output #1: loss = 0.0707049 (* 1 = 0.0707049 loss)\nI0818 23:39:41.805613 21584 sgd_solver.cpp:166] Iteration 13800, lr = 0.345\nI0818 23:41:59.423463 21584 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 23:43:21.792333 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8626\nI0818 23:43:21.792613 
21584 solver.cpp:404]     Test net output #1: loss = 0.579171 (* 1 = 0.579171 loss)\nI0818 23:43:23.095413 21584 solver.cpp:228] Iteration 13900, loss = 0.0521939\nI0818 23:43:23.095456 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0818 23:43:23.095473 21584 solver.cpp:244]     Train net output #1: loss = 0.052194 (* 1 = 0.052194 loss)\nI0818 23:43:23.198415 21584 sgd_solver.cpp:166] Iteration 13900, lr = 0.3475\nI0818 23:45:40.706064 21584 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 23:47:03.173219 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86504\nI0818 23:47:03.173485 21584 solver.cpp:404]     Test net output #1: loss = 0.559529 (* 1 = 0.559529 loss)\nI0818 23:47:04.476063 21584 solver.cpp:228] Iteration 14000, loss = 0.0618295\nI0818 23:47:04.476104 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0818 23:47:04.476120 21584 solver.cpp:244]     Train net output #1: loss = 0.0618296 (* 1 = 0.0618296 loss)\nI0818 23:47:04.579912 21584 sgd_solver.cpp:166] Iteration 14000, lr = 0.35\nI0818 23:49:22.047730 21584 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 23:50:44.517350 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86664\nI0818 23:50:44.517637 21584 solver.cpp:404]     Test net output #1: loss = 0.551558 (* 1 = 0.551558 loss)\nI0818 23:50:45.819636 21584 solver.cpp:228] Iteration 14100, loss = 0.105774\nI0818 23:50:45.819679 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0818 23:50:45.819695 21584 solver.cpp:244]     Train net output #1: loss = 0.105774 (* 1 = 0.105774 loss)\nI0818 23:50:45.927934 21584 sgd_solver.cpp:166] Iteration 14100, lr = 0.3525\nI0818 23:53:03.364591 21584 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 23:54:25.831450 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86232\nI0818 23:54:25.831732 21584 solver.cpp:404]     Test net output #1: loss = 0.543996 (* 1 = 0.543996 loss)\nI0818 23:54:27.133968 21584 
solver.cpp:228] Iteration 14200, loss = 0.0866621\nI0818 23:54:27.134009 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0818 23:54:27.134026 21584 solver.cpp:244]     Train net output #1: loss = 0.0866621 (* 1 = 0.0866621 loss)\nI0818 23:54:27.239501 21584 sgd_solver.cpp:166] Iteration 14200, lr = 0.355\nI0818 23:56:44.733582 21584 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 23:58:07.206702 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86388\nI0818 23:58:07.206956 21584 solver.cpp:404]     Test net output #1: loss = 0.574671 (* 1 = 0.574671 loss)\nI0818 23:58:08.508699 21584 solver.cpp:228] Iteration 14300, loss = 0.0387365\nI0818 23:58:08.508745 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0818 23:58:08.508762 21584 solver.cpp:244]     Train net output #1: loss = 0.0387366 (* 1 = 0.0387366 loss)\nI0818 23:58:08.620754 21584 sgd_solver.cpp:166] Iteration 14300, lr = 0.3575\nI0819 00:00:26.200579 21584 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0819 00:01:48.675827 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0819 00:01:48.676090 21584 solver.cpp:404]     Test net output #1: loss = 0.567406 (* 1 = 0.567406 loss)\nI0819 00:01:49.979533 21584 solver.cpp:228] Iteration 14400, loss = 0.142527\nI0819 00:01:49.979578 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 00:01:49.979593 21584 solver.cpp:244]     Train net output #1: loss = 0.142527 (* 1 = 0.142527 loss)\nI0819 00:01:50.085309 21584 sgd_solver.cpp:166] Iteration 14400, lr = 0.36\nI0819 00:04:07.699946 21584 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0819 00:05:30.180799 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86572\nI0819 00:05:30.181073 21584 solver.cpp:404]     Test net output #1: loss = 0.555359 (* 1 = 0.555359 loss)\nI0819 00:05:31.484154 21584 solver.cpp:228] Iteration 14500, loss = 0.0660571\nI0819 00:05:31.484200 21584 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI0819 00:05:31.484215 21584 solver.cpp:244]     Train net output #1: loss = 0.0660572 (* 1 = 0.0660572 loss)\nI0819 00:05:31.585525 21584 sgd_solver.cpp:166] Iteration 14500, lr = 0.3625\nI0819 00:07:49.179221 21584 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0819 00:09:11.650123 21584 solver.cpp:404]     Test net output #0: accuracy = 0.85996\nI0819 00:09:11.650396 21584 solver.cpp:404]     Test net output #1: loss = 0.587669 (* 1 = 0.587669 loss)\nI0819 00:09:12.953869 21584 solver.cpp:228] Iteration 14600, loss = 0.141897\nI0819 00:09:12.953910 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:09:12.953927 21584 solver.cpp:244]     Train net output #1: loss = 0.141897 (* 1 = 0.141897 loss)\nI0819 00:09:13.060094 21584 sgd_solver.cpp:166] Iteration 14600, lr = 0.365\nI0819 00:11:30.620815 21584 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0819 00:12:53.106293 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86452\nI0819 00:12:53.106552 21584 solver.cpp:404]     Test net output #1: loss = 0.547435 (* 1 = 0.547435 loss)\nI0819 00:12:54.408875 21584 solver.cpp:228] Iteration 14700, loss = 0.103684\nI0819 00:12:54.408916 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:12:54.408931 21584 solver.cpp:244]     Train net output #1: loss = 0.103684 (* 1 = 0.103684 loss)\nI0819 00:12:54.513433 21584 sgd_solver.cpp:166] Iteration 14700, lr = 0.3675\nI0819 00:15:11.936695 21584 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0819 00:16:34.420271 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8638\nI0819 00:16:34.420550 21584 solver.cpp:404]     Test net output #1: loss = 0.552146 (* 1 = 0.552146 loss)\nI0819 00:16:35.722955 21584 solver.cpp:228] Iteration 14800, loss = 0.0423092\nI0819 00:16:35.722995 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:16:35.723009 21584 solver.cpp:244]     Train net output #1: loss = 0.0423093 (* 1 = 0.0423093 
loss)\nI0819 00:16:35.826211 21584 sgd_solver.cpp:166] Iteration 14800, lr = 0.37\nI0819 00:18:53.346344 21584 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0819 00:20:15.829149 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8618\nI0819 00:20:15.829428 21584 solver.cpp:404]     Test net output #1: loss = 0.555973 (* 1 = 0.555973 loss)\nI0819 00:20:17.131799 21584 solver.cpp:228] Iteration 14900, loss = 0.107204\nI0819 00:20:17.131839 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:20:17.131855 21584 solver.cpp:244]     Train net output #1: loss = 0.107204 (* 1 = 0.107204 loss)\nI0819 00:20:17.236173 21584 sgd_solver.cpp:166] Iteration 14900, lr = 0.3725\nI0819 00:22:34.858211 21584 solver.cpp:337] Iteration 15000, Testing net (#0)\nI0819 00:23:57.328019 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86536\nI0819 00:23:57.328282 21584 solver.cpp:404]     Test net output #1: loss = 0.574037 (* 1 = 0.574037 loss)\nI0819 00:23:58.630744 21584 solver.cpp:228] Iteration 15000, loss = 0.0511968\nI0819 00:23:58.630787 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:23:58.630802 21584 solver.cpp:244]     Train net output #1: loss = 0.0511969 (* 1 = 0.0511969 loss)\nI0819 00:23:58.738565 21584 sgd_solver.cpp:166] Iteration 15000, lr = 0.375\nI0819 00:26:16.271904 21584 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0819 00:27:38.740955 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8688\nI0819 00:27:38.741215 21584 solver.cpp:404]     Test net output #1: loss = 0.558893 (* 1 = 0.558893 loss)\nI0819 00:27:40.044199 21584 solver.cpp:228] Iteration 15100, loss = 0.0466383\nI0819 00:27:40.044241 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:27:40.044257 21584 solver.cpp:244]     Train net output #1: loss = 0.0466384 (* 1 = 0.0466384 loss)\nI0819 00:27:40.146952 21584 sgd_solver.cpp:166] Iteration 15100, lr = 0.3775\nI0819 00:29:57.756564 21584 
solver.cpp:337] Iteration 15200, Testing net (#0)\nI0819 00:31:20.218765 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87192\nI0819 00:31:20.219043 21584 solver.cpp:404]     Test net output #1: loss = 0.554626 (* 1 = 0.554626 loss)\nI0819 00:31:21.521488 21584 solver.cpp:228] Iteration 15200, loss = 0.0810899\nI0819 00:31:21.521531 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 00:31:21.521546 21584 solver.cpp:244]     Train net output #1: loss = 0.08109 (* 1 = 0.08109 loss)\nI0819 00:31:21.629767 21584 sgd_solver.cpp:166] Iteration 15200, lr = 0.38\nI0819 00:33:39.187260 21584 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0819 00:35:01.649255 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87136\nI0819 00:35:01.649504 21584 solver.cpp:404]     Test net output #1: loss = 0.544772 (* 1 = 0.544772 loss)\nI0819 00:35:02.951560 21584 solver.cpp:228] Iteration 15300, loss = 0.0356459\nI0819 00:35:02.951601 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 00:35:02.951617 21584 solver.cpp:244]     Train net output #1: loss = 0.035646 (* 1 = 0.035646 loss)\nI0819 00:35:03.061972 21584 sgd_solver.cpp:166] Iteration 15300, lr = 0.3825\nI0819 00:37:20.683537 21584 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0819 00:38:43.150851 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8672\nI0819 00:38:43.151126 21584 solver.cpp:404]     Test net output #1: loss = 0.550598 (* 1 = 0.550598 loss)\nI0819 00:38:44.452720 21584 solver.cpp:228] Iteration 15400, loss = 0.0241324\nI0819 00:38:44.452759 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 00:38:44.452774 21584 solver.cpp:244]     Train net output #1: loss = 0.0241325 (* 1 = 0.0241325 loss)\nI0819 00:38:44.557502 21584 sgd_solver.cpp:166] Iteration 15400, lr = 0.385\nI0819 00:41:02.086097 21584 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0819 00:42:24.540431 21584 solver.cpp:404]     Test net output #0: accuracy = 
0.87136\nI0819 00:42:24.540704 21584 solver.cpp:404]     Test net output #1: loss = 0.542123 (* 1 = 0.542123 loss)\nI0819 00:42:25.842535 21584 solver.cpp:228] Iteration 15500, loss = 0.0743449\nI0819 00:42:25.842581 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:42:25.842597 21584 solver.cpp:244]     Train net output #1: loss = 0.074345 (* 1 = 0.074345 loss)\nI0819 00:42:25.948916 21584 sgd_solver.cpp:166] Iteration 15500, lr = 0.3875\nI0819 00:44:43.519721 21584 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0819 00:46:05.932986 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86788\nI0819 00:46:05.933238 21584 solver.cpp:404]     Test net output #1: loss = 0.547312 (* 1 = 0.547312 loss)\nI0819 00:46:07.235548 21584 solver.cpp:228] Iteration 15600, loss = 0.0657823\nI0819 00:46:07.235599 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 00:46:07.235616 21584 solver.cpp:244]     Train net output #1: loss = 0.0657824 (* 1 = 0.0657824 loss)\nI0819 00:46:07.343207 21584 sgd_solver.cpp:166] Iteration 15600, lr = 0.39\nI0819 00:48:24.824882 21584 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0819 00:49:47.482509 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87536\nI0819 00:49:47.482797 21584 solver.cpp:404]     Test net output #1: loss = 0.531338 (* 1 = 0.531338 loss)\nI0819 00:49:48.790465 21584 solver.cpp:228] Iteration 15700, loss = 0.119918\nI0819 00:49:48.790514 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:49:48.790530 21584 solver.cpp:244]     Train net output #1: loss = 0.119918 (* 1 = 0.119918 loss)\nI0819 00:49:48.893844 21584 sgd_solver.cpp:166] Iteration 15700, lr = 0.3925\nI0819 00:52:06.402684 21584 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0819 00:53:28.860656 21584 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 00:53:28.860939 21584 solver.cpp:404]     Test net output #1: loss = 0.548145 (* 1 = 0.548145 loss)\nI0819 
00:53:30.164427 21584 solver.cpp:228] Iteration 15800, loss = 0.0859471\nI0819 00:53:30.164472 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 00:53:30.164496 21584 solver.cpp:244]     Train net output #1: loss = 0.0859472 (* 1 = 0.0859472 loss)\nI0819 00:53:30.267865 21584 sgd_solver.cpp:166] Iteration 15800, lr = 0.395\nI0819 00:55:47.725170 21584 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0819 00:57:10.195086 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86276\nI0819 00:57:10.195324 21584 solver.cpp:404]     Test net output #1: loss = 0.557922 (* 1 = 0.557922 loss)\nI0819 00:57:11.498435 21584 solver.cpp:228] Iteration 15900, loss = 0.0890416\nI0819 00:57:11.498481 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 00:57:11.498504 21584 solver.cpp:244]     Train net output #1: loss = 0.0890417 (* 1 = 0.0890417 loss)\nI0819 00:57:11.604832 21584 sgd_solver.cpp:166] Iteration 15900, lr = 0.3975\nI0819 00:59:29.152588 21584 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0819 01:00:51.628931 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86464\nI0819 01:00:51.629211 21584 solver.cpp:404]     Test net output #1: loss = 0.571223 (* 1 = 0.571223 loss)\nI0819 01:00:52.931797 21584 solver.cpp:228] Iteration 16000, loss = 0.217796\nI0819 01:00:52.931840 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:00:52.931864 21584 solver.cpp:244]     Train net output #1: loss = 0.217796 (* 1 = 0.217796 loss)\nI0819 01:00:53.038398 21584 sgd_solver.cpp:166] Iteration 16000, lr = 0.4\nI0819 01:03:10.537791 21584 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0819 01:04:33.015522 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86832\nI0819 01:04:33.015786 21584 solver.cpp:404]     Test net output #1: loss = 0.547349 (* 1 = 0.547349 loss)\nI0819 01:04:34.317509 21584 solver.cpp:228] Iteration 16100, loss = 0.0470178\nI0819 01:04:34.317555 21584 solver.cpp:244]     
Train net output #0: accuracy = 0.992\nI0819 01:04:34.317579 21584 solver.cpp:244]     Train net output #1: loss = 0.047018 (* 1 = 0.047018 loss)\nI0819 01:04:34.427537 21584 sgd_solver.cpp:166] Iteration 16100, lr = 0.4025\nI0819 01:06:51.915691 21584 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0819 01:08:14.407033 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86904\nI0819 01:08:14.407320 21584 solver.cpp:404]     Test net output #1: loss = 0.550414 (* 1 = 0.550414 loss)\nI0819 01:08:15.709920 21584 solver.cpp:228] Iteration 16200, loss = 0.0655418\nI0819 01:08:15.709966 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:08:15.709991 21584 solver.cpp:244]     Train net output #1: loss = 0.0655419 (* 1 = 0.0655419 loss)\nI0819 01:08:15.812639 21584 sgd_solver.cpp:166] Iteration 16200, lr = 0.405\nI0819 01:10:33.272492 21584 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0819 01:11:55.749831 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87036\nI0819 01:11:55.750103 21584 solver.cpp:404]     Test net output #1: loss = 0.546382 (* 1 = 0.546382 loss)\nI0819 01:11:57.053423 21584 solver.cpp:228] Iteration 16300, loss = 0.0142509\nI0819 01:11:57.053468 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 01:11:57.053493 21584 solver.cpp:244]     Train net output #1: loss = 0.014251 (* 1 = 0.014251 loss)\nI0819 01:11:57.160221 21584 sgd_solver.cpp:166] Iteration 16300, lr = 0.4075\nI0819 01:14:14.603163 21584 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0819 01:15:37.031049 21584 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 01:15:37.031330 21584 solver.cpp:404]     Test net output #1: loss = 0.553596 (* 1 = 0.553596 loss)\nI0819 01:15:38.332355 21584 solver.cpp:228] Iteration 16400, loss = 0.0603539\nI0819 01:15:38.332401 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:15:38.332425 21584 solver.cpp:244]     Train net output #1: loss = 0.060354 (* 
1 = 0.060354 loss)\nI0819 01:15:38.440403 21584 sgd_solver.cpp:166] Iteration 16400, lr = 0.41\nI0819 01:17:55.850683 21584 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0819 01:19:18.321595 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86852\nI0819 01:19:18.321884 21584 solver.cpp:404]     Test net output #1: loss = 0.530293 (* 1 = 0.530293 loss)\nI0819 01:19:19.624704 21584 solver.cpp:228] Iteration 16500, loss = 0.155376\nI0819 01:19:19.624745 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 01:19:19.624769 21584 solver.cpp:244]     Train net output #1: loss = 0.155376 (* 1 = 0.155376 loss)\nI0819 01:19:19.729521 21584 sgd_solver.cpp:166] Iteration 16500, lr = 0.4125\nI0819 01:21:37.200064 21584 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0819 01:22:59.678297 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86608\nI0819 01:22:59.678588 21584 solver.cpp:404]     Test net output #1: loss = 0.528812 (* 1 = 0.528812 loss)\nI0819 01:23:00.981544 21584 solver.cpp:228] Iteration 16600, loss = 0.159234\nI0819 01:23:00.981587 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 01:23:00.981611 21584 solver.cpp:244]     Train net output #1: loss = 0.159234 (* 1 = 0.159234 loss)\nI0819 01:23:01.089241 21584 sgd_solver.cpp:166] Iteration 16600, lr = 0.415\nI0819 01:25:18.538944 21584 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0819 01:26:41.025277 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87164\nI0819 01:26:41.025548 21584 solver.cpp:404]     Test net output #1: loss = 0.532932 (* 1 = 0.532932 loss)\nI0819 01:26:42.328423 21584 solver.cpp:228] Iteration 16700, loss = 0.0927079\nI0819 01:26:42.328469 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:26:42.328492 21584 solver.cpp:244]     Train net output #1: loss = 0.0927081 (* 1 = 0.0927081 loss)\nI0819 01:26:42.432572 21584 sgd_solver.cpp:166] Iteration 16700, lr = 0.4175\nI0819 01:28:59.906406 
21584 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0819 01:30:22.292286 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86996\nI0819 01:30:22.292577 21584 solver.cpp:404]     Test net output #1: loss = 0.537758 (* 1 = 0.537758 loss)\nI0819 01:30:23.595546 21584 solver.cpp:228] Iteration 16800, loss = 0.0563104\nI0819 01:30:23.595592 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:30:23.595616 21584 solver.cpp:244]     Train net output #1: loss = 0.0563105 (* 1 = 0.0563105 loss)\nI0819 01:30:23.702476 21584 sgd_solver.cpp:166] Iteration 16800, lr = 0.42\nI0819 01:32:41.215927 21584 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0819 01:34:03.576594 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86656\nI0819 01:34:03.576881 21584 solver.cpp:404]     Test net output #1: loss = 0.556668 (* 1 = 0.556668 loss)\nI0819 01:34:04.880084 21584 solver.cpp:228] Iteration 16900, loss = 0.0568862\nI0819 01:34:04.880127 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:34:04.880151 21584 solver.cpp:244]     Train net output #1: loss = 0.0568864 (* 1 = 0.0568864 loss)\nI0819 01:34:04.979802 21584 sgd_solver.cpp:166] Iteration 16900, lr = 0.4225\nI0819 01:36:22.459146 21584 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0819 01:37:44.831252 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86728\nI0819 01:37:44.831542 21584 solver.cpp:404]     Test net output #1: loss = 0.561316 (* 1 = 0.561316 loss)\nI0819 01:37:46.134248 21584 solver.cpp:228] Iteration 17000, loss = 0.0275623\nI0819 01:37:46.134292 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 01:37:46.134316 21584 solver.cpp:244]     Train net output #1: loss = 0.0275624 (* 1 = 0.0275624 loss)\nI0819 01:37:46.234977 21584 sgd_solver.cpp:166] Iteration 17000, lr = 0.425\nI0819 01:40:03.684996 21584 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0819 01:41:26.073405 21584 solver.cpp:404]     Test net 
output #0: accuracy = 0.86404\nI0819 01:41:26.073695 21584 solver.cpp:404]     Test net output #1: loss = 0.56272 (* 1 = 0.56272 loss)\nI0819 01:41:27.376924 21584 solver.cpp:228] Iteration 17100, loss = 0.0310995\nI0819 01:41:27.376967 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 01:41:27.376992 21584 solver.cpp:244]     Train net output #1: loss = 0.0310996 (* 1 = 0.0310996 loss)\nI0819 01:41:27.476510 21584 sgd_solver.cpp:166] Iteration 17100, lr = 0.4275\nI0819 01:43:45.062093 21584 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0819 01:45:07.479429 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86596\nI0819 01:45:07.479710 21584 solver.cpp:404]     Test net output #1: loss = 0.572205 (* 1 = 0.572205 loss)\nI0819 01:45:08.783211 21584 solver.cpp:228] Iteration 17200, loss = 0.112033\nI0819 01:45:08.783257 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 01:45:08.783280 21584 solver.cpp:244]     Train net output #1: loss = 0.112033 (* 1 = 0.112033 loss)\nI0819 01:45:08.883806 21584 sgd_solver.cpp:166] Iteration 17200, lr = 0.43\nI0819 01:47:26.357121 21584 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0819 01:48:48.729910 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87184\nI0819 01:48:48.730165 21584 solver.cpp:404]     Test net output #1: loss = 0.511536 (* 1 = 0.511536 loss)\nI0819 01:48:50.033330 21584 solver.cpp:228] Iteration 17300, loss = 0.0377894\nI0819 01:48:50.033376 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 01:48:50.033401 21584 solver.cpp:244]     Train net output #1: loss = 0.0377895 (* 1 = 0.0377895 loss)\nI0819 01:48:50.133381 21584 sgd_solver.cpp:166] Iteration 17300, lr = 0.4325\nI0819 01:51:07.711051 21584 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0819 01:52:30.122400 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87256\nI0819 01:52:30.122670 21584 solver.cpp:404]     Test net output #1: loss = 0.536235 (* 1 = 
0.536235 loss)\nI0819 01:52:31.424878 21584 solver.cpp:228] Iteration 17400, loss = 0.0492075\nI0819 01:52:31.424919 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 01:52:31.424937 21584 solver.cpp:244]     Train net output #1: loss = 0.0492077 (* 1 = 0.0492077 loss)\nI0819 01:52:31.531508 21584 sgd_solver.cpp:166] Iteration 17400, lr = 0.435\nI0819 01:54:49.033428 21584 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0819 01:56:11.406275 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86732\nI0819 01:56:11.406550 21584 solver.cpp:404]     Test net output #1: loss = 0.565199 (* 1 = 0.565199 loss)\nI0819 01:56:12.708259 21584 solver.cpp:228] Iteration 17500, loss = 0.162614\nI0819 01:56:12.708299 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 01:56:12.708314 21584 solver.cpp:244]     Train net output #1: loss = 0.162614 (* 1 = 0.162614 loss)\nI0819 01:56:12.811338 21584 sgd_solver.cpp:166] Iteration 17500, lr = 0.4375\nI0819 01:58:30.333606 21584 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0819 01:59:52.699973 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86824\nI0819 01:59:52.700258 21584 solver.cpp:404]     Test net output #1: loss = 0.570175 (* 1 = 0.570175 loss)\nI0819 01:59:54.002192 21584 solver.cpp:228] Iteration 17600, loss = 0.115871\nI0819 01:59:54.002231 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 01:59:54.002249 21584 solver.cpp:244]     Train net output #1: loss = 0.115872 (* 1 = 0.115872 loss)\nI0819 01:59:54.104594 21584 sgd_solver.cpp:166] Iteration 17600, lr = 0.44\nI0819 02:02:11.608685 21584 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0819 02:03:34.078620 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87072\nI0819 02:03:34.078922 21584 solver.cpp:404]     Test net output #1: loss = 0.527268 (* 1 = 0.527268 loss)\nI0819 02:03:35.380913 21584 solver.cpp:228] Iteration 17700, loss = 0.0796718\nI0819 02:03:35.380954 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:03:35.380970 21584 solver.cpp:244]     Train net output #1: loss = 0.0796719 (* 1 = 0.0796719 loss)\nI0819 02:03:35.487105 21584 sgd_solver.cpp:166] Iteration 17700, lr = 0.4425\nI0819 02:05:52.985878 21584 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0819 02:07:15.414639 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87568\nI0819 02:07:15.414929 21584 solver.cpp:404]     Test net output #1: loss = 0.517337 (* 1 = 0.517337 loss)\nI0819 02:07:16.716665 21584 solver.cpp:228] Iteration 17800, loss = 0.102947\nI0819 02:07:16.716709 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:07:16.716725 21584 solver.cpp:244]     Train net output #1: loss = 0.102947 (* 1 = 0.102947 loss)\nI0819 02:07:16.820891 21584 sgd_solver.cpp:166] Iteration 17800, lr = 0.445\nI0819 02:09:34.400774 21584 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0819 02:10:56.857131 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87316\nI0819 02:10:56.857417 21584 solver.cpp:404]     Test net output #1: loss = 0.536154 (* 1 = 0.536154 loss)\nI0819 02:10:58.158848 21584 solver.cpp:228] Iteration 17900, loss = 0.0808461\nI0819 02:10:58.158891 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:10:58.158907 21584 solver.cpp:244]     Train net output #1: loss = 0.0808463 (* 1 = 0.0808463 loss)\nI0819 02:10:58.262130 21584 sgd_solver.cpp:166] Iteration 17900, lr = 0.4475\nI0819 02:13:15.919878 21584 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0819 02:14:38.401684 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86592\nI0819 02:14:38.401975 21584 solver.cpp:404]     Test net output #1: loss = 0.565399 (* 1 = 0.565399 loss)\nI0819 02:14:39.705328 21584 solver.cpp:228] Iteration 18000, loss = 0.0853106\nI0819 02:14:39.705373 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:14:39.705389 21584 solver.cpp:244]     Train net 
output #1: loss = 0.0853107 (* 1 = 0.0853107 loss)\nI0819 02:14:39.808085 21584 sgd_solver.cpp:166] Iteration 18000, lr = 0.45\nI0819 02:16:57.414088 21584 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0819 02:18:19.892994 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87004\nI0819 02:18:19.893254 21584 solver.cpp:404]     Test net output #1: loss = 0.527067 (* 1 = 0.527067 loss)\nI0819 02:18:21.195487 21584 solver.cpp:228] Iteration 18100, loss = 0.102028\nI0819 02:18:21.195531 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:18:21.195547 21584 solver.cpp:244]     Train net output #1: loss = 0.102029 (* 1 = 0.102029 loss)\nI0819 02:18:21.304806 21584 sgd_solver.cpp:166] Iteration 18100, lr = 0.4525\nI0819 02:20:38.824080 21584 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0819 02:22:01.305320 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87752\nI0819 02:22:01.305599 21584 solver.cpp:404]     Test net output #1: loss = 0.49961 (* 1 = 0.49961 loss)\nI0819 02:22:02.608119 21584 solver.cpp:228] Iteration 18200, loss = 0.106346\nI0819 02:22:02.608166 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 02:22:02.608183 21584 solver.cpp:244]     Train net output #1: loss = 0.106347 (* 1 = 0.106347 loss)\nI0819 02:22:02.716585 21584 sgd_solver.cpp:166] Iteration 18200, lr = 0.455\nI0819 02:24:20.320340 21584 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0819 02:25:42.798655 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8682\nI0819 02:25:42.798919 21584 solver.cpp:404]     Test net output #1: loss = 0.524438 (* 1 = 0.524438 loss)\nI0819 02:25:44.101269 21584 solver.cpp:228] Iteration 18300, loss = 0.0861118\nI0819 02:25:44.101311 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:25:44.101327 21584 solver.cpp:244]     Train net output #1: loss = 0.0861119 (* 1 = 0.0861119 loss)\nI0819 02:25:44.205618 21584 sgd_solver.cpp:166] Iteration 18300, lr = 
0.4575\nI0819 02:28:01.739118 21584 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0819 02:29:25.006098 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87064\nI0819 02:29:25.006332 21584 solver.cpp:404]     Test net output #1: loss = 0.554816 (* 1 = 0.554816 loss)\nI0819 02:29:26.313104 21584 solver.cpp:228] Iteration 18400, loss = 0.0689989\nI0819 02:29:26.313151 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:29:26.313174 21584 solver.cpp:244]     Train net output #1: loss = 0.068999 (* 1 = 0.068999 loss)\nI0819 02:29:26.408602 21584 sgd_solver.cpp:166] Iteration 18400, lr = 0.46\nI0819 02:31:44.102414 21584 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0819 02:33:07.197736 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87024\nI0819 02:33:07.198037 21584 solver.cpp:404]     Test net output #1: loss = 0.518565 (* 1 = 0.518565 loss)\nI0819 02:33:08.504827 21584 solver.cpp:228] Iteration 18500, loss = 0.0518748\nI0819 02:33:08.504874 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:33:08.504899 21584 solver.cpp:244]     Train net output #1: loss = 0.0518749 (* 1 = 0.0518749 loss)\nI0819 02:33:08.611914 21584 sgd_solver.cpp:166] Iteration 18500, lr = 0.4625\nI0819 02:35:26.423538 21584 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0819 02:36:49.495402 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87324\nI0819 02:36:49.495656 21584 solver.cpp:404]     Test net output #1: loss = 0.514246 (* 1 = 0.514246 loss)\nI0819 02:36:50.801769 21584 solver.cpp:228] Iteration 18600, loss = 0.045102\nI0819 02:36:50.801820 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 02:36:50.801843 21584 solver.cpp:244]     Train net output #1: loss = 0.0451021 (* 1 = 0.0451021 loss)\nI0819 02:36:50.900305 21584 sgd_solver.cpp:166] Iteration 18600, lr = 0.465\nI0819 02:39:08.658555 21584 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0819 02:40:31.811451 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.87408\nI0819 02:40:31.811688 21584 solver.cpp:404]     Test net output #1: loss = 0.513491 (* 1 = 0.513491 loss)\nI0819 02:40:33.118059 21584 solver.cpp:228] Iteration 18700, loss = 0.0669568\nI0819 02:40:33.118105 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 02:40:33.118129 21584 solver.cpp:244]     Train net output #1: loss = 0.0669569 (* 1 = 0.0669569 loss)\nI0819 02:40:33.222753 21584 sgd_solver.cpp:166] Iteration 18700, lr = 0.4675\nI0819 02:42:51.011910 21584 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0819 02:44:14.193501 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8716\nI0819 02:44:14.193740 21584 solver.cpp:404]     Test net output #1: loss = 0.52163 (* 1 = 0.52163 loss)\nI0819 02:44:15.499927 21584 solver.cpp:228] Iteration 18800, loss = 0.0666035\nI0819 02:44:15.499990 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:44:15.500015 21584 solver.cpp:244]     Train net output #1: loss = 0.0666036 (* 1 = 0.0666036 loss)\nI0819 02:44:15.598395 21584 sgd_solver.cpp:166] Iteration 18800, lr = 0.47\nI0819 02:46:33.346092 21584 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0819 02:47:56.698460 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87508\nI0819 02:47:56.698694 21584 solver.cpp:404]     Test net output #1: loss = 0.49738 (* 1 = 0.49738 loss)\nI0819 02:47:58.005295 21584 solver.cpp:228] Iteration 18900, loss = 0.0510766\nI0819 02:47:58.005342 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:47:58.005367 21584 solver.cpp:244]     Train net output #1: loss = 0.0510766 (* 1 = 0.0510766 loss)\nI0819 02:47:58.107098 21584 sgd_solver.cpp:166] Iteration 18900, lr = 0.4725\nI0819 02:50:15.839527 21584 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0819 02:51:38.925421 21584 solver.cpp:404]     Test net output #0: accuracy = 0.867\nI0819 02:51:38.925710 21584 solver.cpp:404]     Test net output 
#1: loss = 0.539414 (* 1 = 0.539414 loss)\nI0819 02:51:40.232118 21584 solver.cpp:228] Iteration 19000, loss = 0.0496182\nI0819 02:51:40.232167 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 02:51:40.232189 21584 solver.cpp:244]     Train net output #1: loss = 0.0496182 (* 1 = 0.0496182 loss)\nI0819 02:51:40.336566 21584 sgd_solver.cpp:166] Iteration 19000, lr = 0.475\nI0819 02:53:58.016494 21584 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0819 02:55:21.202344 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86008\nI0819 02:55:21.202654 21584 solver.cpp:404]     Test net output #1: loss = 0.557897 (* 1 = 0.557897 loss)\nI0819 02:55:22.508137 21584 solver.cpp:228] Iteration 19100, loss = 0.118176\nI0819 02:55:22.508185 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 02:55:22.508209 21584 solver.cpp:244]     Train net output #1: loss = 0.118176 (* 1 = 0.118176 loss)\nI0819 02:55:22.612906 21584 sgd_solver.cpp:166] Iteration 19100, lr = 0.4775\nI0819 02:57:40.383894 21584 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0819 02:59:03.538924 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87576\nI0819 02:59:03.539216 21584 solver.cpp:404]     Test net output #1: loss = 0.481837 (* 1 = 0.481837 loss)\nI0819 02:59:04.845122 21584 solver.cpp:228] Iteration 19200, loss = 0.0825094\nI0819 02:59:04.845170 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 02:59:04.845191 21584 solver.cpp:244]     Train net output #1: loss = 0.0825094 (* 1 = 0.0825094 loss)\nI0819 02:59:04.947954 21584 sgd_solver.cpp:166] Iteration 19200, lr = 0.48\nI0819 03:01:22.740332 21584 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0819 03:02:46.034234 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87632\nI0819 03:02:46.034525 21584 solver.cpp:404]     Test net output #1: loss = 0.497838 (* 1 = 0.497838 loss)\nI0819 03:02:47.339946 21584 solver.cpp:228] Iteration 19300, loss = 
0.150285\nI0819 03:02:47.339993 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 03:02:47.340014 21584 solver.cpp:244]     Train net output #1: loss = 0.150286 (* 1 = 0.150286 loss)\nI0819 03:02:47.443598 21584 sgd_solver.cpp:166] Iteration 19300, lr = 0.4825\nI0819 03:05:05.239703 21584 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0819 03:06:28.614540 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86968\nI0819 03:06:28.614871 21584 solver.cpp:404]     Test net output #1: loss = 0.542075 (* 1 = 0.542075 loss)\nI0819 03:06:29.922143 21584 solver.cpp:228] Iteration 19400, loss = 0.0666485\nI0819 03:06:29.922191 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:06:29.922214 21584 solver.cpp:244]     Train net output #1: loss = 0.0666486 (* 1 = 0.0666486 loss)\nI0819 03:06:30.021417 21584 sgd_solver.cpp:166] Iteration 19400, lr = 0.485\nI0819 03:08:47.746276 21584 solver.cpp:337] Iteration 19500, Testing net (#0)\nI0819 03:10:10.907444 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87472\nI0819 03:10:10.907757 21584 solver.cpp:404]     Test net output #1: loss = 0.499475 (* 1 = 0.499475 loss)\nI0819 03:10:12.214191 21584 solver.cpp:228] Iteration 19500, loss = 0.0314301\nI0819 03:10:12.214238 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:10:12.214262 21584 solver.cpp:244]     Train net output #1: loss = 0.0314302 (* 1 = 0.0314302 loss)\nI0819 03:10:12.322361 21584 sgd_solver.cpp:166] Iteration 19500, lr = 0.4875\nI0819 03:12:30.060745 21584 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0819 03:13:53.259768 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87132\nI0819 03:13:53.260087 21584 solver.cpp:404]     Test net output #1: loss = 0.535868 (* 1 = 0.535868 loss)\nI0819 03:13:54.566987 21584 solver.cpp:228] Iteration 19600, loss = 0.0240204\nI0819 03:13:54.567032 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 03:13:54.567049 
21584 solver.cpp:244]     Train net output #1: loss = 0.0240204 (* 1 = 0.0240204 loss)\nI0819 03:13:54.675473 21584 sgd_solver.cpp:166] Iteration 19600, lr = 0.49\nI0819 03:16:12.392679 21584 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0819 03:17:35.671211 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8748\nI0819 03:17:35.671504 21584 solver.cpp:404]     Test net output #1: loss = 0.506612 (* 1 = 0.506612 loss)\nI0819 03:17:36.977651 21584 solver.cpp:228] Iteration 19700, loss = 0.0846316\nI0819 03:17:36.977699 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:17:36.977721 21584 solver.cpp:244]     Train net output #1: loss = 0.0846316 (* 1 = 0.0846316 loss)\nI0819 03:17:37.081277 21584 sgd_solver.cpp:166] Iteration 19700, lr = 0.4925\nI0819 03:19:54.790184 21584 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0819 03:21:17.990430 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87632\nI0819 03:21:17.990743 21584 solver.cpp:404]     Test net output #1: loss = 0.504939 (* 1 = 0.504939 loss)\nI0819 03:21:19.297308 21584 solver.cpp:228] Iteration 19800, loss = 0.079888\nI0819 03:21:19.297374 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:21:19.297399 21584 solver.cpp:244]     Train net output #1: loss = 0.079888 (* 1 = 0.079888 loss)\nI0819 03:21:19.401284 21584 sgd_solver.cpp:166] Iteration 19800, lr = 0.495\nI0819 03:23:37.097972 21584 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0819 03:25:00.204954 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8688\nI0819 03:25:00.205238 21584 solver.cpp:404]     Test net output #1: loss = 0.534564 (* 1 = 0.534564 loss)\nI0819 03:25:01.519070 21584 solver.cpp:228] Iteration 19900, loss = 0.140247\nI0819 03:25:01.519116 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 03:25:01.519132 21584 solver.cpp:244]     Train net output #1: loss = 0.140247 (* 1 = 0.140247 loss)\nI0819 03:25:01.618204 21584 
sgd_solver.cpp:166] Iteration 19900, lr = 0.4975\nI0819 03:27:19.331692 21584 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0819 03:28:42.768867 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0819 03:28:42.769201 21584 solver.cpp:404]     Test net output #1: loss = 0.479427 (* 1 = 0.479427 loss)\nI0819 03:28:44.077477 21584 solver.cpp:228] Iteration 20000, loss = 0.0272264\nI0819 03:28:44.077523 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:28:44.077545 21584 solver.cpp:244]     Train net output #1: loss = 0.0272264 (* 1 = 0.0272264 loss)\nI0819 03:28:44.173024 21584 sgd_solver.cpp:166] Iteration 20000, lr = 0.5\nI0819 03:31:01.849885 21584 solver.cpp:337] Iteration 20100, Testing net (#0)\nI0819 03:32:25.287762 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 03:32:25.288103 21584 solver.cpp:404]     Test net output #1: loss = 0.485818 (* 1 = 0.485818 loss)\nI0819 03:32:26.594671 21584 solver.cpp:228] Iteration 20100, loss = 0.0620262\nI0819 03:32:26.594733 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:32:26.594759 21584 solver.cpp:244]     Train net output #1: loss = 0.0620263 (* 1 = 0.0620263 loss)\nI0819 03:32:26.702075 21584 sgd_solver.cpp:166] Iteration 20100, lr = 0.5025\nI0819 03:34:44.338922 21584 solver.cpp:337] Iteration 20200, Testing net (#0)\nI0819 03:36:07.768667 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8712\nI0819 03:36:07.769006 21584 solver.cpp:404]     Test net output #1: loss = 0.54751 (* 1 = 0.54751 loss)\nI0819 03:36:09.075034 21584 solver.cpp:228] Iteration 20200, loss = 0.114409\nI0819 03:36:09.075079 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 03:36:09.075095 21584 solver.cpp:244]     Train net output #1: loss = 0.114409 (* 1 = 0.114409 loss)\nI0819 03:36:09.177047 21584 sgd_solver.cpp:166] Iteration 20200, lr = 0.505\nI0819 03:38:26.824610 21584 solver.cpp:337] Iteration 20300, Testing net 
(#0)\nI0819 03:39:50.218304 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8658\nI0819 03:39:50.218652 21584 solver.cpp:404]     Test net output #1: loss = 0.512668 (* 1 = 0.512668 loss)\nI0819 03:39:51.523710 21584 solver.cpp:228] Iteration 20300, loss = 0.0832521\nI0819 03:39:51.523772 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:39:51.523790 21584 solver.cpp:244]     Train net output #1: loss = 0.0832521 (* 1 = 0.0832521 loss)\nI0819 03:39:51.622753 21584 sgd_solver.cpp:166] Iteration 20300, lr = 0.5075\nI0819 03:42:09.266494 21584 solver.cpp:337] Iteration 20400, Testing net (#0)\nI0819 03:43:32.653914 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87764\nI0819 03:43:32.654250 21584 solver.cpp:404]     Test net output #1: loss = 0.483573 (* 1 = 0.483573 loss)\nI0819 03:43:33.959444 21584 solver.cpp:228] Iteration 20400, loss = 0.109976\nI0819 03:43:33.959503 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:43:33.959522 21584 solver.cpp:244]     Train net output #1: loss = 0.109976 (* 1 = 0.109976 loss)\nI0819 03:43:34.064544 21584 sgd_solver.cpp:166] Iteration 20400, lr = 0.51\nI0819 03:45:51.738916 21584 solver.cpp:337] Iteration 20500, Testing net (#0)\nI0819 03:47:15.150463 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87056\nI0819 03:47:15.150779 21584 solver.cpp:404]     Test net output #1: loss = 0.534816 (* 1 = 0.534816 loss)\nI0819 03:47:16.455783 21584 solver.cpp:228] Iteration 20500, loss = 0.07795\nI0819 03:47:16.455826 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:47:16.455840 21584 solver.cpp:244]     Train net output #1: loss = 0.07795 (* 1 = 0.07795 loss)\nI0819 03:47:16.563443 21584 sgd_solver.cpp:166] Iteration 20500, lr = 0.5125\nI0819 03:49:34.334869 21584 solver.cpp:337] Iteration 20600, Testing net (#0)\nI0819 03:50:57.739464 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87304\nI0819 03:50:57.739799 21584 
solver.cpp:404]     Test net output #1: loss = 0.489738 (* 1 = 0.489738 loss)\nI0819 03:50:59.045148 21584 solver.cpp:228] Iteration 20600, loss = 0.0419884\nI0819 03:50:59.045192 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 03:50:59.045207 21584 solver.cpp:244]     Train net output #1: loss = 0.0419884 (* 1 = 0.0419884 loss)\nI0819 03:50:59.152196 21584 sgd_solver.cpp:166] Iteration 20600, lr = 0.515\nI0819 03:53:16.857692 21584 solver.cpp:337] Iteration 20700, Testing net (#0)\nI0819 03:54:40.263972 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88088\nI0819 03:54:40.264294 21584 solver.cpp:404]     Test net output #1: loss = 0.472612 (* 1 = 0.472612 loss)\nI0819 03:54:41.570206 21584 solver.cpp:228] Iteration 20700, loss = 0.0794769\nI0819 03:54:41.570263 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 03:54:41.570281 21584 solver.cpp:244]     Train net output #1: loss = 0.0794768 (* 1 = 0.0794768 loss)\nI0819 03:54:41.671578 21584 sgd_solver.cpp:166] Iteration 20700, lr = 0.5175\nI0819 03:56:59.419430 21584 solver.cpp:337] Iteration 20800, Testing net (#0)\nI0819 03:58:22.800292 21584 solver.cpp:404]     Test net output #0: accuracy = 0.868\nI0819 03:58:22.800623 21584 solver.cpp:404]     Test net output #1: loss = 0.55438 (* 1 = 0.55438 loss)\nI0819 03:58:24.105733 21584 solver.cpp:228] Iteration 20800, loss = 0.138447\nI0819 03:58:24.105779 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 03:58:24.105795 21584 solver.cpp:244]     Train net output #1: loss = 0.138447 (* 1 = 0.138447 loss)\nI0819 03:58:24.208210 21584 sgd_solver.cpp:166] Iteration 20800, lr = 0.52\nI0819 04:00:42.016599 21584 solver.cpp:337] Iteration 20900, Testing net (#0)\nI0819 04:02:05.471535 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0819 04:02:05.471864 21584 solver.cpp:404]     Test net output #1: loss = 0.490571 (* 1 = 0.490571 loss)\nI0819 04:02:06.777911 21584 solver.cpp:228] 
Iteration 20900, loss = 0.0640592\nI0819 04:02:06.777967 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:02:06.777992 21584 solver.cpp:244]     Train net output #1: loss = 0.0640592 (* 1 = 0.0640592 loss)\nI0819 04:02:06.885721 21584 sgd_solver.cpp:166] Iteration 20900, lr = 0.5225\nI0819 04:04:24.661953 21584 solver.cpp:337] Iteration 21000, Testing net (#0)\nI0819 04:05:48.074164 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0819 04:05:48.074496 21584 solver.cpp:404]     Test net output #1: loss = 0.49657 (* 1 = 0.49657 loss)\nI0819 04:05:49.380002 21584 solver.cpp:228] Iteration 21000, loss = 0.0509165\nI0819 04:05:49.380058 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:05:49.380082 21584 solver.cpp:244]     Train net output #1: loss = 0.0509165 (* 1 = 0.0509165 loss)\nI0819 04:05:49.481873 21584 sgd_solver.cpp:166] Iteration 21000, lr = 0.525\nI0819 04:08:07.229744 21584 solver.cpp:337] Iteration 21100, Testing net (#0)\nI0819 04:09:30.654386 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 04:09:30.654727 21584 solver.cpp:404]     Test net output #1: loss = 0.480498 (* 1 = 0.480498 loss)\nI0819 04:09:31.960067 21584 solver.cpp:228] Iteration 21100, loss = 0.0556877\nI0819 04:09:31.960109 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:09:31.960132 21584 solver.cpp:244]     Train net output #1: loss = 0.0556877 (* 1 = 0.0556877 loss)\nI0819 04:09:32.065587 21584 sgd_solver.cpp:166] Iteration 21100, lr = 0.5275\nI0819 04:11:49.864377 21584 solver.cpp:337] Iteration 21200, Testing net (#0)\nI0819 04:13:13.251808 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87428\nI0819 04:13:13.252123 21584 solver.cpp:404]     Test net output #1: loss = 0.510289 (* 1 = 0.510289 loss)\nI0819 04:13:14.556968 21584 solver.cpp:228] Iteration 21200, loss = 0.0727585\nI0819 04:13:14.557008 21584 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI0819 04:13:14.557024 21584 solver.cpp:244]     Train net output #1: loss = 0.0727585 (* 1 = 0.0727585 loss)\nI0819 04:13:14.662876 21584 sgd_solver.cpp:166] Iteration 21200, lr = 0.53\nI0819 04:15:32.366684 21584 solver.cpp:337] Iteration 21300, Testing net (#0)\nI0819 04:16:55.768244 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8744\nI0819 04:16:55.768555 21584 solver.cpp:404]     Test net output #1: loss = 0.470421 (* 1 = 0.470421 loss)\nI0819 04:16:57.074070 21584 solver.cpp:228] Iteration 21300, loss = 0.134652\nI0819 04:16:57.074116 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 04:16:57.074132 21584 solver.cpp:244]     Train net output #1: loss = 0.134652 (* 1 = 0.134652 loss)\nI0819 04:16:57.182363 21584 sgd_solver.cpp:166] Iteration 21300, lr = 0.5325\nI0819 04:19:14.851203 21584 solver.cpp:337] Iteration 21400, Testing net (#0)\nI0819 04:20:38.255965 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87128\nI0819 04:20:38.256276 21584 solver.cpp:404]     Test net output #1: loss = 0.503574 (* 1 = 0.503574 loss)\nI0819 04:20:39.563223 21584 solver.cpp:228] Iteration 21400, loss = 0.0928525\nI0819 04:20:39.563269 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:20:39.563287 21584 solver.cpp:244]     Train net output #1: loss = 0.0928525 (* 1 = 0.0928525 loss)\nI0819 04:20:39.667212 21584 sgd_solver.cpp:166] Iteration 21400, lr = 0.535\nI0819 04:22:57.452621 21584 solver.cpp:337] Iteration 21500, Testing net (#0)\nI0819 04:24:20.850174 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87804\nI0819 04:24:20.850486 21584 solver.cpp:404]     Test net output #1: loss = 0.484883 (* 1 = 0.484883 loss)\nI0819 04:24:22.155653 21584 solver.cpp:228] Iteration 21500, loss = 0.121213\nI0819 04:24:22.155699 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 04:24:22.155714 21584 solver.cpp:244]     Train net output #1: loss = 0.121213 (* 1 = 0.121213 loss)\nI0819 
04:24:22.264834 21584 sgd_solver.cpp:166] Iteration 21500, lr = 0.5375\nI0819 04:26:39.973392 21584 solver.cpp:337] Iteration 21600, Testing net (#0)\nI0819 04:28:03.403641 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0819 04:28:03.403959 21584 solver.cpp:404]     Test net output #1: loss = 0.481911 (* 1 = 0.481911 loss)\nI0819 04:28:04.710412 21584 solver.cpp:228] Iteration 21600, loss = 0.0613996\nI0819 04:28:04.710455 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:28:04.710472 21584 solver.cpp:244]     Train net output #1: loss = 0.0613995 (* 1 = 0.0613995 loss)\nI0819 04:28:04.814851 21584 sgd_solver.cpp:166] Iteration 21600, lr = 0.54\nI0819 04:30:22.625371 21584 solver.cpp:337] Iteration 21700, Testing net (#0)\nI0819 04:31:46.025018 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88004\nI0819 04:31:46.025362 21584 solver.cpp:404]     Test net output #1: loss = 0.474821 (* 1 = 0.474821 loss)\nI0819 04:31:47.331246 21584 solver.cpp:228] Iteration 21700, loss = 0.0317977\nI0819 04:31:47.331306 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 04:31:47.331323 21584 solver.cpp:244]     Train net output #1: loss = 0.0317976 (* 1 = 0.0317976 loss)\nI0819 04:31:47.434293 21584 sgd_solver.cpp:166] Iteration 21700, lr = 0.5425\nI0819 04:34:05.211736 21584 solver.cpp:337] Iteration 21800, Testing net (#0)\nI0819 04:35:28.639008 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87232\nI0819 04:35:28.639328 21584 solver.cpp:404]     Test net output #1: loss = 0.520207 (* 1 = 0.520207 loss)\nI0819 04:35:29.944687 21584 solver.cpp:228] Iteration 21800, loss = 0.104083\nI0819 04:35:29.944730 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:35:29.944746 21584 solver.cpp:244]     Train net output #1: loss = 0.104083 (* 1 = 0.104083 loss)\nI0819 04:35:30.057492 21584 sgd_solver.cpp:166] Iteration 21800, lr = 0.545\nI0819 04:37:47.866096 21584 solver.cpp:337] 
Iteration 21900, Testing net (#0)\nI0819 04:39:11.283823 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87156\nI0819 04:39:11.284162 21584 solver.cpp:404]     Test net output #1: loss = 0.51783 (* 1 = 0.51783 loss)\nI0819 04:39:12.598873 21584 solver.cpp:228] Iteration 21900, loss = 0.095252\nI0819 04:39:12.598920 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:39:12.598942 21584 solver.cpp:244]     Train net output #1: loss = 0.0952519 (* 1 = 0.0952519 loss)\nI0819 04:39:12.694984 21584 sgd_solver.cpp:166] Iteration 21900, lr = 0.5475\nI0819 04:41:31.002068 21584 solver.cpp:337] Iteration 22000, Testing net (#0)\nI0819 04:42:54.428594 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87364\nI0819 04:42:54.428927 21584 solver.cpp:404]     Test net output #1: loss = 0.49851 (* 1 = 0.49851 loss)\nI0819 04:42:55.735108 21584 solver.cpp:228] Iteration 22000, loss = 0.0226022\nI0819 04:42:55.735155 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 04:42:55.735177 21584 solver.cpp:244]     Train net output #1: loss = 0.0226022 (* 1 = 0.0226022 loss)\nI0819 04:42:55.845276 21584 sgd_solver.cpp:166] Iteration 22000, lr = 0.55\nI0819 04:45:14.494930 21584 solver.cpp:337] Iteration 22100, Testing net (#0)\nI0819 04:46:37.034855 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87396\nI0819 04:46:37.035181 21584 solver.cpp:404]     Test net output #1: loss = 0.506196 (* 1 = 0.506196 loss)\nI0819 04:46:38.339273 21584 solver.cpp:228] Iteration 22100, loss = 0.144866\nI0819 04:46:38.339319 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 04:46:38.339342 21584 solver.cpp:244]     Train net output #1: loss = 0.144866 (* 1 = 0.144866 loss)\nI0819 04:46:38.454313 21584 sgd_solver.cpp:166] Iteration 22100, lr = 0.5525\nI0819 04:48:56.836194 21584 solver.cpp:337] Iteration 22200, Testing net (#0)\nI0819 04:50:19.317385 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0819 
04:50:19.317653 21584 solver.cpp:404]     Test net output #1: loss = 0.504716 (* 1 = 0.504716 loss)\nI0819 04:50:20.620470 21584 solver.cpp:228] Iteration 22200, loss = 0.12128\nI0819 04:50:20.620517 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 04:50:20.620542 21584 solver.cpp:244]     Train net output #1: loss = 0.12128 (* 1 = 0.12128 loss)\nI0819 04:50:20.735993 21584 sgd_solver.cpp:166] Iteration 22200, lr = 0.555\nI0819 04:52:39.150986 21584 solver.cpp:337] Iteration 22300, Testing net (#0)\nI0819 04:54:01.638598 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87788\nI0819 04:54:01.638880 21584 solver.cpp:404]     Test net output #1: loss = 0.461945 (* 1 = 0.461945 loss)\nI0819 04:54:02.940742 21584 solver.cpp:228] Iteration 22300, loss = 0.0603504\nI0819 04:54:02.940791 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 04:54:02.940816 21584 solver.cpp:244]     Train net output #1: loss = 0.0603503 (* 1 = 0.0603503 loss)\nI0819 04:54:03.054980 21584 sgd_solver.cpp:166] Iteration 22300, lr = 0.5575\nI0819 04:56:21.461622 21584 solver.cpp:337] Iteration 22400, Testing net (#0)\nI0819 04:57:43.945523 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 04:57:43.945783 21584 solver.cpp:404]     Test net output #1: loss = 0.481989 (* 1 = 0.481989 loss)\nI0819 04:57:45.249156 21584 solver.cpp:228] Iteration 22400, loss = 0.0730381\nI0819 04:57:45.249204 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 04:57:45.249228 21584 solver.cpp:244]     Train net output #1: loss = 0.073038 (* 1 = 0.073038 loss)\nI0819 04:57:45.358575 21584 sgd_solver.cpp:166] Iteration 22400, lr = 0.56\nI0819 05:00:03.873965 21584 solver.cpp:337] Iteration 22500, Testing net (#0)\nI0819 05:01:26.350986 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87296\nI0819 05:01:26.351225 21584 solver.cpp:404]     Test net output #1: loss = 0.489679 (* 1 = 0.489679 loss)\nI0819 05:01:27.654525 
21584 solver.cpp:228] Iteration 22500, loss = 0.030146\nI0819 05:01:27.654573 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:01:27.654597 21584 solver.cpp:244]     Train net output #1: loss = 0.0301459 (* 1 = 0.0301459 loss)\nI0819 05:01:27.766558 21584 sgd_solver.cpp:166] Iteration 22500, lr = 0.5625\nI0819 05:03:45.646553 21584 solver.cpp:337] Iteration 22600, Testing net (#0)\nI0819 05:05:08.134160 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87816\nI0819 05:05:08.134443 21584 solver.cpp:404]     Test net output #1: loss = 0.489302 (* 1 = 0.489302 loss)\nI0819 05:05:09.436672 21584 solver.cpp:228] Iteration 22600, loss = 0.0344814\nI0819 05:05:09.436717 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 05:05:09.436739 21584 solver.cpp:244]     Train net output #1: loss = 0.0344813 (* 1 = 0.0344813 loss)\nI0819 05:05:09.544534 21584 sgd_solver.cpp:166] Iteration 22600, lr = 0.565\nI0819 05:07:27.423719 21584 solver.cpp:337] Iteration 22700, Testing net (#0)\nI0819 05:08:49.912737 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0819 05:08:49.913020 21584 solver.cpp:404]     Test net output #1: loss = 0.450815 (* 1 = 0.450815 loss)\nI0819 05:08:51.216111 21584 solver.cpp:228] Iteration 22700, loss = 0.048469\nI0819 05:08:51.216156 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:08:51.216179 21584 solver.cpp:244]     Train net output #1: loss = 0.0484689 (* 1 = 0.0484689 loss)\nI0819 05:08:51.318832 21584 sgd_solver.cpp:166] Iteration 22700, lr = 0.5675\nI0819 05:11:09.096797 21584 solver.cpp:337] Iteration 22800, Testing net (#0)\nI0819 05:12:31.537645 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0819 05:12:31.537885 21584 solver.cpp:404]     Test net output #1: loss = 0.477722 (* 1 = 0.477722 loss)\nI0819 05:12:32.840216 21584 solver.cpp:228] Iteration 22800, loss = 0.0962249\nI0819 05:12:32.840260 21584 solver.cpp:244]     Train net output 
#0: accuracy = 0.952\nI0819 05:12:32.840283 21584 solver.cpp:244]     Train net output #1: loss = 0.0962248 (* 1 = 0.0962248 loss)\nI0819 05:12:32.952721 21584 sgd_solver.cpp:166] Iteration 22800, lr = 0.57\nI0819 05:14:50.842360 21584 solver.cpp:337] Iteration 22900, Testing net (#0)\nI0819 05:16:13.272881 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0819 05:16:13.273169 21584 solver.cpp:404]     Test net output #1: loss = 0.469847 (* 1 = 0.469847 loss)\nI0819 05:16:14.575572 21584 solver.cpp:228] Iteration 22900, loss = 0.0464506\nI0819 05:16:14.575616 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:16:14.575640 21584 solver.cpp:244]     Train net output #1: loss = 0.0464505 (* 1 = 0.0464505 loss)\nI0819 05:16:14.679829 21584 sgd_solver.cpp:166] Iteration 22900, lr = 0.5725\nI0819 05:18:32.562613 21584 solver.cpp:337] Iteration 23000, Testing net (#0)\nI0819 05:19:55.047996 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87552\nI0819 05:19:55.048254 21584 solver.cpp:404]     Test net output #1: loss = 0.500319 (* 1 = 0.500319 loss)\nI0819 05:19:56.351384 21584 solver.cpp:228] Iteration 23000, loss = 0.0609332\nI0819 05:19:56.351426 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:19:56.351450 21584 solver.cpp:244]     Train net output #1: loss = 0.0609331 (* 1 = 0.0609331 loss)\nI0819 05:19:56.462936 21584 sgd_solver.cpp:166] Iteration 23000, lr = 0.575\nI0819 05:22:14.364281 21584 solver.cpp:337] Iteration 23100, Testing net (#0)\nI0819 05:23:36.855316 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87976\nI0819 05:23:36.855578 21584 solver.cpp:404]     Test net output #1: loss = 0.47698 (* 1 = 0.47698 loss)\nI0819 05:23:38.159009 21584 solver.cpp:228] Iteration 23100, loss = 0.0361982\nI0819 05:23:38.159055 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:23:38.159080 21584 solver.cpp:244]     Train net output #1: loss = 0.0361981 (* 1 = 
0.0361981 loss)\nI0819 05:23:38.267319 21584 sgd_solver.cpp:166] Iteration 23100, lr = 0.5775\nI0819 05:25:56.153923 21584 solver.cpp:337] Iteration 23200, Testing net (#0)\nI0819 05:27:18.644207 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87424\nI0819 05:27:18.644438 21584 solver.cpp:404]     Test net output #1: loss = 0.494615 (* 1 = 0.494615 loss)\nI0819 05:27:19.947298 21584 solver.cpp:228] Iteration 23200, loss = 0.0301291\nI0819 05:27:19.947343 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:27:19.947367 21584 solver.cpp:244]     Train net output #1: loss = 0.0301291 (* 1 = 0.0301291 loss)\nI0819 05:27:20.055472 21584 sgd_solver.cpp:166] Iteration 23200, lr = 0.58\nI0819 05:29:37.885169 21584 solver.cpp:337] Iteration 23300, Testing net (#0)\nI0819 05:31:00.368413 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88176\nI0819 05:31:00.368669 21584 solver.cpp:404]     Test net output #1: loss = 0.469022 (* 1 = 0.469022 loss)\nI0819 05:31:01.671058 21584 solver.cpp:228] Iteration 23300, loss = 0.0527986\nI0819 05:31:01.671103 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:31:01.671126 21584 solver.cpp:244]     Train net output #1: loss = 0.0527985 (* 1 = 0.0527985 loss)\nI0819 05:31:01.781605 21584 sgd_solver.cpp:166] Iteration 23300, lr = 0.5825\nI0819 05:33:19.639168 21584 solver.cpp:337] Iteration 23400, Testing net (#0)\nI0819 05:34:42.109802 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881521\nI0819 05:34:42.110090 21584 solver.cpp:404]     Test net output #1: loss = 0.479725 (* 1 = 0.479725 loss)\nI0819 05:34:43.412883 21584 solver.cpp:228] Iteration 23400, loss = 0.0709116\nI0819 05:34:43.412933 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:34:43.412956 21584 solver.cpp:244]     Train net output #1: loss = 0.0709116 (* 1 = 0.0709116 loss)\nI0819 05:34:43.522569 21584 sgd_solver.cpp:166] Iteration 23400, lr = 0.585\nI0819 05:37:01.421046 
21584 solver.cpp:337] Iteration 23500, Testing net (#0)\nI0819 05:38:23.898329 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88464\nI0819 05:38:23.898602 21584 solver.cpp:404]     Test net output #1: loss = 0.437948 (* 1 = 0.437948 loss)\nI0819 05:38:25.201112 21584 solver.cpp:228] Iteration 23500, loss = 0.088297\nI0819 05:38:25.201158 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 05:38:25.201181 21584 solver.cpp:244]     Train net output #1: loss = 0.0882969 (* 1 = 0.0882969 loss)\nI0819 05:38:25.308746 21584 sgd_solver.cpp:166] Iteration 23500, lr = 0.5875\nI0819 05:40:43.095549 21584 solver.cpp:337] Iteration 23600, Testing net (#0)\nI0819 05:42:05.584714 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0819 05:42:05.584995 21584 solver.cpp:404]     Test net output #1: loss = 0.482089 (* 1 = 0.482089 loss)\nI0819 05:42:06.888196 21584 solver.cpp:228] Iteration 23600, loss = 0.0972245\nI0819 05:42:06.888238 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 05:42:06.888254 21584 solver.cpp:244]     Train net output #1: loss = 0.0972244 (* 1 = 0.0972244 loss)\nI0819 05:42:07.000030 21584 sgd_solver.cpp:166] Iteration 23600, lr = 0.59\nI0819 05:44:24.846488 21584 solver.cpp:337] Iteration 23700, Testing net (#0)\nI0819 05:45:47.315629 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0819 05:45:47.315886 21584 solver.cpp:404]     Test net output #1: loss = 0.478908 (* 1 = 0.478908 loss)\nI0819 05:45:48.619253 21584 solver.cpp:228] Iteration 23700, loss = 0.0444181\nI0819 05:45:48.619297 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:45:48.619313 21584 solver.cpp:244]     Train net output #1: loss = 0.044418 (* 1 = 0.044418 loss)\nI0819 05:45:48.726282 21584 sgd_solver.cpp:166] Iteration 23700, lr = 0.5925\nI0819 05:48:06.713593 21584 solver.cpp:337] Iteration 23800, Testing net (#0)\nI0819 05:49:29.186260 21584 solver.cpp:404]     Test net output 
#0: accuracy = 0.87764\nI0819 05:49:29.186514 21584 solver.cpp:404]     Test net output #1: loss = 0.49718 (* 1 = 0.49718 loss)\nI0819 05:49:30.488270 21584 solver.cpp:228] Iteration 23800, loss = 0.0743498\nI0819 05:49:30.488313 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 05:49:30.488329 21584 solver.cpp:244]     Train net output #1: loss = 0.0743497 (* 1 = 0.0743497 loss)\nI0819 05:49:30.598089 21584 sgd_solver.cpp:166] Iteration 23800, lr = 0.595\nI0819 05:51:48.513689 21584 solver.cpp:337] Iteration 23900, Testing net (#0)\nI0819 05:53:10.993055 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87128\nI0819 05:53:10.993335 21584 solver.cpp:404]     Test net output #1: loss = 0.532988 (* 1 = 0.532988 loss)\nI0819 05:53:12.296160 21584 solver.cpp:228] Iteration 23900, loss = 0.098748\nI0819 05:53:12.296205 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 05:53:12.296221 21584 solver.cpp:244]     Train net output #1: loss = 0.0987479 (* 1 = 0.0987479 loss)\nI0819 05:53:12.402698 21584 sgd_solver.cpp:166] Iteration 23900, lr = 0.5975\nI0819 05:55:30.342931 21584 solver.cpp:337] Iteration 24000, Testing net (#0)\nI0819 05:56:52.808665 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87552\nI0819 05:56:52.808954 21584 solver.cpp:404]     Test net output #1: loss = 0.522124 (* 1 = 0.522124 loss)\nI0819 05:56:54.112202 21584 solver.cpp:228] Iteration 24000, loss = 0.0255437\nI0819 05:56:54.112247 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 05:56:54.112263 21584 solver.cpp:244]     Train net output #1: loss = 0.0255436 (* 1 = 0.0255436 loss)\nI0819 05:56:54.222790 21584 sgd_solver.cpp:166] Iteration 24000, lr = 0.6\nI0819 05:59:12.075016 21584 solver.cpp:337] Iteration 24100, Testing net (#0)\nI0819 06:00:34.478162 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88316\nI0819 06:00:34.478457 21584 solver.cpp:404]     Test net output #1: loss = 0.470167 (* 1 = 
0.470167 loss)\nI0819 06:00:35.780772 21584 solver.cpp:228] Iteration 24100, loss = 0.0704635\nI0819 06:00:35.780815 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:00:35.780831 21584 solver.cpp:244]     Train net output #1: loss = 0.0704635 (* 1 = 0.0704635 loss)\nI0819 06:00:35.891495 21584 sgd_solver.cpp:166] Iteration 24100, lr = 0.6025\nI0819 06:02:53.810472 21584 solver.cpp:337] Iteration 24200, Testing net (#0)\nI0819 06:04:16.176230 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87776\nI0819 06:04:16.176504 21584 solver.cpp:404]     Test net output #1: loss = 0.492219 (* 1 = 0.492219 loss)\nI0819 06:04:17.479589 21584 solver.cpp:228] Iteration 24200, loss = 0.190185\nI0819 06:04:17.479632 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 06:04:17.479648 21584 solver.cpp:244]     Train net output #1: loss = 0.190185 (* 1 = 0.190185 loss)\nI0819 06:04:17.583670 21584 sgd_solver.cpp:166] Iteration 24200, lr = 0.605\nI0819 06:06:35.535488 21584 solver.cpp:337] Iteration 24300, Testing net (#0)\nI0819 06:07:57.901278 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87144\nI0819 06:07:57.901540 21584 solver.cpp:404]     Test net output #1: loss = 0.497096 (* 1 = 0.497096 loss)\nI0819 06:07:59.203680 21584 solver.cpp:228] Iteration 24300, loss = 0.0391567\nI0819 06:07:59.203721 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:07:59.203737 21584 solver.cpp:244]     Train net output #1: loss = 0.0391566 (* 1 = 0.0391566 loss)\nI0819 06:07:59.309731 21584 sgd_solver.cpp:166] Iteration 24300, lr = 0.6075\nI0819 06:10:17.159111 21584 solver.cpp:337] Iteration 24400, Testing net (#0)\nI0819 06:11:39.519685 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0819 06:11:39.519958 21584 solver.cpp:404]     Test net output #1: loss = 0.470811 (* 1 = 0.470811 loss)\nI0819 06:11:40.822481 21584 solver.cpp:228] Iteration 24400, loss = 0.0237534\nI0819 06:11:40.822521 
21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 06:11:40.822537 21584 solver.cpp:244]     Train net output #1: loss = 0.0237533 (* 1 = 0.0237533 loss)\nI0819 06:11:40.929729 21584 sgd_solver.cpp:166] Iteration 24400, lr = 0.61\nI0819 06:13:58.884656 21584 solver.cpp:337] Iteration 24500, Testing net (#0)\nI0819 06:15:21.252811 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87112\nI0819 06:15:21.253098 21584 solver.cpp:404]     Test net output #1: loss = 0.503634 (* 1 = 0.503634 loss)\nI0819 06:15:22.555970 21584 solver.cpp:228] Iteration 24500, loss = 0.0524026\nI0819 06:15:22.556012 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:15:22.556028 21584 solver.cpp:244]     Train net output #1: loss = 0.0524025 (* 1 = 0.0524025 loss)\nI0819 06:15:22.661078 21584 sgd_solver.cpp:166] Iteration 24500, lr = 0.6125\nI0819 06:17:40.548445 21584 solver.cpp:337] Iteration 24600, Testing net (#0)\nI0819 06:19:02.910845 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0819 06:19:02.911136 21584 solver.cpp:404]     Test net output #1: loss = 0.490159 (* 1 = 0.490159 loss)\nI0819 06:19:04.213414 21584 solver.cpp:228] Iteration 24600, loss = 0.06344\nI0819 06:19:04.213455 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:19:04.213472 21584 solver.cpp:244]     Train net output #1: loss = 0.0634399 (* 1 = 0.0634399 loss)\nI0819 06:19:04.323582 21584 sgd_solver.cpp:166] Iteration 24600, lr = 0.615\nI0819 06:21:22.249744 21584 solver.cpp:337] Iteration 24700, Testing net (#0)\nI0819 06:22:44.624189 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87468\nI0819 06:22:44.624464 21584 solver.cpp:404]     Test net output #1: loss = 0.489621 (* 1 = 0.489621 loss)\nI0819 06:22:45.926622 21584 solver.cpp:228] Iteration 24700, loss = 0.110173\nI0819 06:22:45.926664 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:22:45.926681 21584 solver.cpp:244]     Train net 
output #1: loss = 0.110173 (* 1 = 0.110173 loss)\nI0819 06:22:46.031610 21584 sgd_solver.cpp:166] Iteration 24700, lr = 0.6175\nI0819 06:25:04.031086 21584 solver.cpp:337] Iteration 24800, Testing net (#0)\nI0819 06:26:26.352841 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87824\nI0819 06:26:26.353111 21584 solver.cpp:404]     Test net output #1: loss = 0.47555 (* 1 = 0.47555 loss)\nI0819 06:26:27.655336 21584 solver.cpp:228] Iteration 24800, loss = 0.122749\nI0819 06:26:27.655377 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:26:27.655393 21584 solver.cpp:244]     Train net output #1: loss = 0.122749 (* 1 = 0.122749 loss)\nI0819 06:26:27.763608 21584 sgd_solver.cpp:166] Iteration 24800, lr = 0.62\nI0819 06:28:45.755059 21584 solver.cpp:337] Iteration 24900, Testing net (#0)\nI0819 06:30:08.127739 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 06:30:08.128003 21584 solver.cpp:404]     Test net output #1: loss = 0.469453 (* 1 = 0.469453 loss)\nI0819 06:30:09.430335 21584 solver.cpp:228] Iteration 24900, loss = 0.110512\nI0819 06:30:09.430378 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:30:09.430395 21584 solver.cpp:244]     Train net output #1: loss = 0.110512 (* 1 = 0.110512 loss)\nI0819 06:30:09.540714 21584 sgd_solver.cpp:166] Iteration 24900, lr = 0.6225\nI0819 06:32:27.548151 21584 solver.cpp:337] Iteration 25000, Testing net (#0)\nI0819 06:33:49.851111 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87376\nI0819 06:33:49.851347 21584 solver.cpp:404]     Test net output #1: loss = 0.497703 (* 1 = 0.497703 loss)\nI0819 06:33:51.153930 21584 solver.cpp:228] Iteration 25000, loss = 0.118429\nI0819 06:33:51.153975 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 06:33:51.153990 21584 solver.cpp:244]     Train net output #1: loss = 0.118429 (* 1 = 0.118429 loss)\nI0819 06:33:51.263233 21584 sgd_solver.cpp:166] Iteration 25000, lr = 
0.625\nI0819 06:36:09.145061 21584 solver.cpp:337] Iteration 25100, Testing net (#0)\nI0819 06:37:31.613303 21584 solver.cpp:404]     Test net output #0: accuracy = 0.874\nI0819 06:37:31.613575 21584 solver.cpp:404]     Test net output #1: loss = 0.49167 (* 1 = 0.49167 loss)\nI0819 06:37:32.916606 21584 solver.cpp:228] Iteration 25100, loss = 0.0677706\nI0819 06:37:32.916649 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 06:37:32.916666 21584 solver.cpp:244]     Train net output #1: loss = 0.0677706 (* 1 = 0.0677706 loss)\nI0819 06:37:33.024019 21584 sgd_solver.cpp:166] Iteration 25100, lr = 0.6275\nI0819 06:39:51.014966 21584 solver.cpp:337] Iteration 25200, Testing net (#0)\nI0819 06:41:13.485324 21584 solver.cpp:404]     Test net output #0: accuracy = 0.873201\nI0819 06:41:13.485605 21584 solver.cpp:404]     Test net output #1: loss = 0.487895 (* 1 = 0.487895 loss)\nI0819 06:41:14.787602 21584 solver.cpp:228] Iteration 25200, loss = 0.142883\nI0819 06:41:14.787645 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:41:14.787662 21584 solver.cpp:244]     Train net output #1: loss = 0.142883 (* 1 = 0.142883 loss)\nI0819 06:41:14.898788 21584 sgd_solver.cpp:166] Iteration 25200, lr = 0.63\nI0819 06:43:32.815106 21584 solver.cpp:337] Iteration 25300, Testing net (#0)\nI0819 06:44:55.283838 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87604\nI0819 06:44:55.284119 21584 solver.cpp:404]     Test net output #1: loss = 0.475838 (* 1 = 0.475838 loss)\nI0819 06:44:56.586889 21584 solver.cpp:228] Iteration 25300, loss = 0.0841037\nI0819 06:44:56.586938 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:44:56.586956 21584 solver.cpp:244]     Train net output #1: loss = 0.0841037 (* 1 = 0.0841037 loss)\nI0819 06:44:56.694582 21584 sgd_solver.cpp:166] Iteration 25300, lr = 0.6325\nI0819 06:47:14.623203 21584 solver.cpp:337] Iteration 25400, Testing net (#0)\nI0819 06:48:37.091711 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.88292\nI0819 06:48:37.091989 21584 solver.cpp:404]     Test net output #1: loss = 0.451539 (* 1 = 0.451539 loss)\nI0819 06:48:38.395531 21584 solver.cpp:228] Iteration 25400, loss = 0.0534119\nI0819 06:48:38.395576 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 06:48:38.395592 21584 solver.cpp:244]     Train net output #1: loss = 0.0534119 (* 1 = 0.0534119 loss)\nI0819 06:48:38.501557 21584 sgd_solver.cpp:166] Iteration 25400, lr = 0.635\nI0819 06:50:56.444898 21584 solver.cpp:337] Iteration 25500, Testing net (#0)\nI0819 06:52:18.934098 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87008\nI0819 06:52:18.934373 21584 solver.cpp:404]     Test net output #1: loss = 0.49182 (* 1 = 0.49182 loss)\nI0819 06:52:20.236582 21584 solver.cpp:228] Iteration 25500, loss = 0.119868\nI0819 06:52:20.236629 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 06:52:20.236660 21584 solver.cpp:244]     Train net output #1: loss = 0.119868 (* 1 = 0.119868 loss)\nI0819 06:52:20.347242 21584 sgd_solver.cpp:166] Iteration 25500, lr = 0.6375\nI0819 06:54:38.361800 21584 solver.cpp:337] Iteration 25600, Testing net (#0)\nI0819 06:56:01.806671 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88108\nI0819 06:56:01.807019 21584 solver.cpp:404]     Test net output #1: loss = 0.459223 (* 1 = 0.459223 loss)\nI0819 06:56:03.114300 21584 solver.cpp:228] Iteration 25600, loss = 0.103888\nI0819 06:56:03.114342 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 06:56:03.114359 21584 solver.cpp:244]     Train net output #1: loss = 0.103888 (* 1 = 0.103888 loss)\nI0819 06:56:03.215106 21584 sgd_solver.cpp:166] Iteration 25600, lr = 0.64\nI0819 06:58:21.383561 21584 solver.cpp:337] Iteration 25700, Testing net (#0)\nI0819 06:59:44.102262 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 06:59:44.102551 21584 solver.cpp:404]     Test net output #1: 
loss = 0.456055 (* 1 = 0.456055 loss)\nI0819 06:59:45.406314 21584 solver.cpp:228] Iteration 25700, loss = 0.0614105\nI0819 06:59:45.406361 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 06:59:45.406378 21584 solver.cpp:244]     Train net output #1: loss = 0.0614105 (* 1 = 0.0614105 loss)\nI0819 06:59:45.516455 21584 sgd_solver.cpp:166] Iteration 25700, lr = 0.6425\nI0819 07:02:03.494851 21584 solver.cpp:337] Iteration 25800, Testing net (#0)\nI0819 07:03:25.966411 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8792\nI0819 07:03:25.966677 21584 solver.cpp:404]     Test net output #1: loss = 0.460468 (* 1 = 0.460468 loss)\nI0819 07:03:27.268947 21584 solver.cpp:228] Iteration 25800, loss = 0.0147342\nI0819 07:03:27.269132 21584 solver.cpp:244]     Train net output #0: accuracy = 1\nI0819 07:03:27.269162 21584 solver.cpp:244]     Train net output #1: loss = 0.0147342 (* 1 = 0.0147342 loss)\nI0819 07:03:27.378780 21584 sgd_solver.cpp:166] Iteration 25800, lr = 0.645\nI0819 07:05:45.323678 21584 solver.cpp:337] Iteration 25900, Testing net (#0)\nI0819 07:07:07.796180 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0819 07:07:07.796463 21584 solver.cpp:404]     Test net output #1: loss = 0.457994 (* 1 = 0.457994 loss)\nI0819 07:07:09.098582 21584 solver.cpp:228] Iteration 25900, loss = 0.0581094\nI0819 07:07:09.098626 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:07:09.098649 21584 solver.cpp:244]     Train net output #1: loss = 0.0581095 (* 1 = 0.0581095 loss)\nI0819 07:07:09.205044 21584 sgd_solver.cpp:166] Iteration 25900, lr = 0.6475\nI0819 07:09:27.190898 21584 solver.cpp:337] Iteration 26000, Testing net (#0)\nI0819 07:10:49.661100 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8726\nI0819 07:10:49.661360 21584 solver.cpp:404]     Test net output #1: loss = 0.490692 (* 1 = 0.490692 loss)\nI0819 07:10:50.963678 21584 solver.cpp:228] Iteration 26000, loss = 
0.0537677\nI0819 07:10:50.963723 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:10:50.963747 21584 solver.cpp:244]     Train net output #1: loss = 0.0537677 (* 1 = 0.0537677 loss)\nI0819 07:10:51.072613 21584 sgd_solver.cpp:166] Iteration 26000, lr = 0.65\nI0819 07:13:09.180430 21584 solver.cpp:337] Iteration 26100, Testing net (#0)\nI0819 07:14:31.653627 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0819 07:14:31.653889 21584 solver.cpp:404]     Test net output #1: loss = 0.483032 (* 1 = 0.483032 loss)\nI0819 07:14:32.956076 21584 solver.cpp:228] Iteration 26100, loss = 0.0721378\nI0819 07:14:32.956121 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 07:14:32.956145 21584 solver.cpp:244]     Train net output #1: loss = 0.0721379 (* 1 = 0.0721379 loss)\nI0819 07:14:33.062960 21584 sgd_solver.cpp:166] Iteration 26100, lr = 0.6525\nI0819 07:16:50.992771 21584 solver.cpp:337] Iteration 26200, Testing net (#0)\nI0819 07:18:13.466297 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87784\nI0819 07:18:13.466562 21584 solver.cpp:404]     Test net output #1: loss = 0.482637 (* 1 = 0.482637 loss)\nI0819 07:18:14.769551 21584 solver.cpp:228] Iteration 26200, loss = 0.0959421\nI0819 07:18:14.769595 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:18:14.769619 21584 solver.cpp:244]     Train net output #1: loss = 0.0959421 (* 1 = 0.0959421 loss)\nI0819 07:18:14.880580 21584 sgd_solver.cpp:166] Iteration 26200, lr = 0.655\nI0819 07:20:32.716879 21584 solver.cpp:337] Iteration 26300, Testing net (#0)\nI0819 07:21:56.133891 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87596\nI0819 07:21:56.134234 21584 solver.cpp:404]     Test net output #1: loss = 0.492871 (* 1 = 0.492871 loss)\nI0819 07:21:57.440310 21584 solver.cpp:228] Iteration 26300, loss = 0.133376\nI0819 07:21:57.440371 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 07:21:57.440388 
21584 solver.cpp:244]     Train net output #1: loss = 0.133376 (* 1 = 0.133376 loss)\nI0819 07:21:57.542245 21584 sgd_solver.cpp:166] Iteration 26300, lr = 0.6575\nI0819 07:24:15.258646 21584 solver.cpp:337] Iteration 26400, Testing net (#0)\nI0819 07:25:38.650001 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87284\nI0819 07:25:38.650300 21584 solver.cpp:404]     Test net output #1: loss = 0.497457 (* 1 = 0.497457 loss)\nI0819 07:25:39.955566 21584 solver.cpp:228] Iteration 26400, loss = 0.0495786\nI0819 07:25:39.955610 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 07:25:39.955626 21584 solver.cpp:244]     Train net output #1: loss = 0.0495786 (* 1 = 0.0495786 loss)\nI0819 07:25:40.060961 21584 sgd_solver.cpp:166] Iteration 26400, lr = 0.66\nI0819 07:27:57.804394 21584 solver.cpp:337] Iteration 26500, Testing net (#0)\nI0819 07:29:21.202428 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8764\nI0819 07:29:21.202775 21584 solver.cpp:404]     Test net output #1: loss = 0.480138 (* 1 = 0.480138 loss)\nI0819 07:29:22.508471 21584 solver.cpp:228] Iteration 26500, loss = 0.0882006\nI0819 07:29:22.508514 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 07:29:22.508530 21584 solver.cpp:244]     Train net output #1: loss = 0.0882006 (* 1 = 0.0882006 loss)\nI0819 07:29:22.609730 21584 sgd_solver.cpp:166] Iteration 26500, lr = 0.6625\nI0819 07:31:40.415789 21584 solver.cpp:337] Iteration 26600, Testing net (#0)\nI0819 07:33:03.811506 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 07:33:03.811822 21584 solver.cpp:404]     Test net output #1: loss = 0.464764 (* 1 = 0.464764 loss)\nI0819 07:33:05.117391 21584 solver.cpp:228] Iteration 26600, loss = 0.0937572\nI0819 07:33:05.117434 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:33:05.117449 21584 solver.cpp:244]     Train net output #1: loss = 0.0937572 (* 1 = 0.0937572 loss)\nI0819 07:33:05.218284 21584 
sgd_solver.cpp:166] Iteration 26600, lr = 0.665\nI0819 07:35:22.999949 21584 solver.cpp:337] Iteration 26700, Testing net (#0)\nI0819 07:36:46.418344 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87448\nI0819 07:36:46.418690 21584 solver.cpp:404]     Test net output #1: loss = 0.499175 (* 1 = 0.499175 loss)\nI0819 07:36:47.725281 21584 solver.cpp:228] Iteration 26700, loss = 0.0905726\nI0819 07:36:47.725324 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 07:36:47.725340 21584 solver.cpp:244]     Train net output #1: loss = 0.0905726 (* 1 = 0.0905726 loss)\nI0819 07:36:47.826398 21584 sgd_solver.cpp:166] Iteration 26700, lr = 0.6675\nI0819 07:39:05.710139 21584 solver.cpp:337] Iteration 26800, Testing net (#0)\nI0819 07:40:29.082000 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0819 07:40:29.082345 21584 solver.cpp:404]     Test net output #1: loss = 0.478886 (* 1 = 0.478886 loss)\nI0819 07:40:30.387190 21584 solver.cpp:228] Iteration 26800, loss = 0.083809\nI0819 07:40:30.387243 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:40:30.387260 21584 solver.cpp:244]     Train net output #1: loss = 0.083809 (* 1 = 0.083809 loss)\nI0819 07:40:30.499482 21584 sgd_solver.cpp:166] Iteration 26800, lr = 0.67\nI0819 07:42:49.068153 21584 solver.cpp:337] Iteration 26900, Testing net (#0)\nI0819 07:44:12.456599 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 07:44:12.456948 21584 solver.cpp:404]     Test net output #1: loss = 0.460013 (* 1 = 0.460013 loss)\nI0819 07:44:13.762408 21584 solver.cpp:228] Iteration 26900, loss = 0.082327\nI0819 07:44:13.762457 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 07:44:13.762473 21584 solver.cpp:244]     Train net output #1: loss = 0.082327 (* 1 = 0.082327 loss)\nI0819 07:44:13.870724 21584 sgd_solver.cpp:166] Iteration 26900, lr = 0.6725\nI0819 07:46:32.496054 21584 solver.cpp:337] Iteration 27000, Testing net 
(#0)\nI0819 07:47:55.859850 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0819 07:47:55.860175 21584 solver.cpp:404]     Test net output #1: loss = 0.469283 (* 1 = 0.469283 loss)\nI0819 07:47:57.165654 21584 solver.cpp:228] Iteration 27000, loss = 0.122388\nI0819 07:47:57.165701 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 07:47:57.165717 21584 solver.cpp:244]     Train net output #1: loss = 0.122388 (* 1 = 0.122388 loss)\nI0819 07:47:57.269976 21584 sgd_solver.cpp:166] Iteration 27000, lr = 0.675\nI0819 07:50:15.854334 21584 solver.cpp:337] Iteration 27100, Testing net (#0)\nI0819 07:51:39.256534 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8784\nI0819 07:51:39.256887 21584 solver.cpp:404]     Test net output #1: loss = 0.47731 (* 1 = 0.47731 loss)\nI0819 07:51:40.562340 21584 solver.cpp:228] Iteration 27100, loss = 0.0769639\nI0819 07:51:40.562386 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 07:51:40.562402 21584 solver.cpp:244]     Train net output #1: loss = 0.0769639 (* 1 = 0.0769639 loss)\nI0819 07:51:40.671356 21584 sgd_solver.cpp:166] Iteration 27100, lr = 0.6775\nI0819 07:53:59.221370 21584 solver.cpp:337] Iteration 27200, Testing net (#0)\nI0819 07:55:22.630964 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87228\nI0819 07:55:22.631289 21584 solver.cpp:404]     Test net output #1: loss = 0.498829 (* 1 = 0.498829 loss)\nI0819 07:55:23.936558 21584 solver.cpp:228] Iteration 27200, loss = 0.0923279\nI0819 07:55:23.936605 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 07:55:23.936621 21584 solver.cpp:244]     Train net output #1: loss = 0.0923279 (* 1 = 0.0923279 loss)\nI0819 07:55:24.047919 21584 sgd_solver.cpp:166] Iteration 27200, lr = 0.68\nI0819 07:57:42.581168 21584 solver.cpp:337] Iteration 27300, Testing net (#0)\nI0819 07:59:05.990212 21584 solver.cpp:404]     Test net output #0: accuracy = 0.888\nI0819 07:59:05.990561 21584 
solver.cpp:404]     Test net output #1: loss = 0.430568 (* 1 = 0.430568 loss)\nI0819 07:59:07.295784 21584 solver.cpp:228] Iteration 27300, loss = 0.0414532\nI0819 07:59:07.295835 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 07:59:07.295850 21584 solver.cpp:244]     Train net output #1: loss = 0.0414532 (* 1 = 0.0414532 loss)\nI0819 07:59:07.404763 21584 sgd_solver.cpp:166] Iteration 27300, lr = 0.6825\nI0819 08:01:25.885288 21584 solver.cpp:337] Iteration 27400, Testing net (#0)\nI0819 08:02:49.284024 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0819 08:02:49.284363 21584 solver.cpp:404]     Test net output #1: loss = 0.451094 (* 1 = 0.451094 loss)\nI0819 08:02:50.589599 21584 solver.cpp:228] Iteration 27400, loss = 0.0500706\nI0819 08:02:50.589649 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:02:50.589666 21584 solver.cpp:244]     Train net output #1: loss = 0.0500706 (* 1 = 0.0500706 loss)\nI0819 08:02:50.697916 21584 sgd_solver.cpp:166] Iteration 27400, lr = 0.685\nI0819 08:05:09.216027 21584 solver.cpp:337] Iteration 27500, Testing net (#0)\nI0819 08:06:32.634253 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0819 08:06:32.634577 21584 solver.cpp:404]     Test net output #1: loss = 0.443363 (* 1 = 0.443363 loss)\nI0819 08:06:33.939944 21584 solver.cpp:228] Iteration 27500, loss = 0.077026\nI0819 08:06:33.939996 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:06:33.940012 21584 solver.cpp:244]     Train net output #1: loss = 0.0770261 (* 1 = 0.0770261 loss)\nI0819 08:06:34.051597 21584 sgd_solver.cpp:166] Iteration 27500, lr = 0.6875\nI0819 08:08:52.546947 21584 solver.cpp:337] Iteration 27600, Testing net (#0)\nI0819 08:10:15.802415 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0819 08:10:15.802757 21584 solver.cpp:404]     Test net output #1: loss = 0.477219 (* 1 = 0.477219 loss)\nI0819 08:10:17.108019 21584 
solver.cpp:228] Iteration 27600, loss = 0.0689275\nI0819 08:10:17.108070 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:10:17.108088 21584 solver.cpp:244]     Train net output #1: loss = 0.0689276 (* 1 = 0.0689276 loss)\nI0819 08:10:17.215929 21584 sgd_solver.cpp:166] Iteration 27600, lr = 0.69\nI0819 08:12:35.729115 21584 solver.cpp:337] Iteration 27700, Testing net (#0)\nI0819 08:13:58.772048 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0819 08:13:58.772277 21584 solver.cpp:404]     Test net output #1: loss = 0.450297 (* 1 = 0.450297 loss)\nI0819 08:14:00.077536 21584 solver.cpp:228] Iteration 27700, loss = 0.118026\nI0819 08:14:00.077591 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:14:00.077610 21584 solver.cpp:244]     Train net output #1: loss = 0.118026 (* 1 = 0.118026 loss)\nI0819 08:14:00.184598 21584 sgd_solver.cpp:166] Iteration 27700, lr = 0.6925\nI0819 08:16:18.736840 21584 solver.cpp:337] Iteration 27800, Testing net (#0)\nI0819 08:17:41.818583 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 08:17:41.818853 21584 solver.cpp:404]     Test net output #1: loss = 0.468586 (* 1 = 0.468586 loss)\nI0819 08:17:43.123978 21584 solver.cpp:228] Iteration 27800, loss = 0.0738885\nI0819 08:17:43.124028 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:17:43.124044 21584 solver.cpp:244]     Train net output #1: loss = 0.0738886 (* 1 = 0.0738886 loss)\nI0819 08:17:43.238183 21584 sgd_solver.cpp:166] Iteration 27800, lr = 0.695\nI0819 08:20:01.815223 21584 solver.cpp:337] Iteration 27900, Testing net (#0)\nI0819 08:21:25.206651 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88384\nI0819 08:21:25.206899 21584 solver.cpp:404]     Test net output #1: loss = 0.439631 (* 1 = 0.439631 loss)\nI0819 08:21:26.512096 21584 solver.cpp:228] Iteration 27900, loss = 0.0509075\nI0819 08:21:26.512145 21584 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0819 08:21:26.512163 21584 solver.cpp:244]     Train net output #1: loss = 0.0509075 (* 1 = 0.0509075 loss)\nI0819 08:21:26.623512 21584 sgd_solver.cpp:166] Iteration 27900, lr = 0.6975\nI0819 08:23:45.139266 21584 solver.cpp:337] Iteration 28000, Testing net (#0)\nI0819 08:25:08.536078 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0819 08:25:08.536340 21584 solver.cpp:404]     Test net output #1: loss = 0.45944 (* 1 = 0.45944 loss)\nI0819 08:25:09.841154 21584 solver.cpp:228] Iteration 28000, loss = 0.0529303\nI0819 08:25:09.841208 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 08:25:09.841225 21584 solver.cpp:244]     Train net output #1: loss = 0.0529303 (* 1 = 0.0529303 loss)\nI0819 08:25:09.951012 21584 sgd_solver.cpp:166] Iteration 28000, lr = 0.7\nI0819 08:27:28.505791 21584 solver.cpp:337] Iteration 28100, Testing net (#0)\nI0819 08:28:51.898157 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87268\nI0819 08:28:51.898433 21584 solver.cpp:404]     Test net output #1: loss = 0.495967 (* 1 = 0.495967 loss)\nI0819 08:28:53.203941 21584 solver.cpp:228] Iteration 28100, loss = 0.0778869\nI0819 08:28:53.203995 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 08:28:53.204012 21584 solver.cpp:244]     Train net output #1: loss = 0.0778869 (* 1 = 0.0778869 loss)\nI0819 08:28:53.319341 21584 sgd_solver.cpp:166] Iteration 28100, lr = 0.7025\nI0819 08:31:12.045157 21584 solver.cpp:337] Iteration 28200, Testing net (#0)\nI0819 08:32:35.390250 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 08:32:35.390548 21584 solver.cpp:404]     Test net output #1: loss = 0.452285 (* 1 = 0.452285 loss)\nI0819 08:32:36.695611 21584 solver.cpp:228] Iteration 28200, loss = 0.12734\nI0819 08:32:36.695660 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:32:36.695677 21584 solver.cpp:244]     Train net output #1: loss = 0.12734 (* 1 = 0.12734 
loss)\nI0819 08:32:36.808349 21584 sgd_solver.cpp:166] Iteration 28200, lr = 0.705\nI0819 08:34:55.532414 21584 solver.cpp:337] Iteration 28300, Testing net (#0)\nI0819 08:36:18.880071 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87248\nI0819 08:36:18.880342 21584 solver.cpp:404]     Test net output #1: loss = 0.492465 (* 1 = 0.492465 loss)\nI0819 08:36:20.185860 21584 solver.cpp:228] Iteration 28300, loss = 0.116026\nI0819 08:36:20.185919 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 08:36:20.185935 21584 solver.cpp:244]     Train net output #1: loss = 0.116026 (* 1 = 0.116026 loss)\nI0819 08:36:20.297778 21584 sgd_solver.cpp:166] Iteration 28300, lr = 0.7075\nI0819 08:38:39.046876 21584 solver.cpp:337] Iteration 28400, Testing net (#0)\nI0819 08:40:02.367234 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87824\nI0819 08:40:02.367514 21584 solver.cpp:404]     Test net output #1: loss = 0.464902 (* 1 = 0.464902 loss)\nI0819 08:40:03.672454 21584 solver.cpp:228] Iteration 28400, loss = 0.0919881\nI0819 08:40:03.672513 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:40:03.672530 21584 solver.cpp:244]     Train net output #1: loss = 0.0919882 (* 1 = 0.0919882 loss)\nI0819 08:40:03.782456 21584 sgd_solver.cpp:166] Iteration 28400, lr = 0.71\nI0819 08:42:22.577662 21584 solver.cpp:337] Iteration 28500, Testing net (#0)\nI0819 08:43:45.480027 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88412\nI0819 08:43:45.480289 21584 solver.cpp:404]     Test net output #1: loss = 0.445314 (* 1 = 0.445314 loss)\nI0819 08:43:46.785823 21584 solver.cpp:228] Iteration 28500, loss = 0.0716925\nI0819 08:43:46.785884 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 08:43:46.785903 21584 solver.cpp:244]     Train net output #1: loss = 0.0716925 (* 1 = 0.0716925 loss)\nI0819 08:43:46.894629 21584 sgd_solver.cpp:166] Iteration 28500, lr = 0.7125\nI0819 08:46:05.651151 21584 
solver.cpp:337] Iteration 28600, Testing net (#0)\nI0819 08:47:28.474828 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88108\nI0819 08:47:28.475096 21584 solver.cpp:404]     Test net output #1: loss = 0.467533 (* 1 = 0.467533 loss)\nI0819 08:47:29.780357 21584 solver.cpp:228] Iteration 28600, loss = 0.138737\nI0819 08:47:29.780411 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:47:29.780428 21584 solver.cpp:244]     Train net output #1: loss = 0.138737 (* 1 = 0.138737 loss)\nI0819 08:47:29.894723 21584 sgd_solver.cpp:166] Iteration 28600, lr = 0.715\nI0819 08:49:48.691107 21584 solver.cpp:337] Iteration 28700, Testing net (#0)\nI0819 08:51:11.936280 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87748\nI0819 08:51:11.936573 21584 solver.cpp:404]     Test net output #1: loss = 0.472997 (* 1 = 0.472997 loss)\nI0819 08:51:13.242054 21584 solver.cpp:228] Iteration 28700, loss = 0.15595\nI0819 08:51:13.242103 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 08:51:13.242120 21584 solver.cpp:244]     Train net output #1: loss = 0.15595 (* 1 = 0.15595 loss)\nI0819 08:51:13.356109 21584 sgd_solver.cpp:166] Iteration 28700, lr = 0.7175\nI0819 08:53:32.031384 21584 solver.cpp:337] Iteration 28800, Testing net (#0)\nI0819 08:54:55.374028 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881761\nI0819 08:54:55.374300 21584 solver.cpp:404]     Test net output #1: loss = 0.458086 (* 1 = 0.458086 loss)\nI0819 08:54:56.679787 21584 solver.cpp:228] Iteration 28800, loss = 0.0555156\nI0819 08:54:56.679836 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 08:54:56.679853 21584 solver.cpp:244]     Train net output #1: loss = 0.0555156 (* 1 = 0.0555156 loss)\nI0819 08:54:56.791330 21584 sgd_solver.cpp:166] Iteration 28800, lr = 0.72\nI0819 08:57:15.376060 21584 solver.cpp:337] Iteration 28900, Testing net (#0)\nI0819 08:58:38.707340 21584 solver.cpp:404]     Test net output #0: 
accuracy = 0.88196\nI0819 08:58:38.707587 21584 solver.cpp:404]     Test net output #1: loss = 0.45766 (* 1 = 0.45766 loss)\nI0819 08:58:40.013113 21584 solver.cpp:228] Iteration 28900, loss = 0.0621497\nI0819 08:58:40.013167 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 08:58:40.013185 21584 solver.cpp:244]     Train net output #1: loss = 0.0621498 (* 1 = 0.0621498 loss)\nI0819 08:58:40.121192 21584 sgd_solver.cpp:166] Iteration 28900, lr = 0.7225\nI0819 09:00:58.709897 21584 solver.cpp:337] Iteration 29000, Testing net (#0)\nI0819 09:02:21.935212 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0819 09:02:21.935500 21584 solver.cpp:404]     Test net output #1: loss = 0.461621 (* 1 = 0.461621 loss)\nI0819 09:02:23.240955 21584 solver.cpp:228] Iteration 29000, loss = 0.0607974\nI0819 09:02:23.241014 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:02:23.241030 21584 solver.cpp:244]     Train net output #1: loss = 0.0607975 (* 1 = 0.0607975 loss)\nI0819 09:02:23.350414 21584 sgd_solver.cpp:166] Iteration 29000, lr = 0.725\nI0819 09:04:41.964984 21584 solver.cpp:337] Iteration 29100, Testing net (#0)\nI0819 09:06:04.932349 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0819 09:06:04.932590 21584 solver.cpp:404]     Test net output #1: loss = 0.454078 (* 1 = 0.454078 loss)\nI0819 09:06:06.237567 21584 solver.cpp:228] Iteration 29100, loss = 0.0678226\nI0819 09:06:06.237620 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:06:06.237638 21584 solver.cpp:244]     Train net output #1: loss = 0.0678227 (* 1 = 0.0678227 loss)\nI0819 09:06:06.349884 21584 sgd_solver.cpp:166] Iteration 29100, lr = 0.7275\nI0819 09:08:24.828344 21584 solver.cpp:337] Iteration 29200, Testing net (#0)\nI0819 09:09:47.992148 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87172\nI0819 09:09:47.992413 21584 solver.cpp:404]     Test net output #1: loss = 0.504597 (* 1 = 
0.504597 loss)\nI0819 09:09:49.297317 21584 solver.cpp:228] Iteration 29200, loss = 0.0956429\nI0819 09:09:49.297374 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:09:49.297394 21584 solver.cpp:244]     Train net output #1: loss = 0.095643 (* 1 = 0.095643 loss)\nI0819 09:09:49.404160 21584 sgd_solver.cpp:166] Iteration 29200, lr = 0.73\nI0819 09:12:07.885264 21584 solver.cpp:337] Iteration 29300, Testing net (#0)\nI0819 09:13:31.134534 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87628\nI0819 09:13:31.134825 21584 solver.cpp:404]     Test net output #1: loss = 0.478578 (* 1 = 0.478578 loss)\nI0819 09:13:32.439595 21584 solver.cpp:228] Iteration 29300, loss = 0.118807\nI0819 09:13:32.439649 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:13:32.439667 21584 solver.cpp:244]     Train net output #1: loss = 0.118807 (* 1 = 0.118807 loss)\nI0819 09:13:32.553210 21584 sgd_solver.cpp:166] Iteration 29300, lr = 0.7325\nI0819 09:15:51.109385 21584 solver.cpp:337] Iteration 29400, Testing net (#0)\nI0819 09:17:14.430146 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87472\nI0819 09:17:14.430423 21584 solver.cpp:404]     Test net output #1: loss = 0.49002 (* 1 = 0.49002 loss)\nI0819 09:17:15.735337 21584 solver.cpp:228] Iteration 29400, loss = 0.0645931\nI0819 09:17:15.735390 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 09:17:15.735409 21584 solver.cpp:244]     Train net output #1: loss = 0.0645932 (* 1 = 0.0645932 loss)\nI0819 09:17:15.846737 21584 sgd_solver.cpp:166] Iteration 29400, lr = 0.735\nI0819 09:19:34.355659 21584 solver.cpp:337] Iteration 29500, Testing net (#0)\nI0819 09:20:57.648572 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87428\nI0819 09:20:57.648878 21584 solver.cpp:404]     Test net output #1: loss = 0.50828 (* 1 = 0.50828 loss)\nI0819 09:20:58.953830 21584 solver.cpp:228] Iteration 29500, loss = 0.124718\nI0819 09:20:58.953908 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:20:58.953927 21584 solver.cpp:244]     Train net output #1: loss = 0.124718 (* 1 = 0.124718 loss)\nI0819 09:20:59.059684 21584 sgd_solver.cpp:166] Iteration 29500, lr = 0.7375\nI0819 09:23:17.586980 21584 solver.cpp:337] Iteration 29600, Testing net (#0)\nI0819 09:24:40.919782 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87928\nI0819 09:24:40.920042 21584 solver.cpp:404]     Test net output #1: loss = 0.467664 (* 1 = 0.467664 loss)\nI0819 09:24:42.224985 21584 solver.cpp:228] Iteration 29600, loss = 0.06556\nI0819 09:24:42.225034 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:24:42.225050 21584 solver.cpp:244]     Train net output #1: loss = 0.06556 (* 1 = 0.06556 loss)\nI0819 09:24:42.338610 21584 sgd_solver.cpp:166] Iteration 29600, lr = 0.74\nI0819 09:27:00.913179 21584 solver.cpp:337] Iteration 29700, Testing net (#0)\nI0819 09:28:24.144218 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87992\nI0819 09:28:24.144484 21584 solver.cpp:404]     Test net output #1: loss = 0.452289 (* 1 = 0.452289 loss)\nI0819 09:28:25.450400 21584 solver.cpp:228] Iteration 29700, loss = 0.101107\nI0819 09:28:25.450456 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:28:25.450475 21584 solver.cpp:244]     Train net output #1: loss = 0.101108 (* 1 = 0.101108 loss)\nI0819 09:28:25.557108 21584 sgd_solver.cpp:166] Iteration 29700, lr = 0.7425\nI0819 09:30:44.199137 21584 solver.cpp:337] Iteration 29800, Testing net (#0)\nI0819 09:32:07.584170 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0819 09:32:07.584452 21584 solver.cpp:404]     Test net output #1: loss = 0.475458 (* 1 = 0.475458 loss)\nI0819 09:32:08.889746 21584 solver.cpp:228] Iteration 29800, loss = 0.0851264\nI0819 09:32:08.889802 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 09:32:08.889820 21584 solver.cpp:244]     Train net output #1: 
loss = 0.0851264 (* 1 = 0.0851264 loss)\nI0819 09:32:08.996124 21584 sgd_solver.cpp:166] Iteration 29800, lr = 0.745\nI0819 09:34:27.545812 21584 solver.cpp:337] Iteration 29900, Testing net (#0)\nI0819 09:35:50.889107 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86812\nI0819 09:35:50.889384 21584 solver.cpp:404]     Test net output #1: loss = 0.493691 (* 1 = 0.493691 loss)\nI0819 09:35:52.196249 21584 solver.cpp:228] Iteration 29900, loss = 0.124474\nI0819 09:35:52.196302 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 09:35:52.196326 21584 solver.cpp:244]     Train net output #1: loss = 0.124474 (* 1 = 0.124474 loss)\nI0819 09:35:52.307377 21584 sgd_solver.cpp:166] Iteration 29900, lr = 0.7475\nI0819 09:38:11.013795 21584 solver.cpp:337] Iteration 30000, Testing net (#0)\nI0819 09:39:34.429751 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88484\nI0819 09:39:34.430048 21584 solver.cpp:404]     Test net output #1: loss = 0.448303 (* 1 = 0.448303 loss)\nI0819 09:39:35.736162 21584 solver.cpp:228] Iteration 30000, loss = 0.0447661\nI0819 09:39:35.736222 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:39:35.736248 21584 solver.cpp:244]     Train net output #1: loss = 0.0447662 (* 1 = 0.0447662 loss)\nI0819 09:39:35.842406 21584 sgd_solver.cpp:166] Iteration 30000, lr = 0.75\nI0819 09:41:54.297472 21584 solver.cpp:337] Iteration 30100, Testing net (#0)\nI0819 09:43:16.772958 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0819 09:43:16.773253 21584 solver.cpp:404]     Test net output #1: loss = 0.439271 (* 1 = 0.439271 loss)\nI0819 09:43:18.075924 21584 solver.cpp:228] Iteration 30100, loss = 0.190821\nI0819 09:43:18.075969 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 09:43:18.075984 21584 solver.cpp:244]     Train net output #1: loss = 0.190821 (* 1 = 0.190821 loss)\nI0819 09:43:18.190479 21584 sgd_solver.cpp:166] Iteration 30100, lr = 0.7525\nI0819 
09:45:36.569425 21584 solver.cpp:337] Iteration 30200, Testing net (#0)\nI0819 09:46:59.051352 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0819 09:46:59.051652 21584 solver.cpp:404]     Test net output #1: loss = 0.466304 (* 1 = 0.466304 loss)\nI0819 09:47:00.353479 21584 solver.cpp:228] Iteration 30200, loss = 0.0322401\nI0819 09:47:00.353523 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 09:47:00.353539 21584 solver.cpp:244]     Train net output #1: loss = 0.0322401 (* 1 = 0.0322401 loss)\nI0819 09:47:00.471509 21584 sgd_solver.cpp:166] Iteration 30200, lr = 0.755\nI0819 09:49:18.921241 21584 solver.cpp:337] Iteration 30300, Testing net (#0)\nI0819 09:50:41.331961 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 09:50:41.332263 21584 solver.cpp:404]     Test net output #1: loss = 0.445587 (* 1 = 0.445587 loss)\nI0819 09:50:42.634493 21584 solver.cpp:228] Iteration 30300, loss = 0.0998095\nI0819 09:50:42.634536 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 09:50:42.634552 21584 solver.cpp:244]     Train net output #1: loss = 0.0998096 (* 1 = 0.0998096 loss)\nI0819 09:50:42.746855 21584 sgd_solver.cpp:166] Iteration 30300, lr = 0.7575\nI0819 09:53:01.240809 21584 solver.cpp:337] Iteration 30400, Testing net (#0)\nI0819 09:54:23.716959 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0819 09:54:23.717265 21584 solver.cpp:404]     Test net output #1: loss = 0.46948 (* 1 = 0.46948 loss)\nI0819 09:54:25.019363 21584 solver.cpp:228] Iteration 30400, loss = 0.114035\nI0819 09:54:25.019404 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:54:25.019420 21584 solver.cpp:244]     Train net output #1: loss = 0.114035 (* 1 = 0.114035 loss)\nI0819 09:54:25.136371 21584 sgd_solver.cpp:166] Iteration 30400, lr = 0.76\nI0819 09:56:43.791476 21584 solver.cpp:337] Iteration 30500, Testing net (#0)\nI0819 09:58:06.232342 21584 solver.cpp:404]     
Test net output #0: accuracy = 0.885641\nI0819 09:58:06.232646 21584 solver.cpp:404]     Test net output #1: loss = 0.440733 (* 1 = 0.440733 loss)\nI0819 09:58:07.534360 21584 solver.cpp:228] Iteration 30500, loss = 0.110106\nI0819 09:58:07.534401 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 09:58:07.534416 21584 solver.cpp:244]     Train net output #1: loss = 0.110106 (* 1 = 0.110106 loss)\nI0819 09:58:07.650004 21584 sgd_solver.cpp:166] Iteration 30500, lr = 0.7625\nI0819 10:00:26.313036 21584 solver.cpp:337] Iteration 30600, Testing net (#0)\nI0819 10:01:48.752789 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87696\nI0819 10:01:48.753068 21584 solver.cpp:404]     Test net output #1: loss = 0.465749 (* 1 = 0.465749 loss)\nI0819 10:01:50.056254 21584 solver.cpp:228] Iteration 30600, loss = 0.0651292\nI0819 10:01:50.056298 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:01:50.056313 21584 solver.cpp:244]     Train net output #1: loss = 0.0651293 (* 1 = 0.0651293 loss)\nI0819 10:01:50.166362 21584 sgd_solver.cpp:166] Iteration 30600, lr = 0.765\nI0819 10:04:08.828248 21584 solver.cpp:337] Iteration 30700, Testing net (#0)\nI0819 10:05:31.289153 21584 solver.cpp:404]     Test net output #0: accuracy = 0.878881\nI0819 10:05:31.289459 21584 solver.cpp:404]     Test net output #1: loss = 0.4437 (* 1 = 0.4437 loss)\nI0819 10:05:32.591862 21584 solver.cpp:228] Iteration 30700, loss = 0.0545383\nI0819 10:05:32.591905 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:05:32.591922 21584 solver.cpp:244]     Train net output #1: loss = 0.0545384 (* 1 = 0.0545384 loss)\nI0819 10:05:32.705341 21584 sgd_solver.cpp:166] Iteration 30700, lr = 0.7675\nI0819 10:07:51.345175 21584 solver.cpp:337] Iteration 30800, Testing net (#0)\nI0819 10:09:13.802723 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0819 10:09:13.803023 21584 solver.cpp:404]     Test net output #1: loss = 
0.450965 (* 1 = 0.450965 loss)\nI0819 10:09:15.104874 21584 solver.cpp:228] Iteration 30800, loss = 0.050095\nI0819 10:09:15.104915 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:09:15.104931 21584 solver.cpp:244]     Train net output #1: loss = 0.0500951 (* 1 = 0.0500951 loss)\nI0819 10:09:15.222234 21584 sgd_solver.cpp:166] Iteration 30800, lr = 0.77\nI0819 10:11:33.743268 21584 solver.cpp:337] Iteration 30900, Testing net (#0)\nI0819 10:12:56.687701 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0819 10:12:56.688053 21584 solver.cpp:404]     Test net output #1: loss = 0.45956 (* 1 = 0.45956 loss)\nI0819 10:12:57.994889 21584 solver.cpp:228] Iteration 30900, loss = 0.21871\nI0819 10:12:57.994943 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 10:12:57.994961 21584 solver.cpp:244]     Train net output #1: loss = 0.218711 (* 1 = 0.218711 loss)\nI0819 10:12:58.100764 21584 sgd_solver.cpp:166] Iteration 30900, lr = 0.7725\nI0819 10:15:16.797663 21584 solver.cpp:337] Iteration 31000, Testing net (#0)\nI0819 10:16:39.811146 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8834\nI0819 10:16:39.811467 21584 solver.cpp:404]     Test net output #1: loss = 0.460088 (* 1 = 0.460088 loss)\nI0819 10:16:41.117812 21584 solver.cpp:228] Iteration 31000, loss = 0.0845547\nI0819 10:16:41.117867 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:16:41.117884 21584 solver.cpp:244]     Train net output #1: loss = 0.0845548 (* 1 = 0.0845548 loss)\nI0819 10:16:41.229887 21584 sgd_solver.cpp:166] Iteration 31000, lr = 0.775\nI0819 10:18:59.864527 21584 solver.cpp:337] Iteration 31100, Testing net (#0)\nI0819 10:20:23.089412 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0819 10:20:23.089732 21584 solver.cpp:404]     Test net output #1: loss = 0.43644 (* 1 = 0.43644 loss)\nI0819 10:20:24.405381 21584 solver.cpp:228] Iteration 31100, loss = 0.0967743\nI0819 
10:20:24.405426 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:20:24.405442 21584 solver.cpp:244]     Train net output #1: loss = 0.0967744 (* 1 = 0.0967744 loss)\nI0819 10:20:24.507678 21584 sgd_solver.cpp:166] Iteration 31100, lr = 0.7775\nI0819 10:22:43.203955 21584 solver.cpp:337] Iteration 31200, Testing net (#0)\nI0819 10:24:06.221323 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87652\nI0819 10:24:06.221662 21584 solver.cpp:404]     Test net output #1: loss = 0.468623 (* 1 = 0.468623 loss)\nI0819 10:24:07.528290 21584 solver.cpp:228] Iteration 31200, loss = 0.0957588\nI0819 10:24:07.528342 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:24:07.528359 21584 solver.cpp:244]     Train net output #1: loss = 0.0957589 (* 1 = 0.0957589 loss)\nI0819 10:24:07.636143 21584 sgd_solver.cpp:166] Iteration 31200, lr = 0.78\nI0819 10:26:26.104447 21584 solver.cpp:337] Iteration 31300, Testing net (#0)\nI0819 10:27:49.098960 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87524\nI0819 10:27:49.099267 21584 solver.cpp:404]     Test net output #1: loss = 0.469626 (* 1 = 0.469626 loss)\nI0819 10:27:50.405634 21584 solver.cpp:228] Iteration 31300, loss = 0.125104\nI0819 10:27:50.405686 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:27:50.405704 21584 solver.cpp:244]     Train net output #1: loss = 0.125104 (* 1 = 0.125104 loss)\nI0819 10:27:50.517750 21584 sgd_solver.cpp:166] Iteration 31300, lr = 0.7825\nI0819 10:30:09.088989 21584 solver.cpp:337] Iteration 31400, Testing net (#0)\nI0819 10:31:32.133008 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0819 10:31:32.133337 21584 solver.cpp:404]     Test net output #1: loss = 0.450282 (* 1 = 0.450282 loss)\nI0819 10:31:33.439929 21584 solver.cpp:228] Iteration 31400, loss = 0.0741767\nI0819 10:31:33.439988 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:31:33.440006 21584 
solver.cpp:244]     Train net output #1: loss = 0.0741768 (* 1 = 0.0741768 loss)\nI0819 10:31:33.546674 21584 sgd_solver.cpp:166] Iteration 31400, lr = 0.785\nI0819 10:33:52.117702 21584 solver.cpp:337] Iteration 31500, Testing net (#0)\nI0819 10:35:15.218752 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87872\nI0819 10:35:15.219090 21584 solver.cpp:404]     Test net output #1: loss = 0.450841 (* 1 = 0.450841 loss)\nI0819 10:35:16.524871 21584 solver.cpp:228] Iteration 31500, loss = 0.116188\nI0819 10:35:16.524931 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 10:35:16.524950 21584 solver.cpp:244]     Train net output #1: loss = 0.116189 (* 1 = 0.116189 loss)\nI0819 10:35:16.635385 21584 sgd_solver.cpp:166] Iteration 31500, lr = 0.7875\nI0819 10:37:35.175451 21584 solver.cpp:337] Iteration 31600, Testing net (#0)\nI0819 10:38:58.279683 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0819 10:38:58.279968 21584 solver.cpp:404]     Test net output #1: loss = 0.467827 (* 1 = 0.467827 loss)\nI0819 10:38:59.585688 21584 solver.cpp:228] Iteration 31600, loss = 0.0933185\nI0819 10:38:59.585737 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:38:59.585755 21584 solver.cpp:244]     Train net output #1: loss = 0.0933186 (* 1 = 0.0933186 loss)\nI0819 10:38:59.697746 21584 sgd_solver.cpp:166] Iteration 31600, lr = 0.79\nI0819 10:41:18.341650 21584 solver.cpp:337] Iteration 31700, Testing net (#0)\nI0819 10:42:41.521548 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0819 10:42:41.521883 21584 solver.cpp:404]     Test net output #1: loss = 0.460143 (* 1 = 0.460143 loss)\nI0819 10:42:42.826684 21584 solver.cpp:228] Iteration 31700, loss = 0.0977813\nI0819 10:42:42.826736 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 10:42:42.826753 21584 solver.cpp:244]     Train net output #1: loss = 0.0977815 (* 1 = 0.0977815 loss)\nI0819 10:42:42.932588 21584 
sgd_solver.cpp:166] Iteration 31700, lr = 0.7925\nI0819 10:45:01.481650 21584 solver.cpp:337] Iteration 31800, Testing net (#0)\nI0819 10:46:24.736331 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87416\nI0819 10:46:24.736657 21584 solver.cpp:404]     Test net output #1: loss = 0.473546 (* 1 = 0.473546 loss)\nI0819 10:46:26.042589 21584 solver.cpp:228] Iteration 31800, loss = 0.0547717\nI0819 10:46:26.042647 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 10:46:26.042665 21584 solver.cpp:244]     Train net output #1: loss = 0.0547719 (* 1 = 0.0547719 loss)\nI0819 10:46:26.151319 21584 sgd_solver.cpp:166] Iteration 31800, lr = 0.795\nI0819 10:48:44.751793 21584 solver.cpp:337] Iteration 31900, Testing net (#0)\nI0819 10:50:07.808477 21584 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 10:50:07.808751 21584 solver.cpp:404]     Test net output #1: loss = 0.451604 (* 1 = 0.451604 loss)\nI0819 10:50:09.114637 21584 solver.cpp:228] Iteration 31900, loss = 0.0690571\nI0819 10:50:09.114698 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 10:50:09.114724 21584 solver.cpp:244]     Train net output #1: loss = 0.0690573 (* 1 = 0.0690573 loss)\nI0819 10:50:09.224644 21584 sgd_solver.cpp:166] Iteration 31900, lr = 0.7975\nI0819 10:52:27.839659 21584 solver.cpp:337] Iteration 32000, Testing net (#0)\nI0819 10:53:50.928622 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0819 10:53:50.928907 21584 solver.cpp:404]     Test net output #1: loss = 0.4565 (* 1 = 0.4565 loss)\nI0819 10:53:52.235033 21584 solver.cpp:228] Iteration 32000, loss = 0.0878287\nI0819 10:53:52.235091 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 10:53:52.235110 21584 solver.cpp:244]     Train net output #1: loss = 0.0878288 (* 1 = 0.0878288 loss)\nI0819 10:53:52.340989 21584 sgd_solver.cpp:166] Iteration 32000, lr = 0.8\nI0819 10:56:10.807343 21584 solver.cpp:337] Iteration 32100, Testing net 
(#0)\nI0819 10:57:34.207254 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0819 10:57:34.207558 21584 solver.cpp:404]     Test net output #1: loss = 0.47508 (* 1 = 0.47508 loss)\nI0819 10:57:35.513061 21584 solver.cpp:228] Iteration 32100, loss = 0.0551872\nI0819 10:57:35.513118 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 10:57:35.513137 21584 solver.cpp:244]     Train net output #1: loss = 0.0551874 (* 1 = 0.0551874 loss)\nI0819 10:57:35.620851 21584 sgd_solver.cpp:166] Iteration 32100, lr = 0.8025\nI0819 10:59:54.104495 21584 solver.cpp:337] Iteration 32200, Testing net (#0)\nI0819 11:01:17.512145 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0819 11:01:17.512503 21584 solver.cpp:404]     Test net output #1: loss = 0.436255 (* 1 = 0.436255 loss)\nI0819 11:01:18.818241 21584 solver.cpp:228] Iteration 32200, loss = 0.169408\nI0819 11:01:18.818300 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 11:01:18.818317 21584 solver.cpp:244]     Train net output #1: loss = 0.169408 (* 1 = 0.169408 loss)\nI0819 11:01:18.926555 21584 sgd_solver.cpp:166] Iteration 32200, lr = 0.805\nI0819 11:03:37.643209 21584 solver.cpp:337] Iteration 32300, Testing net (#0)\nI0819 11:05:01.057535 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88048\nI0819 11:05:01.057867 21584 solver.cpp:404]     Test net output #1: loss = 0.457288 (* 1 = 0.457288 loss)\nI0819 11:05:02.362990 21584 solver.cpp:228] Iteration 32300, loss = 0.037689\nI0819 11:05:02.363042 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 11:05:02.363059 21584 solver.cpp:244]     Train net output #1: loss = 0.0376892 (* 1 = 0.0376892 loss)\nI0819 11:05:02.477056 21584 sgd_solver.cpp:166] Iteration 32300, lr = 0.8075\nI0819 11:07:21.173534 21584 solver.cpp:337] Iteration 32400, Testing net (#0)\nI0819 11:08:44.559329 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0819 11:08:44.559680 21584 
solver.cpp:404]     Test net output #1: loss = 0.461214 (* 1 = 0.461214 loss)\nI0819 11:08:45.864449 21584 solver.cpp:228] Iteration 32400, loss = 0.0915148\nI0819 11:08:45.864506 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:08:45.864521 21584 solver.cpp:244]     Train net output #1: loss = 0.091515 (* 1 = 0.091515 loss)\nI0819 11:08:45.983166 21584 sgd_solver.cpp:166] Iteration 32400, lr = 0.81\nI0819 11:11:04.689853 21584 solver.cpp:337] Iteration 32500, Testing net (#0)\nI0819 11:12:28.060118 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88544\nI0819 11:12:28.060441 21584 solver.cpp:404]     Test net output #1: loss = 0.432301 (* 1 = 0.432301 loss)\nI0819 11:12:29.365602 21584 solver.cpp:228] Iteration 32500, loss = 0.111822\nI0819 11:12:29.365651 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:12:29.365669 21584 solver.cpp:244]     Train net output #1: loss = 0.111822 (* 1 = 0.111822 loss)\nI0819 11:12:29.481487 21584 sgd_solver.cpp:166] Iteration 32500, lr = 0.8125\nI0819 11:14:48.056545 21584 solver.cpp:337] Iteration 32600, Testing net (#0)\nI0819 11:16:11.426986 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87692\nI0819 11:16:11.427316 21584 solver.cpp:404]     Test net output #1: loss = 0.469909 (* 1 = 0.469909 loss)\nI0819 11:16:12.732482 21584 solver.cpp:228] Iteration 32600, loss = 0.1209\nI0819 11:16:12.732539 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:16:12.732556 21584 solver.cpp:244]     Train net output #1: loss = 0.1209 (* 1 = 0.1209 loss)\nI0819 11:16:12.842017 21584 sgd_solver.cpp:166] Iteration 32600, lr = 0.815\nI0819 11:18:31.395934 21584 solver.cpp:337] Iteration 32700, Testing net (#0)\nI0819 11:19:54.774499 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88648\nI0819 11:19:54.774864 21584 solver.cpp:404]     Test net output #1: loss = 0.423595 (* 1 = 0.423595 loss)\nI0819 11:19:56.079655 21584 solver.cpp:228] Iteration 
32700, loss = 0.0890528\nI0819 11:19:56.079706 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 11:19:56.079723 21584 solver.cpp:244]     Train net output #1: loss = 0.089053 (* 1 = 0.089053 loss)\nI0819 11:19:56.192742 21584 sgd_solver.cpp:166] Iteration 32700, lr = 0.8175\nI0819 11:22:14.711253 21584 solver.cpp:337] Iteration 32800, Testing net (#0)\nI0819 11:23:38.090800 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0819 11:23:38.091156 21584 solver.cpp:404]     Test net output #1: loss = 0.459216 (* 1 = 0.459216 loss)\nI0819 11:23:39.395340 21584 solver.cpp:228] Iteration 32800, loss = 0.157811\nI0819 11:23:39.395395 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:23:39.395411 21584 solver.cpp:244]     Train net output #1: loss = 0.157811 (* 1 = 0.157811 loss)\nI0819 11:23:39.505849 21584 sgd_solver.cpp:166] Iteration 32800, lr = 0.82\nI0819 11:25:58.055222 21584 solver.cpp:337] Iteration 32900, Testing net (#0)\nI0819 11:27:21.453140 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8764\nI0819 11:27:21.453493 21584 solver.cpp:404]     Test net output #1: loss = 0.477794 (* 1 = 0.477794 loss)\nI0819 11:27:22.758476 21584 solver.cpp:228] Iteration 32900, loss = 0.0453228\nI0819 11:27:22.758528 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 11:27:22.758545 21584 solver.cpp:244]     Train net output #1: loss = 0.0453229 (* 1 = 0.0453229 loss)\nI0819 11:27:22.873103 21584 sgd_solver.cpp:166] Iteration 32900, lr = 0.8225\nI0819 11:29:41.409950 21584 solver.cpp:337] Iteration 33000, Testing net (#0)\nI0819 11:31:04.801544 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88276\nI0819 11:31:04.801887 21584 solver.cpp:404]     Test net output #1: loss = 0.437265 (* 1 = 0.437265 loss)\nI0819 11:31:06.107544 21584 solver.cpp:228] Iteration 33000, loss = 0.152028\nI0819 11:31:06.107599 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 
11:31:06.107617 21584 solver.cpp:244]     Train net output #1: loss = 0.152028 (* 1 = 0.152028 loss)\nI0819 11:31:06.215414 21584 sgd_solver.cpp:166] Iteration 33000, lr = 0.825\nI0819 11:33:24.713855 21584 solver.cpp:337] Iteration 33100, Testing net (#0)\nI0819 11:34:48.084347 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88216\nI0819 11:34:48.084679 21584 solver.cpp:404]     Test net output #1: loss = 0.438523 (* 1 = 0.438523 loss)\nI0819 11:34:49.389911 21584 solver.cpp:228] Iteration 33100, loss = 0.117731\nI0819 11:34:49.389966 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:34:49.389983 21584 solver.cpp:244]     Train net output #1: loss = 0.117731 (* 1 = 0.117731 loss)\nI0819 11:34:49.496433 21584 sgd_solver.cpp:166] Iteration 33100, lr = 0.8275\nI0819 11:37:07.980221 21584 solver.cpp:337] Iteration 33200, Testing net (#0)\nI0819 11:38:31.359558 21584 solver.cpp:404]     Test net output #0: accuracy = 0.875\nI0819 11:38:31.359897 21584 solver.cpp:404]     Test net output #1: loss = 0.462319 (* 1 = 0.462319 loss)\nI0819 11:38:32.664721 21584 solver.cpp:228] Iteration 33200, loss = 0.0942343\nI0819 11:38:32.664772 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:38:32.664789 21584 solver.cpp:244]     Train net output #1: loss = 0.0942344 (* 1 = 0.0942344 loss)\nI0819 11:38:32.771860 21584 sgd_solver.cpp:166] Iteration 33200, lr = 0.83\nI0819 11:40:51.395661 21584 solver.cpp:337] Iteration 33300, Testing net (#0)\nI0819 11:42:14.787099 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87364\nI0819 11:42:14.787439 21584 solver.cpp:404]     Test net output #1: loss = 0.486045 (* 1 = 0.486045 loss)\nI0819 11:42:16.092780 21584 solver.cpp:228] Iteration 33300, loss = 0.112275\nI0819 11:42:16.092833 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:42:16.092850 21584 solver.cpp:244]     Train net output #1: loss = 0.112275 (* 1 = 0.112275 loss)\nI0819 11:42:16.200121 21584 
sgd_solver.cpp:166] Iteration 33300, lr = 0.8325\nI0819 11:44:34.740466 21584 solver.cpp:337] Iteration 33400, Testing net (#0)\nI0819 11:45:58.176774 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8874\nI0819 11:45:58.177132 21584 solver.cpp:404]     Test net output #1: loss = 0.418123 (* 1 = 0.418123 loss)\nI0819 11:45:59.483144 21584 solver.cpp:228] Iteration 33400, loss = 0.064607\nI0819 11:45:59.483196 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 11:45:59.483219 21584 solver.cpp:244]     Train net output #1: loss = 0.0646071 (* 1 = 0.0646071 loss)\nI0819 11:45:59.590943 21584 sgd_solver.cpp:166] Iteration 33400, lr = 0.835\nI0819 11:48:18.143360 21584 solver.cpp:337] Iteration 33500, Testing net (#0)\nI0819 11:49:41.546535 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87172\nI0819 11:49:41.546891 21584 solver.cpp:404]     Test net output #1: loss = 0.461796 (* 1 = 0.461796 loss)\nI0819 11:49:42.852207 21584 solver.cpp:228] Iteration 33500, loss = 0.115661\nI0819 11:49:42.852260 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 11:49:42.852277 21584 solver.cpp:244]     Train net output #1: loss = 0.115661 (* 1 = 0.115661 loss)\nI0819 11:49:42.962321 21584 sgd_solver.cpp:166] Iteration 33500, lr = 0.8375\nI0819 11:52:01.520997 21584 solver.cpp:337] Iteration 33600, Testing net (#0)\nI0819 11:53:24.921612 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0819 11:53:24.921936 21584 solver.cpp:404]     Test net output #1: loss = 0.450417 (* 1 = 0.450417 loss)\nI0819 11:53:26.227514 21584 solver.cpp:228] Iteration 33600, loss = 0.0866563\nI0819 11:53:26.227567 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 11:53:26.227584 21584 solver.cpp:244]     Train net output #1: loss = 0.0866563 (* 1 = 0.0866563 loss)\nI0819 11:53:26.345011 21584 sgd_solver.cpp:166] Iteration 33600, lr = 0.84\nI0819 11:55:44.923477 21584 solver.cpp:337] Iteration 33700, Testing net 
(#0)\nI0819 11:57:08.328915 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88488\nI0819 11:57:08.329268 21584 solver.cpp:404]     Test net output #1: loss = 0.425381 (* 1 = 0.425381 loss)\nI0819 11:57:09.634521 21584 solver.cpp:228] Iteration 33700, loss = 0.131063\nI0819 11:57:09.634572 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 11:57:09.634590 21584 solver.cpp:244]     Train net output #1: loss = 0.131063 (* 1 = 0.131063 loss)\nI0819 11:57:09.743356 21584 sgd_solver.cpp:166] Iteration 33700, lr = 0.8425\nI0819 11:59:28.325090 21584 solver.cpp:337] Iteration 33800, Testing net (#0)\nI0819 12:00:51.723361 21584 solver.cpp:404]     Test net output #0: accuracy = 0.883481\nI0819 12:00:51.723660 21584 solver.cpp:404]     Test net output #1: loss = 0.430282 (* 1 = 0.430282 loss)\nI0819 12:00:53.029875 21584 solver.cpp:228] Iteration 33800, loss = 0.0718823\nI0819 12:00:53.029928 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:00:53.029945 21584 solver.cpp:244]     Train net output #1: loss = 0.0718823 (* 1 = 0.0718823 loss)\nI0819 12:00:53.138294 21584 sgd_solver.cpp:166] Iteration 33800, lr = 0.845\nI0819 12:03:11.763083 21584 solver.cpp:337] Iteration 33900, Testing net (#0)\nI0819 12:04:35.162000 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87896\nI0819 12:04:35.162341 21584 solver.cpp:404]     Test net output #1: loss = 0.434473 (* 1 = 0.434473 loss)\nI0819 12:04:36.469054 21584 solver.cpp:228] Iteration 33900, loss = 0.119646\nI0819 12:04:36.469111 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:04:36.469128 21584 solver.cpp:244]     Train net output #1: loss = 0.119646 (* 1 = 0.119646 loss)\nI0819 12:04:36.576683 21584 sgd_solver.cpp:166] Iteration 33900, lr = 0.8475\nI0819 12:06:55.174551 21584 solver.cpp:337] Iteration 34000, Testing net (#0)\nI0819 12:08:18.573999 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88156\nI0819 12:08:18.574360 21584 
solver.cpp:404]     Test net output #1: loss = 0.442662 (* 1 = 0.442662 loss)\nI0819 12:08:19.880468 21584 solver.cpp:228] Iteration 34000, loss = 0.106904\nI0819 12:08:19.880528 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:08:19.880548 21584 solver.cpp:244]     Train net output #1: loss = 0.106904 (* 1 = 0.106904 loss)\nI0819 12:08:19.989151 21584 sgd_solver.cpp:166] Iteration 34000, lr = 0.85\nI0819 12:10:38.581217 21584 solver.cpp:337] Iteration 34100, Testing net (#0)\nI0819 12:12:01.986150 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 12:12:01.986428 21584 solver.cpp:404]     Test net output #1: loss = 0.422386 (* 1 = 0.422386 loss)\nI0819 12:12:03.291612 21584 solver.cpp:228] Iteration 34100, loss = 0.112474\nI0819 12:12:03.291666 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:12:03.291683 21584 solver.cpp:244]     Train net output #1: loss = 0.112474 (* 1 = 0.112474 loss)\nI0819 12:12:03.401252 21584 sgd_solver.cpp:166] Iteration 34100, lr = 0.8525\nI0819 12:14:21.910008 21584 solver.cpp:337] Iteration 34200, Testing net (#0)\nI0819 12:15:45.312494 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0819 12:15:45.312824 21584 solver.cpp:404]     Test net output #1: loss = 0.432993 (* 1 = 0.432993 loss)\nI0819 12:15:46.617751 21584 solver.cpp:228] Iteration 34200, loss = 0.139188\nI0819 12:15:46.617804 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 12:15:46.617820 21584 solver.cpp:244]     Train net output #1: loss = 0.139188 (* 1 = 0.139188 loss)\nI0819 12:15:46.728076 21584 sgd_solver.cpp:166] Iteration 34200, lr = 0.855\nI0819 12:18:05.198070 21584 solver.cpp:337] Iteration 34300, Testing net (#0)\nI0819 12:19:28.603425 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88236\nI0819 12:19:28.603757 21584 solver.cpp:404]     Test net output #1: loss = 0.443027 (* 1 = 0.443027 loss)\nI0819 12:19:29.910498 21584 solver.cpp:228] 
Iteration 34300, loss = 0.0853598\nI0819 12:19:29.910548 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:19:29.910563 21584 solver.cpp:244]     Train net output #1: loss = 0.0853598 (* 1 = 0.0853598 loss)\nI0819 12:19:30.018821 21584 sgd_solver.cpp:166] Iteration 34300, lr = 0.8575\nI0819 12:21:48.713065 21584 solver.cpp:337] Iteration 34400, Testing net (#0)\nI0819 12:23:12.082657 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0819 12:23:12.083006 21584 solver.cpp:404]     Test net output #1: loss = 0.466339 (* 1 = 0.466339 loss)\nI0819 12:23:13.389202 21584 solver.cpp:228] Iteration 34400, loss = 0.110275\nI0819 12:23:13.389253 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:23:13.389269 21584 solver.cpp:244]     Train net output #1: loss = 0.110275 (* 1 = 0.110275 loss)\nI0819 12:23:13.495954 21584 sgd_solver.cpp:166] Iteration 34400, lr = 0.86\nI0819 12:25:32.135035 21584 solver.cpp:337] Iteration 34500, Testing net (#0)\nI0819 12:26:55.545800 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 12:26:55.546134 21584 solver.cpp:404]     Test net output #1: loss = 0.436308 (* 1 = 0.436308 loss)\nI0819 12:26:56.852967 21584 solver.cpp:228] Iteration 34500, loss = 0.0723142\nI0819 12:26:56.853025 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:26:56.853042 21584 solver.cpp:244]     Train net output #1: loss = 0.0723142 (* 1 = 0.0723142 loss)\nI0819 12:26:56.962990 21584 sgd_solver.cpp:166] Iteration 34500, lr = 0.8625\nI0819 12:29:15.416203 21584 solver.cpp:337] Iteration 34600, Testing net (#0)\nI0819 12:30:38.827586 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88284\nI0819 12:30:38.827921 21584 solver.cpp:404]     Test net output #1: loss = 0.441263 (* 1 = 0.441263 loss)\nI0819 12:30:40.134958 21584 solver.cpp:228] Iteration 34600, loss = 0.129306\nI0819 12:30:40.135015 21584 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI0819 12:30:40.135032 21584 solver.cpp:244]     Train net output #1: loss = 0.129305 (* 1 = 0.129305 loss)\nI0819 12:30:40.245692 21584 sgd_solver.cpp:166] Iteration 34600, lr = 0.865\nI0819 12:32:58.822345 21584 solver.cpp:337] Iteration 34700, Testing net (#0)\nI0819 12:34:22.237512 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0819 12:34:22.237870 21584 solver.cpp:404]     Test net output #1: loss = 0.456632 (* 1 = 0.456632 loss)\nI0819 12:34:23.544386 21584 solver.cpp:228] Iteration 34700, loss = 0.0702908\nI0819 12:34:23.544445 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 12:34:23.544462 21584 solver.cpp:244]     Train net output #1: loss = 0.0702908 (* 1 = 0.0702908 loss)\nI0819 12:34:23.656958 21584 sgd_solver.cpp:166] Iteration 34700, lr = 0.8675\nI0819 12:36:42.248585 21584 solver.cpp:337] Iteration 34800, Testing net (#0)\nI0819 12:38:04.722625 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87568\nI0819 12:38:04.722911 21584 solver.cpp:404]     Test net output #1: loss = 0.47656 (* 1 = 0.47656 loss)\nI0819 12:38:06.024613 21584 solver.cpp:228] Iteration 34800, loss = 0.208219\nI0819 12:38:06.024657 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 12:38:06.024673 21584 solver.cpp:244]     Train net output #1: loss = 0.208219 (* 1 = 0.208219 loss)\nI0819 12:38:06.137817 21584 sgd_solver.cpp:166] Iteration 34800, lr = 0.87\nI0819 12:40:24.499940 21584 solver.cpp:337] Iteration 34900, Testing net (#0)\nI0819 12:41:46.981271 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88272\nI0819 12:41:46.981559 21584 solver.cpp:404]     Test net output #1: loss = 0.434241 (* 1 = 0.434241 loss)\nI0819 12:41:48.284055 21584 solver.cpp:228] Iteration 34900, loss = 0.108614\nI0819 12:41:48.284098 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:41:48.284116 21584 solver.cpp:244]     Train net output #1: loss = 0.108614 (* 1 = 0.108614 loss)\nI0819 
12:41:48.397474 21584 sgd_solver.cpp:166] Iteration 34900, lr = 0.8725\nI0819 12:44:06.795759 21584 solver.cpp:337] Iteration 35000, Testing net (#0)\nI0819 12:45:29.253733 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88496\nI0819 12:45:29.254024 21584 solver.cpp:404]     Test net output #1: loss = 0.42904 (* 1 = 0.42904 loss)\nI0819 12:45:30.555891 21584 solver.cpp:228] Iteration 35000, loss = 0.116574\nI0819 12:45:30.555933 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 12:45:30.555950 21584 solver.cpp:244]     Train net output #1: loss = 0.116574 (* 1 = 0.116574 loss)\nI0819 12:45:30.670260 21584 sgd_solver.cpp:166] Iteration 35000, lr = 0.875\nI0819 12:47:49.171370 21584 solver.cpp:337] Iteration 35100, Testing net (#0)\nI0819 12:49:11.631997 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0819 12:49:11.632270 21584 solver.cpp:404]     Test net output #1: loss = 0.443099 (* 1 = 0.443099 loss)\nI0819 12:49:12.934864 21584 solver.cpp:228] Iteration 35100, loss = 0.0619195\nI0819 12:49:12.934903 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:49:12.934919 21584 solver.cpp:244]     Train net output #1: loss = 0.0619195 (* 1 = 0.0619195 loss)\nI0819 12:49:13.049902 21584 sgd_solver.cpp:166] Iteration 35100, lr = 0.8775\nI0819 12:51:31.411371 21584 solver.cpp:337] Iteration 35200, Testing net (#0)\nI0819 12:52:53.719746 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0819 12:52:53.720042 21584 solver.cpp:404]     Test net output #1: loss = 0.474765 (* 1 = 0.474765 loss)\nI0819 12:52:55.021877 21584 solver.cpp:228] Iteration 35200, loss = 0.120187\nI0819 12:52:55.021919 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 12:52:55.021935 21584 solver.cpp:244]     Train net output #1: loss = 0.120187 (* 1 = 0.120187 loss)\nI0819 12:52:55.137095 21584 sgd_solver.cpp:166] Iteration 35200, lr = 0.88\nI0819 12:55:13.484114 21584 solver.cpp:337] Iteration 
35300, Testing net (#0)\nI0819 12:56:35.792382 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0819 12:56:35.792676 21584 solver.cpp:404]     Test net output #1: loss = 0.442093 (* 1 = 0.442093 loss)\nI0819 12:56:37.094166 21584 solver.cpp:228] Iteration 35300, loss = 0.0703216\nI0819 12:56:37.094208 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 12:56:37.094223 21584 solver.cpp:244]     Train net output #1: loss = 0.0703215 (* 1 = 0.0703215 loss)\nI0819 12:56:37.211105 21584 sgd_solver.cpp:166] Iteration 35300, lr = 0.8825\nI0819 12:58:55.474632 21584 solver.cpp:337] Iteration 35400, Testing net (#0)\nI0819 13:00:17.826093 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88188\nI0819 13:00:17.826380 21584 solver.cpp:404]     Test net output #1: loss = 0.451154 (* 1 = 0.451154 loss)\nI0819 13:00:19.129294 21584 solver.cpp:228] Iteration 35400, loss = 0.0749952\nI0819 13:00:19.129338 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:00:19.129354 21584 solver.cpp:244]     Train net output #1: loss = 0.0749952 (* 1 = 0.0749952 loss)\nI0819 13:00:19.240298 21584 sgd_solver.cpp:166] Iteration 35400, lr = 0.885\nI0819 13:02:37.648854 21584 solver.cpp:337] Iteration 35500, Testing net (#0)\nI0819 13:03:59.987890 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0819 13:03:59.988171 21584 solver.cpp:404]     Test net output #1: loss = 0.452592 (* 1 = 0.452592 loss)\nI0819 13:04:01.289894 21584 solver.cpp:228] Iteration 35500, loss = 0.11984\nI0819 13:04:01.289937 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:04:01.289952 21584 solver.cpp:244]     Train net output #1: loss = 0.11984 (* 1 = 0.11984 loss)\nI0819 13:04:01.405539 21584 sgd_solver.cpp:166] Iteration 35500, lr = 0.8875\nI0819 13:06:19.685200 21584 solver.cpp:337] Iteration 35600, Testing net (#0)\nI0819 13:07:42.033005 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88616\nI0819 
13:07:42.033308 21584 solver.cpp:404]     Test net output #1: loss = 0.437789 (* 1 = 0.437789 loss)\nI0819 13:07:43.335389 21584 solver.cpp:228] Iteration 35600, loss = 0.0385353\nI0819 13:07:43.335430 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 13:07:43.335445 21584 solver.cpp:244]     Train net output #1: loss = 0.0385352 (* 1 = 0.0385352 loss)\nI0819 13:07:43.447136 21584 sgd_solver.cpp:166] Iteration 35600, lr = 0.89\nI0819 13:10:01.541538 21584 solver.cpp:337] Iteration 35700, Testing net (#0)\nI0819 13:11:23.923324 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87536\nI0819 13:11:23.923591 21584 solver.cpp:404]     Test net output #1: loss = 0.452483 (* 1 = 0.452483 loss)\nI0819 13:11:25.226068 21584 solver.cpp:228] Iteration 35700, loss = 0.0732944\nI0819 13:11:25.226111 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 13:11:25.226127 21584 solver.cpp:244]     Train net output #1: loss = 0.0732944 (* 1 = 0.0732944 loss)\nI0819 13:11:25.334381 21584 sgd_solver.cpp:166] Iteration 35700, lr = 0.8925\nI0819 13:13:43.222312 21584 solver.cpp:337] Iteration 35800, Testing net (#0)\nI0819 13:15:05.534955 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0819 13:15:05.535254 21584 solver.cpp:404]     Test net output #1: loss = 0.450419 (* 1 = 0.450419 loss)\nI0819 13:15:06.836724 21584 solver.cpp:228] Iteration 35800, loss = 0.105745\nI0819 13:15:06.836772 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:15:06.836789 21584 solver.cpp:244]     Train net output #1: loss = 0.105745 (* 1 = 0.105745 loss)\nI0819 13:15:06.944378 21584 sgd_solver.cpp:166] Iteration 35800, lr = 0.895\nI0819 13:17:24.783535 21584 solver.cpp:337] Iteration 35900, Testing net (#0)\nI0819 13:18:47.096130 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0819 13:18:47.096424 21584 solver.cpp:404]     Test net output #1: loss = 0.428454 (* 1 = 0.428454 loss)\nI0819 13:18:48.397761 
21584 solver.cpp:228] Iteration 35900, loss = 0.0883862\nI0819 13:18:48.397804 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:18:48.397820 21584 solver.cpp:244]     Train net output #1: loss = 0.0883862 (* 1 = 0.0883862 loss)\nI0819 13:18:48.506476 21584 sgd_solver.cpp:166] Iteration 35900, lr = 0.8975\nI0819 13:21:06.350698 21584 solver.cpp:337] Iteration 36000, Testing net (#0)\nI0819 13:22:28.667690 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0819 13:22:28.668004 21584 solver.cpp:404]     Test net output #1: loss = 0.441411 (* 1 = 0.441411 loss)\nI0819 13:22:29.970187 21584 solver.cpp:228] Iteration 36000, loss = 0.139508\nI0819 13:22:29.970230 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 13:22:29.970247 21584 solver.cpp:244]     Train net output #1: loss = 0.139508 (* 1 = 0.139508 loss)\nI0819 13:22:30.078665 21584 sgd_solver.cpp:166] Iteration 36000, lr = 0.9\nI0819 13:24:47.900498 21584 solver.cpp:337] Iteration 36100, Testing net (#0)\nI0819 13:26:10.372141 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0819 13:26:10.372448 21584 solver.cpp:404]     Test net output #1: loss = 0.43632 (* 1 = 0.43632 loss)\nI0819 13:26:11.674873 21584 solver.cpp:228] Iteration 36100, loss = 0.114204\nI0819 13:26:11.674918 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 13:26:11.674934 21584 solver.cpp:244]     Train net output #1: loss = 0.114204 (* 1 = 0.114204 loss)\nI0819 13:26:11.787597 21584 sgd_solver.cpp:166] Iteration 36100, lr = 0.9025\nI0819 13:28:29.775521 21584 solver.cpp:337] Iteration 36200, Testing net (#0)\nI0819 13:29:52.246958 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0819 13:29:52.247241 21584 solver.cpp:404]     Test net output #1: loss = 0.432135 (* 1 = 0.432135 loss)\nI0819 13:29:53.549067 21584 solver.cpp:228] Iteration 36200, loss = 0.0457592\nI0819 13:29:53.549111 21584 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI0819 13:29:53.549127 21584 solver.cpp:244]     Train net output #1: loss = 0.0457592 (* 1 = 0.0457592 loss)\nI0819 13:29:53.654742 21584 sgd_solver.cpp:166] Iteration 36200, lr = 0.905\nI0819 13:32:11.467072 21584 solver.cpp:337] Iteration 36300, Testing net (#0)\nI0819 13:33:33.927572 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0819 13:33:33.927873 21584 solver.cpp:404]     Test net output #1: loss = 0.441228 (* 1 = 0.441228 loss)\nI0819 13:33:35.230350 21584 solver.cpp:228] Iteration 36300, loss = 0.0636973\nI0819 13:33:35.230393 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 13:33:35.230409 21584 solver.cpp:244]     Train net output #1: loss = 0.0636973 (* 1 = 0.0636973 loss)\nI0819 13:33:35.340122 21584 sgd_solver.cpp:166] Iteration 36300, lr = 0.9075\nI0819 13:35:53.233943 21584 solver.cpp:337] Iteration 36400, Testing net (#0)\nI0819 13:37:15.696893 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0819 13:37:15.697201 21584 solver.cpp:404]     Test net output #1: loss = 0.463411 (* 1 = 0.463411 loss)\nI0819 13:37:16.999303 21584 solver.cpp:228] Iteration 36400, loss = 0.109209\nI0819 13:37:16.999347 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:37:16.999363 21584 solver.cpp:244]     Train net output #1: loss = 0.109209 (* 1 = 0.109209 loss)\nI0819 13:37:17.108325 21584 sgd_solver.cpp:166] Iteration 36400, lr = 0.91\nI0819 13:39:35.108090 21584 solver.cpp:337] Iteration 36500, Testing net (#0)\nI0819 13:40:57.591189 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88072\nI0819 13:40:57.591501 21584 solver.cpp:404]     Test net output #1: loss = 0.455948 (* 1 = 0.455948 loss)\nI0819 13:40:58.893299 21584 solver.cpp:228] Iteration 36500, loss = 0.128112\nI0819 13:40:58.893343 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 13:40:58.893359 21584 solver.cpp:244]     Train net output #1: loss = 0.128112 (* 1 = 0.128112 
loss)\nI0819 13:40:59.004367 21584 sgd_solver.cpp:166] Iteration 36500, lr = 0.9125\nI0819 13:43:17.045377 21584 solver.cpp:337] Iteration 36600, Testing net (#0)\nI0819 13:44:39.529851 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8768\nI0819 13:44:39.530159 21584 solver.cpp:404]     Test net output #1: loss = 0.462731 (* 1 = 0.462731 loss)\nI0819 13:44:40.832300 21584 solver.cpp:228] Iteration 36600, loss = 0.197457\nI0819 13:44:40.832343 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 13:44:40.832360 21584 solver.cpp:244]     Train net output #1: loss = 0.197457 (* 1 = 0.197457 loss)\nI0819 13:44:40.947665 21584 sgd_solver.cpp:166] Iteration 36600, lr = 0.915\nI0819 13:46:58.791694 21584 solver.cpp:337] Iteration 36700, Testing net (#0)\nI0819 13:48:21.260469 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88256\nI0819 13:48:21.260778 21584 solver.cpp:404]     Test net output #1: loss = 0.440864 (* 1 = 0.440864 loss)\nI0819 13:48:22.562274 21584 solver.cpp:228] Iteration 36700, loss = 0.0897526\nI0819 13:48:22.562317 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:48:22.562335 21584 solver.cpp:244]     Train net output #1: loss = 0.0897525 (* 1 = 0.0897525 loss)\nI0819 13:48:22.675127 21584 sgd_solver.cpp:166] Iteration 36700, lr = 0.9175\nI0819 13:50:40.609149 21584 solver.cpp:337] Iteration 36800, Testing net (#0)\nI0819 13:52:03.068315 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0819 13:52:03.068596 21584 solver.cpp:404]     Test net output #1: loss = 0.425015 (* 1 = 0.425015 loss)\nI0819 13:52:04.370041 21584 solver.cpp:228] Iteration 36800, loss = 0.251476\nI0819 13:52:04.370085 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 13:52:04.370100 21584 solver.cpp:244]     Train net output #1: loss = 0.251475 (* 1 = 0.251475 loss)\nI0819 13:52:04.479804 21584 sgd_solver.cpp:166] Iteration 36800, lr = 0.92\nI0819 13:54:22.376210 21584 
solver.cpp:337] Iteration 36900, Testing net (#0)\nI0819 13:55:44.860069 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0819 13:55:44.860354 21584 solver.cpp:404]     Test net output #1: loss = 0.416234 (* 1 = 0.416234 loss)\nI0819 13:55:46.162670 21584 solver.cpp:228] Iteration 36900, loss = 0.125461\nI0819 13:55:46.162714 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:55:46.162729 21584 solver.cpp:244]     Train net output #1: loss = 0.125461 (* 1 = 0.125461 loss)\nI0819 13:55:46.268990 21584 sgd_solver.cpp:166] Iteration 36900, lr = 0.9225\nI0819 13:58:03.744283 21584 solver.cpp:337] Iteration 37000, Testing net (#0)\nI0819 13:59:26.225623 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0819 13:59:26.225924 21584 solver.cpp:404]     Test net output #1: loss = 0.449138 (* 1 = 0.449138 loss)\nI0819 13:59:27.527796 21584 solver.cpp:228] Iteration 37000, loss = 0.0878445\nI0819 13:59:27.527840 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 13:59:27.527856 21584 solver.cpp:244]     Train net output #1: loss = 0.0878444 (* 1 = 0.0878444 loss)\nI0819 13:59:27.634387 21584 sgd_solver.cpp:166] Iteration 37000, lr = 0.925\nI0819 14:01:45.185369 21584 solver.cpp:337] Iteration 37100, Testing net (#0)\nI0819 14:03:07.671911 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0819 14:03:07.672228 21584 solver.cpp:404]     Test net output #1: loss = 0.443984 (* 1 = 0.443984 loss)\nI0819 14:03:08.974117 21584 solver.cpp:228] Iteration 37100, loss = 0.106902\nI0819 14:03:08.974162 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 14:03:08.974179 21584 solver.cpp:244]     Train net output #1: loss = 0.106901 (* 1 = 0.106901 loss)\nI0819 14:03:09.080355 21584 sgd_solver.cpp:166] Iteration 37100, lr = 0.9275\nI0819 14:05:26.656873 21584 solver.cpp:337] Iteration 37200, Testing net (#0)\nI0819 14:06:49.139544 21584 solver.cpp:404]     Test net output #0: 
accuracy = 0.88464\nI0819 14:06:49.139855 21584 solver.cpp:404]     Test net output #1: loss = 0.425882 (* 1 = 0.425882 loss)\nI0819 14:06:50.441877 21584 solver.cpp:228] Iteration 37200, loss = 0.0841881\nI0819 14:06:50.441926 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:06:50.441942 21584 solver.cpp:244]     Train net output #1: loss = 0.084188 (* 1 = 0.084188 loss)\nI0819 14:06:50.545636 21584 sgd_solver.cpp:166] Iteration 37200, lr = 0.93\nI0819 14:09:08.152915 21584 solver.cpp:337] Iteration 37300, Testing net (#0)\nI0819 14:10:30.641952 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87504\nI0819 14:10:30.642273 21584 solver.cpp:404]     Test net output #1: loss = 0.45729 (* 1 = 0.45729 loss)\nI0819 14:10:31.944252 21584 solver.cpp:228] Iteration 37300, loss = 0.125927\nI0819 14:10:31.944293 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:10:31.944309 21584 solver.cpp:244]     Train net output #1: loss = 0.125927 (* 1 = 0.125927 loss)\nI0819 14:10:32.044965 21584 sgd_solver.cpp:166] Iteration 37300, lr = 0.9325\nI0819 14:12:49.603893 21584 solver.cpp:337] Iteration 37400, Testing net (#0)\nI0819 14:14:12.077621 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0819 14:14:12.077911 21584 solver.cpp:404]     Test net output #1: loss = 0.441411 (* 1 = 0.441411 loss)\nI0819 14:14:13.376654 21584 solver.cpp:228] Iteration 37400, loss = 0.12646\nI0819 14:14:13.376695 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:14:13.376711 21584 solver.cpp:244]     Train net output #1: loss = 0.126459 (* 1 = 0.126459 loss)\nI0819 14:14:13.491277 21584 sgd_solver.cpp:166] Iteration 37400, lr = 0.935\nI0819 14:16:31.035920 21584 solver.cpp:337] Iteration 37500, Testing net (#0)\nI0819 14:17:53.517066 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87756\nI0819 14:17:53.517369 21584 solver.cpp:404]     Test net output #1: loss = 0.451434 (* 1 = 0.451434 
loss)\nI0819 14:17:54.816021 21584 solver.cpp:228] Iteration 37500, loss = 0.212028\nI0819 14:17:54.816061 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 14:17:54.816077 21584 solver.cpp:244]     Train net output #1: loss = 0.212028 (* 1 = 0.212028 loss)\nI0819 14:17:54.921650 21584 sgd_solver.cpp:166] Iteration 37500, lr = 0.9375\nI0819 14:20:12.365245 21584 solver.cpp:337] Iteration 37600, Testing net (#0)\nI0819 14:21:34.842981 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87648\nI0819 14:21:34.843292 21584 solver.cpp:404]     Test net output #1: loss = 0.456764 (* 1 = 0.456764 loss)\nI0819 14:21:36.141219 21584 solver.cpp:228] Iteration 37600, loss = 0.117125\nI0819 14:21:36.141263 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:21:36.141278 21584 solver.cpp:244]     Train net output #1: loss = 0.117125 (* 1 = 0.117125 loss)\nI0819 14:21:36.252151 21584 sgd_solver.cpp:166] Iteration 37600, lr = 0.94\nI0819 14:23:53.801090 21584 solver.cpp:337] Iteration 37700, Testing net (#0)\nI0819 14:25:16.278833 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 14:25:16.279122 21584 solver.cpp:404]     Test net output #1: loss = 0.438702 (* 1 = 0.438702 loss)\nI0819 14:25:17.577821 21584 solver.cpp:228] Iteration 37700, loss = 0.148298\nI0819 14:25:17.577862 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 14:25:17.577877 21584 solver.cpp:244]     Train net output #1: loss = 0.148298 (* 1 = 0.148298 loss)\nI0819 14:25:17.689199 21584 sgd_solver.cpp:166] Iteration 37700, lr = 0.9425\nI0819 14:27:35.243108 21584 solver.cpp:337] Iteration 37800, Testing net (#0)\nI0819 14:28:57.714509 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87376\nI0819 14:28:57.714828 21584 solver.cpp:404]     Test net output #1: loss = 0.475011 (* 1 = 0.475011 loss)\nI0819 14:28:59.012768 21584 solver.cpp:228] Iteration 37800, loss = 0.144288\nI0819 14:28:59.012809 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:28:59.012825 21584 solver.cpp:244]     Train net output #1: loss = 0.144288 (* 1 = 0.144288 loss)\nI0819 14:28:59.122903 21584 sgd_solver.cpp:166] Iteration 37800, lr = 0.945\nI0819 14:31:16.675459 21584 solver.cpp:337] Iteration 37900, Testing net (#0)\nI0819 14:32:39.151159 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0819 14:32:39.151469 21584 solver.cpp:404]     Test net output #1: loss = 0.414916 (* 1 = 0.414916 loss)\nI0819 14:32:40.450461 21584 solver.cpp:228] Iteration 37900, loss = 0.0991645\nI0819 14:32:40.450505 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:32:40.450520 21584 solver.cpp:244]     Train net output #1: loss = 0.0991644 (* 1 = 0.0991644 loss)\nI0819 14:32:40.561285 21584 sgd_solver.cpp:166] Iteration 37900, lr = 0.9475\nI0819 14:34:58.228793 21584 solver.cpp:337] Iteration 38000, Testing net (#0)\nI0819 14:36:20.701189 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 14:36:20.701500 21584 solver.cpp:404]     Test net output #1: loss = 0.431274 (* 1 = 0.431274 loss)\nI0819 14:36:21.999963 21584 solver.cpp:228] Iteration 38000, loss = 0.0859494\nI0819 14:36:22.000007 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 14:36:22.000025 21584 solver.cpp:244]     Train net output #1: loss = 0.0859494 (* 1 = 0.0859494 loss)\nI0819 14:36:22.110581 21584 sgd_solver.cpp:166] Iteration 38000, lr = 0.95\nI0819 14:38:39.708467 21584 solver.cpp:337] Iteration 38100, Testing net (#0)\nI0819 14:40:02.199698 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 14:40:02.200003 21584 solver.cpp:404]     Test net output #1: loss = 0.436364 (* 1 = 0.436364 loss)\nI0819 14:40:03.499663 21584 solver.cpp:228] Iteration 38100, loss = 0.107504\nI0819 14:40:03.499709 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 14:40:03.499734 21584 solver.cpp:244]     Train net 
output #1: loss = 0.107504 (* 1 = 0.107504 loss)\nI0819 14:40:03.606523 21584 sgd_solver.cpp:166] Iteration 38100, lr = 0.9525\nI0819 14:42:21.252776 21584 solver.cpp:337] Iteration 38200, Testing net (#0)\nI0819 14:43:43.764315 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0819 14:43:43.764617 21584 solver.cpp:404]     Test net output #1: loss = 0.444168 (* 1 = 0.444168 loss)\nI0819 14:43:45.063619 21584 solver.cpp:228] Iteration 38200, loss = 0.0705126\nI0819 14:43:45.063670 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:43:45.063696 21584 solver.cpp:244]     Train net output #1: loss = 0.0705125 (* 1 = 0.0705125 loss)\nI0819 14:43:45.170600 21584 sgd_solver.cpp:166] Iteration 38200, lr = 0.955\nI0819 14:46:02.760125 21584 solver.cpp:337] Iteration 38300, Testing net (#0)\nI0819 14:47:25.246628 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88548\nI0819 14:47:25.246968 21584 solver.cpp:404]     Test net output #1: loss = 0.444411 (* 1 = 0.444411 loss)\nI0819 14:47:26.546169 21584 solver.cpp:228] Iteration 38300, loss = 0.0743812\nI0819 14:47:26.546213 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:47:26.546237 21584 solver.cpp:244]     Train net output #1: loss = 0.0743812 (* 1 = 0.0743812 loss)\nI0819 14:47:26.651867 21584 sgd_solver.cpp:166] Iteration 38300, lr = 0.9575\nI0819 14:49:44.350807 21584 solver.cpp:337] Iteration 38400, Testing net (#0)\nI0819 14:51:06.842622 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87656\nI0819 14:51:06.842960 21584 solver.cpp:404]     Test net output #1: loss = 0.455765 (* 1 = 0.455765 loss)\nI0819 14:51:08.142402 21584 solver.cpp:228] Iteration 38400, loss = 0.10294\nI0819 14:51:08.142449 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:51:08.142474 21584 solver.cpp:244]     Train net output #1: loss = 0.10294 (* 1 = 0.10294 loss)\nI0819 14:51:08.245841 21584 sgd_solver.cpp:166] Iteration 38400, lr = 
0.96\nI0819 14:53:25.909054 21584 solver.cpp:337] Iteration 38500, Testing net (#0)\nI0819 14:54:48.398766 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0819 14:54:48.399061 21584 solver.cpp:404]     Test net output #1: loss = 0.413545 (* 1 = 0.413545 loss)\nI0819 14:54:49.697866 21584 solver.cpp:228] Iteration 38500, loss = 0.0993816\nI0819 14:54:49.697914 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 14:54:49.697938 21584 solver.cpp:244]     Train net output #1: loss = 0.0993815 (* 1 = 0.0993815 loss)\nI0819 14:54:49.804352 21584 sgd_solver.cpp:166] Iteration 38500, lr = 0.9625\nI0819 14:57:07.404430 21584 solver.cpp:337] Iteration 38600, Testing net (#0)\nI0819 14:58:29.890250 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0819 14:58:29.890784 21584 solver.cpp:404]     Test net output #1: loss = 0.441603 (* 1 = 0.441603 loss)\nI0819 14:58:31.190775 21584 solver.cpp:228] Iteration 38600, loss = 0.0851321\nI0819 14:58:31.190821 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 14:58:31.190846 21584 solver.cpp:244]     Train net output #1: loss = 0.085132 (* 1 = 0.085132 loss)\nI0819 14:58:31.303997 21584 sgd_solver.cpp:166] Iteration 38600, lr = 0.965\nI0819 15:00:48.953183 21584 solver.cpp:337] Iteration 38700, Testing net (#0)\nI0819 15:02:11.448918 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88904\nI0819 15:02:11.449244 21584 solver.cpp:404]     Test net output #1: loss = 0.416628 (* 1 = 0.416628 loss)\nI0819 15:02:12.748762 21584 solver.cpp:228] Iteration 38700, loss = 0.095244\nI0819 15:02:12.748809 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:02:12.748832 21584 solver.cpp:244]     Train net output #1: loss = 0.0952439 (* 1 = 0.0952439 loss)\nI0819 15:02:12.858348 21584 sgd_solver.cpp:166] Iteration 38700, lr = 0.9675\nI0819 15:04:30.416754 21584 solver.cpp:337] Iteration 38800, Testing net (#0)\nI0819 15:05:52.892459 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.88924\nI0819 15:05:52.892797 21584 solver.cpp:404]     Test net output #1: loss = 0.422695 (* 1 = 0.422695 loss)\nI0819 15:05:54.191751 21584 solver.cpp:228] Iteration 38800, loss = 0.0819013\nI0819 15:05:54.191793 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:05:54.191808 21584 solver.cpp:244]     Train net output #1: loss = 0.0819012 (* 1 = 0.0819012 loss)\nI0819 15:05:54.307798 21584 sgd_solver.cpp:166] Iteration 38800, lr = 0.97\nI0819 15:08:11.913744 21584 solver.cpp:337] Iteration 38900, Testing net (#0)\nI0819 15:09:34.274123 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0819 15:09:34.274417 21584 solver.cpp:404]     Test net output #1: loss = 0.415279 (* 1 = 0.415279 loss)\nI0819 15:09:35.572973 21584 solver.cpp:228] Iteration 38900, loss = 0.148069\nI0819 15:09:35.573014 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 15:09:35.573029 21584 solver.cpp:244]     Train net output #1: loss = 0.148069 (* 1 = 0.148069 loss)\nI0819 15:09:35.685600 21584 sgd_solver.cpp:166] Iteration 38900, lr = 0.9725\nI0819 15:11:53.239383 21584 solver.cpp:337] Iteration 39000, Testing net (#0)\nI0819 15:13:15.553906 21584 solver.cpp:404]     Test net output #0: accuracy = 0.883281\nI0819 15:13:15.554224 21584 solver.cpp:404]     Test net output #1: loss = 0.425445 (* 1 = 0.425445 loss)\nI0819 15:13:16.852635 21584 solver.cpp:228] Iteration 39000, loss = 0.0535886\nI0819 15:13:16.852676 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 15:13:16.852691 21584 solver.cpp:244]     Train net output #1: loss = 0.0535884 (* 1 = 0.0535884 loss)\nI0819 15:13:16.959483 21584 sgd_solver.cpp:166] Iteration 39000, lr = 0.975\nI0819 15:15:34.513352 21584 solver.cpp:337] Iteration 39100, Testing net (#0)\nI0819 15:16:56.873078 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0819 15:16:56.873389 21584 solver.cpp:404]     Test net 
output #1: loss = 0.411317 (* 1 = 0.411317 loss)\nI0819 15:16:58.173010 21584 solver.cpp:228] Iteration 39100, loss = 0.0686645\nI0819 15:16:58.173054 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 15:16:58.173071 21584 solver.cpp:244]     Train net output #1: loss = 0.0686644 (* 1 = 0.0686644 loss)\nI0819 15:16:58.280923 21584 sgd_solver.cpp:166] Iteration 39100, lr = 0.9775\nI0819 15:19:15.869089 21584 solver.cpp:337] Iteration 39200, Testing net (#0)\nI0819 15:20:38.283932 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87396\nI0819 15:20:38.284243 21584 solver.cpp:404]     Test net output #1: loss = 0.480308 (* 1 = 0.480308 loss)\nI0819 15:20:39.584110 21584 solver.cpp:228] Iteration 39200, loss = 0.0517399\nI0819 15:20:39.584158 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 15:20:39.584182 21584 solver.cpp:244]     Train net output #1: loss = 0.0517398 (* 1 = 0.0517398 loss)\nI0819 15:20:39.687273 21584 sgd_solver.cpp:166] Iteration 39200, lr = 0.98\nI0819 15:22:57.336067 21584 solver.cpp:337] Iteration 39300, Testing net (#0)\nI0819 15:24:19.764379 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0819 15:24:19.764655 21584 solver.cpp:404]     Test net output #1: loss = 0.424204 (* 1 = 0.424204 loss)\nI0819 15:24:21.064703 21584 solver.cpp:228] Iteration 39300, loss = 0.146739\nI0819 15:24:21.064750 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:24:21.064774 21584 solver.cpp:244]     Train net output #1: loss = 0.146739 (* 1 = 0.146739 loss)\nI0819 15:24:21.177842 21584 sgd_solver.cpp:166] Iteration 39300, lr = 0.9825\nI0819 15:26:38.862388 21584 solver.cpp:337] Iteration 39400, Testing net (#0)\nI0819 15:28:01.293104 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0819 15:28:01.293417 21584 solver.cpp:404]     Test net output #1: loss = 0.409193 (* 1 = 0.409193 loss)\nI0819 15:28:02.592819 21584 solver.cpp:228] Iteration 39400, loss = 
0.115977\nI0819 15:28:02.592869 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:28:02.592893 21584 solver.cpp:244]     Train net output #1: loss = 0.115977 (* 1 = 0.115977 loss)\nI0819 15:28:02.696774 21584 sgd_solver.cpp:166] Iteration 39400, lr = 0.985\nI0819 15:30:20.627658 21584 solver.cpp:337] Iteration 39500, Testing net (#0)\nI0819 15:31:43.075768 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0819 15:31:43.076095 21584 solver.cpp:404]     Test net output #1: loss = 0.444167 (* 1 = 0.444167 loss)\nI0819 15:31:44.373915 21584 solver.cpp:228] Iteration 39500, loss = 0.0963468\nI0819 15:31:44.373963 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:31:44.373987 21584 solver.cpp:244]     Train net output #1: loss = 0.0963467 (* 1 = 0.0963467 loss)\nI0819 15:31:44.491402 21584 sgd_solver.cpp:166] Iteration 39500, lr = 0.9875\nI0819 15:34:02.363365 21584 solver.cpp:337] Iteration 39600, Testing net (#0)\nI0819 15:35:25.726225 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0819 15:35:25.726496 21584 solver.cpp:404]     Test net output #1: loss = 0.445899 (* 1 = 0.445899 loss)\nI0819 15:35:27.030686 21584 solver.cpp:228] Iteration 39600, loss = 0.19377\nI0819 15:35:27.030727 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 15:35:27.030742 21584 solver.cpp:244]     Train net output #1: loss = 0.19377 (* 1 = 0.19377 loss)\nI0819 15:35:27.133723 21584 sgd_solver.cpp:166] Iteration 39600, lr = 0.99\nI0819 15:37:44.833066 21584 solver.cpp:337] Iteration 39700, Testing net (#0)\nI0819 15:39:07.913274 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88548\nI0819 15:39:07.913513 21584 solver.cpp:404]     Test net output #1: loss = 0.427838 (* 1 = 0.427838 loss)\nI0819 15:39:09.215337 21584 solver.cpp:228] Iteration 39700, loss = 0.0642426\nI0819 15:39:09.215376 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:39:09.215391 21584 
solver.cpp:244]     Train net output #1: loss = 0.0642425 (* 1 = 0.0642425 loss)\nI0819 15:39:09.320519 21584 sgd_solver.cpp:166] Iteration 39700, lr = 0.9925\nI0819 15:41:27.113636 21584 solver.cpp:337] Iteration 39800, Testing net (#0)\nI0819 15:42:50.099606 21584 solver.cpp:404]     Test net output #0: accuracy = 0.888\nI0819 15:42:50.099923 21584 solver.cpp:404]     Test net output #1: loss = 0.411795 (* 1 = 0.411795 loss)\nI0819 15:42:51.403162 21584 solver.cpp:228] Iteration 39800, loss = 0.119044\nI0819 15:42:51.403205 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:42:51.403221 21584 solver.cpp:244]     Train net output #1: loss = 0.119044 (* 1 = 0.119044 loss)\nI0819 15:42:51.511132 21584 sgd_solver.cpp:166] Iteration 39800, lr = 0.995\nI0819 15:45:09.337600 21584 solver.cpp:337] Iteration 39900, Testing net (#0)\nI0819 15:46:32.748144 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88816\nI0819 15:46:32.748428 21584 solver.cpp:404]     Test net output #1: loss = 0.407689 (* 1 = 0.407689 loss)\nI0819 15:46:34.053162 21584 solver.cpp:228] Iteration 39900, loss = 0.094845\nI0819 15:46:34.053210 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:46:34.053232 21584 solver.cpp:244]     Train net output #1: loss = 0.0948448 (* 1 = 0.0948448 loss)\nI0819 15:46:34.160943 21584 sgd_solver.cpp:166] Iteration 39900, lr = 0.9975\nI0819 15:48:52.145722 21584 solver.cpp:337] Iteration 40000, Testing net (#0)\nI0819 15:50:14.624244 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884001\nI0819 15:50:14.624519 21584 solver.cpp:404]     Test net output #1: loss = 0.430951 (* 1 = 0.430951 loss)\nI0819 15:50:15.924546 21584 solver.cpp:228] Iteration 40000, loss = 0.0834296\nI0819 15:50:15.924592 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 15:50:15.924607 21584 solver.cpp:244]     Train net output #1: loss = 0.0834294 (* 1 = 0.0834294 loss)\nI0819 15:50:16.034895 21584 
sgd_solver.cpp:166] Iteration 40000, lr = 1\nI0819 15:52:33.964435 21584 solver.cpp:337] Iteration 40100, Testing net (#0)\nI0819 15:53:56.445169 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0819 15:53:56.445474 21584 solver.cpp:404]     Test net output #1: loss = 0.43478 (* 1 = 0.43478 loss)\nI0819 15:53:57.744360 21584 solver.cpp:228] Iteration 40100, loss = 0.11753\nI0819 15:53:57.744405 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 15:53:57.744421 21584 solver.cpp:244]     Train net output #1: loss = 0.11753 (* 1 = 0.11753 loss)\nI0819 15:53:57.853689 21584 sgd_solver.cpp:166] Iteration 40100, lr = 1.0025\nI0819 15:56:15.843917 21584 solver.cpp:337] Iteration 40200, Testing net (#0)\nI0819 15:57:38.270208 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87904\nI0819 15:57:38.270505 21584 solver.cpp:404]     Test net output #1: loss = 0.449263 (* 1 = 0.449263 loss)\nI0819 15:57:39.569659 21584 solver.cpp:228] Iteration 40200, loss = 0.164939\nI0819 15:57:39.569702 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 15:57:39.569718 21584 solver.cpp:244]     Train net output #1: loss = 0.164939 (* 1 = 0.164939 loss)\nI0819 15:57:39.684700 21584 sgd_solver.cpp:166] Iteration 40200, lr = 1.005\nI0819 15:59:57.671780 21584 solver.cpp:337] Iteration 40300, Testing net (#0)\nI0819 16:01:20.100287 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87476\nI0819 16:01:20.100602 21584 solver.cpp:404]     Test net output #1: loss = 0.478922 (* 1 = 0.478922 loss)\nI0819 16:01:21.399411 21584 solver.cpp:228] Iteration 40300, loss = 0.28368\nI0819 16:01:21.399456 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 16:01:21.399471 21584 solver.cpp:244]     Train net output #1: loss = 0.28368 (* 1 = 0.28368 loss)\nI0819 16:01:21.511917 21584 sgd_solver.cpp:166] Iteration 40300, lr = 1.0075\nI0819 16:03:39.450294 21584 solver.cpp:337] Iteration 40400, Testing net (#0)\nI0819 
16:05:01.878235 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0819 16:05:01.878548 21584 solver.cpp:404]     Test net output #1: loss = 0.414507 (* 1 = 0.414507 loss)\nI0819 16:05:03.176952 21584 solver.cpp:228] Iteration 40400, loss = 0.0577403\nI0819 16:05:03.176996 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:05:03.177012 21584 solver.cpp:244]     Train net output #1: loss = 0.0577402 (* 1 = 0.0577402 loss)\nI0819 16:05:03.290527 21584 sgd_solver.cpp:166] Iteration 40400, lr = 1.01\nI0819 16:07:21.629756 21584 solver.cpp:337] Iteration 40500, Testing net (#0)\nI0819 16:08:44.051378 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88776\nI0819 16:08:44.051674 21584 solver.cpp:404]     Test net output #1: loss = 0.406734 (* 1 = 0.406734 loss)\nI0819 16:08:45.349989 21584 solver.cpp:228] Iteration 40500, loss = 0.0709822\nI0819 16:08:45.350034 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 16:08:45.350050 21584 solver.cpp:244]     Train net output #1: loss = 0.0709821 (* 1 = 0.0709821 loss)\nI0819 16:08:45.462599 21584 sgd_solver.cpp:166] Iteration 40500, lr = 1.0125\nI0819 16:11:03.834700 21584 solver.cpp:337] Iteration 40600, Testing net (#0)\nI0819 16:12:26.253515 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88228\nI0819 16:12:26.253877 21584 solver.cpp:404]     Test net output #1: loss = 0.438976 (* 1 = 0.438976 loss)\nI0819 16:12:27.552038 21584 solver.cpp:228] Iteration 40600, loss = 0.118761\nI0819 16:12:27.552079 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:12:27.552096 21584 solver.cpp:244]     Train net output #1: loss = 0.118761 (* 1 = 0.118761 loss)\nI0819 16:12:27.670835 21584 sgd_solver.cpp:166] Iteration 40600, lr = 1.015\nI0819 16:14:46.118696 21584 solver.cpp:337] Iteration 40700, Testing net (#0)\nI0819 16:16:08.536393 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0819 16:16:08.536705 21584 
solver.cpp:404]     Test net output #1: loss = 0.447667 (* 1 = 0.447667 loss)\nI0819 16:16:09.835703 21584 solver.cpp:228] Iteration 40700, loss = 0.108546\nI0819 16:16:09.835752 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 16:16:09.835768 21584 solver.cpp:244]     Train net output #1: loss = 0.108546 (* 1 = 0.108546 loss)\nI0819 16:16:09.951395 21584 sgd_solver.cpp:166] Iteration 40700, lr = 1.0175\nI0819 16:18:28.428509 21584 solver.cpp:337] Iteration 40800, Testing net (#0)\nI0819 16:19:50.848037 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0819 16:19:50.848331 21584 solver.cpp:404]     Test net output #1: loss = 0.416698 (* 1 = 0.416698 loss)\nI0819 16:19:52.147033 21584 solver.cpp:228] Iteration 40800, loss = 0.0468205\nI0819 16:19:52.147076 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:19:52.147092 21584 solver.cpp:244]     Train net output #1: loss = 0.0468204 (* 1 = 0.0468204 loss)\nI0819 16:19:52.261011 21584 sgd_solver.cpp:166] Iteration 40800, lr = 1.02\nI0819 16:22:10.699826 21584 solver.cpp:337] Iteration 40900, Testing net (#0)\nI0819 16:23:33.119372 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0819 16:23:33.119680 21584 solver.cpp:404]     Test net output #1: loss = 0.426601 (* 1 = 0.426601 loss)\nI0819 16:23:34.418351 21584 solver.cpp:228] Iteration 40900, loss = 0.122733\nI0819 16:23:34.418392 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 16:23:34.418408 21584 solver.cpp:244]     Train net output #1: loss = 0.122733 (* 1 = 0.122733 loss)\nI0819 16:23:34.530006 21584 sgd_solver.cpp:166] Iteration 40900, lr = 1.0225\nI0819 16:25:52.955914 21584 solver.cpp:337] Iteration 41000, Testing net (#0)\nI0819 16:27:15.422999 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87952\nI0819 16:27:15.423285 21584 solver.cpp:404]     Test net output #1: loss = 0.431301 (* 1 = 0.431301 loss)\nI0819 16:27:16.721688 21584 solver.cpp:228] 
Iteration 41000, loss = 0.0587591\nI0819 16:27:16.721730 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 16:27:16.721751 21584 solver.cpp:244]     Train net output #1: loss = 0.0587589 (* 1 = 0.0587589 loss)\nI0819 16:27:16.842198 21584 sgd_solver.cpp:166] Iteration 41000, lr = 1.025\nI0819 16:29:35.295244 21584 solver.cpp:337] Iteration 41100, Testing net (#0)\nI0819 16:30:57.755872 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0819 16:30:57.756170 21584 solver.cpp:404]     Test net output #1: loss = 0.410555 (* 1 = 0.410555 loss)\nI0819 16:30:59.055212 21584 solver.cpp:228] Iteration 41100, loss = 0.0693507\nI0819 16:30:59.055254 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:30:59.055270 21584 solver.cpp:244]     Train net output #1: loss = 0.0693505 (* 1 = 0.0693505 loss)\nI0819 16:30:59.175019 21584 sgd_solver.cpp:166] Iteration 41100, lr = 1.0275\nI0819 16:33:17.580070 21584 solver.cpp:337] Iteration 41200, Testing net (#0)\nI0819 16:34:40.035362 21584 solver.cpp:404]     Test net output #0: accuracy = 0.879\nI0819 16:34:40.035620 21584 solver.cpp:404]     Test net output #1: loss = 0.450128 (* 1 = 0.450128 loss)\nI0819 16:34:41.334115 21584 solver.cpp:228] Iteration 41200, loss = 0.161706\nI0819 16:34:41.334156 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 16:34:41.334172 21584 solver.cpp:244]     Train net output #1: loss = 0.161706 (* 1 = 0.161706 loss)\nI0819 16:34:41.447865 21584 sgd_solver.cpp:166] Iteration 41200, lr = 1.03\nI0819 16:36:59.877080 21584 solver.cpp:337] Iteration 41300, Testing net (#0)\nI0819 16:38:22.332911 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88584\nI0819 16:38:22.333183 21584 solver.cpp:404]     Test net output #1: loss = 0.414456 (* 1 = 0.414456 loss)\nI0819 16:38:23.631667 21584 solver.cpp:228] Iteration 41300, loss = 0.0880044\nI0819 16:38:23.631709 21584 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI0819 16:38:23.631726 21584 solver.cpp:244]     Train net output #1: loss = 0.0880042 (* 1 = 0.0880042 loss)\nI0819 16:38:23.760685 21584 sgd_solver.cpp:166] Iteration 41300, lr = 1.0325\nI0819 16:40:42.089401 21584 solver.cpp:337] Iteration 41400, Testing net (#0)\nI0819 16:42:04.548804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88364\nI0819 16:42:04.549095 21584 solver.cpp:404]     Test net output #1: loss = 0.420155 (* 1 = 0.420155 loss)\nI0819 16:42:05.847002 21584 solver.cpp:228] Iteration 41400, loss = 0.0723058\nI0819 16:42:05.847043 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 16:42:05.847059 21584 solver.cpp:244]     Train net output #1: loss = 0.0723055 (* 1 = 0.0723055 loss)\nI0819 16:42:05.961376 21584 sgd_solver.cpp:166] Iteration 41400, lr = 1.035\nI0819 16:44:24.279513 21584 solver.cpp:337] Iteration 41500, Testing net (#0)\nI0819 16:45:46.736589 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88744\nI0819 16:45:46.736836 21584 solver.cpp:404]     Test net output #1: loss = 0.410111 (* 1 = 0.410111 loss)\nI0819 16:45:48.034867 21584 solver.cpp:228] Iteration 41500, loss = 0.100063\nI0819 16:45:48.034909 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:45:48.034926 21584 solver.cpp:244]     Train net output #1: loss = 0.100063 (* 1 = 0.100063 loss)\nI0819 16:45:48.154528 21584 sgd_solver.cpp:166] Iteration 41500, lr = 1.0375\nI0819 16:48:06.603691 21584 solver.cpp:337] Iteration 41600, Testing net (#0)\nI0819 16:49:29.084601 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0819 16:49:29.084869 21584 solver.cpp:404]     Test net output #1: loss = 0.434272 (* 1 = 0.434272 loss)\nI0819 16:49:30.383440 21584 solver.cpp:228] Iteration 41600, loss = 0.107062\nI0819 16:49:30.383486 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 16:49:30.383502 21584 solver.cpp:244]     Train net output #1: loss = 0.107062 (* 1 = 0.107062 loss)\nI0819 
16:49:30.498324 21584 sgd_solver.cpp:166] Iteration 41600, lr = 1.04\nI0819 16:51:48.862885 21584 solver.cpp:337] Iteration 41700, Testing net (#0)\nI0819 16:53:12.154870 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0819 16:53:12.155239 21584 solver.cpp:404]     Test net output #1: loss = 0.443985 (* 1 = 0.443985 loss)\nI0819 16:53:13.458963 21584 solver.cpp:228] Iteration 41700, loss = 0.0720979\nI0819 16:53:13.459007 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:53:13.459022 21584 solver.cpp:244]     Train net output #1: loss = 0.0720976 (* 1 = 0.0720976 loss)\nI0819 16:53:13.574235 21584 sgd_solver.cpp:166] Iteration 41700, lr = 1.0425\nI0819 16:55:32.124263 21584 solver.cpp:337] Iteration 41800, Testing net (#0)\nI0819 16:56:55.561919 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 16:56:55.562256 21584 solver.cpp:404]     Test net output #1: loss = 0.410655 (* 1 = 0.410655 loss)\nI0819 16:56:56.864493 21584 solver.cpp:228] Iteration 41800, loss = 0.0907887\nI0819 16:56:56.864547 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 16:56:56.864563 21584 solver.cpp:244]     Train net output #1: loss = 0.0907884 (* 1 = 0.0907884 loss)\nI0819 16:56:56.983702 21584 sgd_solver.cpp:166] Iteration 41800, lr = 1.045\nI0819 16:59:15.539137 21584 solver.cpp:337] Iteration 41900, Testing net (#0)\nI0819 17:00:37.999261 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88484\nI0819 17:00:37.999564 21584 solver.cpp:404]     Test net output #1: loss = 0.426513 (* 1 = 0.426513 loss)\nI0819 17:00:39.299403 21584 solver.cpp:228] Iteration 41900, loss = 0.126038\nI0819 17:00:39.299448 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:00:39.299463 21584 solver.cpp:244]     Train net output #1: loss = 0.126037 (* 1 = 0.126037 loss)\nI0819 17:00:39.419798 21584 sgd_solver.cpp:166] Iteration 41900, lr = 1.0475\nI0819 17:02:58.029824 21584 solver.cpp:337] 
Iteration 42000, Testing net (#0)\nI0819 17:04:20.512861 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88524\nI0819 17:04:20.513164 21584 solver.cpp:404]     Test net output #1: loss = 0.428902 (* 1 = 0.428902 loss)\nI0819 17:04:21.812860 21584 solver.cpp:228] Iteration 42000, loss = 0.0903655\nI0819 17:04:21.812904 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:04:21.812921 21584 solver.cpp:244]     Train net output #1: loss = 0.0903652 (* 1 = 0.0903652 loss)\nI0819 17:04:21.929826 21584 sgd_solver.cpp:166] Iteration 42000, lr = 1.05\nI0819 17:06:40.524340 21584 solver.cpp:337] Iteration 42100, Testing net (#0)\nI0819 17:08:03.010978 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 17:08:03.011241 21584 solver.cpp:404]     Test net output #1: loss = 0.419383 (* 1 = 0.419383 loss)\nI0819 17:08:04.310875 21584 solver.cpp:228] Iteration 42100, loss = 0.209747\nI0819 17:08:04.310920 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 17:08:04.310936 21584 solver.cpp:244]     Train net output #1: loss = 0.209746 (* 1 = 0.209746 loss)\nI0819 17:08:04.424935 21584 sgd_solver.cpp:166] Iteration 42100, lr = 1.0525\nI0819 17:10:23.077033 21584 solver.cpp:337] Iteration 42200, Testing net (#0)\nI0819 17:11:45.560956 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88184\nI0819 17:11:45.561218 21584 solver.cpp:404]     Test net output #1: loss = 0.427538 (* 1 = 0.427538 loss)\nI0819 17:11:46.859717 21584 solver.cpp:228] Iteration 42200, loss = 0.128251\nI0819 17:11:46.859758 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:11:46.859774 21584 solver.cpp:244]     Train net output #1: loss = 0.128251 (* 1 = 0.128251 loss)\nI0819 17:11:46.976835 21584 sgd_solver.cpp:166] Iteration 42200, lr = 1.055\nI0819 17:14:05.576788 21584 solver.cpp:337] Iteration 42300, Testing net (#0)\nI0819 17:15:28.055961 21584 solver.cpp:404]     Test net output #0: accuracy = 
0.88204\nI0819 17:15:28.056252 21584 solver.cpp:404]     Test net output #1: loss = 0.41274 (* 1 = 0.41274 loss)\nI0819 17:15:29.354809 21584 solver.cpp:228] Iteration 42300, loss = 0.0583062\nI0819 17:15:29.354851 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 17:15:29.354866 21584 solver.cpp:244]     Train net output #1: loss = 0.0583059 (* 1 = 0.0583059 loss)\nI0819 17:15:29.478096 21584 sgd_solver.cpp:166] Iteration 42300, lr = 1.0575\nI0819 17:17:48.060250 21584 solver.cpp:337] Iteration 42400, Testing net (#0)\nI0819 17:19:10.539857 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0819 17:19:10.540160 21584 solver.cpp:404]     Test net output #1: loss = 0.458946 (* 1 = 0.458946 loss)\nI0819 17:19:11.838735 21584 solver.cpp:228] Iteration 42400, loss = 0.0803763\nI0819 17:19:11.838779 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:19:11.838795 21584 solver.cpp:244]     Train net output #1: loss = 0.080376 (* 1 = 0.080376 loss)\nI0819 17:19:11.960567 21584 sgd_solver.cpp:166] Iteration 42400, lr = 1.06\nI0819 17:21:30.634708 21584 solver.cpp:337] Iteration 42500, Testing net (#0)\nI0819 17:22:53.080957 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0819 17:22:53.081261 21584 solver.cpp:404]     Test net output #1: loss = 0.423322 (* 1 = 0.423322 loss)\nI0819 17:22:54.379961 21584 solver.cpp:228] Iteration 42500, loss = 0.116231\nI0819 17:22:54.380002 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 17:22:54.380018 21584 solver.cpp:244]     Train net output #1: loss = 0.116231 (* 1 = 0.116231 loss)\nI0819 17:22:54.501607 21584 sgd_solver.cpp:166] Iteration 42500, lr = 1.0625\nI0819 17:25:13.086333 21584 solver.cpp:337] Iteration 42600, Testing net (#0)\nI0819 17:26:35.464601 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88144\nI0819 17:26:35.464908 21584 solver.cpp:404]     Test net output #1: loss = 0.416247 (* 1 = 0.416247 loss)\nI0819 
17:26:36.765123 21584 solver.cpp:228] Iteration 42600, loss = 0.104416\nI0819 17:26:36.765162 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:26:36.765177 21584 solver.cpp:244]     Train net output #1: loss = 0.104416 (* 1 = 0.104416 loss)\nI0819 17:26:36.883436 21584 sgd_solver.cpp:166] Iteration 42600, lr = 1.065\nI0819 17:28:55.529881 21584 solver.cpp:337] Iteration 42700, Testing net (#0)\nI0819 17:30:17.916121 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87092\nI0819 17:30:17.916415 21584 solver.cpp:404]     Test net output #1: loss = 0.472007 (* 1 = 0.472007 loss)\nI0819 17:30:19.216096 21584 solver.cpp:228] Iteration 42700, loss = 0.117438\nI0819 17:30:19.216137 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 17:30:19.216152 21584 solver.cpp:244]     Train net output #1: loss = 0.117437 (* 1 = 0.117437 loss)\nI0819 17:30:19.331224 21584 sgd_solver.cpp:166] Iteration 42700, lr = 1.0675\nI0819 17:32:37.910615 21584 solver.cpp:337] Iteration 42800, Testing net (#0)\nI0819 17:34:00.273478 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0819 17:34:00.273794 21584 solver.cpp:404]     Test net output #1: loss = 0.423353 (* 1 = 0.423353 loss)\nI0819 17:34:01.572234 21584 solver.cpp:228] Iteration 42800, loss = 0.0532774\nI0819 17:34:01.572274 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 17:34:01.572289 21584 solver.cpp:244]     Train net output #1: loss = 0.0532772 (* 1 = 0.0532772 loss)\nI0819 17:34:01.695806 21584 sgd_solver.cpp:166] Iteration 42800, lr = 1.07\nI0819 17:36:20.264853 21584 solver.cpp:337] Iteration 42900, Testing net (#0)\nI0819 17:37:42.581475 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88852\nI0819 17:37:42.581784 21584 solver.cpp:404]     Test net output #1: loss = 0.405716 (* 1 = 0.405716 loss)\nI0819 17:37:43.880707 21584 solver.cpp:228] Iteration 42900, loss = 0.134206\nI0819 17:37:43.880748 21584 solver.cpp:244]     
Train net output #0: accuracy = 0.96\nI0819 17:37:43.880764 21584 solver.cpp:244]     Train net output #1: loss = 0.134205 (* 1 = 0.134205 loss)\nI0819 17:37:44.000844 21584 sgd_solver.cpp:166] Iteration 42900, lr = 1.0725\nI0819 17:40:02.314776 21584 solver.cpp:337] Iteration 43000, Testing net (#0)\nI0819 17:41:24.639420 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88556\nI0819 17:41:24.639742 21584 solver.cpp:404]     Test net output #1: loss = 0.416058 (* 1 = 0.416058 loss)\nI0819 17:41:25.938181 21584 solver.cpp:228] Iteration 43000, loss = 0.124682\nI0819 17:41:25.938222 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:41:25.938237 21584 solver.cpp:244]     Train net output #1: loss = 0.124681 (* 1 = 0.124681 loss)\nI0819 17:41:26.049114 21584 sgd_solver.cpp:166] Iteration 43000, lr = 1.075\nI0819 17:43:44.003828 21584 solver.cpp:337] Iteration 43100, Testing net (#0)\nI0819 17:45:06.391057 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88084\nI0819 17:45:06.391374 21584 solver.cpp:404]     Test net output #1: loss = 0.439476 (* 1 = 0.439476 loss)\nI0819 17:45:07.690933 21584 solver.cpp:228] Iteration 43100, loss = 0.138398\nI0819 17:45:07.690978 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:45:07.690992 21584 solver.cpp:244]     Train net output #1: loss = 0.138398 (* 1 = 0.138398 loss)\nI0819 17:45:07.807675 21584 sgd_solver.cpp:166] Iteration 43100, lr = 1.0775\nI0819 17:47:25.710211 21584 solver.cpp:337] Iteration 43200, Testing net (#0)\nI0819 17:48:48.100790 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 17:48:48.101094 21584 solver.cpp:404]     Test net output #1: loss = 0.41551 (* 1 = 0.41551 loss)\nI0819 17:48:49.400358 21584 solver.cpp:228] Iteration 43200, loss = 0.0762509\nI0819 17:48:49.400399 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:48:49.400414 21584 solver.cpp:244]     Train net output #1: loss = 0.0762506 (* 1 
= 0.0762506 loss)\nI0819 17:48:49.516841 21584 sgd_solver.cpp:166] Iteration 43200, lr = 1.08\nI0819 17:51:07.439452 21584 solver.cpp:337] Iteration 43300, Testing net (#0)\nI0819 17:52:29.762043 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8844\nI0819 17:52:29.762365 21584 solver.cpp:404]     Test net output #1: loss = 0.428747 (* 1 = 0.428747 loss)\nI0819 17:52:31.061357 21584 solver.cpp:228] Iteration 43300, loss = 0.0804753\nI0819 17:52:31.061399 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 17:52:31.061415 21584 solver.cpp:244]     Train net output #1: loss = 0.080475 (* 1 = 0.080475 loss)\nI0819 17:52:31.173234 21584 sgd_solver.cpp:166] Iteration 43300, lr = 1.0825\nI0819 17:54:49.146709 21584 solver.cpp:337] Iteration 43400, Testing net (#0)\nI0819 17:56:11.473912 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0819 17:56:11.474231 21584 solver.cpp:404]     Test net output #1: loss = 0.453975 (* 1 = 0.453975 loss)\nI0819 17:56:12.773546 21584 solver.cpp:228] Iteration 43400, loss = 0.148413\nI0819 17:56:12.773587 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 17:56:12.773603 21584 solver.cpp:244]     Train net output #1: loss = 0.148413 (* 1 = 0.148413 loss)\nI0819 17:56:12.884814 21584 sgd_solver.cpp:166] Iteration 43400, lr = 1.085\nI0819 17:58:30.872565 21584 solver.cpp:337] Iteration 43500, Testing net (#0)\nI0819 17:59:53.303352 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88608\nI0819 17:59:53.303670 21584 solver.cpp:404]     Test net output #1: loss = 0.394826 (* 1 = 0.394826 loss)\nI0819 17:59:54.602936 21584 solver.cpp:228] Iteration 43500, loss = 0.0828499\nI0819 17:59:54.602983 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 17:59:54.602999 21584 solver.cpp:244]     Train net output #1: loss = 0.0828497 (* 1 = 0.0828497 loss)\nI0819 17:59:54.718320 21584 sgd_solver.cpp:166] Iteration 43500, lr = 1.0875\nI0819 18:02:12.627274 21584 
solver.cpp:337] Iteration 43600, Testing net (#0)\nI0819 18:03:35.120955 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88484\nI0819 18:03:35.121275 21584 solver.cpp:404]     Test net output #1: loss = 0.426767 (* 1 = 0.426767 loss)\nI0819 18:03:36.419978 21584 solver.cpp:228] Iteration 43600, loss = 0.0951307\nI0819 18:03:36.420020 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:03:36.420037 21584 solver.cpp:244]     Train net output #1: loss = 0.0951305 (* 1 = 0.0951305 loss)\nI0819 18:03:36.531919 21584 sgd_solver.cpp:166] Iteration 43600, lr = 1.09\nI0819 18:05:54.454565 21584 solver.cpp:337] Iteration 43700, Testing net (#0)\nI0819 18:07:16.949947 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8886\nI0819 18:07:16.950270 21584 solver.cpp:404]     Test net output #1: loss = 0.420717 (* 1 = 0.420717 loss)\nI0819 18:07:18.248905 21584 solver.cpp:228] Iteration 43700, loss = 0.100235\nI0819 18:07:18.248952 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:07:18.248970 21584 solver.cpp:244]     Train net output #1: loss = 0.100234 (* 1 = 0.100234 loss)\nI0819 18:07:18.358197 21584 sgd_solver.cpp:166] Iteration 43700, lr = 1.0925\nI0819 18:09:36.227131 21584 solver.cpp:337] Iteration 43800, Testing net (#0)\nI0819 18:10:58.660753 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88724\nI0819 18:10:58.661075 21584 solver.cpp:404]     Test net output #1: loss = 0.435778 (* 1 = 0.435778 loss)\nI0819 18:10:59.959455 21584 solver.cpp:228] Iteration 43800, loss = 0.171391\nI0819 18:10:59.959496 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:10:59.959512 21584 solver.cpp:244]     Train net output #1: loss = 0.17139 (* 1 = 0.17139 loss)\nI0819 18:11:00.071890 21584 sgd_solver.cpp:166] Iteration 43800, lr = 1.095\nI0819 18:13:17.938158 21584 solver.cpp:337] Iteration 43900, Testing net (#0)\nI0819 18:14:40.375314 21584 solver.cpp:404]     Test net output #0: accuracy 
= 0.88256\nI0819 18:14:40.375600 21584 solver.cpp:404]     Test net output #1: loss = 0.415367 (* 1 = 0.415367 loss)\nI0819 18:14:41.675139 21584 solver.cpp:228] Iteration 43900, loss = 0.143216\nI0819 18:14:41.675184 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:14:41.675209 21584 solver.cpp:244]     Train net output #1: loss = 0.143216 (* 1 = 0.143216 loss)\nI0819 18:14:41.787600 21584 sgd_solver.cpp:166] Iteration 43900, lr = 1.0975\nI0819 18:16:59.651412 21584 solver.cpp:337] Iteration 44000, Testing net (#0)\nI0819 18:18:22.082648 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0819 18:18:22.082916 21584 solver.cpp:404]     Test net output #1: loss = 0.435275 (* 1 = 0.435275 loss)\nI0819 18:18:23.381155 21584 solver.cpp:228] Iteration 44000, loss = 0.0714507\nI0819 18:18:23.381201 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:18:23.381225 21584 solver.cpp:244]     Train net output #1: loss = 0.0714503 (* 1 = 0.0714503 loss)\nI0819 18:18:23.491675 21584 sgd_solver.cpp:166] Iteration 44000, lr = 1.1\nI0819 18:20:41.466747 21584 solver.cpp:337] Iteration 44100, Testing net (#0)\nI0819 18:22:03.936005 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0819 18:22:03.936305 21584 solver.cpp:404]     Test net output #1: loss = 0.39999 (* 1 = 0.39999 loss)\nI0819 18:22:05.235950 21584 solver.cpp:228] Iteration 44100, loss = 0.0610788\nI0819 18:22:05.235998 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:22:05.236021 21584 solver.cpp:244]     Train net output #1: loss = 0.0610785 (* 1 = 0.0610785 loss)\nI0819 18:22:05.344158 21584 sgd_solver.cpp:166] Iteration 44100, lr = 1.1025\nI0819 18:24:23.279163 21584 solver.cpp:337] Iteration 44200, Testing net (#0)\nI0819 18:25:45.738111 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87792\nI0819 18:25:45.738391 21584 solver.cpp:404]     Test net output #1: loss = 0.450013 (* 1 = 0.450013 loss)\nI0819 
18:25:47.037947 21584 solver.cpp:228] Iteration 44200, loss = 0.0687723\nI0819 18:25:47.037994 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:25:47.038018 21584 solver.cpp:244]     Train net output #1: loss = 0.068772 (* 1 = 0.068772 loss)\nI0819 18:25:47.152320 21584 sgd_solver.cpp:166] Iteration 44200, lr = 1.105\nI0819 18:28:05.053791 21584 solver.cpp:337] Iteration 44300, Testing net (#0)\nI0819 18:29:28.472257 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0819 18:29:28.472623 21584 solver.cpp:404]     Test net output #1: loss = 0.431859 (* 1 = 0.431859 loss)\nI0819 18:29:29.776499 21584 solver.cpp:228] Iteration 44300, loss = 0.105015\nI0819 18:29:29.776557 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 18:29:29.776576 21584 solver.cpp:244]     Train net output #1: loss = 0.105014 (* 1 = 0.105014 loss)\nI0819 18:29:29.885864 21584 sgd_solver.cpp:166] Iteration 44300, lr = 1.1075\nI0819 18:31:48.076779 21584 solver.cpp:337] Iteration 44400, Testing net (#0)\nI0819 18:33:11.502815 21584 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0819 18:33:11.503175 21584 solver.cpp:404]     Test net output #1: loss = 0.433838 (* 1 = 0.433838 loss)\nI0819 18:33:12.807402 21584 solver.cpp:228] Iteration 44400, loss = 0.120023\nI0819 18:33:12.807448 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:33:12.807463 21584 solver.cpp:244]     Train net output #1: loss = 0.120022 (* 1 = 0.120022 loss)\nI0819 18:33:12.916893 21584 sgd_solver.cpp:166] Iteration 44400, lr = 1.11\nI0819 18:35:31.011885 21584 solver.cpp:337] Iteration 44500, Testing net (#0)\nI0819 18:36:54.448581 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0819 18:36:54.448961 21584 solver.cpp:404]     Test net output #1: loss = 0.433694 (* 1 = 0.433694 loss)\nI0819 18:36:55.752955 21584 solver.cpp:228] Iteration 44500, loss = 0.128609\nI0819 18:36:55.753001 21584 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0819 18:36:55.753031 21584 solver.cpp:244]     Train net output #1: loss = 0.128608 (* 1 = 0.128608 loss)\nI0819 18:36:55.861886 21584 sgd_solver.cpp:166] Iteration 44500, lr = 1.1125\nI0819 18:39:13.940279 21584 solver.cpp:337] Iteration 44600, Testing net (#0)\nI0819 18:40:37.469549 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0819 18:40:37.469910 21584 solver.cpp:404]     Test net output #1: loss = 0.42289 (* 1 = 0.42289 loss)\nI0819 18:40:38.773597 21584 solver.cpp:228] Iteration 44600, loss = 0.0831169\nI0819 18:40:38.773660 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 18:40:38.773687 21584 solver.cpp:244]     Train net output #1: loss = 0.0831166 (* 1 = 0.0831166 loss)\nI0819 18:40:38.879619 21584 sgd_solver.cpp:166] Iteration 44600, lr = 1.115\nI0819 18:42:57.029806 21584 solver.cpp:337] Iteration 44700, Testing net (#0)\nI0819 18:44:20.500536 21584 solver.cpp:404]     Test net output #0: accuracy = 0.885601\nI0819 18:44:20.500903 21584 solver.cpp:404]     Test net output #1: loss = 0.400924 (* 1 = 0.400924 loss)\nI0819 18:44:21.804602 21584 solver.cpp:228] Iteration 44700, loss = 0.110336\nI0819 18:44:21.804651 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:44:21.804673 21584 solver.cpp:244]     Train net output #1: loss = 0.110335 (* 1 = 0.110335 loss)\nI0819 18:44:21.914641 21584 sgd_solver.cpp:166] Iteration 44700, lr = 1.1175\nI0819 18:46:39.969544 21584 solver.cpp:337] Iteration 44800, Testing net (#0)\nI0819 18:48:03.434474 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88276\nI0819 18:48:03.434818 21584 solver.cpp:404]     Test net output #1: loss = 0.432048 (* 1 = 0.432048 loss)\nI0819 18:48:04.738385 21584 solver.cpp:228] Iteration 44800, loss = 0.269398\nI0819 18:48:04.738433 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 18:48:04.738457 21584 solver.cpp:244]     Train net output #1: loss = 0.269397 (* 1 = 
0.269397 loss)\nI0819 18:48:04.849791 21584 sgd_solver.cpp:166] Iteration 44800, lr = 1.12\nI0819 18:50:22.960762 21584 solver.cpp:337] Iteration 44900, Testing net (#0)\nI0819 18:51:46.493333 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 18:51:46.493713 21584 solver.cpp:404]     Test net output #1: loss = 0.420588 (* 1 = 0.420588 loss)\nI0819 18:51:47.797238 21584 solver.cpp:228] Iteration 44900, loss = 0.0919223\nI0819 18:51:47.797286 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 18:51:47.797308 21584 solver.cpp:244]     Train net output #1: loss = 0.0919221 (* 1 = 0.0919221 loss)\nI0819 18:51:47.906855 21584 sgd_solver.cpp:166] Iteration 44900, lr = 1.1225\nI0819 18:54:05.991502 21584 solver.cpp:337] Iteration 45000, Testing net (#0)\nI0819 18:55:29.444645 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8828\nI0819 18:55:29.445008 21584 solver.cpp:404]     Test net output #1: loss = 0.421097 (* 1 = 0.421097 loss)\nI0819 18:55:30.748982 21584 solver.cpp:228] Iteration 45000, loss = 0.0892634\nI0819 18:55:30.749033 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 18:55:30.749058 21584 solver.cpp:244]     Train net output #1: loss = 0.0892631 (* 1 = 0.0892631 loss)\nI0819 18:55:30.853482 21584 sgd_solver.cpp:166] Iteration 45000, lr = 1.125\nI0819 18:57:48.918130 21584 solver.cpp:337] Iteration 45100, Testing net (#0)\nI0819 18:59:12.350600 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87968\nI0819 18:59:12.350977 21584 solver.cpp:404]     Test net output #1: loss = 0.410323 (* 1 = 0.410323 loss)\nI0819 18:59:13.655131 21584 solver.cpp:228] Iteration 45100, loss = 0.0934626\nI0819 18:59:13.655195 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 18:59:13.655222 21584 solver.cpp:244]     Train net output #1: loss = 0.0934623 (* 1 = 0.0934623 loss)\nI0819 18:59:13.760885 21584 sgd_solver.cpp:166] Iteration 45100, lr = 1.1275\nI0819 19:01:31.877905 
21584 solver.cpp:337] Iteration 45200, Testing net (#0)\nI0819 19:02:55.316560 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87012\nI0819 19:02:55.316931 21584 solver.cpp:404]     Test net output #1: loss = 0.496445 (* 1 = 0.496445 loss)\nI0819 19:02:56.621155 21584 solver.cpp:228] Iteration 45200, loss = 0.19534\nI0819 19:02:56.621201 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 19:02:56.621224 21584 solver.cpp:244]     Train net output #1: loss = 0.19534 (* 1 = 0.19534 loss)\nI0819 19:02:56.726656 21584 sgd_solver.cpp:166] Iteration 45200, lr = 1.13\nI0819 19:05:14.727149 21584 solver.cpp:337] Iteration 45300, Testing net (#0)\nI0819 19:06:38.233078 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87832\nI0819 19:06:38.233448 21584 solver.cpp:404]     Test net output #1: loss = 0.432602 (* 1 = 0.432602 loss)\nI0819 19:06:39.537073 21584 solver.cpp:228] Iteration 45300, loss = 0.124468\nI0819 19:06:39.537142 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:06:39.537168 21584 solver.cpp:244]     Train net output #1: loss = 0.124468 (* 1 = 0.124468 loss)\nI0819 19:06:39.644781 21584 sgd_solver.cpp:166] Iteration 45300, lr = 1.1325\nI0819 19:08:57.764853 21584 solver.cpp:337] Iteration 45400, Testing net (#0)\nI0819 19:10:21.302700 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0819 19:10:21.303045 21584 solver.cpp:404]     Test net output #1: loss = 0.405239 (* 1 = 0.405239 loss)\nI0819 19:10:22.606624 21584 solver.cpp:228] Iteration 45400, loss = 0.12007\nI0819 19:10:22.606672 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 19:10:22.606695 21584 solver.cpp:244]     Train net output #1: loss = 0.12007 (* 1 = 0.12007 loss)\nI0819 19:10:22.716888 21584 sgd_solver.cpp:166] Iteration 45400, lr = 1.135\nI0819 19:12:40.805045 21584 solver.cpp:337] Iteration 45500, Testing net (#0)\nI0819 19:14:04.227110 21584 solver.cpp:404]     Test net output #0: accuracy 
= 0.879\nI0819 19:14:04.227458 21584 solver.cpp:404]     Test net output #1: loss = 0.425332 (* 1 = 0.425332 loss)\nI0819 19:14:05.531322 21584 solver.cpp:228] Iteration 45500, loss = 0.108886\nI0819 19:14:05.531364 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:14:05.531380 21584 solver.cpp:244]     Train net output #1: loss = 0.108886 (* 1 = 0.108886 loss)\nI0819 19:14:05.640929 21584 sgd_solver.cpp:166] Iteration 45500, lr = 1.1375\nI0819 19:16:23.688275 21584 solver.cpp:337] Iteration 45600, Testing net (#0)\nI0819 19:17:47.100116 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8872\nI0819 19:17:47.100461 21584 solver.cpp:404]     Test net output #1: loss = 0.416357 (* 1 = 0.416357 loss)\nI0819 19:17:48.404587 21584 solver.cpp:228] Iteration 45600, loss = 0.08956\nI0819 19:17:48.404630 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:17:48.404647 21584 solver.cpp:244]     Train net output #1: loss = 0.0895597 (* 1 = 0.0895597 loss)\nI0819 19:17:48.509197 21584 sgd_solver.cpp:166] Iteration 45600, lr = 1.14\nI0819 19:20:06.683681 21584 solver.cpp:337] Iteration 45700, Testing net (#0)\nI0819 19:21:30.093400 21584 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0819 19:21:30.093763 21584 solver.cpp:404]     Test net output #1: loss = 0.407492 (* 1 = 0.407492 loss)\nI0819 19:21:31.397243 21584 solver.cpp:228] Iteration 45700, loss = 0.109795\nI0819 19:21:31.397286 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:21:31.397302 21584 solver.cpp:244]     Train net output #1: loss = 0.109795 (* 1 = 0.109795 loss)\nI0819 19:21:31.507630 21584 sgd_solver.cpp:166] Iteration 45700, lr = 1.1425\nI0819 19:23:49.809881 21584 solver.cpp:337] Iteration 45800, Testing net (#0)\nI0819 19:25:13.215456 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0819 19:25:13.215826 21584 solver.cpp:404]     Test net output #1: loss = 0.424203 (* 1 = 0.424203 loss)\nI0819 
19:25:14.520056 21584 solver.cpp:228] Iteration 45800, loss = 0.109921\nI0819 19:25:14.520100 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 19:25:14.520117 21584 solver.cpp:244]     Train net output #1: loss = 0.10992 (* 1 = 0.10992 loss)\nI0819 19:25:14.628751 21584 sgd_solver.cpp:166] Iteration 45800, lr = 1.145\nI0819 19:27:32.865833 21584 solver.cpp:337] Iteration 45900, Testing net (#0)\nI0819 19:28:56.271590 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87292\nI0819 19:28:56.271936 21584 solver.cpp:404]     Test net output #1: loss = 0.451892 (* 1 = 0.451892 loss)\nI0819 19:28:57.575659 21584 solver.cpp:228] Iteration 45900, loss = 0.0915844\nI0819 19:28:57.575703 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 19:28:57.575719 21584 solver.cpp:244]     Train net output #1: loss = 0.0915841 (* 1 = 0.0915841 loss)\nI0819 19:28:57.685667 21584 sgd_solver.cpp:166] Iteration 45900, lr = 1.1475\nI0819 19:31:15.839324 21584 solver.cpp:337] Iteration 46000, Testing net (#0)\nI0819 19:32:39.243919 21584 solver.cpp:404]     Test net output #0: accuracy = 0.882\nI0819 19:32:39.244267 21584 solver.cpp:404]     Test net output #1: loss = 0.404448 (* 1 = 0.404448 loss)\nI0819 19:32:40.546628 21584 solver.cpp:228] Iteration 46000, loss = 0.0924327\nI0819 19:32:40.546669 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:32:40.546684 21584 solver.cpp:244]     Train net output #1: loss = 0.0924324 (* 1 = 0.0924324 loss)\nI0819 19:32:40.655091 21584 sgd_solver.cpp:166] Iteration 46000, lr = 1.15\nI0819 19:34:58.784579 21584 solver.cpp:337] Iteration 46100, Testing net (#0)\nI0819 19:36:22.189769 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88808\nI0819 19:36:22.190119 21584 solver.cpp:404]     Test net output #1: loss = 0.400806 (* 1 = 0.400806 loss)\nI0819 19:36:23.492346 21584 solver.cpp:228] Iteration 46100, loss = 0.128391\nI0819 19:36:23.492385 21584 solver.cpp:244]     Train 
net output #0: accuracy = 0.944\nI0819 19:36:23.492401 21584 solver.cpp:244]     Train net output #1: loss = 0.12839 (* 1 = 0.12839 loss)\nI0819 19:36:23.600949 21584 sgd_solver.cpp:166] Iteration 46100, lr = 1.1525\nI0819 19:38:41.748363 21584 solver.cpp:337] Iteration 46200, Testing net (#0)\nI0819 19:40:05.130581 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0819 19:40:05.130920 21584 solver.cpp:404]     Test net output #1: loss = 0.417364 (* 1 = 0.417364 loss)\nI0819 19:40:06.432998 21584 solver.cpp:228] Iteration 46200, loss = 0.0763827\nI0819 19:40:06.433038 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:40:06.433053 21584 solver.cpp:244]     Train net output #1: loss = 0.0763824 (* 1 = 0.0763824 loss)\nI0819 19:40:06.542021 21584 sgd_solver.cpp:166] Iteration 46200, lr = 1.155\nI0819 19:42:24.676093 21584 solver.cpp:337] Iteration 46300, Testing net (#0)\nI0819 19:43:48.056147 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0819 19:43:48.056514 21584 solver.cpp:404]     Test net output #1: loss = 0.412374 (* 1 = 0.412374 loss)\nI0819 19:43:49.358563 21584 solver.cpp:228] Iteration 46300, loss = 0.0940682\nI0819 19:43:49.358603 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 19:43:49.358618 21584 solver.cpp:244]     Train net output #1: loss = 0.0940678 (* 1 = 0.0940678 loss)\nI0819 19:43:49.470778 21584 sgd_solver.cpp:166] Iteration 46300, lr = 1.1575\nI0819 19:46:07.590728 21584 solver.cpp:337] Iteration 46400, Testing net (#0)\nI0819 19:47:30.978605 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88248\nI0819 19:47:30.978978 21584 solver.cpp:404]     Test net output #1: loss = 0.423491 (* 1 = 0.423491 loss)\nI0819 19:47:32.280735 21584 solver.cpp:228] Iteration 46400, loss = 0.168295\nI0819 19:47:32.280774 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 19:47:32.280791 21584 solver.cpp:244]     Train net output #1: loss = 0.168294 (* 1 
= 0.168294 loss)\nI0819 19:47:32.394582 21584 sgd_solver.cpp:166] Iteration 46400, lr = 1.16\nI0819 19:49:50.547353 21584 solver.cpp:337] Iteration 46500, Testing net (#0)\nI0819 19:51:13.962098 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0819 19:51:13.962435 21584 solver.cpp:404]     Test net output #1: loss = 0.426874 (* 1 = 0.426874 loss)\nI0819 19:51:15.264593 21584 solver.cpp:228] Iteration 46500, loss = 0.14108\nI0819 19:51:15.264633 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:51:15.264649 21584 solver.cpp:244]     Train net output #1: loss = 0.141079 (* 1 = 0.141079 loss)\nI0819 19:51:15.372524 21584 sgd_solver.cpp:166] Iteration 46500, lr = 1.1625\nI0819 19:53:33.587656 21584 solver.cpp:337] Iteration 46600, Testing net (#0)\nI0819 19:54:56.981345 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88\nI0819 19:54:56.981703 21584 solver.cpp:404]     Test net output #1: loss = 0.427236 (* 1 = 0.427236 loss)\nI0819 19:54:58.283658 21584 solver.cpp:228] Iteration 46600, loss = 0.0520936\nI0819 19:54:58.283699 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 19:54:58.283713 21584 solver.cpp:244]     Train net output #1: loss = 0.0520932 (* 1 = 0.0520932 loss)\nI0819 19:54:58.397153 21584 sgd_solver.cpp:166] Iteration 46600, lr = 1.165\nI0819 19:57:16.608525 21584 solver.cpp:337] Iteration 46700, Testing net (#0)\nI0819 19:58:40.012579 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8784\nI0819 19:58:40.012944 21584 solver.cpp:404]     Test net output #1: loss = 0.424144 (* 1 = 0.424144 loss)\nI0819 19:58:41.315348 21584 solver.cpp:228] Iteration 46700, loss = 0.131712\nI0819 19:58:41.315388 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 19:58:41.315402 21584 solver.cpp:244]     Train net output #1: loss = 0.131712 (* 1 = 0.131712 loss)\nI0819 19:58:41.422113 21584 sgd_solver.cpp:166] Iteration 46700, lr = 1.1675\nI0819 20:00:59.609580 21584 
solver.cpp:337] Iteration 46800, Testing net (#0)\nI0819 20:02:23.030711 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0819 20:02:23.031085 21584 solver.cpp:404]     Test net output #1: loss = 0.444143 (* 1 = 0.444143 loss)\nI0819 20:02:24.333746 21584 solver.cpp:228] Iteration 46800, loss = 0.0703191\nI0819 20:02:24.333784 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:02:24.333803 21584 solver.cpp:244]     Train net output #1: loss = 0.0703188 (* 1 = 0.0703188 loss)\nI0819 20:02:24.444737 21584 sgd_solver.cpp:166] Iteration 46800, lr = 1.17\nI0819 20:04:42.605068 21584 solver.cpp:337] Iteration 46900, Testing net (#0)\nI0819 20:06:06.018019 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8818\nI0819 20:06:06.018364 21584 solver.cpp:404]     Test net output #1: loss = 0.424007 (* 1 = 0.424007 loss)\nI0819 20:06:07.320578 21584 solver.cpp:228] Iteration 46900, loss = 0.0850857\nI0819 20:06:07.320617 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:06:07.320633 21584 solver.cpp:244]     Train net output #1: loss = 0.0850854 (* 1 = 0.0850854 loss)\nI0819 20:06:07.431398 21584 sgd_solver.cpp:166] Iteration 46900, lr = 1.1725\nI0819 20:08:25.650010 21584 solver.cpp:337] Iteration 47000, Testing net (#0)\nI0819 20:09:49.132189 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8808\nI0819 20:09:49.132539 21584 solver.cpp:404]     Test net output #1: loss = 0.430849 (* 1 = 0.430849 loss)\nI0819 20:09:50.434968 21584 solver.cpp:228] Iteration 47000, loss = 0.0772256\nI0819 20:09:50.435009 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 20:09:50.435037 21584 solver.cpp:244]     Train net output #1: loss = 0.0772253 (* 1 = 0.0772253 loss)\nI0819 20:09:50.545033 21584 sgd_solver.cpp:166] Iteration 47000, lr = 1.175\nI0819 20:12:08.680619 21584 solver.cpp:337] Iteration 47100, Testing net (#0)\nI0819 20:13:32.192901 21584 solver.cpp:404]     Test net output #0: 
accuracy = 0.8876\nI0819 20:13:32.193279 21584 solver.cpp:404]     Test net output #1: loss = 0.413146 (* 1 = 0.413146 loss)\nI0819 20:13:33.497267 21584 solver.cpp:228] Iteration 47100, loss = 0.10084\nI0819 20:13:33.497310 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:13:33.497334 21584 solver.cpp:244]     Train net output #1: loss = 0.10084 (* 1 = 0.10084 loss)\nI0819 20:13:33.607542 21584 sgd_solver.cpp:166] Iteration 47100, lr = 1.1775\nI0819 20:15:51.810631 21584 solver.cpp:337] Iteration 47200, Testing net (#0)\nI0819 20:17:15.313835 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89276\nI0819 20:17:15.314193 21584 solver.cpp:404]     Test net output #1: loss = 0.392233 (* 1 = 0.392233 loss)\nI0819 20:17:16.618250 21584 solver.cpp:228] Iteration 47200, loss = 0.0794741\nI0819 20:17:16.618294 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 20:17:16.618315 21584 solver.cpp:244]     Train net output #1: loss = 0.0794738 (* 1 = 0.0794738 loss)\nI0819 20:17:16.727079 21584 sgd_solver.cpp:166] Iteration 47200, lr = 1.18\nI0819 20:19:34.909888 21584 solver.cpp:337] Iteration 47300, Testing net (#0)\nI0819 20:20:58.372512 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8884\nI0819 20:20:58.372853 21584 solver.cpp:404]     Test net output #1: loss = 0.402129 (* 1 = 0.402129 loss)\nI0819 20:20:59.676542 21584 solver.cpp:228] Iteration 47300, loss = 0.128462\nI0819 20:20:59.676585 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:20:59.676609 21584 solver.cpp:244]     Train net output #1: loss = 0.128461 (* 1 = 0.128461 loss)\nI0819 20:20:59.787117 21584 sgd_solver.cpp:166] Iteration 47300, lr = 1.1825\nI0819 20:23:17.935438 21584 solver.cpp:337] Iteration 47400, Testing net (#0)\nI0819 20:24:41.409739 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88972\nI0819 20:24:41.410125 21584 solver.cpp:404]     Test net output #1: loss = 0.40563 (* 1 = 0.40563 loss)\nI0819 
20:24:42.714058 21584 solver.cpp:228] Iteration 47400, loss = 0.0775875\nI0819 20:24:42.714117 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 20:24:42.714143 21584 solver.cpp:244]     Train net output #1: loss = 0.0775873 (* 1 = 0.0775873 loss)\nI0819 20:24:42.818825 21584 sgd_solver.cpp:166] Iteration 47400, lr = 1.185\nI0819 20:27:00.981597 21584 solver.cpp:337] Iteration 47500, Testing net (#0)\nI0819 20:28:24.480037 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8818\nI0819 20:28:24.480389 21584 solver.cpp:404]     Test net output #1: loss = 0.473401 (* 1 = 0.473401 loss)\nI0819 20:28:25.784214 21584 solver.cpp:228] Iteration 47500, loss = 0.0631687\nI0819 20:28:25.784258 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 20:28:25.784281 21584 solver.cpp:244]     Train net output #1: loss = 0.0631684 (* 1 = 0.0631684 loss)\nI0819 20:28:25.892844 21584 sgd_solver.cpp:166] Iteration 47500, lr = 1.1875\nI0819 20:30:43.988512 21584 solver.cpp:337] Iteration 47600, Testing net (#0)\nI0819 20:32:07.440371 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88932\nI0819 20:32:07.440723 21584 solver.cpp:404]     Test net output #1: loss = 0.378272 (* 1 = 0.378272 loss)\nI0819 20:32:08.744686 21584 solver.cpp:228] Iteration 47600, loss = 0.11975\nI0819 20:32:08.744746 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 20:32:08.744771 21584 solver.cpp:244]     Train net output #1: loss = 0.11975 (* 1 = 0.11975 loss)\nI0819 20:32:08.851140 21584 sgd_solver.cpp:166] Iteration 47600, lr = 1.19\nI0819 20:34:26.910960 21584 solver.cpp:337] Iteration 47700, Testing net (#0)\nI0819 20:35:50.452543 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0819 20:35:50.452919 21584 solver.cpp:404]     Test net output #1: loss = 0.427453 (* 1 = 0.427453 loss)\nI0819 20:35:51.756140 21584 solver.cpp:228] Iteration 47700, loss = 0.14089\nI0819 20:35:51.756183 21584 solver.cpp:244]     Train 
net output #0: accuracy = 0.968\nI0819 20:35:51.756206 21584 solver.cpp:244]     Train net output #1: loss = 0.14089 (* 1 = 0.14089 loss)\nI0819 20:35:51.867990 21584 sgd_solver.cpp:166] Iteration 47700, lr = 1.1925\nI0819 20:38:09.987740 21584 solver.cpp:337] Iteration 47800, Testing net (#0)\nI0819 20:39:33.551363 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8862\nI0819 20:39:33.551713 21584 solver.cpp:404]     Test net output #1: loss = 0.391899 (* 1 = 0.391899 loss)\nI0819 20:39:34.855830 21584 solver.cpp:228] Iteration 47800, loss = 0.113206\nI0819 20:39:34.855873 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:39:34.855895 21584 solver.cpp:244]     Train net output #1: loss = 0.113206 (* 1 = 0.113206 loss)\nI0819 20:39:34.966567 21584 sgd_solver.cpp:166] Iteration 47800, lr = 1.195\nI0819 20:41:52.977691 21584 solver.cpp:337] Iteration 47900, Testing net (#0)\nI0819 20:43:15.467936 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88284\nI0819 20:43:15.468240 21584 solver.cpp:404]     Test net output #1: loss = 0.40209 (* 1 = 0.40209 loss)\nI0819 20:43:16.766662 21584 solver.cpp:228] Iteration 47900, loss = 0.126188\nI0819 20:43:16.766705 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 20:43:16.766721 21584 solver.cpp:244]     Train net output #1: loss = 0.126188 (* 1 = 0.126188 loss)\nI0819 20:43:16.879112 21584 sgd_solver.cpp:166] Iteration 47900, lr = 1.1975\nI0819 20:45:34.809265 21584 solver.cpp:337] Iteration 48000, Testing net (#0)\nI0819 20:46:57.288698 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8814\nI0819 20:46:57.288993 21584 solver.cpp:404]     Test net output #1: loss = 0.42429 (* 1 = 0.42429 loss)\nI0819 20:46:58.587790 21584 solver.cpp:228] Iteration 48000, loss = 0.13537\nI0819 20:46:58.587832 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 20:46:58.587848 21584 solver.cpp:244]     Train net output #1: loss = 0.135369 (* 1 = 0.135369 
loss)\nI0819 20:46:58.700669 21584 sgd_solver.cpp:166] Iteration 48000, lr = 1.2\nI0819 20:49:16.588948 21584 solver.cpp:337] Iteration 48100, Testing net (#0)\nI0819 20:50:39.075955 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881641\nI0819 20:50:39.076234 21584 solver.cpp:404]     Test net output #1: loss = 0.418538 (* 1 = 0.418538 loss)\nI0819 20:50:40.374758 21584 solver.cpp:228] Iteration 48100, loss = 0.18858\nI0819 20:50:40.374802 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 20:50:40.374819 21584 solver.cpp:244]     Train net output #1: loss = 0.18858 (* 1 = 0.18858 loss)\nI0819 20:50:40.487360 21584 sgd_solver.cpp:166] Iteration 48100, lr = 1.2025\nI0819 20:52:58.404848 21584 solver.cpp:337] Iteration 48200, Testing net (#0)\nI0819 20:54:20.869804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88512\nI0819 20:54:20.870050 21584 solver.cpp:404]     Test net output #1: loss = 0.433057 (* 1 = 0.433057 loss)\nI0819 20:54:22.169488 21584 solver.cpp:228] Iteration 48200, loss = 0.0583132\nI0819 20:54:22.169533 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 20:54:22.169549 21584 solver.cpp:244]     Train net output #1: loss = 0.0583129 (* 1 = 0.0583129 loss)\nI0819 20:54:22.278792 21584 sgd_solver.cpp:166] Iteration 48200, lr = 1.205\nI0819 20:56:40.167104 21584 solver.cpp:337] Iteration 48300, Testing net (#0)\nI0819 20:58:02.624636 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88612\nI0819 20:58:02.624939 21584 solver.cpp:404]     Test net output #1: loss = 0.402355 (* 1 = 0.402355 loss)\nI0819 20:58:03.923207 21584 solver.cpp:228] Iteration 48300, loss = 0.111429\nI0819 20:58:03.923250 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 20:58:03.923267 21584 solver.cpp:244]     Train net output #1: loss = 0.111429 (* 1 = 0.111429 loss)\nI0819 20:58:04.034207 21584 sgd_solver.cpp:166] Iteration 48300, lr = 1.2075\nI0819 21:00:22.067734 21584 
solver.cpp:337] Iteration 48400, Testing net (#0)\nI0819 21:01:44.530133 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88364\nI0819 21:01:44.530385 21584 solver.cpp:404]     Test net output #1: loss = 0.413814 (* 1 = 0.413814 loss)\nI0819 21:01:45.829001 21584 solver.cpp:228] Iteration 48400, loss = 0.0861711\nI0819 21:01:45.829046 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:01:45.829063 21584 solver.cpp:244]     Train net output #1: loss = 0.0861708 (* 1 = 0.0861708 loss)\nI0819 21:01:45.941838 21584 sgd_solver.cpp:166] Iteration 48400, lr = 1.21\nI0819 21:04:03.932093 21584 solver.cpp:337] Iteration 48500, Testing net (#0)\nI0819 21:05:26.395584 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88392\nI0819 21:05:26.395876 21584 solver.cpp:404]     Test net output #1: loss = 0.411946 (* 1 = 0.411946 loss)\nI0819 21:05:27.696985 21584 solver.cpp:228] Iteration 48500, loss = 0.0925359\nI0819 21:05:27.697032 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:05:27.697049 21584 solver.cpp:244]     Train net output #1: loss = 0.0925356 (* 1 = 0.0925356 loss)\nI0819 21:05:27.802994 21584 sgd_solver.cpp:166] Iteration 48500, lr = 1.2125\nI0819 21:07:45.792449 21584 solver.cpp:337] Iteration 48600, Testing net (#0)\nI0819 21:09:08.244421 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88728\nI0819 21:09:08.244695 21584 solver.cpp:404]     Test net output #1: loss = 0.398166 (* 1 = 0.398166 loss)\nI0819 21:09:09.543264 21584 solver.cpp:228] Iteration 48600, loss = 0.109646\nI0819 21:09:09.543310 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:09:09.543326 21584 solver.cpp:244]     Train net output #1: loss = 0.109646 (* 1 = 0.109646 loss)\nI0819 21:09:09.655869 21584 sgd_solver.cpp:166] Iteration 48600, lr = 1.215\nI0819 21:11:27.559383 21584 solver.cpp:337] Iteration 48700, Testing net (#0)\nI0819 21:12:50.015502 21584 solver.cpp:404]     Test net output #0: 
accuracy = 0.888881\nI0819 21:12:50.015805 21584 solver.cpp:404]     Test net output #1: loss = 0.388022 (* 1 = 0.388022 loss)\nI0819 21:12:51.314491 21584 solver.cpp:228] Iteration 48700, loss = 0.126828\nI0819 21:12:51.314532 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:12:51.314548 21584 solver.cpp:244]     Train net output #1: loss = 0.126828 (* 1 = 0.126828 loss)\nI0819 21:12:51.426760 21584 sgd_solver.cpp:166] Iteration 48700, lr = 1.2175\nI0819 21:15:09.370873 21584 solver.cpp:337] Iteration 48800, Testing net (#0)\nI0819 21:16:31.818542 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0819 21:16:31.818814 21584 solver.cpp:404]     Test net output #1: loss = 0.393444 (* 1 = 0.393444 loss)\nI0819 21:16:33.116961 21584 solver.cpp:228] Iteration 48800, loss = 0.0965396\nI0819 21:16:33.117003 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:16:33.117019 21584 solver.cpp:244]     Train net output #1: loss = 0.0965393 (* 1 = 0.0965393 loss)\nI0819 21:16:33.229486 21584 sgd_solver.cpp:166] Iteration 48800, lr = 1.22\nI0819 21:18:51.070825 21584 solver.cpp:337] Iteration 48900, Testing net (#0)\nI0819 21:20:13.524289 21584 solver.cpp:404]     Test net output #0: accuracy = 0.885001\nI0819 21:20:13.524585 21584 solver.cpp:404]     Test net output #1: loss = 0.416458 (* 1 = 0.416458 loss)\nI0819 21:20:14.823173 21584 solver.cpp:228] Iteration 48900, loss = 0.206608\nI0819 21:20:14.823213 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 21:20:14.823230 21584 solver.cpp:244]     Train net output #1: loss = 0.206607 (* 1 = 0.206607 loss)\nI0819 21:20:14.930850 21584 sgd_solver.cpp:166] Iteration 48900, lr = 1.2225\nI0819 21:22:32.780881 21584 solver.cpp:337] Iteration 49000, Testing net (#0)\nI0819 21:23:55.229661 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 21:23:55.229970 21584 solver.cpp:404]     Test net output #1: loss = 0.41128 (* 1 = 0.41128 
loss)\nI0819 21:23:56.528810 21584 solver.cpp:228] Iteration 49000, loss = 0.103712\nI0819 21:23:56.528848 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:23:56.528863 21584 solver.cpp:244]     Train net output #1: loss = 0.103712 (* 1 = 0.103712 loss)\nI0819 21:23:56.640890 21584 sgd_solver.cpp:166] Iteration 49000, lr = 1.225\nI0819 21:26:14.526901 21584 solver.cpp:337] Iteration 49100, Testing net (#0)\nI0819 21:27:36.996891 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87948\nI0819 21:27:36.997189 21584 solver.cpp:404]     Test net output #1: loss = 0.422457 (* 1 = 0.422457 loss)\nI0819 21:27:38.295728 21584 solver.cpp:228] Iteration 49100, loss = 0.050825\nI0819 21:27:38.295774 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 21:27:38.295789 21584 solver.cpp:244]     Train net output #1: loss = 0.0508247 (* 1 = 0.0508247 loss)\nI0819 21:27:38.409168 21584 sgd_solver.cpp:166] Iteration 49100, lr = 1.2275\nI0819 21:29:56.300360 21584 solver.cpp:337] Iteration 49200, Testing net (#0)\nI0819 21:31:18.769170 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8852\nI0819 21:31:18.769484 21584 solver.cpp:404]     Test net output #1: loss = 0.409351 (* 1 = 0.409351 loss)\nI0819 21:31:20.068181 21584 solver.cpp:228] Iteration 49200, loss = 0.148821\nI0819 21:31:20.068223 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 21:31:20.068238 21584 solver.cpp:244]     Train net output #1: loss = 0.148821 (* 1 = 0.148821 loss)\nI0819 21:31:20.177850 21584 sgd_solver.cpp:166] Iteration 49200, lr = 1.23\nI0819 21:33:38.022727 21584 solver.cpp:337] Iteration 49300, Testing net (#0)\nI0819 21:35:00.489948 21584 solver.cpp:404]     Test net output #0: accuracy = 0.887\nI0819 21:35:00.490218 21584 solver.cpp:404]     Test net output #1: loss = 0.394852 (* 1 = 0.394852 loss)\nI0819 21:35:01.788839 21584 solver.cpp:228] Iteration 49300, loss = 0.190789\nI0819 21:35:01.788879 21584 solver.cpp:244]   
  Train net output #0: accuracy = 0.936\nI0819 21:35:01.788894 21584 solver.cpp:244]     Train net output #1: loss = 0.190789 (* 1 = 0.190789 loss)\nI0819 21:35:01.901247 21584 sgd_solver.cpp:166] Iteration 49300, lr = 1.2325\nI0819 21:37:19.805261 21584 solver.cpp:337] Iteration 49400, Testing net (#0)\nI0819 21:38:42.270907 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87672\nI0819 21:38:42.271167 21584 solver.cpp:404]     Test net output #1: loss = 0.452783 (* 1 = 0.452783 loss)\nI0819 21:38:43.569521 21584 solver.cpp:228] Iteration 49400, loss = 0.0530214\nI0819 21:38:43.569566 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0819 21:38:43.569581 21584 solver.cpp:244]     Train net output #1: loss = 0.0530211 (* 1 = 0.0530211 loss)\nI0819 21:38:43.680662 21584 sgd_solver.cpp:166] Iteration 49400, lr = 1.235\nI0819 21:41:01.574213 21584 solver.cpp:337] Iteration 49500, Testing net (#0)\nI0819 21:42:24.043280 21584 solver.cpp:404]     Test net output #0: accuracy = 0.891\nI0819 21:42:24.043570 21584 solver.cpp:404]     Test net output #1: loss = 0.402161 (* 1 = 0.402161 loss)\nI0819 21:42:25.342368 21584 solver.cpp:228] Iteration 49500, loss = 0.134785\nI0819 21:42:25.342412 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 21:42:25.342428 21584 solver.cpp:244]     Train net output #1: loss = 0.134785 (* 1 = 0.134785 loss)\nI0819 21:42:25.452975 21584 sgd_solver.cpp:166] Iteration 49500, lr = 1.2375\nI0819 21:44:43.366282 21584 solver.cpp:337] Iteration 49600, Testing net (#0)\nI0819 21:46:05.836416 21584 solver.cpp:404]     Test net output #0: accuracy = 0.883041\nI0819 21:46:05.836714 21584 solver.cpp:404]     Test net output #1: loss = 0.424134 (* 1 = 0.424134 loss)\nI0819 21:46:07.135331 21584 solver.cpp:228] Iteration 49600, loss = 0.0875538\nI0819 21:46:07.135375 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 21:46:07.135390 21584 solver.cpp:244]     Train net output #1: loss = 
0.0875535 (* 1 = 0.0875535 loss)\nI0819 21:46:07.246323 21584 sgd_solver.cpp:166] Iteration 49600, lr = 1.24\nI0819 21:48:25.208343 21584 solver.cpp:337] Iteration 49700, Testing net (#0)\nI0819 21:49:47.675223 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87736\nI0819 21:49:47.675506 21584 solver.cpp:404]     Test net output #1: loss = 0.438506 (* 1 = 0.438506 loss)\nI0819 21:49:48.973853 21584 solver.cpp:228] Iteration 49700, loss = 0.179048\nI0819 21:49:48.973896 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0819 21:49:48.973912 21584 solver.cpp:244]     Train net output #1: loss = 0.179048 (* 1 = 0.179048 loss)\nI0819 21:49:49.088564 21584 sgd_solver.cpp:166] Iteration 49700, lr = 1.2425\nI0819 21:52:06.972730 21584 solver.cpp:337] Iteration 49800, Testing net (#0)\nI0819 21:53:29.443936 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0819 21:53:29.444236 21584 solver.cpp:404]     Test net output #1: loss = 0.404509 (* 1 = 0.404509 loss)\nI0819 21:53:30.742879 21584 solver.cpp:228] Iteration 49800, loss = 0.0965259\nI0819 21:53:30.742923 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0819 21:53:30.742938 21584 solver.cpp:244]     Train net output #1: loss = 0.0965257 (* 1 = 0.0965257 loss)\nI0819 21:53:30.856813 21584 sgd_solver.cpp:166] Iteration 49800, lr = 1.245\nI0819 21:55:48.755087 21584 solver.cpp:337] Iteration 49900, Testing net (#0)\nI0819 21:57:11.122681 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87868\nI0819 21:57:11.122964 21584 solver.cpp:404]     Test net output #1: loss = 0.416345 (* 1 = 0.416345 loss)\nI0819 21:57:12.424839 21584 solver.cpp:228] Iteration 49900, loss = 0.167412\nI0819 21:57:12.424881 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 21:57:12.424897 21584 solver.cpp:244]     Train net output #1: loss = 0.167411 (* 1 = 0.167411 loss)\nI0819 21:57:12.537714 21584 sgd_solver.cpp:166] Iteration 49900, lr = 1.2475\nI0819 
21:59:30.443230 21584 solver.cpp:337] Iteration 50000, Testing net (#0)\nI0819 22:00:52.806437 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0819 22:00:52.806747 21584 solver.cpp:404]     Test net output #1: loss = 0.428863 (* 1 = 0.428863 loss)\nI0819 22:00:54.108924 21584 solver.cpp:228] Iteration 50000, loss = 0.0935507\nI0819 22:00:54.108968 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 22:00:54.108984 21584 solver.cpp:244]     Train net output #1: loss = 0.0935504 (* 1 = 0.0935504 loss)\nI0819 22:00:54.221011 21584 sgd_solver.cpp:166] Iteration 50000, lr = 1.25\nI0819 22:03:12.197437 21584 solver.cpp:337] Iteration 50100, Testing net (#0)\nI0819 22:04:34.562233 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88752\nI0819 22:04:34.562527 21584 solver.cpp:404]     Test net output #1: loss = 0.390467 (* 1 = 0.390467 loss)\nI0819 22:04:35.865077 21584 solver.cpp:228] Iteration 50100, loss = 0.138667\nI0819 22:04:35.865118 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:04:35.865134 21584 solver.cpp:244]     Train net output #1: loss = 0.138667 (* 1 = 0.138667 loss)\nI0819 22:04:35.971005 21584 sgd_solver.cpp:166] Iteration 50100, lr = 1.2525\nI0819 22:06:53.977974 21584 solver.cpp:337] Iteration 50200, Testing net (#0)\nI0819 22:08:16.341291 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88176\nI0819 22:08:16.341552 21584 solver.cpp:404]     Test net output #1: loss = 0.440171 (* 1 = 0.440171 loss)\nI0819 22:08:17.644349 21584 solver.cpp:228] Iteration 50200, loss = 0.140295\nI0819 22:08:17.644392 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:08:17.644407 21584 solver.cpp:244]     Train net output #1: loss = 0.140294 (* 1 = 0.140294 loss)\nI0819 22:08:17.749032 21584 sgd_solver.cpp:166] Iteration 50200, lr = 1.255\nI0819 22:10:35.684237 21584 solver.cpp:337] Iteration 50300, Testing net (#0)\nI0819 22:11:58.054539 21584 solver.cpp:404]     Test 
net output #0: accuracy = 0.89028\nI0819 22:11:58.054839 21584 solver.cpp:404]     Test net output #1: loss = 0.391051 (* 1 = 0.391051 loss)\nI0819 22:11:59.356436 21584 solver.cpp:228] Iteration 50300, loss = 0.105192\nI0819 22:11:59.356475 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:11:59.356492 21584 solver.cpp:244]     Train net output #1: loss = 0.105192 (* 1 = 0.105192 loss)\nI0819 22:11:59.464196 21584 sgd_solver.cpp:166] Iteration 50300, lr = 1.2575\nI0819 22:14:17.367130 21584 solver.cpp:337] Iteration 50400, Testing net (#0)\nI0819 22:15:39.738665 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 22:15:39.738950 21584 solver.cpp:404]     Test net output #1: loss = 0.412505 (* 1 = 0.412505 loss)\nI0819 22:15:41.041690 21584 solver.cpp:228] Iteration 50400, loss = 0.0885299\nI0819 22:15:41.041731 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:15:41.041752 21584 solver.cpp:244]     Train net output #1: loss = 0.0885297 (* 1 = 0.0885297 loss)\nI0819 22:15:41.156944 21584 sgd_solver.cpp:166] Iteration 50400, lr = 1.26\nI0819 22:17:59.135130 21584 solver.cpp:337] Iteration 50500, Testing net (#0)\nI0819 22:19:21.511575 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8852\nI0819 22:19:21.511849 21584 solver.cpp:404]     Test net output #1: loss = 0.425334 (* 1 = 0.425334 loss)\nI0819 22:19:22.810724 21584 solver.cpp:228] Iteration 50500, loss = 0.0903569\nI0819 22:19:22.810771 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:19:22.810787 21584 solver.cpp:244]     Train net output #1: loss = 0.0903566 (* 1 = 0.0903566 loss)\nI0819 22:19:22.923349 21584 sgd_solver.cpp:166] Iteration 50500, lr = 1.2625\nI0819 22:21:40.872766 21584 solver.cpp:337] Iteration 50600, Testing net (#0)\nI0819 22:23:03.242727 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8838\nI0819 22:23:03.243037 21584 solver.cpp:404]     Test net output #1: loss = 0.40682 (* 1 
= 0.40682 loss)\nI0819 22:23:04.541831 21584 solver.cpp:228] Iteration 50600, loss = 0.106226\nI0819 22:23:04.541873 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:23:04.541889 21584 solver.cpp:244]     Train net output #1: loss = 0.106226 (* 1 = 0.106226 loss)\nI0819 22:23:04.656610 21584 sgd_solver.cpp:166] Iteration 50600, lr = 1.265\nI0819 22:25:22.584897 21584 solver.cpp:337] Iteration 50700, Testing net (#0)\nI0819 22:26:44.889032 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88672\nI0819 22:26:44.889310 21584 solver.cpp:404]     Test net output #1: loss = 0.382054 (* 1 = 0.382054 loss)\nI0819 22:26:46.188071 21584 solver.cpp:228] Iteration 50700, loss = 0.120592\nI0819 22:26:46.188113 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 22:26:46.188128 21584 solver.cpp:244]     Train net output #1: loss = 0.120592 (* 1 = 0.120592 loss)\nI0819 22:26:46.301120 21584 sgd_solver.cpp:166] Iteration 50700, lr = 1.2675\nI0819 22:29:04.309026 21584 solver.cpp:337] Iteration 50800, Testing net (#0)\nI0819 22:30:26.728523 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88112\nI0819 22:30:26.728811 21584 solver.cpp:404]     Test net output #1: loss = 0.420514 (* 1 = 0.420514 loss)\nI0819 22:30:28.027747 21584 solver.cpp:228] Iteration 50800, loss = 0.128433\nI0819 22:30:28.027792 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 22:30:28.027809 21584 solver.cpp:244]     Train net output #1: loss = 0.128432 (* 1 = 0.128432 loss)\nI0819 22:30:28.144672 21584 sgd_solver.cpp:166] Iteration 50800, lr = 1.27\nI0819 22:32:46.111315 21584 solver.cpp:337] Iteration 50900, Testing net (#0)\nI0819 22:34:08.587239 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8828\nI0819 22:34:08.587512 21584 solver.cpp:404]     Test net output #1: loss = 0.416882 (* 1 = 0.416882 loss)\nI0819 22:34:09.885968 21584 solver.cpp:228] Iteration 50900, loss = 0.11793\nI0819 22:34:09.886011 21584 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 22:34:09.886027 21584 solver.cpp:244]     Train net output #1: loss = 0.11793 (* 1 = 0.11793 loss)\nI0819 22:34:09.997966 21584 sgd_solver.cpp:166] Iteration 50900, lr = 1.2725\nI0819 22:36:27.962983 21584 solver.cpp:337] Iteration 51000, Testing net (#0)\nI0819 22:37:50.433027 21584 solver.cpp:404]     Test net output #0: accuracy = 0.876121\nI0819 22:37:50.433312 21584 solver.cpp:404]     Test net output #1: loss = 0.437438 (* 1 = 0.437438 loss)\nI0819 22:37:51.731784 21584 solver.cpp:228] Iteration 51000, loss = 0.214435\nI0819 22:37:51.731824 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 22:37:51.731840 21584 solver.cpp:244]     Train net output #1: loss = 0.214435 (* 1 = 0.214435 loss)\nI0819 22:37:51.848101 21584 sgd_solver.cpp:166] Iteration 51000, lr = 1.275\nI0819 22:40:09.808817 21584 solver.cpp:337] Iteration 51100, Testing net (#0)\nI0819 22:41:32.293366 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88232\nI0819 22:41:32.293679 21584 solver.cpp:404]     Test net output #1: loss = 0.42302 (* 1 = 0.42302 loss)\nI0819 22:41:33.593201 21584 solver.cpp:228] Iteration 51100, loss = 0.222965\nI0819 22:41:33.593246 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0819 22:41:33.593271 21584 solver.cpp:244]     Train net output #1: loss = 0.222965 (* 1 = 0.222965 loss)\nI0819 22:41:33.702564 21584 sgd_solver.cpp:166] Iteration 51100, lr = 1.2775\nI0819 22:43:51.667503 21584 solver.cpp:337] Iteration 51200, Testing net (#0)\nI0819 22:45:14.146240 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0819 22:45:14.146520 21584 solver.cpp:404]     Test net output #1: loss = 0.411869 (* 1 = 0.411869 loss)\nI0819 22:45:15.446514 21584 solver.cpp:228] Iteration 51200, loss = 0.0583581\nI0819 22:45:15.446559 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0819 22:45:15.446584 21584 solver.cpp:244]     Train net output #1: 
loss = 0.0583579 (* 1 = 0.0583579 loss)\nI0819 22:45:15.558221 21584 sgd_solver.cpp:166] Iteration 51200, lr = 1.28\nI0819 22:47:33.465965 21584 solver.cpp:337] Iteration 51300, Testing net (#0)\nI0819 22:48:55.949514 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8854\nI0819 22:48:55.949805 21584 solver.cpp:404]     Test net output #1: loss = 0.402961 (* 1 = 0.402961 loss)\nI0819 22:48:57.248847 21584 solver.cpp:228] Iteration 51300, loss = 0.157466\nI0819 22:48:57.248893 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0819 22:48:57.248922 21584 solver.cpp:244]     Train net output #1: loss = 0.157465 (* 1 = 0.157465 loss)\nI0819 22:48:57.358770 21584 sgd_solver.cpp:166] Iteration 51300, lr = 1.2825\nI0819 22:51:15.343070 21584 solver.cpp:337] Iteration 51400, Testing net (#0)\nI0819 22:52:37.830371 21584 solver.cpp:404]     Test net output #0: accuracy = 0.876\nI0819 22:52:37.830687 21584 solver.cpp:404]     Test net output #1: loss = 0.439521 (* 1 = 0.439521 loss)\nI0819 22:52:39.130198 21584 solver.cpp:228] Iteration 51400, loss = 0.16267\nI0819 22:52:39.130242 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 22:52:39.130266 21584 solver.cpp:244]     Train net output #1: loss = 0.16267 (* 1 = 0.16267 loss)\nI0819 22:52:39.243731 21584 sgd_solver.cpp:166] Iteration 51400, lr = 1.285\nI0819 22:54:57.301496 21584 solver.cpp:337] Iteration 51500, Testing net (#0)\nI0819 22:56:19.775347 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0819 22:56:19.775666 21584 solver.cpp:404]     Test net output #1: loss = 0.399005 (* 1 = 0.399005 loss)\nI0819 22:56:21.074766 21584 solver.cpp:228] Iteration 51500, loss = 0.133248\nI0819 22:56:21.074810 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 22:56:21.074834 21584 solver.cpp:244]     Train net output #1: loss = 0.133248 (* 1 = 0.133248 loss)\nI0819 22:56:21.187134 21584 sgd_solver.cpp:166] Iteration 51500, lr = 1.2875\nI0819 
22:58:39.110177 21584 solver.cpp:337] Iteration 51600, Testing net (#0)\nI0819 23:00:01.586791 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8812\nI0819 23:00:01.587296 21584 solver.cpp:404]     Test net output #1: loss = 0.416844 (* 1 = 0.416844 loss)\nI0819 23:00:02.886268 21584 solver.cpp:228] Iteration 51600, loss = 0.109386\nI0819 23:00:02.886310 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:00:02.886335 21584 solver.cpp:244]     Train net output #1: loss = 0.109386 (* 1 = 0.109386 loss)\nI0819 23:00:02.993849 21584 sgd_solver.cpp:166] Iteration 51600, lr = 1.29\nI0819 23:02:20.814113 21584 solver.cpp:337] Iteration 51700, Testing net (#0)\nI0819 23:03:43.243281 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0819 23:03:43.243594 21584 solver.cpp:404]     Test net output #1: loss = 0.420277 (* 1 = 0.420277 loss)\nI0819 23:03:44.542632 21584 solver.cpp:228] Iteration 51700, loss = 0.103954\nI0819 23:03:44.542677 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:03:44.542701 21584 solver.cpp:244]     Train net output #1: loss = 0.103953 (* 1 = 0.103953 loss)\nI0819 23:03:44.658522 21584 sgd_solver.cpp:166] Iteration 51700, lr = 1.2925\nI0819 23:06:02.620817 21584 solver.cpp:337] Iteration 51800, Testing net (#0)\nI0819 23:07:25.113603 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88376\nI0819 23:07:25.113885 21584 solver.cpp:404]     Test net output #1: loss = 0.409478 (* 1 = 0.409478 loss)\nI0819 23:07:26.413182 21584 solver.cpp:228] Iteration 51800, loss = 0.137716\nI0819 23:07:26.413225 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:07:26.413249 21584 solver.cpp:244]     Train net output #1: loss = 0.137716 (* 1 = 0.137716 loss)\nI0819 23:07:26.522063 21584 sgd_solver.cpp:166] Iteration 51800, lr = 1.295\nI0819 23:09:44.615037 21584 solver.cpp:337] Iteration 51900, Testing net (#0)\nI0819 23:11:07.093828 21584 solver.cpp:404]     Test net 
output #0: accuracy = 0.88316\nI0819 23:11:07.094144 21584 solver.cpp:404]     Test net output #1: loss = 0.400578 (* 1 = 0.400578 loss)\nI0819 23:11:08.393368 21584 solver.cpp:228] Iteration 51900, loss = 0.114333\nI0819 23:11:08.393414 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:11:08.393437 21584 solver.cpp:244]     Train net output #1: loss = 0.114333 (* 1 = 0.114333 loss)\nI0819 23:11:08.507580 21584 sgd_solver.cpp:166] Iteration 51900, lr = 1.2975\nI0819 23:13:26.540602 21584 solver.cpp:337] Iteration 52000, Testing net (#0)\nI0819 23:14:49.014058 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0819 23:14:49.014365 21584 solver.cpp:404]     Test net output #1: loss = 0.44423 (* 1 = 0.44423 loss)\nI0819 23:14:50.312574 21584 solver.cpp:228] Iteration 52000, loss = 0.157537\nI0819 23:14:50.312616 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0819 23:14:50.312631 21584 solver.cpp:244]     Train net output #1: loss = 0.157537 (* 1 = 0.157537 loss)\nI0819 23:14:50.430157 21584 sgd_solver.cpp:166] Iteration 52000, lr = 1.3\nI0819 23:17:08.509341 21584 solver.cpp:337] Iteration 52100, Testing net (#0)\nI0819 23:18:30.976513 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88536\nI0819 23:18:30.976800 21584 solver.cpp:404]     Test net output #1: loss = 0.39996 (* 1 = 0.39996 loss)\nI0819 23:18:32.276042 21584 solver.cpp:228] Iteration 52100, loss = 0.16685\nI0819 23:18:32.276082 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:18:32.276098 21584 solver.cpp:244]     Train net output #1: loss = 0.16685 (* 1 = 0.16685 loss)\nI0819 23:18:32.388203 21584 sgd_solver.cpp:166] Iteration 52100, lr = 1.3025\nI0819 23:20:50.410203 21584 solver.cpp:337] Iteration 52200, Testing net (#0)\nI0819 23:22:12.853747 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0819 23:22:12.854012 21584 solver.cpp:404]     Test net output #1: loss = 0.414685 (* 1 = 0.414685 
loss)\nI0819 23:22:14.152323 21584 solver.cpp:228] Iteration 52200, loss = 0.119657\nI0819 23:22:14.152365 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:22:14.152381 21584 solver.cpp:244]     Train net output #1: loss = 0.119657 (* 1 = 0.119657 loss)\nI0819 23:22:14.261564 21584 sgd_solver.cpp:166] Iteration 52200, lr = 1.305\nI0819 23:24:32.245465 21584 solver.cpp:337] Iteration 52300, Testing net (#0)\nI0819 23:25:54.688385 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0819 23:25:54.688683 21584 solver.cpp:404]     Test net output #1: loss = 0.425612 (* 1 = 0.425612 loss)\nI0819 23:25:55.987216 21584 solver.cpp:228] Iteration 52300, loss = 0.225319\nI0819 23:25:55.987260 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:25:55.987275 21584 solver.cpp:244]     Train net output #1: loss = 0.225319 (* 1 = 0.225319 loss)\nI0819 23:25:56.101862 21584 sgd_solver.cpp:166] Iteration 52300, lr = 1.3075\nI0819 23:28:14.194736 21584 solver.cpp:337] Iteration 52400, Testing net (#0)\nI0819 23:29:36.673163 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8856\nI0819 23:29:36.673446 21584 solver.cpp:404]     Test net output #1: loss = 0.409467 (* 1 = 0.409467 loss)\nI0819 23:29:37.972641 21584 solver.cpp:228] Iteration 52400, loss = 0.174\nI0819 23:29:37.972685 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0819 23:29:37.972702 21584 solver.cpp:244]     Train net output #1: loss = 0.173999 (* 1 = 0.173999 loss)\nI0819 23:29:38.083767 21584 sgd_solver.cpp:166] Iteration 52400, lr = 1.31\nI0819 23:31:56.199509 21584 solver.cpp:337] Iteration 52500, Testing net (#0)\nI0819 23:33:18.672255 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88236\nI0819 23:33:18.672562 21584 solver.cpp:404]     Test net output #1: loss = 0.410772 (* 1 = 0.410772 loss)\nI0819 23:33:19.971881 21584 solver.cpp:228] Iteration 52500, loss = 0.188936\nI0819 23:33:19.971925 21584 solver.cpp:244]    
 Train net output #0: accuracy = 0.944\nI0819 23:33:19.971941 21584 solver.cpp:244]     Train net output #1: loss = 0.188936 (* 1 = 0.188936 loss)\nI0819 23:33:20.086935 21584 sgd_solver.cpp:166] Iteration 52500, lr = 1.3125\nI0819 23:35:38.095916 21584 solver.cpp:337] Iteration 52600, Testing net (#0)\nI0819 23:37:00.567525 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88564\nI0819 23:37:00.567802 21584 solver.cpp:404]     Test net output #1: loss = 0.401716 (* 1 = 0.401716 loss)\nI0819 23:37:01.867115 21584 solver.cpp:228] Iteration 52600, loss = 0.0739455\nI0819 23:37:01.867158 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:37:01.867173 21584 solver.cpp:244]     Train net output #1: loss = 0.0739453 (* 1 = 0.0739453 loss)\nI0819 23:37:01.974501 21584 sgd_solver.cpp:166] Iteration 52600, lr = 1.315\nI0819 23:39:19.906002 21584 solver.cpp:337] Iteration 52700, Testing net (#0)\nI0819 23:40:42.367607 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0819 23:40:42.367918 21584 solver.cpp:404]     Test net output #1: loss = 0.422645 (* 1 = 0.422645 loss)\nI0819 23:40:43.667855 21584 solver.cpp:228] Iteration 52700, loss = 0.0796651\nI0819 23:40:43.667901 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:40:43.667917 21584 solver.cpp:244]     Train net output #1: loss = 0.0796649 (* 1 = 0.0796649 loss)\nI0819 23:40:43.782630 21584 sgd_solver.cpp:166] Iteration 52700, lr = 1.3175\nI0819 23:43:01.789953 21584 solver.cpp:337] Iteration 52800, Testing net (#0)\nI0819 23:44:24.250371 21584 solver.cpp:404]     Test net output #0: accuracy = 0.890321\nI0819 23:44:24.250655 21584 solver.cpp:404]     Test net output #1: loss = 0.401132 (* 1 = 0.401132 loss)\nI0819 23:44:25.550292 21584 solver.cpp:228] Iteration 52800, loss = 0.105532\nI0819 23:44:25.550335 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:44:25.550351 21584 solver.cpp:244]     Train net output #1: loss = 
0.105532 (* 1 = 0.105532 loss)\nI0819 23:44:25.658357 21584 sgd_solver.cpp:166] Iteration 52800, lr = 1.32\nI0819 23:46:43.665596 21584 solver.cpp:337] Iteration 52900, Testing net (#0)\nI0819 23:48:06.150023 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88592\nI0819 23:48:06.150360 21584 solver.cpp:404]     Test net output #1: loss = 0.399938 (* 1 = 0.399938 loss)\nI0819 23:48:07.450331 21584 solver.cpp:228] Iteration 52900, loss = 0.111086\nI0819 23:48:07.450376 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0819 23:48:07.450390 21584 solver.cpp:244]     Train net output #1: loss = 0.111086 (* 1 = 0.111086 loss)\nI0819 23:48:07.560400 21584 sgd_solver.cpp:166] Iteration 52900, lr = 1.3225\nI0819 23:50:25.634363 21584 solver.cpp:337] Iteration 53000, Testing net (#0)\nI0819 23:51:48.113888 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88844\nI0819 23:51:48.114241 21584 solver.cpp:404]     Test net output #1: loss = 0.393957 (* 1 = 0.393957 loss)\nI0819 23:51:49.413036 21584 solver.cpp:228] Iteration 53000, loss = 0.114567\nI0819 23:51:49.413080 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:51:49.413096 21584 solver.cpp:244]     Train net output #1: loss = 0.114567 (* 1 = 0.114567 loss)\nI0819 23:51:49.523123 21584 sgd_solver.cpp:166] Iteration 53000, lr = 1.325\nI0819 23:54:07.452105 21584 solver.cpp:337] Iteration 53100, Testing net (#0)\nI0819 23:55:29.935014 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0819 23:55:29.935346 21584 solver.cpp:404]     Test net output #1: loss = 0.414311 (* 1 = 0.414311 loss)\nI0819 23:55:31.234479 21584 solver.cpp:228] Iteration 53100, loss = 0.107333\nI0819 23:55:31.234524 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0819 23:55:31.234539 21584 solver.cpp:244]     Train net output #1: loss = 0.107333 (* 1 = 0.107333 loss)\nI0819 23:55:31.349124 21584 sgd_solver.cpp:166] Iteration 53100, lr = 1.3275\nI0819 
23:57:49.234472 21584 solver.cpp:337] Iteration 53200, Testing net (#0)\nI0819 23:59:11.719697 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88416\nI0819 23:59:11.720026 21584 solver.cpp:404]     Test net output #1: loss = 0.41745 (* 1 = 0.41745 loss)\nI0819 23:59:13.019351 21584 solver.cpp:228] Iteration 53200, loss = 0.0871266\nI0819 23:59:13.019394 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0819 23:59:13.019410 21584 solver.cpp:244]     Train net output #1: loss = 0.0871264 (* 1 = 0.0871264 loss)\nI0819 23:59:13.130568 21584 sgd_solver.cpp:166] Iteration 53200, lr = 1.33\nI0820 00:01:30.986380 21584 solver.cpp:337] Iteration 53300, Testing net (#0)\nI0820 00:02:53.452989 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884641\nI0820 00:02:53.453312 21584 solver.cpp:404]     Test net output #1: loss = 0.400797 (* 1 = 0.400797 loss)\nI0820 00:02:54.753127 21584 solver.cpp:228] Iteration 53300, loss = 0.0724632\nI0820 00:02:54.753170 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 00:02:54.753186 21584 solver.cpp:244]     Train net output #1: loss = 0.0724629 (* 1 = 0.0724629 loss)\nI0820 00:02:54.865775 21584 sgd_solver.cpp:166] Iteration 53300, lr = 1.3325\nI0820 00:05:12.757699 21584 solver.cpp:337] Iteration 53400, Testing net (#0)\nI0820 00:06:35.236786 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88644\nI0820 00:06:35.237128 21584 solver.cpp:404]     Test net output #1: loss = 0.392344 (* 1 = 0.392344 loss)\nI0820 00:06:36.536103 21584 solver.cpp:228] Iteration 53400, loss = 0.0676743\nI0820 00:06:36.536147 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:06:36.536164 21584 solver.cpp:244]     Train net output #1: loss = 0.0676741 (* 1 = 0.0676741 loss)\nI0820 00:06:36.647251 21584 sgd_solver.cpp:166] Iteration 53400, lr = 1.335\nI0820 00:08:54.477162 21584 solver.cpp:337] Iteration 53500, Testing net (#0)\nI0820 00:10:16.962736 21584 solver.cpp:404]    
 Test net output #0: accuracy = 0.88276\nI0820 00:10:16.963074 21584 solver.cpp:404]     Test net output #1: loss = 0.418577 (* 1 = 0.418577 loss)\nI0820 00:10:18.262508 21584 solver.cpp:228] Iteration 53500, loss = 0.186981\nI0820 00:10:18.262552 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 00:10:18.262568 21584 solver.cpp:244]     Train net output #1: loss = 0.186981 (* 1 = 0.186981 loss)\nI0820 00:10:18.375730 21584 sgd_solver.cpp:166] Iteration 53500, lr = 1.3375\nI0820 00:12:36.187346 21584 solver.cpp:337] Iteration 53600, Testing net (#0)\nI0820 00:13:58.561044 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0820 00:13:58.561373 21584 solver.cpp:404]     Test net output #1: loss = 0.425966 (* 1 = 0.425966 loss)\nI0820 00:13:59.860883 21584 solver.cpp:228] Iteration 53600, loss = 0.170416\nI0820 00:13:59.860929 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:13:59.860949 21584 solver.cpp:244]     Train net output #1: loss = 0.170416 (* 1 = 0.170416 loss)\nI0820 00:13:59.974530 21584 sgd_solver.cpp:166] Iteration 53600, lr = 1.34\nI0820 00:16:17.900957 21584 solver.cpp:337] Iteration 53700, Testing net (#0)\nI0820 00:17:40.282352 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88912\nI0820 00:17:40.282675 21584 solver.cpp:404]     Test net output #1: loss = 0.373903 (* 1 = 0.373903 loss)\nI0820 00:17:41.581429 21584 solver.cpp:228] Iteration 53700, loss = 0.115423\nI0820 00:17:41.581473 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:17:41.581490 21584 solver.cpp:244]     Train net output #1: loss = 0.115423 (* 1 = 0.115423 loss)\nI0820 00:17:41.689463 21584 sgd_solver.cpp:166] Iteration 53700, lr = 1.3425\nI0820 00:19:59.571928 21584 solver.cpp:337] Iteration 53800, Testing net (#0)\nI0820 00:21:21.957381 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88636\nI0820 00:21:21.957715 21584 solver.cpp:404]     Test net output #1: loss = 0.398357 
(* 1 = 0.398357 loss)\nI0820 00:21:23.256301 21584 solver.cpp:228] Iteration 53800, loss = 0.152585\nI0820 00:21:23.256345 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:21:23.256361 21584 solver.cpp:244]     Train net output #1: loss = 0.152585 (* 1 = 0.152585 loss)\nI0820 00:21:23.369369 21584 sgd_solver.cpp:166] Iteration 53800, lr = 1.345\nI0820 00:23:41.304962 21584 solver.cpp:337] Iteration 53900, Testing net (#0)\nI0820 00:25:03.682337 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 00:25:03.682668 21584 solver.cpp:404]     Test net output #1: loss = 0.413727 (* 1 = 0.413727 loss)\nI0820 00:25:04.982231 21584 solver.cpp:228] Iteration 53900, loss = 0.121364\nI0820 00:25:04.982275 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 00:25:04.982291 21584 solver.cpp:244]     Train net output #1: loss = 0.121364 (* 1 = 0.121364 loss)\nI0820 00:25:05.104115 21584 sgd_solver.cpp:166] Iteration 53900, lr = 1.3475\nI0820 00:27:22.939995 21584 solver.cpp:337] Iteration 54000, Testing net (#0)\nI0820 00:28:45.317057 21584 solver.cpp:404]     Test net output #0: accuracy = 0.890681\nI0820 00:28:45.317368 21584 solver.cpp:404]     Test net output #1: loss = 0.374762 (* 1 = 0.374762 loss)\nI0820 00:28:46.615968 21584 solver.cpp:228] Iteration 54000, loss = 0.142946\nI0820 00:28:46.616011 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:28:46.616027 21584 solver.cpp:244]     Train net output #1: loss = 0.142945 (* 1 = 0.142945 loss)\nI0820 00:28:46.733737 21584 sgd_solver.cpp:166] Iteration 54000, lr = 1.35\nI0820 00:31:04.616756 21584 solver.cpp:337] Iteration 54100, Testing net (#0)\nI0820 00:32:26.985272 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87032\nI0820 00:32:26.985607 21584 solver.cpp:404]     Test net output #1: loss = 0.458752 (* 1 = 0.458752 loss)\nI0820 00:32:28.285374 21584 solver.cpp:228] Iteration 54100, loss = 0.185916\nI0820 00:32:28.285415 
21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:32:28.285431 21584 solver.cpp:244]     Train net output #1: loss = 0.185916 (* 1 = 0.185916 loss)\nI0820 00:32:28.393370 21584 sgd_solver.cpp:166] Iteration 54100, lr = 1.3525\nI0820 00:34:46.371556 21584 solver.cpp:337] Iteration 54200, Testing net (#0)\nI0820 00:36:08.740249 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0820 00:36:08.740582 21584 solver.cpp:404]     Test net output #1: loss = 0.414205 (* 1 = 0.414205 loss)\nI0820 00:36:10.039810 21584 solver.cpp:228] Iteration 54200, loss = 0.163084\nI0820 00:36:10.039851 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:36:10.039867 21584 solver.cpp:244]     Train net output #1: loss = 0.163083 (* 1 = 0.163083 loss)\nI0820 00:36:10.148146 21584 sgd_solver.cpp:166] Iteration 54200, lr = 1.355\nI0820 00:38:27.938400 21584 solver.cpp:337] Iteration 54300, Testing net (#0)\nI0820 00:39:50.308181 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0820 00:39:50.308517 21584 solver.cpp:404]     Test net output #1: loss = 0.401315 (* 1 = 0.401315 loss)\nI0820 00:39:51.608335 21584 solver.cpp:228] Iteration 54300, loss = 0.101442\nI0820 00:39:51.608374 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 00:39:51.608391 21584 solver.cpp:244]     Train net output #1: loss = 0.101442 (* 1 = 0.101442 loss)\nI0820 00:39:51.715878 21584 sgd_solver.cpp:166] Iteration 54300, lr = 1.3575\nI0820 00:42:09.589994 21584 solver.cpp:337] Iteration 54400, Testing net (#0)\nI0820 00:43:32.450021 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0820 00:43:32.450333 21584 solver.cpp:404]     Test net output #1: loss = 0.412337 (* 1 = 0.412337 loss)\nI0820 00:43:33.759189 21584 solver.cpp:228] Iteration 54400, loss = 0.169529\nI0820 00:43:33.759235 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:43:33.759253 21584 solver.cpp:244]     Train net 
output #1: loss = 0.169529 (* 1 = 0.169529 loss)\nI0820 00:43:33.864640 21584 sgd_solver.cpp:166] Iteration 54400, lr = 1.36\nI0820 00:45:51.763044 21584 solver.cpp:337] Iteration 54500, Testing net (#0)\nI0820 00:47:14.346901 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89044\nI0820 00:47:14.347226 21584 solver.cpp:404]     Test net output #1: loss = 0.392485 (* 1 = 0.392485 loss)\nI0820 00:47:15.650687 21584 solver.cpp:228] Iteration 54500, loss = 0.147625\nI0820 00:47:15.650738 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:47:15.650756 21584 solver.cpp:244]     Train net output #1: loss = 0.147625 (* 1 = 0.147625 loss)\nI0820 00:47:15.753897 21584 sgd_solver.cpp:166] Iteration 54500, lr = 1.3625\nI0820 00:49:33.624166 21584 solver.cpp:337] Iteration 54600, Testing net (#0)\nI0820 00:50:56.412729 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88588\nI0820 00:50:56.413053 21584 solver.cpp:404]     Test net output #1: loss = 0.392435 (* 1 = 0.392435 loss)\nI0820 00:50:57.724534 21584 solver.cpp:228] Iteration 54600, loss = 0.105917\nI0820 00:50:57.724797 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 00:50:57.724901 21584 solver.cpp:244]     Train net output #1: loss = 0.105917 (* 1 = 0.105917 loss)\nI0820 00:50:57.821643 21584 sgd_solver.cpp:166] Iteration 54600, lr = 1.365\nI0820 00:53:15.940732 21584 solver.cpp:337] Iteration 54700, Testing net (#0)\nI0820 00:54:39.415927 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 00:54:39.416261 21584 solver.cpp:404]     Test net output #1: loss = 0.414355 (* 1 = 0.414355 loss)\nI0820 00:54:40.718941 21584 solver.cpp:228] Iteration 54700, loss = 0.116731\nI0820 00:54:40.718986 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 00:54:40.719002 21584 solver.cpp:244]     Train net output #1: loss = 0.116731 (* 1 = 0.116731 loss)\nI0820 00:54:40.829288 21584 sgd_solver.cpp:166] Iteration 54700, lr = 
1.3675\nI0820 00:56:58.889715 21584 solver.cpp:337] Iteration 54800, Testing net (#0)\nI0820 00:58:22.289054 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0820 00:58:22.289412 21584 solver.cpp:404]     Test net output #1: loss = 0.40799 (* 1 = 0.40799 loss)\nI0820 00:58:23.592483 21584 solver.cpp:228] Iteration 54800, loss = 0.218801\nI0820 00:58:23.592527 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 00:58:23.592545 21584 solver.cpp:244]     Train net output #1: loss = 0.218801 (* 1 = 0.218801 loss)\nI0820 00:58:23.698586 21584 sgd_solver.cpp:166] Iteration 54800, lr = 1.37\nI0820 01:00:41.881860 21584 solver.cpp:337] Iteration 54900, Testing net (#0)\nI0820 01:02:05.280421 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0820 01:02:05.280766 21584 solver.cpp:404]     Test net output #1: loss = 0.413225 (* 1 = 0.413225 loss)\nI0820 01:02:06.584656 21584 solver.cpp:228] Iteration 54900, loss = 0.145886\nI0820 01:02:06.584715 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:02:06.584734 21584 solver.cpp:244]     Train net output #1: loss = 0.145885 (* 1 = 0.145885 loss)\nI0820 01:02:06.694015 21584 sgd_solver.cpp:166] Iteration 54900, lr = 1.3725\nI0820 01:04:24.797837 21584 solver.cpp:337] Iteration 55000, Testing net (#0)\nI0820 01:05:48.219754 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0820 01:05:48.220118 21584 solver.cpp:404]     Test net output #1: loss = 0.420589 (* 1 = 0.420589 loss)\nI0820 01:05:49.522861 21584 solver.cpp:228] Iteration 55000, loss = 0.0941247\nI0820 01:05:49.522922 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 01:05:49.522939 21584 solver.cpp:244]     Train net output #1: loss = 0.0941244 (* 1 = 0.0941244 loss)\nI0820 01:05:49.632225 21584 sgd_solver.cpp:166] Iteration 55000, lr = 1.375\nI0820 01:08:07.810158 21584 solver.cpp:337] Iteration 55100, Testing net (#0)\nI0820 01:09:31.227201 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.88196\nI0820 01:09:31.227589 21584 solver.cpp:404]     Test net output #1: loss = 0.409647 (* 1 = 0.409647 loss)\nI0820 01:09:32.531584 21584 solver.cpp:228] Iteration 55100, loss = 0.0799494\nI0820 01:09:32.531627 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 01:09:32.531643 21584 solver.cpp:244]     Train net output #1: loss = 0.079949 (* 1 = 0.079949 loss)\nI0820 01:09:32.635800 21584 sgd_solver.cpp:166] Iteration 55100, lr = 1.3775\nI0820 01:11:50.753628 21584 solver.cpp:337] Iteration 55200, Testing net (#0)\nI0820 01:13:14.294353 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8828\nI0820 01:13:14.294749 21584 solver.cpp:404]     Test net output #1: loss = 0.412839 (* 1 = 0.412839 loss)\nI0820 01:13:15.598297 21584 solver.cpp:228] Iteration 55200, loss = 0.0954338\nI0820 01:13:15.598353 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 01:13:15.598378 21584 solver.cpp:244]     Train net output #1: loss = 0.0954335 (* 1 = 0.0954335 loss)\nI0820 01:13:15.705977 21584 sgd_solver.cpp:166] Iteration 55200, lr = 1.38\nI0820 01:15:33.801717 21584 solver.cpp:337] Iteration 55300, Testing net (#0)\nI0820 01:16:57.373148 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88204\nI0820 01:16:57.373518 21584 solver.cpp:404]     Test net output #1: loss = 0.396846 (* 1 = 0.396846 loss)\nI0820 01:16:58.677630 21584 solver.cpp:228] Iteration 55300, loss = 0.169562\nI0820 01:16:58.677688 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:16:58.677713 21584 solver.cpp:244]     Train net output #1: loss = 0.169562 (* 1 = 0.169562 loss)\nI0820 01:16:58.785390 21584 sgd_solver.cpp:166] Iteration 55300, lr = 1.3825\nI0820 01:19:16.891492 21584 solver.cpp:337] Iteration 55400, Testing net (#0)\nI0820 01:20:40.302734 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0820 01:20:40.303098 21584 solver.cpp:404]     Test net output 
#1: loss = 0.408412 (* 1 = 0.408412 loss)\nI0820 01:20:41.606262 21584 solver.cpp:228] Iteration 55400, loss = 0.126222\nI0820 01:20:41.606312 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:20:41.606336 21584 solver.cpp:244]     Train net output #1: loss = 0.126222 (* 1 = 0.126222 loss)\nI0820 01:20:41.714433 21584 sgd_solver.cpp:166] Iteration 55400, lr = 1.385\nI0820 01:22:59.767060 21584 solver.cpp:337] Iteration 55500, Testing net (#0)\nI0820 01:24:23.251960 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87584\nI0820 01:24:23.252354 21584 solver.cpp:404]     Test net output #1: loss = 0.431393 (* 1 = 0.431393 loss)\nI0820 01:24:24.555549 21584 solver.cpp:228] Iteration 55500, loss = 0.162515\nI0820 01:24:24.555598 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:24:24.555624 21584 solver.cpp:244]     Train net output #1: loss = 0.162514 (* 1 = 0.162514 loss)\nI0820 01:24:24.664170 21584 sgd_solver.cpp:166] Iteration 55500, lr = 1.3875\nI0820 01:26:42.732350 21584 solver.cpp:337] Iteration 55600, Testing net (#0)\nI0820 01:28:06.227054 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0820 01:28:06.227414 21584 solver.cpp:404]     Test net output #1: loss = 0.41312 (* 1 = 0.41312 loss)\nI0820 01:28:07.530375 21584 solver.cpp:228] Iteration 55600, loss = 0.215923\nI0820 01:28:07.530428 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 01:28:07.530453 21584 solver.cpp:244]     Train net output #1: loss = 0.215922 (* 1 = 0.215922 loss)\nI0820 01:28:07.637105 21584 sgd_solver.cpp:166] Iteration 55600, lr = 1.39\nI0820 01:30:25.701083 21584 solver.cpp:337] Iteration 55700, Testing net (#0)\nI0820 01:31:49.228245 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88652\nI0820 01:31:49.228611 21584 solver.cpp:404]     Test net output #1: loss = 0.397845 (* 1 = 0.397845 loss)\nI0820 01:31:50.531209 21584 solver.cpp:228] Iteration 55700, loss = 0.128655\nI0820 
01:31:50.531265 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:31:50.531288 21584 solver.cpp:244]     Train net output #1: loss = 0.128655 (* 1 = 0.128655 loss)\nI0820 01:31:50.643369 21584 sgd_solver.cpp:166] Iteration 55700, lr = 1.3925\nI0820 01:34:08.584004 21584 solver.cpp:337] Iteration 55800, Testing net (#0)\nI0820 01:35:31.044912 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88748\nI0820 01:35:31.045188 21584 solver.cpp:404]     Test net output #1: loss = 0.399633 (* 1 = 0.399633 loss)\nI0820 01:35:32.348301 21584 solver.cpp:228] Iteration 55800, loss = 0.144042\nI0820 01:35:32.348336 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:35:32.348351 21584 solver.cpp:244]     Train net output #1: loss = 0.144041 (* 1 = 0.144041 loss)\nI0820 01:35:32.456725 21584 sgd_solver.cpp:166] Iteration 55800, lr = 1.395\nI0820 01:37:50.412369 21584 solver.cpp:337] Iteration 55900, Testing net (#0)\nI0820 01:39:12.866415 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87544\nI0820 01:39:12.866719 21584 solver.cpp:404]     Test net output #1: loss = 0.444948 (* 1 = 0.444948 loss)\nI0820 01:39:14.170342 21584 solver.cpp:228] Iteration 55900, loss = 0.172383\nI0820 01:39:14.170387 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 01:39:14.170403 21584 solver.cpp:244]     Train net output #1: loss = 0.172382 (* 1 = 0.172382 loss)\nI0820 01:39:14.276054 21584 sgd_solver.cpp:166] Iteration 55900, lr = 1.3975\nI0820 01:41:32.177299 21584 solver.cpp:337] Iteration 56000, Testing net (#0)\nI0820 01:42:54.649015 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8836\nI0820 01:42:54.649327 21584 solver.cpp:404]     Test net output #1: loss = 0.406465 (* 1 = 0.406465 loss)\nI0820 01:42:55.952419 21584 solver.cpp:228] Iteration 56000, loss = 0.176274\nI0820 01:42:55.952461 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:42:55.952477 21584 solver.cpp:244]     
Train net output #1: loss = 0.176273 (* 1 = 0.176273 loss)\nI0820 01:42:56.059252 21584 sgd_solver.cpp:166] Iteration 56000, lr = 1.4\nI0820 01:45:14.039443 21584 solver.cpp:337] Iteration 56100, Testing net (#0)\nI0820 01:46:36.495913 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89396\nI0820 01:46:36.496227 21584 solver.cpp:404]     Test net output #1: loss = 0.374873 (* 1 = 0.374873 loss)\nI0820 01:46:37.799937 21584 solver.cpp:228] Iteration 56100, loss = 0.0979393\nI0820 01:46:37.799973 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 01:46:37.799988 21584 solver.cpp:244]     Train net output #1: loss = 0.097939 (* 1 = 0.097939 loss)\nI0820 01:46:37.909426 21584 sgd_solver.cpp:166] Iteration 56100, lr = 1.4025\nI0820 01:48:55.895431 21584 solver.cpp:337] Iteration 56200, Testing net (#0)\nI0820 01:50:18.369455 21584 solver.cpp:404]     Test net output #0: accuracy = 0.886361\nI0820 01:50:18.369746 21584 solver.cpp:404]     Test net output #1: loss = 0.397726 (* 1 = 0.397726 loss)\nI0820 01:50:19.672380 21584 solver.cpp:228] Iteration 56200, loss = 0.118818\nI0820 01:50:19.672425 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 01:50:19.672441 21584 solver.cpp:244]     Train net output #1: loss = 0.118818 (* 1 = 0.118818 loss)\nI0820 01:50:19.784116 21584 sgd_solver.cpp:166] Iteration 56200, lr = 1.405\nI0820 01:52:37.795249 21584 solver.cpp:337] Iteration 56300, Testing net (#0)\nI0820 01:54:00.275744 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88404\nI0820 01:54:00.276058 21584 solver.cpp:404]     Test net output #1: loss = 0.399811 (* 1 = 0.399811 loss)\nI0820 01:54:01.578423 21584 solver.cpp:228] Iteration 56300, loss = 0.149765\nI0820 01:54:01.578465 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 01:54:01.578481 21584 solver.cpp:244]     Train net output #1: loss = 0.149765 (* 1 = 0.149765 loss)\nI0820 01:54:01.683248 21584 sgd_solver.cpp:166] Iteration 56300, lr 
= 1.4075\nI0820 01:56:19.641330 21584 solver.cpp:337] Iteration 56400, Testing net (#0)\nI0820 01:57:42.117877 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0820 01:57:42.118134 21584 solver.cpp:404]     Test net output #1: loss = 0.398598 (* 1 = 0.398598 loss)\nI0820 01:57:43.420198 21584 solver.cpp:228] Iteration 56400, loss = 0.140683\nI0820 01:57:43.420234 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 01:57:43.420249 21584 solver.cpp:244]     Train net output #1: loss = 0.140683 (* 1 = 0.140683 loss)\nI0820 01:57:43.530863 21584 sgd_solver.cpp:166] Iteration 56400, lr = 1.41\nI0820 02:00:01.507863 21584 solver.cpp:337] Iteration 56500, Testing net (#0)\nI0820 02:01:23.980536 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0820 02:01:23.980808 21584 solver.cpp:404]     Test net output #1: loss = 0.393453 (* 1 = 0.393453 loss)\nI0820 02:01:25.282788 21584 solver.cpp:228] Iteration 56500, loss = 0.102754\nI0820 02:01:25.282832 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:01:25.282848 21584 solver.cpp:244]     Train net output #1: loss = 0.102754 (* 1 = 0.102754 loss)\nI0820 02:01:25.389117 21584 sgd_solver.cpp:166] Iteration 56500, lr = 1.4125\nI0820 02:03:43.364305 21584 solver.cpp:337] Iteration 56600, Testing net (#0)\nI0820 02:05:05.795532 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89176\nI0820 02:05:05.795843 21584 solver.cpp:404]     Test net output #1: loss = 0.391455 (* 1 = 0.391455 loss)\nI0820 02:05:07.097685 21584 solver.cpp:228] Iteration 56600, loss = 0.145792\nI0820 02:05:07.097728 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:05:07.097748 21584 solver.cpp:244]     Train net output #1: loss = 0.145792 (* 1 = 0.145792 loss)\nI0820 02:05:07.201633 21584 sgd_solver.cpp:166] Iteration 56600, lr = 1.415\nI0820 02:07:25.133980 21584 solver.cpp:337] Iteration 56700, Testing net (#0)\nI0820 02:08:47.548635 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.88616\nI0820 02:08:47.548946 21584 solver.cpp:404]     Test net output #1: loss = 0.38837 (* 1 = 0.38837 loss)\nI0820 02:08:48.851030 21584 solver.cpp:228] Iteration 56700, loss = 0.115914\nI0820 02:08:48.851075 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:08:48.851090 21584 solver.cpp:244]     Train net output #1: loss = 0.115913 (* 1 = 0.115913 loss)\nI0820 02:08:48.957839 21584 sgd_solver.cpp:166] Iteration 56700, lr = 1.4175\nI0820 02:11:06.888824 21584 solver.cpp:337] Iteration 56800, Testing net (#0)\nI0820 02:12:29.308012 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88352\nI0820 02:12:29.308320 21584 solver.cpp:404]     Test net output #1: loss = 0.401864 (* 1 = 0.401864 loss)\nI0820 02:12:30.610162 21584 solver.cpp:228] Iteration 56800, loss = 0.0679442\nI0820 02:12:30.610208 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 02:12:30.610222 21584 solver.cpp:244]     Train net output #1: loss = 0.0679438 (* 1 = 0.0679438 loss)\nI0820 02:12:30.718260 21584 sgd_solver.cpp:166] Iteration 56800, lr = 1.42\nI0820 02:14:48.715088 21584 solver.cpp:337] Iteration 56900, Testing net (#0)\nI0820 02:16:11.135159 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87996\nI0820 02:16:11.135457 21584 solver.cpp:404]     Test net output #1: loss = 0.403677 (* 1 = 0.403677 loss)\nI0820 02:16:12.436913 21584 solver.cpp:228] Iteration 56900, loss = 0.154917\nI0820 02:16:12.436947 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:16:12.436961 21584 solver.cpp:244]     Train net output #1: loss = 0.154917 (* 1 = 0.154917 loss)\nI0820 02:16:12.550717 21584 sgd_solver.cpp:166] Iteration 56900, lr = 1.4225\nI0820 02:18:30.593029 21584 solver.cpp:337] Iteration 57000, Testing net (#0)\nI0820 02:19:53.009527 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0820 02:19:53.009788 21584 solver.cpp:404]     Test net output 
#1: loss = 0.408966 (* 1 = 0.408966 loss)\nI0820 02:19:54.312458 21584 solver.cpp:228] Iteration 57000, loss = 0.144989\nI0820 02:19:54.312501 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:19:54.312516 21584 solver.cpp:244]     Train net output #1: loss = 0.144989 (* 1 = 0.144989 loss)\nI0820 02:19:54.419256 21584 sgd_solver.cpp:166] Iteration 57000, lr = 1.425\nI0820 02:22:12.326262 21584 solver.cpp:337] Iteration 57100, Testing net (#0)\nI0820 02:23:34.780441 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88576\nI0820 02:23:34.780689 21584 solver.cpp:404]     Test net output #1: loss = 0.39185 (* 1 = 0.39185 loss)\nI0820 02:23:36.083653 21584 solver.cpp:228] Iteration 57100, loss = 0.0604681\nI0820 02:23:36.083703 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 02:23:36.083719 21584 solver.cpp:244]     Train net output #1: loss = 0.0604678 (* 1 = 0.0604678 loss)\nI0820 02:23:36.190948 21584 sgd_solver.cpp:166] Iteration 57100, lr = 1.4275\nI0820 02:25:54.206372 21584 solver.cpp:337] Iteration 57200, Testing net (#0)\nI0820 02:27:16.632509 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8782\nI0820 02:27:16.632787 21584 solver.cpp:404]     Test net output #1: loss = 0.428268 (* 1 = 0.428268 loss)\nI0820 02:27:17.935806 21584 solver.cpp:228] Iteration 57200, loss = 0.159458\nI0820 02:27:17.935839 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:27:17.935854 21584 solver.cpp:244]     Train net output #1: loss = 0.159458 (* 1 = 0.159458 loss)\nI0820 02:27:18.043722 21584 sgd_solver.cpp:166] Iteration 57200, lr = 1.43\nI0820 02:29:36.021541 21584 solver.cpp:337] Iteration 57300, Testing net (#0)\nI0820 02:30:58.374037 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88064\nI0820 02:30:58.374348 21584 solver.cpp:404]     Test net output #1: loss = 0.399686 (* 1 = 0.399686 loss)\nI0820 02:30:59.676992 21584 solver.cpp:228] Iteration 57300, loss = 0.193558\nI0820 
02:30:59.677027 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:30:59.677043 21584 solver.cpp:244]     Train net output #1: loss = 0.193558 (* 1 = 0.193558 loss)\nI0820 02:30:59.788419 21584 sgd_solver.cpp:166] Iteration 57300, lr = 1.4325\nI0820 02:33:17.761337 21584 solver.cpp:337] Iteration 57400, Testing net (#0)\nI0820 02:34:40.110492 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881\nI0820 02:34:40.110810 21584 solver.cpp:404]     Test net output #1: loss = 0.413722 (* 1 = 0.413722 loss)\nI0820 02:34:41.414062 21584 solver.cpp:228] Iteration 57400, loss = 0.184922\nI0820 02:34:41.414100 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 02:34:41.414115 21584 solver.cpp:244]     Train net output #1: loss = 0.184921 (* 1 = 0.184921 loss)\nI0820 02:34:41.524725 21584 sgd_solver.cpp:166] Iteration 57400, lr = 1.435\nI0820 02:36:59.454210 21584 solver.cpp:337] Iteration 57500, Testing net (#0)\nI0820 02:38:21.823448 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0820 02:38:21.823776 21584 solver.cpp:404]     Test net output #1: loss = 0.390947 (* 1 = 0.390947 loss)\nI0820 02:38:23.126097 21584 solver.cpp:228] Iteration 57500, loss = 0.141154\nI0820 02:38:23.126142 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:38:23.126165 21584 solver.cpp:244]     Train net output #1: loss = 0.141154 (* 1 = 0.141154 loss)\nI0820 02:38:23.231873 21584 sgd_solver.cpp:166] Iteration 57500, lr = 1.4375\nI0820 02:40:41.192894 21584 solver.cpp:337] Iteration 57600, Testing net (#0)\nI0820 02:42:03.579813 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88584\nI0820 02:42:03.580127 21584 solver.cpp:404]     Test net output #1: loss = 0.390243 (* 1 = 0.390243 loss)\nI0820 02:42:04.882750 21584 solver.cpp:228] Iteration 57600, loss = 0.137822\nI0820 02:42:04.882791 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:42:04.882807 21584 solver.cpp:244]     
Train net output #1: loss = 0.137822 (* 1 = 0.137822 loss)\nI0820 02:42:04.990937 21584 sgd_solver.cpp:166] Iteration 57600, lr = 1.44\nI0820 02:44:22.946063 21584 solver.cpp:337] Iteration 57700, Testing net (#0)\nI0820 02:45:45.319823 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 02:45:45.320086 21584 solver.cpp:404]     Test net output #1: loss = 0.400432 (* 1 = 0.400432 loss)\nI0820 02:45:46.622215 21584 solver.cpp:228] Iteration 57700, loss = 0.153847\nI0820 02:45:46.622256 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 02:45:46.622272 21584 solver.cpp:244]     Train net output #1: loss = 0.153846 (* 1 = 0.153846 loss)\nI0820 02:45:46.732300 21584 sgd_solver.cpp:166] Iteration 57700, lr = 1.4425\nI0820 02:48:04.800696 21584 solver.cpp:337] Iteration 57800, Testing net (#0)\nI0820 02:49:27.134639 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88288\nI0820 02:49:27.134924 21584 solver.cpp:404]     Test net output #1: loss = 0.40492 (* 1 = 0.40492 loss)\nI0820 02:49:28.435995 21584 solver.cpp:228] Iteration 57800, loss = 0.137719\nI0820 02:49:28.436036 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 02:49:28.436053 21584 solver.cpp:244]     Train net output #1: loss = 0.137718 (* 1 = 0.137718 loss)\nI0820 02:49:28.548115 21584 sgd_solver.cpp:166] Iteration 57800, lr = 1.445\nI0820 02:51:46.556761 21584 solver.cpp:337] Iteration 57900, Testing net (#0)\nI0820 02:53:08.889672 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88008\nI0820 02:53:08.890002 21584 solver.cpp:404]     Test net output #1: loss = 0.393172 (* 1 = 0.393172 loss)\nI0820 02:53:10.191996 21584 solver.cpp:228] Iteration 57900, loss = 0.119763\nI0820 02:53:10.192037 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 02:53:10.192054 21584 solver.cpp:244]     Train net output #1: loss = 0.119763 (* 1 = 0.119763 loss)\nI0820 02:53:10.305577 21584 sgd_solver.cpp:166] Iteration 57900, lr = 
1.4475\nI0820 02:55:28.175561 21584 solver.cpp:337] Iteration 58000, Testing net (#0)\nI0820 02:56:50.524227 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0820 02:56:50.524547 21584 solver.cpp:404]     Test net output #1: loss = 0.426038 (* 1 = 0.426038 loss)\nI0820 02:56:51.826424 21584 solver.cpp:228] Iteration 58000, loss = 0.130432\nI0820 02:56:51.826465 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 02:56:51.826480 21584 solver.cpp:244]     Train net output #1: loss = 0.130431 (* 1 = 0.130431 loss)\nI0820 02:56:51.937752 21584 sgd_solver.cpp:166] Iteration 58000, lr = 1.45\nI0820 02:59:09.778746 21584 solver.cpp:337] Iteration 58100, Testing net (#0)\nI0820 03:00:32.121639 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88148\nI0820 03:00:32.121963 21584 solver.cpp:404]     Test net output #1: loss = 0.412495 (* 1 = 0.412495 loss)\nI0820 03:00:33.423996 21584 solver.cpp:228] Iteration 58100, loss = 0.267233\nI0820 03:00:33.424037 21584 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 03:00:33.424052 21584 solver.cpp:244]     Train net output #1: loss = 0.267232 (* 1 = 0.267232 loss)\nI0820 03:00:33.529160 21584 sgd_solver.cpp:166] Iteration 58100, lr = 1.4525\nI0820 03:02:51.431396 21584 solver.cpp:337] Iteration 58200, Testing net (#0)\nI0820 03:04:13.882066 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88992\nI0820 03:04:13.882365 21584 solver.cpp:404]     Test net output #1: loss = 0.38141 (* 1 = 0.38141 loss)\nI0820 03:04:15.184211 21584 solver.cpp:228] Iteration 58200, loss = 0.0833689\nI0820 03:04:15.184252 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 03:04:15.184268 21584 solver.cpp:244]     Train net output #1: loss = 0.0833687 (* 1 = 0.0833687 loss)\nI0820 03:04:15.289964 21584 sgd_solver.cpp:166] Iteration 58200, lr = 1.455\nI0820 03:06:33.231446 21584 solver.cpp:337] Iteration 58300, Testing net (#0)\nI0820 03:07:55.703624 21584 
solver.cpp:404]     Test net output #0: accuracy = 0.89124\nI0820 03:07:55.703891 21584 solver.cpp:404]     Test net output #1: loss = 0.38009 (* 1 = 0.38009 loss)\nI0820 03:07:57.005594 21584 solver.cpp:228] Iteration 58300, loss = 0.188897\nI0820 03:07:57.005637 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 03:07:57.005655 21584 solver.cpp:244]     Train net output #1: loss = 0.188896 (* 1 = 0.188896 loss)\nI0820 03:07:57.118283 21584 sgd_solver.cpp:166] Iteration 58300, lr = 1.4575\nI0820 03:10:15.097635 21584 solver.cpp:337] Iteration 58400, Testing net (#0)\nI0820 03:11:37.519040 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88492\nI0820 03:11:37.519320 21584 solver.cpp:404]     Test net output #1: loss = 0.400102 (* 1 = 0.400102 loss)\nI0820 03:11:38.821753 21584 solver.cpp:228] Iteration 58400, loss = 0.0899026\nI0820 03:11:38.821797 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 03:11:38.821813 21584 solver.cpp:244]     Train net output #1: loss = 0.0899024 (* 1 = 0.0899024 loss)\nI0820 03:11:38.931540 21584 sgd_solver.cpp:166] Iteration 58400, lr = 1.46\nI0820 03:13:56.967769 21584 solver.cpp:337] Iteration 58500, Testing net (#0)\nI0820 03:15:19.365393 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88692\nI0820 03:15:19.365697 21584 solver.cpp:404]     Test net output #1: loss = 0.406214 (* 1 = 0.406214 loss)\nI0820 03:15:20.667829 21584 solver.cpp:228] Iteration 58500, loss = 0.156866\nI0820 03:15:20.667871 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 03:15:20.667887 21584 solver.cpp:244]     Train net output #1: loss = 0.156865 (* 1 = 0.156865 loss)\nI0820 03:15:20.776821 21584 sgd_solver.cpp:166] Iteration 58500, lr = 1.4625\nI0820 03:17:38.805948 21584 solver.cpp:337] Iteration 58600, Testing net (#0)\nI0820 03:19:01.213074 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87956\nI0820 03:19:01.213397 21584 solver.cpp:404]     Test net output 
#1: loss = 0.420028 (* 1 = 0.420028 loss)\nI0820 03:19:02.516404 21584 solver.cpp:228] Iteration 58600, loss = 0.100238\nI0820 03:19:02.516445 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 03:19:02.516461 21584 solver.cpp:244]     Train net output #1: loss = 0.100238 (* 1 = 0.100238 loss)\nI0820 03:19:02.625471 21584 sgd_solver.cpp:166] Iteration 58600, lr = 1.465\nI0820 03:21:20.714320 21584 solver.cpp:337] Iteration 58700, Testing net (#0)\nI0820 03:22:43.123049 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88444\nI0820 03:22:43.123368 21584 solver.cpp:404]     Test net output #1: loss = 0.398556 (* 1 = 0.398556 loss)\nI0820 03:22:44.425320 21584 solver.cpp:228] Iteration 58700, loss = 0.244807\nI0820 03:22:44.425364 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 03:22:44.425379 21584 solver.cpp:244]     Train net output #1: loss = 0.244806 (* 1 = 0.244806 loss)\nI0820 03:22:44.536485 21584 sgd_solver.cpp:166] Iteration 58700, lr = 1.4675\nI0820 03:25:02.621140 21584 solver.cpp:337] Iteration 58800, Testing net (#0)\nI0820 03:26:25.030488 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88344\nI0820 03:26:25.030786 21584 solver.cpp:404]     Test net output #1: loss = 0.3992 (* 1 = 0.3992 loss)\nI0820 03:26:26.333132 21584 solver.cpp:228] Iteration 58800, loss = 0.112647\nI0820 03:26:26.333175 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 03:26:26.333191 21584 solver.cpp:244]     Train net output #1: loss = 0.112647 (* 1 = 0.112647 loss)\nI0820 03:26:26.447916 21584 sgd_solver.cpp:166] Iteration 58800, lr = 1.47\nI0820 03:28:44.497028 21584 solver.cpp:337] Iteration 58900, Testing net (#0)\nI0820 03:30:06.950996 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89112\nI0820 03:30:06.951319 21584 solver.cpp:404]     Test net output #1: loss = 0.371773 (* 1 = 0.371773 loss)\nI0820 03:30:08.254524 21584 solver.cpp:228] Iteration 58900, loss = 0.0565908\nI0820 
03:30:08.254568 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 03:30:08.254585 21584 solver.cpp:244]     Train net output #1: loss = 0.0565907 (* 1 = 0.0565907 loss)\nI0820 03:30:08.363916 21584 sgd_solver.cpp:166] Iteration 58900, lr = 1.4725\nI0820 03:32:26.307289 21584 solver.cpp:337] Iteration 59000, Testing net (#0)\nI0820 03:33:48.762114 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8768\nI0820 03:33:48.762394 21584 solver.cpp:404]     Test net output #1: loss = 0.414192 (* 1 = 0.414192 loss)\nI0820 03:33:50.065567 21584 solver.cpp:228] Iteration 59000, loss = 0.180175\nI0820 03:33:50.065611 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:33:50.065627 21584 solver.cpp:244]     Train net output #1: loss = 0.180175 (* 1 = 0.180175 loss)\nI0820 03:33:50.172477 21584 sgd_solver.cpp:166] Iteration 59000, lr = 1.475\nI0820 03:36:08.112051 21584 solver.cpp:337] Iteration 59100, Testing net (#0)\nI0820 03:37:30.561875 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89036\nI0820 03:37:30.562187 21584 solver.cpp:404]     Test net output #1: loss = 0.379292 (* 1 = 0.379292 loss)\nI0820 03:37:31.864994 21584 solver.cpp:228] Iteration 59100, loss = 0.0583816\nI0820 03:37:31.865039 21584 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI0820 03:37:31.865056 21584 solver.cpp:244]     Train net output #1: loss = 0.0583814 (* 1 = 0.0583814 loss)\nI0820 03:37:31.987018 21584 sgd_solver.cpp:166] Iteration 59100, lr = 1.4775\nI0820 03:39:50.673413 21584 solver.cpp:337] Iteration 59200, Testing net (#0)\nI0820 03:41:13.129809 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0820 03:41:13.130122 21584 solver.cpp:404]     Test net output #1: loss = 0.401047 (* 1 = 0.401047 loss)\nI0820 03:41:14.433284 21584 solver.cpp:228] Iteration 59200, loss = 0.212658\nI0820 03:41:14.433329 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 03:41:14.433346 21584 
solver.cpp:244]     Train net output #1: loss = 0.212658 (* 1 = 0.212658 loss)\nI0820 03:41:14.556825 21584 sgd_solver.cpp:166] Iteration 59200, lr = 1.48\nI0820 03:43:33.322767 21584 solver.cpp:337] Iteration 59300, Testing net (#0)\nI0820 03:44:55.777933 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8866\nI0820 03:44:55.778239 21584 solver.cpp:404]     Test net output #1: loss = 0.392572 (* 1 = 0.392572 loss)\nI0820 03:44:57.081077 21584 solver.cpp:228] Iteration 59300, loss = 0.183017\nI0820 03:44:57.081110 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 03:44:57.081125 21584 solver.cpp:244]     Train net output #1: loss = 0.183017 (* 1 = 0.183017 loss)\nI0820 03:44:57.194916 21584 sgd_solver.cpp:166] Iteration 59300, lr = 1.4825\nI0820 03:47:15.845911 21584 solver.cpp:337] Iteration 59400, Testing net (#0)\nI0820 03:48:38.323902 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 03:48:38.324229 21584 solver.cpp:404]     Test net output #1: loss = 0.415717 (* 1 = 0.415717 loss)\nI0820 03:48:39.626592 21584 solver.cpp:228] Iteration 59400, loss = 0.207382\nI0820 03:48:39.626637 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 03:48:39.626653 21584 solver.cpp:244]     Train net output #1: loss = 0.207382 (* 1 = 0.207382 loss)\nI0820 03:48:39.746080 21584 sgd_solver.cpp:166] Iteration 59400, lr = 1.485\nI0820 03:50:58.372234 21584 solver.cpp:337] Iteration 59500, Testing net (#0)\nI0820 03:52:20.831527 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8822\nI0820 03:52:20.831837 21584 solver.cpp:404]     Test net output #1: loss = 0.401172 (* 1 = 0.401172 loss)\nI0820 03:52:22.133894 21584 solver.cpp:228] Iteration 59500, loss = 0.199459\nI0820 03:52:22.133925 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 03:52:22.133940 21584 solver.cpp:244]     Train net output #1: loss = 0.199459 (* 1 = 0.199459 loss)\nI0820 03:52:22.250293 21584 sgd_solver.cpp:166] 
Iteration 59500, lr = 1.4875\nI0820 03:54:40.914116 21584 solver.cpp:337] Iteration 59600, Testing net (#0)\nI0820 03:56:03.384497 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88396\nI0820 03:56:03.384783 21584 solver.cpp:404]     Test net output #1: loss = 0.400822 (* 1 = 0.400822 loss)\nI0820 03:56:04.687363 21584 solver.cpp:228] Iteration 59600, loss = 0.178659\nI0820 03:56:04.687407 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:56:04.687423 21584 solver.cpp:244]     Train net output #1: loss = 0.178659 (* 1 = 0.178659 loss)\nI0820 03:56:04.805910 21584 sgd_solver.cpp:166] Iteration 59600, lr = 1.49\nI0820 03:58:23.482638 21584 solver.cpp:337] Iteration 59700, Testing net (#0)\nI0820 03:59:45.954674 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 03:59:45.954970 21584 solver.cpp:404]     Test net output #1: loss = 0.423284 (* 1 = 0.423284 loss)\nI0820 03:59:47.257513 21584 solver.cpp:228] Iteration 59700, loss = 0.165676\nI0820 03:59:47.257555 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 03:59:47.257570 21584 solver.cpp:244]     Train net output #1: loss = 0.165676 (* 1 = 0.165676 loss)\nI0820 03:59:47.377198 21584 sgd_solver.cpp:166] Iteration 59700, lr = 1.4925\nI0820 04:02:06.018563 21584 solver.cpp:337] Iteration 59800, Testing net (#0)\nI0820 04:03:28.432739 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88512\nI0820 04:03:28.433027 21584 solver.cpp:404]     Test net output #1: loss = 0.410222 (* 1 = 0.410222 loss)\nI0820 04:03:29.734553 21584 solver.cpp:228] Iteration 59800, loss = 0.14203\nI0820 04:03:29.734597 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:03:29.734613 21584 solver.cpp:244]     Train net output #1: loss = 0.14203 (* 1 = 0.14203 loss)\nI0820 04:03:29.849383 21584 sgd_solver.cpp:166] Iteration 59800, lr = 1.495\nI0820 04:05:48.511605 21584 solver.cpp:337] Iteration 59900, Testing net (#0)\nI0820 04:07:10.924953 
21584 solver.cpp:404]     Test net output #0: accuracy = 0.88044\nI0820 04:07:10.925276 21584 solver.cpp:404]     Test net output #1: loss = 0.407786 (* 1 = 0.407786 loss)\nI0820 04:07:12.226830 21584 solver.cpp:228] Iteration 59900, loss = 0.125311\nI0820 04:07:12.226873 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:07:12.226889 21584 solver.cpp:244]     Train net output #1: loss = 0.125311 (* 1 = 0.125311 loss)\nI0820 04:07:12.338346 21584 sgd_solver.cpp:166] Iteration 59900, lr = 1.4975\nI0820 04:09:30.978905 21584 solver.cpp:337] Iteration 60000, Testing net (#0)\nI0820 04:10:53.390916 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0820 04:10:53.391196 21584 solver.cpp:404]     Test net output #1: loss = 0.439591 (* 1 = 0.439591 loss)\nI0820 04:10:54.693454 21584 solver.cpp:228] Iteration 60000, loss = 0.156081\nI0820 04:10:54.693498 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:10:54.693514 21584 solver.cpp:244]     Train net output #1: loss = 0.156081 (* 1 = 0.156081 loss)\nI0820 04:10:54.813122 21584 sgd_solver.cpp:166] Iteration 60000, lr = 1.5\nI0820 04:13:13.421871 21584 solver.cpp:337] Iteration 60100, Testing net (#0)\nI0820 04:14:35.832875 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0820 04:14:35.833194 21584 solver.cpp:404]     Test net output #1: loss = 0.402287 (* 1 = 0.402287 loss)\nI0820 04:14:37.134974 21584 solver.cpp:228] Iteration 60100, loss = 0.202971\nI0820 04:14:37.135022 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 04:14:37.135040 21584 solver.cpp:244]     Train net output #1: loss = 0.202971 (* 1 = 0.202971 loss)\nI0820 04:14:37.253679 21584 sgd_solver.cpp:166] Iteration 60100, lr = 1.5025\nI0820 04:16:55.948987 21584 solver.cpp:337] Iteration 60200, Testing net (#0)\nI0820 04:18:18.402470 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 04:18:18.402793 21584 solver.cpp:404]     Test net 
output #1: loss = 0.406877 (* 1 = 0.406877 loss)\nI0820 04:18:19.705139 21584 solver.cpp:228] Iteration 60200, loss = 0.116564\nI0820 04:18:19.705183 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:18:19.705199 21584 solver.cpp:244]     Train net output #1: loss = 0.116564 (* 1 = 0.116564 loss)\nI0820 04:18:19.821476 21584 sgd_solver.cpp:166] Iteration 60200, lr = 1.505\nI0820 04:20:38.366796 21584 solver.cpp:337] Iteration 60300, Testing net (#0)\nI0820 04:22:00.776854 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89296\nI0820 04:22:00.777142 21584 solver.cpp:404]     Test net output #1: loss = 0.366844 (* 1 = 0.366844 loss)\nI0820 04:22:02.078673 21584 solver.cpp:228] Iteration 60300, loss = 0.233386\nI0820 04:22:02.078708 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:22:02.078722 21584 solver.cpp:244]     Train net output #1: loss = 0.233386 (* 1 = 0.233386 loss)\nI0820 04:22:02.191887 21584 sgd_solver.cpp:166] Iteration 60300, lr = 1.5075\nI0820 04:24:20.841821 21584 solver.cpp:337] Iteration 60400, Testing net (#0)\nI0820 04:25:43.253338 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88728\nI0820 04:25:43.253598 21584 solver.cpp:404]     Test net output #1: loss = 0.40615 (* 1 = 0.40615 loss)\nI0820 04:25:44.555091 21584 solver.cpp:228] Iteration 60400, loss = 0.150816\nI0820 04:25:44.555135 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:25:44.555151 21584 solver.cpp:244]     Train net output #1: loss = 0.150816 (* 1 = 0.150816 loss)\nI0820 04:25:44.666991 21584 sgd_solver.cpp:166] Iteration 60400, lr = 1.51\nI0820 04:28:03.324493 21584 solver.cpp:337] Iteration 60500, Testing net (#0)\nI0820 04:29:25.734380 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88572\nI0820 04:29:25.734686 21584 solver.cpp:404]     Test net output #1: loss = 0.420426 (* 1 = 0.420426 loss)\nI0820 04:29:27.036051 21584 solver.cpp:228] Iteration 60500, loss = 
0.126478\nI0820 04:29:27.036094 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:29:27.036110 21584 solver.cpp:244]     Train net output #1: loss = 0.126477 (* 1 = 0.126477 loss)\nI0820 04:29:27.154011 21584 sgd_solver.cpp:166] Iteration 60500, lr = 1.5125\nI0820 04:31:45.740658 21584 solver.cpp:337] Iteration 60600, Testing net (#0)\nI0820 04:33:08.153255 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89216\nI0820 04:33:08.153566 21584 solver.cpp:404]     Test net output #1: loss = 0.389657 (* 1 = 0.389657 loss)\nI0820 04:33:09.454859 21584 solver.cpp:228] Iteration 60600, loss = 0.128007\nI0820 04:33:09.454905 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 04:33:09.454921 21584 solver.cpp:244]     Train net output #1: loss = 0.128006 (* 1 = 0.128006 loss)\nI0820 04:33:09.575122 21584 sgd_solver.cpp:166] Iteration 60600, lr = 1.515\nI0820 04:35:28.316792 21584 solver.cpp:337] Iteration 60700, Testing net (#0)\nI0820 04:36:50.728534 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88136\nI0820 04:36:50.728843 21584 solver.cpp:404]     Test net output #1: loss = 0.399567 (* 1 = 0.399567 loss)\nI0820 04:36:52.030015 21584 solver.cpp:228] Iteration 60700, loss = 0.120255\nI0820 04:36:52.030050 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 04:36:52.030066 21584 solver.cpp:244]     Train net output #1: loss = 0.120255 (* 1 = 0.120255 loss)\nI0820 04:36:52.144480 21584 sgd_solver.cpp:166] Iteration 60700, lr = 1.5175\nI0820 04:39:10.745466 21584 solver.cpp:337] Iteration 60800, Testing net (#0)\nI0820 04:40:33.153013 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88096\nI0820 04:40:33.153340 21584 solver.cpp:404]     Test net output #1: loss = 0.406272 (* 1 = 0.406272 loss)\nI0820 04:40:34.454299 21584 solver.cpp:228] Iteration 60800, loss = 0.102684\nI0820 04:40:34.454344 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 04:40:34.454360 21584 
solver.cpp:244]     Train net output #1: loss = 0.102684 (* 1 = 0.102684 loss)\nI0820 04:40:34.567793 21584 sgd_solver.cpp:166] Iteration 60800, lr = 1.52\nI0820 04:42:53.175926 21584 solver.cpp:337] Iteration 60900, Testing net (#0)\nI0820 04:44:15.586313 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0820 04:44:15.586609 21584 solver.cpp:404]     Test net output #1: loss = 0.422325 (* 1 = 0.422325 loss)\nI0820 04:44:16.888110 21584 solver.cpp:228] Iteration 60900, loss = 0.189684\nI0820 04:44:16.888154 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:44:16.888170 21584 solver.cpp:244]     Train net output #1: loss = 0.189684 (* 1 = 0.189684 loss)\nI0820 04:44:17.000769 21584 sgd_solver.cpp:166] Iteration 60900, lr = 1.5225\nI0820 04:46:35.630038 21584 solver.cpp:337] Iteration 61000, Testing net (#0)\nI0820 04:47:57.968796 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88464\nI0820 04:47:57.969079 21584 solver.cpp:404]     Test net output #1: loss = 0.415342 (* 1 = 0.415342 loss)\nI0820 04:47:59.271369 21584 solver.cpp:228] Iteration 61000, loss = 0.137345\nI0820 04:47:59.271414 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 04:47:59.271430 21584 solver.cpp:244]     Train net output #1: loss = 0.137345 (* 1 = 0.137345 loss)\nI0820 04:47:59.389027 21584 sgd_solver.cpp:166] Iteration 61000, lr = 1.525\nI0820 04:50:18.074977 21584 solver.cpp:337] Iteration 61100, Testing net (#0)\nI0820 04:51:40.416746 21584 solver.cpp:404]     Test net output #0: accuracy = 0.880201\nI0820 04:51:40.417023 21584 solver.cpp:404]     Test net output #1: loss = 0.409048 (* 1 = 0.409048 loss)\nI0820 04:51:41.718931 21584 solver.cpp:228] Iteration 61100, loss = 0.159387\nI0820 04:51:41.718974 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 04:51:41.718991 21584 solver.cpp:244]     Train net output #1: loss = 0.159387 (* 1 = 0.159387 loss)\nI0820 04:51:41.835606 21584 
sgd_solver.cpp:166] Iteration 61100, lr = 1.5275\nI0820 04:54:00.277534 21584 solver.cpp:337] Iteration 61200, Testing net (#0)\nI0820 04:55:22.616118 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0820 04:55:22.616443 21584 solver.cpp:404]     Test net output #1: loss = 0.403838 (* 1 = 0.403838 loss)\nI0820 04:55:23.918259 21584 solver.cpp:228] Iteration 61200, loss = 0.164437\nI0820 04:55:23.918303 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 04:55:23.918318 21584 solver.cpp:244]     Train net output #1: loss = 0.164436 (* 1 = 0.164436 loss)\nI0820 04:55:24.031385 21584 sgd_solver.cpp:166] Iteration 61200, lr = 1.53\nI0820 04:57:42.031054 21584 solver.cpp:337] Iteration 61300, Testing net (#0)\nI0820 04:59:04.368407 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88908\nI0820 04:59:04.368717 21584 solver.cpp:404]     Test net output #1: loss = 0.383142 (* 1 = 0.383142 loss)\nI0820 04:59:05.670670 21584 solver.cpp:228] Iteration 61300, loss = 0.11479\nI0820 04:59:05.670706 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 04:59:05.670720 21584 solver.cpp:244]     Train net output #1: loss = 0.114789 (* 1 = 0.114789 loss)\nI0820 04:59:05.776830 21584 sgd_solver.cpp:166] Iteration 61300, lr = 1.5325\nI0820 05:01:23.785519 21584 solver.cpp:337] Iteration 61400, Testing net (#0)\nI0820 05:02:46.120270 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88268\nI0820 05:02:46.120584 21584 solver.cpp:404]     Test net output #1: loss = 0.402653 (* 1 = 0.402653 loss)\nI0820 05:02:47.423028 21584 solver.cpp:228] Iteration 61400, loss = 0.120742\nI0820 05:02:47.423074 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:02:47.423090 21584 solver.cpp:244]     Train net output #1: loss = 0.120742 (* 1 = 0.120742 loss)\nI0820 05:02:47.529013 21584 sgd_solver.cpp:166] Iteration 61400, lr = 1.535\nI0820 05:05:05.561226 21584 solver.cpp:337] Iteration 61500, Testing net 
(#0)\nI0820 05:06:27.900168 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88792\nI0820 05:06:27.900456 21584 solver.cpp:404]     Test net output #1: loss = 0.389059 (* 1 = 0.389059 loss)\nI0820 05:06:29.202353 21584 solver.cpp:228] Iteration 61500, loss = 0.193975\nI0820 05:06:29.202388 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 05:06:29.202402 21584 solver.cpp:244]     Train net output #1: loss = 0.193974 (* 1 = 0.193974 loss)\nI0820 05:06:29.313129 21584 sgd_solver.cpp:166] Iteration 61500, lr = 1.5375\nI0820 05:08:47.338065 21584 solver.cpp:337] Iteration 61600, Testing net (#0)\nI0820 05:10:09.667676 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88632\nI0820 05:10:09.667994 21584 solver.cpp:404]     Test net output #1: loss = 0.414713 (* 1 = 0.414713 loss)\nI0820 05:10:10.969287 21584 solver.cpp:228] Iteration 61600, loss = 0.141813\nI0820 05:10:10.969321 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:10:10.969336 21584 solver.cpp:244]     Train net output #1: loss = 0.141812 (* 1 = 0.141812 loss)\nI0820 05:10:11.076746 21584 sgd_solver.cpp:166] Iteration 61600, lr = 1.54\nI0820 05:12:29.049897 21584 solver.cpp:337] Iteration 61700, Testing net (#0)\nI0820 05:13:51.372761 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0820 05:13:51.373008 21584 solver.cpp:404]     Test net output #1: loss = 0.398361 (* 1 = 0.398361 loss)\nI0820 05:13:52.675838 21584 solver.cpp:228] Iteration 61700, loss = 0.150522\nI0820 05:13:52.675881 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:13:52.675897 21584 solver.cpp:244]     Train net output #1: loss = 0.150522 (* 1 = 0.150522 loss)\nI0820 05:13:52.785027 21584 sgd_solver.cpp:166] Iteration 61700, lr = 1.5425\nI0820 05:16:10.814661 21584 solver.cpp:337] Iteration 61800, Testing net (#0)\nI0820 05:17:33.145195 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8896\nI0820 05:17:33.145511 21584 
solver.cpp:404]     Test net output #1: loss = 0.372934 (* 1 = 0.372934 loss)\nI0820 05:17:34.447136 21584 solver.cpp:228] Iteration 61800, loss = 0.148583\nI0820 05:17:34.447178 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:17:34.447194 21584 solver.cpp:244]     Train net output #1: loss = 0.148583 (* 1 = 0.148583 loss)\nI0820 05:17:34.556803 21584 sgd_solver.cpp:166] Iteration 61800, lr = 1.545\nI0820 05:19:52.555141 21584 solver.cpp:337] Iteration 61900, Testing net (#0)\nI0820 05:21:15.030613 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88384\nI0820 05:21:15.030923 21584 solver.cpp:404]     Test net output #1: loss = 0.3938 (* 1 = 0.3938 loss)\nI0820 05:21:16.333792 21584 solver.cpp:228] Iteration 61900, loss = 0.139306\nI0820 05:21:16.333837 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:21:16.333851 21584 solver.cpp:244]     Train net output #1: loss = 0.139306 (* 1 = 0.139306 loss)\nI0820 05:21:16.444407 21584 sgd_solver.cpp:166] Iteration 61900, lr = 1.5475\nI0820 05:23:34.417568 21584 solver.cpp:337] Iteration 62000, Testing net (#0)\nI0820 05:24:56.860599 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88268\nI0820 05:24:56.860900 21584 solver.cpp:404]     Test net output #1: loss = 0.428861 (* 1 = 0.428861 loss)\nI0820 05:24:58.163079 21584 solver.cpp:228] Iteration 62000, loss = 0.173096\nI0820 05:24:58.163123 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 05:24:58.163139 21584 solver.cpp:244]     Train net output #1: loss = 0.173096 (* 1 = 0.173096 loss)\nI0820 05:24:58.270473 21584 sgd_solver.cpp:166] Iteration 62000, lr = 1.55\nI0820 05:27:16.219914 21584 solver.cpp:337] Iteration 62100, Testing net (#0)\nI0820 05:28:38.653525 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88508\nI0820 05:28:38.653802 21584 solver.cpp:404]     Test net output #1: loss = 0.394393 (* 1 = 0.394393 loss)\nI0820 05:28:39.955555 21584 solver.cpp:228] 
Iteration 62100, loss = 0.092123\nI0820 05:28:39.955590 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 05:28:39.955606 21584 solver.cpp:244]     Train net output #1: loss = 0.0921228 (* 1 = 0.0921228 loss)\nI0820 05:28:40.061050 21584 sgd_solver.cpp:166] Iteration 62100, lr = 1.5525\nI0820 05:30:58.036229 21584 solver.cpp:337] Iteration 62200, Testing net (#0)\nI0820 05:32:20.446784 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88644\nI0820 05:32:20.447057 21584 solver.cpp:404]     Test net output #1: loss = 0.397415 (* 1 = 0.397415 loss)\nI0820 05:32:21.747730 21584 solver.cpp:228] Iteration 62200, loss = 0.175731\nI0820 05:32:21.747779 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 05:32:21.747797 21584 solver.cpp:244]     Train net output #1: loss = 0.175731 (* 1 = 0.175731 loss)\nI0820 05:32:21.865025 21584 sgd_solver.cpp:166] Iteration 62200, lr = 1.555\nI0820 05:34:39.886214 21584 solver.cpp:337] Iteration 62300, Testing net (#0)\nI0820 05:36:02.309535 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88472\nI0820 05:36:02.309813 21584 solver.cpp:404]     Test net output #1: loss = 0.38065 (* 1 = 0.38065 loss)\nI0820 05:36:03.612556 21584 solver.cpp:228] Iteration 62300, loss = 0.140616\nI0820 05:36:03.612601 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 05:36:03.612617 21584 solver.cpp:244]     Train net output #1: loss = 0.140615 (* 1 = 0.140615 loss)\nI0820 05:36:03.718287 21584 sgd_solver.cpp:166] Iteration 62300, lr = 1.5575\nI0820 05:38:21.693722 21584 solver.cpp:337] Iteration 62400, Testing net (#0)\nI0820 05:39:44.113519 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87644\nI0820 05:39:44.113801 21584 solver.cpp:404]     Test net output #1: loss = 0.452967 (* 1 = 0.452967 loss)\nI0820 05:39:45.415911 21584 solver.cpp:228] Iteration 62400, loss = 0.12873\nI0820 05:39:45.415946 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 
05:39:45.415961 21584 solver.cpp:244]     Train net output #1: loss = 0.12873 (* 1 = 0.12873 loss)\nI0820 05:39:45.520731 21584 sgd_solver.cpp:166] Iteration 62400, lr = 1.56\nI0820 05:42:03.601202 21584 solver.cpp:337] Iteration 62500, Testing net (#0)\nI0820 05:43:26.023979 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87468\nI0820 05:43:26.024265 21584 solver.cpp:404]     Test net output #1: loss = 0.44337 (* 1 = 0.44337 loss)\nI0820 05:43:27.327076 21584 solver.cpp:228] Iteration 62500, loss = 0.173585\nI0820 05:43:27.327121 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 05:43:27.327136 21584 solver.cpp:244]     Train net output #1: loss = 0.173585 (* 1 = 0.173585 loss)\nI0820 05:43:27.439250 21584 sgd_solver.cpp:166] Iteration 62500, lr = 1.5625\nI0820 05:45:45.406918 21584 solver.cpp:337] Iteration 62600, Testing net (#0)\nI0820 05:47:07.828075 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88004\nI0820 05:47:07.828394 21584 solver.cpp:404]     Test net output #1: loss = 0.420465 (* 1 = 0.420465 loss)\nI0820 05:47:09.131131 21584 solver.cpp:228] Iteration 62600, loss = 0.22697\nI0820 05:47:09.131165 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 05:47:09.131181 21584 solver.cpp:244]     Train net output #1: loss = 0.22697 (* 1 = 0.22697 loss)\nI0820 05:47:09.242677 21584 sgd_solver.cpp:166] Iteration 62600, lr = 1.565\nI0820 05:49:27.222553 21584 solver.cpp:337] Iteration 62700, Testing net (#0)\nI0820 05:50:49.639066 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0820 05:50:49.639391 21584 solver.cpp:404]     Test net output #1: loss = 0.394648 (* 1 = 0.394648 loss)\nI0820 05:50:50.942214 21584 solver.cpp:228] Iteration 62700, loss = 0.130292\nI0820 05:50:50.942258 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 05:50:50.942275 21584 solver.cpp:244]     Train net output #1: loss = 0.130292 (* 1 = 0.130292 loss)\nI0820 05:50:51.052891 21584 
sgd_solver.cpp:166] Iteration 62700, lr = 1.5675\nI0820 05:53:09.052512 21584 solver.cpp:337] Iteration 62800, Testing net (#0)\nI0820 05:54:31.514406 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88968\nI0820 05:54:31.514694 21584 solver.cpp:404]     Test net output #1: loss = 0.395603 (* 1 = 0.395603 loss)\nI0820 05:54:32.817493 21584 solver.cpp:228] Iteration 62800, loss = 0.0933268\nI0820 05:54:32.817528 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:54:32.817543 21584 solver.cpp:244]     Train net output #1: loss = 0.0933266 (* 1 = 0.0933266 loss)\nI0820 05:54:32.925190 21584 sgd_solver.cpp:166] Iteration 62800, lr = 1.57\nI0820 05:56:50.910254 21584 solver.cpp:337] Iteration 62900, Testing net (#0)\nI0820 05:58:13.378844 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88532\nI0820 05:58:13.379128 21584 solver.cpp:404]     Test net output #1: loss = 0.386988 (* 1 = 0.386988 loss)\nI0820 05:58:14.681529 21584 solver.cpp:228] Iteration 62900, loss = 0.106415\nI0820 05:58:14.681572 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 05:58:14.681588 21584 solver.cpp:244]     Train net output #1: loss = 0.106415 (* 1 = 0.106415 loss)\nI0820 05:58:14.787369 21584 sgd_solver.cpp:166] Iteration 62900, lr = 1.5725\nI0820 06:00:32.763038 21584 solver.cpp:337] Iteration 63000, Testing net (#0)\nI0820 06:01:55.212944 21584 solver.cpp:404]     Test net output #0: accuracy = 0.883\nI0820 06:01:55.213246 21584 solver.cpp:404]     Test net output #1: loss = 0.393638 (* 1 = 0.393638 loss)\nI0820 06:01:56.515620 21584 solver.cpp:228] Iteration 63000, loss = 0.1304\nI0820 06:01:56.515662 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:01:56.515679 21584 solver.cpp:244]     Train net output #1: loss = 0.130399 (* 1 = 0.130399 loss)\nI0820 06:01:56.626741 21584 sgd_solver.cpp:166] Iteration 63000, lr = 1.575\nI0820 06:04:14.575044 21584 solver.cpp:337] Iteration 63100, Testing net 
(#0)\nI0820 06:05:36.999022 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88316\nI0820 06:05:36.999310 21584 solver.cpp:404]     Test net output #1: loss = 0.411758 (* 1 = 0.411758 loss)\nI0820 06:05:38.300928 21584 solver.cpp:228] Iteration 63100, loss = 0.125405\nI0820 06:05:38.300971 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 06:05:38.300988 21584 solver.cpp:244]     Train net output #1: loss = 0.125404 (* 1 = 0.125404 loss)\nI0820 06:05:38.410792 21584 sgd_solver.cpp:166] Iteration 63100, lr = 1.5775\nI0820 06:07:56.332289 21584 solver.cpp:337] Iteration 63200, Testing net (#0)\nI0820 06:09:18.726505 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0820 06:09:18.726786 21584 solver.cpp:404]     Test net output #1: loss = 0.427547 (* 1 = 0.427547 loss)\nI0820 06:09:20.029451 21584 solver.cpp:228] Iteration 63200, loss = 0.160161\nI0820 06:09:20.029484 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:09:20.029500 21584 solver.cpp:244]     Train net output #1: loss = 0.16016 (* 1 = 0.16016 loss)\nI0820 06:09:20.142225 21584 sgd_solver.cpp:166] Iteration 63200, lr = 1.58\nI0820 06:11:38.141500 21584 solver.cpp:337] Iteration 63300, Testing net (#0)\nI0820 06:13:00.546545 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0820 06:13:00.546855 21584 solver.cpp:404]     Test net output #1: loss = 0.412091 (* 1 = 0.412091 loss)\nI0820 06:13:01.848968 21584 solver.cpp:228] Iteration 63300, loss = 0.130748\nI0820 06:13:01.849004 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:13:01.849019 21584 solver.cpp:244]     Train net output #1: loss = 0.130747 (* 1 = 0.130747 loss)\nI0820 06:13:01.958271 21584 sgd_solver.cpp:166] Iteration 63300, lr = 1.5825\nI0820 06:15:19.895270 21584 solver.cpp:337] Iteration 63400, Testing net (#0)\nI0820 06:16:42.299522 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88844\nI0820 06:16:42.299831 21584 
solver.cpp:404]     Test net output #1: loss = 0.388864 (* 1 = 0.388864 loss)\nI0820 06:16:43.601676 21584 solver.cpp:228] Iteration 63400, loss = 0.174004\nI0820 06:16:43.601724 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 06:16:43.601740 21584 solver.cpp:244]     Train net output #1: loss = 0.174004 (* 1 = 0.174004 loss)\nI0820 06:16:43.709746 21584 sgd_solver.cpp:166] Iteration 63400, lr = 1.585\nI0820 06:19:01.680147 21584 solver.cpp:337] Iteration 63500, Testing net (#0)\nI0820 06:20:24.118641 21584 solver.cpp:404]     Test net output #0: accuracy = 0.874\nI0820 06:20:24.118973 21584 solver.cpp:404]     Test net output #1: loss = 0.440277 (* 1 = 0.440277 loss)\nI0820 06:20:25.422099 21584 solver.cpp:228] Iteration 63500, loss = 0.124225\nI0820 06:20:25.422143 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 06:20:25.422160 21584 solver.cpp:244]     Train net output #1: loss = 0.124225 (* 1 = 0.124225 loss)\nI0820 06:20:25.529907 21584 sgd_solver.cpp:166] Iteration 63500, lr = 1.5875\nI0820 06:22:43.618933 21584 solver.cpp:337] Iteration 63600, Testing net (#0)\nI0820 06:24:06.060242 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87416\nI0820 06:24:06.060523 21584 solver.cpp:404]     Test net output #1: loss = 0.417944 (* 1 = 0.417944 loss)\nI0820 06:24:07.363690 21584 solver.cpp:228] Iteration 63600, loss = 0.242901\nI0820 06:24:07.363739 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 06:24:07.363755 21584 solver.cpp:244]     Train net output #1: loss = 0.242901 (* 1 = 0.242901 loss)\nI0820 06:24:07.475658 21584 sgd_solver.cpp:166] Iteration 63600, lr = 1.59\nI0820 06:26:25.514920 21584 solver.cpp:337] Iteration 63700, Testing net (#0)\nI0820 06:27:47.957953 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8876\nI0820 06:27:47.958242 21584 solver.cpp:404]     Test net output #1: loss = 0.372434 (* 1 = 0.372434 loss)\nI0820 06:27:49.260223 21584 solver.cpp:228] 
Iteration 63700, loss = 0.123654\nI0820 06:27:49.260257 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 06:27:49.260272 21584 solver.cpp:244]     Train net output #1: loss = 0.123654 (* 1 = 0.123654 loss)\nI0820 06:27:49.364992 21584 sgd_solver.cpp:166] Iteration 63700, lr = 1.5925\nI0820 06:30:07.355227 21584 solver.cpp:337] Iteration 63800, Testing net (#0)\nI0820 06:31:29.790951 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86956\nI0820 06:31:29.791265 21584 solver.cpp:404]     Test net output #1: loss = 0.447865 (* 1 = 0.447865 loss)\nI0820 06:31:31.094352 21584 solver.cpp:228] Iteration 63800, loss = 0.222313\nI0820 06:31:31.094396 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:31:31.094413 21584 solver.cpp:244]     Train net output #1: loss = 0.222313 (* 1 = 0.222313 loss)\nI0820 06:31:31.204376 21584 sgd_solver.cpp:166] Iteration 63800, lr = 1.595\nI0820 06:33:49.141286 21584 solver.cpp:337] Iteration 63900, Testing net (#0)\nI0820 06:35:11.572854 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87792\nI0820 06:35:11.573137 21584 solver.cpp:404]     Test net output #1: loss = 0.427719 (* 1 = 0.427719 loss)\nI0820 06:35:12.876612 21584 solver.cpp:228] Iteration 63900, loss = 0.224108\nI0820 06:35:12.876654 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 06:35:12.876670 21584 solver.cpp:244]     Train net output #1: loss = 0.224108 (* 1 = 0.224108 loss)\nI0820 06:35:12.985015 21584 sgd_solver.cpp:166] Iteration 63900, lr = 1.5975\nI0820 06:37:30.963816 21584 solver.cpp:337] Iteration 64000, Testing net (#0)\nI0820 06:38:53.396246 21584 solver.cpp:404]     Test net output #0: accuracy = 0.878\nI0820 06:38:53.396524 21584 solver.cpp:404]     Test net output #1: loss = 0.407451 (* 1 = 0.407451 loss)\nI0820 06:38:54.698809 21584 solver.cpp:228] Iteration 64000, loss = 0.104152\nI0820 06:38:54.698853 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 
06:38:54.698868 21584 solver.cpp:244]     Train net output #1: loss = 0.104152 (* 1 = 0.104152 loss)\nI0820 06:38:54.802111 21584 sgd_solver.cpp:166] Iteration 64000, lr = 1.6\nI0820 06:41:12.729516 21584 solver.cpp:337] Iteration 64100, Testing net (#0)\nI0820 06:42:35.160593 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88504\nI0820 06:42:35.160881 21584 solver.cpp:404]     Test net output #1: loss = 0.409229 (* 1 = 0.409229 loss)\nI0820 06:42:36.462910 21584 solver.cpp:228] Iteration 64100, loss = 0.1688\nI0820 06:42:36.462949 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 06:42:36.462963 21584 solver.cpp:244]     Train net output #1: loss = 0.1688 (* 1 = 0.1688 loss)\nI0820 06:42:36.568172 21584 sgd_solver.cpp:166] Iteration 64100, lr = 1.6025\nI0820 06:44:54.507736 21584 solver.cpp:337] Iteration 64200, Testing net (#0)\nI0820 06:46:16.913944 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88336\nI0820 06:46:16.914222 21584 solver.cpp:404]     Test net output #1: loss = 0.408936 (* 1 = 0.408936 loss)\nI0820 06:46:18.215745 21584 solver.cpp:228] Iteration 64200, loss = 0.0896944\nI0820 06:46:18.215776 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 06:46:18.215791 21584 solver.cpp:244]     Train net output #1: loss = 0.0896943 (* 1 = 0.0896943 loss)\nI0820 06:46:18.321751 21584 sgd_solver.cpp:166] Iteration 64200, lr = 1.605\nI0820 06:48:36.271139 21584 solver.cpp:337] Iteration 64300, Testing net (#0)\nI0820 06:49:58.676915 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8858\nI0820 06:49:58.677170 21584 solver.cpp:404]     Test net output #1: loss = 0.390805 (* 1 = 0.390805 loss)\nI0820 06:49:59.978186 21584 solver.cpp:228] Iteration 64300, loss = 0.208429\nI0820 06:49:59.978227 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:49:59.978242 21584 solver.cpp:244]     Train net output #1: loss = 0.208429 (* 1 = 0.208429 loss)\nI0820 06:50:00.085041 21584 
sgd_solver.cpp:166] Iteration 64300, lr = 1.6075\nI0820 06:52:18.054322 21584 solver.cpp:337] Iteration 64400, Testing net (#0)\nI0820 06:53:40.481127 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87992\nI0820 06:53:40.481375 21584 solver.cpp:404]     Test net output #1: loss = 0.417396 (* 1 = 0.417396 loss)\nI0820 06:53:41.783188 21584 solver.cpp:228] Iteration 64400, loss = 0.139701\nI0820 06:53:41.783224 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 06:53:41.783239 21584 solver.cpp:244]     Train net output #1: loss = 0.139701 (* 1 = 0.139701 loss)\nI0820 06:53:41.896085 21584 sgd_solver.cpp:166] Iteration 64400, lr = 1.61\nI0820 06:55:59.820925 21584 solver.cpp:337] Iteration 64500, Testing net (#0)\nI0820 06:57:22.253046 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8842\nI0820 06:57:22.253347 21584 solver.cpp:404]     Test net output #1: loss = 0.3847 (* 1 = 0.3847 loss)\nI0820 06:57:23.554931 21584 solver.cpp:228] Iteration 64500, loss = 0.14057\nI0820 06:57:23.554970 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 06:57:23.554986 21584 solver.cpp:244]     Train net output #1: loss = 0.14057 (* 1 = 0.14057 loss)\nI0820 06:57:23.666141 21584 sgd_solver.cpp:166] Iteration 64500, lr = 1.6125\nI0820 06:59:41.577167 21584 solver.cpp:337] Iteration 64600, Testing net (#0)\nI0820 07:01:04.007645 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8854\nI0820 07:01:04.007977 21584 solver.cpp:404]     Test net output #1: loss = 0.406416 (* 1 = 0.406416 loss)\nI0820 07:01:05.309412 21584 solver.cpp:228] Iteration 64600, loss = 0.211745\nI0820 07:01:05.309450 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:01:05.309466 21584 solver.cpp:244]     Train net output #1: loss = 0.211745 (* 1 = 0.211745 loss)\nI0820 07:01:05.420439 21584 sgd_solver.cpp:166] Iteration 64600, lr = 1.615\nI0820 07:03:23.361829 21584 solver.cpp:337] Iteration 64700, Testing net (#0)\nI0820 
07:04:45.646559 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88596\nI0820 07:04:45.646873 21584 solver.cpp:404]     Test net output #1: loss = 0.382713 (* 1 = 0.382713 loss)\nI0820 07:04:46.949012 21584 solver.cpp:228] Iteration 64700, loss = 0.172701\nI0820 07:04:46.949049 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 07:04:46.949065 21584 solver.cpp:244]     Train net output #1: loss = 0.172701 (* 1 = 0.172701 loss)\nI0820 07:04:47.063722 21584 sgd_solver.cpp:166] Iteration 64700, lr = 1.6175\nI0820 07:07:04.986001 21584 solver.cpp:337] Iteration 64800, Testing net (#0)\nI0820 07:08:27.292238 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87344\nI0820 07:08:27.292520 21584 solver.cpp:404]     Test net output #1: loss = 0.443297 (* 1 = 0.443297 loss)\nI0820 07:08:28.594575 21584 solver.cpp:228] Iteration 64800, loss = 0.123095\nI0820 07:08:28.594615 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:08:28.594631 21584 solver.cpp:244]     Train net output #1: loss = 0.123095 (* 1 = 0.123095 loss)\nI0820 07:08:28.702498 21584 sgd_solver.cpp:166] Iteration 64800, lr = 1.62\nI0820 07:10:46.562629 21584 solver.cpp:337] Iteration 64900, Testing net (#0)\nI0820 07:12:08.873180 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88452\nI0820 07:12:08.873478 21584 solver.cpp:404]     Test net output #1: loss = 0.408244 (* 1 = 0.408244 loss)\nI0820 07:12:10.175554 21584 solver.cpp:228] Iteration 64900, loss = 0.249702\nI0820 07:12:10.175593 21584 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI0820 07:12:10.175609 21584 solver.cpp:244]     Train net output #1: loss = 0.249702 (* 1 = 0.249702 loss)\nI0820 07:12:10.283031 21584 sgd_solver.cpp:166] Iteration 64900, lr = 1.6225\nI0820 07:14:28.168397 21584 solver.cpp:337] Iteration 65000, Testing net (#0)\nI0820 07:15:50.457180 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88288\nI0820 07:15:50.457432 21584 solver.cpp:404]   
  Test net output #1: loss = 0.389427 (* 1 = 0.389427 loss)\nI0820 07:15:51.759218 21584 solver.cpp:228] Iteration 65000, loss = 0.190182\nI0820 07:15:51.759258 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 07:15:51.759274 21584 solver.cpp:244]     Train net output #1: loss = 0.190182 (* 1 = 0.190182 loss)\nI0820 07:15:51.868465 21584 sgd_solver.cpp:166] Iteration 65000, lr = 1.625\nI0820 07:18:09.748592 21584 solver.cpp:337] Iteration 65100, Testing net (#0)\nI0820 07:19:32.039994 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88356\nI0820 07:19:32.040309 21584 solver.cpp:404]     Test net output #1: loss = 0.389019 (* 1 = 0.389019 loss)\nI0820 07:19:33.341953 21584 solver.cpp:228] Iteration 65100, loss = 0.183687\nI0820 07:19:33.341994 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:19:33.342010 21584 solver.cpp:244]     Train net output #1: loss = 0.183686 (* 1 = 0.183686 loss)\nI0820 07:19:33.449926 21584 sgd_solver.cpp:166] Iteration 65100, lr = 1.6275\nI0820 07:21:51.335283 21584 solver.cpp:337] Iteration 65200, Testing net (#0)\nI0820 07:23:13.617269 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87596\nI0820 07:23:13.617554 21584 solver.cpp:404]     Test net output #1: loss = 0.435573 (* 1 = 0.435573 loss)\nI0820 07:23:14.918874 21584 solver.cpp:228] Iteration 65200, loss = 0.262196\nI0820 07:23:14.918915 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 07:23:14.918929 21584 solver.cpp:244]     Train net output #1: loss = 0.262196 (* 1 = 0.262196 loss)\nI0820 07:23:15.029234 21584 sgd_solver.cpp:166] Iteration 65200, lr = 1.63\nI0820 07:25:33.017613 21584 solver.cpp:337] Iteration 65300, Testing net (#0)\nI0820 07:26:55.295755 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88812\nI0820 07:26:55.296075 21584 solver.cpp:404]     Test net output #1: loss = 0.396281 (* 1 = 0.396281 loss)\nI0820 07:26:56.596864 21584 solver.cpp:228] Iteration 65300, loss = 
0.100563\nI0820 07:26:56.596902 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 07:26:56.596918 21584 solver.cpp:244]     Train net output #1: loss = 0.100563 (* 1 = 0.100563 loss)\nI0820 07:26:56.711177 21584 sgd_solver.cpp:166] Iteration 65300, lr = 1.6325\nI0820 07:29:14.630146 21584 solver.cpp:337] Iteration 65400, Testing net (#0)\nI0820 07:30:36.898121 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8762\nI0820 07:30:36.898461 21584 solver.cpp:404]     Test net output #1: loss = 0.418019 (* 1 = 0.418019 loss)\nI0820 07:30:38.199703 21584 solver.cpp:228] Iteration 65400, loss = 0.133619\nI0820 07:30:38.199748 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 07:30:38.199764 21584 solver.cpp:244]     Train net output #1: loss = 0.133619 (* 1 = 0.133619 loss)\nI0820 07:30:38.317966 21584 sgd_solver.cpp:166] Iteration 65400, lr = 1.635\nI0820 07:32:56.307348 21584 solver.cpp:337] Iteration 65500, Testing net (#0)\nI0820 07:34:18.586904 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88092\nI0820 07:34:18.587193 21584 solver.cpp:404]     Test net output #1: loss = 0.401276 (* 1 = 0.401276 loss)\nI0820 07:34:19.888721 21584 solver.cpp:228] Iteration 65500, loss = 0.158058\nI0820 07:34:19.888766 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 07:34:19.888782 21584 solver.cpp:244]     Train net output #1: loss = 0.158057 (* 1 = 0.158057 loss)\nI0820 07:34:19.997637 21584 sgd_solver.cpp:166] Iteration 65500, lr = 1.6375\nI0820 07:36:38.095578 21584 solver.cpp:337] Iteration 65600, Testing net (#0)\nI0820 07:38:00.494199 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8796\nI0820 07:38:00.494437 21584 solver.cpp:404]     Test net output #1: loss = 0.422135 (* 1 = 0.422135 loss)\nI0820 07:38:01.796164 21584 solver.cpp:228] Iteration 65600, loss = 0.142043\nI0820 07:38:01.796206 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 07:38:01.796221 21584 
solver.cpp:244]     Train net output #1: loss = 0.142043 (* 1 = 0.142043 loss)\nI0820 07:38:01.906883 21584 sgd_solver.cpp:166] Iteration 65600, lr = 1.64\nI0820 07:40:19.956431 21584 solver.cpp:337] Iteration 65700, Testing net (#0)\nI0820 07:41:42.419602 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88012\nI0820 07:41:42.419893 21584 solver.cpp:404]     Test net output #1: loss = 0.391971 (* 1 = 0.391971 loss)\nI0820 07:41:43.722352 21584 solver.cpp:228] Iteration 65700, loss = 0.140484\nI0820 07:41:43.722395 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 07:41:43.722411 21584 solver.cpp:244]     Train net output #1: loss = 0.140483 (* 1 = 0.140483 loss)\nI0820 07:41:43.829793 21584 sgd_solver.cpp:166] Iteration 65700, lr = 1.6425\nI0820 07:44:01.848417 21584 solver.cpp:337] Iteration 65800, Testing net (#0)\nI0820 07:45:24.310979 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88168\nI0820 07:45:24.311314 21584 solver.cpp:404]     Test net output #1: loss = 0.426403 (* 1 = 0.426403 loss)\nI0820 07:45:25.614892 21584 solver.cpp:228] Iteration 65800, loss = 0.103042\nI0820 07:45:25.614926 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:45:25.614941 21584 solver.cpp:244]     Train net output #1: loss = 0.103042 (* 1 = 0.103042 loss)\nI0820 07:45:25.723964 21584 sgd_solver.cpp:166] Iteration 65800, lr = 1.645\nI0820 07:47:43.724123 21584 solver.cpp:337] Iteration 65900, Testing net (#0)\nI0820 07:49:06.211804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0820 07:49:06.212141 21584 solver.cpp:404]     Test net output #1: loss = 0.405292 (* 1 = 0.405292 loss)\nI0820 07:49:07.514196 21584 solver.cpp:228] Iteration 65900, loss = 0.118011\nI0820 07:49:07.514238 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 07:49:07.514255 21584 solver.cpp:244]     Train net output #1: loss = 0.118011 (* 1 = 0.118011 loss)\nI0820 07:49:07.625139 21584 sgd_solver.cpp:166] 
Iteration 65900, lr = 1.6475\nI0820 07:51:25.528409 21584 solver.cpp:337] Iteration 66000, Testing net (#0)\nI0820 07:52:48.012578 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88324\nI0820 07:52:48.012923 21584 solver.cpp:404]     Test net output #1: loss = 0.402513 (* 1 = 0.402513 loss)\nI0820 07:52:49.315482 21584 solver.cpp:228] Iteration 66000, loss = 0.105828\nI0820 07:52:49.315516 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 07:52:49.315531 21584 solver.cpp:244]     Train net output #1: loss = 0.105827 (* 1 = 0.105827 loss)\nI0820 07:52:49.425171 21584 sgd_solver.cpp:166] Iteration 66000, lr = 1.65\nI0820 07:55:07.446756 21584 solver.cpp:337] Iteration 66100, Testing net (#0)\nI0820 07:56:29.933305 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88296\nI0820 07:56:29.933625 21584 solver.cpp:404]     Test net output #1: loss = 0.399731 (* 1 = 0.399731 loss)\nI0820 07:56:31.236239 21584 solver.cpp:228] Iteration 66100, loss = 0.173479\nI0820 07:56:31.236281 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 07:56:31.236297 21584 solver.cpp:244]     Train net output #1: loss = 0.173479 (* 1 = 0.173479 loss)\nI0820 07:56:31.344998 21584 sgd_solver.cpp:166] Iteration 66100, lr = 1.6525\nI0820 07:58:49.401103 21584 solver.cpp:337] Iteration 66200, Testing net (#0)\nI0820 08:00:11.891806 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88328\nI0820 08:00:11.892135 21584 solver.cpp:404]     Test net output #1: loss = 0.394817 (* 1 = 0.394817 loss)\nI0820 08:00:13.195757 21584 solver.cpp:228] Iteration 66200, loss = 0.199283\nI0820 08:00:13.195791 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 08:00:13.195806 21584 solver.cpp:244]     Train net output #1: loss = 0.199283 (* 1 = 0.199283 loss)\nI0820 08:00:13.302222 21584 sgd_solver.cpp:166] Iteration 66200, lr = 1.655\nI0820 08:02:31.345715 21584 solver.cpp:337] Iteration 66300, Testing net (#0)\nI0820 
08:03:53.763555 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0820 08:03:53.763839 21584 solver.cpp:404]     Test net output #1: loss = 0.401231 (* 1 = 0.401231 loss)\nI0820 08:03:55.066256 21584 solver.cpp:228] Iteration 66300, loss = 0.14066\nI0820 08:03:55.066296 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 08:03:55.066313 21584 solver.cpp:244]     Train net output #1: loss = 0.140659 (* 1 = 0.140659 loss)\nI0820 08:03:55.176784 21584 sgd_solver.cpp:166] Iteration 66300, lr = 1.6575\nI0820 08:06:13.216948 21584 solver.cpp:337] Iteration 66400, Testing net (#0)\nI0820 08:07:35.667012 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0820 08:07:35.667453 21584 solver.cpp:404]     Test net output #1: loss = 0.394734 (* 1 = 0.394734 loss)\nI0820 08:07:36.969758 21584 solver.cpp:228] Iteration 66400, loss = 0.155089\nI0820 08:07:36.969796 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:07:36.969811 21584 solver.cpp:244]     Train net output #1: loss = 0.155088 (* 1 = 0.155088 loss)\nI0820 08:07:37.076025 21584 sgd_solver.cpp:166] Iteration 66400, lr = 1.66\nI0820 08:09:55.154016 21584 solver.cpp:337] Iteration 66500, Testing net (#0)\nI0820 08:11:17.598155 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8868\nI0820 08:11:17.598469 21584 solver.cpp:404]     Test net output #1: loss = 0.373708 (* 1 = 0.373708 loss)\nI0820 08:11:18.902982 21584 solver.cpp:228] Iteration 66500, loss = 0.0727128\nI0820 08:11:18.903025 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:11:18.903041 21584 solver.cpp:244]     Train net output #1: loss = 0.0727127 (* 1 = 0.0727127 loss)\nI0820 08:11:19.004823 21584 sgd_solver.cpp:166] Iteration 66500, lr = 1.6625\nI0820 08:13:36.896507 21584 solver.cpp:337] Iteration 66600, Testing net (#0)\nI0820 08:14:59.336954 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88408\nI0820 08:14:59.337272 21584 solver.cpp:404] 
    Test net output #1: loss = 0.395141 (* 1 = 0.395141 loss)\nI0820 08:15:00.639401 21584 solver.cpp:228] Iteration 66600, loss = 0.0912545\nI0820 08:15:00.639444 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 08:15:00.639469 21584 solver.cpp:244]     Train net output #1: loss = 0.0912544 (* 1 = 0.0912544 loss)\nI0820 08:15:00.745839 21584 sgd_solver.cpp:166] Iteration 66600, lr = 1.665\nI0820 08:17:18.689188 21584 solver.cpp:337] Iteration 66700, Testing net (#0)\nI0820 08:18:41.124498 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89056\nI0820 08:18:41.124837 21584 solver.cpp:404]     Test net output #1: loss = 0.372417 (* 1 = 0.372417 loss)\nI0820 08:18:42.426596 21584 solver.cpp:228] Iteration 66700, loss = 0.211664\nI0820 08:18:42.426638 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 08:18:42.426662 21584 solver.cpp:244]     Train net output #1: loss = 0.211664 (* 1 = 0.211664 loss)\nI0820 08:18:42.533480 21584 sgd_solver.cpp:166] Iteration 66700, lr = 1.6675\nI0820 08:21:00.543596 21584 solver.cpp:337] Iteration 66800, Testing net (#0)\nI0820 08:22:23.015249 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0820 08:22:23.015580 21584 solver.cpp:404]     Test net output #1: loss = 0.406169 (* 1 = 0.406169 loss)\nI0820 08:22:24.318379 21584 solver.cpp:228] Iteration 66800, loss = 0.202889\nI0820 08:22:24.318424 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 08:22:24.318449 21584 solver.cpp:244]     Train net output #1: loss = 0.202889 (* 1 = 0.202889 loss)\nI0820 08:22:24.425696 21584 sgd_solver.cpp:166] Iteration 66800, lr = 1.67\nI0820 08:24:42.420339 21584 solver.cpp:337] Iteration 66900, Testing net (#0)\nI0820 08:26:04.878841 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8832\nI0820 08:26:04.879153 21584 solver.cpp:404]     Test net output #1: loss = 0.399358 (* 1 = 0.399358 loss)\nI0820 08:26:06.182171 21584 solver.cpp:228] Iteration 66900, 
loss = 0.139316\nI0820 08:26:06.182217 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 08:26:06.182241 21584 solver.cpp:244]     Train net output #1: loss = 0.139316 (* 1 = 0.139316 loss)\nI0820 08:26:06.289458 21584 sgd_solver.cpp:166] Iteration 66900, lr = 1.6725\nI0820 08:28:24.435266 21584 solver.cpp:337] Iteration 67000, Testing net (#0)\nI0820 08:29:46.889624 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8826\nI0820 08:29:46.889963 21584 solver.cpp:404]     Test net output #1: loss = 0.40563 (* 1 = 0.40563 loss)\nI0820 08:29:48.192173 21584 solver.cpp:228] Iteration 67000, loss = 0.267177\nI0820 08:29:48.192219 21584 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 08:29:48.192243 21584 solver.cpp:244]     Train net output #1: loss = 0.267177 (* 1 = 0.267177 loss)\nI0820 08:29:48.305497 21584 sgd_solver.cpp:166] Iteration 67000, lr = 1.675\nI0820 08:32:06.347656 21584 solver.cpp:337] Iteration 67100, Testing net (#0)\nI0820 08:33:28.795002 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87688\nI0820 08:33:28.795289 21584 solver.cpp:404]     Test net output #1: loss = 0.424805 (* 1 = 0.424805 loss)\nI0820 08:33:30.097487 21584 solver.cpp:228] Iteration 67100, loss = 0.161909\nI0820 08:33:30.097532 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 08:33:30.097556 21584 solver.cpp:244]     Train net output #1: loss = 0.161909 (* 1 = 0.161909 loss)\nI0820 08:33:30.205670 21584 sgd_solver.cpp:166] Iteration 67100, lr = 1.6775\nI0820 08:35:48.132568 21584 solver.cpp:337] Iteration 67200, Testing net (#0)\nI0820 08:37:10.584007 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88872\nI0820 08:37:10.584331 21584 solver.cpp:404]     Test net output #1: loss = 0.38917 (* 1 = 0.38917 loss)\nI0820 08:37:11.886531 21584 solver.cpp:228] Iteration 67200, loss = 0.160435\nI0820 08:37:11.886575 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:37:11.886600 
21584 solver.cpp:244]     Train net output #1: loss = 0.160435 (* 1 = 0.160435 loss)\nI0820 08:37:11.994441 21584 sgd_solver.cpp:166] Iteration 67200, lr = 1.68\nI0820 08:39:29.992324 21584 solver.cpp:337] Iteration 67300, Testing net (#0)\nI0820 08:40:52.438958 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88608\nI0820 08:40:52.439296 21584 solver.cpp:404]     Test net output #1: loss = 0.379261 (* 1 = 0.379261 loss)\nI0820 08:40:53.742027 21584 solver.cpp:228] Iteration 67300, loss = 0.167496\nI0820 08:40:53.742072 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 08:40:53.742096 21584 solver.cpp:244]     Train net output #1: loss = 0.167496 (* 1 = 0.167496 loss)\nI0820 08:40:53.853623 21584 sgd_solver.cpp:166] Iteration 67300, lr = 1.6825\nI0820 08:43:11.867856 21584 solver.cpp:337] Iteration 67400, Testing net (#0)\nI0820 08:44:34.319445 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88624\nI0820 08:44:34.319748 21584 solver.cpp:404]     Test net output #1: loss = 0.383397 (* 1 = 0.383397 loss)\nI0820 08:44:35.622826 21584 solver.cpp:228] Iteration 67400, loss = 0.0931444\nI0820 08:44:35.622874 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:44:35.622898 21584 solver.cpp:244]     Train net output #1: loss = 0.0931444 (* 1 = 0.0931444 loss)\nI0820 08:44:35.729820 21584 sgd_solver.cpp:166] Iteration 67400, lr = 1.685\nI0820 08:46:53.741387 21584 solver.cpp:337] Iteration 67500, Testing net (#0)\nI0820 08:48:16.188410 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8806\nI0820 08:48:16.188719 21584 solver.cpp:404]     Test net output #1: loss = 0.398441 (* 1 = 0.398441 loss)\nI0820 08:48:17.491580 21584 solver.cpp:228] Iteration 67500, loss = 0.17646\nI0820 08:48:17.491616 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 08:48:17.491641 21584 solver.cpp:244]     Train net output #1: loss = 0.17646 (* 1 = 0.17646 loss)\nI0820 08:48:17.598856 21584 
sgd_solver.cpp:166] Iteration 67500, lr = 1.6875\nI0820 08:50:35.540613 21584 solver.cpp:337] Iteration 67600, Testing net (#0)\nI0820 08:51:57.956270 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87536\nI0820 08:51:57.956596 21584 solver.cpp:404]     Test net output #1: loss = 0.429159 (* 1 = 0.429159 loss)\nI0820 08:51:59.259305 21584 solver.cpp:228] Iteration 67600, loss = 0.185511\nI0820 08:51:59.259349 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 08:51:59.259372 21584 solver.cpp:244]     Train net output #1: loss = 0.185511 (* 1 = 0.185511 loss)\nI0820 08:51:59.368155 21584 sgd_solver.cpp:166] Iteration 67600, lr = 1.69\nI0820 08:54:17.353543 21584 solver.cpp:337] Iteration 67700, Testing net (#0)\nI0820 08:55:39.772265 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0820 08:55:39.772603 21584 solver.cpp:404]     Test net output #1: loss = 0.396136 (* 1 = 0.396136 loss)\nI0820 08:55:41.074177 21584 solver.cpp:228] Iteration 67700, loss = 0.0983299\nI0820 08:55:41.074214 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 08:55:41.074237 21584 solver.cpp:244]     Train net output #1: loss = 0.09833 (* 1 = 0.09833 loss)\nI0820 08:55:41.179111 21584 sgd_solver.cpp:166] Iteration 67700, lr = 1.6925\nI0820 08:57:59.137347 21584 solver.cpp:337] Iteration 67800, Testing net (#0)\nI0820 08:59:21.556308 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87872\nI0820 08:59:21.556640 21584 solver.cpp:404]     Test net output #1: loss = 0.41423 (* 1 = 0.41423 loss)\nI0820 08:59:22.858271 21584 solver.cpp:228] Iteration 67800, loss = 0.100266\nI0820 08:59:22.858315 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 08:59:22.858341 21584 solver.cpp:244]     Train net output #1: loss = 0.100266 (* 1 = 0.100266 loss)\nI0820 08:59:22.973834 21584 sgd_solver.cpp:166] Iteration 67800, lr = 1.695\nI0820 09:01:40.945586 21584 solver.cpp:337] Iteration 67900, Testing net 
(#0)\nI0820 09:03:03.364220 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87888\nI0820 09:03:03.364527 21584 solver.cpp:404]     Test net output #1: loss = 0.42055 (* 1 = 0.42055 loss)\nI0820 09:03:04.665778 21584 solver.cpp:228] Iteration 67900, loss = 0.145741\nI0820 09:03:04.665822 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:03:04.665846 21584 solver.cpp:244]     Train net output #1: loss = 0.145741 (* 1 = 0.145741 loss)\nI0820 09:03:04.778197 21584 sgd_solver.cpp:166] Iteration 67900, lr = 1.6975\nI0820 09:05:22.789599 21584 solver.cpp:337] Iteration 68000, Testing net (#0)\nI0820 09:06:45.204172 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86808\nI0820 09:06:45.204491 21584 solver.cpp:404]     Test net output #1: loss = 0.463503 (* 1 = 0.463503 loss)\nI0820 09:06:46.506038 21584 solver.cpp:228] Iteration 68000, loss = 0.152243\nI0820 09:06:46.506083 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 09:06:46.506105 21584 solver.cpp:244]     Train net output #1: loss = 0.152243 (* 1 = 0.152243 loss)\nI0820 09:06:46.611953 21584 sgd_solver.cpp:166] Iteration 68000, lr = 1.7\nI0820 09:09:04.550447 21584 solver.cpp:337] Iteration 68100, Testing net (#0)\nI0820 09:10:26.957998 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87836\nI0820 09:10:26.958318 21584 solver.cpp:404]     Test net output #1: loss = 0.403155 (* 1 = 0.403155 loss)\nI0820 09:10:28.259796 21584 solver.cpp:228] Iteration 68100, loss = 0.163478\nI0820 09:10:28.259842 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 09:10:28.259858 21584 solver.cpp:244]     Train net output #1: loss = 0.163479 (* 1 = 0.163479 loss)\nI0820 09:10:28.365870 21584 sgd_solver.cpp:166] Iteration 68100, lr = 1.7025\nI0820 09:12:46.262722 21584 solver.cpp:337] Iteration 68200, Testing net (#0)\nI0820 09:14:08.840243 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88228\nI0820 09:14:08.840550 21584 
solver.cpp:404]     Test net output #1: loss = 0.40469 (* 1 = 0.40469 loss)\nI0820 09:14:10.142464 21584 solver.cpp:228] Iteration 68200, loss = 0.088872\nI0820 09:14:10.142513 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 09:14:10.142537 21584 solver.cpp:244]     Train net output #1: loss = 0.088872 (* 1 = 0.088872 loss)\nI0820 09:14:10.249683 21584 sgd_solver.cpp:166] Iteration 68200, lr = 1.705\nI0820 09:16:28.196135 21584 solver.cpp:337] Iteration 68300, Testing net (#0)\nI0820 09:17:50.619408 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87584\nI0820 09:17:50.619732 21584 solver.cpp:404]     Test net output #1: loss = 0.428937 (* 1 = 0.428937 loss)\nI0820 09:17:51.921540 21584 solver.cpp:228] Iteration 68300, loss = 0.201192\nI0820 09:17:51.921582 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:17:51.921598 21584 solver.cpp:244]     Train net output #1: loss = 0.201192 (* 1 = 0.201192 loss)\nI0820 09:17:52.029664 21584 sgd_solver.cpp:166] Iteration 68300, lr = 1.7075\nI0820 09:20:10.103471 21584 solver.cpp:337] Iteration 68400, Testing net (#0)\nI0820 09:21:32.416951 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88312\nI0820 09:21:32.417207 21584 solver.cpp:404]     Test net output #1: loss = 0.411114 (* 1 = 0.411114 loss)\nI0820 09:21:33.718533 21584 solver.cpp:228] Iteration 68400, loss = 0.141003\nI0820 09:21:33.718577 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 09:21:33.718593 21584 solver.cpp:244]     Train net output #1: loss = 0.141003 (* 1 = 0.141003 loss)\nI0820 09:21:33.826055 21584 sgd_solver.cpp:166] Iteration 68400, lr = 1.71\nI0820 09:23:51.881178 21584 solver.cpp:337] Iteration 68500, Testing net (#0)\nI0820 09:25:14.220350 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0820 09:25:14.220669 21584 solver.cpp:404]     Test net output #1: loss = 0.402244 (* 1 = 0.402244 loss)\nI0820 09:25:15.522951 21584 solver.cpp:228] 
Iteration 68500, loss = 0.155706\nI0820 09:25:15.522999 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:25:15.523021 21584 solver.cpp:244]     Train net output #1: loss = 0.155706 (* 1 = 0.155706 loss)\nI0820 09:25:15.634245 21584 sgd_solver.cpp:166] Iteration 68500, lr = 1.7125\nI0820 09:27:33.656033 21584 solver.cpp:337] Iteration 68600, Testing net (#0)\nI0820 09:28:55.988366 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88588\nI0820 09:28:55.988685 21584 solver.cpp:404]     Test net output #1: loss = 0.377307 (* 1 = 0.377307 loss)\nI0820 09:28:57.292619 21584 solver.cpp:228] Iteration 68600, loss = 0.101326\nI0820 09:28:57.292676 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:28:57.292693 21584 solver.cpp:244]     Train net output #1: loss = 0.101326 (* 1 = 0.101326 loss)\nI0820 09:28:57.397516 21584 sgd_solver.cpp:166] Iteration 68600, lr = 1.715\nI0820 09:31:15.211477 21584 solver.cpp:337] Iteration 68700, Testing net (#0)\nI0820 09:32:38.573863 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88176\nI0820 09:32:38.574249 21584 solver.cpp:404]     Test net output #1: loss = 0.394703 (* 1 = 0.394703 loss)\nI0820 09:32:39.878885 21584 solver.cpp:228] Iteration 68700, loss = 0.161349\nI0820 09:32:39.878933 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:32:39.878949 21584 solver.cpp:244]     Train net output #1: loss = 0.161349 (* 1 = 0.161349 loss)\nI0820 09:32:39.984858 21584 sgd_solver.cpp:166] Iteration 68700, lr = 1.7175\nI0820 09:34:57.803388 21584 solver.cpp:337] Iteration 68800, Testing net (#0)\nI0820 09:36:21.174201 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87808\nI0820 09:36:21.174579 21584 solver.cpp:404]     Test net output #1: loss = 0.438462 (* 1 = 0.438462 loss)\nI0820 09:36:22.480764 21584 solver.cpp:228] Iteration 68800, loss = 0.165699\nI0820 09:36:22.480824 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 
09:36:22.480842 21584 solver.cpp:244]     Train net output #1: loss = 0.165699 (* 1 = 0.165699 loss)\nI0820 09:36:22.584550 21584 sgd_solver.cpp:166] Iteration 68800, lr = 1.72\nI0820 09:38:40.377161 21584 solver.cpp:337] Iteration 68900, Testing net (#0)\nI0820 09:40:03.748988 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87348\nI0820 09:40:03.749372 21584 solver.cpp:404]     Test net output #1: loss = 0.437293 (* 1 = 0.437293 loss)\nI0820 09:40:05.055135 21584 solver.cpp:228] Iteration 68900, loss = 0.184072\nI0820 09:40:05.055189 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 09:40:05.055207 21584 solver.cpp:244]     Train net output #1: loss = 0.184072 (* 1 = 0.184072 loss)\nI0820 09:40:05.154850 21584 sgd_solver.cpp:166] Iteration 68900, lr = 1.7225\nI0820 09:42:23.049582 21584 solver.cpp:337] Iteration 69000, Testing net (#0)\nI0820 09:43:46.394774 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88848\nI0820 09:43:46.395182 21584 solver.cpp:404]     Test net output #1: loss = 0.38816 (* 1 = 0.38816 loss)\nI0820 09:43:47.699841 21584 solver.cpp:228] Iteration 69000, loss = 0.179309\nI0820 09:43:47.699895 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 09:43:47.699911 21584 solver.cpp:244]     Train net output #1: loss = 0.179309 (* 1 = 0.179309 loss)\nI0820 09:43:47.805177 21584 sgd_solver.cpp:166] Iteration 69000, lr = 1.725\nI0820 09:46:05.530831 21584 solver.cpp:337] Iteration 69100, Testing net (#0)\nI0820 09:47:28.887143 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88124\nI0820 09:47:28.887521 21584 solver.cpp:404]     Test net output #1: loss = 0.400898 (* 1 = 0.400898 loss)\nI0820 09:47:30.192662 21584 solver.cpp:228] Iteration 69100, loss = 0.127451\nI0820 09:47:30.192718 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 09:47:30.192734 21584 solver.cpp:244]     Train net output #1: loss = 0.127451 (* 1 = 0.127451 loss)\nI0820 09:47:30.294772 21584 
sgd_solver.cpp:166] Iteration 69100, lr = 1.7275\nI0820 09:49:48.078799 21584 solver.cpp:337] Iteration 69200, Testing net (#0)\nI0820 09:51:11.445565 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87644\nI0820 09:51:11.445946 21584 solver.cpp:404]     Test net output #1: loss = 0.427937 (* 1 = 0.427937 loss)\nI0820 09:51:12.750785 21584 solver.cpp:228] Iteration 69200, loss = 0.118043\nI0820 09:51:12.750843 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 09:51:12.750859 21584 solver.cpp:244]     Train net output #1: loss = 0.118043 (* 1 = 0.118043 loss)\nI0820 09:51:12.854596 21584 sgd_solver.cpp:166] Iteration 69200, lr = 1.73\nI0820 09:53:30.690340 21584 solver.cpp:337] Iteration 69300, Testing net (#0)\nI0820 09:54:54.061991 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87716\nI0820 09:54:54.062393 21584 solver.cpp:404]     Test net output #1: loss = 0.416642 (* 1 = 0.416642 loss)\nI0820 09:54:55.367125 21584 solver.cpp:228] Iteration 69300, loss = 0.229992\nI0820 09:54:55.367179 21584 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 09:54:55.367195 21584 solver.cpp:244]     Train net output #1: loss = 0.229992 (* 1 = 0.229992 loss)\nI0820 09:54:55.470192 21584 sgd_solver.cpp:166] Iteration 69300, lr = 1.7325\nI0820 09:57:13.276085 21584 solver.cpp:337] Iteration 69400, Testing net (#0)\nI0820 09:58:36.659574 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0820 09:58:36.659978 21584 solver.cpp:404]     Test net output #1: loss = 0.417278 (* 1 = 0.417278 loss)\nI0820 09:58:37.965117 21584 solver.cpp:228] Iteration 69400, loss = 0.160272\nI0820 09:58:37.965170 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 09:58:37.965188 21584 solver.cpp:244]     Train net output #1: loss = 0.160272 (* 1 = 0.160272 loss)\nI0820 09:58:38.068420 21584 sgd_solver.cpp:166] Iteration 69400, lr = 1.735\nI0820 10:00:55.884327 21584 solver.cpp:337] Iteration 69500, Testing net 
(#0)\nI0820 10:02:19.260442 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8824\nI0820 10:02:19.260819 21584 solver.cpp:404]     Test net output #1: loss = 0.404676 (* 1 = 0.404676 loss)\nI0820 10:02:20.566920 21584 solver.cpp:228] Iteration 69500, loss = 0.210782\nI0820 10:02:20.566980 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:02:20.566998 21584 solver.cpp:244]     Train net output #1: loss = 0.210782 (* 1 = 0.210782 loss)\nI0820 10:02:20.670089 21584 sgd_solver.cpp:166] Iteration 69500, lr = 1.7375\nI0820 10:04:38.415947 21584 solver.cpp:337] Iteration 69600, Testing net (#0)\nI0820 10:06:01.780139 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87996\nI0820 10:06:01.780530 21584 solver.cpp:404]     Test net output #1: loss = 0.40868 (* 1 = 0.40868 loss)\nI0820 10:06:03.086593 21584 solver.cpp:228] Iteration 69600, loss = 0.202648\nI0820 10:06:03.086647 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 10:06:03.086663 21584 solver.cpp:244]     Train net output #1: loss = 0.202648 (* 1 = 0.202648 loss)\nI0820 10:06:03.186034 21584 sgd_solver.cpp:166] Iteration 69600, lr = 1.74\nI0820 10:08:21.036407 21584 solver.cpp:337] Iteration 69700, Testing net (#0)\nI0820 10:09:44.407155 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884\nI0820 10:09:44.407536 21584 solver.cpp:404]     Test net output #1: loss = 0.377479 (* 1 = 0.377479 loss)\nI0820 10:09:45.713629 21584 solver.cpp:228] Iteration 69700, loss = 0.153441\nI0820 10:09:45.713688 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:09:45.713706 21584 solver.cpp:244]     Train net output #1: loss = 0.153441 (* 1 = 0.153441 loss)\nI0820 10:09:45.810907 21584 sgd_solver.cpp:166] Iteration 69700, lr = 1.7425\nI0820 10:12:03.645797 21584 solver.cpp:337] Iteration 69800, Testing net (#0)\nI0820 10:13:27.013427 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0820 10:13:27.013833 21584 
solver.cpp:404]     Test net output #1: loss = 0.378043 (* 1 = 0.378043 loss)\nI0820 10:13:28.318722 21584 solver.cpp:228] Iteration 69800, loss = 0.13527\nI0820 10:13:28.318783 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 10:13:28.318799 21584 solver.cpp:244]     Train net output #1: loss = 0.13527 (* 1 = 0.13527 loss)\nI0820 10:13:28.424549 21584 sgd_solver.cpp:166] Iteration 69800, lr = 1.745\nI0820 10:15:46.242455 21584 solver.cpp:337] Iteration 69900, Testing net (#0)\nI0820 10:17:09.608184 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87932\nI0820 10:17:09.608587 21584 solver.cpp:404]     Test net output #1: loss = 0.407604 (* 1 = 0.407604 loss)\nI0820 10:17:10.913203 21584 solver.cpp:228] Iteration 69900, loss = 0.222433\nI0820 10:17:10.913264 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 10:17:10.913281 21584 solver.cpp:244]     Train net output #1: loss = 0.222433 (* 1 = 0.222433 loss)\nI0820 10:17:11.014139 21584 sgd_solver.cpp:166] Iteration 69900, lr = 1.7475\nI0820 10:19:28.888844 21584 solver.cpp:337] Iteration 70000, Testing net (#0)\nI0820 10:20:52.255794 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88552\nI0820 10:20:52.256189 21584 solver.cpp:404]     Test net output #1: loss = 0.380434 (* 1 = 0.380434 loss)\nI0820 10:20:53.561878 21584 solver.cpp:228] Iteration 70000, loss = 0.145603\nI0820 10:20:53.561921 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:20:53.561938 21584 solver.cpp:244]     Train net output #1: loss = 0.145603 (* 1 = 0.145603 loss)\nI0820 10:20:53.667606 21584 sgd_solver.cpp:166] Iteration 70000, lr = 1.75\nI0820 10:23:11.502905 21584 solver.cpp:337] Iteration 70100, Testing net (#0)\nI0820 10:24:34.874765 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8818\nI0820 10:24:34.875157 21584 solver.cpp:404]     Test net output #1: loss = 0.39677 (* 1 = 0.39677 loss)\nI0820 10:24:36.181141 21584 solver.cpp:228] Iteration 
70100, loss = 0.130822\nI0820 10:24:36.181185 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 10:24:36.181200 21584 solver.cpp:244]     Train net output #1: loss = 0.130822 (* 1 = 0.130822 loss)\nI0820 10:24:36.288290 21584 sgd_solver.cpp:166] Iteration 70100, lr = 1.7525\nI0820 10:26:54.059381 21584 solver.cpp:337] Iteration 70200, Testing net (#0)\nI0820 10:28:17.431028 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87856\nI0820 10:28:17.431411 21584 solver.cpp:404]     Test net output #1: loss = 0.399703 (* 1 = 0.399703 loss)\nI0820 10:28:18.736062 21584 solver.cpp:228] Iteration 70200, loss = 0.213238\nI0820 10:28:18.736122 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 10:28:18.736140 21584 solver.cpp:244]     Train net output #1: loss = 0.213238 (* 1 = 0.213238 loss)\nI0820 10:28:18.834818 21584 sgd_solver.cpp:166] Iteration 70200, lr = 1.755\nI0820 10:30:36.603451 21584 solver.cpp:337] Iteration 70300, Testing net (#0)\nI0820 10:31:59.974825 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0820 10:31:59.975247 21584 solver.cpp:404]     Test net output #1: loss = 0.397531 (* 1 = 0.397531 loss)\nI0820 10:32:01.280480 21584 solver.cpp:228] Iteration 70300, loss = 0.20748\nI0820 10:32:01.280519 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:32:01.280535 21584 solver.cpp:244]     Train net output #1: loss = 0.20748 (* 1 = 0.20748 loss)\nI0820 10:32:01.381672 21584 sgd_solver.cpp:166] Iteration 70300, lr = 1.7575\nI0820 10:34:19.094166 21584 solver.cpp:337] Iteration 70400, Testing net (#0)\nI0820 10:35:42.484591 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 10:35:42.485002 21584 solver.cpp:404]     Test net output #1: loss = 0.398854 (* 1 = 0.398854 loss)\nI0820 10:35:43.791159 21584 solver.cpp:228] Iteration 70400, loss = 0.0841167\nI0820 10:35:43.791203 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 
10:35:43.791218 21584 solver.cpp:244]     Train net output #1: loss = 0.0841168 (* 1 = 0.0841168 loss)\nI0820 10:35:43.894891 21584 sgd_solver.cpp:166] Iteration 70400, lr = 1.76\nI0820 10:38:01.609969 21584 solver.cpp:337] Iteration 70500, Testing net (#0)\nI0820 10:39:25.009632 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87864\nI0820 10:39:25.010022 21584 solver.cpp:404]     Test net output #1: loss = 0.402804 (* 1 = 0.402804 loss)\nI0820 10:39:26.315927 21584 solver.cpp:228] Iteration 70500, loss = 0.118463\nI0820 10:39:26.315973 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 10:39:26.315989 21584 solver.cpp:244]     Train net output #1: loss = 0.118463 (* 1 = 0.118463 loss)\nI0820 10:39:26.415761 21584 sgd_solver.cpp:166] Iteration 70500, lr = 1.7625\nI0820 10:41:44.130864 21584 solver.cpp:337] Iteration 70600, Testing net (#0)\nI0820 10:43:07.494421 21584 solver.cpp:404]     Test net output #0: accuracy = 0.878\nI0820 10:43:07.494792 21584 solver.cpp:404]     Test net output #1: loss = 0.412732 (* 1 = 0.412732 loss)\nI0820 10:43:08.800721 21584 solver.cpp:228] Iteration 70600, loss = 0.124629\nI0820 10:43:08.800767 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 10:43:08.800784 21584 solver.cpp:244]     Train net output #1: loss = 0.124629 (* 1 = 0.124629 loss)\nI0820 10:43:08.908561 21584 sgd_solver.cpp:166] Iteration 70600, lr = 1.765\nI0820 10:45:26.743857 21584 solver.cpp:337] Iteration 70700, Testing net (#0)\nI0820 10:46:50.063787 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8892\nI0820 10:46:50.064091 21584 solver.cpp:404]     Test net output #1: loss = 0.368675 (* 1 = 0.368675 loss)\nI0820 10:46:51.370302 21584 solver.cpp:228] Iteration 70700, loss = 0.19848\nI0820 10:46:51.370347 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 10:46:51.370362 21584 solver.cpp:244]     Train net output #1: loss = 0.198481 (* 1 = 0.198481 loss)\nI0820 10:46:51.467829 21584 
sgd_solver.cpp:166] Iteration 70700, lr = 1.7675\nI0820 10:49:09.215533 21584 solver.cpp:337] Iteration 70800, Testing net (#0)\nI0820 10:50:32.398365 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88804\nI0820 10:50:32.398623 21584 solver.cpp:404]     Test net output #1: loss = 0.376058 (* 1 = 0.376058 loss)\nI0820 10:50:33.704572 21584 solver.cpp:228] Iteration 70800, loss = 0.156239\nI0820 10:50:33.704615 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 10:50:33.704632 21584 solver.cpp:244]     Train net output #1: loss = 0.15624 (* 1 = 0.15624 loss)\nI0820 10:50:33.807596 21584 sgd_solver.cpp:166] Iteration 70800, lr = 1.77\nI0820 10:52:51.565660 21584 solver.cpp:337] Iteration 70900, Testing net (#0)\nI0820 10:54:14.890728 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0820 10:54:14.891049 21584 solver.cpp:404]     Test net output #1: loss = 0.398561 (* 1 = 0.398561 loss)\nI0820 10:54:16.196537 21584 solver.cpp:228] Iteration 70900, loss = 0.143605\nI0820 10:54:16.196578 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 10:54:16.196594 21584 solver.cpp:244]     Train net output #1: loss = 0.143605 (* 1 = 0.143605 loss)\nI0820 10:54:16.296674 21584 sgd_solver.cpp:166] Iteration 70900, lr = 1.7725\nI0820 10:56:34.112612 21584 solver.cpp:337] Iteration 71000, Testing net (#0)\nI0820 10:57:57.479063 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88424\nI0820 10:57:57.479337 21584 solver.cpp:404]     Test net output #1: loss = 0.388619 (* 1 = 0.388619 loss)\nI0820 10:57:58.783962 21584 solver.cpp:228] Iteration 71000, loss = 0.217391\nI0820 10:57:58.784005 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 10:57:58.784019 21584 solver.cpp:244]     Train net output #1: loss = 0.217391 (* 1 = 0.217391 loss)\nI0820 10:57:58.892370 21584 sgd_solver.cpp:166] Iteration 71000, lr = 1.775\nI0820 11:00:16.710621 21584 solver.cpp:337] Iteration 71100, Testing net 
(#0)\nI0820 11:01:40.087504 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88116\nI0820 11:01:40.087834 21584 solver.cpp:404]     Test net output #1: loss = 0.392275 (* 1 = 0.392275 loss)\nI0820 11:01:41.393244 21584 solver.cpp:228] Iteration 71100, loss = 0.129982\nI0820 11:01:41.393286 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 11:01:41.393301 21584 solver.cpp:244]     Train net output #1: loss = 0.129982 (* 1 = 0.129982 loss)\nI0820 11:01:41.499450 21584 sgd_solver.cpp:166] Iteration 71100, lr = 1.7775\nI0820 11:03:59.168196 21584 solver.cpp:337] Iteration 71200, Testing net (#0)\nI0820 11:05:22.520176 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88032\nI0820 11:05:22.520457 21584 solver.cpp:404]     Test net output #1: loss = 0.409238 (* 1 = 0.409238 loss)\nI0820 11:05:23.824772 21584 solver.cpp:228] Iteration 71200, loss = 0.14698\nI0820 11:05:23.824815 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:05:23.824832 21584 solver.cpp:244]     Train net output #1: loss = 0.14698 (* 1 = 0.14698 loss)\nI0820 11:05:23.929003 21584 sgd_solver.cpp:166] Iteration 71200, lr = 1.78\nI0820 11:07:41.706512 21584 solver.cpp:337] Iteration 71300, Testing net (#0)\nI0820 11:09:05.052017 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0820 11:09:05.052291 21584 solver.cpp:404]     Test net output #1: loss = 0.402431 (* 1 = 0.402431 loss)\nI0820 11:09:06.357358 21584 solver.cpp:228] Iteration 71300, loss = 0.102405\nI0820 11:09:06.357401 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:09:06.357416 21584 solver.cpp:244]     Train net output #1: loss = 0.102405 (* 1 = 0.102405 loss)\nI0820 11:09:06.467900 21584 sgd_solver.cpp:166] Iteration 71300, lr = 1.7825\nI0820 11:11:24.220959 21584 solver.cpp:337] Iteration 71400, Testing net (#0)\nI0820 11:12:47.578827 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87728\nI0820 11:12:47.579160 21584 
solver.cpp:404]     Test net output #1: loss = 0.402225 (* 1 = 0.402225 loss)\nI0820 11:12:48.884322 21584 solver.cpp:228] Iteration 71400, loss = 0.197713\nI0820 11:12:48.884366 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:12:48.884382 21584 solver.cpp:244]     Train net output #1: loss = 0.197714 (* 1 = 0.197714 loss)\nI0820 11:12:48.985455 21584 sgd_solver.cpp:166] Iteration 71400, lr = 1.785\nI0820 11:15:06.671296 21584 solver.cpp:337] Iteration 71500, Testing net (#0)\nI0820 11:16:30.041368 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88448\nI0820 11:16:30.041667 21584 solver.cpp:404]     Test net output #1: loss = 0.406486 (* 1 = 0.406486 loss)\nI0820 11:16:31.346495 21584 solver.cpp:228] Iteration 71500, loss = 0.100333\nI0820 11:16:31.346555 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 11:16:31.346575 21584 solver.cpp:244]     Train net output #1: loss = 0.100333 (* 1 = 0.100333 loss)\nI0820 11:16:31.448822 21584 sgd_solver.cpp:166] Iteration 71500, lr = 1.7875\nI0820 11:18:49.244254 21584 solver.cpp:337] Iteration 71600, Testing net (#0)\nI0820 11:20:12.574765 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0820 11:20:12.575084 21584 solver.cpp:404]     Test net output #1: loss = 0.409096 (* 1 = 0.409096 loss)\nI0820 11:20:13.879806 21584 solver.cpp:228] Iteration 71600, loss = 0.156323\nI0820 11:20:13.879851 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:20:13.879868 21584 solver.cpp:244]     Train net output #1: loss = 0.156324 (* 1 = 0.156324 loss)\nI0820 11:20:13.982727 21584 sgd_solver.cpp:166] Iteration 71600, lr = 1.79\nI0820 11:22:31.675364 21584 solver.cpp:337] Iteration 71700, Testing net (#0)\nI0820 11:23:54.502775 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87444\nI0820 11:23:54.503082 21584 solver.cpp:404]     Test net output #1: loss = 0.425754 (* 1 = 0.425754 loss)\nI0820 11:23:55.807670 21584 solver.cpp:228] 
Iteration 71700, loss = 0.189832\nI0820 11:23:55.807713 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:23:55.807729 21584 solver.cpp:244]     Train net output #1: loss = 0.189832 (* 1 = 0.189832 loss)\nI0820 11:23:55.910526 21584 sgd_solver.cpp:166] Iteration 71700, lr = 1.7925\nI0820 11:26:13.659277 21584 solver.cpp:337] Iteration 71800, Testing net (#0)\nI0820 11:27:36.998600 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0820 11:27:36.998915 21584 solver.cpp:404]     Test net output #1: loss = 0.412667 (* 1 = 0.412667 loss)\nI0820 11:27:38.303241 21584 solver.cpp:228] Iteration 71800, loss = 0.150328\nI0820 11:27:38.303297 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 11:27:38.303315 21584 solver.cpp:244]     Train net output #1: loss = 0.150328 (* 1 = 0.150328 loss)\nI0820 11:27:38.406311 21584 sgd_solver.cpp:166] Iteration 71800, lr = 1.795\nI0820 11:29:56.727082 21584 solver.cpp:337] Iteration 71900, Testing net (#0)\nI0820 11:31:19.997895 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0820 11:31:19.998265 21584 solver.cpp:404]     Test net output #1: loss = 0.407138 (* 1 = 0.407138 loss)\nI0820 11:31:21.302716 21584 solver.cpp:228] Iteration 71900, loss = 0.271356\nI0820 11:31:21.302772 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:31:21.302789 21584 solver.cpp:244]     Train net output #1: loss = 0.271356 (* 1 = 0.271356 loss)\nI0820 11:31:21.418102 21584 sgd_solver.cpp:166] Iteration 71900, lr = 1.7975\nI0820 11:33:40.083751 21584 solver.cpp:337] Iteration 72000, Testing net (#0)\nI0820 11:35:03.273267 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88016\nI0820 11:35:03.273547 21584 solver.cpp:404]     Test net output #1: loss = 0.395509 (* 1 = 0.395509 loss)\nI0820 11:35:04.577883 21584 solver.cpp:228] Iteration 72000, loss = 0.162043\nI0820 11:35:04.577939 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 
11:35:04.577955 21584 solver.cpp:244]     Train net output #1: loss = 0.162043 (* 1 = 0.162043 loss)\nI0820 11:35:04.688480 21584 sgd_solver.cpp:166] Iteration 72000, lr = 1.8\nI0820 11:37:23.379395 21584 solver.cpp:337] Iteration 72100, Testing net (#0)\nI0820 11:38:46.658748 21584 solver.cpp:404]     Test net output #0: accuracy = 0.869041\nI0820 11:38:46.659039 21584 solver.cpp:404]     Test net output #1: loss = 0.443492 (* 1 = 0.443492 loss)\nI0820 11:38:47.964057 21584 solver.cpp:228] Iteration 72100, loss = 0.267731\nI0820 11:38:47.964112 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:38:47.964128 21584 solver.cpp:244]     Train net output #1: loss = 0.267731 (* 1 = 0.267731 loss)\nI0820 11:38:48.076491 21584 sgd_solver.cpp:166] Iteration 72100, lr = 1.8025\nI0820 11:41:06.683081 21584 solver.cpp:337] Iteration 72200, Testing net (#0)\nI0820 11:42:30.037657 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8816\nI0820 11:42:30.038013 21584 solver.cpp:404]     Test net output #1: loss = 0.398677 (* 1 = 0.398677 loss)\nI0820 11:42:31.351359 21584 solver.cpp:228] Iteration 72200, loss = 0.137802\nI0820 11:42:31.351413 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:42:31.351436 21584 solver.cpp:244]     Train net output #1: loss = 0.137802 (* 1 = 0.137802 loss)\nI0820 11:42:31.449008 21584 sgd_solver.cpp:166] Iteration 72200, lr = 1.805\nI0820 11:44:49.941540 21584 solver.cpp:337] Iteration 72300, Testing net (#0)\nI0820 11:46:13.267127 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8796\nI0820 11:46:13.267410 21584 solver.cpp:404]     Test net output #1: loss = 0.392027 (* 1 = 0.392027 loss)\nI0820 11:46:14.572481 21584 solver.cpp:228] Iteration 72300, loss = 0.172855\nI0820 11:46:14.572542 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 11:46:14.572568 21584 solver.cpp:244]     Train net output #1: loss = 0.172855 (* 1 = 0.172855 loss)\nI0820 11:46:14.680166 21584 
sgd_solver.cpp:166] Iteration 72300, lr = 1.8075\nI0820 11:48:33.221527 21584 solver.cpp:337] Iteration 72400, Testing net (#0)\nI0820 11:49:56.540683 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88164\nI0820 11:49:56.540971 21584 solver.cpp:404]     Test net output #1: loss = 0.404446 (* 1 = 0.404446 loss)\nI0820 11:49:57.846879 21584 solver.cpp:228] Iteration 72400, loss = 0.176354\nI0820 11:49:57.846933 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 11:49:57.846957 21584 solver.cpp:244]     Train net output #1: loss = 0.176354 (* 1 = 0.176354 loss)\nI0820 11:49:57.953248 21584 sgd_solver.cpp:166] Iteration 72400, lr = 1.81\nI0820 11:52:16.633826 21584 solver.cpp:337] Iteration 72500, Testing net (#0)\nI0820 11:53:39.844696 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88312\nI0820 11:53:39.844964 21584 solver.cpp:404]     Test net output #1: loss = 0.391588 (* 1 = 0.391588 loss)\nI0820 11:53:41.150986 21584 solver.cpp:228] Iteration 72500, loss = 0.18968\nI0820 11:53:41.151043 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 11:53:41.151068 21584 solver.cpp:244]     Train net output #1: loss = 0.18968 (* 1 = 0.18968 loss)\nI0820 11:53:41.263538 21584 sgd_solver.cpp:166] Iteration 72500, lr = 1.8125\nI0820 11:56:00.005779 21584 solver.cpp:337] Iteration 72600, Testing net (#0)\nI0820 11:57:23.314558 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87916\nI0820 11:57:23.314888 21584 solver.cpp:404]     Test net output #1: loss = 0.410696 (* 1 = 0.410696 loss)\nI0820 11:57:24.620916 21584 solver.cpp:228] Iteration 72600, loss = 0.0701539\nI0820 11:57:24.620975 21584 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI0820 11:57:24.621002 21584 solver.cpp:244]     Train net output #1: loss = 0.0701539 (* 1 = 0.0701539 loss)\nI0820 11:57:24.730515 21584 sgd_solver.cpp:166] Iteration 72600, lr = 1.815\nI0820 11:59:43.357509 21584 solver.cpp:337] Iteration 72700, Testing net 
(#0)\nI0820 12:01:06.654945 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88068\nI0820 12:01:06.655316 21584 solver.cpp:404]     Test net output #1: loss = 0.389958 (* 1 = 0.389958 loss)\nI0820 12:01:07.961479 21584 solver.cpp:228] Iteration 72700, loss = 0.192774\nI0820 12:01:07.961529 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 12:01:07.961551 21584 solver.cpp:244]     Train net output #1: loss = 0.192774 (* 1 = 0.192774 loss)\nI0820 12:01:08.067008 21584 sgd_solver.cpp:166] Iteration 72700, lr = 1.8175\nI0820 12:03:26.650038 21584 solver.cpp:337] Iteration 72800, Testing net (#0)\nI0820 12:04:49.799649 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87572\nI0820 12:04:49.799957 21584 solver.cpp:404]     Test net output #1: loss = 0.424994 (* 1 = 0.424994 loss)\nI0820 12:04:51.104682 21584 solver.cpp:228] Iteration 72800, loss = 0.0975729\nI0820 12:04:51.104729 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 12:04:51.104746 21584 solver.cpp:244]     Train net output #1: loss = 0.0975729 (* 1 = 0.0975729 loss)\nI0820 12:04:51.217269 21584 sgd_solver.cpp:166] Iteration 72800, lr = 1.82\nI0820 12:07:09.717506 21584 solver.cpp:337] Iteration 72900, Testing net (#0)\nI0820 12:08:32.560355 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87332\nI0820 12:08:32.560611 21584 solver.cpp:404]     Test net output #1: loss = 0.415874 (* 1 = 0.415874 loss)\nI0820 12:08:33.865049 21584 solver.cpp:228] Iteration 72900, loss = 0.199458\nI0820 12:08:33.865103 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:08:33.865120 21584 solver.cpp:244]     Train net output #1: loss = 0.199458 (* 1 = 0.199458 loss)\nI0820 12:08:33.977970 21584 sgd_solver.cpp:166] Iteration 72900, lr = 1.8225\nI0820 12:10:52.622305 21584 solver.cpp:337] Iteration 73000, Testing net (#0)\nI0820 12:12:15.513682 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88028\nI0820 12:12:15.514029 21584 
solver.cpp:404]     Test net output #1: loss = 0.407639 (* 1 = 0.407639 loss)\nI0820 12:12:16.818867 21584 solver.cpp:228] Iteration 73000, loss = 0.151698\nI0820 12:12:16.818909 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:12:16.818925 21584 solver.cpp:244]     Train net output #1: loss = 0.151698 (* 1 = 0.151698 loss)\nI0820 12:12:16.929347 21584 sgd_solver.cpp:166] Iteration 73000, lr = 1.825\nI0820 12:14:35.463991 21584 solver.cpp:337] Iteration 73100, Testing net (#0)\nI0820 12:15:58.815897 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87544\nI0820 12:15:58.816182 21584 solver.cpp:404]     Test net output #1: loss = 0.418849 (* 1 = 0.418849 loss)\nI0820 12:16:00.122113 21584 solver.cpp:228] Iteration 73100, loss = 0.245921\nI0820 12:16:00.122174 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 12:16:00.122191 21584 solver.cpp:244]     Train net output #1: loss = 0.245921 (* 1 = 0.245921 loss)\nI0820 12:16:00.229112 21584 sgd_solver.cpp:166] Iteration 73100, lr = 1.8275\nI0820 12:18:18.715665 21584 solver.cpp:337] Iteration 73200, Testing net (#0)\nI0820 12:19:42.066435 21584 solver.cpp:404]     Test net output #0: accuracy = 0.882681\nI0820 12:19:42.066792 21584 solver.cpp:404]     Test net output #1: loss = 0.405959 (* 1 = 0.405959 loss)\nI0820 12:19:43.372517 21584 solver.cpp:228] Iteration 73200, loss = 0.145609\nI0820 12:19:43.372576 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:19:43.372594 21584 solver.cpp:244]     Train net output #1: loss = 0.145608 (* 1 = 0.145608 loss)\nI0820 12:19:43.481750 21584 sgd_solver.cpp:166] Iteration 73200, lr = 1.83\nI0820 12:22:02.111210 21584 solver.cpp:337] Iteration 73300, Testing net (#0)\nI0820 12:23:25.449851 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88516\nI0820 12:23:25.450145 21584 solver.cpp:404]     Test net output #1: loss = 0.390291 (* 1 = 0.390291 loss)\nI0820 12:23:26.755769 21584 solver.cpp:228] 
Iteration 73300, loss = 0.247967\nI0820 12:23:26.755821 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 12:23:26.755843 21584 solver.cpp:244]     Train net output #1: loss = 0.247966 (* 1 = 0.247966 loss)\nI0820 12:23:26.872043 21584 sgd_solver.cpp:166] Iteration 73300, lr = 1.8325\nI0820 12:25:45.555474 21584 solver.cpp:337] Iteration 73400, Testing net (#0)\nI0820 12:27:08.902500 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88208\nI0820 12:27:08.902787 21584 solver.cpp:404]     Test net output #1: loss = 0.392172 (* 1 = 0.392172 loss)\nI0820 12:27:10.207459 21584 solver.cpp:228] Iteration 73400, loss = 0.0947774\nI0820 12:27:10.207511 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 12:27:10.207527 21584 solver.cpp:244]     Train net output #1: loss = 0.0947773 (* 1 = 0.0947773 loss)\nI0820 12:27:10.321153 21584 sgd_solver.cpp:166] Iteration 73400, lr = 1.835\nI0820 12:29:28.884233 21584 solver.cpp:337] Iteration 73500, Testing net (#0)\nI0820 12:30:52.192795 21584 solver.cpp:404]     Test net output #0: accuracy = 0.872\nI0820 12:30:52.193068 21584 solver.cpp:404]     Test net output #1: loss = 0.430141 (* 1 = 0.430141 loss)\nI0820 12:30:53.498769 21584 solver.cpp:228] Iteration 73500, loss = 0.183663\nI0820 12:30:53.498829 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:30:53.498850 21584 solver.cpp:244]     Train net output #1: loss = 0.183663 (* 1 = 0.183663 loss)\nI0820 12:30:53.609833 21584 sgd_solver.cpp:166] Iteration 73500, lr = 1.8375\nI0820 12:33:12.266741 21584 solver.cpp:337] Iteration 73600, Testing net (#0)\nI0820 12:34:35.507598 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8732\nI0820 12:34:35.507907 21584 solver.cpp:404]     Test net output #1: loss = 0.426729 (* 1 = 0.426729 loss)\nI0820 12:34:36.812960 21584 solver.cpp:228] Iteration 73600, loss = 0.247415\nI0820 12:34:36.813019 21584 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI0820 
12:34:36.813036 21584 solver.cpp:244]     Train net output #1: loss = 0.247414 (* 1 = 0.247414 loss)\nI0820 12:34:36.923601 21584 sgd_solver.cpp:166] Iteration 73600, lr = 1.84\nI0820 12:36:55.549520 21584 solver.cpp:337] Iteration 73700, Testing net (#0)\nI0820 12:38:18.632401 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8746\nI0820 12:38:18.632701 21584 solver.cpp:404]     Test net output #1: loss = 0.417025 (* 1 = 0.417025 loss)\nI0820 12:38:19.938473 21584 solver.cpp:228] Iteration 73700, loss = 0.222081\nI0820 12:38:19.938534 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 12:38:19.938550 21584 solver.cpp:244]     Train net output #1: loss = 0.222081 (* 1 = 0.222081 loss)\nI0820 12:38:20.046959 21584 sgd_solver.cpp:166] Iteration 73700, lr = 1.8425\nI0820 12:40:38.639101 21584 solver.cpp:337] Iteration 73800, Testing net (#0)\nI0820 12:42:01.747476 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87704\nI0820 12:42:01.747786 21584 solver.cpp:404]     Test net output #1: loss = 0.413796 (* 1 = 0.413796 loss)\nI0820 12:42:03.053339 21584 solver.cpp:228] Iteration 73800, loss = 0.143824\nI0820 12:42:03.053397 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:42:03.053414 21584 solver.cpp:244]     Train net output #1: loss = 0.143824 (* 1 = 0.143824 loss)\nI0820 12:42:03.162099 21584 sgd_solver.cpp:166] Iteration 73800, lr = 1.845\nI0820 12:44:21.783687 21584 solver.cpp:337] Iteration 73900, Testing net (#0)\nI0820 12:45:45.130113 21584 solver.cpp:404]     Test net output #0: accuracy = 0.865841\nI0820 12:45:45.130393 21584 solver.cpp:404]     Test net output #1: loss = 0.461328 (* 1 = 0.461328 loss)\nI0820 12:45:46.434973 21584 solver.cpp:228] Iteration 73900, loss = 0.183669\nI0820 12:45:46.435025 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 12:45:46.435041 21584 solver.cpp:244]     Train net output #1: loss = 0.183669 (* 1 = 0.183669 loss)\nI0820 12:45:46.545634 
21584 sgd_solver.cpp:166] Iteration 73900, lr = 1.8475\nI0820 12:48:05.239300 21584 solver.cpp:337] Iteration 74000, Testing net (#0)\nI0820 12:49:28.581979 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8786\nI0820 12:49:28.582269 21584 solver.cpp:404]     Test net output #1: loss = 0.405501 (* 1 = 0.405501 loss)\nI0820 12:49:29.885973 21584 solver.cpp:228] Iteration 74000, loss = 0.154324\nI0820 12:49:29.886029 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 12:49:29.886045 21584 solver.cpp:244]     Train net output #1: loss = 0.154324 (* 1 = 0.154324 loss)\nI0820 12:49:29.997164 21584 sgd_solver.cpp:166] Iteration 74000, lr = 1.85\nI0820 12:51:48.572952 21584 solver.cpp:337] Iteration 74100, Testing net (#0)\nI0820 12:53:11.761741 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88084\nI0820 12:53:11.762137 21584 solver.cpp:404]     Test net output #1: loss = 0.393047 (* 1 = 0.393047 loss)\nI0820 12:53:13.066699 21584 solver.cpp:228] Iteration 74100, loss = 0.121033\nI0820 12:53:13.066758 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 12:53:13.066774 21584 solver.cpp:244]     Train net output #1: loss = 0.121033 (* 1 = 0.121033 loss)\nI0820 12:53:13.180373 21584 sgd_solver.cpp:166] Iteration 74100, lr = 1.8525\nI0820 12:55:31.693336 21584 solver.cpp:337] Iteration 74200, Testing net (#0)\nI0820 12:56:54.122321 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88356\nI0820 12:56:54.122678 21584 solver.cpp:404]     Test net output #1: loss = 0.396469 (* 1 = 0.396469 loss)\nI0820 12:56:55.425091 21584 solver.cpp:228] Iteration 74200, loss = 0.194767\nI0820 12:56:55.425134 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 12:56:55.425150 21584 solver.cpp:244]     Train net output #1: loss = 0.194767 (* 1 = 0.194767 loss)\nI0820 12:56:55.536130 21584 sgd_solver.cpp:166] Iteration 74200, lr = 1.855\nI0820 12:59:13.982519 21584 solver.cpp:337] Iteration 74300, Testing net 
(#0)\nI0820 13:00:36.408155 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87472\nI0820 13:00:36.408511 21584 solver.cpp:404]     Test net output #1: loss = 0.419814 (* 1 = 0.419814 loss)\nI0820 13:00:37.711109 21584 solver.cpp:228] Iteration 74300, loss = 0.294804\nI0820 13:00:37.711153 21584 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 13:00:37.711169 21584 solver.cpp:244]     Train net output #1: loss = 0.294804 (* 1 = 0.294804 loss)\nI0820 13:00:37.830448 21584 sgd_solver.cpp:166] Iteration 74300, lr = 1.8575\nI0820 13:02:56.113718 21584 solver.cpp:337] Iteration 74400, Testing net (#0)\nI0820 13:04:18.515826 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87988\nI0820 13:04:18.516183 21584 solver.cpp:404]     Test net output #1: loss = 0.397151 (* 1 = 0.397151 loss)\nI0820 13:04:19.818696 21584 solver.cpp:228] Iteration 74400, loss = 0.169283\nI0820 13:04:19.818740 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:04:19.818755 21584 solver.cpp:244]     Train net output #1: loss = 0.169282 (* 1 = 0.169282 loss)\nI0820 13:04:19.920704 21584 sgd_solver.cpp:166] Iteration 74400, lr = 1.86\nI0820 13:06:37.506490 21584 solver.cpp:337] Iteration 74500, Testing net (#0)\nI0820 13:07:59.904100 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88224\nI0820 13:07:59.904458 21584 solver.cpp:404]     Test net output #1: loss = 0.3936 (* 1 = 0.3936 loss)\nI0820 13:08:01.207168 21584 solver.cpp:228] Iteration 74500, loss = 0.153302\nI0820 13:08:01.207208 21584 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI0820 13:08:01.207223 21584 solver.cpp:244]     Train net output #1: loss = 0.153302 (* 1 = 0.153302 loss)\nI0820 13:08:01.312068 21584 sgd_solver.cpp:166] Iteration 74500, lr = 1.8625\nI0820 13:10:18.908185 21584 solver.cpp:337] Iteration 74600, Testing net (#0)\nI0820 13:11:41.308719 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87316\nI0820 13:11:41.309063 21584 
solver.cpp:404]     Test net output #1: loss = 0.428122 (* 1 = 0.428122 loss)\nI0820 13:11:42.611727 21584 solver.cpp:228] Iteration 74600, loss = 0.0964502\nI0820 13:11:42.611769 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 13:11:42.611784 21584 solver.cpp:244]     Train net output #1: loss = 0.0964501 (* 1 = 0.0964501 loss)\nI0820 13:11:42.717425 21584 sgd_solver.cpp:166] Iteration 74600, lr = 1.865\nI0820 13:14:00.317040 21584 solver.cpp:337] Iteration 74700, Testing net (#0)\nI0820 13:15:22.722517 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88212\nI0820 13:15:22.722868 21584 solver.cpp:404]     Test net output #1: loss = 0.38609 (* 1 = 0.38609 loss)\nI0820 13:15:24.025480 21584 solver.cpp:228] Iteration 74700, loss = 0.199129\nI0820 13:15:24.025522 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:15:24.025537 21584 solver.cpp:244]     Train net output #1: loss = 0.199129 (* 1 = 0.199129 loss)\nI0820 13:15:24.131831 21584 sgd_solver.cpp:166] Iteration 74700, lr = 1.8675\nI0820 13:17:41.792137 21584 solver.cpp:337] Iteration 74800, Testing net (#0)\nI0820 13:19:04.194757 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87436\nI0820 13:19:04.195119 21584 solver.cpp:404]     Test net output #1: loss = 0.42456 (* 1 = 0.42456 loss)\nI0820 13:19:05.497611 21584 solver.cpp:228] Iteration 74800, loss = 0.172342\nI0820 13:19:05.497654 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 13:19:05.497669 21584 solver.cpp:244]     Train net output #1: loss = 0.172342 (* 1 = 0.172342 loss)\nI0820 13:19:05.609473 21584 sgd_solver.cpp:166] Iteration 74800, lr = 1.87\nI0820 13:21:23.149173 21584 solver.cpp:337] Iteration 74900, Testing net (#0)\nI0820 13:22:45.545018 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88432\nI0820 13:22:45.545375 21584 solver.cpp:404]     Test net output #1: loss = 0.380495 (* 1 = 0.380495 loss)\nI0820 13:22:46.847365 21584 solver.cpp:228] 
Iteration 74900, loss = 0.218751\nI0820 13:22:46.847405 21584 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 13:22:46.847420 21584 solver.cpp:244]     Train net output #1: loss = 0.218751 (* 1 = 0.218751 loss)\nI0820 13:22:46.961477 21584 sgd_solver.cpp:166] Iteration 74900, lr = 1.8725\nI0820 13:25:05.240947 21584 solver.cpp:337] Iteration 75000, Testing net (#0)\nI0820 13:26:27.637866 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87636\nI0820 13:26:27.638211 21584 solver.cpp:404]     Test net output #1: loss = 0.423482 (* 1 = 0.423482 loss)\nI0820 13:26:28.940364 21584 solver.cpp:228] Iteration 75000, loss = 0.230122\nI0820 13:26:28.940409 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:26:28.940424 21584 solver.cpp:244]     Train net output #1: loss = 0.230122 (* 1 = 0.230122 loss)\nI0820 13:26:29.058462 21584 sgd_solver.cpp:166] Iteration 75000, lr = 1.875\nI0820 13:28:47.351649 21584 solver.cpp:337] Iteration 75100, Testing net (#0)\nI0820 13:30:09.741531 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87984\nI0820 13:30:09.741884 21584 solver.cpp:404]     Test net output #1: loss = 0.404761 (* 1 = 0.404761 loss)\nI0820 13:30:11.043998 21584 solver.cpp:228] Iteration 75100, loss = 0.202892\nI0820 13:30:11.044045 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:30:11.044060 21584 solver.cpp:244]     Train net output #1: loss = 0.202892 (* 1 = 0.202892 loss)\nI0820 13:30:11.161304 21584 sgd_solver.cpp:166] Iteration 75100, lr = 1.8775\nI0820 13:32:29.519557 21584 solver.cpp:337] Iteration 75200, Testing net (#0)\nI0820 13:33:51.920356 21584 solver.cpp:404]     Test net output #0: accuracy = 0.89524\nI0820 13:33:51.920724 21584 solver.cpp:404]     Test net output #1: loss = 0.354624 (* 1 = 0.354624 loss)\nI0820 13:33:53.223961 21584 solver.cpp:228] Iteration 75200, loss = 0.224156\nI0820 13:33:53.224009 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 
13:33:53.224025 21584 solver.cpp:244]     Train net output #1: loss = 0.224156 (* 1 = 0.224156 loss)\nI0820 13:33:53.332816 21584 sgd_solver.cpp:166] Iteration 75200, lr = 1.88\nI0820 13:36:11.711786 21584 solver.cpp:337] Iteration 75300, Testing net (#0)\nI0820 13:37:34.113777 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87552\nI0820 13:37:34.114135 21584 solver.cpp:404]     Test net output #1: loss = 0.408214 (* 1 = 0.408214 loss)\nI0820 13:37:35.416630 21584 solver.cpp:228] Iteration 75300, loss = 0.238986\nI0820 13:37:35.416672 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 13:37:35.416688 21584 solver.cpp:244]     Train net output #1: loss = 0.238986 (* 1 = 0.238986 loss)\nI0820 13:37:35.530887 21584 sgd_solver.cpp:166] Iteration 75300, lr = 1.8825\nI0820 13:39:53.889045 21584 solver.cpp:337] Iteration 75400, Testing net (#0)\nI0820 13:41:16.292281 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87112\nI0820 13:41:16.292639 21584 solver.cpp:404]     Test net output #1: loss = 0.437924 (* 1 = 0.437924 loss)\nI0820 13:41:17.595283 21584 solver.cpp:228] Iteration 75400, loss = 0.273585\nI0820 13:41:17.595317 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 13:41:17.595332 21584 solver.cpp:244]     Train net output #1: loss = 0.273585 (* 1 = 0.273585 loss)\nI0820 13:41:17.704509 21584 sgd_solver.cpp:166] Iteration 75400, lr = 1.885\nI0820 13:43:36.039314 21584 solver.cpp:337] Iteration 75500, Testing net (#0)\nI0820 13:44:58.437607 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88128\nI0820 13:44:58.437971 21584 solver.cpp:404]     Test net output #1: loss = 0.383083 (* 1 = 0.383083 loss)\nI0820 13:44:59.740169 21584 solver.cpp:228] Iteration 75500, loss = 0.160271\nI0820 13:44:59.740211 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 13:44:59.740226 21584 solver.cpp:244]     Train net output #1: loss = 0.160271 (* 1 = 0.160271 loss)\nI0820 13:44:59.849320 
21584 sgd_solver.cpp:166] Iteration 75500, lr = 1.8875\nI0820 13:47:18.276795 21584 solver.cpp:337] Iteration 75600, Testing net (#0)\nI0820 13:48:40.676342 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87612\nI0820 13:48:40.676704 21584 solver.cpp:404]     Test net output #1: loss = 0.398988 (* 1 = 0.398988 loss)\nI0820 13:48:41.978919 21584 solver.cpp:228] Iteration 75600, loss = 0.208373\nI0820 13:48:41.978953 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:48:41.978968 21584 solver.cpp:244]     Train net output #1: loss = 0.208373 (* 1 = 0.208373 loss)\nI0820 13:48:42.090065 21584 sgd_solver.cpp:166] Iteration 75600, lr = 1.89\nI0820 13:51:00.454432 21584 solver.cpp:337] Iteration 75700, Testing net (#0)\nI0820 13:52:22.796680 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87852\nI0820 13:52:22.797052 21584 solver.cpp:404]     Test net output #1: loss = 0.388526 (* 1 = 0.388526 loss)\nI0820 13:52:24.099998 21584 solver.cpp:228] Iteration 75700, loss = 0.217622\nI0820 13:52:24.100035 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 13:52:24.100051 21584 solver.cpp:244]     Train net output #1: loss = 0.217622 (* 1 = 0.217622 loss)\nI0820 13:52:24.209147 21584 sgd_solver.cpp:166] Iteration 75700, lr = 1.8925\nI0820 13:54:42.699137 21584 solver.cpp:337] Iteration 75800, Testing net (#0)\nI0820 13:56:04.994931 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8758\nI0820 13:56:04.995307 21584 solver.cpp:404]     Test net output #1: loss = 0.407028 (* 1 = 0.407028 loss)\nI0820 13:56:06.297469 21584 solver.cpp:228] Iteration 75800, loss = 0.137884\nI0820 13:56:06.297510 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 13:56:06.297526 21584 solver.cpp:244]     Train net output #1: loss = 0.137884 (* 1 = 0.137884 loss)\nI0820 13:56:06.412003 21584 sgd_solver.cpp:166] Iteration 75800, lr = 1.895\nI0820 13:58:24.894708 21584 solver.cpp:337] Iteration 75900, Testing net 
(#0)\nI0820 13:59:47.189169 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88036\nI0820 13:59:47.189527 21584 solver.cpp:404]     Test net output #1: loss = 0.409371 (* 1 = 0.409371 loss)\nI0820 13:59:48.492537 21584 solver.cpp:228] Iteration 75900, loss = 0.178293\nI0820 13:59:48.492578 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 13:59:48.492594 21584 solver.cpp:244]     Train net output #1: loss = 0.178293 (* 1 = 0.178293 loss)\nI0820 13:59:48.606750 21584 sgd_solver.cpp:166] Iteration 75900, lr = 1.8975\nI0820 14:02:06.883986 21584 solver.cpp:337] Iteration 76000, Testing net (#0)\nI0820 14:03:29.181481 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8798\nI0820 14:03:29.181828 21584 solver.cpp:404]     Test net output #1: loss = 0.394 (* 1 = 0.394 loss)\nI0820 14:03:30.484531 21584 solver.cpp:228] Iteration 76000, loss = 0.2573\nI0820 14:03:30.484572 21584 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI0820 14:03:30.484588 21584 solver.cpp:244]     Train net output #1: loss = 0.2573 (* 1 = 0.2573 loss)\nI0820 14:03:30.600375 21584 sgd_solver.cpp:166] Iteration 76000, lr = 1.9\nI0820 14:05:48.997300 21584 solver.cpp:337] Iteration 76100, Testing net (#0)\nI0820 14:07:11.296227 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8794\nI0820 14:07:11.296584 21584 solver.cpp:404]     Test net output #1: loss = 0.407259 (* 1 = 0.407259 loss)\nI0820 14:07:12.598312 21584 solver.cpp:228] Iteration 76100, loss = 0.0966657\nI0820 14:07:12.598352 21584 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI0820 14:07:12.598368 21584 solver.cpp:244]     Train net output #1: loss = 0.0966656 (* 1 = 0.0966656 loss)\nI0820 14:07:12.710209 21584 sgd_solver.cpp:166] Iteration 76100, lr = 1.9025\nI0820 14:09:31.088918 21584 solver.cpp:337] Iteration 76200, Testing net (#0)\nI0820 14:10:53.380224 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87884\nI0820 14:10:53.380581 21584 solver.cpp:404]  
   Test net output #1: loss = 0.412655 (* 1 = 0.412655 loss)\nI0820 14:10:54.681865 21584 solver.cpp:228] Iteration 76200, loss = 0.142861\nI0820 14:10:54.681898 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:10:54.681913 21584 solver.cpp:244]     Train net output #1: loss = 0.142861 (* 1 = 0.142861 loss)\nI0820 14:10:54.795110 21584 sgd_solver.cpp:166] Iteration 76200, lr = 1.905\nI0820 14:13:13.144901 21584 solver.cpp:337] Iteration 76300, Testing net (#0)\nI0820 14:14:35.445919 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88052\nI0820 14:14:35.446290 21584 solver.cpp:404]     Test net output #1: loss = 0.396509 (* 1 = 0.396509 loss)\nI0820 14:14:36.749177 21584 solver.cpp:228] Iteration 76300, loss = 0.177501\nI0820 14:14:36.749209 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:14:36.749223 21584 solver.cpp:244]     Train net output #1: loss = 0.177501 (* 1 = 0.177501 loss)\nI0820 14:14:36.864976 21584 sgd_solver.cpp:166] Iteration 76300, lr = 1.9075\nI0820 14:16:55.307095 21584 solver.cpp:337] Iteration 76400, Testing net (#0)\nI0820 14:18:17.644062 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87792\nI0820 14:18:17.644421 21584 solver.cpp:404]     Test net output #1: loss = 0.400517 (* 1 = 0.400517 loss)\nI0820 14:18:18.947549 21584 solver.cpp:228] Iteration 76400, loss = 0.164929\nI0820 14:18:18.947590 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 14:18:18.947607 21584 solver.cpp:244]     Train net output #1: loss = 0.164929 (* 1 = 0.164929 loss)\nI0820 14:18:19.057039 21584 sgd_solver.cpp:166] Iteration 76400, lr = 1.91\nI0820 14:20:37.560369 21584 solver.cpp:337] Iteration 76500, Testing net (#0)\nI0820 14:21:59.899976 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88628\nI0820 14:21:59.900351 21584 solver.cpp:404]     Test net output #1: loss = 0.381869 (* 1 = 0.381869 loss)\nI0820 14:22:01.203433 21584 solver.cpp:228] Iteration 76500, loss 
= 0.182438\nI0820 14:22:01.203474 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 14:22:01.203490 21584 solver.cpp:244]     Train net output #1: loss = 0.182438 (* 1 = 0.182438 loss)\nI0820 14:22:01.317226 21584 sgd_solver.cpp:166] Iteration 76500, lr = 1.9125\nI0820 14:24:19.851953 21584 solver.cpp:337] Iteration 76600, Testing net (#0)\nI0820 14:25:42.194802 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8788\nI0820 14:25:42.195171 21584 solver.cpp:404]     Test net output #1: loss = 0.384765 (* 1 = 0.384765 loss)\nI0820 14:25:43.497254 21584 solver.cpp:228] Iteration 76600, loss = 0.215349\nI0820 14:25:43.497292 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:25:43.497308 21584 solver.cpp:244]     Train net output #1: loss = 0.215349 (* 1 = 0.215349 loss)\nI0820 14:25:43.609681 21584 sgd_solver.cpp:166] Iteration 76600, lr = 1.915\nI0820 14:28:02.047343 21584 solver.cpp:337] Iteration 76700, Testing net (#0)\nI0820 14:29:24.456583 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8646\nI0820 14:29:24.456903 21584 solver.cpp:404]     Test net output #1: loss = 0.428343 (* 1 = 0.428343 loss)\nI0820 14:29:25.759472 21584 solver.cpp:228] Iteration 76700, loss = 0.220184\nI0820 14:29:25.759512 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 14:29:25.759528 21584 solver.cpp:244]     Train net output #1: loss = 0.220184 (* 1 = 0.220184 loss)\nI0820 14:29:25.872035 21584 sgd_solver.cpp:166] Iteration 76700, lr = 1.9175\nI0820 14:31:43.866354 21584 solver.cpp:337] Iteration 76800, Testing net (#0)\nI0820 14:33:07.028949 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88172\nI0820 14:33:07.029340 21584 solver.cpp:404]     Test net output #1: loss = 0.393579 (* 1 = 0.393579 loss)\nI0820 14:33:08.335072 21584 solver.cpp:228] Iteration 76800, loss = 0.160418\nI0820 14:33:08.335119 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:33:08.335135 21584 
solver.cpp:244]     Train net output #1: loss = 0.160418 (* 1 = 0.160418 loss)\nI0820 14:33:08.441326 21584 sgd_solver.cpp:166] Iteration 76800, lr = 1.92\nI0820 14:35:26.369609 21584 solver.cpp:337] Iteration 76900, Testing net (#0)\nI0820 14:36:48.794796 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87548\nI0820 14:36:48.795161 21584 solver.cpp:404]     Test net output #1: loss = 0.405244 (* 1 = 0.405244 loss)\nI0820 14:36:50.096686 21584 solver.cpp:228] Iteration 76900, loss = 0.163198\nI0820 14:36:50.096735 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 14:36:50.096751 21584 solver.cpp:244]     Train net output #1: loss = 0.163198 (* 1 = 0.163198 loss)\nI0820 14:36:50.212188 21584 sgd_solver.cpp:166] Iteration 76900, lr = 1.9225\nI0820 14:39:08.199275 21584 solver.cpp:337] Iteration 77000, Testing net (#0)\nI0820 14:40:30.602737 21584 solver.cpp:404]     Test net output #0: accuracy = 0.888\nI0820 14:40:30.603096 21584 solver.cpp:404]     Test net output #1: loss = 0.373113 (* 1 = 0.373113 loss)\nI0820 14:40:31.903970 21584 solver.cpp:228] Iteration 77000, loss = 0.148679\nI0820 14:40:31.904016 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 14:40:31.904031 21584 solver.cpp:244]     Train net output #1: loss = 0.148679 (* 1 = 0.148679 loss)\nI0820 14:40:32.018189 21584 sgd_solver.cpp:166] Iteration 77000, lr = 1.925\nI0820 14:42:50.116179 21584 solver.cpp:337] Iteration 77100, Testing net (#0)\nI0820 14:44:12.579416 21584 solver.cpp:404]     Test net output #0: accuracy = 0.881121\nI0820 14:44:12.579780 21584 solver.cpp:404]     Test net output #1: loss = 0.390473 (* 1 = 0.390473 loss)\nI0820 14:44:13.882025 21584 solver.cpp:228] Iteration 77100, loss = 0.166939\nI0820 14:44:13.882061 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 14:44:13.882076 21584 solver.cpp:244]     Train net output #1: loss = 0.166939 (* 1 = 0.166939 loss)\nI0820 14:44:13.992776 21584 sgd_solver.cpp:166] 
Iteration 77100, lr = 1.9275\nI0820 14:46:32.029203 21584 solver.cpp:337] Iteration 77200, Testing net (#0)\nI0820 14:47:54.498395 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87668\nI0820 14:47:54.498761 21584 solver.cpp:404]     Test net output #1: loss = 0.405289 (* 1 = 0.405289 loss)\nI0820 14:47:55.800973 21584 solver.cpp:228] Iteration 77200, loss = 0.186216\nI0820 14:47:55.801007 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 14:47:55.801023 21584 solver.cpp:244]     Train net output #1: loss = 0.186216 (* 1 = 0.186216 loss)\nI0820 14:47:55.910415 21584 sgd_solver.cpp:166] Iteration 77200, lr = 1.93\nI0820 14:50:13.934876 21584 solver.cpp:337] Iteration 77300, Testing net (#0)\nI0820 14:51:36.403122 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88388\nI0820 14:51:36.403486 21584 solver.cpp:404]     Test net output #1: loss = 0.377114 (* 1 = 0.377114 loss)\nI0820 14:51:37.706210 21584 solver.cpp:228] Iteration 77300, loss = 0.185316\nI0820 14:51:37.706255 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 14:51:37.706270 21584 solver.cpp:244]     Train net output #1: loss = 0.185316 (* 1 = 0.185316 loss)\nI0820 14:51:37.818210 21584 sgd_solver.cpp:166] Iteration 77300, lr = 1.9325\nI0820 14:53:55.827353 21584 solver.cpp:337] Iteration 77400, Testing net (#0)\nI0820 14:55:18.275804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8776\nI0820 14:55:18.276162 21584 solver.cpp:404]     Test net output #1: loss = 0.403494 (* 1 = 0.403494 loss)\nI0820 14:55:19.579143 21584 solver.cpp:228] Iteration 77400, loss = 0.143925\nI0820 14:55:19.579190 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 14:55:19.579206 21584 solver.cpp:244]     Train net output #1: loss = 0.143925 (* 1 = 0.143925 loss)\nI0820 14:55:19.689483 21584 sgd_solver.cpp:166] Iteration 77400, lr = 1.935\nI0820 14:57:37.705549 21584 solver.cpp:337] Iteration 77500, Testing net (#0)\nI0820 
14:59:00.187696 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88132\nI0820 14:59:00.188040 21584 solver.cpp:404]     Test net output #1: loss = 0.399079 (* 1 = 0.399079 loss)\nI0820 14:59:01.491224 21584 solver.cpp:228] Iteration 77500, loss = 0.172039\nI0820 14:59:01.491268 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 14:59:01.491284 21584 solver.cpp:244]     Train net output #1: loss = 0.172039 (* 1 = 0.172039 loss)\nI0820 14:59:01.603258 21584 sgd_solver.cpp:166] Iteration 77500, lr = 1.9375\nI0820 15:01:19.620290 21584 solver.cpp:337] Iteration 77600, Testing net (#0)\nI0820 15:02:42.077504 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8804\nI0820 15:02:42.077877 21584 solver.cpp:404]     Test net output #1: loss = 0.410211 (* 1 = 0.410211 loss)\nI0820 15:02:43.381778 21584 solver.cpp:228] Iteration 77600, loss = 0.233664\nI0820 15:02:43.381811 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 15:02:43.381826 21584 solver.cpp:244]     Train net output #1: loss = 0.233663 (* 1 = 0.233663 loss)\nI0820 15:02:43.499560 21584 sgd_solver.cpp:166] Iteration 77600, lr = 1.94\nI0820 15:05:01.535328 21584 solver.cpp:337] Iteration 77700, Testing net (#0)\nI0820 15:06:23.993579 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8844\nI0820 15:06:23.993942 21584 solver.cpp:404]     Test net output #1: loss = 0.386145 (* 1 = 0.386145 loss)\nI0820 15:06:25.297760 21584 solver.cpp:228] Iteration 77700, loss = 0.221918\nI0820 15:06:25.297794 21584 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 15:06:25.297809 21584 solver.cpp:244]     Train net output #1: loss = 0.221918 (* 1 = 0.221918 loss)\nI0820 15:06:25.415168 21584 sgd_solver.cpp:166] Iteration 77700, lr = 1.9425\nI0820 15:08:43.424847 21584 solver.cpp:337] Iteration 77800, Testing net (#0)\nI0820 15:10:05.880524 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87812\nI0820 15:10:05.880877 21584 solver.cpp:404]     
Test net output #1: loss = 0.417389 (* 1 = 0.417389 loss)\nI0820 15:10:07.184095 21584 solver.cpp:228] Iteration 77800, loss = 0.199826\nI0820 15:10:07.184139 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:10:07.184155 21584 solver.cpp:244]     Train net output #1: loss = 0.199826 (* 1 = 0.199826 loss)\nI0820 15:10:07.290479 21584 sgd_solver.cpp:166] Iteration 77800, lr = 1.945\nI0820 15:12:25.291178 21584 solver.cpp:337] Iteration 77900, Testing net (#0)\nI0820 15:13:47.740229 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88076\nI0820 15:13:47.740597 21584 solver.cpp:404]     Test net output #1: loss = 0.386663 (* 1 = 0.386663 loss)\nI0820 15:13:49.042747 21584 solver.cpp:228] Iteration 77900, loss = 0.211357\nI0820 15:13:49.042791 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:13:49.042806 21584 solver.cpp:244]     Train net output #1: loss = 0.211357 (* 1 = 0.211357 loss)\nI0820 15:13:49.152693 21584 sgd_solver.cpp:166] Iteration 77900, lr = 1.9475\nI0820 15:16:07.133803 21584 solver.cpp:337] Iteration 78000, Testing net (#0)\nI0820 15:17:29.583451 21584 solver.cpp:404]     Test net output #0: accuracy = 0.884681\nI0820 15:17:29.583815 21584 solver.cpp:404]     Test net output #1: loss = 0.379517 (* 1 = 0.379517 loss)\nI0820 15:17:30.886998 21584 solver.cpp:228] Iteration 78000, loss = 0.203031\nI0820 15:17:30.887044 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:17:30.887059 21584 solver.cpp:244]     Train net output #1: loss = 0.203031 (* 1 = 0.203031 loss)\nI0820 15:17:30.991400 21584 sgd_solver.cpp:166] Iteration 78000, lr = 1.95\nI0820 15:19:48.909939 21584 solver.cpp:337] Iteration 78100, Testing net (#0)\nI0820 15:21:11.360882 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87876\nI0820 15:21:11.361230 21584 solver.cpp:404]     Test net output #1: loss = 0.425131 (* 1 = 0.425131 loss)\nI0820 15:21:12.664067 21584 solver.cpp:228] Iteration 78100, loss = 
0.11625\nI0820 15:21:12.664111 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 15:21:12.664127 21584 solver.cpp:244]     Train net output #1: loss = 0.11625 (* 1 = 0.11625 loss)\nI0820 15:21:12.776216 21584 sgd_solver.cpp:166] Iteration 78100, lr = 1.9525\nI0820 15:23:30.763942 21584 solver.cpp:337] Iteration 78200, Testing net (#0)\nI0820 15:24:53.231349 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87944\nI0820 15:24:53.231699 21584 solver.cpp:404]     Test net output #1: loss = 0.412225 (* 1 = 0.412225 loss)\nI0820 15:24:54.535054 21584 solver.cpp:228] Iteration 78200, loss = 0.273388\nI0820 15:24:54.535097 21584 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI0820 15:24:54.535114 21584 solver.cpp:244]     Train net output #1: loss = 0.273388 (* 1 = 0.273388 loss)\nI0820 15:24:54.641561 21584 sgd_solver.cpp:166] Iteration 78200, lr = 1.955\nI0820 15:27:12.624744 21584 solver.cpp:337] Iteration 78300, Testing net (#0)\nI0820 15:28:35.172219 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87268\nI0820 15:28:35.172590 21584 solver.cpp:404]     Test net output #1: loss = 0.410166 (* 1 = 0.410166 loss)\nI0820 15:28:36.476574 21584 solver.cpp:228] Iteration 78300, loss = 0.259285\nI0820 15:28:36.476621 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 15:28:36.476645 21584 solver.cpp:244]     Train net output #1: loss = 0.259285 (* 1 = 0.259285 loss)\nI0820 15:28:36.582165 21584 sgd_solver.cpp:166] Iteration 78300, lr = 1.9575\nI0820 15:30:54.405253 21584 solver.cpp:337] Iteration 78400, Testing net (#0)\nI0820 15:32:16.974058 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87968\nI0820 15:32:16.974432 21584 solver.cpp:404]     Test net output #1: loss = 0.393082 (* 1 = 0.393082 loss)\nI0820 15:32:18.278023 21584 solver.cpp:228] Iteration 78400, loss = 0.127502\nI0820 15:32:18.278061 21584 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI0820 15:32:18.278084 21584 
solver.cpp:244]     Train net output #1: loss = 0.127502 (* 1 = 0.127502 loss)\nI0820 15:32:18.386667 21584 sgd_solver.cpp:166] Iteration 78400, lr = 1.96\nI0820 15:34:36.383054 21584 solver.cpp:337] Iteration 78500, Testing net (#0)\nI0820 15:35:58.852952 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87188\nI0820 15:35:58.853317 21584 solver.cpp:404]     Test net output #1: loss = 0.42161 (* 1 = 0.42161 loss)\nI0820 15:36:00.155563 21584 solver.cpp:228] Iteration 78500, loss = 0.217011\nI0820 15:36:00.155607 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 15:36:00.155624 21584 solver.cpp:244]     Train net output #1: loss = 0.217011 (* 1 = 0.217011 loss)\nI0820 15:36:00.269238 21584 sgd_solver.cpp:166] Iteration 78500, lr = 1.9625\nI0820 15:38:18.214639 21584 solver.cpp:337] Iteration 78600, Testing net (#0)\nI0820 15:39:40.683918 21584 solver.cpp:404]     Test net output #0: accuracy = 0.8676\nI0820 15:39:40.684262 21584 solver.cpp:404]     Test net output #1: loss = 0.457014 (* 1 = 0.457014 loss)\nI0820 15:39:41.986485 21584 solver.cpp:228] Iteration 78600, loss = 0.277004\nI0820 15:39:41.986519 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 15:39:41.986534 21584 solver.cpp:244]     Train net output #1: loss = 0.277004 (* 1 = 0.277004 loss)\nI0820 15:39:42.094825 21584 sgd_solver.cpp:166] Iteration 78600, lr = 1.965\nI0820 15:42:00.022519 21584 solver.cpp:337] Iteration 78700, Testing net (#0)\nI0820 15:43:22.493347 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87016\nI0820 15:43:22.493696 21584 solver.cpp:404]     Test net output #1: loss = 0.417526 (* 1 = 0.417526 loss)\nI0820 15:43:23.796036 21584 solver.cpp:228] Iteration 78700, loss = 0.143598\nI0820 15:43:23.796077 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:43:23.796092 21584 solver.cpp:244]     Train net output #1: loss = 0.143598 (* 1 = 0.143598 loss)\nI0820 15:43:23.906401 21584 sgd_solver.cpp:166] 
Iteration 78700, lr = 1.9675\nI0820 15:45:41.881851 21584 solver.cpp:337] Iteration 78800, Testing net (#0)\nI0820 15:47:04.354903 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87544\nI0820 15:47:04.355267 21584 solver.cpp:404]     Test net output #1: loss = 0.415557 (* 1 = 0.415557 loss)\nI0820 15:47:05.657781 21584 solver.cpp:228] Iteration 78800, loss = 0.223682\nI0820 15:47:05.657814 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 15:47:05.657827 21584 solver.cpp:244]     Train net output #1: loss = 0.223682 (* 1 = 0.223682 loss)\nI0820 15:47:05.771140 21584 sgd_solver.cpp:166] Iteration 78800, lr = 1.97\nI0820 15:49:23.829987 21584 solver.cpp:337] Iteration 78900, Testing net (#0)\nI0820 15:50:46.280694 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87848\nI0820 15:50:46.281059 21584 solver.cpp:404]     Test net output #1: loss = 0.389474 (* 1 = 0.389474 loss)\nI0820 15:50:47.582919 21584 solver.cpp:228] Iteration 78900, loss = 0.227679\nI0820 15:50:47.582960 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 15:50:47.582975 21584 solver.cpp:244]     Train net output #1: loss = 0.227679 (* 1 = 0.227679 loss)\nI0820 15:50:47.694229 21584 sgd_solver.cpp:166] Iteration 78900, lr = 1.9725\nI0820 15:53:05.713261 21584 solver.cpp:337] Iteration 79000, Testing net (#0)\nI0820 15:54:28.159804 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87608\nI0820 15:54:28.160156 21584 solver.cpp:404]     Test net output #1: loss = 0.408505 (* 1 = 0.408505 loss)\nI0820 15:54:29.462970 21584 solver.cpp:228] Iteration 79000, loss = 0.142508\nI0820 15:54:29.463011 21584 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI0820 15:54:29.463026 21584 solver.cpp:244]     Train net output #1: loss = 0.142508 (* 1 = 0.142508 loss)\nI0820 15:54:29.574055 21584 sgd_solver.cpp:166] Iteration 79000, lr = 1.975\nI0820 15:56:47.571300 21584 solver.cpp:337] Iteration 79100, Testing net (#0)\nI0820 
15:58:10.077067 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86696\nI0820 15:58:10.077425 21584 solver.cpp:404]     Test net output #1: loss = 0.455258 (* 1 = 0.455258 loss)\nI0820 15:58:11.379752 21584 solver.cpp:228] Iteration 79100, loss = 0.144564\nI0820 15:58:11.379792 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 15:58:11.379808 21584 solver.cpp:244]     Train net output #1: loss = 0.144564 (* 1 = 0.144564 loss)\nI0820 15:58:11.494473 21584 sgd_solver.cpp:166] Iteration 79100, lr = 1.9775\nI0820 16:00:29.448418 21584 solver.cpp:337] Iteration 79200, Testing net (#0)\nI0820 16:01:51.957453 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87332\nI0820 16:01:51.957816 21584 solver.cpp:404]     Test net output #1: loss = 0.428943 (* 1 = 0.428943 loss)\nI0820 16:01:53.261032 21584 solver.cpp:228] Iteration 79200, loss = 0.219891\nI0820 16:01:53.261077 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 16:01:53.261103 21584 solver.cpp:244]     Train net output #1: loss = 0.219891 (* 1 = 0.219891 loss)\nI0820 16:01:53.366426 21584 sgd_solver.cpp:166] Iteration 79200, lr = 1.98\nI0820 16:04:11.236062 21584 solver.cpp:337] Iteration 79300, Testing net (#0)\nI0820 16:05:33.755825 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87444\nI0820 16:05:33.756191 21584 solver.cpp:404]     Test net output #1: loss = 0.427324 (* 1 = 0.427324 loss)\nI0820 16:05:35.059525 21584 solver.cpp:228] Iteration 79300, loss = 0.225979\nI0820 16:05:35.059566 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 16:05:35.059592 21584 solver.cpp:244]     Train net output #1: loss = 0.225979 (* 1 = 0.225979 loss)\nI0820 16:05:35.171486 21584 sgd_solver.cpp:166] Iteration 79300, lr = 1.9825\nI0820 16:07:53.094043 21584 solver.cpp:337] Iteration 79400, Testing net (#0)\nI0820 16:09:15.491088 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87712\nI0820 16:09:15.491449 21584 solver.cpp:404]  
   Test net output #1: loss = 0.420469 (* 1 = 0.420469 loss)\nI0820 16:09:16.792589 21584 solver.cpp:228] Iteration 79400, loss = 0.229035\nI0820 16:09:16.792630 21584 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI0820 16:09:16.792654 21584 solver.cpp:244]     Train net output #1: loss = 0.229035 (* 1 = 0.229035 loss)\nI0820 16:09:16.901523 21584 sgd_solver.cpp:166] Iteration 79400, lr = 1.985\nI0820 16:11:34.784994 21584 solver.cpp:337] Iteration 79500, Testing net (#0)\nI0820 16:12:57.199908 21584 solver.cpp:404]     Test net output #0: accuracy = 0.88152\nI0820 16:12:57.200274 21584 solver.cpp:404]     Test net output #1: loss = 0.381791 (* 1 = 0.381791 loss)\nI0820 16:12:58.503679 21584 solver.cpp:228] Iteration 79500, loss = 0.185096\nI0820 16:12:58.503723 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 16:12:58.503747 21584 solver.cpp:244]     Train net output #1: loss = 0.185095 (* 1 = 0.185095 loss)\nI0820 16:12:58.612609 21584 sgd_solver.cpp:166] Iteration 79500, lr = 1.9875\nI0820 16:15:16.560621 21584 solver.cpp:337] Iteration 79600, Testing net (#0)\nI0820 16:16:38.965159 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87064\nI0820 16:16:38.965533 21584 solver.cpp:404]     Test net output #1: loss = 0.422081 (* 1 = 0.422081 loss)\nI0820 16:16:40.268115 21584 solver.cpp:228] Iteration 79600, loss = 0.193476\nI0820 16:16:40.268158 21584 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI0820 16:16:40.268182 21584 solver.cpp:244]     Train net output #1: loss = 0.193476 (* 1 = 0.193476 loss)\nI0820 16:16:40.376971 21584 sgd_solver.cpp:166] Iteration 79600, lr = 1.99\nI0820 16:18:58.292942 21584 solver.cpp:337] Iteration 79700, Testing net (#0)\nI0820 16:20:20.731287 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87252\nI0820 16:20:20.731660 21584 solver.cpp:404]     Test net output #1: loss = 0.411552 (* 1 = 0.411552 loss)\nI0820 16:20:22.034292 21584 solver.cpp:228] Iteration 79700, loss 
= 0.155805\nI0820 16:20:22.034337 21584 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI0820 16:20:22.034361 21584 solver.cpp:244]     Train net output #1: loss = 0.155805 (* 1 = 0.155805 loss)\nI0820 16:20:22.146366 21584 sgd_solver.cpp:166] Iteration 79700, lr = 1.9925\nI0820 16:22:40.047261 21584 solver.cpp:337] Iteration 79800, Testing net (#0)\nI0820 16:24:02.410078 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87972\nI0820 16:24:02.410465 21584 solver.cpp:404]     Test net output #1: loss = 0.396468 (* 1 = 0.396468 loss)\nI0820 16:24:03.712977 21584 solver.cpp:228] Iteration 79800, loss = 0.16333\nI0820 16:24:03.713016 21584 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI0820 16:24:03.713032 21584 solver.cpp:244]     Train net output #1: loss = 0.16333 (* 1 = 0.16333 loss)\nI0820 16:24:03.820668 21584 sgd_solver.cpp:166] Iteration 79800, lr = 1.995\nI0820 16:26:21.757997 21584 solver.cpp:337] Iteration 79900, Testing net (#0)\nI0820 16:27:44.112790 21584 solver.cpp:404]     Test net output #0: accuracy = 0.87828\nI0820 16:27:44.113160 21584 solver.cpp:404]     Test net output #1: loss = 0.422215 (* 1 = 0.422215 loss)\nI0820 16:27:45.415858 21584 solver.cpp:228] Iteration 79900, loss = 0.215535\nI0820 16:27:45.415899 21584 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI0820 16:27:45.415913 21584 solver.cpp:244]     Train net output #1: loss = 0.215535 (* 1 = 0.215535 loss)\nI0820 16:27:45.526944 21584 sgd_solver.cpp:166] Iteration 79900, lr = 1.9975\nI0820 16:30:03.549568 21584 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range2SS80kRes56wd1_iter_80000.caffemodel\nI0820 16:30:03.768965 21584 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range2SS80kRes56wd1_iter_80000.solverstate\nI0820 16:30:04.203821 21584 solver.cpp:317] Iteration 80000, loss = 0.274747\nI0820 16:30:04.203860 21584 solver.cpp:337] Iteration 80000, Testing net (#0)\nI0820 
16:31:26.508566 21584 solver.cpp:404]     Test net output #0: accuracy = 0.86756\nI0820 16:31:26.508942 21584 solver.cpp:404]     Test net output #1: loss = 0.450921 (* 1 = 0.450921 loss)\nI0820 16:31:26.508955 21584 solver.cpp:322] Optimization Done.\nI0820 16:31:31.835294 21584 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range3Cifar100kFig8",
    "content": "I0817 16:13:04.798003 17318 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI0817 16:13:04.800588 17318 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI0817 16:13:04.801780 17318 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI0817 16:13:04.802978 17318 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI0817 16:13:04.804165 17318 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI0817 16:13:04.805366 17318 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI0817 16:13:04.806571 17318 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI0817 16:13:04.808010 17318 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI0817 16:13:04.809514 17318 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI0817 16:13:05.228298 17318 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range3Cifar100kFig8\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI0817 16:13:05.231408 17318 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI0817 16:13:05.242219 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:05.242283 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:05.243314 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI0817 16:13:05.243366 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI0817 16:13:05.243378 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI0817 16:13:05.243388 17318 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:13:05.243398 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:13:05.243407 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:13:05.243417 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:13:05.243425 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:13:05.243435 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:13:05.243444 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:13:05.243454 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:13:05.243463 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:13:05.243472 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:13:05.243481 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:13:05.243491 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:13:05.243499 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:13:05.243508 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:13:05.243517 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI0817 
16:13:05.243526 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:13:05.243535 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:13:05.243559 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:13:05.243568 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:13:05.243582 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:13:05.243592 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:13:05.243602 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:13:05.243609 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:13:05.243618 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:13:05.243626 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:13:05.243635 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:13:05.243644 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:13:05.243654 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:13:05.243662 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:13:05.243671 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI0817 16:13:05.243680 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:13:05.243688 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:13:05.243697 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:13:05.243706 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:13:05.243716 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:13:05.243724 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:13:05.243733 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:13:05.243746 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:13:05.243754 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:13:05.243763 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:13:05.243772 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:13:05.243782 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:13:05.243790 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:13:05.243799 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI0817 16:13:05.243808 17318 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:13:05.243816 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:13:05.243824 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:13:05.243834 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:13:05.243856 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:13:05.243867 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:13:05.243876 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:13:05.243886 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:13:05.243894 17318 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:13:05.245606 17318 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b\nI0817 16:13:05.247547 17318 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:13:05.248682 17318 net.cpp:100] Creating Layer dataLayer\nI0817 16:13:05.248739 17318 net.cpp:408] dataLayer -> data_top\nI0817 16:13:05.248913 17318 net.cpp:408] dataLayer -> label\nI0817 16:13:05.249001 17318 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:13:05.262596 17323 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_train_lmdb\nI0817 16:13:05.282021 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:05.289261 17318 net.cpp:150] Setting up dataLayer\nI0817 16:13:05.289324 17318 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:13:05.289336 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.289342 17318 net.cpp:165] Memory required for data: 1536500\nI0817 16:13:05.289357 17318 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:13:05.289376 17318 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:13:05.289384 17318 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:13:05.289402 17318 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:13:05.289418 17318 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:13:05.289503 17318 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:13:05.289516 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.289523 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.289528 17318 net.cpp:165] Memory required for data: 1537500\nI0817 16:13:05.289535 17318 layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:13:05.289595 17318 
net.cpp:100] Creating Layer pre_conv\nI0817 16:13:05.289608 17318 net.cpp:434] pre_conv <- data_top\nI0817 16:13:05.289620 17318 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:13:05.291853 17318 net.cpp:150] Setting up pre_conv\nI0817 16:13:05.291873 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.291879 17318 net.cpp:165] Memory required for data: 9729500\nI0817 16:13:05.291939 17318 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:13:05.292007 17318 net.cpp:100] Creating Layer pre_bn\nI0817 16:13:05.292019 17318 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:13:05.292032 17318 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:13:05.292688 17324 blocking_queue.cpp:50] Waiting for data\nI0817 16:13:05.292724 17318 net.cpp:150] Setting up pre_bn\nI0817 16:13:05.292742 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.292748 17318 net.cpp:165] Memory required for data: 17921500\nI0817 16:13:05.292765 17318 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:13:05.292821 17318 net.cpp:100] Creating Layer pre_scale\nI0817 16:13:05.292834 17318 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:13:05.292850 17318 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:13:05.293015 17318 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:13:05.293267 17318 net.cpp:150] Setting up pre_scale\nI0817 16:13:05.293282 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.293288 17318 net.cpp:165] Memory required for data: 26113500\nI0817 16:13:05.293299 17318 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:13:05.293344 17318 net.cpp:100] Creating Layer pre_relu\nI0817 16:13:05.293355 17318 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:13:05.293365 17318 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:13:05.293375 17318 net.cpp:150] Setting up pre_relu\nI0817 16:13:05.293382 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.293387 17318 net.cpp:165] Memory required for data: 
34305500\nI0817 16:13:05.293392 17318 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:13:05.293400 17318 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:13:05.293404 17318 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:13:05.293414 17318 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:13:05.293426 17318 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:13:05.293473 17318 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:13:05.293488 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.293495 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.293500 17318 net.cpp:165] Memory required for data: 50689500\nI0817 16:13:05.293505 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:13:05.293517 17318 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:13:05.293524 17318 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:13:05.293532 17318 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:13:05.293846 17318 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:13:05.293861 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.293867 17318 net.cpp:165] Memory required for data: 58881500\nI0817 16:13:05.293879 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:13:05.293895 17318 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:13:05.293900 17318 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:13:05.293911 17318 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:13:05.294140 17318 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:13:05.294154 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.294159 17318 net.cpp:165] Memory required for data: 67073500\nI0817 16:13:05.294170 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:13:05.294185 17318 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:13:05.294191 17318 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:13:05.294199 17318 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.294248 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:13:05.294387 17318 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:13:05.294399 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.294405 17318 net.cpp:165] Memory required for data: 75265500\nI0817 16:13:05.294414 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:13:05.294430 17318 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:13:05.294436 17318 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:13:05.294445 17318 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.294456 17318 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:13:05.294462 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.294467 17318 net.cpp:165] Memory required for data: 83457500\nI0817 16:13:05.294472 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:13:05.294487 17318 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:13:05.294493 17318 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:13:05.294502 17318 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:13:05.294821 17318 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:13:05.294836 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.294847 17318 net.cpp:165] Memory required for data: 91649500\nI0817 16:13:05.294857 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:13:05.294870 17318 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:13:05.294876 17318 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:13:05.294884 17318 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:13:05.295120 17318 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:13:05.295132 17318 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295137 17318 net.cpp:165] Memory required for data: 99841500\nI0817 16:13:05.295151 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:13:05.295164 17318 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:13:05.295171 17318 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:13:05.295181 17318 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.295235 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:13:05.295390 17318 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:13:05.295403 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295409 17318 net.cpp:165] Memory required for data: 108033500\nI0817 16:13:05.295418 17318 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:13:05.295470 17318 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:13:05.295482 17318 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:13:05.295490 17318 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:13:05.295498 17318 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:13:05.295572 17318 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:13:05.295585 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295591 17318 net.cpp:165] Memory required for data: 116225500\nI0817 16:13:05.295598 17318 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:13:05.295605 17318 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:13:05.295611 17318 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:13:05.295624 17318 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.295634 17318 net.cpp:150] Setting up L1_b1_relu\nI0817 16:13:05.295642 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295647 17318 net.cpp:165] Memory required for data: 124417500\nI0817 16:13:05.295652 17318 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.295661 17318 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.295666 17318 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:13:05.295675 17318 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:13:05.295682 17318 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:13:05.295727 17318 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.295739 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295747 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.295758 17318 net.cpp:165] Memory required for data: 140801500\nI0817 16:13:05.295763 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:13:05.295778 17318 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:13:05.295784 17318 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:13:05.295794 17318 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:13:05.296104 17318 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:13:05.296118 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.296124 17318 net.cpp:165] Memory required for data: 148993500\nI0817 16:13:05.296134 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:13:05.296144 17318 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:13:05.296149 17318 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:13:05.296161 17318 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:13:05.296404 17318 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:13:05.296418 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.296423 17318 net.cpp:165] Memory required for data: 157185500\nI0817 16:13:05.296433 17318 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:13:05.296442 17318 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:13:05.296448 17318 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:13:05.296455 17318 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.296509 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:13:05.296648 17318 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:13:05.296661 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.296666 17318 net.cpp:165] Memory required for data: 165377500\nI0817 16:13:05.296675 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:13:05.296684 17318 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:13:05.296689 17318 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:13:05.296700 17318 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.296710 17318 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:13:05.296716 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.296721 17318 net.cpp:165] Memory required for data: 173569500\nI0817 16:13:05.296726 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:13:05.296737 17318 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:13:05.296742 17318 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:13:05.296753 17318 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:13:05.297062 17318 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:13:05.297077 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297082 17318 net.cpp:165] Memory required for data: 181761500\nI0817 16:13:05.297091 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:13:05.297101 17318 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:13:05.297106 17318 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:13:05.297117 17318 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:13:05.297358 17318 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:13:05.297374 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297379 17318 net.cpp:165] Memory required for data: 189953500\nI0817 16:13:05.297394 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:13:05.297404 17318 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:13:05.297408 17318 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:13:05.297420 17318 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.297472 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:13:05.297607 17318 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:13:05.297623 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297628 17318 net.cpp:165] Memory required for data: 198145500\nI0817 16:13:05.297637 17318 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:13:05.297652 17318 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:13:05.297659 17318 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:13:05.297665 17318 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:13:05.297673 17318 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:13:05.297708 17318 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:13:05.297719 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297724 17318 net.cpp:165] Memory required for data: 206337500\nI0817 16:13:05.297729 17318 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:13:05.297737 17318 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:13:05.297742 17318 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:13:05.297749 17318 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.297758 17318 net.cpp:150] Setting up L1_b2_relu\nI0817 16:13:05.297765 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297770 17318 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:13:05.297775 17318 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.297782 17318 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.297787 17318 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:13:05.297797 17318 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:13:05.297807 17318 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:13:05.297855 17318 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.297868 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297874 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.297879 17318 net.cpp:165] Memory required for data: 230913500\nI0817 16:13:05.297884 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:13:05.297899 17318 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:13:05.297905 17318 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:13:05.297914 17318 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:13:05.298219 17318 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:13:05.298233 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.298238 17318 net.cpp:165] Memory required for data: 239105500\nI0817 16:13:05.298247 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:13:05.298259 17318 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:13:05.298265 17318 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:13:05.298274 17318 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:13:05.298509 17318 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:13:05.298522 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.298528 17318 net.cpp:165] Memory required for data: 
247297500\nI0817 16:13:05.298539 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:13:05.298547 17318 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:13:05.298553 17318 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:13:05.298563 17318 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.298615 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:13:05.298756 17318 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:13:05.298773 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.298779 17318 net.cpp:165] Memory required for data: 255489500\nI0817 16:13:05.298786 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:13:05.298795 17318 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:13:05.298800 17318 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:13:05.298807 17318 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.298817 17318 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:13:05.298831 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.298836 17318 net.cpp:165] Memory required for data: 263681500\nI0817 16:13:05.298846 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:13:05.298862 17318 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:13:05.298868 17318 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:13:05.298879 17318 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:13:05.299186 17318 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:13:05.299201 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299206 17318 net.cpp:165] Memory required for data: 271873500\nI0817 16:13:05.299214 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:13:05.299232 17318 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:13:05.299238 17318 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:13:05.299247 17318 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:13:05.299479 17318 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:13:05.299492 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299497 17318 net.cpp:165] Memory required for data: 280065500\nI0817 16:13:05.299507 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:13:05.299516 17318 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:13:05.299522 17318 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:13:05.299532 17318 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.299583 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:13:05.299716 17318 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:13:05.299732 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299737 17318 net.cpp:165] Memory required for data: 288257500\nI0817 16:13:05.299746 17318 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:13:05.299756 17318 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:13:05.299760 17318 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:13:05.299767 17318 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:13:05.299778 17318 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:13:05.299808 17318 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:13:05.299818 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299823 17318 net.cpp:165] Memory required for data: 296449500\nI0817 16:13:05.299827 17318 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:13:05.299839 17318 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:13:05.299862 17318 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:13:05.299870 17318 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.299880 17318 net.cpp:150] Setting up L1_b3_relu\nI0817 16:13:05.299887 17318 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:13:05.299892 17318 net.cpp:165] Memory required for data: 304641500\nI0817 16:13:05.299897 17318 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.299907 17318 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.299913 17318 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:13:05.299921 17318 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:13:05.299929 17318 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:13:05.299971 17318 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.299986 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299993 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.299998 17318 net.cpp:165] Memory required for data: 321025500\nI0817 16:13:05.300004 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:13:05.300014 17318 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:13:05.300020 17318 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:13:05.300038 17318 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:13:05.300353 17318 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:13:05.300367 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.300372 17318 net.cpp:165] Memory required for data: 329217500\nI0817 16:13:05.300381 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:13:05.300393 17318 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:13:05.300400 17318 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:13:05.300407 17318 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:13:05.300644 17318 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:13:05.300658 17318 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:13:05.300663 17318 net.cpp:165] Memory required for data: 337409500\nI0817 16:13:05.300673 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:13:05.300683 17318 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:13:05.300688 17318 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:13:05.300698 17318 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.300750 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:13:05.300899 17318 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:13:05.300914 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.300918 17318 net.cpp:165] Memory required for data: 345601500\nI0817 16:13:05.300928 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:13:05.300936 17318 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:13:05.300941 17318 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:13:05.300951 17318 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.300962 17318 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:13:05.300969 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.300974 17318 net.cpp:165] Memory required for data: 353793500\nI0817 16:13:05.300979 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:13:05.300992 17318 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:13:05.300999 17318 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:13:05.301007 17318 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:13:05.301316 17318 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:13:05.301329 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.301336 17318 net.cpp:165] Memory required for data: 361985500\nI0817 16:13:05.301343 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:13:05.301353 17318 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:13:05.301358 17318 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:13:05.301370 17318 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:13:05.301609 17318 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:13:05.301622 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.301627 17318 net.cpp:165] Memory required for data: 370177500\nI0817 16:13:05.301637 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:13:05.301650 17318 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:13:05.301656 17318 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:13:05.301662 17318 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.301717 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:13:05.301864 17318 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:13:05.301878 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.301884 17318 net.cpp:165] Memory required for data: 378369500\nI0817 16:13:05.301893 17318 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:13:05.301903 17318 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:13:05.301908 17318 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:13:05.301914 17318 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:13:05.301925 17318 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:13:05.301968 17318 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:13:05.301978 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.301983 17318 net.cpp:165] Memory required for data: 386561500\nI0817 16:13:05.301988 17318 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:13:05.301996 17318 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:13:05.302001 17318 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:13:05.302008 17318 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.302021 17318 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:13:05.302028 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.302033 17318 net.cpp:165] Memory required for data: 394753500\nI0817 16:13:05.302037 17318 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.302044 17318 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.302050 17318 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:13:05.302057 17318 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:13:05.302067 17318 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:13:05.302111 17318 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.302124 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.302130 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.302135 17318 net.cpp:165] Memory required for data: 411137500\nI0817 16:13:05.302140 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:13:05.302151 17318 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:13:05.302157 17318 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:13:05.302168 17318 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:13:05.302475 17318 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:13:05.302489 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.302495 17318 net.cpp:165] Memory required for data: 419329500\nI0817 16:13:05.302516 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:13:05.302526 17318 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:13:05.302532 17318 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:13:05.302543 17318 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:13:05.302783 17318 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:13:05.302795 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.302800 17318 net.cpp:165] Memory required for data: 427521500\nI0817 16:13:05.302811 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:13:05.302822 17318 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:13:05.302829 17318 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:13:05.302836 17318 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.302896 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:13:05.303036 17318 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:13:05.303050 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.303055 17318 net.cpp:165] Memory required for data: 435713500\nI0817 16:13:05.303063 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:13:05.303072 17318 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:13:05.303078 17318 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:13:05.303088 17318 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.303098 17318 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:13:05.303105 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.303110 17318 net.cpp:165] Memory required for data: 443905500\nI0817 16:13:05.303114 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:13:05.303128 17318 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:13:05.303135 17318 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:13:05.303150 17318 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:13:05.303462 17318 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:13:05.303477 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.303481 17318 net.cpp:165] Memory required for data: 452097500\nI0817 16:13:05.303490 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:13:05.303503 17318 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:13:05.303508 17318 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:13:05.303516 17318 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:13:05.303753 17318 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:13:05.303766 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.303771 17318 net.cpp:165] Memory required for data: 460289500\nI0817 16:13:05.303781 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:13:05.303791 17318 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:13:05.303795 17318 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:13:05.303807 17318 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.303864 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:13:05.304002 17318 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:13:05.304018 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304024 17318 net.cpp:165] Memory required for data: 468481500\nI0817 16:13:05.304033 17318 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:13:05.304042 17318 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:13:05.304049 17318 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:13:05.304055 17318 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:13:05.304062 17318 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:13:05.304095 17318 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:13:05.304107 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304112 17318 net.cpp:165] Memory required for data: 476673500\nI0817 16:13:05.304117 17318 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:13:05.304127 17318 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:13:05.304133 17318 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:13:05.304141 17318 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.304149 17318 net.cpp:150] Setting up L1_b5_relu\nI0817 16:13:05.304157 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304162 17318 net.cpp:165] Memory required for data: 484865500\nI0817 16:13:05.304167 17318 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.304175 17318 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.304181 17318 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:13:05.304188 17318 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:13:05.304198 17318 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:13:05.304239 17318 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.304255 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304261 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304266 17318 net.cpp:165] Memory required for data: 501249500\nI0817 16:13:05.304271 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:13:05.304282 17318 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:13:05.304288 17318 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:13:05.304297 17318 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:13:05.304610 17318 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:13:05.304623 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304628 17318 net.cpp:165] Memory required for data: 509441500\nI0817 16:13:05.304643 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:13:05.304656 17318 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:13:05.304662 17318 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:13:05.304671 17318 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:13:05.304918 17318 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:13:05.304931 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.304936 17318 net.cpp:165] Memory required for data: 517633500\nI0817 16:13:05.304947 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:13:05.304955 17318 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:13:05.304961 17318 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:13:05.304971 17318 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.305023 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:13:05.305166 17318 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:13:05.305179 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.305184 17318 net.cpp:165] Memory required for data: 525825500\nI0817 16:13:05.305193 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:13:05.305202 17318 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:13:05.305207 17318 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:13:05.305215 17318 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.305227 17318 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:13:05.305235 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.305239 17318 net.cpp:165] Memory required for data: 534017500\nI0817 16:13:05.305244 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:13:05.305255 17318 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:13:05.305263 17318 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:13:05.305272 17318 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:13:05.305586 17318 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:13:05.305600 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.305605 17318 net.cpp:165] Memory required for data: 542209500\nI0817 16:13:05.305614 17318 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:13:05.305625 17318 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:13:05.305632 17318 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:13:05.305642 17318 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:13:05.305888 17318 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:13:05.305902 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.305907 17318 net.cpp:165] Memory required for data: 550401500\nI0817 16:13:05.305918 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:13:05.305927 17318 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:13:05.305932 17318 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:13:05.305943 17318 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.305995 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:13:05.306133 17318 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:13:05.306149 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306154 17318 net.cpp:165] Memory required for data: 558593500\nI0817 16:13:05.306164 17318 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:13:05.306180 17318 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:13:05.306186 17318 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:13:05.306195 17318 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:13:05.306202 17318 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:13:05.306236 17318 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:13:05.306247 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306252 17318 net.cpp:165] Memory required for data: 566785500\nI0817 16:13:05.306258 17318 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:13:05.306275 17318 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:13:05.306282 17318 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:13:05.306290 17318 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.306300 17318 net.cpp:150] Setting up L1_b6_relu\nI0817 16:13:05.306308 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306311 17318 net.cpp:165] Memory required for data: 574977500\nI0817 16:13:05.306316 17318 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.306324 17318 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.306329 17318 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:13:05.306335 17318 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:13:05.306345 17318 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:13:05.306391 17318 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.306402 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306409 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306413 17318 net.cpp:165] Memory required for data: 591361500\nI0817 16:13:05.306418 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:13:05.306429 17318 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:13:05.306435 17318 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:13:05.306447 17318 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:13:05.306792 17318 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:13:05.306807 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.306813 17318 net.cpp:165] Memory required for data: 599553500\nI0817 16:13:05.306821 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:13:05.306830 17318 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:13:05.306836 17318 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:13:05.306857 17318 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:13:05.307101 17318 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:13:05.307116 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.307121 17318 net.cpp:165] Memory required for data: 607745500\nI0817 16:13:05.307130 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:13:05.307142 17318 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:13:05.307148 17318 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:13:05.307157 17318 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.307207 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:13:05.307350 17318 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:13:05.307363 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.307369 17318 net.cpp:165] Memory required for data: 615937500\nI0817 16:13:05.307379 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:13:05.307389 17318 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:13:05.307395 17318 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:13:05.307402 17318 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.307412 17318 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:13:05.307420 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.307425 17318 net.cpp:165] Memory required for data: 624129500\nI0817 16:13:05.307430 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:13:05.307445 17318 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:13:05.307451 17318 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:13:05.307461 17318 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:13:05.307770 17318 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:13:05.307783 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.307788 17318 
net.cpp:165] Memory required for data: 632321500\nI0817 16:13:05.307806 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:13:05.307814 17318 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:13:05.307821 17318 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:13:05.307828 17318 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:13:05.308076 17318 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:13:05.308090 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308096 17318 net.cpp:165] Memory required for data: 640513500\nI0817 16:13:05.308106 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:13:05.308117 17318 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:13:05.308125 17318 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:13:05.308131 17318 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.308187 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:13:05.308327 17318 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:13:05.308341 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308346 17318 net.cpp:165] Memory required for data: 648705500\nI0817 16:13:05.308354 17318 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:13:05.308363 17318 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:13:05.308369 17318 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:13:05.308375 17318 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:13:05.308388 17318 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:13:05.308423 17318 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:13:05.308435 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308440 17318 net.cpp:165] Memory required for data: 656897500\nI0817 16:13:05.308445 17318 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:13:05.308454 17318 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:13:05.308459 17318 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:13:05.308468 17318 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.308478 17318 net.cpp:150] Setting up L1_b7_relu\nI0817 16:13:05.308485 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308490 17318 net.cpp:165] Memory required for data: 665089500\nI0817 16:13:05.308495 17318 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.308501 17318 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.308507 17318 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:13:05.308513 17318 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:13:05.308523 17318 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:13:05.308568 17318 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.308580 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308588 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308593 17318 net.cpp:165] Memory required for data: 681473500\nI0817 16:13:05.308598 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:13:05.308607 17318 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:13:05.308614 17318 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:13:05.308625 17318 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:13:05.308951 17318 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:13:05.308965 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.308970 17318 net.cpp:165] Memory required for data: 689665500\nI0817 16:13:05.308979 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:13:05.308989 17318 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:13:05.308995 17318 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:13:05.309006 17318 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:13:05.309260 17318 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:13:05.309274 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.309279 17318 net.cpp:165] Memory required for data: 697857500\nI0817 16:13:05.309289 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:13:05.309301 17318 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:13:05.309307 17318 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:13:05.309314 17318 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.309366 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:13:05.309509 17318 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:13:05.309522 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.309527 17318 net.cpp:165] Memory required for data: 706049500\nI0817 16:13:05.309536 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:13:05.309545 17318 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:13:05.309551 17318 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:13:05.309561 17318 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.309571 17318 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:13:05.309577 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.309581 17318 net.cpp:165] Memory required for data: 714241500\nI0817 16:13:05.309587 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:13:05.309600 17318 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:13:05.309607 17318 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:13:05.309617 17318 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:13:05.309938 17318 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:13:05.309952 17318 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.309958 17318 net.cpp:165] Memory required for data: 722433500\nI0817 16:13:05.309967 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:13:05.309976 17318 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:13:05.309983 17318 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:13:05.309990 17318 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:13:05.310238 17318 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:13:05.310251 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310256 17318 net.cpp:165] Memory required for data: 730625500\nI0817 16:13:05.310266 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:13:05.310281 17318 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:13:05.310287 17318 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:13:05.310295 17318 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.310350 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:13:05.310492 17318 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:13:05.310505 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310510 17318 net.cpp:165] Memory required for data: 738817500\nI0817 16:13:05.310519 17318 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:13:05.310528 17318 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:13:05.310534 17318 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:13:05.310540 17318 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:13:05.310551 17318 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:13:05.310585 17318 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:13:05.310596 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310601 17318 net.cpp:165] Memory required for data: 747009500\nI0817 16:13:05.310607 17318 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:13:05.310614 17318 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:13:05.310621 17318 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:13:05.310627 17318 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.310636 17318 net.cpp:150] Setting up L1_b8_relu\nI0817 16:13:05.310643 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310654 17318 net.cpp:165] Memory required for data: 755201500\nI0817 16:13:05.310659 17318 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.310669 17318 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.310675 17318 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:13:05.310683 17318 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:13:05.310693 17318 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:13:05.310739 17318 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.310750 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310756 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.310761 17318 net.cpp:165] Memory required for data: 771585500\nI0817 16:13:05.310766 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:13:05.310778 17318 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:13:05.310784 17318 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:13:05.310796 17318 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:13:05.311125 17318 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:13:05.311139 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.311144 17318 net.cpp:165] Memory required for data: 779777500\nI0817 16:13:05.311153 17318 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:13:05.311166 17318 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:13:05.311172 17318 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:13:05.311180 17318 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:13:05.311427 17318 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:13:05.311441 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.311446 17318 net.cpp:165] Memory required for data: 787969500\nI0817 16:13:05.311456 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:13:05.311465 17318 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:13:05.311471 17318 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:13:05.311478 17318 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.311533 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:13:05.311681 17318 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:13:05.311694 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.311699 17318 net.cpp:165] Memory required for data: 796161500\nI0817 16:13:05.311708 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:13:05.311717 17318 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:13:05.311722 17318 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:13:05.311733 17318 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.311743 17318 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:13:05.311749 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.311753 17318 net.cpp:165] Memory required for data: 804353500\nI0817 16:13:05.311758 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:13:05.311772 17318 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:13:05.311779 17318 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:13:05.311786 17318 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:13:05.312110 17318 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:13:05.312125 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312130 17318 net.cpp:165] Memory required for data: 812545500\nI0817 16:13:05.312139 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:13:05.312151 17318 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:13:05.312157 17318 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:13:05.312166 17318 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:13:05.312413 17318 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:13:05.312428 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312434 17318 net.cpp:165] Memory required for data: 820737500\nI0817 16:13:05.312465 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:13:05.312476 17318 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:13:05.312484 17318 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:13:05.312490 17318 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.312544 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:13:05.312682 17318 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:13:05.312695 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312700 17318 net.cpp:165] Memory required for data: 828929500\nI0817 16:13:05.312710 17318 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:13:05.312721 17318 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:13:05.312727 17318 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:13:05.312734 17318 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:13:05.312741 17318 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:13:05.312772 17318 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:13:05.312782 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312786 17318 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:13:05.312791 17318 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:13:05.312799 17318 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:13:05.312805 17318 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:13:05.312815 17318 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.312825 17318 net.cpp:150] Setting up L1_b9_relu\nI0817 16:13:05.312832 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312837 17318 net.cpp:165] Memory required for data: 845313500\nI0817 16:13:05.312847 17318 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.312856 17318 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.312862 17318 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:13:05.312873 17318 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:13:05.312885 17318 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:13:05.312930 17318 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.312942 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312949 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.312954 17318 net.cpp:165] Memory required for data: 861697500\nI0817 16:13:05.312959 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:13:05.312969 17318 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:13:05.312975 17318 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:13:05.312988 17318 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:13:05.313303 17318 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:13:05.313318 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.313323 17318 net.cpp:165] Memory required for data: 
863745500\nI0817 16:13:05.313331 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:13:05.313343 17318 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:13:05.313349 17318 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:13:05.313357 17318 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:13:05.313594 17318 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:13:05.313606 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.313612 17318 net.cpp:165] Memory required for data: 865793500\nI0817 16:13:05.313622 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:13:05.313637 17318 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:13:05.313649 17318 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:13:05.313658 17318 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.313710 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:13:05.313858 17318 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:13:05.313871 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.313876 17318 net.cpp:165] Memory required for data: 867841500\nI0817 16:13:05.313885 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:13:05.313896 17318 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:13:05.313902 17318 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:13:05.313912 17318 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.313922 17318 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:13:05.313930 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.313933 17318 net.cpp:165] Memory required for data: 869889500\nI0817 16:13:05.313938 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:13:05.313949 17318 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:13:05.313954 17318 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:13:05.313966 17318 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:13:05.314277 17318 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:13:05.314291 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.314296 17318 net.cpp:165] Memory required for data: 871937500\nI0817 16:13:05.314304 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:13:05.314313 17318 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:13:05.314319 17318 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:13:05.314330 17318 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:13:05.314575 17318 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:13:05.314589 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.314594 17318 net.cpp:165] Memory required for data: 873985500\nI0817 16:13:05.314604 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:13:05.314616 17318 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:13:05.314622 17318 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:13:05.314630 17318 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.314683 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:13:05.314826 17318 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:13:05.314839 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.314851 17318 net.cpp:165] Memory required for data: 876033500\nI0817 16:13:05.314860 17318 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:13:05.314874 17318 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:13:05.314882 17318 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:13:05.314890 17318 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:13:05.314975 17318 net.cpp:150] Setting up L2_b1_pool\nI0817 16:13:05.314990 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.314996 17318 net.cpp:165] Memory required for data: 878081500\nI0817 16:13:05.315001 17318 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:13:05.315011 17318 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:13:05.315017 17318 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:13:05.315024 17318 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:13:05.315035 17318 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:13:05.315068 17318 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:13:05.315079 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.315084 17318 net.cpp:165] Memory required for data: 880129500\nI0817 16:13:05.315090 17318 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:13:05.315098 17318 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:13:05.315104 17318 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:13:05.315110 17318 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.315127 17318 net.cpp:150] Setting up L2_b1_relu\nI0817 16:13:05.315135 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.315140 17318 net.cpp:165] Memory required for data: 882177500\nI0817 16:13:05.315145 17318 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:13:05.315193 17318 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:13:05.315207 17318 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:13:05.317520 17318 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:13:05.317539 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.317545 17318 net.cpp:165] Memory required for data: 884225500\nI0817 16:13:05.317551 17318 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:13:05.317561 17318 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:13:05.317569 17318 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:13:05.317575 17318 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:13:05.317586 17318 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:13:05.317662 17318 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:13:05.317680 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.317687 17318 net.cpp:165] Memory required for data: 888321500\nI0817 16:13:05.317692 17318 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.317700 17318 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.317706 17318 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:13:05.317714 17318 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:13:05.317725 17318 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:13:05.317775 17318 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.317787 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.317795 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.317800 17318 net.cpp:165] Memory required for data: 896513500\nI0817 16:13:05.317804 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:13:05.317818 17318 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:13:05.317826 17318 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:13:05.317834 17318 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:13:05.319295 17318 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:13:05.319314 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.319319 17318 net.cpp:165] Memory required for data: 900609500\nI0817 16:13:05.319329 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:13:05.319342 17318 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:13:05.319350 17318 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:13:05.319358 17318 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:13:05.319604 17318 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:13:05.319618 17318 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:13:05.319623 17318 net.cpp:165] Memory required for data: 904705500\nI0817 16:13:05.319634 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:13:05.319646 17318 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:13:05.319653 17318 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:13:05.319660 17318 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.319715 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:13:05.319867 17318 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:13:05.319881 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.319886 17318 net.cpp:165] Memory required for data: 908801500\nI0817 16:13:05.319896 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:13:05.319907 17318 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:13:05.319914 17318 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:13:05.319923 17318 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.319941 17318 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:13:05.319949 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.319954 17318 net.cpp:165] Memory required for data: 912897500\nI0817 16:13:05.319959 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:13:05.319970 17318 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:13:05.319977 17318 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:13:05.319988 17318 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:13:05.320442 17318 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:13:05.320456 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.320461 17318 net.cpp:165] Memory required for data: 916993500\nI0817 16:13:05.320471 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:13:05.320479 17318 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:13:05.320487 
17318 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:13:05.320497 17318 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:13:05.320747 17318 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:13:05.320761 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.320766 17318 net.cpp:165] Memory required for data: 921089500\nI0817 16:13:05.320777 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:13:05.320789 17318 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:13:05.320796 17318 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:13:05.320803 17318 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.320865 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:13:05.321010 17318 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:13:05.321023 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321028 17318 net.cpp:165] Memory required for data: 925185500\nI0817 16:13:05.321038 17318 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:13:05.321050 17318 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:13:05.321056 17318 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:13:05.321064 17318 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:13:05.321071 17318 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:13:05.321101 17318 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:13:05.321110 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321115 17318 net.cpp:165] Memory required for data: 929281500\nI0817 16:13:05.321121 17318 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:13:05.321128 17318 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:13:05.321135 17318 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:13:05.321144 17318 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.321153 17318 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:13:05.321161 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321166 17318 net.cpp:165] Memory required for data: 933377500\nI0817 16:13:05.321171 17318 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.321177 17318 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.321182 17318 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:13:05.321190 17318 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:13:05.321200 17318 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:13:05.321247 17318 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.321259 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321266 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321271 17318 net.cpp:165] Memory required for data: 941569500\nI0817 16:13:05.321276 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:13:05.321295 17318 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:13:05.321301 17318 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:13:05.321313 17318 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:13:05.321769 17318 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:13:05.321784 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.321789 17318 net.cpp:165] Memory required for data: 945665500\nI0817 16:13:05.321797 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:13:05.321806 17318 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:13:05.321812 17318 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:13:05.321823 17318 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:13:05.322072 17318 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:13:05.322085 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.322090 17318 net.cpp:165] Memory required for data: 949761500\nI0817 16:13:05.322101 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:13:05.322113 17318 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:13:05.322119 17318 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:13:05.322127 17318 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.322181 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:13:05.322327 17318 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:13:05.322340 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.322345 17318 net.cpp:165] Memory required for data: 953857500\nI0817 16:13:05.322355 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:13:05.322365 17318 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:13:05.322371 17318 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:13:05.322378 17318 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.322388 17318 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:13:05.322398 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.322403 17318 net.cpp:165] Memory required for data: 957953500\nI0817 16:13:05.322408 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:13:05.322419 17318 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:13:05.322424 17318 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:13:05.322435 17318 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:13:05.322898 17318 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:13:05.322912 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.322918 17318 net.cpp:165] Memory required for data: 962049500\nI0817 16:13:05.322927 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:13:05.322937 17318 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:13:05.322942 17318 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:13:05.322953 17318 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:13:05.323201 17318 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:13:05.323213 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323220 17318 net.cpp:165] Memory required for data: 966145500\nI0817 16:13:05.323230 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:13:05.323241 17318 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:13:05.323247 17318 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:13:05.323254 17318 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.323308 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:13:05.323453 17318 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:13:05.323467 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323472 17318 net.cpp:165] Memory required for data: 970241500\nI0817 16:13:05.323479 17318 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:13:05.323492 17318 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:13:05.323498 17318 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:13:05.323505 17318 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:13:05.323519 17318 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:13:05.323551 17318 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:13:05.323561 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323566 17318 net.cpp:165] Memory required for data: 974337500\nI0817 16:13:05.323571 17318 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:13:05.323592 17318 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:13:05.323598 17318 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:13:05.323606 17318 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.323616 17318 net.cpp:150] Setting up L2_b3_relu\nI0817 16:13:05.323622 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323627 17318 net.cpp:165] Memory required for data: 978433500\nI0817 16:13:05.323632 17318 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.323642 17318 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.323648 17318 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:13:05.323655 17318 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:13:05.323665 17318 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:13:05.323709 17318 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.323724 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323732 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.323736 17318 net.cpp:165] Memory required for data: 986625500\nI0817 16:13:05.323741 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:13:05.323752 17318 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:13:05.323758 17318 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:13:05.323767 17318 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:13:05.324232 17318 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:13:05.324246 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.324252 17318 net.cpp:165] Memory required for data: 990721500\nI0817 16:13:05.324260 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:13:05.324275 17318 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:13:05.324281 17318 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:13:05.324290 17318 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:13:05.324537 17318 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:13:05.324548 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.324554 17318 net.cpp:165] Memory required for data: 994817500\nI0817 16:13:05.324564 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:13:05.324573 17318 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:13:05.324579 17318 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:13:05.324589 17318 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.324645 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:13:05.324792 17318 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:13:05.324805 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.324810 17318 net.cpp:165] Memory required for data: 998913500\nI0817 16:13:05.324820 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:13:05.324827 17318 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:13:05.324833 17318 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:13:05.324848 17318 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.324861 17318 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:13:05.324867 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.324872 17318 net.cpp:165] Memory required for data: 1003009500\nI0817 16:13:05.324877 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:13:05.324898 17318 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:13:05.324904 17318 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:13:05.324913 17318 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:13:05.325374 17318 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:13:05.325388 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.325394 17318 net.cpp:165] Memory required for data: 1007105500\nI0817 16:13:05.325403 17318 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:13:05.325417 17318 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:13:05.325423 17318 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:13:05.325431 17318 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:13:05.325672 17318 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:13:05.325685 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.325690 17318 net.cpp:165] Memory required for data: 1011201500\nI0817 16:13:05.325700 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:13:05.325708 17318 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:13:05.325714 17318 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:13:05.325726 17318 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.325783 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:13:05.325935 17318 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:13:05.325949 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.325954 17318 net.cpp:165] Memory required for data: 1015297500\nI0817 16:13:05.325963 17318 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:13:05.325971 17318 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:13:05.325978 17318 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:13:05.325984 17318 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:13:05.325995 17318 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:13:05.326021 17318 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:13:05.326031 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.326036 17318 net.cpp:165] Memory required for data: 1019393500\nI0817 16:13:05.326041 17318 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:13:05.326048 17318 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:13:05.326057 17318 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:13:05.326064 17318 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.326074 17318 net.cpp:150] Setting up L2_b4_relu\nI0817 16:13:05.326081 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.326086 17318 net.cpp:165] Memory required for data: 1023489500\nI0817 16:13:05.326091 17318 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.326097 17318 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.326102 17318 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:13:05.326112 17318 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:13:05.326122 17318 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:13:05.326166 17318 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.326177 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.326184 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.326189 17318 net.cpp:165] Memory required for data: 1031681500\nI0817 16:13:05.326195 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:13:05.326208 17318 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:13:05.326215 17318 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:13:05.326223 17318 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:13:05.326681 17318 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:13:05.326701 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.326707 17318 net.cpp:165] Memory required for data: 1035777500\nI0817 16:13:05.326716 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:13:05.326728 17318 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:13:05.326735 17318 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:13:05.326742 17318 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:13:05.327000 17318 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:13:05.327014 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.327019 17318 net.cpp:165] Memory required for data: 1039873500\nI0817 16:13:05.327030 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:13:05.327039 17318 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:13:05.327045 17318 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:13:05.327055 17318 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.327111 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:13:05.327256 17318 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:13:05.327270 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.327275 17318 net.cpp:165] Memory required for data: 1043969500\nI0817 16:13:05.327283 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:13:05.327291 17318 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:13:05.327297 17318 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:13:05.327307 17318 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.327317 17318 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:13:05.327324 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.327329 17318 net.cpp:165] Memory required for data: 1048065500\nI0817 16:13:05.327334 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:13:05.327350 17318 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:13:05.327358 17318 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:13:05.327365 17318 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:13:05.327826 17318 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:13:05.327846 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.327852 17318 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:13:05.327862 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:13:05.327873 17318 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:13:05.327880 17318 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:13:05.327888 17318 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:13:05.328131 17318 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:13:05.328143 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328150 17318 net.cpp:165] Memory required for data: 1056257500\nI0817 16:13:05.328160 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:13:05.328168 17318 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:13:05.328174 17318 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:13:05.328181 17318 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.328238 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:13:05.328382 17318 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:13:05.328397 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328403 17318 net.cpp:165] Memory required for data: 1060353500\nI0817 16:13:05.328413 17318 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:13:05.328421 17318 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:13:05.328428 17318 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:13:05.328434 17318 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:13:05.328443 17318 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:13:05.328471 17318 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:13:05.328481 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328492 17318 net.cpp:165] Memory required for data: 1064449500\nI0817 16:13:05.328497 17318 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:13:05.328505 17318 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:13:05.328511 17318 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:13:05.328521 17318 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.328531 17318 net.cpp:150] Setting up L2_b5_relu\nI0817 16:13:05.328537 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328542 17318 net.cpp:165] Memory required for data: 1068545500\nI0817 16:13:05.328547 17318 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.328554 17318 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.328559 17318 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:13:05.328569 17318 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:13:05.328579 17318 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:13:05.328622 17318 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.328634 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328641 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.328645 17318 net.cpp:165] Memory required for data: 1076737500\nI0817 16:13:05.328651 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:13:05.328665 17318 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:13:05.328671 17318 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:13:05.328681 17318 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:13:05.329156 17318 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:13:05.329170 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.329176 17318 net.cpp:165] Memory required for data: 1080833500\nI0817 16:13:05.329185 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:13:05.329197 17318 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:13:05.329203 17318 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:13:05.329211 17318 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:13:05.329461 17318 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:13:05.329474 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.329480 17318 net.cpp:165] Memory required for data: 1084929500\nI0817 16:13:05.329490 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:13:05.329499 17318 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:13:05.329505 17318 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:13:05.329515 17318 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.329571 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:13:05.329715 17318 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:13:05.329727 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.329732 17318 net.cpp:165] Memory required for data: 1089025500\nI0817 16:13:05.329741 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:13:05.329749 17318 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:13:05.329756 17318 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:13:05.329762 17318 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.329771 17318 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:13:05.329778 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.329783 17318 net.cpp:165] Memory required for data: 1093121500\nI0817 16:13:05.329788 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:13:05.329802 17318 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:13:05.329808 17318 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:13:05.329819 17318 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:13:05.330283 17318 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:13:05.330304 17318 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.330309 17318 net.cpp:165] Memory required for data: 1097217500\nI0817 16:13:05.330318 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:13:05.330332 17318 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:13:05.330338 17318 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:13:05.330348 17318 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:13:05.330590 17318 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:13:05.330602 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.330607 17318 net.cpp:165] Memory required for data: 1101313500\nI0817 16:13:05.330617 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:13:05.330626 17318 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:13:05.330631 17318 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:13:05.330638 17318 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.330694 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:13:05.330849 17318 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:13:05.330866 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.330871 17318 net.cpp:165] Memory required for data: 1105409500\nI0817 16:13:05.330881 17318 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:13:05.330890 17318 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:13:05.330896 17318 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:13:05.330904 17318 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:13:05.330910 17318 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:13:05.330941 17318 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:13:05.330951 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.330956 17318 net.cpp:165] Memory required for data: 1109505500\nI0817 16:13:05.330961 17318 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:13:05.330970 17318 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:13:05.330974 17318 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:13:05.330984 17318 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.330994 17318 net.cpp:150] Setting up L2_b6_relu\nI0817 16:13:05.331001 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.331006 17318 net.cpp:165] Memory required for data: 1113601500\nI0817 16:13:05.331010 17318 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.331017 17318 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.331023 17318 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:13:05.331033 17318 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:13:05.331043 17318 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:13:05.331086 17318 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.331097 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.331104 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.331110 17318 net.cpp:165] Memory required for data: 1121793500\nI0817 16:13:05.331115 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:13:05.331130 17318 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:13:05.331135 17318 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:13:05.331145 17318 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:13:05.331606 17318 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:13:05.331620 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.331625 17318 net.cpp:165] Memory required for data: 1125889500\nI0817 16:13:05.331635 17318 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:13:05.331648 17318 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:13:05.331661 17318 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:13:05.331670 17318 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:13:05.331925 17318 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:13:05.331939 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.331944 17318 net.cpp:165] Memory required for data: 1129985500\nI0817 16:13:05.331954 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:13:05.331964 17318 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:13:05.331969 17318 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:13:05.331977 17318 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.332034 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:13:05.332178 17318 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:13:05.332193 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.332200 17318 net.cpp:165] Memory required for data: 1134081500\nI0817 16:13:05.332208 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:13:05.332216 17318 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:13:05.332221 17318 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:13:05.332228 17318 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.332238 17318 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:13:05.332245 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.332250 17318 net.cpp:165] Memory required for data: 1138177500\nI0817 16:13:05.332255 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:13:05.332269 17318 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:13:05.332276 17318 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:13:05.332288 17318 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:13:05.332751 17318 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:13:05.332765 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.332772 17318 net.cpp:165] Memory required for data: 1142273500\nI0817 16:13:05.332779 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:13:05.332792 17318 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:13:05.332798 17318 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:13:05.332809 17318 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:13:05.333067 17318 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:13:05.333081 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333086 17318 net.cpp:165] Memory required for data: 1146369500\nI0817 16:13:05.333097 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:13:05.333106 17318 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:13:05.333112 17318 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:13:05.333119 17318 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.333176 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:13:05.333323 17318 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:13:05.333335 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333340 17318 net.cpp:165] Memory required for data: 1150465500\nI0817 16:13:05.333349 17318 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:13:05.333361 17318 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:13:05.333367 17318 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:13:05.333374 17318 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:13:05.333382 17318 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:13:05.333410 17318 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:13:05.333418 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333423 17318 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:13:05.333428 17318 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:13:05.333439 17318 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:13:05.333446 17318 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:13:05.333458 17318 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.333468 17318 net.cpp:150] Setting up L2_b7_relu\nI0817 16:13:05.333475 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333480 17318 net.cpp:165] Memory required for data: 1158657500\nI0817 16:13:05.333485 17318 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.333492 17318 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.333498 17318 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:13:05.333504 17318 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:13:05.333514 17318 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:13:05.333561 17318 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.333573 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333580 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.333585 17318 net.cpp:165] Memory required for data: 1166849500\nI0817 16:13:05.333590 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:13:05.333603 17318 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:13:05.333611 17318 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:13:05.333619 17318 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:13:05.334098 17318 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:13:05.334112 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.334118 17318 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:13:05.334127 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:13:05.334139 17318 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:13:05.334146 17318 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:13:05.334156 17318 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:13:05.334409 17318 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:13:05.334422 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.334429 17318 net.cpp:165] Memory required for data: 1175041500\nI0817 16:13:05.334439 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:13:05.334446 17318 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:13:05.334452 17318 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:13:05.334460 17318 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.334517 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:13:05.334666 17318 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:13:05.334681 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.334687 17318 net.cpp:165] Memory required for data: 1179137500\nI0817 16:13:05.334697 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:13:05.334704 17318 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:13:05.334710 17318 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:13:05.334717 17318 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.334727 17318 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:13:05.334733 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.334738 17318 net.cpp:165] Memory required for data: 1183233500\nI0817 16:13:05.334743 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:13:05.334756 17318 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:13:05.334764 17318 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:13:05.334774 17318 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:13:05.335247 17318 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:13:05.335261 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.335266 17318 net.cpp:165] Memory required for data: 1187329500\nI0817 16:13:05.335275 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:13:05.335288 17318 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:13:05.335302 17318 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:13:05.335314 17318 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:13:05.335568 17318 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:13:05.335582 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.335587 17318 net.cpp:165] Memory required for data: 1191425500\nI0817 16:13:05.335597 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:13:05.335605 17318 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:13:05.335613 17318 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:13:05.335619 17318 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.335677 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:13:05.335826 17318 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:13:05.335839 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.335851 17318 net.cpp:165] Memory required for data: 1195521500\nI0817 16:13:05.335860 17318 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:13:05.335873 17318 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:13:05.335880 17318 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:13:05.335887 17318 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:13:05.335894 17318 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:13:05.335922 17318 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:13:05.335932 17318 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:13:05.335937 17318 net.cpp:165] Memory required for data: 1199617500\nI0817 16:13:05.335942 17318 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:13:05.335952 17318 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:13:05.335958 17318 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:13:05.335965 17318 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.335975 17318 net.cpp:150] Setting up L2_b8_relu\nI0817 16:13:05.335983 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.335986 17318 net.cpp:165] Memory required for data: 1203713500\nI0817 16:13:05.335991 17318 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.335999 17318 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.336004 17318 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:13:05.336010 17318 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:13:05.336032 17318 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:13:05.336082 17318 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.336096 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.336102 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.336107 17318 net.cpp:165] Memory required for data: 1211905500\nI0817 16:13:05.336112 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:13:05.336127 17318 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:13:05.336133 17318 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:13:05.336146 17318 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:13:05.336618 17318 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:13:05.336632 17318 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:13:05.336637 17318 net.cpp:165] Memory required for data: 1216001500\nI0817 16:13:05.336647 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:13:05.336658 17318 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:13:05.336665 17318 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:13:05.336676 17318 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:13:05.336930 17318 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:13:05.336947 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.336952 17318 net.cpp:165] Memory required for data: 1220097500\nI0817 16:13:05.336971 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:13:05.336979 17318 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:13:05.336985 17318 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:13:05.336993 17318 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.337054 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:13:05.337203 17318 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:13:05.337215 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.337220 17318 net.cpp:165] Memory required for data: 1224193500\nI0817 16:13:05.337229 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:13:05.337237 17318 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:13:05.337244 17318 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:13:05.337254 17318 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.337263 17318 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:13:05.337271 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.337275 17318 net.cpp:165] Memory required for data: 1228289500\nI0817 16:13:05.337280 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:13:05.337294 17318 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:13:05.337301 17318 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:13:05.337309 17318 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:13:05.337772 17318 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:13:05.337786 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.337791 17318 net.cpp:165] Memory required for data: 1232385500\nI0817 16:13:05.337800 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:13:05.337812 17318 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:13:05.337819 17318 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:13:05.337827 17318 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:13:05.338085 17318 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:13:05.338102 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338107 17318 net.cpp:165] Memory required for data: 1236481500\nI0817 16:13:05.338150 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:13:05.338165 17318 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:13:05.338171 17318 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:13:05.338178 17318 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.338233 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:13:05.338378 17318 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:13:05.338392 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338397 17318 net.cpp:165] Memory required for data: 1240577500\nI0817 16:13:05.338404 17318 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:13:05.338414 17318 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:13:05.338420 17318 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:13:05.338428 17318 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:13:05.338439 17318 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:13:05.338466 17318 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:13:05.338475 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338480 17318 net.cpp:165] Memory required for data: 1244673500\nI0817 16:13:05.338485 17318 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:13:05.338496 17318 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:13:05.338502 17318 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:13:05.338510 17318 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.338520 17318 net.cpp:150] Setting up L2_b9_relu\nI0817 16:13:05.338526 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338531 17318 net.cpp:165] Memory required for data: 1248769500\nI0817 16:13:05.338542 17318 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.338552 17318 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.338558 17318 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:13:05.338567 17318 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:13:05.338575 17318 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:13:05.338624 17318 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.338639 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338645 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.338650 17318 net.cpp:165] Memory required for data: 1256961500\nI0817 16:13:05.338655 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:13:05.338667 17318 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:13:05.338673 17318 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:13:05.338682 17318 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:13:05.339160 17318 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:13:05.339175 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.339180 17318 net.cpp:165] Memory required for data: 1257985500\nI0817 16:13:05.339190 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:13:05.339201 17318 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:13:05.339208 17318 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:13:05.339216 17318 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:13:05.339478 17318 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:13:05.339491 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.339496 17318 net.cpp:165] Memory required for data: 1259009500\nI0817 16:13:05.339506 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:13:05.339519 17318 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:13:05.339525 17318 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:13:05.339534 17318 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.339591 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:13:05.339745 17318 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:13:05.339757 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.339762 17318 net.cpp:165] Memory required for data: 1260033500\nI0817 16:13:05.339771 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:13:05.339779 17318 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:13:05.339787 17318 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:13:05.339797 17318 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.339807 17318 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:13:05.339813 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.339818 17318 net.cpp:165] Memory required for data: 1261057500\nI0817 16:13:05.339823 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:13:05.339833 17318 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:13:05.339839 17318 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:13:05.339857 17318 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:13:05.340328 17318 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:13:05.340342 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.340348 17318 net.cpp:165] Memory required for data: 1262081500\nI0817 16:13:05.340356 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:13:05.340368 17318 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:13:05.340375 17318 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:13:05.340384 17318 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:13:05.340643 17318 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:13:05.340656 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.340662 17318 net.cpp:165] Memory required for data: 1263105500\nI0817 16:13:05.340678 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:13:05.340687 17318 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:13:05.340693 17318 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:13:05.340701 17318 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.340759 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:13:05.340925 17318 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:13:05.340941 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.340946 17318 net.cpp:165] Memory required for data: 1264129500\nI0817 16:13:05.340956 17318 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:13:05.340965 17318 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:13:05.340972 17318 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:13:05.340983 17318 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:13:05.341018 17318 net.cpp:150] Setting up L3_b1_pool\nI0817 16:13:05.341032 17318 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:13:05.341037 17318 net.cpp:165] Memory required for data: 1265153500\nI0817 16:13:05.341042 17318 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:13:05.341050 17318 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:13:05.341056 17318 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:13:05.341063 17318 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:13:05.341070 17318 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:13:05.341105 17318 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:13:05.341117 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.341122 17318 net.cpp:165] Memory required for data: 1266177500\nI0817 16:13:05.341127 17318 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:13:05.341135 17318 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:13:05.341141 17318 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:13:05.341148 17318 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.341157 17318 net.cpp:150] Setting up L3_b1_relu\nI0817 16:13:05.341164 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.341168 17318 net.cpp:165] Memory required for data: 1267201500\nI0817 16:13:05.341173 17318 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:13:05.341182 17318 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:13:05.341193 17318 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:13:05.342419 17318 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:13:05.342442 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.342447 17318 net.cpp:165] Memory required for data: 1268225500\nI0817 16:13:05.342453 17318 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:13:05.342463 17318 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:13:05.342469 17318 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:13:05.342476 17318 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:13:05.342484 17318 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:13:05.342526 17318 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:13:05.342538 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.342543 17318 net.cpp:165] Memory required for data: 1270273500\nI0817 16:13:05.342550 17318 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.342556 17318 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.342566 17318 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:13:05.342573 17318 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:13:05.342583 17318 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:13:05.342633 17318 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.342644 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.342651 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.342664 17318 net.cpp:165] Memory required for data: 1274369500\nI0817 16:13:05.342669 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:13:05.342684 17318 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:13:05.342690 17318 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:13:05.342700 17318 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:13:05.344681 17318 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:13:05.344702 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.344708 17318 net.cpp:165] Memory required for data: 1276417500\nI0817 16:13:05.344717 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:13:05.344727 17318 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:13:05.344734 17318 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:13:05.344745 17318 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:13:05.345016 17318 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:13:05.345033 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.345038 17318 net.cpp:165] Memory required for data: 1278465500\nI0817 16:13:05.345049 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:13:05.345058 17318 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:13:05.345064 17318 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:13:05.345072 17318 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.345129 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:13:05.345281 17318 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:13:05.345294 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.345299 17318 net.cpp:165] Memory required for data: 1280513500\nI0817 16:13:05.345309 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:13:05.345316 17318 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:13:05.345322 17318 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:13:05.345332 17318 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.345343 17318 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:13:05.345350 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.345355 17318 net.cpp:165] Memory required for data: 1282561500\nI0817 16:13:05.345360 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:13:05.345373 17318 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:13:05.345379 17318 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:13:05.345388 17318 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:13:05.346408 17318 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:13:05.346423 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.346428 17318 net.cpp:165] Memory required for data: 1284609500\nI0817 16:13:05.346438 17318 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:13:05.346451 17318 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:13:05.346457 17318 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:13:05.346468 17318 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:13:05.346735 17318 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:13:05.346748 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.346753 17318 net.cpp:165] Memory required for data: 1286657500\nI0817 16:13:05.346763 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:13:05.346772 17318 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:13:05.346778 17318 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:13:05.346786 17318 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.346851 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:13:05.347008 17318 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:13:05.347021 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.347028 17318 net.cpp:165] Memory required for data: 1288705500\nI0817 16:13:05.347036 17318 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:13:05.347045 17318 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:13:05.347059 17318 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:13:05.347066 17318 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:13:05.347077 17318 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:13:05.347111 17318 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:13:05.347124 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.347129 17318 net.cpp:165] Memory required for data: 1290753500\nI0817 16:13:05.347134 17318 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:13:05.347142 17318 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:13:05.347148 17318 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:13:05.347156 17318 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.347164 17318 net.cpp:150] Setting up L3_b2_relu\nI0817 16:13:05.347172 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.347175 17318 net.cpp:165] Memory required for data: 1292801500\nI0817 16:13:05.347180 17318 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.347190 17318 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.347196 17318 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:13:05.347203 17318 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:13:05.347214 17318 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:13:05.347265 17318 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.347275 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.347282 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.347287 17318 net.cpp:165] Memory required for data: 1296897500\nI0817 16:13:05.347292 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:13:05.347304 17318 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:13:05.347311 17318 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:13:05.347321 17318 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:13:05.348345 17318 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:13:05.348359 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.348364 17318 net.cpp:165] Memory required for data: 1298945500\nI0817 16:13:05.348373 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:13:05.348383 17318 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:13:05.348389 17318 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:13:05.348400 17318 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:13:05.348664 17318 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:13:05.348680 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.348686 17318 net.cpp:165] Memory required for data: 1300993500\nI0817 16:13:05.348696 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:13:05.348706 17318 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:13:05.348711 17318 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:13:05.348718 17318 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.348776 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:13:05.348942 17318 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:13:05.348956 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.348963 17318 net.cpp:165] Memory required for data: 1303041500\nI0817 16:13:05.348971 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:13:05.348983 17318 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:13:05.348989 17318 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:13:05.348996 17318 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.349005 17318 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:13:05.349012 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.349025 17318 net.cpp:165] Memory required for data: 1305089500\nI0817 16:13:05.349030 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:13:05.349043 17318 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:13:05.349050 17318 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:13:05.349058 17318 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:13:05.350075 17318 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:13:05.350090 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350095 17318 net.cpp:165] Memory required for data: 1307137500\nI0817 16:13:05.350105 17318 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:13:05.350117 17318 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:13:05.350123 17318 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:13:05.350136 17318 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:13:05.350404 17318 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:13:05.350416 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350421 17318 net.cpp:165] Memory required for data: 1309185500\nI0817 16:13:05.350431 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:13:05.350440 17318 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:13:05.350446 17318 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:13:05.350456 17318 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.350513 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:13:05.350672 17318 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:13:05.350684 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350689 17318 net.cpp:165] Memory required for data: 1311233500\nI0817 16:13:05.350698 17318 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:13:05.350708 17318 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:13:05.350713 17318 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:13:05.350720 17318 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:13:05.350733 17318 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:13:05.350769 17318 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:13:05.350780 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350785 17318 net.cpp:165] Memory required for data: 1313281500\nI0817 16:13:05.350790 17318 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:13:05.350798 17318 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:13:05.350805 17318 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:13:05.350813 17318 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.350823 17318 net.cpp:150] Setting up L3_b3_relu\nI0817 16:13:05.350831 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350836 17318 net.cpp:165] Memory required for data: 1315329500\nI0817 16:13:05.350839 17318 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.350853 17318 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.350859 17318 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:13:05.350867 17318 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:13:05.350877 17318 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:13:05.350926 17318 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.350939 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350944 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.350950 17318 net.cpp:165] Memory required for data: 1319425500\nI0817 16:13:05.350955 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:13:05.350965 17318 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:13:05.350971 17318 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:13:05.350983 17318 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:13:05.352020 17318 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:13:05.352035 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.352041 17318 net.cpp:165] Memory required for data: 1321473500\nI0817 16:13:05.352051 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:13:05.352059 17318 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:13:05.352066 17318 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:13:05.352077 17318 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:13:05.352349 17318 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:13:05.352365 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.352370 17318 net.cpp:165] Memory required for data: 1323521500\nI0817 16:13:05.352380 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:13:05.352390 17318 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:13:05.352396 17318 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:13:05.352402 17318 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.352459 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:13:05.352617 17318 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:13:05.352629 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.352634 17318 net.cpp:165] Memory required for data: 1325569500\nI0817 16:13:05.352643 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:13:05.352654 17318 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:13:05.352660 17318 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:13:05.352668 17318 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.352677 17318 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:13:05.352684 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.352689 17318 net.cpp:165] Memory required for data: 1327617500\nI0817 16:13:05.352694 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:13:05.352708 17318 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:13:05.352715 17318 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:13:05.352722 17318 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:13:05.353762 17318 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:13:05.353777 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.353782 17318 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:13:05.353791 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:13:05.353803 17318 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:13:05.353811 17318 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:13:05.353821 17318 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:13:05.354100 17318 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:13:05.354115 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354120 17318 net.cpp:165] Memory required for data: 1331713500\nI0817 16:13:05.354130 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:13:05.354138 17318 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:13:05.354145 17318 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:13:05.354154 17318 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.354214 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:13:05.354379 17318 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:13:05.354393 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354398 17318 net.cpp:165] Memory required for data: 1333761500\nI0817 16:13:05.354408 17318 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:13:05.354415 17318 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:13:05.354423 17318 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:13:05.354429 17318 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:13:05.354439 17318 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:13:05.354476 17318 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:13:05.354492 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354497 17318 net.cpp:165] Memory required for data: 1335809500\nI0817 16:13:05.354503 17318 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:13:05.354511 17318 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:13:05.354516 17318 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:13:05.354526 17318 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.354537 17318 net.cpp:150] Setting up L3_b4_relu\nI0817 16:13:05.354543 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354548 17318 net.cpp:165] Memory required for data: 1337857500\nI0817 16:13:05.354553 17318 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.354560 17318 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.354565 17318 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:13:05.354573 17318 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:13:05.354583 17318 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:13:05.354632 17318 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.354645 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354651 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.354656 17318 net.cpp:165] Memory required for data: 1341953500\nI0817 16:13:05.354660 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:13:05.354672 17318 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:13:05.354678 17318 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:13:05.354691 17318 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:13:05.355720 17318 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:13:05.355734 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.355741 17318 net.cpp:165] Memory required for data: 1344001500\nI0817 16:13:05.355748 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:13:05.355758 17318 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:13:05.355764 17318 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:13:05.355775 17318 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:13:05.357035 17318 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:13:05.357053 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.357059 17318 net.cpp:165] Memory required for data: 1346049500\nI0817 16:13:05.357070 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:13:05.357080 17318 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:13:05.357086 17318 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:13:05.357097 17318 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.357157 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:13:05.357316 17318 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:13:05.357327 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.357333 17318 net.cpp:165] Memory required for data: 1348097500\nI0817 16:13:05.357342 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:13:05.357350 17318 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:13:05.357357 17318 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:13:05.357367 17318 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.357376 17318 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:13:05.357383 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.357388 17318 net.cpp:165] Memory required for data: 1350145500\nI0817 16:13:05.357393 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:13:05.357408 17318 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:13:05.357414 17318 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:13:05.357421 17318 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:13:05.359426 17318 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:13:05.359443 17318 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:13:05.359448 17318 net.cpp:165] Memory required for data: 1352193500\nI0817 16:13:05.359458 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:13:05.359472 17318 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:13:05.359478 17318 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:13:05.359490 17318 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:13:05.359748 17318 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:13:05.359761 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.359766 17318 net.cpp:165] Memory required for data: 1354241500\nI0817 16:13:05.359777 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:13:05.359786 17318 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:13:05.359792 17318 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:13:05.359803 17318 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.359865 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:13:05.360018 17318 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:13:05.360031 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.360036 17318 net.cpp:165] Memory required for data: 1356289500\nI0817 16:13:05.360046 17318 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:13:05.360055 17318 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:13:05.360061 17318 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:13:05.360069 17318 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:13:05.360080 17318 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:13:05.360114 17318 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:13:05.360126 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.360131 17318 net.cpp:165] Memory required for data: 1358337500\nI0817 16:13:05.360136 17318 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:13:05.360146 17318 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:13:05.360152 17318 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:13:05.360162 17318 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.360172 17318 net.cpp:150] Setting up L3_b5_relu\nI0817 16:13:05.360178 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.360183 17318 net.cpp:165] Memory required for data: 1360385500\nI0817 16:13:05.360188 17318 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.360194 17318 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.360200 17318 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:13:05.360208 17318 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:13:05.360218 17318 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:13:05.360265 17318 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.360276 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.360283 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.360288 17318 net.cpp:165] Memory required for data: 1364481500\nI0817 16:13:05.360293 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:13:05.360306 17318 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:13:05.360311 17318 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:13:05.360323 17318 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:13:05.361335 17318 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:13:05.361351 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.361356 17318 net.cpp:165] Memory required for data: 1366529500\nI0817 16:13:05.361364 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:13:05.361373 
17318 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:13:05.361387 17318 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:13:05.361400 17318 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:13:05.361659 17318 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:13:05.361671 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.361677 17318 net.cpp:165] Memory required for data: 1368577500\nI0817 16:13:05.361687 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:13:05.361696 17318 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:13:05.361702 17318 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:13:05.361711 17318 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.361771 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:13:05.361932 17318 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:13:05.361945 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.361951 17318 net.cpp:165] Memory required for data: 1370625500\nI0817 16:13:05.361960 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:13:05.361971 17318 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:13:05.361979 17318 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:13:05.361985 17318 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.361995 17318 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:13:05.362004 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.362007 17318 net.cpp:165] Memory required for data: 1372673500\nI0817 16:13:05.362012 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:13:05.362026 17318 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:13:05.362033 17318 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:13:05.362041 17318 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:13:05.363051 17318 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:13:05.363066 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363072 17318 net.cpp:165] Memory required for data: 1374721500\nI0817 16:13:05.363081 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:13:05.363093 17318 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:13:05.363101 17318 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:13:05.363111 17318 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:13:05.363368 17318 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:13:05.363380 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363385 17318 net.cpp:165] Memory required for data: 1376769500\nI0817 16:13:05.363395 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:13:05.363404 17318 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:13:05.363410 17318 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:13:05.363421 17318 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.363477 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:13:05.363628 17318 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:13:05.363641 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363646 17318 net.cpp:165] Memory required for data: 1378817500\nI0817 16:13:05.363656 17318 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:13:05.363667 17318 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:13:05.363673 17318 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:13:05.363680 17318 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:13:05.363688 17318 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:13:05.363724 17318 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:13:05.363735 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363740 17318 net.cpp:165] Memory required for data: 1380865500\nI0817 16:13:05.363746 17318 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:13:05.363754 17318 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:13:05.363760 17318 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:13:05.363776 17318 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.363787 17318 net.cpp:150] Setting up L3_b6_relu\nI0817 16:13:05.363795 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363800 17318 net.cpp:165] Memory required for data: 1382913500\nI0817 16:13:05.363804 17318 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.363811 17318 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.363817 17318 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:13:05.363824 17318 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:13:05.363833 17318 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:13:05.363888 17318 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.363901 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363909 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.363912 17318 net.cpp:165] Memory required for data: 1387009500\nI0817 16:13:05.363917 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:13:05.363929 17318 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:13:05.363936 17318 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:13:05.363950 17318 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:13:05.364995 17318 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:13:05.365012 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.365017 17318 net.cpp:165] Memory required for data: 1389057500\nI0817 16:13:05.365026 17318 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:13:05.365036 17318 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:13:05.365042 17318 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:13:05.365054 17318 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:13:05.365314 17318 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:13:05.365329 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.365334 17318 net.cpp:165] Memory required for data: 1391105500\nI0817 16:13:05.365344 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:13:05.365352 17318 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:13:05.365358 17318 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:13:05.365366 17318 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.365425 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:13:05.365577 17318 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:13:05.365592 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.365598 17318 net.cpp:165] Memory required for data: 1393153500\nI0817 16:13:05.365607 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:13:05.365638 17318 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:13:05.365648 17318 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:13:05.365655 17318 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.365665 17318 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:13:05.365672 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.365677 17318 net.cpp:165] Memory required for data: 1395201500\nI0817 16:13:05.365684 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:13:05.365697 17318 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:13:05.365703 17318 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:13:05.365712 17318 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:13:05.366736 17318 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:13:05.366751 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.366757 17318 net.cpp:165] Memory required for data: 1397249500\nI0817 16:13:05.366766 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:13:05.366775 17318 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:13:05.366788 17318 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:13:05.366801 17318 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:13:05.367079 17318 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:13:05.367094 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367100 17318 net.cpp:165] Memory required for data: 1399297500\nI0817 16:13:05.367110 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:13:05.367120 17318 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:13:05.367125 17318 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:13:05.367133 17318 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.367189 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:13:05.367346 17318 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:13:05.367358 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367364 17318 net.cpp:165] Memory required for data: 1401345500\nI0817 16:13:05.367372 17318 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:13:05.367384 17318 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:13:05.367391 17318 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:13:05.367398 17318 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:13:05.367406 17318 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:13:05.367444 17318 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:13:05.367455 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367460 17318 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:13:05.367465 17318 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:13:05.367472 17318 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:13:05.367478 17318 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:13:05.367486 17318 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.367496 17318 net.cpp:150] Setting up L3_b7_relu\nI0817 16:13:05.367502 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367506 17318 net.cpp:165] Memory required for data: 1405441500\nI0817 16:13:05.367511 17318 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.367518 17318 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.367524 17318 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:13:05.367534 17318 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:13:05.367545 17318 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:13:05.367589 17318 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.367601 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367609 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.367612 17318 net.cpp:165] Memory required for data: 1409537500\nI0817 16:13:05.367619 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:13:05.367632 17318 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:13:05.367640 17318 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:13:05.367650 17318 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:13:05.368661 17318 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:13:05.368676 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.368682 17318 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:13:05.368691 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:13:05.368703 17318 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:13:05.368710 17318 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:13:05.368718 17318 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:13:05.368989 17318 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:13:05.369004 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.369009 17318 net.cpp:165] Memory required for data: 1413633500\nI0817 16:13:05.369025 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:13:05.369037 17318 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:13:05.369045 17318 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:13:05.369052 17318 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.369115 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:13:05.369269 17318 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:13:05.369282 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.369287 17318 net.cpp:165] Memory required for data: 1415681500\nI0817 16:13:05.369297 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:13:05.369307 17318 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:13:05.369314 17318 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:13:05.369321 17318 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.369331 17318 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:13:05.369341 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.369346 17318 net.cpp:165] Memory required for data: 1417729500\nI0817 16:13:05.369350 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:13:05.369362 17318 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:13:05.369367 17318 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:13:05.369379 17318 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:13:05.370389 17318 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:13:05.370404 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.370409 17318 net.cpp:165] Memory required for data: 1419777500\nI0817 16:13:05.370419 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:13:05.370427 17318 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:13:05.370434 17318 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:13:05.370447 17318 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:13:05.370710 17318 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:13:05.370723 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.370729 17318 net.cpp:165] Memory required for data: 1421825500\nI0817 16:13:05.370739 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:13:05.370748 17318 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:13:05.370754 17318 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:13:05.370762 17318 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.370821 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:13:05.370980 17318 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:13:05.370997 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.371002 17318 net.cpp:165] Memory required for data: 1423873500\nI0817 16:13:05.371011 17318 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:13:05.371021 17318 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:13:05.371027 17318 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:13:05.371034 17318 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:13:05.371042 17318 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:13:05.371103 17318 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:13:05.371115 17318 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:13:05.371121 17318 net.cpp:165] Memory required for data: 1425921500\nI0817 16:13:05.371126 17318 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:13:05.371134 17318 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:13:05.371140 17318 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:13:05.371147 17318 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.371157 17318 net.cpp:150] Setting up L3_b8_relu\nI0817 16:13:05.371165 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.371170 17318 net.cpp:165] Memory required for data: 1427969500\nI0817 16:13:05.371173 17318 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.371188 17318 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.371194 17318 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:13:05.371206 17318 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:13:05.371217 17318 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:13:05.371263 17318 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.371279 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.371285 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.371290 17318 net.cpp:165] Memory required for data: 1432065500\nI0817 16:13:05.371296 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:13:05.371307 17318 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:13:05.371315 17318 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:13:05.371322 17318 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:13:05.373320 17318 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:13:05.373337 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:13:05.373343 17318 net.cpp:165] Memory required for data: 1434113500\nI0817 16:13:05.373353 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:13:05.373368 17318 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:13:05.373374 17318 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:13:05.373383 17318 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:13:05.373647 17318 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:13:05.373661 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.373667 17318 net.cpp:165] Memory required for data: 1436161500\nI0817 16:13:05.373677 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:13:05.373685 17318 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:13:05.373692 17318 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:13:05.373699 17318 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.373759 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:13:05.373926 17318 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:13:05.373940 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.373945 17318 net.cpp:165] Memory required for data: 1438209500\nI0817 16:13:05.373955 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:13:05.373963 17318 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:13:05.373970 17318 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:13:05.373980 17318 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.373989 17318 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:13:05.373996 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.374001 17318 net.cpp:165] Memory required for data: 1440257500\nI0817 16:13:05.374006 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:13:05.374022 17318 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:13:05.374029 17318 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:13:05.374038 17318 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:13:05.375061 17318 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:13:05.375075 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.375080 17318 net.cpp:165] Memory required for data: 1442305500\nI0817 16:13:05.375089 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:13:05.375102 17318 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:13:05.375108 17318 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:13:05.375116 17318 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:13:05.375383 17318 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:13:05.375396 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.375401 17318 net.cpp:165] Memory required for data: 1444353500\nI0817 16:13:05.375419 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:13:05.375432 17318 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:13:05.375437 17318 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:13:05.375445 17318 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.375506 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:13:05.375660 17318 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:13:05.375674 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.375679 17318 net.cpp:165] Memory required for data: 1446401500\nI0817 16:13:05.375687 17318 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:13:05.375700 17318 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:13:05.375707 17318 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:13:05.375715 17318 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:13:05.375725 17318 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:13:05.375757 17318 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:13:05.375768 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.375773 17318 net.cpp:165] Memory required for data: 1448449500\nI0817 16:13:05.375778 17318 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:13:05.375792 17318 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:13:05.375797 17318 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:13:05.375805 17318 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.375814 17318 net.cpp:150] Setting up L3_b9_relu\nI0817 16:13:05.375821 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.375826 17318 net.cpp:165] Memory required for data: 1450497500\nI0817 16:13:05.375830 17318 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:13:05.375838 17318 net.cpp:100] Creating Layer post_pool\nI0817 16:13:05.375851 17318 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:13:05.375859 17318 net.cpp:408] post_pool -> post_pool\nI0817 16:13:05.375897 17318 net.cpp:150] Setting up post_pool\nI0817 16:13:05.375910 17318 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:13:05.375915 17318 net.cpp:165] Memory required for data: 1450529500\nI0817 16:13:05.375921 17318 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:13:05.376004 17318 net.cpp:100] Creating Layer post_FC\nI0817 16:13:05.376018 17318 net.cpp:434] post_FC <- post_pool\nI0817 16:13:05.376027 17318 net.cpp:408] post_FC -> post_FC_top\nI0817 16:13:05.376325 17318 net.cpp:150] Setting up post_FC\nI0817 16:13:05.376341 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.376346 17318 net.cpp:165] Memory required for data: 1450579500\nI0817 16:13:05.376356 17318 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:13:05.376365 17318 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:13:05.376371 17318 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:13:05.376379 17318 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:13:05.376394 17318 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:13:05.376441 17318 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:13:05.376452 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.376459 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.376464 17318 net.cpp:165] Memory required for data: 1450679500\nI0817 16:13:05.376469 17318 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:13:05.376515 17318 net.cpp:100] Creating Layer accuracy\nI0817 16:13:05.376528 17318 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:13:05.376535 17318 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:13:05.376543 17318 net.cpp:408] accuracy -> accuracy\nI0817 16:13:05.376585 17318 net.cpp:150] Setting up accuracy\nI0817 16:13:05.376597 17318 net.cpp:157] Top shape: (1)\nI0817 16:13:05.376602 17318 net.cpp:165] Memory required for data: 1450679504\nI0817 16:13:05.376608 17318 layer_factory.hpp:77] Creating layer loss\nI0817 16:13:05.376627 17318 net.cpp:100] Creating Layer loss\nI0817 16:13:05.376634 17318 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:13:05.376641 17318 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:13:05.376648 17318 net.cpp:408] loss -> loss\nI0817 16:13:05.376694 17318 layer_factory.hpp:77] Creating layer loss\nI0817 16:13:05.376870 17318 net.cpp:150] Setting up loss\nI0817 16:13:05.376885 17318 net.cpp:157] Top shape: (1)\nI0817 16:13:05.376891 17318 net.cpp:160]     with loss weight 1\nI0817 16:13:05.376968 17318 net.cpp:165] Memory required for data: 1450679508\nI0817 16:13:05.376977 17318 net.cpp:226] loss needs backward computation.\nI0817 16:13:05.376983 17318 net.cpp:228] accuracy does not need backward computation.\nI0817 16:13:05.376991 17318 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:13:05.376996 17318 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:13:05.377001 17318 net.cpp:226] post_pool needs backward computation.\nI0817 16:13:05.377005 17318 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:13:05.377010 17318 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.377017 17318 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.377020 17318 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.377027 17318 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.377032 17318 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.377037 17318 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.377040 17318 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.377045 17318 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.377051 17318 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:13:05.377056 17318 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:13:05.377061 17318 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.377066 17318 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.377071 17318 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.377077 17318 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.377082 17318 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.377086 17318 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:13:05.377091 17318 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.377096 17318 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.377102 17318 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:13:05.377107 17318 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:13:05.377112 17318 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:13:05.377117 17318 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.377122 17318 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.377127 17318 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.377132 17318 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.377137 17318 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.377142 17318 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.377147 17318 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.377152 17318 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:13:05.377158 17318 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:13:05.377163 17318 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.377168 17318 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.377173 17318 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.377179 17318 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.377192 17318 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.377197 17318 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.377202 17318 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.377207 17318 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.377213 17318 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:13:05.377218 17318 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:13:05.377223 17318 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:13:05.377229 17318 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.377234 17318 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.377239 17318 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.377244 17318 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.377249 17318 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.377254 17318 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.377260 17318 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.377265 17318 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:13:05.377270 17318 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:13:05.377275 17318 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.377286 17318 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.377293 17318 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.377300 17318 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.377305 17318 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.377310 17318 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.377315 17318 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.377321 17318 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.377326 17318 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:13:05.377331 17318 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:13:05.377336 17318 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:13:05.377342 17318 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.377347 17318 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.377352 17318 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.377357 17318 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.377363 17318 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.377368 
17318 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.377373 17318 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.377378 17318 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:13:05.377384 17318 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:13:05.377389 17318 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.377395 17318 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.377400 17318 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.377406 17318 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.377411 17318 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.377416 17318 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.377423 17318 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.377427 17318 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.377434 17318 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:13:05.377439 17318 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:13:05.377445 17318 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:13:05.377455 17318 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:13:05.377461 17318 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.377467 17318 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:13:05.377473 17318 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.377478 17318 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.377485 17318 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.377490 17318 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.377495 17318 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:13:05.377501 17318 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.377506 17318 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.377511 17318 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:13:05.377516 17318 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:13:05.377521 17318 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.377527 17318 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.377532 17318 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.377538 17318 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.377544 17318 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.377549 17318 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.377554 17318 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.377559 17318 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.377565 17318 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:13:05.377574 17318 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:13:05.377579 17318 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.377585 17318 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.377591 17318 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.377596 17318 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.377602 17318 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.377607 17318 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:13:05.377614 17318 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.377619 17318 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.377624 17318 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:13:05.377629 17318 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:13:05.377635 17318 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:13:05.377640 17318 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.377646 17318 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.377651 17318 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.377657 17318 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.377662 17318 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.377667 17318 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.377673 17318 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.377678 17318 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:13:05.377684 17318 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:13:05.377689 17318 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.377696 17318 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.377701 17318 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.377707 17318 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.377712 17318 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.377717 17318 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.377727 17318 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.377732 17318 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.377738 17318 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:13:05.377743 17318 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:13:05.377749 17318 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:13:05.377755 17318 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.377760 17318 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.377766 17318 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.377771 17318 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.377776 17318 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.377781 17318 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.377787 17318 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.377792 17318 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:13:05.377799 17318 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:13:05.377804 17318 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.377810 17318 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.377815 17318 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.377820 17318 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.377826 17318 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.377831 17318 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.377836 17318 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.377848 17318 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.377856 17318 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:13:05.377861 17318 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:13:05.377867 17318 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:13:05.377873 17318 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.377878 17318 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.377884 17318 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.377890 17318 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.377895 17318 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.377902 17318 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.377907 17318 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.377912 17318 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:13:05.377918 17318 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:13:05.377924 17318 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.377930 17318 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.377935 17318 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.377941 17318 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.377948 17318 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.377952 17318 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.377959 17318 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.377964 17318 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.377972 17318 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:13:05.377979 17318 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:13:05.377985 17318 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:13:05.377991 17318 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:13:05.377997 17318 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.378008 17318 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:13:05.378015 17318 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.378021 17318 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.378026 17318 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.378032 17318 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.378037 17318 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:13:05.378043 17318 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.378048 17318 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.378054 17318 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:13:05.378060 17318 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:13:05.378065 17318 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.378072 17318 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.378077 17318 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.378083 17318 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.378088 17318 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.378094 17318 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.378099 17318 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.378105 17318 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.378110 17318 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:13:05.378116 17318 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:13:05.378121 17318 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.378127 17318 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.378134 17318 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.378139 17318 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.378145 17318 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.378150 17318 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:13:05.378155 17318 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.378161 17318 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.378167 17318 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:13:05.378173 17318 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:13:05.378180 17318 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:13:05.378185 17318 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.378190 17318 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.378196 17318 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.378202 17318 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.378207 17318 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.378213 17318 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.378219 17318 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.378226 17318 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:13:05.378231 17318 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:13:05.378237 17318 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.378242 17318 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.378248 17318 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.378253 17318 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.378259 17318 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.378264 17318 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.378270 17318 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.378280 17318 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.378288 17318 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:13:05.378293 17318 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:13:05.378298 17318 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:13:05.378305 17318 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.378310 17318 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.378316 17318 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.378322 17318 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.378329 17318 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.378334 17318 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.378340 17318 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.378345 17318 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:13:05.378350 17318 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:13:05.378355 17318 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.378362 17318 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.378367 17318 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.378373 17318 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.378379 17318 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.378384 17318 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.378391 17318 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.378396 17318 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.378402 17318 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:13:05.378408 17318 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:13:05.378413 17318 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:13:05.378420 17318 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.378425 17318 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.378432 17318 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.378437 17318 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.378443 17318 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.378448 17318 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.378453 17318 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.378459 17318 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:13:05.378465 17318 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:13:05.378470 17318 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.378478 17318 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.378482 17318 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.378489 17318 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.378494 17318 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.378499 17318 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.378505 17318 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.378511 17318 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.378517 17318 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:13:05.378522 17318 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:13:05.378528 17318 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.378535 17318 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.378540 17318 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.378546 17318 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.378556 17318 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.378562 17318 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:13:05.378568 17318 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.378574 17318 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.378581 17318 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:13:05.378585 17318 net.cpp:226] pre_relu needs backward computation.\nI0817 16:13:05.378592 17318 net.cpp:226] pre_scale needs backward computation.\nI0817 16:13:05.378597 17318 net.cpp:226] pre_bn needs backward computation.\nI0817 16:13:05.378602 17318 net.cpp:226] pre_conv needs backward computation.\nI0817 16:13:05.378609 17318 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:13:05.378617 17318 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:13:05.378620 17318 net.cpp:270] This network produces output accuracy\nI0817 16:13:05.378628 17318 net.cpp:270] This network produces output loss\nI0817 16:13:05.378998 17318 net.cpp:283] Network initialization done.\nI0817 16:13:05.388865 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:05.388906 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:05.388967 17318 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI0817 16:13:05.389353 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI0817 16:13:05.389370 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI0817 16:13:05.389381 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI0817 16:13:05.389391 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI0817 16:13:05.389400 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI0817 16:13:05.389410 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI0817 16:13:05.389418 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI0817 16:13:05.389427 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI0817 16:13:05.389436 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI0817 16:13:05.389446 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI0817 16:13:05.389456 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI0817 16:13:05.389463 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI0817 16:13:05.389472 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI0817 16:13:05.389480 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI0817 16:13:05.389490 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI0817 16:13:05.389498 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI0817 16:13:05.389508 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI0817 16:13:05.389516 17318 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI0817 16:13:05.389525 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI0817 16:13:05.389545 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI0817 16:13:05.389555 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI0817 16:13:05.389564 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI0817 16:13:05.389576 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI0817 16:13:05.389585 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI0817 16:13:05.389595 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI0817 16:13:05.389602 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI0817 16:13:05.389611 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI0817 16:13:05.389619 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI0817 16:13:05.389628 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI0817 16:13:05.389636 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI0817 16:13:05.389645 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI0817 16:13:05.389654 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI0817 16:13:05.389663 17318 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI0817 16:13:05.389672 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI0817 16:13:05.389679 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI0817 16:13:05.389688 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI0817 16:13:05.389698 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI0817 16:13:05.389706 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI0817 16:13:05.389715 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI0817 16:13:05.389724 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI0817 16:13:05.389734 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI0817 16:13:05.389744 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI0817 16:13:05.389751 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI0817 16:13:05.389760 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI0817 16:13:05.389770 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI0817 16:13:05.389777 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI0817 16:13:05.389786 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI0817 16:13:05.389794 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI0817 16:13:05.389802 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI0817 16:13:05.389819 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI0817 16:13:05.389828 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI0817 16:13:05.389837 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI0817 16:13:05.389854 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI0817 16:13:05.389863 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI0817 16:13:05.389873 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI0817 16:13:05.389880 17318 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI0817 16:13:05.391722 17318 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar100/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar100/cifar100_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer 
{\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n 
   kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"\nI0817 16:13:05.393334 17318 layer_factory.hpp:77] Creating layer dataLayer\nI0817 16:13:05.393568 17318 net.cpp:100] Creating Layer dataLayer\nI0817 16:13:05.393591 17318 net.cpp:408] dataLayer -> data_top\nI0817 16:13:05.393609 17318 net.cpp:408] dataLayer -> label\nI0817 16:13:05.393620 17318 data_transformer.cpp:25] Loading mean file from: examples/cifar100/mean.binaryproto\nI0817 16:13:05.406584 17325 db_lmdb.cpp:35] Opened lmdb examples/cifar100/cifar100_test_lmdb\nI0817 16:13:05.406821 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:05.415405 17318 net.cpp:150] Setting up dataLayer\nI0817 16:13:05.415433 17318 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI0817 16:13:05.415442 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.415448 17318 net.cpp:165] Memory required for data: 1536500\nI0817 16:13:05.415459 17318 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI0817 16:13:05.415529 17318 net.cpp:100] Creating Layer label_dataLayer_1_split\nI0817 16:13:05.415539 17318 net.cpp:434] label_dataLayer_1_split <- label\nI0817 16:13:05.415550 17318 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI0817 16:13:05.415570 17318 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI0817 16:13:05.415767 17318 net.cpp:150] Setting up label_dataLayer_1_split\nI0817 16:13:05.415781 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.415788 17318 net.cpp:157] Top shape: 125 (125)\nI0817 16:13:05.415793 17318 net.cpp:165] Memory required for data: 1537500\nI0817 16:13:05.415799 17318 
layer_factory.hpp:77] Creating layer pre_conv\nI0817 16:13:05.415824 17318 net.cpp:100] Creating Layer pre_conv\nI0817 16:13:05.415832 17318 net.cpp:434] pre_conv <- data_top\nI0817 16:13:05.415843 17318 net.cpp:408] pre_conv -> pre_conv_top\nI0817 16:13:05.416321 17318 net.cpp:150] Setting up pre_conv\nI0817 16:13:05.416347 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.416352 17318 net.cpp:165] Memory required for data: 9729500\nI0817 16:13:05.416373 17318 layer_factory.hpp:77] Creating layer pre_bn\nI0817 16:13:05.416388 17318 net.cpp:100] Creating Layer pre_bn\nI0817 16:13:05.416393 17318 net.cpp:434] pre_bn <- pre_conv_top\nI0817 16:13:05.416405 17318 net.cpp:408] pre_bn -> pre_bn_top\nI0817 16:13:05.416762 17318 net.cpp:150] Setting up pre_bn\nI0817 16:13:05.416779 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.416785 17318 net.cpp:165] Memory required for data: 17921500\nI0817 16:13:05.416800 17318 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:13:05.416813 17318 net.cpp:100] Creating Layer pre_scale\nI0817 16:13:05.416820 17318 net.cpp:434] pre_scale <- pre_bn_top\nI0817 16:13:05.416829 17318 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI0817 16:13:05.416911 17318 layer_factory.hpp:77] Creating layer pre_scale\nI0817 16:13:05.417146 17318 net.cpp:150] Setting up pre_scale\nI0817 16:13:05.417161 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.417167 17318 net.cpp:165] Memory required for data: 26113500\nI0817 16:13:05.417177 17318 layer_factory.hpp:77] Creating layer pre_relu\nI0817 16:13:05.417187 17318 net.cpp:100] Creating Layer pre_relu\nI0817 16:13:05.417196 17318 net.cpp:434] pre_relu <- pre_bn_top\nI0817 16:13:05.417203 17318 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI0817 16:13:05.417213 17318 net.cpp:150] Setting up pre_relu\nI0817 16:13:05.417222 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.417225 17318 net.cpp:165] Memory required for data: 
34305500\nI0817 16:13:05.417230 17318 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI0817 16:13:05.417242 17318 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI0817 16:13:05.417246 17318 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI0817 16:13:05.417253 17318 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI0817 16:13:05.417263 17318 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI0817 16:13:05.417322 17318 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI0817 16:13:05.417335 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.417341 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.417346 17318 net.cpp:165] Memory required for data: 50689500\nI0817 16:13:05.417351 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI0817 16:13:05.417366 17318 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI0817 16:13:05.417373 17318 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI0817 16:13:05.417385 17318 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI0817 16:13:05.417795 17318 net.cpp:150] Setting up L1_b1_cbr1_conv\nI0817 16:13:05.417809 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.417815 17318 net.cpp:165] Memory required for data: 58881500\nI0817 16:13:05.417830 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI0817 16:13:05.417851 17318 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI0817 16:13:05.417857 17318 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI0817 16:13:05.417870 17318 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI0817 16:13:05.418457 17318 net.cpp:150] Setting up L1_b1_cbr1_bn\nI0817 16:13:05.418476 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.418483 17318 net.cpp:165] Memory required for data: 67073500\nI0817 16:13:05.418495 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:13:05.418505 17318 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI0817 16:13:05.418511 17318 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI0817 16:13:05.418519 17318 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.418576 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI0817 16:13:05.419014 17318 net.cpp:150] Setting up L1_b1_cbr1_scale\nI0817 16:13:05.419044 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.419055 17318 net.cpp:165] Memory required for data: 75265500\nI0817 16:13:05.419077 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI0817 16:13:05.419092 17318 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI0817 16:13:05.419100 17318 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI0817 16:13:05.419108 17318 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.419121 17318 net.cpp:150] Setting up L1_b1_cbr1_relu\nI0817 16:13:05.419128 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.419133 17318 net.cpp:165] Memory required for data: 83457500\nI0817 16:13:05.419140 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI0817 16:13:05.419157 17318 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI0817 16:13:05.419162 17318 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI0817 16:13:05.419178 17318 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI0817 16:13:05.419559 17318 net.cpp:150] Setting up L1_b1_cbr2_conv\nI0817 16:13:05.419574 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.419579 17318 net.cpp:165] Memory required for data: 91649500\nI0817 16:13:05.419589 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI0817 16:13:05.419598 17318 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI0817 16:13:05.419605 17318 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI0817 16:13:05.419613 17318 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI0817 16:13:05.419880 17318 net.cpp:150] Setting up L1_b1_cbr2_bn\nI0817 16:13:05.419911 17318 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.419917 17318 net.cpp:165] Memory required for data: 99841500\nI0817 16:13:05.419936 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:13:05.419947 17318 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI0817 16:13:05.419953 17318 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI0817 16:13:05.419963 17318 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.420042 17318 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI0817 16:13:05.420233 17318 net.cpp:150] Setting up L1_b1_cbr2_scale\nI0817 16:13:05.420249 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.420255 17318 net.cpp:165] Memory required for data: 108033500\nI0817 16:13:05.420264 17318 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI0817 16:13:05.420279 17318 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI0817 16:13:05.420296 17318 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI0817 16:13:05.420311 17318 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI0817 16:13:05.420321 17318 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI0817 16:13:05.420363 17318 net.cpp:150] Setting up L1_b1_sum_eltwise\nI0817 16:13:05.420377 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.420382 17318 net.cpp:165] Memory required for data: 116225500\nI0817 16:13:05.420387 17318 layer_factory.hpp:77] Creating layer L1_b1_relu\nI0817 16:13:05.420397 17318 net.cpp:100] Creating Layer L1_b1_relu\nI0817 16:13:05.420403 17318 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI0817 16:13:05.420413 17318 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.420423 17318 net.cpp:150] Setting up L1_b1_relu\nI0817 16:13:05.420429 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.420434 17318 net.cpp:165] Memory required for data: 124417500\nI0817 16:13:05.420439 17318 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.420449 17318 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.420454 17318 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI0817 16:13:05.420461 17318 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:13:05.420475 17318 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:13:05.420529 17318 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI0817 16:13:05.420547 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.420554 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.420559 17318 net.cpp:165] Memory required for data: 140801500\nI0817 16:13:05.420565 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI0817 16:13:05.420578 17318 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI0817 16:13:05.420586 17318 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI0817 16:13:05.420601 17318 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI0817 16:13:05.421299 17318 net.cpp:150] Setting up L1_b2_cbr1_conv\nI0817 16:13:05.421316 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.421322 17318 net.cpp:165] Memory required for data: 148993500\nI0817 16:13:05.421332 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI0817 16:13:05.421344 17318 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI0817 16:13:05.421351 17318 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI0817 16:13:05.421402 17318 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI0817 16:13:05.421716 17318 net.cpp:150] Setting up L1_b2_cbr1_bn\nI0817 16:13:05.421735 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.421741 17318 net.cpp:165] Memory required for data: 157185500\nI0817 16:13:05.421751 17318 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI0817 16:13:05.421761 17318 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI0817 16:13:05.421767 17318 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI0817 16:13:05.421775 17318 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.421862 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI0817 16:13:05.422045 17318 net.cpp:150] Setting up L1_b2_cbr1_scale\nI0817 16:13:05.422062 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.422068 17318 net.cpp:165] Memory required for data: 165377500\nI0817 16:13:05.422078 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI0817 16:13:05.422089 17318 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI0817 16:13:05.422096 17318 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI0817 16:13:05.422106 17318 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.422116 17318 net.cpp:150] Setting up L1_b2_cbr1_relu\nI0817 16:13:05.422124 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.422128 17318 net.cpp:165] Memory required for data: 173569500\nI0817 16:13:05.422133 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI0817 16:13:05.422147 17318 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI0817 16:13:05.422155 17318 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI0817 16:13:05.422169 17318 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI0817 16:13:05.422556 17318 net.cpp:150] Setting up L1_b2_cbr2_conv\nI0817 16:13:05.422574 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.422580 17318 net.cpp:165] Memory required for data: 181761500\nI0817 16:13:05.422591 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI0817 16:13:05.422600 17318 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI0817 16:13:05.422606 17318 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI0817 16:13:05.422621 17318 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI0817 16:13:05.422931 17318 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI0817 16:13:05.422950 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.422955 17318 net.cpp:165] Memory required for data: 189953500\nI0817 16:13:05.422981 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:13:05.422991 17318 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI0817 16:13:05.423001 17318 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI0817 16:13:05.423012 17318 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.423080 17318 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI0817 16:13:05.423295 17318 net.cpp:150] Setting up L1_b2_cbr2_scale\nI0817 16:13:05.423310 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.423316 17318 net.cpp:165] Memory required for data: 198145500\nI0817 16:13:05.423333 17318 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI0817 16:13:05.423343 17318 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI0817 16:13:05.423352 17318 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI0817 16:13:05.423360 17318 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI0817 16:13:05.423368 17318 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI0817 16:13:05.423409 17318 net.cpp:150] Setting up L1_b2_sum_eltwise\nI0817 16:13:05.423419 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.423424 17318 net.cpp:165] Memory required for data: 206337500\nI0817 16:13:05.423430 17318 layer_factory.hpp:77] Creating layer L1_b2_relu\nI0817 16:13:05.423442 17318 net.cpp:100] Creating Layer L1_b2_relu\nI0817 16:13:05.423451 17318 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI0817 16:13:05.423460 17318 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.423470 17318 net.cpp:150] Setting up L1_b2_relu\nI0817 16:13:05.423477 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.423482 17318 net.cpp:165] Memory required for 
data: 214529500\nI0817 16:13:05.423487 17318 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.423494 17318 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.423501 17318 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI0817 16:13:05.423511 17318 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:13:05.423521 17318 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:13:05.423570 17318 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI0817 16:13:05.423580 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.423588 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.423593 17318 net.cpp:165] Memory required for data: 230913500\nI0817 16:13:05.423598 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI0817 16:13:05.423615 17318 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI0817 16:13:05.423622 17318 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI0817 16:13:05.423633 17318 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI0817 16:13:05.424270 17318 net.cpp:150] Setting up L1_b3_cbr1_conv\nI0817 16:13:05.424286 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.424293 17318 net.cpp:165] Memory required for data: 239105500\nI0817 16:13:05.424305 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI0817 16:13:05.424319 17318 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI0817 16:13:05.424324 17318 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI0817 16:13:05.424340 17318 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI0817 16:13:05.424634 17318 net.cpp:150] Setting up L1_b3_cbr1_bn\nI0817 16:13:05.424649 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.424654 17318 net.cpp:165] Memory required for data: 
247297500\nI0817 16:13:05.424665 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:13:05.424674 17318 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI0817 16:13:05.424681 17318 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI0817 16:13:05.424688 17318 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.424762 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI0817 16:13:05.424934 17318 net.cpp:150] Setting up L1_b3_cbr1_scale\nI0817 16:13:05.424948 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.424954 17318 net.cpp:165] Memory required for data: 255489500\nI0817 16:13:05.424973 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI0817 16:13:05.424988 17318 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI0817 16:13:05.424994 17318 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI0817 16:13:05.425002 17318 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.425024 17318 net.cpp:150] Setting up L1_b3_cbr1_relu\nI0817 16:13:05.425032 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.425038 17318 net.cpp:165] Memory required for data: 263681500\nI0817 16:13:05.425043 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI0817 16:13:05.425058 17318 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI0817 16:13:05.425065 17318 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI0817 16:13:05.425076 17318 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI0817 16:13:05.425463 17318 net.cpp:150] Setting up L1_b3_cbr2_conv\nI0817 16:13:05.425480 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.425487 17318 net.cpp:165] Memory required for data: 271873500\nI0817 16:13:05.425496 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI0817 16:13:05.425513 17318 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI0817 16:13:05.425520 17318 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI0817 16:13:05.425534 17318 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI0817 16:13:05.425858 17318 net.cpp:150] Setting up L1_b3_cbr2_bn\nI0817 16:13:05.425874 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.425880 17318 net.cpp:165] Memory required for data: 280065500\nI0817 16:13:05.425894 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:13:05.425904 17318 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI0817 16:13:05.425910 17318 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI0817 16:13:05.425918 17318 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.426004 17318 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI0817 16:13:05.426180 17318 net.cpp:150] Setting up L1_b3_cbr2_scale\nI0817 16:13:05.426195 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.426200 17318 net.cpp:165] Memory required for data: 288257500\nI0817 16:13:05.426210 17318 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI0817 16:13:05.426220 17318 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI0817 16:13:05.426228 17318 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI0817 16:13:05.426237 17318 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI0817 16:13:05.426245 17318 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI0817 16:13:05.426283 17318 net.cpp:150] Setting up L1_b3_sum_eltwise\nI0817 16:13:05.426295 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.426300 17318 net.cpp:165] Memory required for data: 296449500\nI0817 16:13:05.426306 17318 layer_factory.hpp:77] Creating layer L1_b3_relu\nI0817 16:13:05.426318 17318 net.cpp:100] Creating Layer L1_b3_relu\nI0817 16:13:05.426326 17318 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI0817 16:13:05.426339 17318 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.426349 17318 net.cpp:150] Setting up L1_b3_relu\nI0817 16:13:05.426357 17318 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI0817 16:13:05.426362 17318 net.cpp:165] Memory required for data: 304641500\nI0817 16:13:05.426369 17318 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.426378 17318 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.426383 17318 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI0817 16:13:05.426390 17318 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:13:05.426400 17318 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:13:05.426457 17318 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI0817 16:13:05.426468 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.426475 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.426479 17318 net.cpp:165] Memory required for data: 321025500\nI0817 16:13:05.426486 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI0817 16:13:05.426499 17318 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI0817 16:13:05.426513 17318 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI0817 16:13:05.426527 17318 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI0817 16:13:05.426973 17318 net.cpp:150] Setting up L1_b4_cbr1_conv\nI0817 16:13:05.426991 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.426997 17318 net.cpp:165] Memory required for data: 329217500\nI0817 16:13:05.427006 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI0817 16:13:05.427019 17318 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI0817 16:13:05.427026 17318 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI0817 16:13:05.427037 17318 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI0817 16:13:05.427336 17318 net.cpp:150] Setting up L1_b4_cbr1_bn\nI0817 16:13:05.427350 17318 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI0817 16:13:05.427356 17318 net.cpp:165] Memory required for data: 337409500\nI0817 16:13:05.427368 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:13:05.427378 17318 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI0817 16:13:05.427384 17318 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI0817 16:13:05.427392 17318 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.427456 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI0817 16:13:05.427628 17318 net.cpp:150] Setting up L1_b4_cbr1_scale\nI0817 16:13:05.427642 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.427647 17318 net.cpp:165] Memory required for data: 345601500\nI0817 16:13:05.427659 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI0817 16:13:05.427671 17318 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI0817 16:13:05.427677 17318 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI0817 16:13:05.427685 17318 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.427695 17318 net.cpp:150] Setting up L1_b4_cbr1_relu\nI0817 16:13:05.427701 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.427709 17318 net.cpp:165] Memory required for data: 353793500\nI0817 16:13:05.427714 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI0817 16:13:05.427728 17318 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI0817 16:13:05.427736 17318 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI0817 16:13:05.427745 17318 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI0817 16:13:05.428145 17318 net.cpp:150] Setting up L1_b4_cbr2_conv\nI0817 16:13:05.428159 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.428165 17318 net.cpp:165] Memory required for data: 361985500\nI0817 16:13:05.428176 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI0817 16:13:05.428190 17318 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI0817 16:13:05.428196 17318 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI0817 16:13:05.428205 17318 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI0817 16:13:05.428514 17318 net.cpp:150] Setting up L1_b4_cbr2_bn\nI0817 16:13:05.428530 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.428537 17318 net.cpp:165] Memory required for data: 370177500\nI0817 16:13:05.428548 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:13:05.428557 17318 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI0817 16:13:05.428563 17318 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI0817 16:13:05.428570 17318 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.428634 17318 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI0817 16:13:05.428802 17318 net.cpp:150] Setting up L1_b4_cbr2_scale\nI0817 16:13:05.428817 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.428824 17318 net.cpp:165] Memory required for data: 378369500\nI0817 16:13:05.428833 17318 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI0817 16:13:05.428846 17318 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI0817 16:13:05.428853 17318 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI0817 16:13:05.428858 17318 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI0817 16:13:05.428874 17318 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI0817 16:13:05.428915 17318 net.cpp:150] Setting up L1_b4_sum_eltwise\nI0817 16:13:05.428925 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.428930 17318 net.cpp:165] Memory required for data: 386561500\nI0817 16:13:05.428938 17318 layer_factory.hpp:77] Creating layer L1_b4_relu\nI0817 16:13:05.428946 17318 net.cpp:100] Creating Layer L1_b4_relu\nI0817 16:13:05.428952 17318 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI0817 16:13:05.428959 17318 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.428975 17318 net.cpp:150] 
Setting up L1_b4_relu\nI0817 16:13:05.428984 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.428989 17318 net.cpp:165] Memory required for data: 394753500\nI0817 16:13:05.428994 17318 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.429002 17318 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.429006 17318 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI0817 16:13:05.429016 17318 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:13:05.429028 17318 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:13:05.429080 17318 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI0817 16:13:05.429092 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.429098 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.429103 17318 net.cpp:165] Memory required for data: 411137500\nI0817 16:13:05.429108 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI0817 16:13:05.429122 17318 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI0817 16:13:05.429129 17318 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI0817 16:13:05.429141 17318 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI0817 16:13:05.429529 17318 net.cpp:150] Setting up L1_b5_cbr1_conv\nI0817 16:13:05.429546 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.429551 17318 net.cpp:165] Memory required for data: 419329500\nI0817 16:13:05.429576 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI0817 16:13:05.429589 17318 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI0817 16:13:05.429595 17318 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI0817 16:13:05.429607 17318 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI0817 16:13:05.429903 17318 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI0817 16:13:05.429919 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.429924 17318 net.cpp:165] Memory required for data: 427521500\nI0817 16:13:05.429935 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:13:05.429944 17318 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI0817 16:13:05.429950 17318 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI0817 16:13:05.429957 17318 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.430078 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI0817 16:13:05.430258 17318 net.cpp:150] Setting up L1_b5_cbr1_scale\nI0817 16:13:05.430271 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.430279 17318 net.cpp:165] Memory required for data: 435713500\nI0817 16:13:05.430289 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI0817 16:13:05.430297 17318 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI0817 16:13:05.430304 17318 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI0817 16:13:05.430315 17318 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.430325 17318 net.cpp:150] Setting up L1_b5_cbr1_relu\nI0817 16:13:05.430335 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.430339 17318 net.cpp:165] Memory required for data: 443905500\nI0817 16:13:05.430344 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI0817 16:13:05.430366 17318 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI0817 16:13:05.430372 17318 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI0817 16:13:05.430380 17318 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI0817 16:13:05.430757 17318 net.cpp:150] Setting up L1_b5_cbr2_conv\nI0817 16:13:05.430771 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.430776 17318 net.cpp:165] Memory required for data: 452097500\nI0817 16:13:05.430786 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI0817 16:13:05.430799 17318 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI0817 16:13:05.430804 17318 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI0817 16:13:05.430812 17318 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI0817 16:13:05.431092 17318 net.cpp:150] Setting up L1_b5_cbr2_bn\nI0817 16:13:05.431109 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431115 17318 net.cpp:165] Memory required for data: 460289500\nI0817 16:13:05.431125 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:13:05.431134 17318 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI0817 16:13:05.431140 17318 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI0817 16:13:05.431149 17318 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.431205 17318 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI0817 16:13:05.431360 17318 net.cpp:150] Setting up L1_b5_cbr2_scale\nI0817 16:13:05.431373 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431378 17318 net.cpp:165] Memory required for data: 468481500\nI0817 16:13:05.431387 17318 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI0817 16:13:05.431399 17318 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI0817 16:13:05.431406 17318 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI0817 16:13:05.431412 17318 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI0817 16:13:05.431423 17318 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI0817 16:13:05.431457 17318 net.cpp:150] Setting up L1_b5_sum_eltwise\nI0817 16:13:05.431466 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431471 17318 net.cpp:165] Memory required for data: 476673500\nI0817 16:13:05.431476 17318 layer_factory.hpp:77] Creating layer L1_b5_relu\nI0817 16:13:05.431491 17318 net.cpp:100] Creating Layer L1_b5_relu\nI0817 16:13:05.431498 17318 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI0817 16:13:05.431504 17318 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.431514 17318 net.cpp:150] Setting up L1_b5_relu\nI0817 16:13:05.431520 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431525 17318 net.cpp:165] Memory required for data: 484865500\nI0817 16:13:05.431530 17318 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.431537 17318 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.431542 17318 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI0817 16:13:05.431550 17318 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:13:05.431560 17318 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:13:05.431632 17318 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI0817 16:13:05.431645 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431653 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.431656 17318 net.cpp:165] Memory required for data: 501249500\nI0817 16:13:05.431663 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI0817 16:13:05.431676 17318 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI0817 16:13:05.431684 17318 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI0817 16:13:05.431692 17318 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI0817 16:13:05.432054 17318 net.cpp:150] Setting up L1_b6_cbr1_conv\nI0817 16:13:05.432070 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.432081 17318 net.cpp:165] Memory required for data: 509441500\nI0817 16:13:05.432091 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI0817 16:13:05.432103 17318 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI0817 16:13:05.432111 17318 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI0817 16:13:05.432118 17318 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI0817 16:13:05.432433 17318 net.cpp:150] Setting up L1_b6_cbr1_bn\nI0817 16:13:05.432449 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.432454 17318 net.cpp:165] Memory required for data: 517633500\nI0817 16:13:05.432466 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:13:05.432474 17318 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI0817 16:13:05.432481 17318 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI0817 16:13:05.432488 17318 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.432574 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI0817 16:13:05.432744 17318 net.cpp:150] Setting up L1_b6_cbr1_scale\nI0817 16:13:05.432757 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.432762 17318 net.cpp:165] Memory required for data: 525825500\nI0817 16:13:05.432772 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI0817 16:13:05.432780 17318 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI0817 16:13:05.432790 17318 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI0817 16:13:05.432809 17318 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.432826 17318 net.cpp:150] Setting up L1_b6_cbr1_relu\nI0817 16:13:05.432833 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.432838 17318 net.cpp:165] Memory required for data: 534017500\nI0817 16:13:05.432843 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI0817 16:13:05.432858 17318 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI0817 16:13:05.432864 17318 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI0817 16:13:05.432873 17318 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI0817 16:13:05.433228 17318 net.cpp:150] Setting up L1_b6_cbr2_conv\nI0817 16:13:05.433243 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.433248 17318 net.cpp:165] Memory required for data: 542209500\nI0817 16:13:05.433257 17318 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI0817 16:13:05.433267 17318 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI0817 16:13:05.433272 17318 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI0817 16:13:05.433284 17318 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI0817 16:13:05.433560 17318 net.cpp:150] Setting up L1_b6_cbr2_bn\nI0817 16:13:05.433574 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.433579 17318 net.cpp:165] Memory required for data: 550401500\nI0817 16:13:05.433589 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:13:05.433601 17318 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI0817 16:13:05.433609 17318 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI0817 16:13:05.433615 17318 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.433672 17318 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI0817 16:13:05.433837 17318 net.cpp:150] Setting up L1_b6_cbr2_scale\nI0817 16:13:05.433851 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.433857 17318 net.cpp:165] Memory required for data: 558593500\nI0817 16:13:05.433866 17318 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI0817 16:13:05.433888 17318 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI0817 16:13:05.433894 17318 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI0817 16:13:05.433902 17318 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI0817 16:13:05.433909 17318 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI0817 16:13:05.433948 17318 net.cpp:150] Setting up L1_b6_sum_eltwise\nI0817 16:13:05.433959 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.433964 17318 net.cpp:165] Memory required for data: 566785500\nI0817 16:13:05.433981 17318 layer_factory.hpp:77] Creating layer L1_b6_relu\nI0817 16:13:05.433990 17318 net.cpp:100] Creating Layer L1_b6_relu\nI0817 16:13:05.433996 17318 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI0817 16:13:05.434003 17318 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.434013 17318 net.cpp:150] Setting up L1_b6_relu\nI0817 16:13:05.434020 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.434025 17318 net.cpp:165] Memory required for data: 574977500\nI0817 16:13:05.434029 17318 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.434036 17318 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.434041 17318 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI0817 16:13:05.434052 17318 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:13:05.434063 17318 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:13:05.434111 17318 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI0817 16:13:05.434121 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.434128 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.434132 17318 net.cpp:165] Memory required for data: 591361500\nI0817 16:13:05.434137 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI0817 16:13:05.434151 17318 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI0817 16:13:05.434159 17318 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI0817 16:13:05.434167 17318 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI0817 16:13:05.434523 17318 net.cpp:150] Setting up L1_b7_cbr1_conv\nI0817 16:13:05.434538 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.434543 17318 net.cpp:165] Memory required for data: 599553500\nI0817 16:13:05.434552 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI0817 16:13:05.434564 17318 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI0817 16:13:05.434571 17318 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI0817 16:13:05.434581 17318 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI0817 16:13:05.434856 17318 net.cpp:150] Setting up L1_b7_cbr1_bn\nI0817 16:13:05.434870 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.434875 17318 net.cpp:165] Memory required for data: 607745500\nI0817 16:13:05.434885 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:13:05.434895 17318 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI0817 16:13:05.434900 17318 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI0817 16:13:05.434907 17318 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.434973 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI0817 16:13:05.435137 17318 net.cpp:150] Setting up L1_b7_cbr1_scale\nI0817 16:13:05.435150 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.435155 17318 net.cpp:165] Memory required for data: 615937500\nI0817 16:13:05.435164 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI0817 16:13:05.435175 17318 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI0817 16:13:05.435181 17318 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI0817 16:13:05.435189 17318 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.435199 17318 net.cpp:150] Setting up L1_b7_cbr1_relu\nI0817 16:13:05.435205 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.435210 17318 net.cpp:165] Memory required for data: 624129500\nI0817 16:13:05.435215 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI0817 16:13:05.435230 17318 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI0817 16:13:05.435236 17318 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI0817 16:13:05.435246 17318 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI0817 16:13:05.435608 17318 net.cpp:150] Setting up L1_b7_cbr2_conv\nI0817 16:13:05.435622 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.435633 17318 
net.cpp:165] Memory required for data: 632321500\nI0817 16:13:05.435643 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI0817 16:13:05.435674 17318 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI0817 16:13:05.435683 17318 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI0817 16:13:05.435693 17318 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI0817 16:13:05.435976 17318 net.cpp:150] Setting up L1_b7_cbr2_bn\nI0817 16:13:05.435991 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.435997 17318 net.cpp:165] Memory required for data: 640513500\nI0817 16:13:05.436007 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:13:05.436015 17318 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI0817 16:13:05.436022 17318 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI0817 16:13:05.436029 17318 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.436091 17318 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI0817 16:13:05.436254 17318 net.cpp:150] Setting up L1_b7_cbr2_scale\nI0817 16:13:05.436267 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436272 17318 net.cpp:165] Memory required for data: 648705500\nI0817 16:13:05.436281 17318 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI0817 16:13:05.436290 17318 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI0817 16:13:05.436296 17318 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI0817 16:13:05.436303 17318 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI0817 16:13:05.436314 17318 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI0817 16:13:05.436347 17318 net.cpp:150] Setting up L1_b7_sum_eltwise\nI0817 16:13:05.436357 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436362 17318 net.cpp:165] Memory required for data: 656897500\nI0817 16:13:05.436367 17318 layer_factory.hpp:77] Creating layer L1_b7_relu\nI0817 16:13:05.436378 17318 net.cpp:100] Creating 
Layer L1_b7_relu\nI0817 16:13:05.436384 17318 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI0817 16:13:05.436391 17318 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.436400 17318 net.cpp:150] Setting up L1_b7_relu\nI0817 16:13:05.436408 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436413 17318 net.cpp:165] Memory required for data: 665089500\nI0817 16:13:05.436417 17318 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.436424 17318 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.436429 17318 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI0817 16:13:05.436436 17318 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:13:05.436446 17318 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:13:05.436496 17318 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI0817 16:13:05.436506 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436512 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436517 17318 net.cpp:165] Memory required for data: 681473500\nI0817 16:13:05.436522 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI0817 16:13:05.436539 17318 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI0817 16:13:05.436547 17318 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI0817 16:13:05.436555 17318 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI0817 16:13:05.436910 17318 net.cpp:150] Setting up L1_b8_cbr1_conv\nI0817 16:13:05.436924 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.436930 17318 net.cpp:165] Memory required for data: 689665500\nI0817 16:13:05.436939 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI0817 16:13:05.436951 17318 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI0817 16:13:05.436959 17318 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI0817 16:13:05.436981 17318 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI0817 16:13:05.437260 17318 net.cpp:150] Setting up L1_b8_cbr1_bn\nI0817 16:13:05.437274 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.437279 17318 net.cpp:165] Memory required for data: 697857500\nI0817 16:13:05.437289 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:13:05.437299 17318 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI0817 16:13:05.437304 17318 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI0817 16:13:05.437311 17318 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.437373 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI0817 16:13:05.437556 17318 net.cpp:150] Setting up L1_b8_cbr1_scale\nI0817 16:13:05.437572 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.437577 17318 net.cpp:165] Memory required for data: 706049500\nI0817 16:13:05.437585 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI0817 16:13:05.437593 17318 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI0817 16:13:05.437600 17318 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI0817 16:13:05.437610 17318 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.437620 17318 net.cpp:150] Setting up L1_b8_cbr1_relu\nI0817 16:13:05.437628 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.437633 17318 net.cpp:165] Memory required for data: 714241500\nI0817 16:13:05.437638 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI0817 16:13:05.437650 17318 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI0817 16:13:05.437657 17318 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI0817 16:13:05.437665 17318 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI0817 16:13:05.438037 17318 net.cpp:150] Setting up L1_b8_cbr2_conv\nI0817 16:13:05.438052 17318 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438057 17318 net.cpp:165] Memory required for data: 722433500\nI0817 16:13:05.438066 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI0817 16:13:05.438078 17318 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI0817 16:13:05.438086 17318 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI0817 16:13:05.438093 17318 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI0817 16:13:05.438379 17318 net.cpp:150] Setting up L1_b8_cbr2_bn\nI0817 16:13:05.438395 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438400 17318 net.cpp:165] Memory required for data: 730625500\nI0817 16:13:05.438410 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:13:05.438418 17318 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI0817 16:13:05.438426 17318 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI0817 16:13:05.438432 17318 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.438490 17318 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI0817 16:13:05.438655 17318 net.cpp:150] Setting up L1_b8_cbr2_scale\nI0817 16:13:05.438668 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438674 17318 net.cpp:165] Memory required for data: 738817500\nI0817 16:13:05.438683 17318 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI0817 16:13:05.438691 17318 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI0817 16:13:05.438697 17318 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI0817 16:13:05.438704 17318 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI0817 16:13:05.438715 17318 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI0817 16:13:05.438750 17318 net.cpp:150] Setting up L1_b8_sum_eltwise\nI0817 16:13:05.438760 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438765 17318 net.cpp:165] Memory required for data: 747009500\nI0817 16:13:05.438769 17318 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI0817 16:13:05.438781 17318 net.cpp:100] Creating Layer L1_b8_relu\nI0817 16:13:05.438786 17318 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI0817 16:13:05.438793 17318 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.438809 17318 net.cpp:150] Setting up L1_b8_relu\nI0817 16:13:05.438817 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438822 17318 net.cpp:165] Memory required for data: 755201500\nI0817 16:13:05.438827 17318 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.438833 17318 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.438839 17318 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI0817 16:13:05.438846 17318 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:13:05.438856 17318 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:13:05.438906 17318 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI0817 16:13:05.438916 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438923 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.438927 17318 net.cpp:165] Memory required for data: 771585500\nI0817 16:13:05.438932 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI0817 16:13:05.438946 17318 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI0817 16:13:05.438953 17318 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI0817 16:13:05.438962 17318 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI0817 16:13:05.439337 17318 net.cpp:150] Setting up L1_b9_cbr1_conv\nI0817 16:13:05.439352 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.439358 17318 net.cpp:165] Memory required for data: 779777500\nI0817 16:13:05.439368 17318 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI0817 16:13:05.439379 17318 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI0817 16:13:05.439386 17318 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI0817 16:13:05.439394 17318 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI0817 16:13:05.439680 17318 net.cpp:150] Setting up L1_b9_cbr1_bn\nI0817 16:13:05.439694 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.439700 17318 net.cpp:165] Memory required for data: 787969500\nI0817 16:13:05.439710 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:13:05.439719 17318 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI0817 16:13:05.439725 17318 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI0817 16:13:05.439735 17318 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.439795 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI0817 16:13:05.439961 17318 net.cpp:150] Setting up L1_b9_cbr1_scale\nI0817 16:13:05.439981 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.439987 17318 net.cpp:165] Memory required for data: 796161500\nI0817 16:13:05.439996 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI0817 16:13:05.440006 17318 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI0817 16:13:05.440011 17318 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI0817 16:13:05.440021 17318 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.440032 17318 net.cpp:150] Setting up L1_b9_cbr1_relu\nI0817 16:13:05.440038 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.440043 17318 net.cpp:165] Memory required for data: 804353500\nI0817 16:13:05.440048 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI0817 16:13:05.440063 17318 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI0817 16:13:05.440069 17318 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI0817 16:13:05.440078 17318 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI0817 16:13:05.440439 17318 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI0817 16:13:05.440454 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.440459 17318 net.cpp:165] Memory required for data: 812545500\nI0817 16:13:05.440467 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI0817 16:13:05.440477 17318 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI0817 16:13:05.440486 17318 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI0817 16:13:05.440501 17318 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI0817 16:13:05.440781 17318 net.cpp:150] Setting up L1_b9_cbr2_bn\nI0817 16:13:05.440794 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.440799 17318 net.cpp:165] Memory required for data: 820737500\nI0817 16:13:05.440831 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:13:05.440845 17318 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI0817 16:13:05.440851 17318 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI0817 16:13:05.440860 17318 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.440919 17318 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI0817 16:13:05.441088 17318 net.cpp:150] Setting up L1_b9_cbr2_scale\nI0817 16:13:05.441102 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.441107 17318 net.cpp:165] Memory required for data: 828929500\nI0817 16:13:05.441118 17318 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI0817 16:13:05.441126 17318 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI0817 16:13:05.441133 17318 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI0817 16:13:05.441139 17318 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI0817 16:13:05.441148 17318 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI0817 16:13:05.441185 17318 net.cpp:150] Setting up L1_b9_sum_eltwise\nI0817 16:13:05.441195 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.441200 17318 net.cpp:165] Memory required for 
data: 837121500\nI0817 16:13:05.441205 17318 layer_factory.hpp:77] Creating layer L1_b9_relu\nI0817 16:13:05.441212 17318 net.cpp:100] Creating Layer L1_b9_relu\nI0817 16:13:05.441218 17318 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI0817 16:13:05.441227 17318 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.441237 17318 net.cpp:150] Setting up L1_b9_relu\nI0817 16:13:05.441244 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.441249 17318 net.cpp:165] Memory required for data: 845313500\nI0817 16:13:05.441253 17318 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.441260 17318 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.441267 17318 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI0817 16:13:05.441273 17318 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:13:05.441283 17318 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:13:05.441335 17318 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI0817 16:13:05.441345 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.441352 17318 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI0817 16:13:05.441356 17318 net.cpp:165] Memory required for data: 861697500\nI0817 16:13:05.441361 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI0817 16:13:05.441375 17318 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI0817 16:13:05.441381 17318 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI0817 16:13:05.441390 17318 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI0817 16:13:05.442175 17318 net.cpp:150] Setting up L2_b1_cbr1_conv\nI0817 16:13:05.442198 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.442204 17318 net.cpp:165] Memory required for data: 
863745500\nI0817 16:13:05.442214 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI0817 16:13:05.442224 17318 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI0817 16:13:05.442230 17318 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI0817 16:13:05.442242 17318 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI0817 16:13:05.442514 17318 net.cpp:150] Setting up L2_b1_cbr1_bn\nI0817 16:13:05.442528 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.442533 17318 net.cpp:165] Memory required for data: 865793500\nI0817 16:13:05.442553 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:13:05.442561 17318 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI0817 16:13:05.442567 17318 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI0817 16:13:05.442575 17318 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.442636 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI0817 16:13:05.442797 17318 net.cpp:150] Setting up L2_b1_cbr1_scale\nI0817 16:13:05.442813 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.442819 17318 net.cpp:165] Memory required for data: 867841500\nI0817 16:13:05.442828 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI0817 16:13:05.442836 17318 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI0817 16:13:05.442842 17318 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI0817 16:13:05.442849 17318 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.442859 17318 net.cpp:150] Setting up L2_b1_cbr1_relu\nI0817 16:13:05.442867 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.442872 17318 net.cpp:165] Memory required for data: 869889500\nI0817 16:13:05.442876 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI0817 16:13:05.442890 17318 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI0817 16:13:05.442896 17318 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI0817 16:13:05.442908 17318 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI0817 16:13:05.443310 17318 net.cpp:150] Setting up L2_b1_cbr2_conv\nI0817 16:13:05.443326 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.443332 17318 net.cpp:165] Memory required for data: 871937500\nI0817 16:13:05.443341 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI0817 16:13:05.443353 17318 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI0817 16:13:05.443366 17318 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI0817 16:13:05.443385 17318 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI0817 16:13:05.443692 17318 net.cpp:150] Setting up L2_b1_cbr2_bn\nI0817 16:13:05.443707 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.443713 17318 net.cpp:165] Memory required for data: 873985500\nI0817 16:13:05.443724 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:13:05.443733 17318 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI0817 16:13:05.443739 17318 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI0817 16:13:05.443747 17318 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.443809 17318 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI0817 16:13:05.443980 17318 net.cpp:150] Setting up L2_b1_cbr2_scale\nI0817 16:13:05.443995 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.444000 17318 net.cpp:165] Memory required for data: 876033500\nI0817 16:13:05.444010 17318 layer_factory.hpp:77] Creating layer L2_b1_pool\nI0817 16:13:05.444023 17318 net.cpp:100] Creating Layer L2_b1_pool\nI0817 16:13:05.444031 17318 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI0817 16:13:05.444039 17318 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI0817 16:13:05.444072 17318 net.cpp:150] Setting up L2_b1_pool\nI0817 16:13:05.444082 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.444087 17318 net.cpp:165] Memory required for data: 878081500\nI0817 16:13:05.444092 17318 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI0817 16:13:05.444103 17318 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI0817 16:13:05.444110 17318 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI0817 16:13:05.444116 17318 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI0817 16:13:05.444124 17318 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI0817 16:13:05.444157 17318 net.cpp:150] Setting up L2_b1_sum_eltwise\nI0817 16:13:05.444167 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.444172 17318 net.cpp:165] Memory required for data: 880129500\nI0817 16:13:05.444177 17318 layer_factory.hpp:77] Creating layer L2_b1_relu\nI0817 16:13:05.444187 17318 net.cpp:100] Creating Layer L2_b1_relu\nI0817 16:13:05.444201 17318 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI0817 16:13:05.444209 17318 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.444218 17318 net.cpp:150] Setting up L2_b1_relu\nI0817 16:13:05.444226 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.444231 17318 net.cpp:165] Memory required for data: 882177500\nI0817 16:13:05.444236 17318 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI0817 16:13:05.444245 17318 net.cpp:100] Creating Layer L2_b1_zeros\nI0817 16:13:05.444252 17318 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI0817 16:13:05.446589 17318 net.cpp:150] Setting up L2_b1_zeros\nI0817 16:13:05.446619 17318 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI0817 16:13:05.446629 17318 net.cpp:165] Memory required for data: 884225500\nI0817 16:13:05.446637 17318 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI0817 16:13:05.446652 17318 net.cpp:100] Creating Layer L2_b1_concat0\nI0817 16:13:05.446666 17318 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI0817 16:13:05.446677 17318 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI0817 16:13:05.446693 17318 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI0817 16:13:05.446740 17318 net.cpp:150] Setting up 
L2_b1_concat0\nI0817 16:13:05.446750 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.446755 17318 net.cpp:165] Memory required for data: 888321500\nI0817 16:13:05.446760 17318 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.446771 17318 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.446779 17318 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI0817 16:13:05.446785 17318 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:13:05.446795 17318 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:13:05.446848 17318 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI0817 16:13:05.446858 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.446864 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.446869 17318 net.cpp:165] Memory required for data: 896513500\nI0817 16:13:05.446874 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI0817 16:13:05.446889 17318 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI0817 16:13:05.446895 17318 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI0817 16:13:05.446907 17318 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI0817 16:13:05.447437 17318 net.cpp:150] Setting up L2_b2_cbr1_conv\nI0817 16:13:05.447451 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.447456 17318 net.cpp:165] Memory required for data: 900609500\nI0817 16:13:05.447466 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI0817 16:13:05.447479 17318 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI0817 16:13:05.447484 17318 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI0817 16:13:05.447494 17318 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI0817 16:13:05.447769 17318 net.cpp:150] Setting up L2_b2_cbr1_bn\nI0817 16:13:05.447785 17318 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI0817 16:13:05.447790 17318 net.cpp:165] Memory required for data: 904705500\nI0817 16:13:05.447801 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:13:05.447810 17318 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI0817 16:13:05.447816 17318 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI0817 16:13:05.447824 17318 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.447882 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI0817 16:13:05.448051 17318 net.cpp:150] Setting up L2_b2_cbr1_scale\nI0817 16:13:05.448065 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.448071 17318 net.cpp:165] Memory required for data: 908801500\nI0817 16:13:05.448079 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI0817 16:13:05.448087 17318 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI0817 16:13:05.448101 17318 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI0817 16:13:05.448112 17318 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.448122 17318 net.cpp:150] Setting up L2_b2_cbr1_relu\nI0817 16:13:05.448129 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.448133 17318 net.cpp:165] Memory required for data: 912897500\nI0817 16:13:05.448138 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI0817 16:13:05.448149 17318 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI0817 16:13:05.448155 17318 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI0817 16:13:05.448166 17318 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI0817 16:13:05.448664 17318 net.cpp:150] Setting up L2_b2_cbr2_conv\nI0817 16:13:05.448679 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.448684 17318 net.cpp:165] Memory required for data: 916993500\nI0817 16:13:05.448693 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI0817 16:13:05.448703 17318 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI0817 16:13:05.448709 
17318 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI0817 16:13:05.448720 17318 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI0817 16:13:05.448992 17318 net.cpp:150] Setting up L2_b2_cbr2_bn\nI0817 16:13:05.449007 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449012 17318 net.cpp:165] Memory required for data: 921089500\nI0817 16:13:05.449023 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:13:05.449033 17318 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI0817 16:13:05.449040 17318 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI0817 16:13:05.449048 17318 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.449105 17318 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI0817 16:13:05.449267 17318 net.cpp:150] Setting up L2_b2_cbr2_scale\nI0817 16:13:05.449280 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449285 17318 net.cpp:165] Memory required for data: 925185500\nI0817 16:13:05.449295 17318 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI0817 16:13:05.449306 17318 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI0817 16:13:05.449313 17318 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI0817 16:13:05.449321 17318 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI0817 16:13:05.449327 17318 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI0817 16:13:05.449358 17318 net.cpp:150] Setting up L2_b2_sum_eltwise\nI0817 16:13:05.449368 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449373 17318 net.cpp:165] Memory required for data: 929281500\nI0817 16:13:05.449378 17318 layer_factory.hpp:77] Creating layer L2_b2_relu\nI0817 16:13:05.449386 17318 net.cpp:100] Creating Layer L2_b2_relu\nI0817 16:13:05.449393 17318 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI0817 16:13:05.449403 17318 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.449412 17318 net.cpp:150] 
Setting up L2_b2_relu\nI0817 16:13:05.449419 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449424 17318 net.cpp:165] Memory required for data: 933377500\nI0817 16:13:05.449429 17318 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.449436 17318 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.449441 17318 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI0817 16:13:05.449448 17318 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:13:05.449458 17318 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:13:05.449509 17318 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI0817 16:13:05.449522 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449528 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.449532 17318 net.cpp:165] Memory required for data: 941569500\nI0817 16:13:05.449544 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI0817 16:13:05.449555 17318 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI0817 16:13:05.449563 17318 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI0817 16:13:05.449574 17318 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI0817 16:13:05.450076 17318 net.cpp:150] Setting up L2_b3_cbr1_conv\nI0817 16:13:05.450091 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.450096 17318 net.cpp:165] Memory required for data: 945665500\nI0817 16:13:05.450105 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI0817 16:13:05.450114 17318 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI0817 16:13:05.450120 17318 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI0817 16:13:05.450131 17318 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI0817 16:13:05.450397 17318 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI0817 16:13:05.450410 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.450417 17318 net.cpp:165] Memory required for data: 949761500\nI0817 16:13:05.450426 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:13:05.450438 17318 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI0817 16:13:05.450444 17318 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI0817 16:13:05.450451 17318 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.450508 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI0817 16:13:05.450666 17318 net.cpp:150] Setting up L2_b3_cbr1_scale\nI0817 16:13:05.450680 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.450685 17318 net.cpp:165] Memory required for data: 953857500\nI0817 16:13:05.450693 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI0817 16:13:05.450704 17318 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI0817 16:13:05.450711 17318 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI0817 16:13:05.450717 17318 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.450727 17318 net.cpp:150] Setting up L2_b3_cbr1_relu\nI0817 16:13:05.450738 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.450743 17318 net.cpp:165] Memory required for data: 957953500\nI0817 16:13:05.450748 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI0817 16:13:05.450759 17318 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI0817 16:13:05.450765 17318 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI0817 16:13:05.450776 17318 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI0817 16:13:05.451277 17318 net.cpp:150] Setting up L2_b3_cbr2_conv\nI0817 16:13:05.451292 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.451297 17318 net.cpp:165] Memory required for data: 962049500\nI0817 16:13:05.451306 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI0817 16:13:05.451315 17318 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI0817 16:13:05.451321 17318 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI0817 16:13:05.451333 17318 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI0817 16:13:05.451602 17318 net.cpp:150] Setting up L2_b3_cbr2_bn\nI0817 16:13:05.451616 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.451620 17318 net.cpp:165] Memory required for data: 966145500\nI0817 16:13:05.451630 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:13:05.451642 17318 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI0817 16:13:05.451648 17318 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI0817 16:13:05.451655 17318 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.451714 17318 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI0817 16:13:05.451869 17318 net.cpp:150] Setting up L2_b3_cbr2_scale\nI0817 16:13:05.451882 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.451889 17318 net.cpp:165] Memory required for data: 970241500\nI0817 16:13:05.451897 17318 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI0817 16:13:05.451910 17318 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI0817 16:13:05.451921 17318 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI0817 16:13:05.451930 17318 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI0817 16:13:05.451936 17318 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI0817 16:13:05.451973 17318 net.cpp:150] Setting up L2_b3_sum_eltwise\nI0817 16:13:05.451985 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.451990 17318 net.cpp:165] Memory required for data: 974337500\nI0817 16:13:05.451997 17318 layer_factory.hpp:77] Creating layer L2_b3_relu\nI0817 16:13:05.452018 17318 net.cpp:100] Creating Layer L2_b3_relu\nI0817 16:13:05.452025 17318 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI0817 16:13:05.452033 17318 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.452042 17318 net.cpp:150] Setting up L2_b3_relu\nI0817 16:13:05.452049 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.452054 17318 net.cpp:165] Memory required for data: 978433500\nI0817 16:13:05.452059 17318 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.452069 17318 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.452075 17318 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI0817 16:13:05.452083 17318 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:13:05.452093 17318 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:13:05.452142 17318 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI0817 16:13:05.452157 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.452164 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.452169 17318 net.cpp:165] Memory required for data: 986625500\nI0817 16:13:05.452174 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI0817 16:13:05.452185 17318 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI0817 16:13:05.452191 17318 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI0817 16:13:05.452200 17318 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI0817 16:13:05.452698 17318 net.cpp:150] Setting up L2_b4_cbr1_conv\nI0817 16:13:05.452713 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.452718 17318 net.cpp:165] Memory required for data: 990721500\nI0817 16:13:05.452728 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI0817 16:13:05.452740 17318 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI0817 16:13:05.452747 17318 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI0817 16:13:05.452755 17318 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI0817 16:13:05.453039 17318 net.cpp:150] Setting up L2_b4_cbr1_bn\nI0817 16:13:05.453053 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.453058 17318 net.cpp:165] Memory required for data: 994817500\nI0817 16:13:05.453069 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:13:05.453078 17318 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI0817 16:13:05.453083 17318 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI0817 16:13:05.453094 17318 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.453153 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI0817 16:13:05.453312 17318 net.cpp:150] Setting up L2_b4_cbr1_scale\nI0817 16:13:05.453325 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.453331 17318 net.cpp:165] Memory required for data: 998913500\nI0817 16:13:05.453341 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI0817 16:13:05.453348 17318 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI0817 16:13:05.453354 17318 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI0817 16:13:05.453364 17318 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.453373 17318 net.cpp:150] Setting up L2_b4_cbr1_relu\nI0817 16:13:05.453382 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.453385 17318 net.cpp:165] Memory required for data: 1003009500\nI0817 16:13:05.453398 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI0817 16:13:05.453413 17318 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI0817 16:13:05.453419 17318 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI0817 16:13:05.453428 17318 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI0817 16:13:05.453927 17318 net.cpp:150] Setting up L2_b4_cbr2_conv\nI0817 16:13:05.453940 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.453946 17318 net.cpp:165] Memory required for data: 1007105500\nI0817 16:13:05.453955 17318 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI0817 16:13:05.453971 17318 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI0817 16:13:05.453979 17318 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI0817 16:13:05.453989 17318 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI0817 16:13:05.454257 17318 net.cpp:150] Setting up L2_b4_cbr2_bn\nI0817 16:13:05.454269 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454275 17318 net.cpp:165] Memory required for data: 1011201500\nI0817 16:13:05.454285 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:13:05.454294 17318 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI0817 16:13:05.454300 17318 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI0817 16:13:05.454310 17318 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.454368 17318 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI0817 16:13:05.454531 17318 net.cpp:150] Setting up L2_b4_cbr2_scale\nI0817 16:13:05.454545 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454550 17318 net.cpp:165] Memory required for data: 1015297500\nI0817 16:13:05.454558 17318 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI0817 16:13:05.454567 17318 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI0817 16:13:05.454574 17318 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI0817 16:13:05.454581 17318 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI0817 16:13:05.454593 17318 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI0817 16:13:05.454622 17318 net.cpp:150] Setting up L2_b4_sum_eltwise\nI0817 16:13:05.454632 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454637 17318 net.cpp:165] Memory required for data: 1019393500\nI0817 16:13:05.454641 17318 layer_factory.hpp:77] Creating layer L2_b4_relu\nI0817 16:13:05.454651 17318 net.cpp:100] Creating Layer L2_b4_relu\nI0817 16:13:05.454658 17318 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI0817 16:13:05.454665 17318 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.454674 17318 net.cpp:150] Setting up L2_b4_relu\nI0817 16:13:05.454681 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454685 17318 net.cpp:165] Memory required for data: 1023489500\nI0817 16:13:05.454690 17318 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.454697 17318 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.454704 17318 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI0817 16:13:05.454713 17318 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:13:05.454723 17318 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:13:05.454771 17318 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI0817 16:13:05.454782 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454789 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.454794 17318 net.cpp:165] Memory required for data: 1031681500\nI0817 16:13:05.454799 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI0817 16:13:05.454813 17318 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI0817 16:13:05.454819 17318 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI0817 16:13:05.454828 17318 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI0817 16:13:05.455344 17318 net.cpp:150] Setting up L2_b5_cbr1_conv\nI0817 16:13:05.455359 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.455364 17318 net.cpp:165] Memory required for data: 1035777500\nI0817 16:13:05.455374 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI0817 16:13:05.455386 17318 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI0817 16:13:05.455392 17318 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI0817 16:13:05.455400 17318 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI0817 16:13:05.455672 17318 net.cpp:150] Setting up L2_b5_cbr1_bn\nI0817 16:13:05.455684 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.455689 17318 net.cpp:165] Memory required for data: 1039873500\nI0817 16:13:05.455700 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:13:05.455708 17318 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI0817 16:13:05.455714 17318 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI0817 16:13:05.455725 17318 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.455783 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI0817 16:13:05.455943 17318 net.cpp:150] Setting up L2_b5_cbr1_scale\nI0817 16:13:05.455956 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.455961 17318 net.cpp:165] Memory required for data: 1043969500\nI0817 16:13:05.455976 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI0817 16:13:05.455984 17318 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI0817 16:13:05.455991 17318 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI0817 16:13:05.456001 17318 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.456010 17318 net.cpp:150] Setting up L2_b5_cbr1_relu\nI0817 16:13:05.456017 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.456022 17318 net.cpp:165] Memory required for data: 1048065500\nI0817 16:13:05.456027 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI0817 16:13:05.456040 17318 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI0817 16:13:05.456046 17318 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI0817 16:13:05.456055 17318 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI0817 16:13:05.456549 17318 net.cpp:150] Setting up L2_b5_cbr2_conv\nI0817 16:13:05.456563 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.456568 17318 
net.cpp:165] Memory required for data: 1052161500\nI0817 16:13:05.456578 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI0817 16:13:05.456593 17318 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI0817 16:13:05.456599 17318 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI0817 16:13:05.456607 17318 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI0817 16:13:05.456878 17318 net.cpp:150] Setting up L2_b5_cbr2_bn\nI0817 16:13:05.456892 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.456897 17318 net.cpp:165] Memory required for data: 1056257500\nI0817 16:13:05.456907 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:13:05.456917 17318 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI0817 16:13:05.456923 17318 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI0817 16:13:05.456930 17318 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.456996 17318 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI0817 16:13:05.457155 17318 net.cpp:150] Setting up L2_b5_cbr2_scale\nI0817 16:13:05.457171 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.457176 17318 net.cpp:165] Memory required for data: 1060353500\nI0817 16:13:05.457185 17318 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI0817 16:13:05.457195 17318 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI0817 16:13:05.457201 17318 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI0817 16:13:05.457207 17318 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI0817 16:13:05.457216 17318 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI0817 16:13:05.457247 17318 net.cpp:150] Setting up L2_b5_sum_eltwise\nI0817 16:13:05.457262 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.457267 17318 net.cpp:165] Memory required for data: 1064449500\nI0817 16:13:05.457273 17318 layer_factory.hpp:77] Creating layer L2_b5_relu\nI0817 16:13:05.457281 17318 net.cpp:100] Creating 
Layer L2_b5_relu\nI0817 16:13:05.457286 17318 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI0817 16:13:05.457296 17318 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.457306 17318 net.cpp:150] Setting up L2_b5_relu\nI0817 16:13:05.457314 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.457319 17318 net.cpp:165] Memory required for data: 1068545500\nI0817 16:13:05.457322 17318 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.457330 17318 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.457335 17318 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI0817 16:13:05.457345 17318 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:13:05.457355 17318 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:13:05.457404 17318 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI0817 16:13:05.457417 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.457423 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.457427 17318 net.cpp:165] Memory required for data: 1076737500\nI0817 16:13:05.457433 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI0817 16:13:05.457448 17318 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI0817 16:13:05.457454 17318 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI0817 16:13:05.457463 17318 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI0817 16:13:05.458945 17318 net.cpp:150] Setting up L2_b6_cbr1_conv\nI0817 16:13:05.458963 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.458974 17318 net.cpp:165] Memory required for data: 1080833500\nI0817 16:13:05.458984 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI0817 16:13:05.458998 17318 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI0817 16:13:05.459004 17318 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI0817 16:13:05.459013 17318 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI0817 16:13:05.459286 17318 net.cpp:150] Setting up L2_b6_cbr1_bn\nI0817 16:13:05.459302 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.459308 17318 net.cpp:165] Memory required for data: 1084929500\nI0817 16:13:05.459318 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:13:05.459327 17318 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI0817 16:13:05.459333 17318 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI0817 16:13:05.459342 17318 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.459400 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI0817 16:13:05.459563 17318 net.cpp:150] Setting up L2_b6_cbr1_scale\nI0817 16:13:05.459575 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.459580 17318 net.cpp:165] Memory required for data: 1089025500\nI0817 16:13:05.459589 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI0817 16:13:05.459599 17318 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI0817 16:13:05.459604 17318 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI0817 16:13:05.459614 17318 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.459625 17318 net.cpp:150] Setting up L2_b6_cbr1_relu\nI0817 16:13:05.459631 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.459636 17318 net.cpp:165] Memory required for data: 1093121500\nI0817 16:13:05.459641 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI0817 16:13:05.459656 17318 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI0817 16:13:05.459661 17318 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI0817 16:13:05.459671 17318 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI0817 16:13:05.460180 17318 net.cpp:150] Setting up L2_b6_cbr2_conv\nI0817 16:13:05.460194 17318 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.460199 17318 net.cpp:165] Memory required for data: 1097217500\nI0817 16:13:05.460209 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI0817 16:13:05.460222 17318 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI0817 16:13:05.460228 17318 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI0817 16:13:05.460237 17318 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI0817 16:13:05.460510 17318 net.cpp:150] Setting up L2_b6_cbr2_bn\nI0817 16:13:05.460522 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.460527 17318 net.cpp:165] Memory required for data: 1101313500\nI0817 16:13:05.460538 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:13:05.460552 17318 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI0817 16:13:05.460558 17318 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI0817 16:13:05.460566 17318 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.460624 17318 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI0817 16:13:05.460784 17318 net.cpp:150] Setting up L2_b6_cbr2_scale\nI0817 16:13:05.460798 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.460803 17318 net.cpp:165] Memory required for data: 1105409500\nI0817 16:13:05.460811 17318 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI0817 16:13:05.460824 17318 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI0817 16:13:05.460830 17318 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI0817 16:13:05.460837 17318 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI0817 16:13:05.460847 17318 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI0817 16:13:05.460876 17318 net.cpp:150] Setting up L2_b6_sum_eltwise\nI0817 16:13:05.460888 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.460893 17318 net.cpp:165] Memory required for data: 1109505500\nI0817 16:13:05.460898 17318 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI0817 16:13:05.460906 17318 net.cpp:100] Creating Layer L2_b6_relu\nI0817 16:13:05.460912 17318 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI0817 16:13:05.460922 17318 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.460932 17318 net.cpp:150] Setting up L2_b6_relu\nI0817 16:13:05.460939 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.460944 17318 net.cpp:165] Memory required for data: 1113601500\nI0817 16:13:05.460949 17318 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.460957 17318 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.460963 17318 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI0817 16:13:05.460974 17318 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:13:05.460985 17318 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:13:05.461038 17318 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI0817 16:13:05.461051 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.461058 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.461062 17318 net.cpp:165] Memory required for data: 1121793500\nI0817 16:13:05.461068 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI0817 16:13:05.461079 17318 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI0817 16:13:05.461086 17318 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI0817 16:13:05.461097 17318 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI0817 16:13:05.461596 17318 net.cpp:150] Setting up L2_b7_cbr1_conv\nI0817 16:13:05.461611 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.461616 17318 net.cpp:165] Memory required for data: 1125889500\nI0817 16:13:05.461625 17318 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI0817 16:13:05.461643 17318 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI0817 16:13:05.461650 17318 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI0817 16:13:05.461659 17318 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI0817 16:13:05.461932 17318 net.cpp:150] Setting up L2_b7_cbr1_bn\nI0817 16:13:05.461948 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.461953 17318 net.cpp:165] Memory required for data: 1129985500\nI0817 16:13:05.461964 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:13:05.461978 17318 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI0817 16:13:05.461985 17318 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI0817 16:13:05.461992 17318 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.462052 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI0817 16:13:05.462219 17318 net.cpp:150] Setting up L2_b7_cbr1_scale\nI0817 16:13:05.462232 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.462239 17318 net.cpp:165] Memory required for data: 1134081500\nI0817 16:13:05.462247 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI0817 16:13:05.462255 17318 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI0817 16:13:05.462261 17318 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI0817 16:13:05.462271 17318 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.462281 17318 net.cpp:150] Setting up L2_b7_cbr1_relu\nI0817 16:13:05.462290 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.462293 17318 net.cpp:165] Memory required for data: 1138177500\nI0817 16:13:05.462298 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI0817 16:13:05.462309 17318 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI0817 16:13:05.462316 17318 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI0817 16:13:05.462327 17318 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI0817 16:13:05.462811 17318 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI0817 16:13:05.462826 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.462831 17318 net.cpp:165] Memory required for data: 1142273500\nI0817 16:13:05.462839 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI0817 16:13:05.462848 17318 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI0817 16:13:05.462854 17318 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI0817 16:13:05.462865 17318 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI0817 16:13:05.463145 17318 net.cpp:150] Setting up L2_b7_cbr2_bn\nI0817 16:13:05.463158 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463163 17318 net.cpp:165] Memory required for data: 1146369500\nI0817 16:13:05.463173 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:13:05.463186 17318 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI0817 16:13:05.463192 17318 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI0817 16:13:05.463199 17318 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.463256 17318 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI0817 16:13:05.463414 17318 net.cpp:150] Setting up L2_b7_cbr2_scale\nI0817 16:13:05.463428 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463433 17318 net.cpp:165] Memory required for data: 1150465500\nI0817 16:13:05.463441 17318 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI0817 16:13:05.463452 17318 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI0817 16:13:05.463459 17318 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI0817 16:13:05.463466 17318 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI0817 16:13:05.463477 17318 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI0817 16:13:05.463506 17318 net.cpp:150] Setting up L2_b7_sum_eltwise\nI0817 16:13:05.463518 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463523 17318 net.cpp:165] Memory required for 
data: 1154561500\nI0817 16:13:05.463528 17318 layer_factory.hpp:77] Creating layer L2_b7_relu\nI0817 16:13:05.463536 17318 net.cpp:100] Creating Layer L2_b7_relu\nI0817 16:13:05.463549 17318 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI0817 16:13:05.463559 17318 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.463570 17318 net.cpp:150] Setting up L2_b7_relu\nI0817 16:13:05.463577 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463582 17318 net.cpp:165] Memory required for data: 1158657500\nI0817 16:13:05.463587 17318 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.463593 17318 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.463599 17318 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI0817 16:13:05.463606 17318 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:13:05.463616 17318 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:13:05.463668 17318 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI0817 16:13:05.463680 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463687 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.463691 17318 net.cpp:165] Memory required for data: 1166849500\nI0817 16:13:05.463696 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI0817 16:13:05.463707 17318 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI0817 16:13:05.463714 17318 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI0817 16:13:05.463726 17318 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI0817 16:13:05.464226 17318 net.cpp:150] Setting up L2_b8_cbr1_conv\nI0817 16:13:05.464241 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.464246 17318 net.cpp:165] Memory required for data: 
1170945500\nI0817 16:13:05.464254 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI0817 16:13:05.464267 17318 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI0817 16:13:05.464273 17318 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI0817 16:13:05.464282 17318 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI0817 16:13:05.464561 17318 net.cpp:150] Setting up L2_b8_cbr1_bn\nI0817 16:13:05.464574 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.464579 17318 net.cpp:165] Memory required for data: 1175041500\nI0817 16:13:05.464589 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:13:05.464601 17318 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI0817 16:13:05.464607 17318 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI0817 16:13:05.464615 17318 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.464673 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI0817 16:13:05.464831 17318 net.cpp:150] Setting up L2_b8_cbr1_scale\nI0817 16:13:05.464844 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.464849 17318 net.cpp:165] Memory required for data: 1179137500\nI0817 16:13:05.464859 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI0817 16:13:05.464869 17318 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI0817 16:13:05.464876 17318 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI0817 16:13:05.464885 17318 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.464896 17318 net.cpp:150] Setting up L2_b8_cbr1_relu\nI0817 16:13:05.464903 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.464908 17318 net.cpp:165] Memory required for data: 1183233500\nI0817 16:13:05.464912 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI0817 16:13:05.464923 17318 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI0817 16:13:05.464929 17318 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI0817 16:13:05.464941 17318 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI0817 16:13:05.465432 17318 net.cpp:150] Setting up L2_b8_cbr2_conv\nI0817 16:13:05.465446 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.465452 17318 net.cpp:165] Memory required for data: 1187329500\nI0817 16:13:05.465461 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI0817 16:13:05.465476 17318 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI0817 16:13:05.465482 17318 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI0817 16:13:05.465495 17318 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI0817 16:13:05.465768 17318 net.cpp:150] Setting up L2_b8_cbr2_bn\nI0817 16:13:05.465782 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.465787 17318 net.cpp:165] Memory required for data: 1191425500\nI0817 16:13:05.465797 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:13:05.465808 17318 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI0817 16:13:05.465816 17318 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI0817 16:13:05.465823 17318 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.465881 17318 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI0817 16:13:05.466053 17318 net.cpp:150] Setting up L2_b8_cbr2_scale\nI0817 16:13:05.466065 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.466071 17318 net.cpp:165] Memory required for data: 1195521500\nI0817 16:13:05.466080 17318 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI0817 16:13:05.466091 17318 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI0817 16:13:05.466099 17318 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI0817 16:13:05.466105 17318 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI0817 16:13:05.466114 17318 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI0817 16:13:05.466145 17318 net.cpp:150] Setting up L2_b8_sum_eltwise\nI0817 16:13:05.466156 17318 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:13:05.466161 17318 net.cpp:165] Memory required for data: 1199617500\nI0817 16:13:05.466166 17318 layer_factory.hpp:77] Creating layer L2_b8_relu\nI0817 16:13:05.466174 17318 net.cpp:100] Creating Layer L2_b8_relu\nI0817 16:13:05.466181 17318 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI0817 16:13:05.466190 17318 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.466200 17318 net.cpp:150] Setting up L2_b8_relu\nI0817 16:13:05.466207 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.466212 17318 net.cpp:165] Memory required for data: 1203713500\nI0817 16:13:05.466217 17318 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.466223 17318 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.466229 17318 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI0817 16:13:05.466236 17318 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:13:05.466260 17318 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:13:05.466310 17318 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI0817 16:13:05.466326 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.466333 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.466337 17318 net.cpp:165] Memory required for data: 1211905500\nI0817 16:13:05.466343 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI0817 16:13:05.466354 17318 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI0817 16:13:05.466361 17318 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI0817 16:13:05.466372 17318 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI0817 16:13:05.466871 17318 net.cpp:150] Setting up L2_b9_cbr1_conv\nI0817 16:13:05.466884 17318 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI0817 16:13:05.466891 17318 net.cpp:165] Memory required for data: 1216001500\nI0817 16:13:05.466898 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI0817 16:13:05.466908 17318 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI0817 16:13:05.466915 17318 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI0817 16:13:05.466926 17318 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI0817 16:13:05.467212 17318 net.cpp:150] Setting up L2_b9_cbr1_bn\nI0817 16:13:05.467232 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.467238 17318 net.cpp:165] Memory required for data: 1220097500\nI0817 16:13:05.467248 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:13:05.467260 17318 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI0817 16:13:05.467267 17318 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI0817 16:13:05.467274 17318 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.467334 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI0817 16:13:05.467495 17318 net.cpp:150] Setting up L2_b9_cbr1_scale\nI0817 16:13:05.467509 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.467514 17318 net.cpp:165] Memory required for data: 1224193500\nI0817 16:13:05.467523 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI0817 16:13:05.467535 17318 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI0817 16:13:05.467540 17318 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI0817 16:13:05.467548 17318 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.467557 17318 net.cpp:150] Setting up L2_b9_cbr1_relu\nI0817 16:13:05.467564 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.467569 17318 net.cpp:165] Memory required for data: 1228289500\nI0817 16:13:05.467574 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI0817 16:13:05.467591 17318 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI0817 16:13:05.467597 17318 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI0817 16:13:05.467607 17318 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI0817 16:13:05.469076 17318 net.cpp:150] Setting up L2_b9_cbr2_conv\nI0817 16:13:05.469094 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.469099 17318 net.cpp:165] Memory required for data: 1232385500\nI0817 16:13:05.469110 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI0817 16:13:05.469121 17318 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI0817 16:13:05.469130 17318 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI0817 16:13:05.469137 17318 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI0817 16:13:05.469404 17318 net.cpp:150] Setting up L2_b9_cbr2_bn\nI0817 16:13:05.469418 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.469424 17318 net.cpp:165] Memory required for data: 1236481500\nI0817 16:13:05.469473 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:13:05.469488 17318 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI0817 16:13:05.469496 17318 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI0817 16:13:05.469504 17318 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.469563 17318 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI0817 16:13:05.469717 17318 net.cpp:150] Setting up L2_b9_cbr2_scale\nI0817 16:13:05.469730 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.469736 17318 net.cpp:165] Memory required for data: 1240577500\nI0817 16:13:05.469745 17318 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI0817 16:13:05.469754 17318 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI0817 16:13:05.469760 17318 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI0817 16:13:05.469768 17318 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI0817 16:13:05.469779 17318 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI0817 16:13:05.469806 17318 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI0817 16:13:05.469815 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.469820 17318 net.cpp:165] Memory required for data: 1244673500\nI0817 16:13:05.469825 17318 layer_factory.hpp:77] Creating layer L2_b9_relu\nI0817 16:13:05.469841 17318 net.cpp:100] Creating Layer L2_b9_relu\nI0817 16:13:05.469846 17318 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI0817 16:13:05.469853 17318 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.469863 17318 net.cpp:150] Setting up L2_b9_relu\nI0817 16:13:05.469871 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.469882 17318 net.cpp:165] Memory required for data: 1248769500\nI0817 16:13:05.469887 17318 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.469897 17318 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.469903 17318 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI0817 16:13:05.469911 17318 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:13:05.469921 17318 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:13:05.469980 17318 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI0817 16:13:05.469995 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.470003 17318 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI0817 16:13:05.470007 17318 net.cpp:165] Memory required for data: 1256961500\nI0817 16:13:05.470012 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI0817 16:13:05.470024 17318 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI0817 16:13:05.470031 17318 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI0817 16:13:05.470039 17318 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI0817 16:13:05.470540 17318 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI0817 16:13:05.470553 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.470559 17318 net.cpp:165] Memory required for data: 1257985500\nI0817 16:13:05.470568 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI0817 16:13:05.470580 17318 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI0817 16:13:05.470587 17318 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI0817 16:13:05.470595 17318 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI0817 16:13:05.470868 17318 net.cpp:150] Setting up L3_b1_cbr1_bn\nI0817 16:13:05.470882 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.470887 17318 net.cpp:165] Memory required for data: 1259009500\nI0817 16:13:05.470898 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:13:05.470911 17318 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI0817 16:13:05.470916 17318 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI0817 16:13:05.470924 17318 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.470989 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI0817 16:13:05.471185 17318 net.cpp:150] Setting up L3_b1_cbr1_scale\nI0817 16:13:05.471209 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.471217 17318 net.cpp:165] Memory required for data: 1260033500\nI0817 16:13:05.471235 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI0817 16:13:05.471248 17318 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI0817 16:13:05.471259 17318 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI0817 16:13:05.471276 17318 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI0817 16:13:05.471293 17318 net.cpp:150] Setting up L3_b1_cbr1_relu\nI0817 16:13:05.471307 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.471312 17318 net.cpp:165] Memory required for data: 1261057500\nI0817 16:13:05.471318 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI0817 16:13:05.471329 17318 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI0817 16:13:05.471336 17318 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI0817 16:13:05.471348 17318 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI0817 16:13:05.471840 17318 net.cpp:150] Setting up L3_b1_cbr2_conv\nI0817 16:13:05.471855 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.471860 17318 net.cpp:165] Memory required for data: 1262081500\nI0817 16:13:05.471869 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI0817 16:13:05.471885 17318 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI0817 16:13:05.471892 17318 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI0817 16:13:05.471900 17318 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI0817 16:13:05.472180 17318 net.cpp:150] Setting up L3_b1_cbr2_bn\nI0817 16:13:05.472200 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.472205 17318 net.cpp:165] Memory required for data: 1263105500\nI0817 16:13:05.472216 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:13:05.472225 17318 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI0817 16:13:05.472231 17318 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI0817 16:13:05.472239 17318 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI0817 16:13:05.472297 17318 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI0817 16:13:05.472460 17318 net.cpp:150] Setting up L3_b1_cbr2_scale\nI0817 16:13:05.472476 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.472481 17318 net.cpp:165] Memory required for data: 1264129500\nI0817 16:13:05.472491 17318 layer_factory.hpp:77] Creating layer L3_b1_pool\nI0817 16:13:05.472499 17318 net.cpp:100] Creating Layer L3_b1_pool\nI0817 16:13:05.472506 17318 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI0817 16:13:05.472517 17318 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI0817 16:13:05.472553 17318 net.cpp:150] Setting up L3_b1_pool\nI0817 16:13:05.472563 17318 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI0817 16:13:05.472566 17318 net.cpp:165] Memory required for data: 1265153500\nI0817 16:13:05.472571 17318 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI0817 16:13:05.472584 17318 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI0817 16:13:05.472589 17318 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI0817 16:13:05.472596 17318 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI0817 16:13:05.472604 17318 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI0817 16:13:05.472640 17318 net.cpp:150] Setting up L3_b1_sum_eltwise\nI0817 16:13:05.472651 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.472656 17318 net.cpp:165] Memory required for data: 1266177500\nI0817 16:13:05.472661 17318 layer_factory.hpp:77] Creating layer L3_b1_relu\nI0817 16:13:05.472669 17318 net.cpp:100] Creating Layer L3_b1_relu\nI0817 16:13:05.472676 17318 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI0817 16:13:05.472682 17318 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI0817 16:13:05.472692 17318 net.cpp:150] Setting up L3_b1_relu\nI0817 16:13:05.472698 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.472703 17318 net.cpp:165] Memory required for data: 1267201500\nI0817 16:13:05.472707 17318 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI0817 16:13:05.472717 17318 net.cpp:100] Creating Layer L3_b1_zeros\nI0817 16:13:05.472728 17318 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI0817 16:13:05.473942 17318 net.cpp:150] Setting up L3_b1_zeros\nI0817 16:13:05.473963 17318 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI0817 16:13:05.473973 17318 net.cpp:165] Memory required for data: 1268225500\nI0817 16:13:05.473980 17318 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI0817 16:13:05.473989 17318 net.cpp:100] Creating Layer L3_b1_concat0\nI0817 16:13:05.473996 17318 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI0817 16:13:05.474004 17318 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI0817 
16:13:05.474011 17318 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI0817 16:13:05.474056 17318 net.cpp:150] Setting up L3_b1_concat0\nI0817 16:13:05.474069 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.474074 17318 net.cpp:165] Memory required for data: 1270273500\nI0817 16:13:05.474079 17318 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.474087 17318 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.474093 17318 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI0817 16:13:05.474107 17318 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:13:05.474117 17318 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:13:05.474174 17318 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI0817 16:13:05.474185 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.474200 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.474205 17318 net.cpp:165] Memory required for data: 1274369500\nI0817 16:13:05.474210 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI0817 16:13:05.474223 17318 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI0817 16:13:05.474231 17318 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI0817 16:13:05.474239 17318 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI0817 16:13:05.475299 17318 net.cpp:150] Setting up L3_b2_cbr1_conv\nI0817 16:13:05.475314 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.475320 17318 net.cpp:165] Memory required for data: 1276417500\nI0817 16:13:05.475329 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI0817 16:13:05.475342 17318 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI0817 16:13:05.475348 17318 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI0817 16:13:05.475358 17318 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI0817 
16:13:05.475636 17318 net.cpp:150] Setting up L3_b2_cbr1_bn\nI0817 16:13:05.475649 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.475654 17318 net.cpp:165] Memory required for data: 1278465500\nI0817 16:13:05.475666 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:13:05.475674 17318 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI0817 16:13:05.475680 17318 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI0817 16:13:05.475692 17318 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.475751 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI0817 16:13:05.475915 17318 net.cpp:150] Setting up L3_b2_cbr1_scale\nI0817 16:13:05.475929 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.475934 17318 net.cpp:165] Memory required for data: 1280513500\nI0817 16:13:05.475942 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI0817 16:13:05.475950 17318 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI0817 16:13:05.475957 17318 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI0817 16:13:05.475971 17318 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI0817 16:13:05.475983 17318 net.cpp:150] Setting up L3_b2_cbr1_relu\nI0817 16:13:05.475991 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.475996 17318 net.cpp:165] Memory required for data: 1282561500\nI0817 16:13:05.476001 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI0817 16:13:05.476014 17318 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI0817 16:13:05.476020 17318 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI0817 16:13:05.476029 17318 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI0817 16:13:05.477084 17318 net.cpp:150] Setting up L3_b2_cbr2_conv\nI0817 16:13:05.477099 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477104 17318 net.cpp:165] Memory required for data: 1284609500\nI0817 16:13:05.477113 17318 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI0817 16:13:05.477125 17318 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI0817 16:13:05.477133 17318 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI0817 16:13:05.477140 17318 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI0817 16:13:05.477416 17318 net.cpp:150] Setting up L3_b2_cbr2_bn\nI0817 16:13:05.477429 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477434 17318 net.cpp:165] Memory required for data: 1286657500\nI0817 16:13:05.477444 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:13:05.477457 17318 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI0817 16:13:05.477463 17318 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI0817 16:13:05.477470 17318 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI0817 16:13:05.477531 17318 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI0817 16:13:05.477695 17318 net.cpp:150] Setting up L3_b2_cbr2_scale\nI0817 16:13:05.477708 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477713 17318 net.cpp:165] Memory required for data: 1288705500\nI0817 16:13:05.477730 17318 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI0817 16:13:05.477740 17318 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI0817 16:13:05.477746 17318 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI0817 16:13:05.477753 17318 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI0817 16:13:05.477763 17318 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI0817 16:13:05.477798 17318 net.cpp:150] Setting up L3_b2_sum_eltwise\nI0817 16:13:05.477808 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477813 17318 net.cpp:165] Memory required for data: 1290753500\nI0817 16:13:05.477818 17318 layer_factory.hpp:77] Creating layer L3_b2_relu\nI0817 16:13:05.477829 17318 net.cpp:100] Creating Layer L3_b2_relu\nI0817 16:13:05.477835 17318 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI0817 16:13:05.477843 17318 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI0817 16:13:05.477852 17318 net.cpp:150] Setting up L3_b2_relu\nI0817 16:13:05.477859 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477864 17318 net.cpp:165] Memory required for data: 1292801500\nI0817 16:13:05.477869 17318 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.477875 17318 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.477881 17318 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI0817 16:13:05.477888 17318 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:13:05.477898 17318 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:13:05.477947 17318 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI0817 16:13:05.477959 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477972 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.477977 17318 net.cpp:165] Memory required for data: 1296897500\nI0817 16:13:05.477982 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI0817 16:13:05.477996 17318 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI0817 16:13:05.478003 17318 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI0817 16:13:05.478013 17318 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI0817 16:13:05.479063 17318 net.cpp:150] Setting up L3_b3_cbr1_conv\nI0817 16:13:05.479079 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.479084 17318 net.cpp:165] Memory required for data: 1298945500\nI0817 16:13:05.479094 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI0817 16:13:05.479105 17318 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI0817 16:13:05.479112 17318 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI0817 16:13:05.479122 17318 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI0817 16:13:05.479389 17318 net.cpp:150] Setting up L3_b3_cbr1_bn\nI0817 16:13:05.479403 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.479408 17318 net.cpp:165] Memory required for data: 1300993500\nI0817 16:13:05.479418 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:13:05.479426 17318 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI0817 16:13:05.479432 17318 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI0817 16:13:05.479444 17318 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.479501 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI0817 16:13:05.479662 17318 net.cpp:150] Setting up L3_b3_cbr1_scale\nI0817 16:13:05.479676 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.479681 17318 net.cpp:165] Memory required for data: 1303041500\nI0817 16:13:05.479691 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI0817 16:13:05.479698 17318 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI0817 16:13:05.479704 17318 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI0817 16:13:05.479713 17318 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI0817 16:13:05.479724 17318 net.cpp:150] Setting up L3_b3_cbr1_relu\nI0817 16:13:05.479738 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.479743 17318 net.cpp:165] Memory required for data: 1305089500\nI0817 16:13:05.479748 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI0817 16:13:05.479763 17318 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI0817 16:13:05.479769 17318 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI0817 16:13:05.479779 17318 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI0817 16:13:05.480825 17318 net.cpp:150] Setting up L3_b3_cbr2_conv\nI0817 16:13:05.480841 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.480846 17318 net.cpp:165] Memory required for data: 1307137500\nI0817 16:13:05.480855 17318 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI0817 16:13:05.480867 17318 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI0817 16:13:05.480873 17318 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI0817 16:13:05.480882 17318 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI0817 16:13:05.481160 17318 net.cpp:150] Setting up L3_b3_cbr2_bn\nI0817 16:13:05.481174 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481179 17318 net.cpp:165] Memory required for data: 1309185500\nI0817 16:13:05.481189 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:13:05.481201 17318 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI0817 16:13:05.481209 17318 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI0817 16:13:05.481220 17318 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI0817 16:13:05.481279 17318 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI0817 16:13:05.481443 17318 net.cpp:150] Setting up L3_b3_cbr2_scale\nI0817 16:13:05.481457 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481462 17318 net.cpp:165] Memory required for data: 1311233500\nI0817 16:13:05.481472 17318 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI0817 16:13:05.481480 17318 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI0817 16:13:05.481487 17318 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI0817 16:13:05.481494 17318 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI0817 16:13:05.481504 17318 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI0817 16:13:05.481539 17318 net.cpp:150] Setting up L3_b3_sum_eltwise\nI0817 16:13:05.481550 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481555 17318 net.cpp:165] Memory required for data: 1313281500\nI0817 16:13:05.481560 17318 layer_factory.hpp:77] Creating layer L3_b3_relu\nI0817 16:13:05.481570 17318 net.cpp:100] Creating Layer L3_b3_relu\nI0817 16:13:05.481577 17318 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI0817 16:13:05.481583 17318 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI0817 16:13:05.481593 17318 net.cpp:150] Setting up L3_b3_relu\nI0817 16:13:05.481600 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481606 17318 net.cpp:165] Memory required for data: 1315329500\nI0817 16:13:05.481609 17318 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.481617 17318 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.481622 17318 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI0817 16:13:05.481631 17318 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:13:05.481639 17318 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:13:05.481693 17318 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI0817 16:13:05.481704 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481710 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.481715 17318 net.cpp:165] Memory required for data: 1319425500\nI0817 16:13:05.481720 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI0817 16:13:05.481734 17318 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI0817 16:13:05.481741 17318 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI0817 16:13:05.481757 17318 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI0817 16:13:05.482803 17318 net.cpp:150] Setting up L3_b4_cbr1_conv\nI0817 16:13:05.482818 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.482823 17318 net.cpp:165] Memory required for data: 1321473500\nI0817 16:13:05.482832 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI0817 16:13:05.482844 17318 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI0817 16:13:05.482851 17318 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI0817 16:13:05.482861 17318 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI0817 16:13:05.483140 17318 net.cpp:150] Setting up L3_b4_cbr1_bn\nI0817 16:13:05.483155 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.483160 17318 net.cpp:165] Memory required for data: 1323521500\nI0817 16:13:05.483170 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:13:05.483178 17318 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI0817 16:13:05.483184 17318 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI0817 16:13:05.483196 17318 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.483253 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI0817 16:13:05.483414 17318 net.cpp:150] Setting up L3_b4_cbr1_scale\nI0817 16:13:05.483428 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.483433 17318 net.cpp:165] Memory required for data: 1325569500\nI0817 16:13:05.483443 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI0817 16:13:05.483450 17318 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI0817 16:13:05.483456 17318 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI0817 16:13:05.483466 17318 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI0817 16:13:05.483476 17318 net.cpp:150] Setting up L3_b4_cbr1_relu\nI0817 16:13:05.483484 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.483489 17318 net.cpp:165] Memory required for data: 1327617500\nI0817 16:13:05.483494 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI0817 16:13:05.483507 17318 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI0817 16:13:05.483515 17318 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI0817 16:13:05.483525 17318 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI0817 16:13:05.485563 17318 net.cpp:150] Setting up L3_b4_cbr2_conv\nI0817 16:13:05.485581 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.485586 17318 net.cpp:165] Memory 
required for data: 1329665500\nI0817 16:13:05.485596 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI0817 16:13:05.485611 17318 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI0817 16:13:05.485620 17318 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI0817 16:13:05.485630 17318 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI0817 16:13:05.485903 17318 net.cpp:150] Setting up L3_b4_cbr2_bn\nI0817 16:13:05.485918 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.485924 17318 net.cpp:165] Memory required for data: 1331713500\nI0817 16:13:05.485934 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:13:05.485942 17318 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI0817 16:13:05.485949 17318 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI0817 16:13:05.485960 17318 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI0817 16:13:05.486026 17318 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI0817 16:13:05.486184 17318 net.cpp:150] Setting up L3_b4_cbr2_scale\nI0817 16:13:05.486197 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.486203 17318 net.cpp:165] Memory required for data: 1333761500\nI0817 16:13:05.486212 17318 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI0817 16:13:05.486222 17318 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI0817 16:13:05.486227 17318 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI0817 16:13:05.486234 17318 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI0817 16:13:05.486245 17318 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI0817 16:13:05.486290 17318 net.cpp:150] Setting up L3_b4_sum_eltwise\nI0817 16:13:05.486302 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.486308 17318 net.cpp:165] Memory required for data: 1335809500\nI0817 16:13:05.486313 17318 layer_factory.hpp:77] Creating layer L3_b4_relu\nI0817 16:13:05.486321 17318 net.cpp:100] Creating Layer L3_b4_relu\nI0817 
16:13:05.486327 17318 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI0817 16:13:05.486337 17318 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI0817 16:13:05.486347 17318 net.cpp:150] Setting up L3_b4_relu\nI0817 16:13:05.486354 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.486359 17318 net.cpp:165] Memory required for data: 1337857500\nI0817 16:13:05.486363 17318 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.486371 17318 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.486377 17318 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI0817 16:13:05.486384 17318 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:13:05.486394 17318 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:13:05.486443 17318 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI0817 16:13:05.486455 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.486462 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.486467 17318 net.cpp:165] Memory required for data: 1341953500\nI0817 16:13:05.486472 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI0817 16:13:05.486484 17318 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI0817 16:13:05.486490 17318 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI0817 16:13:05.486501 17318 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI0817 16:13:05.487534 17318 net.cpp:150] Setting up L3_b5_cbr1_conv\nI0817 16:13:05.487548 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.487555 17318 net.cpp:165] Memory required for data: 1344001500\nI0817 16:13:05.487563 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI0817 16:13:05.487572 17318 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI0817 
16:13:05.487579 17318 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI0817 16:13:05.487591 17318 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI0817 16:13:05.487874 17318 net.cpp:150] Setting up L3_b5_cbr1_bn\nI0817 16:13:05.487886 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.487891 17318 net.cpp:165] Memory required for data: 1346049500\nI0817 16:13:05.487902 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:13:05.487910 17318 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI0817 16:13:05.487917 17318 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI0817 16:13:05.487924 17318 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.487990 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI0817 16:13:05.488153 17318 net.cpp:150] Setting up L3_b5_cbr1_scale\nI0817 16:13:05.488170 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.488176 17318 net.cpp:165] Memory required for data: 1348097500\nI0817 16:13:05.488185 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI0817 16:13:05.488193 17318 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI0817 16:13:05.488200 17318 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI0817 16:13:05.488207 17318 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI0817 16:13:05.488217 17318 net.cpp:150] Setting up L3_b5_cbr1_relu\nI0817 16:13:05.488224 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.488229 17318 net.cpp:165] Memory required for data: 1350145500\nI0817 16:13:05.488234 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI0817 16:13:05.488247 17318 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI0817 16:13:05.488255 17318 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI0817 16:13:05.488270 17318 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI0817 16:13:05.489307 17318 net.cpp:150] Setting up L3_b5_cbr2_conv\nI0817 16:13:05.489322 17318 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:13:05.489327 17318 net.cpp:165] Memory required for data: 1352193500\nI0817 16:13:05.489336 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI0817 16:13:05.489348 17318 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI0817 16:13:05.489356 17318 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI0817 16:13:05.489367 17318 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI0817 16:13:05.489635 17318 net.cpp:150] Setting up L3_b5_cbr2_bn\nI0817 16:13:05.489648 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.489655 17318 net.cpp:165] Memory required for data: 1354241500\nI0817 16:13:05.489665 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:13:05.489672 17318 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI0817 16:13:05.489679 17318 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI0817 16:13:05.489691 17318 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI0817 16:13:05.489750 17318 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI0817 16:13:05.489913 17318 net.cpp:150] Setting up L3_b5_cbr2_scale\nI0817 16:13:05.489928 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.489933 17318 net.cpp:165] Memory required for data: 1356289500\nI0817 16:13:05.489941 17318 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI0817 16:13:05.489953 17318 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI0817 16:13:05.489960 17318 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI0817 16:13:05.489971 17318 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI0817 16:13:05.489980 17318 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI0817 16:13:05.490018 17318 net.cpp:150] Setting up L3_b5_sum_eltwise\nI0817 16:13:05.490030 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.490034 17318 net.cpp:165] Memory required for data: 1358337500\nI0817 16:13:05.490041 17318 layer_factory.hpp:77] Creating layer L3_b5_relu\nI0817 
16:13:05.490069 17318 net.cpp:100] Creating Layer L3_b5_relu\nI0817 16:13:05.490079 17318 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI0817 16:13:05.490089 17318 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI0817 16:13:05.490100 17318 net.cpp:150] Setting up L3_b5_relu\nI0817 16:13:05.490108 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.490113 17318 net.cpp:165] Memory required for data: 1360385500\nI0817 16:13:05.490118 17318 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.490125 17318 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.490130 17318 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI0817 16:13:05.490139 17318 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:13:05.490149 17318 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:13:05.490200 17318 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI0817 16:13:05.490211 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.490218 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.490223 17318 net.cpp:165] Memory required for data: 1364481500\nI0817 16:13:05.490228 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI0817 16:13:05.490239 17318 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI0817 16:13:05.490245 17318 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI0817 16:13:05.490257 17318 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI0817 16:13:05.491312 17318 net.cpp:150] Setting up L3_b6_cbr1_conv\nI0817 16:13:05.491328 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.491333 17318 net.cpp:165] Memory required for data: 1366529500\nI0817 16:13:05.491349 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI0817 16:13:05.491364 
17318 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI0817 16:13:05.491371 17318 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI0817 16:13:05.491380 17318 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI0817 16:13:05.491658 17318 net.cpp:150] Setting up L3_b6_cbr1_bn\nI0817 16:13:05.491672 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.491677 17318 net.cpp:165] Memory required for data: 1368577500\nI0817 16:13:05.491688 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:13:05.491696 17318 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI0817 16:13:05.491703 17318 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI0817 16:13:05.491709 17318 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.491771 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI0817 16:13:05.491932 17318 net.cpp:150] Setting up L3_b6_cbr1_scale\nI0817 16:13:05.491948 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.491953 17318 net.cpp:165] Memory required for data: 1370625500\nI0817 16:13:05.491963 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI0817 16:13:05.491976 17318 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI0817 16:13:05.491983 17318 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI0817 16:13:05.491991 17318 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI0817 16:13:05.492000 17318 net.cpp:150] Setting up L3_b6_cbr1_relu\nI0817 16:13:05.492008 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.492013 17318 net.cpp:165] Memory required for data: 1372673500\nI0817 16:13:05.492018 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI0817 16:13:05.492035 17318 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI0817 16:13:05.492041 17318 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI0817 16:13:05.492050 17318 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI0817 16:13:05.493084 17318 net.cpp:150] Setting up L3_b6_cbr2_conv\nI0817 
16:13:05.493099 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493105 17318 net.cpp:165] Memory required for data: 1374721500\nI0817 16:13:05.493113 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI0817 16:13:05.493124 17318 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI0817 16:13:05.493129 17318 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI0817 16:13:05.493140 17318 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI0817 16:13:05.493412 17318 net.cpp:150] Setting up L3_b6_cbr2_bn\nI0817 16:13:05.493425 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493430 17318 net.cpp:165] Memory required for data: 1376769500\nI0817 16:13:05.493441 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:13:05.493450 17318 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI0817 16:13:05.493456 17318 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI0817 16:13:05.493466 17318 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI0817 16:13:05.493526 17318 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI0817 16:13:05.493690 17318 net.cpp:150] Setting up L3_b6_cbr2_scale\nI0817 16:13:05.493703 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493708 17318 net.cpp:165] Memory required for data: 1378817500\nI0817 16:13:05.493717 17318 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI0817 16:13:05.493729 17318 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI0817 16:13:05.493736 17318 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI0817 16:13:05.493743 17318 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI0817 16:13:05.493751 17318 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI0817 16:13:05.493788 17318 net.cpp:150] Setting up L3_b6_sum_eltwise\nI0817 16:13:05.493799 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493804 17318 net.cpp:165] Memory required for data: 1380865500\nI0817 16:13:05.493809 17318 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI0817 16:13:05.493818 17318 net.cpp:100] Creating Layer L3_b6_relu\nI0817 16:13:05.493830 17318 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI0817 16:13:05.493840 17318 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI0817 16:13:05.493850 17318 net.cpp:150] Setting up L3_b6_relu\nI0817 16:13:05.493857 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493862 17318 net.cpp:165] Memory required for data: 1382913500\nI0817 16:13:05.493866 17318 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.493875 17318 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.493880 17318 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI0817 16:13:05.493886 17318 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:13:05.493896 17318 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:13:05.493947 17318 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI0817 16:13:05.493958 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493969 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.493975 17318 net.cpp:165] Memory required for data: 1387009500\nI0817 16:13:05.493980 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI0817 16:13:05.493993 17318 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI0817 16:13:05.493999 17318 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI0817 16:13:05.494012 17318 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI0817 16:13:05.495039 17318 net.cpp:150] Setting up L3_b7_cbr1_conv\nI0817 16:13:05.495055 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.495060 17318 net.cpp:165] Memory required for data: 1389057500\nI0817 16:13:05.495069 17318 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI0817 16:13:05.495081 17318 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI0817 16:13:05.495088 17318 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI0817 16:13:05.495096 17318 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI0817 16:13:05.495373 17318 net.cpp:150] Setting up L3_b7_cbr1_bn\nI0817 16:13:05.495386 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.495391 17318 net.cpp:165] Memory required for data: 1391105500\nI0817 16:13:05.495401 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:13:05.495410 17318 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI0817 16:13:05.495416 17318 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI0817 16:13:05.495424 17318 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.495484 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI0817 16:13:05.495643 17318 net.cpp:150] Setting up L3_b7_cbr1_scale\nI0817 16:13:05.495658 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.495663 17318 net.cpp:165] Memory required for data: 1393153500\nI0817 16:13:05.495673 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI0817 16:13:05.495709 17318 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI0817 16:13:05.495718 17318 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI0817 16:13:05.495726 17318 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI0817 16:13:05.495738 17318 net.cpp:150] Setting up L3_b7_cbr1_relu\nI0817 16:13:05.495744 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.495749 17318 net.cpp:165] Memory required for data: 1395201500\nI0817 16:13:05.495754 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI0817 16:13:05.495769 17318 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI0817 16:13:05.495775 17318 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI0817 16:13:05.495784 17318 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI0817 
16:13:05.496820 17318 net.cpp:150] Setting up L3_b7_cbr2_conv\nI0817 16:13:05.496835 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.496840 17318 net.cpp:165] Memory required for data: 1397249500\nI0817 16:13:05.496850 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI0817 16:13:05.496865 17318 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI0817 16:13:05.496872 17318 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI0817 16:13:05.496884 17318 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI0817 16:13:05.497174 17318 net.cpp:150] Setting up L3_b7_cbr2_bn\nI0817 16:13:05.497187 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497192 17318 net.cpp:165] Memory required for data: 1399297500\nI0817 16:13:05.497203 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:13:05.497212 17318 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI0817 16:13:05.497218 17318 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI0817 16:13:05.497226 17318 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI0817 16:13:05.497288 17318 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI0817 16:13:05.497445 17318 net.cpp:150] Setting up L3_b7_cbr2_scale\nI0817 16:13:05.497460 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497467 17318 net.cpp:165] Memory required for data: 1401345500\nI0817 16:13:05.497475 17318 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI0817 16:13:05.497484 17318 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI0817 16:13:05.497491 17318 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI0817 16:13:05.497498 17318 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI0817 16:13:05.497506 17318 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI0817 16:13:05.497544 17318 net.cpp:150] Setting up L3_b7_sum_eltwise\nI0817 16:13:05.497555 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497560 17318 net.cpp:165] Memory 
required for data: 1403393500\nI0817 16:13:05.497565 17318 layer_factory.hpp:77] Creating layer L3_b7_relu\nI0817 16:13:05.497573 17318 net.cpp:100] Creating Layer L3_b7_relu\nI0817 16:13:05.497579 17318 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI0817 16:13:05.497586 17318 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI0817 16:13:05.497596 17318 net.cpp:150] Setting up L3_b7_relu\nI0817 16:13:05.497603 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497607 17318 net.cpp:165] Memory required for data: 1405441500\nI0817 16:13:05.497612 17318 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.497619 17318 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.497624 17318 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI0817 16:13:05.497634 17318 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:13:05.497645 17318 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:13:05.497694 17318 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI0817 16:13:05.497707 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497714 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.497720 17318 net.cpp:165] Memory required for data: 1409537500\nI0817 16:13:05.497725 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI0817 16:13:05.497736 17318 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI0817 16:13:05.497742 17318 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI0817 16:13:05.497751 17318 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI0817 16:13:05.499748 17318 net.cpp:150] Setting up L3_b8_cbr1_conv\nI0817 16:13:05.499766 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.499771 17318 net.cpp:165] Memory required for data: 
1411585500\nI0817 16:13:05.499781 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI0817 16:13:05.499794 17318 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI0817 16:13:05.499800 17318 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI0817 16:13:05.499809 17318 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI0817 16:13:05.500097 17318 net.cpp:150] Setting up L3_b8_cbr1_bn\nI0817 16:13:05.500118 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.500123 17318 net.cpp:165] Memory required for data: 1413633500\nI0817 16:13:05.500134 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:13:05.500144 17318 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI0817 16:13:05.500149 17318 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI0817 16:13:05.500157 17318 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.500223 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI0817 16:13:05.500388 17318 net.cpp:150] Setting up L3_b8_cbr1_scale\nI0817 16:13:05.500402 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.500407 17318 net.cpp:165] Memory required for data: 1415681500\nI0817 16:13:05.500416 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI0817 16:13:05.500424 17318 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI0817 16:13:05.500430 17318 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI0817 16:13:05.500442 17318 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI0817 16:13:05.500452 17318 net.cpp:150] Setting up L3_b8_cbr1_relu\nI0817 16:13:05.500458 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.500463 17318 net.cpp:165] Memory required for data: 1417729500\nI0817 16:13:05.500468 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI0817 16:13:05.500483 17318 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI0817 16:13:05.500488 17318 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI0817 16:13:05.500496 17318 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI0817 16:13:05.501533 17318 net.cpp:150] Setting up L3_b8_cbr2_conv\nI0817 16:13:05.501549 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.501554 17318 net.cpp:165] Memory required for data: 1419777500\nI0817 16:13:05.501564 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI0817 16:13:05.501575 17318 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI0817 16:13:05.501581 17318 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI0817 16:13:05.501590 17318 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI0817 16:13:05.501865 17318 net.cpp:150] Setting up L3_b8_cbr2_bn\nI0817 16:13:05.501879 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.501884 17318 net.cpp:165] Memory required for data: 1421825500\nI0817 16:13:05.501895 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:13:05.501906 17318 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI0817 16:13:05.501914 17318 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI0817 16:13:05.501920 17318 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI0817 16:13:05.501987 17318 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI0817 16:13:05.502151 17318 net.cpp:150] Setting up L3_b8_cbr2_scale\nI0817 16:13:05.502164 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.502169 17318 net.cpp:165] Memory required for data: 1423873500\nI0817 16:13:05.502179 17318 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI0817 16:13:05.502194 17318 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI0817 16:13:05.502202 17318 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI0817 16:13:05.502208 17318 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI0817 16:13:05.502218 17318 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI0817 16:13:05.502254 17318 net.cpp:150] Setting up L3_b8_sum_eltwise\nI0817 16:13:05.502264 17318 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI0817 16:13:05.502270 17318 net.cpp:165] Memory required for data: 1425921500\nI0817 16:13:05.502275 17318 layer_factory.hpp:77] Creating layer L3_b8_relu\nI0817 16:13:05.502287 17318 net.cpp:100] Creating Layer L3_b8_relu\nI0817 16:13:05.502293 17318 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI0817 16:13:05.502301 17318 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI0817 16:13:05.502310 17318 net.cpp:150] Setting up L3_b8_relu\nI0817 16:13:05.502317 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.502322 17318 net.cpp:165] Memory required for data: 1427969500\nI0817 16:13:05.502333 17318 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.502341 17318 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.502347 17318 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI0817 16:13:05.502354 17318 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:13:05.502364 17318 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:13:05.502415 17318 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI0817 16:13:05.502427 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.502434 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.502439 17318 net.cpp:165] Memory required for data: 1432065500\nI0817 16:13:05.502444 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI0817 16:13:05.502457 17318 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI0817 16:13:05.502465 17318 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI0817 16:13:05.502473 17318 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI0817 16:13:05.503505 17318 net.cpp:150] Setting up L3_b9_cbr1_conv\nI0817 16:13:05.503520 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 
16:13:05.503526 17318 net.cpp:165] Memory required for data: 1434113500\nI0817 16:13:05.503535 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI0817 16:13:05.503547 17318 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI0817 16:13:05.503554 17318 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI0817 16:13:05.503566 17318 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI0817 16:13:05.503840 17318 net.cpp:150] Setting up L3_b9_cbr1_bn\nI0817 16:13:05.503854 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.503859 17318 net.cpp:165] Memory required for data: 1436161500\nI0817 16:13:05.503870 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:13:05.503878 17318 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI0817 16:13:05.503885 17318 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI0817 16:13:05.503896 17318 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.503957 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI0817 16:13:05.504124 17318 net.cpp:150] Setting up L3_b9_cbr1_scale\nI0817 16:13:05.504138 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.504143 17318 net.cpp:165] Memory required for data: 1438209500\nI0817 16:13:05.504153 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI0817 16:13:05.504160 17318 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI0817 16:13:05.504168 17318 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI0817 16:13:05.504179 17318 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI0817 16:13:05.504189 17318 net.cpp:150] Setting up L3_b9_cbr1_relu\nI0817 16:13:05.504196 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.504201 17318 net.cpp:165] Memory required for data: 1440257500\nI0817 16:13:05.504206 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI0817 16:13:05.504220 17318 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI0817 16:13:05.504226 17318 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI0817 16:13:05.504235 17318 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI0817 16:13:05.505269 17318 net.cpp:150] Setting up L3_b9_cbr2_conv\nI0817 16:13:05.505283 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.505290 17318 net.cpp:165] Memory required for data: 1442305500\nI0817 16:13:05.505297 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI0817 16:13:05.505311 17318 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI0817 16:13:05.505316 17318 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI0817 16:13:05.505326 17318 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI0817 16:13:05.505604 17318 net.cpp:150] Setting up L3_b9_cbr2_bn\nI0817 16:13:05.505616 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.505628 17318 net.cpp:165] Memory required for data: 1444353500\nI0817 16:13:05.505640 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:13:05.505650 17318 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI0817 16:13:05.505657 17318 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI0817 16:13:05.505666 17318 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI0817 16:13:05.505728 17318 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI0817 16:13:05.505895 17318 net.cpp:150] Setting up L3_b9_cbr2_scale\nI0817 16:13:05.505909 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.505914 17318 net.cpp:165] Memory required for data: 1446401500\nI0817 16:13:05.505923 17318 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI0817 16:13:05.505933 17318 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI0817 16:13:05.505939 17318 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI0817 16:13:05.505946 17318 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI0817 16:13:05.505957 17318 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI0817 16:13:05.505997 17318 net.cpp:150] Setting up L3_b9_sum_eltwise\nI0817 
16:13:05.506009 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.506014 17318 net.cpp:165] Memory required for data: 1448449500\nI0817 16:13:05.506019 17318 layer_factory.hpp:77] Creating layer L3_b9_relu\nI0817 16:13:05.506031 17318 net.cpp:100] Creating Layer L3_b9_relu\nI0817 16:13:05.506036 17318 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI0817 16:13:05.506044 17318 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI0817 16:13:05.506053 17318 net.cpp:150] Setting up L3_b9_relu\nI0817 16:13:05.506060 17318 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI0817 16:13:05.506065 17318 net.cpp:165] Memory required for data: 1450497500\nI0817 16:13:05.506070 17318 layer_factory.hpp:77] Creating layer post_pool\nI0817 16:13:05.506078 17318 net.cpp:100] Creating Layer post_pool\nI0817 16:13:05.506083 17318 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI0817 16:13:05.506091 17318 net.cpp:408] post_pool -> post_pool\nI0817 16:13:05.506130 17318 net.cpp:150] Setting up post_pool\nI0817 16:13:05.506142 17318 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI0817 16:13:05.506148 17318 net.cpp:165] Memory required for data: 1450529500\nI0817 16:13:05.506153 17318 layer_factory.hpp:77] Creating layer post_FC\nI0817 16:13:05.506165 17318 net.cpp:100] Creating Layer post_FC\nI0817 16:13:05.506171 17318 net.cpp:434] post_FC <- post_pool\nI0817 16:13:05.506180 17318 net.cpp:408] post_FC -> post_FC_top\nI0817 16:13:05.506397 17318 net.cpp:150] Setting up post_FC\nI0817 16:13:05.506412 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.506417 17318 net.cpp:165] Memory required for data: 1450579500\nI0817 16:13:05.506425 17318 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI0817 16:13:05.506433 17318 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI0817 16:13:05.506439 17318 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI0817 16:13:05.506450 17318 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI0817 16:13:05.506461 17318 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI0817 16:13:05.506510 17318 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI0817 16:13:05.506523 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.506531 17318 net.cpp:157] Top shape: 125 100 (12500)\nI0817 16:13:05.506536 17318 net.cpp:165] Memory required for data: 1450679500\nI0817 16:13:05.506541 17318 layer_factory.hpp:77] Creating layer accuracy\nI0817 16:13:05.506548 17318 net.cpp:100] Creating Layer accuracy\nI0817 16:13:05.506554 17318 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI0817 16:13:05.506561 17318 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI0817 16:13:05.506568 17318 net.cpp:408] accuracy -> accuracy\nI0817 16:13:05.506582 17318 net.cpp:150] Setting up accuracy\nI0817 16:13:05.506588 17318 net.cpp:157] Top shape: (1)\nI0817 16:13:05.506599 17318 net.cpp:165] Memory required for data: 1450679504\nI0817 16:13:05.506605 17318 layer_factory.hpp:77] Creating layer loss\nI0817 16:13:05.506613 17318 net.cpp:100] Creating Layer loss\nI0817 16:13:05.506619 17318 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI0817 16:13:05.506625 17318 net.cpp:434] loss <- label_dataLayer_1_split_1\nI0817 16:13:05.506633 17318 net.cpp:408] loss -> loss\nI0817 16:13:05.506645 17318 layer_factory.hpp:77] Creating layer loss\nI0817 16:13:05.506953 17318 net.cpp:150] Setting up loss\nI0817 16:13:05.506971 17318 net.cpp:157] Top shape: (1)\nI0817 16:13:05.506978 17318 net.cpp:160]     with loss weight 1\nI0817 16:13:05.506994 17318 net.cpp:165] Memory required for data: 1450679508\nI0817 16:13:05.507001 17318 net.cpp:226] loss needs backward computation.\nI0817 16:13:05.507007 17318 net.cpp:228] accuracy does not need backward computation.\nI0817 16:13:05.507014 17318 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI0817 16:13:05.507019 17318 net.cpp:226] post_FC needs backward 
computation.\nI0817 16:13:05.507024 17318 net.cpp:226] post_pool needs backward computation.\nI0817 16:13:05.507028 17318 net.cpp:226] L3_b9_relu needs backward computation.\nI0817 16:13:05.507033 17318 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.507038 17318 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.507043 17318 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.507048 17318 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.507053 17318 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.507058 17318 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.507062 17318 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.507067 17318 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.507072 17318 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI0817 16:13:05.507078 17318 net.cpp:226] L3_b8_relu needs backward computation.\nI0817 16:13:05.507083 17318 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.507088 17318 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.507093 17318 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.507098 17318 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.507103 17318 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.507108 17318 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI0817 16:13:05.507113 17318 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.507118 17318 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.507123 17318 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI0817 16:13:05.507129 17318 net.cpp:226] L3_b7_relu needs backward computation.\nI0817 16:13:05.507134 17318 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI0817 16:13:05.507139 17318 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.507144 17318 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.507149 17318 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.507154 17318 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.507159 17318 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.507164 17318 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.507169 17318 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.507174 17318 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI0817 16:13:05.507179 17318 net.cpp:226] L3_b6_relu needs backward computation.\nI0817 16:13:05.507185 17318 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.507190 17318 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.507195 17318 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.507207 17318 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.507213 17318 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.507218 17318 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.507223 17318 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.507228 17318 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.507235 17318 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI0817 16:13:05.507239 17318 net.cpp:226] L3_b5_relu needs backward computation.\nI0817 16:13:05.507244 17318 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI0817 16:13:05.507249 17318 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.507256 17318 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.507263 17318 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.507268 17318 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.507274 17318 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.507279 17318 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.507284 17318 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.507289 17318 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI0817 16:13:05.507295 17318 net.cpp:226] L3_b4_relu needs backward computation.\nI0817 16:13:05.507300 17318 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.507305 17318 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.507311 17318 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.507316 17318 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.507321 17318 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.507326 17318 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.507331 17318 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.507336 17318 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.507342 17318 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI0817 16:13:05.507347 17318 net.cpp:226] L3_b3_relu needs backward computation.\nI0817 16:13:05.507352 17318 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI0817 16:13:05.507357 17318 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.507362 17318 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.507369 17318 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.507374 17318 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.507378 17318 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.507383 
17318 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.507388 17318 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.507393 17318 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI0817 16:13:05.507400 17318 net.cpp:226] L3_b2_relu needs backward computation.\nI0817 16:13:05.507405 17318 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.507411 17318 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.507416 17318 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.507421 17318 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.507426 17318 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.507431 17318 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.507436 17318 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.507441 17318 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.507447 17318 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI0817 16:13:05.507452 17318 net.cpp:226] L3_b1_concat0 needs backward computation.\nI0817 16:13:05.507463 17318 net.cpp:228] L3_b1_zeros does not need backward computation.\nI0817 16:13:05.507469 17318 net.cpp:226] L3_b1_relu needs backward computation.\nI0817 16:13:05.507474 17318 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.507480 17318 net.cpp:226] L3_b1_pool needs backward computation.\nI0817 16:13:05.507486 17318 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.507491 17318 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.507496 17318 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.507503 17318 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.507508 17318 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI0817 
16:13:05.507513 17318 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.507517 17318 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.507524 17318 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI0817 16:13:05.507535 17318 net.cpp:226] L2_b9_relu needs backward computation.\nI0817 16:13:05.507540 17318 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.507546 17318 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.507552 17318 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.507557 17318 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.507562 17318 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.507567 17318 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.507573 17318 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.507578 17318 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.507583 17318 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI0817 16:13:05.507589 17318 net.cpp:226] L2_b8_relu needs backward computation.\nI0817 16:13:05.507594 17318 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.507601 17318 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.507606 17318 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.507611 17318 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.507616 17318 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.507622 17318 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI0817 16:13:05.507627 17318 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.507632 17318 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.507637 17318 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI0817 16:13:05.507643 17318 net.cpp:226] L2_b7_relu needs backward computation.\nI0817 16:13:05.507648 17318 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI0817 16:13:05.507654 17318 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.507659 17318 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.507665 17318 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.507670 17318 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.507676 17318 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.507681 17318 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.507686 17318 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.507692 17318 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI0817 16:13:05.507697 17318 net.cpp:226] L2_b6_relu needs backward computation.\nI0817 16:13:05.507702 17318 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.507709 17318 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.507714 17318 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.507719 17318 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.507730 17318 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.507735 17318 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.507740 17318 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.507745 17318 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.507751 17318 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI0817 16:13:05.507757 17318 net.cpp:226] L2_b5_relu needs backward computation.\nI0817 16:13:05.507762 17318 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI0817 16:13:05.507768 17318 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.507773 17318 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.507779 17318 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.507784 17318 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.507789 17318 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.507796 17318 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.507800 17318 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.507807 17318 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI0817 16:13:05.507812 17318 net.cpp:226] L2_b4_relu needs backward computation.\nI0817 16:13:05.507817 17318 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.507823 17318 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.507828 17318 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.507834 17318 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.507840 17318 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.507845 17318 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.507850 17318 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.507856 17318 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.507863 17318 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI0817 16:13:05.507869 17318 net.cpp:226] L2_b3_relu needs backward computation.\nI0817 16:13:05.507874 17318 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI0817 16:13:05.507879 17318 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.507884 17318 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.507890 17318 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.507895 17318 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.507901 17318 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.507906 17318 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.507912 17318 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.507917 17318 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI0817 16:13:05.507927 17318 net.cpp:226] L2_b2_relu needs backward computation.\nI0817 16:13:05.507935 17318 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.507941 17318 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.507946 17318 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.507952 17318 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.507957 17318 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.507963 17318 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.507974 17318 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.507982 17318 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.507987 17318 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI0817 16:13:05.507993 17318 net.cpp:226] L2_b1_concat0 needs backward computation.\nI0817 16:13:05.507999 17318 net.cpp:228] L2_b1_zeros does not need backward computation.\nI0817 16:13:05.508010 17318 net.cpp:226] L2_b1_relu needs backward computation.\nI0817 16:13:05.508016 17318 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.508023 17318 net.cpp:226] L2_b1_pool needs backward computation.\nI0817 16:13:05.508028 17318 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.508034 17318 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.508040 17318 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.508046 17318 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.508051 17318 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI0817 16:13:05.508057 17318 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.508062 17318 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.508069 17318 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI0817 16:13:05.508074 17318 net.cpp:226] L1_b9_relu needs backward computation.\nI0817 16:13:05.508080 17318 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI0817 16:13:05.508087 17318 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI0817 16:13:05.508092 17318 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI0817 16:13:05.508098 17318 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI0817 16:13:05.508103 17318 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI0817 16:13:05.508110 17318 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI0817 16:13:05.508114 17318 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI0817 16:13:05.508121 17318 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI0817 16:13:05.508126 17318 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI0817 16:13:05.508132 17318 net.cpp:226] L1_b8_relu needs backward computation.\nI0817 16:13:05.508138 17318 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI0817 16:13:05.508144 17318 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI0817 16:13:05.508149 17318 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI0817 16:13:05.508155 17318 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI0817 16:13:05.508162 17318 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI0817 16:13:05.508167 17318 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI0817 
16:13:05.508172 17318 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI0817 16:13:05.508178 17318 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI0817 16:13:05.508184 17318 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI0817 16:13:05.508190 17318 net.cpp:226] L1_b7_relu needs backward computation.\nI0817 16:13:05.508196 17318 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI0817 16:13:05.508203 17318 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI0817 16:13:05.508208 17318 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI0817 16:13:05.508214 17318 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI0817 16:13:05.508220 17318 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI0817 16:13:05.508225 17318 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI0817 16:13:05.508231 17318 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI0817 16:13:05.508237 17318 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI0817 16:13:05.508244 17318 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI0817 16:13:05.508249 17318 net.cpp:226] L1_b6_relu needs backward computation.\nI0817 16:13:05.508255 17318 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI0817 16:13:05.508260 17318 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI0817 16:13:05.508266 17318 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI0817 16:13:05.508272 17318 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI0817 16:13:05.508278 17318 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI0817 16:13:05.508283 17318 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI0817 16:13:05.508294 17318 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI0817 16:13:05.508301 17318 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI0817 16:13:05.508306 17318 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI0817 16:13:05.508312 17318 net.cpp:226] L1_b5_relu needs backward computation.\nI0817 16:13:05.508318 17318 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI0817 16:13:05.508324 17318 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI0817 16:13:05.508330 17318 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI0817 16:13:05.508337 17318 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI0817 16:13:05.508342 17318 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI0817 16:13:05.508347 17318 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI0817 16:13:05.508352 17318 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI0817 16:13:05.508358 17318 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI0817 16:13:05.508364 17318 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI0817 16:13:05.508370 17318 net.cpp:226] L1_b4_relu needs backward computation.\nI0817 16:13:05.508375 17318 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI0817 16:13:05.508383 17318 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI0817 16:13:05.508388 17318 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI0817 16:13:05.508394 17318 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI0817 16:13:05.508399 17318 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI0817 16:13:05.508405 17318 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI0817 16:13:05.508410 17318 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI0817 16:13:05.508416 17318 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI0817 16:13:05.508422 17318 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI0817 16:13:05.508429 17318 net.cpp:226] L1_b3_relu needs backward computation.\nI0817 16:13:05.508435 17318 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI0817 16:13:05.508441 17318 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI0817 16:13:05.508446 17318 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI0817 16:13:05.508452 17318 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI0817 16:13:05.508458 17318 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI0817 16:13:05.508463 17318 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI0817 16:13:05.508469 17318 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI0817 16:13:05.508476 17318 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI0817 16:13:05.508481 17318 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI0817 16:13:05.508487 17318 net.cpp:226] L1_b2_relu needs backward computation.\nI0817 16:13:05.508492 17318 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI0817 16:13:05.508499 17318 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI0817 16:13:05.508504 17318 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI0817 16:13:05.508512 17318 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI0817 16:13:05.508517 17318 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI0817 16:13:05.508522 17318 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI0817 16:13:05.508528 17318 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI0817 16:13:05.508533 17318 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI0817 16:13:05.508539 17318 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI0817 16:13:05.508545 17318 net.cpp:226] L1_b1_relu needs backward computation.\nI0817 16:13:05.508551 17318 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI0817 16:13:05.508558 17318 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI0817 16:13:05.508563 17318 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI0817 16:13:05.508575 17318 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI0817 16:13:05.508582 17318 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI0817 16:13:05.508589 17318 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI0817 16:13:05.508594 17318 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI0817 16:13:05.508599 17318 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI0817 16:13:05.508605 17318 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI0817 16:13:05.508611 17318 net.cpp:226] pre_relu needs backward computation.\nI0817 16:13:05.508616 17318 net.cpp:226] pre_scale needs backward computation.\nI0817 16:13:05.508622 17318 net.cpp:226] pre_bn needs backward computation.\nI0817 16:13:05.508627 17318 net.cpp:226] pre_conv needs backward computation.\nI0817 16:13:05.508635 17318 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI0817 16:13:05.508644 17318 net.cpp:228] dataLayer does not need backward computation.\nI0817 16:13:05.508649 17318 net.cpp:270] This network produces output accuracy\nI0817 16:13:05.508656 17318 net.cpp:270] This network produces output loss\nI0817 16:13:05.508990 17318 net.cpp:283] Network initialization done.\nI0817 16:13:05.510079 17318 solver.cpp:60] Solver scaffolding done.\nI0817 16:13:05.736176 17318 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI0817 16:13:06.088829 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:06.088901 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:06.095554 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:06.325969 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:06.326081 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:06.360455 17318 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI0817 16:13:06.360563 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:06.795719 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:06.795773 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:06.803037 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:07.052186 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:07.052294 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:07.103610 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:07.103715 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:07.600524 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:07.600587 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:07.609952 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:07.878998 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:07.879125 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:07.949937 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:07.950067 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:08.033309 17318 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI0817 16:13:08.503756 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:08.503824 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI0817 16:13:08.513314 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:08.801584 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:08.801772 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:08.892377 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:08.892556 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:09.539507 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:09.539562 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:09.549998 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:09.860222 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:09.860402 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:09.972218 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:09.972394 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:10.670959 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:10.671023 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:10.682458 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:11.020047 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:11.020289 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:11.151970 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:11.152205 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 
16:13:11.912612 17318 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI0817 16:13:11.912677 17318 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI0817 16:13:11.924948 17318 data_layer.cpp:41] output data size: 125,3,32,32\nI0817 16:13:11.996348 17345 blocking_queue.cpp:50] Waiting for data\nI0817 16:13:12.063204 17342 blocking_queue.cpp:50] Waiting for data\nI0817 16:13:12.372012 17318 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI0817 16:13:12.372246 17318 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI0817 16:13:12.520998 17318 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI0817 16:13:12.521226 17318 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI0817 16:13:12.689393 17318 parallel.cpp:425] Starting Optimization\nI0817 16:13:12.690532 17318 solver.cpp:279] Solving Cifar-Resnet\nI0817 16:13:12.690549 17318 solver.cpp:280] Learning Rate Policy: triangular\nI0817 16:13:12.695569 17318 solver.cpp:337] Iteration 0, Testing net (#0)\nI0817 16:14:34.446169 17318 solver.cpp:404]     Test net output #0: accuracy = 0.01008\nI0817 16:14:34.446436 17318 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI0817 16:14:38.326139 17318 solver.cpp:228] Iteration 0, loss = 7.29693\nI0817 16:14:38.326179 17318 solver.cpp:244]     Train net output #0: accuracy = 0.008\nI0817 16:14:38.326195 17318 solver.cpp:244]     Train net output #1: loss = 7.29693 (* 1 = 7.29693 loss)\nI0817 16:14:38.350590 17318 sgd_solver.cpp:166] Iteration 0, lr = 0\nI0817 16:16:56.813781 17318 solver.cpp:337] Iteration 100, Testing net (#0)\nI0817 16:18:18.112435 17318 solver.cpp:404]     Test net output #0: accuracy = 0.05036\nI0817 16:18:18.112716 17318 solver.cpp:404]     Test net output #1: loss = 4.31738 (* 1 = 4.31738 loss)\nI0817 16:18:19.433749 17318 
solver.cpp:228] Iteration 100, loss = 4.08341\nI0817 16:18:19.433782 17318 solver.cpp:244]     Train net output #0: accuracy = 0.088\nI0817 16:18:19.433799 17318 solver.cpp:244]     Train net output #1: loss = 4.08341 (* 1 = 4.08341 loss)\nI0817 16:18:19.514650 17318 sgd_solver.cpp:166] Iteration 100, lr = 0.015\nI0817 16:20:37.800498 17318 solver.cpp:337] Iteration 200, Testing net (#0)\nI0817 16:21:58.973806 17318 solver.cpp:404]     Test net output #0: accuracy = 0.11148\nI0817 16:21:58.974084 17318 solver.cpp:404]     Test net output #1: loss = 3.83275 (* 1 = 3.83275 loss)\nI0817 16:22:00.298172 17318 solver.cpp:228] Iteration 200, loss = 3.47993\nI0817 16:22:00.298208 17318 solver.cpp:244]     Train net output #0: accuracy = 0.192\nI0817 16:22:00.298355 17318 solver.cpp:244]     Train net output #1: loss = 3.47993 (* 1 = 3.47993 loss)\nI0817 16:22:00.379554 17318 sgd_solver.cpp:166] Iteration 200, lr = 0.03\nI0817 16:24:18.751513 17318 solver.cpp:337] Iteration 300, Testing net (#0)\nI0817 16:25:39.913236 17318 solver.cpp:404]     Test net output #0: accuracy = 0.17708\nI0817 16:25:39.913502 17318 solver.cpp:404]     Test net output #1: loss = 3.47018 (* 1 = 3.47018 loss)\nI0817 16:25:41.233995 17318 solver.cpp:228] Iteration 300, loss = 2.9114\nI0817 16:25:41.234028 17318 solver.cpp:244]     Train net output #0: accuracy = 0.272\nI0817 16:25:41.234045 17318 solver.cpp:244]     Train net output #1: loss = 2.9114 (* 1 = 2.9114 loss)\nI0817 16:25:41.325413 17318 sgd_solver.cpp:166] Iteration 300, lr = 0.045\nI0817 16:28:00.180872 17318 solver.cpp:337] Iteration 400, Testing net (#0)\nI0817 16:29:21.334940 17318 solver.cpp:404]     Test net output #0: accuracy = 0.19208\nI0817 16:29:21.335180 17318 solver.cpp:404]     Test net output #1: loss = 3.71648 (* 1 = 3.71648 loss)\nI0817 16:29:22.656301 17318 solver.cpp:228] Iteration 400, loss = 2.53096\nI0817 16:29:22.656335 17318 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI0817 16:29:22.656352 17318 
solver.cpp:244]     Train net output #1: loss = 2.53096 (* 1 = 2.53096 loss)\nI0817 16:29:22.751956 17318 sgd_solver.cpp:166] Iteration 400, lr = 0.0599999\nI0817 16:31:41.599740 17318 solver.cpp:337] Iteration 500, Testing net (#0)\nI0817 16:33:02.751946 17318 solver.cpp:404]     Test net output #0: accuracy = 0.2694\nI0817 16:33:02.752207 17318 solver.cpp:404]     Test net output #1: loss = 3.06459 (* 1 = 3.06459 loss)\nI0817 16:33:04.073215 17318 solver.cpp:228] Iteration 500, loss = 2.30039\nI0817 16:33:04.073248 17318 solver.cpp:244]     Train net output #0: accuracy = 0.416\nI0817 16:33:04.073263 17318 solver.cpp:244]     Train net output #1: loss = 2.30039 (* 1 = 2.30039 loss)\nI0817 16:33:04.168632 17318 sgd_solver.cpp:166] Iteration 500, lr = 0.0749999\nI0817 16:35:22.967419 17318 solver.cpp:337] Iteration 600, Testing net (#0)\nI0817 16:36:44.114701 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35644\nI0817 16:36:44.114954 17318 solver.cpp:404]     Test net output #1: loss = 2.53838 (* 1 = 2.53838 loss)\nI0817 16:36:45.436127 17318 solver.cpp:228] Iteration 600, loss = 2.06566\nI0817 16:36:45.436161 17318 solver.cpp:244]     Train net output #0: accuracy = 0.472\nI0817 16:36:45.436175 17318 solver.cpp:244]     Train net output #1: loss = 2.06566 (* 1 = 2.06566 loss)\nI0817 16:36:45.529791 17318 sgd_solver.cpp:166] Iteration 600, lr = 0.0899999\nI0817 16:39:04.389935 17318 solver.cpp:337] Iteration 700, Testing net (#0)\nI0817 16:40:25.540349 17318 solver.cpp:404]     Test net output #0: accuracy = 0.36216\nI0817 16:40:25.540596 17318 solver.cpp:404]     Test net output #1: loss = 2.51484 (* 1 = 2.51484 loss)\nI0817 16:40:26.861999 17318 solver.cpp:228] Iteration 700, loss = 1.84152\nI0817 16:40:26.862032 17318 solver.cpp:244]     Train net output #0: accuracy = 0.472\nI0817 16:40:26.862048 17318 solver.cpp:244]     Train net output #1: loss = 1.84152 (* 1 = 1.84152 loss)\nI0817 16:40:26.953868 17318 sgd_solver.cpp:166] Iteration 700, lr = 
0.105\nI0817 16:42:45.757858 17318 solver.cpp:337] Iteration 800, Testing net (#0)\nI0817 16:44:07.029196 17318 solver.cpp:404]     Test net output #0: accuracy = 0.26148\nI0817 16:44:07.029469 17318 solver.cpp:404]     Test net output #1: loss = 3.40303 (* 1 = 3.40303 loss)\nI0817 16:44:08.351140 17318 solver.cpp:228] Iteration 800, loss = 1.60373\nI0817 16:44:08.351176 17318 solver.cpp:244]     Train net output #0: accuracy = 0.536\nI0817 16:44:08.351199 17318 solver.cpp:244]     Train net output #1: loss = 1.60373 (* 1 = 1.60373 loss)\nI0817 16:44:08.441675 17318 sgd_solver.cpp:166] Iteration 800, lr = 0.12\nI0817 16:46:27.264506 17318 solver.cpp:337] Iteration 900, Testing net (#0)\nI0817 16:47:48.499202 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35948\nI0817 16:47:48.499474 17318 solver.cpp:404]     Test net output #1: loss = 2.69376 (* 1 = 2.69376 loss)\nI0817 16:47:49.821502 17318 solver.cpp:228] Iteration 900, loss = 1.55538\nI0817 16:47:49.821540 17318 solver.cpp:244]     Train net output #0: accuracy = 0.568\nI0817 16:47:49.821564 17318 solver.cpp:244]     Train net output #1: loss = 1.55538 (* 1 = 1.55538 loss)\nI0817 16:47:49.916697 17318 sgd_solver.cpp:166] Iteration 900, lr = 0.135\nI0817 16:50:08.814263 17318 solver.cpp:337] Iteration 1000, Testing net (#0)\nI0817 16:51:30.026595 17318 solver.cpp:404]     Test net output #0: accuracy = 0.32664\nI0817 16:51:30.026865 17318 solver.cpp:404]     Test net output #1: loss = 3.06348 (* 1 = 3.06348 loss)\nI0817 16:51:31.348922 17318 solver.cpp:228] Iteration 1000, loss = 1.49453\nI0817 16:51:31.348961 17318 solver.cpp:244]     Train net output #0: accuracy = 0.544\nI0817 16:51:31.348984 17318 solver.cpp:244]     Train net output #1: loss = 1.49453 (* 1 = 1.49453 loss)\nI0817 16:51:31.439666 17318 sgd_solver.cpp:166] Iteration 1000, lr = 0.15\nI0817 16:53:50.305966 17318 solver.cpp:337] Iteration 1100, Testing net (#0)\nI0817 16:55:11.590421 17318 solver.cpp:404]     Test net output #0: 
accuracy = 0.40556\nI0817 16:55:11.590689 17318 solver.cpp:404]     Test net output #1: loss = 2.37962 (* 1 = 2.37962 loss)\nI0817 16:55:12.912042 17318 solver.cpp:228] Iteration 1100, loss = 1.17765\nI0817 16:55:12.912078 17318 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0817 16:55:12.912094 17318 solver.cpp:244]     Train net output #1: loss = 1.17765 (* 1 = 1.17765 loss)\nI0817 16:55:13.003587 17318 sgd_solver.cpp:166] Iteration 1100, lr = 0.165\nI0817 16:57:31.926066 17318 solver.cpp:337] Iteration 1200, Testing net (#0)\nI0817 16:58:53.193439 17318 solver.cpp:404]     Test net output #0: accuracy = 0.3432\nI0817 16:58:53.193701 17318 solver.cpp:404]     Test net output #1: loss = 3.15452 (* 1 = 3.15452 loss)\nI0817 16:58:54.515265 17318 solver.cpp:228] Iteration 1200, loss = 1.12012\nI0817 16:58:54.515301 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 16:58:54.515317 17318 solver.cpp:244]     Train net output #1: loss = 1.12012 (* 1 = 1.12012 loss)\nI0817 16:58:54.608249 17318 sgd_solver.cpp:166] Iteration 1200, lr = 0.18\nI0817 17:01:13.467994 17318 solver.cpp:337] Iteration 1300, Testing net (#0)\nI0817 17:02:34.724695 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41496\nI0817 17:02:34.724957 17318 solver.cpp:404]     Test net output #1: loss = 2.5369 (* 1 = 2.5369 loss)\nI0817 17:02:36.045176 17318 solver.cpp:228] Iteration 1300, loss = 1.11526\nI0817 17:02:36.045212 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:02:36.045228 17318 solver.cpp:244]     Train net output #1: loss = 1.11526 (* 1 = 1.11526 loss)\nI0817 17:02:36.141584 17318 sgd_solver.cpp:166] Iteration 1300, lr = 0.195\nI0817 17:04:55.017098 17318 solver.cpp:337] Iteration 1400, Testing net (#0)\nI0817 17:06:16.284430 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42104\nI0817 17:06:16.284695 17318 solver.cpp:404]     Test net output #1: loss = 2.44087 (* 1 = 2.44087 loss)\nI0817 17:06:17.605880 17318 
solver.cpp:228] Iteration 1400, loss = 1.03459\nI0817 17:06:17.605913 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 17:06:17.605929 17318 solver.cpp:244]     Train net output #1: loss = 1.03459 (* 1 = 1.03459 loss)\nI0817 17:06:17.694051 17318 sgd_solver.cpp:166] Iteration 1400, lr = 0.21\nI0817 17:08:36.496204 17318 solver.cpp:337] Iteration 1500, Testing net (#0)\nI0817 17:09:57.749155 17318 solver.cpp:404]     Test net output #0: accuracy = 0.36212\nI0817 17:09:57.749423 17318 solver.cpp:404]     Test net output #1: loss = 3.2517 (* 1 = 3.2517 loss)\nI0817 17:09:59.069519 17318 solver.cpp:228] Iteration 1500, loss = 1.08145\nI0817 17:09:59.069550 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 17:09:59.069566 17318 solver.cpp:244]     Train net output #1: loss = 1.08145 (* 1 = 1.08145 loss)\nI0817 17:09:59.166937 17318 sgd_solver.cpp:166] Iteration 1500, lr = 0.225\nI0817 17:12:17.974735 17318 solver.cpp:337] Iteration 1600, Testing net (#0)\nI0817 17:13:39.227600 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41156\nI0817 17:13:39.227860 17318 solver.cpp:404]     Test net output #1: loss = 2.57708 (* 1 = 2.57708 loss)\nI0817 17:13:40.549139 17318 solver.cpp:228] Iteration 1600, loss = 1.01548\nI0817 17:13:40.549172 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0817 17:13:40.549187 17318 solver.cpp:244]     Train net output #1: loss = 1.01548 (* 1 = 1.01548 loss)\nI0817 17:13:40.644346 17318 sgd_solver.cpp:166] Iteration 1600, lr = 0.24\nI0817 17:15:59.498153 17318 solver.cpp:337] Iteration 1700, Testing net (#0)\nI0817 17:17:20.753211 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41504\nI0817 17:17:20.753463 17318 solver.cpp:404]     Test net output #1: loss = 2.81571 (* 1 = 2.81571 loss)\nI0817 17:17:22.074246 17318 solver.cpp:228] Iteration 1700, loss = 1.00271\nI0817 17:17:22.074281 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0817 17:17:22.074296 
17318 solver.cpp:244]     Train net output #1: loss = 1.00271 (* 1 = 1.00271 loss)\nI0817 17:17:22.164079 17318 sgd_solver.cpp:166] Iteration 1700, lr = 0.255\nI0817 17:19:41.036876 17318 solver.cpp:337] Iteration 1800, Testing net (#0)\nI0817 17:21:02.286104 17318 solver.cpp:404]     Test net output #0: accuracy = 0.428\nI0817 17:21:02.286367 17318 solver.cpp:404]     Test net output #1: loss = 2.55938 (* 1 = 2.55938 loss)\nI0817 17:21:03.607262 17318 solver.cpp:228] Iteration 1800, loss = 0.972514\nI0817 17:21:03.607296 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0817 17:21:03.607312 17318 solver.cpp:244]     Train net output #1: loss = 0.972514 (* 1 = 0.972514 loss)\nI0817 17:21:03.699750 17318 sgd_solver.cpp:166] Iteration 1800, lr = 0.27\nI0817 17:23:22.580163 17318 solver.cpp:337] Iteration 1900, Testing net (#0)\nI0817 17:24:43.823350 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44768\nI0817 17:24:43.823614 17318 solver.cpp:404]     Test net output #1: loss = 2.53382 (* 1 = 2.53382 loss)\nI0817 17:24:45.144085 17318 solver.cpp:228] Iteration 1900, loss = 1.04347\nI0817 17:24:45.144119 17318 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0817 17:24:45.144134 17318 solver.cpp:244]     Train net output #1: loss = 1.04347 (* 1 = 1.04347 loss)\nI0817 17:24:45.236107 17318 sgd_solver.cpp:166] Iteration 1900, lr = 0.285\nI0817 17:27:04.056783 17318 solver.cpp:337] Iteration 2000, Testing net (#0)\nI0817 17:28:25.299058 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38044\nI0817 17:28:25.299324 17318 solver.cpp:404]     Test net output #1: loss = 3.10088 (* 1 = 3.10088 loss)\nI0817 17:28:26.620484 17318 solver.cpp:228] Iteration 2000, loss = 0.87039\nI0817 17:28:26.620518 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 17:28:26.620534 17318 solver.cpp:244]     Train net output #1: loss = 0.87039 (* 1 = 0.87039 loss)\nI0817 17:28:26.714699 17318 sgd_solver.cpp:166] Iteration 2000, lr = 
0.3\nI0817 17:30:45.579989 17318 solver.cpp:337] Iteration 2100, Testing net (#0)\nI0817 17:32:06.825357 17318 solver.cpp:404]     Test net output #0: accuracy = 0.39856\nI0817 17:32:06.825616 17318 solver.cpp:404]     Test net output #1: loss = 3.1529 (* 1 = 3.1529 loss)\nI0817 17:32:08.146354 17318 solver.cpp:228] Iteration 2100, loss = 0.867245\nI0817 17:32:08.146389 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:32:08.146404 17318 solver.cpp:244]     Train net output #1: loss = 0.867245 (* 1 = 0.867245 loss)\nI0817 17:32:08.238993 17318 sgd_solver.cpp:166] Iteration 2100, lr = 0.315\nI0817 17:34:27.132295 17318 solver.cpp:337] Iteration 2200, Testing net (#0)\nI0817 17:35:48.375861 17318 solver.cpp:404]     Test net output #0: accuracy = 0.30096\nI0817 17:35:48.376149 17318 solver.cpp:404]     Test net output #1: loss = 4.28487 (* 1 = 4.28487 loss)\nI0817 17:35:49.696804 17318 solver.cpp:228] Iteration 2200, loss = 0.834165\nI0817 17:35:49.696837 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:35:49.696852 17318 solver.cpp:244]     Train net output #1: loss = 0.834165 (* 1 = 0.834165 loss)\nI0817 17:35:49.787812 17318 sgd_solver.cpp:166] Iteration 2200, lr = 0.33\nI0817 17:38:08.655072 17318 solver.cpp:337] Iteration 2300, Testing net (#0)\nI0817 17:39:29.903985 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38792\nI0817 17:39:29.904254 17318 solver.cpp:404]     Test net output #1: loss = 3.36788 (* 1 = 3.36788 loss)\nI0817 17:39:31.225370 17318 solver.cpp:228] Iteration 2300, loss = 0.920377\nI0817 17:39:31.225406 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:39:31.225421 17318 solver.cpp:244]     Train net output #1: loss = 0.920377 (* 1 = 0.920377 loss)\nI0817 17:39:31.314761 17318 sgd_solver.cpp:166] Iteration 2300, lr = 0.345\nI0817 17:41:50.197618 17318 solver.cpp:337] Iteration 2400, Testing net (#0)\nI0817 17:43:11.447461 17318 solver.cpp:404]     Test net output 
#0: accuracy = 0.42244\nI0817 17:43:11.447718 17318 solver.cpp:404]     Test net output #1: loss = 2.91832 (* 1 = 2.91832 loss)\nI0817 17:43:12.768844 17318 solver.cpp:228] Iteration 2400, loss = 0.840987\nI0817 17:43:12.768878 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 17:43:12.768894 17318 solver.cpp:244]     Train net output #1: loss = 0.840987 (* 1 = 0.840987 loss)\nI0817 17:43:12.857592 17318 sgd_solver.cpp:166] Iteration 2400, lr = 0.36\nI0817 17:45:31.859277 17318 solver.cpp:337] Iteration 2500, Testing net (#0)\nI0817 17:46:53.107533 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45036\nI0817 17:46:53.107796 17318 solver.cpp:404]     Test net output #1: loss = 2.76296 (* 1 = 2.76296 loss)\nI0817 17:46:54.428750 17318 solver.cpp:228] Iteration 2500, loss = 0.8439\nI0817 17:46:54.428783 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 17:46:54.428797 17318 solver.cpp:244]     Train net output #1: loss = 0.8439 (* 1 = 0.8439 loss)\nI0817 17:46:54.519417 17318 sgd_solver.cpp:166] Iteration 2500, lr = 0.375\nI0817 17:49:13.455173 17318 solver.cpp:337] Iteration 2600, Testing net (#0)\nI0817 17:50:34.704370 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46108\nI0817 17:50:34.704632 17318 solver.cpp:404]     Test net output #1: loss = 2.67954 (* 1 = 2.67954 loss)\nI0817 17:50:36.024726 17318 solver.cpp:228] Iteration 2600, loss = 0.775872\nI0817 17:50:36.024760 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:50:36.024775 17318 solver.cpp:244]     Train net output #1: loss = 0.775872 (* 1 = 0.775872 loss)\nI0817 17:50:36.116400 17318 sgd_solver.cpp:166] Iteration 2600, lr = 0.39\nI0817 17:52:55.086729 17318 solver.cpp:337] Iteration 2700, Testing net (#0)\nI0817 17:54:16.337219 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4218\nI0817 17:54:16.337462 17318 solver.cpp:404]     Test net output #1: loss = 3.15474 (* 1 = 3.15474 loss)\nI0817 17:54:17.659049 
17318 solver.cpp:228] Iteration 2700, loss = 0.683296\nI0817 17:54:17.659083 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 17:54:17.659099 17318 solver.cpp:244]     Train net output #1: loss = 0.683296 (* 1 = 0.683296 loss)\nI0817 17:54:17.747270 17318 sgd_solver.cpp:166] Iteration 2700, lr = 0.405\nI0817 17:56:36.604898 17318 solver.cpp:337] Iteration 2800, Testing net (#0)\nI0817 17:57:57.858166 17318 solver.cpp:404]     Test net output #0: accuracy = 0.48188\nI0817 17:57:57.858433 17318 solver.cpp:404]     Test net output #1: loss = 2.42669 (* 1 = 2.42669 loss)\nI0817 17:57:59.179632 17318 solver.cpp:228] Iteration 2800, loss = 0.587486\nI0817 17:57:59.179667 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 17:57:59.179683 17318 solver.cpp:244]     Train net output #1: loss = 0.587486 (* 1 = 0.587486 loss)\nI0817 17:57:59.273043 17318 sgd_solver.cpp:166] Iteration 2800, lr = 0.42\nI0817 18:00:18.072268 17318 solver.cpp:337] Iteration 2900, Testing net (#0)\nI0817 18:01:39.321640 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45356\nI0817 18:01:39.321893 17318 solver.cpp:404]     Test net output #1: loss = 2.8652 (* 1 = 2.8652 loss)\nI0817 18:01:40.644068 17318 solver.cpp:228] Iteration 2900, loss = 0.672747\nI0817 18:01:40.644104 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 18:01:40.644119 17318 solver.cpp:244]     Train net output #1: loss = 0.672747 (* 1 = 0.672747 loss)\nI0817 18:01:40.733803 17318 sgd_solver.cpp:166] Iteration 2900, lr = 0.435\nI0817 18:03:59.647739 17318 solver.cpp:337] Iteration 3000, Testing net (#0)\nI0817 18:05:20.901393 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4448\nI0817 18:05:20.901661 17318 solver.cpp:404]     Test net output #1: loss = 2.80577 (* 1 = 2.80577 loss)\nI0817 18:05:22.222429 17318 solver.cpp:228] Iteration 3000, loss = 0.482577\nI0817 18:05:22.222465 17318 solver.cpp:244]     Train net output #0: accuracy = 
0.856\nI0817 18:05:22.222481 17318 solver.cpp:244]     Train net output #1: loss = 0.482577 (* 1 = 0.482577 loss)\nI0817 18:05:22.314366 17318 sgd_solver.cpp:166] Iteration 3000, lr = 0.45\nI0817 18:07:41.298641 17318 solver.cpp:337] Iteration 3100, Testing net (#0)\nI0817 18:09:02.542315 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4176\nI0817 18:09:02.542575 17318 solver.cpp:404]     Test net output #1: loss = 3.29928 (* 1 = 3.29928 loss)\nI0817 18:09:03.863436 17318 solver.cpp:228] Iteration 3100, loss = 0.552229\nI0817 18:09:03.863472 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 18:09:03.863487 17318 solver.cpp:244]     Train net output #1: loss = 0.552229 (* 1 = 0.552229 loss)\nI0817 18:09:03.953488 17318 sgd_solver.cpp:166] Iteration 3100, lr = 0.465\nI0817 18:11:22.945274 17318 solver.cpp:337] Iteration 3200, Testing net (#0)\nI0817 18:12:44.199072 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42968\nI0817 18:12:44.199328 17318 solver.cpp:404]     Test net output #1: loss = 3.09865 (* 1 = 3.09865 loss)\nI0817 18:12:45.520104 17318 solver.cpp:228] Iteration 3200, loss = 0.734247\nI0817 18:12:45.520140 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 18:12:45.520156 17318 solver.cpp:244]     Train net output #1: loss = 0.734247 (* 1 = 0.734247 loss)\nI0817 18:12:45.607028 17318 sgd_solver.cpp:166] Iteration 3200, lr = 0.48\nI0817 18:15:04.468816 17318 solver.cpp:337] Iteration 3300, Testing net (#0)\nI0817 18:16:25.724056 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44732\nI0817 18:16:25.724325 17318 solver.cpp:404]     Test net output #1: loss = 3.05135 (* 1 = 3.05135 loss)\nI0817 18:16:27.046116 17318 solver.cpp:228] Iteration 3300, loss = 0.731257\nI0817 18:16:27.046152 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 18:16:27.046169 17318 solver.cpp:244]     Train net output #1: loss = 0.731257 (* 1 = 0.731257 loss)\nI0817 18:16:27.140363 17318 
sgd_solver.cpp:166] Iteration 3300, lr = 0.495\nI0817 18:18:46.008477 17318 solver.cpp:337] Iteration 3400, Testing net (#0)\nI0817 18:20:07.254689 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49136\nI0817 18:20:07.254938 17318 solver.cpp:404]     Test net output #1: loss = 2.55202 (* 1 = 2.55202 loss)\nI0817 18:20:08.575270 17318 solver.cpp:228] Iteration 3400, loss = 0.534946\nI0817 18:20:08.575305 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 18:20:08.575321 17318 solver.cpp:244]     Train net output #1: loss = 0.534946 (* 1 = 0.534946 loss)\nI0817 18:20:08.668548 17318 sgd_solver.cpp:166] Iteration 3400, lr = 0.51\nI0817 18:22:27.604363 17318 solver.cpp:337] Iteration 3500, Testing net (#0)\nI0817 18:23:48.854887 17318 solver.cpp:404]     Test net output #0: accuracy = 0.439\nI0817 18:23:48.855165 17318 solver.cpp:404]     Test net output #1: loss = 3.09567 (* 1 = 3.09567 loss)\nI0817 18:23:50.177054 17318 solver.cpp:228] Iteration 3500, loss = 0.594467\nI0817 18:23:50.177095 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 18:23:50.177120 17318 solver.cpp:244]     Train net output #1: loss = 0.594467 (* 1 = 0.594467 loss)\nI0817 18:23:50.268388 17318 sgd_solver.cpp:166] Iteration 3500, lr = 0.525\nI0817 18:26:09.245256 17318 solver.cpp:337] Iteration 3600, Testing net (#0)\nI0817 18:27:30.497427 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43388\nI0817 18:27:30.497699 17318 solver.cpp:404]     Test net output #1: loss = 3.00366 (* 1 = 3.00366 loss)\nI0817 18:27:31.819375 17318 solver.cpp:228] Iteration 3600, loss = 0.715326\nI0817 18:27:31.819416 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 18:27:31.819437 17318 solver.cpp:244]     Train net output #1: loss = 0.715326 (* 1 = 0.715326 loss)\nI0817 18:27:31.908776 17318 sgd_solver.cpp:166] Iteration 3600, lr = 0.54\nI0817 18:29:50.921643 17318 solver.cpp:337] Iteration 3700, Testing net (#0)\nI0817 18:31:12.176615 
17318 solver.cpp:404]     Test net output #0: accuracy = 0.41796\nI0817 18:31:12.176889 17318 solver.cpp:404]     Test net output #1: loss = 3.45788 (* 1 = 3.45788 loss)\nI0817 18:31:13.498354 17318 solver.cpp:228] Iteration 3700, loss = 0.699558\nI0817 18:31:13.498392 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 18:31:13.498416 17318 solver.cpp:244]     Train net output #1: loss = 0.699558 (* 1 = 0.699558 loss)\nI0817 18:31:13.588667 17318 sgd_solver.cpp:166] Iteration 3700, lr = 0.555\nI0817 18:33:32.421089 17318 solver.cpp:337] Iteration 3800, Testing net (#0)\nI0817 18:34:53.777621 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45324\nI0817 18:34:53.777891 17318 solver.cpp:404]     Test net output #1: loss = 2.98527 (* 1 = 2.98527 loss)\nI0817 18:34:55.100002 17318 solver.cpp:228] Iteration 3800, loss = 0.619092\nI0817 18:34:55.100040 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 18:34:55.100064 17318 solver.cpp:244]     Train net output #1: loss = 0.619092 (* 1 = 0.619092 loss)\nI0817 18:34:55.188397 17318 sgd_solver.cpp:166] Iteration 3800, lr = 0.57\nI0817 18:37:14.043040 17318 solver.cpp:337] Iteration 3900, Testing net (#0)\nI0817 18:38:35.340114 17318 solver.cpp:404]     Test net output #0: accuracy = 0.47456\nI0817 18:38:35.340381 17318 solver.cpp:404]     Test net output #1: loss = 2.77344 (* 1 = 2.77344 loss)\nI0817 18:38:36.662966 17318 solver.cpp:228] Iteration 3900, loss = 0.60156\nI0817 18:38:36.663002 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 18:38:36.663024 17318 solver.cpp:244]     Train net output #1: loss = 0.60156 (* 1 = 0.60156 loss)\nI0817 18:38:36.753636 17318 sgd_solver.cpp:166] Iteration 3900, lr = 0.585\nI0817 18:40:55.703125 17318 solver.cpp:337] Iteration 4000, Testing net (#0)\nI0817 18:42:16.926717 17318 solver.cpp:404]     Test net output #0: accuracy = 0.47244\nI0817 18:42:16.926996 17318 solver.cpp:404]     Test net output #1: loss = 
2.71418 (* 1 = 2.71418 loss)\nI0817 18:42:18.249357 17318 solver.cpp:228] Iteration 4000, loss = 0.478235\nI0817 18:42:18.249393 17318 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 18:42:18.249415 17318 solver.cpp:244]     Train net output #1: loss = 0.478235 (* 1 = 0.478235 loss)\nI0817 18:42:18.341429 17318 sgd_solver.cpp:166] Iteration 4000, lr = 0.6\nI0817 18:44:37.271073 17318 solver.cpp:337] Iteration 4100, Testing net (#0)\nI0817 18:45:58.567113 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45228\nI0817 18:45:58.567383 17318 solver.cpp:404]     Test net output #1: loss = 3.09896 (* 1 = 3.09896 loss)\nI0817 18:45:59.887853 17318 solver.cpp:228] Iteration 4100, loss = 0.547764\nI0817 18:45:59.887888 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 18:45:59.887912 17318 solver.cpp:244]     Train net output #1: loss = 0.547764 (* 1 = 0.547764 loss)\nI0817 18:45:59.979208 17318 sgd_solver.cpp:166] Iteration 4100, lr = 0.615\nI0817 18:48:18.799620 17318 solver.cpp:337] Iteration 4200, Testing net (#0)\nI0817 18:49:40.058548 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46316\nI0817 18:49:40.058821 17318 solver.cpp:404]     Test net output #1: loss = 2.98801 (* 1 = 2.98801 loss)\nI0817 18:49:41.380228 17318 solver.cpp:228] Iteration 4200, loss = 0.530367\nI0817 18:49:41.380264 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 18:49:41.380287 17318 solver.cpp:244]     Train net output #1: loss = 0.530367 (* 1 = 0.530367 loss)\nI0817 18:49:41.467011 17318 sgd_solver.cpp:166] Iteration 4200, lr = 0.63\nI0817 18:52:00.374116 17318 solver.cpp:337] Iteration 4300, Testing net (#0)\nI0817 18:53:21.629276 17318 solver.cpp:404]     Test net output #0: accuracy = 0.48292\nI0817 18:53:21.629540 17318 solver.cpp:404]     Test net output #1: loss = 2.59745 (* 1 = 2.59745 loss)\nI0817 18:53:22.951134 17318 solver.cpp:228] Iteration 4300, loss = 0.561769\nI0817 18:53:22.951171 17318 
solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 18:53:22.951194 17318 solver.cpp:244]     Train net output #1: loss = 0.561769 (* 1 = 0.561769 loss)\nI0817 18:53:23.038121 17318 sgd_solver.cpp:166] Iteration 4300, lr = 0.645\nI0817 18:55:41.978071 17318 solver.cpp:337] Iteration 4400, Testing net (#0)\nI0817 18:57:03.209522 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43748\nI0817 18:57:03.209769 17318 solver.cpp:404]     Test net output #1: loss = 3.19507 (* 1 = 3.19507 loss)\nI0817 18:57:04.531497 17318 solver.cpp:228] Iteration 4400, loss = 0.638719\nI0817 18:57:04.531534 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 18:57:04.531558 17318 solver.cpp:244]     Train net output #1: loss = 0.638719 (* 1 = 0.638719 loss)\nI0817 18:57:04.618376 17318 sgd_solver.cpp:166] Iteration 4400, lr = 0.66\nI0817 18:59:23.586320 17318 solver.cpp:337] Iteration 4500, Testing net (#0)\nI0817 19:00:44.857146 17318 solver.cpp:404]     Test net output #0: accuracy = 0.47096\nI0817 19:00:44.857421 17318 solver.cpp:404]     Test net output #1: loss = 2.88089 (* 1 = 2.88089 loss)\nI0817 19:00:46.179725 17318 solver.cpp:228] Iteration 4500, loss = 0.471139\nI0817 19:00:46.179764 17318 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 19:00:46.179786 17318 solver.cpp:244]     Train net output #1: loss = 0.471139 (* 1 = 0.471139 loss)\nI0817 19:00:46.266736 17318 sgd_solver.cpp:166] Iteration 4500, lr = 0.675\nI0817 19:03:05.288579 17318 solver.cpp:337] Iteration 4600, Testing net (#0)\nI0817 19:04:26.493527 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49488\nI0817 19:04:26.493803 17318 solver.cpp:404]     Test net output #1: loss = 2.43734 (* 1 = 2.43734 loss)\nI0817 19:04:27.815538 17318 solver.cpp:228] Iteration 4600, loss = 0.507711\nI0817 19:04:27.815577 17318 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI0817 19:04:27.815600 17318 solver.cpp:244]     Train net output #1: loss = 0.507711 
(* 1 = 0.507711 loss)\nI0817 19:04:27.908660 17318 sgd_solver.cpp:166] Iteration 4600, lr = 0.69\nI0817 19:06:46.855898 17318 solver.cpp:337] Iteration 4700, Testing net (#0)\nI0817 19:08:08.116504 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43592\nI0817 19:08:08.116777 17318 solver.cpp:404]     Test net output #1: loss = 3.13484 (* 1 = 3.13484 loss)\nI0817 19:08:09.438889 17318 solver.cpp:228] Iteration 4700, loss = 0.682772\nI0817 19:08:09.438926 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 19:08:09.438949 17318 solver.cpp:244]     Train net output #1: loss = 0.682772 (* 1 = 0.682772 loss)\nI0817 19:08:09.530843 17318 sgd_solver.cpp:166] Iteration 4700, lr = 0.705\nI0817 19:10:28.462263 17318 solver.cpp:337] Iteration 4800, Testing net (#0)\nI0817 19:11:49.810048 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46748\nI0817 19:11:49.810324 17318 solver.cpp:404]     Test net output #1: loss = 2.75943 (* 1 = 2.75943 loss)\nI0817 19:11:51.131479 17318 solver.cpp:228] Iteration 4800, loss = 0.499173\nI0817 19:11:51.131516 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:11:51.131541 17318 solver.cpp:244]     Train net output #1: loss = 0.499173 (* 1 = 0.499173 loss)\nI0817 19:11:51.220480 17318 sgd_solver.cpp:166] Iteration 4800, lr = 0.72\nI0817 19:14:10.123821 17318 solver.cpp:337] Iteration 4900, Testing net (#0)\nI0817 19:15:31.396545 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46836\nI0817 19:15:31.396808 17318 solver.cpp:404]     Test net output #1: loss = 2.65495 (* 1 = 2.65495 loss)\nI0817 19:15:32.717887 17318 solver.cpp:228] Iteration 4900, loss = 0.559229\nI0817 19:15:32.717921 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 19:15:32.717936 17318 solver.cpp:244]     Train net output #1: loss = 0.559229 (* 1 = 0.559229 loss)\nI0817 19:15:32.803696 17318 sgd_solver.cpp:166] Iteration 4900, lr = 0.735\nI0817 19:17:51.616576 17318 solver.cpp:337] 
Iteration 5000, Testing net (#0)\nI0817 19:19:12.887101 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49864\nI0817 19:19:12.887358 17318 solver.cpp:404]     Test net output #1: loss = 2.48735 (* 1 = 2.48735 loss)\nI0817 19:19:14.208741 17318 solver.cpp:228] Iteration 5000, loss = 0.562681\nI0817 19:19:14.208775 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 19:19:14.208789 17318 solver.cpp:244]     Train net output #1: loss = 0.562681 (* 1 = 0.562681 loss)\nI0817 19:19:14.301936 17318 sgd_solver.cpp:166] Iteration 5000, lr = 0.75\nI0817 19:21:33.144631 17318 solver.cpp:337] Iteration 5100, Testing net (#0)\nI0817 19:22:54.409752 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43692\nI0817 19:22:54.410003 17318 solver.cpp:404]     Test net output #1: loss = 3.04159 (* 1 = 3.04159 loss)\nI0817 19:22:55.731317 17318 solver.cpp:228] Iteration 5100, loss = 0.482677\nI0817 19:22:55.731350 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:22:55.731365 17318 solver.cpp:244]     Train net output #1: loss = 0.482677 (* 1 = 0.482677 loss)\nI0817 19:22:55.823748 17318 sgd_solver.cpp:166] Iteration 5100, lr = 0.765\nI0817 19:25:14.658361 17318 solver.cpp:337] Iteration 5200, Testing net (#0)\nI0817 19:26:35.915318 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45416\nI0817 19:26:35.915581 17318 solver.cpp:404]     Test net output #1: loss = 3.03724 (* 1 = 3.03724 loss)\nI0817 19:26:37.236390 17318 solver.cpp:228] Iteration 5200, loss = 0.679741\nI0817 19:26:37.236429 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 19:26:37.236446 17318 solver.cpp:244]     Train net output #1: loss = 0.679741 (* 1 = 0.679741 loss)\nI0817 19:26:37.333762 17318 sgd_solver.cpp:166] Iteration 5200, lr = 0.78\nI0817 19:28:56.236742 17318 solver.cpp:337] Iteration 5300, Testing net (#0)\nI0817 19:30:17.499493 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49944\nI0817 19:30:17.499755 
17318 solver.cpp:404]     Test net output #1: loss = 2.44947 (* 1 = 2.44947 loss)\nI0817 19:30:18.821033 17318 solver.cpp:228] Iteration 5300, loss = 0.541379\nI0817 19:30:18.821068 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 19:30:18.821082 17318 solver.cpp:244]     Train net output #1: loss = 0.541379 (* 1 = 0.541379 loss)\nI0817 19:30:18.913485 17318 sgd_solver.cpp:166] Iteration 5300, lr = 0.795\nI0817 19:32:37.773172 17318 solver.cpp:337] Iteration 5400, Testing net (#0)\nI0817 19:33:59.034723 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4404\nI0817 19:33:59.034974 17318 solver.cpp:404]     Test net output #1: loss = 3.07183 (* 1 = 3.07183 loss)\nI0817 19:34:00.356299 17318 solver.cpp:228] Iteration 5400, loss = 0.594027\nI0817 19:34:00.356333 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 19:34:00.356348 17318 solver.cpp:244]     Train net output #1: loss = 0.594027 (* 1 = 0.594027 loss)\nI0817 19:34:00.444478 17318 sgd_solver.cpp:166] Iteration 5400, lr = 0.81\nI0817 19:36:19.300063 17318 solver.cpp:337] Iteration 5500, Testing net (#0)\nI0817 19:37:40.566495 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43688\nI0817 19:37:40.566804 17318 solver.cpp:404]     Test net output #1: loss = 3.11215 (* 1 = 3.11215 loss)\nI0817 19:37:41.887827 17318 solver.cpp:228] Iteration 5500, loss = 0.639214\nI0817 19:37:41.887861 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 19:37:41.887877 17318 solver.cpp:244]     Train net output #1: loss = 0.639214 (* 1 = 0.639214 loss)\nI0817 19:37:41.976362 17318 sgd_solver.cpp:166] Iteration 5500, lr = 0.825\nI0817 19:40:00.885757 17318 solver.cpp:337] Iteration 5600, Testing net (#0)\nI0817 19:41:22.158269 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4442\nI0817 19:41:22.158537 17318 solver.cpp:404]     Test net output #1: loss = 2.9379 (* 1 = 2.9379 loss)\nI0817 19:41:23.479924 17318 solver.cpp:228] Iteration 5600, loss 
= 0.55578\nI0817 19:41:23.479976 17318 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 19:41:23.479995 17318 solver.cpp:244]     Train net output #1: loss = 0.55578 (* 1 = 0.55578 loss)\nI0817 19:41:23.573453 17318 sgd_solver.cpp:166] Iteration 5600, lr = 0.84\nI0817 19:43:42.384522 17318 solver.cpp:337] Iteration 5700, Testing net (#0)\nI0817 19:45:03.657263 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4434\nI0817 19:45:03.657527 17318 solver.cpp:404]     Test net output #1: loss = 3.00827 (* 1 = 3.00827 loss)\nI0817 19:45:04.979058 17318 solver.cpp:228] Iteration 5700, loss = 0.647453\nI0817 19:45:04.979092 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:45:04.979109 17318 solver.cpp:244]     Train net output #1: loss = 0.647453 (* 1 = 0.647453 loss)\nI0817 19:45:05.065837 17318 sgd_solver.cpp:166] Iteration 5700, lr = 0.855\nI0817 19:47:23.941009 17318 solver.cpp:337] Iteration 5800, Testing net (#0)\nI0817 19:48:45.209662 17318 solver.cpp:404]     Test net output #0: accuracy = 0.47408\nI0817 19:48:45.209913 17318 solver.cpp:404]     Test net output #1: loss = 2.58226 (* 1 = 2.58226 loss)\nI0817 19:48:46.530931 17318 solver.cpp:228] Iteration 5800, loss = 0.450551\nI0817 19:48:46.530974 17318 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 19:48:46.530990 17318 solver.cpp:244]     Train net output #1: loss = 0.450551 (* 1 = 0.450551 loss)\nI0817 19:48:46.619091 17318 sgd_solver.cpp:166] Iteration 5800, lr = 0.87\nI0817 19:51:05.425657 17318 solver.cpp:337] Iteration 5900, Testing net (#0)\nI0817 19:52:26.690996 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4404\nI0817 19:52:26.691257 17318 solver.cpp:404]     Test net output #1: loss = 2.74591 (* 1 = 2.74591 loss)\nI0817 19:52:28.012486 17318 solver.cpp:228] Iteration 5900, loss = 0.4917\nI0817 19:52:28.012521 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 19:52:28.012537 17318 solver.cpp:244]     Train 
net output #1: loss = 0.4917 (* 1 = 0.4917 loss)\nI0817 19:52:28.097880 17318 sgd_solver.cpp:166] Iteration 5900, lr = 0.885\nI0817 19:54:46.950171 17318 solver.cpp:337] Iteration 6000, Testing net (#0)\nI0817 19:56:08.219200 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42428\nI0817 19:56:08.219461 17318 solver.cpp:404]     Test net output #1: loss = 3.36946 (* 1 = 3.36946 loss)\nI0817 19:56:09.540469 17318 solver.cpp:228] Iteration 6000, loss = 0.515964\nI0817 19:56:09.540503 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 19:56:09.540518 17318 solver.cpp:244]     Train net output #1: loss = 0.515964 (* 1 = 0.515964 loss)\nI0817 19:56:09.632217 17318 sgd_solver.cpp:166] Iteration 6000, lr = 0.9\nI0817 19:58:28.494938 17318 solver.cpp:337] Iteration 6100, Testing net (#0)\nI0817 19:59:49.764616 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42688\nI0817 19:59:49.764883 17318 solver.cpp:404]     Test net output #1: loss = 3.10668 (* 1 = 3.10668 loss)\nI0817 19:59:51.086308 17318 solver.cpp:228] Iteration 6100, loss = 0.637547\nI0817 19:59:51.086343 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 19:59:51.086359 17318 solver.cpp:244]     Train net output #1: loss = 0.637547 (* 1 = 0.637547 loss)\nI0817 19:59:51.178611 17318 sgd_solver.cpp:166] Iteration 6100, lr = 0.915\nI0817 20:02:10.069478 17318 solver.cpp:337] Iteration 6200, Testing net (#0)\nI0817 20:03:31.437973 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4496\nI0817 20:03:31.438244 17318 solver.cpp:404]     Test net output #1: loss = 3.03126 (* 1 = 3.03126 loss)\nI0817 20:03:32.758626 17318 solver.cpp:228] Iteration 6200, loss = 0.617239\nI0817 20:03:32.758673 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:03:32.758697 17318 solver.cpp:244]     Train net output #1: loss = 0.617239 (* 1 = 0.617239 loss)\nI0817 20:03:32.847451 17318 sgd_solver.cpp:166] Iteration 6200, lr = 0.93\nI0817 
20:05:51.676069 17318 solver.cpp:337] Iteration 6300, Testing net (#0)\nI0817 20:07:13.037606 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44376\nI0817 20:07:13.037861 17318 solver.cpp:404]     Test net output #1: loss = 2.68102 (* 1 = 2.68102 loss)\nI0817 20:07:14.358517 17318 solver.cpp:228] Iteration 6300, loss = 0.600854\nI0817 20:07:14.358566 17318 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 20:07:14.358590 17318 solver.cpp:244]     Train net output #1: loss = 0.600854 (* 1 = 0.600854 loss)\nI0817 20:07:14.452605 17318 sgd_solver.cpp:166] Iteration 6300, lr = 0.945\nI0817 20:09:33.309535 17318 solver.cpp:337] Iteration 6400, Testing net (#0)\nI0817 20:10:54.679133 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42616\nI0817 20:10:54.679410 17318 solver.cpp:404]     Test net output #1: loss = 3.02692 (* 1 = 3.02692 loss)\nI0817 20:10:55.999716 17318 solver.cpp:228] Iteration 6400, loss = 0.492396\nI0817 20:10:55.999764 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 20:10:55.999788 17318 solver.cpp:244]     Train net output #1: loss = 0.492396 (* 1 = 0.492396 loss)\nI0817 20:10:56.086088 17318 sgd_solver.cpp:166] Iteration 6400, lr = 0.96\nI0817 20:13:15.112402 17318 solver.cpp:337] Iteration 6500, Testing net (#0)\nI0817 20:14:36.469135 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4558\nI0817 20:14:36.469411 17318 solver.cpp:404]     Test net output #1: loss = 2.71448 (* 1 = 2.71448 loss)\nI0817 20:14:37.789373 17318 solver.cpp:228] Iteration 6500, loss = 0.693396\nI0817 20:14:37.789422 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 20:14:37.789445 17318 solver.cpp:244]     Train net output #1: loss = 0.693396 (* 1 = 0.693396 loss)\nI0817 20:14:37.876127 17318 sgd_solver.cpp:166] Iteration 6500, lr = 0.975\nI0817 20:16:56.794052 17318 solver.cpp:337] Iteration 6600, Testing net (#0)\nI0817 20:18:18.084390 17318 solver.cpp:404]     Test net output #0: 
accuracy = 0.47792\nI0817 20:18:18.084668 17318 solver.cpp:404]     Test net output #1: loss = 2.56075 (* 1 = 2.56075 loss)\nI0817 20:18:19.404235 17318 solver.cpp:228] Iteration 6600, loss = 0.561373\nI0817 20:18:19.404283 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:18:19.404307 17318 solver.cpp:244]     Train net output #1: loss = 0.561373 (* 1 = 0.561373 loss)\nI0817 20:18:19.489215 17318 sgd_solver.cpp:166] Iteration 6600, lr = 0.99\nI0817 20:20:38.418612 17318 solver.cpp:337] Iteration 6700, Testing net (#0)\nI0817 20:21:59.703135 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38316\nI0817 20:21:59.703399 17318 solver.cpp:404]     Test net output #1: loss = 4.00172 (* 1 = 4.00172 loss)\nI0817 20:22:01.022789 17318 solver.cpp:228] Iteration 6700, loss = 0.604294\nI0817 20:22:01.022835 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:22:01.022861 17318 solver.cpp:244]     Train net output #1: loss = 0.604294 (* 1 = 0.604294 loss)\nI0817 20:22:01.119349 17318 sgd_solver.cpp:166] Iteration 6700, lr = 1.005\nI0817 20:24:20.007716 17318 solver.cpp:337] Iteration 6800, Testing net (#0)\nI0817 20:25:41.327128 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42432\nI0817 20:25:41.327414 17318 solver.cpp:404]     Test net output #1: loss = 2.78506 (* 1 = 2.78506 loss)\nI0817 20:25:42.646551 17318 solver.cpp:228] Iteration 6800, loss = 0.542507\nI0817 20:25:42.646600 17318 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 20:25:42.646625 17318 solver.cpp:244]     Train net output #1: loss = 0.542507 (* 1 = 0.542507 loss)\nI0817 20:25:42.740468 17318 sgd_solver.cpp:166] Iteration 6800, lr = 1.02\nI0817 20:28:01.560931 17318 solver.cpp:337] Iteration 6900, Testing net (#0)\nI0817 20:29:22.863942 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4126\nI0817 20:29:22.864220 17318 solver.cpp:404]     Test net output #1: loss = 3.23479 (* 1 = 3.23479 loss)\nI0817 
20:29:24.183081 17318 solver.cpp:228] Iteration 6900, loss = 0.620267\nI0817 20:29:24.183132 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 20:29:24.183157 17318 solver.cpp:244]     Train net output #1: loss = 0.620267 (* 1 = 0.620267 loss)\nI0817 20:29:24.273232 17318 sgd_solver.cpp:166] Iteration 6900, lr = 1.035\nI0817 20:31:43.152990 17318 solver.cpp:337] Iteration 7000, Testing net (#0)\nI0817 20:33:04.424341 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43556\nI0817 20:33:04.424621 17318 solver.cpp:404]     Test net output #1: loss = 2.75631 (* 1 = 2.75631 loss)\nI0817 20:33:05.746718 17318 solver.cpp:228] Iteration 7000, loss = 0.538403\nI0817 20:33:05.746762 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 20:33:05.746778 17318 solver.cpp:244]     Train net output #1: loss = 0.538403 (* 1 = 0.538403 loss)\nI0817 20:33:05.838471 17318 sgd_solver.cpp:166] Iteration 7000, lr = 1.05\nI0817 20:35:24.719115 17318 solver.cpp:337] Iteration 7100, Testing net (#0)\nI0817 20:36:45.977547 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44628\nI0817 20:36:45.977797 17318 solver.cpp:404]     Test net output #1: loss = 3.10283 (* 1 = 3.10283 loss)\nI0817 20:36:47.296890 17318 solver.cpp:228] Iteration 7100, loss = 0.757827\nI0817 20:36:47.296931 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 20:36:47.296947 17318 solver.cpp:244]     Train net output #1: loss = 0.757827 (* 1 = 0.757827 loss)\nI0817 20:36:47.386871 17318 sgd_solver.cpp:166] Iteration 7100, lr = 1.065\nI0817 20:39:06.272259 17318 solver.cpp:337] Iteration 7200, Testing net (#0)\nI0817 20:40:27.527539 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41848\nI0817 20:40:27.527789 17318 solver.cpp:404]     Test net output #1: loss = 3.04564 (* 1 = 3.04564 loss)\nI0817 20:40:28.846923 17318 solver.cpp:228] Iteration 7200, loss = 0.583535\nI0817 20:40:28.846966 17318 solver.cpp:244]     Train net output #0: 
accuracy = 0.808\nI0817 20:40:28.846982 17318 solver.cpp:244]     Train net output #1: loss = 0.583535 (* 1 = 0.583535 loss)\nI0817 20:40:28.939935 17318 sgd_solver.cpp:166] Iteration 7200, lr = 1.08\nI0817 20:42:47.819082 17318 solver.cpp:337] Iteration 7300, Testing net (#0)\nI0817 20:44:09.078784 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44652\nI0817 20:44:09.079046 17318 solver.cpp:404]     Test net output #1: loss = 2.65357 (* 1 = 2.65357 loss)\nI0817 20:44:10.397879 17318 solver.cpp:228] Iteration 7300, loss = 0.59511\nI0817 20:44:10.397922 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 20:44:10.397938 17318 solver.cpp:244]     Train net output #1: loss = 0.59511 (* 1 = 0.59511 loss)\nI0817 20:44:10.492399 17318 sgd_solver.cpp:166] Iteration 7300, lr = 1.095\nI0817 20:46:29.303791 17318 solver.cpp:337] Iteration 7400, Testing net (#0)\nI0817 20:47:50.564504 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44388\nI0817 20:47:50.564769 17318 solver.cpp:404]     Test net output #1: loss = 2.94337 (* 1 = 2.94337 loss)\nI0817 20:47:51.883930 17318 solver.cpp:228] Iteration 7400, loss = 0.609872\nI0817 20:47:51.883975 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 20:47:51.883991 17318 solver.cpp:244]     Train net output #1: loss = 0.609872 (* 1 = 0.609872 loss)\nI0817 20:47:51.976997 17318 sgd_solver.cpp:166] Iteration 7400, lr = 1.11\nI0817 20:50:10.829187 17318 solver.cpp:337] Iteration 7500, Testing net (#0)\nI0817 20:51:32.092263 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42752\nI0817 20:51:32.092547 17318 solver.cpp:404]     Test net output #1: loss = 2.78579 (* 1 = 2.78579 loss)\nI0817 20:51:33.412058 17318 solver.cpp:228] Iteration 7500, loss = 0.630478\nI0817 20:51:33.412101 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 20:51:33.412117 17318 solver.cpp:244]     Train net output #1: loss = 0.630478 (* 1 = 0.630478 loss)\nI0817 20:51:33.497402 
17318 sgd_solver.cpp:166] Iteration 7500, lr = 1.125\nI0817 20:53:52.451941 17318 solver.cpp:337] Iteration 7600, Testing net (#0)\nI0817 20:55:13.613574 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43256\nI0817 20:55:13.613823 17318 solver.cpp:404]     Test net output #1: loss = 2.76685 (* 1 = 2.76685 loss)\nI0817 20:55:14.932925 17318 solver.cpp:228] Iteration 7600, loss = 0.675817\nI0817 20:55:14.932967 17318 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 20:55:14.932984 17318 solver.cpp:244]     Train net output #1: loss = 0.675817 (* 1 = 0.675817 loss)\nI0817 20:55:15.026345 17318 sgd_solver.cpp:166] Iteration 7600, lr = 1.14\nI0817 20:57:34.497421 17318 solver.cpp:337] Iteration 7700, Testing net (#0)\nI0817 20:58:56.510190 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4442\nI0817 20:58:56.510398 17318 solver.cpp:404]     Test net output #1: loss = 2.62367 (* 1 = 2.62367 loss)\nI0817 20:58:57.833169 17318 solver.cpp:228] Iteration 7700, loss = 0.577261\nI0817 20:58:57.833215 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 20:58:57.833231 17318 solver.cpp:244]     Train net output #1: loss = 0.577261 (* 1 = 0.577261 loss)\nI0817 20:58:57.924973 17318 sgd_solver.cpp:166] Iteration 7700, lr = 1.155\nI0817 21:01:17.521761 17318 solver.cpp:337] Iteration 7800, Testing net (#0)\nI0817 21:02:39.631247 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42572\nI0817 21:02:39.631506 17318 solver.cpp:404]     Test net output #1: loss = 3.11128 (* 1 = 3.11128 loss)\nI0817 21:02:40.954080 17318 solver.cpp:228] Iteration 7800, loss = 0.63795\nI0817 21:02:40.954125 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 21:02:40.954140 17318 solver.cpp:244]     Train net output #1: loss = 0.63795 (* 1 = 0.63795 loss)\nI0817 21:02:41.047646 17318 sgd_solver.cpp:166] Iteration 7800, lr = 1.17\nI0817 21:05:00.669077 17318 solver.cpp:337] Iteration 7900, Testing net (#0)\nI0817 
21:06:22.805166 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42788\nI0817 21:06:22.805407 17318 solver.cpp:404]     Test net output #1: loss = 2.85823 (* 1 = 2.85823 loss)\nI0817 21:06:24.127665 17318 solver.cpp:228] Iteration 7900, loss = 0.67184\nI0817 21:06:24.127709 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 21:06:24.127725 17318 solver.cpp:244]     Train net output #1: loss = 0.67184 (* 1 = 0.67184 loss)\nI0817 21:06:24.209342 17318 sgd_solver.cpp:166] Iteration 7900, lr = 1.185\nI0817 21:08:43.279551 17318 solver.cpp:337] Iteration 8000, Testing net (#0)\nI0817 21:10:05.394434 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44568\nI0817 21:10:05.394647 17318 solver.cpp:404]     Test net output #1: loss = 2.92554 (* 1 = 2.92554 loss)\nI0817 21:10:06.717044 17318 solver.cpp:228] Iteration 8000, loss = 0.53702\nI0817 21:10:06.717089 17318 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI0817 21:10:06.717105 17318 solver.cpp:244]     Train net output #1: loss = 0.53702 (* 1 = 0.53702 loss)\nI0817 21:10:06.795641 17318 sgd_solver.cpp:166] Iteration 8000, lr = 1.2\nI0817 21:12:25.872864 17318 solver.cpp:337] Iteration 8100, Testing net (#0)\nI0817 21:13:48.017568 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44036\nI0817 21:13:48.017783 17318 solver.cpp:404]     Test net output #1: loss = 3.15134 (* 1 = 3.15134 loss)\nI0817 21:13:49.341162 17318 solver.cpp:228] Iteration 8100, loss = 0.760541\nI0817 21:13:49.341205 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 21:13:49.341223 17318 solver.cpp:244]     Train net output #1: loss = 0.760541 (* 1 = 0.760541 loss)\nI0817 21:13:49.429875 17318 sgd_solver.cpp:166] Iteration 8100, lr = 1.215\nI0817 21:16:08.578073 17318 solver.cpp:337] Iteration 8200, Testing net (#0)\nI0817 21:17:30.705271 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44292\nI0817 21:17:30.705479 17318 solver.cpp:404]     Test net output #1: 
loss = 2.79644 (* 1 = 2.79644 loss)\nI0817 21:17:32.029115 17318 solver.cpp:228] Iteration 8200, loss = 0.608145\nI0817 21:17:32.029160 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 21:17:32.029176 17318 solver.cpp:244]     Train net output #1: loss = 0.608145 (* 1 = 0.608145 loss)\nI0817 21:17:32.109926 17318 sgd_solver.cpp:166] Iteration 8200, lr = 1.23\nI0817 21:19:50.886132 17318 solver.cpp:337] Iteration 8300, Testing net (#0)\nI0817 21:21:12.049770 17318 solver.cpp:404]     Test net output #0: accuracy = 0.459\nI0817 21:21:12.050034 17318 solver.cpp:404]     Test net output #1: loss = 2.69796 (* 1 = 2.69796 loss)\nI0817 21:21:13.369374 17318 solver.cpp:228] Iteration 8300, loss = 0.715495\nI0817 21:21:13.369410 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 21:21:13.369424 17318 solver.cpp:244]     Train net output #1: loss = 0.715495 (* 1 = 0.715495 loss)\nI0817 21:21:13.455313 17318 sgd_solver.cpp:166] Iteration 8300, lr = 1.245\nI0817 21:23:31.767149 17318 solver.cpp:337] Iteration 8400, Testing net (#0)\nI0817 21:24:52.921200 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40776\nI0817 21:24:52.921437 17318 solver.cpp:404]     Test net output #1: loss = 3.63058 (* 1 = 3.63058 loss)\nI0817 21:24:54.240309 17318 solver.cpp:228] Iteration 8400, loss = 0.697233\nI0817 21:24:54.240342 17318 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 21:24:54.240357 17318 solver.cpp:244]     Train net output #1: loss = 0.697233 (* 1 = 0.697233 loss)\nI0817 21:24:54.319839 17318 sgd_solver.cpp:166] Iteration 8400, lr = 1.26\nI0817 21:27:12.630126 17318 solver.cpp:337] Iteration 8500, Testing net (#0)\nI0817 21:28:33.902210 17318 solver.cpp:404]     Test net output #0: accuracy = 0.403\nI0817 21:28:33.902472 17318 solver.cpp:404]     Test net output #1: loss = 3.43042 (* 1 = 3.43042 loss)\nI0817 21:28:35.221717 17318 solver.cpp:228] Iteration 8500, loss = 0.682413\nI0817 21:28:35.221751 17318 
solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 21:28:35.221766 17318 solver.cpp:244]     Train net output #1: loss = 0.682413 (* 1 = 0.682413 loss)\nI0817 21:28:35.310066 17318 sgd_solver.cpp:166] Iteration 8500, lr = 1.275\nI0817 21:30:53.734889 17318 solver.cpp:337] Iteration 8600, Testing net (#0)\nI0817 21:32:15.015486 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42108\nI0817 21:32:15.015769 17318 solver.cpp:404]     Test net output #1: loss = 3.11081 (* 1 = 3.11081 loss)\nI0817 21:32:16.335160 17318 solver.cpp:228] Iteration 8600, loss = 0.601196\nI0817 21:32:16.335196 17318 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI0817 21:32:16.335211 17318 solver.cpp:244]     Train net output #1: loss = 0.601196 (* 1 = 0.601196 loss)\nI0817 21:32:16.426688 17318 sgd_solver.cpp:166] Iteration 8600, lr = 1.29\nI0817 21:34:34.761504 17318 solver.cpp:337] Iteration 8700, Testing net (#0)\nI0817 21:35:56.037784 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41304\nI0817 21:35:56.038053 17318 solver.cpp:404]     Test net output #1: loss = 2.93304 (* 1 = 2.93304 loss)\nI0817 21:35:57.357404 17318 solver.cpp:228] Iteration 8700, loss = 0.580842\nI0817 21:35:57.357439 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 21:35:57.357455 17318 solver.cpp:244]     Train net output #1: loss = 0.580842 (* 1 = 0.580842 loss)\nI0817 21:35:57.438654 17318 sgd_solver.cpp:166] Iteration 8700, lr = 1.305\nI0817 21:38:15.831403 17318 solver.cpp:337] Iteration 8800, Testing net (#0)\nI0817 21:39:37.113322 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4346\nI0817 21:39:37.113575 17318 solver.cpp:404]     Test net output #1: loss = 3.01992 (* 1 = 3.01992 loss)\nI0817 21:39:38.432891 17318 solver.cpp:228] Iteration 8800, loss = 0.568333\nI0817 21:39:38.432926 17318 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI0817 21:39:38.432942 17318 solver.cpp:244]     Train net output #1: loss = 0.568333 (* 
1 = 0.568333 loss)\nI0817 21:39:38.515417 17318 sgd_solver.cpp:166] Iteration 8800, lr = 1.32\nI0817 21:41:56.869660 17318 solver.cpp:337] Iteration 8900, Testing net (#0)\nI0817 21:43:18.163457 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46828\nI0817 21:43:18.163723 17318 solver.cpp:404]     Test net output #1: loss = 2.81868 (* 1 = 2.81868 loss)\nI0817 21:43:19.483502 17318 solver.cpp:228] Iteration 8900, loss = 0.661329\nI0817 21:43:19.483536 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 21:43:19.483551 17318 solver.cpp:244]     Train net output #1: loss = 0.661329 (* 1 = 0.661329 loss)\nI0817 21:43:19.569188 17318 sgd_solver.cpp:166] Iteration 8900, lr = 1.335\nI0817 21:45:37.902967 17318 solver.cpp:337] Iteration 9000, Testing net (#0)\nI0817 21:46:59.185348 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40144\nI0817 21:46:59.185626 17318 solver.cpp:404]     Test net output #1: loss = 3.36983 (* 1 = 3.36983 loss)\nI0817 21:47:00.505764 17318 solver.cpp:228] Iteration 9000, loss = 1.03199\nI0817 21:47:00.505801 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 21:47:00.505825 17318 solver.cpp:244]     Train net output #1: loss = 1.03199 (* 1 = 1.03199 loss)\nI0817 21:47:00.597501 17318 sgd_solver.cpp:166] Iteration 9000, lr = 1.35\nI0817 21:49:18.944026 17318 solver.cpp:337] Iteration 9100, Testing net (#0)\nI0817 21:50:40.243595 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41264\nI0817 21:50:40.243876 17318 solver.cpp:404]     Test net output #1: loss = 3.16346 (* 1 = 3.16346 loss)\nI0817 21:50:41.564252 17318 solver.cpp:228] Iteration 9100, loss = 0.882418\nI0817 21:50:41.564290 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 21:50:41.564312 17318 solver.cpp:244]     Train net output #1: loss = 0.882418 (* 1 = 0.882418 loss)\nI0817 21:50:41.649801 17318 sgd_solver.cpp:166] Iteration 9100, lr = 1.365\nI0817 21:52:59.981735 17318 solver.cpp:337] 
Iteration 9200, Testing net (#0)\nI0817 21:54:21.284658 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4484\nI0817 21:54:21.284939 17318 solver.cpp:404]     Test net output #1: loss = 2.62403 (* 1 = 2.62403 loss)\nI0817 21:54:22.604913 17318 solver.cpp:228] Iteration 9200, loss = 0.677126\nI0817 21:54:22.604949 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 21:54:22.604972 17318 solver.cpp:244]     Train net output #1: loss = 0.677126 (* 1 = 0.677126 loss)\nI0817 21:54:22.686906 17318 sgd_solver.cpp:166] Iteration 9200, lr = 1.38\nI0817 21:56:40.985335 17318 solver.cpp:337] Iteration 9300, Testing net (#0)\nI0817 21:58:02.257506 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44288\nI0817 21:58:02.257781 17318 solver.cpp:404]     Test net output #1: loss = 2.87497 (* 1 = 2.87497 loss)\nI0817 21:58:03.577764 17318 solver.cpp:228] Iteration 9300, loss = 0.851452\nI0817 21:58:03.577801 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 21:58:03.577824 17318 solver.cpp:244]     Train net output #1: loss = 0.851452 (* 1 = 0.851452 loss)\nI0817 21:58:03.656956 17318 sgd_solver.cpp:166] Iteration 9300, lr = 1.395\nI0817 22:00:21.940171 17318 solver.cpp:337] Iteration 9400, Testing net (#0)\nI0817 22:01:43.246421 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41552\nI0817 22:01:43.246700 17318 solver.cpp:404]     Test net output #1: loss = 3.11416 (* 1 = 3.11416 loss)\nI0817 22:01:44.566843 17318 solver.cpp:228] Iteration 9400, loss = 0.798701\nI0817 22:01:44.566880 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 22:01:44.566903 17318 solver.cpp:244]     Train net output #1: loss = 0.798701 (* 1 = 0.798701 loss)\nI0817 22:01:44.650943 17318 sgd_solver.cpp:166] Iteration 9400, lr = 1.41\nI0817 22:04:02.955655 17318 solver.cpp:337] Iteration 9500, Testing net (#0)\nI0817 22:05:24.228739 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43928\nI0817 22:05:24.229017 
17318 solver.cpp:404]     Test net output #1: loss = 2.72436 (* 1 = 2.72436 loss)\nI0817 22:05:25.548216 17318 solver.cpp:228] Iteration 9500, loss = 0.631808\nI0817 22:05:25.548252 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 22:05:25.548275 17318 solver.cpp:244]     Train net output #1: loss = 0.631808 (* 1 = 0.631808 loss)\nI0817 22:05:25.632469 17318 sgd_solver.cpp:166] Iteration 9500, lr = 1.425\nI0817 22:07:43.936723 17318 solver.cpp:337] Iteration 9600, Testing net (#0)\nI0817 22:09:05.259173 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44332\nI0817 22:09:05.259441 17318 solver.cpp:404]     Test net output #1: loss = 2.75252 (* 1 = 2.75252 loss)\nI0817 22:09:06.579457 17318 solver.cpp:228] Iteration 9600, loss = 0.602184\nI0817 22:09:06.579493 17318 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 22:09:06.579517 17318 solver.cpp:244]     Train net output #1: loss = 0.602184 (* 1 = 0.602184 loss)\nI0817 22:09:06.659482 17318 sgd_solver.cpp:166] Iteration 9600, lr = 1.44\nI0817 22:11:25.048699 17318 solver.cpp:337] Iteration 9700, Testing net (#0)\nI0817 22:12:46.343047 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35892\nI0817 22:12:46.343339 17318 solver.cpp:404]     Test net output #1: loss = 3.83007 (* 1 = 3.83007 loss)\nI0817 22:12:47.663141 17318 solver.cpp:228] Iteration 9700, loss = 0.597125\nI0817 22:12:47.663177 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0817 22:12:47.663200 17318 solver.cpp:244]     Train net output #1: loss = 0.597125 (* 1 = 0.597125 loss)\nI0817 22:12:47.744719 17318 sgd_solver.cpp:166] Iteration 9700, lr = 1.455\nI0817 22:15:06.144476 17318 solver.cpp:337] Iteration 9800, Testing net (#0)\nI0817 22:16:27.484199 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44268\nI0817 22:16:27.484468 17318 solver.cpp:404]     Test net output #1: loss = 2.86546 (* 1 = 2.86546 loss)\nI0817 22:16:28.803534 17318 solver.cpp:228] Iteration 9800, 
loss = 0.72132\nI0817 22:16:28.803567 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0817 22:16:28.803591 17318 solver.cpp:244]     Train net output #1: loss = 0.72132 (* 1 = 0.72132 loss)\nI0817 22:16:28.892071 17318 sgd_solver.cpp:166] Iteration 9800, lr = 1.47\nI0817 22:18:47.258440 17318 solver.cpp:337] Iteration 9900, Testing net (#0)\nI0817 22:20:08.604370 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4636\nI0817 22:20:08.604655 17318 solver.cpp:404]     Test net output #1: loss = 2.6779 (* 1 = 2.6779 loss)\nI0817 22:20:09.924312 17318 solver.cpp:228] Iteration 9900, loss = 0.678092\nI0817 22:20:09.924350 17318 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI0817 22:20:09.924372 17318 solver.cpp:244]     Train net output #1: loss = 0.678092 (* 1 = 0.678092 loss)\nI0817 22:20:10.011173 17318 sgd_solver.cpp:166] Iteration 9900, lr = 1.485\nI0817 22:22:28.359761 17318 solver.cpp:337] Iteration 10000, Testing net (#0)\nI0817 22:23:49.676618 17318 solver.cpp:404]     Test net output #0: accuracy = 0.29532\nI0817 22:23:49.676873 17318 solver.cpp:404]     Test net output #1: loss = 5.22076 (* 1 = 5.22076 loss)\nI0817 22:23:50.999385 17318 solver.cpp:228] Iteration 10000, loss = 0.705596\nI0817 22:23:50.999420 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 22:23:50.999436 17318 solver.cpp:244]     Train net output #1: loss = 0.705596 (* 1 = 0.705596 loss)\nI0817 22:23:51.088238 17318 sgd_solver.cpp:166] Iteration 10000, lr = 1.5\nI0817 22:26:09.500067 17318 solver.cpp:337] Iteration 10100, Testing net (#0)\nI0817 22:27:30.858696 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49272\nI0817 22:27:30.858974 17318 solver.cpp:404]     Test net output #1: loss = 2.35838 (* 1 = 2.35838 loss)\nI0817 22:27:32.179041 17318 solver.cpp:228] Iteration 10100, loss = 0.7995\nI0817 22:27:32.179075 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 22:27:32.179090 17318 solver.cpp:244]    
 Train net output #1: loss = 0.7995 (* 1 = 0.7995 loss)\nI0817 22:27:32.258906 17318 sgd_solver.cpp:166] Iteration 10100, lr = 1.515\nI0817 22:29:50.646616 17318 solver.cpp:337] Iteration 10200, Testing net (#0)\nI0817 22:31:11.941206 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35328\nI0817 22:31:11.941493 17318 solver.cpp:404]     Test net output #1: loss = 4.00744 (* 1 = 4.00744 loss)\nI0817 22:31:13.261217 17318 solver.cpp:228] Iteration 10200, loss = 0.863812\nI0817 22:31:13.261250 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 22:31:13.261266 17318 solver.cpp:244]     Train net output #1: loss = 0.863812 (* 1 = 0.863812 loss)\nI0817 22:31:13.348333 17318 sgd_solver.cpp:166] Iteration 10200, lr = 1.53\nI0817 22:33:31.695258 17318 solver.cpp:337] Iteration 10300, Testing net (#0)\nI0817 22:34:52.974886 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41724\nI0817 22:34:52.975172 17318 solver.cpp:404]     Test net output #1: loss = 3.31115 (* 1 = 3.31115 loss)\nI0817 22:34:54.295101 17318 solver.cpp:228] Iteration 10300, loss = 0.570697\nI0817 22:34:54.295138 17318 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI0817 22:34:54.295155 17318 solver.cpp:244]     Train net output #1: loss = 0.570697 (* 1 = 0.570697 loss)\nI0817 22:34:54.379032 17318 sgd_solver.cpp:166] Iteration 10300, lr = 1.545\nI0817 22:37:12.748190 17318 solver.cpp:337] Iteration 10400, Testing net (#0)\nI0817 22:38:34.108855 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4582\nI0817 22:38:34.109144 17318 solver.cpp:404]     Test net output #1: loss = 2.58109 (* 1 = 2.58109 loss)\nI0817 22:38:35.429649 17318 solver.cpp:228] Iteration 10400, loss = 0.946124\nI0817 22:38:35.429682 17318 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0817 22:38:35.429697 17318 solver.cpp:244]     Train net output #1: loss = 0.946124 (* 1 = 0.946124 loss)\nI0817 22:38:35.513042 17318 sgd_solver.cpp:166] Iteration 10400, lr = 
1.56\nI0817 22:40:53.881953 17318 solver.cpp:337] Iteration 10500, Testing net (#0)\nI0817 22:42:15.175278 17318 solver.cpp:404]     Test net output #0: accuracy = 0.3688\nI0817 22:42:15.175562 17318 solver.cpp:404]     Test net output #1: loss = 4.09282 (* 1 = 4.09282 loss)\nI0817 22:42:16.496141 17318 solver.cpp:228] Iteration 10500, loss = 0.871368\nI0817 22:42:16.496175 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 22:42:16.496191 17318 solver.cpp:244]     Train net output #1: loss = 0.871368 (* 1 = 0.871368 loss)\nI0817 22:42:16.587931 17318 sgd_solver.cpp:166] Iteration 10500, lr = 1.575\nI0817 22:44:35.018600 17318 solver.cpp:337] Iteration 10600, Testing net (#0)\nI0817 22:45:56.326700 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42168\nI0817 22:45:56.326966 17318 solver.cpp:404]     Test net output #1: loss = 2.80663 (* 1 = 2.80663 loss)\nI0817 22:45:57.645992 17318 solver.cpp:228] Iteration 10600, loss = 0.802675\nI0817 22:45:57.646028 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 22:45:57.646044 17318 solver.cpp:244]     Train net output #1: loss = 0.802675 (* 1 = 0.802675 loss)\nI0817 22:45:57.729205 17318 sgd_solver.cpp:166] Iteration 10600, lr = 1.59\nI0817 22:48:16.068900 17318 solver.cpp:337] Iteration 10700, Testing net (#0)\nI0817 22:49:37.356577 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44136\nI0817 22:49:37.356842 17318 solver.cpp:404]     Test net output #1: loss = 3.03327 (* 1 = 3.03327 loss)\nI0817 22:49:38.676841 17318 solver.cpp:228] Iteration 10700, loss = 0.819304\nI0817 22:49:38.676880 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 22:49:38.676897 17318 solver.cpp:244]     Train net output #1: loss = 0.819304 (* 1 = 0.819304 loss)\nI0817 22:49:38.761132 17318 sgd_solver.cpp:166] Iteration 10700, lr = 1.605\nI0817 22:51:57.119510 17318 solver.cpp:337] Iteration 10800, Testing net (#0)\nI0817 22:53:18.392619 17318 solver.cpp:404]     Test 
net output #0: accuracy = 0.47064\nI0817 22:53:18.392887 17318 solver.cpp:404]     Test net output #1: loss = 2.48194 (* 1 = 2.48194 loss)\nI0817 22:53:19.712843 17318 solver.cpp:228] Iteration 10800, loss = 0.764516\nI0817 22:53:19.712882 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 22:53:19.712898 17318 solver.cpp:244]     Train net output #1: loss = 0.764516 (* 1 = 0.764516 loss)\nI0817 22:53:19.799075 17318 sgd_solver.cpp:166] Iteration 10800, lr = 1.62\nI0817 22:55:38.233363 17318 solver.cpp:337] Iteration 10900, Testing net (#0)\nI0817 22:56:59.504436 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44988\nI0817 22:56:59.504707 17318 solver.cpp:404]     Test net output #1: loss = 2.77434 (* 1 = 2.77434 loss)\nI0817 22:57:00.824530 17318 solver.cpp:228] Iteration 10900, loss = 0.883373\nI0817 22:57:00.824564 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0817 22:57:00.824580 17318 solver.cpp:244]     Train net output #1: loss = 0.883373 (* 1 = 0.883373 loss)\nI0817 22:57:00.908884 17318 sgd_solver.cpp:166] Iteration 10900, lr = 1.635\nI0817 22:59:19.283391 17318 solver.cpp:337] Iteration 11000, Testing net (#0)\nI0817 23:00:40.544633 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38196\nI0817 23:00:40.544900 17318 solver.cpp:404]     Test net output #1: loss = 3.28802 (* 1 = 3.28802 loss)\nI0817 23:00:41.864051 17318 solver.cpp:228] Iteration 11000, loss = 0.937575\nI0817 23:00:41.864086 17318 solver.cpp:244]     Train net output #0: accuracy = 0.784\nI0817 23:00:41.864102 17318 solver.cpp:244]     Train net output #1: loss = 0.937575 (* 1 = 0.937575 loss)\nI0817 23:00:41.946974 17318 sgd_solver.cpp:166] Iteration 11000, lr = 1.65\nI0817 23:03:00.303241 17318 solver.cpp:337] Iteration 11100, Testing net (#0)\nI0817 23:04:21.567317 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40884\nI0817 23:04:21.567592 17318 solver.cpp:404]     Test net output #1: loss = 3.20885 (* 1 = 3.20885 
loss)\nI0817 23:04:22.886620 17318 solver.cpp:228] Iteration 11100, loss = 0.756844\nI0817 23:04:22.886654 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 23:04:22.886670 17318 solver.cpp:244]     Train net output #1: loss = 0.756844 (* 1 = 0.756844 loss)\nI0817 23:04:22.973534 17318 sgd_solver.cpp:166] Iteration 11100, lr = 1.665\nI0817 23:06:41.320546 17318 solver.cpp:337] Iteration 11200, Testing net (#0)\nI0817 23:08:02.588917 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45748\nI0817 23:08:02.589203 17318 solver.cpp:404]     Test net output #1: loss = 2.41472 (* 1 = 2.41472 loss)\nI0817 23:08:03.909287 17318 solver.cpp:228] Iteration 11200, loss = 0.72778\nI0817 23:08:03.909323 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 23:08:03.909338 17318 solver.cpp:244]     Train net output #1: loss = 0.72778 (* 1 = 0.72778 loss)\nI0817 23:08:03.989276 17318 sgd_solver.cpp:166] Iteration 11200, lr = 1.68\nI0817 23:10:22.311733 17318 solver.cpp:337] Iteration 11300, Testing net (#0)\nI0817 23:11:43.477787 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41492\nI0817 23:11:43.478052 17318 solver.cpp:404]     Test net output #1: loss = 3.1475 (* 1 = 3.1475 loss)\nI0817 23:11:44.797236 17318 solver.cpp:228] Iteration 11300, loss = 0.891209\nI0817 23:11:44.797271 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 23:11:44.797286 17318 solver.cpp:244]     Train net output #1: loss = 0.891209 (* 1 = 0.891209 loss)\nI0817 23:11:44.881669 17318 sgd_solver.cpp:166] Iteration 11300, lr = 1.695\nI0817 23:14:03.250605 17318 solver.cpp:337] Iteration 11400, Testing net (#0)\nI0817 23:15:24.416147 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43\nI0817 23:15:24.416421 17318 solver.cpp:404]     Test net output #1: loss = 2.8528 (* 1 = 2.8528 loss)\nI0817 23:15:25.736651 17318 solver.cpp:228] Iteration 11400, loss = 0.934865\nI0817 23:15:25.736685 17318 solver.cpp:244]     Train net 
output #0: accuracy = 0.72\nI0817 23:15:25.736701 17318 solver.cpp:244]     Train net output #1: loss = 0.934865 (* 1 = 0.934865 loss)\nI0817 23:15:25.818186 17318 sgd_solver.cpp:166] Iteration 11400, lr = 1.71\nI0817 23:17:44.162631 17318 solver.cpp:337] Iteration 11500, Testing net (#0)\nI0817 23:19:05.331841 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46284\nI0817 23:19:05.332123 17318 solver.cpp:404]     Test net output #1: loss = 2.45766 (* 1 = 2.45766 loss)\nI0817 23:19:06.651391 17318 solver.cpp:228] Iteration 11500, loss = 0.786937\nI0817 23:19:06.651427 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 23:19:06.651443 17318 solver.cpp:244]     Train net output #1: loss = 0.786937 (* 1 = 0.786937 loss)\nI0817 23:19:06.736903 17318 sgd_solver.cpp:166] Iteration 11500, lr = 1.725\nI0817 23:21:25.101951 17318 solver.cpp:337] Iteration 11600, Testing net (#0)\nI0817 23:22:46.275377 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40864\nI0817 23:22:46.275640 17318 solver.cpp:404]     Test net output #1: loss = 3.33696 (* 1 = 3.33696 loss)\nI0817 23:22:47.595546 17318 solver.cpp:228] Iteration 11600, loss = 0.817313\nI0817 23:22:47.595582 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 23:22:47.595598 17318 solver.cpp:244]     Train net output #1: loss = 0.817313 (* 1 = 0.817313 loss)\nI0817 23:22:47.683923 17318 sgd_solver.cpp:166] Iteration 11600, lr = 1.74\nI0817 23:25:06.056696 17318 solver.cpp:337] Iteration 11700, Testing net (#0)\nI0817 23:26:27.227695 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43304\nI0817 23:26:27.227969 17318 solver.cpp:404]     Test net output #1: loss = 2.74291 (* 1 = 2.74291 loss)\nI0817 23:26:28.548239 17318 solver.cpp:228] Iteration 11700, loss = 0.812999\nI0817 23:26:28.548272 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0817 23:26:28.548287 17318 solver.cpp:244]     Train net output #1: loss = 0.812999 (* 1 = 0.812999 
loss)\nI0817 23:26:28.631920 17318 sgd_solver.cpp:166] Iteration 11700, lr = 1.755\nI0817 23:28:46.956115 17318 solver.cpp:337] Iteration 11800, Testing net (#0)\nI0817 23:30:08.122225 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45012\nI0817 23:30:08.122504 17318 solver.cpp:404]     Test net output #1: loss = 2.51154 (* 1 = 2.51154 loss)\nI0817 23:30:09.442797 17318 solver.cpp:228] Iteration 11800, loss = 0.775842\nI0817 23:30:09.442831 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0817 23:30:09.442847 17318 solver.cpp:244]     Train net output #1: loss = 0.775842 (* 1 = 0.775842 loss)\nI0817 23:30:09.523442 17318 sgd_solver.cpp:166] Iteration 11800, lr = 1.77\nI0817 23:32:27.814402 17318 solver.cpp:337] Iteration 11900, Testing net (#0)\nI0817 23:33:48.976549 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41828\nI0817 23:33:48.976828 17318 solver.cpp:404]     Test net output #1: loss = 2.81519 (* 1 = 2.81519 loss)\nI0817 23:33:50.296854 17318 solver.cpp:228] Iteration 11900, loss = 0.73705\nI0817 23:33:50.296888 17318 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI0817 23:33:50.296903 17318 solver.cpp:244]     Train net output #1: loss = 0.73705 (* 1 = 0.73705 loss)\nI0817 23:33:50.378355 17318 sgd_solver.cpp:166] Iteration 11900, lr = 1.785\nI0817 23:36:08.689026 17318 solver.cpp:337] Iteration 12000, Testing net (#0)\nI0817 23:37:29.861917 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40576\nI0817 23:37:29.862200 17318 solver.cpp:404]     Test net output #1: loss = 2.96156 (* 1 = 2.96156 loss)\nI0817 23:37:31.182368 17318 solver.cpp:228] Iteration 12000, loss = 0.731816\nI0817 23:37:31.182404 17318 solver.cpp:244]     Train net output #0: accuracy = 0.776\nI0817 23:37:31.182427 17318 solver.cpp:244]     Train net output #1: loss = 0.731816 (* 1 = 0.731816 loss)\nI0817 23:37:31.274075 17318 sgd_solver.cpp:166] Iteration 12000, lr = 1.8\nI0817 23:39:49.637040 17318 solver.cpp:337] Iteration 
12100, Testing net (#0)\nI0817 23:41:10.807235 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4578\nI0817 23:41:10.807525 17318 solver.cpp:404]     Test net output #1: loss = 2.33367 (* 1 = 2.33367 loss)\nI0817 23:41:12.126967 17318 solver.cpp:228] Iteration 12100, loss = 0.820781\nI0817 23:41:12.127004 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 23:41:12.127027 17318 solver.cpp:244]     Train net output #1: loss = 0.820781 (* 1 = 0.820781 loss)\nI0817 23:41:12.215615 17318 sgd_solver.cpp:166] Iteration 12100, lr = 1.815\nI0817 23:43:30.591112 17318 solver.cpp:337] Iteration 12200, Testing net (#0)\nI0817 23:44:51.869036 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4154\nI0817 23:44:51.869295 17318 solver.cpp:404]     Test net output #1: loss = 2.99879 (* 1 = 2.99879 loss)\nI0817 23:44:53.188611 17318 solver.cpp:228] Iteration 12200, loss = 1.01388\nI0817 23:44:53.188642 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 23:44:53.188657 17318 solver.cpp:244]     Train net output #1: loss = 1.01388 (* 1 = 1.01388 loss)\nI0817 23:44:53.272281 17318 sgd_solver.cpp:166] Iteration 12200, lr = 1.83\nI0817 23:47:11.614392 17318 solver.cpp:337] Iteration 12300, Testing net (#0)\nI0817 23:48:32.909688 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4256\nI0817 23:48:32.909961 17318 solver.cpp:404]     Test net output #1: loss = 2.57695 (* 1 = 2.57695 loss)\nI0817 23:48:34.230624 17318 solver.cpp:228] Iteration 12300, loss = 0.967691\nI0817 23:48:34.230661 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0817 23:48:34.230684 17318 solver.cpp:244]     Train net output #1: loss = 0.967691 (* 1 = 0.967691 loss)\nI0817 23:48:34.313519 17318 sgd_solver.cpp:166] Iteration 12300, lr = 1.845\nI0817 23:50:52.657773 17318 solver.cpp:337] Iteration 12400, Testing net (#0)\nI0817 23:52:13.941094 17318 solver.cpp:404]     Test net output #0: accuracy = 0.39416\nI0817 23:52:13.941359 17318 
solver.cpp:404]     Test net output #1: loss = 3.00196 (* 1 = 3.00196 loss)\nI0817 23:52:15.262325 17318 solver.cpp:228] Iteration 12400, loss = 0.948827\nI0817 23:52:15.262361 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0817 23:52:15.262382 17318 solver.cpp:244]     Train net output #1: loss = 0.948827 (* 1 = 0.948827 loss)\nI0817 23:52:15.345774 17318 sgd_solver.cpp:166] Iteration 12400, lr = 1.86\nI0817 23:54:33.686223 17318 solver.cpp:337] Iteration 12500, Testing net (#0)\nI0817 23:55:54.950641 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4372\nI0817 23:55:54.950911 17318 solver.cpp:404]     Test net output #1: loss = 2.70808 (* 1 = 2.70808 loss)\nI0817 23:55:56.271410 17318 solver.cpp:228] Iteration 12500, loss = 0.955375\nI0817 23:55:56.271446 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0817 23:55:56.271469 17318 solver.cpp:244]     Train net output #1: loss = 0.955375 (* 1 = 0.955375 loss)\nI0817 23:55:56.360739 17318 sgd_solver.cpp:166] Iteration 12500, lr = 1.875\nI0817 23:58:14.704038 17318 solver.cpp:337] Iteration 12600, Testing net (#0)\nI0817 23:59:35.972592 17318 solver.cpp:404]     Test net output #0: accuracy = 0.29416\nI0817 23:59:35.972864 17318 solver.cpp:404]     Test net output #1: loss = 4.83856 (* 1 = 4.83856 loss)\nI0817 23:59:37.292017 17318 solver.cpp:228] Iteration 12600, loss = 0.827689\nI0817 23:59:37.292050 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0817 23:59:37.292065 17318 solver.cpp:244]     Train net output #1: loss = 0.827689 (* 1 = 0.827689 loss)\nI0817 23:59:37.370379 17318 sgd_solver.cpp:166] Iteration 12600, lr = 1.89\nI0818 00:01:55.709441 17318 solver.cpp:337] Iteration 12700, Testing net (#0)\nI0818 00:03:16.978622 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38128\nI0818 00:03:16.978893 17318 solver.cpp:404]     Test net output #1: loss = 3.30402 (* 1 = 3.30402 loss)\nI0818 00:03:18.298169 17318 solver.cpp:228] Iteration 12700, 
loss = 0.84478\nI0818 00:03:18.298202 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0818 00:03:18.298218 17318 solver.cpp:244]     Train net output #1: loss = 0.84478 (* 1 = 0.84478 loss)\nI0818 00:03:18.386941 17318 sgd_solver.cpp:166] Iteration 12700, lr = 1.905\nI0818 00:05:36.775696 17318 solver.cpp:337] Iteration 12800, Testing net (#0)\nI0818 00:06:58.044275 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41496\nI0818 00:06:58.044528 17318 solver.cpp:404]     Test net output #1: loss = 3.37329 (* 1 = 3.37329 loss)\nI0818 00:06:59.363365 17318 solver.cpp:228] Iteration 12800, loss = 0.781015\nI0818 00:06:59.363397 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 00:06:59.363412 17318 solver.cpp:244]     Train net output #1: loss = 0.781015 (* 1 = 0.781015 loss)\nI0818 00:06:59.451903 17318 sgd_solver.cpp:166] Iteration 12800, lr = 1.92\nI0818 00:09:17.849767 17318 solver.cpp:337] Iteration 12900, Testing net (#0)\nI0818 00:10:39.117568 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44404\nI0818 00:10:39.117826 17318 solver.cpp:404]     Test net output #1: loss = 2.74356 (* 1 = 2.74356 loss)\nI0818 00:10:40.436883 17318 solver.cpp:228] Iteration 12900, loss = 0.867392\nI0818 00:10:40.436916 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 00:10:40.436931 17318 solver.cpp:244]     Train net output #1: loss = 0.867392 (* 1 = 0.867392 loss)\nI0818 00:10:40.524044 17318 sgd_solver.cpp:166] Iteration 12900, lr = 1.935\nI0818 00:12:58.834146 17318 solver.cpp:337] Iteration 13000, Testing net (#0)\nI0818 00:14:20.092382 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43892\nI0818 00:14:20.092644 17318 solver.cpp:404]     Test net output #1: loss = 2.5223 (* 1 = 2.5223 loss)\nI0818 00:14:21.411710 17318 solver.cpp:228] Iteration 13000, loss = 1.0831\nI0818 00:14:21.411743 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 00:14:21.411759 17318 
solver.cpp:244]     Train net output #1: loss = 1.0831 (* 1 = 1.0831 loss)\nI0818 00:14:21.496639 17318 sgd_solver.cpp:166] Iteration 13000, lr = 1.95\nI0818 00:16:39.835438 17318 solver.cpp:337] Iteration 13100, Testing net (#0)\nI0818 00:18:01.104297 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42524\nI0818 00:18:01.104570 17318 solver.cpp:404]     Test net output #1: loss = 2.74535 (* 1 = 2.74535 loss)\nI0818 00:18:02.423415 17318 solver.cpp:228] Iteration 13100, loss = 0.883186\nI0818 00:18:02.423449 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 00:18:02.423465 17318 solver.cpp:244]     Train net output #1: loss = 0.883186 (* 1 = 0.883186 loss)\nI0818 00:18:02.510113 17318 sgd_solver.cpp:166] Iteration 13100, lr = 1.965\nI0818 00:20:20.840872 17318 solver.cpp:337] Iteration 13200, Testing net (#0)\nI0818 00:21:42.097270 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38956\nI0818 00:21:42.097548 17318 solver.cpp:404]     Test net output #1: loss = 3.09253 (* 1 = 3.09253 loss)\nI0818 00:21:43.416538 17318 solver.cpp:228] Iteration 13200, loss = 0.964635\nI0818 00:21:43.416571 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 00:21:43.416586 17318 solver.cpp:244]     Train net output #1: loss = 0.964635 (* 1 = 0.964635 loss)\nI0818 00:21:43.501580 17318 sgd_solver.cpp:166] Iteration 13200, lr = 1.98\nI0818 00:24:01.901067 17318 solver.cpp:337] Iteration 13300, Testing net (#0)\nI0818 00:25:23.160758 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4724\nI0818 00:25:23.161027 17318 solver.cpp:404]     Test net output #1: loss = 2.36928 (* 1 = 2.36928 loss)\nI0818 00:25:24.479988 17318 solver.cpp:228] Iteration 13300, loss = 0.821903\nI0818 00:25:24.480022 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 00:25:24.480038 17318 solver.cpp:244]     Train net output #1: loss = 0.821903 (* 1 = 0.821903 loss)\nI0818 00:25:24.566454 17318 sgd_solver.cpp:166] Iteration 
13300, lr = 1.995\nI0818 00:27:42.937870 17318 solver.cpp:337] Iteration 13400, Testing net (#0)\nI0818 00:29:04.200263 17318 solver.cpp:404]     Test net output #0: accuracy = 0.39384\nI0818 00:29:04.200527 17318 solver.cpp:404]     Test net output #1: loss = 3.34282 (* 1 = 3.34282 loss)\nI0818 00:29:05.519714 17318 solver.cpp:228] Iteration 13400, loss = 0.855395\nI0818 00:29:05.519747 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 00:29:05.519762 17318 solver.cpp:244]     Train net output #1: loss = 0.855395 (* 1 = 0.855395 loss)\nI0818 00:29:05.605921 17318 sgd_solver.cpp:166] Iteration 13400, lr = 2.01\nI0818 00:31:23.992950 17318 solver.cpp:337] Iteration 13500, Testing net (#0)\nI0818 00:32:45.249287 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40716\nI0818 00:32:45.249565 17318 solver.cpp:404]     Test net output #1: loss = 2.85311 (* 1 = 2.85311 loss)\nI0818 00:32:46.568624 17318 solver.cpp:228] Iteration 13500, loss = 0.99332\nI0818 00:32:46.568656 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 00:32:46.568671 17318 solver.cpp:244]     Train net output #1: loss = 0.99332 (* 1 = 0.99332 loss)\nI0818 00:32:46.648066 17318 sgd_solver.cpp:166] Iteration 13500, lr = 2.025\nI0818 00:35:05.002866 17318 solver.cpp:337] Iteration 13600, Testing net (#0)\nI0818 00:36:26.260499 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42008\nI0818 00:36:26.260776 17318 solver.cpp:404]     Test net output #1: loss = 2.75175 (* 1 = 2.75175 loss)\nI0818 00:36:27.579790 17318 solver.cpp:228] Iteration 13600, loss = 0.652778\nI0818 00:36:27.579825 17318 solver.cpp:244]     Train net output #0: accuracy = 0.8\nI0818 00:36:27.579841 17318 solver.cpp:244]     Train net output #1: loss = 0.652778 (* 1 = 0.652778 loss)\nI0818 00:36:27.667847 17318 sgd_solver.cpp:166] Iteration 13600, lr = 2.04\nI0818 00:38:45.949287 17318 solver.cpp:337] Iteration 13700, Testing net (#0)\nI0818 00:40:07.205243 17318 
solver.cpp:404]     Test net output #0: accuracy = 0.43788\nI0818 00:40:07.205499 17318 solver.cpp:404]     Test net output #1: loss = 2.54375 (* 1 = 2.54375 loss)\nI0818 00:40:08.524502 17318 solver.cpp:228] Iteration 13700, loss = 0.965605\nI0818 00:40:08.524543 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 00:40:08.524559 17318 solver.cpp:244]     Train net output #1: loss = 0.965605 (* 1 = 0.965605 loss)\nI0818 00:40:08.610656 17318 sgd_solver.cpp:166] Iteration 13700, lr = 2.055\nI0818 00:42:26.924023 17318 solver.cpp:337] Iteration 13800, Testing net (#0)\nI0818 00:43:48.473047 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44648\nI0818 00:43:48.473291 17318 solver.cpp:404]     Test net output #1: loss = 2.4769 (* 1 = 2.4769 loss)\nI0818 00:43:49.795979 17318 solver.cpp:228] Iteration 13800, loss = 0.646603\nI0818 00:43:49.796028 17318 solver.cpp:244]     Train net output #0: accuracy = 0.808\nI0818 00:43:49.796061 17318 solver.cpp:244]     Train net output #1: loss = 0.646603 (* 1 = 0.646603 loss)\nI0818 00:43:49.877707 17318 sgd_solver.cpp:166] Iteration 13800, lr = 2.07\nI0818 00:46:08.241340 17318 solver.cpp:337] Iteration 13900, Testing net (#0)\nI0818 00:47:29.752279 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42712\nI0818 00:47:29.752518 17318 solver.cpp:404]     Test net output #1: loss = 2.73078 (* 1 = 2.73078 loss)\nI0818 00:47:31.075150 17318 solver.cpp:228] Iteration 13900, loss = 0.895928\nI0818 00:47:31.075192 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 00:47:31.075208 17318 solver.cpp:244]     Train net output #1: loss = 0.895928 (* 1 = 0.895928 loss)\nI0818 00:47:31.156354 17318 sgd_solver.cpp:166] Iteration 13900, lr = 2.085\nI0818 00:49:49.557412 17318 solver.cpp:337] Iteration 14000, Testing net (#0)\nI0818 00:51:11.082473 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40408\nI0818 00:51:11.082706 17318 solver.cpp:404]     Test net output #1: loss = 
2.95212 (* 1 = 2.95212 loss)\nI0818 00:51:12.404731 17318 solver.cpp:228] Iteration 14000, loss = 0.817879\nI0818 00:51:12.404781 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 00:51:12.404798 17318 solver.cpp:244]     Train net output #1: loss = 0.817879 (* 1 = 0.817879 loss)\nI0818 00:51:12.490970 17318 sgd_solver.cpp:166] Iteration 14000, lr = 2.1\nI0818 00:53:30.934244 17318 solver.cpp:337] Iteration 14100, Testing net (#0)\nI0818 00:54:52.650395 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43008\nI0818 00:54:52.650605 17318 solver.cpp:404]     Test net output #1: loss = 2.60123 (* 1 = 2.60123 loss)\nI0818 00:54:53.973482 17318 solver.cpp:228] Iteration 14100, loss = 0.785592\nI0818 00:54:53.973536 17318 solver.cpp:244]     Train net output #0: accuracy = 0.792\nI0818 00:54:53.973552 17318 solver.cpp:244]     Train net output #1: loss = 0.785592 (* 1 = 0.785592 loss)\nI0818 00:54:54.052567 17318 sgd_solver.cpp:166] Iteration 14100, lr = 2.115\nI0818 00:57:12.502939 17318 solver.cpp:337] Iteration 14200, Testing net (#0)\nI0818 00:58:34.315387 17318 solver.cpp:404]     Test net output #0: accuracy = 0.47896\nI0818 00:58:34.315610 17318 solver.cpp:404]     Test net output #1: loss = 2.31655 (* 1 = 2.31655 loss)\nI0818 00:58:35.637895 17318 solver.cpp:228] Iteration 14200, loss = 0.833567\nI0818 00:58:35.637948 17318 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0818 00:58:35.637966 17318 solver.cpp:244]     Train net output #1: loss = 0.833567 (* 1 = 0.833567 loss)\nI0818 00:58:35.719679 17318 sgd_solver.cpp:166] Iteration 14200, lr = 2.13\nI0818 01:00:54.224144 17318 solver.cpp:337] Iteration 14300, Testing net (#0)\nI0818 01:02:16.078635 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41788\nI0818 01:02:16.078910 17318 solver.cpp:404]     Test net output #1: loss = 2.6895 (* 1 = 2.6895 loss)\nI0818 01:02:17.401378 17318 solver.cpp:228] Iteration 14300, loss = 0.848524\nI0818 01:02:17.401432 17318 
solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 01:02:17.401450 17318 solver.cpp:244]     Train net output #1: loss = 0.848524 (* 1 = 0.848524 loss)\nI0818 01:02:17.482101 17318 sgd_solver.cpp:166] Iteration 14300, lr = 2.145\nI0818 01:04:35.989644 17318 solver.cpp:337] Iteration 14400, Testing net (#0)\nI0818 01:05:58.096304 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4006\nI0818 01:05:58.096515 17318 solver.cpp:404]     Test net output #1: loss = 2.83125 (* 1 = 2.83125 loss)\nI0818 01:05:59.419586 17318 solver.cpp:228] Iteration 14400, loss = 1.00671\nI0818 01:05:59.419637 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 01:05:59.419656 17318 solver.cpp:244]     Train net output #1: loss = 1.00671 (* 1 = 1.00671 loss)\nI0818 01:05:59.497356 17318 sgd_solver.cpp:166] Iteration 14400, lr = 2.16\nI0818 01:08:17.978704 17318 solver.cpp:337] Iteration 14500, Testing net (#0)\nI0818 01:09:40.092190 17318 solver.cpp:404]     Test net output #0: accuracy = 0.49192\nI0818 01:09:40.092413 17318 solver.cpp:404]     Test net output #1: loss = 2.11274 (* 1 = 2.11274 loss)\nI0818 01:09:41.415129 17318 solver.cpp:228] Iteration 14500, loss = 0.931773\nI0818 01:09:41.415184 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 01:09:41.415200 17318 solver.cpp:244]     Train net output #1: loss = 0.931773 (* 1 = 0.931773 loss)\nI0818 01:09:41.494632 17318 sgd_solver.cpp:166] Iteration 14500, lr = 2.175\nI0818 01:11:59.974740 17318 solver.cpp:337] Iteration 14600, Testing net (#0)\nI0818 01:13:22.116623 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41148\nI0818 01:13:22.116919 17318 solver.cpp:404]     Test net output #1: loss = 2.77087 (* 1 = 2.77087 loss)\nI0818 01:13:23.439790 17318 solver.cpp:228] Iteration 14600, loss = 0.770811\nI0818 01:13:23.439841 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0818 01:13:23.439857 17318 solver.cpp:244]     Train net output #1: loss = 
0.770811 (* 1 = 0.770811 loss)\nI0818 01:13:23.519920 17318 sgd_solver.cpp:166] Iteration 14600, lr = 2.19\nI0818 01:15:42.009701 17318 solver.cpp:337] Iteration 14700, Testing net (#0)\nI0818 01:17:04.010159 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4198\nI0818 01:17:04.010416 17318 solver.cpp:404]     Test net output #1: loss = 2.98444 (* 1 = 2.98444 loss)\nI0818 01:17:05.333446 17318 solver.cpp:228] Iteration 14700, loss = 0.917993\nI0818 01:17:05.333495 17318 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0818 01:17:05.333513 17318 solver.cpp:244]     Train net output #1: loss = 0.917993 (* 1 = 0.917993 loss)\nI0818 01:17:05.413108 17318 sgd_solver.cpp:166] Iteration 14700, lr = 2.205\nI0818 01:19:23.908329 17318 solver.cpp:337] Iteration 14800, Testing net (#0)\nI0818 01:20:45.926728 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44628\nI0818 01:20:45.927019 17318 solver.cpp:404]     Test net output #1: loss = 2.54916 (* 1 = 2.54916 loss)\nI0818 01:20:47.249466 17318 solver.cpp:228] Iteration 14800, loss = 0.839376\nI0818 01:20:47.249517 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 01:20:47.249534 17318 solver.cpp:244]     Train net output #1: loss = 0.839376 (* 1 = 0.839376 loss)\nI0818 01:20:47.333426 17318 sgd_solver.cpp:166] Iteration 14800, lr = 2.22\nI0818 01:23:05.840144 17318 solver.cpp:337] Iteration 14900, Testing net (#0)\nI0818 01:24:27.959790 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40004\nI0818 01:24:27.960034 17318 solver.cpp:404]     Test net output #1: loss = 2.92997 (* 1 = 2.92997 loss)\nI0818 01:24:29.282479 17318 solver.cpp:228] Iteration 14900, loss = 0.932561\nI0818 01:24:29.282529 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 01:24:29.282547 17318 solver.cpp:244]     Train net output #1: loss = 0.932561 (* 1 = 0.932561 loss)\nI0818 01:24:29.358912 17318 sgd_solver.cpp:166] Iteration 14900, lr = 2.235\nI0818 01:26:47.883010 17318 
solver.cpp:337] Iteration 15000, Testing net (#0)\nI0818 01:28:10.047715 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4364\nI0818 01:28:10.047955 17318 solver.cpp:404]     Test net output #1: loss = 2.67131 (* 1 = 2.67131 loss)\nI0818 01:28:11.370983 17318 solver.cpp:228] Iteration 15000, loss = 0.930941\nI0818 01:28:11.371037 17318 solver.cpp:244]     Train net output #0: accuracy = 0.728\nI0818 01:28:11.371054 17318 solver.cpp:244]     Train net output #1: loss = 0.930941 (* 1 = 0.930941 loss)\nI0818 01:28:11.450283 17318 sgd_solver.cpp:166] Iteration 15000, lr = 2.25\nI0818 01:30:29.954916 17318 solver.cpp:337] Iteration 15100, Testing net (#0)\nI0818 01:31:52.042842 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44472\nI0818 01:31:52.043104 17318 solver.cpp:404]     Test net output #1: loss = 2.51508 (* 1 = 2.51508 loss)\nI0818 01:31:53.366391 17318 solver.cpp:228] Iteration 15100, loss = 1.07496\nI0818 01:31:53.366446 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 01:31:53.366463 17318 solver.cpp:244]     Train net output #1: loss = 1.07496 (* 1 = 1.07496 loss)\nI0818 01:31:53.448442 17318 sgd_solver.cpp:166] Iteration 15100, lr = 2.265\nI0818 01:34:11.964778 17318 solver.cpp:337] Iteration 15200, Testing net (#0)\nI0818 01:35:34.010417 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42084\nI0818 01:35:34.010633 17318 solver.cpp:404]     Test net output #1: loss = 2.67714 (* 1 = 2.67714 loss)\nI0818 01:35:35.333062 17318 solver.cpp:228] Iteration 15200, loss = 0.718328\nI0818 01:35:35.333119 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0818 01:35:35.333137 17318 solver.cpp:244]     Train net output #1: loss = 0.718328 (* 1 = 0.718328 loss)\nI0818 01:35:35.421159 17318 sgd_solver.cpp:166] Iteration 15200, lr = 2.28\nI0818 01:37:53.953366 17318 solver.cpp:337] Iteration 15300, Testing net (#0)\nI0818 01:39:16.071446 17318 solver.cpp:404]     Test net output #0: accuracy = 
0.43064\nI0818 01:39:16.071666 17318 solver.cpp:404]     Test net output #1: loss = 2.4663 (* 1 = 2.4663 loss)\nI0818 01:39:17.394531 17318 solver.cpp:228] Iteration 15300, loss = 0.875483\nI0818 01:39:17.394584 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 01:39:17.394603 17318 solver.cpp:244]     Train net output #1: loss = 0.875483 (* 1 = 0.875483 loss)\nI0818 01:39:17.476084 17318 sgd_solver.cpp:166] Iteration 15300, lr = 2.295\nI0818 01:41:35.938298 17318 solver.cpp:337] Iteration 15400, Testing net (#0)\nI0818 01:42:58.085770 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44592\nI0818 01:42:58.086062 17318 solver.cpp:404]     Test net output #1: loss = 2.61604 (* 1 = 2.61604 loss)\nI0818 01:42:59.408474 17318 solver.cpp:228] Iteration 15400, loss = 1.0598\nI0818 01:42:59.408529 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0818 01:42:59.408553 17318 solver.cpp:244]     Train net output #1: loss = 1.0598 (* 1 = 1.0598 loss)\nI0818 01:42:59.486809 17318 sgd_solver.cpp:166] Iteration 15400, lr = 2.31\nI0818 01:45:17.982180 17318 solver.cpp:337] Iteration 15500, Testing net (#0)\nI0818 01:46:40.137084 17318 solver.cpp:404]     Test net output #0: accuracy = 0.51784\nI0818 01:46:40.137313 17318 solver.cpp:404]     Test net output #1: loss = 1.85405 (* 1 = 1.85405 loss)\nI0818 01:46:41.460060 17318 solver.cpp:228] Iteration 15500, loss = 0.815587\nI0818 01:46:41.460117 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 01:46:41.460141 17318 solver.cpp:244]     Train net output #1: loss = 0.815587 (* 1 = 0.815587 loss)\nI0818 01:46:41.540748 17318 sgd_solver.cpp:166] Iteration 15500, lr = 2.325\nI0818 01:49:00.041563 17318 solver.cpp:337] Iteration 15600, Testing net (#0)\nI0818 01:50:22.200966 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45508\nI0818 01:50:22.201236 17318 solver.cpp:404]     Test net output #1: loss = 2.20451 (* 1 = 2.20451 loss)\nI0818 01:50:23.525058 17318 
solver.cpp:228] Iteration 15600, loss = 1.01207\nI0818 01:50:23.525115 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 01:50:23.525140 17318 solver.cpp:244]     Train net output #1: loss = 1.01207 (* 1 = 1.01207 loss)\nI0818 01:50:23.605231 17318 sgd_solver.cpp:166] Iteration 15600, lr = 2.34\nI0818 01:52:42.078572 17318 solver.cpp:337] Iteration 15700, Testing net (#0)\nI0818 01:54:04.191542 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43272\nI0818 01:54:04.191767 17318 solver.cpp:404]     Test net output #1: loss = 2.7014 (* 1 = 2.7014 loss)\nI0818 01:54:05.515228 17318 solver.cpp:228] Iteration 15700, loss = 0.924619\nI0818 01:54:05.515283 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 01:54:05.515307 17318 solver.cpp:244]     Train net output #1: loss = 0.924619 (* 1 = 0.924619 loss)\nI0818 01:54:05.594118 17318 sgd_solver.cpp:166] Iteration 15700, lr = 2.355\nI0818 01:56:24.176784 17318 solver.cpp:337] Iteration 15800, Testing net (#0)\nI0818 01:57:46.289546 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4432\nI0818 01:57:46.289775 17318 solver.cpp:404]     Test net output #1: loss = 2.58136 (* 1 = 2.58136 loss)\nI0818 01:57:47.613209 17318 solver.cpp:228] Iteration 15800, loss = 0.929764\nI0818 01:57:47.613266 17318 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI0818 01:57:47.613291 17318 solver.cpp:244]     Train net output #1: loss = 0.929764 (* 1 = 0.929764 loss)\nI0818 01:57:47.688419 17318 sgd_solver.cpp:166] Iteration 15800, lr = 2.37\nI0818 02:00:06.246999 17318 solver.cpp:337] Iteration 15900, Testing net (#0)\nI0818 02:01:28.398401 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41524\nI0818 02:01:28.398680 17318 solver.cpp:404]     Test net output #1: loss = 2.58827 (* 1 = 2.58827 loss)\nI0818 02:01:29.722182 17318 solver.cpp:228] Iteration 15900, loss = 0.99395\nI0818 02:01:29.722239 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 
02:01:29.722265 17318 solver.cpp:244]     Train net output #1: loss = 0.99395 (* 1 = 0.99395 loss)\nI0818 02:01:29.800424 17318 sgd_solver.cpp:166] Iteration 15900, lr = 2.385\nI0818 02:03:48.344375 17318 solver.cpp:337] Iteration 16000, Testing net (#0)\nI0818 02:05:10.486742 17318 solver.cpp:404]     Test net output #0: accuracy = 0.43464\nI0818 02:05:10.486968 17318 solver.cpp:404]     Test net output #1: loss = 2.63112 (* 1 = 2.63112 loss)\nI0818 02:05:11.810607 17318 solver.cpp:228] Iteration 16000, loss = 0.949727\nI0818 02:05:11.810665 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 02:05:11.810690 17318 solver.cpp:244]     Train net output #1: loss = 0.949727 (* 1 = 0.949727 loss)\nI0818 02:05:11.889438 17318 sgd_solver.cpp:166] Iteration 16000, lr = 2.4\nI0818 02:07:30.457797 17318 solver.cpp:337] Iteration 16100, Testing net (#0)\nI0818 02:08:52.659476 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45628\nI0818 02:08:52.659723 17318 solver.cpp:404]     Test net output #1: loss = 2.44768 (* 1 = 2.44768 loss)\nI0818 02:08:53.982815 17318 solver.cpp:228] Iteration 16100, loss = 1.0123\nI0818 02:08:53.982870 17318 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI0818 02:08:53.982895 17318 solver.cpp:244]     Train net output #1: loss = 1.0123 (* 1 = 1.0123 loss)\nI0818 02:08:54.067762 17318 sgd_solver.cpp:166] Iteration 16100, lr = 2.415\nI0818 02:11:12.651317 17318 solver.cpp:337] Iteration 16200, Testing net (#0)\nI0818 02:12:34.820490 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41132\nI0818 02:12:34.820756 17318 solver.cpp:404]     Test net output #1: loss = 2.65408 (* 1 = 2.65408 loss)\nI0818 02:12:36.143993 17318 solver.cpp:228] Iteration 16200, loss = 0.877076\nI0818 02:12:36.144052 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 02:12:36.144076 17318 solver.cpp:244]     Train net output #1: loss = 0.877076 (* 1 = 0.877076 loss)\nI0818 02:12:36.225831 17318 
sgd_solver.cpp:166] Iteration 16200, lr = 2.43\nI0818 02:14:54.804100 17318 solver.cpp:337] Iteration 16300, Testing net (#0)\nI0818 02:16:16.980347 17318 solver.cpp:404]     Test net output #0: accuracy = 0.33284\nI0818 02:16:16.980577 17318 solver.cpp:404]     Test net output #1: loss = 3.28165 (* 1 = 3.28165 loss)\nI0818 02:16:18.304306 17318 solver.cpp:228] Iteration 16300, loss = 0.929632\nI0818 02:16:18.304363 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 02:16:18.304388 17318 solver.cpp:244]     Train net output #1: loss = 0.929632 (* 1 = 0.929632 loss)\nI0818 02:16:18.386523 17318 sgd_solver.cpp:166] Iteration 16300, lr = 2.445\nI0818 02:18:37.010076 17318 solver.cpp:337] Iteration 16400, Testing net (#0)\nI0818 02:19:59.176482 17318 solver.cpp:404]     Test net output #0: accuracy = 0.39476\nI0818 02:19:59.176760 17318 solver.cpp:404]     Test net output #1: loss = 3.07453 (* 1 = 3.07453 loss)\nI0818 02:20:00.504935 17318 solver.cpp:228] Iteration 16400, loss = 1.16889\nI0818 02:20:00.504990 17318 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0818 02:20:00.505009 17318 solver.cpp:244]     Train net output #1: loss = 1.16889 (* 1 = 1.16889 loss)\nI0818 02:20:00.581414 17318 sgd_solver.cpp:166] Iteration 16400, lr = 2.46\nI0818 02:22:19.270061 17318 solver.cpp:337] Iteration 16500, Testing net (#0)\nI0818 02:23:41.426383 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41084\nI0818 02:23:41.426656 17318 solver.cpp:404]     Test net output #1: loss = 2.81106 (* 1 = 2.81106 loss)\nI0818 02:23:42.749326 17318 solver.cpp:228] Iteration 16500, loss = 0.817138\nI0818 02:23:42.749372 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 02:23:42.749388 17318 solver.cpp:244]     Train net output #1: loss = 0.817138 (* 1 = 0.817138 loss)\nI0818 02:23:42.841130 17318 sgd_solver.cpp:166] Iteration 16500, lr = 2.475\nI0818 02:26:01.981773 17318 solver.cpp:337] Iteration 16600, Testing net (#0)\nI0818 
02:27:24.089416 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35996\nI0818 02:27:24.089661 17318 solver.cpp:404]     Test net output #1: loss = 3.62716 (* 1 = 3.62716 loss)\nI0818 02:27:25.412358 17318 solver.cpp:228] Iteration 16600, loss = 0.855956\nI0818 02:27:25.412400 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 02:27:25.412416 17318 solver.cpp:244]     Train net output #1: loss = 0.855956 (* 1 = 0.855956 loss)\nI0818 02:27:25.500213 17318 sgd_solver.cpp:166] Iteration 16600, lr = 2.49\nI0818 02:29:44.524225 17318 solver.cpp:337] Iteration 16700, Testing net (#0)\nI0818 02:31:06.642879 17318 solver.cpp:404]     Test net output #0: accuracy = 0.3784\nI0818 02:31:06.643160 17318 solver.cpp:404]     Test net output #1: loss = 3.04199 (* 1 = 3.04199 loss)\nI0818 02:31:07.965427 17318 solver.cpp:228] Iteration 16700, loss = 0.884775\nI0818 02:31:07.965471 17318 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI0818 02:31:07.965488 17318 solver.cpp:244]     Train net output #1: loss = 0.884775 (* 1 = 0.884775 loss)\nI0818 02:31:08.055554 17318 sgd_solver.cpp:166] Iteration 16700, lr = 2.505\nI0818 02:33:27.174093 17318 solver.cpp:337] Iteration 16800, Testing net (#0)\nI0818 02:34:49.319541 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4616\nI0818 02:34:49.319787 17318 solver.cpp:404]     Test net output #1: loss = 2.43599 (* 1 = 2.43599 loss)\nI0818 02:34:50.642807 17318 solver.cpp:228] Iteration 16800, loss = 1.06727\nI0818 02:34:50.642850 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 02:34:50.642868 17318 solver.cpp:244]     Train net output #1: loss = 1.06727 (* 1 = 1.06727 loss)\nI0818 02:34:50.728354 17318 sgd_solver.cpp:166] Iteration 16800, lr = 2.52\nI0818 02:37:09.832695 17318 solver.cpp:337] Iteration 16900, Testing net (#0)\nI0818 02:38:32.009534 17318 solver.cpp:404]     Test net output #0: accuracy = 0.33876\nI0818 02:38:32.009771 17318 solver.cpp:404]     Test net 
output #1: loss = 3.26286 (* 1 = 3.26286 loss)\nI0818 02:38:33.332159 17318 solver.cpp:228] Iteration 16900, loss = 1.14874\nI0818 02:38:33.332197 17318 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0818 02:38:33.332213 17318 solver.cpp:244]     Train net output #1: loss = 1.14874 (* 1 = 1.14874 loss)\nI0818 02:38:33.420488 17318 sgd_solver.cpp:166] Iteration 16900, lr = 2.535\nI0818 02:40:52.412633 17318 solver.cpp:337] Iteration 17000, Testing net (#0)\nI0818 02:42:14.576871 17318 solver.cpp:404]     Test net output #0: accuracy = 0.37076\nI0818 02:42:14.577128 17318 solver.cpp:404]     Test net output #1: loss = 3.22924 (* 1 = 3.22924 loss)\nI0818 02:42:15.899560 17318 solver.cpp:228] Iteration 17000, loss = 1.01213\nI0818 02:42:15.899603 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 02:42:15.899619 17318 solver.cpp:244]     Train net output #1: loss = 1.01213 (* 1 = 1.01213 loss)\nI0818 02:42:15.989506 17318 sgd_solver.cpp:166] Iteration 17000, lr = 2.55\nI0818 02:44:35.059496 17318 solver.cpp:337] Iteration 17100, Testing net (#0)\nI0818 02:45:56.868043 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46092\nI0818 02:45:56.868255 17318 solver.cpp:404]     Test net output #1: loss = 2.34136 (* 1 = 2.34136 loss)\nI0818 02:45:58.190152 17318 solver.cpp:228] Iteration 17100, loss = 1.04351\nI0818 02:45:58.190194 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 02:45:58.190210 17318 solver.cpp:244]     Train net output #1: loss = 1.04351 (* 1 = 1.04351 loss)\nI0818 02:45:58.282395 17318 sgd_solver.cpp:166] Iteration 17100, lr = 2.565\nI0818 02:48:17.351721 17318 solver.cpp:337] Iteration 17200, Testing net (#0)\nI0818 02:49:39.432481 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40212\nI0818 02:49:39.432732 17318 solver.cpp:404]     Test net output #1: loss = 2.73324 (* 1 = 2.73324 loss)\nI0818 02:49:40.755034 17318 solver.cpp:228] Iteration 17200, loss = 0.882339\nI0818 
02:49:40.755077 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 02:49:40.755095 17318 solver.cpp:244]     Train net output #1: loss = 0.882339 (* 1 = 0.882339 loss)\nI0818 02:49:40.845919 17318 sgd_solver.cpp:166] Iteration 17200, lr = 2.58\nI0818 02:51:59.890027 17318 solver.cpp:337] Iteration 17300, Testing net (#0)\nI0818 02:53:22.032781 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42052\nI0818 02:53:22.033054 17318 solver.cpp:404]     Test net output #1: loss = 2.57734 (* 1 = 2.57734 loss)\nI0818 02:53:23.355164 17318 solver.cpp:228] Iteration 17300, loss = 1.12372\nI0818 02:53:23.355203 17318 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0818 02:53:23.355221 17318 solver.cpp:244]     Train net output #1: loss = 1.12372 (* 1 = 1.12372 loss)\nI0818 02:53:23.444017 17318 sgd_solver.cpp:166] Iteration 17300, lr = 2.595\nI0818 02:55:42.492414 17318 solver.cpp:337] Iteration 17400, Testing net (#0)\nI0818 02:57:04.608547 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41904\nI0818 02:57:04.608803 17318 solver.cpp:404]     Test net output #1: loss = 2.59112 (* 1 = 2.59112 loss)\nI0818 02:57:05.931337 17318 solver.cpp:228] Iteration 17400, loss = 1.01021\nI0818 02:57:05.931378 17318 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI0818 02:57:05.931396 17318 solver.cpp:244]     Train net output #1: loss = 1.01021 (* 1 = 1.01021 loss)\nI0818 02:57:06.015548 17318 sgd_solver.cpp:166] Iteration 17400, lr = 2.61\nI0818 02:59:25.084481 17318 solver.cpp:337] Iteration 17500, Testing net (#0)\nI0818 03:00:47.173421 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46192\nI0818 03:00:47.173681 17318 solver.cpp:404]     Test net output #1: loss = 2.24571 (* 1 = 2.24571 loss)\nI0818 03:00:48.497885 17318 solver.cpp:228] Iteration 17500, loss = 0.855021\nI0818 03:00:48.497928 17318 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI0818 03:00:48.497944 17318 solver.cpp:244]     Train net 
output #1: loss = 0.855021 (* 1 = 0.855021 loss)\nI0818 03:00:48.583760 17318 sgd_solver.cpp:166] Iteration 17500, lr = 2.625\nI0818 03:03:07.655619 17318 solver.cpp:337] Iteration 17600, Testing net (#0)\nI0818 03:04:29.751689 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4794\nI0818 03:04:29.751937 17318 solver.cpp:404]     Test net output #1: loss = 2.15987 (* 1 = 2.15987 loss)\nI0818 03:04:31.076010 17318 solver.cpp:228] Iteration 17600, loss = 0.881325\nI0818 03:04:31.076059 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 03:04:31.076076 17318 solver.cpp:244]     Train net output #1: loss = 0.881325 (* 1 = 0.881325 loss)\nI0818 03:04:31.163507 17318 sgd_solver.cpp:166] Iteration 17600, lr = 2.64\nI0818 03:06:50.302388 17318 solver.cpp:337] Iteration 17700, Testing net (#0)\nI0818 03:08:12.465899 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46088\nI0818 03:08:12.466130 17318 solver.cpp:404]     Test net output #1: loss = 2.42296 (* 1 = 2.42296 loss)\nI0818 03:08:13.789019 17318 solver.cpp:228] Iteration 17700, loss = 1.04321\nI0818 03:08:13.789065 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 03:08:13.789083 17318 solver.cpp:244]     Train net output #1: loss = 1.04321 (* 1 = 1.04321 loss)\nI0818 03:08:13.872087 17318 sgd_solver.cpp:166] Iteration 17700, lr = 2.655\nI0818 03:10:32.917699 17318 solver.cpp:337] Iteration 17800, Testing net (#0)\nI0818 03:11:55.085252 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44392\nI0818 03:11:55.085580 17318 solver.cpp:404]     Test net output #1: loss = 2.57227 (* 1 = 2.57227 loss)\nI0818 03:11:56.407970 17318 solver.cpp:228] Iteration 17800, loss = 0.848452\nI0818 03:11:56.408010 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 03:11:56.408026 17318 solver.cpp:244]     Train net output #1: loss = 0.848452 (* 1 = 0.848452 loss)\nI0818 03:11:56.499928 17318 sgd_solver.cpp:166] Iteration 17800, lr = 2.67\nI0818 
03:14:15.556561 17318 solver.cpp:337] Iteration 17900, Testing net (#0)\nI0818 03:15:37.709980 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45364\nI0818 03:15:37.710238 17318 solver.cpp:404]     Test net output #1: loss = 2.31456 (* 1 = 2.31456 loss)\nI0818 03:15:39.032538 17318 solver.cpp:228] Iteration 17900, loss = 0.971924\nI0818 03:15:39.032577 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0818 03:15:39.032594 17318 solver.cpp:244]     Train net output #1: loss = 0.971924 (* 1 = 0.971924 loss)\nI0818 03:15:39.116964 17318 sgd_solver.cpp:166] Iteration 17900, lr = 2.685\nI0818 03:17:58.171432 17318 solver.cpp:337] Iteration 18000, Testing net (#0)\nI0818 03:19:20.152281 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38032\nI0818 03:19:20.152514 17318 solver.cpp:404]     Test net output #1: loss = 2.71131 (* 1 = 2.71131 loss)\nI0818 03:19:21.475085 17318 solver.cpp:228] Iteration 18000, loss = 1.15373\nI0818 03:19:21.475126 17318 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0818 03:19:21.475143 17318 solver.cpp:244]     Train net output #1: loss = 1.15373 (* 1 = 1.15373 loss)\nI0818 03:19:21.559141 17318 sgd_solver.cpp:166] Iteration 18000, lr = 2.7\nI0818 03:21:40.568994 17318 solver.cpp:337] Iteration 18100, Testing net (#0)\nI0818 03:23:02.558001 17318 solver.cpp:404]     Test net output #0: accuracy = 0.45648\nI0818 03:23:02.558275 17318 solver.cpp:404]     Test net output #1: loss = 2.24723 (* 1 = 2.24723 loss)\nI0818 03:23:03.881320 17318 solver.cpp:228] Iteration 18100, loss = 1.12308\nI0818 03:23:03.881361 17318 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0818 03:23:03.881377 17318 solver.cpp:244]     Train net output #1: loss = 1.12308 (* 1 = 1.12308 loss)\nI0818 03:23:03.966962 17318 sgd_solver.cpp:166] Iteration 18100, lr = 2.715\nI0818 03:25:23.027230 17318 solver.cpp:337] Iteration 18200, Testing net (#0)\nI0818 03:26:45.114403 17318 solver.cpp:404]     Test net output #0: 
accuracy = 0.41332\nI0818 03:26:45.114639 17318 solver.cpp:404]     Test net output #1: loss = 2.50368 (* 1 = 2.50368 loss)\nI0818 03:26:46.436906 17318 solver.cpp:228] Iteration 18200, loss = 0.952987\nI0818 03:26:46.436949 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0818 03:26:46.436964 17318 solver.cpp:244]     Train net output #1: loss = 0.952987 (* 1 = 0.952987 loss)\nI0818 03:26:46.526103 17318 sgd_solver.cpp:166] Iteration 18200, lr = 2.73\nI0818 03:29:05.665155 17318 solver.cpp:337] Iteration 18300, Testing net (#0)\nI0818 03:30:27.583919 17318 solver.cpp:404]     Test net output #0: accuracy = 0.36764\nI0818 03:30:27.584168 17318 solver.cpp:404]     Test net output #1: loss = 2.75287 (* 1 = 2.75287 loss)\nI0818 03:30:28.906867 17318 solver.cpp:228] Iteration 18300, loss = 1.22163\nI0818 03:30:28.906908 17318 solver.cpp:244]     Train net output #0: accuracy = 0.648\nI0818 03:30:28.906924 17318 solver.cpp:244]     Train net output #1: loss = 1.22163 (* 1 = 1.22163 loss)\nI0818 03:30:28.993934 17318 sgd_solver.cpp:166] Iteration 18300, lr = 2.745\nI0818 03:32:48.237586 17318 solver.cpp:337] Iteration 18400, Testing net (#0)\nI0818 03:34:10.006112 17318 solver.cpp:404]     Test net output #0: accuracy = 0.44296\nI0818 03:34:10.006394 17318 solver.cpp:404]     Test net output #1: loss = 2.23222 (* 1 = 2.23222 loss)\nI0818 03:34:11.328869 17318 solver.cpp:228] Iteration 18400, loss = 1.11209\nI0818 03:34:11.328909 17318 solver.cpp:244]     Train net output #0: accuracy = 0.64\nI0818 03:34:11.328925 17318 solver.cpp:244]     Train net output #1: loss = 1.11209 (* 1 = 1.11209 loss)\nI0818 03:34:11.415639 17318 sgd_solver.cpp:166] Iteration 18400, lr = 2.76\nI0818 03:36:30.527464 17318 solver.cpp:337] Iteration 18500, Testing net (#0)\nI0818 03:37:52.508347 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4514\nI0818 03:37:52.508599 17318 solver.cpp:404]     Test net output #1: loss = 2.31371 (* 1 = 2.31371 loss)\nI0818 
03:37:53.831605 17318 solver.cpp:228] Iteration 18500, loss = 0.849772\nI0818 03:37:53.831646 17318 solver.cpp:244]     Train net output #0: accuracy = 0.752\nI0818 03:37:53.831662 17318 solver.cpp:244]     Train net output #1: loss = 0.849772 (* 1 = 0.849772 loss)\nI0818 03:37:53.912484 17318 sgd_solver.cpp:166] Iteration 18500, lr = 2.775\nI0818 03:40:13.032770 17318 solver.cpp:337] Iteration 18600, Testing net (#0)\nI0818 03:41:34.787699 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42424\nI0818 03:41:34.787964 17318 solver.cpp:404]     Test net output #1: loss = 2.48753 (* 1 = 2.48753 loss)\nI0818 03:41:36.110543 17318 solver.cpp:228] Iteration 18600, loss = 0.955007\nI0818 03:41:36.110581 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 03:41:36.110599 17318 solver.cpp:244]     Train net output #1: loss = 0.955007 (* 1 = 0.955007 loss)\nI0818 03:41:36.192584 17318 sgd_solver.cpp:166] Iteration 18600, lr = 2.79\nI0818 03:43:55.291986 17318 solver.cpp:337] Iteration 18700, Testing net (#0)\nI0818 03:45:17.031188 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40224\nI0818 03:45:17.031445 17318 solver.cpp:404]     Test net output #1: loss = 2.78098 (* 1 = 2.78098 loss)\nI0818 03:45:18.354230 17318 solver.cpp:228] Iteration 18700, loss = 1.03988\nI0818 03:45:18.354271 17318 solver.cpp:244]     Train net output #0: accuracy = 0.696\nI0818 03:45:18.354288 17318 solver.cpp:244]     Train net output #1: loss = 1.03988 (* 1 = 1.03988 loss)\nI0818 03:45:18.437769 17318 sgd_solver.cpp:166] Iteration 18700, lr = 2.805\nI0818 03:47:37.561079 17318 solver.cpp:337] Iteration 18800, Testing net (#0)\nI0818 03:48:59.585878 17318 solver.cpp:404]     Test net output #0: accuracy = 0.35328\nI0818 03:48:59.586110 17318 solver.cpp:404]     Test net output #1: loss = 2.86277 (* 1 = 2.86277 loss)\nI0818 03:49:00.909225 17318 solver.cpp:228] Iteration 18800, loss = 1.02232\nI0818 03:49:00.909267 17318 solver.cpp:244]     Train net output 
#0: accuracy = 0.68\nI0818 03:49:00.909283 17318 solver.cpp:244]     Train net output #1: loss = 1.02232 (* 1 = 1.02232 loss)\nI0818 03:49:00.996330 17318 sgd_solver.cpp:166] Iteration 18800, lr = 2.82\nI0818 03:51:20.053598 17318 solver.cpp:337] Iteration 18900, Testing net (#0)\nI0818 03:52:41.795111 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42512\nI0818 03:52:41.795389 17318 solver.cpp:404]     Test net output #1: loss = 2.45315 (* 1 = 2.45315 loss)\nI0818 03:52:43.118563 17318 solver.cpp:228] Iteration 18900, loss = 1.19618\nI0818 03:52:43.118608 17318 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0818 03:52:43.118625 17318 solver.cpp:244]     Train net output #1: loss = 1.19618 (* 1 = 1.19618 loss)\nI0818 03:52:43.200310 17318 sgd_solver.cpp:166] Iteration 18900, lr = 2.835\nI0818 03:55:02.283865 17318 solver.cpp:337] Iteration 19000, Testing net (#0)\nI0818 03:56:24.491713 17318 solver.cpp:404]     Test net output #0: accuracy = 0.33432\nI0818 03:56:24.491966 17318 solver.cpp:404]     Test net output #1: loss = 3.02375 (* 1 = 3.02375 loss)\nI0818 03:56:25.816001 17318 solver.cpp:228] Iteration 19000, loss = 1.15376\nI0818 03:56:25.816047 17318 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI0818 03:56:25.816069 17318 solver.cpp:244]     Train net output #1: loss = 1.15376 (* 1 = 1.15376 loss)\nI0818 03:56:25.900254 17318 sgd_solver.cpp:166] Iteration 19000, lr = 2.85\nI0818 03:58:45.028852 17318 solver.cpp:337] Iteration 19100, Testing net (#0)\nI0818 04:00:07.225705 17318 solver.cpp:404]     Test net output #0: accuracy = 0.50452\nI0818 04:00:07.225952 17318 solver.cpp:404]     Test net output #1: loss = 1.89368 (* 1 = 1.89368 loss)\nI0818 04:00:08.550071 17318 solver.cpp:228] Iteration 19100, loss = 1.09712\nI0818 04:00:08.550114 17318 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0818 04:00:08.550132 17318 solver.cpp:244]     Train net output #1: loss = 1.09712 (* 1 = 1.09712 loss)\nI0818 
04:00:08.635857 17318 sgd_solver.cpp:166] Iteration 19100, lr = 2.865\nI0818 04:02:27.795446 17318 solver.cpp:337] Iteration 19200, Testing net (#0)\nI0818 04:03:49.684891 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4334\nI0818 04:03:49.685148 17318 solver.cpp:404]     Test net output #1: loss = 2.54012 (* 1 = 2.54012 loss)\nI0818 04:03:51.008697 17318 solver.cpp:228] Iteration 19200, loss = 0.991809\nI0818 04:03:51.008740 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 04:03:51.008757 17318 solver.cpp:244]     Train net output #1: loss = 0.991809 (* 1 = 0.991809 loss)\nI0818 04:03:51.094280 17318 sgd_solver.cpp:166] Iteration 19200, lr = 2.88\nI0818 04:06:10.231293 17318 solver.cpp:337] Iteration 19300, Testing net (#0)\nI0818 04:07:32.408417 17318 solver.cpp:404]     Test net output #0: accuracy = 0.37868\nI0818 04:07:32.408668 17318 solver.cpp:404]     Test net output #1: loss = 2.83244 (* 1 = 2.83244 loss)\nI0818 04:07:33.732296 17318 solver.cpp:228] Iteration 19300, loss = 0.942373\nI0818 04:07:33.732342 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 04:07:33.732357 17318 solver.cpp:244]     Train net output #1: loss = 0.942373 (* 1 = 0.942373 loss)\nI0818 04:07:33.817656 17318 sgd_solver.cpp:166] Iteration 19300, lr = 2.895\nI0818 04:09:53.451894 17318 solver.cpp:337] Iteration 19400, Testing net (#0)\nI0818 04:11:15.663451 17318 solver.cpp:404]     Test net output #0: accuracy = 0.42804\nI0818 04:11:15.663693 17318 solver.cpp:404]     Test net output #1: loss = 2.63031 (* 1 = 2.63031 loss)\nI0818 04:11:16.987367 17318 solver.cpp:228] Iteration 19400, loss = 1.10948\nI0818 04:11:16.987411 17318 solver.cpp:244]     Train net output #0: accuracy = 0.68\nI0818 04:11:16.987428 17318 solver.cpp:244]     Train net output #1: loss = 1.10948 (* 1 = 1.10948 loss)\nI0818 04:11:17.075465 17318 sgd_solver.cpp:166] Iteration 19400, lr = 2.91\nI0818 04:13:36.638506 17318 solver.cpp:337] Iteration 19500, Testing 
net (#0)\nI0818 04:14:58.772127 17318 solver.cpp:404]     Test net output #0: accuracy = 0.38836\nI0818 04:14:58.772353 17318 solver.cpp:404]     Test net output #1: loss = 2.72565 (* 1 = 2.72565 loss)\nI0818 04:15:00.095770 17318 solver.cpp:228] Iteration 19500, loss = 0.983262\nI0818 04:15:00.095814 17318 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0818 04:15:00.095831 17318 solver.cpp:244]     Train net output #1: loss = 0.983262 (* 1 = 0.983262 loss)\nI0818 04:15:00.180800 17318 sgd_solver.cpp:166] Iteration 19500, lr = 2.925\nI0818 04:17:19.762811 17318 solver.cpp:337] Iteration 19600, Testing net (#0)\nI0818 04:18:41.626953 17318 solver.cpp:404]     Test net output #0: accuracy = 0.46604\nI0818 04:18:41.627213 17318 solver.cpp:404]     Test net output #1: loss = 2.15668 (* 1 = 2.15668 loss)\nI0818 04:18:42.951087 17318 solver.cpp:228] Iteration 19600, loss = 1.05435\nI0818 04:18:42.951133 17318 solver.cpp:244]     Train net output #0: accuracy = 0.664\nI0818 04:18:42.951149 17318 solver.cpp:244]     Train net output #1: loss = 1.05435 (* 1 = 1.05435 loss)\nI0818 04:18:43.042440 17318 sgd_solver.cpp:166] Iteration 19600, lr = 2.94\nI0818 04:21:02.629251 17318 solver.cpp:337] Iteration 19700, Testing net (#0)\nI0818 04:22:24.500044 17318 solver.cpp:404]     Test net output #0: accuracy = 0.25572\nI0818 04:22:24.500330 17318 solver.cpp:404]     Test net output #1: loss = 4.21139 (* 1 = 4.21139 loss)\nI0818 04:22:25.824111 17318 solver.cpp:228] Iteration 19700, loss = 1.12746\nI0818 04:22:25.824156 17318 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI0818 04:22:25.824173 17318 solver.cpp:244]     Train net output #1: loss = 1.12746 (* 1 = 1.12746 loss)\nI0818 04:22:25.917490 17318 sgd_solver.cpp:166] Iteration 19700, lr = 2.955\nI0818 04:24:45.455395 17318 solver.cpp:337] Iteration 19800, Testing net (#0)\nI0818 04:26:07.562343 17318 solver.cpp:404]     Test net output #0: accuracy = 0.4734\nI0818 04:26:07.562577 17318 solver.cpp:404] 
    Test net output #1: loss = 2.17339 (* 1 = 2.17339 loss)\nI0818 04:26:08.886381 17318 solver.cpp:228] Iteration 19800, loss = 1.03847\nI0818 04:26:08.886426 17318 solver.cpp:244]     Train net output #0: accuracy = 0.704\nI0818 04:26:08.886445 17318 solver.cpp:244]     Train net output #1: loss = 1.03847 (* 1 = 1.03847 loss)\nI0818 04:26:08.973929 17318 sgd_solver.cpp:166] Iteration 19800, lr = 2.97\nI0818 04:28:28.568706 17318 solver.cpp:337] Iteration 19900, Testing net (#0)\nI0818 04:29:50.561889 17318 solver.cpp:404]     Test net output #0: accuracy = 0.41284\nI0818 04:29:50.562105 17318 solver.cpp:404]     Test net output #1: loss = 2.44436 (* 1 = 2.44436 loss)\nI0818 04:29:51.884449 17318 solver.cpp:228] Iteration 19900, loss = 1.05935\nI0818 04:29:51.884495 17318 solver.cpp:244]     Train net output #0: accuracy = 0.736\nI0818 04:29:51.884511 17318 solver.cpp:244]     Train net output #1: loss = 1.05935 (* 1 = 1.05935 loss)\nI0818 04:29:51.978324 17318 sgd_solver.cpp:166] Iteration 19900, lr = 2.985\nI0818 04:32:11.502754 17318 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range3Cifar100kFig8_iter_20000.caffemodel\nI0818 04:32:11.719983 17318 sgd_solver.cpp:333] Snapshotting solver state to binary proto file examples/sc/snapshots/range3Cifar100kFig8_iter_20000.solverstate\nI0818 04:32:12.166751 17318 solver.cpp:317] Iteration 20000, loss = 1.33031\nI0818 04:32:12.166800 17318 solver.cpp:337] Iteration 20000, Testing net (#0)\nI0818 04:33:34.179395 17318 solver.cpp:404]     Test net output #0: accuracy = 0.40824\nI0818 04:33:34.179666 17318 solver.cpp:404]     Test net output #1: loss = 2.45534 (* 1 = 2.45534 loss)\nI0818 04:33:34.179678 17318 solver.cpp:322] Optimization Done.\nI0818 04:33:39.530261 17318 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range3Iter100kFig2b",
    "content": "I1212 06:17:18.044482 20613 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1212 06:17:18.046854 20613 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1212 06:17:18.048070 20613 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1212 06:17:18.049283 20613 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1212 06:17:18.050493 20613 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1212 06:17:18.051726 20613 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1212 06:17:18.052951 20613 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1212 06:17:18.054179 20613 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1212 06:17:18.055413 20613 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1212 06:17:18.480574 20613 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 100000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 100000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range3Iter100kFig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI1212 06:17:18.484700 20613 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1212 06:17:18.543431 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:18.543514 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:18.544795 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1212 06:17:18.544849 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI1212 06:17:18.544869 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI1212 06:17:18.544888 20613 net.cpp:322] The 
NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI1212 06:17:18.544914 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:17:18.544932 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:17:18.544951 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:17:18.544970 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:17:18.544991 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:17:18.545009 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:17:18.545027 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:17:18.545044 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:17:18.545064 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:17:18.545084 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:17:18.545104 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:17:18.545122 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:17:18.545141 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:17:18.545158 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer 
L1_b8_cbr2_bn\nI1212 06:17:18.545178 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:17:18.545197 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:17:18.545228 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:17:18.545248 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:17:18.545274 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:17:18.545292 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:17:18.545310 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:17:18.545325 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:17:18.545342 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:17:18.545361 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:17:18.545378 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:17:18.545397 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:17:18.545418 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:17:18.545435 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:17:18.545455 20613 net.cpp:322] The NetState phase (0) differed from 
the phase (1) specified by a rule in layer L2_b7_cbr1_bn\nI1212 06:17:18.545472 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:17:18.545492 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:17:18.545511 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:17:18.545529 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:17:18.545548 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:17:18.545567 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:17:18.545586 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:17:18.545611 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:17:18.545639 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:17:18.545658 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:17:18.545677 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:17:18.545697 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:17:18.545716 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:17:18.545735 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI1212 06:17:18.545753 20613 
net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI1212 06:17:18.545773 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:17:18.545790 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:17:18.545809 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:17:18.545837 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:17:18.545858 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:17:18.545877 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:17:18.545897 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:17:18.545914 20613 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:17:18.547739 20613 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: 
\"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n  
  bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  
type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  
}\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI1212 06:17:18.549897 20613 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:17:18.551095 20613 net.cpp:100] Creating Layer dataLayer\nI1212 06:17:18.551131 20613 net.cpp:408] dataLayer -> data_top\nI1212 06:17:18.551373 20613 net.cpp:408] dataLayer -> label\nI1212 06:17:18.551482 20613 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:17:18.753057 20619 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1212 06:17:18.849059 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:18.856119 20613 net.cpp:150] Setting up dataLayer\nI1212 06:17:18.856189 20613 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:17:18.856201 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:18.856207 20613 net.cpp:165] Memory required for data: 1536500\nI1212 06:17:18.856225 20613 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:17:18.856240 20613 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:17:18.856248 20613 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:17:18.856271 20613 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:17:18.856287 20613 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:17:18.856361 20613 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:17:18.856377 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:18.856384 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:18.856389 20613 net.cpp:165] Memory required for data: 1537500\nI1212 06:17:18.856395 20613 layer_factory.hpp:77] Creating layer 
pre_conv\nI1212 06:17:18.856423 20613 net.cpp:100] Creating Layer pre_conv\nI1212 06:17:18.856431 20613 net.cpp:434] pre_conv <- data_top\nI1212 06:17:18.856441 20613 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:17:18.858188 20613 net.cpp:150] Setting up pre_conv\nI1212 06:17:18.858209 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.858216 20613 net.cpp:165] Memory required for data: 9729500\nI1212 06:17:18.858336 20613 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:17:18.858394 20613 net.cpp:100] Creating Layer pre_bn\nI1212 06:17:18.858410 20613 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:17:18.858420 20613 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:17:18.858994 20613 net.cpp:150] Setting up pre_bn\nI1212 06:17:18.859011 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.859017 20613 net.cpp:165] Memory required for data: 17921500\nI1212 06:17:18.859035 20613 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:18.859094 20613 net.cpp:100] Creating Layer pre_scale\nI1212 06:17:18.859105 20613 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:17:18.859117 20613 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:17:18.859432 20613 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:18.867813 20613 net.cpp:150] Setting up pre_scale\nI1212 06:17:18.867836 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.867842 20613 net.cpp:165] Memory required for data: 26113500\nI1212 06:17:18.867854 20613 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:17:18.867908 20613 net.cpp:100] Creating Layer pre_relu\nI1212 06:17:18.867919 20613 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:17:18.867928 20613 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:17:18.867940 20613 net.cpp:150] Setting up pre_relu\nI1212 06:17:18.867949 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.867954 20613 net.cpp:165] Memory required for data: 34305500\nI1212 06:17:18.867959 
20613 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1212 06:17:18.867969 20613 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:17:18.867975 20613 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:17:18.867983 20613 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:17:18.867997 20613 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:17:18.868047 20613 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:17:18.868060 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.868067 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.868072 20613 net.cpp:165] Memory required for data: 50689500\nI1212 06:17:18.868077 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:17:18.868089 20613 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:17:18.868095 20613 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:17:18.868108 20613 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:17:18.868420 20613 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:17:18.868434 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.868439 20613 net.cpp:165] Memory required for data: 58881500\nI1212 06:17:18.868453 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:17:18.868468 20613 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:17:18.868474 20613 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:17:18.868484 20613 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:17:18.868722 20613 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:17:18.868743 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.868749 20613 net.cpp:165] Memory required for data: 67073500\nI1212 06:17:18.868760 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:18.868769 20613 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1212 
06:17:18.868775 20613 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:17:18.868782 20613 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.868834 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:18.868978 20613 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:17:18.868990 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.868995 20613 net.cpp:165] Memory required for data: 75265500\nI1212 06:17:18.869004 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:17:18.869015 20613 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:17:18.869029 20613 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:17:18.869037 20613 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.869047 20613 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:17:18.869055 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.869060 20613 net.cpp:165] Memory required for data: 83457500\nI1212 06:17:18.869065 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:17:18.869081 20613 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:17:18.869087 20613 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:17:18.869098 20613 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:17:18.869424 20613 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:17:18.869439 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.869444 20613 net.cpp:165] Memory required for data: 91649500\nI1212 06:17:18.869453 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:17:18.869462 20613 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:17:18.869468 20613 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:17:18.869479 20613 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:17:18.869731 20613 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:17:18.869745 20613 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:18.869750 20613 net.cpp:165] Memory required for data: 99841500\nI1212 06:17:18.869768 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:18.869778 20613 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:17:18.869784 20613 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:17:18.869792 20613 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:17:18.869848 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:18.869987 20613 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:17:18.870000 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870005 20613 net.cpp:165] Memory required for data: 108033500\nI1212 06:17:18.870014 20613 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:17:18.870028 20613 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:17:18.870033 20613 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:17:18.870040 20613 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:17:18.870050 20613 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:17:18.870138 20613 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:17:18.870154 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870160 20613 net.cpp:165] Memory required for data: 116225500\nI1212 06:17:18.870167 20613 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:17:18.870182 20613 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:17:18.870189 20613 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:17:18.870198 20613 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:17:18.870208 20613 net.cpp:150] Setting up L1_b1_relu\nI1212 06:17:18.870214 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870219 20613 net.cpp:165] Memory required for data: 124417500\nI1212 06:17:18.870224 20613 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:18.870417 20613 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:18.870429 20613 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:17:18.870440 20613 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:18.870450 20613 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:18.870499 20613 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:18.870512 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870518 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870523 20613 net.cpp:165] Memory required for data: 140801500\nI1212 06:17:18.870537 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:17:18.870551 20613 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:17:18.870558 20613 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:18.870568 20613 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:17:18.870888 20613 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:17:18.870903 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.870908 20613 net.cpp:165] Memory required for data: 148993500\nI1212 06:17:18.870918 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:17:18.870934 20613 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:17:18.870941 20613 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:17:18.870952 20613 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:17:18.871192 20613 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:17:18.871206 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.871212 20613 net.cpp:165] Memory required for data: 157185500\nI1212 06:17:18.871222 20613 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI1212 06:17:18.871232 20613 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1212 06:17:18.871237 20613 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:17:18.871245 20613 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.871299 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:18.871435 20613 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:17:18.871448 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.871453 20613 net.cpp:165] Memory required for data: 165377500\nI1212 06:17:18.871461 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:17:18.871474 20613 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:17:18.871480 20613 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:17:18.871489 20613 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.871497 20613 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:17:18.871505 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.871510 20613 net.cpp:165] Memory required for data: 173569500\nI1212 06:17:18.871513 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:17:18.871528 20613 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:17:18.871534 20613 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:17:18.871546 20613 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:17:18.871861 20613 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:17:18.871876 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.871881 20613 net.cpp:165] Memory required for data: 181761500\nI1212 06:17:18.871889 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:17:18.871902 20613 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:17:18.871908 20613 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:17:18.871917 20613 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:17:18.872160 20613 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:17:18.872174 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872179 20613 net.cpp:165] Memory required for data: 189953500\nI1212 06:17:18.872195 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:18.872207 20613 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:17:18.872215 20613 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:17:18.872221 20613 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:17:18.872274 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:18.872416 20613 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:17:18.872429 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872434 20613 net.cpp:165] Memory required for data: 198145500\nI1212 06:17:18.872443 20613 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:17:18.872452 20613 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:17:18.872467 20613 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:17:18.872473 20613 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:18.872484 20613 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:17:18.872516 20613 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:17:18.872525 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872530 20613 net.cpp:165] Memory required for data: 206337500\nI1212 06:17:18.872535 20613 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:17:18.872544 20613 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:17:18.872550 20613 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:17:18.872556 20613 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:17:18.872565 20613 net.cpp:150] Setting up L1_b2_relu\nI1212 06:17:18.872571 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872576 20613 net.cpp:165] Memory required for 
data: 214529500\nI1212 06:17:18.872581 20613 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:18.872591 20613 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:18.872597 20613 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:17:18.872604 20613 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:18.872614 20613 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:18.872665 20613 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:18.872675 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872681 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.872686 20613 net.cpp:165] Memory required for data: 230913500\nI1212 06:17:18.872691 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:17:18.872704 20613 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:17:18.872709 20613 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:18.872721 20613 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:17:18.873026 20613 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:17:18.873040 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.873045 20613 net.cpp:165] Memory required for data: 239105500\nI1212 06:17:18.873054 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:17:18.873064 20613 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:17:18.873070 20613 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:17:18.873078 20613 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:17:18.873314 20613 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:17:18.873327 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.873332 20613 net.cpp:165] Memory required for data: 
247297500\nI1212 06:17:18.873343 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:18.873355 20613 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:17:18.873361 20613 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:17:18.873369 20613 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.873428 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:18.873574 20613 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:17:18.873587 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.873592 20613 net.cpp:165] Memory required for data: 255489500\nI1212 06:17:18.873601 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:17:18.873610 20613 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:17:18.873615 20613 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:17:18.873632 20613 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.873642 20613 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:17:18.873651 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.873661 20613 net.cpp:165] Memory required for data: 263681500\nI1212 06:17:18.873667 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:17:18.873682 20613 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:17:18.873687 20613 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:17:18.873697 20613 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:17:18.874003 20613 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:17:18.874017 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874022 20613 net.cpp:165] Memory required for data: 271873500\nI1212 06:17:18.874032 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:17:18.874047 20613 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:17:18.874053 20613 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:17:18.874063 20613 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:17:18.874299 20613 net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:17:18.874311 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874316 20613 net.cpp:165] Memory required for data: 280065500\nI1212 06:17:18.874327 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:18.874339 20613 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:17:18.874346 20613 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:17:18.874353 20613 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:17:18.874408 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:18.874543 20613 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:17:18.874557 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874562 20613 net.cpp:165] Memory required for data: 288257500\nI1212 06:17:18.874570 20613 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:17:18.874578 20613 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:17:18.874584 20613 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:17:18.874591 20613 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:18.874603 20613 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:17:18.874644 20613 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:17:18.874653 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874658 20613 net.cpp:165] Memory required for data: 296449500\nI1212 06:17:18.874665 20613 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:17:18.874672 20613 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:17:18.874677 20613 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:17:18.874687 20613 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:17:18.874697 20613 net.cpp:150] Setting up L1_b3_relu\nI1212 06:17:18.874704 20613 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:17:18.874708 20613 net.cpp:165] Memory required for data: 304641500\nI1212 06:17:18.874713 20613 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:18.874722 20613 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:18.874727 20613 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:17:18.874734 20613 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:18.874743 20613 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:18.874794 20613 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:18.874806 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874812 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.874817 20613 net.cpp:165] Memory required for data: 321025500\nI1212 06:17:18.874822 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:17:18.874835 20613 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:17:18.874840 20613 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:18.874852 20613 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:17:18.875169 20613 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:17:18.875182 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.875187 20613 net.cpp:165] Memory required for data: 329217500\nI1212 06:17:18.875196 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:17:18.875206 20613 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:17:18.875212 20613 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:17:18.875223 20613 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:17:18.875461 20613 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:17:18.875474 20613 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:18.875479 20613 net.cpp:165] Memory required for data: 337409500\nI1212 06:17:18.875490 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:18.875502 20613 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:17:18.875509 20613 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:17:18.875516 20613 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.875569 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:18.875718 20613 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:17:18.875732 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.875737 20613 net.cpp:165] Memory required for data: 345601500\nI1212 06:17:18.875747 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:17:18.875756 20613 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:17:18.875761 20613 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:17:18.875771 20613 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.875782 20613 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:17:18.875788 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.875792 20613 net.cpp:165] Memory required for data: 353793500\nI1212 06:17:18.875797 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:17:18.875813 20613 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:17:18.875818 20613 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:17:18.875830 20613 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:17:18.876137 20613 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:17:18.876152 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876157 20613 net.cpp:165] Memory required for data: 361985500\nI1212 06:17:18.876165 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:17:18.876175 20613 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:17:18.876181 20613 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:17:18.876189 20613 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:17:18.876438 20613 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:17:18.876452 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876457 20613 net.cpp:165] Memory required for data: 370177500\nI1212 06:17:18.876471 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:18.876479 20613 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:17:18.876485 20613 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:17:18.876495 20613 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:17:18.876550 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:18.876699 20613 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:17:18.876713 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876718 20613 net.cpp:165] Memory required for data: 378369500\nI1212 06:17:18.876726 20613 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:17:18.876739 20613 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:17:18.876744 20613 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:17:18.876751 20613 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:18.876760 20613 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:17:18.876794 20613 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:17:18.876811 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876816 20613 net.cpp:165] Memory required for data: 386561500\nI1212 06:17:18.876822 20613 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:17:18.876830 20613 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:17:18.876835 20613 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:17:18.876845 20613 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:17:18.876855 20613 net.cpp:150] 
Setting up L1_b4_relu\nI1212 06:17:18.876863 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876866 20613 net.cpp:165] Memory required for data: 394753500\nI1212 06:17:18.876871 20613 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:18.876879 20613 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:18.876885 20613 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:17:18.876893 20613 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:18.876902 20613 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:18.876947 20613 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:18.876960 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876966 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.876971 20613 net.cpp:165] Memory required for data: 411137500\nI1212 06:17:18.876976 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:17:18.876987 20613 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:17:18.876993 20613 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:18.877005 20613 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:17:18.877316 20613 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:17:18.877331 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.877336 20613 net.cpp:165] Memory required for data: 419329500\nI1212 06:17:18.877359 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:17:18.877373 20613 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:17:18.877379 20613 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:17:18.877388 20613 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:17:18.877637 20613 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI1212 06:17:18.877651 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.877656 20613 net.cpp:165] Memory required for data: 427521500\nI1212 06:17:18.877666 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:18.877678 20613 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:17:18.877684 20613 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:17:18.877692 20613 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.877745 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:18.877893 20613 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:17:18.877907 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.877912 20613 net.cpp:165] Memory required for data: 435713500\nI1212 06:17:18.877920 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:17:18.877931 20613 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:17:18.877938 20613 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:17:18.877945 20613 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.877954 20613 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:17:18.877961 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.877965 20613 net.cpp:165] Memory required for data: 443905500\nI1212 06:17:18.877970 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:17:18.877985 20613 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:17:18.877990 20613 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:17:18.878001 20613 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:17:18.878322 20613 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:17:18.878336 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.878341 20613 net.cpp:165] Memory required for data: 452097500\nI1212 06:17:18.878350 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:17:18.878360 20613 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:17:18.878366 20613 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:17:18.878374 20613 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:17:18.878612 20613 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:17:18.878631 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.878636 20613 net.cpp:165] Memory required for data: 460289500\nI1212 06:17:18.878648 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:18.878659 20613 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:17:18.878665 20613 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:17:18.878674 20613 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:17:18.878729 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:18.878872 20613 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:17:18.878885 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.878890 20613 net.cpp:165] Memory required for data: 468481500\nI1212 06:17:18.878899 20613 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:17:18.878908 20613 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:17:18.878914 20613 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:17:18.878921 20613 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:18.878931 20613 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:17:18.878965 20613 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:17:18.878976 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.878981 20613 net.cpp:165] Memory required for data: 476673500\nI1212 06:17:18.878986 20613 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:17:18.878995 20613 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:17:18.879000 20613 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:17:18.879010 20613 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI1212 06:17:18.879020 20613 net.cpp:150] Setting up L1_b5_relu\nI1212 06:17:18.879027 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.879031 20613 net.cpp:165] Memory required for data: 484865500\nI1212 06:17:18.879036 20613 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:18.879043 20613 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:18.879048 20613 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:17:18.879056 20613 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:18.879066 20613 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:18.879112 20613 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:18.879123 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.879130 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.879134 20613 net.cpp:165] Memory required for data: 501249500\nI1212 06:17:18.879139 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:17:18.879151 20613 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:17:18.879158 20613 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:18.879173 20613 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:17:18.879484 20613 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:17:18.879498 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.879503 20613 net.cpp:165] Memory required for data: 509441500\nI1212 06:17:18.879513 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:17:18.879528 20613 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:17:18.879534 20613 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:17:18.879546 20613 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI1212 06:17:18.879796 20613 net.cpp:150] Setting up L1_b6_cbr1_bn\nI1212 06:17:18.879809 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.879814 20613 net.cpp:165] Memory required for data: 517633500\nI1212 06:17:18.879825 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:18.879837 20613 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:17:18.879844 20613 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:17:18.879853 20613 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.879904 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:18.880048 20613 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:17:18.880060 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.880065 20613 net.cpp:165] Memory required for data: 525825500\nI1212 06:17:18.880074 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:17:18.880082 20613 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:17:18.880089 20613 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:17:18.880100 20613 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.880108 20613 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:17:18.880115 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.880120 20613 net.cpp:165] Memory required for data: 534017500\nI1212 06:17:18.880125 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:17:18.880139 20613 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:17:18.880146 20613 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:17:18.880157 20613 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:17:18.880470 20613 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:17:18.880483 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.880488 20613 net.cpp:165] Memory required for data: 542209500\nI1212 06:17:18.880497 20613 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:17:18.880506 20613 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:17:18.880512 20613 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:17:18.880520 20613 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:17:18.880774 20613 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:17:18.880787 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.880792 20613 net.cpp:165] Memory required for data: 550401500\nI1212 06:17:18.880803 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:18.880815 20613 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:17:18.880821 20613 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:17:18.880830 20613 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:17:18.880885 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:18.881027 20613 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:17:18.881041 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881045 20613 net.cpp:165] Memory required for data: 558593500\nI1212 06:17:18.881054 20613 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:17:18.881070 20613 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:17:18.881078 20613 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:17:18.881084 20613 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:18.881094 20613 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:17:18.881126 20613 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:17:18.881139 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881144 20613 net.cpp:165] Memory required for data: 566785500\nI1212 06:17:18.881148 20613 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:17:18.881160 20613 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:17:18.881175 20613 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI1212 06:17:18.881182 20613 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI1212 06:17:18.881192 20613 net.cpp:150] Setting up L1_b6_relu\nI1212 06:17:18.881199 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881203 20613 net.cpp:165] Memory required for data: 574977500\nI1212 06:17:18.881208 20613 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:18.881217 20613 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:18.881222 20613 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:17:18.881228 20613 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:18.881238 20613 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:18.881284 20613 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:18.881296 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881304 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881307 20613 net.cpp:165] Memory required for data: 591361500\nI1212 06:17:18.881314 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:17:18.881327 20613 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:17:18.881333 20613 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:18.881343 20613 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:17:18.881664 20613 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:17:18.881678 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881683 20613 net.cpp:165] Memory required for data: 599553500\nI1212 06:17:18.881692 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:17:18.881705 20613 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:17:18.881711 20613 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI1212 06:17:18.881719 20613 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI1212 06:17:18.881963 20613 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:17:18.881976 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.881981 20613 net.cpp:165] Memory required for data: 607745500\nI1212 06:17:18.881992 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:18.882000 20613 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:17:18.882006 20613 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:17:18.882014 20613 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.882069 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:18.882213 20613 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:17:18.882226 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.882231 20613 net.cpp:165] Memory required for data: 615937500\nI1212 06:17:18.882241 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:17:18.882248 20613 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:17:18.882254 20613 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:17:18.882266 20613 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.882277 20613 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:17:18.882283 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.882288 20613 net.cpp:165] Memory required for data: 624129500\nI1212 06:17:18.882292 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:17:18.882304 20613 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:17:18.882310 20613 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:17:18.882323 20613 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:17:18.882638 20613 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:17:18.882652 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.882658 20613 
net.cpp:165] Memory required for data: 632321500\nI1212 06:17:18.882666 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:17:18.882683 20613 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:17:18.882690 20613 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:17:18.882701 20613 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:17:18.882943 20613 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:17:18.882959 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.882964 20613 net.cpp:165] Memory required for data: 640513500\nI1212 06:17:18.882975 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:18.882984 20613 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:17:18.882990 20613 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:17:18.882997 20613 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:17:18.883051 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:18.883193 20613 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:17:18.883205 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883210 20613 net.cpp:165] Memory required for data: 648705500\nI1212 06:17:18.883219 20613 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:17:18.883230 20613 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:17:18.883236 20613 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:17:18.883244 20613 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:18.883253 20613 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:17:18.883285 20613 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:17:18.883294 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883298 20613 net.cpp:165] Memory required for data: 656897500\nI1212 06:17:18.883304 20613 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:17:18.883314 20613 net.cpp:100] Creating 
Layer L1_b7_relu\nI1212 06:17:18.883321 20613 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI1212 06:17:18.883328 20613 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:17:18.883337 20613 net.cpp:150] Setting up L1_b7_relu\nI1212 06:17:18.883344 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883348 20613 net.cpp:165] Memory required for data: 665089500\nI1212 06:17:18.883353 20613 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:18.883360 20613 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:18.883366 20613 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:17:18.883373 20613 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:18.883383 20613 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:18.883429 20613 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:18.883440 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883446 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883451 20613 net.cpp:165] Memory required for data: 681473500\nI1212 06:17:18.883456 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:17:18.883471 20613 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:17:18.883477 20613 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:18.883486 20613 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:17:18.883813 20613 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:17:18.883828 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.883833 20613 net.cpp:165] Memory required for data: 689665500\nI1212 06:17:18.883842 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:17:18.883855 20613 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI1212 06:17:18.883862 20613 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI1212 06:17:18.883870 20613 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:17:18.884119 20613 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:17:18.884141 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.884147 20613 net.cpp:165] Memory required for data: 697857500\nI1212 06:17:18.884157 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:18.884166 20613 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:17:18.884172 20613 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:17:18.884181 20613 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.884234 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:18.884382 20613 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:17:18.884394 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.884399 20613 net.cpp:165] Memory required for data: 706049500\nI1212 06:17:18.884408 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:17:18.884420 20613 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:17:18.884426 20613 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:17:18.884436 20613 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.884446 20613 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:17:18.884454 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.884459 20613 net.cpp:165] Memory required for data: 714241500\nI1212 06:17:18.884462 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:17:18.884474 20613 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:17:18.884480 20613 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:17:18.884492 20613 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:17:18.884815 20613 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:17:18.884829 20613 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.884835 20613 net.cpp:165] Memory required for data: 722433500\nI1212 06:17:18.884843 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:17:18.884852 20613 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:17:18.884860 20613 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:17:18.884871 20613 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:17:18.885123 20613 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:17:18.885136 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885141 20613 net.cpp:165] Memory required for data: 730625500\nI1212 06:17:18.885151 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:18.885164 20613 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:17:18.885170 20613 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:17:18.885179 20613 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:17:18.885231 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:18.885373 20613 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:17:18.885385 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885390 20613 net.cpp:165] Memory required for data: 738817500\nI1212 06:17:18.885399 20613 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:17:18.885411 20613 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:17:18.885417 20613 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:17:18.885424 20613 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:18.885432 20613 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:17:18.885466 20613 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:17:18.885475 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885480 20613 net.cpp:165] Memory required for data: 747009500\nI1212 06:17:18.885485 20613 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI1212 06:17:18.885493 20613 net.cpp:100] Creating Layer L1_b8_relu\nI1212 06:17:18.885499 20613 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:17:18.885509 20613 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:17:18.885519 20613 net.cpp:150] Setting up L1_b8_relu\nI1212 06:17:18.885526 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885530 20613 net.cpp:165] Memory required for data: 755201500\nI1212 06:17:18.885542 20613 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:18.885550 20613 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:18.885555 20613 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:17:18.885563 20613 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:18.885573 20613 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:18.885625 20613 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:18.885637 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885644 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.885649 20613 net.cpp:165] Memory required for data: 771585500\nI1212 06:17:18.885654 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:17:18.885666 20613 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:17:18.885673 20613 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:18.885684 20613 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:17:18.886008 20613 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:17:18.886029 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.886034 20613 net.cpp:165] Memory required for data: 779777500\nI1212 06:17:18.886044 20613 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI1212 06:17:18.886052 20613 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI1212 06:17:18.886059 20613 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:17:18.886070 20613 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:17:18.886317 20613 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:17:18.886330 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.886335 20613 net.cpp:165] Memory required for data: 787969500\nI1212 06:17:18.886345 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:18.886354 20613 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:17:18.886360 20613 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:17:18.886371 20613 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.886425 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:18.886570 20613 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:17:18.886590 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.886595 20613 net.cpp:165] Memory required for data: 796161500\nI1212 06:17:18.886605 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:17:18.886613 20613 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:17:18.886626 20613 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:17:18.886633 20613 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.886643 20613 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:17:18.886651 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.886654 20613 net.cpp:165] Memory required for data: 804353500\nI1212 06:17:18.886659 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:17:18.886674 20613 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:17:18.886680 20613 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:17:18.886692 20613 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:17:18.887015 20613 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:17:18.887029 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887034 20613 net.cpp:165] Memory required for data: 812545500\nI1212 06:17:18.887043 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:17:18.887055 20613 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:17:18.887061 20613 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:17:18.887073 20613 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:17:18.887315 20613 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:17:18.887336 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887341 20613 net.cpp:165] Memory required for data: 820737500\nI1212 06:17:18.887372 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:18.887382 20613 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:17:18.887388 20613 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:17:18.887398 20613 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:17:18.887450 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:18.887596 20613 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:17:18.887609 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887614 20613 net.cpp:165] Memory required for data: 828929500\nI1212 06:17:18.887629 20613 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:17:18.887642 20613 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:17:18.887650 20613 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:17:18.887656 20613 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:18.887663 20613 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:17:18.887696 20613 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:17:18.887704 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887709 20613 net.cpp:165] Memory required for 
data: 837121500\nI1212 06:17:18.887715 20613 layer_factory.hpp:77] Creating layer L1_b9_relu\nI1212 06:17:18.887727 20613 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:17:18.887732 20613 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:17:18.887739 20613 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:17:18.887748 20613 net.cpp:150] Setting up L1_b9_relu\nI1212 06:17:18.887755 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887759 20613 net.cpp:165] Memory required for data: 845313500\nI1212 06:17:18.887764 20613 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:18.887776 20613 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:18.887783 20613 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:17:18.887789 20613 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:18.887799 20613 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:18.887845 20613 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:18.887856 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887863 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:18.887868 20613 net.cpp:165] Memory required for data: 861697500\nI1212 06:17:18.887873 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:17:18.887887 20613 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:17:18.887893 20613 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:18.887903 20613 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:17:18.888226 20613 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:17:18.888239 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.888244 20613 net.cpp:165] Memory required for data: 
863745500\nI1212 06:17:18.888254 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI1212 06:17:18.888267 20613 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:17:18.888273 20613 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:17:18.888283 20613 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:17:18.888523 20613 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:17:18.888536 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.888541 20613 net.cpp:165] Memory required for data: 865793500\nI1212 06:17:18.888552 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:18.888561 20613 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:17:18.888567 20613 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:17:18.888582 20613 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.888645 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:18.888787 20613 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:17:18.888800 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.888805 20613 net.cpp:165] Memory required for data: 867841500\nI1212 06:17:18.888814 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:17:18.888823 20613 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:17:18.888829 20613 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:17:18.888839 20613 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.888849 20613 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:17:18.888856 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.888861 20613 net.cpp:165] Memory required for data: 869889500\nI1212 06:17:18.888865 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:17:18.888880 20613 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:17:18.888887 20613 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:17:18.888896 20613 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:17:18.889217 20613 net.cpp:150] Setting up L2_b1_cbr2_conv\nI1212 06:17:18.889231 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.889236 20613 net.cpp:165] Memory required for data: 871937500\nI1212 06:17:18.889245 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:17:18.889259 20613 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:17:18.889266 20613 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:17:18.889276 20613 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:17:18.889525 20613 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:17:18.889541 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.889546 20613 net.cpp:165] Memory required for data: 873985500\nI1212 06:17:18.889557 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:18.889566 20613 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:17:18.889571 20613 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:17:18.889580 20613 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:17:18.889642 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:18.889792 20613 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:17:18.889806 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.889811 20613 net.cpp:165] Memory required for data: 876033500\nI1212 06:17:18.889819 20613 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:17:18.889830 20613 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:17:18.889837 20613 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:18.889848 20613 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:17:18.889968 20613 net.cpp:150] Setting up L2_b1_pool\nI1212 06:17:18.889986 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.889991 20613 net.cpp:165] Memory required for data: 878081500\nI1212 06:17:18.889997 20613 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:17:18.890012 20613 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1212 06:17:18.890017 20613 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:17:18.890025 20613 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:17:18.890033 20613 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:17:18.890067 20613 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:17:18.890076 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.890081 20613 net.cpp:165] Memory required for data: 880129500\nI1212 06:17:18.890087 20613 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:17:18.890094 20613 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:17:18.890100 20613 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:17:18.890111 20613 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:17:18.890128 20613 net.cpp:150] Setting up L2_b1_relu\nI1212 06:17:18.890136 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.890141 20613 net.cpp:165] Memory required for data: 882177500\nI1212 06:17:18.890146 20613 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:17:18.890198 20613 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:17:18.890213 20613 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:17:18.892598 20613 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:17:18.892623 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:18.892629 20613 net.cpp:165] Memory required for data: 884225500\nI1212 06:17:18.892635 20613 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:17:18.892650 20613 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:17:18.892657 20613 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:17:18.892665 20613 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:17:18.892673 20613 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:17:18.892724 20613 net.cpp:150] Setting up 
L2_b1_concat0\nI1212 06:17:18.892736 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.892741 20613 net.cpp:165] Memory required for data: 888321500\nI1212 06:17:18.892746 20613 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:18.892755 20613 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:18.892760 20613 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:17:18.892771 20613 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:18.892782 20613 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:18.892832 20613 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:18.892846 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.892853 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.892858 20613 net.cpp:165] Memory required for data: 896513500\nI1212 06:17:18.892863 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:17:18.892876 20613 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:17:18.892884 20613 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:18.892894 20613 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:17:18.894387 20613 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:17:18.894403 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.894409 20613 net.cpp:165] Memory required for data: 900609500\nI1212 06:17:18.894419 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:17:18.894433 20613 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:17:18.894439 20613 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:17:18.894451 20613 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:17:18.894709 20613 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:17:18.894723 20613 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI1212 06:17:18.894728 20613 net.cpp:165] Memory required for data: 904705500\nI1212 06:17:18.894740 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:18.894749 20613 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:17:18.894757 20613 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:17:18.894764 20613 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.894824 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:18.894969 20613 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:17:18.894982 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.894987 20613 net.cpp:165] Memory required for data: 908801500\nI1212 06:17:18.894996 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:17:18.895009 20613 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:17:18.895015 20613 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:17:18.895022 20613 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.895040 20613 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:17:18.895047 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.895052 20613 net.cpp:165] Memory required for data: 912897500\nI1212 06:17:18.895057 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:17:18.895072 20613 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:17:18.895079 20613 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:17:18.895088 20613 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:17:18.895550 20613 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:17:18.895565 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.895570 20613 net.cpp:165] Memory required for data: 916993500\nI1212 06:17:18.895579 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:17:18.895592 20613 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:17:18.895599 
20613 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:17:18.895608 20613 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:17:18.895866 20613 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:17:18.895884 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.895889 20613 net.cpp:165] Memory required for data: 921089500\nI1212 06:17:18.895900 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:18.895908 20613 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:17:18.895915 20613 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:17:18.895922 20613 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:17:18.895978 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:18.896126 20613 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:17:18.896139 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896144 20613 net.cpp:165] Memory required for data: 925185500\nI1212 06:17:18.896153 20613 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:17:18.896163 20613 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:17:18.896169 20613 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:17:18.896176 20613 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:18.896188 20613 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:17:18.896214 20613 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:17:18.896224 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896229 20613 net.cpp:165] Memory required for data: 929281500\nI1212 06:17:18.896234 20613 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:17:18.896245 20613 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:17:18.896251 20613 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:17:18.896258 20613 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:17:18.896268 20613 net.cpp:150] 
Setting up L2_b2_relu\nI1212 06:17:18.896275 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896280 20613 net.cpp:165] Memory required for data: 933377500\nI1212 06:17:18.896284 20613 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:18.896292 20613 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:18.896297 20613 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:17:18.896306 20613 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:18.896314 20613 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:18.896363 20613 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:18.896375 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896381 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896386 20613 net.cpp:165] Memory required for data: 941569500\nI1212 06:17:18.896391 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:17:18.896406 20613 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:17:18.896420 20613 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:18.896430 20613 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:17:18.896903 20613 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:17:18.896919 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.896924 20613 net.cpp:165] Memory required for data: 945665500\nI1212 06:17:18.896934 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:17:18.896945 20613 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:17:18.896952 20613 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:17:18.896963 20613 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:17:18.897209 20613 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI1212 06:17:18.897225 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.897230 20613 net.cpp:165] Memory required for data: 949761500\nI1212 06:17:18.897241 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:18.897250 20613 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:17:18.897256 20613 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:17:18.897264 20613 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.897321 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:18.897470 20613 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:17:18.897483 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.897488 20613 net.cpp:165] Memory required for data: 953857500\nI1212 06:17:18.897497 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:17:18.897505 20613 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:17:18.897511 20613 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:17:18.897522 20613 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.897532 20613 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:17:18.897539 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.897543 20613 net.cpp:165] Memory required for data: 957953500\nI1212 06:17:18.897548 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:17:18.897563 20613 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:17:18.897569 20613 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:17:18.897578 20613 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:17:18.898051 20613 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:17:18.898066 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898071 20613 net.cpp:165] Memory required for data: 962049500\nI1212 06:17:18.898079 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:17:18.898092 20613 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:17:18.898098 20613 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:17:18.898108 20613 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:17:18.898358 20613 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:17:18.898375 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898380 20613 net.cpp:165] Memory required for data: 966145500\nI1212 06:17:18.898391 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:18.898399 20613 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:17:18.898406 20613 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:17:18.898413 20613 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:17:18.898468 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:18.898625 20613 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:17:18.898639 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898644 20613 net.cpp:165] Memory required for data: 970241500\nI1212 06:17:18.898653 20613 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:17:18.898663 20613 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:17:18.898669 20613 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:17:18.898676 20613 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:18.898696 20613 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:17:18.898725 20613 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:17:18.898736 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898741 20613 net.cpp:165] Memory required for data: 974337500\nI1212 06:17:18.898746 20613 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:17:18.898766 20613 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:17:18.898772 20613 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:17:18.898780 20613 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI1212 06:17:18.898792 20613 net.cpp:150] Setting up L2_b3_relu\nI1212 06:17:18.898800 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898805 20613 net.cpp:165] Memory required for data: 978433500\nI1212 06:17:18.898810 20613 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:18.898818 20613 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:18.898823 20613 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:17:18.898831 20613 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:18.898841 20613 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:18.898890 20613 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:18.898901 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898908 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.898913 20613 net.cpp:165] Memory required for data: 986625500\nI1212 06:17:18.898918 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:17:18.898931 20613 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:17:18.898936 20613 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:18.898949 20613 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:17:18.899416 20613 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:17:18.899431 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.899436 20613 net.cpp:165] Memory required for data: 990721500\nI1212 06:17:18.899446 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:17:18.899454 20613 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:17:18.899461 20613 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:17:18.899472 20613 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI1212 06:17:18.899730 20613 net.cpp:150] Setting up L2_b4_cbr1_bn\nI1212 06:17:18.899744 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.899749 20613 net.cpp:165] Memory required for data: 994817500\nI1212 06:17:18.899760 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:18.899771 20613 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:17:18.899778 20613 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:17:18.899786 20613 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.899840 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:18.899988 20613 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:17:18.900002 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.900007 20613 net.cpp:165] Memory required for data: 998913500\nI1212 06:17:18.900015 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:17:18.900027 20613 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:17:18.900033 20613 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:17:18.900040 20613 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.900050 20613 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:17:18.900058 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.900061 20613 net.cpp:165] Memory required for data: 1003009500\nI1212 06:17:18.900066 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:17:18.900080 20613 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:17:18.900094 20613 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:17:18.900105 20613 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:17:18.900565 20613 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:17:18.900580 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.900585 20613 net.cpp:165] Memory required for data: 1007105500\nI1212 06:17:18.900593 20613 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:17:18.900604 20613 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:17:18.900609 20613 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:17:18.900629 20613 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:17:18.900879 20613 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:17:18.900892 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.900897 20613 net.cpp:165] Memory required for data: 1011201500\nI1212 06:17:18.900908 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:18.900924 20613 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:17:18.900931 20613 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:17:18.900939 20613 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:17:18.900990 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:18.901137 20613 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:17:18.901150 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901155 20613 net.cpp:165] Memory required for data: 1015297500\nI1212 06:17:18.901165 20613 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:17:18.901173 20613 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:17:18.901180 20613 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:17:18.901190 20613 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:18.901197 20613 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:17:18.901224 20613 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:17:18.901237 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901242 20613 net.cpp:165] Memory required for data: 1019393500\nI1212 06:17:18.901247 20613 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:17:18.901255 20613 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:17:18.901260 20613 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI1212 06:17:18.901268 20613 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI1212 06:17:18.901278 20613 net.cpp:150] Setting up L2_b4_relu\nI1212 06:17:18.901283 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901288 20613 net.cpp:165] Memory required for data: 1023489500\nI1212 06:17:18.901293 20613 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:18.901304 20613 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:18.901309 20613 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:17:18.901316 20613 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:18.901326 20613 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:18.901373 20613 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:18.901386 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901391 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901396 20613 net.cpp:165] Memory required for data: 1031681500\nI1212 06:17:18.901401 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:17:18.901413 20613 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:17:18.901419 20613 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:18.901432 20613 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:17:18.901904 20613 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:17:18.901919 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.901932 20613 net.cpp:165] Memory required for data: 1035777500\nI1212 06:17:18.901940 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:17:18.901950 20613 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:17:18.901957 20613 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI1212 06:17:18.901968 20613 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI1212 06:17:18.902221 20613 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:17:18.902235 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.902240 20613 net.cpp:165] Memory required for data: 1039873500\nI1212 06:17:18.902251 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:18.902262 20613 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:17:18.902268 20613 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:17:18.902276 20613 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.902331 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:18.902482 20613 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:17:18.902495 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.902500 20613 net.cpp:165] Memory required for data: 1043969500\nI1212 06:17:18.902509 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:17:18.902525 20613 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:17:18.902531 20613 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:17:18.902539 20613 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.902549 20613 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:17:18.902555 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.902559 20613 net.cpp:165] Memory required for data: 1048065500\nI1212 06:17:18.902565 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:17:18.902580 20613 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:17:18.902586 20613 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:17:18.902597 20613 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:17:18.903062 20613 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:17:18.903077 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903082 20613 
net.cpp:165] Memory required for data: 1052161500\nI1212 06:17:18.903091 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:17:18.903101 20613 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:17:18.903107 20613 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:17:18.903115 20613 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:17:18.903362 20613 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:17:18.903375 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903380 20613 net.cpp:165] Memory required for data: 1056257500\nI1212 06:17:18.903391 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:18.903400 20613 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:17:18.903405 20613 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:17:18.903416 20613 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:17:18.903472 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:18.903625 20613 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:17:18.903638 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903645 20613 net.cpp:165] Memory required for data: 1060353500\nI1212 06:17:18.903653 20613 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:17:18.903662 20613 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:17:18.903668 20613 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:17:18.903676 20613 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:18.903687 20613 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:17:18.903714 20613 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:17:18.903726 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903731 20613 net.cpp:165] Memory required for data: 1064449500\nI1212 06:17:18.903743 20613 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:17:18.903753 20613 net.cpp:100] Creating 
Layer L2_b5_relu\nI1212 06:17:18.903758 20613 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI1212 06:17:18.903765 20613 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:17:18.903774 20613 net.cpp:150] Setting up L2_b5_relu\nI1212 06:17:18.903781 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903785 20613 net.cpp:165] Memory required for data: 1068545500\nI1212 06:17:18.903790 20613 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:18.903800 20613 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:18.903806 20613 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:17:18.903813 20613 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:18.903823 20613 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:18.903868 20613 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:18.903883 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903890 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.903894 20613 net.cpp:165] Memory required for data: 1076737500\nI1212 06:17:18.903900 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:17:18.903911 20613 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:17:18.903918 20613 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:18.903928 20613 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:17:18.904394 20613 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:17:18.904412 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.904417 20613 net.cpp:165] Memory required for data: 1080833500\nI1212 06:17:18.904426 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:17:18.904436 20613 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI1212 06:17:18.904443 20613 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI1212 06:17:18.904455 20613 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:17:18.904722 20613 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:17:18.904736 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.904742 20613 net.cpp:165] Memory required for data: 1084929500\nI1212 06:17:18.904752 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:18.904764 20613 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:17:18.904770 20613 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:17:18.904778 20613 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.904830 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:18.904978 20613 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:17:18.904990 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.904995 20613 net.cpp:165] Memory required for data: 1089025500\nI1212 06:17:18.905004 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:17:18.905012 20613 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:17:18.905019 20613 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:17:18.905028 20613 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.905038 20613 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:17:18.905045 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.905050 20613 net.cpp:165] Memory required for data: 1093121500\nI1212 06:17:18.905055 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:17:18.905069 20613 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:17:18.905076 20613 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:17:18.905084 20613 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:17:18.905550 20613 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:17:18.905565 20613 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.905575 20613 net.cpp:165] Memory required for data: 1097217500\nI1212 06:17:18.905586 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:17:18.905598 20613 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:17:18.905606 20613 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:17:18.905613 20613 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:17:18.905869 20613 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:17:18.905882 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.905887 20613 net.cpp:165] Memory required for data: 1101313500\nI1212 06:17:18.905897 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:18.905906 20613 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:17:18.905912 20613 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:17:18.905923 20613 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:17:18.905979 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:18.906132 20613 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:17:18.906146 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906150 20613 net.cpp:165] Memory required for data: 1105409500\nI1212 06:17:18.906159 20613 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:17:18.906168 20613 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:17:18.906174 20613 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:17:18.906182 20613 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:18.906194 20613 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:17:18.906222 20613 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:17:18.906231 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906236 20613 net.cpp:165] Memory required for data: 1109505500\nI1212 06:17:18.906241 20613 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI1212 06:17:18.906252 20613 net.cpp:100] Creating Layer L2_b6_relu\nI1212 06:17:18.906258 20613 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:17:18.906265 20613 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:17:18.906275 20613 net.cpp:150] Setting up L2_b6_relu\nI1212 06:17:18.906281 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906286 20613 net.cpp:165] Memory required for data: 1113601500\nI1212 06:17:18.906291 20613 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:18.906301 20613 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:18.906306 20613 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:17:18.906314 20613 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:18.906324 20613 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:18.906368 20613 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:18.906383 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906389 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906394 20613 net.cpp:165] Memory required for data: 1121793500\nI1212 06:17:18.906399 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:17:18.906411 20613 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:17:18.906417 20613 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:18.906427 20613 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:17:18.906903 20613 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:17:18.906916 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.906921 20613 net.cpp:165] Memory required for data: 1125889500\nI1212 06:17:18.906930 20613 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI1212 06:17:18.906944 20613 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI1212 06:17:18.906950 20613 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:17:18.906965 20613 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:17:18.907222 20613 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:17:18.907234 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.907239 20613 net.cpp:165] Memory required for data: 1129985500\nI1212 06:17:18.907250 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:18.907259 20613 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:17:18.907265 20613 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:17:18.907276 20613 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.907332 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:18.907482 20613 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:17:18.907495 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.907500 20613 net.cpp:165] Memory required for data: 1134081500\nI1212 06:17:18.907510 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:17:18.907517 20613 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:17:18.907523 20613 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:17:18.907534 20613 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.907546 20613 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:17:18.907552 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.907557 20613 net.cpp:165] Memory required for data: 1138177500\nI1212 06:17:18.907562 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:17:18.907577 20613 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:17:18.907582 20613 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:17:18.907591 20613 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:17:18.908071 20613 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:17:18.908087 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908092 20613 net.cpp:165] Memory required for data: 1142273500\nI1212 06:17:18.908100 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:17:18.908113 20613 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:17:18.908119 20613 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:17:18.908128 20613 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:17:18.908382 20613 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:17:18.908396 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908401 20613 net.cpp:165] Memory required for data: 1146369500\nI1212 06:17:18.908411 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:18.908421 20613 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:17:18.908427 20613 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:17:18.908438 20613 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:17:18.908496 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:18.908682 20613 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:17:18.908696 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908701 20613 net.cpp:165] Memory required for data: 1150465500\nI1212 06:17:18.908711 20613 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:17:18.908720 20613 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:17:18.908726 20613 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:17:18.908733 20613 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:18.908745 20613 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:17:18.908772 20613 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:17:18.908782 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908787 20613 net.cpp:165] Memory required for 
data: 1154561500\nI1212 06:17:18.908792 20613 layer_factory.hpp:77] Creating layer L2_b7_relu\nI1212 06:17:18.908803 20613 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:17:18.908809 20613 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:17:18.908816 20613 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:17:18.908833 20613 net.cpp:150] Setting up L2_b7_relu\nI1212 06:17:18.908840 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908845 20613 net.cpp:165] Memory required for data: 1158657500\nI1212 06:17:18.908850 20613 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:18.908857 20613 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:18.908862 20613 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:17:18.908872 20613 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:18.908884 20613 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:18.908929 20613 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:18.908944 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908951 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.908955 20613 net.cpp:165] Memory required for data: 1166849500\nI1212 06:17:18.908960 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:17:18.908972 20613 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:17:18.908979 20613 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:18.908988 20613 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:17:18.909462 20613 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:17:18.909477 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.909482 20613 net.cpp:165] Memory required for data: 
1170945500\nI1212 06:17:18.909492 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI1212 06:17:18.909503 20613 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:17:18.909510 20613 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:17:18.909518 20613 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:17:18.909785 20613 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:17:18.909821 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.909826 20613 net.cpp:165] Memory required for data: 1175041500\nI1212 06:17:18.909837 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:18.909847 20613 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:17:18.909853 20613 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:17:18.909864 20613 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.909924 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:18.910079 20613 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:17:18.910091 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.910097 20613 net.cpp:165] Memory required for data: 1179137500\nI1212 06:17:18.910106 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:17:18.910115 20613 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:17:18.910120 20613 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:17:18.910131 20613 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.910141 20613 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:17:18.910148 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.910153 20613 net.cpp:165] Memory required for data: 1183233500\nI1212 06:17:18.910157 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:17:18.910172 20613 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:17:18.910179 20613 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:17:18.910188 20613 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:17:18.910670 20613 net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:17:18.910683 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.910689 20613 net.cpp:165] Memory required for data: 1187329500\nI1212 06:17:18.910698 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:17:18.910711 20613 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:17:18.910717 20613 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:17:18.910734 20613 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:17:18.911157 20613 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:17:18.911283 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.911291 20613 net.cpp:165] Memory required for data: 1191425500\nI1212 06:17:18.911303 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:18.911314 20613 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:17:18.911319 20613 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:17:18.911329 20613 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:17:18.911391 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:18.911651 20613 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:17:18.911670 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.911676 20613 net.cpp:165] Memory required for data: 1195521500\nI1212 06:17:18.911685 20613 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:17:18.911695 20613 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:17:18.911701 20613 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:17:18.911708 20613 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:18.911715 20613 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:17:18.911749 20613 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:17:18.911761 20613 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:18.911767 20613 net.cpp:165] Memory required for data: 1199617500\nI1212 06:17:18.911772 20613 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:17:18.911780 20613 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:17:18.911785 20613 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:17:18.911797 20613 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:17:18.911806 20613 net.cpp:150] Setting up L2_b8_relu\nI1212 06:17:18.911813 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.911818 20613 net.cpp:165] Memory required for data: 1203713500\nI1212 06:17:18.911823 20613 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:18.911829 20613 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:18.911835 20613 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:17:18.911845 20613 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:18.911870 20613 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:18.911917 20613 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:18.911929 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.911936 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.911940 20613 net.cpp:165] Memory required for data: 1211905500\nI1212 06:17:18.911945 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:17:18.911962 20613 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:17:18.911968 20613 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:18.911981 20613 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:17:18.912448 20613 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:17:18.912463 20613 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:18.912468 20613 net.cpp:165] Memory required for data: 1216001500\nI1212 06:17:18.912477 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:17:18.912487 20613 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:17:18.912493 20613 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:17:18.912505 20613 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:17:18.912760 20613 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:17:18.912775 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.912780 20613 net.cpp:165] Memory required for data: 1220097500\nI1212 06:17:18.912798 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:18.912807 20613 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:17:18.912813 20613 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:17:18.912822 20613 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.912881 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:18.913034 20613 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:17:18.913049 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.913055 20613 net.cpp:165] Memory required for data: 1224193500\nI1212 06:17:18.913064 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:17:18.913072 20613 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:17:18.913079 20613 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:17:18.913085 20613 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.913095 20613 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:17:18.913102 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.913106 20613 net.cpp:165] Memory required for data: 1228289500\nI1212 06:17:18.913111 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:17:18.913125 20613 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:17:18.913132 20613 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:17:18.913143 20613 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:17:18.913622 20613 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:17:18.913636 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.913642 20613 net.cpp:165] Memory required for data: 1232385500\nI1212 06:17:18.913651 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:17:18.913663 20613 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:17:18.913671 20613 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:17:18.913681 20613 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:17:18.913938 20613 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:17:18.913950 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.913955 20613 net.cpp:165] Memory required for data: 1236481500\nI1212 06:17:18.913998 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:18.914013 20613 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:17:18.914021 20613 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:17:18.914028 20613 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:17:18.914113 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:18.914269 20613 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:17:18.914283 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.914288 20613 net.cpp:165] Memory required for data: 1240577500\nI1212 06:17:18.914296 20613 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:17:18.914309 20613 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:17:18.914316 20613 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:17:18.914324 20613 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:18.914331 20613 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:17:18.914362 20613 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI1212 06:17:18.914371 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.914376 20613 net.cpp:165] Memory required for data: 1244673500\nI1212 06:17:18.914381 20613 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:17:18.914389 20613 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:17:18.914396 20613 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:17:18.914402 20613 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:17:18.914414 20613 net.cpp:150] Setting up L2_b9_relu\nI1212 06:17:18.914422 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.914427 20613 net.cpp:165] Memory required for data: 1248769500\nI1212 06:17:18.914432 20613 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:18.914445 20613 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:18.914451 20613 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:17:18.914463 20613 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:18.914472 20613 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:18.914521 20613 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:18.914532 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.914539 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:18.914544 20613 net.cpp:165] Memory required for data: 1256961500\nI1212 06:17:18.914549 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:17:18.914561 20613 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:17:18.914567 20613 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:18.914579 20613 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:17:18.915061 20613 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI1212 06:17:18.915076 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.915081 20613 net.cpp:165] Memory required for data: 1257985500\nI1212 06:17:18.915089 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:17:18.915099 20613 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:17:18.915105 20613 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:17:18.915117 20613 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:17:18.915385 20613 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:17:18.915402 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.915407 20613 net.cpp:165] Memory required for data: 1259009500\nI1212 06:17:18.915418 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:18.915427 20613 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:17:18.915433 20613 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:17:18.915441 20613 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.915496 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:18.915657 20613 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:17:18.915671 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.915676 20613 net.cpp:165] Memory required for data: 1260033500\nI1212 06:17:18.915685 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:17:18.915697 20613 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:17:18.915704 20613 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:17:18.915710 20613 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:18.915720 20613 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:17:18.915727 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.915731 20613 net.cpp:165] Memory required for data: 1261057500\nI1212 06:17:18.915736 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:17:18.915750 20613 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI1212 06:17:18.915757 20613 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:17:18.915766 20613 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:17:18.916237 20613 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:17:18.916252 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.916257 20613 net.cpp:165] Memory required for data: 1262081500\nI1212 06:17:18.916265 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:17:18.916278 20613 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:17:18.916285 20613 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:17:18.916296 20613 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:17:18.916553 20613 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:17:18.916565 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.916570 20613 net.cpp:165] Memory required for data: 1263105500\nI1212 06:17:18.916589 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:18.916597 20613 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:17:18.916604 20613 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:17:18.916615 20613 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:17:18.916677 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:18.916852 20613 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:17:18.916867 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.916872 20613 net.cpp:165] Memory required for data: 1264129500\nI1212 06:17:18.916880 20613 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:17:18.916889 20613 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:17:18.916896 20613 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:18.916908 20613 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:17:18.916945 20613 net.cpp:150] Setting up L3_b1_pool\nI1212 06:17:18.916957 20613 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI1212 06:17:18.916961 20613 net.cpp:165] Memory required for data: 1265153500\nI1212 06:17:18.916967 20613 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:17:18.916975 20613 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:17:18.916981 20613 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:17:18.916988 20613 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:17:18.916998 20613 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:17:18.917032 20613 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:17:18.917042 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.917047 20613 net.cpp:165] Memory required for data: 1266177500\nI1212 06:17:18.917052 20613 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:17:18.917060 20613 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:17:18.917066 20613 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:17:18.917073 20613 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:17:18.917083 20613 net.cpp:150] Setting up L3_b1_relu\nI1212 06:17:18.917089 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.917094 20613 net.cpp:165] Memory required for data: 1267201500\nI1212 06:17:18.917098 20613 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:17:18.917111 20613 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:17:18.917119 20613 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:17:18.918378 20613 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:17:18.918397 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:18.918402 20613 net.cpp:165] Memory required for data: 1268225500\nI1212 06:17:18.918408 20613 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:17:18.918419 20613 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:17:18.918426 20613 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:17:18.918433 20613 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 
06:17:18.918444 20613 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 06:17:18.918484 20613 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:17:18.918500 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.918505 20613 net.cpp:165] Memory required for data: 1270273500\nI1212 06:17:18.918510 20613 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:18.918519 20613 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:18.918524 20613 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:17:18.918532 20613 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:18.918542 20613 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:18.918592 20613 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:18.918604 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.918611 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.918615 20613 net.cpp:165] Memory required for data: 1274369500\nI1212 06:17:18.918635 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:17:18.918651 20613 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:17:18.918658 20613 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:18.918668 20613 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:17:18.920696 20613 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:17:18.920712 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.920718 20613 net.cpp:165] Memory required for data: 1276417500\nI1212 06:17:18.920728 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:17:18.920742 20613 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:17:18.920748 20613 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:17:18.920758 20613 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 
06:17:18.921022 20613 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 06:17:18.921036 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.921041 20613 net.cpp:165] Memory required for data: 1278465500\nI1212 06:17:18.921052 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:18.921061 20613 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:17:18.921068 20613 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:17:18.921077 20613 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.921135 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:18.921290 20613 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:17:18.921306 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.921311 20613 net.cpp:165] Memory required for data: 1280513500\nI1212 06:17:18.921321 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:17:18.921329 20613 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:17:18.921336 20613 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:17:18.921344 20613 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:18.921353 20613 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:17:18.921360 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.921365 20613 net.cpp:165] Memory required for data: 1282561500\nI1212 06:17:18.921370 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:17:18.921385 20613 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:17:18.921391 20613 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:17:18.921401 20613 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:17:18.922425 20613 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:17:18.922441 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.922446 20613 net.cpp:165] Memory required for data: 1284609500\nI1212 06:17:18.922456 20613 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI1212 06:17:18.922464 20613 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI1212 06:17:18.922474 20613 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:17:18.922483 20613 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:17:18.922758 20613 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:17:18.922772 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.922777 20613 net.cpp:165] Memory required for data: 1286657500\nI1212 06:17:18.922787 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:18.922797 20613 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:17:18.922803 20613 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:17:18.922814 20613 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:17:18.922873 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:18.923027 20613 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:17:18.923040 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.923045 20613 net.cpp:165] Memory required for data: 1288705500\nI1212 06:17:18.923055 20613 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:17:18.923068 20613 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:17:18.923081 20613 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:17:18.923089 20613 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:18.923097 20613 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:17:18.923135 20613 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:17:18.923147 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.923152 20613 net.cpp:165] Memory required for data: 1290753500\nI1212 06:17:18.923158 20613 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:17:18.923166 20613 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:17:18.923172 20613 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:17:18.923182 20613 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 06:17:18.923192 20613 net.cpp:150] Setting up L3_b2_relu\nI1212 06:17:18.923199 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.923204 20613 net.cpp:165] Memory required for data: 1292801500\nI1212 06:17:18.923209 20613 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:18.923216 20613 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:18.923221 20613 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:17:18.923229 20613 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:18.923239 20613 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:18.923290 20613 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:18.923302 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.923308 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.923313 20613 net.cpp:165] Memory required for data: 1296897500\nI1212 06:17:18.923318 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:17:18.923331 20613 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:17:18.923336 20613 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:18.923349 20613 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:17:18.924374 20613 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:17:18.924389 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.924394 20613 net.cpp:165] Memory required for data: 1298945500\nI1212 06:17:18.924403 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:17:18.924415 20613 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:17:18.924422 20613 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:17:18.924432 20613 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:17:18.924698 20613 net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:17:18.924712 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.924717 20613 net.cpp:165] Memory required for data: 1300993500\nI1212 06:17:18.924728 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:18.924736 20613 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:17:18.924743 20613 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:17:18.924751 20613 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.924813 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:18.924971 20613 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:17:18.924988 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.924993 20613 net.cpp:165] Memory required for data: 1303041500\nI1212 06:17:18.925001 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:17:18.925009 20613 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:17:18.925016 20613 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:17:18.925024 20613 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:18.925034 20613 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:17:18.925040 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.925045 20613 net.cpp:165] Memory required for data: 1305089500\nI1212 06:17:18.925056 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:17:18.925071 20613 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:17:18.925078 20613 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:17:18.925091 20613 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:17:18.926110 20613 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:17:18.926126 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926131 20613 net.cpp:165] Memory required for data: 1307137500\nI1212 06:17:18.926139 20613 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 06:17:18.926153 20613 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:17:18.926160 20613 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:17:18.926169 20613 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:17:18.926432 20613 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:17:18.926445 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926450 20613 net.cpp:165] Memory required for data: 1309185500\nI1212 06:17:18.926461 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:18.926472 20613 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:17:18.926479 20613 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:17:18.926487 20613 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:17:18.926548 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:18.926712 20613 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:17:18.926725 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926730 20613 net.cpp:165] Memory required for data: 1311233500\nI1212 06:17:18.926740 20613 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:17:18.926751 20613 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:17:18.926759 20613 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:17:18.926765 20613 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:18.926774 20613 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:17:18.926810 20613 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:17:18.926820 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926825 20613 net.cpp:165] Memory required for data: 1313281500\nI1212 06:17:18.926829 20613 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:17:18.926837 20613 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:17:18.926843 20613 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI1212 06:17:18.926854 20613 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:17:18.926864 20613 net.cpp:150] Setting up L3_b3_relu\nI1212 06:17:18.926872 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926877 20613 net.cpp:165] Memory required for data: 1315329500\nI1212 06:17:18.926880 20613 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:18.926888 20613 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:18.926893 20613 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:17:18.926901 20613 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:18.926911 20613 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:18.926959 20613 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:18.926970 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926977 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.926982 20613 net.cpp:165] Memory required for data: 1319425500\nI1212 06:17:18.926987 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:17:18.927002 20613 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:17:18.927009 20613 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:18.927019 20613 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:17:18.928042 20613 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:17:18.928064 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.928069 20613 net.cpp:165] Memory required for data: 1321473500\nI1212 06:17:18.928078 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:17:18.928093 20613 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:17:18.928100 20613 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI1212 06:17:18.928110 20613 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI1212 06:17:18.928378 20613 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:17:18.928391 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.928396 20613 net.cpp:165] Memory required for data: 1323521500\nI1212 06:17:18.928407 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:18.928416 20613 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:17:18.928422 20613 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:17:18.928431 20613 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.928490 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:18.928655 20613 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:17:18.928669 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.928674 20613 net.cpp:165] Memory required for data: 1325569500\nI1212 06:17:18.928683 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:17:18.928692 20613 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:17:18.928699 20613 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:17:18.928705 20613 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:18.928719 20613 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:17:18.928726 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.928730 20613 net.cpp:165] Memory required for data: 1327617500\nI1212 06:17:18.928735 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:17:18.928746 20613 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:17:18.928755 20613 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:17:18.928764 20613 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:17:18.929793 20613 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:17:18.929810 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.929814 20613 net.cpp:165] Memory 
required for data: 1329665500\nI1212 06:17:18.929822 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:17:18.929836 20613 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:17:18.929842 20613 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:17:18.929850 20613 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:17:18.930125 20613 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:17:18.930140 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930145 20613 net.cpp:165] Memory required for data: 1331713500\nI1212 06:17:18.930155 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:18.930166 20613 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:17:18.930173 20613 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:17:18.930181 20613 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:17:18.930243 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:18.930408 20613 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:17:18.930420 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930425 20613 net.cpp:165] Memory required for data: 1333761500\nI1212 06:17:18.930434 20613 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:17:18.930446 20613 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:17:18.930454 20613 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:17:18.930460 20613 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:18.930471 20613 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:17:18.930505 20613 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:17:18.930514 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930526 20613 net.cpp:165] Memory required for data: 1335809500\nI1212 06:17:18.930531 20613 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:17:18.930542 20613 net.cpp:100] Creating Layer L3_b4_relu\nI1212 
06:17:18.930548 20613 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI1212 06:17:18.930557 20613 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:17:18.930565 20613 net.cpp:150] Setting up L3_b4_relu\nI1212 06:17:18.930572 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930577 20613 net.cpp:165] Memory required for data: 1337857500\nI1212 06:17:18.930583 20613 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:18.930589 20613 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:18.930595 20613 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:17:18.930603 20613 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:18.930613 20613 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:18.930670 20613 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:18.930682 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930690 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.930694 20613 net.cpp:165] Memory required for data: 1341953500\nI1212 06:17:18.930707 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:17:18.930722 20613 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:17:18.930729 20613 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:18.930739 20613 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:17:18.931769 20613 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:17:18.931784 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.931789 20613 net.cpp:165] Memory required for data: 1344001500\nI1212 06:17:18.931798 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:17:18.931812 20613 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 
06:17:18.931818 20613 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI1212 06:17:18.931826 20613 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:17:18.933113 20613 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:17:18.933130 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.933135 20613 net.cpp:165] Memory required for data: 1346049500\nI1212 06:17:18.933147 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:18.933161 20613 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:17:18.933167 20613 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:17:18.933176 20613 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.933239 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:18.933399 20613 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:17:18.933411 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.933416 20613 net.cpp:165] Memory required for data: 1348097500\nI1212 06:17:18.933426 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:17:18.933437 20613 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:17:18.933444 20613 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:17:18.933451 20613 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:18.933461 20613 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:17:18.933471 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.933476 20613 net.cpp:165] Memory required for data: 1350145500\nI1212 06:17:18.933481 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:17:18.933493 20613 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:17:18.933499 20613 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:17:18.933511 20613 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:17:18.935542 20613 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:17:18.935567 20613 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:18.935573 20613 net.cpp:165] Memory required for data: 1352193500\nI1212 06:17:18.935583 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:17:18.935596 20613 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:17:18.935603 20613 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:17:18.935612 20613 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:17:18.935884 20613 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:17:18.935899 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.935904 20613 net.cpp:165] Memory required for data: 1354241500\nI1212 06:17:18.935914 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:18.935926 20613 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:17:18.935933 20613 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:17:18.935941 20613 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:17:18.936002 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:18.936156 20613 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:17:18.936168 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.936173 20613 net.cpp:165] Memory required for data: 1356289500\nI1212 06:17:18.936182 20613 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:17:18.936194 20613 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:17:18.936202 20613 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:17:18.936209 20613 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:18.936219 20613 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:17:18.936252 20613 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:17:18.936261 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.936266 20613 net.cpp:165] Memory required for data: 1358337500\nI1212 06:17:18.936271 20613 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 
06:17:18.936280 20613 net.cpp:100] Creating Layer L3_b5_relu\nI1212 06:17:18.936285 20613 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:17:18.936296 20613 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:17:18.936306 20613 net.cpp:150] Setting up L3_b5_relu\nI1212 06:17:18.936313 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.936318 20613 net.cpp:165] Memory required for data: 1360385500\nI1212 06:17:18.936323 20613 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:18.936331 20613 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:18.936336 20613 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:17:18.936343 20613 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:18.936353 20613 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:18.936401 20613 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:18.936414 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.936419 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.936424 20613 net.cpp:165] Memory required for data: 1364481500\nI1212 06:17:18.936429 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:17:18.936444 20613 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:17:18.936451 20613 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:18.936461 20613 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:17:18.937477 20613 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:17:18.937494 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.937499 20613 net.cpp:165] Memory required for data: 1366529500\nI1212 06:17:18.937507 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:17:18.937520 
20613 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 06:17:18.937526 20613 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:17:18.937542 20613 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:17:18.937808 20613 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:17:18.937822 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.937827 20613 net.cpp:165] Memory required for data: 1368577500\nI1212 06:17:18.937837 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:18.937846 20613 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:17:18.937853 20613 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:17:18.937861 20613 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.937921 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:18.938079 20613 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:17:18.938092 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.938097 20613 net.cpp:165] Memory required for data: 1370625500\nI1212 06:17:18.938107 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:17:18.938114 20613 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:17:18.938122 20613 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:17:18.938134 20613 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:18.938145 20613 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:17:18.938153 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.938158 20613 net.cpp:165] Memory required for data: 1372673500\nI1212 06:17:18.938163 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:17:18.938176 20613 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:17:18.938184 20613 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:17:18.938192 20613 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:17:18.939200 20613 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 
06:17:18.939215 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.939220 20613 net.cpp:165] Memory required for data: 1374721500\nI1212 06:17:18.939229 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:17:18.939241 20613 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:17:18.939249 20613 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:17:18.939257 20613 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:17:18.939517 20613 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:17:18.939530 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.939535 20613 net.cpp:165] Memory required for data: 1376769500\nI1212 06:17:18.939546 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:18.939559 20613 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:17:18.939566 20613 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:17:18.939574 20613 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:17:18.939640 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:18.939797 20613 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:17:18.939810 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.939815 20613 net.cpp:165] Memory required for data: 1378817500\nI1212 06:17:18.939826 20613 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:17:18.939837 20613 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:17:18.939844 20613 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:17:18.939851 20613 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:18.939862 20613 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:17:18.939894 20613 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:17:18.939903 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.939908 20613 net.cpp:165] Memory required for data: 1380865500\nI1212 06:17:18.939913 20613 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 06:17:18.939924 20613 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:17:18.939930 20613 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:17:18.939939 20613 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:17:18.939954 20613 net.cpp:150] Setting up L3_b6_relu\nI1212 06:17:18.939961 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.939966 20613 net.cpp:165] Memory required for data: 1382913500\nI1212 06:17:18.939971 20613 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:18.939978 20613 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:18.939985 20613 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:17:18.939991 20613 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:18.940002 20613 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:18.940055 20613 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:18.940068 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.940074 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.940079 20613 net.cpp:165] Memory required for data: 1387009500\nI1212 06:17:18.940084 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:17:18.940099 20613 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:17:18.940106 20613 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:18.940116 20613 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:17:18.941138 20613 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:17:18.941153 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.941157 20613 net.cpp:165] Memory required for data: 1389057500\nI1212 06:17:18.941166 20613 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:17:18.941181 20613 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:17:18.941189 20613 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:17:18.941200 20613 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:17:18.941462 20613 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:17:18.941474 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.941479 20613 net.cpp:165] Memory required for data: 1391105500\nI1212 06:17:18.941489 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:18.941498 20613 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:17:18.941505 20613 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:17:18.941514 20613 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.941573 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:18.941736 20613 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:17:18.941751 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.941756 20613 net.cpp:165] Memory required for data: 1393153500\nI1212 06:17:18.941764 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:17:18.941800 20613 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:17:18.941810 20613 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:17:18.941818 20613 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:18.941828 20613 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:17:18.941835 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.941840 20613 net.cpp:165] Memory required for data: 1395201500\nI1212 06:17:18.941846 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:17:18.941860 20613 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:17:18.941867 20613 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:17:18.941876 20613 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 
06:17:18.942901 20613 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 06:17:18.942916 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.942921 20613 net.cpp:165] Memory required for data: 1397249500\nI1212 06:17:18.942931 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:17:18.942944 20613 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:17:18.942950 20613 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:17:18.942965 20613 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:17:18.943239 20613 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:17:18.943253 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943258 20613 net.cpp:165] Memory required for data: 1399297500\nI1212 06:17:18.943269 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:18.943276 20613 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:17:18.943284 20613 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:17:18.943291 20613 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:17:18.943351 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:18.943509 20613 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:17:18.943522 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943527 20613 net.cpp:165] Memory required for data: 1401345500\nI1212 06:17:18.943536 20613 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:17:18.943545 20613 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:17:18.943552 20613 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:17:18.943558 20613 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:18.943569 20613 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:17:18.943603 20613 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:17:18.943614 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943627 20613 net.cpp:165] Memory 
required for data: 1403393500\nI1212 06:17:18.943634 20613 layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:17:18.943641 20613 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:17:18.943647 20613 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:17:18.943655 20613 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:17:18.943665 20613 net.cpp:150] Setting up L3_b7_relu\nI1212 06:17:18.943671 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943675 20613 net.cpp:165] Memory required for data: 1405441500\nI1212 06:17:18.943681 20613 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:18.943692 20613 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:18.943698 20613 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:17:18.943706 20613 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:18.943717 20613 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:18.943764 20613 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:18.943776 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943783 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.943789 20613 net.cpp:165] Memory required for data: 1409537500\nI1212 06:17:18.943794 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:17:18.943804 20613 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:17:18.943811 20613 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:18.943823 20613 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:17:18.944844 20613 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:17:18.944859 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.944864 20613 net.cpp:165] Memory required for data: 
1411585500\nI1212 06:17:18.944874 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:17:18.944883 20613 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:17:18.944890 20613 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:17:18.944901 20613 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:17:18.945173 20613 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:17:18.945188 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.945194 20613 net.cpp:165] Memory required for data: 1413633500\nI1212 06:17:18.945205 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:18.945225 20613 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:17:18.945231 20613 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:17:18.945240 20613 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.945298 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:18.945458 20613 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:17:18.945471 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.945475 20613 net.cpp:165] Memory required for data: 1415681500\nI1212 06:17:18.945484 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:17:18.945493 20613 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:17:18.945499 20613 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:17:18.945509 20613 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:18.945519 20613 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:17:18.945526 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.945531 20613 net.cpp:165] Memory required for data: 1417729500\nI1212 06:17:18.945536 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:17:18.945550 20613 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:17:18.945557 20613 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:17:18.945566 20613 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 06:17:18.946590 20613 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:17:18.946605 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.946610 20613 net.cpp:165] Memory required for data: 1419777500\nI1212 06:17:18.946624 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:17:18.946642 20613 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:17:18.946650 20613 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:17:18.946661 20613 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:17:18.946925 20613 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:17:18.946938 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.946943 20613 net.cpp:165] Memory required for data: 1421825500\nI1212 06:17:18.946959 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:18.946966 20613 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:17:18.946974 20613 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:17:18.946981 20613 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:17:18.947041 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:18.947197 20613 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:17:18.947211 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.947216 20613 net.cpp:165] Memory required for data: 1423873500\nI1212 06:17:18.947224 20613 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:17:18.947233 20613 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:17:18.947239 20613 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:17:18.947247 20613 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:18.947257 20613 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:17:18.947290 20613 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:17:18.947302 20613 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:18.947307 20613 net.cpp:165] Memory required for data: 1425921500\nI1212 06:17:18.947314 20613 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:17:18.947320 20613 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:17:18.947326 20613 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:17:18.947335 20613 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:17:18.947343 20613 net.cpp:150] Setting up L3_b8_relu\nI1212 06:17:18.947350 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.947355 20613 net.cpp:165] Memory required for data: 1427969500\nI1212 06:17:18.947360 20613 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:18.947369 20613 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:18.947386 20613 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:17:18.947394 20613 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:18.947404 20613 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:18.947453 20613 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:18.947465 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.947473 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.947477 20613 net.cpp:165] Memory required for data: 1432065500\nI1212 06:17:18.947482 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:17:18.947494 20613 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:17:18.947500 20613 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:18.947512 20613 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:17:18.949539 20613 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:17:18.949558 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 
06:17:18.949563 20613 net.cpp:165] Memory required for data: 1434113500\nI1212 06:17:18.949573 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:17:18.949585 20613 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:17:18.949592 20613 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:17:18.949604 20613 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:17:18.949877 20613 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:17:18.949892 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.949898 20613 net.cpp:165] Memory required for data: 1436161500\nI1212 06:17:18.949908 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:18.949918 20613 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:17:18.949923 20613 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:17:18.949935 20613 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.949993 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:18.950155 20613 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:17:18.950167 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.950173 20613 net.cpp:165] Memory required for data: 1438209500\nI1212 06:17:18.950182 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:17:18.950191 20613 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:17:18.950196 20613 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:17:18.950206 20613 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:18.950217 20613 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:17:18.950224 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.950229 20613 net.cpp:165] Memory required for data: 1440257500\nI1212 06:17:18.950234 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:17:18.950253 20613 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:17:18.950258 20613 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI1212 06:17:18.950270 20613 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:17:18.951292 20613 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:17:18.951308 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.951313 20613 net.cpp:165] Memory required for data: 1442305500\nI1212 06:17:18.951321 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:17:18.951331 20613 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:17:18.951339 20613 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:17:18.951349 20613 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:17:18.951622 20613 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:17:18.951639 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.951645 20613 net.cpp:165] Memory required for data: 1444353500\nI1212 06:17:18.951655 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:18.951673 20613 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:17:18.951679 20613 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:17:18.951687 20613 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:17:18.951746 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:18.951901 20613 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:17:18.951915 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.951920 20613 net.cpp:165] Memory required for data: 1446401500\nI1212 06:17:18.951928 20613 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:17:18.951941 20613 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:17:18.951947 20613 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:17:18.951956 20613 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:18.951963 20613 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:17:18.951999 20613 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 
06:17:18.952013 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.952018 20613 net.cpp:165] Memory required for data: 1448449500\nI1212 06:17:18.952023 20613 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:17:18.952030 20613 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:17:18.952036 20613 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:17:18.952044 20613 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:17:18.952054 20613 net.cpp:150] Setting up L3_b9_relu\nI1212 06:17:18.952060 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:18.952064 20613 net.cpp:165] Memory required for data: 1450497500\nI1212 06:17:18.952069 20613 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:17:18.952078 20613 net.cpp:100] Creating Layer post_pool\nI1212 06:17:18.952085 20613 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:17:18.952095 20613 net.cpp:408] post_pool -> post_pool\nI1212 06:17:18.952129 20613 net.cpp:150] Setting up post_pool\nI1212 06:17:18.952142 20613 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:17:18.952147 20613 net.cpp:165] Memory required for data: 1450529500\nI1212 06:17:18.952152 20613 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:17:18.952229 20613 net.cpp:100] Creating Layer post_FC\nI1212 06:17:18.952244 20613 net.cpp:434] post_FC <- post_pool\nI1212 06:17:18.952258 20613 net.cpp:408] post_FC -> post_FC_top\nI1212 06:17:18.952484 20613 net.cpp:150] Setting up post_FC\nI1212 06:17:18.952502 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:18.952507 20613 net.cpp:165] Memory required for data: 1450534500\nI1212 06:17:18.952515 20613 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:17:18.952524 20613 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:17:18.952530 20613 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:17:18.952543 20613 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI1212 06:17:18.952554 20613 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:17:18.952613 20613 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:17:18.952632 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:18.952639 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:18.952644 20613 net.cpp:165] Memory required for data: 1450544500\nI1212 06:17:18.952649 20613 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:17:18.952658 20613 net.cpp:100] Creating Layer accuracy\nI1212 06:17:18.952664 20613 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:17:18.952672 20613 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:17:18.952679 20613 net.cpp:408] accuracy -> accuracy\nI1212 06:17:18.952759 20613 net.cpp:150] Setting up accuracy\nI1212 06:17:18.952772 20613 net.cpp:157] Top shape: (1)\nI1212 06:17:18.952777 20613 net.cpp:165] Memory required for data: 1450544504\nI1212 06:17:18.952783 20613 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:18.952792 20613 net.cpp:100] Creating Layer loss\nI1212 06:17:18.952810 20613 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:17:18.952817 20613 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:17:18.952829 20613 net.cpp:408] loss -> loss\nI1212 06:17:18.956557 20613 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:18.956727 20613 net.cpp:150] Setting up loss\nI1212 06:17:18.956743 20613 net.cpp:157] Top shape: (1)\nI1212 06:17:18.956749 20613 net.cpp:160]     with loss weight 1\nI1212 06:17:18.956842 20613 net.cpp:165] Memory required for data: 1450544508\nI1212 06:17:18.956851 20613 net.cpp:226] loss needs backward computation.\nI1212 06:17:18.956859 20613 net.cpp:228] accuracy does not need backward computation.\nI1212 06:17:18.956866 20613 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:17:18.956871 20613 net.cpp:226] post_FC needs backward 
computation.\nI1212 06:17:18.956876 20613 net.cpp:226] post_pool needs backward computation.\nI1212 06:17:18.956881 20613 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:17:18.956887 20613 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:17:18.956892 20613 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:17:18.956897 20613 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:17:18.956902 20613 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:17:18.956907 20613 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:17:18.956912 20613 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:17:18.956917 20613 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:17:18.956921 20613 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:17:18.956928 20613 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:17:18.956933 20613 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:17:18.956938 20613 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:17:18.956943 20613 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:17:18.956948 20613 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:17:18.956954 20613 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:17:18.956959 20613 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:17:18.956964 20613 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:17:18.956969 20613 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:17:18.956974 20613 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:17:18.956979 20613 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:17:18.956984 20613 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:17:18.956990 20613 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI1212 06:17:18.956995 20613 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI1212 06:17:18.957000 20613 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:17:18.957006 20613 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:17:18.957018 20613 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:17:18.957024 20613 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:17:18.957029 20613 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:17:18.957034 20613 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:17:18.957041 20613 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:17:18.957046 20613 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:17:18.957051 20613 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:17:18.957057 20613 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:17:18.957062 20613 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:17:18.957067 20613 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:17:18.957073 20613 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:17:18.957087 20613 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:17:18.957093 20613 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:17:18.957098 20613 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:17:18.957103 20613 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:17:18.957108 20613 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:17:18.957113 20613 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:17:18.957119 20613 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:17:18.957124 20613 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:17:18.957130 20613 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI1212 06:17:18.957135 20613 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:17:18.957140 20613 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:17:18.957146 20613 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:17:18.957151 20613 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:17:18.957157 20613 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:17:18.957162 20613 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:17:18.957167 20613 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:17:18.957173 20613 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:17:18.957178 20613 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:17:18.957185 20613 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:17:18.957190 20613 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:17:18.957195 20613 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:17:18.957201 20613 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:17:18.957206 20613 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:17:18.957211 20613 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:17:18.957216 20613 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:17:18.957221 20613 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:17:18.957227 20613 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:17:18.957232 20613 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:17:18.957238 20613 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:17:18.957243 20613 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:17:18.957248 20613 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:17:18.957253 
20613 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI1212 06:17:18.957258 20613 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:17:18.957264 20613 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:17:18.957269 20613 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:17:18.957275 20613 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:17:18.957281 20613 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:17:18.957286 20613 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:17:18.957293 20613 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:17:18.957298 20613 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:17:18.957303 20613 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:17:18.957307 20613 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:17:18.957314 20613 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:17:18.957322 20613 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:17:18.957327 20613 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:17:18.957334 20613 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:17:18.957340 20613 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:17:18.957350 20613 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:17:18.957356 20613 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:17:18.957362 20613 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:17:18.957367 20613 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:17:18.957373 20613 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:17:18.957378 20613 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:17:18.957383 20613 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 
06:17:18.957388 20613 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI1212 06:17:18.957394 20613 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:17:18.957401 20613 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:17:18.957406 20613 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:17:18.957410 20613 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:17:18.957417 20613 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:17:18.957422 20613 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:17:18.957427 20613 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:17:18.957432 20613 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:17:18.957438 20613 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:17:18.957443 20613 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:17:18.957449 20613 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:17:18.957454 20613 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:17:18.957460 20613 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:17:18.957465 20613 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:17:18.957471 20613 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:17:18.957478 20613 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:17:18.957482 20613 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:17:18.957489 20613 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:17:18.957494 20613 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:17:18.957499 20613 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:17:18.957504 20613 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:17:18.957509 20613 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 06:17:18.957515 20613 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:17:18.957520 20613 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:17:18.957526 20613 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:17:18.957532 20613 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:17:18.957537 20613 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:17:18.957543 20613 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:17:18.957548 20613 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:17:18.957553 20613 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:17:18.957559 20613 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:17:18.957564 20613 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:17:18.957571 20613 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:17:18.957576 20613 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:17:18.957581 20613 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:17:18.957587 20613 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:17:18.957592 20613 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:17:18.957597 20613 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:17:18.957602 20613 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:17:18.957607 20613 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:17:18.957625 20613 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:17:18.957633 20613 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:17:18.957638 20613 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:17:18.957644 20613 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI1212 06:17:18.957650 20613 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI1212 06:17:18.957655 20613 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:17:18.957660 20613 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:17:18.957666 20613 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:17:18.957672 20613 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:17:18.957677 20613 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:17:18.957682 20613 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:17:18.957689 20613 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:17:18.957695 20613 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:17:18.957700 20613 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:17:18.957713 20613 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:17:18.957720 20613 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:17:18.957726 20613 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:17:18.957731 20613 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:17:18.957736 20613 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:17:18.957741 20613 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:17:18.957747 20613 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:17:18.957752 20613 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:17:18.957758 20613 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:17:18.957763 20613 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:17:18.957770 20613 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:17:18.957775 20613 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:17:18.957782 20613 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI1212 06:17:18.957787 20613 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI1212 06:17:18.957792 20613 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:17:18.957798 20613 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:17:18.957803 20613 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:17:18.957808 20613 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:17:18.957814 20613 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:17:18.957820 20613 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:17:18.957826 20613 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:17:18.957831 20613 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:17:18.957837 20613 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:17:18.957842 20613 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:17:18.957849 20613 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:17:18.957854 20613 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:17:18.957859 20613 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:17:18.957864 20613 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:17:18.957870 20613 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:17:18.957876 20613 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:17:18.957882 20613 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:17:18.957887 20613 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:17:18.957895 20613 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:17:18.957904 20613 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:17:18.957911 20613 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:17:18.957916 20613 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:17:18.957922 20613 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:17:18.957927 20613 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:17:18.957933 20613 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:17:18.957938 20613 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:17:18.957944 20613 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:17:18.957950 20613 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:17:18.957955 20613 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:17:18.957962 20613 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:17:18.957967 20613 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:17:18.957973 20613 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:17:18.957978 20613 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:17:18.957984 20613 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:17:18.957989 20613 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:17:18.957994 20613 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:17:18.958000 20613 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:17:18.958006 20613 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:17:18.958012 20613 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:17:18.958019 20613 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:17:18.958024 20613 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:17:18.958029 20613 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:17:18.958035 20613 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:17:18.958040 20613 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 
06:17:18.958046 20613 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 06:17:18.958052 20613 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:17:18.958058 20613 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:17:18.958065 20613 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:17:18.958070 20613 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:17:18.958076 20613 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:17:18.958081 20613 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:17:18.958087 20613 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:17:18.958092 20613 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:17:18.958098 20613 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:17:18.958103 20613 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:17:18.958109 20613 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:17:18.958114 20613 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:17:18.958120 20613 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:17:18.958127 20613 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:17:18.958132 20613 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:17:18.958138 20613 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:17:18.958144 20613 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:17:18.958149 20613 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:17:18.958155 20613 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:17:18.958160 20613 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:17:18.958166 20613 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:17:18.958178 20613 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:17:18.958184 20613 net.cpp:226] L1_b5_relu needs backward computation.\nI1212 06:17:18.958189 20613 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:17:18.958196 20613 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:17:18.958201 20613 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:17:18.958207 20613 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:17:18.958214 20613 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:17:18.958220 20613 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:17:18.958225 20613 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:17:18.958230 20613 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:17:18.958236 20613 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:17:18.958242 20613 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:17:18.958248 20613 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:17:18.958254 20613 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:17:18.958261 20613 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:17:18.958266 20613 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:17:18.958271 20613 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:17:18.958277 20613 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:17:18.958282 20613 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:17:18.958288 20613 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:17:18.958294 20613 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:17:18.958300 20613 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:17:18.958305 20613 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI1212 06:17:18.958312 20613 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:17:18.958317 20613 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:17:18.958323 20613 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:17:18.958329 20613 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:17:18.958334 20613 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:17:18.958339 20613 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:17:18.958345 20613 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:17:18.958351 20613 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:17:18.958358 20613 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:17:18.958364 20613 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:17:18.958369 20613 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:17:18.958375 20613 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:17:18.958381 20613 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:17:18.958387 20613 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:17:18.958392 20613 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:17:18.958397 20613 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:17:18.958403 20613 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:17:18.958412 20613 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:17:18.958420 20613 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:17:18.958425 20613 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:17:18.958431 20613 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:17:18.958437 20613 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:17:18.958443 20613 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI1212 06:17:18.958449 20613 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 06:17:18.958461 20613 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:17:18.958467 20613 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:17:18.958472 20613 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:17:18.958478 20613 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:17:18.958483 20613 net.cpp:226] pre_relu needs backward computation.\nI1212 06:17:18.958489 20613 net.cpp:226] pre_scale needs backward computation.\nI1212 06:17:18.958494 20613 net.cpp:226] pre_bn needs backward computation.\nI1212 06:17:18.958499 20613 net.cpp:226] pre_conv needs backward computation.\nI1212 06:17:18.958506 20613 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:17:18.958513 20613 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:17:18.958518 20613 net.cpp:270] This network produces output accuracy\nI1212 06:17:18.958525 20613 net.cpp:270] This network produces output loss\nI1212 06:17:18.958858 20613 net.cpp:283] Network initialization done.\nI1212 06:17:18.967592 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:18.967644 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:18.967712 20613 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1212 06:17:18.968096 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1212 06:17:18.968114 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI1212 06:17:18.968127 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr1_bn\nI1212 06:17:18.968137 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr2_bn\nI1212 06:17:18.968147 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:17:18.968155 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:17:18.968165 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:17:18.968174 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:17:18.968185 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:17:18.968194 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:17:18.968204 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:17:18.968214 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:17:18.968224 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:17:18.968232 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:17:18.968241 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:17:18.968250 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:17:18.968261 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:17:18.968269 20613 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI1212 06:17:18.968279 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:17:18.968288 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:17:18.968308 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:17:18.968318 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:17:18.968331 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:17:18.968340 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:17:18.968350 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:17:18.968359 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:17:18.968369 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:17:18.968377 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:17:18.968387 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:17:18.968396 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:17:18.968406 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:17:18.968416 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:17:18.968426 20613 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI1212 06:17:18.968435 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:17:18.968444 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:17:18.968453 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:17:18.968462 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:17:18.968472 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:17:18.968482 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:17:18.968490 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:17:18.968502 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:17:18.968513 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:17:18.968521 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:17:18.968530 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:17:18.968539 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:17:18.968549 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:17:18.968559 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr1_bn\nI1212 06:17:18.968567 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr2_bn\nI1212 06:17:18.968576 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:17:18.968585 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:17:18.968602 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:17:18.968612 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:17:18.968631 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:17:18.968641 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:17:18.968650 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:17:18.968658 20613 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:17:18.970516 20613 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  
name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: 
\"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n  
  }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n   
 num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  
type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  
param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: 
\"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI1212 06:17:18.972162 20613 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:17:18.972406 20613 net.cpp:100] Creating Layer dataLayer\nI1212 06:17:18.972424 20613 net.cpp:408] dataLayer -> data_top\nI1212 06:17:18.972440 20613 net.cpp:408] dataLayer -> label\nI1212 06:17:18.972453 20613 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:17:19.036694 20621 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1212 06:17:19.036996 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:19.045092 20613 net.cpp:150] Setting up dataLayer\nI1212 06:17:19.045114 20613 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:17:19.045121 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:19.045126 20613 net.cpp:165] Memory required for data: 1536500\nI1212 06:17:19.045133 20613 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:17:19.045145 20613 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:17:19.045150 20613 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:17:19.045182 20613 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:17:19.045200 20613 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:17:19.045331 20613 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:17:19.045344 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:19.045351 20613 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:19.045358 20613 net.cpp:165] Memory required for data: 1537500\nI1212 06:17:19.045364 20613 
layer_factory.hpp:77] Creating layer pre_conv\nI1212 06:17:19.045382 20613 net.cpp:100] Creating Layer pre_conv\nI1212 06:17:19.045388 20613 net.cpp:434] pre_conv <- data_top\nI1212 06:17:19.045454 20613 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:17:19.045852 20613 net.cpp:150] Setting up pre_conv\nI1212 06:17:19.045871 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.045886 20613 net.cpp:165] Memory required for data: 9729500\nI1212 06:17:19.045902 20613 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:17:19.045912 20613 net.cpp:100] Creating Layer pre_bn\nI1212 06:17:19.045918 20613 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:17:19.045931 20613 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:17:19.046290 20613 net.cpp:150] Setting up pre_bn\nI1212 06:17:19.046308 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.046315 20613 net.cpp:165] Memory required for data: 17921500\nI1212 06:17:19.046332 20613 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:19.046342 20613 net.cpp:100] Creating Layer pre_scale\nI1212 06:17:19.046349 20613 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:17:19.046357 20613 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:17:19.046420 20613 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:19.046610 20613 net.cpp:150] Setting up pre_scale\nI1212 06:17:19.046630 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.046635 20613 net.cpp:165] Memory required for data: 26113500\nI1212 06:17:19.046645 20613 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:17:19.046658 20613 net.cpp:100] Creating Layer pre_relu\nI1212 06:17:19.046663 20613 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:17:19.046671 20613 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:17:19.046680 20613 net.cpp:150] Setting up pre_relu\nI1212 06:17:19.046690 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.046695 20613 net.cpp:165] Memory required for data: 
34305500\nI1212 06:17:19.046700 20613 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1212 06:17:19.046711 20613 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:17:19.046716 20613 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:17:19.046725 20613 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:17:19.046736 20613 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:17:19.046795 20613 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:17:19.046805 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.046813 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.046816 20613 net.cpp:165] Memory required for data: 50689500\nI1212 06:17:19.046823 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:17:19.046836 20613 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:17:19.046844 20613 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:17:19.046852 20613 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:17:19.047248 20613 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:17:19.047263 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.047268 20613 net.cpp:165] Memory required for data: 58881500\nI1212 06:17:19.047281 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:17:19.047300 20613 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:17:19.047310 20613 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:17:19.047323 20613 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:17:19.047875 20613 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:17:19.047890 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.047899 20613 net.cpp:165] Memory required for data: 67073500\nI1212 06:17:19.047910 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:19.047919 20613 net.cpp:100] Creating 
Layer L1_b1_cbr1_scale\nI1212 06:17:19.047925 20613 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:17:19.047936 20613 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.048007 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:19.048190 20613 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:17:19.048203 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.048208 20613 net.cpp:165] Memory required for data: 75265500\nI1212 06:17:19.048218 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:17:19.048235 20613 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:17:19.048243 20613 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:17:19.048254 20613 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.048264 20613 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:17:19.048270 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.048275 20613 net.cpp:165] Memory required for data: 83457500\nI1212 06:17:19.048280 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:17:19.048297 20613 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:17:19.048305 20613 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:17:19.048316 20613 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:17:19.048722 20613 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:17:19.048738 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.048744 20613 net.cpp:165] Memory required for data: 91649500\nI1212 06:17:19.048753 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:17:19.048765 20613 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:17:19.048773 20613 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:17:19.048780 20613 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:17:19.049053 20613 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:17:19.049067 20613 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049072 20613 net.cpp:165] Memory required for data: 99841500\nI1212 06:17:19.049087 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:19.049095 20613 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:17:19.049101 20613 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:17:19.049116 20613 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:17:19.049175 20613 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:19.049331 20613 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:17:19.049345 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049350 20613 net.cpp:165] Memory required for data: 108033500\nI1212 06:17:19.049360 20613 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:17:19.049371 20613 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:17:19.049377 20613 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:17:19.049384 20613 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:17:19.049392 20613 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:17:19.049428 20613 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:17:19.049438 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049443 20613 net.cpp:165] Memory required for data: 116225500\nI1212 06:17:19.049448 20613 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:17:19.049456 20613 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:17:19.049461 20613 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:17:19.049468 20613 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:17:19.049477 20613 net.cpp:150] Setting up L1_b1_relu\nI1212 06:17:19.049484 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049489 20613 net.cpp:165] Memory required for data: 124417500\nI1212 06:17:19.049494 20613 layer_factory.hpp:77] Creating layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:19.049504 20613 net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:19.049509 20613 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:17:19.049518 20613 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:19.049528 20613 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:19.049579 20613 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:19.049589 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049605 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.049610 20613 net.cpp:165] Memory required for data: 140801500\nI1212 06:17:19.049615 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:17:19.049638 20613 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:17:19.049644 20613 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:19.049654 20613 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:17:19.050010 20613 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:17:19.050025 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.050030 20613 net.cpp:165] Memory required for data: 148993500\nI1212 06:17:19.050040 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:17:19.050051 20613 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:17:19.050057 20613 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:17:19.050066 20613 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:17:19.050343 20613 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:17:19.050356 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.050361 20613 net.cpp:165] Memory required for data: 157185500\nI1212 06:17:19.050374 20613 layer_factory.hpp:77] Creating layer 
L1_b2_cbr1_scale\nI1212 06:17:19.050382 20613 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1212 06:17:19.050387 20613 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:17:19.050398 20613 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.050457 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:19.050657 20613 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:17:19.050671 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.050676 20613 net.cpp:165] Memory required for data: 165377500\nI1212 06:17:19.050686 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:17:19.050695 20613 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:17:19.050700 20613 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:17:19.050709 20613 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.050721 20613 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:17:19.050729 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.050734 20613 net.cpp:165] Memory required for data: 173569500\nI1212 06:17:19.050739 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:17:19.050750 20613 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:17:19.050763 20613 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:17:19.050773 20613 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:17:19.051280 20613 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:17:19.051295 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.051301 20613 net.cpp:165] Memory required for data: 181761500\nI1212 06:17:19.051311 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:17:19.051323 20613 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:17:19.051329 20613 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:17:19.051340 20613 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:17:19.051611 20613 
net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:17:19.051631 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.051636 20613 net.cpp:165] Memory required for data: 189953500\nI1212 06:17:19.051656 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:19.051666 20613 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:17:19.051671 20613 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:17:19.051682 20613 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:17:19.051741 20613 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:19.051901 20613 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:17:19.051915 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.051920 20613 net.cpp:165] Memory required for data: 198145500\nI1212 06:17:19.051929 20613 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:17:19.051949 20613 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:17:19.051955 20613 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:17:19.051962 20613 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:19.051970 20613 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:17:19.052008 20613 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:17:19.052018 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052023 20613 net.cpp:165] Memory required for data: 206337500\nI1212 06:17:19.052028 20613 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:17:19.052037 20613 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:17:19.052042 20613 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:17:19.052052 20613 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:17:19.052062 20613 net.cpp:150] Setting up L1_b2_relu\nI1212 06:17:19.052069 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052073 20613 net.cpp:165] Memory required for 
data: 214529500\nI1212 06:17:19.052078 20613 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:19.052086 20613 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:19.052091 20613 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:17:19.052099 20613 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:19.052109 20613 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:19.052158 20613 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:19.052167 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052175 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052178 20613 net.cpp:165] Memory required for data: 230913500\nI1212 06:17:19.052184 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:17:19.052196 20613 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:17:19.052201 20613 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:19.052213 20613 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:17:19.052561 20613 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:17:19.052577 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052582 20613 net.cpp:165] Memory required for data: 239105500\nI1212 06:17:19.052590 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:17:19.052600 20613 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:17:19.052605 20613 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:17:19.052623 20613 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:17:19.052901 20613 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:17:19.052917 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.052923 20613 net.cpp:165] Memory required for data: 
247297500\nI1212 06:17:19.052934 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:19.052943 20613 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:17:19.052949 20613 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:17:19.052956 20613 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.053014 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:19.053174 20613 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:17:19.053186 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.053191 20613 net.cpp:165] Memory required for data: 255489500\nI1212 06:17:19.053200 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:17:19.053215 20613 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:17:19.053220 20613 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:17:19.053228 20613 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.053237 20613 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:17:19.053251 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.053256 20613 net.cpp:165] Memory required for data: 263681500\nI1212 06:17:19.053261 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:17:19.053275 20613 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:17:19.053282 20613 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:17:19.053293 20613 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:17:19.053668 20613 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:17:19.053683 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.053689 20613 net.cpp:165] Memory required for data: 271873500\nI1212 06:17:19.053697 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:17:19.053714 20613 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:17:19.053721 20613 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:17:19.053730 20613 
net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:17:19.054106 20613 net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:17:19.054126 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.054131 20613 net.cpp:165] Memory required for data: 280065500\nI1212 06:17:19.054141 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:19.054150 20613 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:17:19.054157 20613 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:17:19.054164 20613 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:17:19.054222 20613 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:19.054385 20613 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:17:19.054399 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.054404 20613 net.cpp:165] Memory required for data: 288257500\nI1212 06:17:19.054412 20613 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:17:19.054425 20613 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:17:19.054430 20613 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:17:19.054437 20613 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:19.054448 20613 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:17:19.054481 20613 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:17:19.054491 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.054496 20613 net.cpp:165] Memory required for data: 296449500\nI1212 06:17:19.054500 20613 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:17:19.054512 20613 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:17:19.054517 20613 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:17:19.054524 20613 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:17:19.054533 20613 net.cpp:150] Setting up L1_b3_relu\nI1212 06:17:19.054540 20613 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:17:19.054545 20613 net.cpp:165] Memory required for data: 304641500\nI1212 06:17:19.054550 20613 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:19.054558 20613 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:19.054563 20613 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:17:19.054569 20613 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:19.054579 20613 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:19.054637 20613 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:19.054651 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.054657 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.054662 20613 net.cpp:165] Memory required for data: 321025500\nI1212 06:17:19.054667 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:17:19.054682 20613 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:17:19.054688 20613 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:19.054705 20613 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:17:19.055057 20613 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:17:19.055071 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.055076 20613 net.cpp:165] Memory required for data: 329217500\nI1212 06:17:19.055093 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:17:19.055104 20613 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:17:19.055111 20613 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:17:19.055119 20613 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:17:19.055392 20613 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:17:19.055408 20613 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:19.055413 20613 net.cpp:165] Memory required for data: 337409500\nI1212 06:17:19.055423 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:19.055431 20613 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:17:19.055438 20613 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:17:19.055445 20613 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.055501 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:19.055675 20613 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:17:19.055688 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.055693 20613 net.cpp:165] Memory required for data: 345601500\nI1212 06:17:19.055702 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:17:19.055714 20613 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:17:19.055721 20613 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:17:19.055732 20613 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.055745 20613 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:17:19.055752 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.055757 20613 net.cpp:165] Memory required for data: 353793500\nI1212 06:17:19.055763 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:17:19.055773 20613 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:17:19.055779 20613 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:17:19.055794 20613 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:17:19.056162 20613 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:17:19.056180 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.056185 20613 net.cpp:165] Memory required for data: 361985500\nI1212 06:17:19.056193 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:17:19.056203 20613 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:17:19.056210 20613 
net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:17:19.056231 20613 net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:17:19.056540 20613 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:17:19.056556 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.056562 20613 net.cpp:165] Memory required for data: 370177500\nI1212 06:17:19.056576 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:19.056599 20613 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:17:19.056607 20613 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:17:19.056627 20613 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:17:19.056695 20613 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:19.056866 20613 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:17:19.056882 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.056887 20613 net.cpp:165] Memory required for data: 378369500\nI1212 06:17:19.056897 20613 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:17:19.056910 20613 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:17:19.056917 20613 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:17:19.056926 20613 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:19.056946 20613 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:17:19.056982 20613 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:17:19.056991 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.056996 20613 net.cpp:165] Memory required for data: 386561500\nI1212 06:17:19.057005 20613 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:17:19.057018 20613 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:17:19.057024 20613 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:17:19.057031 20613 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:17:19.057041 20613 net.cpp:150] 
Setting up L1_b4_relu\nI1212 06:17:19.057049 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.057052 20613 net.cpp:165] Memory required for data: 394753500\nI1212 06:17:19.057060 20613 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:19.057067 20613 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:19.057073 20613 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:17:19.057081 20613 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:19.057091 20613 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:19.057145 20613 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:19.057159 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.057166 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.057171 20613 net.cpp:165] Memory required for data: 411137500\nI1212 06:17:19.057176 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:17:19.057191 20613 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:17:19.057199 20613 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:19.057207 20613 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:17:19.057595 20613 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:17:19.057611 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.057616 20613 net.cpp:165] Memory required for data: 419329500\nI1212 06:17:19.057656 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:17:19.057669 20613 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:17:19.057677 20613 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:17:19.057685 20613 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:17:19.058004 20613 net.cpp:150] Setting up 
L1_b5_cbr1_bn\nI1212 06:17:19.058024 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.058029 20613 net.cpp:165] Memory required for data: 427521500\nI1212 06:17:19.058042 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:19.058050 20613 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:17:19.058056 20613 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:17:19.058064 20613 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.058128 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:19.058320 20613 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:17:19.058334 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.058341 20613 net.cpp:165] Memory required for data: 435713500\nI1212 06:17:19.058351 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:17:19.058363 20613 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:17:19.058369 20613 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:17:19.058377 20613 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.058389 20613 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:17:19.058398 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.058403 20613 net.cpp:165] Memory required for data: 443905500\nI1212 06:17:19.058408 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:17:19.058421 20613 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:17:19.058435 20613 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:17:19.058449 20613 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:17:19.058861 20613 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:17:19.058878 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.058883 20613 net.cpp:165] Memory required for data: 452097500\nI1212 06:17:19.058893 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:17:19.058902 20613 
net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:17:19.058908 20613 net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:17:19.058917 20613 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:17:19.059226 20613 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:17:19.059240 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059245 20613 net.cpp:165] Memory required for data: 460289500\nI1212 06:17:19.059257 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:19.059278 20613 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:17:19.059286 20613 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:17:19.059295 20613 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:17:19.059366 20613 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:19.059546 20613 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:17:19.059562 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059567 20613 net.cpp:165] Memory required for data: 468481500\nI1212 06:17:19.059576 20613 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:17:19.059586 20613 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:17:19.059592 20613 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:17:19.059602 20613 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:19.059614 20613 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:17:19.059659 20613 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:17:19.059671 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059679 20613 net.cpp:165] Memory required for data: 476673500\nI1212 06:17:19.059684 20613 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:17:19.059691 20613 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:17:19.059697 20613 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:17:19.059705 20613 net.cpp:395] L1_b5_relu -> 
L1_b5_sum_eltwise_top (in-place)\nI1212 06:17:19.059715 20613 net.cpp:150] Setting up L1_b5_relu\nI1212 06:17:19.059723 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059728 20613 net.cpp:165] Memory required for data: 484865500\nI1212 06:17:19.059734 20613 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:19.059746 20613 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:19.059752 20613 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:17:19.059761 20613 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:19.059773 20613 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:19.059826 20613 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:19.059836 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059846 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.059852 20613 net.cpp:165] Memory required for data: 501249500\nI1212 06:17:19.059857 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:17:19.059870 20613 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:17:19.059875 20613 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:19.059888 20613 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:17:19.060276 20613 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:17:19.060290 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.060295 20613 net.cpp:165] Memory required for data: 509441500\nI1212 06:17:19.060312 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:17:19.060322 20613 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:17:19.060330 20613 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:17:19.060339 20613 net.cpp:408] L1_b6_cbr1_bn -> 
L1_b6_cbr1_bn_top\nI1212 06:17:19.060655 20613 net.cpp:150] Setting up L1_b6_cbr1_bn\nI1212 06:17:19.060669 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.060674 20613 net.cpp:165] Memory required for data: 517633500\nI1212 06:17:19.060688 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:19.060699 20613 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:17:19.060706 20613 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:17:19.060714 20613 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.060786 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:19.060964 20613 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:17:19.060977 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.060983 20613 net.cpp:165] Memory required for data: 525825500\nI1212 06:17:19.060992 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:17:19.061000 20613 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:17:19.061009 20613 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:17:19.061022 20613 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.061031 20613 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:17:19.061039 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.061043 20613 net.cpp:165] Memory required for data: 534017500\nI1212 06:17:19.061048 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:17:19.061065 20613 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:17:19.061072 20613 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:17:19.061081 20613 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:17:19.061491 20613 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:17:19.061508 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.061514 20613 net.cpp:165] Memory required for data: 542209500\nI1212 06:17:19.061523 20613 
layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:17:19.061537 20613 net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:17:19.061544 20613 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:17:19.061554 20613 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:17:19.061899 20613 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:17:19.061914 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.061920 20613 net.cpp:165] Memory required for data: 550401500\nI1212 06:17:19.061933 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:19.061944 20613 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:17:19.061949 20613 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:17:19.061960 20613 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:17:19.062029 20613 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:19.062203 20613 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:17:19.062222 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.062227 20613 net.cpp:165] Memory required for data: 558593500\nI1212 06:17:19.062237 20613 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:17:19.062256 20613 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:17:19.062264 20613 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:17:19.062271 20613 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:19.062279 20613 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:17:19.062324 20613 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:17:19.062336 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.062341 20613 net.cpp:165] Memory required for data: 566785500\nI1212 06:17:19.062347 20613 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:17:19.062364 20613 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:17:19.062371 20613 net.cpp:434] L1_b6_relu <- 
L1_b6_sum_eltwise_top\nI1212 06:17:19.062382 20613 net.cpp:395] L1_b6_relu -> L1_b6_sum_eltwise_top (in-place)\nI1212 06:17:19.062394 20613 net.cpp:150] Setting up L1_b6_relu\nI1212 06:17:19.062402 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.062407 20613 net.cpp:165] Memory required for data: 574977500\nI1212 06:17:19.062412 20613 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:19.062419 20613 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:19.062425 20613 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:17:19.062433 20613 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:19.062446 20613 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:19.062503 20613 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:19.062516 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.062522 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.062527 20613 net.cpp:165] Memory required for data: 591361500\nI1212 06:17:19.062532 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:17:19.062546 20613 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:17:19.062553 20613 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:19.062566 20613 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:17:19.062980 20613 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:17:19.062995 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.063001 20613 net.cpp:165] Memory required for data: 599553500\nI1212 06:17:19.063014 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:17:19.063024 20613 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:17:19.063030 20613 net.cpp:434] L1_b7_cbr1_bn <- 
L1_b7_cbr1_conv_top\nI1212 06:17:19.063041 20613 net.cpp:408] L1_b7_cbr1_bn -> L1_b7_cbr1_bn_top\nI1212 06:17:19.063364 20613 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:17:19.063380 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.063385 20613 net.cpp:165] Memory required for data: 607745500\nI1212 06:17:19.063395 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:19.063406 20613 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:17:19.063413 20613 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:17:19.063421 20613 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.063488 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:19.063688 20613 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:17:19.063704 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.063709 20613 net.cpp:165] Memory required for data: 615937500\nI1212 06:17:19.063719 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:17:19.063726 20613 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:17:19.063732 20613 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:17:19.063746 20613 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.063757 20613 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:17:19.063765 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.063769 20613 net.cpp:165] Memory required for data: 624129500\nI1212 06:17:19.063774 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:17:19.063791 20613 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:17:19.063798 20613 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:17:19.063810 20613 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:17:19.064237 20613 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:17:19.064252 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.064257 20613 
net.cpp:165] Memory required for data: 632321500\nI1212 06:17:19.064273 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:17:19.064283 20613 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:17:19.064290 20613 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:17:19.064298 20613 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:17:19.064622 20613 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:17:19.064640 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.064646 20613 net.cpp:165] Memory required for data: 640513500\nI1212 06:17:19.064656 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:19.064668 20613 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:17:19.064677 20613 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:17:19.064687 20613 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:17:19.064755 20613 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:19.064957 20613 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:17:19.064975 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.064980 20613 net.cpp:165] Memory required for data: 648705500\nI1212 06:17:19.064988 20613 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:17:19.064997 20613 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:17:19.065003 20613 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:17:19.065011 20613 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:19.065021 20613 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:17:19.065062 20613 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:17:19.065076 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.065084 20613 net.cpp:165] Memory required for data: 656897500\nI1212 06:17:19.065090 20613 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:17:19.065099 20613 net.cpp:100] Creating 
Layer L1_b7_relu\nI1212 06:17:19.065104 20613 net.cpp:434] L1_b7_relu <- L1_b7_sum_eltwise_top\nI1212 06:17:19.065111 20613 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:17:19.065120 20613 net.cpp:150] Setting up L1_b7_relu\nI1212 06:17:19.065130 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.065135 20613 net.cpp:165] Memory required for data: 665089500\nI1212 06:17:19.065140 20613 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:19.065150 20613 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:19.065157 20613 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:17:19.065165 20613 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:19.065176 20613 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:19.065237 20613 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:19.065250 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.065258 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.065261 20613 net.cpp:165] Memory required for data: 681473500\nI1212 06:17:19.065268 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:17:19.065279 20613 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:17:19.065287 20613 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:19.065301 20613 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:17:19.065697 20613 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:17:19.065712 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.065717 20613 net.cpp:165] Memory required for data: 689665500\nI1212 06:17:19.065727 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:17:19.065737 20613 net.cpp:100] Creating Layer 
L1_b8_cbr1_bn\nI1212 06:17:19.065742 20613 net.cpp:434] L1_b8_cbr1_bn <- L1_b8_cbr1_conv_top\nI1212 06:17:19.065752 20613 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:17:19.066046 20613 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:17:19.066061 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.066066 20613 net.cpp:165] Memory required for data: 697857500\nI1212 06:17:19.066076 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:19.066088 20613 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:17:19.066095 20613 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:17:19.066103 20613 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.066166 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:19.066378 20613 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:17:19.066404 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.066424 20613 net.cpp:165] Memory required for data: 706049500\nI1212 06:17:19.066434 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:17:19.066442 20613 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:17:19.066448 20613 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:17:19.066459 20613 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.066469 20613 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:17:19.066476 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.066481 20613 net.cpp:165] Memory required for data: 714241500\nI1212 06:17:19.066485 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:17:19.066500 20613 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:17:19.066506 20613 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:17:19.066515 20613 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:17:19.066884 20613 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:17:19.066898 20613 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.066903 20613 net.cpp:165] Memory required for data: 722433500\nI1212 06:17:19.066912 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:17:19.066928 20613 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:17:19.066936 20613 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:17:19.066944 20613 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:17:19.067222 20613 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:17:19.067236 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067241 20613 net.cpp:165] Memory required for data: 730625500\nI1212 06:17:19.067251 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:19.067260 20613 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:17:19.067266 20613 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:17:19.067277 20613 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:17:19.067337 20613 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:19.067497 20613 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:17:19.067512 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067517 20613 net.cpp:165] Memory required for data: 738817500\nI1212 06:17:19.067525 20613 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:17:19.067533 20613 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:17:19.067539 20613 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:17:19.067546 20613 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:19.067558 20613 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:17:19.067592 20613 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:17:19.067605 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067610 20613 net.cpp:165] Memory required for data: 747009500\nI1212 06:17:19.067615 20613 layer_factory.hpp:77] Creating layer 
L1_b8_relu\nI1212 06:17:19.067629 20613 net.cpp:100] Creating Layer L1_b8_relu\nI1212 06:17:19.067636 20613 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:17:19.067642 20613 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:17:19.067652 20613 net.cpp:150] Setting up L1_b8_relu\nI1212 06:17:19.067667 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067672 20613 net.cpp:165] Memory required for data: 755201500\nI1212 06:17:19.067677 20613 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:19.067687 20613 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:19.067692 20613 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:17:19.067700 20613 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:19.067710 20613 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:19.067762 20613 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:19.067775 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067781 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.067785 20613 net.cpp:165] Memory required for data: 771585500\nI1212 06:17:19.067791 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:17:19.067801 20613 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:17:19.067808 20613 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:19.067821 20613 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:17:19.068187 20613 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:17:19.068202 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.068207 20613 net.cpp:165] Memory required for data: 779777500\nI1212 06:17:19.068217 20613 layer_factory.hpp:77] Creating layer 
L1_b9_cbr1_bn\nI1212 06:17:19.068230 20613 net.cpp:100] Creating Layer L1_b9_cbr1_bn\nI1212 06:17:19.068238 20613 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:17:19.068246 20613 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:17:19.068533 20613 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:17:19.068547 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.068552 20613 net.cpp:165] Memory required for data: 787969500\nI1212 06:17:19.068562 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:19.068570 20613 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:17:19.068578 20613 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:17:19.068584 20613 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.068651 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:19.068815 20613 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:17:19.068828 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.068833 20613 net.cpp:165] Memory required for data: 796161500\nI1212 06:17:19.068843 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:17:19.068851 20613 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:17:19.068857 20613 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:17:19.068869 20613 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.068879 20613 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:17:19.068886 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.068892 20613 net.cpp:165] Memory required for data: 804353500\nI1212 06:17:19.068895 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:17:19.068907 20613 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:17:19.068912 20613 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:17:19.068924 20613 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:17:19.069279 20613 
net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:17:19.069293 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.069298 20613 net.cpp:165] Memory required for data: 812545500\nI1212 06:17:19.069308 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:17:19.069317 20613 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:17:19.069324 20613 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:17:19.069334 20613 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:17:19.069625 20613 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:17:19.069643 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.069648 20613 net.cpp:165] Memory required for data: 820737500\nI1212 06:17:19.069679 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:19.069691 20613 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:17:19.069699 20613 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:17:19.069705 20613 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:17:19.069766 20613 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:19.069934 20613 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:17:19.069947 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.069952 20613 net.cpp:165] Memory required for data: 828929500\nI1212 06:17:19.069962 20613 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:17:19.069970 20613 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:17:19.069977 20613 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:17:19.069984 20613 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:19.069994 20613 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:17:19.070029 20613 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:17:19.070039 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.070044 20613 net.cpp:165] Memory required for 
data: 837121500\nI1212 06:17:19.070050 20613 layer_factory.hpp:77] Creating layer L1_b9_relu\nI1212 06:17:19.070057 20613 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:17:19.070063 20613 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:17:19.070073 20613 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:17:19.070082 20613 net.cpp:150] Setting up L1_b9_relu\nI1212 06:17:19.070089 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.070094 20613 net.cpp:165] Memory required for data: 845313500\nI1212 06:17:19.070099 20613 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:19.070106 20613 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:19.070111 20613 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:17:19.070122 20613 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:19.070132 20613 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:19.070183 20613 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:19.070194 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.070201 20613 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:19.070206 20613 net.cpp:165] Memory required for data: 861697500\nI1212 06:17:19.070211 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:17:19.070224 20613 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:17:19.070230 20613 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:19.070427 20613 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:17:19.070813 20613 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:17:19.070828 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.070834 20613 net.cpp:165] Memory required for data: 
863745500\nI1212 06:17:19.070843 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_bn\nI1212 06:17:19.070853 20613 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:17:19.070859 20613 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:17:19.070878 20613 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:17:19.071149 20613 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:17:19.071162 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.071167 20613 net.cpp:165] Memory required for data: 865793500\nI1212 06:17:19.071178 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:19.071197 20613 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:17:19.071204 20613 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:17:19.071213 20613 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.071272 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:19.071434 20613 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:17:19.071446 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.071451 20613 net.cpp:165] Memory required for data: 867841500\nI1212 06:17:19.071461 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:17:19.071472 20613 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:17:19.071480 20613 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:17:19.071486 20613 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.071496 20613 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:17:19.071503 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.071507 20613 net.cpp:165] Memory required for data: 869889500\nI1212 06:17:19.071512 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:17:19.071527 20613 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:17:19.071533 20613 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:17:19.071545 20613 net.cpp:408] 
L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:17:19.071913 20613 net.cpp:150] Setting up L2_b1_cbr2_conv\nI1212 06:17:19.071928 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.071933 20613 net.cpp:165] Memory required for data: 871937500\nI1212 06:17:19.071943 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:17:19.071952 20613 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:17:19.071959 20613 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:17:19.071972 20613 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:17:19.072244 20613 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:17:19.072258 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.072263 20613 net.cpp:165] Memory required for data: 873985500\nI1212 06:17:19.072273 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:19.072285 20613 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:17:19.072293 20613 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:17:19.072299 20613 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:17:19.072360 20613 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:19.072521 20613 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:17:19.072535 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.072540 20613 net.cpp:165] Memory required for data: 876033500\nI1212 06:17:19.072548 20613 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:17:19.072561 20613 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:17:19.072568 20613 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:19.072576 20613 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:17:19.072610 20613 net.cpp:150] Setting up L2_b1_pool\nI1212 06:17:19.072628 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.072633 20613 net.cpp:165] Memory required for data: 878081500\nI1212 06:17:19.072639 20613 
layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:17:19.072649 20613 net.cpp:100] Creating Layer L2_b1_sum_eltwise\nI1212 06:17:19.072654 20613 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:17:19.072661 20613 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:17:19.072672 20613 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:17:19.072708 20613 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:17:19.072718 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.072723 20613 net.cpp:165] Memory required for data: 880129500\nI1212 06:17:19.072728 20613 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:17:19.072736 20613 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:17:19.072741 20613 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:17:19.072757 20613 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:17:19.072767 20613 net.cpp:150] Setting up L2_b1_relu\nI1212 06:17:19.072773 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.072778 20613 net.cpp:165] Memory required for data: 882177500\nI1212 06:17:19.072783 20613 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:17:19.072795 20613 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:17:19.072803 20613 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:17:19.075116 20613 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:17:19.075135 20613 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:19.075141 20613 net.cpp:165] Memory required for data: 884225500\nI1212 06:17:19.075147 20613 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:17:19.075156 20613 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:17:19.075163 20613 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:17:19.075171 20613 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:17:19.075182 20613 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:17:19.075227 20613 net.cpp:150] Setting up 
L2_b1_concat0\nI1212 06:17:19.075242 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.075248 20613 net.cpp:165] Memory required for data: 888321500\nI1212 06:17:19.075253 20613 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:19.075260 20613 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:19.075266 20613 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:17:19.075273 20613 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:19.075284 20613 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:19.075337 20613 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:19.075350 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.075356 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.075361 20613 net.cpp:165] Memory required for data: 896513500\nI1212 06:17:19.075366 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:17:19.075381 20613 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:17:19.075387 20613 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:19.075397 20613 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:17:19.075913 20613 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:17:19.075928 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.075933 20613 net.cpp:165] Memory required for data: 900609500\nI1212 06:17:19.075942 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:17:19.075955 20613 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:17:19.075963 20613 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:17:19.075973 20613 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:17:19.076247 20613 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:17:19.076261 20613 net.cpp:157] Top shape: 
125 32 16 16 (1024000)\nI1212 06:17:19.076266 20613 net.cpp:165] Memory required for data: 904705500\nI1212 06:17:19.076277 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:19.076285 20613 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:17:19.076292 20613 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:17:19.076299 20613 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.076361 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:19.076517 20613 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:17:19.076534 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.076539 20613 net.cpp:165] Memory required for data: 908801500\nI1212 06:17:19.076548 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:17:19.076556 20613 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:17:19.076562 20613 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:17:19.076578 20613 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.076588 20613 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:17:19.076596 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.076601 20613 net.cpp:165] Memory required for data: 912897500\nI1212 06:17:19.076606 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:17:19.076625 20613 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:17:19.076633 20613 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:17:19.076645 20613 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:17:19.077145 20613 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:17:19.077159 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.077165 20613 net.cpp:165] Memory required for data: 916993500\nI1212 06:17:19.077174 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:17:19.077186 20613 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:17:19.077193 
20613 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:17:19.077204 20613 net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:17:19.077474 20613 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:17:19.077487 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.077492 20613 net.cpp:165] Memory required for data: 921089500\nI1212 06:17:19.077503 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:19.077512 20613 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:17:19.077518 20613 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:17:19.077527 20613 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:17:19.077589 20613 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:19.077754 20613 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:17:19.077769 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.077774 20613 net.cpp:165] Memory required for data: 925185500\nI1212 06:17:19.077782 20613 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:17:19.077795 20613 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:17:19.077801 20613 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:17:19.077808 20613 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:19.077816 20613 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:17:19.077846 20613 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:17:19.077854 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.077859 20613 net.cpp:165] Memory required for data: 929281500\nI1212 06:17:19.077864 20613 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:17:19.077877 20613 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:17:19.077883 20613 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:17:19.077890 20613 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:17:19.077899 20613 net.cpp:150] 
Setting up L2_b2_relu\nI1212 06:17:19.077906 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.077910 20613 net.cpp:165] Memory required for data: 933377500\nI1212 06:17:19.077915 20613 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:19.077922 20613 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:19.077929 20613 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:17:19.077935 20613 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:19.077945 20613 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:19.077996 20613 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:19.078007 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.078014 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.078018 20613 net.cpp:165] Memory required for data: 941569500\nI1212 06:17:19.078023 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:17:19.078045 20613 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:17:19.078053 20613 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:19.078063 20613 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:17:19.078569 20613 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:17:19.078584 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.078589 20613 net.cpp:165] Memory required for data: 945665500\nI1212 06:17:19.078598 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:17:19.078610 20613 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:17:19.078622 20613 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:17:19.078634 20613 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:17:19.078904 20613 net.cpp:150] Setting up 
L2_b3_cbr1_bn\nI1212 06:17:19.078917 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.078922 20613 net.cpp:165] Memory required for data: 949761500\nI1212 06:17:19.078933 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:19.078943 20613 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:17:19.078948 20613 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:17:19.078956 20613 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.079016 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:19.079174 20613 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:17:19.079186 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.079192 20613 net.cpp:165] Memory required for data: 953857500\nI1212 06:17:19.079201 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:17:19.079212 20613 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:17:19.079218 20613 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:17:19.079226 20613 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.079236 20613 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:17:19.079242 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.079246 20613 net.cpp:165] Memory required for data: 957953500\nI1212 06:17:19.079252 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:17:19.079269 20613 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:17:19.079275 20613 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:17:19.079286 20613 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:17:19.079792 20613 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:17:19.079807 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.079812 20613 net.cpp:165] Memory required for data: 962049500\nI1212 06:17:19.079820 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:17:19.079833 20613 
net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:17:19.079839 20613 net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:17:19.079850 20613 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:17:19.080121 20613 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:17:19.080137 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080142 20613 net.cpp:165] Memory required for data: 966145500\nI1212 06:17:19.080152 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:19.080162 20613 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:17:19.080168 20613 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:17:19.080175 20613 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:17:19.080233 20613 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:19.080396 20613 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:17:19.080410 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080415 20613 net.cpp:165] Memory required for data: 970241500\nI1212 06:17:19.080423 20613 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:17:19.080432 20613 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:17:19.080440 20613 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:17:19.080456 20613 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:19.080464 20613 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:17:19.080493 20613 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:17:19.080502 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080507 20613 net.cpp:165] Memory required for data: 974337500\nI1212 06:17:19.080513 20613 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:17:19.080535 20613 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:17:19.080543 20613 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:17:19.080550 20613 net.cpp:395] L2_b3_relu -> 
L2_b3_sum_eltwise_top (in-place)\nI1212 06:17:19.080559 20613 net.cpp:150] Setting up L2_b3_relu\nI1212 06:17:19.080566 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080571 20613 net.cpp:165] Memory required for data: 978433500\nI1212 06:17:19.080576 20613 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:19.080585 20613 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:19.080590 20613 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:17:19.080597 20613 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:19.080607 20613 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:19.080667 20613 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:19.080679 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080687 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.080690 20613 net.cpp:165] Memory required for data: 986625500\nI1212 06:17:19.080696 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:17:19.080708 20613 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:17:19.080714 20613 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:19.080726 20613 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:17:19.081223 20613 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:17:19.081238 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.081243 20613 net.cpp:165] Memory required for data: 990721500\nI1212 06:17:19.081252 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:17:19.081269 20613 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:17:19.081275 20613 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:17:19.081284 20613 net.cpp:408] L2_b4_cbr1_bn -> 
L2_b4_cbr1_bn_top\nI1212 06:17:19.081558 20613 net.cpp:150] Setting up L2_b4_cbr1_bn\nI1212 06:17:19.081571 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.081576 20613 net.cpp:165] Memory required for data: 994817500\nI1212 06:17:19.081588 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:19.081599 20613 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:17:19.081605 20613 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:17:19.081614 20613 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.081679 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:19.081840 20613 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:17:19.081853 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.081858 20613 net.cpp:165] Memory required for data: 998913500\nI1212 06:17:19.081867 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:17:19.081881 20613 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:17:19.081887 20613 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:17:19.081897 20613 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.081907 20613 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:17:19.081913 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.081918 20613 net.cpp:165] Memory required for data: 1003009500\nI1212 06:17:19.081923 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:17:19.081943 20613 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:17:19.081949 20613 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:17:19.081961 20613 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:17:19.082453 20613 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:17:19.082468 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.082473 20613 net.cpp:165] Memory required for data: 1007105500\nI1212 06:17:19.082482 20613 
layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:17:19.082492 20613 net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:17:19.082499 20613 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:17:19.082512 20613 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:17:19.082789 20613 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:17:19.082803 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.082808 20613 net.cpp:165] Memory required for data: 1011201500\nI1212 06:17:19.082819 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:19.082832 20613 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:17:19.082839 20613 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:17:19.082847 20613 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:17:19.082911 20613 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:19.083071 20613 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:17:19.083084 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083089 20613 net.cpp:165] Memory required for data: 1015297500\nI1212 06:17:19.083098 20613 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:17:19.083111 20613 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:17:19.083117 20613 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:17:19.083123 20613 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:19.083132 20613 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:17:19.083163 20613 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:17:19.083171 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083176 20613 net.cpp:165] Memory required for data: 1019393500\nI1212 06:17:19.083181 20613 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:17:19.083189 20613 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:17:19.083195 20613 net.cpp:434] L2_b4_relu <- 
L2_b4_sum_eltwise_top\nI1212 06:17:19.083205 20613 net.cpp:395] L2_b4_relu -> L2_b4_sum_eltwise_top (in-place)\nI1212 06:17:19.083214 20613 net.cpp:150] Setting up L2_b4_relu\nI1212 06:17:19.083222 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083226 20613 net.cpp:165] Memory required for data: 1023489500\nI1212 06:17:19.083231 20613 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:19.083240 20613 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:19.083245 20613 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:17:19.083252 20613 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:19.083262 20613 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:19.083312 20613 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:19.083324 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083330 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083335 20613 net.cpp:165] Memory required for data: 1031681500\nI1212 06:17:19.083340 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:17:19.083353 20613 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:17:19.083359 20613 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:19.083370 20613 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:17:19.083876 20613 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:17:19.083899 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.083904 20613 net.cpp:165] Memory required for data: 1035777500\nI1212 06:17:19.083912 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:17:19.083922 20613 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:17:19.083928 20613 net.cpp:434] L2_b5_cbr1_bn <- 
L2_b5_cbr1_conv_top\nI1212 06:17:19.083940 20613 net.cpp:408] L2_b5_cbr1_bn -> L2_b5_cbr1_bn_top\nI1212 06:17:19.084210 20613 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:17:19.084224 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.084229 20613 net.cpp:165] Memory required for data: 1039873500\nI1212 06:17:19.084245 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:19.084257 20613 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:17:19.084264 20613 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:17:19.084271 20613 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.084329 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:19.084492 20613 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:17:19.084506 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.084511 20613 net.cpp:165] Memory required for data: 1043969500\nI1212 06:17:19.084519 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:17:19.084530 20613 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:17:19.084537 20613 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:17:19.084544 20613 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.084553 20613 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:17:19.084560 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.084565 20613 net.cpp:165] Memory required for data: 1048065500\nI1212 06:17:19.084570 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:17:19.084587 20613 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:17:19.084594 20613 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:17:19.084605 20613 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:17:19.085108 20613 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:17:19.085122 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085127 20613 
net.cpp:165] Memory required for data: 1052161500\nI1212 06:17:19.085136 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:17:19.085145 20613 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:17:19.085151 20613 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:17:19.085163 20613 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:17:19.085444 20613 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:17:19.085458 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085463 20613 net.cpp:165] Memory required for data: 1056257500\nI1212 06:17:19.085472 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:19.085481 20613 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:17:19.085487 20613 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:17:19.085499 20613 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:17:19.085553 20613 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:19.085723 20613 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:17:19.085737 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085742 20613 net.cpp:165] Memory required for data: 1060353500\nI1212 06:17:19.085752 20613 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:17:19.085760 20613 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:17:19.085767 20613 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:17:19.085773 20613 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:19.085784 20613 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:17:19.085813 20613 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:17:19.085826 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085837 20613 net.cpp:165] Memory required for data: 1064449500\nI1212 06:17:19.085844 20613 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:17:19.085852 20613 net.cpp:100] Creating 
Layer L2_b5_relu\nI1212 06:17:19.085858 20613 net.cpp:434] L2_b5_relu <- L2_b5_sum_eltwise_top\nI1212 06:17:19.085865 20613 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:17:19.085875 20613 net.cpp:150] Setting up L2_b5_relu\nI1212 06:17:19.085881 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085886 20613 net.cpp:165] Memory required for data: 1068545500\nI1212 06:17:19.085891 20613 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:19.085901 20613 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:19.085907 20613 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:17:19.085916 20613 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:19.085925 20613 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:19.085978 20613 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:19.085989 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.085996 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.086000 20613 net.cpp:165] Memory required for data: 1076737500\nI1212 06:17:19.086006 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:17:19.086017 20613 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:17:19.086024 20613 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:19.086036 20613 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:17:19.086541 20613 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:17:19.086555 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.086560 20613 net.cpp:165] Memory required for data: 1080833500\nI1212 06:17:19.086570 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:17:19.086580 20613 net.cpp:100] Creating Layer 
L2_b6_cbr1_bn\nI1212 06:17:19.086585 20613 net.cpp:434] L2_b6_cbr1_bn <- L2_b6_cbr1_conv_top\nI1212 06:17:19.086614 20613 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:17:19.086900 20613 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:17:19.086915 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.086920 20613 net.cpp:165] Memory required for data: 1084929500\nI1212 06:17:19.086930 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:19.086942 20613 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:17:19.086948 20613 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:17:19.086956 20613 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.087015 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:19.087189 20613 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:17:19.087203 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.087208 20613 net.cpp:165] Memory required for data: 1089025500\nI1212 06:17:19.087218 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:17:19.087225 20613 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:17:19.087232 20613 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:17:19.087242 20613 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.087252 20613 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:17:19.087260 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.087265 20613 net.cpp:165] Memory required for data: 1093121500\nI1212 06:17:19.087270 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:17:19.087283 20613 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:17:19.087290 20613 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:17:19.087298 20613 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:17:19.087798 20613 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:17:19.087819 20613 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.087824 20613 net.cpp:165] Memory required for data: 1097217500\nI1212 06:17:19.087834 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:17:19.087846 20613 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:17:19.087853 20613 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:17:19.087862 20613 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:17:19.088135 20613 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:17:19.088148 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088153 20613 net.cpp:165] Memory required for data: 1101313500\nI1212 06:17:19.088165 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:19.088172 20613 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:17:19.088178 20613 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:17:19.088191 20613 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:17:19.088249 20613 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:19.088414 20613 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:17:19.088428 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088433 20613 net.cpp:165] Memory required for data: 1105409500\nI1212 06:17:19.088443 20613 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:17:19.088451 20613 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:17:19.088457 20613 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:17:19.088464 20613 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:19.088487 20613 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:17:19.088521 20613 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:17:19.088531 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088536 20613 net.cpp:165] Memory required for data: 1109505500\nI1212 06:17:19.088541 20613 layer_factory.hpp:77] 
Creating layer L2_b6_relu\nI1212 06:17:19.088552 20613 net.cpp:100] Creating Layer L2_b6_relu\nI1212 06:17:19.088558 20613 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:17:19.088567 20613 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:17:19.088575 20613 net.cpp:150] Setting up L2_b6_relu\nI1212 06:17:19.088582 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088587 20613 net.cpp:165] Memory required for data: 1113601500\nI1212 06:17:19.088593 20613 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:19.088603 20613 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:19.088608 20613 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:17:19.088615 20613 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:19.088632 20613 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:19.088681 20613 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:19.088697 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088704 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.088708 20613 net.cpp:165] Memory required for data: 1121793500\nI1212 06:17:19.088714 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:17:19.088726 20613 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:17:19.088732 20613 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:19.088742 20613 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:17:19.090268 20613 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:17:19.090286 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.090291 20613 net.cpp:165] Memory required for data: 1125889500\nI1212 06:17:19.090301 20613 layer_factory.hpp:77] Creating 
layer L2_b7_cbr1_bn\nI1212 06:17:19.090314 20613 net.cpp:100] Creating Layer L2_b7_cbr1_bn\nI1212 06:17:19.090329 20613 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:17:19.090342 20613 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:17:19.090622 20613 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:17:19.090636 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.090641 20613 net.cpp:165] Memory required for data: 1129985500\nI1212 06:17:19.090652 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:19.090662 20613 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:17:19.090667 20613 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:17:19.090675 20613 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.090744 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:19.090904 20613 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:17:19.090917 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.090922 20613 net.cpp:165] Memory required for data: 1134081500\nI1212 06:17:19.090932 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:17:19.090943 20613 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:17:19.090950 20613 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:17:19.090957 20613 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.090967 20613 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:17:19.090973 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.090978 20613 net.cpp:165] Memory required for data: 1138177500\nI1212 06:17:19.090983 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:17:19.090998 20613 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:17:19.091004 20613 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:17:19.091013 20613 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:17:19.091506 20613 
net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:17:19.091519 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.091524 20613 net.cpp:165] Memory required for data: 1142273500\nI1212 06:17:19.091533 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:17:19.091547 20613 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:17:19.091552 20613 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:17:19.091562 20613 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:17:19.091846 20613 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:17:19.091863 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.091869 20613 net.cpp:165] Memory required for data: 1146369500\nI1212 06:17:19.091879 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:19.091889 20613 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:17:19.091895 20613 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:17:19.091903 20613 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:17:19.091962 20613 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:19.092124 20613 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:17:19.092137 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092142 20613 net.cpp:165] Memory required for data: 1150465500\nI1212 06:17:19.092151 20613 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:17:19.092160 20613 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:17:19.092166 20613 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:17:19.092173 20613 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:19.092185 20613 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:17:19.092213 20613 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:17:19.092222 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092227 20613 net.cpp:165] Memory required for 
data: 1154561500\nI1212 06:17:19.092232 20613 layer_factory.hpp:77] Creating layer L2_b7_relu\nI1212 06:17:19.092243 20613 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:17:19.092250 20613 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:17:19.092264 20613 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:17:19.092274 20613 net.cpp:150] Setting up L2_b7_relu\nI1212 06:17:19.092281 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092286 20613 net.cpp:165] Memory required for data: 1158657500\nI1212 06:17:19.092291 20613 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:19.092298 20613 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:19.092303 20613 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:17:19.092311 20613 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:19.092321 20613 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:19.092375 20613 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:19.092387 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092394 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092398 20613 net.cpp:165] Memory required for data: 1166849500\nI1212 06:17:19.092403 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:17:19.092423 20613 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:17:19.092430 20613 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:19.092439 20613 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:17:19.092937 20613 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:17:19.092952 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.092957 20613 net.cpp:165] Memory required for data: 
1170945500\nI1212 06:17:19.092967 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_bn\nI1212 06:17:19.092978 20613 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:17:19.092985 20613 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:17:19.092996 20613 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:17:19.093268 20613 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:17:19.093281 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.093286 20613 net.cpp:165] Memory required for data: 1175041500\nI1212 06:17:19.093297 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:19.093307 20613 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:17:19.093312 20613 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:17:19.093320 20613 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.093387 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:19.093544 20613 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:17:19.093557 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.093564 20613 net.cpp:165] Memory required for data: 1179137500\nI1212 06:17:19.093572 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:17:19.093580 20613 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:17:19.093586 20613 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:17:19.093596 20613 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.093606 20613 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:17:19.093613 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.093624 20613 net.cpp:165] Memory required for data: 1183233500\nI1212 06:17:19.093631 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:17:19.093646 20613 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:17:19.093652 20613 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:17:19.093662 20613 net.cpp:408] 
L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:17:19.094156 20613 net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:17:19.094171 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.094175 20613 net.cpp:165] Memory required for data: 1187329500\nI1212 06:17:19.094184 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:17:19.094197 20613 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:17:19.094210 20613 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:17:19.094219 20613 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:17:19.094501 20613 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:17:19.094516 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.094522 20613 net.cpp:165] Memory required for data: 1191425500\nI1212 06:17:19.094532 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:19.094542 20613 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:17:19.094547 20613 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:17:19.094555 20613 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:17:19.094614 20613 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:19.094784 20613 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:17:19.094797 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.094804 20613 net.cpp:165] Memory required for data: 1195521500\nI1212 06:17:19.094812 20613 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:17:19.094821 20613 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:17:19.094827 20613 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:17:19.094835 20613 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:19.094847 20613 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:17:19.094875 20613 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:17:19.094884 20613 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:19.094889 20613 net.cpp:165] Memory required for data: 1199617500\nI1212 06:17:19.094894 20613 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:17:19.094902 20613 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:17:19.094907 20613 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:17:19.094918 20613 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:17:19.094928 20613 net.cpp:150] Setting up L2_b8_relu\nI1212 06:17:19.094934 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.094939 20613 net.cpp:165] Memory required for data: 1203713500\nI1212 06:17:19.094944 20613 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:19.094951 20613 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:19.094956 20613 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:17:19.094964 20613 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:19.094987 20613 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:19.095041 20613 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:19.095054 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.095062 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.095067 20613 net.cpp:165] Memory required for data: 1211905500\nI1212 06:17:19.095072 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:17:19.095083 20613 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:17:19.095090 20613 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:19.095103 20613 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:17:19.095598 20613 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:17:19.095613 20613 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:19.095624 20613 net.cpp:165] Memory required for data: 1216001500\nI1212 06:17:19.095634 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:17:19.095648 20613 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:17:19.095654 20613 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:17:19.095662 20613 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:17:19.095945 20613 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:17:19.095959 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.095970 20613 net.cpp:165] Memory required for data: 1220097500\nI1212 06:17:19.095981 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:19.095994 20613 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:17:19.096000 20613 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:17:19.096009 20613 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.096071 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:19.096242 20613 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:17:19.096256 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.096261 20613 net.cpp:165] Memory required for data: 1224193500\nI1212 06:17:19.096269 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:17:19.096282 20613 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:17:19.096287 20613 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:17:19.096298 20613 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.096308 20613 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:17:19.096315 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.096319 20613 net.cpp:165] Memory required for data: 1228289500\nI1212 06:17:19.096324 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:17:19.096336 20613 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:17:19.096343 20613 net.cpp:434] 
L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:17:19.096354 20613 net.cpp:408] L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:17:19.097864 20613 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:17:19.097882 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.097888 20613 net.cpp:165] Memory required for data: 1232385500\nI1212 06:17:19.097898 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:17:19.097910 20613 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:17:19.097918 20613 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:17:19.097929 20613 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:17:19.098201 20613 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:17:19.098215 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098220 20613 net.cpp:165] Memory required for data: 1236481500\nI1212 06:17:19.098268 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:19.098287 20613 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:17:19.098294 20613 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:17:19.098302 20613 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:17:19.098366 20613 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:19.098520 20613 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:17:19.098532 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098537 20613 net.cpp:165] Memory required for data: 1240577500\nI1212 06:17:19.098547 20613 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:17:19.098559 20613 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:17:19.098567 20613 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:17:19.098573 20613 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:19.098582 20613 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:17:19.098613 20613 net.cpp:150] Setting up 
L2_b9_sum_eltwise\nI1212 06:17:19.098630 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098635 20613 net.cpp:165] Memory required for data: 1244673500\nI1212 06:17:19.098641 20613 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:17:19.098649 20613 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:17:19.098655 20613 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:17:19.098662 20613 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:17:19.098672 20613 net.cpp:150] Setting up L2_b9_relu\nI1212 06:17:19.098680 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098683 20613 net.cpp:165] Memory required for data: 1248769500\nI1212 06:17:19.098696 20613 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:19.098707 20613 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:19.098713 20613 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:17:19.098721 20613 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:19.098734 20613 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:19.098788 20613 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:19.098799 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098805 20613 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:19.098810 20613 net.cpp:165] Memory required for data: 1256961500\nI1212 06:17:19.098815 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:17:19.098827 20613 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:17:19.098834 20613 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:19.098846 20613 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:17:19.099350 20613 net.cpp:150] Setting up 
L3_b1_cbr1_conv\nI1212 06:17:19.099365 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.099370 20613 net.cpp:165] Memory required for data: 1257985500\nI1212 06:17:19.099380 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:17:19.099388 20613 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:17:19.099395 20613 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:17:19.099406 20613 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:17:19.099692 20613 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:17:19.099709 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.099714 20613 net.cpp:165] Memory required for data: 1259009500\nI1212 06:17:19.099728 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:19.099737 20613 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:17:19.099743 20613 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:17:19.099751 20613 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.099808 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:19.099979 20613 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:17:19.099992 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.099997 20613 net.cpp:165] Memory required for data: 1260033500\nI1212 06:17:19.100006 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:17:19.100018 20613 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:17:19.100025 20613 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:17:19.100033 20613 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:19.100042 20613 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:17:19.100049 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.100054 20613 net.cpp:165] Memory required for data: 1261057500\nI1212 06:17:19.100059 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:17:19.100076 20613 net.cpp:100] 
Creating Layer L3_b1_cbr2_conv\nI1212 06:17:19.100082 20613 net.cpp:434] L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:17:19.100091 20613 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:17:19.100587 20613 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:17:19.100601 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.100606 20613 net.cpp:165] Memory required for data: 1262081500\nI1212 06:17:19.100615 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:17:19.100636 20613 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:17:19.100642 20613 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:17:19.100653 20613 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:17:19.100932 20613 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:17:19.100945 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.100957 20613 net.cpp:165] Memory required for data: 1263105500\nI1212 06:17:19.100968 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:19.100977 20613 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:17:19.100983 20613 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:17:19.100994 20613 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:17:19.101053 20613 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:19.101224 20613 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:17:19.101238 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.101243 20613 net.cpp:165] Memory required for data: 1264129500\nI1212 06:17:19.101253 20613 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:17:19.101261 20613 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:17:19.101267 20613 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:19.101279 20613 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:17:19.101318 20613 net.cpp:150] Setting up L3_b1_pool\nI1212 06:17:19.101330 20613 net.cpp:157] Top shape: 
125 32 8 8 (256000)\nI1212 06:17:19.101336 20613 net.cpp:165] Memory required for data: 1265153500\nI1212 06:17:19.101341 20613 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:17:19.101348 20613 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:17:19.101354 20613 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:17:19.101361 20613 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:17:19.101372 20613 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:17:19.101406 20613 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:17:19.101415 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.101420 20613 net.cpp:165] Memory required for data: 1266177500\nI1212 06:17:19.101425 20613 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:17:19.101433 20613 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:17:19.101438 20613 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:17:19.101445 20613 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:17:19.101454 20613 net.cpp:150] Setting up L3_b1_relu\nI1212 06:17:19.101461 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.101466 20613 net.cpp:165] Memory required for data: 1267201500\nI1212 06:17:19.101471 20613 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:17:19.101483 20613 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:17:19.101491 20613 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:17:19.102751 20613 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:17:19.102768 20613 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:19.102773 20613 net.cpp:165] Memory required for data: 1268225500\nI1212 06:17:19.102779 20613 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:17:19.102789 20613 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:17:19.102795 20613 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:17:19.102803 20613 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 
06:17:19.102814 20613 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 06:17:19.102859 20613 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:17:19.102874 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.102880 20613 net.cpp:165] Memory required for data: 1270273500\nI1212 06:17:19.102885 20613 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:19.102892 20613 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:19.102898 20613 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:17:19.102906 20613 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:19.102916 20613 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:19.102973 20613 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:19.102985 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.102991 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.103004 20613 net.cpp:165] Memory required for data: 1274369500\nI1212 06:17:19.103009 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:17:19.103025 20613 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:17:19.103032 20613 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:19.103042 20613 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:17:19.104102 20613 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:17:19.104118 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.104123 20613 net.cpp:165] Memory required for data: 1276417500\nI1212 06:17:19.104132 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:17:19.104146 20613 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:17:19.104152 20613 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:17:19.104161 20613 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 
06:17:19.104440 20613 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 06:17:19.104454 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.104460 20613 net.cpp:165] Memory required for data: 1278465500\nI1212 06:17:19.104470 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:19.104482 20613 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:17:19.104488 20613 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:17:19.104496 20613 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.104557 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:19.104728 20613 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:17:19.104743 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.104748 20613 net.cpp:165] Memory required for data: 1280513500\nI1212 06:17:19.104756 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:17:19.104768 20613 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:17:19.104774 20613 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:17:19.104782 20613 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:19.104792 20613 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:17:19.104799 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.104804 20613 net.cpp:165] Memory required for data: 1282561500\nI1212 06:17:19.104809 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:17:19.104825 20613 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:17:19.104831 20613 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:17:19.104842 20613 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:17:19.105903 20613 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:17:19.105918 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.105924 20613 net.cpp:165] Memory required for data: 1284609500\nI1212 06:17:19.105933 20613 layer_factory.hpp:77] Creating layer 
L3_b2_cbr2_bn\nI1212 06:17:19.105942 20613 net.cpp:100] Creating Layer L3_b2_cbr2_bn\nI1212 06:17:19.105949 20613 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:17:19.105960 20613 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:17:19.106236 20613 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:17:19.106252 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106257 20613 net.cpp:165] Memory required for data: 1286657500\nI1212 06:17:19.106268 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:19.106278 20613 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:17:19.106284 20613 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:17:19.106292 20613 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:17:19.106353 20613 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:19.106515 20613 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:17:19.106528 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106533 20613 net.cpp:165] Memory required for data: 1288705500\nI1212 06:17:19.106542 20613 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:17:19.106560 20613 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:17:19.106567 20613 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:17:19.106575 20613 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:19.106582 20613 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:17:19.106626 20613 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:17:19.106638 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106643 20613 net.cpp:165] Memory required for data: 1290753500\nI1212 06:17:19.106649 20613 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:17:19.106657 20613 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:17:19.106663 20613 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:17:19.106672 20613 
net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 06:17:19.106681 20613 net.cpp:150] Setting up L3_b2_relu\nI1212 06:17:19.106688 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106693 20613 net.cpp:165] Memory required for data: 1292801500\nI1212 06:17:19.106698 20613 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:19.106704 20613 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:19.106710 20613 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:17:19.106720 20613 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:19.106731 20613 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:19.106778 20613 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:19.106791 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106797 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.106801 20613 net.cpp:165] Memory required for data: 1296897500\nI1212 06:17:19.106806 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:17:19.106822 20613 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:17:19.106828 20613 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:19.106838 20613 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:17:19.107893 20613 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:17:19.107908 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.107913 20613 net.cpp:165] Memory required for data: 1298945500\nI1212 06:17:19.107923 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:17:19.107936 20613 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:17:19.107942 20613 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:17:19.107951 20613 net.cpp:408] 
L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:17:19.108227 20613 net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:17:19.108242 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.108247 20613 net.cpp:165] Memory required for data: 1300993500\nI1212 06:17:19.108256 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:19.108268 20613 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:17:19.108275 20613 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:17:19.108283 20613 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.108347 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:19.108510 20613 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:17:19.108522 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.108552 20613 net.cpp:165] Memory required for data: 1303041500\nI1212 06:17:19.108563 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:17:19.108577 20613 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:17:19.108583 20613 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:17:19.108592 20613 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:19.108602 20613 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:17:19.108608 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.108626 20613 net.cpp:165] Memory required for data: 1305089500\nI1212 06:17:19.108633 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:17:19.108647 20613 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:17:19.108654 20613 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:17:19.108666 20613 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:17:19.109710 20613 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:17:19.109726 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.109731 20613 net.cpp:165] Memory required for data: 1307137500\nI1212 06:17:19.109740 20613 
layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 06:17:19.109750 20613 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:17:19.109756 20613 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:17:19.109767 20613 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:17:19.110066 20613 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:17:19.110085 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110090 20613 net.cpp:165] Memory required for data: 1309185500\nI1212 06:17:19.110101 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:19.110111 20613 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:17:19.110117 20613 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:17:19.110126 20613 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:17:19.110188 20613 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:19.110352 20613 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:17:19.110364 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110369 20613 net.cpp:165] Memory required for data: 1311233500\nI1212 06:17:19.110378 20613 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:17:19.110390 20613 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:17:19.110397 20613 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:17:19.110404 20613 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:19.110412 20613 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:17:19.110450 20613 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:17:19.110462 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110467 20613 net.cpp:165] Memory required for data: 1313281500\nI1212 06:17:19.110472 20613 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:17:19.110481 20613 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:17:19.110486 20613 net.cpp:434] L3_b3_relu <- 
L3_b3_sum_eltwise_top\nI1212 06:17:19.110494 20613 net.cpp:395] L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:17:19.110503 20613 net.cpp:150] Setting up L3_b3_relu\nI1212 06:17:19.110510 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110514 20613 net.cpp:165] Memory required for data: 1315329500\nI1212 06:17:19.110519 20613 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:19.110527 20613 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:19.110532 20613 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:17:19.110543 20613 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:19.110553 20613 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:19.110600 20613 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:19.110611 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110623 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.110628 20613 net.cpp:165] Memory required for data: 1319425500\nI1212 06:17:19.110635 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:17:19.110649 20613 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:17:19.110656 20613 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:19.110673 20613 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:17:19.111729 20613 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:17:19.111744 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.111749 20613 net.cpp:165] Memory required for data: 1321473500\nI1212 06:17:19.111758 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:17:19.111771 20613 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:17:19.111778 20613 net.cpp:434] L3_b4_cbr1_bn <- 
L3_b4_cbr1_conv_top\nI1212 06:17:19.111788 20613 net.cpp:408] L3_b4_cbr1_bn -> L3_b4_cbr1_bn_top\nI1212 06:17:19.112061 20613 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:17:19.112073 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.112078 20613 net.cpp:165] Memory required for data: 1323521500\nI1212 06:17:19.112088 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:19.112100 20613 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:17:19.112107 20613 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:17:19.112115 20613 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.112176 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:19.112337 20613 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:17:19.112350 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.112355 20613 net.cpp:165] Memory required for data: 1325569500\nI1212 06:17:19.112365 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:17:19.112375 20613 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:17:19.112382 20613 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:17:19.112390 20613 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:19.112399 20613 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:17:19.112409 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.112414 20613 net.cpp:165] Memory required for data: 1327617500\nI1212 06:17:19.112419 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:17:19.112431 20613 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:17:19.112437 20613 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:17:19.112448 20613 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:17:19.114519 20613 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:17:19.114537 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.114543 20613 net.cpp:165] Memory 
required for data: 1329665500\nI1212 06:17:19.114553 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:17:19.114567 20613 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:17:19.114573 20613 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:17:19.114583 20613 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:17:19.114864 20613 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:17:19.114878 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.114884 20613 net.cpp:165] Memory required for data: 1331713500\nI1212 06:17:19.114894 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:19.114907 20613 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:17:19.114913 20613 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:17:19.114922 20613 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:17:19.114984 20613 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:19.115150 20613 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:17:19.115164 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.115169 20613 net.cpp:165] Memory required for data: 1333761500\nI1212 06:17:19.115178 20613 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:17:19.115190 20613 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:17:19.115197 20613 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:17:19.115205 20613 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:19.115216 20613 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:17:19.115249 20613 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:17:19.115267 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.115272 20613 net.cpp:165] Memory required for data: 1335809500\nI1212 06:17:19.115278 20613 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:17:19.115288 20613 net.cpp:100] Creating Layer L3_b4_relu\nI1212 
06:17:19.115295 20613 net.cpp:434] L3_b4_relu <- L3_b4_sum_eltwise_top\nI1212 06:17:19.115303 20613 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:17:19.115312 20613 net.cpp:150] Setting up L3_b4_relu\nI1212 06:17:19.115319 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.115324 20613 net.cpp:165] Memory required for data: 1337857500\nI1212 06:17:19.115329 20613 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:19.115336 20613 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:19.115342 20613 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:17:19.115350 20613 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:19.115360 20613 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:19.115411 20613 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:19.115422 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.115428 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.115433 20613 net.cpp:165] Memory required for data: 1341953500\nI1212 06:17:19.115438 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:17:19.115458 20613 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:17:19.115464 20613 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:19.115474 20613 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:17:19.116506 20613 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:17:19.116523 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.116528 20613 net.cpp:165] Memory required for data: 1344001500\nI1212 06:17:19.116536 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:17:19.116549 20613 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 
06:17:19.116557 20613 net.cpp:434] L3_b5_cbr1_bn <- L3_b5_cbr1_conv_top\nI1212 06:17:19.116565 20613 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:17:19.116850 20613 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:17:19.116864 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.116869 20613 net.cpp:165] Memory required for data: 1346049500\nI1212 06:17:19.116880 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:19.116889 20613 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:17:19.116896 20613 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:17:19.116904 20613 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.116966 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:19.117130 20613 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:17:19.117143 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.117148 20613 net.cpp:165] Memory required for data: 1348097500\nI1212 06:17:19.117158 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:17:19.117166 20613 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:17:19.117172 20613 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:17:19.117185 20613 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:19.117195 20613 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:17:19.117203 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.117208 20613 net.cpp:165] Memory required for data: 1350145500\nI1212 06:17:19.117213 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:17:19.117226 20613 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:17:19.117233 20613 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:17:19.117242 20613 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:17:19.118286 20613 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:17:19.118302 20613 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:19.118307 20613 net.cpp:165] Memory required for data: 1352193500\nI1212 06:17:19.118316 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:17:19.118329 20613 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:17:19.118335 20613 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:17:19.118345 20613 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:17:19.118628 20613 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:17:19.118641 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.118646 20613 net.cpp:165] Memory required for data: 1354241500\nI1212 06:17:19.118657 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:19.118670 20613 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:17:19.118677 20613 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:17:19.118685 20613 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:17:19.118747 20613 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:19.118909 20613 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:17:19.118922 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.118927 20613 net.cpp:165] Memory required for data: 1356289500\nI1212 06:17:19.118937 20613 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:17:19.118948 20613 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:17:19.118955 20613 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:17:19.118963 20613 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:19.118973 20613 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:17:19.119006 20613 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:17:19.119015 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.119020 20613 net.cpp:165] Memory required for data: 1358337500\nI1212 06:17:19.119026 20613 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 
06:17:19.119038 20613 net.cpp:100] Creating Layer L3_b5_relu\nI1212 06:17:19.119045 20613 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:17:19.119051 20613 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:17:19.119060 20613 net.cpp:150] Setting up L3_b5_relu\nI1212 06:17:19.119067 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.119072 20613 net.cpp:165] Memory required for data: 1360385500\nI1212 06:17:19.119077 20613 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:19.119084 20613 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:19.119091 20613 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:17:19.119097 20613 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:19.119107 20613 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:19.119156 20613 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:19.119168 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.119174 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.119179 20613 net.cpp:165] Memory required for data: 1364481500\nI1212 06:17:19.119184 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:17:19.119199 20613 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:17:19.119206 20613 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:19.119216 20613 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:17:19.120247 20613 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:17:19.120262 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.120268 20613 net.cpp:165] Memory required for data: 1366529500\nI1212 06:17:19.120277 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:17:19.120296 
20613 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 06:17:19.120304 20613 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:17:19.120316 20613 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:17:19.120594 20613 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:17:19.120606 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.120611 20613 net.cpp:165] Memory required for data: 1368577500\nI1212 06:17:19.120627 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:19.120637 20613 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:17:19.120643 20613 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:17:19.120654 20613 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.120717 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:19.120890 20613 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:17:19.120903 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.120908 20613 net.cpp:165] Memory required for data: 1370625500\nI1212 06:17:19.120918 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:17:19.120926 20613 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:17:19.120932 20613 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:17:19.120942 20613 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:19.120954 20613 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:17:19.120960 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.120965 20613 net.cpp:165] Memory required for data: 1372673500\nI1212 06:17:19.120970 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:17:19.120985 20613 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:17:19.120990 20613 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:17:19.120999 20613 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:17:19.122035 20613 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 
06:17:19.122050 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122054 20613 net.cpp:165] Memory required for data: 1374721500\nI1212 06:17:19.122063 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:17:19.122076 20613 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:17:19.122082 20613 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:17:19.122092 20613 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:17:19.122368 20613 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:17:19.122381 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122387 20613 net.cpp:165] Memory required for data: 1376769500\nI1212 06:17:19.122397 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:19.122409 20613 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:17:19.122416 20613 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:17:19.122424 20613 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:17:19.122485 20613 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:19.122656 20613 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:17:19.122670 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122675 20613 net.cpp:165] Memory required for data: 1378817500\nI1212 06:17:19.122684 20613 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:17:19.122694 20613 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:17:19.122700 20613 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:17:19.122707 20613 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:19.122717 20613 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:17:19.122753 20613 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:17:19.122762 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122767 20613 net.cpp:165] Memory required for data: 1380865500\nI1212 06:17:19.122773 20613 
layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 06:17:19.122783 20613 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:17:19.122790 20613 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:17:19.122804 20613 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:17:19.122814 20613 net.cpp:150] Setting up L3_b6_relu\nI1212 06:17:19.122822 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122826 20613 net.cpp:165] Memory required for data: 1382913500\nI1212 06:17:19.122831 20613 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:19.122839 20613 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:19.122844 20613 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:17:19.122853 20613 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:19.122862 20613 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:19.122915 20613 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:19.122927 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122934 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.122938 20613 net.cpp:165] Memory required for data: 1387009500\nI1212 06:17:19.122944 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:17:19.122962 20613 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:17:19.122969 20613 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:19.122979 20613 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:17:19.124017 20613 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:17:19.124032 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.124037 20613 net.cpp:165] Memory required for data: 1389057500\nI1212 06:17:19.124045 20613 
layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:17:19.124060 20613 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:17:19.124068 20613 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:17:19.124078 20613 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:17:19.124353 20613 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:17:19.124366 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.124372 20613 net.cpp:165] Memory required for data: 1391105500\nI1212 06:17:19.124382 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:19.124392 20613 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:17:19.124397 20613 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:17:19.124408 20613 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.124469 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:19.124637 20613 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:17:19.124651 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.124656 20613 net.cpp:165] Memory required for data: 1393153500\nI1212 06:17:19.124665 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:17:19.124701 20613 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:17:19.124711 20613 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:17:19.124718 20613 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:19.124729 20613 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:17:19.124737 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.124742 20613 net.cpp:165] Memory required for data: 1395201500\nI1212 06:17:19.124747 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:17:19.124761 20613 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:17:19.124768 20613 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:17:19.124776 20613 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 
06:17:19.125818 20613 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 06:17:19.125833 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.125838 20613 net.cpp:165] Memory required for data: 1397249500\nI1212 06:17:19.125846 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:17:19.125860 20613 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:17:19.125874 20613 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:17:19.125885 20613 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:17:19.126168 20613 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:17:19.126181 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126188 20613 net.cpp:165] Memory required for data: 1399297500\nI1212 06:17:19.126197 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:19.126206 20613 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:17:19.126214 20613 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:17:19.126220 20613 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:17:19.126284 20613 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:19.126451 20613 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:17:19.126466 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126471 20613 net.cpp:165] Memory required for data: 1401345500\nI1212 06:17:19.126479 20613 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:17:19.126488 20613 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:17:19.126495 20613 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:17:19.126502 20613 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:19.126513 20613 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:17:19.126549 20613 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:17:19.126561 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126566 20613 net.cpp:165] Memory 
required for data: 1403393500\nI1212 06:17:19.126571 20613 layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:17:19.126579 20613 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:17:19.126585 20613 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:17:19.126592 20613 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:17:19.126601 20613 net.cpp:150] Setting up L3_b7_relu\nI1212 06:17:19.126608 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126613 20613 net.cpp:165] Memory required for data: 1405441500\nI1212 06:17:19.126623 20613 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:19.126634 20613 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:19.126641 20613 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:17:19.126648 20613 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:19.126659 20613 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:19.126710 20613 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:19.126722 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126729 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.126734 20613 net.cpp:165] Memory required for data: 1409537500\nI1212 06:17:19.126739 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:17:19.126749 20613 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:17:19.126756 20613 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:19.126770 20613 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:17:19.128823 20613 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:17:19.128840 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.128845 20613 net.cpp:165] Memory required for data: 
1411585500\nI1212 06:17:19.128855 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:17:19.128870 20613 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:17:19.128876 20613 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:17:19.128888 20613 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:17:19.129170 20613 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:17:19.129184 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.129189 20613 net.cpp:165] Memory required for data: 1413633500\nI1212 06:17:19.129207 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:19.129217 20613 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:17:19.129223 20613 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:17:19.129235 20613 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.129297 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:19.129462 20613 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:17:19.129475 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.129480 20613 net.cpp:165] Memory required for data: 1415681500\nI1212 06:17:19.129489 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:17:19.129498 20613 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:17:19.129504 20613 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:17:19.129514 20613 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:19.129524 20613 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:17:19.129531 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.129536 20613 net.cpp:165] Memory required for data: 1417729500\nI1212 06:17:19.129541 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:17:19.129555 20613 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:17:19.129562 20613 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:17:19.129573 20613 net.cpp:408] 
L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 06:17:19.130612 20613 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:17:19.130633 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.130638 20613 net.cpp:165] Memory required for data: 1419777500\nI1212 06:17:19.130647 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:17:19.130657 20613 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:17:19.130664 20613 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:17:19.130676 20613 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:17:19.130954 20613 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:17:19.130970 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.130976 20613 net.cpp:165] Memory required for data: 1421825500\nI1212 06:17:19.130986 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:19.130995 20613 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:17:19.131001 20613 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:17:19.131009 20613 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:17:19.131070 20613 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:19.131237 20613 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:17:19.131252 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.131256 20613 net.cpp:165] Memory required for data: 1423873500\nI1212 06:17:19.131265 20613 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:17:19.131278 20613 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:17:19.131285 20613 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:17:19.131292 20613 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:19.131300 20613 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:17:19.131337 20613 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:17:19.131348 20613 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:19.131353 20613 net.cpp:165] Memory required for data: 1425921500\nI1212 06:17:19.131358 20613 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:17:19.131366 20613 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:17:19.131372 20613 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:17:19.131379 20613 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:17:19.131389 20613 net.cpp:150] Setting up L3_b8_relu\nI1212 06:17:19.131397 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.131400 20613 net.cpp:165] Memory required for data: 1427969500\nI1212 06:17:19.131405 20613 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:19.131419 20613 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:19.131425 20613 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:17:19.131436 20613 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:19.131448 20613 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:19.131494 20613 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:19.131506 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.131512 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.131517 20613 net.cpp:165] Memory required for data: 1432065500\nI1212 06:17:19.131522 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:17:19.131539 20613 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:17:19.131546 20613 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:19.131556 20613 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:17:19.132592 20613 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:17:19.132607 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 
06:17:19.132612 20613 net.cpp:165] Memory required for data: 1434113500\nI1212 06:17:19.132627 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:17:19.132637 20613 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:17:19.132647 20613 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:17:19.132655 20613 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:17:19.132930 20613 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:17:19.132944 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.132949 20613 net.cpp:165] Memory required for data: 1436161500\nI1212 06:17:19.132959 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:19.132968 20613 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:17:19.132974 20613 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:17:19.132985 20613 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.133047 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:19.133213 20613 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:17:19.133225 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.133230 20613 net.cpp:165] Memory required for data: 1438209500\nI1212 06:17:19.133239 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:17:19.133250 20613 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:17:19.133257 20613 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:17:19.133265 20613 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:19.133275 20613 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:17:19.133281 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.133286 20613 net.cpp:165] Memory required for data: 1440257500\nI1212 06:17:19.133291 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:17:19.133306 20613 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:17:19.133312 20613 net.cpp:434] L3_b9_cbr2_conv <- 
L3_b9_cbr1_bn_top\nI1212 06:17:19.133327 20613 net.cpp:408] L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:17:19.134364 20613 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:17:19.134379 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.134384 20613 net.cpp:165] Memory required for data: 1442305500\nI1212 06:17:19.134393 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:17:19.134402 20613 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:17:19.134409 20613 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:17:19.134420 20613 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:17:19.134702 20613 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:17:19.134718 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.134723 20613 net.cpp:165] Memory required for data: 1444353500\nI1212 06:17:19.134742 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:19.134750 20613 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:17:19.134757 20613 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:17:19.134764 20613 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:17:19.134825 20613 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:19.134987 20613 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:17:19.135000 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.135005 20613 net.cpp:165] Memory required for data: 1446401500\nI1212 06:17:19.135015 20613 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:17:19.135028 20613 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:17:19.135035 20613 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:17:19.135042 20613 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:19.135051 20613 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:17:19.135087 20613 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 
06:17:19.135099 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.135104 20613 net.cpp:165] Memory required for data: 1448449500\nI1212 06:17:19.135109 20613 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:17:19.135118 20613 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:17:19.135123 20613 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:17:19.135130 20613 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:17:19.135139 20613 net.cpp:150] Setting up L3_b9_relu\nI1212 06:17:19.135146 20613 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:19.135151 20613 net.cpp:165] Memory required for data: 1450497500\nI1212 06:17:19.135155 20613 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:17:19.135164 20613 net.cpp:100] Creating Layer post_pool\nI1212 06:17:19.135169 20613 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:17:19.135180 20613 net.cpp:408] post_pool -> post_pool\nI1212 06:17:19.135216 20613 net.cpp:150] Setting up post_pool\nI1212 06:17:19.135227 20613 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:17:19.135232 20613 net.cpp:165] Memory required for data: 1450529500\nI1212 06:17:19.135237 20613 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:17:19.135249 20613 net.cpp:100] Creating Layer post_FC\nI1212 06:17:19.135255 20613 net.cpp:434] post_FC <- post_pool\nI1212 06:17:19.135267 20613 net.cpp:408] post_FC -> post_FC_top\nI1212 06:17:19.135434 20613 net.cpp:150] Setting up post_FC\nI1212 06:17:19.135447 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:19.135452 20613 net.cpp:165] Memory required for data: 1450534500\nI1212 06:17:19.135462 20613 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:17:19.135473 20613 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:17:19.135479 20613 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:17:19.135488 20613 net.cpp:408] post_FC_top_post_FC_0_split -> 
post_FC_top_post_FC_0_split_0\nI1212 06:17:19.135498 20613 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:17:19.135550 20613 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:17:19.135562 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:19.135568 20613 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:19.135573 20613 net.cpp:165] Memory required for data: 1450544500\nI1212 06:17:19.135578 20613 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:17:19.135586 20613 net.cpp:100] Creating Layer accuracy\nI1212 06:17:19.135592 20613 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:17:19.135599 20613 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:17:19.135607 20613 net.cpp:408] accuracy -> accuracy\nI1212 06:17:19.135625 20613 net.cpp:150] Setting up accuracy\nI1212 06:17:19.135634 20613 net.cpp:157] Top shape: (1)\nI1212 06:17:19.135639 20613 net.cpp:165] Memory required for data: 1450544504\nI1212 06:17:19.135656 20613 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:19.135664 20613 net.cpp:100] Creating Layer loss\nI1212 06:17:19.135670 20613 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:17:19.135677 20613 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:17:19.135689 20613 net.cpp:408] loss -> loss\nI1212 06:17:19.135700 20613 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:19.135825 20613 net.cpp:150] Setting up loss\nI1212 06:17:19.135838 20613 net.cpp:157] Top shape: (1)\nI1212 06:17:19.135843 20613 net.cpp:160]     with loss weight 1\nI1212 06:17:19.135860 20613 net.cpp:165] Memory required for data: 1450544508\nI1212 06:17:19.135867 20613 net.cpp:226] loss needs backward computation.\nI1212 06:17:19.135874 20613 net.cpp:228] accuracy does not need backward computation.\nI1212 06:17:19.135879 20613 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:17:19.135885 20613 net.cpp:226] post_FC needs backward 
computation.\nI1212 06:17:19.135890 20613 net.cpp:226] post_pool needs backward computation.\nI1212 06:17:19.135895 20613 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:17:19.135900 20613 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:17:19.135905 20613 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:17:19.135910 20613 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:17:19.135915 20613 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:17:19.135921 20613 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:17:19.135926 20613 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:17:19.135929 20613 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:17:19.135936 20613 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:17:19.135941 20613 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:17:19.135946 20613 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:17:19.135951 20613 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:17:19.135957 20613 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:17:19.135962 20613 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:17:19.135967 20613 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:17:19.135972 20613 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:17:19.135977 20613 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:17:19.135982 20613 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:17:19.135989 20613 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:17:19.135998 20613 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:17:19.136004 20613 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:17:19.136009 20613 net.cpp:226] L3_b7_sum_eltwise needs 
backward computation.\nI1212 06:17:19.136014 20613 net.cpp:226] L3_b7_cbr2_scale needs backward computation.\nI1212 06:17:19.136020 20613 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:17:19.136025 20613 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:17:19.136030 20613 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:17:19.136035 20613 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:17:19.136040 20613 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:17:19.136045 20613 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:17:19.136050 20613 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:17:19.136056 20613 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:17:19.136061 20613 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:17:19.136066 20613 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:17:19.136072 20613 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:17:19.136077 20613 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:17:19.136088 20613 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:17:19.136095 20613 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:17:19.136099 20613 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:17:19.136104 20613 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:17:19.136109 20613 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:17:19.136116 20613 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:17:19.136121 20613 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:17:19.136126 20613 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:17:19.136131 20613 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:17:19.136137 20613 net.cpp:226] 
L3_b5_cbr2_conv needs backward computation.\nI1212 06:17:19.136142 20613 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:17:19.136147 20613 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:17:19.136152 20613 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:17:19.136157 20613 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:17:19.136162 20613 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:17:19.136168 20613 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:17:19.136173 20613 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:17:19.136178 20613 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:17:19.136183 20613 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:17:19.136189 20613 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:17:19.136194 20613 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:17:19.136199 20613 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:17:19.136204 20613 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:17:19.136209 20613 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:17:19.136215 20613 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:17:19.136220 20613 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:17:19.136225 20613 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:17:19.136231 20613 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:17:19.136236 20613 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:17:19.136242 20613 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:17:19.136247 20613 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:17:19.136252 20613 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:17:19.136257 
20613 net.cpp:226] L3_b3_cbr1_bn needs backward computation.\nI1212 06:17:19.136262 20613 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:17:19.136268 20613 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:17:19.136277 20613 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:17:19.136282 20613 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:17:19.136288 20613 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:17:19.136294 20613 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:17:19.136299 20613 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:17:19.136306 20613 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:17:19.136310 20613 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:17:19.136315 20613 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:17:19.136320 20613 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:17:19.136327 20613 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:17:19.136332 20613 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:17:19.136338 20613 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:17:19.136349 20613 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:17:19.136355 20613 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:17:19.136361 20613 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:17:19.136366 20613 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:17:19.136373 20613 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:17:19.136378 20613 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:17:19.136382 20613 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:17:19.136389 20613 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 
06:17:19.136394 20613 net.cpp:226] L3_b1_cbr1_bn needs backward computation.\nI1212 06:17:19.136399 20613 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:17:19.136404 20613 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:17:19.136409 20613 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:17:19.136415 20613 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:17:19.136420 20613 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:17:19.136426 20613 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:17:19.136431 20613 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:17:19.136436 20613 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:17:19.136442 20613 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:17:19.136447 20613 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:17:19.136452 20613 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:17:19.136458 20613 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:17:19.136464 20613 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:17:19.136469 20613 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:17:19.136476 20613 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:17:19.136481 20613 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:17:19.136487 20613 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:17:19.136492 20613 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:17:19.136497 20613 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:17:19.136502 20613 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:17:19.136507 20613 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:17:19.136512 20613 net.cpp:226] 
L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 06:17:19.136518 20613 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:17:19.136523 20613 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:17:19.136529 20613 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:17:19.136534 20613 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:17:19.136540 20613 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:17:19.136545 20613 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:17:19.136551 20613 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:17:19.136556 20613 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:17:19.136562 20613 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:17:19.136569 20613 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:17:19.136574 20613 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:17:19.136579 20613 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:17:19.136584 20613 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:17:19.136590 20613 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:17:19.136596 20613 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:17:19.136601 20613 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:17:19.136611 20613 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:17:19.136622 20613 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:17:19.136629 20613 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:17:19.136636 20613 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:17:19.136641 20613 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:17:19.136646 20613 net.cpp:226] L2_b5_sum_eltwise needs backward 
computation.\nI1212 06:17:19.136652 20613 net.cpp:226] L2_b5_cbr2_scale needs backward computation.\nI1212 06:17:19.136658 20613 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:17:19.136668 20613 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:17:19.136674 20613 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:17:19.136679 20613 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:17:19.136685 20613 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:17:19.136690 20613 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:17:19.136696 20613 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:17:19.136703 20613 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:17:19.136708 20613 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:17:19.136714 20613 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:17:19.136719 20613 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:17:19.136725 20613 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:17:19.136731 20613 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:17:19.136736 20613 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:17:19.136742 20613 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:17:19.136749 20613 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:17:19.136754 20613 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:17:19.136760 20613 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:17:19.136765 20613 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:17:19.136771 20613 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:17:19.136777 20613 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:17:19.136783 20613 net.cpp:226] 
L2_b3_cbr2_conv needs backward computation.\nI1212 06:17:19.136790 20613 net.cpp:226] L2_b3_cbr1_relu needs backward computation.\nI1212 06:17:19.136795 20613 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:17:19.136800 20613 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:17:19.136806 20613 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:17:19.136811 20613 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:17:19.136817 20613 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:17:19.136822 20613 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:17:19.136828 20613 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:17:19.136834 20613 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:17:19.136840 20613 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:17:19.136845 20613 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:17:19.136852 20613 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:17:19.136857 20613 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:17:19.136862 20613 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:17:19.136868 20613 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:17:19.136873 20613 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:17:19.136880 20613 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:17:19.136885 20613 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:17:19.136891 20613 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:17:19.136903 20613 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:17:19.136909 20613 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:17:19.136914 20613 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:17:19.136920 20613 
net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:17:19.136926 20613 net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:17:19.136931 20613 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:17:19.136937 20613 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:17:19.136943 20613 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:17:19.136948 20613 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:17:19.136955 20613 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:17:19.136960 20613 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:17:19.136966 20613 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:17:19.136971 20613 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:17:19.136977 20613 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:17:19.136983 20613 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:17:19.136988 20613 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:17:19.136994 20613 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:17:19.136999 20613 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:17:19.137006 20613 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:17:19.137012 20613 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:17:19.137017 20613 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:17:19.137023 20613 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:17:19.137028 20613 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:17:19.137034 20613 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:17:19.137040 20613 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:17:19.137045 20613 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 
06:17:19.137051 20613 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 06:17:19.137058 20613 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:17:19.137063 20613 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:17:19.137069 20613 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:17:19.137075 20613 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:17:19.137081 20613 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:17:19.137087 20613 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:17:19.137094 20613 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:17:19.137099 20613 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:17:19.137104 20613 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:17:19.137109 20613 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:17:19.137115 20613 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:17:19.137121 20613 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:17:19.137127 20613 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:17:19.137133 20613 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:17:19.137140 20613 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:17:19.137145 20613 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:17:19.137151 20613 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:17:19.137157 20613 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:17:19.137162 20613 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:17:19.137168 20613 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:17:19.137178 20613 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:17:19.137186 20613 net.cpp:226] 
L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:17:19.137192 20613 net.cpp:226] L1_b5_relu needs backward computation.\nI1212 06:17:19.137197 20613 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:17:19.137203 20613 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:17:19.137209 20613 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:17:19.137215 20613 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:17:19.137221 20613 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:17:19.137226 20613 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:17:19.137233 20613 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:17:19.137238 20613 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:17:19.137244 20613 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:17:19.137249 20613 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:17:19.137255 20613 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:17:19.137261 20613 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:17:19.137267 20613 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:17:19.137274 20613 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:17:19.137279 20613 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:17:19.137284 20613 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:17:19.137290 20613 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:17:19.137295 20613 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:17:19.137301 20613 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:17:19.137307 20613 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:17:19.137313 20613 net.cpp:226] L1_b3_sum_eltwise needs backward 
computation.\nI1212 06:17:19.137320 20613 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:17:19.137326 20613 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:17:19.137331 20613 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:17:19.137337 20613 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:17:19.137342 20613 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:17:19.137348 20613 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:17:19.137354 20613 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:17:19.137361 20613 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:17:19.137368 20613 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:17:19.137375 20613 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:17:19.137382 20613 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:17:19.137387 20613 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:17:19.137393 20613 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:17:19.137399 20613 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:17:19.137405 20613 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:17:19.137410 20613 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:17:19.137416 20613 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:17:19.137423 20613 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:17:19.137428 20613 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:17:19.137434 20613 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:17:19.137440 20613 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:17:19.137446 20613 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:17:19.137452 20613 net.cpp:226] 
L1_b1_cbr2_conv needs backward computation.\nI1212 06:17:19.137465 20613 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 06:17:19.137470 20613 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:17:19.137476 20613 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:17:19.137482 20613 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:17:19.137488 20613 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:17:19.137495 20613 net.cpp:226] pre_relu needs backward computation.\nI1212 06:17:19.137500 20613 net.cpp:226] pre_scale needs backward computation.\nI1212 06:17:19.137506 20613 net.cpp:226] pre_bn needs backward computation.\nI1212 06:17:19.137511 20613 net.cpp:226] pre_conv needs backward computation.\nI1212 06:17:19.137518 20613 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:17:19.137526 20613 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:17:19.137529 20613 net.cpp:270] This network produces output accuracy\nI1212 06:17:19.137537 20613 net.cpp:270] This network produces output loss\nI1212 06:17:19.137863 20613 net.cpp:283] Network initialization done.\nI1212 06:17:19.138885 20613 solver.cpp:60] Solver scaffolding done.\nI1212 06:17:19.362326 20613 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1212 06:17:19.713075 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:19.713140 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:19.720060 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:19.959482 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:19.959566 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:19.994326 20613 net.cpp:93] Sharing layer L3_b1_zeros 
from root net\nI1212 06:17:19.994411 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:20.438061 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:20.438143 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:20.446352 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:20.693294 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:20.693440 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:20.746367 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:20.746502 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:21.272750 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:21.272830 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:21.281208 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:21.559911 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:21.560099 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:21.633107 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:21.633272 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:21.718204 20613 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1212 06:17:22.217545 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:22.217602 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated 
params.\nI1212 06:17:22.226842 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:22.524340 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:22.524513 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:22.617389 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:22.617544 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:23.274123 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:23.274180 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:23.284718 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:23.614346 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:23.614528 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:23.728879 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:23.729065 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:24.453167 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:24.453248 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:24.464900 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:24.804237 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:24.804503 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:24.942277 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:24.942504 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 
06:17:25.754896 20613 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:25.754953 20613 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:25.767519 20613 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:26.198180 20613 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:26.198460 20613 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:26.352447 20613 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:26.352721 20613 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:26.523802 20613 parallel.cpp:425] Starting Optimization\nI1212 06:17:26.525681 20613 solver.cpp:279] Solving Cifar-Resnet\nI1212 06:17:26.525703 20613 solver.cpp:280] Learning Rate Policy: triangular\nI1212 06:17:26.530863 20613 solver.cpp:337] Iteration 0, Testing net (#0)\nI1212 06:18:47.666090 20613 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI1212 06:18:47.666615 20613 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI1212 06:18:51.726694 20613 solver.cpp:228] Iteration 0, loss = 4.48113\nI1212 06:18:51.726747 20613 solver.cpp:244]     Train net output #0: accuracy = 0.072\nI1212 06:18:51.726771 20613 solver.cpp:244]     Train net output #1: loss = 4.48113 (* 1 = 4.48113 loss)\nI1212 06:18:51.803313 20613 sgd_solver.cpp:174] Iteration 0, lr = 0\nI1212 06:18:51.816622 20613 sgd_solver.cpp:149] Gradient: L2 norm 15.7547\nI1212 06:21:09.410142 20613 solver.cpp:337] Iteration 100, Testing net (#0)\nI1212 06:22:31.162739 20613 solver.cpp:404]     Test net output #0: accuracy = 0.33352\nI1212 06:22:31.163064 20613 solver.cpp:404]     Test net output #1: loss = 1.78045 (* 1 = 1.78045 loss)\nI1212 06:22:32.476596 20613 solver.cpp:228] Iteration 100, loss = 1.71111\nI1212 
06:22:32.476656 20613 solver.cpp:244]     Train net output #0: accuracy = 0.312\nI1212 06:22:32.476675 20613 solver.cpp:244]     Train net output #1: loss = 1.71111 (* 1 = 1.71111 loss)\nI1212 06:22:32.568349 20613 sgd_solver.cpp:174] Iteration 100, lr = 0.00299996\nI1212 06:22:32.582204 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.74546\nI1212 06:24:50.260228 20613 solver.cpp:337] Iteration 200, Testing net (#0)\nI1212 06:26:12.014540 20613 solver.cpp:404]     Test net output #0: accuracy = 0.47108\nI1212 06:26:12.014852 20613 solver.cpp:404]     Test net output #1: loss = 1.45354 (* 1 = 1.45354 loss)\nI1212 06:26:13.328027 20613 solver.cpp:228] Iteration 200, loss = 1.4353\nI1212 06:26:13.328086 20613 solver.cpp:244]     Train net output #0: accuracy = 0.448\nI1212 06:26:13.328106 20613 solver.cpp:244]     Train net output #1: loss = 1.4353 (* 1 = 1.4353 loss)\nI1212 06:26:13.414873 20613 sgd_solver.cpp:174] Iteration 200, lr = 0.00599992\nI1212 06:26:13.428737 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.71557\nI1212 06:28:31.779901 20613 solver.cpp:337] Iteration 300, Testing net (#0)\nI1212 06:29:53.528882 20613 solver.cpp:404]     Test net output #0: accuracy = 0.52704\nI1212 06:29:53.529182 20613 solver.cpp:404]     Test net output #1: loss = 1.2984 (* 1 = 1.2984 loss)\nI1212 06:29:54.842502 20613 solver.cpp:228] Iteration 300, loss = 1.24728\nI1212 06:29:54.842547 20613 solver.cpp:244]     Train net output #0: accuracy = 0.504\nI1212 06:29:54.842564 20613 solver.cpp:244]     Train net output #1: loss = 1.24728 (* 1 = 1.24728 loss)\nI1212 06:29:54.933094 20613 sgd_solver.cpp:174] Iteration 300, lr = 0.00900006\nI1212 06:29:54.947044 20613 sgd_solver.cpp:149] Gradient: L2 norm 2.11612\nI1212 06:32:13.282577 20613 solver.cpp:337] Iteration 400, Testing net (#0)\nI1212 06:33:35.032418 20613 solver.cpp:404]     Test net output #0: accuracy = 0.59512\nI1212 06:33:35.032742 20613 solver.cpp:404]     Test net output #1: loss = 1.13619 (* 1 = 1.13619 loss)\nI1212 
06:33:36.345155 20613 solver.cpp:228] Iteration 400, loss = 1.07707\nI1212 06:33:36.345216 20613 solver.cpp:244]     Train net output #0: accuracy = 0.592\nI1212 06:33:36.345237 20613 solver.cpp:244]     Train net output #1: loss = 1.07707 (* 1 = 1.07707 loss)\nI1212 06:33:36.432452 20613 sgd_solver.cpp:174] Iteration 400, lr = 0.012\nI1212 06:33:36.446548 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.88405\nI1212 06:35:54.784446 20613 solver.cpp:337] Iteration 500, Testing net (#0)\nI1212 06:37:16.495735 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62556\nI1212 06:37:16.496012 20613 solver.cpp:404]     Test net output #1: loss = 1.07161 (* 1 = 1.07161 loss)\nI1212 06:37:17.808310 20613 solver.cpp:228] Iteration 500, loss = 0.874339\nI1212 06:37:17.808368 20613 solver.cpp:244]     Train net output #0: accuracy = 0.656\nI1212 06:37:17.808387 20613 solver.cpp:244]     Train net output #1: loss = 0.874339 (* 1 = 0.874339 loss)\nI1212 06:37:17.901096 20613 sgd_solver.cpp:174] Iteration 500, lr = 0.015\nI1212 06:37:17.914976 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.95704\nI1212 06:39:36.232636 20613 solver.cpp:337] Iteration 600, Testing net (#0)\nI1212 06:40:57.953418 20613 solver.cpp:404]     Test net output #0: accuracy = 0.64548\nI1212 06:40:57.953670 20613 solver.cpp:404]     Test net output #1: loss = 1.04263 (* 1 = 1.04263 loss)\nI1212 06:40:59.266810 20613 solver.cpp:228] Iteration 600, loss = 0.773171\nI1212 06:40:59.266871 20613 solver.cpp:244]     Train net output #0: accuracy = 0.72\nI1212 06:40:59.266888 20613 solver.cpp:244]     Train net output #1: loss = 0.773171 (* 1 = 0.773171 loss)\nI1212 06:40:59.356914 20613 sgd_solver.cpp:174] Iteration 600, lr = 0.0179999\nI1212 06:40:59.370707 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.99178\nI1212 06:43:17.686602 20613 solver.cpp:337] Iteration 700, Testing net (#0)\nI1212 06:44:39.395074 20613 solver.cpp:404]     Test net output #0: accuracy = 0.63144\nI1212 06:44:39.395293 20613 
solver.cpp:404]     Test net output #1: loss = 1.12418 (* 1 = 1.12418 loss)\nI1212 06:44:40.708696 20613 solver.cpp:228] Iteration 700, loss = 0.74089\nI1212 06:44:40.708742 20613 solver.cpp:244]     Train net output #0: accuracy = 0.744\nI1212 06:44:40.708758 20613 solver.cpp:244]     Train net output #1: loss = 0.74089 (* 1 = 0.74089 loss)\nI1212 06:44:40.798182 20613 sgd_solver.cpp:174] Iteration 700, lr = 0.0210001\nI1212 06:44:40.812052 20613 sgd_solver.cpp:149] Gradient: L2 norm 2.09767\nI1212 06:46:59.196607 20613 solver.cpp:337] Iteration 800, Testing net (#0)\nI1212 06:48:20.913679 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6668\nI1212 06:48:20.913935 20613 solver.cpp:404]     Test net output #1: loss = 0.975003 (* 1 = 0.975003 loss)\nI1212 06:48:22.228453 20613 solver.cpp:228] Iteration 800, loss = 0.626398\nI1212 06:48:22.228518 20613 solver.cpp:244]     Train net output #0: accuracy = 0.76\nI1212 06:48:22.228543 20613 solver.cpp:244]     Train net output #1: loss = 0.626398 (* 1 = 0.626398 loss)\nI1212 06:48:22.312240 20613 sgd_solver.cpp:174] Iteration 800, lr = 0.024\nI1212 06:48:22.326097 20613 sgd_solver.cpp:149] Gradient: L2 norm 2.00993\nI1212 06:50:40.606783 20613 solver.cpp:337] Iteration 900, Testing net (#0)\nI1212 06:52:02.297008 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65336\nI1212 06:52:02.297297 20613 solver.cpp:404]     Test net output #1: loss = 1.18145 (* 1 = 1.18145 loss)\nI1212 06:52:03.610496 20613 solver.cpp:228] Iteration 900, loss = 0.575988\nI1212 06:52:03.610556 20613 solver.cpp:244]     Train net output #0: accuracy = 0.768\nI1212 06:52:03.610580 20613 solver.cpp:244]     Train net output #1: loss = 0.575988 (* 1 = 0.575988 loss)\nI1212 06:52:03.698813 20613 sgd_solver.cpp:174] Iteration 900, lr = 0.027\nI1212 06:52:03.712692 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.86893\nI1212 06:54:22.010000 20613 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1212 06:55:43.719352 20613 
solver.cpp:404]     Test net output #0: accuracy = 0.62764\nI1212 06:55:43.719571 20613 solver.cpp:404]     Test net output #1: loss = 1.32766 (* 1 = 1.32766 loss)\nI1212 06:55:45.033347 20613 solver.cpp:228] Iteration 1000, loss = 0.539174\nI1212 06:55:45.033408 20613 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI1212 06:55:45.033427 20613 solver.cpp:244]     Train net output #1: loss = 0.539174 (* 1 = 0.539174 loss)\nI1212 06:55:45.119326 20613 sgd_solver.cpp:174] Iteration 1000, lr = 0.03\nI1212 06:55:45.132980 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.95176\nI1212 06:58:03.491044 20613 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1212 06:59:25.202718 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67964\nI1212 06:59:25.202937 20613 solver.cpp:404]     Test net output #1: loss = 1.06117 (* 1 = 1.06117 loss)\nI1212 06:59:26.515527 20613 solver.cpp:228] Iteration 1100, loss = 0.447834\nI1212 06:59:26.515588 20613 solver.cpp:244]     Train net output #0: accuracy = 0.84\nI1212 06:59:26.515606 20613 solver.cpp:244]     Train net output #1: loss = 0.447834 (* 1 = 0.447834 loss)\nI1212 06:59:26.607105 20613 sgd_solver.cpp:174] Iteration 1100, lr = 0.0329999\nI1212 06:59:26.620821 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.93893\nI1212 07:01:44.892331 20613 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1212 07:03:06.457082 20613 solver.cpp:404]     Test net output #0: accuracy = 0.60292\nI1212 07:03:06.457361 20613 solver.cpp:404]     Test net output #1: loss = 1.45355 (* 1 = 1.45355 loss)\nI1212 07:03:07.771119 20613 solver.cpp:228] Iteration 1200, loss = 0.421175\nI1212 07:03:07.771178 20613 solver.cpp:244]     Train net output #0: accuracy = 0.824\nI1212 07:03:07.771198 20613 solver.cpp:244]     Train net output #1: loss = 0.421175 (* 1 = 0.421175 loss)\nI1212 07:03:07.863160 20613 sgd_solver.cpp:174] Iteration 1200, lr = 0.0360001\nI1212 07:03:07.877023 20613 sgd_solver.cpp:149] Gradient: L2 norm 2.20554\nI1212 
07:05:26.163136 20613 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1212 07:06:47.458345 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65544\nI1212 07:06:47.458565 20613 solver.cpp:404]     Test net output #1: loss = 1.25907 (* 1 = 1.25907 loss)\nI1212 07:06:48.772168 20613 solver.cpp:228] Iteration 1300, loss = 0.367358\nI1212 07:06:48.772212 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 07:06:48.772228 20613 solver.cpp:244]     Train net output #1: loss = 0.367358 (* 1 = 0.367358 loss)\nI1212 07:06:48.861187 20613 sgd_solver.cpp:174] Iteration 1300, lr = 0.039\nI1212 07:06:48.875128 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.9363\nI1212 07:09:07.229496 20613 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1212 07:10:28.519917 20613 solver.cpp:404]     Test net output #0: accuracy = 0.56684\nI1212 07:10:28.520165 20613 solver.cpp:404]     Test net output #1: loss = 1.70662 (* 1 = 1.70662 loss)\nI1212 07:10:29.832640 20613 solver.cpp:228] Iteration 1400, loss = 0.340521\nI1212 07:10:29.832685 20613 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1212 07:10:29.832702 20613 solver.cpp:244]     Train net output #1: loss = 0.340521 (* 1 = 0.340521 loss)\nI1212 07:10:29.924437 20613 sgd_solver.cpp:174] Iteration 1400, lr = 0.042\nI1212 07:10:29.938282 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.86485\nI1212 07:12:48.286969 20613 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1212 07:14:09.660812 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69252\nI1212 07:14:09.661039 20613 solver.cpp:404]     Test net output #1: loss = 1.04295 (* 1 = 1.04295 loss)\nI1212 07:14:10.974328 20613 solver.cpp:228] Iteration 1500, loss = 0.366772\nI1212 07:14:10.974373 20613 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1212 07:14:10.974390 20613 solver.cpp:244]     Train net output #1: loss = 0.366772 (* 1 = 0.366772 loss)\nI1212 07:14:11.063113 20613 sgd_solver.cpp:174] Iteration 1500, lr = 
0.045\nI1212 07:14:11.076992 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.66875\nI1212 07:16:29.403867 20613 solver.cpp:337] Iteration 1600, Testing net (#0)\nI1212 07:17:50.930610 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62784\nI1212 07:17:50.930820 20613 solver.cpp:404]     Test net output #1: loss = 1.67544 (* 1 = 1.67544 loss)\nI1212 07:17:52.242887 20613 solver.cpp:228] Iteration 1600, loss = 0.295895\nI1212 07:17:52.242949 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 07:17:52.242966 20613 solver.cpp:244]     Train net output #1: loss = 0.295895 (* 1 = 0.295895 loss)\nI1212 07:17:52.330945 20613 sgd_solver.cpp:174] Iteration 1600, lr = 0.0479999\nI1212 07:17:52.344954 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.85722\nI1212 07:20:10.721928 20613 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1212 07:21:32.301393 20613 solver.cpp:404]     Test net output #0: accuracy = 0.697\nI1212 07:21:32.301630 20613 solver.cpp:404]     Test net output #1: loss = 1.15553 (* 1 = 1.15553 loss)\nI1212 07:21:33.615653 20613 solver.cpp:228] Iteration 1700, loss = 0.280103\nI1212 07:21:33.615715 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 07:21:33.615733 20613 solver.cpp:244]     Train net output #1: loss = 0.280103 (* 1 = 0.280103 loss)\nI1212 07:21:33.699937 20613 sgd_solver.cpp:174] Iteration 1700, lr = 0.0510001\nI1212 07:21:33.713810 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.57882\nI1212 07:23:52.014160 20613 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1212 07:25:13.734431 20613 solver.cpp:404]     Test net output #0: accuracy = 0.58636\nI1212 07:25:13.734673 20613 solver.cpp:404]     Test net output #1: loss = 1.87385 (* 1 = 1.87385 loss)\nI1212 07:25:15.047672 20613 solver.cpp:228] Iteration 1800, loss = 0.303588\nI1212 07:25:15.047734 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 07:25:15.047754 20613 solver.cpp:244]     Train net output #1: loss = 0.303588 (* 1 
= 0.303588 loss)\nI1212 07:25:15.134953 20613 sgd_solver.cpp:174] Iteration 1800, lr = 0.054\nI1212 07:25:15.148851 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.66513\nI1212 07:27:33.466712 20613 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1212 07:28:55.191025 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71124\nI1212 07:28:55.191275 20613 solver.cpp:404]     Test net output #1: loss = 1.0563 (* 1 = 1.0563 loss)\nI1212 07:28:56.504158 20613 solver.cpp:228] Iteration 1900, loss = 0.34891\nI1212 07:28:56.504218 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 07:28:56.504236 20613 solver.cpp:244]     Train net output #1: loss = 0.34891 (* 1 = 0.34891 loss)\nI1212 07:28:56.595777 20613 sgd_solver.cpp:174] Iteration 1900, lr = 0.057\nI1212 07:28:56.609622 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.69913\nI1212 07:31:14.946601 20613 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1212 07:32:36.686936 20613 solver.cpp:404]     Test net output #0: accuracy = 0.64608\nI1212 07:32:36.687222 20613 solver.cpp:404]     Test net output #1: loss = 1.71831 (* 1 = 1.71831 loss)\nI1212 07:32:38.001284 20613 solver.cpp:228] Iteration 2000, loss = 0.238812\nI1212 07:32:38.001327 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 07:32:38.001343 20613 solver.cpp:244]     Train net output #1: loss = 0.238812 (* 1 = 0.238812 loss)\nI1212 07:32:38.087899 20613 sgd_solver.cpp:174] Iteration 2000, lr = 0.0599999\nI1212 07:32:38.101794 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.4869\nI1212 07:34:56.449041 20613 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1212 07:36:18.198500 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65344\nI1212 07:36:18.198752 20613 solver.cpp:404]     Test net output #1: loss = 1.6446 (* 1 = 1.6446 loss)\nI1212 07:36:19.512253 20613 solver.cpp:228] Iteration 2100, loss = 0.30828\nI1212 07:36:19.512311 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 
07:36:19.512329 20613 solver.cpp:244]     Train net output #1: loss = 0.30828 (* 1 = 0.30828 loss)\nI1212 07:36:19.604336 20613 sgd_solver.cpp:174] Iteration 2100, lr = 0.0630001\nI1212 07:36:19.618209 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.67188\nI1212 07:38:38.043069 20613 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1212 07:39:59.771843 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71812\nI1212 07:39:59.772100 20613 solver.cpp:404]     Test net output #1: loss = 1.14006 (* 1 = 1.14006 loss)\nI1212 07:40:01.085780 20613 solver.cpp:228] Iteration 2200, loss = 0.255405\nI1212 07:40:01.085824 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 07:40:01.085840 20613 solver.cpp:244]     Train net output #1: loss = 0.255404 (* 1 = 0.255404 loss)\nI1212 07:40:01.178222 20613 sgd_solver.cpp:174] Iteration 2200, lr = 0.066\nI1212 07:40:01.192116 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.59497\nI1212 07:42:19.422680 20613 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1212 07:43:41.147166 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65168\nI1212 07:43:41.147409 20613 solver.cpp:404]     Test net output #1: loss = 1.80406 (* 1 = 1.80406 loss)\nI1212 07:43:42.461231 20613 solver.cpp:228] Iteration 2300, loss = 0.272457\nI1212 07:43:42.461274 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 07:43:42.461292 20613 solver.cpp:244]     Train net output #1: loss = 0.272457 (* 1 = 0.272457 loss)\nI1212 07:43:42.549432 20613 sgd_solver.cpp:174] Iteration 2300, lr = 0.069\nI1212 07:43:42.563331 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.46058\nI1212 07:46:00.863935 20613 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1212 07:47:22.481007 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66312\nI1212 07:47:22.481257 20613 solver.cpp:404]     Test net output #1: loss = 1.56367 (* 1 = 1.56367 loss)\nI1212 07:47:23.795433 20613 solver.cpp:228] Iteration 2400, loss = 0.325639\nI1212 
07:47:23.795493 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 07:47:23.795511 20613 solver.cpp:244]     Train net output #1: loss = 0.325639 (* 1 = 0.325639 loss)\nI1212 07:47:23.884961 20613 sgd_solver.cpp:174] Iteration 2400, lr = 0.072\nI1212 07:47:23.898934 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.35356\nI1212 07:49:42.283862 20613 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1212 07:51:03.554574 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7204\nI1212 07:51:03.554783 20613 solver.cpp:404]     Test net output #1: loss = 1.034 (* 1 = 1.034 loss)\nI1212 07:51:04.868834 20613 solver.cpp:228] Iteration 2500, loss = 0.217227\nI1212 07:51:04.868896 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 07:51:04.868914 20613 solver.cpp:244]     Train net output #1: loss = 0.217227 (* 1 = 0.217227 loss)\nI1212 07:51:04.958307 20613 sgd_solver.cpp:174] Iteration 2500, lr = 0.0749999\nI1212 07:51:04.972314 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.27557\nI1212 07:53:23.206490 20613 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1212 07:54:44.763376 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6102\nI1212 07:54:44.763635 20613 solver.cpp:404]     Test net output #1: loss = 1.96649 (* 1 = 1.96649 loss)\nI1212 07:54:46.077479 20613 solver.cpp:228] Iteration 2600, loss = 0.174721\nI1212 07:54:46.077527 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 07:54:46.077543 20613 solver.cpp:244]     Train net output #1: loss = 0.174721 (* 1 = 0.174721 loss)\nI1212 07:54:46.164743 20613 sgd_solver.cpp:174] Iteration 2600, lr = 0.0780001\nI1212 07:54:46.178673 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.27527\nI1212 07:57:04.516649 20613 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1212 07:58:26.054450 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68124\nI1212 07:58:26.054656 20613 solver.cpp:404]     Test net output #1: loss = 1.37183 (* 1 = 1.37183 
loss)\nI1212 07:58:27.368888 20613 solver.cpp:228] Iteration 2700, loss = 0.228653\nI1212 07:58:27.368932 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 07:58:27.368947 20613 solver.cpp:244]     Train net output #1: loss = 0.228653 (* 1 = 0.228653 loss)\nI1212 07:58:27.453907 20613 sgd_solver.cpp:174] Iteration 2700, lr = 0.081\nI1212 07:58:27.467775 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.2742\nI1212 08:00:45.739828 20613 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1212 08:02:07.093780 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71092\nI1212 08:02:07.094068 20613 solver.cpp:404]     Test net output #1: loss = 1.444 (* 1 = 1.444 loss)\nI1212 08:02:08.407572 20613 solver.cpp:228] Iteration 2800, loss = 0.102511\nI1212 08:02:08.407616 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 08:02:08.407632 20613 solver.cpp:244]     Train net output #1: loss = 0.102511 (* 1 = 0.102511 loss)\nI1212 08:02:08.495507 20613 sgd_solver.cpp:174] Iteration 2800, lr = 0.084\nI1212 08:02:08.509353 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.17027\nI1212 08:04:26.745790 20613 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1212 08:05:48.463726 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66092\nI1212 08:05:48.463977 20613 solver.cpp:404]     Test net output #1: loss = 1.72975 (* 1 = 1.72975 loss)\nI1212 08:05:49.778024 20613 solver.cpp:228] Iteration 2900, loss = 0.0858457\nI1212 08:05:49.778085 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:05:49.778103 20613 solver.cpp:244]     Train net output #1: loss = 0.0858456 (* 1 = 0.0858456 loss)\nI1212 08:05:49.867836 20613 sgd_solver.cpp:174] Iteration 2900, lr = 0.087\nI1212 08:05:49.881681 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.13229\nI1212 08:08:08.162228 20613 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1212 08:09:29.881177 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7228\nI1212 
08:09:29.881402 20613 solver.cpp:404]     Test net output #1: loss = 1.29251 (* 1 = 1.29251 loss)\nI1212 08:09:31.195235 20613 solver.cpp:228] Iteration 3000, loss = 0.141316\nI1212 08:09:31.195291 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 08:09:31.195308 20613 solver.cpp:244]     Train net output #1: loss = 0.141316 (* 1 = 0.141316 loss)\nI1212 08:09:31.287516 20613 sgd_solver.cpp:174] Iteration 3000, lr = 0.0899999\nI1212 08:09:31.301456 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.06313\nI1212 08:11:49.673615 20613 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1212 08:13:11.024544 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77332\nI1212 08:13:11.024801 20613 solver.cpp:404]     Test net output #1: loss = 0.941807 (* 1 = 0.941807 loss)\nI1212 08:13:12.339038 20613 solver.cpp:228] Iteration 3100, loss = 0.144745\nI1212 08:13:12.339082 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:13:12.339098 20613 solver.cpp:244]     Train net output #1: loss = 0.144745 (* 1 = 0.144745 loss)\nI1212 08:13:12.427935 20613 sgd_solver.cpp:174] Iteration 3100, lr = 0.0930001\nI1212 08:13:12.441668 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.09123\nI1212 08:15:30.740108 20613 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1212 08:16:52.473749 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71316\nI1212 08:16:52.474012 20613 solver.cpp:404]     Test net output #1: loss = 1.34572 (* 1 = 1.34572 loss)\nI1212 08:16:53.787910 20613 solver.cpp:228] Iteration 3200, loss = 0.171412\nI1212 08:16:53.787952 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 08:16:53.787968 20613 solver.cpp:244]     Train net output #1: loss = 0.171411 (* 1 = 0.171411 loss)\nI1212 08:16:53.874953 20613 sgd_solver.cpp:174] Iteration 3200, lr = 0.096\nI1212 08:16:53.888841 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.11653\nI1212 08:19:12.220890 20613 solver.cpp:337] Iteration 3300, Testing net 
(#0)\nI1212 08:20:33.598326 20613 solver.cpp:404]     Test net output #0: accuracy = 0.5688\nI1212 08:20:33.598575 20613 solver.cpp:404]     Test net output #1: loss = 3.03584 (* 1 = 3.03584 loss)\nI1212 08:20:34.910946 20613 solver.cpp:228] Iteration 3300, loss = 0.151101\nI1212 08:20:34.911005 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 08:20:34.911022 20613 solver.cpp:244]     Train net output #1: loss = 0.151101 (* 1 = 0.151101 loss)\nI1212 08:20:34.999105 20613 sgd_solver.cpp:174] Iteration 3300, lr = 0.099\nI1212 08:20:35.012987 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.10547\nI1212 08:22:53.236788 20613 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1212 08:24:14.581068 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76332\nI1212 08:24:14.581312 20613 solver.cpp:404]     Test net output #1: loss = 1.07979 (* 1 = 1.07979 loss)\nI1212 08:24:15.894043 20613 solver.cpp:228] Iteration 3400, loss = 0.110899\nI1212 08:24:15.894101 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 08:24:15.894126 20613 solver.cpp:244]     Train net output #1: loss = 0.110899 (* 1 = 0.110899 loss)\nI1212 08:24:15.988515 20613 sgd_solver.cpp:174] Iteration 3400, lr = 0.102\nI1212 08:24:16.001956 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.990188\nI1212 08:26:34.138690 20613 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1212 08:27:55.446471 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69776\nI1212 08:27:55.446736 20613 solver.cpp:404]     Test net output #1: loss = 1.60812 (* 1 = 1.60812 loss)\nI1212 08:27:56.759546 20613 solver.cpp:228] Iteration 3500, loss = 0.125105\nI1212 08:27:56.759603 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:27:56.759629 20613 solver.cpp:244]     Train net output #1: loss = 0.125105 (* 1 = 0.125105 loss)\nI1212 08:27:56.847147 20613 sgd_solver.cpp:174] Iteration 3500, lr = 0.105\nI1212 08:27:56.861291 20613 sgd_solver.cpp:149] Gradient: L2 norm 
1.00601\nI1212 08:30:15.026042 20613 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1212 08:31:36.781600 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7178\nI1212 08:31:36.781841 20613 solver.cpp:404]     Test net output #1: loss = 1.41225 (* 1 = 1.41225 loss)\nI1212 08:31:38.095955 20613 solver.cpp:228] Iteration 3600, loss = 0.132169\nI1212 08:31:38.095999 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:31:38.096021 20613 solver.cpp:244]     Train net output #1: loss = 0.132169 (* 1 = 0.132169 loss)\nI1212 08:31:38.186802 20613 sgd_solver.cpp:174] Iteration 3600, lr = 0.108\nI1212 08:31:38.200767 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.08792\nI1212 08:33:56.492040 20613 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1212 08:35:17.897465 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71176\nI1212 08:35:17.897732 20613 solver.cpp:404]     Test net output #1: loss = 1.36774 (* 1 = 1.36774 loss)\nI1212 08:35:19.211295 20613 solver.cpp:228] Iteration 3700, loss = 0.118605\nI1212 08:35:19.211347 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:35:19.211371 20613 solver.cpp:244]     Train net output #1: loss = 0.118605 (* 1 = 0.118605 loss)\nI1212 08:35:19.300770 20613 sgd_solver.cpp:174] Iteration 3700, lr = 0.111\nI1212 08:35:19.314757 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.869602\nI1212 08:37:37.460677 20613 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1212 08:38:58.747176 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72476\nI1212 08:38:58.747395 20613 solver.cpp:404]     Test net output #1: loss = 1.30046 (* 1 = 1.30046 loss)\nI1212 08:39:00.061424 20613 solver.cpp:228] Iteration 3800, loss = 0.171073\nI1212 08:39:00.061470 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 08:39:00.061492 20613 solver.cpp:244]     Train net output #1: loss = 0.171073 (* 1 = 0.171073 loss)\nI1212 08:39:00.154136 20613 sgd_solver.cpp:174] Iteration 
3800, lr = 0.114\nI1212 08:39:00.168001 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.07006\nI1212 08:41:18.441407 20613 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1212 08:42:39.892145 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69996\nI1212 08:42:39.892433 20613 solver.cpp:404]     Test net output #1: loss = 1.48882 (* 1 = 1.48882 loss)\nI1212 08:42:41.205008 20613 solver.cpp:228] Iteration 3900, loss = 0.0922242\nI1212 08:42:41.205062 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 08:42:41.205087 20613 solver.cpp:244]     Train net output #1: loss = 0.0922242 (* 1 = 0.0922242 loss)\nI1212 08:42:41.293891 20613 sgd_solver.cpp:174] Iteration 3900, lr = 0.117\nI1212 08:42:41.307792 20613 sgd_solver.cpp:149] Gradient: L2 norm 1.01674\nI1212 08:44:59.595978 20613 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1212 08:46:21.372355 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7424\nI1212 08:46:21.372622 20613 solver.cpp:404]     Test net output #1: loss = 1.29197 (* 1 = 1.29197 loss)\nI1212 08:46:22.685597 20613 solver.cpp:228] Iteration 4000, loss = 0.136131\nI1212 08:46:22.685654 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 08:46:22.685679 20613 solver.cpp:244]     Train net output #1: loss = 0.136131 (* 1 = 0.136131 loss)\nI1212 08:46:22.775817 20613 sgd_solver.cpp:174] Iteration 4000, lr = 0.12\nI1212 08:46:22.789669 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.900418\nI1212 08:48:41.039223 20613 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1212 08:50:02.803177 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76712\nI1212 08:50:02.803417 20613 solver.cpp:404]     Test net output #1: loss = 1.07009 (* 1 = 1.07009 loss)\nI1212 08:50:04.116880 20613 solver.cpp:228] Iteration 4100, loss = 0.0920577\nI1212 08:50:04.116940 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:50:04.116964 20613 solver.cpp:244]     Train net output #1: loss = 
0.0920577 (* 1 = 0.0920577 loss)\nI1212 08:50:04.207619 20613 sgd_solver.cpp:174] Iteration 4100, lr = 0.123\nI1212 08:50:04.221554 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.729343\nI1212 08:52:22.422484 20613 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1212 08:53:44.124316 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73332\nI1212 08:53:44.124533 20613 solver.cpp:404]     Test net output #1: loss = 1.45893 (* 1 = 1.45893 loss)\nI1212 08:53:45.437501 20613 solver.cpp:228] Iteration 4200, loss = 0.0646437\nI1212 08:53:45.437551 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:53:45.437574 20613 solver.cpp:244]     Train net output #1: loss = 0.0646436 (* 1 = 0.0646436 loss)\nI1212 08:53:45.525632 20613 sgd_solver.cpp:174] Iteration 4200, lr = 0.126\nI1212 08:53:45.539639 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.937816\nI1212 08:56:03.792176 20613 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1212 08:57:25.412057 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76432\nI1212 08:57:25.412298 20613 solver.cpp:404]     Test net output #1: loss = 1.0993 (* 1 = 1.0993 loss)\nI1212 08:57:26.724968 20613 solver.cpp:228] Iteration 4300, loss = 0.0443304\nI1212 08:57:26.725028 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 08:57:26.725054 20613 solver.cpp:244]     Train net output #1: loss = 0.0443303 (* 1 = 0.0443303 loss)\nI1212 08:57:26.812407 20613 sgd_solver.cpp:174] Iteration 4300, lr = 0.129\nI1212 08:57:26.826244 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.814063\nI1212 08:59:44.933336 20613 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1212 09:01:06.289645 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI1212 09:01:06.289888 20613 solver.cpp:404]     Test net output #1: loss = 0.963629 (* 1 = 0.963629 loss)\nI1212 09:01:07.602543 20613 solver.cpp:228] Iteration 4400, loss = 0.112725\nI1212 09:01:07.602589 20613 solver.cpp:244]     Train net output #0: 
accuracy = 0.944\nI1212 09:01:07.602613 20613 solver.cpp:244]     Train net output #1: loss = 0.112725 (* 1 = 0.112725 loss)\nI1212 09:01:07.691802 20613 sgd_solver.cpp:174] Iteration 4400, lr = 0.132\nI1212 09:01:07.705804 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.900217\nI1212 09:03:25.894662 20613 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1212 09:04:47.592075 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70792\nI1212 09:04:47.592339 20613 solver.cpp:404]     Test net output #1: loss = 1.66985 (* 1 = 1.66985 loss)\nI1212 09:04:48.906085 20613 solver.cpp:228] Iteration 4500, loss = 0.0867565\nI1212 09:04:48.906139 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:04:48.906165 20613 solver.cpp:244]     Train net output #1: loss = 0.0867564 (* 1 = 0.0867564 loss)\nI1212 09:04:48.993551 20613 sgd_solver.cpp:174] Iteration 4500, lr = 0.135\nI1212 09:04:49.007601 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.813427\nI1212 09:07:07.115149 20613 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1212 09:08:28.839614 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73316\nI1212 09:08:28.839879 20613 solver.cpp:404]     Test net output #1: loss = 1.42372 (* 1 = 1.42372 loss)\nI1212 09:08:30.153534 20613 solver.cpp:228] Iteration 4600, loss = 0.0837318\nI1212 09:08:30.153589 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 09:08:30.153612 20613 solver.cpp:244]     Train net output #1: loss = 0.0837317 (* 1 = 0.0837317 loss)\nI1212 09:08:30.241655 20613 sgd_solver.cpp:174] Iteration 4600, lr = 0.138\nI1212 09:08:30.255612 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.836678\nI1212 09:10:48.488667 20613 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1212 09:12:10.176177 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77316\nI1212 09:12:10.176457 20613 solver.cpp:404]     Test net output #1: loss = 1.10281 (* 1 = 1.10281 loss)\nI1212 09:12:11.489049 20613 solver.cpp:228] Iteration 
4700, loss = 0.0589057\nI1212 09:12:11.489095 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:12:11.489120 20613 solver.cpp:244]     Train net output #1: loss = 0.0589057 (* 1 = 0.0589057 loss)\nI1212 09:12:11.582535 20613 sgd_solver.cpp:174] Iteration 4700, lr = 0.141\nI1212 09:12:11.596395 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.796495\nI1212 09:14:29.848539 20613 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1212 09:15:51.558639 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73784\nI1212 09:15:51.558883 20613 solver.cpp:404]     Test net output #1: loss = 1.38626 (* 1 = 1.38626 loss)\nI1212 09:15:52.872107 20613 solver.cpp:228] Iteration 4800, loss = 0.0654544\nI1212 09:15:52.872160 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:15:52.872185 20613 solver.cpp:244]     Train net output #1: loss = 0.0654544 (* 1 = 0.0654544 loss)\nI1212 09:15:52.957597 20613 sgd_solver.cpp:174] Iteration 4800, lr = 0.144\nI1212 09:15:52.971529 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.86606\nI1212 09:18:11.153018 20613 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1212 09:19:32.883249 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77624\nI1212 09:19:32.883492 20613 solver.cpp:404]     Test net output #1: loss = 1.11714 (* 1 = 1.11714 loss)\nI1212 09:19:34.196923 20613 solver.cpp:228] Iteration 4900, loss = 0.0663906\nI1212 09:19:34.196967 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:19:34.196991 20613 solver.cpp:244]     Train net output #1: loss = 0.0663906 (* 1 = 0.0663906 loss)\nI1212 09:19:34.288594 20613 sgd_solver.cpp:174] Iteration 4900, lr = 0.147\nI1212 09:19:34.302429 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.659294\nI1212 09:21:52.552942 20613 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1212 09:23:13.867064 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77708\nI1212 09:23:13.867368 20613 solver.cpp:404]     Test net output 
#1: loss = 1.14502 (* 1 = 1.14502 loss)\nI1212 09:23:15.180943 20613 solver.cpp:228] Iteration 5000, loss = 0.0360676\nI1212 09:23:15.181000 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 09:23:15.181027 20613 solver.cpp:244]     Train net output #1: loss = 0.0360675 (* 1 = 0.0360675 loss)\nI1212 09:23:15.268615 20613 sgd_solver.cpp:174] Iteration 5000, lr = 0.15\nI1212 09:23:15.282433 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.68127\nI1212 09:25:33.582739 20613 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1212 09:26:54.911564 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76036\nI1212 09:26:54.911818 20613 solver.cpp:404]     Test net output #1: loss = 1.24069 (* 1 = 1.24069 loss)\nI1212 09:26:56.226699 20613 solver.cpp:228] Iteration 5100, loss = 0.115909\nI1212 09:26:56.226750 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:26:56.226774 20613 solver.cpp:244]     Train net output #1: loss = 0.115909 (* 1 = 0.115909 loss)\nI1212 09:26:56.316874 20613 sgd_solver.cpp:174] Iteration 5100, lr = 0.153\nI1212 09:26:56.330667 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.744167\nI1212 09:29:14.543216 20613 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1212 09:30:35.963217 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77944\nI1212 09:30:35.963476 20613 solver.cpp:404]     Test net output #1: loss = 1.23683 (* 1 = 1.23683 loss)\nI1212 09:30:37.276231 20613 solver.cpp:228] Iteration 5200, loss = 0.0602076\nI1212 09:30:37.276275 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 09:30:37.276298 20613 solver.cpp:244]     Train net output #1: loss = 0.0602075 (* 1 = 0.0602075 loss)\nI1212 09:30:37.361100 20613 sgd_solver.cpp:174] Iteration 5200, lr = 0.156\nI1212 09:30:37.374863 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.785102\nI1212 09:32:55.604339 20613 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1212 09:34:17.006721 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.7488\nI1212 09:34:17.006945 20613 solver.cpp:404]     Test net output #1: loss = 1.37928 (* 1 = 1.37928 loss)\nI1212 09:34:18.319995 20613 solver.cpp:228] Iteration 5300, loss = 0.0567701\nI1212 09:34:18.320055 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 09:34:18.320080 20613 solver.cpp:244]     Train net output #1: loss = 0.05677 (* 1 = 0.05677 loss)\nI1212 09:34:18.407711 20613 sgd_solver.cpp:174] Iteration 5300, lr = 0.159\nI1212 09:34:18.421638 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.894537\nI1212 09:36:36.668359 20613 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1212 09:37:58.040235 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7572\nI1212 09:37:58.040496 20613 solver.cpp:404]     Test net output #1: loss = 1.21481 (* 1 = 1.21481 loss)\nI1212 09:37:59.353658 20613 solver.cpp:228] Iteration 5400, loss = 0.0581382\nI1212 09:37:59.353713 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 09:37:59.353737 20613 solver.cpp:244]     Train net output #1: loss = 0.0581381 (* 1 = 0.0581381 loss)\nI1212 09:37:59.446707 20613 sgd_solver.cpp:174] Iteration 5400, lr = 0.162\nI1212 09:37:59.460626 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.836064\nI1212 09:40:17.661463 20613 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1212 09:41:39.024214 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80276\nI1212 09:41:39.024435 20613 solver.cpp:404]     Test net output #1: loss = 0.888026 (* 1 = 0.888026 loss)\nI1212 09:41:40.338665 20613 solver.cpp:228] Iteration 5500, loss = 0.0175064\nI1212 09:41:40.338712 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 09:41:40.338735 20613 solver.cpp:244]     Train net output #1: loss = 0.0175063 (* 1 = 0.0175063 loss)\nI1212 09:41:40.424871 20613 sgd_solver.cpp:174] Iteration 5500, lr = 0.165\nI1212 09:41:40.438766 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.687444\nI1212 09:43:58.613191 20613 solver.cpp:337] 
Iteration 5600, Testing net (#0)\nI1212 09:45:20.032794 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76816\nI1212 09:45:20.033012 20613 solver.cpp:404]     Test net output #1: loss = 1.06827 (* 1 = 1.06827 loss)\nI1212 09:45:21.345901 20613 solver.cpp:228] Iteration 5600, loss = 0.0733377\nI1212 09:45:21.345954 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 09:45:21.345978 20613 solver.cpp:244]     Train net output #1: loss = 0.0733376 (* 1 = 0.0733376 loss)\nI1212 09:45:21.436278 20613 sgd_solver.cpp:174] Iteration 5600, lr = 0.168\nI1212 09:45:21.450219 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.731818\nI1212 09:47:39.663785 20613 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1212 09:49:01.189786 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77068\nI1212 09:49:01.190021 20613 solver.cpp:404]     Test net output #1: loss = 1.15425 (* 1 = 1.15425 loss)\nI1212 09:49:02.503471 20613 solver.cpp:228] Iteration 5700, loss = 0.0315854\nI1212 09:49:02.503520 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 09:49:02.503545 20613 solver.cpp:244]     Train net output #1: loss = 0.0315853 (* 1 = 0.0315853 loss)\nI1212 09:49:02.590283 20613 sgd_solver.cpp:174] Iteration 5700, lr = 0.171\nI1212 09:49:02.604203 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.684991\nI1212 09:51:20.741255 20613 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1212 09:52:42.375272 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75304\nI1212 09:52:42.375550 20613 solver.cpp:404]     Test net output #1: loss = 1.31392 (* 1 = 1.31392 loss)\nI1212 09:52:43.689153 20613 solver.cpp:228] Iteration 5800, loss = 0.108675\nI1212 09:52:43.689213 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 09:52:43.689236 20613 solver.cpp:244]     Train net output #1: loss = 0.108675 (* 1 = 0.108675 loss)\nI1212 09:52:43.778944 20613 sgd_solver.cpp:174] Iteration 5800, lr = 0.174\nI1212 09:52:43.792860 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.773913\nI1212 09:55:01.971812 20613 solver.cpp:337] Iteration 5900, Testing net (#0)\nI1212 09:56:23.667189 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73224\nI1212 09:56:23.667423 20613 solver.cpp:404]     Test net output #1: loss = 1.47475 (* 1 = 1.47475 loss)\nI1212 09:56:24.979724 20613 solver.cpp:228] Iteration 5900, loss = 0.0703566\nI1212 09:56:24.979786 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:56:24.979811 20613 solver.cpp:244]     Train net output #1: loss = 0.0703565 (* 1 = 0.0703565 loss)\nI1212 09:56:25.069419 20613 sgd_solver.cpp:174] Iteration 5900, lr = 0.177\nI1212 09:56:25.083524 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.633493\nI1212 09:58:43.250206 20613 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1212 10:00:04.960183 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79352\nI1212 10:00:04.960417 20613 solver.cpp:404]     Test net output #1: loss = 0.94699 (* 1 = 0.94699 loss)\nI1212 10:00:06.272840 20613 solver.cpp:228] Iteration 6000, loss = 0.0459927\nI1212 10:00:06.272900 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 10:00:06.272925 20613 solver.cpp:244]     Train net output #1: loss = 0.0459925 (* 1 = 0.0459925 loss)\nI1212 10:00:06.362056 20613 sgd_solver.cpp:174] Iteration 6000, lr = 0.18\nI1212 10:00:06.375566 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.701574\nI1212 10:02:24.591433 20613 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1212 10:03:46.126703 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77532\nI1212 10:03:46.126971 20613 solver.cpp:404]     Test net output #1: loss = 1.02529 (* 1 = 1.02529 loss)\nI1212 10:03:47.440186 20613 solver.cpp:228] Iteration 6100, loss = 0.0436661\nI1212 10:03:47.440243 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 10:03:47.440269 20613 solver.cpp:244]     Train net output #1: loss = 0.043666 (* 1 = 0.043666 loss)\nI1212 
10:03:47.526144 20613 sgd_solver.cpp:174] Iteration 6100, lr = 0.183\nI1212 10:03:47.540002 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.565425\nI1212 10:06:05.783767 20613 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1212 10:07:27.346989 20613 solver.cpp:404]     Test net output #0: accuracy = 0.788\nI1212 10:07:27.347223 20613 solver.cpp:404]     Test net output #1: loss = 1.06889 (* 1 = 1.06889 loss)\nI1212 10:07:28.660992 20613 solver.cpp:228] Iteration 6200, loss = 0.0537052\nI1212 10:07:28.661039 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 10:07:28.661063 20613 solver.cpp:244]     Train net output #1: loss = 0.0537051 (* 1 = 0.0537051 loss)\nI1212 10:07:28.748606 20613 sgd_solver.cpp:174] Iteration 6200, lr = 0.186\nI1212 10:07:28.762446 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.590926\nI1212 10:09:47.073330 20613 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1212 10:11:08.713757 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7974\nI1212 10:11:08.714018 20613 solver.cpp:404]     Test net output #1: loss = 0.998095 (* 1 = 0.998095 loss)\nI1212 10:11:10.027173 20613 solver.cpp:228] Iteration 6300, loss = 0.0710801\nI1212 10:11:10.027235 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 10:11:10.027259 20613 solver.cpp:244]     Train net output #1: loss = 0.07108 (* 1 = 0.07108 loss)\nI1212 10:11:10.118695 20613 sgd_solver.cpp:174] Iteration 6300, lr = 0.189\nI1212 10:11:10.132513 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.616095\nI1212 10:13:28.427716 20613 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1212 10:14:50.171735 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7936\nI1212 10:14:50.171975 20613 solver.cpp:404]     Test net output #1: loss = 1.08059 (* 1 = 1.08059 loss)\nI1212 10:14:51.484355 20613 solver.cpp:228] Iteration 6400, loss = 0.0630696\nI1212 10:14:51.484400 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:14:51.484422 20613 
solver.cpp:244]     Train net output #1: loss = 0.0630695 (* 1 = 0.0630695 loss)\nI1212 10:14:51.575558 20613 sgd_solver.cpp:174] Iteration 6400, lr = 0.192\nI1212 10:14:51.589469 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.617619\nI1212 10:17:09.873024 20613 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1212 10:18:31.278698 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76816\nI1212 10:18:31.278964 20613 solver.cpp:404]     Test net output #1: loss = 1.13754 (* 1 = 1.13754 loss)\nI1212 10:18:32.591620 20613 solver.cpp:228] Iteration 6500, loss = 0.0330694\nI1212 10:18:32.591670 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 10:18:32.591691 20613 solver.cpp:244]     Train net output #1: loss = 0.0330692 (* 1 = 0.0330692 loss)\nI1212 10:18:32.681082 20613 sgd_solver.cpp:174] Iteration 6500, lr = 0.195\nI1212 10:18:32.695022 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.608305\nI1212 10:20:51.000147 20613 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1212 10:22:12.645812 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82288\nI1212 10:22:12.646082 20613 solver.cpp:404]     Test net output #1: loss = 0.843268 (* 1 = 0.843268 loss)\nI1212 10:22:13.958299 20613 solver.cpp:228] Iteration 6600, loss = 0.107251\nI1212 10:22:13.958353 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 10:22:13.958371 20613 solver.cpp:244]     Train net output #1: loss = 0.107251 (* 1 = 0.107251 loss)\nI1212 10:22:14.045526 20613 sgd_solver.cpp:174] Iteration 6600, lr = 0.198\nI1212 10:22:14.059336 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.757723\nI1212 10:24:32.361918 20613 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1212 10:25:53.900049 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79008\nI1212 10:25:53.900312 20613 solver.cpp:404]     Test net output #1: loss = 1.02023 (* 1 = 1.02023 loss)\nI1212 10:25:55.213110 20613 solver.cpp:228] Iteration 6700, loss = 0.0870944\nI1212 10:25:55.213153 
20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 10:25:55.213171 20613 solver.cpp:244]     Train net output #1: loss = 0.0870943 (* 1 = 0.0870943 loss)\nI1212 10:25:55.298960 20613 sgd_solver.cpp:174] Iteration 6700, lr = 0.201\nI1212 10:25:55.312794 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.690017\nI1212 10:28:13.587810 20613 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1212 10:29:35.191161 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79744\nI1212 10:29:35.191396 20613 solver.cpp:404]     Test net output #1: loss = 1.04173 (* 1 = 1.04173 loss)\nI1212 10:29:36.504271 20613 solver.cpp:228] Iteration 6800, loss = 0.0387573\nI1212 10:29:36.504314 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 10:29:36.504330 20613 solver.cpp:244]     Train net output #1: loss = 0.0387572 (* 1 = 0.0387572 loss)\nI1212 10:29:36.590556 20613 sgd_solver.cpp:174] Iteration 6800, lr = 0.204\nI1212 10:29:36.604454 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.565462\nI1212 10:31:54.901441 20613 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1212 10:33:16.636584 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7704\nI1212 10:33:16.636837 20613 solver.cpp:404]     Test net output #1: loss = 1.18453 (* 1 = 1.18453 loss)\nI1212 10:33:17.949930 20613 solver.cpp:228] Iteration 6900, loss = 0.0891181\nI1212 10:33:17.949976 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:33:17.949992 20613 solver.cpp:244]     Train net output #1: loss = 0.089118 (* 1 = 0.089118 loss)\nI1212 10:33:18.041605 20613 sgd_solver.cpp:174] Iteration 6900, lr = 0.207\nI1212 10:33:18.055446 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.596717\nI1212 10:35:36.354112 20613 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1212 10:36:57.996423 20613 solver.cpp:404]     Test net output #0: accuracy = 0.807\nI1212 10:36:57.996672 20613 solver.cpp:404]     Test net output #1: loss = 0.895832 (* 1 = 0.895832 loss)\nI1212 
10:36:59.310457 20613 solver.cpp:228] Iteration 7000, loss = 0.0293432\nI1212 10:36:59.310503 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 10:36:59.310519 20613 solver.cpp:244]     Train net output #1: loss = 0.029343 (* 1 = 0.029343 loss)\nI1212 10:36:59.398676 20613 sgd_solver.cpp:174] Iteration 7000, lr = 0.21\nI1212 10:36:59.412519 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.641727\nI1212 10:39:16.791026 20613 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1212 10:40:38.148579 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82816\nI1212 10:40:38.148800 20613 solver.cpp:404]     Test net output #1: loss = 0.75619 (* 1 = 0.75619 loss)\nI1212 10:40:39.462731 20613 solver.cpp:228] Iteration 7100, loss = 0.0784665\nI1212 10:40:39.462780 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:40:39.462797 20613 solver.cpp:244]     Train net output #1: loss = 0.0784664 (* 1 = 0.0784664 loss)\nI1212 10:40:39.555970 20613 sgd_solver.cpp:174] Iteration 7100, lr = 0.213\nI1212 10:40:39.569952 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.521628\nI1212 10:42:57.845221 20613 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1212 10:44:19.180622 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79808\nI1212 10:44:19.180853 20613 solver.cpp:404]     Test net output #1: loss = 1.04798 (* 1 = 1.04798 loss)\nI1212 10:44:20.494544 20613 solver.cpp:228] Iteration 7200, loss = 0.128322\nI1212 10:44:20.494604 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 10:44:20.494622 20613 solver.cpp:244]     Train net output #1: loss = 0.128322 (* 1 = 0.128322 loss)\nI1212 10:44:20.581015 20613 sgd_solver.cpp:174] Iteration 7200, lr = 0.216\nI1212 10:44:20.594812 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.80846\nI1212 10:46:37.951757 20613 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1212 10:47:59.277560 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79956\nI1212 10:47:59.277815 
20613 solver.cpp:404]     Test net output #1: loss = 0.897744 (* 1 = 0.897744 loss)\nI1212 10:48:00.591850 20613 solver.cpp:228] Iteration 7300, loss = 0.0761182\nI1212 10:48:00.591897 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 10:48:00.591913 20613 solver.cpp:244]     Train net output #1: loss = 0.0761181 (* 1 = 0.0761181 loss)\nI1212 10:48:00.680075 20613 sgd_solver.cpp:174] Iteration 7300, lr = 0.219\nI1212 10:48:00.693977 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.703518\nI1212 10:50:19.149237 20613 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1212 10:51:40.520380 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8174\nI1212 10:51:40.520589 20613 solver.cpp:404]     Test net output #1: loss = 0.823042 (* 1 = 0.823042 loss)\nI1212 10:51:41.835309 20613 solver.cpp:228] Iteration 7400, loss = 0.0570529\nI1212 10:51:41.835364 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:51:41.835389 20613 solver.cpp:244]     Train net output #1: loss = 0.0570528 (* 1 = 0.0570528 loss)\nI1212 10:51:41.924815 20613 sgd_solver.cpp:174] Iteration 7400, lr = 0.222\nI1212 10:51:41.938786 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.608288\nI1212 10:54:00.408272 20613 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1212 10:55:21.823971 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8354\nI1212 10:55:21.824224 20613 solver.cpp:404]     Test net output #1: loss = 0.683481 (* 1 = 0.683481 loss)\nI1212 10:55:23.138748 20613 solver.cpp:228] Iteration 7500, loss = 0.0160493\nI1212 10:55:23.138797 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 10:55:23.138818 20613 solver.cpp:244]     Train net output #1: loss = 0.0160492 (* 1 = 0.0160492 loss)\nI1212 10:55:23.227807 20613 sgd_solver.cpp:174] Iteration 7500, lr = 0.225\nI1212 10:55:23.241698 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.676872\nI1212 10:57:41.775974 20613 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1212 
10:59:03.244148 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80388\nI1212 10:59:03.244398 20613 solver.cpp:404]     Test net output #1: loss = 1.083 (* 1 = 1.083 loss)\nI1212 10:59:04.558878 20613 solver.cpp:228] Iteration 7600, loss = 0.0224736\nI1212 10:59:04.558933 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 10:59:04.558950 20613 solver.cpp:244]     Train net output #1: loss = 0.0224735 (* 1 = 0.0224735 loss)\nI1212 10:59:04.646431 20613 sgd_solver.cpp:174] Iteration 7600, lr = 0.228\nI1212 10:59:04.660358 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.593213\nI1212 11:01:23.118309 20613 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1212 11:02:44.656764 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75956\nI1212 11:02:44.657064 20613 solver.cpp:404]     Test net output #1: loss = 1.24578 (* 1 = 1.24578 loss)\nI1212 11:02:45.970952 20613 solver.cpp:228] Iteration 7700, loss = 0.0813018\nI1212 11:02:45.971002 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:02:45.971020 20613 solver.cpp:244]     Train net output #1: loss = 0.0813017 (* 1 = 0.0813017 loss)\nI1212 11:02:46.059470 20613 sgd_solver.cpp:174] Iteration 7700, lr = 0.231\nI1212 11:02:46.073396 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.644384\nI1212 11:05:03.780792 20613 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1212 11:06:25.460410 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79712\nI1212 11:06:25.460664 20613 solver.cpp:404]     Test net output #1: loss = 1.00163 (* 1 = 1.00163 loss)\nI1212 11:06:26.774904 20613 solver.cpp:228] Iteration 7800, loss = 0.105627\nI1212 11:06:26.774947 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:06:26.774963 20613 solver.cpp:244]     Train net output #1: loss = 0.105627 (* 1 = 0.105627 loss)\nI1212 11:06:26.861316 20613 sgd_solver.cpp:174] Iteration 7800, lr = 0.234\nI1212 11:06:26.875174 20613 sgd_solver.cpp:149] Gradient: L2 norm 
0.607624\nI1212 11:08:45.421906 20613 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1212 11:10:06.964658 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74588\nI1212 11:10:06.964884 20613 solver.cpp:404]     Test net output #1: loss = 1.41849 (* 1 = 1.41849 loss)\nI1212 11:10:08.279021 20613 solver.cpp:228] Iteration 7900, loss = 0.101725\nI1212 11:10:08.279070 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:10:08.279088 20613 solver.cpp:244]     Train net output #1: loss = 0.101725 (* 1 = 0.101725 loss)\nI1212 11:10:08.369419 20613 sgd_solver.cpp:174] Iteration 7900, lr = 0.237\nI1212 11:10:08.383189 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.60075\nI1212 11:12:26.938760 20613 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1212 11:13:48.515990 20613 solver.cpp:404]     Test net output #0: accuracy = 0.796\nI1212 11:13:48.516217 20613 solver.cpp:404]     Test net output #1: loss = 1.03733 (* 1 = 1.03733 loss)\nI1212 11:13:49.830127 20613 solver.cpp:228] Iteration 8000, loss = 0.0833473\nI1212 11:13:49.830184 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:13:49.830201 20613 solver.cpp:244]     Train net output #1: loss = 0.0833472 (* 1 = 0.0833472 loss)\nI1212 11:13:49.923990 20613 sgd_solver.cpp:174] Iteration 8000, lr = 0.24\nI1212 11:13:49.937959 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.521547\nI1212 11:16:07.688977 20613 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1212 11:17:29.280328 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77068\nI1212 11:17:29.280581 20613 solver.cpp:404]     Test net output #1: loss = 1.09603 (* 1 = 1.09603 loss)\nI1212 11:17:30.595110 20613 solver.cpp:228] Iteration 8100, loss = 0.0829946\nI1212 11:17:30.595165 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 11:17:30.595183 20613 solver.cpp:244]     Train net output #1: loss = 0.0829945 (* 1 = 0.0829945 loss)\nI1212 11:17:30.687736 20613 sgd_solver.cpp:174] 
Iteration 8100, lr = 0.243\nI1212 11:17:30.701689 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.728054\nI1212 11:19:49.076280 20613 solver.cpp:337] Iteration 8200, Testing net (#0)\nI1212 11:21:10.682606 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8168\nI1212 11:21:10.682838 20613 solver.cpp:404]     Test net output #1: loss = 0.792507 (* 1 = 0.792507 loss)\nI1212 11:21:11.997361 20613 solver.cpp:228] Iteration 8200, loss = 0.0424467\nI1212 11:21:11.997417 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 11:21:11.997434 20613 solver.cpp:244]     Train net output #1: loss = 0.0424466 (* 1 = 0.0424466 loss)\nI1212 11:21:12.091640 20613 sgd_solver.cpp:174] Iteration 8200, lr = 0.246\nI1212 11:21:12.105540 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.63278\nI1212 11:23:30.539697 20613 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1212 11:24:51.961951 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75564\nI1212 11:24:51.962182 20613 solver.cpp:404]     Test net output #1: loss = 1.16025 (* 1 = 1.16025 loss)\nI1212 11:24:53.276582 20613 solver.cpp:228] Iteration 8300, loss = 0.0671413\nI1212 11:24:53.276638 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:24:53.276655 20613 solver.cpp:244]     Train net output #1: loss = 0.0671412 (* 1 = 0.0671412 loss)\nI1212 11:24:53.369911 20613 sgd_solver.cpp:174] Iteration 8300, lr = 0.249\nI1212 11:24:53.383839 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.622688\nI1212 11:27:11.849547 20613 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1212 11:28:33.535578 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77048\nI1212 11:28:33.535826 20613 solver.cpp:404]     Test net output #1: loss = 1.16011 (* 1 = 1.16011 loss)\nI1212 11:28:34.850365 20613 solver.cpp:228] Iteration 8400, loss = 0.121847\nI1212 11:28:34.850409 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 11:28:34.850426 20613 solver.cpp:244]     Train net output #1: 
loss = 0.121847 (* 1 = 0.121847 loss)\nI1212 11:28:34.938199 20613 sgd_solver.cpp:174] Iteration 8400, lr = 0.252\nI1212 11:28:34.952179 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.671743\nI1212 11:30:52.528815 20613 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1212 11:32:14.226989 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81768\nI1212 11:32:14.227259 20613 solver.cpp:404]     Test net output #1: loss = 0.771623 (* 1 = 0.771623 loss)\nI1212 11:32:15.540088 20613 solver.cpp:228] Iteration 8500, loss = 0.0791735\nI1212 11:32:15.540140 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 11:32:15.540158 20613 solver.cpp:244]     Train net output #1: loss = 0.0791733 (* 1 = 0.0791733 loss)\nI1212 11:32:15.631038 20613 sgd_solver.cpp:174] Iteration 8500, lr = 0.255\nI1212 11:32:15.644968 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.690662\nI1212 11:34:33.053169 20613 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1212 11:35:54.725014 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77796\nI1212 11:35:54.725241 20613 solver.cpp:404]     Test net output #1: loss = 1.09651 (* 1 = 1.09651 loss)\nI1212 11:35:56.038002 20613 solver.cpp:228] Iteration 8600, loss = 0.089599\nI1212 11:35:56.038059 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 11:35:56.038076 20613 solver.cpp:244]     Train net output #1: loss = 0.0895989 (* 1 = 0.0895989 loss)\nI1212 11:35:56.127504 20613 sgd_solver.cpp:174] Iteration 8600, lr = 0.258\nI1212 11:35:56.140954 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.597206\nI1212 11:38:13.596266 20613 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1212 11:39:35.308435 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77652\nI1212 11:39:35.308665 20613 solver.cpp:404]     Test net output #1: loss = 0.979407 (* 1 = 0.979407 loss)\nI1212 11:39:36.620565 20613 solver.cpp:228] Iteration 8700, loss = 0.0690541\nI1212 11:39:36.620620 20613 solver.cpp:244]     Train net 
output #0: accuracy = 0.968\nI1212 11:39:36.620638 20613 solver.cpp:244]     Train net output #1: loss = 0.069054 (* 1 = 0.069054 loss)\nI1212 11:39:36.712429 20613 sgd_solver.cpp:174] Iteration 8700, lr = 0.261\nI1212 11:39:36.726320 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.687342\nI1212 11:41:54.979944 20613 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1212 11:43:16.690439 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78352\nI1212 11:43:16.690712 20613 solver.cpp:404]     Test net output #1: loss = 1.06992 (* 1 = 1.06992 loss)\nI1212 11:43:18.002892 20613 solver.cpp:228] Iteration 8800, loss = 0.0211361\nI1212 11:43:18.002951 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 11:43:18.002969 20613 solver.cpp:244]     Train net output #1: loss = 0.021136 (* 1 = 0.021136 loss)\nI1212 11:43:18.097826 20613 sgd_solver.cpp:174] Iteration 8800, lr = 0.264\nI1212 11:43:18.111727 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.539667\nI1212 11:45:35.567396 20613 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1212 11:46:57.267557 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79028\nI1212 11:46:57.267827 20613 solver.cpp:404]     Test net output #1: loss = 0.983256 (* 1 = 0.983256 loss)\nI1212 11:46:58.580303 20613 solver.cpp:228] Iteration 8900, loss = 0.0623047\nI1212 11:46:58.580363 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 11:46:58.580380 20613 solver.cpp:244]     Train net output #1: loss = 0.0623046 (* 1 = 0.0623046 loss)\nI1212 11:46:58.674996 20613 sgd_solver.cpp:174] Iteration 8900, lr = 0.267\nI1212 11:46:58.688859 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.592472\nI1212 11:49:16.965646 20613 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1212 11:50:38.699738 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81556\nI1212 11:50:38.699987 20613 solver.cpp:404]     Test net output #1: loss = 0.842217 (* 1 = 0.842217 loss)\nI1212 11:50:40.012025 20613 solver.cpp:228] 
Iteration 9000, loss = 0.104532\nI1212 11:50:40.012086 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 11:50:40.012104 20613 solver.cpp:244]     Train net output #1: loss = 0.104532 (* 1 = 0.104532 loss)\nI1212 11:50:40.106734 20613 sgd_solver.cpp:174] Iteration 9000, lr = 0.27\nI1212 11:50:40.120683 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.587321\nI1212 11:52:57.583655 20613 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1212 11:54:18.969722 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82644\nI1212 11:54:18.969950 20613 solver.cpp:404]     Test net output #1: loss = 0.839973 (* 1 = 0.839973 loss)\nI1212 11:54:20.282232 20613 solver.cpp:228] Iteration 9100, loss = 0.0196335\nI1212 11:54:20.282291 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 11:54:20.282308 20613 solver.cpp:244]     Train net output #1: loss = 0.0196334 (* 1 = 0.0196334 loss)\nI1212 11:54:20.375355 20613 sgd_solver.cpp:174] Iteration 9100, lr = 0.273\nI1212 11:54:20.389164 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.539178\nI1212 11:56:38.651710 20613 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1212 11:58:00.146456 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78184\nI1212 11:58:00.146687 20613 solver.cpp:404]     Test net output #1: loss = 1.12186 (* 1 = 1.12186 loss)\nI1212 11:58:01.459034 20613 solver.cpp:228] Iteration 9200, loss = 0.0529878\nI1212 11:58:01.459095 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 11:58:01.459113 20613 solver.cpp:244]     Train net output #1: loss = 0.0529877 (* 1 = 0.0529877 loss)\nI1212 11:58:01.549551 20613 sgd_solver.cpp:174] Iteration 9200, lr = 0.276\nI1212 11:58:01.563427 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.723246\nI1212 12:00:19.780778 20613 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1212 12:01:41.163591 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71252\nI1212 12:01:41.163801 20613 solver.cpp:404]     Test net 
output #1: loss = 1.56588 (* 1 = 1.56588 loss)\nI1212 12:01:42.476163 20613 solver.cpp:228] Iteration 9300, loss = 0.0675801\nI1212 12:01:42.476227 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 12:01:42.476243 20613 solver.cpp:244]     Train net output #1: loss = 0.0675799 (* 1 = 0.0675799 loss)\nI1212 12:01:42.570056 20613 sgd_solver.cpp:174] Iteration 9300, lr = 0.279\nI1212 12:01:42.583966 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.626036\nI1212 12:04:00.916227 20613 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1212 12:05:22.537828 20613 solver.cpp:404]     Test net output #0: accuracy = 0.823\nI1212 12:05:22.538089 20613 solver.cpp:404]     Test net output #1: loss = 0.73556 (* 1 = 0.73556 loss)\nI1212 12:05:23.855247 20613 solver.cpp:228] Iteration 9400, loss = 0.0528252\nI1212 12:05:23.855311 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 12:05:23.855337 20613 solver.cpp:244]     Train net output #1: loss = 0.0528251 (* 1 = 0.0528251 loss)\nI1212 12:05:23.943200 20613 sgd_solver.cpp:174] Iteration 9400, lr = 0.282\nI1212 12:05:23.957187 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.555337\nI1212 12:07:41.515707 20613 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1212 12:09:03.306371 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75508\nI1212 12:09:03.306615 20613 solver.cpp:404]     Test net output #1: loss = 1.31404 (* 1 = 1.31404 loss)\nI1212 12:09:04.620877 20613 solver.cpp:228] Iteration 9500, loss = 0.0552971\nI1212 12:09:04.620944 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 12:09:04.620970 20613 solver.cpp:244]     Train net output #1: loss = 0.055297 (* 1 = 0.055297 loss)\nI1212 12:09:04.715507 20613 sgd_solver.cpp:174] Iteration 9500, lr = 0.285\nI1212 12:09:04.729321 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.557709\nI1212 12:11:23.134074 20613 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1212 12:12:44.776250 20613 solver.cpp:404]     Test 
net output #0: accuracy = 0.80192\nI1212 12:12:44.776559 20613 solver.cpp:404]     Test net output #1: loss = 0.967896 (* 1 = 0.967896 loss)\nI1212 12:12:46.090437 20613 solver.cpp:228] Iteration 9600, loss = 0.0767407\nI1212 12:12:46.090487 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 12:12:46.090509 20613 solver.cpp:244]     Train net output #1: loss = 0.0767406 (* 1 = 0.0767406 loss)\nI1212 12:12:46.182366 20613 sgd_solver.cpp:174] Iteration 9600, lr = 0.288\nI1212 12:12:46.195832 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.607422\nI1212 12:15:03.783011 20613 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1212 12:16:25.488319 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76796\nI1212 12:16:25.488536 20613 solver.cpp:404]     Test net output #1: loss = 1.16588 (* 1 = 1.16588 loss)\nI1212 12:16:26.802546 20613 solver.cpp:228] Iteration 9700, loss = 0.142646\nI1212 12:16:26.802608 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 12:16:26.802634 20613 solver.cpp:244]     Train net output #1: loss = 0.142646 (* 1 = 0.142646 loss)\nI1212 12:16:26.895006 20613 sgd_solver.cpp:174] Iteration 9700, lr = 0.291\nI1212 12:16:26.908850 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.571548\nI1212 12:18:44.445341 20613 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1212 12:20:06.039297 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72028\nI1212 12:20:06.039583 20613 solver.cpp:404]     Test net output #1: loss = 1.63172 (* 1 = 1.63172 loss)\nI1212 12:20:07.353425 20613 solver.cpp:228] Iteration 9800, loss = 0.0899649\nI1212 12:20:07.353488 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 12:20:07.353514 20613 solver.cpp:244]     Train net output #1: loss = 0.0899647 (* 1 = 0.0899647 loss)\nI1212 12:20:07.446590 20613 sgd_solver.cpp:174] Iteration 9800, lr = 0.294\nI1212 12:20:07.460537 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.715483\nI1212 12:22:24.928812 20613 
solver.cpp:337] Iteration 9900, Testing net (#0)\nI1212 12:23:46.433459 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7886\nI1212 12:23:46.433748 20613 solver.cpp:404]     Test net output #1: loss = 1.05957 (* 1 = 1.05957 loss)\nI1212 12:23:47.747444 20613 solver.cpp:228] Iteration 9900, loss = 0.0339565\nI1212 12:23:47.747508 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 12:23:47.747532 20613 solver.cpp:244]     Train net output #1: loss = 0.0339564 (* 1 = 0.0339564 loss)\nI1212 12:23:47.837709 20613 sgd_solver.cpp:174] Iteration 9900, lr = 0.297\nI1212 12:23:47.851673 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.486763\nI1212 12:26:06.234931 20613 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1212 12:27:27.693775 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7936\nI1212 12:27:27.694061 20613 solver.cpp:404]     Test net output #1: loss = 0.997435 (* 1 = 0.997435 loss)\nI1212 12:27:29.007727 20613 solver.cpp:228] Iteration 10000, loss = 0.0536948\nI1212 12:27:29.007786 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 12:27:29.007812 20613 solver.cpp:244]     Train net output #1: loss = 0.0536947 (* 1 = 0.0536947 loss)\nI1212 12:27:29.098634 20613 sgd_solver.cpp:174] Iteration 10000, lr = 0.3\nI1212 12:27:29.112545 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.526288\nI1212 12:29:47.452512 20613 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1212 12:31:09.190979 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8334\nI1212 12:31:09.191275 20613 solver.cpp:404]     Test net output #1: loss = 0.777999 (* 1 = 0.777999 loss)\nI1212 12:31:10.505089 20613 solver.cpp:228] Iteration 10100, loss = 0.0838545\nI1212 12:31:10.505146 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 12:31:10.505172 20613 solver.cpp:244]     Train net output #1: loss = 0.0838543 (* 1 = 0.0838543 loss)\nI1212 12:31:10.596412 20613 sgd_solver.cpp:174] Iteration 10100, lr = 
0.303\nI1212 12:31:10.610318 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.637303\nI1212 12:33:28.251476 20613 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1212 12:34:49.625291 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7928\nI1212 12:34:49.625535 20613 solver.cpp:404]     Test net output #1: loss = 1.00048 (* 1 = 1.00048 loss)\nI1212 12:34:50.938204 20613 solver.cpp:228] Iteration 10200, loss = 0.101429\nI1212 12:34:50.938261 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 12:34:50.938287 20613 solver.cpp:244]     Train net output #1: loss = 0.101429 (* 1 = 0.101429 loss)\nI1212 12:34:51.030802 20613 sgd_solver.cpp:174] Iteration 10200, lr = 0.306\nI1212 12:34:51.044734 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.610882\nI1212 12:37:08.528935 20613 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1212 12:38:29.861444 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81396\nI1212 12:38:29.861709 20613 solver.cpp:404]     Test net output #1: loss = 0.840135 (* 1 = 0.840135 loss)\nI1212 12:38:31.173853 20613 solver.cpp:228] Iteration 10300, loss = 0.0810015\nI1212 12:38:31.173912 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 12:38:31.173938 20613 solver.cpp:244]     Train net output #1: loss = 0.0810014 (* 1 = 0.0810014 loss)\nI1212 12:38:31.267419 20613 sgd_solver.cpp:174] Iteration 10300, lr = 0.309\nI1212 12:38:31.281244 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.595141\nI1212 12:40:48.715055 20613 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1212 12:42:10.095924 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8182\nI1212 12:42:10.096241 20613 solver.cpp:404]     Test net output #1: loss = 0.839064 (* 1 = 0.839064 loss)\nI1212 12:42:11.408828 20613 solver.cpp:228] Iteration 10400, loss = 0.0650215\nI1212 12:42:11.408886 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 12:42:11.408912 20613 solver.cpp:244]     Train net output #1: loss = 
0.0650213 (* 1 = 0.0650213 loss)\nI1212 12:42:11.499650 20613 sgd_solver.cpp:174] Iteration 10400, lr = 0.312\nI1212 12:42:11.513556 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.614855\nI1212 12:44:29.791869 20613 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1212 12:45:51.179622 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79112\nI1212 12:45:51.179862 20613 solver.cpp:404]     Test net output #1: loss = 1.006 (* 1 = 1.006 loss)\nI1212 12:45:52.492663 20613 solver.cpp:228] Iteration 10500, loss = 0.0215628\nI1212 12:45:52.492722 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 12:45:52.492746 20613 solver.cpp:244]     Train net output #1: loss = 0.0215627 (* 1 = 0.0215627 loss)\nI1212 12:45:52.585278 20613 sgd_solver.cpp:174] Iteration 10500, lr = 0.315\nI1212 12:45:52.599172 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.621962\nI1212 12:48:10.030732 20613 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1212 12:49:31.648705 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84052\nI1212 12:49:31.648977 20613 solver.cpp:404]     Test net output #1: loss = 0.729741 (* 1 = 0.729741 loss)\nI1212 12:49:32.962438 20613 solver.cpp:228] Iteration 10600, loss = 0.0759061\nI1212 12:49:32.962483 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 12:49:32.962507 20613 solver.cpp:244]     Train net output #1: loss = 0.075906 (* 1 = 0.075906 loss)\nI1212 12:49:33.053654 20613 sgd_solver.cpp:174] Iteration 10600, lr = 0.318\nI1212 12:49:33.067648 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.527741\nI1212 12:51:51.400491 20613 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1212 12:53:13.037645 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79572\nI1212 12:53:13.037971 20613 solver.cpp:404]     Test net output #1: loss = 1.05246 (* 1 = 1.05246 loss)\nI1212 12:53:14.350895 20613 solver.cpp:228] Iteration 10700, loss = 0.0327702\nI1212 12:53:14.350955 20613 solver.cpp:244]     Train net output #0: 
accuracy = 0.984\nI1212 12:53:14.350980 20613 solver.cpp:244]     Train net output #1: loss = 0.0327701 (* 1 = 0.0327701 loss)\nI1212 12:53:14.444814 20613 sgd_solver.cpp:174] Iteration 10700, lr = 0.321\nI1212 12:53:14.458601 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.45687\nI1212 12:55:32.807539 20613 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1212 12:56:54.433260 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80404\nI1212 12:56:54.433496 20613 solver.cpp:404]     Test net output #1: loss = 0.863813 (* 1 = 0.863813 loss)\nI1212 12:56:55.747211 20613 solver.cpp:228] Iteration 10800, loss = 0.0457963\nI1212 12:56:55.747272 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 12:56:55.747298 20613 solver.cpp:244]     Train net output #1: loss = 0.0457963 (* 1 = 0.0457963 loss)\nI1212 12:56:55.836195 20613 sgd_solver.cpp:174] Iteration 10800, lr = 0.324\nI1212 12:56:55.850023 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.599482\nI1212 12:59:14.138546 20613 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1212 13:00:35.661429 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80572\nI1212 13:00:35.661711 20613 solver.cpp:404]     Test net output #1: loss = 0.837768 (* 1 = 0.837768 loss)\nI1212 13:00:36.974864 20613 solver.cpp:228] Iteration 10900, loss = 0.0694226\nI1212 13:00:36.974920 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 13:00:36.974946 20613 solver.cpp:244]     Train net output #1: loss = 0.0694225 (* 1 = 0.0694225 loss)\nI1212 13:00:37.061337 20613 sgd_solver.cpp:174] Iteration 10900, lr = 0.327\nI1212 13:00:37.075371 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.487084\nI1212 13:02:55.332228 20613 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1212 13:04:17.107868 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80968\nI1212 13:04:17.108108 20613 solver.cpp:404]     Test net output #1: loss = 0.889843 (* 1 = 0.889843 loss)\nI1212 13:04:18.421942 20613 
solver.cpp:228] Iteration 11000, loss = 0.0784114\nI1212 13:04:18.421989 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 13:04:18.422019 20613 solver.cpp:244]     Train net output #1: loss = 0.0784113 (* 1 = 0.0784113 loss)\nI1212 13:04:18.508579 20613 sgd_solver.cpp:174] Iteration 11000, lr = 0.33\nI1212 13:04:18.522464 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.553814\nI1212 13:06:35.939249 20613 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1212 13:07:57.367554 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84096\nI1212 13:07:57.367790 20613 solver.cpp:404]     Test net output #1: loss = 0.739993 (* 1 = 0.739993 loss)\nI1212 13:07:58.681977 20613 solver.cpp:228] Iteration 11100, loss = 0.0850812\nI1212 13:07:58.682029 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 13:07:58.682052 20613 solver.cpp:244]     Train net output #1: loss = 0.0850811 (* 1 = 0.0850811 loss)\nI1212 13:07:58.767822 20613 sgd_solver.cpp:174] Iteration 11100, lr = 0.333\nI1212 13:07:58.781869 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.526199\nI1212 13:10:16.144558 20613 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1212 13:11:37.404649 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79844\nI1212 13:11:37.404871 20613 solver.cpp:404]     Test net output #1: loss = 0.922245 (* 1 = 0.922245 loss)\nI1212 13:11:38.718683 20613 solver.cpp:228] Iteration 11200, loss = 0.0678305\nI1212 13:11:38.718739 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 13:11:38.718765 20613 solver.cpp:244]     Train net output #1: loss = 0.0678304 (* 1 = 0.0678304 loss)\nI1212 13:11:38.808621 20613 sgd_solver.cpp:174] Iteration 11200, lr = 0.336\nI1212 13:11:38.822540 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.567759\nI1212 13:13:57.076524 20613 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1212 13:15:18.421847 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80444\nI1212 13:15:18.422085 20613 
solver.cpp:404]     Test net output #1: loss = 0.951063 (* 1 = 0.951063 loss)\nI1212 13:15:19.736093 20613 solver.cpp:228] Iteration 11300, loss = 0.0739108\nI1212 13:15:19.736156 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 13:15:19.736184 20613 solver.cpp:244]     Train net output #1: loss = 0.0739107 (* 1 = 0.0739107 loss)\nI1212 13:15:19.824036 20613 sgd_solver.cpp:174] Iteration 11300, lr = 0.339\nI1212 13:15:19.837855 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.633032\nI1212 13:17:38.097537 20613 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1212 13:18:59.390086 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7688\nI1212 13:18:59.390344 20613 solver.cpp:404]     Test net output #1: loss = 1.14078 (* 1 = 1.14078 loss)\nI1212 13:19:00.703279 20613 solver.cpp:228] Iteration 11400, loss = 0.0973576\nI1212 13:19:00.703342 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 13:19:00.703368 20613 solver.cpp:244]     Train net output #1: loss = 0.0973576 (* 1 = 0.0973576 loss)\nI1212 13:19:00.791642 20613 sgd_solver.cpp:174] Iteration 11400, lr = 0.342\nI1212 13:19:00.805518 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.630554\nI1212 13:21:18.213402 20613 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1212 13:22:39.690838 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70172\nI1212 13:22:39.691154 20613 solver.cpp:404]     Test net output #1: loss = 1.63402 (* 1 = 1.63402 loss)\nI1212 13:22:41.004645 20613 solver.cpp:228] Iteration 11500, loss = 0.0522295\nI1212 13:22:41.004703 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 13:22:41.004729 20613 solver.cpp:244]     Train net output #1: loss = 0.0522294 (* 1 = 0.0522294 loss)\nI1212 13:22:41.097501 20613 sgd_solver.cpp:174] Iteration 11500, lr = 0.345\nI1212 13:22:41.111423 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.539439\nI1212 13:24:59.386255 20613 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1212 
13:26:21.147547 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83408\nI1212 13:26:21.147815 20613 solver.cpp:404]     Test net output #1: loss = 0.729325 (* 1 = 0.729325 loss)\nI1212 13:26:22.461796 20613 solver.cpp:228] Iteration 11600, loss = 0.0798722\nI1212 13:26:22.461843 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 13:26:22.461868 20613 solver.cpp:244]     Train net output #1: loss = 0.0798721 (* 1 = 0.0798721 loss)\nI1212 13:26:22.552173 20613 sgd_solver.cpp:174] Iteration 11600, lr = 0.348\nI1212 13:26:22.566112 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.573235\nI1212 13:28:40.910161 20613 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1212 13:30:02.588922 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75328\nI1212 13:30:02.589159 20613 solver.cpp:404]     Test net output #1: loss = 1.19718 (* 1 = 1.19718 loss)\nI1212 13:30:03.903060 20613 solver.cpp:228] Iteration 11700, loss = 0.0357034\nI1212 13:30:03.903107 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 13:30:03.903131 20613 solver.cpp:244]     Train net output #1: loss = 0.0357033 (* 1 = 0.0357033 loss)\nI1212 13:30:03.992717 20613 sgd_solver.cpp:174] Iteration 11700, lr = 0.351\nI1212 13:30:04.006686 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.471131\nI1212 13:32:22.270622 20613 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1212 13:33:43.585218 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81892\nI1212 13:33:43.585451 20613 solver.cpp:404]     Test net output #1: loss = 0.783381 (* 1 = 0.783381 loss)\nI1212 13:33:44.899080 20613 solver.cpp:228] Iteration 11800, loss = 0.115356\nI1212 13:33:44.899142 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 13:33:44.899168 20613 solver.cpp:244]     Train net output #1: loss = 0.115356 (* 1 = 0.115356 loss)\nI1212 13:33:44.983167 20613 sgd_solver.cpp:174] Iteration 11800, lr = 0.354\nI1212 13:33:44.997071 20613 sgd_solver.cpp:149] Gradient: 
L2 norm 0.588139\nI1212 13:36:03.220693 20613 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1212 13:37:24.951913 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77088\nI1212 13:37:24.952181 20613 solver.cpp:404]     Test net output #1: loss = 1.26529 (* 1 = 1.26529 loss)\nI1212 13:37:26.265774 20613 solver.cpp:228] Iteration 11900, loss = 0.0986134\nI1212 13:37:26.265828 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 13:37:26.265854 20613 solver.cpp:244]     Train net output #1: loss = 0.0986133 (* 1 = 0.0986133 loss)\nI1212 13:37:26.353883 20613 sgd_solver.cpp:174] Iteration 11900, lr = 0.357\nI1212 13:37:26.367326 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.513623\nI1212 13:39:44.679378 20613 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1212 13:41:06.163458 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79124\nI1212 13:41:06.163767 20613 solver.cpp:404]     Test net output #1: loss = 0.979692 (* 1 = 0.979692 loss)\nI1212 13:41:07.477418 20613 solver.cpp:228] Iteration 12000, loss = 0.0562174\nI1212 13:41:07.477461 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 13:41:07.477476 20613 solver.cpp:244]     Train net output #1: loss = 0.0562172 (* 1 = 0.0562172 loss)\nI1212 13:41:07.566948 20613 sgd_solver.cpp:174] Iteration 12000, lr = 0.36\nI1212 13:41:07.580636 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.619274\nI1212 13:43:24.918504 20613 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1212 13:44:46.272052 20613 solver.cpp:404]     Test net output #0: accuracy = 0.789\nI1212 13:44:46.272315 20613 solver.cpp:404]     Test net output #1: loss = 1.0371 (* 1 = 1.0371 loss)\nI1212 13:44:47.586014 20613 solver.cpp:228] Iteration 12100, loss = 0.0531234\nI1212 13:44:47.586064 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 13:44:47.586081 20613 solver.cpp:244]     Train net output #1: loss = 0.0531233 (* 1 = 0.0531233 loss)\nI1212 13:44:47.674899 20613 
sgd_solver.cpp:174] Iteration 12100, lr = 0.363\nI1212 13:44:47.688733 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.611109\nI1212 13:47:05.094771 20613 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1212 13:48:26.409062 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84164\nI1212 13:48:26.409322 20613 solver.cpp:404]     Test net output #1: loss = 0.690489 (* 1 = 0.690489 loss)\nI1212 13:48:27.723280 20613 solver.cpp:228] Iteration 12200, loss = 0.0619731\nI1212 13:48:27.723330 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 13:48:27.723347 20613 solver.cpp:244]     Train net output #1: loss = 0.0619729 (* 1 = 0.0619729 loss)\nI1212 13:48:27.810521 20613 sgd_solver.cpp:174] Iteration 12200, lr = 0.366\nI1212 13:48:27.824388 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.476977\nI1212 13:50:46.045473 20613 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1212 13:52:07.425488 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80944\nI1212 13:52:07.425758 20613 solver.cpp:404]     Test net output #1: loss = 0.882173 (* 1 = 0.882173 loss)\nI1212 13:52:08.738899 20613 solver.cpp:228] Iteration 12300, loss = 0.0385493\nI1212 13:52:08.738942 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 13:52:08.738958 20613 solver.cpp:244]     Train net output #1: loss = 0.0385491 (* 1 = 0.0385491 loss)\nI1212 13:52:08.829408 20613 sgd_solver.cpp:174] Iteration 12300, lr = 0.369\nI1212 13:52:08.843264 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382115\nI1212 13:54:27.095263 20613 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1212 13:55:48.763075 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83084\nI1212 13:55:48.763325 20613 solver.cpp:404]     Test net output #1: loss = 0.712316 (* 1 = 0.712316 loss)\nI1212 13:55:50.076320 20613 solver.cpp:228] Iteration 12400, loss = 0.119259\nI1212 13:55:50.076375 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 13:55:50.076393 20613 
solver.cpp:244]     Train net output #1: loss = 0.119259 (* 1 = 0.119259 loss)\nI1212 13:55:50.164237 20613 sgd_solver.cpp:174] Iteration 12400, lr = 0.372\nI1212 13:55:50.177847 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.527643\nI1212 13:58:08.467770 20613 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1212 13:59:30.095703 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82284\nI1212 13:59:30.095963 20613 solver.cpp:404]     Test net output #1: loss = 0.775429 (* 1 = 0.775429 loss)\nI1212 13:59:31.408257 20613 solver.cpp:228] Iteration 12500, loss = 0.0650783\nI1212 13:59:31.408301 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 13:59:31.408318 20613 solver.cpp:244]     Train net output #1: loss = 0.0650782 (* 1 = 0.0650782 loss)\nI1212 13:59:31.494654 20613 sgd_solver.cpp:174] Iteration 12500, lr = 0.375\nI1212 13:59:31.508532 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.688465\nI1212 14:01:49.738430 20613 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1212 14:03:11.521914 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78444\nI1212 14:03:11.522245 20613 solver.cpp:404]     Test net output #1: loss = 1.06746 (* 1 = 1.06746 loss)\nI1212 14:03:12.836097 20613 solver.cpp:228] Iteration 12600, loss = 0.0750111\nI1212 14:03:12.836161 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 14:03:12.836186 20613 solver.cpp:244]     Train net output #1: loss = 0.075011 (* 1 = 0.075011 loss)\nI1212 14:03:12.929023 20613 sgd_solver.cpp:174] Iteration 12600, lr = 0.378\nI1212 14:03:12.942853 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.452083\nI1212 14:05:31.175076 20613 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1212 14:06:52.964208 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77628\nI1212 14:06:52.964506 20613 solver.cpp:404]     Test net output #1: loss = 1.19984 (* 1 = 1.19984 loss)\nI1212 14:06:54.278204 20613 solver.cpp:228] Iteration 12700, loss = 0.0681946\nI1212 
14:06:54.278270 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 14:06:54.278295 20613 solver.cpp:244]     Train net output #1: loss = 0.0681945 (* 1 = 0.0681945 loss)\nI1212 14:06:54.370354 20613 sgd_solver.cpp:174] Iteration 12700, lr = 0.381\nI1212 14:06:54.384397 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.549424\nI1212 14:09:11.687580 20613 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1212 14:10:33.507159 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81036\nI1212 14:10:33.507465 20613 solver.cpp:404]     Test net output #1: loss = 0.909384 (* 1 = 0.909384 loss)\nI1212 14:10:34.821558 20613 solver.cpp:228] Iteration 12800, loss = 0.074797\nI1212 14:10:34.821614 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 14:10:34.821640 20613 solver.cpp:244]     Train net output #1: loss = 0.0747969 (* 1 = 0.0747969 loss)\nI1212 14:10:34.907129 20613 sgd_solver.cpp:174] Iteration 12800, lr = 0.384\nI1212 14:10:34.920928 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.502625\nI1212 14:12:53.162528 20613 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1212 14:14:14.998739 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79412\nI1212 14:14:14.999066 20613 solver.cpp:404]     Test net output #1: loss = 0.929799 (* 1 = 0.929799 loss)\nI1212 14:14:16.313374 20613 solver.cpp:228] Iteration 12900, loss = 0.0741927\nI1212 14:14:16.313436 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 14:14:16.313460 20613 solver.cpp:244]     Train net output #1: loss = 0.0741926 (* 1 = 0.0741926 loss)\nI1212 14:14:16.405346 20613 sgd_solver.cpp:174] Iteration 12900, lr = 0.387\nI1212 14:14:16.419263 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.540569\nI1212 14:16:34.607622 20613 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1212 14:17:56.430263 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8192\nI1212 14:17:56.430586 20613 solver.cpp:404]     Test net output #1: loss = 0.74878 
(* 1 = 0.74878 loss)\nI1212 14:17:57.744249 20613 solver.cpp:228] Iteration 13000, loss = 0.0592051\nI1212 14:17:57.744313 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 14:17:57.744338 20613 solver.cpp:244]     Train net output #1: loss = 0.059205 (* 1 = 0.059205 loss)\nI1212 14:17:57.834983 20613 sgd_solver.cpp:174] Iteration 13000, lr = 0.39\nI1212 14:17:57.848898 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.547302\nI1212 14:20:15.996685 20613 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1212 14:21:37.852676 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8184\nI1212 14:21:37.853003 20613 solver.cpp:404]     Test net output #1: loss = 0.785182 (* 1 = 0.785182 loss)\nI1212 14:21:39.167083 20613 solver.cpp:228] Iteration 13100, loss = 0.0505687\nI1212 14:21:39.167145 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 14:21:39.167171 20613 solver.cpp:244]     Train net output #1: loss = 0.0505686 (* 1 = 0.0505686 loss)\nI1212 14:21:39.258464 20613 sgd_solver.cpp:174] Iteration 13100, lr = 0.393\nI1212 14:21:39.272256 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.555858\nI1212 14:23:57.477304 20613 solver.cpp:337] Iteration 13200, Testing net (#0)\nI1212 14:25:19.308430 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8384\nI1212 14:25:19.308773 20613 solver.cpp:404]     Test net output #1: loss = 0.704238 (* 1 = 0.704238 loss)\nI1212 14:25:20.622330 20613 solver.cpp:228] Iteration 13200, loss = 0.0328069\nI1212 14:25:20.622392 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 14:25:20.622418 20613 solver.cpp:244]     Train net output #1: loss = 0.0328069 (* 1 = 0.0328069 loss)\nI1212 14:25:20.713623 20613 sgd_solver.cpp:174] Iteration 13200, lr = 0.396\nI1212 14:25:20.727505 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.593391\nI1212 14:27:37.999963 20613 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1212 14:28:59.848347 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.77132\nI1212 14:28:59.848677 20613 solver.cpp:404]     Test net output #1: loss = 1.12397 (* 1 = 1.12397 loss)\nI1212 14:29:01.162358 20613 solver.cpp:228] Iteration 13300, loss = 0.0806593\nI1212 14:29:01.162415 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 14:29:01.162441 20613 solver.cpp:244]     Train net output #1: loss = 0.0806592 (* 1 = 0.0806592 loss)\nI1212 14:29:01.254495 20613 sgd_solver.cpp:174] Iteration 13300, lr = 0.399\nI1212 14:29:01.268412 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.551315\nI1212 14:31:18.684350 20613 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1212 14:32:40.551316 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7616\nI1212 14:32:40.551646 20613 solver.cpp:404]     Test net output #1: loss = 1.18944 (* 1 = 1.18944 loss)\nI1212 14:32:41.864872 20613 solver.cpp:228] Iteration 13400, loss = 0.0351381\nI1212 14:32:41.864920 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 14:32:41.864944 20613 solver.cpp:244]     Train net output #1: loss = 0.035138 (* 1 = 0.035138 loss)\nI1212 14:32:41.954577 20613 sgd_solver.cpp:174] Iteration 13400, lr = 0.402\nI1212 14:32:41.968531 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.549751\nI1212 14:34:59.357373 20613 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1212 14:36:21.260685 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7276\nI1212 14:36:21.261044 20613 solver.cpp:404]     Test net output #1: loss = 1.44112 (* 1 = 1.44112 loss)\nI1212 14:36:22.574451 20613 solver.cpp:228] Iteration 13500, loss = 0.101547\nI1212 14:36:22.574498 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 14:36:22.574522 20613 solver.cpp:244]     Train net output #1: loss = 0.101546 (* 1 = 0.101546 loss)\nI1212 14:36:22.665092 20613 sgd_solver.cpp:174] Iteration 13500, lr = 0.405\nI1212 14:36:22.678903 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.628548\nI1212 14:38:39.979162 20613 solver.cpp:337] 
Iteration 13600, Testing net (#0)\nI1212 14:40:01.790119 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78484\nI1212 14:40:01.790417 20613 solver.cpp:404]     Test net output #1: loss = 1.03799 (* 1 = 1.03799 loss)\nI1212 14:40:03.102936 20613 solver.cpp:228] Iteration 13600, loss = 0.0252393\nI1212 14:40:03.102998 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 14:40:03.103026 20613 solver.cpp:244]     Train net output #1: loss = 0.0252391 (* 1 = 0.0252391 loss)\nI1212 14:40:03.195750 20613 sgd_solver.cpp:174] Iteration 13600, lr = 0.408\nI1212 14:40:03.209540 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.47822\nI1212 14:42:21.409783 20613 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1212 14:43:43.331192 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82504\nI1212 14:43:43.331516 20613 solver.cpp:404]     Test net output #1: loss = 0.673782 (* 1 = 0.673782 loss)\nI1212 14:43:44.644767 20613 solver.cpp:228] Iteration 13700, loss = 0.0795851\nI1212 14:43:44.644824 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 14:43:44.644847 20613 solver.cpp:244]     Train net output #1: loss = 0.0795849 (* 1 = 0.0795849 loss)\nI1212 14:43:44.732744 20613 sgd_solver.cpp:174] Iteration 13700, lr = 0.411\nI1212 14:43:44.746692 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.53116\nI1212 14:46:02.942872 20613 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1212 14:47:24.765883 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8004\nI1212 14:47:24.766232 20613 solver.cpp:404]     Test net output #1: loss = 0.909179 (* 1 = 0.909179 loss)\nI1212 14:47:26.080055 20613 solver.cpp:228] Iteration 13800, loss = 0.0460643\nI1212 14:47:26.080112 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 14:47:26.080138 20613 solver.cpp:244]     Train net output #1: loss = 0.0460642 (* 1 = 0.0460642 loss)\nI1212 14:47:26.165539 20613 sgd_solver.cpp:174] Iteration 13800, lr = 0.414\nI1212 
14:47:26.179584 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.43587\nI1212 14:49:44.358196 20613 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1212 14:51:06.211798 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77036\nI1212 14:51:06.212110 20613 solver.cpp:404]     Test net output #1: loss = 1.13307 (* 1 = 1.13307 loss)\nI1212 14:51:07.524560 20613 solver.cpp:228] Iteration 13900, loss = 0.0931037\nI1212 14:51:07.524617 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 14:51:07.524642 20613 solver.cpp:244]     Train net output #1: loss = 0.0931035 (* 1 = 0.0931035 loss)\nI1212 14:51:07.612052 20613 sgd_solver.cpp:174] Iteration 13900, lr = 0.417\nI1212 14:51:07.625903 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.567581\nI1212 14:53:25.057267 20613 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1212 14:54:46.937641 20613 solver.cpp:404]     Test net output #0: accuracy = 0.813\nI1212 14:54:46.937986 20613 solver.cpp:404]     Test net output #1: loss = 0.949737 (* 1 = 0.949737 loss)\nI1212 14:54:48.251621 20613 solver.cpp:228] Iteration 14000, loss = 0.0565943\nI1212 14:54:48.251677 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 14:54:48.251701 20613 solver.cpp:244]     Train net output #1: loss = 0.0565941 (* 1 = 0.0565941 loss)\nI1212 14:54:48.340968 20613 sgd_solver.cpp:174] Iteration 14000, lr = 0.42\nI1212 14:54:48.354934 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.534538\nI1212 14:57:05.618983 20613 solver.cpp:337] Iteration 14100, Testing net (#0)\nI1212 14:58:27.410301 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78096\nI1212 14:58:27.410620 20613 solver.cpp:404]     Test net output #1: loss = 1.27076 (* 1 = 1.27076 loss)\nI1212 14:58:28.724310 20613 solver.cpp:228] Iteration 14100, loss = 0.121344\nI1212 14:58:28.724371 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 14:58:28.724396 20613 solver.cpp:244]     Train net output #1: loss = 0.121344 (* 1 = 
0.121344 loss)\nI1212 14:58:28.810983 20613 sgd_solver.cpp:174] Iteration 14100, lr = 0.423\nI1212 14:58:28.824846 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.614012\nI1212 15:00:47.084022 20613 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1212 15:02:08.930197 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81096\nI1212 15:02:08.930500 20613 solver.cpp:404]     Test net output #1: loss = 0.845875 (* 1 = 0.845875 loss)\nI1212 15:02:10.244107 20613 solver.cpp:228] Iteration 14200, loss = 0.0680539\nI1212 15:02:10.244165 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 15:02:10.244190 20613 solver.cpp:244]     Train net output #1: loss = 0.0680538 (* 1 = 0.0680538 loss)\nI1212 15:02:10.336231 20613 sgd_solver.cpp:174] Iteration 14200, lr = 0.426\nI1212 15:02:10.350106 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.448936\nI1212 15:04:28.593502 20613 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1212 15:05:50.453025 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76088\nI1212 15:05:50.453349 20613 solver.cpp:404]     Test net output #1: loss = 1.22373 (* 1 = 1.22373 loss)\nI1212 15:05:51.767168 20613 solver.cpp:228] Iteration 14300, loss = 0.0473961\nI1212 15:05:51.767231 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 15:05:51.767256 20613 solver.cpp:244]     Train net output #1: loss = 0.047396 (* 1 = 0.047396 loss)\nI1212 15:05:51.854162 20613 sgd_solver.cpp:174] Iteration 14300, lr = 0.429\nI1212 15:05:51.867959 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.462932\nI1212 15:08:09.316601 20613 solver.cpp:337] Iteration 14400, Testing net (#0)\nI1212 15:09:31.157794 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78504\nI1212 15:09:31.158123 20613 solver.cpp:404]     Test net output #1: loss = 0.919634 (* 1 = 0.919634 loss)\nI1212 15:09:32.471972 20613 solver.cpp:228] Iteration 14400, loss = 0.0627954\nI1212 15:09:32.472040 20613 solver.cpp:244]     Train net output #0: 
accuracy = 0.976\nI1212 15:09:32.472065 20613 solver.cpp:244]     Train net output #1: loss = 0.0627953 (* 1 = 0.0627953 loss)\nI1212 15:09:32.560717 20613 sgd_solver.cpp:174] Iteration 14400, lr = 0.432\nI1212 15:09:32.574544 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.489701\nI1212 15:11:50.863142 20613 solver.cpp:337] Iteration 14500, Testing net (#0)\nI1212 15:13:12.753387 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81364\nI1212 15:13:12.753702 20613 solver.cpp:404]     Test net output #1: loss = 0.863016 (* 1 = 0.863016 loss)\nI1212 15:13:14.067106 20613 solver.cpp:228] Iteration 14500, loss = 0.0590734\nI1212 15:13:14.067165 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 15:13:14.067191 20613 solver.cpp:244]     Train net output #1: loss = 0.0590733 (* 1 = 0.0590733 loss)\nI1212 15:13:14.155258 20613 sgd_solver.cpp:174] Iteration 14500, lr = 0.435\nI1212 15:13:14.169256 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.504464\nI1212 15:15:32.455343 20613 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1212 15:16:54.267969 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7968\nI1212 15:16:54.268296 20613 solver.cpp:404]     Test net output #1: loss = 0.853592 (* 1 = 0.853592 loss)\nI1212 15:16:55.581759 20613 solver.cpp:228] Iteration 14600, loss = 0.100131\nI1212 15:16:55.581821 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 15:16:55.581848 20613 solver.cpp:244]     Train net output #1: loss = 0.10013 (* 1 = 0.10013 loss)\nI1212 15:16:55.672106 20613 sgd_solver.cpp:174] Iteration 14600, lr = 0.438\nI1212 15:16:55.686069 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.540069\nI1212 15:19:13.913434 20613 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1212 15:20:35.774854 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8218\nI1212 15:20:35.775189 20613 solver.cpp:404]     Test net output #1: loss = 0.725419 (* 1 = 0.725419 loss)\nI1212 15:20:37.088712 20613 solver.cpp:228] 
Iteration 14700, loss = 0.14001\nI1212 15:20:37.088770 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 15:20:37.088796 20613 solver.cpp:244]     Train net output #1: loss = 0.140009 (* 1 = 0.140009 loss)\nI1212 15:20:37.178279 20613 sgd_solver.cpp:174] Iteration 14700, lr = 0.441\nI1212 15:20:37.191984 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.548174\nI1212 15:22:55.537611 20613 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1212 15:24:17.470305 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7976\nI1212 15:24:17.470633 20613 solver.cpp:404]     Test net output #1: loss = 0.959257 (* 1 = 0.959257 loss)\nI1212 15:24:18.784713 20613 solver.cpp:228] Iteration 14800, loss = 0.0958815\nI1212 15:24:18.784773 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 15:24:18.784798 20613 solver.cpp:244]     Train net output #1: loss = 0.0958814 (* 1 = 0.0958814 loss)\nI1212 15:24:18.875047 20613 sgd_solver.cpp:174] Iteration 14800, lr = 0.444\nI1212 15:24:18.888841 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.554393\nI1212 15:26:37.137892 20613 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1212 15:27:58.999198 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8142\nI1212 15:27:58.999522 20613 solver.cpp:404]     Test net output #1: loss = 0.821972 (* 1 = 0.821972 loss)\nI1212 15:28:00.312880 20613 solver.cpp:228] Iteration 14900, loss = 0.0196679\nI1212 15:28:00.312938 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 15:28:00.312966 20613 solver.cpp:244]     Train net output #1: loss = 0.0196677 (* 1 = 0.0196677 loss)\nI1212 15:28:00.399580 20613 sgd_solver.cpp:174] Iteration 14900, lr = 0.447\nI1212 15:28:00.413462 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.414338\nI1212 15:30:18.743772 20613 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1212 15:31:40.584290 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83616\nI1212 15:31:40.584625 20613 solver.cpp:404]     
Test net output #1: loss = 0.775448 (* 1 = 0.775448 loss)\nI1212 15:31:41.898066 20613 solver.cpp:228] Iteration 15000, loss = 0.0331584\nI1212 15:31:41.898120 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 15:31:41.898147 20613 solver.cpp:244]     Train net output #1: loss = 0.0331583 (* 1 = 0.0331583 loss)\nI1212 15:31:41.987185 20613 sgd_solver.cpp:174] Iteration 15000, lr = 0.45\nI1212 15:31:42.001134 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.474875\nI1212 15:33:59.472982 20613 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1212 15:35:21.384372 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8362\nI1212 15:35:21.384699 20613 solver.cpp:404]     Test net output #1: loss = 0.671821 (* 1 = 0.671821 loss)\nI1212 15:35:22.698508 20613 solver.cpp:228] Iteration 15100, loss = 0.0978967\nI1212 15:35:22.698563 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 15:35:22.698590 20613 solver.cpp:244]     Train net output #1: loss = 0.0978965 (* 1 = 0.0978965 loss)\nI1212 15:35:22.785748 20613 sgd_solver.cpp:174] Iteration 15100, lr = 0.453\nI1212 15:35:22.799669 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.555568\nI1212 15:37:41.124353 20613 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1212 15:39:02.985730 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81072\nI1212 15:39:02.986078 20613 solver.cpp:404]     Test net output #1: loss = 0.767871 (* 1 = 0.767871 loss)\nI1212 15:39:04.298807 20613 solver.cpp:228] Iteration 15200, loss = 0.16369\nI1212 15:39:04.298858 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 15:39:04.298884 20613 solver.cpp:244]     Train net output #1: loss = 0.163689 (* 1 = 0.163689 loss)\nI1212 15:39:04.389106 20613 sgd_solver.cpp:174] Iteration 15200, lr = 0.456\nI1212 15:39:04.402909 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.483516\nI1212 15:41:22.732791 20613 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1212 15:42:44.594285 20613 
solver.cpp:404]     Test net output #0: accuracy = 0.82948\nI1212 15:42:44.594611 20613 solver.cpp:404]     Test net output #1: loss = 0.824376 (* 1 = 0.824376 loss)\nI1212 15:42:45.909898 20613 solver.cpp:228] Iteration 15300, loss = 0.07398\nI1212 15:42:45.909952 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 15:42:45.909978 20613 solver.cpp:244]     Train net output #1: loss = 0.0739798 (* 1 = 0.0739798 loss)\nI1212 15:42:46.003173 20613 sgd_solver.cpp:174] Iteration 15300, lr = 0.459\nI1212 15:42:46.016651 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.546173\nI1212 15:45:04.442255 20613 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1212 15:46:26.291589 20613 solver.cpp:404]     Test net output #0: accuracy = 0.781\nI1212 15:46:26.291940 20613 solver.cpp:404]     Test net output #1: loss = 1.08072 (* 1 = 1.08072 loss)\nI1212 15:46:27.607909 20613 solver.cpp:228] Iteration 15400, loss = 0.091186\nI1212 15:46:27.607972 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 15:46:27.608005 20613 solver.cpp:244]     Train net output #1: loss = 0.0911858 (* 1 = 0.0911858 loss)\nI1212 15:46:27.698770 20613 sgd_solver.cpp:174] Iteration 15400, lr = 0.462\nI1212 15:46:27.712666 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.419442\nI1212 15:48:46.145207 20613 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1212 15:50:08.029057 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83656\nI1212 15:50:08.029371 20613 solver.cpp:404]     Test net output #1: loss = 0.704451 (* 1 = 0.704451 loss)\nI1212 15:50:09.345372 20613 solver.cpp:228] Iteration 15500, loss = 0.114246\nI1212 15:50:09.345437 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 15:50:09.345461 20613 solver.cpp:244]     Train net output #1: loss = 0.114246 (* 1 = 0.114246 loss)\nI1212 15:50:09.437456 20613 sgd_solver.cpp:174] Iteration 15500, lr = 0.465\nI1212 15:50:09.451351 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.5117\nI1212 
15:52:27.899634 20613 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1212 15:53:49.828795 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81376\nI1212 15:53:49.829138 20613 solver.cpp:404]     Test net output #1: loss = 0.868368 (* 1 = 0.868368 loss)\nI1212 15:53:51.145022 20613 solver.cpp:228] Iteration 15600, loss = 0.0357419\nI1212 15:53:51.145083 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 15:53:51.145107 20613 solver.cpp:244]     Train net output #1: loss = 0.0357417 (* 1 = 0.0357417 loss)\nI1212 15:53:51.234432 20613 sgd_solver.cpp:174] Iteration 15600, lr = 0.468\nI1212 15:53:51.248244 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.443971\nI1212 15:56:08.928092 20613 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1212 15:57:30.708459 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81472\nI1212 15:57:30.708757 20613 solver.cpp:404]     Test net output #1: loss = 0.830482 (* 1 = 0.830482 loss)\nI1212 15:57:32.023324 20613 solver.cpp:228] Iteration 15700, loss = 0.0502872\nI1212 15:57:32.023386 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 15:57:32.023404 20613 solver.cpp:244]     Train net output #1: loss = 0.050287 (* 1 = 0.050287 loss)\nI1212 15:57:32.116662 20613 sgd_solver.cpp:174] Iteration 15700, lr = 0.471\nI1212 15:57:32.130367 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.53394\nI1212 15:59:50.591382 20613 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1212 16:01:12.372949 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8212\nI1212 16:01:12.373263 20613 solver.cpp:404]     Test net output #1: loss = 0.854524 (* 1 = 0.854524 loss)\nI1212 16:01:13.688472 20613 solver.cpp:228] Iteration 15800, loss = 0.0520589\nI1212 16:01:13.688531 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 16:01:13.688550 20613 solver.cpp:244]     Train net output #1: loss = 0.0520587 (* 1 = 0.0520587 loss)\nI1212 16:01:13.785157 20613 sgd_solver.cpp:174] 
Iteration 15800, lr = 0.474\nI1212 16:01:13.799032 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.505565\nI1212 16:03:32.343767 20613 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1212 16:04:54.124634 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85048\nI1212 16:04:54.124958 20613 solver.cpp:404]     Test net output #1: loss = 0.663896 (* 1 = 0.663896 loss)\nI1212 16:04:55.440218 20613 solver.cpp:228] Iteration 15900, loss = 0.122969\nI1212 16:04:55.440277 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 16:04:55.440295 20613 solver.cpp:244]     Train net output #1: loss = 0.122968 (* 1 = 0.122968 loss)\nI1212 16:04:55.539443 20613 sgd_solver.cpp:174] Iteration 15900, lr = 0.477\nI1212 16:04:55.553239 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.470446\nI1212 16:07:14.041713 20613 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1212 16:08:35.813382 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78816\nI1212 16:08:35.813725 20613 solver.cpp:404]     Test net output #1: loss = 0.971788 (* 1 = 0.971788 loss)\nI1212 16:08:37.129328 20613 solver.cpp:228] Iteration 16000, loss = 0.100577\nI1212 16:08:37.129389 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 16:08:37.129407 20613 solver.cpp:244]     Train net output #1: loss = 0.100577 (* 1 = 0.100577 loss)\nI1212 16:08:37.220005 20613 sgd_solver.cpp:174] Iteration 16000, lr = 0.48\nI1212 16:08:37.233836 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.471491\nI1212 16:10:55.806740 20613 solver.cpp:337] Iteration 16100, Testing net (#0)\nI1212 16:12:17.595437 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81296\nI1212 16:12:17.595758 20613 solver.cpp:404]     Test net output #1: loss = 0.803086 (* 1 = 0.803086 loss)\nI1212 16:12:18.911442 20613 solver.cpp:228] Iteration 16100, loss = 0.137317\nI1212 16:12:18.911501 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 16:12:18.911520 20613 solver.cpp:244]     Train 
net output #1: loss = 0.137317 (* 1 = 0.137317 loss)\nI1212 16:12:18.998512 20613 sgd_solver.cpp:174] Iteration 16100, lr = 0.483\nI1212 16:12:19.012349 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.53954\nI1212 16:14:37.513746 20613 solver.cpp:337] Iteration 16200, Testing net (#0)\nI1212 16:15:59.287317 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83732\nI1212 16:15:59.287658 20613 solver.cpp:404]     Test net output #1: loss = 0.674581 (* 1 = 0.674581 loss)\nI1212 16:16:00.603119 20613 solver.cpp:228] Iteration 16200, loss = 0.0838325\nI1212 16:16:00.603179 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 16:16:00.603199 20613 solver.cpp:244]     Train net output #1: loss = 0.0838324 (* 1 = 0.0838324 loss)\nI1212 16:16:00.695966 20613 sgd_solver.cpp:174] Iteration 16200, lr = 0.486\nI1212 16:16:00.709755 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.456081\nI1212 16:18:19.221397 20613 solver.cpp:337] Iteration 16300, Testing net (#0)\nI1212 16:19:40.999271 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7966\nI1212 16:19:40.999613 20613 solver.cpp:404]     Test net output #1: loss = 0.908486 (* 1 = 0.908486 loss)\nI1212 16:19:42.313935 20613 solver.cpp:228] Iteration 16300, loss = 0.0318866\nI1212 16:19:42.313994 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 16:19:42.314013 20613 solver.cpp:244]     Train net output #1: loss = 0.0318864 (* 1 = 0.0318864 loss)\nI1212 16:19:42.414881 20613 sgd_solver.cpp:174] Iteration 16300, lr = 0.489\nI1212 16:19:42.428675 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.493346\nI1212 16:22:01.001576 20613 solver.cpp:337] Iteration 16400, Testing net (#0)\nI1212 16:23:22.771941 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83352\nI1212 16:23:22.772248 20613 solver.cpp:404]     Test net output #1: loss = 0.673862 (* 1 = 0.673862 loss)\nI1212 16:23:24.087725 20613 solver.cpp:228] Iteration 16400, loss = 0.106566\nI1212 16:23:24.087785 20613 
solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 16:23:24.087801 20613 solver.cpp:244]     Train net output #1: loss = 0.106566 (* 1 = 0.106566 loss)\nI1212 16:23:24.180850 20613 sgd_solver.cpp:174] Iteration 16400, lr = 0.492\nI1212 16:23:24.194700 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.496887\nI1212 16:25:42.782353 20613 solver.cpp:337] Iteration 16500, Testing net (#0)\nI1212 16:27:04.562887 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84212\nI1212 16:27:04.563246 20613 solver.cpp:404]     Test net output #1: loss = 0.660002 (* 1 = 0.660002 loss)\nI1212 16:27:05.879557 20613 solver.cpp:228] Iteration 16500, loss = 0.109605\nI1212 16:27:05.879601 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 16:27:05.879616 20613 solver.cpp:244]     Train net output #1: loss = 0.109605 (* 1 = 0.109605 loss)\nI1212 16:27:05.970304 20613 sgd_solver.cpp:174] Iteration 16500, lr = 0.495\nI1212 16:27:05.984151 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.516627\nI1212 16:29:24.551414 20613 solver.cpp:337] Iteration 16600, Testing net (#0)\nI1212 16:30:46.326663 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78928\nI1212 16:30:46.326990 20613 solver.cpp:404]     Test net output #1: loss = 1.02931 (* 1 = 1.02931 loss)\nI1212 16:30:47.642670 20613 solver.cpp:228] Iteration 16600, loss = 0.136761\nI1212 16:30:47.642714 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 16:30:47.642729 20613 solver.cpp:244]     Train net output #1: loss = 0.136761 (* 1 = 0.136761 loss)\nI1212 16:30:47.733851 20613 sgd_solver.cpp:174] Iteration 16600, lr = 0.498\nI1212 16:30:47.747617 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.517824\nI1212 16:33:06.239023 20613 solver.cpp:337] Iteration 16700, Testing net (#0)\nI1212 16:34:28.019043 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80032\nI1212 16:34:28.019393 20613 solver.cpp:404]     Test net output #1: loss = 0.864448 (* 1 = 0.864448 loss)\nI1212 
16:34:29.334774 20613 solver.cpp:228] Iteration 16700, loss = 0.119208\nI1212 16:34:29.334817 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 16:34:29.334833 20613 solver.cpp:244]     Train net output #1: loss = 0.119208 (* 1 = 0.119208 loss)\nI1212 16:34:29.424137 20613 sgd_solver.cpp:174] Iteration 16700, lr = 0.501\nI1212 16:34:29.438114 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.470635\nI1212 16:36:47.104120 20613 solver.cpp:337] Iteration 16800, Testing net (#0)\nI1212 16:38:08.870105 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74568\nI1212 16:38:08.870450 20613 solver.cpp:404]     Test net output #1: loss = 1.16259 (* 1 = 1.16259 loss)\nI1212 16:38:10.185814 20613 solver.cpp:228] Iteration 16800, loss = 0.120371\nI1212 16:38:10.185871 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 16:38:10.185889 20613 solver.cpp:244]     Train net output #1: loss = 0.120371 (* 1 = 0.120371 loss)\nI1212 16:38:10.280220 20613 sgd_solver.cpp:174] Iteration 16800, lr = 0.504\nI1212 16:38:10.294149 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.435954\nI1212 16:40:28.908705 20613 solver.cpp:337] Iteration 16900, Testing net (#0)\nI1212 16:41:50.686766 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80692\nI1212 16:41:50.687094 20613 solver.cpp:404]     Test net output #1: loss = 0.883252 (* 1 = 0.883252 loss)\nI1212 16:41:52.001407 20613 solver.cpp:228] Iteration 16900, loss = 0.0567393\nI1212 16:41:52.001462 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 16:41:52.001480 20613 solver.cpp:244]     Train net output #1: loss = 0.0567392 (* 1 = 0.0567392 loss)\nI1212 16:41:52.096102 20613 sgd_solver.cpp:174] Iteration 16900, lr = 0.507\nI1212 16:41:52.109982 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.434021\nI1212 16:44:09.799685 20613 solver.cpp:337] Iteration 17000, Testing net (#0)\nI1212 16:45:31.540419 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7908\nI1212 
16:45:31.540766 20613 solver.cpp:404]     Test net output #1: loss = 1.00968 (* 1 = 1.00968 loss)\nI1212 16:45:32.855684 20613 solver.cpp:228] Iteration 17000, loss = 0.0739498\nI1212 16:45:32.855742 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 16:45:32.855760 20613 solver.cpp:244]     Train net output #1: loss = 0.0739496 (* 1 = 0.0739496 loss)\nI1212 16:45:32.941655 20613 sgd_solver.cpp:174] Iteration 17000, lr = 0.51\nI1212 16:45:32.955210 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.426293\nI1212 16:47:51.533584 20613 solver.cpp:337] Iteration 17100, Testing net (#0)\nI1212 16:49:13.266854 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8304\nI1212 16:49:13.267179 20613 solver.cpp:404]     Test net output #1: loss = 0.730587 (* 1 = 0.730587 loss)\nI1212 16:49:14.581398 20613 solver.cpp:228] Iteration 17100, loss = 0.0536153\nI1212 16:49:14.581455 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 16:49:14.581473 20613 solver.cpp:244]     Train net output #1: loss = 0.0536151 (* 1 = 0.0536151 loss)\nI1212 16:49:14.669565 20613 sgd_solver.cpp:174] Iteration 17100, lr = 0.513\nI1212 16:49:14.683464 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.414356\nI1212 16:51:33.145943 20613 solver.cpp:337] Iteration 17200, Testing net (#0)\nI1212 16:52:54.667107 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73352\nI1212 16:52:54.667418 20613 solver.cpp:404]     Test net output #1: loss = 1.40495 (* 1 = 1.40495 loss)\nI1212 16:52:55.981360 20613 solver.cpp:228] Iteration 17200, loss = 0.0591442\nI1212 16:52:55.981420 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 16:52:55.981437 20613 solver.cpp:244]     Train net output #1: loss = 0.059144 (* 1 = 0.059144 loss)\nI1212 16:52:56.070068 20613 sgd_solver.cpp:174] Iteration 17200, lr = 0.516\nI1212 16:52:56.083518 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.443961\nI1212 16:55:14.510469 20613 solver.cpp:337] Iteration 17300, Testing 
net (#0)\nI1212 16:56:36.102407 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82676\nI1212 16:56:36.102732 20613 solver.cpp:404]     Test net output #1: loss = 0.705274 (* 1 = 0.705274 loss)\nI1212 16:56:37.417625 20613 solver.cpp:228] Iteration 17300, loss = 0.113362\nI1212 16:56:37.417680 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 16:56:37.417698 20613 solver.cpp:244]     Train net output #1: loss = 0.113362 (* 1 = 0.113362 loss)\nI1212 16:56:37.505424 20613 sgd_solver.cpp:174] Iteration 17300, lr = 0.519\nI1212 16:56:37.519254 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.445157\nI1212 16:58:56.151401 20613 solver.cpp:337] Iteration 17400, Testing net (#0)\nI1212 17:00:17.920855 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82752\nI1212 17:00:17.921188 20613 solver.cpp:404]     Test net output #1: loss = 0.69977 (* 1 = 0.69977 loss)\nI1212 17:00:19.236784 20613 solver.cpp:228] Iteration 17400, loss = 0.0363882\nI1212 17:00:19.236845 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 17:00:19.236863 20613 solver.cpp:244]     Train net output #1: loss = 0.0363881 (* 1 = 0.0363881 loss)\nI1212 17:00:19.329601 20613 sgd_solver.cpp:174] Iteration 17400, lr = 0.522\nI1212 17:00:19.343622 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.48602\nI1212 17:02:37.886605 20613 solver.cpp:337] Iteration 17500, Testing net (#0)\nI1212 17:03:59.656009 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82576\nI1212 17:03:59.656358 20613 solver.cpp:404]     Test net output #1: loss = 0.716151 (* 1 = 0.716151 loss)\nI1212 17:04:00.972218 20613 solver.cpp:228] Iteration 17500, loss = 0.0286004\nI1212 17:04:00.972280 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 17:04:00.972299 20613 solver.cpp:244]     Train net output #1: loss = 0.0286003 (* 1 = 0.0286003 loss)\nI1212 17:04:01.061882 20613 sgd_solver.cpp:174] Iteration 17500, lr = 0.525\nI1212 17:04:01.075801 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.379396\nI1212 17:06:19.638643 20613 solver.cpp:337] Iteration 17600, Testing net (#0)\nI1212 17:07:41.403158 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77784\nI1212 17:07:41.403488 20613 solver.cpp:404]     Test net output #1: loss = 1.10521 (* 1 = 1.10521 loss)\nI1212 17:07:42.718207 20613 solver.cpp:228] Iteration 17600, loss = 0.0664305\nI1212 17:07:42.718267 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 17:07:42.718286 20613 solver.cpp:244]     Train net output #1: loss = 0.0664305 (* 1 = 0.0664305 loss)\nI1212 17:07:42.805822 20613 sgd_solver.cpp:174] Iteration 17600, lr = 0.528\nI1212 17:07:42.819732 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.449682\nI1212 17:10:01.439286 20613 solver.cpp:337] Iteration 17700, Testing net (#0)\nI1212 17:11:23.209893 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82592\nI1212 17:11:23.210217 20613 solver.cpp:404]     Test net output #1: loss = 0.757505 (* 1 = 0.757505 loss)\nI1212 17:11:24.525847 20613 solver.cpp:228] Iteration 17700, loss = 0.0372799\nI1212 17:11:24.525890 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 17:11:24.525907 20613 solver.cpp:244]     Train net output #1: loss = 0.0372798 (* 1 = 0.0372798 loss)\nI1212 17:11:24.616946 20613 sgd_solver.cpp:174] Iteration 17700, lr = 0.531\nI1212 17:11:24.630873 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.460453\nI1212 17:13:43.120594 20613 solver.cpp:337] Iteration 17800, Testing net (#0)\nI1212 17:15:04.884305 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7674\nI1212 17:15:04.884650 20613 solver.cpp:404]     Test net output #1: loss = 1.02708 (* 1 = 1.02708 loss)\nI1212 17:15:06.198714 20613 solver.cpp:228] Iteration 17800, loss = 0.068262\nI1212 17:15:06.198765 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 17:15:06.198783 20613 solver.cpp:244]     Train net output #1: loss = 0.0682619 (* 1 = 0.0682619 
loss)\nI1212 17:15:06.291559 20613 sgd_solver.cpp:174] Iteration 17800, lr = 0.534\nI1212 17:15:06.305459 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.474483\nI1212 17:17:24.685931 20613 solver.cpp:337] Iteration 17900, Testing net (#0)\nI1212 17:18:46.446319 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80508\nI1212 17:18:46.446666 20613 solver.cpp:404]     Test net output #1: loss = 0.872309 (* 1 = 0.872309 loss)\nI1212 17:18:47.761910 20613 solver.cpp:228] Iteration 17900, loss = 0.0361638\nI1212 17:18:47.761963 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 17:18:47.761981 20613 solver.cpp:244]     Train net output #1: loss = 0.0361636 (* 1 = 0.0361636 loss)\nI1212 17:18:47.855954 20613 sgd_solver.cpp:174] Iteration 17900, lr = 0.537\nI1212 17:18:47.869820 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.537261\nI1212 17:21:06.361369 20613 solver.cpp:337] Iteration 18000, Testing net (#0)\nI1212 17:22:28.124908 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82968\nI1212 17:22:28.125243 20613 solver.cpp:404]     Test net output #1: loss = 0.693079 (* 1 = 0.693079 loss)\nI1212 17:22:29.440009 20613 solver.cpp:228] Iteration 18000, loss = 0.241609\nI1212 17:22:29.440055 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 17:22:29.440071 20613 solver.cpp:244]     Train net output #1: loss = 0.241609 (* 1 = 0.241609 loss)\nI1212 17:22:29.529639 20613 sgd_solver.cpp:174] Iteration 18000, lr = 0.54\nI1212 17:22:29.543431 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.622116\nI1212 17:24:48.019264 20613 solver.cpp:337] Iteration 18100, Testing net (#0)\nI1212 17:26:09.779273 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78152\nI1212 17:26:09.779620 20613 solver.cpp:404]     Test net output #1: loss = 1.01524 (* 1 = 1.01524 loss)\nI1212 17:26:11.095327 20613 solver.cpp:228] Iteration 18100, loss = 0.0911873\nI1212 17:26:11.095387 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.976\nI1212 17:26:11.095405 20613 solver.cpp:244]     Train net output #1: loss = 0.0911872 (* 1 = 0.0911872 loss)\nI1212 17:26:11.184680 20613 sgd_solver.cpp:174] Iteration 18100, lr = 0.543\nI1212 17:26:11.198619 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.595583\nI1212 17:28:29.550010 20613 solver.cpp:337] Iteration 18200, Testing net (#0)\nI1212 17:29:51.309751 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8382\nI1212 17:29:51.310089 20613 solver.cpp:404]     Test net output #1: loss = 0.623792 (* 1 = 0.623792 loss)\nI1212 17:29:52.625303 20613 solver.cpp:228] Iteration 18200, loss = 0.0232312\nI1212 17:29:52.625344 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 17:29:52.625360 20613 solver.cpp:244]     Train net output #1: loss = 0.0232311 (* 1 = 0.0232311 loss)\nI1212 17:29:52.714454 20613 sgd_solver.cpp:174] Iteration 18200, lr = 0.546\nI1212 17:29:52.728297 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.422183\nI1212 17:32:11.110194 20613 solver.cpp:337] Iteration 18300, Testing net (#0)\nI1212 17:33:32.875255 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83328\nI1212 17:33:32.875599 20613 solver.cpp:404]     Test net output #1: loss = 0.637037 (* 1 = 0.637037 loss)\nI1212 17:33:34.189616 20613 solver.cpp:228] Iteration 18300, loss = 0.0969629\nI1212 17:33:34.189667 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 17:33:34.189684 20613 solver.cpp:244]     Train net output #1: loss = 0.0969629 (* 1 = 0.0969629 loss)\nI1212 17:33:34.283444 20613 sgd_solver.cpp:174] Iteration 18300, lr = 0.549\nI1212 17:33:34.297250 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.500923\nI1212 17:35:52.728744 20613 solver.cpp:337] Iteration 18400, Testing net (#0)\nI1212 17:37:14.484794 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81428\nI1212 17:37:14.485122 20613 solver.cpp:404]     Test net output #1: loss = 0.888275 (* 1 = 0.888275 loss)\nI1212 17:37:15.800695 20613 solver.cpp:228] Iteration 
18400, loss = 0.0917042\nI1212 17:37:15.800737 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 17:37:15.800755 20613 solver.cpp:244]     Train net output #1: loss = 0.0917041 (* 1 = 0.0917041 loss)\nI1212 17:37:15.893045 20613 sgd_solver.cpp:174] Iteration 18400, lr = 0.552\nI1212 17:37:15.907012 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.470714\nI1212 17:39:34.335438 20613 solver.cpp:337] Iteration 18500, Testing net (#0)\nI1212 17:40:56.100002 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8098\nI1212 17:40:56.100340 20613 solver.cpp:404]     Test net output #1: loss = 0.777584 (* 1 = 0.777584 loss)\nI1212 17:40:57.415689 20613 solver.cpp:228] Iteration 18500, loss = 0.172129\nI1212 17:40:57.415738 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 17:40:57.415755 20613 solver.cpp:244]     Train net output #1: loss = 0.172129 (* 1 = 0.172129 loss)\nI1212 17:40:57.507088 20613 sgd_solver.cpp:174] Iteration 18500, lr = 0.555\nI1212 17:40:57.520983 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.507719\nI1212 17:43:15.104190 20613 solver.cpp:337] Iteration 18600, Testing net (#0)\nI1212 17:44:36.866832 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77216\nI1212 17:44:36.867153 20613 solver.cpp:404]     Test net output #1: loss = 1.06073 (* 1 = 1.06073 loss)\nI1212 17:44:38.181771 20613 solver.cpp:228] Iteration 18600, loss = 0.0971909\nI1212 17:44:38.181815 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 17:44:38.181831 20613 solver.cpp:244]     Train net output #1: loss = 0.0971908 (* 1 = 0.0971908 loss)\nI1212 17:44:38.273808 20613 sgd_solver.cpp:174] Iteration 18600, lr = 0.558\nI1212 17:44:38.287654 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.439147\nI1212 17:46:56.819686 20613 solver.cpp:337] Iteration 18700, Testing net (#0)\nI1212 17:48:18.579433 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84128\nI1212 17:48:18.579784 20613 solver.cpp:404]     Test net 
output #1: loss = 0.653195 (* 1 = 0.653195 loss)\nI1212 17:48:19.895413 20613 solver.cpp:228] Iteration 18700, loss = 0.0913727\nI1212 17:48:19.895457 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 17:48:19.895474 20613 solver.cpp:244]     Train net output #1: loss = 0.0913727 (* 1 = 0.0913727 loss)\nI1212 17:48:19.987567 20613 sgd_solver.cpp:174] Iteration 18700, lr = 0.561\nI1212 17:48:20.001464 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.53015\nI1212 17:50:38.458652 20613 solver.cpp:337] Iteration 18800, Testing net (#0)\nI1212 17:52:00.233655 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8498\nI1212 17:52:00.233988 20613 solver.cpp:404]     Test net output #1: loss = 0.579466 (* 1 = 0.579466 loss)\nI1212 17:52:01.549188 20613 solver.cpp:228] Iteration 18800, loss = 0.0213615\nI1212 17:52:01.549248 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 17:52:01.549268 20613 solver.cpp:244]     Train net output #1: loss = 0.0213614 (* 1 = 0.0213614 loss)\nI1212 17:52:01.641559 20613 sgd_solver.cpp:174] Iteration 18800, lr = 0.564\nI1212 17:52:01.655534 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400779\nI1212 17:54:20.196400 20613 solver.cpp:337] Iteration 18900, Testing net (#0)\nI1212 17:55:41.971596 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8096\nI1212 17:55:41.971920 20613 solver.cpp:404]     Test net output #1: loss = 0.761244 (* 1 = 0.761244 loss)\nI1212 17:55:43.285797 20613 solver.cpp:228] Iteration 18900, loss = 0.0511598\nI1212 17:55:43.285856 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 17:55:43.285873 20613 solver.cpp:244]     Train net output #1: loss = 0.0511598 (* 1 = 0.0511598 loss)\nI1212 17:55:43.377696 20613 sgd_solver.cpp:174] Iteration 18900, lr = 0.567\nI1212 17:55:43.391513 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.418154\nI1212 17:58:02.020434 20613 solver.cpp:337] Iteration 19000, Testing net (#0)\nI1212 17:59:23.792093 20613 
solver.cpp:404]     Test net output #0: accuracy = 0.77776\nI1212 17:59:23.792415 20613 solver.cpp:404]     Test net output #1: loss = 1.0357 (* 1 = 1.0357 loss)\nI1212 17:59:25.107962 20613 solver.cpp:228] Iteration 19000, loss = 0.0636416\nI1212 17:59:25.108021 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 17:59:25.108037 20613 solver.cpp:244]     Train net output #1: loss = 0.0636415 (* 1 = 0.0636415 loss)\nI1212 17:59:25.195282 20613 sgd_solver.cpp:174] Iteration 19000, lr = 0.57\nI1212 17:59:25.209219 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.479013\nI1212 18:01:43.743582 20613 solver.cpp:337] Iteration 19100, Testing net (#0)\nI1212 18:03:05.520707 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84896\nI1212 18:03:05.521041 20613 solver.cpp:404]     Test net output #1: loss = 0.573347 (* 1 = 0.573347 loss)\nI1212 18:03:06.836055 20613 solver.cpp:228] Iteration 19100, loss = 0.117735\nI1212 18:03:06.836120 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 18:03:06.836138 20613 solver.cpp:244]     Train net output #1: loss = 0.117735 (* 1 = 0.117735 loss)\nI1212 18:03:06.924311 20613 sgd_solver.cpp:174] Iteration 19100, lr = 0.573\nI1212 18:03:06.938199 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.418227\nI1212 18:05:25.427309 20613 solver.cpp:337] Iteration 19200, Testing net (#0)\nI1212 18:06:47.203616 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7944\nI1212 18:06:47.203963 20613 solver.cpp:404]     Test net output #1: loss = 0.893946 (* 1 = 0.893946 loss)\nI1212 18:06:48.519707 20613 solver.cpp:228] Iteration 19200, loss = 0.0655091\nI1212 18:06:48.519765 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 18:06:48.519783 20613 solver.cpp:244]     Train net output #1: loss = 0.0655091 (* 1 = 0.0655091 loss)\nI1212 18:06:48.611814 20613 sgd_solver.cpp:174] Iteration 19200, lr = 0.576\nI1212 18:06:48.625751 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.502812\nI1212 
18:09:07.180742 20613 solver.cpp:337] Iteration 19300, Testing net (#0)\nI1212 18:10:28.978209 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8472\nI1212 18:10:28.978524 20613 solver.cpp:404]     Test net output #1: loss = 0.633003 (* 1 = 0.633003 loss)\nI1212 18:10:30.293998 20613 solver.cpp:228] Iteration 19300, loss = 0.0908205\nI1212 18:10:30.294057 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 18:10:30.294075 20613 solver.cpp:244]     Train net output #1: loss = 0.0908205 (* 1 = 0.0908205 loss)\nI1212 18:10:30.386608 20613 sgd_solver.cpp:174] Iteration 19300, lr = 0.579\nI1212 18:10:30.400116 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.387807\nI1212 18:12:48.880455 20613 solver.cpp:337] Iteration 19400, Testing net (#0)\nI1212 18:14:10.673007 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83892\nI1212 18:14:10.673337 20613 solver.cpp:404]     Test net output #1: loss = 0.671293 (* 1 = 0.671293 loss)\nI1212 18:14:11.988894 20613 solver.cpp:228] Iteration 19400, loss = 0.124708\nI1212 18:14:11.988940 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 18:14:11.988955 20613 solver.cpp:244]     Train net output #1: loss = 0.124708 (* 1 = 0.124708 loss)\nI1212 18:14:12.074180 20613 sgd_solver.cpp:174] Iteration 19400, lr = 0.582\nI1212 18:14:12.088083 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.409909\nI1212 18:16:30.553781 20613 solver.cpp:337] Iteration 19500, Testing net (#0)\nI1212 18:17:52.321285 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7542\nI1212 18:17:52.321609 20613 solver.cpp:404]     Test net output #1: loss = 1.0552 (* 1 = 1.0552 loss)\nI1212 18:17:53.636785 20613 solver.cpp:228] Iteration 19500, loss = 0.136449\nI1212 18:17:53.636827 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 18:17:53.636843 20613 solver.cpp:244]     Train net output #1: loss = 0.136449 (* 1 = 0.136449 loss)\nI1212 18:17:53.728535 20613 sgd_solver.cpp:174] Iteration 
19500, lr = 0.585\nI1212 18:17:53.742440 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.567679\nI1212 18:20:12.253111 20613 solver.cpp:337] Iteration 19600, Testing net (#0)\nI1212 18:21:34.025265 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8468\nI1212 18:21:34.025594 20613 solver.cpp:404]     Test net output #1: loss = 0.654869 (* 1 = 0.654869 loss)\nI1212 18:21:35.341194 20613 solver.cpp:228] Iteration 19600, loss = 0.0231939\nI1212 18:21:35.341254 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 18:21:35.341272 20613 solver.cpp:244]     Train net output #1: loss = 0.0231938 (* 1 = 0.0231938 loss)\nI1212 18:21:35.427018 20613 sgd_solver.cpp:174] Iteration 19600, lr = 0.588\nI1212 18:21:35.440806 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362915\nI1212 18:23:54.017925 20613 solver.cpp:337] Iteration 19700, Testing net (#0)\nI1212 18:25:15.792783 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82016\nI1212 18:25:15.793118 20613 solver.cpp:404]     Test net output #1: loss = 0.729812 (* 1 = 0.729812 loss)\nI1212 18:25:17.108530 20613 solver.cpp:228] Iteration 19700, loss = 0.0680308\nI1212 18:25:17.108592 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 18:25:17.108618 20613 solver.cpp:244]     Train net output #1: loss = 0.0680307 (* 1 = 0.0680307 loss)\nI1212 18:25:17.196084 20613 sgd_solver.cpp:174] Iteration 19700, lr = 0.591\nI1212 18:25:17.210006 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.442288\nI1212 18:27:35.681646 20613 solver.cpp:337] Iteration 19800, Testing net (#0)\nI1212 18:28:57.555058 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72476\nI1212 18:28:57.555413 20613 solver.cpp:404]     Test net output #1: loss = 1.37711 (* 1 = 1.37711 loss)\nI1212 18:28:58.871227 20613 solver.cpp:228] Iteration 19800, loss = 0.0417198\nI1212 18:28:58.871274 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 18:28:58.871299 20613 solver.cpp:244]     Train net 
output #1: loss = 0.0417197 (* 1 = 0.0417197 loss)\nI1212 18:28:58.958809 20613 sgd_solver.cpp:174] Iteration 19800, lr = 0.594\nI1212 18:28:58.972735 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.412256\nI1212 18:31:17.533061 20613 solver.cpp:337] Iteration 19900, Testing net (#0)\nI1212 18:32:39.368652 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80556\nI1212 18:32:39.368990 20613 solver.cpp:404]     Test net output #1: loss = 0.812944 (* 1 = 0.812944 loss)\nI1212 18:32:40.684020 20613 solver.cpp:228] Iteration 19900, loss = 0.0201343\nI1212 18:32:40.684082 20613 solver.cpp:244]     Train net output #0: accuracy = 1\nI1212 18:32:40.684108 20613 solver.cpp:244]     Train net output #1: loss = 0.0201342 (* 1 = 0.0201342 loss)\nI1212 18:32:40.771555 20613 sgd_solver.cpp:174] Iteration 19900, lr = 0.597\nI1212 18:32:40.785429 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.441783\nI1212 18:34:59.321486 20613 solver.cpp:337] Iteration 20000, Testing net (#0)\nI1212 18:36:21.171566 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79952\nI1212 18:36:21.171900 20613 solver.cpp:404]     Test net output #1: loss = 0.995917 (* 1 = 0.995917 loss)\nI1212 18:36:22.486805 20613 solver.cpp:228] Iteration 20000, loss = 0.140054\nI1212 18:36:22.486851 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 18:36:22.486874 20613 solver.cpp:244]     Train net output #1: loss = 0.140053 (* 1 = 0.140053 loss)\nI1212 18:36:22.579478 20613 sgd_solver.cpp:174] Iteration 20000, lr = 0.6\nI1212 18:36:22.593291 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.457851\nI1212 18:38:41.120504 20613 solver.cpp:337] Iteration 20100, Testing net (#0)\nI1212 18:40:02.983192 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80416\nI1212 18:40:02.983527 20613 solver.cpp:404]     Test net output #1: loss = 0.857424 (* 1 = 0.857424 loss)\nI1212 18:40:04.297956 20613 solver.cpp:228] Iteration 20100, loss = 0.0702737\nI1212 18:40:04.298002 20613 solver.cpp:244]  
   Train net output #0: accuracy = 0.968\nI1212 18:40:04.298027 20613 solver.cpp:244]     Train net output #1: loss = 0.0702736 (* 1 = 0.0702736 loss)\nI1212 18:40:04.386384 20613 sgd_solver.cpp:174] Iteration 20100, lr = 0.603\nI1212 18:40:04.400239 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.422713\nI1212 18:42:22.797680 20613 solver.cpp:337] Iteration 20200, Testing net (#0)\nI1212 18:43:44.666970 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81508\nI1212 18:43:44.667331 20613 solver.cpp:404]     Test net output #1: loss = 0.746311 (* 1 = 0.746311 loss)\nI1212 18:43:45.982693 20613 solver.cpp:228] Iteration 20200, loss = 0.132657\nI1212 18:43:45.982753 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 18:43:45.982779 20613 solver.cpp:244]     Train net output #1: loss = 0.132657 (* 1 = 0.132657 loss)\nI1212 18:43:46.076793 20613 sgd_solver.cpp:174] Iteration 20200, lr = 0.606\nI1212 18:43:46.090751 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.473598\nI1212 18:46:04.397882 20613 solver.cpp:337] Iteration 20300, Testing net (#0)\nI1212 18:47:26.221078 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80164\nI1212 18:47:26.221415 20613 solver.cpp:404]     Test net output #1: loss = 0.895751 (* 1 = 0.895751 loss)\nI1212 18:47:27.535665 20613 solver.cpp:228] Iteration 20300, loss = 0.0953621\nI1212 18:47:27.535720 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 18:47:27.535748 20613 solver.cpp:244]     Train net output #1: loss = 0.0953621 (* 1 = 0.0953621 loss)\nI1212 18:47:27.620093 20613 sgd_solver.cpp:174] Iteration 20300, lr = 0.609\nI1212 18:47:27.633960 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.548776\nI1212 18:49:45.921723 20613 solver.cpp:337] Iteration 20400, Testing net (#0)\nI1212 18:51:07.788321 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81536\nI1212 18:51:07.788645 20613 solver.cpp:404]     Test net output #1: loss = 0.801555 (* 1 = 0.801555 loss)\nI1212 
18:51:09.102071 20613 solver.cpp:228] Iteration 20400, loss = 0.0461271\nI1212 18:51:09.102126 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 18:51:09.102154 20613 solver.cpp:244]     Train net output #1: loss = 0.046127 (* 1 = 0.046127 loss)\nI1212 18:51:09.187734 20613 sgd_solver.cpp:174] Iteration 20400, lr = 0.612\nI1212 18:51:09.201699 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.429584\nI1212 18:53:27.495499 20613 solver.cpp:337] Iteration 20500, Testing net (#0)\nI1212 18:54:49.332700 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8084\nI1212 18:54:49.333066 20613 solver.cpp:404]     Test net output #1: loss = 0.79958 (* 1 = 0.79958 loss)\nI1212 18:54:50.646651 20613 solver.cpp:228] Iteration 20500, loss = 0.110183\nI1212 18:54:50.646694 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 18:54:50.646716 20613 solver.cpp:244]     Train net output #1: loss = 0.110183 (* 1 = 0.110183 loss)\nI1212 18:54:50.734359 20613 sgd_solver.cpp:174] Iteration 20500, lr = 0.615\nI1212 18:54:50.748174 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.494294\nI1212 18:57:09.068869 20613 solver.cpp:337] Iteration 20600, Testing net (#0)\nI1212 18:58:30.911965 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82904\nI1212 18:58:30.912299 20613 solver.cpp:404]     Test net output #1: loss = 0.675583 (* 1 = 0.675583 loss)\nI1212 18:58:32.226217 20613 solver.cpp:228] Iteration 20600, loss = 0.0436095\nI1212 18:58:32.226279 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 18:58:32.226305 20613 solver.cpp:244]     Train net output #1: loss = 0.0436094 (* 1 = 0.0436094 loss)\nI1212 18:58:32.314272 20613 sgd_solver.cpp:174] Iteration 20600, lr = 0.618\nI1212 18:58:32.327582 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.361849\nI1212 19:00:50.599088 20613 solver.cpp:337] Iteration 20700, Testing net (#0)\nI1212 19:02:12.477046 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74664\nI1212 
19:02:12.477383 20613 solver.cpp:404]     Test net output #1: loss = 1.22465 (* 1 = 1.22465 loss)\nI1212 19:02:13.790252 20613 solver.cpp:228] Iteration 20700, loss = 0.0552966\nI1212 19:02:13.790313 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 19:02:13.790339 20613 solver.cpp:244]     Train net output #1: loss = 0.0552965 (* 1 = 0.0552965 loss)\nI1212 19:02:13.882642 20613 sgd_solver.cpp:174] Iteration 20700, lr = 0.621\nI1212 19:02:13.896436 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.445899\nI1212 19:04:32.173084 20613 solver.cpp:337] Iteration 20800, Testing net (#0)\nI1212 19:05:53.999907 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7888\nI1212 19:05:54.000241 20613 solver.cpp:404]     Test net output #1: loss = 0.912812 (* 1 = 0.912812 loss)\nI1212 19:05:55.314021 20613 solver.cpp:228] Iteration 20800, loss = 0.109104\nI1212 19:05:55.314085 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 19:05:55.314111 20613 solver.cpp:244]     Train net output #1: loss = 0.109104 (* 1 = 0.109104 loss)\nI1212 19:05:55.406656 20613 sgd_solver.cpp:174] Iteration 20800, lr = 0.624\nI1212 19:05:55.420568 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.439203\nI1212 19:08:13.785640 20613 solver.cpp:337] Iteration 20900, Testing net (#0)\nI1212 19:09:35.579403 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74036\nI1212 19:09:35.579758 20613 solver.cpp:404]     Test net output #1: loss = 1.23743 (* 1 = 1.23743 loss)\nI1212 19:09:36.893455 20613 solver.cpp:228] Iteration 20900, loss = 0.127573\nI1212 19:09:36.893519 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 19:09:36.893544 20613 solver.cpp:244]     Train net output #1: loss = 0.127572 (* 1 = 0.127572 loss)\nI1212 19:09:36.982861 20613 sgd_solver.cpp:174] Iteration 20900, lr = 0.627\nI1212 19:09:36.996671 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.449373\nI1212 19:11:55.314949 20613 solver.cpp:337] Iteration 21000, Testing net 
(#0)\nI1212 19:13:17.142256 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8084\nI1212 19:13:17.142601 20613 solver.cpp:404]     Test net output #1: loss = 0.837605 (* 1 = 0.837605 loss)\nI1212 19:13:18.456245 20613 solver.cpp:228] Iteration 21000, loss = 0.0940473\nI1212 19:13:18.456307 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 19:13:18.456333 20613 solver.cpp:244]     Train net output #1: loss = 0.0940472 (* 1 = 0.0940472 loss)\nI1212 19:13:18.545197 20613 sgd_solver.cpp:174] Iteration 21000, lr = 0.63\nI1212 19:13:18.559072 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.489378\nI1212 19:15:36.803346 20613 solver.cpp:337] Iteration 21100, Testing net (#0)\nI1212 19:16:58.683310 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8214\nI1212 19:16:58.683645 20613 solver.cpp:404]     Test net output #1: loss = 0.703746 (* 1 = 0.703746 loss)\nI1212 19:16:59.997596 20613 solver.cpp:228] Iteration 21100, loss = 0.0811126\nI1212 19:16:59.997650 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 19:16:59.997675 20613 solver.cpp:244]     Train net output #1: loss = 0.0811125 (* 1 = 0.0811125 loss)\nI1212 19:17:00.089236 20613 sgd_solver.cpp:174] Iteration 21100, lr = 0.633\nI1212 19:17:00.103096 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.374126\nI1212 19:19:18.364778 20613 solver.cpp:337] Iteration 21200, Testing net (#0)\nI1212 19:20:40.217314 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82016\nI1212 19:20:40.217634 20613 solver.cpp:404]     Test net output #1: loss = 0.720105 (* 1 = 0.720105 loss)\nI1212 19:20:41.531620 20613 solver.cpp:228] Iteration 21200, loss = 0.0764744\nI1212 19:20:41.531682 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 19:20:41.531708 20613 solver.cpp:244]     Train net output #1: loss = 0.0764744 (* 1 = 0.0764744 loss)\nI1212 19:20:41.622808 20613 sgd_solver.cpp:174] Iteration 21200, lr = 0.636\nI1212 19:20:41.636693 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.441987\nI1212 19:22:59.914595 20613 solver.cpp:337] Iteration 21300, Testing net (#0)\nI1212 19:24:21.824518 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82216\nI1212 19:24:21.824869 20613 solver.cpp:404]     Test net output #1: loss = 0.729888 (* 1 = 0.729888 loss)\nI1212 19:24:23.138833 20613 solver.cpp:228] Iteration 21300, loss = 0.0816714\nI1212 19:24:23.138895 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 19:24:23.138921 20613 solver.cpp:244]     Train net output #1: loss = 0.0816714 (* 1 = 0.0816714 loss)\nI1212 19:24:23.229003 20613 sgd_solver.cpp:174] Iteration 21300, lr = 0.639\nI1212 19:24:23.242784 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.469716\nI1212 19:26:40.761324 20613 solver.cpp:337] Iteration 21400, Testing net (#0)\nI1212 19:28:02.690619 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84576\nI1212 19:28:02.690971 20613 solver.cpp:404]     Test net output #1: loss = 0.630465 (* 1 = 0.630465 loss)\nI1212 19:28:04.004953 20613 solver.cpp:228] Iteration 21400, loss = 0.108007\nI1212 19:28:04.005015 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 19:28:04.005043 20613 solver.cpp:244]     Train net output #1: loss = 0.108007 (* 1 = 0.108007 loss)\nI1212 19:28:04.096638 20613 sgd_solver.cpp:174] Iteration 21400, lr = 0.642\nI1212 19:28:04.110456 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.421304\nI1212 19:30:22.414086 20613 solver.cpp:337] Iteration 21500, Testing net (#0)\nI1212 19:31:44.059379 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82696\nI1212 19:31:44.059731 20613 solver.cpp:404]     Test net output #1: loss = 0.719364 (* 1 = 0.719364 loss)\nI1212 19:31:45.373683 20613 solver.cpp:228] Iteration 21500, loss = 0.0792849\nI1212 19:31:45.373730 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 19:31:45.373756 20613 solver.cpp:244]     Train net output #1: loss = 0.0792848 (* 1 = 0.0792848 
loss)\nI1212 19:31:45.461704 20613 sgd_solver.cpp:174] Iteration 21500, lr = 0.645\nI1212 19:31:45.475468 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.414047\nI1212 19:34:03.829139 20613 solver.cpp:337] Iteration 21600, Testing net (#0)\nI1212 19:35:25.667915 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81128\nI1212 19:35:25.668259 20613 solver.cpp:404]     Test net output #1: loss = 0.721812 (* 1 = 0.721812 loss)\nI1212 19:35:26.981854 20613 solver.cpp:228] Iteration 21600, loss = 0.0710283\nI1212 19:35:26.981906 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 19:35:26.981933 20613 solver.cpp:244]     Train net output #1: loss = 0.0710282 (* 1 = 0.0710282 loss)\nI1212 19:35:27.071681 20613 sgd_solver.cpp:174] Iteration 21600, lr = 0.648\nI1212 19:35:27.085539 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405643\nI1212 19:37:45.411175 20613 solver.cpp:337] Iteration 21700, Testing net (#0)\nI1212 19:39:07.221154 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82308\nI1212 19:39:07.221511 20613 solver.cpp:404]     Test net output #1: loss = 0.716301 (* 1 = 0.716301 loss)\nI1212 19:39:08.535873 20613 solver.cpp:228] Iteration 21700, loss = 0.106434\nI1212 19:39:08.535929 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 19:39:08.535954 20613 solver.cpp:244]     Train net output #1: loss = 0.106434 (* 1 = 0.106434 loss)\nI1212 19:39:08.624238 20613 sgd_solver.cpp:174] Iteration 21700, lr = 0.651\nI1212 19:39:08.638103 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.427049\nI1212 19:41:27.031229 20613 solver.cpp:337] Iteration 21800, Testing net (#0)\nI1212 19:42:48.813338 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79972\nI1212 19:42:48.813659 20613 solver.cpp:404]     Test net output #1: loss = 0.87859 (* 1 = 0.87859 loss)\nI1212 19:42:50.127362 20613 solver.cpp:228] Iteration 21800, loss = 0.126878\nI1212 19:42:50.127419 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.96\nI1212 19:42:50.127444 20613 solver.cpp:244]     Train net output #1: loss = 0.126878 (* 1 = 0.126878 loss)\nI1212 19:42:50.216117 20613 sgd_solver.cpp:174] Iteration 21800, lr = 0.654\nI1212 19:42:50.230039 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.423969\nI1212 19:45:08.596369 20613 solver.cpp:337] Iteration 21900, Testing net (#0)\nI1212 19:46:30.434590 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79336\nI1212 19:46:30.434942 20613 solver.cpp:404]     Test net output #1: loss = 0.872004 (* 1 = 0.872004 loss)\nI1212 19:46:31.747251 20613 solver.cpp:228] Iteration 21900, loss = 0.057668\nI1212 19:46:31.747308 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 19:46:31.747334 20613 solver.cpp:244]     Train net output #1: loss = 0.0576679 (* 1 = 0.0576679 loss)\nI1212 19:46:31.838927 20613 sgd_solver.cpp:174] Iteration 21900, lr = 0.657\nI1212 19:46:31.852787 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.500621\nI1212 19:48:50.218472 20613 solver.cpp:337] Iteration 22000, Testing net (#0)\nI1212 19:50:11.994292 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80216\nI1212 19:50:11.994590 20613 solver.cpp:404]     Test net output #1: loss = 0.886842 (* 1 = 0.886842 loss)\nI1212 19:50:13.308413 20613 solver.cpp:228] Iteration 22000, loss = 0.052812\nI1212 19:50:13.308470 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 19:50:13.308495 20613 solver.cpp:244]     Train net output #1: loss = 0.0528119 (* 1 = 0.0528119 loss)\nI1212 19:50:13.398156 20613 sgd_solver.cpp:174] Iteration 22000, lr = 0.66\nI1212 19:50:13.411936 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389965\nI1212 19:52:30.861007 20613 solver.cpp:337] Iteration 22100, Testing net (#0)\nI1212 19:53:52.706327 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81104\nI1212 19:53:52.706699 20613 solver.cpp:404]     Test net output #1: loss = 0.86732 (* 1 = 0.86732 loss)\nI1212 19:53:54.019688 20613 solver.cpp:228] Iteration 
22100, loss = 0.065205\nI1212 19:53:54.019743 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 19:53:54.019769 20613 solver.cpp:244]     Train net output #1: loss = 0.0652049 (* 1 = 0.0652049 loss)\nI1212 19:53:54.106887 20613 sgd_solver.cpp:174] Iteration 22100, lr = 0.663\nI1212 19:53:54.120441 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347353\nI1212 19:56:12.414336 20613 solver.cpp:337] Iteration 22200, Testing net (#0)\nI1212 19:57:34.206782 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80284\nI1212 19:57:34.207104 20613 solver.cpp:404]     Test net output #1: loss = 0.817477 (* 1 = 0.817477 loss)\nI1212 19:57:35.520148 20613 solver.cpp:228] Iteration 22200, loss = 0.125639\nI1212 19:57:35.520201 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 19:57:35.520227 20613 solver.cpp:244]     Train net output #1: loss = 0.125639 (* 1 = 0.125639 loss)\nI1212 19:57:35.610687 20613 sgd_solver.cpp:174] Iteration 22200, lr = 0.666\nI1212 19:57:35.624598 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.504645\nI1212 19:59:53.895176 20613 solver.cpp:337] Iteration 22300, Testing net (#0)\nI1212 20:01:15.739820 20613 solver.cpp:404]     Test net output #0: accuracy = 0.814\nI1212 20:01:15.740166 20613 solver.cpp:404]     Test net output #1: loss = 0.873783 (* 1 = 0.873783 loss)\nI1212 20:01:17.053561 20613 solver.cpp:228] Iteration 22300, loss = 0.100561\nI1212 20:01:17.053617 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 20:01:17.053643 20613 solver.cpp:244]     Train net output #1: loss = 0.100561 (* 1 = 0.100561 loss)\nI1212 20:01:17.139829 20613 sgd_solver.cpp:174] Iteration 22300, lr = 0.669\nI1212 20:01:17.153658 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.437147\nI1212 20:03:35.386770 20613 solver.cpp:337] Iteration 22400, Testing net (#0)\nI1212 20:04:57.249591 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78292\nI1212 20:04:57.249927 20613 solver.cpp:404]     Test net 
output #1: loss = 1.05376 (* 1 = 1.05376 loss)\nI1212 20:04:58.563457 20613 solver.cpp:228] Iteration 22400, loss = 0.0732823\nI1212 20:04:58.563513 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 20:04:58.563539 20613 solver.cpp:244]     Train net output #1: loss = 0.0732822 (* 1 = 0.0732822 loss)\nI1212 20:04:58.653957 20613 sgd_solver.cpp:174] Iteration 22400, lr = 0.672\nI1212 20:04:58.667749 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.458666\nI1212 20:07:17.025885 20613 solver.cpp:337] Iteration 22500, Testing net (#0)\nI1212 20:08:38.837534 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82168\nI1212 20:08:38.837868 20613 solver.cpp:404]     Test net output #1: loss = 0.759365 (* 1 = 0.759365 loss)\nI1212 20:08:40.151535 20613 solver.cpp:228] Iteration 22500, loss = 0.0622108\nI1212 20:08:40.151587 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 20:08:40.151612 20613 solver.cpp:244]     Train net output #1: loss = 0.0622107 (* 1 = 0.0622107 loss)\nI1212 20:08:40.242369 20613 sgd_solver.cpp:174] Iteration 22500, lr = 0.675\nI1212 20:08:40.256363 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.412325\nI1212 20:10:58.567487 20613 solver.cpp:337] Iteration 22600, Testing net (#0)\nI1212 20:12:20.456627 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84536\nI1212 20:12:20.456946 20613 solver.cpp:404]     Test net output #1: loss = 0.608042 (* 1 = 0.608042 loss)\nI1212 20:12:21.770531 20613 solver.cpp:228] Iteration 22600, loss = 0.0409903\nI1212 20:12:21.770577 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 20:12:21.770602 20613 solver.cpp:244]     Train net output #1: loss = 0.0409903 (* 1 = 0.0409903 loss)\nI1212 20:12:21.859520 20613 sgd_solver.cpp:174] Iteration 22600, lr = 0.678\nI1212 20:12:21.873363 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.435103\nI1212 20:14:40.198704 20613 solver.cpp:337] Iteration 22700, Testing net (#0)\nI1212 20:16:02.040807 20613 
solver.cpp:404]     Test net output #0: accuracy = 0.847\nI1212 20:16:02.041152 20613 solver.cpp:404]     Test net output #1: loss = 0.546834 (* 1 = 0.546834 loss)\nI1212 20:16:03.354328 20613 solver.cpp:228] Iteration 22700, loss = 0.140769\nI1212 20:16:03.354380 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 20:16:03.354405 20613 solver.cpp:244]     Train net output #1: loss = 0.140768 (* 1 = 0.140768 loss)\nI1212 20:16:03.443768 20613 sgd_solver.cpp:174] Iteration 22700, lr = 0.681\nI1212 20:16:03.457682 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327082\nI1212 20:18:21.788267 20613 solver.cpp:337] Iteration 22800, Testing net (#0)\nI1212 20:19:43.617137 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81732\nI1212 20:19:43.617472 20613 solver.cpp:404]     Test net output #1: loss = 0.781907 (* 1 = 0.781907 loss)\nI1212 20:19:44.930704 20613 solver.cpp:228] Iteration 22800, loss = 0.0868467\nI1212 20:19:44.930758 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 20:19:44.930784 20613 solver.cpp:244]     Train net output #1: loss = 0.0868467 (* 1 = 0.0868467 loss)\nI1212 20:19:45.019394 20613 sgd_solver.cpp:174] Iteration 22800, lr = 0.684\nI1212 20:19:45.033231 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389144\nI1212 20:22:03.399376 20613 solver.cpp:337] Iteration 22900, Testing net (#0)\nI1212 20:23:25.157924 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69776\nI1212 20:23:25.158288 20613 solver.cpp:404]     Test net output #1: loss = 1.67548 (* 1 = 1.67548 loss)\nI1212 20:23:26.472139 20613 solver.cpp:228] Iteration 22900, loss = 0.0755339\nI1212 20:23:26.472196 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 20:23:26.472221 20613 solver.cpp:244]     Train net output #1: loss = 0.0755339 (* 1 = 0.0755339 loss)\nI1212 20:23:26.563685 20613 sgd_solver.cpp:174] Iteration 22900, lr = 0.687\nI1212 20:23:26.577177 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.469217\nI1212 
20:25:44.880389 20613 solver.cpp:337] Iteration 23000, Testing net (#0)\nI1212 20:27:06.713731 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82776\nI1212 20:27:06.714083 20613 solver.cpp:404]     Test net output #1: loss = 0.683329 (* 1 = 0.683329 loss)\nI1212 20:27:08.026548 20613 solver.cpp:228] Iteration 23000, loss = 0.122\nI1212 20:27:08.026607 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 20:27:08.026633 20613 solver.cpp:244]     Train net output #1: loss = 0.122 (* 1 = 0.122 loss)\nI1212 20:27:08.118376 20613 sgd_solver.cpp:174] Iteration 23000, lr = 0.69\nI1212 20:27:08.132395 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.478408\nI1212 20:29:26.467289 20613 solver.cpp:337] Iteration 23100, Testing net (#0)\nI1212 20:30:48.126575 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81808\nI1212 20:30:48.126888 20613 solver.cpp:404]     Test net output #1: loss = 0.717443 (* 1 = 0.717443 loss)\nI1212 20:30:49.439710 20613 solver.cpp:228] Iteration 23100, loss = 0.0713622\nI1212 20:30:49.439771 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 20:30:49.439798 20613 solver.cpp:244]     Train net output #1: loss = 0.0713622 (* 1 = 0.0713622 loss)\nI1212 20:30:49.526736 20613 sgd_solver.cpp:174] Iteration 23100, lr = 0.693\nI1212 20:30:49.540664 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.356926\nI1212 20:33:07.864825 20613 solver.cpp:337] Iteration 23200, Testing net (#0)\nI1212 20:34:29.604291 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79548\nI1212 20:34:29.604584 20613 solver.cpp:404]     Test net output #1: loss = 0.921826 (* 1 = 0.921826 loss)\nI1212 20:34:30.918328 20613 solver.cpp:228] Iteration 23200, loss = 0.0703923\nI1212 20:34:30.918385 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 20:34:30.918411 20613 solver.cpp:244]     Train net output #1: loss = 0.0703922 (* 1 = 0.0703922 loss)\nI1212 20:34:31.008158 20613 sgd_solver.cpp:174] Iteration 
23200, lr = 0.696\nI1212 20:34:31.022079 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.48107\nI1212 20:36:49.377643 20613 solver.cpp:337] Iteration 23300, Testing net (#0)\nI1212 20:38:11.078366 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80812\nI1212 20:38:11.078706 20613 solver.cpp:404]     Test net output #1: loss = 0.841173 (* 1 = 0.841173 loss)\nI1212 20:38:12.391502 20613 solver.cpp:228] Iteration 23300, loss = 0.0607137\nI1212 20:38:12.391563 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 20:38:12.391588 20613 solver.cpp:244]     Train net output #1: loss = 0.0607136 (* 1 = 0.0607136 loss)\nI1212 20:38:12.483805 20613 sgd_solver.cpp:174] Iteration 23300, lr = 0.699\nI1212 20:38:12.497539 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347653\nI1212 20:40:30.842241 20613 solver.cpp:337] Iteration 23400, Testing net (#0)\nI1212 20:41:52.541591 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82396\nI1212 20:41:52.541908 20613 solver.cpp:404]     Test net output #1: loss = 0.749998 (* 1 = 0.749998 loss)\nI1212 20:41:53.855855 20613 solver.cpp:228] Iteration 23400, loss = 0.0907321\nI1212 20:41:53.855917 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 20:41:53.855942 20613 solver.cpp:244]     Train net output #1: loss = 0.090732 (* 1 = 0.090732 loss)\nI1212 20:41:53.943276 20613 sgd_solver.cpp:174] Iteration 23400, lr = 0.702\nI1212 20:41:53.957257 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.474673\nI1212 20:44:12.197854 20613 solver.cpp:337] Iteration 23500, Testing net (#0)\nI1212 20:45:33.807574 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79288\nI1212 20:45:33.807914 20613 solver.cpp:404]     Test net output #1: loss = 0.892431 (* 1 = 0.892431 loss)\nI1212 20:45:35.121685 20613 solver.cpp:228] Iteration 23500, loss = 0.0521162\nI1212 20:45:35.121742 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 20:45:35.121767 20613 solver.cpp:244]     Train net 
output #1: loss = 0.0521161 (* 1 = 0.0521161 loss)\nI1212 20:45:35.209514 20613 sgd_solver.cpp:174] Iteration 23500, lr = 0.705\nI1212 20:45:35.223635 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.356198\nI1212 20:47:53.632937 20613 solver.cpp:337] Iteration 23600, Testing net (#0)\nI1212 20:49:15.146404 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82176\nI1212 20:49:15.146699 20613 solver.cpp:404]     Test net output #1: loss = 0.820487 (* 1 = 0.820487 loss)\nI1212 20:49:16.460455 20613 solver.cpp:228] Iteration 23600, loss = 0.0914894\nI1212 20:49:16.460512 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 20:49:16.460537 20613 solver.cpp:244]     Train net output #1: loss = 0.0914893 (* 1 = 0.0914893 loss)\nI1212 20:49:16.553705 20613 sgd_solver.cpp:174] Iteration 23600, lr = 0.708\nI1212 20:49:16.567608 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.456199\nI1212 20:51:34.950443 20613 solver.cpp:337] Iteration 23700, Testing net (#0)\nI1212 20:52:56.557896 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78968\nI1212 20:52:56.558251 20613 solver.cpp:404]     Test net output #1: loss = 0.94528 (* 1 = 0.94528 loss)\nI1212 20:52:57.871203 20613 solver.cpp:228] Iteration 23700, loss = 0.0821846\nI1212 20:52:57.871259 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 20:52:57.871285 20613 solver.cpp:244]     Train net output #1: loss = 0.0821845 (* 1 = 0.0821845 loss)\nI1212 20:52:57.964694 20613 sgd_solver.cpp:174] Iteration 23700, lr = 0.711\nI1212 20:52:57.978528 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.444933\nI1212 20:55:16.369503 20613 solver.cpp:337] Iteration 23800, Testing net (#0)\nI1212 20:56:38.070964 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8232\nI1212 20:56:38.071262 20613 solver.cpp:404]     Test net output #1: loss = 0.748714 (* 1 = 0.748714 loss)\nI1212 20:56:39.385253 20613 solver.cpp:228] Iteration 23800, loss = 0.0957014\nI1212 20:56:39.385308 20613 
solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 20:56:39.385332 20613 solver.cpp:244]     Train net output #1: loss = 0.0957012 (* 1 = 0.0957012 loss)\nI1212 20:56:39.474880 20613 sgd_solver.cpp:174] Iteration 23800, lr = 0.714\nI1212 20:56:39.488786 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.380135\nI1212 20:58:57.836428 20613 solver.cpp:337] Iteration 23900, Testing net (#0)\nI1212 21:00:19.446977 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80676\nI1212 21:00:19.447293 20613 solver.cpp:404]     Test net output #1: loss = 0.794057 (* 1 = 0.794057 loss)\nI1212 21:00:20.759869 20613 solver.cpp:228] Iteration 23900, loss = 0.148717\nI1212 21:00:20.759928 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 21:00:20.759954 20613 solver.cpp:244]     Train net output #1: loss = 0.148717 (* 1 = 0.148717 loss)\nI1212 21:00:20.850673 20613 sgd_solver.cpp:174] Iteration 23900, lr = 0.717\nI1212 21:00:20.864612 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.480053\nI1212 21:02:38.435190 20613 solver.cpp:337] Iteration 24000, Testing net (#0)\nI1212 21:03:59.851887 20613 solver.cpp:404]     Test net output #0: accuracy = 0.829\nI1212 21:03:59.852306 20613 solver.cpp:404]     Test net output #1: loss = 0.757646 (* 1 = 0.757646 loss)\nI1212 21:04:01.165334 20613 solver.cpp:228] Iteration 24000, loss = 0.0772512\nI1212 21:04:01.165395 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 21:04:01.165421 20613 solver.cpp:244]     Train net output #1: loss = 0.0772511 (* 1 = 0.0772511 loss)\nI1212 21:04:01.258848 20613 sgd_solver.cpp:174] Iteration 24000, lr = 0.72\nI1212 21:04:01.272783 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389488\nI1212 21:06:19.648836 20613 solver.cpp:337] Iteration 24100, Testing net (#0)\nI1212 21:07:41.112922 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76792\nI1212 21:07:41.113222 20613 solver.cpp:404]     Test net output #1: loss = 1.14001 (* 1 = 1.14001 loss)\nI1212 
21:07:42.426617 20613 solver.cpp:228] Iteration 24100, loss = 0.0675209\nI1212 21:07:42.426679 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 21:07:42.426703 20613 solver.cpp:244]     Train net output #1: loss = 0.0675208 (* 1 = 0.0675208 loss)\nI1212 21:07:42.518409 20613 sgd_solver.cpp:174] Iteration 24100, lr = 0.723\nI1212 21:07:42.532222 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.383221\nI1212 21:10:00.862654 20613 solver.cpp:337] Iteration 24200, Testing net (#0)\nI1212 21:11:22.330276 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8256\nI1212 21:11:22.330612 20613 solver.cpp:404]     Test net output #1: loss = 0.706169 (* 1 = 0.706169 loss)\nI1212 21:11:23.644605 20613 solver.cpp:228] Iteration 24200, loss = 0.0344311\nI1212 21:11:23.644665 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 21:11:23.644691 20613 solver.cpp:244]     Train net output #1: loss = 0.034431 (* 1 = 0.034431 loss)\nI1212 21:11:23.731117 20613 sgd_solver.cpp:174] Iteration 24200, lr = 0.726\nI1212 21:11:23.745031 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.399188\nI1212 21:13:42.145759 20613 solver.cpp:337] Iteration 24300, Testing net (#0)\nI1212 21:15:03.740823 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84172\nI1212 21:15:03.741163 20613 solver.cpp:404]     Test net output #1: loss = 0.715659 (* 1 = 0.715659 loss)\nI1212 21:15:05.053418 20613 solver.cpp:228] Iteration 24300, loss = 0.0608982\nI1212 21:15:05.053478 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 21:15:05.053503 20613 solver.cpp:244]     Train net output #1: loss = 0.0608981 (* 1 = 0.0608981 loss)\nI1212 21:15:05.139531 20613 sgd_solver.cpp:174] Iteration 24300, lr = 0.729\nI1212 21:15:05.153430 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.452949\nI1212 21:17:23.447886 20613 solver.cpp:337] Iteration 24400, Testing net (#0)\nI1212 21:18:44.800721 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7856\nI1212 
21:18:44.801051 20613 solver.cpp:404]     Test net output #1: loss = 0.920401 (* 1 = 0.920401 loss)\nI1212 21:18:46.114647 20613 solver.cpp:228] Iteration 24400, loss = 0.0378291\nI1212 21:18:46.114711 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 21:18:46.114738 20613 solver.cpp:244]     Train net output #1: loss = 0.037829 (* 1 = 0.037829 loss)\nI1212 21:18:46.202024 20613 sgd_solver.cpp:174] Iteration 24400, lr = 0.732\nI1212 21:18:46.216011 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.457096\nI1212 21:21:04.504400 20613 solver.cpp:337] Iteration 24500, Testing net (#0)\nI1212 21:22:26.052765 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78156\nI1212 21:22:26.053102 20613 solver.cpp:404]     Test net output #1: loss = 1.08521 (* 1 = 1.08521 loss)\nI1212 21:22:27.366781 20613 solver.cpp:228] Iteration 24500, loss = 0.122666\nI1212 21:22:27.366837 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 21:22:27.366855 20613 solver.cpp:244]     Train net output #1: loss = 0.122666 (* 1 = 0.122666 loss)\nI1212 21:22:27.454629 20613 sgd_solver.cpp:174] Iteration 24500, lr = 0.735\nI1212 21:22:27.468487 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.469676\nI1212 21:24:45.769122 20613 solver.cpp:337] Iteration 24600, Testing net (#0)\nI1212 21:26:07.530542 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81764\nI1212 21:26:07.530858 20613 solver.cpp:404]     Test net output #1: loss = 0.799055 (* 1 = 0.799055 loss)\nI1212 21:26:08.842819 20613 solver.cpp:228] Iteration 24600, loss = 0.065764\nI1212 21:26:08.842880 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 21:26:08.842897 20613 solver.cpp:244]     Train net output #1: loss = 0.0657639 (* 1 = 0.0657639 loss)\nI1212 21:26:08.928901 20613 sgd_solver.cpp:174] Iteration 24600, lr = 0.738\nI1212 21:26:08.942740 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.324409\nI1212 21:28:26.345449 20613 solver.cpp:337] Iteration 24700, Testing 
net (#0)\nI1212 21:29:48.111637 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79288\nI1212 21:29:48.111975 20613 solver.cpp:404]     Test net output #1: loss = 0.92349 (* 1 = 0.92349 loss)\nI1212 21:29:49.425268 20613 solver.cpp:228] Iteration 24700, loss = 0.163994\nI1212 21:29:49.425328 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 21:29:49.425345 20613 solver.cpp:244]     Train net output #1: loss = 0.163993 (* 1 = 0.163993 loss)\nI1212 21:29:49.511565 20613 sgd_solver.cpp:174] Iteration 24700, lr = 0.741\nI1212 21:29:49.525463 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405521\nI1212 21:32:07.764670 20613 solver.cpp:337] Iteration 24800, Testing net (#0)\nI1212 21:33:29.532088 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7868\nI1212 21:33:29.532407 20613 solver.cpp:404]     Test net output #1: loss = 0.957705 (* 1 = 0.957705 loss)\nI1212 21:33:30.846261 20613 solver.cpp:228] Iteration 24800, loss = 0.138386\nI1212 21:33:30.846319 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 21:33:30.846338 20613 solver.cpp:244]     Train net output #1: loss = 0.138386 (* 1 = 0.138386 loss)\nI1212 21:33:30.932452 20613 sgd_solver.cpp:174] Iteration 24800, lr = 0.744\nI1212 21:33:30.946336 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.512939\nI1212 21:35:49.175827 20613 solver.cpp:337] Iteration 24900, Testing net (#0)\nI1212 21:37:10.936316 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7922\nI1212 21:37:10.936630 20613 solver.cpp:404]     Test net output #1: loss = 0.932212 (* 1 = 0.932212 loss)\nI1212 21:37:12.250095 20613 solver.cpp:228] Iteration 24900, loss = 0.0868315\nI1212 21:37:12.250145 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 21:37:12.250164 20613 solver.cpp:244]     Train net output #1: loss = 0.0868314 (* 1 = 0.0868314 loss)\nI1212 21:37:12.337602 20613 sgd_solver.cpp:174] Iteration 24900, lr = 0.747\nI1212 21:37:12.351004 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.440882\nI1212 21:39:30.603010 20613 solver.cpp:337] Iteration 25000, Testing net (#0)\nI1212 21:40:52.349944 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80008\nI1212 21:40:52.350280 20613 solver.cpp:404]     Test net output #1: loss = 0.9 (* 1 = 0.9 loss)\nI1212 21:40:53.663563 20613 solver.cpp:228] Iteration 25000, loss = 0.106605\nI1212 21:40:53.663615 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 21:40:53.663633 20613 solver.cpp:244]     Train net output #1: loss = 0.106605 (* 1 = 0.106605 loss)\nI1212 21:40:53.751962 20613 sgd_solver.cpp:174] Iteration 25000, lr = 0.75\nI1212 21:40:53.765707 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.401902\nI1212 21:43:12.052950 20613 solver.cpp:337] Iteration 25100, Testing net (#0)\nI1212 21:44:33.807090 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81992\nI1212 21:44:33.807399 20613 solver.cpp:404]     Test net output #1: loss = 0.732078 (* 1 = 0.732078 loss)\nI1212 21:44:35.121028 20613 solver.cpp:228] Iteration 25100, loss = 0.0727806\nI1212 21:44:35.121076 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 21:44:35.121093 20613 solver.cpp:244]     Train net output #1: loss = 0.0727805 (* 1 = 0.0727805 loss)\nI1212 21:44:35.208822 20613 sgd_solver.cpp:174] Iteration 25100, lr = 0.753\nI1212 21:44:35.222584 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.351306\nI1212 21:46:53.561730 20613 solver.cpp:337] Iteration 25200, Testing net (#0)\nI1212 21:48:15.327535 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7792\nI1212 21:48:15.327862 20613 solver.cpp:404]     Test net output #1: loss = 0.901727 (* 1 = 0.901727 loss)\nI1212 21:48:16.641582 20613 solver.cpp:228] Iteration 25200, loss = 0.19748\nI1212 21:48:16.641626 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 21:48:16.641644 20613 solver.cpp:244]     Train net output #1: loss = 0.19748 (* 1 = 0.19748 loss)\nI1212 
21:48:16.731621 20613 sgd_solver.cpp:174] Iteration 25200, lr = 0.756\nI1212 21:48:16.745299 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.55399\nI1212 21:50:34.981889 20613 solver.cpp:337] Iteration 25300, Testing net (#0)\nI1212 21:51:56.743073 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77568\nI1212 21:51:56.743398 20613 solver.cpp:404]     Test net output #1: loss = 1.04972 (* 1 = 1.04972 loss)\nI1212 21:51:58.055972 20613 solver.cpp:228] Iteration 25300, loss = 0.0710862\nI1212 21:51:58.056028 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 21:51:58.056046 20613 solver.cpp:244]     Train net output #1: loss = 0.0710861 (* 1 = 0.0710861 loss)\nI1212 21:51:58.148707 20613 sgd_solver.cpp:174] Iteration 25300, lr = 0.759\nI1212 21:51:58.162103 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.445525\nI1212 21:54:16.440241 20613 solver.cpp:337] Iteration 25400, Testing net (#0)\nI1212 21:55:38.230176 20613 solver.cpp:404]     Test net output #0: accuracy = 0.805\nI1212 21:55:38.230537 20613 solver.cpp:404]     Test net output #1: loss = 0.801147 (* 1 = 0.801147 loss)\nI1212 21:55:39.542712 20613 solver.cpp:228] Iteration 25400, loss = 0.164899\nI1212 21:55:39.542767 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 21:55:39.542791 20613 solver.cpp:244]     Train net output #1: loss = 0.164899 (* 1 = 0.164899 loss)\nI1212 21:55:39.631930 20613 sgd_solver.cpp:174] Iteration 25400, lr = 0.762\nI1212 21:55:39.645812 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.438261\nI1212 21:57:57.816213 20613 solver.cpp:337] Iteration 25500, Testing net (#0)\nI1212 21:59:19.594941 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74876\nI1212 21:59:19.595278 20613 solver.cpp:404]     Test net output #1: loss = 1.11839 (* 1 = 1.11839 loss)\nI1212 21:59:20.908673 20613 solver.cpp:228] Iteration 25500, loss = 0.209132\nI1212 21:59:20.908725 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 
21:59:20.908742 20613 solver.cpp:244]     Train net output #1: loss = 0.209132 (* 1 = 0.209132 loss)\nI1212 21:59:21.003096 20613 sgd_solver.cpp:174] Iteration 25500, lr = 0.765\nI1212 21:59:21.016976 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.476173\nI1212 22:01:39.343272 20613 solver.cpp:337] Iteration 25600, Testing net (#0)\nI1212 22:03:01.118993 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8202\nI1212 22:03:01.119326 20613 solver.cpp:404]     Test net output #1: loss = 0.744552 (* 1 = 0.744552 loss)\nI1212 22:03:02.432579 20613 solver.cpp:228] Iteration 25600, loss = 0.0932698\nI1212 22:03:02.432632 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 22:03:02.432649 20613 solver.cpp:244]     Train net output #1: loss = 0.0932698 (* 1 = 0.0932698 loss)\nI1212 22:03:02.526314 20613 sgd_solver.cpp:174] Iteration 25600, lr = 0.768\nI1212 22:03:02.540191 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.414432\nI1212 22:05:20.774000 20613 solver.cpp:337] Iteration 25700, Testing net (#0)\nI1212 22:06:42.551553 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80888\nI1212 22:06:42.551908 20613 solver.cpp:404]     Test net output #1: loss = 0.815965 (* 1 = 0.815965 loss)\nI1212 22:06:43.865301 20613 solver.cpp:228] Iteration 25700, loss = 0.140273\nI1212 22:06:43.865363 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 22:06:43.865381 20613 solver.cpp:244]     Train net output #1: loss = 0.140273 (* 1 = 0.140273 loss)\nI1212 22:06:43.949466 20613 sgd_solver.cpp:174] Iteration 25700, lr = 0.771\nI1212 22:06:43.963294 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.454908\nI1212 22:09:02.291548 20613 solver.cpp:337] Iteration 25800, Testing net (#0)\nI1212 22:10:24.063457 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79608\nI1212 22:10:24.063805 20613 solver.cpp:404]     Test net output #1: loss = 0.853441 (* 1 = 0.853441 loss)\nI1212 22:10:25.376863 20613 solver.cpp:228] Iteration 25800, loss = 
0.0527816\nI1212 22:10:25.376924 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 22:10:25.376941 20613 solver.cpp:244]     Train net output #1: loss = 0.0527815 (* 1 = 0.0527815 loss)\nI1212 22:10:25.469854 20613 sgd_solver.cpp:174] Iteration 25800, lr = 0.774\nI1212 22:10:25.483752 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.373566\nI1212 22:12:43.821329 20613 solver.cpp:337] Iteration 25900, Testing net (#0)\nI1212 22:14:05.597995 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7976\nI1212 22:14:05.598354 20613 solver.cpp:404]     Test net output #1: loss = 0.885139 (* 1 = 0.885139 loss)\nI1212 22:14:06.912000 20613 solver.cpp:228] Iteration 25900, loss = 0.0655589\nI1212 22:14:06.912060 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 22:14:06.912083 20613 solver.cpp:244]     Train net output #1: loss = 0.0655588 (* 1 = 0.0655588 loss)\nI1212 22:14:07.001307 20613 sgd_solver.cpp:174] Iteration 25900, lr = 0.777\nI1212 22:14:07.015126 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.398031\nI1212 22:16:25.329807 20613 solver.cpp:337] Iteration 26000, Testing net (#0)\nI1212 22:17:47.113361 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80664\nI1212 22:17:47.113700 20613 solver.cpp:404]     Test net output #1: loss = 0.917934 (* 1 = 0.917934 loss)\nI1212 22:17:48.427340 20613 solver.cpp:228] Iteration 26000, loss = 0.0648305\nI1212 22:17:48.427397 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 22:17:48.427414 20613 solver.cpp:244]     Train net output #1: loss = 0.0648304 (* 1 = 0.0648304 loss)\nI1212 22:17:48.516948 20613 sgd_solver.cpp:174] Iteration 26000, lr = 0.78\nI1212 22:17:48.530797 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400575\nI1212 22:20:06.839529 20613 solver.cpp:337] Iteration 26100, Testing net (#0)\nI1212 22:21:28.611677 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82048\nI1212 22:21:28.612020 20613 solver.cpp:404]     Test net output #1: 
loss = 0.727172 (* 1 = 0.727172 loss)\nI1212 22:21:29.925021 20613 solver.cpp:228] Iteration 26100, loss = 0.0516298\nI1212 22:21:29.925083 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 22:21:29.925101 20613 solver.cpp:244]     Train net output #1: loss = 0.0516297 (* 1 = 0.0516297 loss)\nI1212 22:21:30.015116 20613 sgd_solver.cpp:174] Iteration 26100, lr = 0.783\nI1212 22:21:30.028901 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.399144\nI1212 22:23:48.350901 20613 solver.cpp:337] Iteration 26200, Testing net (#0)\nI1212 22:25:10.121758 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84812\nI1212 22:25:10.122112 20613 solver.cpp:404]     Test net output #1: loss = 0.597296 (* 1 = 0.597296 loss)\nI1212 22:25:11.434713 20613 solver.cpp:228] Iteration 26200, loss = 0.137638\nI1212 22:25:11.434762 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 22:25:11.434779 20613 solver.cpp:244]     Train net output #1: loss = 0.137638 (* 1 = 0.137638 loss)\nI1212 22:25:11.526643 20613 sgd_solver.cpp:174] Iteration 26200, lr = 0.786\nI1212 22:25:11.540554 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.371744\nI1212 22:27:29.957850 20613 solver.cpp:337] Iteration 26300, Testing net (#0)\nI1212 22:28:51.735364 20613 solver.cpp:404]     Test net output #0: accuracy = 0.86488\nI1212 22:28:51.735703 20613 solver.cpp:404]     Test net output #1: loss = 0.486497 (* 1 = 0.486497 loss)\nI1212 22:28:53.049101 20613 solver.cpp:228] Iteration 26300, loss = 0.0348102\nI1212 22:28:53.049149 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 22:28:53.049167 20613 solver.cpp:244]     Train net output #1: loss = 0.0348102 (* 1 = 0.0348102 loss)\nI1212 22:28:53.143617 20613 sgd_solver.cpp:174] Iteration 26300, lr = 0.789\nI1212 22:28:53.157287 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.378412\nI1212 22:31:11.515508 20613 solver.cpp:337] Iteration 26400, Testing net (#0)\nI1212 22:32:33.293154 20613 solver.cpp:404]     
Test net output #0: accuracy = 0.8118\nI1212 22:32:33.293496 20613 solver.cpp:404]     Test net output #1: loss = 0.82284 (* 1 = 0.82284 loss)\nI1212 22:32:34.605778 20613 solver.cpp:228] Iteration 26400, loss = 0.0532647\nI1212 22:32:34.605828 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 22:32:34.605844 20613 solver.cpp:244]     Train net output #1: loss = 0.0532647 (* 1 = 0.0532647 loss)\nI1212 22:32:34.695705 20613 sgd_solver.cpp:174] Iteration 26400, lr = 0.792\nI1212 22:32:34.709465 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.402638\nI1212 22:34:53.096127 20613 solver.cpp:337] Iteration 26500, Testing net (#0)\nI1212 22:36:14.869024 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79776\nI1212 22:36:14.869387 20613 solver.cpp:404]     Test net output #1: loss = 0.791315 (* 1 = 0.791315 loss)\nI1212 22:36:16.182518 20613 solver.cpp:228] Iteration 26500, loss = 0.0653583\nI1212 22:36:16.182572 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 22:36:16.182590 20613 solver.cpp:244]     Train net output #1: loss = 0.0653582 (* 1 = 0.0653582 loss)\nI1212 22:36:16.272235 20613 sgd_solver.cpp:174] Iteration 26500, lr = 0.795\nI1212 22:36:16.286042 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.364852\nI1212 22:38:34.594338 20613 solver.cpp:337] Iteration 26600, Testing net (#0)\nI1212 22:39:56.379245 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83624\nI1212 22:39:56.379580 20613 solver.cpp:404]     Test net output #1: loss = 0.650382 (* 1 = 0.650382 loss)\nI1212 22:39:57.693372 20613 solver.cpp:228] Iteration 26600, loss = 0.143662\nI1212 22:39:57.693424 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 22:39:57.693441 20613 solver.cpp:244]     Train net output #1: loss = 0.143662 (* 1 = 0.143662 loss)\nI1212 22:39:57.784525 20613 sgd_solver.cpp:174] Iteration 26600, lr = 0.798\nI1212 22:39:57.798382 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.375113\nI1212 22:42:16.179695 
20613 solver.cpp:337] Iteration 26700, Testing net (#0)\nI1212 22:43:37.963088 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8186\nI1212 22:43:37.963443 20613 solver.cpp:404]     Test net output #1: loss = 0.731693 (* 1 = 0.731693 loss)\nI1212 22:43:39.277253 20613 solver.cpp:228] Iteration 26700, loss = 0.111377\nI1212 22:43:39.277308 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 22:43:39.277326 20613 solver.cpp:244]     Train net output #1: loss = 0.111377 (* 1 = 0.111377 loss)\nI1212 22:43:39.367208 20613 sgd_solver.cpp:174] Iteration 26700, lr = 0.801\nI1212 22:43:39.381067 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.370823\nI1212 22:45:57.719314 20613 solver.cpp:337] Iteration 26800, Testing net (#0)\nI1212 22:47:19.511399 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83748\nI1212 22:47:19.511734 20613 solver.cpp:404]     Test net output #1: loss = 0.670505 (* 1 = 0.670505 loss)\nI1212 22:47:20.825619 20613 solver.cpp:228] Iteration 26800, loss = 0.193391\nI1212 22:47:20.825670 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 22:47:20.825687 20613 solver.cpp:244]     Train net output #1: loss = 0.193391 (* 1 = 0.193391 loss)\nI1212 22:47:20.913684 20613 sgd_solver.cpp:174] Iteration 26800, lr = 0.804\nI1212 22:47:20.927472 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.427489\nI1212 22:49:39.228086 20613 solver.cpp:337] Iteration 26900, Testing net (#0)\nI1212 22:51:01.008692 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78052\nI1212 22:51:01.009037 20613 solver.cpp:404]     Test net output #1: loss = 0.895136 (* 1 = 0.895136 loss)\nI1212 22:51:02.323002 20613 solver.cpp:228] Iteration 26900, loss = 0.0523955\nI1212 22:51:02.323056 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 22:51:02.323074 20613 solver.cpp:244]     Train net output #1: loss = 0.0523955 (* 1 = 0.0523955 loss)\nI1212 22:51:02.412786 20613 sgd_solver.cpp:174] Iteration 26900, lr = 
0.807\nI1212 22:51:02.426287 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.38067\nI1212 22:53:20.783854 20613 solver.cpp:337] Iteration 27000, Testing net (#0)\nI1212 22:54:42.571679 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82468\nI1212 22:54:42.572036 20613 solver.cpp:404]     Test net output #1: loss = 0.667842 (* 1 = 0.667842 loss)\nI1212 22:54:43.885004 20613 solver.cpp:228] Iteration 27000, loss = 0.125454\nI1212 22:54:43.885059 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 22:54:43.885082 20613 solver.cpp:244]     Train net output #1: loss = 0.125454 (* 1 = 0.125454 loss)\nI1212 22:54:43.977438 20613 sgd_solver.cpp:174] Iteration 27000, lr = 0.81\nI1212 22:54:43.991160 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.388195\nI1212 22:57:02.274888 20613 solver.cpp:337] Iteration 27100, Testing net (#0)\nI1212 22:58:24.049867 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74752\nI1212 22:58:24.050226 20613 solver.cpp:404]     Test net output #1: loss = 1.19271 (* 1 = 1.19271 loss)\nI1212 22:58:25.363409 20613 solver.cpp:228] Iteration 27100, loss = 0.0897815\nI1212 22:58:25.363462 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 22:58:25.363481 20613 solver.cpp:244]     Train net output #1: loss = 0.0897815 (* 1 = 0.0897815 loss)\nI1212 22:58:25.452675 20613 sgd_solver.cpp:174] Iteration 27100, lr = 0.813\nI1212 22:58:25.466390 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.408347\nI1212 23:00:43.808797 20613 solver.cpp:337] Iteration 27200, Testing net (#0)\nI1212 23:02:05.725708 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81164\nI1212 23:02:05.726025 20613 solver.cpp:404]     Test net output #1: loss = 0.811318 (* 1 = 0.811318 loss)\nI1212 23:02:07.043548 20613 solver.cpp:228] Iteration 27200, loss = 0.149864\nI1212 23:02:07.043608 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 23:02:07.043635 20613 solver.cpp:244]     Train net output #1: loss = 
0.149864 (* 1 = 0.149864 loss)\nI1212 23:02:07.131392 20613 sgd_solver.cpp:174] Iteration 27200, lr = 0.816\nI1212 23:02:07.145226 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.54008\nI1212 23:04:25.468807 20613 solver.cpp:337] Iteration 27300, Testing net (#0)\nI1212 23:05:47.371887 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8244\nI1212 23:05:47.372238 20613 solver.cpp:404]     Test net output #1: loss = 0.654165 (* 1 = 0.654165 loss)\nI1212 23:05:48.686386 20613 solver.cpp:228] Iteration 27300, loss = 0.074653\nI1212 23:05:48.686451 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:05:48.686477 20613 solver.cpp:244]     Train net output #1: loss = 0.074653 (* 1 = 0.074653 loss)\nI1212 23:05:48.775647 20613 sgd_solver.cpp:174] Iteration 27300, lr = 0.819\nI1212 23:05:48.789415 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.410358\nI1212 23:08:07.164186 20613 solver.cpp:337] Iteration 27400, Testing net (#0)\nI1212 23:09:29.106340 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8024\nI1212 23:09:29.106699 20613 solver.cpp:404]     Test net output #1: loss = 0.822725 (* 1 = 0.822725 loss)\nI1212 23:09:30.420433 20613 solver.cpp:228] Iteration 27400, loss = 0.0806874\nI1212 23:09:30.420492 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:09:30.420517 20613 solver.cpp:244]     Train net output #1: loss = 0.0806874 (* 1 = 0.0806874 loss)\nI1212 23:09:30.512982 20613 sgd_solver.cpp:174] Iteration 27400, lr = 0.822\nI1212 23:09:30.526805 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.404225\nI1212 23:11:48.810667 20613 solver.cpp:337] Iteration 27500, Testing net (#0)\nI1212 23:13:10.705544 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8166\nI1212 23:13:10.705893 20613 solver.cpp:404]     Test net output #1: loss = 0.790891 (* 1 = 0.790891 loss)\nI1212 23:13:12.018725 20613 solver.cpp:228] Iteration 27500, loss = 0.110102\nI1212 23:13:12.018782 20613 solver.cpp:244]     Train net 
output #0: accuracy = 0.936\nI1212 23:13:12.018808 20613 solver.cpp:244]     Train net output #1: loss = 0.110102 (* 1 = 0.110102 loss)\nI1212 23:13:12.110734 20613 sgd_solver.cpp:174] Iteration 27500, lr = 0.825\nI1212 23:13:12.124591 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.428042\nI1212 23:15:30.455770 20613 solver.cpp:337] Iteration 27600, Testing net (#0)\nI1212 23:16:52.272560 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81456\nI1212 23:16:52.272900 20613 solver.cpp:404]     Test net output #1: loss = 0.761178 (* 1 = 0.761178 loss)\nI1212 23:16:53.586593 20613 solver.cpp:228] Iteration 27600, loss = 0.152061\nI1212 23:16:53.586648 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 23:16:53.586674 20613 solver.cpp:244]     Train net output #1: loss = 0.152061 (* 1 = 0.152061 loss)\nI1212 23:16:53.673166 20613 sgd_solver.cpp:174] Iteration 27600, lr = 0.828\nI1212 23:16:53.687110 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.477237\nI1212 23:19:11.989295 20613 solver.cpp:337] Iteration 27700, Testing net (#0)\nI1212 23:20:33.864503 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76468\nI1212 23:20:33.864852 20613 solver.cpp:404]     Test net output #1: loss = 1.13997 (* 1 = 1.13997 loss)\nI1212 23:20:35.178742 20613 solver.cpp:228] Iteration 27700, loss = 0.0603208\nI1212 23:20:35.178800 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 23:20:35.178825 20613 solver.cpp:244]     Train net output #1: loss = 0.0603208 (* 1 = 0.0603208 loss)\nI1212 23:20:35.265782 20613 sgd_solver.cpp:174] Iteration 27700, lr = 0.831\nI1212 23:20:35.279685 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382185\nI1212 23:22:53.538072 20613 solver.cpp:337] Iteration 27800, Testing net (#0)\nI1212 23:24:15.414731 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78884\nI1212 23:24:15.415081 20613 solver.cpp:404]     Test net output #1: loss = 0.892667 (* 1 = 0.892667 loss)\nI1212 23:24:16.728474 20613 
solver.cpp:228] Iteration 27800, loss = 0.0608432\nI1212 23:24:16.728531 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 23:24:16.728556 20613 solver.cpp:244]     Train net output #1: loss = 0.0608432 (* 1 = 0.0608432 loss)\nI1212 23:24:16.819723 20613 sgd_solver.cpp:174] Iteration 27800, lr = 0.834\nI1212 23:24:16.833676 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.413973\nI1212 23:26:35.149888 20613 solver.cpp:337] Iteration 27900, Testing net (#0)\nI1212 23:27:57.052752 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83508\nI1212 23:27:57.053107 20613 solver.cpp:404]     Test net output #1: loss = 0.680838 (* 1 = 0.680838 loss)\nI1212 23:27:58.366719 20613 solver.cpp:228] Iteration 27900, loss = 0.153274\nI1212 23:27:58.366776 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 23:27:58.366803 20613 solver.cpp:244]     Train net output #1: loss = 0.153274 (* 1 = 0.153274 loss)\nI1212 23:27:58.453073 20613 sgd_solver.cpp:174] Iteration 27900, lr = 0.837\nI1212 23:27:58.466928 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.511894\nI1212 23:30:16.679795 20613 solver.cpp:337] Iteration 28000, Testing net (#0)\nI1212 23:31:38.568830 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80364\nI1212 23:31:38.569196 20613 solver.cpp:404]     Test net output #1: loss = 0.847597 (* 1 = 0.847597 loss)\nI1212 23:31:39.882500 20613 solver.cpp:228] Iteration 28000, loss = 0.159614\nI1212 23:31:39.882558 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 23:31:39.882583 20613 solver.cpp:244]     Train net output #1: loss = 0.159614 (* 1 = 0.159614 loss)\nI1212 23:31:39.975206 20613 sgd_solver.cpp:174] Iteration 28000, lr = 0.84\nI1212 23:31:39.989125 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.464056\nI1212 23:33:58.267619 20613 solver.cpp:337] Iteration 28100, Testing net (#0)\nI1212 23:35:20.163831 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81996\nI1212 23:35:20.164178 20613 
solver.cpp:404]     Test net output #1: loss = 0.730928 (* 1 = 0.730928 loss)\nI1212 23:35:21.477782 20613 solver.cpp:228] Iteration 28100, loss = 0.0816185\nI1212 23:35:21.477840 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 23:35:21.477866 20613 solver.cpp:244]     Train net output #1: loss = 0.0816185 (* 1 = 0.0816185 loss)\nI1212 23:35:21.569778 20613 sgd_solver.cpp:174] Iteration 28100, lr = 0.843\nI1212 23:35:21.583782 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.388207\nI1212 23:37:39.805814 20613 solver.cpp:337] Iteration 28200, Testing net (#0)\nI1212 23:39:01.663148 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80684\nI1212 23:39:01.663514 20613 solver.cpp:404]     Test net output #1: loss = 0.797826 (* 1 = 0.797826 loss)\nI1212 23:39:02.976964 20613 solver.cpp:228] Iteration 28200, loss = 0.142118\nI1212 23:39:02.977025 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:39:02.977052 20613 solver.cpp:244]     Train net output #1: loss = 0.142118 (* 1 = 0.142118 loss)\nI1212 23:39:03.064312 20613 sgd_solver.cpp:174] Iteration 28200, lr = 0.846\nI1212 23:39:03.078157 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.402283\nI1212 23:41:21.364157 20613 solver.cpp:337] Iteration 28300, Testing net (#0)\nI1212 23:42:43.126132 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79376\nI1212 23:42:43.126478 20613 solver.cpp:404]     Test net output #1: loss = 0.885734 (* 1 = 0.885734 loss)\nI1212 23:42:44.439491 20613 solver.cpp:228] Iteration 28300, loss = 0.0734277\nI1212 23:42:44.439546 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 23:42:44.439563 20613 solver.cpp:244]     Train net output #1: loss = 0.0734277 (* 1 = 0.0734277 loss)\nI1212 23:42:44.532160 20613 sgd_solver.cpp:174] Iteration 28300, lr = 0.849\nI1212 23:42:44.545668 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.458602\nI1212 23:45:02.853135 20613 solver.cpp:337] Iteration 28400, Testing net (#0)\nI1212 
23:46:24.621407 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83756\nI1212 23:46:24.621740 20613 solver.cpp:404]     Test net output #1: loss = 0.632504 (* 1 = 0.632504 loss)\nI1212 23:46:25.933830 20613 solver.cpp:228] Iteration 28400, loss = 0.0765259\nI1212 23:46:25.933884 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:46:25.933902 20613 solver.cpp:244]     Train net output #1: loss = 0.076526 (* 1 = 0.076526 loss)\nI1212 23:46:26.025171 20613 sgd_solver.cpp:174] Iteration 28400, lr = 0.852\nI1212 23:46:26.039019 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.392219\nI1212 23:48:44.361526 20613 solver.cpp:337] Iteration 28500, Testing net (#0)\nI1212 23:50:06.131619 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81136\nI1212 23:50:06.131938 20613 solver.cpp:404]     Test net output #1: loss = 0.726304 (* 1 = 0.726304 loss)\nI1212 23:50:07.444149 20613 solver.cpp:228] Iteration 28500, loss = 0.0765367\nI1212 23:50:07.444201 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:50:07.444218 20613 solver.cpp:244]     Train net output #1: loss = 0.0765367 (* 1 = 0.0765367 loss)\nI1212 23:50:07.541561 20613 sgd_solver.cpp:174] Iteration 28500, lr = 0.855\nI1212 23:50:07.555389 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.336258\nI1212 23:52:25.831115 20613 solver.cpp:337] Iteration 28600, Testing net (#0)\nI1212 23:53:47.602123 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83712\nI1212 23:53:47.602521 20613 solver.cpp:404]     Test net output #1: loss = 0.680819 (* 1 = 0.680819 loss)\nI1212 23:53:48.915058 20613 solver.cpp:228] Iteration 28600, loss = 0.0817215\nI1212 23:53:48.915110 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 23:53:48.915127 20613 solver.cpp:244]     Train net output #1: loss = 0.0817215 (* 1 = 0.0817215 loss)\nI1212 23:53:49.005964 20613 sgd_solver.cpp:174] Iteration 28600, lr = 0.858\nI1212 23:53:49.019748 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.424559\nI1212 23:56:07.353519 20613 solver.cpp:337] Iteration 28700, Testing net (#0)\nI1212 23:57:29.163740 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81844\nI1212 23:57:29.164103 20613 solver.cpp:404]     Test net output #1: loss = 0.750364 (* 1 = 0.750364 loss)\nI1212 23:57:30.477053 20613 solver.cpp:228] Iteration 28700, loss = 0.103115\nI1212 23:57:30.477105 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 23:57:30.477123 20613 solver.cpp:244]     Train net output #1: loss = 0.103115 (* 1 = 0.103115 loss)\nI1212 23:57:30.569605 20613 sgd_solver.cpp:174] Iteration 28700, lr = 0.861\nI1212 23:57:30.583274 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35422\nI1212 23:59:48.943843 20613 solver.cpp:337] Iteration 28800, Testing net (#0)\nI1213 00:01:10.745414 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77628\nI1213 00:01:10.745757 20613 solver.cpp:404]     Test net output #1: loss = 1.04183 (* 1 = 1.04183 loss)\nI1213 00:01:12.058820 20613 solver.cpp:228] Iteration 28800, loss = 0.0905518\nI1213 00:01:12.058876 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 00:01:12.058892 20613 solver.cpp:244]     Train net output #1: loss = 0.0905518 (* 1 = 0.0905518 loss)\nI1213 00:01:12.153401 20613 sgd_solver.cpp:174] Iteration 28800, lr = 0.864\nI1213 00:01:12.166867 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.380487\nI1213 00:03:30.489289 20613 solver.cpp:337] Iteration 28900, Testing net (#0)\nI1213 00:04:52.293035 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70684\nI1213 00:04:52.293457 20613 solver.cpp:404]     Test net output #1: loss = 1.36319 (* 1 = 1.36319 loss)\nI1213 00:04:53.606256 20613 solver.cpp:228] Iteration 28900, loss = 0.10608\nI1213 00:04:53.606307 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 00:04:53.606324 20613 solver.cpp:244]     Train net output #1: loss = 0.10608 (* 1 = 0.10608 loss)\nI1213 00:04:53.695375 20613 
sgd_solver.cpp:174] Iteration 28900, lr = 0.867\nI1213 00:04:53.709311 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.402105\nI1213 00:07:12.068794 20613 solver.cpp:337] Iteration 29000, Testing net (#0)\nI1213 00:08:33.847512 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77788\nI1213 00:08:33.847874 20613 solver.cpp:404]     Test net output #1: loss = 0.938981 (* 1 = 0.938981 loss)\nI1213 00:08:35.160085 20613 solver.cpp:228] Iteration 29000, loss = 0.098611\nI1213 00:08:35.160136 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 00:08:35.160153 20613 solver.cpp:244]     Train net output #1: loss = 0.0986109 (* 1 = 0.0986109 loss)\nI1213 00:08:35.251811 20613 sgd_solver.cpp:174] Iteration 29000, lr = 0.87\nI1213 00:08:35.265627 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382566\nI1213 00:10:53.689798 20613 solver.cpp:337] Iteration 29100, Testing net (#0)\nI1213 00:12:15.432822 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73096\nI1213 00:12:15.433146 20613 solver.cpp:404]     Test net output #1: loss = 1.34129 (* 1 = 1.34129 loss)\nI1213 00:12:16.747741 20613 solver.cpp:228] Iteration 29100, loss = 0.14173\nI1213 00:12:16.747797 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 00:12:16.747813 20613 solver.cpp:244]     Train net output #1: loss = 0.141729 (* 1 = 0.141729 loss)\nI1213 00:12:16.841389 20613 sgd_solver.cpp:174] Iteration 29100, lr = 0.873\nI1213 00:12:16.855182 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.397566\nI1213 00:14:35.359603 20613 solver.cpp:337] Iteration 29200, Testing net (#0)\nI1213 00:15:57.111526 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81084\nI1213 00:15:57.111913 20613 solver.cpp:404]     Test net output #1: loss = 0.747634 (* 1 = 0.747634 loss)\nI1213 00:15:58.426151 20613 solver.cpp:228] Iteration 29200, loss = 0.107383\nI1213 00:15:58.426203 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 00:15:58.426219 20613 
solver.cpp:244]     Train net output #1: loss = 0.107382 (* 1 = 0.107382 loss)\nI1213 00:15:58.515895 20613 sgd_solver.cpp:174] Iteration 29200, lr = 0.876\nI1213 00:15:58.529719 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.387953\nI1213 00:18:16.980355 20613 solver.cpp:337] Iteration 29300, Testing net (#0)\nI1213 00:19:38.720978 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82052\nI1213 00:19:38.721341 20613 solver.cpp:404]     Test net output #1: loss = 0.738744 (* 1 = 0.738744 loss)\nI1213 00:19:40.035856 20613 solver.cpp:228] Iteration 29300, loss = 0.0726536\nI1213 00:19:40.035912 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 00:19:40.035928 20613 solver.cpp:244]     Train net output #1: loss = 0.0726535 (* 1 = 0.0726535 loss)\nI1213 00:19:40.138460 20613 sgd_solver.cpp:174] Iteration 29300, lr = 0.879\nI1213 00:19:40.151854 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.341404\nI1213 00:21:58.627316 20613 solver.cpp:337] Iteration 29400, Testing net (#0)\nI1213 00:23:20.374758 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78928\nI1213 00:23:20.375123 20613 solver.cpp:404]     Test net output #1: loss = 0.955727 (* 1 = 0.955727 loss)\nI1213 00:23:21.690656 20613 solver.cpp:228] Iteration 29400, loss = 0.137677\nI1213 00:23:21.690706 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 00:23:21.690724 20613 solver.cpp:244]     Train net output #1: loss = 0.137677 (* 1 = 0.137677 loss)\nI1213 00:23:21.783429 20613 sgd_solver.cpp:174] Iteration 29400, lr = 0.882\nI1213 00:23:21.797266 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.421303\nI1213 00:25:40.227627 20613 solver.cpp:337] Iteration 29500, Testing net (#0)\nI1213 00:27:01.971783 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74152\nI1213 00:27:01.972153 20613 solver.cpp:404]     Test net output #1: loss = 1.23338 (* 1 = 1.23338 loss)\nI1213 00:27:03.285974 20613 solver.cpp:228] Iteration 29500, loss = 0.0993855\nI1213 
00:27:03.286033 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 00:27:03.286051 20613 solver.cpp:244]     Train net output #1: loss = 0.0993854 (* 1 = 0.0993854 loss)\nI1213 00:27:03.380520 20613 sgd_solver.cpp:174] Iteration 29500, lr = 0.885\nI1213 00:27:03.394330 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.43875\nI1213 00:29:21.881085 20613 solver.cpp:337] Iteration 29600, Testing net (#0)\nI1213 00:30:43.612278 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79848\nI1213 00:30:43.612627 20613 solver.cpp:404]     Test net output #1: loss = 0.829606 (* 1 = 0.829606 loss)\nI1213 00:30:44.927193 20613 solver.cpp:228] Iteration 29600, loss = 0.186909\nI1213 00:30:44.927237 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 00:30:44.927253 20613 solver.cpp:244]     Train net output #1: loss = 0.186909 (* 1 = 0.186909 loss)\nI1213 00:30:45.021569 20613 sgd_solver.cpp:174] Iteration 29600, lr = 0.888\nI1213 00:30:45.035353 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415921\nI1213 00:33:03.479112 20613 solver.cpp:337] Iteration 29700, Testing net (#0)\nI1213 00:34:25.179770 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84896\nI1213 00:34:25.180058 20613 solver.cpp:404]     Test net output #1: loss = 0.571895 (* 1 = 0.571895 loss)\nI1213 00:34:26.494210 20613 solver.cpp:228] Iteration 29700, loss = 0.0653666\nI1213 00:34:26.494266 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 00:34:26.494284 20613 solver.cpp:244]     Train net output #1: loss = 0.0653666 (* 1 = 0.0653666 loss)\nI1213 00:34:26.589046 20613 sgd_solver.cpp:174] Iteration 29700, lr = 0.891\nI1213 00:34:26.602815 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.413199\nI1213 00:36:45.156025 20613 solver.cpp:337] Iteration 29800, Testing net (#0)\nI1213 00:38:06.805063 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74728\nI1213 00:38:06.805318 20613 solver.cpp:404]     Test net output #1: loss = 1.07406 (* 1 
= 1.07406 loss)\nI1213 00:38:08.119792 20613 solver.cpp:228] Iteration 29800, loss = 0.106249\nI1213 00:38:08.119834 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 00:38:08.119855 20613 solver.cpp:244]     Train net output #1: loss = 0.106249 (* 1 = 0.106249 loss)\nI1213 00:38:08.212237 20613 sgd_solver.cpp:174] Iteration 29800, lr = 0.894\nI1213 00:38:08.226027 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382092\nI1213 00:40:26.686414 20613 solver.cpp:337] Iteration 29900, Testing net (#0)\nI1213 00:41:48.169780 20613 solver.cpp:404]     Test net output #0: accuracy = 0.822\nI1213 00:41:48.170042 20613 solver.cpp:404]     Test net output #1: loss = 0.654946 (* 1 = 0.654946 loss)\nI1213 00:41:49.484365 20613 solver.cpp:228] Iteration 29900, loss = 0.101618\nI1213 00:41:49.484421 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 00:41:49.484437 20613 solver.cpp:244]     Train net output #1: loss = 0.101617 (* 1 = 0.101617 loss)\nI1213 00:41:49.581543 20613 sgd_solver.cpp:174] Iteration 29900, lr = 0.897\nI1213 00:41:49.595422 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.438278\nI1213 00:44:08.137871 20613 solver.cpp:337] Iteration 30000, Testing net (#0)\nI1213 00:45:30.186758 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82808\nI1213 00:45:30.187095 20613 solver.cpp:404]     Test net output #1: loss = 0.661032 (* 1 = 0.661032 loss)\nI1213 00:45:31.501827 20613 solver.cpp:228] Iteration 30000, loss = 0.111299\nI1213 00:45:31.501871 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 00:45:31.501888 20613 solver.cpp:244]     Train net output #1: loss = 0.111299 (* 1 = 0.111299 loss)\nI1213 00:45:31.595083 20613 sgd_solver.cpp:174] Iteration 30000, lr = 0.9\nI1213 00:45:31.608875 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.399093\nI1213 00:47:50.186653 20613 solver.cpp:337] Iteration 30100, Testing net (#0)\nI1213 00:49:11.473147 20613 solver.cpp:404]     Test net output #0: accuracy = 
0.79508\nI1213 00:49:11.473425 20613 solver.cpp:404]     Test net output #1: loss = 0.798958 (* 1 = 0.798958 loss)\nI1213 00:49:12.787542 20613 solver.cpp:228] Iteration 30100, loss = 0.144309\nI1213 00:49:12.787597 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 00:49:12.787614 20613 solver.cpp:244]     Train net output #1: loss = 0.144309 (* 1 = 0.144309 loss)\nI1213 00:49:12.883819 20613 sgd_solver.cpp:174] Iteration 30100, lr = 0.903\nI1213 00:49:12.897580 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.439373\nI1213 00:51:31.414084 20613 solver.cpp:337] Iteration 30200, Testing net (#0)\nI1213 00:52:52.859642 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7692\nI1213 00:52:52.859968 20613 solver.cpp:404]     Test net output #1: loss = 0.942101 (* 1 = 0.942101 loss)\nI1213 00:52:54.174443 20613 solver.cpp:228] Iteration 30200, loss = 0.140265\nI1213 00:52:54.174496 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 00:52:54.174513 20613 solver.cpp:244]     Train net output #1: loss = 0.140265 (* 1 = 0.140265 loss)\nI1213 00:52:54.265080 20613 sgd_solver.cpp:174] Iteration 30200, lr = 0.906\nI1213 00:52:54.278286 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.470936\nI1213 00:55:12.673405 20613 solver.cpp:337] Iteration 30300, Testing net (#0)\nI1213 00:56:33.885087 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82708\nI1213 00:56:33.885426 20613 solver.cpp:404]     Test net output #1: loss = 0.648808 (* 1 = 0.648808 loss)\nI1213 00:56:35.200069 20613 solver.cpp:228] Iteration 30300, loss = 0.0970538\nI1213 00:56:35.200120 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 00:56:35.200137 20613 solver.cpp:244]     Train net output #1: loss = 0.0970537 (* 1 = 0.0970537 loss)\nI1213 00:56:35.294143 20613 sgd_solver.cpp:174] Iteration 30300, lr = 0.909\nI1213 00:56:35.308028 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.394642\nI1213 00:58:53.741353 20613 solver.cpp:337] Iteration 
30400, Testing net (#0)\nI1213 01:00:14.981549 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7952\nI1213 01:00:14.981806 20613 solver.cpp:404]     Test net output #1: loss = 0.801355 (* 1 = 0.801355 loss)\nI1213 01:00:16.296203 20613 solver.cpp:228] Iteration 30400, loss = 0.107773\nI1213 01:00:16.296254 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 01:00:16.296272 20613 solver.cpp:244]     Train net output #1: loss = 0.107773 (* 1 = 0.107773 loss)\nI1213 01:00:16.390467 20613 sgd_solver.cpp:174] Iteration 30400, lr = 0.912\nI1213 01:00:16.403695 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.378783\nI1213 01:02:34.919286 20613 solver.cpp:337] Iteration 30500, Testing net (#0)\nI1213 01:03:56.539561 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84568\nI1213 01:03:56.539831 20613 solver.cpp:404]     Test net output #1: loss = 0.5618 (* 1 = 0.5618 loss)\nI1213 01:03:57.854276 20613 solver.cpp:228] Iteration 30500, loss = 0.0339728\nI1213 01:03:57.854351 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 01:03:57.854370 20613 solver.cpp:244]     Train net output #1: loss = 0.0339727 (* 1 = 0.0339727 loss)\nI1213 01:03:57.950868 20613 sgd_solver.cpp:174] Iteration 30500, lr = 0.915\nI1213 01:03:57.964789 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.298119\nI1213 01:06:16.516165 20613 solver.cpp:337] Iteration 30600, Testing net (#0)\nI1213 01:07:38.022768 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71268\nI1213 01:07:38.023073 20613 solver.cpp:404]     Test net output #1: loss = 1.36967 (* 1 = 1.36967 loss)\nI1213 01:07:39.337493 20613 solver.cpp:228] Iteration 30600, loss = 0.0820381\nI1213 01:07:39.337550 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 01:07:39.337568 20613 solver.cpp:244]     Train net output #1: loss = 0.082038 (* 1 = 0.082038 loss)\nI1213 01:07:39.428855 20613 sgd_solver.cpp:174] Iteration 30600, lr = 0.918\nI1213 01:07:39.442745 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.398272\nI1213 01:09:57.884234 20613 solver.cpp:337] Iteration 30700, Testing net (#0)\nI1213 01:11:19.276592 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6866\nI1213 01:11:19.276917 20613 solver.cpp:404]     Test net output #1: loss = 1.61505 (* 1 = 1.61505 loss)\nI1213 01:11:20.591135 20613 solver.cpp:228] Iteration 30700, loss = 0.160593\nI1213 01:11:20.591190 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 01:11:20.591207 20613 solver.cpp:244]     Train net output #1: loss = 0.160593 (* 1 = 0.160593 loss)\nI1213 01:11:20.684607 20613 sgd_solver.cpp:174] Iteration 30700, lr = 0.921\nI1213 01:11:20.698379 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.474204\nI1213 01:13:39.338165 20613 solver.cpp:337] Iteration 30800, Testing net (#0)\nI1213 01:15:01.098727 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80376\nI1213 01:15:01.099072 20613 solver.cpp:404]     Test net output #1: loss = 0.74977 (* 1 = 0.74977 loss)\nI1213 01:15:02.413733 20613 solver.cpp:228] Iteration 30800, loss = 0.0384595\nI1213 01:15:02.413789 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 01:15:02.413806 20613 solver.cpp:244]     Train net output #1: loss = 0.0384594 (* 1 = 0.0384594 loss)\nI1213 01:15:02.508263 20613 sgd_solver.cpp:174] Iteration 30800, lr = 0.924\nI1213 01:15:02.522481 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347248\nI1213 01:17:21.089462 20613 solver.cpp:337] Iteration 30900, Testing net (#0)\nI1213 01:18:42.661093 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82924\nI1213 01:18:42.661351 20613 solver.cpp:404]     Test net output #1: loss = 0.666756 (* 1 = 0.666756 loss)\nI1213 01:18:43.976166 20613 solver.cpp:228] Iteration 30900, loss = 0.080528\nI1213 01:18:43.976227 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 01:18:43.976245 20613 solver.cpp:244]     Train net output #1: loss = 0.0805279 (* 1 = 0.0805279 
loss)\nI1213 01:18:44.063519 20613 sgd_solver.cpp:174] Iteration 30900, lr = 0.927\nI1213 01:18:44.077374 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.404045\nI1213 01:21:01.736030 20613 solver.cpp:337] Iteration 31000, Testing net (#0)\nI1213 01:22:23.357451 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76468\nI1213 01:22:23.357770 20613 solver.cpp:404]     Test net output #1: loss = 0.92225 (* 1 = 0.92225 loss)\nI1213 01:22:24.672113 20613 solver.cpp:228] Iteration 31000, loss = 0.13892\nI1213 01:22:24.672170 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 01:22:24.672188 20613 solver.cpp:244]     Train net output #1: loss = 0.138919 (* 1 = 0.138919 loss)\nI1213 01:22:24.762325 20613 sgd_solver.cpp:174] Iteration 31000, lr = 0.93\nI1213 01:22:24.776254 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.441035\nI1213 01:24:43.254334 20613 solver.cpp:337] Iteration 31100, Testing net (#0)\nI1213 01:26:04.917780 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80756\nI1213 01:26:04.918071 20613 solver.cpp:404]     Test net output #1: loss = 0.898251 (* 1 = 0.898251 loss)\nI1213 01:26:06.232586 20613 solver.cpp:228] Iteration 31100, loss = 0.130929\nI1213 01:26:06.232646 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 01:26:06.232664 20613 solver.cpp:244]     Train net output #1: loss = 0.130929 (* 1 = 0.130929 loss)\nI1213 01:26:06.325830 20613 sgd_solver.cpp:174] Iteration 31100, lr = 0.933\nI1213 01:26:06.339749 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.368802\nI1213 01:28:24.805433 20613 solver.cpp:337] Iteration 31200, Testing net (#0)\nI1213 01:29:46.459977 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77168\nI1213 01:29:46.460211 20613 solver.cpp:404]     Test net output #1: loss = 1.15648 (* 1 = 1.15648 loss)\nI1213 01:29:47.774695 20613 solver.cpp:228] Iteration 31200, loss = 0.195861\nI1213 01:29:47.774752 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 
01:29:47.774770 20613 solver.cpp:244]     Train net output #1: loss = 0.195861 (* 1 = 0.195861 loss)\nI1213 01:29:47.864804 20613 sgd_solver.cpp:174] Iteration 31200, lr = 0.936\nI1213 01:29:47.878705 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.450785\nI1213 01:32:06.352315 20613 solver.cpp:337] Iteration 31300, Testing net (#0)\nI1213 01:33:28.040096 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8238\nI1213 01:33:28.040431 20613 solver.cpp:404]     Test net output #1: loss = 0.673228 (* 1 = 0.673228 loss)\nI1213 01:33:29.354035 20613 solver.cpp:228] Iteration 31300, loss = 0.0412056\nI1213 01:33:29.354095 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 01:33:29.354113 20613 solver.cpp:244]     Train net output #1: loss = 0.0412055 (* 1 = 0.0412055 loss)\nI1213 01:33:29.446717 20613 sgd_solver.cpp:174] Iteration 31300, lr = 0.939\nI1213 01:33:29.460595 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306678\nI1213 01:35:47.942435 20613 solver.cpp:337] Iteration 31400, Testing net (#0)\nI1213 01:37:09.541365 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73744\nI1213 01:37:09.541618 20613 solver.cpp:404]     Test net output #1: loss = 1.29823 (* 1 = 1.29823 loss)\nI1213 01:37:10.857051 20613 solver.cpp:228] Iteration 31400, loss = 0.116576\nI1213 01:37:10.857096 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 01:37:10.857113 20613 solver.cpp:244]     Train net output #1: loss = 0.116575 (* 1 = 0.116575 loss)\nI1213 01:37:10.950747 20613 sgd_solver.cpp:174] Iteration 31400, lr = 0.942\nI1213 01:37:10.964622 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.414783\nI1213 01:39:29.575075 20613 solver.cpp:337] Iteration 31500, Testing net (#0)\nI1213 01:40:51.150858 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83212\nI1213 01:40:51.151190 20613 solver.cpp:404]     Test net output #1: loss = 0.587824 (* 1 = 0.587824 loss)\nI1213 01:40:52.467432 20613 solver.cpp:228] Iteration 31500, loss = 
0.168156\nI1213 01:40:52.467494 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 01:40:52.467510 20613 solver.cpp:244]     Train net output #1: loss = 0.168156 (* 1 = 0.168156 loss)\nI1213 01:40:52.559958 20613 sgd_solver.cpp:174] Iteration 31500, lr = 0.945\nI1213 01:40:52.573906 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.375878\nI1213 01:43:11.023603 20613 solver.cpp:337] Iteration 31600, Testing net (#0)\nI1213 01:44:32.537916 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI1213 01:44:32.538166 20613 solver.cpp:404]     Test net output #1: loss = 0.816106 (* 1 = 0.816106 loss)\nI1213 01:44:33.852843 20613 solver.cpp:228] Iteration 31600, loss = 0.14311\nI1213 01:44:33.852906 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 01:44:33.852926 20613 solver.cpp:244]     Train net output #1: loss = 0.14311 (* 1 = 0.14311 loss)\nI1213 01:44:33.939298 20613 sgd_solver.cpp:174] Iteration 31600, lr = 0.948\nI1213 01:44:33.953261 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.438483\nI1213 01:46:52.476737 20613 solver.cpp:337] Iteration 31700, Testing net (#0)\nI1213 01:48:13.851557 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8306\nI1213 01:48:13.851827 20613 solver.cpp:404]     Test net output #1: loss = 0.669045 (* 1 = 0.669045 loss)\nI1213 01:48:15.165930 20613 solver.cpp:228] Iteration 31700, loss = 0.0842813\nI1213 01:48:15.165993 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 01:48:15.166012 20613 solver.cpp:244]     Train net output #1: loss = 0.0842811 (* 1 = 0.0842811 loss)\nI1213 01:48:15.260233 20613 sgd_solver.cpp:174] Iteration 31700, lr = 0.951\nI1213 01:48:15.274209 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3842\nI1213 01:50:33.912555 20613 solver.cpp:337] Iteration 31800, Testing net (#0)\nI1213 01:51:55.289105 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8128\nI1213 01:51:55.289337 20613 solver.cpp:404]     Test net output #1: loss = 
0.770984 (* 1 = 0.770984 loss)\nI1213 01:51:56.604061 20613 solver.cpp:228] Iteration 31800, loss = 0.174491\nI1213 01:51:56.604121 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 01:51:56.604138 20613 solver.cpp:244]     Train net output #1: loss = 0.174491 (* 1 = 0.174491 loss)\nI1213 01:51:56.696177 20613 sgd_solver.cpp:174] Iteration 31800, lr = 0.954\nI1213 01:51:56.710037 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.395205\nI1213 01:54:15.195822 20613 solver.cpp:337] Iteration 31900, Testing net (#0)\nI1213 01:55:36.683133 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83088\nI1213 01:55:36.683424 20613 solver.cpp:404]     Test net output #1: loss = 0.632815 (* 1 = 0.632815 loss)\nI1213 01:55:37.997766 20613 solver.cpp:228] Iteration 31900, loss = 0.064785\nI1213 01:55:37.997807 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 01:55:37.997824 20613 solver.cpp:244]     Train net output #1: loss = 0.0647848 (* 1 = 0.0647848 loss)\nI1213 01:55:38.093797 20613 sgd_solver.cpp:174] Iteration 31900, lr = 0.957\nI1213 01:55:38.107640 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.372568\nI1213 01:57:55.890905 20613 solver.cpp:337] Iteration 32000, Testing net (#0)\nI1213 01:59:17.175968 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85136\nI1213 01:59:17.176301 20613 solver.cpp:404]     Test net output #1: loss = 0.533545 (* 1 = 0.533545 loss)\nI1213 01:59:18.490785 20613 solver.cpp:228] Iteration 32000, loss = 0.117063\nI1213 01:59:18.490851 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 01:59:18.490870 20613 solver.cpp:244]     Train net output #1: loss = 0.117063 (* 1 = 0.117063 loss)\nI1213 01:59:18.582386 20613 sgd_solver.cpp:174] Iteration 32000, lr = 0.96\nI1213 01:59:18.596189 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.431902\nI1213 02:01:37.041438 20613 solver.cpp:337] Iteration 32100, Testing net (#0)\nI1213 02:02:58.327093 20613 solver.cpp:404]     Test net output 
#0: accuracy = 0.79672\nI1213 02:02:58.327433 20613 solver.cpp:404]     Test net output #1: loss = 0.736249 (* 1 = 0.736249 loss)\nI1213 02:02:59.641729 20613 solver.cpp:228] Iteration 32100, loss = 0.0517364\nI1213 02:02:59.641791 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 02:02:59.641809 20613 solver.cpp:244]     Train net output #1: loss = 0.0517362 (* 1 = 0.0517362 loss)\nI1213 02:02:59.728500 20613 sgd_solver.cpp:174] Iteration 32100, lr = 0.963\nI1213 02:02:59.742436 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349\nI1213 02:05:18.216843 20613 solver.cpp:337] Iteration 32200, Testing net (#0)\nI1213 02:06:39.952030 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80228\nI1213 02:06:39.952371 20613 solver.cpp:404]     Test net output #1: loss = 0.735683 (* 1 = 0.735683 loss)\nI1213 02:06:41.267179 20613 solver.cpp:228] Iteration 32200, loss = 0.187581\nI1213 02:06:41.267237 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:06:41.267256 20613 solver.cpp:244]     Train net output #1: loss = 0.187581 (* 1 = 0.187581 loss)\nI1213 02:06:41.360545 20613 sgd_solver.cpp:174] Iteration 32200, lr = 0.966\nI1213 02:06:41.374418 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.384877\nI1213 02:08:59.087973 20613 solver.cpp:337] Iteration 32300, Testing net (#0)\nI1213 02:10:20.853690 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8382\nI1213 02:10:20.854022 20613 solver.cpp:404]     Test net output #1: loss = 0.659235 (* 1 = 0.659235 loss)\nI1213 02:10:22.169028 20613 solver.cpp:228] Iteration 32300, loss = 0.158786\nI1213 02:10:22.169086 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:10:22.169106 20613 solver.cpp:244]     Train net output #1: loss = 0.158785 (* 1 = 0.158785 loss)\nI1213 02:10:22.255445 20613 sgd_solver.cpp:174] Iteration 32300, lr = 0.969\nI1213 02:10:22.269383 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.434222\nI1213 02:12:40.722379 20613 solver.cpp:337] 
Iteration 32400, Testing net (#0)\nI1213 02:14:02.499055 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74156\nI1213 02:14:02.499414 20613 solver.cpp:404]     Test net output #1: loss = 1.27842 (* 1 = 1.27842 loss)\nI1213 02:14:03.813917 20613 solver.cpp:228] Iteration 32400, loss = 0.110091\nI1213 02:14:03.813978 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:14:03.813997 20613 solver.cpp:244]     Train net output #1: loss = 0.110091 (* 1 = 0.110091 loss)\nI1213 02:14:03.908248 20613 sgd_solver.cpp:174] Iteration 32400, lr = 0.972\nI1213 02:14:03.922160 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.413752\nI1213 02:16:22.450575 20613 solver.cpp:337] Iteration 32500, Testing net (#0)\nI1213 02:17:44.227013 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84528\nI1213 02:17:44.227380 20613 solver.cpp:404]     Test net output #1: loss = 0.592845 (* 1 = 0.592845 loss)\nI1213 02:17:45.541785 20613 solver.cpp:228] Iteration 32500, loss = 0.101167\nI1213 02:17:45.541827 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 02:17:45.541844 20613 solver.cpp:244]     Train net output #1: loss = 0.101167 (* 1 = 0.101167 loss)\nI1213 02:17:45.632980 20613 sgd_solver.cpp:174] Iteration 32500, lr = 0.975\nI1213 02:17:45.646857 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.34646\nI1213 02:20:03.321846 20613 solver.cpp:337] Iteration 32600, Testing net (#0)\nI1213 02:21:25.081290 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78516\nI1213 02:21:25.081629 20613 solver.cpp:404]     Test net output #1: loss = 0.78623 (* 1 = 0.78623 loss)\nI1213 02:21:26.395902 20613 solver.cpp:228] Iteration 32600, loss = 0.0864093\nI1213 02:21:26.395961 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 02:21:26.395980 20613 solver.cpp:244]     Train net output #1: loss = 0.0864091 (* 1 = 0.0864091 loss)\nI1213 02:21:26.489645 20613 sgd_solver.cpp:174] Iteration 32600, lr = 0.978\nI1213 02:21:26.503144 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.333052\nI1213 02:23:45.068876 20613 solver.cpp:337] Iteration 32700, Testing net (#0)\nI1213 02:25:06.849050 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80484\nI1213 02:25:06.849421 20613 solver.cpp:404]     Test net output #1: loss = 0.737978 (* 1 = 0.737978 loss)\nI1213 02:25:08.164484 20613 solver.cpp:228] Iteration 32700, loss = 0.103672\nI1213 02:25:08.164541 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:25:08.164561 20613 solver.cpp:244]     Train net output #1: loss = 0.103672 (* 1 = 0.103672 loss)\nI1213 02:25:08.256655 20613 sgd_solver.cpp:174] Iteration 32700, lr = 0.981\nI1213 02:25:08.270516 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.401653\nI1213 02:27:26.010936 20613 solver.cpp:337] Iteration 32800, Testing net (#0)\nI1213 02:28:47.787528 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85552\nI1213 02:28:47.787870 20613 solver.cpp:404]     Test net output #1: loss = 0.530804 (* 1 = 0.530804 loss)\nI1213 02:28:49.102303 20613 solver.cpp:228] Iteration 32800, loss = 0.115529\nI1213 02:28:49.102365 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:28:49.102382 20613 solver.cpp:244]     Train net output #1: loss = 0.115529 (* 1 = 0.115529 loss)\nI1213 02:28:49.193258 20613 sgd_solver.cpp:174] Iteration 32800, lr = 0.984\nI1213 02:28:49.207181 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.393314\nI1213 02:31:07.868057 20613 solver.cpp:337] Iteration 32900, Testing net (#0)\nI1213 02:32:29.631054 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85088\nI1213 02:32:29.631405 20613 solver.cpp:404]     Test net output #1: loss = 0.543313 (* 1 = 0.543313 loss)\nI1213 02:32:30.945758 20613 solver.cpp:228] Iteration 32900, loss = 0.116433\nI1213 02:32:30.945818 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 02:32:30.945837 20613 solver.cpp:244]     Train net output #1: loss = 0.116433 (* 1 = 0.116433 
loss)\nI1213 02:32:31.039777 20613 sgd_solver.cpp:174] Iteration 32900, lr = 0.987\nI1213 02:32:31.053706 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.424332\nI1213 02:34:48.758801 20613 solver.cpp:337] Iteration 33000, Testing net (#0)\nI1213 02:36:10.532805 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82272\nI1213 02:36:10.533174 20613 solver.cpp:404]     Test net output #1: loss = 0.774101 (* 1 = 0.774101 loss)\nI1213 02:36:11.847277 20613 solver.cpp:228] Iteration 33000, loss = 0.09367\nI1213 02:36:11.847333 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 02:36:11.847352 20613 solver.cpp:244]     Train net output #1: loss = 0.0936699 (* 1 = 0.0936699 loss)\nI1213 02:36:11.942389 20613 sgd_solver.cpp:174] Iteration 33000, lr = 0.99\nI1213 02:36:11.956352 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.302489\nI1213 02:38:30.580976 20613 solver.cpp:337] Iteration 33100, Testing net (#0)\nI1213 02:39:52.347688 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79712\nI1213 02:39:52.348057 20613 solver.cpp:404]     Test net output #1: loss = 0.806623 (* 1 = 0.806623 loss)\nI1213 02:39:53.662502 20613 solver.cpp:228] Iteration 33100, loss = 0.0640615\nI1213 02:39:53.662559 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 02:39:53.662576 20613 solver.cpp:244]     Train net output #1: loss = 0.0640614 (* 1 = 0.0640614 loss)\nI1213 02:39:53.755864 20613 sgd_solver.cpp:174] Iteration 33100, lr = 0.993\nI1213 02:39:53.769703 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.462502\nI1213 02:42:12.298738 20613 solver.cpp:337] Iteration 33200, Testing net (#0)\nI1213 02:43:34.059185 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81124\nI1213 02:43:34.059552 20613 solver.cpp:404]     Test net output #1: loss = 0.70502 (* 1 = 0.70502 loss)\nI1213 02:43:35.373528 20613 solver.cpp:228] Iteration 33200, loss = 0.156863\nI1213 02:43:35.373572 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI1213 02:43:35.373587 20613 solver.cpp:244]     Train net output #1: loss = 0.156863 (* 1 = 0.156863 loss)\nI1213 02:43:35.469317 20613 sgd_solver.cpp:174] Iteration 33200, lr = 0.996\nI1213 02:43:35.483250 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.455854\nI1213 02:45:54.051692 20613 solver.cpp:337] Iteration 33300, Testing net (#0)\nI1213 02:47:15.820050 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80424\nI1213 02:47:15.820417 20613 solver.cpp:404]     Test net output #1: loss = 0.804546 (* 1 = 0.804546 loss)\nI1213 02:47:17.134436 20613 solver.cpp:228] Iteration 33300, loss = 0.0628662\nI1213 02:47:17.134490 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 02:47:17.134508 20613 solver.cpp:244]     Train net output #1: loss = 0.0628661 (* 1 = 0.0628661 loss)\nI1213 02:47:17.228452 20613 sgd_solver.cpp:174] Iteration 33300, lr = 0.999\nI1213 02:47:17.242375 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415159\nI1213 02:49:34.968629 20613 solver.cpp:337] Iteration 33400, Testing net (#0)\nI1213 02:50:56.728226 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83308\nI1213 02:50:56.728581 20613 solver.cpp:404]     Test net output #1: loss = 0.62099 (* 1 = 0.62099 loss)\nI1213 02:50:58.042933 20613 solver.cpp:228] Iteration 33400, loss = 0.101152\nI1213 02:50:58.042989 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 02:50:58.043007 20613 solver.cpp:244]     Train net output #1: loss = 0.101152 (* 1 = 0.101152 loss)\nI1213 02:50:58.130743 20613 sgd_solver.cpp:174] Iteration 33400, lr = 1.002\nI1213 02:50:58.144167 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.445549\nI1213 02:53:16.560269 20613 solver.cpp:337] Iteration 33500, Testing net (#0)\nI1213 02:54:38.328562 20613 solver.cpp:404]     Test net output #0: accuracy = 0.86344\nI1213 02:54:38.328912 20613 solver.cpp:404]     Test net output #1: loss = 0.490986 (* 1 = 0.490986 loss)\nI1213 02:54:39.643424 20613 solver.cpp:228] Iteration 
33500, loss = 0.0839851\nI1213 02:54:39.643476 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 02:54:39.643496 20613 solver.cpp:244]     Train net output #1: loss = 0.083985 (* 1 = 0.083985 loss)\nI1213 02:54:39.729528 20613 sgd_solver.cpp:174] Iteration 33500, lr = 1.005\nI1213 02:54:39.743505 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.33853\nI1213 02:56:57.471251 20613 solver.cpp:337] Iteration 33600, Testing net (#0)\nI1213 02:58:19.236227 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69984\nI1213 02:58:19.236588 20613 solver.cpp:404]     Test net output #1: loss = 1.47089 (* 1 = 1.47089 loss)\nI1213 02:58:20.551100 20613 solver.cpp:228] Iteration 33600, loss = 0.104344\nI1213 02:58:20.551156 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 02:58:20.551173 20613 solver.cpp:244]     Train net output #1: loss = 0.104344 (* 1 = 0.104344 loss)\nI1213 02:58:20.638677 20613 sgd_solver.cpp:174] Iteration 33600, lr = 1.008\nI1213 02:58:20.652611 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.391773\nI1213 03:00:39.164639 20613 solver.cpp:337] Iteration 33700, Testing net (#0)\nI1213 03:02:00.937254 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80304\nI1213 03:02:00.937585 20613 solver.cpp:404]     Test net output #1: loss = 0.765609 (* 1 = 0.765609 loss)\nI1213 03:02:02.251698 20613 solver.cpp:228] Iteration 33700, loss = 0.136185\nI1213 03:02:02.251746 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 03:02:02.251765 20613 solver.cpp:244]     Train net output #1: loss = 0.136185 (* 1 = 0.136185 loss)\nI1213 03:02:02.343997 20613 sgd_solver.cpp:174] Iteration 33700, lr = 1.011\nI1213 03:02:02.358052 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.417025\nI1213 03:04:20.770359 20613 solver.cpp:337] Iteration 33800, Testing net (#0)\nI1213 03:05:42.533362 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79524\nI1213 03:05:42.533728 20613 solver.cpp:404]     Test net output 
#1: loss = 0.761255 (* 1 = 0.761255 loss)\nI1213 03:05:43.846256 20613 solver.cpp:228] Iteration 33800, loss = 0.0720786\nI1213 03:05:43.846307 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 03:05:43.846325 20613 solver.cpp:244]     Train net output #1: loss = 0.0720785 (* 1 = 0.0720785 loss)\nI1213 03:05:43.938552 20613 sgd_solver.cpp:174] Iteration 33800, lr = 1.014\nI1213 03:05:43.952395 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.454848\nI1213 03:08:02.278035 20613 solver.cpp:337] Iteration 33900, Testing net (#0)\nI1213 03:09:24.042815 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82644\nI1213 03:09:24.043319 20613 solver.cpp:404]     Test net output #1: loss = 0.615136 (* 1 = 0.615136 loss)\nI1213 03:09:25.356201 20613 solver.cpp:228] Iteration 33900, loss = 0.110361\nI1213 03:09:25.356253 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 03:09:25.356269 20613 solver.cpp:244]     Train net output #1: loss = 0.110361 (* 1 = 0.110361 loss)\nI1213 03:09:25.453166 20613 sgd_solver.cpp:174] Iteration 33900, lr = 1.017\nI1213 03:09:25.467056 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.37173\nI1213 03:11:43.802675 20613 solver.cpp:337] Iteration 34000, Testing net (#0)\nI1213 03:13:05.558382 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82544\nI1213 03:13:05.558734 20613 solver.cpp:404]     Test net output #1: loss = 0.658208 (* 1 = 0.658208 loss)\nI1213 03:13:06.871596 20613 solver.cpp:228] Iteration 34000, loss = 0.0391417\nI1213 03:13:06.871649 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 03:13:06.871667 20613 solver.cpp:244]     Train net output #1: loss = 0.0391416 (* 1 = 0.0391416 loss)\nI1213 03:13:06.962965 20613 sgd_solver.cpp:174] Iteration 34000, lr = 1.02\nI1213 03:13:06.976879 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316218\nI1213 03:15:25.387663 20613 solver.cpp:337] Iteration 34100, Testing net (#0)\nI1213 03:16:47.149160 20613 solver.cpp:404]     
Test net output #0: accuracy = 0.77788\nI1213 03:16:47.149525 20613 solver.cpp:404]     Test net output #1: loss = 0.956776 (* 1 = 0.956776 loss)\nI1213 03:16:48.462144 20613 solver.cpp:228] Iteration 34100, loss = 0.100934\nI1213 03:16:48.462195 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 03:16:48.462213 20613 solver.cpp:244]     Train net output #1: loss = 0.100934 (* 1 = 0.100934 loss)\nI1213 03:16:48.554632 20613 sgd_solver.cpp:174] Iteration 34100, lr = 1.023\nI1213 03:16:48.568565 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.449258\nI1213 03:19:06.902911 20613 solver.cpp:337] Iteration 34200, Testing net (#0)\nI1213 03:20:28.677791 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7944\nI1213 03:20:28.678145 20613 solver.cpp:404]     Test net output #1: loss = 0.828729 (* 1 = 0.828729 loss)\nI1213 03:20:29.990658 20613 solver.cpp:228] Iteration 34200, loss = 0.131025\nI1213 03:20:29.990715 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 03:20:29.990732 20613 solver.cpp:244]     Train net output #1: loss = 0.131025 (* 1 = 0.131025 loss)\nI1213 03:20:30.085108 20613 sgd_solver.cpp:174] Iteration 34200, lr = 1.026\nI1213 03:20:30.098961 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.47214\nI1213 03:22:48.458644 20613 solver.cpp:337] Iteration 34300, Testing net (#0)\nI1213 03:24:10.229439 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1213 03:24:10.229812 20613 solver.cpp:404]     Test net output #1: loss = 0.791526 (* 1 = 0.791526 loss)\nI1213 03:24:11.542434 20613 solver.cpp:228] Iteration 34300, loss = 0.111626\nI1213 03:24:11.542485 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 03:24:11.542502 20613 solver.cpp:244]     Train net output #1: loss = 0.111626 (* 1 = 0.111626 loss)\nI1213 03:24:11.637763 20613 sgd_solver.cpp:174] Iteration 34300, lr = 1.029\nI1213 03:24:11.651600 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.371157\nI1213 03:26:29.858079 20613 
solver.cpp:337] Iteration 34400, Testing net (#0)\nI1213 03:27:51.618647 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7882\nI1213 03:27:51.619020 20613 solver.cpp:404]     Test net output #1: loss = 0.828831 (* 1 = 0.828831 loss)\nI1213 03:27:52.931968 20613 solver.cpp:228] Iteration 34400, loss = 0.129038\nI1213 03:27:52.932020 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 03:27:52.932036 20613 solver.cpp:244]     Train net output #1: loss = 0.129038 (* 1 = 0.129038 loss)\nI1213 03:27:53.023763 20613 sgd_solver.cpp:174] Iteration 34400, lr = 1.032\nI1213 03:27:53.037652 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.418885\nI1213 03:30:11.396000 20613 solver.cpp:337] Iteration 34500, Testing net (#0)\nI1213 03:31:33.175654 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8368\nI1213 03:31:33.176023 20613 solver.cpp:404]     Test net output #1: loss = 0.622821 (* 1 = 0.622821 loss)\nI1213 03:31:34.489363 20613 solver.cpp:228] Iteration 34500, loss = 0.0747538\nI1213 03:31:34.489414 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 03:31:34.489431 20613 solver.cpp:244]     Train net output #1: loss = 0.0747538 (* 1 = 0.0747538 loss)\nI1213 03:31:34.576886 20613 sgd_solver.cpp:174] Iteration 34500, lr = 1.035\nI1213 03:31:34.590541 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.348556\nI1213 03:33:52.138960 20613 solver.cpp:337] Iteration 34600, Testing net (#0)\nI1213 03:35:13.921051 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8616\nI1213 03:35:13.921394 20613 solver.cpp:404]     Test net output #1: loss = 0.539391 (* 1 = 0.539391 loss)\nI1213 03:35:15.233666 20613 solver.cpp:228] Iteration 34600, loss = 0.154829\nI1213 03:35:15.233717 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 03:35:15.233736 20613 solver.cpp:244]     Train net output #1: loss = 0.154829 (* 1 = 0.154829 loss)\nI1213 03:35:15.322580 20613 sgd_solver.cpp:174] Iteration 34600, lr = 
1.038\nI1213 03:35:15.336413 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362008\nI1213 03:37:33.740383 20613 solver.cpp:337] Iteration 34700, Testing net (#0)\nI1213 03:38:55.525411 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8042\nI1213 03:38:55.525823 20613 solver.cpp:404]     Test net output #1: loss = 0.825032 (* 1 = 0.825032 loss)\nI1213 03:38:56.838807 20613 solver.cpp:228] Iteration 34700, loss = 0.0648341\nI1213 03:38:56.838862 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 03:38:56.838879 20613 solver.cpp:244]     Train net output #1: loss = 0.0648341 (* 1 = 0.0648341 loss)\nI1213 03:38:56.928102 20613 sgd_solver.cpp:174] Iteration 34700, lr = 1.041\nI1213 03:38:56.942061 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362285\nI1213 03:41:15.308742 20613 solver.cpp:337] Iteration 34800, Testing net (#0)\nI1213 03:42:37.087244 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74424\nI1213 03:42:37.087570 20613 solver.cpp:404]     Test net output #1: loss = 1.20485 (* 1 = 1.20485 loss)\nI1213 03:42:38.400334 20613 solver.cpp:228] Iteration 34800, loss = 0.224452\nI1213 03:42:38.400388 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 03:42:38.400405 20613 solver.cpp:244]     Train net output #1: loss = 0.224452 (* 1 = 0.224452 loss)\nI1213 03:42:38.491050 20613 sgd_solver.cpp:174] Iteration 34800, lr = 1.044\nI1213 03:42:38.504829 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.458032\nI1213 03:44:56.928524 20613 solver.cpp:337] Iteration 34900, Testing net (#0)\nI1213 03:46:18.723196 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83152\nI1213 03:46:18.723541 20613 solver.cpp:404]     Test net output #1: loss = 0.681071 (* 1 = 0.681071 loss)\nI1213 03:46:20.036296 20613 solver.cpp:228] Iteration 34900, loss = 0.0473477\nI1213 03:46:20.036350 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 03:46:20.036367 20613 solver.cpp:244]     Train net output #1: loss = 
0.0473477 (* 1 = 0.0473477 loss)\nI1213 03:46:20.132753 20613 sgd_solver.cpp:174] Iteration 34900, lr = 1.047\nI1213 03:46:20.146675 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.383902\nI1213 03:48:38.437929 20613 solver.cpp:337] Iteration 35000, Testing net (#0)\nI1213 03:50:00.202756 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79844\nI1213 03:50:00.203126 20613 solver.cpp:404]     Test net output #1: loss = 0.87702 (* 1 = 0.87702 loss)\nI1213 03:50:01.515614 20613 solver.cpp:228] Iteration 35000, loss = 0.0810627\nI1213 03:50:01.515666 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 03:50:01.515682 20613 solver.cpp:244]     Train net output #1: loss = 0.0810626 (* 1 = 0.0810626 loss)\nI1213 03:50:01.608969 20613 sgd_solver.cpp:174] Iteration 35000, lr = 1.05\nI1213 03:50:01.622858 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.344323\nI1213 03:52:19.987943 20613 solver.cpp:337] Iteration 35100, Testing net (#0)\nI1213 03:53:41.758131 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80164\nI1213 03:53:41.758496 20613 solver.cpp:404]     Test net output #1: loss = 0.831542 (* 1 = 0.831542 loss)\nI1213 03:53:43.070755 20613 solver.cpp:228] Iteration 35100, loss = 0.18525\nI1213 03:53:43.070811 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 03:53:43.070828 20613 solver.cpp:244]     Train net output #1: loss = 0.18525 (* 1 = 0.18525 loss)\nI1213 03:53:43.163148 20613 sgd_solver.cpp:174] Iteration 35100, lr = 1.053\nI1213 03:53:43.177022 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415242\nI1213 03:56:01.514998 20613 solver.cpp:337] Iteration 35200, Testing net (#0)\nI1213 03:57:23.139232 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8248\nI1213 03:57:23.139603 20613 solver.cpp:404]     Test net output #1: loss = 0.658945 (* 1 = 0.658945 loss)\nI1213 03:57:24.451692 20613 solver.cpp:228] Iteration 35200, loss = 0.180217\nI1213 03:57:24.451751 20613 solver.cpp:244]     Train net output 
#0: accuracy = 0.912\nI1213 03:57:24.451771 20613 solver.cpp:244]     Train net output #1: loss = 0.180217 (* 1 = 0.180217 loss)\nI1213 03:57:24.545963 20613 sgd_solver.cpp:174] Iteration 35200, lr = 1.056\nI1213 03:57:24.559259 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.422339\nI1213 03:59:42.936508 20613 solver.cpp:337] Iteration 35300, Testing net (#0)\nI1213 04:01:04.531481 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82248\nI1213 04:01:04.531836 20613 solver.cpp:404]     Test net output #1: loss = 0.692272 (* 1 = 0.692272 loss)\nI1213 04:01:05.844460 20613 solver.cpp:228] Iteration 35300, loss = 0.0668891\nI1213 04:01:05.844517 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 04:01:05.844537 20613 solver.cpp:244]     Train net output #1: loss = 0.066889 (* 1 = 0.066889 loss)\nI1213 04:01:05.942459 20613 sgd_solver.cpp:174] Iteration 35300, lr = 1.059\nI1213 04:01:05.956250 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.460043\nI1213 04:03:23.491520 20613 solver.cpp:337] Iteration 35400, Testing net (#0)\nI1213 04:04:45.264586 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80544\nI1213 04:04:45.264955 20613 solver.cpp:404]     Test net output #1: loss = 0.830775 (* 1 = 0.830775 loss)\nI1213 04:04:46.577487 20613 solver.cpp:228] Iteration 35400, loss = 0.135596\nI1213 04:04:46.577545 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 04:04:46.577564 20613 solver.cpp:244]     Train net output #1: loss = 0.135596 (* 1 = 0.135596 loss)\nI1213 04:04:46.668925 20613 sgd_solver.cpp:174] Iteration 35400, lr = 1.062\nI1213 04:04:46.682837 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.363129\nI1213 04:07:05.057952 20613 solver.cpp:337] Iteration 35500, Testing net (#0)\nI1213 04:08:26.830106 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81464\nI1213 04:08:26.830451 20613 solver.cpp:404]     Test net output #1: loss = 0.845574 (* 1 = 0.845574 loss)\nI1213 04:08:28.143196 20613 
solver.cpp:228] Iteration 35500, loss = 0.117855\nI1213 04:08:28.143254 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 04:08:28.143272 20613 solver.cpp:244]     Train net output #1: loss = 0.117855 (* 1 = 0.117855 loss)\nI1213 04:08:28.236444 20613 sgd_solver.cpp:174] Iteration 35500, lr = 1.065\nI1213 04:08:28.250380 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389672\nI1213 04:10:46.583351 20613 solver.cpp:337] Iteration 35600, Testing net (#0)\nI1213 04:12:08.366492 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83204\nI1213 04:12:08.366850 20613 solver.cpp:404]     Test net output #1: loss = 0.678284 (* 1 = 0.678284 loss)\nI1213 04:12:09.680054 20613 solver.cpp:228] Iteration 35600, loss = 0.153671\nI1213 04:12:09.680096 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 04:12:09.680112 20613 solver.cpp:244]     Train net output #1: loss = 0.153671 (* 1 = 0.153671 loss)\nI1213 04:12:09.772287 20613 sgd_solver.cpp:174] Iteration 35600, lr = 1.068\nI1213 04:12:09.786175 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.378041\nI1213 04:14:28.134065 20613 solver.cpp:337] Iteration 35700, Testing net (#0)\nI1213 04:15:49.906155 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84028\nI1213 04:15:49.906522 20613 solver.cpp:404]     Test net output #1: loss = 0.599818 (* 1 = 0.599818 loss)\nI1213 04:15:51.219195 20613 solver.cpp:228] Iteration 35700, loss = 0.135164\nI1213 04:15:51.219251 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 04:15:51.219269 20613 solver.cpp:244]     Train net output #1: loss = 0.135164 (* 1 = 0.135164 loss)\nI1213 04:15:51.315094 20613 sgd_solver.cpp:174] Iteration 35700, lr = 1.071\nI1213 04:15:51.328953 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405615\nI1213 04:18:09.584720 20613 solver.cpp:337] Iteration 35800, Testing net (#0)\nI1213 04:19:31.357404 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8238\nI1213 04:19:31.357754 20613 
solver.cpp:404]     Test net output #1: loss = 0.638673 (* 1 = 0.638673 loss)\nI1213 04:19:32.671053 20613 solver.cpp:228] Iteration 35800, loss = 0.197513\nI1213 04:19:32.671110 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 04:19:32.671128 20613 solver.cpp:244]     Train net output #1: loss = 0.197513 (* 1 = 0.197513 loss)\nI1213 04:19:32.764817 20613 sgd_solver.cpp:174] Iteration 35800, lr = 1.074\nI1213 04:19:32.778719 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.427336\nI1213 04:21:51.121316 20613 solver.cpp:337] Iteration 35900, Testing net (#0)\nI1213 04:23:12.908854 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81192\nI1213 04:23:12.909214 20613 solver.cpp:404]     Test net output #1: loss = 0.718062 (* 1 = 0.718062 loss)\nI1213 04:23:14.222213 20613 solver.cpp:228] Iteration 35900, loss = 0.0669941\nI1213 04:23:14.222265 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 04:23:14.222282 20613 solver.cpp:244]     Train net output #1: loss = 0.066994 (* 1 = 0.066994 loss)\nI1213 04:23:14.317121 20613 sgd_solver.cpp:174] Iteration 35900, lr = 1.077\nI1213 04:23:14.330544 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.380077\nI1213 04:25:31.964095 20613 solver.cpp:337] Iteration 36000, Testing net (#0)\nI1213 04:26:53.750092 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79952\nI1213 04:26:53.750437 20613 solver.cpp:404]     Test net output #1: loss = 0.744262 (* 1 = 0.744262 loss)\nI1213 04:26:55.063287 20613 solver.cpp:228] Iteration 36000, loss = 0.161516\nI1213 04:26:55.063340 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 04:26:55.063359 20613 solver.cpp:244]     Train net output #1: loss = 0.161516 (* 1 = 0.161516 loss)\nI1213 04:26:55.154592 20613 sgd_solver.cpp:174] Iteration 36000, lr = 1.08\nI1213 04:26:55.168463 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376619\nI1213 04:29:13.512236 20613 solver.cpp:337] Iteration 36100, Testing net (#0)\nI1213 
04:30:35.285184 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76496\nI1213 04:30:35.285555 20613 solver.cpp:404]     Test net output #1: loss = 1.12146 (* 1 = 1.12146 loss)\nI1213 04:30:36.598062 20613 solver.cpp:228] Iteration 36100, loss = 0.101271\nI1213 04:30:36.598119 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 04:30:36.598137 20613 solver.cpp:244]     Train net output #1: loss = 0.101271 (* 1 = 0.101271 loss)\nI1213 04:30:36.694025 20613 sgd_solver.cpp:174] Iteration 36100, lr = 1.083\nI1213 04:30:36.707859 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.413721\nI1213 04:32:55.078660 20613 solver.cpp:337] Iteration 36200, Testing net (#0)\nI1213 04:34:16.782927 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85132\nI1213 04:34:16.783257 20613 solver.cpp:404]     Test net output #1: loss = 0.56477 (* 1 = 0.56477 loss)\nI1213 04:34:18.095496 20613 solver.cpp:228] Iteration 36200, loss = 0.0643243\nI1213 04:34:18.095554 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 04:34:18.095572 20613 solver.cpp:244]     Train net output #1: loss = 0.0643242 (* 1 = 0.0643242 loss)\nI1213 04:34:18.187048 20613 sgd_solver.cpp:174] Iteration 36200, lr = 1.086\nI1213 04:34:18.200930 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343102\nI1213 04:36:36.571818 20613 solver.cpp:337] Iteration 36300, Testing net (#0)\nI1213 04:37:58.292729 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81016\nI1213 04:37:58.293107 20613 solver.cpp:404]     Test net output #1: loss = 0.754684 (* 1 = 0.754684 loss)\nI1213 04:37:59.605583 20613 solver.cpp:228] Iteration 36300, loss = 0.126988\nI1213 04:37:59.605624 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 04:37:59.605639 20613 solver.cpp:244]     Train net output #1: loss = 0.126988 (* 1 = 0.126988 loss)\nI1213 04:37:59.700901 20613 sgd_solver.cpp:174] Iteration 36300, lr = 1.089\nI1213 04:37:59.714709 20613 sgd_solver.cpp:149] Gradient: L2 
norm 0.408347\nI1213 04:40:18.104750 20613 solver.cpp:337] Iteration 36400, Testing net (#0)\nI1213 04:41:39.870918 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81328\nI1213 04:41:39.871263 20613 solver.cpp:404]     Test net output #1: loss = 0.765233 (* 1 = 0.765233 loss)\nI1213 04:41:41.183648 20613 solver.cpp:228] Iteration 36400, loss = 0.133074\nI1213 04:41:41.183703 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 04:41:41.183722 20613 solver.cpp:244]     Train net output #1: loss = 0.133074 (* 1 = 0.133074 loss)\nI1213 04:41:41.273532 20613 sgd_solver.cpp:174] Iteration 36400, lr = 1.092\nI1213 04:41:41.287417 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.355119\nI1213 04:43:58.944131 20613 solver.cpp:337] Iteration 36500, Testing net (#0)\nI1213 04:45:20.697878 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83904\nI1213 04:45:20.698227 20613 solver.cpp:404]     Test net output #1: loss = 0.572353 (* 1 = 0.572353 loss)\nI1213 04:45:22.010941 20613 solver.cpp:228] Iteration 36500, loss = 0.174759\nI1213 04:45:22.010998 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 04:45:22.011015 20613 solver.cpp:244]     Train net output #1: loss = 0.174759 (* 1 = 0.174759 loss)\nI1213 04:45:22.103564 20613 sgd_solver.cpp:174] Iteration 36500, lr = 1.095\nI1213 04:45:22.117491 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405715\nI1213 04:47:40.398083 20613 solver.cpp:337] Iteration 36600, Testing net (#0)\nI1213 04:49:02.151737 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80232\nI1213 04:49:02.152115 20613 solver.cpp:404]     Test net output #1: loss = 0.812534 (* 1 = 0.812534 loss)\nI1213 04:49:03.464350 20613 solver.cpp:228] Iteration 36600, loss = 0.12278\nI1213 04:49:03.464404 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 04:49:03.464422 20613 solver.cpp:244]     Train net output #1: loss = 0.12278 (* 1 = 0.12278 loss)\nI1213 04:49:03.557338 20613 
sgd_solver.cpp:174] Iteration 36600, lr = 1.098\nI1213 04:49:03.571228 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332104\nI1213 04:51:21.009419 20613 solver.cpp:337] Iteration 36700, Testing net (#0)\nI1213 04:52:42.760373 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78188\nI1213 04:52:42.760730 20613 solver.cpp:404]     Test net output #1: loss = 0.870725 (* 1 = 0.870725 loss)\nI1213 04:52:44.072932 20613 solver.cpp:228] Iteration 36700, loss = 0.0733516\nI1213 04:52:44.072986 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 04:52:44.073004 20613 solver.cpp:244]     Train net output #1: loss = 0.0733515 (* 1 = 0.0733515 loss)\nI1213 04:52:44.163681 20613 sgd_solver.cpp:174] Iteration 36700, lr = 1.101\nI1213 04:52:44.177553 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376941\nI1213 04:55:02.498627 20613 solver.cpp:337] Iteration 36800, Testing net (#0)\nI1213 04:56:24.247293 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77428\nI1213 04:56:24.247639 20613 solver.cpp:404]     Test net output #1: loss = 0.950996 (* 1 = 0.950996 loss)\nI1213 04:56:25.560111 20613 solver.cpp:228] Iteration 36800, loss = 0.107995\nI1213 04:56:25.560168 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 04:56:25.560186 20613 solver.cpp:244]     Train net output #1: loss = 0.107994 (* 1 = 0.107994 loss)\nI1213 04:56:25.655524 20613 sgd_solver.cpp:174] Iteration 36800, lr = 1.104\nI1213 04:56:25.669467 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.311025\nI1213 04:58:44.038151 20613 solver.cpp:337] Iteration 36900, Testing net (#0)\nI1213 05:00:05.788915 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84744\nI1213 05:00:05.789258 20613 solver.cpp:404]     Test net output #1: loss = 0.606571 (* 1 = 0.606571 loss)\nI1213 05:00:07.101812 20613 solver.cpp:228] Iteration 36900, loss = 0.176579\nI1213 05:00:07.101855 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 05:00:07.101872 20613 
solver.cpp:244]     Train net output #1: loss = 0.176579 (* 1 = 0.176579 loss)\nI1213 05:00:07.194216 20613 sgd_solver.cpp:174] Iteration 36900, lr = 1.107\nI1213 05:00:07.208055 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400227\nI1213 05:02:24.729327 20613 solver.cpp:337] Iteration 37000, Testing net (#0)\nI1213 05:03:46.477676 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71576\nI1213 05:03:46.478067 20613 solver.cpp:404]     Test net output #1: loss = 1.43713 (* 1 = 1.43713 loss)\nI1213 05:03:47.790524 20613 solver.cpp:228] Iteration 37000, loss = 0.102603\nI1213 05:03:47.790580 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 05:03:47.790598 20613 solver.cpp:244]     Train net output #1: loss = 0.102603 (* 1 = 0.102603 loss)\nI1213 05:03:47.880395 20613 sgd_solver.cpp:174] Iteration 37000, lr = 1.11\nI1213 05:03:47.894204 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382033\nI1213 05:06:06.139333 20613 solver.cpp:337] Iteration 37100, Testing net (#0)\nI1213 05:07:27.895879 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82304\nI1213 05:07:27.896232 20613 solver.cpp:404]     Test net output #1: loss = 0.721321 (* 1 = 0.721321 loss)\nI1213 05:07:29.208585 20613 solver.cpp:228] Iteration 37100, loss = 0.131029\nI1213 05:07:29.208639 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 05:07:29.208658 20613 solver.cpp:244]     Train net output #1: loss = 0.131028 (* 1 = 0.131028 loss)\nI1213 05:07:29.301662 20613 sgd_solver.cpp:174] Iteration 37100, lr = 1.113\nI1213 05:07:29.315450 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.413529\nI1213 05:09:46.777403 20613 solver.cpp:337] Iteration 37200, Testing net (#0)\nI1213 05:11:08.545384 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75696\nI1213 05:11:08.545749 20613 solver.cpp:404]     Test net output #1: loss = 1.08048 (* 1 = 1.08048 loss)\nI1213 05:11:09.858214 20613 solver.cpp:228] Iteration 37200, loss = 0.0841369\nI1213 
05:11:09.858270 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 05:11:09.858289 20613 solver.cpp:244]     Train net output #1: loss = 0.0841368 (* 1 = 0.0841368 loss)\nI1213 05:11:09.947886 20613 sgd_solver.cpp:174] Iteration 37200, lr = 1.116\nI1213 05:11:09.961762 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3473\nI1213 05:13:27.354773 20613 solver.cpp:337] Iteration 37300, Testing net (#0)\nI1213 05:14:49.127339 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7804\nI1213 05:14:49.127689 20613 solver.cpp:404]     Test net output #1: loss = 0.89502 (* 1 = 0.89502 loss)\nI1213 05:14:50.440500 20613 solver.cpp:228] Iteration 37300, loss = 0.0638287\nI1213 05:14:50.440549 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 05:14:50.440567 20613 solver.cpp:244]     Train net output #1: loss = 0.0638287 (* 1 = 0.0638287 loss)\nI1213 05:14:50.525774 20613 sgd_solver.cpp:174] Iteration 37300, lr = 1.119\nI1213 05:14:50.539268 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300606\nI1213 05:17:08.901203 20613 solver.cpp:337] Iteration 37400, Testing net (#0)\nI1213 05:18:30.673171 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73844\nI1213 05:18:30.673517 20613 solver.cpp:404]     Test net output #1: loss = 0.999243 (* 1 = 0.999243 loss)\nI1213 05:18:31.985896 20613 solver.cpp:228] Iteration 37400, loss = 0.144837\nI1213 05:18:31.985944 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 05:18:31.985961 20613 solver.cpp:244]     Train net output #1: loss = 0.144837 (* 1 = 0.144837 loss)\nI1213 05:18:32.080263 20613 sgd_solver.cpp:174] Iteration 37400, lr = 1.122\nI1213 05:18:32.094069 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.438535\nI1213 05:20:49.544823 20613 solver.cpp:337] Iteration 37500, Testing net (#0)\nI1213 05:22:11.316052 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7976\nI1213 05:22:11.316390 20613 solver.cpp:404]     Test net output #1: loss = 0.832969 (* 1 = 
0.832969 loss)\nI1213 05:22:12.628885 20613 solver.cpp:228] Iteration 37500, loss = 0.112646\nI1213 05:22:12.628937 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 05:22:12.628955 20613 solver.cpp:244]     Train net output #1: loss = 0.112646 (* 1 = 0.112646 loss)\nI1213 05:22:12.718426 20613 sgd_solver.cpp:174] Iteration 37500, lr = 1.125\nI1213 05:22:12.732270 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.368663\nI1213 05:24:30.186731 20613 solver.cpp:337] Iteration 37600, Testing net (#0)\nI1213 05:25:51.978772 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83432\nI1213 05:25:51.979145 20613 solver.cpp:404]     Test net output #1: loss = 0.581581 (* 1 = 0.581581 loss)\nI1213 05:25:53.291831 20613 solver.cpp:228] Iteration 37600, loss = 0.186234\nI1213 05:25:53.291893 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 05:25:53.291911 20613 solver.cpp:244]     Train net output #1: loss = 0.186234 (* 1 = 0.186234 loss)\nI1213 05:25:53.387339 20613 sgd_solver.cpp:174] Iteration 37600, lr = 1.128\nI1213 05:25:53.401204 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.38214\nI1213 05:28:11.707468 20613 solver.cpp:337] Iteration 37700, Testing net (#0)\nI1213 05:29:33.490507 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79596\nI1213 05:29:33.490886 20613 solver.cpp:404]     Test net output #1: loss = 0.854945 (* 1 = 0.854945 loss)\nI1213 05:29:34.803406 20613 solver.cpp:228] Iteration 37700, loss = 0.178231\nI1213 05:29:34.803463 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 05:29:34.803481 20613 solver.cpp:244]     Train net output #1: loss = 0.178231 (* 1 = 0.178231 loss)\nI1213 05:29:34.893126 20613 sgd_solver.cpp:174] Iteration 37700, lr = 1.131\nI1213 05:29:34.907002 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.386532\nI1213 05:31:52.444365 20613 solver.cpp:337] Iteration 37800, Testing net (#0)\nI1213 05:33:14.216677 20613 solver.cpp:404]     Test net output #0: accuracy = 
0.752\nI1213 05:33:14.217020 20613 solver.cpp:404]     Test net output #1: loss = 0.962527 (* 1 = 0.962527 loss)\nI1213 05:33:15.529346 20613 solver.cpp:228] Iteration 37800, loss = 0.106435\nI1213 05:33:15.529398 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 05:33:15.529417 20613 solver.cpp:244]     Train net output #1: loss = 0.106435 (* 1 = 0.106435 loss)\nI1213 05:33:15.625347 20613 sgd_solver.cpp:174] Iteration 37800, lr = 1.134\nI1213 05:33:15.639214 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.398506\nI1213 05:35:33.192876 20613 solver.cpp:337] Iteration 37900, Testing net (#0)\nI1213 05:36:54.952247 20613 solver.cpp:404]     Test net output #0: accuracy = 0.835\nI1213 05:36:54.952630 20613 solver.cpp:404]     Test net output #1: loss = 0.581884 (* 1 = 0.581884 loss)\nI1213 05:36:56.264802 20613 solver.cpp:228] Iteration 37900, loss = 0.0976939\nI1213 05:36:56.264859 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 05:36:56.264878 20613 solver.cpp:244]     Train net output #1: loss = 0.0976939 (* 1 = 0.0976939 loss)\nI1213 05:36:56.357795 20613 sgd_solver.cpp:174] Iteration 37900, lr = 1.137\nI1213 05:36:56.371222 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.36726\nI1213 05:39:14.681998 20613 solver.cpp:337] Iteration 38000, Testing net (#0)\nI1213 05:40:36.448868 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79536\nI1213 05:40:36.449206 20613 solver.cpp:404]     Test net output #1: loss = 0.894425 (* 1 = 0.894425 loss)\nI1213 05:40:37.762100 20613 solver.cpp:228] Iteration 38000, loss = 0.114043\nI1213 05:40:37.762152 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 05:40:37.762169 20613 solver.cpp:244]     Train net output #1: loss = 0.114043 (* 1 = 0.114043 loss)\nI1213 05:40:37.853682 20613 sgd_solver.cpp:174] Iteration 38000, lr = 1.14\nI1213 05:40:37.867532 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.353335\nI1213 05:42:56.154978 20613 solver.cpp:337] Iteration 38100, 
Testing net (#0)\nI1213 05:44:17.933339 20613 solver.cpp:404]     Test net output #0: accuracy = 0.845681\nI1213 05:44:17.933712 20613 solver.cpp:404]     Test net output #1: loss = 0.5981 (* 1 = 0.5981 loss)\nI1213 05:44:19.246126 20613 solver.cpp:228] Iteration 38100, loss = 0.101092\nI1213 05:44:19.246187 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 05:44:19.246206 20613 solver.cpp:244]     Train net output #1: loss = 0.101092 (* 1 = 0.101092 loss)\nI1213 05:44:19.341665 20613 sgd_solver.cpp:174] Iteration 38100, lr = 1.143\nI1213 05:44:19.355543 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.432251\nI1213 05:46:36.907963 20613 solver.cpp:337] Iteration 38200, Testing net (#0)\nI1213 05:47:58.685133 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82432\nI1213 05:47:58.685500 20613 solver.cpp:404]     Test net output #1: loss = 0.726741 (* 1 = 0.726741 loss)\nI1213 05:47:59.998394 20613 solver.cpp:228] Iteration 38200, loss = 0.172102\nI1213 05:47:59.998445 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 05:47:59.998461 20613 solver.cpp:244]     Train net output #1: loss = 0.172102 (* 1 = 0.172102 loss)\nI1213 05:48:00.093515 20613 sgd_solver.cpp:174] Iteration 38200, lr = 1.146\nI1213 05:48:00.107396 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376588\nI1213 05:50:18.438233 20613 solver.cpp:337] Iteration 38300, Testing net (#0)\nI1213 05:51:40.202860 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79844\nI1213 05:51:40.203217 20613 solver.cpp:404]     Test net output #1: loss = 0.742946 (* 1 = 0.742946 loss)\nI1213 05:51:41.515517 20613 solver.cpp:228] Iteration 38300, loss = 0.107577\nI1213 05:51:41.515571 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 05:51:41.515590 20613 solver.cpp:244]     Train net output #1: loss = 0.107576 (* 1 = 0.107576 loss)\nI1213 05:51:41.604689 20613 sgd_solver.cpp:174] Iteration 38300, lr = 1.149\nI1213 05:51:41.618465 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.40147\nI1213 05:53:59.024292 20613 solver.cpp:337] Iteration 38400, Testing net (#0)\nI1213 05:55:20.492789 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78792\nI1213 05:55:20.493105 20613 solver.cpp:404]     Test net output #1: loss = 0.840393 (* 1 = 0.840393 loss)\nI1213 05:55:21.806023 20613 solver.cpp:228] Iteration 38400, loss = 0.176902\nI1213 05:55:21.806076 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 05:55:21.806093 20613 solver.cpp:244]     Train net output #1: loss = 0.176902 (* 1 = 0.176902 loss)\nI1213 05:55:21.895709 20613 sgd_solver.cpp:174] Iteration 38400, lr = 1.152\nI1213 05:55:21.909552 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.367412\nI1213 05:57:39.612390 20613 solver.cpp:337] Iteration 38500, Testing net (#0)\nI1213 05:59:01.197337 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79872\nI1213 05:59:01.197659 20613 solver.cpp:404]     Test net output #1: loss = 0.827443 (* 1 = 0.827443 loss)\nI1213 05:59:02.511212 20613 solver.cpp:228] Iteration 38500, loss = 0.0945517\nI1213 05:59:02.511255 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 05:59:02.511271 20613 solver.cpp:244]     Train net output #1: loss = 0.0945516 (* 1 = 0.0945516 loss)\nI1213 05:59:02.604061 20613 sgd_solver.cpp:174] Iteration 38500, lr = 1.155\nI1213 05:59:02.618010 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.370923\nI1213 06:01:21.185717 20613 solver.cpp:337] Iteration 38600, Testing net (#0)\nI1213 06:02:42.787844 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8468\nI1213 06:02:42.788197 20613 solver.cpp:404]     Test net output #1: loss = 0.563606 (* 1 = 0.563606 loss)\nI1213 06:02:44.102653 20613 solver.cpp:228] Iteration 38600, loss = 0.135212\nI1213 06:02:44.102713 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:02:44.102732 20613 solver.cpp:244]     Train net output #1: loss = 0.135211 (* 1 = 0.135211 
loss)\nI1213 06:02:44.194411 20613 sgd_solver.cpp:174] Iteration 38600, lr = 1.158\nI1213 06:02:44.207821 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405445\nI1213 06:05:01.907222 20613 solver.cpp:337] Iteration 38700, Testing net (#0)\nI1213 06:06:23.555344 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82168\nI1213 06:06:23.555675 20613 solver.cpp:404]     Test net output #1: loss = 0.705478 (* 1 = 0.705478 loss)\nI1213 06:06:24.869884 20613 solver.cpp:228] Iteration 38700, loss = 0.0923432\nI1213 06:06:24.869930 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 06:06:24.869947 20613 solver.cpp:244]     Train net output #1: loss = 0.0923431 (* 1 = 0.0923431 loss)\nI1213 06:06:24.959496 20613 sgd_solver.cpp:174] Iteration 38700, lr = 1.161\nI1213 06:06:24.973467 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.366026\nI1213 06:08:43.436574 20613 solver.cpp:337] Iteration 38800, Testing net (#0)\nI1213 06:10:05.197639 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8442\nI1213 06:10:05.198004 20613 solver.cpp:404]     Test net output #1: loss = 0.593012 (* 1 = 0.593012 loss)\nI1213 06:10:06.512413 20613 solver.cpp:228] Iteration 38800, loss = 0.107425\nI1213 06:10:06.512475 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 06:10:06.512492 20613 solver.cpp:244]     Train net output #1: loss = 0.107425 (* 1 = 0.107425 loss)\nI1213 06:10:06.606897 20613 sgd_solver.cpp:174] Iteration 38800, lr = 1.164\nI1213 06:10:06.620826 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.398159\nI1213 06:12:25.146958 20613 solver.cpp:337] Iteration 38900, Testing net (#0)\nI1213 06:13:46.921747 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8332\nI1213 06:13:46.922101 20613 solver.cpp:404]     Test net output #1: loss = 0.591667 (* 1 = 0.591667 loss)\nI1213 06:13:48.236766 20613 solver.cpp:228] Iteration 38900, loss = 0.129294\nI1213 06:13:48.236807 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.944\nI1213 06:13:48.236824 20613 solver.cpp:244]     Train net output #1: loss = 0.129294 (* 1 = 0.129294 loss)\nI1213 06:13:48.327648 20613 sgd_solver.cpp:174] Iteration 38900, lr = 1.167\nI1213 06:13:48.341493 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.366589\nI1213 06:16:06.009238 20613 solver.cpp:337] Iteration 39000, Testing net (#0)\nI1213 06:17:27.811265 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80536\nI1213 06:17:27.811614 20613 solver.cpp:404]     Test net output #1: loss = 0.753087 (* 1 = 0.753087 loss)\nI1213 06:17:29.126689 20613 solver.cpp:228] Iteration 39000, loss = 0.113716\nI1213 06:17:29.126749 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 06:17:29.126766 20613 solver.cpp:244]     Train net output #1: loss = 0.113716 (* 1 = 0.113716 loss)\nI1213 06:17:29.219478 20613 sgd_solver.cpp:174] Iteration 39000, lr = 1.17\nI1213 06:17:29.233319 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.381399\nI1213 06:19:46.973639 20613 solver.cpp:337] Iteration 39100, Testing net (#0)\nI1213 06:21:08.777184 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85624\nI1213 06:21:08.777549 20613 solver.cpp:404]     Test net output #1: loss = 0.508323 (* 1 = 0.508323 loss)\nI1213 06:21:10.092281 20613 solver.cpp:228] Iteration 39100, loss = 0.169162\nI1213 06:21:10.092325 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 06:21:10.092342 20613 solver.cpp:244]     Train net output #1: loss = 0.169162 (* 1 = 0.169162 loss)\nI1213 06:21:10.182248 20613 sgd_solver.cpp:174] Iteration 39100, lr = 1.173\nI1213 06:21:10.195688 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362561\nI1213 06:23:28.759311 20613 solver.cpp:337] Iteration 39200, Testing net (#0)\nI1213 06:24:50.583420 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80248\nI1213 06:24:50.583778 20613 solver.cpp:404]     Test net output #1: loss = 0.708904 (* 1 = 0.708904 loss)\nI1213 06:24:51.900900 20613 solver.cpp:228] Iteration 
39200, loss = 0.122807\nI1213 06:24:51.900962 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 06:24:51.900980 20613 solver.cpp:244]     Train net output #1: loss = 0.122807 (* 1 = 0.122807 loss)\nI1213 06:24:51.985819 20613 sgd_solver.cpp:174] Iteration 39200, lr = 1.176\nI1213 06:24:51.999776 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415638\nI1213 06:27:09.764464 20613 solver.cpp:337] Iteration 39300, Testing net (#0)\nI1213 06:28:31.551703 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83072\nI1213 06:28:31.552085 20613 solver.cpp:404]     Test net output #1: loss = 0.610749 (* 1 = 0.610749 loss)\nI1213 06:28:32.867198 20613 solver.cpp:228] Iteration 39300, loss = 0.142385\nI1213 06:28:32.867259 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 06:28:32.867277 20613 solver.cpp:244]     Train net output #1: loss = 0.142385 (* 1 = 0.142385 loss)\nI1213 06:28:32.960099 20613 sgd_solver.cpp:174] Iteration 39300, lr = 1.179\nI1213 06:28:32.974040 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.354438\nI1213 06:30:51.520651 20613 solver.cpp:337] Iteration 39400, Testing net (#0)\nI1213 06:32:13.291908 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82104\nI1213 06:32:13.292294 20613 solver.cpp:404]     Test net output #1: loss = 0.715438 (* 1 = 0.715438 loss)\nI1213 06:32:14.607235 20613 solver.cpp:228] Iteration 39400, loss = 0.119372\nI1213 06:32:14.607295 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:32:14.607313 20613 solver.cpp:244]     Train net output #1: loss = 0.119372 (* 1 = 0.119372 loss)\nI1213 06:32:14.701963 20613 sgd_solver.cpp:174] Iteration 39400, lr = 1.182\nI1213 06:32:14.715761 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.383717\nI1213 06:34:32.349174 20613 solver.cpp:337] Iteration 39500, Testing net (#0)\nI1213 06:35:54.126122 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI1213 06:35:54.126492 20613 solver.cpp:404]     Test net 
output #1: loss = 0.76667 (* 1 = 0.76667 loss)\nI1213 06:35:55.441283 20613 solver.cpp:228] Iteration 39500, loss = 0.155031\nI1213 06:35:55.441323 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:35:55.441339 20613 solver.cpp:244]     Train net output #1: loss = 0.155031 (* 1 = 0.155031 loss)\nI1213 06:35:55.532979 20613 sgd_solver.cpp:174] Iteration 39500, lr = 1.185\nI1213 06:35:55.546933 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.460722\nI1213 06:38:13.278153 20613 solver.cpp:337] Iteration 39600, Testing net (#0)\nI1213 06:39:35.085259 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82636\nI1213 06:39:35.085610 20613 solver.cpp:404]     Test net output #1: loss = 0.665605 (* 1 = 0.665605 loss)\nI1213 06:39:36.401846 20613 solver.cpp:228] Iteration 39600, loss = 0.173284\nI1213 06:39:36.401902 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 06:39:36.401921 20613 solver.cpp:244]     Train net output #1: loss = 0.173284 (* 1 = 0.173284 loss)\nI1213 06:39:36.488809 20613 sgd_solver.cpp:174] Iteration 39600, lr = 1.188\nI1213 06:39:36.502641 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.436344\nI1213 06:41:54.067973 20613 solver.cpp:337] Iteration 39700, Testing net (#0)\nI1213 06:43:15.879655 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82716\nI1213 06:43:15.880009 20613 solver.cpp:404]     Test net output #1: loss = 0.675325 (* 1 = 0.675325 loss)\nI1213 06:43:17.195106 20613 solver.cpp:228] Iteration 39700, loss = 0.17285\nI1213 06:43:17.195160 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:43:17.195178 20613 solver.cpp:244]     Train net output #1: loss = 0.17285 (* 1 = 0.17285 loss)\nI1213 06:43:17.285527 20613 sgd_solver.cpp:174] Iteration 39700, lr = 1.191\nI1213 06:43:17.299394 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.37078\nI1213 06:45:34.883656 20613 solver.cpp:337] Iteration 39800, Testing net (#0)\nI1213 06:46:56.705066 20613 solver.cpp:404]     
Test net output #0: accuracy = 0.83156\nI1213 06:46:56.705451 20613 solver.cpp:404]     Test net output #1: loss = 0.670254 (* 1 = 0.670254 loss)\nI1213 06:46:58.021811 20613 solver.cpp:228] Iteration 39800, loss = 0.137013\nI1213 06:46:58.021865 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 06:46:58.021889 20613 solver.cpp:244]     Train net output #1: loss = 0.137013 (* 1 = 0.137013 loss)\nI1213 06:46:58.114140 20613 sgd_solver.cpp:174] Iteration 39800, lr = 1.194\nI1213 06:46:58.127621 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.387263\nI1213 06:49:16.513155 20613 solver.cpp:337] Iteration 39900, Testing net (#0)\nI1213 06:50:38.336591 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79896\nI1213 06:50:38.336949 20613 solver.cpp:404]     Test net output #1: loss = 0.749659 (* 1 = 0.749659 loss)\nI1213 06:50:39.652848 20613 solver.cpp:228] Iteration 39900, loss = 0.118334\nI1213 06:50:39.652907 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:50:39.652932 20613 solver.cpp:244]     Train net output #1: loss = 0.118334 (* 1 = 0.118334 loss)\nI1213 06:50:39.742372 20613 sgd_solver.cpp:174] Iteration 39900, lr = 1.197\nI1213 06:50:39.756652 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.370102\nI1213 06:52:58.136646 20613 solver.cpp:337] Iteration 40000, Testing net (#0)\nI1213 06:54:19.950611 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75828\nI1213 06:54:19.950975 20613 solver.cpp:404]     Test net output #1: loss = 0.997559 (* 1 = 0.997559 loss)\nI1213 06:54:21.267438 20613 solver.cpp:228] Iteration 40000, loss = 0.105967\nI1213 06:54:21.267495 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 06:54:21.267521 20613 solver.cpp:244]     Train net output #1: loss = 0.105967 (* 1 = 0.105967 loss)\nI1213 06:54:21.355931 20613 sgd_solver.cpp:174] Iteration 40000, lr = 1.2\nI1213 06:54:21.369668 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.341034\nI1213 06:56:39.774052 20613 
solver.cpp:337] Iteration 40100, Testing net (#0)\nI1213 06:58:01.552592 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83216\nI1213 06:58:01.552986 20613 solver.cpp:404]     Test net output #1: loss = 0.683044 (* 1 = 0.683044 loss)\nI1213 06:58:02.868218 20613 solver.cpp:228] Iteration 40100, loss = 0.0764363\nI1213 06:58:02.868278 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 06:58:02.868301 20613 solver.cpp:244]     Train net output #1: loss = 0.0764362 (* 1 = 0.0764362 loss)\nI1213 06:58:02.959245 20613 sgd_solver.cpp:174] Iteration 40100, lr = 1.203\nI1213 06:58:02.973141 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.379756\nI1213 07:00:21.401829 20613 solver.cpp:337] Iteration 40200, Testing net (#0)\nI1213 07:01:43.179946 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80896\nI1213 07:01:43.180330 20613 solver.cpp:404]     Test net output #1: loss = 0.666951 (* 1 = 0.666951 loss)\nI1213 07:01:44.495527 20613 solver.cpp:228] Iteration 40200, loss = 0.0401964\nI1213 07:01:44.495584 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 07:01:44.495610 20613 solver.cpp:244]     Train net output #1: loss = 0.0401963 (* 1 = 0.0401963 loss)\nI1213 07:01:44.584857 20613 sgd_solver.cpp:174] Iteration 40200, lr = 1.206\nI1213 07:01:44.598762 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.359938\nI1213 07:04:03.086978 20613 solver.cpp:337] Iteration 40300, Testing net (#0)\nI1213 07:05:24.863235 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72532\nI1213 07:05:24.863622 20613 solver.cpp:404]     Test net output #1: loss = 1.14223 (* 1 = 1.14223 loss)\nI1213 07:05:26.177954 20613 solver.cpp:228] Iteration 40300, loss = 0.162345\nI1213 07:05:26.178014 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:05:26.178040 20613 solver.cpp:244]     Train net output #1: loss = 0.162345 (* 1 = 0.162345 loss)\nI1213 07:05:26.272198 20613 sgd_solver.cpp:174] Iteration 40300, lr = 
1.209\nI1213 07:05:26.286193 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376799\nI1213 07:07:43.861943 20613 solver.cpp:337] Iteration 40400, Testing net (#0)\nI1213 07:09:05.637630 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80692\nI1213 07:09:05.637995 20613 solver.cpp:404]     Test net output #1: loss = 0.776858 (* 1 = 0.776858 loss)\nI1213 07:09:06.953090 20613 solver.cpp:228] Iteration 40400, loss = 0.166639\nI1213 07:09:06.953143 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 07:09:06.953168 20613 solver.cpp:244]     Train net output #1: loss = 0.166639 (* 1 = 0.166639 loss)\nI1213 07:09:07.045445 20613 sgd_solver.cpp:174] Iteration 40400, lr = 1.212\nI1213 07:09:07.059386 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.435042\nI1213 07:11:25.498347 20613 solver.cpp:337] Iteration 40500, Testing net (#0)\nI1213 07:12:47.255334 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83808\nI1213 07:12:47.255690 20613 solver.cpp:404]     Test net output #1: loss = 0.60831 (* 1 = 0.60831 loss)\nI1213 07:12:48.570096 20613 solver.cpp:228] Iteration 40500, loss = 0.139413\nI1213 07:12:48.570148 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:12:48.570165 20613 solver.cpp:244]     Train net output #1: loss = 0.139413 (* 1 = 0.139413 loss)\nI1213 07:12:48.666198 20613 sgd_solver.cpp:174] Iteration 40500, lr = 1.215\nI1213 07:12:48.680061 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.311966\nI1213 07:15:06.372598 20613 solver.cpp:337] Iteration 40600, Testing net (#0)\nI1213 07:16:28.139111 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8094\nI1213 07:16:28.139538 20613 solver.cpp:404]     Test net output #1: loss = 0.788441 (* 1 = 0.788441 loss)\nI1213 07:16:29.453977 20613 solver.cpp:228] Iteration 40600, loss = 0.120008\nI1213 07:16:29.454027 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:16:29.454044 20613 solver.cpp:244]     Train net output #1: loss = 
0.120008 (* 1 = 0.120008 loss)\nI1213 07:16:29.546942 20613 sgd_solver.cpp:174] Iteration 40600, lr = 1.218\nI1213 07:16:29.560796 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.390169\nI1213 07:18:47.998252 20613 solver.cpp:337] Iteration 40700, Testing net (#0)\nI1213 07:20:09.755254 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78472\nI1213 07:20:09.755592 20613 solver.cpp:404]     Test net output #1: loss = 0.858791 (* 1 = 0.858791 loss)\nI1213 07:20:11.070159 20613 solver.cpp:228] Iteration 40700, loss = 0.0554708\nI1213 07:20:11.070209 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 07:20:11.070226 20613 solver.cpp:244]     Train net output #1: loss = 0.0554708 (* 1 = 0.0554708 loss)\nI1213 07:20:11.163686 20613 sgd_solver.cpp:174] Iteration 40700, lr = 1.221\nI1213 07:20:11.177513 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306153\nI1213 07:22:29.630451 20613 solver.cpp:337] Iteration 40800, Testing net (#0)\nI1213 07:23:51.398809 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1213 07:23:51.399164 20613 solver.cpp:404]     Test net output #1: loss = 0.808311 (* 1 = 0.808311 loss)\nI1213 07:23:52.714460 20613 solver.cpp:228] Iteration 40800, loss = 0.200463\nI1213 07:23:52.714519 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 07:23:52.714535 20613 solver.cpp:244]     Train net output #1: loss = 0.200463 (* 1 = 0.200463 loss)\nI1213 07:23:52.803112 20613 sgd_solver.cpp:174] Iteration 40800, lr = 1.224\nI1213 07:23:52.816973 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.465705\nI1213 07:26:11.327596 20613 solver.cpp:337] Iteration 40900, Testing net (#0)\nI1213 07:27:33.092661 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83592\nI1213 07:27:33.093050 20613 solver.cpp:404]     Test net output #1: loss = 0.587481 (* 1 = 0.587481 loss)\nI1213 07:27:34.407022 20613 solver.cpp:228] Iteration 40900, loss = 0.0988227\nI1213 07:27:34.407071 20613 solver.cpp:244]     Train net 
output #0: accuracy = 0.952\nI1213 07:27:34.407088 20613 solver.cpp:244]     Train net output #1: loss = 0.0988227 (* 1 = 0.0988227 loss)\nI1213 07:27:34.500439 20613 sgd_solver.cpp:174] Iteration 40900, lr = 1.227\nI1213 07:27:34.514266 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.317991\nI1213 07:29:52.162083 20613 solver.cpp:337] Iteration 41000, Testing net (#0)\nI1213 07:31:13.932672 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79856\nI1213 07:31:13.933048 20613 solver.cpp:404]     Test net output #1: loss = 0.702925 (* 1 = 0.702925 loss)\nI1213 07:31:15.247169 20613 solver.cpp:228] Iteration 41000, loss = 0.146417\nI1213 07:31:15.247226 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 07:31:15.247244 20613 solver.cpp:244]     Train net output #1: loss = 0.146416 (* 1 = 0.146416 loss)\nI1213 07:31:15.334394 20613 sgd_solver.cpp:174] Iteration 41000, lr = 1.23\nI1213 07:31:15.348222 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.373196\nI1213 07:33:33.852341 20613 solver.cpp:337] Iteration 41100, Testing net (#0)\nI1213 07:34:55.630344 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79712\nI1213 07:34:55.630723 20613 solver.cpp:404]     Test net output #1: loss = 0.791087 (* 1 = 0.791087 loss)\nI1213 07:34:56.946264 20613 solver.cpp:228] Iteration 41100, loss = 0.1816\nI1213 07:34:56.946317 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:34:56.946336 20613 solver.cpp:244]     Train net output #1: loss = 0.1816 (* 1 = 0.1816 loss)\nI1213 07:34:57.037837 20613 sgd_solver.cpp:174] Iteration 41100, lr = 1.233\nI1213 07:34:57.051700 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.449613\nI1213 07:37:15.523931 20613 solver.cpp:337] Iteration 41200, Testing net (#0)\nI1213 07:38:37.313944 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82416\nI1213 07:38:37.314298 20613 solver.cpp:404]     Test net output #1: loss = 0.623422 (* 1 = 0.623422 loss)\nI1213 07:38:38.628897 20613 
solver.cpp:228] Iteration 41200, loss = 0.1496\nI1213 07:38:38.628957 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 07:38:38.628975 20613 solver.cpp:244]     Train net output #1: loss = 0.1496 (* 1 = 0.1496 loss)\nI1213 07:38:38.719558 20613 sgd_solver.cpp:174] Iteration 41200, lr = 1.236\nI1213 07:38:38.733398 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.434354\nI1213 07:40:57.229212 20613 solver.cpp:337] Iteration 41300, Testing net (#0)\nI1213 07:42:19.020855 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7816\nI1213 07:42:19.021219 20613 solver.cpp:404]     Test net output #1: loss = 0.870395 (* 1 = 0.870395 loss)\nI1213 07:42:20.336501 20613 solver.cpp:228] Iteration 41300, loss = 0.146708\nI1213 07:42:20.336555 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1213 07:42:20.336572 20613 solver.cpp:244]     Train net output #1: loss = 0.146708 (* 1 = 0.146708 loss)\nI1213 07:42:20.425047 20613 sgd_solver.cpp:174] Iteration 41300, lr = 1.239\nI1213 07:42:20.438949 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.39559\nI1213 07:44:38.024554 20613 solver.cpp:337] Iteration 41400, Testing net (#0)\nI1213 07:45:59.821214 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80392\nI1213 07:45:59.821590 20613 solver.cpp:404]     Test net output #1: loss = 0.79073 (* 1 = 0.79073 loss)\nI1213 07:46:01.137450 20613 solver.cpp:228] Iteration 41400, loss = 0.142101\nI1213 07:46:01.137511 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:46:01.137537 20613 solver.cpp:244]     Train net output #1: loss = 0.142101 (* 1 = 0.142101 loss)\nI1213 07:46:01.224295 20613 sgd_solver.cpp:174] Iteration 41400, lr = 1.242\nI1213 07:46:01.238255 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405985\nI1213 07:48:19.831790 20613 solver.cpp:337] Iteration 41500, Testing net (#0)\nI1213 07:49:41.618531 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79292\nI1213 07:49:41.618911 20613 solver.cpp:404]  
   Test net output #1: loss = 0.852263 (* 1 = 0.852263 loss)\nI1213 07:49:42.933903 20613 solver.cpp:228] Iteration 41500, loss = 0.0666519\nI1213 07:49:42.933965 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 07:49:42.933984 20613 solver.cpp:244]     Train net output #1: loss = 0.0666518 (* 1 = 0.0666518 loss)\nI1213 07:49:43.021356 20613 sgd_solver.cpp:174] Iteration 41500, lr = 1.245\nI1213 07:49:43.035181 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316183\nI1213 07:52:00.793640 20613 solver.cpp:337] Iteration 41600, Testing net (#0)\nI1213 07:53:22.577038 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79156\nI1213 07:53:22.577419 20613 solver.cpp:404]     Test net output #1: loss = 0.852159 (* 1 = 0.852159 loss)\nI1213 07:53:23.892549 20613 solver.cpp:228] Iteration 41600, loss = 0.157602\nI1213 07:53:23.892603 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 07:53:23.892619 20613 solver.cpp:244]     Train net output #1: loss = 0.157602 (* 1 = 0.157602 loss)\nI1213 07:53:23.984472 20613 sgd_solver.cpp:174] Iteration 41600, lr = 1.248\nI1213 07:53:23.998302 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.375457\nI1213 07:55:42.558275 20613 solver.cpp:337] Iteration 41700, Testing net (#0)\nI1213 07:57:04.332031 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75304\nI1213 07:57:04.332383 20613 solver.cpp:404]     Test net output #1: loss = 0.998221 (* 1 = 0.998221 loss)\nI1213 07:57:05.647668 20613 solver.cpp:228] Iteration 41700, loss = 0.0853965\nI1213 07:57:05.647723 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 07:57:05.647740 20613 solver.cpp:244]     Train net output #1: loss = 0.0853965 (* 1 = 0.0853965 loss)\nI1213 07:57:05.741101 20613 sgd_solver.cpp:174] Iteration 41700, lr = 1.251\nI1213 07:57:05.754966 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.335076\nI1213 07:59:24.296545 20613 solver.cpp:337] Iteration 41800, Testing net (#0)\nI1213 08:00:46.067322 
20613 solver.cpp:404]     Test net output #0: accuracy = 0.74456\nI1213 08:00:46.067647 20613 solver.cpp:404]     Test net output #1: loss = 1.09821 (* 1 = 1.09821 loss)\nI1213 08:00:47.382304 20613 solver.cpp:228] Iteration 41800, loss = 0.232753\nI1213 08:00:47.382359 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 08:00:47.382375 20613 solver.cpp:244]     Train net output #1: loss = 0.232753 (* 1 = 0.232753 loss)\nI1213 08:00:47.469646 20613 sgd_solver.cpp:174] Iteration 41800, lr = 1.254\nI1213 08:00:47.483534 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.405041\nI1213 08:03:05.909749 20613 solver.cpp:337] Iteration 41900, Testing net (#0)\nI1213 08:04:27.683234 20613 solver.cpp:404]     Test net output #0: accuracy = 0.779\nI1213 08:04:27.683575 20613 solver.cpp:404]     Test net output #1: loss = 0.953331 (* 1 = 0.953331 loss)\nI1213 08:04:28.999089 20613 solver.cpp:228] Iteration 41900, loss = 0.170432\nI1213 08:04:28.999141 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 08:04:28.999157 20613 solver.cpp:244]     Train net output #1: loss = 0.170432 (* 1 = 0.170432 loss)\nI1213 08:04:29.091081 20613 sgd_solver.cpp:174] Iteration 41900, lr = 1.257\nI1213 08:04:29.104902 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.288332\nI1213 08:06:47.523347 20613 solver.cpp:337] Iteration 42000, Testing net (#0)\nI1213 08:08:09.313946 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81988\nI1213 08:08:09.314301 20613 solver.cpp:404]     Test net output #1: loss = 0.72222 (* 1 = 0.72222 loss)\nI1213 08:08:10.629017 20613 solver.cpp:228] Iteration 42000, loss = 0.22118\nI1213 08:08:10.629073 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 08:08:10.629089 20613 solver.cpp:244]     Train net output #1: loss = 0.22118 (* 1 = 0.22118 loss)\nI1213 08:08:10.724195 20613 sgd_solver.cpp:174] Iteration 42000, lr = 1.26\nI1213 08:08:10.738016 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.386721\nI1213 
08:10:28.351755 20613 solver.cpp:337] Iteration 42100, Testing net (#0)\nI1213 08:11:50.150677 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71784\nI1213 08:11:50.151072 20613 solver.cpp:404]     Test net output #1: loss = 1.39944 (* 1 = 1.39944 loss)\nI1213 08:11:51.465813 20613 solver.cpp:228] Iteration 42100, loss = 0.135301\nI1213 08:11:51.465874 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 08:11:51.465891 20613 solver.cpp:244]     Train net output #1: loss = 0.135301 (* 1 = 0.135301 loss)\nI1213 08:11:51.557206 20613 sgd_solver.cpp:174] Iteration 42100, lr = 1.263\nI1213 08:11:51.571071 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.381394\nI1213 08:14:09.206943 20613 solver.cpp:337] Iteration 42200, Testing net (#0)\nI1213 08:15:30.998746 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79784\nI1213 08:15:30.999111 20613 solver.cpp:404]     Test net output #1: loss = 0.765207 (* 1 = 0.765207 loss)\nI1213 08:15:32.313983 20613 solver.cpp:228] Iteration 42200, loss = 0.154278\nI1213 08:15:32.314033 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 08:15:32.314049 20613 solver.cpp:244]     Train net output #1: loss = 0.154278 (* 1 = 0.154278 loss)\nI1213 08:15:32.407193 20613 sgd_solver.cpp:174] Iteration 42200, lr = 1.266\nI1213 08:15:32.420943 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.342215\nI1213 08:17:50.820114 20613 solver.cpp:337] Iteration 42300, Testing net (#0)\nI1213 08:19:12.620108 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8132\nI1213 08:19:12.620486 20613 solver.cpp:404]     Test net output #1: loss = 0.674598 (* 1 = 0.674598 loss)\nI1213 08:19:13.935063 20613 solver.cpp:228] Iteration 42300, loss = 0.164159\nI1213 08:19:13.935112 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 08:19:13.935128 20613 solver.cpp:244]     Train net output #1: loss = 0.164159 (* 1 = 0.164159 loss)\nI1213 08:19:14.027833 20613 sgd_solver.cpp:174] Iteration 
42300, lr = 1.269\nI1213 08:19:14.041692 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.380133\nI1213 08:21:32.491516 20613 solver.cpp:337] Iteration 42400, Testing net (#0)\nI1213 08:22:54.291821 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8224\nI1213 08:22:54.292166 20613 solver.cpp:404]     Test net output #1: loss = 0.603749 (* 1 = 0.603749 loss)\nI1213 08:22:55.606247 20613 solver.cpp:228] Iteration 42400, loss = 0.0882155\nI1213 08:22:55.606299 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 08:22:55.606315 20613 solver.cpp:244]     Train net output #1: loss = 0.0882154 (* 1 = 0.0882154 loss)\nI1213 08:22:55.695586 20613 sgd_solver.cpp:174] Iteration 42400, lr = 1.272\nI1213 08:22:55.709400 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.361224\nI1213 08:25:14.143689 20613 solver.cpp:337] Iteration 42500, Testing net (#0)\nI1213 08:26:35.939103 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77556\nI1213 08:26:35.939504 20613 solver.cpp:404]     Test net output #1: loss = 0.971229 (* 1 = 0.971229 loss)\nI1213 08:26:37.253939 20613 solver.cpp:228] Iteration 42500, loss = 0.153576\nI1213 08:26:37.253993 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 08:26:37.254009 20613 solver.cpp:244]     Train net output #1: loss = 0.153576 (* 1 = 0.153576 loss)\nI1213 08:26:37.341918 20613 sgd_solver.cpp:174] Iteration 42500, lr = 1.275\nI1213 08:26:37.355792 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.370207\nI1213 08:28:55.742522 20613 solver.cpp:337] Iteration 42600, Testing net (#0)\nI1213 08:30:17.544706 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8356\nI1213 08:30:17.545078 20613 solver.cpp:404]     Test net output #1: loss = 0.566523 (* 1 = 0.566523 loss)\nI1213 08:30:18.859544 20613 solver.cpp:228] Iteration 42600, loss = 0.0805085\nI1213 08:30:18.859601 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 08:30:18.859617 20613 solver.cpp:244]     Train net output 
#1: loss = 0.0805084 (* 1 = 0.0805084 loss)\nI1213 08:30:18.953577 20613 sgd_solver.cpp:174] Iteration 42600, lr = 1.278\nI1213 08:30:18.967427 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327461\nI1213 08:32:37.440181 20613 solver.cpp:337] Iteration 42700, Testing net (#0)\nI1213 08:33:59.237810 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78428\nI1213 08:33:59.238193 20613 solver.cpp:404]     Test net output #1: loss = 0.860048 (* 1 = 0.860048 loss)\nI1213 08:34:00.553066 20613 solver.cpp:228] Iteration 42700, loss = 0.123852\nI1213 08:34:00.553118 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 08:34:00.553134 20613 solver.cpp:244]     Train net output #1: loss = 0.123852 (* 1 = 0.123852 loss)\nI1213 08:34:00.642534 20613 sgd_solver.cpp:174] Iteration 42700, lr = 1.281\nI1213 08:34:00.656463 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.38753\nI1213 08:36:19.104532 20613 solver.cpp:337] Iteration 42800, Testing net (#0)\nI1213 08:37:40.902029 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67076\nI1213 08:37:40.902415 20613 solver.cpp:404]     Test net output #1: loss = 1.67579 (* 1 = 1.67579 loss)\nI1213 08:37:42.217900 20613 solver.cpp:228] Iteration 42800, loss = 0.145703\nI1213 08:37:42.217950 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 08:37:42.217967 20613 solver.cpp:244]     Train net output #1: loss = 0.145703 (* 1 = 0.145703 loss)\nI1213 08:37:42.311908 20613 sgd_solver.cpp:174] Iteration 42800, lr = 1.284\nI1213 08:37:42.325736 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.412742\nI1213 08:40:00.647944 20613 solver.cpp:337] Iteration 42900, Testing net (#0)\nI1213 08:41:21.430436 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81852\nI1213 08:41:21.430764 20613 solver.cpp:404]     Test net output #1: loss = 0.682293 (* 1 = 0.682293 loss)\nI1213 08:41:22.742537 20613 solver.cpp:228] Iteration 42900, loss = 0.0876677\nI1213 08:41:22.742581 20613 solver.cpp:244]     
Train net output #0: accuracy = 0.968\nI1213 08:41:22.742599 20613 solver.cpp:244]     Train net output #1: loss = 0.0876676 (* 1 = 0.0876676 loss)\nI1213 08:41:22.833492 20613 sgd_solver.cpp:174] Iteration 42900, lr = 1.287\nI1213 08:41:22.846192 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389645\nI1213 08:43:40.991807 20613 solver.cpp:337] Iteration 43000, Testing net (#0)\nI1213 08:45:01.773258 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80872\nI1213 08:45:01.773522 20613 solver.cpp:404]     Test net output #1: loss = 0.753223 (* 1 = 0.753223 loss)\nI1213 08:45:03.083811 20613 solver.cpp:228] Iteration 43000, loss = 0.11769\nI1213 08:45:03.083856 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 08:45:03.083873 20613 solver.cpp:244]     Train net output #1: loss = 0.11769 (* 1 = 0.11769 loss)\nI1213 08:45:03.176650 20613 sgd_solver.cpp:174] Iteration 43000, lr = 1.29\nI1213 08:45:03.189443 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.386125\nI1213 08:47:21.371556 20613 solver.cpp:337] Iteration 43100, Testing net (#0)\nI1213 08:48:42.156631 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77912\nI1213 08:48:42.156916 20613 solver.cpp:404]     Test net output #1: loss = 0.842105 (* 1 = 0.842105 loss)\nI1213 08:48:43.468062 20613 solver.cpp:228] Iteration 43100, loss = 0.16396\nI1213 08:48:43.468106 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 08:48:43.468123 20613 solver.cpp:244]     Train net output #1: loss = 0.16396 (* 1 = 0.16396 loss)\nI1213 08:48:43.561441 20613 sgd_solver.cpp:174] Iteration 43100, lr = 1.293\nI1213 08:48:43.574280 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.393841\nI1213 08:51:01.724337 20613 solver.cpp:337] Iteration 43200, Testing net (#0)\nI1213 08:52:22.511366 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8394\nI1213 08:52:22.511638 20613 solver.cpp:404]     Test net output #1: loss = 0.616503 (* 1 = 0.616503 loss)\nI1213 08:52:23.822818 20613 
solver.cpp:228] Iteration 43200, loss = 0.0896149\nI1213 08:52:23.822862 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 08:52:23.822880 20613 solver.cpp:244]     Train net output #1: loss = 0.0896148 (* 1 = 0.0896148 loss)\nI1213 08:52:23.915383 20613 sgd_solver.cpp:174] Iteration 43200, lr = 1.296\nI1213 08:52:23.928058 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.329573\nI1213 08:54:42.045972 20613 solver.cpp:337] Iteration 43300, Testing net (#0)\nI1213 08:56:02.829001 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76892\nI1213 08:56:02.829322 20613 solver.cpp:404]     Test net output #1: loss = 0.883347 (* 1 = 0.883347 loss)\nI1213 08:56:04.140841 20613 solver.cpp:228] Iteration 43300, loss = 0.238709\nI1213 08:56:04.140887 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 08:56:04.140903 20613 solver.cpp:244]     Train net output #1: loss = 0.238709 (* 1 = 0.238709 loss)\nI1213 08:56:04.234297 20613 sgd_solver.cpp:174] Iteration 43300, lr = 1.299\nI1213 08:56:04.247076 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400329\nI1213 08:58:22.382072 20613 solver.cpp:337] Iteration 43400, Testing net (#0)\nI1213 08:59:43.163857 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84036\nI1213 08:59:43.164149 20613 solver.cpp:404]     Test net output #1: loss = 0.579074 (* 1 = 0.579074 loss)\nI1213 08:59:44.475361 20613 solver.cpp:228] Iteration 43400, loss = 0.123291\nI1213 08:59:44.475407 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 08:59:44.475423 20613 solver.cpp:244]     Train net output #1: loss = 0.123291 (* 1 = 0.123291 loss)\nI1213 08:59:44.566200 20613 sgd_solver.cpp:174] Iteration 43400, lr = 1.302\nI1213 08:59:44.578801 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382261\nI1213 09:02:02.585723 20613 solver.cpp:337] Iteration 43500, Testing net (#0)\nI1213 09:03:23.379590 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79336\nI1213 09:03:23.379937 20613 
solver.cpp:404]     Test net output #1: loss = 0.828047 (* 1 = 0.828047 loss)\nI1213 09:03:24.691216 20613 solver.cpp:228] Iteration 43500, loss = 0.202011\nI1213 09:03:24.691262 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 09:03:24.691277 20613 solver.cpp:244]     Train net output #1: loss = 0.202011 (* 1 = 0.202011 loss)\nI1213 09:03:24.784937 20613 sgd_solver.cpp:174] Iteration 43500, lr = 1.305\nI1213 09:03:24.797680 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.368038\nI1213 09:05:42.846245 20613 solver.cpp:337] Iteration 43600, Testing net (#0)\nI1213 09:07:03.636894 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80304\nI1213 09:07:03.637162 20613 solver.cpp:404]     Test net output #1: loss = 0.73926 (* 1 = 0.73926 loss)\nI1213 09:07:04.949504 20613 solver.cpp:228] Iteration 43600, loss = 0.184876\nI1213 09:07:04.949550 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 09:07:04.949566 20613 solver.cpp:244]     Train net output #1: loss = 0.184876 (* 1 = 0.184876 loss)\nI1213 09:07:05.040199 20613 sgd_solver.cpp:174] Iteration 43600, lr = 1.308\nI1213 09:07:05.052927 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.357554\nI1213 09:09:22.998389 20613 solver.cpp:337] Iteration 43700, Testing net (#0)\nI1213 09:10:43.789968 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7768\nI1213 09:10:43.790289 20613 solver.cpp:404]     Test net output #1: loss = 0.902728 (* 1 = 0.902728 loss)\nI1213 09:10:45.102382 20613 solver.cpp:228] Iteration 43700, loss = 0.0672362\nI1213 09:10:45.102417 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 09:10:45.102432 20613 solver.cpp:244]     Train net output #1: loss = 0.0672361 (* 1 = 0.0672361 loss)\nI1213 09:10:45.195086 20613 sgd_solver.cpp:174] Iteration 43700, lr = 1.311\nI1213 09:10:45.207831 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292666\nI1213 09:13:03.279220 20613 solver.cpp:337] Iteration 43800, Testing net (#0)\nI1213 
09:14:23.952388 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81464\nI1213 09:14:23.952713 20613 solver.cpp:404]     Test net output #1: loss = 0.678012 (* 1 = 0.678012 loss)\nI1213 09:14:25.264349 20613 solver.cpp:228] Iteration 43800, loss = 0.167688\nI1213 09:14:25.264395 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 09:14:25.264411 20613 solver.cpp:244]     Train net output #1: loss = 0.167688 (* 1 = 0.167688 loss)\nI1213 09:14:25.358547 20613 sgd_solver.cpp:174] Iteration 43800, lr = 1.314\nI1213 09:14:25.371250 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.34542\nI1213 09:16:43.411476 20613 solver.cpp:337] Iteration 43900, Testing net (#0)\nI1213 09:18:04.066479 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80608\nI1213 09:18:04.066764 20613 solver.cpp:404]     Test net output #1: loss = 0.761098 (* 1 = 0.761098 loss)\nI1213 09:18:05.377357 20613 solver.cpp:228] Iteration 43900, loss = 0.152359\nI1213 09:18:05.377403 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 09:18:05.377418 20613 solver.cpp:244]     Train net output #1: loss = 0.152359 (* 1 = 0.152359 loss)\nI1213 09:18:05.469692 20613 sgd_solver.cpp:174] Iteration 43900, lr = 1.317\nI1213 09:18:05.482429 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.389553\nI1213 09:20:23.568955 20613 solver.cpp:337] Iteration 44000, Testing net (#0)\nI1213 09:21:44.222322 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7788\nI1213 09:21:44.222640 20613 solver.cpp:404]     Test net output #1: loss = 0.901664 (* 1 = 0.901664 loss)\nI1213 09:21:45.534013 20613 solver.cpp:228] Iteration 44000, loss = 0.0935108\nI1213 09:21:45.534056 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 09:21:45.534072 20613 solver.cpp:244]     Train net output #1: loss = 0.0935107 (* 1 = 0.0935107 loss)\nI1213 09:21:45.629384 20613 sgd_solver.cpp:174] Iteration 44000, lr = 1.32\nI1213 09:21:45.642088 20613 sgd_solver.cpp:149] Gradient: L2 
norm 0.347947\nI1213 09:24:03.769644 20613 solver.cpp:337] Iteration 44100, Testing net (#0)\nI1213 09:25:24.420032 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79088\nI1213 09:25:24.420384 20613 solver.cpp:404]     Test net output #1: loss = 0.848098 (* 1 = 0.848098 loss)\nI1213 09:25:25.730793 20613 solver.cpp:228] Iteration 44100, loss = 0.159245\nI1213 09:25:25.730829 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 09:25:25.730844 20613 solver.cpp:244]     Train net output #1: loss = 0.159245 (* 1 = 0.159245 loss)\nI1213 09:25:25.822051 20613 sgd_solver.cpp:174] Iteration 44100, lr = 1.323\nI1213 09:25:25.834802 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35554\nI1213 09:27:43.992782 20613 solver.cpp:337] Iteration 44200, Testing net (#0)\nI1213 09:29:04.645771 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85404\nI1213 09:29:04.646095 20613 solver.cpp:404]     Test net output #1: loss = 0.52396 (* 1 = 0.52396 loss)\nI1213 09:29:05.956965 20613 solver.cpp:228] Iteration 44200, loss = 0.181309\nI1213 09:29:05.957000 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 09:29:05.957015 20613 solver.cpp:244]     Train net output #1: loss = 0.181309 (* 1 = 0.181309 loss)\nI1213 09:29:06.051126 20613 sgd_solver.cpp:174] Iteration 44200, lr = 1.326\nI1213 09:29:06.063872 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.403397\nI1213 09:31:24.151659 20613 solver.cpp:337] Iteration 44300, Testing net (#0)\nI1213 09:32:44.828042 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84696\nI1213 09:32:44.828384 20613 solver.cpp:404]     Test net output #1: loss = 0.548954 (* 1 = 0.548954 loss)\nI1213 09:32:46.138571 20613 solver.cpp:228] Iteration 44300, loss = 0.134085\nI1213 09:32:46.138612 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 09:32:46.138628 20613 solver.cpp:244]     Train net output #1: loss = 0.134085 (* 1 = 0.134085 loss)\nI1213 09:32:46.232681 20613 
sgd_solver.cpp:174] Iteration 44300, lr = 1.329\nI1213 09:32:46.245352 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.396504\nI1213 09:35:04.450069 20613 solver.cpp:337] Iteration 44400, Testing net (#0)\nI1213 09:36:25.106518 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80836\nI1213 09:36:25.106873 20613 solver.cpp:404]     Test net output #1: loss = 0.809048 (* 1 = 0.809048 loss)\nI1213 09:36:26.417315 20613 solver.cpp:228] Iteration 44400, loss = 0.114161\nI1213 09:36:26.417357 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 09:36:26.417373 20613 solver.cpp:244]     Train net output #1: loss = 0.114161 (* 1 = 0.114161 loss)\nI1213 09:36:26.509608 20613 sgd_solver.cpp:174] Iteration 44400, lr = 1.332\nI1213 09:36:26.522375 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32274\nI1213 09:38:44.628136 20613 solver.cpp:337] Iteration 44500, Testing net (#0)\nI1213 09:40:05.288256 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78876\nI1213 09:40:05.288586 20613 solver.cpp:404]     Test net output #1: loss = 0.828661 (* 1 = 0.828661 loss)\nI1213 09:40:06.599016 20613 solver.cpp:228] Iteration 44500, loss = 0.126661\nI1213 09:40:06.599051 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 09:40:06.599066 20613 solver.cpp:244]     Train net output #1: loss = 0.126661 (* 1 = 0.126661 loss)\nI1213 09:40:06.691541 20613 sgd_solver.cpp:174] Iteration 44500, lr = 1.335\nI1213 09:40:06.704300 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327823\nI1213 09:42:24.783329 20613 solver.cpp:337] Iteration 44600, Testing net (#0)\nI1213 09:43:45.444985 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82492\nI1213 09:43:45.445338 20613 solver.cpp:404]     Test net output #1: loss = 0.625682 (* 1 = 0.625682 loss)\nI1213 09:43:46.756024 20613 solver.cpp:228] Iteration 44600, loss = 0.143808\nI1213 09:43:46.756064 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 09:43:46.756080 20613 
solver.cpp:244]     Train net output #1: loss = 0.143807 (* 1 = 0.143807 loss)\nI1213 09:43:46.849938 20613 sgd_solver.cpp:174] Iteration 44600, lr = 1.338\nI1213 09:43:46.862642 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.377814\nI1213 09:46:04.849596 20613 solver.cpp:337] Iteration 44700, Testing net (#0)\nI1213 09:47:25.646534 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81556\nI1213 09:47:25.646900 20613 solver.cpp:404]     Test net output #1: loss = 0.639068 (* 1 = 0.639068 loss)\nI1213 09:47:26.958897 20613 solver.cpp:228] Iteration 44700, loss = 0.135329\nI1213 09:47:26.958930 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 09:47:26.958946 20613 solver.cpp:244]     Train net output #1: loss = 0.135329 (* 1 = 0.135329 loss)\nI1213 09:47:27.047688 20613 sgd_solver.cpp:174] Iteration 44700, lr = 1.341\nI1213 09:47:27.060303 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.355192\nI1213 09:49:45.161545 20613 solver.cpp:337] Iteration 44800, Testing net (#0)\nI1213 09:51:05.956197 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81264\nI1213 09:51:05.956547 20613 solver.cpp:404]     Test net output #1: loss = 0.707329 (* 1 = 0.707329 loss)\nI1213 09:51:07.268625 20613 solver.cpp:228] Iteration 44800, loss = 0.0708979\nI1213 09:51:07.268672 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 09:51:07.268689 20613 solver.cpp:244]     Train net output #1: loss = 0.0708977 (* 1 = 0.0708977 loss)\nI1213 09:51:07.361105 20613 sgd_solver.cpp:174] Iteration 44800, lr = 1.344\nI1213 09:51:07.373769 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28364\nI1213 09:53:25.499037 20613 solver.cpp:337] Iteration 44900, Testing net (#0)\nI1213 09:54:46.264627 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8186\nI1213 09:54:46.264935 20613 solver.cpp:404]     Test net output #1: loss = 0.64401 (* 1 = 0.64401 loss)\nI1213 09:54:47.575352 20613 solver.cpp:228] Iteration 44900, loss = 0.106893\nI1213 
09:54:47.575394 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 09:54:47.575412 20613 solver.cpp:244]     Train net output #1: loss = 0.106893 (* 1 = 0.106893 loss)\nI1213 09:54:47.668493 20613 sgd_solver.cpp:174] Iteration 44900, lr = 1.347\nI1213 09:54:47.681224 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.355335\nI1213 09:57:05.848081 20613 solver.cpp:337] Iteration 45000, Testing net (#0)\nI1213 09:58:26.610570 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78712\nI1213 09:58:26.610872 20613 solver.cpp:404]     Test net output #1: loss = 0.803909 (* 1 = 0.803909 loss)\nI1213 09:58:27.922817 20613 solver.cpp:228] Iteration 45000, loss = 0.181373\nI1213 09:58:27.922858 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 09:58:27.922874 20613 solver.cpp:244]     Train net output #1: loss = 0.181373 (* 1 = 0.181373 loss)\nI1213 09:58:28.012266 20613 sgd_solver.cpp:174] Iteration 45000, lr = 1.35\nI1213 09:58:28.024912 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.39573\nI1213 10:00:46.150954 20613 solver.cpp:337] Iteration 45100, Testing net (#0)\nI1213 10:02:06.897470 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84576\nI1213 10:02:06.897790 20613 solver.cpp:404]     Test net output #1: loss = 0.517036 (* 1 = 0.517036 loss)\nI1213 10:02:08.208449 20613 solver.cpp:228] Iteration 45100, loss = 0.176935\nI1213 10:02:08.208492 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 10:02:08.208508 20613 solver.cpp:244]     Train net output #1: loss = 0.176934 (* 1 = 0.176934 loss)\nI1213 10:02:08.296453 20613 sgd_solver.cpp:174] Iteration 45100, lr = 1.353\nI1213 10:02:08.309196 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.418327\nI1213 10:04:26.429695 20613 solver.cpp:337] Iteration 45200, Testing net (#0)\nI1213 10:05:47.177248 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79364\nI1213 10:05:47.177551 20613 solver.cpp:404]     Test net output #1: loss = 0.876685 (* 1 = 
0.876685 loss)\nI1213 10:05:48.488958 20613 solver.cpp:228] Iteration 45200, loss = 0.128208\nI1213 10:05:48.489001 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 10:05:48.489017 20613 solver.cpp:244]     Train net output #1: loss = 0.128208 (* 1 = 0.128208 loss)\nI1213 10:05:48.582237 20613 sgd_solver.cpp:174] Iteration 45200, lr = 1.356\nI1213 10:05:48.595000 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340852\nI1213 10:08:06.605406 20613 solver.cpp:337] Iteration 45300, Testing net (#0)\nI1213 10:09:27.364508 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80884\nI1213 10:09:27.364792 20613 solver.cpp:404]     Test net output #1: loss = 0.756298 (* 1 = 0.756298 loss)\nI1213 10:09:28.675951 20613 solver.cpp:228] Iteration 45300, loss = 0.112648\nI1213 10:09:28.675984 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 10:09:28.676000 20613 solver.cpp:244]     Train net output #1: loss = 0.112648 (* 1 = 0.112648 loss)\nI1213 10:09:28.765862 20613 sgd_solver.cpp:174] Iteration 45300, lr = 1.359\nI1213 10:09:28.778515 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.325575\nI1213 10:11:46.795583 20613 solver.cpp:337] Iteration 45400, Testing net (#0)\nI1213 10:13:07.553100 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76112\nI1213 10:13:07.553432 20613 solver.cpp:404]     Test net output #1: loss = 0.975967 (* 1 = 0.975967 loss)\nI1213 10:13:08.864508 20613 solver.cpp:228] Iteration 45400, loss = 0.173782\nI1213 10:13:08.864550 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 10:13:08.864567 20613 solver.cpp:244]     Train net output #1: loss = 0.173782 (* 1 = 0.173782 loss)\nI1213 10:13:08.960095 20613 sgd_solver.cpp:174] Iteration 45400, lr = 1.362\nI1213 10:13:08.972817 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.404534\nI1213 10:15:27.061007 20613 solver.cpp:337] Iteration 45500, Testing net (#0)\nI1213 10:16:47.814476 20613 solver.cpp:404]     Test net output #0: accuracy = 
0.78\nI1213 10:16:47.814736 20613 solver.cpp:404]     Test net output #1: loss = 0.899638 (* 1 = 0.899638 loss)\nI1213 10:16:49.125139 20613 solver.cpp:228] Iteration 45500, loss = 0.0865627\nI1213 10:16:49.125182 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 10:16:49.125200 20613 solver.cpp:244]     Train net output #1: loss = 0.0865627 (* 1 = 0.0865627 loss)\nI1213 10:16:49.220763 20613 sgd_solver.cpp:174] Iteration 45500, lr = 1.365\nI1213 10:16:49.233469 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340956\nI1213 10:19:07.411738 20613 solver.cpp:337] Iteration 45600, Testing net (#0)\nI1213 10:20:28.171187 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81804\nI1213 10:20:28.171470 20613 solver.cpp:404]     Test net output #1: loss = 0.685367 (* 1 = 0.685367 loss)\nI1213 10:20:29.482432 20613 solver.cpp:228] Iteration 45600, loss = 0.0753042\nI1213 10:20:29.482475 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 10:20:29.482492 20613 solver.cpp:244]     Train net output #1: loss = 0.0753042 (* 1 = 0.0753042 loss)\nI1213 10:20:29.576138 20613 sgd_solver.cpp:174] Iteration 45600, lr = 1.368\nI1213 10:20:29.588897 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32872\nI1213 10:22:47.533095 20613 solver.cpp:337] Iteration 45700, Testing net (#0)\nI1213 10:24:08.300024 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83536\nI1213 10:24:08.300312 20613 solver.cpp:404]     Test net output #1: loss = 0.564366 (* 1 = 0.564366 loss)\nI1213 10:24:09.611479 20613 solver.cpp:228] Iteration 45700, loss = 0.121935\nI1213 10:24:09.611522 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 10:24:09.611541 20613 solver.cpp:244]     Train net output #1: loss = 0.121935 (* 1 = 0.121935 loss)\nI1213 10:24:09.699415 20613 sgd_solver.cpp:174] Iteration 45700, lr = 1.371\nI1213 10:24:09.712060 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.407883\nI1213 10:26:27.823556 20613 solver.cpp:337] Iteration 
45800, Testing net (#0)\nI1213 10:27:48.603332 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84084\nI1213 10:27:48.603646 20613 solver.cpp:404]     Test net output #1: loss = 0.560703 (* 1 = 0.560703 loss)\nI1213 10:27:49.914326 20613 solver.cpp:228] Iteration 45800, loss = 0.209722\nI1213 10:27:49.914369 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 10:27:49.914386 20613 solver.cpp:244]     Train net output #1: loss = 0.209722 (* 1 = 0.209722 loss)\nI1213 10:27:50.009011 20613 sgd_solver.cpp:174] Iteration 45800, lr = 1.374\nI1213 10:27:50.021775 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.383746\nI1213 10:30:08.008992 20613 solver.cpp:337] Iteration 45900, Testing net (#0)\nI1213 10:31:28.786643 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74612\nI1213 10:31:28.786948 20613 solver.cpp:404]     Test net output #1: loss = 1.09623 (* 1 = 1.09623 loss)\nI1213 10:31:30.097528 20613 solver.cpp:228] Iteration 45900, loss = 0.257909\nI1213 10:31:30.097564 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 10:31:30.097579 20613 solver.cpp:244]     Train net output #1: loss = 0.257909 (* 1 = 0.257909 loss)\nI1213 10:31:30.187739 20613 sgd_solver.cpp:174] Iteration 45900, lr = 1.377\nI1213 10:31:30.200248 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.42624\nI1213 10:33:48.331769 20613 solver.cpp:337] Iteration 46000, Testing net (#0)\nI1213 10:35:09.083117 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78992\nI1213 10:35:09.083395 20613 solver.cpp:404]     Test net output #1: loss = 0.802171 (* 1 = 0.802171 loss)\nI1213 10:35:10.395089 20613 solver.cpp:228] Iteration 46000, loss = 0.0689145\nI1213 10:35:10.395123 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 10:35:10.395139 20613 solver.cpp:244]     Train net output #1: loss = 0.0689145 (* 1 = 0.0689145 loss)\nI1213 10:35:10.488806 20613 sgd_solver.cpp:174] Iteration 46000, lr = 1.38\nI1213 10:35:10.501379 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.335952\nI1213 10:37:28.539448 20613 solver.cpp:337] Iteration 46100, Testing net (#0)\nI1213 10:38:49.300020 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80012\nI1213 10:38:49.300333 20613 solver.cpp:404]     Test net output #1: loss = 0.777478 (* 1 = 0.777478 loss)\nI1213 10:38:50.611394 20613 solver.cpp:228] Iteration 46100, loss = 0.151674\nI1213 10:38:50.611438 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 10:38:50.611454 20613 solver.cpp:244]     Train net output #1: loss = 0.151674 (* 1 = 0.151674 loss)\nI1213 10:38:50.702910 20613 sgd_solver.cpp:174] Iteration 46100, lr = 1.383\nI1213 10:38:50.715739 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400831\nI1213 10:41:08.859285 20613 solver.cpp:337] Iteration 46200, Testing net (#0)\nI1213 10:42:29.633216 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81164\nI1213 10:42:29.633527 20613 solver.cpp:404]     Test net output #1: loss = 0.625624 (* 1 = 0.625624 loss)\nI1213 10:42:30.943617 20613 solver.cpp:228] Iteration 46200, loss = 0.190663\nI1213 10:42:30.943652 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 10:42:30.943673 20613 solver.cpp:244]     Train net output #1: loss = 0.190663 (* 1 = 0.190663 loss)\nI1213 10:42:31.037055 20613 sgd_solver.cpp:174] Iteration 46200, lr = 1.386\nI1213 10:42:31.049746 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.420424\nI1213 10:44:49.110304 20613 solver.cpp:337] Iteration 46300, Testing net (#0)\nI1213 10:46:09.868119 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83116\nI1213 10:46:09.868407 20613 solver.cpp:404]     Test net output #1: loss = 0.58484 (* 1 = 0.58484 loss)\nI1213 10:46:11.179201 20613 solver.cpp:228] Iteration 46300, loss = 0.109113\nI1213 10:46:11.179237 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 10:46:11.179252 20613 solver.cpp:244]     Train net output #1: loss = 0.109113 (* 1 = 0.109113 loss)\nI1213 
10:46:11.272223 20613 sgd_solver.cpp:174] Iteration 46300, lr = 1.389\nI1213 10:46:11.285002 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.381175\nI1213 10:48:29.351797 20613 solver.cpp:337] Iteration 46400, Testing net (#0)\nI1213 10:49:50.114410 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80584\nI1213 10:49:50.114704 20613 solver.cpp:404]     Test net output #1: loss = 0.727542 (* 1 = 0.727542 loss)\nI1213 10:49:51.425555 20613 solver.cpp:228] Iteration 46400, loss = 0.120133\nI1213 10:49:51.425590 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 10:49:51.425606 20613 solver.cpp:244]     Train net output #1: loss = 0.120133 (* 1 = 0.120133 loss)\nI1213 10:49:51.519816 20613 sgd_solver.cpp:174] Iteration 46400, lr = 1.392\nI1213 10:49:51.532500 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305102\nI1213 10:52:09.650643 20613 solver.cpp:337] Iteration 46500, Testing net (#0)\nI1213 10:53:30.405230 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83232\nI1213 10:53:30.405478 20613 solver.cpp:404]     Test net output #1: loss = 0.616203 (* 1 = 0.616203 loss)\nI1213 10:53:31.716102 20613 solver.cpp:228] Iteration 46500, loss = 0.171153\nI1213 10:53:31.716138 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 10:53:31.716154 20613 solver.cpp:244]     Train net output #1: loss = 0.171153 (* 1 = 0.171153 loss)\nI1213 10:53:31.807916 20613 sgd_solver.cpp:174] Iteration 46500, lr = 1.395\nI1213 10:53:31.820735 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.334302\nI1213 10:55:49.921828 20613 solver.cpp:337] Iteration 46600, Testing net (#0)\nI1213 10:57:10.674547 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80792\nI1213 10:57:10.674845 20613 solver.cpp:404]     Test net output #1: loss = 0.694899 (* 1 = 0.694899 loss)\nI1213 10:57:11.985770 20613 solver.cpp:228] Iteration 46600, loss = 0.194622\nI1213 10:57:11.985813 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 
10:57:11.985831 20613 solver.cpp:244]     Train net output #1: loss = 0.194622 (* 1 = 0.194622 loss)\nI1213 10:57:12.076972 20613 sgd_solver.cpp:174] Iteration 46600, lr = 1.398\nI1213 10:57:12.089685 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347605\nI1213 10:59:30.191674 20613 solver.cpp:337] Iteration 46700, Testing net (#0)\nI1213 11:00:50.945150 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82296\nI1213 11:00:50.945456 20613 solver.cpp:404]     Test net output #1: loss = 0.67138 (* 1 = 0.67138 loss)\nI1213 11:00:52.257160 20613 solver.cpp:228] Iteration 46700, loss = 0.0852272\nI1213 11:00:52.257203 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:00:52.257220 20613 solver.cpp:244]     Train net output #1: loss = 0.0852272 (* 1 = 0.0852272 loss)\nI1213 11:00:52.356073 20613 sgd_solver.cpp:174] Iteration 46700, lr = 1.401\nI1213 11:00:52.368811 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28566\nI1213 11:03:09.669225 20613 solver.cpp:337] Iteration 46800, Testing net (#0)\nI1213 11:04:30.435473 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79448\nI1213 11:04:30.435798 20613 solver.cpp:404]     Test net output #1: loss = 0.803783 (* 1 = 0.803783 loss)\nI1213 11:04:31.746891 20613 solver.cpp:228] Iteration 46800, loss = 0.0899763\nI1213 11:04:31.746933 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:04:31.746950 20613 solver.cpp:244]     Train net output #1: loss = 0.0899763 (* 1 = 0.0899763 loss)\nI1213 11:04:31.843099 20613 sgd_solver.cpp:174] Iteration 46800, lr = 1.404\nI1213 11:04:31.855634 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.356548\nI1213 11:06:49.940291 20613 solver.cpp:337] Iteration 46900, Testing net (#0)\nI1213 11:08:10.707734 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81784\nI1213 11:08:10.708042 20613 solver.cpp:404]     Test net output #1: loss = 0.639286 (* 1 = 0.639286 loss)\nI1213 11:08:12.019182 20613 solver.cpp:228] Iteration 46900, loss = 
0.100731\nI1213 11:08:12.019225 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:08:12.019243 20613 solver.cpp:244]     Train net output #1: loss = 0.100731 (* 1 = 0.100731 loss)\nI1213 11:08:12.112015 20613 sgd_solver.cpp:174] Iteration 46900, lr = 1.407\nI1213 11:08:12.124734 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.387844\nI1213 11:10:30.261528 20613 solver.cpp:337] Iteration 47000, Testing net (#0)\nI1213 11:11:51.029561 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79436\nI1213 11:11:51.029901 20613 solver.cpp:404]     Test net output #1: loss = 0.741266 (* 1 = 0.741266 loss)\nI1213 11:11:52.340304 20613 solver.cpp:228] Iteration 47000, loss = 0.124458\nI1213 11:11:52.340349 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 11:11:52.340366 20613 solver.cpp:244]     Train net output #1: loss = 0.124458 (* 1 = 0.124458 loss)\nI1213 11:11:52.439702 20613 sgd_solver.cpp:174] Iteration 47000, lr = 1.41\nI1213 11:11:52.452440 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.391454\nI1213 11:14:10.591789 20613 solver.cpp:337] Iteration 47100, Testing net (#0)\nI1213 11:15:31.367873 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70484\nI1213 11:15:31.368158 20613 solver.cpp:404]     Test net output #1: loss = 1.48765 (* 1 = 1.48765 loss)\nI1213 11:15:32.678977 20613 solver.cpp:228] Iteration 47100, loss = 0.184594\nI1213 11:15:32.679020 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 11:15:32.679038 20613 solver.cpp:244]     Train net output #1: loss = 0.184594 (* 1 = 0.184594 loss)\nI1213 11:15:32.773710 20613 sgd_solver.cpp:174] Iteration 47100, lr = 1.413\nI1213 11:15:32.786425 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35355\nI1213 11:17:50.975425 20613 solver.cpp:337] Iteration 47200, Testing net (#0)\nI1213 11:19:11.744849 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82908\nI1213 11:19:11.745112 20613 solver.cpp:404]     Test net output #1: loss = 
0.614719 (* 1 = 0.614719 loss)\nI1213 11:19:13.057284 20613 solver.cpp:228] Iteration 47200, loss = 0.0703769\nI1213 11:19:13.057329 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:19:13.057348 20613 solver.cpp:244]     Train net output #1: loss = 0.070377 (* 1 = 0.070377 loss)\nI1213 11:19:13.152997 20613 sgd_solver.cpp:174] Iteration 47200, lr = 1.416\nI1213 11:19:13.165740 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.354014\nI1213 11:21:31.258613 20613 solver.cpp:337] Iteration 47300, Testing net (#0)\nI1213 11:22:52.689208 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79392\nI1213 11:22:52.689580 20613 solver.cpp:404]     Test net output #1: loss = 0.743509 (* 1 = 0.743509 loss)\nI1213 11:22:54.008451 20613 solver.cpp:228] Iteration 47300, loss = 0.0926776\nI1213 11:22:54.008509 20613 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1213 11:22:54.008527 20613 solver.cpp:244]     Train net output #1: loss = 0.0926777 (* 1 = 0.0926777 loss)\nI1213 11:22:54.095227 20613 sgd_solver.cpp:174] Iteration 47300, lr = 1.419\nI1213 11:22:54.109114 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.367061\nI1213 11:25:12.365679 20613 solver.cpp:337] Iteration 47400, Testing net (#0)\nI1213 11:26:34.259913 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83928\nI1213 11:26:34.260308 20613 solver.cpp:404]     Test net output #1: loss = 0.542514 (* 1 = 0.542514 loss)\nI1213 11:26:35.573493 20613 solver.cpp:228] Iteration 47400, loss = 0.123388\nI1213 11:26:35.573549 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 11:26:35.573575 20613 solver.cpp:244]     Train net output #1: loss = 0.123388 (* 1 = 0.123388 loss)\nI1213 11:26:35.668467 20613 sgd_solver.cpp:174] Iteration 47400, lr = 1.422\nI1213 11:26:35.682339 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306011\nI1213 11:28:54.070194 20613 solver.cpp:337] Iteration 47500, Testing net (#0)\nI1213 11:30:15.893868 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.81848\nI1213 11:30:15.894255 20613 solver.cpp:404]     Test net output #1: loss = 0.668771 (* 1 = 0.668771 loss)\nI1213 11:30:17.207656 20613 solver.cpp:228] Iteration 47500, loss = 0.130215\nI1213 11:30:17.207713 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 11:30:17.207738 20613 solver.cpp:244]     Train net output #1: loss = 0.130215 (* 1 = 0.130215 loss)\nI1213 11:30:17.301100 20613 sgd_solver.cpp:174] Iteration 47500, lr = 1.425\nI1213 11:30:17.314944 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343064\nI1213 11:32:34.903056 20613 solver.cpp:337] Iteration 47600, Testing net (#0)\nI1213 11:33:56.819835 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84668\nI1213 11:33:56.820214 20613 solver.cpp:404]     Test net output #1: loss = 0.525983 (* 1 = 0.525983 loss)\nI1213 11:33:58.134714 20613 solver.cpp:228] Iteration 47600, loss = 0.0966742\nI1213 11:33:58.134765 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:33:58.134791 20613 solver.cpp:244]     Train net output #1: loss = 0.0966742 (* 1 = 0.0966742 loss)\nI1213 11:33:58.221216 20613 sgd_solver.cpp:174] Iteration 47600, lr = 1.428\nI1213 11:33:58.235105 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349404\nI1213 11:36:16.534765 20613 solver.cpp:337] Iteration 47700, Testing net (#0)\nI1213 11:37:38.381682 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78304\nI1213 11:37:38.382081 20613 solver.cpp:404]     Test net output #1: loss = 0.833319 (* 1 = 0.833319 loss)\nI1213 11:37:39.695832 20613 solver.cpp:228] Iteration 47700, loss = 0.146122\nI1213 11:37:39.695889 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:37:39.695915 20613 solver.cpp:244]     Train net output #1: loss = 0.146122 (* 1 = 0.146122 loss)\nI1213 11:37:39.782726 20613 sgd_solver.cpp:174] Iteration 47700, lr = 1.431\nI1213 11:37:39.796533 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.411828\nI1213 11:39:58.139919 20613 
solver.cpp:337] Iteration 47800, Testing net (#0)\nI1213 11:41:20.025588 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84404\nI1213 11:41:20.025943 20613 solver.cpp:404]     Test net output #1: loss = 0.572893 (* 1 = 0.572893 loss)\nI1213 11:41:21.340173 20613 solver.cpp:228] Iteration 47800, loss = 0.14869\nI1213 11:41:21.340232 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 11:41:21.340257 20613 solver.cpp:244]     Train net output #1: loss = 0.14869 (* 1 = 0.14869 loss)\nI1213 11:41:21.427589 20613 sgd_solver.cpp:174] Iteration 47800, lr = 1.434\nI1213 11:41:21.441491 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.364873\nI1213 11:43:39.739223 20613 solver.cpp:337] Iteration 47900, Testing net (#0)\nI1213 11:45:01.572582 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8008\nI1213 11:45:01.572952 20613 solver.cpp:404]     Test net output #1: loss = 0.720563 (* 1 = 0.720563 loss)\nI1213 11:45:02.887513 20613 solver.cpp:228] Iteration 47900, loss = 0.274968\nI1213 11:45:02.887567 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 11:45:02.887591 20613 solver.cpp:244]     Train net output #1: loss = 0.274968 (* 1 = 0.274968 loss)\nI1213 11:45:02.973956 20613 sgd_solver.cpp:174] Iteration 47900, lr = 1.437\nI1213 11:45:02.987861 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.381514\nI1213 11:47:21.344844 20613 solver.cpp:337] Iteration 48000, Testing net (#0)\nI1213 11:48:43.240061 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72696\nI1213 11:48:43.240455 20613 solver.cpp:404]     Test net output #1: loss = 1.30472 (* 1 = 1.30472 loss)\nI1213 11:48:44.553792 20613 solver.cpp:228] Iteration 48000, loss = 0.151851\nI1213 11:48:44.553849 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 11:48:44.553875 20613 solver.cpp:244]     Train net output #1: loss = 0.151851 (* 1 = 0.151851 loss)\nI1213 11:48:44.646173 20613 sgd_solver.cpp:174] Iteration 48000, lr = 1.44\nI1213 
11:48:44.660109 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347888\nI1213 11:51:02.996424 20613 solver.cpp:337] Iteration 48100, Testing net (#0)\nI1213 11:52:24.918592 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83668\nI1213 11:52:24.918961 20613 solver.cpp:404]     Test net output #1: loss = 0.557554 (* 1 = 0.557554 loss)\nI1213 11:52:26.233134 20613 solver.cpp:228] Iteration 48100, loss = 0.109068\nI1213 11:52:26.233189 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 11:52:26.233216 20613 solver.cpp:244]     Train net output #1: loss = 0.109068 (* 1 = 0.109068 loss)\nI1213 11:52:26.323400 20613 sgd_solver.cpp:174] Iteration 48100, lr = 1.443\nI1213 11:52:26.337396 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.342897\nI1213 11:54:44.715004 20613 solver.cpp:337] Iteration 48200, Testing net (#0)\nI1213 11:56:06.574301 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82364\nI1213 11:56:06.574688 20613 solver.cpp:404]     Test net output #1: loss = 0.635177 (* 1 = 0.635177 loss)\nI1213 11:56:07.888018 20613 solver.cpp:228] Iteration 48200, loss = 0.132091\nI1213 11:56:07.888077 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:56:07.888103 20613 solver.cpp:244]     Train net output #1: loss = 0.132091 (* 1 = 0.132091 loss)\nI1213 11:56:07.981549 20613 sgd_solver.cpp:174] Iteration 48200, lr = 1.446\nI1213 11:56:07.995055 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.364157\nI1213 11:58:26.255619 20613 solver.cpp:337] Iteration 48300, Testing net (#0)\nI1213 11:59:48.077561 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78504\nI1213 11:59:48.077932 20613 solver.cpp:404]     Test net output #1: loss = 0.925162 (* 1 = 0.925162 loss)\nI1213 11:59:49.391952 20613 solver.cpp:228] Iteration 48300, loss = 0.111163\nI1213 11:59:49.392014 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 11:59:49.392041 20613 solver.cpp:244]     Train net output #1: loss = 0.111163 (* 1 
= 0.111163 loss)\nI1213 11:59:49.478941 20613 sgd_solver.cpp:174] Iteration 48300, lr = 1.449\nI1213 11:59:49.492744 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.374304\nI1213 12:02:06.952411 20613 solver.cpp:337] Iteration 48400, Testing net (#0)\nI1213 12:03:28.770521 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76668\nI1213 12:03:28.770915 20613 solver.cpp:404]     Test net output #1: loss = 0.839491 (* 1 = 0.839491 loss)\nI1213 12:03:30.084987 20613 solver.cpp:228] Iteration 48400, loss = 0.132872\nI1213 12:03:30.085050 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 12:03:30.085075 20613 solver.cpp:244]     Train net output #1: loss = 0.132872 (* 1 = 0.132872 loss)\nI1213 12:03:30.176260 20613 sgd_solver.cpp:174] Iteration 48400, lr = 1.452\nI1213 12:03:30.190099 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319263\nI1213 12:05:48.544018 20613 solver.cpp:337] Iteration 48500, Testing net (#0)\nI1213 12:07:10.345953 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7292\nI1213 12:07:10.346329 20613 solver.cpp:404]     Test net output #1: loss = 1.02112 (* 1 = 1.02112 loss)\nI1213 12:07:11.660053 20613 solver.cpp:228] Iteration 48500, loss = 0.18029\nI1213 12:07:11.660109 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 12:07:11.660135 20613 solver.cpp:244]     Train net output #1: loss = 0.18029 (* 1 = 0.18029 loss)\nI1213 12:07:11.755375 20613 sgd_solver.cpp:174] Iteration 48500, lr = 1.455\nI1213 12:07:11.769208 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307267\nI1213 12:09:29.238648 20613 solver.cpp:337] Iteration 48600, Testing net (#0)\nI1213 12:10:51.145679 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8322\nI1213 12:10:51.146044 20613 solver.cpp:404]     Test net output #1: loss = 0.545659 (* 1 = 0.545659 loss)\nI1213 12:10:52.459884 20613 solver.cpp:228] Iteration 48600, loss = 0.0929437\nI1213 12:10:52.459945 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.968\nI1213 12:10:52.459970 20613 solver.cpp:244]     Train net output #1: loss = 0.092944 (* 1 = 0.092944 loss)\nI1213 12:10:52.550837 20613 sgd_solver.cpp:174] Iteration 48600, lr = 1.458\nI1213 12:10:52.564604 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.393134\nI1213 12:13:10.100142 20613 solver.cpp:337] Iteration 48700, Testing net (#0)\nI1213 12:14:32.009672 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80116\nI1213 12:14:32.010071 20613 solver.cpp:404]     Test net output #1: loss = 0.706872 (* 1 = 0.706872 loss)\nI1213 12:14:33.323709 20613 solver.cpp:228] Iteration 48700, loss = 0.165305\nI1213 12:14:33.323766 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 12:14:33.323791 20613 solver.cpp:244]     Train net output #1: loss = 0.165305 (* 1 = 0.165305 loss)\nI1213 12:14:33.415029 20613 sgd_solver.cpp:174] Iteration 48700, lr = 1.461\nI1213 12:14:33.428757 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305172\nI1213 12:16:51.752804 20613 solver.cpp:337] Iteration 48800, Testing net (#0)\nI1213 12:18:13.563305 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80868\nI1213 12:18:13.563705 20613 solver.cpp:404]     Test net output #1: loss = 0.673337 (* 1 = 0.673337 loss)\nI1213 12:18:14.876451 20613 solver.cpp:228] Iteration 48800, loss = 0.10275\nI1213 12:18:14.876510 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 12:18:14.876534 20613 solver.cpp:244]     Train net output #1: loss = 0.10275 (* 1 = 0.10275 loss)\nI1213 12:18:14.962879 20613 sgd_solver.cpp:174] Iteration 48800, lr = 1.464\nI1213 12:18:14.976640 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.335015\nI1213 12:20:33.226289 20613 solver.cpp:337] Iteration 48900, Testing net (#0)\nI1213 12:21:55.008787 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78172\nI1213 12:21:55.009161 20613 solver.cpp:404]     Test net output #1: loss = 0.809715 (* 1 = 0.809715 loss)\nI1213 12:21:56.321823 20613 solver.cpp:228] Iteration 48900, 
loss = 0.130491\nI1213 12:21:56.321880 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 12:21:56.321905 20613 solver.cpp:244]     Train net output #1: loss = 0.130491 (* 1 = 0.130491 loss)\nI1213 12:21:56.415112 20613 sgd_solver.cpp:174] Iteration 48900, lr = 1.467\nI1213 12:21:56.428936 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343652\nI1213 12:24:14.730841 20613 solver.cpp:337] Iteration 49000, Testing net (#0)\nI1213 12:25:36.556876 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74248\nI1213 12:25:36.557252 20613 solver.cpp:404]     Test net output #1: loss = 1.27232 (* 1 = 1.27232 loss)\nI1213 12:25:37.871013 20613 solver.cpp:228] Iteration 49000, loss = 0.374674\nI1213 12:25:37.871076 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1213 12:25:37.871104 20613 solver.cpp:244]     Train net output #1: loss = 0.374674 (* 1 = 0.374674 loss)\nI1213 12:25:37.956531 20613 sgd_solver.cpp:174] Iteration 49000, lr = 1.47\nI1213 12:25:37.970469 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.485818\nI1213 12:27:56.308470 20613 solver.cpp:337] Iteration 49100, Testing net (#0)\nI1213 12:29:18.144978 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80448\nI1213 12:29:18.145368 20613 solver.cpp:404]     Test net output #1: loss = 0.703445 (* 1 = 0.703445 loss)\nI1213 12:29:19.458907 20613 solver.cpp:228] Iteration 49100, loss = 0.144054\nI1213 12:29:19.458969 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 12:29:19.459002 20613 solver.cpp:244]     Train net output #1: loss = 0.144054 (* 1 = 0.144054 loss)\nI1213 12:29:19.545137 20613 sgd_solver.cpp:174] Iteration 49100, lr = 1.473\nI1213 12:29:19.558969 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.334129\nI1213 12:31:37.877432 20613 solver.cpp:337] Iteration 49200, Testing net (#0)\nI1213 12:32:59.631595 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80824\nI1213 12:32:59.631945 20613 solver.cpp:404]     Test net output #1: 
loss = 0.824729 (* 1 = 0.824729 loss)\nI1213 12:33:00.944172 20613 solver.cpp:228] Iteration 49200, loss = 0.091121\nI1213 12:33:00.944233 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 12:33:00.944252 20613 solver.cpp:244]     Train net output #1: loss = 0.0911212 (* 1 = 0.0911212 loss)\nI1213 12:33:01.037039 20613 sgd_solver.cpp:174] Iteration 49200, lr = 1.476\nI1213 12:33:01.050897 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.360573\nI1213 12:35:19.472592 20613 solver.cpp:337] Iteration 49300, Testing net (#0)\nI1213 12:36:41.213210 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8334\nI1213 12:36:41.213575 20613 solver.cpp:404]     Test net output #1: loss = 0.546417 (* 1 = 0.546417 loss)\nI1213 12:36:42.526165 20613 solver.cpp:228] Iteration 49300, loss = 0.195495\nI1213 12:36:42.526223 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 12:36:42.526242 20613 solver.cpp:244]     Train net output #1: loss = 0.195495 (* 1 = 0.195495 loss)\nI1213 12:36:42.618121 20613 sgd_solver.cpp:174] Iteration 49300, lr = 1.479\nI1213 12:36:42.631965 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.333876\nI1213 12:39:00.939242 20613 solver.cpp:337] Iteration 49400, Testing net (#0)\nI1213 12:40:22.684813 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81092\nI1213 12:40:22.685195 20613 solver.cpp:404]     Test net output #1: loss = 0.696178 (* 1 = 0.696178 loss)\nI1213 12:40:23.997090 20613 solver.cpp:228] Iteration 49400, loss = 0.231428\nI1213 12:40:23.997148 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 12:40:23.997166 20613 solver.cpp:244]     Train net output #1: loss = 0.231428 (* 1 = 0.231428 loss)\nI1213 12:40:24.090767 20613 sgd_solver.cpp:174] Iteration 49400, lr = 1.482\nI1213 12:40:24.104246 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.375469\nI1213 12:42:42.422816 20613 solver.cpp:337] Iteration 49500, Testing net (#0)\nI1213 12:44:04.170698 20613 solver.cpp:404]     Test 
net output #0: accuracy = 0.81736\nI1213 12:44:04.171066 20613 solver.cpp:404]     Test net output #1: loss = 0.653532 (* 1 = 0.653532 loss)\nI1213 12:44:05.483656 20613 solver.cpp:228] Iteration 49500, loss = 0.161478\nI1213 12:44:05.483714 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 12:44:05.483733 20613 solver.cpp:244]     Train net output #1: loss = 0.161479 (* 1 = 0.161479 loss)\nI1213 12:44:05.575510 20613 sgd_solver.cpp:174] Iteration 49500, lr = 1.485\nI1213 12:44:05.589391 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.34056\nI1213 12:46:23.914640 20613 solver.cpp:337] Iteration 49600, Testing net (#0)\nI1213 12:47:45.670279 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82668\nI1213 12:47:45.670642 20613 solver.cpp:404]     Test net output #1: loss = 0.601003 (* 1 = 0.601003 loss)\nI1213 12:47:46.982756 20613 solver.cpp:228] Iteration 49600, loss = 0.155591\nI1213 12:47:46.982815 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 12:47:46.982834 20613 solver.cpp:244]     Train net output #1: loss = 0.155592 (* 1 = 0.155592 loss)\nI1213 12:47:47.074759 20613 sgd_solver.cpp:174] Iteration 49600, lr = 1.488\nI1213 12:47:47.088590 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.372576\nI1213 12:50:05.393085 20613 solver.cpp:337] Iteration 49700, Testing net (#0)\nI1213 12:51:27.166379 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80824\nI1213 12:51:27.166728 20613 solver.cpp:404]     Test net output #1: loss = 0.679501 (* 1 = 0.679501 loss)\nI1213 12:51:28.479235 20613 solver.cpp:228] Iteration 49700, loss = 0.169409\nI1213 12:51:28.479298 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 12:51:28.479323 20613 solver.cpp:244]     Train net output #1: loss = 0.16941 (* 1 = 0.16941 loss)\nI1213 12:51:28.571941 20613 sgd_solver.cpp:174] Iteration 49700, lr = 1.491\nI1213 12:51:28.585775 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314926\nI1213 12:53:46.977535 20613 
solver.cpp:337] Iteration 49800, Testing net (#0)\nI1213 12:55:08.769186 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82284\nI1213 12:55:08.769578 20613 solver.cpp:404]     Test net output #1: loss = 0.681623 (* 1 = 0.681623 loss)\nI1213 12:55:10.081758 20613 solver.cpp:228] Iteration 49800, loss = 0.109171\nI1213 12:55:10.081814 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 12:55:10.081838 20613 solver.cpp:244]     Train net output #1: loss = 0.109171 (* 1 = 0.109171 loss)\nI1213 12:55:10.174999 20613 sgd_solver.cpp:174] Iteration 49800, lr = 1.494\nI1213 12:55:10.188822 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.382753\nI1213 12:57:28.547577 20613 solver.cpp:337] Iteration 49900, Testing net (#0)\nI1213 12:58:50.339148 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82212\nI1213 12:58:50.339537 20613 solver.cpp:404]     Test net output #1: loss = 0.619438 (* 1 = 0.619438 loss)\nI1213 12:58:51.652032 20613 solver.cpp:228] Iteration 49900, loss = 0.114426\nI1213 12:58:51.652089 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 12:58:51.652114 20613 solver.cpp:244]     Train net output #1: loss = 0.114426 (* 1 = 0.114426 loss)\nI1213 12:58:51.745307 20613 sgd_solver.cpp:174] Iteration 49900, lr = 1.497\nI1213 12:58:51.759243 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.366906\nI1213 13:01:10.085202 20613 solver.cpp:337] Iteration 50000, Testing net (#0)\nI1213 13:02:31.861173 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80064\nI1213 13:02:31.861554 20613 solver.cpp:404]     Test net output #1: loss = 0.735178 (* 1 = 0.735178 loss)\nI1213 13:02:33.174836 20613 solver.cpp:228] Iteration 50000, loss = 0.0960498\nI1213 13:02:33.174896 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 13:02:33.174922 20613 solver.cpp:244]     Train net output #1: loss = 0.09605 (* 1 = 0.09605 loss)\nI1213 13:02:33.266463 20613 sgd_solver.cpp:174] Iteration 50000, lr = 1.5\nI1213 
13:02:33.280387 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362067\nI1213 13:04:51.590858 20613 solver.cpp:337] Iteration 50100, Testing net (#0)\nI1213 13:06:13.378276 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7518\nI1213 13:06:13.378654 20613 solver.cpp:404]     Test net output #1: loss = 0.89878 (* 1 = 0.89878 loss)\nI1213 13:06:14.691223 20613 solver.cpp:228] Iteration 50100, loss = 0.144157\nI1213 13:06:14.691279 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 13:06:14.691305 20613 solver.cpp:244]     Train net output #1: loss = 0.144157 (* 1 = 0.144157 loss)\nI1213 13:06:14.776080 20613 sgd_solver.cpp:174] Iteration 50100, lr = 1.503\nI1213 13:06:14.789994 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.375745\nI1213 13:08:33.099107 20613 solver.cpp:337] Iteration 50200, Testing net (#0)\nI1213 13:09:54.975417 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82032\nI1213 13:09:54.975792 20613 solver.cpp:404]     Test net output #1: loss = 0.648964 (* 1 = 0.648964 loss)\nI1213 13:09:56.288565 20613 solver.cpp:228] Iteration 50200, loss = 0.145596\nI1213 13:09:56.288619 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 13:09:56.288635 20613 solver.cpp:244]     Train net output #1: loss = 0.145597 (* 1 = 0.145597 loss)\nI1213 13:09:56.376263 20613 sgd_solver.cpp:174] Iteration 50200, lr = 1.506\nI1213 13:09:56.390064 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.338461\nI1213 13:12:14.724213 20613 solver.cpp:337] Iteration 50300, Testing net (#0)\nI1213 13:13:36.481905 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76124\nI1213 13:13:36.482271 20613 solver.cpp:404]     Test net output #1: loss = 1.01095 (* 1 = 1.01095 loss)\nI1213 13:13:37.794687 20613 solver.cpp:228] Iteration 50300, loss = 0.149274\nI1213 13:13:37.794737 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 13:13:37.794754 20613 solver.cpp:244]     Train net output #1: loss = 0.149274 (* 1 = 
0.149274 loss)\nI1213 13:13:37.883512 20613 sgd_solver.cpp:174] Iteration 50300, lr = 1.509\nI1213 13:13:37.897361 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347307\nI1213 13:15:55.485918 20613 solver.cpp:337] Iteration 50400, Testing net (#0)\nI1213 13:17:17.239197 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83548\nI1213 13:17:17.239588 20613 solver.cpp:404]     Test net output #1: loss = 0.560572 (* 1 = 0.560572 loss)\nI1213 13:17:18.551694 20613 solver.cpp:228] Iteration 50400, loss = 0.172419\nI1213 13:17:18.551749 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 13:17:18.551766 20613 solver.cpp:244]     Train net output #1: loss = 0.17242 (* 1 = 0.17242 loss)\nI1213 13:17:18.641249 20613 sgd_solver.cpp:174] Iteration 50400, lr = 1.512\nI1213 13:17:18.655153 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.274685\nI1213 13:19:36.894345 20613 solver.cpp:337] Iteration 50500, Testing net (#0)\nI1213 13:20:58.649827 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81964\nI1213 13:20:58.650177 20613 solver.cpp:404]     Test net output #1: loss = 0.625709 (* 1 = 0.625709 loss)\nI1213 13:20:59.962890 20613 solver.cpp:228] Iteration 50500, loss = 0.124588\nI1213 13:20:59.962944 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 13:20:59.962962 20613 solver.cpp:244]     Train net output #1: loss = 0.124588 (* 1 = 0.124588 loss)\nI1213 13:21:00.052906 20613 sgd_solver.cpp:174] Iteration 50500, lr = 1.515\nI1213 13:21:00.066706 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322248\nI1213 13:23:18.294505 20613 solver.cpp:337] Iteration 50600, Testing net (#0)\nI1213 13:24:40.245918 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75708\nI1213 13:24:40.246294 20613 solver.cpp:404]     Test net output #1: loss = 0.952015 (* 1 = 0.952015 loss)\nI1213 13:24:41.559598 20613 solver.cpp:228] Iteration 50600, loss = 0.209713\nI1213 13:24:41.559655 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.928\nI1213 13:24:41.559680 20613 solver.cpp:244]     Train net output #1: loss = 0.209714 (* 1 = 0.209714 loss)\nI1213 13:24:41.649683 20613 sgd_solver.cpp:174] Iteration 50600, lr = 1.518\nI1213 13:24:41.663422 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415235\nI1213 13:26:59.917691 20613 solver.cpp:337] Iteration 50700, Testing net (#0)\nI1213 13:28:21.712122 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82252\nI1213 13:28:21.712497 20613 solver.cpp:404]     Test net output #1: loss = 0.63198 (* 1 = 0.63198 loss)\nI1213 13:28:23.026338 20613 solver.cpp:228] Iteration 50700, loss = 0.154314\nI1213 13:28:23.026394 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 13:28:23.026420 20613 solver.cpp:244]     Train net output #1: loss = 0.154314 (* 1 = 0.154314 loss)\nI1213 13:28:23.112517 20613 sgd_solver.cpp:174] Iteration 50700, lr = 1.521\nI1213 13:28:23.126390 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.397796\nI1213 13:30:41.292938 20613 solver.cpp:337] Iteration 50800, Testing net (#0)\nI1213 13:32:03.080173 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78756\nI1213 13:32:03.080482 20613 solver.cpp:404]     Test net output #1: loss = 0.788132 (* 1 = 0.788132 loss)\nI1213 13:32:04.393316 20613 solver.cpp:228] Iteration 50800, loss = 0.141694\nI1213 13:32:04.393362 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 13:32:04.393388 20613 solver.cpp:244]     Train net output #1: loss = 0.141694 (* 1 = 0.141694 loss)\nI1213 13:32:04.485368 20613 sgd_solver.cpp:174] Iteration 50800, lr = 1.524\nI1213 13:32:04.499240 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.373507\nI1213 13:34:22.811786 20613 solver.cpp:337] Iteration 50900, Testing net (#0)\nI1213 13:35:43.577325 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75176\nI1213 13:35:43.577581 20613 solver.cpp:404]     Test net output #1: loss = 0.973089 (* 1 = 0.973089 loss)\nI1213 13:35:44.889395 20613 solver.cpp:228] Iteration 50900, 
loss = 0.0930522\nI1213 13:35:44.889443 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 13:35:44.889467 20613 solver.cpp:244]     Train net output #1: loss = 0.0930524 (* 1 = 0.0930524 loss)\nI1213 13:35:44.975093 20613 sgd_solver.cpp:174] Iteration 50900, lr = 1.527\nI1213 13:35:44.987903 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321918\nI1213 13:38:02.776425 20613 solver.cpp:337] Iteration 51000, Testing net (#0)\nI1213 13:39:23.527709 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79352\nI1213 13:39:23.528005 20613 solver.cpp:404]     Test net output #1: loss = 0.736859 (* 1 = 0.736859 loss)\nI1213 13:39:24.836655 20613 solver.cpp:228] Iteration 51000, loss = 0.0899194\nI1213 13:39:24.836704 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 13:39:24.836729 20613 solver.cpp:244]     Train net output #1: loss = 0.0899196 (* 1 = 0.0899196 loss)\nI1213 13:39:24.930887 20613 sgd_solver.cpp:174] Iteration 51000, lr = 1.53\nI1213 13:39:24.943598 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.282327\nI1213 13:41:42.756151 20613 solver.cpp:337] Iteration 51100, Testing net (#0)\nI1213 13:43:03.509089 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84276\nI1213 13:43:03.509421 20613 solver.cpp:404]     Test net output #1: loss = 0.538867 (* 1 = 0.538867 loss)\nI1213 13:43:04.818936 20613 solver.cpp:228] Iteration 51100, loss = 0.149419\nI1213 13:43:04.818981 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 13:43:04.818998 20613 solver.cpp:244]     Train net output #1: loss = 0.149419 (* 1 = 0.149419 loss)\nI1213 13:43:04.908865 20613 sgd_solver.cpp:174] Iteration 51100, lr = 1.533\nI1213 13:43:04.921485 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306246\nI1213 13:45:22.731745 20613 solver.cpp:337] Iteration 51200, Testing net (#0)\nI1213 13:46:43.376247 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6914\nI1213 13:46:43.376562 20613 solver.cpp:404]     Test net output 
#1: loss = 1.30855 (* 1 = 1.30855 loss)\nI1213 13:46:44.685714 20613 solver.cpp:228] Iteration 51200, loss = 0.125038\nI1213 13:46:44.685756 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 13:46:44.685772 20613 solver.cpp:244]     Train net output #1: loss = 0.125038 (* 1 = 0.125038 loss)\nI1213 13:46:44.778149 20613 sgd_solver.cpp:174] Iteration 51200, lr = 1.536\nI1213 13:46:44.790896 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.388952\nI1213 13:49:02.623277 20613 solver.cpp:337] Iteration 51300, Testing net (#0)\nI1213 13:50:23.266100 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80452\nI1213 13:50:23.266389 20613 solver.cpp:404]     Test net output #1: loss = 0.631113 (* 1 = 0.631113 loss)\nI1213 13:50:24.575032 20613 solver.cpp:228] Iteration 51300, loss = 0.140966\nI1213 13:50:24.575075 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 13:50:24.575091 20613 solver.cpp:244]     Train net output #1: loss = 0.140966 (* 1 = 0.140966 loss)\nI1213 13:50:24.667800 20613 sgd_solver.cpp:174] Iteration 51300, lr = 1.539\nI1213 13:50:24.680583 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.344089\nI1213 13:52:42.520356 20613 solver.cpp:337] Iteration 51400, Testing net (#0)\nI1213 13:54:03.179618 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71972\nI1213 13:54:03.179960 20613 solver.cpp:404]     Test net output #1: loss = 1.12169 (* 1 = 1.12169 loss)\nI1213 13:54:04.489086 20613 solver.cpp:228] Iteration 51400, loss = 0.148185\nI1213 13:54:04.489127 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 13:54:04.489145 20613 solver.cpp:244]     Train net output #1: loss = 0.148185 (* 1 = 0.148185 loss)\nI1213 13:54:04.574594 20613 sgd_solver.cpp:174] Iteration 51400, lr = 1.542\nI1213 13:54:04.587350 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322115\nI1213 13:56:22.477105 20613 solver.cpp:337] Iteration 51500, Testing net (#0)\nI1213 13:57:43.153971 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.85156\nI1213 13:57:43.154235 20613 solver.cpp:404]     Test net output #1: loss = 0.488483 (* 1 = 0.488483 loss)\nI1213 13:57:44.464040 20613 solver.cpp:228] Iteration 51500, loss = 0.124499\nI1213 13:57:44.464084 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 13:57:44.464107 20613 solver.cpp:244]     Train net output #1: loss = 0.124499 (* 1 = 0.124499 loss)\nI1213 13:57:44.555044 20613 sgd_solver.cpp:174] Iteration 51500, lr = 1.545\nI1213 13:57:44.567572 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322557\nI1213 14:00:02.371599 20613 solver.cpp:337] Iteration 51600, Testing net (#0)\nI1213 14:01:23.039501 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1213 14:01:23.039805 20613 solver.cpp:404]     Test net output #1: loss = 0.723161 (* 1 = 0.723161 loss)\nI1213 14:01:24.349517 20613 solver.cpp:228] Iteration 51600, loss = 0.118367\nI1213 14:01:24.349555 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 14:01:24.349577 20613 solver.cpp:244]     Train net output #1: loss = 0.118367 (* 1 = 0.118367 loss)\nI1213 14:01:24.441819 20613 sgd_solver.cpp:174] Iteration 51600, lr = 1.548\nI1213 14:01:24.454202 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319011\nI1213 14:03:42.293639 20613 solver.cpp:337] Iteration 51700, Testing net (#0)\nI1213 14:05:02.949538 20613 solver.cpp:404]     Test net output #0: accuracy = 0.85208\nI1213 14:05:02.949839 20613 solver.cpp:404]     Test net output #1: loss = 0.487649 (* 1 = 0.487649 loss)\nI1213 14:05:04.258631 20613 solver.cpp:228] Iteration 51700, loss = 0.0642185\nI1213 14:05:04.258673 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 14:05:04.258693 20613 solver.cpp:244]     Train net output #1: loss = 0.0642188 (* 1 = 0.0642188 loss)\nI1213 14:05:04.352064 20613 sgd_solver.cpp:174] Iteration 51700, lr = 1.551\nI1213 14:05:04.364802 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313248\nI1213 14:07:22.137094 20613 
solver.cpp:337] Iteration 51800, Testing net (#0)\nI1213 14:08:42.788313 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78344\nI1213 14:08:42.788625 20613 solver.cpp:404]     Test net output #1: loss = 0.822819 (* 1 = 0.822819 loss)\nI1213 14:08:44.097062 20613 solver.cpp:228] Iteration 51800, loss = 0.151146\nI1213 14:08:44.097102 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 14:08:44.097118 20613 solver.cpp:244]     Train net output #1: loss = 0.151147 (* 1 = 0.151147 loss)\nI1213 14:08:44.188477 20613 sgd_solver.cpp:174] Iteration 51800, lr = 1.554\nI1213 14:08:44.201102 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.392953\nI1213 14:11:01.970839 20613 solver.cpp:337] Iteration 51900, Testing net (#0)\nI1213 14:12:22.622766 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83848\nI1213 14:12:22.623098 20613 solver.cpp:404]     Test net output #1: loss = 0.544175 (* 1 = 0.544175 loss)\nI1213 14:12:23.931951 20613 solver.cpp:228] Iteration 51900, loss = 0.191898\nI1213 14:12:23.931990 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 14:12:23.932006 20613 solver.cpp:244]     Train net output #1: loss = 0.191898 (* 1 = 0.191898 loss)\nI1213 14:12:24.024368 20613 sgd_solver.cpp:174] Iteration 51900, lr = 1.557\nI1213 14:12:24.036942 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.359109\nI1213 14:14:41.793171 20613 solver.cpp:337] Iteration 52000, Testing net (#0)\nI1213 14:16:02.447577 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83204\nI1213 14:16:02.447924 20613 solver.cpp:404]     Test net output #1: loss = 0.541593 (* 1 = 0.541593 loss)\nI1213 14:16:03.756929 20613 solver.cpp:228] Iteration 52000, loss = 0.192802\nI1213 14:16:03.756961 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 14:16:03.756976 20613 solver.cpp:244]     Train net output #1: loss = 0.192802 (* 1 = 0.192802 loss)\nI1213 14:16:03.847664 20613 sgd_solver.cpp:174] Iteration 52000, lr = 
1.56\nI1213 14:16:03.860460 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30789\nI1213 14:18:21.699936 20613 solver.cpp:337] Iteration 52100, Testing net (#0)\nI1213 14:19:42.451103 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62628\nI1213 14:19:42.451397 20613 solver.cpp:404]     Test net output #1: loss = 2.15927 (* 1 = 2.15927 loss)\nI1213 14:19:43.759527 20613 solver.cpp:228] Iteration 52100, loss = 0.124019\nI1213 14:19:43.759568 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 14:19:43.759584 20613 solver.cpp:244]     Train net output #1: loss = 0.124019 (* 1 = 0.124019 loss)\nI1213 14:19:43.858729 20613 sgd_solver.cpp:174] Iteration 52100, lr = 1.563\nI1213 14:19:43.871503 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313694\nI1213 14:22:01.672538 20613 solver.cpp:337] Iteration 52200, Testing net (#0)\nI1213 14:23:22.439389 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80376\nI1213 14:23:22.439715 20613 solver.cpp:404]     Test net output #1: loss = 0.755819 (* 1 = 0.755819 loss)\nI1213 14:23:23.748610 20613 solver.cpp:228] Iteration 52200, loss = 0.121385\nI1213 14:23:23.748649 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 14:23:23.748666 20613 solver.cpp:244]     Train net output #1: loss = 0.121385 (* 1 = 0.121385 loss)\nI1213 14:23:23.839552 20613 sgd_solver.cpp:174] Iteration 52200, lr = 1.566\nI1213 14:23:23.852326 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.365547\nI1213 14:25:41.642443 20613 solver.cpp:337] Iteration 52300, Testing net (#0)\nI1213 14:27:02.415444 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78316\nI1213 14:27:02.415771 20613 solver.cpp:404]     Test net output #1: loss = 0.830159 (* 1 = 0.830159 loss)\nI1213 14:27:03.723928 20613 solver.cpp:228] Iteration 52300, loss = 0.0941218\nI1213 14:27:03.723963 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 14:27:03.723978 20613 solver.cpp:244]     Train net output #1: loss = 
0.0941221 (* 1 = 0.0941221 loss)\nI1213 14:27:03.812613 20613 sgd_solver.cpp:174] Iteration 52300, lr = 1.569\nI1213 14:27:03.825379 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.368808\nI1213 14:29:21.695997 20613 solver.cpp:337] Iteration 52400, Testing net (#0)\nI1213 14:30:42.466352 20613 solver.cpp:404]     Test net output #0: accuracy = 0.55748\nI1213 14:30:42.466691 20613 solver.cpp:404]     Test net output #1: loss = 2.25693 (* 1 = 2.25693 loss)\nI1213 14:30:43.775511 20613 solver.cpp:228] Iteration 52400, loss = 0.146766\nI1213 14:30:43.775547 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 14:30:43.775563 20613 solver.cpp:244]     Train net output #1: loss = 0.146766 (* 1 = 0.146766 loss)\nI1213 14:30:43.866259 20613 sgd_solver.cpp:174] Iteration 52400, lr = 1.572\nI1213 14:30:43.879060 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.448217\nI1213 14:33:00.872442 20613 solver.cpp:337] Iteration 52500, Testing net (#0)\nI1213 14:34:21.648207 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74152\nI1213 14:34:21.648504 20613 solver.cpp:404]     Test net output #1: loss = 0.972077 (* 1 = 0.972077 loss)\nI1213 14:34:22.957398 20613 solver.cpp:228] Iteration 52500, loss = 0.113646\nI1213 14:34:22.957442 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 14:34:22.957465 20613 solver.cpp:244]     Train net output #1: loss = 0.113647 (* 1 = 0.113647 loss)\nI1213 14:34:23.051254 20613 sgd_solver.cpp:174] Iteration 52500, lr = 1.575\nI1213 14:34:23.063724 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.339769\nI1213 14:36:40.914716 20613 solver.cpp:337] Iteration 52600, Testing net (#0)\nI1213 14:38:01.686276 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8392\nI1213 14:38:01.686535 20613 solver.cpp:404]     Test net output #1: loss = 0.52884 (* 1 = 0.52884 loss)\nI1213 14:38:02.995823 20613 solver.cpp:228] Iteration 52600, loss = 0.166865\nI1213 14:38:02.995869 20613 solver.cpp:244]     Train net output 
#0: accuracy = 0.952\nI1213 14:38:02.995899 20613 solver.cpp:244]     Train net output #1: loss = 0.166865 (* 1 = 0.166865 loss)\nI1213 14:38:03.087736 20613 sgd_solver.cpp:174] Iteration 52600, lr = 1.578\nI1213 14:38:03.100474 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.325862\nI1213 14:40:20.070291 20613 solver.cpp:337] Iteration 52700, Testing net (#0)\nI1213 14:41:40.837615 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74332\nI1213 14:41:40.837883 20613 solver.cpp:404]     Test net output #1: loss = 1.04417 (* 1 = 1.04417 loss)\nI1213 14:41:42.146610 20613 solver.cpp:228] Iteration 52700, loss = 0.142666\nI1213 14:41:42.146646 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 14:41:42.146667 20613 solver.cpp:244]     Train net output #1: loss = 0.142666 (* 1 = 0.142666 loss)\nI1213 14:41:42.239095 20613 sgd_solver.cpp:174] Iteration 52700, lr = 1.581\nI1213 14:41:42.251835 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32011\nI1213 14:44:00.170366 20613 solver.cpp:337] Iteration 52800, Testing net (#0)\nI1213 14:45:20.953104 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81028\nI1213 14:45:20.953440 20613 solver.cpp:404]     Test net output #1: loss = 0.66902 (* 1 = 0.66902 loss)\nI1213 14:45:22.262473 20613 solver.cpp:228] Iteration 52800, loss = 0.118486\nI1213 14:45:22.262518 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 14:45:22.262537 20613 solver.cpp:244]     Train net output #1: loss = 0.118487 (* 1 = 0.118487 loss)\nI1213 14:45:22.354961 20613 sgd_solver.cpp:174] Iteration 52800, lr = 1.584\nI1213 14:45:22.367684 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.33035\nI1213 14:47:39.282769 20613 solver.cpp:337] Iteration 52900, Testing net (#0)\nI1213 14:49:00.059968 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72924\nI1213 14:49:00.060271 20613 solver.cpp:404]     Test net output #1: loss = 1.20273 (* 1 = 1.20273 loss)\nI1213 14:49:01.369978 20613 solver.cpp:228] 
Iteration 52900, loss = 0.209373\nI1213 14:49:01.370023 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 14:49:01.370040 20613 solver.cpp:244]     Train net output #1: loss = 0.209373 (* 1 = 0.209373 loss)\nI1213 14:49:01.461835 20613 sgd_solver.cpp:174] Iteration 52900, lr = 1.587\nI1213 14:49:01.474750 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.361925\nI1213 14:51:19.331463 20613 solver.cpp:337] Iteration 53000, Testing net (#0)\nI1213 14:52:40.105747 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71156\nI1213 14:52:40.106036 20613 solver.cpp:404]     Test net output #1: loss = 1.19473 (* 1 = 1.19473 loss)\nI1213 14:52:41.415833 20613 solver.cpp:228] Iteration 53000, loss = 0.118918\nI1213 14:52:41.415880 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 14:52:41.415896 20613 solver.cpp:244]     Train net output #1: loss = 0.118919 (* 1 = 0.118919 loss)\nI1213 14:52:41.504901 20613 sgd_solver.cpp:174] Iteration 53000, lr = 1.59\nI1213 14:52:41.517705 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.352077\nI1213 14:54:59.378401 20613 solver.cpp:337] Iteration 53100, Testing net (#0)\nI1213 14:56:20.155035 20613 solver.cpp:404]     Test net output #0: accuracy = 0.817\nI1213 14:56:20.155328 20613 solver.cpp:404]     Test net output #1: loss = 0.690309 (* 1 = 0.690309 loss)\nI1213 14:56:21.464074 20613 solver.cpp:228] Iteration 53100, loss = 0.149389\nI1213 14:56:21.464119 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 14:56:21.464135 20613 solver.cpp:244]     Train net output #1: loss = 0.14939 (* 1 = 0.14939 loss)\nI1213 14:56:21.553619 20613 sgd_solver.cpp:174] Iteration 53100, lr = 1.593\nI1213 14:56:21.566349 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307427\nI1213 14:58:39.562450 20613 solver.cpp:337] Iteration 53200, Testing net (#0)\nI1213 15:00:00.341445 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7996\nI1213 15:00:00.341722 20613 solver.cpp:404]     Test net 
output #1: loss = 0.77153 (* 1 = 0.77153 loss)\nI1213 15:00:01.651134 20613 solver.cpp:228] Iteration 53200, loss = 0.120219\nI1213 15:00:01.651170 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 15:00:01.651185 20613 solver.cpp:244]     Train net output #1: loss = 0.12022 (* 1 = 0.12022 loss)\nI1213 15:00:01.741257 20613 sgd_solver.cpp:174] Iteration 53200, lr = 1.596\nI1213 15:00:01.754241 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.333956\nI1213 15:02:19.603945 20613 solver.cpp:337] Iteration 53300, Testing net (#0)\nI1213 15:03:40.392765 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68296\nI1213 15:03:40.393033 20613 solver.cpp:404]     Test net output #1: loss = 1.30269 (* 1 = 1.30269 loss)\nI1213 15:03:41.702579 20613 solver.cpp:228] Iteration 53300, loss = 0.166558\nI1213 15:03:41.702626 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 15:03:41.702642 20613 solver.cpp:244]     Train net output #1: loss = 0.166559 (* 1 = 0.166559 loss)\nI1213 15:03:41.794740 20613 sgd_solver.cpp:174] Iteration 53300, lr = 1.599\nI1213 15:03:41.807618 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.366841\nI1213 15:05:59.681216 20613 solver.cpp:337] Iteration 53400, Testing net (#0)\nI1213 15:07:20.454867 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77504\nI1213 15:07:20.455178 20613 solver.cpp:404]     Test net output #1: loss = 0.742931 (* 1 = 0.742931 loss)\nI1213 15:07:21.764788 20613 solver.cpp:228] Iteration 53400, loss = 0.188956\nI1213 15:07:21.764833 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 15:07:21.764850 20613 solver.cpp:244]     Train net output #1: loss = 0.188956 (* 1 = 0.188956 loss)\nI1213 15:07:21.860672 20613 sgd_solver.cpp:174] Iteration 53400, lr = 1.602\nI1213 15:07:21.873409 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3811\nI1213 15:09:39.782254 20613 solver.cpp:337] Iteration 53500, Testing net (#0)\nI1213 15:11:00.536878 20613 solver.cpp:404]     Test 
net output #0: accuracy = 0.77968\nI1213 15:11:00.537214 20613 solver.cpp:404]     Test net output #1: loss = 0.971237 (* 1 = 0.971237 loss)\nI1213 15:11:01.846065 20613 solver.cpp:228] Iteration 53500, loss = 0.11798\nI1213 15:11:01.846109 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 15:11:01.846125 20613 solver.cpp:244]     Train net output #1: loss = 0.11798 (* 1 = 0.11798 loss)\nI1213 15:11:01.940062 20613 sgd_solver.cpp:174] Iteration 53500, lr = 1.605\nI1213 15:11:01.952818 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.338915\nI1213 15:13:19.792275 20613 solver.cpp:337] Iteration 53600, Testing net (#0)\nI1213 15:14:40.547690 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77156\nI1213 15:14:40.548029 20613 solver.cpp:404]     Test net output #1: loss = 0.939415 (* 1 = 0.939415 loss)\nI1213 15:14:41.856503 20613 solver.cpp:228] Iteration 53600, loss = 0.172904\nI1213 15:14:41.856546 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 15:14:41.856564 20613 solver.cpp:244]     Train net output #1: loss = 0.172905 (* 1 = 0.172905 loss)\nI1213 15:14:41.949254 20613 sgd_solver.cpp:174] Iteration 53600, lr = 1.608\nI1213 15:14:41.961980 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.369042\nI1213 15:16:59.059042 20613 solver.cpp:337] Iteration 53700, Testing net (#0)\nI1213 15:18:19.819536 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75032\nI1213 15:18:19.819839 20613 solver.cpp:404]     Test net output #1: loss = 1.08337 (* 1 = 1.08337 loss)\nI1213 15:18:21.129075 20613 solver.cpp:228] Iteration 53700, loss = 0.205087\nI1213 15:18:21.129118 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 15:18:21.129137 20613 solver.cpp:244]     Train net output #1: loss = 0.205087 (* 1 = 0.205087 loss)\nI1213 15:18:21.222829 20613 sgd_solver.cpp:174] Iteration 53700, lr = 1.611\nI1213 15:18:21.235376 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.39616\nI1213 15:20:39.124066 20613 
solver.cpp:337] Iteration 53800, Testing net (#0)\nI1213 15:21:59.878816 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83848\nI1213 15:21:59.879068 20613 solver.cpp:404]     Test net output #1: loss = 0.529891 (* 1 = 0.529891 loss)\nI1213 15:22:01.189116 20613 solver.cpp:228] Iteration 53800, loss = 0.219911\nI1213 15:22:01.189157 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 15:22:01.189174 20613 solver.cpp:244]     Train net output #1: loss = 0.219911 (* 1 = 0.219911 loss)\nI1213 15:22:01.281635 20613 sgd_solver.cpp:174] Iteration 53800, lr = 1.614\nI1213 15:22:01.295420 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.410556\nI1213 15:24:19.105264 20613 solver.cpp:337] Iteration 53900, Testing net (#0)\nI1213 15:25:39.864822 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1213 15:25:39.865088 20613 solver.cpp:404]     Test net output #1: loss = 0.742151 (* 1 = 0.742151 loss)\nI1213 15:25:41.173774 20613 solver.cpp:228] Iteration 53900, loss = 0.160249\nI1213 15:25:41.173821 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 15:25:41.173846 20613 solver.cpp:244]     Train net output #1: loss = 0.16025 (* 1 = 0.16025 loss)\nI1213 15:25:41.265350 20613 sgd_solver.cpp:174] Iteration 53900, lr = 1.617\nI1213 15:25:41.278110 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332553\nI1213 15:27:59.014358 20613 solver.cpp:337] Iteration 54000, Testing net (#0)\nI1213 15:29:19.775501 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80348\nI1213 15:29:19.775842 20613 solver.cpp:404]     Test net output #1: loss = 0.674487 (* 1 = 0.674487 loss)\nI1213 15:29:21.084738 20613 solver.cpp:228] Iteration 54000, loss = 0.149759\nI1213 15:29:21.084784 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 15:29:21.084808 20613 solver.cpp:244]     Train net output #1: loss = 0.149759 (* 1 = 0.149759 loss)\nI1213 15:29:21.176573 20613 sgd_solver.cpp:174] Iteration 54000, lr = 1.62\nI1213 
15:29:21.189352 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307683\nI1213 15:31:39.003093 20613 solver.cpp:337] Iteration 54100, Testing net (#0)\nI1213 15:32:59.754824 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77464\nI1213 15:32:59.755125 20613 solver.cpp:404]     Test net output #1: loss = 0.869187 (* 1 = 0.869187 loss)\nI1213 15:33:01.064426 20613 solver.cpp:228] Iteration 54100, loss = 0.219257\nI1213 15:33:01.064472 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 15:33:01.064497 20613 solver.cpp:244]     Train net output #1: loss = 0.219257 (* 1 = 0.219257 loss)\nI1213 15:33:01.156270 20613 sgd_solver.cpp:174] Iteration 54100, lr = 1.623\nI1213 15:33:01.169080 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.374722\nI1213 15:35:19.059651 20613 solver.cpp:337] Iteration 54200, Testing net (#0)\nI1213 15:36:39.828693 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6896\nI1213 15:36:39.829041 20613 solver.cpp:404]     Test net output #1: loss = 1.43342 (* 1 = 1.43342 loss)\nI1213 15:36:41.138532 20613 solver.cpp:228] Iteration 54200, loss = 0.116926\nI1213 15:36:41.138581 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 15:36:41.138605 20613 solver.cpp:244]     Train net output #1: loss = 0.116926 (* 1 = 0.116926 loss)\nI1213 15:36:41.231439 20613 sgd_solver.cpp:174] Iteration 54200, lr = 1.626\nI1213 15:36:41.244191 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318044\nI1213 15:38:59.136168 20613 solver.cpp:337] Iteration 54300, Testing net (#0)\nI1213 15:40:19.894565 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73624\nI1213 15:40:19.894896 20613 solver.cpp:404]     Test net output #1: loss = 1.24341 (* 1 = 1.24341 loss)\nI1213 15:40:21.204191 20613 solver.cpp:228] Iteration 54300, loss = 0.134116\nI1213 15:40:21.204238 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 15:40:21.204263 20613 solver.cpp:244]     Train net output #1: loss = 0.134117 (* 1 = 
0.134117 loss)\nI1213 15:40:21.293514 20613 sgd_solver.cpp:174] Iteration 54300, lr = 1.629\nI1213 15:40:21.306246 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.476618\nI1213 15:42:39.196631 20613 solver.cpp:337] Iteration 54400, Testing net (#0)\nI1213 15:43:59.962503 20613 solver.cpp:404]     Test net output #0: accuracy = 0.792799\nI1213 15:43:59.962810 20613 solver.cpp:404]     Test net output #1: loss = 0.80163 (* 1 = 0.80163 loss)\nI1213 15:44:01.271256 20613 solver.cpp:228] Iteration 54400, loss = 0.234807\nI1213 15:44:01.271304 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 15:44:01.271329 20613 solver.cpp:244]     Train net output #1: loss = 0.234808 (* 1 = 0.234808 loss)\nI1213 15:44:01.361455 20613 sgd_solver.cpp:174] Iteration 54400, lr = 1.632\nI1213 15:44:01.373944 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.44898\nI1213 15:46:19.271307 20613 solver.cpp:337] Iteration 54500, Testing net (#0)\nI1213 15:47:40.031044 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81096\nI1213 15:47:40.031304 20613 solver.cpp:404]     Test net output #1: loss = 0.67126 (* 1 = 0.67126 loss)\nI1213 15:47:41.339452 20613 solver.cpp:228] Iteration 54500, loss = 0.107766\nI1213 15:47:41.339498 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 15:47:41.339522 20613 solver.cpp:244]     Train net output #1: loss = 0.107767 (* 1 = 0.107767 loss)\nI1213 15:47:41.432027 20613 sgd_solver.cpp:174] Iteration 54500, lr = 1.635\nI1213 15:47:41.444726 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376337\nI1213 15:49:59.306483 20613 solver.cpp:337] Iteration 54600, Testing net (#0)\nI1213 15:51:20.067337 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78728\nI1213 15:51:20.067641 20613 solver.cpp:404]     Test net output #1: loss = 0.845767 (* 1 = 0.845767 loss)\nI1213 15:51:21.376508 20613 solver.cpp:228] Iteration 54600, loss = 0.127799\nI1213 15:51:21.376555 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.952\nI1213 15:51:21.376579 20613 solver.cpp:244]     Train net output #1: loss = 0.1278 (* 1 = 0.1278 loss)\nI1213 15:51:21.465616 20613 sgd_solver.cpp:174] Iteration 54600, lr = 1.638\nI1213 15:51:21.478380 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.352291\nI1213 15:53:39.341375 20613 solver.cpp:337] Iteration 54700, Testing net (#0)\nI1213 15:55:00.105088 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81636\nI1213 15:55:00.105391 20613 solver.cpp:404]     Test net output #1: loss = 0.624559 (* 1 = 0.624559 loss)\nI1213 15:55:01.415129 20613 solver.cpp:228] Iteration 54700, loss = 0.147226\nI1213 15:55:01.415174 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 15:55:01.415199 20613 solver.cpp:244]     Train net output #1: loss = 0.147226 (* 1 = 0.147226 loss)\nI1213 15:55:01.502095 20613 sgd_solver.cpp:174] Iteration 54700, lr = 1.641\nI1213 15:55:01.514853 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.394905\nI1213 15:57:19.358661 20613 solver.cpp:337] Iteration 54800, Testing net (#0)\nI1213 15:58:40.125906 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75728\nI1213 15:58:40.126250 20613 solver.cpp:404]     Test net output #1: loss = 1.03606 (* 1 = 1.03606 loss)\nI1213 15:58:41.435586 20613 solver.cpp:228] Iteration 54800, loss = 0.146943\nI1213 15:58:41.435622 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 15:58:41.435647 20613 solver.cpp:244]     Train net output #1: loss = 0.146944 (* 1 = 0.146944 loss)\nI1213 15:58:41.530014 20613 sgd_solver.cpp:174] Iteration 54800, lr = 1.644\nI1213 15:58:41.542709 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300697\nI1213 16:00:59.411308 20613 solver.cpp:337] Iteration 54900, Testing net (#0)\nI1213 16:02:20.071051 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70284\nI1213 16:02:20.071347 20613 solver.cpp:404]     Test net output #1: loss = 1.43485 (* 1 = 1.43485 loss)\nI1213 16:02:21.380866 20613 solver.cpp:228] Iteration 54900, loss 
= 0.153688\nI1213 16:02:21.380916 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 16:02:21.380941 20613 solver.cpp:244]     Train net output #1: loss = 0.153688 (* 1 = 0.153688 loss)\nI1213 16:02:21.473505 20613 sgd_solver.cpp:174] Iteration 54900, lr = 1.647\nI1213 16:02:21.486255 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337746\nI1213 16:04:39.308578 20613 solver.cpp:337] Iteration 55000, Testing net (#0)\nI1213 16:05:59.971513 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7834\nI1213 16:05:59.971781 20613 solver.cpp:404]     Test net output #1: loss = 0.839747 (* 1 = 0.839747 loss)\nI1213 16:06:01.280797 20613 solver.cpp:228] Iteration 55000, loss = 0.140952\nI1213 16:06:01.280835 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 16:06:01.280858 20613 solver.cpp:244]     Train net output #1: loss = 0.140952 (* 1 = 0.140952 loss)\nI1213 16:06:01.374789 20613 sgd_solver.cpp:174] Iteration 55000, lr = 1.65\nI1213 16:06:01.387460 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.34205\nI1213 16:08:19.239471 20613 solver.cpp:337] Iteration 55100, Testing net (#0)\nI1213 16:09:39.900157 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84508\nI1213 16:09:39.900463 20613 solver.cpp:404]     Test net output #1: loss = 0.518462 (* 1 = 0.518462 loss)\nI1213 16:09:41.208591 20613 solver.cpp:228] Iteration 55100, loss = 0.110661\nI1213 16:09:41.208638 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 16:09:41.208664 20613 solver.cpp:244]     Train net output #1: loss = 0.110662 (* 1 = 0.110662 loss)\nI1213 16:09:41.301285 20613 sgd_solver.cpp:174] Iteration 55100, lr = 1.653\nI1213 16:09:41.313992 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.299192\nI1213 16:11:59.085841 20613 solver.cpp:337] Iteration 55200, Testing net (#0)\nI1213 16:13:19.736799 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77216\nI1213 16:13:19.737152 20613 solver.cpp:404]     Test net output #1: loss = 
0.909718 (* 1 = 0.909718 loss)\nI1213 16:13:21.046494 20613 solver.cpp:228] Iteration 55200, loss = 0.137904\nI1213 16:13:21.046540 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 16:13:21.046566 20613 solver.cpp:244]     Train net output #1: loss = 0.137904 (* 1 = 0.137904 loss)\nI1213 16:13:21.134836 20613 sgd_solver.cpp:174] Iteration 55200, lr = 1.656\nI1213 16:13:21.147580 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332032\nI1213 16:15:38.892696 20613 solver.cpp:337] Iteration 55300, Testing net (#0)\nI1213 16:16:59.548032 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76868\nI1213 16:16:59.548329 20613 solver.cpp:404]     Test net output #1: loss = 0.860211 (* 1 = 0.860211 loss)\nI1213 16:17:00.857913 20613 solver.cpp:228] Iteration 55300, loss = 0.148568\nI1213 16:17:00.857950 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 16:17:00.857973 20613 solver.cpp:244]     Train net output #1: loss = 0.148569 (* 1 = 0.148569 loss)\nI1213 16:17:00.950331 20613 sgd_solver.cpp:174] Iteration 55300, lr = 1.659\nI1213 16:17:00.963044 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.352603\nI1213 16:19:18.775672 20613 solver.cpp:337] Iteration 55400, Testing net (#0)\nI1213 16:20:39.437885 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77484\nI1213 16:20:39.438218 20613 solver.cpp:404]     Test net output #1: loss = 0.866196 (* 1 = 0.866196 loss)\nI1213 16:20:40.747632 20613 solver.cpp:228] Iteration 55400, loss = 0.108271\nI1213 16:20:40.747678 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 16:20:40.747704 20613 solver.cpp:244]     Train net output #1: loss = 0.108272 (* 1 = 0.108272 loss)\nI1213 16:20:40.841393 20613 sgd_solver.cpp:174] Iteration 55400, lr = 1.662\nI1213 16:20:40.854089 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295444\nI1213 16:22:58.639761 20613 solver.cpp:337] Iteration 55500, Testing net (#0)\nI1213 16:24:19.273672 20613 solver.cpp:404]     Test net output 
#0: accuracy = 0.70888\nI1213 16:24:19.274024 20613 solver.cpp:404]     Test net output #1: loss = 1.36298 (* 1 = 1.36298 loss)\nI1213 16:24:20.583431 20613 solver.cpp:228] Iteration 55500, loss = 0.167119\nI1213 16:24:20.583475 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 16:24:20.583499 20613 solver.cpp:244]     Train net output #1: loss = 0.16712 (* 1 = 0.16712 loss)\nI1213 16:24:20.673749 20613 sgd_solver.cpp:174] Iteration 55500, lr = 1.665\nI1213 16:24:20.686489 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.401744\nI1213 16:26:38.514248 20613 solver.cpp:337] Iteration 55600, Testing net (#0)\nI1213 16:27:59.148761 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67904\nI1213 16:27:59.149058 20613 solver.cpp:404]     Test net output #1: loss = 1.55244 (* 1 = 1.55244 loss)\nI1213 16:28:00.458323 20613 solver.cpp:228] Iteration 55600, loss = 0.193885\nI1213 16:28:00.458370 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 16:28:00.458394 20613 solver.cpp:244]     Train net output #1: loss = 0.193886 (* 1 = 0.193886 loss)\nI1213 16:28:00.548137 20613 sgd_solver.cpp:174] Iteration 55600, lr = 1.668\nI1213 16:28:00.560715 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.387391\nI1213 16:30:18.365231 20613 solver.cpp:337] Iteration 55700, Testing net (#0)\nI1213 16:31:39.012637 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70152\nI1213 16:31:39.012925 20613 solver.cpp:404]     Test net output #1: loss = 1.37599 (* 1 = 1.37599 loss)\nI1213 16:31:40.322350 20613 solver.cpp:228] Iteration 55700, loss = 0.0943518\nI1213 16:31:40.322397 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 16:31:40.322422 20613 solver.cpp:244]     Train net output #1: loss = 0.0943523 (* 1 = 0.0943523 loss)\nI1213 16:31:40.414750 20613 sgd_solver.cpp:174] Iteration 55700, lr = 1.671\nI1213 16:31:40.427533 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343599\nI1213 16:33:58.181646 20613 solver.cpp:337] 
Iteration 55800, Testing net (#0)\nI1213 16:35:18.848320 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77884\nI1213 16:35:18.848673 20613 solver.cpp:404]     Test net output #1: loss = 0.795981 (* 1 = 0.795981 loss)\nI1213 16:35:20.157974 20613 solver.cpp:228] Iteration 55800, loss = 0.125749\nI1213 16:35:20.158013 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 16:35:20.158035 20613 solver.cpp:244]     Train net output #1: loss = 0.125749 (* 1 = 0.125749 loss)\nI1213 16:35:20.245908 20613 sgd_solver.cpp:174] Iteration 55800, lr = 1.674\nI1213 16:35:20.258373 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290098\nI1213 16:37:38.058943 20613 solver.cpp:337] Iteration 55900, Testing net (#0)\nI1213 16:38:58.795428 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79236\nI1213 16:38:58.795764 20613 solver.cpp:404]     Test net output #1: loss = 0.77204 (* 1 = 0.77204 loss)\nI1213 16:39:00.104032 20613 solver.cpp:228] Iteration 55900, loss = 0.0753469\nI1213 16:39:00.104071 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1213 16:39:00.104094 20613 solver.cpp:244]     Train net output #1: loss = 0.0753474 (* 1 = 0.0753474 loss)\nI1213 16:39:00.197046 20613 sgd_solver.cpp:174] Iteration 55900, lr = 1.677\nI1213 16:39:00.209777 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.311829\nI1213 16:41:18.028548 20613 solver.cpp:337] Iteration 56000, Testing net (#0)\nI1213 16:42:38.779531 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78408\nI1213 16:42:38.779897 20613 solver.cpp:404]     Test net output #1: loss = 0.72943 (* 1 = 0.72943 loss)\nI1213 16:42:40.089648 20613 solver.cpp:228] Iteration 56000, loss = 0.232246\nI1213 16:42:40.089692 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 16:42:40.089717 20613 solver.cpp:244]     Train net output #1: loss = 0.232247 (* 1 = 0.232247 loss)\nI1213 16:42:40.180433 20613 sgd_solver.cpp:174] Iteration 56000, lr = 1.68\nI1213 16:42:40.193179 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322415\nI1213 16:44:58.009765 20613 solver.cpp:337] Iteration 56100, Testing net (#0)\nI1213 16:46:18.766073 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73576\nI1213 16:46:18.766389 20613 solver.cpp:404]     Test net output #1: loss = 1.03454 (* 1 = 1.03454 loss)\nI1213 16:46:20.074954 20613 solver.cpp:228] Iteration 56100, loss = 0.0870113\nI1213 16:46:20.075000 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 16:46:20.075024 20613 solver.cpp:244]     Train net output #1: loss = 0.0870117 (* 1 = 0.0870117 loss)\nI1213 16:46:20.164753 20613 sgd_solver.cpp:174] Iteration 56100, lr = 1.683\nI1213 16:46:20.177377 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.298844\nI1213 16:48:38.030589 20613 solver.cpp:337] Iteration 56200, Testing net (#0)\nI1213 16:49:58.783931 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81024\nI1213 16:49:58.784193 20613 solver.cpp:404]     Test net output #1: loss = 0.665216 (* 1 = 0.665216 loss)\nI1213 16:50:00.093330 20613 solver.cpp:228] Iteration 56200, loss = 0.242544\nI1213 16:50:00.093367 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 16:50:00.093391 20613 solver.cpp:244]     Train net output #1: loss = 0.242544 (* 1 = 0.242544 loss)\nI1213 16:50:00.185106 20613 sgd_solver.cpp:174] Iteration 56200, lr = 1.686\nI1213 16:50:00.197805 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.396217\nI1213 16:52:17.979183 20613 solver.cpp:337] Iteration 56300, Testing net (#0)\nI1213 16:53:38.730219 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80972\nI1213 16:53:38.730526 20613 solver.cpp:404]     Test net output #1: loss = 0.593074 (* 1 = 0.593074 loss)\nI1213 16:53:40.040135 20613 solver.cpp:228] Iteration 56300, loss = 0.207538\nI1213 16:53:40.040174 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 16:53:40.040197 20613 solver.cpp:244]     Train net output #1: loss = 0.207538 (* 1 = 0.207538 
loss)\nI1213 16:53:40.130117 20613 sgd_solver.cpp:174] Iteration 56300, lr = 1.689\nI1213 16:53:40.142881 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345472\nI1213 16:55:57.999275 20613 solver.cpp:337] Iteration 56400, Testing net (#0)\nI1213 16:57:18.762933 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75064\nI1213 16:57:18.763280 20613 solver.cpp:404]     Test net output #1: loss = 0.923374 (* 1 = 0.923374 loss)\nI1213 16:57:20.071923 20613 solver.cpp:228] Iteration 56400, loss = 0.135589\nI1213 16:57:20.071969 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 16:57:20.071993 20613 solver.cpp:244]     Train net output #1: loss = 0.13559 (* 1 = 0.13559 loss)\nI1213 16:57:20.164476 20613 sgd_solver.cpp:174] Iteration 56400, lr = 1.692\nI1213 16:57:20.177000 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305588\nI1213 16:59:38.167520 20613 solver.cpp:337] Iteration 56500, Testing net (#0)\nI1213 17:00:58.928381 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72396\nI1213 17:00:58.928702 20613 solver.cpp:404]     Test net output #1: loss = 1.12225 (* 1 = 1.12225 loss)\nI1213 17:01:00.238538 20613 solver.cpp:228] Iteration 56500, loss = 0.130827\nI1213 17:01:00.238582 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 17:01:00.238606 20613 solver.cpp:244]     Train net output #1: loss = 0.130827 (* 1 = 0.130827 loss)\nI1213 17:01:00.329109 20613 sgd_solver.cpp:174] Iteration 56500, lr = 1.695\nI1213 17:01:00.341894 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35968\nI1213 17:03:18.182065 20613 solver.cpp:337] Iteration 56600, Testing net (#0)\nI1213 17:04:38.957043 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80404\nI1213 17:04:38.957373 20613 solver.cpp:404]     Test net output #1: loss = 0.744349 (* 1 = 0.744349 loss)\nI1213 17:04:40.267132 20613 solver.cpp:228] Iteration 56600, loss = 0.134272\nI1213 17:04:40.267179 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 
17:04:40.267204 20613 solver.cpp:244]     Train net output #1: loss = 0.134272 (* 1 = 0.134272 loss)\nI1213 17:04:40.359886 20613 sgd_solver.cpp:174] Iteration 56600, lr = 1.698\nI1213 17:04:40.372500 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307815\nI1213 17:06:58.244256 20613 solver.cpp:337] Iteration 56700, Testing net (#0)\nI1213 17:08:19.020733 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71256\nI1213 17:08:19.021086 20613 solver.cpp:404]     Test net output #1: loss = 1.35822 (* 1 = 1.35822 loss)\nI1213 17:08:20.330394 20613 solver.cpp:228] Iteration 56700, loss = 0.191882\nI1213 17:08:20.330433 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 17:08:20.330456 20613 solver.cpp:244]     Train net output #1: loss = 0.191883 (* 1 = 0.191883 loss)\nI1213 17:08:20.425920 20613 sgd_solver.cpp:174] Iteration 56700, lr = 1.701\nI1213 17:08:20.438690 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.360785\nI1213 17:10:38.370523 20613 solver.cpp:337] Iteration 56800, Testing net (#0)\nI1213 17:11:59.135704 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73524\nI1213 17:11:59.136016 20613 solver.cpp:404]     Test net output #1: loss = 1.20062 (* 1 = 1.20062 loss)\nI1213 17:12:00.446202 20613 solver.cpp:228] Iteration 56800, loss = 0.220982\nI1213 17:12:00.446249 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 17:12:00.446274 20613 solver.cpp:244]     Train net output #1: loss = 0.220983 (* 1 = 0.220983 loss)\nI1213 17:12:00.536113 20613 sgd_solver.cpp:174] Iteration 56800, lr = 1.704\nI1213 17:12:00.548825 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.325279\nI1213 17:14:18.518045 20613 solver.cpp:337] Iteration 56900, Testing net (#0)\nI1213 17:15:39.284937 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82404\nI1213 17:15:39.285236 20613 solver.cpp:404]     Test net output #1: loss = 0.612851 (* 1 = 0.612851 loss)\nI1213 17:15:40.593922 20613 solver.cpp:228] Iteration 56900, loss = 
0.134631\nI1213 17:15:40.593969 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 17:15:40.593993 20613 solver.cpp:244]     Train net output #1: loss = 0.134632 (* 1 = 0.134632 loss)\nI1213 17:15:40.686488 20613 sgd_solver.cpp:174] Iteration 56900, lr = 1.707\nI1213 17:15:40.699240 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345706\nI1213 17:17:58.612726 20613 solver.cpp:337] Iteration 57000, Testing net (#0)\nI1213 17:19:19.379077 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7216\nI1213 17:19:19.379401 20613 solver.cpp:404]     Test net output #1: loss = 1.26509 (* 1 = 1.26509 loss)\nI1213 17:19:20.688086 20613 solver.cpp:228] Iteration 57000, loss = 0.160507\nI1213 17:19:20.688133 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 17:19:20.688158 20613 solver.cpp:244]     Train net output #1: loss = 0.160508 (* 1 = 0.160508 loss)\nI1213 17:19:20.779950 20613 sgd_solver.cpp:174] Iteration 57000, lr = 1.71\nI1213 17:19:20.792785 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.34354\nI1213 17:21:38.658361 20613 solver.cpp:337] Iteration 57100, Testing net (#0)\nI1213 17:22:59.408320 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78124\nI1213 17:22:59.408664 20613 solver.cpp:404]     Test net output #1: loss = 0.732015 (* 1 = 0.732015 loss)\nI1213 17:23:00.718232 20613 solver.cpp:228] Iteration 57100, loss = 0.147764\nI1213 17:23:00.718281 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 17:23:00.718305 20613 solver.cpp:244]     Train net output #1: loss = 0.147765 (* 1 = 0.147765 loss)\nI1213 17:23:00.812034 20613 sgd_solver.cpp:174] Iteration 57100, lr = 1.713\nI1213 17:23:00.824782 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3728\nI1213 17:25:18.745333 20613 solver.cpp:337] Iteration 57200, Testing net (#0)\nI1213 17:26:39.489166 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76528\nI1213 17:26:39.489506 20613 solver.cpp:404]     Test net output #1: loss = 
0.917029 (* 1 = 0.917029 loss)\nI1213 17:26:40.799307 20613 solver.cpp:228] Iteration 57200, loss = 0.24513\nI1213 17:26:40.799355 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 17:26:40.799382 20613 solver.cpp:244]     Train net output #1: loss = 0.245131 (* 1 = 0.245131 loss)\nI1213 17:26:40.889765 20613 sgd_solver.cpp:174] Iteration 57200, lr = 1.716\nI1213 17:26:40.902524 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343523\nI1213 17:28:58.866194 20613 solver.cpp:337] Iteration 57300, Testing net (#0)\nI1213 17:30:19.631682 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73824\nI1213 17:30:19.632035 20613 solver.cpp:404]     Test net output #1: loss = 0.942116 (* 1 = 0.942116 loss)\nI1213 17:30:20.941843 20613 solver.cpp:228] Iteration 57300, loss = 0.104081\nI1213 17:30:20.941884 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 17:30:20.941912 20613 solver.cpp:244]     Train net output #1: loss = 0.104081 (* 1 = 0.104081 loss)\nI1213 17:30:21.031579 20613 sgd_solver.cpp:174] Iteration 57300, lr = 1.719\nI1213 17:30:21.044414 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.311\nI1213 17:32:39.033999 20613 solver.cpp:337] Iteration 57400, Testing net (#0)\nI1213 17:33:59.799470 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75284\nI1213 17:33:59.799757 20613 solver.cpp:404]     Test net output #1: loss = 1.01789 (* 1 = 1.01789 loss)\nI1213 17:34:01.109637 20613 solver.cpp:228] Iteration 57400, loss = 0.152255\nI1213 17:34:01.109684 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 17:34:01.109709 20613 solver.cpp:244]     Train net output #1: loss = 0.152255 (* 1 = 0.152255 loss)\nI1213 17:34:01.199735 20613 sgd_solver.cpp:174] Iteration 57400, lr = 1.722\nI1213 17:34:01.212272 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321583\nI1213 17:36:19.128837 20613 solver.cpp:337] Iteration 57500, Testing net (#0)\nI1213 17:37:39.914057 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.72452\nI1213 17:37:39.914348 20613 solver.cpp:404]     Test net output #1: loss = 1.19424 (* 1 = 1.19424 loss)\nI1213 17:37:41.224135 20613 solver.cpp:228] Iteration 57500, loss = 0.132186\nI1213 17:37:41.224184 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 17:37:41.224207 20613 solver.cpp:244]     Train net output #1: loss = 0.132186 (* 1 = 0.132186 loss)\nI1213 17:37:41.314205 20613 sgd_solver.cpp:174] Iteration 57500, lr = 1.725\nI1213 17:37:41.326932 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.386177\nI1213 17:39:59.190743 20613 solver.cpp:337] Iteration 57600, Testing net (#0)\nI1213 17:41:19.971493 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7906\nI1213 17:41:19.971843 20613 solver.cpp:404]     Test net output #1: loss = 0.8271 (* 1 = 0.8271 loss)\nI1213 17:41:21.283864 20613 solver.cpp:228] Iteration 57600, loss = 0.169482\nI1213 17:41:21.283897 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 17:41:21.283913 20613 solver.cpp:244]     Train net output #1: loss = 0.169483 (* 1 = 0.169483 loss)\nI1213 17:41:21.372658 20613 sgd_solver.cpp:174] Iteration 57600, lr = 1.728\nI1213 17:41:21.385445 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.304867\nI1213 17:43:39.415151 20613 solver.cpp:337] Iteration 57700, Testing net (#0)\nI1213 17:45:00.165884 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72672\nI1213 17:45:00.166191 20613 solver.cpp:404]     Test net output #1: loss = 1.28585 (* 1 = 1.28585 loss)\nI1213 17:45:01.477388 20613 solver.cpp:228] Iteration 57700, loss = 0.139748\nI1213 17:45:01.477429 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 17:45:01.477445 20613 solver.cpp:244]     Train net output #1: loss = 0.139748 (* 1 = 0.139748 loss)\nI1213 17:45:01.571694 20613 sgd_solver.cpp:174] Iteration 57700, lr = 1.731\nI1213 17:45:01.584487 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343711\nI1213 17:47:19.694257 20613 solver.cpp:337] Iteration 
57800, Testing net (#0)\nI1213 17:48:40.438241 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82488\nI1213 17:48:40.438563 20613 solver.cpp:404]     Test net output #1: loss = 0.633844 (* 1 = 0.633844 loss)\nI1213 17:48:41.749333 20613 solver.cpp:228] Iteration 57800, loss = 0.131343\nI1213 17:48:41.749374 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 17:48:41.749390 20613 solver.cpp:244]     Train net output #1: loss = 0.131344 (* 1 = 0.131344 loss)\nI1213 17:48:41.841768 20613 sgd_solver.cpp:174] Iteration 57800, lr = 1.734\nI1213 17:48:41.854526 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.324612\nI1213 17:50:59.663388 20613 solver.cpp:337] Iteration 57900, Testing net (#0)\nI1213 17:52:20.396607 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81092\nI1213 17:52:20.396968 20613 solver.cpp:404]     Test net output #1: loss = 0.618191 (* 1 = 0.618191 loss)\nI1213 17:52:21.706040 20613 solver.cpp:228] Iteration 57900, loss = 0.185639\nI1213 17:52:21.706082 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 17:52:21.706099 20613 solver.cpp:244]     Train net output #1: loss = 0.18564 (* 1 = 0.18564 loss)\nI1213 17:52:21.796663 20613 sgd_solver.cpp:174] Iteration 57900, lr = 1.737\nI1213 17:52:21.809370 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.346883\nI1213 17:54:39.642861 20613 solver.cpp:337] Iteration 58000, Testing net (#0)\nI1213 17:56:00.381121 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7828\nI1213 17:56:00.381422 20613 solver.cpp:404]     Test net output #1: loss = 0.835381 (* 1 = 0.835381 loss)\nI1213 17:56:01.690671 20613 solver.cpp:228] Iteration 58000, loss = 0.191946\nI1213 17:56:01.690712 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 17:56:01.690728 20613 solver.cpp:244]     Train net output #1: loss = 0.191946 (* 1 = 0.191946 loss)\nI1213 17:56:01.777675 20613 sgd_solver.cpp:174] Iteration 58000, lr = 1.74\nI1213 17:56:01.790437 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.332477\nI1213 17:58:19.638809 20613 solver.cpp:337] Iteration 58100, Testing net (#0)\nI1213 17:59:40.391628 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74256\nI1213 17:59:40.391960 20613 solver.cpp:404]     Test net output #1: loss = 0.873015 (* 1 = 0.873015 loss)\nI1213 17:59:41.700723 20613 solver.cpp:228] Iteration 58100, loss = 0.13719\nI1213 17:59:41.700764 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 17:59:41.700780 20613 solver.cpp:244]     Train net output #1: loss = 0.13719 (* 1 = 0.13719 loss)\nI1213 17:59:41.792526 20613 sgd_solver.cpp:174] Iteration 58100, lr = 1.743\nI1213 17:59:41.805291 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343136\nI1213 18:01:59.582104 20613 solver.cpp:337] Iteration 58200, Testing net (#0)\nI1213 18:03:20.323251 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80256\nI1213 18:03:20.323592 20613 solver.cpp:404]     Test net output #1: loss = 0.748063 (* 1 = 0.748063 loss)\nI1213 18:03:21.632954 20613 solver.cpp:228] Iteration 58200, loss = 0.13574\nI1213 18:03:21.632999 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 18:03:21.633015 20613 solver.cpp:244]     Train net output #1: loss = 0.135741 (* 1 = 0.135741 loss)\nI1213 18:03:21.723768 20613 sgd_solver.cpp:174] Iteration 58200, lr = 1.746\nI1213 18:03:21.736526 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345099\nI1213 18:05:39.608820 20613 solver.cpp:337] Iteration 58300, Testing net (#0)\nI1213 18:07:00.349997 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68924\nI1213 18:07:00.350335 20613 solver.cpp:404]     Test net output #1: loss = 1.48432 (* 1 = 1.48432 loss)\nI1213 18:07:01.659898 20613 solver.cpp:228] Iteration 58300, loss = 0.180675\nI1213 18:07:01.659934 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 18:07:01.659950 20613 solver.cpp:244]     Train net output #1: loss = 0.180676 (* 1 = 0.180676 loss)\nI1213 
18:07:01.750288 20613 sgd_solver.cpp:174] Iteration 58300, lr = 1.749\nI1213 18:07:01.762974 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415655\nI1213 18:09:19.540223 20613 solver.cpp:337] Iteration 58400, Testing net (#0)\nI1213 18:10:40.293067 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84152\nI1213 18:10:40.293402 20613 solver.cpp:404]     Test net output #1: loss = 0.506091 (* 1 = 0.506091 loss)\nI1213 18:10:41.602430 20613 solver.cpp:228] Iteration 58400, loss = 0.204197\nI1213 18:10:41.602474 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 18:10:41.602490 20613 solver.cpp:244]     Train net output #1: loss = 0.204198 (* 1 = 0.204198 loss)\nI1213 18:10:41.693806 20613 sgd_solver.cpp:174] Iteration 58400, lr = 1.752\nI1213 18:10:41.706524 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303674\nI1213 18:12:59.480063 20613 solver.cpp:337] Iteration 58500, Testing net (#0)\nI1213 18:14:20.231292 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78696\nI1213 18:14:20.231597 20613 solver.cpp:404]     Test net output #1: loss = 0.725499 (* 1 = 0.725499 loss)\nI1213 18:14:21.541250 20613 solver.cpp:228] Iteration 58500, loss = 0.177065\nI1213 18:14:21.541293 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:14:21.541311 20613 solver.cpp:244]     Train net output #1: loss = 0.177065 (* 1 = 0.177065 loss)\nI1213 18:14:21.633047 20613 sgd_solver.cpp:174] Iteration 58500, lr = 1.755\nI1213 18:14:21.645854 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32785\nI1213 18:16:39.412086 20613 solver.cpp:337] Iteration 58600, Testing net (#0)\nI1213 18:18:00.105209 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74908\nI1213 18:18:00.105504 20613 solver.cpp:404]     Test net output #1: loss = 0.990725 (* 1 = 0.990725 loss)\nI1213 18:18:01.414459 20613 solver.cpp:228] Iteration 58600, loss = 0.165203\nI1213 18:18:01.414492 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 
18:18:01.414508 20613 solver.cpp:244]     Train net output #1: loss = 0.165204 (* 1 = 0.165204 loss)\nI1213 18:18:01.506140 20613 sgd_solver.cpp:174] Iteration 58600, lr = 1.758\nI1213 18:18:01.518909 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.344011\nI1213 18:20:19.354629 20613 solver.cpp:337] Iteration 58700, Testing net (#0)\nI1213 18:21:40.002532 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78372\nI1213 18:21:40.002790 20613 solver.cpp:404]     Test net output #1: loss = 0.785384 (* 1 = 0.785384 loss)\nI1213 18:21:41.311400 20613 solver.cpp:228] Iteration 58700, loss = 0.109246\nI1213 18:21:41.311445 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 18:21:41.311461 20613 solver.cpp:244]     Train net output #1: loss = 0.109247 (* 1 = 0.109247 loss)\nI1213 18:21:41.404273 20613 sgd_solver.cpp:174] Iteration 58700, lr = 1.761\nI1213 18:21:41.417104 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316836\nI1213 18:23:59.248347 20613 solver.cpp:337] Iteration 58800, Testing net (#0)\nI1213 18:25:19.893807 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7922\nI1213 18:25:19.894116 20613 solver.cpp:404]     Test net output #1: loss = 0.773117 (* 1 = 0.773117 loss)\nI1213 18:25:21.203542 20613 solver.cpp:228] Iteration 58800, loss = 0.168542\nI1213 18:25:21.203586 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 18:25:21.203603 20613 solver.cpp:244]     Train net output #1: loss = 0.168543 (* 1 = 0.168543 loss)\nI1213 18:25:21.290207 20613 sgd_solver.cpp:174] Iteration 58800, lr = 1.764\nI1213 18:25:21.302945 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.324418\nI1213 18:27:39.177331 20613 solver.cpp:337] Iteration 58900, Testing net (#0)\nI1213 18:28:59.823774 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7234\nI1213 18:28:59.824124 20613 solver.cpp:404]     Test net output #1: loss = 1.19927 (* 1 = 1.19927 loss)\nI1213 18:29:01.133715 20613 solver.cpp:228] Iteration 58900, loss = 
0.146832\nI1213 18:29:01.133761 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:29:01.133779 20613 solver.cpp:244]     Train net output #1: loss = 0.146833 (* 1 = 0.146833 loss)\nI1213 18:29:01.222060 20613 sgd_solver.cpp:174] Iteration 58900, lr = 1.767\nI1213 18:29:01.234732 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349832\nI1213 18:31:19.153311 20613 solver.cpp:337] Iteration 59000, Testing net (#0)\nI1213 18:32:39.776072 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82464\nI1213 18:32:39.776365 20613 solver.cpp:404]     Test net output #1: loss = 0.674901 (* 1 = 0.674901 loss)\nI1213 18:32:41.085608 20613 solver.cpp:228] Iteration 59000, loss = 0.168952\nI1213 18:32:41.085652 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:32:41.085669 20613 solver.cpp:244]     Train net output #1: loss = 0.168952 (* 1 = 0.168952 loss)\nI1213 18:32:41.185472 20613 sgd_solver.cpp:174] Iteration 59000, lr = 1.77\nI1213 18:32:41.198287 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.36077\nI1213 18:34:59.263381 20613 solver.cpp:337] Iteration 59100, Testing net (#0)\nI1213 18:36:19.889057 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82272\nI1213 18:36:19.889313 20613 solver.cpp:404]     Test net output #1: loss = 0.666168 (* 1 = 0.666168 loss)\nI1213 18:36:21.198710 20613 solver.cpp:228] Iteration 59100, loss = 0.213303\nI1213 18:36:21.198753 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 18:36:21.198770 20613 solver.cpp:244]     Train net output #1: loss = 0.213304 (* 1 = 0.213304 loss)\nI1213 18:36:21.296749 20613 sgd_solver.cpp:174] Iteration 59100, lr = 1.773\nI1213 18:36:21.309368 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332791\nI1213 18:38:39.334441 20613 solver.cpp:337] Iteration 59200, Testing net (#0)\nI1213 18:39:59.955384 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81308\nI1213 18:39:59.955639 20613 solver.cpp:404]     Test net output #1: loss = 
0.623215 (* 1 = 0.623215 loss)\nI1213 18:40:01.264300 20613 solver.cpp:228] Iteration 59200, loss = 0.116095\nI1213 18:40:01.264345 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:40:01.264364 20613 solver.cpp:244]     Train net output #1: loss = 0.116095 (* 1 = 0.116095 loss)\nI1213 18:40:01.364009 20613 sgd_solver.cpp:174] Iteration 59200, lr = 1.776\nI1213 18:40:01.376720 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305524\nI1213 18:42:19.316303 20613 solver.cpp:337] Iteration 59300, Testing net (#0)\nI1213 18:43:39.953315 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76204\nI1213 18:43:39.953584 20613 solver.cpp:404]     Test net output #1: loss = 0.983083 (* 1 = 0.983083 loss)\nI1213 18:43:41.262260 20613 solver.cpp:228] Iteration 59300, loss = 0.112288\nI1213 18:43:41.262295 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 18:43:41.262311 20613 solver.cpp:244]     Train net output #1: loss = 0.112289 (* 1 = 0.112289 loss)\nI1213 18:43:41.357033 20613 sgd_solver.cpp:174] Iteration 59300, lr = 1.779\nI1213 18:43:41.369755 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.287607\nI1213 18:45:59.323838 20613 solver.cpp:337] Iteration 59400, Testing net (#0)\nI1213 18:47:19.958573 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82116\nI1213 18:47:19.958884 20613 solver.cpp:404]     Test net output #1: loss = 0.557861 (* 1 = 0.557861 loss)\nI1213 18:47:21.268352 20613 solver.cpp:228] Iteration 59400, loss = 0.175304\nI1213 18:47:21.268399 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 18:47:21.268424 20613 solver.cpp:244]     Train net output #1: loss = 0.175305 (* 1 = 0.175305 loss)\nI1213 18:47:21.366286 20613 sgd_solver.cpp:174] Iteration 59400, lr = 1.782\nI1213 18:47:21.379019 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.373418\nI1213 18:49:39.295210 20613 solver.cpp:337] Iteration 59500, Testing net (#0)\nI1213 18:50:59.942188 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.715\nI1213 18:50:59.942570 20613 solver.cpp:404]     Test net output #1: loss = 1.03524 (* 1 = 1.03524 loss)\nI1213 18:51:01.252385 20613 solver.cpp:228] Iteration 59500, loss = 0.0997057\nI1213 18:51:01.252434 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 18:51:01.252459 20613 solver.cpp:244]     Train net output #1: loss = 0.0997063 (* 1 = 0.0997063 loss)\nI1213 18:51:01.346920 20613 sgd_solver.cpp:174] Iteration 59500, lr = 1.785\nI1213 18:51:01.359555 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32549\nI1213 18:53:19.282440 20613 solver.cpp:337] Iteration 59600, Testing net (#0)\nI1213 18:54:40.035593 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65352\nI1213 18:54:40.035898 20613 solver.cpp:404]     Test net output #1: loss = 1.77988 (* 1 = 1.77988 loss)\nI1213 18:54:41.345523 20613 solver.cpp:228] Iteration 59600, loss = 0.201032\nI1213 18:54:41.345567 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:54:41.345583 20613 solver.cpp:244]     Train net output #1: loss = 0.201033 (* 1 = 0.201033 loss)\nI1213 18:54:41.441012 20613 sgd_solver.cpp:174] Iteration 59600, lr = 1.788\nI1213 18:54:41.453685 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.367531\nI1213 18:56:59.302721 20613 solver.cpp:337] Iteration 59700, Testing net (#0)\nI1213 18:58:20.056015 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80864\nI1213 18:58:20.056358 20613 solver.cpp:404]     Test net output #1: loss = 0.69594 (* 1 = 0.69594 loss)\nI1213 18:58:21.365896 20613 solver.cpp:228] Iteration 59700, loss = 0.189868\nI1213 18:58:21.365939 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 18:58:21.365957 20613 solver.cpp:244]     Train net output #1: loss = 0.189869 (* 1 = 0.189869 loss)\nI1213 18:58:21.463474 20613 sgd_solver.cpp:174] Iteration 59700, lr = 1.791\nI1213 18:58:21.476218 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308883\nI1213 19:00:39.396225 20613 
solver.cpp:337] Iteration 59800, Testing net (#0)\nI1213 19:02:00.151126 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7774\nI1213 19:02:00.151438 20613 solver.cpp:404]     Test net output #1: loss = 0.904681 (* 1 = 0.904681 loss)\nI1213 19:02:01.460587 20613 solver.cpp:228] Iteration 59800, loss = 0.18486\nI1213 19:02:01.460623 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 19:02:01.460644 20613 solver.cpp:244]     Train net output #1: loss = 0.184861 (* 1 = 0.184861 loss)\nI1213 19:02:01.558681 20613 sgd_solver.cpp:174] Iteration 59800, lr = 1.794\nI1213 19:02:01.571344 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319885\nI1213 19:04:19.441699 20613 solver.cpp:337] Iteration 59900, Testing net (#0)\nI1213 19:05:40.187515 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73444\nI1213 19:05:40.187855 20613 solver.cpp:404]     Test net output #1: loss = 0.975369 (* 1 = 0.975369 loss)\nI1213 19:05:41.496120 20613 solver.cpp:228] Iteration 59900, loss = 0.236524\nI1213 19:05:41.496165 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 19:05:41.496182 20613 solver.cpp:244]     Train net output #1: loss = 0.236524 (* 1 = 0.236524 loss)\nI1213 19:05:41.589462 20613 sgd_solver.cpp:174] Iteration 59900, lr = 1.797\nI1213 19:05:41.602211 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.360066\nI1213 19:07:59.559770 20613 solver.cpp:337] Iteration 60000, Testing net (#0)\nI1213 19:09:20.333703 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7708\nI1213 19:09:20.333974 20613 solver.cpp:404]     Test net output #1: loss = 0.866967 (* 1 = 0.866967 loss)\nI1213 19:09:21.642228 20613 solver.cpp:228] Iteration 60000, loss = 0.249472\nI1213 19:09:21.642274 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 19:09:21.642297 20613 solver.cpp:244]     Train net output #1: loss = 0.249472 (* 1 = 0.249472 loss)\nI1213 19:09:21.741379 20613 sgd_solver.cpp:174] Iteration 60000, lr = 1.8\nI1213 
19:09:21.754101 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.400397\nI1213 19:11:39.617646 20613 solver.cpp:337] Iteration 60100, Testing net (#0)\nI1213 19:13:00.378885 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79128\nI1213 19:13:00.379233 20613 solver.cpp:404]     Test net output #1: loss = 0.730222 (* 1 = 0.730222 loss)\nI1213 19:13:01.687191 20613 solver.cpp:228] Iteration 60100, loss = 0.123716\nI1213 19:13:01.687233 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 19:13:01.687250 20613 solver.cpp:244]     Train net output #1: loss = 0.123716 (* 1 = 0.123716 loss)\nI1213 19:13:01.780351 20613 sgd_solver.cpp:174] Iteration 60100, lr = 1.803\nI1213 19:13:01.793104 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321801\nI1213 19:15:19.605543 20613 solver.cpp:337] Iteration 60200, Testing net (#0)\nI1213 19:16:40.357997 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8168\nI1213 19:16:40.358336 20613 solver.cpp:404]     Test net output #1: loss = 0.679934 (* 1 = 0.679934 loss)\nI1213 19:16:41.667101 20613 solver.cpp:228] Iteration 60200, loss = 0.207307\nI1213 19:16:41.667145 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 19:16:41.667161 20613 solver.cpp:244]     Train net output #1: loss = 0.207308 (* 1 = 0.207308 loss)\nI1213 19:16:41.765856 20613 sgd_solver.cpp:174] Iteration 60200, lr = 1.806\nI1213 19:16:41.778648 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309472\nI1213 19:18:59.648784 20613 solver.cpp:337] Iteration 60300, Testing net (#0)\nI1213 19:20:20.397140 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65396\nI1213 19:20:20.397461 20613 solver.cpp:404]     Test net output #1: loss = 1.61858 (* 1 = 1.61858 loss)\nI1213 19:20:21.705579 20613 solver.cpp:228] Iteration 60300, loss = 0.19301\nI1213 19:20:21.705622 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 19:20:21.705638 20613 solver.cpp:244]     Train net output #1: loss = 0.193011 (* 1 = 
0.193011 loss)\nI1213 19:20:21.797161 20613 sgd_solver.cpp:174] Iteration 60300, lr = 1.809\nI1213 19:20:21.809749 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322094\nI1213 19:22:39.688148 20613 solver.cpp:337] Iteration 60400, Testing net (#0)\nI1213 19:24:00.429536 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77832\nI1213 19:24:00.429829 20613 solver.cpp:404]     Test net output #1: loss = 0.80765 (* 1 = 0.80765 loss)\nI1213 19:24:01.738670 20613 solver.cpp:228] Iteration 60400, loss = 0.168412\nI1213 19:24:01.738716 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 19:24:01.738732 20613 solver.cpp:244]     Train net output #1: loss = 0.168413 (* 1 = 0.168413 loss)\nI1213 19:24:01.832631 20613 sgd_solver.cpp:174] Iteration 60400, lr = 1.812\nI1213 19:24:01.845440 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.328113\nI1213 19:26:19.767691 20613 solver.cpp:337] Iteration 60500, Testing net (#0)\nI1213 19:27:40.508296 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78832\nI1213 19:27:40.508607 20613 solver.cpp:404]     Test net output #1: loss = 0.78774 (* 1 = 0.78774 loss)\nI1213 19:27:41.817032 20613 solver.cpp:228] Iteration 60500, loss = 0.128198\nI1213 19:27:41.817075 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 19:27:41.817091 20613 solver.cpp:244]     Train net output #1: loss = 0.128199 (* 1 = 0.128199 loss)\nI1213 19:27:41.912025 20613 sgd_solver.cpp:174] Iteration 60500, lr = 1.815\nI1213 19:27:41.924813 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.342304\nI1213 19:29:59.852424 20613 solver.cpp:337] Iteration 60600, Testing net (#0)\nI1213 19:31:20.595592 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81288\nI1213 19:31:20.595896 20613 solver.cpp:404]     Test net output #1: loss = 0.667823 (* 1 = 0.667823 loss)\nI1213 19:31:21.904932 20613 solver.cpp:228] Iteration 60600, loss = 0.159411\nI1213 19:31:21.904974 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.92\nI1213 19:31:21.904991 20613 solver.cpp:244]     Train net output #1: loss = 0.159412 (* 1 = 0.159412 loss)\nI1213 19:31:22.000147 20613 sgd_solver.cpp:174] Iteration 60600, lr = 1.818\nI1213 19:31:22.012946 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.38463\nI1213 19:33:39.882601 20613 solver.cpp:337] Iteration 60700, Testing net (#0)\nI1213 19:35:00.646200 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7772\nI1213 19:35:00.646522 20613 solver.cpp:404]     Test net output #1: loss = 0.914847 (* 1 = 0.914847 loss)\nI1213 19:35:01.955166 20613 solver.cpp:228] Iteration 60700, loss = 0.174353\nI1213 19:35:01.955200 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 19:35:01.955216 20613 solver.cpp:244]     Train net output #1: loss = 0.174354 (* 1 = 0.174354 loss)\nI1213 19:35:02.055554 20613 sgd_solver.cpp:174] Iteration 60700, lr = 1.821\nI1213 19:35:02.068390 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.299006\nI1213 19:37:19.991665 20613 solver.cpp:337] Iteration 60800, Testing net (#0)\nI1213 19:38:40.756259 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83364\nI1213 19:38:40.756603 20613 solver.cpp:404]     Test net output #1: loss = 0.556859 (* 1 = 0.556859 loss)\nI1213 19:38:42.064890 20613 solver.cpp:228] Iteration 60800, loss = 0.145758\nI1213 19:38:42.064927 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 19:38:42.064944 20613 solver.cpp:244]     Train net output #1: loss = 0.145758 (* 1 = 0.145758 loss)\nI1213 19:38:42.164417 20613 sgd_solver.cpp:174] Iteration 60800, lr = 1.824\nI1213 19:38:42.177191 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.31644\nI1213 19:41:00.064363 20613 solver.cpp:337] Iteration 60900, Testing net (#0)\nI1213 19:42:20.827735 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71064\nI1213 19:42:20.828076 20613 solver.cpp:404]     Test net output #1: loss = 1.02221 (* 1 = 1.02221 loss)\nI1213 19:42:22.136687 20613 solver.cpp:228] Iteration 60900, 
loss = 0.127124\nI1213 19:42:22.136734 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 19:42:22.136751 20613 solver.cpp:244]     Train net output #1: loss = 0.127125 (* 1 = 0.127125 loss)\nI1213 19:42:22.234535 20613 sgd_solver.cpp:174] Iteration 60900, lr = 1.827\nI1213 19:42:22.247242 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3321\nI1213 19:44:40.217141 20613 solver.cpp:337] Iteration 61000, Testing net (#0)\nI1213 19:46:00.964231 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7688\nI1213 19:46:00.964512 20613 solver.cpp:404]     Test net output #1: loss = 0.932766 (* 1 = 0.932766 loss)\nI1213 19:46:02.272963 20613 solver.cpp:228] Iteration 61000, loss = 0.177966\nI1213 19:46:02.273006 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 19:46:02.273023 20613 solver.cpp:244]     Train net output #1: loss = 0.177967 (* 1 = 0.177967 loss)\nI1213 19:46:02.367645 20613 sgd_solver.cpp:174] Iteration 61000, lr = 1.83\nI1213 19:46:02.380414 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347362\nI1213 19:48:20.195178 20613 solver.cpp:337] Iteration 61100, Testing net (#0)\nI1213 19:49:40.942266 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80292\nI1213 19:49:40.942603 20613 solver.cpp:404]     Test net output #1: loss = 0.68036 (* 1 = 0.68036 loss)\nI1213 19:49:42.250808 20613 solver.cpp:228] Iteration 61100, loss = 0.107423\nI1213 19:49:42.250852 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 19:49:42.250869 20613 solver.cpp:244]     Train net output #1: loss = 0.107424 (* 1 = 0.107424 loss)\nI1213 19:49:42.348429 20613 sgd_solver.cpp:174] Iteration 61100, lr = 1.833\nI1213 19:49:42.361163 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.336523\nI1213 19:52:00.204917 20613 solver.cpp:337] Iteration 61200, Testing net (#0)\nI1213 19:53:20.943266 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76304\nI1213 19:53:20.943629 20613 solver.cpp:404]     Test net output #1: loss = 
0.818537 (* 1 = 0.818537 loss)\nI1213 19:53:22.251632 20613 solver.cpp:228] Iteration 61200, loss = 0.149476\nI1213 19:53:22.251677 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 19:53:22.251695 20613 solver.cpp:244]     Train net output #1: loss = 0.149477 (* 1 = 0.149477 loss)\nI1213 19:53:22.344353 20613 sgd_solver.cpp:174] Iteration 61200, lr = 1.836\nI1213 19:53:22.357228 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293099\nI1213 19:55:40.162540 20613 solver.cpp:337] Iteration 61300, Testing net (#0)\nI1213 19:57:00.897020 20613 solver.cpp:404]     Test net output #0: accuracy = 0.781\nI1213 19:57:00.897342 20613 solver.cpp:404]     Test net output #1: loss = 0.760845 (* 1 = 0.760845 loss)\nI1213 19:57:02.205655 20613 solver.cpp:228] Iteration 61300, loss = 0.239611\nI1213 19:57:02.205698 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1213 19:57:02.205715 20613 solver.cpp:244]     Train net output #1: loss = 0.239612 (* 1 = 0.239612 loss)\nI1213 19:57:02.300233 20613 sgd_solver.cpp:174] Iteration 61300, lr = 1.839\nI1213 19:57:02.313078 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318948\nI1213 19:59:20.105849 20613 solver.cpp:337] Iteration 61400, Testing net (#0)\nI1213 20:00:40.884299 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70704\nI1213 20:00:40.884629 20613 solver.cpp:404]     Test net output #1: loss = 1.10673 (* 1 = 1.10673 loss)\nI1213 20:00:42.193073 20613 solver.cpp:228] Iteration 61400, loss = 0.150109\nI1213 20:00:42.193117 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 20:00:42.193135 20613 solver.cpp:244]     Train net output #1: loss = 0.150109 (* 1 = 0.150109 loss)\nI1213 20:00:42.288323 20613 sgd_solver.cpp:174] Iteration 61400, lr = 1.842\nI1213 20:00:42.301106 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.320072\nI1213 20:03:00.163852 20613 solver.cpp:337] Iteration 61500, Testing net (#0)\nI1213 20:04:20.950273 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.80156\nI1213 20:04:20.950569 20613 solver.cpp:404]     Test net output #1: loss = 0.705782 (* 1 = 0.705782 loss)\nI1213 20:04:22.258937 20613 solver.cpp:228] Iteration 61500, loss = 0.149028\nI1213 20:04:22.258982 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:04:22.258998 20613 solver.cpp:244]     Train net output #1: loss = 0.149029 (* 1 = 0.149029 loss)\nI1213 20:04:22.350589 20613 sgd_solver.cpp:174] Iteration 61500, lr = 1.845\nI1213 20:04:22.363389 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.346498\nI1213 20:06:40.233397 20613 solver.cpp:337] Iteration 61600, Testing net (#0)\nI1213 20:08:00.996981 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70032\nI1213 20:08:00.997275 20613 solver.cpp:404]     Test net output #1: loss = 1.29892 (* 1 = 1.29892 loss)\nI1213 20:08:02.307533 20613 solver.cpp:228] Iteration 61600, loss = 0.19023\nI1213 20:08:02.307569 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 20:08:02.307585 20613 solver.cpp:244]     Train net output #1: loss = 0.190231 (* 1 = 0.190231 loss)\nI1213 20:08:02.403111 20613 sgd_solver.cpp:174] Iteration 61600, lr = 1.848\nI1213 20:08:02.415916 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.285065\nI1213 20:10:20.429749 20613 solver.cpp:337] Iteration 61700, Testing net (#0)\nI1213 20:11:41.181884 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81464\nI1213 20:11:41.182184 20613 solver.cpp:404]     Test net output #1: loss = 0.57878 (* 1 = 0.57878 loss)\nI1213 20:11:42.492405 20613 solver.cpp:228] Iteration 61700, loss = 0.154265\nI1213 20:11:42.492450 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 20:11:42.492467 20613 solver.cpp:244]     Train net output #1: loss = 0.154266 (* 1 = 0.154266 loss)\nI1213 20:11:42.586617 20613 sgd_solver.cpp:174] Iteration 61700, lr = 1.851\nI1213 20:11:42.599319 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.326159\nI1213 20:14:00.640132 20613 solver.cpp:337] 
Iteration 61800, Testing net (#0)\nI1213 20:15:21.402225 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70656\nI1213 20:15:21.402566 20613 solver.cpp:404]     Test net output #1: loss = 1.30156 (* 1 = 1.30156 loss)\nI1213 20:15:22.713603 20613 solver.cpp:228] Iteration 61800, loss = 0.169953\nI1213 20:15:22.713645 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:15:22.713662 20613 solver.cpp:244]     Train net output #1: loss = 0.169954 (* 1 = 0.169954 loss)\nI1213 20:15:22.802971 20613 sgd_solver.cpp:174] Iteration 61800, lr = 1.854\nI1213 20:15:22.815709 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.331799\nI1213 20:17:40.771280 20613 solver.cpp:337] Iteration 61900, Testing net (#0)\nI1213 20:19:01.525701 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77328\nI1213 20:19:01.526013 20613 solver.cpp:404]     Test net output #1: loss = 0.908332 (* 1 = 0.908332 loss)\nI1213 20:19:02.836844 20613 solver.cpp:228] Iteration 61900, loss = 0.211196\nI1213 20:19:02.836890 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:19:02.836907 20613 solver.cpp:244]     Train net output #1: loss = 0.211196 (* 1 = 0.211196 loss)\nI1213 20:19:02.933598 20613 sgd_solver.cpp:174] Iteration 61900, lr = 1.857\nI1213 20:19:02.946333 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332408\nI1213 20:21:20.977649 20613 solver.cpp:337] Iteration 62000, Testing net (#0)\nI1213 20:22:41.733106 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81424\nI1213 20:22:41.733422 20613 solver.cpp:404]     Test net output #1: loss = 0.624401 (* 1 = 0.624401 loss)\nI1213 20:22:43.044364 20613 solver.cpp:228] Iteration 62000, loss = 0.217731\nI1213 20:22:43.044401 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 20:22:43.044417 20613 solver.cpp:244]     Train net output #1: loss = 0.217731 (* 1 = 0.217731 loss)\nI1213 20:22:43.141809 20613 sgd_solver.cpp:174] Iteration 62000, lr = 1.86\nI1213 20:22:43.154526 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313635\nI1213 20:25:01.106884 20613 solver.cpp:337] Iteration 62100, Testing net (#0)\nI1213 20:26:21.863471 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78776\nI1213 20:26:21.863741 20613 solver.cpp:404]     Test net output #1: loss = 0.695241 (* 1 = 0.695241 loss)\nI1213 20:26:23.173820 20613 solver.cpp:228] Iteration 62100, loss = 0.185879\nI1213 20:26:23.173866 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:26:23.173882 20613 solver.cpp:244]     Train net output #1: loss = 0.18588 (* 1 = 0.18588 loss)\nI1213 20:26:23.268504 20613 sgd_solver.cpp:174] Iteration 62100, lr = 1.863\nI1213 20:26:23.281189 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349784\nI1213 20:28:41.242280 20613 solver.cpp:337] Iteration 62200, Testing net (#0)\nI1213 20:30:01.997197 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74588\nI1213 20:30:01.997498 20613 solver.cpp:404]     Test net output #1: loss = 1.03314 (* 1 = 1.03314 loss)\nI1213 20:30:03.308485 20613 solver.cpp:228] Iteration 62200, loss = 0.134798\nI1213 20:30:03.308531 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 20:30:03.308547 20613 solver.cpp:244]     Train net output #1: loss = 0.134798 (* 1 = 0.134798 loss)\nI1213 20:30:03.399752 20613 sgd_solver.cpp:174] Iteration 62200, lr = 1.866\nI1213 20:30:03.412514 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308629\nI1213 20:32:21.420650 20613 solver.cpp:337] Iteration 62300, Testing net (#0)\nI1213 20:33:42.177536 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77292\nI1213 20:33:42.177800 20613 solver.cpp:404]     Test net output #1: loss = 0.868165 (* 1 = 0.868165 loss)\nI1213 20:33:43.487920 20613 solver.cpp:228] Iteration 62300, loss = 0.174899\nI1213 20:33:43.487957 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:33:43.487972 20613 solver.cpp:244]     Train net output #1: loss = 0.1749 (* 1 = 0.1749 loss)\nI1213 
20:33:43.584005 20613 sgd_solver.cpp:174] Iteration 62300, lr = 1.869\nI1213 20:33:43.596720 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.267845\nI1213 20:36:01.673468 20613 solver.cpp:337] Iteration 62400, Testing net (#0)\nI1213 20:37:22.331161 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77352\nI1213 20:37:22.331511 20613 solver.cpp:404]     Test net output #1: loss = 0.813524 (* 1 = 0.813524 loss)\nI1213 20:37:23.642760 20613 solver.cpp:228] Iteration 62400, loss = 0.173664\nI1213 20:37:23.642805 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 20:37:23.642822 20613 solver.cpp:244]     Train net output #1: loss = 0.173664 (* 1 = 0.173664 loss)\nI1213 20:37:23.733145 20613 sgd_solver.cpp:174] Iteration 62400, lr = 1.872\nI1213 20:37:23.745952 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.304612\nI1213 20:39:41.826220 20613 solver.cpp:337] Iteration 62500, Testing net (#0)\nI1213 20:41:02.468214 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI1213 20:41:02.468513 20613 solver.cpp:404]     Test net output #1: loss = 0.697381 (* 1 = 0.697381 loss)\nI1213 20:41:03.780115 20613 solver.cpp:228] Iteration 62500, loss = 0.227808\nI1213 20:41:03.780160 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 20:41:03.780177 20613 solver.cpp:244]     Train net output #1: loss = 0.227808 (* 1 = 0.227808 loss)\nI1213 20:41:03.872555 20613 sgd_solver.cpp:174] Iteration 62500, lr = 1.875\nI1213 20:41:03.885290 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.396211\nI1213 20:43:22.026652 20613 solver.cpp:337] Iteration 62600, Testing net (#0)\nI1213 20:44:42.664415 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75716\nI1213 20:44:42.664695 20613 solver.cpp:404]     Test net output #1: loss = 0.938116 (* 1 = 0.938116 loss)\nI1213 20:44:43.975625 20613 solver.cpp:228] Iteration 62600, loss = 0.129997\nI1213 20:44:43.975667 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 
20:44:43.975684 20613 solver.cpp:244]     Train net output #1: loss = 0.129998 (* 1 = 0.129998 loss)\nI1213 20:44:44.070729 20613 sgd_solver.cpp:174] Iteration 62600, lr = 1.878\nI1213 20:44:44.083549 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337882\nI1213 20:47:02.197145 20613 solver.cpp:337] Iteration 62700, Testing net (#0)\nI1213 20:48:22.847450 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8246\nI1213 20:48:22.847728 20613 solver.cpp:404]     Test net output #1: loss = 0.610058 (* 1 = 0.610058 loss)\nI1213 20:48:24.159231 20613 solver.cpp:228] Iteration 62700, loss = 0.106523\nI1213 20:48:24.159277 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 20:48:24.159294 20613 solver.cpp:244]     Train net output #1: loss = 0.106523 (* 1 = 0.106523 loss)\nI1213 20:48:24.250893 20613 sgd_solver.cpp:174] Iteration 62700, lr = 1.881\nI1213 20:48:24.263604 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.284005\nI1213 20:50:42.238850 20613 solver.cpp:337] Iteration 62800, Testing net (#0)\nI1213 20:52:02.885083 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78784\nI1213 20:52:02.885422 20613 solver.cpp:404]     Test net output #1: loss = 0.770626 (* 1 = 0.770626 loss)\nI1213 20:52:04.197839 20613 solver.cpp:228] Iteration 62800, loss = 0.142006\nI1213 20:52:04.197882 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 20:52:04.197901 20613 solver.cpp:244]     Train net output #1: loss = 0.142007 (* 1 = 0.142007 loss)\nI1213 20:52:04.290238 20613 sgd_solver.cpp:174] Iteration 62800, lr = 1.884\nI1213 20:52:04.302913 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.328454\nI1213 20:54:22.311729 20613 solver.cpp:337] Iteration 62900, Testing net (#0)\nI1213 20:55:42.959492 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74916\nI1213 20:55:42.959842 20613 solver.cpp:404]     Test net output #1: loss = 0.911189 (* 1 = 0.911189 loss)\nI1213 20:55:44.270267 20613 solver.cpp:228] Iteration 62900, loss = 
0.142708\nI1213 20:55:44.270301 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 20:55:44.270316 20613 solver.cpp:244]     Train net output #1: loss = 0.142708 (* 1 = 0.142708 loss)\nI1213 20:55:44.362529 20613 sgd_solver.cpp:174] Iteration 62900, lr = 1.887\nI1213 20:55:44.375310 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.330997\nI1213 20:58:02.594156 20613 solver.cpp:337] Iteration 63000, Testing net (#0)\nI1213 20:59:24.420614 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75484\nI1213 20:59:24.421015 20613 solver.cpp:404]     Test net output #1: loss = 1.05448 (* 1 = 1.05448 loss)\nI1213 20:59:25.734640 20613 solver.cpp:228] Iteration 63000, loss = 0.139269\nI1213 20:59:25.734701 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 20:59:25.734726 20613 solver.cpp:244]     Train net output #1: loss = 0.13927 (* 1 = 0.13927 loss)\nI1213 20:59:25.830513 20613 sgd_solver.cpp:174] Iteration 63000, lr = 1.89\nI1213 20:59:25.844377 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337327\nI1213 21:01:44.131204 20613 solver.cpp:337] Iteration 63100, Testing net (#0)\nI1213 21:03:05.934726 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8072\nI1213 21:03:05.935142 20613 solver.cpp:404]     Test net output #1: loss = 0.653231 (* 1 = 0.653231 loss)\nI1213 21:03:07.248950 20613 solver.cpp:228] Iteration 63100, loss = 0.10578\nI1213 21:03:07.249008 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 21:03:07.249037 20613 solver.cpp:244]     Train net output #1: loss = 0.105781 (* 1 = 0.105781 loss)\nI1213 21:03:07.334997 20613 sgd_solver.cpp:174] Iteration 63100, lr = 1.893\nI1213 21:03:07.348774 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319429\nI1213 21:05:25.575032 20613 solver.cpp:337] Iteration 63200, Testing net (#0)\nI1213 21:06:47.381820 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81092\nI1213 21:06:47.382243 20613 solver.cpp:404]     Test net output #1: loss = 0.740904 
(* 1 = 0.740904 loss)\nI1213 21:06:48.695847 20613 solver.cpp:228] Iteration 63200, loss = 0.172714\nI1213 21:06:48.695904 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 21:06:48.695929 20613 solver.cpp:244]     Train net output #1: loss = 0.172714 (* 1 = 0.172714 loss)\nI1213 21:06:48.787922 20613 sgd_solver.cpp:174] Iteration 63200, lr = 1.896\nI1213 21:06:48.801731 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.379444\nI1213 21:09:07.002918 20613 solver.cpp:337] Iteration 63300, Testing net (#0)\nI1213 21:10:28.827162 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79464\nI1213 21:10:28.827571 20613 solver.cpp:404]     Test net output #1: loss = 0.664582 (* 1 = 0.664582 loss)\nI1213 21:10:30.142952 20613 solver.cpp:228] Iteration 63300, loss = 0.110217\nI1213 21:10:30.143007 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 21:10:30.143024 20613 solver.cpp:244]     Train net output #1: loss = 0.110218 (* 1 = 0.110218 loss)\nI1213 21:10:30.232450 20613 sgd_solver.cpp:174] Iteration 63300, lr = 1.899\nI1213 21:10:30.246248 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.291019\nI1213 21:12:48.514286 20613 solver.cpp:337] Iteration 63400, Testing net (#0)\nI1213 21:14:10.311159 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70432\nI1213 21:14:10.311552 20613 solver.cpp:404]     Test net output #1: loss = 1.22647 (* 1 = 1.22647 loss)\nI1213 21:14:11.624101 20613 solver.cpp:228] Iteration 63400, loss = 0.222787\nI1213 21:14:11.624158 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 21:14:11.624176 20613 solver.cpp:244]     Train net output #1: loss = 0.222788 (* 1 = 0.222788 loss)\nI1213 21:14:11.716327 20613 sgd_solver.cpp:174] Iteration 63400, lr = 1.902\nI1213 21:14:11.730199 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315792\nI1213 21:16:29.992130 20613 solver.cpp:337] Iteration 63500, Testing net (#0)\nI1213 21:17:51.785121 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.81292\nI1213 21:17:51.785547 20613 solver.cpp:404]     Test net output #1: loss = 0.661804 (* 1 = 0.661804 loss)\nI1213 21:17:53.098289 20613 solver.cpp:228] Iteration 63500, loss = 0.203893\nI1213 21:17:53.098340 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 21:17:53.098357 20613 solver.cpp:244]     Train net output #1: loss = 0.203893 (* 1 = 0.203893 loss)\nI1213 21:17:53.186748 20613 sgd_solver.cpp:174] Iteration 63500, lr = 1.905\nI1213 21:17:53.200562 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345019\nI1213 21:20:11.438141 20613 solver.cpp:337] Iteration 63600, Testing net (#0)\nI1213 21:21:33.216467 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71528\nI1213 21:21:33.216899 20613 solver.cpp:404]     Test net output #1: loss = 1.24827 (* 1 = 1.24827 loss)\nI1213 21:21:34.529453 20613 solver.cpp:228] Iteration 63600, loss = 0.141136\nI1213 21:21:34.529506 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 21:21:34.529523 20613 solver.cpp:244]     Train net output #1: loss = 0.141136 (* 1 = 0.141136 loss)\nI1213 21:21:34.620120 20613 sgd_solver.cpp:174] Iteration 63600, lr = 1.908\nI1213 21:21:34.633929 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.291851\nI1213 21:23:52.910603 20613 solver.cpp:337] Iteration 63700, Testing net (#0)\nI1213 21:25:14.676539 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76476\nI1213 21:25:14.676826 20613 solver.cpp:404]     Test net output #1: loss = 0.86736 (* 1 = 0.86736 loss)\nI1213 21:25:15.989634 20613 solver.cpp:228] Iteration 63700, loss = 0.187106\nI1213 21:25:15.989691 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 21:25:15.989707 20613 solver.cpp:244]     Train net output #1: loss = 0.187107 (* 1 = 0.187107 loss)\nI1213 21:25:16.081419 20613 sgd_solver.cpp:174] Iteration 63700, lr = 1.911\nI1213 21:25:16.095235 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.298576\nI1213 21:27:34.380834 20613 solver.cpp:337] 
Iteration 63800, Testing net (#0)\nI1213 21:28:56.134675 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80188\nI1213 21:28:56.134990 20613 solver.cpp:404]     Test net output #1: loss = 0.677019 (* 1 = 0.677019 loss)\nI1213 21:28:57.447749 20613 solver.cpp:228] Iteration 63800, loss = 0.234501\nI1213 21:28:57.447805 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 21:28:57.447824 20613 solver.cpp:244]     Train net output #1: loss = 0.234502 (* 1 = 0.234502 loss)\nI1213 21:28:57.536878 20613 sgd_solver.cpp:174] Iteration 63800, lr = 1.914\nI1213 21:28:57.550678 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314406\nI1213 21:31:15.784224 20613 solver.cpp:337] Iteration 63900, Testing net (#0)\nI1213 21:32:37.463956 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74792\nI1213 21:32:37.464293 20613 solver.cpp:404]     Test net output #1: loss = 0.992546 (* 1 = 0.992546 loss)\nI1213 21:32:38.776688 20613 solver.cpp:228] Iteration 63900, loss = 0.198583\nI1213 21:32:38.776738 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 21:32:38.776756 20613 solver.cpp:244]     Train net output #1: loss = 0.198584 (* 1 = 0.198584 loss)\nI1213 21:32:38.870489 20613 sgd_solver.cpp:174] Iteration 63900, lr = 1.917\nI1213 21:32:38.884318 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322762\nI1213 21:34:57.185907 20613 solver.cpp:337] Iteration 64000, Testing net (#0)\nI1213 21:36:18.876896 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83632\nI1213 21:36:18.877182 20613 solver.cpp:404]     Test net output #1: loss = 0.590967 (* 1 = 0.590967 loss)\nI1213 21:36:20.189647 20613 solver.cpp:228] Iteration 64000, loss = 0.155558\nI1213 21:36:20.189698 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 21:36:20.189714 20613 solver.cpp:244]     Train net output #1: loss = 0.155559 (* 1 = 0.155559 loss)\nI1213 21:36:20.283028 20613 sgd_solver.cpp:174] Iteration 64000, lr = 1.92\nI1213 
21:36:20.296865 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347472\nI1213 21:38:38.530496 20613 solver.cpp:337] Iteration 64100, Testing net (#0)\nI1213 21:40:00.249459 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73156\nI1213 21:40:00.249764 20613 solver.cpp:404]     Test net output #1: loss = 0.982504 (* 1 = 0.982504 loss)\nI1213 21:40:01.562340 20613 solver.cpp:228] Iteration 64100, loss = 0.114742\nI1213 21:40:01.562397 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 21:40:01.562414 20613 solver.cpp:244]     Train net output #1: loss = 0.114742 (* 1 = 0.114742 loss)\nI1213 21:40:01.650763 20613 sgd_solver.cpp:174] Iteration 64100, lr = 1.923\nI1213 21:40:01.665009 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.267437\nI1213 21:42:19.998644 20613 solver.cpp:337] Iteration 64200, Testing net (#0)\nI1213 21:43:41.742434 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76956\nI1213 21:43:41.742828 20613 solver.cpp:404]     Test net output #1: loss = 0.897326 (* 1 = 0.897326 loss)\nI1213 21:43:43.055142 20613 solver.cpp:228] Iteration 64200, loss = 0.125105\nI1213 21:43:43.055191 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1213 21:43:43.055207 20613 solver.cpp:244]     Train net output #1: loss = 0.125105 (* 1 = 0.125105 loss)\nI1213 21:43:43.148883 20613 sgd_solver.cpp:174] Iteration 64200, lr = 1.926\nI1213 21:43:43.162675 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313718\nI1213 21:46:01.419868 20613 solver.cpp:337] Iteration 64300, Testing net (#0)\nI1213 21:47:23.174098 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76132\nI1213 21:47:23.174394 20613 solver.cpp:404]     Test net output #1: loss = 0.925497 (* 1 = 0.925497 loss)\nI1213 21:47:24.487216 20613 solver.cpp:228] Iteration 64300, loss = 0.128523\nI1213 21:47:24.487272 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 21:47:24.487289 20613 solver.cpp:244]     Train net output #1: loss = 0.128523 (* 1 
= 0.128523 loss)\nI1213 21:47:24.579293 20613 sgd_solver.cpp:174] Iteration 64300, lr = 1.929\nI1213 21:47:24.592857 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.33616\nI1213 21:49:42.748016 20613 solver.cpp:337] Iteration 64400, Testing net (#0)\nI1213 21:51:04.490655 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73204\nI1213 21:51:04.490949 20613 solver.cpp:404]     Test net output #1: loss = 0.960563 (* 1 = 0.960563 loss)\nI1213 21:51:05.803951 20613 solver.cpp:228] Iteration 64400, loss = 0.163714\nI1213 21:51:05.804003 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 21:51:05.804019 20613 solver.cpp:244]     Train net output #1: loss = 0.163714 (* 1 = 0.163714 loss)\nI1213 21:51:05.892803 20613 sgd_solver.cpp:174] Iteration 64400, lr = 1.932\nI1213 21:51:05.906677 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35227\nI1213 21:53:24.097115 20613 solver.cpp:337] Iteration 64500, Testing net (#0)\nI1213 21:54:45.836274 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65044\nI1213 21:54:45.836562 20613 solver.cpp:404]     Test net output #1: loss = 1.82854 (* 1 = 1.82854 loss)\nI1213 21:54:47.149523 20613 solver.cpp:228] Iteration 64500, loss = 0.216485\nI1213 21:54:47.149576 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 21:54:47.149595 20613 solver.cpp:244]     Train net output #1: loss = 0.216486 (* 1 = 0.216486 loss)\nI1213 21:54:47.241469 20613 sgd_solver.cpp:174] Iteration 64500, lr = 1.935\nI1213 21:54:47.255170 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349212\nI1213 21:57:05.602604 20613 solver.cpp:337] Iteration 64600, Testing net (#0)\nI1213 21:58:27.336448 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79624\nI1213 21:58:27.336773 20613 solver.cpp:404]     Test net output #1: loss = 0.720702 (* 1 = 0.720702 loss)\nI1213 21:58:28.649435 20613 solver.cpp:228] Iteration 64600, loss = 0.172426\nI1213 21:58:28.649487 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.952\nI1213 21:58:28.649504 20613 solver.cpp:244]     Train net output #1: loss = 0.172427 (* 1 = 0.172427 loss)\nI1213 21:58:28.739482 20613 sgd_solver.cpp:174] Iteration 64600, lr = 1.938\nI1213 21:58:28.753360 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.276129\nI1213 22:00:47.032819 20613 solver.cpp:337] Iteration 64700, Testing net (#0)\nI1213 22:02:08.765348 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70424\nI1213 22:02:08.765705 20613 solver.cpp:404]     Test net output #1: loss = 1.37591 (* 1 = 1.37591 loss)\nI1213 22:02:10.078166 20613 solver.cpp:228] Iteration 64700, loss = 0.239701\nI1213 22:02:10.078217 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 22:02:10.078234 20613 solver.cpp:244]     Train net output #1: loss = 0.239701 (* 1 = 0.239701 loss)\nI1213 22:02:10.167768 20613 sgd_solver.cpp:174] Iteration 64700, lr = 1.941\nI1213 22:02:10.181581 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313014\nI1213 22:04:28.488572 20613 solver.cpp:337] Iteration 64800, Testing net (#0)\nI1213 22:05:50.175936 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73852\nI1213 22:05:50.176264 20613 solver.cpp:404]     Test net output #1: loss = 0.974029 (* 1 = 0.974029 loss)\nI1213 22:05:51.488742 20613 solver.cpp:228] Iteration 64800, loss = 0.200609\nI1213 22:05:51.488793 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1213 22:05:51.488811 20613 solver.cpp:244]     Train net output #1: loss = 0.200609 (* 1 = 0.200609 loss)\nI1213 22:05:51.580303 20613 sgd_solver.cpp:174] Iteration 64800, lr = 1.944\nI1213 22:05:51.594106 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.335375\nI1213 22:08:09.884493 20613 solver.cpp:337] Iteration 64900, Testing net (#0)\nI1213 22:09:31.593282 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7686\nI1213 22:09:31.593575 20613 solver.cpp:404]     Test net output #1: loss = 0.83402 (* 1 = 0.83402 loss)\nI1213 22:09:32.905794 20613 solver.cpp:228] Iteration 64900, 
loss = 0.152143\nI1213 22:09:32.905845 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 22:09:32.905866 20613 solver.cpp:244]     Train net output #1: loss = 0.152144 (* 1 = 0.152144 loss)\nI1213 22:09:32.997750 20613 sgd_solver.cpp:174] Iteration 64900, lr = 1.947\nI1213 22:09:33.011606 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275002\nI1213 22:11:51.295311 20613 solver.cpp:337] Iteration 65000, Testing net (#0)\nI1213 22:13:13.032704 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72184\nI1213 22:13:13.033097 20613 solver.cpp:404]     Test net output #1: loss = 1.03183 (* 1 = 1.03183 loss)\nI1213 22:13:14.345520 20613 solver.cpp:228] Iteration 65000, loss = 0.209228\nI1213 22:13:14.345579 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 22:13:14.345597 20613 solver.cpp:244]     Train net output #1: loss = 0.209228 (* 1 = 0.209228 loss)\nI1213 22:13:14.434957 20613 sgd_solver.cpp:174] Iteration 65000, lr = 1.95\nI1213 22:13:14.448776 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345394\nI1213 22:15:32.702167 20613 solver.cpp:337] Iteration 65100, Testing net (#0)\nI1213 22:16:54.447348 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7942\nI1213 22:16:54.447660 20613 solver.cpp:404]     Test net output #1: loss = 0.68376 (* 1 = 0.68376 loss)\nI1213 22:16:55.761104 20613 solver.cpp:228] Iteration 65100, loss = 0.21764\nI1213 22:16:55.761164 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 22:16:55.761181 20613 solver.cpp:244]     Train net output #1: loss = 0.21764 (* 1 = 0.21764 loss)\nI1213 22:16:55.850915 20613 sgd_solver.cpp:174] Iteration 65100, lr = 1.953\nI1213 22:16:55.864797 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.320009\nI1213 22:19:14.047812 20613 solver.cpp:337] Iteration 65200, Testing net (#0)\nI1213 22:20:35.753921 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82912\nI1213 22:20:35.754227 20613 solver.cpp:404]     Test net output #1: loss = 
0.622101 (* 1 = 0.622101 loss)\nI1213 22:20:37.067100 20613 solver.cpp:228] Iteration 65200, loss = 0.162822\nI1213 22:20:37.067152 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 22:20:37.067168 20613 solver.cpp:244]     Train net output #1: loss = 0.162823 (* 1 = 0.162823 loss)\nI1213 22:20:37.154922 20613 sgd_solver.cpp:174] Iteration 65200, lr = 1.956\nI1213 22:20:37.168812 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293627\nI1213 22:22:55.392740 20613 solver.cpp:337] Iteration 65300, Testing net (#0)\nI1213 22:24:17.116462 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69224\nI1213 22:24:17.116750 20613 solver.cpp:404]     Test net output #1: loss = 1.2687 (* 1 = 1.2687 loss)\nI1213 22:24:18.429921 20613 solver.cpp:228] Iteration 65300, loss = 0.264573\nI1213 22:24:18.429980 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 22:24:18.429998 20613 solver.cpp:244]     Train net output #1: loss = 0.264573 (* 1 = 0.264573 loss)\nI1213 22:24:18.519806 20613 sgd_solver.cpp:174] Iteration 65300, lr = 1.959\nI1213 22:24:18.533560 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316685\nI1213 22:26:36.811254 20613 solver.cpp:337] Iteration 65400, Testing net (#0)\nI1213 22:27:58.496147 20613 solver.cpp:404]     Test net output #0: accuracy = 0.781\nI1213 22:27:58.496469 20613 solver.cpp:404]     Test net output #1: loss = 0.717332 (* 1 = 0.717332 loss)\nI1213 22:27:59.809831 20613 solver.cpp:228] Iteration 65400, loss = 0.116191\nI1213 22:27:59.809887 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 22:27:59.809906 20613 solver.cpp:244]     Train net output #1: loss = 0.116191 (* 1 = 0.116191 loss)\nI1213 22:27:59.894624 20613 sgd_solver.cpp:174] Iteration 65400, lr = 1.962\nI1213 22:27:59.908339 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.334749\nI1213 22:30:18.080164 20613 solver.cpp:337] Iteration 65500, Testing net (#0)\nI1213 22:31:39.796013 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.71328\nI1213 22:31:39.796320 20613 solver.cpp:404]     Test net output #1: loss = 1.18094 (* 1 = 1.18094 loss)\nI1213 22:31:41.109912 20613 solver.cpp:228] Iteration 65500, loss = 0.211234\nI1213 22:31:41.109971 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 22:31:41.109989 20613 solver.cpp:244]     Train net output #1: loss = 0.211235 (* 1 = 0.211235 loss)\nI1213 22:31:41.201328 20613 sgd_solver.cpp:174] Iteration 65500, lr = 1.965\nI1213 22:31:41.215108 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.388255\nI1213 22:33:59.397022 20613 solver.cpp:337] Iteration 65600, Testing net (#0)\nI1213 22:35:21.121747 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81332\nI1213 22:35:21.122063 20613 solver.cpp:404]     Test net output #1: loss = 0.628144 (* 1 = 0.628144 loss)\nI1213 22:35:22.433913 20613 solver.cpp:228] Iteration 65600, loss = 0.172195\nI1213 22:35:22.433970 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 22:35:22.433987 20613 solver.cpp:244]     Train net output #1: loss = 0.172196 (* 1 = 0.172196 loss)\nI1213 22:35:22.523283 20613 sgd_solver.cpp:174] Iteration 65600, lr = 1.968\nI1213 22:35:22.537071 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3417\nI1213 22:37:40.941311 20613 solver.cpp:337] Iteration 65700, Testing net (#0)\nI1213 22:39:02.667783 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7314\nI1213 22:39:02.668095 20613 solver.cpp:404]     Test net output #1: loss = 1.0479 (* 1 = 1.0479 loss)\nI1213 22:39:03.980890 20613 solver.cpp:228] Iteration 65700, loss = 0.240008\nI1213 22:39:03.980945 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1213 22:39:03.980963 20613 solver.cpp:244]     Train net output #1: loss = 0.240008 (* 1 = 0.240008 loss)\nI1213 22:39:04.071463 20613 sgd_solver.cpp:174] Iteration 65700, lr = 1.971\nI1213 22:39:04.085285 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.396849\nI1213 22:41:22.385767 20613 solver.cpp:337] Iteration 
65800, Testing net (#0)\nI1213 22:42:44.056232 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80628\nI1213 22:42:44.056574 20613 solver.cpp:404]     Test net output #1: loss = 0.740613 (* 1 = 0.740613 loss)\nI1213 22:42:45.369310 20613 solver.cpp:228] Iteration 65800, loss = 0.166096\nI1213 22:42:45.369362 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 22:42:45.369379 20613 solver.cpp:244]     Train net output #1: loss = 0.166096 (* 1 = 0.166096 loss)\nI1213 22:42:45.462293 20613 sgd_solver.cpp:174] Iteration 65800, lr = 1.974\nI1213 22:42:45.476125 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290249\nI1213 22:45:03.793335 20613 solver.cpp:337] Iteration 65900, Testing net (#0)\nI1213 22:46:25.398058 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81624\nI1213 22:46:25.398325 20613 solver.cpp:404]     Test net output #1: loss = 0.59678 (* 1 = 0.59678 loss)\nI1213 22:46:26.711046 20613 solver.cpp:228] Iteration 65900, loss = 0.126499\nI1213 22:46:26.711102 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1213 22:46:26.711118 20613 solver.cpp:244]     Train net output #1: loss = 0.126499 (* 1 = 0.126499 loss)\nI1213 22:46:26.798750 20613 sgd_solver.cpp:174] Iteration 65900, lr = 1.977\nI1213 22:46:26.812610 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297718\nI1213 22:48:45.117388 20613 solver.cpp:337] Iteration 66000, Testing net (#0)\nI1213 22:50:06.821527 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80728\nI1213 22:50:06.821820 20613 solver.cpp:404]     Test net output #1: loss = 0.7154 (* 1 = 0.7154 loss)\nI1213 22:50:08.133815 20613 solver.cpp:228] Iteration 66000, loss = 0.13534\nI1213 22:50:08.133872 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 22:50:08.133891 20613 solver.cpp:244]     Train net output #1: loss = 0.135341 (* 1 = 0.135341 loss)\nI1213 22:50:08.222501 20613 sgd_solver.cpp:174] Iteration 66000, lr = 1.98\nI1213 22:50:08.236358 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.372078\nI1213 22:52:26.417645 20613 solver.cpp:337] Iteration 66100, Testing net (#0)\nI1213 22:53:48.090059 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74652\nI1213 22:53:48.090378 20613 solver.cpp:404]     Test net output #1: loss = 0.837054 (* 1 = 0.837054 loss)\nI1213 22:53:49.402746 20613 solver.cpp:228] Iteration 66100, loss = 0.184285\nI1213 22:53:49.402793 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 22:53:49.402811 20613 solver.cpp:244]     Train net output #1: loss = 0.184285 (* 1 = 0.184285 loss)\nI1213 22:53:49.491828 20613 sgd_solver.cpp:174] Iteration 66100, lr = 1.983\nI1213 22:53:49.505702 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.390226\nI1213 22:56:07.749640 20613 solver.cpp:337] Iteration 66200, Testing net (#0)\nI1213 22:57:29.456527 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75996\nI1213 22:57:29.456852 20613 solver.cpp:404]     Test net output #1: loss = 1.04648 (* 1 = 1.04648 loss)\nI1213 22:57:30.769484 20613 solver.cpp:228] Iteration 66200, loss = 0.154773\nI1213 22:57:30.769533 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 22:57:30.769551 20613 solver.cpp:244]     Train net output #1: loss = 0.154774 (* 1 = 0.154774 loss)\nI1213 22:57:30.858072 20613 sgd_solver.cpp:174] Iteration 66200, lr = 1.986\nI1213 22:57:30.871903 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30642\nI1213 22:59:49.063604 20613 solver.cpp:337] Iteration 66300, Testing net (#0)\nI1213 23:01:10.810854 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81984\nI1213 23:01:10.811172 20613 solver.cpp:404]     Test net output #1: loss = 0.566117 (* 1 = 0.566117 loss)\nI1213 23:01:12.123975 20613 solver.cpp:228] Iteration 66300, loss = 0.154766\nI1213 23:01:12.124030 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 23:01:12.124048 20613 solver.cpp:244]     Train net output #1: loss = 0.154767 (* 1 = 0.154767 loss)\nI1213 
23:01:12.215034 20613 sgd_solver.cpp:174] Iteration 66300, lr = 1.989\nI1213 23:01:12.228543 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.344886\nI1213 23:03:30.423382 20613 solver.cpp:337] Iteration 66400, Testing net (#0)\nI1213 23:04:52.169150 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68388\nI1213 23:04:52.169441 20613 solver.cpp:404]     Test net output #1: loss = 1.12721 (* 1 = 1.12721 loss)\nI1213 23:04:53.481737 20613 solver.cpp:228] Iteration 66400, loss = 0.229125\nI1213 23:04:53.481783 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1213 23:04:53.481801 20613 solver.cpp:244]     Train net output #1: loss = 0.229126 (* 1 = 0.229126 loss)\nI1213 23:04:53.569924 20613 sgd_solver.cpp:174] Iteration 66400, lr = 1.992\nI1213 23:04:53.583727 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.301377\nI1213 23:07:11.740958 20613 solver.cpp:337] Iteration 66500, Testing net (#0)\nI1213 23:08:33.482810 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80296\nI1213 23:08:33.483114 20613 solver.cpp:404]     Test net output #1: loss = 0.654916 (* 1 = 0.654916 loss)\nI1213 23:08:34.795500 20613 solver.cpp:228] Iteration 66500, loss = 0.255176\nI1213 23:08:34.795552 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 23:08:34.795569 20613 solver.cpp:244]     Train net output #1: loss = 0.255177 (* 1 = 0.255177 loss)\nI1213 23:08:34.888900 20613 sgd_solver.cpp:174] Iteration 66500, lr = 1.995\nI1213 23:08:34.902736 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340313\nI1213 23:10:53.112761 20613 solver.cpp:337] Iteration 66600, Testing net (#0)\nI1213 23:12:14.747768 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78664\nI1213 23:12:14.748133 20613 solver.cpp:404]     Test net output #1: loss = 0.804785 (* 1 = 0.804785 loss)\nI1213 23:12:16.060710 20613 solver.cpp:228] Iteration 66600, loss = 0.232968\nI1213 23:12:16.060765 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1213 
23:12:16.060782 20613 solver.cpp:244]     Train net output #1: loss = 0.232969 (* 1 = 0.232969 loss)\nI1213 23:12:16.150898 20613 sgd_solver.cpp:174] Iteration 66600, lr = 1.998\nI1213 23:12:16.164780 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.339652\nI1213 23:14:34.393447 20613 solver.cpp:337] Iteration 66700, Testing net (#0)\nI1213 23:15:56.105700 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66956\nI1213 23:15:56.106027 20613 solver.cpp:404]     Test net output #1: loss = 1.37574 (* 1 = 1.37574 loss)\nI1213 23:15:57.418232 20613 solver.cpp:228] Iteration 66700, loss = 0.15438\nI1213 23:15:57.418279 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 23:15:57.418296 20613 solver.cpp:244]     Train net output #1: loss = 0.15438 (* 1 = 0.15438 loss)\nI1213 23:15:57.508805 20613 sgd_solver.cpp:174] Iteration 66700, lr = 2.001\nI1213 23:15:57.522603 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295913\nI1213 23:18:15.738178 20613 solver.cpp:337] Iteration 66800, Testing net (#0)\nI1213 23:19:37.443532 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7604\nI1213 23:19:37.443833 20613 solver.cpp:404]     Test net output #1: loss = 1.08441 (* 1 = 1.08441 loss)\nI1213 23:19:38.756752 20613 solver.cpp:228] Iteration 66800, loss = 0.168633\nI1213 23:19:38.756803 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1213 23:19:38.756820 20613 solver.cpp:244]     Train net output #1: loss = 0.168634 (* 1 = 0.168634 loss)\nI1213 23:19:38.848137 20613 sgd_solver.cpp:174] Iteration 66800, lr = 2.004\nI1213 23:19:38.861997 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.285467\nI1213 23:21:57.116060 20613 solver.cpp:337] Iteration 66900, Testing net (#0)\nI1213 23:23:18.872529 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84964\nI1213 23:23:18.872895 20613 solver.cpp:404]     Test net output #1: loss = 0.477366 (* 1 = 0.477366 loss)\nI1213 23:23:20.185374 20613 solver.cpp:228] Iteration 66900, loss = 
0.13892\nI1213 23:23:20.185430 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 23:23:20.185446 20613 solver.cpp:244]     Train net output #1: loss = 0.138921 (* 1 = 0.138921 loss)\nI1213 23:23:20.276476 20613 sgd_solver.cpp:174] Iteration 66900, lr = 2.007\nI1213 23:23:20.290431 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275911\nI1213 23:25:38.546566 20613 solver.cpp:337] Iteration 67000, Testing net (#0)\nI1213 23:27:00.284129 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81132\nI1213 23:27:00.284431 20613 solver.cpp:404]     Test net output #1: loss = 0.62901 (* 1 = 0.62901 loss)\nI1213 23:27:01.597115 20613 solver.cpp:228] Iteration 67000, loss = 0.14366\nI1213 23:27:01.597168 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 23:27:01.597185 20613 solver.cpp:244]     Train net output #1: loss = 0.143661 (* 1 = 0.143661 loss)\nI1213 23:27:01.690248 20613 sgd_solver.cpp:174] Iteration 67000, lr = 2.01\nI1213 23:27:01.704107 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.348434\nI1213 23:29:19.938030 20613 solver.cpp:337] Iteration 67100, Testing net (#0)\nI1213 23:30:41.686419 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79816\nI1213 23:30:41.686748 20613 solver.cpp:404]     Test net output #1: loss = 0.671548 (* 1 = 0.671548 loss)\nI1213 23:30:42.999475 20613 solver.cpp:228] Iteration 67100, loss = 0.165521\nI1213 23:30:42.999521 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1213 23:30:42.999538 20613 solver.cpp:244]     Train net output #1: loss = 0.165521 (* 1 = 0.165521 loss)\nI1213 23:30:43.088073 20613 sgd_solver.cpp:174] Iteration 67100, lr = 2.013\nI1213 23:30:43.101922 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327183\nI1213 23:33:01.345497 20613 solver.cpp:337] Iteration 67200, Testing net (#0)\nI1213 23:34:23.071835 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6622\nI1213 23:34:23.072135 20613 solver.cpp:404]     Test net output #1: loss = 1.46031 
(* 1 = 1.46031 loss)\nI1213 23:34:24.384466 20613 solver.cpp:228] Iteration 67200, loss = 0.238568\nI1213 23:34:24.384522 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1213 23:34:24.384541 20613 solver.cpp:244]     Train net output #1: loss = 0.238568 (* 1 = 0.238568 loss)\nI1213 23:34:24.475641 20613 sgd_solver.cpp:174] Iteration 67200, lr = 2.016\nI1213 23:34:24.489414 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318886\nI1213 23:36:42.809747 20613 solver.cpp:337] Iteration 67300, Testing net (#0)\nI1213 23:38:04.533308 20613 solver.cpp:404]     Test net output #0: accuracy = 0.661281\nI1213 23:38:04.533602 20613 solver.cpp:404]     Test net output #1: loss = 1.62958 (* 1 = 1.62958 loss)\nI1213 23:38:05.845588 20613 solver.cpp:228] Iteration 67300, loss = 0.243286\nI1213 23:38:05.845643 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 23:38:05.845660 20613 solver.cpp:244]     Train net output #1: loss = 0.243287 (* 1 = 0.243287 loss)\nI1213 23:38:05.933435 20613 sgd_solver.cpp:174] Iteration 67300, lr = 2.019\nI1213 23:38:05.947315 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.367466\nI1213 23:40:24.297683 20613 solver.cpp:337] Iteration 67400, Testing net (#0)\nI1213 23:41:46.032734 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82208\nI1213 23:41:46.033032 20613 solver.cpp:404]     Test net output #1: loss = 0.575291 (* 1 = 0.575291 loss)\nI1213 23:41:47.345737 20613 solver.cpp:228] Iteration 67400, loss = 0.18837\nI1213 23:41:47.345793 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 23:41:47.345810 20613 solver.cpp:244]     Train net output #1: loss = 0.18837 (* 1 = 0.18837 loss)\nI1213 23:41:47.438822 20613 sgd_solver.cpp:174] Iteration 67400, lr = 2.022\nI1213 23:41:47.452662 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.282329\nI1213 23:44:05.714270 20613 solver.cpp:337] Iteration 67500, Testing net (#0)\nI1213 23:45:27.444573 20613 solver.cpp:404]     Test net output #0: accuracy 
= 0.82212\nI1213 23:45:27.444869 20613 solver.cpp:404]     Test net output #1: loss = 0.577175 (* 1 = 0.577175 loss)\nI1213 23:45:28.757100 20613 solver.cpp:228] Iteration 67500, loss = 0.185762\nI1213 23:45:28.757149 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1213 23:45:28.757166 20613 solver.cpp:244]     Train net output #1: loss = 0.185762 (* 1 = 0.185762 loss)\nI1213 23:45:28.852581 20613 sgd_solver.cpp:174] Iteration 67500, lr = 2.025\nI1213 23:45:28.866451 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.339846\nI1213 23:47:47.143697 20613 solver.cpp:337] Iteration 67600, Testing net (#0)\nI1213 23:49:08.857893 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78232\nI1213 23:49:08.858186 20613 solver.cpp:404]     Test net output #1: loss = 0.76617 (* 1 = 0.76617 loss)\nI1213 23:49:10.170441 20613 solver.cpp:228] Iteration 67600, loss = 0.153556\nI1213 23:49:10.170492 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1213 23:49:10.170509 20613 solver.cpp:244]     Train net output #1: loss = 0.153557 (* 1 = 0.153557 loss)\nI1213 23:49:10.263541 20613 sgd_solver.cpp:174] Iteration 67600, lr = 2.028\nI1213 23:49:10.277396 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.334624\nI1213 23:51:28.596753 20613 solver.cpp:337] Iteration 67700, Testing net (#0)\nI1213 23:52:50.322544 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72204\nI1213 23:52:50.323078 20613 solver.cpp:404]     Test net output #1: loss = 1.0793 (* 1 = 1.0793 loss)\nI1213 23:52:51.635392 20613 solver.cpp:228] Iteration 67700, loss = 0.183855\nI1213 23:52:51.635450 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1213 23:52:51.635468 20613 solver.cpp:244]     Train net output #1: loss = 0.183855 (* 1 = 0.183855 loss)\nI1213 23:52:51.726936 20613 sgd_solver.cpp:174] Iteration 67700, lr = 2.031\nI1213 23:52:51.740752 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.298297\nI1213 23:55:10.139933 20613 solver.cpp:337] Iteration 67800, 
Testing net (#0)\nI1213 23:56:31.883513 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8022\nI1213 23:56:31.883889 20613 solver.cpp:404]     Test net output #1: loss = 0.666074 (* 1 = 0.666074 loss)\nI1213 23:56:33.196323 20613 solver.cpp:228] Iteration 67800, loss = 0.16373\nI1213 23:56:33.196380 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1213 23:56:33.196398 20613 solver.cpp:244]     Train net output #1: loss = 0.163731 (* 1 = 0.163731 loss)\nI1213 23:56:33.287883 20613 sgd_solver.cpp:174] Iteration 67800, lr = 2.034\nI1213 23:56:33.301751 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.359238\nI1213 23:58:51.620625 20613 solver.cpp:337] Iteration 67900, Testing net (#0)\nI1214 00:00:13.380234 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79032\nI1214 00:00:13.380599 20613 solver.cpp:404]     Test net output #1: loss = 0.757837 (* 1 = 0.757837 loss)\nI1214 00:00:14.692641 20613 solver.cpp:228] Iteration 67900, loss = 0.186776\nI1214 00:00:14.692700 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 00:00:14.692720 20613 solver.cpp:244]     Train net output #1: loss = 0.186777 (* 1 = 0.186777 loss)\nI1214 00:00:14.781426 20613 sgd_solver.cpp:174] Iteration 67900, lr = 2.037\nI1214 00:00:14.795338 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.377148\nI1214 00:02:33.089208 20613 solver.cpp:337] Iteration 68000, Testing net (#0)\nI1214 00:03:54.846755 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75564\nI1214 00:03:54.847131 20613 solver.cpp:404]     Test net output #1: loss = 0.928195 (* 1 = 0.928195 loss)\nI1214 00:03:56.159183 20613 solver.cpp:228] Iteration 68000, loss = 0.244376\nI1214 00:03:56.159240 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 00:03:56.159258 20613 solver.cpp:244]     Train net output #1: loss = 0.244376 (* 1 = 0.244376 loss)\nI1214 00:03:56.244709 20613 sgd_solver.cpp:174] Iteration 68000, lr = 2.04\nI1214 00:03:56.258205 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.337791\nI1214 00:06:14.516405 20613 solver.cpp:337] Iteration 68100, Testing net (#0)\nI1214 00:07:36.247656 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75244\nI1214 00:07:36.248044 20613 solver.cpp:404]     Test net output #1: loss = 0.985461 (* 1 = 0.985461 loss)\nI1214 00:07:37.559893 20613 solver.cpp:228] Iteration 68100, loss = 0.171985\nI1214 00:07:37.559948 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 00:07:37.559967 20613 solver.cpp:244]     Train net output #1: loss = 0.171986 (* 1 = 0.171986 loss)\nI1214 00:07:37.646658 20613 sgd_solver.cpp:174] Iteration 68100, lr = 2.043\nI1214 00:07:37.660490 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.328078\nI1214 00:09:55.854933 20613 solver.cpp:337] Iteration 68200, Testing net (#0)\nI1214 00:11:17.603958 20613 solver.cpp:404]     Test net output #0: accuracy = 0.664\nI1214 00:11:17.604326 20613 solver.cpp:404]     Test net output #1: loss = 1.4667 (* 1 = 1.4667 loss)\nI1214 00:11:18.916872 20613 solver.cpp:228] Iteration 68200, loss = 0.277003\nI1214 00:11:18.916930 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 00:11:18.916949 20613 solver.cpp:244]     Train net output #1: loss = 0.277003 (* 1 = 0.277003 loss)\nI1214 00:11:19.007977 20613 sgd_solver.cpp:174] Iteration 68200, lr = 2.046\nI1214 00:11:19.021885 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.370552\nI1214 00:13:37.186580 20613 solver.cpp:337] Iteration 68300, Testing net (#0)\nI1214 00:14:58.944393 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78888\nI1214 00:14:58.944766 20613 solver.cpp:404]     Test net output #1: loss = 0.750767 (* 1 = 0.750767 loss)\nI1214 00:15:00.257550 20613 solver.cpp:228] Iteration 68300, loss = 0.106579\nI1214 00:15:00.257609 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1214 00:15:00.257627 20613 solver.cpp:244]     Train net output #1: loss = 0.106579 (* 1 = 0.106579 loss)\nI1214 
00:15:00.348093 20613 sgd_solver.cpp:174] Iteration 68300, lr = 2.049\nI1214 00:15:00.361963 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340713\nI1214 00:17:18.602600 20613 solver.cpp:337] Iteration 68400, Testing net (#0)\nI1214 00:18:40.357928 20613 solver.cpp:404]     Test net output #0: accuracy = 0.84324\nI1214 00:18:40.358325 20613 solver.cpp:404]     Test net output #1: loss = 0.524485 (* 1 = 0.524485 loss)\nI1214 00:18:41.670565 20613 solver.cpp:228] Iteration 68400, loss = 0.200377\nI1214 00:18:41.670614 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 00:18:41.670631 20613 solver.cpp:244]     Train net output #1: loss = 0.200378 (* 1 = 0.200378 loss)\nI1214 00:18:41.759299 20613 sgd_solver.cpp:174] Iteration 68400, lr = 2.052\nI1214 00:18:41.773124 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314433\nI1214 00:21:00.079413 20613 solver.cpp:337] Iteration 68500, Testing net (#0)\nI1214 00:22:21.817976 20613 solver.cpp:404]     Test net output #0: accuracy = 0.758\nI1214 00:22:21.818347 20613 solver.cpp:404]     Test net output #1: loss = 0.809189 (* 1 = 0.809189 loss)\nI1214 00:22:23.130137 20613 solver.cpp:228] Iteration 68500, loss = 0.161812\nI1214 00:22:23.130188 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 00:22:23.130205 20613 solver.cpp:244]     Train net output #1: loss = 0.161812 (* 1 = 0.161812 loss)\nI1214 00:22:23.216774 20613 sgd_solver.cpp:174] Iteration 68500, lr = 2.055\nI1214 00:22:23.230666 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.260363\nI1214 00:24:41.513581 20613 solver.cpp:337] Iteration 68600, Testing net (#0)\nI1214 00:26:03.254588 20613 solver.cpp:404]     Test net output #0: accuracy = 0.83364\nI1214 00:26:03.254962 20613 solver.cpp:404]     Test net output #1: loss = 0.606905 (* 1 = 0.606905 loss)\nI1214 00:26:04.567382 20613 solver.cpp:228] Iteration 68600, loss = 0.190789\nI1214 00:26:04.567435 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 
00:26:04.567451 20613 solver.cpp:244]     Train net output #1: loss = 0.190789 (* 1 = 0.190789 loss)\nI1214 00:26:04.666406 20613 sgd_solver.cpp:174] Iteration 68600, lr = 2.058\nI1214 00:26:04.680316 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315573\nI1214 00:28:22.900197 20613 solver.cpp:337] Iteration 68700, Testing net (#0)\nI1214 00:29:44.636047 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82848\nI1214 00:29:44.636418 20613 solver.cpp:404]     Test net output #1: loss = 0.539282 (* 1 = 0.539282 loss)\nI1214 00:29:45.948721 20613 solver.cpp:228] Iteration 68700, loss = 0.193703\nI1214 00:29:45.948771 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 00:29:45.948794 20613 solver.cpp:244]     Train net output #1: loss = 0.193703 (* 1 = 0.193703 loss)\nI1214 00:29:46.037878 20613 sgd_solver.cpp:174] Iteration 68700, lr = 2.061\nI1214 00:29:46.051810 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340843\nI1214 00:32:04.432842 20613 solver.cpp:337] Iteration 68800, Testing net (#0)\nI1214 00:33:26.171618 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70156\nI1214 00:33:26.171996 20613 solver.cpp:404]     Test net output #1: loss = 1.25371 (* 1 = 1.25371 loss)\nI1214 00:33:27.484519 20613 solver.cpp:228] Iteration 68800, loss = 0.306434\nI1214 00:33:27.484575 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 00:33:27.484593 20613 solver.cpp:244]     Train net output #1: loss = 0.306435 (* 1 = 0.306435 loss)\nI1214 00:33:27.577939 20613 sgd_solver.cpp:174] Iteration 68800, lr = 2.064\nI1214 00:33:27.591809 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.346504\nI1214 00:35:45.901495 20613 solver.cpp:337] Iteration 68900, Testing net (#0)\nI1214 00:37:07.664014 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7794\nI1214 00:37:07.664415 20613 solver.cpp:404]     Test net output #1: loss = 0.763324 (* 1 = 0.763324 loss)\nI1214 00:37:08.976614 20613 solver.cpp:228] Iteration 68900, loss = 
0.162923\nI1214 00:37:08.976668 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 00:37:08.976686 20613 solver.cpp:244]     Train net output #1: loss = 0.162924 (* 1 = 0.162924 loss)\nI1214 00:37:09.065685 20613 sgd_solver.cpp:174] Iteration 68900, lr = 2.067\nI1214 00:37:09.079557 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309957\nI1214 00:39:27.360381 20613 solver.cpp:337] Iteration 69000, Testing net (#0)\nI1214 00:40:49.121749 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7216\nI1214 00:40:49.122169 20613 solver.cpp:404]     Test net output #1: loss = 0.985175 (* 1 = 0.985175 loss)\nI1214 00:40:50.434098 20613 solver.cpp:228] Iteration 69000, loss = 0.191109\nI1214 00:40:50.434147 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 00:40:50.434165 20613 solver.cpp:244]     Train net output #1: loss = 0.191109 (* 1 = 0.191109 loss)\nI1214 00:40:50.526326 20613 sgd_solver.cpp:174] Iteration 69000, lr = 2.07\nI1214 00:40:50.540042 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.25064\nI1214 00:43:08.860836 20613 solver.cpp:337] Iteration 69100, Testing net (#0)\nI1214 00:44:30.837158 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7452\nI1214 00:44:30.837527 20613 solver.cpp:404]     Test net output #1: loss = 0.862338 (* 1 = 0.862338 loss)\nI1214 00:44:32.149788 20613 solver.cpp:228] Iteration 69100, loss = 0.169806\nI1214 00:44:32.149840 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 00:44:32.149864 20613 solver.cpp:244]     Train net output #1: loss = 0.169806 (* 1 = 0.169806 loss)\nI1214 00:44:32.240061 20613 sgd_solver.cpp:174] Iteration 69100, lr = 2.073\nI1214 00:44:32.253684 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319891\nI1214 00:46:50.509281 20613 solver.cpp:337] Iteration 69200, Testing net (#0)\nI1214 00:48:12.316788 20613 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI1214 00:48:12.317180 20613 solver.cpp:404]     Test net output #1: loss = 
0.871497 (* 1 = 0.871497 loss)\nI1214 00:48:13.629624 20613 solver.cpp:228] Iteration 69200, loss = 0.15105\nI1214 00:48:13.629673 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 00:48:13.629689 20613 solver.cpp:244]     Train net output #1: loss = 0.15105 (* 1 = 0.15105 loss)\nI1214 00:48:13.725148 20613 sgd_solver.cpp:174] Iteration 69200, lr = 2.076\nI1214 00:48:13.739027 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.338132\nI1214 00:50:31.967901 20613 solver.cpp:337] Iteration 69300, Testing net (#0)\nI1214 00:51:53.719975 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76208\nI1214 00:51:53.720384 20613 solver.cpp:404]     Test net output #1: loss = 0.863772 (* 1 = 0.863772 loss)\nI1214 00:51:55.032582 20613 solver.cpp:228] Iteration 69300, loss = 0.197087\nI1214 00:51:55.032631 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 00:51:55.032649 20613 solver.cpp:244]     Train net output #1: loss = 0.197088 (* 1 = 0.197088 loss)\nI1214 00:51:55.121606 20613 sgd_solver.cpp:174] Iteration 69300, lr = 2.079\nI1214 00:51:55.135540 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.357579\nI1214 00:54:13.459355 20613 solver.cpp:337] Iteration 69400, Testing net (#0)\nI1214 00:55:35.041762 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78044\nI1214 00:55:35.042140 20613 solver.cpp:404]     Test net output #1: loss = 0.719817 (* 1 = 0.719817 loss)\nI1214 00:55:36.354539 20613 solver.cpp:228] Iteration 69400, loss = 0.27964\nI1214 00:55:36.354594 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 00:55:36.354614 20613 solver.cpp:244]     Train net output #1: loss = 0.27964 (* 1 = 0.27964 loss)\nI1214 00:55:36.449045 20613 sgd_solver.cpp:174] Iteration 69400, lr = 2.082\nI1214 00:55:36.462339 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.372489\nI1214 00:57:54.763617 20613 solver.cpp:337] Iteration 69500, Testing net (#0)\nI1214 00:59:16.421001 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.6782\nI1214 00:59:16.421417 20613 solver.cpp:404]     Test net output #1: loss = 1.34056 (* 1 = 1.34056 loss)\nI1214 00:59:17.734805 20613 solver.cpp:228] Iteration 69500, loss = 0.217507\nI1214 00:59:17.734858 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 00:59:17.734876 20613 solver.cpp:244]     Train net output #1: loss = 0.217507 (* 1 = 0.217507 loss)\nI1214 00:59:17.832216 20613 sgd_solver.cpp:174] Iteration 69500, lr = 2.085\nI1214 00:59:17.846012 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415304\nI1214 01:01:36.150312 20613 solver.cpp:337] Iteration 69600, Testing net (#0)\nI1214 01:02:58.015440 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80708\nI1214 01:02:58.015864 20613 solver.cpp:404]     Test net output #1: loss = 0.653201 (* 1 = 0.653201 loss)\nI1214 01:02:59.329100 20613 solver.cpp:228] Iteration 69600, loss = 0.165964\nI1214 01:02:59.329154 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 01:02:59.329170 20613 solver.cpp:244]     Train net output #1: loss = 0.165964 (* 1 = 0.165964 loss)\nI1214 01:02:59.425668 20613 sgd_solver.cpp:174] Iteration 69600, lr = 2.088\nI1214 01:02:59.439502 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318721\nI1214 01:05:17.737280 20613 solver.cpp:337] Iteration 69700, Testing net (#0)\nI1214 01:06:39.656105 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77376\nI1214 01:06:39.656510 20613 solver.cpp:404]     Test net output #1: loss = 0.814711 (* 1 = 0.814711 loss)\nI1214 01:06:40.968451 20613 solver.cpp:228] Iteration 69700, loss = 0.154216\nI1214 01:06:40.968504 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 01:06:40.968521 20613 solver.cpp:244]     Train net output #1: loss = 0.154216 (* 1 = 0.154216 loss)\nI1214 01:06:41.063782 20613 sgd_solver.cpp:174] Iteration 69700, lr = 2.091\nI1214 01:06:41.077687 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.372806\nI1214 01:08:59.431198 20613 solver.cpp:337] 
Iteration 69800, Testing net (#0)\nI1214 01:10:20.068272 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67788\nI1214 01:10:20.068599 20613 solver.cpp:404]     Test net output #1: loss = 1.24023 (* 1 = 1.24023 loss)\nI1214 01:10:21.379524 20613 solver.cpp:228] Iteration 69800, loss = 0.192139\nI1214 01:10:21.379562 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 01:10:21.379578 20613 solver.cpp:244]     Train net output #1: loss = 0.192139 (* 1 = 0.192139 loss)\nI1214 01:10:21.477255 20613 sgd_solver.cpp:174] Iteration 69800, lr = 2.094\nI1214 01:10:21.490103 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3706\nI1214 01:12:39.634865 20613 solver.cpp:337] Iteration 69900, Testing net (#0)\nI1214 01:14:00.262848 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68784\nI1214 01:14:00.263214 20613 solver.cpp:404]     Test net output #1: loss = 1.14923 (* 1 = 1.14923 loss)\nI1214 01:14:01.573510 20613 solver.cpp:228] Iteration 69900, loss = 0.143546\nI1214 01:14:01.573552 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 01:14:01.573570 20613 solver.cpp:244]     Train net output #1: loss = 0.143546 (* 1 = 0.143546 loss)\nI1214 01:14:01.665741 20613 sgd_solver.cpp:174] Iteration 69900, lr = 2.097\nI1214 01:14:01.678488 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.349276\nI1214 01:16:19.834133 20613 solver.cpp:337] Iteration 70000, Testing net (#0)\nI1214 01:17:40.461937 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75352\nI1214 01:17:40.462306 20613 solver.cpp:404]     Test net output #1: loss = 0.902259 (* 1 = 0.902259 loss)\nI1214 01:17:41.772037 20613 solver.cpp:228] Iteration 70000, loss = 0.0886217\nI1214 01:17:41.772070 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1214 01:17:41.772086 20613 solver.cpp:244]     Train net output #1: loss = 0.0886222 (* 1 = 0.0886222 loss)\nI1214 01:17:41.868674 20613 sgd_solver.cpp:174] Iteration 70000, lr = 2.1\nI1214 01:17:41.881283 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.255306\nI1214 01:19:59.959334 20613 solver.cpp:337] Iteration 70100, Testing net (#0)\nI1214 01:21:20.590734 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71416\nI1214 01:21:20.591094 20613 solver.cpp:404]     Test net output #1: loss = 1.1528 (* 1 = 1.1528 loss)\nI1214 01:21:21.901685 20613 solver.cpp:228] Iteration 70100, loss = 0.161009\nI1214 01:21:21.901731 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 01:21:21.901748 20613 solver.cpp:244]     Train net output #1: loss = 0.161009 (* 1 = 0.161009 loss)\nI1214 01:21:21.994601 20613 sgd_solver.cpp:174] Iteration 70100, lr = 2.103\nI1214 01:21:22.007299 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306131\nI1214 01:23:40.145354 20613 solver.cpp:337] Iteration 70200, Testing net (#0)\nI1214 01:25:00.818621 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7556\nI1214 01:25:00.818923 20613 solver.cpp:404]     Test net output #1: loss = 0.812136 (* 1 = 0.812136 loss)\nI1214 01:25:02.129793 20613 solver.cpp:228] Iteration 70200, loss = 0.145442\nI1214 01:25:02.129827 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 01:25:02.129842 20613 solver.cpp:244]     Train net output #1: loss = 0.145443 (* 1 = 0.145443 loss)\nI1214 01:25:02.221770 20613 sgd_solver.cpp:174] Iteration 70200, lr = 2.106\nI1214 01:25:02.234269 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321784\nI1214 01:27:20.268348 20613 solver.cpp:337] Iteration 70300, Testing net (#0)\nI1214 01:28:40.938861 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76764\nI1214 01:28:40.939224 20613 solver.cpp:404]     Test net output #1: loss = 0.977482 (* 1 = 0.977482 loss)\nI1214 01:28:42.251287 20613 solver.cpp:228] Iteration 70300, loss = 0.281994\nI1214 01:28:42.251322 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 01:28:42.251338 20613 solver.cpp:244]     Train net output #1: loss = 0.281995 (* 1 = 0.281995 
loss)\nI1214 01:28:42.349752 20613 sgd_solver.cpp:174] Iteration 70300, lr = 2.109\nI1214 01:28:42.362534 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.369416\nI1214 01:31:00.465822 20613 solver.cpp:337] Iteration 70400, Testing net (#0)\nI1214 01:32:21.142949 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7592\nI1214 01:32:21.143256 20613 solver.cpp:404]     Test net output #1: loss = 0.773832 (* 1 = 0.773832 loss)\nI1214 01:32:22.453496 20613 solver.cpp:228] Iteration 70400, loss = 0.238142\nI1214 01:32:22.453537 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 01:32:22.453554 20613 solver.cpp:244]     Train net output #1: loss = 0.238143 (* 1 = 0.238143 loss)\nI1214 01:32:22.547579 20613 sgd_solver.cpp:174] Iteration 70400, lr = 2.112\nI1214 01:32:22.560236 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290979\nI1214 01:34:40.695466 20613 solver.cpp:337] Iteration 70500, Testing net (#0)\nI1214 01:36:01.374619 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74608\nI1214 01:36:01.375000 20613 solver.cpp:404]     Test net output #1: loss = 0.932096 (* 1 = 0.932096 loss)\nI1214 01:36:02.685472 20613 solver.cpp:228] Iteration 70500, loss = 0.167965\nI1214 01:36:02.685510 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 01:36:02.685528 20613 solver.cpp:244]     Train net output #1: loss = 0.167965 (* 1 = 0.167965 loss)\nI1214 01:36:02.781774 20613 sgd_solver.cpp:174] Iteration 70500, lr = 2.115\nI1214 01:36:02.794447 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.299917\nI1214 01:38:20.901743 20613 solver.cpp:337] Iteration 70600, Testing net (#0)\nI1214 01:39:41.577126 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72644\nI1214 01:39:41.577446 20613 solver.cpp:404]     Test net output #1: loss = 0.970636 (* 1 = 0.970636 loss)\nI1214 01:39:42.888464 20613 solver.cpp:228] Iteration 70600, loss = 0.174279\nI1214 01:39:42.888504 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.936\nI1214 01:39:42.888521 20613 solver.cpp:244]     Train net output #1: loss = 0.174279 (* 1 = 0.174279 loss)\nI1214 01:39:42.988235 20613 sgd_solver.cpp:174] Iteration 70600, lr = 2.118\nI1214 01:39:43.000962 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315978\nI1214 01:42:01.144560 20613 solver.cpp:337] Iteration 70700, Testing net (#0)\nI1214 01:43:21.922842 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8334\nI1214 01:43:21.923156 20613 solver.cpp:404]     Test net output #1: loss = 0.538723 (* 1 = 0.538723 loss)\nI1214 01:43:23.234727 20613 solver.cpp:228] Iteration 70700, loss = 0.166727\nI1214 01:43:23.234774 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 01:43:23.234791 20613 solver.cpp:244]     Train net output #1: loss = 0.166728 (* 1 = 0.166728 loss)\nI1214 01:43:23.328562 20613 sgd_solver.cpp:174] Iteration 70700, lr = 2.121\nI1214 01:43:23.341248 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332982\nI1214 01:45:41.431936 20613 solver.cpp:337] Iteration 70800, Testing net (#0)\nI1214 01:47:02.175283 20613 solver.cpp:404]     Test net output #0: accuracy = 0.765\nI1214 01:47:02.175581 20613 solver.cpp:404]     Test net output #1: loss = 0.78752 (* 1 = 0.78752 loss)\nI1214 01:47:03.487144 20613 solver.cpp:228] Iteration 70800, loss = 0.172278\nI1214 01:47:03.487190 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 01:47:03.487207 20613 solver.cpp:244]     Train net output #1: loss = 0.172278 (* 1 = 0.172278 loss)\nI1214 01:47:03.585621 20613 sgd_solver.cpp:174] Iteration 70800, lr = 2.124\nI1214 01:47:03.598332 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.339167\nI1214 01:49:21.634523 20613 solver.cpp:337] Iteration 70900, Testing net (#0)\nI1214 01:50:42.408819 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62156\nI1214 01:50:42.409181 20613 solver.cpp:404]     Test net output #1: loss = 1.70947 (* 1 = 1.70947 loss)\nI1214 01:50:43.721010 20613 solver.cpp:228] Iteration 70900, 
loss = 0.175096\nI1214 01:50:43.721055 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 01:50:43.721072 20613 solver.cpp:244]     Train net output #1: loss = 0.175096 (* 1 = 0.175096 loss)\nI1214 01:50:43.816496 20613 sgd_solver.cpp:174] Iteration 70900, lr = 2.127\nI1214 01:50:43.829215 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.299765\nI1214 01:53:01.924752 20613 solver.cpp:337] Iteration 71000, Testing net (#0)\nI1214 01:54:22.694473 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7908\nI1214 01:54:22.694823 20613 solver.cpp:404]     Test net output #1: loss = 0.699672 (* 1 = 0.699672 loss)\nI1214 01:54:24.005489 20613 solver.cpp:228] Iteration 71000, loss = 0.274488\nI1214 01:54:24.005534 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 01:54:24.005551 20613 solver.cpp:244]     Train net output #1: loss = 0.274488 (* 1 = 0.274488 loss)\nI1214 01:54:24.095898 20613 sgd_solver.cpp:174] Iteration 71000, lr = 2.13\nI1214 01:54:24.108628 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.397854\nI1214 01:56:42.237643 20613 solver.cpp:337] Iteration 71100, Testing net (#0)\nI1214 01:58:03.022198 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66616\nI1214 01:58:03.022516 20613 solver.cpp:404]     Test net output #1: loss = 1.58412 (* 1 = 1.58412 loss)\nI1214 01:58:04.333411 20613 solver.cpp:228] Iteration 71100, loss = 0.336992\nI1214 01:58:04.333451 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 01:58:04.333472 20613 solver.cpp:244]     Train net output #1: loss = 0.336992 (* 1 = 0.336992 loss)\nI1214 01:58:04.432554 20613 sgd_solver.cpp:174] Iteration 71100, lr = 2.133\nI1214 01:58:04.445214 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.364587\nI1214 02:00:22.644949 20613 solver.cpp:337] Iteration 71200, Testing net (#0)\nI1214 02:01:43.422083 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71728\nI1214 02:01:43.422464 20613 solver.cpp:404]     Test net output #1: loss 
= 1.0929 (* 1 = 1.0929 loss)\nI1214 02:01:44.733604 20613 solver.cpp:228] Iteration 71200, loss = 0.213227\nI1214 02:01:44.733654 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 02:01:44.733670 20613 solver.cpp:244]     Train net output #1: loss = 0.213228 (* 1 = 0.213228 loss)\nI1214 02:01:44.833003 20613 sgd_solver.cpp:174] Iteration 71200, lr = 2.136\nI1214 02:01:44.845665 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.38271\nI1214 02:04:02.893304 20613 solver.cpp:337] Iteration 71300, Testing net (#0)\nI1214 02:05:23.677970 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78824\nI1214 02:05:23.678243 20613 solver.cpp:404]     Test net output #1: loss = 0.789349 (* 1 = 0.789349 loss)\nI1214 02:05:24.989934 20613 solver.cpp:228] Iteration 71300, loss = 0.158211\nI1214 02:05:24.989974 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 02:05:24.989995 20613 solver.cpp:244]     Train net output #1: loss = 0.158211 (* 1 = 0.158211 loss)\nI1214 02:05:25.088670 20613 sgd_solver.cpp:174] Iteration 71300, lr = 2.139\nI1214 02:05:25.101402 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307561\nI1214 02:07:43.155702 20613 solver.cpp:337] Iteration 71400, Testing net (#0)\nI1214 02:09:03.939378 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65944\nI1214 02:09:03.939658 20613 solver.cpp:404]     Test net output #1: loss = 1.85301 (* 1 = 1.85301 loss)\nI1214 02:09:05.249497 20613 solver.cpp:228] Iteration 71400, loss = 0.195783\nI1214 02:09:05.249544 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 02:09:05.249569 20613 solver.cpp:244]     Train net output #1: loss = 0.195783 (* 1 = 0.195783 loss)\nI1214 02:09:05.347887 20613 sgd_solver.cpp:174] Iteration 71400, lr = 2.142\nI1214 02:09:05.360632 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.331638\nI1214 02:11:23.413203 20613 solver.cpp:337] Iteration 71500, Testing net (#0)\nI1214 02:12:44.203722 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.80536\nI1214 02:12:44.204084 20613 solver.cpp:404]     Test net output #1: loss = 0.719899 (* 1 = 0.719899 loss)\nI1214 02:12:45.514700 20613 solver.cpp:228] Iteration 71500, loss = 0.130324\nI1214 02:12:45.514747 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 02:12:45.514771 20613 solver.cpp:244]     Train net output #1: loss = 0.130324 (* 1 = 0.130324 loss)\nI1214 02:12:45.611215 20613 sgd_solver.cpp:174] Iteration 71500, lr = 2.145\nI1214 02:12:45.624300 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322198\nI1214 02:15:03.768307 20613 solver.cpp:337] Iteration 71600, Testing net (#0)\nI1214 02:16:24.552739 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8134\nI1214 02:16:24.553033 20613 solver.cpp:404]     Test net output #1: loss = 0.703187 (* 1 = 0.703187 loss)\nI1214 02:16:25.863713 20613 solver.cpp:228] Iteration 71600, loss = 0.137124\nI1214 02:16:25.863760 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 02:16:25.863785 20613 solver.cpp:244]     Train net output #1: loss = 0.137124 (* 1 = 0.137124 loss)\nI1214 02:16:25.962829 20613 sgd_solver.cpp:174] Iteration 71600, lr = 2.148\nI1214 02:16:25.975642 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305839\nI1214 02:18:44.026815 20613 solver.cpp:337] Iteration 71700, Testing net (#0)\nI1214 02:20:04.809190 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82212\nI1214 02:20:04.809535 20613 solver.cpp:404]     Test net output #1: loss = 0.632299 (* 1 = 0.632299 loss)\nI1214 02:20:06.121160 20613 solver.cpp:228] Iteration 71700, loss = 0.167237\nI1214 02:20:06.121206 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 02:20:06.121232 20613 solver.cpp:244]     Train net output #1: loss = 0.167238 (* 1 = 0.167238 loss)\nI1214 02:20:06.220402 20613 sgd_solver.cpp:174] Iteration 71700, lr = 2.151\nI1214 02:20:06.232852 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305222\nI1214 02:22:24.352180 20613 solver.cpp:337] 
Iteration 71800, Testing net (#0)\nI1214 02:23:45.139400 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70276\nI1214 02:23:45.139690 20613 solver.cpp:404]     Test net output #1: loss = 1.08547 (* 1 = 1.08547 loss)\nI1214 02:23:46.450479 20613 solver.cpp:228] Iteration 71800, loss = 0.253415\nI1214 02:23:46.450523 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 02:23:46.450548 20613 solver.cpp:244]     Train net output #1: loss = 0.253416 (* 1 = 0.253416 loss)\nI1214 02:23:46.546445 20613 sgd_solver.cpp:174] Iteration 71800, lr = 2.154\nI1214 02:23:46.559182 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.274704\nI1214 02:26:04.646914 20613 solver.cpp:337] Iteration 71900, Testing net (#0)\nI1214 02:27:25.427314 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77844\nI1214 02:27:25.427613 20613 solver.cpp:404]     Test net output #1: loss = 0.759004 (* 1 = 0.759004 loss)\nI1214 02:27:26.738224 20613 solver.cpp:228] Iteration 71900, loss = 0.174306\nI1214 02:27:26.738260 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 02:27:26.738283 20613 solver.cpp:244]     Train net output #1: loss = 0.174307 (* 1 = 0.174307 loss)\nI1214 02:27:26.834179 20613 sgd_solver.cpp:174] Iteration 71900, lr = 2.157\nI1214 02:27:26.846958 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337549\nI1214 02:29:44.919136 20613 solver.cpp:337] Iteration 72000, Testing net (#0)\nI1214 02:31:05.703294 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72676\nI1214 02:31:05.703601 20613 solver.cpp:404]     Test net output #1: loss = 1.07468 (* 1 = 1.07468 loss)\nI1214 02:31:07.015300 20613 solver.cpp:228] Iteration 72000, loss = 0.223108\nI1214 02:31:07.015336 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 02:31:07.015362 20613 solver.cpp:244]     Train net output #1: loss = 0.223109 (* 1 = 0.223109 loss)\nI1214 02:31:07.108983 20613 sgd_solver.cpp:174] Iteration 72000, lr = 2.16\nI1214 02:31:07.121599 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.336691\nI1214 02:33:25.185392 20613 solver.cpp:337] Iteration 72100, Testing net (#0)\nI1214 02:34:45.970558 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7672\nI1214 02:34:45.970877 20613 solver.cpp:404]     Test net output #1: loss = 0.917418 (* 1 = 0.917418 loss)\nI1214 02:34:47.282007 20613 solver.cpp:228] Iteration 72100, loss = 0.132453\nI1214 02:34:47.282049 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 02:34:47.282066 20613 solver.cpp:244]     Train net output #1: loss = 0.132454 (* 1 = 0.132454 loss)\nI1214 02:34:47.373553 20613 sgd_solver.cpp:174] Iteration 72100, lr = 2.163\nI1214 02:34:47.386297 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.296023\nI1214 02:37:05.463430 20613 solver.cpp:337] Iteration 72200, Testing net (#0)\nI1214 02:38:26.233606 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7134\nI1214 02:38:26.233976 20613 solver.cpp:404]     Test net output #1: loss = 1.14984 (* 1 = 1.14984 loss)\nI1214 02:38:27.544093 20613 solver.cpp:228] Iteration 72200, loss = 0.225274\nI1214 02:38:27.544142 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 02:38:27.544167 20613 solver.cpp:244]     Train net output #1: loss = 0.225274 (* 1 = 0.225274 loss)\nI1214 02:38:27.639156 20613 sgd_solver.cpp:174] Iteration 72200, lr = 2.166\nI1214 02:38:27.651904 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376996\nI1214 02:40:45.768123 20613 solver.cpp:337] Iteration 72300, Testing net (#0)\nI1214 02:42:06.549929 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81212\nI1214 02:42:06.550298 20613 solver.cpp:404]     Test net output #1: loss = 0.686251 (* 1 = 0.686251 loss)\nI1214 02:42:07.862150 20613 solver.cpp:228] Iteration 72300, loss = 0.170195\nI1214 02:42:07.862197 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 02:42:07.862221 20613 solver.cpp:244]     Train net output #1: loss = 0.170195 (* 1 = 0.170195 
loss)\nI1214 02:42:07.955847 20613 sgd_solver.cpp:174] Iteration 72300, lr = 2.169\nI1214 02:42:07.968458 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327463\nI1214 02:44:25.996038 20613 solver.cpp:337] Iteration 72400, Testing net (#0)\nI1214 02:45:46.759019 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65864\nI1214 02:45:46.759404 20613 solver.cpp:404]     Test net output #1: loss = 1.70116 (* 1 = 1.70116 loss)\nI1214 02:45:48.069418 20613 solver.cpp:228] Iteration 72400, loss = 0.125265\nI1214 02:45:48.069458 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 02:45:48.069479 20613 solver.cpp:244]     Train net output #1: loss = 0.125266 (* 1 = 0.125266 loss)\nI1214 02:45:48.163727 20613 sgd_solver.cpp:174] Iteration 72400, lr = 2.172\nI1214 02:45:48.176255 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28999\nI1214 02:48:06.240074 20613 solver.cpp:337] Iteration 72500, Testing net (#0)\nI1214 02:49:27.002032 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7808\nI1214 02:49:27.002415 20613 solver.cpp:404]     Test net output #1: loss = 0.726266 (* 1 = 0.726266 loss)\nI1214 02:49:28.312786 20613 solver.cpp:228] Iteration 72500, loss = 0.133357\nI1214 02:49:28.312824 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 02:49:28.312846 20613 solver.cpp:244]     Train net output #1: loss = 0.133357 (* 1 = 0.133357 loss)\nI1214 02:49:28.407481 20613 sgd_solver.cpp:174] Iteration 72500, lr = 2.175\nI1214 02:49:28.420204 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327022\nI1214 02:51:46.488420 20613 solver.cpp:337] Iteration 72600, Testing net (#0)\nI1214 02:53:07.243733 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74456\nI1214 02:53:07.244115 20613 solver.cpp:404]     Test net output #1: loss = 1.04384 (* 1 = 1.04384 loss)\nI1214 02:53:08.554627 20613 solver.cpp:228] Iteration 72600, loss = 0.239369\nI1214 02:53:08.554674 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 
02:53:08.554699 20613 solver.cpp:244]     Train net output #1: loss = 0.23937 (* 1 = 0.23937 loss)\nI1214 02:53:08.648571 20613 sgd_solver.cpp:174] Iteration 72600, lr = 2.178\nI1214 02:53:08.661357 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.27562\nI1214 02:55:26.687826 20613 solver.cpp:337] Iteration 72700, Testing net (#0)\nI1214 02:56:47.444270 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69116\nI1214 02:56:47.444665 20613 solver.cpp:404]     Test net output #1: loss = 1.33712 (* 1 = 1.33712 loss)\nI1214 02:56:48.754892 20613 solver.cpp:228] Iteration 72700, loss = 0.212019\nI1214 02:56:48.754940 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 02:56:48.754964 20613 solver.cpp:244]     Train net output #1: loss = 0.212019 (* 1 = 0.212019 loss)\nI1214 02:56:48.854414 20613 sgd_solver.cpp:174] Iteration 72700, lr = 2.181\nI1214 02:56:48.867149 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.325691\nI1214 02:59:06.929345 20613 solver.cpp:337] Iteration 72800, Testing net (#0)\nI1214 03:00:27.685926 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75028\nI1214 03:00:27.686331 20613 solver.cpp:404]     Test net output #1: loss = 0.908528 (* 1 = 0.908528 loss)\nI1214 03:00:28.996544 20613 solver.cpp:228] Iteration 72800, loss = 0.201984\nI1214 03:00:28.996588 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 03:00:28.996613 20613 solver.cpp:244]     Train net output #1: loss = 0.201984 (* 1 = 0.201984 loss)\nI1214 03:00:29.093606 20613 sgd_solver.cpp:174] Iteration 72800, lr = 2.184\nI1214 03:00:29.106142 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.31275\nI1214 03:02:47.204313 20613 solver.cpp:337] Iteration 72900, Testing net (#0)\nI1214 03:04:07.966996 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81872\nI1214 03:04:07.967384 20613 solver.cpp:404]     Test net output #1: loss = 0.588936 (* 1 = 0.588936 loss)\nI1214 03:04:09.279005 20613 solver.cpp:228] Iteration 72900, loss = 
0.149004\nI1214 03:04:09.279045 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 03:04:09.279062 20613 solver.cpp:244]     Train net output #1: loss = 0.149004 (* 1 = 0.149004 loss)\nI1214 03:04:09.378577 20613 sgd_solver.cpp:174] Iteration 72900, lr = 2.187\nI1214 03:04:09.391178 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.2721\nI1214 03:06:27.462877 20613 solver.cpp:337] Iteration 73000, Testing net (#0)\nI1214 03:07:48.216636 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75636\nI1214 03:07:48.217000 20613 solver.cpp:404]     Test net output #1: loss = 0.856271 (* 1 = 0.856271 loss)\nI1214 03:07:49.528367 20613 solver.cpp:228] Iteration 73000, loss = 0.155183\nI1214 03:07:49.528410 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 03:07:49.528426 20613 solver.cpp:244]     Train net output #1: loss = 0.155184 (* 1 = 0.155184 loss)\nI1214 03:07:49.621901 20613 sgd_solver.cpp:174] Iteration 73000, lr = 2.19\nI1214 03:07:49.634652 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.304646\nI1214 03:10:07.763087 20613 solver.cpp:337] Iteration 73100, Testing net (#0)\nI1214 03:11:28.518771 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80168\nI1214 03:11:28.519174 20613 solver.cpp:404]     Test net output #1: loss = 0.693839 (* 1 = 0.693839 loss)\nI1214 03:11:29.830086 20613 solver.cpp:228] Iteration 73100, loss = 0.238732\nI1214 03:11:29.830128 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 03:11:29.830145 20613 solver.cpp:244]     Train net output #1: loss = 0.238733 (* 1 = 0.238733 loss)\nI1214 03:11:29.930799 20613 sgd_solver.cpp:174] Iteration 73100, lr = 2.193\nI1214 03:11:29.943476 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275199\nI1214 03:13:48.103443 20613 solver.cpp:337] Iteration 73200, Testing net (#0)\nI1214 03:15:08.861117 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69632\nI1214 03:15:08.861488 20613 solver.cpp:404]     Test net output #1: loss = 
1.11358 (* 1 = 1.11358 loss)\nI1214 03:15:10.172791 20613 solver.cpp:228] Iteration 73200, loss = 0.180417\nI1214 03:15:10.172827 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 03:15:10.172842 20613 solver.cpp:244]     Train net output #1: loss = 0.180418 (* 1 = 0.180418 loss)\nI1214 03:15:10.271347 20613 sgd_solver.cpp:174] Iteration 73200, lr = 2.196\nI1214 03:15:10.284055 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32366\nI1214 03:17:28.414777 20613 solver.cpp:337] Iteration 73300, Testing net (#0)\nI1214 03:18:49.158071 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68076\nI1214 03:18:49.158468 20613 solver.cpp:404]     Test net output #1: loss = 1.3729 (* 1 = 1.3729 loss)\nI1214 03:18:50.468243 20613 solver.cpp:228] Iteration 73300, loss = 0.156117\nI1214 03:18:50.468286 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 03:18:50.468302 20613 solver.cpp:244]     Train net output #1: loss = 0.156118 (* 1 = 0.156118 loss)\nI1214 03:18:50.563318 20613 sgd_solver.cpp:174] Iteration 73300, lr = 2.199\nI1214 03:18:50.576002 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293717\nI1214 03:21:08.734437 20613 solver.cpp:337] Iteration 73400, Testing net (#0)\nI1214 03:22:29.500571 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76584\nI1214 03:22:29.500957 20613 solver.cpp:404]     Test net output #1: loss = 0.840344 (* 1 = 0.840344 loss)\nI1214 03:22:30.812289 20613 solver.cpp:228] Iteration 73400, loss = 0.172241\nI1214 03:22:30.812331 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 03:22:30.812347 20613 solver.cpp:244]     Train net output #1: loss = 0.172241 (* 1 = 0.172241 loss)\nI1214 03:22:30.912596 20613 sgd_solver.cpp:174] Iteration 73400, lr = 2.202\nI1214 03:22:30.925302 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318928\nI1214 03:24:49.113849 20613 solver.cpp:337] Iteration 73500, Testing net (#0)\nI1214 03:26:09.773111 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.651\nI1214 03:26:09.773510 20613 solver.cpp:404]     Test net output #1: loss = 1.50414 (* 1 = 1.50414 loss)\nI1214 03:26:11.084774 20613 solver.cpp:228] Iteration 73500, loss = 0.220305\nI1214 03:26:11.084817 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 03:26:11.084833 20613 solver.cpp:244]     Train net output #1: loss = 0.220305 (* 1 = 0.220305 loss)\nI1214 03:26:11.183027 20613 sgd_solver.cpp:174] Iteration 73500, lr = 2.205\nI1214 03:26:11.195741 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.327823\nI1214 03:28:29.295764 20613 solver.cpp:337] Iteration 73600, Testing net (#0)\nI1214 03:29:49.955906 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6916\nI1214 03:29:49.956313 20613 solver.cpp:404]     Test net output #1: loss = 1.06391 (* 1 = 1.06391 loss)\nI1214 03:29:51.267470 20613 solver.cpp:228] Iteration 73600, loss = 0.212406\nI1214 03:29:51.267506 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 03:29:51.267521 20613 solver.cpp:244]     Train net output #1: loss = 0.212406 (* 1 = 0.212406 loss)\nI1214 03:29:51.360374 20613 sgd_solver.cpp:174] Iteration 73600, lr = 2.208\nI1214 03:29:51.373138 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295904\nI1214 03:32:09.470302 20613 solver.cpp:337] Iteration 73700, Testing net (#0)\nI1214 03:33:30.121968 20613 solver.cpp:404]     Test net output #0: accuracy = 0.694\nI1214 03:33:30.122376 20613 solver.cpp:404]     Test net output #1: loss = 1.11517 (* 1 = 1.11517 loss)\nI1214 03:33:31.433625 20613 solver.cpp:228] Iteration 73700, loss = 0.188143\nI1214 03:33:31.433668 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 03:33:31.433689 20613 solver.cpp:244]     Train net output #1: loss = 0.188144 (* 1 = 0.188144 loss)\nI1214 03:33:31.531878 20613 sgd_solver.cpp:174] Iteration 73700, lr = 2.211\nI1214 03:33:31.544644 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292632\nI1214 03:35:49.669924 20613 solver.cpp:337] Iteration 
73800, Testing net (#0)\nI1214 03:37:10.318624 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65832\nI1214 03:37:10.318992 20613 solver.cpp:404]     Test net output #1: loss = 1.55057 (* 1 = 1.55057 loss)\nI1214 03:37:11.630712 20613 solver.cpp:228] Iteration 73800, loss = 0.391877\nI1214 03:37:11.630754 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 03:37:11.630770 20613 solver.cpp:244]     Train net output #1: loss = 0.391877 (* 1 = 0.391877 loss)\nI1214 03:37:11.726737 20613 sgd_solver.cpp:174] Iteration 73800, lr = 2.214\nI1214 03:37:11.739514 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.338013\nI1214 03:39:29.767168 20613 solver.cpp:337] Iteration 73900, Testing net (#0)\nI1214 03:40:50.417549 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68844\nI1214 03:40:50.417917 20613 solver.cpp:404]     Test net output #1: loss = 1.16747 (* 1 = 1.16747 loss)\nI1214 03:40:51.726943 20613 solver.cpp:228] Iteration 73900, loss = 0.201359\nI1214 03:40:51.726977 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 03:40:51.726992 20613 solver.cpp:244]     Train net output #1: loss = 0.20136 (* 1 = 0.20136 loss)\nI1214 03:40:51.824537 20613 sgd_solver.cpp:174] Iteration 73900, lr = 2.217\nI1214 03:40:51.837359 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293291\nI1214 03:43:09.723644 20613 solver.cpp:337] Iteration 74000, Testing net (#0)\nI1214 03:44:30.378108 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76308\nI1214 03:44:30.378506 20613 solver.cpp:404]     Test net output #1: loss = 0.879507 (* 1 = 0.879507 loss)\nI1214 03:44:31.686892 20613 solver.cpp:228] Iteration 74000, loss = 0.185539\nI1214 03:44:31.686933 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 03:44:31.686950 20613 solver.cpp:244]     Train net output #1: loss = 0.185539 (* 1 = 0.185539 loss)\nI1214 03:44:31.781862 20613 sgd_solver.cpp:174] Iteration 74000, lr = 2.22\nI1214 03:44:31.794643 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.319035\nI1214 03:46:49.740782 20613 solver.cpp:337] Iteration 74100, Testing net (#0)\nI1214 03:48:10.387363 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77652\nI1214 03:48:10.387770 20613 solver.cpp:404]     Test net output #1: loss = 0.82083 (* 1 = 0.82083 loss)\nI1214 03:48:11.696184 20613 solver.cpp:228] Iteration 74100, loss = 0.162731\nI1214 03:48:11.696226 20613 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1214 03:48:11.696243 20613 solver.cpp:244]     Train net output #1: loss = 0.162731 (* 1 = 0.162731 loss)\nI1214 03:48:11.791364 20613 sgd_solver.cpp:174] Iteration 74100, lr = 2.223\nI1214 03:48:11.804198 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28395\nI1214 03:50:29.686662 20613 solver.cpp:337] Iteration 74200, Testing net (#0)\nI1214 03:51:50.324182 20613 solver.cpp:404]     Test net output #0: accuracy = 0.58016\nI1214 03:51:50.324579 20613 solver.cpp:404]     Test net output #1: loss = 1.79338 (* 1 = 1.79338 loss)\nI1214 03:51:51.632308 20613 solver.cpp:228] Iteration 74200, loss = 0.157745\nI1214 03:51:51.632349 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 03:51:51.632365 20613 solver.cpp:244]     Train net output #1: loss = 0.157745 (* 1 = 0.157745 loss)\nI1214 03:51:51.724355 20613 sgd_solver.cpp:174] Iteration 74200, lr = 2.226\nI1214 03:51:51.736994 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.354671\nI1214 03:54:09.653136 20613 solver.cpp:337] Iteration 74300, Testing net (#0)\nI1214 03:55:30.291453 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67924\nI1214 03:55:30.291838 20613 solver.cpp:404]     Test net output #1: loss = 1.27701 (* 1 = 1.27701 loss)\nI1214 03:55:31.600378 20613 solver.cpp:228] Iteration 74300, loss = 0.329795\nI1214 03:55:31.600419 20613 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1214 03:55:31.600435 20613 solver.cpp:244]     Train net output #1: loss = 0.329796 (* 1 = 0.329796 loss)\nI1214 
03:55:31.697679 20613 sgd_solver.cpp:174] Iteration 74300, lr = 2.229\nI1214 03:55:31.710443 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.347594\nI1214 03:57:49.683279 20613 solver.cpp:337] Iteration 74400, Testing net (#0)\nI1214 03:59:10.426345 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74904\nI1214 03:59:10.426751 20613 solver.cpp:404]     Test net output #1: loss = 0.880817 (* 1 = 0.880817 loss)\nI1214 03:59:11.735105 20613 solver.cpp:228] Iteration 74400, loss = 0.183644\nI1214 03:59:11.735146 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 03:59:11.735162 20613 solver.cpp:244]     Train net output #1: loss = 0.183645 (* 1 = 0.183645 loss)\nI1214 03:59:11.828953 20613 sgd_solver.cpp:174] Iteration 74400, lr = 2.232\nI1214 03:59:11.841689 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30195\nI1214 04:01:29.663975 20613 solver.cpp:337] Iteration 74500, Testing net (#0)\nI1214 04:02:50.410594 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72916\nI1214 04:02:50.410980 20613 solver.cpp:404]     Test net output #1: loss = 0.968568 (* 1 = 0.968568 loss)\nI1214 04:02:51.718719 20613 solver.cpp:228] Iteration 74500, loss = 0.195468\nI1214 04:02:51.718760 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 04:02:51.718776 20613 solver.cpp:244]     Train net output #1: loss = 0.195468 (* 1 = 0.195468 loss)\nI1214 04:02:51.807948 20613 sgd_solver.cpp:174] Iteration 74500, lr = 2.235\nI1214 04:02:51.820745 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.271977\nI1214 04:05:09.707269 20613 solver.cpp:337] Iteration 74600, Testing net (#0)\nI1214 04:06:30.453359 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73012\nI1214 04:06:30.453747 20613 solver.cpp:404]     Test net output #1: loss = 1.16021 (* 1 = 1.16021 loss)\nI1214 04:06:31.761857 20613 solver.cpp:228] Iteration 74600, loss = 0.207998\nI1214 04:06:31.761895 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 
04:06:31.761912 20613 solver.cpp:244]     Train net output #1: loss = 0.207998 (* 1 = 0.207998 loss)\nI1214 04:06:31.856194 20613 sgd_solver.cpp:174] Iteration 74600, lr = 2.238\nI1214 04:06:31.868907 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300112\nI1214 04:08:49.775786 20613 solver.cpp:337] Iteration 74700, Testing net (#0)\nI1214 04:10:10.521930 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67428\nI1214 04:10:10.522323 20613 solver.cpp:404]     Test net output #1: loss = 1.26658 (* 1 = 1.26658 loss)\nI1214 04:10:11.830281 20613 solver.cpp:228] Iteration 74700, loss = 0.147496\nI1214 04:10:11.830322 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 04:10:11.830337 20613 solver.cpp:244]     Train net output #1: loss = 0.147496 (* 1 = 0.147496 loss)\nI1214 04:10:11.926684 20613 sgd_solver.cpp:174] Iteration 74700, lr = 2.241\nI1214 04:10:11.939447 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.287977\nI1214 04:12:29.820929 20613 solver.cpp:337] Iteration 74800, Testing net (#0)\nI1214 04:13:50.567190 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78704\nI1214 04:13:50.567580 20613 solver.cpp:404]     Test net output #1: loss = 0.699239 (* 1 = 0.699239 loss)\nI1214 04:13:51.875649 20613 solver.cpp:228] Iteration 74800, loss = 0.210115\nI1214 04:13:51.875691 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 04:13:51.875713 20613 solver.cpp:244]     Train net output #1: loss = 0.210115 (* 1 = 0.210115 loss)\nI1214 04:13:51.973644 20613 sgd_solver.cpp:174] Iteration 74800, lr = 2.244\nI1214 04:13:51.986454 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290113\nI1214 04:16:09.867516 20613 solver.cpp:337] Iteration 74900, Testing net (#0)\nI1214 04:17:30.611503 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74592\nI1214 04:17:30.611924 20613 solver.cpp:404]     Test net output #1: loss = 1.06395 (* 1 = 1.06395 loss)\nI1214 04:17:31.920137 20613 solver.cpp:228] Iteration 74900, loss = 
0.195202\nI1214 04:17:31.920177 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 04:17:31.920194 20613 solver.cpp:244]     Train net output #1: loss = 0.195203 (* 1 = 0.195203 loss)\nI1214 04:17:32.013638 20613 sgd_solver.cpp:174] Iteration 74900, lr = 2.247\nI1214 04:17:32.026407 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.277004\nI1214 04:19:49.867385 20613 solver.cpp:337] Iteration 75000, Testing net (#0)\nI1214 04:21:10.613911 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7196\nI1214 04:21:10.614313 20613 solver.cpp:404]     Test net output #1: loss = 1.20161 (* 1 = 1.20161 loss)\nI1214 04:21:11.922076 20613 solver.cpp:228] Iteration 75000, loss = 0.22606\nI1214 04:21:11.922117 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 04:21:11.922134 20613 solver.cpp:244]     Train net output #1: loss = 0.22606 (* 1 = 0.22606 loss)\nI1214 04:21:12.017491 20613 sgd_solver.cpp:174] Iteration 75000, lr = 2.25\nI1214 04:21:12.030061 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.415432\nI1214 04:23:29.907513 20613 solver.cpp:337] Iteration 75100, Testing net (#0)\nI1214 04:24:50.650071 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80708\nI1214 04:24:50.650471 20613 solver.cpp:404]     Test net output #1: loss = 0.672529 (* 1 = 0.672529 loss)\nI1214 04:24:51.958541 20613 solver.cpp:228] Iteration 75100, loss = 0.301972\nI1214 04:24:51.958581 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 04:24:51.958598 20613 solver.cpp:244]     Train net output #1: loss = 0.301973 (* 1 = 0.301973 loss)\nI1214 04:24:52.054821 20613 sgd_solver.cpp:174] Iteration 75100, lr = 2.253\nI1214 04:24:52.067553 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297098\nI1214 04:27:09.893692 20613 solver.cpp:337] Iteration 75200, Testing net (#0)\nI1214 04:28:30.651317 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73416\nI1214 04:28:30.651723 20613 solver.cpp:404]     Test net output #1: loss = 
0.831465 (* 1 = 0.831465 loss)\nI1214 04:28:31.959653 20613 solver.cpp:228] Iteration 75200, loss = 0.151327\nI1214 04:28:31.959686 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 04:28:31.959707 20613 solver.cpp:244]     Train net output #1: loss = 0.151328 (* 1 = 0.151328 loss)\nI1214 04:28:32.051370 20613 sgd_solver.cpp:174] Iteration 75200, lr = 2.256\nI1214 04:28:32.064136 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.260483\nI1214 04:30:49.972641 20613 solver.cpp:337] Iteration 75300, Testing net (#0)\nI1214 04:32:10.729648 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73648\nI1214 04:32:10.730057 20613 solver.cpp:404]     Test net output #1: loss = 0.945757 (* 1 = 0.945757 loss)\nI1214 04:32:12.038033 20613 solver.cpp:228] Iteration 75300, loss = 0.134756\nI1214 04:32:12.038076 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1214 04:32:12.038092 20613 solver.cpp:244]     Train net output #1: loss = 0.134756 (* 1 = 0.134756 loss)\nI1214 04:32:12.129828 20613 sgd_solver.cpp:174] Iteration 75300, lr = 2.259\nI1214 04:32:12.142647 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307927\nI1214 04:34:30.002326 20613 solver.cpp:337] Iteration 75400, Testing net (#0)\nI1214 04:35:50.793737 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73552\nI1214 04:35:50.794137 20613 solver.cpp:404]     Test net output #1: loss = 0.861889 (* 1 = 0.861889 loss)\nI1214 04:35:52.102304 20613 solver.cpp:228] Iteration 75400, loss = 0.284888\nI1214 04:35:52.102346 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 04:35:52.102363 20613 solver.cpp:244]     Train net output #1: loss = 0.284889 (* 1 = 0.284889 loss)\nI1214 04:35:52.197625 20613 sgd_solver.cpp:174] Iteration 75400, lr = 2.262\nI1214 04:35:52.210206 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.380459\nI1214 04:38:10.131849 20613 solver.cpp:337] Iteration 75500, Testing net (#0)\nI1214 04:39:30.926003 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.77508\nI1214 04:39:30.926396 20613 solver.cpp:404]     Test net output #1: loss = 0.747122 (* 1 = 0.747122 loss)\nI1214 04:39:32.234562 20613 solver.cpp:228] Iteration 75500, loss = 0.195039\nI1214 04:39:32.234606 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 04:39:32.234622 20613 solver.cpp:244]     Train net output #1: loss = 0.195039 (* 1 = 0.195039 loss)\nI1214 04:39:32.335083 20613 sgd_solver.cpp:174] Iteration 75500, lr = 2.265\nI1214 04:39:32.347767 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.29637\nI1214 04:41:50.188997 20613 solver.cpp:337] Iteration 75600, Testing net (#0)\nI1214 04:43:10.982965 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69644\nI1214 04:43:10.983347 20613 solver.cpp:404]     Test net output #1: loss = 1.15662 (* 1 = 1.15662 loss)\nI1214 04:43:12.292264 20613 solver.cpp:228] Iteration 75600, loss = 0.25664\nI1214 04:43:12.292309 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 04:43:12.292325 20613 solver.cpp:244]     Train net output #1: loss = 0.256641 (* 1 = 0.256641 loss)\nI1214 04:43:12.383283 20613 sgd_solver.cpp:174] Iteration 75600, lr = 2.268\nI1214 04:43:12.396028 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.2831\nI1214 04:45:30.284375 20613 solver.cpp:337] Iteration 75700, Testing net (#0)\nI1214 04:46:51.066536 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75772\nI1214 04:46:51.066932 20613 solver.cpp:404]     Test net output #1: loss = 0.743968 (* 1 = 0.743968 loss)\nI1214 04:46:52.375141 20613 solver.cpp:228] Iteration 75700, loss = 0.216657\nI1214 04:46:52.375185 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 04:46:52.375200 20613 solver.cpp:244]     Train net output #1: loss = 0.216658 (* 1 = 0.216658 loss)\nI1214 04:46:52.470502 20613 sgd_solver.cpp:174] Iteration 75700, lr = 2.271\nI1214 04:46:52.483327 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314574\nI1214 04:49:10.355051 20613 
solver.cpp:337] Iteration 75800, Testing net (#0)\nI1214 04:50:31.147814 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77944\nI1214 04:50:31.148200 20613 solver.cpp:404]     Test net output #1: loss = 0.717857 (* 1 = 0.717857 loss)\nI1214 04:50:32.456269 20613 solver.cpp:228] Iteration 75800, loss = 0.212857\nI1214 04:50:32.456312 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 04:50:32.456328 20613 solver.cpp:244]     Train net output #1: loss = 0.212857 (* 1 = 0.212857 loss)\nI1214 04:50:32.551645 20613 sgd_solver.cpp:174] Iteration 75800, lr = 2.274\nI1214 04:50:32.564440 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307472\nI1214 04:52:50.516010 20613 solver.cpp:337] Iteration 75900, Testing net (#0)\nI1214 04:54:11.311451 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78396\nI1214 04:54:11.311858 20613 solver.cpp:404]     Test net output #1: loss = 0.777525 (* 1 = 0.777525 loss)\nI1214 04:54:12.620820 20613 solver.cpp:228] Iteration 75900, loss = 0.177366\nI1214 04:54:12.620864 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 04:54:12.620880 20613 solver.cpp:244]     Train net output #1: loss = 0.177366 (* 1 = 0.177366 loss)\nI1214 04:54:12.712795 20613 sgd_solver.cpp:174] Iteration 75900, lr = 2.277\nI1214 04:54:12.725575 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.269185\nI1214 04:56:30.643280 20613 solver.cpp:337] Iteration 76000, Testing net (#0)\nI1214 04:57:51.431677 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68604\nI1214 04:57:51.432051 20613 solver.cpp:404]     Test net output #1: loss = 1.17527 (* 1 = 1.17527 loss)\nI1214 04:57:52.740382 20613 solver.cpp:228] Iteration 76000, loss = 0.248301\nI1214 04:57:52.740417 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 04:57:52.740432 20613 solver.cpp:244]     Train net output #1: loss = 0.248301 (* 1 = 0.248301 loss)\nI1214 04:57:52.829895 20613 sgd_solver.cpp:174] Iteration 76000, lr = 2.28\nI1214 
04:57:52.842730 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.302271\nI1214 05:00:10.704352 20613 solver.cpp:337] Iteration 76100, Testing net (#0)\nI1214 05:01:31.498842 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74336\nI1214 05:01:31.499243 20613 solver.cpp:404]     Test net output #1: loss = 0.824463 (* 1 = 0.824463 loss)\nI1214 05:01:32.807538 20613 solver.cpp:228] Iteration 76100, loss = 0.224349\nI1214 05:01:32.807580 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 05:01:32.807597 20613 solver.cpp:244]     Train net output #1: loss = 0.22435 (* 1 = 0.22435 loss)\nI1214 05:01:32.899449 20613 sgd_solver.cpp:174] Iteration 76100, lr = 2.283\nI1214 05:01:32.912132 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.310578\nI1214 05:03:50.821748 20613 solver.cpp:337] Iteration 76200, Testing net (#0)\nI1214 05:05:11.618841 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7742\nI1214 05:05:11.619238 20613 solver.cpp:404]     Test net output #1: loss = 0.829181 (* 1 = 0.829181 loss)\nI1214 05:05:12.928179 20613 solver.cpp:228] Iteration 76200, loss = 0.220307\nI1214 05:05:12.928221 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 05:05:12.928238 20613 solver.cpp:244]     Train net output #1: loss = 0.220307 (* 1 = 0.220307 loss)\nI1214 05:05:13.018962 20613 sgd_solver.cpp:174] Iteration 76200, lr = 2.286\nI1214 05:05:13.031730 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.323719\nI1214 05:07:30.871170 20613 solver.cpp:337] Iteration 76300, Testing net (#0)\nI1214 05:08:51.648963 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68244\nI1214 05:08:51.649281 20613 solver.cpp:404]     Test net output #1: loss = 1.58036 (* 1 = 1.58036 loss)\nI1214 05:08:52.959010 20613 solver.cpp:228] Iteration 76300, loss = 0.124624\nI1214 05:08:52.959056 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 05:08:52.959074 20613 solver.cpp:244]     Train net output #1: loss = 0.124625 (* 1 = 
0.124625 loss)\nI1214 05:08:53.051267 20613 sgd_solver.cpp:174] Iteration 76300, lr = 2.289\nI1214 05:08:53.064026 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290853\nI1214 05:11:10.840751 20613 solver.cpp:337] Iteration 76400, Testing net (#0)\nI1214 05:12:31.628593 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82248\nI1214 05:12:31.628962 20613 solver.cpp:404]     Test net output #1: loss = 0.611577 (* 1 = 0.611577 loss)\nI1214 05:12:32.938453 20613 solver.cpp:228] Iteration 76400, loss = 0.235791\nI1214 05:12:32.938496 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 05:12:32.938513 20613 solver.cpp:244]     Train net output #1: loss = 0.235791 (* 1 = 0.235791 loss)\nI1214 05:12:33.026449 20613 sgd_solver.cpp:174] Iteration 76400, lr = 2.292\nI1214 05:12:33.039047 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30561\nI1214 05:14:50.862316 20613 solver.cpp:337] Iteration 76500, Testing net (#0)\nI1214 05:16:11.652058 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7404\nI1214 05:16:11.652426 20613 solver.cpp:404]     Test net output #1: loss = 0.857025 (* 1 = 0.857025 loss)\nI1214 05:16:12.961733 20613 solver.cpp:228] Iteration 76500, loss = 0.168278\nI1214 05:16:12.961776 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 05:16:12.961792 20613 solver.cpp:244]     Train net output #1: loss = 0.168279 (* 1 = 0.168279 loss)\nI1214 05:16:13.054211 20613 sgd_solver.cpp:174] Iteration 76500, lr = 2.295\nI1214 05:16:13.066962 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.339338\nI1214 05:18:30.856917 20613 solver.cpp:337] Iteration 76600, Testing net (#0)\nI1214 05:19:51.651159 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7744\nI1214 05:19:51.651474 20613 solver.cpp:404]     Test net output #1: loss = 0.7432 (* 1 = 0.7432 loss)\nI1214 05:19:52.960546 20613 solver.cpp:228] Iteration 76600, loss = 0.185549\nI1214 05:19:52.960592 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.936\nI1214 05:19:52.960609 20613 solver.cpp:244]     Train net output #1: loss = 0.18555 (* 1 = 0.18555 loss)\nI1214 05:19:53.058195 20613 sgd_solver.cpp:174] Iteration 76600, lr = 2.298\nI1214 05:19:53.070972 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.346246\nI1214 05:22:10.850227 20613 solver.cpp:337] Iteration 76700, Testing net (#0)\nI1214 05:23:31.628110 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70856\nI1214 05:23:31.628428 20613 solver.cpp:404]     Test net output #1: loss = 1.24086 (* 1 = 1.24086 loss)\nI1214 05:23:32.937958 20613 solver.cpp:228] Iteration 76700, loss = 0.161138\nI1214 05:23:32.937993 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 05:23:32.938009 20613 solver.cpp:244]     Train net output #1: loss = 0.161139 (* 1 = 0.161139 loss)\nI1214 05:23:33.031241 20613 sgd_solver.cpp:174] Iteration 76700, lr = 2.301\nI1214 05:23:33.043856 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314085\nI1214 05:25:50.886000 20613 solver.cpp:337] Iteration 76800, Testing net (#0)\nI1214 05:27:11.667804 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80716\nI1214 05:27:11.668113 20613 solver.cpp:404]     Test net output #1: loss = 0.650776 (* 1 = 0.650776 loss)\nI1214 05:27:12.977187 20613 solver.cpp:228] Iteration 76800, loss = 0.160627\nI1214 05:27:12.977231 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 05:27:12.977247 20613 solver.cpp:244]     Train net output #1: loss = 0.160627 (* 1 = 0.160627 loss)\nI1214 05:27:13.077301 20613 sgd_solver.cpp:174] Iteration 76800, lr = 2.304\nI1214 05:27:13.090106 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309278\nI1214 05:29:31.303385 20613 solver.cpp:337] Iteration 76900, Testing net (#0)\nI1214 05:30:53.110407 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78476\nI1214 05:30:53.110831 20613 solver.cpp:404]     Test net output #1: loss = 0.724978 (* 1 = 0.724978 loss)\nI1214 05:30:54.424146 20613 solver.cpp:228] Iteration 76900, 
loss = 0.259383\nI1214 05:30:54.424194 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 05:30:54.424211 20613 solver.cpp:244]     Train net output #1: loss = 0.259384 (* 1 = 0.259384 loss)\nI1214 05:30:54.518743 20613 sgd_solver.cpp:174] Iteration 76900, lr = 2.307\nI1214 05:30:54.532233 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.323755\nI1214 05:33:12.770406 20613 solver.cpp:337] Iteration 77000, Testing net (#0)\nI1214 05:34:34.580673 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74708\nI1214 05:34:34.581125 20613 solver.cpp:404]     Test net output #1: loss = 0.867394 (* 1 = 0.867394 loss)\nI1214 05:34:35.894574 20613 solver.cpp:228] Iteration 77000, loss = 0.201269\nI1214 05:34:35.894623 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 05:34:35.894640 20613 solver.cpp:244]     Train net output #1: loss = 0.201269 (* 1 = 0.201269 loss)\nI1214 05:34:35.984437 20613 sgd_solver.cpp:174] Iteration 77000, lr = 2.31\nI1214 05:34:35.998306 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303407\nI1214 05:36:54.270107 20613 solver.cpp:337] Iteration 77100, Testing net (#0)\nI1214 05:38:16.082195 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69252\nI1214 05:38:16.082639 20613 solver.cpp:404]     Test net output #1: loss = 1.18777 (* 1 = 1.18777 loss)\nI1214 05:38:17.395582 20613 solver.cpp:228] Iteration 77100, loss = 0.258223\nI1214 05:38:17.395632 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 05:38:17.395650 20613 solver.cpp:244]     Train net output #1: loss = 0.258224 (* 1 = 0.258224 loss)\nI1214 05:38:17.487426 20613 sgd_solver.cpp:174] Iteration 77100, lr = 2.313\nI1214 05:38:17.501325 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292736\nI1214 05:40:35.801348 20613 solver.cpp:337] Iteration 77200, Testing net (#0)\nI1214 05:41:57.623054 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77552\nI1214 05:41:57.623504 20613 solver.cpp:404]     Test net output #1: 
loss = 0.824083 (* 1 = 0.824083 loss)\nI1214 05:41:58.935948 20613 solver.cpp:228] Iteration 77200, loss = 0.188853\nI1214 05:41:58.935997 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 05:41:58.936013 20613 solver.cpp:244]     Train net output #1: loss = 0.188853 (* 1 = 0.188853 loss)\nI1214 05:41:59.024858 20613 sgd_solver.cpp:174] Iteration 77200, lr = 2.316\nI1214 05:41:59.038746 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30103\nI1214 05:44:17.240432 20613 solver.cpp:337] Iteration 77300, Testing net (#0)\nI1214 05:45:39.063853 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7542\nI1214 05:45:39.064291 20613 solver.cpp:404]     Test net output #1: loss = 0.911018 (* 1 = 0.911018 loss)\nI1214 05:45:40.377317 20613 solver.cpp:228] Iteration 77300, loss = 0.24428\nI1214 05:45:40.377372 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 05:45:40.377390 20613 solver.cpp:244]     Train net output #1: loss = 0.244281 (* 1 = 0.244281 loss)\nI1214 05:45:40.465375 20613 sgd_solver.cpp:174] Iteration 77300, lr = 2.319\nI1214 05:45:40.479171 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305041\nI1214 05:47:58.787571 20613 solver.cpp:337] Iteration 77400, Testing net (#0)\nI1214 05:49:20.596565 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79508\nI1214 05:49:20.597046 20613 solver.cpp:404]     Test net output #1: loss = 0.667332 (* 1 = 0.667332 loss)\nI1214 05:49:21.909996 20613 solver.cpp:228] Iteration 77400, loss = 0.192603\nI1214 05:49:21.910051 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 05:49:21.910069 20613 solver.cpp:244]     Train net output #1: loss = 0.192603 (* 1 = 0.192603 loss)\nI1214 05:49:22.001305 20613 sgd_solver.cpp:174] Iteration 77400, lr = 2.322\nI1214 05:49:22.015141 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.294132\nI1214 05:51:40.252892 20613 solver.cpp:337] Iteration 77500, Testing net (#0)\nI1214 05:53:02.047283 20613 solver.cpp:404]     Test net 
output #0: accuracy = 0.67152\nI1214 05:53:02.047688 20613 solver.cpp:404]     Test net output #1: loss = 1.46284 (* 1 = 1.46284 loss)\nI1214 05:53:03.359985 20613 solver.cpp:228] Iteration 77500, loss = 0.254758\nI1214 05:53:03.360041 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 05:53:03.360059 20613 solver.cpp:244]     Train net output #1: loss = 0.254759 (* 1 = 0.254759 loss)\nI1214 05:53:03.453642 20613 sgd_solver.cpp:174] Iteration 77500, lr = 2.325\nI1214 05:53:03.467630 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.320164\nI1214 05:55:21.758201 20613 solver.cpp:337] Iteration 77600, Testing net (#0)\nI1214 05:56:43.522136 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61304\nI1214 05:56:43.522575 20613 solver.cpp:404]     Test net output #1: loss = 2.00816 (* 1 = 2.00816 loss)\nI1214 05:56:44.835551 20613 solver.cpp:228] Iteration 77600, loss = 0.231999\nI1214 05:56:44.835605 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 05:56:44.835623 20613 solver.cpp:244]     Train net output #1: loss = 0.231999 (* 1 = 0.231999 loss)\nI1214 05:56:44.936451 20613 sgd_solver.cpp:174] Iteration 77600, lr = 2.328\nI1214 05:56:44.950295 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.287696\nI1214 05:59:03.314512 20613 solver.cpp:337] Iteration 77700, Testing net (#0)\nI1214 06:00:25.228783 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78268\nI1214 06:00:25.229218 20613 solver.cpp:404]     Test net output #1: loss = 0.778079 (* 1 = 0.778079 loss)\nI1214 06:00:26.542758 20613 solver.cpp:228] Iteration 77700, loss = 0.0953449\nI1214 06:00:26.542815 20613 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1214 06:00:26.542842 20613 solver.cpp:244]     Train net output #1: loss = 0.0953453 (* 1 = 0.0953453 loss)\nI1214 06:00:26.636571 20613 sgd_solver.cpp:174] Iteration 77700, lr = 2.331\nI1214 06:00:26.650599 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.278842\nI1214 06:02:44.995419 20613 
solver.cpp:337] Iteration 77800, Testing net (#0)\nI1214 06:04:06.937561 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80408\nI1214 06:04:06.937985 20613 solver.cpp:404]     Test net output #1: loss = 0.708239 (* 1 = 0.708239 loss)\nI1214 06:04:08.257022 20613 solver.cpp:228] Iteration 77800, loss = 0.196337\nI1214 06:04:08.257072 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 06:04:08.257097 20613 solver.cpp:244]     Train net output #1: loss = 0.196337 (* 1 = 0.196337 loss)\nI1214 06:04:08.348578 20613 sgd_solver.cpp:174] Iteration 77800, lr = 2.334\nI1214 06:04:08.362473 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307064\nI1214 06:06:26.701375 20613 solver.cpp:337] Iteration 77900, Testing net (#0)\nI1214 06:07:48.545773 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77892\nI1214 06:07:48.546227 20613 solver.cpp:404]     Test net output #1: loss = 0.781522 (* 1 = 0.781522 loss)\nI1214 06:07:49.859439 20613 solver.cpp:228] Iteration 77900, loss = 0.169015\nI1214 06:07:49.859491 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 06:07:49.859516 20613 solver.cpp:244]     Train net output #1: loss = 0.169016 (* 1 = 0.169016 loss)\nI1214 06:07:49.958207 20613 sgd_solver.cpp:174] Iteration 77900, lr = 2.337\nI1214 06:07:49.972028 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303564\nI1214 06:10:08.295006 20613 solver.cpp:337] Iteration 78000, Testing net (#0)\nI1214 06:11:30.039216 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7186\nI1214 06:11:30.039610 20613 solver.cpp:404]     Test net output #1: loss = 0.941678 (* 1 = 0.941678 loss)\nI1214 06:11:31.353137 20613 solver.cpp:228] Iteration 78000, loss = 0.201498\nI1214 06:11:31.353189 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 06:11:31.353206 20613 solver.cpp:244]     Train net output #1: loss = 0.201498 (* 1 = 0.201498 loss)\nI1214 06:11:31.453217 20613 sgd_solver.cpp:174] Iteration 78000, lr = 2.34\nI1214 
06:11:31.467082 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.294149\nI1214 06:13:49.872748 20613 solver.cpp:337] Iteration 78100, Testing net (#0)\nI1214 06:15:11.657014 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78408\nI1214 06:15:11.657462 20613 solver.cpp:404]     Test net output #1: loss = 0.72134 (* 1 = 0.72134 loss)\nI1214 06:15:12.969574 20613 solver.cpp:228] Iteration 78100, loss = 0.12315\nI1214 06:15:12.969629 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 06:15:12.969645 20613 solver.cpp:244]     Train net output #1: loss = 0.12315 (* 1 = 0.12315 loss)\nI1214 06:15:13.064524 20613 sgd_solver.cpp:174] Iteration 78100, lr = 2.343\nI1214 06:15:13.078191 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297289\nI1214 06:17:31.441067 20613 solver.cpp:337] Iteration 78200, Testing net (#0)\nI1214 06:18:53.222764 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73456\nI1214 06:18:53.223181 20613 solver.cpp:404]     Test net output #1: loss = 0.903955 (* 1 = 0.903955 loss)\nI1214 06:18:54.535384 20613 solver.cpp:228] Iteration 78200, loss = 0.187943\nI1214 06:18:54.535436 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 06:18:54.535454 20613 solver.cpp:244]     Train net output #1: loss = 0.187943 (* 1 = 0.187943 loss)\nI1214 06:18:54.630739 20613 sgd_solver.cpp:174] Iteration 78200, lr = 2.346\nI1214 06:18:54.644532 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290655\nI1214 06:21:12.997304 20613 solver.cpp:337] Iteration 78300, Testing net (#0)\nI1214 06:22:34.764042 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8174\nI1214 06:22:34.764482 20613 solver.cpp:404]     Test net output #1: loss = 0.590177 (* 1 = 0.590177 loss)\nI1214 06:22:36.076618 20613 solver.cpp:228] Iteration 78300, loss = 0.213133\nI1214 06:22:36.076669 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 06:22:36.076687 20613 solver.cpp:244]     Train net output #1: loss = 0.213133 (* 1 = 
0.213133 loss)\nI1214 06:22:36.173544 20613 sgd_solver.cpp:174] Iteration 78300, lr = 2.349\nI1214 06:22:36.187335 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3019\nI1214 06:24:54.424460 20613 solver.cpp:337] Iteration 78400, Testing net (#0)\nI1214 06:26:16.213618 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61452\nI1214 06:26:16.214042 20613 solver.cpp:404]     Test net output #1: loss = 1.48898 (* 1 = 1.48898 loss)\nI1214 06:26:17.527243 20613 solver.cpp:228] Iteration 78400, loss = 0.212673\nI1214 06:26:17.527295 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 06:26:17.527312 20613 solver.cpp:244]     Train net output #1: loss = 0.212674 (* 1 = 0.212674 loss)\nI1214 06:26:17.621240 20613 sgd_solver.cpp:174] Iteration 78400, lr = 2.352\nI1214 06:26:17.635044 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.274838\nI1214 06:28:35.848331 20613 solver.cpp:337] Iteration 78500, Testing net (#0)\nI1214 06:29:57.630836 20613 solver.cpp:404]     Test net output #0: accuracy = 0.64396\nI1214 06:29:57.631229 20613 solver.cpp:404]     Test net output #1: loss = 1.67975 (* 1 = 1.67975 loss)\nI1214 06:29:58.943809 20613 solver.cpp:228] Iteration 78500, loss = 0.265621\nI1214 06:29:58.943861 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 06:29:58.943877 20613 solver.cpp:244]     Train net output #1: loss = 0.265621 (* 1 = 0.265621 loss)\nI1214 06:29:59.040896 20613 sgd_solver.cpp:174] Iteration 78500, lr = 2.355\nI1214 06:29:59.054608 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321951\nI1214 06:32:17.361130 20613 solver.cpp:337] Iteration 78600, Testing net (#0)\nI1214 06:33:39.157047 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81156\nI1214 06:33:39.157496 20613 solver.cpp:404]     Test net output #1: loss = 0.567978 (* 1 = 0.567978 loss)\nI1214 06:33:40.470780 20613 solver.cpp:228] Iteration 78600, loss = 0.26362\nI1214 06:33:40.470840 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.888\nI1214 06:33:40.470857 20613 solver.cpp:244]     Train net output #1: loss = 0.26362 (* 1 = 0.26362 loss)\nI1214 06:33:40.569491 20613 sgd_solver.cpp:174] Iteration 78600, lr = 2.358\nI1214 06:33:40.583395 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.2988\nI1214 06:35:58.860595 20613 solver.cpp:337] Iteration 78700, Testing net (#0)\nI1214 06:37:20.645414 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79404\nI1214 06:37:20.645861 20613 solver.cpp:404]     Test net output #1: loss = 0.701418 (* 1 = 0.701418 loss)\nI1214 06:37:21.958667 20613 solver.cpp:228] Iteration 78700, loss = 0.153756\nI1214 06:37:21.958719 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 06:37:21.958737 20613 solver.cpp:244]     Train net output #1: loss = 0.153756 (* 1 = 0.153756 loss)\nI1214 06:37:22.051992 20613 sgd_solver.cpp:174] Iteration 78700, lr = 2.361\nI1214 06:37:22.065433 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.27563\nI1214 06:39:40.369822 20613 solver.cpp:337] Iteration 78800, Testing net (#0)\nI1214 06:41:02.152241 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68596\nI1214 06:41:02.152675 20613 solver.cpp:404]     Test net output #1: loss = 1.19387 (* 1 = 1.19387 loss)\nI1214 06:41:03.466557 20613 solver.cpp:228] Iteration 78800, loss = 0.239515\nI1214 06:41:03.466609 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 06:41:03.466627 20613 solver.cpp:244]     Train net output #1: loss = 0.239515 (* 1 = 0.239515 loss)\nI1214 06:41:03.554121 20613 sgd_solver.cpp:174] Iteration 78800, lr = 2.364\nI1214 06:41:03.567991 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.338031\nI1214 06:43:21.849551 20613 solver.cpp:337] Iteration 78900, Testing net (#0)\nI1214 06:44:43.643198 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6668\nI1214 06:44:43.643621 20613 solver.cpp:404]     Test net output #1: loss = 1.28956 (* 1 = 1.28956 loss)\nI1214 06:44:44.957365 20613 solver.cpp:228] Iteration 78900, loss = 
0.207814\nI1214 06:44:44.957422 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 06:44:44.957440 20613 solver.cpp:244]     Train net output #1: loss = 0.207814 (* 1 = 0.207814 loss)\nI1214 06:44:45.055137 20613 sgd_solver.cpp:174] Iteration 78900, lr = 2.367\nI1214 06:44:45.068927 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308447\nI1214 06:47:03.331066 20613 solver.cpp:337] Iteration 79000, Testing net (#0)\nI1214 06:48:25.131477 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68272\nI1214 06:48:25.131924 20613 solver.cpp:404]     Test net output #1: loss = 1.0622 (* 1 = 1.0622 loss)\nI1214 06:48:26.445571 20613 solver.cpp:228] Iteration 79000, loss = 0.318271\nI1214 06:48:26.445624 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 06:48:26.445641 20613 solver.cpp:244]     Train net output #1: loss = 0.318271 (* 1 = 0.318271 loss)\nI1214 06:48:26.537206 20613 sgd_solver.cpp:174] Iteration 79000, lr = 2.37\nI1214 06:48:26.551226 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.384781\nI1214 06:50:44.852705 20613 solver.cpp:337] Iteration 79100, Testing net (#0)\nI1214 06:52:06.626245 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75408\nI1214 06:52:06.626672 20613 solver.cpp:404]     Test net output #1: loss = 0.836325 (* 1 = 0.836325 loss)\nI1214 06:52:07.940697 20613 solver.cpp:228] Iteration 79100, loss = 0.1195\nI1214 06:52:07.940755 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 06:52:07.940774 20613 solver.cpp:244]     Train net output #1: loss = 0.119501 (* 1 = 0.119501 loss)\nI1214 06:52:08.035876 20613 sgd_solver.cpp:174] Iteration 79100, lr = 2.373\nI1214 06:52:08.049748 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.283409\nI1214 06:54:26.322026 20613 solver.cpp:337] Iteration 79200, Testing net (#0)\nI1214 06:55:48.107614 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76036\nI1214 06:55:48.108057 20613 solver.cpp:404]     Test net output #1: loss = 0.733874 
(* 1 = 0.733874 loss)\nI1214 06:55:49.421525 20613 solver.cpp:228] Iteration 79200, loss = 0.231369\nI1214 06:55:49.421579 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 06:55:49.421597 20613 solver.cpp:244]     Train net output #1: loss = 0.231369 (* 1 = 0.231369 loss)\nI1214 06:55:49.513528 20613 sgd_solver.cpp:174] Iteration 79200, lr = 2.376\nI1214 06:55:49.527336 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.320553\nI1214 06:58:07.764238 20613 solver.cpp:337] Iteration 79300, Testing net (#0)\nI1214 06:59:29.542759 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78652\nI1214 06:59:29.543208 20613 solver.cpp:404]     Test net output #1: loss = 0.757123 (* 1 = 0.757123 loss)\nI1214 06:59:30.855360 20613 solver.cpp:228] Iteration 79300, loss = 0.22408\nI1214 06:59:30.855420 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 06:59:30.855438 20613 solver.cpp:244]     Train net output #1: loss = 0.22408 (* 1 = 0.22408 loss)\nI1214 06:59:30.951251 20613 sgd_solver.cpp:174] Iteration 79300, lr = 2.379\nI1214 06:59:30.965049 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.352041\nI1214 07:01:49.111125 20613 solver.cpp:337] Iteration 79400, Testing net (#0)\nI1214 07:03:09.879238 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73204\nI1214 07:03:09.879628 20613 solver.cpp:404]     Test net output #1: loss = 0.837204 (* 1 = 0.837204 loss)\nI1214 07:03:11.190599 20613 solver.cpp:228] Iteration 79400, loss = 0.266809\nI1214 07:03:11.190644 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 07:03:11.190660 20613 solver.cpp:244]     Train net output #1: loss = 0.266809 (* 1 = 0.266809 loss)\nI1214 07:03:11.282532 20613 sgd_solver.cpp:174] Iteration 79400, lr = 2.382\nI1214 07:03:11.295220 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314375\nI1214 07:05:29.403861 20613 solver.cpp:337] Iteration 79500, Testing net (#0)\nI1214 07:06:50.171406 20613 solver.cpp:404]     Test net output #0: accuracy 
= 0.766\nI1214 07:06:50.171783 20613 solver.cpp:404]     Test net output #1: loss = 0.777926 (* 1 = 0.777926 loss)\nI1214 07:06:51.482098 20613 solver.cpp:228] Iteration 79500, loss = 0.167832\nI1214 07:06:51.482141 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 07:06:51.482157 20613 solver.cpp:244]     Train net output #1: loss = 0.167832 (* 1 = 0.167832 loss)\nI1214 07:06:51.578133 20613 sgd_solver.cpp:174] Iteration 79500, lr = 2.385\nI1214 07:06:51.590689 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.353736\nI1214 07:09:09.681094 20613 solver.cpp:337] Iteration 79600, Testing net (#0)\nI1214 07:10:30.458647 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78296\nI1214 07:10:30.459031 20613 solver.cpp:404]     Test net output #1: loss = 0.668427 (* 1 = 0.668427 loss)\nI1214 07:10:31.769903 20613 solver.cpp:228] Iteration 79600, loss = 0.215897\nI1214 07:10:31.769945 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 07:10:31.769963 20613 solver.cpp:244]     Train net output #1: loss = 0.215897 (* 1 = 0.215897 loss)\nI1214 07:10:31.866243 20613 sgd_solver.cpp:174] Iteration 79600, lr = 2.388\nI1214 07:10:31.878968 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.294397\nI1214 07:12:50.034896 20613 solver.cpp:337] Iteration 79700, Testing net (#0)\nI1214 07:14:10.818291 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7516\nI1214 07:14:10.818668 20613 solver.cpp:404]     Test net output #1: loss = 0.855508 (* 1 = 0.855508 loss)\nI1214 07:14:12.128921 20613 solver.cpp:228] Iteration 79700, loss = 0.153212\nI1214 07:14:12.128962 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 07:14:12.128979 20613 solver.cpp:244]     Train net output #1: loss = 0.153212 (* 1 = 0.153212 loss)\nI1214 07:14:12.232941 20613 sgd_solver.cpp:174] Iteration 79700, lr = 2.391\nI1214 07:14:12.245497 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.25462\nI1214 07:16:30.282251 20613 solver.cpp:337] Iteration 79800, 
Testing net (#0)\nI1214 07:17:51.063496 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75732\nI1214 07:17:51.063824 20613 solver.cpp:404]     Test net output #1: loss = 0.8208 (* 1 = 0.8208 loss)\nI1214 07:17:52.375100 20613 solver.cpp:228] Iteration 79800, loss = 0.205702\nI1214 07:17:52.375142 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 07:17:52.375159 20613 solver.cpp:244]     Train net output #1: loss = 0.205703 (* 1 = 0.205703 loss)\nI1214 07:17:52.474033 20613 sgd_solver.cpp:174] Iteration 79800, lr = 2.394\nI1214 07:17:52.486902 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308073\nI1214 07:20:10.662395 20613 solver.cpp:337] Iteration 79900, Testing net (#0)\nI1214 07:21:31.441535 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74024\nI1214 07:21:31.441856 20613 solver.cpp:404]     Test net output #1: loss = 1.05517 (* 1 = 1.05517 loss)\nI1214 07:21:32.752979 20613 solver.cpp:228] Iteration 79900, loss = 0.259668\nI1214 07:21:32.753023 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 07:21:32.753041 20613 solver.cpp:244]     Train net output #1: loss = 0.259668 (* 1 = 0.259668 loss)\nI1214 07:21:32.847013 20613 sgd_solver.cpp:174] Iteration 79900, lr = 2.397\nI1214 07:21:32.859755 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.32362\nI1214 07:23:50.910038 20613 solver.cpp:337] Iteration 80000, Testing net (#0)\nI1214 07:25:11.688124 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61792\nI1214 07:25:11.688483 20613 solver.cpp:404]     Test net output #1: loss = 1.74875 (* 1 = 1.74875 loss)\nI1214 07:25:12.999845 20613 solver.cpp:228] Iteration 80000, loss = 0.211369\nI1214 07:25:12.999891 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 07:25:12.999909 20613 solver.cpp:244]     Train net output #1: loss = 0.211369 (* 1 = 0.211369 loss)\nI1214 07:25:13.093565 20613 sgd_solver.cpp:174] Iteration 80000, lr = 2.4\nI1214 07:25:13.106256 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.317149\nI1214 07:27:31.211221 20613 solver.cpp:337] Iteration 80100, Testing net (#0)\nI1214 07:28:51.991166 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75824\nI1214 07:28:51.991504 20613 solver.cpp:404]     Test net output #1: loss = 0.821583 (* 1 = 0.821583 loss)\nI1214 07:28:53.301772 20613 solver.cpp:228] Iteration 80100, loss = 0.204348\nI1214 07:28:53.301815 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 07:28:53.301832 20613 solver.cpp:244]     Train net output #1: loss = 0.204348 (* 1 = 0.204348 loss)\nI1214 07:28:53.401018 20613 sgd_solver.cpp:174] Iteration 80100, lr = 2.403\nI1214 07:28:53.413583 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.313257\nI1214 07:31:11.585659 20613 solver.cpp:337] Iteration 80200, Testing net (#0)\nI1214 07:32:32.356111 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80056\nI1214 07:32:32.356492 20613 solver.cpp:404]     Test net output #1: loss = 0.680828 (* 1 = 0.680828 loss)\nI1214 07:32:33.667345 20613 solver.cpp:228] Iteration 80200, loss = 0.170249\nI1214 07:32:33.667379 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 07:32:33.667395 20613 solver.cpp:244]     Train net output #1: loss = 0.17025 (* 1 = 0.17025 loss)\nI1214 07:32:33.762498 20613 sgd_solver.cpp:174] Iteration 80200, lr = 2.406\nI1214 07:32:33.775249 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.276596\nI1214 07:34:51.881657 20613 solver.cpp:337] Iteration 80300, Testing net (#0)\nI1214 07:36:12.659413 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78892\nI1214 07:36:12.659788 20613 solver.cpp:404]     Test net output #1: loss = 0.740924 (* 1 = 0.740924 loss)\nI1214 07:36:13.970036 20613 solver.cpp:228] Iteration 80300, loss = 0.170761\nI1214 07:36:13.970080 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 07:36:13.970098 20613 solver.cpp:244]     Train net output #1: loss = 0.170761 (* 1 = 0.170761 loss)\nI1214 07:36:14.066804 20613 
sgd_solver.cpp:174] Iteration 80300, lr = 2.409\nI1214 07:36:14.079385 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.335594\nI1214 07:38:32.208056 20613 solver.cpp:337] Iteration 80400, Testing net (#0)\nI1214 07:39:52.976630 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75768\nI1214 07:39:52.976979 20613 solver.cpp:404]     Test net output #1: loss = 0.779435 (* 1 = 0.779435 loss)\nI1214 07:39:54.287631 20613 solver.cpp:228] Iteration 80400, loss = 0.303754\nI1214 07:39:54.287674 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 07:39:54.287691 20613 solver.cpp:244]     Train net output #1: loss = 0.303754 (* 1 = 0.303754 loss)\nI1214 07:39:54.383872 20613 sgd_solver.cpp:174] Iteration 80400, lr = 2.412\nI1214 07:39:54.396497 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.366825\nI1214 07:42:12.592257 20613 solver.cpp:337] Iteration 80500, Testing net (#0)\nI1214 07:43:33.368139 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70244\nI1214 07:43:33.368468 20613 solver.cpp:404]     Test net output #1: loss = 1.13377 (* 1 = 1.13377 loss)\nI1214 07:43:34.680450 20613 solver.cpp:228] Iteration 80500, loss = 0.198266\nI1214 07:43:34.680495 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 07:43:34.680519 20613 solver.cpp:244]     Train net output #1: loss = 0.198266 (* 1 = 0.198266 loss)\nI1214 07:43:34.778246 20613 sgd_solver.cpp:174] Iteration 80500, lr = 2.415\nI1214 07:43:34.791049 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.289984\nI1214 07:45:52.894351 20613 solver.cpp:337] Iteration 80600, Testing net (#0)\nI1214 07:47:13.673812 20613 solver.cpp:404]     Test net output #0: accuracy = 0.755\nI1214 07:47:13.674150 20613 solver.cpp:404]     Test net output #1: loss = 0.859157 (* 1 = 0.859157 loss)\nI1214 07:47:14.984879 20613 solver.cpp:228] Iteration 80600, loss = 0.220303\nI1214 07:47:14.984931 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 07:47:14.984954 20613 
solver.cpp:244]     Train net output #1: loss = 0.220303 (* 1 = 0.220303 loss)\nI1214 07:47:15.087923 20613 sgd_solver.cpp:174] Iteration 80600, lr = 2.418\nI1214 07:47:15.100634 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.271889\nI1214 07:49:33.247957 20613 solver.cpp:337] Iteration 80700, Testing net (#0)\nI1214 07:50:53.981981 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79608\nI1214 07:50:53.982316 20613 solver.cpp:404]     Test net output #1: loss = 0.686676 (* 1 = 0.686676 loss)\nI1214 07:50:55.293066 20613 solver.cpp:228] Iteration 80700, loss = 0.266248\nI1214 07:50:55.293110 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 07:50:55.293134 20613 solver.cpp:244]     Train net output #1: loss = 0.266248 (* 1 = 0.266248 loss)\nI1214 07:50:55.392688 20613 sgd_solver.cpp:174] Iteration 80700, lr = 2.421\nI1214 07:50:55.405436 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.304878\nI1214 07:53:13.417062 20613 solver.cpp:337] Iteration 80800, Testing net (#0)\nI1214 07:54:34.200908 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74448\nI1214 07:54:34.201292 20613 solver.cpp:404]     Test net output #1: loss = 0.968027 (* 1 = 0.968027 loss)\nI1214 07:54:35.510979 20613 solver.cpp:228] Iteration 80800, loss = 0.225001\nI1214 07:54:35.511018 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 07:54:35.511041 20613 solver.cpp:244]     Train net output #1: loss = 0.225001 (* 1 = 0.225001 loss)\nI1214 07:54:35.607806 20613 sgd_solver.cpp:174] Iteration 80800, lr = 2.424\nI1214 07:54:35.620561 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.279775\nI1214 07:56:53.735951 20613 solver.cpp:337] Iteration 80900, Testing net (#0)\nI1214 07:58:14.373656 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74052\nI1214 07:58:14.374061 20613 solver.cpp:404]     Test net output #1: loss = 0.99112 (* 1 = 0.99112 loss)\nI1214 07:58:15.685508 20613 solver.cpp:228] Iteration 80900, loss = 0.224835\nI1214 
07:58:15.685555 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 07:58:15.685580 20613 solver.cpp:244]     Train net output #1: loss = 0.224835 (* 1 = 0.224835 loss)\nI1214 07:58:15.785270 20613 sgd_solver.cpp:174] Iteration 80900, lr = 2.427\nI1214 07:58:15.798003 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.296128\nI1214 08:00:33.944998 20613 solver.cpp:337] Iteration 81000, Testing net (#0)\nI1214 08:01:54.575601 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72136\nI1214 08:01:54.575968 20613 solver.cpp:404]     Test net output #1: loss = 1.00363 (* 1 = 1.00363 loss)\nI1214 08:01:55.887580 20613 solver.cpp:228] Iteration 81000, loss = 0.341368\nI1214 08:01:55.887627 20613 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI1214 08:01:55.887651 20613 solver.cpp:244]     Train net output #1: loss = 0.341369 (* 1 = 0.341369 loss)\nI1214 08:01:55.980144 20613 sgd_solver.cpp:174] Iteration 81000, lr = 2.43\nI1214 08:01:55.992909 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.39858\nI1214 08:04:14.112614 20613 solver.cpp:337] Iteration 81100, Testing net (#0)\nI1214 08:05:34.740694 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68084\nI1214 08:05:34.741025 20613 solver.cpp:404]     Test net output #1: loss = 1.23407 (* 1 = 1.23407 loss)\nI1214 08:05:36.051934 20613 solver.cpp:228] Iteration 81100, loss = 0.17094\nI1214 08:05:36.051982 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 08:05:36.052007 20613 solver.cpp:244]     Train net output #1: loss = 0.170941 (* 1 = 0.170941 loss)\nI1214 08:05:36.151985 20613 sgd_solver.cpp:174] Iteration 81100, lr = 2.433\nI1214 08:05:36.164816 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30663\nI1214 08:07:54.275570 20613 solver.cpp:337] Iteration 81200, Testing net (#0)\nI1214 08:09:14.912585 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71128\nI1214 08:09:14.912971 20613 solver.cpp:404]     Test net output #1: loss = 1.27942 (* 1 = 1.27942 
loss)\nI1214 08:09:16.222878 20613 solver.cpp:228] Iteration 81200, loss = 0.341847\nI1214 08:09:16.222929 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 08:09:16.222954 20613 solver.cpp:244]     Train net output #1: loss = 0.341847 (* 1 = 0.341847 loss)\nI1214 08:09:16.318084 20613 sgd_solver.cpp:174] Iteration 81200, lr = 2.436\nI1214 08:09:16.330811 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.36552\nI1214 08:11:34.355078 20613 solver.cpp:337] Iteration 81300, Testing net (#0)\nI1214 08:12:54.995086 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70016\nI1214 08:12:54.995466 20613 solver.cpp:404]     Test net output #1: loss = 1.2162 (* 1 = 1.2162 loss)\nI1214 08:12:56.307240 20613 solver.cpp:228] Iteration 81300, loss = 0.217315\nI1214 08:12:56.307287 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 08:12:56.307312 20613 solver.cpp:244]     Train net output #1: loss = 0.217316 (* 1 = 0.217316 loss)\nI1214 08:12:56.404764 20613 sgd_solver.cpp:174] Iteration 81300, lr = 2.439\nI1214 08:12:56.417559 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293694\nI1214 08:15:14.482146 20613 solver.cpp:337] Iteration 81400, Testing net (#0)\nI1214 08:16:35.126324 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73988\nI1214 08:16:35.126657 20613 solver.cpp:404]     Test net output #1: loss = 0.934419 (* 1 = 0.934419 loss)\nI1214 08:16:36.438060 20613 solver.cpp:228] Iteration 81400, loss = 0.169328\nI1214 08:16:36.438108 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 08:16:36.438133 20613 solver.cpp:244]     Train net output #1: loss = 0.169328 (* 1 = 0.169328 loss)\nI1214 08:16:36.531509 20613 sgd_solver.cpp:174] Iteration 81400, lr = 2.442\nI1214 08:16:36.544342 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345909\nI1214 08:18:54.560045 20613 solver.cpp:337] Iteration 81500, Testing net (#0)\nI1214 08:20:15.212188 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72132\nI1214 
08:20:15.212574 20613 solver.cpp:404]     Test net output #1: loss = 0.991489 (* 1 = 0.991489 loss)\nI1214 08:20:16.523772 20613 solver.cpp:228] Iteration 81500, loss = 0.17786\nI1214 08:20:16.523820 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 08:20:16.523844 20613 solver.cpp:244]     Train net output #1: loss = 0.17786 (* 1 = 0.17786 loss)\nI1214 08:20:16.614753 20613 sgd_solver.cpp:174] Iteration 81500, lr = 2.445\nI1214 08:20:16.627563 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305015\nI1214 08:22:34.668721 20613 solver.cpp:337] Iteration 81600, Testing net (#0)\nI1214 08:23:55.329423 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81448\nI1214 08:23:55.329753 20613 solver.cpp:404]     Test net output #1: loss = 0.585355 (* 1 = 0.585355 loss)\nI1214 08:23:56.640713 20613 solver.cpp:228] Iteration 81600, loss = 0.268863\nI1214 08:23:56.640751 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 08:23:56.640775 20613 solver.cpp:244]     Train net output #1: loss = 0.268863 (* 1 = 0.268863 loss)\nI1214 08:23:56.730300 20613 sgd_solver.cpp:174] Iteration 81600, lr = 2.448\nI1214 08:23:56.743073 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.283515\nI1214 08:26:14.817415 20613 solver.cpp:337] Iteration 81700, Testing net (#0)\nI1214 08:27:35.472041 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79212\nI1214 08:27:35.472369 20613 solver.cpp:404]     Test net output #1: loss = 0.664542 (* 1 = 0.664542 loss)\nI1214 08:27:36.783798 20613 solver.cpp:228] Iteration 81700, loss = 0.15648\nI1214 08:27:36.783846 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 08:27:36.783871 20613 solver.cpp:244]     Train net output #1: loss = 0.156481 (* 1 = 0.156481 loss)\nI1214 08:27:36.884351 20613 sgd_solver.cpp:174] Iteration 81700, lr = 2.451\nI1214 08:27:36.897022 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.330829\nI1214 08:29:54.986850 20613 solver.cpp:337] Iteration 81800, Testing net 
(#0)\nI1214 08:31:15.682379 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71112\nI1214 08:31:15.682740 20613 solver.cpp:404]     Test net output #1: loss = 0.989269 (* 1 = 0.989269 loss)\nI1214 08:31:16.994590 20613 solver.cpp:228] Iteration 81800, loss = 0.213429\nI1214 08:31:16.994629 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 08:31:16.994653 20613 solver.cpp:244]     Train net output #1: loss = 0.213429 (* 1 = 0.213429 loss)\nI1214 08:31:17.087715 20613 sgd_solver.cpp:174] Iteration 81800, lr = 2.454\nI1214 08:31:17.100416 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3196\nI1214 08:33:35.096520 20613 solver.cpp:337] Iteration 81900, Testing net (#0)\nI1214 08:34:55.859138 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7072\nI1214 08:34:55.859478 20613 solver.cpp:404]     Test net output #1: loss = 0.957326 (* 1 = 0.957326 loss)\nI1214 08:34:57.169653 20613 solver.cpp:228] Iteration 81900, loss = 0.182513\nI1214 08:34:57.169697 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 08:34:57.169723 20613 solver.cpp:244]     Train net output #1: loss = 0.182514 (* 1 = 0.182514 loss)\nI1214 08:34:57.265059 20613 sgd_solver.cpp:174] Iteration 81900, lr = 2.457\nI1214 08:34:57.277804 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.348913\nI1214 08:37:15.407125 20613 solver.cpp:337] Iteration 82000, Testing net (#0)\nI1214 08:38:36.160790 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82112\nI1214 08:38:36.161185 20613 solver.cpp:404]     Test net output #1: loss = 0.549186 (* 1 = 0.549186 loss)\nI1214 08:38:37.471863 20613 solver.cpp:228] Iteration 82000, loss = 0.201802\nI1214 08:38:37.471913 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 08:38:37.471937 20613 solver.cpp:244]     Train net output #1: loss = 0.201803 (* 1 = 0.201803 loss)\nI1214 08:38:37.565608 20613 sgd_solver.cpp:174] Iteration 82000, lr = 2.46\nI1214 08:38:37.578362 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.269184\nI1214 08:40:55.737282 20613 solver.cpp:337] Iteration 82100, Testing net (#0)\nI1214 08:42:16.503100 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81552\nI1214 08:42:16.503482 20613 solver.cpp:404]     Test net output #1: loss = 0.595089 (* 1 = 0.595089 loss)\nI1214 08:42:17.814297 20613 solver.cpp:228] Iteration 82100, loss = 0.179521\nI1214 08:42:17.814343 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 08:42:17.814368 20613 solver.cpp:244]     Train net output #1: loss = 0.179521 (* 1 = 0.179521 loss)\nI1214 08:42:17.905241 20613 sgd_solver.cpp:174] Iteration 82100, lr = 2.463\nI1214 08:42:17.918048 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.311848\nI1214 08:44:36.072955 20613 solver.cpp:337] Iteration 82200, Testing net (#0)\nI1214 08:45:56.848399 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7674\nI1214 08:45:56.848739 20613 solver.cpp:404]     Test net output #1: loss = 0.912241 (* 1 = 0.912241 loss)\nI1214 08:45:58.160364 20613 solver.cpp:228] Iteration 82200, loss = 0.213641\nI1214 08:45:58.160408 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 08:45:58.160434 20613 solver.cpp:244]     Train net output #1: loss = 0.213641 (* 1 = 0.213641 loss)\nI1214 08:45:58.255076 20613 sgd_solver.cpp:174] Iteration 82200, lr = 2.466\nI1214 08:45:58.267762 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.35622\nI1214 08:48:16.310750 20613 solver.cpp:337] Iteration 82300, Testing net (#0)\nI1214 08:49:37.098316 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65336\nI1214 08:49:37.098693 20613 solver.cpp:404]     Test net output #1: loss = 1.19948 (* 1 = 1.19948 loss)\nI1214 08:49:38.410094 20613 solver.cpp:228] Iteration 82300, loss = 0.243436\nI1214 08:49:38.410140 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 08:49:38.410164 20613 solver.cpp:244]     Train net output #1: loss = 0.243436 (* 1 = 0.243436 loss)\nI1214 08:49:38.503937 20613 
sgd_solver.cpp:174] Iteration 82300, lr = 2.469\nI1214 08:49:38.516671 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.388115\nI1214 08:51:56.616681 20613 solver.cpp:337] Iteration 82400, Testing net (#0)\nI1214 08:53:17.395578 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69584\nI1214 08:53:17.395988 20613 solver.cpp:404]     Test net output #1: loss = 1.17841 (* 1 = 1.17841 loss)\nI1214 08:53:18.708222 20613 solver.cpp:228] Iteration 82400, loss = 0.232764\nI1214 08:53:18.708271 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 08:53:18.708295 20613 solver.cpp:244]     Train net output #1: loss = 0.232765 (* 1 = 0.232765 loss)\nI1214 08:53:18.799772 20613 sgd_solver.cpp:174] Iteration 82400, lr = 2.472\nI1214 08:53:18.812206 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.278436\nI1214 08:55:36.864418 20613 solver.cpp:337] Iteration 82500, Testing net (#0)\nI1214 08:56:57.651131 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7574\nI1214 08:56:57.651515 20613 solver.cpp:404]     Test net output #1: loss = 0.78936 (* 1 = 0.78936 loss)\nI1214 08:56:58.963035 20613 solver.cpp:228] Iteration 82500, loss = 0.270041\nI1214 08:56:58.963083 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 08:56:58.963107 20613 solver.cpp:244]     Train net output #1: loss = 0.270041 (* 1 = 0.270041 loss)\nI1214 08:56:59.054705 20613 sgd_solver.cpp:174] Iteration 82500, lr = 2.475\nI1214 08:56:59.067420 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.325893\nI1214 08:59:17.143209 20613 solver.cpp:337] Iteration 82600, Testing net (#0)\nI1214 09:00:37.934427 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6674\nI1214 09:00:37.934805 20613 solver.cpp:404]     Test net output #1: loss = 1.37746 (* 1 = 1.37746 loss)\nI1214 09:00:39.246387 20613 solver.cpp:228] Iteration 82600, loss = 0.300344\nI1214 09:00:39.246434 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 09:00:39.246459 20613 solver.cpp:244] 
    Train net output #1: loss = 0.300344 (* 1 = 0.300344 loss)\nI1214 09:00:39.337973 20613 sgd_solver.cpp:174] Iteration 82600, lr = 2.478\nI1214 09:00:39.350577 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.288871\nI1214 09:02:57.422652 20613 solver.cpp:337] Iteration 82700, Testing net (#0)\nI1214 09:04:18.218703 20613 solver.cpp:404]     Test net output #0: accuracy = 0.753\nI1214 09:04:18.219040 20613 solver.cpp:404]     Test net output #1: loss = 1.00256 (* 1 = 1.00256 loss)\nI1214 09:04:19.530509 20613 solver.cpp:228] Iteration 82700, loss = 0.311977\nI1214 09:04:19.530557 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 09:04:19.530581 20613 solver.cpp:244]     Train net output #1: loss = 0.311978 (* 1 = 0.311978 loss)\nI1214 09:04:19.621567 20613 sgd_solver.cpp:174] Iteration 82700, lr = 2.481\nI1214 09:04:19.634284 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.342298\nI1214 09:06:37.642231 20613 solver.cpp:337] Iteration 82800, Testing net (#0)\nI1214 09:07:58.434051 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81152\nI1214 09:07:58.434350 20613 solver.cpp:404]     Test net output #1: loss = 0.686337 (* 1 = 0.686337 loss)\nI1214 09:07:59.745512 20613 solver.cpp:228] Iteration 82800, loss = 0.239937\nI1214 09:07:59.745560 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 09:07:59.745584 20613 solver.cpp:244]     Train net output #1: loss = 0.239937 (* 1 = 0.239937 loss)\nI1214 09:07:59.836802 20613 sgd_solver.cpp:174] Iteration 82800, lr = 2.484\nI1214 09:07:59.849496 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.269202\nI1214 09:10:17.920735 20613 solver.cpp:337] Iteration 82900, Testing net (#0)\nI1214 09:11:38.705315 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61888\nI1214 09:11:38.705643 20613 solver.cpp:404]     Test net output #1: loss = 1.64993 (* 1 = 1.64993 loss)\nI1214 09:11:40.017565 20613 solver.cpp:228] Iteration 82900, loss = 0.259792\nI1214 09:11:40.017613 20613 
solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 09:11:40.017639 20613 solver.cpp:244]     Train net output #1: loss = 0.259792 (* 1 = 0.259792 loss)\nI1214 09:11:40.111624 20613 sgd_solver.cpp:174] Iteration 82900, lr = 2.487\nI1214 09:11:40.124343 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.355381\nI1214 09:13:58.181609 20613 solver.cpp:337] Iteration 83000, Testing net (#0)\nI1214 09:15:18.952961 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77596\nI1214 09:15:18.953312 20613 solver.cpp:404]     Test net output #1: loss = 0.715607 (* 1 = 0.715607 loss)\nI1214 09:15:20.264889 20613 solver.cpp:228] Iteration 83000, loss = 0.198346\nI1214 09:15:20.264933 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 09:15:20.264950 20613 solver.cpp:244]     Train net output #1: loss = 0.198347 (* 1 = 0.198347 loss)\nI1214 09:15:20.354672 20613 sgd_solver.cpp:174] Iteration 83000, lr = 2.49\nI1214 09:15:20.367429 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.324241\nI1214 09:17:38.411006 20613 solver.cpp:337] Iteration 83100, Testing net (#0)\nI1214 09:18:59.182472 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76168\nI1214 09:18:59.182806 20613 solver.cpp:404]     Test net output #1: loss = 0.789021 (* 1 = 0.789021 loss)\nI1214 09:19:00.493515 20613 solver.cpp:228] Iteration 83100, loss = 0.165127\nI1214 09:19:00.493559 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 09:19:00.493577 20613 solver.cpp:244]     Train net output #1: loss = 0.165127 (* 1 = 0.165127 loss)\nI1214 09:19:00.585546 20613 sgd_solver.cpp:174] Iteration 83100, lr = 2.493\nI1214 09:19:00.598129 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.266688\nI1214 09:21:18.603358 20613 solver.cpp:337] Iteration 83200, Testing net (#0)\nI1214 09:22:39.378648 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74808\nI1214 09:22:39.379026 20613 solver.cpp:404]     Test net output #1: loss = 0.840964 (* 1 = 0.840964 loss)\nI1214 
09:22:40.689770 20613 solver.cpp:228] Iteration 83200, loss = 0.236523\nI1214 09:22:40.689815 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 09:22:40.689831 20613 solver.cpp:244]     Train net output #1: loss = 0.236524 (* 1 = 0.236524 loss)\nI1214 09:22:40.778208 20613 sgd_solver.cpp:174] Iteration 83200, lr = 2.496\nI1214 09:22:40.790961 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.30553\nI1214 09:24:58.880811 20613 solver.cpp:337] Iteration 83300, Testing net (#0)\nI1214 09:26:19.654091 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79708\nI1214 09:26:19.654374 20613 solver.cpp:404]     Test net output #1: loss = 0.647043 (* 1 = 0.647043 loss)\nI1214 09:26:20.965116 20613 solver.cpp:228] Iteration 83300, loss = 0.221674\nI1214 09:26:20.965150 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 09:26:20.965167 20613 solver.cpp:244]     Train net output #1: loss = 0.221675 (* 1 = 0.221675 loss)\nI1214 09:26:21.061843 20613 sgd_solver.cpp:174] Iteration 83300, lr = 2.499\nI1214 09:26:21.074666 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337396\nI1214 09:28:39.151301 20613 solver.cpp:337] Iteration 83400, Testing net (#0)\nI1214 09:29:59.918023 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68804\nI1214 09:29:59.918340 20613 solver.cpp:404]     Test net output #1: loss = 1.20393 (* 1 = 1.20393 loss)\nI1214 09:30:01.229074 20613 solver.cpp:228] Iteration 83400, loss = 0.183489\nI1214 09:30:01.229117 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 09:30:01.229135 20613 solver.cpp:244]     Train net output #1: loss = 0.183489 (* 1 = 0.183489 loss)\nI1214 09:30:01.322357 20613 sgd_solver.cpp:174] Iteration 83400, lr = 2.502\nI1214 09:30:01.335064 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295525\nI1214 09:32:19.420245 20613 solver.cpp:337] Iteration 83500, Testing net (#0)\nI1214 09:33:40.191499 20613 solver.cpp:404]     Test net output #0: accuracy = 0.57008\nI1214 
09:33:40.191804 20613 solver.cpp:404]     Test net output #1: loss = 2.29031 (* 1 = 2.29031 loss)\nI1214 09:33:41.502804 20613 solver.cpp:228] Iteration 83500, loss = 0.17498\nI1214 09:33:41.502838 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 09:33:41.502853 20613 solver.cpp:244]     Train net output #1: loss = 0.17498 (* 1 = 0.17498 loss)\nI1214 09:33:41.594038 20613 sgd_solver.cpp:174] Iteration 83500, lr = 2.505\nI1214 09:33:41.606783 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28526\nI1214 09:35:59.635288 20613 solver.cpp:337] Iteration 83600, Testing net (#0)\nI1214 09:37:20.418692 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80516\nI1214 09:37:20.419059 20613 solver.cpp:404]     Test net output #1: loss = 0.682366 (* 1 = 0.682366 loss)\nI1214 09:37:21.730072 20613 solver.cpp:228] Iteration 83600, loss = 0.15719\nI1214 09:37:21.730116 20613 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1214 09:37:21.730132 20613 solver.cpp:244]     Train net output #1: loss = 0.15719 (* 1 = 0.15719 loss)\nI1214 09:37:21.823781 20613 sgd_solver.cpp:174] Iteration 83600, lr = 2.508\nI1214 09:37:21.836627 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.281379\nI1214 09:39:39.902927 20613 solver.cpp:337] Iteration 83700, Testing net (#0)\nI1214 09:41:00.676172 20613 solver.cpp:404]     Test net output #0: accuracy = 0.64156\nI1214 09:41:00.676565 20613 solver.cpp:404]     Test net output #1: loss = 1.63988 (* 1 = 1.63988 loss)\nI1214 09:41:01.987629 20613 solver.cpp:228] Iteration 83700, loss = 0.321425\nI1214 09:41:01.987671 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 09:41:01.987687 20613 solver.cpp:244]     Train net output #1: loss = 0.321425 (* 1 = 0.321425 loss)\nI1214 09:41:02.080967 20613 sgd_solver.cpp:174] Iteration 83700, lr = 2.511\nI1214 09:41:02.093565 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.322539\nI1214 09:43:20.130002 20613 solver.cpp:337] Iteration 83800, Testing net 
(#0)\nI1214 09:44:40.900332 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68952\nI1214 09:44:40.900678 20613 solver.cpp:404]     Test net output #1: loss = 1.20706 (* 1 = 1.20706 loss)\nI1214 09:44:42.211352 20613 solver.cpp:228] Iteration 83800, loss = 0.248401\nI1214 09:44:42.211397 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 09:44:42.211413 20613 solver.cpp:244]     Train net output #1: loss = 0.248402 (* 1 = 0.248402 loss)\nI1214 09:44:42.304816 20613 sgd_solver.cpp:174] Iteration 83800, lr = 2.514\nI1214 09:44:42.317430 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.358608\nI1214 09:47:00.266541 20613 solver.cpp:337] Iteration 83900, Testing net (#0)\nI1214 09:48:21.043000 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72912\nI1214 09:48:21.043274 20613 solver.cpp:404]     Test net output #1: loss = 0.890385 (* 1 = 0.890385 loss)\nI1214 09:48:22.354017 20613 solver.cpp:228] Iteration 83900, loss = 0.256417\nI1214 09:48:22.354051 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 09:48:22.354066 20613 solver.cpp:244]     Train net output #1: loss = 0.256418 (* 1 = 0.256418 loss)\nI1214 09:48:22.445464 20613 sgd_solver.cpp:174] Iteration 83900, lr = 2.517\nI1214 09:48:22.458138 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.331691\nI1214 09:50:40.454111 20613 solver.cpp:337] Iteration 84000, Testing net (#0)\nI1214 09:52:01.228508 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79496\nI1214 09:52:01.228830 20613 solver.cpp:404]     Test net output #1: loss = 0.690914 (* 1 = 0.690914 loss)\nI1214 09:52:02.539275 20613 solver.cpp:228] Iteration 84000, loss = 0.18248\nI1214 09:52:02.539317 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 09:52:02.539333 20613 solver.cpp:244]     Train net output #1: loss = 0.182481 (* 1 = 0.182481 loss)\nI1214 09:52:02.633500 20613 sgd_solver.cpp:174] Iteration 84000, lr = 2.52\nI1214 09:52:02.646555 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.264215\nI1214 09:54:20.665256 20613 solver.cpp:337] Iteration 84100, Testing net (#0)\nI1214 09:55:41.444051 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78372\nI1214 09:55:41.444378 20613 solver.cpp:404]     Test net output #1: loss = 0.681043 (* 1 = 0.681043 loss)\nI1214 09:55:42.756212 20613 solver.cpp:228] Iteration 84100, loss = 0.193158\nI1214 09:55:42.756258 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 09:55:42.756273 20613 solver.cpp:244]     Train net output #1: loss = 0.193159 (* 1 = 0.193159 loss)\nI1214 09:55:42.847625 20613 sgd_solver.cpp:174] Iteration 84100, lr = 2.523\nI1214 09:55:42.860285 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.264954\nI1214 09:58:00.898465 20613 solver.cpp:337] Iteration 84200, Testing net (#0)\nI1214 09:59:21.678104 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72208\nI1214 09:59:21.678480 20613 solver.cpp:404]     Test net output #1: loss = 1.00808 (* 1 = 1.00808 loss)\nI1214 09:59:22.989235 20613 solver.cpp:228] Iteration 84200, loss = 0.245479\nI1214 09:59:22.989272 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 09:59:22.989289 20613 solver.cpp:244]     Train net output #1: loss = 0.245479 (* 1 = 0.245479 loss)\nI1214 09:59:23.083218 20613 sgd_solver.cpp:174] Iteration 84200, lr = 2.526\nI1214 09:59:23.095986 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.266746\nI1214 10:01:41.270542 20613 solver.cpp:337] Iteration 84300, Testing net (#0)\nI1214 10:03:02.057437 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68656\nI1214 10:03:02.057776 20613 solver.cpp:404]     Test net output #1: loss = 1.21278 (* 1 = 1.21278 loss)\nI1214 10:03:03.369046 20613 solver.cpp:228] Iteration 84300, loss = 0.265375\nI1214 10:03:03.369091 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 10:03:03.369107 20613 solver.cpp:244]     Train net output #1: loss = 0.265376 (* 1 = 0.265376 loss)\nI1214 10:03:03.459978 20613 
sgd_solver.cpp:174] Iteration 84300, lr = 2.529\nI1214 10:03:03.472769 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.319016\nI1214 10:05:21.542429 20613 solver.cpp:337] Iteration 84400, Testing net (#0)\nI1214 10:06:42.318310 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69052\nI1214 10:06:42.318668 20613 solver.cpp:404]     Test net output #1: loss = 1.31171 (* 1 = 1.31171 loss)\nI1214 10:06:43.630020 20613 solver.cpp:228] Iteration 84400, loss = 0.30859\nI1214 10:06:43.630065 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 10:06:43.630082 20613 solver.cpp:244]     Train net output #1: loss = 0.308591 (* 1 = 0.308591 loss)\nI1214 10:06:43.718788 20613 sgd_solver.cpp:174] Iteration 84400, lr = 2.532\nI1214 10:06:43.731570 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.257015\nI1214 10:09:01.776654 20613 solver.cpp:337] Iteration 84500, Testing net (#0)\nI1214 10:10:22.512524 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71724\nI1214 10:10:22.512861 20613 solver.cpp:404]     Test net output #1: loss = 1.21197 (* 1 = 1.21197 loss)\nI1214 10:10:23.823014 20613 solver.cpp:228] Iteration 84500, loss = 0.195828\nI1214 10:10:23.823057 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 10:10:23.823074 20613 solver.cpp:244]     Train net output #1: loss = 0.195829 (* 1 = 0.195829 loss)\nI1214 10:10:23.916616 20613 sgd_solver.cpp:174] Iteration 84500, lr = 2.535\nI1214 10:10:23.929412 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303825\nI1214 10:12:41.980465 20613 solver.cpp:337] Iteration 84600, Testing net (#0)\nI1214 10:14:02.649370 20613 solver.cpp:404]     Test net output #0: accuracy = 0.58688\nI1214 10:14:02.649708 20613 solver.cpp:404]     Test net output #1: loss = 1.62746 (* 1 = 1.62746 loss)\nI1214 10:14:03.961341 20613 solver.cpp:228] Iteration 84600, loss = 0.272105\nI1214 10:14:03.961386 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 10:14:03.961403 20613 solver.cpp:244] 
    Train net output #1: loss = 0.272106 (* 1 = 0.272106 loss)\nI1214 10:14:04.049510 20613 sgd_solver.cpp:174] Iteration 84600, lr = 2.538\nI1214 10:14:04.062269 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300473\nI1214 10:16:22.172834 20613 solver.cpp:337] Iteration 84700, Testing net (#0)\nI1214 10:17:42.811121 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71816\nI1214 10:17:42.811467 20613 solver.cpp:404]     Test net output #1: loss = 0.952605 (* 1 = 0.952605 loss)\nI1214 10:17:44.122737 20613 solver.cpp:228] Iteration 84700, loss = 0.212291\nI1214 10:17:44.122783 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 10:17:44.122800 20613 solver.cpp:244]     Train net output #1: loss = 0.212291 (* 1 = 0.212291 loss)\nI1214 10:17:44.217660 20613 sgd_solver.cpp:174] Iteration 84700, lr = 2.541\nI1214 10:17:44.230417 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.273688\nI1214 10:20:01.549217 20613 solver.cpp:337] Iteration 84800, Testing net (#0)\nI1214 10:21:22.187213 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77484\nI1214 10:21:22.187494 20613 solver.cpp:404]     Test net output #1: loss = 0.738313 (* 1 = 0.738313 loss)\nI1214 10:21:23.498483 20613 solver.cpp:228] Iteration 84800, loss = 0.147792\nI1214 10:21:23.498528 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 10:21:23.498544 20613 solver.cpp:244]     Train net output #1: loss = 0.147792 (* 1 = 0.147792 loss)\nI1214 10:21:23.589221 20613 sgd_solver.cpp:174] Iteration 84800, lr = 2.544\nI1214 10:21:23.602032 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.273747\nI1214 10:23:41.658892 20613 solver.cpp:337] Iteration 84900, Testing net (#0)\nI1214 10:25:02.296697 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77224\nI1214 10:25:02.297063 20613 solver.cpp:404]     Test net output #1: loss = 0.703426 (* 1 = 0.703426 loss)\nI1214 10:25:03.607722 20613 solver.cpp:228] Iteration 84900, loss = 0.239301\nI1214 10:25:03.607767 20613 
solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 10:25:03.607784 20613 solver.cpp:244]     Train net output #1: loss = 0.239301 (* 1 = 0.239301 loss)\nI1214 10:25:03.701356 20613 sgd_solver.cpp:174] Iteration 84900, lr = 2.547\nI1214 10:25:03.714140 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.314005\nI1214 10:27:21.807569 20613 solver.cpp:337] Iteration 85000, Testing net (#0)\nI1214 10:28:42.441256 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74404\nI1214 10:28:42.441642 20613 solver.cpp:404]     Test net output #1: loss = 0.856283 (* 1 = 0.856283 loss)\nI1214 10:28:43.752707 20613 solver.cpp:228] Iteration 85000, loss = 0.309368\nI1214 10:28:43.752753 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 10:28:43.752768 20613 solver.cpp:244]     Train net output #1: loss = 0.309368 (* 1 = 0.309368 loss)\nI1214 10:28:43.847210 20613 sgd_solver.cpp:174] Iteration 85000, lr = 2.55\nI1214 10:28:43.860043 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307857\nI1214 10:31:02.019580 20613 solver.cpp:337] Iteration 85100, Testing net (#0)\nI1214 10:32:22.668174 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7428\nI1214 10:32:22.668562 20613 solver.cpp:404]     Test net output #1: loss = 0.900099 (* 1 = 0.900099 loss)\nI1214 10:32:23.979068 20613 solver.cpp:228] Iteration 85100, loss = 0.282571\nI1214 10:32:23.979113 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 10:32:23.979130 20613 solver.cpp:244]     Train net output #1: loss = 0.282572 (* 1 = 0.282572 loss)\nI1214 10:32:24.070794 20613 sgd_solver.cpp:174] Iteration 85100, lr = 2.553\nI1214 10:32:24.083618 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307561\nI1214 10:34:42.094630 20613 solver.cpp:337] Iteration 85200, Testing net (#0)\nI1214 10:36:02.734333 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77796\nI1214 10:36:02.734665 20613 solver.cpp:404]     Test net output #1: loss = 0.775679 (* 1 = 0.775679 loss)\nI1214 
10:36:04.045372 20613 solver.cpp:228] Iteration 85200, loss = 0.238989\nI1214 10:36:04.045418 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 10:36:04.045434 20613 solver.cpp:244]     Train net output #1: loss = 0.23899 (* 1 = 0.23899 loss)\nI1214 10:36:04.137383 20613 sgd_solver.cpp:174] Iteration 85200, lr = 2.556\nI1214 10:36:04.150218 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.350341\nI1214 10:38:22.229640 20613 solver.cpp:337] Iteration 85300, Testing net (#0)\nI1214 10:39:42.881106 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81972\nI1214 10:39:42.881489 20613 solver.cpp:404]     Test net output #1: loss = 0.608725 (* 1 = 0.608725 loss)\nI1214 10:39:44.192332 20613 solver.cpp:228] Iteration 85300, loss = 0.280313\nI1214 10:39:44.192378 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 10:39:44.192394 20613 solver.cpp:244]     Train net output #1: loss = 0.280314 (* 1 = 0.280314 loss)\nI1214 10:39:44.287977 20613 sgd_solver.cpp:174] Iteration 85300, lr = 2.559\nI1214 10:39:44.300613 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.302502\nI1214 10:42:02.343103 20613 solver.cpp:337] Iteration 85400, Testing net (#0)\nI1214 10:43:22.983067 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79772\nI1214 10:43:22.983453 20613 solver.cpp:404]     Test net output #1: loss = 0.62081 (* 1 = 0.62081 loss)\nI1214 10:43:24.294610 20613 solver.cpp:228] Iteration 85400, loss = 0.277906\nI1214 10:43:24.294654 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 10:43:24.294672 20613 solver.cpp:244]     Train net output #1: loss = 0.277906 (* 1 = 0.277906 loss)\nI1214 10:43:24.389991 20613 sgd_solver.cpp:174] Iteration 85400, lr = 2.562\nI1214 10:43:24.402761 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300945\nI1214 10:45:42.491118 20613 solver.cpp:337] Iteration 85500, Testing net (#0)\nI1214 10:47:03.138572 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81476\nI1214 
10:47:03.138912 20613 solver.cpp:404]     Test net output #1: loss = 0.583669 (* 1 = 0.583669 loss)\nI1214 10:47:04.450755 20613 solver.cpp:228] Iteration 85500, loss = 0.185803\nI1214 10:47:04.450789 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 10:47:04.450804 20613 solver.cpp:244]     Train net output #1: loss = 0.185803 (* 1 = 0.185803 loss)\nI1214 10:47:04.544313 20613 sgd_solver.cpp:174] Iteration 85500, lr = 2.565\nI1214 10:47:04.556977 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.255183\nI1214 10:49:22.579216 20613 solver.cpp:337] Iteration 85600, Testing net (#0)\nI1214 10:50:43.331192 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69612\nI1214 10:50:43.331545 20613 solver.cpp:404]     Test net output #1: loss = 1.13714 (* 1 = 1.13714 loss)\nI1214 10:50:44.641567 20613 solver.cpp:228] Iteration 85600, loss = 0.204287\nI1214 10:50:44.641611 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 10:50:44.641628 20613 solver.cpp:244]     Train net output #1: loss = 0.204287 (* 1 = 0.204287 loss)\nI1214 10:50:44.739361 20613 sgd_solver.cpp:174] Iteration 85600, lr = 2.568\nI1214 10:50:44.752137 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.249053\nI1214 10:53:02.799366 20613 solver.cpp:337] Iteration 85700, Testing net (#0)\nI1214 10:54:23.551625 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78708\nI1214 10:54:23.551961 20613 solver.cpp:404]     Test net output #1: loss = 0.649294 (* 1 = 0.649294 loss)\nI1214 10:54:24.862886 20613 solver.cpp:228] Iteration 85700, loss = 0.289449\nI1214 10:54:24.862931 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 10:54:24.862948 20613 solver.cpp:244]     Train net output #1: loss = 0.289449 (* 1 = 0.289449 loss)\nI1214 10:54:24.957792 20613 sgd_solver.cpp:174] Iteration 85700, lr = 2.571\nI1214 10:54:24.970440 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307624\nI1214 10:56:42.973810 20613 solver.cpp:337] Iteration 85800, Testing net 
(#0)\nI1214 10:58:03.735853 20613 solver.cpp:404]     Test net output #0: accuracy = 0.772\nI1214 10:58:03.736192 20613 solver.cpp:404]     Test net output #1: loss = 0.740411 (* 1 = 0.740411 loss)\nI1214 10:58:05.047739 20613 solver.cpp:228] Iteration 85800, loss = 0.257307\nI1214 10:58:05.047780 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 10:58:05.047803 20613 solver.cpp:244]     Train net output #1: loss = 0.257308 (* 1 = 0.257308 loss)\nI1214 10:58:05.137197 20613 sgd_solver.cpp:174] Iteration 85800, lr = 2.574\nI1214 10:58:05.149830 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.310207\nI1214 11:00:23.161317 20613 solver.cpp:337] Iteration 85900, Testing net (#0)\nI1214 11:01:43.941849 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67468\nI1214 11:01:43.942198 20613 solver.cpp:404]     Test net output #1: loss = 1.16087 (* 1 = 1.16087 loss)\nI1214 11:01:45.253736 20613 solver.cpp:228] Iteration 85900, loss = 0.307593\nI1214 11:01:45.253784 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 11:01:45.253808 20613 solver.cpp:244]     Train net output #1: loss = 0.307594 (* 1 = 0.307594 loss)\nI1214 11:01:45.344235 20613 sgd_solver.cpp:174] Iteration 85900, lr = 2.577\nI1214 11:01:45.356988 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.352324\nI1214 11:04:03.382922 20613 solver.cpp:337] Iteration 86000, Testing net (#0)\nI1214 11:05:24.159607 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80552\nI1214 11:05:24.159977 20613 solver.cpp:404]     Test net output #1: loss = 0.59047 (* 1 = 0.59047 loss)\nI1214 11:05:25.470930 20613 solver.cpp:228] Iteration 86000, loss = 0.23434\nI1214 11:05:25.470976 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 11:05:25.470993 20613 solver.cpp:244]     Train net output #1: loss = 0.23434 (* 1 = 0.23434 loss)\nI1214 11:05:25.564502 20613 sgd_solver.cpp:174] Iteration 86000, lr = 2.58\nI1214 11:05:25.577261 20613 sgd_solver.cpp:149] Gradient: 
L2 norm 0.262595\nI1214 11:07:43.594851 20613 solver.cpp:337] Iteration 86100, Testing net (#0)\nI1214 11:09:04.374495 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74136\nI1214 11:09:04.374872 20613 solver.cpp:404]     Test net output #1: loss = 0.863688 (* 1 = 0.863688 loss)\nI1214 11:09:05.686861 20613 solver.cpp:228] Iteration 86100, loss = 0.220407\nI1214 11:09:05.686905 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 11:09:05.686923 20613 solver.cpp:244]     Train net output #1: loss = 0.220408 (* 1 = 0.220408 loss)\nI1214 11:09:05.776000 20613 sgd_solver.cpp:174] Iteration 86100, lr = 2.583\nI1214 11:09:05.788691 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315824\nI1214 11:11:23.764202 20613 solver.cpp:337] Iteration 86200, Testing net (#0)\nI1214 11:12:44.549839 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76916\nI1214 11:12:44.550173 20613 solver.cpp:404]     Test net output #1: loss = 0.777087 (* 1 = 0.777087 loss)\nI1214 11:12:45.862115 20613 solver.cpp:228] Iteration 86200, loss = 0.224952\nI1214 11:12:45.862159 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 11:12:45.862176 20613 solver.cpp:244]     Train net output #1: loss = 0.224952 (* 1 = 0.224952 loss)\nI1214 11:12:45.950628 20613 sgd_solver.cpp:174] Iteration 86200, lr = 2.586\nI1214 11:12:45.963371 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.280786\nI1214 11:15:03.162403 20613 solver.cpp:337] Iteration 86300, Testing net (#0)\nI1214 11:16:23.930563 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71488\nI1214 11:16:23.930848 20613 solver.cpp:404]     Test net output #1: loss = 1.00407 (* 1 = 1.00407 loss)\nI1214 11:16:25.239091 20613 solver.cpp:228] Iteration 86300, loss = 0.254821\nI1214 11:16:25.239137 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 11:16:25.239154 20613 solver.cpp:244]     Train net output #1: loss = 0.254822 (* 1 = 0.254822 loss)\nI1214 11:16:25.331915 20613 
sgd_solver.cpp:174] Iteration 86300, lr = 2.589\nI1214 11:16:25.344703 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.281542\nI1214 11:18:43.358866 20613 solver.cpp:337] Iteration 86400, Testing net (#0)\nI1214 11:20:04.129951 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69828\nI1214 11:20:04.130283 20613 solver.cpp:404]     Test net output #1: loss = 1.08112 (* 1 = 1.08112 loss)\nI1214 11:20:05.441181 20613 solver.cpp:228] Iteration 86400, loss = 0.198461\nI1214 11:20:05.441217 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 11:20:05.441232 20613 solver.cpp:244]     Train net output #1: loss = 0.198462 (* 1 = 0.198462 loss)\nI1214 11:20:05.537052 20613 sgd_solver.cpp:174] Iteration 86400, lr = 2.592\nI1214 11:20:05.549674 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292042\nI1214 11:22:22.689182 20613 solver.cpp:337] Iteration 86500, Testing net (#0)\nI1214 11:23:43.469792 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7696\nI1214 11:23:43.470124 20613 solver.cpp:404]     Test net output #1: loss = 0.851468 (* 1 = 0.851468 loss)\nI1214 11:23:44.781790 20613 solver.cpp:228] Iteration 86500, loss = 0.305735\nI1214 11:23:44.781832 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 11:23:44.781847 20613 solver.cpp:244]     Train net output #1: loss = 0.305735 (* 1 = 0.305735 loss)\nI1214 11:23:44.870472 20613 sgd_solver.cpp:174] Iteration 86500, lr = 2.595\nI1214 11:23:44.883244 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.312961\nI1214 11:26:02.691329 20613 solver.cpp:337] Iteration 86600, Testing net (#0)\nI1214 11:27:23.470536 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80284\nI1214 11:27:23.470851 20613 solver.cpp:404]     Test net output #1: loss = 0.66278 (* 1 = 0.66278 loss)\nI1214 11:27:24.780174 20613 solver.cpp:228] Iteration 86600, loss = 0.273965\nI1214 11:27:24.780220 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 11:27:24.780243 20613 
solver.cpp:244]     Train net output #1: loss = 0.273966 (* 1 = 0.273966 loss)\nI1214 11:27:24.873234 20613 sgd_solver.cpp:174] Iteration 86600, lr = 2.598\nI1214 11:27:24.886014 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303042\nI1214 11:29:41.854216 20613 solver.cpp:337] Iteration 86700, Testing net (#0)\nI1214 11:31:02.639871 20613 solver.cpp:404]     Test net output #0: accuracy = 0.833\nI1214 11:31:02.640252 20613 solver.cpp:404]     Test net output #1: loss = 0.535715 (* 1 = 0.535715 loss)\nI1214 11:31:03.949906 20613 solver.cpp:228] Iteration 86700, loss = 0.224708\nI1214 11:31:03.949954 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 11:31:03.949976 20613 solver.cpp:244]     Train net output #1: loss = 0.224708 (* 1 = 0.224708 loss)\nI1214 11:31:04.039721 20613 sgd_solver.cpp:174] Iteration 86700, lr = 2.601\nI1214 11:31:04.052532 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.305007\nI1214 11:33:21.858230 20613 solver.cpp:337] Iteration 86800, Testing net (#0)\nI1214 11:34:42.627818 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69216\nI1214 11:34:42.628196 20613 solver.cpp:404]     Test net output #1: loss = 1.1308 (* 1 = 1.1308 loss)\nI1214 11:34:43.936815 20613 solver.cpp:228] Iteration 86800, loss = 0.265959\nI1214 11:34:43.936857 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 11:34:43.936874 20613 solver.cpp:244]     Train net output #1: loss = 0.265959 (* 1 = 0.265959 loss)\nI1214 11:34:44.033294 20613 sgd_solver.cpp:174] Iteration 86800, lr = 2.604\nI1214 11:34:44.046036 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.342583\nI1214 11:37:01.906317 20613 solver.cpp:337] Iteration 86900, Testing net (#0)\nI1214 11:38:22.671545 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77004\nI1214 11:38:22.671877 20613 solver.cpp:404]     Test net output #1: loss = 0.837308 (* 1 = 0.837308 loss)\nI1214 11:38:23.981499 20613 solver.cpp:228] Iteration 86900, loss = 0.244126\nI1214 11:38:23.981540 
20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 11:38:23.981556 20613 solver.cpp:244]     Train net output #1: loss = 0.244126 (* 1 = 0.244126 loss)\nI1214 11:38:24.072962 20613 sgd_solver.cpp:174] Iteration 86900, lr = 2.607\nI1214 11:38:24.085641 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.265632\nI1214 11:40:41.178107 20613 solver.cpp:337] Iteration 87000, Testing net (#0)\nI1214 11:42:01.931524 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70624\nI1214 11:42:01.931902 20613 solver.cpp:404]     Test net output #1: loss = 0.96041 (* 1 = 0.96041 loss)\nI1214 11:42:03.240728 20613 solver.cpp:228] Iteration 87000, loss = 0.276859\nI1214 11:42:03.240768 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 11:42:03.240785 20613 solver.cpp:244]     Train net output #1: loss = 0.276859 (* 1 = 0.276859 loss)\nI1214 11:42:03.336730 20613 sgd_solver.cpp:174] Iteration 87000, lr = 2.61\nI1214 11:42:03.349792 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.345001\nI1214 11:44:20.287116 20613 solver.cpp:337] Iteration 87100, Testing net (#0)\nI1214 11:45:41.028548 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66132\nI1214 11:45:41.028935 20613 solver.cpp:404]     Test net output #1: loss = 1.30762 (* 1 = 1.30762 loss)\nI1214 11:45:42.337479 20613 solver.cpp:228] Iteration 87100, loss = 0.229699\nI1214 11:45:42.337518 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 11:45:42.337533 20613 solver.cpp:244]     Train net output #1: loss = 0.229699 (* 1 = 0.229699 loss)\nI1214 11:45:42.433326 20613 sgd_solver.cpp:174] Iteration 87100, lr = 2.613\nI1214 11:45:42.445929 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315841\nI1214 11:47:59.464468 20613 solver.cpp:337] Iteration 87200, Testing net (#0)\nI1214 11:49:20.200520 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61732\nI1214 11:49:20.200862 20613 solver.cpp:404]     Test net output #1: loss = 1.69357 (* 1 = 1.69357 loss)\nI1214 
11:49:21.508860 20613 solver.cpp:228] Iteration 87200, loss = 0.226747\nI1214 11:49:21.508900 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 11:49:21.508915 20613 solver.cpp:244]     Train net output #1: loss = 0.226747 (* 1 = 0.226747 loss)\nI1214 11:49:21.601496 20613 sgd_solver.cpp:174] Iteration 87200, lr = 2.616\nI1214 11:49:21.614326 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.318224\nI1214 11:51:38.672606 20613 solver.cpp:337] Iteration 87300, Testing net (#0)\nI1214 11:52:59.413794 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62568\nI1214 11:52:59.414149 20613 solver.cpp:404]     Test net output #1: loss = 1.31945 (* 1 = 1.31945 loss)\nI1214 11:53:00.722903 20613 solver.cpp:228] Iteration 87300, loss = 0.266312\nI1214 11:53:00.722945 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 11:53:00.722960 20613 solver.cpp:244]     Train net output #1: loss = 0.266312 (* 1 = 0.266312 loss)\nI1214 11:53:00.818012 20613 sgd_solver.cpp:174] Iteration 87300, lr = 2.619\nI1214 11:53:00.830787 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.29665\nI1214 11:55:17.822607 20613 solver.cpp:337] Iteration 87400, Testing net (#0)\nI1214 11:56:38.573151 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74816\nI1214 11:56:38.573434 20613 solver.cpp:404]     Test net output #1: loss = 0.809026 (* 1 = 0.809026 loss)\nI1214 11:56:39.881099 20613 solver.cpp:228] Iteration 87400, loss = 0.243045\nI1214 11:56:39.881140 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 11:56:39.881155 20613 solver.cpp:244]     Train net output #1: loss = 0.243045 (* 1 = 0.243045 loss)\nI1214 11:56:39.972522 20613 sgd_solver.cpp:174] Iteration 87400, lr = 2.622\nI1214 11:56:39.985313 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308055\nI1214 11:58:56.933879 20613 solver.cpp:337] Iteration 87500, Testing net (#0)\nI1214 12:00:17.709261 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7254\nI1214 
12:00:17.709594 20613 solver.cpp:404]     Test net output #1: loss = 0.932948 (* 1 = 0.932948 loss)\nI1214 12:00:19.018148 20613 solver.cpp:228] Iteration 87500, loss = 0.296081\nI1214 12:00:19.018193 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 12:00:19.018208 20613 solver.cpp:244]     Train net output #1: loss = 0.296081 (* 1 = 0.296081 loss)\nI1214 12:00:19.114226 20613 sgd_solver.cpp:174] Iteration 87500, lr = 2.625\nI1214 12:00:19.126958 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.360155\nI1214 12:02:36.141628 20613 solver.cpp:337] Iteration 87600, Testing net (#0)\nI1214 12:03:56.905411 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74708\nI1214 12:03:56.905711 20613 solver.cpp:404]     Test net output #1: loss = 0.918795 (* 1 = 0.918795 loss)\nI1214 12:03:58.214373 20613 solver.cpp:228] Iteration 87600, loss = 0.306344\nI1214 12:03:58.214416 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 12:03:58.214431 20613 solver.cpp:244]     Train net output #1: loss = 0.306344 (* 1 = 0.306344 loss)\nI1214 12:03:58.303525 20613 sgd_solver.cpp:174] Iteration 87600, lr = 2.628\nI1214 12:03:58.316308 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28843\nI1214 12:06:15.343082 20613 solver.cpp:337] Iteration 87700, Testing net (#0)\nI1214 12:07:36.119637 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73148\nI1214 12:07:36.119977 20613 solver.cpp:404]     Test net output #1: loss = 0.919403 (* 1 = 0.919403 loss)\nI1214 12:07:37.428685 20613 solver.cpp:228] Iteration 87700, loss = 0.23231\nI1214 12:07:37.428730 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 12:07:37.428746 20613 solver.cpp:244]     Train net output #1: loss = 0.23231 (* 1 = 0.23231 loss)\nI1214 12:07:37.518652 20613 sgd_solver.cpp:174] Iteration 87700, lr = 2.631\nI1214 12:07:37.531410 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292472\nI1214 12:09:54.462553 20613 solver.cpp:337] Iteration 87800, Testing net 
(#0)\nI1214 12:11:15.231643 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72536\nI1214 12:11:15.231982 20613 solver.cpp:404]     Test net output #1: loss = 0.955422 (* 1 = 0.955422 loss)\nI1214 12:11:16.540644 20613 solver.cpp:228] Iteration 87800, loss = 0.277633\nI1214 12:11:16.540686 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 12:11:16.540707 20613 solver.cpp:244]     Train net output #1: loss = 0.277634 (* 1 = 0.277634 loss)\nI1214 12:11:16.628944 20613 sgd_solver.cpp:174] Iteration 87800, lr = 2.634\nI1214 12:11:16.641803 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.331409\nI1214 12:13:33.643522 20613 solver.cpp:337] Iteration 87900, Testing net (#0)\nI1214 12:14:54.424790 20613 solver.cpp:404]     Test net output #0: accuracy = 0.5986\nI1214 12:14:54.425174 20613 solver.cpp:404]     Test net output #1: loss = 1.92376 (* 1 = 1.92376 loss)\nI1214 12:14:55.733721 20613 solver.cpp:228] Iteration 87900, loss = 0.290973\nI1214 12:14:55.733762 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 12:14:55.733778 20613 solver.cpp:244]     Train net output #1: loss = 0.290973 (* 1 = 0.290973 loss)\nI1214 12:14:55.825124 20613 sgd_solver.cpp:174] Iteration 87900, lr = 2.637\nI1214 12:14:55.837744 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.376848\nI1214 12:17:12.744447 20613 solver.cpp:337] Iteration 88000, Testing net (#0)\nI1214 12:18:33.528424 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79164\nI1214 12:18:33.528800 20613 solver.cpp:404]     Test net output #1: loss = 0.649607 (* 1 = 0.649607 loss)\nI1214 12:18:34.837527 20613 solver.cpp:228] Iteration 88000, loss = 0.239717\nI1214 12:18:34.837561 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 12:18:34.837576 20613 solver.cpp:244]     Train net output #1: loss = 0.239717 (* 1 = 0.239717 loss)\nI1214 12:18:34.930865 20613 sgd_solver.cpp:174] Iteration 88000, lr = 2.64\nI1214 12:18:34.943661 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.286935\nI1214 12:20:52.262567 20613 solver.cpp:337] Iteration 88100, Testing net (#0)\nI1214 12:22:13.037925 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65676\nI1214 12:22:13.038323 20613 solver.cpp:404]     Test net output #1: loss = 1.19104 (* 1 = 1.19104 loss)\nI1214 12:22:14.347029 20613 solver.cpp:228] Iteration 88100, loss = 0.230375\nI1214 12:22:14.347072 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 12:22:14.347087 20613 solver.cpp:244]     Train net output #1: loss = 0.230376 (* 1 = 0.230376 loss)\nI1214 12:22:14.445590 20613 sgd_solver.cpp:174] Iteration 88100, lr = 2.643\nI1214 12:22:14.458406 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.343891\nI1214 12:24:32.035894 20613 solver.cpp:337] Iteration 88200, Testing net (#0)\nI1214 12:25:52.806076 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77948\nI1214 12:25:52.806491 20613 solver.cpp:404]     Test net output #1: loss = 0.701694 (* 1 = 0.701694 loss)\nI1214 12:25:54.116592 20613 solver.cpp:228] Iteration 88200, loss = 0.174909\nI1214 12:25:54.116633 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 12:25:54.116649 20613 solver.cpp:244]     Train net output #1: loss = 0.174909 (* 1 = 0.174909 loss)\nI1214 12:25:54.216652 20613 sgd_solver.cpp:174] Iteration 88200, lr = 2.646\nI1214 12:25:54.229411 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.262665\nI1214 12:28:11.722620 20613 solver.cpp:337] Iteration 88300, Testing net (#0)\nI1214 12:29:32.497074 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6662\nI1214 12:29:32.497475 20613 solver.cpp:404]     Test net output #1: loss = 1.47456 (* 1 = 1.47456 loss)\nI1214 12:29:33.806496 20613 solver.cpp:228] Iteration 88300, loss = 0.202391\nI1214 12:29:33.806538 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 12:29:33.806553 20613 solver.cpp:244]     Train net output #1: loss = 0.202392 (* 1 = 0.202392 loss)\nI1214 12:29:33.900885 20613 
sgd_solver.cpp:174] Iteration 88300, lr = 2.649\nI1214 12:29:33.913667 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.304279\nI1214 12:31:51.468302 20613 solver.cpp:337] Iteration 88400, Testing net (#0)\nI1214 12:33:12.131813 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72996\nI1214 12:33:12.132195 20613 solver.cpp:404]     Test net output #1: loss = 0.992096 (* 1 = 0.992096 loss)\nI1214 12:33:13.440770 20613 solver.cpp:228] Iteration 88400, loss = 0.270724\nI1214 12:33:13.440812 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 12:33:13.440829 20613 solver.cpp:244]     Train net output #1: loss = 0.270724 (* 1 = 0.270724 loss)\nI1214 12:33:13.541687 20613 sgd_solver.cpp:174] Iteration 88400, lr = 2.652\nI1214 12:33:13.554435 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306546\nI1214 12:35:30.736137 20613 solver.cpp:337] Iteration 88500, Testing net (#0)\nI1214 12:36:51.405962 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75736\nI1214 12:36:51.406383 20613 solver.cpp:404]     Test net output #1: loss = 0.805177 (* 1 = 0.805177 loss)\nI1214 12:36:52.714699 20613 solver.cpp:228] Iteration 88500, loss = 0.303947\nI1214 12:36:52.714745 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 12:36:52.714761 20613 solver.cpp:244]     Train net output #1: loss = 0.303947 (* 1 = 0.303947 loss)\nI1214 12:36:52.819106 20613 sgd_solver.cpp:174] Iteration 88500, lr = 2.655\nI1214 12:36:52.831884 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316056\nI1214 12:39:10.056949 20613 solver.cpp:337] Iteration 88600, Testing net (#0)\nI1214 12:40:30.733399 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73392\nI1214 12:40:30.733808 20613 solver.cpp:404]     Test net output #1: loss = 0.911454 (* 1 = 0.911454 loss)\nI1214 12:40:32.042500 20613 solver.cpp:228] Iteration 88600, loss = 0.174689\nI1214 12:40:32.042541 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 12:40:32.042557 20613 
solver.cpp:244]     Train net output #1: loss = 0.17469 (* 1 = 0.17469 loss)\nI1214 12:40:32.136878 20613 sgd_solver.cpp:174] Iteration 88600, lr = 2.658\nI1214 12:40:32.149672 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.306169\nI1214 12:42:49.348026 20613 solver.cpp:337] Iteration 88700, Testing net (#0)\nI1214 12:44:10.024055 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66508\nI1214 12:44:10.024473 20613 solver.cpp:404]     Test net output #1: loss = 1.52953 (* 1 = 1.52953 loss)\nI1214 12:44:11.332846 20613 solver.cpp:228] Iteration 88700, loss = 0.223542\nI1214 12:44:11.332888 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 12:44:11.332904 20613 solver.cpp:244]     Train net output #1: loss = 0.223543 (* 1 = 0.223543 loss)\nI1214 12:44:11.433526 20613 sgd_solver.cpp:174] Iteration 88700, lr = 2.661\nI1214 12:44:11.446326 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.269156\nI1214 12:46:28.648104 20613 solver.cpp:337] Iteration 88800, Testing net (#0)\nI1214 12:47:49.296540 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66\nI1214 12:47:49.296969 20613 solver.cpp:404]     Test net output #1: loss = 1.36774 (* 1 = 1.36774 loss)\nI1214 12:47:50.605391 20613 solver.cpp:228] Iteration 88800, loss = 0.27308\nI1214 12:47:50.605432 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 12:47:50.605448 20613 solver.cpp:244]     Train net output #1: loss = 0.27308 (* 1 = 0.27308 loss)\nI1214 12:47:50.701833 20613 sgd_solver.cpp:174] Iteration 88800, lr = 2.664\nI1214 12:47:50.714676 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.316207\nI1214 12:50:07.856923 20613 solver.cpp:337] Iteration 88900, Testing net (#0)\nI1214 12:51:28.504221 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79396\nI1214 12:51:28.504642 20613 solver.cpp:404]     Test net output #1: loss = 0.690879 (* 1 = 0.690879 loss)\nI1214 12:51:29.812491 20613 solver.cpp:228] Iteration 88900, loss = 0.229373\nI1214 12:51:29.812530 
20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 12:51:29.812546 20613 solver.cpp:244]     Train net output #1: loss = 0.229373 (* 1 = 0.229373 loss)\nI1214 12:51:29.912842 20613 sgd_solver.cpp:174] Iteration 88900, lr = 2.667\nI1214 12:51:29.925595 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297237\nI1214 12:53:46.993490 20613 solver.cpp:337] Iteration 89000, Testing net (#0)\nI1214 12:55:07.640275 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75576\nI1214 12:55:07.640686 20613 solver.cpp:404]     Test net output #1: loss = 0.902327 (* 1 = 0.902327 loss)\nI1214 12:55:08.949017 20613 solver.cpp:228] Iteration 89000, loss = 0.245889\nI1214 12:55:08.949056 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 12:55:08.949071 20613 solver.cpp:244]     Train net output #1: loss = 0.245889 (* 1 = 0.245889 loss)\nI1214 12:55:09.044950 20613 sgd_solver.cpp:174] Iteration 89000, lr = 2.67\nI1214 12:55:09.057579 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.320267\nI1214 12:57:26.218094 20613 solver.cpp:337] Iteration 89100, Testing net (#0)\nI1214 12:58:46.861558 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79556\nI1214 12:58:46.861987 20613 solver.cpp:404]     Test net output #1: loss = 0.632897 (* 1 = 0.632897 loss)\nI1214 12:58:48.170554 20613 solver.cpp:228] Iteration 89100, loss = 0.291586\nI1214 12:58:48.170593 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 12:58:48.170608 20613 solver.cpp:244]     Train net output #1: loss = 0.291586 (* 1 = 0.291586 loss)\nI1214 12:58:48.273200 20613 sgd_solver.cpp:174] Iteration 89100, lr = 2.673\nI1214 12:58:48.285960 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.340053\nI1214 13:01:05.402758 20613 solver.cpp:337] Iteration 89200, Testing net (#0)\nI1214 13:02:26.052332 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70148\nI1214 13:02:26.052760 20613 solver.cpp:404]     Test net output #1: loss = 1.19326 (* 1 = 1.19326 
loss)\nI1214 13:02:27.360980 20613 solver.cpp:228] Iteration 89200, loss = 0.37429\nI1214 13:02:27.361021 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 13:02:27.361035 20613 solver.cpp:244]     Train net output #1: loss = 0.37429 (* 1 = 0.37429 loss)\nI1214 13:02:27.461587 20613 sgd_solver.cpp:174] Iteration 89200, lr = 2.676\nI1214 13:02:27.474352 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309689\nI1214 13:04:44.745329 20613 solver.cpp:337] Iteration 89300, Testing net (#0)\nI1214 13:06:05.501076 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62752\nI1214 13:06:05.501499 20613 solver.cpp:404]     Test net output #1: loss = 1.45165 (* 1 = 1.45165 loss)\nI1214 13:06:06.810894 20613 solver.cpp:228] Iteration 89300, loss = 0.214299\nI1214 13:06:06.810935 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 13:06:06.810951 20613 solver.cpp:244]     Train net output #1: loss = 0.214299 (* 1 = 0.214299 loss)\nI1214 13:06:06.909225 20613 sgd_solver.cpp:174] Iteration 89300, lr = 2.679\nI1214 13:06:06.921996 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.251649\nI1214 13:08:24.012265 20613 solver.cpp:337] Iteration 89400, Testing net (#0)\nI1214 13:09:44.755589 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66868\nI1214 13:09:44.755957 20613 solver.cpp:404]     Test net output #1: loss = 1.16296 (* 1 = 1.16296 loss)\nI1214 13:09:46.064612 20613 solver.cpp:228] Iteration 89400, loss = 0.23569\nI1214 13:09:46.064654 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 13:09:46.064669 20613 solver.cpp:244]     Train net output #1: loss = 0.23569 (* 1 = 0.23569 loss)\nI1214 13:09:46.163326 20613 sgd_solver.cpp:174] Iteration 89400, lr = 2.682\nI1214 13:09:46.176090 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.282365\nI1214 13:12:03.412657 20613 solver.cpp:337] Iteration 89500, Testing net (#0)\nI1214 13:13:24.153650 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79796\nI1214 
13:13:24.154019 20613 solver.cpp:404]     Test net output #1: loss = 0.60435 (* 1 = 0.60435 loss)\nI1214 13:13:25.462898 20613 solver.cpp:228] Iteration 89500, loss = 0.254051\nI1214 13:13:25.462940 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 13:13:25.462956 20613 solver.cpp:244]     Train net output #1: loss = 0.254051 (* 1 = 0.254051 loss)\nI1214 13:13:25.560953 20613 sgd_solver.cpp:174] Iteration 89500, lr = 2.685\nI1214 13:13:25.573748 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275868\nI1214 13:15:42.675829 20613 solver.cpp:337] Iteration 89600, Testing net (#0)\nI1214 13:17:03.419862 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76932\nI1214 13:17:03.420219 20613 solver.cpp:404]     Test net output #1: loss = 0.742644 (* 1 = 0.742644 loss)\nI1214 13:17:04.729420 20613 solver.cpp:228] Iteration 89600, loss = 0.196645\nI1214 13:17:04.729462 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 13:17:04.729478 20613 solver.cpp:244]     Train net output #1: loss = 0.196645 (* 1 = 0.196645 loss)\nI1214 13:17:04.827018 20613 sgd_solver.cpp:174] Iteration 89600, lr = 2.688\nI1214 13:17:04.839763 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.26374\nI1214 13:19:22.715663 20613 solver.cpp:337] Iteration 89700, Testing net (#0)\nI1214 13:20:43.490221 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6896\nI1214 13:20:43.490627 20613 solver.cpp:404]     Test net output #1: loss = 1.28694 (* 1 = 1.28694 loss)\nI1214 13:20:44.800339 20613 solver.cpp:228] Iteration 89700, loss = 0.193404\nI1214 13:20:44.800381 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 13:20:44.800396 20613 solver.cpp:244]     Train net output #1: loss = 0.193404 (* 1 = 0.193404 loss)\nI1214 13:20:44.897436 20613 sgd_solver.cpp:174] Iteration 89700, lr = 2.691\nI1214 13:20:44.910241 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.29143\nI1214 13:23:02.048419 20613 solver.cpp:337] Iteration 89800, Testing net 
(#0)\nI1214 13:24:22.796244 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70152\nI1214 13:24:22.796576 20613 solver.cpp:404]     Test net output #1: loss = 1.38284 (* 1 = 1.38284 loss)\nI1214 13:24:24.105747 20613 solver.cpp:228] Iteration 89800, loss = 0.198141\nI1214 13:24:24.105789 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 13:24:24.105808 20613 solver.cpp:244]     Train net output #1: loss = 0.198141 (* 1 = 0.198141 loss)\nI1214 13:24:24.199838 20613 sgd_solver.cpp:174] Iteration 89800, lr = 2.694\nI1214 13:24:24.212453 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293453\nI1214 13:26:41.321554 20613 solver.cpp:337] Iteration 89900, Testing net (#0)\nI1214 13:28:02.068428 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75156\nI1214 13:28:02.068843 20613 solver.cpp:404]     Test net output #1: loss = 0.950836 (* 1 = 0.950836 loss)\nI1214 13:28:03.377718 20613 solver.cpp:228] Iteration 89900, loss = 0.268307\nI1214 13:28:03.377760 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 13:28:03.377775 20613 solver.cpp:244]     Train net output #1: loss = 0.268307 (* 1 = 0.268307 loss)\nI1214 13:28:03.475666 20613 sgd_solver.cpp:174] Iteration 89900, lr = 2.697\nI1214 13:28:03.488492 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309427\nI1214 13:30:20.770105 20613 solver.cpp:337] Iteration 90000, Testing net (#0)\nI1214 13:31:41.520995 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77368\nI1214 13:31:41.521421 20613 solver.cpp:404]     Test net output #1: loss = 0.811588 (* 1 = 0.811588 loss)\nI1214 13:31:42.829927 20613 solver.cpp:228] Iteration 90000, loss = 0.198612\nI1214 13:31:42.829969 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 13:31:42.829984 20613 solver.cpp:244]     Train net output #1: loss = 0.198612 (* 1 = 0.198612 loss)\nI1214 13:31:42.931015 20613 sgd_solver.cpp:174] Iteration 90000, lr = 2.7\nI1214 13:31:42.943814 20613 sgd_solver.cpp:149] 
Gradient: L2 norm 0.307915\nI1214 13:34:00.055593 20613 solver.cpp:337] Iteration 90100, Testing net (#0)\nI1214 13:35:20.850008 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65476\nI1214 13:35:20.850430 20613 solver.cpp:404]     Test net output #1: loss = 1.24389 (* 1 = 1.24389 loss)\nI1214 13:35:22.159200 20613 solver.cpp:228] Iteration 90100, loss = 0.265553\nI1214 13:35:22.159234 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 13:35:22.159247 20613 solver.cpp:244]     Train net output #1: loss = 0.265553 (* 1 = 0.265553 loss)\nI1214 13:35:22.256520 20613 sgd_solver.cpp:174] Iteration 90100, lr = 2.703\nI1214 13:35:22.269385 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.294367\nI1214 13:37:40.088122 20613 solver.cpp:337] Iteration 90200, Testing net (#0)\nI1214 13:39:00.882540 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7304\nI1214 13:39:00.882963 20613 solver.cpp:404]     Test net output #1: loss = 0.921709 (* 1 = 0.921709 loss)\nI1214 13:39:02.191714 20613 solver.cpp:228] Iteration 90200, loss = 0.305948\nI1214 13:39:02.191753 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 13:39:02.191768 20613 solver.cpp:244]     Train net output #1: loss = 0.305948 (* 1 = 0.305948 loss)\nI1214 13:39:02.291398 20613 sgd_solver.cpp:174] Iteration 90200, lr = 2.706\nI1214 13:39:02.304137 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297327\nI1214 13:41:19.361346 20613 solver.cpp:337] Iteration 90300, Testing net (#0)\nI1214 13:42:40.110064 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79096\nI1214 13:42:40.110486 20613 solver.cpp:404]     Test net output #1: loss = 0.63286 (* 1 = 0.63286 loss)\nI1214 13:42:41.420445 20613 solver.cpp:228] Iteration 90300, loss = 0.148724\nI1214 13:42:41.420487 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 13:42:41.420503 20613 solver.cpp:244]     Train net output #1: loss = 0.148724 (* 1 = 0.148724 loss)\nI1214 13:42:41.522239 20613 
sgd_solver.cpp:174] Iteration 90300, lr = 2.709\nI1214 13:42:41.535018 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275433\nI1214 13:44:58.762437 20613 solver.cpp:337] Iteration 90400, Testing net (#0)\nI1214 13:46:19.512435 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76216\nI1214 13:46:19.512852 20613 solver.cpp:404]     Test net output #1: loss = 0.741006 (* 1 = 0.741006 loss)\nI1214 13:46:20.823326 20613 solver.cpp:228] Iteration 90400, loss = 0.156963\nI1214 13:46:20.823371 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 13:46:20.823387 20613 solver.cpp:244]     Train net output #1: loss = 0.156963 (* 1 = 0.156963 loss)\nI1214 13:46:20.920310 20613 sgd_solver.cpp:174] Iteration 90400, lr = 2.712\nI1214 13:46:20.933018 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.266142\nI1214 13:48:38.987862 20613 solver.cpp:337] Iteration 90500, Testing net (#0)\nI1214 13:49:59.730706 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7628\nI1214 13:49:59.731132 20613 solver.cpp:404]     Test net output #1: loss = 0.714898 (* 1 = 0.714898 loss)\nI1214 13:50:01.041611 20613 solver.cpp:228] Iteration 90500, loss = 0.268677\nI1214 13:50:01.041649 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 13:50:01.041664 20613 solver.cpp:244]     Train net output #1: loss = 0.268677 (* 1 = 0.268677 loss)\nI1214 13:50:01.136890 20613 sgd_solver.cpp:174] Iteration 90500, lr = 2.715\nI1214 13:50:01.149713 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321915\nI1214 13:52:18.353752 20613 solver.cpp:337] Iteration 90600, Testing net (#0)\nI1214 13:53:39.104444 20613 solver.cpp:404]     Test net output #0: accuracy = 0.64508\nI1214 13:53:39.104849 20613 solver.cpp:404]     Test net output #1: loss = 1.37605 (* 1 = 1.37605 loss)\nI1214 13:53:40.414945 20613 solver.cpp:228] Iteration 90600, loss = 0.195453\nI1214 13:53:40.414984 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 13:53:40.415000 20613 
solver.cpp:244]     Train net output #1: loss = 0.195453 (* 1 = 0.195453 loss)\nI1214 13:53:40.510522 20613 sgd_solver.cpp:174] Iteration 90600, lr = 2.718\nI1214 13:53:40.523277 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.286948\nI1214 13:55:57.811996 20613 solver.cpp:337] Iteration 90700, Testing net (#0)\nI1214 13:57:18.559804 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76828\nI1214 13:57:18.560235 20613 solver.cpp:404]     Test net output #1: loss = 0.735728 (* 1 = 0.735728 loss)\nI1214 13:57:19.872242 20613 solver.cpp:228] Iteration 90700, loss = 0.252322\nI1214 13:57:19.872282 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 13:57:19.872298 20613 solver.cpp:244]     Train net output #1: loss = 0.252322 (* 1 = 0.252322 loss)\nI1214 13:57:19.966246 20613 sgd_solver.cpp:174] Iteration 90700, lr = 2.721\nI1214 13:57:19.979053 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.285602\nI1214 13:59:37.443701 20613 solver.cpp:337] Iteration 90800, Testing net (#0)\nI1214 14:00:58.189321 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75\nI1214 14:00:58.189739 20613 solver.cpp:404]     Test net output #1: loss = 0.910857 (* 1 = 0.910857 loss)\nI1214 14:00:59.500062 20613 solver.cpp:228] Iteration 90800, loss = 0.248573\nI1214 14:00:59.500102 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 14:00:59.500118 20613 solver.cpp:244]     Train net output #1: loss = 0.248573 (* 1 = 0.248573 loss)\nI1214 14:00:59.598693 20613 sgd_solver.cpp:174] Iteration 90800, lr = 2.724\nI1214 14:00:59.611351 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321845\nI1214 14:03:16.973400 20613 solver.cpp:337] Iteration 90900, Testing net (#0)\nI1214 14:04:37.713116 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75148\nI1214 14:04:37.713536 20613 solver.cpp:404]     Test net output #1: loss = 0.844012 (* 1 = 0.844012 loss)\nI1214 14:04:39.024279 20613 solver.cpp:228] Iteration 90900, loss = 0.301622\nI1214 
14:04:39.024319 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 14:04:39.024335 20613 solver.cpp:244]     Train net output #1: loss = 0.301622 (* 1 = 0.301622 loss)\nI1214 14:04:39.117393 20613 sgd_solver.cpp:174] Iteration 90900, lr = 2.727\nI1214 14:04:39.129901 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.282299\nI1214 14:06:57.165221 20613 solver.cpp:337] Iteration 91000, Testing net (#0)\nI1214 14:08:17.908957 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76044\nI1214 14:08:17.909385 20613 solver.cpp:404]     Test net output #1: loss = 0.850125 (* 1 = 0.850125 loss)\nI1214 14:08:19.219869 20613 solver.cpp:228] Iteration 91000, loss = 0.178377\nI1214 14:08:19.219902 20613 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1214 14:08:19.219916 20613 solver.cpp:244]     Train net output #1: loss = 0.178377 (* 1 = 0.178377 loss)\nI1214 14:08:19.314543 20613 sgd_solver.cpp:174] Iteration 91000, lr = 2.73\nI1214 14:08:19.327237 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.278844\nI1214 14:10:36.538707 20613 solver.cpp:337] Iteration 91100, Testing net (#0)\nI1214 14:11:57.316045 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7816\nI1214 14:11:57.316473 20613 solver.cpp:404]     Test net output #1: loss = 0.751062 (* 1 = 0.751062 loss)\nI1214 14:11:58.627799 20613 solver.cpp:228] Iteration 91100, loss = 0.160655\nI1214 14:11:58.627831 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 14:11:58.627846 20613 solver.cpp:244]     Train net output #1: loss = 0.160655 (* 1 = 0.160655 loss)\nI1214 14:11:58.723430 20613 sgd_solver.cpp:174] Iteration 91100, lr = 2.733\nI1214 14:11:58.736189 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.270286\nI1214 14:14:15.942785 20613 solver.cpp:337] Iteration 91200, Testing net (#0)\nI1214 14:15:36.725308 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78096\nI1214 14:15:36.725745 20613 solver.cpp:404]     Test net output #1: loss = 0.723583 (* 1 = 
0.723583 loss)\nI1214 14:15:38.036662 20613 solver.cpp:228] Iteration 91200, loss = 0.325017\nI1214 14:15:38.036700 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 14:15:38.036715 20613 solver.cpp:244]     Train net output #1: loss = 0.325017 (* 1 = 0.325017 loss)\nI1214 14:15:38.133276 20613 sgd_solver.cpp:174] Iteration 91200, lr = 2.736\nI1214 14:15:38.145828 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.334769\nI1214 14:17:55.465281 20613 solver.cpp:337] Iteration 91300, Testing net (#0)\nI1214 14:19:16.257426 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74652\nI1214 14:19:16.257861 20613 solver.cpp:404]     Test net output #1: loss = 0.859469 (* 1 = 0.859469 loss)\nI1214 14:19:17.568892 20613 solver.cpp:228] Iteration 91300, loss = 0.229459\nI1214 14:19:17.568931 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 14:19:17.568948 20613 solver.cpp:244]     Train net output #1: loss = 0.22946 (* 1 = 0.22946 loss)\nI1214 14:19:17.667981 20613 sgd_solver.cpp:174] Iteration 91300, lr = 2.739\nI1214 14:19:17.680652 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.254111\nI1214 14:21:34.901793 20613 solver.cpp:337] Iteration 91400, Testing net (#0)\nI1214 14:22:55.674129 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72204\nI1214 14:22:55.674554 20613 solver.cpp:404]     Test net output #1: loss = 1.10227 (* 1 = 1.10227 loss)\nI1214 14:22:56.985000 20613 solver.cpp:228] Iteration 91400, loss = 0.189985\nI1214 14:22:56.985040 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 14:22:56.985055 20613 solver.cpp:244]     Train net output #1: loss = 0.189985 (* 1 = 0.189985 loss)\nI1214 14:22:57.084342 20613 sgd_solver.cpp:174] Iteration 91400, lr = 2.742\nI1214 14:22:57.097072 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.238397\nI1214 14:25:14.152730 20613 solver.cpp:337] Iteration 91500, Testing net (#0)\nI1214 14:26:34.885179 20613 solver.cpp:404]     Test net output #0: accuracy = 
0.59112\nI1214 14:26:34.885612 20613 solver.cpp:404]     Test net output #1: loss = 1.55557 (* 1 = 1.55557 loss)\nI1214 14:26:36.196501 20613 solver.cpp:228] Iteration 91500, loss = 0.217253\nI1214 14:26:36.196542 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 14:26:36.196557 20613 solver.cpp:244]     Train net output #1: loss = 0.217254 (* 1 = 0.217254 loss)\nI1214 14:26:36.290922 20613 sgd_solver.cpp:174] Iteration 91500, lr = 2.745\nI1214 14:26:36.303649 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.303034\nI1214 14:28:53.501663 20613 solver.cpp:337] Iteration 91600, Testing net (#0)\nI1214 14:30:14.245867 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77728\nI1214 14:30:14.246276 20613 solver.cpp:404]     Test net output #1: loss = 0.747607 (* 1 = 0.747607 loss)\nI1214 14:30:15.556721 20613 solver.cpp:228] Iteration 91600, loss = 0.386556\nI1214 14:30:15.556763 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 14:30:15.556779 20613 solver.cpp:244]     Train net output #1: loss = 0.386556 (* 1 = 0.386556 loss)\nI1214 14:30:15.650552 20613 sgd_solver.cpp:174] Iteration 91600, lr = 2.748\nI1214 14:30:15.663305 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.362332\nI1214 14:32:32.874979 20613 solver.cpp:337] Iteration 91700, Testing net (#0)\nI1214 14:33:53.621301 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7452\nI1214 14:33:53.621726 20613 solver.cpp:404]     Test net output #1: loss = 0.820418 (* 1 = 0.820418 loss)\nI1214 14:33:54.933015 20613 solver.cpp:228] Iteration 91700, loss = 0.278869\nI1214 14:33:54.933055 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 14:33:54.933071 20613 solver.cpp:244]     Train net output #1: loss = 0.278869 (* 1 = 0.278869 loss)\nI1214 14:33:55.026964 20613 sgd_solver.cpp:174] Iteration 91700, lr = 2.751\nI1214 14:33:55.039721 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.269585\nI1214 14:36:12.269130 20613 solver.cpp:337] Iteration 91800, 
Testing net (#0)\nI1214 14:37:33.003103 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72028\nI1214 14:37:33.003495 20613 solver.cpp:404]     Test net output #1: loss = 1.10223 (* 1 = 1.10223 loss)\nI1214 14:37:34.313514 20613 solver.cpp:228] Iteration 91800, loss = 0.178649\nI1214 14:37:34.313555 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 14:37:34.313570 20613 solver.cpp:244]     Train net output #1: loss = 0.178649 (* 1 = 0.178649 loss)\nI1214 14:37:34.410789 20613 sgd_solver.cpp:174] Iteration 91800, lr = 2.754\nI1214 14:37:34.423499 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321778\nI1214 14:39:51.584095 20613 solver.cpp:337] Iteration 91900, Testing net (#0)\nI1214 14:41:12.320611 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74024\nI1214 14:41:12.321018 20613 solver.cpp:404]     Test net output #1: loss = 0.969844 (* 1 = 0.969844 loss)\nI1214 14:41:13.631283 20613 solver.cpp:228] Iteration 91900, loss = 0.267368\nI1214 14:41:13.631325 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 14:41:13.631340 20613 solver.cpp:244]     Train net output #1: loss = 0.267369 (* 1 = 0.267369 loss)\nI1214 14:41:13.722347 20613 sgd_solver.cpp:174] Iteration 91900, lr = 2.757\nI1214 14:41:13.734957 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.323812\nI1214 14:43:30.975960 20613 solver.cpp:337] Iteration 92000, Testing net (#0)\nI1214 14:44:51.710587 20613 solver.cpp:404]     Test net output #0: accuracy = 0.70032\nI1214 14:44:51.710877 20613 solver.cpp:404]     Test net output #1: loss = 1.16375 (* 1 = 1.16375 loss)\nI1214 14:44:53.020761 20613 solver.cpp:228] Iteration 92000, loss = 0.275533\nI1214 14:44:53.020794 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 14:44:53.020808 20613 solver.cpp:244]     Train net output #1: loss = 0.275533 (* 1 = 0.275533 loss)\nI1214 14:44:53.114446 20613 sgd_solver.cpp:174] Iteration 92000, lr = 2.76\nI1214 14:44:53.127135 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.259499\nI1214 14:47:10.413769 20613 solver.cpp:337] Iteration 92100, Testing net (#0)\nI1214 14:48:31.043730 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76812\nI1214 14:48:31.044049 20613 solver.cpp:404]     Test net output #1: loss = 0.723975 (* 1 = 0.723975 loss)\nI1214 14:48:32.354430 20613 solver.cpp:228] Iteration 92100, loss = 0.297158\nI1214 14:48:32.354470 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 14:48:32.354486 20613 solver.cpp:244]     Train net output #1: loss = 0.297158 (* 1 = 0.297158 loss)\nI1214 14:48:32.453474 20613 sgd_solver.cpp:174] Iteration 92100, lr = 2.763\nI1214 14:48:32.466060 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.368941\nI1214 14:50:50.458686 20613 solver.cpp:337] Iteration 92200, Testing net (#0)\nI1214 14:52:11.108752 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71464\nI1214 14:52:11.109146 20613 solver.cpp:404]     Test net output #1: loss = 1.00879 (* 1 = 1.00879 loss)\nI1214 14:52:12.419116 20613 solver.cpp:228] Iteration 92200, loss = 0.214678\nI1214 14:52:12.419157 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 14:52:12.419173 20613 solver.cpp:244]     Train net output #1: loss = 0.214678 (* 1 = 0.214678 loss)\nI1214 14:52:12.517037 20613 sgd_solver.cpp:174] Iteration 92200, lr = 2.766\nI1214 14:52:12.529618 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.298747\nI1214 14:54:30.547798 20613 solver.cpp:337] Iteration 92300, Testing net (#0)\nI1214 14:55:51.190349 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68844\nI1214 14:55:51.190696 20613 solver.cpp:404]     Test net output #1: loss = 1.09076 (* 1 = 1.09076 loss)\nI1214 14:55:52.501287 20613 solver.cpp:228] Iteration 92300, loss = 0.217042\nI1214 14:55:52.501328 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 14:55:52.501343 20613 solver.cpp:244]     Train net output #1: loss = 0.217042 (* 1 = 0.217042 loss)\nI1214 
14:55:52.597755 20613 sgd_solver.cpp:174] Iteration 92300, lr = 2.769\nI1214 14:55:52.610359 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.249109\nI1214 14:58:10.624423 20613 solver.cpp:337] Iteration 92400, Testing net (#0)\nI1214 14:59:31.276320 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69592\nI1214 14:59:31.276656 20613 solver.cpp:404]     Test net output #1: loss = 0.896442 (* 1 = 0.896442 loss)\nI1214 14:59:32.587409 20613 solver.cpp:228] Iteration 92400, loss = 0.266989\nI1214 14:59:32.587450 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 14:59:32.587466 20613 solver.cpp:244]     Train net output #1: loss = 0.266989 (* 1 = 0.266989 loss)\nI1214 14:59:32.685124 20613 sgd_solver.cpp:174] Iteration 92400, lr = 2.772\nI1214 14:59:32.697795 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300646\nI1214 15:01:49.791467 20613 solver.cpp:337] Iteration 92500, Testing net (#0)\nI1214 15:03:10.450232 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76672\nI1214 15:03:10.450572 20613 solver.cpp:404]     Test net output #1: loss = 0.773533 (* 1 = 0.773533 loss)\nI1214 15:03:11.761670 20613 solver.cpp:228] Iteration 92500, loss = 0.252887\nI1214 15:03:11.761713 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 15:03:11.761729 20613 solver.cpp:244]     Train net output #1: loss = 0.252887 (* 1 = 0.252887 loss)\nI1214 15:03:11.853057 20613 sgd_solver.cpp:174] Iteration 92500, lr = 2.775\nI1214 15:03:11.865814 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.337983\nI1214 15:05:29.082190 20613 solver.cpp:337] Iteration 92600, Testing net (#0)\nI1214 15:06:49.749377 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76804\nI1214 15:06:49.749676 20613 solver.cpp:404]     Test net output #1: loss = 0.768126 (* 1 = 0.768126 loss)\nI1214 15:06:51.060153 20613 solver.cpp:228] Iteration 92600, loss = 0.296856\nI1214 15:06:51.060195 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 
15:06:51.060210 20613 solver.cpp:244]     Train net output #1: loss = 0.296856 (* 1 = 0.296856 loss)\nI1214 15:06:51.156982 20613 sgd_solver.cpp:174] Iteration 92600, lr = 2.778\nI1214 15:06:51.169667 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.315188\nI1214 15:09:08.408520 20613 solver.cpp:337] Iteration 92700, Testing net (#0)\nI1214 15:10:29.065037 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78096\nI1214 15:10:29.065426 20613 solver.cpp:404]     Test net output #1: loss = 0.654788 (* 1 = 0.654788 loss)\nI1214 15:10:30.376639 20613 solver.cpp:228] Iteration 92700, loss = 0.274998\nI1214 15:10:30.376673 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 15:10:30.376688 20613 solver.cpp:244]     Train net output #1: loss = 0.274998 (* 1 = 0.274998 loss)\nI1214 15:10:30.469548 20613 sgd_solver.cpp:174] Iteration 92700, lr = 2.781\nI1214 15:10:30.482168 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.324752\nI1214 15:31:14.393554 20613 solver.cpp:337] Iteration 92800, Testing net (#0)\nI1214 15:32:35.069866 20613 solver.cpp:404]     Test net output #0: accuracy = 0.65388\nI1214 15:32:35.070296 20613 solver.cpp:404]     Test net output #1: loss = 1.12544 (* 1 = 1.12544 loss)\nI1214 15:32:36.399613 20613 solver.cpp:228] Iteration 92800, loss = 0.226111\nI1214 15:32:36.399653 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 15:32:36.399668 20613 solver.cpp:244]     Train net output #1: loss = 0.226111 (* 1 = 0.226111 loss)\nI1214 15:32:36.474444 20613 sgd_solver.cpp:174] Iteration 92800, lr = 2.784\nI1214 15:32:36.487177 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.274819\nI1214 15:34:53.879099 20613 solver.cpp:337] Iteration 92900, Testing net (#0)\nI1214 15:36:14.225909 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71516\nI1214 15:36:14.226310 20613 solver.cpp:404]     Test net output #1: loss = 1.06418 (* 1 = 1.06418 loss)\nI1214 15:36:15.536532 20613 solver.cpp:228] Iteration 92900, loss = 
0.290136\nI1214 15:36:15.536577 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 15:36:15.536594 20613 solver.cpp:244]     Train net output #1: loss = 0.290136 (* 1 = 0.290136 loss)\nI1214 15:36:15.645113 20613 sgd_solver.cpp:174] Iteration 92900, lr = 2.787\nI1214 15:36:15.657910 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290561\nI1214 15:38:33.029922 20613 solver.cpp:337] Iteration 93000, Testing net (#0)\nI1214 15:39:53.733944 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6968\nI1214 15:39:53.734355 20613 solver.cpp:404]     Test net output #1: loss = 0.947501 (* 1 = 0.947501 loss)\nI1214 15:39:55.044430 20613 solver.cpp:228] Iteration 93000, loss = 0.324826\nI1214 15:39:55.044476 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 15:39:55.044492 20613 solver.cpp:244]     Train net output #1: loss = 0.324827 (* 1 = 0.324827 loss)\nI1214 15:39:55.136638 20613 sgd_solver.cpp:174] Iteration 93000, lr = 2.79\nI1214 15:39:55.149363 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.344227\nI1214 15:42:12.511030 20613 solver.cpp:337] Iteration 93100, Testing net (#0)\nI1214 15:43:33.202347 20613 solver.cpp:404]     Test net output #0: accuracy = 0.68164\nI1214 15:43:33.202757 20613 solver.cpp:404]     Test net output #1: loss = 1.11378 (* 1 = 1.11378 loss)\nI1214 15:43:34.513713 20613 solver.cpp:228] Iteration 93100, loss = 0.279689\nI1214 15:43:34.513749 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 15:43:34.513764 20613 solver.cpp:244]     Train net output #1: loss = 0.279689 (* 1 = 0.279689 loss)\nI1214 15:43:34.610534 20613 sgd_solver.cpp:174] Iteration 93100, lr = 2.793\nI1214 15:43:34.623241 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.276302\nI1214 15:45:51.922483 20613 solver.cpp:337] Iteration 93200, Testing net (#0)\nI1214 15:47:12.624117 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7374\nI1214 15:47:12.624466 20613 solver.cpp:404]     Test net output #1: loss = 
0.905403 (* 1 = 0.905403 loss)\nI1214 15:47:13.935015 20613 solver.cpp:228] Iteration 93200, loss = 0.187383\nI1214 15:47:13.935052 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 15:47:13.935067 20613 solver.cpp:244]     Train net output #1: loss = 0.187384 (* 1 = 0.187384 loss)\nI1214 15:47:14.032346 20613 sgd_solver.cpp:174] Iteration 93200, lr = 2.796\nI1214 15:47:14.045051 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.296745\nI1214 15:49:31.967566 20613 solver.cpp:337] Iteration 93300, Testing net (#0)\nI1214 15:50:52.673132 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77236\nI1214 15:50:52.673532 20613 solver.cpp:404]     Test net output #1: loss = 0.822801 (* 1 = 0.822801 loss)\nI1214 15:50:53.984372 20613 solver.cpp:228] Iteration 93300, loss = 0.324648\nI1214 15:50:53.984419 20613 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1214 15:50:53.984436 20613 solver.cpp:244]     Train net output #1: loss = 0.324648 (* 1 = 0.324648 loss)\nI1214 15:50:54.083552 20613 sgd_solver.cpp:174] Iteration 93300, lr = 2.799\nI1214 15:50:54.096125 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.287262\nI1214 15:53:12.091039 20613 solver.cpp:337] Iteration 93400, Testing net (#0)\nI1214 15:54:32.788450 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77976\nI1214 15:54:32.788847 20613 solver.cpp:404]     Test net output #1: loss = 0.76492 (* 1 = 0.76492 loss)\nI1214 15:54:34.098076 20613 solver.cpp:228] Iteration 93400, loss = 0.20784\nI1214 15:54:34.098122 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 15:54:34.098140 20613 solver.cpp:244]     Train net output #1: loss = 0.20784 (* 1 = 0.20784 loss)\nI1214 15:54:34.194440 20613 sgd_solver.cpp:174] Iteration 93400, lr = 2.802\nI1214 15:54:34.207216 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.277806\nI1214 15:56:52.269345 20613 solver.cpp:337] Iteration 93500, Testing net (#0)\nI1214 15:58:12.961477 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.75984\nI1214 15:58:12.961861 20613 solver.cpp:404]     Test net output #1: loss = 0.741286 (* 1 = 0.741286 loss)\nI1214 15:58:14.271805 20613 solver.cpp:228] Iteration 93500, loss = 0.237136\nI1214 15:58:14.271852 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 15:58:14.271867 20613 solver.cpp:244]     Train net output #1: loss = 0.237136 (* 1 = 0.237136 loss)\nI1214 15:58:14.368036 20613 sgd_solver.cpp:174] Iteration 93500, lr = 2.805\nI1214 15:58:14.380789 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.309362\nI1214 16:00:32.440951 20613 solver.cpp:337] Iteration 93600, Testing net (#0)\nI1214 16:01:53.132774 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74656\nI1214 16:01:53.133188 20613 solver.cpp:404]     Test net output #1: loss = 0.952491 (* 1 = 0.952491 loss)\nI1214 16:01:54.444305 20613 solver.cpp:228] Iteration 93600, loss = 0.27202\nI1214 16:01:54.444350 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 16:01:54.444366 20613 solver.cpp:244]     Train net output #1: loss = 0.27202 (* 1 = 0.27202 loss)\nI1214 16:01:54.542098 20613 sgd_solver.cpp:174] Iteration 93600, lr = 2.808\nI1214 16:01:54.554847 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.297188\nI1214 16:09:24.482976 20613 solver.cpp:337] Iteration 93700, Testing net (#0)\nI1214 16:10:45.219678 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76408\nI1214 16:10:45.220038 20613 solver.cpp:404]     Test net output #1: loss = 0.792285 (* 1 = 0.792285 loss)\nI1214 16:10:46.530910 20613 solver.cpp:228] Iteration 93700, loss = 0.216641\nI1214 16:10:46.530948 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 16:10:46.530964 20613 solver.cpp:244]     Train net output #1: loss = 0.216641 (* 1 = 0.216641 loss)\nI1214 16:10:46.629566 20613 sgd_solver.cpp:174] Iteration 93700, lr = 2.811\nI1214 16:10:46.642267 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.269704\nI1214 16:13:03.966354 20613 solver.cpp:337] 
Iteration 93800, Testing net (#0)\nI1214 16:14:24.677242 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71996\nI1214 16:14:24.677588 20613 solver.cpp:404]     Test net output #1: loss = 0.955218 (* 1 = 0.955218 loss)\nI1214 16:14:25.988850 20613 solver.cpp:228] Iteration 93800, loss = 0.269956\nI1214 16:14:25.988899 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 16:14:25.988917 20613 solver.cpp:244]     Train net output #1: loss = 0.269957 (* 1 = 0.269957 loss)\nI1214 16:14:26.084379 20613 sgd_solver.cpp:174] Iteration 93800, lr = 2.814\nI1214 16:14:26.097097 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.300925\nI1214 16:16:43.307482 20613 solver.cpp:337] Iteration 93900, Testing net (#0)\nI1214 16:18:04.015801 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72616\nI1214 16:18:04.016197 20613 solver.cpp:404]     Test net output #1: loss = 0.92519 (* 1 = 0.92519 loss)\nI1214 16:18:05.326750 20613 solver.cpp:228] Iteration 93900, loss = 0.220772\nI1214 16:18:05.326798 20613 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1214 16:18:05.326815 20613 solver.cpp:244]     Train net output #1: loss = 0.220772 (* 1 = 0.220772 loss)\nI1214 16:18:05.417726 20613 sgd_solver.cpp:174] Iteration 93900, lr = 2.817\nI1214 16:18:05.430371 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.27553\nI1214 16:20:22.564194 20613 solver.cpp:337] Iteration 94000, Testing net (#0)\nI1214 16:21:43.270138 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69232\nI1214 16:21:43.270505 20613 solver.cpp:404]     Test net output #1: loss = 1.24488 (* 1 = 1.24488 loss)\nI1214 16:21:44.580644 20613 solver.cpp:228] Iteration 94000, loss = 0.284622\nI1214 16:21:44.580693 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 16:21:44.580720 20613 solver.cpp:244]     Train net output #1: loss = 0.284623 (* 1 = 0.284623 loss)\nI1214 16:21:44.672153 20613 sgd_solver.cpp:174] Iteration 94000, lr = 2.82\nI1214 16:21:44.684909 
20613 sgd_solver.cpp:149] Gradient: L2 norm 0.312049\nI1214 16:24:01.807512 20613 solver.cpp:337] Iteration 94100, Testing net (#0)\nI1214 16:25:22.518837 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74056\nI1214 16:25:22.519176 20613 solver.cpp:404]     Test net output #1: loss = 0.882523 (* 1 = 0.882523 loss)\nI1214 16:25:23.829403 20613 solver.cpp:228] Iteration 94100, loss = 0.378731\nI1214 16:25:23.829452 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 16:25:23.829468 20613 solver.cpp:244]     Train net output #1: loss = 0.378731 (* 1 = 0.378731 loss)\nI1214 16:25:23.926533 20613 sgd_solver.cpp:174] Iteration 94100, lr = 2.823\nI1214 16:25:23.939283 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290731\nI1214 16:27:42.001132 20613 solver.cpp:337] Iteration 94200, Testing net (#0)\nI1214 16:29:02.715353 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66396\nI1214 16:29:02.715685 20613 solver.cpp:404]     Test net output #1: loss = 1.47336 (* 1 = 1.47336 loss)\nI1214 16:29:04.025959 20613 solver.cpp:228] Iteration 94200, loss = 0.290301\nI1214 16:29:04.026005 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 16:29:04.026021 20613 solver.cpp:244]     Train net output #1: loss = 0.290301 (* 1 = 0.290301 loss)\nI1214 16:29:04.118295 20613 sgd_solver.cpp:174] Iteration 94200, lr = 2.826\nI1214 16:29:04.130995 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.266003\nI1214 16:31:22.099714 20613 solver.cpp:337] Iteration 94300, Testing net (#0)\nI1214 16:32:42.808593 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81368\nI1214 16:32:42.809001 20613 solver.cpp:404]     Test net output #1: loss = 0.577779 (* 1 = 0.577779 loss)\nI1214 16:32:44.119442 20613 solver.cpp:228] Iteration 94300, loss = 0.287113\nI1214 16:32:44.119493 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 16:32:44.119510 20613 solver.cpp:244]     Train net output #1: loss = 0.287113 (* 1 = 0.287113 
loss)\nI1214 16:32:44.211874 20613 sgd_solver.cpp:174] Iteration 94300, lr = 2.829\nI1214 16:32:44.224625 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.264966\nI1214 16:35:02.213196 20613 solver.cpp:337] Iteration 94400, Testing net (#0)\nI1214 16:36:22.916586 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76708\nI1214 16:36:22.916996 20613 solver.cpp:404]     Test net output #1: loss = 0.700871 (* 1 = 0.700871 loss)\nI1214 16:36:24.228534 20613 solver.cpp:228] Iteration 94400, loss = 0.255047\nI1214 16:36:24.228583 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 16:36:24.228600 20613 solver.cpp:244]     Train net output #1: loss = 0.255047 (* 1 = 0.255047 loss)\nI1214 16:36:24.318831 20613 sgd_solver.cpp:174] Iteration 94400, lr = 2.832\nI1214 16:36:24.331496 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295971\nI1214 16:38:41.451089 20613 solver.cpp:337] Iteration 94500, Testing net (#0)\nI1214 16:40:02.148150 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81568\nI1214 16:40:02.148501 20613 solver.cpp:404]     Test net output #1: loss = 0.585734 (* 1 = 0.585734 loss)\nI1214 16:40:03.458982 20613 solver.cpp:228] Iteration 94500, loss = 0.222762\nI1214 16:40:03.459033 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 16:40:03.459049 20613 solver.cpp:244]     Train net output #1: loss = 0.222763 (* 1 = 0.222763 loss)\nI1214 16:40:03.551028 20613 sgd_solver.cpp:174] Iteration 94500, lr = 2.835\nI1214 16:40:03.563690 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.279323\nI1214 16:42:21.587342 20613 solver.cpp:337] Iteration 94600, Testing net (#0)\nI1214 16:43:42.286667 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78796\nI1214 16:43:42.287019 20613 solver.cpp:404]     Test net output #1: loss = 0.727907 (* 1 = 0.727907 loss)\nI1214 16:43:43.597756 20613 solver.cpp:228] Iteration 94600, loss = 0.323649\nI1214 16:43:43.597806 20613 solver.cpp:244]     Train net output #0: accuracy = 
0.904\nI1214 16:43:43.597825 20613 solver.cpp:244]     Train net output #1: loss = 0.323649 (* 1 = 0.323649 loss)\nI1214 16:43:43.695915 20613 sgd_solver.cpp:174] Iteration 94600, lr = 2.838\nI1214 16:43:43.708694 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.292647\nI1214 16:46:01.752744 20613 solver.cpp:337] Iteration 94700, Testing net (#0)\nI1214 16:47:22.440701 20613 solver.cpp:404]     Test net output #0: accuracy = 0.57\nI1214 16:47:22.440996 20613 solver.cpp:404]     Test net output #1: loss = 1.85601 (* 1 = 1.85601 loss)\nI1214 16:47:23.751318 20613 solver.cpp:228] Iteration 94700, loss = 0.228\nI1214 16:47:23.751368 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 16:47:23.751386 20613 solver.cpp:244]     Train net output #1: loss = 0.228 (* 1 = 0.228 loss)\nI1214 16:47:23.848455 20613 sgd_solver.cpp:174] Iteration 94700, lr = 2.841\nI1214 16:47:23.861233 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295648\nI1214 16:49:41.867664 20613 solver.cpp:337] Iteration 94800, Testing net (#0)\nI1214 16:51:02.547750 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80912\nI1214 16:51:02.548135 20613 solver.cpp:404]     Test net output #1: loss = 0.587331 (* 1 = 0.587331 loss)\nI1214 16:51:03.858130 20613 solver.cpp:228] Iteration 94800, loss = 0.325256\nI1214 16:51:03.858178 20613 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1214 16:51:03.858193 20613 solver.cpp:244]     Train net output #1: loss = 0.325256 (* 1 = 0.325256 loss)\nI1214 16:51:03.954275 20613 sgd_solver.cpp:174] Iteration 94800, lr = 2.844\nI1214 16:51:03.967032 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.265685\nI1214 16:53:21.933948 20613 solver.cpp:337] Iteration 94900, Testing net (#0)\nI1214 16:54:42.616631 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71812\nI1214 16:54:42.617003 20613 solver.cpp:404]     Test net output #1: loss = 1.04721 (* 1 = 1.04721 loss)\nI1214 16:54:43.928303 20613 solver.cpp:228] Iteration 94900, loss = 
0.297141\nI1214 16:54:43.928351 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 16:54:43.928369 20613 solver.cpp:244]     Train net output #1: loss = 0.297141 (* 1 = 0.297141 loss)\nI1214 16:54:44.022969 20613 sgd_solver.cpp:174] Iteration 94900, lr = 2.847\nI1214 16:54:44.035746 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.332409\nI1214 16:57:02.058099 20613 solver.cpp:337] Iteration 95000, Testing net (#0)\nI1214 16:58:22.753973 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69844\nI1214 16:58:22.754359 20613 solver.cpp:404]     Test net output #1: loss = 1.06532 (* 1 = 1.06532 loss)\nI1214 16:58:24.066175 20613 solver.cpp:228] Iteration 95000, loss = 0.253662\nI1214 16:58:24.066215 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 16:58:24.066231 20613 solver.cpp:244]     Train net output #1: loss = 0.253663 (* 1 = 0.253663 loss)\nI1214 16:58:24.157135 20613 sgd_solver.cpp:174] Iteration 95000, lr = 2.85\nI1214 16:58:24.169850 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.263416\nI1214 17:00:42.106326 20613 solver.cpp:337] Iteration 95100, Testing net (#0)\nI1214 17:02:02.806982 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66888\nI1214 17:02:02.807358 20613 solver.cpp:404]     Test net output #1: loss = 1.12962 (* 1 = 1.12962 loss)\nI1214 17:02:04.118101 20613 solver.cpp:228] Iteration 95100, loss = 0.378448\nI1214 17:02:04.118139 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 17:02:04.118155 20613 solver.cpp:244]     Train net output #1: loss = 0.378448 (* 1 = 0.378448 loss)\nI1214 17:02:04.214653 20613 sgd_solver.cpp:174] Iteration 95100, lr = 2.853\nI1214 17:02:04.227458 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.295318\nI1214 17:04:22.222182 20613 solver.cpp:337] Iteration 95200, Testing net (#0)\nI1214 17:05:42.930069 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76748\nI1214 17:05:42.930461 20613 solver.cpp:404]     Test net output #1: loss = 
0.777728 (* 1 = 0.777728 loss)\nI1214 17:05:44.241341 20613 solver.cpp:228] Iteration 95200, loss = 0.228777\nI1214 17:05:44.241377 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 17:05:44.241394 20613 solver.cpp:244]     Train net output #1: loss = 0.228777 (* 1 = 0.228777 loss)\nI1214 17:05:44.333145 20613 sgd_solver.cpp:174] Iteration 95200, lr = 2.856\nI1214 17:05:44.345875 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.261005\nI1214 17:08:02.308593 20613 solver.cpp:337] Iteration 95300, Testing net (#0)\nI1214 17:09:22.993935 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69276\nI1214 17:09:22.994283 20613 solver.cpp:404]     Test net output #1: loss = 0.977911 (* 1 = 0.977911 loss)\nI1214 17:09:24.303781 20613 solver.cpp:228] Iteration 95300, loss = 0.324629\nI1214 17:09:24.303828 20613 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1214 17:09:24.303845 20613 solver.cpp:244]     Train net output #1: loss = 0.324629 (* 1 = 0.324629 loss)\nI1214 17:09:24.401099 20613 sgd_solver.cpp:174] Iteration 95300, lr = 2.859\nI1214 17:09:24.413826 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.3208\nI1214 17:11:42.433581 20613 solver.cpp:337] Iteration 95400, Testing net (#0)\nI1214 17:13:03.114816 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72504\nI1214 17:13:03.115224 20613 solver.cpp:404]     Test net output #1: loss = 0.907151 (* 1 = 0.907151 loss)\nI1214 17:13:04.425935 20613 solver.cpp:228] Iteration 95400, loss = 0.330282\nI1214 17:13:04.425972 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 17:13:04.425987 20613 solver.cpp:244]     Train net output #1: loss = 0.330283 (* 1 = 0.330283 loss)\nI1214 17:13:04.520362 20613 sgd_solver.cpp:174] Iteration 95400, lr = 2.862\nI1214 17:13:04.533126 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321785\nI1214 17:15:22.639386 20613 solver.cpp:337] Iteration 95500, Testing net (#0)\nI1214 17:16:43.328291 20613 solver.cpp:404]     Test net output 
#0: accuracy = 0.79608\nI1214 17:16:43.328688 20613 solver.cpp:404]     Test net output #1: loss = 0.705999 (* 1 = 0.705999 loss)\nI1214 17:16:44.639580 20613 solver.cpp:228] Iteration 95500, loss = 0.241127\nI1214 17:16:44.639627 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 17:16:44.639645 20613 solver.cpp:244]     Train net output #1: loss = 0.241128 (* 1 = 0.241128 loss)\nI1214 17:16:44.735312 20613 sgd_solver.cpp:174] Iteration 95500, lr = 2.865\nI1214 17:16:44.747954 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.276491\nI1214 17:19:02.808723 20613 solver.cpp:337] Iteration 95600, Testing net (#0)\nI1214 17:20:23.519999 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75084\nI1214 17:20:23.520351 20613 solver.cpp:404]     Test net output #1: loss = 0.785051 (* 1 = 0.785051 loss)\nI1214 17:20:24.831408 20613 solver.cpp:228] Iteration 95600, loss = 0.220207\nI1214 17:20:24.831459 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 17:20:24.831485 20613 solver.cpp:244]     Train net output #1: loss = 0.220208 (* 1 = 0.220208 loss)\nI1214 17:20:24.930099 20613 sgd_solver.cpp:174] Iteration 95600, lr = 2.868\nI1214 17:20:24.942862 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.25537\nI1214 17:22:43.024408 20613 solver.cpp:337] Iteration 95700, Testing net (#0)\nI1214 17:24:03.738304 20613 solver.cpp:404]     Test net output #0: accuracy = 0.80516\nI1214 17:24:03.738632 20613 solver.cpp:404]     Test net output #1: loss = 0.666792 (* 1 = 0.666792 loss)\nI1214 17:24:05.050300 20613 solver.cpp:228] Iteration 95700, loss = 0.258664\nI1214 17:24:05.050349 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 17:24:05.050374 20613 solver.cpp:244]     Train net output #1: loss = 0.258664 (* 1 = 0.258664 loss)\nI1214 17:24:05.140807 20613 sgd_solver.cpp:174] Iteration 95700, lr = 2.871\nI1214 17:24:05.153501 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.299504\nI1214 17:26:23.252668 20613 solver.cpp:337] 
Iteration 95800, Testing net (#0)\nI1214 17:27:43.858271 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74272\nI1214 17:27:43.858575 20613 solver.cpp:404]     Test net output #1: loss = 0.743101 (* 1 = 0.743101 loss)\nI1214 17:27:45.169697 20613 solver.cpp:228] Iteration 95800, loss = 0.261399\nI1214 17:27:45.169739 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 17:27:45.169761 20613 solver.cpp:244]     Train net output #1: loss = 0.2614 (* 1 = 0.2614 loss)\nI1214 17:27:45.268390 20613 sgd_solver.cpp:174] Iteration 95800, lr = 2.874\nI1214 17:27:45.280869 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.308459\nI1214 17:30:03.446744 20613 solver.cpp:337] Iteration 95900, Testing net (#0)\nI1214 17:31:24.027007 20613 solver.cpp:404]     Test net output #0: accuracy = 0.61008\nI1214 17:31:24.027339 20613 solver.cpp:404]     Test net output #1: loss = 1.32676 (* 1 = 1.32676 loss)\nI1214 17:31:25.338143 20613 solver.cpp:228] Iteration 95900, loss = 0.307388\nI1214 17:31:25.338192 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 17:31:25.338217 20613 solver.cpp:244]     Train net output #1: loss = 0.307388 (* 1 = 0.307388 loss)\nI1214 17:31:25.432812 20613 sgd_solver.cpp:174] Iteration 95900, lr = 2.877\nI1214 17:31:25.445590 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.287588\nI1214 17:33:43.470336 20613 solver.cpp:337] Iteration 96000, Testing net (#0)\nI1214 17:35:04.042443 20613 solver.cpp:404]     Test net output #0: accuracy = 0.8064\nI1214 17:35:04.042801 20613 solver.cpp:404]     Test net output #1: loss = 0.610517 (* 1 = 0.610517 loss)\nI1214 17:35:05.352856 20613 solver.cpp:228] Iteration 96000, loss = 0.166367\nI1214 17:35:05.352906 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 17:35:05.352924 20613 solver.cpp:244]     Train net output #1: loss = 0.166368 (* 1 = 0.166368 loss)\nI1214 17:35:05.451308 20613 sgd_solver.cpp:174] Iteration 96000, lr = 2.88\nI1214 17:35:05.464102 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.249061\nI1214 17:37:23.498905 20613 solver.cpp:337] Iteration 96100, Testing net (#0)\nI1214 17:38:44.067451 20613 solver.cpp:404]     Test net output #0: accuracy = 0.56904\nI1214 17:38:44.067860 20613 solver.cpp:404]     Test net output #1: loss = 1.7562 (* 1 = 1.7562 loss)\nI1214 17:38:45.379096 20613 solver.cpp:228] Iteration 96100, loss = 0.324457\nI1214 17:38:45.379137 20613 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1214 17:38:45.379153 20613 solver.cpp:244]     Train net output #1: loss = 0.324457 (* 1 = 0.324457 loss)\nI1214 17:38:45.470181 20613 sgd_solver.cpp:174] Iteration 96100, lr = 2.883\nI1214 17:38:45.482899 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.293587\nI1214 17:41:03.483536 20613 solver.cpp:337] Iteration 96200, Testing net (#0)\nI1214 17:42:24.079893 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75364\nI1214 17:42:24.080302 20613 solver.cpp:404]     Test net output #1: loss = 0.839015 (* 1 = 0.839015 loss)\nI1214 17:42:25.391116 20613 solver.cpp:228] Iteration 96200, loss = 0.24732\nI1214 17:42:25.391166 20613 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1214 17:42:25.391183 20613 solver.cpp:244]     Train net output #1: loss = 0.24732 (* 1 = 0.24732 loss)\nI1214 17:42:25.484825 20613 sgd_solver.cpp:174] Iteration 96200, lr = 2.886\nI1214 17:42:25.497385 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.281163\nI1214 17:44:43.561074 20613 solver.cpp:337] Iteration 96300, Testing net (#0)\nI1214 17:46:04.153998 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6632\nI1214 17:46:04.154314 20613 solver.cpp:404]     Test net output #1: loss = 1.18596 (* 1 = 1.18596 loss)\nI1214 17:46:05.464527 20613 solver.cpp:228] Iteration 96300, loss = 0.273129\nI1214 17:46:05.464579 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 17:46:05.464603 20613 solver.cpp:244]     Train net output #1: loss = 0.27313 (* 1 = 0.27313 loss)\nI1214 
17:46:05.560505 20613 sgd_solver.cpp:174] Iteration 96300, lr = 2.889\nI1214 17:46:05.573324 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.247452\nI1214 17:48:23.689239 20613 solver.cpp:337] Iteration 96400, Testing net (#0)\nI1214 17:49:44.282794 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71356\nI1214 17:49:44.283154 20613 solver.cpp:404]     Test net output #1: loss = 0.994566 (* 1 = 0.994566 loss)\nI1214 17:49:45.594177 20613 solver.cpp:228] Iteration 96400, loss = 0.280243\nI1214 17:49:45.594229 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 17:49:45.594254 20613 solver.cpp:244]     Train net output #1: loss = 0.280243 (* 1 = 0.280243 loss)\nI1214 17:49:45.685634 20613 sgd_solver.cpp:174] Iteration 96400, lr = 2.892\nI1214 17:49:45.698356 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.250386\nI1214 17:52:03.795200 20613 solver.cpp:337] Iteration 96500, Testing net (#0)\nI1214 17:53:24.380211 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72404\nI1214 17:53:24.380560 20613 solver.cpp:404]     Test net output #1: loss = 0.843952 (* 1 = 0.843952 loss)\nI1214 17:53:25.692005 20613 solver.cpp:228] Iteration 96500, loss = 0.30166\nI1214 17:53:25.692055 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 17:53:25.692073 20613 solver.cpp:244]     Train net output #1: loss = 0.30166 (* 1 = 0.30166 loss)\nI1214 17:53:25.784698 20613 sgd_solver.cpp:174] Iteration 96500, lr = 2.895\nI1214 17:53:25.797358 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.285747\nI1214 17:55:43.825067 20613 solver.cpp:337] Iteration 96600, Testing net (#0)\nI1214 17:57:04.411686 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7162\nI1214 17:57:04.412032 20613 solver.cpp:404]     Test net output #1: loss = 0.983919 (* 1 = 0.983919 loss)\nI1214 17:57:05.722283 20613 solver.cpp:228] Iteration 96600, loss = 0.3614\nI1214 17:57:05.722323 20613 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1214 
17:57:05.722345 20613 solver.cpp:244]     Train net output #1: loss = 0.3614 (* 1 = 0.3614 loss)\nI1214 17:57:05.816959 20613 sgd_solver.cpp:174] Iteration 96600, lr = 2.898\nI1214 17:57:05.829531 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.307759\nI1214 17:59:24.001060 20613 solver.cpp:337] Iteration 96700, Testing net (#0)\nI1214 18:00:44.712198 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74368\nI1214 18:00:44.712596 20613 solver.cpp:404]     Test net output #1: loss = 0.793427 (* 1 = 0.793427 loss)\nI1214 18:00:46.023988 20613 solver.cpp:228] Iteration 96700, loss = 0.353404\nI1214 18:00:46.024040 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 18:00:46.024065 20613 solver.cpp:244]     Train net output #1: loss = 0.353404 (* 1 = 0.353404 loss)\nI1214 18:00:46.115803 20613 sgd_solver.cpp:174] Iteration 96700, lr = 2.901\nI1214 18:00:46.128357 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.290841\nI1214 18:03:04.195137 20613 solver.cpp:337] Iteration 96800, Testing net (#0)\nI1214 18:04:24.909801 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77256\nI1214 18:04:24.910141 20613 solver.cpp:404]     Test net output #1: loss = 0.772072 (* 1 = 0.772072 loss)\nI1214 18:04:26.221045 20613 solver.cpp:228] Iteration 96800, loss = 0.231495\nI1214 18:04:26.221087 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 18:04:26.221109 20613 solver.cpp:244]     Train net output #1: loss = 0.231496 (* 1 = 0.231496 loss)\nI1214 18:04:26.314787 20613 sgd_solver.cpp:174] Iteration 96800, lr = 2.904\nI1214 18:04:26.327515 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.261354\nI1214 18:06:44.448432 20613 solver.cpp:337] Iteration 96900, Testing net (#0)\nI1214 18:08:05.163002 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7836\nI1214 18:08:05.163365 20613 solver.cpp:404]     Test net output #1: loss = 0.674281 (* 1 = 0.674281 loss)\nI1214 18:08:06.474025 20613 solver.cpp:228] Iteration 96900, loss = 
0.239915\nI1214 18:08:06.474074 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 18:08:06.474099 20613 solver.cpp:244]     Train net output #1: loss = 0.239915 (* 1 = 0.239915 loss)\nI1214 18:08:06.567003 20613 sgd_solver.cpp:174] Iteration 96900, lr = 2.907\nI1214 18:08:06.579694 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.272941\nI1214 18:10:24.688689 20613 solver.cpp:337] Iteration 97000, Testing net (#0)\nI1214 18:11:45.389869 20613 solver.cpp:404]     Test net output #0: accuracy = 0.78536\nI1214 18:11:45.390175 20613 solver.cpp:404]     Test net output #1: loss = 0.71505 (* 1 = 0.71505 loss)\nI1214 18:11:46.701486 20613 solver.cpp:228] Iteration 97000, loss = 0.269649\nI1214 18:11:46.701537 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 18:11:46.701562 20613 solver.cpp:244]     Train net output #1: loss = 0.269649 (* 1 = 0.269649 loss)\nI1214 18:11:46.797705 20613 sgd_solver.cpp:174] Iteration 97000, lr = 2.91\nI1214 18:11:46.810473 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.247981\nI1214 18:14:04.898591 20613 solver.cpp:337] Iteration 97100, Testing net (#0)\nI1214 18:15:25.593119 20613 solver.cpp:404]     Test net output #0: accuracy = 0.75652\nI1214 18:15:25.593425 20613 solver.cpp:404]     Test net output #1: loss = 0.802566 (* 1 = 0.802566 loss)\nI1214 18:15:26.904330 20613 solver.cpp:228] Iteration 97100, loss = 0.350524\nI1214 18:15:26.904378 20613 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1214 18:15:26.904395 20613 solver.cpp:244]     Train net output #1: loss = 0.350524 (* 1 = 0.350524 loss)\nI1214 18:15:27.001152 20613 sgd_solver.cpp:174] Iteration 97100, lr = 2.913\nI1214 18:15:27.013902 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.329617\nI1214 18:17:45.070629 20613 solver.cpp:337] Iteration 97200, Testing net (#0)\nI1214 18:19:05.774992 20613 solver.cpp:404]     Test net output #0: accuracy = 0.54792\nI1214 18:19:05.775347 20613 solver.cpp:404]     Test net output #1: loss = 
1.76944 (* 1 = 1.76944 loss)\nI1214 18:19:07.085695 20613 solver.cpp:228] Iteration 97200, loss = 0.348695\nI1214 18:19:07.085742 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 18:19:07.085759 20613 solver.cpp:244]     Train net output #1: loss = 0.348695 (* 1 = 0.348695 loss)\nI1214 18:19:07.176362 20613 sgd_solver.cpp:174] Iteration 97200, lr = 2.916\nI1214 18:19:07.189025 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.31507\nI1214 18:21:25.245358 20613 solver.cpp:337] Iteration 97300, Testing net (#0)\nI1214 18:22:45.968590 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79612\nI1214 18:22:45.969020 20613 solver.cpp:404]     Test net output #1: loss = 0.603635 (* 1 = 0.603635 loss)\nI1214 18:22:47.279232 20613 solver.cpp:228] Iteration 97300, loss = 0.221748\nI1214 18:22:47.279275 20613 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1214 18:22:47.279292 20613 solver.cpp:244]     Train net output #1: loss = 0.221748 (* 1 = 0.221748 loss)\nI1214 18:22:47.373359 20613 sgd_solver.cpp:174] Iteration 97300, lr = 2.919\nI1214 18:22:47.386132 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.266851\nI1214 18:25:05.456787 20613 solver.cpp:337] Iteration 97400, Testing net (#0)\nI1214 18:26:26.178328 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71916\nI1214 18:26:26.178745 20613 solver.cpp:404]     Test net output #1: loss = 1.07611 (* 1 = 1.07611 loss)\nI1214 18:26:27.488822 20613 solver.cpp:228] Iteration 97400, loss = 0.256336\nI1214 18:26:27.488859 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 18:26:27.488875 20613 solver.cpp:244]     Train net output #1: loss = 0.256337 (* 1 = 0.256337 loss)\nI1214 18:26:27.578567 20613 sgd_solver.cpp:174] Iteration 97400, lr = 2.922\nI1214 18:26:27.591362 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.283711\nI1214 18:28:45.764271 20613 solver.cpp:337] Iteration 97500, Testing net (#0)\nI1214 18:30:06.490892 20613 solver.cpp:404]     Test net output #0: 
accuracy = 0.74824\nI1214 18:30:06.491310 20613 solver.cpp:404]     Test net output #1: loss = 0.825773 (* 1 = 0.825773 loss)\nI1214 18:30:07.801759 20613 solver.cpp:228] Iteration 97500, loss = 0.220329\nI1214 18:30:07.801803 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 18:30:07.801820 20613 solver.cpp:244]     Train net output #1: loss = 0.220329 (* 1 = 0.220329 loss)\nI1214 18:30:07.898735 20613 sgd_solver.cpp:174] Iteration 97500, lr = 2.925\nI1214 18:30:07.911473 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.252983\nI1214 18:32:26.002753 20613 solver.cpp:337] Iteration 97600, Testing net (#0)\nI1214 18:33:46.724859 20613 solver.cpp:404]     Test net output #0: accuracy = 0.677\nI1214 18:33:46.725282 20613 solver.cpp:404]     Test net output #1: loss = 1.17812 (* 1 = 1.17812 loss)\nI1214 18:33:48.036561 20613 solver.cpp:228] Iteration 97600, loss = 0.333395\nI1214 18:33:48.036609 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 18:33:48.036626 20613 solver.cpp:244]     Train net output #1: loss = 0.333396 (* 1 = 0.333396 loss)\nI1214 18:33:48.133229 20613 sgd_solver.cpp:174] Iteration 97600, lr = 2.928\nI1214 18:33:48.146000 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.280284\nI1214 18:36:06.141227 20613 solver.cpp:337] Iteration 97700, Testing net (#0)\nI1214 18:37:26.859457 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72524\nI1214 18:37:26.859903 20613 solver.cpp:404]     Test net output #1: loss = 0.994355 (* 1 = 0.994355 loss)\nI1214 18:37:28.170030 20613 solver.cpp:228] Iteration 97700, loss = 0.328163\nI1214 18:37:28.170078 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 18:37:28.170096 20613 solver.cpp:244]     Train net output #1: loss = 0.328163 (* 1 = 0.328163 loss)\nI1214 18:37:28.263329 20613 sgd_solver.cpp:174] Iteration 97700, lr = 2.931\nI1214 18:37:28.276012 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.321817\nI1214 18:39:46.267570 20613 solver.cpp:337] 
Iteration 97800, Testing net (#0)\nI1214 18:41:06.986944 20613 solver.cpp:404]     Test net output #0: accuracy = 0.678\nI1214 18:41:06.987347 20613 solver.cpp:404]     Test net output #1: loss = 1.2315 (* 1 = 1.2315 loss)\nI1214 18:41:08.298276 20613 solver.cpp:228] Iteration 97800, loss = 0.305139\nI1214 18:41:08.298328 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 18:41:08.298346 20613 solver.cpp:244]     Train net output #1: loss = 0.30514 (* 1 = 0.30514 loss)\nI1214 18:41:08.389111 20613 sgd_solver.cpp:174] Iteration 97800, lr = 2.934\nI1214 18:41:08.401834 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.291286\nI1214 18:43:26.458573 20613 solver.cpp:337] Iteration 97900, Testing net (#0)\nI1214 18:44:47.181264 20613 solver.cpp:404]     Test net output #0: accuracy = 0.69416\nI1214 18:44:47.181717 20613 solver.cpp:404]     Test net output #1: loss = 1.06057 (* 1 = 1.06057 loss)\nI1214 18:44:48.492740 20613 solver.cpp:228] Iteration 97900, loss = 0.258749\nI1214 18:44:48.492790 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 18:44:48.492808 20613 solver.cpp:244]     Train net output #1: loss = 0.258749 (* 1 = 0.258749 loss)\nI1214 18:44:48.588129 20613 sgd_solver.cpp:174] Iteration 97900, lr = 2.937\nI1214 18:44:48.600895 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.244879\nI1214 18:47:06.624861 20613 solver.cpp:337] Iteration 98000, Testing net (#0)\nI1214 18:48:27.351523 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72744\nI1214 18:48:27.351994 20613 solver.cpp:404]     Test net output #1: loss = 0.997714 (* 1 = 0.997714 loss)\nI1214 18:48:28.663496 20613 solver.cpp:228] Iteration 98000, loss = 0.291495\nI1214 18:48:28.663547 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 18:48:28.663563 20613 solver.cpp:244]     Train net output #1: loss = 0.291495 (* 1 = 0.291495 loss)\nI1214 18:48:28.757695 20613 sgd_solver.cpp:174] Iteration 98000, lr = 2.94\nI1214 18:48:28.770433 20613 
sgd_solver.cpp:149] Gradient: L2 norm 0.312562\nI1214 18:50:46.806289 20613 solver.cpp:337] Iteration 98100, Testing net (#0)\nI1214 18:52:07.534636 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81092\nI1214 18:52:07.535079 20613 solver.cpp:404]     Test net output #1: loss = 0.600259 (* 1 = 0.600259 loss)\nI1214 18:52:08.845358 20613 solver.cpp:228] Iteration 98100, loss = 0.197276\nI1214 18:52:08.845398 20613 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1214 18:52:08.845414 20613 solver.cpp:244]     Train net output #1: loss = 0.197276 (* 1 = 0.197276 loss)\nI1214 18:52:08.938580 20613 sgd_solver.cpp:174] Iteration 98100, lr = 2.943\nI1214 18:52:08.951267 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.242228\nI1214 18:54:27.075441 20613 solver.cpp:337] Iteration 98200, Testing net (#0)\nI1214 18:55:47.787952 20613 solver.cpp:404]     Test net output #0: accuracy = 0.82476\nI1214 18:55:47.788390 20613 solver.cpp:404]     Test net output #1: loss = 0.547872 (* 1 = 0.547872 loss)\nI1214 18:55:49.098970 20613 solver.cpp:228] Iteration 98200, loss = 0.29239\nI1214 18:55:49.099009 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 18:55:49.099025 20613 solver.cpp:244]     Train net output #1: loss = 0.29239 (* 1 = 0.29239 loss)\nI1214 18:55:49.192797 20613 sgd_solver.cpp:174] Iteration 98200, lr = 2.946\nI1214 18:55:49.205423 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.270869\nI1214 18:58:07.212806 20613 solver.cpp:337] Iteration 98300, Testing net (#0)\nI1214 18:59:27.928846 20613 solver.cpp:404]     Test net output #0: accuracy = 0.7146\nI1214 18:59:27.929261 20613 solver.cpp:404]     Test net output #1: loss = 0.933365 (* 1 = 0.933365 loss)\nI1214 18:59:29.239785 20613 solver.cpp:228] Iteration 98300, loss = 0.329014\nI1214 18:59:29.239833 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 18:59:29.239851 20613 solver.cpp:244]     Train net output #1: loss = 0.329015 (* 1 = 0.329015 loss)\nI1214 
18:59:29.329365 20613 sgd_solver.cpp:174] Iteration 98300, lr = 2.949\nI1214 18:59:29.342072 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.274716\nI1214 19:01:47.300608 20613 solver.cpp:337] Iteration 98400, Testing net (#0)\nI1214 19:03:08.024266 20613 solver.cpp:404]     Test net output #0: accuracy = 0.77544\nI1214 19:03:08.024705 20613 solver.cpp:404]     Test net output #1: loss = 0.699252 (* 1 = 0.699252 loss)\nI1214 19:03:09.335852 20613 solver.cpp:228] Iteration 98400, loss = 0.267175\nI1214 19:03:09.335903 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 19:03:09.335921 20613 solver.cpp:244]     Train net output #1: loss = 0.267175 (* 1 = 0.267175 loss)\nI1214 19:03:09.430857 20613 sgd_solver.cpp:174] Iteration 98400, lr = 2.952\nI1214 19:03:09.443591 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.248197\nI1214 19:05:27.394348 20613 solver.cpp:337] Iteration 98500, Testing net (#0)\nI1214 19:06:48.113795 20613 solver.cpp:404]     Test net output #0: accuracy = 0.74752\nI1214 19:06:48.114251 20613 solver.cpp:404]     Test net output #1: loss = 0.782843 (* 1 = 0.782843 loss)\nI1214 19:06:49.425611 20613 solver.cpp:228] Iteration 98500, loss = 0.304538\nI1214 19:06:49.425649 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 19:06:49.425665 20613 solver.cpp:244]     Train net output #1: loss = 0.304538 (* 1 = 0.304538 loss)\nI1214 19:06:49.524339 20613 sgd_solver.cpp:174] Iteration 98500, lr = 2.955\nI1214 19:06:49.537094 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.275663\nI1214 19:09:07.620443 20613 solver.cpp:337] Iteration 98600, Testing net (#0)\nI1214 19:10:28.335561 20613 solver.cpp:404]     Test net output #0: accuracy = 0.81052\nI1214 19:10:28.336004 20613 solver.cpp:404]     Test net output #1: loss = 0.563602 (* 1 = 0.563602 loss)\nI1214 19:10:29.647222 20613 solver.cpp:228] Iteration 98600, loss = 0.325665\nI1214 19:10:29.647270 20613 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1214 
19:10:29.647287 20613 solver.cpp:244]     Train net output #1: loss = 0.325665 (* 1 = 0.325665 loss)\nI1214 19:10:29.737397 20613 sgd_solver.cpp:174] Iteration 98600, lr = 2.958\nI1214 19:10:29.750103 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.257442\nI1214 19:12:47.759223 20613 solver.cpp:337] Iteration 98700, Testing net (#0)\nI1214 19:14:08.459884 20613 solver.cpp:404]     Test net output #0: accuracy = 0.71928\nI1214 19:14:08.460275 20613 solver.cpp:404]     Test net output #1: loss = 1.09575 (* 1 = 1.09575 loss)\nI1214 19:14:09.771319 20613 solver.cpp:228] Iteration 98700, loss = 0.293382\nI1214 19:14:09.771369 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 19:14:09.771386 20613 solver.cpp:244]     Train net output #1: loss = 0.293382 (* 1 = 0.293382 loss)\nI1214 19:14:09.873730 20613 sgd_solver.cpp:174] Iteration 98700, lr = 2.961\nI1214 19:14:09.886461 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.260629\nI1214 19:16:27.956316 20613 solver.cpp:337] Iteration 98800, Testing net (#0)\nI1214 19:17:48.665743 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73004\nI1214 19:17:48.666185 20613 solver.cpp:404]     Test net output #1: loss = 0.874151 (* 1 = 0.874151 loss)\nI1214 19:17:49.976378 20613 solver.cpp:228] Iteration 98800, loss = 0.307688\nI1214 19:17:49.976426 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 19:17:49.976444 20613 solver.cpp:244]     Train net output #1: loss = 0.307688 (* 1 = 0.307688 loss)\nI1214 19:17:50.069154 20613 sgd_solver.cpp:174] Iteration 98800, lr = 2.964\nI1214 19:17:50.081864 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.263763\nI1214 19:20:08.144487 20613 solver.cpp:337] Iteration 98900, Testing net (#0)\nI1214 19:21:28.845747 20613 solver.cpp:404]     Test net output #0: accuracy = 0.6804\nI1214 19:21:28.846201 20613 solver.cpp:404]     Test net output #1: loss = 1.1167 (* 1 = 1.1167 loss)\nI1214 19:21:30.155726 20613 solver.cpp:228] Iteration 98900, loss = 
0.276627\nI1214 19:21:30.155763 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 19:21:30.155779 20613 solver.cpp:244]     Train net output #1: loss = 0.276627 (* 1 = 0.276627 loss)\nI1214 19:21:30.252482 20613 sgd_solver.cpp:174] Iteration 98900, lr = 2.967\nI1214 19:21:30.265198 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.268237\nI1214 19:23:48.280314 20613 solver.cpp:337] Iteration 99000, Testing net (#0)\nI1214 19:25:08.987956 20613 solver.cpp:404]     Test net output #0: accuracy = 0.79636\nI1214 19:25:08.988390 20613 solver.cpp:404]     Test net output #1: loss = 0.660406 (* 1 = 0.660406 loss)\nI1214 19:25:10.298799 20613 solver.cpp:228] Iteration 99000, loss = 0.337599\nI1214 19:25:10.298849 20613 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1214 19:25:10.298866 20613 solver.cpp:244]     Train net output #1: loss = 0.3376 (* 1 = 0.3376 loss)\nI1214 19:25:10.389775 20613 sgd_solver.cpp:174] Iteration 99000, lr = 2.97\nI1214 19:25:10.402510 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.271001\nI1214 19:27:28.434711 20613 solver.cpp:337] Iteration 99100, Testing net (#0)\nI1214 19:28:49.128981 20613 solver.cpp:404]     Test net output #0: accuracy = 0.66144\nI1214 19:28:49.129425 20613 solver.cpp:404]     Test net output #1: loss = 1.22264 (* 1 = 1.22264 loss)\nI1214 19:28:50.439559 20613 solver.cpp:228] Iteration 99100, loss = 0.24615\nI1214 19:28:50.439607 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 19:28:50.439625 20613 solver.cpp:244]     Train net output #1: loss = 0.24615 (* 1 = 0.24615 loss)\nI1214 19:28:50.536824 20613 sgd_solver.cpp:174] Iteration 99100, lr = 2.973\nI1214 19:28:50.549614 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.26101\nI1214 19:31:08.540010 20613 solver.cpp:337] Iteration 99200, Testing net (#0)\nI1214 19:32:29.226326 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76052\nI1214 19:32:29.226778 20613 solver.cpp:404]     Test net output #1: loss = 0.73129 (* 
1 = 0.73129 loss)\nI1214 19:32:30.536974 20613 solver.cpp:228] Iteration 99200, loss = 0.338288\nI1214 19:32:30.537022 20613 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1214 19:32:30.537040 20613 solver.cpp:244]     Train net output #1: loss = 0.338288 (* 1 = 0.338288 loss)\nI1214 19:32:30.633256 20613 sgd_solver.cpp:174] Iteration 99200, lr = 2.976\nI1214 19:32:30.645993 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.28884\nI1214 19:34:48.605888 20613 solver.cpp:337] Iteration 99300, Testing net (#0)\nI1214 19:36:09.315044 20613 solver.cpp:404]     Test net output #0: accuracy = 0.767\nI1214 19:36:09.315460 20613 solver.cpp:404]     Test net output #1: loss = 0.778689 (* 1 = 0.778689 loss)\nI1214 19:36:10.625599 20613 solver.cpp:228] Iteration 99300, loss = 0.298182\nI1214 19:36:10.625648 20613 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1214 19:36:10.625664 20613 solver.cpp:244]     Train net output #1: loss = 0.298182 (* 1 = 0.298182 loss)\nI1214 19:36:10.721133 20613 sgd_solver.cpp:174] Iteration 99300, lr = 2.979\nI1214 19:36:10.733758 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.235424\nI1214 19:38:28.705516 20613 solver.cpp:337] Iteration 99400, Testing net (#0)\nI1214 19:39:49.407667 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67032\nI1214 19:39:49.408118 20613 solver.cpp:404]     Test net output #1: loss = 1.39474 (* 1 = 1.39474 loss)\nI1214 19:39:50.718205 20613 solver.cpp:228] Iteration 99400, loss = 0.253769\nI1214 19:39:50.718255 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 19:39:50.718273 20613 solver.cpp:244]     Train net output #1: loss = 0.253769 (* 1 = 0.253769 loss)\nI1214 19:39:50.813350 20613 sgd_solver.cpp:174] Iteration 99400, lr = 2.982\nI1214 19:39:50.826087 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.278492\nI1214 19:42:08.803552 20613 solver.cpp:337] Iteration 99500, Testing net (#0)\nI1214 19:43:29.416281 20613 solver.cpp:404]     Test net output #0: accuracy = 
0.75624\nI1214 19:43:29.416721 20613 solver.cpp:404]     Test net output #1: loss = 0.779273 (* 1 = 0.779273 loss)\nI1214 19:43:30.727690 20613 solver.cpp:228] Iteration 99500, loss = 0.271255\nI1214 19:43:30.727731 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 19:43:30.727747 20613 solver.cpp:244]     Train net output #1: loss = 0.271255 (* 1 = 0.271255 loss)\nI1214 19:43:30.821782 20613 sgd_solver.cpp:174] Iteration 99500, lr = 2.985\nI1214 19:43:30.834534 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.271926\nI1214 19:45:48.873705 20613 solver.cpp:337] Iteration 99600, Testing net (#0)\nI1214 19:47:09.494679 20613 solver.cpp:404]     Test net output #0: accuracy = 0.67128\nI1214 19:47:09.495097 20613 solver.cpp:404]     Test net output #1: loss = 1.27489 (* 1 = 1.27489 loss)\nI1214 19:47:10.806324 20613 solver.cpp:228] Iteration 99600, loss = 0.299562\nI1214 19:47:10.806361 20613 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1214 19:47:10.806376 20613 solver.cpp:244]     Train net output #1: loss = 0.299562 (* 1 = 0.299562 loss)\nI1214 19:47:10.899621 20613 sgd_solver.cpp:174] Iteration 99600, lr = 2.988\nI1214 19:47:10.912364 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.291789\nI1214 19:49:28.932735 20613 solver.cpp:337] Iteration 99700, Testing net (#0)\nI1214 19:50:49.558429 20613 solver.cpp:404]     Test net output #0: accuracy = 0.62452\nI1214 19:50:49.558851 20613 solver.cpp:404]     Test net output #1: loss = 1.70787 (* 1 = 1.70787 loss)\nI1214 19:50:50.869514 20613 solver.cpp:228] Iteration 99700, loss = 0.30876\nI1214 19:50:50.869561 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 19:50:50.869580 20613 solver.cpp:244]     Train net output #1: loss = 0.30876 (* 1 = 0.30876 loss)\nI1214 19:50:50.965229 20613 sgd_solver.cpp:174] Iteration 99700, lr = 2.991\nI1214 19:50:50.977986 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.284076\nI1214 19:53:09.044775 20613 solver.cpp:337] Iteration 99800, 
Testing net (#0)\nI1214 19:54:29.660006 20613 solver.cpp:404]     Test net output #0: accuracy = 0.73216\nI1214 19:54:29.660454 20613 solver.cpp:404]     Test net output #1: loss = 0.872708 (* 1 = 0.872708 loss)\nI1214 19:54:30.971829 20613 solver.cpp:228] Iteration 99800, loss = 0.281471\nI1214 19:54:30.971868 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 19:54:30.971881 20613 solver.cpp:244]     Train net output #1: loss = 0.281471 (* 1 = 0.281471 loss)\nI1214 19:54:31.065606 20613 sgd_solver.cpp:174] Iteration 99800, lr = 2.994\nI1214 19:54:31.078285 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.294965\nI1214 19:56:49.028403 20613 solver.cpp:337] Iteration 99900, Testing net (#0)\nI1214 19:58:09.658008 20613 solver.cpp:404]     Test net output #0: accuracy = 0.76888\nI1214 19:58:09.658468 20613 solver.cpp:404]     Test net output #1: loss = 0.759165 (* 1 = 0.759165 loss)\nI1214 19:58:10.969305 20613 solver.cpp:228] Iteration 99900, loss = 0.293256\nI1214 19:58:10.969352 20613 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1214 19:58:10.969368 20613 solver.cpp:244]     Train net output #1: loss = 0.293256 (* 1 = 0.293256 loss)\nI1214 19:58:11.063598 20613 sgd_solver.cpp:174] Iteration 99900, lr = 2.997\nI1214 19:58:11.076154 20613 sgd_solver.cpp:149] Gradient: L2 norm 0.279563\nI1214 20:00:29.155941 20613 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range3Iter100kFig2b_iter_100000.caffemodel\nI1214 20:00:29.742666 20613 sgd_solver.cpp:341] Snapshotting solver state to binary proto file examples/sc/snapshots/range3Iter100kFig2b_iter_100000.solverstate\nI1214 20:00:30.188860 20613 solver.cpp:317] Iteration 100000, loss = 0.29898\nI1214 20:00:30.188905 20613 solver.cpp:337] Iteration 100000, Testing net (#0)\nI1214 20:01:50.790117 20613 solver.cpp:404]     Test net output #0: accuracy = 0.72248\nI1214 20:01:50.790575 20613 solver.cpp:404]     Test net output #1: loss = 1.11955 (* 1 = 1.11955 
loss)\nI1214 20:01:50.790594 20613 solver.cpp:322] Optimization Done.\nI1214 20:01:56.511255 20613 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range3Iter20kFig2b",
    "content": "I1212 06:17:45.837908 18921 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1212 06:17:45.840430 18921 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1212 06:17:45.841647 18921 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1212 06:17:45.842859 18921 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1212 06:17:45.844069 18921 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1212 06:17:45.845489 18921 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1212 06:17:45.846717 18921 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1212 06:17:45.847944 18921 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1212 06:17:45.849469 18921 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1212 06:17:46.287474 18921 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 20000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 20000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range3Iter20kFig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI1212 06:17:46.292151 18921 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1212 06:17:46.344518 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:46.344599 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:46.345901 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1212 06:17:46.345955 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI1212 06:17:46.345983 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI1212 06:17:46.346004 18921 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI1212 06:17:46.346024 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:17:46.346041 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:17:46.346060 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:17:46.346079 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:17:46.346099 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:17:46.346118 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:17:46.346138 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:17:46.346155 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:17:46.346175 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:17:46.346194 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:17:46.346213 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:17:46.346232 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:17:46.346251 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:17:46.346271 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI1212 
06:17:46.346290 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:17:46.346309 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:17:46.346345 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:17:46.346375 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:17:46.346401 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:17:46.346421 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:17:46.346441 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:17:46.346457 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:17:46.346477 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:17:46.346493 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:17:46.346510 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:17:46.346529 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:17:46.346549 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:17:46.346566 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:17:46.346585 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI1212 06:17:46.346602 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:17:46.346621 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:17:46.346640 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:17:46.346659 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:17:46.346679 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:17:46.346698 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:17:46.346716 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:17:46.346742 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:17:46.346760 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:17:46.346778 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:17:46.346796 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:17:46.346815 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:17:46.346834 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:17:46.346854 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI1212 06:17:46.346870 18921 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI1212 06:17:46.346890 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:17:46.346909 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:17:46.346925 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:17:46.346954 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:17:46.346976 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:17:46.346995 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:17:46.347015 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:17:46.347033 18921 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:17:46.348886 18921 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI1212 06:17:46.351063 18921 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:17:46.352265 18921 net.cpp:100] Creating Layer dataLayer\nI1212 06:17:46.352303 18921 net.cpp:408] dataLayer -> data_top\nI1212 06:17:46.352545 18921 net.cpp:408] dataLayer -> label\nI1212 06:17:46.352660 18921 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:17:46.400148 18927 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1212 06:17:46.452164 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:46.459944 18921 net.cpp:150] Setting up dataLayer\nI1212 06:17:46.460032 18921 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:17:46.460053 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.460063 18921 net.cpp:165] Memory required for data: 1536500\nI1212 06:17:46.460088 18921 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:17:46.460114 18921 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:17:46.460129 18921 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:17:46.460161 18921 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:17:46.460191 18921 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:17:46.460302 18921 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:17:46.460322 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.460335 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.460345 18921 net.cpp:165] Memory required for data: 1537500\nI1212 06:17:46.460355 18921 layer_factory.hpp:77] Creating layer pre_conv\nI1212 06:17:46.460405 18921 
net.cpp:100] Creating Layer pre_conv\nI1212 06:17:46.460419 18921 net.cpp:434] pre_conv <- data_top\nI1212 06:17:46.460441 18921 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:17:46.462131 18921 net.cpp:150] Setting up pre_conv\nI1212 06:17:46.462154 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.462165 18921 net.cpp:165] Memory required for data: 9729500\nI1212 06:17:46.462328 18921 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:17:46.462419 18921 net.cpp:100] Creating Layer pre_bn\nI1212 06:17:46.462436 18921 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:17:46.462453 18921 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:17:46.462824 18921 net.cpp:150] Setting up pre_bn\nI1212 06:17:46.462846 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.462857 18921 net.cpp:165] Memory required for data: 17921500\nI1212 06:17:46.462887 18921 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:46.462963 18921 net.cpp:100] Creating Layer pre_scale\nI1212 06:17:46.462980 18921 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:17:46.462994 18921 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:17:46.463372 18921 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:46.465302 18921 net.cpp:150] Setting up pre_scale\nI1212 06:17:46.465324 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.465334 18921 net.cpp:165] Memory required for data: 26113500\nI1212 06:17:46.465354 18921 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:17:46.465438 18921 net.cpp:100] Creating Layer pre_relu\nI1212 06:17:46.465453 18921 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:17:46.465474 18921 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:17:46.465495 18921 net.cpp:150] Setting up pre_relu\nI1212 06:17:46.465510 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.465519 18921 net.cpp:165] Memory required for data: 34305500\nI1212 06:17:46.465530 18921 layer_factory.hpp:77] Creating layer 
pre_bn_top_pre_relu_0_split\nI1212 06:17:46.465545 18921 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:17:46.465555 18921 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:17:46.465574 18921 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:17:46.465595 18921 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:17:46.465675 18921 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:17:46.465697 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.465710 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.465719 18921 net.cpp:165] Memory required for data: 50689500\nI1212 06:17:46.465730 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:17:46.465751 18921 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:17:46.465764 18921 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:17:46.465790 18921 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:17:46.466166 18921 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:17:46.466187 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.466195 18921 net.cpp:165] Memory required for data: 58881500\nI1212 06:17:46.466217 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:17:46.466243 18921 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:17:46.466255 18921 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:17:46.466271 18921 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:17:46.466550 18921 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:17:46.466573 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.466584 18921 net.cpp:165] Memory required for data: 67073500\nI1212 06:17:46.466605 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:46.466622 18921 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1212 06:17:46.466634 18921 net.cpp:434] 
L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:17:46.466650 18921 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.466737 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:46.466913 18921 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:17:46.466933 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.466943 18921 net.cpp:165] Memory required for data: 75265500\nI1212 06:17:46.466960 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:17:46.466976 18921 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:17:46.466996 18921 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:17:46.467017 18921 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.467037 18921 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:17:46.467241 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.467250 18921 net.cpp:165] Memory required for data: 83457500\nI1212 06:17:46.467262 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:17:46.467283 18921 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:17:46.467294 18921 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:17:46.467320 18921 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:17:46.467734 18921 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:17:46.467754 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.467763 18921 net.cpp:165] Memory required for data: 91649500\nI1212 06:17:46.467782 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:17:46.467799 18921 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:17:46.467810 18921 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:17:46.467831 18921 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:17:46.468113 18921 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:17:46.468137 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.468147 18921 
net.cpp:165] Memory required for data: 99841500\nI1212 06:17:46.468175 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:46.468194 18921 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:17:46.468204 18921 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:17:46.468220 18921 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.468310 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:46.468492 18921 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:17:46.468511 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.468520 18921 net.cpp:165] Memory required for data: 108033500\nI1212 06:17:46.468539 18921 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:17:46.468564 18921 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:17:46.468575 18921 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:17:46.468587 18921 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:17:46.468608 18921 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:17:46.468720 18921 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:17:46.468740 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.468750 18921 net.cpp:165] Memory required for data: 116225500\nI1212 06:17:46.468767 18921 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:17:46.468788 18921 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:17:46.468799 18921 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:17:46.468816 18921 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.468833 18921 net.cpp:150] Setting up L1_b1_relu\nI1212 06:17:46.468847 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.468857 18921 net.cpp:165] Memory required for data: 124417500\nI1212 06:17:46.468868 18921 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.468885 18921 
net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.468895 18921 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:17:46.468911 18921 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:46.468930 18921 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:46.469008 18921 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.469032 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.469043 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.469053 18921 net.cpp:165] Memory required for data: 140801500\nI1212 06:17:46.469074 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:17:46.469099 18921 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:17:46.469113 18921 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:46.469132 18921 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:17:46.469491 18921 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:17:46.469511 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.469521 18921 net.cpp:165] Memory required for data: 148993500\nI1212 06:17:46.469538 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:17:46.469560 18921 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:17:46.469573 18921 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:17:46.469595 18921 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:17:46.469872 18921 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:17:46.469892 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.469902 18921 net.cpp:165] Memory required for data: 157185500\nI1212 06:17:46.469923 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:46.469939 18921 net.cpp:100] Creating Layer 
L1_b2_cbr1_scale\nI1212 06:17:46.469950 18921 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:17:46.469969 18921 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.470057 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:46.470237 18921 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:17:46.470268 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.470278 18921 net.cpp:165] Memory required for data: 165377500\nI1212 06:17:46.470299 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:17:46.470314 18921 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:17:46.470324 18921 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:17:46.470337 18921 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.470355 18921 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:17:46.470378 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.470388 18921 net.cpp:165] Memory required for data: 173569500\nI1212 06:17:46.470398 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:17:46.470427 18921 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:17:46.470440 18921 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:17:46.470463 18921 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:17:46.470818 18921 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:17:46.470836 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.470845 18921 net.cpp:165] Memory required for data: 181761500\nI1212 06:17:46.470863 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:17:46.470885 18921 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:17:46.470897 18921 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:17:46.470917 18921 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:17:46.471189 18921 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:17:46.471207 18921 net.cpp:157] Top 
shape: 125 16 32 32 (2048000)\nI1212 06:17:46.471216 18921 net.cpp:165] Memory required for data: 189953500\nI1212 06:17:46.471246 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:46.471271 18921 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:17:46.471283 18921 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:17:46.471299 18921 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.471403 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:46.471582 18921 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:17:46.471601 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.471611 18921 net.cpp:165] Memory required for data: 198145500\nI1212 06:17:46.471629 18921 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:17:46.471645 18921 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:17:46.471668 18921 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:17:46.471680 18921 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:46.471700 18921 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:17:46.471760 18921 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:17:46.471778 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.471788 18921 net.cpp:165] Memory required for data: 206337500\nI1212 06:17:46.471799 18921 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:17:46.471813 18921 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:17:46.471823 18921 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:17:46.471843 18921 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:17:46.471861 18921 net.cpp:150] Setting up L1_b2_relu\nI1212 06:17:46.471876 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.471886 18921 net.cpp:165] Memory required for data: 214529500\nI1212 06:17:46.471896 18921 layer_factory.hpp:77] Creating layer 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.471910 18921 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.471921 18921 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:17:46.471935 18921 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:46.471954 18921 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:46.472034 18921 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.472054 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.472067 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.472076 18921 net.cpp:165] Memory required for data: 230913500\nI1212 06:17:46.472086 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:17:46.472108 18921 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:17:46.472120 18921 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:46.472144 18921 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:17:46.472499 18921 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:17:46.472519 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.472528 18921 net.cpp:165] Memory required for data: 239105500\nI1212 06:17:46.472546 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:17:46.472564 18921 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:17:46.472576 18921 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:17:46.472596 18921 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:17:46.472875 18921 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:17:46.472895 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.472904 18921 net.cpp:165] Memory required for data: 247297500\nI1212 06:17:46.472925 18921 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_scale\nI1212 06:17:46.472945 18921 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:17:46.472957 18921 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:17:46.472972 18921 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.473062 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:46.473244 18921 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:17:46.473264 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.473273 18921 net.cpp:165] Memory required for data: 255489500\nI1212 06:17:46.473292 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:17:46.473307 18921 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:17:46.473318 18921 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:17:46.473337 18921 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.473357 18921 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:17:46.473383 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.473403 18921 net.cpp:165] Memory required for data: 263681500\nI1212 06:17:46.473414 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:17:46.473440 18921 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:17:46.473453 18921 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:17:46.473475 18921 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:17:46.473824 18921 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:17:46.473845 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.473853 18921 net.cpp:165] Memory required for data: 271873500\nI1212 06:17:46.473871 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:17:46.473892 18921 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:17:46.473903 18921 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:17:46.473924 18921 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:17:46.474200 18921 
net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:17:46.474218 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.474228 18921 net.cpp:165] Memory required for data: 280065500\nI1212 06:17:46.474249 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:46.474277 18921 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:17:46.474290 18921 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:17:46.474306 18921 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.474396 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:46.474575 18921 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:17:46.474594 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.474603 18921 net.cpp:165] Memory required for data: 288257500\nI1212 06:17:46.474622 18921 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:17:46.474643 18921 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:17:46.474655 18921 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:17:46.474668 18921 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:46.474684 18921 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:17:46.474742 18921 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:17:46.474762 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.474771 18921 net.cpp:165] Memory required for data: 296449500\nI1212 06:17:46.474781 18921 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:17:46.474795 18921 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:17:46.474805 18921 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:17:46.474824 18921 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.474843 18921 net.cpp:150] Setting up L1_b3_relu\nI1212 06:17:46.474858 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.474866 18921 net.cpp:165] Memory required for 
data: 304641500\nI1212 06:17:46.474876 18921 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.474890 18921 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.474900 18921 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:17:46.474915 18921 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:46.474933 18921 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:46.475013 18921 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.475030 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.475044 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.475054 18921 net.cpp:165] Memory required for data: 321025500\nI1212 06:17:46.475064 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:17:46.475083 18921 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:17:46.475095 18921 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:46.475121 18921 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:17:46.475515 18921 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:17:46.475535 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.475544 18921 net.cpp:165] Memory required for data: 329217500\nI1212 06:17:46.475563 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:17:46.475579 18921 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:17:46.475590 18921 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:17:46.475610 18921 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:17:46.475891 18921 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:17:46.475914 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.475924 18921 net.cpp:165] Memory required for data: 
337409500\nI1212 06:17:46.475945 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:46.475961 18921 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:17:46.475972 18921 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:17:46.475987 18921 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.476074 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:46.476260 18921 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:17:46.476279 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.476289 18921 net.cpp:165] Memory required for data: 345601500\nI1212 06:17:46.476306 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:17:46.476332 18921 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:17:46.476346 18921 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:17:46.476366 18921 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.476387 18921 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:17:46.476402 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.476411 18921 net.cpp:165] Memory required for data: 353793500\nI1212 06:17:46.476421 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:17:46.476447 18921 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:17:46.476460 18921 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:17:46.476481 18921 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:17:46.476837 18921 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:17:46.476856 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.476866 18921 net.cpp:165] Memory required for data: 361985500\nI1212 06:17:46.476882 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:17:46.476899 18921 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:17:46.476910 18921 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:17:46.476930 18921 
net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:17:46.477216 18921 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:17:46.477236 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.477244 18921 net.cpp:165] Memory required for data: 370177500\nI1212 06:17:46.477270 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:46.477288 18921 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:17:46.477300 18921 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:17:46.477315 18921 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.477407 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:46.477600 18921 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:17:46.477622 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.477630 18921 net.cpp:165] Memory required for data: 378369500\nI1212 06:17:46.477649 18921 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:17:46.477671 18921 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:17:46.477682 18921 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:17:46.477696 18921 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:46.477716 18921 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:17:46.477769 18921 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:17:46.477797 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.477809 18921 net.cpp:165] Memory required for data: 386561500\nI1212 06:17:46.477820 18921 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:17:46.477843 18921 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:17:46.477855 18921 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:17:46.477869 18921 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.477888 18921 net.cpp:150] Setting up L1_b4_relu\nI1212 06:17:46.477902 18921 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:17:46.477912 18921 net.cpp:165] Memory required for data: 394753500\nI1212 06:17:46.477922 18921 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.477936 18921 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.477946 18921 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:17:46.477962 18921 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:46.477982 18921 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:46.478062 18921 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.478083 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.478096 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.478106 18921 net.cpp:165] Memory required for data: 411137500\nI1212 06:17:46.478116 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:17:46.478142 18921 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:17:46.478153 18921 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:46.478173 18921 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:17:46.478530 18921 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:17:46.478550 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.478559 18921 net.cpp:165] Memory required for data: 419329500\nI1212 06:17:46.478605 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:17:46.478629 18921 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:17:46.478642 18921 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:17:46.478659 18921 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:17:46.478945 18921 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1212 06:17:46.478968 18921 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:46.478978 18921 net.cpp:165] Memory required for data: 427521500\nI1212 06:17:46.479001 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:46.479017 18921 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:17:46.479028 18921 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:17:46.479043 18921 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.479131 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:46.479311 18921 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:17:46.479331 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.479339 18921 net.cpp:165] Memory required for data: 435713500\nI1212 06:17:46.479358 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:17:46.479385 18921 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:17:46.479398 18921 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:17:46.479413 18921 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.479431 18921 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:17:46.479451 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.479460 18921 net.cpp:165] Memory required for data: 443905500\nI1212 06:17:46.479470 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:17:46.479491 18921 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:17:46.479503 18921 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:17:46.479526 18921 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:17:46.479907 18921 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:17:46.479928 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.479938 18921 net.cpp:165] Memory required for data: 452097500\nI1212 06:17:46.479954 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:17:46.479971 18921 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:17:46.479982 18921 
net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:17:46.480006 18921 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:17:46.480284 18921 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:17:46.480304 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.480314 18921 net.cpp:165] Memory required for data: 460289500\nI1212 06:17:46.480334 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:46.480355 18921 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:17:46.480374 18921 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:17:46.480391 18921 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.480489 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:46.480666 18921 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:17:46.480685 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.480695 18921 net.cpp:165] Memory required for data: 468481500\nI1212 06:17:46.480712 18921 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:17:46.480734 18921 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:17:46.480746 18921 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:17:46.480760 18921 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:46.480777 18921 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:17:46.480831 18921 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:17:46.480849 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.480859 18921 net.cpp:165] Memory required for data: 476673500\nI1212 06:17:46.480870 18921 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:17:46.480885 18921 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:17:46.480895 18921 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:17:46.480914 18921 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.480933 18921 net.cpp:150] 
Setting up L1_b5_relu\nI1212 06:17:46.480948 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.480957 18921 net.cpp:165] Memory required for data: 484865500\nI1212 06:17:46.480967 18921 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.480981 18921 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.480993 18921 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:17:46.481006 18921 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:46.481026 18921 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:46.481106 18921 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.481124 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.481137 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.481145 18921 net.cpp:165] Memory required for data: 501249500\nI1212 06:17:46.481155 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:17:46.481176 18921 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:17:46.481187 18921 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:46.481210 18921 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:17:46.481565 18921 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:17:46.481585 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.481593 18921 net.cpp:165] Memory required for data: 509441500\nI1212 06:17:46.481611 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:17:46.481639 18921 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:17:46.481650 18921 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:17:46.481672 18921 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1212 06:17:46.481966 18921 net.cpp:150] Setting up 
L1_b6_cbr1_bn\nI1212 06:17:46.481990 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.482000 18921 net.cpp:165] Memory required for data: 517633500\nI1212 06:17:46.482023 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:46.482039 18921 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:17:46.482049 18921 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:17:46.482065 18921 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.482154 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:46.482339 18921 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:17:46.482358 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.482375 18921 net.cpp:165] Memory required for data: 525825500\nI1212 06:17:46.482394 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:17:46.482415 18921 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:17:46.482426 18921 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:17:46.482441 18921 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.482460 18921 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:17:46.482475 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.482484 18921 net.cpp:165] Memory required for data: 534017500\nI1212 06:17:46.482494 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:17:46.482527 18921 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:17:46.482542 18921 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:17:46.482563 18921 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:17:46.482931 18921 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:17:46.482950 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.482959 18921 net.cpp:165] Memory required for data: 542209500\nI1212 06:17:46.482977 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:17:46.482995 18921 
net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:17:46.483006 18921 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:17:46.483033 18921 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:17:46.483392 18921 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:17:46.483412 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.483420 18921 net.cpp:165] Memory required for data: 550401500\nI1212 06:17:46.483443 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:46.483464 18921 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:17:46.483477 18921 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:17:46.483490 18921 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.483582 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:46.483767 18921 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:17:46.483786 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.483795 18921 net.cpp:165] Memory required for data: 558593500\nI1212 06:17:46.483815 18921 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:17:46.483845 18921 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:17:46.483860 18921 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:17:46.483873 18921 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:46.483889 18921 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:17:46.483943 18921 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:17:46.483963 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.483971 18921 net.cpp:165] Memory required for data: 566785500\nI1212 06:17:46.483983 18921 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:17:46.484001 18921 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:17:46.484026 18921 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1212 06:17:46.484041 18921 net.cpp:395] L1_b6_relu -> 
L1_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.484061 18921 net.cpp:150] Setting up L1_b6_relu\nI1212 06:17:46.484076 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.484084 18921 net.cpp:165] Memory required for data: 574977500\nI1212 06:17:46.484094 18921 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.484108 18921 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.484119 18921 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:17:46.484135 18921 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:46.484154 18921 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:46.484236 18921 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.484258 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.484272 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.484280 18921 net.cpp:165] Memory required for data: 591361500\nI1212 06:17:46.484292 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:17:46.484318 18921 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:17:46.484331 18921 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:46.484350 18921 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:17:46.484726 18921 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:17:46.484746 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.484755 18921 net.cpp:165] Memory required for data: 599553500\nI1212 06:17:46.484772 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:17:46.484797 18921 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:17:46.484810 18921 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1212 06:17:46.484833 18921 net.cpp:408] L1_b7_cbr1_bn -> 
L1_b7_cbr1_bn_top\nI1212 06:17:46.485114 18921 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:17:46.485133 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.485142 18921 net.cpp:165] Memory required for data: 607745500\nI1212 06:17:46.485164 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:46.485182 18921 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:17:46.485193 18921 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:17:46.485208 18921 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.485301 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:46.485486 18921 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:17:46.485505 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.485514 18921 net.cpp:165] Memory required for data: 615937500\nI1212 06:17:46.485533 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:17:46.485548 18921 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:17:46.485559 18921 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:17:46.485579 18921 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.485599 18921 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:17:46.485612 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.485622 18921 net.cpp:165] Memory required for data: 624129500\nI1212 06:17:46.485632 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:17:46.485657 18921 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:17:46.485671 18921 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:17:46.485693 18921 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:17:46.486063 18921 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:17:46.486083 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.486093 18921 net.cpp:165] Memory required for data: 632321500\nI1212 06:17:46.486110 18921 
layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:17:46.486141 18921 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:17:46.486155 18921 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:17:46.486172 18921 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:17:46.486476 18921 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:17:46.486496 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.486505 18921 net.cpp:165] Memory required for data: 640513500\nI1212 06:17:46.486527 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:46.486544 18921 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:17:46.486555 18921 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:17:46.486570 18921 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.486661 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:46.486851 18921 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:17:46.486871 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.486879 18921 net.cpp:165] Memory required for data: 648705500\nI1212 06:17:46.486897 18921 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:17:46.486914 18921 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:17:46.486925 18921 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:17:46.486938 18921 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:46.486959 18921 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:17:46.487013 18921 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:17:46.487031 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.487041 18921 net.cpp:165] Memory required for data: 656897500\nI1212 06:17:46.487051 18921 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:17:46.487071 18921 net.cpp:100] Creating Layer L1_b7_relu\nI1212 06:17:46.487082 18921 net.cpp:434] L1_b7_relu <- 
L1_b7_sum_eltwise_top\nI1212 06:17:46.487097 18921 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.487115 18921 net.cpp:150] Setting up L1_b7_relu\nI1212 06:17:46.487129 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.487139 18921 net.cpp:165] Memory required for data: 665089500\nI1212 06:17:46.487149 18921 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.487162 18921 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.487174 18921 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:17:46.487188 18921 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:46.487208 18921 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:46.487288 18921 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.487308 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.487321 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.487331 18921 net.cpp:165] Memory required for data: 681473500\nI1212 06:17:46.487341 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:17:46.487373 18921 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:17:46.487387 18921 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:46.487406 18921 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:17:46.487774 18921 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:17:46.487794 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.487803 18921 net.cpp:165] Memory required for data: 689665500\nI1212 06:17:46.487820 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:17:46.487843 18921 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1212 06:17:46.487854 18921 net.cpp:434] L1_b8_cbr1_bn <- 
L1_b8_cbr1_conv_top\nI1212 06:17:46.487871 18921 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:17:46.488176 18921 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:17:46.488203 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.488214 18921 net.cpp:165] Memory required for data: 697857500\nI1212 06:17:46.488235 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:46.488251 18921 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:17:46.488262 18921 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:17:46.488276 18921 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.488373 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:46.488559 18921 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:17:46.488579 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.488589 18921 net.cpp:165] Memory required for data: 706049500\nI1212 06:17:46.488607 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:17:46.488625 18921 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:17:46.488636 18921 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:17:46.488656 18921 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.488675 18921 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:17:46.488689 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.488698 18921 net.cpp:165] Memory required for data: 714241500\nI1212 06:17:46.488708 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:17:46.488734 18921 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:17:46.488747 18921 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:17:46.488765 18921 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:17:46.489140 18921 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:17:46.489159 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.489169 18921 
net.cpp:165] Memory required for data: 722433500\nI1212 06:17:46.489187 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:17:46.489209 18921 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:17:46.489222 18921 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:17:46.489238 18921 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:17:46.489534 18921 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:17:46.489560 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.489572 18921 net.cpp:165] Memory required for data: 730625500\nI1212 06:17:46.489593 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:46.489610 18921 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:17:46.489622 18921 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:17:46.489639 18921 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.489723 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:46.489904 18921 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:17:46.489923 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.489933 18921 net.cpp:165] Memory required for data: 738817500\nI1212 06:17:46.489951 18921 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:17:46.489972 18921 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:17:46.489984 18921 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:17:46.489997 18921 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:46.490018 18921 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:17:46.490070 18921 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:17:46.490089 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.490099 18921 net.cpp:165] Memory required for data: 747009500\nI1212 06:17:46.490109 18921 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1212 06:17:46.490130 18921 net.cpp:100] Creating 
Layer L1_b8_relu\nI1212 06:17:46.490144 18921 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:17:46.490159 18921 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.490176 18921 net.cpp:150] Setting up L1_b8_relu\nI1212 06:17:46.490191 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.490200 18921 net.cpp:165] Memory required for data: 755201500\nI1212 06:17:46.490221 18921 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.490236 18921 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.490247 18921 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:17:46.490262 18921 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:46.490283 18921 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:46.490371 18921 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.490392 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.490406 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.490416 18921 net.cpp:165] Memory required for data: 771585500\nI1212 06:17:46.490425 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:17:46.490451 18921 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:17:46.490464 18921 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:46.490483 18921 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:17:46.490867 18921 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:17:46.490887 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.490896 18921 net.cpp:165] Memory required for data: 779777500\nI1212 06:17:46.490914 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1212 06:17:46.490936 18921 net.cpp:100] Creating Layer 
L1_b9_cbr1_bn\nI1212 06:17:46.490948 18921 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:17:46.490965 18921 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:17:46.491255 18921 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:17:46.491276 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.491284 18921 net.cpp:165] Memory required for data: 787969500\nI1212 06:17:46.491305 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:46.491322 18921 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:17:46.491333 18921 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:17:46.491353 18921 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.491448 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:46.491633 18921 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:17:46.491652 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.491662 18921 net.cpp:165] Memory required for data: 796161500\nI1212 06:17:46.491679 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:17:46.491695 18921 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:17:46.491706 18921 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:17:46.491721 18921 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.491740 18921 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:17:46.491755 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.491763 18921 net.cpp:165] Memory required for data: 804353500\nI1212 06:17:46.491773 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:17:46.491801 18921 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:17:46.491813 18921 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:17:46.491835 18921 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:17:46.492213 18921 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:17:46.492233 18921 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.492242 18921 net.cpp:165] Memory required for data: 812545500\nI1212 06:17:46.492261 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:17:46.492282 18921 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:17:46.492295 18921 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:17:46.492316 18921 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:17:46.492616 18921 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:17:46.492643 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.492652 18921 net.cpp:165] Memory required for data: 820737500\nI1212 06:17:46.492704 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:46.492728 18921 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:17:46.492741 18921 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:17:46.492758 18921 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.492851 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:46.493039 18921 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:17:46.493058 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.493067 18921 net.cpp:165] Memory required for data: 828929500\nI1212 06:17:46.493085 18921 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:17:46.493103 18921 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:17:46.493114 18921 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:17:46.493127 18921 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:46.493142 18921 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:17:46.493196 18921 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:17:46.493214 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.493224 18921 net.cpp:165] Memory required for data: 837121500\nI1212 06:17:46.493234 18921 layer_factory.hpp:77] Creating layer 
L1_b9_relu\nI1212 06:17:46.493254 18921 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:17:46.493266 18921 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:17:46.493285 18921 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.493304 18921 net.cpp:150] Setting up L1_b9_relu\nI1212 06:17:46.493319 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.493329 18921 net.cpp:165] Memory required for data: 845313500\nI1212 06:17:46.493337 18921 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.493351 18921 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.493371 18921 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:17:46.493387 18921 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:46.493407 18921 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:46.493489 18921 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.493507 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.493520 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.493530 18921 net.cpp:165] Memory required for data: 861697500\nI1212 06:17:46.493540 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:17:46.493564 18921 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:17:46.493578 18921 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:46.493597 18921 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:17:46.493978 18921 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:17:46.493998 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.494006 18921 net.cpp:165] Memory required for data: 863745500\nI1212 06:17:46.494024 18921 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_bn\nI1212 06:17:46.494046 18921 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:17:46.494058 18921 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:17:46.494079 18921 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:17:46.494366 18921 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:17:46.494386 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.494395 18921 net.cpp:165] Memory required for data: 865793500\nI1212 06:17:46.494417 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:46.494433 18921 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:17:46.494446 18921 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:17:46.494469 18921 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.494568 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:46.494751 18921 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:17:46.494771 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.494779 18921 net.cpp:165] Memory required for data: 867841500\nI1212 06:17:46.494798 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:17:46.494818 18921 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:17:46.494830 18921 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:17:46.494844 18921 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.494863 18921 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:17:46.494876 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.494886 18921 net.cpp:165] Memory required for data: 869889500\nI1212 06:17:46.494896 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:17:46.494922 18921 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:17:46.494935 18921 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:17:46.494958 18921 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:17:46.495337 18921 net.cpp:150] 
Setting up L2_b1_cbr2_conv\nI1212 06:17:46.495357 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.495373 18921 net.cpp:165] Memory required for data: 871937500\nI1212 06:17:46.495391 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:17:46.495414 18921 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:17:46.495425 18921 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:17:46.495447 18921 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:17:46.495726 18921 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:17:46.495745 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.495755 18921 net.cpp:165] Memory required for data: 873985500\nI1212 06:17:46.495776 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:46.495793 18921 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:17:46.495805 18921 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:17:46.495820 18921 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.495911 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:46.496094 18921 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:17:46.496114 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.496122 18921 net.cpp:165] Memory required for data: 876033500\nI1212 06:17:46.496140 18921 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:17:46.496163 18921 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:17:46.496176 18921 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:46.496192 18921 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:17:46.496330 18921 net.cpp:150] Setting up L2_b1_pool\nI1212 06:17:46.496350 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.496359 18921 net.cpp:165] Memory required for data: 878081500\nI1212 06:17:46.496376 18921 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:17:46.496400 18921 net.cpp:100] 
Creating Layer L2_b1_sum_eltwise\nI1212 06:17:46.496412 18921 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:17:46.496426 18921 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:17:46.496441 18921 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:17:46.496496 18921 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:17:46.496516 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.496526 18921 net.cpp:165] Memory required for data: 880129500\nI1212 06:17:46.496537 18921 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:17:46.496556 18921 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:17:46.496568 18921 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:17:46.496584 18921 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.496613 18921 net.cpp:150] Setting up L2_b1_relu\nI1212 06:17:46.496629 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.496639 18921 net.cpp:165] Memory required for data: 882177500\nI1212 06:17:46.496649 18921 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:17:46.496726 18921 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:17:46.496747 18921 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:17:46.499282 18921 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:17:46.499310 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.499320 18921 net.cpp:165] Memory required for data: 884225500\nI1212 06:17:46.499331 18921 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:17:46.499348 18921 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:17:46.499367 18921 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:17:46.499382 18921 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:17:46.499398 18921 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:17:46.499470 18921 net.cpp:150] Setting up L2_b1_concat0\nI1212 06:17:46.499490 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 
06:17:46.499498 18921 net.cpp:165] Memory required for data: 888321500\nI1212 06:17:46.499510 18921 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.499524 18921 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.499536 18921 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:17:46.499554 18921 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:46.499577 18921 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:46.499672 18921 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.499693 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.499706 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.499716 18921 net.cpp:165] Memory required for data: 896513500\nI1212 06:17:46.499725 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:17:46.499750 18921 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:17:46.499764 18921 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:46.499791 18921 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:17:46.501386 18921 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:17:46.501408 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.501418 18921 net.cpp:165] Memory required for data: 900609500\nI1212 06:17:46.501437 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:17:46.501459 18921 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:17:46.501472 18921 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:17:46.501494 18921 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:17:46.501775 18921 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:17:46.501793 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.501802 18921 net.cpp:165] Memory required for data: 
904705500\nI1212 06:17:46.501824 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:46.501842 18921 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:17:46.501854 18921 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:17:46.501870 18921 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.501963 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:46.502151 18921 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:17:46.502173 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.502184 18921 net.cpp:165] Memory required for data: 908801500\nI1212 06:17:46.502203 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:17:46.502218 18921 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:17:46.502229 18921 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:17:46.502243 18921 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.502274 18921 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:17:46.502290 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.502300 18921 net.cpp:165] Memory required for data: 912897500\nI1212 06:17:46.502310 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:17:46.502336 18921 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:17:46.502349 18921 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:17:46.502379 18921 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:17:46.502899 18921 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:17:46.502919 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.502928 18921 net.cpp:165] Memory required for data: 916993500\nI1212 06:17:46.502946 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:17:46.502969 18921 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:17:46.502980 18921 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:17:46.503001 18921 
net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:17:46.503289 18921 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:17:46.503309 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.503319 18921 net.cpp:165] Memory required for data: 921089500\nI1212 06:17:46.503340 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:46.503357 18921 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:17:46.503376 18921 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:17:46.503392 18921 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.503486 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:46.503669 18921 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:17:46.503687 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.503696 18921 net.cpp:165] Memory required for data: 925185500\nI1212 06:17:46.503715 18921 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:17:46.503736 18921 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:17:46.503748 18921 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:17:46.503762 18921 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:46.503778 18921 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:17:46.503823 18921 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:17:46.503841 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.503851 18921 net.cpp:165] Memory required for data: 929281500\nI1212 06:17:46.503861 18921 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:17:46.503880 18921 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:17:46.503892 18921 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:17:46.503907 18921 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:17:46.503926 18921 net.cpp:150] Setting up L2_b2_relu\nI1212 06:17:46.503942 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.503952 18921 net.cpp:165] Memory required for data: 933377500\nI1212 06:17:46.503962 18921 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.503975 18921 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.503985 18921 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:17:46.504001 18921 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:46.504020 18921 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:46.504104 18921 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.504123 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.504137 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.504145 18921 net.cpp:165] Memory required for data: 941569500\nI1212 06:17:46.504156 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:17:46.504181 18921 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:17:46.504204 18921 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:46.504225 18921 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:17:46.504739 18921 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:17:46.504758 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.504767 18921 net.cpp:165] Memory required for data: 945665500\nI1212 06:17:46.504786 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:17:46.504807 18921 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:17:46.504819 18921 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:17:46.504842 18921 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:17:46.505126 18921 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1212 06:17:46.505146 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.505156 18921 net.cpp:165] Memory required for data: 949761500\nI1212 06:17:46.505177 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:46.505193 18921 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:17:46.505204 18921 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:17:46.505219 18921 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.505314 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:46.505506 18921 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:17:46.505524 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.505533 18921 net.cpp:165] Memory required for data: 953857500\nI1212 06:17:46.505553 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:17:46.505573 18921 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:17:46.505584 18921 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:17:46.505599 18921 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.505619 18921 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:17:46.505631 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.505640 18921 net.cpp:165] Memory required for data: 957953500\nI1212 06:17:46.505651 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:17:46.505677 18921 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:17:46.505690 18921 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:17:46.505713 18921 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:17:46.506232 18921 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:17:46.506252 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.506260 18921 net.cpp:165] Memory required for data: 962049500\nI1212 06:17:46.506278 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:17:46.506301 18921 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:17:46.506314 18921 
net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:17:46.506335 18921 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:17:46.506624 18921 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:17:46.506647 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.506657 18921 net.cpp:165] Memory required for data: 966145500\nI1212 06:17:46.506680 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:46.506696 18921 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:17:46.506707 18921 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:17:46.506722 18921 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.506813 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:46.507006 18921 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:17:46.507025 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.507035 18921 net.cpp:165] Memory required for data: 970241500\nI1212 06:17:46.507052 18921 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:17:46.507069 18921 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:17:46.507081 18921 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:17:46.507098 18921 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:46.507125 18921 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:17:46.507174 18921 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:17:46.507194 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.507205 18921 net.cpp:165] Memory required for data: 974337500\nI1212 06:17:46.507213 18921 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:17:46.507251 18921 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:17:46.507266 18921 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:17:46.507282 18921 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.507300 18921 net.cpp:150] 
Setting up L2_b3_relu\nI1212 06:17:46.507314 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.507324 18921 net.cpp:165] Memory required for data: 978433500\nI1212 06:17:46.507335 18921 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.507349 18921 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.507359 18921 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:17:46.507385 18921 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:46.507405 18921 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:46.507493 18921 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.507513 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.507524 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.507534 18921 net.cpp:165] Memory required for data: 986625500\nI1212 06:17:46.507544 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:17:46.507565 18921 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:17:46.507577 18921 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:46.507601 18921 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:17:46.508117 18921 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:17:46.508137 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.508147 18921 net.cpp:165] Memory required for data: 990721500\nI1212 06:17:46.508165 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:17:46.508186 18921 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:17:46.508198 18921 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:17:46.508215 18921 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1212 06:17:46.508512 18921 net.cpp:150] Setting up 
L2_b4_cbr1_bn\nI1212 06:17:46.508532 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.508540 18921 net.cpp:165] Memory required for data: 994817500\nI1212 06:17:46.508561 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:46.508582 18921 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:17:46.508594 18921 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:17:46.508610 18921 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.508703 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:46.508890 18921 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:17:46.508909 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.508919 18921 net.cpp:165] Memory required for data: 998913500\nI1212 06:17:46.508937 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:17:46.508956 18921 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:17:46.508968 18921 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:17:46.508986 18921 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.509006 18921 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:17:46.509021 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.509029 18921 net.cpp:165] Memory required for data: 1003009500\nI1212 06:17:46.509039 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:17:46.509061 18921 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:17:46.509081 18921 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:17:46.509105 18921 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:17:46.509625 18921 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:17:46.509646 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.509655 18921 net.cpp:165] Memory required for data: 1007105500\nI1212 06:17:46.509673 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:17:46.509690 18921 
net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:17:46.509701 18921 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:17:46.509726 18921 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:17:46.510015 18921 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:17:46.510033 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510042 18921 net.cpp:165] Memory required for data: 1011201500\nI1212 06:17:46.510063 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:46.510085 18921 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:17:46.510097 18921 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:17:46.510113 18921 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.510205 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:46.510399 18921 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:17:46.510419 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510428 18921 net.cpp:165] Memory required for data: 1015297500\nI1212 06:17:46.510447 18921 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:17:46.510468 18921 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:17:46.510480 18921 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:17:46.510493 18921 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:46.510509 18921 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:17:46.510560 18921 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:17:46.510581 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510591 18921 net.cpp:165] Memory required for data: 1019393500\nI1212 06:17:46.510601 18921 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:17:46.510615 18921 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:17:46.510625 18921 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1212 06:17:46.510644 18921 net.cpp:395] L2_b4_relu -> 
L2_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.510664 18921 net.cpp:150] Setting up L2_b4_relu\nI1212 06:17:46.510679 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510689 18921 net.cpp:165] Memory required for data: 1023489500\nI1212 06:17:46.510699 18921 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.510712 18921 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.510723 18921 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:17:46.510738 18921 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:46.510757 18921 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:46.510841 18921 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.510864 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510876 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.510885 18921 net.cpp:165] Memory required for data: 1031681500\nI1212 06:17:46.510896 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:17:46.510916 18921 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:17:46.510929 18921 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:46.510952 18921 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:17:46.511482 18921 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:17:46.511502 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.511520 18921 net.cpp:165] Memory required for data: 1035777500\nI1212 06:17:46.511538 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:17:46.511556 18921 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:17:46.511569 18921 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1212 06:17:46.511590 18921 net.cpp:408] L2_b5_cbr1_bn 
-> L2_b5_cbr1_bn_top\nI1212 06:17:46.511893 18921 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:17:46.511912 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.511921 18921 net.cpp:165] Memory required for data: 1039873500\nI1212 06:17:46.511943 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:46.511965 18921 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:17:46.511977 18921 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:17:46.511992 18921 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.512082 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:46.512275 18921 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:17:46.512295 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.512305 18921 net.cpp:165] Memory required for data: 1043969500\nI1212 06:17:46.512322 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:17:46.512343 18921 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:17:46.512356 18921 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:17:46.512378 18921 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.512398 18921 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:17:46.512413 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.512423 18921 net.cpp:165] Memory required for data: 1048065500\nI1212 06:17:46.512432 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:17:46.512459 18921 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:17:46.512472 18921 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:17:46.512495 18921 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:17:46.513003 18921 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:17:46.513023 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.513032 18921 net.cpp:165] Memory required for data: 1052161500\nI1212 06:17:46.513049 18921 
layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:17:46.513067 18921 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:17:46.513078 18921 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:17:46.513099 18921 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:17:46.513401 18921 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:17:46.513422 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.513432 18921 net.cpp:165] Memory required for data: 1056257500\nI1212 06:17:46.513453 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:46.513474 18921 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:17:46.513487 18921 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:17:46.513502 18921 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.513592 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:46.513782 18921 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:17:46.513801 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.513810 18921 net.cpp:165] Memory required for data: 1060353500\nI1212 06:17:46.513828 18921 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:17:46.513856 18921 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:17:46.513870 18921 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:17:46.513883 18921 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:46.513900 18921 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:17:46.513952 18921 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:17:46.513970 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.513980 18921 net.cpp:165] Memory required for data: 1064449500\nI1212 06:17:46.514001 18921 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:17:46.514016 18921 net.cpp:100] Creating Layer L2_b5_relu\nI1212 06:17:46.514027 18921 net.cpp:434] L2_b5_relu <- 
L2_b5_sum_eltwise_top\nI1212 06:17:46.514042 18921 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.514065 18921 net.cpp:150] Setting up L2_b5_relu\nI1212 06:17:46.514081 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.514091 18921 net.cpp:165] Memory required for data: 1068545500\nI1212 06:17:46.514101 18921 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.514116 18921 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.514127 18921 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:17:46.514142 18921 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:46.514163 18921 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:46.514246 18921 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.514269 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.514283 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.514293 18921 net.cpp:165] Memory required for data: 1076737500\nI1212 06:17:46.514304 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:17:46.514324 18921 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:17:46.514338 18921 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:46.514367 18921 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:17:46.514886 18921 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:17:46.514905 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.514915 18921 net.cpp:165] Memory required for data: 1080833500\nI1212 06:17:46.514932 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:17:46.514950 18921 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1212 06:17:46.514961 18921 net.cpp:434] L2_b6_cbr1_bn <- 
L2_b6_cbr1_conv_top\nI1212 06:17:46.514982 18921 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:17:46.515276 18921 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:17:46.515295 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.515305 18921 net.cpp:165] Memory required for data: 1084929500\nI1212 06:17:46.515326 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:46.515347 18921 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:17:46.515359 18921 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:17:46.515383 18921 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.515470 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:46.515656 18921 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:17:46.515676 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.515684 18921 net.cpp:165] Memory required for data: 1089025500\nI1212 06:17:46.515702 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:17:46.515722 18921 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:17:46.515733 18921 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:17:46.515748 18921 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.515766 18921 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:17:46.515780 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.515790 18921 net.cpp:165] Memory required for data: 1093121500\nI1212 06:17:46.515800 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:17:46.515826 18921 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:17:46.515839 18921 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:17:46.515864 18921 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:17:46.516388 18921 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:17:46.516408 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.516427 18921 
net.cpp:165] Memory required for data: 1097217500\nI1212 06:17:46.516444 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:17:46.516463 18921 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:17:46.516474 18921 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:17:46.516492 18921 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:17:46.516793 18921 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:17:46.516813 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.516821 18921 net.cpp:165] Memory required for data: 1101313500\nI1212 06:17:46.516844 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:46.516865 18921 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:17:46.516878 18921 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:17:46.516894 18921 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.516983 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:46.517174 18921 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:17:46.517191 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.517201 18921 net.cpp:165] Memory required for data: 1105409500\nI1212 06:17:46.517220 18921 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:17:46.517236 18921 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:17:46.517248 18921 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:17:46.517261 18921 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:46.517282 18921 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:17:46.517329 18921 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:17:46.517357 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.517375 18921 net.cpp:165] Memory required for data: 1109505500\nI1212 06:17:46.517386 18921 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1212 06:17:46.517400 18921 net.cpp:100] Creating 
Layer L2_b6_relu\nI1212 06:17:46.517412 18921 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:17:46.517426 18921 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.517446 18921 net.cpp:150] Setting up L2_b6_relu\nI1212 06:17:46.517460 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.517469 18921 net.cpp:165] Memory required for data: 1113601500\nI1212 06:17:46.517480 18921 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.517498 18921 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.517510 18921 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:17:46.517525 18921 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:46.517545 18921 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:46.517630 18921 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.517648 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.517662 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.517670 18921 net.cpp:165] Memory required for data: 1121793500\nI1212 06:17:46.517680 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:17:46.517701 18921 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:17:46.517714 18921 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:46.517736 18921 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:17:46.518260 18921 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:17:46.518280 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.518290 18921 net.cpp:165] Memory required for data: 1125889500\nI1212 06:17:46.518307 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1212 06:17:46.518324 18921 net.cpp:100] Creating Layer 
L2_b7_cbr1_bn\nI1212 06:17:46.518337 18921 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:17:46.518373 18921 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:17:46.518688 18921 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:17:46.518708 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.518718 18921 net.cpp:165] Memory required for data: 1129985500\nI1212 06:17:46.518739 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:46.518760 18921 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:17:46.518771 18921 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:17:46.518788 18921 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.518879 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:46.519070 18921 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:17:46.519089 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.519098 18921 net.cpp:165] Memory required for data: 1134081500\nI1212 06:17:46.519117 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:17:46.519134 18921 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:17:46.519145 18921 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:17:46.519166 18921 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.519187 18921 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:17:46.519201 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.519210 18921 net.cpp:165] Memory required for data: 1138177500\nI1212 06:17:46.519220 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:17:46.519246 18921 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:17:46.519259 18921 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:17:46.519281 18921 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:17:46.519809 18921 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:17:46.519829 18921 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.519839 18921 net.cpp:165] Memory required for data: 1142273500\nI1212 06:17:46.519856 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:17:46.519873 18921 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:17:46.519884 18921 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:17:46.519901 18921 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:17:46.520200 18921 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:17:46.520218 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.520228 18921 net.cpp:165] Memory required for data: 1146369500\nI1212 06:17:46.520249 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:46.520267 18921 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:17:46.520277 18921 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:17:46.520298 18921 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.520397 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:46.520588 18921 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:17:46.520606 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.520615 18921 net.cpp:165] Memory required for data: 1150465500\nI1212 06:17:46.520633 18921 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:17:46.520650 18921 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:17:46.520663 18921 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:17:46.520674 18921 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:46.520695 18921 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:17:46.520745 18921 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:17:46.520767 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.520778 18921 net.cpp:165] Memory required for data: 1154561500\nI1212 06:17:46.520788 18921 layer_factory.hpp:77] 
Creating layer L2_b7_relu\nI1212 06:17:46.520803 18921 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:17:46.520814 18921 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:17:46.520829 18921 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.520858 18921 net.cpp:150] Setting up L2_b7_relu\nI1212 06:17:46.520872 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.520882 18921 net.cpp:165] Memory required for data: 1158657500\nI1212 06:17:46.520892 18921 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.520917 18921 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.520931 18921 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:17:46.520947 18921 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:46.520967 18921 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:46.521049 18921 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.521073 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.521088 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.521097 18921 net.cpp:165] Memory required for data: 1166849500\nI1212 06:17:46.521108 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:17:46.521131 18921 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:17:46.521145 18921 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:46.521164 18921 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:17:46.521698 18921 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:17:46.521724 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.521734 18921 net.cpp:165] Memory required for data: 1170945500\nI1212 06:17:46.521752 18921 layer_factory.hpp:77] Creating 
layer L2_b8_cbr1_bn\nI1212 06:17:46.521770 18921 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:17:46.521782 18921 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:17:46.521806 18921 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:17:46.522097 18921 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:17:46.522117 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.522126 18921 net.cpp:165] Memory required for data: 1175041500\nI1212 06:17:46.522147 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:46.522169 18921 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:17:46.522181 18921 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:17:46.522197 18921 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.522286 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:46.522487 18921 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:17:46.522506 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.522516 18921 net.cpp:165] Memory required for data: 1179137500\nI1212 06:17:46.522534 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:17:46.522550 18921 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:17:46.522562 18921 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:17:46.522581 18921 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.522601 18921 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:17:46.522615 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.522625 18921 net.cpp:165] Memory required for data: 1183233500\nI1212 06:17:46.522635 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:17:46.522660 18921 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:17:46.522673 18921 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:17:46.522691 18921 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:17:46.523214 18921 
net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:17:46.523233 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.523243 18921 net.cpp:165] Memory required for data: 1187329500\nI1212 06:17:46.523260 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:17:46.523283 18921 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:17:46.523295 18921 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:17:46.523324 18921 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:17:46.523639 18921 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:17:46.523659 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.523669 18921 net.cpp:165] Memory required for data: 1191425500\nI1212 06:17:46.523689 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:46.523706 18921 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:17:46.523717 18921 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:17:46.523737 18921 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.523829 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:46.524020 18921 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:17:46.524039 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.524049 18921 net.cpp:165] Memory required for data: 1195521500\nI1212 06:17:46.524066 18921 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:17:46.524083 18921 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:17:46.524094 18921 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:17:46.524107 18921 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:46.524132 18921 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:17:46.524181 18921 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:17:46.524202 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.524211 18921 net.cpp:165] Memory required for 
data: 1199617500\nI1212 06:17:46.524222 18921 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:17:46.524241 18921 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:17:46.524253 18921 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:17:46.524269 18921 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.524288 18921 net.cpp:150] Setting up L2_b8_relu\nI1212 06:17:46.524302 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.524312 18921 net.cpp:165] Memory required for data: 1203713500\nI1212 06:17:46.524322 18921 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.524340 18921 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.524353 18921 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:17:46.524376 18921 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:46.524416 18921 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:46.524502 18921 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.524520 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.524533 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.524544 18921 net.cpp:165] Memory required for data: 1211905500\nI1212 06:17:46.524554 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:17:46.524582 18921 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:17:46.524596 18921 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:46.524615 18921 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:17:46.525135 18921 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:17:46.525154 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.525163 18921 net.cpp:165] Memory required for data: 
1216001500\nI1212 06:17:46.525182 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:17:46.525202 18921 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:17:46.525215 18921 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:17:46.525233 18921 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:17:46.525532 18921 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:17:46.525552 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.525562 18921 net.cpp:165] Memory required for data: 1220097500\nI1212 06:17:46.525591 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:46.525609 18921 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:17:46.525621 18921 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:17:46.525641 18921 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.525744 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:46.525938 18921 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:17:46.525957 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.525966 18921 net.cpp:165] Memory required for data: 1224193500\nI1212 06:17:46.525985 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:17:46.526001 18921 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:17:46.526012 18921 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:17:46.526026 18921 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.526051 18921 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:17:46.526065 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.526075 18921 net.cpp:165] Memory required for data: 1228289500\nI1212 06:17:46.526085 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:17:46.526106 18921 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:17:46.526124 18921 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:17:46.526144 18921 net.cpp:408] 
L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:17:46.526657 18921 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:17:46.526677 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.526687 18921 net.cpp:165] Memory required for data: 1232385500\nI1212 06:17:46.526705 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:17:46.526727 18921 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:17:46.526739 18921 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:17:46.526762 18921 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:17:46.527055 18921 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:17:46.527076 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.527084 18921 net.cpp:165] Memory required for data: 1236481500\nI1212 06:17:46.527155 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:46.527178 18921 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:17:46.527189 18921 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:17:46.527205 18921 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.527300 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:46.527496 18921 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:17:46.527515 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.527525 18921 net.cpp:165] Memory required for data: 1240577500\nI1212 06:17:46.527544 18921 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:17:46.527567 18921 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:17:46.527580 18921 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:17:46.527595 18921 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:46.527621 18921 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:17:46.527670 18921 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1212 06:17:46.527688 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.527698 18921 net.cpp:165] Memory required for data: 1244673500\nI1212 06:17:46.527709 18921 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:17:46.527722 18921 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:17:46.527734 18921 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:17:46.527752 18921 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.527772 18921 net.cpp:150] Setting up L2_b9_relu\nI1212 06:17:46.527787 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.527796 18921 net.cpp:165] Memory required for data: 1248769500\nI1212 06:17:46.527806 18921 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.527830 18921 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.527842 18921 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:17:46.527863 18921 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:46.527884 18921 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:46.527971 18921 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.527989 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.528002 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.528010 18921 net.cpp:165] Memory required for data: 1256961500\nI1212 06:17:46.528020 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:17:46.528041 18921 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:17:46.528053 18921 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:46.528077 18921 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:17:46.528615 18921 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1212 06:17:46.528635 18921 net.cpp:157] Top shape: 125 32 8 8 
(256000)\nI1212 06:17:46.528645 18921 net.cpp:165] Memory required for data: 1257985500\nI1212 06:17:46.528662 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:17:46.528684 18921 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:17:46.528697 18921 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:17:46.528713 18921 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:17:46.529017 18921 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:17:46.529036 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.529045 18921 net.cpp:165] Memory required for data: 1259009500\nI1212 06:17:46.529067 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:46.529083 18921 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:17:46.529095 18921 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:17:46.529110 18921 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.529211 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:46.529409 18921 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:17:46.529433 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.529444 18921 net.cpp:165] Memory required for data: 1260033500\nI1212 06:17:46.529462 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:17:46.529477 18921 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:17:46.529489 18921 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:17:46.529503 18921 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.529521 18921 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:17:46.529536 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.529546 18921 net.cpp:165] Memory required for data: 1261057500\nI1212 06:17:46.529556 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:17:46.529584 18921 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1212 06:17:46.529598 18921 net.cpp:434] 
L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:17:46.529616 18921 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:17:46.530143 18921 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:17:46.530164 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.530172 18921 net.cpp:165] Memory required for data: 1262081500\nI1212 06:17:46.530190 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:17:46.530208 18921 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:17:46.530232 18921 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:17:46.530249 18921 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:17:46.530550 18921 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:17:46.530570 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.530578 18921 net.cpp:165] Memory required for data: 1263105500\nI1212 06:17:46.530609 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:46.530627 18921 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:17:46.530639 18921 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:17:46.530659 18921 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.530756 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:46.530953 18921 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:17:46.530972 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.530982 18921 net.cpp:165] Memory required for data: 1264129500\nI1212 06:17:46.531000 18921 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:17:46.531021 18921 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:17:46.531033 18921 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:46.531049 18921 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:17:46.531111 18921 net.cpp:150] Setting up L3_b1_pool\nI1212 06:17:46.531131 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.531139 18921 net.cpp:165] Memory 
required for data: 1265153500\nI1212 06:17:46.531149 18921 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:17:46.531165 18921 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:17:46.531177 18921 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:17:46.531194 18921 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:17:46.531211 18921 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:17:46.531265 18921 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:17:46.531285 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.531294 18921 net.cpp:165] Memory required for data: 1266177500\nI1212 06:17:46.531304 18921 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:17:46.531318 18921 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:17:46.531330 18921 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:17:46.531349 18921 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.531376 18921 net.cpp:150] Setting up L3_b1_relu\nI1212 06:17:46.531394 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.531404 18921 net.cpp:165] Memory required for data: 1267201500\nI1212 06:17:46.531414 18921 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:17:46.531430 18921 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:17:46.531445 18921 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:17:46.532789 18921 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:17:46.532812 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.532821 18921 net.cpp:165] Memory required for data: 1268225500\nI1212 06:17:46.532832 18921 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:17:46.532850 18921 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:17:46.532861 18921 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:17:46.532874 18921 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 06:17:46.532896 18921 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 
06:17:46.532961 18921 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:17:46.532982 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.532991 18921 net.cpp:165] Memory required for data: 1270273500\nI1212 06:17:46.533002 18921 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.533017 18921 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.533028 18921 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:17:46.533054 18921 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:46.533077 18921 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:46.533164 18921 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.533185 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.533200 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.533210 18921 net.cpp:165] Memory required for data: 1274369500\nI1212 06:17:46.533231 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:17:46.533257 18921 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:17:46.533272 18921 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:46.533290 18921 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:17:46.535400 18921 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:17:46.535423 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.535432 18921 net.cpp:165] Memory required for data: 1276417500\nI1212 06:17:46.535450 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:17:46.535475 18921 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:17:46.535486 18921 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:17:46.535503 18921 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 06:17:46.535810 18921 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 
06:17:46.535830 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.535838 18921 net.cpp:165] Memory required for data: 1278465500\nI1212 06:17:46.535861 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:46.535877 18921 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:17:46.535888 18921 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:17:46.535905 18921 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.536002 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:46.536201 18921 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:17:46.536219 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.536228 18921 net.cpp:165] Memory required for data: 1280513500\nI1212 06:17:46.536247 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:17:46.536262 18921 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:17:46.536274 18921 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:17:46.536293 18921 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.536314 18921 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:17:46.536327 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.536336 18921 net.cpp:165] Memory required for data: 1282561500\nI1212 06:17:46.536347 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:17:46.536382 18921 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:17:46.536396 18921 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:17:46.536414 18921 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:17:46.537489 18921 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:17:46.537510 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.537519 18921 net.cpp:165] Memory required for data: 1284609500\nI1212 06:17:46.537537 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1212 06:17:46.537560 18921 net.cpp:100] Creating Layer 
L3_b2_cbr2_bn\nI1212 06:17:46.537572 18921 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:17:46.537590 18921 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:17:46.537895 18921 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:17:46.537914 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.537925 18921 net.cpp:165] Memory required for data: 1286657500\nI1212 06:17:46.537945 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:46.537966 18921 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:17:46.537978 18921 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:17:46.537995 18921 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.538091 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:46.538295 18921 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:17:46.538314 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.538323 18921 net.cpp:165] Memory required for data: 1288705500\nI1212 06:17:46.538341 18921 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:17:46.538370 18921 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:17:46.538384 18921 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:17:46.538409 18921 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:46.538432 18921 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:17:46.538491 18921 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:17:46.538511 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.538520 18921 net.cpp:165] Memory required for data: 1290753500\nI1212 06:17:46.538532 18921 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:17:46.538554 18921 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:17:46.538565 18921 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:17:46.538580 18921 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 
06:17:46.538599 18921 net.cpp:150] Setting up L3_b2_relu\nI1212 06:17:46.538614 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.538622 18921 net.cpp:165] Memory required for data: 1292801500\nI1212 06:17:46.538632 18921 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.538646 18921 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.538657 18921 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:17:46.538673 18921 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:46.538693 18921 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:46.538779 18921 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.538800 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.538812 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.538821 18921 net.cpp:165] Memory required for data: 1296897500\nI1212 06:17:46.538832 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:17:46.538857 18921 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:17:46.538872 18921 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:46.538890 18921 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:17:46.539963 18921 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:17:46.539983 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.539993 18921 net.cpp:165] Memory required for data: 1298945500\nI1212 06:17:46.540011 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:17:46.540033 18921 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:17:46.540045 18921 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:17:46.540067 18921 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:17:46.540383 18921 
net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:17:46.540402 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.540411 18921 net.cpp:165] Memory required for data: 1300993500\nI1212 06:17:46.540433 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:46.540451 18921 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:17:46.540462 18921 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:17:46.540477 18921 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.540577 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:46.540774 18921 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:17:46.540793 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.540802 18921 net.cpp:165] Memory required for data: 1303041500\nI1212 06:17:46.540820 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:17:46.540835 18921 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:17:46.540848 18921 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:17:46.540866 18921 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.540886 18921 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:17:46.540900 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.540910 18921 net.cpp:165] Memory required for data: 1305089500\nI1212 06:17:46.540930 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:17:46.540956 18921 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:17:46.540969 18921 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:17:46.540987 18921 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:17:46.542058 18921 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:17:46.542078 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.542088 18921 net.cpp:165] Memory required for data: 1307137500\nI1212 06:17:46.542105 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 
06:17:46.542127 18921 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:17:46.542140 18921 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:17:46.542157 18921 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:17:46.542480 18921 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:17:46.542500 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.542508 18921 net.cpp:165] Memory required for data: 1309185500\nI1212 06:17:46.542531 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:46.542551 18921 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:17:46.542563 18921 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:17:46.542579 18921 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.542677 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:46.542882 18921 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:17:46.542901 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.542910 18921 net.cpp:165] Memory required for data: 1311233500\nI1212 06:17:46.542929 18921 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:17:46.542950 18921 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:17:46.542963 18921 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:17:46.542976 18921 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:46.542997 18921 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:17:46.543053 18921 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:17:46.543072 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.543081 18921 net.cpp:165] Memory required for data: 1313281500\nI1212 06:17:46.543092 18921 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:17:46.543110 18921 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:17:46.543124 18921 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1212 06:17:46.543139 18921 net.cpp:395] 
L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.543157 18921 net.cpp:150] Setting up L3_b3_relu\nI1212 06:17:46.543171 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.543180 18921 net.cpp:165] Memory required for data: 1315329500\nI1212 06:17:46.543191 18921 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.543205 18921 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.543216 18921 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:17:46.543231 18921 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:46.543251 18921 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:46.543337 18921 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.543354 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.543375 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.543385 18921 net.cpp:165] Memory required for data: 1319425500\nI1212 06:17:46.543395 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:17:46.543421 18921 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:17:46.543434 18921 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:46.543453 18921 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:17:46.544543 18921 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:17:46.544570 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.544580 18921 net.cpp:165] Memory required for data: 1321473500\nI1212 06:17:46.544598 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:17:46.544620 18921 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:17:46.544633 18921 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1212 06:17:46.544654 18921 net.cpp:408] L3_b4_cbr1_bn 
-> L3_b4_cbr1_bn_top\nI1212 06:17:46.544965 18921 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:17:46.544986 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.544994 18921 net.cpp:165] Memory required for data: 1323521500\nI1212 06:17:46.545016 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:46.545032 18921 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:17:46.545043 18921 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:17:46.545063 18921 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.545168 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:46.545382 18921 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:17:46.545400 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.545410 18921 net.cpp:165] Memory required for data: 1325569500\nI1212 06:17:46.545428 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:17:46.545444 18921 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:17:46.545456 18921 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:17:46.545475 18921 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.545495 18921 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:17:46.545509 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.545519 18921 net.cpp:165] Memory required for data: 1327617500\nI1212 06:17:46.545529 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:17:46.545557 18921 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:17:46.545569 18921 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:17:46.545588 18921 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:17:46.546680 18921 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:17:46.546701 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.546710 18921 net.cpp:165] Memory required for data: 1329665500\nI1212 06:17:46.546728 18921 
layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:17:46.546749 18921 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:17:46.546762 18921 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:17:46.546779 18921 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:17:46.547091 18921 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:17:46.547111 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.547121 18921 net.cpp:165] Memory required for data: 1331713500\nI1212 06:17:46.547142 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:46.547163 18921 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:17:46.547176 18921 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:17:46.547196 18921 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.547291 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:46.547505 18921 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:17:46.547525 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.547534 18921 net.cpp:165] Memory required for data: 1333761500\nI1212 06:17:46.547552 18921 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:17:46.547569 18921 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:17:46.547580 18921 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:17:46.547593 18921 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:46.547616 18921 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:17:46.547674 18921 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:17:46.547693 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.547713 18921 net.cpp:165] Memory required for data: 1335809500\nI1212 06:17:46.547724 18921 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:17:46.547742 18921 net.cpp:100] Creating Layer L3_b4_relu\nI1212 06:17:46.547755 18921 net.cpp:434] L3_b4_relu <- 
L3_b4_sum_eltwise_top\nI1212 06:17:46.547770 18921 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.547791 18921 net.cpp:150] Setting up L3_b4_relu\nI1212 06:17:46.547804 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.547813 18921 net.cpp:165] Memory required for data: 1337857500\nI1212 06:17:46.547823 18921 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.547837 18921 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.547849 18921 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:17:46.547865 18921 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:46.547885 18921 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:46.547976 18921 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.547996 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.548007 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.548017 18921 net.cpp:165] Memory required for data: 1341953500\nI1212 06:17:46.548027 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:17:46.548053 18921 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:17:46.548066 18921 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:46.548084 18921 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:17:46.549167 18921 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:17:46.549187 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.549197 18921 net.cpp:165] Memory required for data: 1344001500\nI1212 06:17:46.549214 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:17:46.549240 18921 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 06:17:46.549253 18921 net.cpp:434] L3_b5_cbr1_bn <- 
L3_b5_cbr1_conv_top\nI1212 06:17:46.549275 18921 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:17:46.550668 18921 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:17:46.550690 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.550700 18921 net.cpp:165] Memory required for data: 1346049500\nI1212 06:17:46.550724 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:46.550745 18921 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:17:46.550758 18921 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:17:46.550779 18921 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.550873 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:46.551075 18921 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:17:46.551095 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.551103 18921 net.cpp:165] Memory required for data: 1348097500\nI1212 06:17:46.551122 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:17:46.551138 18921 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:17:46.551149 18921 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:17:46.551168 18921 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.551189 18921 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:17:46.551204 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.551213 18921 net.cpp:165] Memory required for data: 1350145500\nI1212 06:17:46.551225 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:17:46.551245 18921 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:17:46.551259 18921 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:17:46.551282 18921 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:17:46.553436 18921 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:17:46.553465 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.553475 18921 net.cpp:165] Memory 
required for data: 1352193500\nI1212 06:17:46.553494 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:17:46.553516 18921 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:17:46.553529 18921 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:17:46.553546 18921 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:17:46.553848 18921 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:17:46.553867 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.553876 18921 net.cpp:165] Memory required for data: 1354241500\nI1212 06:17:46.553899 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:46.553920 18921 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:17:46.553932 18921 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:17:46.553947 18921 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.554041 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:46.554242 18921 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:17:46.554261 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.554271 18921 net.cpp:165] Memory required for data: 1356289500\nI1212 06:17:46.554289 18921 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:17:46.554307 18921 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:17:46.554318 18921 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:17:46.554332 18921 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:46.554354 18921 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:17:46.554416 18921 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:17:46.554435 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.554445 18921 net.cpp:165] Memory required for data: 1358337500\nI1212 06:17:46.554456 18921 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 06:17:46.554474 18921 net.cpp:100] Creating Layer L3_b5_relu\nI1212 
06:17:46.554487 18921 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:17:46.554502 18921 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.554520 18921 net.cpp:150] Setting up L3_b5_relu\nI1212 06:17:46.554535 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.554544 18921 net.cpp:165] Memory required for data: 1360385500\nI1212 06:17:46.554555 18921 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.554569 18921 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.554581 18921 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:17:46.554595 18921 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:46.554615 18921 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:46.554697 18921 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.554716 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.554728 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.554738 18921 net.cpp:165] Memory required for data: 1364481500\nI1212 06:17:46.554747 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:17:46.554772 18921 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:17:46.554786 18921 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:46.554806 18921 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:17:46.555872 18921 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:17:46.555893 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.555902 18921 net.cpp:165] Memory required for data: 1366529500\nI1212 06:17:46.555920 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:17:46.555943 18921 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 
06:17:46.555955 18921 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:17:46.555994 18921 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:17:46.556318 18921 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:17:46.556337 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.556346 18921 net.cpp:165] Memory required for data: 1368577500\nI1212 06:17:46.556376 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:46.556396 18921 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:17:46.556407 18921 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:17:46.556428 18921 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.556526 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:46.556720 18921 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:17:46.556740 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.556749 18921 net.cpp:165] Memory required for data: 1370625500\nI1212 06:17:46.556768 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:17:46.556783 18921 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:17:46.556795 18921 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:17:46.556818 18921 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.556838 18921 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:17:46.556852 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.556861 18921 net.cpp:165] Memory required for data: 1372673500\nI1212 06:17:46.556872 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:17:46.556897 18921 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:17:46.556911 18921 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:17:46.556928 18921 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:17:46.557999 18921 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 06:17:46.558020 18921 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:46.558029 18921 net.cpp:165] Memory required for data: 1374721500\nI1212 06:17:46.558048 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:17:46.558070 18921 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:17:46.558082 18921 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:17:46.558099 18921 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:17:46.558414 18921 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:17:46.558435 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.558444 18921 net.cpp:165] Memory required for data: 1376769500\nI1212 06:17:46.558466 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:46.558491 18921 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:17:46.558504 18921 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:17:46.558526 18921 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.558629 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:46.558826 18921 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:17:46.558845 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.558854 18921 net.cpp:165] Memory required for data: 1378817500\nI1212 06:17:46.558873 18921 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:17:46.558889 18921 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:17:46.558902 18921 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:17:46.558913 18921 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:46.558935 18921 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:17:46.558990 18921 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:17:46.559010 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.559020 18921 net.cpp:165] Memory required for data: 1380865500\nI1212 06:17:46.559029 18921 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 
06:17:46.559048 18921 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:17:46.559061 18921 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:17:46.559075 18921 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.559103 18921 net.cpp:150] Setting up L3_b6_relu\nI1212 06:17:46.559118 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.559128 18921 net.cpp:165] Memory required for data: 1382913500\nI1212 06:17:46.559137 18921 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.559151 18921 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.559162 18921 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:17:46.559177 18921 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:46.559197 18921 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:46.559278 18921 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.559295 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.559307 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.559316 18921 net.cpp:165] Memory required for data: 1387009500\nI1212 06:17:46.559326 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:17:46.559352 18921 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:17:46.559373 18921 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:46.559393 18921 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:17:46.560458 18921 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:17:46.560478 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.560488 18921 net.cpp:165] Memory required for data: 1389057500\nI1212 06:17:46.560506 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:17:46.560528 
18921 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:17:46.560540 18921 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:17:46.560561 18921 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:17:46.560858 18921 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:17:46.560878 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.560887 18921 net.cpp:165] Memory required for data: 1391105500\nI1212 06:17:46.560909 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:46.560925 18921 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:17:46.560936 18921 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:17:46.560956 18921 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.561048 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:46.561246 18921 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:17:46.561266 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.561275 18921 net.cpp:165] Memory required for data: 1393153500\nI1212 06:17:46.561295 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:17:46.561353 18921 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:17:46.561378 18921 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:17:46.561394 18921 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.561414 18921 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:17:46.561429 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.561439 18921 net.cpp:165] Memory required for data: 1395201500\nI1212 06:17:46.561450 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:17:46.561476 18921 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:17:46.561489 18921 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:17:46.561508 18921 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 06:17:46.562592 18921 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 
06:17:46.562613 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.562623 18921 net.cpp:165] Memory required for data: 1397249500\nI1212 06:17:46.562640 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:17:46.562662 18921 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:17:46.562675 18921 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:17:46.562705 18921 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:17:46.563021 18921 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:17:46.563041 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563050 18921 net.cpp:165] Memory required for data: 1399297500\nI1212 06:17:46.563072 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:46.563089 18921 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:17:46.563102 18921 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:17:46.563122 18921 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.563220 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:46.563422 18921 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:17:46.563442 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563452 18921 net.cpp:165] Memory required for data: 1401345500\nI1212 06:17:46.563470 18921 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:17:46.563488 18921 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:17:46.563499 18921 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:17:46.563513 18921 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:46.563534 18921 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:17:46.563594 18921 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:17:46.563613 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563623 18921 net.cpp:165] Memory required for data: 1403393500\nI1212 06:17:46.563632 18921 
layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:17:46.563647 18921 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:17:46.563659 18921 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:17:46.563681 18921 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.563701 18921 net.cpp:150] Setting up L3_b7_relu\nI1212 06:17:46.563716 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563724 18921 net.cpp:165] Memory required for data: 1405441500\nI1212 06:17:46.563735 18921 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.563750 18921 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.563760 18921 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:17:46.563776 18921 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:46.563796 18921 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:46.563876 18921 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.563894 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563907 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.563916 18921 net.cpp:165] Memory required for data: 1409537500\nI1212 06:17:46.563926 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:17:46.563947 18921 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:17:46.563959 18921 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:46.563982 18921 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:17:46.565071 18921 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:17:46.565093 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.565101 18921 net.cpp:165] Memory required for data: 1411585500\nI1212 06:17:46.565119 18921 
layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:17:46.565136 18921 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:17:46.565148 18921 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:17:46.565170 18921 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:17:46.565488 18921 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:17:46.565511 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.565521 18921 net.cpp:165] Memory required for data: 1413633500\nI1212 06:17:46.565544 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:46.565570 18921 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:17:46.565583 18921 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:17:46.565599 18921 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.565701 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:46.565899 18921 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:17:46.565918 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.565927 18921 net.cpp:165] Memory required for data: 1415681500\nI1212 06:17:46.565946 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:17:46.565966 18921 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:17:46.565979 18921 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:17:46.565992 18921 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.566011 18921 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:17:46.566025 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.566035 18921 net.cpp:165] Memory required for data: 1417729500\nI1212 06:17:46.566045 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:17:46.566071 18921 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:17:46.566084 18921 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:17:46.566103 18921 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 
06:17:46.567376 18921 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:17:46.567397 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.567406 18921 net.cpp:165] Memory required for data: 1419777500\nI1212 06:17:46.567423 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:17:46.567445 18921 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:17:46.567456 18921 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:17:46.567476 18921 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:17:46.567821 18921 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:17:46.567842 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.567850 18921 net.cpp:165] Memory required for data: 1421825500\nI1212 06:17:46.567873 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:46.567889 18921 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:17:46.567901 18921 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:17:46.567929 18921 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.568022 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:46.568218 18921 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:17:46.568238 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.568246 18921 net.cpp:165] Memory required for data: 1423873500\nI1212 06:17:46.568265 18921 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:17:46.568281 18921 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:17:46.568294 18921 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:17:46.568311 18921 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:46.568330 18921 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:17:46.568398 18921 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:17:46.568418 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.568428 18921 net.cpp:165] Memory 
required for data: 1425921500\nI1212 06:17:46.568437 18921 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:17:46.568452 18921 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:17:46.568464 18921 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:17:46.568482 18921 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.568502 18921 net.cpp:150] Setting up L3_b8_relu\nI1212 06:17:46.568517 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.568526 18921 net.cpp:165] Memory required for data: 1427969500\nI1212 06:17:46.568536 18921 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.568552 18921 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.568572 18921 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:17:46.568588 18921 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:46.568608 18921 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:46.568694 18921 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.568714 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.568727 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.568737 18921 net.cpp:165] Memory required for data: 1432065500\nI1212 06:17:46.568747 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:17:46.568769 18921 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:17:46.568783 18921 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:46.568806 18921 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:17:46.570950 18921 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:17:46.570972 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.570982 18921 net.cpp:165] Memory required for data: 
1434113500\nI1212 06:17:46.571002 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:17:46.571023 18921 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:17:46.571036 18921 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:17:46.571054 18921 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:17:46.571375 18921 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:17:46.571395 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.571405 18921 net.cpp:165] Memory required for data: 1436161500\nI1212 06:17:46.571427 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:46.571449 18921 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:17:46.571461 18921 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:17:46.571476 18921 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.571573 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:46.571779 18921 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:17:46.571801 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.571808 18921 net.cpp:165] Memory required for data: 1438209500\nI1212 06:17:46.571818 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:17:46.571831 18921 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:17:46.571838 18921 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:17:46.571846 18921 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.571856 18921 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:17:46.571863 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.571868 18921 net.cpp:165] Memory required for data: 1440257500\nI1212 06:17:46.571872 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:17:46.571888 18921 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:17:46.571894 18921 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1212 06:17:46.571907 18921 net.cpp:408] 
L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:17:46.572964 18921 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:17:46.572984 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.572993 18921 net.cpp:165] Memory required for data: 1442305500\nI1212 06:17:46.573011 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:17:46.573029 18921 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:17:46.573040 18921 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:17:46.573062 18921 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:17:46.573380 18921 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:17:46.573405 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.573415 18921 net.cpp:165] Memory required for data: 1444353500\nI1212 06:17:46.573436 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:46.573462 18921 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:17:46.573477 18921 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:17:46.573493 18921 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.573591 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:46.573792 18921 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:17:46.573812 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.573822 18921 net.cpp:165] Memory required for data: 1446401500\nI1212 06:17:46.573840 18921 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:17:46.573863 18921 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:17:46.573874 18921 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:17:46.573889 18921 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:46.573904 18921 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:17:46.573964 18921 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 06:17:46.573983 18921 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:46.573993 18921 net.cpp:165] Memory required for data: 1448449500\nI1212 06:17:46.574004 18921 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:17:46.574018 18921 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:17:46.574030 18921 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:17:46.574044 18921 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.574064 18921 net.cpp:150] Setting up L3_b9_relu\nI1212 06:17:46.574079 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.574087 18921 net.cpp:165] Memory required for data: 1450497500\nI1212 06:17:46.574097 18921 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:17:46.574115 18921 net.cpp:100] Creating Layer post_pool\nI1212 06:17:46.574126 18921 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:17:46.574148 18921 net.cpp:408] post_pool -> post_pool\nI1212 06:17:46.574208 18921 net.cpp:150] Setting up post_pool\nI1212 06:17:46.574226 18921 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:17:46.574235 18921 net.cpp:165] Memory required for data: 1450529500\nI1212 06:17:46.574245 18921 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:17:46.574342 18921 net.cpp:100] Creating Layer post_FC\nI1212 06:17:46.574358 18921 net.cpp:434] post_FC <- post_pool\nI1212 06:17:46.574391 18921 net.cpp:408] post_FC -> post_FC_top\nI1212 06:17:46.574657 18921 net.cpp:150] Setting up post_FC\nI1212 06:17:46.574677 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.574687 18921 net.cpp:165] Memory required for data: 1450534500\nI1212 06:17:46.574704 18921 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:17:46.574725 18921 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:17:46.574738 18921 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:17:46.574753 18921 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1212 06:17:46.574774 18921 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:17:46.574863 18921 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:17:46.574885 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.574899 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.574908 18921 net.cpp:165] Memory required for data: 1450544500\nI1212 06:17:46.574919 18921 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:17:46.574935 18921 net.cpp:100] Creating Layer accuracy\nI1212 06:17:46.574946 18921 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:17:46.574960 18921 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:17:46.574976 18921 net.cpp:408] accuracy -> accuracy\nI1212 06:17:46.575088 18921 net.cpp:150] Setting up accuracy\nI1212 06:17:46.575107 18921 net.cpp:157] Top shape: (1)\nI1212 06:17:46.575117 18921 net.cpp:165] Memory required for data: 1450544504\nI1212 06:17:46.575127 18921 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:46.575147 18921 net.cpp:100] Creating Layer loss\nI1212 06:17:46.575170 18921 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:17:46.575183 18921 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:17:46.575201 18921 net.cpp:408] loss -> loss\nI1212 06:17:46.576315 18921 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:46.576503 18921 net.cpp:150] Setting up loss\nI1212 06:17:46.576522 18921 net.cpp:157] Top shape: (1)\nI1212 06:17:46.576531 18921 net.cpp:160]     with loss weight 1\nI1212 06:17:46.576642 18921 net.cpp:165] Memory required for data: 1450544508\nI1212 06:17:46.576658 18921 net.cpp:226] loss needs backward computation.\nI1212 06:17:46.576668 18921 net.cpp:228] accuracy does not need backward computation.\nI1212 06:17:46.576678 18921 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:17:46.576687 18921 net.cpp:226] post_FC needs backward computation.\nI1212 06:17:46.576697 18921 net.cpp:226] post_pool needs backward 
computation.\nI1212 06:17:46.576707 18921 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:17:46.576716 18921 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.576727 18921 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.576737 18921 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.576747 18921 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.576757 18921 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.576766 18921 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.576776 18921 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.576786 18921 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.576797 18921 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:17:46.576812 18921 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:17:46.576823 18921 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.576834 18921 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.576844 18921 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.576854 18921 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.576864 18921 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.576874 18921 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.576884 18921 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:17:46.576894 18921 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.576905 18921 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:17:46.576915 18921 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:17:46.576926 18921 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.576936 18921 net.cpp:226] 
L3_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.576946 18921 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.576956 18921 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.576967 18921 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.576977 18921 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.576987 18921 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.576997 18921 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.577008 18921 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:17:46.577019 18921 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:17:46.577029 18921 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.577039 18921 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.577049 18921 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.577132 18921 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.577149 18921 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.577170 18921 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.577181 18921 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.577191 18921 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.577203 18921 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:17:46.577214 18921 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:17:46.577222 18921 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.577368 18921 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:17:46.577380 18921 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.577391 18921 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.577402 
18921 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.577412 18921 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.577421 18921 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.577432 18921 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.577443 18921 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:17:46.577455 18921 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:17:46.577464 18921 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.577476 18921 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.577486 18921 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.577494 18921 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.577504 18921 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.577514 18921 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.577523 18921 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.577533 18921 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.577544 18921 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:17:46.577555 18921 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:17:46.577565 18921 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.577575 18921 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.577592 18921 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.577603 18921 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.577613 18921 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.577623 18921 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.577633 18921 net.cpp:226] L3_b3_cbr1_bn needs backward 
computation.\nI1212 06:17:46.577644 18921 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.577656 18921 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:17:46.577666 18921 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:17:46.577677 18921 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.577687 18921 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.577697 18921 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.577708 18921 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.577718 18921 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.577729 18921 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.577739 18921 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.577749 18921 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.577760 18921 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:17:46.577771 18921 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:17:46.577783 18921 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:17:46.577792 18921 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:17:46.577812 18921 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.577824 18921 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:17:46.577836 18921 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.577846 18921 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.577857 18921 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.577867 18921 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:17:46.577877 18921 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.577886 18921 net.cpp:226] L3_b1_cbr1_bn 
needs backward computation.\nI1212 06:17:46.577898 18921 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.577908 18921 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:17:46.577919 18921 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:17:46.577930 18921 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.577940 18921 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.577952 18921 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.577962 18921 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.577973 18921 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.577983 18921 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.577993 18921 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.578004 18921 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.578014 18921 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:17:46.578025 18921 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:17:46.578037 18921 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.578047 18921 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.578058 18921 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.578068 18921 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.578079 18921 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.578090 18921 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.578100 18921 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:17:46.578110 18921 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.578121 18921 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 
06:17:46.578132 18921 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:17:46.578143 18921 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.578155 18921 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.578164 18921 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.578176 18921 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.578186 18921 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.578197 18921 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.578207 18921 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.578217 18921 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.578229 18921 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:17:46.578240 18921 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:17:46.578250 18921 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.578261 18921 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.578272 18921 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.578284 18921 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.578299 18921 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.578311 18921 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.578321 18921 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.578341 18921 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.578352 18921 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:17:46.578372 18921 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:17:46.578385 18921 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.578397 18921 net.cpp:226] L2_b5_cbr2_scale needs backward 
computation.\nI1212 06:17:46.578408 18921 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.578418 18921 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.578430 18921 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.578440 18921 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.578451 18921 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.578461 18921 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.578472 18921 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:17:46.578483 18921 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:17:46.578493 18921 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.578505 18921 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.578516 18921 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.578526 18921 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.578537 18921 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.578548 18921 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.578559 18921 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.578569 18921 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.578582 18921 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:17:46.578593 18921 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:17:46.578603 18921 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.578614 18921 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.578625 18921 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.578636 18921 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.578647 18921 net.cpp:226] 
L2_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.578658 18921 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.578668 18921 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:17:46.578680 18921 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.578691 18921 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:17:46.578701 18921 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:17:46.578712 18921 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.578723 18921 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.578733 18921 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.578744 18921 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.578755 18921 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.578765 18921 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.578775 18921 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.578786 18921 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.578797 18921 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:17:46.578809 18921 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:17:46.578821 18921 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:17:46.578832 18921 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:17:46.578842 18921 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.578855 18921 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:17:46.578873 18921 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.578886 18921 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.578896 18921 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.578907 18921 
net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:17:46.578917 18921 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.578927 18921 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:17:46.578939 18921 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.578950 18921 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:17:46.578963 18921 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:17:46.578972 18921 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.578984 18921 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.578995 18921 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.579006 18921 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.579017 18921 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.579028 18921 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.579040 18921 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.579049 18921 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.579061 18921 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:17:46.579073 18921 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:17:46.579084 18921 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.579095 18921 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.579107 18921 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.579119 18921 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.579131 18921 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.579141 18921 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.579151 18921 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 
06:17:46.579164 18921 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.579175 18921 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:17:46.579186 18921 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:17:46.579196 18921 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.579208 18921 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.579221 18921 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.579231 18921 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.579241 18921 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.579253 18921 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.579263 18921 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.579273 18921 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.579285 18921 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:17:46.579298 18921 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:17:46.579308 18921 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.579319 18921 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.579330 18921 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.579341 18921 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.579352 18921 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.579370 18921 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.579382 18921 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.579393 18921 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.579412 18921 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:17:46.579426 18921 net.cpp:226] 
L1_b5_relu needs backward computation.\nI1212 06:17:46.579437 18921 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.579447 18921 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:17:46.579458 18921 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.579469 18921 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.579480 18921 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.579491 18921 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.579501 18921 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.579512 18921 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.579524 18921 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:17:46.579535 18921 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:17:46.579546 18921 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.579557 18921 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.579569 18921 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.579581 18921 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.579592 18921 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.579603 18921 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.579614 18921 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.579627 18921 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.579638 18921 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:17:46.579648 18921 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:17:46.579660 18921 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.579677 18921 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.579689 
18921 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.579701 18921 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.579712 18921 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.579723 18921 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.579733 18921 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:17:46.579746 18921 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.579756 18921 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:17:46.579767 18921 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:17:46.579779 18921 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.579792 18921 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.579802 18921 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.579813 18921 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.579824 18921 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.579834 18921 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.579845 18921 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.579856 18921 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.579869 18921 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:17:46.579879 18921 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:17:46.579890 18921 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.579902 18921 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.579913 18921 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.579924 18921 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.579936 18921 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 
06:17:46.579955 18921 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.579967 18921 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:17:46.579978 18921 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.579989 18921 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:17:46.580001 18921 net.cpp:226] pre_relu needs backward computation.\nI1212 06:17:46.580011 18921 net.cpp:226] pre_scale needs backward computation.\nI1212 06:17:46.580023 18921 net.cpp:226] pre_bn needs backward computation.\nI1212 06:17:46.580032 18921 net.cpp:226] pre_conv needs backward computation.\nI1212 06:17:46.580044 18921 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:17:46.580057 18921 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:17:46.580067 18921 net.cpp:270] This network produces output accuracy\nI1212 06:17:46.580080 18921 net.cpp:270] This network produces output loss\nI1212 06:17:46.580446 18921 net.cpp:283] Network initialization done.\nI1212 06:17:46.589288 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:46.589339 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:46.589421 18921 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1212 06:17:46.589828 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1212 06:17:46.589854 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI1212 06:17:46.589874 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI1212 06:17:46.589893 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr2_bn\nI1212 06:17:46.589915 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:17:46.589934 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:17:46.589954 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:17:46.589973 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:17:46.589993 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:17:46.590013 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:17:46.590034 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:17:46.590049 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:17:46.590070 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:17:46.590088 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:17:46.590108 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:17:46.590128 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:17:46.590147 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:17:46.590165 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI1212 06:17:46.590185 18921 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:17:46.590204 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:17:46.590236 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:17:46.590255 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:17:46.590281 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:17:46.590301 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:17:46.590320 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:17:46.590337 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:17:46.590355 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:17:46.590384 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:17:46.590404 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:17:46.590421 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:17:46.590442 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:17:46.590459 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:17:46.590481 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI1212 06:17:46.590497 18921 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:17:46.590515 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:17:46.590533 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:17:46.590553 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:17:46.590571 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:17:46.590590 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:17:46.590608 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:17:46.590633 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:17:46.590652 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:17:46.590672 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:17:46.590688 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:17:46.590708 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:17:46.590726 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:17:46.590747 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI1212 06:17:46.590764 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr2_bn\nI1212 06:17:46.590782 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:17:46.590801 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:17:46.590831 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:17:46.590852 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:17:46.590872 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:17:46.590890 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:17:46.590910 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:17:46.590926 18921 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:17:46.592645 18921 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    
phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: 
\"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    
}\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: 
\"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    
pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI1212 06:17:46.594476 18921 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:17:46.594739 18921 net.cpp:100] Creating Layer dataLayer\nI1212 06:17:46.594769 18921 net.cpp:408] dataLayer -> data_top\nI1212 06:17:46.594795 18921 net.cpp:408] dataLayer -> label\nI1212 06:17:46.594820 18921 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:17:46.609146 18929 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1212 06:17:46.609453 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:46.617121 18921 net.cpp:150] Setting up dataLayer\nI1212 06:17:46.617148 18921 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:17:46.617162 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.617172 18921 net.cpp:165] Memory required for data: 1536500\nI1212 06:17:46.617182 18921 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:17:46.617210 18921 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:17:46.617223 18921 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:17:46.617236 18921 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:17:46.617265 18921 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:17:46.617431 18921 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:17:46.617455 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.617467 18921 net.cpp:157] Top shape: 125 (125)\nI1212 06:17:46.617476 18921 net.cpp:165] Memory required for data: 1537500\nI1212 06:17:46.617486 18921 layer_factory.hpp:77] Creating layer pre_conv\nI1212 06:17:46.617514 18921 net.cpp:100] Creating Layer pre_conv\nI1212 06:17:46.617529 
18921 net.cpp:434] pre_conv <- data_top\nI1212 06:17:46.617579 18921 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:17:46.618034 18921 net.cpp:150] Setting up pre_conv\nI1212 06:17:46.618055 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.618078 18921 net.cpp:165] Memory required for data: 9729500\nI1212 06:17:46.618105 18921 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:17:46.618129 18921 net.cpp:100] Creating Layer pre_bn\nI1212 06:17:46.618141 18921 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:17:46.618158 18921 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:17:46.618584 18921 net.cpp:150] Setting up pre_bn\nI1212 06:17:46.618613 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.618628 18921 net.cpp:165] Memory required for data: 17921500\nI1212 06:17:46.618659 18921 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:46.618680 18921 net.cpp:100] Creating Layer pre_scale\nI1212 06:17:46.618692 18921 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:17:46.618706 18921 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:17:46.618809 18921 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:17:46.619041 18921 net.cpp:150] Setting up pre_scale\nI1212 06:17:46.619061 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.619071 18921 net.cpp:165] Memory required for data: 26113500\nI1212 06:17:46.619087 18921 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:17:46.619112 18921 net.cpp:100] Creating Layer pre_relu\nI1212 06:17:46.619124 18921 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:17:46.619146 18921 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:17:46.619165 18921 net.cpp:150] Setting up pre_relu\nI1212 06:17:46.619184 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.619192 18921 net.cpp:165] Memory required for data: 34305500\nI1212 06:17:46.619202 18921 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1212 06:17:46.619215 18921 
net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:17:46.619226 18921 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:17:46.619243 18921 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:17:46.619261 18921 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:17:46.619357 18921 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:17:46.619380 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.619393 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.619406 18921 net.cpp:165] Memory required for data: 50689500\nI1212 06:17:46.619416 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:17:46.619459 18921 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:17:46.619474 18921 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:17:46.619498 18921 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:17:46.620008 18921 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:17:46.620033 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.620043 18921 net.cpp:165] Memory required for data: 58881500\nI1212 06:17:46.620072 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:17:46.620103 18921 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:17:46.620117 18921 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:17:46.620134 18921 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:17:46.620486 18921 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:17:46.620508 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.620517 18921 net.cpp:165] Memory required for data: 67073500\nI1212 06:17:46.620543 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:46.620561 18921 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1212 06:17:46.620571 18921 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:17:46.620591 18921 
net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.620694 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:17:46.620895 18921 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:17:46.620915 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.620924 18921 net.cpp:165] Memory required for data: 75265500\nI1212 06:17:46.620942 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:17:46.620971 18921 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:17:46.620982 18921 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:17:46.620997 18921 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.621016 18921 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:17:46.621031 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.621042 18921 net.cpp:165] Memory required for data: 83457500\nI1212 06:17:46.621050 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:17:46.621074 18921 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:17:46.621088 18921 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:17:46.621114 18921 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:17:46.621546 18921 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:17:46.621567 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.621575 18921 net.cpp:165] Memory required for data: 91649500\nI1212 06:17:46.621592 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:17:46.621614 18921 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:17:46.621626 18921 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:17:46.621642 18921 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:17:46.621955 18921 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:17:46.621974 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.621984 18921 net.cpp:165] Memory required for data: 99841500\nI1212 
06:17:46.622011 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:46.622028 18921 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:17:46.622040 18921 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:17:46.622059 18921 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.622153 18921 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:17:46.622493 18921 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:17:46.622517 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.622527 18921 net.cpp:165] Memory required for data: 108033500\nI1212 06:17:46.622545 18921 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:17:46.622562 18921 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:17:46.622573 18921 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:17:46.622586 18921 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:17:46.622602 18921 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:17:46.622665 18921 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:17:46.622684 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.622694 18921 net.cpp:165] Memory required for data: 116225500\nI1212 06:17:46.622707 18921 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:17:46.622721 18921 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:17:46.622732 18921 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:17:46.622745 18921 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.622764 18921 net.cpp:150] Setting up L1_b1_relu\nI1212 06:17:46.622779 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.622787 18921 net.cpp:165] Memory required for data: 124417500\nI1212 06:17:46.622797 18921 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.622814 18921 net.cpp:100] Creating Layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.622824 18921 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:17:46.622843 18921 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:46.622867 18921 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:46.622951 18921 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:17:46.622977 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.623005 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.623016 18921 net.cpp:165] Memory required for data: 140801500\nI1212 06:17:46.623026 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:17:46.623046 18921 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:17:46.623059 18921 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:17:46.623076 18921 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:17:46.623528 18921 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:17:46.623550 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.623560 18921 net.cpp:165] Memory required for data: 148993500\nI1212 06:17:46.623581 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:17:46.623605 18921 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:17:46.623615 18921 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:17:46.623631 18921 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:17:46.623996 18921 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:17:46.624019 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.624029 18921 net.cpp:165] Memory required for data: 157185500\nI1212 06:17:46.624052 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:46.624074 18921 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1212 
06:17:46.624089 18921 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:17:46.624104 18921 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.624212 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:17:46.624454 18921 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:17:46.624478 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.624488 18921 net.cpp:165] Memory required for data: 165377500\nI1212 06:17:46.624506 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:17:46.624521 18921 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:17:46.624532 18921 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:17:46.624553 18921 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.624574 18921 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:17:46.624595 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.624605 18921 net.cpp:165] Memory required for data: 173569500\nI1212 06:17:46.624616 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:17:46.624644 18921 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:17:46.624657 18921 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:17:46.624680 18921 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:17:46.625295 18921 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:17:46.625319 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.625329 18921 net.cpp:165] Memory required for data: 181761500\nI1212 06:17:46.625346 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:17:46.625370 18921 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:17:46.625382 18921 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:17:46.625411 18921 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:17:46.625756 18921 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:17:46.625779 18921 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:46.625790 18921 net.cpp:165] Memory required for data: 189953500\nI1212 06:17:46.625819 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:46.625845 18921 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:17:46.625861 18921 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:17:46.625880 18921 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.625993 18921 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:17:46.626219 18921 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:17:46.626240 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.626250 18921 net.cpp:165] Memory required for data: 198145500\nI1212 06:17:46.626267 18921 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:17:46.626297 18921 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:17:46.626310 18921 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:17:46.626322 18921 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:17:46.626348 18921 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:17:46.626435 18921 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:17:46.626458 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.626469 18921 net.cpp:165] Memory required for data: 206337500\nI1212 06:17:46.626482 18921 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:17:46.626498 18921 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:17:46.626509 18921 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:17:46.626528 18921 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:17:46.626550 18921 net.cpp:150] Setting up L1_b2_relu\nI1212 06:17:46.626569 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.626581 18921 net.cpp:165] Memory required for data: 214529500\nI1212 06:17:46.626592 18921 layer_factory.hpp:77] Creating layer 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.626606 18921 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.626616 18921 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:17:46.626631 18921 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:46.626652 18921 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:46.626744 18921 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:17:46.626761 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.626772 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.626781 18921 net.cpp:165] Memory required for data: 230913500\nI1212 06:17:46.626794 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:17:46.626814 18921 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:17:46.626829 18921 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:17:46.626855 18921 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:17:46.627287 18921 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:17:46.627310 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.627321 18921 net.cpp:165] Memory required for data: 239105500\nI1212 06:17:46.627337 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:17:46.627354 18921 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:17:46.627377 18921 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:17:46.627406 18921 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:17:46.627790 18921 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:17:46.627810 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.627820 18921 net.cpp:165] Memory required for data: 247297500\nI1212 06:17:46.627848 18921 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_scale\nI1212 06:17:46.627882 18921 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:17:46.627897 18921 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:17:46.627913 18921 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.628026 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:17:46.628283 18921 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:17:46.628304 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.628312 18921 net.cpp:165] Memory required for data: 255489500\nI1212 06:17:46.628334 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:17:46.628350 18921 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:17:46.628367 18921 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:17:46.628391 18921 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.628412 18921 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:17:46.628439 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.628449 18921 net.cpp:165] Memory required for data: 263681500\nI1212 06:17:46.628460 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:17:46.628490 18921 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:17:46.628502 18921 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:17:46.628520 18921 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:17:46.628981 18921 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:17:46.629001 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.629010 18921 net.cpp:165] Memory required for data: 271873500\nI1212 06:17:46.629032 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:17:46.629062 18921 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:17:46.629076 18921 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:17:46.629091 18921 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:17:46.629463 18921 
net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:17:46.629483 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.629492 18921 net.cpp:165] Memory required for data: 280065500\nI1212 06:17:46.629518 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:46.629542 18921 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:17:46.629554 18921 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:17:46.629568 18921 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.629678 18921 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:17:46.629901 18921 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:17:46.629923 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.629933 18921 net.cpp:165] Memory required for data: 288257500\nI1212 06:17:46.629952 18921 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:17:46.629968 18921 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:17:46.629978 18921 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:17:46.629994 18921 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:17:46.630017 18921 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:17:46.630081 18921 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:17:46.630110 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.630122 18921 net.cpp:165] Memory required for data: 296449500\nI1212 06:17:46.630132 18921 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:17:46.630146 18921 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:17:46.630157 18921 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:17:46.630174 18921 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.630193 18921 net.cpp:150] Setting up L1_b3_relu\nI1212 06:17:46.630211 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.630220 18921 net.cpp:165] Memory required for 
data: 304641500\nI1212 06:17:46.630230 18921 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.630252 18921 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.630264 18921 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:17:46.630280 18921 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:46.630298 18921 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:46.630400 18921 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:17:46.630420 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.630431 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.630440 18921 net.cpp:165] Memory required for data: 321025500\nI1212 06:17:46.630453 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:17:46.630473 18921 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:17:46.630487 18921 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:17:46.630522 18921 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:17:46.630978 18921 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:17:46.631000 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.631011 18921 net.cpp:165] Memory required for data: 329217500\nI1212 06:17:46.631027 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:17:46.631044 18921 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:17:46.631055 18921 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:17:46.631074 18921 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:17:46.631505 18921 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:17:46.631552 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.631563 18921 net.cpp:165] Memory required for data: 
337409500\nI1212 06:17:46.631587 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:46.631613 18921 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:17:46.631629 18921 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:17:46.631645 18921 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.631752 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:17:46.631976 18921 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:17:46.631995 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.632004 18921 net.cpp:165] Memory required for data: 345601500\nI1212 06:17:46.632022 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:17:46.632038 18921 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:17:46.632050 18921 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:17:46.632068 18921 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.632087 18921 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:17:46.632102 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.632110 18921 net.cpp:165] Memory required for data: 353793500\nI1212 06:17:46.632122 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:17:46.632148 18921 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:17:46.632160 18921 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:17:46.632176 18921 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:17:46.632597 18921 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:17:46.632617 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.632627 18921 net.cpp:165] Memory required for data: 361985500\nI1212 06:17:46.632644 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:17:46.632665 18921 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:17:46.632678 18921 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:17:46.632694 18921 
net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:17:46.633013 18921 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:17:46.633033 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.633042 18921 net.cpp:165] Memory required for data: 370177500\nI1212 06:17:46.633064 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:46.633085 18921 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:17:46.633096 18921 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:17:46.633111 18921 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.633213 18921 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:17:46.633417 18921 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:17:46.633436 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.633446 18921 net.cpp:165] Memory required for data: 378369500\nI1212 06:17:46.633463 18921 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:17:46.633481 18921 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:17:46.633491 18921 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:17:46.633504 18921 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:17:46.633532 18921 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:17:46.633605 18921 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:17:46.633623 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.633633 18921 net.cpp:165] Memory required for data: 386561500\nI1212 06:17:46.633644 18921 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:17:46.633658 18921 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:17:46.633669 18921 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:17:46.633689 18921 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.633708 18921 net.cpp:150] Setting up L1_b4_relu\nI1212 06:17:46.633723 18921 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:17:46.633733 18921 net.cpp:165] Memory required for data: 394753500\nI1212 06:17:46.633744 18921 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.633756 18921 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.633767 18921 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:17:46.633782 18921 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:46.633800 18921 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:46.633888 18921 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:17:46.633906 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.633919 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.633929 18921 net.cpp:165] Memory required for data: 411137500\nI1212 06:17:46.633939 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:17:46.633958 18921 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:17:46.633971 18921 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:17:46.633996 18921 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:17:46.634414 18921 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:17:46.634434 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.634444 18921 net.cpp:165] Memory required for data: 419329500\nI1212 06:17:46.634482 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:17:46.634501 18921 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:17:46.634513 18921 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:17:46.634533 18921 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:17:46.634858 18921 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1212 06:17:46.634878 18921 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:17:46.634887 18921 net.cpp:165] Memory required for data: 427521500\nI1212 06:17:46.634907 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:46.634929 18921 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:17:46.634940 18921 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:17:46.634955 18921 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.635053 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:17:46.635258 18921 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:17:46.635278 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.635288 18921 net.cpp:165] Memory required for data: 435713500\nI1212 06:17:46.635305 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:17:46.635319 18921 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:17:46.635331 18921 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:17:46.635350 18921 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.635376 18921 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:17:46.635391 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.635401 18921 net.cpp:165] Memory required for data: 443905500\nI1212 06:17:46.635411 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:17:46.635437 18921 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:17:46.635457 18921 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:17:46.635476 18921 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:17:46.635897 18921 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:17:46.635917 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.635926 18921 net.cpp:165] Memory required for data: 452097500\nI1212 06:17:46.635942 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:17:46.635959 18921 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:17:46.635977 18921 
net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:17:46.635993 18921 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:17:46.636322 18921 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:17:46.636342 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.636350 18921 net.cpp:165] Memory required for data: 460289500\nI1212 06:17:46.636379 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:46.636395 18921 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:17:46.636406 18921 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:17:46.636426 18921 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.636526 18921 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:17:46.636741 18921 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:17:46.636765 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.636775 18921 net.cpp:165] Memory required for data: 468481500\nI1212 06:17:46.636791 18921 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:17:46.636807 18921 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:17:46.636818 18921 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:17:46.636831 18921 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:17:46.636844 18921 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:17:46.636909 18921 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:17:46.636926 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.636935 18921 net.cpp:165] Memory required for data: 476673500\nI1212 06:17:46.636945 18921 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:17:46.636963 18921 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:17:46.636976 18921 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:17:46.636988 18921 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.637006 18921 net.cpp:150] 
Setting up L1_b5_relu\nI1212 06:17:46.637022 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.637029 18921 net.cpp:165] Memory required for data: 484865500\nI1212 06:17:46.637038 18921 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.637059 18921 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.637071 18921 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:17:46.637086 18921 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:46.637104 18921 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:46.637188 18921 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:17:46.637212 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.637225 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.637235 18921 net.cpp:165] Memory required for data: 501249500\nI1212 06:17:46.637245 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:17:46.637264 18921 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:17:46.637276 18921 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:17:46.637295 18921 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:17:46.637706 18921 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:17:46.637727 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.637735 18921 net.cpp:165] Memory required for data: 509441500\nI1212 06:17:46.637763 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:17:46.637784 18921 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:17:46.637795 18921 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:17:46.637811 18921 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1212 06:17:46.638149 18921 net.cpp:150] Setting up 
L1_b6_cbr1_bn\nI1212 06:17:46.638169 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.638177 18921 net.cpp:165] Memory required for data: 517633500\nI1212 06:17:46.638198 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:46.638214 18921 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:17:46.638226 18921 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:17:46.638247 18921 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.638347 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:17:46.638553 18921 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:17:46.638576 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.638584 18921 net.cpp:165] Memory required for data: 525825500\nI1212 06:17:46.638602 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:17:46.638617 18921 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:17:46.638628 18921 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:17:46.638640 18921 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.638659 18921 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:17:46.638672 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.638681 18921 net.cpp:165] Memory required for data: 534017500\nI1212 06:17:46.638690 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:17:46.638715 18921 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:17:46.638728 18921 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:17:46.638749 18921 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:17:46.639163 18921 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:17:46.639181 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.639190 18921 net.cpp:165] Memory required for data: 542209500\nI1212 06:17:46.639209 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:17:46.639228 18921 
net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:17:46.639241 18921 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:17:46.639263 18921 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:17:46.639595 18921 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:17:46.639616 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.639624 18921 net.cpp:165] Memory required for data: 550401500\nI1212 06:17:46.639647 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:46.639662 18921 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:17:46.639673 18921 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:17:46.639686 18921 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.639789 18921 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:17:46.639992 18921 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:17:46.640012 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.640020 18921 net.cpp:165] Memory required for data: 558593500\nI1212 06:17:46.640038 18921 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:17:46.640074 18921 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:17:46.640086 18921 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:17:46.640100 18921 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:17:46.640116 18921 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:17:46.640179 18921 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:17:46.640199 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.640210 18921 net.cpp:165] Memory required for data: 566785500\nI1212 06:17:46.640221 18921 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:17:46.640251 18921 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:17:46.640262 18921 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1212 06:17:46.640281 18921 net.cpp:395] L1_b6_relu -> 
L1_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.640301 18921 net.cpp:150] Setting up L1_b6_relu\nI1212 06:17:46.640316 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.640326 18921 net.cpp:165] Memory required for data: 574977500\nI1212 06:17:46.640336 18921 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.640348 18921 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.640358 18921 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:17:46.640381 18921 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:46.640400 18921 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:46.640489 18921 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:17:46.640511 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.640524 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.640533 18921 net.cpp:165] Memory required for data: 591361500\nI1212 06:17:46.640543 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:17:46.640563 18921 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:17:46.640574 18921 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:17:46.640596 18921 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:17:46.641007 18921 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:17:46.641026 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.641036 18921 net.cpp:165] Memory required for data: 599553500\nI1212 06:17:46.641052 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:17:46.641069 18921 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:17:46.641080 18921 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1212 06:17:46.641101 18921 net.cpp:408] L1_b7_cbr1_bn -> 
L1_b7_cbr1_bn_top\nI1212 06:17:46.641443 18921 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:17:46.641463 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.641472 18921 net.cpp:165] Memory required for data: 607745500\nI1212 06:17:46.641492 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:46.641513 18921 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:17:46.641525 18921 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:17:46.641540 18921 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.641638 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:17:46.641842 18921 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:17:46.641861 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.641870 18921 net.cpp:165] Memory required for data: 615937500\nI1212 06:17:46.641888 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:17:46.641903 18921 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:17:46.641914 18921 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:17:46.641933 18921 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.641953 18921 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:17:46.641968 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.641978 18921 net.cpp:165] Memory required for data: 624129500\nI1212 06:17:46.641988 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:17:46.642012 18921 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:17:46.642025 18921 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:17:46.642046 18921 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:17:46.642748 18921 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:17:46.642769 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.642778 18921 net.cpp:165] Memory required for data: 632321500\nI1212 06:17:46.642805 18921 
layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:17:46.642823 18921 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:17:46.642835 18921 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:17:46.642851 18921 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:17:46.643187 18921 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:17:46.643206 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.643216 18921 net.cpp:165] Memory required for data: 640513500\nI1212 06:17:46.643236 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:46.643260 18921 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:17:46.643271 18921 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:17:46.643287 18921 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.643394 18921 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:17:46.643597 18921 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:17:46.643616 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.643625 18921 net.cpp:165] Memory required for data: 648705500\nI1212 06:17:46.643643 18921 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:17:46.643661 18921 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:17:46.643672 18921 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:17:46.643685 18921 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:17:46.643705 18921 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:17:46.643767 18921 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:17:46.643785 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.643795 18921 net.cpp:165] Memory required for data: 656897500\nI1212 06:17:46.643805 18921 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:17:46.643820 18921 net.cpp:100] Creating Layer L1_b7_relu\nI1212 06:17:46.643831 18921 net.cpp:434] L1_b7_relu <- 
L1_b7_sum_eltwise_top\nI1212 06:17:46.643844 18921 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.643862 18921 net.cpp:150] Setting up L1_b7_relu\nI1212 06:17:46.643877 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.643887 18921 net.cpp:165] Memory required for data: 665089500\nI1212 06:17:46.643896 18921 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.643914 18921 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.643926 18921 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:17:46.643941 18921 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:46.643962 18921 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:46.644052 18921 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:17:46.644073 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.644086 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.644096 18921 net.cpp:165] Memory required for data: 681473500\nI1212 06:17:46.644106 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:17:46.644126 18921 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:17:46.644138 18921 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:17:46.644162 18921 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:17:46.644585 18921 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:17:46.644605 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.644614 18921 net.cpp:165] Memory required for data: 689665500\nI1212 06:17:46.644632 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:17:46.644649 18921 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1212 06:17:46.644660 18921 net.cpp:434] L1_b8_cbr1_bn <- 
L1_b8_cbr1_conv_top\nI1212 06:17:46.644676 18921 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:17:46.645022 18921 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:17:46.645041 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.645051 18921 net.cpp:165] Memory required for data: 697857500\nI1212 06:17:46.645071 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:46.645090 18921 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:17:46.645103 18921 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:17:46.645118 18921 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.645226 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:17:46.645452 18921 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:17:46.645473 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.645481 18921 net.cpp:165] Memory required for data: 706049500\nI1212 06:17:46.645499 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:17:46.645514 18921 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:17:46.645525 18921 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:17:46.645545 18921 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.645565 18921 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:17:46.645579 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.645587 18921 net.cpp:165] Memory required for data: 714241500\nI1212 06:17:46.645596 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:17:46.645622 18921 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:17:46.645634 18921 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:17:46.645650 18921 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:17:46.646064 18921 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:17:46.646082 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.646091 18921 
net.cpp:165] Memory required for data: 722433500\nI1212 06:17:46.646108 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:17:46.646131 18921 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:17:46.646143 18921 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:17:46.646159 18921 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:17:46.646502 18921 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:17:46.646522 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.646530 18921 net.cpp:165] Memory required for data: 730625500\nI1212 06:17:46.646553 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:46.646569 18921 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:17:46.646579 18921 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:17:46.646598 18921 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.646699 18921 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:17:46.646905 18921 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:17:46.646925 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.646934 18921 net.cpp:165] Memory required for data: 738817500\nI1212 06:17:46.646950 18921 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:17:46.646966 18921 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:17:46.646977 18921 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:17:46.646989 18921 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:17:46.647009 18921 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:17:46.647068 18921 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:17:46.647092 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.647102 18921 net.cpp:165] Memory required for data: 747009500\nI1212 06:17:46.647112 18921 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1212 06:17:46.647126 18921 net.cpp:100] Creating 
Layer L1_b8_relu\nI1212 06:17:46.647137 18921 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:17:46.647151 18921 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.647169 18921 net.cpp:150] Setting up L1_b8_relu\nI1212 06:17:46.647192 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.647202 18921 net.cpp:165] Memory required for data: 755201500\nI1212 06:17:46.647212 18921 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.647229 18921 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.647240 18921 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:17:46.647258 18921 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:46.647276 18921 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:46.647375 18921 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:17:46.647397 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.647410 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.647420 18921 net.cpp:165] Memory required for data: 771585500\nI1212 06:17:46.647430 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:17:46.647449 18921 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:17:46.647461 18921 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:17:46.647483 18921 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:17:46.648214 18921 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:17:46.648236 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.648244 18921 net.cpp:165] Memory required for data: 779777500\nI1212 06:17:46.648262 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1212 06:17:46.648288 18921 net.cpp:100] Creating Layer 
L1_b9_cbr1_bn\nI1212 06:17:46.648300 18921 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:17:46.648317 18921 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:17:46.648646 18921 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:17:46.648666 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.648675 18921 net.cpp:165] Memory required for data: 787969500\nI1212 06:17:46.648697 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:46.648715 18921 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:17:46.648725 18921 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:17:46.648741 18921 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.648839 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:17:46.649047 18921 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:17:46.649066 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.649075 18921 net.cpp:165] Memory required for data: 796161500\nI1212 06:17:46.649093 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:17:46.649109 18921 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:17:46.649121 18921 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:17:46.649140 18921 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.649160 18921 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:17:46.649175 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.649184 18921 net.cpp:165] Memory required for data: 804353500\nI1212 06:17:46.649194 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:17:46.649214 18921 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:17:46.649226 18921 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:17:46.649248 18921 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:17:46.649652 18921 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:17:46.649672 18921 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.649682 18921 net.cpp:165] Memory required for data: 812545500\nI1212 06:17:46.649698 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:17:46.649715 18921 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:17:46.649727 18921 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:17:46.649745 18921 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:17:46.650097 18921 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:17:46.650125 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.650135 18921 net.cpp:165] Memory required for data: 820737500\nI1212 06:17:46.650185 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:46.650208 18921 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:17:46.650221 18921 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:17:46.650236 18921 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.650336 18921 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:17:46.650544 18921 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:17:46.650563 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.650573 18921 net.cpp:165] Memory required for data: 828929500\nI1212 06:17:46.650590 18921 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:17:46.650606 18921 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:17:46.650619 18921 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:17:46.650631 18921 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:17:46.650651 18921 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:17:46.650712 18921 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:17:46.650732 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.650741 18921 net.cpp:165] Memory required for data: 837121500\nI1212 06:17:46.650751 18921 layer_factory.hpp:77] Creating layer 
L1_b9_relu\nI1212 06:17:46.650765 18921 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:17:46.650776 18921 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:17:46.650794 18921 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.650815 18921 net.cpp:150] Setting up L1_b9_relu\nI1212 06:17:46.650828 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.650837 18921 net.cpp:165] Memory required for data: 845313500\nI1212 06:17:46.650847 18921 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.650862 18921 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.650873 18921 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:17:46.650892 18921 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:46.650913 18921 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:46.651006 18921 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:17:46.651026 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.651038 18921 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:17:46.651047 18921 net.cpp:165] Memory required for data: 861697500\nI1212 06:17:46.651057 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:17:46.651077 18921 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:17:46.651089 18921 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:17:46.651113 18921 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:17:46.651523 18921 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:17:46.651543 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.651552 18921 net.cpp:165] Memory required for data: 863745500\nI1212 06:17:46.651571 18921 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_bn\nI1212 06:17:46.651587 18921 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:17:46.651599 18921 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:17:46.651620 18921 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:17:46.651937 18921 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:17:46.651955 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.651964 18921 net.cpp:165] Memory required for data: 865793500\nI1212 06:17:46.651986 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:46.652024 18921 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:17:46.652038 18921 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:17:46.652055 18921 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.652156 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:17:46.652359 18921 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:17:46.652382 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.652392 18921 net.cpp:165] Memory required for data: 867841500\nI1212 06:17:46.652410 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:17:46.652431 18921 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:17:46.652441 18921 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:17:46.652456 18921 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.652475 18921 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:17:46.652489 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.652499 18921 net.cpp:165] Memory required for data: 869889500\nI1212 06:17:46.652509 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:17:46.652534 18921 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:17:46.652547 18921 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:17:46.652570 18921 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:17:46.652979 18921 net.cpp:150] 
Setting up L2_b1_cbr2_conv\nI1212 06:17:46.652998 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.653008 18921 net.cpp:165] Memory required for data: 871937500\nI1212 06:17:46.653025 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:17:46.653043 18921 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:17:46.653054 18921 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:17:46.653079 18921 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:17:46.653399 18921 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:17:46.653419 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.653429 18921 net.cpp:165] Memory required for data: 873985500\nI1212 06:17:46.653450 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:46.653470 18921 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:17:46.653482 18921 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:17:46.653497 18921 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.653594 18921 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:17:46.653795 18921 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:17:46.653812 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.653822 18921 net.cpp:165] Memory required for data: 876033500\nI1212 06:17:46.653841 18921 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:17:46.653862 18921 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:17:46.653874 18921 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:17:46.653892 18921 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:17:46.653947 18921 net.cpp:150] Setting up L2_b1_pool\nI1212 06:17:46.653970 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.653980 18921 net.cpp:165] Memory required for data: 878081500\nI1212 06:17:46.653990 18921 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:17:46.654006 18921 net.cpp:100] 
Creating Layer L2_b1_sum_eltwise\nI1212 06:17:46.654016 18921 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:17:46.654028 18921 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:17:46.654049 18921 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:17:46.654109 18921 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:17:46.654129 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.654139 18921 net.cpp:165] Memory required for data: 880129500\nI1212 06:17:46.654148 18921 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:17:46.654161 18921 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:17:46.654172 18921 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:17:46.654206 18921 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.654225 18921 net.cpp:150] Setting up L2_b1_relu\nI1212 06:17:46.654240 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.654250 18921 net.cpp:165] Memory required for data: 882177500\nI1212 06:17:46.654260 18921 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:17:46.654284 18921 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:17:46.654300 18921 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:17:46.656738 18921 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:17:46.656760 18921 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:17:46.656770 18921 net.cpp:165] Memory required for data: 884225500\nI1212 06:17:46.656780 18921 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:17:46.656797 18921 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:17:46.656810 18921 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:17:46.656822 18921 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:17:46.656844 18921 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:17:46.656910 18921 net.cpp:150] Setting up L2_b1_concat0\nI1212 06:17:46.656937 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 
06:17:46.656947 18921 net.cpp:165] Memory required for data: 888321500\nI1212 06:17:46.656958 18921 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.656973 18921 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.656983 18921 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:17:46.656999 18921 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:46.657018 18921 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:46.657114 18921 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:17:46.657135 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.657150 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.657160 18921 net.cpp:165] Memory required for data: 896513500\nI1212 06:17:46.657169 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:17:46.657196 18921 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:17:46.657208 18921 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:17:46.657227 18921 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:17:46.657789 18921 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:17:46.657810 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.657819 18921 net.cpp:165] Memory required for data: 900609500\nI1212 06:17:46.657837 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:17:46.657855 18921 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:17:46.657871 18921 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:17:46.657888 18921 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:17:46.658203 18921 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:17:46.658222 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.658232 18921 net.cpp:165] Memory required for data: 
904705500\nI1212 06:17:46.658254 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:46.658272 18921 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:17:46.658284 18921 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:17:46.658299 18921 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.658404 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:17:46.658603 18921 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:17:46.658627 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.658638 18921 net.cpp:165] Memory required for data: 908801500\nI1212 06:17:46.658655 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:17:46.658670 18921 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:17:46.658682 18921 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:17:46.658706 18921 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.658727 18921 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:17:46.658742 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.658752 18921 net.cpp:165] Memory required for data: 912897500\nI1212 06:17:46.658762 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:17:46.658788 18921 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:17:46.658802 18921 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:17:46.658824 18921 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:17:46.659379 18921 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:17:46.659399 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.659409 18921 net.cpp:165] Memory required for data: 916993500\nI1212 06:17:46.659426 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:17:46.659447 18921 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:17:46.659461 18921 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:17:46.659482 18921 
net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:17:46.659790 18921 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:17:46.659808 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.659817 18921 net.cpp:165] Memory required for data: 921089500\nI1212 06:17:46.659839 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:46.659855 18921 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:17:46.659868 18921 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:17:46.659883 18921 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.659981 18921 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:17:46.660178 18921 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:17:46.660197 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.660207 18921 net.cpp:165] Memory required for data: 925185500\nI1212 06:17:46.660225 18921 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:17:46.660248 18921 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:17:46.660260 18921 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:17:46.660274 18921 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:17:46.660289 18921 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:17:46.660337 18921 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:17:46.660358 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.660375 18921 net.cpp:165] Memory required for data: 929281500\nI1212 06:17:46.660387 18921 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:17:46.660405 18921 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:17:46.660418 18921 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:17:46.660432 18921 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:17:46.660451 18921 net.cpp:150] Setting up L2_b2_relu\nI1212 06:17:46.660465 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.660475 18921 net.cpp:165] Memory required for data: 933377500\nI1212 06:17:46.660485 18921 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.660498 18921 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.660508 18921 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:17:46.660524 18921 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:46.660542 18921 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:46.660634 18921 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:17:46.660651 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.660665 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.660673 18921 net.cpp:165] Memory required for data: 941569500\nI1212 06:17:46.660683 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:17:46.660717 18921 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:17:46.660732 18921 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:17:46.660751 18921 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:17:46.661303 18921 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:17:46.661322 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.661332 18921 net.cpp:165] Memory required for data: 945665500\nI1212 06:17:46.661350 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:17:46.661377 18921 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:17:46.661391 18921 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:17:46.661412 18921 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:17:46.661722 18921 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1212 06:17:46.661741 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.661751 18921 net.cpp:165] Memory required for data: 949761500\nI1212 06:17:46.661772 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:46.661788 18921 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:17:46.661800 18921 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:17:46.661814 18921 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.661913 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:17:46.662111 18921 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:17:46.662129 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.662139 18921 net.cpp:165] Memory required for data: 953857500\nI1212 06:17:46.662158 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:17:46.662178 18921 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:17:46.662189 18921 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:17:46.662204 18921 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.662221 18921 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:17:46.662235 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.662245 18921 net.cpp:165] Memory required for data: 957953500\nI1212 06:17:46.662256 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:17:46.662281 18921 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:17:46.662294 18921 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:17:46.662317 18921 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:17:46.662874 18921 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:17:46.662894 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.662902 18921 net.cpp:165] Memory required for data: 962049500\nI1212 06:17:46.662920 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:17:46.662942 18921 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:17:46.662955 18921 
net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:17:46.662976 18921 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:17:46.663288 18921 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:17:46.663307 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.663317 18921 net.cpp:165] Memory required for data: 966145500\nI1212 06:17:46.663338 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:46.663355 18921 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:17:46.663378 18921 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:17:46.663394 18921 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.663491 18921 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:17:46.663694 18921 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:17:46.663714 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.663723 18921 net.cpp:165] Memory required for data: 970241500\nI1212 06:17:46.663740 18921 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:17:46.663761 18921 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:17:46.663774 18921 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:17:46.663797 18921 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:17:46.663815 18921 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:17:46.663863 18921 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:17:46.663882 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.663892 18921 net.cpp:165] Memory required for data: 974337500\nI1212 06:17:46.663902 18921 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:17:46.663934 18921 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:17:46.663949 18921 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:17:46.663964 18921 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.663983 18921 net.cpp:150] 
Setting up L2_b3_relu\nI1212 06:17:46.663998 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.664007 18921 net.cpp:165] Memory required for data: 978433500\nI1212 06:17:46.664018 18921 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.664032 18921 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.664043 18921 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:17:46.664058 18921 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:46.664078 18921 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:46.664170 18921 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:17:46.664191 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.664206 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.664214 18921 net.cpp:165] Memory required for data: 986625500\nI1212 06:17:46.664224 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:17:46.664245 18921 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:17:46.664258 18921 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:17:46.664281 18921 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:17:46.664844 18921 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:17:46.664865 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.664873 18921 net.cpp:165] Memory required for data: 990721500\nI1212 06:17:46.664891 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:17:46.664912 18921 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:17:46.664924 18921 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:17:46.664940 18921 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1212 06:17:46.665257 18921 net.cpp:150] Setting up 
L2_b4_cbr1_bn\nI1212 06:17:46.665277 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.665287 18921 net.cpp:165] Memory required for data: 994817500\nI1212 06:17:46.665307 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:46.665329 18921 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:17:46.665341 18921 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:17:46.665356 18921 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.665459 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:17:46.665659 18921 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:17:46.665678 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.665688 18921 net.cpp:165] Memory required for data: 998913500\nI1212 06:17:46.665705 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:17:46.665729 18921 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:17:46.665741 18921 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:17:46.665760 18921 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.665781 18921 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:17:46.665794 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.665804 18921 net.cpp:165] Memory required for data: 1003009500\nI1212 06:17:46.665814 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:17:46.665848 18921 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:17:46.665861 18921 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:17:46.665884 18921 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:17:46.666432 18921 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:17:46.666452 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.666461 18921 net.cpp:165] Memory required for data: 1007105500\nI1212 06:17:46.666478 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:17:46.666496 18921 
net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:17:46.666507 18921 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:17:46.666532 18921 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:17:46.666846 18921 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:17:46.666864 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.666873 18921 net.cpp:165] Memory required for data: 1011201500\nI1212 06:17:46.666894 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:46.666919 18921 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:17:46.666931 18921 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:17:46.666949 18921 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.667040 18921 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:17:46.667431 18921 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:17:46.667455 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.667466 18921 net.cpp:165] Memory required for data: 1015297500\nI1212 06:17:46.667490 18921 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:17:46.667510 18921 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:17:46.667520 18921 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:17:46.667532 18921 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:17:46.667547 18921 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:17:46.667594 18921 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:17:46.667608 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.667613 18921 net.cpp:165] Memory required for data: 1019393500\nI1212 06:17:46.667619 18921 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:17:46.667628 18921 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:17:46.667632 18921 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1212 06:17:46.667644 18921 net.cpp:395] L2_b4_relu -> 
L2_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.667655 18921 net.cpp:150] Setting up L2_b4_relu\nI1212 06:17:46.667662 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.667667 18921 net.cpp:165] Memory required for data: 1023489500\nI1212 06:17:46.667672 18921 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.667680 18921 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.667685 18921 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:17:46.667692 18921 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:46.667702 18921 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:46.667757 18921 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:17:46.667768 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.667774 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.667778 18921 net.cpp:165] Memory required for data: 1031681500\nI1212 06:17:46.667783 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:17:46.667795 18921 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:17:46.667803 18921 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:17:46.667814 18921 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:17:46.668318 18921 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:17:46.668339 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.668344 18921 net.cpp:165] Memory required for data: 1035777500\nI1212 06:17:46.668352 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:17:46.668370 18921 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:17:46.668377 18921 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1212 06:17:46.668390 18921 net.cpp:408] L2_b5_cbr1_bn 
-> L2_b5_cbr1_bn_top\nI1212 06:17:46.668673 18921 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:17:46.668691 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.668700 18921 net.cpp:165] Memory required for data: 1039873500\nI1212 06:17:46.668720 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:46.668740 18921 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:17:46.668753 18921 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:17:46.668767 18921 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.668866 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:17:46.669072 18921 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:17:46.669091 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.669101 18921 net.cpp:165] Memory required for data: 1043969500\nI1212 06:17:46.669119 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:17:46.669140 18921 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:17:46.669152 18921 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:17:46.669167 18921 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.669185 18921 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:17:46.669205 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.669215 18921 net.cpp:165] Memory required for data: 1048065500\nI1212 06:17:46.669226 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:17:46.669247 18921 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:17:46.669260 18921 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:17:46.669282 18921 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:17:46.669839 18921 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:17:46.669858 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.669868 18921 net.cpp:165] Memory required for data: 1052161500\nI1212 06:17:46.669885 18921 
layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:17:46.669903 18921 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:17:46.669914 18921 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:17:46.669935 18921 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:17:46.670255 18921 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:17:46.670274 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.670284 18921 net.cpp:165] Memory required for data: 1056257500\nI1212 06:17:46.670305 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:46.670326 18921 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:17:46.670338 18921 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:17:46.670354 18921 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.670456 18921 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:17:46.670660 18921 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:17:46.670680 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.670689 18921 net.cpp:165] Memory required for data: 1060353500\nI1212 06:17:46.670707 18921 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:17:46.670728 18921 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:17:46.670740 18921 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:17:46.670754 18921 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:17:46.670770 18921 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:17:46.670825 18921 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:17:46.670843 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.670862 18921 net.cpp:165] Memory required for data: 1064449500\nI1212 06:17:46.670874 18921 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:17:46.670888 18921 net.cpp:100] Creating Layer L2_b5_relu\nI1212 06:17:46.670899 18921 net.cpp:434] L2_b5_relu <- 
L2_b5_sum_eltwise_top\nI1212 06:17:46.670918 18921 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.670938 18921 net.cpp:150] Setting up L2_b5_relu\nI1212 06:17:46.670951 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.670961 18921 net.cpp:165] Memory required for data: 1068545500\nI1212 06:17:46.670971 18921 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.670985 18921 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.670996 18921 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:17:46.671010 18921 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:46.671030 18921 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:46.671121 18921 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:17:46.671144 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.671157 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.671166 18921 net.cpp:165] Memory required for data: 1076737500\nI1212 06:17:46.671177 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:17:46.671197 18921 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:17:46.671211 18921 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:17:46.671233 18921 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:17:46.671797 18921 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:17:46.671818 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.671828 18921 net.cpp:165] Memory required for data: 1080833500\nI1212 06:17:46.671844 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:17:46.671861 18921 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1212 06:17:46.671874 18921 net.cpp:434] L2_b6_cbr1_bn <- 
L2_b6_cbr1_conv_top\nI1212 06:17:46.671896 18921 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:17:46.672224 18921 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:17:46.672243 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.672253 18921 net.cpp:165] Memory required for data: 1084929500\nI1212 06:17:46.672276 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:46.672297 18921 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:17:46.672309 18921 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:17:46.672325 18921 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.672425 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:17:46.672624 18921 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:17:46.672643 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.672653 18921 net.cpp:165] Memory required for data: 1089025500\nI1212 06:17:46.672672 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:17:46.672691 18921 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:17:46.672703 18921 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:17:46.672718 18921 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.672736 18921 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:17:46.672750 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.672760 18921 net.cpp:165] Memory required for data: 1093121500\nI1212 06:17:46.672770 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:17:46.672794 18921 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:17:46.672808 18921 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:17:46.672829 18921 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:17:46.673393 18921 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:17:46.673425 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.673435 18921 
net.cpp:165] Memory required for data: 1097217500\nI1212 06:17:46.673452 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:17:46.673470 18921 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:17:46.673481 18921 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:17:46.673503 18921 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:17:46.673818 18921 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:17:46.673837 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.673846 18921 net.cpp:165] Memory required for data: 1101313500\nI1212 06:17:46.673867 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:46.673888 18921 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:17:46.673902 18921 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:17:46.673916 18921 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.674010 18921 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:17:46.674208 18921 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:17:46.674226 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.674237 18921 net.cpp:165] Memory required for data: 1105409500\nI1212 06:17:46.674255 18921 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:17:46.674275 18921 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:17:46.674288 18921 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:17:46.674306 18921 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:17:46.674324 18921 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:17:46.674381 18921 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:17:46.674404 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.674415 18921 net.cpp:165] Memory required for data: 1109505500\nI1212 06:17:46.674425 18921 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1212 06:17:46.674439 18921 net.cpp:100] Creating 
Layer L2_b6_relu\nI1212 06:17:46.674450 18921 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:17:46.674464 18921 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.674484 18921 net.cpp:150] Setting up L2_b6_relu\nI1212 06:17:46.674497 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.674507 18921 net.cpp:165] Memory required for data: 1113601500\nI1212 06:17:46.674516 18921 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.674535 18921 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.674547 18921 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:17:46.674562 18921 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:46.674582 18921 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:46.674669 18921 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:17:46.674687 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.674700 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.674710 18921 net.cpp:165] Memory required for data: 1121793500\nI1212 06:17:46.674720 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:17:46.674741 18921 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:17:46.674753 18921 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:17:46.674777 18921 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:17:46.676412 18921 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:17:46.676434 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.676445 18921 net.cpp:165] Memory required for data: 1125889500\nI1212 06:17:46.676462 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1212 06:17:46.676486 18921 net.cpp:100] Creating Layer 
L2_b7_cbr1_bn\nI1212 06:17:46.676507 18921 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:17:46.676525 18921 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:17:46.676847 18921 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:17:46.676867 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.676877 18921 net.cpp:165] Memory required for data: 1129985500\nI1212 06:17:46.676898 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:46.676916 18921 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:17:46.676928 18921 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:17:46.676942 18921 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.677042 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:17:46.677242 18921 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:17:46.677264 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.677275 18921 net.cpp:165] Memory required for data: 1134081500\nI1212 06:17:46.677294 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:17:46.677309 18921 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:17:46.677320 18921 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:17:46.677335 18921 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.677353 18921 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:17:46.677376 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.677386 18921 net.cpp:165] Memory required for data: 1138177500\nI1212 06:17:46.677397 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:17:46.677422 18921 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:17:46.677434 18921 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:17:46.677458 18921 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:17:46.678009 18921 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:17:46.678030 18921 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.678038 18921 net.cpp:165] Memory required for data: 1142273500\nI1212 06:17:46.678056 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:17:46.678078 18921 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:17:46.678092 18921 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:17:46.678112 18921 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:17:46.678442 18921 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:17:46.678462 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.678472 18921 net.cpp:165] Memory required for data: 1146369500\nI1212 06:17:46.678493 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:46.678509 18921 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:17:46.678520 18921 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:17:46.678536 18921 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.678634 18921 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:17:46.678830 18921 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:17:46.678849 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.678858 18921 net.cpp:165] Memory required for data: 1150465500\nI1212 06:17:46.678877 18921 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:17:46.678899 18921 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:17:46.678910 18921 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:17:46.678923 18921 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:17:46.678941 18921 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:17:46.678988 18921 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:17:46.679006 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.679016 18921 net.cpp:165] Memory required for data: 1154561500\nI1212 06:17:46.679026 18921 layer_factory.hpp:77] 
Creating layer L2_b7_relu\nI1212 06:17:46.679045 18921 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:17:46.679057 18921 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:17:46.679081 18921 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.679100 18921 net.cpp:150] Setting up L2_b7_relu\nI1212 06:17:46.679116 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.679126 18921 net.cpp:165] Memory required for data: 1158657500\nI1212 06:17:46.679136 18921 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.679149 18921 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.679159 18921 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:17:46.679173 18921 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:46.679194 18921 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:46.679281 18921 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:17:46.679301 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.679313 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.679322 18921 net.cpp:165] Memory required for data: 1166849500\nI1212 06:17:46.679333 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:17:46.679358 18921 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:17:46.679378 18921 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:17:46.679399 18921 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:17:46.679947 18921 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:17:46.679967 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.679976 18921 net.cpp:165] Memory required for data: 1170945500\nI1212 06:17:46.679994 18921 layer_factory.hpp:77] Creating 
layer L2_b8_cbr1_bn\nI1212 06:17:46.680016 18921 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:17:46.680028 18921 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:17:46.680052 18921 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:17:46.680371 18921 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:17:46.680392 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.680400 18921 net.cpp:165] Memory required for data: 1175041500\nI1212 06:17:46.680421 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:46.680438 18921 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:17:46.680449 18921 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:17:46.680464 18921 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.680564 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:17:46.680761 18921 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:17:46.680784 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.680794 18921 net.cpp:165] Memory required for data: 1179137500\nI1212 06:17:46.680814 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:17:46.680829 18921 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:17:46.680840 18921 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:17:46.680853 18921 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.680872 18921 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:17:46.680886 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.680896 18921 net.cpp:165] Memory required for data: 1183233500\nI1212 06:17:46.680905 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:17:46.680938 18921 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:17:46.680951 18921 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:17:46.680974 18921 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:17:46.681527 18921 
net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:17:46.681548 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.681557 18921 net.cpp:165] Memory required for data: 1187329500\nI1212 06:17:46.681574 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:17:46.681596 18921 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:17:46.681617 18921 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:17:46.681639 18921 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:17:46.681963 18921 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:17:46.681982 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.681991 18921 net.cpp:165] Memory required for data: 1191425500\nI1212 06:17:46.682013 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:46.682029 18921 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:17:46.682040 18921 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:17:46.682056 18921 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.682158 18921 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:17:46.682356 18921 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:17:46.682380 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.682389 18921 net.cpp:165] Memory required for data: 1195521500\nI1212 06:17:46.682409 18921 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:17:46.682430 18921 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:17:46.682442 18921 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:17:46.682456 18921 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:17:46.682471 18921 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:17:46.682520 18921 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:17:46.682538 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.682549 18921 net.cpp:165] Memory required for 
data: 1199617500\nI1212 06:17:46.682559 18921 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:17:46.682579 18921 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:17:46.682590 18921 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:17:46.682605 18921 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.682623 18921 net.cpp:150] Setting up L2_b8_relu\nI1212 06:17:46.682638 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.682648 18921 net.cpp:165] Memory required for data: 1203713500\nI1212 06:17:46.682658 18921 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.682672 18921 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.682682 18921 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:17:46.682698 18921 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:46.682741 18921 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:46.682837 18921 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:17:46.682858 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.682871 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.682881 18921 net.cpp:165] Memory required for data: 1211905500\nI1212 06:17:46.682891 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:17:46.682917 18921 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:17:46.682931 18921 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:17:46.682953 18921 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:17:46.683575 18921 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:17:46.683596 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.683606 18921 net.cpp:165] Memory required for data: 
1216001500\nI1212 06:17:46.683622 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:17:46.683650 18921 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:17:46.683663 18921 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:17:46.683686 18921 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:17:46.684010 18921 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:17:46.684033 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.684058 18921 net.cpp:165] Memory required for data: 1220097500\nI1212 06:17:46.684082 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:46.684098 18921 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:17:46.684110 18921 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:17:46.684125 18921 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.684231 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:17:46.684442 18921 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:17:46.684461 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.684471 18921 net.cpp:165] Memory required for data: 1224193500\nI1212 06:17:46.684489 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:17:46.684504 18921 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:17:46.684516 18921 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:17:46.684535 18921 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.684556 18921 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:17:46.684571 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.684579 18921 net.cpp:165] Memory required for data: 1228289500\nI1212 06:17:46.684590 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:17:46.684615 18921 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:17:46.684629 18921 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:17:46.684648 18921 net.cpp:408] 
L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:17:46.686264 18921 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:17:46.686286 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.686296 18921 net.cpp:165] Memory required for data: 1232385500\nI1212 06:17:46.686314 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:17:46.686332 18921 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:17:46.686344 18921 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:17:46.686383 18921 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:17:46.686705 18921 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:17:46.686725 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.686734 18921 net.cpp:165] Memory required for data: 1236481500\nI1212 06:17:46.686815 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:46.686837 18921 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:17:46.686851 18921 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:17:46.686866 18921 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.686959 18921 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:17:46.687156 18921 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:17:46.687175 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.687185 18921 net.cpp:165] Memory required for data: 1240577500\nI1212 06:17:46.687202 18921 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:17:46.687225 18921 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:17:46.687238 18921 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:17:46.687252 18921 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:17:46.687273 18921 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:17:46.687321 18921 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1212 06:17:46.687340 18921 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:17:46.687350 18921 net.cpp:165] Memory required for data: 1244673500\nI1212 06:17:46.687367 18921 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:17:46.687382 18921 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:17:46.687396 18921 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:17:46.687414 18921 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.687433 18921 net.cpp:150] Setting up L2_b9_relu\nI1212 06:17:46.687448 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.687458 18921 net.cpp:165] Memory required for data: 1248769500\nI1212 06:17:46.687477 18921 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.687494 18921 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.687505 18921 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:17:46.687525 18921 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:46.687546 18921 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:46.687636 18921 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:17:46.687655 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.687669 18921 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:17:46.687677 18921 net.cpp:165] Memory required for data: 1256961500\nI1212 06:17:46.687687 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:17:46.687708 18921 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:17:46.687721 18921 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:17:46.687744 18921 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:17:46.688302 18921 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1212 06:17:46.688323 18921 net.cpp:157] Top shape: 125 32 8 8 
(256000)\nI1212 06:17:46.688333 18921 net.cpp:165] Memory required for data: 1257985500\nI1212 06:17:46.688350 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:17:46.688379 18921 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:17:46.688392 18921 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:17:46.688410 18921 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:17:46.688737 18921 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:17:46.688756 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.688766 18921 net.cpp:165] Memory required for data: 1259009500\nI1212 06:17:46.688787 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:46.688803 18921 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:17:46.688815 18921 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:17:46.688830 18921 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.688926 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:17:46.689134 18921 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:17:46.689157 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.689167 18921 net.cpp:165] Memory required for data: 1260033500\nI1212 06:17:46.689185 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:17:46.689201 18921 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:17:46.689213 18921 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:17:46.689226 18921 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:17:46.689245 18921 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:17:46.689260 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.689268 18921 net.cpp:165] Memory required for data: 1261057500\nI1212 06:17:46.689278 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:17:46.689303 18921 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1212 06:17:46.689317 18921 net.cpp:434] 
L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:17:46.689335 18921 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:17:46.689874 18921 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:17:46.689893 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.689903 18921 net.cpp:165] Memory required for data: 1262081500\nI1212 06:17:46.689919 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:17:46.689940 18921 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:17:46.689954 18921 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:17:46.689970 18921 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:17:46.690294 18921 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:17:46.690315 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.690332 18921 net.cpp:165] Memory required for data: 1263105500\nI1212 06:17:46.690356 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:46.690379 18921 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:17:46.690392 18921 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:17:46.690412 18921 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:17:46.690508 18921 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:17:46.690724 18921 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:17:46.690744 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.690753 18921 net.cpp:165] Memory required for data: 1264129500\nI1212 06:17:46.690771 18921 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:17:46.690793 18921 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:17:46.690805 18921 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:17:46.690822 18921 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:17:46.690884 18921 net.cpp:150] Setting up L3_b1_pool\nI1212 06:17:46.690906 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.690915 18921 net.cpp:165] Memory 
required for data: 1265153500\nI1212 06:17:46.690925 18921 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:17:46.690946 18921 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:17:46.690958 18921 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:17:46.690971 18921 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:17:46.690987 18921 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:17:46.691045 18921 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:17:46.691064 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.691074 18921 net.cpp:165] Memory required for data: 1266177500\nI1212 06:17:46.691084 18921 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:17:46.691098 18921 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:17:46.691110 18921 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:17:46.691133 18921 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:17:46.691151 18921 net.cpp:150] Setting up L3_b1_relu\nI1212 06:17:46.691166 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.691176 18921 net.cpp:165] Memory required for data: 1267201500\nI1212 06:17:46.691186 18921 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:17:46.691205 18921 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:17:46.691220 18921 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:17:46.692900 18921 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:17:46.692922 18921 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:17:46.692932 18921 net.cpp:165] Memory required for data: 1268225500\nI1212 06:17:46.692942 18921 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:17:46.692958 18921 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:17:46.692970 18921 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:17:46.692988 18921 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 06:17:46.693006 18921 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 
06:17:46.693074 18921 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:17:46.693095 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.693105 18921 net.cpp:165] Memory required for data: 1270273500\nI1212 06:17:46.693116 18921 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.693131 18921 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.693142 18921 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:17:46.693162 18921 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:46.693184 18921 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:46.693274 18921 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:17:46.693297 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.693311 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.693331 18921 net.cpp:165] Memory required for data: 1274369500\nI1212 06:17:46.693342 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:17:46.693372 18921 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:17:46.693384 18921 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:17:46.693403 18921 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:17:46.694495 18921 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:17:46.694515 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.694525 18921 net.cpp:165] Memory required for data: 1276417500\nI1212 06:17:46.694542 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:17:46.694564 18921 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:17:46.694577 18921 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:17:46.694594 18921 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 06:17:46.694916 18921 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 
06:17:46.694934 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.694944 18921 net.cpp:165] Memory required for data: 1278465500\nI1212 06:17:46.694965 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:46.694993 18921 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:17:46.695008 18921 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:17:46.695024 18921 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.695129 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:17:46.695336 18921 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:17:46.695355 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.695371 18921 net.cpp:165] Memory required for data: 1280513500\nI1212 06:17:46.695391 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:17:46.695411 18921 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:17:46.695425 18921 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:17:46.695443 18921 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:17:46.695462 18921 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:17:46.695477 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.695488 18921 net.cpp:165] Memory required for data: 1282561500\nI1212 06:17:46.695498 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:17:46.695519 18921 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:17:46.695533 18921 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:17:46.695554 18921 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:17:46.696655 18921 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:17:46.696674 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.696684 18921 net.cpp:165] Memory required for data: 1284609500\nI1212 06:17:46.696702 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1212 06:17:46.696724 18921 net.cpp:100] Creating Layer 
L3_b2_cbr2_bn\nI1212 06:17:46.696736 18921 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:17:46.696753 18921 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:17:46.697075 18921 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:17:46.697094 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.697103 18921 net.cpp:165] Memory required for data: 1286657500\nI1212 06:17:46.697124 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:46.697141 18921 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:17:46.697154 18921 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:17:46.697168 18921 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:17:46.697269 18921 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:17:46.697479 18921 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:17:46.697502 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.697512 18921 net.cpp:165] Memory required for data: 1288705500\nI1212 06:17:46.697531 18921 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:17:46.697557 18921 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:17:46.697571 18921 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:17:46.697583 18921 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:17:46.697600 18921 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:17:46.697669 18921 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:17:46.697690 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.697698 18921 net.cpp:165] Memory required for data: 1290753500\nI1212 06:17:46.697710 18921 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:17:46.697723 18921 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:17:46.697736 18921 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:17:46.697751 18921 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 
06:17:46.697769 18921 net.cpp:150] Setting up L3_b2_relu\nI1212 06:17:46.697784 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.697793 18921 net.cpp:165] Memory required for data: 1292801500\nI1212 06:17:46.697803 18921 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.697824 18921 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.697834 18921 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:17:46.697851 18921 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:46.697871 18921 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:46.697955 18921 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:17:46.697981 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.697995 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.698004 18921 net.cpp:165] Memory required for data: 1296897500\nI1212 06:17:46.698015 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:17:46.698035 18921 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:17:46.698048 18921 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:17:46.698066 18921 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:17:46.699179 18921 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:17:46.699199 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.699209 18921 net.cpp:165] Memory required for data: 1298945500\nI1212 06:17:46.699228 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:17:46.699249 18921 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:17:46.699262 18921 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:17:46.699280 18921 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:17:46.699604 18921 
net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:17:46.699623 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.699632 18921 net.cpp:165] Memory required for data: 1300993500\nI1212 06:17:46.699653 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:46.699676 18921 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:17:46.699689 18921 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:17:46.699709 18921 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.699805 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:17:46.700011 18921 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:17:46.700031 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.700040 18921 net.cpp:165] Memory required for data: 1303041500\nI1212 06:17:46.700058 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:17:46.700074 18921 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:17:46.700086 18921 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:17:46.700106 18921 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:17:46.700127 18921 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:17:46.700142 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.700160 18921 net.cpp:165] Memory required for data: 1305089500\nI1212 06:17:46.700171 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:17:46.700192 18921 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:17:46.700206 18921 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:17:46.700229 18921 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:17:46.701319 18921 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:17:46.701340 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.701349 18921 net.cpp:165] Memory required for data: 1307137500\nI1212 06:17:46.701373 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 
06:17:46.701397 18921 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:17:46.701409 18921 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:17:46.701426 18921 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:17:46.701747 18921 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:17:46.701767 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.701776 18921 net.cpp:165] Memory required for data: 1309185500\nI1212 06:17:46.701797 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:46.701814 18921 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:17:46.701825 18921 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:17:46.701841 18921 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:17:46.701942 18921 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:17:46.702145 18921 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:17:46.702164 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.702173 18921 net.cpp:165] Memory required for data: 1311233500\nI1212 06:17:46.702193 18921 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:17:46.702208 18921 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:17:46.702221 18921 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:17:46.702234 18921 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:17:46.702255 18921 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:17:46.702313 18921 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:17:46.702332 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.702342 18921 net.cpp:165] Memory required for data: 1313281500\nI1212 06:17:46.702353 18921 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:17:46.702378 18921 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:17:46.702391 18921 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1212 06:17:46.702405 18921 net.cpp:395] 
L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:17:46.702425 18921 net.cpp:150] Setting up L3_b3_relu\nI1212 06:17:46.702440 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.702450 18921 net.cpp:165] Memory required for data: 1315329500\nI1212 06:17:46.702460 18921 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.702478 18921 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.702491 18921 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:17:46.702507 18921 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:46.702525 18921 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:46.702610 18921 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:17:46.702635 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.702647 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.702656 18921 net.cpp:165] Memory required for data: 1319425500\nI1212 06:17:46.702666 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:17:46.702687 18921 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:17:46.702699 18921 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:17:46.702726 18921 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:17:46.703845 18921 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:17:46.703866 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.703876 18921 net.cpp:165] Memory required for data: 1321473500\nI1212 06:17:46.703893 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:17:46.703915 18921 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:17:46.703927 18921 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1212 06:17:46.703944 18921 net.cpp:408] L3_b4_cbr1_bn 
-> L3_b4_cbr1_bn_top\nI1212 06:17:46.704263 18921 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:17:46.704283 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.704293 18921 net.cpp:165] Memory required for data: 1323521500\nI1212 06:17:46.704315 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:46.704344 18921 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:17:46.704357 18921 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:17:46.704386 18921 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.704488 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:17:46.704694 18921 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:17:46.704713 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.704722 18921 net.cpp:165] Memory required for data: 1325569500\nI1212 06:17:46.704741 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:17:46.704756 18921 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:17:46.704768 18921 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:17:46.704787 18921 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:17:46.704807 18921 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:17:46.704823 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.704831 18921 net.cpp:165] Memory required for data: 1327617500\nI1212 06:17:46.704841 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:17:46.704867 18921 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:17:46.704881 18921 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:17:46.704900 18921 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:17:46.707046 18921 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:17:46.707068 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.707078 18921 net.cpp:165] Memory required for data: 1329665500\nI1212 06:17:46.707096 18921 
layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:17:46.707118 18921 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:17:46.707132 18921 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:17:46.707149 18921 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:17:46.707484 18921 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:17:46.707504 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.707514 18921 net.cpp:165] Memory required for data: 1331713500\nI1212 06:17:46.707535 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:46.707557 18921 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:17:46.707569 18921 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:17:46.707597 18921 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:17:46.707700 18921 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:17:46.707911 18921 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:17:46.707931 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.707939 18921 net.cpp:165] Memory required for data: 1333761500\nI1212 06:17:46.707958 18921 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:17:46.707975 18921 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:17:46.707988 18921 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:17:46.708000 18921 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:17:46.708022 18921 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:17:46.708079 18921 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:17:46.708108 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.708120 18921 net.cpp:165] Memory required for data: 1335809500\nI1212 06:17:46.708132 18921 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:17:46.708150 18921 net.cpp:100] Creating Layer L3_b4_relu\nI1212 06:17:46.708163 18921 net.cpp:434] L3_b4_relu <- 
L3_b4_sum_eltwise_top\nI1212 06:17:46.708179 18921 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:17:46.708199 18921 net.cpp:150] Setting up L3_b4_relu\nI1212 06:17:46.708214 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.708222 18921 net.cpp:165] Memory required for data: 1337857500\nI1212 06:17:46.708233 18921 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.708247 18921 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.708257 18921 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:17:46.708273 18921 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:46.708293 18921 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:46.708394 18921 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:17:46.708413 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.708426 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.708436 18921 net.cpp:165] Memory required for data: 1341953500\nI1212 06:17:46.708446 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:17:46.708472 18921 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:17:46.708484 18921 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:17:46.708503 18921 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:17:46.709592 18921 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:17:46.709612 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.709622 18921 net.cpp:165] Memory required for data: 1344001500\nI1212 06:17:46.709640 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:17:46.709662 18921 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 06:17:46.709676 18921 net.cpp:434] L3_b5_cbr1_bn <- 
L3_b5_cbr1_conv_top\nI1212 06:17:46.709699 18921 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:17:46.710012 18921 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:17:46.710031 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.710041 18921 net.cpp:165] Memory required for data: 1346049500\nI1212 06:17:46.710062 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:46.710078 18921 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:17:46.710089 18921 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:17:46.710109 18921 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.710209 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:17:46.710422 18921 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:17:46.710441 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.710450 18921 net.cpp:165] Memory required for data: 1348097500\nI1212 06:17:46.710469 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:17:46.710484 18921 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:17:46.710495 18921 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:17:46.710523 18921 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:17:46.710546 18921 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:17:46.710561 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.710571 18921 net.cpp:165] Memory required for data: 1350145500\nI1212 06:17:46.710582 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:17:46.710608 18921 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:17:46.710621 18921 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:17:46.710639 18921 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:17:46.711740 18921 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:17:46.711764 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.711776 18921 net.cpp:165] Memory 
required for data: 1352193500\nI1212 06:17:46.711793 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:17:46.711809 18921 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:17:46.711822 18921 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:17:46.711845 18921 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:17:46.712160 18921 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:17:46.712180 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.712189 18921 net.cpp:165] Memory required for data: 1354241500\nI1212 06:17:46.712211 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:46.712234 18921 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:17:46.712246 18921 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:17:46.712262 18921 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:17:46.712357 18921 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:17:46.712565 18921 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:17:46.712584 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.712594 18921 net.cpp:165] Memory required for data: 1356289500\nI1212 06:17:46.712611 18921 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:17:46.712628 18921 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:17:46.712641 18921 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:17:46.712653 18921 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:17:46.712676 18921 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:17:46.712733 18921 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:17:46.712751 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.712760 18921 net.cpp:165] Memory required for data: 1358337500\nI1212 06:17:46.712771 18921 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 06:17:46.712791 18921 net.cpp:100] Creating Layer L3_b5_relu\nI1212 
06:17:46.712805 18921 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:17:46.712819 18921 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:17:46.712837 18921 net.cpp:150] Setting up L3_b5_relu\nI1212 06:17:46.712852 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.712862 18921 net.cpp:165] Memory required for data: 1360385500\nI1212 06:17:46.712870 18921 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.712885 18921 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.712896 18921 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:17:46.712911 18921 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:46.712932 18921 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:46.713017 18921 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:17:46.713035 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.713047 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.713057 18921 net.cpp:165] Memory required for data: 1364481500\nI1212 06:17:46.713068 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:17:46.713093 18921 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:17:46.713106 18921 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:17:46.713126 18921 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:17:46.714200 18921 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:17:46.714221 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.714231 18921 net.cpp:165] Memory required for data: 1366529500\nI1212 06:17:46.714247 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:17:46.714285 18921 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 
06:17:46.714300 18921 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:17:46.714321 18921 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:17:46.714649 18921 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:17:46.714669 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.714679 18921 net.cpp:165] Memory required for data: 1368577500\nI1212 06:17:46.714699 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:46.714716 18921 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:17:46.714727 18921 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:17:46.714748 18921 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.714844 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:17:46.715052 18921 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:17:46.715071 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.715080 18921 net.cpp:165] Memory required for data: 1370625500\nI1212 06:17:46.715100 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:17:46.715114 18921 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:17:46.715127 18921 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:17:46.715145 18921 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:17:46.715167 18921 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:17:46.715180 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.715190 18921 net.cpp:165] Memory required for data: 1372673500\nI1212 06:17:46.715200 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:17:46.715226 18921 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:17:46.715240 18921 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:17:46.715261 18921 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:17:46.716353 18921 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 06:17:46.716379 18921 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:46.716388 18921 net.cpp:165] Memory required for data: 1374721500\nI1212 06:17:46.716406 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:17:46.716423 18921 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:17:46.716436 18921 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:17:46.716459 18921 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:17:46.716778 18921 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:17:46.716807 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.716819 18921 net.cpp:165] Memory required for data: 1376769500\nI1212 06:17:46.716840 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:46.716856 18921 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:17:46.716868 18921 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:17:46.716884 18921 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:17:46.716984 18921 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:17:46.717187 18921 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:17:46.717206 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.717216 18921 net.cpp:165] Memory required for data: 1378817500\nI1212 06:17:46.717233 18921 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:17:46.717257 18921 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:17:46.717268 18921 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:17:46.717281 18921 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:17:46.717298 18921 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:17:46.717366 18921 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:17:46.717386 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.717397 18921 net.cpp:165] Memory required for data: 1380865500\nI1212 06:17:46.717407 18921 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 
06:17:46.717422 18921 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:17:46.717434 18921 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:17:46.717455 18921 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:17:46.717475 18921 net.cpp:150] Setting up L3_b6_relu\nI1212 06:17:46.717490 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.717500 18921 net.cpp:165] Memory required for data: 1382913500\nI1212 06:17:46.717509 18921 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.717523 18921 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.717535 18921 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:17:46.717555 18921 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:46.717576 18921 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:46.717663 18921 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:17:46.717680 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.717694 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.717702 18921 net.cpp:165] Memory required for data: 1387009500\nI1212 06:17:46.717713 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:17:46.717741 18921 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:17:46.717754 18921 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:17:46.717774 18921 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:17:46.718870 18921 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:17:46.718890 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.718899 18921 net.cpp:165] Memory required for data: 1389057500\nI1212 06:17:46.718917 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:17:46.718935 
18921 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:17:46.718951 18921 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:17:46.718968 18921 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:17:46.719285 18921 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:17:46.719303 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.719312 18921 net.cpp:165] Memory required for data: 1391105500\nI1212 06:17:46.719334 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:46.719352 18921 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:17:46.719369 18921 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:17:46.719391 18921 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.719487 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:17:46.719691 18921 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:17:46.719710 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.719719 18921 net.cpp:165] Memory required for data: 1393153500\nI1212 06:17:46.719738 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:17:46.719802 18921 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:17:46.719820 18921 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:17:46.719836 18921 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:17:46.719856 18921 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:17:46.719871 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.719882 18921 net.cpp:165] Memory required for data: 1395201500\nI1212 06:17:46.719892 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:17:46.719914 18921 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:17:46.719926 18921 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:17:46.719944 18921 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 06:17:46.721020 18921 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 
06:17:46.721041 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.721050 18921 net.cpp:165] Memory required for data: 1397249500\nI1212 06:17:46.721067 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:17:46.721092 18921 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:17:46.721113 18921 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:17:46.721135 18921 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:17:46.721472 18921 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:17:46.721491 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.721500 18921 net.cpp:165] Memory required for data: 1399297500\nI1212 06:17:46.721523 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:46.721539 18921 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:17:46.721551 18921 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:17:46.721577 18921 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:17:46.721675 18921 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:17:46.721884 18921 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:17:46.721904 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.721913 18921 net.cpp:165] Memory required for data: 1401345500\nI1212 06:17:46.721931 18921 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:17:46.721952 18921 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:17:46.721964 18921 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:17:46.721978 18921 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:17:46.721995 18921 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:17:46.722057 18921 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:17:46.722076 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.722086 18921 net.cpp:165] Memory required for data: 1403393500\nI1212 06:17:46.722096 18921 
layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:17:46.722111 18921 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:17:46.722123 18921 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:17:46.722142 18921 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:17:46.722162 18921 net.cpp:150] Setting up L3_b7_relu\nI1212 06:17:46.722177 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.722185 18921 net.cpp:165] Memory required for data: 1405441500\nI1212 06:17:46.722195 18921 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.722210 18921 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.722223 18921 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:17:46.722237 18921 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:46.722257 18921 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:46.722345 18921 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:17:46.722369 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.722384 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.722393 18921 net.cpp:165] Memory required for data: 1409537500\nI1212 06:17:46.722404 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:17:46.722424 18921 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:17:46.722436 18921 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:17:46.722460 18921 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:17:46.724625 18921 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:17:46.724647 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.724658 18921 net.cpp:165] Memory required for data: 1411585500\nI1212 06:17:46.724676 18921 
layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:17:46.724699 18921 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:17:46.724711 18921 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:17:46.724728 18921 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:17:46.725055 18921 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:17:46.725075 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.725083 18921 net.cpp:165] Memory required for data: 1413633500\nI1212 06:17:46.725116 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:46.725139 18921 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:17:46.725153 18921 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:17:46.725169 18921 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.725275 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:17:46.725488 18921 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:17:46.725507 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.725517 18921 net.cpp:165] Memory required for data: 1415681500\nI1212 06:17:46.725534 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:17:46.725555 18921 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:17:46.725567 18921 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:17:46.725581 18921 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:17:46.725601 18921 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:17:46.725621 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.725631 18921 net.cpp:165] Memory required for data: 1417729500\nI1212 06:17:46.725642 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:17:46.725662 18921 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:17:46.725675 18921 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:17:46.725699 18921 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 
06:17:46.726786 18921 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:17:46.726806 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.726816 18921 net.cpp:165] Memory required for data: 1419777500\nI1212 06:17:46.726835 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:17:46.726851 18921 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:17:46.726863 18921 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:17:46.726884 18921 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:17:46.727210 18921 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:17:46.727231 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.727239 18921 net.cpp:165] Memory required for data: 1421825500\nI1212 06:17:46.727260 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:46.727277 18921 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:17:46.727288 18921 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:17:46.727304 18921 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:17:46.727408 18921 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:17:46.727610 18921 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:17:46.727633 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.727643 18921 net.cpp:165] Memory required for data: 1423873500\nI1212 06:17:46.727663 18921 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:17:46.727679 18921 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:17:46.727690 18921 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:17:46.727704 18921 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:17:46.727720 18921 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:17:46.727782 18921 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:17:46.727800 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.727810 18921 net.cpp:165] Memory 
required for data: 1425921500\nI1212 06:17:46.727820 18921 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:17:46.727835 18921 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:17:46.727847 18921 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:17:46.727861 18921 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:17:46.727880 18921 net.cpp:150] Setting up L3_b8_relu\nI1212 06:17:46.727895 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.727905 18921 net.cpp:165] Memory required for data: 1427969500\nI1212 06:17:46.727915 18921 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.727937 18921 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.727951 18921 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:17:46.727970 18921 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:46.727993 18921 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:46.728077 18921 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:17:46.728103 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.728118 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.728128 18921 net.cpp:165] Memory required for data: 1432065500\nI1212 06:17:46.728139 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:17:46.728160 18921 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:17:46.728173 18921 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:17:46.728193 18921 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:17:46.729286 18921 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:17:46.729307 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.729316 18921 net.cpp:165] Memory required for data: 
1434113500\nI1212 06:17:46.729334 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:17:46.729356 18921 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:17:46.729374 18921 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:17:46.729393 18921 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:17:46.729719 18921 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:17:46.729738 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.729748 18921 net.cpp:165] Memory required for data: 1436161500\nI1212 06:17:46.729768 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:46.729789 18921 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:17:46.729801 18921 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:17:46.729816 18921 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.729919 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:17:46.730124 18921 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:17:46.730144 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.730152 18921 net.cpp:165] Memory required for data: 1438209500\nI1212 06:17:46.730171 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:17:46.730191 18921 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:17:46.730203 18921 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:17:46.730222 18921 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:17:46.730242 18921 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:17:46.730257 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.730265 18921 net.cpp:165] Memory required for data: 1440257500\nI1212 06:17:46.730275 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:17:46.730296 18921 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:17:46.730309 18921 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1212 06:17:46.730331 18921 net.cpp:408] 
L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:17:46.731420 18921 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:17:46.731439 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.731449 18921 net.cpp:165] Memory required for data: 1442305500\nI1212 06:17:46.731467 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:17:46.731488 18921 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:17:46.731501 18921 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:17:46.731518 18921 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:17:46.731843 18921 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:17:46.731863 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.731871 18921 net.cpp:165] Memory required for data: 1444353500\nI1212 06:17:46.731902 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:46.731920 18921 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:17:46.731932 18921 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:17:46.731948 18921 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:17:46.732053 18921 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:17:46.732262 18921 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:17:46.732287 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.732298 18921 net.cpp:165] Memory required for data: 1446401500\nI1212 06:17:46.732317 18921 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:17:46.732333 18921 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:17:46.732345 18921 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:17:46.732358 18921 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:17:46.732383 18921 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:17:46.732445 18921 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 06:17:46.732465 18921 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:17:46.732475 18921 net.cpp:165] Memory required for data: 1448449500\nI1212 06:17:46.732484 18921 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:17:46.732498 18921 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:17:46.732511 18921 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:17:46.732523 18921 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:17:46.732542 18921 net.cpp:150] Setting up L3_b9_relu\nI1212 06:17:46.732556 18921 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:17:46.732565 18921 net.cpp:165] Memory required for data: 1450497500\nI1212 06:17:46.732575 18921 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:17:46.732595 18921 net.cpp:100] Creating Layer post_pool\nI1212 06:17:46.732607 18921 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:17:46.732623 18921 net.cpp:408] post_pool -> post_pool\nI1212 06:17:46.732681 18921 net.cpp:150] Setting up post_pool\nI1212 06:17:46.732703 18921 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:17:46.732713 18921 net.cpp:165] Memory required for data: 1450529500\nI1212 06:17:46.732724 18921 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:17:46.732743 18921 net.cpp:100] Creating Layer post_FC\nI1212 06:17:46.732755 18921 net.cpp:434] post_FC <- post_pool\nI1212 06:17:46.732777 18921 net.cpp:408] post_FC -> post_FC_top\nI1212 06:17:46.732981 18921 net.cpp:150] Setting up post_FC\nI1212 06:17:46.733001 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.733011 18921 net.cpp:165] Memory required for data: 1450534500\nI1212 06:17:46.733028 18921 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:17:46.733047 18921 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:17:46.733059 18921 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:17:46.733078 18921 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1212 06:17:46.733098 18921 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:17:46.733186 18921 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:17:46.733204 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.733218 18921 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:17:46.733227 18921 net.cpp:165] Memory required for data: 1450544500\nI1212 06:17:46.733237 18921 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:17:46.733253 18921 net.cpp:100] Creating Layer accuracy\nI1212 06:17:46.733265 18921 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:17:46.733279 18921 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:17:46.733294 18921 net.cpp:408] accuracy -> accuracy\nI1212 06:17:46.733319 18921 net.cpp:150] Setting up accuracy\nI1212 06:17:46.733333 18921 net.cpp:157] Top shape: (1)\nI1212 06:17:46.733343 18921 net.cpp:165] Memory required for data: 1450544504\nI1212 06:17:46.733369 18921 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:46.733394 18921 net.cpp:100] Creating Layer loss\nI1212 06:17:46.733407 18921 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:17:46.733420 18921 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:17:46.733436 18921 net.cpp:408] loss -> loss\nI1212 06:17:46.733459 18921 layer_factory.hpp:77] Creating layer loss\nI1212 06:17:46.733631 18921 net.cpp:150] Setting up loss\nI1212 06:17:46.733654 18921 net.cpp:157] Top shape: (1)\nI1212 06:17:46.733664 18921 net.cpp:160]     with loss weight 1\nI1212 06:17:46.733690 18921 net.cpp:165] Memory required for data: 1450544508\nI1212 06:17:46.733701 18921 net.cpp:226] loss needs backward computation.\nI1212 06:17:46.733712 18921 net.cpp:228] accuracy does not need backward computation.\nI1212 06:17:46.733724 18921 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:17:46.733736 18921 net.cpp:226] post_FC needs backward computation.\nI1212 06:17:46.733745 18921 net.cpp:226] post_pool needs backward 
computation.\nI1212 06:17:46.733754 18921 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:17:46.733764 18921 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.733774 18921 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.733784 18921 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.733793 18921 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.733803 18921 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.733814 18921 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.733822 18921 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.733832 18921 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.733842 18921 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:17:46.733853 18921 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:17:46.733863 18921 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.733875 18921 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.733883 18921 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.733894 18921 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.733906 18921 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.733914 18921 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.733924 18921 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:17:46.733934 18921 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.733945 18921 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:17:46.733955 18921 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:17:46.733965 18921 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.733976 18921 net.cpp:226] 
L3_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.733988 18921 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.733997 18921 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.734007 18921 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.734017 18921 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.734027 18921 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.734037 18921 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.734048 18921 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:17:46.734060 18921 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:17:46.734069 18921 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.734079 18921 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.734089 18921 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.734100 18921 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.734120 18921 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.734131 18921 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.734140 18921 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.734151 18921 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.734161 18921 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:17:46.734172 18921 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:17:46.734182 18921 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.734194 18921 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:17:46.734203 18921 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.734213 18921 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.734225 
18921 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.734235 18921 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.734244 18921 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.734254 18921 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.734264 18921 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:17:46.734282 18921 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:17:46.734292 18921 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.734302 18921 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.734313 18921 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.734324 18921 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.734333 18921 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.734344 18921 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.734354 18921 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.734371 18921 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.734385 18921 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:17:46.734395 18921 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:17:46.734405 18921 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.734416 18921 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.734426 18921 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.734436 18921 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.734447 18921 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.734457 18921 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.734465 18921 net.cpp:226] L3_b3_cbr1_bn needs backward 
computation.\nI1212 06:17:46.734477 18921 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.734488 18921 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:17:46.734498 18921 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:17:46.734508 18921 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.734519 18921 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.734530 18921 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.734540 18921 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.734550 18921 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.734560 18921 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.734571 18921 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.734581 18921 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.734592 18921 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:17:46.734602 18921 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:17:46.734614 18921 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:17:46.734634 18921 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:17:46.734645 18921 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.734657 18921 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:17:46.734668 18921 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.734678 18921 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.734688 18921 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.734699 18921 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:17:46.734709 18921 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.734719 18921 net.cpp:226] L3_b1_cbr1_bn 
needs backward computation.\nI1212 06:17:46.734730 18921 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.734740 18921 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:17:46.734750 18921 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:17:46.734760 18921 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.734772 18921 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.734782 18921 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.734792 18921 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.734803 18921 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.734814 18921 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.734824 18921 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.734834 18921 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.734846 18921 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:17:46.734858 18921 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:17:46.734866 18921 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.734877 18921 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.734889 18921 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.734899 18921 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.734910 18921 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.734920 18921 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.734930 18921 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:17:46.734941 18921 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.734951 18921 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 
06:17:46.734961 18921 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:17:46.734972 18921 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.734989 18921 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.735002 18921 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.735013 18921 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.735023 18921 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.735033 18921 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.735044 18921 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.735054 18921 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.735065 18921 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:17:46.735075 18921 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:17:46.735086 18921 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.735097 18921 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.735108 18921 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.735122 18921 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.735133 18921 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.735143 18921 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.735162 18921 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.735174 18921 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.735185 18921 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:17:46.735196 18921 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:17:46.735208 18921 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.735219 18921 net.cpp:226] L2_b5_cbr2_scale needs backward 
computation.\nI1212 06:17:46.735229 18921 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.735239 18921 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.735250 18921 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.735261 18921 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.735271 18921 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.735281 18921 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.735293 18921 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:17:46.735304 18921 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:17:46.735316 18921 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.735325 18921 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.735337 18921 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.735347 18921 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.735358 18921 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.735376 18921 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.735388 18921 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.735399 18921 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.735409 18921 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:17:46.735420 18921 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:17:46.735431 18921 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.735442 18921 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.735453 18921 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.735466 18921 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.735476 18921 net.cpp:226] 
L2_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.735486 18921 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.735496 18921 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:17:46.735507 18921 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.735519 18921 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:17:46.735530 18921 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:17:46.735540 18921 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.735553 18921 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.735563 18921 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.735575 18921 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.735585 18921 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.735596 18921 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.735607 18921 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.735618 18921 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.735630 18921 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:17:46.735641 18921 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:17:46.735653 18921 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:17:46.735664 18921 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:17:46.735676 18921 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.735697 18921 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:17:46.735707 18921 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.735718 18921 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.735729 18921 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.735740 18921 
net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:17:46.735751 18921 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.735762 18921 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:17:46.735774 18921 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.735785 18921 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:17:46.735795 18921 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:17:46.735805 18921 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:17:46.735816 18921 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:17:46.735827 18921 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:17:46.735839 18921 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:17:46.735851 18921 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:17:46.735862 18921 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:17:46.735872 18921 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:17:46.735883 18921 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:17:46.735894 18921 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:17:46.735905 18921 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:17:46.735916 18921 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:17:46.735929 18921 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:17:46.735939 18921 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:17:46.735950 18921 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:17:46.735962 18921 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:17:46.735973 18921 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 06:17:46.735983 18921 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 
06:17:46.735994 18921 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:17:46.736006 18921 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:17:46.736017 18921 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:17:46.736028 18921 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:17:46.736040 18921 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:17:46.736052 18921 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:17:46.736063 18921 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:17:46.736073 18921 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:17:46.736084 18921 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:17:46.736095 18921 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:17:46.736106 18921 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:17:46.736119 18921 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:17:46.736130 18921 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:17:46.736141 18921 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:17:46.736153 18921 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:17:46.736166 18921 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:17:46.736176 18921 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:17:46.736187 18921 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:17:46.736199 18921 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:17:46.736210 18921 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:17:46.736230 18921 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:17:46.736243 18921 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:17:46.736253 18921 net.cpp:226] 
L1_b5_relu needs backward computation.\nI1212 06:17:46.736264 18921 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:17:46.736276 18921 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:17:46.736287 18921 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:17:46.736299 18921 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:17:46.736310 18921 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:17:46.736320 18921 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:17:46.736332 18921 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:17:46.736343 18921 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:17:46.736356 18921 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:17:46.736380 18921 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:17:46.736392 18921 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:17:46.736404 18921 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:17:46.736415 18921 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:17:46.736428 18921 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:17:46.736438 18921 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:17:46.736449 18921 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:17:46.736460 18921 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:17:46.736471 18921 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:17:46.736482 18921 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:17:46.736495 18921 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:17:46.736505 18921 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1212 06:17:46.736517 18921 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:17:46.736528 
18921 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:17:46.736541 18921 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:17:46.736552 18921 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:17:46.736562 18921 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:17:46.736572 18921 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:17:46.736584 18921 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:17:46.736595 18921 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:17:46.736608 18921 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:17:46.736618 18921 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:17:46.736630 18921 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:17:46.736641 18921 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:17:46.736652 18921 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:17:46.736665 18921 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:17:46.736675 18921 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:17:46.736686 18921 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:17:46.736697 18921 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:17:46.736709 18921 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:17:46.736721 18921 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:17:46.736732 18921 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:17:46.736743 18921 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:17:46.736754 18921 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:17:46.736766 18921 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1212 06:17:46.736788 18921 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 
06:17:46.736800 18921 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:17:46.736810 18921 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:17:46.736822 18921 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:17:46.736834 18921 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:17:46.736845 18921 net.cpp:226] pre_relu needs backward computation.\nI1212 06:17:46.736855 18921 net.cpp:226] pre_scale needs backward computation.\nI1212 06:17:46.736865 18921 net.cpp:226] pre_bn needs backward computation.\nI1212 06:17:46.736876 18921 net.cpp:226] pre_conv needs backward computation.\nI1212 06:17:46.736889 18921 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:17:46.736903 18921 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:17:46.736912 18921 net.cpp:270] This network produces output accuracy\nI1212 06:17:46.736923 18921 net.cpp:270] This network produces output loss\nI1212 06:17:46.737274 18921 net.cpp:283] Network initialization done.\nI1212 06:17:46.738307 18921 solver.cpp:60] Solver scaffolding done.\nI1212 06:17:46.961772 18921 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1212 06:17:47.324820 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:47.324890 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:47.332270 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:47.570572 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:47.570660 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:47.605664 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:47.605743 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 
06:17:48.053926 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:48.054000 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:48.061596 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:48.319097 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:48.319245 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:48.372004 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:48.372146 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:48.894199 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:48.894275 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:48.902931 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:49.177203 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:49.177347 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:49.249423 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:49.249557 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:49.333969 18921 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1212 06:17:49.840312 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:49.840368 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:49.850481 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:50.145944 18921 net.cpp:93] Sharing layer 
L2_b1_zeros from root net\nI1212 06:17:50.146155 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:50.239257 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:50.239452 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:50.903887 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:50.903965 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:50.914234 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:51.233851 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:51.234091 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:51.349352 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:51.349576 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:52.094952 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:17:52.095018 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:52.106600 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:52.448375 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:52.448638 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:52.583537 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:52.583778 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:53.390019 18921 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: 
examples/sc/architectures/arch.prototxt\nI1212 06:17:53.390085 18921 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:17:53.402290 18921 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:17:53.860375 18921 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:17:53.860662 18921 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:17:54.013700 18921 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:17:54.013963 18921 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:17:54.186203 18921 parallel.cpp:425] Starting Optimization\nI1212 06:17:54.187930 18921 solver.cpp:279] Solving Cifar-Resnet\nI1212 06:17:54.187947 18921 solver.cpp:280] Learning Rate Policy: triangular\nI1212 06:17:54.192625 18921 solver.cpp:337] Iteration 0, Testing net (#0)\nI1212 06:19:16.983000 18921 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI1212 06:19:16.983301 18921 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI1212 06:19:20.978768 18921 solver.cpp:228] Iteration 0, loss = 4.50959\nI1212 06:19:20.978811 18921 solver.cpp:244]     Train net output #0: accuracy = 0.128\nI1212 06:19:20.978828 18921 solver.cpp:244]     Train net output #1: loss = 4.50959 (* 1 = 4.50959 loss)\nI1212 06:19:20.978971 18921 sgd_solver.cpp:174] Iteration 0, lr = 0\nI1212 06:19:21.004465 18921 sgd_solver.cpp:149] Gradient: L2 norm 8.96938\nI1212 06:21:40.283126 18921 solver.cpp:337] Iteration 100, Testing net (#0)\nI1212 06:23:03.353282 18921 solver.cpp:404]     Test net output #0: accuracy = 0.42712\nI1212 06:23:03.353510 18921 solver.cpp:404]     Test net output #1: loss = 1.52885 (* 1 = 1.52885 loss)\nI1212 06:23:04.706511 18921 solver.cpp:228] Iteration 100, loss = 1.44864\nI1212 06:23:04.706547 18921 solver.cpp:244]     Train net output #0: accuracy = 0.464\nI1212 06:23:04.706562 18921 
solver.cpp:244]     Train net output #1: loss = 1.44864 (* 1 = 1.44864 loss)\nI1212 06:23:04.773509 18921 sgd_solver.cpp:174] Iteration 100, lr = 0.015\nI1212 06:23:04.786144 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.76694\nI1212 06:25:24.282757 18921 solver.cpp:337] Iteration 200, Testing net (#0)\nI1212 06:26:47.328466 18921 solver.cpp:404]     Test net output #0: accuracy = 0.47856\nI1212 06:26:47.328727 18921 solver.cpp:404]     Test net output #1: loss = 1.67814 (* 1 = 1.67814 loss)\nI1212 06:26:48.681401 18921 solver.cpp:228] Iteration 200, loss = 1.12244\nI1212 06:26:48.681442 18921 solver.cpp:244]     Train net output #0: accuracy = 0.6\nI1212 06:26:48.681457 18921 solver.cpp:244]     Train net output #1: loss = 1.12244 (* 1 = 1.12244 loss)\nI1212 06:26:48.738739 18921 sgd_solver.cpp:174] Iteration 200, lr = 0.03\nI1212 06:26:48.751371 18921 sgd_solver.cpp:149] Gradient: L2 norm 2.10804\nI1212 06:29:08.252702 18921 solver.cpp:337] Iteration 300, Testing net (#0)\nI1212 06:30:31.293393 18921 solver.cpp:404]     Test net output #0: accuracy = 0.47988\nI1212 06:30:31.293651 18921 solver.cpp:404]     Test net output #1: loss = 1.83158 (* 1 = 1.83158 loss)\nI1212 06:30:32.646687 18921 solver.cpp:228] Iteration 300, loss = 0.917057\nI1212 06:30:32.646723 18921 solver.cpp:244]     Train net output #0: accuracy = 0.688\nI1212 06:30:32.646737 18921 solver.cpp:244]     Train net output #1: loss = 0.917057 (* 1 = 0.917057 loss)\nI1212 06:30:32.700572 18921 sgd_solver.cpp:174] Iteration 300, lr = 0.045\nI1212 06:30:32.713196 18921 sgd_solver.cpp:149] Gradient: L2 norm 2.10649\nI1212 06:32:52.245964 18921 solver.cpp:337] Iteration 400, Testing net (#0)\nI1212 06:34:15.301596 18921 solver.cpp:404]     Test net output #0: accuracy = 0.61876\nI1212 06:34:15.301836 18921 solver.cpp:404]     Test net output #1: loss = 1.23822 (* 1 = 1.23822 loss)\nI1212 06:34:16.654413 18921 solver.cpp:228] Iteration 400, loss = 0.698549\nI1212 06:34:16.654454 18921 solver.cpp:244]     
Train net output #0: accuracy = 0.776\nI1212 06:34:16.654469 18921 solver.cpp:244]     Train net output #1: loss = 0.698549 (* 1 = 0.698549 loss)\nI1212 06:34:16.714588 18921 sgd_solver.cpp:174] Iteration 400, lr = 0.0599999\nI1212 06:34:16.727193 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.83684\nI1212 06:36:36.172507 18921 solver.cpp:337] Iteration 500, Testing net (#0)\nI1212 06:37:59.227356 18921 solver.cpp:404]     Test net output #0: accuracy = 0.65732\nI1212 06:37:59.227568 18921 solver.cpp:404]     Test net output #1: loss = 1.11317 (* 1 = 1.11317 loss)\nI1212 06:38:00.580607 18921 solver.cpp:228] Iteration 500, loss = 0.534555\nI1212 06:38:00.580642 18921 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI1212 06:38:00.580658 18921 solver.cpp:244]     Train net output #1: loss = 0.534555 (* 1 = 0.534555 loss)\nI1212 06:38:00.637408 18921 sgd_solver.cpp:174] Iteration 500, lr = 0.0749999\nI1212 06:38:00.650069 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.35701\nI1212 06:40:20.141585 18921 solver.cpp:337] Iteration 600, Testing net (#0)\nI1212 06:41:43.187811 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68396\nI1212 06:41:43.188055 18921 solver.cpp:404]     Test net output #1: loss = 0.989851 (* 1 = 0.989851 loss)\nI1212 06:41:44.541342 18921 solver.cpp:228] Iteration 600, loss = 0.502535\nI1212 06:41:44.541380 18921 solver.cpp:244]     Train net output #0: accuracy = 0.816\nI1212 06:41:44.541395 18921 solver.cpp:244]     Train net output #1: loss = 0.502535 (* 1 = 0.502535 loss)\nI1212 06:41:44.596406 18921 sgd_solver.cpp:174] Iteration 600, lr = 0.0899999\nI1212 06:41:44.608955 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.24311\nI1212 06:44:04.124871 18921 solver.cpp:337] Iteration 700, Testing net (#0)\nI1212 06:45:27.173261 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74532\nI1212 06:45:27.173516 18921 solver.cpp:404]     Test net output #1: loss = 0.801095 (* 1 = 0.801095 loss)\nI1212 06:45:28.526700 18921 
solver.cpp:228] Iteration 700, loss = 0.462699\nI1212 06:45:28.526736 18921 solver.cpp:244]     Train net output #0: accuracy = 0.832\nI1212 06:45:28.526751 18921 solver.cpp:244]     Train net output #1: loss = 0.462699 (* 1 = 0.462699 loss)\nI1212 06:45:28.580190 18921 sgd_solver.cpp:174] Iteration 700, lr = 0.105\nI1212 06:45:28.592862 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.20557\nI1212 06:47:48.032840 18921 solver.cpp:337] Iteration 800, Testing net (#0)\nI1212 06:49:11.074656 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7212\nI1212 06:49:11.074919 18921 solver.cpp:404]     Test net output #1: loss = 0.89381 (* 1 = 0.89381 loss)\nI1212 06:49:12.427229 18921 solver.cpp:228] Iteration 800, loss = 0.335647\nI1212 06:49:12.427263 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 06:49:12.427279 18921 solver.cpp:244]     Train net output #1: loss = 0.335647 (* 1 = 0.335647 loss)\nI1212 06:49:12.479709 18921 sgd_solver.cpp:174] Iteration 800, lr = 0.12\nI1212 06:49:12.492207 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.13968\nI1212 06:51:31.943406 18921 solver.cpp:337] Iteration 900, Testing net (#0)\nI1212 06:52:54.911829 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73804\nI1212 06:52:54.912094 18921 solver.cpp:404]     Test net output #1: loss = 0.825656 (* 1 = 0.825656 loss)\nI1212 06:52:56.271037 18921 solver.cpp:228] Iteration 900, loss = 0.243062\nI1212 06:52:56.271073 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 06:52:56.271090 18921 solver.cpp:244]     Train net output #1: loss = 0.243062 (* 1 = 0.243062 loss)\nI1212 06:52:56.316196 18921 sgd_solver.cpp:174] Iteration 900, lr = 0.135\nI1212 06:52:56.328883 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.940773\nI1212 06:55:15.794560 18921 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1212 06:56:38.745201 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73872\nI1212 06:56:38.745465 18921 solver.cpp:404]     Test net 
output #1: loss = 0.809399 (* 1 = 0.809399 loss)\nI1212 06:56:40.097431 18921 solver.cpp:228] Iteration 1000, loss = 0.374495\nI1212 06:56:40.097466 18921 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1212 06:56:40.097482 18921 solver.cpp:244]     Train net output #1: loss = 0.374495 (* 1 = 0.374495 loss)\nI1212 06:56:40.156796 18921 sgd_solver.cpp:174] Iteration 1000, lr = 0.15\nI1212 06:56:40.169404 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.20211\nI1212 06:58:59.641968 18921 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1212 07:00:22.580096 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7394\nI1212 07:00:22.580353 18921 solver.cpp:404]     Test net output #1: loss = 0.898705 (* 1 = 0.898705 loss)\nI1212 07:00:23.932976 18921 solver.cpp:228] Iteration 1100, loss = 0.447941\nI1212 07:00:23.933009 18921 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1212 07:00:23.933025 18921 solver.cpp:244]     Train net output #1: loss = 0.447941 (* 1 = 0.447941 loss)\nI1212 07:00:23.986191 18921 sgd_solver.cpp:174] Iteration 1100, lr = 0.165\nI1212 07:00:23.998752 18921 sgd_solver.cpp:149] Gradient: L2 norm 1.15827\nI1212 07:02:43.468148 18921 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1212 07:04:06.413198 18921 solver.cpp:404]     Test net output #0: accuracy = 0.67136\nI1212 07:04:06.413449 18921 solver.cpp:404]     Test net output #1: loss = 1.266 (* 1 = 1.266 loss)\nI1212 07:04:07.770927 18921 solver.cpp:228] Iteration 1200, loss = 0.321972\nI1212 07:04:07.770964 18921 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1212 07:04:07.770979 18921 solver.cpp:244]     Train net output #1: loss = 0.321972 (* 1 = 0.321972 loss)\nI1212 07:04:07.821051 18921 sgd_solver.cpp:174] Iteration 1200, lr = 0.18\nI1212 07:04:07.833704 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.989191\nI1212 07:06:27.378746 18921 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1212 07:07:50.321575 18921 solver.cpp:404]     Test net output 
#0: accuracy = 0.61596\nI1212 07:07:50.321823 18921 solver.cpp:404]     Test net output #1: loss = 1.87077 (* 1 = 1.87077 loss)\nI1212 07:07:51.677731 18921 solver.cpp:228] Iteration 1300, loss = 0.228233\nI1212 07:07:51.677767 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 07:07:51.677783 18921 solver.cpp:244]     Train net output #1: loss = 0.228233 (* 1 = 0.228233 loss)\nI1212 07:07:51.730439 18921 sgd_solver.cpp:174] Iteration 1300, lr = 0.195\nI1212 07:07:51.744581 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.850691\nI1212 07:10:11.169224 18921 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1212 07:11:34.105432 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7474\nI1212 07:11:34.105675 18921 solver.cpp:404]     Test net output #1: loss = 0.880487 (* 1 = 0.880487 loss)\nI1212 07:11:35.465330 18921 solver.cpp:228] Iteration 1400, loss = 0.248355\nI1212 07:11:35.465368 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:11:35.465384 18921 solver.cpp:244]     Train net output #1: loss = 0.248355 (* 1 = 0.248355 loss)\nI1212 07:11:35.518040 18921 sgd_solver.cpp:174] Iteration 1400, lr = 0.21\nI1212 07:11:35.530686 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.774358\nI1212 07:13:54.929782 18921 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1212 07:15:17.866616 18921 solver.cpp:404]     Test net output #0: accuracy = 0.64784\nI1212 07:15:17.866878 18921 solver.cpp:404]     Test net output #1: loss = 1.69002 (* 1 = 1.69002 loss)\nI1212 07:15:19.218822 18921 solver.cpp:228] Iteration 1500, loss = 0.314756\nI1212 07:15:19.218854 18921 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1212 07:15:19.218869 18921 solver.cpp:244]     Train net output #1: loss = 0.314756 (* 1 = 0.314756 loss)\nI1212 07:15:19.277400 18921 sgd_solver.cpp:174] Iteration 1500, lr = 0.225\nI1212 07:15:19.290125 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.763743\nI1212 07:17:38.676553 18921 solver.cpp:337] Iteration 
1600, Testing net (#0)\nI1212 07:19:01.619302 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78944\nI1212 07:19:01.619565 18921 solver.cpp:404]     Test net output #1: loss = 0.684077 (* 1 = 0.684077 loss)\nI1212 07:19:02.971395 18921 solver.cpp:228] Iteration 1600, loss = 0.280164\nI1212 07:19:02.971431 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 07:19:02.971451 18921 solver.cpp:244]     Train net output #1: loss = 0.280164 (* 1 = 0.280164 loss)\nI1212 07:19:03.028035 18921 sgd_solver.cpp:174] Iteration 1600, lr = 0.24\nI1212 07:19:03.040640 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.702561\nI1212 07:21:22.440424 18921 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1212 07:22:45.383242 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7532\nI1212 07:22:45.383502 18921 solver.cpp:404]     Test net output #1: loss = 0.915843 (* 1 = 0.915843 loss)\nI1212 07:22:46.735450 18921 solver.cpp:228] Iteration 1700, loss = 0.247961\nI1212 07:22:46.735483 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 07:22:46.735499 18921 solver.cpp:244]     Train net output #1: loss = 0.247961 (* 1 = 0.247961 loss)\nI1212 07:22:46.791748 18921 sgd_solver.cpp:174] Iteration 1700, lr = 0.255\nI1212 07:22:46.804409 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.763179\nI1212 07:25:06.194492 18921 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1212 07:26:29.222898 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73748\nI1212 07:26:29.223158 18921 solver.cpp:404]     Test net output #1: loss = 0.970757 (* 1 = 0.970757 loss)\nI1212 07:26:30.575489 18921 solver.cpp:228] Iteration 1800, loss = 0.265535\nI1212 07:26:30.575522 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:26:30.575538 18921 solver.cpp:244]     Train net output #1: loss = 0.265535 (* 1 = 0.265535 loss)\nI1212 07:26:30.630640 18921 sgd_solver.cpp:174] Iteration 1800, lr = 0.27\nI1212 07:26:30.643262 18921 
sgd_solver.cpp:149] Gradient: L2 norm 0.743498\nI1212 07:28:50.062278 18921 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1212 07:30:13.088476 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74808\nI1212 07:30:13.088731 18921 solver.cpp:404]     Test net output #1: loss = 1.01785 (* 1 = 1.01785 loss)\nI1212 07:30:14.440723 18921 solver.cpp:228] Iteration 1900, loss = 0.231124\nI1212 07:30:14.440758 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 07:30:14.440774 18921 solver.cpp:244]     Train net output #1: loss = 0.231124 (* 1 = 0.231124 loss)\nI1212 07:30:14.501312 18921 sgd_solver.cpp:174] Iteration 1900, lr = 0.285\nI1212 07:30:14.513934 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.696054\nI1212 07:32:33.933385 18921 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1212 07:33:56.957775 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7444\nI1212 07:33:56.957983 18921 solver.cpp:404]     Test net output #1: loss = 1.08033 (* 1 = 1.08033 loss)\nI1212 07:33:58.309489 18921 solver.cpp:228] Iteration 2000, loss = 0.154613\nI1212 07:33:58.309522 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 07:33:58.309537 18921 solver.cpp:244]     Train net output #1: loss = 0.154613 (* 1 = 0.154613 loss)\nI1212 07:33:58.365160 18921 sgd_solver.cpp:174] Iteration 2000, lr = 0.3\nI1212 07:33:58.377660 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.583661\nI1212 07:36:18.227977 18921 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1212 07:37:41.963500 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80788\nI1212 07:37:41.963809 18921 solver.cpp:404]     Test net output #1: loss = 0.726329 (* 1 = 0.726329 loss)\nI1212 07:37:43.317349 18921 solver.cpp:228] Iteration 2100, loss = 0.167357\nI1212 07:37:43.317394 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 07:37:43.317409 18921 solver.cpp:244]     Train net output #1: loss = 0.167358 (* 1 = 0.167358 loss)\nI1212 
07:37:43.376237 18921 sgd_solver.cpp:174] Iteration 2100, lr = 0.315\nI1212 07:37:43.390240 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.637141\nI1212 07:40:03.232051 18921 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1212 07:41:26.999195 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75788\nI1212 07:41:26.999500 18921 solver.cpp:404]     Test net output #1: loss = 0.96256 (* 1 = 0.96256 loss)\nI1212 07:41:28.354076 18921 solver.cpp:228] Iteration 2200, loss = 0.23686\nI1212 07:41:28.354121 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 07:41:28.354136 18921 solver.cpp:244]     Train net output #1: loss = 0.23686 (* 1 = 0.23686 loss)\nI1212 07:41:28.404278 18921 sgd_solver.cpp:174] Iteration 2200, lr = 0.33\nI1212 07:41:28.418278 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.785088\nI1212 07:43:48.228278 18921 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1212 07:45:11.950120 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77516\nI1212 07:45:11.950443 18921 solver.cpp:404]     Test net output #1: loss = 0.894028 (* 1 = 0.894028 loss)\nI1212 07:45:13.305191 18921 solver.cpp:228] Iteration 2300, loss = 0.151837\nI1212 07:45:13.305238 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 07:45:13.305253 18921 solver.cpp:244]     Train net output #1: loss = 0.151837 (* 1 = 0.151837 loss)\nI1212 07:45:13.354686 18921 sgd_solver.cpp:174] Iteration 2300, lr = 0.345\nI1212 07:45:13.368711 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.637901\nI1212 07:47:33.273928 18921 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1212 07:48:56.966465 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7272\nI1212 07:48:56.966789 18921 solver.cpp:404]     Test net output #1: loss = 1.13131 (* 1 = 1.13131 loss)\nI1212 07:48:58.321087 18921 solver.cpp:228] Iteration 2400, loss = 0.166829\nI1212 07:48:58.321132 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 07:48:58.321148 18921 
solver.cpp:244]     Train net output #1: loss = 0.166829 (* 1 = 0.166829 loss)\nI1212 07:48:58.373697 18921 sgd_solver.cpp:174] Iteration 2400, lr = 0.36\nI1212 07:48:58.387759 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.579553\nI1212 07:51:18.287114 18921 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1212 07:52:42.000730 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI1212 07:52:42.001057 18921 solver.cpp:404]     Test net output #1: loss = 1.18152 (* 1 = 1.18152 loss)\nI1212 07:52:43.355531 18921 solver.cpp:228] Iteration 2500, loss = 0.16013\nI1212 07:52:43.355576 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 07:52:43.355592 18921 solver.cpp:244]     Train net output #1: loss = 0.16013 (* 1 = 0.16013 loss)\nI1212 07:52:43.405715 18921 sgd_solver.cpp:174] Iteration 2500, lr = 0.375\nI1212 07:52:43.419716 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.614992\nI1212 07:55:03.310346 18921 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1212 07:56:27.057551 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79592\nI1212 07:56:27.057880 18921 solver.cpp:404]     Test net output #1: loss = 0.776875 (* 1 = 0.776875 loss)\nI1212 07:56:28.412513 18921 solver.cpp:228] Iteration 2600, loss = 0.119867\nI1212 07:56:28.412561 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 07:56:28.412578 18921 solver.cpp:244]     Train net output #1: loss = 0.119867 (* 1 = 0.119867 loss)\nI1212 07:56:28.464238 18921 sgd_solver.cpp:174] Iteration 2600, lr = 0.39\nI1212 07:56:28.478335 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.524118\nI1212 07:58:48.396638 18921 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1212 08:00:12.111099 18921 solver.cpp:404]     Test net output #0: accuracy = 0.786\nI1212 08:00:12.111412 18921 solver.cpp:404]     Test net output #1: loss = 0.819087 (* 1 = 0.819087 loss)\nI1212 08:00:13.466038 18921 solver.cpp:228] Iteration 2700, loss = 0.103961\nI1212 08:00:13.466083 18921 
solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 08:00:13.466099 18921 solver.cpp:244]     Train net output #1: loss = 0.103961 (* 1 = 0.103961 loss)\nI1212 08:00:13.522552 18921 sgd_solver.cpp:174] Iteration 2700, lr = 0.405\nI1212 08:00:13.536612 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.539913\nI1212 08:02:33.363641 18921 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1212 08:03:57.070183 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76036\nI1212 08:03:57.070513 18921 solver.cpp:404]     Test net output #1: loss = 0.952027 (* 1 = 0.952027 loss)\nI1212 08:03:58.423738 18921 solver.cpp:228] Iteration 2800, loss = 0.133385\nI1212 08:03:58.423782 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 08:03:58.423799 18921 solver.cpp:244]     Train net output #1: loss = 0.133385 (* 1 = 0.133385 loss)\nI1212 08:03:58.478212 18921 sgd_solver.cpp:174] Iteration 2800, lr = 0.42\nI1212 08:03:58.492182 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.540211\nI1212 08:06:18.265606 18921 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1212 08:07:41.972347 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78852\nI1212 08:07:41.972666 18921 solver.cpp:404]     Test net output #1: loss = 0.790671 (* 1 = 0.790671 loss)\nI1212 08:07:43.327020 18921 solver.cpp:228] Iteration 2900, loss = 0.102226\nI1212 08:07:43.327064 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 08:07:43.327080 18921 solver.cpp:244]     Train net output #1: loss = 0.102226 (* 1 = 0.102226 loss)\nI1212 08:07:43.379189 18921 sgd_solver.cpp:174] Iteration 2900, lr = 0.435\nI1212 08:07:43.393152 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.607368\nI1212 08:10:03.265261 18921 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1212 08:11:27.013595 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7942\nI1212 08:11:27.013898 18921 solver.cpp:404]     Test net output #1: loss = 0.766057 (* 1 = 0.766057 loss)\nI1212 
08:11:28.368968 18921 solver.cpp:228] Iteration 3000, loss = 0.11138\nI1212 08:11:28.369011 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 08:11:28.369029 18921 solver.cpp:244]     Train net output #1: loss = 0.11138 (* 1 = 0.11138 loss)\nI1212 08:11:28.423600 18921 sgd_solver.cpp:174] Iteration 3000, lr = 0.45\nI1212 08:11:28.437678 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.582946\nI1212 08:13:48.360894 18921 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1212 08:15:12.096066 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68072\nI1212 08:15:12.096362 18921 solver.cpp:404]     Test net output #1: loss = 1.46974 (* 1 = 1.46974 loss)\nI1212 08:15:13.450968 18921 solver.cpp:228] Iteration 3100, loss = 0.0891513\nI1212 08:15:13.451012 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:15:13.451030 18921 solver.cpp:244]     Train net output #1: loss = 0.0891514 (* 1 = 0.0891514 loss)\nI1212 08:15:13.507882 18921 sgd_solver.cpp:174] Iteration 3100, lr = 0.465\nI1212 08:15:13.521940 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.550086\nI1212 08:17:33.394840 18921 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1212 08:18:57.145591 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7744\nI1212 08:18:57.145931 18921 solver.cpp:404]     Test net output #1: loss = 0.920745 (* 1 = 0.920745 loss)\nI1212 08:18:58.500073 18921 solver.cpp:228] Iteration 3200, loss = 0.187893\nI1212 08:18:58.500118 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 08:18:58.500133 18921 solver.cpp:244]     Train net output #1: loss = 0.187893 (* 1 = 0.187893 loss)\nI1212 08:18:58.551376 18921 sgd_solver.cpp:174] Iteration 3200, lr = 0.48\nI1212 08:18:58.565366 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.550963\nI1212 08:21:18.482714 18921 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1212 08:22:42.204778 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80744\nI1212 08:22:42.205075 
18921 solver.cpp:404]     Test net output #1: loss = 0.767808 (* 1 = 0.767808 loss)\nI1212 08:22:43.559569 18921 solver.cpp:228] Iteration 3300, loss = 0.156376\nI1212 08:22:43.559612 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 08:22:43.559628 18921 solver.cpp:244]     Train net output #1: loss = 0.156376 (* 1 = 0.156376 loss)\nI1212 08:22:43.610947 18921 sgd_solver.cpp:174] Iteration 3300, lr = 0.495\nI1212 08:22:43.625039 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.507745\nI1212 08:25:03.539681 18921 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1212 08:26:27.225433 18921 solver.cpp:404]     Test net output #0: accuracy = 0.714\nI1212 08:26:27.225749 18921 solver.cpp:404]     Test net output #1: loss = 1.44497 (* 1 = 1.44497 loss)\nI1212 08:26:28.579967 18921 solver.cpp:228] Iteration 3400, loss = 0.15392\nI1212 08:26:28.580010 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 08:26:28.580026 18921 solver.cpp:244]     Train net output #1: loss = 0.15392 (* 1 = 0.15392 loss)\nI1212 08:26:28.638284 18921 sgd_solver.cpp:174] Iteration 3400, lr = 0.51\nI1212 08:26:28.652348 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.499065\nI1212 08:28:48.570032 18921 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1212 08:30:12.295313 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76448\nI1212 08:30:12.295608 18921 solver.cpp:404]     Test net output #1: loss = 1.08306 (* 1 = 1.08306 loss)\nI1212 08:30:13.650017 18921 solver.cpp:228] Iteration 3500, loss = 0.143201\nI1212 08:30:13.650060 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:30:13.650077 18921 solver.cpp:244]     Train net output #1: loss = 0.143201 (* 1 = 0.143201 loss)\nI1212 08:30:13.707314 18921 sgd_solver.cpp:174] Iteration 3500, lr = 0.525\nI1212 08:30:13.721369 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.574758\nI1212 08:32:33.580261 18921 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1212 08:33:57.296612 18921 
solver.cpp:404]     Test net output #0: accuracy = 0.6564\nI1212 08:33:57.296937 18921 solver.cpp:404]     Test net output #1: loss = 1.85972 (* 1 = 1.85972 loss)\nI1212 08:33:58.651036 18921 solver.cpp:228] Iteration 3600, loss = 0.046306\nI1212 08:33:58.651082 18921 solver.cpp:244]     Train net output #0: accuracy = 0.992\nI1212 08:33:58.651098 18921 solver.cpp:244]     Train net output #1: loss = 0.0463061 (* 1 = 0.0463061 loss)\nI1212 08:33:58.706737 18921 sgd_solver.cpp:174] Iteration 3600, lr = 0.54\nI1212 08:33:58.720830 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.489947\nI1212 08:36:18.592070 18921 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1212 08:37:42.321867 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72904\nI1212 08:37:42.322198 18921 solver.cpp:404]     Test net output #1: loss = 1.28735 (* 1 = 1.28735 loss)\nI1212 08:37:43.676369 18921 solver.cpp:228] Iteration 3700, loss = 0.112952\nI1212 08:37:43.676412 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 08:37:43.676429 18921 solver.cpp:244]     Train net output #1: loss = 0.112952 (* 1 = 0.112952 loss)\nI1212 08:37:43.731233 18921 sgd_solver.cpp:174] Iteration 3700, lr = 0.555\nI1212 08:37:43.745271 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.522571\nI1212 08:40:03.634441 18921 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1212 08:41:27.343750 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79024\nI1212 08:41:27.344082 18921 solver.cpp:404]     Test net output #1: loss = 0.911065 (* 1 = 0.911065 loss)\nI1212 08:41:28.698307 18921 solver.cpp:228] Iteration 3800, loss = 0.123429\nI1212 08:41:28.698351 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:41:28.698369 18921 solver.cpp:244]     Train net output #1: loss = 0.123429 (* 1 = 0.123429 loss)\nI1212 08:41:28.754426 18921 sgd_solver.cpp:174] Iteration 3800, lr = 0.57\nI1212 08:41:28.768559 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.539931\nI1212 
08:43:48.690654 18921 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1212 08:45:12.393877 18921 solver.cpp:404]     Test net output #0: accuracy = 0.8148\nI1212 08:45:12.394204 18921 solver.cpp:404]     Test net output #1: loss = 0.738508 (* 1 = 0.738508 loss)\nI1212 08:45:13.748855 18921 solver.cpp:228] Iteration 3900, loss = 0.116493\nI1212 08:45:13.748899 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:45:13.748915 18921 solver.cpp:244]     Train net output #1: loss = 0.116493 (* 1 = 0.116493 loss)\nI1212 08:45:13.802430 18921 sgd_solver.cpp:174] Iteration 3900, lr = 0.585\nI1212 08:45:13.816493 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.444425\nI1212 08:47:33.723361 18921 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1212 08:48:57.420553 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79888\nI1212 08:48:57.420859 18921 solver.cpp:404]     Test net output #1: loss = 0.817509 (* 1 = 0.817509 loss)\nI1212 08:48:58.775048 18921 solver.cpp:228] Iteration 4000, loss = 0.110467\nI1212 08:48:58.775092 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 08:48:58.775108 18921 solver.cpp:244]     Train net output #1: loss = 0.110467 (* 1 = 0.110467 loss)\nI1212 08:48:58.834766 18921 sgd_solver.cpp:174] Iteration 4000, lr = 0.6\nI1212 08:48:58.848803 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.484588\nI1212 08:51:18.742148 18921 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1212 08:52:42.430328 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7918\nI1212 08:52:42.430625 18921 solver.cpp:404]     Test net output #1: loss = 0.845928 (* 1 = 0.845928 loss)\nI1212 08:52:43.784747 18921 solver.cpp:228] Iteration 4100, loss = 0.199738\nI1212 08:52:43.784790 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 08:52:43.784806 18921 solver.cpp:244]     Train net output #1: loss = 0.199738 (* 1 = 0.199738 loss)\nI1212 08:52:43.837780 18921 sgd_solver.cpp:174] Iteration 4100, lr = 
0.615\nI1212 08:52:43.851826 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.589267\nI1212 08:55:03.718658 18921 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1212 08:56:27.412210 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79352\nI1212 08:56:27.412541 18921 solver.cpp:404]     Test net output #1: loss = 0.795534 (* 1 = 0.795534 loss)\nI1212 08:56:28.766829 18921 solver.cpp:228] Iteration 4200, loss = 0.0708927\nI1212 08:56:28.766873 18921 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 08:56:28.766890 18921 solver.cpp:244]     Train net output #1: loss = 0.0708927 (* 1 = 0.0708927 loss)\nI1212 08:56:28.823921 18921 sgd_solver.cpp:174] Iteration 4200, lr = 0.63\nI1212 08:56:28.837891 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.429957\nI1212 08:58:48.715929 18921 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1212 09:00:12.419910 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76376\nI1212 09:00:12.420229 18921 solver.cpp:404]     Test net output #1: loss = 0.940594 (* 1 = 0.940594 loss)\nI1212 09:00:13.773049 18921 solver.cpp:228] Iteration 4300, loss = 0.0788311\nI1212 09:00:13.773092 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:00:13.773108 18921 solver.cpp:244]     Train net output #1: loss = 0.0788312 (* 1 = 0.0788312 loss)\nI1212 09:00:13.826644 18921 sgd_solver.cpp:174] Iteration 4300, lr = 0.645\nI1212 09:00:13.840591 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.497754\nI1212 09:02:33.633563 18921 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1212 09:03:57.328146 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78016\nI1212 09:03:57.328491 18921 solver.cpp:404]     Test net output #1: loss = 0.899629 (* 1 = 0.899629 loss)\nI1212 09:03:58.681438 18921 solver.cpp:228] Iteration 4400, loss = 0.146576\nI1212 09:03:58.681481 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:03:58.681498 18921 solver.cpp:244]     Train net output #1: loss = 
0.146576 (* 1 = 0.146576 loss)\nI1212 09:03:58.734902 18921 sgd_solver.cpp:174] Iteration 4400, lr = 0.66\nI1212 09:03:58.748785 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.404473\nI1212 09:06:18.386854 18921 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1212 09:07:42.092624 18921 solver.cpp:404]     Test net output #0: accuracy = 0.82892\nI1212 09:07:42.092931 18921 solver.cpp:404]     Test net output #1: loss = 0.600077 (* 1 = 0.600077 loss)\nI1212 09:07:43.443311 18921 solver.cpp:228] Iteration 4500, loss = 0.147833\nI1212 09:07:43.443356 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 09:07:43.443372 18921 solver.cpp:244]     Train net output #1: loss = 0.147833 (* 1 = 0.147833 loss)\nI1212 09:07:43.500057 18921 sgd_solver.cpp:174] Iteration 4500, lr = 0.675\nI1212 09:07:43.514014 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.527345\nI1212 09:10:02.967417 18921 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1212 09:11:26.684675 18921 solver.cpp:404]     Test net output #0: accuracy = 0.83028\nI1212 09:11:26.684981 18921 solver.cpp:404]     Test net output #1: loss = 0.625123 (* 1 = 0.625123 loss)\nI1212 09:11:28.035358 18921 solver.cpp:228] Iteration 4600, loss = 0.0955179\nI1212 09:11:28.035398 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:11:28.035413 18921 solver.cpp:244]     Train net output #1: loss = 0.095518 (* 1 = 0.095518 loss)\nI1212 09:11:28.085098 18921 sgd_solver.cpp:174] Iteration 4600, lr = 0.69\nI1212 09:11:28.099114 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.46008\nI1212 09:13:47.570662 18921 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1212 09:15:11.274569 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75976\nI1212 09:15:11.274893 18921 solver.cpp:404]     Test net output #1: loss = 1.00297 (* 1 = 1.00297 loss)\nI1212 09:15:12.630460 18921 solver.cpp:228] Iteration 4700, loss = 0.0868635\nI1212 09:15:12.630506 18921 solver.cpp:244]     Train net output #0: 
accuracy = 0.96\nI1212 09:15:12.630522 18921 solver.cpp:244]     Train net output #1: loss = 0.0868636 (* 1 = 0.0868636 loss)\nI1212 09:15:12.677178 18921 sgd_solver.cpp:174] Iteration 4700, lr = 0.705\nI1212 09:15:12.690687 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.465098\nI1212 09:17:32.289840 18921 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1212 09:18:55.998178 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79476\nI1212 09:18:55.998515 18921 solver.cpp:404]     Test net output #1: loss = 0.879885 (* 1 = 0.879885 loss)\nI1212 09:18:57.349980 18921 solver.cpp:228] Iteration 4800, loss = 0.154656\nI1212 09:18:57.350023 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 09:18:57.350039 18921 solver.cpp:244]     Train net output #1: loss = 0.154656 (* 1 = 0.154656 loss)\nI1212 09:18:57.402724 18921 sgd_solver.cpp:174] Iteration 4800, lr = 0.72\nI1212 09:18:57.416635 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.524402\nI1212 09:21:17.049001 18921 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1212 09:22:40.748855 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73196\nI1212 09:22:40.749168 18921 solver.cpp:404]     Test net output #1: loss = 1.14869 (* 1 = 1.14869 loss)\nI1212 09:22:42.100569 18921 solver.cpp:228] Iteration 4900, loss = 0.140843\nI1212 09:22:42.100613 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 09:22:42.100628 18921 solver.cpp:244]     Train net output #1: loss = 0.140843 (* 1 = 0.140843 loss)\nI1212 09:22:42.152413 18921 sgd_solver.cpp:174] Iteration 4900, lr = 0.735\nI1212 09:22:42.166388 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.530058\nI1212 09:25:01.784313 18921 solver.cpp:337] Iteration 5000, Testing net (#0)\nI1212 09:26:25.482803 18921 solver.cpp:404]     Test net output #0: accuracy = 0.8268\nI1212 09:26:25.483115 18921 solver.cpp:404]     Test net output #1: loss = 0.649589 (* 1 = 0.649589 loss)\nI1212 09:26:26.834710 18921 solver.cpp:228] Iteration 
5000, loss = 0.163901\nI1212 09:26:26.834754 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 09:26:26.834770 18921 solver.cpp:244]     Train net output #1: loss = 0.163901 (* 1 = 0.163901 loss)\nI1212 09:26:26.893702 18921 sgd_solver.cpp:174] Iteration 5000, lr = 0.75\nI1212 09:26:26.907760 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.482656\nI1212 09:28:46.516738 18921 solver.cpp:337] Iteration 5100, Testing net (#0)\nI1212 09:30:10.214550 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76848\nI1212 09:30:10.214845 18921 solver.cpp:404]     Test net output #1: loss = 0.891672 (* 1 = 0.891672 loss)\nI1212 09:30:11.571743 18921 solver.cpp:228] Iteration 5100, loss = 0.15636\nI1212 09:30:11.571787 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:30:11.571802 18921 solver.cpp:244]     Train net output #1: loss = 0.15636 (* 1 = 0.15636 loss)\nI1212 09:30:11.619844 18921 sgd_solver.cpp:174] Iteration 5100, lr = 0.765\nI1212 09:30:11.633903 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.412405\nI1212 09:32:31.242602 18921 solver.cpp:337] Iteration 5200, Testing net (#0)\nI1212 09:33:54.938441 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79256\nI1212 09:33:54.938772 18921 solver.cpp:404]     Test net output #1: loss = 0.859979 (* 1 = 0.859979 loss)\nI1212 09:33:56.290663 18921 solver.cpp:228] Iteration 5200, loss = 0.0587636\nI1212 09:33:56.290706 18921 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 09:33:56.290722 18921 solver.cpp:244]     Train net output #1: loss = 0.0587637 (* 1 = 0.0587637 loss)\nI1212 09:33:56.349797 18921 sgd_solver.cpp:174] Iteration 5200, lr = 0.78\nI1212 09:33:56.363816 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.38071\nI1212 09:36:15.948957 18921 solver.cpp:337] Iteration 5300, Testing net (#0)\nI1212 09:37:39.651330 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73412\nI1212 09:37:39.651643 18921 solver.cpp:404]     Test net output #1: loss 
= 1.19891 (* 1 = 1.19891 loss)\nI1212 09:37:41.002295 18921 solver.cpp:228] Iteration 5300, loss = 0.117172\nI1212 09:37:41.002341 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 09:37:41.002357 18921 solver.cpp:244]     Train net output #1: loss = 0.117173 (* 1 = 0.117173 loss)\nI1212 09:37:41.064466 18921 sgd_solver.cpp:174] Iteration 5300, lr = 0.795\nI1212 09:37:41.078487 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.466481\nI1212 09:40:00.574527 18921 solver.cpp:337] Iteration 5400, Testing net (#0)\nI1212 09:41:24.272445 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77844\nI1212 09:41:24.272749 18921 solver.cpp:404]     Test net output #1: loss = 0.945933 (* 1 = 0.945933 loss)\nI1212 09:41:25.623668 18921 solver.cpp:228] Iteration 5400, loss = 0.110931\nI1212 09:41:25.623713 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:41:25.623728 18921 solver.cpp:244]     Train net output #1: loss = 0.110931 (* 1 = 0.110931 loss)\nI1212 09:41:25.679774 18921 sgd_solver.cpp:174] Iteration 5400, lr = 0.81\nI1212 09:41:25.693819 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.400129\nI1212 09:43:45.278136 18921 solver.cpp:337] Iteration 5500, Testing net (#0)\nI1212 09:45:08.975006 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75596\nI1212 09:45:08.975339 18921 solver.cpp:404]     Test net output #1: loss = 0.926956 (* 1 = 0.926956 loss)\nI1212 09:45:10.327217 18921 solver.cpp:228] Iteration 5500, loss = 0.190177\nI1212 09:45:10.327260 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 09:45:10.327275 18921 solver.cpp:244]     Train net output #1: loss = 0.190177 (* 1 = 0.190177 loss)\nI1212 09:45:10.382452 18921 sgd_solver.cpp:174] Iteration 5500, lr = 0.825\nI1212 09:45:10.396512 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.415136\nI1212 09:47:30.017366 18921 solver.cpp:337] Iteration 5600, Testing net (#0)\nI1212 09:48:53.715801 18921 solver.cpp:404]     Test net output #0: 
accuracy = 0.79532\nI1212 09:48:53.716132 18921 solver.cpp:404]     Test net output #1: loss = 0.849777 (* 1 = 0.849777 loss)\nI1212 09:48:55.068230 18921 solver.cpp:228] Iteration 5600, loss = 0.13402\nI1212 09:48:55.068272 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 09:48:55.068289 18921 solver.cpp:244]     Train net output #1: loss = 0.13402 (* 1 = 0.13402 loss)\nI1212 09:48:55.122184 18921 sgd_solver.cpp:174] Iteration 5600, lr = 0.84\nI1212 09:48:55.136207 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.455732\nI1212 09:51:14.754997 18921 solver.cpp:337] Iteration 5700, Testing net (#0)\nI1212 09:52:38.469835 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78\nI1212 09:52:38.470165 18921 solver.cpp:404]     Test net output #1: loss = 0.877089 (* 1 = 0.877089 loss)\nI1212 09:52:39.821193 18921 solver.cpp:228] Iteration 5700, loss = 0.106415\nI1212 09:52:39.821238 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 09:52:39.821254 18921 solver.cpp:244]     Train net output #1: loss = 0.106415 (* 1 = 0.106415 loss)\nI1212 09:52:39.873299 18921 sgd_solver.cpp:174] Iteration 5700, lr = 0.855\nI1212 09:52:39.887305 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.379595\nI1212 09:54:59.454923 18921 solver.cpp:337] Iteration 5800, Testing net (#0)\nI1212 09:56:23.174127 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76136\nI1212 09:56:23.174466 18921 solver.cpp:404]     Test net output #1: loss = 1.00381 (* 1 = 1.00381 loss)\nI1212 09:56:24.525791 18921 solver.cpp:228] Iteration 5800, loss = 0.180806\nI1212 09:56:24.525836 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 09:56:24.525852 18921 solver.cpp:244]     Train net output #1: loss = 0.180806 (* 1 = 0.180806 loss)\nI1212 09:56:24.584177 18921 sgd_solver.cpp:174] Iteration 5800, lr = 0.87\nI1212 09:56:24.598186 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.426157\nI1212 09:58:44.113054 18921 solver.cpp:337] Iteration 5900, 
Testing net (#0)\nI1212 10:00:07.813992 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72212\nI1212 10:00:07.814322 18921 solver.cpp:404]     Test net output #1: loss = 1.48626 (* 1 = 1.48626 loss)\nI1212 10:00:09.166404 18921 solver.cpp:228] Iteration 5900, loss = 0.28077\nI1212 10:00:09.166445 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 10:00:09.166461 18921 solver.cpp:244]     Train net output #1: loss = 0.28077 (* 1 = 0.28077 loss)\nI1212 10:00:09.221379 18921 sgd_solver.cpp:174] Iteration 5900, lr = 0.885\nI1212 10:00:09.235384 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.500038\nI1212 10:02:28.833258 18921 solver.cpp:337] Iteration 6000, Testing net (#0)\nI1212 10:03:52.579948 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79896\nI1212 10:03:52.580301 18921 solver.cpp:404]     Test net output #1: loss = 0.797676 (* 1 = 0.797676 loss)\nI1212 10:03:53.932399 18921 solver.cpp:228] Iteration 6000, loss = 0.08796\nI1212 10:03:53.932445 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 10:03:53.932467 18921 solver.cpp:244]     Train net output #1: loss = 0.0879601 (* 1 = 0.0879601 loss)\nI1212 10:03:53.987879 18921 sgd_solver.cpp:174] Iteration 6000, lr = 0.9\nI1212 10:03:54.001929 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.41842\nI1212 10:06:13.567899 18921 solver.cpp:337] Iteration 6100, Testing net (#0)\nI1212 10:07:37.297535 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7566\nI1212 10:07:37.297850 18921 solver.cpp:404]     Test net output #1: loss = 1.00531 (* 1 = 1.00531 loss)\nI1212 10:07:38.649921 18921 solver.cpp:228] Iteration 6100, loss = 0.175416\nI1212 10:07:38.649968 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 10:07:38.649991 18921 solver.cpp:244]     Train net output #1: loss = 0.175416 (* 1 = 0.175416 loss)\nI1212 10:07:38.706593 18921 sgd_solver.cpp:174] Iteration 6100, lr = 0.915\nI1212 10:07:38.720664 18921 sgd_solver.cpp:149] 
Gradient: L2 norm 0.453405\nI1212 10:09:58.269840 18921 solver.cpp:337] Iteration 6200, Testing net (#0)\nI1212 10:11:22.002796 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7862\nI1212 10:11:22.003119 18921 solver.cpp:404]     Test net output #1: loss = 0.793085 (* 1 = 0.793085 loss)\nI1212 10:11:23.355239 18921 solver.cpp:228] Iteration 6200, loss = 0.185555\nI1212 10:11:23.355283 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 10:11:23.355306 18921 solver.cpp:244]     Train net output #1: loss = 0.185555 (* 1 = 0.185555 loss)\nI1212 10:11:23.414921 18921 sgd_solver.cpp:174] Iteration 6200, lr = 0.93\nI1212 10:11:23.428933 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.465613\nI1212 10:13:42.947342 18921 solver.cpp:337] Iteration 6300, Testing net (#0)\nI1212 10:15:06.649978 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70696\nI1212 10:15:06.650311 18921 solver.cpp:404]     Test net output #1: loss = 1.38932 (* 1 = 1.38932 loss)\nI1212 10:15:08.002146 18921 solver.cpp:228] Iteration 6300, loss = 0.199461\nI1212 10:15:08.002189 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 10:15:08.002205 18921 solver.cpp:244]     Train net output #1: loss = 0.199461 (* 1 = 0.199461 loss)\nI1212 10:15:08.059489 18921 sgd_solver.cpp:174] Iteration 6300, lr = 0.945\nI1212 10:15:08.073453 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.394673\nI1212 10:17:27.685047 18921 solver.cpp:337] Iteration 6400, Testing net (#0)\nI1212 10:18:51.378396 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73048\nI1212 10:18:51.378705 18921 solver.cpp:404]     Test net output #1: loss = 1.26947 (* 1 = 1.26947 loss)\nI1212 10:18:52.730093 18921 solver.cpp:228] Iteration 6400, loss = 0.280971\nI1212 10:18:52.730134 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 10:18:52.730150 18921 solver.cpp:244]     Train net output #1: loss = 0.280971 (* 1 = 0.280971 loss)\nI1212 10:18:52.788815 18921 
sgd_solver.cpp:174] Iteration 6400, lr = 0.96\nI1212 10:18:52.802660 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.460573\nI1212 10:21:12.435469 18921 solver.cpp:337] Iteration 6500, Testing net (#0)\nI1212 10:22:36.129119 18921 solver.cpp:404]     Test net output #0: accuracy = 0.81848\nI1212 10:22:36.129418 18921 solver.cpp:404]     Test net output #1: loss = 0.692468 (* 1 = 0.692468 loss)\nI1212 10:22:37.480880 18921 solver.cpp:228] Iteration 6500, loss = 0.0742519\nI1212 10:22:37.480921 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:22:37.480937 18921 solver.cpp:244]     Train net output #1: loss = 0.0742519 (* 1 = 0.0742519 loss)\nI1212 10:22:37.539244 18921 sgd_solver.cpp:174] Iteration 6500, lr = 0.975\nI1212 10:22:37.553232 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.393742\nI1212 10:24:57.174326 18921 solver.cpp:337] Iteration 6600, Testing net (#0)\nI1212 10:26:20.880867 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80236\nI1212 10:26:20.881194 18921 solver.cpp:404]     Test net output #1: loss = 0.697949 (* 1 = 0.697949 loss)\nI1212 10:26:22.233459 18921 solver.cpp:228] Iteration 6600, loss = 0.252722\nI1212 10:26:22.233501 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 10:26:22.233518 18921 solver.cpp:244]     Train net output #1: loss = 0.252722 (* 1 = 0.252722 loss)\nI1212 10:26:22.295222 18921 sgd_solver.cpp:174] Iteration 6600, lr = 0.99\nI1212 10:26:22.309231 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.431077\nI1212 10:28:41.961891 18921 solver.cpp:337] Iteration 6700, Testing net (#0)\nI1212 10:30:05.658556 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72208\nI1212 10:30:05.658871 18921 solver.cpp:404]     Test net output #1: loss = 1.29661 (* 1 = 1.29661 loss)\nI1212 10:30:07.013504 18921 solver.cpp:228] Iteration 6700, loss = 0.115457\nI1212 10:30:07.013545 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:30:07.013561 18921 solver.cpp:244]   
  Train net output #1: loss = 0.115457 (* 1 = 0.115457 loss)\nI1212 10:30:07.067292 18921 sgd_solver.cpp:174] Iteration 6700, lr = 1.005\nI1212 10:30:07.081307 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.367705\nI1212 10:32:26.924206 18921 solver.cpp:337] Iteration 6800, Testing net (#0)\nI1212 10:33:50.616878 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80128\nI1212 10:33:50.617203 18921 solver.cpp:404]     Test net output #1: loss = 0.758267 (* 1 = 0.758267 loss)\nI1212 10:33:51.971272 18921 solver.cpp:228] Iteration 6800, loss = 0.162616\nI1212 10:33:51.971318 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 10:33:51.971334 18921 solver.cpp:244]     Train net output #1: loss = 0.162616 (* 1 = 0.162616 loss)\nI1212 10:33:52.023279 18921 sgd_solver.cpp:174] Iteration 6800, lr = 1.02\nI1212 10:33:52.037293 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.446158\nI1212 10:36:11.880810 18921 solver.cpp:337] Iteration 6900, Testing net (#0)\nI1212 10:37:35.612229 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77424\nI1212 10:37:35.612548 18921 solver.cpp:404]     Test net output #1: loss = 0.811459 (* 1 = 0.811459 loss)\nI1212 10:37:36.966154 18921 solver.cpp:228] Iteration 6900, loss = 0.0801131\nI1212 10:37:36.966197 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 10:37:36.966213 18921 solver.cpp:244]     Train net output #1: loss = 0.0801132 (* 1 = 0.0801132 loss)\nI1212 10:37:37.027161 18921 sgd_solver.cpp:174] Iteration 6900, lr = 1.035\nI1212 10:37:37.041208 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.380593\nI1212 10:39:56.848175 18921 solver.cpp:337] Iteration 7000, Testing net (#0)\nI1212 10:41:20.550309 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79108\nI1212 10:41:20.550634 18921 solver.cpp:404]     Test net output #1: loss = 0.886904 (* 1 = 0.886904 loss)\nI1212 10:41:21.903751 18921 solver.cpp:228] Iteration 7000, loss = 0.136816\nI1212 10:41:21.903791 18921 
solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 10:41:21.903806 18921 solver.cpp:244]     Train net output #1: loss = 0.136816 (* 1 = 0.136816 loss)\nI1212 10:41:21.961563 18921 sgd_solver.cpp:174] Iteration 7000, lr = 1.05\nI1212 10:41:21.975596 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.433061\nI1212 10:43:41.825953 18921 solver.cpp:337] Iteration 7100, Testing net (#0)\nI1212 10:45:05.527236 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74492\nI1212 10:45:05.527551 18921 solver.cpp:404]     Test net output #1: loss = 1.09567 (* 1 = 1.09567 loss)\nI1212 10:45:06.880570 18921 solver.cpp:228] Iteration 7100, loss = 0.172804\nI1212 10:45:06.880609 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 10:45:06.880625 18921 solver.cpp:244]     Train net output #1: loss = 0.172803 (* 1 = 0.172803 loss)\nI1212 10:45:06.940569 18921 sgd_solver.cpp:174] Iteration 7100, lr = 1.065\nI1212 10:45:06.954577 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.424938\nI1212 10:47:26.817651 18921 solver.cpp:337] Iteration 7200, Testing net (#0)\nI1212 10:48:50.525315 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69992\nI1212 10:48:50.525626 18921 solver.cpp:404]     Test net output #1: loss = 1.54758 (* 1 = 1.54758 loss)\nI1212 10:48:51.879284 18921 solver.cpp:228] Iteration 7200, loss = 0.0694921\nI1212 10:48:51.879321 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 10:48:51.879338 18921 solver.cpp:244]     Train net output #1: loss = 0.069492 (* 1 = 0.069492 loss)\nI1212 10:48:51.934357 18921 sgd_solver.cpp:174] Iteration 7200, lr = 1.08\nI1212 10:48:51.948448 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.329956\nI1212 10:51:11.735119 18921 solver.cpp:337] Iteration 7300, Testing net (#0)\nI1212 10:52:35.445333 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74568\nI1212 10:52:35.445657 18921 solver.cpp:404]     Test net output #1: loss = 1.19541 (* 1 = 1.19541 loss)\nI1212 10:52:36.799490 
18921 solver.cpp:228] Iteration 7300, loss = 0.228192\nI1212 10:52:36.799530 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 10:52:36.799546 18921 solver.cpp:244]     Train net output #1: loss = 0.228192 (* 1 = 0.228192 loss)\nI1212 10:52:36.853801 18921 sgd_solver.cpp:174] Iteration 7300, lr = 1.095\nI1212 10:52:36.867835 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.480166\nI1212 10:54:56.653743 18921 solver.cpp:337] Iteration 7400, Testing net (#0)\nI1212 10:56:20.363344 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7\nI1212 10:56:20.363680 18921 solver.cpp:404]     Test net output #1: loss = 1.26661 (* 1 = 1.26661 loss)\nI1212 10:56:21.716478 18921 solver.cpp:228] Iteration 7400, loss = 0.153778\nI1212 10:56:21.716517 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 10:56:21.716532 18921 solver.cpp:244]     Train net output #1: loss = 0.153778 (* 1 = 0.153778 loss)\nI1212 10:56:21.772415 18921 sgd_solver.cpp:174] Iteration 7400, lr = 1.11\nI1212 10:56:21.786450 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.38362\nI1212 10:58:41.562405 18921 solver.cpp:337] Iteration 7500, Testing net (#0)\nI1212 11:00:05.265767 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78044\nI1212 11:00:05.266077 18921 solver.cpp:404]     Test net output #1: loss = 0.864683 (* 1 = 0.864683 loss)\nI1212 11:00:06.624776 18921 solver.cpp:228] Iteration 7500, loss = 0.198786\nI1212 11:00:06.624816 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 11:00:06.624832 18921 solver.cpp:244]     Train net output #1: loss = 0.198786 (* 1 = 0.198786 loss)\nI1212 11:00:06.675848 18921 sgd_solver.cpp:174] Iteration 7500, lr = 1.125\nI1212 11:00:06.689862 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.430987\nI1212 11:02:26.484889 18921 solver.cpp:337] Iteration 7600, Testing net (#0)\nI1212 11:03:50.187875 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72916\nI1212 11:03:50.188208 18921 solver.cpp:404]   
  Test net output #1: loss = 1.20823 (* 1 = 1.20823 loss)\nI1212 11:03:51.541560 18921 solver.cpp:228] Iteration 7600, loss = 0.133191\nI1212 11:03:51.541599 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:03:51.541615 18921 solver.cpp:244]     Train net output #1: loss = 0.133191 (* 1 = 0.133191 loss)\nI1212 11:03:51.598901 18921 sgd_solver.cpp:174] Iteration 7600, lr = 1.14\nI1212 11:03:51.612828 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.387715\nI1212 11:06:11.427299 18921 solver.cpp:337] Iteration 7700, Testing net (#0)\nI1212 11:07:35.140193 18921 solver.cpp:404]     Test net output #0: accuracy = 0.67824\nI1212 11:07:35.140513 18921 solver.cpp:404]     Test net output #1: loss = 1.48409 (* 1 = 1.48409 loss)\nI1212 11:07:36.494596 18921 solver.cpp:228] Iteration 7700, loss = 0.0905037\nI1212 11:07:36.494638 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:07:36.494654 18921 solver.cpp:244]     Train net output #1: loss = 0.0905037 (* 1 = 0.0905037 loss)\nI1212 11:07:36.551141 18921 sgd_solver.cpp:174] Iteration 7700, lr = 1.155\nI1212 11:07:36.565213 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.426985\nI1212 11:09:56.428382 18921 solver.cpp:337] Iteration 7800, Testing net (#0)\nI1212 11:11:20.145340 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69796\nI1212 11:11:20.145644 18921 solver.cpp:404]     Test net output #1: loss = 1.33682 (* 1 = 1.33682 loss)\nI1212 11:11:21.501016 18921 solver.cpp:228] Iteration 7800, loss = 0.118618\nI1212 11:11:21.501060 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 11:11:21.501075 18921 solver.cpp:244]     Train net output #1: loss = 0.118618 (* 1 = 0.118618 loss)\nI1212 11:11:21.550736 18921 sgd_solver.cpp:174] Iteration 7800, lr = 1.17\nI1212 11:11:21.564817 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.43787\nI1212 11:13:41.417543 18921 solver.cpp:337] Iteration 7900, Testing net (#0)\nI1212 11:15:05.133231 18921 solver.cpp:404]     
Test net output #0: accuracy = 0.77548\nI1212 11:15:05.133576 18921 solver.cpp:404]     Test net output #1: loss = 0.840464 (* 1 = 0.840464 loss)\nI1212 11:15:06.487567 18921 solver.cpp:228] Iteration 7900, loss = 0.118112\nI1212 11:15:06.487607 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 11:15:06.487624 18921 solver.cpp:244]     Train net output #1: loss = 0.118112 (* 1 = 0.118112 loss)\nI1212 11:15:06.545184 18921 sgd_solver.cpp:174] Iteration 7900, lr = 1.185\nI1212 11:15:06.559218 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.347162\nI1212 11:17:26.465142 18921 solver.cpp:337] Iteration 8000, Testing net (#0)\nI1212 11:18:50.176334 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80096\nI1212 11:18:50.176672 18921 solver.cpp:404]     Test net output #1: loss = 0.662433 (* 1 = 0.662433 loss)\nI1212 11:18:51.531580 18921 solver.cpp:228] Iteration 8000, loss = 0.120998\nI1212 11:18:51.531620 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 11:18:51.531636 18921 solver.cpp:244]     Train net output #1: loss = 0.120998 (* 1 = 0.120998 loss)\nI1212 11:18:51.584120 18921 sgd_solver.cpp:174] Iteration 8000, lr = 1.2\nI1212 11:18:51.598207 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.387772\nI1212 11:21:11.504448 18921 solver.cpp:337] Iteration 8100, Testing net (#0)\nI1212 11:22:35.224525 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76692\nI1212 11:22:35.224850 18921 solver.cpp:404]     Test net output #1: loss = 0.975677 (* 1 = 0.975677 loss)\nI1212 11:22:36.580166 18921 solver.cpp:228] Iteration 8100, loss = 0.153094\nI1212 11:22:36.580209 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 11:22:36.580225 18921 solver.cpp:244]     Train net output #1: loss = 0.153094 (* 1 = 0.153094 loss)\nI1212 11:22:36.633934 18921 sgd_solver.cpp:174] Iteration 8100, lr = 1.215\nI1212 11:22:36.647959 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.361732\nI1212 11:24:56.436556 18921 
solver.cpp:337] Iteration 8200, Testing net (#0)\nI1212 11:26:20.155722 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76968\nI1212 11:26:20.156095 18921 solver.cpp:404]     Test net output #1: loss = 0.823837 (* 1 = 0.823837 loss)\nI1212 11:26:21.509938 18921 solver.cpp:228] Iteration 8200, loss = 0.11581\nI1212 11:26:21.509976 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 11:26:21.509994 18921 solver.cpp:244]     Train net output #1: loss = 0.11581 (* 1 = 0.11581 loss)\nI1212 11:26:21.564918 18921 sgd_solver.cpp:174] Iteration 8200, lr = 1.23\nI1212 11:26:21.578994 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.357873\nI1212 11:28:41.400998 18921 solver.cpp:337] Iteration 8300, Testing net (#0)\nI1212 11:30:05.136310 18921 solver.cpp:404]     Test net output #0: accuracy = 0.62384\nI1212 11:30:05.136605 18921 solver.cpp:404]     Test net output #1: loss = 2.32168 (* 1 = 2.32168 loss)\nI1212 11:30:06.490416 18921 solver.cpp:228] Iteration 8300, loss = 0.135064\nI1212 11:30:06.490456 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 11:30:06.490473 18921 solver.cpp:244]     Train net output #1: loss = 0.135064 (* 1 = 0.135064 loss)\nI1212 11:30:06.543658 18921 sgd_solver.cpp:174] Iteration 8300, lr = 1.245\nI1212 11:30:06.557693 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.442349\nI1212 11:32:26.384769 18921 solver.cpp:337] Iteration 8400, Testing net (#0)\nI1212 11:33:50.129981 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72652\nI1212 11:33:50.130318 18921 solver.cpp:404]     Test net output #1: loss = 1.28308 (* 1 = 1.28308 loss)\nI1212 11:33:51.484284 18921 solver.cpp:228] Iteration 8400, loss = 0.134735\nI1212 11:33:51.484324 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 11:33:51.484340 18921 solver.cpp:244]     Train net output #1: loss = 0.134735 (* 1 = 0.134735 loss)\nI1212 11:33:51.544698 18921 sgd_solver.cpp:174] Iteration 8400, lr = 1.26\nI1212 11:33:51.558781 
18921 sgd_solver.cpp:149] Gradient: L2 norm 0.311922\nI1212 11:36:11.469452 18921 solver.cpp:337] Iteration 8500, Testing net (#0)\nI1212 11:37:35.201300 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76216\nI1212 11:37:35.201639 18921 solver.cpp:404]     Test net output #1: loss = 0.878416 (* 1 = 0.878416 loss)\nI1212 11:37:36.555555 18921 solver.cpp:228] Iteration 8500, loss = 0.142302\nI1212 11:37:36.555594 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 11:37:36.555610 18921 solver.cpp:244]     Train net output #1: loss = 0.142302 (* 1 = 0.142302 loss)\nI1212 11:37:36.609184 18921 sgd_solver.cpp:174] Iteration 8500, lr = 1.275\nI1212 11:37:36.623136 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.354299\nI1212 11:39:56.550065 18921 solver.cpp:337] Iteration 8600, Testing net (#0)\nI1212 11:41:20.277482 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74712\nI1212 11:41:20.277797 18921 solver.cpp:404]     Test net output #1: loss = 0.995565 (* 1 = 0.995565 loss)\nI1212 11:41:21.631503 18921 solver.cpp:228] Iteration 8600, loss = 0.134835\nI1212 11:41:21.631541 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 11:41:21.631557 18921 solver.cpp:244]     Train net output #1: loss = 0.134836 (* 1 = 0.134836 loss)\nI1212 11:41:21.685293 18921 sgd_solver.cpp:174] Iteration 8600, lr = 1.29\nI1212 11:41:21.699342 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.431226\nI1212 11:43:41.528218 18921 solver.cpp:337] Iteration 8700, Testing net (#0)\nI1212 11:45:05.297016 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74792\nI1212 11:45:05.297380 18921 solver.cpp:404]     Test net output #1: loss = 0.951176 (* 1 = 0.951176 loss)\nI1212 11:45:06.652468 18921 solver.cpp:228] Iteration 8700, loss = 0.154351\nI1212 11:45:06.652506 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 11:45:06.652523 18921 solver.cpp:244]     Train net output #1: loss = 0.154351 (* 1 = 0.154351 loss)\nI1212 
11:45:06.710038 18921 sgd_solver.cpp:174] Iteration 8700, lr = 1.305\nI1212 11:45:06.724133 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.397846\nI1212 11:47:26.569811 18921 solver.cpp:337] Iteration 8800, Testing net (#0)\nI1212 11:48:50.289710 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76372\nI1212 11:48:50.290032 18921 solver.cpp:404]     Test net output #1: loss = 0.939746 (* 1 = 0.939746 loss)\nI1212 11:48:51.644312 18921 solver.cpp:228] Iteration 8800, loss = 0.15031\nI1212 11:48:51.644353 18921 solver.cpp:244]     Train net output #0: accuracy = 0.968\nI1212 11:48:51.644369 18921 solver.cpp:244]     Train net output #1: loss = 0.15031 (* 1 = 0.15031 loss)\nI1212 11:48:51.697358 18921 sgd_solver.cpp:174] Iteration 8800, lr = 1.32\nI1212 11:48:51.711443 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.360228\nI1212 11:51:11.544672 18921 solver.cpp:337] Iteration 8900, Testing net (#0)\nI1212 11:52:35.261119 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73992\nI1212 11:52:35.261448 18921 solver.cpp:404]     Test net output #1: loss = 0.987689 (* 1 = 0.987689 loss)\nI1212 11:52:36.614434 18921 solver.cpp:228] Iteration 8900, loss = 0.185065\nI1212 11:52:36.614470 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 11:52:36.614485 18921 solver.cpp:244]     Train net output #1: loss = 0.185065 (* 1 = 0.185065 loss)\nI1212 11:52:36.668128 18921 sgd_solver.cpp:174] Iteration 8900, lr = 1.335\nI1212 11:52:36.682158 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.447433\nI1212 11:54:56.613421 18921 solver.cpp:337] Iteration 9000, Testing net (#0)\nI1212 11:56:20.341249 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78148\nI1212 11:56:20.341568 18921 solver.cpp:404]     Test net output #1: loss = 0.82127 (* 1 = 0.82127 loss)\nI1212 11:56:21.696630 18921 solver.cpp:228] Iteration 9000, loss = 0.138968\nI1212 11:56:21.696669 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 11:56:21.696686 18921 
solver.cpp:244]     Train net output #1: loss = 0.138968 (* 1 = 0.138968 loss)\nI1212 11:56:21.750402 18921 sgd_solver.cpp:174] Iteration 9000, lr = 1.35\nI1212 11:56:21.764448 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.482535\nI1212 11:58:41.670259 18921 solver.cpp:337] Iteration 9100, Testing net (#0)\nI1212 12:00:05.397902 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79912\nI1212 12:00:05.398201 18921 solver.cpp:404]     Test net output #1: loss = 0.673352 (* 1 = 0.673352 loss)\nI1212 12:00:06.757467 18921 solver.cpp:228] Iteration 9100, loss = 0.175137\nI1212 12:00:06.757510 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 12:00:06.757531 18921 solver.cpp:244]     Train net output #1: loss = 0.175137 (* 1 = 0.175137 loss)\nI1212 12:00:06.810925 18921 sgd_solver.cpp:174] Iteration 9100, lr = 1.365\nI1212 12:00:06.824973 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.345875\nI1212 12:02:26.656055 18921 solver.cpp:337] Iteration 9200, Testing net (#0)\nI1212 12:03:50.380911 18921 solver.cpp:404]     Test net output #0: accuracy = 0.81884\nI1212 12:03:50.381247 18921 solver.cpp:404]     Test net output #1: loss = 0.628686 (* 1 = 0.628686 loss)\nI1212 12:03:51.734474 18921 solver.cpp:228] Iteration 9200, loss = 0.158537\nI1212 12:03:51.734515 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 12:03:51.734535 18921 solver.cpp:244]     Train net output #1: loss = 0.158537 (* 1 = 0.158537 loss)\nI1212 12:03:51.786886 18921 sgd_solver.cpp:174] Iteration 9200, lr = 1.38\nI1212 12:03:51.800896 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.389975\nI1212 12:06:11.724414 18921 solver.cpp:337] Iteration 9300, Testing net (#0)\nI1212 12:07:35.457864 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77432\nI1212 12:07:35.458181 18921 solver.cpp:404]     Test net output #1: loss = 0.864492 (* 1 = 0.864492 loss)\nI1212 12:07:36.812739 18921 solver.cpp:228] Iteration 9300, loss = 0.189823\nI1212 12:07:36.812783 
18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 12:07:36.812798 18921 solver.cpp:244]     Train net output #1: loss = 0.189823 (* 1 = 0.189823 loss)\nI1212 12:07:36.868157 18921 sgd_solver.cpp:174] Iteration 9300, lr = 1.395\nI1212 12:07:36.882236 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.411882\nI1212 12:09:56.770737 18921 solver.cpp:337] Iteration 9400, Testing net (#0)\nI1212 12:11:20.489614 18921 solver.cpp:404]     Test net output #0: accuracy = 0.81688\nI1212 12:11:20.489920 18921 solver.cpp:404]     Test net output #1: loss = 0.576189 (* 1 = 0.576189 loss)\nI1212 12:11:21.844743 18921 solver.cpp:228] Iteration 9400, loss = 0.170625\nI1212 12:11:21.844784 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 12:11:21.844799 18921 solver.cpp:244]     Train net output #1: loss = 0.170625 (* 1 = 0.170625 loss)\nI1212 12:11:21.901847 18921 sgd_solver.cpp:174] Iteration 9400, lr = 1.41\nI1212 12:11:21.915905 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.366884\nI1212 12:13:41.768667 18921 solver.cpp:337] Iteration 9500, Testing net (#0)\nI1212 12:15:05.483683 18921 solver.cpp:404]     Test net output #0: accuracy = 0.8126\nI1212 12:15:05.484017 18921 solver.cpp:404]     Test net output #1: loss = 0.720571 (* 1 = 0.720571 loss)\nI1212 12:15:06.837734 18921 solver.cpp:228] Iteration 9500, loss = 0.217767\nI1212 12:15:06.837775 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 12:15:06.837790 18921 solver.cpp:244]     Train net output #1: loss = 0.217768 (* 1 = 0.217768 loss)\nI1212 12:15:06.892120 18921 sgd_solver.cpp:174] Iteration 9500, lr = 1.425\nI1212 12:15:06.906158 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.311827\nI1212 12:17:26.815218 18921 solver.cpp:337] Iteration 9600, Testing net (#0)\nI1212 12:18:50.536762 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7468\nI1212 12:18:50.537075 18921 solver.cpp:404]     Test net output #1: loss = 0.998885 (* 1 = 0.998885 loss)\nI1212 
12:18:51.890612 18921 solver.cpp:228] Iteration 9600, loss = 0.136181\nI1212 12:18:51.890652 18921 solver.cpp:244]     Train net output #0: accuracy = 0.976\nI1212 12:18:51.890669 18921 solver.cpp:244]     Train net output #1: loss = 0.136181 (* 1 = 0.136181 loss)\nI1212 12:18:51.947898 18921 sgd_solver.cpp:174] Iteration 9600, lr = 1.44\nI1212 12:18:51.961792 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.341746\nI1212 12:21:11.796274 18921 solver.cpp:337] Iteration 9700, Testing net (#0)\nI1212 12:22:35.518188 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78936\nI1212 12:22:35.518509 18921 solver.cpp:404]     Test net output #1: loss = 0.753755 (* 1 = 0.753755 loss)\nI1212 12:22:36.871636 18921 solver.cpp:228] Iteration 9700, loss = 0.150255\nI1212 12:22:36.871676 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 12:22:36.871691 18921 solver.cpp:244]     Train net output #1: loss = 0.150255 (* 1 = 0.150255 loss)\nI1212 12:22:36.929733 18921 sgd_solver.cpp:174] Iteration 9700, lr = 1.455\nI1212 12:22:36.943696 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.342031\nI1212 12:24:56.850724 18921 solver.cpp:337] Iteration 9800, Testing net (#0)\nI1212 12:26:20.569234 18921 solver.cpp:404]     Test net output #0: accuracy = 0.762\nI1212 12:26:20.569581 18921 solver.cpp:404]     Test net output #1: loss = 0.945562 (* 1 = 0.945562 loss)\nI1212 12:26:21.924542 18921 solver.cpp:228] Iteration 9800, loss = 0.218904\nI1212 12:26:21.924583 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 12:26:21.924599 18921 solver.cpp:244]     Train net output #1: loss = 0.218904 (* 1 = 0.218904 loss)\nI1212 12:26:21.986016 18921 sgd_solver.cpp:174] Iteration 9800, lr = 1.47\nI1212 12:26:22.000063 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.407469\nI1212 12:28:41.878456 18921 solver.cpp:337] Iteration 9900, Testing net (#0)\nI1212 12:30:05.658490 18921 solver.cpp:404]     Test net output #0: accuracy = 0.81068\nI1212 12:30:05.658820 
18921 solver.cpp:404]     Test net output #1: loss = 0.642069 (* 1 = 0.642069 loss)\nI1212 12:30:07.012323 18921 solver.cpp:228] Iteration 9900, loss = 0.250557\nI1212 12:30:07.012364 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 12:30:07.012380 18921 solver.cpp:244]     Train net output #1: loss = 0.250557 (* 1 = 0.250557 loss)\nI1212 12:30:07.072878 18921 sgd_solver.cpp:174] Iteration 9900, lr = 1.485\nI1212 12:30:07.086386 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.373964\nI1212 12:32:26.957698 18921 solver.cpp:337] Iteration 10000, Testing net (#0)\nI1212 12:33:50.788981 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70904\nI1212 12:33:50.789319 18921 solver.cpp:404]     Test net output #1: loss = 1.36598 (* 1 = 1.36598 loss)\nI1212 12:33:52.142760 18921 solver.cpp:228] Iteration 10000, loss = 0.10555\nI1212 12:33:52.142801 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 12:33:52.142817 18921 solver.cpp:244]     Train net output #1: loss = 0.105551 (* 1 = 0.105551 loss)\nI1212 12:33:52.202675 18921 sgd_solver.cpp:174] Iteration 10000, lr = 1.5\nI1212 12:33:52.216786 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.401592\nI1212 12:36:12.112273 18921 solver.cpp:337] Iteration 10100, Testing net (#0)\nI1212 12:37:35.978047 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7644\nI1212 12:37:35.978387 18921 solver.cpp:404]     Test net output #1: loss = 0.85092 (* 1 = 0.85092 loss)\nI1212 12:37:37.333273 18921 solver.cpp:228] Iteration 10100, loss = 0.253688\nI1212 12:37:37.333314 18921 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 12:37:37.333330 18921 solver.cpp:244]     Train net output #1: loss = 0.253688 (* 1 = 0.253688 loss)\nI1212 12:37:37.387650 18921 sgd_solver.cpp:174] Iteration 10100, lr = 1.515\nI1212 12:37:37.401664 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.396599\nI1212 12:39:57.276654 18921 solver.cpp:337] Iteration 10200, Testing net (#0)\nI1212 
12:41:21.017333 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78228\nI1212 12:41:21.017665 18921 solver.cpp:404]     Test net output #1: loss = 0.853314 (* 1 = 0.853314 loss)\nI1212 12:41:22.372284 18921 solver.cpp:228] Iteration 10200, loss = 0.15251\nI1212 12:41:22.372331 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 12:41:22.372354 18921 solver.cpp:244]     Train net output #1: loss = 0.15251 (* 1 = 0.15251 loss)\nI1212 12:41:22.428143 18921 sgd_solver.cpp:174] Iteration 10200, lr = 1.53\nI1212 12:41:22.442240 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.40607\nI1212 12:43:42.393151 18921 solver.cpp:337] Iteration 10300, Testing net (#0)\nI1212 12:45:06.118880 18921 solver.cpp:404]     Test net output #0: accuracy = 0.83128\nI1212 12:45:06.119216 18921 solver.cpp:404]     Test net output #1: loss = 0.624724 (* 1 = 0.624724 loss)\nI1212 12:45:07.475409 18921 solver.cpp:228] Iteration 10300, loss = 0.141448\nI1212 12:45:07.475453 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 12:45:07.475468 18921 solver.cpp:244]     Train net output #1: loss = 0.141449 (* 1 = 0.141449 loss)\nI1212 12:45:07.530128 18921 sgd_solver.cpp:174] Iteration 10300, lr = 1.545\nI1212 12:45:07.544078 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.342553\nI1212 12:47:27.424055 18921 solver.cpp:337] Iteration 10400, Testing net (#0)\nI1212 12:48:51.130440 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75988\nI1212 12:48:51.130785 18921 solver.cpp:404]     Test net output #1: loss = 0.96358 (* 1 = 0.96358 loss)\nI1212 12:48:52.485975 18921 solver.cpp:228] Iteration 10400, loss = 0.215217\nI1212 12:48:52.486016 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 12:48:52.486032 18921 solver.cpp:244]     Train net output #1: loss = 0.215217 (* 1 = 0.215217 loss)\nI1212 12:48:52.544721 18921 sgd_solver.cpp:174] Iteration 10400, lr = 1.56\nI1212 12:48:52.558836 18921 sgd_solver.cpp:149] Gradient: L2 norm 
0.369879\nI1212 12:51:12.445585 18921 solver.cpp:337] Iteration 10500, Testing net (#0)\nI1212 12:52:36.271018 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71948\nI1212 12:52:36.271323 18921 solver.cpp:404]     Test net output #1: loss = 1.25736 (* 1 = 1.25736 loss)\nI1212 12:52:37.625591 18921 solver.cpp:228] Iteration 10500, loss = 0.191647\nI1212 12:52:37.625633 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 12:52:37.625648 18921 solver.cpp:244]     Train net output #1: loss = 0.191647 (* 1 = 0.191647 loss)\nI1212 12:52:37.682426 18921 sgd_solver.cpp:174] Iteration 10500, lr = 1.575\nI1212 12:52:37.696550 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.37335\nI1212 12:54:57.536532 18921 solver.cpp:337] Iteration 10600, Testing net (#0)\nI1212 12:56:21.258168 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7962\nI1212 12:56:21.258522 18921 solver.cpp:404]     Test net output #1: loss = 0.806743 (* 1 = 0.806743 loss)\nI1212 12:56:22.612043 18921 solver.cpp:228] Iteration 10600, loss = 0.30746\nI1212 12:56:22.612085 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 12:56:22.612108 18921 solver.cpp:244]     Train net output #1: loss = 0.30746 (* 1 = 0.30746 loss)\nI1212 12:56:22.669479 18921 sgd_solver.cpp:174] Iteration 10600, lr = 1.59\nI1212 12:56:22.683531 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.400325\nI1212 12:58:42.531740 18921 solver.cpp:337] Iteration 10700, Testing net (#0)\nI1212 13:00:06.292137 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75248\nI1212 13:00:06.292456 18921 solver.cpp:404]     Test net output #1: loss = 0.913786 (* 1 = 0.913786 loss)\nI1212 13:00:07.647054 18921 solver.cpp:228] Iteration 10700, loss = 0.138131\nI1212 13:00:07.647095 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 13:00:07.647119 18921 solver.cpp:244]     Train net output #1: loss = 0.138131 (* 1 = 0.138131 loss)\nI1212 13:00:07.711544 18921 sgd_solver.cpp:174] 
Iteration 10700, lr = 1.605\nI1212 13:00:07.725625 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.331195\nI1212 13:02:27.642599 18921 solver.cpp:337] Iteration 10800, Testing net (#0)\nI1212 13:03:51.403470 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76388\nI1212 13:03:51.403815 18921 solver.cpp:404]     Test net output #1: loss = 0.907134 (* 1 = 0.907134 loss)\nI1212 13:03:52.764084 18921 solver.cpp:228] Iteration 10800, loss = 0.229232\nI1212 13:03:52.764129 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 13:03:52.764153 18921 solver.cpp:244]     Train net output #1: loss = 0.229232 (* 1 = 0.229232 loss)\nI1212 13:03:52.817534 18921 sgd_solver.cpp:174] Iteration 10800, lr = 1.62\nI1212 13:03:52.831660 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.371971\nI1212 13:06:12.750509 18921 solver.cpp:337] Iteration 10900, Testing net (#0)\nI1212 13:07:36.544675 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75324\nI1212 13:07:36.544999 18921 solver.cpp:404]     Test net output #1: loss = 0.895616 (* 1 = 0.895616 loss)\nI1212 13:07:37.906313 18921 solver.cpp:228] Iteration 10900, loss = 0.191211\nI1212 13:07:37.906359 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 13:07:37.906381 18921 solver.cpp:244]     Train net output #1: loss = 0.191211 (* 1 = 0.191211 loss)\nI1212 13:07:37.957315 18921 sgd_solver.cpp:174] Iteration 10900, lr = 1.635\nI1212 13:07:37.971426 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.37977\nI1212 13:09:57.902689 18921 solver.cpp:337] Iteration 11000, Testing net (#0)\nI1212 13:11:21.656210 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72724\nI1212 13:11:21.656560 18921 solver.cpp:404]     Test net output #1: loss = 1.10003 (* 1 = 1.10003 loss)\nI1212 13:11:23.017330 18921 solver.cpp:228] Iteration 11000, loss = 0.234295\nI1212 13:11:23.017376 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:11:23.017400 18921 solver.cpp:244]     Train net 
output #1: loss = 0.234295 (* 1 = 0.234295 loss)\nI1212 13:11:23.064433 18921 sgd_solver.cpp:174] Iteration 11000, lr = 1.65\nI1212 13:11:23.078425 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.393506\nI1212 13:13:43.054163 18921 solver.cpp:337] Iteration 11100, Testing net (#0)\nI1212 13:15:06.839741 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7504\nI1212 13:15:06.840085 18921 solver.cpp:404]     Test net output #1: loss = 0.980153 (* 1 = 0.980153 loss)\nI1212 13:15:08.195228 18921 solver.cpp:228] Iteration 11100, loss = 0.147025\nI1212 13:15:08.195276 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:15:08.195293 18921 solver.cpp:244]     Train net output #1: loss = 0.147025 (* 1 = 0.147025 loss)\nI1212 13:15:08.255363 18921 sgd_solver.cpp:174] Iteration 11100, lr = 1.665\nI1212 13:15:08.269392 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.426254\nI1212 13:17:28.248577 18921 solver.cpp:337] Iteration 11200, Testing net (#0)\nI1212 13:18:52.081929 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7908\nI1212 13:18:52.082271 18921 solver.cpp:404]     Test net output #1: loss = 0.796901 (* 1 = 0.796901 loss)\nI1212 13:18:53.436975 18921 solver.cpp:228] Iteration 11200, loss = 0.06111\nI1212 13:18:53.437019 18921 solver.cpp:244]     Train net output #0: accuracy = 0.984\nI1212 13:18:53.437036 18921 solver.cpp:244]     Train net output #1: loss = 0.0611101 (* 1 = 0.0611101 loss)\nI1212 13:18:53.497196 18921 sgd_solver.cpp:174] Iteration 11200, lr = 1.68\nI1212 13:18:53.511309 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.312845\nI1212 13:21:13.477227 18921 solver.cpp:337] Iteration 11300, Testing net (#0)\nI1212 13:22:37.202231 18921 solver.cpp:404]     Test net output #0: accuracy = 0.82012\nI1212 13:22:37.202564 18921 solver.cpp:404]     Test net output #1: loss = 0.59455 (* 1 = 0.59455 loss)\nI1212 13:22:38.557391 18921 solver.cpp:228] Iteration 11300, loss = 0.203856\nI1212 13:22:38.557440 18921 solver.cpp:244]     
Train net output #0: accuracy = 0.92\nI1212 13:22:38.557462 18921 solver.cpp:244]     Train net output #1: loss = 0.203856 (* 1 = 0.203856 loss)\nI1212 13:22:38.617230 18921 sgd_solver.cpp:174] Iteration 11300, lr = 1.695\nI1212 13:22:38.631299 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.343201\nI1212 13:24:58.550673 18921 solver.cpp:337] Iteration 11400, Testing net (#0)\nI1212 13:26:22.317715 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73064\nI1212 13:26:22.318040 18921 solver.cpp:404]     Test net output #1: loss = 1.16462 (* 1 = 1.16462 loss)\nI1212 13:26:23.678416 18921 solver.cpp:228] Iteration 11400, loss = 0.214748\nI1212 13:26:23.678462 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:26:23.678484 18921 solver.cpp:244]     Train net output #1: loss = 0.214748 (* 1 = 0.214748 loss)\nI1212 13:26:23.730861 18921 sgd_solver.cpp:174] Iteration 11400, lr = 1.71\nI1212 13:26:23.744940 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.326448\nI1212 13:28:43.655544 18921 solver.cpp:337] Iteration 11500, Testing net (#0)\nI1212 13:30:07.374474 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77408\nI1212 13:30:07.374781 18921 solver.cpp:404]     Test net output #1: loss = 0.733935 (* 1 = 0.733935 loss)\nI1212 13:30:08.728653 18921 solver.cpp:228] Iteration 11500, loss = 0.20025\nI1212 13:30:08.728696 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 13:30:08.728713 18921 solver.cpp:244]     Train net output #1: loss = 0.20025 (* 1 = 0.20025 loss)\nI1212 13:30:08.791290 18921 sgd_solver.cpp:174] Iteration 11500, lr = 1.725\nI1212 13:30:08.805272 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.345609\nI1212 13:32:28.677829 18921 solver.cpp:337] Iteration 11600, Testing net (#0)\nI1212 13:33:52.411361 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79352\nI1212 13:33:52.411702 18921 solver.cpp:404]     Test net output #1: loss = 0.760768 (* 1 = 0.760768 loss)\nI1212 13:33:53.763756 18921 
solver.cpp:228] Iteration 11600, loss = 0.189943\nI1212 13:33:53.763805 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:33:53.763828 18921 solver.cpp:244]     Train net output #1: loss = 0.189943 (* 1 = 0.189943 loss)\nI1212 13:33:53.824995 18921 sgd_solver.cpp:174] Iteration 11600, lr = 1.74\nI1212 13:33:53.838973 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.383889\nI1212 13:36:13.493808 18921 solver.cpp:337] Iteration 11700, Testing net (#0)\nI1212 13:37:37.227600 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76432\nI1212 13:37:37.227942 18921 solver.cpp:404]     Test net output #1: loss = 0.887326 (* 1 = 0.887326 loss)\nI1212 13:37:38.579733 18921 solver.cpp:228] Iteration 11700, loss = 0.196743\nI1212 13:37:38.579777 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:37:38.579793 18921 solver.cpp:244]     Train net output #1: loss = 0.196743 (* 1 = 0.196743 loss)\nI1212 13:37:38.638046 18921 sgd_solver.cpp:174] Iteration 11700, lr = 1.755\nI1212 13:37:38.652087 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.360667\nI1212 13:39:58.159854 18921 solver.cpp:337] Iteration 11800, Testing net (#0)\nI1212 13:41:21.854638 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70632\nI1212 13:41:21.854949 18921 solver.cpp:404]     Test net output #1: loss = 1.31541 (* 1 = 1.31541 loss)\nI1212 13:41:23.206900 18921 solver.cpp:228] Iteration 11800, loss = 0.147012\nI1212 13:41:23.206943 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 13:41:23.206959 18921 solver.cpp:244]     Train net output #1: loss = 0.147012 (* 1 = 0.147012 loss)\nI1212 13:41:23.262953 18921 sgd_solver.cpp:174] Iteration 11800, lr = 1.77\nI1212 13:41:23.276944 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.30277\nI1212 13:43:42.777050 18921 solver.cpp:337] Iteration 11900, Testing net (#0)\nI1212 13:45:06.474165 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68456\nI1212 13:45:06.474508 18921 
solver.cpp:404]     Test net output #1: loss = 1.28901 (* 1 = 1.28901 loss)\nI1212 13:45:07.825058 18921 solver.cpp:228] Iteration 11900, loss = 0.221155\nI1212 13:45:07.825103 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 13:45:07.825117 18921 solver.cpp:244]     Train net output #1: loss = 0.221155 (* 1 = 0.221155 loss)\nI1212 13:45:07.885224 18921 sgd_solver.cpp:174] Iteration 11900, lr = 1.785\nI1212 13:45:07.899327 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.397461\nI1212 13:47:27.595510 18921 solver.cpp:337] Iteration 12000, Testing net (#0)\nI1212 13:48:51.304855 18921 solver.cpp:404]     Test net output #0: accuracy = 0.61496\nI1212 13:48:51.305174 18921 solver.cpp:404]     Test net output #1: loss = 2.23194 (* 1 = 2.23194 loss)\nI1212 13:48:52.658581 18921 solver.cpp:228] Iteration 12000, loss = 0.216921\nI1212 13:48:52.658625 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 13:48:52.658641 18921 solver.cpp:244]     Train net output #1: loss = 0.216922 (* 1 = 0.216922 loss)\nI1212 13:48:52.712644 18921 sgd_solver.cpp:174] Iteration 12000, lr = 1.8\nI1212 13:48:52.726702 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.358082\nI1212 13:51:12.487573 18921 solver.cpp:337] Iteration 12100, Testing net (#0)\nI1212 13:52:36.208370 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76288\nI1212 13:52:36.208679 18921 solver.cpp:404]     Test net output #1: loss = 0.888761 (* 1 = 0.888761 loss)\nI1212 13:52:37.562234 18921 solver.cpp:228] Iteration 12100, loss = 0.227311\nI1212 13:52:37.562278 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 13:52:37.562294 18921 solver.cpp:244]     Train net output #1: loss = 0.227312 (* 1 = 0.227312 loss)\nI1212 13:52:37.616878 18921 sgd_solver.cpp:174] Iteration 12100, lr = 1.815\nI1212 13:52:37.630971 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.299343\nI1212 13:54:57.407680 18921 solver.cpp:337] Iteration 12200, Testing net (#0)\nI1212 13:56:21.114763 
18921 solver.cpp:404]     Test net output #0: accuracy = 0.7684\nI1212 13:56:21.115087 18921 solver.cpp:404]     Test net output #1: loss = 0.794754 (* 1 = 0.794754 loss)\nI1212 13:56:22.468150 18921 solver.cpp:228] Iteration 12200, loss = 0.243432\nI1212 13:56:22.468194 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 13:56:22.468209 18921 solver.cpp:244]     Train net output #1: loss = 0.243433 (* 1 = 0.243433 loss)\nI1212 13:56:22.523429 18921 sgd_solver.cpp:174] Iteration 12200, lr = 1.83\nI1212 13:56:22.537448 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.337771\nI1212 13:58:42.288113 18921 solver.cpp:337] Iteration 12300, Testing net (#0)\nI1212 14:00:05.997484 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79796\nI1212 14:00:05.997817 18921 solver.cpp:404]     Test net output #1: loss = 0.756873 (* 1 = 0.756873 loss)\nI1212 14:00:07.351483 18921 solver.cpp:228] Iteration 12300, loss = 0.172158\nI1212 14:00:07.351529 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 14:00:07.351544 18921 solver.cpp:244]     Train net output #1: loss = 0.172158 (* 1 = 0.172158 loss)\nI1212 14:00:07.415145 18921 sgd_solver.cpp:174] Iteration 12300, lr = 1.845\nI1212 14:00:07.429219 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.336564\nI1212 14:02:27.199120 18921 solver.cpp:337] Iteration 12400, Testing net (#0)\nI1212 14:03:50.909176 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79872\nI1212 14:03:50.909535 18921 solver.cpp:404]     Test net output #1: loss = 0.66185 (* 1 = 0.66185 loss)\nI1212 14:03:52.263245 18921 solver.cpp:228] Iteration 12400, loss = 0.13371\nI1212 14:03:52.263289 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 14:03:52.263306 18921 solver.cpp:244]     Train net output #1: loss = 0.13371 (* 1 = 0.13371 loss)\nI1212 14:03:52.322615 18921 sgd_solver.cpp:174] Iteration 12400, lr = 1.86\nI1212 14:03:52.336606 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.316131\nI1212 
14:06:12.113193 18921 solver.cpp:337] Iteration 12500, Testing net (#0)\nI1212 14:07:35.831792 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7808\nI1212 14:07:35.832111 18921 solver.cpp:404]     Test net output #1: loss = 0.814822 (* 1 = 0.814822 loss)\nI1212 14:07:37.185739 18921 solver.cpp:228] Iteration 12500, loss = 0.256087\nI1212 14:07:37.185784 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 14:07:37.185801 18921 solver.cpp:244]     Train net output #1: loss = 0.256087 (* 1 = 0.256087 loss)\nI1212 14:07:37.237478 18921 sgd_solver.cpp:174] Iteration 12500, lr = 1.875\nI1212 14:07:37.251013 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.384333\nI1212 14:09:57.028306 18921 solver.cpp:337] Iteration 12600, Testing net (#0)\nI1212 14:11:20.746294 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75644\nI1212 14:11:20.746613 18921 solver.cpp:404]     Test net output #1: loss = 0.891366 (* 1 = 0.891366 loss)\nI1212 14:11:22.098078 18921 solver.cpp:228] Iteration 12600, loss = 0.19131\nI1212 14:11:22.098121 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 14:11:22.098137 18921 solver.cpp:244]     Train net output #1: loss = 0.191311 (* 1 = 0.191311 loss)\nI1212 14:11:22.146360 18921 sgd_solver.cpp:174] Iteration 12600, lr = 1.89\nI1212 14:11:22.160243 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.341972\nI1212 14:13:41.640826 18921 solver.cpp:337] Iteration 12700, Testing net (#0)\nI1212 14:15:05.357547 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76028\nI1212 14:15:05.357887 18921 solver.cpp:404]     Test net output #1: loss = 0.764908 (* 1 = 0.764908 loss)\nI1212 14:15:06.708454 18921 solver.cpp:228] Iteration 12700, loss = 0.198738\nI1212 14:15:06.708498 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 14:15:06.708513 18921 solver.cpp:244]     Train net output #1: loss = 0.198738 (* 1 = 0.198738 loss)\nI1212 14:15:06.758643 18921 sgd_solver.cpp:174] Iteration 
12700, lr = 1.905\nI1212 14:15:06.772627 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.348293\nI1212 14:17:26.281404 18921 solver.cpp:337] Iteration 12800, Testing net (#0)\nI1212 14:18:50.007237 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69944\nI1212 14:18:50.007585 18921 solver.cpp:404]     Test net output #1: loss = 1.15753 (* 1 = 1.15753 loss)\nI1212 14:18:51.358412 18921 solver.cpp:228] Iteration 12800, loss = 0.154403\nI1212 14:18:51.358454 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 14:18:51.358470 18921 solver.cpp:244]     Train net output #1: loss = 0.154403 (* 1 = 0.154403 loss)\nI1212 14:18:51.405453 18921 sgd_solver.cpp:174] Iteration 12800, lr = 1.92\nI1212 14:18:51.419453 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.371159\nI1212 14:21:10.907011 18921 solver.cpp:337] Iteration 12900, Testing net (#0)\nI1212 14:22:34.626188 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76368\nI1212 14:22:34.626623 18921 solver.cpp:404]     Test net output #1: loss = 0.922247 (* 1 = 0.922247 loss)\nI1212 14:22:35.977682 18921 solver.cpp:228] Iteration 12900, loss = 0.167468\nI1212 14:22:35.977726 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 14:22:35.977742 18921 solver.cpp:244]     Train net output #1: loss = 0.167468 (* 1 = 0.167468 loss)\nI1212 14:22:36.036348 18921 sgd_solver.cpp:174] Iteration 12900, lr = 1.935\nI1212 14:22:36.050298 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.366045\nI1212 14:24:55.531127 18921 solver.cpp:337] Iteration 13000, Testing net (#0)\nI1212 14:26:19.232347 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68616\nI1212 14:26:19.232688 18921 solver.cpp:404]     Test net output #1: loss = 1.28493 (* 1 = 1.28493 loss)\nI1212 14:26:20.583916 18921 solver.cpp:228] Iteration 13000, loss = 0.208511\nI1212 14:26:20.583959 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 14:26:20.583976 18921 solver.cpp:244]     Train net output #1: 
loss = 0.208511 (* 1 = 0.208511 loss)\nI1212 14:26:20.640581 18921 sgd_solver.cpp:174] Iteration 13000, lr = 1.95\nI1212 14:26:20.654455 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.327542\nI1212 14:28:40.171435 18921 solver.cpp:337] Iteration 13100, Testing net (#0)\nI1212 14:30:03.889320 18921 solver.cpp:404]     Test net output #0: accuracy = 0.62656\nI1212 14:30:03.889634 18921 solver.cpp:404]     Test net output #1: loss = 1.86694 (* 1 = 1.86694 loss)\nI1212 14:30:05.240442 18921 solver.cpp:228] Iteration 13100, loss = 0.224692\nI1212 14:30:05.240485 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 14:30:05.240501 18921 solver.cpp:244]     Train net output #1: loss = 0.224692 (* 1 = 0.224692 loss)\nI1212 14:30:05.300964 18921 sgd_solver.cpp:174] Iteration 13100, lr = 1.965\nI1212 14:30:05.315009 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.373866\nI1212 14:32:24.819937 18921 solver.cpp:337] Iteration 13200, Testing net (#0)\nI1212 14:33:48.536649 18921 solver.cpp:404]     Test net output #0: accuracy = 0.54844\nI1212 14:33:48.536967 18921 solver.cpp:404]     Test net output #1: loss = 2.82299 (* 1 = 2.82299 loss)\nI1212 14:33:49.888414 18921 solver.cpp:228] Iteration 13200, loss = 0.251532\nI1212 14:33:49.888458 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 14:33:49.888473 18921 solver.cpp:244]     Train net output #1: loss = 0.251532 (* 1 = 0.251532 loss)\nI1212 14:33:49.949059 18921 sgd_solver.cpp:174] Iteration 13200, lr = 1.98\nI1212 14:33:49.963106 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.357636\nI1212 14:36:09.452069 18921 solver.cpp:337] Iteration 13300, Testing net (#0)\nI1212 14:37:33.160192 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78004\nI1212 14:37:33.160542 18921 solver.cpp:404]     Test net output #1: loss = 0.831991 (* 1 = 0.831991 loss)\nI1212 14:37:34.511629 18921 solver.cpp:228] Iteration 13300, loss = 0.255431\nI1212 14:37:34.511672 18921 solver.cpp:244]     Train net 
output #0: accuracy = 0.912\nI1212 14:37:34.511688 18921 solver.cpp:244]     Train net output #1: loss = 0.255432 (* 1 = 0.255432 loss)\nI1212 14:37:34.574002 18921 sgd_solver.cpp:174] Iteration 13300, lr = 1.995\nI1212 14:37:34.587952 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.322034\nI1212 14:39:54.092272 18921 solver.cpp:337] Iteration 13400, Testing net (#0)\nI1212 14:41:17.813649 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7472\nI1212 14:41:17.813977 18921 solver.cpp:404]     Test net output #1: loss = 1.01699 (* 1 = 1.01699 loss)\nI1212 14:41:19.165606 18921 solver.cpp:228] Iteration 13400, loss = 0.218326\nI1212 14:41:19.165649 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 14:41:19.165665 18921 solver.cpp:244]     Train net output #1: loss = 0.218326 (* 1 = 0.218326 loss)\nI1212 14:41:19.228278 18921 sgd_solver.cpp:174] Iteration 13400, lr = 2.01\nI1212 14:41:19.242297 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.309143\nI1212 14:43:38.741072 18921 solver.cpp:337] Iteration 13500, Testing net (#0)\nI1212 14:45:02.454417 18921 solver.cpp:404]     Test net output #0: accuracy = 0.6082\nI1212 14:45:02.454761 18921 solver.cpp:404]     Test net output #1: loss = 1.84205 (* 1 = 1.84205 loss)\nI1212 14:45:03.805959 18921 solver.cpp:228] Iteration 13500, loss = 0.215113\nI1212 14:45:03.806004 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 14:45:03.806020 18921 solver.cpp:244]     Train net output #1: loss = 0.215113 (* 1 = 0.215113 loss)\nI1212 14:45:03.870328 18921 sgd_solver.cpp:174] Iteration 13500, lr = 2.025\nI1212 14:45:03.884361 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.359644\nI1212 14:47:23.397042 18921 solver.cpp:337] Iteration 13600, Testing net (#0)\nI1212 14:48:47.100715 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73608\nI1212 14:48:47.101052 18921 solver.cpp:404]     Test net output #1: loss = 1.107 (* 1 = 1.107 loss)\nI1212 14:48:48.452041 18921 solver.cpp:228] 
Iteration 13600, loss = 0.159755\nI1212 14:48:48.452085 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 14:48:48.452100 18921 solver.cpp:244]     Train net output #1: loss = 0.159756 (* 1 = 0.159756 loss)\nI1212 14:48:48.510447 18921 sgd_solver.cpp:174] Iteration 13600, lr = 2.04\nI1212 14:48:48.524464 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.326191\nI1212 14:51:08.019286 18921 solver.cpp:337] Iteration 13700, Testing net (#0)\nI1212 14:52:31.712859 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7026\nI1212 14:52:31.713165 18921 solver.cpp:404]     Test net output #1: loss = 1.15845 (* 1 = 1.15845 loss)\nI1212 14:52:33.064141 18921 solver.cpp:228] Iteration 13700, loss = 0.27343\nI1212 14:52:33.064185 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 14:52:33.064201 18921 solver.cpp:244]     Train net output #1: loss = 0.27343 (* 1 = 0.27343 loss)\nI1212 14:52:33.124992 18921 sgd_solver.cpp:174] Iteration 13700, lr = 2.055\nI1212 14:52:33.139003 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.290148\nI1212 14:54:52.635865 18921 solver.cpp:337] Iteration 13800, Testing net (#0)\nI1212 14:56:16.329818 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7344\nI1212 14:56:16.330142 18921 solver.cpp:404]     Test net output #1: loss = 0.943695 (* 1 = 0.943695 loss)\nI1212 14:56:17.681200 18921 solver.cpp:228] Iteration 13800, loss = 0.215673\nI1212 14:56:17.681244 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 14:56:17.681259 18921 solver.cpp:244]     Train net output #1: loss = 0.215674 (* 1 = 0.215674 loss)\nI1212 14:56:17.741755 18921 sgd_solver.cpp:174] Iteration 13800, lr = 2.07\nI1212 14:56:17.755762 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.363422\nI1212 14:58:37.266275 18921 solver.cpp:337] Iteration 13900, Testing net (#0)\nI1212 15:00:01.040182 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7884\nI1212 15:00:01.040513 18921 solver.cpp:404]     Test net 
output #1: loss = 0.699661 (* 1 = 0.699661 loss)\nI1212 15:00:02.396101 18921 solver.cpp:228] Iteration 13900, loss = 0.162704\nI1212 15:00:02.396142 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 15:00:02.396159 18921 solver.cpp:244]     Train net output #1: loss = 0.162704 (* 1 = 0.162704 loss)\nI1212 15:00:02.460422 18921 sgd_solver.cpp:174] Iteration 13900, lr = 2.085\nI1212 15:00:02.474077 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.347476\nI1212 15:02:22.317682 18921 solver.cpp:337] Iteration 14000, Testing net (#0)\nI1212 15:03:46.114375 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71196\nI1212 15:03:46.114717 18921 solver.cpp:404]     Test net output #1: loss = 1.04281 (* 1 = 1.04281 loss)\nI1212 15:03:47.469805 18921 solver.cpp:228] Iteration 14000, loss = 0.241418\nI1212 15:03:47.469849 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 15:03:47.469866 18921 solver.cpp:244]     Train net output #1: loss = 0.241418 (* 1 = 0.241418 loss)\nI1212 15:03:47.526764 18921 sgd_solver.cpp:174] Iteration 14000, lr = 2.1\nI1212 15:03:47.540830 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.387318\nI1212 15:06:07.410079 18921 solver.cpp:337] Iteration 14100, Testing net (#0)\nI1212 15:07:31.199540 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77092\nI1212 15:07:31.199884 18921 solver.cpp:404]     Test net output #1: loss = 0.788792 (* 1 = 0.788792 loss)\nI1212 15:07:32.554106 18921 solver.cpp:228] Iteration 14100, loss = 0.260499\nI1212 15:07:32.554152 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 15:07:32.554168 18921 solver.cpp:244]     Train net output #1: loss = 0.260499 (* 1 = 0.260499 loss)\nI1212 15:07:32.607545 18921 sgd_solver.cpp:174] Iteration 14100, lr = 2.115\nI1212 15:07:32.621637 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.330224\nI1212 15:09:52.541834 18921 solver.cpp:337] Iteration 14200, Testing net (#0)\nI1212 15:11:16.371541 18921 solver.cpp:404]     
Test net output #0: accuracy = 0.675\nI1212 15:11:16.371876 18921 solver.cpp:404]     Test net output #1: loss = 1.38697 (* 1 = 1.38697 loss)\nI1212 15:11:17.727068 18921 solver.cpp:228] Iteration 14200, loss = 0.211527\nI1212 15:11:17.727113 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 15:11:17.727128 18921 solver.cpp:244]     Train net output #1: loss = 0.211527 (* 1 = 0.211527 loss)\nI1212 15:11:17.788247 18921 sgd_solver.cpp:174] Iteration 14200, lr = 2.13\nI1212 15:11:17.802338 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.343342\nI1212 15:13:37.721583 18921 solver.cpp:337] Iteration 14300, Testing net (#0)\nI1212 15:15:01.538585 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74348\nI1212 15:15:01.538926 18921 solver.cpp:404]     Test net output #1: loss = 0.945117 (* 1 = 0.945117 loss)\nI1212 15:15:02.894258 18921 solver.cpp:228] Iteration 14300, loss = 0.220779\nI1212 15:15:02.894300 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 15:15:02.894317 18921 solver.cpp:244]     Train net output #1: loss = 0.220779 (* 1 = 0.220779 loss)\nI1212 15:15:02.950160 18921 sgd_solver.cpp:174] Iteration 14300, lr = 2.145\nI1212 15:15:02.964181 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.329062\nI1212 15:17:22.886179 18921 solver.cpp:337] Iteration 14400, Testing net (#0)\nI1212 15:18:46.695842 18921 solver.cpp:404]     Test net output #0: accuracy = 0.82072\nI1212 15:18:46.696187 18921 solver.cpp:404]     Test net output #1: loss = 0.610052 (* 1 = 0.610052 loss)\nI1212 15:18:48.050873 18921 solver.cpp:228] Iteration 14400, loss = 0.267397\nI1212 15:18:48.050918 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 15:18:48.050933 18921 solver.cpp:244]     Train net output #1: loss = 0.267398 (* 1 = 0.267398 loss)\nI1212 15:18:48.102916 18921 sgd_solver.cpp:174] Iteration 14400, lr = 2.16\nI1212 15:18:48.116977 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.320401\nI1212 15:21:08.047428 18921 
solver.cpp:337] Iteration 14500, Testing net (#0)\nI1212 15:22:31.826963 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71268\nI1212 15:22:31.827296 18921 solver.cpp:404]     Test net output #1: loss = 1.22173 (* 1 = 1.22173 loss)\nI1212 15:22:33.182086 18921 solver.cpp:228] Iteration 14500, loss = 0.24201\nI1212 15:22:33.182132 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 15:22:33.182147 18921 solver.cpp:244]     Train net output #1: loss = 0.24201 (* 1 = 0.24201 loss)\nI1212 15:22:33.242885 18921 sgd_solver.cpp:174] Iteration 14500, lr = 2.175\nI1212 15:22:33.256934 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.330001\nI1212 15:24:53.132340 18921 solver.cpp:337] Iteration 14600, Testing net (#0)\nI1212 15:26:16.957774 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70196\nI1212 15:26:16.958119 18921 solver.cpp:404]     Test net output #1: loss = 1.06596 (* 1 = 1.06596 loss)\nI1212 15:26:18.311709 18921 solver.cpp:228] Iteration 14600, loss = 0.239456\nI1212 15:26:18.311751 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 15:26:18.311769 18921 solver.cpp:244]     Train net output #1: loss = 0.239456 (* 1 = 0.239456 loss)\nI1212 15:26:18.371695 18921 sgd_solver.cpp:174] Iteration 14600, lr = 2.19\nI1212 15:26:18.385788 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.352551\nI1212 15:28:38.281750 18921 solver.cpp:337] Iteration 14700, Testing net (#0)\nI1212 15:30:02.109565 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69908\nI1212 15:30:02.109874 18921 solver.cpp:404]     Test net output #1: loss = 1.4394 (* 1 = 1.4394 loss)\nI1212 15:30:03.464470 18921 solver.cpp:228] Iteration 14700, loss = 0.224561\nI1212 15:30:03.464520 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 15:30:03.464537 18921 solver.cpp:244]     Train net output #1: loss = 0.224562 (* 1 = 0.224562 loss)\nI1212 15:30:03.519794 18921 sgd_solver.cpp:174] Iteration 14700, lr = 2.205\nI1212 
15:30:03.533785 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.310065\nI1212 15:32:23.473117 18921 solver.cpp:337] Iteration 14800, Testing net (#0)\nI1212 15:33:47.315450 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69192\nI1212 15:33:47.315798 18921 solver.cpp:404]     Test net output #1: loss = 1.28041 (* 1 = 1.28041 loss)\nI1212 15:33:48.669100 18921 solver.cpp:228] Iteration 14800, loss = 0.389876\nI1212 15:33:48.669145 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 15:33:48.669162 18921 solver.cpp:244]     Train net output #1: loss = 0.389877 (* 1 = 0.389877 loss)\nI1212 15:33:48.729707 18921 sgd_solver.cpp:174] Iteration 14800, lr = 2.22\nI1212 15:33:48.743690 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.293909\nI1212 15:36:08.575013 18921 solver.cpp:337] Iteration 14900, Testing net (#0)\nI1212 15:37:32.368549 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70104\nI1212 15:37:32.368878 18921 solver.cpp:404]     Test net output #1: loss = 1.04982 (* 1 = 1.04982 loss)\nI1212 15:37:33.725081 18921 solver.cpp:228] Iteration 14900, loss = 0.287743\nI1212 15:37:33.725128 18921 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 15:37:33.725144 18921 solver.cpp:244]     Train net output #1: loss = 0.287743 (* 1 = 0.287743 loss)\nI1212 15:37:33.783309 18921 sgd_solver.cpp:174] Iteration 14900, lr = 2.235\nI1212 15:37:33.797374 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.308318\nI1212 15:39:53.747445 18921 solver.cpp:337] Iteration 15000, Testing net (#0)\nI1212 15:41:16.784749 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76904\nI1212 15:41:16.785027 18921 solver.cpp:404]     Test net output #1: loss = 0.815072 (* 1 = 0.815072 loss)\nI1212 15:41:18.138650 18921 solver.cpp:228] Iteration 15000, loss = 0.213072\nI1212 15:41:18.138686 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 15:41:18.138702 18921 solver.cpp:244]     Train net output #1: loss = 0.213072 (* 1 = 
0.213072 loss)\nI1212 15:41:18.185962 18921 sgd_solver.cpp:174] Iteration 15000, lr = 2.25\nI1212 15:41:18.198635 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.263388\nI1212 15:43:37.626096 18921 solver.cpp:337] Iteration 15100, Testing net (#0)\nI1212 15:45:00.649206 18921 solver.cpp:404]     Test net output #0: accuracy = 0.63764\nI1212 15:45:00.649452 18921 solver.cpp:404]     Test net output #1: loss = 1.3656 (* 1 = 1.3656 loss)\nI1212 15:45:02.000854 18921 solver.cpp:228] Iteration 15100, loss = 0.253239\nI1212 15:45:02.000887 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 15:45:02.000902 18921 solver.cpp:244]     Train net output #1: loss = 0.253239 (* 1 = 0.253239 loss)\nI1212 15:45:02.058553 18921 sgd_solver.cpp:174] Iteration 15100, lr = 2.265\nI1212 15:45:02.071192 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.351389\nI1212 15:47:21.468428 18921 solver.cpp:337] Iteration 15200, Testing net (#0)\nI1212 15:48:44.492378 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71028\nI1212 15:48:44.492636 18921 solver.cpp:404]     Test net output #1: loss = 1.11833 (* 1 = 1.11833 loss)\nI1212 15:48:45.844197 18921 solver.cpp:228] Iteration 15200, loss = 0.184137\nI1212 15:48:45.844233 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 15:48:45.844247 18921 solver.cpp:244]     Train net output #1: loss = 0.184137 (* 1 = 0.184137 loss)\nI1212 15:48:45.899999 18921 sgd_solver.cpp:174] Iteration 15200, lr = 2.28\nI1212 15:48:45.912693 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.336938\nI1212 15:51:05.359036 18921 solver.cpp:337] Iteration 15300, Testing net (#0)\nI1212 15:52:28.378963 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68872\nI1212 15:52:28.379238 18921 solver.cpp:404]     Test net output #1: loss = 1.23975 (* 1 = 1.23975 loss)\nI1212 15:52:29.731624 18921 solver.cpp:228] Iteration 15300, loss = 0.292805\nI1212 15:52:29.731657 18921 solver.cpp:244]     Train net output #0: accuracy = 
0.896\nI1212 15:52:29.731673 18921 solver.cpp:244]     Train net output #1: loss = 0.292805 (* 1 = 0.292805 loss)\nI1212 15:52:29.790058 18921 sgd_solver.cpp:174] Iteration 15300, lr = 2.295\nI1212 15:52:29.802711 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.337818\nI1212 15:54:49.229740 18921 solver.cpp:337] Iteration 15400, Testing net (#0)\nI1212 15:56:12.257746 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77956\nI1212 15:56:12.258036 18921 solver.cpp:404]     Test net output #1: loss = 0.671487 (* 1 = 0.671487 loss)\nI1212 15:56:13.615895 18921 solver.cpp:228] Iteration 15400, loss = 0.223969\nI1212 15:56:13.615929 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 15:56:13.615944 18921 solver.cpp:244]     Train net output #1: loss = 0.223969 (* 1 = 0.223969 loss)\nI1212 15:56:13.672382 18921 sgd_solver.cpp:174] Iteration 15400, lr = 2.31\nI1212 15:56:13.685014 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.298187\nI1212 15:58:33.124694 18921 solver.cpp:337] Iteration 15500, Testing net (#0)\nI1212 15:59:56.064733 18921 solver.cpp:404]     Test net output #0: accuracy = 0.68044\nI1212 15:59:56.064992 18921 solver.cpp:404]     Test net output #1: loss = 1.40058 (* 1 = 1.40058 loss)\nI1212 15:59:57.417284 18921 solver.cpp:228] Iteration 15500, loss = 0.22895\nI1212 15:59:57.417318 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 15:59:57.417333 18921 solver.cpp:244]     Train net output #1: loss = 0.22895 (* 1 = 0.22895 loss)\nI1212 15:59:57.476938 18921 sgd_solver.cpp:174] Iteration 15500, lr = 2.325\nI1212 15:59:57.489563 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.298174\nI1212 16:02:16.914571 18921 solver.cpp:337] Iteration 15600, Testing net (#0)\nI1212 16:03:39.860757 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77872\nI1212 16:03:39.861039 18921 solver.cpp:404]     Test net output #1: loss = 0.785482 (* 1 = 0.785482 loss)\nI1212 16:03:41.212990 18921 solver.cpp:228] Iteration 15600, 
loss = 0.238897\nI1212 16:03:41.213023 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 16:03:41.213038 18921 solver.cpp:244]     Train net output #1: loss = 0.238897 (* 1 = 0.238897 loss)\nI1212 16:03:41.268918 18921 sgd_solver.cpp:174] Iteration 15600, lr = 2.34\nI1212 16:03:41.281589 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.325425\nI1212 16:06:00.711009 18921 solver.cpp:337] Iteration 15700, Testing net (#0)\nI1212 16:07:23.648344 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7022\nI1212 16:07:23.648638 18921 solver.cpp:404]     Test net output #1: loss = 1.1099 (* 1 = 1.1099 loss)\nI1212 16:07:24.999120 18921 solver.cpp:228] Iteration 15700, loss = 0.206889\nI1212 16:07:24.999167 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 16:07:24.999184 18921 solver.cpp:244]     Train net output #1: loss = 0.206889 (* 1 = 0.206889 loss)\nI1212 16:07:25.053057 18921 sgd_solver.cpp:174] Iteration 15700, lr = 2.355\nI1212 16:07:25.066555 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.311032\nI1212 16:09:44.716048 18921 solver.cpp:337] Iteration 15800, Testing net (#0)\nI1212 16:11:08.446193 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72224\nI1212 16:11:08.446511 18921 solver.cpp:404]     Test net output #1: loss = 0.954265 (* 1 = 0.954265 loss)\nI1212 16:11:09.798565 18921 solver.cpp:228] Iteration 15800, loss = 0.227557\nI1212 16:11:09.798609 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 16:11:09.798625 18921 solver.cpp:244]     Train net output #1: loss = 0.227557 (* 1 = 0.227557 loss)\nI1212 16:11:09.852988 18921 sgd_solver.cpp:174] Iteration 15800, lr = 2.37\nI1212 16:11:09.867049 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.313205\nI1212 16:13:29.513906 18921 solver.cpp:337] Iteration 15900, Testing net (#0)\nI1212 16:14:53.233178 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70212\nI1212 16:14:53.233542 18921 solver.cpp:404]     Test net output #1: loss = 
1.1222 (* 1 = 1.1222 loss)\nI1212 16:14:54.585216 18921 solver.cpp:228] Iteration 15900, loss = 0.260289\nI1212 16:14:54.585260 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 16:14:54.585276 18921 solver.cpp:244]     Train net output #1: loss = 0.260289 (* 1 = 0.260289 loss)\nI1212 16:14:54.638448 18921 sgd_solver.cpp:174] Iteration 15900, lr = 2.385\nI1212 16:14:54.652474 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.366729\nI1212 16:17:14.302013 18921 solver.cpp:337] Iteration 16000, Testing net (#0)\nI1212 16:18:38.025218 18921 solver.cpp:404]     Test net output #0: accuracy = 0.79568\nI1212 16:18:38.025547 18921 solver.cpp:404]     Test net output #1: loss = 0.677475 (* 1 = 0.677475 loss)\nI1212 16:18:39.377053 18921 solver.cpp:228] Iteration 16000, loss = 0.309112\nI1212 16:18:39.377095 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 16:18:39.377111 18921 solver.cpp:244]     Train net output #1: loss = 0.309112 (* 1 = 0.309112 loss)\nI1212 16:18:39.437837 18921 sgd_solver.cpp:174] Iteration 16000, lr = 2.4\nI1212 16:18:39.451867 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.321856\nI1212 16:20:59.006803 18921 solver.cpp:337] Iteration 16100, Testing net (#0)\nI1212 16:22:22.730844 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70392\nI1212 16:22:22.731168 18921 solver.cpp:404]     Test net output #1: loss = 1.03678 (* 1 = 1.03678 loss)\nI1212 16:22:24.082394 18921 solver.cpp:228] Iteration 16100, loss = 0.289121\nI1212 16:22:24.082438 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 16:22:24.082454 18921 solver.cpp:244]     Train net output #1: loss = 0.289121 (* 1 = 0.289121 loss)\nI1212 16:22:24.139876 18921 sgd_solver.cpp:174] Iteration 16100, lr = 2.415\nI1212 16:22:24.153883 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.353435\nI1212 16:24:43.667848 18921 solver.cpp:337] Iteration 16200, Testing net (#0)\nI1212 16:26:07.373996 18921 solver.cpp:404]     Test net output #0: 
accuracy = 0.77772\nI1212 16:26:07.374341 18921 solver.cpp:404]     Test net output #1: loss = 0.717169 (* 1 = 0.717169 loss)\nI1212 16:26:08.725962 18921 solver.cpp:228] Iteration 16200, loss = 0.154605\nI1212 16:26:08.726006 18921 solver.cpp:244]     Train net output #0: accuracy = 0.96\nI1212 16:26:08.726022 18921 solver.cpp:244]     Train net output #1: loss = 0.154605 (* 1 = 0.154605 loss)\nI1212 16:26:08.789098 18921 sgd_solver.cpp:174] Iteration 16200, lr = 2.43\nI1212 16:26:08.803155 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.289292\nI1212 16:28:28.327301 18921 solver.cpp:337] Iteration 16300, Testing net (#0)\nI1212 16:29:52.028920 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7572\nI1212 16:29:52.029242 18921 solver.cpp:404]     Test net output #1: loss = 0.861797 (* 1 = 0.861797 loss)\nI1212 16:29:53.380396 18921 solver.cpp:228] Iteration 16300, loss = 0.220399\nI1212 16:29:53.380439 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 16:29:53.380455 18921 solver.cpp:244]     Train net output #1: loss = 0.220399 (* 1 = 0.220399 loss)\nI1212 16:29:53.437608 18921 sgd_solver.cpp:174] Iteration 16300, lr = 2.445\nI1212 16:29:53.451675 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.282592\nI1212 16:32:12.946722 18921 solver.cpp:337] Iteration 16400, Testing net (#0)\nI1212 16:33:36.655642 18921 solver.cpp:404]     Test net output #0: accuracy = 0.81416\nI1212 16:33:36.655966 18921 solver.cpp:404]     Test net output #1: loss = 0.640828 (* 1 = 0.640828 loss)\nI1212 16:33:38.006600 18921 solver.cpp:228] Iteration 16400, loss = 0.250722\nI1212 16:33:38.006641 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 16:33:38.006656 18921 solver.cpp:244]     Train net output #1: loss = 0.250722 (* 1 = 0.250722 loss)\nI1212 16:33:38.066025 18921 sgd_solver.cpp:174] Iteration 16400, lr = 2.46\nI1212 16:33:38.079978 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.269478\nI1212 16:35:57.570428 18921 solver.cpp:337] 
Iteration 16500, Testing net (#0)\nI1212 16:37:21.283475 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76504\nI1212 16:37:21.283795 18921 solver.cpp:404]     Test net output #1: loss = 0.775696 (* 1 = 0.775696 loss)\nI1212 16:37:22.634768 18921 solver.cpp:228] Iteration 16500, loss = 0.237891\nI1212 16:37:22.634809 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 16:37:22.634824 18921 solver.cpp:244]     Train net output #1: loss = 0.237891 (* 1 = 0.237891 loss)\nI1212 16:37:22.693758 18921 sgd_solver.cpp:174] Iteration 16500, lr = 2.475\nI1212 16:37:22.707767 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.270802\nI1212 16:39:42.203059 18921 solver.cpp:337] Iteration 16600, Testing net (#0)\nI1212 16:41:05.912464 18921 solver.cpp:404]     Test net output #0: accuracy = 0.70272\nI1212 16:41:05.912775 18921 solver.cpp:404]     Test net output #1: loss = 1.1038 (* 1 = 1.1038 loss)\nI1212 16:41:07.263548 18921 solver.cpp:228] Iteration 16600, loss = 0.225614\nI1212 16:41:07.263592 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 16:41:07.263607 18921 solver.cpp:244]     Train net output #1: loss = 0.225614 (* 1 = 0.225614 loss)\nI1212 16:41:07.319447 18921 sgd_solver.cpp:174] Iteration 16600, lr = 2.49\nI1212 16:41:07.333451 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.300248\nI1212 16:43:26.858970 18921 solver.cpp:337] Iteration 16700, Testing net (#0)\nI1212 16:44:50.571447 18921 solver.cpp:404]     Test net output #0: accuracy = 0.60228\nI1212 16:44:50.571794 18921 solver.cpp:404]     Test net output #1: loss = 2.05995 (* 1 = 2.05995 loss)\nI1212 16:44:51.922940 18921 solver.cpp:228] Iteration 16700, loss = 0.332356\nI1212 16:44:51.922982 18921 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 16:44:51.922998 18921 solver.cpp:244]     Train net output #1: loss = 0.332356 (* 1 = 0.332356 loss)\nI1212 16:44:51.984987 18921 sgd_solver.cpp:174] Iteration 16700, lr = 2.505\nI1212 16:44:51.998984 18921 
sgd_solver.cpp:149] Gradient: L2 norm 0.330717\nI1212 16:47:11.515960 18921 solver.cpp:337] Iteration 16800, Testing net (#0)\nI1212 16:48:35.229241 18921 solver.cpp:404]     Test net output #0: accuracy = 0.695\nI1212 16:48:35.229591 18921 solver.cpp:404]     Test net output #1: loss = 1.42171 (* 1 = 1.42171 loss)\nI1212 16:48:36.585366 18921 solver.cpp:228] Iteration 16800, loss = 0.307666\nI1212 16:48:36.585408 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 16:48:36.585423 18921 solver.cpp:244]     Train net output #1: loss = 0.307666 (* 1 = 0.307666 loss)\nI1212 16:48:36.637691 18921 sgd_solver.cpp:174] Iteration 16800, lr = 2.52\nI1212 16:48:36.651679 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.340495\nI1212 16:50:56.173748 18921 solver.cpp:337] Iteration 16900, Testing net (#0)\nI1212 16:52:19.877677 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69124\nI1212 16:52:19.877996 18921 solver.cpp:404]     Test net output #1: loss = 1.05238 (* 1 = 1.05238 loss)\nI1212 16:52:21.228608 18921 solver.cpp:228] Iteration 16900, loss = 0.194802\nI1212 16:52:21.228649 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 16:52:21.228664 18921 solver.cpp:244]     Train net output #1: loss = 0.194802 (* 1 = 0.194802 loss)\nI1212 16:52:21.282304 18921 sgd_solver.cpp:174] Iteration 16900, lr = 2.535\nI1212 16:52:21.296144 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.341093\nI1212 16:54:40.772636 18921 solver.cpp:337] Iteration 17000, Testing net (#0)\nI1212 16:56:04.482693 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75856\nI1212 16:56:04.483016 18921 solver.cpp:404]     Test net output #1: loss = 0.845253 (* 1 = 0.845253 loss)\nI1212 16:56:05.834157 18921 solver.cpp:228] Iteration 17000, loss = 0.258925\nI1212 16:56:05.834198 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 16:56:05.834214 18921 solver.cpp:244]     Train net output #1: loss = 0.258925 (* 1 = 0.258925 loss)\nI1212 
16:56:05.888007 18921 sgd_solver.cpp:174] Iteration 17000, lr = 2.55\nI1212 16:56:05.901947 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.313091\nI1212 16:58:25.390983 18921 solver.cpp:337] Iteration 17100, Testing net (#0)\nI1212 16:59:49.103448 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76488\nI1212 16:59:49.103772 18921 solver.cpp:404]     Test net output #1: loss = 0.799695 (* 1 = 0.799695 loss)\nI1212 16:59:50.454946 18921 solver.cpp:228] Iteration 17100, loss = 0.228942\nI1212 16:59:50.454985 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 16:59:50.455001 18921 solver.cpp:244]     Train net output #1: loss = 0.228942 (* 1 = 0.228942 loss)\nI1212 16:59:50.516710 18921 sgd_solver.cpp:174] Iteration 17100, lr = 2.565\nI1212 16:59:50.530694 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.287039\nI1212 17:02:10.008352 18921 solver.cpp:337] Iteration 17200, Testing net (#0)\nI1212 17:03:33.719717 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7162\nI1212 17:03:33.720042 18921 solver.cpp:404]     Test net output #1: loss = 1.17399 (* 1 = 1.17399 loss)\nI1212 17:03:35.070875 18921 solver.cpp:228] Iteration 17200, loss = 0.240298\nI1212 17:03:35.070915 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 17:03:35.070930 18921 solver.cpp:244]     Train net output #1: loss = 0.240298 (* 1 = 0.240298 loss)\nI1212 17:03:35.131659 18921 sgd_solver.cpp:174] Iteration 17200, lr = 2.58\nI1212 17:03:35.145496 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.294335\nI1212 17:05:54.630650 18921 solver.cpp:337] Iteration 17300, Testing net (#0)\nI1212 17:07:18.338145 18921 solver.cpp:404]     Test net output #0: accuracy = 0.728\nI1212 17:07:18.338477 18921 solver.cpp:404]     Test net output #1: loss = 0.897827 (* 1 = 0.897827 loss)\nI1212 17:07:19.690040 18921 solver.cpp:228] Iteration 17300, loss = 0.253941\nI1212 17:07:19.690080 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 
17:07:19.690096 18921 solver.cpp:244]     Train net output #1: loss = 0.253941 (* 1 = 0.253941 loss)\nI1212 17:07:19.747040 18921 sgd_solver.cpp:174] Iteration 17300, lr = 2.595\nI1212 17:07:19.761049 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.323579\nI1212 17:09:39.235064 18921 solver.cpp:337] Iteration 17400, Testing net (#0)\nI1212 17:11:02.921336 18921 solver.cpp:404]     Test net output #0: accuracy = 0.66048\nI1212 17:11:02.921672 18921 solver.cpp:404]     Test net output #1: loss = 1.27366 (* 1 = 1.27366 loss)\nI1212 17:11:04.272439 18921 solver.cpp:228] Iteration 17400, loss = 0.35105\nI1212 17:11:04.272480 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 17:11:04.272496 18921 solver.cpp:244]     Train net output #1: loss = 0.35105 (* 1 = 0.35105 loss)\nI1212 17:11:04.333381 18921 sgd_solver.cpp:174] Iteration 17400, lr = 2.61\nI1212 17:11:04.347477 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.329874\nI1212 17:13:23.812371 18921 solver.cpp:337] Iteration 17500, Testing net (#0)\nI1212 17:14:47.508754 18921 solver.cpp:404]     Test net output #0: accuracy = 0.77328\nI1212 17:14:47.509057 18921 solver.cpp:404]     Test net output #1: loss = 0.739108 (* 1 = 0.739108 loss)\nI1212 17:14:48.865036 18921 solver.cpp:228] Iteration 17500, loss = 0.304725\nI1212 17:14:48.865077 18921 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 17:14:48.865092 18921 solver.cpp:244]     Train net output #1: loss = 0.304725 (* 1 = 0.304725 loss)\nI1212 17:14:48.918314 18921 sgd_solver.cpp:174] Iteration 17500, lr = 2.625\nI1212 17:14:48.932324 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.289916\nI1212 17:17:08.393265 18921 solver.cpp:337] Iteration 17600, Testing net (#0)\nI1212 17:18:32.089165 18921 solver.cpp:404]     Test net output #0: accuracy = 0.75232\nI1212 17:18:32.089496 18921 solver.cpp:404]     Test net output #1: loss = 0.752529 (* 1 = 0.752529 loss)\nI1212 17:18:33.439935 18921 solver.cpp:228] Iteration 17600, loss = 
0.301702\nI1212 17:18:33.439977 18921 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 17:18:33.439993 18921 solver.cpp:244]     Train net output #1: loss = 0.301702 (* 1 = 0.301702 loss)\nI1212 17:18:33.507987 18921 sgd_solver.cpp:174] Iteration 17600, lr = 2.64\nI1212 17:18:33.521961 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.299936\nI1212 17:20:53.019587 18921 solver.cpp:337] Iteration 17700, Testing net (#0)\nI1212 17:22:16.722329 18921 solver.cpp:404]     Test net output #0: accuracy = 0.76852\nI1212 17:22:16.722661 18921 solver.cpp:404]     Test net output #1: loss = 0.717572 (* 1 = 0.717572 loss)\nI1212 17:22:18.074353 18921 solver.cpp:228] Iteration 17700, loss = 0.301134\nI1212 17:22:18.074395 18921 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 17:22:18.074411 18921 solver.cpp:244]     Train net output #1: loss = 0.301134 (* 1 = 0.301134 loss)\nI1212 17:22:18.133610 18921 sgd_solver.cpp:174] Iteration 17700, lr = 2.655\nI1212 17:22:18.147583 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.284486\nI1212 17:24:37.638296 18921 solver.cpp:337] Iteration 17800, Testing net (#0)\nI1212 17:26:01.333740 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7688\nI1212 17:26:01.334094 18921 solver.cpp:404]     Test net output #1: loss = 0.864169 (* 1 = 0.864169 loss)\nI1212 17:26:02.685423 18921 solver.cpp:228] Iteration 17800, loss = 0.290511\nI1212 17:26:02.685464 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 17:26:02.685480 18921 solver.cpp:244]     Train net output #1: loss = 0.290511 (* 1 = 0.290511 loss)\nI1212 17:26:02.742570 18921 sgd_solver.cpp:174] Iteration 17800, lr = 2.67\nI1212 17:26:02.756542 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.297045\nI1212 17:28:22.213300 18921 solver.cpp:337] Iteration 17900, Testing net (#0)\nI1212 17:29:45.912753 18921 solver.cpp:404]     Test net output #0: accuracy = 0.66272\nI1212 17:29:45.913084 18921 solver.cpp:404]     Test net output #1: loss = 
1.20603 (* 1 = 1.20603 loss)\nI1212 17:29:47.264714 18921 solver.cpp:228] Iteration 17900, loss = 0.195337\nI1212 17:29:47.264752 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 17:29:47.264768 18921 solver.cpp:244]     Train net output #1: loss = 0.195337 (* 1 = 0.195337 loss)\nI1212 17:29:47.325145 18921 sgd_solver.cpp:174] Iteration 17900, lr = 2.685\nI1212 17:29:47.339136 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.30746\nI1212 17:32:06.836437 18921 solver.cpp:337] Iteration 18000, Testing net (#0)\nI1212 17:33:30.546002 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73196\nI1212 17:33:30.546362 18921 solver.cpp:404]     Test net output #1: loss = 0.858368 (* 1 = 0.858368 loss)\nI1212 17:33:31.898035 18921 solver.cpp:228] Iteration 18000, loss = 0.250174\nI1212 17:33:31.898074 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 17:33:31.898090 18921 solver.cpp:244]     Train net output #1: loss = 0.250174 (* 1 = 0.250174 loss)\nI1212 17:33:31.952440 18921 sgd_solver.cpp:174] Iteration 18000, lr = 2.7\nI1212 17:33:31.966440 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.312457\nI1212 17:35:51.464431 18921 solver.cpp:337] Iteration 18100, Testing net (#0)\nI1212 17:37:15.170702 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69384\nI1212 17:37:15.171031 18921 solver.cpp:404]     Test net output #1: loss = 1.17536 (* 1 = 1.17536 loss)\nI1212 17:37:16.521698 18921 solver.cpp:228] Iteration 18100, loss = 0.214583\nI1212 17:37:16.521740 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 17:37:16.521756 18921 solver.cpp:244]     Train net output #1: loss = 0.214583 (* 1 = 0.214583 loss)\nI1212 17:37:16.574091 18921 sgd_solver.cpp:174] Iteration 18100, lr = 2.715\nI1212 17:37:16.588091 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.260355\nI1212 17:39:36.079177 18921 solver.cpp:337] Iteration 18200, Testing net (#0)\nI1212 17:40:59.779929 18921 solver.cpp:404]     Test net output #0: 
accuracy = 0.72192\nI1212 17:40:59.780251 18921 solver.cpp:404]     Test net output #1: loss = 0.944724 (* 1 = 0.944724 loss)\nI1212 17:41:01.131525 18921 solver.cpp:228] Iteration 18200, loss = 0.20907\nI1212 17:41:01.131566 18921 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 17:41:01.131580 18921 solver.cpp:244]     Train net output #1: loss = 0.20907 (* 1 = 0.20907 loss)\nI1212 17:41:01.192152 18921 sgd_solver.cpp:174] Iteration 18200, lr = 2.73\nI1212 17:41:01.205649 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.272464\nI1212 17:43:20.670593 18921 solver.cpp:337] Iteration 18300, Testing net (#0)\nI1212 17:44:44.365638 18921 solver.cpp:404]     Test net output #0: accuracy = 0.64024\nI1212 17:44:44.365984 18921 solver.cpp:404]     Test net output #1: loss = 1.331 (* 1 = 1.331 loss)\nI1212 17:44:45.717046 18921 solver.cpp:228] Iteration 18300, loss = 0.257455\nI1212 17:44:45.717087 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 17:44:45.717103 18921 solver.cpp:244]     Train net output #1: loss = 0.257455 (* 1 = 0.257455 loss)\nI1212 17:44:45.778384 18921 sgd_solver.cpp:174] Iteration 18300, lr = 2.745\nI1212 17:44:45.792421 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.29878\nI1212 17:47:05.254607 18921 solver.cpp:337] Iteration 18400, Testing net (#0)\nI1212 17:48:28.941109 18921 solver.cpp:404]     Test net output #0: accuracy = 0.67232\nI1212 17:48:28.941437 18921 solver.cpp:404]     Test net output #1: loss = 1.31979 (* 1 = 1.31979 loss)\nI1212 17:48:30.292160 18921 solver.cpp:228] Iteration 18400, loss = 0.280516\nI1212 17:48:30.292201 18921 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 17:48:30.292217 18921 solver.cpp:244]     Train net output #1: loss = 0.280516 (* 1 = 0.280516 loss)\nI1212 17:48:30.345160 18921 sgd_solver.cpp:174] Iteration 18400, lr = 2.76\nI1212 17:48:30.359236 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.299553\nI1212 17:50:49.810199 18921 solver.cpp:337] Iteration 18500, 
Testing net (#0)\nI1212 17:52:13.486403 18921 solver.cpp:404]     Test net output #0: accuracy = 0.80132\nI1212 17:52:13.486735 18921 solver.cpp:404]     Test net output #1: loss = 0.644083 (* 1 = 0.644083 loss)\nI1212 17:52:14.837750 18921 solver.cpp:228] Iteration 18500, loss = 0.272341\nI1212 17:52:14.837792 18921 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 17:52:14.837808 18921 solver.cpp:244]     Train net output #1: loss = 0.272341 (* 1 = 0.272341 loss)\nI1212 17:52:14.898241 18921 sgd_solver.cpp:174] Iteration 18500, lr = 2.775\nI1212 17:52:14.911751 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.306305\nI1212 17:54:34.370438 18921 solver.cpp:337] Iteration 18600, Testing net (#0)\nI1212 17:55:58.059561 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73544\nI1212 17:55:58.059877 18921 solver.cpp:404]     Test net output #1: loss = 0.999724 (* 1 = 0.999724 loss)\nI1212 17:55:59.410732 18921 solver.cpp:228] Iteration 18600, loss = 0.281018\nI1212 17:55:59.410773 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 17:55:59.410789 18921 solver.cpp:244]     Train net output #1: loss = 0.281018 (* 1 = 0.281018 loss)\nI1212 17:55:59.471930 18921 sgd_solver.cpp:174] Iteration 18600, lr = 2.79\nI1212 17:55:59.485945 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.333375\nI1212 17:58:18.952452 18921 solver.cpp:337] Iteration 18700, Testing net (#0)\nI1212 17:59:42.630662 18921 solver.cpp:404]     Test net output #0: accuracy = 0.685\nI1212 17:59:42.630986 18921 solver.cpp:404]     Test net output #1: loss = 1.01754 (* 1 = 1.01754 loss)\nI1212 17:59:43.981776 18921 solver.cpp:228] Iteration 18700, loss = 0.245261\nI1212 17:59:43.981817 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 17:59:43.981833 18921 solver.cpp:244]     Train net output #1: loss = 0.245261 (* 1 = 0.245261 loss)\nI1212 17:59:44.041488 18921 sgd_solver.cpp:174] Iteration 18700, lr = 2.805\nI1212 17:59:44.055510 18921 
sgd_solver.cpp:149] Gradient: L2 norm 0.279579\nI1212 18:02:03.519654 18921 solver.cpp:337] Iteration 18800, Testing net (#0)\nI1212 18:03:27.213351 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72664\nI1212 18:03:27.213697 18921 solver.cpp:404]     Test net output #1: loss = 0.888085 (* 1 = 0.888085 loss)\nI1212 18:03:28.570050 18921 solver.cpp:228] Iteration 18800, loss = 0.331362\nI1212 18:03:28.570096 18921 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 18:03:28.570111 18921 solver.cpp:244]     Train net output #1: loss = 0.331362 (* 1 = 0.331362 loss)\nI1212 18:03:28.626063 18921 sgd_solver.cpp:174] Iteration 18800, lr = 2.82\nI1212 18:03:28.640038 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.311675\nI1212 18:05:48.120642 18921 solver.cpp:337] Iteration 18900, Testing net (#0)\nI1212 18:07:11.807271 18921 solver.cpp:404]     Test net output #0: accuracy = 0.69032\nI1212 18:07:11.807620 18921 solver.cpp:404]     Test net output #1: loss = 1.13069 (* 1 = 1.13069 loss)\nI1212 18:07:13.159035 18921 solver.cpp:228] Iteration 18900, loss = 0.206624\nI1212 18:07:13.159075 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 18:07:13.159092 18921 solver.cpp:244]     Train net output #1: loss = 0.206624 (* 1 = 0.206624 loss)\nI1212 18:07:13.217841 18921 sgd_solver.cpp:174] Iteration 18900, lr = 2.835\nI1212 18:07:13.231840 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.28578\nI1212 18:09:32.678351 18921 solver.cpp:337] Iteration 19000, Testing net (#0)\nI1212 18:10:56.364962 18921 solver.cpp:404]     Test net output #0: accuracy = 0.73316\nI1212 18:10:56.365285 18921 solver.cpp:404]     Test net output #1: loss = 0.830838 (* 1 = 0.830838 loss)\nI1212 18:10:57.715791 18921 solver.cpp:228] Iteration 19000, loss = 0.32242\nI1212 18:10:57.715833 18921 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 18:10:57.715848 18921 solver.cpp:244]     Train net output #1: loss = 0.32242 (* 1 = 0.32242 loss)\nI1212 
18:10:57.776365 18921 sgd_solver.cpp:174] Iteration 19000, lr = 2.85\nI1212 18:10:57.790294 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.304347\nI1212 18:13:17.246517 18921 solver.cpp:337] Iteration 19100, Testing net (#0)\nI1212 18:14:40.937451 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7548\nI1212 18:14:40.937778 18921 solver.cpp:404]     Test net output #1: loss = 0.787239 (* 1 = 0.787239 loss)\nI1212 18:14:42.288710 18921 solver.cpp:228] Iteration 19100, loss = 0.318475\nI1212 18:14:42.288750 18921 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1212 18:14:42.288765 18921 solver.cpp:244]     Train net output #1: loss = 0.318475 (* 1 = 0.318475 loss)\nI1212 18:14:42.345227 18921 sgd_solver.cpp:174] Iteration 19100, lr = 2.865\nI1212 18:14:42.359257 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.272602\nI1212 18:17:01.807117 18921 solver.cpp:337] Iteration 19200, Testing net (#0)\nI1212 18:18:25.499299 18921 solver.cpp:404]     Test net output #0: accuracy = 0.72356\nI1212 18:18:25.499722 18921 solver.cpp:404]     Test net output #1: loss = 0.936694 (* 1 = 0.936694 loss)\nI1212 18:18:26.850934 18921 solver.cpp:228] Iteration 19200, loss = 0.313957\nI1212 18:18:26.850975 18921 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 18:18:26.850991 18921 solver.cpp:244]     Train net output #1: loss = 0.313957 (* 1 = 0.313957 loss)\nI1212 18:18:26.905242 18921 sgd_solver.cpp:174] Iteration 19200, lr = 2.88\nI1212 18:18:26.918607 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.294239\nI1212 18:20:46.398026 18921 solver.cpp:337] Iteration 19300, Testing net (#0)\nI1212 18:22:10.085777 18921 solver.cpp:404]     Test net output #0: accuracy = 0.7898\nI1212 18:22:10.086091 18921 solver.cpp:404]     Test net output #1: loss = 0.72244 (* 1 = 0.72244 loss)\nI1212 18:22:11.436802 18921 solver.cpp:228] Iteration 19300, loss = 0.26746\nI1212 18:22:11.436846 18921 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 
18:22:11.436861 18921 solver.cpp:244]     Train net output #1: loss = 0.26746 (* 1 = 0.26746 loss)\nI1212 18:22:11.495769 18921 sgd_solver.cpp:174] Iteration 19300, lr = 2.895\nI1212 18:22:11.509667 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.295986\nI1212 18:24:30.977394 18921 solver.cpp:337] Iteration 19400, Testing net (#0)\nI1212 18:25:54.676944 18921 solver.cpp:404]     Test net output #0: accuracy = 0.62636\nI1212 18:25:54.677304 18921 solver.cpp:404]     Test net output #1: loss = 1.33647 (* 1 = 1.33647 loss)\nI1212 18:25:56.028337 18921 solver.cpp:228] Iteration 19400, loss = 0.273896\nI1212 18:25:56.028381 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 18:25:56.028396 18921 solver.cpp:244]     Train net output #1: loss = 0.273896 (* 1 = 0.273896 loss)\nI1212 18:25:56.090754 18921 sgd_solver.cpp:174] Iteration 19400, lr = 2.91\nI1212 18:25:56.104763 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.342354\nI1212 18:28:15.601617 18921 solver.cpp:337] Iteration 19500, Testing net (#0)\nI1212 18:29:39.297032 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71764\nI1212 18:29:39.297420 18921 solver.cpp:404]     Test net output #1: loss = 0.94349 (* 1 = 0.94349 loss)\nI1212 18:29:40.649406 18921 solver.cpp:228] Iteration 19500, loss = 0.234101\nI1212 18:29:40.649451 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 18:29:40.649472 18921 solver.cpp:244]     Train net output #1: loss = 0.234101 (* 1 = 0.234101 loss)\nI1212 18:29:40.712023 18921 sgd_solver.cpp:174] Iteration 19500, lr = 2.925\nI1212 18:29:40.726171 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.331393\nI1212 18:32:00.243888 18921 solver.cpp:337] Iteration 19600, Testing net (#0)\nI1212 18:33:23.965054 18921 solver.cpp:404]     Test net output #0: accuracy = 0.74872\nI1212 18:33:23.965407 18921 solver.cpp:404]     Test net output #1: loss = 0.806399 (* 1 = 0.806399 loss)\nI1212 18:33:25.317543 18921 solver.cpp:228] Iteration 19600, loss = 
0.227552\nI1212 18:33:25.317589 18921 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 18:33:25.317605 18921 solver.cpp:244]     Train net output #1: loss = 0.227552 (* 1 = 0.227552 loss)\nI1212 18:33:25.369994 18921 sgd_solver.cpp:174] Iteration 19600, lr = 2.94\nI1212 18:33:25.384086 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.296923\nI1212 18:35:44.883283 18921 solver.cpp:337] Iteration 19700, Testing net (#0)\nI1212 18:37:08.615249 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71832\nI1212 18:37:08.615605 18921 solver.cpp:404]     Test net output #1: loss = 0.971244 (* 1 = 0.971244 loss)\nI1212 18:37:09.967847 18921 solver.cpp:228] Iteration 19700, loss = 0.342038\nI1212 18:37:09.967891 18921 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1212 18:37:09.967907 18921 solver.cpp:244]     Train net output #1: loss = 0.342038 (* 1 = 0.342038 loss)\nI1212 18:37:10.031474 18921 sgd_solver.cpp:174] Iteration 19700, lr = 2.955\nI1212 18:37:10.045557 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.344417\nI1212 18:39:29.547835 18921 solver.cpp:337] Iteration 19800, Testing net (#0)\nI1212 18:40:53.259435 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78188\nI1212 18:40:53.259775 18921 solver.cpp:404]     Test net output #1: loss = 0.644106 (* 1 = 0.644106 loss)\nI1212 18:40:54.610601 18921 solver.cpp:228] Iteration 19800, loss = 0.296883\nI1212 18:40:54.610644 18921 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 18:40:54.610661 18921 solver.cpp:244]     Train net output #1: loss = 0.296883 (* 1 = 0.296883 loss)\nI1212 18:40:54.676364 18921 sgd_solver.cpp:174] Iteration 19800, lr = 2.97\nI1212 18:40:54.690496 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.275478\nI1212 18:43:14.206128 18921 solver.cpp:337] Iteration 19900, Testing net (#0)\nI1212 18:44:37.899065 18921 solver.cpp:404]     Test net output #0: accuracy = 0.71704\nI1212 18:44:37.899410 18921 solver.cpp:404]     Test net output #1: loss = 
1.18975 (* 1 = 1.18975 loss)\nI1212 18:44:39.252888 18921 solver.cpp:228] Iteration 19900, loss = 0.249042\nI1212 18:44:39.252928 18921 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 18:44:39.252944 18921 solver.cpp:244]     Train net output #1: loss = 0.249042 (* 1 = 0.249042 loss)\nI1212 18:44:39.307490 18921 sgd_solver.cpp:174] Iteration 19900, lr = 2.985\nI1212 18:44:39.321554 18921 sgd_solver.cpp:149] Gradient: L2 norm 0.256627\nI1212 18:46:59.094226 18921 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range3Iter20kFig2b_iter_20000.caffemodel\nI1212 18:46:59.356034 18921 sgd_solver.cpp:341] Snapshotting solver state to binary proto file examples/sc/snapshots/range3Iter20kFig2b_iter_20000.solverstate\nI1212 18:46:59.810463 18921 solver.cpp:317] Iteration 20000, loss = 0.276982\nI1212 18:46:59.810513 18921 solver.cpp:337] Iteration 20000, Testing net (#0)\nI1212 18:48:23.500727 18921 solver.cpp:404]     Test net output #0: accuracy = 0.78548\nI1212 18:48:23.501080 18921 solver.cpp:404]     Test net output #1: loss = 0.688526 (* 1 = 0.688526 loss)\nI1212 18:48:23.501091 18921 solver.cpp:322] Optimization Done.\nI1212 18:48:28.988687 18921 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "Results/range3Iter5kFig2b",
    "content": "I1212 06:18:16.023185 12086 caffe.cpp:217] Using GPUs 0, 1, 2, 3, 4, 5, 6, 7\nI1212 06:18:16.025761 12086 caffe.cpp:222] GPU 0: GeForce GTX TITAN Black\nI1212 06:18:16.026978 12086 caffe.cpp:222] GPU 1: GeForce GTX TITAN Black\nI1212 06:18:16.028192 12086 caffe.cpp:222] GPU 2: GeForce GTX TITAN Black\nI1212 06:18:16.029407 12086 caffe.cpp:222] GPU 3: GeForce GTX TITAN Black\nI1212 06:18:16.030632 12086 caffe.cpp:222] GPU 4: GeForce GTX TITAN Black\nI1212 06:18:16.031860 12086 caffe.cpp:222] GPU 5: GeForce GTX TITAN Black\nI1212 06:18:16.033090 12086 caffe.cpp:222] GPU 6: GeForce GTX TITAN Black\nI1212 06:18:16.034320 12086 caffe.cpp:222] GPU 7: GeForce GTX TITAN Black\nI1212 06:18:16.465911 12086 solver.cpp:48] Initializing solver from parameters: \ntest_iter: 200\ntest_interval: 100\nbase_lr: 0\ndisplay: 100\nmax_iter: 5000\nlr_policy: \"triangular\"\nmomentum: 0.9\nweight_decay: 0.0001\nstepsize: 5000\nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/range3Iter5kFig2b\"\nsolver_mode: GPU\ndevice_id: 0\nnet: \"examples/sc/architectures/arch.prototxt\"\ntrain_state {\n  level: 0\n  stage: \"\"\n}\nmax_lr: 3\nI1212 06:18:16.469650 12086 solver.cpp:91] Creating training net from net file: examples/sc/architectures/arch.prototxt\nI1212 06:18:16.537998 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:16.538081 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:16.539352 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer dataLayer\nI1212 06:18:16.539408 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer pre_bn\nI1212 06:18:16.539429 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr1_bn\nI1212 06:18:16.539449 12086 net.cpp:322] The NetState 
phase (0) differed from the phase (1) specified by a rule in layer L1_b1_cbr2_bn\nI1212 06:18:16.539470 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:18:16.539494 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:18:16.539513 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:18:16.539533 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:18:16.539554 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:18:16.539573 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:18:16.539593 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:18:16.539609 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:18:16.539629 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:18:16.539649 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:18:16.539669 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:18:16.539686 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:18:16.539705 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:18:16.539724 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b8_cbr2_bn\nI1212 
06:18:16.539744 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:18:16.539763 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:18:16.539800 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:18:16.539829 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:18:16.539856 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:18:16.539878 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:18:16.539896 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:18:16.539912 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:18:16.539932 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:18:16.539948 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:18:16.539966 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:18:16.539985 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:18:16.540005 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:18:16.540024 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:18:16.540045 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified 
by a rule in layer L2_b7_cbr1_bn\nI1212 06:18:16.540062 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:18:16.540081 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:18:16.540100 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:18:16.540122 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:18:16.540139 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:18:16.540159 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:18:16.540176 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:18:16.540202 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:18:16.540220 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:18:16.540240 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:18:16.540258 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:18:16.540278 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:18:16.540297 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:18:16.540315 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b5_cbr1_bn\nI1212 06:18:16.540333 12086 net.cpp:322] The NetState phase 
(0) differed from the phase (1) specified by a rule in layer L3_b5_cbr2_bn\nI1212 06:18:16.540352 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:18:16.540370 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:18:16.540388 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:18:16.540416 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:18:16.540437 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:18:16.540457 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:18:16.540477 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:18:16.540493 12086 net.cpp:322] The NetState phase (0) differed from the phase (1) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:18:16.542306 12086 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TRAIN\n  level: 0\n  stage: \"\"\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n    shuffle: true\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    
kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: 
\"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: 
\"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    
operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: 
\"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n   
 }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_c\nI1212 06:18:16.544476 12086 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:18:16.545667 12086 net.cpp:100] Creating Layer dataLayer\nI1212 06:18:16.545703 12086 net.cpp:408] dataLayer -> data_top\nI1212 06:18:16.545948 12086 net.cpp:408] dataLayer -> label\nI1212 06:18:16.546059 12086 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:18:16.590628 12092 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_train_lmdb\nI1212 06:18:16.641525 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:16.648874 12086 net.cpp:150] Setting up dataLayer\nI1212 06:18:16.648959 12086 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:18:16.648982 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.648993 12086 net.cpp:165] Memory required for data: 1536500\nI1212 06:18:16.649019 12086 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:18:16.649044 12086 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:18:16.649060 12086 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:18:16.649106 12086 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:18:16.649135 12086 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:18:16.649236 12086 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:18:16.649263 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.649278 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.649288 12086 net.cpp:165] Memory required for data: 1537500\nI1212 06:18:16.649299 12086 layer_factory.hpp:77] Creating layer pre_conv\nI1212 06:18:16.649339 12086 
net.cpp:100] Creating Layer pre_conv\nI1212 06:18:16.649353 12086 net.cpp:434] pre_conv <- data_top\nI1212 06:18:16.649372 12086 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:18:16.651046 12086 net.cpp:150] Setting up pre_conv\nI1212 06:18:16.651077 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.651088 12086 net.cpp:165] Memory required for data: 9729500\nI1212 06:18:16.651245 12086 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:18:16.651345 12086 net.cpp:100] Creating Layer pre_bn\nI1212 06:18:16.651360 12086 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:18:16.651377 12086 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:18:16.651742 12086 net.cpp:150] Setting up pre_bn\nI1212 06:18:16.651762 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.651772 12086 net.cpp:165] Memory required for data: 17921500\nI1212 06:18:16.651803 12086 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:18:16.651882 12086 net.cpp:100] Creating Layer pre_scale\nI1212 06:18:16.651898 12086 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:18:16.651918 12086 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:18:16.652289 12086 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:18:16.654081 12086 net.cpp:150] Setting up pre_scale\nI1212 06:18:16.654103 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.654114 12086 net.cpp:165] Memory required for data: 26113500\nI1212 06:18:16.654134 12086 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:18:16.654204 12086 net.cpp:100] Creating Layer pre_relu\nI1212 06:18:16.654220 12086 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:18:16.654235 12086 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:18:16.654255 12086 net.cpp:150] Setting up pre_relu\nI1212 06:18:16.654271 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.654281 12086 net.cpp:165] Memory required for data: 34305500\nI1212 06:18:16.654291 12086 layer_factory.hpp:77] Creating layer 
pre_bn_top_pre_relu_0_split\nI1212 06:18:16.654311 12086 net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:18:16.654322 12086 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:18:16.654341 12086 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:18:16.654363 12086 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:18:16.654444 12086 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:18:16.654466 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.654479 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.654490 12086 net.cpp:165] Memory required for data: 50689500\nI1212 06:18:16.654501 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:18:16.654521 12086 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:18:16.654534 12086 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:18:16.654556 12086 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:18:16.654914 12086 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:18:16.654934 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.654944 12086 net.cpp:165] Memory required for data: 58881500\nI1212 06:18:16.654968 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:18:16.654994 12086 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:18:16.655006 12086 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:18:16.655022 12086 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:18:16.655300 12086 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:18:16.655330 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.655340 12086 net.cpp:165] Memory required for data: 67073500\nI1212 06:18:16.655362 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:18:16.655380 12086 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1212 06:18:16.655391 12086 net.cpp:434] 
L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:18:16.655407 12086 net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.655493 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:18:16.655666 12086 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:18:16.655685 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.655695 12086 net.cpp:165] Memory required for data: 75265500\nI1212 06:18:16.655714 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:18:16.655735 12086 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:18:16.655752 12086 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:18:16.655768 12086 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.655787 12086 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:18:16.655802 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.655812 12086 net.cpp:165] Memory required for data: 83457500\nI1212 06:18:16.655822 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:18:16.655851 12086 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:18:16.655864 12086 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:18:16.655886 12086 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:18:16.656239 12086 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:18:16.656265 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.656275 12086 net.cpp:165] Memory required for data: 91649500\nI1212 06:18:16.656292 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:18:16.656311 12086 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:18:16.656322 12086 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:18:16.656342 12086 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:18:16.656616 12086 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:18:16.656635 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.656644 12086 
net.cpp:165] Memory required for data: 99841500\nI1212 06:18:16.656677 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:18:16.656694 12086 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:18:16.656707 12086 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:18:16.656723 12086 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.656808 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:18:16.656987 12086 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:18:16.657006 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.657016 12086 net.cpp:165] Memory required for data: 108033500\nI1212 06:18:16.657035 12086 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:18:16.657060 12086 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:18:16.657079 12086 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:18:16.657094 12086 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:18:16.657122 12086 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:18:16.657225 12086 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:18:16.657245 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.657255 12086 net.cpp:165] Memory required for data: 116225500\nI1212 06:18:16.657266 12086 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:18:16.657287 12086 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:18:16.657299 12086 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:18:16.657313 12086 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.657333 12086 net.cpp:150] Setting up L1_b1_relu\nI1212 06:18:16.657348 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.657358 12086 net.cpp:165] Memory required for data: 124417500\nI1212 06:18:16.657368 12086 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.657385 12086 
net.cpp:100] Creating Layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.657397 12086 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:18:16.657411 12086 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:18:16.657430 12086 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:18:16.657508 12086 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.657529 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.657542 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.657552 12086 net.cpp:165] Memory required for data: 140801500\nI1212 06:18:16.657572 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:18:16.657598 12086 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:18:16.657613 12086 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:18:16.657631 12086 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:18:16.657979 12086 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:18:16.657997 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.658007 12086 net.cpp:165] Memory required for data: 148993500\nI1212 06:18:16.658025 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:18:16.658051 12086 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:18:16.658062 12086 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:18:16.658094 12086 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:18:16.658392 12086 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:18:16.658413 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.658423 12086 net.cpp:165] Memory required for data: 157185500\nI1212 06:18:16.658445 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:18:16.658463 12086 net.cpp:100] Creating Layer 
L1_b2_cbr1_scale\nI1212 06:18:16.658473 12086 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:18:16.658488 12086 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.658579 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:18:16.658751 12086 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:18:16.658769 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.658779 12086 net.cpp:165] Memory required for data: 165377500\nI1212 06:18:16.658797 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:18:16.658819 12086 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:18:16.658834 12086 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:18:16.658849 12086 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.658869 12086 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:18:16.658884 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.658893 12086 net.cpp:165] Memory required for data: 173569500\nI1212 06:18:16.658903 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:18:16.658929 12086 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:18:16.658941 12086 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:18:16.658963 12086 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:18:16.659324 12086 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:18:16.659344 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.659353 12086 net.cpp:165] Memory required for data: 181761500\nI1212 06:18:16.659371 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:18:16.659394 12086 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:18:16.659405 12086 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:18:16.659426 12086 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:18:16.659708 12086 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:18:16.659726 12086 net.cpp:157] Top 
shape: 125 16 32 32 (2048000)\nI1212 06:18:16.659736 12086 net.cpp:165] Memory required for data: 189953500\nI1212 06:18:16.659765 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:18:16.659787 12086 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:18:16.659799 12086 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:18:16.659816 12086 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.659904 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:18:16.660089 12086 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:18:16.660109 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.660120 12086 net.cpp:165] Memory required for data: 198145500\nI1212 06:18:16.660138 12086 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:18:16.660156 12086 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:18:16.660174 12086 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:18:16.660188 12086 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:18:16.660209 12086 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:18:16.660264 12086 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:18:16.660282 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.660291 12086 net.cpp:165] Memory required for data: 206337500\nI1212 06:18:16.660301 12086 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:18:16.660316 12086 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:18:16.660327 12086 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:18:16.660341 12086 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:18:16.660360 12086 net.cpp:150] Setting up L1_b2_relu\nI1212 06:18:16.660375 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.660384 12086 net.cpp:165] Memory required for data: 214529500\nI1212 06:18:16.660395 12086 layer_factory.hpp:77] Creating layer 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.660414 12086 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.660425 12086 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:18:16.660440 12086 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:18:16.660460 12086 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:18:16.660537 12086 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.660557 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.660571 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.660581 12086 net.cpp:165] Memory required for data: 230913500\nI1212 06:18:16.660593 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:18:16.660611 12086 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:18:16.660624 12086 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:18:16.660647 12086 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:18:16.661000 12086 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:18:16.661020 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.661029 12086 net.cpp:165] Memory required for data: 239105500\nI1212 06:18:16.661047 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:18:16.661065 12086 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:18:16.661083 12086 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:18:16.661101 12086 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:18:16.661375 12086 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:18:16.661394 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.661403 12086 net.cpp:165] Memory required for data: 247297500\nI1212 06:18:16.661424 12086 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_scale\nI1212 06:18:16.661445 12086 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:18:16.661458 12086 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:18:16.661473 12086 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.661567 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:18:16.661744 12086 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:18:16.661763 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.661772 12086 net.cpp:165] Memory required for data: 255489500\nI1212 06:18:16.661792 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:18:16.661808 12086 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:18:16.661818 12086 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:18:16.661837 12086 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.661857 12086 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:18:16.661871 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.661890 12086 net.cpp:165] Memory required for data: 263681500\nI1212 06:18:16.661900 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:18:16.661927 12086 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:18:16.661939 12086 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:18:16.661957 12086 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:18:16.662319 12086 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:18:16.662339 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.662349 12086 net.cpp:165] Memory required for data: 271873500\nI1212 06:18:16.662367 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:18:16.662394 12086 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:18:16.662405 12086 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:18:16.662430 12086 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:18:16.662721 12086 
net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:18:16.662740 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.662750 12086 net.cpp:165] Memory required for data: 280065500\nI1212 06:18:16.662772 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:18:16.662793 12086 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:18:16.662806 12086 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:18:16.662822 12086 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.662909 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:18:16.663094 12086 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:18:16.663112 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.663121 12086 net.cpp:165] Memory required for data: 288257500\nI1212 06:18:16.663141 12086 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:18:16.663157 12086 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:18:16.663168 12086 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:18:16.663182 12086 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:18:16.663202 12086 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:18:16.663259 12086 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:18:16.663277 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.663287 12086 net.cpp:165] Memory required for data: 296449500\nI1212 06:18:16.663297 12086 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:18:16.663312 12086 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:18:16.663323 12086 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:18:16.663348 12086 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.663367 12086 net.cpp:150] Setting up L1_b3_relu\nI1212 06:18:16.663383 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.663393 12086 net.cpp:165] Memory required for 
data: 304641500\nI1212 06:18:16.663403 12086 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.663419 12086 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.663429 12086 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:18:16.663444 12086 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:18:16.663465 12086 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:18:16.663543 12086 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.663561 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.663575 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.663585 12086 net.cpp:165] Memory required for data: 321025500\nI1212 06:18:16.663595 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:18:16.663616 12086 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:18:16.663628 12086 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:18:16.663651 12086 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:18:16.664024 12086 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:18:16.664043 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.664053 12086 net.cpp:165] Memory required for data: 329217500\nI1212 06:18:16.664078 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:18:16.664098 12086 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:18:16.664108 12086 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:18:16.664130 12086 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:18:16.664408 12086 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:18:16.664427 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.664438 12086 net.cpp:165] Memory required for data: 
337409500\nI1212 06:18:16.664459 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:18:16.664480 12086 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:18:16.664492 12086 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:18:16.664510 12086 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.664597 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:18:16.664777 12086 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:18:16.664796 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.664806 12086 net.cpp:165] Memory required for data: 345601500\nI1212 06:18:16.664824 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:18:16.664845 12086 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:18:16.664857 12086 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:18:16.664872 12086 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.664891 12086 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:18:16.664906 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.664916 12086 net.cpp:165] Memory required for data: 353793500\nI1212 06:18:16.664925 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:18:16.664952 12086 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:18:16.664964 12086 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:18:16.664986 12086 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:18:16.665555 12086 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:18:16.665576 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.665586 12086 net.cpp:165] Memory required for data: 361985500\nI1212 06:18:16.665604 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:18:16.665622 12086 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:18:16.665634 12086 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:18:16.665650 12086 
net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:18:16.665961 12086 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:18:16.665982 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.665990 12086 net.cpp:165] Memory required for data: 370177500\nI1212 06:18:16.666015 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:18:16.666033 12086 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:18:16.666044 12086 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:18:16.666065 12086 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.666159 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:18:16.666335 12086 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:18:16.666355 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.666365 12086 net.cpp:165] Memory required for data: 378369500\nI1212 06:18:16.666383 12086 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:18:16.666406 12086 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:18:16.666419 12086 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:18:16.666431 12086 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:18:16.666448 12086 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:18:16.666508 12086 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:18:16.666538 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.666549 12086 net.cpp:165] Memory required for data: 386561500\nI1212 06:18:16.666560 12086 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:18:16.666574 12086 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:18:16.666586 12086 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:18:16.666605 12086 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.666625 12086 net.cpp:150] Setting up L1_b4_relu\nI1212 06:18:16.666641 12086 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:18:16.666651 12086 net.cpp:165] Memory required for data: 394753500\nI1212 06:18:16.666661 12086 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.666676 12086 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.666687 12086 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:18:16.666700 12086 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:18:16.666720 12086 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:18:16.666802 12086 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.666821 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.666834 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.666843 12086 net.cpp:165] Memory required for data: 411137500\nI1212 06:18:16.666853 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:18:16.666878 12086 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:18:16.666891 12086 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:18:16.666910 12086 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:18:16.667266 12086 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:18:16.667286 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.667294 12086 net.cpp:165] Memory required for data: 419329500\nI1212 06:18:16.667335 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:18:16.667358 12086 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:18:16.667371 12086 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:18:16.667388 12086 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:18:16.667677 12086 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1212 06:18:16.667696 12086 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:18:16.667706 12086 net.cpp:165] Memory required for data: 427521500\nI1212 06:18:16.667728 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:18:16.667749 12086 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:18:16.667760 12086 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:18:16.667778 12086 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.667870 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:18:16.668051 12086 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:18:16.668076 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.668087 12086 net.cpp:165] Memory required for data: 435713500\nI1212 06:18:16.668105 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:18:16.668125 12086 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:18:16.668138 12086 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:18:16.668153 12086 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.668171 12086 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:18:16.668185 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.668195 12086 net.cpp:165] Memory required for data: 443905500\nI1212 06:18:16.668205 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:18:16.668231 12086 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:18:16.668244 12086 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:18:16.668267 12086 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:18:16.668643 12086 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:18:16.668663 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.668673 12086 net.cpp:165] Memory required for data: 452097500\nI1212 06:18:16.668690 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:18:16.668709 12086 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:18:16.668720 12086 
net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:18:16.668736 12086 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:18:16.669015 12086 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:18:16.669034 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669044 12086 net.cpp:165] Memory required for data: 460289500\nI1212 06:18:16.669065 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:18:16.669095 12086 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:18:16.669108 12086 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:18:16.669126 12086 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.669212 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:18:16.669392 12086 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:18:16.669412 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669421 12086 net.cpp:165] Memory required for data: 468481500\nI1212 06:18:16.669440 12086 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:18:16.669456 12086 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:18:16.669468 12086 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:18:16.669481 12086 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:18:16.669502 12086 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:18:16.669560 12086 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:18:16.669580 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669590 12086 net.cpp:165] Memory required for data: 476673500\nI1212 06:18:16.669600 12086 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:18:16.669615 12086 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:18:16.669627 12086 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:18:16.669646 12086 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.669665 12086 net.cpp:150] 
Setting up L1_b5_relu\nI1212 06:18:16.669680 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669690 12086 net.cpp:165] Memory required for data: 484865500\nI1212 06:18:16.669700 12086 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.669714 12086 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.669725 12086 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:18:16.669740 12086 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:18:16.669760 12086 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:18:16.669842 12086 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.669862 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669874 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.669884 12086 net.cpp:165] Memory required for data: 501249500\nI1212 06:18:16.669894 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:18:16.669915 12086 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:18:16.669926 12086 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:18:16.669951 12086 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:18:16.670320 12086 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:18:16.670339 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.670348 12086 net.cpp:165] Memory required for data: 509441500\nI1212 06:18:16.670367 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:18:16.670392 12086 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:18:16.670405 12086 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:18:16.670428 12086 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1212 06:18:16.670716 12086 net.cpp:150] Setting up 
L1_b6_cbr1_bn\nI1212 06:18:16.670734 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.670744 12086 net.cpp:165] Memory required for data: 517633500\nI1212 06:18:16.670765 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:18:16.670786 12086 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:18:16.670799 12086 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:18:16.670814 12086 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.670903 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:18:16.671088 12086 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:18:16.671108 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.671118 12086 net.cpp:165] Memory required for data: 525825500\nI1212 06:18:16.671136 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:18:16.671151 12086 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:18:16.671162 12086 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:18:16.671181 12086 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.671201 12086 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:18:16.671216 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.671224 12086 net.cpp:165] Memory required for data: 534017500\nI1212 06:18:16.671236 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:18:16.671260 12086 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:18:16.671274 12086 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:18:16.671296 12086 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:18:16.671658 12086 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:18:16.671677 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.671687 12086 net.cpp:165] Memory required for data: 542209500\nI1212 06:18:16.671705 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:18:16.671722 12086 
net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:18:16.671733 12086 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:18:16.671751 12086 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:18:16.672039 12086 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:18:16.672060 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672068 12086 net.cpp:165] Memory required for data: 550401500\nI1212 06:18:16.672097 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:18:16.672121 12086 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:18:16.672133 12086 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:18:16.672149 12086 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.672236 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:18:16.672416 12086 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:18:16.672435 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672446 12086 net.cpp:165] Memory required for data: 558593500\nI1212 06:18:16.672463 12086 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:18:16.672490 12086 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:18:16.672503 12086 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:18:16.672518 12086 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:18:16.672538 12086 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:18:16.672591 12086 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:18:16.672610 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672621 12086 net.cpp:165] Memory required for data: 566785500\nI1212 06:18:16.672631 12086 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:18:16.672650 12086 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:18:16.672673 12086 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1212 06:18:16.672689 12086 net.cpp:395] L1_b6_relu -> 
L1_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.672709 12086 net.cpp:150] Setting up L1_b6_relu\nI1212 06:18:16.672724 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672734 12086 net.cpp:165] Memory required for data: 574977500\nI1212 06:18:16.672744 12086 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.672757 12086 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.672770 12086 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:18:16.672785 12086 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:18:16.672803 12086 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:18:16.672886 12086 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.672904 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672919 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.672927 12086 net.cpp:165] Memory required for data: 591361500\nI1212 06:18:16.672937 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:18:16.672965 12086 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:18:16.672977 12086 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:18:16.672996 12086 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:18:16.673368 12086 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:18:16.673388 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.673398 12086 net.cpp:165] Memory required for data: 599553500\nI1212 06:18:16.673416 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:18:16.673439 12086 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:18:16.673450 12086 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1212 06:18:16.673467 12086 net.cpp:408] L1_b7_cbr1_bn -> 
L1_b7_cbr1_bn_top\nI1212 06:18:16.673748 12086 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:18:16.673768 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.673777 12086 net.cpp:165] Memory required for data: 607745500\nI1212 06:18:16.673799 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:18:16.673815 12086 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:18:16.673827 12086 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:18:16.673843 12086 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.673938 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:18:16.674129 12086 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:18:16.674149 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.674159 12086 net.cpp:165] Memory required for data: 615937500\nI1212 06:18:16.674177 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:18:16.674192 12086 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:18:16.674204 12086 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:18:16.674226 12086 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.674247 12086 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:18:16.674262 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.674271 12086 net.cpp:165] Memory required for data: 624129500\nI1212 06:18:16.674283 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:18:16.674302 12086 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:18:16.674315 12086 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:18:16.674337 12086 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:18:16.674700 12086 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:18:16.674720 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.674728 12086 net.cpp:165] Memory required for data: 632321500\nI1212 06:18:16.674746 12086 
layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:18:16.674773 12086 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:18:16.674787 12086 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:18:16.674808 12086 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:18:16.675108 12086 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:18:16.675132 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675143 12086 net.cpp:165] Memory required for data: 640513500\nI1212 06:18:16.675164 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:18:16.675180 12086 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:18:16.675192 12086 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:18:16.675207 12086 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.675297 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:18:16.675475 12086 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:18:16.675494 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675504 12086 net.cpp:165] Memory required for data: 648705500\nI1212 06:18:16.675523 12086 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:18:16.675544 12086 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:18:16.675556 12086 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:18:16.675570 12086 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:18:16.675590 12086 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:18:16.675642 12086 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:18:16.675662 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675671 12086 net.cpp:165] Memory required for data: 656897500\nI1212 06:18:16.675681 12086 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:18:16.675700 12086 net.cpp:100] Creating Layer L1_b7_relu\nI1212 06:18:16.675714 12086 net.cpp:434] L1_b7_relu <- 
L1_b7_sum_eltwise_top\nI1212 06:18:16.675729 12086 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.675746 12086 net.cpp:150] Setting up L1_b7_relu\nI1212 06:18:16.675761 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675770 12086 net.cpp:165] Memory required for data: 665089500\nI1212 06:18:16.675781 12086 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.675796 12086 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.675807 12086 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:18:16.675822 12086 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:18:16.675843 12086 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:18:16.675923 12086 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.675941 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675956 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.675964 12086 net.cpp:165] Memory required for data: 681473500\nI1212 06:18:16.675974 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:18:16.676000 12086 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:18:16.676014 12086 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:18:16.676033 12086 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:18:16.676409 12086 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:18:16.676429 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.676437 12086 net.cpp:165] Memory required for data: 689665500\nI1212 06:18:16.676455 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:18:16.676479 12086 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1212 06:18:16.676492 12086 net.cpp:434] L1_b8_cbr1_bn <- 
L1_b8_cbr1_conv_top\nI1212 06:18:16.676509 12086 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:18:16.676802 12086 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:18:16.676826 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.676844 12086 net.cpp:165] Memory required for data: 697857500\nI1212 06:18:16.676867 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:18:16.676884 12086 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:18:16.676895 12086 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:18:16.676913 12086 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.676997 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:18:16.677188 12086 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:18:16.677207 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.677217 12086 net.cpp:165] Memory required for data: 706049500\nI1212 06:18:16.677237 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:18:16.677256 12086 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:18:16.677268 12086 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:18:16.677289 12086 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.677309 12086 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:18:16.677323 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.677332 12086 net.cpp:165] Memory required for data: 714241500\nI1212 06:18:16.677343 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:18:16.677363 12086 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:18:16.677376 12086 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:18:16.677398 12086 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:18:16.677763 12086 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:18:16.677783 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.677793 12086 
net.cpp:165] Memory required for data: 722433500\nI1212 06:18:16.677809 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:18:16.677826 12086 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:18:16.677839 12086 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:18:16.677858 12086 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:18:16.678164 12086 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:18:16.678184 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.678192 12086 net.cpp:165] Memory required for data: 730625500\nI1212 06:18:16.678215 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:18:16.678236 12086 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:18:16.678247 12086 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:18:16.678263 12086 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.678352 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:18:16.678534 12086 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:18:16.678552 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.678561 12086 net.cpp:165] Memory required for data: 738817500\nI1212 06:18:16.678580 12086 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:18:16.678601 12086 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:18:16.678613 12086 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:18:16.678627 12086 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:18:16.678642 12086 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:18:16.678699 12086 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:18:16.678719 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.678728 12086 net.cpp:165] Memory required for data: 747009500\nI1212 06:18:16.678738 12086 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1212 06:18:16.678752 12086 net.cpp:100] Creating 
Layer L1_b8_relu\nI1212 06:18:16.678764 12086 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:18:16.678784 12086 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.678804 12086 net.cpp:150] Setting up L1_b8_relu\nI1212 06:18:16.678819 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.678828 12086 net.cpp:165] Memory required for data: 755201500\nI1212 06:18:16.678848 12086 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.678864 12086 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.678874 12086 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:18:16.678890 12086 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:18:16.678910 12086 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:18:16.678992 12086 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.679013 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.679028 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.679038 12086 net.cpp:165] Memory required for data: 771585500\nI1212 06:18:16.679049 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:18:16.679069 12086 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:18:16.679090 12086 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:18:16.679113 12086 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:18:16.679481 12086 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:18:16.679508 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.679519 12086 net.cpp:165] Memory required for data: 779777500\nI1212 06:18:16.679538 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1212 06:18:16.679554 12086 net.cpp:100] Creating Layer 
L1_b9_cbr1_bn\nI1212 06:18:16.679571 12086 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:18:16.679589 12086 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:18:16.679865 12086 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:18:16.679884 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.679894 12086 net.cpp:165] Memory required for data: 787969500\nI1212 06:18:16.679915 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:18:16.679932 12086 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:18:16.679944 12086 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:18:16.679965 12086 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.680054 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:18:16.680244 12086 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:18:16.680269 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.680279 12086 net.cpp:165] Memory required for data: 796161500\nI1212 06:18:16.680297 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:18:16.680313 12086 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:18:16.680325 12086 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:18:16.680338 12086 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.680357 12086 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:18:16.680371 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.680382 12086 net.cpp:165] Memory required for data: 804353500\nI1212 06:18:16.680392 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:18:16.680416 12086 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:18:16.680430 12086 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:18:16.680452 12086 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:18:16.680824 12086 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:18:16.680842 12086 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.680852 12086 net.cpp:165] Memory required for data: 812545500\nI1212 06:18:16.680871 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:18:16.680893 12086 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:18:16.680905 12086 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:18:16.680927 12086 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:18:16.681227 12086 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:18:16.681253 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.681263 12086 net.cpp:165] Memory required for data: 820737500\nI1212 06:18:16.681315 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:18:16.681335 12086 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:18:16.681347 12086 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:18:16.681366 12086 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.681457 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:18:16.681643 12086 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:18:16.681661 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.681671 12086 net.cpp:165] Memory required for data: 828929500\nI1212 06:18:16.681689 12086 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:18:16.681711 12086 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:18:16.681723 12086 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:18:16.681737 12086 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:18:16.681753 12086 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:18:16.681807 12086 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:18:16.681824 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.681834 12086 net.cpp:165] Memory required for data: 837121500\nI1212 06:18:16.681845 12086 layer_factory.hpp:77] Creating layer 
L1_b9_relu\nI1212 06:18:16.681864 12086 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:18:16.681876 12086 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:18:16.681891 12086 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.681910 12086 net.cpp:150] Setting up L1_b9_relu\nI1212 06:18:16.681924 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.681934 12086 net.cpp:165] Memory required for data: 845313500\nI1212 06:18:16.681944 12086 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.681965 12086 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.681977 12086 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:18:16.681993 12086 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:18:16.682013 12086 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:18:16.682103 12086 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.682122 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.682135 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.682144 12086 net.cpp:165] Memory required for data: 861697500\nI1212 06:18:16.682154 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:18:16.682179 12086 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:18:16.682193 12086 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:18:16.682211 12086 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:18:16.682590 12086 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:18:16.682610 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.682620 12086 net.cpp:165] Memory required for data: 863745500\nI1212 06:18:16.682637 12086 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_bn\nI1212 06:18:16.682659 12086 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:18:16.682672 12086 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:18:16.682693 12086 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:18:16.682972 12086 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:18:16.682991 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.683001 12086 net.cpp:165] Memory required for data: 865793500\nI1212 06:18:16.683022 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:18:16.683039 12086 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:18:16.683051 12086 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:18:16.683082 12086 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.683182 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:18:16.683363 12086 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:18:16.683382 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.683392 12086 net.cpp:165] Memory required for data: 867841500\nI1212 06:18:16.683411 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:18:16.683426 12086 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:18:16.683437 12086 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:18:16.683457 12086 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.683477 12086 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:18:16.683491 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.683501 12086 net.cpp:165] Memory required for data: 869889500\nI1212 06:18:16.683512 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:18:16.683537 12086 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:18:16.683550 12086 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:18:16.683569 12086 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:18:16.683938 12086 net.cpp:150] 
Setting up L2_b1_cbr2_conv\nI1212 06:18:16.683956 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.683966 12086 net.cpp:165] Memory required for data: 871937500\nI1212 06:18:16.683984 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:18:16.684010 12086 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:18:16.684022 12086 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:18:16.684041 12086 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:18:16.684335 12086 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:18:16.684357 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.684367 12086 net.cpp:165] Memory required for data: 873985500\nI1212 06:18:16.684389 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:18:16.684406 12086 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:18:16.684418 12086 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:18:16.684434 12086 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.684521 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:18:16.684708 12086 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:18:16.684726 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.684736 12086 net.cpp:165] Memory required for data: 876033500\nI1212 06:18:16.684756 12086 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:18:16.684775 12086 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:18:16.684787 12086 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:18:16.684809 12086 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:18:16.684945 12086 net.cpp:150] Setting up L2_b1_pool\nI1212 06:18:16.684965 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.684975 12086 net.cpp:165] Memory required for data: 878081500\nI1212 06:18:16.684985 12086 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:18:16.685008 12086 net.cpp:100] 
Creating Layer L2_b1_sum_eltwise\nI1212 06:18:16.685019 12086 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:18:16.685034 12086 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:18:16.685050 12086 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:18:16.685111 12086 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:18:16.685132 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.685142 12086 net.cpp:165] Memory required for data: 880129500\nI1212 06:18:16.685153 12086 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:18:16.685168 12086 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:18:16.685179 12086 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:18:16.685199 12086 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.685227 12086 net.cpp:150] Setting up L2_b1_relu\nI1212 06:18:16.685243 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.685253 12086 net.cpp:165] Memory required for data: 882177500\nI1212 06:18:16.685263 12086 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:18:16.685338 12086 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:18:16.685361 12086 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:18:16.687777 12086 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:18:16.687800 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.687810 12086 net.cpp:165] Memory required for data: 884225500\nI1212 06:18:16.687821 12086 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:18:16.687844 12086 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:18:16.687857 12086 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:18:16.687871 12086 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:18:16.687887 12086 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:18:16.687958 12086 net.cpp:150] Setting up L2_b1_concat0\nI1212 06:18:16.687978 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 
06:18:16.687988 12086 net.cpp:165] Memory required for data: 888321500\nI1212 06:18:16.687999 12086 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.688014 12086 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.688026 12086 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:18:16.688047 12086 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:18:16.688069 12086 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:18:16.688163 12086 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.688189 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.688204 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.688212 12086 net.cpp:165] Memory required for data: 896513500\nI1212 06:18:16.688223 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:18:16.688243 12086 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:18:16.688257 12086 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:18:16.688277 12086 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:18:16.689798 12086 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:18:16.689821 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.689831 12086 net.cpp:165] Memory required for data: 900609500\nI1212 06:18:16.689848 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:18:16.689870 12086 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:18:16.689883 12086 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:18:16.689904 12086 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:18:16.690202 12086 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:18:16.690222 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.690232 12086 net.cpp:165] Memory required for data: 
904705500\nI1212 06:18:16.690253 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:18:16.690269 12086 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:18:16.690281 12086 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:18:16.690297 12086 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.690392 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:18:16.690580 12086 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:18:16.690599 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.690609 12086 net.cpp:165] Memory required for data: 908801500\nI1212 06:18:16.690629 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:18:16.690649 12086 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:18:16.690661 12086 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:18:16.690676 12086 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.690707 12086 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:18:16.690723 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.690733 12086 net.cpp:165] Memory required for data: 912897500\nI1212 06:18:16.690744 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:18:16.690769 12086 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:18:16.690783 12086 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:18:16.690801 12086 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:18:16.691313 12086 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:18:16.691334 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.691344 12086 net.cpp:165] Memory required for data: 916993500\nI1212 06:18:16.691360 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:18:16.691382 12086 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:18:16.691395 12086 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:18:16.691412 12086 
net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:18:16.691704 12086 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:18:16.691725 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.691736 12086 net.cpp:165] Memory required for data: 921089500\nI1212 06:18:16.691758 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:18:16.691776 12086 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:18:16.691787 12086 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:18:16.691803 12086 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.691896 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:18:16.692088 12086 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:18:16.692107 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.692117 12086 net.cpp:165] Memory required for data: 925185500\nI1212 06:18:16.692137 12086 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:18:16.692153 12086 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:18:16.692165 12086 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:18:16.692178 12086 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:18:16.692200 12086 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:18:16.692247 12086 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:18:16.692265 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.692275 12086 net.cpp:165] Memory required for data: 929281500\nI1212 06:18:16.692286 12086 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:18:16.692306 12086 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:18:16.692317 12086 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:18:16.692332 12086 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:18:16.692350 12086 net.cpp:150] Setting up L2_b2_relu\nI1212 06:18:16.692366 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.692375 12086 net.cpp:165] Memory required for data: 933377500\nI1212 06:18:16.692385 12086 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.692399 12086 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.692410 12086 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:18:16.692425 12086 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:18:16.692445 12086 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:18:16.692529 12086 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.692548 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.692561 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.692570 12086 net.cpp:165] Memory required for data: 941569500\nI1212 06:18:16.692580 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:18:16.692605 12086 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:18:16.692627 12086 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:18:16.692648 12086 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:18:16.693166 12086 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:18:16.693186 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.693197 12086 net.cpp:165] Memory required for data: 945665500\nI1212 06:18:16.693215 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:18:16.693238 12086 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:18:16.693249 12086 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:18:16.693271 12086 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:18:16.693550 12086 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1212 06:18:16.693569 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.693578 12086 net.cpp:165] Memory required for data: 949761500\nI1212 06:18:16.693599 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:18:16.693616 12086 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:18:16.693627 12086 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:18:16.693642 12086 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.693737 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:18:16.693923 12086 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:18:16.693943 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.693951 12086 net.cpp:165] Memory required for data: 953857500\nI1212 06:18:16.693970 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:18:16.693986 12086 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:18:16.693997 12086 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:18:16.694017 12086 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.694036 12086 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:18:16.694051 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.694061 12086 net.cpp:165] Memory required for data: 957953500\nI1212 06:18:16.694078 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:18:16.694104 12086 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:18:16.694118 12086 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:18:16.694135 12086 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:18:16.694645 12086 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:18:16.694665 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.694675 12086 net.cpp:165] Memory required for data: 962049500\nI1212 06:18:16.694694 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:18:16.694715 12086 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:18:16.694727 12086 
net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:18:16.694743 12086 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:18:16.695034 12086 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:18:16.695056 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695067 12086 net.cpp:165] Memory required for data: 966145500\nI1212 06:18:16.695097 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:18:16.695114 12086 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:18:16.695127 12086 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:18:16.695142 12086 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.695230 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:18:16.695420 12086 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:18:16.695438 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695447 12086 net.cpp:165] Memory required for data: 970241500\nI1212 06:18:16.695466 12086 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:18:16.695483 12086 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:18:16.695494 12086 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:18:16.695508 12086 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:18:16.695539 12086 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:18:16.695590 12086 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:18:16.695608 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695617 12086 net.cpp:165] Memory required for data: 974337500\nI1212 06:18:16.695628 12086 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:18:16.695662 12086 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:18:16.695675 12086 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:18:16.695696 12086 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.695716 12086 net.cpp:150] 
Setting up L2_b3_relu\nI1212 06:18:16.695731 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695741 12086 net.cpp:165] Memory required for data: 978433500\nI1212 06:18:16.695752 12086 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.695766 12086 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.695777 12086 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:18:16.695793 12086 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:18:16.695814 12086 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:18:16.695900 12086 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.695919 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695933 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.695942 12086 net.cpp:165] Memory required for data: 986625500\nI1212 06:18:16.695953 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:18:16.695974 12086 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:18:16.695986 12086 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:18:16.696009 12086 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:18:16.696537 12086 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:18:16.696557 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.696568 12086 net.cpp:165] Memory required for data: 990721500\nI1212 06:18:16.696585 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:18:16.696602 12086 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:18:16.696614 12086 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:18:16.696635 12086 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1212 06:18:16.696923 12086 net.cpp:150] Setting up 
L2_b4_cbr1_bn\nI1212 06:18:16.696941 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.696951 12086 net.cpp:165] Memory required for data: 994817500\nI1212 06:18:16.696974 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:18:16.696995 12086 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:18:16.697007 12086 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:18:16.697023 12086 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.697119 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:18:16.697304 12086 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:18:16.697322 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.697332 12086 net.cpp:165] Memory required for data: 998913500\nI1212 06:18:16.697350 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:18:16.697369 12086 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:18:16.697381 12086 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:18:16.697396 12086 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.697414 12086 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:18:16.697429 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.697439 12086 net.cpp:165] Memory required for data: 1003009500\nI1212 06:18:16.697449 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:18:16.697475 12086 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:18:16.697497 12086 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:18:16.697522 12086 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:18:16.698036 12086 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:18:16.698056 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.698066 12086 net.cpp:165] Memory required for data: 1007105500\nI1212 06:18:16.698092 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:18:16.698109 12086 
net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:18:16.698120 12086 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:18:16.698144 12086 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:18:16.698448 12086 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:18:16.698468 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.698477 12086 net.cpp:165] Memory required for data: 1011201500\nI1212 06:18:16.698499 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:18:16.698521 12086 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:18:16.698534 12086 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:18:16.698549 12086 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.698639 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:18:16.698827 12086 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:18:16.698846 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.698856 12086 net.cpp:165] Memory required for data: 1015297500\nI1212 06:18:16.698874 12086 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:18:16.698892 12086 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:18:16.698904 12086 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:18:16.698921 12086 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:18:16.698940 12086 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:18:16.698985 12086 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:18:16.699008 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.699019 12086 net.cpp:165] Memory required for data: 1019393500\nI1212 06:18:16.699029 12086 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:18:16.699044 12086 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:18:16.699055 12086 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1212 06:18:16.699076 12086 net.cpp:395] L2_b4_relu -> 
L2_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.699097 12086 net.cpp:150] Setting up L2_b4_relu\nI1212 06:18:16.699112 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.699122 12086 net.cpp:165] Memory required for data: 1023489500\nI1212 06:18:16.699131 12086 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.699151 12086 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.699163 12086 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:18:16.699178 12086 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:18:16.699199 12086 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:18:16.699280 12086 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.699298 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.699311 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.699319 12086 net.cpp:165] Memory required for data: 1031681500\nI1212 06:18:16.699329 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:18:16.699349 12086 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:18:16.699362 12086 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:18:16.699385 12086 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:18:16.699903 12086 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:18:16.699923 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.699941 12086 net.cpp:165] Memory required for data: 1035777500\nI1212 06:18:16.699960 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:18:16.699977 12086 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:18:16.699990 12086 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1212 06:18:16.700011 12086 net.cpp:408] L2_b5_cbr1_bn 
-> L2_b5_cbr1_bn_top\nI1212 06:18:16.700316 12086 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:18:16.700336 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.700345 12086 net.cpp:165] Memory required for data: 1039873500\nI1212 06:18:16.700366 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:18:16.700387 12086 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:18:16.700400 12086 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:18:16.700415 12086 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.700505 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:18:16.700700 12086 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:18:16.700719 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.700729 12086 net.cpp:165] Memory required for data: 1043969500\nI1212 06:18:16.700748 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:18:16.700767 12086 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:18:16.700779 12086 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:18:16.700794 12086 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.700814 12086 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:18:16.700827 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.700837 12086 net.cpp:165] Memory required for data: 1048065500\nI1212 06:18:16.700847 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:18:16.700873 12086 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:18:16.700886 12086 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:18:16.700908 12086 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:18:16.701412 12086 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:18:16.701433 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.701443 12086 net.cpp:165] Memory required for data: 1052161500\nI1212 06:18:16.701462 12086 
layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:18:16.701478 12086 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:18:16.701490 12086 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:18:16.701506 12086 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:18:16.701795 12086 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:18:16.701814 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.701824 12086 net.cpp:165] Memory required for data: 1056257500\nI1212 06:18:16.701845 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:18:16.701864 12086 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:18:16.701874 12086 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:18:16.701894 12086 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.701987 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:18:16.702180 12086 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:18:16.702200 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.702210 12086 net.cpp:165] Memory required for data: 1060353500\nI1212 06:18:16.702229 12086 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:18:16.702245 12086 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:18:16.702257 12086 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:18:16.702271 12086 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:18:16.702292 12086 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:18:16.702339 12086 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:18:16.702363 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.702373 12086 net.cpp:165] Memory required for data: 1064449500\nI1212 06:18:16.702392 12086 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:18:16.702407 12086 net.cpp:100] Creating Layer L2_b5_relu\nI1212 06:18:16.702419 12086 net.cpp:434] L2_b5_relu <- 
L2_b5_sum_eltwise_top\nI1212 06:18:16.702433 12086 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.702452 12086 net.cpp:150] Setting up L2_b5_relu\nI1212 06:18:16.702466 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.702476 12086 net.cpp:165] Memory required for data: 1068545500\nI1212 06:18:16.702486 12086 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.702509 12086 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.702522 12086 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:18:16.702538 12086 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:18:16.702558 12086 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:18:16.702642 12086 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.702663 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.702677 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.702687 12086 net.cpp:165] Memory required for data: 1076737500\nI1212 06:18:16.702698 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:18:16.702718 12086 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:18:16.702731 12086 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:18:16.702754 12086 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:18:16.703289 12086 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:18:16.703310 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.703320 12086 net.cpp:165] Memory required for data: 1080833500\nI1212 06:18:16.703337 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:18:16.703354 12086 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1212 06:18:16.703367 12086 net.cpp:434] L2_b6_cbr1_bn <- 
L2_b6_cbr1_conv_top\nI1212 06:18:16.703397 12086 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:18:16.703685 12086 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:18:16.703703 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.703713 12086 net.cpp:165] Memory required for data: 1084929500\nI1212 06:18:16.703734 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:18:16.703757 12086 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:18:16.703769 12086 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:18:16.703784 12086 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.703876 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:18:16.704061 12086 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:18:16.704087 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.704097 12086 net.cpp:165] Memory required for data: 1089025500\nI1212 06:18:16.704114 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:18:16.704130 12086 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:18:16.704141 12086 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:18:16.704161 12086 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.704182 12086 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:18:16.704197 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.704208 12086 net.cpp:165] Memory required for data: 1093121500\nI1212 06:18:16.704218 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:18:16.704246 12086 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:18:16.704260 12086 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:18:16.704277 12086 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:18:16.704788 12086 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:18:16.704809 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.704828 12086 
net.cpp:165] Memory required for data: 1097217500\nI1212 06:18:16.704849 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:18:16.704870 12086 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:18:16.704883 12086 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:18:16.704900 12086 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:18:16.705205 12086 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:18:16.705225 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.705235 12086 net.cpp:165] Memory required for data: 1101313500\nI1212 06:18:16.705256 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:18:16.705273 12086 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:18:16.705284 12086 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:18:16.705305 12086 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.705396 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:18:16.705588 12086 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:18:16.705607 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.705616 12086 net.cpp:165] Memory required for data: 1105409500\nI1212 06:18:16.705636 12086 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:18:16.705651 12086 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:18:16.705663 12086 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:18:16.705677 12086 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:18:16.705700 12086 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:18:16.705749 12086 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:18:16.705766 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.705776 12086 net.cpp:165] Memory required for data: 1109505500\nI1212 06:18:16.705787 12086 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1212 06:18:16.705806 12086 net.cpp:100] Creating 
Layer L2_b6_relu\nI1212 06:18:16.705818 12086 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:18:16.705833 12086 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.705852 12086 net.cpp:150] Setting up L2_b6_relu\nI1212 06:18:16.705868 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.705878 12086 net.cpp:165] Memory required for data: 1113601500\nI1212 06:18:16.705888 12086 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.705906 12086 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.705919 12086 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:18:16.705934 12086 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:18:16.705953 12086 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:18:16.706034 12086 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.706058 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.706079 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.706089 12086 net.cpp:165] Memory required for data: 1121793500\nI1212 06:18:16.706100 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:18:16.706121 12086 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:18:16.706135 12086 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:18:16.706153 12086 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:18:16.706674 12086 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:18:16.706694 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.706703 12086 net.cpp:165] Memory required for data: 1125889500\nI1212 06:18:16.706722 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1212 06:18:16.706743 12086 net.cpp:100] Creating Layer 
L2_b7_cbr1_bn\nI1212 06:18:16.706755 12086 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:18:16.706780 12086 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:18:16.707096 12086 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:18:16.707116 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.707126 12086 net.cpp:165] Memory required for data: 1129985500\nI1212 06:18:16.707147 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:18:16.707165 12086 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:18:16.707175 12086 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:18:16.707196 12086 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.707289 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:18:16.707479 12086 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:18:16.707499 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.707509 12086 net.cpp:165] Memory required for data: 1134081500\nI1212 06:18:16.707527 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:18:16.707543 12086 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:18:16.707556 12086 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:18:16.707577 12086 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.707597 12086 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:18:16.707612 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.707623 12086 net.cpp:165] Memory required for data: 1138177500\nI1212 06:18:16.707633 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:18:16.707657 12086 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:18:16.707670 12086 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:18:16.707689 12086 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:18:16.708217 12086 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:18:16.708237 12086 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.708247 12086 net.cpp:165] Memory required for data: 1142273500\nI1212 06:18:16.708266 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:18:16.708287 12086 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:18:16.708299 12086 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:18:16.708317 12086 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:18:16.708614 12086 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:18:16.708633 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.708643 12086 net.cpp:165] Memory required for data: 1146369500\nI1212 06:18:16.708665 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:18:16.708683 12086 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:18:16.708694 12086 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:18:16.708717 12086 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.708809 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:18:16.708998 12086 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:18:16.709017 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.709028 12086 net.cpp:165] Memory required for data: 1150465500\nI1212 06:18:16.709048 12086 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:18:16.709064 12086 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:18:16.709082 12086 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:18:16.709098 12086 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:18:16.709120 12086 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:18:16.709167 12086 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:18:16.709187 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.709197 12086 net.cpp:165] Memory required for data: 1154561500\nI1212 06:18:16.709206 12086 layer_factory.hpp:77] 
Creating layer L2_b7_relu\nI1212 06:18:16.709226 12086 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:18:16.709239 12086 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:18:16.709254 12086 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.709282 12086 net.cpp:150] Setting up L2_b7_relu\nI1212 06:18:16.709297 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.709307 12086 net.cpp:165] Memory required for data: 1158657500\nI1212 06:18:16.709318 12086 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.709331 12086 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.709343 12086 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:18:16.709362 12086 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:18:16.709563 12086 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:18:16.709650 12086 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.709676 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.709689 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.709699 12086 net.cpp:165] Memory required for data: 1166849500\nI1212 06:18:16.709710 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:18:16.709731 12086 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:18:16.709744 12086 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:18:16.709763 12086 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:18:16.710302 12086 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:18:16.710324 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.710333 12086 net.cpp:165] Memory required for data: 1170945500\nI1212 06:18:16.710351 12086 layer_factory.hpp:77] Creating 
layer L2_b8_cbr1_bn\nI1212 06:18:16.710373 12086 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:18:16.710386 12086 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:18:16.710402 12086 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:18:16.710703 12086 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:18:16.710722 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.710732 12086 net.cpp:165] Memory required for data: 1175041500\nI1212 06:18:16.710753 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:18:16.710770 12086 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:18:16.710781 12086 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:18:16.710801 12086 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.710896 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:18:16.711097 12086 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:18:16.711115 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.711125 12086 net.cpp:165] Memory required for data: 1179137500\nI1212 06:18:16.711143 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:18:16.711159 12086 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:18:16.711171 12086 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:18:16.711191 12086 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.711212 12086 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:18:16.711226 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.711236 12086 net.cpp:165] Memory required for data: 1183233500\nI1212 06:18:16.711247 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:18:16.711273 12086 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:18:16.711287 12086 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:18:16.711305 12086 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:18:16.711833 12086 
net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:18:16.711853 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.711861 12086 net.cpp:165] Memory required for data: 1187329500\nI1212 06:18:16.711880 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:18:16.711902 12086 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:18:16.711915 12086 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:18:16.711943 12086 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:18:16.712254 12086 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:18:16.712273 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.712282 12086 net.cpp:165] Memory required for data: 1191425500\nI1212 06:18:16.712306 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:18:16.712322 12086 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:18:16.712333 12086 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:18:16.712349 12086 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.712445 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:18:16.712637 12086 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:18:16.712659 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.712671 12086 net.cpp:165] Memory required for data: 1195521500\nI1212 06:18:16.712688 12086 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:18:16.712704 12086 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:18:16.712716 12086 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:18:16.712729 12086 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:18:16.712746 12086 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:18:16.712798 12086 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:18:16.712816 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.712827 12086 net.cpp:165] Memory required for 
data: 1199617500\nI1212 06:18:16.712837 12086 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:18:16.712852 12086 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:18:16.712863 12086 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:18:16.712882 12086 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.712903 12086 net.cpp:150] Setting up L2_b8_relu\nI1212 06:18:16.712918 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.712927 12086 net.cpp:165] Memory required for data: 1203713500\nI1212 06:18:16.712939 12086 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.712951 12086 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.712962 12086 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:18:16.712981 12086 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:18:16.713021 12086 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:18:16.713115 12086 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.713138 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.713151 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.713161 12086 net.cpp:165] Memory required for data: 1211905500\nI1212 06:18:16.713171 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:18:16.713197 12086 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:18:16.713210 12086 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:18:16.713237 12086 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:18:16.713758 12086 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:18:16.713778 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.713788 12086 net.cpp:165] Memory required for data: 
1216001500\nI1212 06:18:16.713804 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:18:16.713821 12086 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:18:16.713840 12086 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:18:16.713857 12086 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:18:16.714159 12086 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:18:16.714179 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.714188 12086 net.cpp:165] Memory required for data: 1220097500\nI1212 06:18:16.714210 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:18:16.714236 12086 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:18:16.714248 12086 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:18:16.714265 12086 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.714365 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:18:16.714557 12086 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:18:16.714581 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.714591 12086 net.cpp:165] Memory required for data: 1224193500\nI1212 06:18:16.714610 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:18:16.714625 12086 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:18:16.714637 12086 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:18:16.714651 12086 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.714671 12086 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:18:16.714685 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.714694 12086 net.cpp:165] Memory required for data: 1228289500\nI1212 06:18:16.714705 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:18:16.714733 12086 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:18:16.714747 12086 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:18:16.714769 12086 net.cpp:408] 
L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:18:16.715301 12086 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:18:16.715322 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.715332 12086 net.cpp:165] Memory required for data: 1232385500\nI1212 06:18:16.715349 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:18:16.715370 12086 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:18:16.715384 12086 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:18:16.715405 12086 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:18:16.715700 12086 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:18:16.715719 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.715729 12086 net.cpp:165] Memory required for data: 1236481500\nI1212 06:18:16.715797 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:18:16.715821 12086 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:18:16.715834 12086 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:18:16.715852 12086 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.715946 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:18:16.716140 12086 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:18:16.716161 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.716169 12086 net.cpp:165] Memory required for data: 1240577500\nI1212 06:18:16.716188 12086 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:18:16.716210 12086 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:18:16.716223 12086 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:18:16.716238 12086 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:18:16.716253 12086 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:18:16.716307 12086 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1212 06:18:16.716325 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.716334 12086 net.cpp:165] Memory required for data: 1244673500\nI1212 06:18:16.716346 12086 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:18:16.716359 12086 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:18:16.716372 12086 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:18:16.716385 12086 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.716408 12086 net.cpp:150] Setting up L2_b9_relu\nI1212 06:18:16.716424 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.716434 12086 net.cpp:165] Memory required for data: 1248769500\nI1212 06:18:16.716444 12086 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.716467 12086 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.716480 12086 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:18:16.716500 12086 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:18:16.716522 12086 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:18:16.716611 12086 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.716630 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.716642 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.716651 12086 net.cpp:165] Memory required for data: 1256961500\nI1212 06:18:16.716662 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:18:16.716683 12086 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:18:16.716697 12086 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:18:16.716720 12086 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:18:16.717260 12086 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1212 06:18:16.717280 12086 net.cpp:157] Top shape: 125 32 8 8 
(256000)\nI1212 06:18:16.717290 12086 net.cpp:165] Memory required for data: 1257985500\nI1212 06:18:16.717308 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:18:16.717325 12086 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:18:16.717337 12086 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:18:16.717365 12086 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:18:16.717669 12086 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:18:16.717691 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.717701 12086 net.cpp:165] Memory required for data: 1259009500\nI1212 06:18:16.717725 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:18:16.717741 12086 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:18:16.717753 12086 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:18:16.717769 12086 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.717862 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:18:16.718060 12086 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:18:16.718085 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.718096 12086 net.cpp:165] Memory required for data: 1260033500\nI1212 06:18:16.718114 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:18:16.718135 12086 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:18:16.718147 12086 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:18:16.718163 12086 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.718181 12086 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:18:16.718196 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.718206 12086 net.cpp:165] Memory required for data: 1261057500\nI1212 06:18:16.718216 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:18:16.718241 12086 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1212 06:18:16.718255 12086 net.cpp:434] 
L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:18:16.718273 12086 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:18:16.718799 12086 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:18:16.718819 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.718829 12086 net.cpp:165] Memory required for data: 1262081500\nI1212 06:18:16.718847 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:18:16.718869 12086 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:18:16.718883 12086 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:18:16.718904 12086 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:18:16.719213 12086 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:18:16.719233 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.719244 12086 net.cpp:165] Memory required for data: 1263105500\nI1212 06:18:16.719274 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:18:16.719291 12086 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:18:16.719303 12086 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:18:16.719323 12086 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.719427 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:18:16.719630 12086 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:18:16.719650 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.719660 12086 net.cpp:165] Memory required for data: 1264129500\nI1212 06:18:16.719678 12086 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:18:16.719696 12086 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:18:16.719707 12086 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:18:16.719729 12086 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:18:16.719794 12086 net.cpp:150] Setting up L3_b1_pool\nI1212 06:18:16.719815 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.719823 12086 net.cpp:165] Memory 
required for data: 1265153500\nI1212 06:18:16.719835 12086 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:18:16.719851 12086 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:18:16.719862 12086 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:18:16.719875 12086 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:18:16.719897 12086 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:18:16.719954 12086 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:18:16.719976 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.719985 12086 net.cpp:165] Memory required for data: 1266177500\nI1212 06:18:16.719996 12086 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:18:16.720010 12086 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:18:16.720021 12086 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:18:16.720036 12086 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.720055 12086 net.cpp:150] Setting up L3_b1_relu\nI1212 06:18:16.720077 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.720088 12086 net.cpp:165] Memory required for data: 1267201500\nI1212 06:18:16.720098 12086 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:18:16.720119 12086 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:18:16.720135 12086 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:18:16.721448 12086 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:18:16.721470 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.721480 12086 net.cpp:165] Memory required for data: 1268225500\nI1212 06:18:16.721491 12086 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:18:16.721508 12086 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:18:16.721520 12086 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:18:16.721534 12086 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 06:18:16.721555 12086 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 
06:18:16.721616 12086 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:18:16.721643 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.721654 12086 net.cpp:165] Memory required for data: 1270273500\nI1212 06:18:16.721665 12086 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.721680 12086 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.721691 12086 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:18:16.721707 12086 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:18:16.721727 12086 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:18:16.721819 12086 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.721839 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.721853 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.721861 12086 net.cpp:165] Memory required for data: 1274369500\nI1212 06:18:16.721881 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:18:16.721907 12086 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:18:16.721921 12086 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:18:16.721942 12086 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:18:16.724059 12086 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:18:16.724088 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.724099 12086 net.cpp:165] Memory required for data: 1276417500\nI1212 06:18:16.724118 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:18:16.724140 12086 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:18:16.724153 12086 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:18:16.724171 12086 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 06:18:16.724480 12086 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 
06:18:16.724499 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.724509 12086 net.cpp:165] Memory required for data: 1278465500\nI1212 06:18:16.724531 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:18:16.724548 12086 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:18:16.724560 12086 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:18:16.724576 12086 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.724675 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:18:16.724870 12086 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:18:16.724894 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.724905 12086 net.cpp:165] Memory required for data: 1280513500\nI1212 06:18:16.724922 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:18:16.724938 12086 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:18:16.724951 12086 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:18:16.724966 12086 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.724985 12086 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:18:16.724999 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.725008 12086 net.cpp:165] Memory required for data: 1282561500\nI1212 06:18:16.725019 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:18:16.725045 12086 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:18:16.725059 12086 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:18:16.725083 12086 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:18:16.726166 12086 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:18:16.726186 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.726197 12086 net.cpp:165] Memory required for data: 1284609500\nI1212 06:18:16.726213 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1212 06:18:16.726231 12086 net.cpp:100] Creating Layer 
L3_b2_cbr2_bn\nI1212 06:18:16.726248 12086 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:18:16.726266 12086 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:18:16.726579 12086 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:18:16.726598 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.726608 12086 net.cpp:165] Memory required for data: 1286657500\nI1212 06:18:16.726629 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:18:16.726646 12086 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:18:16.726657 12086 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:18:16.726677 12086 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.726771 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:18:16.726972 12086 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:18:16.726991 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.727001 12086 net.cpp:165] Memory required for data: 1288705500\nI1212 06:18:16.727020 12086 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:18:16.727041 12086 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:18:16.727054 12086 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:18:16.727085 12086 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:18:16.727104 12086 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:18:16.727167 12086 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:18:16.727187 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.727197 12086 net.cpp:165] Memory required for data: 1290753500\nI1212 06:18:16.727208 12086 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:18:16.727223 12086 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:18:16.727236 12086 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:18:16.727254 12086 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 
06:18:16.727274 12086 net.cpp:150] Setting up L3_b2_relu\nI1212 06:18:16.727290 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.727300 12086 net.cpp:165] Memory required for data: 1292801500\nI1212 06:18:16.727310 12086 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.727325 12086 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.727337 12086 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:18:16.727352 12086 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:18:16.727373 12086 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:18:16.727455 12086 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.727474 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.727488 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.727496 12086 net.cpp:165] Memory required for data: 1296897500\nI1212 06:18:16.727506 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:18:16.727527 12086 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:18:16.727540 12086 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:18:16.727565 12086 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:18:16.728634 12086 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:18:16.728654 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.728664 12086 net.cpp:165] Memory required for data: 1298945500\nI1212 06:18:16.728682 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:18:16.728705 12086 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:18:16.728718 12086 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:18:16.728735 12086 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:18:16.729038 12086 
net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:18:16.729056 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.729066 12086 net.cpp:165] Memory required for data: 1300993500\nI1212 06:18:16.729094 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:18:16.729111 12086 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:18:16.729123 12086 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:18:16.729140 12086 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.729235 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:18:16.729439 12086 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:18:16.729459 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.729468 12086 net.cpp:165] Memory required for data: 1303041500\nI1212 06:18:16.729487 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:18:16.729502 12086 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:18:16.729516 12086 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:18:16.729529 12086 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.729548 12086 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:18:16.729563 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.729573 12086 net.cpp:165] Memory required for data: 1305089500\nI1212 06:18:16.729593 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:18:16.729619 12086 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:18:16.729634 12086 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:18:16.729656 12086 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:18:16.730736 12086 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:18:16.730757 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.730767 12086 net.cpp:165] Memory required for data: 1307137500\nI1212 06:18:16.730784 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 
06:18:16.730808 12086 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:18:16.730821 12086 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:18:16.730839 12086 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:18:16.731154 12086 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:18:16.731174 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.731184 12086 net.cpp:165] Memory required for data: 1309185500\nI1212 06:18:16.731205 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:18:16.731226 12086 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:18:16.731238 12086 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:18:16.731254 12086 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.731351 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:18:16.731550 12086 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:18:16.731570 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.731578 12086 net.cpp:165] Memory required for data: 1311233500\nI1212 06:18:16.731597 12086 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:18:16.731618 12086 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:18:16.731631 12086 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:18:16.731645 12086 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:18:16.731662 12086 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:18:16.731724 12086 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:18:16.731743 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.731755 12086 net.cpp:165] Memory required for data: 1313281500\nI1212 06:18:16.731763 12086 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:18:16.731779 12086 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:18:16.731791 12086 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1212 06:18:16.731811 12086 net.cpp:395] 
L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.731830 12086 net.cpp:150] Setting up L3_b3_relu\nI1212 06:18:16.731847 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.731855 12086 net.cpp:165] Memory required for data: 1315329500\nI1212 06:18:16.731865 12086 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.731879 12086 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.731891 12086 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:18:16.731907 12086 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:18:16.731927 12086 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:18:16.732009 12086 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.732028 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.732040 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.732049 12086 net.cpp:165] Memory required for data: 1319425500\nI1212 06:18:16.732060 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:18:16.732100 12086 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:18:16.732115 12086 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:18:16.732133 12086 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:18:16.733214 12086 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:18:16.733242 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.733252 12086 net.cpp:165] Memory required for data: 1321473500\nI1212 06:18:16.733269 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:18:16.733295 12086 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:18:16.733309 12086 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1212 06:18:16.733326 12086 net.cpp:408] L3_b4_cbr1_bn 
-> L3_b4_cbr1_bn_top\nI1212 06:18:16.733635 12086 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:18:16.733654 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.733664 12086 net.cpp:165] Memory required for data: 1323521500\nI1212 06:18:16.733685 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:18:16.733702 12086 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:18:16.733714 12086 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:18:16.733729 12086 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.733829 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:18:16.734026 12086 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:18:16.734046 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.734056 12086 net.cpp:165] Memory required for data: 1325569500\nI1212 06:18:16.734081 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:18:16.734098 12086 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:18:16.734110 12086 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:18:16.734125 12086 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.734149 12086 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:18:16.734165 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.734176 12086 net.cpp:165] Memory required for data: 1327617500\nI1212 06:18:16.734186 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:18:16.734207 12086 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:18:16.734226 12086 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:18:16.734243 12086 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:18:16.735318 12086 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:18:16.735337 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.735347 12086 net.cpp:165] Memory required for data: 1329665500\nI1212 06:18:16.735365 12086 
layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:18:16.735386 12086 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:18:16.735399 12086 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:18:16.735417 12086 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:18:16.735734 12086 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:18:16.735754 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.735764 12086 net.cpp:165] Memory required for data: 1331713500\nI1212 06:18:16.735785 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:18:16.735806 12086 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:18:16.735819 12086 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:18:16.735836 12086 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.735934 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:18:16.736145 12086 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:18:16.736166 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.736174 12086 net.cpp:165] Memory required for data: 1333761500\nI1212 06:18:16.736193 12086 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:18:16.736214 12086 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:18:16.736227 12086 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:18:16.736241 12086 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:18:16.736263 12086 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:18:16.736320 12086 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:18:16.736340 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.736358 12086 net.cpp:165] Memory required for data: 1335809500\nI1212 06:18:16.736371 12086 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:18:16.736389 12086 net.cpp:100] Creating Layer L3_b4_relu\nI1212 06:18:16.736402 12086 net.cpp:434] L3_b4_relu <- 
L3_b4_sum_eltwise_top\nI1212 06:18:16.736418 12086 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.736438 12086 net.cpp:150] Setting up L3_b4_relu\nI1212 06:18:16.736452 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.736464 12086 net.cpp:165] Memory required for data: 1337857500\nI1212 06:18:16.736474 12086 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.736487 12086 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.736498 12086 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:18:16.736515 12086 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:18:16.736536 12086 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:18:16.736624 12086 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.736641 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.736654 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.736663 12086 net.cpp:165] Memory required for data: 1341953500\nI1212 06:18:16.736673 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:18:16.736699 12086 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:18:16.736712 12086 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:18:16.736732 12086 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:18:16.737813 12086 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:18:16.737834 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.737844 12086 net.cpp:165] Memory required for data: 1344001500\nI1212 06:18:16.737862 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:18:16.737884 12086 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 06:18:16.737897 12086 net.cpp:434] L3_b5_cbr1_bn <- 
L3_b5_cbr1_conv_top\nI1212 06:18:16.737920 12086 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:18:16.739282 12086 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:18:16.739305 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.739315 12086 net.cpp:165] Memory required for data: 1346049500\nI1212 06:18:16.739336 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:18:16.739358 12086 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:18:16.739372 12086 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:18:16.739387 12086 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.739486 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:18:16.739683 12086 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:18:16.739702 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.739712 12086 net.cpp:165] Memory required for data: 1348097500\nI1212 06:18:16.739732 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:18:16.739753 12086 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:18:16.739765 12086 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:18:16.739780 12086 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.739800 12086 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:18:16.739820 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.739830 12086 net.cpp:165] Memory required for data: 1350145500\nI1212 06:18:16.739841 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:18:16.739862 12086 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:18:16.739876 12086 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:18:16.739898 12086 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:18:16.742012 12086 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:18:16.742040 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.742050 12086 net.cpp:165] Memory 
required for data: 1352193500\nI1212 06:18:16.742069 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:18:16.742100 12086 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:18:16.742112 12086 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:18:16.742130 12086 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:18:16.742431 12086 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:18:16.742450 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.742460 12086 net.cpp:165] Memory required for data: 1354241500\nI1212 06:18:16.742482 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:18:16.742504 12086 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:18:16.742517 12086 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:18:16.742533 12086 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.742627 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:18:16.742820 12086 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:18:16.742839 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.742849 12086 net.cpp:165] Memory required for data: 1356289500\nI1212 06:18:16.742867 12086 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:18:16.742888 12086 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:18:16.742902 12086 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:18:16.742915 12086 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:18:16.742936 12086 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:18:16.742992 12086 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:18:16.743011 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.743021 12086 net.cpp:165] Memory required for data: 1358337500\nI1212 06:18:16.743033 12086 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 06:18:16.743053 12086 net.cpp:100] Creating Layer L3_b5_relu\nI1212 
06:18:16.743065 12086 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:18:16.743088 12086 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.743108 12086 net.cpp:150] Setting up L3_b5_relu\nI1212 06:18:16.743124 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.743134 12086 net.cpp:165] Memory required for data: 1360385500\nI1212 06:18:16.743144 12086 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.743157 12086 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.743170 12086 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:18:16.743185 12086 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:18:16.743206 12086 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:18:16.743290 12086 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.743310 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.743324 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.743333 12086 net.cpp:165] Memory required for data: 1364481500\nI1212 06:18:16.743343 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:18:16.743369 12086 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:18:16.743382 12086 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:18:16.743403 12086 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:18:16.744472 12086 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:18:16.744493 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.744501 12086 net.cpp:165] Memory required for data: 1366529500\nI1212 06:18:16.744519 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:18:16.744542 12086 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 
06:18:16.744555 12086 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:18:16.744581 12086 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:18:16.744890 12086 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:18:16.744910 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.744920 12086 net.cpp:165] Memory required for data: 1368577500\nI1212 06:18:16.744941 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:18:16.744958 12086 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:18:16.744971 12086 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:18:16.744987 12086 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.745095 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:18:16.745292 12086 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:18:16.745311 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.745321 12086 net.cpp:165] Memory required for data: 1370625500\nI1212 06:18:16.745339 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:18:16.745355 12086 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:18:16.745368 12086 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:18:16.745389 12086 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.745411 12086 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:18:16.745426 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.745435 12086 net.cpp:165] Memory required for data: 1372673500\nI1212 06:18:16.745446 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:18:16.745471 12086 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:18:16.745486 12086 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:18:16.745503 12086 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:18:16.746568 12086 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 06:18:16.746588 12086 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:18:16.746598 12086 net.cpp:165] Memory required for data: 1374721500\nI1212 06:18:16.746616 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:18:16.746639 12086 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:18:16.746651 12086 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:18:16.746668 12086 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:18:16.746968 12086 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:18:16.746986 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.746996 12086 net.cpp:165] Memory required for data: 1376769500\nI1212 06:18:16.747018 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:18:16.747042 12086 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:18:16.747056 12086 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:18:16.747078 12086 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.747184 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:18:16.747377 12086 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:18:16.747397 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.747406 12086 net.cpp:165] Memory required for data: 1378817500\nI1212 06:18:16.747426 12086 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:18:16.747445 12086 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:18:16.747458 12086 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:18:16.747472 12086 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:18:16.747493 12086 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:18:16.747548 12086 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:18:16.747567 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.747577 12086 net.cpp:165] Memory required for data: 1380865500\nI1212 06:18:16.747587 12086 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 
06:18:16.747606 12086 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:18:16.747620 12086 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:18:16.747634 12086 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.747669 12086 net.cpp:150] Setting up L3_b6_relu\nI1212 06:18:16.747686 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.747696 12086 net.cpp:165] Memory required for data: 1382913500\nI1212 06:18:16.747705 12086 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.747720 12086 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.747732 12086 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:18:16.747747 12086 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:18:16.747768 12086 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:18:16.747855 12086 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.747874 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.747887 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.747897 12086 net.cpp:165] Memory required for data: 1387009500\nI1212 06:18:16.747907 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:18:16.747932 12086 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:18:16.747946 12086 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:18:16.747965 12086 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:18:16.749032 12086 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:18:16.749053 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.749061 12086 net.cpp:165] Memory required for data: 1389057500\nI1212 06:18:16.749086 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:18:16.749112 
12086 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:18:16.749126 12086 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:18:16.749148 12086 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:18:16.749460 12086 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:18:16.749480 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.749490 12086 net.cpp:165] Memory required for data: 1391105500\nI1212 06:18:16.749511 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:18:16.749528 12086 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:18:16.749541 12086 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:18:16.749557 12086 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.749652 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:18:16.749848 12086 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:18:16.749867 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.749876 12086 net.cpp:165] Memory required for data: 1393153500\nI1212 06:18:16.749896 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:18:16.749948 12086 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:18:16.749963 12086 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:18:16.749979 12086 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.749999 12086 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:18:16.750013 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.750022 12086 net.cpp:165] Memory required for data: 1395201500\nI1212 06:18:16.750035 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:18:16.750061 12086 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:18:16.750082 12086 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:18:16.750103 12086 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 06:18:16.751190 12086 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 
06:18:16.751211 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.751221 12086 net.cpp:165] Memory required for data: 1397249500\nI1212 06:18:16.751242 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:18:16.751265 12086 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:18:16.751277 12086 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:18:16.751302 12086 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:18:16.751621 12086 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:18:16.751641 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.751652 12086 net.cpp:165] Memory required for data: 1399297500\nI1212 06:18:16.751672 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:18:16.751689 12086 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:18:16.751701 12086 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:18:16.751718 12086 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.751814 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:18:16.752013 12086 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:18:16.752033 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.752043 12086 net.cpp:165] Memory required for data: 1401345500\nI1212 06:18:16.752061 12086 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:18:16.752085 12086 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:18:16.752099 12086 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:18:16.752112 12086 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:18:16.752137 12086 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:18:16.752195 12086 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:18:16.752219 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.752229 12086 net.cpp:165] Memory required for data: 1403393500\nI1212 06:18:16.752240 12086 
layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:18:16.752255 12086 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:18:16.752267 12086 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:18:16.752282 12086 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.752301 12086 net.cpp:150] Setting up L3_b7_relu\nI1212 06:18:16.752315 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.752326 12086 net.cpp:165] Memory required for data: 1405441500\nI1212 06:18:16.752336 12086 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.752357 12086 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.752368 12086 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:18:16.752384 12086 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:18:16.752405 12086 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:18:16.752492 12086 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.752511 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.752524 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.752534 12086 net.cpp:165] Memory required for data: 1409537500\nI1212 06:18:16.752544 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:18:16.752565 12086 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:18:16.752578 12086 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:18:16.752602 12086 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:18:16.753679 12086 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:18:16.753698 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.753708 12086 net.cpp:165] Memory required for data: 1411585500\nI1212 06:18:16.753726 12086 
layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:18:16.753743 12086 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:18:16.753756 12086 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:18:16.753779 12086 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:18:16.754099 12086 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:18:16.754130 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.754142 12086 net.cpp:165] Memory required for data: 1413633500\nI1212 06:18:16.754163 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:18:16.754190 12086 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:18:16.754204 12086 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:18:16.754220 12086 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.754317 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:18:16.754513 12086 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:18:16.754534 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.754542 12086 net.cpp:165] Memory required for data: 1415681500\nI1212 06:18:16.754561 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:18:16.754577 12086 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:18:16.754590 12086 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:18:16.754609 12086 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.754629 12086 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:18:16.754644 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.754654 12086 net.cpp:165] Memory required for data: 1417729500\nI1212 06:18:16.754664 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:18:16.754690 12086 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:18:16.754703 12086 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:18:16.754722 12086 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 
06:18:16.755792 12086 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:18:16.755813 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.755822 12086 net.cpp:165] Memory required for data: 1419777500\nI1212 06:18:16.755841 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:18:16.755867 12086 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:18:16.755880 12086 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:18:16.755903 12086 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:18:16.756217 12086 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:18:16.756237 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.756247 12086 net.cpp:165] Memory required for data: 1421825500\nI1212 06:18:16.756268 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:18:16.756285 12086 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:18:16.756296 12086 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:18:16.756312 12086 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.756412 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:18:16.756609 12086 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:18:16.756629 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.756639 12086 net.cpp:165] Memory required for data: 1423873500\nI1212 06:18:16.756657 12086 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:18:16.756673 12086 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:18:16.756685 12086 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:18:16.756698 12086 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:18:16.756721 12086 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:18:16.756777 12086 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:18:16.756800 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.756811 12086 net.cpp:165] Memory 
required for data: 1425921500\nI1212 06:18:16.756821 12086 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:18:16.756836 12086 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:18:16.756849 12086 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:18:16.756863 12086 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.756881 12086 net.cpp:150] Setting up L3_b8_relu\nI1212 06:18:16.756896 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.756906 12086 net.cpp:165] Memory required for data: 1427969500\nI1212 06:18:16.756916 12086 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.756940 12086 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.756961 12086 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:18:16.756978 12086 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:18:16.757001 12086 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:18:16.757095 12086 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.757114 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.757128 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.757136 12086 net.cpp:165] Memory required for data: 1432065500\nI1212 06:18:16.757146 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:18:16.757169 12086 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:18:16.757181 12086 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:18:16.757205 12086 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:18:16.759315 12086 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:18:16.759336 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.759347 12086 net.cpp:165] Memory required for data: 
1434113500\nI1212 06:18:16.759366 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:18:16.759388 12086 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:18:16.759402 12086 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:18:16.759423 12086 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:18:16.759728 12086 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:18:16.759747 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.759757 12086 net.cpp:165] Memory required for data: 1436161500\nI1212 06:18:16.759779 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:18:16.759796 12086 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:18:16.759809 12086 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:18:16.759829 12086 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.759927 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:18:16.760138 12086 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:18:16.760156 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.760166 12086 net.cpp:165] Memory required for data: 1438209500\nI1212 06:18:16.760185 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:18:16.760201 12086 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:18:16.760213 12086 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:18:16.760239 12086 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.760262 12086 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:18:16.760277 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.760288 12086 net.cpp:165] Memory required for data: 1440257500\nI1212 06:18:16.760298 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:18:16.760324 12086 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:18:16.760339 12086 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1212 06:18:16.760360 12086 net.cpp:408] 
L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:18:16.761430 12086 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:18:16.761451 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.761459 12086 net.cpp:165] Memory required for data: 1442305500\nI1212 06:18:16.761477 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:18:16.761494 12086 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:18:16.761507 12086 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:18:16.761528 12086 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:18:16.761837 12086 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:18:16.761859 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.761870 12086 net.cpp:165] Memory required for data: 1444353500\nI1212 06:18:16.761893 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:18:16.761920 12086 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:18:16.761934 12086 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:18:16.761950 12086 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.762048 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:18:16.762246 12086 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:18:16.762265 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.762274 12086 net.cpp:165] Memory required for data: 1446401500\nI1212 06:18:16.762293 12086 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:18:16.762315 12086 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:18:16.762327 12086 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:18:16.762341 12086 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:18:16.762357 12086 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:18:16.762418 12086 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 06:18:16.762436 12086 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:18:16.762447 12086 net.cpp:165] Memory required for data: 1448449500\nI1212 06:18:16.762456 12086 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:18:16.762471 12086 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:18:16.762483 12086 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:18:16.762498 12086 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.762517 12086 net.cpp:150] Setting up L3_b9_relu\nI1212 06:18:16.762532 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.762542 12086 net.cpp:165] Memory required for data: 1450497500\nI1212 06:18:16.762552 12086 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:18:16.762567 12086 net.cpp:100] Creating Layer post_pool\nI1212 06:18:16.762580 12086 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:18:16.762599 12086 net.cpp:408] post_pool -> post_pool\nI1212 06:18:16.762660 12086 net.cpp:150] Setting up post_pool\nI1212 06:18:16.762677 12086 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:18:16.762688 12086 net.cpp:165] Memory required for data: 1450529500\nI1212 06:18:16.762698 12086 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:18:16.762789 12086 net.cpp:100] Creating Layer post_FC\nI1212 06:18:16.762805 12086 net.cpp:434] post_FC <- post_pool\nI1212 06:18:16.762830 12086 net.cpp:408] post_FC -> post_FC_top\nI1212 06:18:16.763098 12086 net.cpp:150] Setting up post_FC\nI1212 06:18:16.763118 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.763128 12086 net.cpp:165] Memory required for data: 1450534500\nI1212 06:18:16.763145 12086 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:18:16.763162 12086 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:18:16.763175 12086 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:18:16.763196 12086 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1212 06:18:16.763218 12086 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:18:16.763316 12086 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:18:16.763335 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.763347 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.763357 12086 net.cpp:165] Memory required for data: 1450544500\nI1212 06:18:16.763367 12086 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:18:16.763382 12086 net.cpp:100] Creating Layer accuracy\nI1212 06:18:16.763394 12086 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:18:16.763408 12086 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:18:16.763424 12086 net.cpp:408] accuracy -> accuracy\nI1212 06:18:16.763535 12086 net.cpp:150] Setting up accuracy\nI1212 06:18:16.763555 12086 net.cpp:157] Top shape: (1)\nI1212 06:18:16.763564 12086 net.cpp:165] Memory required for data: 1450544504\nI1212 06:18:16.763576 12086 layer_factory.hpp:77] Creating layer loss\nI1212 06:18:16.763590 12086 net.cpp:100] Creating Layer loss\nI1212 06:18:16.763613 12086 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:18:16.763628 12086 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:18:16.763649 12086 net.cpp:408] loss -> loss\nI1212 06:18:16.764945 12086 layer_factory.hpp:77] Creating layer loss\nI1212 06:18:16.765126 12086 net.cpp:150] Setting up loss\nI1212 06:18:16.765146 12086 net.cpp:157] Top shape: (1)\nI1212 06:18:16.765156 12086 net.cpp:160]     with loss weight 1\nI1212 06:18:16.765266 12086 net.cpp:165] Memory required for data: 1450544508\nI1212 06:18:16.765281 12086 net.cpp:226] loss needs backward computation.\nI1212 06:18:16.765293 12086 net.cpp:228] accuracy does not need backward computation.\nI1212 06:18:16.765305 12086 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:18:16.765316 12086 net.cpp:226] post_FC needs backward computation.\nI1212 06:18:16.765326 12086 net.cpp:226] post_pool needs backward 
computation.\nI1212 06:18:16.765337 12086 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:18:16.765347 12086 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.765358 12086 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.765367 12086 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.765377 12086 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.765388 12086 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.765398 12086 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.765408 12086 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.765417 12086 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.765429 12086 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:18:16.765439 12086 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:18:16.765450 12086 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.765460 12086 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.765470 12086 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.765480 12086 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.765491 12086 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.765501 12086 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.765511 12086 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:18:16.765522 12086 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.765533 12086 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:18:16.765543 12086 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:18:16.765554 12086 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.765565 12086 net.cpp:226] 
L3_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.765575 12086 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.765594 12086 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.765606 12086 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.765616 12086 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.765626 12086 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.765637 12086 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.765648 12086 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:18:16.765660 12086 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:18:16.765668 12086 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.765681 12086 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.765691 12086 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.765702 12086 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.765712 12086 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.765730 12086 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.765741 12086 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.765753 12086 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.765763 12086 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:18:16.765774 12086 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:18:16.765784 12086 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.765795 12086 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:18:16.765806 12086 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.765816 12086 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.765827 
12086 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.765837 12086 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.765847 12086 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.765858 12086 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.765869 12086 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:18:16.765879 12086 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:18:16.765890 12086 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.765902 12086 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.765911 12086 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.765923 12086 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.765933 12086 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.765944 12086 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.765954 12086 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.765964 12086 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.765975 12086 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:18:16.765986 12086 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:18:16.765996 12086 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.766007 12086 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.766018 12086 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.766029 12086 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.766039 12086 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.766049 12086 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.766060 12086 net.cpp:226] L3_b3_cbr1_bn needs backward 
computation.\nI1212 06:18:16.766078 12086 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.766091 12086 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:18:16.766103 12086 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:18:16.766113 12086 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.766124 12086 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.766135 12086 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.766146 12086 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.766157 12086 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.766167 12086 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.766177 12086 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.766189 12086 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.766206 12086 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:18:16.766216 12086 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:18:16.766229 12086 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:18:16.766239 12086 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:18:16.766258 12086 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.766270 12086 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:18:16.766283 12086 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.766294 12086 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.766304 12086 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.766314 12086 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:18:16.766324 12086 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.766335 12086 net.cpp:226] L3_b1_cbr1_bn 
needs backward computation.\nI1212 06:18:16.766346 12086 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.766357 12086 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:18:16.766368 12086 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:18:16.766378 12086 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.766391 12086 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.766402 12086 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.766413 12086 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.766424 12086 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.766435 12086 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.766445 12086 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.766458 12086 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.766469 12086 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:18:16.766480 12086 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:18:16.766490 12086 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.766502 12086 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.766513 12086 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.766525 12086 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.766535 12086 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.766546 12086 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.766556 12086 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:18:16.766566 12086 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.766577 12086 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 
06:18:16.766589 12086 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:18:16.766600 12086 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.766610 12086 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.766621 12086 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.766633 12086 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.766644 12086 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.766654 12086 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.766664 12086 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.766676 12086 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.766687 12086 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:18:16.766698 12086 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:18:16.766708 12086 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.766721 12086 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.766731 12086 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.766742 12086 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.766753 12086 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.766764 12086 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.766774 12086 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.766793 12086 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.766805 12086 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:18:16.766815 12086 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:18:16.766826 12086 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.766837 12086 net.cpp:226] L2_b5_cbr2_scale needs backward 
computation.\nI1212 06:18:16.766849 12086 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.766860 12086 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.766870 12086 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.766880 12086 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.766891 12086 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.766902 12086 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.766913 12086 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:18:16.766926 12086 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:18:16.766937 12086 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.766953 12086 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.766965 12086 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.766978 12086 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.766988 12086 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.766999 12086 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.767010 12086 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.767021 12086 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.767032 12086 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:18:16.767045 12086 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:18:16.767055 12086 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.767066 12086 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.767086 12086 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.767097 12086 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.767108 12086 net.cpp:226] 
L2_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.767119 12086 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.767130 12086 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:18:16.767143 12086 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.767153 12086 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:18:16.767165 12086 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:18:16.767176 12086 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.767189 12086 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.767199 12086 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.767210 12086 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.767221 12086 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.767232 12086 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.767242 12086 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.767253 12086 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.767266 12086 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:18:16.767277 12086 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:18:16.767288 12086 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:18:16.767299 12086 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:18:16.767310 12086 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.767323 12086 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:18:16.767341 12086 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.767354 12086 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.767365 12086 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.767377 12086 
net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:18:16.767387 12086 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.767398 12086 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:18:16.767410 12086 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.767421 12086 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:18:16.767432 12086 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:18:16.767443 12086 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.767457 12086 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.767468 12086 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.767479 12086 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.767491 12086 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.767503 12086 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.767513 12086 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.767524 12086 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.767536 12086 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:18:16.767547 12086 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:18:16.767558 12086 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.767570 12086 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.767581 12086 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.767593 12086 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.767604 12086 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.767616 12086 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.767627 12086 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 
06:18:16.767638 12086 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.767650 12086 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:18:16.767662 12086 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:18:16.767673 12086 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.767685 12086 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.767696 12086 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.767709 12086 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.767720 12086 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.767730 12086 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.767742 12086 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.767755 12086 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.767765 12086 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:18:16.767777 12086 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:18:16.767789 12086 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.767802 12086 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.767812 12086 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.767824 12086 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.767837 12086 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.767848 12086 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.767858 12086 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.767869 12086 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.767890 12086 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:18:16.767902 12086 net.cpp:226] 
L1_b5_relu needs backward computation.\nI1212 06:18:16.767913 12086 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.767926 12086 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:18:16.767938 12086 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.767951 12086 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.767961 12086 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.767972 12086 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.767983 12086 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.767995 12086 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.768007 12086 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:18:16.768018 12086 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:18:16.768030 12086 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.768043 12086 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.768054 12086 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.768066 12086 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.768087 12086 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.768098 12086 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.768110 12086 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.768122 12086 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.768133 12086 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:18:16.768146 12086 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:18:16.768157 12086 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.768168 12086 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.768179 
12086 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.768191 12086 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.768203 12086 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.768214 12086 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.768225 12086 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:18:16.768236 12086 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.768249 12086 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:18:16.768260 12086 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:18:16.768271 12086 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.768285 12086 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.768296 12086 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.768308 12086 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.768321 12086 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.768332 12086 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.768343 12086 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.768359 12086 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.768373 12086 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:18:16.768384 12086 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:18:16.768395 12086 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.768409 12086 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.768419 12086 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.768431 12086 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.768443 12086 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 
06:18:16.768463 12086 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.768474 12086 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:18:16.768486 12086 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.768498 12086 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:18:16.768510 12086 net.cpp:226] pre_relu needs backward computation.\nI1212 06:18:16.768522 12086 net.cpp:226] pre_scale needs backward computation.\nI1212 06:18:16.768532 12086 net.cpp:226] pre_bn needs backward computation.\nI1212 06:18:16.768543 12086 net.cpp:226] pre_conv needs backward computation.\nI1212 06:18:16.768556 12086 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:18:16.768569 12086 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:18:16.768579 12086 net.cpp:270] This network produces output accuracy\nI1212 06:18:16.768592 12086 net.cpp:270] This network produces output loss\nI1212 06:18:16.768945 12086 net.cpp:283] Network initialization done.\nI1212 06:18:16.778314 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:16.778373 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:16.778450 12086 solver.cpp:181] Creating test net (#0) specified by net file: examples/sc/architectures/arch.prototxt\nI1212 06:18:16.778858 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer dataLayer\nI1212 06:18:16.778883 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer pre_bn\nI1212 06:18:16.778903 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b1_cbr1_bn\nI1212 06:18:16.778923 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in 
layer L1_b1_cbr2_bn\nI1212 06:18:16.778945 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr1_bn\nI1212 06:18:16.778964 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b2_cbr2_bn\nI1212 06:18:16.778985 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr1_bn\nI1212 06:18:16.779005 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b3_cbr2_bn\nI1212 06:18:16.779024 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr1_bn\nI1212 06:18:16.779044 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b4_cbr2_bn\nI1212 06:18:16.779064 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr1_bn\nI1212 06:18:16.779093 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b5_cbr2_bn\nI1212 06:18:16.779114 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr1_bn\nI1212 06:18:16.779132 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b6_cbr2_bn\nI1212 06:18:16.779153 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr1_bn\nI1212 06:18:16.779172 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b7_cbr2_bn\nI1212 06:18:16.779192 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr1_bn\nI1212 06:18:16.779211 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b8_cbr2_bn\nI1212 06:18:16.779232 12086 net.cpp:322] The NetState phase (1) differed 
from the phase (0) specified by a rule in layer L1_b9_cbr1_bn\nI1212 06:18:16.779251 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L1_b9_cbr2_bn\nI1212 06:18:16.779280 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr1_bn\nI1212 06:18:16.779300 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b1_cbr2_bn\nI1212 06:18:16.779326 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr1_bn\nI1212 06:18:16.779345 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b2_cbr2_bn\nI1212 06:18:16.779366 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr1_bn\nI1212 06:18:16.779383 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b3_cbr2_bn\nI1212 06:18:16.779403 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr1_bn\nI1212 06:18:16.779422 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b4_cbr2_bn\nI1212 06:18:16.779439 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr1_bn\nI1212 06:18:16.779458 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b5_cbr2_bn\nI1212 06:18:16.779479 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr1_bn\nI1212 06:18:16.779497 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b6_cbr2_bn\nI1212 06:18:16.779517 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr1_bn\nI1212 06:18:16.779534 12086 
net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b7_cbr2_bn\nI1212 06:18:16.779553 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr1_bn\nI1212 06:18:16.779572 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b8_cbr2_bn\nI1212 06:18:16.779592 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr1_bn\nI1212 06:18:16.779611 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L2_b9_cbr2_bn\nI1212 06:18:16.779630 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr1_bn\nI1212 06:18:16.779649 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b1_cbr2_bn\nI1212 06:18:16.779675 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr1_bn\nI1212 06:18:16.779693 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b2_cbr2_bn\nI1212 06:18:16.779711 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr1_bn\nI1212 06:18:16.779731 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b3_cbr2_bn\nI1212 06:18:16.779749 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr1_bn\nI1212 06:18:16.779768 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b4_cbr2_bn\nI1212 06:18:16.779788 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b5_cbr1_bn\nI1212 06:18:16.779805 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer 
L3_b5_cbr2_bn\nI1212 06:18:16.779824 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr1_bn\nI1212 06:18:16.779844 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b6_cbr2_bn\nI1212 06:18:16.779871 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr1_bn\nI1212 06:18:16.779891 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b7_cbr2_bn\nI1212 06:18:16.779911 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr1_bn\nI1212 06:18:16.779929 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b8_cbr2_bn\nI1212 06:18:16.779950 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr1_bn\nI1212 06:18:16.779968 12086 net.cpp:322] The NetState phase (1) differed from the phase (0) specified by a rule in layer L3_b9_cbr2_bn\nI1212 06:18:16.781664 12086 net.cpp:58] Initializing net from parameters: \nname: \"Cifar-Resnet\"\nstate {\n  phase: TEST\n}\nlayer {\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer {\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: 
\"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  include {\n    
phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: 
\"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: 
\"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: 
\"L1_b7_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    
}\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\nlayer {\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\nlayer {\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer {\n  name: 
\"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n    data_filler {\n      type: \"constant\"\n      value: 0\n    }\n    shape {\n      dim: 125\n      dim: 16\n      dim: 16\n      dim: 16\n    }\n  }\n}\nlayer {\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    
pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: 
\"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  
bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\nlayer {\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer {\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\nlayer {\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  include {\n    phase: 
TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer {\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer {\n  name: \"L2_\nI1212 06:18:16.783496 12086 layer_factory.hpp:77] Creating layer dataLayer\nI1212 06:18:16.783766 12086 net.cpp:100] Creating Layer dataLayer\nI1212 06:18:16.783789 12086 net.cpp:408] dataLayer -> data_top\nI1212 06:18:16.783814 12086 net.cpp:408] dataLayer -> label\nI1212 06:18:16.783838 12086 data_transformer.cpp:25] Loading mean file from: examples/cifar10/mean.binaryproto\nI1212 06:18:16.796660 12094 db_lmdb.cpp:35] Opened lmdb examples/cifar10/cifar10_test_lmdb\nI1212 06:18:16.796936 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:16.805313 12086 net.cpp:150] Setting up dataLayer\nI1212 06:18:16.805337 12086 net.cpp:157] Top shape: 125 3 32 32 (384000)\nI1212 06:18:16.805351 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.805361 12086 net.cpp:165] Memory required for data: 1536500\nI1212 06:18:16.805372 12086 layer_factory.hpp:77] Creating layer label_dataLayer_1_split\nI1212 06:18:16.805418 12086 net.cpp:100] Creating Layer label_dataLayer_1_split\nI1212 06:18:16.805434 12086 net.cpp:434] label_dataLayer_1_split <- label\nI1212 06:18:16.805450 12086 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_0\nI1212 06:18:16.805475 12086 net.cpp:408] label_dataLayer_1_split -> label_dataLayer_1_split_1\nI1212 06:18:16.805637 12086 net.cpp:150] Setting up label_dataLayer_1_split\nI1212 06:18:16.805657 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.805670 12086 net.cpp:157] Top shape: 125 (125)\nI1212 06:18:16.805681 12086 net.cpp:165] Memory required for data: 1537500\nI1212 06:18:16.805693 12086 layer_factory.hpp:77] Creating layer pre_conv\nI1212 06:18:16.805723 12086 net.cpp:100] Creating Layer pre_conv\nI1212 06:18:16.805737 
12086 net.cpp:434] pre_conv <- data_top\nI1212 06:18:16.805799 12086 net.cpp:408] pre_conv -> pre_conv_top\nI1212 06:18:16.806247 12086 net.cpp:150] Setting up pre_conv\nI1212 06:18:16.806269 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.806293 12086 net.cpp:165] Memory required for data: 9729500\nI1212 06:18:16.806324 12086 layer_factory.hpp:77] Creating layer pre_bn\nI1212 06:18:16.806344 12086 net.cpp:100] Creating Layer pre_bn\nI1212 06:18:16.806354 12086 net.cpp:434] pre_bn <- pre_conv_top\nI1212 06:18:16.806391 12086 net.cpp:408] pre_bn -> pre_bn_top\nI1212 06:18:16.806800 12086 net.cpp:150] Setting up pre_bn\nI1212 06:18:16.806823 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.806834 12086 net.cpp:165] Memory required for data: 17921500\nI1212 06:18:16.806869 12086 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:18:16.806887 12086 net.cpp:100] Creating Layer pre_scale\nI1212 06:18:16.806902 12086 net.cpp:434] pre_scale <- pre_bn_top\nI1212 06:18:16.806918 12086 net.cpp:395] pre_scale -> pre_bn_top (in-place)\nI1212 06:18:16.807019 12086 layer_factory.hpp:77] Creating layer pre_scale\nI1212 06:18:16.807246 12086 net.cpp:150] Setting up pre_scale\nI1212 06:18:16.807268 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.807279 12086 net.cpp:165] Memory required for data: 26113500\nI1212 06:18:16.807301 12086 layer_factory.hpp:77] Creating layer pre_relu\nI1212 06:18:16.807322 12086 net.cpp:100] Creating Layer pre_relu\nI1212 06:18:16.807332 12086 net.cpp:434] pre_relu <- pre_bn_top\nI1212 06:18:16.807346 12086 net.cpp:395] pre_relu -> pre_bn_top (in-place)\nI1212 06:18:16.807379 12086 net.cpp:150] Setting up pre_relu\nI1212 06:18:16.807394 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.807404 12086 net.cpp:165] Memory required for data: 34305500\nI1212 06:18:16.807417 12086 layer_factory.hpp:77] Creating layer pre_bn_top_pre_relu_0_split\nI1212 06:18:16.807431 12086 
net.cpp:100] Creating Layer pre_bn_top_pre_relu_0_split\nI1212 06:18:16.807441 12086 net.cpp:434] pre_bn_top_pre_relu_0_split <- pre_bn_top\nI1212 06:18:16.807458 12086 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_0\nI1212 06:18:16.807478 12086 net.cpp:408] pre_bn_top_pre_relu_0_split -> pre_bn_top_pre_relu_0_split_1\nI1212 06:18:16.807634 12086 net.cpp:150] Setting up pre_bn_top_pre_relu_0_split\nI1212 06:18:16.807652 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.807667 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.807679 12086 net.cpp:165] Memory required for data: 50689500\nI1212 06:18:16.807690 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_conv\nI1212 06:18:16.807719 12086 net.cpp:100] Creating Layer L1_b1_cbr1_conv\nI1212 06:18:16.807734 12086 net.cpp:434] L1_b1_cbr1_conv <- pre_bn_top_pre_relu_0_split_0\nI1212 06:18:16.807754 12086 net.cpp:408] L1_b1_cbr1_conv -> L1_b1_cbr1_conv_top\nI1212 06:18:16.808240 12086 net.cpp:150] Setting up L1_b1_cbr1_conv\nI1212 06:18:16.808261 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.808271 12086 net.cpp:165] Memory required for data: 58881500\nI1212 06:18:16.808296 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_bn\nI1212 06:18:16.808331 12086 net.cpp:100] Creating Layer L1_b1_cbr1_bn\nI1212 06:18:16.808344 12086 net.cpp:434] L1_b1_cbr1_bn <- L1_b1_cbr1_conv_top\nI1212 06:18:16.808363 12086 net.cpp:408] L1_b1_cbr1_bn -> L1_b1_cbr1_bn_top\nI1212 06:18:16.809319 12086 net.cpp:150] Setting up L1_b1_cbr1_bn\nI1212 06:18:16.809340 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.809350 12086 net.cpp:165] Memory required for data: 67073500\nI1212 06:18:16.809545 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:18:16.809566 12086 net.cpp:100] Creating Layer L1_b1_cbr1_scale\nI1212 06:18:16.809577 12086 net.cpp:434] L1_b1_cbr1_scale <- L1_b1_cbr1_bn_top\nI1212 06:18:16.809593 12086 
net.cpp:395] L1_b1_cbr1_scale -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.809696 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_scale\nI1212 06:18:16.809891 12086 net.cpp:150] Setting up L1_b1_cbr1_scale\nI1212 06:18:16.809911 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.809921 12086 net.cpp:165] Memory required for data: 75265500\nI1212 06:18:16.809939 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr1_relu\nI1212 06:18:16.809967 12086 net.cpp:100] Creating Layer L1_b1_cbr1_relu\nI1212 06:18:16.809979 12086 net.cpp:434] L1_b1_cbr1_relu <- L1_b1_cbr1_bn_top\nI1212 06:18:16.809995 12086 net.cpp:395] L1_b1_cbr1_relu -> L1_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.810016 12086 net.cpp:150] Setting up L1_b1_cbr1_relu\nI1212 06:18:16.810031 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.810046 12086 net.cpp:165] Memory required for data: 83457500\nI1212 06:18:16.810057 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_conv\nI1212 06:18:16.810091 12086 net.cpp:100] Creating Layer L1_b1_cbr2_conv\nI1212 06:18:16.810103 12086 net.cpp:434] L1_b1_cbr2_conv <- L1_b1_cbr1_bn_top\nI1212 06:18:16.810125 12086 net.cpp:408] L1_b1_cbr2_conv -> L1_b1_cbr2_conv_top\nI1212 06:18:16.810524 12086 net.cpp:150] Setting up L1_b1_cbr2_conv\nI1212 06:18:16.810544 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.810554 12086 net.cpp:165] Memory required for data: 91649500\nI1212 06:18:16.810571 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_bn\nI1212 06:18:16.810593 12086 net.cpp:100] Creating Layer L1_b1_cbr2_bn\nI1212 06:18:16.810606 12086 net.cpp:434] L1_b1_cbr2_bn <- L1_b1_cbr2_conv_top\nI1212 06:18:16.810621 12086 net.cpp:408] L1_b1_cbr2_bn -> L1_b1_cbr2_bn_top\nI1212 06:18:16.811130 12086 net.cpp:150] Setting up L1_b1_cbr2_bn\nI1212 06:18:16.811151 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.811161 12086 net.cpp:165] Memory required for data: 99841500\nI1212 
06:18:16.811187 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:18:16.811209 12086 net.cpp:100] Creating Layer L1_b1_cbr2_scale\nI1212 06:18:16.811220 12086 net.cpp:434] L1_b1_cbr2_scale <- L1_b1_cbr2_bn_top\nI1212 06:18:16.811241 12086 net.cpp:395] L1_b1_cbr2_scale -> L1_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.811333 12086 layer_factory.hpp:77] Creating layer L1_b1_cbr2_scale\nI1212 06:18:16.811558 12086 net.cpp:150] Setting up L1_b1_cbr2_scale\nI1212 06:18:16.811579 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.811589 12086 net.cpp:165] Memory required for data: 108033500\nI1212 06:18:16.811610 12086 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise\nI1212 06:18:16.811632 12086 net.cpp:100] Creating Layer L1_b1_sum_eltwise\nI1212 06:18:16.811643 12086 net.cpp:434] L1_b1_sum_eltwise <- L1_b1_cbr2_bn_top\nI1212 06:18:16.811656 12086 net.cpp:434] L1_b1_sum_eltwise <- pre_bn_top_pre_relu_0_split_1\nI1212 06:18:16.811679 12086 net.cpp:408] L1_b1_sum_eltwise -> L1_b1_sum_eltwise_top\nI1212 06:18:16.811748 12086 net.cpp:150] Setting up L1_b1_sum_eltwise\nI1212 06:18:16.811769 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.811777 12086 net.cpp:165] Memory required for data: 116225500\nI1212 06:18:16.811789 12086 layer_factory.hpp:77] Creating layer L1_b1_relu\nI1212 06:18:16.811801 12086 net.cpp:100] Creating Layer L1_b1_relu\nI1212 06:18:16.811812 12086 net.cpp:434] L1_b1_relu <- L1_b1_sum_eltwise_top\nI1212 06:18:16.811830 12086 net.cpp:395] L1_b1_relu -> L1_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.811848 12086 net.cpp:150] Setting up L1_b1_relu\nI1212 06:18:16.811867 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.811877 12086 net.cpp:165] Memory required for data: 124417500\nI1212 06:18:16.811887 12086 layer_factory.hpp:77] Creating layer L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.811908 12086 net.cpp:100] Creating Layer 
L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.811920 12086 net.cpp:434] L1_b1_sum_eltwise_top_L1_b1_relu_0_split <- L1_b1_sum_eltwise_top\nI1212 06:18:16.811941 12086 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:18:16.811960 12086 net.cpp:408] L1_b1_sum_eltwise_top_L1_b1_relu_0_split -> L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:18:16.812053 12086 net.cpp:150] Setting up L1_b1_sum_eltwise_top_L1_b1_relu_0_split\nI1212 06:18:16.812079 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.812106 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.812120 12086 net.cpp:165] Memory required for data: 140801500\nI1212 06:18:16.812131 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_conv\nI1212 06:18:16.812161 12086 net.cpp:100] Creating Layer L1_b2_cbr1_conv\nI1212 06:18:16.812180 12086 net.cpp:434] L1_b2_cbr1_conv <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_0\nI1212 06:18:16.812197 12086 net.cpp:408] L1_b2_cbr1_conv -> L1_b2_cbr1_conv_top\nI1212 06:18:16.812695 12086 net.cpp:150] Setting up L1_b2_cbr1_conv\nI1212 06:18:16.812716 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.812724 12086 net.cpp:165] Memory required for data: 148993500\nI1212 06:18:16.812743 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_bn\nI1212 06:18:16.812764 12086 net.cpp:100] Creating Layer L1_b2_cbr1_bn\nI1212 06:18:16.812777 12086 net.cpp:434] L1_b2_cbr1_bn <- L1_b2_cbr1_conv_top\nI1212 06:18:16.812805 12086 net.cpp:408] L1_b2_cbr1_bn -> L1_b2_cbr1_bn_top\nI1212 06:18:16.813176 12086 net.cpp:150] Setting up L1_b2_cbr1_bn\nI1212 06:18:16.813197 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.813210 12086 net.cpp:165] Memory required for data: 157185500\nI1212 06:18:16.813232 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:18:16.813251 12086 net.cpp:100] Creating Layer L1_b2_cbr1_scale\nI1212 
06:18:16.813269 12086 net.cpp:434] L1_b2_cbr1_scale <- L1_b2_cbr1_bn_top\nI1212 06:18:16.813292 12086 net.cpp:395] L1_b2_cbr1_scale -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.813405 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_scale\nI1212 06:18:16.813802 12086 net.cpp:150] Setting up L1_b2_cbr1_scale\nI1212 06:18:16.813827 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.813838 12086 net.cpp:165] Memory required for data: 165377500\nI1212 06:18:16.813864 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr1_relu\nI1212 06:18:16.813884 12086 net.cpp:100] Creating Layer L1_b2_cbr1_relu\nI1212 06:18:16.813894 12086 net.cpp:434] L1_b2_cbr1_relu <- L1_b2_cbr1_bn_top\nI1212 06:18:16.813908 12086 net.cpp:395] L1_b2_cbr1_relu -> L1_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.813928 12086 net.cpp:150] Setting up L1_b2_cbr1_relu\nI1212 06:18:16.813942 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.813961 12086 net.cpp:165] Memory required for data: 173569500\nI1212 06:18:16.813972 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_conv\nI1212 06:18:16.814008 12086 net.cpp:100] Creating Layer L1_b2_cbr2_conv\nI1212 06:18:16.814026 12086 net.cpp:434] L1_b2_cbr2_conv <- L1_b2_cbr1_bn_top\nI1212 06:18:16.814052 12086 net.cpp:408] L1_b2_cbr2_conv -> L1_b2_cbr2_conv_top\nI1212 06:18:16.814507 12086 net.cpp:150] Setting up L1_b2_cbr2_conv\nI1212 06:18:16.814529 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.814539 12086 net.cpp:165] Memory required for data: 181761500\nI1212 06:18:16.814558 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_bn\nI1212 06:18:16.814582 12086 net.cpp:100] Creating Layer L1_b2_cbr2_bn\nI1212 06:18:16.814595 12086 net.cpp:434] L1_b2_cbr2_bn <- L1_b2_cbr2_conv_top\nI1212 06:18:16.814620 12086 net.cpp:408] L1_b2_cbr2_bn -> L1_b2_cbr2_bn_top\nI1212 06:18:16.814975 12086 net.cpp:150] Setting up L1_b2_cbr2_bn\nI1212 06:18:16.814996 12086 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:18:16.815007 12086 net.cpp:165] Memory required for data: 189953500\nI1212 06:18:16.815039 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:18:16.815062 12086 net.cpp:100] Creating Layer L1_b2_cbr2_scale\nI1212 06:18:16.815088 12086 net.cpp:434] L1_b2_cbr2_scale <- L1_b2_cbr2_bn_top\nI1212 06:18:16.815107 12086 net.cpp:395] L1_b2_cbr2_scale -> L1_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.815222 12086 layer_factory.hpp:77] Creating layer L1_b2_cbr2_scale\nI1212 06:18:16.815488 12086 net.cpp:150] Setting up L1_b2_cbr2_scale\nI1212 06:18:16.815508 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.815520 12086 net.cpp:165] Memory required for data: 198145500\nI1212 06:18:16.815539 12086 layer_factory.hpp:77] Creating layer L1_b2_sum_eltwise\nI1212 06:18:16.815570 12086 net.cpp:100] Creating Layer L1_b2_sum_eltwise\nI1212 06:18:16.815582 12086 net.cpp:434] L1_b2_sum_eltwise <- L1_b2_cbr2_bn_top\nI1212 06:18:16.815596 12086 net.cpp:434] L1_b2_sum_eltwise <- L1_b1_sum_eltwise_top_L1_b1_relu_0_split_1\nI1212 06:18:16.815618 12086 net.cpp:408] L1_b2_sum_eltwise -> L1_b2_sum_eltwise_top\nI1212 06:18:16.815690 12086 net.cpp:150] Setting up L1_b2_sum_eltwise\nI1212 06:18:16.815711 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.815721 12086 net.cpp:165] Memory required for data: 206337500\nI1212 06:18:16.815732 12086 layer_factory.hpp:77] Creating layer L1_b2_relu\nI1212 06:18:16.815747 12086 net.cpp:100] Creating Layer L1_b2_relu\nI1212 06:18:16.815757 12086 net.cpp:434] L1_b2_relu <- L1_b2_sum_eltwise_top\nI1212 06:18:16.815778 12086 net.cpp:395] L1_b2_relu -> L1_b2_sum_eltwise_top (in-place)\nI1212 06:18:16.815799 12086 net.cpp:150] Setting up L1_b2_relu\nI1212 06:18:16.815814 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.815825 12086 net.cpp:165] Memory required for data: 214529500\nI1212 06:18:16.815841 12086 layer_factory.hpp:77] Creating layer 
L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.815857 12086 net.cpp:100] Creating Layer L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.815868 12086 net.cpp:434] L1_b2_sum_eltwise_top_L1_b2_relu_0_split <- L1_b2_sum_eltwise_top\nI1212 06:18:16.815883 12086 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:18:16.815903 12086 net.cpp:408] L1_b2_sum_eltwise_top_L1_b2_relu_0_split -> L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:18:16.815994 12086 net.cpp:150] Setting up L1_b2_sum_eltwise_top_L1_b2_relu_0_split\nI1212 06:18:16.816015 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.816027 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.816036 12086 net.cpp:165] Memory required for data: 230913500\nI1212 06:18:16.816047 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_conv\nI1212 06:18:16.816067 12086 net.cpp:100] Creating Layer L1_b3_cbr1_conv\nI1212 06:18:16.816090 12086 net.cpp:434] L1_b3_cbr1_conv <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_0\nI1212 06:18:16.816120 12086 net.cpp:408] L1_b3_cbr1_conv -> L1_b3_cbr1_conv_top\nI1212 06:18:16.816565 12086 net.cpp:150] Setting up L1_b3_cbr1_conv\nI1212 06:18:16.816586 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.816596 12086 net.cpp:165] Memory required for data: 239105500\nI1212 06:18:16.816613 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_bn\nI1212 06:18:16.816630 12086 net.cpp:100] Creating Layer L1_b3_cbr1_bn\nI1212 06:18:16.816642 12086 net.cpp:434] L1_b3_cbr1_bn <- L1_b3_cbr1_conv_top\nI1212 06:18:16.816661 12086 net.cpp:408] L1_b3_cbr1_bn -> L1_b3_cbr1_bn_top\nI1212 06:18:16.817024 12086 net.cpp:150] Setting up L1_b3_cbr1_bn\nI1212 06:18:16.817046 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.817056 12086 net.cpp:165] Memory required for data: 247297500\nI1212 06:18:16.817085 12086 layer_factory.hpp:77] Creating layer 
L1_b3_cbr1_scale\nI1212 06:18:16.817119 12086 net.cpp:100] Creating Layer L1_b3_cbr1_scale\nI1212 06:18:16.817134 12086 net.cpp:434] L1_b3_cbr1_scale <- L1_b3_cbr1_bn_top\nI1212 06:18:16.817153 12086 net.cpp:395] L1_b3_cbr1_scale -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.817266 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_scale\nI1212 06:18:16.817499 12086 net.cpp:150] Setting up L1_b3_cbr1_scale\nI1212 06:18:16.817518 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.817528 12086 net.cpp:165] Memory required for data: 255489500\nI1212 06:18:16.817548 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr1_relu\nI1212 06:18:16.817565 12086 net.cpp:100] Creating Layer L1_b3_cbr1_relu\nI1212 06:18:16.817579 12086 net.cpp:434] L1_b3_cbr1_relu <- L1_b3_cbr1_bn_top\nI1212 06:18:16.817602 12086 net.cpp:395] L1_b3_cbr1_relu -> L1_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.817625 12086 net.cpp:150] Setting up L1_b3_cbr1_relu\nI1212 06:18:16.817652 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.817664 12086 net.cpp:165] Memory required for data: 263681500\nI1212 06:18:16.817674 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_conv\nI1212 06:18:16.817700 12086 net.cpp:100] Creating Layer L1_b3_cbr2_conv\nI1212 06:18:16.817713 12086 net.cpp:434] L1_b3_cbr2_conv <- L1_b3_cbr1_bn_top\nI1212 06:18:16.817734 12086 net.cpp:408] L1_b3_cbr2_conv -> L1_b3_cbr2_conv_top\nI1212 06:18:16.818195 12086 net.cpp:150] Setting up L1_b3_cbr2_conv\nI1212 06:18:16.818217 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.818225 12086 net.cpp:165] Memory required for data: 271873500\nI1212 06:18:16.818243 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_bn\nI1212 06:18:16.818274 12086 net.cpp:100] Creating Layer L1_b3_cbr2_bn\nI1212 06:18:16.818286 12086 net.cpp:434] L1_b3_cbr2_bn <- L1_b3_cbr2_conv_top\nI1212 06:18:16.818307 12086 net.cpp:408] L1_b3_cbr2_bn -> L1_b3_cbr2_bn_top\nI1212 06:18:16.818680 12086 
net.cpp:150] Setting up L1_b3_cbr2_bn\nI1212 06:18:16.818701 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.818712 12086 net.cpp:165] Memory required for data: 280065500\nI1212 06:18:16.818733 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:18:16.818753 12086 net.cpp:100] Creating Layer L1_b3_cbr2_scale\nI1212 06:18:16.818764 12086 net.cpp:434] L1_b3_cbr2_scale <- L1_b3_cbr2_bn_top\nI1212 06:18:16.818789 12086 net.cpp:395] L1_b3_cbr2_scale -> L1_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.818895 12086 layer_factory.hpp:77] Creating layer L1_b3_cbr2_scale\nI1212 06:18:16.819130 12086 net.cpp:150] Setting up L1_b3_cbr2_scale\nI1212 06:18:16.819152 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.819162 12086 net.cpp:165] Memory required for data: 288257500\nI1212 06:18:16.819180 12086 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise\nI1212 06:18:16.819200 12086 net.cpp:100] Creating Layer L1_b3_sum_eltwise\nI1212 06:18:16.819211 12086 net.cpp:434] L1_b3_sum_eltwise <- L1_b3_cbr2_bn_top\nI1212 06:18:16.819222 12086 net.cpp:434] L1_b3_sum_eltwise <- L1_b2_sum_eltwise_top_L1_b2_relu_0_split_1\nI1212 06:18:16.819247 12086 net.cpp:408] L1_b3_sum_eltwise -> L1_b3_sum_eltwise_top\nI1212 06:18:16.819308 12086 net.cpp:150] Setting up L1_b3_sum_eltwise\nI1212 06:18:16.819344 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.819356 12086 net.cpp:165] Memory required for data: 296449500\nI1212 06:18:16.819367 12086 layer_factory.hpp:77] Creating layer L1_b3_relu\nI1212 06:18:16.819383 12086 net.cpp:100] Creating Layer L1_b3_relu\nI1212 06:18:16.819396 12086 net.cpp:434] L1_b3_relu <- L1_b3_sum_eltwise_top\nI1212 06:18:16.819411 12086 net.cpp:395] L1_b3_relu -> L1_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.819433 12086 net.cpp:150] Setting up L1_b3_relu\nI1212 06:18:16.819447 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.819458 12086 net.cpp:165] Memory required for 
data: 304641500\nI1212 06:18:16.819468 12086 layer_factory.hpp:77] Creating layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.819485 12086 net.cpp:100] Creating Layer L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.819501 12086 net.cpp:434] L1_b3_sum_eltwise_top_L1_b3_relu_0_split <- L1_b3_sum_eltwise_top\nI1212 06:18:16.819519 12086 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:18:16.819540 12086 net.cpp:408] L1_b3_sum_eltwise_top_L1_b3_relu_0_split -> L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:18:16.819635 12086 net.cpp:150] Setting up L1_b3_sum_eltwise_top_L1_b3_relu_0_split\nI1212 06:18:16.819656 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.819669 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.819680 12086 net.cpp:165] Memory required for data: 321025500\nI1212 06:18:16.819691 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_conv\nI1212 06:18:16.819713 12086 net.cpp:100] Creating Layer L1_b4_cbr1_conv\nI1212 06:18:16.819726 12086 net.cpp:434] L1_b4_cbr1_conv <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_0\nI1212 06:18:16.819762 12086 net.cpp:408] L1_b4_cbr1_conv -> L1_b4_cbr1_conv_top\nI1212 06:18:16.820250 12086 net.cpp:150] Setting up L1_b4_cbr1_conv\nI1212 06:18:16.820271 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.820281 12086 net.cpp:165] Memory required for data: 329217500\nI1212 06:18:16.820297 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_bn\nI1212 06:18:16.820315 12086 net.cpp:100] Creating Layer L1_b4_cbr1_bn\nI1212 06:18:16.820327 12086 net.cpp:434] L1_b4_cbr1_bn <- L1_b4_cbr1_conv_top\nI1212 06:18:16.820341 12086 net.cpp:408] L1_b4_cbr1_bn -> L1_b4_cbr1_bn_top\nI1212 06:18:16.820670 12086 net.cpp:150] Setting up L1_b4_cbr1_bn\nI1212 06:18:16.820689 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.820699 12086 net.cpp:165] Memory required for data: 
337409500\nI1212 06:18:16.820720 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:18:16.820742 12086 net.cpp:100] Creating Layer L1_b4_cbr1_scale\nI1212 06:18:16.820753 12086 net.cpp:434] L1_b4_cbr1_scale <- L1_b4_cbr1_bn_top\nI1212 06:18:16.820768 12086 net.cpp:395] L1_b4_cbr1_scale -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.820871 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_scale\nI1212 06:18:16.821079 12086 net.cpp:150] Setting up L1_b4_cbr1_scale\nI1212 06:18:16.821100 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.821110 12086 net.cpp:165] Memory required for data: 345601500\nI1212 06:18:16.821128 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr1_relu\nI1212 06:18:16.821144 12086 net.cpp:100] Creating Layer L1_b4_cbr1_relu\nI1212 06:18:16.821154 12086 net.cpp:434] L1_b4_cbr1_relu <- L1_b4_cbr1_bn_top\nI1212 06:18:16.821174 12086 net.cpp:395] L1_b4_cbr1_relu -> L1_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.821194 12086 net.cpp:150] Setting up L1_b4_cbr1_relu\nI1212 06:18:16.821208 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.821218 12086 net.cpp:165] Memory required for data: 353793500\nI1212 06:18:16.821228 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_conv\nI1212 06:18:16.821261 12086 net.cpp:100] Creating Layer L1_b4_cbr2_conv\nI1212 06:18:16.821275 12086 net.cpp:434] L1_b4_cbr2_conv <- L1_b4_cbr1_bn_top\nI1212 06:18:16.821292 12086 net.cpp:408] L1_b4_cbr2_conv -> L1_b4_cbr2_conv_top\nI1212 06:18:16.821698 12086 net.cpp:150] Setting up L1_b4_cbr2_conv\nI1212 06:18:16.821718 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.821727 12086 net.cpp:165] Memory required for data: 361985500\nI1212 06:18:16.821744 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_bn\nI1212 06:18:16.821768 12086 net.cpp:100] Creating Layer L1_b4_cbr2_bn\nI1212 06:18:16.821779 12086 net.cpp:434] L1_b4_cbr2_bn <- L1_b4_cbr2_conv_top\nI1212 06:18:16.821796 12086 
net.cpp:408] L1_b4_cbr2_bn -> L1_b4_cbr2_bn_top\nI1212 06:18:16.822131 12086 net.cpp:150] Setting up L1_b4_cbr2_bn\nI1212 06:18:16.822154 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.822163 12086 net.cpp:165] Memory required for data: 370177500\nI1212 06:18:16.822185 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:18:16.822206 12086 net.cpp:100] Creating Layer L1_b4_cbr2_scale\nI1212 06:18:16.822217 12086 net.cpp:434] L1_b4_cbr2_scale <- L1_b4_cbr2_bn_top\nI1212 06:18:16.822233 12086 net.cpp:395] L1_b4_cbr2_scale -> L1_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.822331 12086 layer_factory.hpp:77] Creating layer L1_b4_cbr2_scale\nI1212 06:18:16.822536 12086 net.cpp:150] Setting up L1_b4_cbr2_scale\nI1212 06:18:16.822556 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.822566 12086 net.cpp:165] Memory required for data: 378369500\nI1212 06:18:16.822584 12086 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise\nI1212 06:18:16.822602 12086 net.cpp:100] Creating Layer L1_b4_sum_eltwise\nI1212 06:18:16.822613 12086 net.cpp:434] L1_b4_sum_eltwise <- L1_b4_cbr2_bn_top\nI1212 06:18:16.822625 12086 net.cpp:434] L1_b4_sum_eltwise <- L1_b3_sum_eltwise_top_L1_b3_relu_0_split_1\nI1212 06:18:16.822646 12086 net.cpp:408] L1_b4_sum_eltwise -> L1_b4_sum_eltwise_top\nI1212 06:18:16.822718 12086 net.cpp:150] Setting up L1_b4_sum_eltwise\nI1212 06:18:16.822737 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.822747 12086 net.cpp:165] Memory required for data: 386561500\nI1212 06:18:16.822758 12086 layer_factory.hpp:77] Creating layer L1_b4_relu\nI1212 06:18:16.822772 12086 net.cpp:100] Creating Layer L1_b4_relu\nI1212 06:18:16.822783 12086 net.cpp:434] L1_b4_relu <- L1_b4_sum_eltwise_top\nI1212 06:18:16.822803 12086 net.cpp:395] L1_b4_relu -> L1_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.822824 12086 net.cpp:150] Setting up L1_b4_relu\nI1212 06:18:16.822839 12086 net.cpp:157] Top shape: 125 16 32 
32 (2048000)\nI1212 06:18:16.822849 12086 net.cpp:165] Memory required for data: 394753500\nI1212 06:18:16.822860 12086 layer_factory.hpp:77] Creating layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.822881 12086 net.cpp:100] Creating Layer L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.822890 12086 net.cpp:434] L1_b4_sum_eltwise_top_L1_b4_relu_0_split <- L1_b4_sum_eltwise_top\nI1212 06:18:16.822906 12086 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:18:16.822926 12086 net.cpp:408] L1_b4_sum_eltwise_top_L1_b4_relu_0_split -> L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:18:16.823012 12086 net.cpp:150] Setting up L1_b4_sum_eltwise_top_L1_b4_relu_0_split\nI1212 06:18:16.823031 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.823043 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.823052 12086 net.cpp:165] Memory required for data: 411137500\nI1212 06:18:16.823062 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_conv\nI1212 06:18:16.823089 12086 net.cpp:100] Creating Layer L1_b5_cbr1_conv\nI1212 06:18:16.823102 12086 net.cpp:434] L1_b5_cbr1_conv <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_0\nI1212 06:18:16.823125 12086 net.cpp:408] L1_b5_cbr1_conv -> L1_b5_cbr1_conv_top\nI1212 06:18:16.823521 12086 net.cpp:150] Setting up L1_b5_cbr1_conv\nI1212 06:18:16.823541 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.823551 12086 net.cpp:165] Memory required for data: 419329500\nI1212 06:18:16.823583 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_bn\nI1212 06:18:16.823607 12086 net.cpp:100] Creating Layer L1_b5_cbr1_bn\nI1212 06:18:16.823619 12086 net.cpp:434] L1_b5_cbr1_bn <- L1_b5_cbr1_conv_top\nI1212 06:18:16.823637 12086 net.cpp:408] L1_b5_cbr1_bn -> L1_b5_cbr1_bn_top\nI1212 06:18:16.823959 12086 net.cpp:150] Setting up L1_b5_cbr1_bn\nI1212 06:18:16.823978 12086 net.cpp:157] Top shape: 125 16 32 32 
(2048000)\nI1212 06:18:16.823987 12086 net.cpp:165] Memory required for data: 427521500\nI1212 06:18:16.824008 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:18:16.824024 12086 net.cpp:100] Creating Layer L1_b5_cbr1_scale\nI1212 06:18:16.824035 12086 net.cpp:434] L1_b5_cbr1_scale <- L1_b5_cbr1_bn_top\nI1212 06:18:16.824055 12086 net.cpp:395] L1_b5_cbr1_scale -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.824162 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_scale\nI1212 06:18:16.824373 12086 net.cpp:150] Setting up L1_b5_cbr1_scale\nI1212 06:18:16.824391 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.824400 12086 net.cpp:165] Memory required for data: 435713500\nI1212 06:18:16.824419 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr1_relu\nI1212 06:18:16.824434 12086 net.cpp:100] Creating Layer L1_b5_cbr1_relu\nI1212 06:18:16.824445 12086 net.cpp:434] L1_b5_cbr1_relu <- L1_b5_cbr1_bn_top\nI1212 06:18:16.824462 12086 net.cpp:395] L1_b5_cbr1_relu -> L1_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.824483 12086 net.cpp:150] Setting up L1_b5_cbr1_relu\nI1212 06:18:16.824498 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.824506 12086 net.cpp:165] Memory required for data: 443905500\nI1212 06:18:16.824517 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_conv\nI1212 06:18:16.824542 12086 net.cpp:100] Creating Layer L1_b5_cbr2_conv\nI1212 06:18:16.824563 12086 net.cpp:434] L1_b5_cbr2_conv <- L1_b5_cbr1_bn_top\nI1212 06:18:16.824581 12086 net.cpp:408] L1_b5_cbr2_conv -> L1_b5_cbr2_conv_top\nI1212 06:18:16.825001 12086 net.cpp:150] Setting up L1_b5_cbr2_conv\nI1212 06:18:16.825021 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.825031 12086 net.cpp:165] Memory required for data: 452097500\nI1212 06:18:16.825049 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_bn\nI1212 06:18:16.825078 12086 net.cpp:100] Creating Layer L1_b5_cbr2_bn\nI1212 06:18:16.825091 12086 
net.cpp:434] L1_b5_cbr2_bn <- L1_b5_cbr2_conv_top\nI1212 06:18:16.825112 12086 net.cpp:408] L1_b5_cbr2_bn -> L1_b5_cbr2_bn_top\nI1212 06:18:16.825438 12086 net.cpp:150] Setting up L1_b5_cbr2_bn\nI1212 06:18:16.825456 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.825465 12086 net.cpp:165] Memory required for data: 460289500\nI1212 06:18:16.825486 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:18:16.825503 12086 net.cpp:100] Creating Layer L1_b5_cbr2_scale\nI1212 06:18:16.825515 12086 net.cpp:434] L1_b5_cbr2_scale <- L1_b5_cbr2_bn_top\nI1212 06:18:16.825536 12086 net.cpp:395] L1_b5_cbr2_scale -> L1_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.825634 12086 layer_factory.hpp:77] Creating layer L1_b5_cbr2_scale\nI1212 06:18:16.825842 12086 net.cpp:150] Setting up L1_b5_cbr2_scale\nI1212 06:18:16.825866 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.825876 12086 net.cpp:165] Memory required for data: 468481500\nI1212 06:18:16.825896 12086 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise\nI1212 06:18:16.825911 12086 net.cpp:100] Creating Layer L1_b5_sum_eltwise\nI1212 06:18:16.825922 12086 net.cpp:434] L1_b5_sum_eltwise <- L1_b5_cbr2_bn_top\nI1212 06:18:16.825934 12086 net.cpp:434] L1_b5_sum_eltwise <- L1_b4_sum_eltwise_top_L1_b4_relu_0_split_1\nI1212 06:18:16.825950 12086 net.cpp:408] L1_b5_sum_eltwise -> L1_b5_sum_eltwise_top\nI1212 06:18:16.826011 12086 net.cpp:150] Setting up L1_b5_sum_eltwise\nI1212 06:18:16.826030 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.826040 12086 net.cpp:165] Memory required for data: 476673500\nI1212 06:18:16.826050 12086 layer_factory.hpp:77] Creating layer L1_b5_relu\nI1212 06:18:16.826077 12086 net.cpp:100] Creating Layer L1_b5_relu\nI1212 06:18:16.826090 12086 net.cpp:434] L1_b5_relu <- L1_b5_sum_eltwise_top\nI1212 06:18:16.826103 12086 net.cpp:395] L1_b5_relu -> L1_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.826122 12086 net.cpp:150] 
Setting up L1_b5_relu\nI1212 06:18:16.826136 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.826145 12086 net.cpp:165] Memory required for data: 484865500\nI1212 06:18:16.826155 12086 layer_factory.hpp:77] Creating layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.826169 12086 net.cpp:100] Creating Layer L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.826179 12086 net.cpp:434] L1_b5_sum_eltwise_top_L1_b5_relu_0_split <- L1_b5_sum_eltwise_top\nI1212 06:18:16.826197 12086 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:18:16.826218 12086 net.cpp:408] L1_b5_sum_eltwise_top_L1_b5_relu_0_split -> L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:18:16.826299 12086 net.cpp:150] Setting up L1_b5_sum_eltwise_top_L1_b5_relu_0_split\nI1212 06:18:16.826326 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.826341 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.826351 12086 net.cpp:165] Memory required for data: 501249500\nI1212 06:18:16.826361 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_conv\nI1212 06:18:16.826380 12086 net.cpp:100] Creating Layer L1_b6_cbr1_conv\nI1212 06:18:16.826392 12086 net.cpp:434] L1_b6_cbr1_conv <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_0\nI1212 06:18:16.826411 12086 net.cpp:408] L1_b6_cbr1_conv -> L1_b6_cbr1_conv_top\nI1212 06:18:16.826817 12086 net.cpp:150] Setting up L1_b6_cbr1_conv\nI1212 06:18:16.826836 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.826846 12086 net.cpp:165] Memory required for data: 509441500\nI1212 06:18:16.826874 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_bn\nI1212 06:18:16.826896 12086 net.cpp:100] Creating Layer L1_b6_cbr1_bn\nI1212 06:18:16.826907 12086 net.cpp:434] L1_b6_cbr1_bn <- L1_b6_cbr1_conv_top\nI1212 06:18:16.826923 12086 net.cpp:408] L1_b6_cbr1_bn -> L1_b6_cbr1_bn_top\nI1212 06:18:16.827288 12086 net.cpp:150] Setting up 
L1_b6_cbr1_bn\nI1212 06:18:16.827308 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.827318 12086 net.cpp:165] Memory required for data: 517633500\nI1212 06:18:16.827338 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:18:16.827356 12086 net.cpp:100] Creating Layer L1_b6_cbr1_scale\nI1212 06:18:16.827366 12086 net.cpp:434] L1_b6_cbr1_scale <- L1_b6_cbr1_bn_top\nI1212 06:18:16.827386 12086 net.cpp:395] L1_b6_cbr1_scale -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.827489 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_scale\nI1212 06:18:16.827692 12086 net.cpp:150] Setting up L1_b6_cbr1_scale\nI1212 06:18:16.827715 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.827724 12086 net.cpp:165] Memory required for data: 525825500\nI1212 06:18:16.827741 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr1_relu\nI1212 06:18:16.827756 12086 net.cpp:100] Creating Layer L1_b6_cbr1_relu\nI1212 06:18:16.827767 12086 net.cpp:434] L1_b6_cbr1_relu <- L1_b6_cbr1_bn_top\nI1212 06:18:16.827780 12086 net.cpp:395] L1_b6_cbr1_relu -> L1_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.827798 12086 net.cpp:150] Setting up L1_b6_cbr1_relu\nI1212 06:18:16.827812 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.827821 12086 net.cpp:165] Memory required for data: 534017500\nI1212 06:18:16.827831 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_conv\nI1212 06:18:16.827857 12086 net.cpp:100] Creating Layer L1_b6_cbr2_conv\nI1212 06:18:16.827869 12086 net.cpp:434] L1_b6_cbr2_conv <- L1_b6_cbr1_bn_top\nI1212 06:18:16.827893 12086 net.cpp:408] L1_b6_cbr2_conv -> L1_b6_cbr2_conv_top\nI1212 06:18:16.828316 12086 net.cpp:150] Setting up L1_b6_cbr2_conv\nI1212 06:18:16.828337 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.828346 12086 net.cpp:165] Memory required for data: 542209500\nI1212 06:18:16.828363 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_bn\nI1212 06:18:16.828385 12086 
net.cpp:100] Creating Layer L1_b6_cbr2_bn\nI1212 06:18:16.828397 12086 net.cpp:434] L1_b6_cbr2_bn <- L1_b6_cbr2_conv_top\nI1212 06:18:16.828418 12086 net.cpp:408] L1_b6_cbr2_bn -> L1_b6_cbr2_bn_top\nI1212 06:18:16.828742 12086 net.cpp:150] Setting up L1_b6_cbr2_bn\nI1212 06:18:16.828761 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.828771 12086 net.cpp:165] Memory required for data: 550401500\nI1212 06:18:16.828791 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:18:16.828807 12086 net.cpp:100] Creating Layer L1_b6_cbr2_scale\nI1212 06:18:16.828819 12086 net.cpp:434] L1_b6_cbr2_scale <- L1_b6_cbr2_bn_top\nI1212 06:18:16.828833 12086 net.cpp:395] L1_b6_cbr2_scale -> L1_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.828939 12086 layer_factory.hpp:77] Creating layer L1_b6_cbr2_scale\nI1212 06:18:16.829149 12086 net.cpp:150] Setting up L1_b6_cbr2_scale\nI1212 06:18:16.829169 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.829179 12086 net.cpp:165] Memory required for data: 558593500\nI1212 06:18:16.829196 12086 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise\nI1212 06:18:16.829228 12086 net.cpp:100] Creating Layer L1_b6_sum_eltwise\nI1212 06:18:16.829241 12086 net.cpp:434] L1_b6_sum_eltwise <- L1_b6_cbr2_bn_top\nI1212 06:18:16.829254 12086 net.cpp:434] L1_b6_sum_eltwise <- L1_b5_sum_eltwise_top_L1_b5_relu_0_split_1\nI1212 06:18:16.829272 12086 net.cpp:408] L1_b6_sum_eltwise -> L1_b6_sum_eltwise_top\nI1212 06:18:16.829336 12086 net.cpp:150] Setting up L1_b6_sum_eltwise\nI1212 06:18:16.829357 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.829366 12086 net.cpp:165] Memory required for data: 566785500\nI1212 06:18:16.829377 12086 layer_factory.hpp:77] Creating layer L1_b6_relu\nI1212 06:18:16.829401 12086 net.cpp:100] Creating Layer L1_b6_relu\nI1212 06:18:16.829412 12086 net.cpp:434] L1_b6_relu <- L1_b6_sum_eltwise_top\nI1212 06:18:16.829430 12086 net.cpp:395] L1_b6_relu -> 
L1_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.829452 12086 net.cpp:150] Setting up L1_b6_relu\nI1212 06:18:16.829466 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.829476 12086 net.cpp:165] Memory required for data: 574977500\nI1212 06:18:16.829486 12086 layer_factory.hpp:77] Creating layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.829499 12086 net.cpp:100] Creating Layer L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.829509 12086 net.cpp:434] L1_b6_sum_eltwise_top_L1_b6_relu_0_split <- L1_b6_sum_eltwise_top\nI1212 06:18:16.829524 12086 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:18:16.829543 12086 net.cpp:408] L1_b6_sum_eltwise_top_L1_b6_relu_0_split -> L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:18:16.829632 12086 net.cpp:150] Setting up L1_b6_sum_eltwise_top_L1_b6_relu_0_split\nI1212 06:18:16.829650 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.829663 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.829671 12086 net.cpp:165] Memory required for data: 591361500\nI1212 06:18:16.829682 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_conv\nI1212 06:18:16.829702 12086 net.cpp:100] Creating Layer L1_b7_cbr1_conv\nI1212 06:18:16.829713 12086 net.cpp:434] L1_b7_cbr1_conv <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_0\nI1212 06:18:16.829737 12086 net.cpp:408] L1_b7_cbr1_conv -> L1_b7_cbr1_conv_top\nI1212 06:18:16.830154 12086 net.cpp:150] Setting up L1_b7_cbr1_conv\nI1212 06:18:16.830174 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.830184 12086 net.cpp:165] Memory required for data: 599553500\nI1212 06:18:16.830202 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_bn\nI1212 06:18:16.830219 12086 net.cpp:100] Creating Layer L1_b7_cbr1_bn\nI1212 06:18:16.830230 12086 net.cpp:434] L1_b7_cbr1_bn <- L1_b7_cbr1_conv_top\nI1212 06:18:16.830250 12086 net.cpp:408] L1_b7_cbr1_bn -> 
L1_b7_cbr1_bn_top\nI1212 06:18:16.830590 12086 net.cpp:150] Setting up L1_b7_cbr1_bn\nI1212 06:18:16.830610 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.830620 12086 net.cpp:165] Memory required for data: 607745500\nI1212 06:18:16.830641 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:18:16.830662 12086 net.cpp:100] Creating Layer L1_b7_cbr1_scale\nI1212 06:18:16.830672 12086 net.cpp:434] L1_b7_cbr1_scale <- L1_b7_cbr1_bn_top\nI1212 06:18:16.830688 12086 net.cpp:395] L1_b7_cbr1_scale -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.830787 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_scale\nI1212 06:18:16.830996 12086 net.cpp:150] Setting up L1_b7_cbr1_scale\nI1212 06:18:16.831015 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.831025 12086 net.cpp:165] Memory required for data: 615937500\nI1212 06:18:16.831043 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr1_relu\nI1212 06:18:16.831059 12086 net.cpp:100] Creating Layer L1_b7_cbr1_relu\nI1212 06:18:16.831077 12086 net.cpp:434] L1_b7_cbr1_relu <- L1_b7_cbr1_bn_top\nI1212 06:18:16.831099 12086 net.cpp:395] L1_b7_cbr1_relu -> L1_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.831120 12086 net.cpp:150] Setting up L1_b7_cbr1_relu\nI1212 06:18:16.831135 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.831146 12086 net.cpp:165] Memory required for data: 624129500\nI1212 06:18:16.831156 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_conv\nI1212 06:18:16.831182 12086 net.cpp:100] Creating Layer L1_b7_cbr2_conv\nI1212 06:18:16.831194 12086 net.cpp:434] L1_b7_cbr2_conv <- L1_b7_cbr1_bn_top\nI1212 06:18:16.831218 12086 net.cpp:408] L1_b7_cbr2_conv -> L1_b7_cbr2_conv_top\nI1212 06:18:16.831634 12086 net.cpp:150] Setting up L1_b7_cbr2_conv\nI1212 06:18:16.831653 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.831662 12086 net.cpp:165] Memory required for data: 632321500\nI1212 06:18:16.831691 12086 
layer_factory.hpp:77] Creating layer L1_b7_cbr2_bn\nI1212 06:18:16.831708 12086 net.cpp:100] Creating Layer L1_b7_cbr2_bn\nI1212 06:18:16.831719 12086 net.cpp:434] L1_b7_cbr2_bn <- L1_b7_cbr2_conv_top\nI1212 06:18:16.831737 12086 net.cpp:408] L1_b7_cbr2_bn -> L1_b7_cbr2_bn_top\nI1212 06:18:16.832082 12086 net.cpp:150] Setting up L1_b7_cbr2_bn\nI1212 06:18:16.832103 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.832111 12086 net.cpp:165] Memory required for data: 640513500\nI1212 06:18:16.832134 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:18:16.832154 12086 net.cpp:100] Creating Layer L1_b7_cbr2_scale\nI1212 06:18:16.832167 12086 net.cpp:434] L1_b7_cbr2_scale <- L1_b7_cbr2_bn_top\nI1212 06:18:16.832183 12086 net.cpp:395] L1_b7_cbr2_scale -> L1_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.832283 12086 layer_factory.hpp:77] Creating layer L1_b7_cbr2_scale\nI1212 06:18:16.832501 12086 net.cpp:150] Setting up L1_b7_cbr2_scale\nI1212 06:18:16.832520 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.832530 12086 net.cpp:165] Memory required for data: 648705500\nI1212 06:18:16.832547 12086 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise\nI1212 06:18:16.832564 12086 net.cpp:100] Creating Layer L1_b7_sum_eltwise\nI1212 06:18:16.832576 12086 net.cpp:434] L1_b7_sum_eltwise <- L1_b7_cbr2_bn_top\nI1212 06:18:16.832589 12086 net.cpp:434] L1_b7_sum_eltwise <- L1_b6_sum_eltwise_top_L1_b6_relu_0_split_1\nI1212 06:18:16.832610 12086 net.cpp:408] L1_b7_sum_eltwise -> L1_b7_sum_eltwise_top\nI1212 06:18:16.832669 12086 net.cpp:150] Setting up L1_b7_sum_eltwise\nI1212 06:18:16.832695 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.832705 12086 net.cpp:165] Memory required for data: 656897500\nI1212 06:18:16.832715 12086 layer_factory.hpp:77] Creating layer L1_b7_relu\nI1212 06:18:16.832729 12086 net.cpp:100] Creating Layer L1_b7_relu\nI1212 06:18:16.832741 12086 net.cpp:434] L1_b7_relu <- 
L1_b7_sum_eltwise_top\nI1212 06:18:16.832756 12086 net.cpp:395] L1_b7_relu -> L1_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.832777 12086 net.cpp:150] Setting up L1_b7_relu\nI1212 06:18:16.832792 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.832800 12086 net.cpp:165] Memory required for data: 665089500\nI1212 06:18:16.832810 12086 layer_factory.hpp:77] Creating layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.832829 12086 net.cpp:100] Creating Layer L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.832841 12086 net.cpp:434] L1_b7_sum_eltwise_top_L1_b7_relu_0_split <- L1_b7_sum_eltwise_top\nI1212 06:18:16.832856 12086 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:18:16.832875 12086 net.cpp:408] L1_b7_sum_eltwise_top_L1_b7_relu_0_split -> L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:18:16.832965 12086 net.cpp:150] Setting up L1_b7_sum_eltwise_top_L1_b7_relu_0_split\nI1212 06:18:16.832984 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.832998 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.833005 12086 net.cpp:165] Memory required for data: 681473500\nI1212 06:18:16.833016 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_conv\nI1212 06:18:16.833036 12086 net.cpp:100] Creating Layer L1_b8_cbr1_conv\nI1212 06:18:16.833047 12086 net.cpp:434] L1_b8_cbr1_conv <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_0\nI1212 06:18:16.833077 12086 net.cpp:408] L1_b8_cbr1_conv -> L1_b8_cbr1_conv_top\nI1212 06:18:16.833480 12086 net.cpp:150] Setting up L1_b8_cbr1_conv\nI1212 06:18:16.833500 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.833509 12086 net.cpp:165] Memory required for data: 689665500\nI1212 06:18:16.833526 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_bn\nI1212 06:18:16.833544 12086 net.cpp:100] Creating Layer L1_b8_cbr1_bn\nI1212 06:18:16.833554 12086 net.cpp:434] L1_b8_cbr1_bn <- 
L1_b8_cbr1_conv_top\nI1212 06:18:16.833570 12086 net.cpp:408] L1_b8_cbr1_bn -> L1_b8_cbr1_bn_top\nI1212 06:18:16.833925 12086 net.cpp:150] Setting up L1_b8_cbr1_bn\nI1212 06:18:16.833945 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.833956 12086 net.cpp:165] Memory required for data: 697857500\nI1212 06:18:16.833976 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:18:16.833997 12086 net.cpp:100] Creating Layer L1_b8_cbr1_scale\nI1212 06:18:16.834008 12086 net.cpp:434] L1_b8_cbr1_scale <- L1_b8_cbr1_bn_top\nI1212 06:18:16.834023 12086 net.cpp:395] L1_b8_cbr1_scale -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.834136 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_scale\nI1212 06:18:16.834342 12086 net.cpp:150] Setting up L1_b8_cbr1_scale\nI1212 06:18:16.834362 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.834370 12086 net.cpp:165] Memory required for data: 706049500\nI1212 06:18:16.834388 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr1_relu\nI1212 06:18:16.834401 12086 net.cpp:100] Creating Layer L1_b8_cbr1_relu\nI1212 06:18:16.834414 12086 net.cpp:434] L1_b8_cbr1_relu <- L1_b8_cbr1_bn_top\nI1212 06:18:16.834432 12086 net.cpp:395] L1_b8_cbr1_relu -> L1_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.834452 12086 net.cpp:150] Setting up L1_b8_cbr1_relu\nI1212 06:18:16.834466 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.834475 12086 net.cpp:165] Memory required for data: 714241500\nI1212 06:18:16.834484 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_conv\nI1212 06:18:16.834511 12086 net.cpp:100] Creating Layer L1_b8_cbr2_conv\nI1212 06:18:16.834523 12086 net.cpp:434] L1_b8_cbr2_conv <- L1_b8_cbr1_bn_top\nI1212 06:18:16.834542 12086 net.cpp:408] L1_b8_cbr2_conv -> L1_b8_cbr2_conv_top\nI1212 06:18:16.834956 12086 net.cpp:150] Setting up L1_b8_cbr2_conv\nI1212 06:18:16.834976 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.834985 12086 
net.cpp:165] Memory required for data: 722433500\nI1212 06:18:16.835002 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_bn\nI1212 06:18:16.835024 12086 net.cpp:100] Creating Layer L1_b8_cbr2_bn\nI1212 06:18:16.835036 12086 net.cpp:434] L1_b8_cbr2_bn <- L1_b8_cbr2_conv_top\nI1212 06:18:16.835052 12086 net.cpp:408] L1_b8_cbr2_bn -> L1_b8_cbr2_bn_top\nI1212 06:18:16.835412 12086 net.cpp:150] Setting up L1_b8_cbr2_bn\nI1212 06:18:16.835431 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.835441 12086 net.cpp:165] Memory required for data: 730625500\nI1212 06:18:16.835463 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:18:16.835479 12086 net.cpp:100] Creating Layer L1_b8_cbr2_scale\nI1212 06:18:16.835490 12086 net.cpp:434] L1_b8_cbr2_scale <- L1_b8_cbr2_bn_top\nI1212 06:18:16.835510 12086 net.cpp:395] L1_b8_cbr2_scale -> L1_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.835613 12086 layer_factory.hpp:77] Creating layer L1_b8_cbr2_scale\nI1212 06:18:16.835817 12086 net.cpp:150] Setting up L1_b8_cbr2_scale\nI1212 06:18:16.835837 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.835847 12086 net.cpp:165] Memory required for data: 738817500\nI1212 06:18:16.835865 12086 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise\nI1212 06:18:16.835880 12086 net.cpp:100] Creating Layer L1_b8_sum_eltwise\nI1212 06:18:16.835892 12086 net.cpp:434] L1_b8_sum_eltwise <- L1_b8_cbr2_bn_top\nI1212 06:18:16.835904 12086 net.cpp:434] L1_b8_sum_eltwise <- L1_b7_sum_eltwise_top_L1_b7_relu_0_split_1\nI1212 06:18:16.835924 12086 net.cpp:408] L1_b8_sum_eltwise -> L1_b8_sum_eltwise_top\nI1212 06:18:16.835983 12086 net.cpp:150] Setting up L1_b8_sum_eltwise\nI1212 06:18:16.836006 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.836016 12086 net.cpp:165] Memory required for data: 747009500\nI1212 06:18:16.836026 12086 layer_factory.hpp:77] Creating layer L1_b8_relu\nI1212 06:18:16.836040 12086 net.cpp:100] Creating 
Layer L1_b8_relu\nI1212 06:18:16.836052 12086 net.cpp:434] L1_b8_relu <- L1_b8_sum_eltwise_top\nI1212 06:18:16.836066 12086 net.cpp:395] L1_b8_relu -> L1_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.836091 12086 net.cpp:150] Setting up L1_b8_relu\nI1212 06:18:16.836115 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.836125 12086 net.cpp:165] Memory required for data: 755201500\nI1212 06:18:16.836135 12086 layer_factory.hpp:77] Creating layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.836154 12086 net.cpp:100] Creating Layer L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.836167 12086 net.cpp:434] L1_b8_sum_eltwise_top_L1_b8_relu_0_split <- L1_b8_sum_eltwise_top\nI1212 06:18:16.836182 12086 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:18:16.836202 12086 net.cpp:408] L1_b8_sum_eltwise_top_L1_b8_relu_0_split -> L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:18:16.836294 12086 net.cpp:150] Setting up L1_b8_sum_eltwise_top_L1_b8_relu_0_split\nI1212 06:18:16.836313 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.836326 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.836336 12086 net.cpp:165] Memory required for data: 771585500\nI1212 06:18:16.836346 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_conv\nI1212 06:18:16.836366 12086 net.cpp:100] Creating Layer L1_b9_cbr1_conv\nI1212 06:18:16.836380 12086 net.cpp:434] L1_b9_cbr1_conv <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_0\nI1212 06:18:16.836401 12086 net.cpp:408] L1_b9_cbr1_conv -> L1_b9_cbr1_conv_top\nI1212 06:18:16.836823 12086 net.cpp:150] Setting up L1_b9_cbr1_conv\nI1212 06:18:16.836843 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.836853 12086 net.cpp:165] Memory required for data: 779777500\nI1212 06:18:16.836871 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_bn\nI1212 06:18:16.836894 12086 net.cpp:100] Creating Layer 
L1_b9_cbr1_bn\nI1212 06:18:16.836905 12086 net.cpp:434] L1_b9_cbr1_bn <- L1_b9_cbr1_conv_top\nI1212 06:18:16.836922 12086 net.cpp:408] L1_b9_cbr1_bn -> L1_b9_cbr1_bn_top\nI1212 06:18:16.837257 12086 net.cpp:150] Setting up L1_b9_cbr1_bn\nI1212 06:18:16.837276 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.837286 12086 net.cpp:165] Memory required for data: 787969500\nI1212 06:18:16.837307 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:18:16.837324 12086 net.cpp:100] Creating Layer L1_b9_cbr1_scale\nI1212 06:18:16.837335 12086 net.cpp:434] L1_b9_cbr1_scale <- L1_b9_cbr1_bn_top\nI1212 06:18:16.837352 12086 net.cpp:395] L1_b9_cbr1_scale -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.837450 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_scale\nI1212 06:18:16.837654 12086 net.cpp:150] Setting up L1_b9_cbr1_scale\nI1212 06:18:16.837673 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.837683 12086 net.cpp:165] Memory required for data: 796161500\nI1212 06:18:16.837702 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr1_relu\nI1212 06:18:16.837719 12086 net.cpp:100] Creating Layer L1_b9_cbr1_relu\nI1212 06:18:16.837730 12086 net.cpp:434] L1_b9_cbr1_relu <- L1_b9_cbr1_bn_top\nI1212 06:18:16.837754 12086 net.cpp:395] L1_b9_cbr1_relu -> L1_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.837774 12086 net.cpp:150] Setting up L1_b9_cbr1_relu\nI1212 06:18:16.837790 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.837800 12086 net.cpp:165] Memory required for data: 804353500\nI1212 06:18:16.837810 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_conv\nI1212 06:18:16.837829 12086 net.cpp:100] Creating Layer L1_b9_cbr2_conv\nI1212 06:18:16.837842 12086 net.cpp:434] L1_b9_cbr2_conv <- L1_b9_cbr1_bn_top\nI1212 06:18:16.837865 12086 net.cpp:408] L1_b9_cbr2_conv -> L1_b9_cbr2_conv_top\nI1212 06:18:16.838284 12086 net.cpp:150] Setting up L1_b9_cbr2_conv\nI1212 06:18:16.838304 12086 net.cpp:157] 
Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.838313 12086 net.cpp:165] Memory required for data: 812545500\nI1212 06:18:16.838330 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_bn\nI1212 06:18:16.838348 12086 net.cpp:100] Creating Layer L1_b9_cbr2_bn\nI1212 06:18:16.838361 12086 net.cpp:434] L1_b9_cbr2_bn <- L1_b9_cbr2_conv_top\nI1212 06:18:16.838382 12086 net.cpp:408] L1_b9_cbr2_bn -> L1_b9_cbr2_bn_top\nI1212 06:18:16.838721 12086 net.cpp:150] Setting up L1_b9_cbr2_bn\nI1212 06:18:16.838744 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.838755 12086 net.cpp:165] Memory required for data: 820737500\nI1212 06:18:16.838809 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:18:16.838831 12086 net.cpp:100] Creating Layer L1_b9_cbr2_scale\nI1212 06:18:16.838845 12086 net.cpp:434] L1_b9_cbr2_scale <- L1_b9_cbr2_bn_top\nI1212 06:18:16.838860 12086 net.cpp:395] L1_b9_cbr2_scale -> L1_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.838959 12086 layer_factory.hpp:77] Creating layer L1_b9_cbr2_scale\nI1212 06:18:16.839170 12086 net.cpp:150] Setting up L1_b9_cbr2_scale\nI1212 06:18:16.839190 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.839200 12086 net.cpp:165] Memory required for data: 828929500\nI1212 06:18:16.839220 12086 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise\nI1212 06:18:16.839236 12086 net.cpp:100] Creating Layer L1_b9_sum_eltwise\nI1212 06:18:16.839247 12086 net.cpp:434] L1_b9_sum_eltwise <- L1_b9_cbr2_bn_top\nI1212 06:18:16.839260 12086 net.cpp:434] L1_b9_sum_eltwise <- L1_b8_sum_eltwise_top_L1_b8_relu_0_split_1\nI1212 06:18:16.839282 12086 net.cpp:408] L1_b9_sum_eltwise -> L1_b9_sum_eltwise_top\nI1212 06:18:16.839340 12086 net.cpp:150] Setting up L1_b9_sum_eltwise\nI1212 06:18:16.839359 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.839370 12086 net.cpp:165] Memory required for data: 837121500\nI1212 06:18:16.839380 12086 layer_factory.hpp:77] Creating layer 
L1_b9_relu\nI1212 06:18:16.839395 12086 net.cpp:100] Creating Layer L1_b9_relu\nI1212 06:18:16.839406 12086 net.cpp:434] L1_b9_relu <- L1_b9_sum_eltwise_top\nI1212 06:18:16.839426 12086 net.cpp:395] L1_b9_relu -> L1_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.839444 12086 net.cpp:150] Setting up L1_b9_relu\nI1212 06:18:16.839459 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.839468 12086 net.cpp:165] Memory required for data: 845313500\nI1212 06:18:16.839478 12086 layer_factory.hpp:77] Creating layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.839493 12086 net.cpp:100] Creating Layer L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.839504 12086 net.cpp:434] L1_b9_sum_eltwise_top_L1_b9_relu_0_split <- L1_b9_sum_eltwise_top\nI1212 06:18:16.839524 12086 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:18:16.839545 12086 net.cpp:408] L1_b9_sum_eltwise_top_L1_b9_relu_0_split -> L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:18:16.839628 12086 net.cpp:150] Setting up L1_b9_sum_eltwise_top_L1_b9_relu_0_split\nI1212 06:18:16.839646 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.839659 12086 net.cpp:157] Top shape: 125 16 32 32 (2048000)\nI1212 06:18:16.839668 12086 net.cpp:165] Memory required for data: 861697500\nI1212 06:18:16.839679 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_conv\nI1212 06:18:16.839699 12086 net.cpp:100] Creating Layer L2_b1_cbr1_conv\nI1212 06:18:16.839711 12086 net.cpp:434] L2_b1_cbr1_conv <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_0\nI1212 06:18:16.839735 12086 net.cpp:408] L2_b1_cbr1_conv -> L2_b1_cbr1_conv_top\nI1212 06:18:16.840163 12086 net.cpp:150] Setting up L2_b1_cbr1_conv\nI1212 06:18:16.840183 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.840193 12086 net.cpp:165] Memory required for data: 863745500\nI1212 06:18:16.840210 12086 layer_factory.hpp:77] Creating layer 
L2_b1_cbr1_bn\nI1212 06:18:16.840227 12086 net.cpp:100] Creating Layer L2_b1_cbr1_bn\nI1212 06:18:16.840240 12086 net.cpp:434] L2_b1_cbr1_bn <- L2_b1_cbr1_conv_top\nI1212 06:18:16.840261 12086 net.cpp:408] L2_b1_cbr1_bn -> L2_b1_cbr1_bn_top\nI1212 06:18:16.840577 12086 net.cpp:150] Setting up L2_b1_cbr1_bn\nI1212 06:18:16.840597 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.840606 12086 net.cpp:165] Memory required for data: 865793500\nI1212 06:18:16.840628 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:18:16.840652 12086 net.cpp:100] Creating Layer L2_b1_cbr1_scale\nI1212 06:18:16.840674 12086 net.cpp:434] L2_b1_cbr1_scale <- L2_b1_cbr1_bn_top\nI1212 06:18:16.840692 12086 net.cpp:395] L2_b1_cbr1_scale -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.840791 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_scale\nI1212 06:18:16.840998 12086 net.cpp:150] Setting up L2_b1_cbr1_scale\nI1212 06:18:16.841017 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.841027 12086 net.cpp:165] Memory required for data: 867841500\nI1212 06:18:16.841045 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr1_relu\nI1212 06:18:16.841064 12086 net.cpp:100] Creating Layer L2_b1_cbr1_relu\nI1212 06:18:16.841084 12086 net.cpp:434] L2_b1_cbr1_relu <- L2_b1_cbr1_bn_top\nI1212 06:18:16.841100 12086 net.cpp:395] L2_b1_cbr1_relu -> L2_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.841120 12086 net.cpp:150] Setting up L2_b1_cbr1_relu\nI1212 06:18:16.841135 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.841145 12086 net.cpp:165] Memory required for data: 869889500\nI1212 06:18:16.841156 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_conv\nI1212 06:18:16.841181 12086 net.cpp:100] Creating Layer L2_b1_cbr2_conv\nI1212 06:18:16.841195 12086 net.cpp:434] L2_b1_cbr2_conv <- L2_b1_cbr1_bn_top\nI1212 06:18:16.841217 12086 net.cpp:408] L2_b1_cbr2_conv -> L2_b1_cbr2_conv_top\nI1212 06:18:16.841630 12086 net.cpp:150] 
Setting up L2_b1_cbr2_conv\nI1212 06:18:16.841650 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.841660 12086 net.cpp:165] Memory required for data: 871937500\nI1212 06:18:16.841678 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_bn\nI1212 06:18:16.841696 12086 net.cpp:100] Creating Layer L2_b1_cbr2_bn\nI1212 06:18:16.841707 12086 net.cpp:434] L2_b1_cbr2_bn <- L2_b1_cbr2_conv_top\nI1212 06:18:16.841729 12086 net.cpp:408] L2_b1_cbr2_bn -> L2_b1_cbr2_bn_top\nI1212 06:18:16.842041 12086 net.cpp:150] Setting up L2_b1_cbr2_bn\nI1212 06:18:16.842061 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.842077 12086 net.cpp:165] Memory required for data: 873985500\nI1212 06:18:16.842099 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:18:16.842120 12086 net.cpp:100] Creating Layer L2_b1_cbr2_scale\nI1212 06:18:16.842133 12086 net.cpp:434] L2_b1_cbr2_scale <- L2_b1_cbr2_bn_top\nI1212 06:18:16.842150 12086 net.cpp:395] L2_b1_cbr2_scale -> L2_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.842242 12086 layer_factory.hpp:77] Creating layer L2_b1_cbr2_scale\nI1212 06:18:16.842442 12086 net.cpp:150] Setting up L2_b1_cbr2_scale\nI1212 06:18:16.842460 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.842469 12086 net.cpp:165] Memory required for data: 876033500\nI1212 06:18:16.842488 12086 layer_factory.hpp:77] Creating layer L2_b1_pool\nI1212 06:18:16.842509 12086 net.cpp:100] Creating Layer L2_b1_pool\nI1212 06:18:16.842521 12086 net.cpp:434] L2_b1_pool <- L1_b9_sum_eltwise_top_L1_b9_relu_0_split_1\nI1212 06:18:16.842540 12086 net.cpp:408] L2_b1_pool -> L2_b1_pool\nI1212 06:18:16.842595 12086 net.cpp:150] Setting up L2_b1_pool\nI1212 06:18:16.842617 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.842628 12086 net.cpp:165] Memory required for data: 878081500\nI1212 06:18:16.842638 12086 layer_factory.hpp:77] Creating layer L2_b1_sum_eltwise\nI1212 06:18:16.842653 12086 net.cpp:100] 
Creating Layer L2_b1_sum_eltwise\nI1212 06:18:16.842664 12086 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_cbr2_bn_top\nI1212 06:18:16.842677 12086 net.cpp:434] L2_b1_sum_eltwise <- L2_b1_pool\nI1212 06:18:16.842698 12086 net.cpp:408] L2_b1_sum_eltwise -> L2_b1_sum_eltwise_top\nI1212 06:18:16.842757 12086 net.cpp:150] Setting up L2_b1_sum_eltwise\nI1212 06:18:16.842777 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.842787 12086 net.cpp:165] Memory required for data: 880129500\nI1212 06:18:16.842797 12086 layer_factory.hpp:77] Creating layer L2_b1_relu\nI1212 06:18:16.842810 12086 net.cpp:100] Creating Layer L2_b1_relu\nI1212 06:18:16.842823 12086 net.cpp:434] L2_b1_relu <- L2_b1_sum_eltwise_top\nI1212 06:18:16.842847 12086 net.cpp:395] L2_b1_relu -> L2_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.842867 12086 net.cpp:150] Setting up L2_b1_relu\nI1212 06:18:16.842883 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.842892 12086 net.cpp:165] Memory required for data: 882177500\nI1212 06:18:16.842903 12086 layer_factory.hpp:77] Creating layer L2_b1_zeros\nI1212 06:18:16.842926 12086 net.cpp:100] Creating Layer L2_b1_zeros\nI1212 06:18:16.842942 12086 net.cpp:408] L2_b1_zeros -> L2_b1_zeros\nI1212 06:18:16.845333 12086 net.cpp:150] Setting up L2_b1_zeros\nI1212 06:18:16.845356 12086 net.cpp:157] Top shape: 125 16 16 16 (512000)\nI1212 06:18:16.845366 12086 net.cpp:165] Memory required for data: 884225500\nI1212 06:18:16.845376 12086 layer_factory.hpp:77] Creating layer L2_b1_concat0\nI1212 06:18:16.845393 12086 net.cpp:100] Creating Layer L2_b1_concat0\nI1212 06:18:16.845405 12086 net.cpp:434] L2_b1_concat0 <- L2_b1_sum_eltwise_top\nI1212 06:18:16.845419 12086 net.cpp:434] L2_b1_concat0 <- L2_b1_zeros\nI1212 06:18:16.845440 12086 net.cpp:408] L2_b1_concat0 -> L2_b1_concat0\nI1212 06:18:16.845504 12086 net.cpp:150] Setting up L2_b1_concat0\nI1212 06:18:16.845530 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 
06:18:16.845541 12086 net.cpp:165] Memory required for data: 888321500\nI1212 06:18:16.845551 12086 layer_factory.hpp:77] Creating layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.845566 12086 net.cpp:100] Creating Layer L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.845577 12086 net.cpp:434] L2_b1_concat0_L2_b1_concat0_0_split <- L2_b1_concat0\nI1212 06:18:16.845592 12086 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:18:16.845613 12086 net.cpp:408] L2_b1_concat0_L2_b1_concat0_0_split -> L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:18:16.845710 12086 net.cpp:150] Setting up L2_b1_concat0_L2_b1_concat0_0_split\nI1212 06:18:16.845729 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.845742 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.845752 12086 net.cpp:165] Memory required for data: 896513500\nI1212 06:18:16.845762 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_conv\nI1212 06:18:16.845793 12086 net.cpp:100] Creating Layer L2_b2_cbr1_conv\nI1212 06:18:16.845806 12086 net.cpp:434] L2_b2_cbr1_conv <- L2_b1_concat0_L2_b1_concat0_0_split_0\nI1212 06:18:16.845826 12086 net.cpp:408] L2_b2_cbr1_conv -> L2_b2_cbr1_conv_top\nI1212 06:18:16.846400 12086 net.cpp:150] Setting up L2_b2_cbr1_conv\nI1212 06:18:16.846421 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.846431 12086 net.cpp:165] Memory required for data: 900609500\nI1212 06:18:16.846448 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_bn\nI1212 06:18:16.846467 12086 net.cpp:100] Creating Layer L2_b2_cbr1_bn\nI1212 06:18:16.846479 12086 net.cpp:434] L2_b2_cbr1_bn <- L2_b2_cbr1_conv_top\nI1212 06:18:16.846501 12086 net.cpp:408] L2_b2_cbr1_bn -> L2_b2_cbr1_bn_top\nI1212 06:18:16.846814 12086 net.cpp:150] Setting up L2_b2_cbr1_bn\nI1212 06:18:16.846833 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.846843 12086 net.cpp:165] Memory required for data: 
904705500\nI1212 06:18:16.846864 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:18:16.846881 12086 net.cpp:100] Creating Layer L2_b2_cbr1_scale\nI1212 06:18:16.846892 12086 net.cpp:434] L2_b2_cbr1_scale <- L2_b2_cbr1_bn_top\nI1212 06:18:16.846909 12086 net.cpp:395] L2_b2_cbr1_scale -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.847008 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_scale\nI1212 06:18:16.847210 12086 net.cpp:150] Setting up L2_b2_cbr1_scale\nI1212 06:18:16.847234 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.847244 12086 net.cpp:165] Memory required for data: 908801500\nI1212 06:18:16.847262 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr1_relu\nI1212 06:18:16.847277 12086 net.cpp:100] Creating Layer L2_b2_cbr1_relu\nI1212 06:18:16.847290 12086 net.cpp:434] L2_b2_cbr1_relu <- L2_b2_cbr1_bn_top\nI1212 06:18:16.847314 12086 net.cpp:395] L2_b2_cbr1_relu -> L2_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.847334 12086 net.cpp:150] Setting up L2_b2_cbr1_relu\nI1212 06:18:16.847349 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.847359 12086 net.cpp:165] Memory required for data: 912897500\nI1212 06:18:16.847370 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_conv\nI1212 06:18:16.847396 12086 net.cpp:100] Creating Layer L2_b2_cbr2_conv\nI1212 06:18:16.847409 12086 net.cpp:434] L2_b2_cbr2_conv <- L2_b2_cbr1_bn_top\nI1212 06:18:16.847431 12086 net.cpp:408] L2_b2_cbr2_conv -> L2_b2_cbr2_conv_top\nI1212 06:18:16.847973 12086 net.cpp:150] Setting up L2_b2_cbr2_conv\nI1212 06:18:16.847995 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.848003 12086 net.cpp:165] Memory required for data: 916993500\nI1212 06:18:16.848021 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_bn\nI1212 06:18:16.848042 12086 net.cpp:100] Creating Layer L2_b2_cbr2_bn\nI1212 06:18:16.848055 12086 net.cpp:434] L2_b2_cbr2_bn <- L2_b2_cbr2_conv_top\nI1212 06:18:16.848084 12086 
net.cpp:408] L2_b2_cbr2_bn -> L2_b2_cbr2_bn_top\nI1212 06:18:16.848392 12086 net.cpp:150] Setting up L2_b2_cbr2_bn\nI1212 06:18:16.848412 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.848422 12086 net.cpp:165] Memory required for data: 921089500\nI1212 06:18:16.848443 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:18:16.848459 12086 net.cpp:100] Creating Layer L2_b2_cbr2_scale\nI1212 06:18:16.848470 12086 net.cpp:434] L2_b2_cbr2_scale <- L2_b2_cbr2_bn_top\nI1212 06:18:16.848485 12086 net.cpp:395] L2_b2_cbr2_scale -> L2_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.848584 12086 layer_factory.hpp:77] Creating layer L2_b2_cbr2_scale\nI1212 06:18:16.848783 12086 net.cpp:150] Setting up L2_b2_cbr2_scale\nI1212 06:18:16.848803 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.848812 12086 net.cpp:165] Memory required for data: 925185500\nI1212 06:18:16.848830 12086 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise\nI1212 06:18:16.848851 12086 net.cpp:100] Creating Layer L2_b2_sum_eltwise\nI1212 06:18:16.848865 12086 net.cpp:434] L2_b2_sum_eltwise <- L2_b2_cbr2_bn_top\nI1212 06:18:16.848879 12086 net.cpp:434] L2_b2_sum_eltwise <- L2_b1_concat0_L2_b1_concat0_0_split_1\nI1212 06:18:16.848894 12086 net.cpp:408] L2_b2_sum_eltwise -> L2_b2_sum_eltwise_top\nI1212 06:18:16.848944 12086 net.cpp:150] Setting up L2_b2_sum_eltwise\nI1212 06:18:16.848965 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.848975 12086 net.cpp:165] Memory required for data: 929281500\nI1212 06:18:16.848985 12086 layer_factory.hpp:77] Creating layer L2_b2_relu\nI1212 06:18:16.849007 12086 net.cpp:100] Creating Layer L2_b2_relu\nI1212 06:18:16.849020 12086 net.cpp:434] L2_b2_relu <- L2_b2_sum_eltwise_top\nI1212 06:18:16.849035 12086 net.cpp:395] L2_b2_relu -> L2_b2_sum_eltwise_top (in-place)\nI1212 06:18:16.849054 12086 net.cpp:150] Setting up L2_b2_relu\nI1212 06:18:16.849076 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.849089 12086 net.cpp:165] Memory required for data: 933377500\nI1212 06:18:16.849099 12086 layer_factory.hpp:77] Creating layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.849113 12086 net.cpp:100] Creating Layer L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.849125 12086 net.cpp:434] L2_b2_sum_eltwise_top_L2_b2_relu_0_split <- L2_b2_sum_eltwise_top\nI1212 06:18:16.849140 12086 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:18:16.849160 12086 net.cpp:408] L2_b2_sum_eltwise_top_L2_b2_relu_0_split -> L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:18:16.849251 12086 net.cpp:150] Setting up L2_b2_sum_eltwise_top_L2_b2_relu_0_split\nI1212 06:18:16.849269 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.849282 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.849292 12086 net.cpp:165] Memory required for data: 941569500\nI1212 06:18:16.849301 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_conv\nI1212 06:18:16.849335 12086 net.cpp:100] Creating Layer L2_b3_cbr1_conv\nI1212 06:18:16.849350 12086 net.cpp:434] L2_b3_cbr1_conv <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_0\nI1212 06:18:16.849370 12086 net.cpp:408] L2_b3_cbr1_conv -> L2_b3_cbr1_conv_top\nI1212 06:18:16.849917 12086 net.cpp:150] Setting up L2_b3_cbr1_conv\nI1212 06:18:16.849937 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.849947 12086 net.cpp:165] Memory required for data: 945665500\nI1212 06:18:16.849966 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_bn\nI1212 06:18:16.849987 12086 net.cpp:100] Creating Layer L2_b3_cbr1_bn\nI1212 06:18:16.849999 12086 net.cpp:434] L2_b3_cbr1_bn <- L2_b3_cbr1_conv_top\nI1212 06:18:16.850021 12086 net.cpp:408] L2_b3_cbr1_bn -> L2_b3_cbr1_bn_top\nI1212 06:18:16.850337 12086 net.cpp:150] Setting up L2_b3_cbr1_bn\nI1212 06:18:16.850355 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.850364 12086 net.cpp:165] Memory required for data: 949761500\nI1212 06:18:16.850388 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:18:16.850404 12086 net.cpp:100] Creating Layer L2_b3_cbr1_scale\nI1212 06:18:16.850414 12086 net.cpp:434] L2_b3_cbr1_scale <- L2_b3_cbr1_bn_top\nI1212 06:18:16.850431 12086 net.cpp:395] L2_b3_cbr1_scale -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.850529 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_scale\nI1212 06:18:16.850729 12086 net.cpp:150] Setting up L2_b3_cbr1_scale\nI1212 06:18:16.850749 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.850757 12086 net.cpp:165] Memory required for data: 953857500\nI1212 06:18:16.850774 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr1_relu\nI1212 06:18:16.850797 12086 net.cpp:100] Creating Layer L2_b3_cbr1_relu\nI1212 06:18:16.850811 12086 net.cpp:434] L2_b3_cbr1_relu <- L2_b3_cbr1_bn_top\nI1212 06:18:16.850826 12086 net.cpp:395] L2_b3_cbr1_relu -> L2_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.850843 12086 net.cpp:150] Setting up L2_b3_cbr1_relu\nI1212 06:18:16.850859 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.850868 12086 net.cpp:165] Memory required for data: 957953500\nI1212 06:18:16.850878 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_conv\nI1212 06:18:16.850903 12086 net.cpp:100] Creating Layer L2_b3_cbr2_conv\nI1212 06:18:16.850917 12086 net.cpp:434] L2_b3_cbr2_conv <- L2_b3_cbr1_bn_top\nI1212 06:18:16.850939 12086 net.cpp:408] L2_b3_cbr2_conv -> L2_b3_cbr2_conv_top\nI1212 06:18:16.851500 12086 net.cpp:150] Setting up L2_b3_cbr2_conv\nI1212 06:18:16.851521 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.851531 12086 net.cpp:165] Memory required for data: 962049500\nI1212 06:18:16.851548 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_bn\nI1212 06:18:16.851569 12086 net.cpp:100] Creating Layer L2_b3_cbr2_bn\nI1212 06:18:16.851583 12086 
net.cpp:434] L2_b3_cbr2_bn <- L2_b3_cbr2_conv_top\nI1212 06:18:16.851603 12086 net.cpp:408] L2_b3_cbr2_bn -> L2_b3_cbr2_bn_top\nI1212 06:18:16.851917 12086 net.cpp:150] Setting up L2_b3_cbr2_bn\nI1212 06:18:16.851940 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.851950 12086 net.cpp:165] Memory required for data: 966145500\nI1212 06:18:16.851974 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:18:16.851990 12086 net.cpp:100] Creating Layer L2_b3_cbr2_scale\nI1212 06:18:16.852002 12086 net.cpp:434] L2_b3_cbr2_scale <- L2_b3_cbr2_bn_top\nI1212 06:18:16.852017 12086 net.cpp:395] L2_b3_cbr2_scale -> L2_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.852118 12086 layer_factory.hpp:77] Creating layer L2_b3_cbr2_scale\nI1212 06:18:16.852318 12086 net.cpp:150] Setting up L2_b3_cbr2_scale\nI1212 06:18:16.852336 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.852346 12086 net.cpp:165] Memory required for data: 970241500\nI1212 06:18:16.852365 12086 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise\nI1212 06:18:16.852381 12086 net.cpp:100] Creating Layer L2_b3_sum_eltwise\nI1212 06:18:16.852399 12086 net.cpp:434] L2_b3_sum_eltwise <- L2_b3_cbr2_bn_top\nI1212 06:18:16.852422 12086 net.cpp:434] L2_b3_sum_eltwise <- L2_b2_sum_eltwise_top_L2_b2_relu_0_split_1\nI1212 06:18:16.852440 12086 net.cpp:408] L2_b3_sum_eltwise -> L2_b3_sum_eltwise_top\nI1212 06:18:16.852489 12086 net.cpp:150] Setting up L2_b3_sum_eltwise\nI1212 06:18:16.852509 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.852517 12086 net.cpp:165] Memory required for data: 974337500\nI1212 06:18:16.852529 12086 layer_factory.hpp:77] Creating layer L2_b3_relu\nI1212 06:18:16.852560 12086 net.cpp:100] Creating Layer L2_b3_relu\nI1212 06:18:16.852574 12086 net.cpp:434] L2_b3_relu <- L2_b3_sum_eltwise_top\nI1212 06:18:16.852591 12086 net.cpp:395] L2_b3_relu -> L2_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.852609 12086 net.cpp:150] 
Setting up L2_b3_relu\nI1212 06:18:16.852624 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.852634 12086 net.cpp:165] Memory required for data: 978433500\nI1212 06:18:16.852645 12086 layer_factory.hpp:77] Creating layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.852658 12086 net.cpp:100] Creating Layer L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.852670 12086 net.cpp:434] L2_b3_sum_eltwise_top_L2_b3_relu_0_split <- L2_b3_sum_eltwise_top\nI1212 06:18:16.852685 12086 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:18:16.852705 12086 net.cpp:408] L2_b3_sum_eltwise_top_L2_b3_relu_0_split -> L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:18:16.852798 12086 net.cpp:150] Setting up L2_b3_sum_eltwise_top_L2_b3_relu_0_split\nI1212 06:18:16.852818 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.852830 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.852839 12086 net.cpp:165] Memory required for data: 986625500\nI1212 06:18:16.852850 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_conv\nI1212 06:18:16.852870 12086 net.cpp:100] Creating Layer L2_b4_cbr1_conv\nI1212 06:18:16.852882 12086 net.cpp:434] L2_b4_cbr1_conv <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_0\nI1212 06:18:16.852906 12086 net.cpp:408] L2_b4_cbr1_conv -> L2_b4_cbr1_conv_top\nI1212 06:18:16.853467 12086 net.cpp:150] Setting up L2_b4_cbr1_conv\nI1212 06:18:16.853487 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.853497 12086 net.cpp:165] Memory required for data: 990721500\nI1212 06:18:16.853514 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_bn\nI1212 06:18:16.853535 12086 net.cpp:100] Creating Layer L2_b4_cbr1_bn\nI1212 06:18:16.853549 12086 net.cpp:434] L2_b4_cbr1_bn <- L2_b4_cbr1_conv_top\nI1212 06:18:16.853565 12086 net.cpp:408] L2_b4_cbr1_bn -> L2_b4_cbr1_bn_top\nI1212 06:18:16.853889 12086 net.cpp:150] Setting up 
L2_b4_cbr1_bn\nI1212 06:18:16.853909 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.853919 12086 net.cpp:165] Memory required for data: 994817500\nI1212 06:18:16.853940 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:18:16.853961 12086 net.cpp:100] Creating Layer L2_b4_cbr1_scale\nI1212 06:18:16.853973 12086 net.cpp:434] L2_b4_cbr1_scale <- L2_b4_cbr1_bn_top\nI1212 06:18:16.853989 12086 net.cpp:395] L2_b4_cbr1_scale -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.854089 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_scale\nI1212 06:18:16.854295 12086 net.cpp:150] Setting up L2_b4_cbr1_scale\nI1212 06:18:16.854312 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.854322 12086 net.cpp:165] Memory required for data: 998913500\nI1212 06:18:16.854341 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr1_relu\nI1212 06:18:16.854363 12086 net.cpp:100] Creating Layer L2_b4_cbr1_relu\nI1212 06:18:16.854377 12086 net.cpp:434] L2_b4_cbr1_relu <- L2_b4_cbr1_bn_top\nI1212 06:18:16.854395 12086 net.cpp:395] L2_b4_cbr1_relu -> L2_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.854415 12086 net.cpp:150] Setting up L2_b4_cbr1_relu\nI1212 06:18:16.854430 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.854439 12086 net.cpp:165] Memory required for data: 1003009500\nI1212 06:18:16.854449 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_conv\nI1212 06:18:16.854481 12086 net.cpp:100] Creating Layer L2_b4_cbr2_conv\nI1212 06:18:16.854496 12086 net.cpp:434] L2_b4_cbr2_conv <- L2_b4_cbr1_bn_top\nI1212 06:18:16.854518 12086 net.cpp:408] L2_b4_cbr2_conv -> L2_b4_cbr2_conv_top\nI1212 06:18:16.855058 12086 net.cpp:150] Setting up L2_b4_cbr2_conv\nI1212 06:18:16.855083 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.855094 12086 net.cpp:165] Memory required for data: 1007105500\nI1212 06:18:16.855113 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_bn\nI1212 06:18:16.855130 12086 
net.cpp:100] Creating Layer L2_b4_cbr2_bn\nI1212 06:18:16.855141 12086 net.cpp:434] L2_b4_cbr2_bn <- L2_b4_cbr2_conv_top\nI1212 06:18:16.855163 12086 net.cpp:408] L2_b4_cbr2_bn -> L2_b4_cbr2_bn_top\nI1212 06:18:16.855478 12086 net.cpp:150] Setting up L2_b4_cbr2_bn\nI1212 06:18:16.855496 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.855505 12086 net.cpp:165] Memory required for data: 1011201500\nI1212 06:18:16.855527 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:18:16.855558 12086 net.cpp:100] Creating Layer L2_b4_cbr2_scale\nI1212 06:18:16.855572 12086 net.cpp:434] L2_b4_cbr2_scale <- L2_b4_cbr2_bn_top\nI1212 06:18:16.855588 12086 net.cpp:395] L2_b4_cbr2_scale -> L2_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.855690 12086 layer_factory.hpp:77] Creating layer L2_b4_cbr2_scale\nI1212 06:18:16.855893 12086 net.cpp:150] Setting up L2_b4_cbr2_scale\nI1212 06:18:16.855912 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.855922 12086 net.cpp:165] Memory required for data: 1015297500\nI1212 06:18:16.855939 12086 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise\nI1212 06:18:16.855962 12086 net.cpp:100] Creating Layer L2_b4_sum_eltwise\nI1212 06:18:16.855973 12086 net.cpp:434] L2_b4_sum_eltwise <- L2_b4_cbr2_bn_top\nI1212 06:18:16.855988 12086 net.cpp:434] L2_b4_sum_eltwise <- L2_b3_sum_eltwise_top_L2_b3_relu_0_split_1\nI1212 06:18:16.856003 12086 net.cpp:408] L2_b4_sum_eltwise -> L2_b4_sum_eltwise_top\nI1212 06:18:16.856060 12086 net.cpp:150] Setting up L2_b4_sum_eltwise\nI1212 06:18:16.856084 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.856096 12086 net.cpp:165] Memory required for data: 1019393500\nI1212 06:18:16.856106 12086 layer_factory.hpp:77] Creating layer L2_b4_relu\nI1212 06:18:16.856122 12086 net.cpp:100] Creating Layer L2_b4_relu\nI1212 06:18:16.856133 12086 net.cpp:434] L2_b4_relu <- L2_b4_sum_eltwise_top\nI1212 06:18:16.856153 12086 net.cpp:395] L2_b4_relu -> 
L2_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.856173 12086 net.cpp:150] Setting up L2_b4_relu\nI1212 06:18:16.856186 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.856195 12086 net.cpp:165] Memory required for data: 1023489500\nI1212 06:18:16.856205 12086 layer_factory.hpp:77] Creating layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.856220 12086 net.cpp:100] Creating Layer L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.856231 12086 net.cpp:434] L2_b4_sum_eltwise_top_L2_b4_relu_0_split <- L2_b4_sum_eltwise_top\nI1212 06:18:16.856245 12086 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:18:16.856266 12086 net.cpp:408] L2_b4_sum_eltwise_top_L2_b4_relu_0_split -> L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:18:16.856351 12086 net.cpp:150] Setting up L2_b4_sum_eltwise_top_L2_b4_relu_0_split\nI1212 06:18:16.856369 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.856382 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.856391 12086 net.cpp:165] Memory required for data: 1031681500\nI1212 06:18:16.856401 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_conv\nI1212 06:18:16.856421 12086 net.cpp:100] Creating Layer L2_b5_cbr1_conv\nI1212 06:18:16.856433 12086 net.cpp:434] L2_b5_cbr1_conv <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_0\nI1212 06:18:16.856456 12086 net.cpp:408] L2_b5_cbr1_conv -> L2_b5_cbr1_conv_top\nI1212 06:18:16.857030 12086 net.cpp:150] Setting up L2_b5_cbr1_conv\nI1212 06:18:16.857058 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.857067 12086 net.cpp:165] Memory required for data: 1035777500\nI1212 06:18:16.857094 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_bn\nI1212 06:18:16.857110 12086 net.cpp:100] Creating Layer L2_b5_cbr1_bn\nI1212 06:18:16.857122 12086 net.cpp:434] L2_b5_cbr1_bn <- L2_b5_cbr1_conv_top\nI1212 06:18:16.857143 12086 net.cpp:408] L2_b5_cbr1_bn 
-> L2_b5_cbr1_bn_top\nI1212 06:18:16.857458 12086 net.cpp:150] Setting up L2_b5_cbr1_bn\nI1212 06:18:16.857477 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.857487 12086 net.cpp:165] Memory required for data: 1039873500\nI1212 06:18:16.857508 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:18:16.857529 12086 net.cpp:100] Creating Layer L2_b5_cbr1_scale\nI1212 06:18:16.857542 12086 net.cpp:434] L2_b5_cbr1_scale <- L2_b5_cbr1_bn_top\nI1212 06:18:16.857558 12086 net.cpp:395] L2_b5_cbr1_scale -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.857651 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_scale\nI1212 06:18:16.857857 12086 net.cpp:150] Setting up L2_b5_cbr1_scale\nI1212 06:18:16.857877 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.857887 12086 net.cpp:165] Memory required for data: 1043969500\nI1212 06:18:16.857905 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr1_relu\nI1212 06:18:16.857933 12086 net.cpp:100] Creating Layer L2_b5_cbr1_relu\nI1212 06:18:16.857945 12086 net.cpp:434] L2_b5_cbr1_relu <- L2_b5_cbr1_bn_top\nI1212 06:18:16.857961 12086 net.cpp:395] L2_b5_cbr1_relu -> L2_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.857980 12086 net.cpp:150] Setting up L2_b5_cbr1_relu\nI1212 06:18:16.857995 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.858005 12086 net.cpp:165] Memory required for data: 1048065500\nI1212 06:18:16.858016 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_conv\nI1212 06:18:16.858041 12086 net.cpp:100] Creating Layer L2_b5_cbr2_conv\nI1212 06:18:16.858054 12086 net.cpp:434] L2_b5_cbr2_conv <- L2_b5_cbr1_bn_top\nI1212 06:18:16.858085 12086 net.cpp:408] L2_b5_cbr2_conv -> L2_b5_cbr2_conv_top\nI1212 06:18:16.858630 12086 net.cpp:150] Setting up L2_b5_cbr2_conv\nI1212 06:18:16.858650 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.858660 12086 net.cpp:165] Memory required for data: 1052161500\nI1212 06:18:16.858678 12086 
layer_factory.hpp:77] Creating layer L2_b5_cbr2_bn\nI1212 06:18:16.858695 12086 net.cpp:100] Creating Layer L2_b5_cbr2_bn\nI1212 06:18:16.858708 12086 net.cpp:434] L2_b5_cbr2_bn <- L2_b5_cbr2_conv_top\nI1212 06:18:16.858729 12086 net.cpp:408] L2_b5_cbr2_bn -> L2_b5_cbr2_bn_top\nI1212 06:18:16.859046 12086 net.cpp:150] Setting up L2_b5_cbr2_bn\nI1212 06:18:16.859066 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859082 12086 net.cpp:165] Memory required for data: 1056257500\nI1212 06:18:16.859104 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:18:16.859127 12086 net.cpp:100] Creating Layer L2_b5_cbr2_scale\nI1212 06:18:16.859139 12086 net.cpp:434] L2_b5_cbr2_scale <- L2_b5_cbr2_bn_top\nI1212 06:18:16.859156 12086 net.cpp:395] L2_b5_cbr2_scale -> L2_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.859246 12086 layer_factory.hpp:77] Creating layer L2_b5_cbr2_scale\nI1212 06:18:16.859446 12086 net.cpp:150] Setting up L2_b5_cbr2_scale\nI1212 06:18:16.859464 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859474 12086 net.cpp:165] Memory required for data: 1060353500\nI1212 06:18:16.859493 12086 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise\nI1212 06:18:16.859513 12086 net.cpp:100] Creating Layer L2_b5_sum_eltwise\nI1212 06:18:16.859526 12086 net.cpp:434] L2_b5_sum_eltwise <- L2_b5_cbr2_bn_top\nI1212 06:18:16.859539 12086 net.cpp:434] L2_b5_sum_eltwise <- L2_b4_sum_eltwise_top_L2_b4_relu_0_split_1\nI1212 06:18:16.859556 12086 net.cpp:408] L2_b5_sum_eltwise -> L2_b5_sum_eltwise_top\nI1212 06:18:16.859611 12086 net.cpp:150] Setting up L2_b5_sum_eltwise\nI1212 06:18:16.859629 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859649 12086 net.cpp:165] Memory required for data: 1064449500\nI1212 06:18:16.859660 12086 layer_factory.hpp:77] Creating layer L2_b5_relu\nI1212 06:18:16.859675 12086 net.cpp:100] Creating Layer L2_b5_relu\nI1212 06:18:16.859688 12086 net.cpp:434] L2_b5_relu <- 
L2_b5_sum_eltwise_top\nI1212 06:18:16.859701 12086 net.cpp:395] L2_b5_relu -> L2_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.859726 12086 net.cpp:150] Setting up L2_b5_relu\nI1212 06:18:16.859741 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859750 12086 net.cpp:165] Memory required for data: 1068545500\nI1212 06:18:16.859761 12086 layer_factory.hpp:77] Creating layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.859776 12086 net.cpp:100] Creating Layer L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.859786 12086 net.cpp:434] L2_b5_sum_eltwise_top_L2_b5_relu_0_split <- L2_b5_sum_eltwise_top\nI1212 06:18:16.859802 12086 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:18:16.859822 12086 net.cpp:408] L2_b5_sum_eltwise_top_L2_b5_relu_0_split -> L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:18:16.859917 12086 net.cpp:150] Setting up L2_b5_sum_eltwise_top_L2_b5_relu_0_split\nI1212 06:18:16.859937 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859951 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.859959 12086 net.cpp:165] Memory required for data: 1076737500\nI1212 06:18:16.859969 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_conv\nI1212 06:18:16.859989 12086 net.cpp:100] Creating Layer L2_b6_cbr1_conv\nI1212 06:18:16.860002 12086 net.cpp:434] L2_b6_cbr1_conv <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_0\nI1212 06:18:16.860025 12086 net.cpp:408] L2_b6_cbr1_conv -> L2_b6_cbr1_conv_top\nI1212 06:18:16.860582 12086 net.cpp:150] Setting up L2_b6_cbr1_conv\nI1212 06:18:16.860602 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.860612 12086 net.cpp:165] Memory required for data: 1080833500\nI1212 06:18:16.860630 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_bn\nI1212 06:18:16.860646 12086 net.cpp:100] Creating Layer L2_b6_cbr1_bn\nI1212 06:18:16.860658 12086 net.cpp:434] L2_b6_cbr1_bn <- 
L2_b6_cbr1_conv_top\nI1212 06:18:16.860682 12086 net.cpp:408] L2_b6_cbr1_bn -> L2_b6_cbr1_bn_top\nI1212 06:18:16.860998 12086 net.cpp:150] Setting up L2_b6_cbr1_bn\nI1212 06:18:16.861017 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.861027 12086 net.cpp:165] Memory required for data: 1084929500\nI1212 06:18:16.861048 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:18:16.861076 12086 net.cpp:100] Creating Layer L2_b6_cbr1_scale\nI1212 06:18:16.861091 12086 net.cpp:434] L2_b6_cbr1_scale <- L2_b6_cbr1_bn_top\nI1212 06:18:16.861107 12086 net.cpp:395] L2_b6_cbr1_scale -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.861198 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_scale\nI1212 06:18:16.861400 12086 net.cpp:150] Setting up L2_b6_cbr1_scale\nI1212 06:18:16.861419 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.861428 12086 net.cpp:165] Memory required for data: 1089025500\nI1212 06:18:16.861448 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr1_relu\nI1212 06:18:16.861467 12086 net.cpp:100] Creating Layer L2_b6_cbr1_relu\nI1212 06:18:16.861479 12086 net.cpp:434] L2_b6_cbr1_relu <- L2_b6_cbr1_bn_top\nI1212 06:18:16.861495 12086 net.cpp:395] L2_b6_cbr1_relu -> L2_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.861515 12086 net.cpp:150] Setting up L2_b6_cbr1_relu\nI1212 06:18:16.861531 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.861541 12086 net.cpp:165] Memory required for data: 1093121500\nI1212 06:18:16.861551 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_conv\nI1212 06:18:16.861577 12086 net.cpp:100] Creating Layer L2_b6_cbr2_conv\nI1212 06:18:16.861590 12086 net.cpp:434] L2_b6_cbr2_conv <- L2_b6_cbr1_bn_top\nI1212 06:18:16.861613 12086 net.cpp:408] L2_b6_cbr2_conv -> L2_b6_cbr2_conv_top\nI1212 06:18:16.862179 12086 net.cpp:150] Setting up L2_b6_cbr2_conv\nI1212 06:18:16.862206 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.862216 12086 
net.cpp:165] Memory required for data: 1097217500\nI1212 06:18:16.862234 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_bn\nI1212 06:18:16.862252 12086 net.cpp:100] Creating Layer L2_b6_cbr2_bn\nI1212 06:18:16.862264 12086 net.cpp:434] L2_b6_cbr2_bn <- L2_b6_cbr2_conv_top\nI1212 06:18:16.862287 12086 net.cpp:408] L2_b6_cbr2_bn -> L2_b6_cbr2_bn_top\nI1212 06:18:16.862598 12086 net.cpp:150] Setting up L2_b6_cbr2_bn\nI1212 06:18:16.862617 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.862627 12086 net.cpp:165] Memory required for data: 1101313500\nI1212 06:18:16.862648 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:18:16.862670 12086 net.cpp:100] Creating Layer L2_b6_cbr2_scale\nI1212 06:18:16.862684 12086 net.cpp:434] L2_b6_cbr2_scale <- L2_b6_cbr2_bn_top\nI1212 06:18:16.862699 12086 net.cpp:395] L2_b6_cbr2_scale -> L2_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.862788 12086 layer_factory.hpp:77] Creating layer L2_b6_cbr2_scale\nI1212 06:18:16.862998 12086 net.cpp:150] Setting up L2_b6_cbr2_scale\nI1212 06:18:16.863018 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.863028 12086 net.cpp:165] Memory required for data: 1105409500\nI1212 06:18:16.863045 12086 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise\nI1212 06:18:16.863061 12086 net.cpp:100] Creating Layer L2_b6_sum_eltwise\nI1212 06:18:16.863080 12086 net.cpp:434] L2_b6_sum_eltwise <- L2_b6_cbr2_bn_top\nI1212 06:18:16.863103 12086 net.cpp:434] L2_b6_sum_eltwise <- L2_b5_sum_eltwise_top_L2_b5_relu_0_split_1\nI1212 06:18:16.863121 12086 net.cpp:408] L2_b6_sum_eltwise -> L2_b6_sum_eltwise_top\nI1212 06:18:16.863171 12086 net.cpp:150] Setting up L2_b6_sum_eltwise\nI1212 06:18:16.863194 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.863205 12086 net.cpp:165] Memory required for data: 1109505500\nI1212 06:18:16.863214 12086 layer_factory.hpp:77] Creating layer L2_b6_relu\nI1212 06:18:16.863229 12086 net.cpp:100] Creating 
Layer L2_b6_relu\nI1212 06:18:16.863240 12086 net.cpp:434] L2_b6_relu <- L2_b6_sum_eltwise_top\nI1212 06:18:16.863255 12086 net.cpp:395] L2_b6_relu -> L2_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.863273 12086 net.cpp:150] Setting up L2_b6_relu\nI1212 06:18:16.863289 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.863297 12086 net.cpp:165] Memory required for data: 1113601500\nI1212 06:18:16.863307 12086 layer_factory.hpp:77] Creating layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.863325 12086 net.cpp:100] Creating Layer L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.863338 12086 net.cpp:434] L2_b6_sum_eltwise_top_L2_b6_relu_0_split <- L2_b6_sum_eltwise_top\nI1212 06:18:16.863354 12086 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:18:16.863375 12086 net.cpp:408] L2_b6_sum_eltwise_top_L2_b6_relu_0_split -> L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:18:16.863467 12086 net.cpp:150] Setting up L2_b6_sum_eltwise_top_L2_b6_relu_0_split\nI1212 06:18:16.863488 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.863502 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.863512 12086 net.cpp:165] Memory required for data: 1121793500\nI1212 06:18:16.863523 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_conv\nI1212 06:18:16.863543 12086 net.cpp:100] Creating Layer L2_b7_cbr1_conv\nI1212 06:18:16.863556 12086 net.cpp:434] L2_b7_cbr1_conv <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_0\nI1212 06:18:16.863584 12086 net.cpp:408] L2_b7_cbr1_conv -> L2_b7_cbr1_conv_top\nI1212 06:18:16.865191 12086 net.cpp:150] Setting up L2_b7_cbr1_conv\nI1212 06:18:16.865212 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.865222 12086 net.cpp:165] Memory required for data: 1125889500\nI1212 06:18:16.865242 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_bn\nI1212 06:18:16.865258 12086 net.cpp:100] Creating Layer 
L2_b7_cbr1_bn\nI1212 06:18:16.865285 12086 net.cpp:434] L2_b7_cbr1_bn <- L2_b7_cbr1_conv_top\nI1212 06:18:16.865474 12086 net.cpp:408] L2_b7_cbr1_bn -> L2_b7_cbr1_bn_top\nI1212 06:18:16.865788 12086 net.cpp:150] Setting up L2_b7_cbr1_bn\nI1212 06:18:16.865813 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.865821 12086 net.cpp:165] Memory required for data: 1129985500\nI1212 06:18:16.865844 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:18:16.865860 12086 net.cpp:100] Creating Layer L2_b7_cbr1_scale\nI1212 06:18:16.865872 12086 net.cpp:434] L2_b7_cbr1_scale <- L2_b7_cbr1_bn_top\nI1212 06:18:16.865887 12086 net.cpp:395] L2_b7_cbr1_scale -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.865996 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_scale\nI1212 06:18:16.866204 12086 net.cpp:150] Setting up L2_b7_cbr1_scale\nI1212 06:18:16.866226 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.866236 12086 net.cpp:165] Memory required for data: 1134081500\nI1212 06:18:16.866255 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr1_relu\nI1212 06:18:16.866271 12086 net.cpp:100] Creating Layer L2_b7_cbr1_relu\nI1212 06:18:16.866281 12086 net.cpp:434] L2_b7_cbr1_relu <- L2_b7_cbr1_bn_top\nI1212 06:18:16.866297 12086 net.cpp:395] L2_b7_cbr1_relu -> L2_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.866315 12086 net.cpp:150] Setting up L2_b7_cbr1_relu\nI1212 06:18:16.866329 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.866339 12086 net.cpp:165] Memory required for data: 1138177500\nI1212 06:18:16.866349 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_conv\nI1212 06:18:16.866374 12086 net.cpp:100] Creating Layer L2_b7_cbr2_conv\nI1212 06:18:16.866387 12086 net.cpp:434] L2_b7_cbr2_conv <- L2_b7_cbr1_bn_top\nI1212 06:18:16.866410 12086 net.cpp:408] L2_b7_cbr2_conv -> L2_b7_cbr2_conv_top\nI1212 06:18:16.866952 12086 net.cpp:150] Setting up L2_b7_cbr2_conv\nI1212 06:18:16.866972 12086 
net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.866982 12086 net.cpp:165] Memory required for data: 1142273500\nI1212 06:18:16.867000 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_bn\nI1212 06:18:16.867022 12086 net.cpp:100] Creating Layer L2_b7_cbr2_bn\nI1212 06:18:16.867035 12086 net.cpp:434] L2_b7_cbr2_bn <- L2_b7_cbr2_conv_top\nI1212 06:18:16.867056 12086 net.cpp:408] L2_b7_cbr2_bn -> L2_b7_cbr2_bn_top\nI1212 06:18:16.867373 12086 net.cpp:150] Setting up L2_b7_cbr2_bn\nI1212 06:18:16.867393 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.867403 12086 net.cpp:165] Memory required for data: 1146369500\nI1212 06:18:16.867424 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:18:16.867441 12086 net.cpp:100] Creating Layer L2_b7_cbr2_scale\nI1212 06:18:16.867453 12086 net.cpp:434] L2_b7_cbr2_scale <- L2_b7_cbr2_bn_top\nI1212 06:18:16.867470 12086 net.cpp:395] L2_b7_cbr2_scale -> L2_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.867566 12086 layer_factory.hpp:77] Creating layer L2_b7_cbr2_scale\nI1212 06:18:16.867763 12086 net.cpp:150] Setting up L2_b7_cbr2_scale\nI1212 06:18:16.867781 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.867790 12086 net.cpp:165] Memory required for data: 1150465500\nI1212 06:18:16.867808 12086 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise\nI1212 06:18:16.867830 12086 net.cpp:100] Creating Layer L2_b7_sum_eltwise\nI1212 06:18:16.867843 12086 net.cpp:434] L2_b7_sum_eltwise <- L2_b7_cbr2_bn_top\nI1212 06:18:16.867857 12086 net.cpp:434] L2_b7_sum_eltwise <- L2_b6_sum_eltwise_top_L2_b6_relu_0_split_1\nI1212 06:18:16.867873 12086 net.cpp:408] L2_b7_sum_eltwise -> L2_b7_sum_eltwise_top\nI1212 06:18:16.867923 12086 net.cpp:150] Setting up L2_b7_sum_eltwise\nI1212 06:18:16.867944 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.867954 12086 net.cpp:165] Memory required for data: 1154561500\nI1212 06:18:16.867965 12086 layer_factory.hpp:77] 
Creating layer L2_b7_relu\nI1212 06:18:16.867985 12086 net.cpp:100] Creating Layer L2_b7_relu\nI1212 06:18:16.867996 12086 net.cpp:434] L2_b7_relu <- L2_b7_sum_eltwise_top\nI1212 06:18:16.868021 12086 net.cpp:395] L2_b7_relu -> L2_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.868041 12086 net.cpp:150] Setting up L2_b7_relu\nI1212 06:18:16.868057 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.868067 12086 net.cpp:165] Memory required for data: 1158657500\nI1212 06:18:16.868084 12086 layer_factory.hpp:77] Creating layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.868099 12086 net.cpp:100] Creating Layer L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.868110 12086 net.cpp:434] L2_b7_sum_eltwise_top_L2_b7_relu_0_split <- L2_b7_sum_eltwise_top\nI1212 06:18:16.868126 12086 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:18:16.868146 12086 net.cpp:408] L2_b7_sum_eltwise_top_L2_b7_relu_0_split -> L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:18:16.868240 12086 net.cpp:150] Setting up L2_b7_sum_eltwise_top_L2_b7_relu_0_split\nI1212 06:18:16.868263 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.868276 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.868286 12086 net.cpp:165] Memory required for data: 1166849500\nI1212 06:18:16.868297 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_conv\nI1212 06:18:16.868329 12086 net.cpp:100] Creating Layer L2_b8_cbr1_conv\nI1212 06:18:16.868343 12086 net.cpp:434] L2_b8_cbr1_conv <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_0\nI1212 06:18:16.868362 12086 net.cpp:408] L2_b8_cbr1_conv -> L2_b8_cbr1_conv_top\nI1212 06:18:16.868907 12086 net.cpp:150] Setting up L2_b8_cbr1_conv\nI1212 06:18:16.868927 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.868937 12086 net.cpp:165] Memory required for data: 1170945500\nI1212 06:18:16.868954 12086 layer_factory.hpp:77] Creating 
layer L2_b8_cbr1_bn\nI1212 06:18:16.868976 12086 net.cpp:100] Creating Layer L2_b8_cbr1_bn\nI1212 06:18:16.868989 12086 net.cpp:434] L2_b8_cbr1_bn <- L2_b8_cbr1_conv_top\nI1212 06:18:16.869010 12086 net.cpp:408] L2_b8_cbr1_bn -> L2_b8_cbr1_bn_top\nI1212 06:18:16.869328 12086 net.cpp:150] Setting up L2_b8_cbr1_bn\nI1212 06:18:16.869349 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.869357 12086 net.cpp:165] Memory required for data: 1175041500\nI1212 06:18:16.869379 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:18:16.869395 12086 net.cpp:100] Creating Layer L2_b8_cbr1_scale\nI1212 06:18:16.869406 12086 net.cpp:434] L2_b8_cbr1_scale <- L2_b8_cbr1_bn_top\nI1212 06:18:16.869423 12086 net.cpp:395] L2_b8_cbr1_scale -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.869521 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_scale\nI1212 06:18:16.869720 12086 net.cpp:150] Setting up L2_b8_cbr1_scale\nI1212 06:18:16.869743 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.869753 12086 net.cpp:165] Memory required for data: 1179137500\nI1212 06:18:16.869772 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr1_relu\nI1212 06:18:16.869787 12086 net.cpp:100] Creating Layer L2_b8_cbr1_relu\nI1212 06:18:16.869798 12086 net.cpp:434] L2_b8_cbr1_relu <- L2_b8_cbr1_bn_top\nI1212 06:18:16.869812 12086 net.cpp:395] L2_b8_cbr1_relu -> L2_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.869832 12086 net.cpp:150] Setting up L2_b8_cbr1_relu\nI1212 06:18:16.869848 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.869858 12086 net.cpp:165] Memory required for data: 1183233500\nI1212 06:18:16.869868 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_conv\nI1212 06:18:16.869892 12086 net.cpp:100] Creating Layer L2_b8_cbr2_conv\nI1212 06:18:16.869905 12086 net.cpp:434] L2_b8_cbr2_conv <- L2_b8_cbr1_bn_top\nI1212 06:18:16.869928 12086 net.cpp:408] L2_b8_cbr2_conv -> L2_b8_cbr2_conv_top\nI1212 06:18:16.870487 12086 
net.cpp:150] Setting up L2_b8_cbr2_conv\nI1212 06:18:16.870508 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.870518 12086 net.cpp:165] Memory required for data: 1187329500\nI1212 06:18:16.870535 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_bn\nI1212 06:18:16.870558 12086 net.cpp:100] Creating Layer L2_b8_cbr2_bn\nI1212 06:18:16.870579 12086 net.cpp:434] L2_b8_cbr2_bn <- L2_b8_cbr2_conv_top\nI1212 06:18:16.870601 12086 net.cpp:408] L2_b8_cbr2_bn -> L2_b8_cbr2_bn_top\nI1212 06:18:16.870935 12086 net.cpp:150] Setting up L2_b8_cbr2_bn\nI1212 06:18:16.870955 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.870965 12086 net.cpp:165] Memory required for data: 1191425500\nI1212 06:18:16.870986 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:18:16.871003 12086 net.cpp:100] Creating Layer L2_b8_cbr2_scale\nI1212 06:18:16.871016 12086 net.cpp:434] L2_b8_cbr2_scale <- L2_b8_cbr2_bn_top\nI1212 06:18:16.871031 12086 net.cpp:395] L2_b8_cbr2_scale -> L2_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.871135 12086 layer_factory.hpp:77] Creating layer L2_b8_cbr2_scale\nI1212 06:18:16.871335 12086 net.cpp:150] Setting up L2_b8_cbr2_scale\nI1212 06:18:16.871354 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.871364 12086 net.cpp:165] Memory required for data: 1195521500\nI1212 06:18:16.871382 12086 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise\nI1212 06:18:16.871403 12086 net.cpp:100] Creating Layer L2_b8_sum_eltwise\nI1212 06:18:16.871417 12086 net.cpp:434] L2_b8_sum_eltwise <- L2_b8_cbr2_bn_top\nI1212 06:18:16.871429 12086 net.cpp:434] L2_b8_sum_eltwise <- L2_b7_sum_eltwise_top_L2_b7_relu_0_split_1\nI1212 06:18:16.871445 12086 net.cpp:408] L2_b8_sum_eltwise -> L2_b8_sum_eltwise_top\nI1212 06:18:16.871495 12086 net.cpp:150] Setting up L2_b8_sum_eltwise\nI1212 06:18:16.871515 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.871526 12086 net.cpp:165] Memory required for 
data: 1199617500\nI1212 06:18:16.871536 12086 layer_factory.hpp:77] Creating layer L2_b8_relu\nI1212 06:18:16.871556 12086 net.cpp:100] Creating Layer L2_b8_relu\nI1212 06:18:16.871567 12086 net.cpp:434] L2_b8_relu <- L2_b8_sum_eltwise_top\nI1212 06:18:16.871582 12086 net.cpp:395] L2_b8_relu -> L2_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.871601 12086 net.cpp:150] Setting up L2_b8_relu\nI1212 06:18:16.871616 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.871626 12086 net.cpp:165] Memory required for data: 1203713500\nI1212 06:18:16.871637 12086 layer_factory.hpp:77] Creating layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.871651 12086 net.cpp:100] Creating Layer L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.871662 12086 net.cpp:434] L2_b8_sum_eltwise_top_L2_b8_relu_0_split <- L2_b8_sum_eltwise_top\nI1212 06:18:16.871677 12086 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:18:16.871721 12086 net.cpp:408] L2_b8_sum_eltwise_top_L2_b8_relu_0_split -> L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:18:16.871809 12086 net.cpp:150] Setting up L2_b8_sum_eltwise_top_L2_b8_relu_0_split\nI1212 06:18:16.871829 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.871841 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.871851 12086 net.cpp:165] Memory required for data: 1211905500\nI1212 06:18:16.871861 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_conv\nI1212 06:18:16.871887 12086 net.cpp:100] Creating Layer L2_b9_cbr1_conv\nI1212 06:18:16.871901 12086 net.cpp:434] L2_b9_cbr1_conv <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_0\nI1212 06:18:16.871920 12086 net.cpp:408] L2_b9_cbr1_conv -> L2_b9_cbr1_conv_top\nI1212 06:18:16.872481 12086 net.cpp:150] Setting up L2_b9_cbr1_conv\nI1212 06:18:16.872501 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.872510 12086 net.cpp:165] Memory required for data: 
1216001500\nI1212 06:18:16.872529 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_bn\nI1212 06:18:16.872550 12086 net.cpp:100] Creating Layer L2_b9_cbr1_bn\nI1212 06:18:16.872563 12086 net.cpp:434] L2_b9_cbr1_bn <- L2_b9_cbr1_conv_top\nI1212 06:18:16.872584 12086 net.cpp:408] L2_b9_cbr1_bn -> L2_b9_cbr1_bn_top\nI1212 06:18:16.872908 12086 net.cpp:150] Setting up L2_b9_cbr1_bn\nI1212 06:18:16.872931 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.872951 12086 net.cpp:165] Memory required for data: 1220097500\nI1212 06:18:16.872973 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:18:16.872990 12086 net.cpp:100] Creating Layer L2_b9_cbr1_scale\nI1212 06:18:16.873003 12086 net.cpp:434] L2_b9_cbr1_scale <- L2_b9_cbr1_bn_top\nI1212 06:18:16.873018 12086 net.cpp:395] L2_b9_cbr1_scale -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.873124 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_scale\nI1212 06:18:16.873335 12086 net.cpp:150] Setting up L2_b9_cbr1_scale\nI1212 06:18:16.873354 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.873364 12086 net.cpp:165] Memory required for data: 1224193500\nI1212 06:18:16.873383 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr1_relu\nI1212 06:18:16.873399 12086 net.cpp:100] Creating Layer L2_b9_cbr1_relu\nI1212 06:18:16.873409 12086 net.cpp:434] L2_b9_cbr1_relu <- L2_b9_cbr1_bn_top\nI1212 06:18:16.873430 12086 net.cpp:395] L2_b9_cbr1_relu -> L2_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.873450 12086 net.cpp:150] Setting up L2_b9_cbr1_relu\nI1212 06:18:16.873463 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.873472 12086 net.cpp:165] Memory required for data: 1228289500\nI1212 06:18:16.873482 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_conv\nI1212 06:18:16.873508 12086 net.cpp:100] Creating Layer L2_b9_cbr2_conv\nI1212 06:18:16.873522 12086 net.cpp:434] L2_b9_cbr2_conv <- L2_b9_cbr1_bn_top\nI1212 06:18:16.873540 12086 net.cpp:408] 
L2_b9_cbr2_conv -> L2_b9_cbr2_conv_top\nI1212 06:18:16.875138 12086 net.cpp:150] Setting up L2_b9_cbr2_conv\nI1212 06:18:16.875159 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.875169 12086 net.cpp:165] Memory required for data: 1232385500\nI1212 06:18:16.875187 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_bn\nI1212 06:18:16.875210 12086 net.cpp:100] Creating Layer L2_b9_cbr2_bn\nI1212 06:18:16.875222 12086 net.cpp:434] L2_b9_cbr2_bn <- L2_b9_cbr2_conv_top\nI1212 06:18:16.875244 12086 net.cpp:408] L2_b9_cbr2_bn -> L2_b9_cbr2_bn_top\nI1212 06:18:16.875551 12086 net.cpp:150] Setting up L2_b9_cbr2_bn\nI1212 06:18:16.875571 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.875581 12086 net.cpp:165] Memory required for data: 1236481500\nI1212 06:18:16.875663 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:18:16.875685 12086 net.cpp:100] Creating Layer L2_b9_cbr2_scale\nI1212 06:18:16.875699 12086 net.cpp:434] L2_b9_cbr2_scale <- L2_b9_cbr2_bn_top\nI1212 06:18:16.875715 12086 net.cpp:395] L2_b9_cbr2_scale -> L2_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.875808 12086 layer_factory.hpp:77] Creating layer L2_b9_cbr2_scale\nI1212 06:18:16.876008 12086 net.cpp:150] Setting up L2_b9_cbr2_scale\nI1212 06:18:16.876027 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.876037 12086 net.cpp:165] Memory required for data: 1240577500\nI1212 06:18:16.876056 12086 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise\nI1212 06:18:16.876085 12086 net.cpp:100] Creating Layer L2_b9_sum_eltwise\nI1212 06:18:16.876098 12086 net.cpp:434] L2_b9_sum_eltwise <- L2_b9_cbr2_bn_top\nI1212 06:18:16.876112 12086 net.cpp:434] L2_b9_sum_eltwise <- L2_b8_sum_eltwise_top_L2_b8_relu_0_split_1\nI1212 06:18:16.876133 12086 net.cpp:408] L2_b9_sum_eltwise -> L2_b9_sum_eltwise_top\nI1212 06:18:16.876183 12086 net.cpp:150] Setting up L2_b9_sum_eltwise\nI1212 06:18:16.876200 12086 net.cpp:157] Top shape: 125 32 16 16 
(1024000)\nI1212 06:18:16.876210 12086 net.cpp:165] Memory required for data: 1244673500\nI1212 06:18:16.876221 12086 layer_factory.hpp:77] Creating layer L2_b9_relu\nI1212 06:18:16.876236 12086 net.cpp:100] Creating Layer L2_b9_relu\nI1212 06:18:16.876248 12086 net.cpp:434] L2_b9_relu <- L2_b9_sum_eltwise_top\nI1212 06:18:16.876268 12086 net.cpp:395] L2_b9_relu -> L2_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.876288 12086 net.cpp:150] Setting up L2_b9_relu\nI1212 06:18:16.876303 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.876312 12086 net.cpp:165] Memory required for data: 1248769500\nI1212 06:18:16.876332 12086 layer_factory.hpp:77] Creating layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.876348 12086 net.cpp:100] Creating Layer L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.876358 12086 net.cpp:434] L2_b9_sum_eltwise_top_L2_b9_relu_0_split <- L2_b9_sum_eltwise_top\nI1212 06:18:16.876379 12086 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:18:16.876400 12086 net.cpp:408] L2_b9_sum_eltwise_top_L2_b9_relu_0_split -> L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:18:16.876490 12086 net.cpp:150] Setting up L2_b9_sum_eltwise_top_L2_b9_relu_0_split\nI1212 06:18:16.876509 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.876523 12086 net.cpp:157] Top shape: 125 32 16 16 (1024000)\nI1212 06:18:16.876533 12086 net.cpp:165] Memory required for data: 1256961500\nI1212 06:18:16.876543 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_conv\nI1212 06:18:16.876562 12086 net.cpp:100] Creating Layer L3_b1_cbr1_conv\nI1212 06:18:16.876574 12086 net.cpp:434] L3_b1_cbr1_conv <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_0\nI1212 06:18:16.876598 12086 net.cpp:408] L3_b1_cbr1_conv -> L3_b1_cbr1_conv_top\nI1212 06:18:16.877164 12086 net.cpp:150] Setting up L3_b1_cbr1_conv\nI1212 06:18:16.877185 12086 net.cpp:157] Top shape: 125 32 8 8 
(256000)\nI1212 06:18:16.877194 12086 net.cpp:165] Memory required for data: 1257985500\nI1212 06:18:16.877213 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_bn\nI1212 06:18:16.877235 12086 net.cpp:100] Creating Layer L3_b1_cbr1_bn\nI1212 06:18:16.877249 12086 net.cpp:434] L3_b1_cbr1_bn <- L3_b1_cbr1_conv_top\nI1212 06:18:16.877265 12086 net.cpp:408] L3_b1_cbr1_bn -> L3_b1_cbr1_bn_top\nI1212 06:18:16.877588 12086 net.cpp:150] Setting up L3_b1_cbr1_bn\nI1212 06:18:16.877606 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.877616 12086 net.cpp:165] Memory required for data: 1259009500\nI1212 06:18:16.877637 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:18:16.877655 12086 net.cpp:100] Creating Layer L3_b1_cbr1_scale\nI1212 06:18:16.877665 12086 net.cpp:434] L3_b1_cbr1_scale <- L3_b1_cbr1_bn_top\nI1212 06:18:16.877681 12086 net.cpp:395] L3_b1_cbr1_scale -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.877779 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_scale\nI1212 06:18:16.877988 12086 net.cpp:150] Setting up L3_b1_cbr1_scale\nI1212 06:18:16.878018 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.878031 12086 net.cpp:165] Memory required for data: 1260033500\nI1212 06:18:16.878049 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr1_relu\nI1212 06:18:16.878064 12086 net.cpp:100] Creating Layer L3_b1_cbr1_relu\nI1212 06:18:16.878084 12086 net.cpp:434] L3_b1_cbr1_relu <- L3_b1_cbr1_bn_top\nI1212 06:18:16.878100 12086 net.cpp:395] L3_b1_cbr1_relu -> L3_b1_cbr1_bn_top (in-place)\nI1212 06:18:16.878119 12086 net.cpp:150] Setting up L3_b1_cbr1_relu\nI1212 06:18:16.878134 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.878144 12086 net.cpp:165] Memory required for data: 1261057500\nI1212 06:18:16.878154 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_conv\nI1212 06:18:16.878180 12086 net.cpp:100] Creating Layer L3_b1_cbr2_conv\nI1212 06:18:16.878193 12086 net.cpp:434] 
L3_b1_cbr2_conv <- L3_b1_cbr1_bn_top\nI1212 06:18:16.878211 12086 net.cpp:408] L3_b1_cbr2_conv -> L3_b1_cbr2_conv_top\nI1212 06:18:16.878756 12086 net.cpp:150] Setting up L3_b1_cbr2_conv\nI1212 06:18:16.878777 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.878787 12086 net.cpp:165] Memory required for data: 1262081500\nI1212 06:18:16.878804 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_bn\nI1212 06:18:16.878823 12086 net.cpp:100] Creating Layer L3_b1_cbr2_bn\nI1212 06:18:16.878834 12086 net.cpp:434] L3_b1_cbr2_bn <- L3_b1_cbr2_conv_top\nI1212 06:18:16.878856 12086 net.cpp:408] L3_b1_cbr2_bn -> L3_b1_cbr2_bn_top\nI1212 06:18:16.879189 12086 net.cpp:150] Setting up L3_b1_cbr2_bn\nI1212 06:18:16.879209 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.879226 12086 net.cpp:165] Memory required for data: 1263105500\nI1212 06:18:16.879250 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:18:16.879266 12086 net.cpp:100] Creating Layer L3_b1_cbr2_scale\nI1212 06:18:16.879279 12086 net.cpp:434] L3_b1_cbr2_scale <- L3_b1_cbr2_bn_top\nI1212 06:18:16.879299 12086 net.cpp:395] L3_b1_cbr2_scale -> L3_b1_cbr2_bn_top (in-place)\nI1212 06:18:16.879400 12086 layer_factory.hpp:77] Creating layer L3_b1_cbr2_scale\nI1212 06:18:16.879608 12086 net.cpp:150] Setting up L3_b1_cbr2_scale\nI1212 06:18:16.879627 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.879637 12086 net.cpp:165] Memory required for data: 1264129500\nI1212 06:18:16.879655 12086 layer_factory.hpp:77] Creating layer L3_b1_pool\nI1212 06:18:16.879678 12086 net.cpp:100] Creating Layer L3_b1_pool\nI1212 06:18:16.879690 12086 net.cpp:434] L3_b1_pool <- L2_b9_sum_eltwise_top_L2_b9_relu_0_split_1\nI1212 06:18:16.879707 12086 net.cpp:408] L3_b1_pool -> L3_b1_pool\nI1212 06:18:16.879770 12086 net.cpp:150] Setting up L3_b1_pool\nI1212 06:18:16.879791 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.879801 12086 net.cpp:165] Memory 
required for data: 1265153500\nI1212 06:18:16.879812 12086 layer_factory.hpp:77] Creating layer L3_b1_sum_eltwise\nI1212 06:18:16.879828 12086 net.cpp:100] Creating Layer L3_b1_sum_eltwise\nI1212 06:18:16.879840 12086 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_cbr2_bn_top\nI1212 06:18:16.879858 12086 net.cpp:434] L3_b1_sum_eltwise <- L3_b1_pool\nI1212 06:18:16.879875 12086 net.cpp:408] L3_b1_sum_eltwise -> L3_b1_sum_eltwise_top\nI1212 06:18:16.879932 12086 net.cpp:150] Setting up L3_b1_sum_eltwise\nI1212 06:18:16.879954 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.879964 12086 net.cpp:165] Memory required for data: 1266177500\nI1212 06:18:16.879976 12086 layer_factory.hpp:77] Creating layer L3_b1_relu\nI1212 06:18:16.879989 12086 net.cpp:100] Creating Layer L3_b1_relu\nI1212 06:18:16.880002 12086 net.cpp:434] L3_b1_relu <- L3_b1_sum_eltwise_top\nI1212 06:18:16.880023 12086 net.cpp:395] L3_b1_relu -> L3_b1_sum_eltwise_top (in-place)\nI1212 06:18:16.880043 12086 net.cpp:150] Setting up L3_b1_relu\nI1212 06:18:16.880059 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.880069 12086 net.cpp:165] Memory required for data: 1267201500\nI1212 06:18:16.880087 12086 layer_factory.hpp:77] Creating layer L3_b1_zeros\nI1212 06:18:16.880105 12086 net.cpp:100] Creating Layer L3_b1_zeros\nI1212 06:18:16.880120 12086 net.cpp:408] L3_b1_zeros -> L3_b1_zeros\nI1212 06:18:16.881435 12086 net.cpp:150] Setting up L3_b1_zeros\nI1212 06:18:16.881458 12086 net.cpp:157] Top shape: 125 32 8 8 (256000)\nI1212 06:18:16.881467 12086 net.cpp:165] Memory required for data: 1268225500\nI1212 06:18:16.881477 12086 layer_factory.hpp:77] Creating layer L3_b1_concat0\nI1212 06:18:16.881494 12086 net.cpp:100] Creating Layer L3_b1_concat0\nI1212 06:18:16.881505 12086 net.cpp:434] L3_b1_concat0 <- L3_b1_sum_eltwise_top\nI1212 06:18:16.881518 12086 net.cpp:434] L3_b1_concat0 <- L3_b1_zeros\nI1212 06:18:16.881539 12086 net.cpp:408] L3_b1_concat0 -> L3_b1_concat0\nI1212 
06:18:16.881608 12086 net.cpp:150] Setting up L3_b1_concat0\nI1212 06:18:16.881628 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.881639 12086 net.cpp:165] Memory required for data: 1270273500\nI1212 06:18:16.881649 12086 layer_factory.hpp:77] Creating layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.881664 12086 net.cpp:100] Creating Layer L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.881675 12086 net.cpp:434] L3_b1_concat0_L3_b1_concat0_0_split <- L3_b1_concat0\nI1212 06:18:16.881695 12086 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:18:16.881716 12086 net.cpp:408] L3_b1_concat0_L3_b1_concat0_0_split -> L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:18:16.881809 12086 net.cpp:150] Setting up L3_b1_concat0_L3_b1_concat0_0_split\nI1212 06:18:16.881831 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.881845 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.881866 12086 net.cpp:165] Memory required for data: 1274369500\nI1212 06:18:16.881877 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_conv\nI1212 06:18:16.881903 12086 net.cpp:100] Creating Layer L3_b2_cbr1_conv\nI1212 06:18:16.881918 12086 net.cpp:434] L3_b2_cbr1_conv <- L3_b1_concat0_L3_b1_concat0_0_split_0\nI1212 06:18:16.881938 12086 net.cpp:408] L3_b2_cbr1_conv -> L3_b2_cbr1_conv_top\nI1212 06:18:16.883036 12086 net.cpp:150] Setting up L3_b2_cbr1_conv\nI1212 06:18:16.883057 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.883066 12086 net.cpp:165] Memory required for data: 1276417500\nI1212 06:18:16.883091 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_bn\nI1212 06:18:16.883116 12086 net.cpp:100] Creating Layer L3_b2_cbr1_bn\nI1212 06:18:16.883128 12086 net.cpp:434] L3_b2_cbr1_bn <- L3_b2_cbr1_conv_top\nI1212 06:18:16.883146 12086 net.cpp:408] L3_b2_cbr1_bn -> L3_b2_cbr1_bn_top\nI1212 06:18:16.883462 12086 net.cpp:150] Setting up L3_b2_cbr1_bn\nI1212 
06:18:16.883481 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.883491 12086 net.cpp:165] Memory required for data: 1278465500\nI1212 06:18:16.883513 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:18:16.883535 12086 net.cpp:100] Creating Layer L3_b2_cbr1_scale\nI1212 06:18:16.883548 12086 net.cpp:434] L3_b2_cbr1_scale <- L3_b2_cbr1_bn_top\nI1212 06:18:16.883563 12086 net.cpp:395] L3_b2_cbr1_scale -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.883661 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_scale\nI1212 06:18:16.883863 12086 net.cpp:150] Setting up L3_b2_cbr1_scale\nI1212 06:18:16.883882 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.883893 12086 net.cpp:165] Memory required for data: 1280513500\nI1212 06:18:16.883910 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr1_relu\nI1212 06:18:16.883931 12086 net.cpp:100] Creating Layer L3_b2_cbr1_relu\nI1212 06:18:16.883944 12086 net.cpp:434] L3_b2_cbr1_relu <- L3_b2_cbr1_bn_top\nI1212 06:18:16.883962 12086 net.cpp:395] L3_b2_cbr1_relu -> L3_b2_cbr1_bn_top (in-place)\nI1212 06:18:16.883982 12086 net.cpp:150] Setting up L3_b2_cbr1_relu\nI1212 06:18:16.883997 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.884006 12086 net.cpp:165] Memory required for data: 1282561500\nI1212 06:18:16.884016 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_conv\nI1212 06:18:16.884037 12086 net.cpp:100] Creating Layer L3_b2_cbr2_conv\nI1212 06:18:16.884050 12086 net.cpp:434] L3_b2_cbr2_conv <- L3_b2_cbr1_bn_top\nI1212 06:18:16.884079 12086 net.cpp:408] L3_b2_cbr2_conv -> L3_b2_cbr2_conv_top\nI1212 06:18:16.885186 12086 net.cpp:150] Setting up L3_b2_cbr2_conv\nI1212 06:18:16.885207 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.885217 12086 net.cpp:165] Memory required for data: 1284609500\nI1212 06:18:16.885234 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_bn\nI1212 06:18:16.885257 12086 net.cpp:100] Creating Layer 
L3_b2_cbr2_bn\nI1212 06:18:16.885270 12086 net.cpp:434] L3_b2_cbr2_bn <- L3_b2_cbr2_conv_top\nI1212 06:18:16.885288 12086 net.cpp:408] L3_b2_cbr2_bn -> L3_b2_cbr2_bn_top\nI1212 06:18:16.885604 12086 net.cpp:150] Setting up L3_b2_cbr2_bn\nI1212 06:18:16.885624 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.885634 12086 net.cpp:165] Memory required for data: 1286657500\nI1212 06:18:16.885656 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:18:16.885673 12086 net.cpp:100] Creating Layer L3_b2_cbr2_scale\nI1212 06:18:16.885685 12086 net.cpp:434] L3_b2_cbr2_scale <- L3_b2_cbr2_bn_top\nI1212 06:18:16.885701 12086 net.cpp:395] L3_b2_cbr2_scale -> L3_b2_cbr2_bn_top (in-place)\nI1212 06:18:16.885798 12086 layer_factory.hpp:77] Creating layer L3_b2_cbr2_scale\nI1212 06:18:16.886005 12086 net.cpp:150] Setting up L3_b2_cbr2_scale\nI1212 06:18:16.886029 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.886039 12086 net.cpp:165] Memory required for data: 1288705500\nI1212 06:18:16.886059 12086 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise\nI1212 06:18:16.886091 12086 net.cpp:100] Creating Layer L3_b2_sum_eltwise\nI1212 06:18:16.886106 12086 net.cpp:434] L3_b2_sum_eltwise <- L3_b2_cbr2_bn_top\nI1212 06:18:16.886121 12086 net.cpp:434] L3_b2_sum_eltwise <- L3_b1_concat0_L3_b1_concat0_0_split_1\nI1212 06:18:16.886137 12086 net.cpp:408] L3_b2_sum_eltwise -> L3_b2_sum_eltwise_top\nI1212 06:18:16.886200 12086 net.cpp:150] Setting up L3_b2_sum_eltwise\nI1212 06:18:16.886221 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.886230 12086 net.cpp:165] Memory required for data: 1290753500\nI1212 06:18:16.886241 12086 layer_factory.hpp:77] Creating layer L3_b2_relu\nI1212 06:18:16.886256 12086 net.cpp:100] Creating Layer L3_b2_relu\nI1212 06:18:16.886267 12086 net.cpp:434] L3_b2_relu <- L3_b2_sum_eltwise_top\nI1212 06:18:16.886282 12086 net.cpp:395] L3_b2_relu -> L3_b2_sum_eltwise_top (in-place)\nI1212 
06:18:16.886302 12086 net.cpp:150] Setting up L3_b2_relu\nI1212 06:18:16.886317 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.886325 12086 net.cpp:165] Memory required for data: 1292801500\nI1212 06:18:16.886337 12086 layer_factory.hpp:77] Creating layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.886355 12086 net.cpp:100] Creating Layer L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.886368 12086 net.cpp:434] L3_b2_sum_eltwise_top_L3_b2_relu_0_split <- L3_b2_sum_eltwise_top\nI1212 06:18:16.886384 12086 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:18:16.886404 12086 net.cpp:408] L3_b2_sum_eltwise_top_L3_b2_relu_0_split -> L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:18:16.886490 12086 net.cpp:150] Setting up L3_b2_sum_eltwise_top_L3_b2_relu_0_split\nI1212 06:18:16.886515 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.886529 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.886539 12086 net.cpp:165] Memory required for data: 1296897500\nI1212 06:18:16.886549 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_conv\nI1212 06:18:16.886569 12086 net.cpp:100] Creating Layer L3_b3_cbr1_conv\nI1212 06:18:16.886582 12086 net.cpp:434] L3_b3_cbr1_conv <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_0\nI1212 06:18:16.886601 12086 net.cpp:408] L3_b3_cbr1_conv -> L3_b3_cbr1_conv_top\nI1212 06:18:16.887709 12086 net.cpp:150] Setting up L3_b3_cbr1_conv\nI1212 06:18:16.887730 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.887740 12086 net.cpp:165] Memory required for data: 1298945500\nI1212 06:18:16.887758 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_bn\nI1212 06:18:16.887780 12086 net.cpp:100] Creating Layer L3_b3_cbr1_bn\nI1212 06:18:16.887792 12086 net.cpp:434] L3_b3_cbr1_bn <- L3_b3_cbr1_conv_top\nI1212 06:18:16.887809 12086 net.cpp:408] L3_b3_cbr1_bn -> L3_b3_cbr1_bn_top\nI1212 06:18:16.888146 12086 
net.cpp:150] Setting up L3_b3_cbr1_bn\nI1212 06:18:16.888166 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.888175 12086 net.cpp:165] Memory required for data: 1300993500\nI1212 06:18:16.888196 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:18:16.888218 12086 net.cpp:100] Creating Layer L3_b3_cbr1_scale\nI1212 06:18:16.888231 12086 net.cpp:434] L3_b3_cbr1_scale <- L3_b3_cbr1_bn_top\nI1212 06:18:16.888247 12086 net.cpp:395] L3_b3_cbr1_scale -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.888346 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_scale\nI1212 06:18:16.888555 12086 net.cpp:150] Setting up L3_b3_cbr1_scale\nI1212 06:18:16.888573 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.888583 12086 net.cpp:165] Memory required for data: 1303041500\nI1212 06:18:16.888602 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr1_relu\nI1212 06:18:16.888617 12086 net.cpp:100] Creating Layer L3_b3_cbr1_relu\nI1212 06:18:16.888629 12086 net.cpp:434] L3_b3_cbr1_relu <- L3_b3_cbr1_bn_top\nI1212 06:18:16.888648 12086 net.cpp:395] L3_b3_cbr1_relu -> L3_b3_cbr1_bn_top (in-place)\nI1212 06:18:16.888669 12086 net.cpp:150] Setting up L3_b3_cbr1_relu\nI1212 06:18:16.888682 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.888701 12086 net.cpp:165] Memory required for data: 1305089500\nI1212 06:18:16.888712 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_conv\nI1212 06:18:16.888733 12086 net.cpp:100] Creating Layer L3_b3_cbr2_conv\nI1212 06:18:16.888746 12086 net.cpp:434] L3_b3_cbr2_conv <- L3_b3_cbr1_bn_top\nI1212 06:18:16.888769 12086 net.cpp:408] L3_b3_cbr2_conv -> L3_b3_cbr2_conv_top\nI1212 06:18:16.889863 12086 net.cpp:150] Setting up L3_b3_cbr2_conv\nI1212 06:18:16.889884 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.889894 12086 net.cpp:165] Memory required for data: 1307137500\nI1212 06:18:16.889912 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_bn\nI1212 
06:18:16.889935 12086 net.cpp:100] Creating Layer L3_b3_cbr2_bn\nI1212 06:18:16.889947 12086 net.cpp:434] L3_b3_cbr2_bn <- L3_b3_cbr2_conv_top\nI1212 06:18:16.889964 12086 net.cpp:408] L3_b3_cbr2_bn -> L3_b3_cbr2_bn_top\nI1212 06:18:16.890288 12086 net.cpp:150] Setting up L3_b3_cbr2_bn\nI1212 06:18:16.890307 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.890317 12086 net.cpp:165] Memory required for data: 1309185500\nI1212 06:18:16.890339 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:18:16.890357 12086 net.cpp:100] Creating Layer L3_b3_cbr2_scale\nI1212 06:18:16.890368 12086 net.cpp:434] L3_b3_cbr2_scale <- L3_b3_cbr2_bn_top\nI1212 06:18:16.890383 12086 net.cpp:395] L3_b3_cbr2_scale -> L3_b3_cbr2_bn_top (in-place)\nI1212 06:18:16.890485 12086 layer_factory.hpp:77] Creating layer L3_b3_cbr2_scale\nI1212 06:18:16.890691 12086 net.cpp:150] Setting up L3_b3_cbr2_scale\nI1212 06:18:16.890713 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.890722 12086 net.cpp:165] Memory required for data: 1311233500\nI1212 06:18:16.890740 12086 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise\nI1212 06:18:16.890758 12086 net.cpp:100] Creating Layer L3_b3_sum_eltwise\nI1212 06:18:16.890770 12086 net.cpp:434] L3_b3_sum_eltwise <- L3_b3_cbr2_bn_top\nI1212 06:18:16.890784 12086 net.cpp:434] L3_b3_sum_eltwise <- L3_b2_sum_eltwise_top_L3_b2_relu_0_split_1\nI1212 06:18:16.890805 12086 net.cpp:408] L3_b3_sum_eltwise -> L3_b3_sum_eltwise_top\nI1212 06:18:16.890861 12086 net.cpp:150] Setting up L3_b3_sum_eltwise\nI1212 06:18:16.890880 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.890890 12086 net.cpp:165] Memory required for data: 1313281500\nI1212 06:18:16.890902 12086 layer_factory.hpp:77] Creating layer L3_b3_relu\nI1212 06:18:16.890920 12086 net.cpp:100] Creating Layer L3_b3_relu\nI1212 06:18:16.890933 12086 net.cpp:434] L3_b3_relu <- L3_b3_sum_eltwise_top\nI1212 06:18:16.890949 12086 net.cpp:395] 
L3_b3_relu -> L3_b3_sum_eltwise_top (in-place)\nI1212 06:18:16.890966 12086 net.cpp:150] Setting up L3_b3_relu\nI1212 06:18:16.890982 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.890991 12086 net.cpp:165] Memory required for data: 1315329500\nI1212 06:18:16.891001 12086 layer_factory.hpp:77] Creating layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.891021 12086 net.cpp:100] Creating Layer L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.891032 12086 net.cpp:434] L3_b3_sum_eltwise_top_L3_b3_relu_0_split <- L3_b3_sum_eltwise_top\nI1212 06:18:16.891048 12086 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:18:16.891068 12086 net.cpp:408] L3_b3_sum_eltwise_top_L3_b3_relu_0_split -> L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:18:16.891163 12086 net.cpp:150] Setting up L3_b3_sum_eltwise_top_L3_b3_relu_0_split\nI1212 06:18:16.891188 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.891203 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.891213 12086 net.cpp:165] Memory required for data: 1319425500\nI1212 06:18:16.891223 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_conv\nI1212 06:18:16.891244 12086 net.cpp:100] Creating Layer L3_b4_cbr1_conv\nI1212 06:18:16.891258 12086 net.cpp:434] L3_b4_cbr1_conv <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_0\nI1212 06:18:16.891286 12086 net.cpp:408] L3_b4_cbr1_conv -> L3_b4_cbr1_conv_top\nI1212 06:18:16.892402 12086 net.cpp:150] Setting up L3_b4_cbr1_conv\nI1212 06:18:16.892423 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.892433 12086 net.cpp:165] Memory required for data: 1321473500\nI1212 06:18:16.892452 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_bn\nI1212 06:18:16.892473 12086 net.cpp:100] Creating Layer L3_b4_cbr1_bn\nI1212 06:18:16.892487 12086 net.cpp:434] L3_b4_cbr1_bn <- L3_b4_cbr1_conv_top\nI1212 06:18:16.892503 12086 net.cpp:408] L3_b4_cbr1_bn 
-> L3_b4_cbr1_bn_top\nI1212 06:18:16.892822 12086 net.cpp:150] Setting up L3_b4_cbr1_bn\nI1212 06:18:16.892841 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.892851 12086 net.cpp:165] Memory required for data: 1323521500\nI1212 06:18:16.892873 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:18:16.892894 12086 net.cpp:100] Creating Layer L3_b4_cbr1_scale\nI1212 06:18:16.892906 12086 net.cpp:434] L3_b4_cbr1_scale <- L3_b4_cbr1_bn_top\nI1212 06:18:16.892927 12086 net.cpp:395] L3_b4_cbr1_scale -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.893019 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_scale\nI1212 06:18:16.893230 12086 net.cpp:150] Setting up L3_b4_cbr1_scale\nI1212 06:18:16.893250 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.893260 12086 net.cpp:165] Memory required for data: 1325569500\nI1212 06:18:16.893277 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr1_relu\nI1212 06:18:16.893295 12086 net.cpp:100] Creating Layer L3_b4_cbr1_relu\nI1212 06:18:16.893306 12086 net.cpp:434] L3_b4_cbr1_relu <- L3_b4_cbr1_bn_top\nI1212 06:18:16.893326 12086 net.cpp:395] L3_b4_cbr1_relu -> L3_b4_cbr1_bn_top (in-place)\nI1212 06:18:16.893347 12086 net.cpp:150] Setting up L3_b4_cbr1_relu\nI1212 06:18:16.893362 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.893371 12086 net.cpp:165] Memory required for data: 1327617500\nI1212 06:18:16.893381 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_conv\nI1212 06:18:16.893407 12086 net.cpp:100] Creating Layer L3_b4_cbr2_conv\nI1212 06:18:16.893420 12086 net.cpp:434] L3_b4_cbr2_conv <- L3_b4_cbr1_bn_top\nI1212 06:18:16.893438 12086 net.cpp:408] L3_b4_cbr2_conv -> L3_b4_cbr2_conv_top\nI1212 06:18:16.895565 12086 net.cpp:150] Setting up L3_b4_cbr2_conv\nI1212 06:18:16.895587 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.895597 12086 net.cpp:165] Memory required for data: 1329665500\nI1212 06:18:16.895617 12086 
layer_factory.hpp:77] Creating layer L3_b4_cbr2_bn\nI1212 06:18:16.895639 12086 net.cpp:100] Creating Layer L3_b4_cbr2_bn\nI1212 06:18:16.895653 12086 net.cpp:434] L3_b4_cbr2_bn <- L3_b4_cbr2_conv_top\nI1212 06:18:16.895669 12086 net.cpp:408] L3_b4_cbr2_bn -> L3_b4_cbr2_bn_top\nI1212 06:18:16.895993 12086 net.cpp:150] Setting up L3_b4_cbr2_bn\nI1212 06:18:16.896013 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896021 12086 net.cpp:165] Memory required for data: 1331713500\nI1212 06:18:16.896044 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:18:16.896064 12086 net.cpp:100] Creating Layer L3_b4_cbr2_scale\nI1212 06:18:16.896096 12086 net.cpp:434] L3_b4_cbr2_scale <- L3_b4_cbr2_bn_top\nI1212 06:18:16.896121 12086 net.cpp:395] L3_b4_cbr2_scale -> L3_b4_cbr2_bn_top (in-place)\nI1212 06:18:16.896222 12086 layer_factory.hpp:77] Creating layer L3_b4_cbr2_scale\nI1212 06:18:16.896433 12086 net.cpp:150] Setting up L3_b4_cbr2_scale\nI1212 06:18:16.896452 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896461 12086 net.cpp:165] Memory required for data: 1333761500\nI1212 06:18:16.896481 12086 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise\nI1212 06:18:16.896498 12086 net.cpp:100] Creating Layer L3_b4_sum_eltwise\nI1212 06:18:16.896510 12086 net.cpp:434] L3_b4_sum_eltwise <- L3_b4_cbr2_bn_top\nI1212 06:18:16.896523 12086 net.cpp:434] L3_b4_sum_eltwise <- L3_b3_sum_eltwise_top_L3_b3_relu_0_split_1\nI1212 06:18:16.896544 12086 net.cpp:408] L3_b4_sum_eltwise -> L3_b4_sum_eltwise_top\nI1212 06:18:16.896603 12086 net.cpp:150] Setting up L3_b4_sum_eltwise\nI1212 06:18:16.896631 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896642 12086 net.cpp:165] Memory required for data: 1335809500\nI1212 06:18:16.896653 12086 layer_factory.hpp:77] Creating layer L3_b4_relu\nI1212 06:18:16.896672 12086 net.cpp:100] Creating Layer L3_b4_relu\nI1212 06:18:16.896684 12086 net.cpp:434] L3_b4_relu <- 
L3_b4_sum_eltwise_top\nI1212 06:18:16.896699 12086 net.cpp:395] L3_b4_relu -> L3_b4_sum_eltwise_top (in-place)\nI1212 06:18:16.896719 12086 net.cpp:150] Setting up L3_b4_relu\nI1212 06:18:16.896735 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896744 12086 net.cpp:165] Memory required for data: 1337857500\nI1212 06:18:16.896755 12086 layer_factory.hpp:77] Creating layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.896770 12086 net.cpp:100] Creating Layer L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.896780 12086 net.cpp:434] L3_b4_sum_eltwise_top_L3_b4_relu_0_split <- L3_b4_sum_eltwise_top\nI1212 06:18:16.896796 12086 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:18:16.896816 12086 net.cpp:408] L3_b4_sum_eltwise_top_L3_b4_relu_0_split -> L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:18:16.896914 12086 net.cpp:150] Setting up L3_b4_sum_eltwise_top_L3_b4_relu_0_split\nI1212 06:18:16.896932 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896945 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.896955 12086 net.cpp:165] Memory required for data: 1341953500\nI1212 06:18:16.896965 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_conv\nI1212 06:18:16.896989 12086 net.cpp:100] Creating Layer L3_b5_cbr1_conv\nI1212 06:18:16.897003 12086 net.cpp:434] L3_b5_cbr1_conv <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_0\nI1212 06:18:16.897022 12086 net.cpp:408] L3_b5_cbr1_conv -> L3_b5_cbr1_conv_top\nI1212 06:18:16.898140 12086 net.cpp:150] Setting up L3_b5_cbr1_conv\nI1212 06:18:16.898161 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.898170 12086 net.cpp:165] Memory required for data: 1344001500\nI1212 06:18:16.898188 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_bn\nI1212 06:18:16.898211 12086 net.cpp:100] Creating Layer L3_b5_cbr1_bn\nI1212 06:18:16.898224 12086 net.cpp:434] L3_b5_cbr1_bn <- 
L3_b5_cbr1_conv_top\nI1212 06:18:16.898250 12086 net.cpp:408] L3_b5_cbr1_bn -> L3_b5_cbr1_bn_top\nI1212 06:18:16.898560 12086 net.cpp:150] Setting up L3_b5_cbr1_bn\nI1212 06:18:16.898579 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.898588 12086 net.cpp:165] Memory required for data: 1346049500\nI1212 06:18:16.898610 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:18:16.898627 12086 net.cpp:100] Creating Layer L3_b5_cbr1_scale\nI1212 06:18:16.898638 12086 net.cpp:434] L3_b5_cbr1_scale <- L3_b5_cbr1_bn_top\nI1212 06:18:16.898659 12086 net.cpp:395] L3_b5_cbr1_scale -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.898756 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_scale\nI1212 06:18:16.898962 12086 net.cpp:150] Setting up L3_b5_cbr1_scale\nI1212 06:18:16.898982 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.898991 12086 net.cpp:165] Memory required for data: 1348097500\nI1212 06:18:16.899010 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr1_relu\nI1212 06:18:16.899025 12086 net.cpp:100] Creating Layer L3_b5_cbr1_relu\nI1212 06:18:16.899037 12086 net.cpp:434] L3_b5_cbr1_relu <- L3_b5_cbr1_bn_top\nI1212 06:18:16.899060 12086 net.cpp:395] L3_b5_cbr1_relu -> L3_b5_cbr1_bn_top (in-place)\nI1212 06:18:16.899088 12086 net.cpp:150] Setting up L3_b5_cbr1_relu\nI1212 06:18:16.899104 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.899114 12086 net.cpp:165] Memory required for data: 1350145500\nI1212 06:18:16.899124 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_conv\nI1212 06:18:16.899150 12086 net.cpp:100] Creating Layer L3_b5_cbr2_conv\nI1212 06:18:16.899164 12086 net.cpp:434] L3_b5_cbr2_conv <- L3_b5_cbr1_bn_top\nI1212 06:18:16.899183 12086 net.cpp:408] L3_b5_cbr2_conv -> L3_b5_cbr2_conv_top\nI1212 06:18:16.900285 12086 net.cpp:150] Setting up L3_b5_cbr2_conv\nI1212 06:18:16.900305 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.900315 12086 net.cpp:165] Memory 
required for data: 1352193500\nI1212 06:18:16.900333 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_bn\nI1212 06:18:16.900357 12086 net.cpp:100] Creating Layer L3_b5_cbr2_bn\nI1212 06:18:16.900368 12086 net.cpp:434] L3_b5_cbr2_bn <- L3_b5_cbr2_conv_top\nI1212 06:18:16.900385 12086 net.cpp:408] L3_b5_cbr2_bn -> L3_b5_cbr2_bn_top\nI1212 06:18:16.900707 12086 net.cpp:150] Setting up L3_b5_cbr2_bn\nI1212 06:18:16.900727 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.900738 12086 net.cpp:165] Memory required for data: 1354241500\nI1212 06:18:16.900758 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:18:16.900784 12086 net.cpp:100] Creating Layer L3_b5_cbr2_scale\nI1212 06:18:16.900797 12086 net.cpp:434] L3_b5_cbr2_scale <- L3_b5_cbr2_bn_top\nI1212 06:18:16.900817 12086 net.cpp:395] L3_b5_cbr2_scale -> L3_b5_cbr2_bn_top (in-place)\nI1212 06:18:16.900913 12086 layer_factory.hpp:77] Creating layer L3_b5_cbr2_scale\nI1212 06:18:16.901125 12086 net.cpp:150] Setting up L3_b5_cbr2_scale\nI1212 06:18:16.901145 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.901155 12086 net.cpp:165] Memory required for data: 1356289500\nI1212 06:18:16.901173 12086 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise\nI1212 06:18:16.901192 12086 net.cpp:100] Creating Layer L3_b5_sum_eltwise\nI1212 06:18:16.901204 12086 net.cpp:434] L3_b5_sum_eltwise <- L3_b5_cbr2_bn_top\nI1212 06:18:16.901217 12086 net.cpp:434] L3_b5_sum_eltwise <- L3_b4_sum_eltwise_top_L3_b4_relu_0_split_1\nI1212 06:18:16.901239 12086 net.cpp:408] L3_b5_sum_eltwise -> L3_b5_sum_eltwise_top\nI1212 06:18:16.901295 12086 net.cpp:150] Setting up L3_b5_sum_eltwise\nI1212 06:18:16.901314 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.901324 12086 net.cpp:165] Memory required for data: 1358337500\nI1212 06:18:16.901335 12086 layer_factory.hpp:77] Creating layer L3_b5_relu\nI1212 06:18:16.901355 12086 net.cpp:100] Creating Layer L3_b5_relu\nI1212 
06:18:16.901367 12086 net.cpp:434] L3_b5_relu <- L3_b5_sum_eltwise_top\nI1212 06:18:16.901381 12086 net.cpp:395] L3_b5_relu -> L3_b5_sum_eltwise_top (in-place)\nI1212 06:18:16.901401 12086 net.cpp:150] Setting up L3_b5_relu\nI1212 06:18:16.901415 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.901424 12086 net.cpp:165] Memory required for data: 1360385500\nI1212 06:18:16.901434 12086 layer_factory.hpp:77] Creating layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.901449 12086 net.cpp:100] Creating Layer L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.901461 12086 net.cpp:434] L3_b5_sum_eltwise_top_L3_b5_relu_0_split <- L3_b5_sum_eltwise_top\nI1212 06:18:16.901475 12086 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:18:16.901496 12086 net.cpp:408] L3_b5_sum_eltwise_top_L3_b5_relu_0_split -> L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:18:16.901577 12086 net.cpp:150] Setting up L3_b5_sum_eltwise_top_L3_b5_relu_0_split\nI1212 06:18:16.901597 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.901609 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.901618 12086 net.cpp:165] Memory required for data: 1364481500\nI1212 06:18:16.901629 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_conv\nI1212 06:18:16.901655 12086 net.cpp:100] Creating Layer L3_b6_cbr1_conv\nI1212 06:18:16.901669 12086 net.cpp:434] L3_b6_cbr1_conv <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_0\nI1212 06:18:16.901688 12086 net.cpp:408] L3_b6_cbr1_conv -> L3_b6_cbr1_conv_top\nI1212 06:18:16.902781 12086 net.cpp:150] Setting up L3_b6_cbr1_conv\nI1212 06:18:16.902802 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.902812 12086 net.cpp:165] Memory required for data: 1366529500\nI1212 06:18:16.902829 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_bn\nI1212 06:18:16.902860 12086 net.cpp:100] Creating Layer L3_b6_cbr1_bn\nI1212 
06:18:16.902874 12086 net.cpp:434] L3_b6_cbr1_bn <- L3_b6_cbr1_conv_top\nI1212 06:18:16.902896 12086 net.cpp:408] L3_b6_cbr1_bn -> L3_b6_cbr1_bn_top\nI1212 06:18:16.903234 12086 net.cpp:150] Setting up L3_b6_cbr1_bn\nI1212 06:18:16.903254 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.903264 12086 net.cpp:165] Memory required for data: 1368577500\nI1212 06:18:16.903285 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:18:16.903302 12086 net.cpp:100] Creating Layer L3_b6_cbr1_scale\nI1212 06:18:16.903314 12086 net.cpp:434] L3_b6_cbr1_scale <- L3_b6_cbr1_bn_top\nI1212 06:18:16.903336 12086 net.cpp:395] L3_b6_cbr1_scale -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.903432 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_scale\nI1212 06:18:16.903645 12086 net.cpp:150] Setting up L3_b6_cbr1_scale\nI1212 06:18:16.903663 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.903673 12086 net.cpp:165] Memory required for data: 1370625500\nI1212 06:18:16.903692 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr1_relu\nI1212 06:18:16.903708 12086 net.cpp:100] Creating Layer L3_b6_cbr1_relu\nI1212 06:18:16.903720 12086 net.cpp:434] L3_b6_cbr1_relu <- L3_b6_cbr1_bn_top\nI1212 06:18:16.903739 12086 net.cpp:395] L3_b6_cbr1_relu -> L3_b6_cbr1_bn_top (in-place)\nI1212 06:18:16.903759 12086 net.cpp:150] Setting up L3_b6_cbr1_relu\nI1212 06:18:16.903774 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.903784 12086 net.cpp:165] Memory required for data: 1372673500\nI1212 06:18:16.903794 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_conv\nI1212 06:18:16.903820 12086 net.cpp:100] Creating Layer L3_b6_cbr2_conv\nI1212 06:18:16.903832 12086 net.cpp:434] L3_b6_cbr2_conv <- L3_b6_cbr1_bn_top\nI1212 06:18:16.903856 12086 net.cpp:408] L3_b6_cbr2_conv -> L3_b6_cbr2_conv_top\nI1212 06:18:16.904947 12086 net.cpp:150] Setting up L3_b6_cbr2_conv\nI1212 06:18:16.904968 12086 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:18:16.904978 12086 net.cpp:165] Memory required for data: 1374721500\nI1212 06:18:16.904995 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_bn\nI1212 06:18:16.905014 12086 net.cpp:100] Creating Layer L3_b6_cbr2_bn\nI1212 06:18:16.905025 12086 net.cpp:434] L3_b6_cbr2_bn <- L3_b6_cbr2_conv_top\nI1212 06:18:16.905047 12086 net.cpp:408] L3_b6_cbr2_bn -> L3_b6_cbr2_bn_top\nI1212 06:18:16.905372 12086 net.cpp:150] Setting up L3_b6_cbr2_bn\nI1212 06:18:16.905395 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.905405 12086 net.cpp:165] Memory required for data: 1376769500\nI1212 06:18:16.905427 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:18:16.905444 12086 net.cpp:100] Creating Layer L3_b6_cbr2_scale\nI1212 06:18:16.905457 12086 net.cpp:434] L3_b6_cbr2_scale <- L3_b6_cbr2_bn_top\nI1212 06:18:16.905472 12086 net.cpp:395] L3_b6_cbr2_scale -> L3_b6_cbr2_bn_top (in-place)\nI1212 06:18:16.905567 12086 layer_factory.hpp:77] Creating layer L3_b6_cbr2_scale\nI1212 06:18:16.905773 12086 net.cpp:150] Setting up L3_b6_cbr2_scale\nI1212 06:18:16.905791 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.905802 12086 net.cpp:165] Memory required for data: 1378817500\nI1212 06:18:16.905819 12086 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise\nI1212 06:18:16.905838 12086 net.cpp:100] Creating Layer L3_b6_sum_eltwise\nI1212 06:18:16.905855 12086 net.cpp:434] L3_b6_sum_eltwise <- L3_b6_cbr2_bn_top\nI1212 06:18:16.905869 12086 net.cpp:434] L3_b6_sum_eltwise <- L3_b5_sum_eltwise_top_L3_b5_relu_0_split_1\nI1212 06:18:16.905885 12086 net.cpp:408] L3_b6_sum_eltwise -> L3_b6_sum_eltwise_top\nI1212 06:18:16.905949 12086 net.cpp:150] Setting up L3_b6_sum_eltwise\nI1212 06:18:16.905968 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.905978 12086 net.cpp:165] Memory required for data: 1380865500\nI1212 06:18:16.905988 12086 layer_factory.hpp:77] Creating layer L3_b6_relu\nI1212 
06:18:16.906002 12086 net.cpp:100] Creating Layer L3_b6_relu\nI1212 06:18:16.906014 12086 net.cpp:434] L3_b6_relu <- L3_b6_sum_eltwise_top\nI1212 06:18:16.906038 12086 net.cpp:395] L3_b6_relu -> L3_b6_sum_eltwise_top (in-place)\nI1212 06:18:16.906057 12086 net.cpp:150] Setting up L3_b6_relu\nI1212 06:18:16.906080 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.906091 12086 net.cpp:165] Memory required for data: 1382913500\nI1212 06:18:16.906102 12086 layer_factory.hpp:77] Creating layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.906116 12086 net.cpp:100] Creating Layer L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.906128 12086 net.cpp:434] L3_b6_sum_eltwise_top_L3_b6_relu_0_split <- L3_b6_sum_eltwise_top\nI1212 06:18:16.906144 12086 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:18:16.906165 12086 net.cpp:408] L3_b6_sum_eltwise_top_L3_b6_relu_0_split -> L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:18:16.906252 12086 net.cpp:150] Setting up L3_b6_sum_eltwise_top_L3_b6_relu_0_split\nI1212 06:18:16.906270 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.906283 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.906291 12086 net.cpp:165] Memory required for data: 1387009500\nI1212 06:18:16.906302 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_conv\nI1212 06:18:16.906337 12086 net.cpp:100] Creating Layer L3_b7_cbr1_conv\nI1212 06:18:16.906352 12086 net.cpp:434] L3_b7_cbr1_conv <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_0\nI1212 06:18:16.906370 12086 net.cpp:408] L3_b7_cbr1_conv -> L3_b7_cbr1_conv_top\nI1212 06:18:16.907462 12086 net.cpp:150] Setting up L3_b7_cbr1_conv\nI1212 06:18:16.907482 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.907491 12086 net.cpp:165] Memory required for data: 1389057500\nI1212 06:18:16.907510 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_bn\nI1212 06:18:16.907527 
12086 net.cpp:100] Creating Layer L3_b7_cbr1_bn\nI1212 06:18:16.907539 12086 net.cpp:434] L3_b7_cbr1_bn <- L3_b7_cbr1_conv_top\nI1212 06:18:16.907562 12086 net.cpp:408] L3_b7_cbr1_bn -> L3_b7_cbr1_bn_top\nI1212 06:18:16.907878 12086 net.cpp:150] Setting up L3_b7_cbr1_bn\nI1212 06:18:16.907898 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.907908 12086 net.cpp:165] Memory required for data: 1391105500\nI1212 06:18:16.907929 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:18:16.907946 12086 net.cpp:100] Creating Layer L3_b7_cbr1_scale\nI1212 06:18:16.907958 12086 net.cpp:434] L3_b7_cbr1_scale <- L3_b7_cbr1_bn_top\nI1212 06:18:16.907979 12086 net.cpp:395] L3_b7_cbr1_scale -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.908082 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_scale\nI1212 06:18:16.908288 12086 net.cpp:150] Setting up L3_b7_cbr1_scale\nI1212 06:18:16.908308 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.908318 12086 net.cpp:165] Memory required for data: 1393153500\nI1212 06:18:16.908335 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr1_relu\nI1212 06:18:16.908393 12086 net.cpp:100] Creating Layer L3_b7_cbr1_relu\nI1212 06:18:16.908411 12086 net.cpp:434] L3_b7_cbr1_relu <- L3_b7_cbr1_bn_top\nI1212 06:18:16.908427 12086 net.cpp:395] L3_b7_cbr1_relu -> L3_b7_cbr1_bn_top (in-place)\nI1212 06:18:16.908448 12086 net.cpp:150] Setting up L3_b7_cbr1_relu\nI1212 06:18:16.908463 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.908473 12086 net.cpp:165] Memory required for data: 1395201500\nI1212 06:18:16.908483 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_conv\nI1212 06:18:16.908504 12086 net.cpp:100] Creating Layer L3_b7_cbr2_conv\nI1212 06:18:16.908517 12086 net.cpp:434] L3_b7_cbr2_conv <- L3_b7_cbr1_bn_top\nI1212 06:18:16.908536 12086 net.cpp:408] L3_b7_cbr2_conv -> L3_b7_cbr2_conv_top\nI1212 06:18:16.909809 12086 net.cpp:150] Setting up L3_b7_cbr2_conv\nI1212 
06:18:16.909831 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.909840 12086 net.cpp:165] Memory required for data: 1397249500\nI1212 06:18:16.909858 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_bn\nI1212 06:18:16.909883 12086 net.cpp:100] Creating Layer L3_b7_cbr2_bn\nI1212 06:18:16.909904 12086 net.cpp:434] L3_b7_cbr2_bn <- L3_b7_cbr2_conv_top\nI1212 06:18:16.909929 12086 net.cpp:408] L3_b7_cbr2_bn -> L3_b7_cbr2_bn_top\nI1212 06:18:16.910269 12086 net.cpp:150] Setting up L3_b7_cbr2_bn\nI1212 06:18:16.910289 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.910298 12086 net.cpp:165] Memory required for data: 1399297500\nI1212 06:18:16.910320 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:18:16.910337 12086 net.cpp:100] Creating Layer L3_b7_cbr2_scale\nI1212 06:18:16.910348 12086 net.cpp:434] L3_b7_cbr2_scale <- L3_b7_cbr2_bn_top\nI1212 06:18:16.910369 12086 net.cpp:395] L3_b7_cbr2_scale -> L3_b7_cbr2_bn_top (in-place)\nI1212 06:18:16.910468 12086 layer_factory.hpp:77] Creating layer L3_b7_cbr2_scale\nI1212 06:18:16.910676 12086 net.cpp:150] Setting up L3_b7_cbr2_scale\nI1212 06:18:16.910694 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.910704 12086 net.cpp:165] Memory required for data: 1401345500\nI1212 06:18:16.910723 12086 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise\nI1212 06:18:16.910744 12086 net.cpp:100] Creating Layer L3_b7_sum_eltwise\nI1212 06:18:16.910758 12086 net.cpp:434] L3_b7_sum_eltwise <- L3_b7_cbr2_bn_top\nI1212 06:18:16.910771 12086 net.cpp:434] L3_b7_sum_eltwise <- L3_b6_sum_eltwise_top_L3_b6_relu_0_split_1\nI1212 06:18:16.910787 12086 net.cpp:408] L3_b7_sum_eltwise -> L3_b7_sum_eltwise_top\nI1212 06:18:16.910850 12086 net.cpp:150] Setting up L3_b7_sum_eltwise\nI1212 06:18:16.910868 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.910878 12086 net.cpp:165] Memory required for data: 1403393500\nI1212 06:18:16.910888 12086 
layer_factory.hpp:77] Creating layer L3_b7_relu\nI1212 06:18:16.910903 12086 net.cpp:100] Creating Layer L3_b7_relu\nI1212 06:18:16.910915 12086 net.cpp:434] L3_b7_relu <- L3_b7_sum_eltwise_top\nI1212 06:18:16.910934 12086 net.cpp:395] L3_b7_relu -> L3_b7_sum_eltwise_top (in-place)\nI1212 06:18:16.910954 12086 net.cpp:150] Setting up L3_b7_relu\nI1212 06:18:16.910970 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.910979 12086 net.cpp:165] Memory required for data: 1405441500\nI1212 06:18:16.910989 12086 layer_factory.hpp:77] Creating layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.911005 12086 net.cpp:100] Creating Layer L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.911016 12086 net.cpp:434] L3_b7_sum_eltwise_top_L3_b7_relu_0_split <- L3_b7_sum_eltwise_top\nI1212 06:18:16.911031 12086 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:18:16.911051 12086 net.cpp:408] L3_b7_sum_eltwise_top_L3_b7_relu_0_split -> L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:18:16.911142 12086 net.cpp:150] Setting up L3_b7_sum_eltwise_top_L3_b7_relu_0_split\nI1212 06:18:16.911161 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.911173 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.911183 12086 net.cpp:165] Memory required for data: 1409537500\nI1212 06:18:16.911193 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_conv\nI1212 06:18:16.911213 12086 net.cpp:100] Creating Layer L3_b8_cbr1_conv\nI1212 06:18:16.911226 12086 net.cpp:434] L3_b8_cbr1_conv <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_0\nI1212 06:18:16.911249 12086 net.cpp:408] L3_b8_cbr1_conv -> L3_b8_cbr1_conv_top\nI1212 06:18:16.913416 12086 net.cpp:150] Setting up L3_b8_cbr1_conv\nI1212 06:18:16.913439 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.913449 12086 net.cpp:165] Memory required for data: 1411585500\nI1212 06:18:16.913467 12086 
layer_factory.hpp:77] Creating layer L3_b8_cbr1_bn\nI1212 06:18:16.913496 12086 net.cpp:100] Creating Layer L3_b8_cbr1_bn\nI1212 06:18:16.913511 12086 net.cpp:434] L3_b8_cbr1_bn <- L3_b8_cbr1_conv_top\nI1212 06:18:16.913528 12086 net.cpp:408] L3_b8_cbr1_bn -> L3_b8_cbr1_bn_top\nI1212 06:18:16.913851 12086 net.cpp:150] Setting up L3_b8_cbr1_bn\nI1212 06:18:16.913871 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.913880 12086 net.cpp:165] Memory required for data: 1413633500\nI1212 06:18:16.913913 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:18:16.913934 12086 net.cpp:100] Creating Layer L3_b8_cbr1_scale\nI1212 06:18:16.913947 12086 net.cpp:434] L3_b8_cbr1_scale <- L3_b8_cbr1_bn_top\nI1212 06:18:16.913964 12086 net.cpp:395] L3_b8_cbr1_scale -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.914075 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_scale\nI1212 06:18:16.914288 12086 net.cpp:150] Setting up L3_b8_cbr1_scale\nI1212 06:18:16.914306 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.914316 12086 net.cpp:165] Memory required for data: 1415681500\nI1212 06:18:16.914336 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr1_relu\nI1212 06:18:16.914357 12086 net.cpp:100] Creating Layer L3_b8_cbr1_relu\nI1212 06:18:16.914371 12086 net.cpp:434] L3_b8_cbr1_relu <- L3_b8_cbr1_bn_top\nI1212 06:18:16.914386 12086 net.cpp:395] L3_b8_cbr1_relu -> L3_b8_cbr1_bn_top (in-place)\nI1212 06:18:16.914405 12086 net.cpp:150] Setting up L3_b8_cbr1_relu\nI1212 06:18:16.914419 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.914430 12086 net.cpp:165] Memory required for data: 1417729500\nI1212 06:18:16.914440 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_conv\nI1212 06:18:16.914466 12086 net.cpp:100] Creating Layer L3_b8_cbr2_conv\nI1212 06:18:16.914480 12086 net.cpp:434] L3_b8_cbr2_conv <- L3_b8_cbr1_bn_top\nI1212 06:18:16.914503 12086 net.cpp:408] L3_b8_cbr2_conv -> L3_b8_cbr2_conv_top\nI1212 
06:18:16.915591 12086 net.cpp:150] Setting up L3_b8_cbr2_conv\nI1212 06:18:16.915611 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.915621 12086 net.cpp:165] Memory required for data: 1419777500\nI1212 06:18:16.915637 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_bn\nI1212 06:18:16.915655 12086 net.cpp:100] Creating Layer L3_b8_cbr2_bn\nI1212 06:18:16.915668 12086 net.cpp:434] L3_b8_cbr2_bn <- L3_b8_cbr2_conv_top\nI1212 06:18:16.915690 12086 net.cpp:408] L3_b8_cbr2_bn -> L3_b8_cbr2_bn_top\nI1212 06:18:16.916016 12086 net.cpp:150] Setting up L3_b8_cbr2_bn\nI1212 06:18:16.916035 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916045 12086 net.cpp:165] Memory required for data: 1421825500\nI1212 06:18:16.916067 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:18:16.916091 12086 net.cpp:100] Creating Layer L3_b8_cbr2_scale\nI1212 06:18:16.916105 12086 net.cpp:434] L3_b8_cbr2_scale <- L3_b8_cbr2_bn_top\nI1212 06:18:16.916121 12086 net.cpp:395] L3_b8_cbr2_scale -> L3_b8_cbr2_bn_top (in-place)\nI1212 06:18:16.916224 12086 layer_factory.hpp:77] Creating layer L3_b8_cbr2_scale\nI1212 06:18:16.916427 12086 net.cpp:150] Setting up L3_b8_cbr2_scale\nI1212 06:18:16.916451 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916461 12086 net.cpp:165] Memory required for data: 1423873500\nI1212 06:18:16.916481 12086 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise\nI1212 06:18:16.916498 12086 net.cpp:100] Creating Layer L3_b8_sum_eltwise\nI1212 06:18:16.916510 12086 net.cpp:434] L3_b8_sum_eltwise <- L3_b8_cbr2_bn_top\nI1212 06:18:16.916524 12086 net.cpp:434] L3_b8_sum_eltwise <- L3_b7_sum_eltwise_top_L3_b7_relu_0_split_1\nI1212 06:18:16.916540 12086 net.cpp:408] L3_b8_sum_eltwise -> L3_b8_sum_eltwise_top\nI1212 06:18:16.916602 12086 net.cpp:150] Setting up L3_b8_sum_eltwise\nI1212 06:18:16.916621 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916631 12086 net.cpp:165] Memory 
required for data: 1425921500\nI1212 06:18:16.916641 12086 layer_factory.hpp:77] Creating layer L3_b8_relu\nI1212 06:18:16.916656 12086 net.cpp:100] Creating Layer L3_b8_relu\nI1212 06:18:16.916668 12086 net.cpp:434] L3_b8_relu <- L3_b8_sum_eltwise_top\nI1212 06:18:16.916682 12086 net.cpp:395] L3_b8_relu -> L3_b8_sum_eltwise_top (in-place)\nI1212 06:18:16.916702 12086 net.cpp:150] Setting up L3_b8_relu\nI1212 06:18:16.916716 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916725 12086 net.cpp:165] Memory required for data: 1427969500\nI1212 06:18:16.916734 12086 layer_factory.hpp:77] Creating layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.916759 12086 net.cpp:100] Creating Layer L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.916771 12086 net.cpp:434] L3_b8_sum_eltwise_top_L3_b8_relu_0_split <- L3_b8_sum_eltwise_top\nI1212 06:18:16.916791 12086 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:18:16.916813 12086 net.cpp:408] L3_b8_sum_eltwise_top_L3_b8_relu_0_split -> L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:18:16.916900 12086 net.cpp:150] Setting up L3_b8_sum_eltwise_top_L3_b8_relu_0_split\nI1212 06:18:16.916919 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916932 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.916942 12086 net.cpp:165] Memory required for data: 1432065500\nI1212 06:18:16.916952 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_conv\nI1212 06:18:16.916980 12086 net.cpp:100] Creating Layer L3_b9_cbr1_conv\nI1212 06:18:16.916996 12086 net.cpp:434] L3_b9_cbr1_conv <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_0\nI1212 06:18:16.917014 12086 net.cpp:408] L3_b9_cbr1_conv -> L3_b9_cbr1_conv_top\nI1212 06:18:16.918107 12086 net.cpp:150] Setting up L3_b9_cbr1_conv\nI1212 06:18:16.918128 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.918138 12086 net.cpp:165] Memory required for data: 
1434113500\nI1212 06:18:16.918155 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_bn\nI1212 06:18:16.918179 12086 net.cpp:100] Creating Layer L3_b9_cbr1_bn\nI1212 06:18:16.918190 12086 net.cpp:434] L3_b9_cbr1_bn <- L3_b9_cbr1_conv_top\nI1212 06:18:16.918207 12086 net.cpp:408] L3_b9_cbr1_bn -> L3_b9_cbr1_bn_top\nI1212 06:18:16.918532 12086 net.cpp:150] Setting up L3_b9_cbr1_bn\nI1212 06:18:16.918551 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.918561 12086 net.cpp:165] Memory required for data: 1436161500\nI1212 06:18:16.918582 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:18:16.918606 12086 net.cpp:100] Creating Layer L3_b9_cbr1_scale\nI1212 06:18:16.918618 12086 net.cpp:434] L3_b9_cbr1_scale <- L3_b9_cbr1_bn_top\nI1212 06:18:16.918635 12086 net.cpp:395] L3_b9_cbr1_scale -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.918736 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_scale\nI1212 06:18:16.918944 12086 net.cpp:150] Setting up L3_b9_cbr1_scale\nI1212 06:18:16.918963 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.918973 12086 net.cpp:165] Memory required for data: 1438209500\nI1212 06:18:16.918992 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr1_relu\nI1212 06:18:16.919020 12086 net.cpp:100] Creating Layer L3_b9_cbr1_relu\nI1212 06:18:16.919034 12086 net.cpp:434] L3_b9_cbr1_relu <- L3_b9_cbr1_bn_top\nI1212 06:18:16.919052 12086 net.cpp:395] L3_b9_cbr1_relu -> L3_b9_cbr1_bn_top (in-place)\nI1212 06:18:16.919080 12086 net.cpp:150] Setting up L3_b9_cbr1_relu\nI1212 06:18:16.919095 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.919106 12086 net.cpp:165] Memory required for data: 1440257500\nI1212 06:18:16.919116 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_conv\nI1212 06:18:16.919137 12086 net.cpp:100] Creating Layer L3_b9_cbr2_conv\nI1212 06:18:16.919149 12086 net.cpp:434] L3_b9_cbr2_conv <- L3_b9_cbr1_bn_top\nI1212 06:18:16.919172 12086 net.cpp:408] 
L3_b9_cbr2_conv -> L3_b9_cbr2_conv_top\nI1212 06:18:16.920250 12086 net.cpp:150] Setting up L3_b9_cbr2_conv\nI1212 06:18:16.920270 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.920279 12086 net.cpp:165] Memory required for data: 1442305500\nI1212 06:18:16.920297 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_bn\nI1212 06:18:16.920322 12086 net.cpp:100] Creating Layer L3_b9_cbr2_bn\nI1212 06:18:16.920336 12086 net.cpp:434] L3_b9_cbr2_bn <- L3_b9_cbr2_conv_top\nI1212 06:18:16.920353 12086 net.cpp:408] L3_b9_cbr2_bn -> L3_b9_cbr2_bn_top\nI1212 06:18:16.920678 12086 net.cpp:150] Setting up L3_b9_cbr2_bn\nI1212 06:18:16.920697 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.920707 12086 net.cpp:165] Memory required for data: 1444353500\nI1212 06:18:16.920738 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:18:16.920755 12086 net.cpp:100] Creating Layer L3_b9_cbr2_scale\nI1212 06:18:16.920768 12086 net.cpp:434] L3_b9_cbr2_scale <- L3_b9_cbr2_bn_top\nI1212 06:18:16.920784 12086 net.cpp:395] L3_b9_cbr2_scale -> L3_b9_cbr2_bn_top (in-place)\nI1212 06:18:16.920891 12086 layer_factory.hpp:77] Creating layer L3_b9_cbr2_scale\nI1212 06:18:16.921103 12086 net.cpp:150] Setting up L3_b9_cbr2_scale\nI1212 06:18:16.921130 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.921142 12086 net.cpp:165] Memory required for data: 1446401500\nI1212 06:18:16.921161 12086 layer_factory.hpp:77] Creating layer L3_b9_sum_eltwise\nI1212 06:18:16.921178 12086 net.cpp:100] Creating Layer L3_b9_sum_eltwise\nI1212 06:18:16.921192 12086 net.cpp:434] L3_b9_sum_eltwise <- L3_b9_cbr2_bn_top\nI1212 06:18:16.921206 12086 net.cpp:434] L3_b9_sum_eltwise <- L3_b8_sum_eltwise_top_L3_b8_relu_0_split_1\nI1212 06:18:16.921222 12086 net.cpp:408] L3_b9_sum_eltwise -> L3_b9_sum_eltwise_top\nI1212 06:18:16.921285 12086 net.cpp:150] Setting up L3_b9_sum_eltwise\nI1212 06:18:16.921303 12086 net.cpp:157] Top shape: 125 64 8 8 
(512000)\nI1212 06:18:16.921314 12086 net.cpp:165] Memory required for data: 1448449500\nI1212 06:18:16.921324 12086 layer_factory.hpp:77] Creating layer L3_b9_relu\nI1212 06:18:16.921339 12086 net.cpp:100] Creating Layer L3_b9_relu\nI1212 06:18:16.921350 12086 net.cpp:434] L3_b9_relu <- L3_b9_sum_eltwise_top\nI1212 06:18:16.921365 12086 net.cpp:395] L3_b9_relu -> L3_b9_sum_eltwise_top (in-place)\nI1212 06:18:16.921383 12086 net.cpp:150] Setting up L3_b9_relu\nI1212 06:18:16.921398 12086 net.cpp:157] Top shape: 125 64 8 8 (512000)\nI1212 06:18:16.921407 12086 net.cpp:165] Memory required for data: 1450497500\nI1212 06:18:16.921419 12086 layer_factory.hpp:77] Creating layer post_pool\nI1212 06:18:16.921439 12086 net.cpp:100] Creating Layer post_pool\nI1212 06:18:16.921452 12086 net.cpp:434] post_pool <- L3_b9_sum_eltwise_top\nI1212 06:18:16.921469 12086 net.cpp:408] post_pool -> post_pool\nI1212 06:18:16.921530 12086 net.cpp:150] Setting up post_pool\nI1212 06:18:16.921550 12086 net.cpp:157] Top shape: 125 64 1 1 (8000)\nI1212 06:18:16.921560 12086 net.cpp:165] Memory required for data: 1450529500\nI1212 06:18:16.921571 12086 layer_factory.hpp:77] Creating layer post_FC\nI1212 06:18:16.921591 12086 net.cpp:100] Creating Layer post_FC\nI1212 06:18:16.921602 12086 net.cpp:434] post_FC <- post_pool\nI1212 06:18:16.921624 12086 net.cpp:408] post_FC -> post_FC_top\nI1212 06:18:16.921833 12086 net.cpp:150] Setting up post_FC\nI1212 06:18:16.921851 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.921861 12086 net.cpp:165] Memory required for data: 1450534500\nI1212 06:18:16.921880 12086 layer_factory.hpp:77] Creating layer post_FC_top_post_FC_0_split\nI1212 06:18:16.921900 12086 net.cpp:100] Creating Layer post_FC_top_post_FC_0_split\nI1212 06:18:16.921912 12086 net.cpp:434] post_FC_top_post_FC_0_split <- post_FC_top\nI1212 06:18:16.921928 12086 net.cpp:408] post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_0\nI1212 06:18:16.921953 12086 net.cpp:408] 
post_FC_top_post_FC_0_split -> post_FC_top_post_FC_0_split_1\nI1212 06:18:16.922049 12086 net.cpp:150] Setting up post_FC_top_post_FC_0_split\nI1212 06:18:16.922068 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.922088 12086 net.cpp:157] Top shape: 125 10 (1250)\nI1212 06:18:16.922099 12086 net.cpp:165] Memory required for data: 1450544500\nI1212 06:18:16.922111 12086 layer_factory.hpp:77] Creating layer accuracy\nI1212 06:18:16.922125 12086 net.cpp:100] Creating Layer accuracy\nI1212 06:18:16.922137 12086 net.cpp:434] accuracy <- post_FC_top_post_FC_0_split_0\nI1212 06:18:16.922152 12086 net.cpp:434] accuracy <- label_dataLayer_1_split_0\nI1212 06:18:16.922166 12086 net.cpp:408] accuracy -> accuracy\nI1212 06:18:16.922193 12086 net.cpp:150] Setting up accuracy\nI1212 06:18:16.922209 12086 net.cpp:157] Top shape: (1)\nI1212 06:18:16.922217 12086 net.cpp:165] Memory required for data: 1450544504\nI1212 06:18:16.922236 12086 layer_factory.hpp:77] Creating layer loss\nI1212 06:18:16.922256 12086 net.cpp:100] Creating Layer loss\nI1212 06:18:16.922268 12086 net.cpp:434] loss <- post_FC_top_post_FC_0_split_1\nI1212 06:18:16.922282 12086 net.cpp:434] loss <- label_dataLayer_1_split_1\nI1212 06:18:16.922297 12086 net.cpp:408] loss -> loss\nI1212 06:18:16.922322 12086 layer_factory.hpp:77] Creating layer loss\nI1212 06:18:16.922495 12086 net.cpp:150] Setting up loss\nI1212 06:18:16.922518 12086 net.cpp:157] Top shape: (1)\nI1212 06:18:16.922528 12086 net.cpp:160]     with loss weight 1\nI1212 06:18:16.922554 12086 net.cpp:165] Memory required for data: 1450544508\nI1212 06:18:16.922567 12086 net.cpp:226] loss needs backward computation.\nI1212 06:18:16.922579 12086 net.cpp:228] accuracy does not need backward computation.\nI1212 06:18:16.922590 12086 net.cpp:226] post_FC_top_post_FC_0_split needs backward computation.\nI1212 06:18:16.922600 12086 net.cpp:226] post_FC needs backward computation.\nI1212 06:18:16.922611 12086 net.cpp:226] post_pool needs backward 
computation.\nI1212 06:18:16.922621 12086 net.cpp:226] L3_b9_relu needs backward computation.\nI1212 06:18:16.922631 12086 net.cpp:226] L3_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.922641 12086 net.cpp:226] L3_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.922652 12086 net.cpp:226] L3_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.922662 12086 net.cpp:226] L3_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.922672 12086 net.cpp:226] L3_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.922683 12086 net.cpp:226] L3_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.922691 12086 net.cpp:226] L3_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.922703 12086 net.cpp:226] L3_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.922713 12086 net.cpp:226] L3_b8_sum_eltwise_top_L3_b8_relu_0_split needs backward computation.\nI1212 06:18:16.922724 12086 net.cpp:226] L3_b8_relu needs backward computation.\nI1212 06:18:16.922734 12086 net.cpp:226] L3_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.922744 12086 net.cpp:226] L3_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.922755 12086 net.cpp:226] L3_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.922766 12086 net.cpp:226] L3_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.922776 12086 net.cpp:226] L3_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.922786 12086 net.cpp:226] L3_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.922796 12086 net.cpp:226] L3_b8_cbr1_bn needs backward computation.\nI1212 06:18:16.922807 12086 net.cpp:226] L3_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.922817 12086 net.cpp:226] L3_b7_sum_eltwise_top_L3_b7_relu_0_split needs backward computation.\nI1212 06:18:16.922827 12086 net.cpp:226] L3_b7_relu needs backward computation.\nI1212 06:18:16.922837 12086 net.cpp:226] L3_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.922849 12086 net.cpp:226] 
L3_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.922860 12086 net.cpp:226] L3_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.922870 12086 net.cpp:226] L3_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.922879 12086 net.cpp:226] L3_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.922890 12086 net.cpp:226] L3_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.922900 12086 net.cpp:226] L3_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.922911 12086 net.cpp:226] L3_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.922921 12086 net.cpp:226] L3_b6_sum_eltwise_top_L3_b6_relu_0_split needs backward computation.\nI1212 06:18:16.922932 12086 net.cpp:226] L3_b6_relu needs backward computation.\nI1212 06:18:16.922942 12086 net.cpp:226] L3_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.922953 12086 net.cpp:226] L3_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.922963 12086 net.cpp:226] L3_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.922973 12086 net.cpp:226] L3_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.922994 12086 net.cpp:226] L3_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.923005 12086 net.cpp:226] L3_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.923015 12086 net.cpp:226] L3_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.923027 12086 net.cpp:226] L3_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.923038 12086 net.cpp:226] L3_b5_sum_eltwise_top_L3_b5_relu_0_split needs backward computation.\nI1212 06:18:16.923048 12086 net.cpp:226] L3_b5_relu needs backward computation.\nI1212 06:18:16.923058 12086 net.cpp:226] L3_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.923077 12086 net.cpp:226] L3_b5_cbr2_scale needs backward computation.\nI1212 06:18:16.923090 12086 net.cpp:226] L3_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.923101 12086 net.cpp:226] L3_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.923112 
12086 net.cpp:226] L3_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.923122 12086 net.cpp:226] L3_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.923133 12086 net.cpp:226] L3_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.923144 12086 net.cpp:226] L3_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.923156 12086 net.cpp:226] L3_b4_sum_eltwise_top_L3_b4_relu_0_split needs backward computation.\nI1212 06:18:16.923166 12086 net.cpp:226] L3_b4_relu needs backward computation.\nI1212 06:18:16.923177 12086 net.cpp:226] L3_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.923188 12086 net.cpp:226] L3_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.923205 12086 net.cpp:226] L3_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.923216 12086 net.cpp:226] L3_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.923226 12086 net.cpp:226] L3_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.923238 12086 net.cpp:226] L3_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.923247 12086 net.cpp:226] L3_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.923257 12086 net.cpp:226] L3_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.923269 12086 net.cpp:226] L3_b3_sum_eltwise_top_L3_b3_relu_0_split needs backward computation.\nI1212 06:18:16.923280 12086 net.cpp:226] L3_b3_relu needs backward computation.\nI1212 06:18:16.923290 12086 net.cpp:226] L3_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.923301 12086 net.cpp:226] L3_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.923312 12086 net.cpp:226] L3_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.923323 12086 net.cpp:226] L3_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.923333 12086 net.cpp:226] L3_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.923344 12086 net.cpp:226] L3_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.923354 12086 net.cpp:226] L3_b3_cbr1_bn needs backward 
computation.\nI1212 06:18:16.923367 12086 net.cpp:226] L3_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.923377 12086 net.cpp:226] L3_b2_sum_eltwise_top_L3_b2_relu_0_split needs backward computation.\nI1212 06:18:16.923388 12086 net.cpp:226] L3_b2_relu needs backward computation.\nI1212 06:18:16.923398 12086 net.cpp:226] L3_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.923410 12086 net.cpp:226] L3_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.923419 12086 net.cpp:226] L3_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.923431 12086 net.cpp:226] L3_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.923442 12086 net.cpp:226] L3_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.923452 12086 net.cpp:226] L3_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.923462 12086 net.cpp:226] L3_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.923473 12086 net.cpp:226] L3_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.923485 12086 net.cpp:226] L3_b1_concat0_L3_b1_concat0_0_split needs backward computation.\nI1212 06:18:16.923496 12086 net.cpp:226] L3_b1_concat0 needs backward computation.\nI1212 06:18:16.923507 12086 net.cpp:228] L3_b1_zeros does not need backward computation.\nI1212 06:18:16.923532 12086 net.cpp:226] L3_b1_relu needs backward computation.\nI1212 06:18:16.923543 12086 net.cpp:226] L3_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.923555 12086 net.cpp:226] L3_b1_pool needs backward computation.\nI1212 06:18:16.923566 12086 net.cpp:226] L3_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.923578 12086 net.cpp:226] L3_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.923589 12086 net.cpp:226] L3_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.923599 12086 net.cpp:226] L3_b1_cbr1_relu needs backward computation.\nI1212 06:18:16.923610 12086 net.cpp:226] L3_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.923619 12086 net.cpp:226] L3_b1_cbr1_bn 
needs backward computation.\nI1212 06:18:16.923631 12086 net.cpp:226] L3_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.923642 12086 net.cpp:226] L2_b9_sum_eltwise_top_L2_b9_relu_0_split needs backward computation.\nI1212 06:18:16.923653 12086 net.cpp:226] L2_b9_relu needs backward computation.\nI1212 06:18:16.923663 12086 net.cpp:226] L2_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.923676 12086 net.cpp:226] L2_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.923686 12086 net.cpp:226] L2_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.923696 12086 net.cpp:226] L2_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.923707 12086 net.cpp:226] L2_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.923718 12086 net.cpp:226] L2_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.923729 12086 net.cpp:226] L2_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.923740 12086 net.cpp:226] L2_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.923751 12086 net.cpp:226] L2_b8_sum_eltwise_top_L2_b8_relu_0_split needs backward computation.\nI1212 06:18:16.923763 12086 net.cpp:226] L2_b8_relu needs backward computation.\nI1212 06:18:16.923774 12086 net.cpp:226] L2_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.923785 12086 net.cpp:226] L2_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.923795 12086 net.cpp:226] L2_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.923807 12086 net.cpp:226] L2_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.923818 12086 net.cpp:226] L2_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.923830 12086 net.cpp:226] L2_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.923840 12086 net.cpp:226] L2_b8_cbr1_bn needs backward computation.\nI1212 06:18:16.923851 12086 net.cpp:226] L2_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.923861 12086 net.cpp:226] L2_b7_sum_eltwise_top_L2_b7_relu_0_split needs backward computation.\nI1212 
06:18:16.923872 12086 net.cpp:226] L2_b7_relu needs backward computation.\nI1212 06:18:16.923882 12086 net.cpp:226] L2_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.923894 12086 net.cpp:226] L2_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.923905 12086 net.cpp:226] L2_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.923916 12086 net.cpp:226] L2_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.923936 12086 net.cpp:226] L2_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.923949 12086 net.cpp:226] L2_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.923959 12086 net.cpp:226] L2_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.923970 12086 net.cpp:226] L2_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.923982 12086 net.cpp:226] L2_b6_sum_eltwise_top_L2_b6_relu_0_split needs backward computation.\nI1212 06:18:16.923993 12086 net.cpp:226] L2_b6_relu needs backward computation.\nI1212 06:18:16.924005 12086 net.cpp:226] L2_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.924015 12086 net.cpp:226] L2_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.924026 12086 net.cpp:226] L2_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.924037 12086 net.cpp:226] L2_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.924048 12086 net.cpp:226] L2_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.924059 12086 net.cpp:226] L2_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.924088 12086 net.cpp:226] L2_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.924100 12086 net.cpp:226] L2_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.924111 12086 net.cpp:226] L2_b5_sum_eltwise_top_L2_b5_relu_0_split needs backward computation.\nI1212 06:18:16.924124 12086 net.cpp:226] L2_b5_relu needs backward computation.\nI1212 06:18:16.924134 12086 net.cpp:226] L2_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.924144 12086 net.cpp:226] L2_b5_cbr2_scale needs backward 
computation.\nI1212 06:18:16.924154 12086 net.cpp:226] L2_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.924166 12086 net.cpp:226] L2_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.924176 12086 net.cpp:226] L2_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.924186 12086 net.cpp:226] L2_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.924196 12086 net.cpp:226] L2_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.924207 12086 net.cpp:226] L2_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.924219 12086 net.cpp:226] L2_b4_sum_eltwise_top_L2_b4_relu_0_split needs backward computation.\nI1212 06:18:16.924230 12086 net.cpp:226] L2_b4_relu needs backward computation.\nI1212 06:18:16.924242 12086 net.cpp:226] L2_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.924253 12086 net.cpp:226] L2_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.924264 12086 net.cpp:226] L2_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.924274 12086 net.cpp:226] L2_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.924285 12086 net.cpp:226] L2_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.924298 12086 net.cpp:226] L2_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.924307 12086 net.cpp:226] L2_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.924319 12086 net.cpp:226] L2_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.924331 12086 net.cpp:226] L2_b3_sum_eltwise_top_L2_b3_relu_0_split needs backward computation.\nI1212 06:18:16.924343 12086 net.cpp:226] L2_b3_relu needs backward computation.\nI1212 06:18:16.924353 12086 net.cpp:226] L2_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.924365 12086 net.cpp:226] L2_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.924377 12086 net.cpp:226] L2_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.924388 12086 net.cpp:226] L2_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.924399 12086 net.cpp:226] 
L2_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.924410 12086 net.cpp:226] L2_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.924422 12086 net.cpp:226] L2_b3_cbr1_bn needs backward computation.\nI1212 06:18:16.924433 12086 net.cpp:226] L2_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.924444 12086 net.cpp:226] L2_b2_sum_eltwise_top_L2_b2_relu_0_split needs backward computation.\nI1212 06:18:16.924456 12086 net.cpp:226] L2_b2_relu needs backward computation.\nI1212 06:18:16.924468 12086 net.cpp:226] L2_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.924479 12086 net.cpp:226] L2_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.924489 12086 net.cpp:226] L2_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.924501 12086 net.cpp:226] L2_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.924513 12086 net.cpp:226] L2_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.924523 12086 net.cpp:226] L2_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.924535 12086 net.cpp:226] L2_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.924546 12086 net.cpp:226] L2_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.924558 12086 net.cpp:226] L2_b1_concat0_L2_b1_concat0_0_split needs backward computation.\nI1212 06:18:16.924569 12086 net.cpp:226] L2_b1_concat0 needs backward computation.\nI1212 06:18:16.924582 12086 net.cpp:228] L2_b1_zeros does not need backward computation.\nI1212 06:18:16.924593 12086 net.cpp:226] L2_b1_relu needs backward computation.\nI1212 06:18:16.924603 12086 net.cpp:226] L2_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.924623 12086 net.cpp:226] L2_b1_pool needs backward computation.\nI1212 06:18:16.924635 12086 net.cpp:226] L2_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.924646 12086 net.cpp:226] L2_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.924659 12086 net.cpp:226] L2_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.924669 12086 
net.cpp:226] L2_b1_cbr1_relu needs backward computation.\nI1212 06:18:16.924680 12086 net.cpp:226] L2_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.924690 12086 net.cpp:226] L2_b1_cbr1_bn needs backward computation.\nI1212 06:18:16.924701 12086 net.cpp:226] L2_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.924713 12086 net.cpp:226] L1_b9_sum_eltwise_top_L1_b9_relu_0_split needs backward computation.\nI1212 06:18:16.924724 12086 net.cpp:226] L1_b9_relu needs backward computation.\nI1212 06:18:16.924734 12086 net.cpp:226] L1_b9_sum_eltwise needs backward computation.\nI1212 06:18:16.924747 12086 net.cpp:226] L1_b9_cbr2_scale needs backward computation.\nI1212 06:18:16.924757 12086 net.cpp:226] L1_b9_cbr2_bn needs backward computation.\nI1212 06:18:16.924768 12086 net.cpp:226] L1_b9_cbr2_conv needs backward computation.\nI1212 06:18:16.924782 12086 net.cpp:226] L1_b9_cbr1_relu needs backward computation.\nI1212 06:18:16.924793 12086 net.cpp:226] L1_b9_cbr1_scale needs backward computation.\nI1212 06:18:16.924803 12086 net.cpp:226] L1_b9_cbr1_bn needs backward computation.\nI1212 06:18:16.924815 12086 net.cpp:226] L1_b9_cbr1_conv needs backward computation.\nI1212 06:18:16.924827 12086 net.cpp:226] L1_b8_sum_eltwise_top_L1_b8_relu_0_split needs backward computation.\nI1212 06:18:16.924839 12086 net.cpp:226] L1_b8_relu needs backward computation.\nI1212 06:18:16.924850 12086 net.cpp:226] L1_b8_sum_eltwise needs backward computation.\nI1212 06:18:16.924862 12086 net.cpp:226] L1_b8_cbr2_scale needs backward computation.\nI1212 06:18:16.924875 12086 net.cpp:226] L1_b8_cbr2_bn needs backward computation.\nI1212 06:18:16.924886 12086 net.cpp:226] L1_b8_cbr2_conv needs backward computation.\nI1212 06:18:16.924897 12086 net.cpp:226] L1_b8_cbr1_relu needs backward computation.\nI1212 06:18:16.924908 12086 net.cpp:226] L1_b8_cbr1_scale needs backward computation.\nI1212 06:18:16.924919 12086 net.cpp:226] L1_b8_cbr1_bn needs backward computation.\nI1212 
06:18:16.924932 12086 net.cpp:226] L1_b8_cbr1_conv needs backward computation.\nI1212 06:18:16.924942 12086 net.cpp:226] L1_b7_sum_eltwise_top_L1_b7_relu_0_split needs backward computation.\nI1212 06:18:16.924954 12086 net.cpp:226] L1_b7_relu needs backward computation.\nI1212 06:18:16.924965 12086 net.cpp:226] L1_b7_sum_eltwise needs backward computation.\nI1212 06:18:16.924978 12086 net.cpp:226] L1_b7_cbr2_scale needs backward computation.\nI1212 06:18:16.924989 12086 net.cpp:226] L1_b7_cbr2_bn needs backward computation.\nI1212 06:18:16.925000 12086 net.cpp:226] L1_b7_cbr2_conv needs backward computation.\nI1212 06:18:16.925011 12086 net.cpp:226] L1_b7_cbr1_relu needs backward computation.\nI1212 06:18:16.925022 12086 net.cpp:226] L1_b7_cbr1_scale needs backward computation.\nI1212 06:18:16.925034 12086 net.cpp:226] L1_b7_cbr1_bn needs backward computation.\nI1212 06:18:16.925045 12086 net.cpp:226] L1_b7_cbr1_conv needs backward computation.\nI1212 06:18:16.925057 12086 net.cpp:226] L1_b6_sum_eltwise_top_L1_b6_relu_0_split needs backward computation.\nI1212 06:18:16.925068 12086 net.cpp:226] L1_b6_relu needs backward computation.\nI1212 06:18:16.925087 12086 net.cpp:226] L1_b6_sum_eltwise needs backward computation.\nI1212 06:18:16.925101 12086 net.cpp:226] L1_b6_cbr2_scale needs backward computation.\nI1212 06:18:16.925112 12086 net.cpp:226] L1_b6_cbr2_bn needs backward computation.\nI1212 06:18:16.925123 12086 net.cpp:226] L1_b6_cbr2_conv needs backward computation.\nI1212 06:18:16.925135 12086 net.cpp:226] L1_b6_cbr1_relu needs backward computation.\nI1212 06:18:16.925145 12086 net.cpp:226] L1_b6_cbr1_scale needs backward computation.\nI1212 06:18:16.925156 12086 net.cpp:226] L1_b6_cbr1_bn needs backward computation.\nI1212 06:18:16.925176 12086 net.cpp:226] L1_b6_cbr1_conv needs backward computation.\nI1212 06:18:16.925189 12086 net.cpp:226] L1_b5_sum_eltwise_top_L1_b5_relu_0_split needs backward computation.\nI1212 06:18:16.925199 12086 net.cpp:226] 
L1_b5_relu needs backward computation.\nI1212 06:18:16.925210 12086 net.cpp:226] L1_b5_sum_eltwise needs backward computation.\nI1212 06:18:16.925222 12086 net.cpp:226] L1_b5_cbr2_scale needs backward computation.\nI1212 06:18:16.925233 12086 net.cpp:226] L1_b5_cbr2_bn needs backward computation.\nI1212 06:18:16.925245 12086 net.cpp:226] L1_b5_cbr2_conv needs backward computation.\nI1212 06:18:16.925256 12086 net.cpp:226] L1_b5_cbr1_relu needs backward computation.\nI1212 06:18:16.925266 12086 net.cpp:226] L1_b5_cbr1_scale needs backward computation.\nI1212 06:18:16.925277 12086 net.cpp:226] L1_b5_cbr1_bn needs backward computation.\nI1212 06:18:16.925288 12086 net.cpp:226] L1_b5_cbr1_conv needs backward computation.\nI1212 06:18:16.925300 12086 net.cpp:226] L1_b4_sum_eltwise_top_L1_b4_relu_0_split needs backward computation.\nI1212 06:18:16.925312 12086 net.cpp:226] L1_b4_relu needs backward computation.\nI1212 06:18:16.925323 12086 net.cpp:226] L1_b4_sum_eltwise needs backward computation.\nI1212 06:18:16.925339 12086 net.cpp:226] L1_b4_cbr2_scale needs backward computation.\nI1212 06:18:16.925351 12086 net.cpp:226] L1_b4_cbr2_bn needs backward computation.\nI1212 06:18:16.925364 12086 net.cpp:226] L1_b4_cbr2_conv needs backward computation.\nI1212 06:18:16.925375 12086 net.cpp:226] L1_b4_cbr1_relu needs backward computation.\nI1212 06:18:16.925386 12086 net.cpp:226] L1_b4_cbr1_scale needs backward computation.\nI1212 06:18:16.925396 12086 net.cpp:226] L1_b4_cbr1_bn needs backward computation.\nI1212 06:18:16.925410 12086 net.cpp:226] L1_b4_cbr1_conv needs backward computation.\nI1212 06:18:16.925420 12086 net.cpp:226] L1_b3_sum_eltwise_top_L1_b3_relu_0_split needs backward computation.\nI1212 06:18:16.925431 12086 net.cpp:226] L1_b3_relu needs backward computation.\nI1212 06:18:16.925442 12086 net.cpp:226] L1_b3_sum_eltwise needs backward computation.\nI1212 06:18:16.925454 12086 net.cpp:226] L1_b3_cbr2_scale needs backward computation.\nI1212 06:18:16.925467 
12086 net.cpp:226] L1_b3_cbr2_bn needs backward computation.\nI1212 06:18:16.925478 12086 net.cpp:226] L1_b3_cbr2_conv needs backward computation.\nI1212 06:18:16.925489 12086 net.cpp:226] L1_b3_cbr1_relu needs backward computation.\nI1212 06:18:16.925500 12086 net.cpp:226] L1_b3_cbr1_scale needs backward computation.\nI1212 06:18:16.925511 12086 net.cpp:226] L1_b3_cbr1_bn needs backward computation.\nI1212 06:18:16.925523 12086 net.cpp:226] L1_b3_cbr1_conv needs backward computation.\nI1212 06:18:16.925534 12086 net.cpp:226] L1_b2_sum_eltwise_top_L1_b2_relu_0_split needs backward computation.\nI1212 06:18:16.925546 12086 net.cpp:226] L1_b2_relu needs backward computation.\nI1212 06:18:16.925557 12086 net.cpp:226] L1_b2_sum_eltwise needs backward computation.\nI1212 06:18:16.925570 12086 net.cpp:226] L1_b2_cbr2_scale needs backward computation.\nI1212 06:18:16.925581 12086 net.cpp:226] L1_b2_cbr2_bn needs backward computation.\nI1212 06:18:16.925593 12086 net.cpp:226] L1_b2_cbr2_conv needs backward computation.\nI1212 06:18:16.925604 12086 net.cpp:226] L1_b2_cbr1_relu needs backward computation.\nI1212 06:18:16.925616 12086 net.cpp:226] L1_b2_cbr1_scale needs backward computation.\nI1212 06:18:16.925627 12086 net.cpp:226] L1_b2_cbr1_bn needs backward computation.\nI1212 06:18:16.925638 12086 net.cpp:226] L1_b2_cbr1_conv needs backward computation.\nI1212 06:18:16.925650 12086 net.cpp:226] L1_b1_sum_eltwise_top_L1_b1_relu_0_split needs backward computation.\nI1212 06:18:16.925663 12086 net.cpp:226] L1_b1_relu needs backward computation.\nI1212 06:18:16.925675 12086 net.cpp:226] L1_b1_sum_eltwise needs backward computation.\nI1212 06:18:16.925688 12086 net.cpp:226] L1_b1_cbr2_scale needs backward computation.\nI1212 06:18:16.925700 12086 net.cpp:226] L1_b1_cbr2_bn needs backward computation.\nI1212 06:18:16.925712 12086 net.cpp:226] L1_b1_cbr2_conv needs backward computation.\nI1212 06:18:16.925736 12086 net.cpp:226] L1_b1_cbr1_relu needs backward computation.\nI1212 
06:18:16.925748 12086 net.cpp:226] L1_b1_cbr1_scale needs backward computation.\nI1212 06:18:16.925760 12086 net.cpp:226] L1_b1_cbr1_bn needs backward computation.\nI1212 06:18:16.925773 12086 net.cpp:226] L1_b1_cbr1_conv needs backward computation.\nI1212 06:18:16.925784 12086 net.cpp:226] pre_bn_top_pre_relu_0_split needs backward computation.\nI1212 06:18:16.925796 12086 net.cpp:226] pre_relu needs backward computation.\nI1212 06:18:16.925807 12086 net.cpp:226] pre_scale needs backward computation.\nI1212 06:18:16.925818 12086 net.cpp:226] pre_bn needs backward computation.\nI1212 06:18:16.925829 12086 net.cpp:226] pre_conv needs backward computation.\nI1212 06:18:16.925843 12086 net.cpp:228] label_dataLayer_1_split does not need backward computation.\nI1212 06:18:16.925855 12086 net.cpp:228] dataLayer does not need backward computation.\nI1212 06:18:16.925865 12086 net.cpp:270] This network produces output accuracy\nI1212 06:18:16.925878 12086 net.cpp:270] This network produces output loss\nI1212 06:18:16.926242 12086 net.cpp:283] Network initialization done.\nI1212 06:18:16.927297 12086 solver.cpp:60] Solver scaffolding done.\nI1212 06:18:17.150493 12086 parallel.cpp:392] GPUs pairs 0:1, 2:3, 4:5, 6:7, 0:2, 4:6, 0:4\nI1212 06:18:17.515503 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:17.515574 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:17.522189 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:17.758146 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:17.758255 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:17.793050 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:17.793159 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 
06:18:18.242305 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:18.242375 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:18.250191 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:18.508054 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:18.508196 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:18.561152 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:18.561288 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:19.075261 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:19.075325 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:19.084044 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:19.360918 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:19.361083 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:19.433032 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:19.433192 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:19.517674 12086 parallel.cpp:234] GPU 4 does not have p2p access to GPU 0\nI1212 06:18:20.010166 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:20.010254 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:20.019668 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:20.309943 12086 net.cpp:93] Sharing layer 
L2_b1_zeros from root net\nI1212 06:18:20.310161 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:20.403653 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:20.403847 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:21.075208 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:21.075266 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:21.085547 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:21.402063 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:21.402249 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:21.515782 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:21.515967 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:22.233259 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: examples/sc/architectures/arch.prototxt\nI1212 06:18:22.233314 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:22.244341 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:22.589931 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:22.590144 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:22.724262 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:22.724469 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:23.522184 12086 upgrade_proto.cpp:77] Attempting to upgrade batch norm layers using deprecated params: 
examples/sc/architectures/arch.prototxt\nI1212 06:18:23.522267 12086 upgrade_proto.cpp:80] Successfully upgraded batch norm layers using deprecated params.\nI1212 06:18:23.534487 12086 data_layer.cpp:41] output data size: 125,3,32,32\nI1212 06:18:24.012114 12086 net.cpp:93] Sharing layer L2_b1_zeros from root net\nI1212 06:18:24.012369 12086 net.cpp:143] Created top blob 0 (shape: 125 16 16 16 (512000)) for shared layer L2_b1_zeros\nI1212 06:18:24.165886 12086 net.cpp:93] Sharing layer L3_b1_zeros from root net\nI1212 06:18:24.166137 12086 net.cpp:143] Created top blob 0 (shape: 125 32 8 8 (256000)) for shared layer L3_b1_zeros\nI1212 06:18:24.340255 12086 parallel.cpp:425] Starting Optimization\nI1212 06:18:24.341667 12086 solver.cpp:279] Solving Cifar-Resnet\nI1212 06:18:24.341684 12086 solver.cpp:280] Learning Rate Policy: triangular\nI1212 06:18:24.346920 12086 solver.cpp:337] Iteration 0, Testing net (#0)\nI1212 06:19:46.723734 12086 solver.cpp:404]     Test net output #0: accuracy = 0.10052\nI1212 06:19:46.724098 12086 solver.cpp:404]     Test net output #1: loss = 87.3366 (* 1 = 87.3366 loss)\nI1212 06:19:50.788616 12086 solver.cpp:228] Iteration 0, loss = 4.44974\nI1212 06:19:50.788667 12086 solver.cpp:244]     Train net output #0: accuracy = 0.112\nI1212 06:19:50.788686 12086 solver.cpp:244]     Train net output #1: loss = 4.44974 (* 1 = 4.44974 loss)\nI1212 06:19:50.848440 12086 sgd_solver.cpp:174] Iteration 0, lr = 0\nI1212 06:19:50.862339 12086 sgd_solver.cpp:149] Gradient: L2 norm 13.9673\nI1212 06:22:09.208024 12086 solver.cpp:337] Iteration 100, Testing net (#0)\nI1212 06:23:30.958976 12086 solver.cpp:404]     Test net output #0: accuracy = 0.39756\nI1212 06:23:30.959290 12086 solver.cpp:404]     Test net output #1: loss = 1.86156 (* 1 = 1.86156 loss)\nI1212 06:23:32.273497 12086 solver.cpp:228] Iteration 100, loss = 1.22139\nI1212 06:23:32.273558 12086 solver.cpp:244]     Train net output #0: accuracy = 0.536\nI1212 06:23:32.273578 12086 
solver.cpp:244]     Train net output #1: loss = 1.22139 (* 1 = 1.22139 loss)\nI1212 06:23:32.371093 12086 sgd_solver.cpp:174] Iteration 100, lr = 0.0599999\nI1212 06:23:32.384757 12086 sgd_solver.cpp:149] Gradient: L2 norm 3.25439\nI1212 06:25:50.248447 12086 solver.cpp:337] Iteration 200, Testing net (#0)\nI1212 06:27:12.601969 12086 solver.cpp:404]     Test net output #0: accuracy = 0.62444\nI1212 06:27:12.602293 12086 solver.cpp:404]     Test net output #1: loss = 1.1054 (* 1 = 1.1054 loss)\nI1212 06:27:13.927209 12086 solver.cpp:228] Iteration 200, loss = 0.895431\nI1212 06:27:13.927258 12086 solver.cpp:244]     Train net output #0: accuracy = 0.672\nI1212 06:27:13.927274 12086 solver.cpp:244]     Train net output #1: loss = 0.895431 (* 1 = 0.895431 loss)\nI1212 06:27:14.021116 12086 sgd_solver.cpp:174] Iteration 200, lr = 0.12\nI1212 06:27:14.034847 12086 sgd_solver.cpp:149] Gradient: L2 norm 1.48877\nI1212 06:29:32.079072 12086 solver.cpp:337] Iteration 300, Testing net (#0)\nI1212 06:30:54.448199 12086 solver.cpp:404]     Test net output #0: accuracy = 0.41444\nI1212 06:30:54.448515 12086 solver.cpp:404]     Test net output #1: loss = 2.62137 (* 1 = 2.62137 loss)\nI1212 06:30:55.774435 12086 solver.cpp:228] Iteration 300, loss = 0.734006\nI1212 06:30:55.774482 12086 solver.cpp:244]     Train net output #0: accuracy = 0.712\nI1212 06:30:55.774497 12086 solver.cpp:244]     Train net output #1: loss = 0.734006 (* 1 = 0.734006 loss)\nI1212 06:30:55.863797 12086 sgd_solver.cpp:174] Iteration 300, lr = 0.18\nI1212 06:30:55.877470 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.95315\nI1212 06:33:13.878793 12086 solver.cpp:337] Iteration 400, Testing net (#0)\nI1212 06:34:36.240533 12086 solver.cpp:404]     Test net output #0: accuracy = 0.6146\nI1212 06:34:36.240831 12086 solver.cpp:404]     Test net output #1: loss = 1.65433 (* 1 = 1.65433 loss)\nI1212 06:34:37.566869 12086 solver.cpp:228] Iteration 400, loss = 0.515684\nI1212 06:34:37.566912 12086 solver.cpp:244]  
   Train net output #0: accuracy = 0.816\nI1212 06:34:37.566929 12086 solver.cpp:244]     Train net output #1: loss = 0.515684 (* 1 = 0.515684 loss)\nI1212 06:34:37.654718 12086 sgd_solver.cpp:174] Iteration 400, lr = 0.24\nI1212 06:34:37.668552 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.77781\nI1212 06:36:55.575832 12086 solver.cpp:337] Iteration 500, Testing net (#0)\nI1212 06:38:17.928736 12086 solver.cpp:404]     Test net output #0: accuracy = 0.69636\nI1212 06:38:17.929030 12086 solver.cpp:404]     Test net output #1: loss = 1.00661 (* 1 = 1.00661 loss)\nI1212 06:38:19.254345 12086 solver.cpp:228] Iteration 500, loss = 0.469556\nI1212 06:38:19.254390 12086 solver.cpp:244]     Train net output #0: accuracy = 0.856\nI1212 06:38:19.254406 12086 solver.cpp:244]     Train net output #1: loss = 0.469556 (* 1 = 0.469556 loss)\nI1212 06:38:19.345477 12086 sgd_solver.cpp:174] Iteration 500, lr = 0.3\nI1212 06:38:19.359223 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.69106\nI1212 06:40:37.235118 12086 solver.cpp:337] Iteration 600, Testing net (#0)\nI1212 06:41:59.592450 12086 solver.cpp:404]     Test net output #0: accuracy = 0.71472\nI1212 06:41:59.592743 12086 solver.cpp:404]     Test net output #1: loss = 1.00721 (* 1 = 1.00721 loss)\nI1212 06:42:00.918655 12086 solver.cpp:228] Iteration 600, loss = 0.420277\nI1212 06:42:00.918700 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 06:42:00.918717 12086 solver.cpp:244]     Train net output #1: loss = 0.420277 (* 1 = 0.420277 loss)\nI1212 06:42:01.008386 12086 sgd_solver.cpp:174] Iteration 600, lr = 0.36\nI1212 06:42:01.022157 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.639064\nI1212 06:44:18.982707 12086 solver.cpp:337] Iteration 700, Testing net (#0)\nI1212 06:45:41.347123 12086 solver.cpp:404]     Test net output #0: accuracy = 0.7224\nI1212 06:45:41.347430 12086 solver.cpp:404]     Test net output #1: loss = 0.944729 (* 1 = 0.944729 loss)\nI1212 06:45:42.673923 12086 solver.cpp:228] 
Iteration 700, loss = 0.372368\nI1212 06:45:42.673969 12086 solver.cpp:244]     Train net output #0: accuracy = 0.864\nI1212 06:45:42.673985 12086 solver.cpp:244]     Train net output #1: loss = 0.372368 (* 1 = 0.372368 loss)\nI1212 06:45:42.762485 12086 sgd_solver.cpp:174] Iteration 700, lr = 0.42\nI1212 06:45:42.776346 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.659277\nI1212 06:48:00.683991 12086 solver.cpp:337] Iteration 800, Testing net (#0)\nI1212 06:49:23.047364 12086 solver.cpp:404]     Test net output #0: accuracy = 0.7334\nI1212 06:49:23.047683 12086 solver.cpp:404]     Test net output #1: loss = 0.8639 (* 1 = 0.8639 loss)\nI1212 06:49:24.372218 12086 solver.cpp:228] Iteration 800, loss = 0.508303\nI1212 06:49:24.372263 12086 solver.cpp:244]     Train net output #0: accuracy = 0.848\nI1212 06:49:24.372280 12086 solver.cpp:244]     Train net output #1: loss = 0.508303 (* 1 = 0.508303 loss)\nI1212 06:49:24.467056 12086 sgd_solver.cpp:174] Iteration 800, lr = 0.48\nI1212 06:49:24.480876 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.710293\nI1212 06:51:42.394793 12086 solver.cpp:337] Iteration 900, Testing net (#0)\nI1212 06:53:04.760887 12086 solver.cpp:404]     Test net output #0: accuracy = 0.73468\nI1212 06:53:04.761209 12086 solver.cpp:404]     Test net output #1: loss = 0.875734 (* 1 = 0.875734 loss)\nI1212 06:53:06.086300 12086 solver.cpp:228] Iteration 900, loss = 0.244619\nI1212 06:53:06.086341 12086 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 06:53:06.086357 12086 solver.cpp:244]     Train net output #1: loss = 0.244619 (* 1 = 0.244619 loss)\nI1212 06:53:06.173557 12086 sgd_solver.cpp:174] Iteration 900, lr = 0.54\nI1212 06:53:06.187304 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.5047\nI1212 06:55:24.106143 12086 solver.cpp:337] Iteration 1000, Testing net (#0)\nI1212 06:56:46.479794 12086 solver.cpp:404]     Test net output #0: accuracy = 0.65788\nI1212 06:56:46.480093 12086 solver.cpp:404]     Test net output #1: loss = 
1.48251 (* 1 = 1.48251 loss)\nI1212 06:56:47.806107 12086 solver.cpp:228] Iteration 1000, loss = 0.285714\nI1212 06:56:47.806149 12086 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 06:56:47.806164 12086 solver.cpp:244]     Train net output #1: loss = 0.285714 (* 1 = 0.285714 loss)\nI1212 06:56:47.895138 12086 sgd_solver.cpp:174] Iteration 1000, lr = 0.6\nI1212 06:56:47.909014 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.48306\nI1212 06:59:05.787178 12086 solver.cpp:337] Iteration 1100, Testing net (#0)\nI1212 07:00:28.138844 12086 solver.cpp:404]     Test net output #0: accuracy = 0.80088\nI1212 07:00:28.139154 12086 solver.cpp:404]     Test net output #1: loss = 0.613127 (* 1 = 0.613127 loss)\nI1212 07:00:29.465211 12086 solver.cpp:228] Iteration 1100, loss = 0.303628\nI1212 07:00:29.465260 12086 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:00:29.465276 12086 solver.cpp:244]     Train net output #1: loss = 0.303628 (* 1 = 0.303628 loss)\nI1212 07:00:29.552145 12086 sgd_solver.cpp:174] Iteration 1100, lr = 0.66\nI1212 07:00:29.565811 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.47656\nI1212 07:02:47.437839 12086 solver.cpp:337] Iteration 1200, Testing net (#0)\nI1212 07:04:09.786262 12086 solver.cpp:404]     Test net output #0: accuracy = 0.73408\nI1212 07:04:09.786561 12086 solver.cpp:404]     Test net output #1: loss = 0.845276 (* 1 = 0.845276 loss)\nI1212 07:04:11.112392 12086 solver.cpp:228] Iteration 1200, loss = 0.34633\nI1212 07:04:11.112434 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 07:04:11.112450 12086 solver.cpp:244]     Train net output #1: loss = 0.34633 (* 1 = 0.34633 loss)\nI1212 07:04:11.197739 12086 sgd_solver.cpp:174] Iteration 1200, lr = 0.72\nI1212 07:04:11.211508 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.538936\nI1212 07:06:29.091243 12086 solver.cpp:337] Iteration 1300, Testing net (#0)\nI1212 07:07:51.453816 12086 solver.cpp:404]     Test net output #0: accuracy = 
0.54448\nI1212 07:07:51.454114 12086 solver.cpp:404]     Test net output #1: loss = 2.50567 (* 1 = 2.50567 loss)\nI1212 07:07:52.780582 12086 solver.cpp:228] Iteration 1300, loss = 0.36176\nI1212 07:07:52.780623 12086 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1212 07:07:52.780639 12086 solver.cpp:244]     Train net output #1: loss = 0.36176 (* 1 = 0.36176 loss)\nI1212 07:07:52.871693 12086 sgd_solver.cpp:174] Iteration 1300, lr = 0.78\nI1212 07:07:52.885540 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.511483\nI1212 07:10:10.898406 12086 solver.cpp:337] Iteration 1400, Testing net (#0)\nI1212 07:11:33.264588 12086 solver.cpp:404]     Test net output #0: accuracy = 0.7118\nI1212 07:11:33.264909 12086 solver.cpp:404]     Test net output #1: loss = 1.03886 (* 1 = 1.03886 loss)\nI1212 07:11:34.590684 12086 solver.cpp:228] Iteration 1400, loss = 0.286464\nI1212 07:11:34.590726 12086 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:11:34.590742 12086 solver.cpp:244]     Train net output #1: loss = 0.286464 (* 1 = 0.286464 loss)\nI1212 07:11:34.679076 12086 sgd_solver.cpp:174] Iteration 1400, lr = 0.84\nI1212 07:11:34.692885 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.467256\nI1212 07:13:52.548043 12086 solver.cpp:337] Iteration 1500, Testing net (#0)\nI1212 07:15:14.941370 12086 solver.cpp:404]     Test net output #0: accuracy = 0.79192\nI1212 07:15:14.941671 12086 solver.cpp:404]     Test net output #1: loss = 0.719505 (* 1 = 0.719505 loss)\nI1212 07:15:16.266631 12086 solver.cpp:228] Iteration 1500, loss = 0.332901\nI1212 07:15:16.266671 12086 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:15:16.266687 12086 solver.cpp:244]     Train net output #1: loss = 0.332901 (* 1 = 0.332901 loss)\nI1212 07:15:16.363492 12086 sgd_solver.cpp:174] Iteration 1500, lr = 0.9\nI1212 07:15:16.377207 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.475668\nI1212 07:17:34.264160 12086 solver.cpp:337] Iteration 1600, Testing net 
(#0)\nI1212 07:18:56.657234 12086 solver.cpp:404]     Test net output #0: accuracy = 0.80244\nI1212 07:18:56.657546 12086 solver.cpp:404]     Test net output #1: loss = 0.647161 (* 1 = 0.647161 loss)\nI1212 07:18:57.983423 12086 solver.cpp:228] Iteration 1600, loss = 0.313771\nI1212 07:18:57.983467 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 07:18:57.983484 12086 solver.cpp:244]     Train net output #1: loss = 0.313771 (* 1 = 0.313771 loss)\nI1212 07:18:58.071202 12086 sgd_solver.cpp:174] Iteration 1600, lr = 0.96\nI1212 07:18:58.085050 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.442462\nI1212 07:21:15.958662 12086 solver.cpp:337] Iteration 1700, Testing net (#0)\nI1212 07:22:38.081156 12086 solver.cpp:404]     Test net output #0: accuracy = 0.6672\nI1212 07:22:38.081413 12086 solver.cpp:404]     Test net output #1: loss = 1.27549 (* 1 = 1.27549 loss)\nI1212 07:22:39.406973 12086 solver.cpp:228] Iteration 1700, loss = 0.187708\nI1212 07:22:39.407018 12086 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 07:22:39.407035 12086 solver.cpp:244]     Train net output #1: loss = 0.187708 (* 1 = 0.187708 loss)\nI1212 07:22:39.493594 12086 sgd_solver.cpp:174] Iteration 1700, lr = 1.02\nI1212 07:22:39.507392 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.389661\nI1212 07:24:57.398576 12086 solver.cpp:337] Iteration 1800, Testing net (#0)\nI1212 07:26:19.312398 12086 solver.cpp:404]     Test net output #0: accuracy = 0.67928\nI1212 07:26:19.312619 12086 solver.cpp:404]     Test net output #1: loss = 1.56538 (* 1 = 1.56538 loss)\nI1212 07:26:20.638453 12086 solver.cpp:228] Iteration 1800, loss = 0.197712\nI1212 07:26:20.638496 12086 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:26:20.638514 12086 solver.cpp:244]     Train net output #1: loss = 0.197712 (* 1 = 0.197712 loss)\nI1212 07:26:20.726387 12086 sgd_solver.cpp:174] Iteration 1800, lr = 1.08\nI1212 07:26:20.740262 12086 sgd_solver.cpp:149] Gradient: L2 norm 
0.436475\nI1212 07:28:38.609580 12086 solver.cpp:337] Iteration 1900, Testing net (#0)\nI1212 07:30:00.978492 12086 solver.cpp:404]     Test net output #0: accuracy = 0.75292\nI1212 07:30:00.978739 12086 solver.cpp:404]     Test net output #1: loss = 0.809217 (* 1 = 0.809217 loss)\nI1212 07:30:02.303719 12086 solver.cpp:228] Iteration 1900, loss = 0.297315\nI1212 07:30:02.303759 12086 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 07:30:02.303774 12086 solver.cpp:244]     Train net output #1: loss = 0.297315 (* 1 = 0.297315 loss)\nI1212 07:30:02.389279 12086 sgd_solver.cpp:174] Iteration 1900, lr = 1.14\nI1212 07:30:02.403096 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.439537\nI1212 07:32:20.307687 12086 solver.cpp:337] Iteration 2000, Testing net (#0)\nI1212 07:33:42.690013 12086 solver.cpp:404]     Test net output #0: accuracy = 0.8156\nI1212 07:33:42.690299 12086 solver.cpp:404]     Test net output #1: loss = 0.576576 (* 1 = 0.576576 loss)\nI1212 07:33:44.016471 12086 solver.cpp:228] Iteration 2000, loss = 0.275288\nI1212 07:33:44.016515 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 07:33:44.016531 12086 solver.cpp:244]     Train net output #1: loss = 0.275288 (* 1 = 0.275288 loss)\nI1212 07:33:44.105846 12086 sgd_solver.cpp:174] Iteration 2000, lr = 1.2\nI1212 07:33:44.119639 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.424998\nI1212 07:36:01.941318 12086 solver.cpp:337] Iteration 2100, Testing net (#0)\nI1212 07:37:24.305752 12086 solver.cpp:404]     Test net output #0: accuracy = 0.76228\nI1212 07:37:24.306015 12086 solver.cpp:404]     Test net output #1: loss = 0.801866 (* 1 = 0.801866 loss)\nI1212 07:37:25.630934 12086 solver.cpp:228] Iteration 2100, loss = 0.223044\nI1212 07:37:25.630980 12086 solver.cpp:244]     Train net output #0: accuracy = 0.952\nI1212 07:37:25.631006 12086 solver.cpp:244]     Train net output #1: loss = 0.223044 (* 1 = 0.223044 loss)\nI1212 07:37:25.718729 12086 sgd_solver.cpp:174] 
Iteration 2100, lr = 1.26\nI1212 07:37:25.732517 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.428656\nI1212 07:39:43.670564 12086 solver.cpp:337] Iteration 2200, Testing net (#0)\nI1212 07:41:06.044781 12086 solver.cpp:404]     Test net output #0: accuracy = 0.73852\nI1212 07:41:06.045073 12086 solver.cpp:404]     Test net output #1: loss = 1.02385 (* 1 = 1.02385 loss)\nI1212 07:41:07.370038 12086 solver.cpp:228] Iteration 2200, loss = 0.218315\nI1212 07:41:07.370085 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 07:41:07.370108 12086 solver.cpp:244]     Train net output #1: loss = 0.218315 (* 1 = 0.218315 loss)\nI1212 07:41:07.460310 12086 sgd_solver.cpp:174] Iteration 2200, lr = 1.32\nI1212 07:41:07.474068 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.401957\nI1212 07:43:25.368348 12086 solver.cpp:337] Iteration 2300, Testing net (#0)\nI1212 07:44:47.731958 12086 solver.cpp:404]     Test net output #0: accuracy = 0.81116\nI1212 07:44:47.732174 12086 solver.cpp:404]     Test net output #1: loss = 0.66539 (* 1 = 0.66539 loss)\nI1212 07:44:49.057499 12086 solver.cpp:228] Iteration 2300, loss = 0.265726\nI1212 07:44:49.057545 12086 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 07:44:49.057569 12086 solver.cpp:244]     Train net output #1: loss = 0.265726 (* 1 = 0.265726 loss)\nI1212 07:44:49.153383 12086 sgd_solver.cpp:174] Iteration 2300, lr = 1.38\nI1212 07:44:49.167249 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.421038\nI1212 07:47:07.064743 12086 solver.cpp:337] Iteration 2400, Testing net (#0)\nI1212 07:48:29.401492 12086 solver.cpp:404]     Test net output #0: accuracy = 0.70168\nI1212 07:48:29.401717 12086 solver.cpp:404]     Test net output #1: loss = 1.08918 (* 1 = 1.08918 loss)\nI1212 07:48:30.727818 12086 solver.cpp:228] Iteration 2400, loss = 0.181723\nI1212 07:48:30.727866 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 07:48:30.727890 12086 solver.cpp:244]     Train net output #1: loss 
= 0.181723 (* 1 = 0.181723 loss)\nI1212 07:48:30.818024 12086 sgd_solver.cpp:174] Iteration 2400, lr = 1.44\nI1212 07:48:30.831387 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.354315\nI1212 07:50:48.825752 12086 solver.cpp:337] Iteration 2500, Testing net (#0)\nI1212 07:52:11.157490 12086 solver.cpp:404]     Test net output #0: accuracy = 0.67056\nI1212 07:52:11.157773 12086 solver.cpp:404]     Test net output #1: loss = 1.3505 (* 1 = 1.3505 loss)\nI1212 07:52:12.483578 12086 solver.cpp:228] Iteration 2500, loss = 0.24332\nI1212 07:52:12.483625 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 07:52:12.483649 12086 solver.cpp:244]     Train net output #1: loss = 0.24332 (* 1 = 0.24332 loss)\nI1212 07:52:12.574012 12086 sgd_solver.cpp:174] Iteration 2500, lr = 1.5\nI1212 07:52:12.587767 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.363368\nI1212 07:54:30.603847 12086 solver.cpp:337] Iteration 2600, Testing net (#0)\nI1212 07:55:52.900112 12086 solver.cpp:404]     Test net output #0: accuracy = 0.74628\nI1212 07:55:52.900360 12086 solver.cpp:404]     Test net output #1: loss = 0.934078 (* 1 = 0.934078 loss)\nI1212 07:55:54.226307 12086 solver.cpp:228] Iteration 2600, loss = 0.283014\nI1212 07:55:54.226354 12086 solver.cpp:244]     Train net output #0: accuracy = 0.904\nI1212 07:55:54.226377 12086 solver.cpp:244]     Train net output #1: loss = 0.283014 (* 1 = 0.283014 loss)\nI1212 07:55:54.318184 12086 sgd_solver.cpp:174] Iteration 2600, lr = 1.56\nI1212 07:55:54.332106 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.394823\nI1212 07:58:12.352514 12086 solver.cpp:337] Iteration 2700, Testing net (#0)\nI1212 07:59:34.680979 12086 solver.cpp:404]     Test net output #0: accuracy = 0.68564\nI1212 07:59:34.681231 12086 solver.cpp:404]     Test net output #1: loss = 1.36364 (* 1 = 1.36364 loss)\nI1212 07:59:36.006727 12086 solver.cpp:228] Iteration 2700, loss = 0.259548\nI1212 07:59:36.006775 12086 solver.cpp:244]     Train net output #0: accuracy = 
0.888\nI1212 07:59:36.006798 12086 solver.cpp:244]     Train net output #1: loss = 0.259548 (* 1 = 0.259548 loss)\nI1212 07:59:36.098551 12086 sgd_solver.cpp:174] Iteration 2700, lr = 1.62\nI1212 07:59:36.112368 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.365531\nI1212 08:01:53.992085 12086 solver.cpp:337] Iteration 2800, Testing net (#0)\nI1212 08:03:16.318830 12086 solver.cpp:404]     Test net output #0: accuracy = 0.64048\nI1212 08:03:16.319104 12086 solver.cpp:404]     Test net output #1: loss = 1.51854 (* 1 = 1.51854 loss)\nI1212 08:03:17.644821 12086 solver.cpp:228] Iteration 2800, loss = 0.242091\nI1212 08:03:17.644870 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 08:03:17.644893 12086 solver.cpp:244]     Train net output #1: loss = 0.242091 (* 1 = 0.242091 loss)\nI1212 08:03:17.732664 12086 sgd_solver.cpp:174] Iteration 2800, lr = 1.68\nI1212 08:03:17.746410 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.391436\nI1212 08:05:35.609814 12086 solver.cpp:337] Iteration 2900, Testing net (#0)\nI1212 08:06:57.941073 12086 solver.cpp:404]     Test net output #0: accuracy = 0.76056\nI1212 08:06:57.941381 12086 solver.cpp:404]     Test net output #1: loss = 0.744947 (* 1 = 0.744947 loss)\nI1212 08:06:59.266721 12086 solver.cpp:228] Iteration 2900, loss = 0.234113\nI1212 08:06:59.266768 12086 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 08:06:59.266791 12086 solver.cpp:244]     Train net output #1: loss = 0.234113 (* 1 = 0.234113 loss)\nI1212 08:06:59.353281 12086 sgd_solver.cpp:174] Iteration 2900, lr = 1.74\nI1212 08:06:59.367141 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.305568\nI1212 08:09:17.249392 12086 solver.cpp:337] Iteration 3000, Testing net (#0)\nI1212 08:10:39.588742 12086 solver.cpp:404]     Test net output #0: accuracy = 0.77964\nI1212 08:10:39.589046 12086 solver.cpp:404]     Test net output #1: loss = 0.827457 (* 1 = 0.827457 loss)\nI1212 08:10:40.915189 12086 solver.cpp:228] Iteration 3000, loss = 
0.160687\nI1212 08:10:40.915241 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:10:40.915266 12086 solver.cpp:244]     Train net output #1: loss = 0.160687 (* 1 = 0.160687 loss)\nI1212 08:10:41.008417 12086 sgd_solver.cpp:174] Iteration 3000, lr = 1.8\nI1212 08:10:41.022195 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.349661\nI1212 08:12:58.925199 12086 solver.cpp:337] Iteration 3100, Testing net (#0)\nI1212 08:14:21.261992 12086 solver.cpp:404]     Test net output #0: accuracy = 0.70608\nI1212 08:14:21.262298 12086 solver.cpp:404]     Test net output #1: loss = 1.09403 (* 1 = 1.09403 loss)\nI1212 08:14:22.587525 12086 solver.cpp:228] Iteration 3100, loss = 0.225848\nI1212 08:14:22.587572 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:14:22.587594 12086 solver.cpp:244]     Train net output #1: loss = 0.225848 (* 1 = 0.225848 loss)\nI1212 08:14:22.678051 12086 sgd_solver.cpp:174] Iteration 3100, lr = 1.86\nI1212 08:14:22.691879 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.318964\nI1212 08:16:40.522330 12086 solver.cpp:337] Iteration 3200, Testing net (#0)\nI1212 08:18:02.850617 12086 solver.cpp:404]     Test net output #0: accuracy = 0.75168\nI1212 08:18:02.850934 12086 solver.cpp:404]     Test net output #1: loss = 1.01133 (* 1 = 1.01133 loss)\nI1212 08:18:04.176918 12086 solver.cpp:228] Iteration 3200, loss = 0.233478\nI1212 08:18:04.176966 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:18:04.176990 12086 solver.cpp:244]     Train net output #1: loss = 0.233478 (* 1 = 0.233478 loss)\nI1212 08:18:04.265749 12086 sgd_solver.cpp:174] Iteration 3200, lr = 1.92\nI1212 08:18:04.279673 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.333393\nI1212 08:20:22.183099 12086 solver.cpp:337] Iteration 3300, Testing net (#0)\nI1212 08:21:44.489940 12086 solver.cpp:404]     Test net output #0: accuracy = 0.76436\nI1212 08:21:44.490242 12086 solver.cpp:404]     Test net output #1: loss = 0.860878 (* 1 = 
0.860878 loss)\nI1212 08:21:45.815871 12086 solver.cpp:228] Iteration 3300, loss = 0.226434\nI1212 08:21:45.815920 12086 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 08:21:45.815943 12086 solver.cpp:244]     Train net output #1: loss = 0.226434 (* 1 = 0.226434 loss)\nI1212 08:21:45.903208 12086 sgd_solver.cpp:174] Iteration 3300, lr = 1.98\nI1212 08:21:45.916517 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.294545\nI1212 08:24:03.749244 12086 solver.cpp:337] Iteration 3400, Testing net (#0)\nI1212 08:25:26.045260 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78288\nI1212 08:25:26.045541 12086 solver.cpp:404]     Test net output #1: loss = 0.671772 (* 1 = 0.671772 loss)\nI1212 08:25:27.371139 12086 solver.cpp:228] Iteration 3400, loss = 0.247338\nI1212 08:25:27.371196 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 08:25:27.371220 12086 solver.cpp:244]     Train net output #1: loss = 0.247338 (* 1 = 0.247338 loss)\nI1212 08:25:27.466472 12086 sgd_solver.cpp:174] Iteration 3400, lr = 2.04\nI1212 08:25:27.480319 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.350207\nI1212 08:27:45.393607 12086 solver.cpp:337] Iteration 3500, Testing net (#0)\nI1212 08:29:07.772027 12086 solver.cpp:404]     Test net output #0: accuracy = 0.61468\nI1212 08:29:07.772332 12086 solver.cpp:404]     Test net output #1: loss = 1.9445 (* 1 = 1.9445 loss)\nI1212 08:29:09.097609 12086 solver.cpp:228] Iteration 3500, loss = 0.205013\nI1212 08:29:09.097663 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 08:29:09.097687 12086 solver.cpp:244]     Train net output #1: loss = 0.205013 (* 1 = 0.205013 loss)\nI1212 08:29:09.195320 12086 sgd_solver.cpp:174] Iteration 3500, lr = 2.1\nI1212 08:29:09.209192 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.292062\nI1212 08:31:27.195718 12086 solver.cpp:337] Iteration 3600, Testing net (#0)\nI1212 08:32:49.630435 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78196\nI1212 
08:32:49.630724 12086 solver.cpp:404]     Test net output #1: loss = 0.78902 (* 1 = 0.78902 loss)\nI1212 08:32:50.956277 12086 solver.cpp:228] Iteration 3600, loss = 0.262626\nI1212 08:32:50.956324 12086 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 08:32:50.956346 12086 solver.cpp:244]     Train net output #1: loss = 0.262626 (* 1 = 0.262626 loss)\nI1212 08:32:51.043799 12086 sgd_solver.cpp:174] Iteration 3600, lr = 2.16\nI1212 08:32:51.057641 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.313064\nI1212 08:35:08.991477 12086 solver.cpp:337] Iteration 3700, Testing net (#0)\nI1212 08:36:31.419579 12086 solver.cpp:404]     Test net output #0: accuracy = 0.65424\nI1212 08:36:31.419903 12086 solver.cpp:404]     Test net output #1: loss = 1.34387 (* 1 = 1.34387 loss)\nI1212 08:36:32.745980 12086 solver.cpp:228] Iteration 3700, loss = 0.225597\nI1212 08:36:32.746026 12086 solver.cpp:244]     Train net output #0: accuracy = 0.928\nI1212 08:36:32.746050 12086 solver.cpp:244]     Train net output #1: loss = 0.225597 (* 1 = 0.225597 loss)\nI1212 08:36:32.832605 12086 sgd_solver.cpp:174] Iteration 3700, lr = 2.22\nI1212 08:36:32.846359 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.313302\nI1212 08:38:50.775583 12086 solver.cpp:337] Iteration 3800, Testing net (#0)\nI1212 08:40:13.233280 12086 solver.cpp:404]     Test net output #0: accuracy = 0.72416\nI1212 08:40:13.233595 12086 solver.cpp:404]     Test net output #1: loss = 1.009 (* 1 = 1.009 loss)\nI1212 08:40:14.558895 12086 solver.cpp:228] Iteration 3800, loss = 0.293671\nI1212 08:40:14.558943 12086 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 08:40:14.558966 12086 solver.cpp:244]     Train net output #1: loss = 0.293671 (* 1 = 0.293671 loss)\nI1212 08:40:14.645825 12086 sgd_solver.cpp:174] Iteration 3800, lr = 2.28\nI1212 08:40:14.659677 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.313931\nI1212 08:42:32.557718 12086 solver.cpp:337] Iteration 3900, Testing net (#0)\nI1212 
08:43:54.874478 12086 solver.cpp:404]     Test net output #0: accuracy = 0.79148\nI1212 08:43:54.874784 12086 solver.cpp:404]     Test net output #1: loss = 0.633687 (* 1 = 0.633687 loss)\nI1212 08:43:56.200021 12086 solver.cpp:228] Iteration 3900, loss = 0.287395\nI1212 08:43:56.200067 12086 solver.cpp:244]     Train net output #0: accuracy = 0.872\nI1212 08:43:56.200090 12086 solver.cpp:244]     Train net output #1: loss = 0.287395 (* 1 = 0.287395 loss)\nI1212 08:43:56.295610 12086 sgd_solver.cpp:174] Iteration 3900, lr = 2.34\nI1212 08:43:56.309574 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.355688\nI1212 08:46:14.243283 12086 solver.cpp:337] Iteration 4000, Testing net (#0)\nI1212 08:47:36.694407 12086 solver.cpp:404]     Test net output #0: accuracy = 0.8036\nI1212 08:47:36.694790 12086 solver.cpp:404]     Test net output #1: loss = 0.621678 (* 1 = 0.621678 loss)\nI1212 08:47:38.020575 12086 solver.cpp:228] Iteration 4000, loss = 0.237807\nI1212 08:47:38.020622 12086 solver.cpp:244]     Train net output #0: accuracy = 0.92\nI1212 08:47:38.020645 12086 solver.cpp:244]     Train net output #1: loss = 0.237806 (* 1 = 0.237806 loss)\nI1212 08:47:38.112790 12086 sgd_solver.cpp:174] Iteration 4000, lr = 2.4\nI1212 08:47:38.126569 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.287265\nI1212 08:49:55.943008 12086 solver.cpp:337] Iteration 4100, Testing net (#0)\nI1212 08:51:18.414736 12086 solver.cpp:404]     Test net output #0: accuracy = 0.8044\nI1212 08:51:18.415050 12086 solver.cpp:404]     Test net output #1: loss = 0.606398 (* 1 = 0.606398 loss)\nI1212 08:51:19.741186 12086 solver.cpp:228] Iteration 4100, loss = 0.235091\nI1212 08:51:19.741235 12086 solver.cpp:244]     Train net output #0: accuracy = 0.944\nI1212 08:51:19.741266 12086 solver.cpp:244]     Train net output #1: loss = 0.23509 (* 1 = 0.23509 loss)\nI1212 08:51:19.827085 12086 sgd_solver.cpp:174] Iteration 4100, lr = 2.46\nI1212 08:51:19.840891 12086 sgd_solver.cpp:149] Gradient: L2 norm 
0.286096\nI1212 08:53:37.654438 12086 solver.cpp:337] Iteration 4200, Testing net (#0)\nI1212 08:55:00.072440 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78112\nI1212 08:55:00.072744 12086 solver.cpp:404]     Test net output #1: loss = 0.719521 (* 1 = 0.719521 loss)\nI1212 08:55:01.397202 12086 solver.cpp:228] Iteration 4200, loss = 0.328779\nI1212 08:55:01.397253 12086 solver.cpp:244]     Train net output #0: accuracy = 0.888\nI1212 08:55:01.397279 12086 solver.cpp:244]     Train net output #1: loss = 0.328779 (* 1 = 0.328779 loss)\nI1212 08:55:01.490608 12086 sgd_solver.cpp:174] Iteration 4200, lr = 2.52\nI1212 08:55:01.504364 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.288976\nI1212 08:57:19.578058 12086 solver.cpp:337] Iteration 4300, Testing net (#0)\nI1212 08:58:41.954742 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78488\nI1212 08:58:41.955039 12086 solver.cpp:404]     Test net output #1: loss = 0.715 (* 1 = 0.715 loss)\nI1212 08:58:43.279155 12086 solver.cpp:228] Iteration 4300, loss = 0.286522\nI1212 08:58:43.279201 12086 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 08:58:43.279224 12086 solver.cpp:244]     Train net output #1: loss = 0.286522 (* 1 = 0.286522 loss)\nI1212 08:58:43.368453 12086 sgd_solver.cpp:174] Iteration 4300, lr = 2.58\nI1212 08:58:43.382287 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.309829\nI1212 09:01:01.417848 12086 solver.cpp:337] Iteration 4400, Testing net (#0)\nI1212 09:02:23.849709 12086 solver.cpp:404]     Test net output #0: accuracy = 0.6632\nI1212 09:02:23.850025 12086 solver.cpp:404]     Test net output #1: loss = 1.27523 (* 1 = 1.27523 loss)\nI1212 09:02:25.174790 12086 solver.cpp:228] Iteration 4400, loss = 0.347652\nI1212 09:02:25.174836 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 09:02:25.174860 12086 solver.cpp:244]     Train net output #1: loss = 0.347652 (* 1 = 0.347652 loss)\nI1212 09:02:25.268471 12086 sgd_solver.cpp:174] Iteration 
4400, lr = 2.64\nI1212 09:02:25.282249 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.316814\nI1212 09:04:43.218340 12086 solver.cpp:337] Iteration 4500, Testing net (#0)\nI1212 09:06:05.641500 12086 solver.cpp:404]     Test net output #0: accuracy = 0.65992\nI1212 09:06:05.641829 12086 solver.cpp:404]     Test net output #1: loss = 1.378 (* 1 = 1.378 loss)\nI1212 09:06:06.966171 12086 solver.cpp:228] Iteration 4500, loss = 0.339761\nI1212 09:06:06.966217 12086 solver.cpp:244]     Train net output #0: accuracy = 0.912\nI1212 09:06:06.966244 12086 solver.cpp:244]     Train net output #1: loss = 0.33976 (* 1 = 0.33976 loss)\nI1212 09:06:07.053488 12086 sgd_solver.cpp:174] Iteration 4500, lr = 2.7\nI1212 09:06:07.067322 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.307793\nI1212 09:08:24.939059 12086 solver.cpp:337] Iteration 4600, Testing net (#0)\nI1212 09:09:47.276865 12086 solver.cpp:404]     Test net output #0: accuracy = 0.71304\nI1212 09:09:47.277190 12086 solver.cpp:404]     Test net output #1: loss = 1.00071 (* 1 = 1.00071 loss)\nI1212 09:09:48.602890 12086 solver.cpp:228] Iteration 4600, loss = 0.348373\nI1212 09:09:48.602937 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 09:09:48.602962 12086 solver.cpp:244]     Train net output #1: loss = 0.348372 (* 1 = 0.348372 loss)\nI1212 09:09:48.699059 12086 sgd_solver.cpp:174] Iteration 4600, lr = 2.76\nI1212 09:09:48.712707 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.312257\nI1212 09:12:06.725545 12086 solver.cpp:337] Iteration 4700, Testing net (#0)\nI1212 09:13:29.055954 12086 solver.cpp:404]     Test net output #0: accuracy = 0.8152\nI1212 09:13:29.056265 12086 solver.cpp:404]     Test net output #1: loss = 0.570672 (* 1 = 0.570672 loss)\nI1212 09:13:30.381739 12086 solver.cpp:228] Iteration 4700, loss = 0.23046\nI1212 09:13:30.381788 12086 solver.cpp:244]     Train net output #0: accuracy = 0.936\nI1212 09:13:30.381810 12086 solver.cpp:244]     Train net output #1: loss = 0.23046 (* 1 = 
0.23046 loss)\nI1212 09:13:30.474926 12086 sgd_solver.cpp:174] Iteration 4700, lr = 2.82\nI1212 09:13:30.488740 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.27579\nI1212 09:15:48.350942 12086 solver.cpp:337] Iteration 4800, Testing net (#0)\nI1212 09:17:10.718168 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78884\nI1212 09:17:10.718498 12086 solver.cpp:404]     Test net output #1: loss = 0.695892 (* 1 = 0.695892 loss)\nI1212 09:17:12.043071 12086 solver.cpp:228] Iteration 4800, loss = 0.387453\nI1212 09:17:12.043113 12086 solver.cpp:244]     Train net output #0: accuracy = 0.88\nI1212 09:17:12.043136 12086 solver.cpp:244]     Train net output #1: loss = 0.387453 (* 1 = 0.387453 loss)\nI1212 09:17:12.132433 12086 sgd_solver.cpp:174] Iteration 4800, lr = 2.88\nI1212 09:17:12.146267 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.334282\nI1212 09:19:30.004858 12086 solver.cpp:337] Iteration 4900, Testing net (#0)\nI1212 09:20:52.340461 12086 solver.cpp:404]     Test net output #0: accuracy = 0.64784\nI1212 09:20:52.340752 12086 solver.cpp:404]     Test net output #1: loss = 1.28059 (* 1 = 1.28059 loss)\nI1212 09:20:53.666265 12086 solver.cpp:228] Iteration 4900, loss = 0.316263\nI1212 09:20:53.666308 12086 solver.cpp:244]     Train net output #0: accuracy = 0.896\nI1212 09:20:53.666332 12086 solver.cpp:244]     Train net output #1: loss = 0.316263 (* 1 = 0.316263 loss)\nI1212 09:20:53.753506 12086 sgd_solver.cpp:174] Iteration 4900, lr = 2.94\nI1212 09:20:53.767213 12086 sgd_solver.cpp:149] Gradient: L2 norm 0.319737\nI1212 09:23:11.712682 12086 solver.cpp:454] Snapshotting to binary proto file examples/sc/snapshots/range3Iter5kFig2b_iter_5000.caffemodel\nI1212 09:23:11.933600 12086 sgd_solver.cpp:341] Snapshotting solver state to binary proto file examples/sc/snapshots/range3Iter5kFig2b_iter_5000.solverstate\nI1212 09:23:12.380501 12086 solver.cpp:317] Iteration 5000, loss = 0.311944\nI1212 09:23:12.380553 12086 solver.cpp:337] Iteration 5000, Testing 
net (#0)\nI1212 09:24:34.784507 12086 solver.cpp:404]     Test net output #0: accuracy = 0.78436\nI1212 09:24:34.784819 12086 solver.cpp:404]     Test net output #1: loss = 0.664612 (* 1 = 0.664612 loss)\nI1212 09:24:34.784837 12086 solver.cpp:322] Optimization Done.\nI1212 09:24:40.155057 12086 caffe.cpp:254] Optimization Done.\n"
  },
  {
    "path": "architectures/ResNeXt56.prototxt",
    "content": "name: \"Cifar-ResNeXt\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: false\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 85\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#{ L1 start\n  #{ L1_b1 start\n    #{ L1_b1_brc1 start\n      layer { # L1_b1_brc1_bn\n        name: \"L1_b1_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"pre_conv_top\"\n        top: \"L1_b1_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b1_brc1_relu\n        name: \"L1_b1_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b1_brc1_bn_top\"\n        top: \"L1_b1_brc1_bn_top\"\n      }\n 
     layer { # L1_b1_brc1_conv\n        name: \"L1_b1_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b1_brc1_bn_top\"\n        top: \"L1_b1_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b1_brc2 start\n      layer { # L1_b1_brc2_bn\n        name: \"L1_b1_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b1_brc1_conv_top\"\n        top: \"L1_b1_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b1_brc2_relu\n        name: \"L1_b1_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b1_brc2_bn_top\"\n        top: \"L1_b1_brc2_bn_top\"\n      }\n      layer { # L1_b1_brc2_conv\n        name: \"L1_b1_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b1_brc2_bn_top\"\n        top: \"L1_b1_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b1_brc3 start\n      
layer { # L1_b1_brc3_bn\n        name: \"L1_b1_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b1_brc2_conv_top\"\n        top: \"L1_b1_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b1_brc3_relu\n        name: \"L1_b1_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b1_brc3_bn_top\"\n        top: \"L1_b1_brc3_bn_top\"\n      }\n      layer { # L1_b1_brc3_conv\n        name: \"L1_b1_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b1_brc3_bn_top\"\n        top: \"L1_b1_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b1_chanInc_conv\n      name: \"L1_b1_chanInc_conv\"\n      type: \"Convolution\"\n      bottom: \"pre_conv_top\"\n      top: \"L1_b1_chanInc_conv_top\"\n      param {\n        lr_mult: 1\n        decay_mult: 1\n      }\n      param {\n        lr_mult: 2\n        decay_mult: 0\n      }\n      convolution_param {\n        num_output: 64\n        pad: 0\n        kernel_size: 1\n        stride: 1\n        weight_filler {\n          type: \"msra\"\n        }\n        bias_filler {\n          type: \"constant\"\n        }\n      }\n    }\n    layer { # L1_b1_sum_eltwise\n      name: \"L1_b1_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b1_brc3_conv_top\"\n      
bottom: \"L1_b1_chanInc_conv_top\"\n      top: \"L1_b1_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L1_b2 start\n    #{ L1_b2_brc1 start\n      layer { # L1_b2_brc1_bn\n        name: \"L1_b2_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b1_sum_eltwise_top\"\n        top: \"L1_b2_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b2_brc1_relu\n        name: \"L1_b2_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b2_brc1_bn_top\"\n        top: \"L1_b2_brc1_bn_top\"\n      }\n      layer { # L1_b2_brc1_conv\n        name: \"L1_b2_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b2_brc1_bn_top\"\n        top: \"L1_b2_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b2_brc2 start\n      layer { # L1_b2_brc2_bn\n        name: \"L1_b2_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b2_brc1_conv_top\"\n        top: \"L1_b2_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          
moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b2_brc2_relu\n        name: \"L1_b2_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b2_brc2_bn_top\"\n        top: \"L1_b2_brc2_bn_top\"\n      }\n      layer { # L1_b2_brc2_conv\n        name: \"L1_b2_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b2_brc2_bn_top\"\n        top: \"L1_b2_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b2_brc3 start\n      layer { # L1_b2_brc3_bn\n        name: \"L1_b2_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b2_brc2_conv_top\"\n        top: \"L1_b2_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b2_brc3_relu\n        name: \"L1_b2_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b2_brc3_bn_top\"\n        top: \"L1_b2_brc3_bn_top\"\n      }\n      layer { # L1_b2_brc3_conv\n        name: \"L1_b2_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b2_brc3_bn_top\"\n        top: \"L1_b2_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          
kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b2_sum_eltwise\n      name: \"L1_b2_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b2_brc3_conv_top\"\n      bottom: \"L1_b1_sum_eltwise_top\"\n      top: \"L1_b2_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L1_b3 start\n    #{ L1_b3_brc1 start\n      layer { # L1_b3_brc1_bn\n        name: \"L1_b3_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b2_sum_eltwise_top\"\n        top: \"L1_b3_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b3_brc1_relu\n        name: \"L1_b3_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b3_brc1_bn_top\"\n        top: \"L1_b3_brc1_bn_top\"\n      }\n      layer { # L1_b3_brc1_conv\n        name: \"L1_b3_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b3_brc1_bn_top\"\n        top: \"L1_b3_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b3_brc2 start\n      layer { # L1_b3_brc2_bn\n        name: \"L1_b3_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b3_brc1_conv_top\"\n      
  top: \"L1_b3_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b3_brc2_relu\n        name: \"L1_b3_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b3_brc2_bn_top\"\n        top: \"L1_b3_brc2_bn_top\"\n      }\n      layer { # L1_b3_brc2_conv\n        name: \"L1_b3_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b3_brc2_bn_top\"\n        top: \"L1_b3_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b3_brc3 start\n      layer { # L1_b3_brc3_bn\n        name: \"L1_b3_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b3_brc2_conv_top\"\n        top: \"L1_b3_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b3_brc3_relu\n        name: \"L1_b3_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b3_brc3_bn_top\"\n        top: \"L1_b3_brc3_bn_top\"\n      }\n      layer { # L1_b3_brc3_conv\n        name: \"L1_b3_brc3_conv\"\n        type: 
\"Convolution\"\n        bottom: \"L1_b3_brc3_bn_top\"\n        top: \"L1_b3_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b3_sum_eltwise\n      name: \"L1_b3_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b3_brc3_conv_top\"\n      bottom: \"L1_b2_sum_eltwise_top\"\n      top: \"L1_b3_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L1_b4 start\n    #{ L1_b4_brc1 start\n      layer { # L1_b4_brc1_bn\n        name: \"L1_b4_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b3_sum_eltwise_top\"\n        top: \"L1_b4_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b4_brc1_relu\n        name: \"L1_b4_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b4_brc1_bn_top\"\n        top: \"L1_b4_brc1_bn_top\"\n      }\n      layer { # L1_b4_brc1_conv\n        name: \"L1_b4_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b4_brc1_bn_top\"\n        top: \"L1_b4_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n    
      weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b4_brc2 start\n      layer { # L1_b4_brc2_bn\n        name: \"L1_b4_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b4_brc1_conv_top\"\n        top: \"L1_b4_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b4_brc2_relu\n        name: \"L1_b4_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b4_brc2_bn_top\"\n        top: \"L1_b4_brc2_bn_top\"\n      }\n      layer { # L1_b4_brc2_conv\n        name: \"L1_b4_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b4_brc2_bn_top\"\n        top: \"L1_b4_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b4_brc3 start\n      layer { # L1_b4_brc3_bn\n        name: \"L1_b4_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b4_brc2_conv_top\"\n        top: \"L1_b4_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n  
        moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b4_brc3_relu\n        name: \"L1_b4_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b4_brc3_bn_top\"\n        top: \"L1_b4_brc3_bn_top\"\n      }\n      layer { # L1_b4_brc3_conv\n        name: \"L1_b4_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b4_brc3_bn_top\"\n        top: \"L1_b4_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b4_sum_eltwise\n      name: \"L1_b4_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b4_brc3_conv_top\"\n      bottom: \"L1_b3_sum_eltwise_top\"\n      top: \"L1_b4_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L1_b5 start\n    #{ L1_b5_brc1 start\n      layer { # L1_b5_brc1_bn\n        name: \"L1_b5_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b4_sum_eltwise_top\"\n        top: \"L1_b5_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b5_brc1_relu\n        name: \"L1_b5_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b5_brc1_bn_top\"\n        top: \"L1_b5_brc1_bn_top\"\n      }\n      layer { # L1_b5_brc1_conv\n        name: \"L1_b5_brc1_conv\"\n        type: \"Convolution\"\n        bottom: 
\"L1_b5_brc1_bn_top\"\n        top: \"L1_b5_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b5_brc2 start\n      layer { # L1_b5_brc2_bn\n        name: \"L1_b5_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b5_brc1_conv_top\"\n        top: \"L1_b5_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b5_brc2_relu\n        name: \"L1_b5_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b5_brc2_bn_top\"\n        top: \"L1_b5_brc2_bn_top\"\n      }\n      layer { # L1_b5_brc2_conv\n        name: \"L1_b5_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b5_brc2_bn_top\"\n        top: \"L1_b5_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b5_brc3 start\n      layer { # L1_b5_brc3_bn\n        name: \"L1_b5_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: 
\"L1_b5_brc2_conv_top\"\n        top: \"L1_b5_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b5_brc3_relu\n        name: \"L1_b5_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b5_brc3_bn_top\"\n        top: \"L1_b5_brc3_bn_top\"\n      }\n      layer { # L1_b5_brc3_conv\n        name: \"L1_b5_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b5_brc3_bn_top\"\n        top: \"L1_b5_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b5_sum_eltwise\n      name: \"L1_b5_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b5_brc3_conv_top\"\n      bottom: \"L1_b4_sum_eltwise_top\"\n      top: \"L1_b5_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L1_b6 start\n    #{ L1_b6_brc1 start\n      layer { # L1_b6_brc1_bn\n        name: \"L1_b6_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b5_sum_eltwise_top\"\n        top: \"L1_b6_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          
moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b6_brc1_relu\n        name: \"L1_b6_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b6_brc1_bn_top\"\n        top: \"L1_b6_brc1_bn_top\"\n      }\n      layer { # L1_b6_brc1_conv\n        name: \"L1_b6_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b6_brc1_bn_top\"\n        top: \"L1_b6_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b6_brc2 start\n      layer { # L1_b6_brc2_bn\n        name: \"L1_b6_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b6_brc1_conv_top\"\n        top: \"L1_b6_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b6_brc2_relu\n        name: \"L1_b6_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b6_brc2_bn_top\"\n        top: \"L1_b6_brc2_bn_top\"\n      }\n      layer { # L1_b6_brc2_conv\n        name: \"L1_b6_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b6_brc2_bn_top\"\n        top: \"L1_b6_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 32\n          pad: 1\n          kernel_size: 3\n          
stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L1_b6_brc3 start\n      layer { # L1_b6_brc3_bn\n        name: \"L1_b6_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L1_b6_brc2_conv_top\"\n        top: \"L1_b6_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L1_b6_brc3_relu\n        name: \"L1_b6_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L1_b6_brc3_bn_top\"\n        top: \"L1_b6_brc3_bn_top\"\n      }\n      layer { # L1_b6_brc3_conv\n        name: \"L1_b6_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L1_b6_brc3_bn_top\"\n        top: \"L1_b6_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L1_b6_sum_eltwise\n      name: \"L1_b6_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L1_b6_brc3_conv_top\"\n      bottom: \"L1_b5_sum_eltwise_top\"\n      top: \"L1_b6_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n#}\n#{ L2 start\n  #{ L2_b1 start\n    #{ L2_b1_brc1 start\n      layer { # L2_b1_brc1_bn\n        name: \"L2_b1_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: 
\"L1_b6_sum_eltwise_top\"\n        top: \"L2_b1_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b1_brc1_relu\n        name: \"L2_b1_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b1_brc1_bn_top\"\n        top: \"L2_b1_brc1_bn_top\"\n      }\n      layer { # L2_b1_brc1_conv\n        name: \"L2_b1_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b1_brc1_bn_top\"\n        top: \"L2_b1_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 2\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b1_brc2 start\n      layer { # L2_b1_brc2_bn\n        name: \"L2_b1_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b1_brc1_conv_top\"\n        top: \"L2_b1_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b1_brc2_relu\n        name: \"L2_b1_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b1_brc2_bn_top\"\n        top: \"L2_b1_brc2_bn_top\"\n      }\n      layer { # L2_b1_brc2_conv\n        name: \"L2_b1_brc2_conv\"\n        
type: \"Convolution\"\n        bottom: \"L2_b1_brc2_bn_top\"\n        top: \"L2_b1_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b1_brc3 start\n      layer { # L2_b1_brc3_bn\n        name: \"L2_b1_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b1_brc2_conv_top\"\n        top: \"L2_b1_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b1_brc3_relu\n        name: \"L2_b1_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b1_brc3_bn_top\"\n        top: \"L2_b1_brc3_bn_top\"\n      }\n      layer { # L2_b1_brc3_conv\n        name: \"L2_b1_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b1_brc3_bn_top\"\n        top: \"L2_b1_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b1_chanInc_conv\n      name: \"L2_b1_chanInc_conv\"\n      type: \"Convolution\"\n      
bottom: \"L1_b6_sum_eltwise_top\"\n      top: \"L2_b1_chanInc_conv_top\"\n      param {\n        lr_mult: 1\n        decay_mult: 1\n      }\n      param {\n        lr_mult: 2\n        decay_mult: 0\n      }\n      convolution_param {\n        num_output: 128\n        pad: 0\n        kernel_size: 1\n        stride: 2\n        weight_filler {\n          type: \"msra\"\n        }\n        bias_filler {\n          type: \"constant\"\n        }\n      }\n    }\n    layer { # L2_b1_sum_eltwise\n      name: \"L2_b1_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b1_brc3_conv_top\"\n      bottom: \"L2_b1_chanInc_conv_top\"\n      top: \"L2_b1_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L2_b2 start\n    #{ L2_b2_brc1 start\n      layer { # L2_b2_brc1_bn\n        name: \"L2_b2_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b1_sum_eltwise_top\"\n        top: \"L2_b2_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b2_brc1_relu\n        name: \"L2_b2_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b2_brc1_bn_top\"\n        top: \"L2_b2_brc1_bn_top\"\n      }\n      layer { # L2_b2_brc1_conv\n        name: \"L2_b2_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b2_brc1_bn_top\"\n        top: \"L2_b2_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n    
      bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b2_brc2 start\n      layer { # L2_b2_brc2_bn\n        name: \"L2_b2_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b2_brc1_conv_top\"\n        top: \"L2_b2_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b2_brc2_relu\n        name: \"L2_b2_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b2_brc2_bn_top\"\n        top: \"L2_b2_brc2_bn_top\"\n      }\n      layer { # L2_b2_brc2_conv\n        name: \"L2_b2_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b2_brc2_bn_top\"\n        top: \"L2_b2_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b2_brc3 start\n      layer { # L2_b2_brc3_bn\n        name: \"L2_b2_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b2_brc2_conv_top\"\n        top: \"L2_b2_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      
layer { # L2_b2_brc3_relu\n        name: \"L2_b2_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b2_brc3_bn_top\"\n        top: \"L2_b2_brc3_bn_top\"\n      }\n      layer { # L2_b2_brc3_conv\n        name: \"L2_b2_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b2_brc3_bn_top\"\n        top: \"L2_b2_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b2_sum_eltwise\n      name: \"L2_b2_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b2_brc3_conv_top\"\n      bottom: \"L2_b1_sum_eltwise_top\"\n      top: \"L2_b2_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L2_b3 start\n    #{ L2_b3_brc1 start\n      layer { # L2_b3_brc1_bn\n        name: \"L2_b3_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b2_sum_eltwise_top\"\n        top: \"L2_b3_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b3_brc1_relu\n        name: \"L2_b3_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b3_brc1_bn_top\"\n        top: \"L2_b3_brc1_bn_top\"\n      }\n      layer { # L2_b3_brc1_conv\n        name: \"L2_b3_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b3_brc1_bn_top\"\n        top: \"L2_b3_brc1_conv_top\"\n        param {\n    
      lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b3_brc2 start\n      layer { # L2_b3_brc2_bn\n        name: \"L2_b3_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b3_brc1_conv_top\"\n        top: \"L2_b3_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b3_brc2_relu\n        name: \"L2_b3_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b3_brc2_bn_top\"\n        top: \"L2_b3_brc2_bn_top\"\n      }\n      layer { # L2_b3_brc2_conv\n        name: \"L2_b3_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b3_brc2_bn_top\"\n        top: \"L2_b3_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b3_brc3 start\n      layer { # L2_b3_brc3_bn\n        name: \"L2_b3_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b3_brc2_conv_top\"\n        top: \"L2_b3_brc3_bn_top\"\n        param {\n          lr_mult: 
0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b3_brc3_relu\n        name: \"L2_b3_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b3_brc3_bn_top\"\n        top: \"L2_b3_brc3_bn_top\"\n      }\n      layer { # L2_b3_brc3_conv\n        name: \"L2_b3_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b3_brc3_bn_top\"\n        top: \"L2_b3_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b3_sum_eltwise\n      name: \"L2_b3_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b3_brc3_conv_top\"\n      bottom: \"L2_b2_sum_eltwise_top\"\n      top: \"L2_b3_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L2_b4 start\n    #{ L2_b4_brc1 start\n      layer { # L2_b4_brc1_bn\n        name: \"L2_b4_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b3_sum_eltwise_top\"\n        top: \"L2_b4_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b4_brc1_relu\n        name: 
\"L2_b4_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b4_brc1_bn_top\"\n        top: \"L2_b4_brc1_bn_top\"\n      }\n      layer { # L2_b4_brc1_conv\n        name: \"L2_b4_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b4_brc1_bn_top\"\n        top: \"L2_b4_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b4_brc2 start\n      layer { # L2_b4_brc2_bn\n        name: \"L2_b4_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b4_brc1_conv_top\"\n        top: \"L2_b4_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b4_brc2_relu\n        name: \"L2_b4_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b4_brc2_bn_top\"\n        top: \"L2_b4_brc2_bn_top\"\n      }\n      layer { # L2_b4_brc2_conv\n        name: \"L2_b4_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b4_brc2_bn_top\"\n        top: \"L2_b4_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n    
      bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b4_brc3 start\n      layer { # L2_b4_brc3_bn\n        name: \"L2_b4_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b4_brc2_conv_top\"\n        top: \"L2_b4_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b4_brc3_relu\n        name: \"L2_b4_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b4_brc3_bn_top\"\n        top: \"L2_b4_brc3_bn_top\"\n      }\n      layer { # L2_b4_brc3_conv\n        name: \"L2_b4_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b4_brc3_bn_top\"\n        top: \"L2_b4_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b4_sum_eltwise\n      name: \"L2_b4_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b4_brc3_conv_top\"\n      bottom: \"L2_b3_sum_eltwise_top\"\n      top: \"L2_b4_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L2_b5 start\n    #{ L2_b5_brc1 start\n      layer { # L2_b5_brc1_bn\n        name: \"L2_b5_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b4_sum_eltwise_top\"\n        top: \"L2_b5_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n   
     param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b5_brc1_relu\n        name: \"L2_b5_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b5_brc1_bn_top\"\n        top: \"L2_b5_brc1_bn_top\"\n      }\n      layer { # L2_b5_brc1_conv\n        name: \"L2_b5_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b5_brc1_bn_top\"\n        top: \"L2_b5_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b5_brc2 start\n      layer { # L2_b5_brc2_bn\n        name: \"L2_b5_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b5_brc1_conv_top\"\n        top: \"L2_b5_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b5_brc2_relu\n        name: \"L2_b5_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b5_brc2_bn_top\"\n        top: \"L2_b5_brc2_bn_top\"\n      }\n      layer { # L2_b5_brc2_conv\n        name: \"L2_b5_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b5_brc2_bn_top\"\n        top: \"L2_b5_brc2_conv_top\"\n        param {\n          lr_mult: 1\n      
    decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b5_brc3 start\n      layer { # L2_b5_brc3_bn\n        name: \"L2_b5_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b5_brc2_conv_top\"\n        top: \"L2_b5_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b5_brc3_relu\n        name: \"L2_b5_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b5_brc3_bn_top\"\n        top: \"L2_b5_brc3_bn_top\"\n      }\n      layer { # L2_b5_brc3_conv\n        name: \"L2_b5_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b5_brc3_bn_top\"\n        top: \"L2_b5_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b5_sum_eltwise\n      name: \"L2_b5_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b5_brc3_conv_top\"\n      bottom: \"L2_b4_sum_eltwise_top\"\n      top: \"L2_b5_sum_eltwise_top\"\n      eltwise_param {\n        operation: 
SUM\n      }\n    }\n  #}\n  #{ L2_b6 start\n    #{ L2_b6_brc1 start\n      layer { # L2_b6_brc1_bn\n        name: \"L2_b6_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b5_sum_eltwise_top\"\n        top: \"L2_b6_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b6_brc1_relu\n        name: \"L2_b6_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b6_brc1_bn_top\"\n        top: \"L2_b6_brc1_bn_top\"\n      }\n      layer { # L2_b6_brc1_conv\n        name: \"L2_b6_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b6_brc1_bn_top\"\n        top: \"L2_b6_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b6_brc2 start\n      layer { # L2_b6_brc2_bn\n        name: \"L2_b6_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b6_brc1_conv_top\"\n        top: \"L2_b6_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b6_brc2_relu\n        name: \"L2_b6_brc2_relu\"\n        
type: \"ReLU\"\n        bottom: \"L2_b6_brc2_bn_top\"\n        top: \"L2_b6_brc2_bn_top\"\n      }\n      layer { # L2_b6_brc2_conv\n        name: \"L2_b6_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b6_brc2_bn_top\"\n        top: \"L2_b6_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 64\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L2_b6_brc3 start\n      layer { # L2_b6_brc3_bn\n        name: \"L2_b6_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b6_brc2_conv_top\"\n        top: \"L2_b6_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L2_b6_brc3_relu\n        name: \"L2_b6_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L2_b6_brc3_bn_top\"\n        top: \"L2_b6_brc3_bn_top\"\n      }\n      layer { # L2_b6_brc3_conv\n        name: \"L2_b6_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L2_b6_brc3_bn_top\"\n        top: \"L2_b6_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n       
     type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L2_b6_sum_eltwise\n      name: \"L2_b6_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L2_b6_brc3_conv_top\"\n      bottom: \"L2_b5_sum_eltwise_top\"\n      top: \"L2_b6_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n#}\n#{ L3 start\n  #{ L3_b1 start\n    #{ L3_b1_brc1 start\n      layer { # L3_b1_brc1_bn\n        name: \"L3_b1_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L2_b6_sum_eltwise_top\"\n        top: \"L3_b1_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b1_brc1_relu\n        name: \"L3_b1_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b1_brc1_bn_top\"\n        top: \"L3_b1_brc1_bn_top\"\n      }\n      layer { # L3_b1_brc1_conv\n        name: \"L3_b1_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b1_brc1_bn_top\"\n        top: \"L3_b1_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 2\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b1_brc2 start\n      layer { # L3_b1_brc2_bn\n        name: \"L3_b1_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b1_brc1_conv_top\"\n        top: \"L3_b1_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param 
{\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b1_brc2_relu\n        name: \"L3_b1_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b1_brc2_bn_top\"\n        top: \"L3_b1_brc2_bn_top\"\n      }\n      layer { # L3_b1_brc2_conv\n        name: \"L3_b1_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b1_brc2_bn_top\"\n        top: \"L3_b1_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b1_brc3 start\n      layer { # L3_b1_brc3_bn\n        name: \"L3_b1_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b1_brc2_conv_top\"\n        top: \"L3_b1_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b1_brc3_relu\n        name: \"L3_b1_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b1_brc3_bn_top\"\n        top: \"L3_b1_brc3_bn_top\"\n      }\n      layer { # L3_b1_brc3_conv\n        name: \"L3_b1_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b1_brc3_bn_top\"\n        top: \"L3_b1_brc3_conv_top\"\n        param {\n          
lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b1_chanInc_conv\n      name: \"L3_b1_chanInc_conv\"\n      type: \"Convolution\"\n      bottom: \"L2_b6_sum_eltwise_top\"\n      top: \"L3_b1_chanInc_conv_top\"\n      param {\n        lr_mult: 1\n        decay_mult: 1\n      }\n      param {\n        lr_mult: 2\n        decay_mult: 0\n      }\n      convolution_param {\n        num_output: 256\n        pad: 0\n        kernel_size: 1\n        stride: 2\n        weight_filler {\n          type: \"msra\"\n        }\n        bias_filler {\n          type: \"constant\"\n        }\n      }\n    }\n    layer { # L3_b1_sum_eltwise\n      name: \"L3_b1_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b1_brc3_conv_top\"\n      bottom: \"L3_b1_chanInc_conv_top\"\n      top: \"L3_b1_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L3_b2 start\n    #{ L3_b2_brc1 start\n      layer { # L3_b2_brc1_bn\n        name: \"L3_b2_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b1_sum_eltwise_top\"\n        top: \"L3_b2_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b2_brc1_relu\n        name: \"L3_b2_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b2_brc1_bn_top\"\n        top: \"L3_b2_brc1_bn_top\"\n      }\n   
   layer { # L3_b2_brc1_conv\n        name: \"L3_b2_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b2_brc1_bn_top\"\n        top: \"L3_b2_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b2_brc2 start\n      layer { # L3_b2_brc2_bn\n        name: \"L3_b2_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b2_brc1_conv_top\"\n        top: \"L3_b2_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b2_brc2_relu\n        name: \"L3_b2_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b2_brc2_bn_top\"\n        top: \"L3_b2_brc2_bn_top\"\n      }\n      layer { # L3_b2_brc2_conv\n        name: \"L3_b2_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b2_brc2_bn_top\"\n        top: \"L3_b2_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b2_brc3 start\n      
layer { # L3_b2_brc3_bn\n        name: \"L3_b2_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b2_brc2_conv_top\"\n        top: \"L3_b2_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b2_brc3_relu\n        name: \"L3_b2_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b2_brc3_bn_top\"\n        top: \"L3_b2_brc3_bn_top\"\n      }\n      layer { # L3_b2_brc3_conv\n        name: \"L3_b2_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b2_brc3_bn_top\"\n        top: \"L3_b2_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b2_sum_eltwise\n      name: \"L3_b2_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b2_brc3_conv_top\"\n      bottom: \"L3_b1_sum_eltwise_top\"\n      top: \"L3_b2_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L3_b3 start\n    #{ L3_b3_brc1 start\n      layer { # L3_b3_brc1_bn\n        name: \"L3_b3_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b2_sum_eltwise_top\"\n        top: \"L3_b3_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          
decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b3_brc1_relu\n        name: \"L3_b3_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b3_brc1_bn_top\"\n        top: \"L3_b3_brc1_bn_top\"\n      }\n      layer { # L3_b3_brc1_conv\n        name: \"L3_b3_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b3_brc1_bn_top\"\n        top: \"L3_b3_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b3_brc2 start\n      layer { # L3_b3_brc2_bn\n        name: \"L3_b3_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b3_brc1_conv_top\"\n        top: \"L3_b3_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b3_brc2_relu\n        name: \"L3_b3_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b3_brc2_bn_top\"\n        top: \"L3_b3_brc2_bn_top\"\n      }\n      layer { # L3_b3_brc2_conv\n        name: \"L3_b3_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b3_brc2_bn_top\"\n        top: \"L3_b3_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        
convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b3_brc3 start\n      layer { # L3_b3_brc3_bn\n        name: \"L3_b3_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b3_brc2_conv_top\"\n        top: \"L3_b3_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b3_brc3_relu\n        name: \"L3_b3_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b3_brc3_bn_top\"\n        top: \"L3_b3_brc3_bn_top\"\n      }\n      layer { # L3_b3_brc3_conv\n        name: \"L3_b3_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b3_brc3_bn_top\"\n        top: \"L3_b3_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b3_sum_eltwise\n      name: \"L3_b3_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b3_brc3_conv_top\"\n      bottom: \"L3_b2_sum_eltwise_top\"\n      top: \"L3_b3_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L3_b4 start\n    #{ L3_b4_brc1 start\n      layer { # L3_b4_brc1_bn\n        
name: \"L3_b4_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b3_sum_eltwise_top\"\n        top: \"L3_b4_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b4_brc1_relu\n        name: \"L3_b4_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b4_brc1_bn_top\"\n        top: \"L3_b4_brc1_bn_top\"\n      }\n      layer { # L3_b4_brc1_conv\n        name: \"L3_b4_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b4_brc1_bn_top\"\n        top: \"L3_b4_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b4_brc2 start\n      layer { # L3_b4_brc2_bn\n        name: \"L3_b4_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b4_brc1_conv_top\"\n        top: \"L3_b4_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b4_brc2_relu\n        name: \"L3_b4_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b4_brc2_bn_top\"\n        top: \"L3_b4_brc2_bn_top\"\n      }\n      
layer { # L3_b4_brc2_conv\n        name: \"L3_b4_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b4_brc2_bn_top\"\n        top: \"L3_b4_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b4_brc3 start\n      layer { # L3_b4_brc3_bn\n        name: \"L3_b4_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b4_brc2_conv_top\"\n        top: \"L3_b4_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b4_brc3_relu\n        name: \"L3_b4_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b4_brc3_bn_top\"\n        top: \"L3_b4_brc3_bn_top\"\n      }\n      layer { # L3_b4_brc3_conv\n        name: \"L3_b4_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b4_brc3_bn_top\"\n        top: \"L3_b4_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b4_sum_eltwise\n      
name: \"L3_b4_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b4_brc3_conv_top\"\n      bottom: \"L3_b3_sum_eltwise_top\"\n      top: \"L3_b4_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L3_b5 start\n    #{ L3_b5_brc1 start\n      layer { # L3_b5_brc1_bn\n        name: \"L3_b5_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b4_sum_eltwise_top\"\n        top: \"L3_b5_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b5_brc1_relu\n        name: \"L3_b5_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b5_brc1_bn_top\"\n        top: \"L3_b5_brc1_bn_top\"\n      }\n      layer { # L3_b5_brc1_conv\n        name: \"L3_b5_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b5_brc1_bn_top\"\n        top: \"L3_b5_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b5_brc2 start\n      layer { # L3_b5_brc2_bn\n        name: \"L3_b5_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b5_brc1_conv_top\"\n        top: \"L3_b5_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 
0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b5_brc2_relu\n        name: \"L3_b5_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b5_brc2_bn_top\"\n        top: \"L3_b5_brc2_bn_top\"\n      }\n      layer { # L3_b5_brc2_conv\n        name: \"L3_b5_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b5_brc2_bn_top\"\n        top: \"L3_b5_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b5_brc3 start\n      layer { # L3_b5_brc3_bn\n        name: \"L3_b5_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b5_brc2_conv_top\"\n        top: \"L3_b5_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b5_brc3_relu\n        name: \"L3_b5_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b5_brc3_bn_top\"\n        top: \"L3_b5_brc3_bn_top\"\n      }\n      layer { # L3_b5_brc3_conv\n        name: \"L3_b5_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b5_brc3_bn_top\"\n        top: \"L3_b5_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n      
  convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b5_sum_eltwise\n      name: \"L3_b5_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b5_brc3_conv_top\"\n      bottom: \"L3_b4_sum_eltwise_top\"\n      top: \"L3_b5_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n  #{ L3_b6 start\n    #{ L3_b6_brc1 start\n      layer { # L3_b6_brc1_bn\n        name: \"L3_b6_brc1_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b5_sum_eltwise_top\"\n        top: \"L3_b6_brc1_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b6_brc1_relu\n        name: \"L3_b6_brc1_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b6_brc1_bn_top\"\n        top: \"L3_b6_brc1_bn_top\"\n      }\n      layer { # L3_b6_brc1_conv\n        name: \"L3_b6_brc1_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b6_brc1_bn_top\"\n        top: \"L3_b6_brc1_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b6_brc2 start\n      layer { # L3_b6_brc2_bn\n        name: 
\"L3_b6_brc2_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b6_brc1_conv_top\"\n        top: \"L3_b6_brc2_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b6_brc2_relu\n        name: \"L3_b6_brc2_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b6_brc2_bn_top\"\n        top: \"L3_b6_brc2_bn_top\"\n      }\n      layer { # L3_b6_brc2_conv\n        name: \"L3_b6_brc2_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b6_brc2_bn_top\"\n        top: \"L3_b6_brc2_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 128\n          pad: 1\n          kernel_size: 3\n          stride: 1\n          group: 32\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    #{ L3_b6_brc3 start\n      layer { # L3_b6_brc3_bn\n        name: \"L3_b6_brc3_bn\"\n        type: \"BatchNorm\"\n        bottom: \"L3_b6_brc2_conv_top\"\n        top: \"L3_b6_brc3_bn_top\"\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        param {\n          lr_mult: 0\n          decay_mult: 0\n        }\n        batch_norm_param {\n          use_global_stats: false\n          moving_average_fraction: 0.95\n        }\n      }\n      layer { # L3_b6_brc3_relu\n        name: \"L3_b6_brc3_relu\"\n        type: \"ReLU\"\n        bottom: \"L3_b6_brc3_bn_top\"\n        top: \"L3_b6_brc3_bn_top\"\n     
 }\n      layer { # L3_b6_brc3_conv\n        name: \"L3_b6_brc3_conv\"\n        type: \"Convolution\"\n        bottom: \"L3_b6_brc3_bn_top\"\n        top: \"L3_b6_brc3_conv_top\"\n        param {\n          lr_mult: 1\n          decay_mult: 1\n        }\n        param {\n          lr_mult: 2\n          decay_mult: 0\n        }\n        convolution_param {\n          num_output: 256\n          pad: 0\n          kernel_size: 1\n          stride: 1\n          weight_filler {\n            type: \"msra\"\n          }\n          bias_filler {\n            type: \"constant\"\n          }\n        }\n      }\n    #}\n    layer { # L3_b6_sum_eltwise\n      name: \"L3_b6_sum_eltwise\"\n      type: \"Eltwise\"\n      bottom: \"L3_b6_brc3_conv_top\"\n      bottom: \"L3_b5_sum_eltwise_top\"\n      top: \"L3_b6_sum_eltwise_top\"\n      eltwise_param {\n        operation: SUM\n      }\n    }\n  #}\n#}\nlayer { # post_bn\n  name: \"post_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"post_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # post_relu\n  name: \"post_relu\"\n  type: \"ReLU\"\n  bottom: \"post_bn_top\"\n  top: \"post_bn_top\"\n}\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"post_bn_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: 
\"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/Resnet110Cifar.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ 
L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param 
{\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_cbr1 start\nlayer { # L1_b4_cbr1_conv\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr1_scale\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr1_relu\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\n#} L1_b4_cbr1 end\n#{ L1_b4_cbr2 start\nlayer { # L1_b4_cbr2_conv\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  
batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_scale\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b4_cbr2 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b4_relu\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_cbr1 start\nlayer { # L1_b5_cbr1_conv\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_scale\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr1_relu\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\n#} L1_b5_cbr1 end\n#{ L1_b5_cbr2 start\nlayer { # L1_b5_cbr2_conv\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_scale\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b5_cbr2 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b5_relu\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_cbr1 start\nlayer { # L1_b6_cbr1_conv\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_scale\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr1_relu\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\n#} L1_b6_cbr1 end\n#{ L1_b6_cbr2 start\nlayer 
{ # L1_b6_cbr2_conv\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_scale\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b6_cbr2 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b6_relu\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\n#} L1_b6 end\n#{ L1_b7 start\n#{ L1_b7_cbr1 start\nlayer { # L1_b7_cbr1_conv\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_scale\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr1_relu\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\n#} L1_b7_cbr1 end\n#{ L1_b7_cbr2 start\nlayer { # L1_b7_cbr2_conv\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_scale\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b7_cbr2 end\nlayer { # L1_b7_sum_eltwise\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b7_relu\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\n#} L1_b7 end\n#{ L1_b8 start\n#{ L1_b8_cbr1 start\nlayer { # L1_b8_cbr1_conv\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: 
\"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_scale\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr1_relu\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\n#} L1_b8_cbr1 end\n#{ L1_b8_cbr2 start\nlayer { # L1_b8_cbr2_conv\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_scale\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b8_cbr2 end\nlayer { # L1_b8_sum_eltwise\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b8_relu\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\n#} L1_b8 end\n#{ L1_b9 start\n#{ L1_b9_cbr1 start\nlayer { # L1_b9_cbr1_conv\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # 
L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr1_scale\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr1_relu\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\n#} L1_b9_cbr1 end\n#{ L1_b9_cbr2 start\nlayer { # L1_b9_cbr2_conv\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_scale\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b9_cbr2 end\nlayer { # L1_b9_sum_eltwise\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b9_relu\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\n#} L1_b9 end\n#{ L1_b10 start\n#{ L1_b10_cbr1 start\nlayer { # L1_b10_cbr1_conv\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b10_cbr1_bn\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b10_cbr1_bn\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 
0.999\n  }\n}\nlayer { # L1_b10_cbr1_scale\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b10_cbr1_relu\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\n#} L1_b10_cbr1 end\n#{ L1_b10_cbr2 start\nlayer { # L1_b10_cbr2_conv\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b10_cbr2_bn\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b10_cbr2_bn\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b10_cbr2_scale\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b10_cbr2 end\nlayer { # L1_b10_sum_eltwise\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b10_relu\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\n#} L1_b10 end\n#{ L1_b11 start\n#{ L1_b11_cbr1 start\nlayer { # L1_b11_cbr1_conv\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b11_cbr1_bn\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b11_cbr1_bn\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b11_cbr1_scale\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b11_cbr1_relu\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\n#} L1_b11_cbr1 
end\n#{ L1_b11_cbr2 start\nlayer { # L1_b11_cbr2_conv\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b11_cbr2_bn\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b11_cbr2_bn\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b11_cbr2_scale\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b11_cbr2 end\nlayer { # L1_b11_sum_eltwise\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b11_relu\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\n#} L1_b11 end\n#{ L1_b12 start\n#{ L1_b12_cbr1 start\nlayer { # 
L1_b12_cbr1_conv\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b12_cbr1_bn\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b12_cbr1_bn\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b12_cbr1_scale\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b12_cbr1_relu\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\n#} L1_b12_cbr1 end\n#{ L1_b12_cbr2 start\nlayer { # L1_b12_cbr2_conv\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 
3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b12_cbr2_bn\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b12_cbr2_bn\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b12_cbr2_scale\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b12_cbr2 end\nlayer { # L1_b12_sum_eltwise\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b12_relu\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\n#} L1_b12 end\n#{ L1_b13 start\n#{ L1_b13_cbr1 start\nlayer { # L1_b13_cbr1_conv\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b13_cbr1_bn\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b13_cbr1_bn\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b13_cbr1_scale\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b13_cbr1_relu\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\n#} L1_b13_cbr1 end\n#{ L1_b13_cbr2 start\nlayer { # L1_b13_cbr2_conv\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b13_cbr2_bn\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b13_cbr2_bn\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b13_cbr2_scale\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b13_cbr2 end\nlayer { # L1_b13_sum_eltwise\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b13_relu\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\n#} L1_b13 end\n#{ L1_b14 start\n#{ L1_b14_cbr1 start\nlayer { # L1_b14_cbr1_conv\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b14_cbr1_bn\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 
0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b14_cbr1_bn\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b14_cbr1_scale\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b14_cbr1_relu\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\n#} L1_b14_cbr1 end\n#{ L1_b14_cbr2 start\nlayer { # L1_b14_cbr2_conv\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b14_cbr2_bn\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b14_cbr2_bn\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b14_cbr2_scale\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b14_cbr2 end\nlayer { # L1_b14_sum_eltwise\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b14_relu\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\n#} L1_b14 end\n#{ L1_b15 start\n#{ L1_b15_cbr1 start\nlayer { # L1_b15_cbr1_conv\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b15_cbr1_bn\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b15_cbr1_bn\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param 
{\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b15_cbr1_scale\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b15_cbr1_relu\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\n#} L1_b15_cbr1 end\n#{ L1_b15_cbr2 start\nlayer { # L1_b15_cbr2_conv\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b15_cbr2_bn\n  name: \"L1_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr2_conv_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b15_cbr2_bn\n  name: \"L1_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr2_conv_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b15_cbr2_scale\n  name: \"L1_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: 
\"L1_b15_cbr2_bn_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b15_cbr2 end\nlayer { # L1_b15_sum_eltwise\n  name: \"L1_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b15_cbr2_bn_top\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b15_relu\n  name: \"L1_b15_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b15_sum_eltwise_top\"\n}\n#} L1_b15 end\n#{ L1_b16 start\n#{ L1_b16_cbr1 start\nlayer { # L1_b16_cbr1_conv\n  name: \"L1_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b16_cbr1_bn\n  name: \"L1_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr1_conv_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b16_cbr1_bn\n  name: \"L1_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr1_conv_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b16_cbr1_scale\n  name: \"L1_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: 
\"L1_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b16_cbr1_relu\n  name: \"L1_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n}\n#} L1_b16_cbr1 end\n#{ L1_b16_cbr2 start\nlayer { # L1_b16_cbr2_conv\n  name: \"L1_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: \"L1_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b16_cbr2_bn\n  name: \"L1_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr2_conv_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b16_cbr2_bn\n  name: \"L1_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr2_conv_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b16_cbr2_scale\n  name: \"L1_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b16_cbr2_bn_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b16_cbr2 end\nlayer { # L1_b16_sum_eltwise\n  name: \"L1_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b16_cbr2_bn_top\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: 
SUM\n  }\n}\nlayer { # L1_b16_relu\n  name: \"L1_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b16_sum_eltwise_top\"\n}\n#} L1_b16 end\n#{ L1_b17 start\n#{ L1_b17_cbr1 start\nlayer { # L1_b17_cbr1_conv\n  name: \"L1_b17_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b17_cbr1_bn\n  name: \"L1_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr1_conv_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b17_cbr1_bn\n  name: \"L1_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr1_conv_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b17_cbr1_scale\n  name: \"L1_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b17_cbr1_relu\n  name: \"L1_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n}\n#} L1_b17_cbr1 end\n#{ L1_b17_cbr2 start\nlayer { # L1_b17_cbr2_conv\n  name: \"L1_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b17_cbr2_bn\n  name: \"L1_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr2_conv_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b17_cbr2_bn\n  name: \"L1_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr2_conv_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b17_cbr2_scale\n  name: \"L1_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b17_cbr2_bn_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b17_cbr2 end\nlayer { # L1_b17_sum_eltwise\n  name: \"L1_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b17_cbr2_bn_top\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b17_relu\n  name: \"L1_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: \"L1_b17_sum_eltwise_top\"\n}\n#} L1_b17 end\n#{ L1_b18 start\n#{ L1_b18_cbr1 start\nlayer { # L1_b18_cbr1_conv\n  name: \"L1_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: 
\"L1_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b18_cbr1_bn\n  name: \"L1_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr1_conv_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b18_cbr1_bn\n  name: \"L1_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr1_conv_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b18_cbr1_scale\n  name: \"L1_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b18_cbr1_relu\n  name: \"L1_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n}\n#} L1_b18_cbr1 end\n#{ L1_b18_cbr2 start\nlayer { # L1_b18_cbr2_conv\n  name: \"L1_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer { # L1_b18_cbr2_bn\n  name: \"L1_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr2_conv_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b18_cbr2_bn\n  name: \"L1_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr2_conv_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b18_cbr2_scale\n  name: \"L1_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b18_cbr2_bn_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b18_cbr2 end\nlayer { # L1_b18_sum_eltwise\n  name: \"L1_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b18_cbr2_bn_top\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: \"L1_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b18_relu\n  name: \"L1_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L1_b18_sum_eltwise_top\"\n}\n#} L1_b18 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param 
{\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: 
\"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param 
{\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  
top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_cbr1 start\nlayer { # L2_b4_cbr1_conv\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  
}\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_scale\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr1_relu\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\n#} L2_b4_cbr1 end\n#{ L2_b4_cbr2 start\nlayer { # L2_b4_cbr2_conv\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_scale\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: 
\"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b4_cbr2 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b4_relu\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_cbr1 start\nlayer { # L2_b5_cbr1_conv\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_scale\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\nlayer { # L2_b5_cbr1_relu\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\n#} L2_b5_cbr1 end\n#{ L2_b5_cbr2 start\nlayer { # L2_b5_cbr2_conv\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_scale\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b5_cbr2 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b5_relu\n  name: \"L2_b5_relu\"\n  type: 
\"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_cbr1 start\nlayer { # L2_b6_cbr1_conv\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_scale\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr1_relu\n  name: \"L2_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n}\n#} L2_b6_cbr1 end\n#{ L2_b6_cbr2 start\nlayer { # L2_b6_cbr2_conv\n  name: \"L2_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  
param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_scale\n  name: \"L2_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b6_cbr2 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b6_relu\n  name: \"L2_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n}\n#} L2_b6 end\n#{ L2_b7 start\n#{ L2_b7_cbr1 start\nlayer { # L2_b7_cbr1_conv\n  name: \"L2_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_scale\n  name: \"L2_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr1_relu\n  name: \"L2_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n}\n#} L2_b7_cbr1 end\n#{ L2_b7_cbr2 start\nlayer { # L2_b7_cbr2_conv\n  name: \"L2_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 
0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_scale\n  name: \"L2_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b7_cbr2 end\nlayer { # L2_b7_sum_eltwise\n  name: \"L2_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b7_relu\n  name: \"L2_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n}\n#} L2_b7 end\n#{ L2_b8 start\n#{ L2_b8_cbr1 start\nlayer { # L2_b8_cbr1_conv\n  name: \"L2_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  
}\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_scale\n  name: \"L2_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr1_relu\n  name: \"L2_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n}\n#} L2_b8_cbr1 end\n#{ L2_b8_cbr2 start\nlayer { # L2_b8_cbr2_conv\n  name: \"L2_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_scale\n  name: \"L2_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b8_cbr2 end\nlayer { # L2_b8_sum_eltwise\n  name: \"L2_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b8_relu\n  name: \"L2_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n}\n#} L2_b8 end\n#{ L2_b9 start\n#{ L2_b9_cbr1 start\nlayer { # L2_b9_cbr1_conv\n  name: \"L2_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_scale\n  name: \"L2_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr1_relu\n  name: \"L2_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n}\n#} L2_b9_cbr1 end\n#{ L2_b9_cbr2 start\nlayer { # L2_b9_cbr2_conv\n  name: \"L2_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_scale\n  name: \"L2_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: 
\"L2_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b9_cbr2 end\nlayer { # L2_b9_sum_eltwise\n  name: \"L2_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b9_relu\n  name: \"L2_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n}\n#} L2_b9 end\n#{ L2_b10 start\n#{ L2_b10_cbr1 start\nlayer { # L2_b10_cbr1_conv\n  name: \"L2_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b10_cbr1_bn\n  name: \"L2_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr1_conv_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b10_cbr1_bn\n  name: \"L2_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr1_conv_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b10_cbr1_scale\n  name: \"L2_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: 
true\n  }\n}\nlayer { # L2_b10_cbr1_relu\n  name: \"L2_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n}\n#} L2_b10_cbr1 end\n#{ L2_b10_cbr2 start\nlayer { # L2_b10_cbr2_conv\n  name: \"L2_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b10_cbr2_bn\n  name: \"L2_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr2_conv_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b10_cbr2_bn\n  name: \"L2_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr2_conv_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b10_cbr2_scale\n  name: \"L2_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b10_cbr2_bn_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b10_cbr2 end\nlayer { # L2_b10_sum_eltwise\n  name: \"L2_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b10_cbr2_bn_top\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b10_relu\n  name: 
\"L2_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b10_sum_eltwise_top\"\n  top: \"L2_b10_sum_eltwise_top\"\n}\n#} L2_b10 end\n#{ L2_b11 start\n#{ L2_b11_cbr1 start\nlayer { # L2_b11_cbr1_conv\n  name: \"L2_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b10_sum_eltwise_top\"\n  top: \"L2_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b11_cbr1_bn\n  name: \"L2_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr1_conv_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b11_cbr1_bn\n  name: \"L2_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr1_conv_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b11_cbr1_scale\n  name: \"L2_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b11_cbr1_relu\n  name: \"L2_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n}\n#} L2_b11_cbr1 end\n#{ L2_b11_cbr2 start\nlayer { # L2_b11_cbr2_conv\n  name: \"L2_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: 
\"L2_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b11_cbr2_bn\n  name: \"L2_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr2_conv_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b11_cbr2_bn\n  name: \"L2_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr2_conv_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b11_cbr2_scale\n  name: \"L2_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b11_cbr2_bn_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b11_cbr2 end\nlayer { # L2_b11_sum_eltwise\n  name: \"L2_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b11_cbr2_bn_top\"\n  bottom: \"L2_b10_sum_eltwise_top\"\n  top: \"L2_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b11_relu\n  name: \"L2_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b11_sum_eltwise_top\"\n}\n#} L2_b11 end\n#{ L2_b12 start\n#{ L2_b12_cbr1 start\nlayer { # L2_b12_cbr1_conv\n  name: \"L2_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b12_cbr1_conv_top\"\n  param {\n    
lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b12_cbr1_bn\n  name: \"L2_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr1_conv_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b12_cbr1_bn\n  name: \"L2_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr1_conv_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b12_cbr1_scale\n  name: \"L2_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b12_cbr1_relu\n  name: \"L2_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n}\n#} L2_b12_cbr1 end\n#{ L2_b12_cbr2 start\nlayer { # L2_b12_cbr2_conv\n  name: \"L2_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b12_cbr2_bn\n  
name: \"L2_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr2_conv_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b12_cbr2_bn\n  name: \"L2_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr2_conv_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b12_cbr2_scale\n  name: \"L2_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b12_cbr2_bn_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b12_cbr2 end\nlayer { # L2_b12_sum_eltwise\n  name: \"L2_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b12_cbr2_bn_top\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b12_relu\n  name: \"L2_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b12_sum_eltwise_top\"\n}\n#} L2_b12 end\n#{ L2_b13 start\n#{ L2_b13_cbr1 start\nlayer { # L2_b13_cbr1_conv\n  name: \"L2_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b13_cbr1_bn\n  name: \"L2_b13_cbr1_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b13_cbr1_conv_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b13_cbr1_bn\n  name: \"L2_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr1_conv_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b13_cbr1_scale\n  name: \"L2_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b13_cbr1_relu\n  name: \"L2_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n}\n#} L2_b13_cbr1 end\n#{ L2_b13_cbr2 start\nlayer { # L2_b13_cbr2_conv\n  name: \"L2_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b13_cbr2_bn\n  name: \"L2_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr2_conv_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b13_cbr2_bn\n  name: \"L2_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr2_conv_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b13_cbr2_scale\n  name: \"L2_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b13_cbr2_bn_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b13_cbr2 end\nlayer { # L2_b13_sum_eltwise\n  name: \"L2_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b13_cbr2_bn_top\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b13_relu\n  name: \"L2_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b13_sum_eltwise_top\"\n}\n#} L2_b13 end\n#{ L2_b14 start\n#{ L2_b14_cbr1 start\nlayer { # L2_b14_cbr1_conv\n  name: \"L2_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b14_cbr1_bn\n  name: \"L2_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr1_conv_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer { # L2_b14_cbr1_bn\n  name: \"L2_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr1_conv_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b14_cbr1_scale\n  name: \"L2_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b14_cbr1_relu\n  name: \"L2_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n}\n#} L2_b14_cbr1 end\n#{ L2_b14_cbr2 start\nlayer { # L2_b14_cbr2_conv\n  name: \"L2_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b14_cbr2_bn\n  name: \"L2_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr2_conv_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b14_cbr2_bn\n  name: \"L2_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr2_conv_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n  
  phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b14_cbr2_scale\n  name: \"L2_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b14_cbr2_bn_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b14_cbr2 end\nlayer { # L2_b14_sum_eltwise\n  name: \"L2_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b14_cbr2_bn_top\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b14_relu\n  name: \"L2_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b14_sum_eltwise_top\"\n}\n#} L2_b14 end\n#{ L2_b15 start\n#{ L2_b15_cbr1 start\nlayer { # L2_b15_cbr1_conv\n  name: \"L2_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b15_cbr1_bn\n  name: \"L2_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr1_conv_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b15_cbr1_bn\n  name: \"L2_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr1_conv_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b15_cbr1_scale\n  name: \"L2_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b15_cbr1_relu\n  name: \"L2_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n}\n#} L2_b15_cbr1 end\n#{ L2_b15_cbr2 start\nlayer { # L2_b15_cbr2_conv\n  name: \"L2_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b15_cbr2_bn\n  name: \"L2_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr2_conv_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b15_cbr2_bn\n  name: \"L2_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr2_conv_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b15_cbr2_scale\n  name: \"L2_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b15_cbr2_bn_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b15_cbr2 end\nlayer { # L2_b15_sum_eltwise\n  name: 
\"L2_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b15_cbr2_bn_top\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b15_relu\n  name: \"L2_b15_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b15_sum_eltwise_top\"\n}\n#} L2_b15 end\n#{ L2_b16 start\n#{ L2_b16_cbr1 start\nlayer { # L2_b16_cbr1_conv\n  name: \"L2_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b16_cbr1_bn\n  name: \"L2_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr1_conv_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b16_cbr1_bn\n  name: \"L2_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr1_conv_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b16_cbr1_scale\n  name: \"L2_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b16_cbr1_relu\n  name: \"L2_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n}\n#} L2_b16_cbr1 end\n#{ L2_b16_cbr2 start\nlayer { # L2_b16_cbr2_conv\n  name: \"L2_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b16_cbr2_bn\n  name: \"L2_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr2_conv_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b16_cbr2_bn\n  name: \"L2_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr2_conv_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b16_cbr2_scale\n  name: \"L2_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b16_cbr2_bn_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b16_cbr2 end\nlayer { # L2_b16_sum_eltwise\n  name: \"L2_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b16_cbr2_bn_top\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b16_relu\n  name: \"L2_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b16_sum_eltwise_top\"\n}\n#} 
L2_b16 end\n#{ L2_b17 start\n#{ L2_b17_cbr1 start\nlayer { # L2_b17_cbr1_conv\n  name: \"L2_b17_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b17_cbr1_bn\n  name: \"L2_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr1_conv_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b17_cbr1_bn\n  name: \"L2_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr1_conv_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b17_cbr1_scale\n  name: \"L2_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b17_cbr1_relu\n  name: \"L2_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n}\n#} L2_b17_cbr1 end\n#{ L2_b17_cbr2 start\nlayer { # L2_b17_cbr2_conv\n  name: \"L2_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  
convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b17_cbr2_bn\n  name: \"L2_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr2_conv_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b17_cbr2_bn\n  name: \"L2_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr2_conv_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b17_cbr2_scale\n  name: \"L2_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b17_cbr2_bn_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b17_cbr2 end\nlayer { # L2_b17_sum_eltwise\n  name: \"L2_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b17_cbr2_bn_top\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b17_relu\n  name: \"L2_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b17_sum_eltwise_top\"\n}\n#} L2_b17 end\n#{ L2_b18 start\n#{ L2_b18_cbr1 start\nlayer { # L2_b18_cbr1_conv\n  name: \"L2_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    
pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b18_cbr1_bn\n  name: \"L2_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr1_conv_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b18_cbr1_bn\n  name: \"L2_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr1_conv_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b18_cbr1_scale\n  name: \"L2_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b18_cbr1_relu\n  name: \"L2_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n}\n#} L2_b18_cbr1 end\n#{ L2_b18_cbr2 start\nlayer { # L2_b18_cbr2_conv\n  name: \"L2_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b18_cbr2_bn\n  name: \"L2_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr2_conv_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b18_cbr2_bn\n  name: \"L2_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr2_conv_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b18_cbr2_scale\n  name: \"L2_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b18_cbr2_bn_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b18_cbr2 end\nlayer { # L2_b18_sum_eltwise\n  name: \"L2_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b18_cbr2_bn_top\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b18_relu\n  name: \"L2_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L2_b18_sum_eltwise_top\"\n}\n#} L2_b18 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n 
 }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n  
  decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_cbr1 start\nlayer { # L3_b4_cbr1_conv\n  name: \"L3_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_scale\n  name: \"L3_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr1_relu\n  name: \"L3_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n}\n#} L3_b4_cbr1 end\n#{ L3_b4_cbr2 start\nlayer { # L3_b4_cbr2_conv\n  name: \"L3_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_scale\n  name: \"L3_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b4_cbr2 end\nlayer { # L3_b4_sum_eltwise\n  name: 
\"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b4_relu\n  name: \"L3_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_cbr1 start\nlayer { # L3_b5_cbr1_conv\n  name: \"L3_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_scale\n  name: \"L3_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr1_relu\n  name: \"L3_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: 
\"L3_b5_cbr1_bn_top\"\n}\n#} L3_b5_cbr1 end\n#{ L3_b5_cbr2 start\nlayer { # L3_b5_cbr2_conv\n  name: \"L3_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_scale\n  name: \"L3_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b5_cbr2 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b5_relu\n  name: \"L3_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_cbr1 start\nlayer { # 
L3_b6_cbr1_conv\n  name: \"L3_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_scale\n  name: \"L3_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr1_relu\n  name: \"L3_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n}\n#} L3_b6_cbr1 end\n#{ L3_b6_cbr2 start\nlayer { # L3_b6_cbr2_conv\n  name: \"L3_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    
weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_scale\n  name: \"L3_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b6_cbr2 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b6_relu\n  name: \"L3_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n}\n#} L3_b6 end\n#{ L3_b7 start\n#{ L3_b7_cbr1 start\nlayer { # L3_b7_cbr1_conv\n  name: \"L3_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_scale\n  name: \"L3_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr1_relu\n  name: \"L3_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n}\n#} L3_b7_cbr1 end\n#{ L3_b7_cbr2 start\nlayer { # L3_b7_cbr2_conv\n  name: \"L3_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include 
{\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_scale\n  name: \"L3_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b7_cbr2 end\nlayer { # L3_b7_sum_eltwise\n  name: \"L3_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b7_relu\n  name: \"L3_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n}\n#} L3_b7 end\n#{ L3_b8 start\n#{ L3_b8_cbr1 start\nlayer { # L3_b8_cbr1_conv\n  name: \"L3_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_scale\n  name: \"L3_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr1_relu\n  name: \"L3_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n}\n#} L3_b8_cbr1 end\n#{ L3_b8_cbr2 start\nlayer { # L3_b8_cbr2_conv\n  name: \"L3_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n 
 }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_scale\n  name: \"L3_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b8_cbr2 end\nlayer { # L3_b8_sum_eltwise\n  name: \"L3_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b8_relu\n  name: \"L3_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n}\n#} L3_b8 end\n#{ L3_b9 start\n#{ L3_b9_cbr1 start\nlayer { # L3_b9_cbr1_conv\n  name: \"L3_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_scale\n  name: \"L3_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr1_relu\n  name: \"L3_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n}\n#} L3_b9_cbr1 end\n#{ L3_b9_cbr2 start\nlayer { # L3_b9_cbr2_conv\n  name: \"L3_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_scale\n  name: \"L3_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b9_cbr2 end\nlayer { # L3_b9_sum_eltwise\n  name: \"L3_b9_sum_eltwise\"\n  
type: \"Eltwise\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b9_relu\n  name: \"L3_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n}\n#} L3_b9 end\n#{ L3_b10 start\n#{ L3_b10_cbr1 start\nlayer { # L3_b10_cbr1_conv\n  name: \"L3_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b10_cbr1_bn\n  name: \"L3_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr1_conv_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b10_cbr1_bn\n  name: \"L3_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr1_conv_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b10_cbr1_scale\n  name: \"L3_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b10_cbr1_relu\n  name: \"L3_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: 
\"L3_b10_cbr1_bn_top\"\n}\n#} L3_b10_cbr1 end\n#{ L3_b10_cbr2 start\nlayer { # L3_b10_cbr2_conv\n  name: \"L3_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: \"L3_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b10_cbr2_bn\n  name: \"L3_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr2_conv_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b10_cbr2_bn\n  name: \"L3_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr2_conv_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b10_cbr2_scale\n  name: \"L3_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b10_cbr2_bn_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b10_cbr2 end\nlayer { # L3_b10_sum_eltwise\n  name: \"L3_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b10_cbr2_bn_top\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b10_relu\n  name: \"L3_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b10_sum_eltwise_top\"\n}\n#} L3_b10 end\n#{ L3_b11 start\n#{ 
L3_b11_cbr1 start\nlayer { # L3_b11_cbr1_conv\n  name: \"L3_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b11_cbr1_bn\n  name: \"L3_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr1_conv_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b11_cbr1_bn\n  name: \"L3_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr1_conv_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b11_cbr1_scale\n  name: \"L3_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b11_cbr1_relu\n  name: \"L3_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n}\n#} L3_b11_cbr1 end\n#{ L3_b11_cbr2 start\nlayer { # L3_b11_cbr2_conv\n  name: \"L3_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 
64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b11_cbr2_bn\n  name: \"L3_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr2_conv_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b11_cbr2_bn\n  name: \"L3_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr2_conv_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b11_cbr2_scale\n  name: \"L3_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b11_cbr2_bn_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b11_cbr2 end\nlayer { # L3_b11_sum_eltwise\n  name: \"L3_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b11_cbr2_bn_top\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b11_relu\n  name: \"L3_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b11_sum_eltwise_top\"\n}\n#} L3_b11 end\n#{ L3_b12 start\n#{ L3_b12_cbr1 start\nlayer { # L3_b12_cbr1_conv\n  name: \"L3_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b12_cbr1_bn\n  name: \"L3_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr1_conv_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b12_cbr1_bn\n  name: \"L3_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr1_conv_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b12_cbr1_scale\n  name: \"L3_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b12_cbr1_relu\n  name: \"L3_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n}\n#} L3_b12_cbr1 end\n#{ L3_b12_cbr2 start\nlayer { # L3_b12_cbr2_conv\n  name: \"L3_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b12_cbr2_bn\n  name: \"L3_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr2_conv_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b12_cbr2_bn\n  name: \"L3_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr2_conv_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b12_cbr2_scale\n  name: \"L3_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b12_cbr2_bn_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b12_cbr2 end\nlayer { # L3_b12_sum_eltwise\n  name: \"L3_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b12_cbr2_bn_top\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b12_relu\n  name: \"L3_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b12_sum_eltwise_top\"\n}\n#} L3_b12 end\n#{ L3_b13 start\n#{ L3_b13_cbr1 start\nlayer { # L3_b13_cbr1_conv\n  name: \"L3_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b13_cbr1_bn\n  name: \"L3_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr1_conv_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  
}\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b13_cbr1_bn\n  name: \"L3_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr1_conv_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b13_cbr1_scale\n  name: \"L3_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b13_cbr1_relu\n  name: \"L3_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n}\n#} L3_b13_cbr1 end\n#{ L3_b13_cbr2 start\nlayer { # L3_b13_cbr2_conv\n  name: \"L3_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b13_cbr2_bn\n  name: \"L3_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr2_conv_top\"\n  top: \"L3_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b13_cbr2_bn\n  name: \"L3_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr2_conv_top\"\n  top: 
\"L3_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b13_cbr2_scale\n  name: \"L3_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b13_cbr2_bn_top\"\n  top: \"L3_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b13_cbr2 end\nlayer { # L3_b13_sum_eltwise\n  name: \"L3_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b13_cbr2_bn_top\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b13_relu\n  name: \"L3_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b13_sum_eltwise_top\"\n}\n#} L3_b13 end\n#{ L3_b14 start\n#{ L3_b14_cbr1 start\nlayer { # L3_b14_cbr1_conv\n  name: \"L3_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b14_cbr1_bn\n  name: \"L3_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr1_conv_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b14_cbr1_bn\n  name: \"L3_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr1_conv_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b14_cbr1_scale\n  name: \"L3_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b14_cbr1_relu\n  name: \"L3_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n}\n#} L3_b14_cbr1 end\n#{ L3_b14_cbr2 start\nlayer { # L3_b14_cbr2_conv\n  name: \"L3_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b14_cbr2_bn\n  name: \"L3_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr2_conv_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b14_cbr2_bn\n  name: \"L3_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr2_conv_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b14_cbr2_scale\n  name: 
\"L3_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b14_cbr2_bn_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b14_cbr2 end\nlayer { # L3_b14_sum_eltwise\n  name: \"L3_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b14_cbr2_bn_top\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b14_relu\n  name: \"L3_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: \"L3_b14_sum_eltwise_top\"\n}\n#} L3_b14 end\n#{ L3_b15 start\n#{ L3_b15_cbr1 start\nlayer { # L3_b15_cbr1_conv\n  name: \"L3_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: \"L3_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b15_cbr1_bn\n  name: \"L3_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr1_conv_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b15_cbr1_bn\n  name: \"L3_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr1_conv_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b15_cbr1_scale\n  name: \"L3_b15_cbr1_scale\"\n  type: \"Scale\"\n  
bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b15_cbr1_relu\n  name: \"L3_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n}\n#} L3_b15_cbr1 end\n#{ L3_b15_cbr2 start\nlayer { # L3_b15_cbr2_conv\n  name: \"L3_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b15_cbr2_bn\n  name: \"L3_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr2_conv_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b15_cbr2_bn\n  name: \"L3_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr2_conv_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b15_cbr2_scale\n  name: \"L3_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b15_cbr2_bn_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b15_cbr2 end\nlayer { # L3_b15_sum_eltwise\n  name: \"L3_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b15_cbr2_bn_top\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: 
\"L3_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b15_relu\n  name: \"L3_b15_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b15_sum_eltwise_top\"\n}\n#} L3_b15 end\n#{ L3_b16 start\n#{ L3_b16_cbr1 start\nlayer { # L3_b16_cbr1_conv\n  name: \"L3_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b16_cbr1_bn\n  name: \"L3_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr1_conv_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b16_cbr1_bn\n  name: \"L3_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr1_conv_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b16_cbr1_scale\n  name: \"L3_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b16_cbr1_relu\n  name: \"L3_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n}\n#} L3_b16_cbr1 end\n#{ L3_b16_cbr2 start\nlayer { # L3_b16_cbr2_conv\n  name: 
\"L3_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b16_cbr2_bn\n  name: \"L3_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr2_conv_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b16_cbr2_bn\n  name: \"L3_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr2_conv_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b16_cbr2_scale\n  name: \"L3_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b16_cbr2_bn_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b16_cbr2 end\nlayer { # L3_b16_sum_eltwise\n  name: \"L3_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b16_cbr2_bn_top\"\n  bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b16_relu\n  name: \"L3_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b16_sum_eltwise_top\"\n}\n#} L3_b16 end\n#{ L3_b17 start\n#{ L3_b17_cbr1 start\nlayer { # L3_b17_cbr1_conv\n  name: \"L3_b17_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b17_cbr1_bn\n  name: \"L3_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr1_conv_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b17_cbr1_bn\n  name: \"L3_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr1_conv_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b17_cbr1_scale\n  name: \"L3_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b17_cbr1_relu\n  name: \"L3_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n}\n#} L3_b17_cbr1 end\n#{ L3_b17_cbr2 start\nlayer { # L3_b17_cbr2_conv\n  name: \"L3_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b17_cbr2_bn\n  name: \"L3_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr2_conv_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b17_cbr2_bn\n  name: \"L3_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr2_conv_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b17_cbr2_scale\n  name: \"L3_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b17_cbr2_bn_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b17_cbr2 end\nlayer { # L3_b17_sum_eltwise\n  name: \"L3_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b17_cbr2_bn_top\"\n  bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b17_relu\n  name: \"L3_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b17_sum_eltwise_top\"\n}\n#} L3_b17 end\n#{ L3_b18 start\n#{ L3_b18_cbr1 start\nlayer { # L3_b18_cbr1_conv\n  name: \"L3_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer { # L3_b18_cbr1_bn\n  name: \"L3_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr1_conv_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b18_cbr1_bn\n  name: \"L3_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr1_conv_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b18_cbr1_scale\n  name: \"L3_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b18_cbr1_relu\n  name: \"L3_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n}\n#} L3_b18_cbr1 end\n#{ L3_b18_cbr2 start\nlayer { # L3_b18_cbr2_conv\n  name: \"L3_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b18_cbr2_bn\n  name: \"L3_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr2_conv_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include 
{\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b18_cbr2_bn\n  name: \"L3_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr2_conv_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b18_cbr2_scale\n  name: \"L3_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b18_cbr2_bn_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b18_cbr2 end\nlayer { # L3_b18_sum_eltwise\n  name: \"L3_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b18_cbr2_bn_top\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b18_relu\n  name: \"L3_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b18_sum_eltwise_top\"\n  top: \"L3_b18_sum_eltwise_top\"\n}\n#} L3_b18 end\n#} L3 end\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b18_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/Resnet20Cifar.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ 
L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param 
{\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n  
  lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: 
\"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  
type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  
top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: 
\"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include 
{\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # 
L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  
top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#} L3 end\nlayer { # post_pool\n  name: 
\"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/Resnet56Cifar-mod.prototxt",
    "content": "name: \"Cifar-Resnet\" \nlayer { # train data layer \n  name: \"dataLayer\" \n  type: \"Data\" \n  top: \"data\" \n  top: \"label\" \n  include { \n    phase: TRAIN \n  } \n  transform_param { \n    mirror: true \n    crop_size: 32 \n    mean_file: \"examples/cifar10/mean.binaryproto\" \n  } \n  data_param { \n    source: \"examples/cifar10/cifar10_train_lmdb\" \n    batch_size: 125  \n    backend: LMDB \n  } \n  image_data_param { \n  shuffle: true \n  } \n} \nlayer { # test data layer \n  name: \"dataLayer\" \n  type: \"Data\" \n  top: \"data\" \n  top: \"label\" \n  include { \n    phase: TEST \n  } \n  transform_param { \n    mirror: false \n    crop_size: 32 \n    mean_file: \"examples/cifar10/mean.binaryproto\" \n  } \n  data_param { \n    source: \"examples/cifar10/cifar10_test_lmdb\" \n    batch_size: 125 \n    backend: LMDB \n  } \n} \nlayer { # conv \n  name: \"conv\" \n  type: \"Convolution\" \n  bottom: \"data\" \n  top: \"conv\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_conv \n  name: \"batchNorm_conv\" \n  type: \"BatchNorm\" \n  bottom: \"conv\" \n  top: \"bn_conv\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_conv \n  name: \"scale_conv\" \n  type: \"Scale\" \n  bottom: \"bn_conv\" \n  top: \"bn_conv\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_conv \n  name: \"relu_bn_conv\" \n  type: \"ReLU\" \n  bottom: \"bn_conv\" \n  top: \"bn_conv\" \n} \nlayer { # Conv16_1 \n  name: 
\"Conv16_1\" \n  type: \"Convolution\" \n  bottom: \"bn_conv\" \n  top: \"Conv16_1\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_1 \n  name: \"batchNorm_Conv16_1\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1\" \n  top: \"bn_Conv16_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_1 \n  name: \"scale_Conv16_1\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_1\" \n  top: \"bn_Conv16_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_1 \n  name: \"relu_bn_Conv16_1\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_1\" \n  top: \"bn_Conv16_1\" \n} \nlayer { # Conv16_1_b \n  name: \"Conv16_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_1\" \n  top: \"Conv16_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_1_b \n  name: \"batchNorm_Conv16_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1_b\" \n  top: \"bn_Conv16_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # 
scale_Conv16_1_b \n  name: \"scale_Conv16_1_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_1_b\" \n  top: \"bn_Conv16_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_bn_conv \n    name: \"sum_bn_conv\" \n    type: \"Eltwise\" \n    bottom: \"bn_conv\" \n    bottom: \"bn_Conv16_1_b\" \n    top: \"sum_bn_Conv16_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_1_b \n  name: \"relu_sum_bn_Conv16_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_1_b\" \n  top: \"sum_bn_Conv16_1_b\" \n} \nlayer { # Conv16_2 \n  name: \"Conv16_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_1_b\" \n  top: \"Conv16_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_2 \n  name: \"batchNorm_Conv16_2\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2\" \n  top: \"bn_Conv16_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_2 \n  name: \"scale_Conv16_2\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_2\" \n  top: \"bn_Conv16_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_2 \n  name: \"relu_bn_Conv16_2\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_2\" \n  top: \"bn_Conv16_2\" \n} \nlayer { # Conv16_2_b \n  name: \"Conv16_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_2\" \n  top: \"Conv16_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 
\n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_2_b \n  name: \"batchNorm_Conv16_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2_b\" \n  top: \"bn_Conv16_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_2_b \n  name: \"scale_Conv16_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_2_b\" \n  top: \"bn_Conv16_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_1_b \n    name: \"sum_sum_bn_Conv16_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_1_b\" \n    bottom: \"bn_Conv16_2_b\" \n    top: \"sum_bn_Conv16_2_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_2_b \n  name: \"relu_sum_bn_Conv16_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_2_b\" \n  top: \"sum_bn_Conv16_2_b\" \n} \nlayer { # Conv16_3 \n  name: \"Conv16_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_2_b\" \n  top: \"Conv16_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_3 \n  name: \"batchNorm_Conv16_3\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3\" \n  top: \"bn_Conv16_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 
0.999 \n  } \n} \nlayer { # scale_Conv16_3 \n  name: \"scale_Conv16_3\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_3\" \n  top: \"bn_Conv16_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_3 \n  name: \"relu_bn_Conv16_3\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_3\" \n  top: \"bn_Conv16_3\" \n} \nlayer { # Conv16_3_b \n  name: \"Conv16_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_3\" \n  top: \"Conv16_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_3_b \n  name: \"batchNorm_Conv16_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3_b\" \n  top: \"bn_Conv16_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_3_b \n  name: \"scale_Conv16_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_3_b\" \n  top: \"bn_Conv16_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_2_b \n    name: \"sum_sum_bn_Conv16_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_2_b\" \n    bottom: \"bn_Conv16_3_b\" \n    top: \"sum_bn_Conv16_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_3_b \n  name: \"relu_sum_bn_Conv16_3_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_3_b\" \n  top: \"sum_bn_Conv16_3_b\" \n} \nlayer { # Conv16_4 \n  name: \"Conv16_4\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_3_b\" \n  top: \"Conv16_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    
decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_4 \n  name: \"batchNorm_Conv16_4\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4\" \n  top: \"bn_Conv16_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_4 \n  name: \"scale_Conv16_4\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_4\" \n  top: \"bn_Conv16_4\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_4 \n  name: \"relu_bn_Conv16_4\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_4\" \n  top: \"bn_Conv16_4\" \n} \nlayer { # Conv16_4_b \n  name: \"Conv16_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_4\" \n  top: \"Conv16_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_4_b \n  name: \"batchNorm_Conv16_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4_b\" \n  top: \"bn_Conv16_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_4_b \n  name: \"scale_Conv16_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_4_b\" \n  top: \"bn_Conv16_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # 
sum_sum_bn_Conv16_3_b \n    name: \"sum_sum_bn_Conv16_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_3_b\" \n    bottom: \"bn_Conv16_4_b\" \n    top: \"sum_bn_Conv16_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_4_b \n  name: \"relu_sum_bn_Conv16_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_4_b\" \n  top: \"sum_bn_Conv16_4_b\" \n} \nlayer { # Conv16_5 \n  name: \"Conv16_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_4_b\" \n  top: \"Conv16_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_5 \n  name: \"batchNorm_Conv16_5\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5\" \n  top: \"bn_Conv16_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_5 \n  name: \"scale_Conv16_5\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_5\" \n  top: \"bn_Conv16_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_5 \n  name: \"relu_bn_Conv16_5\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_5\" \n  top: \"bn_Conv16_5\" \n} \nlayer { # Conv16_5_b \n  name: \"Conv16_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_5\" \n  top: \"Conv16_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } 
\n  } \n} \nlayer { # bn_Conv16_5_b \n  name: \"batchNorm_Conv16_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5_b\" \n  top: \"bn_Conv16_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_5_b \n  name: \"scale_Conv16_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_5_b\" \n  top: \"bn_Conv16_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_4_b \n    name: \"sum_sum_bn_Conv16_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_4_b\" \n    bottom: \"bn_Conv16_5_b\" \n    top: \"sum_bn_Conv16_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_5_b \n  name: \"relu_sum_bn_Conv16_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_5_b\" \n  top: \"sum_bn_Conv16_5_b\" \n} \nlayer { # Conv16_6 \n  name: \"Conv16_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_5_b\" \n  top: \"Conv16_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_6 \n  name: \"batchNorm_Conv16_6\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6\" \n  top: \"bn_Conv16_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_6 \n  name: \"scale_Conv16_6\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_6\" \n  top: \"bn_Conv16_6\" \n  
scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_6 \n  name: \"relu_bn_Conv16_6\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_6\" \n  top: \"bn_Conv16_6\" \n} \nlayer { # Conv16_6_b \n  name: \"Conv16_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_6\" \n  top: \"Conv16_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_6_b \n  name: \"batchNorm_Conv16_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6_b\" \n  top: \"bn_Conv16_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_6_b \n  name: \"scale_Conv16_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_6_b\" \n  top: \"bn_Conv16_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_5_b \n    name: \"sum_sum_bn_Conv16_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_5_b\" \n    bottom: \"bn_Conv16_6_b\" \n    top: \"sum_bn_Conv16_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_6_b \n  name: \"relu_sum_bn_Conv16_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_6_b\" \n  top: \"sum_bn_Conv16_6_b\" \n} \nlayer { # Conv16_7 \n  name: \"Conv16_7\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_6_b\" \n  top: \"Conv16_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      
type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_7 \n  name: \"batchNorm_Conv16_7\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7\" \n  top: \"bn_Conv16_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_7 \n  name: \"scale_Conv16_7\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_7\" \n  top: \"bn_Conv16_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_7 \n  name: \"relu_bn_Conv16_7\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_7\" \n  top: \"bn_Conv16_7\" \n} \nlayer { # Conv16_7_b \n  name: \"Conv16_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_7\" \n  top: \"Conv16_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_7_b \n  name: \"batchNorm_Conv16_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7_b\" \n  top: \"bn_Conv16_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_7_b \n  name: \"scale_Conv16_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_7_b\" \n  top: \"bn_Conv16_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_6_b \n    name: \"sum_sum_bn_Conv16_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_6_b\" \n    bottom: 
\"bn_Conv16_7_b\" \n    top: \"sum_bn_Conv16_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_7_b \n  name: \"relu_sum_bn_Conv16_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_7_b\" \n  top: \"sum_bn_Conv16_7_b\" \n} \nlayer { # Conv16_8 \n  name: \"Conv16_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_7_b\" \n  top: \"Conv16_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_8 \n  name: \"batchNorm_Conv16_8\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8\" \n  top: \"bn_Conv16_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_8 \n  name: \"scale_Conv16_8\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_8\" \n  top: \"bn_Conv16_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_8 \n  name: \"relu_bn_Conv16_8\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_8\" \n  top: \"bn_Conv16_8\" \n} \nlayer { # Conv16_8_b \n  name: \"Conv16_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_8\" \n  top: \"Conv16_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_8_b \n  name: \"batchNorm_Conv16_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8_b\" \n  top: 
\"bn_Conv16_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_8_b \n  name: \"scale_Conv16_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_8_b\" \n  top: \"bn_Conv16_8_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_7_b \n    name: \"sum_sum_bn_Conv16_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_7_b\" \n    bottom: \"bn_Conv16_8_b\" \n    top: \"sum_bn_Conv16_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_8_b \n  name: \"relu_sum_bn_Conv16_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_8_b\" \n  top: \"sum_bn_Conv16_8_b\" \n} \nlayer { # Conv16_9 \n  name: \"Conv16_9\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_8_b\" \n  top: \"Conv16_9\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_9 \n  name: \"batchNorm_Conv16_9\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9\" \n  top: \"bn_Conv16_9\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_9 \n  name: \"scale_Conv16_9\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_9\" \n  top: \"bn_Conv16_9\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_9 \n  name: \"relu_bn_Conv16_9\" \n  type: \"ReLU\" \n  bottom: 
\"bn_Conv16_9\" \n  top: \"bn_Conv16_9\" \n} \nlayer { # Conv16_9_b \n  name: \"Conv16_9_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_9\" \n  top: \"Conv16_9_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_9_b \n  name: \"batchNorm_Conv16_9_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9_b\" \n  top: \"bn_Conv16_9_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_Conv16_9_b \n  name: \"scale_Conv16_9_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_9_b\" \n  top: \"bn_Conv16_9_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_8_b \n    name: \"sum_sum_bn_Conv16_8_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_8_b\" \n    bottom: \"bn_Conv16_9_b\" \n    top: \"sum_bn_Conv16_9_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_9_b \n  name: \"relu_sum_bn_Conv16_9_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_9_b\" \n  top: \"sum_bn_Conv16_9_b\" \n} \nlayer { # resblk32 \n  name: \"resblk32\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_9_b\" \n  top: \"resblk32\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 2 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32 \n  name: 
\"batchNorm_resblk32\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32\" \n  top: \"bn_resblk32\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32 \n  name: \"scale_resblk32\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32\" \n  top: \"bn_resblk32\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32 \n  name: \"relu_bn_resblk32\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32\" \n  top: \"bn_resblk32\" \n} \nlayer { # resblk32_b \n  name: \"resblk32_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32\" \n  top: \"resblk32_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_b \n  name: \"batchNorm_resblk32_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_b\" \n  top: \"bn_resblk32_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_b \n  name: \"scale_resblk32_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_b\" \n  top: \"bn_resblk32_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # pool_resblk32 \n  name: \"avePooling_resblk32\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_Conv16_9_b\" \n  top: \"avgPool_resblk32\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 3 \n    stride: 2 \n  } \n} \nlayer { # sum_avgPool_resblk32 \n    name: 
\"sum_avgPool_resblk32\" \n    type: \"Eltwise\" \n    bottom: \"avgPool_resblk32\" \n    bottom: \"bn_resblk32_b\" \n    top: \"sum_bn_resblk32_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_b \n  name: \"relu_sum_bn_resblk32_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_b\" \n  top: \"sum_bn_resblk32_b\" \n} \nlayer { # Dummy \n  name: \"zeros_sum_bn_resblk32_b\" \n  type: \"DummyData\" \n  top: \"zeros_sum_bn_resblk32_b\" \n  dummy_data_param { \n    shape: {dim: 125  dim: 16 dim: 16  dim: 16 } \n    data_filler: { \n                type: \"constant\" \n                value: 0 \n        } \n  } \n} \nlayer { # ConCat_sum_bn_resblk32_b \n  name: \"CC_sum_bn_resblk32_b\" \n  bottom: \"sum_bn_resblk32_b\" \n  bottom: \"zeros_sum_bn_resblk32_b\" \n  top: \"CC_sum_bn_resblk32_b\" \n  type: \"Concat\" \n  concat_param { \n    axis: 1 \n  } \n} \nlayer { # resblk32_1 \n  name: \"resblk32_1\" \n  type: \"Convolution\" \n  bottom: \"CC_sum_bn_resblk32_b\" \n  top: \"resblk32_1\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_1 \n  name: \"batchNorm_resblk32_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1\" \n  top: \"bn_resblk32_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_1 \n  name: \"scale_resblk32_1\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_1\" \n  top: \"bn_resblk32_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_1 \n  name: 
\"relu_bn_resblk32_1\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_1\" \n  top: \"bn_resblk32_1\" \n} \nlayer { # resblk32_1_b \n  name: \"resblk32_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_1\" \n  top: \"resblk32_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_1_b \n  name: \"batchNorm_resblk32_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1_b\" \n  top: \"bn_resblk32_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_1_b \n  name: \"scale_resblk32_1_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_1_b\" \n  top: \"bn_resblk32_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_CC_sum_bn_resblk32_b \n    name: \"sum_CC_sum_bn_resblk32_b\" \n    type: \"Eltwise\" \n    bottom: \"CC_sum_bn_resblk32_b\" \n    bottom: \"bn_resblk32_1_b\" \n    top: \"sum_bn_resblk32_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_1_b \n  name: \"relu_sum_bn_resblk32_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_1_b\" \n  top: \"sum_bn_resblk32_1_b\" \n} \nlayer { # resblk32_2 \n  name: \"resblk32_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_1_b\" \n  top: \"resblk32_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    
bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_2 \n  name: \"batchNorm_resblk32_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2\" \n  top: \"bn_resblk32_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_2 \n  name: \"scale_resblk32_2\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_2\" \n  top: \"bn_resblk32_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_2 \n  name: \"relu_bn_resblk32_2\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_2\" \n  top: \"bn_resblk32_2\" \n} \nlayer { # resblk32_2_b \n  name: \"resblk32_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_2\" \n  top: \"resblk32_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_2_b \n  name: \"batchNorm_resblk32_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2_b\" \n  top: \"bn_resblk32_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_2_b \n  name: \"scale_resblk32_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_2_b\" \n  top: \"bn_resblk32_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_1_b \n    name: \"sum_sum_bn_resblk32_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_1_b\" \n  
  bottom: \"bn_resblk32_2_b\" \n    top: \"sum_bn_resblk32_2_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_2_b \n  name: \"relu_sum_bn_resblk32_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_2_b\" \n  top: \"sum_bn_resblk32_2_b\" \n} \nlayer { # resblk32_3 \n  name: \"resblk32_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_2_b\" \n  top: \"resblk32_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_3 \n  name: \"batchNorm_resblk32_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3\" \n  top: \"bn_resblk32_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_3 \n  name: \"scale_resblk32_3\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_3\" \n  top: \"bn_resblk32_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_3 \n  name: \"relu_bn_resblk32_3\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_3\" \n  top: \"bn_resblk32_3\" \n} \nlayer { # resblk32_3_b \n  name: \"resblk32_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_3\" \n  top: \"resblk32_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_3_b \n  name: 
\"batchNorm_resblk32_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3_b\" \n  top: \"bn_resblk32_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_3_b \n  name: \"scale_resblk32_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_3_b\" \n  top: \"bn_resblk32_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_2_b \n    name: \"sum_sum_bn_resblk32_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_2_b\" \n    bottom: \"bn_resblk32_3_b\" \n    top: \"sum_bn_resblk32_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_3_b \n  name: \"relu_sum_bn_resblk32_3_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_3_b\" \n  top: \"sum_bn_resblk32_3_b\" \n} \nlayer { # resblk32_4 \n  name: \"resblk32_4\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_3_b\" \n  top: \"resblk32_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_4 \n  name: \"batchNorm_resblk32_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4\" \n  top: \"bn_resblk32_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_4 \n  name: \"scale_resblk32_4\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_4\" \n  top: \"bn_resblk32_4\" \n 
 scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_4 \n  name: \"relu_bn_resblk32_4\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_4\" \n  top: \"bn_resblk32_4\" \n} \nlayer { # resblk32_4_b \n  name: \"resblk32_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_4\" \n  top: \"resblk32_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_4_b \n  name: \"batchNorm_resblk32_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4_b\" \n  top: \"bn_resblk32_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_4_b \n  name: \"scale_resblk32_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_4_b\" \n  top: \"bn_resblk32_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_3_b \n    name: \"sum_sum_bn_resblk32_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_3_b\" \n    bottom: \"bn_resblk32_4_b\" \n    top: \"sum_bn_resblk32_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_4_b \n  name: \"relu_sum_bn_resblk32_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_4_b\" \n  top: \"sum_bn_resblk32_4_b\" \n} \nlayer { # resblk32_5 \n  name: \"resblk32_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_4_b\" \n  top: \"resblk32_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    
kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_5 \n  name: \"batchNorm_resblk32_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_5\" \n  top: \"bn_resblk32_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_5 \n  name: \"scale_resblk32_5\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_5\" \n  top: \"bn_resblk32_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_5 \n  name: \"relu_bn_resblk32_5\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_5\" \n  top: \"bn_resblk32_5\" \n} \nlayer { # resblk32_5_b \n  name: \"resblk32_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_5\" \n  top: \"resblk32_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_5_b \n  name: \"batchNorm_resblk32_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_5_b\" \n  top: \"bn_resblk32_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_5_b \n  name: \"scale_resblk32_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_5_b\" \n  top: \"bn_resblk32_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_4_b \n    name: 
\"sum_sum_bn_resblk32_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_4_b\" \n    bottom: \"bn_resblk32_5_b\" \n    top: \"sum_bn_resblk32_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_5_b \n  name: \"relu_sum_bn_resblk32_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_5_b\" \n  top: \"sum_bn_resblk32_5_b\" \n} \nlayer { # resblk32_6 \n  name: \"resblk32_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_5_b\" \n  top: \"resblk32_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_6 \n  name: \"batchNorm_resblk32_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6\" \n  top: \"bn_resblk32_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_6 \n  name: \"scale_resblk32_6\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_6\" \n  top: \"bn_resblk32_6\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_6 \n  name: \"relu_bn_resblk32_6\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_6\" \n  top: \"bn_resblk32_6\" \n} \nlayer { # resblk32_6_b \n  name: \"resblk32_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_6\" \n  top: \"resblk32_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: 
\"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_6_b \n  name: \"batchNorm_resblk32_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6_b\" \n  top: \"bn_resblk32_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_6_b \n  name: \"scale_resblk32_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_6_b\" \n  top: \"bn_resblk32_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_5_b \n    name: \"sum_sum_bn_resblk32_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_5_b\" \n    bottom: \"bn_resblk32_6_b\" \n    top: \"sum_bn_resblk32_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_6_b \n  name: \"relu_sum_bn_resblk32_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_6_b\" \n  top: \"sum_bn_resblk32_6_b\" \n} \nlayer { # resblk32_7 \n  name: \"resblk32_7\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_6_b\" \n  top: \"resblk32_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_7 \n  name: \"batchNorm_resblk32_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7\" \n  top: \"bn_resblk32_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_7 \n  name: \"scale_resblk32_7\" \n  type: 
\"Scale\" \n  bottom: \"bn_resblk32_7\" \n  top: \"bn_resblk32_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_7 \n  name: \"relu_bn_resblk32_7\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_7\" \n  top: \"bn_resblk32_7\" \n} \nlayer { # resblk32_7_b \n  name: \"resblk32_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_7\" \n  top: \"resblk32_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_7_b \n  name: \"batchNorm_resblk32_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7_b\" \n  top: \"bn_resblk32_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_7_b \n  name: \"scale_resblk32_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_7_b\" \n  top: \"bn_resblk32_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_6_b \n    name: \"sum_sum_bn_resblk32_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_6_b\" \n    bottom: \"bn_resblk32_7_b\" \n    top: \"sum_bn_resblk32_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_7_b \n  name: \"relu_sum_bn_resblk32_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_7_b\" \n  top: \"sum_bn_resblk32_7_b\" \n} \nlayer { # resblk32_8 \n  name: \"resblk32_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_7_b\" \n  top: \"resblk32_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } 
\n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_8 \n  name: \"batchNorm_resblk32_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8\" \n  top: \"bn_resblk32_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_8 \n  name: \"scale_resblk32_8\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_8\" \n  top: \"bn_resblk32_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_8 \n  name: \"relu_bn_resblk32_8\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_8\" \n  top: \"bn_resblk32_8\" \n} \nlayer { # resblk32_8_b \n  name: \"resblk32_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_8\" \n  top: \"resblk32_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_8_b \n  name: \"batchNorm_resblk32_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8_b\" \n  top: \"bn_resblk32_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk32_8_b \n  name: \"scale_resblk32_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_8_b\" \n  top: \"bn_resblk32_8_b\" \n  scale_param { \n    bias_term: 
true \n  } \n} \nlayer { # sum_sum_bn_resblk32_7_b \n    name: \"sum_sum_bn_resblk32_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_7_b\" \n    bottom: \"bn_resblk32_8_b\" \n    top: \"sum_bn_resblk32_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_8_b \n  name: \"relu_sum_bn_resblk32_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"sum_bn_resblk32_8_b\" \n} \nlayer { # resblk64 \n  name: \"resblk64\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"resblk64\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 2 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64 \n  name: \"batchNorm_resblk64\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64\" \n  top: \"bn_resblk64\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64 \n  name: \"scale_resblk64\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64\" \n  top: \"bn_resblk64\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64 \n  name: \"relu_bn_resblk64\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64\" \n  top: \"bn_resblk64\" \n} \nlayer { # resblk64_b \n  name: \"resblk64_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64\" \n  top: \"resblk64_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    
bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_b \n  name: \"batchNorm_resblk64_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_b\" \n  top: \"bn_resblk64_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_b \n  name: \"scale_resblk64_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_b\" \n  top: \"bn_resblk64_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # pool_resblk64 \n  name: \"avePooling_resblk64\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"avgPool_resblk64\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 3 \n    stride: 2 \n  } \n} \nlayer { # sum_avgPool_resblk64 \n    name: \"sum_avgPool_resblk64\" \n    type: \"Eltwise\" \n    bottom: \"avgPool_resblk64\" \n    bottom: \"bn_resblk64_b\" \n    top: \"sum_bn_resblk64_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_b \n  name: \"relu_sum_bn_resblk64_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_b\" \n  top: \"sum_bn_resblk64_b\" \n} \nlayer { # Dummy \n  name: \"zeros_sum_bn_resblk64_b\" \n  type: \"DummyData\" \n  top: \"zeros_sum_bn_resblk64_b\" \n  dummy_data_param { \n    shape: {dim: 125  dim: 32 dim: 8  dim: 8 } \n    data_filler: { \n                type: \"constant\" \n                value: 0 \n        } \n  } \n} \nlayer { # ConCat_sum_bn_resblk64_b \n  name: \"CC_sum_bn_resblk64_b\" \n  bottom: \"sum_bn_resblk64_b\" \n  bottom: \"zeros_sum_bn_resblk64_b\" \n  top: \"CC_sum_bn_resblk64_b\" \n  type: \"Concat\" \n  concat_param { \n    axis: 1 \n  } \n} \nlayer { # resblk64_1 \n  name: \"resblk64_1\" \n  type: \"Convolution\" \n  bottom: \"CC_sum_bn_resblk64_b\" \n  top: \"resblk64_1\" \n  param { \n    lr_mult: 1 \n    
decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_1 \n  name: \"batchNorm_resblk64_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1\" \n  top: \"bn_resblk64_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_1 \n  name: \"scale_resblk64_1\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_1\" \n  top: \"bn_resblk64_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_1 \n  name: \"relu_bn_resblk64_1\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_1\" \n  top: \"bn_resblk64_1\" \n} \nlayer { # resblk64_1_b \n  name: \"resblk64_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_1\" \n  top: \"resblk64_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_1_b \n  name: \"batchNorm_resblk64_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1_b\" \n  top: \"bn_resblk64_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_1_b \n  name: \"scale_resblk64_1_b\" \n  type: \"Scale\" \n  bottom: 
\"bn_resblk64_1_b\" \n  top: \"bn_resblk64_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_CC_sum_bn_resblk64_b \n    name: \"sum_CC_sum_bn_resblk64_b\" \n    type: \"Eltwise\" \n    bottom: \"CC_sum_bn_resblk64_b\" \n    bottom: \"bn_resblk64_1_b\" \n    top: \"sum_bn_resblk64_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_1_b \n  name: \"relu_sum_bn_resblk64_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_1_b\" \n  top: \"sum_bn_resblk64_1_b\" \n} \nlayer { # resblk64_2 \n  name: \"resblk64_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_1_b\" \n  top: \"resblk64_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_2 \n  name: \"batchNorm_resblk64_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2\" \n  top: \"bn_resblk64_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_2 \n  name: \"scale_resblk64_2\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_2\" \n  top: \"bn_resblk64_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_2 \n  name: \"relu_bn_resblk64_2\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_2\" \n  top: \"bn_resblk64_2\" \n} \nlayer { # resblk64_2_b \n  name: \"resblk64_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_2\" \n  top: \"resblk64_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    
num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_2_b \n  name: \"batchNorm_resblk64_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2_b\" \n  top: \"bn_resblk64_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_2_b \n  name: \"scale_resblk64_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_2_b\" \n  top: \"bn_resblk64_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_1_b \n    name: \"sum_sum_bn_resblk64_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_1_b\" \n    bottom: \"bn_resblk64_2_b\" \n    top: \"sum_bn_resblk64_2_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_2_b \n  name: \"relu_sum_bn_resblk64_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_2_b\" \n  top: \"sum_bn_resblk64_2_b\" \n} \nlayer { # resblk64_3 \n  name: \"resblk64_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_2_b\" \n  top: \"resblk64_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_3 \n  name: \"batchNorm_resblk64_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3\" \n  top: \"bn_resblk64_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  
batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_3 \n  name: \"scale_resblk64_3\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_3\" \n  top: \"bn_resblk64_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_3 \n  name: \"relu_bn_resblk64_3\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_3\" \n  top: \"bn_resblk64_3\" \n} \nlayer { # resblk64_3_b \n  name: \"resblk64_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_3\" \n  top: \"resblk64_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_3_b \n  name: \"batchNorm_resblk64_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3_b\" \n  top: \"bn_resblk64_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_3_b \n  name: \"scale_resblk64_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_3_b\" \n  top: \"bn_resblk64_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_2_b \n    name: \"sum_sum_bn_resblk64_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_2_b\" \n    bottom: \"bn_resblk64_3_b\" \n    top: \"sum_bn_resblk64_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_3_b \n  name: \"relu_sum_bn_resblk64_3_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_3_b\" \n  top: \"sum_bn_resblk64_3_b\" \n} \nlayer { # resblk64_4 \n  name: \"resblk64_4\" \n  type: \"Convolution\" \n  
bottom: \"sum_bn_resblk64_3_b\" \n  top: \"resblk64_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_4 \n  name: \"batchNorm_resblk64_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4\" \n  top: \"bn_resblk64_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_4 \n  name: \"scale_resblk64_4\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_4\" \n  top: \"bn_resblk64_4\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_4 \n  name: \"relu_bn_resblk64_4\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_4\" \n  top: \"bn_resblk64_4\" \n} \nlayer { # resblk64_4_b \n  name: \"resblk64_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_4\" \n  top: \"resblk64_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_4_b \n  name: \"batchNorm_resblk64_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4_b\" \n  top: \"bn_resblk64_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # 
scale_resblk64_4_b \n  name: \"scale_resblk64_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_4_b\" \n  top: \"bn_resblk64_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_3_b \n    name: \"sum_sum_bn_resblk64_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_3_b\" \n    bottom: \"bn_resblk64_4_b\" \n    top: \"sum_bn_resblk64_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_4_b \n  name: \"relu_sum_bn_resblk64_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_4_b\" \n  top: \"sum_bn_resblk64_4_b\" \n} \nlayer { # resblk64_5 \n  name: \"resblk64_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_4_b\" \n  top: \"resblk64_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_5 \n  name: \"batchNorm_resblk64_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5\" \n  top: \"bn_resblk64_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_5 \n  name: \"scale_resblk64_5\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_5\" \n  top: \"bn_resblk64_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_5 \n  name: \"relu_bn_resblk64_5\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_5\" \n  top: \"bn_resblk64_5\" \n} \nlayer { # resblk64_5_b \n  name: \"resblk64_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_5\" \n  top: \"resblk64_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  
param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_5_b \n  name: \"batchNorm_resblk64_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5_b\" \n  top: \"bn_resblk64_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_5_b \n  name: \"scale_resblk64_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_5_b\" \n  top: \"bn_resblk64_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_4_b \n    name: \"sum_sum_bn_resblk64_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_4_b\" \n    bottom: \"bn_resblk64_5_b\" \n    top: \"sum_bn_resblk64_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_5_b \n  name: \"relu_sum_bn_resblk64_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_5_b\" \n  top: \"sum_bn_resblk64_5_b\" \n} \nlayer { # resblk64_6 \n  name: \"resblk64_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_5_b\" \n  top: \"resblk64_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_6 \n  name: \"batchNorm_resblk64_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6\" \n  top: \"bn_resblk64_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 
0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_6 \n  name: \"scale_resblk64_6\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_6\" \n  top: \"bn_resblk64_6\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_6 \n  name: \"relu_bn_resblk64_6\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_6\" \n  top: \"bn_resblk64_6\" \n} \nlayer { # resblk64_6_b \n  name: \"resblk64_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_6\" \n  top: \"resblk64_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_6_b \n  name: \"batchNorm_resblk64_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6_b\" \n  top: \"bn_resblk64_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_6_b \n  name: \"scale_resblk64_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_6_b\" \n  top: \"bn_resblk64_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_5_b \n    name: \"sum_sum_bn_resblk64_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_5_b\" \n    bottom: \"bn_resblk64_6_b\" \n    top: \"sum_bn_resblk64_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_6_b \n  name: \"relu_sum_bn_resblk64_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_6_b\" \n  top: \"sum_bn_resblk64_6_b\" \n} \nlayer { # 
resblk64_7 \n  name: \"resblk64_7\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_6_b\" \n  top: \"resblk64_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_7 \n  name: \"batchNorm_resblk64_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7\" \n  top: \"bn_resblk64_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_7 \n  name: \"scale_resblk64_7\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_7\" \n  top: \"bn_resblk64_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_7 \n  name: \"relu_bn_resblk64_7\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_7\" \n  top: \"bn_resblk64_7\" \n} \nlayer { # resblk64_7_b \n  name: \"resblk64_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_7\" \n  top: \"resblk64_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_7_b \n  name: \"batchNorm_resblk64_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7_b\" \n  top: \"bn_resblk64_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: 
false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_7_b \n  name: \"scale_resblk64_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_7_b\" \n  top: \"bn_resblk64_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_6_b \n    name: \"sum_sum_bn_resblk64_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_6_b\" \n    bottom: \"bn_resblk64_7_b\" \n    top: \"sum_bn_resblk64_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_7_b \n  name: \"relu_sum_bn_resblk64_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_7_b\" \n  top: \"sum_bn_resblk64_7_b\" \n} \nlayer { # resblk64_8 \n  name: \"resblk64_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_7_b\" \n  top: \"resblk64_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_8 \n  name: \"batchNorm_resblk64_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8\" \n  top: \"bn_resblk64_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_8 \n  name: \"scale_resblk64_8\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_8\" \n  top: \"bn_resblk64_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_8 \n  name: \"relu_bn_resblk64_8\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_8\" \n  top: \"bn_resblk64_8\" \n} \nlayer { # resblk64_8_b \n  name: \"resblk64_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_8\" \n  top: 
\"resblk64_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_8_b \n  name: \"batchNorm_resblk64_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8_b\" \n  top: \"bn_resblk64_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  batch_norm_param { \n    use_global_stats: false \n    moving_average_fraction: 0.999 \n  } \n} \nlayer { # scale_resblk64_8_b \n  name: \"scale_resblk64_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_8_b\" \n  top: \"bn_resblk64_8_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_7_b \n    name: \"sum_sum_bn_resblk64_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_7_b\" \n    bottom: \"bn_resblk64_8_b\" \n    top: \"sum_bn_resblk64_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_8_b \n  name: \"relu_sum_bn_resblk64_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_8_b\" \n  top: \"sum_bn_resblk64_8_b\" \n} \nlayer { # pool_resblk64_8 \n  name: \"avePooling_resblk64_8\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_resblk64_8_b\" \n  top: \"avgPool_resblk64_8\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 8 \n    stride: 1 \n  } \n} \nlayer { # FC_final \n  name: \"FC_final\" \n  type: \"InnerProduct\" \n  bottom: \"avgPool_resblk64_8\" \n  top: \"FC_final\" \n  param { \n    lr_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n  } \n  inner_product_param { \n    num_output: 10 \n    weight_filler { \n      type: \"xavier\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # accuracy 
\n  name: \"accuracy\" \n  type: \"Accuracy\" \n  bottom: \"FC_final\" \n  bottom: \"label\" \n  top: \"accuracy\" \n} \nlayer { # loss \n  name: \"loss\" \n  type: \"SoftmaxWithLoss\" \n  bottom: \"FC_final\" \n  bottom: \"label\" \n  top: \"loss\" \n} \n"
  },
  {
    "path": "architectures/Resnet56Cifar.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ 
L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param 
{\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_cbr1 start\nlayer { # L1_b4_cbr1_conv\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr1_scale\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr1_relu\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\n#} L1_b4_cbr1 end\n#{ L1_b4_cbr2 start\nlayer { # L1_b4_cbr2_conv\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  
batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_scale\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b4_cbr2 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b4_relu\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_cbr1 start\nlayer { # L1_b5_cbr1_conv\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_scale\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr1_relu\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\n#} L1_b5_cbr1 end\n#{ L1_b5_cbr2 start\nlayer { # L1_b5_cbr2_conv\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_scale\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b5_cbr2 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b5_relu\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_cbr1 start\nlayer { # L1_b6_cbr1_conv\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_scale\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr1_relu\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\n#} L1_b6_cbr1 end\n#{ L1_b6_cbr2 start\nlayer 
{ # L1_b6_cbr2_conv\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_scale\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b6_cbr2 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b6_relu\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\n#} L1_b6 end\n#{ L1_b7 start\n#{ L1_b7_cbr1 start\nlayer { # L1_b7_cbr1_conv\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_scale\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr1_relu\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\n#} L1_b7_cbr1 end\n#{ L1_b7_cbr2 start\nlayer { # L1_b7_cbr2_conv\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_scale\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b7_cbr2 end\nlayer { # L1_b7_sum_eltwise\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b7_relu\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\n#} L1_b7 end\n#{ L1_b8 start\n#{ L1_b8_cbr1 start\nlayer { # L1_b8_cbr1_conv\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: 
\"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_scale\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr1_relu\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\n#} L1_b8_cbr1 end\n#{ L1_b8_cbr2 start\nlayer { # L1_b8_cbr2_conv\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_scale\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b8_cbr2 end\nlayer { # L1_b8_sum_eltwise\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b8_relu\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\n#} L1_b8 end\n#{ L1_b9 start\n#{ L1_b9_cbr1 start\nlayer { # L1_b9_cbr1_conv\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # 
L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr1_scale\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr1_relu\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\n#} L1_b9_cbr1 end\n#{ L1_b9_cbr2 start\nlayer { # L1_b9_cbr2_conv\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_scale\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b9_cbr2 end\nlayer { # L1_b9_sum_eltwise\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b9_relu\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\n#} L1_b9 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: 
\"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  
name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  
top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # 
L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_cbr1 start\nlayer { # L2_b4_cbr1_conv\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_scale\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr1_relu\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\n#} L2_b4_cbr1 end\n#{ L2_b4_cbr2 start\nlayer { # L2_b4_cbr2_conv\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_scale\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b4_cbr2 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b4_relu\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_cbr1 start\nlayer { # L2_b5_cbr1_conv\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_scale\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b5_cbr1_relu\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\n#} L2_b5_cbr1 end\n#{ L2_b5_cbr2 start\nlayer { # L2_b5_cbr2_conv\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: 
\"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_scale\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b5_cbr2 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b5_relu\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_cbr1 start\nlayer { # L2_b6_cbr1_conv\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_scale\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr1_relu\n  name: \"L2_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n}\n#} L2_b6_cbr1 end\n#{ L2_b6_cbr2 start\nlayer { # L2_b6_cbr2_conv\n  name: \"L2_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_scale\n  name: \"L2_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b6_cbr2 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b6_relu\n  name: \"L2_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n}\n#} L2_b6 end\n#{ L2_b7 start\n#{ L2_b7_cbr1 start\nlayer { # L2_b7_cbr1_conv\n  name: \"L2_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_scale\n  name: \"L2_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr1_relu\n  name: \"L2_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n}\n#} L2_b7_cbr1 end\n#{ L2_b7_cbr2 start\nlayer { # L2_b7_cbr2_conv\n  name: \"L2_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_scale\n  name: \"L2_b7_cbr2_scale\"\n  type: 
\"Scale\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b7_cbr2 end\nlayer { # L2_b7_sum_eltwise\n  name: \"L2_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b7_relu\n  name: \"L2_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n}\n#} L2_b7 end\n#{ L2_b8 start\n#{ L2_b8_cbr1 start\nlayer { # L2_b8_cbr1_conv\n  name: \"L2_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_scale\n  name: \"L2_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  
scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr1_relu\n  name: \"L2_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n}\n#} L2_b8_cbr1 end\n#{ L2_b8_cbr2 start\nlayer { # L2_b8_cbr2_conv\n  name: \"L2_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_scale\n  name: \"L2_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b8_cbr2 end\nlayer { # L2_b8_sum_eltwise\n  name: \"L2_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b8_relu\n  name: 
\"L2_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n}\n#} L2_b8 end\n#{ L2_b9 start\n#{ L2_b9_cbr1 start\nlayer { # L2_b9_cbr1_conv\n  name: \"L2_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_scale\n  name: \"L2_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr1_relu\n  name: \"L2_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n}\n#} L2_b9_cbr1 end\n#{ L2_b9_cbr2 start\nlayer { # L2_b9_cbr2_conv\n  name: \"L2_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_scale\n  name: \"L2_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b9_cbr2 end\nlayer { # L2_b9_sum_eltwise\n  name: \"L2_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b9_relu\n  name: \"L2_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n}\n#} L2_b9 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: 
\"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 
start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    
phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_cbr1 start\nlayer { # L3_b4_cbr1_conv\n  name: \"L3_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_scale\n  name: \"L3_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr1_relu\n  name: \"L3_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n}\n#} L3_b4_cbr1 end\n#{ L3_b4_cbr2 start\nlayer { # L3_b4_cbr2_conv\n  name: \"L3_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_scale\n  name: \"L3_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b4_cbr2 end\nlayer { # L3_b4_sum_eltwise\n  name: \"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b4_relu\n  name: \"L3_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_cbr1 start\nlayer { # L3_b5_cbr1_conv\n  name: \"L3_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_scale\n  name: \"L3_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr1_relu\n  name: \"L3_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n}\n#} L3_b5_cbr1 end\n#{ L3_b5_cbr2 start\nlayer { # L3_b5_cbr2_conv\n  name: \"L3_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_scale\n  name: \"L3_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b5_cbr2 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  
type: \"Eltwise\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b5_relu\n  name: \"L3_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_cbr1 start\nlayer { # L3_b6_cbr1_conv\n  name: \"L3_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_scale\n  name: \"L3_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr1_relu\n  name: \"L3_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n}\n#} L3_b6_cbr1 
end\n#{ L3_b6_cbr2 start\nlayer { # L3_b6_cbr2_conv\n  name: \"L3_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_scale\n  name: \"L3_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b6_cbr2 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b6_relu\n  name: \"L3_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n}\n#} L3_b6 end\n#{ L3_b7 start\n#{ L3_b7_cbr1 start\nlayer { # L3_b7_cbr1_conv\n  name: 
\"L3_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_scale\n  name: \"L3_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr1_relu\n  name: \"L3_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n}\n#} L3_b7_cbr1 end\n#{ L3_b7_cbr2 start\nlayer { # L3_b7_cbr2_conv\n  name: \"L3_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_scale\n  name: \"L3_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b7_cbr2 end\nlayer { # L3_b7_sum_eltwise\n  name: \"L3_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b7_relu\n  name: \"L3_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n}\n#} L3_b7 end\n#{ L3_b8 start\n#{ L3_b8_cbr1 start\nlayer { # L3_b8_cbr1_conv\n  name: \"L3_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_scale\n  name: \"L3_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr1_relu\n  name: \"L3_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n}\n#} L3_b8_cbr1 end\n#{ L3_b8_cbr2 start\nlayer { # L3_b8_cbr2_conv\n  name: \"L3_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_scale\n  name: \"L3_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b8_cbr2 end\nlayer { # L3_b8_sum_eltwise\n  name: \"L3_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b8_relu\n  name: \"L3_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n}\n#} L3_b8 end\n#{ L3_b9 start\n#{ L3_b9_cbr1 start\nlayer { # L3_b9_cbr1_conv\n  name: \"L3_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_scale\n  name: \"L3_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr1_relu\n  name: \"L3_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n}\n#} L3_b9_cbr1 end\n#{ L3_b9_cbr2 start\nlayer { # L3_b9_cbr2_conv\n  name: \"L3_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_scale\n  name: \"L3_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b9_cbr2 end\nlayer { # L3_b9_sum_eltwise\n  name: \"L3_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b9_relu\n  name: \"L3_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n}\n#} L3_b9 end\n#} L3 end\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/Resnet56DM1.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ 
L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param 
{\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_cbr1 start\nlayer { # L1_b4_cbr1_conv\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  
}\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr1_scale\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr1_relu\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\n#} L1_b4_cbr1 end\n#{ L1_b4_cbr2 start\nlayer { # L1_b4_cbr2_conv\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  
batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_scale\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b4_cbr2 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b4_relu\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_cbr1 start\nlayer { # L1_b5_cbr1_conv\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_scale\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr1_relu\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\n#} L1_b5_cbr1 end\n#{ L1_b5_cbr2 start\nlayer { # L1_b5_cbr2_conv\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_scale\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b5_cbr2 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b5_cbr2_bn_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b5_relu\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_cbr1 start\nlayer { # L1_b6_cbr1_conv\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_scale\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr1_relu\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\n#} L1_b6_cbr1 end\n#{ L1_b6_cbr2 start\nlayer 
{ # L1_b6_cbr2_conv\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_scale\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b6_cbr2 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b6_relu\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\n#} L1_b6 end\n#{ L1_b7 start\n#{ L1_b7_cbr1 start\nlayer { # L1_b7_cbr1_conv\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_scale\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr1_relu\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\n#} L1_b7_cbr1 end\n#{ L1_b7_cbr2 start\nlayer { # L1_b7_cbr2_conv\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_scale\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b7_cbr2 end\nlayer { # L1_b7_sum_eltwise\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b7_relu\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\n#} L1_b7 end\n#{ L1_b8 start\n#{ L1_b8_cbr1 start\nlayer { # L1_b8_cbr1_conv\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: 
\"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_scale\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr1_relu\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\n#} L1_b8_cbr1 end\n#{ L1_b8_cbr2 start\nlayer { # L1_b8_cbr2_conv\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_scale\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b8_cbr2 end\nlayer { # L1_b8_sum_eltwise\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b8_relu\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\n#} L1_b8 end\n#{ L1_b9 start\n#{ L1_b9_cbr1 start\nlayer { # L1_b9_cbr1_conv\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # 
L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr1_scale\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr1_relu\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\n#} L1_b9_cbr1 end\n#{ L1_b9_cbr2 start\nlayer { # L1_b9_cbr2_conv\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_scale\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b9_cbr2 end\nlayer { # L1_b9_sum_eltwise\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b9_relu\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\n#} L1_b9 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: 
\"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: 
\"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  
name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  
top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # 
L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_cbr1 start\nlayer { # L2_b4_cbr1_conv\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_scale\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr1_relu\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\n#} L2_b4_cbr1 end\n#{ L2_b4_cbr2 start\nlayer { # L2_b4_cbr2_conv\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param 
{\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_scale\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b4_cbr2 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b4_relu\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_cbr1 start\nlayer { # L2_b5_cbr1_conv\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_scale\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b5_cbr1_relu\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\n#} L2_b5_cbr1 end\n#{ L2_b5_cbr2 start\nlayer { # L2_b5_cbr2_conv\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: 
\"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_scale\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b5_cbr2 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b5_relu\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_cbr1 start\nlayer { # L2_b6_cbr1_conv\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_scale\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr1_relu\n  name: \"L2_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n}\n#} L2_b6_cbr1 end\n#{ L2_b6_cbr2 start\nlayer { # L2_b6_cbr2_conv\n  name: \"L2_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_scale\n  name: \"L2_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b6_cbr2 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b6_relu\n  name: \"L2_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n}\n#} L2_b6 end\n#{ L2_b7 start\n#{ L2_b7_cbr1 start\nlayer { # L2_b7_cbr1_conv\n  name: \"L2_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_scale\n  name: \"L2_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr1_relu\n  name: \"L2_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n}\n#} L2_b7_cbr1 end\n#{ L2_b7_cbr2 start\nlayer { # L2_b7_cbr2_conv\n  name: \"L2_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_scale\n  name: \"L2_b7_cbr2_scale\"\n  type: 
\"Scale\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b7_cbr2 end\nlayer { # L2_b7_sum_eltwise\n  name: \"L2_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b7_relu\n  name: \"L2_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n}\n#} L2_b7 end\n#{ L2_b8 start\n#{ L2_b8_cbr1 start\nlayer { # L2_b8_cbr1_conv\n  name: \"L2_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_scale\n  name: \"L2_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  
scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr1_relu\n  name: \"L2_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n}\n#} L2_b8_cbr1 end\n#{ L2_b8_cbr2 start\nlayer { # L2_b8_cbr2_conv\n  name: \"L2_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_scale\n  name: \"L2_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b8_cbr2 end\nlayer { # L2_b8_sum_eltwise\n  name: \"L2_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b8_relu\n  name: 
\"L2_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n}\n#} L2_b8 end\n#{ L2_b9 start\n#{ L2_b9_cbr1 start\nlayer { # L2_b9_cbr1_conv\n  name: \"L2_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_scale\n  name: \"L2_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr1_relu\n  name: \"L2_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n}\n#} L2_b9_cbr1 end\n#{ L2_b9_cbr2 start\nlayer { # L2_b9_cbr2_conv\n  name: \"L2_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_scale\n  name: \"L2_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b9_cbr2 end\nlayer { # L2_b9_sum_eltwise\n  name: \"L2_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b9_relu\n  name: \"L2_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n}\n#} L2_b9 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: 
\"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 
start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    
phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_cbr1 start\nlayer { # L3_b4_cbr1_conv\n  name: \"L3_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_scale\n  name: \"L3_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr1_relu\n  name: \"L3_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n}\n#} L3_b4_cbr1 end\n#{ L3_b4_cbr2 start\nlayer { # L3_b4_cbr2_conv\n  name: \"L3_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_scale\n  name: \"L3_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b4_cbr2 end\nlayer { # L3_b4_sum_eltwise\n  name: \"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b4_relu\n  name: \"L3_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_cbr1 start\nlayer { # L3_b5_cbr1_conv\n  name: \"L3_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_scale\n  name: \"L3_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr1_relu\n  name: \"L3_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n}\n#} L3_b5_cbr1 end\n#{ L3_b5_cbr2 start\nlayer { # L3_b5_cbr2_conv\n  name: \"L3_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_scale\n  name: \"L3_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b5_cbr2 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  
type: \"Eltwise\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b5_relu\n  name: \"L3_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_cbr1 start\nlayer { # L3_b6_cbr1_conv\n  name: \"L3_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_scale\n  name: \"L3_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr1_relu\n  name: \"L3_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n}\n#} L3_b6_cbr1 
end\n#{ L3_b6_cbr2 start\nlayer { # L3_b6_cbr2_conv\n  name: \"L3_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_scale\n  name: \"L3_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b6_cbr2 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b6_relu\n  name: \"L3_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n}\n#} L3_b6 end\n#{ L3_b7 start\n#{ L3_b7_cbr1 start\nlayer { # L3_b7_cbr1_conv\n  name: 
\"L3_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_scale\n  name: \"L3_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr1_relu\n  name: \"L3_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n}\n#} L3_b7_cbr1 end\n#{ L3_b7_cbr2 start\nlayer { # L3_b7_cbr2_conv\n  name: \"L3_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_scale\n  name: \"L3_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b7_cbr2 end\nlayer { # L3_b7_sum_eltwise\n  name: \"L3_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b7_relu\n  name: \"L3_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n}\n#} L3_b7 end\n#{ L3_b8 start\n#{ L3_b8_cbr1 start\nlayer { # L3_b8_cbr1_conv\n  name: \"L3_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  
}\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_scale\n  name: \"L3_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr1_relu\n  name: \"L3_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n}\n#} L3_b8_cbr1 end\n#{ L3_b8_cbr2 start\nlayer { # L3_b8_cbr2_conv\n  name: \"L3_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_scale\n  name: \"L3_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b8_cbr2 end\nlayer { # L3_b8_sum_eltwise\n  name: \"L3_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b8_relu\n  name: \"L3_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n}\n#} L3_b8 end\n#{ L3_b9 start\n#{ L3_b9_cbr1 start\nlayer { # L3_b9_cbr1_conv\n  name: \"L3_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_scale\n  name: \"L3_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr1_relu\n  name: \"L3_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n}\n#} L3_b9_cbr1 end\n#{ L3_b9_cbr2 start\nlayer { # L3_b9_cbr2_conv\n  name: \"L3_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 1\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_scale\n  name: \"L3_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b9_cbr2 end\nlayer { # L3_b9_sum_eltwise\n  name: \"L3_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b9_relu\n  name: \"L3_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n}\n#} L3_b9 end\n#} L3 end\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/Resnet56Dropout.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 125\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.999\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_dropout\n  name: \"L1_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  dropout_param {\n  
  dropout_ratio: 0.2\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr2_dropout\n  name: \"L1_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_dropout\n  name: \"L1_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  
}\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr2_dropout\n  name: \"L1_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  
bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_dropout\n  name: \"L1_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr2_dropout\n  name: \"L1_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: 
\"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_cbr1 start\nlayer { # L1_b4_cbr1_conv\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr1_scale\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr1_dropout\n  name: \"L1_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b4_cbr1_relu\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\n#} L1_b4_cbr1 end\n#{ L1_b4_cbr2 start\nlayer { # L1_b4_cbr2_conv\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b4_cbr2_scale\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr2_dropout\n  name: \"L1_b4_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b4_cbr2 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: 
\"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b4_relu\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_cbr1 start\nlayer { # L1_b5_cbr1_conv\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr1_scale\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr1_dropout\n  name: \"L1_b5_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b5_cbr1_relu\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\n#} L1_b5_cbr1 end\n#{ L1_b5_cbr2 start\nlayer { # L1_b5_cbr2_conv\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b5_cbr2_scale\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr2_dropout\n  name: \"L1_b5_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b5_cbr2 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: 
\"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b5_relu\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_cbr1 start\nlayer { # L1_b6_cbr1_conv\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr1_scale\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr1_dropout\n  name: \"L1_b6_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b6_cbr1_relu\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\n#} L1_b6_cbr1 end\n#{ L1_b6_cbr2 start\nlayer { # L1_b6_cbr2_conv\n  name: \"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b6_cbr2_scale\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr2_dropout\n  name: \"L1_b6_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b6_cbr2 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: 
\"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b6_relu\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\n#} L1_b6 end\n#{ L1_b7 start\n#{ L1_b7_cbr1 start\nlayer { # L1_b7_cbr1_conv\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr1_scale\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr1_dropout\n  name: \"L1_b7_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b7_cbr1_relu\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\n#} L1_b7_cbr1 end\n#{ L1_b7_cbr2 start\nlayer { # L1_b7_cbr2_conv\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b7_cbr2_scale\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr2_dropout\n  name: \"L1_b7_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b7_cbr2 end\nlayer { # L1_b7_sum_eltwise\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: 
\"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b7_relu\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\n#} L1_b7 end\n#{ L1_b8 start\n#{ L1_b8_cbr1 start\nlayer { # L1_b8_cbr1_conv\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr1_scale\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr1_dropout\n  name: \"L1_b8_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b8_cbr1_relu\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\n#} L1_b8_cbr1 end\n#{ L1_b8_cbr2 start\nlayer { # L1_b8_cbr2_conv\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b8_cbr2_scale\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr2_dropout\n  name: \"L1_b8_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b8_cbr2 end\nlayer { # L1_b8_sum_eltwise\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: 
\"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b8_relu\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\n#} L1_b8 end\n#{ L1_b9 start\n#{ L1_b9_cbr1 start\nlayer { # L1_b9_cbr1_conv\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr1_scale\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr1_dropout\n  name: \"L1_b9_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # 
L1_b9_cbr1_relu\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\n#} L1_b9_cbr1 end\n#{ L1_b9_cbr2 start\nlayer { # L1_b9_cbr2_conv\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L1_b9_cbr2_scale\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr2_dropout\n  name: \"L1_b9_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L1_b9_cbr2 end\nlayer { # L1_b9_sum_eltwise\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: 
\"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b9_relu\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\n#} L1_b9 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_dropout\n  name: \"L2_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  
}\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr2_dropout\n  name: \"L2_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: 
\"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_dropout\n  name: \"L2_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    
phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr2_dropout\n  name: \"L2_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_dropout\n  name: \"L2_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  
batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr2_dropout\n  name: \"L2_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_cbr1 start\nlayer { # L2_b4_cbr1_conv\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr1_scale\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr1_dropout\n  name: \"L2_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b4_cbr1_relu\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\n#} L2_b4_cbr1 end\n#{ L2_b4_cbr2 start\nlayer { # L2_b4_cbr2_conv\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b4_cbr2_scale\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr2_dropout\n  name: \"L2_b4_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b4_cbr2 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b4_relu\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_cbr1 start\nlayer { # L2_b5_cbr1_conv\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr1_scale\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b5_cbr1_dropout\n  name: \"L2_b5_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b5_cbr1_relu\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\n#} L2_b5_cbr1 end\n#{ L2_b5_cbr2 start\nlayer { # L2_b5_cbr2_conv\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b5_cbr2_scale\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b5_cbr2_dropout\n  name: \"L2_b5_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b5_cbr2 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b5_relu\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_cbr1 start\nlayer { # L2_b6_cbr1_conv\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr1_scale\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr1_dropout\n  name: \"L2_b6_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b6_cbr1_relu\n  name: \"L2_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n}\n#} L2_b6_cbr1 end\n#{ L2_b6_cbr2 start\nlayer { # L2_b6_cbr2_conv\n  name: \"L2_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b6_cbr2_scale\n  name: \"L2_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr2_dropout\n  name: \"L2_b6_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b6_cbr2 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b6_relu\n  name: \"L2_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n}\n#} L2_b6 end\n#{ L2_b7 start\n#{ L2_b7_cbr1 start\nlayer { # L2_b7_cbr1_conv\n  name: \"L2_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr1_scale\n  name: \"L2_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr1_dropout\n  name: \"L2_b7_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b7_cbr1_relu\n  name: \"L2_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n}\n#} L2_b7_cbr1 end\n#{ L2_b7_cbr2 start\nlayer { # L2_b7_cbr2_conv\n  name: \"L2_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b7_cbr2_scale\n  name: \"L2_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr2_dropout\n  name: \"L2_b7_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b7_cbr2 end\nlayer { # L2_b7_sum_eltwise\n  name: \"L2_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b7_relu\n  name: \"L2_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n}\n#} L2_b7 end\n#{ L2_b8 start\n#{ L2_b8_cbr1 start\nlayer { # L2_b8_cbr1_conv\n  name: \"L2_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr1_scale\n  name: \"L2_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr1_dropout\n  name: \"L2_b8_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b8_cbr1_relu\n  name: \"L2_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n}\n#} L2_b8_cbr1 end\n#{ L2_b8_cbr2 start\nlayer { # L2_b8_cbr2_conv\n  name: \"L2_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b8_cbr2_scale\n  name: \"L2_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr2_dropout\n  name: \"L2_b8_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b8_cbr2 end\nlayer { # L2_b8_sum_eltwise\n  name: \"L2_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b8_relu\n  name: \"L2_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n}\n#} L2_b8 end\n#{ L2_b9 start\n#{ L2_b9_cbr1 start\nlayer { # L2_b9_cbr1_conv\n  name: \"L2_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr1_scale\n  name: \"L2_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr1_dropout\n  name: \"L2_b9_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L2_b9_cbr1_relu\n  name: \"L2_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n}\n#} L2_b9_cbr1 end\n#{ L2_b9_cbr2 start\nlayer { # L2_b9_cbr2_conv\n  name: \"L2_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L2_b9_cbr2_scale\n  name: \"L2_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr2_dropout\n  name: \"L2_b9_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L2_b9_cbr2 end\nlayer { # L2_b9_sum_eltwise\n  name: \"L2_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b9_relu\n  name: \"L2_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n}\n#} L2_b9 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_dropout\n  name: \"L3_b1_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr2_dropout\n  name: \"L3_b1_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 125 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_dropout\n  name: \"L3_b2_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n  
  lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr2_dropout\n  name: \"L3_b2_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: 
\"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_dropout\n  name: \"L3_b3_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr2_dropout\n  name: \"L3_b3_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_cbr1 start\nlayer { # L3_b4_cbr1_conv\n  name: \"L3_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: 
\"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr1_scale\n  name: \"L3_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr1_dropout\n  name: \"L3_b4_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b4_cbr1_relu\n  name: \"L3_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n}\n#} L3_b4_cbr1 end\n#{ L3_b4_cbr2 start\nlayer { # L3_b4_cbr2_conv\n  name: \"L3_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b4_cbr2_scale\n  name: \"L3_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr2_dropout\n  name: \"L3_b4_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b4_cbr2 end\nlayer { # L3_b4_sum_eltwise\n  name: \"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b4_relu\n  name: \"L3_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_cbr1 start\nlayer { # L3_b5_cbr1_conv\n  name: \"L3_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: 
\"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr1_scale\n  name: \"L3_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr1_dropout\n  name: \"L3_b5_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b5_cbr1_relu\n  name: \"L3_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n}\n#} L3_b5_cbr1 end\n#{ L3_b5_cbr2 start\nlayer { # L3_b5_cbr2_conv\n  name: \"L3_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b5_cbr2_scale\n  name: \"L3_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr2_dropout\n  name: \"L3_b5_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b5_cbr2 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b5_relu\n  name: \"L3_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_cbr1 start\nlayer { # L3_b6_cbr1_conv\n  name: \"L3_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: 
\"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr1_scale\n  name: \"L3_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr1_dropout\n  name: \"L3_b6_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b6_cbr1_relu\n  name: \"L3_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n}\n#} L3_b6_cbr1 end\n#{ L3_b6_cbr2 start\nlayer { # L3_b6_cbr2_conv\n  name: \"L3_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b6_cbr2_scale\n  name: \"L3_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr2_dropout\n  name: \"L3_b6_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b6_cbr2 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b6_relu\n  name: \"L3_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n}\n#} L3_b6 end\n#{ L3_b7 start\n#{ L3_b7_cbr1 start\nlayer { # L3_b7_cbr1_conv\n  name: \"L3_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: 
\"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr1_scale\n  name: \"L3_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr1_dropout\n  name: \"L3_b7_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b7_cbr1_relu\n  name: \"L3_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n}\n#} L3_b7_cbr1 end\n#{ L3_b7_cbr2 start\nlayer { # L3_b7_cbr2_conv\n  name: \"L3_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b7_cbr2_scale\n  name: \"L3_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr2_dropout\n  name: \"L3_b7_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b7_cbr2 end\nlayer { # L3_b7_sum_eltwise\n  name: \"L3_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b7_relu\n  name: \"L3_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n}\n#} L3_b7 end\n#{ L3_b8 start\n#{ L3_b8_cbr1 start\nlayer { # L3_b8_cbr1_conv\n  name: \"L3_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: 
\"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr1_scale\n  name: \"L3_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr1_dropout\n  name: \"L3_b8_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b8_cbr1_relu\n  name: \"L3_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n}\n#} L3_b8_cbr1 end\n#{ L3_b8_cbr2 start\nlayer { # L3_b8_cbr2_conv\n  name: \"L3_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b8_cbr2_scale\n  name: \"L3_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr2_dropout\n  name: \"L3_b8_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b8_cbr2 end\nlayer { # L3_b8_sum_eltwise\n  name: \"L3_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b8_relu\n  name: \"L3_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n}\n#} L3_b8 end\n#{ L3_b9 start\n#{ L3_b9_cbr1 start\nlayer { # L3_b9_cbr1_conv\n  name: \"L3_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: 
\"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr1_scale\n  name: \"L3_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr1_dropout\n  name: \"L3_b9_cbr1_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\nlayer { # L3_b9_cbr1_relu\n  name: \"L3_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n}\n#} L3_b9_cbr1 end\n#{ L3_b9_cbr2 start\nlayer { # L3_b9_cbr2_conv\n  name: \"L3_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.999\n  }\n}\nlayer { # L3_b9_cbr2_scale\n  name: \"L3_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr2_dropout\n  name: \"L3_b9_cbr2_dropout\"\n  type: \"Dropout\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  dropout_param {\n    dropout_ratio: 0.2\n  }\n}\n#} L3_b9_cbr2 end\nlayer { # L3_b9_sum_eltwise\n  name: \"L3_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b9_relu\n  name: \"L3_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n}\n#} L3_b9 end\n#} L3 end\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/arch.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # pre_bn\n  name: \"pre_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"pre_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    
moving_average_fraction: 0.95\n  }\n}\nlayer { # pre_scale\n  name: \"pre_scale\"\n  type: \"Scale\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # pre_relu\n  name: \"pre_relu\"\n  type: \"ReLU\"\n  bottom: \"pre_bn_top\"\n  top: \"pre_bn_top\"\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_cbr1 start\nlayer { # L1_b1_cbr1_conv\n  name: \"L1_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_cbr1_bn\n  name: \"L1_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr1_conv_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_cbr1_scale\n  name: \"L1_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b1_cbr1_relu\n  name: \"L1_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr1_bn_top\"\n}\n#} L1_b1_cbr1 end\n#{ 
L1_b1_cbr2 start\nlayer { # L1_b1_cbr2_conv\n  name: \"L1_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_cbr1_bn_top\"\n  top: \"L1_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_cbr2_bn\n  name: \"L1_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_cbr2_conv_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_cbr2_scale\n  name: \"L1_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  top: \"L1_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b1_cbr2 end\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_cbr2_bn_top\"\n  bottom: \"pre_bn_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b1_relu\n  name: \"L1_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_cbr1 start\nlayer { # L1_b2_cbr1_conv\n  name: \"L1_b2_cbr1_conv\"\n  type: 
\"Convolution\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_cbr1_bn\n  name: \"L1_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr1_conv_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_cbr1_scale\n  name: \"L1_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b2_cbr1_relu\n  name: \"L1_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr1_bn_top\"\n}\n#} L1_b2_cbr1 end\n#{ L1_b2_cbr2 start\nlayer { # L1_b2_cbr2_conv\n  name: \"L1_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_cbr1_bn_top\"\n  top: \"L1_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    
bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_cbr2_bn\n  name: \"L1_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_cbr2_conv_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_cbr2_scale\n  name: \"L1_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  top: \"L1_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b2_cbr2 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_cbr2_bn_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b2_relu\n  name: \"L1_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_cbr1 start\nlayer { # L1_b3_cbr1_conv\n  name: \"L1_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_cbr1_bn\n  name: \"L1_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr1_conv_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_cbr1_scale\n  name: \"L1_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b3_cbr1_relu\n  name: \"L1_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr1_bn_top\"\n}\n#} L1_b3_cbr1 end\n#{ L1_b3_cbr2 start\nlayer { # L1_b3_cbr2_conv\n  name: \"L1_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_cbr1_bn_top\"\n  top: \"L1_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n 
   use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_cbr2_bn\n  name: \"L1_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_cbr2_conv_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_cbr2_scale\n  name: \"L1_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  top: \"L1_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b3_cbr2 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_cbr2_bn_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b3_relu\n  name: \"L1_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_cbr1 start\nlayer { # L1_b4_cbr1_conv\n  name: \"L1_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer 
{ # L1_b4_cbr1_bn\n  name: \"L1_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr1_conv_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_cbr1_scale\n  name: \"L1_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b4_cbr1_relu\n  name: \"L1_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr1_bn_top\"\n}\n#} L1_b4_cbr1 end\n#{ L1_b4_cbr2 start\nlayer { # L1_b4_cbr2_conv\n  name: \"L1_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_cbr1_bn_top\"\n  top: \"L1_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_cbr2_bn\n  name: \"L1_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_cbr2_conv_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param 
{\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_cbr2_scale\n  name: \"L1_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  top: \"L1_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b4_cbr2 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_cbr2_bn_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b4_relu\n  name: \"L1_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_cbr1 start\nlayer { # L1_b5_cbr1_conv\n  name: \"L1_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_cbr1_bn\n  name: \"L1_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr1_conv_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  
}\n}\nlayer { # L1_b5_cbr1_scale\n  name: \"L1_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b5_cbr1_relu\n  name: \"L1_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr1_bn_top\"\n}\n#} L1_b5_cbr1 end\n#{ L1_b5_cbr2 start\nlayer { # L1_b5_cbr2_conv\n  name: \"L1_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_cbr1_bn_top\"\n  top: \"L1_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_cbr2_bn\n  name: \"L1_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_cbr2_conv_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_cbr2_scale\n  name: \"L1_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  top: \"L1_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b5_cbr2 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_cbr2_bn_top\"\n  bottom: 
\"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b5_relu\n  name: \"L1_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_cbr1 start\nlayer { # L1_b6_cbr1_conv\n  name: \"L1_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_cbr1_bn\n  name: \"L1_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr1_conv_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_cbr1_scale\n  name: \"L1_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b6_cbr1_relu\n  name: \"L1_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr1_bn_top\"\n}\n#} L1_b6_cbr1 end\n#{ L1_b6_cbr2 start\nlayer { # L1_b6_cbr2_conv\n  name: 
\"L1_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_cbr1_bn_top\"\n  top: \"L1_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_cbr2_bn\n  name: \"L1_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_cbr2_conv_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_cbr2_scale\n  name: \"L1_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  top: \"L1_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b6_cbr2 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_cbr2_bn_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b6_relu\n  name: \"L1_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n}\n#} L1_b6 end\n#{ L1_b7 start\n#{ L1_b7_cbr1 start\nlayer { # L1_b7_cbr1_conv\n  name: \"L1_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b7_cbr1_bn\n  name: \"L1_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr1_conv_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b7_cbr1_scale\n  name: \"L1_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b7_cbr1_relu\n  name: \"L1_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr1_bn_top\"\n}\n#} L1_b7_cbr1 end\n#{ L1_b7_cbr2 start\nlayer { # L1_b7_cbr2_conv\n  name: \"L1_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_cbr1_bn_top\"\n  top: \"L1_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b7_cbr2_bn\n  name: \"L1_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b7_cbr2_conv_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b7_cbr2_scale\n  name: \"L1_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  top: \"L1_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b7_cbr2 end\nlayer { # L1_b7_sum_eltwise\n  name: \"L1_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b7_cbr2_bn_top\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b7_relu\n  name: \"L1_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b7_sum_eltwise_top\"\n}\n#} L1_b7 end\n#{ L1_b8 start\n#{ L1_b8_cbr1 start\nlayer { # L1_b8_cbr1_conv\n  name: \"L1_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: 
\"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b8_cbr1_bn\n  name: \"L1_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr1_conv_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b8_cbr1_scale\n  name: \"L1_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b8_cbr1_relu\n  name: \"L1_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr1_bn_top\"\n}\n#} L1_b8_cbr1 end\n#{ L1_b8_cbr2 start\nlayer { # L1_b8_cbr2_conv\n  name: \"L1_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_cbr1_bn_top\"\n  top: \"L1_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b8_cbr2_bn\n  name: \"L1_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b8_cbr2_conv_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b8_cbr2_scale\n  name: \"L1_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  top: \"L1_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b8_cbr2 end\nlayer { # L1_b8_sum_eltwise\n  name: \"L1_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b8_cbr2_bn_top\"\n  bottom: \"L1_b7_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b8_relu\n  name: \"L1_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b8_sum_eltwise_top\"\n}\n#} L1_b8 end\n#{ L1_b9 start\n#{ L1_b9_cbr1 start\nlayer { # L1_b9_cbr1_conv\n  name: \"L1_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b9_cbr1_bn\n  
name: \"L1_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr1_conv_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b9_cbr1_scale\n  name: \"L1_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b9_cbr1_relu\n  name: \"L1_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr1_bn_top\"\n}\n#} L1_b9_cbr1 end\n#{ L1_b9_cbr2 start\nlayer { # L1_b9_cbr2_conv\n  name: \"L1_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_cbr1_bn_top\"\n  top: \"L1_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b9_cbr2_bn\n  name: \"L1_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b9_cbr2_conv_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b9_cbr2_scale\n  name: \"L1_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  top: \"L1_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b9_cbr2 end\nlayer { # L1_b9_sum_eltwise\n  name: \"L1_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b9_cbr2_bn_top\"\n  bottom: \"L1_b8_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b9_relu\n  name: \"L1_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b9_sum_eltwise_top\"\n}\n#} L1_b9 end\n#{ L1_b10 start\n#{ L1_b10_cbr1 start\nlayer { # L1_b10_cbr1_conv\n  name: \"L1_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b10_cbr1_bn\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b10_cbr1_bn\n  name: \"L1_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr1_conv_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  
}\n}\nlayer { # L1_b10_cbr1_scale\n  name: \"L1_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b10_cbr1_relu\n  name: \"L1_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr1_bn_top\"\n}\n#} L1_b10_cbr1 end\n#{ L1_b10_cbr2 start\nlayer { # L1_b10_cbr2_conv\n  name: \"L1_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_cbr1_bn_top\"\n  top: \"L1_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b10_cbr2_bn\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b10_cbr2_bn\n  name: \"L1_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b10_cbr2_conv_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b10_cbr2_scale\n  name: \"L1_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b10_cbr2_bn_top\"\n  top: \"L1_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b10_cbr2 end\nlayer { # L1_b10_sum_eltwise\n  name: \"L1_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L1_b10_cbr2_bn_top\"\n  bottom: \"L1_b9_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b10_relu\n  name: \"L1_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b10_sum_eltwise_top\"\n}\n#} L1_b10 end\n#{ L1_b11 start\n#{ L1_b11_cbr1 start\nlayer { # L1_b11_cbr1_conv\n  name: \"L1_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b11_cbr1_bn\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b11_cbr1_bn\n  name: \"L1_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr1_conv_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b11_cbr1_scale\n  name: \"L1_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b11_cbr1_relu\n  name: \"L1_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr1_bn_top\"\n}\n#} L1_b11_cbr1 
end\n#{ L1_b11_cbr2 start\nlayer { # L1_b11_cbr2_conv\n  name: \"L1_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_cbr1_bn_top\"\n  top: \"L1_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b11_cbr2_bn\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b11_cbr2_bn\n  name: \"L1_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b11_cbr2_conv_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b11_cbr2_scale\n  name: \"L1_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  top: \"L1_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b11_cbr2 end\nlayer { # L1_b11_sum_eltwise\n  name: \"L1_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b11_cbr2_bn_top\"\n  bottom: \"L1_b10_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b11_relu\n  name: \"L1_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b11_sum_eltwise_top\"\n}\n#} L1_b11 end\n#{ L1_b12 start\n#{ L1_b12_cbr1 start\nlayer { # 
L1_b12_cbr1_conv\n  name: \"L1_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b12_cbr1_bn\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b12_cbr1_bn\n  name: \"L1_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr1_conv_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b12_cbr1_scale\n  name: \"L1_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b12_cbr1_relu\n  name: \"L1_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr1_bn_top\"\n}\n#} L1_b12_cbr1 end\n#{ L1_b12_cbr2 start\nlayer { # L1_b12_cbr2_conv\n  name: \"L1_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_cbr1_bn_top\"\n  top: \"L1_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 
3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b12_cbr2_bn\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b12_cbr2_bn\n  name: \"L1_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b12_cbr2_conv_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b12_cbr2_scale\n  name: \"L1_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  top: \"L1_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b12_cbr2 end\nlayer { # L1_b12_sum_eltwise\n  name: \"L1_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b12_cbr2_bn_top\"\n  bottom: \"L1_b11_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b12_relu\n  name: \"L1_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b12_sum_eltwise_top\"\n}\n#} L1_b12 end\n#{ L1_b13 start\n#{ L1_b13_cbr1 start\nlayer { # L1_b13_cbr1_conv\n  name: \"L1_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b13_cbr1_bn\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b13_cbr1_bn\n  name: \"L1_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr1_conv_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b13_cbr1_scale\n  name: \"L1_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b13_cbr1_relu\n  name: \"L1_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr1_bn_top\"\n}\n#} L1_b13_cbr1 end\n#{ L1_b13_cbr2 start\nlayer { # L1_b13_cbr2_conv\n  name: \"L1_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_cbr1_bn_top\"\n  top: \"L1_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b13_cbr2_bn\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b13_cbr2_bn\n  name: \"L1_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b13_cbr2_conv_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b13_cbr2_scale\n  name: \"L1_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  top: \"L1_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b13_cbr2 end\nlayer { # L1_b13_sum_eltwise\n  name: \"L1_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b13_cbr2_bn_top\"\n  bottom: \"L1_b12_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b13_relu\n  name: \"L1_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b13_sum_eltwise_top\"\n}\n#} L1_b13 end\n#{ L1_b14 start\n#{ L1_b14_cbr1 start\nlayer { # L1_b14_cbr1_conv\n  name: \"L1_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b14_cbr1_bn\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n  
  decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b14_cbr1_bn\n  name: \"L1_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr1_conv_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b14_cbr1_scale\n  name: \"L1_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b14_cbr1_relu\n  name: \"L1_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr1_bn_top\"\n}\n#} L1_b14_cbr1 end\n#{ L1_b14_cbr2 start\nlayer { # L1_b14_cbr2_conv\n  name: \"L1_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_cbr1_bn_top\"\n  top: \"L1_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b14_cbr2_bn\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b14_cbr2_bn\n  name: \"L1_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b14_cbr2_conv_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 
0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b14_cbr2_scale\n  name: \"L1_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  top: \"L1_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b14_cbr2 end\nlayer { # L1_b14_sum_eltwise\n  name: \"L1_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b14_cbr2_bn_top\"\n  bottom: \"L1_b13_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b14_relu\n  name: \"L1_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b14_sum_eltwise_top\"\n}\n#} L1_b14 end\n#{ L1_b15 start\n#{ L1_b15_cbr1 start\nlayer { # L1_b15_cbr1_conv\n  name: \"L1_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b15_cbr1_bn\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b15_cbr1_bn\n  name: \"L1_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr1_conv_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b15_cbr1_scale\n  name: \"L1_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b15_cbr1_relu\n  name: \"L1_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr1_bn_top\"\n}\n#} L1_b15_cbr1 end\n#{ L1_b15_cbr2 start\nlayer { # L1_b15_cbr2_conv\n  name: \"L1_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_cbr1_bn_top\"\n  top: \"L1_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b15_cbr2_bn\n  name: \"L1_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr2_conv_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b15_cbr2_bn\n  name: \"L1_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b15_cbr2_conv_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b15_cbr2_scale\n  name: \"L1_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: 
\"L1_b15_cbr2_bn_top\"\n  top: \"L1_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b15_cbr2 end\nlayer { # L1_b15_sum_eltwise\n  name: \"L1_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b15_cbr2_bn_top\"\n  bottom: \"L1_b14_sum_eltwise_top\"\n  top: \"L1_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b15_relu\n  name: \"L1_b15_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b15_sum_eltwise_top\"\n}\n#} L1_b15 end\n#{ L1_b16 start\n#{ L1_b16_cbr1 start\nlayer { # L1_b16_cbr1_conv\n  name: \"L1_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b16_cbr1_bn\n  name: \"L1_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr1_conv_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b16_cbr1_bn\n  name: \"L1_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr1_conv_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b16_cbr1_scale\n  name: \"L1_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: 
\"L1_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b16_cbr1_relu\n  name: \"L1_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: \"L1_b16_cbr1_bn_top\"\n}\n#} L1_b16_cbr1 end\n#{ L1_b16_cbr2 start\nlayer { # L1_b16_cbr2_conv\n  name: \"L1_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b16_cbr1_bn_top\"\n  top: \"L1_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b16_cbr2_bn\n  name: \"L1_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr2_conv_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b16_cbr2_bn\n  name: \"L1_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b16_cbr2_conv_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b16_cbr2_scale\n  name: \"L1_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b16_cbr2_bn_top\"\n  top: \"L1_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b16_cbr2 end\nlayer { # L1_b16_sum_eltwise\n  name: \"L1_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b16_cbr2_bn_top\"\n  bottom: \"L1_b15_sum_eltwise_top\"\n  top: \"L1_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n 
 }\n}\nlayer { # L1_b16_relu\n  name: \"L1_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b16_sum_eltwise_top\"\n}\n#} L1_b16 end\n#{ L1_b17 start\n#{ L1_b17_cbr1 start\nlayer { # L1_b17_cbr1_conv\n  name: \"L1_b17_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b17_cbr1_bn\n  name: \"L1_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr1_conv_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b17_cbr1_bn\n  name: \"L1_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr1_conv_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b17_cbr1_scale\n  name: \"L1_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b17_cbr1_relu\n  name: \"L1_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr1_bn_top\"\n}\n#} L1_b17_cbr1 end\n#{ L1_b17_cbr2 start\nlayer { # L1_b17_cbr2_conv\n  name: \"L1_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b17_cbr1_bn_top\"\n  top: \"L1_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b17_cbr2_bn\n  name: \"L1_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr2_conv_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b17_cbr2_bn\n  name: \"L1_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b17_cbr2_conv_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b17_cbr2_scale\n  name: \"L1_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b17_cbr2_bn_top\"\n  top: \"L1_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b17_cbr2 end\nlayer { # L1_b17_sum_eltwise\n  name: \"L1_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b17_cbr2_bn_top\"\n  bottom: \"L1_b16_sum_eltwise_top\"\n  top: \"L1_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b17_relu\n  name: \"L1_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: \"L1_b17_sum_eltwise_top\"\n}\n#} L1_b17 end\n#{ L1_b18 start\n#{ L1_b18_cbr1 start\nlayer { # L1_b18_cbr1_conv\n  name: \"L1_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: 
\"L1_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b18_cbr1_bn\n  name: \"L1_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr1_conv_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b18_cbr1_bn\n  name: \"L1_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr1_conv_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b18_cbr1_scale\n  name: \"L1_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L1_b18_cbr1_relu\n  name: \"L1_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr1_bn_top\"\n}\n#} L1_b18_cbr1 end\n#{ L1_b18_cbr2 start\nlayer { # L1_b18_cbr2_conv\n  name: \"L1_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b18_cbr1_bn_top\"\n  top: \"L1_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    
}\n  }\n}\nlayer { # L1_b18_cbr2_bn\n  name: \"L1_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr2_conv_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b18_cbr2_bn\n  name: \"L1_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b18_cbr2_conv_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b18_cbr2_scale\n  name: \"L1_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L1_b18_cbr2_bn_top\"\n  top: \"L1_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L1_b18_cbr2 end\nlayer { # L1_b18_sum_eltwise\n  name: \"L1_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b18_cbr2_bn_top\"\n  bottom: \"L1_b17_sum_eltwise_top\"\n  top: \"L1_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L1_b18_relu\n  name: \"L1_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L1_b18_sum_eltwise_top\"\n}\n#} L1_b18 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_cbr1 start\nlayer { # L2_b1_cbr1_conv\n  name: \"L2_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L2_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # 
L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_cbr1_bn\n  name: \"L2_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr1_conv_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_cbr1_scale\n  name: \"L2_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b1_cbr1_relu\n  name: \"L2_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr1_bn_top\"\n}\n#} L2_b1_cbr1 end\n#{ L2_b1_cbr2 start\nlayer { # L2_b1_cbr2_conv\n  name: \"L2_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_cbr1_bn_top\"\n  top: \"L2_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n 
   use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_cbr2_bn\n  name: \"L2_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_cbr2_conv_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_cbr2_scale\n  name: \"L2_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  top: \"L2_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b1_cbr2 end\nlayer { # L2_b1_pool\n  name: \"L2_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L1_b18_sum_eltwise_top\"\n  top: \"L2_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_cbr2_bn_top\"\n  bottom: \"L2_b1_pool\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b1_relu\n  name: \"L2_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n}\n#} L2_b1 end\nlayer { # L2_b1_zeros\n  name: \"L2_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L2_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 100 dim: 16 dim: 16 dim: 16 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L2_b1_concat0\n  name: \"L2_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  bottom: \"L2_b1_zeros\"\n  top: \"L2_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L2_b2 start\n#{ L2_b2_cbr1 start\nlayer { # L2_b2_cbr1_conv\n  name: \"L2_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_cbr1_bn\n  name: \"L2_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr1_conv_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_cbr1_scale\n  name: \"L2_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b2_cbr1_relu\n  name: \"L2_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr1_bn_top\"\n}\n#} L2_b2_cbr1 end\n#{ L2_b2_cbr2 start\nlayer { # L2_b2_cbr2_conv\n  name: \"L2_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_cbr1_bn_top\"\n  top: \"L2_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: 
\"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_cbr2_bn\n  name: \"L2_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_cbr2_conv_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_cbr2_scale\n  name: \"L2_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  top: \"L2_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b2_cbr2 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_cbr2_bn_top\"\n  bottom: \"L2_b1_concat0\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b2_relu\n  name: \"L2_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_cbr1 start\nlayer { # L2_b3_cbr1_conv\n  name: \"L2_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n 
   lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_cbr1_bn\n  name: \"L2_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr1_conv_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_cbr1_scale\n  name: \"L2_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b3_cbr1_relu\n  name: \"L2_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr1_bn_top\"\n}\n#} L2_b3_cbr1 end\n#{ L2_b3_cbr2 start\nlayer { # L2_b3_cbr2_conv\n  name: \"L2_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_cbr1_bn_top\"\n  top: \"L2_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_cbr2_bn\n  name: \"L2_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_cbr2_conv_top\"\n  top: 
\"L2_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_cbr2_scale\n  name: \"L2_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  top: \"L2_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b3_cbr2 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_cbr2_bn_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b3_relu\n  name: \"L2_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b3_sum_eltwise_top\"\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_cbr1 start\nlayer { # L2_b4_cbr1_conv\n  name: \"L2_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_cbr1_bn\n  name: \"L2_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr1_conv_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  
param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_cbr1_scale\n  name: \"L2_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b4_cbr1_relu\n  name: \"L2_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr1_bn_top\"\n}\n#} L2_b4_cbr1 end\n#{ L2_b4_cbr2 start\nlayer { # L2_b4_cbr2_conv\n  name: \"L2_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_cbr1_bn_top\"\n  top: \"L2_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_cbr2_bn\n  name: \"L2_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_cbr2_conv_top\"\n  top: \"L2_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_cbr2_scale\n  name: \"L2_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  
top: \"L2_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b4_cbr2 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_cbr2_bn_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b4_relu\n  name: \"L2_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_cbr1 start\nlayer { # L2_b5_cbr1_conv\n  name: \"L2_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_cbr1_bn\n  name: \"L2_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr1_conv_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_cbr1_scale\n  name: \"L2_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  
}\n}\nlayer { # L2_b5_cbr1_relu\n  name: \"L2_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr1_bn_top\"\n}\n#} L2_b5_cbr1 end\n#{ L2_b5_cbr2 start\nlayer { # L2_b5_cbr2_conv\n  name: \"L2_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_cbr1_bn_top\"\n  top: \"L2_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_cbr2_bn\n  name: \"L2_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_cbr2_conv_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_cbr2_scale\n  name: \"L2_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  top: \"L2_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b5_cbr2 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_cbr2_bn_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b5_relu\n  name: \"L2_b5_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L2_b5_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_cbr1 start\nlayer { # L2_b6_cbr1_conv\n  name: \"L2_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_cbr1_bn\n  name: \"L2_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr1_conv_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_cbr1_scale\n  name: \"L2_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b6_cbr1_relu\n  name: \"L2_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr1_bn_top\"\n}\n#} L2_b6_cbr1 end\n#{ L2_b6_cbr2 start\nlayer { # L2_b6_cbr2_conv\n  name: \"L2_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_cbr1_bn_top\"\n  top: \"L2_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 
2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_cbr2_bn\n  name: \"L2_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_cbr2_conv_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_cbr2_scale\n  name: \"L2_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  top: \"L2_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b6_cbr2 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_cbr2_bn_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b6_relu\n  name: \"L2_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n}\n#} L2_b6 end\n#{ L2_b7 start\n#{ L2_b7_cbr1 start\nlayer { # L2_b7_cbr1_conv\n  name: \"L2_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    
pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b7_cbr1_bn\n  name: \"L2_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr1_conv_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b7_cbr1_scale\n  name: \"L2_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b7_cbr1_relu\n  name: \"L2_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr1_bn_top\"\n}\n#} L2_b7_cbr1 end\n#{ L2_b7_cbr2 start\nlayer { # L2_b7_cbr2_conv\n  name: \"L2_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_cbr1_bn_top\"\n  top: \"L2_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  
}\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b7_cbr2_bn\n  name: \"L2_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b7_cbr2_conv_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b7_cbr2_scale\n  name: \"L2_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  top: \"L2_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b7_cbr2 end\nlayer { # L2_b7_sum_eltwise\n  name: \"L2_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b7_cbr2_bn_top\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b7_relu\n  name: \"L2_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b7_sum_eltwise_top\"\n}\n#} L2_b7 end\n#{ L2_b8 start\n#{ L2_b8_cbr1 start\nlayer { # L2_b8_cbr1_conv\n  name: \"L2_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 
0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b8_cbr1_bn\n  name: \"L2_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr1_conv_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b8_cbr1_scale\n  name: \"L2_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b8_cbr1_relu\n  name: \"L2_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr1_bn_top\"\n}\n#} L2_b8_cbr1 end\n#{ L2_b8_cbr2 start\nlayer { # L2_b8_cbr2_conv\n  name: \"L2_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_cbr1_bn_top\"\n  top: \"L2_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b8_cbr2_bn\n  name: \"L2_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b8_cbr2_conv_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 
0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b8_cbr2_scale\n  name: \"L2_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  top: \"L2_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b8_cbr2 end\nlayer { # L2_b8_sum_eltwise\n  name: \"L2_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b8_cbr2_bn_top\"\n  bottom: \"L2_b7_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b8_relu\n  name: \"L2_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b8_sum_eltwise_top\"\n}\n#} L2_b8 end\n#{ L2_b9 start\n#{ L2_b9_cbr1 start\nlayer { # L2_b9_cbr1_conv\n  name: \"L2_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b9_cbr1_bn\n  name: \"L2_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr1_conv_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b9_cbr1_scale\n  name: \"L2_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b9_cbr1_relu\n  name: \"L2_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr1_bn_top\"\n}\n#} L2_b9_cbr1 end\n#{ L2_b9_cbr2 start\nlayer { # L2_b9_cbr2_conv\n  name: \"L2_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_cbr1_bn_top\"\n  top: \"L2_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b9_cbr2_bn\n  name: \"L2_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b9_cbr2_conv_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b9_cbr2_scale\n  name: \"L2_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  top: \"L2_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: 
true\n  }\n}\n#} L2_b9_cbr2 end\nlayer { # L2_b9_sum_eltwise\n  name: \"L2_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b9_cbr2_bn_top\"\n  bottom: \"L2_b8_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b9_relu\n  name: \"L2_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b9_sum_eltwise_top\"\n}\n#} L2_b9 end\n#{ L2_b10 start\n#{ L2_b10_cbr1 start\nlayer { # L2_b10_cbr1_conv\n  name: \"L2_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b10_cbr1_bn\n  name: \"L2_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr1_conv_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b10_cbr1_bn\n  name: \"L2_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr1_conv_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b10_cbr1_scale\n  name: \"L2_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b10_cbr1_relu\n  name: 
\"L2_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr1_bn_top\"\n}\n#} L2_b10_cbr1 end\n#{ L2_b10_cbr2 start\nlayer { # L2_b10_cbr2_conv\n  name: \"L2_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b10_cbr1_bn_top\"\n  top: \"L2_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b10_cbr2_bn\n  name: \"L2_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr2_conv_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b10_cbr2_bn\n  name: \"L2_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b10_cbr2_conv_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b10_cbr2_scale\n  name: \"L2_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b10_cbr2_bn_top\"\n  top: \"L2_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b10_cbr2 end\nlayer { # L2_b10_sum_eltwise\n  name: \"L2_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b10_cbr2_bn_top\"\n  bottom: \"L2_b9_sum_eltwise_top\"\n  top: \"L2_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b10_relu\n  name: \"L2_b10_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L2_b10_sum_eltwise_top\"\n  top: \"L2_b10_sum_eltwise_top\"\n}\n#} L2_b10 end\n#{ L2_b11 start\n#{ L2_b11_cbr1 start\nlayer { # L2_b11_cbr1_conv\n  name: \"L2_b11_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b10_sum_eltwise_top\"\n  top: \"L2_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b11_cbr1_bn\n  name: \"L2_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr1_conv_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b11_cbr1_bn\n  name: \"L2_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr1_conv_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b11_cbr1_scale\n  name: \"L2_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b11_cbr1_relu\n  name: \"L2_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: \"L2_b11_cbr1_bn_top\"\n}\n#} L2_b11_cbr1 end\n#{ L2_b11_cbr2 start\nlayer { # L2_b11_cbr2_conv\n  name: \"L2_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b11_cbr1_bn_top\"\n  top: \"L2_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 
1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b11_cbr2_bn\n  name: \"L2_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr2_conv_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b11_cbr2_bn\n  name: \"L2_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b11_cbr2_conv_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b11_cbr2_scale\n  name: \"L2_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b11_cbr2_bn_top\"\n  top: \"L2_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b11_cbr2 end\nlayer { # L2_b11_sum_eltwise\n  name: \"L2_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b11_cbr2_bn_top\"\n  bottom: \"L2_b10_sum_eltwise_top\"\n  top: \"L2_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b11_relu\n  name: \"L2_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b11_sum_eltwise_top\"\n}\n#} L2_b11 end\n#{ L2_b12 start\n#{ L2_b12_cbr1 start\nlayer { # L2_b12_cbr1_conv\n  name: \"L2_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    
decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b12_cbr1_bn\n  name: \"L2_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr1_conv_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b12_cbr1_bn\n  name: \"L2_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr1_conv_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b12_cbr1_scale\n  name: \"L2_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b12_cbr1_relu\n  name: \"L2_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr1_bn_top\"\n}\n#} L2_b12_cbr1 end\n#{ L2_b12_cbr2 start\nlayer { # L2_b12_cbr2_conv\n  name: \"L2_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b12_cbr1_bn_top\"\n  top: \"L2_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b12_cbr2_bn\n  name: \"L2_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L2_b12_cbr2_conv_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b12_cbr2_bn\n  name: \"L2_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b12_cbr2_conv_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b12_cbr2_scale\n  name: \"L2_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b12_cbr2_bn_top\"\n  top: \"L2_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b12_cbr2 end\nlayer { # L2_b12_sum_eltwise\n  name: \"L2_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b12_cbr2_bn_top\"\n  bottom: \"L2_b11_sum_eltwise_top\"\n  top: \"L2_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b12_relu\n  name: \"L2_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b12_sum_eltwise_top\"\n}\n#} L2_b12 end\n#{ L2_b13 start\n#{ L2_b13_cbr1 start\nlayer { # L2_b13_cbr1_conv\n  name: \"L2_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b13_cbr1_bn\n  name: \"L2_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr1_conv_top\"\n  top: 
\"L2_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b13_cbr1_bn\n  name: \"L2_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr1_conv_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b13_cbr1_scale\n  name: \"L2_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b13_cbr1_relu\n  name: \"L2_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr1_bn_top\"\n}\n#} L2_b13_cbr1 end\n#{ L2_b13_cbr2 start\nlayer { # L2_b13_cbr2_conv\n  name: \"L2_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b13_cbr1_bn_top\"\n  top: \"L2_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b13_cbr2_bn\n  name: \"L2_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr2_conv_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # 
L2_b13_cbr2_bn\n  name: \"L2_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b13_cbr2_conv_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b13_cbr2_scale\n  name: \"L2_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b13_cbr2_bn_top\"\n  top: \"L2_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b13_cbr2 end\nlayer { # L2_b13_sum_eltwise\n  name: \"L2_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b13_cbr2_bn_top\"\n  bottom: \"L2_b12_sum_eltwise_top\"\n  top: \"L2_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b13_relu\n  name: \"L2_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b13_sum_eltwise_top\"\n}\n#} L2_b13 end\n#{ L2_b14 start\n#{ L2_b14_cbr1 start\nlayer { # L2_b14_cbr1_conv\n  name: \"L2_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b14_cbr1_bn\n  name: \"L2_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr1_conv_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b14_cbr1_bn\n  name: \"L2_b14_cbr1_bn\"\n  
type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr1_conv_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b14_cbr1_scale\n  name: \"L2_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b14_cbr1_relu\n  name: \"L2_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr1_bn_top\"\n}\n#} L2_b14_cbr1 end\n#{ L2_b14_cbr2 start\nlayer { # L2_b14_cbr2_conv\n  name: \"L2_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b14_cbr1_bn_top\"\n  top: \"L2_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b14_cbr2_bn\n  name: \"L2_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr2_conv_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b14_cbr2_bn\n  name: \"L2_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b14_cbr2_conv_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: 
true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b14_cbr2_scale\n  name: \"L2_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b14_cbr2_bn_top\"\n  top: \"L2_b14_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b14_cbr2 end\nlayer { # L2_b14_sum_eltwise\n  name: \"L2_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b14_cbr2_bn_top\"\n  bottom: \"L2_b13_sum_eltwise_top\"\n  top: \"L2_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b14_relu\n  name: \"L2_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b14_sum_eltwise_top\"\n}\n#} L2_b14 end\n#{ L2_b15 start\n#{ L2_b15_cbr1 start\nlayer { # L2_b15_cbr1_conv\n  name: \"L2_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b15_cbr1_bn\n  name: \"L2_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr1_conv_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b15_cbr1_bn\n  name: \"L2_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr1_conv_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  
}\n}\nlayer { # L2_b15_cbr1_scale\n  name: \"L2_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b15_cbr1_relu\n  name: \"L2_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr1_bn_top\"\n}\n#} L2_b15_cbr1 end\n#{ L2_b15_cbr2 start\nlayer { # L2_b15_cbr2_conv\n  name: \"L2_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b15_cbr1_bn_top\"\n  top: \"L2_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b15_cbr2_bn\n  name: \"L2_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr2_conv_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b15_cbr2_bn\n  name: \"L2_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b15_cbr2_conv_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b15_cbr2_scale\n  name: \"L2_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b15_cbr2_bn_top\"\n  top: \"L2_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b15_cbr2 end\nlayer { # L2_b15_sum_eltwise\n  name: \"L2_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: 
\"L2_b15_cbr2_bn_top\"\n  bottom: \"L2_b14_sum_eltwise_top\"\n  top: \"L2_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b15_relu\n  name: \"L2_b15_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b15_sum_eltwise_top\"\n}\n#} L2_b15 end\n#{ L2_b16 start\n#{ L2_b16_cbr1 start\nlayer { # L2_b16_cbr1_conv\n  name: \"L2_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b16_cbr1_bn\n  name: \"L2_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr1_conv_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b16_cbr1_bn\n  name: \"L2_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr1_conv_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b16_cbr1_scale\n  name: \"L2_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b16_cbr1_relu\n  name: \"L2_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr1_bn_top\"\n}\n#} L2_b16_cbr1 
end\n#{ L2_b16_cbr2 start\nlayer { # L2_b16_cbr2_conv\n  name: \"L2_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b16_cbr1_bn_top\"\n  top: \"L2_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b16_cbr2_bn\n  name: \"L2_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr2_conv_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b16_cbr2_bn\n  name: \"L2_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b16_cbr2_conv_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b16_cbr2_scale\n  name: \"L2_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b16_cbr2_bn_top\"\n  top: \"L2_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b16_cbr2 end\nlayer { # L2_b16_sum_eltwise\n  name: \"L2_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b16_cbr2_bn_top\"\n  bottom: \"L2_b15_sum_eltwise_top\"\n  top: \"L2_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b16_relu\n  name: \"L2_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b16_sum_eltwise_top\"\n}\n#} L2_b16 end\n#{ L2_b17 start\n#{ L2_b17_cbr1 start\nlayer { # 
L2_b17_cbr1_conv\n  name: \"L2_b17_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b17_cbr1_bn\n  name: \"L2_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr1_conv_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b17_cbr1_bn\n  name: \"L2_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr1_conv_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b17_cbr1_scale\n  name: \"L2_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b17_cbr1_relu\n  name: \"L2_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr1_bn_top\"\n}\n#} L2_b17_cbr1 end\n#{ L2_b17_cbr2 start\nlayer { # L2_b17_cbr2_conv\n  name: \"L2_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b17_cbr1_bn_top\"\n  top: \"L2_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 
3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b17_cbr2_bn\n  name: \"L2_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr2_conv_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b17_cbr2_bn\n  name: \"L2_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b17_cbr2_conv_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b17_cbr2_scale\n  name: \"L2_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b17_cbr2_bn_top\"\n  top: \"L2_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b17_cbr2 end\nlayer { # L2_b17_sum_eltwise\n  name: \"L2_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b17_cbr2_bn_top\"\n  bottom: \"L2_b16_sum_eltwise_top\"\n  top: \"L2_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b17_relu\n  name: \"L2_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b17_sum_eltwise_top\"\n}\n#} L2_b17 end\n#{ L2_b18 start\n#{ L2_b18_cbr1 start\nlayer { # L2_b18_cbr1_conv\n  name: \"L2_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      
type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b18_cbr1_bn\n  name: \"L2_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr1_conv_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b18_cbr1_bn\n  name: \"L2_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr1_conv_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b18_cbr1_scale\n  name: \"L2_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L2_b18_cbr1_relu\n  name: \"L2_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr1_bn_top\"\n}\n#} L2_b18_cbr1 end\n#{ L2_b18_cbr2 start\nlayer { # L2_b18_cbr2_conv\n  name: \"L2_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b18_cbr1_bn_top\"\n  top: \"L2_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b18_cbr2_bn\n  name: \"L2_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr2_conv_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b18_cbr2_bn\n  name: \"L2_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b18_cbr2_conv_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b18_cbr2_scale\n  name: \"L2_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L2_b18_cbr2_bn_top\"\n  top: \"L2_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L2_b18_cbr2 end\nlayer { # L2_b18_sum_eltwise\n  name: \"L2_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b18_cbr2_bn_top\"\n  bottom: \"L2_b17_sum_eltwise_top\"\n  top: \"L2_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L2_b18_relu\n  name: \"L2_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L2_b18_sum_eltwise_top\"\n}\n#} L2_b18 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_cbr1 start\nlayer { # L3_b1_cbr1_conv\n  name: \"L3_b1_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L3_b1_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n   
 lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_cbr1_bn\n  name: \"L3_b1_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr1_conv_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_cbr1_scale\n  name: \"L3_b1_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b1_cbr1_relu\n  name: \"L3_b1_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr1_bn_top\"\n}\n#} L3_b1_cbr1 end\n#{ L3_b1_cbr2 start\nlayer { # L3_b1_cbr2_conv\n  name: \"L3_b1_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_cbr1_bn_top\"\n  top: \"L3_b1_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_cbr2_bn\n  name: \"L3_b1_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_cbr2_conv_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_cbr2_scale\n  name: \"L3_b1_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  top: \"L3_b1_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b1_cbr2 end\nlayer { # L3_b1_pool\n  name: \"L3_b1_pool\"\n  type: \"Pooling\"\n  bottom: \"L2_b18_sum_eltwise_top\"\n  top: \"L3_b1_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 3\n    stride: 2\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_cbr2_bn_top\"\n  bottom: \"L3_b1_pool\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b1_relu\n  name: \"L3_b1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n}\n#} L3_b1 end\nlayer { # L3_b1_zeros\n  name: \"L3_b1_zeros\"\n  type: \"DummyData\"\n  top: \"L3_b1_zeros\"\n  dummy_data_param {\n      shape: {dim: 100 dim: 32 dim: 8 dim: 8 }\n      data_filler: {\n         type: \"constant\" \n         value: 0\n      }\n  }\n}\nlayer { # L3_b1_concat0\n  name: \"L3_b1_concat0\"\n  type: \"Concat\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  bottom: \"L3_b1_zeros\"\n  top: \"L3_b1_concat0\"\n  concat_param {\n    axis: 1 \n  }\n}\n#{ L3_b2 start\n#{ L3_b2_cbr1 start\nlayer { # L3_b2_cbr1_conv\n  name: \"L3_b2_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  
name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_cbr1_bn\n  name: \"L3_b2_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr1_conv_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_cbr1_scale\n  name: \"L3_b2_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b2_cbr1_relu\n  name: \"L3_b2_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr1_bn_top\"\n}\n#} L3_b2_cbr1 end\n#{ L3_b2_cbr2 start\nlayer { # L3_b2_cbr2_conv\n  name: \"L3_b2_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_cbr1_bn_top\"\n  top: \"L3_b2_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    
use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_cbr2_bn\n  name: \"L3_b2_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_cbr2_conv_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_cbr2_scale\n  name: \"L3_b2_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  top: \"L3_b2_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b2_cbr2 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_cbr2_bn_top\"\n  bottom: \"L3_b1_concat0\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b2_relu\n  name: \"L3_b2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_cbr1 start\nlayer { # L3_b3_cbr1_conv\n  name: \"L3_b3_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # 
L3_b3_cbr1_bn\n  name: \"L3_b3_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr1_conv_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_cbr1_scale\n  name: \"L3_b3_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b3_cbr1_relu\n  name: \"L3_b3_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr1_bn_top\"\n}\n#} L3_b3_cbr1 end\n#{ L3_b3_cbr2 start\nlayer { # L3_b3_cbr2_conv\n  name: \"L3_b3_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_cbr1_bn_top\"\n  top: \"L3_b3_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_cbr2_bn\n  name: \"L3_b3_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_cbr2_conv_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n  
  use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_cbr2_scale\n  name: \"L3_b3_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  top: \"L3_b3_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b3_cbr2 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_cbr2_bn_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b3_relu\n  name: \"L3_b3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_cbr1 start\nlayer { # L3_b4_cbr1_conv\n  name: \"L3_b4_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_cbr1_bn\n  name: \"L3_b4_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr1_conv_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { 
# L3_b4_cbr1_scale\n  name: \"L3_b4_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b4_cbr1_relu\n  name: \"L3_b4_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr1_bn_top\"\n}\n#} L3_b4_cbr1 end\n#{ L3_b4_cbr2 start\nlayer { # L3_b4_cbr2_conv\n  name: \"L3_b4_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_cbr1_bn_top\"\n  top: \"L3_b4_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_cbr2_bn\n  name: \"L3_b4_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_cbr2_conv_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_cbr2_scale\n  name: \"L3_b4_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  top: \"L3_b4_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b4_cbr2 end\nlayer { # L3_b4_sum_eltwise\n  name: \"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_cbr2_bn_top\"\n  bottom: 
\"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b4_relu\n  name: \"L3_b4_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_cbr1 start\nlayer { # L3_b5_cbr1_conv\n  name: \"L3_b5_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_cbr1_bn\n  name: \"L3_b5_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr1_conv_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_cbr1_scale\n  name: \"L3_b5_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b5_cbr1_relu\n  name: \"L3_b5_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr1_bn_top\"\n}\n#} L3_b5_cbr1 end\n#{ L3_b5_cbr2 start\nlayer { # L3_b5_cbr2_conv\n  name: 
\"L3_b5_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_cbr1_bn_top\"\n  top: \"L3_b5_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_cbr2_bn\n  name: \"L3_b5_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_cbr2_conv_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_cbr2_scale\n  name: \"L3_b5_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  top: \"L3_b5_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b5_cbr2 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b5_cbr2_bn_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b5_relu\n  name: \"L3_b5_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_cbr1 start\nlayer { # L3_b6_cbr1_conv\n  name: \"L3_b6_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_cbr1_bn\n  name: \"L3_b6_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr1_conv_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_cbr1_scale\n  name: \"L3_b6_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b6_cbr1_relu\n  name: \"L3_b6_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr1_bn_top\"\n}\n#} L3_b6_cbr1 end\n#{ L3_b6_cbr2 start\nlayer { # L3_b6_cbr2_conv\n  name: \"L3_b6_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_cbr1_bn_top\"\n  top: \"L3_b6_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_cbr2_bn\n  name: \"L3_b6_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_cbr2_conv_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_cbr2_scale\n  name: \"L3_b6_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  top: \"L3_b6_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b6_cbr2 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_cbr2_bn_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b6_relu\n  name: \"L3_b6_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n}\n#} L3_b6 end\n#{ L3_b7 start\n#{ L3_b7_cbr1 start\nlayer { # L3_b7_cbr1_conv\n  name: \"L3_b7_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: 
\"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b7_cbr1_bn\n  name: \"L3_b7_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr1_conv_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b7_cbr1_scale\n  name: \"L3_b7_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b7_cbr1_relu\n  name: \"L3_b7_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr1_bn_top\"\n}\n#} L3_b7_cbr1 end\n#{ L3_b7_cbr2 start\nlayer { # L3_b7_cbr2_conv\n  name: \"L3_b7_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_cbr1_bn_top\"\n  top: \"L3_b7_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: 
false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b7_cbr2_bn\n  name: \"L3_b7_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b7_cbr2_conv_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b7_cbr2_scale\n  name: \"L3_b7_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  top: \"L3_b7_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b7_cbr2 end\nlayer { # L3_b7_sum_eltwise\n  name: \"L3_b7_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b7_cbr2_bn_top\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b7_relu\n  name: \"L3_b7_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b7_sum_eltwise_top\"\n}\n#} L3_b7 end\n#{ L3_b8 start\n#{ L3_b8_cbr1 start\nlayer { # L3_b8_cbr1_conv\n  name: \"L3_b8_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b8_cbr1_bn\n  
name: \"L3_b8_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr1_conv_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b8_cbr1_scale\n  name: \"L3_b8_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b8_cbr1_relu\n  name: \"L3_b8_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr1_bn_top\"\n}\n#} L3_b8_cbr1 end\n#{ L3_b8_cbr2 start\nlayer { # L3_b8_cbr2_conv\n  name: \"L3_b8_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_cbr1_bn_top\"\n  top: \"L3_b8_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b8_cbr2_bn\n  name: \"L3_b8_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b8_cbr2_conv_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    
use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b8_cbr2_scale\n  name: \"L3_b8_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  top: \"L3_b8_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b8_cbr2 end\nlayer { # L3_b8_sum_eltwise\n  name: \"L3_b8_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b8_cbr2_bn_top\"\n  bottom: \"L3_b7_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b8_relu\n  name: \"L3_b8_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b8_sum_eltwise_top\"\n}\n#} L3_b8 end\n#{ L3_b9 start\n#{ L3_b9_cbr1 start\nlayer { # L3_b9_cbr1_conv\n  name: \"L3_b9_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b9_cbr1_bn\n  name: \"L3_b9_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr1_conv_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # 
L3_b9_cbr1_scale\n  name: \"L3_b9_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b9_cbr1_relu\n  name: \"L3_b9_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr1_bn_top\"\n}\n#} L3_b9_cbr1 end\n#{ L3_b9_cbr2 start\nlayer { # L3_b9_cbr2_conv\n  name: \"L3_b9_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_cbr1_bn_top\"\n  top: \"L3_b9_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b9_cbr2_bn\n  name: \"L3_b9_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b9_cbr2_conv_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b9_cbr2_scale\n  name: \"L3_b9_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  top: \"L3_b9_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b9_cbr2 end\nlayer { # L3_b9_sum_eltwise\n  name: \"L3_b9_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b9_cbr2_bn_top\"\n  bottom: 
\"L3_b8_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b9_relu\n  name: \"L3_b9_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b9_sum_eltwise_top\"\n}\n#} L3_b9 end\n#{ L3_b10 start\n#{ L3_b10_cbr1 start\nlayer { # L3_b10_cbr1_conv\n  name: \"L3_b10_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b10_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b10_cbr1_bn\n  name: \"L3_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr1_conv_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b10_cbr1_bn\n  name: \"L3_b10_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr1_conv_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b10_cbr1_scale\n  name: \"L3_b10_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b10_cbr1_relu\n  name: \"L3_b10_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: \"L3_b10_cbr1_bn_top\"\n}\n#} L3_b10_cbr1 end\n#{ L3_b10_cbr2 start\nlayer { # 
L3_b10_cbr2_conv\n  name: \"L3_b10_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b10_cbr1_bn_top\"\n  top: \"L3_b10_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b10_cbr2_bn\n  name: \"L3_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr2_conv_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b10_cbr2_bn\n  name: \"L3_b10_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b10_cbr2_conv_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b10_cbr2_scale\n  name: \"L3_b10_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b10_cbr2_bn_top\"\n  top: \"L3_b10_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b10_cbr2 end\nlayer { # L3_b10_sum_eltwise\n  name: \"L3_b10_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b10_cbr2_bn_top\"\n  bottom: \"L3_b9_sum_eltwise_top\"\n  top: \"L3_b10_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b10_relu\n  name: \"L3_b10_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b10_sum_eltwise_top\"\n}\n#} L3_b10 end\n#{ L3_b11 start\n#{ L3_b11_cbr1 start\nlayer { # L3_b11_cbr1_conv\n  name: \"L3_b11_cbr1_conv\"\n  
type: \"Convolution\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b11_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b11_cbr1_bn\n  name: \"L3_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr1_conv_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b11_cbr1_bn\n  name: \"L3_b11_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr1_conv_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b11_cbr1_scale\n  name: \"L3_b11_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b11_cbr1_relu\n  name: \"L3_b11_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr1_bn_top\"\n}\n#} L3_b11_cbr1 end\n#{ L3_b11_cbr2 start\nlayer { # L3_b11_cbr2_conv\n  name: \"L3_b11_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b11_cbr1_bn_top\"\n  top: \"L3_b11_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: 
\"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b11_cbr2_bn\n  name: \"L3_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr2_conv_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b11_cbr2_bn\n  name: \"L3_b11_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b11_cbr2_conv_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b11_cbr2_scale\n  name: \"L3_b11_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b11_cbr2_bn_top\"\n  top: \"L3_b11_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b11_cbr2 end\nlayer { # L3_b11_sum_eltwise\n  name: \"L3_b11_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b11_cbr2_bn_top\"\n  bottom: \"L3_b10_sum_eltwise_top\"\n  top: \"L3_b11_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b11_relu\n  name: \"L3_b11_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b11_sum_eltwise_top\"\n}\n#} L3_b11 end\n#{ L3_b12 start\n#{ L3_b12_cbr1 start\nlayer { # L3_b12_cbr1_conv\n  name: \"L3_b12_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b12_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: 
\"constant\"\n    }\n  }\n}\nlayer { # L3_b12_cbr1_bn\n  name: \"L3_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr1_conv_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b12_cbr1_bn\n  name: \"L3_b12_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr1_conv_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b12_cbr1_scale\n  name: \"L3_b12_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b12_cbr1_relu\n  name: \"L3_b12_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr1_bn_top\"\n}\n#} L3_b12_cbr1 end\n#{ L3_b12_cbr2 start\nlayer { # L3_b12_cbr2_conv\n  name: \"L3_b12_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b12_cbr1_bn_top\"\n  top: \"L3_b12_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b12_cbr2_bn\n  name: \"L3_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr2_conv_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 
0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b12_cbr2_bn\n  name: \"L3_b12_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b12_cbr2_conv_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b12_cbr2_scale\n  name: \"L3_b12_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b12_cbr2_bn_top\"\n  top: \"L3_b12_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b12_cbr2 end\nlayer { # L3_b12_sum_eltwise\n  name: \"L3_b12_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b12_cbr2_bn_top\"\n  bottom: \"L3_b11_sum_eltwise_top\"\n  top: \"L3_b12_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b12_relu\n  name: \"L3_b12_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b12_sum_eltwise_top\"\n}\n#} L3_b12 end\n#{ L3_b13 start\n#{ L3_b13_cbr1 start\nlayer { # L3_b13_cbr1_conv\n  name: \"L3_b13_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b13_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b13_cbr1_bn\n  name: \"L3_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr1_conv_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  
batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b13_cbr1_bn\n  name: \"L3_b13_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr1_conv_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b13_cbr1_scale\n  name: \"L3_b13_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b13_cbr1_relu\n  name: \"L3_b13_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr1_bn_top\"\n}\n#} L3_b13_cbr1 end\n#{ L3_b13_cbr2 start\nlayer { # L3_b13_cbr2_conv\n  name: \"L3_b13_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b13_cbr1_bn_top\"\n  top: \"L3_b13_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b13_cbr2_bn\n  name: \"L3_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr2_conv_top\"\n  top: \"L3_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b13_cbr2_bn\n  name: \"L3_b13_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b13_cbr2_conv_top\"\n  top: \"L3_b13_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b13_cbr2_scale\n  name: \"L3_b13_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b13_cbr2_bn_top\"\n  top: \"L3_b13_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b13_cbr2 end\nlayer { # L3_b13_sum_eltwise\n  name: \"L3_b13_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b13_cbr2_bn_top\"\n  bottom: \"L3_b12_sum_eltwise_top\"\n  top: \"L3_b13_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b13_relu\n  name: \"L3_b13_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b13_sum_eltwise_top\"\n}\n#} L3_b13 end\n#{ L3_b14 start\n#{ L3_b14_cbr1 start\nlayer { # L3_b14_cbr1_conv\n  name: \"L3_b14_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b14_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b14_cbr1_bn\n  name: \"L3_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr1_conv_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b14_cbr1_bn\n  name: \"L3_b14_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr1_conv_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n  
  decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b14_cbr1_scale\n  name: \"L3_b14_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b14_cbr1_relu\n  name: \"L3_b14_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr1_bn_top\"\n}\n#} L3_b14_cbr1 end\n#{ L3_b14_cbr2 start\nlayer { # L3_b14_cbr2_conv\n  name: \"L3_b14_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b14_cbr1_bn_top\"\n  top: \"L3_b14_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b14_cbr2_bn\n  name: \"L3_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr2_conv_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b14_cbr2_bn\n  name: \"L3_b14_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b14_cbr2_conv_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b14_cbr2_scale\n  name: \"L3_b14_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b14_cbr2_bn_top\"\n  top: \"L3_b14_cbr2_bn_top\"\n  scale_param {\n    
bias_term: true\n  }\n}\n#} L3_b14_cbr2 end\nlayer { # L3_b14_sum_eltwise\n  name: \"L3_b14_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b14_cbr2_bn_top\"\n  bottom: \"L3_b13_sum_eltwise_top\"\n  top: \"L3_b14_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b14_relu\n  name: \"L3_b14_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: \"L3_b14_sum_eltwise_top\"\n}\n#} L3_b14 end\n#{ L3_b15 start\n#{ L3_b15_cbr1 start\nlayer { # L3_b15_cbr1_conv\n  name: \"L3_b15_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: \"L3_b15_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b15_cbr1_bn\n  name: \"L3_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr1_conv_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b15_cbr1_bn\n  name: \"L3_b15_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr1_conv_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b15_cbr1_scale\n  name: \"L3_b15_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # 
L3_b15_cbr1_relu\n  name: \"L3_b15_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr1_bn_top\"\n}\n#} L3_b15_cbr1 end\n#{ L3_b15_cbr2 start\nlayer { # L3_b15_cbr2_conv\n  name: \"L3_b15_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b15_cbr1_bn_top\"\n  top: \"L3_b15_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b15_cbr2_bn\n  name: \"L3_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr2_conv_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b15_cbr2_bn\n  name: \"L3_b15_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b15_cbr2_conv_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b15_cbr2_scale\n  name: \"L3_b15_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b15_cbr2_bn_top\"\n  top: \"L3_b15_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b15_cbr2 end\nlayer { # L3_b15_sum_eltwise\n  name: \"L3_b15_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b15_cbr2_bn_top\"\n  bottom: \"L3_b14_sum_eltwise_top\"\n  top: \"L3_b15_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b15_relu\n  name: \"L3_b15_relu\"\n  type: \"ReLU\"\n  
bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b15_sum_eltwise_top\"\n}\n#} L3_b15 end\n#{ L3_b16 start\n#{ L3_b16_cbr1 start\nlayer { # L3_b16_cbr1_conv\n  name: \"L3_b16_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b16_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b16_cbr1_bn\n  name: \"L3_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr1_conv_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b16_cbr1_bn\n  name: \"L3_b16_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr1_conv_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b16_cbr1_scale\n  name: \"L3_b16_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b16_cbr1_relu\n  name: \"L3_b16_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr1_bn_top\"\n}\n#} L3_b16_cbr1 end\n#{ L3_b16_cbr2 start\nlayer { # L3_b16_cbr2_conv\n  name: \"L3_b16_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b16_cbr1_bn_top\"\n  top: \"L3_b16_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    
decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b16_cbr2_bn\n  name: \"L3_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr2_conv_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b16_cbr2_bn\n  name: \"L3_b16_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b16_cbr2_conv_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b16_cbr2_scale\n  name: \"L3_b16_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b16_cbr2_bn_top\"\n  top: \"L3_b16_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b16_cbr2 end\nlayer { # L3_b16_sum_eltwise\n  name: \"L3_b16_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b16_cbr2_bn_top\"\n  bottom: \"L3_b15_sum_eltwise_top\"\n  top: \"L3_b16_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b16_relu\n  name: \"L3_b16_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b16_sum_eltwise_top\"\n}\n#} L3_b16 end\n#{ L3_b17 start\n#{ L3_b17_cbr1 start\nlayer { # L3_b17_cbr1_conv\n  name: \"L3_b17_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b17_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n  
  decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b17_cbr1_bn\n  name: \"L3_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr1_conv_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b17_cbr1_bn\n  name: \"L3_b17_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr1_conv_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b17_cbr1_scale\n  name: \"L3_b17_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b17_cbr1_relu\n  name: \"L3_b17_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr1_bn_top\"\n}\n#} L3_b17_cbr1 end\n#{ L3_b17_cbr2 start\nlayer { # L3_b17_cbr2_conv\n  name: \"L3_b17_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b17_cbr1_bn_top\"\n  top: \"L3_b17_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b17_cbr2_bn\n  name: \"L3_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L3_b17_cbr2_conv_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b17_cbr2_bn\n  name: \"L3_b17_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b17_cbr2_conv_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b17_cbr2_scale\n  name: \"L3_b17_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b17_cbr2_bn_top\"\n  top: \"L3_b17_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b17_cbr2 end\nlayer { # L3_b17_sum_eltwise\n  name: \"L3_b17_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b17_cbr2_bn_top\"\n  bottom: \"L3_b16_sum_eltwise_top\"\n  top: \"L3_b17_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b17_relu\n  name: \"L3_b17_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b17_sum_eltwise_top\"\n}\n#} L3_b17 end\n#{ L3_b18 start\n#{ L3_b18_cbr1 start\nlayer { # L3_b18_cbr1_conv\n  name: \"L3_b18_cbr1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b18_cbr1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b18_cbr1_bn\n  name: \"L3_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr1_conv_top\"\n  top: 
\"L3_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b18_cbr1_bn\n  name: \"L3_b18_cbr1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr1_conv_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b18_cbr1_scale\n  name: \"L3_b18_cbr1_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\nlayer { # L3_b18_cbr1_relu\n  name: \"L3_b18_cbr1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr1_bn_top\"\n}\n#} L3_b18_cbr1 end\n#{ L3_b18_cbr2 start\nlayer { # L3_b18_cbr2_conv\n  name: \"L3_b18_cbr2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b18_cbr1_bn_top\"\n  top: \"L3_b18_cbr2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b18_cbr2_bn\n  name: \"L3_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr2_conv_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TRAIN\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # 
L3_b18_cbr2_bn\n  name: \"L3_b18_cbr2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b18_cbr2_conv_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  include {\n    phase: TEST\n  }\n  batch_norm_param {\n    use_global_stats: true\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b18_cbr2_scale\n  name: \"L3_b18_cbr2_scale\"\n  type: \"Scale\"\n  bottom: \"L3_b18_cbr2_bn_top\"\n  top: \"L3_b18_cbr2_bn_top\"\n  scale_param {\n    bias_term: true\n  }\n}\n#} L3_b18_cbr2 end\nlayer { # L3_b18_sum_eltwise\n  name: \"L3_b18_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b18_cbr2_bn_top\"\n  bottom: \"L3_b17_sum_eltwise_top\"\n  top: \"L3_b18_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\nlayer { # L3_b18_relu\n  name: \"L3_b18_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b18_sum_eltwise_top\"\n  top: \"L3_b18_sum_eltwise_top\"\n}\n#} L3_b18 end\n#} L3 end\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"L3_b18_sum_eltwise_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/bottleneckResnet56.prototxt",
    "content": "name: \"Cifar-Resnet\"\nlayer { # TRAIN data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TRAIN\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_train_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # TEST data layer\n  name: \"dataLayer\"\n  type: \"Data\"\n  top: \"data_top\"\n  top: \"label\"\n  include {\n    phase: TEST\n  }\n  transform_param {\n    mirror: true\n    crop_size: 32\n    mean_file: \"examples/cifar10/mean.binaryproto\"\n  }\n  data_param {\n    source: \"examples/cifar10/cifar10_test_lmdb\"\n    batch_size: 100\n    backend: LMDB\n  }\n  image_data_param {\n  shuffle: true\n  }\n}\nlayer { # pre_conv\n  name: \"pre_conv\"\n  type: \"Convolution\"\n  bottom: \"data_top\"\n  top: \"pre_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#{ L1 start\n#{ L1_b1 start\n#{ L1_b1_brc1 start\nlayer { # L1_b1_brc1_bn\n  name: \"L1_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_brc1_relu\n  name: \"L1_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: \"L1_b1_brc1_bn_top\"\n}\nlayer { # L1_b1_brc1_conv\n  name: \"L1_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc1_bn_top\"\n  top: 
\"L1_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b1_brc1 end\n#{ L1_b1_brc2 start\nlayer { # L1_b1_brc2_bn\n  name: \"L1_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc1_conv_top\"\n  top: \"L1_b1_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_brc2_relu\n  name: \"L1_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_bn_top\"\n}\nlayer { # L1_b1_brc2_conv\n  name: \"L1_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b1_brc2_bn_top\"\n  top: \"L1_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b1_brc2 end\n#{ L1_b1_brc3 start\nlayer { # L1_b1_brc3_bn\n  name: \"L1_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_brc2_conv_top\"\n  top: \"L1_b1_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b1_brc3_relu\n  name: \"L1_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_bn_top\"\n}\nlayer { # L1_b1_brc3_conv\n  name: \"L1_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: 
\"L1_b1_brc3_bn_top\"\n  top: \"L1_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b1_brc3 end\nlayer { # L1_b1_chanInc_conv\n  name: \"L1_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"pre_conv_top\"\n  top: \"L1_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L1_b1_sum_eltwise\n  name: \"L1_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b1_brc3_conv_top\"\n  bottom: \"L1_b1_chanInc_conv_top\"\n  top: \"L1_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b1 end\n#{ L1_b2 start\n#{ L1_b2_brc1 start\nlayer { # L1_b2_brc1_bn\n  name: \"L1_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_brc1_relu\n  name: \"L1_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_bn_top\"\n}\nlayer { # L1_b2_brc1_conv\n  name: \"L1_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc1_bn_top\"\n  top: \"L1_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler 
{\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b2_brc1 end\n#{ L1_b2_brc2 start\nlayer { # L1_b2_brc2_bn\n  name: \"L1_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc1_conv_top\"\n  top: \"L1_b2_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_brc2_relu\n  name: \"L1_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_bn_top\"\n}\nlayer { # L1_b2_brc2_conv\n  name: \"L1_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc2_bn_top\"\n  top: \"L1_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b2_brc2 end\n#{ L1_b2_brc3 start\nlayer { # L1_b2_brc3_bn\n  name: \"L1_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_brc2_conv_top\"\n  top: \"L1_b2_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b2_brc3_relu\n  name: \"L1_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_bn_top\"\n}\nlayer { # L1_b2_brc3_conv\n  name: \"L1_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b2_brc3_bn_top\"\n  top: \"L1_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n   
 stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b2_brc3 end\nlayer { # L1_b2_sum_eltwise\n  name: \"L1_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b2_brc3_conv_top\"\n  bottom: \"L1_b1_sum_eltwise_top\"\n  top: \"L1_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b2 end\n#{ L1_b3 start\n#{ L1_b3_brc1 start\nlayer { # L1_b3_brc1_bn\n  name: \"L1_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_brc1_relu\n  name: \"L1_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_bn_top\"\n}\nlayer { # L1_b3_brc1_conv\n  name: \"L1_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc1_bn_top\"\n  top: \"L1_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b3_brc1 end\n#{ L1_b3_brc2 start\nlayer { # L1_b3_brc2_bn\n  name: \"L1_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc1_conv_top\"\n  top: \"L1_b3_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_brc2_relu\n  name: \"L1_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_bn_top\"\n}\nlayer { # L1_b3_brc2_conv\n  
name: \"L1_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc2_bn_top\"\n  top: \"L1_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b3_brc2 end\n#{ L1_b3_brc3 start\nlayer { # L1_b3_brc3_bn\n  name: \"L1_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_brc2_conv_top\"\n  top: \"L1_b3_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b3_brc3_relu\n  name: \"L1_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_bn_top\"\n}\nlayer { # L1_b3_brc3_conv\n  name: \"L1_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b3_brc3_bn_top\"\n  top: \"L1_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b3_brc3 end\nlayer { # L1_b3_sum_eltwise\n  name: \"L1_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b3_brc3_conv_top\"\n  bottom: \"L1_b2_sum_eltwise_top\"\n  top: \"L1_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b3 end\n#{ L1_b4 start\n#{ L1_b4_brc1 start\nlayer { # L1_b4_brc1_bn\n  name: \"L1_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_brc1_relu\n  name: \"L1_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_bn_top\"\n}\nlayer { # L1_b4_brc1_conv\n  name: \"L1_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc1_bn_top\"\n  top: \"L1_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b4_brc1 end\n#{ L1_b4_brc2 start\nlayer { # L1_b4_brc2_bn\n  name: \"L1_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc1_conv_top\"\n  top: \"L1_b4_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_brc2_relu\n  name: \"L1_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_bn_top\"\n}\nlayer { # L1_b4_brc2_conv\n  name: \"L1_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc2_bn_top\"\n  top: \"L1_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b4_brc2 end\n#{ L1_b4_brc3 start\nlayer { # L1_b4_brc3_bn\n  name: \"L1_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_brc2_conv_top\"\n  top: \"L1_b4_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b4_brc3_relu\n  name: \"L1_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_bn_top\"\n}\nlayer { # L1_b4_brc3_conv\n  name: \"L1_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b4_brc3_bn_top\"\n  top: \"L1_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b4_brc3 end\nlayer { # L1_b4_sum_eltwise\n  name: \"L1_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b4_brc3_conv_top\"\n  bottom: \"L1_b3_sum_eltwise_top\"\n  top: \"L1_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b4 end\n#{ L1_b5 start\n#{ L1_b5_brc1 start\nlayer { # L1_b5_brc1_bn\n  name: \"L1_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_brc1_relu\n  name: \"L1_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_bn_top\"\n}\nlayer { # L1_b5_brc1_conv\n  name: \"L1_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc1_bn_top\"\n  top: \"L1_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      
type: \"constant\"\n    }\n  }\n}\n#} L1_b5_brc1 end\n#{ L1_b5_brc2 start\nlayer { # L1_b5_brc2_bn\n  name: \"L1_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc1_conv_top\"\n  top: \"L1_b5_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_brc2_relu\n  name: \"L1_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_bn_top\"\n}\nlayer { # L1_b5_brc2_conv\n  name: \"L1_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc2_bn_top\"\n  top: \"L1_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b5_brc2 end\n#{ L1_b5_brc3 start\nlayer { # L1_b5_brc3_bn\n  name: \"L1_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_brc2_conv_top\"\n  top: \"L1_b5_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b5_brc3_relu\n  name: \"L1_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_bn_top\"\n}\nlayer { # L1_b5_brc3_conv\n  name: \"L1_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b5_brc3_bn_top\"\n  top: \"L1_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n  
  }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b5_brc3 end\nlayer { # L1_b5_sum_eltwise\n  name: \"L1_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b5_brc3_conv_top\"\n  bottom: \"L1_b4_sum_eltwise_top\"\n  top: \"L1_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b5 end\n#{ L1_b6 start\n#{ L1_b6_brc1 start\nlayer { # L1_b6_brc1_bn\n  name: \"L1_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_brc1_relu\n  name: \"L1_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_bn_top\"\n}\nlayer { # L1_b6_brc1_conv\n  name: \"L1_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc1_bn_top\"\n  top: \"L1_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b6_brc1 end\n#{ L1_b6_brc2 start\nlayer { # L1_b6_brc2_bn\n  name: \"L1_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc1_conv_top\"\n  top: \"L1_b6_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_brc2_relu\n  name: \"L1_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_bn_top\"\n}\nlayer { # L1_b6_brc2_conv\n  name: \"L1_b6_brc2_conv\"\n  type: \"Convolution\"\n  
bottom: \"L1_b6_brc2_bn_top\"\n  top: \"L1_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 16\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b6_brc2 end\n#{ L1_b6_brc3 start\nlayer { # L1_b6_brc3_bn\n  name: \"L1_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_brc2_conv_top\"\n  top: \"L1_b6_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L1_b6_brc3_relu\n  name: \"L1_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_bn_top\"\n}\nlayer { # L1_b6_brc3_conv\n  name: \"L1_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_brc3_bn_top\"\n  top: \"L1_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L1_b6_brc3 end\nlayer { # L1_b6_sum_eltwise\n  name: \"L1_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L1_b6_brc3_conv_top\"\n  bottom: \"L1_b5_sum_eltwise_top\"\n  top: \"L1_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L1_b6 end\n#} L1 end\n#{ L2 start\n#{ L2_b1 start\n#{ L2_b1_brc1 start\nlayer { # L2_b1_brc1_bn\n  name: \"L2_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  
}\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_brc1_relu\n  name: \"L2_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_bn_top\"\n}\nlayer { # L2_b1_brc1_conv\n  name: \"L2_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc1_bn_top\"\n  top: \"L2_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b1_brc1 end\n#{ L2_b1_brc2 start\nlayer { # L2_b1_brc2_bn\n  name: \"L2_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc1_conv_top\"\n  top: \"L2_b1_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_brc2_relu\n  name: \"L2_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_bn_top\"\n}\nlayer { # L2_b1_brc2_conv\n  name: \"L2_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc2_bn_top\"\n  top: \"L2_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b1_brc2 end\n#{ L2_b1_brc3 start\nlayer { # L2_b1_brc3_bn\n  name: \"L2_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_brc2_conv_top\"\n  top: \"L2_b1_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    
lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b1_brc3_relu\n  name: \"L2_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_bn_top\"\n}\nlayer { # L2_b1_brc3_conv\n  name: \"L2_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b1_brc3_bn_top\"\n  top: \"L2_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b1_brc3 end\nlayer { # L2_b1_chanInc_conv\n  name: \"L2_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L1_b6_sum_eltwise_top\"\n  top: \"L2_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L2_b1_sum_eltwise\n  name: \"L2_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b1_brc3_conv_top\"\n  bottom: \"L2_b1_chanInc_conv_top\"\n  top: \"L2_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b1 end\n#{ L2_b2 start\n#{ L2_b2_brc1 start\nlayer { # L2_b2_brc1_bn\n  name: \"L2_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_brc1_relu\n  name: \"L2_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: 
\"L2_b2_brc1_bn_top\"\n}\nlayer { # L2_b2_brc1_conv\n  name: \"L2_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc1_bn_top\"\n  top: \"L2_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b2_brc1 end\n#{ L2_b2_brc2 start\nlayer { # L2_b2_brc2_bn\n  name: \"L2_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc1_conv_top\"\n  top: \"L2_b2_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_brc2_relu\n  name: \"L2_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_bn_top\"\n}\nlayer { # L2_b2_brc2_conv\n  name: \"L2_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc2_bn_top\"\n  top: \"L2_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b2_brc2 end\n#{ L2_b2_brc3 start\nlayer { # L2_b2_brc3_bn\n  name: \"L2_b2_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_brc2_conv_top\"\n  top: \"L2_b2_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b2_brc3_relu\n  name: \"L2_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: 
\"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_bn_top\"\n}\nlayer { # L2_b2_brc3_conv\n  name: \"L2_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b2_brc3_bn_top\"\n  top: \"L2_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b2_brc3 end\nlayer { # L2_b2_sum_eltwise\n  name: \"L2_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b2_brc3_conv_top\"\n  bottom: \"L2_b1_sum_eltwise_top\"\n  top: \"L2_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b2 end\n#{ L2_b3 start\n#{ L2_b3_brc1 start\nlayer { # L2_b3_brc1_bn\n  name: \"L2_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: \"L2_b3_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_brc1_relu\n  name: \"L2_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_bn_top\"\n}\nlayer { # L2_b3_brc1_conv\n  name: \"L2_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc1_bn_top\"\n  top: \"L2_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b3_brc1 end\n#{ L2_b3_brc2 start\nlayer { # L2_b3_brc2_bn\n  name: \"L2_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc1_conv_top\"\n  top: \"L2_b3_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    
decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_brc2_relu\n  name: \"L2_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_bn_top\"\n}\nlayer { # L2_b3_brc2_conv\n  name: \"L2_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc2_bn_top\"\n  top: \"L2_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b3_brc2 end\n#{ L2_b3_brc3 start\nlayer { # L2_b3_brc3_bn\n  name: \"L2_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_brc2_conv_top\"\n  top: \"L2_b3_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b3_brc3_relu\n  name: \"L2_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_bn_top\"\n}\nlayer { # L2_b3_brc3_conv\n  name: \"L2_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b3_brc3_bn_top\"\n  top: \"L2_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b3_brc3 end\nlayer { # L2_b3_sum_eltwise\n  name: \"L2_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b3_brc3_conv_top\"\n  bottom: \"L2_b2_sum_eltwise_top\"\n  top: 
\"L2_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b3 end\n#{ L2_b4 start\n#{ L2_b4_brc1 start\nlayer { # L2_b4_brc1_bn\n  name: \"L2_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_brc1_relu\n  name: \"L2_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_bn_top\"\n}\nlayer { # L2_b4_brc1_conv\n  name: \"L2_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc1_bn_top\"\n  top: \"L2_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b4_brc1 end\n#{ L2_b4_brc2 start\nlayer { # L2_b4_brc2_bn\n  name: \"L2_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc1_conv_top\"\n  top: \"L2_b4_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_brc2_relu\n  name: \"L2_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_bn_top\"\n}\nlayer { # L2_b4_brc2_conv\n  name: \"L2_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc2_bn_top\"\n  top: \"L2_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    
stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b4_brc2 end\n#{ L2_b4_brc3 start\nlayer { # L2_b4_brc3_bn\n  name: \"L2_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_brc2_conv_top\"\n  top: \"L2_b4_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b4_brc3_relu\n  name: \"L2_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_bn_top\"\n}\nlayer { # L2_b4_brc3_conv\n  name: \"L2_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b4_brc3_bn_top\"\n  top: \"L2_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b4_brc3 end\nlayer { # L2_b4_sum_eltwise\n  name: \"L2_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b4_brc3_conv_top\"\n  bottom: \"L2_b3_sum_eltwise_top\"\n  top: \"L2_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b4 end\n#{ L2_b5 start\n#{ L2_b5_brc1 start\nlayer { # L2_b5_brc1_bn\n  name: \"L2_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_brc1_relu\n  name: \"L2_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_bn_top\"\n}\nlayer { # L2_b5_brc1_conv\n  
name: \"L2_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc1_bn_top\"\n  top: \"L2_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b5_brc1 end\n#{ L2_b5_brc2 start\nlayer { # L2_b5_brc2_bn\n  name: \"L2_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc1_conv_top\"\n  top: \"L2_b5_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_brc2_relu\n  name: \"L2_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_bn_top\"\n}\nlayer { # L2_b5_brc2_conv\n  name: \"L2_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc2_bn_top\"\n  top: \"L2_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b5_brc2 end\n#{ L2_b5_brc3 start\nlayer { # L2_b5_brc3_bn\n  name: \"L2_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_brc2_conv_top\"\n  top: \"L2_b5_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b5_brc3_relu\n  name: \"L2_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: 
\"L2_b5_brc3_bn_top\"\n}\nlayer { # L2_b5_brc3_conv\n  name: \"L2_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b5_brc3_bn_top\"\n  top: \"L2_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b5_brc3 end\nlayer { # L2_b5_sum_eltwise\n  name: \"L2_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b5_brc3_conv_top\"\n  bottom: \"L2_b4_sum_eltwise_top\"\n  top: \"L2_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b5 end\n#{ L2_b6 start\n#{ L2_b6_brc1 start\nlayer { # L2_b6_brc1_bn\n  name: \"L2_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_brc1_relu\n  name: \"L2_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_bn_top\"\n}\nlayer { # L2_b6_brc1_conv\n  name: \"L2_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc1_bn_top\"\n  top: \"L2_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b6_brc1 end\n#{ L2_b6_brc2 start\nlayer { # L2_b6_brc2_bn\n  name: \"L2_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc1_conv_top\"\n  top: \"L2_b6_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n  
  lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_brc2_relu\n  name: \"L2_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_bn_top\"\n}\nlayer { # L2_b6_brc2_conv\n  name: \"L2_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc2_bn_top\"\n  top: \"L2_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 32\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b6_brc2 end\n#{ L2_b6_brc3 start\nlayer { # L2_b6_brc3_bn\n  name: \"L2_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_brc2_conv_top\"\n  top: \"L2_b6_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L2_b6_brc3_relu\n  name: \"L2_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_bn_top\"\n}\nlayer { # L2_b6_brc3_conv\n  name: \"L2_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_brc3_bn_top\"\n  top: \"L2_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 128\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L2_b6_brc3 end\nlayer { # L2_b6_sum_eltwise\n  name: \"L2_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L2_b6_brc3_conv_top\"\n  bottom: \"L2_b5_sum_eltwise_top\"\n  top: \"L2_b6_sum_eltwise_top\"\n  
eltwise_param {\n    operation: SUM\n  }\n}\n#} L2_b6 end\n#} L2 end\n#{ L3 start\n#{ L3_b1 start\n#{ L3_b1_brc1 start\nlayer { # L3_b1_brc1_bn\n  name: \"L3_b1_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_brc1_relu\n  name: \"L3_b1_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_bn_top\"\n}\nlayer { # L3_b1_brc1_conv\n  name: \"L3_b1_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc1_bn_top\"\n  top: \"L3_b1_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b1_brc1 end\n#{ L3_b1_brc2 start\nlayer { # L3_b1_brc2_bn\n  name: \"L3_b1_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_brc1_conv_top\"\n  top: \"L3_b1_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_brc2_relu\n  name: \"L3_b1_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc2_bn_top\"\n  top: \"L3_b1_brc2_bn_top\"\n}\nlayer { # L3_b1_brc2_conv\n  name: \"L3_b1_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc2_bn_top\"\n  top: \"L3_b1_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 
1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b1_brc2 end\n#{ L3_b1_brc3 start\nlayer { # L3_b1_brc3_bn\n  name: \"L3_b1_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b1_brc2_conv_top\"\n  top: \"L3_b1_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b1_brc3_relu\n  name: \"L3_b1_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b1_brc3_bn_top\"\n  top: \"L3_b1_brc3_bn_top\"\n}\nlayer { # L3_b1_brc3_conv\n  name: \"L3_b1_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b1_brc3_bn_top\"\n  top: \"L3_b1_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b1_brc3 end\nlayer { # L3_b1_chanInc_conv\n  name: \"L3_b1_chanInc_conv\"\n  type: \"Convolution\"\n  bottom: \"L2_b6_sum_eltwise_top\"\n  top: \"L3_b1_chanInc_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 2\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # L3_b1_sum_eltwise\n  name: \"L3_b1_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b1_brc3_conv_top\"\n  bottom: \"L3_b1_chanInc_conv_top\"\n  top: \"L3_b1_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b1 end\n#{ L3_b2 start\n#{ L3_b2_brc1 start\nlayer { # L3_b2_brc1_bn\n  name: \"L3_b2_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L3_b1_sum_eltwise_top\"\n  top: \"L3_b2_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_brc1_relu\n  name: \"L3_b2_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_brc1_bn_top\"\n  top: \"L3_b2_brc1_bn_top\"\n}\nlayer { # L3_b2_brc1_conv\n  name: \"L3_b2_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_brc1_bn_top\"\n  top: \"L3_b2_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b2_brc1 end\n#{ L3_b2_brc2 start\nlayer { # L3_b2_brc2_bn\n  name: \"L3_b2_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_brc1_conv_top\"\n  top: \"L3_b2_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_brc2_relu\n  name: \"L3_b2_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_brc2_bn_top\"\n  top: \"L3_b2_brc2_bn_top\"\n}\nlayer { # L3_b2_brc2_conv\n  name: \"L3_b2_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_brc2_bn_top\"\n  top: \"L3_b2_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b2_brc2 end\n#{ L3_b2_brc3 start\nlayer { # L3_b2_brc3_bn\n  name: \"L3_b2_brc3_bn\"\n 
 type: \"BatchNorm\"\n  bottom: \"L3_b2_brc2_conv_top\"\n  top: \"L3_b2_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b2_brc3_relu\n  name: \"L3_b2_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b2_brc3_bn_top\"\n  top: \"L3_b2_brc3_bn_top\"\n}\nlayer { # L3_b2_brc3_conv\n  name: \"L3_b2_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b2_brc3_bn_top\"\n  top: \"L3_b2_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b2_brc3 end\nlayer { # L3_b2_sum_eltwise\n  name: \"L3_b2_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b2_brc3_conv_top\"\n  bottom: \"L3_b1_sum_eltwise_top\"\n  top: \"L3_b2_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b2 end\n#{ L3_b3 start\n#{ L3_b3_brc1 start\nlayer { # L3_b3_brc1_bn\n  name: \"L3_b3_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_brc1_relu\n  name: \"L3_b3_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_brc1_bn_top\"\n  top: \"L3_b3_brc1_bn_top\"\n}\nlayer { # L3_b3_brc1_conv\n  name: \"L3_b3_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_brc1_bn_top\"\n  top: \"L3_b3_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n 
 }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b3_brc1 end\n#{ L3_b3_brc2 start\nlayer { # L3_b3_brc2_bn\n  name: \"L3_b3_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_brc1_conv_top\"\n  top: \"L3_b3_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_brc2_relu\n  name: \"L3_b3_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_brc2_bn_top\"\n  top: \"L3_b3_brc2_bn_top\"\n}\nlayer { # L3_b3_brc2_conv\n  name: \"L3_b3_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_brc2_bn_top\"\n  top: \"L3_b3_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b3_brc2 end\n#{ L3_b3_brc3 start\nlayer { # L3_b3_brc3_bn\n  name: \"L3_b3_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_brc2_conv_top\"\n  top: \"L3_b3_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b3_brc3_relu\n  name: \"L3_b3_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b3_brc3_bn_top\"\n  top: \"L3_b3_brc3_bn_top\"\n}\nlayer { # L3_b3_brc3_conv\n  name: \"L3_b3_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b3_brc3_bn_top\"\n  top: \"L3_b3_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    
lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b3_brc3 end\nlayer { # L3_b3_sum_eltwise\n  name: \"L3_b3_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b3_brc3_conv_top\"\n  bottom: \"L3_b2_sum_eltwise_top\"\n  top: \"L3_b3_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b3 end\n#{ L3_b4 start\n#{ L3_b4_brc1 start\nlayer { # L3_b4_brc1_bn\n  name: \"L3_b4_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_brc1_relu\n  name: \"L3_b4_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_brc1_bn_top\"\n  top: \"L3_b4_brc1_bn_top\"\n}\nlayer { # L3_b4_brc1_conv\n  name: \"L3_b4_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_brc1_bn_top\"\n  top: \"L3_b4_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b4_brc1 end\n#{ L3_b4_brc2 start\nlayer { # L3_b4_brc2_bn\n  name: \"L3_b4_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_brc1_conv_top\"\n  top: \"L3_b4_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_brc2_relu\n  name: 
\"L3_b4_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_brc2_bn_top\"\n  top: \"L3_b4_brc2_bn_top\"\n}\nlayer { # L3_b4_brc2_conv\n  name: \"L3_b4_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_brc2_bn_top\"\n  top: \"L3_b4_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b4_brc2 end\n#{ L3_b4_brc3 start\nlayer { # L3_b4_brc3_bn\n  name: \"L3_b4_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_brc2_conv_top\"\n  top: \"L3_b4_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b4_brc3_relu\n  name: \"L3_b4_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b4_brc3_bn_top\"\n  top: \"L3_b4_brc3_bn_top\"\n}\nlayer { # L3_b4_brc3_conv\n  name: \"L3_b4_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b4_brc3_bn_top\"\n  top: \"L3_b4_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b4_brc3 end\nlayer { # L3_b4_sum_eltwise\n  name: \"L3_b4_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b4_brc3_conv_top\"\n  bottom: \"L3_b3_sum_eltwise_top\"\n  top: \"L3_b4_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b4 end\n#{ L3_b5 start\n#{ L3_b5_brc1 start\nlayer { # L3_b5_brc1_bn\n  name: \"L3_b5_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: 
\"L3_b5_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_brc1_relu\n  name: \"L3_b5_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_brc1_bn_top\"\n  top: \"L3_b5_brc1_bn_top\"\n}\nlayer { # L3_b5_brc1_conv\n  name: \"L3_b5_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_brc1_bn_top\"\n  top: \"L3_b5_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b5_brc1 end\n#{ L3_b5_brc2 start\nlayer { # L3_b5_brc2_bn\n  name: \"L3_b5_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_brc1_conv_top\"\n  top: \"L3_b5_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_brc2_relu\n  name: \"L3_b5_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_brc2_bn_top\"\n  top: \"L3_b5_brc2_bn_top\"\n}\nlayer { # L3_b5_brc2_conv\n  name: \"L3_b5_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_brc2_bn_top\"\n  top: \"L3_b5_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b5_brc2 end\n#{ L3_b5_brc3 start\nlayer { # L3_b5_brc3_bn\n  name: \"L3_b5_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: 
\"L3_b5_brc2_conv_top\"\n  top: \"L3_b5_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b5_brc3_relu\n  name: \"L3_b5_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b5_brc3_bn_top\"\n  top: \"L3_b5_brc3_bn_top\"\n}\nlayer { # L3_b5_brc3_conv\n  name: \"L3_b5_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b5_brc3_bn_top\"\n  top: \"L3_b5_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b5_brc3 end\nlayer { # L3_b5_sum_eltwise\n  name: \"L3_b5_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b5_brc3_conv_top\"\n  bottom: \"L3_b4_sum_eltwise_top\"\n  top: \"L3_b5_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b5 end\n#{ L3_b6 start\n#{ L3_b6_brc1 start\nlayer { # L3_b6_brc1_bn\n  name: \"L3_b6_brc1_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_brc1_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_brc1_relu\n  name: \"L3_b6_brc1_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_brc1_bn_top\"\n  top: \"L3_b6_brc1_bn_top\"\n}\nlayer { # L3_b6_brc1_conv\n  name: \"L3_b6_brc1_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_brc1_bn_top\"\n  top: \"L3_b6_brc1_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    
num_output: 64\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b6_brc1 end\n#{ L3_b6_brc2 start\nlayer { # L3_b6_brc2_bn\n  name: \"L3_b6_brc2_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_brc1_conv_top\"\n  top: \"L3_b6_brc2_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_brc2_relu\n  name: \"L3_b6_brc2_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_brc2_bn_top\"\n  top: \"L3_b6_brc2_bn_top\"\n}\nlayer { # L3_b6_brc2_conv\n  name: \"L3_b6_brc2_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_brc2_bn_top\"\n  top: \"L3_b6_brc2_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  }\n  convolution_param {\n    num_output: 64\n    pad: 1\n    kernel_size: 3\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b6_brc2 end\n#{ L3_b6_brc3 start\nlayer { # L3_b6_brc3_bn\n  name: \"L3_b6_brc3_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_brc2_conv_top\"\n  top: \"L3_b6_brc3_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # L3_b6_brc3_relu\n  name: \"L3_b6_brc3_relu\"\n  type: \"ReLU\"\n  bottom: \"L3_b6_brc3_bn_top\"\n  top: \"L3_b6_brc3_bn_top\"\n}\nlayer { # L3_b6_brc3_conv\n  name: \"L3_b6_brc3_conv\"\n  type: \"Convolution\"\n  bottom: \"L3_b6_brc3_bn_top\"\n  top: \"L3_b6_brc3_conv_top\"\n  param {\n    lr_mult: 1\n    decay_mult: 1\n  }\n  param {\n    lr_mult: 2\n    decay_mult: 0\n  
}\n  convolution_param {\n    num_output: 256\n    pad: 0\n    kernel_size: 1\n    stride: 1\n    weight_filler {\n      type: \"msra\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\n#} L3_b6_brc3 end\nlayer { # L3_b6_sum_eltwise\n  name: \"L3_b6_sum_eltwise\"\n  type: \"Eltwise\"\n  bottom: \"L3_b6_brc3_conv_top\"\n  bottom: \"L3_b5_sum_eltwise_top\"\n  top: \"L3_b6_sum_eltwise_top\"\n  eltwise_param {\n    operation: SUM\n  }\n}\n#} L3_b6 end\n#} L3 end\nlayer { # post_bn\n  name: \"post_bn\"\n  type: \"BatchNorm\"\n  bottom: \"L3_b6_sum_eltwise_top\"\n  top: \"post_bn_top\"\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  param {\n    lr_mult: 0\n    decay_mult: 0\n  }\n  batch_norm_param {\n    use_global_stats: false\n    moving_average_fraction: 0.95\n  }\n}\nlayer { # post_relu\n  name: \"post_relu\"\n  type: \"ReLU\"\n  bottom: \"post_bn_top\"\n  top: \"post_bn_top\"\n}\nlayer { # post_pool\n  name: \"post_pool\"\n  type: \"Pooling\"\n  bottom: \"post_bn_top\"\n  top: \"post_pool\"\n  pooling_param {\n    pool: AVE\n    kernel_size: 8\n    stride: 1\n  }\n}\nlayer { # post_FC\n  name: \"post_FC\"\n  type: \"InnerProduct\"\n  bottom: \"post_pool\"\n  top: \"post_FC_top\"\n  param {\n    lr_mult: 1\n  }\n  param {\n    lr_mult: 2\n  }\n\t  inner_product_param {\n    num_output: 10\n    weight_filler {\n      type: \"xavier\"\n    }\n    bias_filler {\n      type: \"constant\"\n    }\n  }\n}\nlayer { # accuracy\n  name: \"accuracy\"\n  type: \"Accuracy\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"accuracy\"\n}\nlayer { # loss\n  name: \"loss\"\n  type: \"SoftmaxWithLoss\"\n  bottom: \"post_FC_top\"\n  bottom: \"label\"\n  top: \"loss\"\n}\n"
  },
  {
    "path": "architectures/resnet56.prototxt",
    "content": "name: \"Cifar-Resnet\" \nlayer { # train data layer \n  name: \"dataLayer\" \n  type: \"Data\" \n  top: \"data\" \n  top: \"label\" \n  include { \n    phase: TRAIN \n  } \n  transform_param { \n    mirror: true \n    crop_size: 32 \n    mean_file: \"examples/cifar10/mean.binaryproto\" \n  } \n  data_param { \n    source: \"examples/cifar10/cifar10_train_lmdb\" \n#    source: \"examples/cifar10/cifar10_train10k_lmdb\" \n    batch_size: 125\n    backend: LMDB \n  } \n  image_data_param { \n  shuffle: true \n  } \n} \nlayer { # test data layer \n  name: \"dataLayer\" \n  type: \"Data\" \n  top: \"data\" \n  top: \"label\" \n  include { \n    phase: TEST \n  } \n  transform_param { \n    mirror: false \n    crop_size: 32 \n    mean_file: \"examples/cifar10/mean.binaryproto\" \n  } \n  data_param { \n    source: \"examples/cifar10/cifar10_test_lmdb\" \n    batch_size: 125 \n    backend: LMDB \n  } \n} \nlayer { # conv \n  name: \"conv\" \n  type: \"Convolution\" \n  bottom: \"data\" \n  top: \"conv\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_conv \n  name: \"batchNorm_conv\" \n  type: \"BatchNorm\" \n  bottom: \"conv\" \n  top: \"bn_conv\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_conv \n  name: \"batchNorm_conv\" \n  type: \"BatchNorm\" \n  bottom: \"conv\" \n  top: \"bn_conv\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  
param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_conv \n  name: \"scale_conv\" \n  type: \"Scale\" \n  bottom: \"bn_conv\" \n  top: \"bn_conv\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_conv \n  name: \"relu_bn_conv\" \n  type: \"ReLU\" \n  bottom: \"bn_conv\" \n  top: \"bn_conv\" \n} \nlayer { # Conv16_1 \n  name: \"Conv16_1\" \n  type: \"Convolution\" \n  bottom: \"bn_conv\" \n  top: \"Conv16_1\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_1 \n  name: \"batchNorm_Conv16_1\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1\" \n  top: \"bn_Conv16_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_1 \n  name: \"batchNorm_Conv16_1\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1\" \n  top: \"bn_Conv16_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_1 \n  name: \"scale_Conv16_1\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_1\" \n  top: \"bn_Conv16_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_1 \n  
name: \"relu_bn_Conv16_1\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_1\" \n  top: \"bn_Conv16_1\" \n} \nlayer { # Conv16_1_b \n  name: \"Conv16_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_1\" \n  top: \"Conv16_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_1_b \n  name: \"batchNorm_Conv16_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1_b\" \n  top: \"bn_Conv16_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_1_b \n  name: \"batchNorm_Conv16_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_1_b\" \n  top: \"bn_Conv16_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_1_b \n  name: \"scale_Conv16_1_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_1_b\" \n  top: \"bn_Conv16_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_bn_conv \n    name: \"sum_bn_conv\" \n    type: \"Eltwise\" \n    bottom: \"bn_conv\" \n    bottom: \"bn_Conv16_1_b\" \n    top: \"sum_bn_Conv16_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_1_b \n  name: \"relu_sum_bn_Conv16_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_1_b\" \n  top: 
\"sum_bn_Conv16_1_b\" \n} \nlayer { # Conv16_2 \n  name: \"Conv16_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_1_b\" \n  top: \"Conv16_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_2 \n  name: \"batchNorm_Conv16_2\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2\" \n  top: \"bn_Conv16_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_2 \n  name: \"batchNorm_Conv16_2\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2\" \n  top: \"bn_Conv16_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_2 \n  name: \"scale_Conv16_2\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_2\" \n  top: \"bn_Conv16_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_2 \n  name: \"relu_bn_Conv16_2\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_2\" \n  top: \"bn_Conv16_2\" \n} \nlayer { # Conv16_2_b \n  name: \"Conv16_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_2\" \n  top: \"Conv16_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n   
 weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_2_b \n  name: \"batchNorm_Conv16_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2_b\" \n  top: \"bn_Conv16_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_2_b \n  name: \"batchNorm_Conv16_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_2_b\" \n  top: \"bn_Conv16_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_2_b \n  name: \"scale_Conv16_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_2_b\" \n  top: \"bn_Conv16_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_1_b \n    name: \"sum_sum_bn_Conv16_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_1_b\" \n    bottom: \"bn_Conv16_2_b\" \n    top: \"sum_bn_Conv16_2_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_2_b \n  name: \"relu_sum_bn_Conv16_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_2_b\" \n  top: \"sum_bn_Conv16_2_b\" \n} \nlayer { # Conv16_3 \n  name: \"Conv16_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_2_b\" \n  top: \"Conv16_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    
bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_3 \n  name: \"batchNorm_Conv16_3\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3\" \n  top: \"bn_Conv16_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_3 \n  name: \"batchNorm_Conv16_3\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3\" \n  top: \"bn_Conv16_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_3 \n  name: \"scale_Conv16_3\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_3\" \n  top: \"bn_Conv16_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_3 \n  name: \"relu_bn_Conv16_3\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_3\" \n  top: \"bn_Conv16_3\" \n} \nlayer { # Conv16_3_b \n  name: \"Conv16_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_3\" \n  top: \"Conv16_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_3_b \n  name: \"batchNorm_Conv16_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3_b\" \n  top: \"bn_Conv16_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  
} \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_3_b \n  name: \"batchNorm_Conv16_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_3_b\" \n  top: \"bn_Conv16_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_3_b \n  name: \"scale_Conv16_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_3_b\" \n  top: \"bn_Conv16_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_2_b \n    name: \"sum_sum_bn_Conv16_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_2_b\" \n    bottom: \"bn_Conv16_3_b\" \n    top: \"sum_bn_Conv16_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_3_b \n  name: \"relu_sum_bn_Conv16_3_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_3_b\" \n  top: \"sum_bn_Conv16_3_b\" \n} \nlayer { # Conv16_4 \n  name: \"Conv16_4\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_3_b\" \n  top: \"Conv16_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_4 \n  name: \"batchNorm_Conv16_4\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4\" \n  top: \"bn_Conv16_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    
use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_4 \n  name: \"batchNorm_Conv16_4\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4\" \n  top: \"bn_Conv16_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_4 \n  name: \"scale_Conv16_4\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_4\" \n  top: \"bn_Conv16_4\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_4 \n  name: \"relu_bn_Conv16_4\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_4\" \n  top: \"bn_Conv16_4\" \n} \nlayer { # Conv16_4_b \n  name: \"Conv16_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_4\" \n  top: \"Conv16_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_4_b \n  name: \"batchNorm_Conv16_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4_b\" \n  top: \"bn_Conv16_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_4_b \n  name: \"batchNorm_Conv16_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_4_b\" \n  top: \"bn_Conv16_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 
\n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_4_b \n  name: \"scale_Conv16_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_4_b\" \n  top: \"bn_Conv16_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_3_b \n    name: \"sum_sum_bn_Conv16_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_3_b\" \n    bottom: \"bn_Conv16_4_b\" \n    top: \"sum_bn_Conv16_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_4_b \n  name: \"relu_sum_bn_Conv16_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_4_b\" \n  top: \"sum_bn_Conv16_4_b\" \n} \nlayer { # Conv16_5 \n  name: \"Conv16_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_4_b\" \n  top: \"Conv16_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_5 \n  name: \"batchNorm_Conv16_5\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5\" \n  top: \"bn_Conv16_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_5 \n  name: \"batchNorm_Conv16_5\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5\" \n  top: \"bn_Conv16_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  
batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_5 \n  name: \"scale_Conv16_5\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_5\" \n  top: \"bn_Conv16_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_5 \n  name: \"relu_bn_Conv16_5\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_5\" \n  top: \"bn_Conv16_5\" \n} \nlayer { # Conv16_5_b \n  name: \"Conv16_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_5\" \n  top: \"Conv16_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_5_b \n  name: \"batchNorm_Conv16_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5_b\" \n  top: \"bn_Conv16_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_5_b \n  name: \"batchNorm_Conv16_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_5_b\" \n  top: \"bn_Conv16_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_5_b \n  name: \"scale_Conv16_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_5_b\" \n  top: \"bn_Conv16_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_4_b \n    name: 
\"sum_sum_bn_Conv16_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_4_b\" \n    bottom: \"bn_Conv16_5_b\" \n    top: \"sum_bn_Conv16_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_5_b \n  name: \"relu_sum_bn_Conv16_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_5_b\" \n  top: \"sum_bn_Conv16_5_b\" \n} \nlayer { # Conv16_6 \n  name: \"Conv16_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_5_b\" \n  top: \"Conv16_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_6 \n  name: \"batchNorm_Conv16_6\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6\" \n  top: \"bn_Conv16_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_6 \n  name: \"batchNorm_Conv16_6\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6\" \n  top: \"bn_Conv16_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_6 \n  name: \"scale_Conv16_6\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_6\" \n  top: \"bn_Conv16_6\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_6 \n  name: \"relu_bn_Conv16_6\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_6\" \n  top: \"bn_Conv16_6\" 
\n} \nlayer { # Conv16_6_b \n  name: \"Conv16_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_6\" \n  top: \"Conv16_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_6_b \n  name: \"batchNorm_Conv16_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6_b\" \n  top: \"bn_Conv16_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_6_b \n  name: \"batchNorm_Conv16_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_6_b\" \n  top: \"bn_Conv16_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_6_b \n  name: \"scale_Conv16_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_6_b\" \n  top: \"bn_Conv16_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_5_b \n    name: \"sum_sum_bn_Conv16_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_5_b\" \n    bottom: \"bn_Conv16_6_b\" \n    top: \"sum_bn_Conv16_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_6_b \n  name: \"relu_sum_bn_Conv16_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_6_b\" \n  top: \"sum_bn_Conv16_6_b\" \n} \nlayer { # Conv16_7 \n  name: \"Conv16_7\" \n  type: 
\"Convolution\" \n  bottom: \"sum_bn_Conv16_6_b\" \n  top: \"Conv16_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_7 \n  name: \"batchNorm_Conv16_7\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7\" \n  top: \"bn_Conv16_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_7 \n  name: \"batchNorm_Conv16_7\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7\" \n  top: \"bn_Conv16_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_7 \n  name: \"scale_Conv16_7\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_7\" \n  top: \"bn_Conv16_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_7 \n  name: \"relu_bn_Conv16_7\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_7\" \n  top: \"bn_Conv16_7\" \n} \nlayer { # Conv16_7_b \n  name: \"Conv16_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_7\" \n  top: \"Conv16_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      
type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_7_b \n  name: \"batchNorm_Conv16_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7_b\" \n  top: \"bn_Conv16_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_7_b \n  name: \"batchNorm_Conv16_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_7_b\" \n  top: \"bn_Conv16_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_7_b \n  name: \"scale_Conv16_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_7_b\" \n  top: \"bn_Conv16_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_6_b \n    name: \"sum_sum_bn_Conv16_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_6_b\" \n    bottom: \"bn_Conv16_7_b\" \n    top: \"sum_bn_Conv16_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_7_b \n  name: \"relu_sum_bn_Conv16_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_7_b\" \n  top: \"sum_bn_Conv16_7_b\" \n} \nlayer { # Conv16_8 \n  name: \"Conv16_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_7_b\" \n  top: \"Conv16_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_8 
\n  name: \"batchNorm_Conv16_8\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8\" \n  top: \"bn_Conv16_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_8 \n  name: \"batchNorm_Conv16_8\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8\" \n  top: \"bn_Conv16_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_8 \n  name: \"scale_Conv16_8\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_8\" \n  top: \"bn_Conv16_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_8 \n  name: \"relu_bn_Conv16_8\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_8\" \n  top: \"bn_Conv16_8\" \n} \nlayer { # Conv16_8_b \n  name: \"Conv16_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_8\" \n  top: \"Conv16_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_8_b \n  name: \"batchNorm_Conv16_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8_b\" \n  top: \"bn_Conv16_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    
use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_8_b \n  name: \"batchNorm_Conv16_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_8_b\" \n  top: \"bn_Conv16_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_8_b \n  name: \"scale_Conv16_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_8_b\" \n  top: \"bn_Conv16_8_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_7_b \n    name: \"sum_sum_bn_Conv16_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_7_b\" \n    bottom: \"bn_Conv16_8_b\" \n    top: \"sum_bn_Conv16_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_8_b \n  name: \"relu_sum_bn_Conv16_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_8_b\" \n  top: \"sum_bn_Conv16_8_b\" \n} \nlayer { # Conv16_9 \n  name: \"Conv16_9\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_8_b\" \n  top: \"Conv16_9\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_9 \n  name: \"batchNorm_Conv16_9\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9\" \n  top: \"bn_Conv16_9\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  
} \n} \nlayer { # bn_Conv16_9 \n  name: \"batchNorm_Conv16_9\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9\" \n  top: \"bn_Conv16_9\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_9 \n  name: \"scale_Conv16_9\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_9\" \n  top: \"bn_Conv16_9\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_Conv16_9 \n  name: \"relu_bn_Conv16_9\" \n  type: \"ReLU\" \n  bottom: \"bn_Conv16_9\" \n  top: \"bn_Conv16_9\" \n} \nlayer { # Conv16_9_b \n  name: \"Conv16_9_b\" \n  type: \"Convolution\" \n  bottom: \"bn_Conv16_9\" \n  top: \"Conv16_9_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_Conv16_9_b \n  name: \"batchNorm_Conv16_9_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9_b\" \n  top: \"bn_Conv16_9_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_Conv16_9_b \n  name: \"batchNorm_Conv16_9_b\" \n  type: \"BatchNorm\" \n  bottom: \"Conv16_9_b\" \n  top: \"bn_Conv16_9_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  
batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_Conv16_9_b \n  name: \"scale_Conv16_9_b\" \n  type: \"Scale\" \n  bottom: \"bn_Conv16_9_b\" \n  top: \"bn_Conv16_9_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_Conv16_8_b \n    name: \"sum_sum_bn_Conv16_8_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_Conv16_8_b\" \n    bottom: \"bn_Conv16_9_b\" \n    top: \"sum_bn_Conv16_9_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_Conv16_9_b \n  name: \"relu_sum_bn_Conv16_9_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_Conv16_9_b\" \n  top: \"sum_bn_Conv16_9_b\" \n} \nlayer { # resblk32 \n  name: \"resblk32\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_Conv16_9_b\" \n  top: \"resblk32\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 2 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32 \n  name: \"batchNorm_resblk32\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32\" \n  top: \"bn_resblk32\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32 \n  name: \"batchNorm_resblk32\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32\" \n  top: \"bn_resblk32\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    
moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32 \n  name: \"scale_resblk32\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32\" \n  top: \"bn_resblk32\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32 \n  name: \"relu_bn_resblk32\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32\" \n  top: \"bn_resblk32\" \n} \nlayer { # resblk32_b \n  name: \"resblk32_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32\" \n  top: \"resblk32_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 16 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_b \n  name: \"batchNorm_resblk32_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_b\" \n  top: \"bn_resblk32_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_b \n  name: \"batchNorm_resblk32_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_b\" \n  top: \"bn_resblk32_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_b \n  name: \"scale_resblk32_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_b\" \n  top: \"bn_resblk32_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # pool_resblk32 \n  name: \"avePooling_resblk32\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_Conv16_9_b\" \n 
 top: \"avgPool_resblk32\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 3 \n    stride: 2 \n  } \n} \nlayer { # sum_avgPool_resblk32 \n    name: \"sum_avgPool_resblk32\" \n    type: \"Eltwise\" \n    bottom: \"avgPool_resblk32\" \n    bottom: \"bn_resblk32_b\" \n    top: \"sum_bn_resblk32_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_b \n  name: \"relu_sum_bn_resblk32_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_b\" \n  top: \"sum_bn_resblk32_b\" \n} \nlayer { # Dummy \n  name: \"zeros_sum_bn_resblk32_b\" \n  type: \"DummyData\" \n  top: \"zeros_sum_bn_resblk32_b\" \n  dummy_data_param { \n    shape: {dim: 125  dim: 16 dim: 16  dim: 16 } \n    data_filler: { \n                type: \"constant\" \n                value: 0 \n        } \n  } \n} \nlayer { # ConCat_sum_bn_resblk32_b \n  name: \"CC_sum_bn_resblk32_b\" \n  bottom: \"sum_bn_resblk32_b\" \n  bottom: \"zeros_sum_bn_resblk32_b\" \n  top: \"CC_sum_bn_resblk32_b\" \n  type: \"Concat\" \n  concat_param { \n    axis: 1 \n  } \n} \nlayer { # resblk32_1 \n  name: \"resblk32_1\" \n  type: \"Convolution\" \n  bottom: \"CC_sum_bn_resblk32_b\" \n  top: \"resblk32_1\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_1 \n  name: \"batchNorm_resblk32_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1\" \n  top: \"bn_resblk32_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_1 \n  
name: \"batchNorm_resblk32_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1\" \n  top: \"bn_resblk32_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_1 \n  name: \"scale_resblk32_1\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_1\" \n  top: \"bn_resblk32_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_1 \n  name: \"relu_bn_resblk32_1\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_1\" \n  top: \"bn_resblk32_1\" \n} \nlayer { # resblk32_1_b \n  name: \"resblk32_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_1\" \n  top: \"resblk32_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_1_b \n  name: \"batchNorm_resblk32_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1_b\" \n  top: \"bn_resblk32_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_1_b \n  name: \"batchNorm_resblk32_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_1_b\" \n  top: \"bn_resblk32_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: 
TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_1_b \n  name: \"scale_resblk32_1_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_1_b\" \n  top: \"bn_resblk32_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_CC_sum_bn_resblk32_b \n    name: \"sum_CC_sum_bn_resblk32_b\" \n    type: \"Eltwise\" \n    bottom: \"CC_sum_bn_resblk32_b\" \n    bottom: \"bn_resblk32_1_b\" \n    top: \"sum_bn_resblk32_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_1_b \n  name: \"relu_sum_bn_resblk32_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_1_b\" \n  top: \"sum_bn_resblk32_1_b\" \n} \nlayer { # resblk32_2 \n  name: \"resblk32_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_1_b\" \n  top: \"resblk32_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_2 \n  name: \"batchNorm_resblk32_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2\" \n  top: \"bn_resblk32_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_2 \n  name: \"batchNorm_resblk32_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2\" \n  top: \"bn_resblk32_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  
batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_2 \n  name: \"scale_resblk32_2\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_2\" \n  top: \"bn_resblk32_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_2 \n  name: \"relu_bn_resblk32_2\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_2\" \n  top: \"bn_resblk32_2\" \n} \nlayer { # resblk32_2_b \n  name: \"resblk32_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_2\" \n  top: \"resblk32_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_2_b \n  name: \"batchNorm_resblk32_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2_b\" \n  top: \"bn_resblk32_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_2_b \n  name: \"batchNorm_resblk32_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_2_b\" \n  top: \"bn_resblk32_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true    \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_2_b \n  name: \"scale_resblk32_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_2_b\" \n  top: \"bn_resblk32_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # 
sum_sum_bn_resblk32_1_b \n    name: \"sum_sum_bn_resblk32_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_1_b\" \n    bottom: \"bn_resblk32_2_b\" \n    top: \"sum_bn_resblk32_2_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_2_b \n  name: \"relu_sum_bn_resblk32_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_2_b\" \n  top: \"sum_bn_resblk32_2_b\" \n} \nlayer { # resblk32_3 \n  name: \"resblk32_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_2_b\" \n  top: \"resblk32_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_3 \n  name: \"batchNorm_resblk32_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3\" \n  top: \"bn_resblk32_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_3 \n  name: \"batchNorm_resblk32_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3\" \n  top: \"bn_resblk32_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true   \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_3 \n  name: \"scale_resblk32_3\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_3\" \n  top: \"bn_resblk32_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_3 \n  name: 
\"relu_bn_resblk32_3\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_3\" \n  top: \"bn_resblk32_3\" \n} \nlayer { # resblk32_3_b \n  name: \"resblk32_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_3\" \n  top: \"resblk32_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_3_b \n  name: \"batchNorm_resblk32_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3_b\" \n  top: \"bn_resblk32_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_3_b \n  name: \"batchNorm_resblk32_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_3_b\" \n  top: \"bn_resblk32_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_3_b \n  name: \"scale_resblk32_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_3_b\" \n  top: \"bn_resblk32_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_2_b \n    name: \"sum_sum_bn_resblk32_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_2_b\" \n    bottom: \"bn_resblk32_3_b\" \n    top: \"sum_bn_resblk32_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_3_b \n  name: \"relu_sum_bn_resblk32_3_b\" 
\n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_3_b\" \n  top: \"sum_bn_resblk32_3_b\" \n} \nlayer { # resblk32_4 \n  name: \"resblk32_4\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_3_b\" \n  top: \"resblk32_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_4 \n  name: \"batchNorm_resblk32_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4\" \n  top: \"bn_resblk32_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_4 \n  name: \"batchNorm_resblk32_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4\" \n  top: \"bn_resblk32_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_4 \n  name: \"scale_resblk32_4\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_4\" \n  top: \"bn_resblk32_4\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_4 \n  name: \"relu_bn_resblk32_4\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_4\" \n  top: \"bn_resblk32_4\" \n} \nlayer { # resblk32_4_b \n  name: \"resblk32_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_4\" \n  top: \"resblk32_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    
decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_4_b \n  name: \"batchNorm_resblk32_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4_b\" \n  top: \"bn_resblk32_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_4_b \n  name: \"batchNorm_resblk32_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_4_b\" \n  top: \"bn_resblk32_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_4_b \n  name: \"scale_resblk32_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_4_b\" \n  top: \"bn_resblk32_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_3_b \n    name: \"sum_sum_bn_resblk32_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_3_b\" \n    bottom: \"bn_resblk32_4_b\" \n    top: \"sum_bn_resblk32_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_4_b \n  name: \"relu_sum_bn_resblk32_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_4_b\" \n  top: \"sum_bn_resblk32_4_b\" \n} \nlayer { # resblk32_5 \n  name: \"resblk32_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_4_b\" \n  top: \"resblk32_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 
1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_5 \n  name: \"batchNorm_resblk32_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_5\" \n  top: \"bn_resblk32_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_5 \n  name: \"batchNorm_resblk32_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_5\" \n  top: \"bn_resblk32_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_5 \n  name: \"scale_resblk32_5\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_5\" \n  top: \"bn_resblk32_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_5 \n  name: \"relu_bn_resblk32_5\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_5\" \n  top: \"bn_resblk32_5\" \n} \nlayer { # resblk32_5_b \n  name: \"resblk32_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_5\" \n  top: \"resblk32_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_5_b \n  name: \"batchNorm_resblk32_5_b\" \n  type: \"BatchNorm\" \n  bottom: 
\"resblk32_5_b\" \n  top: \"bn_resblk32_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_5_b \n  name: \"batchNorm_resblk32_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_5_b\" \n  top: \"bn_resblk32_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_5_b \n  name: \"scale_resblk32_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_5_b\" \n  top: \"bn_resblk32_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_4_b \n    name: \"sum_sum_bn_resblk32_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_4_b\" \n    bottom: \"bn_resblk32_5_b\" \n    top: \"sum_bn_resblk32_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_5_b \n  name: \"relu_sum_bn_resblk32_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_5_b\" \n  top: \"sum_bn_resblk32_5_b\" \n} \nlayer { # resblk32_6 \n  name: \"resblk32_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_5_b\" \n  top: \"resblk32_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_6 \n  name: \"batchNorm_resblk32_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6\" 
\n  top: \"bn_resblk32_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_6 \n  name: \"batchNorm_resblk32_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6\" \n  top: \"bn_resblk32_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_6 \n  name: \"scale_resblk32_6\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_6\" \n  top: \"bn_resblk32_6\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_6 \n  name: \"relu_bn_resblk32_6\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_6\" \n  top: \"bn_resblk32_6\" \n} \nlayer { # resblk32_6_b \n  name: \"resblk32_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_6\" \n  top: \"resblk32_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_6_b \n  name: \"batchNorm_resblk32_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6_b\" \n  top: \"bn_resblk32_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    
moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_6_b \n  name: \"batchNorm_resblk32_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_6_b\" \n  top: \"bn_resblk32_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_6_b \n  name: \"scale_resblk32_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_6_b\" \n  top: \"bn_resblk32_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_5_b \n    name: \"sum_sum_bn_resblk32_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_5_b\" \n    bottom: \"bn_resblk32_6_b\" \n    top: \"sum_bn_resblk32_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_6_b \n  name: \"relu_sum_bn_resblk32_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_6_b\" \n  top: \"sum_bn_resblk32_6_b\" \n} \nlayer { # resblk32_7 \n  name: \"resblk32_7\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_6_b\" \n  top: \"resblk32_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_7 \n  name: \"batchNorm_resblk32_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7\" \n  top: \"bn_resblk32_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    
moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_7 \n  name: \"batchNorm_resblk32_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7\" \n  top: \"bn_resblk32_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_7 \n  name: \"scale_resblk32_7\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_7\" \n  top: \"bn_resblk32_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_7 \n  name: \"relu_bn_resblk32_7\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_7\" \n  top: \"bn_resblk32_7\" \n} \nlayer { # resblk32_7_b \n  name: \"resblk32_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_7\" \n  top: \"resblk32_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_7_b \n  name: \"batchNorm_resblk32_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7_b\" \n  top: \"bn_resblk32_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_7_b \n  name: \"batchNorm_resblk32_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_7_b\" \n  top: \"bn_resblk32_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n   
 lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_7_b \n  name: \"scale_resblk32_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_7_b\" \n  top: \"bn_resblk32_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_6_b \n    name: \"sum_sum_bn_resblk32_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_6_b\" \n    bottom: \"bn_resblk32_7_b\" \n    top: \"sum_bn_resblk32_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_7_b \n  name: \"relu_sum_bn_resblk32_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_7_b\" \n  top: \"sum_bn_resblk32_7_b\" \n} \nlayer { # resblk32_8 \n  name: \"resblk32_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_7_b\" \n  top: \"resblk32_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_8 \n  name: \"batchNorm_resblk32_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8\" \n  top: \"bn_resblk32_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_8 \n  name: \"batchNorm_resblk32_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8\" \n  top: \"bn_resblk32_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    
decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_8 \n  name: \"scale_resblk32_8\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_8\" \n  top: \"bn_resblk32_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk32_8 \n  name: \"relu_bn_resblk32_8\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk32_8\" \n  top: \"bn_resblk32_8\" \n} \nlayer { # resblk32_8_b \n  name: \"resblk32_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk32_8\" \n  top: \"resblk32_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk32_8_b \n  name: \"batchNorm_resblk32_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8_b\" \n  top: \"bn_resblk32_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk32_8_b \n  name: \"batchNorm_resblk32_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk32_8_b\" \n  top: \"bn_resblk32_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk32_8_b \n  name: \"scale_resblk32_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk32_8_b\" \n  top: \"bn_resblk32_8_b\" 
\n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk32_7_b \n    name: \"sum_sum_bn_resblk32_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk32_7_b\" \n    bottom: \"bn_resblk32_8_b\" \n    top: \"sum_bn_resblk32_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk32_8_b \n  name: \"relu_sum_bn_resblk32_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"sum_bn_resblk32_8_b\" \n} \nlayer { # resblk64 \n  name: \"resblk64\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"resblk64\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 2 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64 \n  name: \"batchNorm_resblk64\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64\" \n  top: \"bn_resblk64\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64 \n  name: \"batchNorm_resblk64\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64\" \n  top: \"bn_resblk64\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64 \n  name: \"scale_resblk64\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64\" \n  top: \"bn_resblk64\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # 
relu_bn_resblk64 \n  name: \"relu_bn_resblk64\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64\" \n  top: \"bn_resblk64\" \n} \nlayer { # resblk64_b \n  name: \"resblk64_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64\" \n  top: \"resblk64_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 32 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_b \n  name: \"batchNorm_resblk64_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_b\" \n  top: \"bn_resblk64_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_b \n  name: \"batchNorm_resblk64_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_b\" \n  top: \"bn_resblk64_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_b \n  name: \"scale_resblk64_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_b\" \n  top: \"bn_resblk64_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # pool_resblk64 \n  name: \"avePooling_resblk64\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_resblk32_8_b\" \n  top: \"avgPool_resblk64\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 3 \n    stride: 2 \n  } \n} \nlayer { # sum_avgPool_resblk64 \n    name: \"sum_avgPool_resblk64\" \n    type: \"Eltwise\" \n    bottom: 
\"avgPool_resblk64\" \n    bottom: \"bn_resblk64_b\" \n    top: \"sum_bn_resblk64_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_b \n  name: \"relu_sum_bn_resblk64_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_b\" \n  top: \"sum_bn_resblk64_b\" \n} \nlayer { # Dummy \n  name: \"zeros_sum_bn_resblk64_b\" \n  type: \"DummyData\" \n  top: \"zeros_sum_bn_resblk64_b\" \n  dummy_data_param { \n    shape: {dim: 125  dim: 32 dim: 8  dim: 8 } \n    data_filler: { \n                type: \"constant\" \n                value: 0 \n        } \n  } \n} \nlayer { # ConCat_sum_bn_resblk64_b \n  name: \"CC_sum_bn_resblk64_b\" \n  bottom: \"sum_bn_resblk64_b\" \n  bottom: \"zeros_sum_bn_resblk64_b\" \n  top: \"CC_sum_bn_resblk64_b\" \n  type: \"Concat\" \n  concat_param { \n    axis: 1 \n  } \n} \nlayer { # resblk64_1 \n  name: \"resblk64_1\" \n  type: \"Convolution\" \n  bottom: \"CC_sum_bn_resblk64_b\" \n  top: \"resblk64_1\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_1 \n  name: \"batchNorm_resblk64_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1\" \n  top: \"bn_resblk64_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_1 \n  name: \"batchNorm_resblk64_1\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1\" \n  top: \"bn_resblk64_1\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  
param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_1 \n  name: \"scale_resblk64_1\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_1\" \n  top: \"bn_resblk64_1\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_1 \n  name: \"relu_bn_resblk64_1\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_1\" \n  top: \"bn_resblk64_1\" \n} \nlayer { # resblk64_1_b \n  name: \"resblk64_1_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_1\" \n  top: \"resblk64_1_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_1_b \n  name: \"batchNorm_resblk64_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1_b\" \n  top: \"bn_resblk64_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_1_b \n  name: \"batchNorm_resblk64_1_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_1_b\" \n  top: \"bn_resblk64_1_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_1_b \n  name: \"scale_resblk64_1_b\" \n  type: \"Scale\" \n  bottom: 
\"bn_resblk64_1_b\" \n  top: \"bn_resblk64_1_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_CC_sum_bn_resblk64_b \n    name: \"sum_CC_sum_bn_resblk64_b\" \n    type: \"Eltwise\" \n    bottom: \"CC_sum_bn_resblk64_b\" \n    bottom: \"bn_resblk64_1_b\" \n    top: \"sum_bn_resblk64_1_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_1_b \n  name: \"relu_sum_bn_resblk64_1_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_1_b\" \n  top: \"sum_bn_resblk64_1_b\" \n} \nlayer { # resblk64_2 \n  name: \"resblk64_2\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_1_b\" \n  top: \"resblk64_2\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_2 \n  name: \"batchNorm_resblk64_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2\" \n  top: \"bn_resblk64_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_2 \n  name: \"batchNorm_resblk64_2\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2\" \n  top: \"bn_resblk64_2\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_2 \n  name: \"scale_resblk64_2\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_2\" \n  top: 
\"bn_resblk64_2\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_2 \n  name: \"relu_bn_resblk64_2\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_2\" \n  top: \"bn_resblk64_2\" \n} \nlayer { # resblk64_2_b \n  name: \"resblk64_2_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_2\" \n  top: \"resblk64_2_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_2_b \n  name: \"batchNorm_resblk64_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2_b\" \n  top: \"bn_resblk64_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_2_b \n  name: \"batchNorm_resblk64_2_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_2_b\" \n  top: \"bn_resblk64_2_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_2_b \n  name: \"scale_resblk64_2_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_2_b\" \n  top: \"bn_resblk64_2_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_1_b \n    name: \"sum_sum_bn_resblk64_1_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_1_b\" \n    bottom: \"bn_resblk64_2_b\" \n    top: \"sum_bn_resblk64_2_b\" \n    eltwise_param { 
\n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_2_b \n  name: \"relu_sum_bn_resblk64_2_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_2_b\" \n  top: \"sum_bn_resblk64_2_b\" \n} \nlayer { # resblk64_3 \n  name: \"resblk64_3\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_2_b\" \n  top: \"resblk64_3\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_3 \n  name: \"batchNorm_resblk64_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3\" \n  top: \"bn_resblk64_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_3 \n  name: \"batchNorm_resblk64_3\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3\" \n  top: \"bn_resblk64_3\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_3 \n  name: \"scale_resblk64_3\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_3\" \n  top: \"bn_resblk64_3\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_3 \n  name: \"relu_bn_resblk64_3\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_3\" \n  top: \"bn_resblk64_3\" \n} \nlayer { # resblk64_3_b \n  name: \"resblk64_3_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_3\" \n  top: 
\"resblk64_3_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_3_b \n  name: \"batchNorm_resblk64_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3_b\" \n  top: \"bn_resblk64_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_3_b \n  name: \"batchNorm_resblk64_3_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_3_b\" \n  top: \"bn_resblk64_3_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_3_b \n  name: \"scale_resblk64_3_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_3_b\" \n  top: \"bn_resblk64_3_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_2_b \n    name: \"sum_sum_bn_resblk64_2_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_2_b\" \n    bottom: \"bn_resblk64_3_b\" \n    top: \"sum_bn_resblk64_3_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_3_b \n  name: \"relu_sum_bn_resblk64_3_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_3_b\" \n  top: \"sum_bn_resblk64_3_b\" \n} \nlayer { # resblk64_4 \n  name: \"resblk64_4\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_3_b\" \n  top: 
\"resblk64_4\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_4 \n  name: \"batchNorm_resblk64_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4\" \n  top: \"bn_resblk64_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_4 \n  name: \"batchNorm_resblk64_4\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4\" \n  top: \"bn_resblk64_4\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_4 \n  name: \"scale_resblk64_4\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_4\" \n  top: \"bn_resblk64_4\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_4 \n  name: \"relu_bn_resblk64_4\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_4\" \n  top: \"bn_resblk64_4\" \n} \nlayer { # resblk64_4_b \n  name: \"resblk64_4_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_4\" \n  top: \"resblk64_4_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" 
\n    } \n  } \n} \nlayer { # bn_resblk64_4_b \n  name: \"batchNorm_resblk64_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4_b\" \n  top: \"bn_resblk64_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_4_b \n  name: \"batchNorm_resblk64_4_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_4_b\" \n  top: \"bn_resblk64_4_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_4_b \n  name: \"scale_resblk64_4_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_4_b\" \n  top: \"bn_resblk64_4_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_3_b \n    name: \"sum_sum_bn_resblk64_3_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_3_b\" \n    bottom: \"bn_resblk64_4_b\" \n    top: \"sum_bn_resblk64_4_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_4_b \n  name: \"relu_sum_bn_resblk64_4_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_4_b\" \n  top: \"sum_bn_resblk64_4_b\" \n} \nlayer { # resblk64_5 \n  name: \"resblk64_5\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_4_b\" \n  top: \"resblk64_5\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  
} \n} \nlayer { # bn_resblk64_5 \n  name: \"batchNorm_resblk64_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5\" \n  top: \"bn_resblk64_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_5 \n  name: \"batchNorm_resblk64_5\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5\" \n  top: \"bn_resblk64_5\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_5 \n  name: \"scale_resblk64_5\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_5\" \n  top: \"bn_resblk64_5\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_5 \n  name: \"relu_bn_resblk64_5\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_5\" \n  top: \"bn_resblk64_5\" \n} \nlayer { # resblk64_5_b \n  name: \"resblk64_5_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_5\" \n  top: \"resblk64_5_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_5_b \n  name: \"batchNorm_resblk64_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5_b\" \n  top: \"bn_resblk64_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  
  include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_5_b \n  name: \"batchNorm_resblk64_5_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_5_b\" \n  top: \"bn_resblk64_5_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_5_b \n  name: \"scale_resblk64_5_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_5_b\" \n  top: \"bn_resblk64_5_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_4_b \n    name: \"sum_sum_bn_resblk64_4_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_4_b\" \n    bottom: \"bn_resblk64_5_b\" \n    top: \"sum_bn_resblk64_5_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_5_b \n  name: \"relu_sum_bn_resblk64_5_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_5_b\" \n  top: \"sum_bn_resblk64_5_b\" \n} \nlayer { # resblk64_6 \n  name: \"resblk64_6\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_5_b\" \n  top: \"resblk64_6\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_6 \n  name: \"batchNorm_resblk64_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6\" \n  top: \"bn_resblk64_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    
phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_6 \n  name: \"batchNorm_resblk64_6\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6\" \n  top: \"bn_resblk64_6\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_6 \n  name: \"scale_resblk64_6\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_6\" \n  top: \"bn_resblk64_6\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_6 \n  name: \"relu_bn_resblk64_6\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_6\" \n  top: \"bn_resblk64_6\" \n} \nlayer { # resblk64_6_b \n  name: \"resblk64_6_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_6\" \n  top: \"resblk64_6_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_6_b \n  name: \"batchNorm_resblk64_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6_b\" \n  top: \"bn_resblk64_6_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_6_b \n  name: \"batchNorm_resblk64_6_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_6_b\" \n  top: \"bn_resblk64_6_b\" \n  param { \n    lr_mult: 0 \n    
decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_6_b \n  name: \"scale_resblk64_6_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_6_b\" \n  top: \"bn_resblk64_6_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_5_b \n    name: \"sum_sum_bn_resblk64_5_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_5_b\" \n    bottom: \"bn_resblk64_6_b\" \n    top: \"sum_bn_resblk64_6_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_6_b \n  name: \"relu_sum_bn_resblk64_6_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_6_b\" \n  top: \"sum_bn_resblk64_6_b\" \n} \nlayer { # resblk64_7 \n  name: \"resblk64_7\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_6_b\" \n  top: \"resblk64_7\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_7 \n  name: \"batchNorm_resblk64_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7\" \n  top: \"bn_resblk64_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_7 \n  name: \"batchNorm_resblk64_7\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7\" \n  top: \"bn_resblk64_7\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  
param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_7 \n  name: \"scale_resblk64_7\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_7\" \n  top: \"bn_resblk64_7\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_7 \n  name: \"relu_bn_resblk64_7\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_7\" \n  top: \"bn_resblk64_7\" \n} \nlayer { # resblk64_7_b \n  name: \"resblk64_7_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_7\" \n  top: \"resblk64_7_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_7_b \n  name: \"batchNorm_resblk64_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7_b\" \n  top: \"bn_resblk64_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_7_b \n  name: \"batchNorm_resblk64_7_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_7_b\" \n  top: \"bn_resblk64_7_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_7_b \n  name: 
\"scale_resblk64_7_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_7_b\" \n  top: \"bn_resblk64_7_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_6_b \n    name: \"sum_sum_bn_resblk64_6_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_6_b\" \n    bottom: \"bn_resblk64_7_b\" \n    top: \"sum_bn_resblk64_7_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_7_b \n  name: \"relu_sum_bn_resblk64_7_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_7_b\" \n  top: \"sum_bn_resblk64_7_b\" \n} \nlayer { # resblk64_8 \n  name: \"resblk64_8\" \n  type: \"Convolution\" \n  bottom: \"sum_bn_resblk64_7_b\" \n  top: \"resblk64_8\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_8 \n  name: \"batchNorm_resblk64_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8\" \n  top: \"bn_resblk64_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_8 \n  name: \"batchNorm_resblk64_8\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8\" \n  top: \"bn_resblk64_8\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_8 \n  name: \"scale_resblk64_8\" \n  
type: \"Scale\" \n  bottom: \"bn_resblk64_8\" \n  top: \"bn_resblk64_8\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # relu_bn_resblk64_8 \n  name: \"relu_bn_resblk64_8\" \n  type: \"ReLU\" \n  bottom: \"bn_resblk64_8\" \n  top: \"bn_resblk64_8\" \n} \nlayer { # resblk64_8_b \n  name: \"resblk64_8_b\" \n  type: \"Convolution\" \n  bottom: \"bn_resblk64_8\" \n  top: \"resblk64_8_b\" \n  param { \n    lr_mult: 1 \n    decay_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n    decay_mult: 1 \n  } \n  convolution_param { \n    num_output: 64 \n    pad: 1 \n    kernel_size: 3 \n    stride: 1 \n    weight_filler { \n      type: \"msra\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # bn_resblk64_8_b \n  name: \"batchNorm_resblk64_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8_b\" \n  top: \"bn_resblk64_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TRAIN\n  }\n  batch_norm_param { \n    use_global_stats: false      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # bn_resblk64_8_b \n  name: \"batchNorm_resblk64_8_b\" \n  type: \"BatchNorm\" \n  bottom: \"resblk64_8_b\" \n  top: \"bn_resblk64_8_b\" \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n  param { \n    lr_mult: 0 \n    decay_mult: 0 \n  } \n    include {\n    phase: TEST\n  }\n  batch_norm_param { \n    use_global_stats: true      \n    moving_average_fraction: 0.999\n  } \n} \nlayer { # scale_resblk64_8_b \n  name: \"scale_resblk64_8_b\" \n  type: \"Scale\" \n  bottom: \"bn_resblk64_8_b\" \n  top: \"bn_resblk64_8_b\" \n  scale_param { \n    bias_term: true \n  } \n} \nlayer { # sum_sum_bn_resblk64_7_b \n    name: \"sum_sum_bn_resblk64_7_b\" \n    type: \"Eltwise\" \n    bottom: \"sum_bn_resblk64_7_b\" \n    bottom: \"bn_resblk64_8_b\" \n 
   top: \"sum_bn_resblk64_8_b\" \n    eltwise_param { \n    operation: SUM \n    } \n} \nlayer { # relu_sum_bn_resblk64_8_b \n  name: \"relu_sum_bn_resblk64_8_b\" \n  type: \"ReLU\" \n  bottom: \"sum_bn_resblk64_8_b\" \n  top: \"sum_bn_resblk64_8_b\" \n} \nlayer { # pool_resblk64_8 \n  name: \"avePooling_resblk64_8\" \n  type: \"Pooling\" \n  bottom: \"sum_bn_resblk64_8_b\" \n  top: \"avgPool_resblk64_8\" \n  pooling_param { \n    pool: AVE \n    kernel_size: 8 \n    stride: 1 \n  } \n} \nlayer { # FC_final \n  name: \"FC_final\" \n  type: \"InnerProduct\" \n  bottom: \"avgPool_resblk64_8\" \n  top: \"FC_final\" \n  param { \n    lr_mult: 1 \n  } \n  param { \n    lr_mult: 2 \n  } \n  inner_product_param { \n    num_output: 10 \n    weight_filler { \n      type: \"xavier\" \n    } \n    bias_filler { \n      type: \"constant\" \n    } \n  } \n} \nlayer { # accuracy \n  name: \"accuracy\" \n  type: \"Accuracy\" \n  bottom: \"FC_final\" \n  bottom: \"label\" \n  top: \"accuracy\" \n} \nlayer { # loss \n  name: \"loss\" \n  type: \"SoftmaxWithLoss\" \n  bottom: \"FC_final\" \n  bottom: \"label\" \n  top: \"loss\" \n} \n"
  },
  {
    "path": "clrsolver.prototxt",
    "content": "net: \"examples/sc/architectures/arch.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Dropout.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar-mod.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar_changeBatchNorm.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar_changePooling.prototxt\"\n#net: \"examples/sc/architectures/bottleneckResnet56.prototxt\"\n#net: \"examples/sc/architectures/Resnet20Cifar.prototxt\"\n#net: \"examples/sc/architectures/resnet20bn.prototxt\"\n#net: \"examples/sc/architectures/ResNeXt56.prototxt\"\n#net: \"examples/sc/architectures/Resnet20Cifar.prototxt\"\n#\ntest_iter: 200\ntest_interval: 100\ndisplay: 100\nlr_policy: \"triangular\" \nbase_lr: 0.1 \nmax_lr:  3.0\nstepsize: 5000\nmax_iter: 10000 \nsnapshot: 20000\nsnapshot_prefix: \"examples/sc/snapshots/fig1a\"\nsolver_mode: GPU\nweight_decay: 1e-4\nmomentum: 0.9\n#type:  \"Nesterov\" # \"RMSProp\" # \"Adam\" #   \"AdaGrad\"  #\n"
  },
  {
    "path": "lrRangeSolver.prototxt",
    "content": "net: \"examples/sc/architectures/arch.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar.prototxt\"\n#net: \"examples/sc/architectures/Resnet20Cifar.prototxt\"\n#\n#net: \"examples/sc/architectures/Resnet56Dropout.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar-mod.prototxt\"\n#net: \"examples/sc/architectures/Resnet56Cifar_changePooling.prototxt\"\n#net: \"examples/sc/architectures/bottleneckResnet56.prototxt\"\n#net: \"examples/sc/architectures/resnet20bn.prototxt\"\n#net: \"examples/sc/architectures/ResNeXt56.prototxt\"\ntest_iter: 200\ntest_interval: 100\ndisplay: 100\nlr_policy: \"triangular\" \nbase_lr: 0 # 0.00001\nmax_lr: 3\nstepsize: 20000\nmax_iter: 20000 \nsnapshot: 100000\nsnapshot_prefix: \"examples/sc/snapshots/Fig2b\"\nsolver_mode: GPU\nweight_decay: 1e-4\nmomentum: 0.9\n# type: \"AdaGrad\"  # \"Nesterov\" # \"RMSProp\" # \"Adam\" #  \n"
  },
  {
    "path": "solver.prototxt",
    "content": "net: \"examples/superconvergence/architectures/arch.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet56Cifar.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet110Cifar.prototxt\"\n#\n#net: \"examples/superconvergence/architectures/CifarResNet-56.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet56Dropout.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet56Cifar-mod.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet56Cifar_changeBatchNorm.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet56Cifar_changePooling.prototxt\"\n#net: \"examples/superconvergence/architectures/bottleneckResnet56.prototxt\"\n#net: \"examples/superconvergence/architectures/resnet20bn.prototxt\"\n#net: \"examples/superconvergence/architectures/ResNeXt56.prototxt\"\n#net: \"examples/superconvergence/architectures/Resnet20Cifar.prototxt\"\n\ntest_iter: 200\ntest_interval: 100\ndisplay: 100\nbase_lr: 0.35 \nmax_iter: 80000 \nlr_policy: \"multistep\" \ngamma: 0.1\nmomentum: 0.9\nweight_decay: 1e-4\nsnapshot: 100000\nsnapshot_prefix: \"examples/superconvergence/snapshots/lr35Fig1a\"\nsolver_mode: GPU\nstepvalue: 50000\nstepvalue: 70000\n#type:  \"Adam\" #  \"AdaDelta\"  # \"AdaGrad\"  # \"Nesterov\" # \"RMSProp\" #\n\n"
  },
  {
    "path": "train.sh",
    "content": "#!/usr/bin/env sh\n\n\nTOOLS=./build/tools\n\n$TOOLS/caffe train  --solver=examples/largeLR/solver.prototxt  \\\n    --gpu=all 2>&1 | tee examples/largeLR/results/norm\n\ngrep 'Test net output #0:' examples/largeLR/results/norm  > examples/largeLR/results/plt/norm.test.acc.plt\ngrep 'Test net output #1:' examples/largeLR/results/norm  > examples/largeLR/results/plt/norm.test.loss.plt\ngrep 'Train net output #0:' examples/largeLR/results/norm  > examples/largeLR/results/plt/norm.train.acc.plt\ngrep 'Train net output #1:' examples/largeLR/results/norm  > examples/largeLR/results/plt/norm.train.loss.plt\n\nexit\n"
  },
  {
    "path": "x.sh",
    "content": "#!/usr/bin/env sh\n\n# shorter jobs: FIG1b,  TAB1a, TAB1b, FIG10a, FIG10b, FIG11a, FIG11b\n# long jobs: FIG1a, FIG2b, FIG4a, FIG4b,TAB1c, FIG6b, FIG8, FIG9\nFIG=FIG8\n\nif [ $FIG == FIG1a ]; then\n\n   cp qsub.pbs xqsub.pbs\n#   sed s/train.sh/xtrain.sh/g  qsub.pbs > xqsub.pbs\n#   sed s/solver.prototxt/xsolver.prototxt/g train.sh > xtrain.sh\n#   sed s/Resnet56Cifar.prototxt/arch.prototxt/g solver.prototxt > xsolver.prototxt\n   cp train.sh  xtrain.sh\n   cp solver.prototxt xsolver.prototxt\n   cp architectures/Resnet56Cifar.prototxt architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kFig1a/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kFig1a/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG1b ]; then\n#  Different step sizes\n\n   sed s/Fig1a/Fig1b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS10kFig1b/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS10kFig1b/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/20000/g xsolver.prototxt > tmp\n   sed s/5000/10000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS3kFig1b/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS3kFig1b/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/6000/g xsolver.prototxt > tmp\n   sed s/5000/3000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS1kFig1b/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS1kFig1b/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/2000/g xsolver.prototxt > tmp\n   sed s/5000/1000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == 
FIG2b ]; then\n\n   sed s/Fig1a/Fig2b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/range3Iter100kFig2b/g train.sh > xtrain.sh\n   sed s/Fig2b/range3Iter100kFig2b/g lrRangeSolver.prototxt > tmp\n   sed s/20000/100000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/range3Iter20kFig2b/g train.sh > xtrain.sh\n   sed s/Fig2b/range3Iter20kFig2b/g lrRangeSolver.prototxt > tmp\n   sed s/20000/20000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/range3Iter5kFig2b/g train.sh > xtrain.sh\n   sed s/Fig2b/range3Iter5kFig2b/g lrRangeSolver.prototxt > tmp\n   sed s/20000/5000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG4a ]; then\n#  Show regularization\n\n   sed s/Fig1a/Fig4a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/range2SS80kRes56wd0/g train.sh > xtrain.sh\n   sed s/Fig2b/range2SS80kRes56wd0/g lrRangeSolver.prototxt > xsolver.prototxt\n   sed s/'max_lr: 3'/'max_lr: 2'/g xsolver.prototxt > tmp\n   sed s/20000/80000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'use_global_stats: true'/'use_global_stats: false'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/range2SS80kRes56LR/g train.sh > xtrain.sh\n   sed s/Fig2b/range2SS80kRes56LR/g lrRangeSolver.prototxt > xsolver.prototxt\n   sed s/'max_lr: 3'/'max_lr: 2'/g xsolver.prototxt > tmp\n   sed s/20000/80000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  ../largeLR/architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'use_global_stats: true'/'use_global_stats: false'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n\n\nelif [ $FIG == FIG4b ]; then\n#  Show regularization 
for Res110 and Res20\n\n   sed s/Fig1a/Fig4a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/range2SS80kRes56/g train.sh > xtrain.sh\n   sed s/Fig2b/range2SS80kRes56/g lrRangeSolver.prototxt > xsolver.prototxt\n   sed s/'max_lr: 3'/'max_lr: 2'/g xsolver.prototxt > tmp\n   sed s/20000/80000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56DM1.prototxt > tmp\n   sed s/'use_global_stats: true'/'use_global_stats: false'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/Fig1a/Fig4b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/range2Res110Fig4b/g train.sh > xtrain.sh\n   sed s/Fig2b/range2Res110Fig4b/g lrRangeSolver.prototxt > xsolver.prototxt\n   sed s/'max_lr: 3'/'max_lr: 2'/g  xsolver.prototxt > tmp\n   sed s/20000/80000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet110Cifar.prototxt > tmp\n   sed s/125/100/g  tmp  > architectures/arch.prototxt\n   qsub xqsub.pbs\n\n   sleep 30\n\n   sed s/Fig1a/Fig4b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/range2Res20Fig4b/g train.sh > xtrain.sh\n   sed s/Fig2b/range2Res20Fig4b/g lrRangeSolver.prototxt > xsolver.prototxt\n   sed s/'max_lr: 3'/'max_lr: 2'/g xsolver.prototxt > tmp\n   sed s/20000/80000/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet20Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == TAB1a ]; then\n#  CLR with a range of max_lr\n\n   sed s/Fig1a/Tab1a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr35SS5kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr35SS5kTab1/g clrsolver.prototxt > tmp\n   sed s/'max_lr:  3.0'/'max_lr:  3.5'/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr25SS5kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr25SS5kTab1/g clrsolver.prototxt > tmp\n   sed s/'max_lr:  3.0'/'max_lr:  2.5'/g tmp > xsolver.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed 
s/lr35Fig1a/clr2SS5kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr2SS5kTab1/g clrsolver.prototxt > tmp\n   sed s/'max_lr:  3.0'/'max_lr:  2.'/g tmp > xsolver.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr15SS5kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr15SS5kTab1/g clrsolver.prototxt > tmp\n   sed s/'max_lr:  3.0'/'max_lr:  1.5'/g tmp > xsolver.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/Fig1a/Tab1a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr1SS5kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr1SS5kTab1/g clrsolver.prototxt > tmp\n   sed s/'max_lr:  3.0'/'max_lr:  1.0'/g tmp > xsolver.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == TAB1b ]; then\n#  CLR with different step sizes\n\n   sed s/Fig1a/Tab1b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS10kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS10kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/20000/g xsolver.prototxt > tmp\n   sed s/5000/10000/g tmp > xsolver.prototxt\n   sed s/0.999/0.97/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS75kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS75kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/15000/g xsolver.prototxt > tmp\n   sed s/5000/7500/g tmp > xsolver.prototxt\n   sed s/0.999/0.96/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS4kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS4kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/8000/g xsolver.prototxt > tmp\n   sed s/5000/4000/g tmp > xsolver.prototxt\n   sed s/0.999/0.93/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS3kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS3kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/6000/g xsolver.prototxt > tmp\n   sed s/5000/3000/g tmp > 
xsolver.prototxt\n   sed s/0.999/0.90/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS2kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS2kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/4000/g xsolver.prototxt > tmp\n   sed s/5000/2000/g tmp > xsolver.prototxt\n   sed s/0.999/0.85/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS1kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS1kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/10000/2000/g xsolver.prototxt > tmp\n   sed s/5000/1000/g tmp > xsolver.prototxt\n   sed s/0.999/0.80/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == TAB1c ]; then\n#  Limited number of training data\n\n   sed s/Fig1a/Tab1c/g  qsub.pbs > xqsub.pbs\n\n   sed s/lr35Fig1a/lr35Tr40kTab1/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Tr40kTab1/g solver.prototxt > xsolver.prototxt\n   sed s/'train_lmdb'/'train40k_lmdb'/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35Tr30kTab1/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Tr30kTab1/g solver.prototxt > xsolver.prototxt\n   sed s/'train_lmdb'/'train30k_lmdb'/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35Tr20kTab1/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Tr20kTab1/g solver.prototxt > xsolver.prototxt\n   sed s/'train_lmdb'/'train20k_lmdb'/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35Tr10kTab1/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Tr10kTab1/g solver.prototxt > xsolver.prototxt\n   sed s/'train_lmdb'/'train10k_lmdb'/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   
sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kTr40kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kTr40kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'train_lmdb'/'train40k_lmdb'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/Fig1a/Tab1c/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kTr30kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kTr30kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'train_lmdb'/'train30k_lmdb'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/Fig1a/Tab1c/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kTr20kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kTr20kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'train_lmdb'/'train20k_lmdb'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/Fig1a/Tab1c/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kTr10kTab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kTr10kTab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/'train_lmdb'/'train10k_lmdb'/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\nelif [ $FIG == FIG6b ]; then\n# LR vs CLR for Res110 and Res20\n   sed s/Fig1a/Fig6b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/lr35Res110Fig6b/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Res110Fig6b/g solver.prototxt > xsolver.prototxt\n   sed s/125/100/g  architectures/Resnet110Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kRes110Tab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kRes110Tab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/125/100/g  architectures/Resnet110Cifar.prototxt > tmp\n   sed s/0.999/0.95/g  tmp > 
architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\nexit\n   sed s/lr35Fig1a/lr35Res20Fig6b/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Res20Fig6b/g solver.prototxt > xsolver.prototxt\n   cp  architectures/Resnet20Cifar.prototxt architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kRes20Tab1/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kRes20Tab1/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet20Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG8 ]; then\n# Cifar 100\n\n   sed s/Fig1a/Fig8/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/lr35Cifar100Fig8/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35Cifar100Fig8/g solver.prototxt > xsolver.prototxt\n   sed s/10/100/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kCifar100Fig8/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kCifar100Fig8/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/10/100/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/range3Cifar100kFig8/g train.sh > xtrain.sh\n   sed s/Fig2b/range3Cifar100kFig8/g lrRangeSolver.prototxt > xsolver.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG9 ]; then\n# Adaptive LRs\n\n   sed s/Fig1a/Fig9a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/lr35AdaGradFig9/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35AdaGradFig9/g solver.prototxt > xsolver.prototxt\n   sed s/'#type:'/'type: \"AdaGrad\"  #'/g  xsolver.prototxt > tmp\n   sed s/momentum/'#momentum'/g  tmp > xsolver.prototxt \n   cp  architectures/Resnet56Cifar.prototxt architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35NestFig9/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35NestFig9/g solver.prototxt > xsolver.prototxt\n   sed s/'#type'/type/g  xsolver.prototxt > tmp\n   sed 
s/Adam/Nesterov/g  tmp > xsolver.prototxt \n   cp  architectures/Resnet56Cifar.prototxt architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35AdamFig9/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35AdamFig9/g solver.prototxt > xsolver.prototxt\n   sed s/'#type'/type/g  xsolver.prototxt > tmp\n   sed s/'base_lr: 0.35'/'base_lr: 0.0035'/g  tmp > xsolver.prototxt \n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/lr35AdaDeltaFig9/g train.sh > xtrain.sh\n   sed s/lr35Fig1a/lr35AdaDeltaFig9/g solver.prototxt > tmp\n   sed s/'#type'/type/g  tmp > xsolver.prototxt\n   sed s/Adam/AdaDelta/g  xsolver.prototxt > tmp\n   sed s/'momentum'/'#momentum'/g  tmp > xsolver.prototxt \n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kNestFig9/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kNestFig9/g clrsolver.prototxt > xsolver.prototxt\n   sed s/'#type'/type/g  xsolver.prototxt > tmp\n   sed s/Adam/Nesterov/g  tmp > xsolver.prototxt \n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   \nelif [ $FIG == FIG10a ]; then\n# Batch sizes\n\n   sed s/Fig1a/Fig10a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kBS1536Fig10/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kBS1536Fig10/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/125/192/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kBS512Fig10/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kBS512Fig10/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/125/64/g  tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kBS256Fig10/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kBS256Fig10/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > tmp\n   sed s/125/32/g  
tmp > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG10b ]; then\n# Dropout\n\n   sed s/Fig1a/Fig10b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kDropoutFig10/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kDropoutFig10/g clrsolver.prototxt > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Dropout.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\nelif [ $FIG == FIG11a ]; then\n# Momentum\n\n   sed s/Fig1a/Fig11a/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kMom95Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kMom95Fig11/g clrsolver.prototxt > tmp\n   sed s/'momentum: 0.9'/'momentum: 0.95'/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kMom85Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kMom85Fig11/g clrsolver.prototxt > tmp\n   sed s/'momentum: 0.9'/'momentum: 0.85'/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kMom8Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kMom8Fig11/g clrsolver.prototxt > tmp\n   sed s/'momentum: 0.9'/'momentum: 0.8'/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n\nelif [ $FIG == FIG11b ]; then\n# Weight Decay\n\n   sed s/Fig1a/Fig11b/g  qsub.pbs > xqsub.pbs\n   sed s/lr35Fig1a/clr3SS5kWD-3Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kWD-3Fig11/g clrsolver.prototxt > tmp\n   sed s/'1e-4'/'1e-3'/g tmp > xsolver.prototxt\n   sed s/0.999/0.95/g  architectures/Resnet56Cifar.prototxt > architectures/arch.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kWD-5Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kWD-5Fig11/g clrsolver.prototxt > tmp\n   sed s/'1e-4'/'1e-5'/g tmp > 
xsolver.prototxt\n   qsub xqsub.pbs\n   sleep 30\n\n   sed s/lr35Fig1a/clr3SS5kWD-6Fig11/g train.sh > xtrain.sh\n   sed s/fig1a/clr3SS5kWD-6Fig11/g clrsolver.prototxt > tmp\n   sed s/'1e-4'/'1e-6'/g tmp > xsolver.prototxt\n   qsub xqsub.pbs\n\n\n   \nfi\n\nqstat\n\nexit\n"
  }
]